diff --git a/.gitignore b/.gitignore
index 06afdea2..6de4615d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
-dbdpy-dev
\ No newline at end of file
+dbdpy-dev
+data
diff --git a/dbdpy-env/bin/f2py b/dbdpy-env/bin/f2py
new file mode 100755
index 00000000..f8761f37
--- /dev/null
+++ b/dbdpy-env/bin/f2py
@@ -0,0 +1,8 @@
+#!/Users/billchen/Desktop/dbdpy/dbdpy-env/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from numpy.f2py.f2py2e import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
diff --git a/dbdpy-env/bin/pip3.11 b/dbdpy-env/bin/pip3.11
new file mode 100755
index 00000000..9d550cfd
--- /dev/null
+++ b/dbdpy-env/bin/pip3.11
@@ -0,0 +1,8 @@
+#!/Users/billchen/Desktop/dbdpy/dbdpy-env/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from pip._internal.cli.main import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
diff --git a/dbdpy-env/lib/python3.9/site-packages/pip-21.2.4.dist-info/INSTALLER b/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/INSTALLER
similarity index 100%
rename from dbdpy-env/lib/python3.9/site-packages/pip-21.2.4.dist-info/INSTALLER
rename to dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/INSTALLER
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/LICENSE.txt b/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/LICENSE.txt
new file mode 100644
index 00000000..c0afd2f3
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/LICENSE.txt
@@ -0,0 +1,975 @@
+Copyright (c) 2005-2023, NumPy Developers.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+
+    * Neither the name of the NumPy Developers nor the names of any
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----
+
+The NumPy repository and source distributions bundle several libraries that are
+compatibly licensed. We list these here.
+
+Name: lapack-lite
+Files: numpy/linalg/lapack_lite/*
+License: BSD-3-Clause
+  For details, see numpy/linalg/lapack_lite/LICENSE.txt
+
+Name: tempita
+Files: tools/npy_tempita/*
+License: MIT
+  For details, see tools/npy_tempita/license.txt
+
+Name: dragon4
+Files: numpy/core/src/multiarray/dragon4.c
+License: MIT
+  For license text, see numpy/core/src/multiarray/dragon4.c
+
+Name: libdivide
+Files: numpy/core/include/numpy/libdivide/*
+License: Zlib
+  For license text, see numpy/core/include/numpy/libdivide/LICENSE.txt
+
+
+Note that the following files are vendored in the repository and sdist but not
+installed in built numpy packages:
+
+Name: Meson
+Files: vendored-meson/meson/*
+License: Apache 2.0
+  For license text, see vendored-meson/meson/COPYING
+
+Name: meson-python
+Files: vendored-meson/meson-python/*
+License: MIT
+  For license text, see vendored-meson/meson-python/LICENSE
+
+Name: spin
+Files: .spin/cmds.py
+License: BSD-3
+  For license text, see .spin/LICENSE
+
+----
+
+This binary distribution of NumPy also bundles the following software:
+
+Name: OpenBLAS
+Files: numpy/.dylibs/libopenblas*.so
+Description: bundled as a dynamically linked library
+Availability: https://github.com/OpenMathLib/OpenBLAS/
+License: BSD-3-Clause
+  Copyright (c) 2011-2014, The OpenBLAS Project
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are
+  met:
+
+  1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+  2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in
+     the documentation and/or other materials provided with the
+     distribution.
+  3. Neither the name of the OpenBLAS project nor the names of
+     its contributors may be used to endorse or promote products
+     derived from this software without specific prior written
+     permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+  SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+  OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Name: LAPACK
+Files: numpy/.dylibs/libopenblas*.so
+Description: bundled in OpenBLAS
+Availability: https://github.com/OpenMathLib/OpenBLAS/
+License: BSD-3-Clause-Attribution
+  Copyright (c) 1992-2013 The University of Tennessee and The University
+                          of Tennessee Research Foundation. All rights
+                          reserved.
+  Copyright (c) 2000-2013 The University of California Berkeley. All
+                          rights reserved.
+  Copyright (c) 2006-2013 The University of Colorado Denver. All rights
+                          reserved.
+
+  $COPYRIGHT$
+
+  Additional copyrights may follow
+
+  $HEADER$
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are
+  met:
+
+  - Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+  - Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer listed
+    in this license in the documentation and/or other materials
+    provided with the distribution.
+
+  - Neither the name of the copyright holders nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+  The copyright holders provide no reassurances that the source code
+  provided does not infringe any patent, copyright, or any other
+  intellectual property rights of third parties. The copyright holders
+  disclaim any liability to any recipient for claims brought against
+  recipient by any third party for infringement of that parties
+  intellectual property rights.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Name: GCC runtime library
+Files: numpy/.dylibs/libgfortran*, numpy/.dylibs/libgcc*
+Description: dynamically linked to files compiled with gcc
+Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
+License: GPL-3.0-with-GCC-exception
+  Copyright (C) 2002-2017 Free Software Foundation, Inc.
+
+  Libgfortran is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3, or (at your option)
+  any later version.
+
+  Libgfortran is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU General Public License for more details.
+
+  Under Section 7 of GPL version 3, you are granted additional
+  permissions described in the GCC Runtime Library Exception, version
+  3.1, as published by the Free Software Foundation.
+
+  You should have received a copy of the GNU General Public License and
+  a copy of the GCC Runtime Library Exception along with this program;
+  see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+  <http://www.gnu.org/licenses/>.
+
+----
+
+Full text of license texts referred to above follows (that they are
+listed below does not necessarily imply the conditions apply to the
+present binary release):
+
+----
+
+GCC RUNTIME LIBRARY EXCEPTION
+
+Version 3.1, 31 March 2009
+
+Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
+ +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception. + +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program. The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library. + +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. + +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC. For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + +---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. 
+ + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. 
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+
+Name: libquadmath
+Files: numpy/.dylibs/libquadmath*.so
+Description: dynamically linked to files compiled with gcc
+Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
+License: LGPL-2.1-or-later
+
+  GCC Quad-Precision Math Library
+  Copyright (C) 2010-2019 Free Software Foundation, Inc.
+  Written by Francois-Xavier Coudert
+
+  This file is part of the libquadmath library.
+  Libquadmath is free software; you can redistribute it and/or
+  modify it under the terms of the GNU Library General Public
+  License as published by the Free Software Foundation; either
+  version 2.1 of the License, or (at your option) any later version.
+
+  Libquadmath is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+  Lesser General Public License for more details.
+  https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/METADATA b/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/METADATA
new file mode 100644
index 00000000..ef512dc9
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/METADATA
@@ -0,0 +1,1096 @@
+Metadata-Version: 2.1
+Name: numpy
+Version: 1.26.3
+Summary: Fundamental package for array computing in Python
+Home-page: https://numpy.org
+Author: Travis E. Oliphant et al.
+Maintainer-Email: NumPy Developers <numpy-discussion@python.org>
+License: Copyright (c) 2005-2023, NumPy Developers.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of the NumPy Developers nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ---- + + The NumPy repository and source distributions bundle several libraries that are + compatibly licensed. We list these here. + + Name: lapack-lite + Files: numpy/linalg/lapack_lite/* + License: BSD-3-Clause + For details, see numpy/linalg/lapack_lite/LICENSE.txt + + Name: tempita + Files: tools/npy_tempita/* + License: MIT + For details, see tools/npy_tempita/license.txt + + Name: dragon4 + Files: numpy/core/src/multiarray/dragon4.c + License: MIT + For license text, see numpy/core/src/multiarray/dragon4.c + + Name: libdivide + Files: numpy/core/include/numpy/libdivide/* + License: Zlib + For license text, see numpy/core/include/numpy/libdivide/LICENSE.txt + + + Note that the following files are vendored in the repository and sdist but not + installed in built numpy packages: + + Name: Meson + Files: vendored-meson/meson/* + License: Apache 2.0 + For license text, see vendored-meson/meson/COPYING + + Name: meson-python + Files: vendored-meson/meson-python/* + License: MIT + For license text, see vendored-meson/meson-python/LICENSE + + Name: spin + Files: .spin/cmds.py + License: BSD-3 + For license text, see .spin/LICENSE + + ---- + + This binary distribution of NumPy also bundles the following software: + + Name: OpenBLAS + Files: numpy/.dylibs/libopenblas*.so + Description: bundled as a dynamically linked library + Availability: https://github.com/OpenMathLib/OpenBLAS/ + License: BSD-3-Clause + Copyright (c) 2011-2014, The OpenBLAS Project + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + + Name: LAPACK + Files: numpy/.dylibs/libopenblas*.so + Description: bundled in OpenBLAS + Availability: https://github.com/OpenMathLib/OpenBLAS/ + License: BSD-3-Clause-Attribution + Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. + Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. + Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. + + $COPYRIGHT$ + + Additional copyrights may follow + + $HEADER$ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + + - Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + The copyright holders provide no reassurances that the source code + provided does not infringe any patent, copyright, or any other + intellectual property rights of third parties. The copyright holders + disclaim any liability to any recipient for claims brought against + recipient by any third party for infringement of that parties + intellectual property rights. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + Name: GCC runtime library + Files: numpy/.dylibs/libgfortran*, numpy/.dylibs/libgcc* + Description: dynamically linked to files compiled with gcc + Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran + License: GPL-3.0-with-GCC-exception + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. 
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+ <http://www.gnu.org/licenses/>.
+
+ ----
+
+ Full text of license texts referred to above follows (that they are
+ listed below does not necessarily imply the conditions apply to the
+ present binary release):
+
+ ----
+
+ GCC RUNTIME LIBRARY EXCEPTION
+
+ Version 3.1, 31 March 2009
+
+ Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
+
+ Everyone is permitted to copy and distribute verbatim copies of this
+ license document, but changing it is not allowed.
+
+ This GCC Runtime Library Exception ("Exception") is an additional
+ permission under section 7 of the GNU General Public License, version
+ 3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
+ bears a notice placed by the copyright holder of the file stating that
+ the file is governed by GPLv3 along with this Exception.
+
+ When you use GCC to compile a program, GCC may combine portions of
+ certain GCC header files and runtime libraries with the compiled
+ program. The purpose of this Exception is to allow compilation of
+ non-GPL (including proprietary) programs to use, in this way, the
+ header files and runtime libraries covered by this Exception.
+
+ 0. Definitions.
+
+ A file is an "Independent Module" if it either requires the Runtime
+ Library for execution after a Compilation Process, or makes use of an
+ interface provided by the Runtime Library, but is not otherwise based
+ on the Runtime Library.
+
+ "GCC" means a version of the GNU Compiler Collection, with or without
+ modifications, governed by version 3 (or a specified later version) of
+ the GNU General Public License (GPL) with the option of using any
+ subsequent versions published by the FSF.
+
+ "GPL-compatible Software" is software whose conditions of propagation,
+ modification and use would permit combination with GCC in accord with
+ the license of GCC.
+
+ "Target Code" refers to output from any compiler for a real or virtual
+ target processor architecture, in executable form or suitable for
+ input to an assembler, loader, linker and/or execution
+ phase. Notwithstanding that, Target Code does not include data in any
+ format that is used as a compiler intermediate representation, or used
+ for producing a compiler intermediate representation.
+
+ The "Compilation Process" transforms code entirely represented in
+ non-intermediate languages designed for human-written code, and/or in
+ Java Virtual Machine byte code, into Target Code. Thus, for example,
+ use of source code generators and preprocessors need not be considered
+ part of the Compilation Process, since the Compilation Process can be
+ understood as starting with the output of the generators or
+ preprocessors.
+
+ A Compilation Process is "Eligible" if it is done using GCC, alone or
+ with other GPL-compatible software, or if it is done without using any
+ work based on GCC. For example, using non-GPL-compatible Software to
+ optimize any GCC intermediate representations would not qualify as an
+ Eligible Compilation Process.
+
+ 1. Grant of Additional Permission.
+
+ You have permission to propagate a work of Target Code formed by
+ combining the Runtime Library with Independent Modules, even if such
+ propagation would otherwise violate the terms of GPLv3, provided that
+ all Target Code was generated by Eligible Compilation Processes.  You
+ may then convey such a combination under terms of your choice,
+ consistent with the licensing of the Independent Modules.
+
+ 2. No Weakening of GCC Copyleft.
+
+ The availability of this Exception does not imply any general
+ presumption that third-party software is unaffected by the copyleft
+ requirements of the license of GCC.
+
+ ----
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+ software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+ to take away your freedom to share and change the works. By contrast,
+ the GNU General Public License is intended to guarantee your freedom to
+ share and change all versions of a program--to make sure it remains free
+ software for all its users. We, the Free Software Foundation, use the
+ GNU General Public License for most of our software; it applies also to
+ any other work released this way by its authors. You can apply it to
+ your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+ price. Our General Public Licenses are designed to make sure that you
+ have the freedom to distribute copies of free software (and charge for
+ them if you wish), that you receive source code or can get it if you
+ want it, that you can change the software or use pieces of it in new
+ free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+ these rights or asking you to surrender the rights. Therefore, you have
+ certain responsibilities if you distribute copies of the software, or if
+ you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+ gratis or for a fee, you must pass on to the recipients the same
+ freedoms that you received. You must make sure that they, too, receive
+ or can get the source code. And you must show them these terms so they
+ know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+ (1) assert copyright on the software, and (2) offer you this License
+ giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+ that there is no warranty for this free software. For both users' and
+ authors' sake, the GPL requires that modified versions be marked as
+ changed, so that their problems will not be attributed erroneously to
+ authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+ modified versions of the software inside them, although the manufacturer
+ can do so. This is fundamentally incompatible with the aim of
+ protecting users' freedom to change the software. The systematic
+ pattern of such abuse occurs in the area of products for individuals to
+ use, which is precisely where it is most unacceptable. Therefore, we
+ have designed this version of the GPL to prohibit the practice for those
+ products. If such problems arise substantially in other domains, we
+ stand ready to extend this provision to those domains in future versions
+ of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+ States should not allow patents to restrict development and use of + software on general-purpose computers, but in those that do, we wish to + avoid the special danger that patents applied to a free program could + make it effectively proprietary. To prevent this, the GPL assures that + patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and + modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of + works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this + License. Each licensee is addressed as "you". "Licensees" and + "recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work + in a fashion requiring copyright permission, other than the making of an + exact copy. The resulting work is called a "modified version" of the + earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based + on the Program. + + To "propagate" a work means to do anything with it that, without + permission, would make you directly or secondarily liable for + infringement under applicable copyright law, except executing it on a + computer or modifying a private copy. Propagation includes copying, + distribution (with or without modification), making available to the + public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other + parties to make or receive copies. Mere interaction with a user through + a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" + to the extent that it includes a convenient and prominently visible + feature that (1) displays an appropriate copyright notice, and (2) + tells the user that there is no warranty for the work (except to the + extent that warranties are provided), that licensees may convey the + work under this License, and how to view a copy of this License. If + the interface presents a list of user commands or options, such as a + menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work + for making modifications to it. "Object code" means any non-source + form of a work. + + A "Standard Interface" means an interface that either is an official + standard defined by a recognized standards body, or, in the case of + interfaces specified for a particular programming language, one that + is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other + than the work as a whole, that (a) is included in the normal form of + packaging a Major Component, but which is not part of that Major + Component, and (b) serves only to enable use of the work with that + Major Component, or to implement a Standard Interface for which an + implementation is available to the public in source code form. A + "Major Component", in this context, means a major essential component + (kernel, window system, and so on) of the specific operating system + (if any) on which the executable work runs, or a compiler used to + produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all + the source code needed to generate, install, and (for an executable + work) run the object code and to modify the work, including scripts to + control those activities. However, it does not include the work's + System Libraries, or general-purpose tools or generally available free + programs which are used unmodified in performing those activities but + which are not part of the work. For example, Corresponding Source + includes interface definition files associated with source files for + the work, and the source code for shared libraries and dynamically + linked subprograms that the work is specifically designed to require, + such as by intimate data communication or control flow between those + subprograms and other parts of the work. + + The Corresponding Source need not include anything that users + can regenerate automatically from other parts of the Corresponding + Source. + + The Corresponding Source for a work in source code form is that + same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of + copyright on the Program, and are irrevocable provided the stated + conditions are met. This License explicitly affirms your unlimited + permission to run the unmodified Program. The output from running a + covered work is covered by this License only if the output, given its + content, constitutes a covered work. This License acknowledges your + rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not + convey, without conditions so long as your license otherwise remains + in force. You may convey covered works to others for the sole purpose + of having them make modifications exclusively for you, or provide you + with facilities for running those works, provided that you comply with + the terms of this License in conveying all material for which you do + not control copyright. Those thus making or running the covered works + for you must do so exclusively on your behalf, under your direction + and control, on terms that prohibit them from making any copies of + your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under + the conditions stated below. Sublicensing is not allowed; section 10 + makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological + measure under any applicable law fulfilling obligations under article + 11 of the WIPO copyright treaty adopted on 20 December 1996, or + similar laws prohibiting or restricting circumvention of such + measures. + + When you convey a covered work, you waive any legal power to forbid + circumvention of technological measures to the extent such circumvention + is effected by exercising rights under this License with respect to + the covered work, and you disclaim any intention to limit operation or + modification of the work as a means of enforcing, against the work's + users, your or third parties' legal rights to forbid circumvention of + technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you + receive it, in any medium, provided that you conspicuously and + appropriately publish on each copy an appropriate copyright notice; + keep intact all notices stating that this License and any + non-permissive terms added in accord with section 7 apply to the code; + keep intact all notices of the absence of any warranty; and give all + recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, + and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to + produce it from the Program, in the form of source code under the + terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent + works, which are not by their nature extensions of the covered work, + and which are not combined with it such as to form a larger program, + in or on a volume of a storage or distribution medium, is called an + "aggregate" if the compilation and its resulting copyright are not + used to limit the access or legal rights of the compilation's users + beyond what the individual works permit. Inclusion of a covered work + in an aggregate does not cause this License to apply to the other + parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms + of sections 4 and 5, provided that you also convey the + machine-readable Corresponding Source under the terms of this License, + in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded + from the Corresponding Source as a System Library, need not be + included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any + tangible personal property which is normally used for personal, family, + or household purposes, or (2) anything designed or sold for incorporation + into a dwelling. In determining whether a product is a consumer product, + doubtful cases shall be resolved in favor of coverage. For a particular + product received by a particular user, "normally used" refers to a + typical or common use of that class of product, regardless of the status + of the particular user or of the way in which the particular user + actually uses, or expects or is expected to use, the product. A product + is a consumer product regardless of whether the product has substantial + commercial, industrial or non-consumer uses, unless such uses represent + the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, + procedures, authorization keys, or other information required to install + and execute modified versions of a covered work in that User Product from + a modified version of its Corresponding Source. The information must + suffice to ensure that the continued functioning of the modified object + code is in no case prevented or interfered with solely because + modification has been made. 
+ + If you convey an object code work under this section in, or with, or + specifically for use in, a User Product, and the conveying occurs as + part of a transaction in which the right of possession and use of the + User Product is transferred to the recipient in perpetuity or for a + fixed term (regardless of how the transaction is characterized), the + Corresponding Source conveyed under this section must be accompanied + by the Installation Information. But this requirement does not apply + if neither you nor any third party retains the ability to install + modified object code on the User Product (for example, the work has + been installed in ROM). + + The requirement to provide Installation Information does not include a + requirement to continue to provide support service, warranty, or updates + for a work that has been modified or installed by the recipient, or for + the User Product in which it has been modified or installed. Access to a + network may be denied when the modification itself materially and + adversely affects the operation of the network or violates the rules and + protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, + in accord with this section must be in a format that is publicly + documented (and with an implementation available to the public in + source code form), and must require no special password or key for + unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this + License by making exceptions from one or more of its conditions. + Additional permissions that are applicable to the entire Program shall + be treated as though they were included in this License, to the extent + that they are valid under applicable law. If additional permissions + apply only to part of the Program, that part may be used separately + under those permissions, but the entire Program remains governed by + this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option + remove any additional permissions from that copy, or from any part of + it. (Additional permissions may be written to require their own + removal in certain cases when you modify the work.) You may place + additional permissions on material, added by you to a covered work, + for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you + add to a covered work, you may (if authorized by the copyright holders of + that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further + restrictions" within the meaning of section 10. If the Program as you + received it, or any part of it, contains a notice stating that it is + governed by this License along with a term that is a further + restriction, you may remove that term. If a license document contains + a further restriction but permits relicensing or conveying under this + License, you may add to a covered work material governed by the terms + of that license document, provided that the further restriction does + not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you + must place, in the relevant source files, a statement of the + additional terms that apply to those files, or a notice indicating + where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the + form of a separately written license, or stated as exceptions; + the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly + provided under this License. Any attempt otherwise to propagate or + modify it is void, and will automatically terminate your rights under + this License (including any patent licenses granted under the third + paragraph of section 11). + + However, if you cease all violation of this License, then your + license from a particular copyright holder is reinstated (a) + provisionally, unless and until the copyright holder explicitly and + finally terminates your license, and (b) permanently, if the copyright + holder fails to notify you of the violation by some reasonable means + prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is + reinstated permanently if the copyright holder notifies you of the + violation by some reasonable means, this is the first time you have + received notice of violation of this License (for any work) from that + copyright holder, and you cure the violation prior to 30 days after + your receipt of the notice. + + Termination of your rights under this section does not terminate the + licenses of parties who have received copies or rights from you under + this License. 
If your rights have been terminated and not permanently + reinstated, you do not qualify to receive new licenses for the same + material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or + run a copy of the Program. Ancillary propagation of a covered work + occurring solely as a consequence of using peer-to-peer transmission + to receive a copy likewise does not require acceptance. However, + nothing other than this License grants you permission to propagate or + modify any covered work. These actions infringe copyright if you do + not accept this License. Therefore, by modifying or propagating a + covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically + receives a license from the original licensors, to run, modify and + propagate that work, subject to this License. You are not responsible + for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an + organization, or substantially all assets of one, or subdividing an + organization, or merging organizations. If propagation of a covered + work results from an entity transaction, each party to that + transaction who receives a copy of the work also receives whatever + licenses to the work the party's predecessor in interest had or could + give under the previous paragraph, plus a right to possession of the + Corresponding Source of the work from the predecessor in interest, if + the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the + rights granted or affirmed under this License. For example, you may + not impose a license fee, royalty, or other charge for exercise of + rights granted under this License, and you may not initiate litigation + (including a cross-claim or counterclaim in a lawsuit) alleging that + any patent claim is infringed by making, using, selling, offering for + sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this + License of the Program or a work on which the Program is based. The + work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims + owned or controlled by the contributor, whether already acquired or + hereafter acquired, that would be infringed by some manner, permitted + by this License, of making, using, or selling its contributor version, + but do not include claims that would be infringed only as a + consequence of further modification of the contributor version. For + purposes of this definition, "control" includes the right to grant + patent sublicenses in a manner consistent with the requirements of + this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free + patent license under the contributor's essential patent claims, to + make, use, sell, offer for sale, import and otherwise run, modify and + propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express + agreement or commitment, however denominated, not to enforce a patent + (such as an express permission to practice a patent or covenant not to + sue for patent infringement). 
To "grant" such a patent license to a + party means to make such an agreement or commitment not to enforce a + patent against the party. + + If you convey a covered work, knowingly relying on a patent license, + and the Corresponding Source of the work is not available for anyone + to copy, free of charge and under the terms of this License, through a + publicly available network server or other readily accessible means, + then you must either (1) cause the Corresponding Source to be so + available, or (2) arrange to deprive yourself of the benefit of the + patent license for this particular work, or (3) arrange, in a manner + consistent with the requirements of this License, to extend the patent + license to downstream recipients. "Knowingly relying" means you have + actual knowledge that, but for the patent license, your conveying the + covered work in a country, or your recipient's use of the covered work + in a country, would infringe one or more identifiable patents in that + country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or + arrangement, you convey, or propagate by procuring conveyance of, a + covered work, and grant a patent license to some of the parties + receiving the covered work authorizing them to use, propagate, modify + or convey a specific copy of the covered work, then the patent license + you grant is automatically extended to all recipients of the covered + work and works based on it. + + A patent license is "discriminatory" if it does not include within + the scope of its coverage, prohibits the exercise of, or is + conditioned on the non-exercise of one or more of the rights that are + specifically granted under this License. You may not convey a covered + work if you are a party to an arrangement with a third party that is + in the business of distributing software, under which you make payment + to the third party based on the extent of your activity of conveying + the work, and under which the third party grants, to any of the + parties who would receive the covered work from you, a discriminatory + patent license (a) in connection with copies of the covered work + conveyed by you (or copies made from those copies), or (b) primarily + for and in connection with specific products or compilations that + contain the covered work, unless you entered into that arrangement, + or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting + any implied license or other defenses to infringement that may + otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot convey a + covered work so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you may + not convey it at all. For example, if you agree to terms that obligate you + to collect a royalty for further conveying from those to whom you convey + the Program, the only way you could satisfy both those terms and this + License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have + permission to link or combine any covered work with a work licensed + under version 3 of the GNU Affero General Public License into a single + combined work, and to convey the resulting work. The terms of this + License will continue to apply to the part which is the covered work, + but the special requirements of the GNU Affero General Public License, + section 13, concerning interaction through a network will apply to the + combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of + the GNU General Public License from time to time. Such new versions will + be similar in spirit to the present version, but may differ in detail to + address new problems or concerns. + + Each version is given a distinguishing version number. If the + Program specifies that a certain numbered version of the GNU General + Public License "or any later version" applies to it, you have the + option of following the terms and conditions either of that numbered + version or of any later version published by the Free Software + Foundation. If the Program does not specify a version number of the + GNU General Public License, you may choose any version ever published + by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future + versions of the GNU General Public License can be used, that proxy's + public statement of acceptance of a version permanently authorizes you + to choose that version for the Program. + + Later license versions may give you additional or different + permissions. However, no additional obligations are imposed on any + author or copyright holder as a result of your choosing to follow a + later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY + APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT + HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY + OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, + THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM + IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF + ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING + WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS + THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY + GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE + USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF + DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD + PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), + EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF + SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided + above cannot be given local legal effect according to their terms, + reviewing courts shall apply local law that most closely approximates + an absolute waiver of all civil liability in connection with the + Program, unless a warranty or assumption of liability accompanies a + copy of the Program in return for a fee. 
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+ possible use to the public, the best way to achieve this is to make it
+ free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+ to attach them to the start of each source file to most effectively
+ state the exclusion of warranty; and each file should have at least
+ the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+ Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+ notice like this when it starts in an interactive mode:
+
+ <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+ The hypothetical commands `show w' and `show c' should show the appropriate
+ parts of the General Public License. Of course, your program's commands
+ might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
+ For more information on this, and how to apply and follow the GNU GPL, see
+ <http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+ into proprietary programs. If your program is a subroutine library, you
+ may consider it more useful to permit linking proprietary applications with
+ the library. If this is what you want to do, use the GNU Lesser General
+ Public License instead of this License. But first, please read
+ <http://www.gnu.org/philosophy/why-not-lgpl.html>.
+
+ Name: libquadmath
+ Files: numpy/.dylibs/libquadmath*.so
+ Description: dynamically linked to files compiled with gcc
+ Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
+ License: LGPL-2.1-or-later
+
+ GCC Quad-Precision Math Library
+ Copyright (C) 2010-2019 Free Software Foundation, Inc.
+ Written by Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ This file is part of the libquadmath library.
+ Libquadmath is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ Libquadmath is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+ https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: C +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: Software Development +Classifier: Topic :: Scientific/Engineering +Classifier: Typing :: Typed +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Operating System :: Unix +Classifier: Operating System :: MacOS +Project-URL: Homepage, https://numpy.org +Project-URL: Documentation, https://numpy.org/doc/ +Project-URL: Source, https://github.com/numpy/numpy +Project-URL: Download, https://pypi.org/project/numpy/#files +Project-URL: Tracker, https://github.com/numpy/numpy/issues +Project-URL: Release notes, https://numpy.org/doc/stable/release +Requires-Python: >=3.9 +Description-Content-Type: text/markdown + +
+ + +[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)]( +https://numfocus.org) +[![PyPI Downloads](https://img.shields.io/pypi/dm/numpy.svg?label=PyPI%20downloads)]( +https://pypi.org/project/numpy/) +[![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/numpy.svg?label=Conda%20downloads)]( +https://anaconda.org/conda-forge/numpy) +[![Stack Overflow](https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg)]( +https://stackoverflow.com/questions/tagged/numpy) +[![Nature Paper](https://img.shields.io/badge/DOI-10.1038%2Fs41592--019--0686--2-blue)]( +https://doi.org/10.1038/s41586-020-2649-2) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/numpy/numpy/badge)](https://api.securityscorecards.dev/projects/github.com/numpy/numpy) + + +NumPy is the fundamental package for scientific computing with Python. + +- **Website:** https://www.numpy.org +- **Documentation:** https://numpy.org/doc +- **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion +- **Source code:** https://github.com/numpy/numpy +- **Contributing:** https://www.numpy.org/devdocs/dev/index.html +- **Bug reports:** https://github.com/numpy/numpy/issues +- **Report a security vulnerability:** https://tidelift.com/docs/security + +It provides: + +- a powerful N-dimensional array object +- sophisticated (broadcasting) functions +- tools for integrating C/C++ and Fortran code +- useful linear algebra, Fourier transform, and random number capabilities + +Testing: + +NumPy requires `pytest` and `hypothesis`. Tests can then be run after installation with: + + python -c "import numpy, sys; sys.exit(numpy.test() is False)" + +Code of Conduct +---------------------- + +NumPy is a community-driven open source project developed by a diverse group of +[contributors](https://numpy.org/teams/). The NumPy leadership has made a strong +commitment to creating an open, inclusive, and positive community. Please read the +[NumPy Code of Conduct](https://numpy.org/code-of-conduct/) for guidance on how to interact +with others in a way that makes our community thrive. + +Call for Contributions +---------------------- + +The NumPy project welcomes your expertise and enthusiasm! + +Small improvements or fixes are always appreciated. If you are considering larger contributions +to the source code, please contact us through the [mailing +list](https://mail.python.org/mailman/listinfo/numpy-discussion) first. + +Writing code isn’t the only way to contribute to NumPy. You can also: +- review pull requests +- help us stay on top of new and old issues +- develop tutorials, presentations, and other educational materials +- maintain and improve [our website](https://github.com/numpy/numpy.org) +- develop graphic design for our brand assets and promotional materials +- translate website content +- help with outreach and onboard new contributors +- write grant proposals and help with other fundraising efforts + +For more information about the ways you can contribute to NumPy, visit [our website](https://numpy.org/contribute/). +If you’re unsure where to start or how your skills fit in, reach out! You can +ask on the mailing list or here, on GitHub, by opening a new issue or leaving a +comment on a relevant issue that is already open. 
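The feature list and the testing one-liner above are the technical core of this README, so a short runnable sketch may help; the specific arrays, values, and seed below are illustrative choices of this edit, not examples taken from NumPy's documentation:

```python
import numpy as np

# N-dimensional array object: a 2x3 array of 64-bit floats
a = np.arange(6, dtype=np.float64).reshape(2, 3)

# Broadcasting: the 1-D row [10, 20, 30] is applied across both rows of `a`
b = a + np.array([10.0, 20.0, 30.0])

# Linear algebra: solve the 2x2 system M @ x == y
M = np.array([[3.0, 1.0], [1.0, 2.0]])
y = np.array([9.0, 8.0])
x = np.linalg.solve(M, y)  # -> array([2., 3.])

# Fourier transform of a small real-valued signal
signal = np.sin(np.linspace(0.0, 2.0 * np.pi, 8, endpoint=False))
spectrum = np.fft.rfft(signal)

# Random numbers via the Generator API
rng = np.random.default_rng(seed=42)
samples = rng.normal(loc=0.0, scale=1.0, size=4)

print(b, x, spectrum.shape, samples, sep="\n")
```

Broadcasting is what lets the 1-D row combine with the 2-D array without an explicit loop, and `np.random.default_rng` is the Generator-based API that NumPy recommends over the legacy `np.random.*` functions.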
+ +Our preferred channels of communication are all public, but if you’d like to +speak to us in private first, contact our community coordinators at +numpy-team@googlegroups.com or on Slack (write numpy-team@googlegroups.com for +an invitation). + +We also have a biweekly community call, details of which are announced on the +mailing list. You are very welcome to join. + +If you are new to contributing to open source, [this +guide](https://opensource.guide/how-to-contribute/) helps explain why, what, +and how to successfully get involved. diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/RECORD b/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/RECORD new file mode 100644 index 00000000..4f9a7802 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/RECORD @@ -0,0 +1,1407 @@ +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/__config__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_core/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_dtype.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_dtype_ctypes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_internal.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_multiarray_umath.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_core/multiarray.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_core/umath.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_distributor_init.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_globals.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/hook-numpy.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/pyinstaller-smoke.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/test_pyinstaller.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_pytesttester.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/__init__.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_add_docstring.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_array_like.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_char_codes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_dtype_like.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_extended_precision.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_nbit.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_nested_sequence.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_scalars.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_shape.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/setup.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_convertions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_inspect.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_pep440.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_array_object.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_constants.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_creation_functions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_data_type_functions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_dtypes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_elementwise_functions.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_indexing_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_manipulation_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_searching_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_set_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_sorting_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_statistical_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_typing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_utility_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/linalg.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/setup.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_array_object.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_creation_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_data_type_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_elementwise_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_indexing_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_manipulation_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_set_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_sorting_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_validation.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/compat/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/compat/py3k.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/compat/setup.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/compat/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/compat/tests/test_compat.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/conftest.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/_add_newdocs.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/_add_newdocs_scalars.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/_asarray.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/_dtype.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/_dtype_ctypes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/_exceptions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/_internal.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/_machar.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/_methods.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/_string_helpers.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/_type_aliases.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/_ufunc_config.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/arrayprint.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/cversions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/defchararray.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/einsumfunc.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/fromnumeric.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/function_base.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/getlimits.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/memmap.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/multiarray.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/numeric.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/numerictypes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/overrides.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/records.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/shape_base.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/_locales.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/cython/setup.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/setup.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test__exceptions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_abc.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_api.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_argparse.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_array_coercion.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_array_interface.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_arraymethod.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_arrayprint.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_casting_floatingpoint_errors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_casting_unittests.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_conversion_utils.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_cpu_dispatcher.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_cpu_features.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_custom_dtypes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_cython.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_datetime.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_defchararray.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_deprecations.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_dlpack.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_dtype.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_einsum.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_errstate.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_extint128.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_function_base.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_getlimits.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_half.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_hashtable.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_indexerrors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_indexing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_item_selection.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_limited_api.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_longdouble.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_machar.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_mem_overlap.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_mem_policy.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_memmap.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_multiarray.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_nditer.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_nep50_promotions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_numeric.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_numerictypes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_numpy_2_0_compat.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_overrides.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_print.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_protocols.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_records.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_regression.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalar_ctors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalar_methods.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarbuffer.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarinherit.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarmath.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarprint.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_shape_base.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_simd.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_simd_module.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_strings.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_ufunc.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_umath.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_umath_accuracy.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_umath_complex.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_unicode.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/umath.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/core/umath_tests.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ctypeslib.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/_shell_utils.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/armccompiler.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/ccompiler.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/ccompiler_opt.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/autodist.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/bdist_rpm.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_clib.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_ext.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_py.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_scripts.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_src.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/config.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/config_compiler.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/develop.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/egg_info.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install_clib.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install_data.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install_headers.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/sdist.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/conv_template.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/core.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/cpuinfo.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/exec_command.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/extension.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/absoft.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/arm.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/compaq.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/environment.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/fujitsu.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/g95.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/gnu.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/ibm.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/intel.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/lahey.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/mips.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/nag.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/none.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/nv.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/pathf95.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/pg.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/sun.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/vast.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/from_template.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fujitsuccompiler.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/intelccompiler.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/lib2def.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/line_endings.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/log.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/mingw32ccompiler.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/misc_util.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/msvc9compiler.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/msvccompiler.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/npy_pkg_config.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/numpy_distribution.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/pathccompiler.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/setup.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/system_info.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_build_ext.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_exec_command.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_gnu.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_intel.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_from_template.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_log.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_mingw32ccompiler.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_misc_util.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_npy_pkg_config.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_shell_utils.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_system_info.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/unixccompiler.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/doc/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/doc/constants.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/doc/ufuncs.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/dtypes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/exceptions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__main__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__version__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/_backend.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/_distutils.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/_meson.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_isocbind.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_src_pyf.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/auxfuncs.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/capi_maps.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/cb_rules.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/cfuncs.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/common_rules.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/crackfortran.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/diagnose.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/f2py2e.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/f90mod_rules.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/func2subr.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/rules.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/setup.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/symbolic.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_abstract_interface.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_array_from_pyobj.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_assumed_shape.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_block_docstring.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_callback.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_character.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_common.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_compile_function.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_crackfortran.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_data.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_docs.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_f2cmap.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_f2py2e.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_isoc.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_kind.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_mixed.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_module_doc.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_parameter.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_pyf_src.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_quoted_character.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_regression.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_character.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_complex.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_integer.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_logical.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_real.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_semicolon_split.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_size.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_string.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_symbolic.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_value_attrspec.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/util.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/use_rules.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/fft/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/fft/_pocketfft.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/fft/helper.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/fft/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/fft/tests/test_helper.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/fft/tests/test_pocketfft.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/_datasource.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/_iotools.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/_version.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arraypad.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arraysetops.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arrayterator.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/format.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/function_base.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/histograms.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/index_tricks.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/mixins.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/nanfunctions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/npyio.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/polynomial.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/recfunctions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/scimath.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/setup.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/shape_base.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/stride_tricks.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test__datasource.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test__iotools.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test__version.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_arraypad.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_arraysetops.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_arrayterator.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_financial_expired.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_format.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_function_base.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_histograms.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_index_tricks.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_io.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_loadtxt.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_mixins.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_nanfunctions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_packbits.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_polynomial.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_recfunctions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_regression.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_shape_base.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_stride_tricks.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_twodim_base.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_type_check.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_ufunclike.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_utils.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/twodim_base.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/type_check.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/ufunclike.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/user_array.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/lib/utils.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/linalg.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/test_deprecations.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/test_linalg.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/test_regression.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/core.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/extras.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/mrecords.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/setup.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_core.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_deprecations.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_extras.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_mrecords.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_old_ma.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_regression.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_subclassing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/testutils.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/ma/timer_comparison.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/matlib.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/setup.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_defmatrix.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_interaction.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_masked_matrix.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_matrix_linalg.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_multiarray.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_numeric.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_regression.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/_polybase.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/chebyshev.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/hermite.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/hermite_e.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/laguerre.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/legendre.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/polynomial.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/polyutils.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/setup.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_chebyshev.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_classes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite_e.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_laguerre.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_legendre.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_polynomial.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_polyutils.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_printing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_symbol.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cffi/extending.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cffi/parse.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/numba/extending.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/numba/extending_distributions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/_pickle.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/data/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_direct.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_extending.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937_regressions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_random.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_randomstate.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_randomstate_regression.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_regression.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_seed_sequence.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_smoke.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/testing/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/testing/_private/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/testing/_private/extbuild.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/testing/_private/utils.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/testing/overrides.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/testing/print_coercion_tables.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/testing/setup.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/testing/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/testing/tests/test_utils.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test__all__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_ctypeslib.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_lazyloading.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_matlib.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_numpy_config.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_numpy_version.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_public_api.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_reloading.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_scripts.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_warnings.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/mypy_plugin.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/setup.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arithmetic.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/array_constructors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/array_like.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arrayprint.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arrayterator.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/bitwise_ops.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/comparisons.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/dtype.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/einsumfunc.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/flatiter.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/fromnumeric.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/index_tricks.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/lib_utils.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/lib_version.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/literal.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/mod.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/modules.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/multiarray.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_conversion.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_misc.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/numeric.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/numerictypes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/random.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/scalars.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/simple.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/simple_py3.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufunc_config.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufunclike.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufuncs.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/data/pass/warnings_and_errors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/test_isfile.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/test_runtime.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/test_typing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/numpy/version.cpython-39.pyc,, +../../../bin/f2py,sha256=YdgEvHTsJcE_Ja4FZjsSRxVRR9s0WlxYaGZGNLIQ-aw,251 +numpy-1.26.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +numpy-1.26.3.dist-info/LICENSE.txt,sha256=P5zDxjzN-G0aCMIwDe3G-iIAg5UI0UGAuwfQlrDCzf8,47882 +numpy-1.26.3.dist-info/METADATA,sha256=L5XkZiT0bROGFGGgkejtFpaV_G51wVvD6hepLdyeAwU,61234 +numpy-1.26.3.dist-info/RECORD,, +numpy-1.26.3.dist-info/WHEEL,sha256=rFEbl7VGpdcTiWMLyPHKRieKvxwiToMCxVXFuj50F_A,91 +numpy-1.26.3.dist-info/entry_points.txt,sha256=zddyYJuUw9Uud7LeLfynXk62_ry0lGihDwCIgugBdZM,144 +numpy/.dylibs/libgcc_s.1.1.dylib,sha256=uu4uYRjdVMCFL4gdcYUW1wiE3oS_qghd5djY-QuqOwY,158976 +numpy/.dylibs/libgfortran.5.dylib,sha256=XrAvpVBk_4rlN9vbGwrgshdchFrbugib1EYWwG7DIes,3700784 +numpy/.dylibs/libopenblas64_.0.dylib,sha256=3eK3NdAcqlMYhRFeqFO1pBcrk1FnuVoayyoQJD4Nl-c,23198400 +numpy/.dylibs/libquadmath.0.dylib,sha256=GzQa4nbErcswwHC3dOxuaOgZyIPol_DeiUHVU_eYmg0,371440 +numpy/__config__.py,sha256=Pi9b8OuywfFX21DZFZ0vpeNSAEilONpnn14QNlTqVgs,4721 +numpy/__init__.cython-30.pxd,sha256=yk2a3etxRNlBgj5uLfIho2RYDYDzhRW8oagAG-wzbPI,36690 +numpy/__init__.pxd,sha256=Pa0VYRSeQRSFepQ6ROgZrNtGY5TzBXIddWsMHtK0OkM,35066 +numpy/__init__.py,sha256=Is0VNfoU10729FfMoUn_3ICHX0YL4xO4-JUnP3i8QC4,17005 +numpy/__init__.pyi,sha256=9kK465XL9oS_X3fJLv0Na29NEYnWvtdMhXPtrnF_cG8,154080 +numpy/_core/__init__.py,sha256=C8_7wbHqUkB35JouY_XKsas1KLpRZ7JHWuZ7VGOPVpU,136 +numpy/_core/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/_core/_dtype.py,sha256=vE16-yiwUSYsAIbq7FlEY1GbXZAp8wjADDxJg3eBX-U,126 +numpy/_core/_dtype_ctypes.py,sha256=i5EhoWPUhu4kla3Xu4ZvXF1lVLPiI6Zg4h6o8jaiamo,147 +numpy/_core/_internal.py,sha256=g5ugmqDgUhSlie5-onOctcm4p0gcMHSIRLHVYtFTk1M,135 +numpy/_core/_multiarray_umath.py,sha256=VPtoT2uHnyU3rKL0G27CgmNmB1WRHM0mtc7Y9L85C3U,159 +numpy/_core/multiarray.py,sha256=kZxC_7P3Jwz1RApzQU2QGmqSq4MAEvKmaJEYnAsbSOs,138 +numpy/_core/umath.py,sha256=YcV0cdbGcem6D5P3yX7cR9HGYBrT8VMoAgCBzGwPhgg,123 +numpy/_distributor_init.py,sha256=IKy2THwmu5UgBjtVbwbD9H-Ap8uaUJoPJ2btQ4Jatdo,407 +numpy/_globals.py,sha256=neEdcfLZoHLwber_1Xyrn26LcXy0MrSta03Ze7aKa6g,3094 
+numpy/_pyinstaller/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/_pyinstaller/hook-numpy.py,sha256=PUQ-mNWje6bFALB-mLVFRPkvbM4JpLXunB6sjBbTy5g,1409 +numpy/_pyinstaller/pyinstaller-smoke.py,sha256=6iL-eHMQaG3rxnS5EgcvrCqElm9aKL07Cjr1FZJSXls,1143 +numpy/_pyinstaller/test_pyinstaller.py,sha256=8K-7QxmfoXCG0NwR0bhIgCNrDjGlrTzWnrR1sR8btgU,1135 +numpy/_pytesttester.py,sha256=lQUTvKVz6kT8b4yiMV-uW-vG9KSv9UzqAmxaEMezTd8,6731 +numpy/_pytesttester.pyi,sha256=OtyXSiuSy8o_78w3QNQRjMLpvvNyEdC0aMsx6T-vRxU,489 +numpy/_typing/__init__.py,sha256=6w9E9V9VaT7vTM-veua8XcySv50Je5qSPJzK9HTocIg,7003 +numpy/_typing/_add_docstring.py,sha256=xQhQX372aN_m3XN95CneMxOST2FdPcovR-MXM-9ep58,3922 +numpy/_typing/_array_like.py,sha256=L4gnx2KWG8yYcouz5b9boJIkkFNtOJV6QjcnGCrbnRY,4298 +numpy/_typing/_callable.pyi,sha256=Mf57BwohRn9ye6ixJqjNEnK0gKqnVPE9Gy8vK-6_zxo,11121 +numpy/_typing/_char_codes.py,sha256=LR51O5AUBDbCmJvlMoxyUvsfvb1p7WHrexgtTGtuWTc,5916 +numpy/_typing/_dtype_like.py,sha256=21Uxy0UgIawGM82xjDF_ifMq-nP-Bkhn_LpiK_HvWC4,5661 +numpy/_typing/_extended_precision.py,sha256=dGios-1k-QBGew7YFzONZTzVWxz-aYAaqlccl2_h5Bo,777 +numpy/_typing/_nbit.py,sha256=-EQOShHpB3r30b4RVEcruQRTcTaFAZwtqCJ4BsvpEzA,345 +numpy/_typing/_nested_sequence.py,sha256=5eNaVZAV9tZQLFWHYOuVs336JjoiaWxyZQ7cMKb6m1I,2566 +numpy/_typing/_scalars.py,sha256=eVP8PjlcTIlY7v0fRI3tFXPogWtpLJZ8nFvRRrLjDqs,980 +numpy/_typing/_shape.py,sha256=JPy7jJMkISGFTnkgiEifYM-4xTcjb7JMRkLIIjZLw08,211 +numpy/_typing/_ufunc.pyi,sha256=e74LtOP9e8kkRhvrIJ_RXz9Ua_L43Pd9IixwNwermnM,12638 +numpy/_typing/setup.py,sha256=SE0Q6HPqDjWUfceA4yXgkII8y3z7EiSF0Z-MNwOIyG4,337 +numpy/_utils/__init__.py,sha256=Hhetwsi3eTBe8HdWbG51zXmcrX1DiPLxkYSrslMLYcc,723 +numpy/_utils/_convertions.py,sha256=0xMxdeLOziDmHsRM_8luEh4S-kQdMoMg6GxNDDas69k,329 +numpy/_utils/_inspect.py,sha256=8Ma7QBRwfSWKeK1ShJpFNc7CDhE6fkIE_wr1FxrG1A8,7447 +numpy/_utils/_pep440.py,sha256=Vr7B3QsijR5p6h8YAz2LjNGUyzHUJ5gZ4v26NpZAKDc,14069 +numpy/array_api/__init__.py,sha256=SyA8oIVxaUKLW_HWayZuLKj57Ptu8e-ASK1dTPi3QcY,10364 +numpy/array_api/_array_object.py,sha256=rfCBzE6vUjk4HElQGTVwe6Tw2vxiUx7tmBpQEmm1iBk,43794 +numpy/array_api/_constants.py,sha256=AYayN2jf1Dp5rXZ7WPBdUhtPBo_JMCi-pD9oW5zmFkI,87 +numpy/array_api/_creation_functions.py,sha256=6SqHdzZqHOJFEyWFtqnj6KIKRivrGXxROlgnez_3Mt0,10050 +numpy/array_api/_data_type_functions.py,sha256=P57FOsNdXahNUriVtdldonbvBQrrZkVzxZbcqkR_8AA,6288 +numpy/array_api/_dtypes.py,sha256=kDU1NLvEQN-W2HPmJ2wGPx8jiNkFbrvTCD1T1RT8Pwo,4823 +numpy/array_api/_elementwise_functions.py,sha256=0kGuDX3Ur_Qp6tBMBWTO7LPUxzXNGAlA2SSJhdAp4DU,25992 +numpy/array_api/_indexing_functions.py,sha256=d-gzqzyvR45FQerRYJrbBzCWFnDsZWSI9pggA5QWRO4,715 +numpy/array_api/_manipulation_functions.py,sha256=qCoW5B5FXcFOWKPU9D9MXHdMeXIuzvnHUUvprNlwfjc,3317 +numpy/array_api/_searching_functions.py,sha256=mGZiqheYXGWiDK9rqXFiDKX0_B0mJ1OjdA-9FC2o5lA,1715 +numpy/array_api/_set_functions.py,sha256=ULpfK1zznW9joX1DXSiP0R3ahcDB_po7mZlpsRqi7Fs,2948 +numpy/array_api/_sorting_functions.py,sha256=7pszlxNN7-DNqEZlonGLFQrlXPP7evVA8jN31NShg00,2031 +numpy/array_api/_statistical_functions.py,sha256=HspfYteZWSa3InMs10KZz-sk3ZuW6teX6fNdo829T84,3584 +numpy/array_api/_typing.py,sha256=uKidRp6nYxgHnEPaqXXZsDDZ6tw1LshpbwLvy-09eeM,1347 +numpy/array_api/_utility_functions.py,sha256=HwycylbPAgRVz4nZvjvwqN3mQnJbqKA-NRMaAvIP-CE,824 +numpy/array_api/linalg.py,sha256=i4L6wFFcLFQkJiuihsKxjns1HoqnGNnfS3R56cr5lng,18285 +numpy/array_api/setup.py,sha256=Wx6qD7GU_APiqKolYPO0OHv4eHGYrjPZmDAgjWhOEhM,341 
+numpy/array_api/tests/__init__.py,sha256=t_2GZ3lKcsu4ec4GMKPUDYaeMUJyDquBlQAcPgj7kFE,282 +numpy/array_api/tests/test_array_object.py,sha256=FQoAxP4CLDiv6iih8KKUDSLuYM6dtnDcB1f0pMHw4-M,17035 +numpy/array_api/tests/test_creation_functions.py,sha256=s3A1COWmXIAJdhzd8v7VtL-jbiSspskTqwYy0BTpmpw,5023 +numpy/array_api/tests/test_data_type_functions.py,sha256=qc8ktRlVXWC3PKhxPVWI_UF9f1zZtpmzHjdCtf3e16E,1018 +numpy/array_api/tests/test_elementwise_functions.py,sha256=CTj4LLwtusI51HkpzD0JPohP1ffNxogAVFz8WLuWFzM,3800 +numpy/array_api/tests/test_indexing_functions.py,sha256=AbuBGyEufEAf24b7fy8JQhdJtGPdP9XEIxPTJAfAFFo,627 +numpy/array_api/tests/test_manipulation_functions.py,sha256=wce25dSJjubrGhFxmiatzR_IpmNYp9ICJ9PZBBnZTOQ,1087 +numpy/array_api/tests/test_set_functions.py,sha256=D016G7v3ko49bND5sVERP8IqQXZiwr-2yrKbBPJ-oqg,546 +numpy/array_api/tests/test_sorting_functions.py,sha256=INPiYnuGBcsmWtYqdTTX3ENHmM4iUx4zs9KdwDaSmdA,602 +numpy/array_api/tests/test_validation.py,sha256=QUG9yWC3QhkPxNhbQeakwBbl-0Rr0iTuZ41_0sfVIGU,676 +numpy/compat/__init__.py,sha256=iAHrmsZWzouOMSyD9bdSE0APWMlRpqW92MQgF8y6x3E,448 +numpy/compat/py3k.py,sha256=Je74CVk_7qI_qX7pLbYcuQJsxlMq1poGIfRIrH99kZQ,3833 +numpy/compat/setup.py,sha256=36X1kF0C_NVROXfJ7w3SQeBm5AIDBuJbM5qT7cvSDgU,335 +numpy/compat/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/compat/tests/test_compat.py,sha256=YqV67pSN8nXPbXaEdjhmyaoVetNyFupVv57OMEgCwKA,579 +numpy/conftest.py,sha256=HZyWo_wJyrbgnyXxI8t05WOg_IrzNAMnEV7O8koHous,4623 +numpy/core/__init__.py,sha256=CNsO-Ab4ywM2Wz3AbqWOH3ig1q5Bno9PsUMrCv-HNS4,5780 +numpy/core/__init__.pyi,sha256=xtd9OFYza-ZG3jyEJrlzRPT-SkVoB_qYmVCe6FxRks0,126 +numpy/core/_add_newdocs.py,sha256=39JFaeDPN2OQlSwfpY6_Jq9fO5vML8ZMF8J4ZTx_nrs,208972 +numpy/core/_add_newdocs_scalars.py,sha256=PF9v8POcSNH6ELYltkx9e07DWgMmft6NJy9zER3Jk44,12106 +numpy/core/_asarray.py,sha256=P2ddlZAsg1iGleRRfoQv_aKs2N7AGwpo5K4ZQv4Ujlk,3884 +numpy/core/_asarray.pyi,sha256=gNNxUVhToNU_F1QpgeEvUYddpUFN-AKP0QWa4gqcTGw,1086 +numpy/core/_dtype.py,sha256=SihUz41pHRB3Q2LiYYkug6LgMBKh6VV89MOpLxnXQdo,10606 +numpy/core/_dtype_ctypes.py,sha256=Vug4i7xKhznK2tdIjmn4ebclClpaCJwSZUlvEoYl0Eg,3673 +numpy/core/_exceptions.py,sha256=dZWKqfdLRvJvbAEG_fof_8ikEKxjakADMty1kLC_l_M,5379 +numpy/core/_internal.py,sha256=f9kNDuT-FGxF1EtVOVIxXWnH9gM9n-J5V2zwHMv4HEk,28348 +numpy/core/_internal.pyi,sha256=_mCTOX6Su8D4R9fV4HNeohPJx7515B-WOlv4uq6mry8,1032 +numpy/core/_machar.py,sha256=G3a3TXu8VDW_1EMxKKLnGMbvUShEIUEve3ealBlJJ3E,11565 +numpy/core/_methods.py,sha256=m31p0WjcFUGckbJiHnCpSaIQGqv-Lq5niIYkdd33YMo,8613 +numpy/core/_multiarray_tests.cpython-39-darwin.so,sha256=n9zsvxMl2JtwCrG7xGlJWNeU_Ac6vOZmtMoamSbC1gk,122467 +numpy/core/_multiarray_umath.cpython-39-darwin.so,sha256=wC1AedZfi6KMhpCkhwLfKtW8-lIRm80Gurq2lGQfueQ,3164512 +numpy/core/_operand_flag_tests.cpython-39-darwin.so,sha256=hMXM5j060_BOKrfAYlxjW5mQux_PlQLGaMGj9pcg_-I,50981 +numpy/core/_rational_tests.cpython-39-darwin.so,sha256=b9ESj_KV6h2TSpXhhpwS7njYsUjS1UY_DFPt2nFi5Yc,72673 +numpy/core/_simd.cpython-39-darwin.so,sha256=TN4JtkaCK0mHTRvh_aN4v-zHOrXbiezf2_Ha1kV81IY,339719 +numpy/core/_string_helpers.py,sha256=-fQM8z5s8_yX440PmgNEH3SUjEoXMPpPSysZwWZNbuo,2852 +numpy/core/_struct_ufunc_tests.cpython-39-darwin.so,sha256=30V0KBDOGn4H9gIRZXtLqvUiLqECl2PizXeLDc6Dbho,51269 +numpy/core/_type_aliases.py,sha256=qV6AZlsUWHMWTydmZya73xuBkKXiUKq_WXLj7q2CbZ0,7534 +numpy/core/_type_aliases.pyi,sha256=lguMSqMwvqAFHuRtm8YZSdKbikVz985BdKo_lo7GQCg,404 
+numpy/core/_ufunc_config.py,sha256=-Twpe8dnd45ccXH-w-B9nvU8yCOd1E0e3Wpsts3g_bQ,13944 +numpy/core/_ufunc_config.pyi,sha256=-615enOVQMBhVx7Pln7DY_s4H6JjSgSnBy89YkpvuLg,1066 +numpy/core/_umath_tests.cpython-39-darwin.so,sha256=eirsVb6VE3CGeWwvLUYibbmrsX5DrPlaHAntFMwgVok,70846 +numpy/core/arrayprint.py,sha256=ySZj4TZFFVCa5yhMmJKFYQYhuQTabZTRBb1YoiCD-ac,63608 +numpy/core/arrayprint.pyi,sha256=21pOWjTSfJOBaKgOOPzRox1ERb3c9ydufqL0b11_P_Q,4428 +numpy/core/cversions.py,sha256=H_iNIpx9-hY1cQNxqjT2d_5SXZhJbMo_caq4_q6LB7I,347 +numpy/core/defchararray.py,sha256=G1LExk-dMeVTYRhtYgcCZEsHk5tkawk7giXcK4Q5KVM,73617 +numpy/core/defchararray.pyi,sha256=ib3aWFcM7F4KooU57mWUNi4GlosNjdfgrLKBVSIKDvU,9216 +numpy/core/einsumfunc.py,sha256=TrL6t79F0H0AQH0y5Cj7Tq0_pzk4fVFi-4q4jJmujYQ,51868 +numpy/core/einsumfunc.pyi,sha256=IJZNdHHG_soig8XvCbXZl43gMr3MMKl9dckTYWecqLs,4860 +numpy/core/fromnumeric.py,sha256=YMtxOBg51VMem39AHXFs-4_vOb1p48ei7njXdYTRJ_Q,128821 +numpy/core/fromnumeric.pyi,sha256=KATMFeFxUJ8YNRaC-jd_dTOt3opz2ng6lHgke5u5COk,23726 +numpy/core/function_base.py,sha256=tHg1qSHTz1eO_wHXNFRt3Q40uqVtPT2eyQdrWbIi4wQ,19836 +numpy/core/function_base.pyi,sha256=3ZYad3cdaGwNEyP8VwK97IYMqk2PDoVjpjQzhIYHjk0,4725 +numpy/core/getlimits.py,sha256=AopcTZDCUXMPcEKIZE1botc3mEhmLb2p1_ejlq1CLqY,25865 +numpy/core/getlimits.pyi,sha256=qeIXUEtognTHr_T-tv-VcZI7n8Z2VzAyIpIgKXzsLkc,82 +numpy/core/include/numpy/__multiarray_api.c,sha256=nPRzTez_Wy3YXy3zZNJNPMspAzxbLOdohqhXwouwMLM,12116 +numpy/core/include/numpy/__multiarray_api.h,sha256=ZM--FKMhIaSQS39cPW0hj5dx8ngNMmbcy6SbgXZBd8U,61450 +numpy/core/include/numpy/__ufunc_api.c,sha256=670Gcz-vhkF4taBDmktCpFRBrZ9CHJnPRx7ag7Z6HsI,1714 +numpy/core/include/numpy/__ufunc_api.h,sha256=0MBOl7dgO3ldqdDi-SdciEOuqGv1UNsmk7mp7tEy4AY,12456 +numpy/core/include/numpy/_dtype_api.h,sha256=4veCexGvx9KNWMIUuEUAVOfcsei9GqugohDY5ud16pA,16697 +numpy/core/include/numpy/_neighborhood_iterator_imp.h,sha256=s-Hw_l5WRwKtYvsiIghF0bg-mA_CgWnzFFOYVFJ-q4k,1857 +numpy/core/include/numpy/_numpyconfig.h,sha256=K9Vamy8Eb6oNHG0eCLlcj-MYDIfj-cCjurEe9rVaO_M,857 +numpy/core/include/numpy/arrayobject.h,sha256=-BlWQ7kfVbzCqzHn0qaeMe0_08AbwliuG98XWG57lT8,282 +numpy/core/include/numpy/arrayscalars.h,sha256=C3vDRndZTZRbppiDyV5jp8sV3dRKsrwBIZcNlh9gSTA,3944 +numpy/core/include/numpy/experimental_dtype_api.h,sha256=tlehD5r_pYhHbGzIrUea6vtOgf6IQ8Txblnhx7455h8,15532 +numpy/core/include/numpy/halffloat.h,sha256=TRZfXgipa-dFppX2uNgkrjrPli-1BfJtadWjAembJ4s,1959 +numpy/core/include/numpy/ndarrayobject.h,sha256=PhY4NjRZDoU5Zbc8MW0swPEm81hwgWZ63gAU93bLVVI,10183 +numpy/core/include/numpy/ndarraytypes.h,sha256=EjWXv-J8C5JET4AlIbJRdctycL7-dyJZcnoWgnlCPc8,68009 +numpy/core/include/numpy/noprefix.h,sha256=d83l1QpCCVqMV2k29NMkL3Ld1qNjiC6hzOPWZAivEjQ,6830 +numpy/core/include/numpy/npy_1_7_deprecated_api.h,sha256=y0MJ8Qw7Bkt4H_4VxIzHzpkw5JqAdj5ECgtn08fZFrI,4327 +numpy/core/include/numpy/npy_3kcompat.h,sha256=SvN9yRA3i02O4JFMXxZz0Uq_vJ5ZpvC-pC2sfF56A5I,15883 +numpy/core/include/numpy/npy_common.h,sha256=apWBsCJeP8P5T0exgzhFcGohbASsUF8vtFdS2jc1VfU,37746 +numpy/core/include/numpy/npy_cpu.h,sha256=pcVRtj-Y6120C5kWB1VAiAjZoxkTPDEg0gGm5IAt3jM,4629 +numpy/core/include/numpy/npy_endian.h,sha256=we7X9fPeWzNpo_YTh09MPGDwdE0Rw_WDM4c9y4nBj5I,2786 +numpy/core/include/numpy/npy_interrupt.h,sha256=DQZIxi6FycLXD8drdHn2SSmLoRhIpo6osvPv13vowUA,1948 +numpy/core/include/numpy/npy_math.h,sha256=SbKRoc7O3gVuDl7HOZjk424O049I0zn-7i9GwBwNmmk,18945 +numpy/core/include/numpy/npy_no_deprecated_api.h,sha256=0yZrJcQEJ6MCHJInQk5TP9_qZ4t7EfBuoLOJ34IlJd4,678 
+numpy/core/include/numpy/npy_os.h,sha256=hlQsg_7-RkvS3s8OM8KXy99xxyJbCm-W1AYVcdnO1cw,1256 +numpy/core/include/numpy/numpyconfig.h,sha256=Nr59kE3cXmen6y0UymIBaU7F1BSIuPwgKZ4gdV5Q5JU,5308 +numpy/core/include/numpy/old_defines.h,sha256=xuYQDDlMywu0Zsqm57hkgGwLsOFx6IvxzN2eiNF-gJY,6405 +numpy/core/include/numpy/random/LICENSE.txt,sha256=-8U59H0M-DvGE3gID7hz1cFGMBJsrL_nVANcOSbapew,1018 +numpy/core/include/numpy/random/bitgen.h,sha256=49AwKOR552r-NkhuSOF1usb_URiMSRMvD22JF5pKIng,488 +numpy/core/include/numpy/random/distributions.h,sha256=W5tOyETd0m1W0GdaZ5dJP8fKlBtsTpG23V2Zlmrlqpg,9861 +numpy/core/include/numpy/random/libdivide.h,sha256=ew9MNhPQd1LsCZiWiFmj9IZ7yOnA3HKOXffDeR9X1jw,80138 +numpy/core/include/numpy/ufuncobject.h,sha256=Xmnny_ulZo9VwxkfkXF-1HCTKDavIp9PV_H7XWhi0Z8,12070 +numpy/core/include/numpy/utils.h,sha256=wMNomSH3Dfj0q78PrjLVtFtN-FPo7UJ4o0ifCUO-6Es,1185 +numpy/core/lib/libnpymath.a,sha256=qqQpcleBtYtPx3zOtmzCZfb27sK8NlwKoYbnCVn_8vM,34272 +numpy/core/lib/npy-pkg-config/mlib.ini,sha256=_LsWV1eStNqwhdiYPa2538GL46dnfVwT4MrI1zbsoFw,147 +numpy/core/lib/npy-pkg-config/npymath.ini,sha256=kamUNrYKAmXqQa8BcNv7D5sLqHh6bnChM0_5rZCsTfY,360 +numpy/core/memmap.py,sha256=yWBJLeVClHsD8BYusnf9bdqypOMPrj3_zoO_lQ2zVMc,11771 +numpy/core/memmap.pyi,sha256=sxIQ7T5hPLG-RBNndAc8JPvrsKEX1amBSH2HGg48Obo,55 +numpy/core/multiarray.py,sha256=zXaWf_DSkFEWjUQqVRCGeevwsI6kjQ3x6_MUwA1Y8fk,56097 +numpy/core/multiarray.pyi,sha256=_0X4W90U5ZiKt2n-9OscK-pcQyV6oGK-8jwGy5k1qxA,24768 +numpy/core/numeric.py,sha256=DgajaCDXiiQR-zuW_rrx_QhApSsa5k5FONK3Uk9mfTs,77014 +numpy/core/numeric.pyi,sha256=oVQkI4ABayFl_ZzCiGH4DxfYASL-3aETi-3B93THnEQ,14315 +numpy/core/numerictypes.py,sha256=qIf9v1OpNjjVQzXnKpD-3V01y5Bj9huw5F-U5Wa4glc,18098 +numpy/core/numerictypes.pyi,sha256=dEqtq9MLrGaqqeAF1sdXBgnEwDWOzlK02A6MTg1PS5g,3267 +numpy/core/overrides.py,sha256=YUZFS8RCBvOJ27sH-jDRcyMjOCn9VigMyuQY4J21JBI,7093 +numpy/core/records.py,sha256=4mpIjUp2XtZxY5cD2S8mgfn8GCzQGGrrkqLBqAJwM-Q,37533 +numpy/core/records.pyi,sha256=uYwE6cAoGKgN6U4ryfGZx_3m-3sY006jytjWLrDRRy0,5692 +numpy/core/shape_base.py,sha256=RPMKxA7_FCAgg_CruExl0LehnczSTFaxA6hrcfrUzns,29743 +numpy/core/shape_base.pyi,sha256=Ilb4joJmbjkIZLzKww7NJeaxg2FP3AfFib3HtfOsrC0,2774 +numpy/core/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/core/tests/_locales.py,sha256=S4x5soqF0oxpBYOE8J9Iky72O9J25IiZ8349m93pWC4,2206 +numpy/core/tests/data/astype_copy.pkl,sha256=lWSzCcvzRB_wpuRGj92spGIw-rNPFcd9hwJaRVvfWdk,716 +numpy/core/tests/data/generate_umath_validation_data.cpp,sha256=fyhQPNhIX9hzjeXujn6mhi1MVc133zELSV_hlSQ7BQU,5842 +numpy/core/tests/data/numpy_2_0_array.pkl,sha256=Vh02tdyCypa8Nb4QzdVhnDAiXEO2WQrcwcvOdDDFF5w,718 +numpy/core/tests/data/recarray_from_file.fits,sha256=NA0kliz31FlLnYxv3ppzeruONqNYkuEvts5wzXEeIc4,8640 +numpy/core/tests/data/umath-validation-set-README.txt,sha256=pxWwOaGGahaRd-AlAidDfocLyrAiDp0whf5hC7hYwqM,967 +numpy/core/tests/data/umath-validation-set-arccos.csv,sha256=W_aL99bjzVjlVyd5omfDUORag8jHzx6uctedPVZgOHQ,61365 +numpy/core/tests/data/umath-validation-set-arccosh.csv,sha256=Uko_d0kDXr1YlN-6Ii-fQQxUvbXAhRfC7Un4gJ23GJk,61365 +numpy/core/tests/data/umath-validation-set-arcsin.csv,sha256=15Aenze4WD2a2dF2aOBXpv9B7u3wwAeUVJdEm4TjOkQ,61339 +numpy/core/tests/data/umath-validation-set-arcsinh.csv,sha256=uDwx4PStpfV21IaPF8pmzQpul6i72g7zDwlfcynWaVQ,60289 +numpy/core/tests/data/umath-validation-set-arctan.csv,sha256=mw5tYze_BMs6ugGEZfg5mcXoInGYdn7fvSCYSUi9Bqw,60305 
+numpy/core/tests/data/umath-validation-set-arctanh.csv,sha256=95l4Uu5RmZajljabfqlv5U34RVrifCMhhkop6iLeNBo,61339 +numpy/core/tests/data/umath-validation-set-cbrt.csv,sha256=v855MTZih-fZp_GuEDst2qaIsxU4a7vlAbeIJy2xKpc,60846 +numpy/core/tests/data/umath-validation-set-cos.csv,sha256=0PNnDqKkokZ7ERVDgbes8KNZc-ISJrZUlVZc5LkW18E,59122 +numpy/core/tests/data/umath-validation-set-cosh.csv,sha256=FGCNeUSUTAeASsb_j18iRSsCxXLxmzF-_C7tq1elVrQ,60869 +numpy/core/tests/data/umath-validation-set-exp.csv,sha256=BKg1_cyrKD2GXYMX_EB0DnXua8DI2O1KWODXf_BRhrk,17491 +numpy/core/tests/data/umath-validation-set-exp2.csv,sha256=f1b05MRXPOXihC9M-yi52udKBzVXalhbTuIcqoDAk-g,58624 +numpy/core/tests/data/umath-validation-set-expm1.csv,sha256=_ghc1xiUECNsBGrKCFUAy2lvu01_lkpeYJN0zDtCYWk,60299 +numpy/core/tests/data/umath-validation-set-log.csv,sha256=z9ej1ykKUoMRqYMUIJENWXbYi_A_x_RKs7K_GuXZJus,11692 +numpy/core/tests/data/umath-validation-set-log10.csv,sha256=RJgpruL16FVPgUT3-3xW4eppS_tn6o5yEW79KnITn48,68922 +numpy/core/tests/data/umath-validation-set-log1p.csv,sha256=IZZI-hi55HGCOvBat3vSBVha_8Nt-5alf2fqz6QeTG0,60303 +numpy/core/tests/data/umath-validation-set-log2.csv,sha256=HL2rOCsrEi378rNrbsXHPqlWlEGkXQq8R4e63YeTksU,68917 +numpy/core/tests/data/umath-validation-set-sin.csv,sha256=8PUjnQ_YfmxFb42XJrvpvmkeSpEOlEXSmNvIK4VgfAM,58611 +numpy/core/tests/data/umath-validation-set-sinh.csv,sha256=CYiibE8aX7MQnBatl__5k_PWc_9vHUifwS-sFZzzKk0,60293 +numpy/core/tests/data/umath-validation-set-tan.csv,sha256=Oq7gxMvblRVBrQ23kMxc8iT0bHnCWKg9EE4ZqzbJbOA,60299 +numpy/core/tests/data/umath-validation-set-tanh.csv,sha256=iolZF_MOyWRgYSa-SsD4df5mnyFK18zrICI740SWoTc,60299 +numpy/core/tests/examples/cython/checks.pyx,sha256=rKAhPSGHJ9oPK9Q_85YoUQyRTftEP1jcYOR5lSPB6oQ,662 +numpy/core/tests/examples/cython/meson.build,sha256=Qk4Q6OkpZ0xsLUkcGQVVrYkzb0ozoyL6YlSZ8_5tH1I,1088 +numpy/core/tests/examples/cython/setup.py,sha256=aAR-TvQabUabnCzuB6UdWdmRXaaPfIG7MzTIfMF-0tk,496 +numpy/core/tests/examples/limited_api/limited_api.c,sha256=mncE8TjjXmYpkwli433G0jB2zGQO_5NqWmGKdzRJZug,344 +numpy/core/tests/examples/limited_api/setup.py,sha256=p2w7F1ardi_GRXSrnNIR8W1oeH_pgmw_1P2wS0A2I6M,435 +numpy/core/tests/test__exceptions.py,sha256=QqxQSLXboPXEVwHz-TyE2JeIl_TC-rPugzfo25nbcns,2846 +numpy/core/tests/test_abc.py,sha256=FfgYA_HjYAi8XWGK_oOh6Zw86chB_KG_XoW_7ZlFp4c,2220 +numpy/core/tests/test_api.py,sha256=UMc7SvczAQ5ngHxE-NoXVvNpVzYRrn8oMwFNta1yMS0,22995 +numpy/core/tests/test_argparse.py,sha256=C0zBbwQ9xzzymXe_hHpWnnWQPwOi2ZdQB78gBAgJHvU,1969 +numpy/core/tests/test_array_coercion.py,sha256=zY4Pjlt4QZ0w71WxWGLHcrPnnhEF51yXYVLg5HMIy5c,34379 +numpy/core/tests/test_array_interface.py,sha256=8tGgj1Nzi76H_WF5GULkxqWL7Yu_Xf0lvTJZOwOBKsI,7774 +numpy/core/tests/test_arraymethod.py,sha256=VpjDYTmoMDTZcY7CsGzinBh0R_OICuwOykWCbmCRQZU,3244 +numpy/core/tests/test_arrayprint.py,sha256=cKaIoD9ZvsjJH0PHwZyOxmcRcBt1kN1WfFneqVqs0b8,40462 +numpy/core/tests/test_casting_floatingpoint_errors.py,sha256=W3Fgk0oKtXFv684fEZ7POwj6DHTYK0Jj_oGRLZ8UdyA,5063 +numpy/core/tests/test_casting_unittests.py,sha256=9-vkR0oXczQz8ED8DxGVPmalC8IZXe2jKgOCMGr8hIg,34298 +numpy/core/tests/test_conversion_utils.py,sha256=jNhbNNI-T8qtQnsIMEax7KFN30kjh0ICntLMwTyxJ5Q,6559 +numpy/core/tests/test_cpu_dispatcher.py,sha256=v_SlhUpENuoe7QYXizzYITLGXa7WfZ7jqcqmbSBg7JU,1542 +numpy/core/tests/test_cpu_features.py,sha256=mieGx7dxXFiyTYatbcCCjIjR67Un2hVcbJx4GEf2yFo,14892 +numpy/core/tests/test_custom_dtypes.py,sha256=JogRmttDLwfQ3PTbewEnGLKco9zV2Nu3yIfrMeCsx_I,9401 
+numpy/core/tests/test_cython.py,sha256=t5-h4XSIFNLyw_9BIAQDYl8_80t_pH0SCfEa1Vf_3aI,3755 +numpy/core/tests/test_datetime.py,sha256=2vAGbrCQmsrWNXCVXOMZqUGZn2c-cQT-eZ1wTprYbcM,116211 +numpy/core/tests/test_defchararray.py,sha256=F88HUkByEP4H6cJ_ITvIe0a_T1BH2JOdRysMCu1XIn0,24997 +numpy/core/tests/test_deprecations.py,sha256=w2lhHb-W8hh7RoE_0Ftg8thpG86jvbFAJgior22DY2Q,31076 +numpy/core/tests/test_dlpack.py,sha256=cDlwFmTombb2rDeB8RHEAJ4eVMUiDbw8Oz5Jo1NQwk0,3522 +numpy/core/tests/test_dtype.py,sha256=J09pJF59v7UO6iNuJFISKP2DLPgdkQ_df5OAMDRLikU,75702 +numpy/core/tests/test_einsum.py,sha256=QzQAPIC-IjTV3Dxz97hBnvLBCmF8kpsBTBckThhgRjQ,53712 +numpy/core/tests/test_errstate.py,sha256=U3GT9I058jkF725mx4GdWUr9RoceCkGDV7Go79VA4wY,2219 +numpy/core/tests/test_extint128.py,sha256=gCZfAwPOb-F1TLsEEeDI0amQYwHk-60-OXi0ccZrrZ8,5643 +numpy/core/tests/test_function_base.py,sha256=Ibs6-WXZE5hsRx4VCnX-cZOWYKU-5PFXjouwAQzgnqQ,15595 +numpy/core/tests/test_getlimits.py,sha256=apdxr0zKkxaVHIUpLrqAvO39q54JKN14sV4xSbK2Ifs,6718 +numpy/core/tests/test_half.py,sha256=VYPyap9GYOWZuphsfFofcIRl-oa5Ufrtv83OTp6azdU,24593 +numpy/core/tests/test_hashtable.py,sha256=ZV8HL8NkDnoQZfnje7BP0fyIp4fSFqjKsQc40PaTggc,1011 +numpy/core/tests/test_indexerrors.py,sha256=kN9xLl6FVTzmI7fumn_cuZ3k0omXnTetgtCnPY44cvw,5130 +numpy/core/tests/test_indexing.py,sha256=x0ojWuhOwWD5MZuiJ9Ncim3CgkwI-GldWxrSCmjmFJM,54314 +numpy/core/tests/test_item_selection.py,sha256=kI30kiX8mIrZYPn0jw3lGGw1ruZF4PpE9zw-aai9EPA,6458 +numpy/core/tests/test_limited_api.py,sha256=5yO0nGmCKZ9b3S66QP7vY-HIgAoyOtHZmp8mvzKuOHI,1172 +numpy/core/tests/test_longdouble.py,sha256=jO8YMm_Hsz-XPKbmv6iMcOdHgTlIFkKTwAtxpy3Q1pE,13905 +numpy/core/tests/test_machar.py,sha256=_5_TDUVtAJvJI5jBfEFKpCZtAfKCsCFt7tXlWSkWzzc,1067 +numpy/core/tests/test_mem_overlap.py,sha256=QJ0unWD_LOoAGAo4ra0IvYenj56IYUtiz1fEJEmTY9Q,29086 +numpy/core/tests/test_mem_policy.py,sha256=CXa10FQw2Qj6MqJuaC8Fm4slsoipKFjCIpYF6c5IIAU,16801 +numpy/core/tests/test_memmap.py,sha256=tZ5lJs_4ZFsJmg392ZQ33fX0m8tdfZ8ZtY9Lq41LNtk,7477 +numpy/core/tests/test_multiarray.py,sha256=GPv4IJR9dijNG-icUsQsX2tBD2RdP3EhUehY4cxvVQU,380106 +numpy/core/tests/test_nditer.py,sha256=nVQ00aNxPHqf4ZcFs3e9AVDK64TCqlO0TzfocTAACZQ,130818 +numpy/core/tests/test_nep50_promotions.py,sha256=2TwtFvj1LBpYTtdR6NFe1RAAGXIJltLqwpA1vhQCVY4,8840 +numpy/core/tests/test_numeric.py,sha256=Cnsb-ZqYtmm9V-AogDuzvDkxlpGZ5uCMP-pw6LZfRn0,136806 +numpy/core/tests/test_numerictypes.py,sha256=f_xMjZJnyDwlc6XCrd71b6x1_6dAWOv-kZ3-NEq37hU,21687 +numpy/core/tests/test_numpy_2_0_compat.py,sha256=kVCTAXska7Xi5w_TYduWhid0nlCqI6Nvmt-gDnYsuKI,1630 +numpy/core/tests/test_overrides.py,sha256=t0gOZOzu7pevE58HA-npFYJqnInHR-LLBklnzKJWHqo,26080 +numpy/core/tests/test_print.py,sha256=ErZAWd88b0ygSEoYpd0BL2tFjkerMtn1vZ7dWvaNqTc,6837 +numpy/core/tests/test_protocols.py,sha256=fEXE9K9s22oiVWkX92BY-g00-uXCK-HxjZhZxxYAKFc,1168 +numpy/core/tests/test_records.py,sha256=pluit5x6jkWoPEIrHXM13L3xZuuSSiaxoXFsOdkakCU,20269 +numpy/core/tests/test_regression.py,sha256=SJo9cPTVr2SNjhgtW7boUMyNQlXxygsZ5g0oyqC8Eks,91595 +numpy/core/tests/test_scalar_ctors.py,sha256=qDIZV-tBukwAxNDhUmGtH3CemDXlS3xd_q3L52touuA,6115 +numpy/core/tests/test_scalar_methods.py,sha256=Uj-zU0zzzKAjMBdpkzsWZ3nSFj5gJkUlqi_euhOYdnU,7541 +numpy/core/tests/test_scalarbuffer.py,sha256=FSL94hriWX1_uV6Z33wB3ZXUrpmmX2-x87kNjIxUeBk,5580 +numpy/core/tests/test_scalarinherit.py,sha256=fMInDGKsiH3IS_2ejZtIcmJZ0Ry8c7kVsHx7wp5XDoM,2368 +numpy/core/tests/test_scalarmath.py,sha256=XZj_m2I2TLktJdFD1SWj2XtV8hT26VIxasDz3cAFvgA,43247 
+numpy/core/tests/test_scalarprint.py,sha256=1599W5X0tjGhBnSQjalXkg6AY8eHXnr6PMqs4vYZQqs,18771 +numpy/core/tests/test_shape_base.py,sha256=D9haeuUVx3x3pOLmFQ9vUz7iU4T2bFTsPoI8HgSncFU,29723 +numpy/core/tests/test_simd.py,sha256=-L1UhIn9Eu_euLwaSU7bPRfYpWWOTb43qovoJS7Ws7w,48696 +numpy/core/tests/test_simd_module.py,sha256=OSpYhH_3QDxItyQcaW6SjXW57k2m-weRwpYOnJjCqN0,3902 +numpy/core/tests/test_strings.py,sha256=A9t1B65lFrYRLXgDJSg3mMDAe_hypIPcTMVOdAYIbU0,3835 +numpy/core/tests/test_ufunc.py,sha256=5pS2x3LACHn8GogYYad8LRAjByK7Gg9xTD9ik3d0Fm0,124907 +numpy/core/tests/test_umath.py,sha256=huHpclJqkO32k7BTflRHj8nImzg3p6yyryeS9LyHKWU,186482 +numpy/core/tests/test_umath_accuracy.py,sha256=mFcVdzXhhD9mqhzLDJVZsWfCHbjbFQ6XeEl5G8l-PTc,3897 +numpy/core/tests/test_umath_complex.py,sha256=WvZZZWeijo52RiOfx-G83bxzQOp_IJ3i9fEnUDVukLQ,23247 +numpy/core/tests/test_unicode.py,sha256=hUXIwMmoq89y_KXWzuXVyQaXvRwGjfY4TvKJsCbygEI,12775 +numpy/core/umath.py,sha256=JbT_SxnZ_3MEmjOI9UtX3CcAzX5Q-4RDlnnhDAEJ5Vo,2040 +numpy/core/umath_tests.py,sha256=TIzaDfrEHHgSc2J5kxFEibq8MOPhwSuyOZOUBsZNVSM,389 +numpy/ctypeslib.py,sha256=Po4XCWfxhwFQ1Q8x8DeayGiMCJLxREaCDkVyeladxBU,17247 +numpy/ctypeslib.pyi,sha256=A9te473aRO920iDVuyKypeVIQp-ueZK6EiI-qLSwJNg,7972 +numpy/distutils/__init__.py,sha256=BU1C21439HRo7yH1SsN9me6WCDPpOwRQ37ZpNwDMqCw,2074 +numpy/distutils/__init__.pyi,sha256=D8LRE6BNOmuBGO-oakJGnjT9UJTk9zSR5rxMfZzlX64,119 +numpy/distutils/__pycache__/conv_template.cpython-39.pyc,sha256=aDQmjMHaIkr2j7bDwT2pcooZ_Ii-6w8LlqgdR5r0z_A,8324 +numpy/distutils/_shell_utils.py,sha256=kMLOIoimB7PdFRgoVxCIyCFsIl1pP3d0hkm_s3E9XdA,2613 +numpy/distutils/armccompiler.py,sha256=8qUaYh8QHOJlz7MNvkuJNyYdCOCivuW0pbmf_2OPZu0,962 +numpy/distutils/ccompiler.py,sha256=6I-zQBLJCyZUZaYdmK23pmucM8MAn2OsvyzEdghPpW0,28618 +numpy/distutils/ccompiler_opt.py,sha256=diDOkSKj_j0xY08kP7-NoQsefxJgsN5clEirqXlavGY,100390 +numpy/distutils/checks/cpu_asimd.c,sha256=nXUsTLrSlhRL-UzDM8zMqn1uqJnR7TRlJi3Ixqw539w,818 +numpy/distutils/checks/cpu_asimddp.c,sha256=E4b9zT1IdSfGR2ACZJiQoR-BqaeDtzFqRNW8lBOXAaY,432 +numpy/distutils/checks/cpu_asimdfhm.c,sha256=6tXINVEpmA-lYRSbL6CrBu2ejNFmd9WONFGgg-JFXZE,529 +numpy/distutils/checks/cpu_asimdhp.c,sha256=SfwrEEA_091tmyI4vN3BNLs7ypUnrF_VbTg6gPl-ocs,379 +numpy/distutils/checks/cpu_avx.c,sha256=LuZW8o93VZZi7cYEP30dvKWTm7Mw1TLmCt5UaXDxCJg,779 +numpy/distutils/checks/cpu_avx2.c,sha256=jlDlea393op0JOiMJgmmPyKmyAXztLcObPOp9F9FaS0,749 +numpy/distutils/checks/cpu_avx512_clx.c,sha256=P-YHjj2XE4SithBkPwDgShOxGWnVSNUXg72h8O3kpbs,842 +numpy/distutils/checks/cpu_avx512_cnl.c,sha256=f_c2Z0xwAKTJeK3RYMIp1dgXYV8QyeOxUgKkMht4qko,948 +numpy/distutils/checks/cpu_avx512_icl.c,sha256=isI35-gm7Hqn2Qink5hP1XHWlh52a5vwKhEdW_CRviE,1004 +numpy/distutils/checks/cpu_avx512_knl.c,sha256=PVTkczTpHlXbTc7IQKlCFU9Cq4VGG-_JhVnT0_n-t1A,959 +numpy/distutils/checks/cpu_avx512_knm.c,sha256=eszPGr3XC9Js7mQUB0gFxlrNjQwfucQFz_UwFyNLjes,1132 +numpy/distutils/checks/cpu_avx512_skx.c,sha256=59VD8ebEJJHLlbY-4dakZV34bmq_lr9mBKz8BAcsdYc,1010 +numpy/distutils/checks/cpu_avx512_spr.c,sha256=i8DpADB8ZhIucKc8lt9JfYbQANRvR67u59oQf5winvg,904 +numpy/distutils/checks/cpu_avx512cd.c,sha256=Qfh5FJUv9ZWd_P5zxkvYYIkvqsPptgaDuKkeX_F8vyA,759 +numpy/distutils/checks/cpu_avx512f.c,sha256=d97NRcbJhqpvURnw7zyG0TOuEijKXvU0g4qOTWHbwxY,755 +numpy/distutils/checks/cpu_f16c.c,sha256=nzZzpUc8AfTtw-INR3KOxcjx9pyzVUM8OhsrdH2dO_w,868 +numpy/distutils/checks/cpu_fma3.c,sha256=YN6IDwuZALJHVVmpQ2tj-14HI_PcxH_giV8-XjzlmkU,817 
+numpy/distutils/checks/cpu_fma4.c,sha256=qKdgTNNFg-n8vSB1Txco60HBLCcOi1aH23gZOX7yKqs,301 +numpy/distutils/checks/cpu_neon.c,sha256=Y0SjuVLzh3upcbY47igHjmKgjHbXxbvzncwB7acfjxw,600 +numpy/distutils/checks/cpu_neon_fp16.c,sha256=E7YOGyYP41u1sqiCHpCGGqjmo7Cs6yUkmJ46K7LZloc,251 +numpy/distutils/checks/cpu_neon_vfpv4.c,sha256=qFY1C_fQYz7M_a_8j0KTdn7vaE3NNVmWY2JGArDGM3w,609 +numpy/distutils/checks/cpu_popcnt.c,sha256=vRcXHVw2j1F9I_07eIZ_xzDX3fd3mqgiQXL1w3pULJk,1049 +numpy/distutils/checks/cpu_sse.c,sha256=6MHITtC76UpSR9uh0SiURpnkpPkLzT5tbrcXT4xBFxo,686 +numpy/distutils/checks/cpu_sse2.c,sha256=yUZzdjDtBS-vYlhfP-pEzj3m0UPmgZs-hA99TZAEACU,697 +numpy/distutils/checks/cpu_sse3.c,sha256=j5XRHumUuccgN9XPZyjWUUqkq8Nu8XCSWmvUhmJTJ08,689 +numpy/distutils/checks/cpu_sse41.c,sha256=y_k81P-1b-Hx8OeRVDE9V1O9JakS0zPvlFKJ3VbSmEw,675 +numpy/distutils/checks/cpu_sse42.c,sha256=3PXucdI2mII-txO7zFN99TlVveT_QUAETTGvRk-_hYw,692 +numpy/distutils/checks/cpu_ssse3.c,sha256=X6VWxIXMRpdSCBsHPXvot3yTZ4d5yK9Bi1ScQP3WC-Q,705 +numpy/distutils/checks/cpu_vsx.c,sha256=FVmR4iliKjcihzMCwloR1F2JYwSZK9P4f_hvIRLHSDQ,478 +numpy/distutils/checks/cpu_vsx2.c,sha256=yESs25Rt5ztb5-stuYbu3TbiyJKmllMpMLu01GOAHqE,263 +numpy/distutils/checks/cpu_vsx3.c,sha256=omC50tbEZNigsKMFPtE3zGRlIS2VuDTm3vZ9TBZWo4U,250 +numpy/distutils/checks/cpu_vsx4.c,sha256=ngezA1KuINqJkLAcMrZJR7bM0IeA25U6I-a5aISGXJo,305 +numpy/distutils/checks/cpu_vx.c,sha256=OpLU6jIfwvGJR4JPVVZLlUfvo7oAZ0YvsjafM2qtPlk,461 +numpy/distutils/checks/cpu_vxe.c,sha256=rYW_nKwXnlB0b8xCrJEr4TmvrEvS-NToxwyqqOHV8Bk,788 +numpy/distutils/checks/cpu_vxe2.c,sha256=Hv4wO23kwC2G6lqqercq4NE4K0nrvBxR7RIzr5HTXCc,624 +numpy/distutils/checks/cpu_xop.c,sha256=7uabsGeqvmVJQvuSEjs8-Sm8kpmvl6uZ9YHMF5h2opQ,234 +numpy/distutils/checks/extra_avx512bw_mask.c,sha256=pVPOhcu80yJVnIhOcHHXOlZ2proJ1MUf0XgccqhPoNk,636 +numpy/distutils/checks/extra_avx512dq_mask.c,sha256=nMfIvepISGFDexPrMYl5LWtdmt6Uy9TKPzF4BVayw2I,504 +numpy/distutils/checks/extra_avx512f_reduce.c,sha256=_NfbtfSAkm_A67umjR1oEb9yRnBL5EnTA76fvQIuNVk,1595 +numpy/distutils/checks/extra_vsx3_half_double.c,sha256=shHvIQZfR0o-sNefOt49BOh4WCmA0BpJvj4b7F9UdvQ,354 +numpy/distutils/checks/extra_vsx4_mma.c,sha256=GiQGZ9-6wYTgH42bJgSlXhWcTIrkjh5xv4uymj6rglk,499 +numpy/distutils/checks/extra_vsx_asm.c,sha256=BngiMVS9nyr22z6zMrOrHLeCloe_5luXhf5T5mYucgI,945 +numpy/distutils/checks/test_flags.c,sha256=uAIbhfAhyGe4nTdK_mZmoCefj9P0TGHNF9AUv_Cdx5A,16 +numpy/distutils/command/__init__.py,sha256=fW49zUB3syMFsKpf1oRBO0h8tmnTwRP3zUPrsB0R22M,1032 +numpy/distutils/command/autodist.py,sha256=8KWwr5mnjX20UpY4ITRDx-PreApyh9M7B92IwsEtTsQ,3718 +numpy/distutils/command/bdist_rpm.py,sha256=-tkZupIJr_jLqeX7xbRhE8-COXHRI0GoRpAKchVte54,709 +numpy/distutils/command/build.py,sha256=aj1SUGsDUTxs4Tch2ALLcPnuAVhaPjEPIZIobzMajm0,2613 +numpy/distutils/command/build_clib.py,sha256=TCuZDpRd8ZPZH6SRwIZcWZC3aoGc18Rll6FYcawS6qY,19317 +numpy/distutils/command/build_ext.py,sha256=UcyG8KKyrd5v1s6qDdKEkzwLwmoMlfHA893Lj-OOgl0,32983 +numpy/distutils/command/build_py.py,sha256=XiLZ2d_tmCE8uG5VAU5OK2zlzQayBfeY4l8FFEltbig,1144 +numpy/distutils/command/build_scripts.py,sha256=P2ytmZb3UpwfmbMXkFB2iMQk15tNUCynzMATllmp-Gs,1665 +numpy/distutils/command/build_src.py,sha256=sxsnfc8KBsnsSvI-8sKIKNo2KA2uvrrvW0WYZCqyjyk,31178 +numpy/distutils/command/config.py,sha256=SdN-Cxvwx3AD5k-Xx_VyS2WWpVGmflnYGiTIyruj_xM,20670 +numpy/distutils/command/config_compiler.py,sha256=Cp9RTpW72gg8XC_3-9dCTlLYr352pBfBRZA8YBWvOoY,4369 +numpy/distutils/command/develop.py,sha256=9SbbnFnVbSJVZxTFoV9pwlOcM1D30GnOWm2QonQDvHI,575 
+numpy/distutils/command/egg_info.py,sha256=i-Zk4sftK5cMQVQ2jqSxTMpVI-gYyXN16-p5TvmjURc,921 +numpy/distutils/command/install.py,sha256=nkW2fl7OABcE3sUcoNM7iONkF64CBESdVlRjTLg3hVA,3073 +numpy/distutils/command/install_clib.py,sha256=1xv0_lPVu3g16GgICjjlh7T8zQ6PSlevCuq8Bocx5YM,1399 +numpy/distutils/command/install_data.py,sha256=Y59EBG61MWP_5C8XJvSCVfzYpMNVNVcH_Z6c0qgr9KA,848 +numpy/distutils/command/install_headers.py,sha256=tVpOGqkmh8AA_tam0K0SeCd4kvZj3UqSOjWKm6Kz4jY,919 +numpy/distutils/command/sdist.py,sha256=8Tsju1RwXNbPyQcjv8GRMFveFQqYlbNdSZh2X1OV-VU,733 +numpy/distutils/conv_template.py,sha256=F-4vkkfAjCb-fN79WYrXX3BMHMoiQO-W2u09q12OPuI,9536 +numpy/distutils/core.py,sha256=C-_z7rODE_12olz0dwtlKqwfaSLXEV3kZ1CyDJMsQh8,8200 +numpy/distutils/cpuinfo.py,sha256=XuNhsx_-tyrui_AOgn10yfZ9p4YBM68vW2_bGmKj07I,22639 +numpy/distutils/exec_command.py,sha256=0EGasX7tM47Q0k8yJA1q-BvIcjV_1UAC-zDmen-j6Lg,10283 +numpy/distutils/extension.py,sha256=YgeB8e2fVc2l_1etuRBv0P8c1NULOz4SaudHgsVBc30,3568 +numpy/distutils/fcompiler/__init__.py,sha256=DqfaiKGVagOFuL0v3VZxZZkRnWWvly0_lYHuLjaZTBo,40625 +numpy/distutils/fcompiler/absoft.py,sha256=yPUHBNZHOr_gxnte16I_X85o1iL9FI4RLHjG9JOuyYU,5516 +numpy/distutils/fcompiler/arm.py,sha256=MCri346qo1bYwjlm32xHRyRl-bAINTlfVIubN6HDz68,2090 +numpy/distutils/fcompiler/compaq.py,sha256=sjU2GKHJGuChtRb_MhnouMqvkIOQflmowFE6ErCWZhE,3903 +numpy/distutils/fcompiler/environment.py,sha256=DOD2FtKDk6O9k6U0h9UKWQ-65wU8z1tSPn3gUlRwCso,3080 +numpy/distutils/fcompiler/fujitsu.py,sha256=yK3wdHoF5qq25UcnIM6FzTXsJGJxdfKa_f__t04Ne7M,1333 +numpy/distutils/fcompiler/g95.py,sha256=FH4uww6re50OUT_BfdoWSLCDUqk8LvmQ2_j5RhF5nLQ,1330 +numpy/distutils/fcompiler/gnu.py,sha256=ag8v_pp-fYpDPKJsVmNaFwN621b1MFQAxew0T1KdE_Y,20502 +numpy/distutils/fcompiler/hpux.py,sha256=gloUjWGo7MgJmukorDq7ZxDnnUKXx-C6AQfryQshVM4,1353 +numpy/distutils/fcompiler/ibm.py,sha256=Ts2PXg2ocrXtX9eguvcHeQ4JB2ktpd5isXtRTpU9F5Y,3534 +numpy/distutils/fcompiler/intel.py,sha256=XYF0GLVhJWjS8noEx4TJ704Eqt-JGBolRZEOkwgNItE,6570 +numpy/distutils/fcompiler/lahey.py,sha256=U63KMfN8zDAd_jnvMkS2N-dvP4UiSRB9Ces290qLNXw,1327 +numpy/distutils/fcompiler/mips.py,sha256=LAwT0DY5yqlYh20hNMYR1-OKu8A9GNw-TbUfI8pvglM,1714 +numpy/distutils/fcompiler/nag.py,sha256=9pQCMUlwjRVHGKwZxvwd4bW5p-9v7VXcflELEImHg1g,2777 +numpy/distutils/fcompiler/none.py,sha256=6RX2X-mV1HuhJZnVfQmDmLVhIUWseIT4P5wf3rdLq9Y,758 +numpy/distutils/fcompiler/nv.py,sha256=LGBQY417zibQ-fnPis5rNtP_I1Qk9OlhEFOnPvmwXHI,1560 +numpy/distutils/fcompiler/pathf95.py,sha256=MiHVar6-beUEYVEpqXORIX4f8G29I47D36kreltdfoQ,1061 +numpy/distutils/fcompiler/pg.py,sha256=NOB1stzrjvQMZS7bIPTgWTcAFe3cjNveA5-SztUZqD0,3568 +numpy/distutils/fcompiler/sun.py,sha256=mfS3RTj9uYT6K9Ikp8RjmsEPIWAtUTzMhX9sGjEyF6I,1577 +numpy/distutils/fcompiler/vast.py,sha256=Xuxa4sNraUPcQmt45SogAfN0kDHFb6C73uNZNmX3RBE,1667 +numpy/distutils/from_template.py,sha256=hpoFQortsLZdMSr_fJILzXzrIwFlZoFjsDSo6jNtvWs,7913 +numpy/distutils/fujitsuccompiler.py,sha256=JDuUUE-GyPahkNnDZLWNHyAmJ2lJPCnLuIUFfHkjMzA,834 +numpy/distutils/intelccompiler.py,sha256=N_pvWjlLORdlH34cs97oU4LBNr_s9r5ddsmme7XEvs4,4234 +numpy/distutils/lib2def.py,sha256=-3rDf9FXsDik3-Qpp-A6N_cYZKTlmVjVi4Jzyo-pSlY,3630 +numpy/distutils/line_endings.py,sha256=a8ZZECrPRffsbs0UygeR47_fOUlZppnx-QPssrIXtB0,2032 +numpy/distutils/log.py,sha256=m8caNBwPhIG7YTnD9iq9jjc6_yJOeU9FHuau2CSulds,2879 +numpy/distutils/mingw/gfortran_vs2003_hack.c,sha256=cbsN3Lk9Hkwzr9c-yOP2xEBg1_ml1X7nwAMDWxGjzc8,77 +numpy/distutils/mingw32ccompiler.py,sha256=4G8t_6plw7xqoF0icDaWGNSBgbyDaHQn3GB5l9gikEA,22067 
+numpy/distutils/misc_util.py,sha256=2MxXE4rex_wSUhpLuwxOFeeor-WxZLjisVvXWycNaq4,89359 +numpy/distutils/msvc9compiler.py,sha256=FCtP7g34AVuMIaqQlH8AV1ZBdIUXbk5G7eBeeTSr1zE,2192 +numpy/distutils/msvccompiler.py,sha256=ILookUifVJF9tAtPJoVCqZ673m5od6MVKuAHuA3Rcfk,2647 +numpy/distutils/npy_pkg_config.py,sha256=fIFyWLTqRySO3hn-0i0FNdHeblRN_hDv-wc68-sa3hQ,12972 +numpy/distutils/numpy_distribution.py,sha256=10Urolg1aDAG0EHYfcvObzOgqRV0ARh2GhDklEg4vS0,634 +numpy/distutils/pathccompiler.py,sha256=KnJEA5H4cXg7SLrMjwWtidD24VSvOdu72d17votiY9E,713 +numpy/distutils/setup.py,sha256=l9ke_Bws431UdBfysaq7ZeGtZ8dix76oh9Huq5qqbkU,634 +numpy/distutils/system_info.py,sha256=SCk1ku0HnZNwConQBJN8FVidbeKVnrMxUyNWUVx73pY,114022 +numpy/distutils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/distutils/tests/test_build_ext.py,sha256=RNrEi-YMTGQG5YDi5GWL8iJRkk_bQHBQKcqp43TdJco,2769 +numpy/distutils/tests/test_ccompiler_opt.py,sha256=N3pN-9gxPY1KvvMEjoXr7kLxTGN8aQOr8qo5gmlrm90,28778 +numpy/distutils/tests/test_ccompiler_opt_conf.py,sha256=maXytv39amuojbQIieIGIXMV4Cv-s0fsPMZeFEh9XyY,6347 +numpy/distutils/tests/test_exec_command.py,sha256=BK-hHfIIrkCep-jNmS5_Cwq5oESvsvX3V_0XDAkT1Ok,7395 +numpy/distutils/tests/test_fcompiler.py,sha256=mJXezTXDUbduhCwVGAfABHpEARWhnj8hLW9EOU3rn84,1277 +numpy/distutils/tests/test_fcompiler_gnu.py,sha256=nmfaFCVzbViIOQ2-MjgXt-bN8Uj674hCgiwr5Iol-_U,2136 +numpy/distutils/tests/test_fcompiler_intel.py,sha256=mxkfFD2rNfg8nn1pp_413S0uCdYXydPWBcz9ilgGkA0,1058 +numpy/distutils/tests/test_fcompiler_nagfor.py,sha256=CKEjik7YVfSJGL4abuctkmlkIUhAhv-x2aUcXiTR9b0,1102 +numpy/distutils/tests/test_from_template.py,sha256=SDYoe0XUpAayyEQDq7ZhrvEEz7U9upJDLYzhcdoVifc,1103 +numpy/distutils/tests/test_log.py,sha256=0tSM4q-00CjbMIRb9QOJzI4A7GHUiRGOG1SOOLz8dnM,868 +numpy/distutils/tests/test_mingw32ccompiler.py,sha256=rMC8-IyBOiuZVfAoklV_KnD9qVeB_hFVvb5dStxfk08,1609 +numpy/distutils/tests/test_misc_util.py,sha256=Qs96vTr8GZSyVCWuamzcNlVMRa15vt0Y-T2yZSUm_QA,3218 +numpy/distutils/tests/test_npy_pkg_config.py,sha256=apGrmViPcXoPCEOgDthJgL13C9N0qQMs392QjZDxJd4,2557 +numpy/distutils/tests/test_shell_utils.py,sha256=UKU_t5oIa_kVMv89Ys9KN6Z_Fy5beqPDUsDAWPmcoR8,2114 +numpy/distutils/tests/test_system_info.py,sha256=wMV7bH5oB0luLDR2tunHrLaUxsD_-sIhLnNpj1blQPs,11405 +numpy/distutils/unixccompiler.py,sha256=fN4-LH6JJp44SLE7JkdG2kKQlK4LC8zuUpVC-RtmJ-U,5426 +numpy/doc/__init__.py,sha256=OYmE-F6x0CD05PCDY2MiW1HLlwB6i9vhDpk-a3r4lHY,508 +numpy/doc/constants.py,sha256=PlXoj7b4A8Aa9nADbg83uzTBRJaX8dvJmEdbn4FDPPo,9155 +numpy/doc/ufuncs.py,sha256=i1alLg19mNyCFZ2LYSOZGm--RsRN1x63U_UYU-N3x60,5357 +numpy/dtypes.py,sha256=BuBztrPQRasUmVZhXr2_NgJujdUTNhNwd59pZZHk3lA,2229 +numpy/dtypes.pyi,sha256=tIHniAYP7ALg2iT7NgSXO67jvE-zRlDod3MazEmD4M8,1315 +numpy/exceptions.py,sha256=7j7tv8cwXGZYgldyMisGmnAxAl2s4YU0vexME81yYlA,7339 +numpy/exceptions.pyi,sha256=KsZqWNvyPUEXUGR9EhZCUQF2f9EVSpBRlJUlGqRT02k,600 +numpy/f2py/__init__.py,sha256=m-ty_WiJZ4GVfV5--kJ3MFJaLXestz5Eo-4H0FPscK4,5565 +numpy/f2py/__init__.pyi,sha256=eA7uYXZr0p0aaz5rBW-EypLx9RchrvqDYtSnkEJQsYw,1087 +numpy/f2py/__main__.py,sha256=6i2jVH2fPriV1aocTY_dUFvWK18qa-zjpnISA-OpF3w,130 +numpy/f2py/__version__.py,sha256=7HHdjR82FCBmftwMRyrlhcEj-8mGQb6oCH-wlUPH4Nw,34 +numpy/f2py/_backends/__init__.py,sha256=7_bA7c_xDpLc4_8vPfH32-Lxn9fcUTgjQ25srdvwvAM,299 +numpy/f2py/_backends/_backend.py,sha256=GKb9-UaFszT045vUgVukPs1n97iyyjqahrWKxLOKNYo,1187 +numpy/f2py/_backends/_distutils.py,sha256=pxh2YURFYYSykIOvBFwVvhoNX1oSk-c30IPPhzlko-0,2383 
+numpy/f2py/_backends/_meson.py,sha256=gi-nbnPFDC38sumfAjg-Q5FPu6nNkyQXTjEuVf9W9Cc,6916 +numpy/f2py/_backends/meson.build.template,sha256=oTPNMAQzS4CJ_lfEzYv-oBeJTtQuThUYVN5R6ROWpNU,1579 +numpy/f2py/_isocbind.py,sha256=zaBgpfPNRmxVG3doUIlbZIiyB990MsXiwDabrSj9HnQ,2360 +numpy/f2py/_src_pyf.py,sha256=4t6TN4ZKWciC4f1z6fwaGrpIGhHKRiwHfcrNj4FIzCg,7654 +numpy/f2py/auxfuncs.py,sha256=dNs4b2KDIcG4M1hPBvD09-Vh7CDzlPIrFscOdvL3p1o,26539 +numpy/f2py/capi_maps.py,sha256=ENjYyeZ3CCJcLwJJgmKOSYrD1KPuhpwauXqeizdV55o,30563 +numpy/f2py/cb_rules.py,sha256=5TuHbJWGjsF6yVNzKuV2tAnwdLyhcWlmdsjYlDOZOv4,24992 +numpy/f2py/cfuncs.py,sha256=KJyW7mdjmFSmxssfeegGJs5NZyF3mZMgNvOxN9-vYHQ,51913 +numpy/f2py/common_rules.py,sha256=gHB76WypbkVmhaD_RWhy8Od4zDTgj8cbDOdUdIp6PIQ,5131 +numpy/f2py/crackfortran.py,sha256=ErLdkWP8MxeyW5vVPGXwyvrxZAwymlvIBC0th2rvK74,148553 +numpy/f2py/diagnose.py,sha256=0SRXBE2hJgKJN_Rf4Zn00oKXC_Tka3efPWM47zg6BoY,5197 +numpy/f2py/f2py2e.py,sha256=5t093ZQ4xs0_0UbyaYVd2yA2EVOaOAcuU29JI-IU2Ag,27717 +numpy/f2py/f90mod_rules.py,sha256=otm3_dmVIna0eBVHLu_693s3a_82lU3pqeqDacWI37s,9594 +numpy/f2py/func2subr.py,sha256=6d2R5awuHRT4xzgfUfwS7JHTqhhAieSXcENlssD_2c4,10298 +numpy/f2py/rules.py,sha256=B4FxSYEfZ_1j_z9GulQNZ1BNrPrUvlU3ybxwTkrIxjI,62727 +numpy/f2py/setup.cfg,sha256=Fpn4sjqTl5OT5sp8haqKIRnUcTPZNM6MIvUJBU7BIhg,48 +numpy/f2py/setup.py,sha256=MmAVspT8DDTqDuL8ZJhxK62g0lcso4vqI6QNQ9CsfoQ,2422 +numpy/f2py/src/fortranobject.c,sha256=g4BKDO1_9pCu6hithKXD2oH_Mt-HH1NTnP6leCqJrzc,46017 +numpy/f2py/src/fortranobject.h,sha256=neMKotYWbHvrhW9KXz4QzQ8fzPkiQXLHHjy82vLSeog,5835 +numpy/f2py/symbolic.py,sha256=jWBoAwECCxRdWczR9r7O6UERcYmH_GbdcAReNp7cmJY,53270 +numpy/f2py/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/f2py/tests/src/abstract_interface/foo.f90,sha256=JFU2w98cB_XNwfrqNtI0yDTmpEdxYO_UEl2pgI_rnt8,658 +numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90,sha256=gvQJIzNtvacWE0dhysxn30-iUeI65Hpq7DiE9oRauz8,105 +numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c,sha256=Ff5wHYV9-OJnZuelfFWcjAibRvDkEIlbTVczTyv6TG8,7299 +numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap,sha256=But9r9m4iL7EGq_haMW8IiQ4VivH0TgUozxX4pPvdpE,29 +numpy/f2py/tests/src/assumed_shape/foo_free.f90,sha256=oBwbGSlbr9MkFyhVO2aldjc01dr9GHrMrSiRQek8U64,460 +numpy/f2py/tests/src/assumed_shape/foo_mod.f90,sha256=rfzw3QdI-eaDSl-hslCgGpd5tHftJOVhXvb21Y9Gf6M,499 +numpy/f2py/tests/src/assumed_shape/foo_use.f90,sha256=rmT9k4jP9Ru1PLcGqepw9Jc6P9XNXM0axY7o4hi9lUw,269 +numpy/f2py/tests/src/assumed_shape/precision.f90,sha256=r08JeTVmTTExA-hYZ6HzaxVwBn1GMbPAuuwBhBDtJUk,130 +numpy/f2py/tests/src/block_docstring/foo.f,sha256=y7lPCPu7_Fhs_Tf2hfdpDQo1bhtvNSKRaZAOpM_l3dg,97 +numpy/f2py/tests/src/callback/foo.f,sha256=C1hjfpRCQWiOVVzIHqnsYcnLrqQcixrnHCn8hd9GhVk,1254 +numpy/f2py/tests/src/callback/gh17797.f90,sha256=_Nrl0a2HgUbtymGU0twaJ--7rMa1Uco2A3swbWvHoMo,148 +numpy/f2py/tests/src/callback/gh18335.f90,sha256=NraOyKIXyvv_Y-3xGnmTjtNjW2Znsnlk8AViI8zfovc,506 +numpy/f2py/tests/src/callback/gh25211.f,sha256=a2sxlQhtDVbYn8KOKHUYqwc-aCFt7sDPSnJsXFG35uI,179 +numpy/f2py/tests/src/callback/gh25211.pyf,sha256=FWxo0JWQlw519BpZV8PoYeI_FZ_K6C-3Wk6gLrfBPlw,447 +numpy/f2py/tests/src/cli/gh_22819.pyf,sha256=5rvOfCv-wSosB354LC9pExJmMoSHnbGZGl_rtA2fogA,142 +numpy/f2py/tests/src/cli/hi77.f,sha256=ttyI6vAP3qLnDqy82V04XmoqrXNM6uhMvvLri2p0dq0,71 +numpy/f2py/tests/src/cli/hiworld.f90,sha256=QWOLPrTxYQu1yrEtyQMbM0fE9M2RmXe7c185KnD5x3o,51 +numpy/f2py/tests/src/common/block.f,sha256=GQ0Pd-VMX3H3a-__f2SuosSdwNXHpBqoGnQDjf8aG9g,224 
+numpy/f2py/tests/src/common/gh19161.f90,sha256=BUejyhqpNVfHZHQ-QC7o7ZSo7lQ6YHyX08lSmQqs6YM,193 +numpy/f2py/tests/src/crackfortran/accesstype.f90,sha256=-5Din7YlY1TU7tUHD2p-_DSTxGBpDsWYNeT9WOwGhno,208 +numpy/f2py/tests/src/crackfortran/data_common.f,sha256=ZSUAh3uhn9CCF-cYqK5TNmosBGPfsuHBIEfudgysun4,193 +numpy/f2py/tests/src/crackfortran/data_multiplier.f,sha256=jYrJKZWF_59JF9EMOSALUjn0UupWvp1teuGpcL5s1Sc,197 +numpy/f2py/tests/src/crackfortran/data_stmts.f90,sha256=19YO7OGj0IksyBlmMLZGRBQLjoE3erfkR4tFvhznvvE,693 +numpy/f2py/tests/src/crackfortran/data_with_comments.f,sha256=hoyXw330VHh8duMVmAQZjr1lgLVF4zFCIuEaUIrupv0,175 +numpy/f2py/tests/src/crackfortran/foo_deps.f90,sha256=CaH7mnWTG7FcnJe2vXN_0zDbMadw6NCqK-JJ2HmDjK8,128 +numpy/f2py/tests/src/crackfortran/gh15035.f,sha256=jJly1AzF5L9VxbVQ0vr-sf4LaUo4eQzJguhuemFxnvg,375 +numpy/f2py/tests/src/crackfortran/gh17859.f,sha256=7K5dtOXGuBDAENPNCt-tAGJqTfNKz5OsqVSk16_e7Es,340 +numpy/f2py/tests/src/crackfortran/gh22648.pyf,sha256=qZHPRNQljIeYNwbqPLxREnOrSdVV14f3fnaHqB1M7c0,241 +numpy/f2py/tests/src/crackfortran/gh23533.f,sha256=w3tr_KcY3s7oSWGDmjfMHv5h0RYVGUpyXquNdNFOJQg,126 +numpy/f2py/tests/src/crackfortran/gh23598.f90,sha256=41W6Ire-5wjJTTg6oAo7O1WZfd1Ug9vvNtNgHS5MhEU,101 +numpy/f2py/tests/src/crackfortran/gh23598Warn.f90,sha256=1v-hMCT_K7prhhamoM20nMU9zILam84Hr-imck_dYYk,205 +numpy/f2py/tests/src/crackfortran/gh23879.f90,sha256=LWDJTYR3t9h1IsrKC8dVXZlBfWX7clLeU006X6Ow8oI,332 +numpy/f2py/tests/src/crackfortran/gh2848.f90,sha256=gPNasx98SIf7Z9ibk_DHiGKCvl7ERtsfoGXiFDT7FbM,282 +numpy/f2py/tests/src/crackfortran/operators.f90,sha256=-Fc-qjW1wBr3Dkvdd5dMTrt0hnjnV-1AYo-NFWcwFSo,1184 +numpy/f2py/tests/src/crackfortran/privatemod.f90,sha256=7bubZGMIn7iD31wDkjF1TlXCUM7naCIK69M9d0e3y-U,174 +numpy/f2py/tests/src/crackfortran/publicmod.f90,sha256=Pnwyf56Qd6W3FUH-ZMgnXEYkb7gn18ptNTdwmGan0Jo,167 +numpy/f2py/tests/src/crackfortran/pubprivmod.f90,sha256=eYpJwBYLKGOxVbKgEqfny1znib-b7uYhxcRXIf7uwXg,165 +numpy/f2py/tests/src/crackfortran/unicode_comment.f90,sha256=aINLh6GlfTwFewxvDoqnMqwuCNb4XAqi5Nj5vXguXYs,98 +numpy/f2py/tests/src/f2cmap/.f2py_f2cmap,sha256=iUOtfHd3OuT1Rz2-yiSgt4uPKGvCt5AzQ1iygJt_yjg,82 +numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90,sha256=iJCD8a8MUTmuPuedbcmxW54Nr4alYuLhksBe1sHS4K0,298 +numpy/f2py/tests/src/isocintrin/isoCtests.f90,sha256=jcw-fzrFh0w5U66uJYfeUW4gv94L5MnWQ_NpsV9y0oI,998 +numpy/f2py/tests/src/kind/foo.f90,sha256=zIHpw1KdkWbTzbXb73hPbCg4N2Htj3XL8DIwM7seXpo,347 +numpy/f2py/tests/src/mixed/foo.f,sha256=90zmbSHloY1XQYcPb8B5d9bv9mCZx8Z8AMTtgDwJDz8,85 +numpy/f2py/tests/src/mixed/foo_fixed.f90,sha256=pxKuPzxF3Kn5khyFq9ayCsQiolxB3SaNtcWaK5j6Rv4,179 +numpy/f2py/tests/src/mixed/foo_free.f90,sha256=fIQ71wrBc00JUAVUj_r3QF9SdeNniBiMw6Ly7CGgPWU,139 +numpy/f2py/tests/src/module_data/mod.mod,sha256=EkjrU7NTZrOH68yKrz6C_eyJMSFSxGgC2yMQT9Zscek,412 +numpy/f2py/tests/src/module_data/module_data_docstring.f90,sha256=tDZ3fUlazLL8ThJm3VwNGJ75QIlLcW70NnMFv-JA4W0,224 +numpy/f2py/tests/src/negative_bounds/issue_20853.f90,sha256=fdOPhRi7ipygwYCXcda7p_dlrws5Hd2GlpF9EZ-qnck,157 +numpy/f2py/tests/src/parameter/constant_both.f90,sha256=-bBf2eqHb-uFxgo6Q7iAtVUUQzrGFqzhHDNaxwSICfQ,1939 +numpy/f2py/tests/src/parameter/constant_compound.f90,sha256=re7pfzcuaquiOia53UT7qNNrTYu2euGKOF4IhoLmT6g,469 +numpy/f2py/tests/src/parameter/constant_integer.f90,sha256=nEmMLitKoSAG7gBBEQLWumogN-KS3DBZOAZJWcSDnFw,612 +numpy/f2py/tests/src/parameter/constant_non_compound.f90,sha256=IcxESVLKJUZ1k9uYKoSb8Hfm9-O_4rVnlkiUU2diy8Q,609 
+numpy/f2py/tests/src/parameter/constant_real.f90,sha256=quNbDsM1Ts2rN4WtPO67S9Xi_8l2cXabWRO00CPQSSQ,610 +numpy/f2py/tests/src/quoted_character/foo.f,sha256=WjC9D9171fe2f7rkUAZUvik9bkIf9adByfRGzh6V0cM,482 +numpy/f2py/tests/src/regression/gh25337/data.f90,sha256=9Uz8CHB9i3_mjC3cTOmkTgPAF5tWSwYacG3MUrU-SY0,180 +numpy/f2py/tests/src/regression/gh25337/use_data.f90,sha256=WATiDGAoCKnGgMzm_iMgmfVU0UKOQlk5Fm0iXCmPAkE,179 +numpy/f2py/tests/src/regression/inout.f90,sha256=CpHpgMrf0bqA1W3Ozo3vInDz0RP904S7LkpdAH6ODck,277 +numpy/f2py/tests/src/return_character/foo77.f,sha256=WzDNF3d_hUDSSZjtxd3DtE-bSx1ilOMEviGyYHbcFgM,980 +numpy/f2py/tests/src/return_character/foo90.f90,sha256=ULcETDEt7gXHRzmsMhPsGG4o3lGrcx-FEFaJsPGFKyA,1248 +numpy/f2py/tests/src/return_complex/foo77.f,sha256=8ECRJkfX82oFvGWKbIrCvKjf5QQQClx4sSEvsbkB6A8,973 +numpy/f2py/tests/src/return_complex/foo90.f90,sha256=c1BnrtWwL2dkrTr7wvlEqNDg59SeNMo3gyJuGdRwcDw,1238 +numpy/f2py/tests/src/return_integer/foo77.f,sha256=_8k1evlzBwvgZ047ofpdcbwKdF8Bm3eQ7VYl2Y8b5kA,1178 +numpy/f2py/tests/src/return_integer/foo90.f90,sha256=bzxbYtofivGRYH35Ang9ScnbNsVERN8-6ub5-eI-LGQ,1531 +numpy/f2py/tests/src/return_logical/foo77.f,sha256=FxiF_X0HkyXHzJM2rLyTubZJu4JB-ObLnVqfZwAQFl8,1188 +numpy/f2py/tests/src/return_logical/foo90.f90,sha256=9KmCe7yJYpi4ftkKOM3BCDnPOdBPTbUNrKxY3p37O14,1531 +numpy/f2py/tests/src/return_real/foo77.f,sha256=ZTrzb6oDrIDPlrVWP3Bmtkbz3ffHaaSQoXkfTGtCuFE,933 +numpy/f2py/tests/src/return_real/foo90.f90,sha256=gZuH5lj2lG6gqHlH766KQ3J4-Ero-G4WpOOo2MG3ohU,1194 +numpy/f2py/tests/src/size/foo.f90,sha256=IlFAQazwBRr3zyT7v36-tV0-fXtB1d7WFp6S1JVMstg,815 +numpy/f2py/tests/src/string/char.f90,sha256=ihr_BH9lY7eXcQpHHDQhFoKcbu7VMOX5QP2Tlr7xlaM,618 +numpy/f2py/tests/src/string/fixed_string.f90,sha256=5n6IkuASFKgYICXY9foCVoqndfAY0AQZFEK8L8ARBGM,695 +numpy/f2py/tests/src/string/gh24008.f,sha256=UA8Pr-_yplfOFmc6m4v9ryFQ8W9OulaglulefkFWD68,217 +numpy/f2py/tests/src/string/gh24662.f90,sha256=-Tp9Kd1avvM7AIr8ZukFA9RVr-wusziAnE8AvG9QQI4,197 +numpy/f2py/tests/src/string/gh25286.f90,sha256=2EpxvC-0_dA58MBfGQcLyHzpZgKcMf_W9c73C_Mqnok,304 +numpy/f2py/tests/src/string/gh25286.pyf,sha256=GjgWKh1fHNdPGRiX5ek60i1XSeZsfFalydWqjISPVV8,381 +numpy/f2py/tests/src/string/gh25286_bc.pyf,sha256=6Y9zU66NfcGhTXlFOdFjCSMSwKXpq5ZfAe3FwpkAsm4,384 +numpy/f2py/tests/src/string/scalar_string.f90,sha256=ACxV2i6iPDk-a6L_Bs4jryVKYJMEGUTitEIYTjbJes4,176 +numpy/f2py/tests/src/string/string.f,sha256=shr3fLVZaa6SyUJFYIF1OZuhff8v5lCwsVNBU2B-3pk,248 +numpy/f2py/tests/src/value_attrspec/gh21665.f90,sha256=JC0FfVXsnB2lZHb-nGbySnxv_9VHAyD0mKaLDowczFU,190 +numpy/f2py/tests/test_abstract_interface.py,sha256=C8-ly0_TqkmpQNZmwPHwo2IV2MBH0jQEjAhpqHrg8Y4,832 +numpy/f2py/tests/test_array_from_pyobj.py,sha256=Txff89VUeEhWqUCRVybIqsqH4YQvpk4Uyjmh_XjyMi0,24049 +numpy/f2py/tests/test_assumed_shape.py,sha256=FeaqtrWyBf5uyArcmI0D2e_f763aSMpgU3QmdDXe-tA,1466 +numpy/f2py/tests/test_block_docstring.py,sha256=SEpuq73T9oVtHhRVilFf1xF7nb683d4-Kv7V0kfL4AA,564 +numpy/f2py/tests/test_callback.py,sha256=cReSlVjgnoT74wmtNn-oEIZiJUTfRX7ljjlqJi716IQ,6494 +numpy/f2py/tests/test_character.py,sha256=3ugjM1liymMRbY8wub1eiap-jdyNYVHxlNZBqNoRLe4,21868 +numpy/f2py/tests/test_common.py,sha256=m7TTSJt5zUZKJF-MQUeTtCyxW7YwRBSETINXGPFu8S4,896 +numpy/f2py/tests/test_compile_function.py,sha256=9d_FZ8P2wbIlQ2qPDRrsFqPb4nMH8tiWqYZN-P_shCs,4186 +numpy/f2py/tests/test_crackfortran.py,sha256=y1x3U-jlQWD5rmTXz1I2RlTz7LEfbI6qxCDkR5fzPwY,13441 +numpy/f2py/tests/test_data.py,sha256=HFcmPYbiveKa-swJ8x8XlRR9sM0ESB9FEN-txZnHTok,2876 
+numpy/f2py/tests/test_docs.py,sha256=jqtuHE5ZjxP4D8Of3Fkzz36F8_0qKbeS040_m0ac4v4,1662 +numpy/f2py/tests/test_f2cmap.py,sha256=p-Sylbr3ctdKT3UQV9FzpCuYPH5U7Vyn8weXFAjiI9o,391 +numpy/f2py/tests/test_f2py2e.py,sha256=eoswH-daMEBlueoVpxXrDloahCpr0RLzHbr3zBHOsjk,25423 +numpy/f2py/tests/test_isoc.py,sha256=_nPTPxNEEagiKriZBeFNesOattIlHDzaNKmj35xxDBY,1406 +numpy/f2py/tests/test_kind.py,sha256=aOMQSBoD_dw49acKN25_abEvQBLI27DsnWIb9CNpSAE,1671 +numpy/f2py/tests/test_mixed.py,sha256=Ctuw-H7DxhPjSt7wZdJ2xffawIoEBCPWc5F7PSkY4HY,848 +numpy/f2py/tests/test_module_doc.py,sha256=sjCXWIKrqMD1NQ1DUAzgQqkjS5w9h9gvM_Lj29Rdcrg,863 +numpy/f2py/tests/test_parameter.py,sha256=ADI7EV_CM4ztICpqHqeq8LI-WdB6cX0ttatdRdjbsUA,3941 +numpy/f2py/tests/test_pyf_src.py,sha256=eD0bZu_GWfoCq--wWqEKRf-F2h5AwoTyO6GMA9wJPr4,1135 +numpy/f2py/tests/test_quoted_character.py,sha256=cpjMdrHwimnkoJkXd_W_FSlh43oWytY5VHySW9oskO4,454 +numpy/f2py/tests/test_regression.py,sha256=v_6RDQr6IcMmbCMElfzRSLPgZhHnH5l99uztrbJAzqE,2532 +numpy/f2py/tests/test_return_character.py,sha256=18HJtiRwQ7a_2mdPUonD5forKWZJEapD-Vi1DsbTjVs,1493 +numpy/f2py/tests/test_return_complex.py,sha256=BZIIqQ1abdiPLgVmu03_q37yCtND0ijxGSMhGz2Wf-o,2397 +numpy/f2py/tests/test_return_integer.py,sha256=t--9UsdLF9flLTQv7a0KTSVoBuoDtTnmOG2QIFPINVc,1758 +numpy/f2py/tests/test_return_logical.py,sha256=XCmp8E8I6BOeNYF59HjSFAdv1hM9WaDvl8UDS10_05o,2017 +numpy/f2py/tests/test_return_real.py,sha256=ATek5AM7dCCPeIvoMOQIt5yFNFzKrFb1Kno8B4M0rn4,3235 +numpy/f2py/tests/test_semicolon_split.py,sha256=_Mdsi84lES18pPjl9J-QsbGttV4tPFFjZvJvejNcqPc,1635 +numpy/f2py/tests/test_size.py,sha256=q6YqQvcyqdXJeWbGijTiCbxyEG3EkPcvT8AlAW6RCMo,1164 +numpy/f2py/tests/test_string.py,sha256=5xZOfdReoHnId0950XfmtfduPPfBbtMkzBoXMtygvMk,2962 +numpy/f2py/tests/test_symbolic.py,sha256=28quk2kTKfWhKe56n4vINJ8G9weKBfc7HysMlE9J3_g,18341 +numpy/f2py/tests/test_value_attrspec.py,sha256=rWwJBfE2qGzqilZZurJ-7ucNoJDICye6lLetQSLFees,323 +numpy/f2py/tests/util.py,sha256=F0fs80ln4tV6THNXfv-DXBqViEg9wu6lTelQQTSzHaI,11136 +numpy/f2py/use_rules.py,sha256=3pTDOPur6gbPHPtwuMJPQvpnUMw39Law1KFSH0coB_0,3527 +numpy/fft/__init__.py,sha256=HqjmF6s_dh0Ri4UZzUDtOKbNUyfAfJAWew3e3EL_KUk,8175 +numpy/fft/__init__.pyi,sha256=vD9Xzz5r13caF4AVL87Y4U9KOj9ic25Vci_wb3dmgpk,550 +numpy/fft/_pocketfft.py,sha256=Xkm8wcP4JyBNMbp0ZoHIWhNDlgliX24RzrDuo29uRks,52897 +numpy/fft/_pocketfft.pyi,sha256=S6-ylUuHbgm8vNbh7tLru6K2R5SJzE81BC_Sllm6QrQ,2371 +numpy/fft/_pocketfft_internal.cpython-39-darwin.so,sha256=z8fHWJIdo4SZSmZXjSidbIqnEHVqavZKKPlkuQ_V9UY,102309 +numpy/fft/helper.py,sha256=aNj1AcLvtfoX26RiLOwcR-k2QSMuBZkGj2Fu0CeFPJs,6154 +numpy/fft/helper.pyi,sha256=NLTEjy2Gz1aAMDZwCgssIyUne0ubjJqukfYkpsL3gXM,1176 +numpy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/fft/tests/test_helper.py,sha256=whgeaQ8PzFf3B1wkbXobGZ5sF4WxPp4gf1UPUVZest8,6148 +numpy/fft/tests/test_pocketfft.py,sha256=RdeCCvUQmJYVvccOJwToobTKDg9yzUL06o9MkPmRfmI,12895 +numpy/lib/__init__.py,sha256=XMPNJkG_mQ__xuvbf0OcpotgMbA9owt10ZHYVnYHq8E,2713 +numpy/lib/__init__.pyi,sha256=y5ANokFm7EkrlNoHdeQm1FsUhLFxkYtLuanCbsWrGio,5596 +numpy/lib/_datasource.py,sha256=CDF3im6IxdY3Mu6fwRQmkSEBmXS3kQVInQ4plXsoX9c,22631 +numpy/lib/_iotools.py,sha256=Yg9HCfPg4tbhbdgLPcxSMiZXq1xDprvJKLebLwhDszY,30868 +numpy/lib/_version.py,sha256=6vK7czNSB_KrWx2rZJzJ1pyOc73Q07hAgfLB5ItUCnU,4855 +numpy/lib/_version.pyi,sha256=B572hyWrUWG-TAAAXrNNAT4AgyUAmJ4lvgpwMkDzunk,633 +numpy/lib/arraypad.py,sha256=bKP7ZS9NYFYzqSk8OnpFLFrMsua4m_hcqFsi7cGkrJE,31803 
+numpy/lib/arraypad.pyi,sha256=ADXphtAORYl3EqvE5qs_u32B_TALKSOtF43jOLmoxRw,1728 +numpy/lib/arraysetops.py,sha256=GJ2RhkzIJmIbwyG6h3LOFTPXg62kM9tcV1a-7tdbVuU,33655 +numpy/lib/arraysetops.pyi,sha256=6X-5l5Yss_9y10LYyIsDLbGX77vt7PtVLDqxOlSRPfY,8372 +numpy/lib/arrayterator.py,sha256=BQ97S00zvfURUZfes0GZo-5hydYNRuvwX1I1bLzeRik,7063 +numpy/lib/arrayterator.pyi,sha256=f7Pwp83_6DiMYmJGUsffncM-FRAynB1iYGvhmHM_SZE,1537 +numpy/lib/format.py,sha256=T8qJMyG2DDVjjYNNpUvBgfA9tCo23IS0w9byRB6twwQ,34769 +numpy/lib/format.pyi,sha256=YWBxC3GdsZ7SKBN8I7nMwWeVuFD1aT9d-VJ8zE4-P-o,748 +numpy/lib/function_base.py,sha256=G8I3G6wqJv_tcvimu3iwdqh-39EDI-FlDEVd2dqjvsM,189103 +numpy/lib/function_base.pyi,sha256=KWaC5UOBANU4hiIoN2eptE4HYsm4vgp_8BMFV1Y3JX4,16585 +numpy/lib/histograms.py,sha256=xsj_qpaZoI2Bv1FBpY8mIMPJrYRiuIBszn_6kO7YFRA,37778 +numpy/lib/histograms.pyi,sha256=hNwR2xYWkgJCP-nfRGxc-EgHLTD3qm4zmWXthZLt08M,995 +numpy/lib/index_tricks.py,sha256=4PEvXk6VFTkttMViYBVC4yDhyOiKIon6JpIm0d_CmNg,31346 +numpy/lib/index_tricks.pyi,sha256=D2nkNXOB9Vea1PfMaTn94OGBGayjTaQ-bKMsjDmYpak,4251 +numpy/lib/mixins.py,sha256=y6_MzQuiNjv-1EFVROqv2y2cAJi5X4rQYzbZCyUyXgw,7071 +numpy/lib/mixins.pyi,sha256=h9N1kbZsUntF0zjOxPYeD_rCB2dMiG35TYYPl9ymkI4,3117 +numpy/lib/nanfunctions.py,sha256=6EjzydZlugIzfiENKtC4ycZ2Nckt8ZQg5v6D6tX1SiU,65775 +numpy/lib/nanfunctions.pyi,sha256=oPqAfCinmBL85Ji7ko4QlzAzLAK9nZL0t2_CllEbCEU,606 +numpy/lib/npyio.py,sha256=NUjtFvAmPdTjwJQ-ia-xbCr849M_M6NilP5IHfkKaRg,97316 +numpy/lib/npyio.pyi,sha256=SUFWJh90vWZCdd6GCSGbfYeXKlWut0XY_SHvZJc8yqY,9728 +numpy/lib/polynomial.py,sha256=6Aw3_2vdbh4urERQ6NaPhf9a_T1o1o6cjm3fb5Z3_YE,44133 +numpy/lib/polynomial.pyi,sha256=GerIpQnf5LdtFMOy9AxhOTqUyfn57k4MxqEYrfdckWE,6958 +numpy/lib/recfunctions.py,sha256=-90AbWWvVFOqVUPLh9K9NYdKUHYIgSEyg2Y35MnOVUA,59423 +numpy/lib/scimath.py,sha256=T4ITysZgqhY1J8IxyXCtioHjMTg2ci-4i3mr9TBF2UA,15037 +numpy/lib/scimath.pyi,sha256=E2roKJzMFwWSyhLu8UPUr54WOpxF8jp_pyXYBgsUSQ8,2883 +numpy/lib/setup.py,sha256=0K5NJKuvKvNEWp-EX7j0ODi3ZQQgIMHobzSFJq3G7yM,405 +numpy/lib/shape_base.py,sha256=AhCO9DEyysE-P-QJF9ryUtJ1ghU4_0mORhAJ59poObU,38947 +numpy/lib/shape_base.pyi,sha256=bGJhLA_RvUpVTiDFgCV-1rUjV8e1qCh0gK_3PLgXA_U,5341 +numpy/lib/stride_tricks.py,sha256=brY5b-0YQJuIH2CavfpIinMolyTUv5k9DUvLoZ-imis,17911 +numpy/lib/stride_tricks.pyi,sha256=0pQ4DP9l6g21q2Ajv6dJFRWMr9auPGTNV9BmZUbogPY,1747 +numpy/lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/lib/tests/data/py2-objarr.npy,sha256=F4cyUC-_TB9QSFLAo2c7c44rC6NUYIgrfGx9PqWPSKk,258 +numpy/lib/tests/data/py2-objarr.npz,sha256=xo13HBT0FbFZ2qvZz0LWGDb3SuQASSaXh7rKfVcJjx4,366 +numpy/lib/tests/data/py3-objarr.npy,sha256=pTTVh8ezp-lwAK3fkgvdKU8Arp5NMKznVD-M6Ex_uA0,341 +numpy/lib/tests/data/py3-objarr.npz,sha256=qQR0gS57e9ta16d_vCQjaaKM74gPdlwCPkp55P-qrdw,449 +numpy/lib/tests/data/python3.npy,sha256=X0ad3hAaLGXig9LtSHAo-BgOvLlFfPYMnZuVIxRmj-0,96 +numpy/lib/tests/data/win64python2.npy,sha256=agOcgHVYFJrV-nrRJDbGnUnF4ZTPYXuSeF-Mtg7GMpc,96 +numpy/lib/tests/test__datasource.py,sha256=65KXfUUvp8wXSqgQisuYlkhg-qHjBV5FXYetL8Ba-rc,10571 +numpy/lib/tests/test__iotools.py,sha256=HerCqvDE07JxjFQlWEfpZO7lC9z0Sbr3z20GSutoCPs,13743 +numpy/lib/tests/test__version.py,sha256=aO3YgkAohLsLzCNQ7vjIwdpFUMz0cPLbcuuxIkjuN74,1999 +numpy/lib/tests/test_arraypad.py,sha256=obohHbyM0gPYPUkd7iJSOSiDqyqtJsjDNtQX68NC4lM,54830 +numpy/lib/tests/test_arraysetops.py,sha256=5-T1MVhfIMivat8Z47GZw0ZaR811W_FskM1bAXnFyLU,35912 +numpy/lib/tests/test_arrayterator.py,sha256=AYs2SwV5ankgwnvKI9RSO1jZck118nu3SyZ4ngzZNso,1291 
+numpy/lib/tests/test_financial_expired.py,sha256=yq5mqGMvqpkiiw9CuZhJgrYa7Squj1mXr_G-IvAFgwI,247 +numpy/lib/tests/test_format.py,sha256=xV0oi1eoRnVwAAhSOcPFQHQWF7TfsROtDYShQLPtdaA,41028 +numpy/lib/tests/test_function_base.py,sha256=tqIwgkd-4uWZlHCQ-7eplJuFXvSD_oWMD8Bibc8-ptk,157726 +numpy/lib/tests/test_histograms.py,sha256=16_XJp-eFgsuM8B4mDQpQ4w_Ib29Hg0EPO-WFsdaFWA,32815 +numpy/lib/tests/test_index_tricks.py,sha256=Vjz25Y6H_ih0iEE2AG0kaxO9U8PwcXSrofzqnN4XBwI,20256 +numpy/lib/tests/test_io.py,sha256=3Tow1pucrQ7z7osNN4a2grBYUoBGNkQEhjmCjXT6Vag,107891 +numpy/lib/tests/test_loadtxt.py,sha256=gwcDJDJmLJRMLpg322yjQ1IzI505w9EqJoq4DmDPCdI,38560 +numpy/lib/tests/test_mixins.py,sha256=Wivwz3XBWsEozGzrzsyyvL3qAuE14t1BHk2LPm9Z9Zc,7030 +numpy/lib/tests/test_nanfunctions.py,sha256=01r_mmTCvKVdZuOGTEHNDZXrMS724us_jwZANzCd74A,47609 +numpy/lib/tests/test_packbits.py,sha256=OWGAd5g5GG0gl7WHqNfwkZ7G-2rrtLt2sI854PG4nnw,17546 +numpy/lib/tests/test_polynomial.py,sha256=URouxJpr8FQ5hiKybqhtOcLA7e-3hj4kWzjLBROByyA,11395 +numpy/lib/tests/test_recfunctions.py,sha256=6jzouPEQ7Uhtj8_-W5yTI6ymNp2nLgmdHzxdd74jVuM,44001 +numpy/lib/tests/test_regression.py,sha256=KzGFkhTcvEG97mymoOQ2hP2CEr2nPZou0Ztf4-WaXCs,8257 +numpy/lib/tests/test_shape_base.py,sha256=2iQCEFR6evVpF8woaenxUOzooHkfuMYkBaUj8ecyJ-E,26817 +numpy/lib/tests/test_stride_tricks.py,sha256=wprpWWH5eq07DY7rzG0WDv5fMtLxzRQz6fm6TZWlScQ,22849 +numpy/lib/tests/test_twodim_base.py,sha256=ll-72RhqCItIPB97nOWhH7H292h4nVIX_w1toKTPMUg,18841 +numpy/lib/tests/test_type_check.py,sha256=lxCH5aApWVYhhSoDQSLDTCHLVHuK2c-jBbnfnZUrOaA,15114 +numpy/lib/tests/test_ufunclike.py,sha256=4hSnXGlSC8HE-_pRRMzD8-HI4hGHqsAWu1pD0o2kPI0,2982 +numpy/lib/tests/test_utils.py,sha256=RVAxrzSFu6N3C4_jIgAlTDOWF_B7wr2v1Y20dX5upYM,6218 +numpy/lib/twodim_base.py,sha256=Mvzn_PyShIb9m7nJjJ4IetdxwmLYEsCPHvJoK7n2viU,32947 +numpy/lib/twodim_base.pyi,sha256=xFRcEVJdDj4mrXW_6iVP1lTMoJx4QJjYRD3o2_9f2eY,5370 +numpy/lib/type_check.py,sha256=_EOtB296nFYlNT7ztBYoC_yK9aycIb0KTmRjvzVdZNg,19954 +numpy/lib/type_check.pyi,sha256=LPvAvIxU-p5i_Qe-ic7hEvo4OTfSrNpplxMG7OAZe8Q,5571 +numpy/lib/ufunclike.py,sha256=_ceBGbGCMOd3u_h2UVzyaRK6ZY7ryoJ0GJB7zqcJG3w,6325 +numpy/lib/ufunclike.pyi,sha256=hLxcYfQprh1tTY_UO2QscA3Hd9Zd7cVGXIINZLhMFqY,1293 +numpy/lib/user_array.py,sha256=LE958--CMkBI2r3l1SQxmCHdCSw6HY6-RhWCnduzGA4,7721 +numpy/lib/utils.py,sha256=6NdleaELZiqARdj-ECZjxtwLf1bqklOcK43m9yoZefs,37804 +numpy/lib/utils.pyi,sha256=mVHVzWuc2-M3Oz60lFsbok0v8LH_HRHMjZpXwrtzF_c,2360 +numpy/linalg/__init__.py,sha256=mpdlEXWtTvpF7In776ONLwp6RIyo4U_GLPT1L1eIJnw,1813 +numpy/linalg/__init__.pyi,sha256=XBy4ocuypsRVflw_mbSTUhR4N5Roemu6w5SfeVwbkAc,620 +numpy/linalg/_umath_linalg.cpython-39-darwin.so,sha256=-MGbUolS7oHftyPyytqwAHb-vDCRZUzBJEiA2Hf-978,173024 +numpy/linalg/lapack_lite.cpython-39-darwin.so,sha256=QXNCouGw61a_Rhc2qk544rZVGxP477S9nR-Z1sALhWI,70640 +numpy/linalg/linalg.py,sha256=kDVK1GBxbUjlRgxXCoEfkRJm8yrNr1Iu7hMn2rKK8RE,90923 +numpy/linalg/linalg.pyi,sha256=zD9U5BUCB1uQggSxfZaTGX_uB2Hkp75sttGmZbCGgBI,7505 +numpy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/linalg/tests/test_deprecations.py,sha256=9p_SRmtxj2zc1doY9Ie3dyy5JzWy-tCQWFoajcAJUmM,640 +numpy/linalg/tests/test_linalg.py,sha256=rgvmK6Or70u8mN04puetL3FgSxZ8fJrOlI5ptTgCU5k,78085 +numpy/linalg/tests/test_regression.py,sha256=qbugUmrENybkEaM1GhfA01RXQUy8AkzalbrfzSIgUmM,5434 +numpy/ma/API_CHANGES.txt,sha256=F_4jW8X5cYBbzpcwteymkonTmvzgKKY2kGrHF1AtnrI,3405 +numpy/ma/LICENSE,sha256=BfO4g1GYjs-tEKvpLAxQ5YdcZFLVAJoAhMwpFVH_zKY,1593 
+numpy/ma/README.rst,sha256=q-gCsZ4Cw_gUGGvEjog556sJUHIm8WTAwkFK5Qnz9XA,9872 +numpy/ma/__init__.py,sha256=dgP0WdnOpph28Fd6UiqoyDKhfrct0H6QWqbCcETsk6M,1404 +numpy/ma/__init__.pyi,sha256=ppCg_TS0POutNB3moJE4kBabWURnc0WGXyYPquXZxS4,6063 +numpy/ma/core.py,sha256=4MglVRJtmQ9_iIVaQ2b-_Vmw1TjAhEsMJdtKOhyBFXQ,278213 +numpy/ma/core.pyi,sha256=YfgyuBuKxZ5v4I2JxZDvCLhnztOCRgzTeDg-JGTon_M,14305 +numpy/ma/extras.py,sha256=MC7QPS34PC4wxNbOp7pTy57dqF9B-L6L1KMI6rrfe2w,64383 +numpy/ma/extras.pyi,sha256=BBsiCZbaPpGCY506fkmqZdBkJNCXcglc3wcSBuAACNk,2646 +numpy/ma/mrecords.py,sha256=degd6dLaDEvEWNHmvSnUZXos1csIzaqjR_jAutm8JfI,27232 +numpy/ma/mrecords.pyi,sha256=r1a2I662ywnhGS6zvfcyK-9RHVvb4sHxiCx9Dhf5AE4,1934 +numpy/ma/setup.py,sha256=MqmMicr_xHkAGoG-T7NJ4YdUZIJLO4ZFp6AmEJDlyhw,418 +numpy/ma/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/ma/tests/test_core.py,sha256=xd5S3oa0jObo8jnsJk0-o46d-KNC3RtgNRKinJeY_kE,215100 +numpy/ma/tests/test_deprecations.py,sha256=nq_wFVt2EBHcT3AHxattfKXx2JDf1K5D-QBzUU0_15A,2566 +numpy/ma/tests/test_extras.py,sha256=lX4cbdGDEXaBHzA3q8hJxve4635XCJw4AP7FO7zhOfk,74858 +numpy/ma/tests/test_mrecords.py,sha256=PsJhUlABgdpSsPUeijonfyFNqz5AfNSGQTtJUte7yts,19890 +numpy/ma/tests/test_old_ma.py,sha256=h4BncexBcBigqvZMA6RjDjpHPurWtt99A7KTag2rmOs,32690 +numpy/ma/tests/test_regression.py,sha256=foMpI0luAvwkkRpAfPDV_810h1URISXDZhmaNhxb50k,3287 +numpy/ma/tests/test_subclassing.py,sha256=HeTIE_n1I8atwzF8tpvNtGHp-0dmM8PT8AS4IDWbcso,16967 +numpy/ma/testutils.py,sha256=RQw0RyS7hOSVTk4KrCGleq0VHlnDqzwwaLtuZbRE4_I,10235 +numpy/ma/timer_comparison.py,sha256=pIGSZG-qYYYlRWSTgzPlyCAINbGKhXrZrDZBBjiM080,15658 +numpy/matlib.py,sha256=-54vTuGIgeTMg9ZUmElRPZ4Hr-XZ-om9xLzAsSoTvnc,10465 +numpy/matrixlib/__init__.py,sha256=BHBpQKoQv4EjT0UpWBA-Ck4L5OsMqTI2IuY24p-ucXk,242 +numpy/matrixlib/__init__.pyi,sha256=-t3ZuvbzRuRwWfZOeN4xlNWdm7gQEprhUsWzu8MRvUE,252 +numpy/matrixlib/defmatrix.py,sha256=JXdJGm1LayOOXfKpp7OVZfb0pzzP4Lwh45sTJrleALc,30656 +numpy/matrixlib/defmatrix.pyi,sha256=lmBMRahKcMOl2PHDo79J67VRAZOkI54BzfDaTLpE0LI,451 +numpy/matrixlib/setup.py,sha256=1r7JRkSM4HyVorgtjoKJGWLcOcPO3wmvivpeEsVtAEg,426 +numpy/matrixlib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/matrixlib/tests/test_defmatrix.py,sha256=8E_-y7VD2vsq1y8CcI8km37pp5qcAtkciO16xqf2UIs,14982 +numpy/matrixlib/tests/test_interaction.py,sha256=PpjmgjEKighDXvt38labKE6L7f2jP74UEmp3JRb_iOY,11875 +numpy/matrixlib/tests/test_masked_matrix.py,sha256=7YO_LCO8DOhW3CuXJuxH93rnmttfvHnU7El-MBzxzFw,8932 +numpy/matrixlib/tests/test_matrix_linalg.py,sha256=ObbSUXU4R2pWajH__xAdizADrU2kBKDDCxkDV-oVBXc,2059 +numpy/matrixlib/tests/test_multiarray.py,sha256=jB3XCBmAtcqf-Wb9PwBW6uIykPpMPthuXLJ0giTKzZE,554 +numpy/matrixlib/tests/test_numeric.py,sha256=MP70qUwgshTtThKZaZDp7_6U-Z66NIV1geVhasGXejQ,441 +numpy/matrixlib/tests/test_regression.py,sha256=8sHDtO8Zi8p3a1eQKEWxtCmKrXmHoD3qxlIokg2AIAU,927 +numpy/polynomial/__init__.py,sha256=braLh6zP2QwuNKRKAaZGdC_qKWZ-tJlc3BN83LeuE_0,6781 +numpy/polynomial/__init__.pyi,sha256=W8szYtVUy0RUi83jmFLK58BN8CKVSoHA2CW7IcdUl1c,701 +numpy/polynomial/_polybase.py,sha256=YEnnQwlTgbn3dyD89ueraUx5nxx3x_pH6K6mmyEmhi8,39271 +numpy/polynomial/_polybase.pyi,sha256=J7yU9PPZW4W8mkqAltDfnL4ZNwljuM-bDEj4DPTJZpY,2321 +numpy/polynomial/chebyshev.py,sha256=NZCKjIblcX99foqZyp51i0_r8p0r1VKVGZFmQ1__kEk,62796 +numpy/polynomial/chebyshev.pyi,sha256=035CNdOas4dnb6lFLzRiBrYT_VnWh2T1-A3ibm_HYkI,1387 +numpy/polynomial/hermite.py,sha256=t5CFM-qE4tszYJiQZ301VcMn7IM67y2rUZPFPtnVRAc,52514 
+numpy/polynomial/hermite.pyi,sha256=hdsvTULow8bIjnATudf0i6brpLHV7vbOoHzaMvbjMy0,1217 +numpy/polynomial/hermite_e.py,sha256=jRR3f8Oth8poV2Ix8c0eLEQR3UZary-2RupOrEAEUMY,52642 +numpy/polynomial/hermite_e.pyi,sha256=zV7msb9v9rV0iv_rnD3SjP-TGyc6pd3maCqiPCj3PbA,1238 +numpy/polynomial/laguerre.py,sha256=mcVw0ckWVX-kzJ1QIhdcuuxzPjuFmA3plQLkloQMOYM,50858 +numpy/polynomial/laguerre.pyi,sha256=Gxc9SLISNKMWrKdsVJ9fKFFFwfxxZzfF-Yc-2r__z5M,1178 +numpy/polynomial/legendre.py,sha256=wjtgFajmKEbYkSUk3vWSCveMHDP6UymK28bNUk4Ov0s,51550 +numpy/polynomial/legendre.pyi,sha256=9dmANwkxf7EbOHV3XQBPoaDtc56cCkf75Wo7FG9Zfj4,1178 +numpy/polynomial/polynomial.py,sha256=XsaZPHmLGJFqpJs7rPvO5E0loWQ1L3YHLIUybVu4dU8,49112 +numpy/polynomial/polynomial.pyi,sha256=bOPRnub4xXxsUwNGeiQLTT4PCfN1ysSrf6LBZIcAN2Y,1132 +numpy/polynomial/polyutils.py,sha256=Xy5qjdrjnRaqSlClG1ROmwWccLkAPC7IcHaNJLvhCf4,23237 +numpy/polynomial/polyutils.pyi,sha256=cFAyZ9Xzuw8Huhn9FEz4bhyD00m2Dp-2DiUSyogJwSo,264 +numpy/polynomial/setup.py,sha256=dXQfzVUMP9OcB6iKv5yo1GLEwFB3gJ48phIgo4N-eM0,373 +numpy/polynomial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/polynomial/tests/test_chebyshev.py,sha256=6tMsFP1h7K8Zf72mNOta6Tv52_fVTlXknseuffj080c,20522 +numpy/polynomial/tests/test_classes.py,sha256=DFyY2IQBj3r2GZkvbRIeZO2EEY466xbuwc4PShAl4Sw,18331 +numpy/polynomial/tests/test_hermite.py,sha256=N9b2dx2UWPyja5v02dSoWYPnKvb6H-Ozgtrx-xjWz2k,18577 +numpy/polynomial/tests/test_hermite_e.py,sha256=_A3ohAWS4HXrQG06S8L47dImdZGTwYosCXnoyw7L45o,18911 +numpy/polynomial/tests/test_laguerre.py,sha256=BZOgs49VBXOFBepHopxuEDkIROHEvFBfWe4X73UZhn8,17511 +numpy/polynomial/tests/test_legendre.py,sha256=b_bblHs0F_BWw9ESuSq52ZsLKcQKFR5eqPf_SppWFqo,18673 +numpy/polynomial/tests/test_polynomial.py,sha256=4cuO8-5wdIxcz5CrucB5Ix7ySuMROokUF12F7ogQ_hc,20529 +numpy/polynomial/tests/test_polyutils.py,sha256=IxkbVfpcBqe5lOZluHFUPbLATLu1rwVg7ghLASpfYrY,3579 +numpy/polynomial/tests/test_printing.py,sha256=rfP4MaQbjGcO52faHmYrgsaarkm3Ndi3onwr6DDuapE,20525 +numpy/polynomial/tests/test_symbol.py,sha256=msTPv7B1niaKujU33kuZmdxJvLYvOjfl1oykmlL0dXo,5371 +numpy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/LICENSE.md,sha256=EDFmtiuARDr7nrNIjgUuoGvgz_VmuQjxmeVh_eSa8Z8,3511 +numpy/random/__init__.pxd,sha256=9JbnX540aJNSothGs-7e23ozhilG6U8tINOUEp08M_k,431 +numpy/random/__init__.py,sha256=81Thnexg5umN5WZwD5TRyzNc2Yp-d14B6UC7NBgVKh8,7506 +numpy/random/__init__.pyi,sha256=RfW8mco48UaWDL1UC5ROv9vXiFZ9EGho62avhgEAHPc,2143 +numpy/random/_bounded_integers.cpython-39-darwin.so,sha256=rhNuiVxvBXcii8Dgr09QVC-zQ0QTKSENuPGru_u3dcw,364547 +numpy/random/_bounded_integers.pxd,sha256=hcoucPH5hkFEM2nm12zYO-5O_Rt8RujEXT5YWuAzl1Q,1669 +numpy/random/_common.cpython-39-darwin.so,sha256=31Z9mgOls23mNWxWBjBTn7BHVW6FH3SN_8yvWWdfxp0,250361 +numpy/random/_common.pxd,sha256=s2_IdIQ0MhNbogamulvXe-b93wbx882onmYkxqswwpo,4939 +numpy/random/_examples/cffi/extending.py,sha256=xSla3zWqxi6Hj48EvnYfD3WHfE189VvC4XsKu4_T_Iw,880 +numpy/random/_examples/cffi/parse.py,sha256=Bnb7t_6S_c5-3dZrQ-XX9EazOKhftUfcCejXXWyd1EU,1771 +numpy/random/_examples/cython/extending.pyx,sha256=4IE692pq1V53UhPZqQiQGcIHXDoNyqTx62x5a36puVg,2290 +numpy/random/_examples/cython/extending_distributions.pyx,sha256=oazFVWeemfE0eDzax7r7MMHNL1_Yofws2m-c_KT2Hbo,3870 +numpy/random/_examples/cython/meson.build,sha256=rXtugURMEo-ef4bPE1QIv4mzvWbeGjmcTdKCBvjxjtw,1443 +numpy/random/_examples/numba/extending.py,sha256=Ipyzel_h5iU_DMJ_vnXUgQC38uMDMn7adUpWSeEQLFE,1957 
+numpy/random/_examples/numba/extending_distributions.py,sha256=Jnr9aWkHyIWygNbdae32GVURK-5T9BTGhuExRpvve98,2034 +numpy/random/_generator.cpython-39-darwin.so,sha256=y7IzEwCMs7A9UAKvASIXmHfV5Jaxcha5x0pCkojHjtU,826716 +numpy/random/_generator.pyi,sha256=zRvo_y6g0pWkE4fO1M9jLYUkxDfGdA6Enreb3U2AADM,22442 +numpy/random/_mt19937.cpython-39-darwin.so,sha256=JJfUmSE_c83w3_N4BuZI1RReDSFujr4GDHSqgJcKLxc,128858 +numpy/random/_mt19937.pyi,sha256=_iZKaAmuKBQ4itSggfQvYYj_KjktcN4rt-YpE6bqFAM,724 +numpy/random/_pcg64.cpython-39-darwin.so,sha256=CjxI50x3msCGiqcvb0K4lytZP4dgGeWlppK51_h-7pU,129368 +numpy/random/_pcg64.pyi,sha256=uxr5CbEJetN6lv9vBG21jlRhuzOK8SQnXrwqAQBxj_c,1091 +numpy/random/_philox.cpython-39-darwin.so,sha256=PacLVgiPZbV5HHZH6afxJh2p1M8HprMF1HFTpkCSuIw,111785 +numpy/random/_philox.pyi,sha256=OKlaiIU-hj72Bp04zjNifwusOD_3-mYxIfvyuys8c_o,978 +numpy/random/_pickle.py,sha256=4NhdT-yk7C0m3tyZWmouYAs3ZGNPdPVNGfUIyuh8HDY,2318 +numpy/random/_sfc64.cpython-39-darwin.so,sha256=uOdk_3Pbxfa205-mvVx5Dv_IoOwLitf6F5SN22I-JDE,93560 +numpy/random/_sfc64.pyi,sha256=09afHTedVW-519493ZXtGcl-H-_zluj-B_yfEJG8MMs,709 +numpy/random/bit_generator.cpython-39-darwin.so,sha256=JcqhWaDg_XhYxwoK-ot935hhksL_M97-maiYrB-M78Y,207807 +numpy/random/bit_generator.pxd,sha256=lArpIXSgTwVnJMYc4XX0NGxegXq3h_QsUDK6qeZKbNc,1007 +numpy/random/bit_generator.pyi,sha256=aXv7a_hwa0nkjY8P2YENslwWp89UcFRn09woXh7Uoc0,3510 +numpy/random/c_distributions.pxd,sha256=7DE-mV3H_Dihk4OK4gMHHkyD4tPX1cAi4570zi5CI30,6344 +numpy/random/lib/libnpyrandom.a,sha256=6k63VU_Y0u0yBJwCu1cwG0rpb18fWGOikc-dD7AHXsA,54544 +numpy/random/mtrand.cpython-39-darwin.so,sha256=Px2nQM9DNgJYIPPGCxu0DWq5-lrlCK47mnHry9dQRc4,700200 +numpy/random/mtrand.pyi,sha256=3vAGOXsvyFFv0yZl34pVVPP7Dgt22COyfn4tUoi_hEQ,19753 +numpy/random/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/tests/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/random/tests/data/mt19937-testset-1.csv,sha256=Xkef402AVB-eZgYQkVtoxERHkxffCA9Jyt_oMbtJGwY,15844 +numpy/random/tests/data/mt19937-testset-2.csv,sha256=nsBEQNnff-aFjHYK4thjvUK4xSXDSfv5aTbcE59pOkE,15825 +numpy/random/tests/data/pcg64-testset-1.csv,sha256=xB00DpknGUTTCxDr9L6aNo9Hs-sfzEMbUSS4t11TTfE,23839 +numpy/random/tests/data/pcg64-testset-2.csv,sha256=NTdzTKvG2U7_WyU_IoQUtMzU3kEvDH39CgnR6VzhTkw,23845 +numpy/random/tests/data/pcg64dxsm-testset-1.csv,sha256=vNSUT-gXS_oEw_awR3O30ziVO4seNPUv1UIZ01SfVnI,23833 +numpy/random/tests/data/pcg64dxsm-testset-2.csv,sha256=uylS8PU2AIKZ185OC04RBr_OePweGRtvn-dE4YN0yYA,23839 +numpy/random/tests/data/philox-testset-1.csv,sha256=SedRaIy5zFadmk71nKrGxCFZ6BwKz8g1A9-OZp3IkkY,23852 +numpy/random/tests/data/philox-testset-2.csv,sha256=dWECt-sbfvaSiK8-Ygp5AqyjoN5i26VEOrXqg01rk3g,23838 +numpy/random/tests/data/sfc64-testset-1.csv,sha256=iHs6iX6KR8bxGwKk-3tedAdMPz6ZW8slDSUECkAqC8Q,23840 +numpy/random/tests/data/sfc64-testset-2.csv,sha256=FIDIDFCaPZfWUSxsJMAe58hPNmMrU27kCd9FhCEYt_k,23833 +numpy/random/tests/test_direct.py,sha256=6vLpCyeKnAWFEZei7l2YihVLQ0rSewO1hJBWt7A5fyQ,17779 +numpy/random/tests/test_extending.py,sha256=S3Wrzu3di4uBhr-Pxnx5dOPvlBY0FRdZqVX6CC1IN6s,4038 +numpy/random/tests/test_generator_mt19937.py,sha256=35LBwV6TtWPnxhefutxTQmhLzAQ5Ee4YiY8ziDXM-eQ,115477 +numpy/random/tests/test_generator_mt19937_regressions.py,sha256=xGkdz76BMX1EK0QPfabVxpNx9qQ9OC-1ZStWOs6N_M8,6387 +numpy/random/tests/test_random.py,sha256=kEkQs3i7zcpm9MozIRIz1FIx5B6fmXk0QqX0l6l-u_Y,70087 
+numpy/random/tests/test_randomstate.py,sha256=DxF7rMUSxaAlL4h1qC3onHcHR7T_6rKWPbr0nJH84nE,85031 +numpy/random/tests/test_randomstate_regression.py,sha256=VucYWIjA7sAquWsalvZMnfkmYLM1O6ysyWnLl931-lA,7917 +numpy/random/tests/test_regression.py,sha256=trntK51UvajOVELiluEO85l64CKSw5nvBSc5SqYyr9w,5439 +numpy/random/tests/test_seed_sequence.py,sha256=GNRJ4jyzrtfolOND3gUWamnbvK6-b_p1bBK_RIG0sfU,3311 +numpy/random/tests/test_smoke.py,sha256=jjNz0aEGD1_oQl9a9UWt6Mz_298alG7KryLT1pgHljw,28183 +numpy/testing/__init__.py,sha256=InpVKoDAzMKO_l_HNcatziW_u1k9_JZze__t2nybrL0,595 +numpy/testing/__init__.pyi,sha256=AhK5NuOpdD-JjIzXOlssE8_iSLyFAAHzyGV_w1BT7vA,1674 +numpy/testing/_private/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/_private/extbuild.py,sha256=nG2dwP4nUmQS3e5eIRinxt0s_f4sxxA1YfohCg-navo,8017 +numpy/testing/_private/utils.py,sha256=3FrSTMi0OdpDODBDoncgiDQzdo5NKA6YVfQ3uKRSQnc,85242 +numpy/testing/_private/utils.pyi,sha256=MMNrvwEeSTYzZFWawSSzHnTFYG-cSAIiID-1FuJ1f8U,10123 +numpy/testing/overrides.py,sha256=u6fcKSBC8HIzMPWKAbdyowU71h2Fx2ekDQxpG5NhIr8,2123 +numpy/testing/print_coercion_tables.py,sha256=ndxOsS4XfrZ4UY_9nqRTCnxhkzgdqcuUHL8nezd7Op4,6180 +numpy/testing/setup.py,sha256=GPKAtTTBRsNW4kmR7NjP6mmBR_GTdpaTvkTm10_VcLg,709 +numpy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/testing/tests/test_utils.py,sha256=IDOr-GXuNGlrsb-XzGSYUHXEqcGYJ78p60jOpBqyPM4,55740 +numpy/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/tests/test__all__.py,sha256=L3mCnYPTpzAgNfedVuq9g7xPWbc0c1Pot94k9jZ9NpI,221 +numpy/tests/test_ctypeslib.py,sha256=B06QKeFRgDIEbkEPBy_zYA1H5E2exuhTi7IDkzV8gfo,12257 +numpy/tests/test_lazyloading.py,sha256=YETrYiDLAqLX04K_u5_3NVxAfxDoeguxwkIRfz6qKcY,1162 +numpy/tests/test_matlib.py,sha256=gwhIXrJJo9DiecaGLCHLJBjhx2nVGl6yHq80AOUQSRM,1852 +numpy/tests/test_numpy_config.py,sha256=qHvepgi9oyAbQuZD06k7hpcCC2MYhdzcY6D1iQDPNMI,1241 +numpy/tests/test_numpy_version.py,sha256=A8cXFzp4k-p6J5zkOxlDfDvkoFMxDW2hpTFVXcaQRVo,1479 +numpy/tests/test_public_api.py,sha256=DTq7SO84uBjC2tKPoqX17xazc-SLkTAbQ2fLZwGM2jc,18170 +numpy/tests/test_reloading.py,sha256=QuVaPQulcNLg4Fl31Lw-O89L42KclYCK68n5GVy0PNQ,2354 +numpy/tests/test_scripts.py,sha256=jluCLfG94VM1cuX-5RcLFBli_yaJZpIvmVuMxRKRJrc,1645 +numpy/tests/test_warnings.py,sha256=R7UQzoFqp8F3nIaQBdRWiYk5tfM5HtAE_ZMFRxSUF98,2397 +numpy/typing/__init__.py,sha256=VoTILNDrUWvZx0LK9_97lBLQFKtSGmDt4QLOH8zYvlo,5234 +numpy/typing/mypy_plugin.py,sha256=24zVk4Ei3qH4Hc3SSz3v0XtIsycTo8HKoY6ilhB_7AQ,6376 +numpy/typing/setup.py,sha256=Cnz9q53w-vJNyE6vYxqYvQXx0pJbrG9quHyz9sqxfek,374 +numpy/typing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +numpy/typing/tests/data/fail/arithmetic.pyi,sha256=4rY_ASCERAl8WCus1RakOe0Aw-8vvjilL29mgdD4lv0,3850 +numpy/typing/tests/data/fail/array_constructors.pyi,sha256=X9y_jUYS17WfYmXW5NwkVudyiR6ouUaAwEh0JRte42o,1089 +numpy/typing/tests/data/fail/array_like.pyi,sha256=OVAlEJZ5k8ZRKt0aGpZQwIjlUGpy0PzOOYqfI-IMqBQ,455 +numpy/typing/tests/data/fail/array_pad.pyi,sha256=57oK0Yp53rtKjjIrRFYLcxa-IfIGhtI-bEem7ggJKwI,132 +numpy/typing/tests/data/fail/arrayprint.pyi,sha256=-Fs9VnQfxyfak008Hq8kJWfB0snA6jGDXZz8ljQnwGE,549 +numpy/typing/tests/data/fail/arrayterator.pyi,sha256=FoU4ahHkJZ67dwWXer5FXLjjjesKKg-w2Jq1X1bHymA,480 +numpy/typing/tests/data/fail/bitwise_ops.pyi,sha256=GN9dVqk4_HFXn7zbRrHzJq_UGRFBccoYVUG1UuE7bXs,515 +numpy/typing/tests/data/fail/char.pyi,sha256=-vgN6EmfQ8VaA4SOZ5Ol9u4-Z7Q5I7G78LmaxZOuZ90,2615 
+numpy/typing/tests/data/fail/chararray.pyi,sha256=jrNryZFpr8nxG2IHb9e0x3ranpvJpBy_RDex-WpT5rU,2296 +numpy/typing/tests/data/fail/comparisons.pyi,sha256=U4neWzwwtxG6QXsKlNGJuKXHBtwzYBQOa47_7SKF5Wg,888 +numpy/typing/tests/data/fail/constants.pyi,sha256=YSqNbXdhbdMmYbs7ntH0FCKbnm8IFeqsDlZBqcU43iw,286 +numpy/typing/tests/data/fail/datasource.pyi,sha256=PRT2hixR-mVxr2UILvHa99Dr54EF2h3snJXE-v3rWcc,395 +numpy/typing/tests/data/fail/dtype.pyi,sha256=OAGABqdXNB8gClJFEGMckoycuZcIasMaAlS2RkiKROI,334 +numpy/typing/tests/data/fail/einsumfunc.pyi,sha256=RS7GZqUCT_vEFJoyUx4gZlPO8GNFFNFWidxl-wLyRv0,539 +numpy/typing/tests/data/fail/false_positives.pyi,sha256=Q61qMsSsNCtmO0EMRxHj5Z7RYTyrELVpkzfJY5eK8Z0,366 +numpy/typing/tests/data/fail/flatiter.pyi,sha256=qLM4qm7gvJtEZ0rTHcyasUzoP5JbX4FREtqV3g1w6Lo,843 +numpy/typing/tests/data/fail/fromnumeric.pyi,sha256=FH2mjkgtCbA9soqlJRhYN7IIfRRrUL1i9mwqcbYKZSc,5591 +numpy/typing/tests/data/fail/histograms.pyi,sha256=yAPVt0rYTwtxnigoGT-u7hhKCE9iYxsXc24x2HGBrmA,367 +numpy/typing/tests/data/fail/index_tricks.pyi,sha256=moINir9iQoi6Q1ZuVg5BuSB9hSBtbg_uzv-Qm_lLYZk,509 +numpy/typing/tests/data/fail/lib_function_base.pyi,sha256=6y9T773CBLX-jUry1sCQGVuKVKM2wMuQ56Ni5V5j4Dw,2081 +numpy/typing/tests/data/fail/lib_polynomial.pyi,sha256=Ur7Y4iZX6WmoH5SDm0ePi8C8LPsuPs2Yr7g7P5O613g,899 +numpy/typing/tests/data/fail/lib_utils.pyi,sha256=VFpE6_DisvlDByyp1PiNPJEe5IcZp8cH0FlAJyoZipo,276 +numpy/typing/tests/data/fail/lib_version.pyi,sha256=7-ZJDZwDcB-wzpMN8TeYtZAgaqc7xnQ8Dnx2ISiX2Ts,158 +numpy/typing/tests/data/fail/linalg.pyi,sha256=yDd05aK1dI37RPt3pD2eJYo4dZFaT2yB1PEu3K0y9Tg,1322 +numpy/typing/tests/data/fail/memmap.pyi,sha256=HSTCQYNuW1Y6X1Woj361pN4rusSPs4oDCXywqk20yUo,159 +numpy/typing/tests/data/fail/modules.pyi,sha256=_ek4zKcdP-sIh_f-IDY0tP-RbLORKCSWelM9AOYxsyA,670 +numpy/typing/tests/data/fail/multiarray.pyi,sha256=XCdBxufNhR8ZtG8UMzk8nt9_NC5gJTKP9-xTqKO_K9I,1693 +numpy/typing/tests/data/fail/ndarray.pyi,sha256=YnjXy16RHs_esKelMjB07865CQ7gLyQnXhnitq5Kv5c,405 +numpy/typing/tests/data/fail/ndarray_misc.pyi,sha256=w-10xTDDWoff9Lq0dBO-jBeiBR-XjCz2qmes0dLx238,1372 +numpy/typing/tests/data/fail/nditer.pyi,sha256=w7emjnOxnf3NcvLktNLlke6Cuivn2gU3sVmGCfbG6rw,325 +numpy/typing/tests/data/fail/nested_sequence.pyi,sha256=em4GZwLDFE0QSxxg081wVwhh-Dmtkn8f7wThI0DiXVs,427 +numpy/typing/tests/data/fail/npyio.pyi,sha256=56QuHo9SvVR3Uhzl6gQZncCpX575Gy5wugjMICh20m0,620 +numpy/typing/tests/data/fail/numerictypes.pyi,sha256=fevH9x80CafYkiyBJ7LMLVl6GyTvQrZ34trBu6O8TtM,276 +numpy/typing/tests/data/fail/random.pyi,sha256=p5WsUGyOL-MGIeALh9Y0dVhYSRQLaUwMdjXc3G6C_7Q,2830 +numpy/typing/tests/data/fail/rec.pyi,sha256=Ws3TyesnoQjt7Q0wwtpShRDJmZCs2jjP17buFMomVGA,704 +numpy/typing/tests/data/fail/scalars.pyi,sha256=o91BwSfzPTczYVtbXsirqQUoUoYP1C_msGjc2GYsV04,2952 +numpy/typing/tests/data/fail/shape_base.pyi,sha256=Y_f4buHtX2Q2ZA4kaDTyR8LErlPXTzCB_-jBoScGh_Q,152 +numpy/typing/tests/data/fail/stride_tricks.pyi,sha256=IjA0Xrnx0lG3m07d1Hjbhtyo1Te5cXgjgr5fLUo4LYQ,315 +numpy/typing/tests/data/fail/testing.pyi,sha256=e7b5GKTWCtKGoB8z2a8edsW0Xjl1rMheALsvzEJjlCw,1370 +numpy/typing/tests/data/fail/twodim_base.pyi,sha256=ZqbRJfy5S_pW3fFLuomy4L5SBNqj6Nklexg9KDTo65c,899 +numpy/typing/tests/data/fail/type_check.pyi,sha256=CIyI0j0Buxv0QgCvNG2urjaKpoIZ-ZNawC2m6NzGlbo,379 +numpy/typing/tests/data/fail/ufunc_config.pyi,sha256=ukA0xwfJHLoGfoOIpWIN-91wj-DG8oaIjYbO72ymjg4,733 +numpy/typing/tests/data/fail/ufunclike.pyi,sha256=lbxjJyfARmt_QK1HxhxFxvwQTqCEZwJ9I53Wp8X3KIY,679 
+numpy/typing/tests/data/fail/ufuncs.pyi,sha256=YaDTL7QLmGSUxE6JVMzpOlZTjHWrgbOo0UIlkX-6ZQk,1347 +numpy/typing/tests/data/fail/warnings_and_errors.pyi,sha256=PrbYDFI7IGN3Gf0OPBkVfefzQs4AXHwDQ495pvrX3RY,174 +numpy/typing/tests/data/misc/extended_precision.pyi,sha256=bS8bBeCFqjgtOiy-8_y39wfa7rwhdjLz2Vmo-RXAYD4,884 +numpy/typing/tests/data/mypy.ini,sha256=Ynv1VSx_kXTD2mFC3ZpgEFuCOg1F2VJXxPk0dxUnF2M,108 +numpy/typing/tests/data/pass/arithmetic.py,sha256=2z3dmuysQQmiPz8x0bg8SOOKW62mVJn97uMa9T0L7Vk,7455 +numpy/typing/tests/data/pass/array_constructors.py,sha256=3GrhfBcmWX53pJHD0NvhXjwr2-uNKREbR1I9WCcZ7rI,2419 +numpy/typing/tests/data/pass/array_like.py,sha256=ce_IVubBd7J6FkSpJmD7qMlRLuwmiidhOqhYfZb16Wo,916 +numpy/typing/tests/data/pass/arrayprint.py,sha256=y_KkuLz1uM7pv53qfq7GQOuud4LoXE3apK1wtARdVyM,766 +numpy/typing/tests/data/pass/arrayterator.py,sha256=FqcpKdUQBQ0FazHFxr9MsLEZG-jnJVGKWZX2owRr4DQ,393 +numpy/typing/tests/data/pass/bitwise_ops.py,sha256=UnmxVr9HwI8ifdrutGm_u3EZU4iOOPQhrOku7hTaH0c,970 +numpy/typing/tests/data/pass/comparisons.py,sha256=nTE-fvraLK6xTZcP4uPV02wOShzYKWDaoapx35AeDOY,2992 +numpy/typing/tests/data/pass/dtype.py,sha256=MqDKC6Ywv6jNkWsR8rdLuabzHUco5w1OylDHEdxve_I,1069 +numpy/typing/tests/data/pass/einsumfunc.py,sha256=eXj5L5MWPtQHgrHPsJ36qqrmBHqct9UoujjJCvHnF1k,1370 +numpy/typing/tests/data/pass/flatiter.py,sha256=0BnbuLMBC7MQlprNZ0QhNSscfYwPhEhXOhWoyiRACWU,174 +numpy/typing/tests/data/pass/fromnumeric.py,sha256=Xd_nJVVDoONdztUX8ddgo7EXJ2FD8AX51MO_Yujnmog,3742 +numpy/typing/tests/data/pass/index_tricks.py,sha256=oaFD9vY01_RI5OkrXt-xTk1n_dd-SpuPp-eZ58XR3c8,1492 +numpy/typing/tests/data/pass/lib_utils.py,sha256=sDQCjHVGUwct0RQqAtH5_16y241siSY4bXKZRsuJ8xA,434 +numpy/typing/tests/data/pass/lib_version.py,sha256=HnuGOx7tQA_bcxFIJ3dRoMAR0fockxg4lGqQ4g7LGIw,299 +numpy/typing/tests/data/pass/literal.py,sha256=DLzdWHD6ttW4S0NEvGQbsH_UEJjhZyhvO4OXJjoyvZQ,1331 +numpy/typing/tests/data/pass/mod.py,sha256=HB9aK4_wGJbc44tomaoroNy0foIL5cI9KIjknvMTbkk,1578 +numpy/typing/tests/data/pass/modules.py,sha256=t0KJxYWbrWd7HbbgIDFb3LAhJBiNNb6QPjjFDAgC2mU,576 +numpy/typing/tests/data/pass/multiarray.py,sha256=MxHax6l94yqlTVZleAqG77ILEbW6wU5osPcHzxJ85ns,1331 +numpy/typing/tests/data/pass/ndarray_conversion.py,sha256=yPgzXG6paY1uF_z-QyHYrcmrZvhX7qtvTUh7ANLseCA,1626 +numpy/typing/tests/data/pass/ndarray_misc.py,sha256=z3mucbn9fLM1gxmbUhWlp2lcrOv4zFjqZFze0caE2EA,2715 +numpy/typing/tests/data/pass/ndarray_shape_manipulation.py,sha256=37eYwMNqMLwanIW9-63hrokacnSz2K_qtPUlkdpsTjo,640 +numpy/typing/tests/data/pass/numeric.py,sha256=SdnsD5zv0wm8T2hnIylyS14ig2McSz6rG9YslckbNQ4,1490 +numpy/typing/tests/data/pass/numerictypes.py,sha256=r0_s-a0-H2MdWIn4U4P6W9RQO0V1xrDusgodHNZeIYM,750 +numpy/typing/tests/data/pass/random.py,sha256=uJCnzlsOn9hr_G1TpHLdsweJI4EdhUSEQ4dxROPjqAs,61881 +numpy/typing/tests/data/pass/scalars.py,sha256=En0adCZAwEigZrzdQ0JQwDEmrS0b-DMd1vvjkFcvwo8,3479 +numpy/typing/tests/data/pass/simple.py,sha256=HmAfCOdZBWQF211YaZFrIGisMgu5FzTELApKny08n3Y,2676 +numpy/typing/tests/data/pass/simple_py3.py,sha256=HuLrc5aphThQkLjU2_19KgGFaXwKOfSzXe0p2xMm8ZI,96 +numpy/typing/tests/data/pass/ufunc_config.py,sha256=_M8v-QWAeT1-2MkfSeAbNl_ZwyPvYfPTsLl6c1X8d_w,1204 +numpy/typing/tests/data/pass/ufunclike.py,sha256=Gve6cJ2AT3TAwOjUOQQDIUnqsRCGYq70_tv_sgODiiA,1039 +numpy/typing/tests/data/pass/ufuncs.py,sha256=xGuKuqPetUTS4io5YDHaki5nbYRu-wC29SGU32tzVIg,462 +numpy/typing/tests/data/pass/warnings_and_errors.py,sha256=Pcg-QWfY4PAhTKyehae8q6LhtbUABxa2Ye63-3h1f4w,150 
+numpy/typing/tests/data/reveal/arithmetic.pyi,sha256=Ndmi_IFAl8z28RHsYTbOouf-B5FH91x_9ky-JwsdXVg,19765 +numpy/typing/tests/data/reveal/array_constructors.pyi,sha256=DcT8Z2rEpqYfjXySBejk8cGOUidUmizZGE5ZEy7r14E,10600 +numpy/typing/tests/data/reveal/arraypad.pyi,sha256=Q1pcU4B3eRsw5jsv-S0MsEfNUbp_4aMdO_o3n0rtA2A,776 +numpy/typing/tests/data/reveal/arrayprint.pyi,sha256=YyzzkL-wj4Rs-fdo3brpoaWtb5g3yk4Vn2HKu5KRo4w,876 +numpy/typing/tests/data/reveal/arraysetops.pyi,sha256=ApCFQcZzQ08zV32SJ86Xyv_7jazl3XKMmJmULtNquJ8,4155 +numpy/typing/tests/data/reveal/arrayterator.pyi,sha256=TF_1eneHoT0v9HqS9dKc5Xiv3iY3E330GR1RNcJ7s2Q,1111 +numpy/typing/tests/data/reveal/bitwise_ops.pyi,sha256=nRkyUGrBB_Es7TKyDxS_s3u2dFgBfzjocInI9Ea-J10,3919 +numpy/typing/tests/data/reveal/char.pyi,sha256=M_iTa9Pn8F7jQ1k6RN9KvbhEn00g7UYJZ5PV57ikcZM,7289 +numpy/typing/tests/data/reveal/chararray.pyi,sha256=O0EfwnKc3W1Fnx1c7Yotb1O84kVMuqJLlMBXd2duvjI,6093 +numpy/typing/tests/data/reveal/comparisons.pyi,sha256=huaf-seaF5ndTqfoaBfPtMMkOYovq7ibJl5-CRoQW7s,7468 +numpy/typing/tests/data/reveal/constants.pyi,sha256=P9vFEMkPpJ5KeUnzqPOuyHlh3zAFl9lzB4WxyB2od7A,1949 +numpy/typing/tests/data/reveal/ctypeslib.pyi,sha256=-Pk2rLEGCzz3B_y8Mu10JSVA8gPFztl5fV1dspPzqig,4727 +numpy/typing/tests/data/reveal/datasource.pyi,sha256=e8wjn60tO5EdnkBF34JrZT5XvdyW7kRWD2abtgr6qUg,671 +numpy/typing/tests/data/reveal/dtype.pyi,sha256=TKrYyxMu5IGobs0SDTIRcPuWsZ5X7zMYB4pmUlTTJxA,2872 +numpy/typing/tests/data/reveal/einsumfunc.pyi,sha256=pbtSfzIWUJRkDpe2riHBlvFlNSC3CqVM-SbYtBgX9H0,2044 +numpy/typing/tests/data/reveal/emath.pyi,sha256=-muNpWOv_niIn-zS3gUnFO4qBZAouNlVGue2x1L5Ris,2423 +numpy/typing/tests/data/reveal/false_positives.pyi,sha256=AplTmZV7TS7nivU8vegbstMN5MdMv4U0JJdZ4IeeA5M,482 +numpy/typing/tests/data/reveal/fft.pyi,sha256=ReQ9qn5frvJEy-g0RWpUGlPBntUS1cFSIu6WfPotHzE,1749 +numpy/typing/tests/data/reveal/flatiter.pyi,sha256=e1OQsVxQpgyfqMNw2puUTATl-w3swvdknlctAiWxf_E,882 +numpy/typing/tests/data/reveal/fromnumeric.pyi,sha256=PNtGQR1VmGk_xNbd0eP7k7B2oNCMBz2XOJ17-_SdE5M,12101 +numpy/typing/tests/data/reveal/getlimits.pyi,sha256=nUGOMFpWj3pMgqLy6ZbR7A4G2q7iLIl5zEFBGf-Qcfw,1592 +numpy/typing/tests/data/reveal/histograms.pyi,sha256=MxKWoa7UoJRRLim53H6OoyYfz87P3_9YUXGYPTknGVQ,1303 +numpy/typing/tests/data/reveal/index_tricks.pyi,sha256=HpD7lU7hcyDoLdZbeqskPXnX7KYwPtll7uJKYUzrlE8,3177 +numpy/typing/tests/data/reveal/lib_function_base.pyi,sha256=eSiSZUlmPXqVPKknM7GcEv76BDgj0IJRu3FXcZXpmqc,8318 +numpy/typing/tests/data/reveal/lib_polynomial.pyi,sha256=TOzOdMPDqveDv3vDKSjtq6RRvN-j_s2J7aud2ySDAB0,5986 +numpy/typing/tests/data/reveal/lib_utils.pyi,sha256=_zj7WGYGYMFXAHLK-F11aeFfDvjRvFARUjoXhbXn8V0,1049 +numpy/typing/tests/data/reveal/lib_version.pyi,sha256=UCioUeykot8-nWL6goKxZnKZxtgB4lFEi9wdN_xyF1U,672 +numpy/typing/tests/data/reveal/linalg.pyi,sha256=LPaY-RyYL7Xt3djCgNaWEgI8beI9Eo_XnvOwi6Y7-eo,4877 +numpy/typing/tests/data/reveal/matrix.pyi,sha256=ciJXsn5v2O1IZ3VEn5Ilp8-40NTQokfrOOgVXMFsvLo,2922 +numpy/typing/tests/data/reveal/memmap.pyi,sha256=A5PovMzjRp2zslF1vw3TdTQjj4Y0dIEJ__HDBV_svGM,842 +numpy/typing/tests/data/reveal/mod.pyi,sha256=-CNWft2jQGSdrO8dYRgwbl7OhL3a78Zo60JVmiY-gQI,5666 +numpy/typing/tests/data/reveal/modules.pyi,sha256=0WPq7A-aqWkJsV-IA1_7dFNCcxBacj1AWExaXbXErG4,1958 +numpy/typing/tests/data/reveal/multiarray.pyi,sha256=6MvfNKihK-oN6QwG9HFNelgheo4lnL0FCrmIF_qxdoA,5326 +numpy/typing/tests/data/reveal/nbit_base_example.pyi,sha256=DRUMGatQvQXTuovKEMF4dzazIU6it6FU53LkOEo2vNo,657 
+numpy/typing/tests/data/reveal/ndarray_conversion.pyi,sha256=BfjQD8U756l4gOfY0LD47HhDRxbq0yCFfEFKvbXs7Rs,1791 +numpy/typing/tests/data/reveal/ndarray_misc.pyi,sha256=0EN-a47Msn4pZgKVdD-GrXCCmt-oxjlov5rszchBmOI,7126 +numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi,sha256=QDQ9g6l-e73pTJp-Dosiynb-okbqi91D4KirjhIjcv4,1233 +numpy/typing/tests/data/reveal/nditer.pyi,sha256=VFXnT75BgWSUpb-dD-q5cZkfeOqsk-x9cH626g9FWT4,2021 +numpy/typing/tests/data/reveal/nested_sequence.pyi,sha256=IQyRlXduk-ZEakOtoliMLCqNgGbeg0mzZf-a-a3Gq_0,734 +numpy/typing/tests/data/reveal/npyio.pyi,sha256=YXagt2J-1suu5WXZ_si5NuJf7sHj_7NlaSLqQkam1Po,4209 +numpy/typing/tests/data/reveal/numeric.pyi,sha256=aJKnav-X45tjSFfgGD4iCetwEFcJXdNgU7valktjiCg,6160 +numpy/typing/tests/data/reveal/numerictypes.pyi,sha256=-YQRhwjBjsFJHjpGCRqzafNnKDdsmbBHbmPwccP0pLI,2487 +numpy/typing/tests/data/reveal/random.pyi,sha256=s6T074ZIpGAUqHnA-yAlozTLvt7PNBjCBqd-nGMqWGg,104091 +numpy/typing/tests/data/reveal/rec.pyi,sha256=DbRVk6lc7-3qPe-7Q26tUWpdaH9B4UVoQSYrRGJUo1Q,3858 +numpy/typing/tests/data/reveal/scalars.pyi,sha256=Qn3B3rsqSN397Jh25xs4odt2pfCQtWkoJe-e0-oX8d4,4790 +numpy/typing/tests/data/reveal/shape_base.pyi,sha256=YjiVukrK6OOydvopOaOmeAIIa0YQ2hn9_I_-FyYkHVU,2427 +numpy/typing/tests/data/reveal/stride_tricks.pyi,sha256=EBZR8gSP385nhotwJ3GH9DOUD2q5nUEYbXfhLo5xrPo,1542 +numpy/typing/tests/data/reveal/testing.pyi,sha256=_WOAj_t5SWYiqN0KG26Mza8RvaD3WAa7rFUlgksjLms,8611 +numpy/typing/tests/data/reveal/twodim_base.pyi,sha256=ZdNVo2HIJcx8iF9PA-z5W3Bs0hWM2nlVdbhLuAQlljM,3132 +numpy/typing/tests/data/reveal/type_check.pyi,sha256=yZSp50TtvPqv_PN7zmVcNOVUTUXMNYFGcguMNj25E9Y,3044 +numpy/typing/tests/data/reveal/ufunc_config.pyi,sha256=buwSvat3SVFAFl5k8TL6Mgpi32o6hHZYZ2Lpn6AHdEU,1327 +numpy/typing/tests/data/reveal/ufunclike.pyi,sha256=V_gLcZVrTXJ21VkUMwA0HyxUgA1r6OzjsdJegaKL2GE,1329 +numpy/typing/tests/data/reveal/ufuncs.pyi,sha256=VnwYr5KT_FLKfc0wV7dtNz7bNtaC9VIQt-oz56Hb5EE,2798 +numpy/typing/tests/data/reveal/warnings_and_errors.pyi,sha256=ImMlPt2PQBtX8Qf1EZFmLjNWm8fPE6IWQ_deaq_-85s,538 +numpy/typing/tests/test_isfile.py,sha256=BhKZs4-LrhFUfKjcG0yelySjE6ZITMxGIBYWGDHMRb8,864 +numpy/typing/tests/test_runtime.py,sha256=2qu8JEliITnZCBJ_QJpohacj_OQ08o73ixS2w2ooNXI,3275 +numpy/typing/tests/test_typing.py,sha256=t3ENN-poI6vik_0_3KJk4upn2Kqi2EoG5gqu_DZG2_k,8822 +numpy/version.py,sha256=e2TyYD0sabXwK2qHO1DyPfnPg0DDt3eO_J89yWIFxHc,216 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/WHEEL b/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/WHEEL new file mode 100644 index 00000000..7991c975 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: meson +Root-Is-Purelib: false +Tag: cp39-cp39-macosx_11_0_arm64 \ No newline at end of file diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/entry_points.txt b/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/entry_points.txt new file mode 100644 index 00000000..450d8ef2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy-1.26.3.dist-info/entry_points.txt @@ -0,0 +1,9 @@ +[array_api] +numpy = numpy.array_api + +[pyinstaller40] +hook-dirs = numpy:_pyinstaller_hooks_dir + +[console_scripts] +f2py = numpy.f2py.f2py2e:main + diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/.dylibs/libgcc_s.1.1.dylib b/dbdpy-env/lib/python3.9/site-packages/numpy/.dylibs/libgcc_s.1.1.dylib new file mode 100755 index 00000000..71c96c0a Binary files /dev/null 
and b/dbdpy-env/lib/python3.9/site-packages/numpy/.dylibs/libgcc_s.1.1.dylib differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/.dylibs/libgfortran.5.dylib b/dbdpy-env/lib/python3.9/site-packages/numpy/.dylibs/libgfortran.5.dylib new file mode 100755 index 00000000..fb1257a9 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/.dylibs/libgfortran.5.dylib differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/.dylibs/libopenblas64_.0.dylib b/dbdpy-env/lib/python3.9/site-packages/numpy/.dylibs/libopenblas64_.0.dylib new file mode 100755 index 00000000..f026af7e Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/.dylibs/libopenblas64_.0.dylib differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/.dylibs/libquadmath.0.dylib b/dbdpy-env/lib/python3.9/site-packages/numpy/.dylibs/libquadmath.0.dylib new file mode 100755 index 00000000..0955f0ab Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/.dylibs/libquadmath.0.dylib differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/__config__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/__config__.py new file mode 100644 index 00000000..00814540 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/__config__.py @@ -0,0 +1,156 @@ +# This file is generated by numpy's build process +# It contains system_info results at the time of building this package. +from enum import Enum +from numpy.core._multiarray_umath import ( + __cpu_features__, + __cpu_baseline__, + __cpu_dispatch__, +) + +__all__ = ["show"] +_built_with_meson = True + + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + + +def _cleanup(d): + """ + Removes empty values in a `dict` recursively + This ensures we remove values that Meson could not provide to CONFIG + """ + if isinstance(d, dict): + return {k: _cleanup(v) for k, v in d.items() if v and _cleanup(v)} + else: + return d + + +CONFIG = _cleanup( + { + "Compilers": { + "c": { + "name": "clang", + "linker": "ld64", + "version": "14.0.0", + "commands": "cc", + }, + "cython": { + "name": "cython", + "linker": "cython", + "version": "3.0.7", + "commands": "cython", + }, + "c++": { + "name": "clang", + "linker": "ld64", + "version": "14.0.0", + "commands": "c++", + }, + }, + "Machine Information": { + "host": { + "cpu": "aarch64", + "family": "aarch64", + "endian": "little", + "system": "darwin", + }, + "build": { + "cpu": "aarch64", + "family": "aarch64", + "endian": "little", + "system": "darwin", + }, + "cross-compiled": bool("False".lower().replace("false", "")), + }, + "Build Dependencies": { + "blas": { + "name": "openblas64", + "found": bool("True".lower().replace("false", "")), + "version": "0.3.23.dev", + "detection method": "pkgconfig", + "include directory": r"/opt/arm64-builds/include", + "lib directory": r"/opt/arm64-builds/lib", + "openblas configuration": "USE_64BITINT=1 DYNAMIC_ARCH=1 DYNAMIC_OLDER= NO_CBLAS= NO_LAPACK= NO_LAPACKE= NO_AFFINITY=1 USE_OPENMP= SANDYBRIDGE MAX_THREADS=3", + "pc file directory": r"/usr/local/lib/pkgconfig", + }, + "lapack": { + "name": "dep4409210864", + "found": bool("True".lower().replace("false", "")), + "version": "1.26.3", + "detection method": "internal", + "include directory": r"unknown", + "lib directory": r"unknown", + "openblas configuration": "unknown", + "pc file directory": r"unknown", + }, + }, + "Python Information": { + "path": r"/private/var/folders/76/zy5ktkns50v6gt5g8r0sf6sc0000gn/T/cibw-run-8524sa_l/cp39-macosx_arm64/build/venv/bin/python", + 
"version": "3.9", + }, + "SIMD Extensions": { + "baseline": __cpu_baseline__, + "found": [ + feature for feature in __cpu_dispatch__ if __cpu_features__[feature] + ], + "not found": [ + feature for feature in __cpu_dispatch__ if not __cpu_features__[feature] + ], + }, + } +) + + +def _check_pyyaml(): + import yaml + + return yaml + + +def show(mode=DisplayModes.stdout.value): + """ + Show libraries and system information on which NumPy was built + and is being used + + Parameters + ---------- + mode : {`'stdout'`, `'dicts'`}, optional. + Indicates how to display the config information. + `'stdout'` prints to console, `'dicts'` returns a dictionary + of the configuration. + + Returns + ------- + out : {`dict`, `None`} + If mode is `'dicts'`, a dict is returned, else None + + See Also + -------- + get_include : Returns the directory containing NumPy C + header files. + + Notes + ----- + 1. The `'stdout'` mode will give more readable + output if ``pyyaml`` is installed + + """ + if mode == DisplayModes.stdout.value: + try: # Non-standard library, check import + yaml = _check_pyyaml() + + print(yaml.dump(CONFIG)) + except ModuleNotFoundError: + import warnings + import json + + warnings.warn("Install `pyyaml` for better output", stacklevel=1) + print(json.dumps(CONFIG, indent=2)) + elif mode == DisplayModes.dicts.value: + return CONFIG + else: + raise AttributeError( + f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}" + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/__init__.cython-30.pxd b/dbdpy-env/lib/python3.9/site-packages/numpy/__init__.cython-30.pxd new file mode 100644 index 00000000..1409514f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/__init__.cython-30.pxd @@ -0,0 +1,1050 @@ +# NumPy static imports for Cython >= 3.0 +# +# If any of the PyArray_* functions are called, import_array must be +# called first. This is done automatically by Cython 3.0+ if a call +# is not detected inside of the module. +# +# Author: Dag Sverre Seljebotn +# + +from cpython.ref cimport Py_INCREF +from cpython.object cimport PyObject, PyTypeObject, PyObject_TypeCheck +cimport libc.stdio as stdio + + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. 
+ # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.cython-30.pxd" */ + """ + + +cdef extern from "Python.h": + ctypedef int Py_intptr_t + +cdef extern from "numpy/arrayobject.h": + ctypedef Py_intptr_t npy_intp + ctypedef size_t npy_uintp + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_INT128 + NPY_INT256 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_UINT128 + NPY_UINT256 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_FLOAT256 + NPY_COMPLEX32 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + NPY_COMPLEX512 + + NPY_INTP + + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + # DEPRECATED since NumPy 1.7 ! Do not use in new code! + NPY_C_CONTIGUOUS + NPY_F_CONTIGUOUS + NPY_CONTIGUOUS + NPY_FORTRAN + NPY_OWNDATA + NPY_FORCECAST + NPY_ENSURECOPY + NPY_ENSUREARRAY + NPY_ELEMENTSTRIDES + NPY_ALIGNED + NPY_NOTSWAPPED + NPY_WRITEABLE + NPY_ARR_HAS_DESCR + + NPY_BEHAVED + NPY_BEHAVED_NS + NPY_CARRAY + NPY_CARRAY_RO + NPY_FARRAY + NPY_FARRAY_RO + NPY_DEFAULT + + NPY_IN_ARRAY + NPY_OUT_ARRAY + NPY_INOUT_ARRAY + NPY_IN_FARRAY + NPY_OUT_FARRAY + NPY_INOUT_FARRAY + + NPY_UPDATE_ALL + + enum: + # Added in NumPy 1.7 to replace the deprecated enums above. + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_WRITEBACKIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS + + npy_intp NPY_MAX_ELSIZE + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. 
it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + cdef char flags + cdef int type_num + cdef int itemsize "elsize" + cdef int alignment + cdef object fields + cdef tuple names + # Use PyDataType_HASSUBARRAY to test whether this field is + # valid (the pointer can be NULL). Most users should access + # this field via the inline helper method PyDataType_SHAPE. + cdef PyArray_ArrayDescr* subarray + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + # NOTE: no field declarations since direct access is deprecated since NumPy 1.7 + # Instead, we use properties that map to the corresponding C-API functions. + + @property + cdef inline PyObject* base(self) nogil: + """Returns a borrowed reference to the object owning the data/memory. + """ + return PyArray_BASE(self) + + @property + cdef inline dtype descr(self): + """Returns an owned reference to the dtype of the array. + """ + return PyArray_DESCR(self) + + @property + cdef inline int ndim(self) nogil: + """Returns the number of dimensions in the array. + """ + return PyArray_NDIM(self) + + @property + cdef inline npy_intp *shape(self) nogil: + """Returns a pointer to the dimensions/shape of the array. + The number of elements matches the number of dimensions of the array (ndim). + Can return NULL for 0-dimensional arrays. + """ + return PyArray_DIMS(self) + + @property + cdef inline npy_intp *strides(self) nogil: + """Returns a pointer to the strides of the array. + The number of elements matches the number of dimensions of the array (ndim). + """ + return PyArray_STRIDES(self) + + @property + cdef inline npy_intp size(self) nogil: + """Returns the total size (in number of elements) of the array. + """ + return PyArray_SIZE(self) + + @property + cdef inline char* data(self) nogil: + """The pointer to the data buffer as a char*. + This is provided for legacy reasons to avoid direct struct field access. + For new code that needs this access, you probably want to cast the result + of `PyArray_DATA()` instead, which returns a 'void*'. 
+ """ + return PyArray_BYTES(self) + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + ctypedef signed long long npy_int96 + ctypedef signed long long npy_int128 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + ctypedef unsigned long long npy_uint96 + ctypedef unsigned long long npy_uint128 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + float real + float imag + + ctypedef struct npy_cdouble: + double real + double imag + + ctypedef struct npy_clongdouble: + long double real + long double imag + + ctypedef struct npy_complex64: + float real + float imag + + ctypedef struct npy_complex128: + double real + double imag + + ctypedef struct npy_complex160: + long double real + long double imag + + ctypedef struct npy_complex192: + long double real + long double imag + + ctypedef struct npy_complex256: + long double real + long double imag + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. + # Do not use - subject to change any time. + int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. 
+ int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISPYTHON(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISPYTHON(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISPYTHON(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object 
PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(object, int val) + npy_intp PyArray_REFCOUNT(object) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_SetNumericOps (object) except -1 + object PyArray_GetNumericOps () + int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... + int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... 
+ void PyArray_SetStringFunction (object, int) + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CastTo (ndarray, ndarray) except -1 + int PyArray_CastAnyTo (ndarray, ndarray) except -1 + int PyArray_CanCastSafely (int, int) # writes errors + npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors + int PyArray_ObjectType (object, int) except 0 + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + object PyArray_ScalarFromObject (object) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + object PyArray_FromDims (int, int *, int) + #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) except -1 + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_MoveInto (ndarray, ndarray) except -1 + int PyArray_CopyInto (ndarray, ndarray) except -1 + int PyArray_CopyAnyInto (ndarray, ndarray) except -1 + int PyArray_CopyObject (ndarray, object) except -1 + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 + int PyArray_Dump (object, object, int) except -1 + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) # Cannot error + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) # clears errors as of 1.25 + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) + + int PyArray_PyIntAsInt (object) except? 
-1 + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) except -1 + void PyArray_FillObjectArray (ndarray, object) except * + int PyArray_FillWithScalar (ndarray, object) except -1 + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + object PyArray_NewFlagsObject (object) + npy_bool PyArray_CanCastScalar (type, type) + #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t) + int PyArray_RemoveSmallest (broadcast) except -1 + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) except * + void PyArray_Item_XDECREF (char *, dtype) except * + object PyArray_FieldNames (object) + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + #int PyArray_As1D (object*, char **, int *, int) + #int PyArray_As2D (object*, char ***, int *, int *, int) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, 
object) + object PyArray_MatrixProduct (object, object) + object PyArray_CopyAndTranspose (object) + object PyArray_Correlate (object, object, int) + int PyArray_TypestrConvert (int, int) + #int PyArray_DescrConverter (object, dtype*) except 0 + #int PyArray_DescrConverter2 (object, dtype*) except 0 + int PyArray_IntpConverter (object, PyArray_Dims *) except 0 + #int PyArray_BufferConverter (object, chunk) except 0 + int PyArray_AxisConverter (object, int *) except 0 + int PyArray_BoolConverter (object, npy_bool *) except 0 + int PyArray_ByteorderConverter (object, char *) except 0 + int PyArray_OrderConverter (object, NPY_ORDER *) except 0 + unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) except -1 + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_TypeNumFromName (char *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 + #int PyArray_OutputConverter (object, ndarray*) except 0 + object PyArray_BroadcastToShape (object, npy_intp *, int) + void _PyArray_SigintHandler (int) + void* _PyArray_GetSigintBuf () + #int PyArray_DescrAlignConverter (object, dtype*) except 0 + #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 + int PyArray_SearchsideConverter (object, void *) except 0 + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_CompareString (char *, char *, size_t) + int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. + +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. 
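# ---- Editorial usage sketch (not part of the vendored file) ----------------
# The fixed-width typedefs declared just below mirror NumPy's runtime dtypes,
# so consumer .pyx code can type buffers precisely. A minimal, hypothetical
# example, assuming the extension is compiled with NumPy's include directory:
cimport numpy as cnp

def sum_float64(cnp.float64_t[:] data):
    # Typed memoryview: every element is guaranteed to be an 8-byte float.
    cdef Py_ssize_t i
    cdef cnp.float64_t total = 0.0
    for i in range(data.shape[0]):
        total += data[i]
    return total
# -----------------------------------------------------------------------------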
+ +ctypedef npy_int8 int8_t +ctypedef npy_int16 int16_t +ctypedef npy_int32 int32_t +ctypedef npy_int64 int64_t +#ctypedef npy_int96 int96_t +#ctypedef npy_int128 int128_t + +ctypedef npy_uint8 uint8_t +ctypedef npy_uint16 uint16_t +ctypedef npy_uint32 uint32_t +ctypedef npy_uint64 uint64_t +#ctypedef npy_uint96 uint96_t +#ctypedef npy_uint128 uint128_t + +ctypedef npy_float32 float32_t +ctypedef npy_float64 float64_t +#ctypedef npy_float80 float80_t +#ctypedef npy_float128 float128_t + +ctypedef float complex complex64_t +ctypedef double complex complex128_t + +# The int types are mapped a bit surprising -- +# numpy.int corresponds to 'l' and numpy.long to 'q' +ctypedef npy_long int_t +ctypedef npy_longlong longlong_t + +ctypedef npy_ulong uint_t +ctypedef npy_ulonglong ulonglong_t + +ctypedef npy_intp intp_t +ctypedef npy_uintp uintp_t + +ctypedef npy_double float_t +ctypedef npy_double double_t +ctypedef npy_longdouble longdouble_t + +ctypedef npy_cfloat cfloat_t +ctypedef npy_cdouble cdouble_t +ctypedef npy_clongdouble clongdouble_t + +ctypedef npy_cdouble complex_t + +cdef inline object PyArray_MultiIterNew1(a): + return PyArray_MultiIterNew(1, a) + +cdef inline object PyArray_MultiIterNew2(a, b): + return PyArray_MultiIterNew(2, a, b) + +cdef inline object PyArray_MultiIterNew3(a, b, c): + return PyArray_MultiIterNew(3, a, b, c) + +cdef inline object PyArray_MultiIterNew4(a, b, c, d): + return PyArray_MultiIterNew(4, a, b, c, d) + +cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + return PyArray_MultiIterNew(5, a, b, c, d, e) + +cdef inline tuple PyDataType_SHAPE(dtype d): + if PyDataType_HASSUBARRAY(d): + return d.subarray.shape + else: + return () + + +cdef extern from "numpy/ndarrayobject.h": + PyTypeObject PyTimedeltaArrType_Type + PyTypeObject PyDatetimeArrType_Type + ctypedef int64_t npy_timedelta + ctypedef int64_t npy_datetime + +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct PyArray_DatetimeMetaData: + NPY_DATETIMEUNIT base + int64_t num + +cdef extern from "numpy/arrayscalars.h": + + # abstract types + ctypedef class numpy.generic [object PyObject]: + pass + ctypedef class numpy.number [object PyObject]: + pass + ctypedef class numpy.integer [object PyObject]: + pass + ctypedef class numpy.signedinteger [object PyObject]: + pass + ctypedef class numpy.unsignedinteger [object PyObject]: + pass + ctypedef class numpy.inexact [object PyObject]: + pass + ctypedef class numpy.floating [object PyObject]: + pass + ctypedef class numpy.complexfloating [object PyObject]: + pass + ctypedef class numpy.flexible [object PyObject]: + pass + ctypedef class numpy.character [object PyObject]: + pass + + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + NPY_FR_GENERIC + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject 
*userloops
+
+    cdef enum:
+        PyUFunc_Zero
+        PyUFunc_One
+        PyUFunc_None
+        UFUNC_ERR_IGNORE
+        UFUNC_ERR_WARN
+        UFUNC_ERR_RAISE
+        UFUNC_ERR_CALL
+        UFUNC_ERR_PRINT
+        UFUNC_ERR_LOG
+        UFUNC_MASK_DIVIDEBYZERO
+        UFUNC_MASK_OVERFLOW
+        UFUNC_MASK_UNDERFLOW
+        UFUNC_MASK_INVALID
+        UFUNC_SHIFT_DIVIDEBYZERO
+        UFUNC_SHIFT_OVERFLOW
+        UFUNC_SHIFT_UNDERFLOW
+        UFUNC_SHIFT_INVALID
+        UFUNC_FPE_DIVIDEBYZERO
+        UFUNC_FPE_OVERFLOW
+        UFUNC_FPE_UNDERFLOW
+        UFUNC_FPE_INVALID
+        UFUNC_ERR_DEFAULT
+        UFUNC_ERR_DEFAULT2
+
+    object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *,
+          void **, char *, int, int, int, int, char *, char *, int)
+    int PyUFunc_RegisterLoopForType(ufunc, int,
+                                    PyUFuncGenericFunction, int *, void *) except -1
+    void PyUFunc_f_f_As_d_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_d_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_f_f \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_g_g \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_F_F_As_D_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_F_F \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_D_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_G_G \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_O_O \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_ff_f_As_dd_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_ff_f \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_dd_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_gg_g \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_FF_F_As_DD_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_DD_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_FF_F \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_GG_G \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_OO_O \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_O_O_method \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_OO_O_method \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_On_Om \
+         (char **, npy_intp *, npy_intp *, void *)
+    int PyUFunc_GetPyValues \
+        (char *, int *, int *, PyObject **)
+    int PyUFunc_checkfperr \
+        (int, PyObject *, int *)
+    void PyUFunc_clearfperr()
+    int PyUFunc_getfperr()
+    int PyUFunc_handlefperr \
+        (int, PyObject *, int, int *) except -1
+    int PyUFunc_ReplaceLoopBySignature \
+        (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
+    object PyUFunc_FromFuncAndDataAndSignature \
+             (PyUFuncGenericFunction *, void **, char *, int, int, int,
+              int, char *, char *, int, char *)
+
+    int _import_umath() except -1
+
+cdef inline void set_array_base(ndarray arr, object base):
+    Py_INCREF(base) # important to do this before stealing the reference below!
+    PyArray_SetBaseObject(arr, base)
+
+cdef inline object get_array_base(ndarray arr):
+    base = PyArray_BASE(arr)
+    if base is NULL:
+        return None
+    return <object>base
+
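# ---- Editorial usage sketch (not part of the vendored file) ----------------
# The import_* wrappers defined just below raise ImportError on failure rather
# than returning -1, so a consumer module calls one of them once at import
# time, before any PyArray_* call. Hypothetical .pyx consumer:
cimport numpy as cnp

cnp.import_array()  # initialise the C API table; must precede PyArray_* use

def make_vector(int n):
    cdef cnp.npy_intp dims = n
    # Without the import_array() call above, this would crash at runtime.
    return cnp.PyArray_SimpleNew(1, &dims, cnp.NPY_DOUBLE)
# -----------------------------------------------------------------------------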
+# Versions of the import_* functions which are more suitable for
+# Cython code.
+cdef inline int import_array() except -1:
+    try:
+        __pyx_import_array()
+    except Exception:
+        raise ImportError("numpy.core.multiarray failed to import")
+
+cdef inline int import_umath() except -1:
+    try:
+        _import_umath()
+    except Exception:
+        raise ImportError("numpy.core.umath failed to import")
+
+cdef inline int import_ufunc() except -1:
+    try:
+        _import_umath()
+    except Exception:
+        raise ImportError("numpy.core.umath failed to import")
+
+
+cdef inline bint is_timedelta64_object(object obj):
+    """
+    Cython equivalent of `isinstance(obj, np.timedelta64)`
+
+    Parameters
+    ----------
+    obj : object
+
+    Returns
+    -------
+    bool
+    """
+    return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)
+
+
+cdef inline bint is_datetime64_object(object obj):
+    """
+    Cython equivalent of `isinstance(obj, np.datetime64)`
+
+    Parameters
+    ----------
+    obj : object
+
+    Returns
+    -------
+    bool
+    """
+    return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)
+
+
+cdef inline npy_datetime get_datetime64_value(object obj) nogil:
+    """
+    returns the int64 value underlying scalar numpy datetime64 object
+
+    Note that to interpret this as a datetime, the corresponding unit is
+    also needed. That can be found using `get_datetime64_unit`.
+    """
+    return (<PyDatetimeScalarObject*>obj).obval
+
+
+cdef inline npy_timedelta get_timedelta64_value(object obj) nogil:
+    """
+    returns the int64 value underlying scalar numpy timedelta64 object
+    """
+    return (<PyTimedeltaScalarObject*>obj).obval
+
+
+cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil:
+    """
+    returns the unit part of the dtype for a numpy datetime64 object.
+    """
+    return (<PyDatetimeScalarObject*>obj).obmeta.base
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/__init__.pxd b/dbdpy-env/lib/python3.9/site-packages/numpy/__init__.pxd
new file mode 100644
index 00000000..ca0a3a6c
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/__init__.pxd
@@ -0,0 +1,1015 @@
+# NumPy static imports for Cython < 3.0
+#
+# If any of the PyArray_* functions are called, import_array must be
+# called first.
+# +# Author: Dag Sverre Seljebotn +# + +DEF _buffer_format_string_len = 255 + +cimport cpython.buffer as pybuf +from cpython.ref cimport Py_INCREF +from cpython.mem cimport PyObject_Malloc, PyObject_Free +from cpython.object cimport PyObject, PyTypeObject +from cpython.buffer cimport PyObject_GetBuffer +from cpython.type cimport type +cimport libc.stdio as stdio + +cdef extern from "Python.h": + ctypedef int Py_intptr_t + bint PyObject_TypeCheck(object obj, PyTypeObject* type) + +cdef extern from "numpy/arrayobject.h": + ctypedef Py_intptr_t npy_intp + ctypedef size_t npy_uintp + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_INT128 + NPY_INT256 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_UINT128 + NPY_UINT256 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_FLOAT256 + NPY_COMPLEX32 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + NPY_COMPLEX512 + + NPY_INTP + + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + # DEPRECATED since NumPy 1.7 ! Do not use in new code! + NPY_C_CONTIGUOUS + NPY_F_CONTIGUOUS + NPY_CONTIGUOUS + NPY_FORTRAN + NPY_OWNDATA + NPY_FORCECAST + NPY_ENSURECOPY + NPY_ENSUREARRAY + NPY_ELEMENTSTRIDES + NPY_ALIGNED + NPY_NOTSWAPPED + NPY_WRITEABLE + NPY_ARR_HAS_DESCR + + NPY_BEHAVED + NPY_BEHAVED_NS + NPY_CARRAY + NPY_CARRAY_RO + NPY_FARRAY + NPY_FARRAY_RO + NPY_DEFAULT + + NPY_IN_ARRAY + NPY_OUT_ARRAY + NPY_INOUT_ARRAY + NPY_IN_FARRAY + NPY_OUT_FARRAY + NPY_INOUT_FARRAY + + NPY_UPDATE_ALL + + enum: + # Added in NumPy 1.7 to replace the deprecated enums above. + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_WRITEBACKIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS + + npy_intp NPY_MAX_ELSIZE + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. 
+ PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + cdef char flags + cdef int type_num + cdef int itemsize "elsize" + cdef int alignment + cdef object fields + cdef tuple names + # Use PyDataType_HASSUBARRAY to test whether this field is + # valid (the pointer can be NULL). Most users should access + # this field via the inline helper method PyDataType_SHAPE. + cdef PyArray_ArrayDescr* subarray + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + cdef int numiter + cdef npy_intp size, index + cdef int nd + cdef npy_intp *dimensions + cdef void **iters + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + cdef: + # Only taking a few of the most commonly used and stable fields. + # One should use PyArray_* macros instead to access the C fields. + char *data + int ndim "nd" + npy_intp *shape "dimensions" + npy_intp *strides + dtype descr # deprecated since NumPy 1.7 ! + PyObject* base # NOT PUBLIC, DO NOT USE ! 
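# ---- Editorial usage sketch (not part of the vendored file) ----------------
# As the comment in the ndarray class above says, prefer the PyArray_* macros
# declared further below over the raw struct fields. Hypothetical accessor:
cimport numpy as cnp

cdef double first_float64(cnp.ndarray arr) except? -1.0:
    if cnp.PyArray_TYPE(arr) != cnp.NPY_DOUBLE or cnp.PyArray_NDIM(arr) != 1:
        raise TypeError("expected a 1-D float64 array")
    if cnp.PyArray_DIM(arr, 0) == 0:
        raise ValueError("array is empty")
    # PyArray_DATA returns void*; the cast is safe after the checks above.
    return (<double*>cnp.PyArray_DATA(arr))[0]
# -----------------------------------------------------------------------------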
+ + + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + ctypedef signed long long npy_int96 + ctypedef signed long long npy_int128 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + ctypedef unsigned long long npy_uint96 + ctypedef unsigned long long npy_uint128 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + float real + float imag + + ctypedef struct npy_cdouble: + double real + double imag + + ctypedef struct npy_clongdouble: + long double real + long double imag + + ctypedef struct npy_complex64: + float real + float imag + + ctypedef struct npy_complex128: + double real + double imag + + ctypedef struct npy_complex160: + long double real + long double imag + + ctypedef struct npy_complex192: + long double real + long double imag + + ctypedef struct npy_complex256: + long double real + long double imag + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. + # Do not use - subject to change any time. + int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! 
+ int PyArray_FLAGS(ndarray) nogil + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISPYTHON(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISPYTHON(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISPYTHON(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object 
PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(object, int val) + npy_intp PyArray_REFCOUNT(object) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_SetNumericOps (object) except -1 + object PyArray_GetNumericOps () + int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... + int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... 
+ void PyArray_SetStringFunction (object, int) + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CastTo (ndarray, ndarray) except -1 + int PyArray_CastAnyTo (ndarray, ndarray) except -1 + int PyArray_CanCastSafely (int, int) # writes errors + npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors + int PyArray_ObjectType (object, int) except 0 + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + object PyArray_ScalarFromObject (object) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + object PyArray_FromDims (int, int *, int) + #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) except -1 + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_MoveInto (ndarray, ndarray) except -1 + int PyArray_CopyInto (ndarray, ndarray) except -1 + int PyArray_CopyAnyInto (ndarray, ndarray) except -1 + int PyArray_CopyObject (ndarray, object) except -1 + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 + int PyArray_Dump (object, object, int) except -1 + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) # Cannot error + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) # clears errors as of 1.25 + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) + + int PyArray_PyIntAsInt (object) except? 
-1 + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) except -1 + void PyArray_FillObjectArray (ndarray, object) except * + int PyArray_FillWithScalar (ndarray, object) except -1 + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + object PyArray_NewFlagsObject (object) + npy_bool PyArray_CanCastScalar (type, type) + #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t) + int PyArray_RemoveSmallest (broadcast) except -1 + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) except * + void PyArray_Item_XDECREF (char *, dtype) except * + object PyArray_FieldNames (object) + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + #int PyArray_As1D (object*, char **, int *, int) + #int PyArray_As2D (object*, char ***, int *, int *, int) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, 
object) + object PyArray_MatrixProduct (object, object) + object PyArray_CopyAndTranspose (object) + object PyArray_Correlate (object, object, int) + int PyArray_TypestrConvert (int, int) + #int PyArray_DescrConverter (object, dtype*) except 0 + #int PyArray_DescrConverter2 (object, dtype*) except 0 + int PyArray_IntpConverter (object, PyArray_Dims *) except 0 + #int PyArray_BufferConverter (object, chunk) except 0 + int PyArray_AxisConverter (object, int *) except 0 + int PyArray_BoolConverter (object, npy_bool *) except 0 + int PyArray_ByteorderConverter (object, char *) except 0 + int PyArray_OrderConverter (object, NPY_ORDER *) except 0 + unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) except -1 + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_TypeNumFromName (char *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 + #int PyArray_OutputConverter (object, ndarray*) except 0 + object PyArray_BroadcastToShape (object, npy_intp *, int) + void _PyArray_SigintHandler (int) + void* _PyArray_GetSigintBuf () + #int PyArray_DescrAlignConverter (object, dtype*) except 0 + #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 + int PyArray_SearchsideConverter (object, void *) except 0 + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_CompareString (char *, char *, size_t) + int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. + +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. 
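# ---- Editorial usage sketch (not part of the vendored file) ----------------
# The broadcast class and PyArray_MultiIter_* macros declared above walk
# several operands in lockstep under NumPy broadcasting rules. A hypothetical
# inner product that assumes both operands are float64 arrays:
cimport numpy as cnp

def broadcast_dot(object a, object b):
    cdef cnp.broadcast it = cnp.PyArray_MultiIterNew2(a, b)
    cdef double total = 0.0
    while cnp.PyArray_MultiIter_NOTDONE(it):
        total += ((<double*>cnp.PyArray_MultiIter_DATA(it, 0))[0]
                  * (<double*>cnp.PyArray_MultiIter_DATA(it, 1))[0])
        cnp.PyArray_MultiIter_NEXT(it)
    return total
# -----------------------------------------------------------------------------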
+ +ctypedef npy_int8 int8_t +ctypedef npy_int16 int16_t +ctypedef npy_int32 int32_t +ctypedef npy_int64 int64_t +#ctypedef npy_int96 int96_t +#ctypedef npy_int128 int128_t + +ctypedef npy_uint8 uint8_t +ctypedef npy_uint16 uint16_t +ctypedef npy_uint32 uint32_t +ctypedef npy_uint64 uint64_t +#ctypedef npy_uint96 uint96_t +#ctypedef npy_uint128 uint128_t + +ctypedef npy_float32 float32_t +ctypedef npy_float64 float64_t +#ctypedef npy_float80 float80_t +#ctypedef npy_float128 float128_t + +ctypedef float complex complex64_t +ctypedef double complex complex128_t + +# The int types are mapped a bit surprising -- +# numpy.int corresponds to 'l' and numpy.long to 'q' +ctypedef npy_long int_t +ctypedef npy_longlong longlong_t + +ctypedef npy_ulong uint_t +ctypedef npy_ulonglong ulonglong_t + +ctypedef npy_intp intp_t +ctypedef npy_uintp uintp_t + +ctypedef npy_double float_t +ctypedef npy_double double_t +ctypedef npy_longdouble longdouble_t + +ctypedef npy_cfloat cfloat_t +ctypedef npy_cdouble cdouble_t +ctypedef npy_clongdouble clongdouble_t + +ctypedef npy_cdouble complex_t + +cdef inline object PyArray_MultiIterNew1(a): + return PyArray_MultiIterNew(1, a) + +cdef inline object PyArray_MultiIterNew2(a, b): + return PyArray_MultiIterNew(2, a, b) + +cdef inline object PyArray_MultiIterNew3(a, b, c): + return PyArray_MultiIterNew(3, a, b, c) + +cdef inline object PyArray_MultiIterNew4(a, b, c, d): + return PyArray_MultiIterNew(4, a, b, c, d) + +cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + return PyArray_MultiIterNew(5, a, b, c, d, e) + +cdef inline tuple PyDataType_SHAPE(dtype d): + if PyDataType_HASSUBARRAY(d): + return d.subarray.shape + else: + return () + + +cdef extern from "numpy/ndarrayobject.h": + PyTypeObject PyTimedeltaArrType_Type + PyTypeObject PyDatetimeArrType_Type + ctypedef int64_t npy_timedelta + ctypedef int64_t npy_datetime + +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct PyArray_DatetimeMetaData: + NPY_DATETIMEUNIT base + int64_t num + +cdef extern from "numpy/arrayscalars.h": + + # abstract types + ctypedef class numpy.generic [object PyObject]: + pass + ctypedef class numpy.number [object PyObject]: + pass + ctypedef class numpy.integer [object PyObject]: + pass + ctypedef class numpy.signedinteger [object PyObject]: + pass + ctypedef class numpy.unsignedinteger [object PyObject]: + pass + ctypedef class numpy.inexact [object PyObject]: + pass + ctypedef class numpy.floating [object PyObject]: + pass + ctypedef class numpy.complexfloating [object PyObject]: + pass + ctypedef class numpy.flexible [object PyObject]: + pass + ctypedef class numpy.character [object PyObject]: + pass + + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + NPY_FR_GENERIC + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject 
*userloops
+
+    cdef enum:
+        PyUFunc_Zero
+        PyUFunc_One
+        PyUFunc_None
+        UFUNC_ERR_IGNORE
+        UFUNC_ERR_WARN
+        UFUNC_ERR_RAISE
+        UFUNC_ERR_CALL
+        UFUNC_ERR_PRINT
+        UFUNC_ERR_LOG
+        UFUNC_MASK_DIVIDEBYZERO
+        UFUNC_MASK_OVERFLOW
+        UFUNC_MASK_UNDERFLOW
+        UFUNC_MASK_INVALID
+        UFUNC_SHIFT_DIVIDEBYZERO
+        UFUNC_SHIFT_OVERFLOW
+        UFUNC_SHIFT_UNDERFLOW
+        UFUNC_SHIFT_INVALID
+        UFUNC_FPE_DIVIDEBYZERO
+        UFUNC_FPE_OVERFLOW
+        UFUNC_FPE_UNDERFLOW
+        UFUNC_FPE_INVALID
+        UFUNC_ERR_DEFAULT
+        UFUNC_ERR_DEFAULT2
+
+    object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *,
+          void **, char *, int, int, int, int, char *, char *, int)
+    int PyUFunc_RegisterLoopForType(ufunc, int,
+                                    PyUFuncGenericFunction, int *, void *) except -1
+    void PyUFunc_f_f_As_d_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_d_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_f_f \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_g_g \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_F_F_As_D_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_F_F \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_D_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_G_G \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_O_O \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_ff_f_As_dd_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_ff_f \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_dd_d \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_gg_g \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_FF_F_As_DD_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_DD_D \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_FF_F \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_GG_G \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_OO_O \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_O_O_method \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_OO_O_method \
+         (char **, npy_intp *, npy_intp *, void *)
+    void PyUFunc_On_Om \
+         (char **, npy_intp *, npy_intp *, void *)
+    int PyUFunc_GetPyValues \
+        (char *, int *, int *, PyObject **)
+    int PyUFunc_checkfperr \
+        (int, PyObject *, int *)
+    void PyUFunc_clearfperr()
+    int PyUFunc_getfperr()
+    int PyUFunc_handlefperr \
+        (int, PyObject *, int, int *) except -1
+    int PyUFunc_ReplaceLoopBySignature \
+        (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
+    object PyUFunc_FromFuncAndDataAndSignature \
+             (PyUFuncGenericFunction *, void **, char *, int, int, int,
+              int, char *, char *, int, char *)
+
+    int _import_umath() except -1
+
+cdef inline void set_array_base(ndarray arr, object base):
+    Py_INCREF(base) # important to do this before stealing the reference below!
+    PyArray_SetBaseObject(arr, base)
+
+cdef inline object get_array_base(ndarray arr):
+    base = PyArray_BASE(arr)
+    if base is NULL:
+        return None
+    return <object>base
+
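# ---- Editorial usage sketch (not part of the vendored file) ----------------
# set_array_base/get_array_base above manage ownership for memory an ndarray
# merely borrows: the base object is INCREF'd and released with the array.
# Hypothetical wrapper around externally owned storage:
cimport numpy as cnp

cdef cnp.ndarray wrap_doubles(object owner, double* data, cnp.npy_intp n):
    cdef cnp.ndarray arr = cnp.PyArray_SimpleNewFromData(
        1, &n, cnp.NPY_DOUBLE, <void*>data)
    cnp.set_array_base(arr, owner)  # keeps `owner` alive as long as `arr` lives
    return arr
# -----------------------------------------------------------------------------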
+# Versions of the import_* functions which are more suitable for
+# Cython code.
+cdef inline int import_array() except -1:
+    try:
+        __pyx_import_array()
+    except Exception:
+        raise ImportError("numpy.core.multiarray failed to import")
+
+cdef inline int import_umath() except -1:
+    try:
+        _import_umath()
+    except Exception:
+        raise ImportError("numpy.core.umath failed to import")
+
+cdef inline int import_ufunc() except -1:
+    try:
+        _import_umath()
+    except Exception:
+        raise ImportError("numpy.core.umath failed to import")
+
+cdef extern from *:
+    # Leave a marker that the NumPy declarations came from this file
+    # See https://github.com/cython/cython/issues/3573
+    """
+    /* NumPy API declarations from "numpy/__init__.pxd" */
+    """
+
+
+cdef inline bint is_timedelta64_object(object obj):
+    """
+    Cython equivalent of `isinstance(obj, np.timedelta64)`
+
+    Parameters
+    ----------
+    obj : object
+
+    Returns
+    -------
+    bool
+    """
+    return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)
+
+
+cdef inline bint is_datetime64_object(object obj):
+    """
+    Cython equivalent of `isinstance(obj, np.datetime64)`
+
+    Parameters
+    ----------
+    obj : object
+
+    Returns
+    -------
+    bool
+    """
+    return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)
+
+
+cdef inline npy_datetime get_datetime64_value(object obj) nogil:
+    """
+    returns the int64 value underlying scalar numpy datetime64 object
+
+    Note that to interpret this as a datetime, the corresponding unit is
+    also needed. That can be found using `get_datetime64_unit`.
+    """
+    return (<PyDatetimeScalarObject*>obj).obval
+
+
+cdef inline npy_timedelta get_timedelta64_value(object obj) nogil:
+    """
+    returns the int64 value underlying scalar numpy timedelta64 object
+    """
+    return (<PyTimedeltaScalarObject*>obj).obval
+
+
+cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil:
+    """
+    returns the unit part of the dtype for a numpy datetime64 object.
+    """
+    return (<PyDatetimeScalarObject*>obj).obmeta.base
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/__init__.py
new file mode 100644
index 00000000..91da496a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/__init__.py
@@ -0,0 +1,461 @@
+"""
+NumPy
+=====
+
+Provides
+  1. An array object of arbitrary homogeneous items
+  2. Fast mathematical operations over arrays
+  3. Linear Algebra, Fourier Transforms, Random Number Generation
+
+How to use the documentation
+----------------------------
+Documentation is available in two forms: docstrings provided
+with the code, and a loose standing reference guide, available from
+`the NumPy homepage <https://numpy.org>`_.
+
+We recommend exploring the docstrings using
+`IPython <https://ipython.org>`_, an advanced Python shell with
+TAB-completion and introspection capabilities. See below for further
+instructions.
+
+The docstring examples assume that `numpy` has been imported as ``np``::
+
+  >>> import numpy as np
+
+Code snippets are indicated by three greater-than signs::
+
+  >>> x = 42
+  >>> x = x + 1
+
+Use the built-in ``help`` function to view a function's docstring::
+
+  >>> help(np.sort)
+  ... # doctest: +SKIP
+
+For some objects, ``np.info(obj)`` may provide additional help. This is
+particularly true if you see the line "Help on ufunc object:" at the top
+of the help() page. Ufuncs are implemented in C, not Python, for speed.
+The native Python help() does not know how to view their help, but our
+np.info() function does.
+
+To search for documents containing a keyword, do::
+
+  >>> np.lookfor('keyword')
+  ... # doctest: +SKIP
+
+General-purpose documents like a glossary and help on the basic concepts
+of numpy are available under the ``doc`` sub-module::
+
+  >>> from numpy import doc
+  >>> help(doc)
+  ... # doctest: +SKIP
+
+Available subpackages
+---------------------
+lib
+    Basic functions used by several sub-packages.
+random
+    Core Random Tools
+linalg
+    Core Linear Algebra Tools
+fft
+    Core FFT routines
+polynomial
+    Polynomial tools
+testing
+    NumPy testing tools
+distutils
+    Enhancements to distutils with support for
+    Fortran compilers and more (for Python <= 3.11).
+
+Utilities
+---------
+test
+    Run numpy unittests
+show_config
+    Show numpy build configuration
+matlib
+    Make everything matrices.
+__version__
+    NumPy version string
+
+Viewing documentation using IPython
+-----------------------------------
+
+Start IPython and import `numpy` usually under the alias ``np``: `import
+numpy as np`. Then, directly paste or use the ``%cpaste`` magic to paste
+examples into the shell. To see which functions are available in `numpy`,
+type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
+``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
+down the list. To view the docstring for a function, use
+``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
+the source code).
+
+Copies vs. in-place operation
+-----------------------------
+Most of the functions in `numpy` return a copy of the array argument
+(e.g., `np.sort`). In-place versions of these functions are often
+available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
+Exceptions to this rule are documented.
+
+"""
+import sys
+import warnings
+
+from ._globals import _NoValue, _CopyMode
+# These exceptions were moved in 1.25 and are hidden from __dir__()
+from .exceptions import (
+    ComplexWarning, ModuleDeprecationWarning, VisibleDeprecationWarning,
+    TooHardError, AxisError)
+
+
+# If a version with git hash was stored, use that instead
+from . import version
+from .version import __version__
+
+# We first need to detect if we're being called as part of the numpy setup
+# procedure itself in a reliable manner.
+try:
+    __NUMPY_SETUP__
+except NameError:
+    __NUMPY_SETUP__ = False
+
+if __NUMPY_SETUP__:
+    sys.stderr.write('Running from numpy source directory.\n')
+else:
+    # Allow distributors to run custom init code before importing numpy.core
+    from . import _distributor_init
+
+    try:
+        from numpy.__config__ import show as show_config
+    except ImportError as e:
+        msg = """Error importing numpy: you should not try to import numpy from
+        its source directory; please exit the numpy source tree, and relaunch
+        your python interpreter from there."""
+        raise ImportError(msg) from e
+
+    __all__ = [
+        'exceptions', 'ModuleDeprecationWarning', 'VisibleDeprecationWarning',
+        'ComplexWarning', 'TooHardError', 'AxisError']
+
+    # mapping of {name: (value, deprecation_msg)}
+    __deprecated_attrs__ = {}
+
+    from . import core
+    from .core import *
+    from . import compat
+    from . import exceptions
+    from . import dtypes
+    from . import lib
+    # NOTE: to be revisited following future namespace cleanup.
+    # See gh-14454 and gh-15672 for discussion.
+    from .lib import *
+
+    from . import linalg
+    from . import fft
+    from . import polynomial
+    from . import random
+    from . import ctypeslib
+    from . import ma
+    from . import matrixlib as _mat
+    from .matrixlib import *
+
+    # Deprecations introduced in NumPy 1.20.0, 2020-06-06
+    import builtins as _builtins
+
+    _msg = (
+        "module 'numpy' has no attribute '{n}'.\n"
+        "`np.{n}` was a deprecated alias for the builtin `{n}`. "
+        "To avoid this error in existing code, use `{n}` by itself. "
+        "Doing this will not modify any behavior and is safe. {extended_msg}\n"
+        "The alias was originally deprecated in NumPy 1.20; for more "
+        "details and guidance see the original release note at:\n"
+        "    https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations")
+
+    _specific_msg = (
+        "If you specifically wanted the numpy scalar type, use `np.{}` here.")
+
+    _int_extended_msg = (
+        "When replacing `np.{}`, you may wish to use e.g. `np.int64` "
+        "or `np.int32` to specify the precision. If you wish to review "
+        "your current use, check the release note link for "
+        "additional information.")
+
+    _type_info = [
+        ("object", ""),  # The NumPy scalar only exists by name.
+        ("bool", _specific_msg.format("bool_")),
+        ("float", _specific_msg.format("float64")),
+        ("complex", _specific_msg.format("complex128")),
+        ("str", _specific_msg.format("str_")),
+        ("int", _int_extended_msg.format("int"))]
+
+    __former_attrs__ = {
+        n: _msg.format(n=n, extended_msg=extended_msg)
+        for n, extended_msg in _type_info
+    }
+
+    # Future warning introduced in NumPy 1.24.0, 2022-11-17
+    _msg = (
+        "`np.{n}` is a deprecated alias for `{an}`. (Deprecated NumPy 1.24)")
+
+    # Some of these are awkward (since `np.str` may be preferable in the long
+    # term), but overall the names ending in 0 seem undesirable
+    _type_info = [
+        ("bool8", bool_, "np.bool_"),
+        ("int0", intp, "np.intp"),
+        ("uint0", uintp, "np.uintp"),
+        ("str0", str_, "np.str_"),
+        ("bytes0", bytes_, "np.bytes_"),
+        ("void0", void, "np.void"),
+        ("object0", object_,
+            "`np.object0` is a deprecated alias for `np.object_`. "
+            "`object` can be used instead. (Deprecated NumPy 1.24)")]
+
+    # Some of these could be defined right away, but most were aliases to
+    # the Python objects and only removed in NumPy 1.24. Defining them should
+    # probably wait for NumPy 1.26 or 2.0.
+    # When defined, these should possibly not be added to `__all__` to avoid
+    # import with `from numpy import *`.
+    __future_scalars__ = {"bool", "long", "ulong", "str", "bytes", "object"}
+
+    __deprecated_attrs__.update({
+        n: (alias, _msg.format(n=n, an=an)) for n, alias, an in _type_info})
+
+    import math
+
+    __deprecated_attrs__['math'] = (math,
+        "`np.math` is a deprecated alias for the standard library `math` "
+        "module (Deprecated NumPy 1.25). Replace usages of `np.math` with "
+        "`math`")
+
+    del math, _msg, _type_info
+
+    from .core import abs
+    # now that numpy modules are imported, can initialize limits
+    core.getlimits._register_known_types()
+
+    __all__.extend(['__version__', 'show_config'])
+    __all__.extend(core.__all__)
+    __all__.extend(_mat.__all__)
+    __all__.extend(lib.__all__)
+    __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
+
+    # Remove min and max from __all__ to avoid `from numpy import *` overriding
+    # the builtins min/max. Temporary fix for 1.25.x/1.26.x, see gh-24229.
+    __all__.remove('min')
+    __all__.remove('max')
+    __all__.remove('round')
+
+    # Remove one of the two occurrences of `issubdtype`, which is exposed as
+    # both `numpy.core.issubdtype` and `numpy.lib.issubdtype`.
+ __all__.remove('issubdtype') + + # These are exported by np.core, but are replaced by the builtins below + # remove them to ensure that we don't end up with `np.long == np.int_`, + # which would be a breaking change. + del long, unicode + __all__.remove('long') + __all__.remove('unicode') + + # Remove things that are in the numpy.lib but not in the numpy namespace + # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace) + # that prevents adding more things to the main namespace by accident. + # The list below will grow until the `from .lib import *` fixme above is + # taken care of + __all__.remove('Arrayterator') + del Arrayterator + + # These names were removed in NumPy 1.20. For at least one release, + # attempts to access these names in the numpy namespace will trigger + # a warning, and calling the function will raise an exception. + _financial_names = ['fv', 'ipmt', 'irr', 'mirr', 'nper', 'npv', 'pmt', + 'ppmt', 'pv', 'rate'] + __expired_functions__ = { + name: (f'In accordance with NEP 32, the function {name} was removed ' + 'from NumPy version 1.20. A replacement for this function ' + 'is available in the numpy_financial library: ' + 'https://pypi.org/project/numpy-financial') + for name in _financial_names} + + # Filter out Cython harmless warnings + warnings.filterwarnings("ignore", message="numpy.dtype size changed") + warnings.filterwarnings("ignore", message="numpy.ufunc size changed") + warnings.filterwarnings("ignore", message="numpy.ndarray size changed") + + # oldnumeric and numarray were removed in 1.9. In case some packages import + # but do not use them, we define them here for backward compatibility. + oldnumeric = 'removed' + numarray = 'removed' + + def __getattr__(attr): + # Warn for expired attributes, and return a dummy function + # that always raises an exception. + import warnings + import math + try: + msg = __expired_functions__[attr] + except KeyError: + pass + else: + warnings.warn(msg, DeprecationWarning, stacklevel=2) + + def _expired(*args, **kwds): + raise RuntimeError(msg) + + return _expired + + # Emit warnings for deprecated attributes + try: + val, msg = __deprecated_attrs__[attr] + except KeyError: + pass + else: + warnings.warn(msg, DeprecationWarning, stacklevel=2) + return val + + if attr in __future_scalars__: + # And future warnings for those that will change, but also give + # the AttributeError + warnings.warn( + f"In the future `np.{attr}` will be defined as the " + "corresponding NumPy scalar.", FutureWarning, stacklevel=2) + + if attr in __former_attrs__: + raise AttributeError(__former_attrs__[attr]) + + if attr == 'testing': + import numpy.testing as testing + return testing + elif attr == 'Tester': + "Removed in NumPy 1.25.0" + raise RuntimeError("Tester was removed in NumPy 1.25.") + + raise AttributeError("module {!r} has no attribute " + "{!r}".format(__name__, attr)) + + def __dir__(): + public_symbols = globals().keys() | {'testing'} + public_symbols -= { + "core", "matrixlib", + # These were moved in 1.25 and may be deprecated eventually: + "ModuleDeprecationWarning", "VisibleDeprecationWarning", + "ComplexWarning", "TooHardError", "AxisError" + } + return list(public_symbols) + + # Pytest testing + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + def _sanity_check(): + """ + Quick sanity checks for common bugs caused by environment. + There are some cases e.g. 
with a wrong BLAS ABI, that cause wrong
+        results under specific runtime conditions that are not necessarily
+        achieved during test suite runs, and it is useful to catch those early.
+
+        See https://github.com/numpy/numpy/issues/8577 and other
+        similar bug reports.
+
+        """
+        try:
+            x = ones(2, dtype=float32)
+            if not abs(x.dot(x) - float32(2.0)) < 1e-5:
+                raise AssertionError()
+        except AssertionError:
+            msg = ("The current NumPy installation ({!r}) fails to "
+                   "pass simple sanity checks. This can be caused for example "
+                   "by an incorrect BLAS library being linked in, or by mixing "
+                   "package managers (pip, conda, apt, ...). Search closed "
+                   "numpy issues for similar problems.")
+            raise RuntimeError(msg.format(__file__)) from None
+
+    _sanity_check()
+    del _sanity_check
+
+    def _mac_os_check():
+        """
+        Quick sanity check for macOS: look for Accelerate build bugs.
+        Testing numpy polyfit calls init_dgelsd (LAPACK).
+        """
+        try:
+            c = array([3., 2., 1.])
+            x = linspace(0, 2, 5)
+            y = polyval(c, x)
+            _ = polyfit(x, y, 2, cov=True)
+        except ValueError:
+            pass
+
+    if sys.platform == "darwin":
+        from . import exceptions
+        with warnings.catch_warnings(record=True) as w:
+            _mac_os_check()
+            # Raise a RuntimeError if the test failed: check for the warning
+            # and build the error message from it.
+            if len(w) > 0:
+                for _wn in w:
+                    if _wn.category is exceptions.RankWarning:
+                        # Ignore other warnings, they may not be relevant
+                        # (see gh-25433).
+                        error_message = (
+                            f"{_wn.category.__name__}: {str(_wn.message)}")
+                        msg = (
+                            "Polyfit sanity test emitted a warning, most "
+                            "likely due to using a buggy Accelerate backend."
+                            "\nIf you compiled yourself, more information is "
+                            "available at:"
+                            "\nhttps://numpy.org/devdocs/building/index.html"
+                            "\nOtherwise report this to the vendor "
+                            "that provided NumPy.\n\n{}\n".format(error_message))
+                        raise RuntimeError(msg)
+                del _wn
+            del w
+    del _mac_os_check
+
+    # We usually use madvise hugepages support, but on some old kernels it
+    # is slow and thus better avoided. Specifically, kernel version 4.6
+    # had a bug fix which probably fixed this:
+    # https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff
+    import os
+    use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None)
+    if sys.platform == "linux" and use_hugepage is None:
+        # If there is an issue with parsing the kernel version,
+        # set use_hugepage to 0. Using LooseVersion would handle the
+        # kernel version parsing better, but it is avoided since it
+        # would increase the import time. See #16679 for related discussion.
+        try:
+            use_hugepage = 1
+            kernel_version = os.uname().release.split(".")[:2]
+            kernel_version = tuple(int(v) for v in kernel_version)
+            if kernel_version < (4, 6):
+                use_hugepage = 0
+        except ValueError:
+            use_hugepage = 0
+    elif use_hugepage is None:
+        # This is not Linux, so it should not matter; just enable anyway
+        use_hugepage = 1
+    else:
+        use_hugepage = int(use_hugepage)
+
+    # Note that this will currently only make a difference on Linux
+    core.multiarray._set_madvise_hugepage(use_hugepage)
+    del use_hugepage
+
+    # Give a warning if NumPy is reloaded or imported on a sub-interpreter.
+    # We do this from Python, since the C-module may not be reloaded, and
+    # it keeps the organization tidier.
+    core.multiarray._multiarray_umath._reload_guard()
+
+    # default to "weak" promotion for "NumPy 2".
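+    # A hedged sketch of the user-visible difference between the two
+    # promotion states (NEP 50 semantics as of the 1.26 series; exact
+    # behavior is transitional and may differ across releases):
+    #
+    #   $ NPY_PROMOTION_STATE=weak python
+    #   >>> import numpy as np
+    #   >>> (np.ones(3, dtype=np.float32) + np.float64(1.0)).dtype
+    #   dtype('float64')    # "weak": the scalar's dtype is honoured;
+    #                       # legacy value-based casting kept float32 here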
+ core._set_promotion_state( + os.environ.get("NPY_PROMOTION_STATE", + "weak" if _using_numpy2_behavior() else "legacy")) + + # Tell PyInstaller where to find hook-numpy.py + def _pyinstaller_hooks_dir(): + from pathlib import Path + return [str(Path(__file__).with_name("_pyinstaller").resolve())] + + # Remove symbols imported for internal use + del os + + +# Remove symbols imported for internal use +del sys, warnings diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/__init__.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/__init__.pyi new file mode 100644 index 00000000..a185bfe7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/__init__.pyi @@ -0,0 +1,4422 @@ +import builtins +import sys +import os +import mmap +import ctypes as ct +import array as _array +import datetime as dt +import enum +from abc import abstractmethod +from types import TracebackType, MappingProxyType, GenericAlias +from contextlib import ContextDecorator +from contextlib import contextmanager + +from numpy._pytesttester import PytestTester +from numpy.core._internal import _ctypes + +from numpy._typing import ( + # Arrays + ArrayLike, + NDArray, + _SupportsArray, + _NestedSequence, + _FiniteNestedSequence, + _SupportsArray, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeNumber_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, + _ArrayLikeObject_co, + _ArrayLikeStr_co, + _ArrayLikeBytes_co, + _ArrayLikeUnknown, + _UnknownType, + + # DTypes + DTypeLike, + _DTypeLike, + _DTypeLikeVoid, + _SupportsDType, + _VoidDTypeLike, + + # Shapes + _Shape, + _ShapeLike, + + # Scalars + _CharLike_co, + _BoolLike_co, + _IntLike_co, + _FloatLike_co, + _ComplexLike_co, + _TD64Like_co, + _NumberLike_co, + _ScalarLike_co, + + # `number` precision + NBitBase, + _256Bit, + _128Bit, + _96Bit, + _80Bit, + _64Bit, + _32Bit, + _16Bit, + _8Bit, + _NBitByte, + _NBitShort, + _NBitIntC, + _NBitIntP, + _NBitInt, + _NBitLongLong, + _NBitHalf, + _NBitSingle, + _NBitDouble, + _NBitLongDouble, + + # Character codes + _BoolCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Complex64Codes, + _Complex128Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _IntCodes, + _LongLongCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _UIntCodes, + _ULongLongCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, + _DT64Codes, + _TD64Codes, + _StrCodes, + _BytesCodes, + _VoidCodes, + _ObjectCodes, + + # Ufuncs + _UFunc_Nin1_Nout1, + _UFunc_Nin2_Nout1, + _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout2, + _GUFunc_Nin2_Nout1, +) + +from numpy._typing._callable import ( + _BoolOp, + _BoolBitOp, + _BoolSub, + _BoolTrueDiv, + _BoolMod, + _BoolDivMod, + _TD64Div, + _IntTrueDiv, + _UnsignedIntOp, + _UnsignedIntBitOp, + _UnsignedIntMod, + _UnsignedIntDivMod, + _SignedIntOp, + _SignedIntBitOp, + _SignedIntMod, + _SignedIntDivMod, + _FloatOp, + _FloatMod, + _FloatDivMod, + _ComplexOp, + _NumberOp, + _ComparisonOp, +) + +# NOTE: Numpy's mypy plugin is used for removing the types unavailable +# to the specific platform +from numpy._typing._extended_precision import ( + uint128 as uint128, + uint256 as uint256, + int128 as int128, + int256 as int256, + float80 as float80, + float96 as float96, + float128 as float128, + float256 as float256, + complex160 as 
complex160, + complex192 as complex192, + complex256 as complex256, + complex512 as complex512, +) + +from collections.abc import ( + Callable, + Container, + Iterable, + Iterator, + Mapping, + Sequence, + Sized, +) +from typing import ( + Literal as L, + Any, + Generator, + Generic, + IO, + NoReturn, + overload, + SupportsComplex, + SupportsFloat, + SupportsInt, + TypeVar, + Union, + Protocol, + SupportsIndex, + Final, + final, + ClassVar, +) + +# Ensures that the stubs are picked up +from numpy import ( + ctypeslib as ctypeslib, + exceptions as exceptions, + fft as fft, + lib as lib, + linalg as linalg, + ma as ma, + polynomial as polynomial, + random as random, + testing as testing, + version as version, + exceptions as exceptions, + dtypes as dtypes, +) + +from numpy.core import defchararray, records +char = defchararray +rec = records + +from numpy.core.function_base import ( + linspace as linspace, + logspace as logspace, + geomspace as geomspace, +) + +from numpy.core.fromnumeric import ( + take as take, + reshape as reshape, + choose as choose, + repeat as repeat, + put as put, + swapaxes as swapaxes, + transpose as transpose, + partition as partition, + argpartition as argpartition, + sort as sort, + argsort as argsort, + argmax as argmax, + argmin as argmin, + searchsorted as searchsorted, + resize as resize, + squeeze as squeeze, + diagonal as diagonal, + trace as trace, + ravel as ravel, + nonzero as nonzero, + shape as shape, + compress as compress, + clip as clip, + sum as sum, + all as all, + any as any, + cumsum as cumsum, + ptp as ptp, + max as max, + min as min, + amax as amax, + amin as amin, + prod as prod, + cumprod as cumprod, + ndim as ndim, + size as size, + around as around, + round as round, + mean as mean, + std as std, + var as var, +) + +from numpy.core._asarray import ( + require as require, +) + +from numpy.core._type_aliases import ( + sctypes as sctypes, + sctypeDict as sctypeDict, +) + +from numpy.core._ufunc_config import ( + seterr as seterr, + geterr as geterr, + setbufsize as setbufsize, + getbufsize as getbufsize, + seterrcall as seterrcall, + geterrcall as geterrcall, + _ErrKind, + _ErrFunc, + _ErrDictOptional, +) + +from numpy.core.arrayprint import ( + set_printoptions as set_printoptions, + get_printoptions as get_printoptions, + array2string as array2string, + format_float_scientific as format_float_scientific, + format_float_positional as format_float_positional, + array_repr as array_repr, + array_str as array_str, + set_string_function as set_string_function, + printoptions as printoptions, +) + +from numpy.core.einsumfunc import ( + einsum as einsum, + einsum_path as einsum_path, +) + +from numpy.core.multiarray import ( + ALLOW_THREADS as ALLOW_THREADS, + BUFSIZE as BUFSIZE, + CLIP as CLIP, + MAXDIMS as MAXDIMS, + MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS, + MAY_SHARE_EXACT as MAY_SHARE_EXACT, + RAISE as RAISE, + WRAP as WRAP, + tracemalloc_domain as tracemalloc_domain, + array as array, + empty_like as empty_like, + empty as empty, + zeros as zeros, + concatenate as concatenate, + inner as inner, + where as where, + lexsort as lexsort, + can_cast as can_cast, + min_scalar_type as min_scalar_type, + result_type as result_type, + dot as dot, + vdot as vdot, + bincount as bincount, + copyto as copyto, + putmask as putmask, + packbits as packbits, + unpackbits as unpackbits, + shares_memory as shares_memory, + may_share_memory as may_share_memory, + asarray as asarray, + asanyarray as asanyarray, + ascontiguousarray as ascontiguousarray, + 
asfortranarray as asfortranarray, + arange as arange, + busday_count as busday_count, + busday_offset as busday_offset, + compare_chararrays as compare_chararrays, + datetime_as_string as datetime_as_string, + datetime_data as datetime_data, + frombuffer as frombuffer, + fromfile as fromfile, + fromiter as fromiter, + is_busday as is_busday, + promote_types as promote_types, + seterrobj as seterrobj, + geterrobj as geterrobj, + fromstring as fromstring, + frompyfunc as frompyfunc, + nested_iters as nested_iters, + flagsobj, +) + +from numpy.core.numeric import ( + zeros_like as zeros_like, + ones as ones, + ones_like as ones_like, + full as full, + full_like as full_like, + count_nonzero as count_nonzero, + isfortran as isfortran, + argwhere as argwhere, + flatnonzero as flatnonzero, + correlate as correlate, + convolve as convolve, + outer as outer, + tensordot as tensordot, + roll as roll, + rollaxis as rollaxis, + moveaxis as moveaxis, + cross as cross, + indices as indices, + fromfunction as fromfunction, + isscalar as isscalar, + binary_repr as binary_repr, + base_repr as base_repr, + identity as identity, + allclose as allclose, + isclose as isclose, + array_equal as array_equal, + array_equiv as array_equiv, +) + +from numpy.core.numerictypes import ( + maximum_sctype as maximum_sctype, + issctype as issctype, + obj2sctype as obj2sctype, + issubclass_ as issubclass_, + issubsctype as issubsctype, + issubdtype as issubdtype, + sctype2char as sctype2char, + nbytes as nbytes, + cast as cast, + ScalarType as ScalarType, + typecodes as typecodes, +) + +from numpy.core.shape_base import ( + atleast_1d as atleast_1d, + atleast_2d as atleast_2d, + atleast_3d as atleast_3d, + block as block, + hstack as hstack, + stack as stack, + vstack as vstack, +) + +from numpy.exceptions import ( + ComplexWarning as ComplexWarning, + ModuleDeprecationWarning as ModuleDeprecationWarning, + VisibleDeprecationWarning as VisibleDeprecationWarning, + TooHardError as TooHardError, + DTypePromotionError as DTypePromotionError, + AxisError as AxisError, +) + +from numpy.lib import ( + emath as emath, +) + +from numpy.lib.arraypad import ( + pad as pad, +) + +from numpy.lib.arraysetops import ( + ediff1d as ediff1d, + intersect1d as intersect1d, + setxor1d as setxor1d, + union1d as union1d, + setdiff1d as setdiff1d, + unique as unique, + in1d as in1d, + isin as isin, +) + +from numpy.lib.arrayterator import ( + Arrayterator as Arrayterator, +) + +from numpy.lib.function_base import ( + select as select, + piecewise as piecewise, + trim_zeros as trim_zeros, + copy as copy, + iterable as iterable, + percentile as percentile, + diff as diff, + gradient as gradient, + angle as angle, + unwrap as unwrap, + sort_complex as sort_complex, + disp as disp, + flip as flip, + rot90 as rot90, + extract as extract, + place as place, + asarray_chkfinite as asarray_chkfinite, + average as average, + bincount as bincount, + digitize as digitize, + cov as cov, + corrcoef as corrcoef, + median as median, + sinc as sinc, + hamming as hamming, + hanning as hanning, + bartlett as bartlett, + blackman as blackman, + kaiser as kaiser, + trapz as trapz, + i0 as i0, + add_newdoc as add_newdoc, + add_docstring as add_docstring, + meshgrid as meshgrid, + delete as delete, + insert as insert, + append as append, + interp as interp, + add_newdoc_ufunc as add_newdoc_ufunc, + quantile as quantile, +) + +from numpy.lib.histograms import ( + histogram_bin_edges as histogram_bin_edges, + histogram as histogram, + histogramdd as histogramdd, +) + 
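+# The pervasive `name as name` aliasing in the import blocks above and below
+# is deliberate: under PEP 484 stub semantics (and mypy's implicit-reexport
+# rules), only imports spelled `from m import x as x` mark `x` as a public
+# re-export of the stub. A minimal sketch of the convention, using a
+# hypothetical stub module (`pkg`, `helper`, and `_detail` are illustrative
+# names, not part of this file):
+#
+#   from pkg._impl import helper as helper   # re-exported: pkg.helper is public
+#   from pkg._impl import _detail            # internal: not re-exported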
+from numpy.lib.index_tricks import ( + ravel_multi_index as ravel_multi_index, + unravel_index as unravel_index, + mgrid as mgrid, + ogrid as ogrid, + r_ as r_, + c_ as c_, + s_ as s_, + index_exp as index_exp, + ix_ as ix_, + fill_diagonal as fill_diagonal, + diag_indices as diag_indices, + diag_indices_from as diag_indices_from, +) + +from numpy.lib.nanfunctions import ( + nansum as nansum, + nanmax as nanmax, + nanmin as nanmin, + nanargmax as nanargmax, + nanargmin as nanargmin, + nanmean as nanmean, + nanmedian as nanmedian, + nanpercentile as nanpercentile, + nanvar as nanvar, + nanstd as nanstd, + nanprod as nanprod, + nancumsum as nancumsum, + nancumprod as nancumprod, + nanquantile as nanquantile, +) + +from numpy.lib.npyio import ( + savetxt as savetxt, + loadtxt as loadtxt, + genfromtxt as genfromtxt, + recfromtxt as recfromtxt, + recfromcsv as recfromcsv, + load as load, + save as save, + savez as savez, + savez_compressed as savez_compressed, + packbits as packbits, + unpackbits as unpackbits, + fromregex as fromregex, +) + +from numpy.lib.polynomial import ( + poly as poly, + roots as roots, + polyint as polyint, + polyder as polyder, + polyadd as polyadd, + polysub as polysub, + polymul as polymul, + polydiv as polydiv, + polyval as polyval, + polyfit as polyfit, +) + +from numpy.lib.shape_base import ( + column_stack as column_stack, + row_stack as row_stack, + dstack as dstack, + array_split as array_split, + split as split, + hsplit as hsplit, + vsplit as vsplit, + dsplit as dsplit, + apply_over_axes as apply_over_axes, + expand_dims as expand_dims, + apply_along_axis as apply_along_axis, + kron as kron, + tile as tile, + get_array_wrap as get_array_wrap, + take_along_axis as take_along_axis, + put_along_axis as put_along_axis, +) + +from numpy.lib.stride_tricks import ( + broadcast_to as broadcast_to, + broadcast_arrays as broadcast_arrays, + broadcast_shapes as broadcast_shapes, +) + +from numpy.lib.twodim_base import ( + diag as diag, + diagflat as diagflat, + eye as eye, + fliplr as fliplr, + flipud as flipud, + tri as tri, + triu as triu, + tril as tril, + vander as vander, + histogram2d as histogram2d, + mask_indices as mask_indices, + tril_indices as tril_indices, + tril_indices_from as tril_indices_from, + triu_indices as triu_indices, + triu_indices_from as triu_indices_from, +) + +from numpy.lib.type_check import ( + mintypecode as mintypecode, + asfarray as asfarray, + real as real, + imag as imag, + iscomplex as iscomplex, + isreal as isreal, + iscomplexobj as iscomplexobj, + isrealobj as isrealobj, + nan_to_num as nan_to_num, + real_if_close as real_if_close, + typename as typename, + common_type as common_type, +) + +from numpy.lib.ufunclike import ( + fix as fix, + isposinf as isposinf, + isneginf as isneginf, +) + +from numpy.lib.utils import ( + issubclass_ as issubclass_, + issubsctype as issubsctype, + issubdtype as issubdtype, + deprecate as deprecate, + deprecate_with_doc as deprecate_with_doc, + get_include as get_include, + info as info, + source as source, + who as who, + lookfor as lookfor, + byte_bounds as byte_bounds, + safe_eval as safe_eval, + show_runtime as show_runtime, +) + +from numpy.matrixlib import ( + asmatrix as asmatrix, + mat as mat, + bmat as bmat, +) + +_AnyStr_contra = TypeVar("_AnyStr_contra", str, bytes, contravariant=True) + +# Protocol for representing file-like-objects accepted +# by `ndarray.tofile` and `fromfile` +class _IOProtocol(Protocol): + def flush(self) -> object: ... + def fileno(self) -> int: ... 
+ def tell(self) -> SupportsIndex: ... + def seek(self, offset: int, whence: int, /) -> object: ... + +# NOTE: `seek`, `write` and `flush` are technically only required +# for `readwrite`/`write` modes +class _MemMapIOProtocol(Protocol): + def flush(self) -> object: ... + def fileno(self) -> SupportsIndex: ... + def tell(self) -> int: ... + def seek(self, offset: int, whence: int, /) -> object: ... + def write(self, s: bytes, /) -> object: ... + @property + def read(self) -> object: ... + +class _SupportsWrite(Protocol[_AnyStr_contra]): + def write(self, s: _AnyStr_contra, /) -> object: ... + +__all__: list[str] +__path__: list[str] +__version__: str +test: PytestTester + +# TODO: Move placeholders to their respective module once +# their annotations are properly implemented +# +# Placeholders for classes + +def show_config() -> None: ... + +_NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray[Any, Any]) +_DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) +_ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I"] + +@final +class dtype(Generic[_DTypeScalar_co]): + names: None | tuple[builtins.str, ...] + def __hash__(self) -> int: ... + # Overload for subclass of generic + @overload + def __new__( + cls, + dtype: type[_DTypeScalar_co], + align: bool = ..., + copy: bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[_DTypeScalar_co]: ... + # Overloads for string aliases, Python types, and some assorted + # other special cases. Order is sometimes important because of the + # subtype relationships + # + # bool < int < float < complex < object + # + # so we have to make sure the overloads for the narrowest type is + # first. + # Builtin types + @overload + def __new__(cls, dtype: type[bool], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bool_]: ... + @overload + def __new__(cls, dtype: type[int], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int_]: ... + @overload + def __new__(cls, dtype: None | type[float], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float_]: ... + @overload + def __new__(cls, dtype: type[complex], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex_]: ... + @overload + def __new__(cls, dtype: type[builtins.str], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[str_]: ... + @overload + def __new__(cls, dtype: type[bytes], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... + + # `unsignedinteger` string-based representations and ctypes + @overload + def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint8]: ... + @overload + def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint16]: ... + @overload + def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint32]: ... + @overload + def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint64]: ... + @overload + def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ubyte]: ... 
+ @overload + def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ushort]: ... + @overload + def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintc]: ... + + # NOTE: We're assuming here that `uint_ptr_t == size_t`, + # an assumption that does not hold in rare cases (same for `ssize_t`) + @overload + def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintp]: ... + @overload + def __new__(cls, dtype: _UIntCodes | type[ct.c_ulong], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint]: ... + @overload + def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulonglong]: ... + + # `signedinteger` string-based representations and ctypes + @overload + def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int8]: ... + @overload + def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int16]: ... + @overload + def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int32]: ... + @overload + def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int64]: ... + @overload + def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[byte]: ... + @overload + def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[short]: ... + @overload + def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intc]: ... + @overload + def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intp]: ... + @overload + def __new__(cls, dtype: _IntCodes | type[ct.c_long], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int_]: ... + @overload + def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longlong]: ... + + # `floating` string-based representations and ctypes + @overload + def __new__(cls, dtype: _Float16Codes, align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float16]: ... + @overload + def __new__(cls, dtype: _Float32Codes, align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float32]: ... + @overload + def __new__(cls, dtype: _Float64Codes, align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ... + @overload + def __new__(cls, dtype: _HalfCodes, align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[half]: ... 
+ @overload + def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[single]: ... + @overload + def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[double]: ... + @overload + def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longdouble]: ... + + # `complexfloating` string-based representations + @overload + def __new__(cls, dtype: _Complex64Codes, align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex64]: ... + @overload + def __new__(cls, dtype: _Complex128Codes, align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ... + @overload + def __new__(cls, dtype: _CSingleCodes, align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[csingle]: ... + @overload + def __new__(cls, dtype: _CDoubleCodes, align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[cdouble]: ... + @overload + def __new__(cls, dtype: _CLongDoubleCodes, align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[clongdouble]: ... + + # Miscellaneous string-based representations and ctypes + @overload + def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bool_]: ... + @overload + def __new__(cls, dtype: _TD64Codes, align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[timedelta64]: ... + @overload + def __new__(cls, dtype: _DT64Codes, align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[datetime64]: ... + @overload + def __new__(cls, dtype: _StrCodes, align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[str_]: ... + @overload + def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... + @overload + def __new__(cls, dtype: _VoidCodes, align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[void]: ... + @overload + def __new__(cls, dtype: _ObjectCodes | type[ct.py_object[Any]], align: bool = ..., copy: bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[object_]: ... + + # dtype of a dtype is the same dtype + @overload + def __new__( + cls, + dtype: dtype[_DTypeScalar_co], + align: bool = ..., + copy: bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[_DTypeScalar_co]: ... + @overload + def __new__( + cls, + dtype: _SupportsDType[dtype[_DTypeScalar_co]], + align: bool = ..., + copy: bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[_DTypeScalar_co]: ... + # Handle strings that can't be expressed as literals; i.e. s1, s2, ... + @overload + def __new__( + cls, + dtype: builtins.str, + align: bool = ..., + copy: bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[Any]: ... + # Catchall overload for void-likes + @overload + def __new__( + cls, + dtype: _VoidDTypeLike, + align: bool = ..., + copy: bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[void]: ... 
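+    # A hedged sketch of how a type checker walks the overloads above
+    # (narrowest first, since bool < int < float < complex < object in the
+    # numeric tower; `reveal_type` output roughly as mypy would print it):
+    #
+    #   reveal_type(np.dtype("i4"))        # dtype[signedinteger[_32Bit]], via _Int32Codes
+    #   reveal_type(np.dtype(ct.c_float))  # dtype[floating[_32Bit]], via the ctypes overloads
+    #   reveal_type(np.dtype(object))      # dtype[object_], via the catchall below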
+ # Catchall overload for object-likes + @overload + def __new__( + cls, + dtype: type[object], + align: bool = ..., + copy: bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[object_]: ... + + def __class_getitem__(self, item: Any) -> GenericAlias: ... + + @overload + def __getitem__(self: dtype[void], key: list[builtins.str]) -> dtype[void]: ... + @overload + def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex) -> dtype[Any]: ... + + # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes + @overload + def __mul__(self: _DType, value: L[1]) -> _DType: ... + @overload + def __mul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ... + @overload + def __mul__(self, value: SupportsIndex) -> dtype[void]: ... + + # NOTE: `__rmul__` seems to be broken when used in combination with + # literals as of mypy 0.902. Set the return-type to `dtype[Any]` for + # now for non-flexible dtypes. + @overload + def __rmul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ... + @overload + def __rmul__(self, value: SupportsIndex) -> dtype[Any]: ... + + def __gt__(self, other: DTypeLike) -> bool: ... + def __ge__(self, other: DTypeLike) -> bool: ... + def __lt__(self, other: DTypeLike) -> bool: ... + def __le__(self, other: DTypeLike) -> bool: ... + + # Explicitly defined `__eq__` and `__ne__` to get around mypy's + # `strict_equality` option; even though their signatures are + # identical to their `object`-based counterpart + def __eq__(self, other: Any) -> bool: ... + def __ne__(self, other: Any) -> bool: ... + + @property + def alignment(self) -> int: ... + @property + def base(self) -> dtype[Any]: ... + @property + def byteorder(self) -> builtins.str: ... + @property + def char(self) -> builtins.str: ... + @property + def descr(self) -> list[tuple[builtins.str, builtins.str] | tuple[builtins.str, builtins.str, _Shape]]: ... + @property + def fields( + self, + ) -> None | MappingProxyType[builtins.str, tuple[dtype[Any], int] | tuple[dtype[Any], int, Any]]: ... + @property + def flags(self) -> int: ... + @property + def hasobject(self) -> bool: ... + @property + def isbuiltin(self) -> int: ... + @property + def isnative(self) -> bool: ... + @property + def isalignedstruct(self) -> bool: ... + @property + def itemsize(self) -> int: ... + @property + def kind(self) -> builtins.str: ... + @property + def metadata(self) -> None | MappingProxyType[builtins.str, Any]: ... + @property + def name(self) -> builtins.str: ... + @property + def num(self) -> int: ... + @property + def shape(self) -> _Shape: ... + @property + def ndim(self) -> int: ... + @property + def subdtype(self) -> None | tuple[dtype[Any], _Shape]: ... + def newbyteorder(self: _DType, __new_order: _ByteOrder = ...) -> _DType: ... + @property + def str(self) -> builtins.str: ... + @property + def type(self) -> type[_DTypeScalar_co]: ... + +_ArrayLikeInt = Union[ + int, + integer[Any], + Sequence[Union[int, integer[Any]]], + Sequence[Sequence[Any]], # TODO: wait for support for recursive types + ndarray[Any, Any] +] + +_FlatIterSelf = TypeVar("_FlatIterSelf", bound=flatiter[Any]) + +@final +class flatiter(Generic[_NdArraySubClass]): + __hash__: ClassVar[None] + @property + def base(self) -> _NdArraySubClass: ... + @property + def coords(self) -> _Shape: ... + @property + def index(self) -> int: ... + def copy(self) -> _NdArraySubClass: ... + def __iter__(self: _FlatIterSelf) -> _FlatIterSelf: ... + def __next__(self: flatiter[ndarray[Any, dtype[_ScalarType]]]) -> _ScalarType: ... 
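+    # Iteration is typed through the bound array's scalar type; a brief
+    # sketch of what a checker infers from the `__next__` signature above:
+    #
+    #   it = np.zeros((2, 2)).flat   # flatiter[ndarray[Any, dtype[float64]]]
+    #   next(it)                     # inferred as float64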
+ def __len__(self) -> int: ... + @overload + def __getitem__( + self: flatiter[ndarray[Any, dtype[_ScalarType]]], + key: int | integer[Any] | tuple[int | integer[Any]], + ) -> _ScalarType: ... + @overload + def __getitem__( + self, + key: _ArrayLikeInt | slice | ellipsis | tuple[_ArrayLikeInt | slice | ellipsis], + ) -> _NdArraySubClass: ... + # TODO: `__setitem__` operates via `unsafe` casting rules, and can + # thus accept any type accepted by the relevant underlying `np.generic` + # constructor. + # This means that `value` must in reality be a supertype of `npt.ArrayLike`. + def __setitem__( + self, + key: _ArrayLikeInt | slice | ellipsis | tuple[_ArrayLikeInt | slice | ellipsis], + value: Any, + ) -> None: ... + @overload + def __array__(self: flatiter[ndarray[Any, _DType]], dtype: None = ..., /) -> ndarray[Any, _DType]: ... + @overload + def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ... + +_OrderKACF = L[None, "K", "A", "C", "F"] +_OrderACF = L[None, "A", "C", "F"] +_OrderCF = L[None, "C", "F"] + +_ModeKind = L["raise", "wrap", "clip"] +_PartitionKind = L["introselect"] +_SortKind = L["quicksort", "mergesort", "heapsort", "stable"] +_SortSide = L["left", "right"] + +_ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon) + +class _ArrayOrScalarCommon: + @property + def T(self: _ArraySelf) -> _ArraySelf: ... + @property + def data(self) -> memoryview: ... + @property + def flags(self) -> flagsobj: ... + @property + def itemsize(self) -> int: ... + @property + def nbytes(self) -> int: ... + def __bool__(self) -> bool: ... + def __bytes__(self) -> bytes: ... + def __str__(self) -> str: ... + def __repr__(self) -> str: ... + def __copy__(self: _ArraySelf) -> _ArraySelf: ... + def __deepcopy__(self: _ArraySelf, memo: None | dict[int, Any], /) -> _ArraySelf: ... + + # TODO: How to deal with the non-commutative nature of `==` and `!=`? + # xref numpy/numpy#17368 + def __eq__(self, other: Any) -> Any: ... + def __ne__(self, other: Any) -> Any: ... + def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ... + def dump(self, file: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsWrite[bytes]) -> None: ... + def dumps(self) -> bytes: ... + def tobytes(self, order: _OrderKACF = ...) -> bytes: ... + # NOTE: `tostring()` is deprecated and therefore excluded + # def tostring(self, order=...): ... + def tofile( + self, + fid: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _IOProtocol, + sep: str = ..., + format: str = ..., + ) -> None: ... + # generics and 0d arrays return builtin scalars + def tolist(self) -> Any: ... + + @property + def __array_interface__(self) -> dict[str, Any]: ... + @property + def __array_priority__(self) -> float: ... + @property + def __array_struct__(self) -> Any: ... # builtins.PyCapsule + def __setstate__(self, state: tuple[ + SupportsIndex, # version + _ShapeLike, # Shape + _DType_co, # DType + bool, # F-continuous + bytes | list[Any], # Data + ], /) -> None: ... + # a `bool_` is returned when `keepdims=True` and `self` is a 0d array + + @overload + def all( + self, + axis: None = ..., + out: None = ..., + keepdims: L[False] = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> bool_: ... + @overload + def all( + self, + axis: None | _ShapeLike = ..., + out: None = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> Any: ... 
+ @overload + def all( + self, + axis: None | _ShapeLike = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def any( + self, + axis: None = ..., + out: None = ..., + keepdims: L[False] = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> bool_: ... + @overload + def any( + self, + axis: None | _ShapeLike = ..., + out: None = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def any( + self, + axis: None | _ShapeLike = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def argmax( + self, + axis: None = ..., + out: None = ..., + *, + keepdims: L[False] = ..., + ) -> intp: ... + @overload + def argmax( + self, + axis: SupportsIndex = ..., + out: None = ..., + *, + keepdims: bool = ..., + ) -> Any: ... + @overload + def argmax( + self, + axis: None | SupportsIndex = ..., + out: _NdArraySubClass = ..., + *, + keepdims: bool = ..., + ) -> _NdArraySubClass: ... + + @overload + def argmin( + self, + axis: None = ..., + out: None = ..., + *, + keepdims: L[False] = ..., + ) -> intp: ... + @overload + def argmin( + self, + axis: SupportsIndex = ..., + out: None = ..., + *, + keepdims: bool = ..., + ) -> Any: ... + @overload + def argmin( + self, + axis: None | SupportsIndex = ..., + out: _NdArraySubClass = ..., + *, + keepdims: bool = ..., + ) -> _NdArraySubClass: ... + + def argsort( + self, + axis: None | SupportsIndex = ..., + kind: None | _SortKind = ..., + order: None | str | Sequence[str] = ..., + ) -> ndarray[Any, Any]: ... + + @overload + def choose( + self, + choices: ArrayLike, + out: None = ..., + mode: _ModeKind = ..., + ) -> ndarray[Any, Any]: ... + @overload + def choose( + self, + choices: ArrayLike, + out: _NdArraySubClass = ..., + mode: _ModeKind = ..., + ) -> _NdArraySubClass: ... + + @overload + def clip( + self, + min: ArrayLike = ..., + max: None | ArrayLike = ..., + out: None = ..., + **kwargs: Any, + ) -> ndarray[Any, Any]: ... + @overload + def clip( + self, + min: None = ..., + max: ArrayLike = ..., + out: None = ..., + **kwargs: Any, + ) -> ndarray[Any, Any]: ... + @overload + def clip( + self, + min: ArrayLike = ..., + max: None | ArrayLike = ..., + out: _NdArraySubClass = ..., + **kwargs: Any, + ) -> _NdArraySubClass: ... + @overload + def clip( + self, + min: None = ..., + max: ArrayLike = ..., + out: _NdArraySubClass = ..., + **kwargs: Any, + ) -> _NdArraySubClass: ... + + @overload + def compress( + self, + a: ArrayLike, + axis: None | SupportsIndex = ..., + out: None = ..., + ) -> ndarray[Any, Any]: ... + @overload + def compress( + self, + a: ArrayLike, + axis: None | SupportsIndex = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + def conj(self: _ArraySelf) -> _ArraySelf: ... + + def conjugate(self: _ArraySelf) -> _ArraySelf: ... + + @overload + def cumprod( + self, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> ndarray[Any, Any]: ... + @overload + def cumprod( + self, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + @overload + def cumsum( + self, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> ndarray[Any, Any]: ... 
+ @overload + def cumsum( + self, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + @overload + def max( + self, + axis: None | _ShapeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def max( + self, + axis: None | _ShapeLike = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def mean( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def mean( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def min( + self, + axis: None | _ShapeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def min( + self, + axis: None | _ShapeLike = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + def newbyteorder( + self: _ArraySelf, + __new_order: _ByteOrder = ..., + ) -> _ArraySelf: ... + + @overload + def prod( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def prod( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def ptp( + self, + axis: None | _ShapeLike = ..., + out: None = ..., + keepdims: bool = ..., + ) -> Any: ... + @overload + def ptp( + self, + axis: None | _ShapeLike = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + ) -> _NdArraySubClass: ... + + @overload + def round( + self: _ArraySelf, + decimals: SupportsIndex = ..., + out: None = ..., + ) -> _ArraySelf: ... + @overload + def round( + self, + decimals: SupportsIndex = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + @overload + def std( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def std( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + + @overload + def sum( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def sum( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... 
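+    # The two-overload pattern used for `sum` above, and repeated for most
+    # reductions in this class, is what lets `out=` propagate the concrete
+    # array subclass. A hedged sketch of the inference:
+    #
+    #   buf = np.empty((), dtype=np.float64)
+    #   np.ones(3).sum()          # checker infers Any (scalar or array)
+    #   np.ones(3).sum(out=buf)   # checker infers the type of `buf` itself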
+ + @overload + def var( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + @overload + def var( + self, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., + ) -> _NdArraySubClass: ... + +_DType = TypeVar("_DType", bound=dtype[Any]) +_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) +_FlexDType = TypeVar("_FlexDType", bound=dtype[flexible]) + +# TODO: Set the `bound` to something more suitable once we +# have proper shape support +_ShapeType = TypeVar("_ShapeType", bound=Any) +_ShapeType2 = TypeVar("_ShapeType2", bound=Any) +_NumberType = TypeVar("_NumberType", bound=number[Any]) + +if sys.version_info >= (3, 12): + from collections.abc import Buffer as _SupportsBuffer +else: + _SupportsBuffer = ( + bytes + | bytearray + | memoryview + | _array.array[Any] + | mmap.mmap + | NDArray[Any] + | generic + ) + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +_T_contra = TypeVar("_T_contra", contravariant=True) +_2Tuple = tuple[_T, _T] +_CastingKind = L["no", "equiv", "safe", "same_kind", "unsafe"] + +_ArrayUInt_co = NDArray[Union[bool_, unsignedinteger[Any]]] +_ArrayInt_co = NDArray[Union[bool_, integer[Any]]] +_ArrayFloat_co = NDArray[Union[bool_, integer[Any], floating[Any]]] +_ArrayComplex_co = NDArray[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]] +_ArrayNumber_co = NDArray[Union[bool_, number[Any]]] +_ArrayTD64_co = NDArray[Union[bool_, integer[Any], timedelta64]] + +# Introduce an alias for `dtype` to avoid naming conflicts. +_dtype = dtype + +# `builtins.PyCapsule` unfortunately lacks annotations as of the moment; +# use `Any` as a stopgap measure +_PyCapsule = Any + +class _SupportsItem(Protocol[_T_co]): + def item(self, args: Any, /) -> _T_co: ... + +class _SupportsReal(Protocol[_T_co]): + @property + def real(self) -> _T_co: ... + +class _SupportsImag(Protocol[_T_co]): + @property + def imag(self) -> _T_co: ... + +class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): + __hash__: ClassVar[None] + @property + def base(self) -> None | ndarray[Any, Any]: ... + @property + def ndim(self) -> int: ... + @property + def size(self) -> int: ... + @property + def real( + self: ndarray[_ShapeType, dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var] + ) -> ndarray[_ShapeType, _dtype[_ScalarType]]: ... + @real.setter + def real(self, value: ArrayLike) -> None: ... + @property + def imag( + self: ndarray[_ShapeType, dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var] + ) -> ndarray[_ShapeType, _dtype[_ScalarType]]: ... + @imag.setter + def imag(self, value: ArrayLike) -> None: ... + def __new__( + cls: type[_ArraySelf], + shape: _ShapeLike, + dtype: DTypeLike = ..., + buffer: None | _SupportsBuffer = ..., + offset: SupportsIndex = ..., + strides: None | _ShapeLike = ..., + order: _OrderKACF = ..., + ) -> _ArraySelf: ... + + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + + def __class_getitem__(self, item: Any) -> GenericAlias: ... + + @overload + def __array__(self, dtype: None = ..., /) -> ndarray[Any, _DType_co]: ... + @overload + def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ... 
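+    # `__array__` is typed so the dtype parameter survives the call: with no
+    # argument the array's own `_DType_co` is preserved, while an explicit
+    # dtype re-parameterises the result. A hedged sketch:
+    #
+    #   a = np.zeros(3, dtype=np.int32)
+    #   a.__array__()                # ndarray[Any, dtype[int32]]
+    #   a.__array__(np.dtype("f8"))  # ndarray[Any, dtype[float64]]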
+ + def __array_ufunc__( + self, + ufunc: ufunc, + method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + + def __array_function__( + self, + func: Callable[..., Any], + types: Iterable[type], + args: Iterable[Any], + kwargs: Mapping[str, Any], + ) -> Any: ... + + # NOTE: In practice any object is accepted by `obj`, but as `__array_finalize__` + # is a pseudo-abstract method the type has been narrowed down in order to + # grant subclasses a bit more flexibility + def __array_finalize__(self, obj: None | NDArray[Any], /) -> None: ... + + def __array_wrap__( + self, + array: ndarray[_ShapeType2, _DType], + context: None | tuple[ufunc, tuple[Any, ...], int] = ..., + /, + ) -> ndarray[_ShapeType2, _DType]: ... + + def __array_prepare__( + self, + array: ndarray[_ShapeType2, _DType], + context: None | tuple[ufunc, tuple[Any, ...], int] = ..., + /, + ) -> ndarray[_ShapeType2, _DType]: ... + + @overload + def __getitem__(self, key: ( + NDArray[integer[Any]] + | NDArray[bool_] + | tuple[NDArray[integer[Any]] | NDArray[bool_], ...] + )) -> ndarray[Any, _DType_co]: ... + @overload + def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...]) -> Any: ... + @overload + def __getitem__(self, key: ( + None + | slice + | ellipsis + | SupportsIndex + | _ArrayLikeInt_co + | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] + )) -> ndarray[Any, _DType_co]: ... + @overload + def __getitem__(self: NDArray[void], key: str) -> NDArray[Any]: ... + @overload + def __getitem__(self: NDArray[void], key: list[str]) -> ndarray[_ShapeType, _dtype[void]]: ... + + @property + def ctypes(self) -> _ctypes[int]: ... + @property + def shape(self) -> _Shape: ... + @shape.setter + def shape(self, value: _ShapeLike) -> None: ... + @property + def strides(self) -> _Shape: ... + @strides.setter + def strides(self, value: _ShapeLike) -> None: ... + def byteswap(self: _ArraySelf, inplace: bool = ...) -> _ArraySelf: ... + def fill(self, value: Any) -> None: ... + @property + def flat(self: _NdArraySubClass) -> flatiter[_NdArraySubClass]: ... + + # Use the same output type as that of the underlying `generic` + @overload + def item( + self: ndarray[Any, _dtype[_SupportsItem[_T]]], # type: ignore[type-var] + *args: SupportsIndex, + ) -> _T: ... + @overload + def item( + self: ndarray[Any, _dtype[_SupportsItem[_T]]], # type: ignore[type-var] + args: tuple[SupportsIndex, ...], + /, + ) -> _T: ... + + @overload + def itemset(self, value: Any, /) -> None: ... + @overload + def itemset(self, item: _ShapeLike, value: Any, /) -> None: ... + + @overload + def resize(self, new_shape: _ShapeLike, /, *, refcheck: bool = ...) -> None: ... + @overload + def resize(self, *new_shape: SupportsIndex, refcheck: bool = ...) -> None: ... + + def setflags( + self, write: bool = ..., align: bool = ..., uic: bool = ... + ) -> None: ... + + def squeeze( + self, + axis: None | SupportsIndex | tuple[SupportsIndex, ...] = ..., + ) -> ndarray[Any, _DType_co]: ... + + def swapaxes( + self, + axis1: SupportsIndex, + axis2: SupportsIndex, + ) -> ndarray[Any, _DType_co]: ... + + @overload + def transpose(self: _ArraySelf, axes: None | _ShapeLike, /) -> _ArraySelf: ... + @overload + def transpose(self: _ArraySelf, *axes: SupportsIndex) -> _ArraySelf: ... + + def argpartition( + self, + kth: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + kind: _PartitionKind = ..., + order: None | str | Sequence[str] = ..., + ) -> ndarray[Any, _dtype[intp]]: ... 
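+    # A short usage sketch for `argpartition` just above (values are
+    # illustrative; ordering within each partition is unspecified):
+    #
+    #   a = np.array([9, 1, 5, 3])
+    #   idx = a.argpartition(2)      # typed ndarray[Any, dtype[intp]]
+    #   a[idx][2]                    # -> 5, the element that lands at index 2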
+ + def diagonal( + self, + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + ) -> ndarray[Any, _DType_co]: ... + + # 1D + 1D returns a scalar; + # all other with at least 1 non-0D array return an ndarray. + @overload + def dot(self, b: _ScalarLike_co, out: None = ...) -> ndarray[Any, Any]: ... + @overload + def dot(self, b: ArrayLike, out: None = ...) -> Any: ... # type: ignore[misc] + @overload + def dot(self, b: ArrayLike, out: _NdArraySubClass) -> _NdArraySubClass: ... + + # `nonzero()` is deprecated for 0d arrays/generics + def nonzero(self) -> tuple[ndarray[Any, _dtype[intp]], ...]: ... + + def partition( + self, + kth: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + kind: _PartitionKind = ..., + order: None | str | Sequence[str] = ..., + ) -> None: ... + + # `put` is technically available to `generic`, + # but is pointless as `generic`s are immutable + def put( + self, + ind: _ArrayLikeInt_co, + v: ArrayLike, + mode: _ModeKind = ..., + ) -> None: ... + + @overload + def searchsorted( # type: ignore[misc] + self, # >= 1D array + v: _ScalarLike_co, # 0D array-like + side: _SortSide = ..., + sorter: None | _ArrayLikeInt_co = ..., + ) -> intp: ... + @overload + def searchsorted( + self, # >= 1D array + v: ArrayLike, + side: _SortSide = ..., + sorter: None | _ArrayLikeInt_co = ..., + ) -> ndarray[Any, _dtype[intp]]: ... + + def setfield( + self, + val: ArrayLike, + dtype: DTypeLike, + offset: SupportsIndex = ..., + ) -> None: ... + + def sort( + self, + axis: SupportsIndex = ..., + kind: None | _SortKind = ..., + order: None | str | Sequence[str] = ..., + ) -> None: ... + + @overload + def trace( + self, # >= 2D array + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> Any: ... + @overload + def trace( + self, # >= 2D array + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _NdArraySubClass = ..., + ) -> _NdArraySubClass: ... + + @overload + def take( # type: ignore[misc] + self: ndarray[Any, _dtype[_ScalarType]], + indices: _IntLike_co, + axis: None | SupportsIndex = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> _ScalarType: ... + @overload + def take( # type: ignore[misc] + self, + indices: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> ndarray[Any, _DType_co]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + out: _NdArraySubClass = ..., + mode: _ModeKind = ..., + ) -> _NdArraySubClass: ... + + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + ) -> ndarray[Any, _DType_co]: ... + + def flatten( + self, + order: _OrderKACF = ..., + ) -> ndarray[Any, _DType_co]: ... + + def ravel( + self, + order: _OrderKACF = ..., + ) -> ndarray[Any, _DType_co]: ... + + @overload + def reshape( + self, shape: _ShapeLike, /, *, order: _OrderACF = ... + ) -> ndarray[Any, _DType_co]: ... + @overload + def reshape( + self, *shape: SupportsIndex, order: _OrderACF = ... + ) -> ndarray[Any, _DType_co]: ... + + @overload + def astype( + self, + dtype: _DTypeLike[_ScalarType], + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: bool = ..., + copy: bool | _CopyMode = ..., + ) -> NDArray[_ScalarType]: ... 
+ @overload + def astype( + self, + dtype: DTypeLike, + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: bool = ..., + copy: bool | _CopyMode = ..., + ) -> NDArray[Any]: ... + + @overload + def view(self: _ArraySelf) -> _ArraySelf: ... + @overload + def view(self, type: type[_NdArraySubClass]) -> _NdArraySubClass: ... + @overload + def view(self, dtype: _DTypeLike[_ScalarType]) -> NDArray[_ScalarType]: ... + @overload + def view(self, dtype: DTypeLike) -> NDArray[Any]: ... + @overload + def view( + self, + dtype: DTypeLike, + type: type[_NdArraySubClass], + ) -> _NdArraySubClass: ... + + @overload + def getfield( + self, + dtype: _DTypeLike[_ScalarType], + offset: SupportsIndex = ... + ) -> NDArray[_ScalarType]: ... + @overload + def getfield( + self, + dtype: DTypeLike, + offset: SupportsIndex = ... + ) -> NDArray[Any]: ... + + # Dispatch to the underlying `generic` via protocols + def __int__( + self: ndarray[Any, _dtype[SupportsInt]], # type: ignore[type-var] + ) -> int: ... + + def __float__( + self: ndarray[Any, _dtype[SupportsFloat]], # type: ignore[type-var] + ) -> float: ... + + def __complex__( + self: ndarray[Any, _dtype[SupportsComplex]], # type: ignore[type-var] + ) -> complex: ... + + def __index__( + self: ndarray[Any, _dtype[SupportsIndex]], # type: ignore[type-var] + ) -> int: ... + + def __len__(self) -> int: ... + def __setitem__(self, key, value): ... + def __iter__(self) -> Any: ... + def __contains__(self, key) -> bool: ... + + # The last overload is for catching recursive objects whose + # nesting is too deep. + # The first overload is for catching `bytes` (as they are a subtype of + # `Sequence[int]`) and `str`. As `str` is a recursive sequence of + # strings, it will pass through the final overload otherwise + + @overload + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ... + @overload + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ... + @overload + def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ... + @overload + def __lt__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ... + @overload + def __lt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ... + + @overload + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ... + @overload + def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ... + @overload + def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ... + @overload + def __le__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ... + @overload + def __le__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ... + + @overload + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ... + @overload + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ... + @overload + def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ... + @overload + def __gt__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ... + @overload + def __gt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ... + + @overload + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ... + @overload + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ... + @overload + def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ... 
+ @overload
+ def __ge__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ...
+ @overload
+ def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ...
+
+ # Unary ops
+ @overload
+ def __abs__(self: NDArray[bool_]) -> NDArray[bool_]: ...
+ @overload
+ def __abs__(self: NDArray[complexfloating[_NBit1, _NBit1]]) -> NDArray[floating[_NBit1]]: ...
+ @overload
+ def __abs__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ...
+ @overload
+ def __abs__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ...
+ @overload
+ def __abs__(self: NDArray[object_]) -> Any: ...
+
+ @overload
+ def __invert__(self: NDArray[bool_]) -> NDArray[bool_]: ...
+ @overload
+ def __invert__(self: NDArray[_IntType]) -> NDArray[_IntType]: ...
+ @overload
+ def __invert__(self: NDArray[object_]) -> Any: ...
+
+ @overload
+ def __pos__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ...
+ @overload
+ def __pos__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ...
+ @overload
+ def __pos__(self: NDArray[object_]) -> Any: ...
+
+ @overload
+ def __neg__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ...
+ @overload
+ def __neg__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ...
+ @overload
+ def __neg__(self: NDArray[object_]) -> Any: ...
+
+ # Binary ops
+ @overload
+ def __matmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def __matmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __matmul__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rmatmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def __rmatmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __rmatmul__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __mod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __mod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ...
+ @overload
+ def __mod__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ...
+ @overload
+ def __rmod__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __divmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc]
+ @overload
+ def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc]
+ @overload
+ def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc]
+ @overload
+ def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc]
+ @overload
+ def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ...
+
+ @overload
+ def __rdivmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc]
+ @overload
+ def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc]
+ @overload
+ def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc]
+ @overload
+ def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc]
+ @overload
+ def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ...
+
+ @overload
+ def __add__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __add__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
+ @overload
+ def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __add__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __add__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __radd__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __radd__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
+ @overload
+ def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __radd__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __sub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+ @overload
+ def __sub__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NoReturn: ...
+ @overload
+ def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __sub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
+ @overload
+ def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __sub__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rsub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+ @overload
+ def __rsub__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NoReturn: ...
+ @overload
+ def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __rsub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
+ @overload
+ def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... # type: ignore[misc]
+ @overload
+ def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __rsub__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __mul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __mul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __mul__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __rmul__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __floordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __floordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[int64]: ...
+ @overload
+ def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
+ @overload
+ def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __floordiv__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rfloordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[int64]: ...
+ @overload
+ def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ...
+ @overload
+ def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __rfloordiv__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __pow__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def __pow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __pow__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rpow__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def __rpow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __rpow__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc]
+ @overload
+ def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __truediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __truediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ...
+ @overload
+ def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
+ @overload
+ def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __truediv__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc]
+ @overload
+ def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __rtruediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __rtruediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ...
+ @overload
+ def __rtruediv__(self: NDArray[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ...
+ @overload
+ def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __rtruediv__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __lshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __lshift__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rlshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __rlshift__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __rshift__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rrshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __rrshift__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __and__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __and__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __and__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rand__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __rand__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __xor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __xor__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rxor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __rxor__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __or__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __or__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __or__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __ror__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __ror__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ # `np.generic` does not support inplace operations
+
+ # NOTE: Inplace ops generally use "same_kind" casting w.r.t. the left
+ # operand. An exception to this rule are unsigned integers, which also
+ # accept a signed integer for the right operand as long as it is a 0D
+ # object and its value is >= 0 (see the editor's usage sketch below,
+ # after `__ilshift__`)
+ @overload
+ def __iadd__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ...
+ @overload
+ def __iadd__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __iadd__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __iadd__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+ @overload
+ def __iadd__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+ @overload
+ def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __iadd__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ @overload
+ def __isub__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __isub__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __isub__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+ @overload
+ def __isub__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+ @overload
+ def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __isub__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ @overload
+ def __imul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ...
+ @overload
+ def __imul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __imul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __imul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+ @overload + def __imul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + @overload + def __imul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __itruediv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __itruediv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ... + @overload + def __itruediv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __ifloordiv__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ifloordiv__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ifloordiv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __ifloordiv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + @overload + def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ... + @overload + def __ifloordiv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __ipow__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ipow__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ipow__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __ipow__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __ipow__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __imod__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __imod__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __imod__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __imod__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + @overload + def __imod__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __ilshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ilshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ilshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... 
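+
+ # (Editor's sketch, not part of the upstream stubs.) A doctest-style
+ # illustration of the in-place casting rule noted above `__iadd__`:
+ # in-place ops cast the result back to the left operand's dtype under
+ # "same_kind" casting, and an unsigned-integer array additionally
+ # accepts a 0D signed-integer right operand when its value is >= 0.
+ # Assuming a NumPy matching these stubs:
+ #
+ #   >>> import numpy as np
+ #   >>> a = np.zeros(3, dtype=np.uint8)
+ #   >>> a += np.int64(2)   # accepted: 0D signed operand with value >= 0
+ #   >>> a += np.array([1, 1, 1], dtype=np.int64)  # rejected at runtime:
+ #   ...                    # int64 -> uint8 violates "same_kind" casting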
+ + @overload + def __irshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __irshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __irshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __iand__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... + @overload + def __iand__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __iand__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __iand__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __ixor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... + @overload + def __ixor__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ixor__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ixor__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __ior__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... + @overload + def __ior__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + @overload + def __imatmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... + @overload + def __imatmul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + @overload + def __imatmul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + @overload + def __imatmul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + @overload + def __imatmul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + @overload + def __imatmul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + + def __dlpack__(self: NDArray[number[Any]], *, stream: None = ...) -> _PyCapsule: ... + def __dlpack_device__(self) -> tuple[int, L[0]]: ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DType_co: ... + +# NOTE: while `np.generic` is not technically an instance of `ABCMeta`, +# the `@abstractmethod` decorator is herein used to (forcefully) deny +# the creation of `np.generic` instances. +# The `# type: ignore` comments are necessary to silence mypy errors regarding +# the missing `ABCMeta` metaclass. + +# See https://github.com/numpy/numpy-stubs/pull/80 for more details. + +_ScalarType = TypeVar("_ScalarType", bound=generic) +_NBit1 = TypeVar("_NBit1", bound=NBitBase) +_NBit2 = TypeVar("_NBit2", bound=NBitBase) + +class generic(_ArrayOrScalarCommon): + @abstractmethod + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + @overload + def __array__(self: _ScalarType, dtype: None = ..., /) -> ndarray[Any, _dtype[_ScalarType]]: ... 
+ @overload + def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ... + def __hash__(self) -> int: ... + @property + def base(self) -> None: ... + @property + def ndim(self) -> L[0]: ... + @property + def size(self) -> L[1]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def strides(self) -> tuple[()]: ... + def byteswap(self: _ScalarType, inplace: L[False] = ...) -> _ScalarType: ... + @property + def flat(self: _ScalarType) -> flatiter[ndarray[Any, _dtype[_ScalarType]]]: ... + + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + + @overload + def astype( + self, + dtype: _DTypeLike[_ScalarType], + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: bool = ..., + copy: bool | _CopyMode = ..., + ) -> _ScalarType: ... + @overload + def astype( + self, + dtype: DTypeLike, + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: bool = ..., + copy: bool | _CopyMode = ..., + ) -> Any: ... + + # NOTE: `view` will perform a 0D->scalar cast, + # thus the array `type` is irrelevant to the output type + @overload + def view( + self: _ScalarType, + type: type[ndarray[Any, Any]] = ..., + ) -> _ScalarType: ... + @overload + def view( + self, + dtype: _DTypeLike[_ScalarType], + type: type[ndarray[Any, Any]] = ..., + ) -> _ScalarType: ... + @overload + def view( + self, + dtype: DTypeLike, + type: type[ndarray[Any, Any]] = ..., + ) -> Any: ... + + @overload + def getfield( + self, + dtype: _DTypeLike[_ScalarType], + offset: SupportsIndex = ... + ) -> _ScalarType: ... + @overload + def getfield( + self, + dtype: DTypeLike, + offset: SupportsIndex = ... + ) -> Any: ... + + def item( + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, + ) -> Any: ... + + @overload + def take( # type: ignore[misc] + self: _ScalarType, + indices: _IntLike_co, + axis: None | SupportsIndex = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> _ScalarType: ... + @overload + def take( # type: ignore[misc] + self: _ScalarType, + indices: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> ndarray[Any, _dtype[_ScalarType]]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + out: _NdArraySubClass = ..., + mode: _ModeKind = ..., + ) -> _NdArraySubClass: ... + + def repeat( + self: _ScalarType, + repeats: _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., + ) -> ndarray[Any, _dtype[_ScalarType]]: ... + + def flatten( + self: _ScalarType, + order: _OrderKACF = ..., + ) -> ndarray[Any, _dtype[_ScalarType]]: ... + + def ravel( + self: _ScalarType, + order: _OrderKACF = ..., + ) -> ndarray[Any, _dtype[_ScalarType]]: ... + + @overload + def reshape( + self: _ScalarType, shape: _ShapeLike, /, *, order: _OrderACF = ... + ) -> ndarray[Any, _dtype[_ScalarType]]: ... + @overload + def reshape( + self: _ScalarType, *shape: SupportsIndex, order: _OrderACF = ... + ) -> ndarray[Any, _dtype[_ScalarType]]: ... + + def squeeze( + self: _ScalarType, axis: None | L[0] | tuple[()] = ... + ) -> _ScalarType: ... + def transpose(self: _ScalarType, axes: None | tuple[()] = ..., /) -> _ScalarType: ... + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self: _ScalarType) -> _dtype[_ScalarType]: ... + +class number(generic, Generic[_NBit1]): # type: ignore + @property + def real(self: _ArraySelf) -> _ArraySelf: ... + @property + def imag(self: _ArraySelf) -> _ArraySelf: ... 
+ def __class_getitem__(self, item: Any) -> GenericAlias: ...
+ def __int__(self) -> int: ...
+ def __float__(self) -> float: ...
+ def __complex__(self) -> complex: ...
+ def __neg__(self: _ArraySelf) -> _ArraySelf: ...
+ def __pos__(self: _ArraySelf) -> _ArraySelf: ...
+ def __abs__(self: _ArraySelf) -> _ArraySelf: ...
+ # Ensure that objects annotated as `number` support arithmetic operations
+ __add__: _NumberOp
+ __radd__: _NumberOp
+ __sub__: _NumberOp
+ __rsub__: _NumberOp
+ __mul__: _NumberOp
+ __rmul__: _NumberOp
+ __floordiv__: _NumberOp
+ __rfloordiv__: _NumberOp
+ __pow__: _NumberOp
+ __rpow__: _NumberOp
+ __truediv__: _NumberOp
+ __rtruediv__: _NumberOp
+ __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+
+class bool_(generic):
+ def __init__(self, value: object = ..., /) -> None: ...
+ def item(
+ self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /,
+ ) -> bool: ...
+ def tolist(self) -> bool: ...
+ @property
+ def real(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def imag(self: _ArraySelf) -> _ArraySelf: ...
+ def __int__(self) -> int: ...
+ def __float__(self) -> float: ...
+ def __complex__(self) -> complex: ...
+ def __abs__(self: _ArraySelf) -> _ArraySelf: ...
+ __add__: _BoolOp[bool_]
+ __radd__: _BoolOp[bool_]
+ __sub__: _BoolSub
+ __rsub__: _BoolSub
+ __mul__: _BoolOp[bool_]
+ __rmul__: _BoolOp[bool_]
+ __floordiv__: _BoolOp[int8]
+ __rfloordiv__: _BoolOp[int8]
+ __pow__: _BoolOp[int8]
+ __rpow__: _BoolOp[int8]
+ __truediv__: _BoolTrueDiv
+ __rtruediv__: _BoolTrueDiv
+ def __invert__(self) -> bool_: ...
+ __lshift__: _BoolBitOp[int8]
+ __rlshift__: _BoolBitOp[int8]
+ __rshift__: _BoolBitOp[int8]
+ __rrshift__: _BoolBitOp[int8]
+ __and__: _BoolBitOp[bool_]
+ __rand__: _BoolBitOp[bool_]
+ __xor__: _BoolBitOp[bool_]
+ __rxor__: _BoolBitOp[bool_]
+ __or__: _BoolBitOp[bool_]
+ __ror__: _BoolBitOp[bool_]
+ __mod__: _BoolMod
+ __rmod__: _BoolMod
+ __divmod__: _BoolDivMod
+ __rdivmod__: _BoolDivMod
+ __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+
+class object_(generic):
+ def __init__(self, value: object = ..., /) -> None: ...
+ @property
+ def real(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def imag(self: _ArraySelf) -> _ArraySelf: ...
+ # The 3 protocols below may or may not raise,
+ # depending on the underlying object
+ def __int__(self) -> int: ...
+ def __float__(self) -> float: ...
+ def __complex__(self) -> complex: ...
+
+ if sys.version_info >= (3, 12):
+ def __release_buffer__(self, buffer: memoryview, /) -> None: ...
+
+# The `datetime64` constructor requires an object with the three attributes below,
+# and thus supports datetime duck typing
+class _DatetimeScalar(Protocol):
+ @property
+ def day(self) -> int: ...
+ @property
+ def month(self) -> int: ...
+ @property
+ def year(self) -> int: ...
+
+# TODO: `item`/`tolist` returns either `dt.date`, `dt.datetime` or `int`
+# depending on the unit
+class datetime64(generic):
+ @overload
+ def __init__(
+ self,
+ value: None | datetime64 | _CharLike_co | _DatetimeScalar = ...,
+ format: _CharLike_co | tuple[_CharLike_co, _IntLike_co] = ...,
+ /,
+ ) -> None: ...
+ @overload + def __init__( + self, + value: int, + format: _CharLike_co | tuple[_CharLike_co, _IntLike_co], + /, + ) -> None: ... + def __add__(self, other: _TD64Like_co) -> datetime64: ... + def __radd__(self, other: _TD64Like_co) -> datetime64: ... + @overload + def __sub__(self, other: datetime64) -> timedelta64: ... + @overload + def __sub__(self, other: _TD64Like_co) -> datetime64: ... + def __rsub__(self, other: datetime64) -> timedelta64: ... + __lt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + __le__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + __gt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + __ge__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + +_IntValue = Union[SupportsInt, _CharLike_co, SupportsIndex] +_FloatValue = Union[None, _CharLike_co, SupportsFloat, SupportsIndex] +_ComplexValue = Union[ + None, + _CharLike_co, + SupportsFloat, + SupportsComplex, + SupportsIndex, + complex, # `complex` is not a subtype of `SupportsComplex` +] + +class integer(number[_NBit1]): # type: ignore + @property + def numerator(self: _ScalarType) -> _ScalarType: ... + @property + def denominator(self) -> L[1]: ... + @overload + def __round__(self, ndigits: None = ...) -> int: ... + @overload + def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ... + + # NOTE: `__index__` is technically defined in the bottom-most + # sub-classes (`int64`, `uint32`, etc) + def item( + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, + ) -> int: ... + def tolist(self) -> int: ... + def is_integer(self) -> L[True]: ... + def bit_count(self: _ScalarType) -> int: ... + def __index__(self) -> int: ... + __truediv__: _IntTrueDiv[_NBit1] + __rtruediv__: _IntTrueDiv[_NBit1] + def __mod__(self, value: _IntLike_co) -> integer[Any]: ... + def __rmod__(self, value: _IntLike_co) -> integer[Any]: ... + def __invert__(self: _IntType) -> _IntType: ... + # Ensure that objects annotated as `integer` support bit-wise operations + def __lshift__(self, other: _IntLike_co) -> integer[Any]: ... + def __rlshift__(self, other: _IntLike_co) -> integer[Any]: ... + def __rshift__(self, other: _IntLike_co) -> integer[Any]: ... + def __rrshift__(self, other: _IntLike_co) -> integer[Any]: ... + def __and__(self, other: _IntLike_co) -> integer[Any]: ... + def __rand__(self, other: _IntLike_co) -> integer[Any]: ... + def __or__(self, other: _IntLike_co) -> integer[Any]: ... + def __ror__(self, other: _IntLike_co) -> integer[Any]: ... + def __xor__(self, other: _IntLike_co) -> integer[Any]: ... + def __rxor__(self, other: _IntLike_co) -> integer[Any]: ... + +class signedinteger(integer[_NBit1]): + def __init__(self, value: _IntValue = ..., /) -> None: ... 
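+ # (Editor's sketch, not part of the upstream stubs.) The `integer` API
+ # annotated above mirrors the builtin `int` where it can; per these
+ # stubs, for example:
+ #
+ #   >>> import numpy as np
+ #   >>> np.int64(10).bit_count()      # number of set bits in 0b1010
+ #   2
+ #   >>> np.int64(7).numerator, np.int64(7).denominator
+ #   (7, 1)
+ #   >>> import operator
+ #   >>> operator.index(np.int64(3))   # `__index__` on the concrete subclass
+ #   3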
+ __add__: _SignedIntOp[_NBit1] + __radd__: _SignedIntOp[_NBit1] + __sub__: _SignedIntOp[_NBit1] + __rsub__: _SignedIntOp[_NBit1] + __mul__: _SignedIntOp[_NBit1] + __rmul__: _SignedIntOp[_NBit1] + __floordiv__: _SignedIntOp[_NBit1] + __rfloordiv__: _SignedIntOp[_NBit1] + __pow__: _SignedIntOp[_NBit1] + __rpow__: _SignedIntOp[_NBit1] + __lshift__: _SignedIntBitOp[_NBit1] + __rlshift__: _SignedIntBitOp[_NBit1] + __rshift__: _SignedIntBitOp[_NBit1] + __rrshift__: _SignedIntBitOp[_NBit1] + __and__: _SignedIntBitOp[_NBit1] + __rand__: _SignedIntBitOp[_NBit1] + __xor__: _SignedIntBitOp[_NBit1] + __rxor__: _SignedIntBitOp[_NBit1] + __or__: _SignedIntBitOp[_NBit1] + __ror__: _SignedIntBitOp[_NBit1] + __mod__: _SignedIntMod[_NBit1] + __rmod__: _SignedIntMod[_NBit1] + __divmod__: _SignedIntDivMod[_NBit1] + __rdivmod__: _SignedIntDivMod[_NBit1] + +int8 = signedinteger[_8Bit] +int16 = signedinteger[_16Bit] +int32 = signedinteger[_32Bit] +int64 = signedinteger[_64Bit] + +byte = signedinteger[_NBitByte] +short = signedinteger[_NBitShort] +intc = signedinteger[_NBitIntC] +intp = signedinteger[_NBitIntP] +int_ = signedinteger[_NBitInt] +longlong = signedinteger[_NBitLongLong] + +# TODO: `item`/`tolist` returns either `dt.timedelta` or `int` +# depending on the unit +class timedelta64(generic): + def __init__( + self, + value: None | int | _CharLike_co | dt.timedelta | timedelta64 = ..., + format: _CharLike_co | tuple[_CharLike_co, _IntLike_co] = ..., + /, + ) -> None: ... + @property + def numerator(self: _ScalarType) -> _ScalarType: ... + @property + def denominator(self) -> L[1]: ... + + # NOTE: Only a limited number of units support conversion + # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` + def __int__(self) -> int: ... + def __float__(self) -> float: ... + def __complex__(self) -> complex: ... + def __neg__(self: _ArraySelf) -> _ArraySelf: ... + def __pos__(self: _ArraySelf) -> _ArraySelf: ... + def __abs__(self: _ArraySelf) -> _ArraySelf: ... + def __add__(self, other: _TD64Like_co) -> timedelta64: ... + def __radd__(self, other: _TD64Like_co) -> timedelta64: ... + def __sub__(self, other: _TD64Like_co) -> timedelta64: ... + def __rsub__(self, other: _TD64Like_co) -> timedelta64: ... + def __mul__(self, other: _FloatLike_co) -> timedelta64: ... + def __rmul__(self, other: _FloatLike_co) -> timedelta64: ... + __truediv__: _TD64Div[float64] + __floordiv__: _TD64Div[int64] + def __rtruediv__(self, other: timedelta64) -> float64: ... + def __rfloordiv__(self, other: timedelta64) -> int64: ... + def __mod__(self, other: timedelta64) -> timedelta64: ... + def __rmod__(self, other: timedelta64) -> timedelta64: ... + def __divmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ... + def __rdivmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ... + __lt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __le__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __gt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + __ge__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + +class unsignedinteger(integer[_NBit1]): + # NOTE: `uint64 + signedinteger -> float64` + def __init__(self, value: _IntValue = ..., /) -> None: ... 
+ __add__: _UnsignedIntOp[_NBit1] + __radd__: _UnsignedIntOp[_NBit1] + __sub__: _UnsignedIntOp[_NBit1] + __rsub__: _UnsignedIntOp[_NBit1] + __mul__: _UnsignedIntOp[_NBit1] + __rmul__: _UnsignedIntOp[_NBit1] + __floordiv__: _UnsignedIntOp[_NBit1] + __rfloordiv__: _UnsignedIntOp[_NBit1] + __pow__: _UnsignedIntOp[_NBit1] + __rpow__: _UnsignedIntOp[_NBit1] + __lshift__: _UnsignedIntBitOp[_NBit1] + __rlshift__: _UnsignedIntBitOp[_NBit1] + __rshift__: _UnsignedIntBitOp[_NBit1] + __rrshift__: _UnsignedIntBitOp[_NBit1] + __and__: _UnsignedIntBitOp[_NBit1] + __rand__: _UnsignedIntBitOp[_NBit1] + __xor__: _UnsignedIntBitOp[_NBit1] + __rxor__: _UnsignedIntBitOp[_NBit1] + __or__: _UnsignedIntBitOp[_NBit1] + __ror__: _UnsignedIntBitOp[_NBit1] + __mod__: _UnsignedIntMod[_NBit1] + __rmod__: _UnsignedIntMod[_NBit1] + __divmod__: _UnsignedIntDivMod[_NBit1] + __rdivmod__: _UnsignedIntDivMod[_NBit1] + +uint8 = unsignedinteger[_8Bit] +uint16 = unsignedinteger[_16Bit] +uint32 = unsignedinteger[_32Bit] +uint64 = unsignedinteger[_64Bit] + +ubyte = unsignedinteger[_NBitByte] +ushort = unsignedinteger[_NBitShort] +uintc = unsignedinteger[_NBitIntC] +uintp = unsignedinteger[_NBitIntP] +uint = unsignedinteger[_NBitInt] +ulonglong = unsignedinteger[_NBitLongLong] + +class inexact(number[_NBit1]): # type: ignore + def __getnewargs__(self: inexact[_64Bit]) -> tuple[float, ...]: ... + +_IntType = TypeVar("_IntType", bound=integer[Any]) +_FloatType = TypeVar('_FloatType', bound=floating[Any]) + +class floating(inexact[_NBit1]): + def __init__(self, value: _FloatValue = ..., /) -> None: ... + def item( + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., + /, + ) -> float: ... + def tolist(self) -> float: ... + def is_integer(self) -> bool: ... + def hex(self: float64) -> str: ... + @classmethod + def fromhex(cls: type[float64], string: str, /) -> float64: ... + def as_integer_ratio(self) -> tuple[int, int]: ... + def __ceil__(self: float64) -> int: ... + def __floor__(self: float64) -> int: ... + def __trunc__(self: float64) -> int: ... + def __getnewargs__(self: float64) -> tuple[float]: ... + def __getformat__(self: float64, typestr: L["double", "float"], /) -> str: ... + @overload + def __round__(self, ndigits: None = ...) -> int: ... + @overload + def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ... + __add__: _FloatOp[_NBit1] + __radd__: _FloatOp[_NBit1] + __sub__: _FloatOp[_NBit1] + __rsub__: _FloatOp[_NBit1] + __mul__: _FloatOp[_NBit1] + __rmul__: _FloatOp[_NBit1] + __truediv__: _FloatOp[_NBit1] + __rtruediv__: _FloatOp[_NBit1] + __floordiv__: _FloatOp[_NBit1] + __rfloordiv__: _FloatOp[_NBit1] + __pow__: _FloatOp[_NBit1] + __rpow__: _FloatOp[_NBit1] + __mod__: _FloatMod[_NBit1] + __rmod__: _FloatMod[_NBit1] + __divmod__: _FloatDivMod[_NBit1] + __rdivmod__: _FloatDivMod[_NBit1] + +float16 = floating[_16Bit] +float32 = floating[_32Bit] +float64 = floating[_64Bit] + +half = floating[_NBitHalf] +single = floating[_NBitSingle] +double = floating[_NBitDouble] +float_ = floating[_NBitDouble] +longdouble = floating[_NBitLongDouble] +longfloat = floating[_NBitLongDouble] + +# The main reason for `complexfloating` having two typevars is cosmetic. +# It is used to clarify why `complex128`s precision is `_64Bit`, the latter +# describing the two 64 bit floats representing its real and imaginary component + +class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): + def __init__(self, value: _ComplexValue = ..., /) -> None: ... 
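+ # (Editor's sketch, not part of the upstream stubs.) The two typevars
+ # encode that a complex scalar is a pair of same-width floats, so e.g.
+ # `complex128` decomposes into two 64-bit components:
+ #
+ #   >>> import numpy as np
+ #   >>> z = np.complex128(3 + 4j)
+ #   >>> type(z.real), type(z.imag), type(abs(z))
+ #   (<class 'numpy.float64'>, <class 'numpy.float64'>, <class 'numpy.float64'>)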
+ def item( + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, + ) -> complex: ... + def tolist(self) -> complex: ... + @property + def real(self) -> floating[_NBit1]: ... # type: ignore[override] + @property + def imag(self) -> floating[_NBit2]: ... # type: ignore[override] + def __abs__(self) -> floating[_NBit1]: ... # type: ignore[override] + def __getnewargs__(self: complex128) -> tuple[float, float]: ... + # NOTE: Deprecated + # def __round__(self, ndigits=...): ... + __add__: _ComplexOp[_NBit1] + __radd__: _ComplexOp[_NBit1] + __sub__: _ComplexOp[_NBit1] + __rsub__: _ComplexOp[_NBit1] + __mul__: _ComplexOp[_NBit1] + __rmul__: _ComplexOp[_NBit1] + __truediv__: _ComplexOp[_NBit1] + __rtruediv__: _ComplexOp[_NBit1] + __pow__: _ComplexOp[_NBit1] + __rpow__: _ComplexOp[_NBit1] + +complex64 = complexfloating[_32Bit, _32Bit] +complex128 = complexfloating[_64Bit, _64Bit] + +csingle = complexfloating[_NBitSingle, _NBitSingle] +singlecomplex = complexfloating[_NBitSingle, _NBitSingle] +cdouble = complexfloating[_NBitDouble, _NBitDouble] +complex_ = complexfloating[_NBitDouble, _NBitDouble] +cfloat = complexfloating[_NBitDouble, _NBitDouble] +clongdouble = complexfloating[_NBitLongDouble, _NBitLongDouble] +clongfloat = complexfloating[_NBitLongDouble, _NBitLongDouble] +longcomplex = complexfloating[_NBitLongDouble, _NBitLongDouble] + +class flexible(generic): ... # type: ignore + +# TODO: `item`/`tolist` returns either `bytes` or `tuple` +# depending on whether or not it's used as an opaque bytes sequence +# or a structure +class void(flexible): + @overload + def __init__(self, value: _IntLike_co | bytes, /, dtype : None = ...) -> None: ... + @overload + def __init__(self, value: Any, /, dtype: _DTypeLikeVoid) -> None: ... + @property + def real(self: _ArraySelf) -> _ArraySelf: ... + @property + def imag(self: _ArraySelf) -> _ArraySelf: ... + def setfield( + self, val: ArrayLike, dtype: DTypeLike, offset: int = ... + ) -> None: ... + @overload + def __getitem__(self, key: str | SupportsIndex) -> Any: ... + @overload + def __getitem__(self, key: list[str]) -> void: ... + def __setitem__( + self, + key: str | list[str] | SupportsIndex, + value: ArrayLike, + ) -> None: ... + +class character(flexible): # type: ignore + def __int__(self) -> int: ... + def __float__(self) -> float: ... + +# NOTE: Most `np.bytes_` / `np.str_` methods return their +# builtin `bytes` / `str` counterpart + +class bytes_(character, bytes): + @overload + def __init__(self, value: object = ..., /) -> None: ... + @overload + def __init__( + self, value: str, /, encoding: str = ..., errors: str = ... + ) -> None: ... + def item( + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, + ) -> bytes: ... + def tolist(self) -> bytes: ... + +string_ = bytes_ + +class str_(character, str): + @overload + def __init__(self, value: object = ..., /) -> None: ... + @overload + def __init__( + self, value: bytes, /, encoding: str = ..., errors: str = ... + ) -> None: ... + def item( + self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, + ) -> str: ... + def tolist(self) -> str: ... 
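+
+# (Editor's sketch, not part of the upstream stubs.) As the NOTE above says,
+# `str_` and `bytes_` subclass the builtin `str`/`bytes`, and `item`/`tolist`
+# return the builtin counterpart:
+#
+#   >>> import numpy as np
+#   >>> s = np.str_("spam")
+#   >>> isinstance(s, str), type(s.item())
+#   (True, <class 'str'>)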
+
+unicode_ = str_
+
+#
+# Constants
+#
+
+Inf: Final[float]
+Infinity: Final[float]
+NAN: Final[float]
+NINF: Final[float]
+NZERO: Final[float]
+NaN: Final[float]
+PINF: Final[float]
+PZERO: Final[float]
+e: Final[float]
+euler_gamma: Final[float]
+inf: Final[float]
+infty: Final[float]
+nan: Final[float]
+pi: Final[float]
+
+ERR_IGNORE: L[0]
+ERR_WARN: L[1]
+ERR_RAISE: L[2]
+ERR_CALL: L[3]
+ERR_PRINT: L[4]
+ERR_LOG: L[5]
+ERR_DEFAULT: L[521]
+
+SHIFT_DIVIDEBYZERO: L[0]
+SHIFT_OVERFLOW: L[3]
+SHIFT_UNDERFLOW: L[6]
+SHIFT_INVALID: L[9]
+
+FPE_DIVIDEBYZERO: L[1]
+FPE_OVERFLOW: L[2]
+FPE_UNDERFLOW: L[4]
+FPE_INVALID: L[8]
+
+FLOATING_POINT_SUPPORT: L[1]
+UFUNC_BUFSIZE_DEFAULT = BUFSIZE
+
+little_endian: Final[bool]
+True_: Final[bool_]
+False_: Final[bool_]
+
+UFUNC_PYVALS_NAME: L["UFUNC_PYVALS"]
+
+newaxis: None
+
+# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs
+@final
+class ufunc:
+ @property
+ def __name__(self) -> str: ...
+ @property
+ def __doc__(self) -> str: ...
+ __call__: Callable[..., Any]
+ @property
+ def nin(self) -> int: ...
+ @property
+ def nout(self) -> int: ...
+ @property
+ def nargs(self) -> int: ...
+ @property
+ def ntypes(self) -> int: ...
+ @property
+ def types(self) -> list[str]: ...
+ # Broad return type because it has to encompass things like
+ #
+ # >>> np.logical_and.identity is True
+ # True
+ # >>> np.add.identity is 0
+ # True
+ # >>> np.sin.identity is None
+ # True
+ #
+ # and any user-defined ufuncs.
+ @property
+ def identity(self) -> Any: ...
+ # This is None for ufuncs and a string for gufuncs.
+ @property
+ def signature(self) -> None | str: ...
+ # The next four methods will always exist, but they will just
+ # raise a ValueError for ufuncs that don't accept two input
+ # arguments and return one output argument. Because of that we
+ # can't type them very precisely.
+ reduce: Any
+ accumulate: Any
+ reduceat: Any
+ outer: Any
+ # Similarly, `at` won't be defined for ufuncs that return multiple
+ # outputs, so we can't type it very precisely.
+ at: Any
+
+# Parameters: `__name__`, `ntypes` and `identity`
+absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None]
+add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]]
+arccos: _UFunc_Nin1_Nout1[L['arccos'], L[8], None]
+arccosh: _UFunc_Nin1_Nout1[L['arccosh'], L[8], None]
+arcsin: _UFunc_Nin1_Nout1[L['arcsin'], L[8], None]
+arcsinh: _UFunc_Nin1_Nout1[L['arcsinh'], L[8], None]
+arctan2: _UFunc_Nin2_Nout1[L['arctan2'], L[5], None]
+arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None]
+arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None]
+bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]]
+bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None]
+bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]]
+bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]]
+cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None]
+ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None]
+conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None]
+conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None]
+copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None]
+cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None]
+cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None]
+deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None]
+degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None]
+divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None]
+divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None]
+equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None]
+exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None]
+exp: _UFunc_Nin1_Nout1[L['exp'], L[10], None]
+expm1: _UFunc_Nin1_Nout1[L['expm1'], L[8], None]
+fabs: _UFunc_Nin1_Nout1[L['fabs'], L[5], None]
+float_power: _UFunc_Nin2_Nout1[L['float_power'], L[4], None]
+floor: _UFunc_Nin1_Nout1[L['floor'], L[7], None]
+floor_divide: _UFunc_Nin2_Nout1[L['floor_divide'], L[21], None]
+fmax: _UFunc_Nin2_Nout1[L['fmax'], L[21], None]
+fmin: _UFunc_Nin2_Nout1[L['fmin'], L[21], None]
+fmod: _UFunc_Nin2_Nout1[L['fmod'], L[15], None]
+frexp: _UFunc_Nin1_Nout2[L['frexp'], L[4], None]
+gcd: _UFunc_Nin2_Nout1[L['gcd'], L[11], L[0]]
+greater: _UFunc_Nin2_Nout1[L['greater'], L[23], None]
+greater_equal: _UFunc_Nin2_Nout1[L['greater_equal'], L[23], None]
+heaviside: _UFunc_Nin2_Nout1[L['heaviside'], L[4], None]
+hypot: _UFunc_Nin2_Nout1[L['hypot'], L[5], L[0]]
+invert: _UFunc_Nin1_Nout1[L['invert'], L[12], None]
+isfinite: _UFunc_Nin1_Nout1[L['isfinite'], L[20], None]
+isinf: _UFunc_Nin1_Nout1[L['isinf'], L[20], None]
+isnan: _UFunc_Nin1_Nout1[L['isnan'], L[20], None]
+isnat: _UFunc_Nin1_Nout1[L['isnat'], L[2], None]
+lcm: _UFunc_Nin2_Nout1[L['lcm'], L[11], None]
+ldexp: _UFunc_Nin2_Nout1[L['ldexp'], L[8], None]
+left_shift: _UFunc_Nin2_Nout1[L['left_shift'], L[11], None]
+less: _UFunc_Nin2_Nout1[L['less'], L[23], None]
+less_equal: _UFunc_Nin2_Nout1[L['less_equal'], L[23], None]
+log10: _UFunc_Nin1_Nout1[L['log10'], L[8], None]
+log1p: _UFunc_Nin1_Nout1[L['log1p'], L[8], None]
+log2: _UFunc_Nin1_Nout1[L['log2'], L[8], None]
+log: _UFunc_Nin1_Nout1[L['log'], L[10], None]
+logaddexp2: _UFunc_Nin2_Nout1[L['logaddexp2'], L[4], float]
+logaddexp: _UFunc_Nin2_Nout1[L['logaddexp'], L[4], float]
+logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]]
+logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None]
+logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]]
+logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]]
+matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None]
+maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None]
+minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None]
+mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None]
+modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None]
+multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]]
+negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None]
+nextafter: _UFunc_Nin2_Nout1[L['nextafter'], L[4], None]
+not_equal: _UFunc_Nin2_Nout1[L['not_equal'], L[23], None]
+positive: _UFunc_Nin1_Nout1[L['positive'], L[19], None]
+power: _UFunc_Nin2_Nout1[L['power'], L[18], None]
+rad2deg: _UFunc_Nin1_Nout1[L['rad2deg'], L[5], None]
+radians: _UFunc_Nin1_Nout1[L['radians'], L[5], None]
+reciprocal: _UFunc_Nin1_Nout1[L['reciprocal'], L[18], None]
+remainder: _UFunc_Nin2_Nout1[L['remainder'], L[16], None]
+right_shift: _UFunc_Nin2_Nout1[L['right_shift'], L[11], None]
+rint: _UFunc_Nin1_Nout1[L['rint'], L[10], None]
+sign: _UFunc_Nin1_Nout1[L['sign'], L[19], None]
+signbit: _UFunc_Nin1_Nout1[L['signbit'], L[4], None]
+sin: _UFunc_Nin1_Nout1[L['sin'], L[9], None]
+sinh: _UFunc_Nin1_Nout1[L['sinh'], L[8], None]
+spacing: _UFunc_Nin1_Nout1[L['spacing'], L[4], None]
+sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None]
+square: _UFunc_Nin1_Nout1[L['square'], L[18], None]
+subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None]
+tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None]
+tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None]
+true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None]
+trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None]
+
+abs = absolute
+
+class _CopyMode(enum.Enum):
+ ALWAYS: L[True]
+ IF_NEEDED: L[False]
+ NEVER: L[2]
+
+# Warnings
+class RankWarning(UserWarning): ...
+
+_CallType = TypeVar("_CallType", bound=_ErrFunc | _SupportsWrite[str])
+
+class errstate(Generic[_CallType], ContextDecorator):
+ call: _CallType
+ kwargs: _ErrDictOptional
+
+ # Expand `**kwargs` into explicit keyword-only arguments
+ def __init__(
+ self,
+ *,
+ call: _CallType = ...,
+ all: None | _ErrKind = ...,
+ divide: None | _ErrKind = ...,
+ over: None | _ErrKind = ...,
+ under: None | _ErrKind = ...,
+ invalid: None | _ErrKind = ...,
+ ) -> None: ...
+ def __enter__(self) -> None: ...
+ def __exit__(
+ self,
+ exc_type: None | type[BaseException],
+ exc_value: None | BaseException,
+ traceback: None | TracebackType,
+ /,
+ ) -> None: ...
+
+@contextmanager
+def _no_nep50_warning() -> Generator[None, None, None]: ...
+def _get_promotion_state() -> str: ...
+def _set_promotion_state(state: str, /) -> None: ...
+
+class ndenumerate(Generic[_ScalarType]):
+ iter: flatiter[NDArray[_ScalarType]]
+ @overload
+ def __new__(
+ cls, arr: _FiniteNestedSequence[_SupportsArray[dtype[_ScalarType]]],
+ ) -> ndenumerate[_ScalarType]: ...
+ @overload
+ def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[str_]: ...
+ @overload
+ def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[bytes_]: ...
+ @overload
+ def __new__(cls, arr: bool | _NestedSequence[bool]) -> ndenumerate[bool_]: ...
+ @overload
+ def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[int_]: ...
+ @overload
+ def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[float_]: ...
+ @overload
+ def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[complex_]: ...
+ def __next__(self: ndenumerate[_ScalarType]) -> tuple[_Shape, _ScalarType]: ...
+ def __iter__(self: _T) -> _T: ...
+
+class ndindex:
+ @overload
+ def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ...
+ @overload
+ def __init__(self, *shape: SupportsIndex) -> None: ...
+ def __iter__(self: _T) -> _T: ...
+ def __next__(self) -> _Shape: ...
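+
+# (Editor's sketch, not part of the upstream stubs.) Quick usage of three of
+# the runtime classes annotated above -- `errstate` as a context manager and
+# the two shape iterators:
+#
+#   >>> import numpy as np
+#   >>> with np.errstate(divide="ignore"):
+#   ...     _ = np.float64(1.0) / np.float64(0.0)   # warning suppressed
+#   >>> list(np.ndindex(2, 2))
+#   [(0, 0), (0, 1), (1, 0), (1, 1)]
+#   >>> [ix for ix, _ in np.ndenumerate(np.array([[1, 2]]))]
+#   [(0, 0), (0, 1)]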
+ +class DataSource: + def __init__( + self, + destpath: None | str | os.PathLike[str] = ..., + ) -> None: ... + def __del__(self) -> None: ... + def abspath(self, path: str) -> str: ... + def exists(self, path: str) -> bool: ... + + # Whether the file-object is opened in string or bytes mode (by default) + # depends on the file-extension of `path` + def open( + self, + path: str, + mode: str = ..., + encoding: None | str = ..., + newline: None | str = ..., + ) -> IO[Any]: ... + +# TODO: The type of each `__next__` and `iters` return-type depends +# on the length and dtype of `args`; we can't describe this behavior yet +# as we lack variadics (PEP 646). +@final +class broadcast: + def __new__(cls, *args: ArrayLike) -> broadcast: ... + @property + def index(self) -> int: ... + @property + def iters(self) -> tuple[flatiter[Any], ...]: ... + @property + def nd(self) -> int: ... + @property + def ndim(self) -> int: ... + @property + def numiter(self) -> int: ... + @property + def shape(self) -> _Shape: ... + @property + def size(self) -> int: ... + def __next__(self) -> tuple[Any, ...]: ... + def __iter__(self: _T) -> _T: ... + def reset(self) -> None: ... + +@final +class busdaycalendar: + def __new__( + cls, + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] = ..., + ) -> busdaycalendar: ... + @property + def weekmask(self) -> NDArray[bool_]: ... + @property + def holidays(self) -> NDArray[datetime64]: ... + +class finfo(Generic[_FloatType]): + dtype: dtype[_FloatType] + bits: int + eps: _FloatType + epsneg: _FloatType + iexp: int + machep: int + max: _FloatType + maxexp: int + min: _FloatType + minexp: int + negep: int + nexp: int + nmant: int + precision: int + resolution: _FloatType + smallest_subnormal: _FloatType + @property + def smallest_normal(self) -> _FloatType: ... + @property + def tiny(self) -> _FloatType: ... + @overload + def __new__( + cls, dtype: inexact[_NBit1] | _DTypeLike[inexact[_NBit1]] + ) -> finfo[floating[_NBit1]]: ... + @overload + def __new__( + cls, dtype: complex | float | type[complex] | type[float] + ) -> finfo[float_]: ... + @overload + def __new__( + cls, dtype: str + ) -> finfo[floating[Any]]: ... + +class iinfo(Generic[_IntType]): + dtype: dtype[_IntType] + kind: str + bits: int + key: str + @property + def min(self) -> int: ... + @property + def max(self) -> int: ... + + @overload + def __new__(cls, dtype: _IntType | _DTypeLike[_IntType]) -> iinfo[_IntType]: ... + @overload + def __new__(cls, dtype: int | type[int]) -> iinfo[int_]: ... + @overload + def __new__(cls, dtype: str) -> iinfo[Any]: ... + +class format_parser: + dtype: dtype[void] + def __init__( + self, + formats: DTypeLike, + names: None | str | Sequence[str], + titles: None | str | Sequence[str], + aligned: bool = ..., + byteorder: None | _ByteOrder = ..., + ) -> None: ... + +class recarray(ndarray[_ShapeType, _DType_co]): + # NOTE: While not strictly mandatory, we're demanding here that arguments + # for the `format_parser`- and `dtype`-based dtype constructors are + # mutually exclusive + @overload + def __new__( + subtype, + shape: _ShapeLike, + dtype: None = ..., + buf: None | _SupportsBuffer = ..., + offset: SupportsIndex = ..., + strides: None | _ShapeLike = ..., + *, + formats: DTypeLike, + names: None | str | Sequence[str] = ..., + titles: None | str | Sequence[str] = ..., + byteorder: None | _ByteOrder = ..., + aligned: bool = ..., + order: _OrderKACF = ..., + ) -> recarray[Any, dtype[record]]: ... 
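+    # Illustrative sketch (not part of the upstream stub): the
+    # `formats`-based overload above corresponds to calls such as
+    #     >>> np.recarray((2,), formats=['i4', 'f8'], names=['x', 'y'])
+    # while the `dtype`-based overload below covers
+    #     >>> np.recarray((2,), dtype=[('x', 'i4'), ('y', 'f8')])
+    # The two dtype-construction paths are mutually exclusive, per the NOTE
+    # above.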
+ @overload + def __new__( + subtype, + shape: _ShapeLike, + dtype: DTypeLike, + buf: None | _SupportsBuffer = ..., + offset: SupportsIndex = ..., + strides: None | _ShapeLike = ..., + formats: None = ..., + names: None = ..., + titles: None = ..., + byteorder: None = ..., + aligned: L[False] = ..., + order: _OrderKACF = ..., + ) -> recarray[Any, dtype[Any]]: ... + def __array_finalize__(self, obj: object) -> None: ... + def __getattribute__(self, attr: str) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike) -> None: ... + @overload + def __getitem__(self, indx: ( + SupportsIndex + | _ArrayLikeInt_co + | tuple[SupportsIndex | _ArrayLikeInt_co, ...] + )) -> Any: ... + @overload + def __getitem__(self: recarray[Any, dtype[void]], indx: ( + None + | slice + | ellipsis + | SupportsIndex + | _ArrayLikeInt_co + | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] + )) -> recarray[Any, _DType_co]: ... + @overload + def __getitem__(self, indx: ( + None + | slice + | ellipsis + | SupportsIndex + | _ArrayLikeInt_co + | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] + )) -> ndarray[Any, _DType_co]: ... + @overload + def __getitem__(self, indx: str) -> NDArray[Any]: ... + @overload + def __getitem__(self, indx: list[str]) -> recarray[_ShapeType, dtype[record]]: ... + @overload + def field(self, attr: int | str, val: None = ...) -> Any: ... + @overload + def field(self, attr: int | str, val: ArrayLike) -> None: ... + +class record(void): + def __getattribute__(self, attr: str) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike) -> None: ... + def pprint(self) -> str: ... + @overload + def __getitem__(self, key: str | SupportsIndex) -> Any: ... + @overload + def __getitem__(self, key: list[str]) -> record: ... + +_NDIterFlagsKind = L[ + "buffered", + "c_index", + "copy_if_overlap", + "common_dtype", + "delay_bufalloc", + "external_loop", + "f_index", + "grow_inner", "growinner", + "multi_index", + "ranged", + "refs_ok", + "reduce_ok", + "zerosize_ok", +] + +_NDIterOpFlagsKind = L[ + "aligned", + "allocate", + "arraymask", + "copy", + "config", + "nbo", + "no_subtype", + "no_broadcast", + "overlap_assume_elementwise", + "readonly", + "readwrite", + "updateifcopy", + "virtual", + "writeonly", + "writemasked" +] + +@final +class nditer: + def __new__( + cls, + op: ArrayLike | Sequence[ArrayLike], + flags: None | Sequence[_NDIterFlagsKind] = ..., + op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + order: _OrderKACF = ..., + casting: _CastingKind = ..., + op_axes: None | Sequence[Sequence[SupportsIndex]] = ..., + itershape: None | _ShapeLike = ..., + buffersize: SupportsIndex = ..., + ) -> nditer: ... + def __enter__(self) -> nditer: ... + def __exit__( + self, + exc_type: None | type[BaseException], + exc_value: None | BaseException, + traceback: None | TracebackType, + ) -> None: ... + def __iter__(self) -> nditer: ... + def __next__(self) -> tuple[NDArray[Any], ...]: ... + def __len__(self) -> int: ... + def __copy__(self) -> nditer: ... + @overload + def __getitem__(self, index: SupportsIndex) -> NDArray[Any]: ... + @overload + def __getitem__(self, index: slice) -> tuple[NDArray[Any], ...]: ... + def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ... + def close(self) -> None: ... + def copy(self) -> nditer: ... + def debug_print(self) -> None: ... + def enable_external_loop(self) -> None: ... + def iternext(self) -> bool: ... 
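+    # Illustrative sketch (not part of the upstream stub): a minimal runtime
+    # use of the iteration API annotated here, assuming standard NumPy
+    # semantics:
+    #     >>> import numpy as np
+    #     >>> it = np.nditer(np.arange(6).reshape(2, 3), flags=['multi_index'])
+    #     >>> with it:
+    #     ...     while not it.finished:
+    #     ...         _ = it.multi_index  # (0, 0), (0, 1), ..., (1, 2)
+    #     ...         _ = it.iternext()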
+ def remove_axis(self, i: SupportsIndex, /) -> None: ... + def remove_multi_index(self) -> None: ... + def reset(self) -> None: ... + @property + def dtypes(self) -> tuple[dtype[Any], ...]: ... + @property + def finished(self) -> bool: ... + @property + def has_delayed_bufalloc(self) -> bool: ... + @property + def has_index(self) -> bool: ... + @property + def has_multi_index(self) -> bool: ... + @property + def index(self) -> int: ... + @property + def iterationneedsapi(self) -> bool: ... + @property + def iterindex(self) -> int: ... + @property + def iterrange(self) -> tuple[int, ...]: ... + @property + def itersize(self) -> int: ... + @property + def itviews(self) -> tuple[NDArray[Any], ...]: ... + @property + def multi_index(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + @property + def nop(self) -> int: ... + @property + def operands(self) -> tuple[NDArray[Any], ...]: ... + @property + def shape(self) -> tuple[int, ...]: ... + @property + def value(self) -> tuple[NDArray[Any], ...]: ... + +_MemMapModeKind = L[ + "readonly", "r", + "copyonwrite", "c", + "readwrite", "r+", + "write", "w+", +] + +class memmap(ndarray[_ShapeType, _DType_co]): + __array_priority__: ClassVar[float] + filename: str | None + offset: int + mode: str + @overload + def __new__( + subtype, + filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, + dtype: type[uint8] = ..., + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: None | int | tuple[int, ...] = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype[uint8]]: ... + @overload + def __new__( + subtype, + filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, + dtype: _DTypeLike[_ScalarType], + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: None | int | tuple[int, ...] = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype[_ScalarType]]: ... + @overload + def __new__( + subtype, + filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, + dtype: DTypeLike, + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: None | int | tuple[int, ...] = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype[Any]]: ... + def __array_finalize__(self, obj: object) -> None: ... + def __array_wrap__( + self, + array: memmap[_ShapeType, _DType_co], + context: None | tuple[ufunc, tuple[Any, ...], int] = ..., + ) -> Any: ... + def flush(self) -> None: ... + +# TODO: Add a mypy plugin for managing functions whose output type is dependent +# on the literal value of some sort of signature (e.g. `einsum` and `vectorize`) +class vectorize: + pyfunc: Callable[..., Any] + cache: bool + signature: None | str + otypes: None | str + excluded: set[int | str] + __doc__: None | str + def __init__( + self, + pyfunc: Callable[..., Any], + otypes: None | str | Iterable[DTypeLike] = ..., + doc: None | str = ..., + excluded: None | Iterable[int | str] = ..., + cache: bool = ..., + signature: None | str = ..., + ) -> None: ... + def __call__(self, *args: Any, **kwargs: Any) -> Any: ... + +class poly1d: + @property + def variable(self) -> str: ... + @property + def order(self) -> int: ... + @property + def o(self) -> int: ... + @property + def roots(self) -> NDArray[Any]: ... + @property + def r(self) -> NDArray[Any]: ... + + @property + def coeffs(self) -> NDArray[Any]: ... + @coeffs.setter + def coeffs(self, value: NDArray[Any]) -> None: ... + + @property + def c(self) -> NDArray[Any]: ... + @c.setter + def c(self, value: NDArray[Any]) -> None: ... 
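+    # Illustrative sketch (not part of the upstream stub): `coeffs`, `c`,
+    # `coef` and `coefficients` below all expose the same coefficient array:
+    #     >>> import numpy as np
+    #     >>> p = np.poly1d([1, 2, 3])  # x**2 + 2*x + 3
+    #     >>> p(1)
+    #     6
+    #     >>> p.c is p.coeffs
+    #     True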
+ + @property + def coef(self) -> NDArray[Any]: ... + @coef.setter + def coef(self, value: NDArray[Any]) -> None: ... + + @property + def coefficients(self) -> NDArray[Any]: ... + @coefficients.setter + def coefficients(self, value: NDArray[Any]) -> None: ... + + __hash__: ClassVar[None] # type: ignore + + @overload + def __array__(self, t: None = ...) -> NDArray[Any]: ... + @overload + def __array__(self, t: _DType) -> ndarray[Any, _DType]: ... + + @overload + def __call__(self, val: _ScalarLike_co) -> Any: ... + @overload + def __call__(self, val: poly1d) -> poly1d: ... + @overload + def __call__(self, val: ArrayLike) -> NDArray[Any]: ... + + def __init__( + self, + c_or_r: ArrayLike, + r: bool = ..., + variable: None | str = ..., + ) -> None: ... + def __len__(self) -> int: ... + def __neg__(self) -> poly1d: ... + def __pos__(self) -> poly1d: ... + def __mul__(self, other: ArrayLike) -> poly1d: ... + def __rmul__(self, other: ArrayLike) -> poly1d: ... + def __add__(self, other: ArrayLike) -> poly1d: ... + def __radd__(self, other: ArrayLike) -> poly1d: ... + def __pow__(self, val: _FloatLike_co) -> poly1d: ... # Integral floats are accepted + def __sub__(self, other: ArrayLike) -> poly1d: ... + def __rsub__(self, other: ArrayLike) -> poly1d: ... + def __div__(self, other: ArrayLike) -> poly1d: ... + def __truediv__(self, other: ArrayLike) -> poly1d: ... + def __rdiv__(self, other: ArrayLike) -> poly1d: ... + def __rtruediv__(self, other: ArrayLike) -> poly1d: ... + def __getitem__(self, val: int) -> Any: ... + def __setitem__(self, key: int, val: Any) -> None: ... + def __iter__(self) -> Iterator[Any]: ... + def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ... + def integ( + self, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., + ) -> poly1d: ... + +class matrix(ndarray[_ShapeType, _DType_co]): + __array_priority__: ClassVar[float] + def __new__( + subtype, + data: ArrayLike, + dtype: DTypeLike = ..., + copy: bool = ..., + ) -> matrix[Any, Any]: ... + def __array_finalize__(self, obj: object) -> None: ... + + @overload + def __getitem__(self, key: ( + SupportsIndex + | _ArrayLikeInt_co + | tuple[SupportsIndex | _ArrayLikeInt_co, ...] + )) -> Any: ... + @overload + def __getitem__(self, key: ( + None + | slice + | ellipsis + | SupportsIndex + | _ArrayLikeInt_co + | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] + )) -> matrix[Any, _DType_co]: ... + @overload + def __getitem__(self: NDArray[void], key: str) -> matrix[Any, dtype[Any]]: ... + @overload + def __getitem__(self: NDArray[void], key: list[str]) -> matrix[_ShapeType, dtype[void]]: ... + + def __mul__(self, other: ArrayLike) -> matrix[Any, Any]: ... + def __rmul__(self, other: ArrayLike) -> matrix[Any, Any]: ... + def __imul__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ... + def __pow__(self, other: ArrayLike) -> matrix[Any, Any]: ... + def __ipow__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ... + + @overload + def sum(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... + @overload + def sum(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + @overload + def sum(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def mean(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... 
+ @overload + def mean(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + @overload + def mean(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def std(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ... + @overload + def std(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ... + @overload + def std(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ... + + @overload + def var(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ... + @overload + def var(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ... + @overload + def var(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ... + + @overload + def prod(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... + @overload + def prod(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + @overload + def prod(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def any(self, axis: None = ..., out: None = ...) -> bool_: ... + @overload + def any(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[bool_]]: ... + @overload + def any(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def all(self, axis: None = ..., out: None = ...) -> bool_: ... + @overload + def all(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[bool_]]: ... + @overload + def all(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def max(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ... + @overload + def max(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... + @overload + def max(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def min(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ... + @overload + def min(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... + @overload + def min(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def argmax(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ... + @overload + def argmax(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ... + @overload + def argmax(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def argmin(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ... + @overload + def argmin(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ... + @overload + def argmin(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + + @overload + def ptp(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ... + @overload + def ptp(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... + @overload + def ptp(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) 
-> _NdArraySubClass: ... + + def squeeze(self, axis: None | _ShapeLike = ...) -> matrix[Any, _DType_co]: ... + def tolist(self: matrix[Any, dtype[_SupportsItem[_T]]]) -> list[list[_T]]: ... # type: ignore[typevar] + def ravel(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ... + def flatten(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ... + + @property + def T(self) -> matrix[Any, _DType_co]: ... + @property + def I(self) -> matrix[Any, Any]: ... + @property + def A(self) -> ndarray[_ShapeType, _DType_co]: ... + @property + def A1(self) -> ndarray[Any, _DType_co]: ... + @property + def H(self) -> matrix[Any, _DType_co]: ... + def getT(self) -> matrix[Any, _DType_co]: ... + def getI(self) -> matrix[Any, Any]: ... + def getA(self) -> ndarray[_ShapeType, _DType_co]: ... + def getA1(self) -> ndarray[Any, _DType_co]: ... + def getH(self) -> matrix[Any, _DType_co]: ... + +_CharType = TypeVar("_CharType", str_, bytes_) +_CharDType = TypeVar("_CharDType", dtype[str_], dtype[bytes_]) +_CharArray = chararray[Any, dtype[_CharType]] + +class chararray(ndarray[_ShapeType, _CharDType]): + @overload + def __new__( + subtype, + shape: _ShapeLike, + itemsize: SupportsIndex | SupportsInt = ..., + unicode: L[False] = ..., + buffer: _SupportsBuffer = ..., + offset: SupportsIndex = ..., + strides: _ShapeLike = ..., + order: _OrderKACF = ..., + ) -> chararray[Any, dtype[bytes_]]: ... + @overload + def __new__( + subtype, + shape: _ShapeLike, + itemsize: SupportsIndex | SupportsInt = ..., + unicode: L[True] = ..., + buffer: _SupportsBuffer = ..., + offset: SupportsIndex = ..., + strides: _ShapeLike = ..., + order: _OrderKACF = ..., + ) -> chararray[Any, dtype[str_]]: ... + + def __array_finalize__(self, obj: object) -> None: ... + def __mul__(self, other: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ... + def __rmul__(self, other: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ... + def __mod__(self, i: Any) -> chararray[Any, _CharDType]: ... + + @overload + def __eq__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> NDArray[bool_]: ... + @overload + def __eq__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> NDArray[bool_]: ... + + @overload + def __ne__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> NDArray[bool_]: ... + @overload + def __ne__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> NDArray[bool_]: ... + + @overload + def __ge__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> NDArray[bool_]: ... + @overload + def __ge__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> NDArray[bool_]: ... + + @overload + def __le__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> NDArray[bool_]: ... + @overload + def __le__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> NDArray[bool_]: ... + + @overload + def __gt__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> NDArray[bool_]: ... + @overload + def __gt__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> NDArray[bool_]: ... + + @overload + def __lt__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> NDArray[bool_]: ... + @overload + def __lt__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> NDArray[bool_]: ... + + @overload + def __add__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> _CharArray[str_]: ... + @overload + def __add__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> _CharArray[bytes_]: ... 
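+    # Illustrative sketch (not part of the upstream stub): comparisons return
+    # boolean arrays while ``+`` concatenates elementwise, as the overloads
+    # annotate:
+    #     >>> import numpy as np
+    #     >>> c = np.char.array(['ab', 'cd'])
+    #     >>> (c == 'ab').tolist()
+    #     [True, False]
+    #     >>> (c + '!').tolist()
+    #     ['ab!', 'cd!']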
+ + @overload + def __radd__( + self: _CharArray[str_], + other: _ArrayLikeStr_co, + ) -> _CharArray[str_]: ... + @overload + def __radd__( + self: _CharArray[bytes_], + other: _ArrayLikeBytes_co, + ) -> _CharArray[bytes_]: ... + + @overload + def center( + self: _CharArray[str_], + width: _ArrayLikeInt_co, + fillchar: _ArrayLikeStr_co = ..., + ) -> _CharArray[str_]: ... + @overload + def center( + self: _CharArray[bytes_], + width: _ArrayLikeInt_co, + fillchar: _ArrayLikeBytes_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def count( + self: _CharArray[str_], + sub: _ArrayLikeStr_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + @overload + def count( + self: _CharArray[bytes_], + sub: _ArrayLikeBytes_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + + def decode( + self: _CharArray[bytes_], + encoding: None | str = ..., + errors: None | str = ..., + ) -> _CharArray[str_]: ... + + def encode( + self: _CharArray[str_], + encoding: None | str = ..., + errors: None | str = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def endswith( + self: _CharArray[str_], + suffix: _ArrayLikeStr_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[bool_]: ... + @overload + def endswith( + self: _CharArray[bytes_], + suffix: _ArrayLikeBytes_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[bool_]: ... + + def expandtabs( + self, + tabsize: _ArrayLikeInt_co = ..., + ) -> chararray[Any, _CharDType]: ... + + @overload + def find( + self: _CharArray[str_], + sub: _ArrayLikeStr_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + @overload + def find( + self: _CharArray[bytes_], + sub: _ArrayLikeBytes_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + + @overload + def index( + self: _CharArray[str_], + sub: _ArrayLikeStr_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + @overload + def index( + self: _CharArray[bytes_], + sub: _ArrayLikeBytes_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + + @overload + def join( + self: _CharArray[str_], + seq: _ArrayLikeStr_co, + ) -> _CharArray[str_]: ... + @overload + def join( + self: _CharArray[bytes_], + seq: _ArrayLikeBytes_co, + ) -> _CharArray[bytes_]: ... + + @overload + def ljust( + self: _CharArray[str_], + width: _ArrayLikeInt_co, + fillchar: _ArrayLikeStr_co = ..., + ) -> _CharArray[str_]: ... + @overload + def ljust( + self: _CharArray[bytes_], + width: _ArrayLikeInt_co, + fillchar: _ArrayLikeBytes_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def lstrip( + self: _CharArray[str_], + chars: None | _ArrayLikeStr_co = ..., + ) -> _CharArray[str_]: ... + @overload + def lstrip( + self: _CharArray[bytes_], + chars: None | _ArrayLikeBytes_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def partition( + self: _CharArray[str_], + sep: _ArrayLikeStr_co, + ) -> _CharArray[str_]: ... + @overload + def partition( + self: _CharArray[bytes_], + sep: _ArrayLikeBytes_co, + ) -> _CharArray[bytes_]: ... + + @overload + def replace( + self: _CharArray[str_], + old: _ArrayLikeStr_co, + new: _ArrayLikeStr_co, + count: None | _ArrayLikeInt_co = ..., + ) -> _CharArray[str_]: ... 
+ @overload + def replace( + self: _CharArray[bytes_], + old: _ArrayLikeBytes_co, + new: _ArrayLikeBytes_co, + count: None | _ArrayLikeInt_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def rfind( + self: _CharArray[str_], + sub: _ArrayLikeStr_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + @overload + def rfind( + self: _CharArray[bytes_], + sub: _ArrayLikeBytes_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + + @overload + def rindex( + self: _CharArray[str_], + sub: _ArrayLikeStr_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + @overload + def rindex( + self: _CharArray[bytes_], + sub: _ArrayLikeBytes_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[int_]: ... + + @overload + def rjust( + self: _CharArray[str_], + width: _ArrayLikeInt_co, + fillchar: _ArrayLikeStr_co = ..., + ) -> _CharArray[str_]: ... + @overload + def rjust( + self: _CharArray[bytes_], + width: _ArrayLikeInt_co, + fillchar: _ArrayLikeBytes_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def rpartition( + self: _CharArray[str_], + sep: _ArrayLikeStr_co, + ) -> _CharArray[str_]: ... + @overload + def rpartition( + self: _CharArray[bytes_], + sep: _ArrayLikeBytes_co, + ) -> _CharArray[bytes_]: ... + + @overload + def rsplit( + self: _CharArray[str_], + sep: None | _ArrayLikeStr_co = ..., + maxsplit: None | _ArrayLikeInt_co = ..., + ) -> NDArray[object_]: ... + @overload + def rsplit( + self: _CharArray[bytes_], + sep: None | _ArrayLikeBytes_co = ..., + maxsplit: None | _ArrayLikeInt_co = ..., + ) -> NDArray[object_]: ... + + @overload + def rstrip( + self: _CharArray[str_], + chars: None | _ArrayLikeStr_co = ..., + ) -> _CharArray[str_]: ... + @overload + def rstrip( + self: _CharArray[bytes_], + chars: None | _ArrayLikeBytes_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def split( + self: _CharArray[str_], + sep: None | _ArrayLikeStr_co = ..., + maxsplit: None | _ArrayLikeInt_co = ..., + ) -> NDArray[object_]: ... + @overload + def split( + self: _CharArray[bytes_], + sep: None | _ArrayLikeBytes_co = ..., + maxsplit: None | _ArrayLikeInt_co = ..., + ) -> NDArray[object_]: ... + + def splitlines(self, keepends: None | _ArrayLikeBool_co = ...) -> NDArray[object_]: ... + + @overload + def startswith( + self: _CharArray[str_], + prefix: _ArrayLikeStr_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[bool_]: ... + @overload + def startswith( + self: _CharArray[bytes_], + prefix: _ArrayLikeBytes_co, + start: _ArrayLikeInt_co = ..., + end: None | _ArrayLikeInt_co = ..., + ) -> NDArray[bool_]: ... + + @overload + def strip( + self: _CharArray[str_], + chars: None | _ArrayLikeStr_co = ..., + ) -> _CharArray[str_]: ... + @overload + def strip( + self: _CharArray[bytes_], + chars: None | _ArrayLikeBytes_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def translate( + self: _CharArray[str_], + table: _ArrayLikeStr_co, + deletechars: None | _ArrayLikeStr_co = ..., + ) -> _CharArray[str_]: ... + @overload + def translate( + self: _CharArray[bytes_], + table: _ArrayLikeBytes_co, + deletechars: None | _ArrayLikeBytes_co = ..., + ) -> _CharArray[bytes_]: ... + + def zfill(self, width: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ... + def capitalize(self) -> chararray[_ShapeType, _CharDType]: ... + def title(self) -> chararray[_ShapeType, _CharDType]: ... 
+ def swapcase(self) -> chararray[_ShapeType, _CharDType]: ... + def lower(self) -> chararray[_ShapeType, _CharDType]: ... + def upper(self) -> chararray[_ShapeType, _CharDType]: ... + def isalnum(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def isalpha(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def isdigit(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def islower(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def isspace(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def istitle(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def isupper(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def isnumeric(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + def isdecimal(self) -> ndarray[_ShapeType, dtype[bool_]]: ... + +# NOTE: Deprecated +# class MachAr: ... + +class _SupportsDLPack(Protocol[_T_contra]): + def __dlpack__(self, *, stream: None | _T_contra = ...) -> _PyCapsule: ... + +def from_dlpack(obj: _SupportsDLPack[None], /) -> NDArray[Any]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_core/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/__init__.py new file mode 100644 index 00000000..a2f096f3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/__init__.py @@ -0,0 +1,4 @@ +""" +This private module only contains stubs for interoperability with +NumPy 2.0 pickled arrays. It may not be used by the end user. +""" diff --git a/dbdpy-env/lib/python3.9/site-packages/pip-21.2.4.dist-info/REQUESTED b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/__init__.pyi similarity index 100% rename from dbdpy-env/lib/python3.9/site-packages/pip-21.2.4.dist-info/REQUESTED rename to dbdpy-env/lib/python3.9/site-packages/numpy/_core/__init__.pyi diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_dtype.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_dtype.py new file mode 100644 index 00000000..974d93d9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_dtype.py @@ -0,0 +1,6 @@ +from numpy.core import _dtype + +_globals = globals() + +for item in _dtype.__dir__(): + _globals[item] = getattr(_dtype, item) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_dtype_ctypes.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_dtype_ctypes.py new file mode 100644 index 00000000..bfa16aab --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_dtype_ctypes.py @@ -0,0 +1,6 @@ +from numpy.core import _dtype_ctypes + +_globals = globals() + +for item in _dtype_ctypes.__dir__(): + _globals[item] = getattr(_dtype_ctypes, item) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_internal.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_internal.py new file mode 100644 index 00000000..52a8e907 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_internal.py @@ -0,0 +1,6 @@ +from numpy.core import _internal + +_globals = globals() + +for item in _internal.__dir__(): + _globals[item] = getattr(_internal, item) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_multiarray_umath.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_multiarray_umath.py new file mode 100644 index 00000000..7ce48fcb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/_multiarray_umath.py @@ -0,0 +1,6 @@ +from numpy.core import _multiarray_umath + +_globals = globals() + +for item in _multiarray_umath.__dir__(): + _globals[item] = getattr(_multiarray_umath, item) diff --git 
a/dbdpy-env/lib/python3.9/site-packages/numpy/_core/multiarray.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/multiarray.py new file mode 100644 index 00000000..6c37d1da --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/multiarray.py @@ -0,0 +1,6 @@ +from numpy.core import multiarray + +_globals = globals() + +for item in multiarray.__dir__(): + _globals[item] = getattr(multiarray, item) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_core/umath.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/umath.py new file mode 100644 index 00000000..3d08c903 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_core/umath.py @@ -0,0 +1,6 @@ +from numpy.core import umath + +_globals = globals() + +for item in umath.__dir__(): + _globals[item] = getattr(umath, item) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_distributor_init.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_distributor_init.py new file mode 100644 index 00000000..25b0eed7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_distributor_init.py @@ -0,0 +1,15 @@ +""" Distributor init file + +Distributors: you can add custom code here to support particular distributions +of numpy. + +For example, this is a good place to put any BLAS/LAPACK initialization code. + +The numpy standard source distribution will not put code in this file, so you +can safely replace this file with your own version. +""" + +try: + from . import _distributor_init_local +except ImportError: + pass diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_globals.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_globals.py new file mode 100644 index 00000000..416a20f5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_globals.py @@ -0,0 +1,95 @@ +""" +Module defining global singleton classes. + +This module raises a RuntimeError if an attempt to reload it is made. In that +way the identities of the classes defined here are fixed and will remain so +even if numpy itself is reloaded. In particular, a function like the following +will still work correctly after numpy is reloaded:: + + def foo(arg=np._NoValue): + if arg is np._NoValue: + ... + +That was not the case when the singleton classes were defined in the numpy +``__init__.py`` file. See gh-7844 for a discussion of the reload problem that +motivated this module. + +""" +import enum + +from ._utils import set_module as _set_module + +__all__ = ['_NoValue', '_CopyMode'] + + +# Disallow reloading this module so as to preserve the identities of the +# classes defined here. +if '_is_loaded' in globals(): + raise RuntimeError('Reloading numpy._globals is not allowed') +_is_loaded = True + + +class _NoValueType: + """Special keyword value. + + The instance of this class may be used as the default value assigned to a + keyword if no other obvious default (e.g., `None`) is suitable. + + Common reasons for using this keyword are: + + - A new keyword is added to a function, and that function forwards its + inputs to another function or method which can be defined outside of + NumPy. For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims`` + keyword was added that could only be forwarded if the user explicitly + specified ``keepdims``; downstream array libraries may not have added + the same keyword, so adding ``x.std(..., keepdims=keepdims)`` + unconditionally could have broken previously working code. + - A keyword is being deprecated, and a deprecation warning must only be + emitted when the keyword is used.
+ + """ + __instance = None + def __new__(cls): + # ensure that only one instance exists + if not cls.__instance: + cls.__instance = super().__new__(cls) + return cls.__instance + + def __repr__(self): + return "<no value>" + + +_NoValue = _NoValueType() + + +@_set_module("numpy") +class _CopyMode(enum.Enum): + """ + An enumeration for the copy modes supported + by numpy.copy() and numpy.array(). The following three modes are supported: + + - ALWAYS: This means that a deep copy of the input + array will always be taken. + - IF_NEEDED: This means that a deep copy of the input + array will be taken only if necessary. + - NEVER: This means that the deep copy will never be taken. + If a copy cannot be avoided then a `ValueError` will be + raised. + + Note that the buffer-protocol could in theory do copies. NumPy currently + assumes an object exporting the buffer protocol will never do this. + """ + + ALWAYS = True + IF_NEEDED = False + NEVER = 2 + + def __bool__(self): + # For backwards compatibility + if self == _CopyMode.ALWAYS: + return True + + if self == _CopyMode.IF_NEEDED: + return False + + raise ValueError(f"{self} is neither True nor False.") diff --git a/dbdpy-env/lib/python3.9/site-packages/pip/_vendor/html5lib/filters/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/__init__.py similarity index 100% rename from dbdpy-env/lib/python3.9/site-packages/pip/_vendor/html5lib/filters/__init__.py rename to dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/__init__.py diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/hook-numpy.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/hook-numpy.py new file mode 100644 index 00000000..6f24318a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/hook-numpy.py @@ -0,0 +1,37 @@ +"""This hook should collect all binary files and any hidden modules that numpy +needs. + +Our (somewhat inadequate) docs for writing PyInstaller hooks are kept here: +https://pyinstaller.readthedocs.io/en/stable/hooks.html + +""" +from PyInstaller.compat import is_conda, is_pure_conda +from PyInstaller.utils.hooks import collect_dynamic_libs, is_module_satisfies + +# Collect all DLLs inside numpy's installation folder and dump them into the +# built app's root. +binaries = collect_dynamic_libs("numpy", ".") + +# If using Conda without any non-conda virtual environment manager: +if is_pure_conda: + # Assume the running NumPy comes from Conda-forge and collect its DLLs from + # the communal Conda bin directory. DLLs from NumPy's dependencies must also + # be collected to capture MKL, OpenBlas, OpenMP, etc. + from PyInstaller.utils.hooks import conda_support + datas = conda_support.collect_dynamic_libs("numpy", dependencies=True) + +# Submodules PyInstaller cannot detect. `_dtype_ctypes` is only imported +# from C and `_multiarray_tests` is used in tests (which are not packed). +hiddenimports = ['numpy.core._dtype_ctypes', 'numpy.core._multiarray_tests'] + +# Remove testing and building code and packages that are referenced throughout +# NumPy but are not really dependencies.
+excludedimports = [ + "scipy", + "pytest", + "f2py", + "setuptools", + "numpy.f2py", + "distutils", + "numpy.distutils", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/pyinstaller-smoke.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/pyinstaller-smoke.py new file mode 100644 index 00000000..eb28070e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/pyinstaller-smoke.py @@ -0,0 +1,32 @@ +"""A crude *bit of everything* smoke test to verify PyInstaller compatibility. + +PyInstaller typically goes wrong by forgetting to package modules, extension +modules or shared libraries. This script should aim to touch as many of those +as possible in an attempt to trip a ModuleNotFoundError or a DLL load failure +due to an uncollected resource. Missing resources are unlikely to lead to +arithmetic errors so there's generally no need to verify any calculation's +output - merely that it made it to the end OK. This script should not +explicitly import any of numpy's submodules as that gives PyInstaller undue +hints that those submodules exist and should be collected (accessing implicitly +loaded submodules is OK). + +""" +import numpy as np + +a = np.arange(1., 10.).reshape((3, 3)) % 5 +np.linalg.det(a) +a @ a +a @ a.T +np.linalg.inv(a) +np.sin(np.exp(a)) +np.linalg.svd(a) +np.linalg.eigh(a) + +np.unique(np.random.randint(0, 10, 100)) +np.sort(np.random.uniform(0, 10, 100)) + +np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) +np.ma.masked_array(np.arange(10), np.random.rand(10) < .5).sum() +np.polynomial.Legendre([7, 8, 9]).roots() + +print("I made it!") diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/test_pyinstaller.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/test_pyinstaller.py new file mode 100644 index 00000000..a9061da1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_pyinstaller/test_pyinstaller.py @@ -0,0 +1,35 @@ +import subprocess +from pathlib import Path + +import pytest + + +# PyInstaller has been very unproactive about replacing 'imp' with 'importlib'. +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +# It also leaks io.BytesIO()s. +@pytest.mark.filterwarnings('ignore::ResourceWarning') +@pytest.mark.parametrize("mode", ["--onedir", "--onefile"]) +@pytest.mark.slow +def test_pyinstaller(mode, tmp_path): + """Compile and run pyinstaller-smoke.py using PyInstaller.""" + + pyinstaller_cli = pytest.importorskip("PyInstaller.__main__").run + + source = Path(__file__).with_name("pyinstaller-smoke.py").resolve() + args = [ + # Place all generated files in ``tmp_path``. + '--workpath', str(tmp_path / "build"), + '--distpath', str(tmp_path / "dist"), + '--specpath', str(tmp_path), + mode, + str(source), + ] + pyinstaller_cli(args) + + if mode == "--onefile": + exe = tmp_path / "dist" / source.stem + else: + exe = tmp_path / "dist" / source.stem / source.stem + + p = subprocess.run([str(exe)], check=True, stdout=subprocess.PIPE) + assert p.stdout.strip() == b"I made it!" diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_pytesttester.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_pytesttester.py new file mode 100644 index 00000000..1c38291a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_pytesttester.py @@ -0,0 +1,207 @@ +""" +Pytest test running. + +This module implements the ``test()`` function for NumPy modules. 
The usual +boilerplate for doing that is to put the following in the module +``__init__.py`` file:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + +Warnings filtering and other runtime settings should be dealt with in the +``pytest.ini`` file in the numpy repo root. The behavior of the test depends on +whether or not that file is found as follows: + +* ``pytest.ini`` is present (develop mode) + All warnings except those explicitly filtered out are raised as errors. +* ``pytest.ini`` is absent (release mode) + DeprecationWarnings and PendingDeprecationWarnings are ignored, other + warnings are passed through. + +In practice, tests run from the numpy repo are run in develop mode. That +includes the standard ``python runtests.py`` invocation. + +This module is imported by every numpy subpackage, so it lies at the top level +to simplify circular import issues. For the same reason, it contains no numpy +imports at module scope, instead importing numpy within function calls. +""" +import sys +import os + +__all__ = ['PytestTester'] + + +def _show_numpy_info(): + import numpy as np + + print("NumPy version %s" % np.__version__) + relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous + print("NumPy relaxed strides checking option:", relaxed_strides) + info = np.lib.utils._opt_info() + print("NumPy CPU features: ", (info if info else 'nothing enabled')) + + +class PytestTester: + """ + Pytest test runner. + + A test function is typically added to a package's __init__.py like so:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__).test + del PytestTester + + Calling this test function finds and runs all tests associated with the + module and all its sub-modules. + + Attributes + ---------- + module_name : str + Full path to the package to test. + + Parameters + ---------- + module_name : module name + The name of the module to test. + + Notes + ----- + Unlike the previous ``nose``-based implementation, this class is not + publicly exposed as it performs some ``numpy``-specific warning + suppression. + + """ + def __init__(self, module_name): + self.module_name = module_name + + def __call__(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, durations=-1, tests=None): + """ + Run tests for module using pytest. + + Parameters + ---------- + label : {'fast', 'full'}, optional + Identifies the tests to run. When set to 'fast', tests decorated + with `pytest.mark.slow` are skipped; when 'full', the slow marker + is ignored. + verbose : int, optional + Verbosity value for test outputs, in the range 1-3. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to pytest. + doctests : bool, optional + .. note:: Not supported + coverage : bool, optional + If True, report coverage of NumPy code. Default is False. + Requires installation of (pip) pytest-cov. + durations : int, optional + If < 0, do nothing; if 0, report the time of all tests; if > 0, + report the time of the slowest tests. Default is -1. + tests : test or list of tests + Tests to be executed with pytest '--pyargs'. + + Returns + ------- + result : bool + True on success, False otherwise. + + Notes + ----- + Each NumPy module exposes `test` in its namespace to run all tests for + it. For example, to run all tests for numpy.lib: + + >>> np.lib.test() #doctest: +SKIP + + Examples + -------- + >>> result = np.lib.test() #doctest: +SKIP + ...
+ 1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds + >>> result + True + + """ + import pytest + import warnings + + module = sys.modules[self.module_name] + module_path = os.path.abspath(module.__path__[0]) + + # setup the pytest arguments + pytest_args = ["-l"] + + # offset verbosity. The "-q" cancels a "-v". + pytest_args += ["-q"] + + if sys.version_info < (3, 12): + with warnings.catch_warnings(): + warnings.simplefilter("always") + # Filter out distutils cpu warnings (could be localized to + # distutils tests). ASV has problems with top level import, + # so fetch module for suppression here. + from numpy.distutils import cpuinfo + + with warnings.catch_warnings(record=True): + # Ignore the warning from importing the array_api submodule. This + # warning is done on import, so it would break pytest collection, + # but importing it early here prevents the warning from being + # issued when it imported again. + import numpy.array_api + + # Filter out annoying import messages. Want these in both develop and + # release mode. + pytest_args += [ + "-W ignore:Not importing directory", + "-W ignore:numpy.dtype size changed", + "-W ignore:numpy.ufunc size changed", + "-W ignore::UserWarning:cpuinfo", + ] + + # When testing matrices, ignore their PendingDeprecationWarnings + pytest_args += [ + "-W ignore:the matrix subclass is not", + "-W ignore:Importing from numpy.matlib is", + ] + + if doctests: + pytest_args += ["--doctest-modules"] + + if extra_argv: + pytest_args += list(extra_argv) + + if verbose > 1: + pytest_args += ["-" + "v"*(verbose - 1)] + + if coverage: + pytest_args += ["--cov=" + module_path] + + if label == "fast": + # not importing at the top level to avoid circular import of module + from numpy.testing import IS_PYPY + if IS_PYPY: + pytest_args += ["-m", "not slow and not slow_pypy"] + else: + pytest_args += ["-m", "not slow"] + + elif label != "full": + pytest_args += ["-m", label] + + if durations >= 0: + pytest_args += ["--durations=%s" % durations] + + if tests is None: + tests = [self.module_name] + + pytest_args += ["--pyargs"] + list(tests) + + # run tests. + _show_numpy_info() + + try: + code = pytest.main(pytest_args) + except SystemExit as exc: + code = exc.code + + return code == 0 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_pytesttester.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/_pytesttester.pyi new file mode 100644 index 00000000..67ac87b3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_pytesttester.pyi @@ -0,0 +1,18 @@ +from collections.abc import Iterable +from typing import Literal as L + +__all__: list[str] + +class PytestTester: + module_name: str + def __init__(self, module_name: str) -> None: ... + def __call__( + self, + label: L["fast", "full"] = ..., + verbose: int = ..., + extra_argv: None | Iterable[str] = ..., + doctests: L[False] = ..., + coverage: bool = ..., + durations: int = ..., + tests: None | Iterable[str] = ..., + ) -> bool: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/__init__.py new file mode 100644 index 00000000..29922d95 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/__init__.py @@ -0,0 +1,221 @@ +"""Private counterpart of ``numpy.typing``.""" + +from __future__ import annotations + +from .. 
import ufunc +from .._utils import set_module +from typing import TYPE_CHECKING, final + + +@final # Disallow the creation of arbitrary `NBitBase` subclasses +@set_module("numpy.typing") +class NBitBase: + """ + A type representing `numpy.number` precision during static type checking. + + Used exclusively for the purpose of static type checking, `NBitBase` + represents the base of a hierarchical set of subclasses. + Each subsequent subclass is herein used for representing a lower level + of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. + + .. versionadded:: 1.20 + + Examples + -------- + Below is a typical usage example: `NBitBase` is herein used for annotating + a function that takes a float and integer of arbitrary precision + as arguments and returns a new float of whichever precision is largest + (*e.g.* ``np.float16 + np.int64 -> np.float64``). + + .. code-block:: python + + >>> from __future__ import annotations + >>> from typing import TypeVar, TYPE_CHECKING + >>> import numpy as np + >>> import numpy.typing as npt + + >>> T1 = TypeVar("T1", bound=npt.NBitBase) + >>> T2 = TypeVar("T2", bound=npt.NBitBase) + + >>> def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: + ... return a + b + + >>> a = np.float16() + >>> b = np.int64() + >>> out = add(a, b) + + >>> if TYPE_CHECKING: + ... reveal_locals() + ... # note: Revealed local types are: + ... # note: a: numpy.floating[numpy.typing._16Bit*] + ... # note: b: numpy.signedinteger[numpy.typing._64Bit*] + ... # note: out: numpy.floating[numpy.typing._64Bit*] + + """ + + def __init_subclass__(cls) -> None: + allowed_names = { + "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit", + "_64Bit", "_32Bit", "_16Bit", "_8Bit", + } + if cls.__name__ not in allowed_names: + raise TypeError('cannot inherit from final class "NBitBase"') + super().__init_subclass__() + + +# Silence errors about subclassing a `@final`-decorated class +class _256Bit(NBitBase): # type: ignore[misc] + pass + +class _128Bit(_256Bit): # type: ignore[misc] + pass + +class _96Bit(_128Bit): # type: ignore[misc] + pass + +class _80Bit(_96Bit): # type: ignore[misc] + pass + +class _64Bit(_80Bit): # type: ignore[misc] + pass + +class _32Bit(_64Bit): # type: ignore[misc] + pass + +class _16Bit(_32Bit): # type: ignore[misc] + pass + +class _8Bit(_16Bit): # type: ignore[misc] + pass + + +from ._nested_sequence import ( + _NestedSequence as _NestedSequence, +) +from ._nbit import ( + _NBitByte as _NBitByte, + _NBitShort as _NBitShort, + _NBitIntC as _NBitIntC, + _NBitIntP as _NBitIntP, + _NBitInt as _NBitInt, + _NBitLongLong as _NBitLongLong, + _NBitHalf as _NBitHalf, + _NBitSingle as _NBitSingle, + _NBitDouble as _NBitDouble, + _NBitLongDouble as _NBitLongDouble, +) +from ._char_codes import ( + _BoolCodes as _BoolCodes, + _UInt8Codes as _UInt8Codes, + _UInt16Codes as _UInt16Codes, + _UInt32Codes as _UInt32Codes, + _UInt64Codes as _UInt64Codes, + _Int8Codes as _Int8Codes, + _Int16Codes as _Int16Codes, + _Int32Codes as _Int32Codes, + _Int64Codes as _Int64Codes, + _Float16Codes as _Float16Codes, + _Float32Codes as _Float32Codes, + _Float64Codes as _Float64Codes, + _Complex64Codes as _Complex64Codes, + _Complex128Codes as _Complex128Codes, + _ByteCodes as _ByteCodes, + _ShortCodes as _ShortCodes, + _IntCCodes as _IntCCodes, + _IntPCodes as _IntPCodes, + _IntCodes as _IntCodes, + _LongLongCodes as _LongLongCodes, + _UByteCodes as _UByteCodes, + _UShortCodes as _UShortCodes, + _UIntCCodes as _UIntCCodes, + _UIntPCodes as _UIntPCodes, + _UIntCodes as _UIntCodes, +
_ULongLongCodes as _ULongLongCodes, + _HalfCodes as _HalfCodes, + _SingleCodes as _SingleCodes, + _DoubleCodes as _DoubleCodes, + _LongDoubleCodes as _LongDoubleCodes, + _CSingleCodes as _CSingleCodes, + _CDoubleCodes as _CDoubleCodes, + _CLongDoubleCodes as _CLongDoubleCodes, + _DT64Codes as _DT64Codes, + _TD64Codes as _TD64Codes, + _StrCodes as _StrCodes, + _BytesCodes as _BytesCodes, + _VoidCodes as _VoidCodes, + _ObjectCodes as _ObjectCodes, +) +from ._scalars import ( + _CharLike_co as _CharLike_co, + _BoolLike_co as _BoolLike_co, + _UIntLike_co as _UIntLike_co, + _IntLike_co as _IntLike_co, + _FloatLike_co as _FloatLike_co, + _ComplexLike_co as _ComplexLike_co, + _TD64Like_co as _TD64Like_co, + _NumberLike_co as _NumberLike_co, + _ScalarLike_co as _ScalarLike_co, + _VoidLike_co as _VoidLike_co, +) +from ._shape import ( + _Shape as _Shape, + _ShapeLike as _ShapeLike, +) +from ._dtype_like import ( + DTypeLike as DTypeLike, + _DTypeLike as _DTypeLike, + _SupportsDType as _SupportsDType, + _VoidDTypeLike as _VoidDTypeLike, + _DTypeLikeBool as _DTypeLikeBool, + _DTypeLikeUInt as _DTypeLikeUInt, + _DTypeLikeInt as _DTypeLikeInt, + _DTypeLikeFloat as _DTypeLikeFloat, + _DTypeLikeComplex as _DTypeLikeComplex, + _DTypeLikeTD64 as _DTypeLikeTD64, + _DTypeLikeDT64 as _DTypeLikeDT64, + _DTypeLikeObject as _DTypeLikeObject, + _DTypeLikeVoid as _DTypeLikeVoid, + _DTypeLikeStr as _DTypeLikeStr, + _DTypeLikeBytes as _DTypeLikeBytes, + _DTypeLikeComplex_co as _DTypeLikeComplex_co, +) +from ._array_like import ( + NDArray as NDArray, + ArrayLike as ArrayLike, + _ArrayLike as _ArrayLike, + _FiniteNestedSequence as _FiniteNestedSequence, + _SupportsArray as _SupportsArray, + _SupportsArrayFunc as _SupportsArrayFunc, + _ArrayLikeInt as _ArrayLikeInt, + _ArrayLikeBool_co as _ArrayLikeBool_co, + _ArrayLikeUInt_co as _ArrayLikeUInt_co, + _ArrayLikeInt_co as _ArrayLikeInt_co, + _ArrayLikeFloat_co as _ArrayLikeFloat_co, + _ArrayLikeComplex_co as _ArrayLikeComplex_co, + _ArrayLikeNumber_co as _ArrayLikeNumber_co, + _ArrayLikeTD64_co as _ArrayLikeTD64_co, + _ArrayLikeDT64_co as _ArrayLikeDT64_co, + _ArrayLikeObject_co as _ArrayLikeObject_co, + _ArrayLikeVoid_co as _ArrayLikeVoid_co, + _ArrayLikeStr_co as _ArrayLikeStr_co, + _ArrayLikeBytes_co as _ArrayLikeBytes_co, + _ArrayLikeUnknown as _ArrayLikeUnknown, + _UnknownType as _UnknownType, +) + +if TYPE_CHECKING: + from ._ufunc import ( + _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1, + _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, + _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, + _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, + ) +else: + # Declare the (type-check-only) ufunc subclasses as ufunc aliases during + # runtime; this helps autocompletion tools such as Jedi (numpy/numpy#19834) + _UFunc_Nin1_Nout1 = ufunc + _UFunc_Nin2_Nout1 = ufunc + _UFunc_Nin1_Nout2 = ufunc + _UFunc_Nin2_Nout2 = ufunc + _GUFunc_Nin2_Nout1 = ufunc diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_add_docstring.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_add_docstring.py new file mode 100644 index 00000000..f84d1927 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_add_docstring.py @@ -0,0 +1,152 @@ +"""A module for creating docstrings for sphinx ``data`` domains.""" + +import re +import textwrap + +from ._array_like import NDArray + +_docstrings_list = [] + + +def add_newdoc(name: str, value: str, doc: str) -> None: + """Append ``_docstrings_list`` with a docstring for `name`. 
+ + Parameters + ---------- + name : str + The name of the object. + value : str + A string-representation of the object. + doc : str + The docstring of the object. + + """ + _docstrings_list.append((name, value, doc)) + + +def _parse_docstrings() -> str: + """Convert all docstrings in ``_docstrings_list`` into a single + sphinx-legible text block. + + """ + type_list_ret = [] + for name, value, doc in _docstrings_list: + s = textwrap.dedent(doc).replace("\n", "\n    ") + + # Replace sections by rubrics + lines = s.split("\n") + new_lines = [] + indent = "" + for line in lines: + m = re.match(r'^(\s+)[-=]+\s*$', line) + if m and new_lines: + prev = textwrap.dedent(new_lines.pop()) + if prev == "Examples": + indent = "" + new_lines.append(f'{m.group(1)}.. rubric:: {prev}') + else: + indent = 4 * " " + new_lines.append(f'{m.group(1)}.. admonition:: {prev}') + new_lines.append("") + else: + new_lines.append(f"{indent}{line}") + + s = "\n".join(new_lines) + s_block = f""".. data:: {name}\n    :value: {value}\n    {s}""" + type_list_ret.append(s_block) + return "\n".join(type_list_ret) + + +add_newdoc('ArrayLike', 'typing.Union[...]', + """ + A `~typing.Union` representing objects that can be coerced + into an `~numpy.ndarray`. + + Among others this includes the likes of: + + * Scalars. + * (Nested) sequences. + * Objects implementing the `~class.__array__` protocol. + + .. versionadded:: 1.20 + + See Also + -------- + :term:`array_like`: + Any scalar or sequence that can be interpreted as an ndarray. + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> def as_array(a: npt.ArrayLike) -> np.ndarray: + ... return np.array(a) + + """) + +add_newdoc('DTypeLike', 'typing.Union[...]', + """ + A `~typing.Union` representing objects that can be coerced + into a `~numpy.dtype`. + + Among others this includes the likes of: + + * :class:`type` objects. + * Character codes or the names of :class:`type` objects. + * Objects with the ``.dtype`` attribute. + + .. versionadded:: 1.20 + + See Also + -------- + :ref:`Specifying and constructing data types <arrays.dtypes.constructing>` + A comprehensive overview of all objects that can be coerced + into data types. + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> def as_dtype(d: npt.DTypeLike) -> np.dtype: + ... return np.dtype(d) + + """) + +add_newdoc('NDArray', repr(NDArray), + """ + A :term:`generic <generic type>` version of + `np.ndarray[Any, np.dtype[+ScalarType]] <numpy.ndarray>`. + + Can be used during runtime for typing arrays with a given dtype + and unspecified shape. + + .. versionadded:: 1.21 + + Examples + -------- + .. code-block:: python + + >>> import numpy as np + >>> import numpy.typing as npt + + >>> print(npt.NDArray) + numpy.ndarray[typing.Any, numpy.dtype[+ScalarType]] + + >>> print(npt.NDArray[np.float64]) + numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]] + + >>> NDArrayInt = npt.NDArray[np.int_] + >>> a: NDArrayInt = np.arange(10) + + >>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]: + ...
return np.array(a) + + """) + +_docstrings = _parse_docstrings() diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_array_like.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_array_like.py new file mode 100644 index 00000000..883e817d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_array_like.py @@ -0,0 +1,167 @@ +from __future__ import annotations + +import sys +from collections.abc import Collection, Callable, Sequence +from typing import Any, Protocol, Union, TypeVar, runtime_checkable + +from numpy import ( + ndarray, + dtype, + generic, + bool_, + unsignedinteger, + integer, + floating, + complexfloating, + number, + timedelta64, + datetime64, + object_, + void, + str_, + bytes_, +) +from ._nested_sequence import _NestedSequence + +_T = TypeVar("_T") +_ScalarType = TypeVar("_ScalarType", bound=generic) +_ScalarType_co = TypeVar("_ScalarType_co", bound=generic, covariant=True) +_DType = TypeVar("_DType", bound=dtype[Any]) +_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) + +NDArray = ndarray[Any, dtype[_ScalarType_co]] + +# The `_SupportsArray` protocol only cares about the default dtype +# (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned +# array. +# Concrete implementations of the protocol are responsible for adding +# any and all remaining overloads +@runtime_checkable +class _SupportsArray(Protocol[_DType_co]): + def __array__(self) -> ndarray[Any, _DType_co]: ... + + +@runtime_checkable +class _SupportsArrayFunc(Protocol): + """A protocol class representing `~class.__array_function__`.""" + def __array_function__( + self, + func: Callable[..., Any], + types: Collection[type[Any]], + args: tuple[Any, ...], + kwargs: dict[str, Any], + ) -> object: ... + + +# TODO: Wait until mypy supports recursive objects in combination with typevars +_FiniteNestedSequence = Union[ + _T, + Sequence[_T], + Sequence[Sequence[_T]], + Sequence[Sequence[Sequence[_T]]], + Sequence[Sequence[Sequence[Sequence[_T]]]], +] + +# A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` +_ArrayLike = Union[ + _SupportsArray[dtype[_ScalarType]], + _NestedSequence[_SupportsArray[dtype[_ScalarType]]], +] + +# A union representing array-like objects; consists of two typevars: +# One representing types that can be parametrized w.r.t. 
`np.dtype` +# and another one for the rest +_DualArrayLike = Union[ + _SupportsArray[_DType], + _NestedSequence[_SupportsArray[_DType]], + _T, + _NestedSequence[_T], +] + +if sys.version_info >= (3, 12): + from collections.abc import Buffer + + ArrayLike = Buffer | _DualArrayLike[ + dtype[Any], + Union[bool, int, float, complex, str, bytes], + ] +else: + ArrayLike = _DualArrayLike[ + dtype[Any], + Union[bool, int, float, complex, str, bytes], + ] + +# `ArrayLike_co`: array-like objects that can be coerced into `X` +# given the casting rules `same_kind` +_ArrayLikeBool_co = _DualArrayLike[ + dtype[bool_], + bool, +] +_ArrayLikeUInt_co = _DualArrayLike[ + dtype[Union[bool_, unsignedinteger[Any]]], + bool, +] +_ArrayLikeInt_co = _DualArrayLike[ + dtype[Union[bool_, integer[Any]]], + Union[bool, int], +] +_ArrayLikeFloat_co = _DualArrayLike[ + dtype[Union[bool_, integer[Any], floating[Any]]], + Union[bool, int, float], +] +_ArrayLikeComplex_co = _DualArrayLike[ + dtype[Union[ + bool_, + integer[Any], + floating[Any], + complexfloating[Any, Any], + ]], + Union[bool, int, float, complex], +] +_ArrayLikeNumber_co = _DualArrayLike[ + dtype[Union[bool_, number[Any]]], + Union[bool, int, float, complex], +] +_ArrayLikeTD64_co = _DualArrayLike[ + dtype[Union[bool_, integer[Any], timedelta64]], + Union[bool, int], +] +_ArrayLikeDT64_co = Union[ + _SupportsArray[dtype[datetime64]], + _NestedSequence[_SupportsArray[dtype[datetime64]]], +] +_ArrayLikeObject_co = Union[ + _SupportsArray[dtype[object_]], + _NestedSequence[_SupportsArray[dtype[object_]]], +] + +_ArrayLikeVoid_co = Union[ + _SupportsArray[dtype[void]], + _NestedSequence[_SupportsArray[dtype[void]]], +] +_ArrayLikeStr_co = _DualArrayLike[ + dtype[str_], + str, +] +_ArrayLikeBytes_co = _DualArrayLike[ + dtype[bytes_], + bytes, +] + +_ArrayLikeInt = _DualArrayLike[ + dtype[integer[Any]], + int, +] + +# Extra ArrayLike type so that pyright can deal with NDArray[Any] +# Used as the first overload, should only match NDArray[Any], +# not any actual types. +# https://github.com/numpy/numpy/pull/22193 +class _UnknownType: + ... + + +_ArrayLikeUnknown = _DualArrayLike[ + dtype[_UnknownType], + _UnknownType, +] diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_callable.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_callable.pyi new file mode 100644 index 00000000..ee818e90 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_callable.pyi @@ -0,0 +1,338 @@ +""" +A module with various ``typing.Protocol`` subclasses that implement +the ``__call__`` magic method. + +See the `Mypy documentation`_ on protocols for more details. + +.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols + +""" + +from __future__ import annotations + +from typing import ( + TypeVar, + overload, + Any, + NoReturn, + Protocol, +) + +from numpy import ( + ndarray, + dtype, + generic, + bool_, + timedelta64, + number, + integer, + unsignedinteger, + signedinteger, + int8, + int_, + floating, + float64, + complexfloating, + complex128, +) +from ._nbit import _NBitInt, _NBitDouble +from ._scalars import ( + _BoolLike_co, + _IntLike_co, + _FloatLike_co, + _NumberLike_co, +) +from . 
import NBitBase +from ._array_like import NDArray +from ._nested_sequence import _NestedSequence + +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T1_contra = TypeVar("_T1_contra", contravariant=True) +_T2_contra = TypeVar("_T2_contra", contravariant=True) +_2Tuple = tuple[_T1, _T1] + +_NBit1 = TypeVar("_NBit1", bound=NBitBase) +_NBit2 = TypeVar("_NBit2", bound=NBitBase) + +_IntType = TypeVar("_IntType", bound=integer) +_FloatType = TypeVar("_FloatType", bound=floating) +_NumberType = TypeVar("_NumberType", bound=number) +_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number) +_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic) + +class _BoolOp(Protocol[_GenericType_co]): + @overload + def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: _NumberType, /) -> _NumberType: ... + +class _BoolBitOp(Protocol[_GenericType_co]): + @overload + def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: _IntType, /) -> _IntType: ... + +class _BoolSub(Protocol): + # Note that `other: bool_` is absent here + @overload + def __call__(self, other: bool, /) -> NoReturn: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: _NumberType, /) -> _NumberType: ... + +class _BoolTrueDiv(Protocol): + @overload + def __call__(self, other: float | _IntLike_co, /) -> float64: ... + @overload + def __call__(self, other: complex, /) -> complex128: ... + @overload + def __call__(self, other: _NumberType, /) -> _NumberType: ... + +class _BoolMod(Protocol): + @overload + def __call__(self, other: _BoolLike_co, /) -> int8: ... + @overload # platform dependent + def __call__(self, other: int, /) -> int_: ... + @overload + def __call__(self, other: float, /) -> float64: ... + @overload + def __call__(self, other: _IntType, /) -> _IntType: ... + @overload + def __call__(self, other: _FloatType, /) -> _FloatType: ... + +class _BoolDivMod(Protocol): + @overload + def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ... + @overload # platform dependent + def __call__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... + @overload + def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ... + @overload + def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ... + +class _TD64Div(Protocol[_NumberType_co]): + @overload + def __call__(self, other: timedelta64, /) -> _NumberType_co: ... + @overload + def __call__(self, other: _BoolLike_co, /) -> NoReturn: ... + @overload + def __call__(self, other: _FloatLike_co, /) -> timedelta64: ... + +class _IntTrueDiv(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> floating[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... 
+ @overload + def __call__( + self, other: complex, /, + ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + @overload + def __call__(self, other: integer[_NBit2], /) -> floating[_NBit1 | _NBit2]: ... + +class _UnsignedIntOp(Protocol[_NBit1]): + # NOTE: `uint64 + signedinteger -> float64` + @overload + def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... + @overload + def __call__( + self, other: int | signedinteger[Any], / + ) -> Any: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: complex, /, + ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: unsignedinteger[_NBit2], / + ) -> unsignedinteger[_NBit1 | _NBit2]: ... + +class _UnsignedIntBitOp(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> signedinteger[Any]: ... + @overload + def __call__(self, other: signedinteger[Any], /) -> signedinteger[Any]: ... + @overload + def __call__( + self, other: unsignedinteger[_NBit2], / + ) -> unsignedinteger[_NBit1 | _NBit2]: ... + +class _UnsignedIntMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... + @overload + def __call__( + self, other: int | signedinteger[Any], / + ) -> Any: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: unsignedinteger[_NBit2], / + ) -> unsignedinteger[_NBit1 | _NBit2]: ... + +class _UnsignedIntDivMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... + @overload + def __call__( + self, other: int | signedinteger[Any], / + ) -> _2Tuple[Any]: ... + @overload + def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... + @overload + def __call__( + self, other: unsignedinteger[_NBit2], / + ) -> _2Tuple[unsignedinteger[_NBit1 | _NBit2]]: ... + +class _SignedIntOp(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: complex, /, + ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: signedinteger[_NBit2], /, + ) -> signedinteger[_NBit1 | _NBit2]: ... + +class _SignedIntBitOp(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ... + @overload + def __call__( + self, other: signedinteger[_NBit2], /, + ) -> signedinteger[_NBit1 | _NBit2]: ... + +class _SignedIntMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: signedinteger[_NBit2], /, + ) -> signedinteger[_NBit1 | _NBit2]: ... + +class _SignedIntDivMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... 
+ @overload + def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1 | _NBitInt]]: ... + @overload + def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... + @overload + def __call__( + self, other: signedinteger[_NBit2], /, + ) -> _2Tuple[signedinteger[_NBit1 | _NBit2]]: ... + +class _FloatOp(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> floating[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: complex, /, + ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: integer[_NBit2] | floating[_NBit2], / + ) -> floating[_NBit1 | _NBit2]: ... + +class _FloatMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> floating[_NBit1]: ... + @overload + def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ... + @overload + def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, other: integer[_NBit2] | floating[_NBit2], / + ) -> floating[_NBit1 | _NBit2]: ... + +class _FloatDivMod(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ... + @overload + def __call__(self, other: int, /) -> _2Tuple[floating[_NBit1 | _NBitInt]]: ... + @overload + def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... + @overload + def __call__( + self, other: integer[_NBit2] | floating[_NBit2], / + ) -> _2Tuple[floating[_NBit1 | _NBit2]]: ... + +class _ComplexOp(Protocol[_NBit1]): + @overload + def __call__(self, other: bool, /) -> complexfloating[_NBit1, _NBit1]: ... + @overload + def __call__(self, other: int, /) -> complexfloating[_NBit1 | _NBitInt, _NBit1 | _NBitInt]: ... + @overload + def __call__( + self, other: complex, /, + ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + @overload + def __call__( + self, + other: ( + integer[_NBit2] + | floating[_NBit2] + | complexfloating[_NBit2, _NBit2] + ), /, + ) -> complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]: ... + +class _NumberOp(Protocol): + def __call__(self, other: _NumberLike_co, /) -> Any: ... + +class _SupportsLT(Protocol): + def __lt__(self, other: Any, /) -> object: ... + +class _SupportsGT(Protocol): + def __gt__(self, other: Any, /) -> object: ... + +class _ComparisonOp(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> bool_: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[bool_]: ... + @overload + def __call__( + self, + other: _SupportsLT | _SupportsGT | _NestedSequence[_SupportsLT | _SupportsGT], + /, + ) -> Any: ... 
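Each of the stubs above is a "callback protocol": a ``typing.Protocol`` subclass whose only member is ``__call__``, so any callable with a structurally matching signature satisfies it without inheriting from it. A minimal runnable sketch of the pattern (the names below are illustrative, not taken from NumPy):

.. code-block:: python

    from typing import Protocol

    class _AddsOne(Protocol):
        # Satisfied by *any* callable with this signature; no inheritance needed.
        def __call__(self, other: int, /) -> int: ...

    def apply(op: _AddsOne, value: int) -> int:
        return op(value)

    print(apply(lambda other: other + 1, 41))  # 42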
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_char_codes.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_char_codes.py new file mode 100644 index 00000000..f840d17b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_char_codes.py @@ -0,0 +1,111 @@ +from typing import Literal + +_BoolCodes = Literal["?", "=?", "?", "bool", "bool_", "bool8"] + +_UInt8Codes = Literal["uint8", "u1", "=u1", "u1"] +_UInt16Codes = Literal["uint16", "u2", "=u2", "u2"] +_UInt32Codes = Literal["uint32", "u4", "=u4", "u4"] +_UInt64Codes = Literal["uint64", "u8", "=u8", "u8"] + +_Int8Codes = Literal["int8", "i1", "=i1", "i1"] +_Int16Codes = Literal["int16", "i2", "=i2", "i2"] +_Int32Codes = Literal["int32", "i4", "=i4", "i4"] +_Int64Codes = Literal["int64", "i8", "=i8", "i8"] + +_Float16Codes = Literal["float16", "f2", "=f2", "f2"] +_Float32Codes = Literal["float32", "f4", "=f4", "f4"] +_Float64Codes = Literal["float64", "f8", "=f8", "f8"] + +_Complex64Codes = Literal["complex64", "c8", "=c8", "c8"] +_Complex128Codes = Literal["complex128", "c16", "=c16", "c16"] + +_ByteCodes = Literal["byte", "b", "=b", "b"] +_ShortCodes = Literal["short", "h", "=h", "h"] +_IntCCodes = Literal["intc", "i", "=i", "i"] +_IntPCodes = Literal["intp", "int0", "p", "=p", "p"] +_IntCodes = Literal["long", "int", "int_", "l", "=l", "l"] +_LongLongCodes = Literal["longlong", "q", "=q", "q"] + +_UByteCodes = Literal["ubyte", "B", "=B", "B"] +_UShortCodes = Literal["ushort", "H", "=H", "H"] +_UIntCCodes = Literal["uintc", "I", "=I", "I"] +_UIntPCodes = Literal["uintp", "uint0", "P", "=P", "P"] +_UIntCodes = Literal["ulong", "uint", "L", "=L", "L"] +_ULongLongCodes = Literal["ulonglong", "Q", "=Q", "Q"] + +_HalfCodes = Literal["half", "e", "=e", "e"] +_SingleCodes = Literal["single", "f", "=f", "f"] +_DoubleCodes = Literal["double", "float", "float_", "d", "=d", "d"] +_LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "g"] + +_CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "F"] +_CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "D"] +_CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "G"] + +_StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "U"] +_BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "S"] +_VoidCodes = Literal["void", "void0", "V", "=V", "V"] +_ObjectCodes = Literal["object", "object_", "O", "=O", "O"] + +_DT64Codes = Literal[ + "datetime64", "=datetime64", "datetime64", + "datetime64[Y]", "=datetime64[Y]", "datetime64[Y]", + "datetime64[M]", "=datetime64[M]", "datetime64[M]", + "datetime64[W]", "=datetime64[W]", "datetime64[W]", + "datetime64[D]", "=datetime64[D]", "datetime64[D]", + "datetime64[h]", "=datetime64[h]", "datetime64[h]", + "datetime64[m]", "=datetime64[m]", "datetime64[m]", + "datetime64[s]", "=datetime64[s]", "datetime64[s]", + "datetime64[ms]", "=datetime64[ms]", "datetime64[ms]", + "datetime64[us]", "=datetime64[us]", "datetime64[us]", + "datetime64[ns]", "=datetime64[ns]", "datetime64[ns]", + "datetime64[ps]", "=datetime64[ps]", "datetime64[ps]", + "datetime64[fs]", "=datetime64[fs]", "datetime64[fs]", + "datetime64[as]", "=datetime64[as]", "datetime64[as]", + "M", "=M", "M", + "M8", "=M8", "M8", + "M8[Y]", "=M8[Y]", "M8[Y]", + "M8[M]", "=M8[M]", "M8[M]", + "M8[W]", "=M8[W]", "M8[W]", + "M8[D]", "=M8[D]", "M8[D]", + "M8[h]", "=M8[h]", "M8[h]", + "M8[m]", "=M8[m]", "M8[m]", + "M8[s]", "=M8[s]", "M8[s]", + "M8[ms]", "=M8[ms]", "M8[ms]", + 
"M8[us]", "=M8[us]", "M8[us]", + "M8[ns]", "=M8[ns]", "M8[ns]", + "M8[ps]", "=M8[ps]", "M8[ps]", + "M8[fs]", "=M8[fs]", "M8[fs]", + "M8[as]", "=M8[as]", "M8[as]", +] +_TD64Codes = Literal[ + "timedelta64", "=timedelta64", "timedelta64", + "timedelta64[Y]", "=timedelta64[Y]", "timedelta64[Y]", + "timedelta64[M]", "=timedelta64[M]", "timedelta64[M]", + "timedelta64[W]", "=timedelta64[W]", "timedelta64[W]", + "timedelta64[D]", "=timedelta64[D]", "timedelta64[D]", + "timedelta64[h]", "=timedelta64[h]", "timedelta64[h]", + "timedelta64[m]", "=timedelta64[m]", "timedelta64[m]", + "timedelta64[s]", "=timedelta64[s]", "timedelta64[s]", + "timedelta64[ms]", "=timedelta64[ms]", "timedelta64[ms]", + "timedelta64[us]", "=timedelta64[us]", "timedelta64[us]", + "timedelta64[ns]", "=timedelta64[ns]", "timedelta64[ns]", + "timedelta64[ps]", "=timedelta64[ps]", "timedelta64[ps]", + "timedelta64[fs]", "=timedelta64[fs]", "timedelta64[fs]", + "timedelta64[as]", "=timedelta64[as]", "timedelta64[as]", + "m", "=m", "m", + "m8", "=m8", "m8", + "m8[Y]", "=m8[Y]", "m8[Y]", + "m8[M]", "=m8[M]", "m8[M]", + "m8[W]", "=m8[W]", "m8[W]", + "m8[D]", "=m8[D]", "m8[D]", + "m8[h]", "=m8[h]", "m8[h]", + "m8[m]", "=m8[m]", "m8[m]", + "m8[s]", "=m8[s]", "m8[s]", + "m8[ms]", "=m8[ms]", "m8[ms]", + "m8[us]", "=m8[us]", "m8[us]", + "m8[ns]", "=m8[ns]", "m8[ns]", + "m8[ps]", "=m8[ps]", "m8[ps]", + "m8[fs]", "=m8[fs]", "m8[fs]", + "m8[as]", "=m8[as]", "m8[as]", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_dtype_like.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_dtype_like.py new file mode 100644 index 00000000..207a99c5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_dtype_like.py @@ -0,0 +1,246 @@ +from collections.abc import Sequence +from typing import ( + Any, + Sequence, + Union, + TypeVar, + Protocol, + TypedDict, + runtime_checkable, +) + +import numpy as np + +from ._shape import _ShapeLike + +from ._char_codes import ( + _BoolCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Complex64Codes, + _Complex128Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _IntCodes, + _LongLongCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _UIntCodes, + _ULongLongCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, + _DT64Codes, + _TD64Codes, + _StrCodes, + _BytesCodes, + _VoidCodes, + _ObjectCodes, +) + +_SCT = TypeVar("_SCT", bound=np.generic) +_DType_co = TypeVar("_DType_co", covariant=True, bound=np.dtype[Any]) + +_DTypeLikeNested = Any # TODO: wait for support for recursive types + + +# Mandatory keys +class _DTypeDictBase(TypedDict): + names: Sequence[str] + formats: Sequence[_DTypeLikeNested] + + +# Mandatory + optional keys +class _DTypeDict(_DTypeDictBase, total=False): + # Only `str` elements are usable as indexing aliases, + # but `titles` can in principle accept any object + offsets: Sequence[int] + titles: Sequence[Any] + itemsize: int + aligned: bool + + +# A protocol for anything with the dtype attribute +@runtime_checkable +class _SupportsDType(Protocol[_DType_co]): + @property + def dtype(self) -> _DType_co: ... + + +# A subset of `npt.DTypeLike` that can be parametrized w.r.t. 
`np.generic` +_DTypeLike = Union[ + np.dtype[_SCT], + type[_SCT], + _SupportsDType[np.dtype[_SCT]], +] + + +# Would create a dtype[np.void] +_VoidDTypeLike = Union[ + # (flexible_dtype, itemsize) + tuple[_DTypeLikeNested, int], + # (fixed_dtype, shape) + tuple[_DTypeLikeNested, _ShapeLike], + # [(field_name, field_dtype, field_shape), ...] + # + # The type here is quite broad because NumPy accepts quite a wide + # range of inputs inside the list; see the tests for some + # examples. + list[Any], + # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ..., + # 'itemsize': ...} + _DTypeDict, + # (base_dtype, new_dtype) + tuple[_DTypeLikeNested, _DTypeLikeNested], +] + +# Anything that can be coerced into numpy.dtype. +# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html +DTypeLike = Union[ + np.dtype[Any], + # default data type (float64) + None, + # array-scalar types and generic types + type[Any], # NOTE: We're stuck with `type[Any]` due to object dtypes + # anything with a dtype attribute + _SupportsDType[np.dtype[Any]], + # character codes, type strings or comma-separated fields, e.g., 'float64' + str, + _VoidDTypeLike, +] + +# NOTE: while it is possible to provide the dtype as a dict of +# dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`), +# this syntax is officially discourged and +# therefore not included in the Union defining `DTypeLike`. +# +# See https://github.com/numpy/numpy/issues/16891 for more details. + +# Aliases for commonly used dtype-like objects. +# Note that the precision of `np.number` subclasses is ignored herein. +_DTypeLikeBool = Union[ + type[bool], + type[np.bool_], + np.dtype[np.bool_], + _SupportsDType[np.dtype[np.bool_]], + _BoolCodes, +] +_DTypeLikeUInt = Union[ + type[np.unsignedinteger], + np.dtype[np.unsignedinteger], + _SupportsDType[np.dtype[np.unsignedinteger]], + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _UIntCodes, + _ULongLongCodes, +] +_DTypeLikeInt = Union[ + type[int], + type[np.signedinteger], + np.dtype[np.signedinteger], + _SupportsDType[np.dtype[np.signedinteger]], + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _IntCodes, + _LongLongCodes, +] +_DTypeLikeFloat = Union[ + type[float], + type[np.floating], + np.dtype[np.floating], + _SupportsDType[np.dtype[np.floating]], + _Float16Codes, + _Float32Codes, + _Float64Codes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, +] +_DTypeLikeComplex = Union[ + type[complex], + type[np.complexfloating], + np.dtype[np.complexfloating], + _SupportsDType[np.dtype[np.complexfloating]], + _Complex64Codes, + _Complex128Codes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, +] +_DTypeLikeDT64 = Union[ + type[np.timedelta64], + np.dtype[np.timedelta64], + _SupportsDType[np.dtype[np.timedelta64]], + _TD64Codes, +] +_DTypeLikeTD64 = Union[ + type[np.datetime64], + np.dtype[np.datetime64], + _SupportsDType[np.dtype[np.datetime64]], + _DT64Codes, +] +_DTypeLikeStr = Union[ + type[str], + type[np.str_], + np.dtype[np.str_], + _SupportsDType[np.dtype[np.str_]], + _StrCodes, +] +_DTypeLikeBytes = Union[ + type[bytes], + type[np.bytes_], + np.dtype[np.bytes_], + _SupportsDType[np.dtype[np.bytes_]], + _BytesCodes, +] +_DTypeLikeVoid = Union[ + type[np.void], + np.dtype[np.void], + _SupportsDType[np.dtype[np.void]], + _VoidCodes, + _VoidDTypeLike, +] +_DTypeLikeObject = Union[ + type, + np.dtype[np.object_], + 
_SupportsDType[np.dtype[np.object_]], + _ObjectCodes, +] + +_DTypeLikeComplex_co = Union[ + _DTypeLikeBool, + _DTypeLikeUInt, + _DTypeLikeInt, + _DTypeLikeFloat, + _DTypeLikeComplex, +] diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_extended_precision.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_extended_precision.py new file mode 100644 index 00000000..7246b47d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_extended_precision.py @@ -0,0 +1,27 @@ +"""A module with platform-specific extended precision +`numpy.number` subclasses. + +The subclasses are defined here (instead of ``__init__.pyi``) such +that they can be imported conditionally via the numpy's mypy plugin. +""" + +import numpy as np +from . import ( + _80Bit, + _96Bit, + _128Bit, + _256Bit, +) + +uint128 = np.unsignedinteger[_128Bit] +uint256 = np.unsignedinteger[_256Bit] +int128 = np.signedinteger[_128Bit] +int256 = np.signedinteger[_256Bit] +float80 = np.floating[_80Bit] +float96 = np.floating[_96Bit] +float128 = np.floating[_128Bit] +float256 = np.floating[_256Bit] +complex160 = np.complexfloating[_80Bit, _80Bit] +complex192 = np.complexfloating[_96Bit, _96Bit] +complex256 = np.complexfloating[_128Bit, _128Bit] +complex512 = np.complexfloating[_256Bit, _256Bit] diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_nbit.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_nbit.py new file mode 100644 index 00000000..b8d35db4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_nbit.py @@ -0,0 +1,16 @@ +"""A module with the precisions of platform-specific `~numpy.number`s.""" + +from typing import Any + +# To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin +_NBitByte = Any +_NBitShort = Any +_NBitIntC = Any +_NBitIntP = Any +_NBitInt = Any +_NBitLongLong = Any + +_NBitHalf = Any +_NBitSingle = Any +_NBitDouble = Any +_NBitLongDouble = Any diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_nested_sequence.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_nested_sequence.py new file mode 100644 index 00000000..3d0d25ae --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_nested_sequence.py @@ -0,0 +1,86 @@ +"""A module containing the `_NestedSequence` protocol.""" + +from __future__ import annotations + +from collections.abc import Iterator +from typing import ( + Any, + TypeVar, + Protocol, + runtime_checkable, +) + +__all__ = ["_NestedSequence"] + +_T_co = TypeVar("_T_co", covariant=True) + + +@runtime_checkable +class _NestedSequence(Protocol[_T_co]): + """A protocol for representing nested sequences. + + Warning + ------- + `_NestedSequence` currently does not work in combination with typevars, + *e.g.* ``def func(a: _NestedSequnce[T]) -> T: ...``. + + See Also + -------- + collections.abc.Sequence + ABCs for read-only and mutable :term:`sequences`. + + Examples + -------- + .. code-block:: python + + >>> from __future__ import annotations + + >>> from typing import TYPE_CHECKING + >>> import numpy as np + >>> from numpy._typing import _NestedSequence + + >>> def get_dtype(seq: _NestedSequence[float]) -> np.dtype[np.float64]: + ... return np.asarray(seq).dtype + + >>> a = get_dtype([1.0]) + >>> b = get_dtype([[1.0]]) + >>> c = get_dtype([[[1.0]]]) + >>> d = get_dtype([[[[1.0]]]]) + + >>> if TYPE_CHECKING: + ... reveal_locals() + ... # note: Revealed local types are: + ... # note: a: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... 
# note: b: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... # note: c: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + ... # note: d: numpy.dtype[numpy.floating[numpy._typing._64Bit]] + + """ + + def __len__(self, /) -> int: + """Implement ``len(self)``.""" + raise NotImplementedError + + def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: + """Implement ``self[x]``.""" + raise NotImplementedError + + def __contains__(self, x: object, /) -> bool: + """Implement ``x in self``.""" + raise NotImplementedError + + def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: + """Implement ``iter(self)``.""" + raise NotImplementedError + + def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: + """Implement ``reversed(self)``.""" + raise NotImplementedError + + def count(self, value: Any, /) -> int: + """Return the number of occurrences of `value`.""" + raise NotImplementedError + + def index(self, value: Any, /) -> int: + """Return the first index of `value`.""" + raise NotImplementedError diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_scalars.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_scalars.py new file mode 100644 index 00000000..e46ff04a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_scalars.py @@ -0,0 +1,30 @@ +from typing import Union, Any + +import numpy as np + +# NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and +# `np.bytes_` are already subclasses of their builtin counterpart + +_CharLike_co = Union[str, bytes] + +# The 6 `Like_co` type-aliases below represent all scalars that can be +# coerced into `` (with the casting rule `same_kind`) +_BoolLike_co = Union[bool, np.bool_] +_UIntLike_co = Union[_BoolLike_co, np.unsignedinteger[Any]] +_IntLike_co = Union[_BoolLike_co, int, np.integer[Any]] +_FloatLike_co = Union[_IntLike_co, float, np.floating[Any]] +_ComplexLike_co = Union[_FloatLike_co, complex, np.complexfloating[Any, Any]] +_TD64Like_co = Union[_IntLike_co, np.timedelta64] + +_NumberLike_co = Union[int, float, complex, np.number[Any], np.bool_] +_ScalarLike_co = Union[ + int, + float, + complex, + str, + bytes, + np.generic, +] + +# `_VoidLike_co` is technically not a scalar, but it's close enough +_VoidLike_co = Union[tuple[Any, ...], np.void] diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_shape.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_shape.py new file mode 100644 index 00000000..4f1204e4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_shape.py @@ -0,0 +1,7 @@ +from collections.abc import Sequence +from typing import Union, SupportsIndex + +_Shape = tuple[int, ...] + +# Anything that can be coerced to a shape tuple +_ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]] diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_ufunc.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_ufunc.pyi new file mode 100644 index 00000000..9f8e0d4e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_ufunc.pyi @@ -0,0 +1,445 @@ +"""A module with private type-check-only `numpy.ufunc` subclasses. + +The signatures of the ufuncs are too varied to reasonably type +with a single class. So instead, `ufunc` has been expanded into +four private subclasses, one for each combination of +`~ufunc.nin` and `~ufunc.nout`. 
+ +""" + +from typing import ( + Any, + Generic, + overload, + TypeVar, + Literal, + SupportsIndex, + Protocol, +) + +from numpy import ufunc, _CastingKind, _OrderKACF +from numpy.typing import NDArray + +from ._shape import _ShapeLike +from ._scalars import _ScalarLike_co +from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co +from ._dtype_like import DTypeLike + +_T = TypeVar("_T") +_2Tuple = tuple[_T, _T] +_3Tuple = tuple[_T, _T, _T] +_4Tuple = tuple[_T, _T, _T, _T] + +_NTypes = TypeVar("_NTypes", bound=int) +_IDType = TypeVar("_IDType", bound=Any) +_NameType = TypeVar("_NameType", bound=str) + + +class _SupportsArrayUFunc(Protocol): + def __array_ufunc__( + self, + ufunc: ufunc, + method: Literal["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + + +# NOTE: In reality `extobj` should be a length of list 3 containing an +# int, an int, and a callable, but there's no way to properly express +# non-homogenous lists. +# Use `Any` over `Union` to avoid issues related to lists invariance. + +# NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for +# ufuncs that don't accept two input arguments and return one output argument. +# In such cases the respective methods are simply typed as `None`. + +# NOTE: Similarly, `at` won't be defined for ufuncs that return +# multiple outputs; in such cases `at` is typed as `None` + +# NOTE: If 2 output types are returned then `out` must be a +# 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable + +class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[2]: ... + @property + def signature(self) -> None: ... + @property + def reduce(self) -> None: ... + @property + def accumulate(self) -> None: ... + @property + def reduceat(self) -> None: ... + @property + def outer(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + out: None = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> Any: ... + @overload + def __call__( + self, + __x1: ArrayLike, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> NDArray[Any]: ... + @overload + def __call__( + self, + __x1: _SupportsArrayUFunc, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> Any: ... + + def at( + self, + a: _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + /, + ) -> None: ... + +class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... 
+ @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __x2: _ScalarLike_co, + out: None = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> Any: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> NDArray[Any]: ... + + def at( + self, + a: NDArray[Any], + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... + + def reduce( + self, + array: ArrayLike, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None | NDArray[Any] = ..., + keepdims: bool = ..., + initial: Any = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None | NDArray[Any] = ..., + ) -> NDArray[Any]: ... + + def reduceat( + self, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None | NDArray[Any] = ..., + ) -> NDArray[Any]: ... + + # Expand `**kwargs` into explicit keyword-only arguments + @overload + def outer( + self, + A: _ScalarLike_co, + B: _ScalarLike_co, + /, *, + out: None = ..., + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> Any: ... + @overload + def outer( # type: ignore[misc] + self, + A: ArrayLike, + B: ArrayLike, + /, *, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> NDArray[Any]: ... + +class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + @property + def at(self) -> None: ... + @property + def reduce(self) -> None: ... + @property + def accumulate(self) -> None: ... + @property + def reduceat(self) -> None: ... + @property + def outer(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __out1: None = ..., + __out2: None = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> _2Tuple[Any]: ... 
+ @overload + def __call__( + self, + __x1: ArrayLike, + __out1: None | NDArray[Any] = ..., + __out2: None | NDArray[Any] = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> _2Tuple[NDArray[Any]]: ... + @overload + def __call__( + self, + __x1: _SupportsArrayUFunc, + __out1: None | NDArray[Any] = ..., + __out2: None | NDArray[Any] = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> _2Tuple[Any]: ... + +class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[4]: ... + @property + def signature(self) -> None: ... + @property + def at(self) -> None: ... + @property + def reduce(self) -> None: ... + @property + def accumulate(self) -> None: ... + @property + def reduceat(self) -> None: ... + @property + def outer(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __x2: _ScalarLike_co, + __out1: None = ..., + __out2: None = ..., + *, + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _4Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> _2Tuple[Any]: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + __out1: None | NDArray[Any] = ..., + __out2: None | NDArray[Any] = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: None | _ArrayLikeBool_co = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _4Tuple[None | str] = ..., + extobj: list[Any] = ..., + ) -> _2Tuple[NDArray[Any]]: ... + +class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + + # NOTE: In practice the only gufunc in the main namespace is `matmul`, + # so we can use its signature here + @property + def signature(self) -> Literal["(n?,k),(k,m?)->(n?,m?)"]: ... + @property + def reduce(self) -> None: ... + @property + def accumulate(self) -> None: ... + @property + def reduceat(self) -> None: ... + @property + def outer(self) -> None: ... + @property + def at(self) -> None: ... + + # Scalar for 1D array-likes; ndarray otherwise + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: None = ..., + *, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + axes: list[_2Tuple[SupportsIndex]] = ..., + ) -> Any: ... 
+ @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: NDArray[Any] | tuple[NDArray[Any]], + *, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[None | str] = ..., + extobj: list[Any] = ..., + axes: list[_2Tuple[SupportsIndex]] = ..., + ) -> NDArray[Any]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/setup.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/setup.py new file mode 100644 index 00000000..24022fda --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/setup.py @@ -0,0 +1,10 @@ +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('_typing', parent_package, top_path) + config.add_data_files('*.pyi') + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/__init__.py new file mode 100644 index 00000000..388dd917 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/__init__.py @@ -0,0 +1,29 @@ +""" +This is a module for defining private helpers which do not depend on the +rest of NumPy. + +Everything in here must be self-contained so that it can be +imported anywhere else without creating circular imports. +If a utility requires the import of NumPy, it probably belongs +in ``numpy.core``. +""" + +from ._convertions import asunicode, asbytes + + +def set_module(module): + """Private decorator for overriding __module__ on a function or class. + + Example usage:: + + @set_module('numpy') + def example(): + pass + + assert example.__module__ == 'numpy' + """ + def decorator(func): + if module is not None: + func.__module__ = module + return func + return decorator diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_convertions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_convertions.py new file mode 100644 index 00000000..ab15a8ba --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_convertions.py @@ -0,0 +1,18 @@ +""" +A set of methods retained from np.compat module that +are still used across codebase. +""" + +__all__ = ["asunicode", "asbytes"] + + +def asunicode(s): + if isinstance(s, bytes): + return s.decode('latin1') + return str(s) + + +def asbytes(s): + if isinstance(s, bytes): + return s + return str(s).encode('latin1') diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_inspect.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_inspect.py new file mode 100644 index 00000000..9a874a71 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_inspect.py @@ -0,0 +1,191 @@ +"""Subset of inspect module from upstream python + +We use this instead of upstream because upstream inspect is slow to import, and +significantly contributes to numpy import times. Importing this copy has almost +no overhead. + +""" +import types + +__all__ = ['getargspec', 'formatargspec'] + +# ----------------------------------------------------------- type-checking +def ismethod(object): + """Return true if the object is an instance method. 
+ + Instance method objects provide these attributes: + __doc__ documentation string + __name__ name with which this method was defined + im_class class object in which this method belongs + im_func function object containing implementation of method + im_self instance to which this method is bound, or None + + """ + return isinstance(object, types.MethodType) + +def isfunction(object): + """Return true if the object is a user-defined function. + + Function objects provide these attributes: + __doc__ documentation string + __name__ name with which this function was defined + func_code code object containing compiled function bytecode + func_defaults tuple of any default values for arguments + func_doc (same as __doc__) + func_globals global namespace in which this function was defined + func_name (same as __name__) + + """ + return isinstance(object, types.FunctionType) + +def iscode(object): + """Return true if the object is a code object. + + Code objects provide these attributes: + co_argcount number of arguments (not including * or ** args) + co_code string of raw compiled bytecode + co_consts tuple of constants used in the bytecode + co_filename name of file in which this code object was created + co_firstlineno number of first line in Python source code + co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg + co_lnotab encoded mapping of line numbers to bytecode indices + co_name name with which this code object was defined + co_names tuple of names of local variables + co_nlocals number of local variables + co_stacksize virtual machine stack space required + co_varnames tuple of names of arguments and local variables + + """ + return isinstance(object, types.CodeType) + +# ------------------------------------------------ argument list extraction +# These constants are from Python's compile.h. +CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8 + +def getargs(co): + """Get information about the arguments accepted by a code object. + + Three things are returned: (args, varargs, varkw), where 'args' is + a list of argument names (possibly containing nested lists), and + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + + """ + + if not iscode(co): + raise TypeError('arg is not a code object') + + nargs = co.co_argcount + names = co.co_varnames + args = list(names[:nargs]) + + # The following acrobatics are for anonymous (tuple) arguments. + # Which we do not need to support, so remove to avoid importing + # the dis module. + for i in range(nargs): + if args[i][:1] in ['', '.']: + raise TypeError("tuple function arguments are not supported") + varargs = None + if co.co_flags & CO_VARARGS: + varargs = co.co_varnames[nargs] + nargs = nargs + 1 + varkw = None + if co.co_flags & CO_VARKEYWORDS: + varkw = co.co_varnames[nargs] + return args, varargs, varkw + +def getargspec(func): + """Get the names and default values of a function's arguments. + + A tuple of four things is returned: (args, varargs, varkw, defaults). + 'args' is a list of the argument names (it may contain nested lists). + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'defaults' is an n-tuple of the default values of the last n arguments. 
+ + """ + + if ismethod(func): + func = func.__func__ + if not isfunction(func): + raise TypeError('arg is not a Python function') + args, varargs, varkw = getargs(func.__code__) + return args, varargs, varkw, func.__defaults__ + +def getargvalues(frame): + """Get information about arguments passed into a particular frame. + + A tuple of four things is returned: (args, varargs, varkw, locals). + 'args' is a list of the argument names (it may contain nested lists). + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'locals' is the locals dictionary of the given frame. + + """ + args, varargs, varkw = getargs(frame.f_code) + return args, varargs, varkw, frame.f_locals + +def joinseq(seq): + if len(seq) == 1: + return '(' + seq[0] + ',)' + else: + return '(' + ', '.join(seq) + ')' + +def strseq(object, convert, join=joinseq): + """Recursively walk a sequence, stringifying each element. + + """ + if type(object) in [list, tuple]: + return join([strseq(_o, convert, join) for _o in object]) + else: + return convert(object) + +def formatargspec(args, varargs=None, varkw=None, defaults=None, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargspec. + + The first four arguments are (args, varargs, varkw, defaults). The + other four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments. + + """ + specs = [] + if defaults: + firstdefault = len(args) - len(defaults) + for i in range(len(args)): + spec = strseq(args[i], formatarg, join) + if defaults and i >= firstdefault: + spec = spec + formatvalue(defaults[i - firstdefault]) + specs.append(spec) + if varargs is not None: + specs.append(formatvarargs(varargs)) + if varkw is not None: + specs.append(formatvarkw(varkw)) + return '(' + ', '.join(specs) + ')' + +def formatargvalues(args, varargs, varkw, locals, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargvalues. + + The first four arguments are (args, varargs, varkw, locals). The + next four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments. + + """ + def convert(name, locals=locals, + formatarg=formatarg, formatvalue=formatvalue): + return formatarg(name) + formatvalue(locals[name]) + specs = [strseq(arg, convert, join) for arg in args] + + if varargs: + specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) + if varkw: + specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) + return '(' + ', '.join(specs) + ')' diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_pep440.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_pep440.py new file mode 100644 index 00000000..73d0afb5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_pep440.py @@ -0,0 +1,487 @@ +"""Utility to compare pep440 compatible version strings. + +The LooseVersion and StrictVersion classes that distutils provides don't +work; they don't recognize anything like alpha/beta/rc/dev versions. 
+""" + +# Copyright (c) Donald Stufft and individual contributors. +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +import collections +import itertools +import re + + +__all__ = [ + "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN", +] + + +# BEGIN packaging/_structures.py + + +class Infinity: + def __repr__(self): + return "Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __neg__(self): + return NegativeInfinity + + +Infinity = Infinity() + + +class NegativeInfinity: + def __repr__(self): + return "-Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return True + + def __le__(self, other): + return True + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return False + + def __neg__(self): + return Infinity + + +# BEGIN packaging/version.py + + +NegativeInfinity = NegativeInfinity() + +_Version = collections.namedtuple( + "_Version", + ["epoch", "release", "dev", "pre", "post", "local"], +) + + +def parse(version): + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. 
+ """ + + +class _BaseVersion: + + def __hash__(self): + return hash(self._key) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __ge__(self, other): + return self._compare(other, lambda s, o: s >= o) + + def __gt__(self, other): + return self._compare(other, lambda s, o: s > o) + + def __ne__(self, other): + return self._compare(other, lambda s, o: s != o) + + def _compare(self, other, method): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return method(self._key, other._key) + + +class LegacyVersion(_BaseVersion): + + def __init__(self, version): + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + def __str__(self): + return self._version + + def __repr__(self): + return "".format(repr(str(self))) + + @property + def public(self): + return self._version + + @property + def base_version(self): + return self._version + + @property + def local(self): + return None + + @property + def is_prerelease(self): + return False + + @property + def is_postrelease(self): + return False + + +_legacy_version_component_re = re.compile( + r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, +) + +_legacy_version_replacement_map = { + "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", +} + + +def _parse_version_parts(s): + for part in _legacy_version_component_re.split(s): + part = _legacy_version_replacement_map.get(part, part) + + if not part or part == ".": + continue + + if part[:1] in "0123456789": + # pad for numeric comparison + yield part.zfill(8) + else: + yield "*" + part + + # ensure that alpha/beta/candidate are before final + yield "*final" + + +def _legacy_cmpkey(version): + # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # its adoption of the packaging library. + parts = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + parts = tuple(parts) + + return epoch, parts + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
                                          # pre-release
+            [-_\.]?
+            (?P(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P[0-9]+)?
+        )?
+        (?P                                         # post release
+            (?:-(?P[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?Ppost|rev|r)
+                [-_\.]?
+                (?P[0-9]+)?
+            )
+        )?
+        (?P                                          # dev release
+            [-_\.]?
+            (?Pdev)
+            [-_\.]?
+            (?P[0-9]+)?
+        )?
+    )
+    (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"),
+                match.group("pre_n"),
+            ),
+            post=_parse_letter_version(
+                match.group("post_l"),
+                match.group("post_n1") or match.group("post_n2"),
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"),
+                match.group("dev_n"),
+            ),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return "".format(repr(str(self)))
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        # Pre-release
+        if self._version.pre is not None:
+            parts.append("".join(str(x) for x in self._version.pre))
+
+        # Post-release
+        if self._version.post is not None:
+            parts.append(".post{0}".format(self._version.post[1]))
+
+        # Development release
+        if self._version.dev is not None:
+            parts.append(".dev{0}".format(self._version.dev[1]))
+
+        # Local version segment
+        if self._version.local is not None:
+            parts.append(
+                "+{0}".format(".".join(str(x) for x in self._version.local))
+            )
+
+        return "".join(parts)
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        return "".join(parts)
+
+    @property
+    def local(self):
+        version_string = str(self)
+        if "+" in version_string:
+            return version_string.split("+", 1)[1]
+
+    @property
+    def is_prerelease(self):
+        return bool(self._version.dev or self._version.pre)
+
+    @property
+    def is_postrelease(self):
+        return bool(self._version.post)
+
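+# For illustration (sketch):
+#
+#     >>> v = Version("1!2.0rc1+local.7")
+#     >>> v.public, v.base_version, v.local, v.is_prerelease
+#     ('1!2.0rc1', '1!2.0', 'local.7', True)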
+
+def _parse_letter_version(letter, number):
+    if letter:
+        # We assume there is an implicit 0 in a pre-release if there is
+        # no numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower-case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume that if we are given a number but not given a letter,
+        # then this is using the implicit post release syntax (e.g., 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
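+# For example (sketch):
+#
+#     _parse_letter_version("alpha", None)  ->  ("a", 0)
+#     _parse_letter_version("rev", "2")     ->  ("post", 2)
+#     _parse_letter_version(None, "1")      ->  ("post", 1)   # "1.0-1" spelling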
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-
+    # leading zeros until we come to something non-zero, then take the rest,
+    # reverse it back into the correct order, and make it a tuple to use as
+    # part of our sorting key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre-segment, but we _only_ want to do this
+    # if there is no pre- or a post-segment. If we have one of those, then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post-segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alphanumeric segments sort before numeric segments
+        # - Alphanumeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
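+
+
+# Taken together, these keys produce the PEP 440 ordering; for example
+# (sketch): 1.0.dev0 < 1.0a1 < 1.0 < 1.0.post1 < 1.0.post1+anything.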
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/__init__.py
new file mode 100644
index 00000000..77f22788
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/__init__.py
@@ -0,0 +1,387 @@
+"""
+A NumPy sub-namespace that conforms to the Python array API standard.
+
+This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It
+is still considered experimental, and will issue a warning when imported.
+
+This is a proof-of-concept namespace that wraps the corresponding NumPy
+functions to give a conforming implementation of the Python array API standard
+(https://data-apis.github.io/array-api/latest/). The standard is currently in
+an RFC phase and comments on it are both welcome and encouraged. Comments
+should be made either at https://github.com/data-apis/array-api or at
+https://github.com/data-apis/consortium-feedback/discussions.
+
+NumPy already follows the proposed spec for the most part, so this module
+serves mostly as a thin wrapper around it. However, NumPy also implements a
+lot of behavior that is not included in the spec, so this serves as a
+restricted subset of the API. Only those functions that are part of the spec
+are included in this namespace, and all functions are given with the exact
+signature given in the spec, including the use of position-only arguments, and
+omitting any extra keyword arguments implemented by NumPy but not part of the
+spec. The behavior of some functions is also modified from the NumPy behavior
+to conform to the standard. Note that the underlying array object itself is
+wrapped in a wrapper Array() class, but is otherwise unchanged. This submodule
+is implemented in pure Python with no C extensions.
+
+The array API spec is designed as a "minimal API subset" and explicitly allows
+libraries to include behaviors not specified by it. But users of this module
+that intend to write portable code should be aware that only those behaviors
+that are listed in the spec are guaranteed to be implemented across libraries.
+Consequently, the NumPy implementation was chosen to be both conforming and
+minimal, so that users can use this implementation of the array API namespace
+and be sure that behaviors that it defines will be available in conforming
+namespaces from other libraries.
+
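+A minimal usage sketch (indicative; importing emits the experimental warning
+noted above):
+
+    >>> from numpy import array_api as xp
+    >>> a = xp.asarray([1.0, 2.0], dtype=xp.float64)
+    >>> xp.sum(a)
+    Array(3., dtype=float64)
+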
+A few notes about the current state of this submodule:
+
+- There is a test suite that tests modules against the array API standard at
+  https://github.com/data-apis/array-api-tests. The test suite is still a work
+  in progress, but the existing tests pass on this module, with a few
+  exceptions:
+
+  - DLPack support (see https://github.com/data-apis/array-api/pull/106) is
+    not included here, as it requires a full implementation in NumPy proper
+    first.
+
+  The test suite is not yet complete, and even the tests that exist are not
+  guaranteed to give comprehensive coverage of the spec. Therefore, when
+  reviewing and using this submodule, you should refer to the standard
+  documents themselves. There are some tests in numpy.array_api.tests, but
+  they primarily focus on things that are not tested by the official array API
+  test suite.
+
+- There is a custom array object, numpy.array_api.Array, which is returned by
+  all functions in this module. All functions in the array API namespace
+  implicitly assume that they will only receive this object as input. The only
+  way to create instances of this object is to use one of the array creation
+  functions. It does not have a public constructor on the object itself. The
+  object is a small wrapper class around numpy.ndarray. The main purpose of it
+  is to restrict the namespace of the array object to only those dtypes and
+  only those methods that are required by the spec, as well as to limit/change
+  certain behavior that differs in the spec. In particular:
+
+  - The array API namespace does not have scalar objects, only 0-D arrays.
+    Operations on Array that would create a scalar in NumPy create a 0-D
+    array.
+
+  - Indexing: Only a subset of indices supported by NumPy are required by the
+    spec. The Array object restricts indexing to only allow those types of
+    indices that are required by the spec. See the docstring of the
+    numpy.array_api.Array._validate_indices helper function for more
+    information.
+
+  - Type promotion: Some type promotion rules are different in the spec. In
+    particular, the spec does not have any value-based casting. The spec also
+    does not require cross-kind casting, like integer -> floating-point. Only
+    those promotions that are explicitly required by the array API
+    specification are allowed in this module. See NEP 47 for more info.
+
+  - Functions do not automatically call asarray() on their input, and will not
+    work if the input type is not Array. The exceptions are array creation
+    functions, and Python operators on the Array object, which accept Python
+    scalars of the same type as the array dtype.
+
+- All functions include type annotations, corresponding to those given in the
+  spec (see _typing.py for definitions of some custom types). These do not
+  currently fully pass mypy due to some limitations in mypy.
+
+- Dtype objects are just the NumPy dtype objects, e.g., float64 =
+  np.dtype('float64'). The spec does not require any behavior on these dtype
+  objects other than that they be accessible by name and be comparable by
+  equality, but it was considered too much extra complexity to create custom
+  objects to represent dtypes.
+
+- All places where the implementations in this submodule are known to deviate
+  from their corresponding functions in NumPy are marked with "# Note:"
+  comments.
+
+Still TODO in this module are:
+
+- DLPack support for numpy.ndarray is still in progress. See
+  https://github.com/numpy/numpy/pull/19083.
+
+- The copy=False keyword argument to asarray() is not yet implemented. This
+  requires support in numpy.asarray() first.
+
+- Some functions are not yet fully tested in the array API test suite, and may
+  require updates that will not be known until the tests are written.
+
+- The spec is still in an RFC phase and may still have minor updates, which
+  will need to be reflected here.
+
+- Complex number support in the array API spec is planned but not yet finalized,
+  as are the fft extension and certain linear algebra functions such as eig
+  that require complex dtypes.
+
+"""
+
+import warnings
+
+warnings.warn(
+    "The numpy.array_api submodule is still experimental. See NEP 47.", stacklevel=2
+)
+
+__array_api_version__ = "2022.12"
+
+__all__ = ["__array_api_version__"]
+
+from ._constants import e, inf, nan, pi, newaxis
+
+__all__ += ["e", "inf", "nan", "pi"]
+
+from ._creation_functions import (
+    asarray,
+    arange,
+    empty,
+    empty_like,
+    eye,
+    from_dlpack,
+    full,
+    full_like,
+    linspace,
+    meshgrid,
+    ones,
+    ones_like,
+    tril,
+    triu,
+    zeros,
+    zeros_like,
+)
+
+__all__ += [
+    "asarray",
+    "arange",
+    "empty",
+    "empty_like",
+    "eye",
+    "from_dlpack",
+    "full",
+    "full_like",
+    "linspace",
+    "meshgrid",
+    "ones",
+    "ones_like",
+    "tril",
+    "triu",
+    "zeros",
+    "zeros_like",
+]
+
+from ._data_type_functions import (
+    astype,
+    broadcast_arrays,
+    broadcast_to,
+    can_cast,
+    finfo,
+    isdtype,
+    iinfo,
+    result_type,
+)
+
+__all__ += [
+    "astype",
+    "broadcast_arrays",
+    "broadcast_to",
+    "can_cast",
+    "finfo",
+    "iinfo",
+    "result_type",
+]
+
+from ._dtypes import (
+    int8,
+    int16,
+    int32,
+    int64,
+    uint8,
+    uint16,
+    uint32,
+    uint64,
+    float32,
+    float64,
+    complex64,
+    complex128,
+    bool,
+)
+
+__all__ += [
+    "int8",
+    "int16",
+    "int32",
+    "int64",
+    "uint8",
+    "uint16",
+    "uint32",
+    "uint64",
+    "float32",
+    "float64",
+    "bool",
+]
+
+from ._elementwise_functions import (
+    abs,
+    acos,
+    acosh,
+    add,
+    asin,
+    asinh,
+    atan,
+    atan2,
+    atanh,
+    bitwise_and,
+    bitwise_left_shift,
+    bitwise_invert,
+    bitwise_or,
+    bitwise_right_shift,
+    bitwise_xor,
+    ceil,
+    conj,
+    cos,
+    cosh,
+    divide,
+    equal,
+    exp,
+    expm1,
+    floor,
+    floor_divide,
+    greater,
+    greater_equal,
+    imag,
+    isfinite,
+    isinf,
+    isnan,
+    less,
+    less_equal,
+    log,
+    log1p,
+    log2,
+    log10,
+    logaddexp,
+    logical_and,
+    logical_not,
+    logical_or,
+    logical_xor,
+    multiply,
+    negative,
+    not_equal,
+    positive,
+    pow,
+    real,
+    remainder,
+    round,
+    sign,
+    sin,
+    sinh,
+    square,
+    sqrt,
+    subtract,
+    tan,
+    tanh,
+    trunc,
+)
+
+__all__ += [
+    "abs",
+    "acos",
+    "acosh",
+    "add",
+    "asin",
+    "asinh",
+    "atan",
+    "atan2",
+    "atanh",
+    "bitwise_and",
+    "bitwise_left_shift",
+    "bitwise_invert",
+    "bitwise_or",
+    "bitwise_right_shift",
+    "bitwise_xor",
+    "ceil",
+    "cos",
+    "cosh",
+    "divide",
+    "equal",
+    "exp",
+    "expm1",
+    "floor",
+    "floor_divide",
+    "greater",
+    "greater_equal",
+    "isfinite",
+    "isinf",
+    "isnan",
+    "less",
+    "less_equal",
+    "log",
+    "log1p",
+    "log2",
+    "log10",
+    "logaddexp",
+    "logical_and",
+    "logical_not",
+    "logical_or",
+    "logical_xor",
+    "multiply",
+    "negative",
+    "not_equal",
+    "positive",
+    "pow",
+    "remainder",
+    "round",
+    "sign",
+    "sin",
+    "sinh",
+    "square",
+    "sqrt",
+    "subtract",
+    "tan",
+    "tanh",
+    "trunc",
+]
+
+from ._indexing_functions import take
+
+__all__ += ["take"]
+
+# linalg is an extension in the array API spec, which is a sub-namespace. Only
+# a subset of functions in it are imported into the top-level namespace.
+from . import linalg
+
+__all__ += ["linalg"]
+
+from .linalg import matmul, tensordot, matrix_transpose, vecdot
+
+__all__ += ["matmul", "tensordot", "matrix_transpose", "vecdot"]
+
+from ._manipulation_functions import (
+    concat,
+    expand_dims,
+    flip,
+    permute_dims,
+    reshape,
+    roll,
+    squeeze,
+    stack,
+)
+
+__all__ += ["concat", "expand_dims", "flip", "permute_dims", "reshape", "roll", "squeeze", "stack"]
+
+from ._searching_functions import argmax, argmin, nonzero, where
+
+__all__ += ["argmax", "argmin", "nonzero", "where"]
+
+from ._set_functions import unique_all, unique_counts, unique_inverse, unique_values
+
+__all__ += ["unique_all", "unique_counts", "unique_inverse", "unique_values"]
+
+from ._sorting_functions import argsort, sort
+
+__all__ += ["argsort", "sort"]
+
+from ._statistical_functions import max, mean, min, prod, std, sum, var
+
+__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"]
+
+from ._utility_functions import all, any
+
+__all__ += ["all", "any"]
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_array_object.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_array_object.py
new file mode 100644
index 00000000..5aff9863
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_array_object.py
@@ -0,0 +1,1133 @@
+"""
+Wrapper class around the ndarray object for the array API standard.
+
+The array API standard defines some behaviors differently than ndarray, in
+particular, type promotion rules are different (the standard has no
+value-based casting). The standard also specifies a more limited subset of
+array methods and functionalities than are implemented on ndarray. Since the
+goal of the array_api namespace is to be a minimal implementation of the array
+API standard, we need to define a separate wrapper class for the array_api
+namespace.
+
+The standard compliant class is only a wrapper class. It is *not* a subclass
+of ndarray.
+"""
+
+from __future__ import annotations
+
+import operator
+from enum import IntEnum
+from ._creation_functions import asarray
+from ._dtypes import (
+    _all_dtypes,
+    _boolean_dtypes,
+    _integer_dtypes,
+    _integer_or_boolean_dtypes,
+    _floating_dtypes,
+    _complex_floating_dtypes,
+    _numeric_dtypes,
+    _result_type,
+    _dtype_categories,
+)
+
+from typing import TYPE_CHECKING, Optional, Tuple, Union, Any, SupportsIndex
+import types
+
+if TYPE_CHECKING:
+    from ._typing import Any, PyCapsule, Device, Dtype
+    import numpy.typing as npt
+
+import numpy as np
+
+from numpy import array_api
+
+
+class Array:
+    """
+    n-d array object for the array API namespace.
+
+    See the docstring of :py:obj:`np.ndarray <numpy.ndarray>` for more
+    information.
+
+    This is a wrapper around numpy.ndarray that restricts the usage to only
+    those things that are required by the array API namespace. Note,
+    attributes on this object that start with a single underscore are not part
+    of the API specification and should only be used internally. This object
+    should not be constructed directly. Rather, use one of the creation
+    functions, such as asarray().
+
+    """
+    _array: np.ndarray[Any, Any]
+
+    # Use a custom constructor instead of __init__, as manually initializing
+    # this class is not supported API.
+    @classmethod
+    def _new(cls, x, /):
+        """
+        This is a private method for initializing the array API Array
+        object.
+
+        Functions outside of the array_api submodule should not use this
+        method. Use one of the creation functions instead, such as
+        ``asarray``.
+
+        """
+        obj = super().__new__(cls)
+        # Note: The spec does not have array scalars, only 0-D arrays.
+        if isinstance(x, np.generic):
+            # Convert the array scalar to a 0-D array
+            x = np.asarray(x)
+        if x.dtype not in _all_dtypes:
+            raise TypeError(
+                f"The array_api namespace does not support the dtype '{x.dtype}'"
+            )
+        obj._array = x
+        return obj
+
+    # Prevent Array() from working
+    def __new__(cls, *args, **kwargs):
+        raise TypeError(
+            "The array_api Array object should not be instantiated directly. Use an array creation function, such as asarray(), instead."
+        )
+
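+    # For example (sketch): calling Array() directly raises the TypeError
+    # above, while asarray([1, 2]) constructs an instance through the
+    # private _new classmethod.
+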
+    # These functions are not required by the spec, but are implemented for
+    # the sake of usability.
+
+    def __str__(self: Array, /) -> str:
+        """
+        Performs the operation __str__.
+        """
+        return self._array.__str__().replace("array", "Array")
+
+    def __repr__(self: Array, /) -> str:
+        """
+        Performs the operation __repr__.
+        """
+        suffix = f", dtype={self.dtype.name})"
+        if 0 in self.shape:
+            prefix = "empty("
+            mid = str(self.shape)
+        else:
+            prefix = "Array("
+            mid = np.array2string(self._array, separator=', ', prefix=prefix, suffix=suffix)
+        return prefix + mid + suffix
+
+    # This function is not required by the spec, but we implement it here for
+    # convenience so that np.asarray(np.array_api.Array) will work.
+    def __array__(self, dtype: None | np.dtype[Any] = None) -> npt.NDArray[Any]:
+        """
+        Warning: this method is NOT part of the array API spec. Implementers
+        of other libraries need not include it, and users should not assume it
+        will be present in other implementations.
+
+        """
+        return np.asarray(self._array, dtype=dtype)
+
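+    # For example (sketch): np.asarray(a) on an array_api Array returns the
+    # underlying np.ndarray, because NumPy calls this __array__ method.
+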
+    # These are various helper functions to make the array behavior match the
+    # spec in places where it either deviates from or is more strict than
+    # NumPy behavior
+
+    def _check_allowed_dtypes(self, other: bool | int | float | Array, dtype_category: str, op: str) -> Array:
+        """
+        Helper function for operators to only allow specific input dtypes
+
+        Use like
+
+            other = self._check_allowed_dtypes(other, 'numeric', '__add__')
+            if other is NotImplemented:
+                return other
+        """
+
+        if self.dtype not in _dtype_categories[dtype_category]:
+            raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
+        if isinstance(other, (int, complex, float, bool)):
+            other = self._promote_scalar(other)
+        elif isinstance(other, Array):
+            if other.dtype not in _dtype_categories[dtype_category]:
+                raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
+        else:
+            return NotImplemented
+
+        # This will raise TypeError for type combinations that are not allowed
+        # to promote in the spec (even if the NumPy array operator would
+        # promote them).
+        res_dtype = _result_type(self.dtype, other.dtype)
+        if op.startswith("__i"):
+            # Note: NumPy will allow in-place operators in some cases where
+            # the type promoted operator does not match the left-hand side
+            # operand. For example,
+
+            # >>> a = np.array(1, dtype=np.int8)
+            # >>> a += np.array(1, dtype=np.int16)
+
+            # The spec explicitly disallows this.
+            if res_dtype != self.dtype:
+                raise TypeError(
+                    f"Cannot perform {op} with dtypes {self.dtype} and {other.dtype}"
+                )
+
+        return other
+
+    # Helper function to match the type promotion rules in the spec
+    def _promote_scalar(self, scalar):
+        """
+        Returns a promoted version of a Python scalar appropriate for use with
+        operations on self.
+
+        This may raise an OverflowError in cases where the scalar is an
+        integer that is too large to fit in a NumPy integer dtype, or
+        TypeError when the scalar type is incompatible with the dtype of self.
+        """
+        # Note: Only Python scalar types that match the array dtype are
+        # allowed.
+        if isinstance(scalar, bool):
+            if self.dtype not in _boolean_dtypes:
+                raise TypeError(
+                    "Python bool scalars can only be promoted with bool arrays"
+                )
+        elif isinstance(scalar, int):
+            if self.dtype in _boolean_dtypes:
+                raise TypeError(
+                    "Python int scalars cannot be promoted with bool arrays"
+                )
+            if self.dtype in _integer_dtypes:
+                info = np.iinfo(self.dtype)
+                if not (info.min <= scalar <= info.max):
+                    raise OverflowError(
+                        "Python int scalars must be within the bounds of the dtype for integer arrays"
+                    )
+            # int + array(floating) is allowed
+        elif isinstance(scalar, float):
+            if self.dtype not in _floating_dtypes:
+                raise TypeError(
+                    "Python float scalars can only be promoted with floating-point arrays."
+                )
+        elif isinstance(scalar, complex):
+            if self.dtype not in _complex_floating_dtypes:
+                raise TypeError(
+                    "Python complex scalars can only be promoted with complex floating-point arrays."
+                )
+        else:
+            raise TypeError("'scalar' must be a Python scalar")
+
+        # Note: scalars are unconditionally cast to the same dtype as the
+        # array.
+
+        # Note: the spec only specifies integer-dtype/int promotion
+        # behavior for integers within the bounds of the integer dtype.
+        # Outside of those bounds we use the default NumPy behavior (either
+        # cast or raise OverflowError).
+        return Array._new(np.array(scalar, self.dtype))
+
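+    # For example (sketch): with a float64 Array ``a``, ``a + 1`` routes the
+    # Python int through _promote_scalar and yields a float64 result, while
+    # the same scalar against a bool Array raises TypeError.
+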
+    @staticmethod
+    def _normalize_two_args(x1, x2) -> Tuple[Array, Array]:
+        """
+        Normalize inputs to two arg functions to fix type promotion rules
+
+        NumPy deviates from the spec type promotion rules in cases where one
+        argument is 0-dimensional and the other is not. For example:
+
+        >>> import numpy as np
+        >>> a = np.array([1.0], dtype=np.float32)
+        >>> b = np.array(1.0, dtype=np.float64)
+        >>> np.add(a, b) # The spec says this should be float64
+        array([2.], dtype=float32)
+
+        To fix this, we add a dimension to the 0-dimension array before passing it
+        through. This works because a dimension would be added anyway from
+        broadcasting, so the resulting shape is the same, but it keeps NumPy
+        from skipping the dtype promotion.
+        """
+        # Another option would be to use signature=(x1.dtype, x2.dtype, None),
+        # but that only works for ufuncs, so we would have to call the ufuncs
+        # directly in the operator methods. One should also note that this
+        # sort of trick wouldn't work for functions like searchsorted, which
+        # don't do normal broadcasting, but there aren't any functions like
+        # that in the array API namespace.
+        if x1.ndim == 0 and x2.ndim != 0:
+            # The _array[None] workaround was chosen because it is relatively
+            # performant. broadcast_to(x1._array, x2.shape) is much slower. We
+            # could also manually type promote x2, but that is more complicated
+            # and about the same performance as this.
+            x1 = Array._new(x1._array[None])
+        elif x2.ndim == 0 and x1.ndim != 0:
+            x2 = Array._new(x2._array[None])
+        return (x1, x2)
+
+    # Note: A large fraction of allowed indices are disallowed here (see the
+    # docstring below)
+    def _validate_index(self, key):
+        """
+        Validate an index according to the array API.
+
+        The array API specification only requires a subset of indices that are
+        supported by NumPy. This function will reject any index that is
+        allowed by NumPy but not required by the array API specification. We
+        always raise ``IndexError`` on such indices (the spec does not require
+        any specific behavior on them, but this makes the NumPy array API
+        namespace a minimal implementation of the spec). See
+        https://data-apis.org/array-api/latest/API_specification/indexing.html
+        for the full list of required indexing behavior
+
+        This function raises IndexError if the index ``key`` is invalid. It
+        only raises ``IndexError`` on indices that are not already rejected by
+        NumPy, as NumPy will already raise the appropriate error on such
+        indices.
+
+        The following cases are allowed by NumPy, but not specified by the array
+        API specification:
+
+        - Indices do not include an implicit ellipsis at the end. That is,
+          every axis of an array must be explicitly indexed or an ellipsis
+          included; the implicit behaviour is sometimes referred to as flat
+          indexing.
+
+        - The start and stop of a slice may not be out of bounds. In
+          particular, for a slice ``i:j:k`` on an axis of size ``n``, only the
+          following are allowed:
+
+          - ``i`` or ``j`` omitted (``None``).
+          - ``-n <= i <= max(0, n - 1)``.
+          - For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``.
+          - For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``.
+
+        - Boolean array indices are not allowed as part of a larger tuple
+          index.
+
+        - Integer array indices are not allowed (with the exception of 0-D
+          arrays, which are treated the same as scalars).
+
+        Additionally, it should be noted that indices that would return a
+        scalar in NumPy will return a 0-D array. Array scalars are not allowed
+        in the specification, only 0-D arrays. This is done in the
+        ``Array._new`` constructor, not this function.
+
+        """
+        _key = key if isinstance(key, tuple) else (key,)
+        for i in _key:
+            if isinstance(i, bool) or not (
+                isinstance(i, SupportsIndex)  # i.e. ints
+                or isinstance(i, slice)
+                or i == Ellipsis
+                or i is None
+                or isinstance(i, Array)
+                or isinstance(i, np.ndarray)
+            ):
+                raise IndexError(
+                    f"Single-axes index {i} has {type(i)=}, but only "
+                    "integers, slices (:), ellipsis (...), newaxis (None), "
+                    "zero-dimensional integer arrays and boolean arrays "
+                    "are specified in the Array API."
+                )
+
+        nonexpanding_key = []
+        single_axes = []
+        n_ellipsis = 0
+        key_has_mask = False
+        for i in _key:
+            if i is not None:
+                nonexpanding_key.append(i)
+                if isinstance(i, Array) or isinstance(i, np.ndarray):
+                    if i.dtype in _boolean_dtypes:
+                        key_has_mask = True
+                    single_axes.append(i)
+                else:
+                    # i must not be an array here, to avoid elementwise equals
+                    if i == Ellipsis:
+                        n_ellipsis += 1
+                    else:
+                        single_axes.append(i)
+
+        n_single_axes = len(single_axes)
+        if n_ellipsis > 1:
+            return  # handled by ndarray
+        elif n_ellipsis == 0:
+            # Note boolean masks must be the sole index, which we check for
+            # later on.
+            if not key_has_mask and n_single_axes < self.ndim:
+                raise IndexError(
+                    f"{self.ndim=}, but the multi-axes index only specifies "
+                    f"{n_single_axes} dimensions. If this was intentional, "
+                    "add a trailing ellipsis (...) which expands into as many "
+                    "slices (:) as necessary - this is what np.ndarray arrays "
+                    "implicitly do, but such flat indexing behaviour is not "
+                    "specified in the Array API."
+                )
+
+        if n_ellipsis == 0:
+            indexed_shape = self.shape
+        else:
+            ellipsis_start = None
+            for pos, i in enumerate(nonexpanding_key):
+                if not (isinstance(i, Array) or isinstance(i, np.ndarray)):
+                    if i == Ellipsis:
+                        ellipsis_start = pos
+                        break
+            assert ellipsis_start is not None  # sanity check
+            ellipsis_end = self.ndim - (n_single_axes - ellipsis_start)
+            indexed_shape = (
+                self.shape[:ellipsis_start] + self.shape[ellipsis_end:]
+            )
+        for i, side in zip(single_axes, indexed_shape):
+            if isinstance(i, slice):
+                if side == 0:
+                    f_range = "0 (or None)"
+                else:
+                    f_range = f"between -{side} and {side - 1} (or None)"
+                if i.start is not None:
+                    try:
+                        start = operator.index(i.start)
+                    except TypeError:
+                        pass  # handled by ndarray
+                    else:
+                        if not (-side <= start <= side):
+                            raise IndexError(
+                                f"Slice {i} contains {start=}, but should be "
+                                f"{f_range} for an axis of size {side} "
+                                "(out-of-bounds starts are not specified in "
+                                "the Array API)"
+                            )
+                if i.stop is not None:
+                    try:
+                        stop = operator.index(i.stop)
+                    except TypeError:
+                        pass  # handled by ndarray
+                    else:
+                        if not (-side <= stop <= side):
+                            raise IndexError(
+                                f"Slice {i} contains {stop=}, but should be "
+                                f"{f_range} for an axis of size {side} "
+                                "(out-of-bounds stops are not specified in "
+                                "the Array API)"
+                            )
+            elif isinstance(i, Array):
+                if i.dtype in _boolean_dtypes and len(_key) != 1:
+                    assert isinstance(key, tuple)  # sanity check
+                    raise IndexError(
+                        f"Single-axes index {i} is a boolean array and "
+                        f"{len(key)=}, but masking is only specified in the "
+                        "Array API when the array is the sole index."
+                    )
+                elif i.dtype in _integer_dtypes and i.ndim != 0:
+                    raise IndexError(
+                        f"Single-axes index {i} is a non-zero-dimensional "
+                        "integer array, but advanced integer indexing is not "
+                        "specified in the Array API."
+                    )
+            elif isinstance(i, tuple):
+                raise IndexError(
+                    f"Single-axes index {i} is a tuple, but nested tuple "
+                    "indices are not specified in the Array API."
+                )
+
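+    # A few indicative cases (sketch) for a 2-D array ``a``:
+    #
+    #     a[0, :]      # accepted: every axis explicitly indexed
+    #     a[0, ...]    # accepted: explicit ellipsis
+    #     a[0]         # IndexError: implicit trailing (flat) indexing
+    #     a[a > 0, 0]  # IndexError: a boolean mask must be the sole index
+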
+    # Everything below this line is required by the spec.
+
+    def __abs__(self: Array, /) -> Array:
+        """
+        Performs the operation __abs__.
+        """
+        if self.dtype not in _numeric_dtypes:
+            raise TypeError("Only numeric dtypes are allowed in __abs__")
+        res = self._array.__abs__()
+        return self.__class__._new(res)
+
+    def __add__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __add__.
+        """
+        other = self._check_allowed_dtypes(other, "numeric", "__add__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__add__(other._array)
+        return self.__class__._new(res)
+
+    def __and__(self: Array, other: Union[int, bool, Array], /) -> Array:
+        """
+        Performs the operation __and__.
+        """
+        other = self._check_allowed_dtypes(other, "integer or boolean", "__and__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__and__(other._array)
+        return self.__class__._new(res)
+
+    def __array_namespace__(
+        self: Array, /, *, api_version: Optional[str] = None
+    ) -> types.ModuleType:
+        if api_version is not None and api_version not in ("2021.12", "2022.12"):
+            raise ValueError(f"Unrecognized array API version: {api_version!r}")
+        return array_api
+
+    def __bool__(self: Array, /) -> bool:
+        """
+        Performs the operation __bool__.
+        """
+        # Note: This is an error here.
+        if self._array.ndim != 0:
+            raise TypeError("bool is only allowed on arrays with 0 dimensions")
+        res = self._array.__bool__()
+        return res
+
+    def __complex__(self: Array, /) -> complex:
+        """
+        Performs the operation __complex__.
+        """
+        # Note: This is an error here.
+        if self._array.ndim != 0:
+            raise TypeError("complex is only allowed on arrays with 0 dimensions")
+        res = self._array.__complex__()
+        return res
+
+    def __dlpack__(self: Array, /, *, stream: None = None) -> PyCapsule:
+        """
+        Performs the operation __dlpack__.
+        """
+        return self._array.__dlpack__(stream=stream)
+
+    def __dlpack_device__(self: Array, /) -> Tuple[IntEnum, int]:
+        """
+        Performs the operation __dlpack_device__.
+        """
+        # Note: device support is required for this
+        return self._array.__dlpack_device__()
+
+    def __eq__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
+        """
+        Performs the operation __eq__.
+        """
+        # Even though "all" dtypes are allowed, we still require them to be
+        # promotable with each other.
+        other = self._check_allowed_dtypes(other, "all", "__eq__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__eq__(other._array)
+        return self.__class__._new(res)
+
+    def __float__(self: Array, /) -> float:
+        """
+        Performs the operation __float__.
+        """
+        # Note: This is an error here.
+        if self._array.ndim != 0:
+            raise TypeError("float is only allowed on arrays with 0 dimensions")
+        if self.dtype in _complex_floating_dtypes:
+            raise TypeError("float is not allowed on complex floating-point arrays")
+        res = self._array.__float__()
+        return res
+
+    def __floordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __floordiv__.
+        """
+        other = self._check_allowed_dtypes(other, "real numeric", "__floordiv__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__floordiv__(other._array)
+        return self.__class__._new(res)
+
+    def __ge__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __ge__.
+        """
+        other = self._check_allowed_dtypes(other, "real numeric", "__ge__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__ge__(other._array)
+        return self.__class__._new(res)
+
+    def __getitem__(
+        self: Array,
+        key: Union[
+            int,
+            slice,
+            ellipsis,
+            Tuple[Union[int, slice, ellipsis, None], ...],
+            Array,
+        ],
+        /,
+    ) -> Array:
+        """
+        Performs the operation __getitem__.
+        """
+        # Note: Only indices required by the spec are allowed. See the
+        # docstring of _validate_index
+        self._validate_index(key)
+        if isinstance(key, Array):
+            # Indexing self._array with array_api arrays can be erroneous
+            key = key._array
+        res = self._array.__getitem__(key)
+        return self._new(res)
+
+    def __gt__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __gt__.
+        """
+        other = self._check_allowed_dtypes(other, "real numeric", "__gt__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__gt__(other._array)
+        return self.__class__._new(res)
+
+    def __int__(self: Array, /) -> int:
+        """
+        Performs the operation __int__.
+        """
+        # Note: This is an error here.
+        if self._array.ndim != 0:
+            raise TypeError("int is only allowed on arrays with 0 dimensions")
+        if self.dtype in _complex_floating_dtypes:
+            raise TypeError("int is not allowed on complex floating-point arrays")
+        res = self._array.__int__()
+        return res
+
+    def __index__(self: Array, /) -> int:
+        """
+        Performs the operation __index__.
+        """
+        res = self._array.__index__()
+        return res
+
+    def __invert__(self: Array, /) -> Array:
+        """
+        Performs the operation __invert__.
+        """
+        if self.dtype not in _integer_or_boolean_dtypes:
+            raise TypeError("Only integer or boolean dtypes are allowed in __invert__")
+        res = self._array.__invert__()
+        return self.__class__._new(res)
+
+    def __le__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __le__.
+        """
+        other = self._check_allowed_dtypes(other, "real numeric", "__le__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__le__(other._array)
+        return self.__class__._new(res)
+
+    def __lshift__(self: Array, other: Union[int, Array], /) -> Array:
+        """
+        Performs the operation __lshift__.
+        """
+        other = self._check_allowed_dtypes(other, "integer", "__lshift__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__lshift__(other._array)
+        return self.__class__._new(res)
+
+    def __lt__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __lt__.
+        """
+        other = self._check_allowed_dtypes(other, "real numeric", "__lt__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__lt__(other._array)
+        return self.__class__._new(res)
+
+    def __matmul__(self: Array, other: Array, /) -> Array:
+        """
+        Performs the operation __matmul__.
+        """
+        # matmul is not defined for scalars, but without this, we may get
+        # the wrong error message from asarray.
+        other = self._check_allowed_dtypes(other, "numeric", "__matmul__")
+        if other is NotImplemented:
+            return other
+        res = self._array.__matmul__(other._array)
+        return self.__class__._new(res)
+
+    def __mod__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __mod__.
+        """
+        other = self._check_allowed_dtypes(other, "real numeric", "__mod__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__mod__(other._array)
+        return self.__class__._new(res)
+
+    def __mul__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __mul__.
+        """
+        other = self._check_allowed_dtypes(other, "numeric", "__mul__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__mul__(other._array)
+        return self.__class__._new(res)
+
+    def __ne__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
+        """
+        Performs the operation __ne__.
+        """
+        other = self._check_allowed_dtypes(other, "all", "__ne__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__ne__(other._array)
+        return self.__class__._new(res)
+
+    def __neg__(self: Array, /) -> Array:
+        """
+        Performs the operation __neg__.
+        """
+        if self.dtype not in _numeric_dtypes:
+            raise TypeError("Only numeric dtypes are allowed in __neg__")
+        res = self._array.__neg__()
+        return self.__class__._new(res)
+
+    def __or__(self: Array, other: Union[int, bool, Array], /) -> Array:
+        """
+        Performs the operation __or__.
+        """
+        other = self._check_allowed_dtypes(other, "integer or boolean", "__or__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__or__(other._array)
+        return self.__class__._new(res)
+
+    def __pos__(self: Array, /) -> Array:
+        """
+        Performs the operation __pos__.
+        """
+        if self.dtype not in _numeric_dtypes:
+            raise TypeError("Only numeric dtypes are allowed in __pos__")
+        res = self._array.__pos__()
+        return self.__class__._new(res)
+
+    def __pow__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __pow__.
+        """
+        from ._elementwise_functions import pow
+
+        other = self._check_allowed_dtypes(other, "numeric", "__pow__")
+        if other is NotImplemented:
+            return other
+        # Note: NumPy's __pow__ does not follow type promotion rules for 0-d
+        # arrays, so we use pow() here instead.
+        return pow(self, other)
+
+    def __rshift__(self: Array, other: Union[int, Array], /) -> Array:
+        """
+        Performs the operation __rshift__.
+        """
+        other = self._check_allowed_dtypes(other, "integer", "__rshift__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__rshift__(other._array)
+        return self.__class__._new(res)
+
+    def __setitem__(
+        self,
+        key: Union[
+            int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
+        ],
+        value: Union[int, float, bool, Array],
+        /,
+    ) -> None:
+        """
+        Performs the operation __setitem__.
+        """
+        # Note: Only indices required by the spec are allowed. See the
+        # docstring of _validate_index
+        self._validate_index(key)
+        if isinstance(key, Array):
+            # Indexing self._array with array_api arrays can be erroneous
+            key = key._array
+        self._array.__setitem__(key, asarray(value)._array)
+
+    def __sub__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __sub__.
+        """
+        other = self._check_allowed_dtypes(other, "numeric", "__sub__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__sub__(other._array)
+        return self.__class__._new(res)
+
+    # PEP 484 requires int to be a subtype of float, but __truediv__ should
+    # not accept int.
+    def __truediv__(self: Array, other: Union[float, Array], /) -> Array:
+        """
+        Performs the operation __truediv__.
+        """
+        other = self._check_allowed_dtypes(other, "floating-point", "__truediv__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__truediv__(other._array)
+        return self.__class__._new(res)
+
+    def __xor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+        """
+        Performs the operation __xor__.
+        """
+        other = self._check_allowed_dtypes(other, "integer or boolean", "__xor__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__xor__(other._array)
+        return self.__class__._new(res)
+
+    def __iadd__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __iadd__.
+        """
+        other = self._check_allowed_dtypes(other, "numeric", "__iadd__")
+        if other is NotImplemented:
+            return other
+        self._array.__iadd__(other._array)
+        return self
+
+    def __radd__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __radd__.
+        """
+        other = self._check_allowed_dtypes(other, "numeric", "__radd__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__radd__(other._array)
+        return self.__class__._new(res)
+
+    def __iand__(self: Array, other: Union[int, bool, Array], /) -> Array:
+        """
+        Performs the operation __iand__.
+        """
+        other = self._check_allowed_dtypes(other, "integer or boolean", "__iand__")
+        if other is NotImplemented:
+            return other
+        self._array.__iand__(other._array)
+        return self
+
+    def __rand__(self: Array, other: Union[int, bool, Array], /) -> Array:
+        """
+        Performs the operation __rand__.
+        """
+        other = self._check_allowed_dtypes(other, "integer or boolean", "__rand__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__rand__(other._array)
+        return self.__class__._new(res)
+
+    def __ifloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __ifloordiv__.
+        """
+        other = self._check_allowed_dtypes(other, "real numeric", "__ifloordiv__")
+        if other is NotImplemented:
+            return other
+        self._array.__ifloordiv__(other._array)
+        return self
+
+    def __rfloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __rfloordiv__.
+        """
+        other = self._check_allowed_dtypes(other, "real numeric", "__rfloordiv__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__rfloordiv__(other._array)
+        return self.__class__._new(res)
+
+    def __ilshift__(self: Array, other: Union[int, Array], /) -> Array:
+        """
+        Performs the operation __ilshift__.
+        """
+        other = self._check_allowed_dtypes(other, "integer", "__ilshift__")
+        if other is NotImplemented:
+            return other
+        self._array.__ilshift__(other._array)
+        return self
+
+    def __rlshift__(self: Array, other: Union[int, Array], /) -> Array:
+        """
+        Performs the operation __rlshift__.
+        """
+        other = self._check_allowed_dtypes(other, "integer", "__rlshift__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__rlshift__(other._array)
+        return self.__class__._new(res)
+
+    def __imatmul__(self: Array, other: Array, /) -> Array:
+        """
+        Performs the operation __imatmul__.
+        """
+        # matmul is not defined for scalars, but without this, we may get
+        # the wrong error message from asarray.
+        other = self._check_allowed_dtypes(other, "numeric", "__imatmul__")
+        if other is NotImplemented:
+            return other
+        res = self._array.__imatmul__(other._array)
+        return self.__class__._new(res)
+
+    def __rmatmul__(self: Array, other: Array, /) -> Array:
+        """
+        Performs the operation __rmatmul__.
+        """
+        # matmul is not defined for scalars, but without this, we may get
+        # the wrong error message from asarray.
+        other = self._check_allowed_dtypes(other, "numeric", "__rmatmul__")
+        if other is NotImplemented:
+            return other
+        res = self._array.__rmatmul__(other._array)
+        return self.__class__._new(res)
+
+    def __imod__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __imod__.
+        """
+        other = self._check_allowed_dtypes(other, "real numeric", "__imod__")
+        if other is NotImplemented:
+            return other
+        self._array.__imod__(other._array)
+        return self
+
+    def __rmod__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __rmod__.
+        """
+        other = self._check_allowed_dtypes(other, "real numeric", "__rmod__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__rmod__(other._array)
+        return self.__class__._new(res)
+
+    def __imul__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __imul__.
+        """
+        other = self._check_allowed_dtypes(other, "numeric", "__imul__")
+        if other is NotImplemented:
+            return other
+        self._array.__imul__(other._array)
+        return self
+
+    def __rmul__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __rmul__.
+        """
+        other = self._check_allowed_dtypes(other, "numeric", "__rmul__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__rmul__(other._array)
+        return self.__class__._new(res)
+
+    def __ior__(self: Array, other: Union[int, bool, Array], /) -> Array:
+        """
+        Performs the operation __ior__.
+        """
+        other = self._check_allowed_dtypes(other, "integer or boolean", "__ior__")
+        if other is NotImplemented:
+            return other
+        self._array.__ior__(other._array)
+        return self
+
+    def __ror__(self: Array, other: Union[int, bool, Array], /) -> Array:
+        """
+        Performs the operation __ror__.
+        """
+        other = self._check_allowed_dtypes(other, "integer or boolean", "__ror__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__ror__(other._array)
+        return self.__class__._new(res)
+
+    def __ipow__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __ipow__.
+        """
+        other = self._check_allowed_dtypes(other, "numeric", "__ipow__")
+        if other is NotImplemented:
+            return other
+        self._array.__ipow__(other._array)
+        return self
+
+    def __rpow__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __rpow__.
+        """
+        from ._elementwise_functions import pow
+
+        other = self._check_allowed_dtypes(other, "numeric", "__rpow__")
+        if other is NotImplemented:
+            return other
+        # Note: NumPy's __pow__ does not follow the spec type promotion rules
+        # for 0-d arrays, so we use pow() here instead.
+        return pow(other, self)
+
+    def __irshift__(self: Array, other: Union[int, Array], /) -> Array:
+        """
+        Performs the operation __irshift__.
+        """
+        other = self._check_allowed_dtypes(other, "integer", "__irshift__")
+        if other is NotImplemented:
+            return other
+        self._array.__irshift__(other._array)
+        return self
+
+    def __rrshift__(self: Array, other: Union[int, Array], /) -> Array:
+        """
+        Performs the operation __rrshift__.
+        """
+        other = self._check_allowed_dtypes(other, "integer", "__rrshift__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__rrshift__(other._array)
+        return self.__class__._new(res)
+
+    def __isub__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __isub__.
+        """
+        other = self._check_allowed_dtypes(other, "numeric", "__isub__")
+        if other is NotImplemented:
+            return other
+        self._array.__isub__(other._array)
+        return self
+
+    def __rsub__(self: Array, other: Union[int, float, Array], /) -> Array:
+        """
+        Performs the operation __rsub__.
+        """
+        other = self._check_allowed_dtypes(other, "numeric", "__rsub__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__rsub__(other._array)
+        return self.__class__._new(res)
+
+    def __itruediv__(self: Array, other: Union[float, Array], /) -> Array:
+        """
+        Performs the operation __itruediv__.
+        """
+        other = self._check_allowed_dtypes(other, "floating-point", "__itruediv__")
+        if other is NotImplemented:
+            return other
+        self._array.__itruediv__(other._array)
+        return self
+
+    def __rtruediv__(self: Array, other: Union[float, Array], /) -> Array:
+        """
+        Performs the operation __rtruediv__.
+        """
+        other = self._check_allowed_dtypes(other, "floating-point", "__rtruediv__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__rtruediv__(other._array)
+        return self.__class__._new(res)
+
+    def __ixor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+        """
+        Performs the operation __ixor__.
+        """
+        other = self._check_allowed_dtypes(other, "integer or boolean", "__ixor__")
+        if other is NotImplemented:
+            return other
+        self._array.__ixor__(other._array)
+        return self
+
+    def __rxor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+        """
+        Performs the operation __rxor__.
+        """
+        other = self._check_allowed_dtypes(other, "integer or boolean", "__rxor__")
+        if other is NotImplemented:
+            return other
+        self, other = self._normalize_two_args(self, other)
+        res = self._array.__rxor__(other._array)
+        return self.__class__._new(res)
+
+    def to_device(self: Array, device: Device, /, stream: None = None) -> Array:
+        if stream is not None:
+            raise ValueError("The stream argument to to_device() is not supported")
+        if device == 'cpu':
+            return self
+        raise ValueError(f"Unsupported device {device!r}")
+
+    @property
+    def dtype(self) -> Dtype:
+        """
+        Array API compatible wrapper for :py:meth:`np.ndarray.dtype <numpy.ndarray.dtype>`.
+
+        See its docstring for more information.
+        """
+        return self._array.dtype
+
+    @property
+    def device(self) -> Device:
+        return "cpu"
+
+    # Note: mT is new in array API spec (see matrix_transpose)
+    @property
+    def mT(self) -> Array:
+        from .linalg import matrix_transpose
+        return matrix_transpose(self)
+
+    @property
+    def ndim(self) -> int:
+        """
+        Array API compatible wrapper for :py:meth:`np.ndarray.ndim <numpy.ndarray.ndim>`.
+
+        See its docstring for more information.
+        """
+        return self._array.ndim
+
+    @property
+    def shape(self) -> Tuple[int, ...]:
+        """
+        Array API compatible wrapper for :py:meth:`np.ndarray.shape <numpy.ndarray.shape>`.
+
+        See its docstring for more information.
+        """
+        return self._array.shape
+
+    @property
+    def size(self) -> int:
+        """
+        Array API compatible wrapper for :py:meth:`np.ndarray.size <numpy.ndarray.size>`.
+
+        See its docstring for more information.
+        """
+        return self._array.size
+
+    @property
+    def T(self) -> Array:
+        """
+        Array API compatible wrapper for :py:meth:`np.ndarray.T <numpy.ndarray.T>`.
+
+        See its docstring for more information.
+        """
+        # Note: T only works on 2-dimensional arrays. See the corresponding
+        # note in the specification:
+        # https://data-apis.org/array-api/latest/API_specification/array_object.html#t
+        if self.ndim != 2:
+            raise ValueError("x.T requires x to have 2 dimensions. Use x.mT to transpose stacks of matrices and permute_dims() to permute dimensions.")
+        return self.__class__._new(self._array.T)
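
To make the reflected and in-place operator wrappers above concrete, here is a minimal usage sketch. It assumes this vendored module is importable as `numpy.array_api` (NumPy 1.26 emits a UserWarning that the namespace is experimental).

```python
import numpy.array_api as xp  # assumed import path; warns that it is experimental

x = xp.asarray([1.0, 2.0, 4.0])

# Reflected operators accept Python scalars, which are promoted to x's dtype
# via Array._promote_scalar before dispatching to the wrapped ndarray:
y = 1.0 / x                      # calls x.__rtruediv__(1.0)
assert y.dtype == xp.float64

# In-place operators mutate the wrapped ndarray and return self:
x /= 2.0                         # calls x.__itruediv__(2.0)

# Cross-kind operands are rejected instead of being silently promoted:
try:
    xp.asarray([1.0]) // xp.asarray([1])   # float64 // int64
except TypeError as exc:
    print(exc)
```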
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_constants.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_constants.py
new file mode 100644
index 00000000..15ab81d1
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_constants.py
@@ -0,0 +1,7 @@
+import numpy as np
+
+e = np.e
+inf = np.inf
+nan = np.nan
+pi = np.pi
+newaxis = np.newaxis
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_creation_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_creation_functions.py
new file mode 100644
index 00000000..3b014d37
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_creation_functions.py
@@ -0,0 +1,351 @@
+from __future__ import annotations
+
+
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+
+if TYPE_CHECKING:
+    from ._typing import (
+        Array,
+        Device,
+        Dtype,
+        NestedSequence,
+        SupportsBufferProtocol,
+    )
+    from collections.abc import Sequence
+from ._dtypes import _all_dtypes
+
+import numpy as np
+
+
+def _check_valid_dtype(dtype):
+    # Note: Only spelling dtypes as the dtype objects is supported.
+
+    # We use this instead of "dtype in _all_dtypes" because the dtype objects
+    # define equality with the sorts of things we want to disallow.
+    for d in (None,) + _all_dtypes:
+        if dtype is d:
+            return
+    raise ValueError("dtype must be one of the supported dtypes")
+
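
The identity comparison above is deliberate: `np.dtype` objects compare equal to strings and scalar types that the spec disallows as dtype spellings, so a plain `in` test (which uses `==`) would be too permissive. A small illustration, with an abbreviated stand-in tuple:

```python
import numpy as np

dtypes = (np.dtype("float64"), np.dtype("int64"))  # abbreviated for this sketch

spelling = "float64"
print(spelling in dtypes)   # True -- `in` uses ==, and np.dtype == str succeeds
print(float in dtypes)      # True -- the builtin type also compares equal

# Identity only matches the exact dtype objects, as the loop above requires:
print(any(spelling is d for d in dtypes))   # False
```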
+
+def asarray(
+    obj: Union[
+        Array,
+        bool,
+        int,
+        float,
+        NestedSequence[bool | int | float],
+        SupportsBufferProtocol,
+    ],
+    /,
+    *,
+    dtype: Optional[Dtype] = None,
+    device: Optional[Device] = None,
+    copy: Optional[Union[bool, np._CopyMode]] = None,
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.asarray <numpy.asarray>`.
+
+    See its docstring for more information.
+    """
+    # _array_object imports in this file are inside the functions to avoid
+    # circular imports
+    from ._array_object import Array
+
+    _check_valid_dtype(dtype)
+    if device not in ["cpu", None]:
+        raise ValueError(f"Unsupported device {device!r}")
+    if copy in (False, np._CopyMode.IF_NEEDED):
+        # Note: copy=False is not yet implemented in np.asarray
+        raise NotImplementedError("copy=False is not yet implemented")
+    if isinstance(obj, Array):
+        if dtype is not None and obj.dtype != dtype:
+            copy = True
+        if copy in (True, np._CopyMode.ALWAYS):
+            return Array._new(np.array(obj._array, copy=True, dtype=dtype))
+        return obj
+    if dtype is None and isinstance(obj, int) and (obj >= 2 ** 64 or obj < -(2 ** 63)):
+        # Give a better error message in this case. NumPy would convert this
+        # to an object array. TODO: This won't handle large integers in lists.
+        raise OverflowError("Integer out of bounds for array dtypes")
+    res = np.asarray(obj, dtype=dtype)
+    return Array._new(res)
+
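
A short sketch of the copy and bounds behavior implemented above (assuming the `numpy.array_api` import path):

```python
import numpy.array_api as xp  # assumed import path

a = xp.asarray([1, 2, 3])
assert xp.asarray(a) is a                 # no dtype change, no copy requested

b = xp.asarray(a, dtype=xp.float64)       # dtype mismatch forces a copy
assert b is not a and b.dtype == xp.float64

try:
    xp.asarray(2**65)                     # cannot fit any spec integer dtype
except OverflowError as exc:
    print(exc)                            # Integer out of bounds for array dtypes
```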
+
+def arange(
+    start: Union[int, float],
+    /,
+    stop: Optional[Union[int, float]] = None,
+    step: Union[int, float] = 1,
+    *,
+    dtype: Optional[Dtype] = None,
+    device: Optional[Device] = None,
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arange <numpy.arange>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    _check_valid_dtype(dtype)
+    if device not in ["cpu", None]:
+        raise ValueError(f"Unsupported device {device!r}")
+    return Array._new(np.arange(start, stop=stop, step=step, dtype=dtype))
+
+
+def empty(
+    shape: Union[int, Tuple[int, ...]],
+    *,
+    dtype: Optional[Dtype] = None,
+    device: Optional[Device] = None,
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.empty <numpy.empty>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    _check_valid_dtype(dtype)
+    if device not in ["cpu", None]:
+        raise ValueError(f"Unsupported device {device!r}")
+    return Array._new(np.empty(shape, dtype=dtype))
+
+
+def empty_like(
+    x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.empty_like <numpy.empty_like>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    _check_valid_dtype(dtype)
+    if device not in ["cpu", None]:
+        raise ValueError(f"Unsupported device {device!r}")
+    return Array._new(np.empty_like(x._array, dtype=dtype))
+
+
+def eye(
+    n_rows: int,
+    n_cols: Optional[int] = None,
+    /,
+    *,
+    k: int = 0,
+    dtype: Optional[Dtype] = None,
+    device: Optional[Device] = None,
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.eye <numpy.eye>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    _check_valid_dtype(dtype)
+    if device not in ["cpu", None]:
+        raise ValueError(f"Unsupported device {device!r}")
+    return Array._new(np.eye(n_rows, M=n_cols, k=k, dtype=dtype))
+
+
+def from_dlpack(x: object, /) -> Array:
+    from ._array_object import Array
+
+    return Array._new(np.from_dlpack(x))
+
+
+def full(
+    shape: Union[int, Tuple[int, ...]],
+    fill_value: Union[int, float],
+    *,
+    dtype: Optional[Dtype] = None,
+    device: Optional[Device] = None,
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.full <numpy.full>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    _check_valid_dtype(dtype)
+    if device not in ["cpu", None]:
+        raise ValueError(f"Unsupported device {device!r}")
+    if isinstance(fill_value, Array) and fill_value.ndim == 0:
+        fill_value = fill_value._array
+    res = np.full(shape, fill_value, dtype=dtype)
+    if res.dtype not in _all_dtypes:
+        # This will happen if the fill value is not something that NumPy
+        # coerces to one of the acceptable dtypes.
+        raise TypeError("Invalid input to full")
+    return Array._new(res)
+
+
+def full_like(
+    x: Array,
+    /,
+    fill_value: Union[int, float],
+    *,
+    dtype: Optional[Dtype] = None,
+    device: Optional[Device] = None,
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.full_like <numpy.full_like>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    _check_valid_dtype(dtype)
+    if device not in ["cpu", None]:
+        raise ValueError(f"Unsupported device {device!r}")
+    res = np.full_like(x._array, fill_value, dtype=dtype)
+    if res.dtype not in _all_dtypes:
+        # This will happen if the fill value is not something that NumPy
+        # coerces to one of the acceptable dtypes.
+        raise TypeError("Invalid input to full_like")
+    return Array._new(res)
+
+
+def linspace(
+    start: Union[int, float],
+    stop: Union[int, float],
+    /,
+    num: int,
+    *,
+    dtype: Optional[Dtype] = None,
+    device: Optional[Device] = None,
+    endpoint: bool = True,
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.linspace <numpy.linspace>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    _check_valid_dtype(dtype)
+    if device not in ["cpu", None]:
+        raise ValueError(f"Unsupported device {device!r}")
+    return Array._new(np.linspace(start, stop, num, dtype=dtype, endpoint=endpoint))
+
+
+def meshgrid(*arrays: Array, indexing: str = "xy") -> List[Array]:
+    """
+    Array API compatible wrapper for :py:func:`np.meshgrid <numpy.meshgrid>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    # Note: unlike np.meshgrid, only inputs with all the same dtype are
+    # allowed
+
+    if len({a.dtype for a in arrays}) > 1:
+        raise ValueError("meshgrid inputs must all have the same dtype")
+
+    return [
+        Array._new(array)
+        for array in np.meshgrid(*[a._array for a in arrays], indexing=indexing)
+    ]
+
+
+def ones(
+    shape: Union[int, Tuple[int, ...]],
+    *,
+    dtype: Optional[Dtype] = None,
+    device: Optional[Device] = None,
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.ones <numpy.ones>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    _check_valid_dtype(dtype)
+    if device not in ["cpu", None]:
+        raise ValueError(f"Unsupported device {device!r}")
+    return Array._new(np.ones(shape, dtype=dtype))
+
+
+def ones_like(
+    x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.ones_like <numpy.ones_like>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    _check_valid_dtype(dtype)
+    if device not in ["cpu", None]:
+        raise ValueError(f"Unsupported device {device!r}")
+    return Array._new(np.ones_like(x._array, dtype=dtype))
+
+
+def tril(x: Array, /, *, k: int = 0) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.tril <numpy.tril>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    if x.ndim < 2:
+        # Note: Unlike np.tril, x must be at least 2-D
+        raise ValueError("x must be at least 2-dimensional for tril")
+    return Array._new(np.tril(x._array, k=k))
+
+
+def triu(x: Array, /, *, k: int = 0) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.triu <numpy.triu>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    if x.ndim < 2:
+        # Note: Unlike np.triu, x must be at least 2-D
+        raise ValueError("x must be at least 2-dimensional for triu")
+    return Array._new(np.triu(x._array, k=k))
+
+
+def zeros(
+    shape: Union[int, Tuple[int, ...]],
+    *,
+    dtype: Optional[Dtype] = None,
+    device: Optional[Device] = None,
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.zeros <numpy.zeros>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    _check_valid_dtype(dtype)
+    if device not in ["cpu", None]:
+        raise ValueError(f"Unsupported device {device!r}")
+    return Array._new(np.zeros(shape, dtype=dtype))
+
+
+def zeros_like(
+    x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.zeros_like <numpy.zeros_like>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    _check_valid_dtype(dtype)
+    if device not in ["cpu", None]:
+        raise ValueError(f"Unsupported device {device!r}")
+    return Array._new(np.zeros_like(x._array, dtype=dtype))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_data_type_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_data_type_functions.py
new file mode 100644
index 00000000..6f972c3b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_data_type_functions.py
@@ -0,0 +1,197 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._dtypes import (
+    _all_dtypes,
+    _boolean_dtypes,
+    _signed_integer_dtypes,
+    _unsigned_integer_dtypes,
+    _integer_dtypes,
+    _real_floating_dtypes,
+    _complex_floating_dtypes,
+    _numeric_dtypes,
+    _result_type,
+)
+
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, List, Tuple, Union
+
+if TYPE_CHECKING:
+    from ._typing import Dtype
+    from collections.abc import Sequence
+
+import numpy as np
+
+
+# Note: astype is a function, not an array method as in NumPy.
+def astype(x: Array, dtype: Dtype, /, *, copy: bool = True) -> Array:
+    if not copy and dtype == x.dtype:
+        return x
+    return Array._new(x._array.astype(dtype=dtype, copy=copy))
+
+
+def broadcast_arrays(*arrays: Array) -> List[Array]:
+    """
+    Array API compatible wrapper for :py:func:`np.broadcast_arrays <numpy.broadcast_arrays>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    return [
+        Array._new(array) for array in np.broadcast_arrays(*[a._array for a in arrays])
+    ]
+
+
+def broadcast_to(x: Array, /, shape: Tuple[int, ...]) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.broadcast_to <numpy.broadcast_to>`.
+
+    See its docstring for more information.
+    """
+    from ._array_object import Array
+
+    return Array._new(np.broadcast_to(x._array, shape))
+
+
+def can_cast(from_: Union[Dtype, Array], to: Dtype, /) -> bool:
+    """
+    Array API compatible wrapper for :py:func:`np.can_cast <numpy.can_cast>`.
+
+    See its docstring for more information.
+    """
+    if isinstance(from_, Array):
+        from_ = from_.dtype
+    elif from_ not in _all_dtypes:
+        raise TypeError(f"{from_=}, but should be an array_api array or dtype")
+    if to not in _all_dtypes:
+        raise TypeError(f"{to=}, but should be a dtype")
+    # Note: We avoid np.can_cast() as it has discrepancies with the array API,
+    # since NumPy allows cross-kind casting (e.g., NumPy allows bool -> int8).
+    # See https://github.com/numpy/numpy/issues/20870
+    try:
+        # We promote `from_` and `to` together. We then check if the promoted
+        # dtype is `to`, which indicates if `from_` can (up)cast to `to`.
+        dtype = _result_type(from_, to)
+        return to == dtype
+    except TypeError:
+        # _result_type() raises if the dtypes don't promote together
+        return False
+
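
The difference from `np.can_cast` noted in the comment above is easy to see directly; a hedged sketch, assuming the `numpy.array_api` import path:

```python
import numpy as np
import numpy.array_api as xp  # assumed import path

print(np.can_cast(np.bool_, np.int8))   # True  -- NumPy permits cross-kind casts
print(xp.can_cast(xp.bool, xp.int8))    # False -- bool and int8 never promote
print(xp.can_cast(xp.int8, xp.int32))   # True  -- int8 upcasts to int32
print(xp.can_cast(xp.int32, xp.int8))   # False -- promotion yields int32, not int8
```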
+
+# These are internal objects for the return types of finfo and iinfo, since
+# the NumPy versions contain extra data that isn't part of the spec.
+@dataclass
+class finfo_object:
+    bits: int
+    # Note: The types of the float data here are float, whereas in NumPy they
+    # are scalars of the corresponding float dtype.
+    eps: float
+    max: float
+    min: float
+    smallest_normal: float
+    dtype: Dtype
+
+
+@dataclass
+class iinfo_object:
+    bits: int
+    max: int
+    min: int
+    dtype: Dtype
+
+
+def finfo(type: Union[Dtype, Array], /) -> finfo_object:
+    """
+    Array API compatible wrapper for :py:func:`np.finfo <numpy.finfo>`.
+
+    See its docstring for more information.
+    """
+    fi = np.finfo(type)
+    # Note: The types of the float data here are float, whereas in NumPy they
+    # are scalars of the corresponding float dtype.
+    return finfo_object(
+        fi.bits,
+        float(fi.eps),
+        float(fi.max),
+        float(fi.min),
+        float(fi.smallest_normal),
+        fi.dtype,
+    )
+
+
+def iinfo(type: Union[Dtype, Array], /) -> iinfo_object:
+    """
+    Array API compatible wrapper for :py:func:`np.iinfo <numpy.iinfo>`.
+
+    See its docstring for more information.
+    """
+    ii = np.iinfo(type)
+    return iinfo_object(ii.bits, ii.max, ii.min, ii.dtype)
+
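
Because the dataclasses above hold plain Python numbers, callers get `float`/`int` rather than NumPy scalars; for example:

```python
import numpy.array_api as xp  # assumed import path

fi = xp.finfo(xp.float32)
print(fi.bits, fi.dtype)    # 32 float32
print(type(fi.eps))         # <class 'float'>, not np.float32

ii = xp.iinfo(xp.int8)
print(ii.min, ii.max)       # -128 127
```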
+
+# Note: isdtype is a new function from the 2022.12 array API specification.
+def isdtype(
+    dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]]
+) -> bool:
+    """
+    Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.
+
+    See
+    https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
+    for more details
+    """
+    if isinstance(kind, tuple):
+        # Disallow nested tuples
+        if any(isinstance(k, tuple) for k in kind):
+            raise TypeError("'kind' must be a dtype, str, or tuple of dtypes and strs")
+        return any(isdtype(dtype, k) for k in kind)
+    elif isinstance(kind, str):
+        if kind == 'bool':
+            return dtype in _boolean_dtypes
+        elif kind == 'signed integer':
+            return dtype in _signed_integer_dtypes
+        elif kind == 'unsigned integer':
+            return dtype in _unsigned_integer_dtypes
+        elif kind == 'integral':
+            return dtype in _integer_dtypes
+        elif kind == 'real floating':
+            return dtype in _real_floating_dtypes
+        elif kind == 'complex floating':
+            return dtype in _complex_floating_dtypes
+        elif kind == 'numeric':
+            return dtype in _numeric_dtypes
+        else:
+            raise ValueError(f"Unrecognized data type kind: {kind!r}")
+    elif kind in _all_dtypes:
+        return dtype == kind
+    else:
+        raise TypeError(f"'kind' must be a dtype, str, or tuple of dtypes and strs, not {type(kind).__name__}")
+
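
Typical `isdtype` calls, covering the string kinds and the tuple form handled above (assumed `numpy.array_api` import path):

```python
import numpy.array_api as xp  # assumed import path

print(xp.isdtype(xp.int16, "signed integer"))            # True
print(xp.isdtype(xp.uint8, "integral"))                  # True
print(xp.isdtype(xp.complex64, "complex floating"))      # True
print(xp.isdtype(xp.float32, ("bool", xp.complex64)))    # False -- no kind matches
```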
+def result_type(*arrays_and_dtypes: Union[Array, Dtype]) -> Dtype:
+    """
+    Array API compatible wrapper for :py:func:`np.result_type <numpy.result_type>`.
+
+    See its docstring for more information.
+    """
+    # Note: we use a custom implementation that gives only the type promotions
+    # required by the spec rather than using np.result_type. NumPy implements
+    # too many extra type promotions like int64 + uint64 -> float64, and does
+    # value-based casting on scalar arrays.
+    A = []
+    for a in arrays_and_dtypes:
+        if isinstance(a, Array):
+            a = a.dtype
+        elif isinstance(a, np.ndarray) or a not in _all_dtypes:
+            raise TypeError("result_type() inputs must be array_api arrays or dtypes")
+        A.append(a)
+
+    if len(A) == 0:
+        raise ValueError("at least one array or dtype is required")
+    elif len(A) == 1:
+        return A[0]
+    else:
+        t = A[0]
+        for t2 in A[1:]:
+            t = _result_type(t, t2)
+        return t
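
A sketch of how this custom promotion differs from `np.result_type` (same assumed import path):

```python
import numpy.array_api as xp  # assumed import path

x = xp.asarray([1], dtype=xp.int8)
print(xp.result_type(x, xp.int32))       # int32 -- arrays and dtypes can be mixed

try:
    xp.result_type(xp.int64, xp.uint64)  # np.result_type would give float64
except TypeError as exc:
    print(exc)
```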
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_dtypes.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_dtypes.py
new file mode 100644
index 00000000..0e8f666e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_dtypes.py
@@ -0,0 +1,180 @@
+import numpy as np
+
+# Note: we use dtype objects instead of dtype classes. The spec does not
+# require any behavior on dtypes other than equality.
+int8 = np.dtype("int8")
+int16 = np.dtype("int16")
+int32 = np.dtype("int32")
+int64 = np.dtype("int64")
+uint8 = np.dtype("uint8")
+uint16 = np.dtype("uint16")
+uint32 = np.dtype("uint32")
+uint64 = np.dtype("uint64")
+float32 = np.dtype("float32")
+float64 = np.dtype("float64")
+complex64 = np.dtype("complex64")
+complex128 = np.dtype("complex128")
+# Note: This name is changed (the array API spells it "bool", shadowing the builtin)
+bool = np.dtype("bool")
+
+_all_dtypes = (
+    int8,
+    int16,
+    int32,
+    int64,
+    uint8,
+    uint16,
+    uint32,
+    uint64,
+    float32,
+    float64,
+    complex64,
+    complex128,
+    bool,
+)
+_boolean_dtypes = (bool,)
+_real_floating_dtypes = (float32, float64)
+_floating_dtypes = (float32, float64, complex64, complex128)
+_complex_floating_dtypes = (complex64, complex128)
+_integer_dtypes = (int8, int16, int32, int64, uint8, uint16, uint32, uint64)
+_signed_integer_dtypes = (int8, int16, int32, int64)
+_unsigned_integer_dtypes = (uint8, uint16, uint32, uint64)
+_integer_or_boolean_dtypes = (
+    bool,
+    int8,
+    int16,
+    int32,
+    int64,
+    uint8,
+    uint16,
+    uint32,
+    uint64,
+)
+_real_numeric_dtypes = (
+    float32,
+    float64,
+    int8,
+    int16,
+    int32,
+    int64,
+    uint8,
+    uint16,
+    uint32,
+    uint64,
+)
+_numeric_dtypes = (
+    float32,
+    float64,
+    complex64,
+    complex128,
+    int8,
+    int16,
+    int32,
+    int64,
+    uint8,
+    uint16,
+    uint32,
+    uint64,
+)
+
+_dtype_categories = {
+    "all": _all_dtypes,
+    "real numeric": _real_numeric_dtypes,
+    "numeric": _numeric_dtypes,
+    "integer": _integer_dtypes,
+    "integer or boolean": _integer_or_boolean_dtypes,
+    "boolean": _boolean_dtypes,
+    "real floating-point": _floating_dtypes,
+    "complex floating-point": _complex_floating_dtypes,
+    "floating-point": _floating_dtypes,
+}
+
+
+# Note: the spec defines a restricted type promotion table compared to NumPy.
+# In particular, cross-kind promotions like integer + float or boolean +
+# integer are not allowed, even for functions that accept both kinds.
+# Additionally, NumPy promotes signed integer + uint64 to float64, but this
+# promotion is not allowed here. To be clear, Python int scalars are
+# allowed to promote to floating-point dtypes, but only inside the array
+# operators (see the Array._promote_scalar method in _array_object.py).
+_promotion_table = {
+    (int8, int8): int8,
+    (int8, int16): int16,
+    (int8, int32): int32,
+    (int8, int64): int64,
+    (int16, int8): int16,
+    (int16, int16): int16,
+    (int16, int32): int32,
+    (int16, int64): int64,
+    (int32, int8): int32,
+    (int32, int16): int32,
+    (int32, int32): int32,
+    (int32, int64): int64,
+    (int64, int8): int64,
+    (int64, int16): int64,
+    (int64, int32): int64,
+    (int64, int64): int64,
+    (uint8, uint8): uint8,
+    (uint8, uint16): uint16,
+    (uint8, uint32): uint32,
+    (uint8, uint64): uint64,
+    (uint16, uint8): uint16,
+    (uint16, uint16): uint16,
+    (uint16, uint32): uint32,
+    (uint16, uint64): uint64,
+    (uint32, uint8): uint32,
+    (uint32, uint16): uint32,
+    (uint32, uint32): uint32,
+    (uint32, uint64): uint64,
+    (uint64, uint8): uint64,
+    (uint64, uint16): uint64,
+    (uint64, uint32): uint64,
+    (uint64, uint64): uint64,
+    (int8, uint8): int16,
+    (int8, uint16): int32,
+    (int8, uint32): int64,
+    (int16, uint8): int16,
+    (int16, uint16): int32,
+    (int16, uint32): int64,
+    (int32, uint8): int32,
+    (int32, uint16): int32,
+    (int32, uint32): int64,
+    (int64, uint8): int64,
+    (int64, uint16): int64,
+    (int64, uint32): int64,
+    (uint8, int8): int16,
+    (uint16, int8): int32,
+    (uint32, int8): int64,
+    (uint8, int16): int16,
+    (uint16, int16): int32,
+    (uint32, int16): int64,
+    (uint8, int32): int32,
+    (uint16, int32): int32,
+    (uint32, int32): int64,
+    (uint8, int64): int64,
+    (uint16, int64): int64,
+    (uint32, int64): int64,
+    (float32, float32): float32,
+    (float32, float64): float64,
+    (float64, float32): float64,
+    (float64, float64): float64,
+    (complex64, complex64): complex64,
+    (complex64, complex128): complex128,
+    (complex128, complex64): complex128,
+    (complex128, complex128): complex128,
+    (float32, complex64): complex64,
+    (float32, complex128): complex128,
+    (float64, complex64): complex128,
+    (float64, complex128): complex128,
+    (complex64, float32): complex64,
+    (complex64, float64): complex128,
+    (complex128, float32): complex128,
+    (complex128, float64): complex128,
+    (bool, bool): bool,
+}
+
+
+def _result_type(type1, type2):
+    if (type1, type2) in _promotion_table:
+        return _promotion_table[type1, type2]
+    raise TypeError(f"{type1} and {type2} cannot be type promoted together")
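
A minimal sketch of the table lookups above, run against this module's private helpers (internal names, so this is illustration only):

```python
from numpy.array_api._dtypes import _result_type, int8, int16, uint8, uint64

print(_result_type(int8, uint8))   # int16 -- smallest signed dtype holding both
print(_result_type(int8, int16))   # int16

try:
    _result_type(int8, uint64)     # deliberately absent from the table
except TypeError as exc:
    print(exc)
```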
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_elementwise_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_elementwise_functions.py
new file mode 100644
index 00000000..8b696772
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_elementwise_functions.py
@@ -0,0 +1,765 @@
+from __future__ import annotations
+
+from ._dtypes import (
+    _boolean_dtypes,
+    _floating_dtypes,
+    _real_floating_dtypes,
+    _complex_floating_dtypes,
+    _integer_dtypes,
+    _integer_or_boolean_dtypes,
+    _real_numeric_dtypes,
+    _numeric_dtypes,
+    _result_type,
+)
+from ._array_object import Array
+
+import numpy as np
+
+
+def abs(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.abs <numpy.abs>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in abs")
+    return Array._new(np.abs(x._array))
+
+
+# Note: the function name is different here
+def acos(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arccos <numpy.arccos>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in acos")
+    return Array._new(np.arccos(x._array))
+
+
+# Note: the function name is different here
+def acosh(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arccosh <numpy.arccosh>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in acosh")
+    return Array._new(np.arccosh(x._array))
+
+
+def add(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.add <numpy.add>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in add")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.add(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def asin(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arcsin <numpy.arcsin>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in asin")
+    return Array._new(np.arcsin(x._array))
+
+
+# Note: the function name is different here
+def asinh(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arcsinh <numpy.arcsinh>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in asinh")
+    return Array._new(np.arcsinh(x._array))
+
+
+# Note: the function name is different here
+def atan(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arctan <numpy.arctan>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in atan")
+    return Array._new(np.arctan(x._array))
+
+
+# Note: the function name is different here
+def atan2(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arctan2 <numpy.arctan2>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _real_floating_dtypes or x2.dtype not in _real_floating_dtypes:
+        raise TypeError("Only real floating-point dtypes are allowed in atan2")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.arctan2(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def atanh(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arctanh <numpy.arctanh>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in atanh")
+    return Array._new(np.arctanh(x._array))
+
+
+def bitwise_and(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.bitwise_and <numpy.bitwise_and>`.
+
+    See its docstring for more information.
+    """
+    if (
+        x1.dtype not in _integer_or_boolean_dtypes
+        or x2.dtype not in _integer_or_boolean_dtypes
+    ):
+        raise TypeError("Only integer or boolean dtypes are allowed in bitwise_and")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.bitwise_and(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def bitwise_left_shift(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.left_shift <numpy.left_shift>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _integer_dtypes or x2.dtype not in _integer_dtypes:
+        raise TypeError("Only integer dtypes are allowed in bitwise_left_shift")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    # Note: bitwise_left_shift is only defined for x2 nonnegative.
+    if np.any(x2._array < 0):
+        raise ValueError("bitwise_left_shift(x1, x2) is only defined for x2 >= 0")
+    return Array._new(np.left_shift(x1._array, x2._array))
+
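
The nonnegativity check above is observable from user code; a brief sketch (assumed `numpy.array_api` import path):

```python
import numpy.array_api as xp  # assumed import path

x = xp.asarray([1, 2, 4])
print(xp.bitwise_left_shift(x, xp.asarray(1)))    # Array([2, 4, 8], dtype=int64)

try:
    xp.bitwise_left_shift(x, xp.asarray(-1))      # negative shift counts rejected
except ValueError as exc:
    print(exc)
```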
+
+# Note: the function name is different here
+def bitwise_invert(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.invert <numpy.invert>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _integer_or_boolean_dtypes:
+        raise TypeError("Only integer or boolean dtypes are allowed in bitwise_invert")
+    return Array._new(np.invert(x._array))
+
+
+def bitwise_or(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.bitwise_or <numpy.bitwise_or>`.
+
+    See its docstring for more information.
+    """
+    if (
+        x1.dtype not in _integer_or_boolean_dtypes
+        or x2.dtype not in _integer_or_boolean_dtypes
+    ):
+        raise TypeError("Only integer or boolean dtypes are allowed in bitwise_or")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.bitwise_or(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def bitwise_right_shift(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.right_shift <numpy.right_shift>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _integer_dtypes or x2.dtype not in _integer_dtypes:
+        raise TypeError("Only integer dtypes are allowed in bitwise_right_shift")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    # Note: bitwise_right_shift is only defined for x2 nonnegative.
+    if np.any(x2._array < 0):
+        raise ValueError("bitwise_right_shift(x1, x2) is only defined for x2 >= 0")
+    return Array._new(np.right_shift(x1._array, x2._array))
+
+
+def bitwise_xor(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.bitwise_xor <numpy.bitwise_xor>`.
+
+    See its docstring for more information.
+    """
+    if (
+        x1.dtype not in _integer_or_boolean_dtypes
+        or x2.dtype not in _integer_or_boolean_dtypes
+    ):
+        raise TypeError("Only integer or boolean dtypes are allowed in bitwise_xor")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.bitwise_xor(x1._array, x2._array))
+
+
+def ceil(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.ceil <numpy.ceil>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in ceil")
+    if x.dtype in _integer_dtypes:
+        # Note: The return dtype of ceil is the same as the input
+        return x
+    return Array._new(np.ceil(x._array))
+
+
+def conj(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.conj <numpy.conj>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _complex_floating_dtypes:
+        raise TypeError("Only complex floating-point dtypes are allowed in conj")
+    return Array._new(np.conj(x._array))
+
+
+def cos(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.cos <numpy.cos>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in cos")
+    return Array._new(np.cos(x._array))
+
+
+def cosh(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.cosh <numpy.cosh>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in cosh")
+    return Array._new(np.cosh(x._array))
+
+
+def divide(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.divide <numpy.divide>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in divide")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.divide(x1._array, x2._array))
+
+
+def equal(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.equal <numpy.equal>`.
+
+    See its docstring for more information.
+    """
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.equal(x1._array, x2._array))
+
+
+def exp(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.exp <numpy.exp>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in exp")
+    return Array._new(np.exp(x._array))
+
+
+def expm1(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.expm1 <numpy.expm1>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in expm1")
+    return Array._new(np.expm1(x._array))
+
+
+def floor(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.floor <numpy.floor>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in floor")
+    if x.dtype in _integer_dtypes:
+        # Note: The return dtype of floor is the same as the input
+        return x
+    return Array._new(np.floor(x._array))
+
+
+def floor_divide(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.floor_divide <numpy.floor_divide>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _real_numeric_dtypes or x2.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in floor_divide")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.floor_divide(x1._array, x2._array))
+
+
+def greater(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.greater <numpy.greater>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _real_numeric_dtypes or x2.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in greater")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.greater(x1._array, x2._array))
+
+
+def greater_equal(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.greater_equal <numpy.greater_equal>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _real_numeric_dtypes or x2.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in greater_equal")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.greater_equal(x1._array, x2._array))
+
+
+def imag(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.imag <numpy.imag>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _complex_floating_dtypes:
+        raise TypeError("Only complex floating-point dtypes are allowed in imag")
+    return Array._new(np.imag(x._array))
+
+
+def isfinite(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.isfinite <numpy.isfinite>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in isfinite")
+    return Array._new(np.isfinite(x._array))
+
+
+def isinf(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.isinf <numpy.isinf>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in isinf")
+    return Array._new(np.isinf(x._array))
+
+
+def isnan(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.isnan <numpy.isnan>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in isnan")
+    return Array._new(np.isnan(x._array))
+
+
+def less(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.less <numpy.less>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _real_numeric_dtypes or x2.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in less")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.less(x1._array, x2._array))
+
+
+def less_equal(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.less_equal <numpy.less_equal>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _real_numeric_dtypes or x2.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in less_equal")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.less_equal(x1._array, x2._array))
+
+
+def log(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.log <numpy.log>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in log")
+    return Array._new(np.log(x._array))
+
+
+def log1p(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.log1p <numpy.log1p>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in log1p")
+    return Array._new(np.log1p(x._array))
+
+
+def log2(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.log2 <numpy.log2>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in log2")
+    return Array._new(np.log2(x._array))
+
+
+def log10(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.log10 <numpy.log10>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in log10")
+    return Array._new(np.log10(x._array))
+
+
+def logaddexp(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.logaddexp <numpy.logaddexp>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _real_floating_dtypes or x2.dtype not in _real_floating_dtypes:
+        raise TypeError("Only real floating-point dtypes are allowed in logaddexp")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.logaddexp(x1._array, x2._array))
+
+
+def logical_and(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.logical_and <numpy.logical_and>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
+        raise TypeError("Only boolean dtypes are allowed in logical_and")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.logical_and(x1._array, x2._array))
+
+
+def logical_not(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.logical_not <numpy.logical_not>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _boolean_dtypes:
+        raise TypeError("Only boolean dtypes are allowed in logical_not")
+    return Array._new(np.logical_not(x._array))
+
+
+def logical_or(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.logical_or <numpy.logical_or>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
+        raise TypeError("Only boolean dtypes are allowed in logical_or")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.logical_or(x1._array, x2._array))
+
+
+def logical_xor(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.logical_xor <numpy.logical_xor>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
+        raise TypeError("Only boolean dtypes are allowed in logical_xor")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.logical_xor(x1._array, x2._array))
+
+
+def multiply(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.multiply <numpy.multiply>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in multiply")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.multiply(x1._array, x2._array))
+
+
+def negative(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.negative <numpy.negative>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in negative")
+    return Array._new(np.negative(x._array))
+
+
+def not_equal(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.not_equal <numpy.not_equal>`.
+
+    See its docstring for more information.
+    """
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.not_equal(x1._array, x2._array))
+
+
+def positive(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.positive <numpy.positive>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in positive")
+    return Array._new(np.positive(x._array))
+
+
+# Note: the function name is different here
+def pow(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.power <numpy.power>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in pow")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.power(x1._array, x2._array))
+
+
+def real(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.real <numpy.real>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _complex_floating_dtypes:
+        raise TypeError("Only complex floating-point dtypes are allowed in real")
+    return Array._new(np.real(x._array))
+
+
+def remainder(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.remainder <numpy.remainder>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _real_numeric_dtypes or x2.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in remainder")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.remainder(x1._array, x2._array))
+
+
+def round(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.round <numpy.round>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in round")
+    return Array._new(np.round(x._array))
+
+
+def sign(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.sign <numpy.sign>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in sign")
+    return Array._new(np.sign(x._array))
+
+
+def sin(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.sin <numpy.sin>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in sin")
+    return Array._new(np.sin(x._array))
+
+
+def sinh(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.sinh <numpy.sinh>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in sinh")
+    return Array._new(np.sinh(x._array))
+
+
+def square(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.square <numpy.square>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in square")
+    return Array._new(np.square(x._array))
+
+
+def sqrt(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.sqrt <numpy.sqrt>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in sqrt")
+    return Array._new(np.sqrt(x._array))
+
+
+def subtract(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.subtract <numpy.subtract>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in subtract")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.subtract(x1._array, x2._array))
+
+
+def tan(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.tan <numpy.tan>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in tan")
+    return Array._new(np.tan(x._array))
+
+
+def tanh(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.tanh <numpy.tanh>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in tanh")
+    return Array._new(np.tanh(x._array))
+
+
+def trunc(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.trunc <numpy.trunc>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in trunc")
+    if x.dtype in _integer_dtypes:
+        # Note: The return dtype of trunc is the same as the input
+        return x
+    return Array._new(np.trunc(x._array))
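
One consequence of the integer short-circuits in `ceil`, `floor`, and `trunc` above: integer inputs come back unchanged (same object, same dtype), while float inputs are rounded as usual.

```python
import numpy.array_api as xp  # assumed import path

i = xp.asarray([1, 2], dtype=xp.int32)
assert xp.ceil(i) is i and xp.floor(i) is i and xp.trunc(i) is i

f = xp.asarray([-1.5, 2.5])
print(xp.floor(f))   # Array([-2., 2.], dtype=float64)
```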
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_indexing_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_indexing_functions.py
new file mode 100644
index 00000000..baf23f7f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_indexing_functions.py
@@ -0,0 +1,20 @@
+from __future__ import annotations
+
+from typing import Optional
+
+from ._array_object import Array
+from ._dtypes import _integer_dtypes
+
+import numpy as np
+
+def take(x: Array, indices: Array, /, *, axis: Optional[int] = None) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.take <numpy.take>`.
+
+    See its docstring for more information.
+    """
+    if axis is None and x.ndim != 1:
+        raise ValueError("axis must be specified when ndim > 1")
+    if indices.dtype not in _integer_dtypes:
+        raise TypeError("Only integer dtypes are allowed in indexing")
+    if indices.ndim != 1:
+        raise ValueError("Only 1-dim indices array is supported")
+    return Array._new(np.take(x._array, indices._array, axis=axis))
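
A usage sketch for the restricted `take` above (1-D integer indices only; `axis` may be omitted only for 1-D input):

```python
import numpy.array_api as xp  # assumed import path

x = xp.asarray([10, 20, 30, 40])
print(xp.take(x, xp.asarray([3, 0, 0])))        # Array([40, 10, 10], dtype=int64)

m = xp.reshape(xp.asarray([0, 1, 2, 3]), (2, 2))
print(xp.take(m, xp.asarray([1]), axis=1))      # column 1, shape (2, 1)
```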
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_manipulation_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_manipulation_functions.py
new file mode 100644
index 00000000..556bde7d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_manipulation_functions.py
@@ -0,0 +1,112 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._data_type_functions import result_type
+
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+
+# Note: the function name is different here
+def concat(
+    arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: Optional[int] = 0
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.concatenate <numpy.concatenate>`.
+
+    See its docstring for more information.
+    """
+    # Note: Casting rules here are different from the np.concatenate default
+    # (no special casting for scalars, no cross-kind casting)
+    dtype = result_type(*arrays)
+    arrays = tuple(a._array for a in arrays)
+    return Array._new(np.concatenate(arrays, axis=axis, dtype=dtype))
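+
+# A sketch of the stricter casting above (hypothetical session): mixing dtype
+# kinds, which np.concatenate would silently upcast, raises here via result_type().
+#
+#     >>> import numpy.array_api as xp
+#     >>> xp.concat((xp.asarray([1.0]), xp.asarray([2.0])))   # OK: same kind
+#     >>> xp.concat((xp.asarray([1]), xp.asarray([2.0])))     # TypeError: int64 with float64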
+
+
+def expand_dims(x: Array, /, *, axis: int) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.expand_dims <numpy.expand_dims>`.
+
+    See its docstring for more information.
+    """
+    return Array._new(np.expand_dims(x._array, axis))
+
+
+def flip(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.flip <numpy.flip>`.
+
+    See its docstring for more information.
+    """
+    return Array._new(np.flip(x._array, axis=axis))
+
+
+# Note: The function name is different here (see also matrix_transpose).
+# Unlike transpose(), the axes argument is required.
+def permute_dims(x: Array, /, axes: Tuple[int, ...]) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.transpose <numpy.transpose>`.
+
+    See its docstring for more information.
+    """
+    return Array._new(np.transpose(x._array, axes))
+
+
+# Note: the optional argument is called 'shape', not 'newshape'
+def reshape(x: Array,
+            /,
+            shape: Tuple[int, ...],
+            *,
+            copy: Optional[bool] = None) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.reshape <numpy.reshape>`.
+
+    See its docstring for more information.
+    """
+
+    data = x._array
+    if copy:
+        data = np.copy(data)
+
+    reshaped = np.reshape(data, shape)
+
+    if copy is False and not np.shares_memory(data, reshaped):
+        raise AttributeError("Incompatible shape for in-place modification.")
+
+    return Array._new(reshaped)
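+
+# A short sketch of the copy semantics above (hypothetical session):
+#
+#     >>> import numpy.array_api as xp
+#     >>> x = xp.asarray([[1, 2], [3, 4]])
+#     >>> xp.reshape(x, (4,))                  # copy=None: a view if possible, else a copy
+#     >>> t = xp.permute_dims(x, (1, 0))
+#     >>> xp.reshape(t, (4,), copy=False)      # AttributeError: a copy would be required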
+
+
+def roll(
+    x: Array,
+    /,
+    shift: Union[int, Tuple[int, ...]],
+    *,
+    axis: Optional[Union[int, Tuple[int, ...]]] = None,
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.roll <numpy.roll>`.
+
+    See its docstring for more information.
+    """
+    return Array._new(np.roll(x._array, shift, axis=axis))
+
+
+def squeeze(x: Array, /, axis: Union[int, Tuple[int, ...]]) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.squeeze <numpy.squeeze>`.
+
+    See its docstring for more information.
+    """
+    return Array._new(np.squeeze(x._array, axis=axis))
+
+
+def stack(arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: int = 0) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.stack <numpy.stack>`.
+
+    See its docstring for more information.
+    """
+    # Call result type here just to raise on disallowed type combinations
+    result_type(*arrays)
+    arrays = tuple(a._array for a in arrays)
+    return Array._new(np.stack(arrays, axis=axis))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_searching_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_searching_functions.py
new file mode 100644
index 00000000..a1f4b0c9
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_searching_functions.py
@@ -0,0 +1,51 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._dtypes import _result_type, _real_numeric_dtypes
+
+from typing import Optional, Tuple
+
+import numpy as np
+
+
+def argmax(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.argmax <numpy.argmax>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in argmax")
+    return Array._new(np.asarray(np.argmax(x._array, axis=axis, keepdims=keepdims)))
+
+
+def argmin(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.argmin <numpy.argmin>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in argmin")
+    return Array._new(np.asarray(np.argmin(x._array, axis=axis, keepdims=keepdims)))
+
+
+def nonzero(x: Array, /) -> Tuple[Array, ...]:
+    """
+    Array API compatible wrapper for :py:func:`np.nonzero <numpy.nonzero>`.
+
+    See its docstring for more information.
+    """
+    return tuple(Array._new(i) for i in np.nonzero(x._array))
+
+
+def where(condition: Array, x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.where <numpy.where>`.
+
+    See its docstring for more information.
+    """
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.where(condition._array, x1._array, x2._array))
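+
+# A sketch of the promotion check above (hypothetical session): where() applies
+# the same promotion rules as the elementwise functions.
+#
+#     >>> import numpy.array_api as xp
+#     >>> cond = xp.asarray([True, False])
+#     >>> xp.where(cond, xp.asarray([1, 2]), xp.asarray([3, 4]))       # -> [1, 4]
+#     >>> xp.where(cond, xp.asarray([1, 2]), xp.asarray([3.0, 4.0]))   # TypeError: mixed kinds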
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_set_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_set_functions.py
new file mode 100644
index 00000000..0b4132cf
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_set_functions.py
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+from ._array_object import Array
+
+from typing import NamedTuple
+
+import numpy as np
+
+# Note: np.unique() is split into four functions in the array API:
+# unique_all, unique_counts, unique_inverse, and unique_values (this is done
+# to remove polymorphic return types).
+
+# Note: The various unique() functions are supposed to return multiple NaNs.
+# This does not match the NumPy behavior; however, it is currently left as a
+# TODO in this implementation, as this behavior may be reverted in np.unique().
+# See https://github.com/numpy/numpy/issues/20326.
+
+# Note: The functions here return a namedtuple (np.unique() returns a normal
+# tuple).
+
+class UniqueAllResult(NamedTuple):
+    values: Array
+    indices: Array
+    inverse_indices: Array
+    counts: Array
+
+
+class UniqueCountsResult(NamedTuple):
+    values: Array
+    counts: Array
+
+
+class UniqueInverseResult(NamedTuple):
+    values: Array
+    inverse_indices: Array
+
+
+def unique_all(x: Array, /) -> UniqueAllResult:
+    """
+    Array API compatible wrapper for :py:func:`np.unique <numpy.unique>`.
+
+    See its docstring for more information.
+    """
+    values, indices, inverse_indices, counts = np.unique(
+        x._array,
+        return_counts=True,
+        return_index=True,
+        return_inverse=True,
+        equal_nan=False,
+    )
+    # np.unique() flattens inverse indices, but they need to share x's shape
+    # See https://github.com/numpy/numpy/issues/20638
+    inverse_indices = inverse_indices.reshape(x.shape)
+    return UniqueAllResult(
+        Array._new(values),
+        Array._new(indices),
+        Array._new(inverse_indices),
+        Array._new(counts),
+    )
+
+
+def unique_counts(x: Array, /) -> UniqueCountsResult:
+    """
+    Array API compatible wrapper for :py:func:`np.unique <numpy.unique>`.
+
+    See its docstring for more information.
+    """
+    res = np.unique(
+        x._array,
+        return_counts=True,
+        return_index=False,
+        return_inverse=False,
+        equal_nan=False,
+    )
+
+    return UniqueCountsResult(*[Array._new(i) for i in res])
+
+
+def unique_inverse(x: Array, /) -> UniqueInverseResult:
+    """
+    Array API compatible wrapper for :py:func:`np.unique <numpy.unique>`.
+
+    See its docstring for more information.
+    """
+    values, inverse_indices = np.unique(
+        x._array,
+        return_counts=False,
+        return_index=False,
+        return_inverse=True,
+        equal_nan=False,
+    )
+    # np.unique() flattens inverse indices, but they need to share x's shape
+    # See https://github.com/numpy/numpy/issues/20638
+    inverse_indices = inverse_indices.reshape(x.shape)
+    return UniqueInverseResult(Array._new(values), Array._new(inverse_indices))
+
+
+def unique_values(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.unique <numpy.unique>`.
+
+    See its docstring for more information.
+    """
+    res = np.unique(
+        x._array,
+        return_counts=False,
+        return_index=False,
+        return_inverse=False,
+        equal_nan=False,
+    )
+    return Array._new(res)
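+
+# A sketch of the unique() split described above (hypothetical session): each
+# function has a single, static return type instead of np.unique's flag-driven
+# polymorphic one.
+#
+#     >>> import numpy.array_api as xp
+#     >>> x = xp.asarray([1, 2, 2, 3])
+#     >>> xp.unique_values(x)                      # -> [1, 2, 3]
+#     >>> xp.unique_counts(x).counts               # -> [1, 2, 1]
+#     >>> xp.unique_all(x).inverse_indices.shape   # -> (4,), matches x.shape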
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_sorting_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_sorting_functions.py
new file mode 100644
index 00000000..9b8cb044
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_sorting_functions.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._dtypes import _real_numeric_dtypes
+
+import numpy as np
+
+
+# Note: the descending keyword argument is new in this function
+def argsort(
+    x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.argsort <numpy.argsort>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in argsort")
+    # Note: the stable keyword replaces np.argsort's kind keyword, and the
+    # default differs (stable here, unstable quicksort in NumPy).
+    kind = "stable" if stable else "quicksort"
+    if not descending:
+        res = np.argsort(x._array, axis=axis, kind=kind)
+    else:
+        # As NumPy has no native descending sort, we imitate it here. Note that
+        # simply flipping the results of np.argsort(x._array, ...) would not
+        # respect the relative order like it would in native descending sorts.
+        res = np.flip(
+            np.argsort(np.flip(x._array, axis=axis), axis=axis, kind=kind),
+            axis=axis,
+        )
+        # Rely on flip()/argsort() to validate axis
+        normalised_axis = axis if axis >= 0 else x.ndim + axis
+        max_i = x.shape[normalised_axis] - 1
+        res = max_i - res
+    return Array._new(res)
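+
+# Worked example of the flip trick above: for x = [1, 1, 2], a stable
+# descending argsort must keep the tied 1s in their original order.
+#
+#   np.argsort(x, kind="stable")              -> [0, 1, 2]
+#   naive flip of that result                 -> [2, 1, 0]  (ties reversed: wrong)
+#   flip x, argsort, flip back, map through
+#   max_i - res (as above)                    -> [2, 0, 1]  (ties in order: right)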
+
+# Note: the descending keyword argument is new in this function
+def sort(
+    x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.sort <numpy.sort>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in sort")
+    # Note: the stable keyword replaces np.sort's kind keyword, and the
+    # default differs (stable here, unstable quicksort in NumPy).
+    kind = "stable" if stable else "quicksort"
+    res = np.sort(x._array, axis=axis, kind=kind)
+    if descending:
+        res = np.flip(res, axis=axis)
+    return Array._new(res)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_statistical_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_statistical_functions.py
new file mode 100644
index 00000000..98e31b51
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_statistical_functions.py
@@ -0,0 +1,122 @@
+from __future__ import annotations
+
+from ._dtypes import (
+    _real_floating_dtypes,
+    _real_numeric_dtypes,
+    _numeric_dtypes,
+)
+from ._array_object import Array
+from ._dtypes import float32, float64, complex64, complex128
+
+from typing import TYPE_CHECKING, Optional, Tuple, Union
+
+if TYPE_CHECKING:
+    from ._typing import Dtype
+
+import numpy as np
+
+
+def max(
+    x: Array,
+    /,
+    *,
+    axis: Optional[Union[int, Tuple[int, ...]]] = None,
+    keepdims: bool = False,
+) -> Array:
+    if x.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in max")
+    return Array._new(np.max(x._array, axis=axis, keepdims=keepdims))
+
+
+def mean(
+    x: Array,
+    /,
+    *,
+    axis: Optional[Union[int, Tuple[int, ...]]] = None,
+    keepdims: bool = False,
+) -> Array:
+    if x.dtype not in _real_floating_dtypes:
+        raise TypeError("Only real floating-point dtypes are allowed in mean")
+    return Array._new(np.mean(x._array, axis=axis, keepdims=keepdims))
+
+
+def min(
+    x: Array,
+    /,
+    *,
+    axis: Optional[Union[int, Tuple[int, ...]]] = None,
+    keepdims: bool = False,
+) -> Array:
+    if x.dtype not in _real_numeric_dtypes:
+        raise TypeError("Only real numeric dtypes are allowed in min")
+    return Array._new(np.min(x._array, axis=axis, keepdims=keepdims))
+
+
+def prod(
+    x: Array,
+    /,
+    *,
+    axis: Optional[Union[int, Tuple[int, ...]]] = None,
+    dtype: Optional[Dtype] = None,
+    keepdims: bool = False,
+) -> Array:
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in prod")
+    # Note: sum() and prod() always upcast for dtype=None. `np.prod` does that
+    # for integers, but not for float32 or complex64, so we need to
+    # special-case it here
+    if dtype is None:
+        if x.dtype == float32:
+            dtype = float64
+        elif x.dtype == complex64:
+            dtype = complex128
+    return Array._new(np.prod(x._array, dtype=dtype, axis=axis, keepdims=keepdims))
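+
+# A sketch of the upcasting rule above (hypothetical session):
+#
+#     >>> import numpy.array_api as xp
+#     >>> xp.prod(xp.asarray([2.0], dtype=xp.float32)).dtype                     # -> float64
+#     >>> xp.prod(xp.asarray([2.0], dtype=xp.float32), dtype=xp.float32).dtype   # -> float32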
+
+
+def std(
+    x: Array,
+    /,
+    *,
+    axis: Optional[Union[int, Tuple[int, ...]]] = None,
+    correction: Union[int, float] = 0.0,
+    keepdims: bool = False,
+) -> Array:
+    # Note: the keyword argument correction is different here
+    if x.dtype not in _real_floating_dtypes:
+        raise TypeError("Only real floating-point dtypes are allowed in std")
+    return Array._new(np.std(x._array, axis=axis, ddof=correction, keepdims=keepdims))
+
+
+def sum(
+    x: Array,
+    /,
+    *,
+    axis: Optional[Union[int, Tuple[int, ...]]] = None,
+    dtype: Optional[Dtype] = None,
+    keepdims: bool = False,
+) -> Array:
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in sum")
+    # Note: sum() and prod() always upcast for dtype=None. `np.sum` does that
+    # for integers, but not for float32 or complex64, so we need to
+    # special-case it here
+    if dtype is None:
+        if x.dtype == float32:
+            dtype = float64
+        elif x.dtype == complex64:
+            dtype = complex128
+    return Array._new(np.sum(x._array, axis=axis, dtype=dtype, keepdims=keepdims))
+
+
+def var(
+    x: Array,
+    /,
+    *,
+    axis: Optional[Union[int, Tuple[int, ...]]] = None,
+    correction: Union[int, float] = 0.0,
+    keepdims: bool = False,
+) -> Array:
+    # Note: the keyword argument correction is different here
+    if x.dtype not in _real_floating_dtypes:
+        raise TypeError("Only real floating-point dtypes are allowed in var")
+    return Array._new(np.var(x._array, axis=axis, ddof=correction, keepdims=keepdims))
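+
+# A sketch of the correction keyword above (hypothetical session): correction
+# plays the role of NumPy's ddof, so correction=1 gives the sample variance.
+#
+#     >>> import numpy.array_api as xp
+#     >>> x = xp.asarray([1.0, 2.0, 3.0])
+#     >>> xp.var(x)                   # -> 2/3, population variance (correction=0)
+#     >>> xp.var(x, correction=1)     # -> 1.0, sample variance, divides by n - 1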
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_typing.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_typing.py
new file mode 100644
index 00000000..e63a375b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_typing.py
@@ -0,0 +1,76 @@
+"""
+This file defines the types for type annotations.
+
+These names aren't part of the module namespace, but they are used in the
+annotations in the function signatures. The functions in the module are only
+valid for inputs that match the given type annotations.
+"""
+
+from __future__ import annotations
+
+__all__ = [
+    "Array",
+    "Device",
+    "Dtype",
+    "SupportsDLPack",
+    "SupportsBufferProtocol",
+    "PyCapsule",
+]
+
+import sys
+
+from typing import (
+    Any,
+    Literal,
+    Sequence,
+    Type,
+    Union,
+    TypeVar,
+    Protocol,
+)
+
+from ._array_object import Array
+from numpy import (
+    dtype,
+    int8,
+    int16,
+    int32,
+    int64,
+    uint8,
+    uint16,
+    uint32,
+    uint64,
+    float32,
+    float64,
+)
+
+_T_co = TypeVar("_T_co", covariant=True)
+
+class NestedSequence(Protocol[_T_co]):
+    def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ...
+    def __len__(self, /) -> int: ...
+
+Device = Literal["cpu"]
+
+Dtype = dtype[Union[
+    int8,
+    int16,
+    int32,
+    int64,
+    uint8,
+    uint16,
+    uint32,
+    uint64,
+    float32,
+    float64,
+]]
+
+if sys.version_info >= (3, 12):
+    from collections.abc import Buffer as SupportsBufferProtocol
+else:
+    SupportsBufferProtocol = Any
+
+PyCapsule = Any
+
+class SupportsDLPack(Protocol):
+    def __dlpack__(self, /, *, stream: None = ...) -> PyCapsule: ...
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_utility_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_utility_functions.py
new file mode 100644
index 00000000..5ecb4bd9
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_utility_functions.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+from ._array_object import Array
+
+from typing import Optional, Tuple, Union
+
+import numpy as np
+
+
+def all(
+    x: Array,
+    /,
+    *,
+    axis: Optional[Union[int, Tuple[int, ...]]] = None,
+    keepdims: bool = False,
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.all <numpy.all>`.
+
+    See its docstring for more information.
+    """
+    return Array._new(np.asarray(np.all(x._array, axis=axis, keepdims=keepdims)))
+
+
+def any(
+    x: Array,
+    /,
+    *,
+    axis: Optional[Union[int, Tuple[int, ...]]] = None,
+    keepdims: bool = False,
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.any <numpy.any>`.
+
+    See its docstring for more information.
+    """
+    return Array._new(np.asarray(np.any(x._array, axis=axis, keepdims=keepdims)))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/linalg.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/linalg.py
new file mode 100644
index 00000000..09af9dfc
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/linalg.py
@@ -0,0 +1,462 @@
+from __future__ import annotations
+
+from ._dtypes import (
+    _floating_dtypes,
+    _numeric_dtypes,
+    float32,
+    float64,
+    complex64,
+    complex128
+)
+from ._manipulation_functions import reshape
+from ._array_object import Array
+
+from ..core.numeric import normalize_axis_tuple
+
+from typing import TYPE_CHECKING, Literal, Optional, Sequence, Tuple, Union
+if TYPE_CHECKING:
+    from ._typing import Dtype
+
+from typing import NamedTuple
+
+import numpy.linalg
+import numpy as np
+
+class EighResult(NamedTuple):
+    eigenvalues: Array
+    eigenvectors: Array
+
+class QRResult(NamedTuple):
+    Q: Array
+    R: Array
+
+class SlogdetResult(NamedTuple):
+    sign: Array
+    logabsdet: Array
+
+class SVDResult(NamedTuple):
+    U: Array
+    S: Array
+    Vh: Array
+
+# Note: the inclusion of the upper keyword is different from
+# np.linalg.cholesky, which does not have it.
+def cholesky(x: Array, /, *, upper: bool = False) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.linalg.cholesky <numpy.linalg.cholesky>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to floating-point dtypes only is different from
+    # np.linalg.cholesky.
+    if x.dtype not in _floating_dtypes:
+        raise TypeError('Only floating-point dtypes are allowed in cholesky')
+    L = np.linalg.cholesky(x._array)
+    if upper:
+        return Array._new(L).mT
+    return Array._new(L)
+
+# Note: cross is in the numpy top-level namespace, not in np.linalg
+def cross(x1: Array, x2: Array, /, *, axis: int = -1) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.cross <numpy.cross>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError('Only numeric dtypes are allowed in cross')
+    # Note: this is different from np.cross(), which broadcasts
+    if x1.shape != x2.shape:
+        raise ValueError('x1 and x2 must have the same shape')
+    if x1.ndim == 0:
+        raise ValueError('cross() requires arrays of dimension at least 1')
+    # Note: this is different from np.cross(), which allows dimension 2
+    if x1.shape[axis] != 3:
+        raise ValueError('cross() dimension must equal 3')
+    return Array._new(np.cross(x1._array, x2._array, axis=axis))
+
+def det(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.linalg.det <numpy.linalg.det>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to floating-point dtypes only is different from
+    # np.linalg.det.
+    if x.dtype not in _floating_dtypes:
+        raise TypeError('Only floating-point dtypes are allowed in det')
+    return Array._new(np.linalg.det(x._array))
+
+# Note: diagonal is in the numpy top-level namespace, not in np.linalg
+def diagonal(x: Array, /, *, offset: int = 0) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.diagonal <numpy.diagonal>`.
+
+    See its docstring for more information.
+    """
+    # Note: diagonal always operates on the last two axes, whereas np.diagonal
+    # operates on the first two axes by default
+    return Array._new(np.diagonal(x._array, offset=offset, axis1=-2, axis2=-1))
+
+
+def eigh(x: Array, /) -> EighResult:
+    """
+    Array API compatible wrapper for :py:func:`np.linalg.eigh <numpy.linalg.eigh>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to floating-point dtypes only is different from
+    # np.linalg.eigh.
+    if x.dtype not in _floating_dtypes:
+        raise TypeError('Only floating-point dtypes are allowed in eigh')
+
+    # Note: the return type here is a namedtuple, which is different from
+    # np.linalg.eigh, which only returns a tuple.
+    return EighResult(*map(Array._new, np.linalg.eigh(x._array)))
+
+
+def eigvalsh(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.linalg.eigvalsh <numpy.linalg.eigvalsh>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to floating-point dtypes only is different from
+    # np.linalg.eigvalsh.
+    if x.dtype not in _floating_dtypes:
+        raise TypeError('Only floating-point dtypes are allowed in eigvalsh')
+
+    return Array._new(np.linalg.eigvalsh(x._array))
+
+def inv(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.linalg.inv <numpy.linalg.inv>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to floating-point dtypes only is different from
+    # np.linalg.inv.
+    if x.dtype not in _floating_dtypes:
+        raise TypeError('Only floating-point dtypes are allowed in inv')
+
+    return Array._new(np.linalg.inv(x._array))
+
+
+# Note: matmul is in the numpy top-level namespace, not in np.linalg
+def matmul(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.matmul <numpy.matmul>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to numeric dtypes only is different from
+    # np.matmul.
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError('Only numeric dtypes are allowed in matmul')
+
+    return Array._new(np.matmul(x1._array, x2._array))
+
+
+# Note: the name here is different from norm(). The array API norm is split
+# into matrix_norm and vector_norm().
+
+# The type for ord should be Optional[Union[int, float, Literal[np.inf,
+# -np.inf, 'fro', 'nuc']]], but Literal does not support floating-point
+# literals.
+def matrix_norm(x: Array, /, *, keepdims: bool = False, ord: Optional[Union[int, float, Literal['fro', 'nuc']]] = 'fro') -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.linalg.norm <numpy.linalg.norm>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to floating-point dtypes only is different from
+    # np.linalg.norm.
+    if x.dtype not in _floating_dtypes:
+        raise TypeError('Only floating-point dtypes are allowed in matrix_norm')
+
+    return Array._new(np.linalg.norm(x._array, axis=(-2, -1), keepdims=keepdims, ord=ord))
+
+
+def matrix_power(x: Array, n: int, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.linalg.matrix_power <numpy.linalg.matrix_power>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to floating-point dtypes only is different from
+    # np.linalg.matrix_power.
+    if x.dtype not in _floating_dtypes:
+        raise TypeError('Only floating-point dtypes are allowed for the first argument of matrix_power')
+
+    # np.matrix_power already checks if n is an integer
+    return Array._new(np.linalg.matrix_power(x._array, n))
+
+# Note: the keyword argument name rtol is different from np.linalg.matrix_rank
+def matrix_rank(x: Array, /, *, rtol: Optional[Union[float, Array]] = None) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.linalg.matrix_rank <numpy.linalg.matrix_rank>`.
+
+    See its docstring for more information.
+    """
+    # Note: this is different from np.linalg.matrix_rank, which supports 1
+    # dimensional arrays.
+    if x.ndim < 2:
+        raise np.linalg.LinAlgError("1-dimensional array given. Array must be at least two-dimensional")
+    S = np.linalg.svd(x._array, compute_uv=False)
+    if rtol is None:
+        tol = S.max(axis=-1, keepdims=True) * max(x.shape[-2:]) * np.finfo(S.dtype).eps
+    else:
+        if isinstance(rtol, Array):
+            rtol = rtol._array
+        # Note: this is different from np.linalg.matrix_rank, which does not multiply
+        # the tolerance by the largest singular value.
+        tol = S.max(axis=-1, keepdims=True)*np.asarray(rtol)[..., np.newaxis]
+    return Array._new(np.count_nonzero(S > tol, axis=-1))
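+
+# A sketch of the tolerance logic above (hypothetical session): with rtol=None
+# the cutoff is sigma_max * max(M, N) * eps; an explicit rtol is likewise
+# scaled by the largest singular value (tol = sigma_max * rtol).
+#
+#     >>> import numpy.array_api as xp
+#     >>> x = xp.asarray([[1.0, 0.0], [0.0, 1e-20]])
+#     >>> xp.linalg.matrix_rank(x)                 # -> 1, tiny singular value dropped
+#     >>> xp.linalg.matrix_rank(x, rtol=1e-25)     # -> 2, looser tolerance keeps it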
+
+
+# Note: this function is new in the array API spec. Unlike transpose, it only
+# transposes the last two axes.
+def matrix_transpose(x: Array, /) -> Array:
+    if x.ndim < 2:
+        raise ValueError("x must be at least 2-dimensional for matrix_transpose")
+    return Array._new(np.swapaxes(x._array, -1, -2))
+
+# Note: outer is in the numpy top-level namespace, not in np.linalg
+def outer(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.outer <numpy.outer>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to numeric dtypes only is different from
+    # np.outer.
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError('Only numeric dtypes are allowed in outer')
+
+    # Note: the restriction to only 1-dim arrays is different from np.outer
+    if x1.ndim != 1 or x2.ndim != 1:
+        raise ValueError('The input arrays to outer must be 1-dimensional')
+
+    return Array._new(np.outer(x1._array, x2._array))
+
+# Note: the keyword argument name rtol is different from np.linalg.pinv
+def pinv(x: Array, /, *, rtol: Optional[Union[float, Array]] = None) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.linalg.pinv <numpy.linalg.pinv>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to floating-point dtypes only is different from
+    # np.linalg.pinv.
+    if x.dtype not in _floating_dtypes:
+        raise TypeError('Only floating-point dtypes are allowed in pinv')
+
+    # Note: this is different from np.linalg.pinv, which does not multiply the
+    # default tolerance by max(M, N).
+    if rtol is None:
+        rtol = max(x.shape[-2:]) * np.finfo(x.dtype).eps
+    return Array._new(np.linalg.pinv(x._array, rcond=rtol))
+
+def qr(x: Array, /, *, mode: Literal['reduced', 'complete'] = 'reduced') -> QRResult:
+    """
+    Array API compatible wrapper for :py:func:`np.linalg.qr <numpy.linalg.qr>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to floating-point dtypes only is different from
+    # np.linalg.qr.
+    if x.dtype not in _floating_dtypes:
+        raise TypeError('Only floating-point dtypes are allowed in qr')
+
+    # Note: the return type here is a namedtuple, which is different from
+    # np.linalg.qr, which only returns a tuple.
+    return QRResult(*map(Array._new, np.linalg.qr(x._array, mode=mode)))
+
+def slogdet(x: Array, /) -> SlogdetResult:
+    """
+    Array API compatible wrapper for :py:func:`np.linalg.slogdet <numpy.linalg.slogdet>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to floating-point dtypes only is different from
+    # np.linalg.slogdet.
+    if x.dtype not in _floating_dtypes:
+        raise TypeError('Only floating-point dtypes are allowed in slogdet')
+
+    # Note: the return type here is a namedtuple, which is different from
+    # np.linalg.slogdet, which only returns a tuple.
+    return SlogdetResult(*map(Array._new, np.linalg.slogdet(x._array)))
+
+# Note: unlike np.linalg.solve, the array API solve() only accepts x2 as a
+# vector when it is exactly 1-dimensional. All other cases treat x2 as a stack
+# of matrices. The np.linalg.solve behavior of allowing stacks of both
+# matrices and vectors is ambiguous; cf.
+# https://github.com/numpy/numpy/issues/15349 and
+# https://github.com/data-apis/array-api/issues/285.
+
+# To work around this, the code below is copied from np.linalg.solve, except
+# that it only calls solve1 in the exactly 1-D case.
+def _solve(a, b):
+    from ..linalg.linalg import (_makearray, _assert_stacked_2d,
+                                 _assert_stacked_square, _commonType,
+                                 isComplexType, get_linalg_error_extobj,
+                                 _raise_linalgerror_singular)
+    from ..linalg import _umath_linalg
+
+    a, _ = _makearray(a)
+    _assert_stacked_2d(a)
+    _assert_stacked_square(a)
+    b, wrap = _makearray(b)
+    t, result_t = _commonType(a, b)
+
+    # This part is different from np.linalg.solve
+    if b.ndim == 1:
+        gufunc = _umath_linalg.solve1
+    else:
+        gufunc = _umath_linalg.solve
+
+    # This does nothing currently but is left in because it will be relevant
+    # when complex dtype support is added to the spec in 2022.
+    signature = 'DD->D' if isComplexType(t) else 'dd->d'
+    with np.errstate(call=_raise_linalgerror_singular, invalid='call',
+                     over='ignore', divide='ignore', under='ignore'):
+        r = gufunc(a, b, signature=signature)
+
+    return wrap(r.astype(result_t, copy=False))
+
+def solve(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.linalg.solve <numpy.linalg.solve>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to floating-point dtypes only is different from
+    # np.linalg.solve.
+    if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
+        raise TypeError('Only floating-point dtypes are allowed in solve')
+
+    return Array._new(_solve(x1._array, x2._array))
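+
+# A sketch of the disambiguation above (hypothetical session): x2 is treated as
+# a single vector only when it is exactly 1-D; anything else is a stack of
+# matrices.
+#
+#     >>> import numpy.array_api as xp
+#     >>> A = xp.asarray([[2.0, 0.0], [0.0, 4.0]])
+#     >>> xp.linalg.solve(A, xp.asarray([2.0, 4.0])).shape      # -> (2,), one vector
+#     >>> xp.linalg.solve(A, xp.asarray([[2.0], [4.0]])).shape  # -> (2, 1), one matrix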
+
+def svd(x: Array, /, *, full_matrices: bool = True) -> SVDResult:
+    """
+    Array API compatible wrapper for :py:func:`np.linalg.svd <numpy.linalg.svd>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to floating-point dtypes only is different from
+    # np.linalg.svd.
+    if x.dtype not in _floating_dtypes:
+        raise TypeError('Only floating-point dtypes are allowed in svd')
+
+    # Note: the return type here is a namedtuple, which is different from
+    # np.svd, which only returns a tuple.
+    return SVDResult(*map(Array._new, np.linalg.svd(x._array, full_matrices=full_matrices)))
+
+# Note: svdvals is not in NumPy (but it is in SciPy). It is equivalent to
+# np.linalg.svd(compute_uv=False).
+def svdvals(x: Array, /) -> Union[Array, Tuple[Array, ...]]:
+    if x.dtype not in _floating_dtypes:
+        raise TypeError('Only floating-point dtypes are allowed in svdvals')
+    return Array._new(np.linalg.svd(x._array, compute_uv=False))
+
+# Note: tensordot is in the numpy top-level namespace, not in np.linalg
+
+# Note: axes must be a tuple, unlike np.tensordot where it can be an array or array-like.
+def tensordot(x1: Array, x2: Array, /, *, axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2) -> Array:
+    # Note: the restriction to numeric dtypes only is different from
+    # np.tensordot.
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError('Only numeric dtypes are allowed in tensordot')
+
+    return Array._new(np.tensordot(x1._array, x2._array, axes=axes))
+
+# Note: trace is in the numpy top-level namespace, not in np.linalg
+def trace(x: Array, /, *, offset: int = 0, dtype: Optional[Dtype] = None) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.trace <numpy.trace>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError('Only numeric dtypes are allowed in trace')
+
+    # Note: trace() works the same as sum() and prod() (see
+    # _statistical_functions.py)
+    if dtype is None:
+        if x.dtype == float32:
+            dtype = float64
+        elif x.dtype == complex64:
+            dtype = complex128
+    # Note: trace always operates on the last two axes, whereas np.trace
+    # operates on the first two axes by default
+    return Array._new(np.asarray(np.trace(x._array, offset=offset, axis1=-2, axis2=-1, dtype=dtype)))
+
+# Note: vecdot is not in NumPy
+def vecdot(x1: Array, x2: Array, /, *, axis: int = -1) -> Array:
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError('Only numeric dtypes are allowed in vecdot')
+    ndim = max(x1.ndim, x2.ndim)
+    x1_shape = (1,)*(ndim - x1.ndim) + tuple(x1.shape)
+    x2_shape = (1,)*(ndim - x2.ndim) + tuple(x2.shape)
+    if x1_shape[axis] != x2_shape[axis]:
+        raise ValueError("x1 and x2 must have the same size along the given axis")
+
+    x1_, x2_ = np.broadcast_arrays(x1._array, x2._array)
+    x1_ = np.moveaxis(x1_, axis, -1)
+    x2_ = np.moveaxis(x2_, axis, -1)
+
+    res = x1_[..., None, :] @ x2_[..., None]
+    return Array._new(res[..., 0, 0])
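+
+# The matmul trick above computes, after broadcasting and moving `axis` last, a
+# batched (1, n) @ (n, 1) product, which is equivalent to summing x1 * x2 along
+# the contracted axis. A sketch (hypothetical session):
+#
+#     >>> import numpy.array_api as xp
+#     >>> x1 = xp.asarray([[1.0, 2.0], [3.0, 4.0]])
+#     >>> x2 = xp.asarray([1.0, 10.0])          # broadcasts against x1's rows
+#     >>> xp.linalg.vecdot(x1, x2)              # -> [21.0, 43.0]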
+
+
+# Note: the name here is different from norm(). The array API norm is split
+# into matrix_norm and vector_norm().
+
+# The type for ord should be Optional[Union[int, float, Literal[np.inf,
+# -np.inf]]] but Literal does not support floating-point literals.
+def vector_norm(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ord: Optional[Union[int, float]] = 2) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.linalg.norm <numpy.linalg.norm>`.
+
+    See its docstring for more information.
+    """
+    # Note: the restriction to floating-point dtypes only is different from
+    # np.linalg.norm.
+    if x.dtype not in _floating_dtypes:
+        raise TypeError('Only floating-point dtypes are allowed in norm')
+
+    # np.linalg.norm tries to do a matrix norm whenever axis is a 2-tuple or
+    # when axis=None and the input is 2-D, so to force a vector norm, we make
+    # it so the input is 1-D (for axis=None), or reshape so that norm is done
+    # on a single dimension.
+    a = x._array
+    if axis is None:
+        # Note: np.linalg.norm() doesn't handle 0-D arrays
+        a = a.ravel()
+        _axis = 0
+    elif isinstance(axis, tuple):
+        # Note: The axis argument supports any number of axes, whereas
+        # np.linalg.norm() only supports a single axis for vector norm.
+        normalized_axis = normalize_axis_tuple(axis, x.ndim)
+        rest = tuple(i for i in range(a.ndim) if i not in normalized_axis)
+        newshape = normalized_axis + rest
+        a = np.transpose(a, newshape).reshape(
+            (np.prod([a.shape[i] for i in normalized_axis], dtype=int),
+             *[a.shape[i] for i in rest]))
+        _axis = 0
+    else:
+        _axis = axis
+
+    res = Array._new(np.linalg.norm(a, axis=_axis, ord=ord))
+
+    if keepdims:
+        # We can't reuse np.linalg.norm(keepdims) because of the reshape hacks
+        # above to avoid matrix norm logic.
+        shape = list(x.shape)
+        _axis = normalize_axis_tuple(range(x.ndim) if axis is None else axis, x.ndim)
+        for i in _axis:
+            shape[i] = 1
+        res = reshape(res, tuple(shape))
+
+    return res
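+
+# A sketch of the reshape workaround above (hypothetical session): a tuple of
+# axes is collapsed into a single dimension so np.linalg.norm always computes a
+# vector norm.
+#
+#     >>> import numpy.array_api as xp
+#     >>> x = xp.ones((2, 3, 4))
+#     >>> xp.linalg.vector_norm(x, axis=(0, 2)).shape                  # -> (3,)
+#     >>> xp.linalg.vector_norm(x, axis=(0, 2), keepdims=True).shape   # -> (1, 3, 1)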
+
+__all__ = ['cholesky', 'cross', 'det', 'diagonal', 'eigh', 'eigvalsh', 'inv', 'matmul', 'matrix_norm', 'matrix_power', 'matrix_rank', 'matrix_transpose', 'outer', 'pinv', 'qr', 'slogdet', 'solve', 'svd', 'svdvals', 'tensordot', 'trace', 'vecdot', 'vector_norm']
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/setup.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/setup.py
new file mode 100644
index 00000000..c8bc2910
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/setup.py
@@ -0,0 +1,12 @@
+def configuration(parent_package="", top_path=None):
+    from numpy.distutils.misc_util import Configuration
+
+    config = Configuration("array_api", parent_package, top_path)
+    config.add_subpackage("tests")
+    return config
+
+
+if __name__ == "__main__":
+    from numpy.distutils.core import setup
+
+    setup(configuration=configuration)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/__init__.py
new file mode 100644
index 00000000..536062e3
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/__init__.py
@@ -0,0 +1,7 @@
+"""
+Tests for the array API namespace.
+
+Note: full compliance with the array API can be tested with the official array
+API test suite, https://github.com/data-apis/array-api-tests. The tests here
+primarily focus on those things that are not covered by the official suite.
+"""
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_array_object.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_array_object.py
new file mode 100644
index 00000000..0feb72c4
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_array_object.py
@@ -0,0 +1,395 @@
+import operator
+
+from numpy.testing import assert_raises, suppress_warnings
+import numpy as np
+import pytest
+
+from .. import ones, asarray, reshape, result_type, all, equal
+from .._array_object import Array
+from .._dtypes import (
+    _all_dtypes,
+    _boolean_dtypes,
+    _real_floating_dtypes,
+    _floating_dtypes,
+    _complex_floating_dtypes,
+    _integer_dtypes,
+    _integer_or_boolean_dtypes,
+    _real_numeric_dtypes,
+    _numeric_dtypes,
+    int8,
+    int16,
+    int32,
+    int64,
+    uint64,
+    bool as bool_,
+)
+
+
+def test_validate_index():
+    # The indexing tests in the official array API test suite test that the
+    # array object correctly handles the subset of indices that are required
+    # by the spec. But the NumPy array API implementation specifically
+    # disallows any index not required by the spec, via Array._validate_index.
+    # This test focuses on testing that non-valid indices are correctly
+    # rejected. See
+    # https://data-apis.org/array-api/latest/API_specification/indexing.html
+    # and the docstring of Array._validate_index for the exact indexing
+    # behavior that should be allowed. This does not test indices that are
+    # already invalid in NumPy itself because Array will generally just pass
+    # such indices directly to the underlying np.ndarray.
+
+    a = ones((3, 4))
+
+    # Out of bounds slices are not allowed
+    assert_raises(IndexError, lambda: a[:4])
+    assert_raises(IndexError, lambda: a[:-4])
+    assert_raises(IndexError, lambda: a[:3:-1])
+    assert_raises(IndexError, lambda: a[:-5:-1])
+    assert_raises(IndexError, lambda: a[4:])
+    assert_raises(IndexError, lambda: a[-4:])
+    assert_raises(IndexError, lambda: a[4::-1])
+    assert_raises(IndexError, lambda: a[-4::-1])
+
+    assert_raises(IndexError, lambda: a[...,:5])
+    assert_raises(IndexError, lambda: a[...,:-5])
+    assert_raises(IndexError, lambda: a[...,:5:-1])
+    assert_raises(IndexError, lambda: a[...,:-6:-1])
+    assert_raises(IndexError, lambda: a[...,5:])
+    assert_raises(IndexError, lambda: a[...,-5:])
+    assert_raises(IndexError, lambda: a[...,5::-1])
+    assert_raises(IndexError, lambda: a[...,-5::-1])
+
+    # Boolean indices cannot be part of a larger tuple index
+    assert_raises(IndexError, lambda: a[a[:,0]==1,0])
+    assert_raises(IndexError, lambda: a[a[:,0]==1,...])
+    assert_raises(IndexError, lambda: a[..., a[0]==1])
+    assert_raises(IndexError, lambda: a[[True, True, True]])
+    assert_raises(IndexError, lambda: a[(True, True, True),])
+
+    # Integer array indices are not allowed (except for 0-D)
+    idx = asarray([[0, 1]])
+    assert_raises(IndexError, lambda: a[idx])
+    assert_raises(IndexError, lambda: a[idx,])
+    assert_raises(IndexError, lambda: a[[0, 1]])
+    assert_raises(IndexError, lambda: a[(0, 1), (0, 1)])
+    assert_raises(IndexError, lambda: a[[0, 1]])
+    assert_raises(IndexError, lambda: a[np.array([[0, 1]])])
+
+    # Multiaxis indices must contain exactly as many indices as dimensions
+    assert_raises(IndexError, lambda: a[()])
+    assert_raises(IndexError, lambda: a[0,])
+    assert_raises(IndexError, lambda: a[0])
+    assert_raises(IndexError, lambda: a[:])
+
+def test_operators():
+    # For every operator, we test that it works for the required type
+    # combinations and raises TypeError otherwise
+    binary_op_dtypes = {
+        "__add__": "numeric",
+        "__and__": "integer_or_boolean",
+        "__eq__": "all",
+        "__floordiv__": "real numeric",
+        "__ge__": "real numeric",
+        "__gt__": "real numeric",
+        "__le__": "real numeric",
+        "__lshift__": "integer",
+        "__lt__": "real numeric",
+        "__mod__": "real numeric",
+        "__mul__": "numeric",
+        "__ne__": "all",
+        "__or__": "integer_or_boolean",
+        "__pow__": "numeric",
+        "__rshift__": "integer",
+        "__sub__": "numeric",
+        "__truediv__": "floating",
+        "__xor__": "integer_or_boolean",
+    }
+    # Recompute each time because of in-place ops
+    def _array_vals():
+        for d in _integer_dtypes:
+            yield asarray(1, dtype=d)
+        for d in _boolean_dtypes:
+            yield asarray(False, dtype=d)
+        for d in _floating_dtypes:
+            yield asarray(1.0, dtype=d)
+
+
+    BIG_INT = int(1e30)
+    for op, dtypes in binary_op_dtypes.items():
+        ops = [op]
+        if op not in ["__eq__", "__ne__", "__le__", "__ge__", "__lt__", "__gt__"]:
+            rop = "__r" + op[2:]
+            iop = "__i" + op[2:]
+            ops += [rop, iop]
+        for s in [1, 1.0, 1j, BIG_INT, False]:
+            for _op in ops:
+                for a in _array_vals():
+                    # Test array op scalar. From the spec, the following combinations
+                    # are supported:
+
+                    # - Python bool for a bool array dtype,
+                    # - a Python int within the bounds of the given dtype for integer array dtypes,
+                    # - a Python int or float for real floating-point array dtypes
+                    # - a Python int, float, or complex for complex floating-point array dtypes
+
+                    if ((dtypes == "all"
+                         or dtypes == "numeric" and a.dtype in _numeric_dtypes
+                         or dtypes == "real numeric" and a.dtype in _real_numeric_dtypes
+                         or dtypes == "integer" and a.dtype in _integer_dtypes
+                         or dtypes == "integer_or_boolean" and a.dtype in _integer_or_boolean_dtypes
+                         or dtypes == "boolean" and a.dtype in _boolean_dtypes
+                         or dtypes == "floating" and a.dtype in _floating_dtypes
+                        )
+                        # bool is a subtype of int, which is why we avoid
+                        # isinstance here.
+                        and (a.dtype in _boolean_dtypes and type(s) == bool
+                             or a.dtype in _integer_dtypes and type(s) == int
+                             or a.dtype in _real_floating_dtypes and type(s) in [float, int]
+                             or a.dtype in _complex_floating_dtypes and type(s) in [complex, float, int]
+                        )):
+                        if a.dtype in _integer_dtypes and s == BIG_INT:
+                            assert_raises(OverflowError, lambda: getattr(a, _op)(s))
+                        else:
+                            # Only test for no error
+                            with suppress_warnings() as sup:
+                                # ignore warnings from pow(BIG_INT)
+                                sup.filter(RuntimeWarning,
+                                           "invalid value encountered in power")
+                                getattr(a, _op)(s)
+                    else:
+                        assert_raises(TypeError, lambda: getattr(a, _op)(s))
+
+                # Test array op array.
+                for _op in ops:
+                    for x in _array_vals():
+                        for y in _array_vals():
+                            # See the promotion table in NEP 47 or the array
+                            # API spec page on type promotion. Mixed kind
+                            # promotion is not defined.
+                            if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]
+                                or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]
+                                or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes
+                                or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes
+                                or x.dtype in _boolean_dtypes and y.dtype not in _boolean_dtypes
+                                or y.dtype in _boolean_dtypes and x.dtype not in _boolean_dtypes
+                                or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes
+                                or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes
+                                ):
+                                assert_raises(TypeError, lambda: getattr(x, _op)(y))
+                            # Ensure in-place operators only promote to the same dtype as the left operand.
+                            elif (
+                                _op.startswith("__i")
+                                and result_type(x.dtype, y.dtype) != x.dtype
+                            ):
+                                assert_raises(TypeError, lambda: getattr(x, _op)(y))
+                            # Ensure only those dtypes that are required for every operator are allowed.
+                            elif (dtypes == "all" and (x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes
+                                                      or x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)
+                                or (dtypes == "real numeric" and x.dtype in _real_numeric_dtypes and y.dtype in _real_numeric_dtypes)
+                                or (dtypes == "numeric" and x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)
+                                or dtypes == "integer" and x.dtype in _integer_dtypes and y.dtype in _integer_dtypes
+                                or dtypes == "integer_or_boolean" and (x.dtype in _integer_dtypes and y.dtype in _integer_dtypes
+                                                                       or x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes)
+                                or dtypes == "boolean" and x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes
+                                or dtypes == "floating" and x.dtype in _floating_dtypes and y.dtype in _floating_dtypes
+                            ):
+                                getattr(x, _op)(y)
+                            else:
+                                assert_raises(TypeError, lambda: getattr(x, _op)(y))
+
+    unary_op_dtypes = {
+        "__abs__": "numeric",
+        "__invert__": "integer_or_boolean",
+        "__neg__": "numeric",
+        "__pos__": "numeric",
+    }
+    for op, dtypes in unary_op_dtypes.items():
+        for a in _array_vals():
+            if (
+                dtypes == "numeric"
+                and a.dtype in _numeric_dtypes
+                or dtypes == "integer_or_boolean"
+                and a.dtype in _integer_or_boolean_dtypes
+            ):
+                # Only test for no error
+                getattr(a, op)()
+            else:
+                assert_raises(TypeError, lambda: getattr(a, op)())
+
+    # Finally, matmul() must be tested separately, because it works a bit
+    # differently from the other operations.
+    def _matmul_array_vals():
+        for a in _array_vals():
+            yield a
+        for d in _all_dtypes:
+            yield ones((3, 4), dtype=d)
+            yield ones((4, 2), dtype=d)
+            yield ones((4, 4), dtype=d)
+
+    # Scalars always error
+    for _op in ["__matmul__", "__rmatmul__", "__imatmul__"]:
+        for s in [1, 1.0, False]:
+            for a in _matmul_array_vals():
+                if (type(s) in [float, int] and a.dtype in _floating_dtypes
+                    or type(s) == int and a.dtype in _integer_dtypes):
+                    # Type promotion is valid, but @ is not allowed on 0-D
+                    # inputs, so the error is a ValueError
+                    assert_raises(ValueError, lambda: getattr(a, _op)(s))
+                else:
+                    assert_raises(TypeError, lambda: getattr(a, _op)(s))
+
+    for x in _matmul_array_vals():
+        for y in _matmul_array_vals():
+            if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]
+                or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]
+                or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes
+                or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes
+                or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes
+                or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes
+                or x.dtype in _boolean_dtypes
+                or y.dtype in _boolean_dtypes
+                ):
+                assert_raises(TypeError, lambda: x.__matmul__(y))
+                assert_raises(TypeError, lambda: y.__rmatmul__(x))
+                assert_raises(TypeError, lambda: x.__imatmul__(y))
+            elif x.shape == () or y.shape == () or x.shape[1] != y.shape[0]:
+                assert_raises(ValueError, lambda: x.__matmul__(y))
+                assert_raises(ValueError, lambda: y.__rmatmul__(x))
+                if result_type(x.dtype, y.dtype) != x.dtype:
+                    assert_raises(TypeError, lambda: x.__imatmul__(y))
+                else:
+                    assert_raises(ValueError, lambda: x.__imatmul__(y))
+            else:
+                x.__matmul__(y)
+                y.__rmatmul__(x)
+                if result_type(x.dtype, y.dtype) != x.dtype:
+                    assert_raises(TypeError, lambda: x.__imatmul__(y))
+                elif y.shape[0] != y.shape[1]:
+                    # This one fails because x @ y has a different shape from x
+                    assert_raises(ValueError, lambda: x.__imatmul__(y))
+                else:
+                    x.__imatmul__(y)
+
+
+def test_python_scalar_constructors():
+    b = asarray(False)
+    i = asarray(0)
+    f = asarray(0.0)
+    c = asarray(0j)
+
+    assert bool(b) == False
+    assert int(i) == 0
+    assert float(f) == 0.0
+    assert operator.index(i) == 0
+
+    # bool/int/float/complex should only be allowed on 0-D arrays.
+    assert_raises(TypeError, lambda: bool(asarray([False])))
+    assert_raises(TypeError, lambda: int(asarray([0])))
+    assert_raises(TypeError, lambda: float(asarray([0.0])))
+    assert_raises(TypeError, lambda: complex(asarray([0j])))
+    assert_raises(TypeError, lambda: operator.index(asarray([0])))
+
+    # bool should work on all types of arrays
+    assert bool(b) is bool(i) is bool(f) is bool(c) is False
+
+    # int should fail on complex arrays
+    assert int(b) == int(i) == int(f) == 0
+    assert_raises(TypeError, lambda: int(c))
+
+    # float should fail on complex arrays
+    assert float(b) == float(i) == float(f) == 0.0
+    assert_raises(TypeError, lambda: float(c))
+
+    # complex should work on all types of arrays
+    assert complex(b) == complex(i) == complex(f) == complex(c) == 0j
+
+    # index should only work on integer arrays
+    assert operator.index(i) == 0
+    assert_raises(TypeError, lambda: operator.index(b))
+    assert_raises(TypeError, lambda: operator.index(f))
+    assert_raises(TypeError, lambda: operator.index(c))
+
+
+def test_device_property():
+    a = ones((3, 4))
+    assert a.device == 'cpu'
+
+    assert all(equal(a.to_device('cpu'), a))
+    assert_raises(ValueError, lambda: a.to_device('gpu'))
+
+    assert all(equal(asarray(a, device='cpu'), a))
+    assert_raises(ValueError, lambda: asarray(a, device='gpu'))
+
+def test_array_properties():
+    a = ones((1, 2, 3))
+    b = ones((2, 3))
+    assert_raises(ValueError, lambda: a.T)
+
+    assert isinstance(b.T, Array)
+    assert b.T.shape == (3, 2)
+
+    assert isinstance(a.mT, Array)
+    assert a.mT.shape == (1, 3, 2)
+    assert isinstance(b.mT, Array)
+    assert b.mT.shape == (3, 2)
+
+def test___array__():
+    a = ones((2, 3), dtype=int16)
+    assert np.asarray(a) is a._array
+    b = np.asarray(a, dtype=np.float64)
+    assert np.all(np.equal(b, np.ones((2, 3), dtype=np.float64)))
+    assert b.dtype == np.float64
+
+def test_allow_newaxis():
+    a = ones(5)
+    indexed_a = a[None, :]
+    assert indexed_a.shape == (1, 5)
+
+def test_disallow_flat_indexing_with_newaxis():
+    a = ones((3, 3, 3))
+    with pytest.raises(IndexError):
+        a[None, 0, 0]
+
+def test_disallow_mask_with_newaxis():
+    a = ones((3, 3, 3))
+    with pytest.raises(IndexError):
+        a[None, asarray(True)]
+
+@pytest.mark.parametrize("shape", [(), (5,), (3, 3, 3)])
+@pytest.mark.parametrize("index", ["string", False, True])
+def test_error_on_invalid_index(shape, index):
+    a = ones(shape)
+    with pytest.raises(IndexError):
+        a[index]
+
+def test_mask_0d_array_without_errors():
+    a = ones(())
+    a[asarray(True)]
+
+@pytest.mark.parametrize(
+    "i", [slice(5), slice(5, 0), asarray(True), asarray([0, 1])]
+)
+def test_error_on_invalid_index_with_ellipsis(i):
+    a = ones((3, 3, 3))
+    with pytest.raises(IndexError):
+        a[..., i]
+    with pytest.raises(IndexError):
+        a[i, ...]
+
+def test_array_keys_use_private_array():
+    """
+    Indexing operations convert array keys before indexing the internal array
+
+    Fails when array_api array keys are not converted into NumPy-proper arrays
+    in __getitem__(). This is achieved by passing array_api arrays with 0-sized
+    dimensions, which NumPy-proper treats erroneously (it is not clear why).
+
+    TODO: Find and use an appropriate __setitem__() case.
+    """
+    a = ones((0, 0), dtype=bool_)
+    assert a[a].shape == (0,)
+
+    a = ones((0,), dtype=bool_)
+    key = ones((0, 0), dtype=bool_)
+    with pytest.raises(IndexError):
+        a[key]
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_creation_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_creation_functions.py
new file mode 100644
index 00000000..be9eaa38
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_creation_functions.py
@@ -0,0 +1,142 @@
+from numpy.testing import assert_raises
+import numpy as np
+
+from .. import all
+from .._creation_functions import (
+    asarray,
+    arange,
+    empty,
+    empty_like,
+    eye,
+    full,
+    full_like,
+    linspace,
+    meshgrid,
+    ones,
+    ones_like,
+    zeros,
+    zeros_like,
+)
+from .._dtypes import float32, float64
+from .._array_object import Array
+
+
+def test_asarray_errors():
+    # Test various protections against incorrect usage
+    assert_raises(TypeError, lambda: Array([1]))
+    assert_raises(TypeError, lambda: asarray(["a"]))
+    assert_raises(ValueError, lambda: asarray([1.0], dtype=np.float16))
+    assert_raises(OverflowError, lambda: asarray(2**100))
+    # Preferably this would be OverflowError
+    # assert_raises(OverflowError, lambda: asarray([2**100]))
+    assert_raises(TypeError, lambda: asarray([2**100]))
+    asarray([1], device="cpu")  # Doesn't error
+    assert_raises(ValueError, lambda: asarray([1], device="gpu"))
+
+    assert_raises(ValueError, lambda: asarray([1], dtype=int))
+    assert_raises(ValueError, lambda: asarray([1], dtype="i"))
+
+
+def test_asarray_copy():
+    a = asarray([1])
+    b = asarray(a, copy=True)
+    a[0] = 0
+    assert all(b[0] == 1)
+    assert all(a[0] == 0)
+    a = asarray([1])
+    b = asarray(a, copy=np._CopyMode.ALWAYS)
+    a[0] = 0
+    assert all(b[0] == 1)
+    assert all(a[0] == 0)
+    a = asarray([1])
+    b = asarray(a, copy=np._CopyMode.NEVER)
+    a[0] = 0
+    assert all(b[0] == 0)
+    assert_raises(NotImplementedError, lambda: asarray(a, copy=False))
+    assert_raises(NotImplementedError,
+                  lambda: asarray(a, copy=np._CopyMode.IF_NEEDED))
+
+
+def test_arange_errors():
+    arange(1, device="cpu")  # Doesn't error
+    assert_raises(ValueError, lambda: arange(1, device="gpu"))
+    assert_raises(ValueError, lambda: arange(1, dtype=int))
+    assert_raises(ValueError, lambda: arange(1, dtype="i"))
+
+
+def test_empty_errors():
+    empty((1,), device="cpu")  # Doesn't error
+    assert_raises(ValueError, lambda: empty((1,), device="gpu"))
+    assert_raises(ValueError, lambda: empty((1,), dtype=int))
+    assert_raises(ValueError, lambda: empty((1,), dtype="i"))
+
+
+def test_empty_like_errors():
+    empty_like(asarray(1), device="cpu")  # Doesn't error
+    assert_raises(ValueError, lambda: empty_like(asarray(1), device="gpu"))
+    assert_raises(ValueError, lambda: empty_like(asarray(1), dtype=int))
+    assert_raises(ValueError, lambda: empty_like(asarray(1), dtype="i"))
+
+
+def test_eye_errors():
+    eye(1, device="cpu")  # Doesn't error
+    assert_raises(ValueError, lambda: eye(1, device="gpu"))
+    assert_raises(ValueError, lambda: eye(1, dtype=int))
+    assert_raises(ValueError, lambda: eye(1, dtype="i"))
+
+
+def test_full_errors():
+    full((1,), 0, device="cpu")  # Doesn't error
+    assert_raises(ValueError, lambda: full((1,), 0, device="gpu"))
+    assert_raises(ValueError, lambda: full((1,), 0, dtype=int))
+    assert_raises(ValueError, lambda: full((1,), 0, dtype="i"))
+
+
+def test_full_like_errors():
+    full_like(asarray(1), 0, device="cpu")  # Doesn't error
+    assert_raises(ValueError, lambda: full_like(asarray(1), 0, device="gpu"))
+    assert_raises(ValueError, lambda: full_like(asarray(1), 0, dtype=int))
+    assert_raises(ValueError, lambda: full_like(asarray(1), 0, dtype="i"))
+
+
+def test_linspace_errors():
+    linspace(0, 1, 10, device="cpu")  # Doesn't error
+    assert_raises(ValueError, lambda: linspace(0, 1, 10, device="gpu"))
+    assert_raises(ValueError, lambda: linspace(0, 1, 10, dtype=float))
+    assert_raises(ValueError, lambda: linspace(0, 1, 10, dtype="f"))
+
+
+def test_ones_errors():
+    ones((1,), device="cpu")  # Doesn't error
+    assert_raises(ValueError, lambda: ones((1,), device="gpu"))
+    assert_raises(ValueError, lambda: ones((1,), dtype=int))
+    assert_raises(ValueError, lambda: ones((1,), dtype="i"))
+
+
+def test_ones_like_errors():
+    ones_like(asarray(1), device="cpu")  # Doesn't error
+    assert_raises(ValueError, lambda: ones_like(asarray(1), device="gpu"))
+    assert_raises(ValueError, lambda: ones_like(asarray(1), dtype=int))
+    assert_raises(ValueError, lambda: ones_like(asarray(1), dtype="i"))
+
+
+def test_zeros_errors():
+    zeros((1,), device="cpu")  # Doesn't error
+    assert_raises(ValueError, lambda: zeros((1,), device="gpu"))
+    assert_raises(ValueError, lambda: zeros((1,), dtype=int))
+    assert_raises(ValueError, lambda: zeros((1,), dtype="i"))
+
+
+def test_zeros_like_errors():
+    zeros_like(asarray(1), device="cpu")  # Doesn't error
+    assert_raises(ValueError, lambda: zeros_like(asarray(1), device="gpu"))
+    assert_raises(ValueError, lambda: zeros_like(asarray(1), dtype=int))
+    assert_raises(ValueError, lambda: zeros_like(asarray(1), dtype="i"))
+
+def test_meshgrid_dtype_errors():
+    # Doesn't raise
+    meshgrid()
+    meshgrid(asarray([1.], dtype=float32))
+    meshgrid(asarray([1.], dtype=float32), asarray([1.], dtype=float32))
+
+    assert_raises(ValueError, lambda: meshgrid(asarray([1.], dtype=float32), asarray([1.], dtype=float64)))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_data_type_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_data_type_functions.py
new file mode 100644
index 00000000..61d56ca4
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_data_type_functions.py
@@ -0,0 +1,31 @@
+import pytest
+
+from numpy.testing import assert_raises
+from numpy import array_api as xp
+import numpy as np
+
+@pytest.mark.parametrize(
+    "from_, to, expected",
+    [
+        (xp.int8, xp.int16, True),
+        (xp.int16, xp.int8, False),
+        (xp.bool, xp.int8, False),
+        (xp.asarray(0, dtype=xp.uint8), xp.int8, False),
+    ],
+)
+def test_can_cast(from_, to, expected):
+    """
+    can_cast() returns correct result
+    """
+    assert xp.can_cast(from_, to) == expected
+
+def test_isdtype_strictness():
+    assert_raises(TypeError, lambda: xp.isdtype(xp.float64, 64))
+    assert_raises(ValueError, lambda: xp.isdtype(xp.float64, 'f8'))
+
+    assert_raises(TypeError, lambda: xp.isdtype(xp.float64, (('integral',),)))
+    assert_raises(TypeError, lambda: xp.isdtype(xp.float64, np.object_))
+
+    # TODO: These will require https://github.com/numpy/numpy/issues/23883
+    # assert_raises(TypeError, lambda: xp.isdtype(xp.float64, None))
+    # assert_raises(TypeError, lambda: xp.isdtype(xp.float64, np.float64))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_elementwise_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_elementwise_functions.py
new file mode 100644
index 00000000..1228d0af
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_elementwise_functions.py
@@ -0,0 +1,114 @@
+from inspect import getfullargspec
+
+from numpy.testing import assert_raises
+
+from .. import asarray, _elementwise_functions
+from .._elementwise_functions import bitwise_left_shift, bitwise_right_shift
+from .._dtypes import (
+    _dtype_categories,
+    _boolean_dtypes,
+    _floating_dtypes,
+    _integer_dtypes,
+)
+
+
+def nargs(func):
+    return len(getfullargspec(func).args)
+
+
+def test_function_types():
+    # Test that every function accepts only the required input types. We only
+    # test the negative cases here (error). The positive cases are tested in
+    # the array API test suite.
+
+    elementwise_function_input_types = {
+        "abs": "numeric",
+        "acos": "floating-point",
+        "acosh": "floating-point",
+        "add": "numeric",
+        "asin": "floating-point",
+        "asinh": "floating-point",
+        "atan": "floating-point",
+        "atan2": "real floating-point",
+        "atanh": "floating-point",
+        "bitwise_and": "integer or boolean",
+        "bitwise_invert": "integer or boolean",
+        "bitwise_left_shift": "integer",
+        "bitwise_or": "integer or boolean",
+        "bitwise_right_shift": "integer",
+        "bitwise_xor": "integer or boolean",
+        "ceil": "real numeric",
+        "conj": "complex floating-point",
+        "cos": "floating-point",
+        "cosh": "floating-point",
+        "divide": "floating-point",
+        "equal": "all",
+        "exp": "floating-point",
+        "expm1": "floating-point",
+        "floor": "real numeric",
+        "floor_divide": "real numeric",
+        "greater": "real numeric",
+        "greater_equal": "real numeric",
+        "imag": "complex floating-point",
+        "isfinite": "numeric",
+        "isinf": "numeric",
+        "isnan": "numeric",
+        "less": "real numeric",
+        "less_equal": "real numeric",
+        "log": "floating-point",
+        "logaddexp": "real floating-point",
+        "log10": "floating-point",
+        "log1p": "floating-point",
+        "log2": "floating-point",
+        "logical_and": "boolean",
+        "logical_not": "boolean",
+        "logical_or": "boolean",
+        "logical_xor": "boolean",
+        "multiply": "numeric",
+        "negative": "numeric",
+        "not_equal": "all",
+        "positive": "numeric",
+        "pow": "numeric",
+        "real": "complex floating-point",
+        "remainder": "real numeric",
+        "round": "numeric",
+        "sign": "numeric",
+        "sin": "floating-point",
+        "sinh": "floating-point",
+        "sqrt": "floating-point",
+        "square": "numeric",
+        "subtract": "numeric",
+        "tan": "floating-point",
+        "tanh": "floating-point",
+        "trunc": "real numeric",
+    }
+
+    def _array_vals():
+        for d in _integer_dtypes:
+            yield asarray(1, dtype=d)
+        for d in _boolean_dtypes:
+            yield asarray(False, dtype=d)
+        for d in _floating_dtypes:
+            yield asarray(1.0, dtype=d)
+
+    for x in _array_vals():
+        for func_name, types in elementwise_function_input_types.items():
+            dtypes = _dtype_categories[types]
+            func = getattr(_elementwise_functions, func_name)
+            if nargs(func) == 2:
+                for y in _array_vals():
+                    if x.dtype not in dtypes or y.dtype not in dtypes:
+                        assert_raises(TypeError, lambda: func(x, y))
+            else:
+                if x.dtype not in dtypes:
+                    assert_raises(TypeError, lambda: func(x))
+
+
+def test_bitwise_shift_error():
+    # bitwise shift functions should raise when the second argument is negative
+    assert_raises(
+        ValueError, lambda: bitwise_left_shift(asarray([1, 1]), asarray([1, -1]))
+    )
+    assert_raises(
+        ValueError, lambda: bitwise_right_shift(asarray([1, 1]), asarray([1, -1]))
+    )
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_indexing_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_indexing_functions.py
new file mode 100644
index 00000000..9e05c638
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_indexing_functions.py
@@ -0,0 +1,24 @@
+import pytest
+
+from numpy import array_api as xp
+
+
+@pytest.mark.parametrize(
+    "x, indices, axis, expected",
+    [
+        ([2, 3], [1, 1, 0], 0,  [3, 3, 2]),
+        ([2, 3], [1, 1, 0], -1, [3, 3, 2]),
+        ([[2, 3]], [1], -1, [[3]]),
+        ([[2, 3]], [0, 0], 0, [[2, 3], [2, 3]]),
+    ],
+)
+def test_take_function(x, indices, axis, expected):
+    """
+    take() gathers elements at the given indices along the given axis
+    """
+    x = xp.asarray(x)
+    indices = xp.asarray(indices)
+    out = xp.take(x, indices, axis=axis)
+    assert xp.all(out == xp.asarray(expected))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_manipulation_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_manipulation_functions.py
new file mode 100644
index 00000000..aec57c38
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_manipulation_functions.py
@@ -0,0 +1,37 @@
+from numpy.testing import assert_raises
+import numpy as np
+
+from .. import all
+from .._creation_functions import asarray
+from .._dtypes import float64, int8
+from .._manipulation_functions import (
+        concat,
+        reshape,
+        stack
+)
+
+
+def test_concat_errors():
+    assert_raises(TypeError, lambda: concat((1, 1), axis=None))
+    assert_raises(TypeError, lambda: concat([asarray([1], dtype=int8),
+                                             asarray([1], dtype=float64)]))
+
+
+def test_stack_errors():
+    assert_raises(TypeError, lambda: stack([asarray([1, 1], dtype=int8),
+                                            asarray([2, 2], dtype=float64)]))
+
+
+def test_reshape_copy():
+    a = asarray(np.ones((2, 3)))
+    b = reshape(a, (3, 2), copy=True)
+    assert not np.shares_memory(a._array, b._array)
+
+    a = asarray(np.ones((2, 3)))
+    b = reshape(a, (3, 2), copy=False)
+    assert np.shares_memory(a._array, b._array)
+
+    # A transposed (non-contiguous) array can still be reshaped when a copy
+    # is allowed, but copy=False raises because no view with the requested
+    # shape exists.
+    a = asarray(np.ones((2, 3)).T)
+    b = reshape(a, (3, 2), copy=True)
+    assert_raises(AttributeError, lambda: reshape(a, (2, 3), copy=False))
+
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_set_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_set_functions.py
new file mode 100644
index 00000000..b8eb65d4
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_set_functions.py
@@ -0,0 +1,19 @@
+import pytest
+from hypothesis import given
+from hypothesis.extra.array_api import make_strategies_namespace
+
+from numpy import array_api as xp
+
+xps = make_strategies_namespace(xp)
+
+
+@pytest.mark.parametrize("func", [xp.unique_all, xp.unique_inverse])
+@given(xps.arrays(dtype=xps.scalar_dtypes(), shape=xps.array_shapes()))
+def test_inverse_indices_shape(func, x):
+    """
+    Inverse indices share shape of input array
+
+    See https://github.com/numpy/numpy/issues/20638
+    """
+    out = func(x)
+    assert out.inverse_indices.shape == x.shape
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_sorting_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_sorting_functions.py
new file mode 100644
index 00000000..9848bbfe
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_sorting_functions.py
@@ -0,0 +1,23 @@
+import pytest
+
+from numpy import array_api as xp
+
+
+@pytest.mark.parametrize(
+    "obj, axis, expected",
+    [
+        ([0, 0], -1, [0, 1]),
+        ([0, 1, 0], -1, [1, 0, 2]),
+        ([[0, 1], [1, 1]], 0, [[1, 0], [0, 1]]),
+        ([[0, 1], [1, 1]], 1, [[1, 0], [0, 1]]),
+    ],
+)
+def test_stable_desc_argsort(obj, axis, expected):
+    """
+    Indices respect relative order of a descending stable-sort
+
+    See https://github.com/numpy/numpy/issues/20778
+    """
+    x = xp.asarray(obj)
+    out = xp.argsort(x, axis=axis, stable=True, descending=True)
+    assert xp.all(out == xp.asarray(expected))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_validation.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_validation.py
new file mode 100644
index 00000000..0dd100d1
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/tests/test_validation.py
@@ -0,0 +1,27 @@
+from typing import Callable
+
+import pytest
+
+from numpy import array_api as xp
+
+
+def p(func: Callable, *args, **kwargs):
+    f_sig = ", ".join(
+        [str(a) for a in args] + [f"{k}={v}" for k, v in kwargs.items()]
+    )
+    id_ = f"{func.__name__}({f_sig})"
+    return pytest.param(func, args, kwargs, id=id_)
+
+
+@pytest.mark.parametrize(
+    "func, args, kwargs",
+    [
+        p(xp.can_cast, 42, xp.int8),
+        p(xp.can_cast, xp.int8, 42),
+        p(xp.result_type, 42),
+    ],
+)
+def test_raises_on_invalid_types(func, args, kwargs):
+    """Function raises TypeError when passed invalidly-typed inputs"""
+    with pytest.raises(TypeError):
+        func(*args, **kwargs)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/compat/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/compat/__init__.py
new file mode 100644
index 00000000..504f8b00
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/compat/__init__.py
@@ -0,0 +1,19 @@
+"""
+Compatibility module.
+
+This module contains duplicated code from Python itself or 3rd party
+extensions, which may be included for the following reasons:
+
+  * compatibility
+  * we may only need a small subset of the copied library/module
+
+"""
+
+from .._utils import _inspect
+from .._utils._inspect import getargspec, formatargspec
+from . import py3k
+from .py3k import *
+
+__all__ = []
+__all__.extend(_inspect.__all__)
+__all__.extend(py3k.__all__)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/compat/py3k.py b/dbdpy-env/lib/python3.9/site-packages/numpy/compat/py3k.py
new file mode 100644
index 00000000..d02c9f8f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/compat/py3k.py
@@ -0,0 +1,145 @@
+"""
+Python 3.X compatibility tools.
+
+While this file was originally intended for Python 2 -> 3 transition,
+it is now used to create a compatibility layer between different
+minor versions of Python 3.
+
+While the active version of numpy may not support a given version of python, we
+allow downstream libraries to continue to use these shims for forward
+compatibility with numpy while they transition their code to newer versions of
+Python.
+"""
+__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
+           'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
+           'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
+           'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path',
+           'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike']
+
+import sys
+import os
+from pathlib import Path
+import io
+try:
+    import pickle5 as pickle
+except ImportError:
+    import pickle
+
+long = int
+integer_types = (int,)
+basestring = str
+unicode = str
+bytes = bytes
+
+def asunicode(s):
+    if isinstance(s, bytes):
+        return s.decode('latin1')
+    return str(s)
+
+def asbytes(s):
+    if isinstance(s, bytes):
+        return s
+    return str(s).encode('latin1')
+
+def asstr(s):
+    if isinstance(s, bytes):
+        return s.decode('latin1')
+    return str(s)
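+
+# A minimal round-trip sketch for the latin1-based helpers above
+# (illustrative only, not part of the public API contract):
+#
+#     >>> asbytes('abc')
+#     b'abc'
+#     >>> asunicode(b'abc')
+#     'abc'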
+
+def isfileobj(f):
+    if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)):
+        return False
+    try:
+        # BufferedReader/Writer may raise OSError when
+        # fetching `fileno()` (e.g. when wrapping BytesIO).
+        f.fileno()
+        return True
+    except OSError:
+        return False
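+
+# Illustrative behavior (sketch): in-memory buffers have no OS-level file
+# descriptor, so they are rejected even when wrapped in a BufferedReader:
+#
+#     >>> import io
+#     >>> isfileobj(io.BufferedReader(io.BytesIO()))
+#     False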
+
+def open_latin1(filename, mode='r'):
+    return open(filename, mode=mode, encoding='iso-8859-1')
+
+def sixu(s):
+    return s
+
+strchar = 'U'
+
+def getexception():
+    return sys.exc_info()[1]
+
+def asbytes_nested(x):
+    if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
+        return [asbytes_nested(y) for y in x]
+    else:
+        return asbytes(x)
+
+def asunicode_nested(x):
+    if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
+        return [asunicode_nested(y) for y in x]
+    else:
+        return asunicode(x)
+
+def is_pathlib_path(obj):
+    """
+    Check whether obj is a `pathlib.Path` object.
+
+    Prefer using ``isinstance(obj, os.PathLike)`` instead of this function.
+    """
+    return isinstance(obj, Path)
+
+# from Python 3.7
+class contextlib_nullcontext:
+    """Context manager that does no additional processing.
+
+    Used as a stand-in for a normal context manager, when a particular
+    block of code is only sometimes used with a normal context manager:
+
+    cm = optional_cm if condition else nullcontext()
+    with cm:
+        # Perform operation, using optional_cm if condition is True
+
+    .. note::
+        Prefer using `contextlib.nullcontext` instead of this context manager.
+    """
+
+    def __init__(self, enter_result=None):
+        self.enter_result = enter_result
+
+    def __enter__(self):
+        return self.enter_result
+
+    def __exit__(self, *excinfo):
+        pass
+
+
+def npy_load_module(name, fn, info=None):
+    """
+    Load a module. Uses ``load_module``, which is deprecated and removed in
+    Python 3.12. An alternative that uses ``exec_module`` is in
+    numpy.distutils.misc_util.exec_mod_from_location.
+
+    .. versionadded:: 1.11.2
+
+    Parameters
+    ----------
+    name : str
+        Full module name.
+    fn : str
+        Path to module file.
+    info : tuple, optional
+        Only here for backward compatibility with Python 2.*.
+
+    Returns
+    -------
+    mod : module
+
+    """
+    # Explicitly lazy import this to avoid paying the cost
+    # of importing importlib at startup
+    from importlib.machinery import SourceFileLoader
+    return SourceFileLoader(name, fn).load_module()
+
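+# Illustrative usage (the module name and path here are hypothetical):
+#
+#     >>> mod = npy_load_module('my_module', '/tmp/my_module.py')  # doctest: +SKIP
+#     >>> mod.__name__  # doctest: +SKIP
+#     'my_module'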
+
+os_fspath = os.fspath
+os_PathLike = os.PathLike
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/compat/setup.py b/dbdpy-env/lib/python3.9/site-packages/numpy/compat/setup.py
new file mode 100644
index 00000000..c1b34a2c
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/compat/setup.py
@@ -0,0 +1,10 @@
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+
+    config = Configuration('compat', parent_package, top_path)
+    config.add_subpackage('tests')
+    return config
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(configuration=configuration)
diff --git a/scripts/apple_example.ipynb b/dbdpy-env/lib/python3.9/site-packages/numpy/compat/tests/__init__.py
similarity index 100%
rename from scripts/apple_example.ipynb
rename to dbdpy-env/lib/python3.9/site-packages/numpy/compat/tests/__init__.py
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/compat/tests/test_compat.py b/dbdpy-env/lib/python3.9/site-packages/numpy/compat/tests/test_compat.py
new file mode 100644
index 00000000..d4391565
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/compat/tests/test_compat.py
@@ -0,0 +1,22 @@
+from os.path import join
+from io import BufferedReader, BytesIO
+
+from numpy.compat import isfileobj
+from numpy.testing import assert_
+from numpy.testing import tempdir
+
+
+def test_isfileobj():
+    with tempdir(prefix="numpy_test_compat_") as folder:
+        filename = join(folder, 'a.bin')
+
+        with open(filename, 'wb') as f:
+            assert_(isfileobj(f))
+
+        with open(filename, 'ab') as f:
+            assert_(isfileobj(f))
+
+        with open(filename, 'rb') as f:
+            assert_(isfileobj(f))
+
+        assert_(isfileobj(BufferedReader(BytesIO())) is False)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/conftest.py b/dbdpy-env/lib/python3.9/site-packages/numpy/conftest.py
new file mode 100644
index 00000000..f1a3eda9
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/conftest.py
@@ -0,0 +1,138 @@
+"""
+Pytest configuration and fixtures for the Numpy test suite.
+"""
+import os
+import tempfile
+
+import hypothesis
+import pytest
+import numpy
+
+from numpy.core._multiarray_tests import get_fpu_mode
+
+
+_old_fpu_mode = None
+_collect_results = {}
+
+# Use a known and persistent tmpdir for hypothesis' caches, which
+# can be automatically cleared by the OS or user.
+hypothesis.configuration.set_hypothesis_home_dir(
+    os.path.join(tempfile.gettempdir(), ".hypothesis")
+)
+
+# We register two custom profiles for Numpy - for details see
+# https://hypothesis.readthedocs.io/en/latest/settings.html
+# The first is designed for our own CI runs; the second also
+# forces determinism and is designed for use via np.test().
+hypothesis.settings.register_profile(
+    name="numpy-profile", deadline=None, print_blob=True,
+)
+hypothesis.settings.register_profile(
+    name="np.test() profile",
+    deadline=None, print_blob=True, database=None, derandomize=True,
+    suppress_health_check=list(hypothesis.HealthCheck),
+)
+# Note that the default profile is chosen based on the presence 
+# of pytest.ini, but can be overridden by passing the 
+# --hypothesis-profile=NAME argument to pytest.
+_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini")
+hypothesis.settings.load_profile(
+    "numpy-profile" if os.path.isfile(_pytest_ini) else "np.test() profile"
+)
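+# For example (illustrative invocation), a profile can be forced explicitly
+# from the command line:
+#
+#     pytest numpy/ --hypothesis-profile="np.test() profile"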
+
+# The experimental DType API is used in _umath_tests
+os.environ["NUMPY_EXPERIMENTAL_DTYPE_API"] = "1"
+
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "valgrind_error: Tests that are known to error under valgrind.")
+    config.addinivalue_line("markers",
+        "leaks_references: Tests that are known to leak references.")
+    config.addinivalue_line("markers",
+        "slow: Tests that are very slow.")
+    config.addinivalue_line("markers",
+        "slow_pypy: Tests that are very slow on pypy.")
+
+
+def pytest_addoption(parser):
+    parser.addoption("--available-memory", action="store", default=None,
+                     help=("Set amount of memory available for running the "
+                           "test suite. This can result to tests requiring "
+                           "especially large amounts of memory to be skipped. "
+                           "Equivalent to setting environment variable "
+                           "NPY_AVAILABLE_MEM. Default: determined"
+                           "automatically."))
+
+
+def pytest_sessionstart(session):
+    available_mem = session.config.getoption('available_memory')
+    if available_mem is not None:
+        os.environ['NPY_AVAILABLE_MEM'] = available_mem
+
+
+#FIXME when yield tests are gone.
+@pytest.hookimpl()
+def pytest_itemcollected(item):
+    """
+    Check FPU precision mode was not changed during test collection.
+
+    The clumsy way we do it here is mainly necessary because numpy
+    still uses yield tests, which can execute code at test collection
+    time.
+    """
+    global _old_fpu_mode
+
+    mode = get_fpu_mode()
+
+    if _old_fpu_mode is None:
+        _old_fpu_mode = mode
+    elif mode != _old_fpu_mode:
+        _collect_results[item] = (_old_fpu_mode, mode)
+        _old_fpu_mode = mode
+
+
+@pytest.fixture(scope="function", autouse=True)
+def check_fpu_mode(request):
+    """
+    Check FPU precision mode was not changed during the test.
+    """
+    old_mode = get_fpu_mode()
+    yield
+    new_mode = get_fpu_mode()
+
+    if old_mode != new_mode:
+        raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
+                             " during the test".format(old_mode, new_mode))
+
+    collect_result = _collect_results.get(request.node)
+    if collect_result is not None:
+        old_mode, new_mode = collect_result
+        raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
+                             " when collecting the test".format(old_mode,
+                                                                new_mode))
+
+
+@pytest.fixture(autouse=True)
+def add_np(doctest_namespace):
+    doctest_namespace['np'] = numpy
+
+@pytest.fixture(autouse=True)
+def env_setup(monkeypatch):
+    monkeypatch.setenv('PYTHONHASHSEED', '0')
+
+
+@pytest.fixture(params=[True, False])
+def weak_promotion(request):
+    """
+    Fixture to ensure "legacy" promotion state or change it to use the new
+    weak promotion (plus warning).  `weak_promotion` should be used as a
+    parameter in the test function.
+    """
+    state = numpy._get_promotion_state()
+    if request.param:
+        numpy._set_promotion_state("weak_and_warn")
+    else:
+        numpy._set_promotion_state("legacy")
+
+    yield request.param
+    numpy._set_promotion_state(state)
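+
+# Sketch of how a test would consume the fixture above (illustrative; the
+# test name is hypothetical):
+#
+#     def test_promotion_behavior(weak_promotion):
+#         # runs twice: once under "weak_and_warn", once under "legacy"
+#         ...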
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/__init__.py
new file mode 100644
index 00000000..2d59b89e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/__init__.py
@@ -0,0 +1,180 @@
+"""
+Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
+
+Please note that this module is private.  All functions and objects
+are available in the main ``numpy`` namespace - use that instead.
+
+"""
+
+import os
+import warnings
+
+from numpy.version import version as __version__
+
+
+# disables OpenBLAS affinity setting of the main thread that limits
+# python threads or processes to one core
+env_added = []
+for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
+    if envkey not in os.environ:
+        os.environ[envkey] = '1'
+        env_added.append(envkey)
+
+try:
+    from . import multiarray
+except ImportError as exc:
+    import sys
+    msg = """
+
+IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
+
+Importing the numpy C-extensions failed. This error can happen for
+many reasons, often due to issues with your setup or how NumPy was
+installed.
+
+We have compiled some common reasons and troubleshooting tips at:
+
+    https://numpy.org/devdocs/user/troubleshooting-importerror.html
+
+Please note and check the following:
+
+  * The Python version is: Python%d.%d from "%s"
+  * The NumPy version is: "%s"
+
+and make sure that they are the versions you expect.
+Please carefully study the documentation linked above for further help.
+
+Original error was: %s
+""" % (sys.version_info[0], sys.version_info[1], sys.executable,
+        __version__, exc)
+    raise ImportError(msg)
+finally:
+    for envkey in env_added:
+        del os.environ[envkey]
+del envkey
+del env_added
+del os
+
+from . import umath
+
+# Check that multiarray,umath are pure python modules wrapping
+# _multiarray_umath and not either of the old c-extension modules
+if not (hasattr(multiarray, '_multiarray_umath') and
+        hasattr(umath, '_multiarray_umath')):
+    import sys
+    path = sys.modules['numpy'].__path__
+    msg = ("Something is wrong with the numpy installation. "
+        "While importing we detected an older version of "
+        "numpy in {}. One method of fixing this is to repeatedly uninstall "
+        "numpy until none is found, then reinstall this version.")
+    raise ImportError(msg.format(path))
+
+from . import numerictypes as nt
+multiarray.set_typeDict(nt.sctypeDict)
+from . import numeric
+from .numeric import *
+from . import fromnumeric
+from .fromnumeric import *
+from . import defchararray as char
+from . import records
+from . import records as rec
+from .records import record, recarray, format_parser
+# Note: module name memmap is overwritten by a class with same name
+from .memmap import *
+from .defchararray import chararray
+from . import function_base
+from .function_base import *
+from . import _machar
+from . import getlimits
+from .getlimits import *
+from . import shape_base
+from .shape_base import *
+from . import einsumfunc
+from .einsumfunc import *
+del nt
+
+from .numeric import absolute as abs
+
+# do this after everything else, to minimize the chance of this misleadingly
+# appearing in an import-time traceback
+from . import _add_newdocs
+from . import _add_newdocs_scalars
+# add these for module-freeze analysis (like PyInstaller)
+from . import _dtype_ctypes
+from . import _internal
+from . import _dtype
+from . import _methods
+
+__all__ = ['char', 'rec', 'memmap']
+__all__ += numeric.__all__
+__all__ += ['record', 'recarray', 'format_parser']
+__all__ += ['chararray']
+__all__ += function_base.__all__
+__all__ += getlimits.__all__
+__all__ += shape_base.__all__
+__all__ += einsumfunc.__all__
+
+# We used to use `np.core._ufunc_reconstruct` to unpickle. This is unnecessary,
+# but old pickles saved before 1.20 will be using it, and there is no reason
+# to break loading them.
+def _ufunc_reconstruct(module, name):
+    # The `fromlist` kwarg is required to ensure that `mod` points to the
+    # inner-most module rather than the parent package when module name is
+    # nested. This makes it possible to pickle non-toplevel ufuncs such as
+    # scipy.special.expit for instance.
+    mod = __import__(module, fromlist=[name])
+    return getattr(mod, name)
+
+
+def _ufunc_reduce(func):
+    # Report the `__name__`. pickle will try to find the module. Note that
+    # pickle allows this `__name__` to be a `__qualname__`. It may
+    # make sense to add a `__qualname__` to ufuncs, to allow this more
+    # explicitly (Numba has ufuncs as attributes).
+    # See also: https://github.com/dask/distributed/issues/3450
+    return func.__name__
+
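+# Illustrative check of the legacy unpickle path (sketch):
+#
+#     >>> import numpy as np
+#     >>> _ufunc_reconstruct('numpy', 'add') is np.add
+#     True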
+
+def _DType_reconstruct(scalar_type):
+    # This is a work-around to pickle type(np.dtype(np.float64)), etc.
+    # and it should eventually be replaced with a better solution, e.g. when
+    # DTypes become HeapTypes.
+    return type(dtype(scalar_type))
+
+
+def _DType_reduce(DType):
+    # As types/classes, most DTypes can simply be pickled by their name:
+    if not DType._legacy or DType.__module__ == "numpy.dtypes":
+        return DType.__name__
+
+    # However, user defined legacy dtypes (like rational) do not end up in
+    # `numpy.dtypes` as a module and do not have a public class at all.
+    # For these, we pickle them by reconstructing them from the scalar type:
+    scalar_type = DType.type
+    return _DType_reconstruct, (scalar_type,)
+
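+# Sketch of what this enables (assuming a NumPy version where public DType
+# classes live in numpy.dtypes): DType classes round-trip through pickle
+# by name, e.g.
+#
+#     >>> import pickle
+#     >>> import numpy as np
+#     >>> DType = type(np.dtype(np.float64))
+#     >>> pickle.loads(pickle.dumps(DType)) is DType
+#     True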
+
+def __getattr__(name):
+    # Deprecated 2022-11-22, NumPy 1.25.
+    if name == "MachAr":
+        warnings.warn(
+            "The `np.core.MachAr` is considered private API (NumPy 1.24)",
+            DeprecationWarning, stacklevel=2,
+        )
+        return _machar.MachAr
+    raise AttributeError(f"Module {__name__!r} has no attribute {name!r}")
+
+
+import copyreg
+
+copyreg.pickle(ufunc, _ufunc_reduce)
+copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct)
+
+# Unclutter namespace (must keep _*_reconstruct for unpickling)
+del copyreg
+del _ufunc_reduce
+del _DType_reduce
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/__init__.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/__init__.pyi
new file mode 100644
index 00000000..4c7a42bf
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/__init__.pyi
@@ -0,0 +1,2 @@
+# NOTE: The `np.core` namespace is deliberately kept empty due to it
+# being private (despite the lack of leading underscore)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_add_newdocs.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_add_newdocs.py
new file mode 100644
index 00000000..6e29fcf5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_add_newdocs.py
@@ -0,0 +1,7080 @@
+"""
+This is only meant to add docs to objects defined in C-extension modules.
+The purpose is to allow easier editing of the docstrings without
+requiring a re-compile.
+
+NOTE: Many of the methods of ndarray have corresponding functions.
+      If you update these docstrings, please also keep the ones in
+      core/fromnumeric.py, core/defmatrix.py up-to-date.
+
+"""
+
+from numpy.core.function_base import add_newdoc
+from numpy.core.overrides import array_function_like_doc
+
+
+###############################################################################
+#
+# flatiter
+#
+# flatiter needs a toplevel description
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'flatiter',
+    """
+    Flat iterator object to iterate over arrays.
+
+    A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
+    It allows iterating over the array as if it were a 1-D array,
+    either in a for-loop or by calling its `next` method.
+
+    Iteration is done in row-major, C-style order (the last
+    index varying the fastest). The iterator can also be indexed using
+    basic slicing or advanced indexing.
+
+    See Also
+    --------
+    ndarray.flat : Return a flat iterator over an array.
+    ndarray.flatten : Returns a flattened copy of an array.
+
+    Notes
+    -----
+    A `flatiter` iterator can not be constructed directly from Python code
+    by calling the `flatiter` constructor.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> type(fl)
+    <class 'numpy.flatiter'>
+    >>> for item in fl:
+    ...     print(item)
+    ...
+    0
+    1
+    2
+    3
+    4
+    5
+
+    >>> fl[2:4]
+    array([2, 3])
+
+    """)
+
+# flatiter attributes
+
+add_newdoc('numpy.core', 'flatiter', ('base',
+    """
+    A reference to the array that is iterated over.
+
+    Examples
+    --------
+    >>> x = np.arange(5)
+    >>> fl = x.flat
+    >>> fl.base is x
+    True
+
+    """))
+
+
+
+add_newdoc('numpy.core', 'flatiter', ('coords',
+    """
+    An N-dimensional tuple of current coordinates.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> fl.coords
+    (0, 0)
+    >>> next(fl)
+    0
+    >>> fl.coords
+    (0, 1)
+
+    """))
+
+
+
+add_newdoc('numpy.core', 'flatiter', ('index',
+    """
+    Current flat index into the array.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> fl.index
+    0
+    >>> next(fl)
+    0
+    >>> fl.index
+    1
+
+    """))
+
+# flatiter functions
+
+add_newdoc('numpy.core', 'flatiter', ('__array__',
+    """__array__(type=None) Get array from iterator
+
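+    Examples
+    --------
+    A minimal illustration (``np.asarray`` invokes ``__array__``):
+
+    >>> x = np.arange(4).reshape(2, 2)
+    >>> np.asarray(x.flat)
+    array([0, 1, 2, 3])
+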
+    """))
+
+
+add_newdoc('numpy.core', 'flatiter', ('copy',
+    """
+    copy()
+
+    Get a copy of the iterator as a 1-D array.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> x
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> fl = x.flat
+    >>> fl.copy()
+    array([0, 1, 2, 3, 4, 5])
+
+    """))
+
+
+###############################################################################
+#
+# nditer
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'nditer',
+    """
+    nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', op_axes=None, itershape=None, buffersize=0)
+
+    Efficient multi-dimensional iterator object to iterate over arrays.
+    To get started using this object, see the
+    :ref:`introductory guide to array iteration <arrays.nditer>`.
+
+    Parameters
+    ----------
+    op : ndarray or sequence of array_like
+        The array(s) to iterate over.
+
+    flags : sequence of str, optional
+          Flags to control the behavior of the iterator.
+
+          * ``buffered`` enables buffering when required.
+          * ``c_index`` causes a C-order index to be tracked.
+          * ``f_index`` causes a Fortran-order index to be tracked.
+          * ``multi_index`` causes a multi-index, or a tuple of indices
+            with one per iteration dimension, to be tracked.
+          * ``common_dtype`` causes all the operands to be converted to
+            a common data type, with copying or buffering as necessary.
+          * ``copy_if_overlap`` causes the iterator to determine if read
+            operands have overlap with write operands, and make temporary
+            copies as necessary to avoid overlap. False positives (needless
+            copying) are possible in some cases.
+          * ``delay_bufalloc`` delays allocation of the buffers until
+            a reset() call is made. Allows ``allocate`` operands to
+            be initialized before their values are copied into the buffers.
+          * ``external_loop`` causes the ``values`` given to be
+            one-dimensional arrays with multiple values instead of
+            zero-dimensional arrays.
+          * ``grow_inner`` allows the ``value`` array sizes to be made
+            larger than the buffer size when both ``buffered`` and
+            ``external_loop`` are used.
+          * ``ranged`` allows the iterator to be restricted to a sub-range
+            of the iterindex values.
+          * ``refs_ok`` enables iteration of reference types, such as
+            object arrays.
+          * ``reduce_ok`` enables iteration of ``readwrite`` operands
+            which are broadcasted, also known as reduction operands.
+          * ``zerosize_ok`` allows `itersize` to be zero.
+    op_flags : list of list of str, optional
+          This is a list of flags for each operand. At minimum, one of
+          ``readonly``, ``readwrite``, or ``writeonly`` must be specified.
+
+          * ``readonly`` indicates the operand will only be read from.
+          * ``readwrite`` indicates the operand will be read from and written to.
+          * ``writeonly`` indicates the operand will only be written to.
+          * ``no_broadcast`` prevents the operand from being broadcasted.
+          * ``contig`` forces the operand data to be contiguous.
+          * ``aligned`` forces the operand data to be aligned.
+          * ``nbo`` forces the operand data to be in native byte order.
+          * ``copy`` allows a temporary read-only copy if required.
+          * ``updateifcopy`` allows a temporary read-write copy if required.
+          * ``allocate`` causes the array to be allocated if it is None
+            in the ``op`` parameter.
+          * ``no_subtype`` prevents an ``allocate`` operand from using a subtype.
+          * ``arraymask`` indicates that this operand is the mask to use
+            for selecting elements when writing to operands with the
+            'writemasked' flag set. The iterator does not enforce this,
+            but when writing from a buffer back to the array, it only
+            copies those elements indicated by this mask.
+          * ``writemasked`` indicates that only elements where the chosen
+            ``arraymask`` operand is True will be written to.
+          * ``overlap_assume_elementwise`` can be used to mark operands that are
+            accessed only in the iterator order, to allow less conservative
+            copying when ``copy_if_overlap`` is present.
+    op_dtypes : dtype or tuple of dtype(s), optional
+        The required data type(s) of the operands. If copying or buffering
+        is enabled, the data will be converted to/from their original types.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Controls the iteration order. 'C' means C order, 'F' means
+        Fortran order, 'A' means 'F' order if all the arrays are Fortran
+        contiguous, 'C' order otherwise, and 'K' means as close to the
+        order the array elements appear in memory as possible. This also
+        affects the element memory order of ``allocate`` operands, as they
+        are allocated to be compatible with iteration order.
+        Default is 'K'.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur when making a copy
+        or buffering.  Setting this to 'unsafe' is not recommended,
+        as it can adversely affect accumulations.
+
+        * 'no' means the data types should not be cast at all.
+        * 'equiv' means only byte-order changes are allowed.
+        * 'safe' means only casts which can preserve values are allowed.
+        * 'same_kind' means only safe casts or casts within a kind,
+          like float64 to float32, are allowed.
+        * 'unsafe' means any data conversions may be done.
+    op_axes : list of list of ints, optional
+        If provided, a list of ints or None for each operand.
+        The list of axes for an operand is a mapping from the dimensions
+        of the iterator to the dimensions of the operand. A value of
+        -1 can be placed for entries, causing that dimension to be
+        treated as `newaxis`.
+    itershape : tuple of ints, optional
+        The desired shape of the iterator. This allows ``allocate`` operands
+        with a dimension mapped by op_axes not corresponding to a dimension
+        of a different operand to get a value not equal to 1 for that
+        dimension.
+    buffersize : int, optional
+        When buffering is enabled, controls the size of the temporary
+        buffers. Set to 0 for the default value.
+
+    Attributes
+    ----------
+    dtypes : tuple of dtype(s)
+        The data types of the values provided in `value`. This may be
+        different from the operand data types if buffering is enabled.
+        Valid only before the iterator is closed.
+    finished : bool
+        Whether the iteration over the operands is finished or not.
+    has_delayed_bufalloc : bool
+        If True, the iterator was created with the ``delay_bufalloc`` flag,
+        and no reset() function was called on it yet.
+    has_index : bool
+        If True, the iterator was created with either the ``c_index`` or
+        the ``f_index`` flag, and the property `index` can be used to
+        retrieve it.
+    has_multi_index : bool
+        If True, the iterator was created with the ``multi_index`` flag,
+        and the property `multi_index` can be used to retrieve it.
+    index
+        When the ``c_index`` or ``f_index`` flag was used, this property
+        provides access to the index. Raises a ValueError if accessed
+        and ``has_index`` is False.
+    iterationneedsapi : bool
+        Whether iteration requires access to the Python API, for example
+        if one of the operands is an object array.
+    iterindex : int
+        An index which matches the order of iteration.
+    itersize : int
+        Size of the iterator.
+    itviews
+        Structured view(s) of `operands` in memory, matching the reordered
+        and optimized iterator access pattern. Valid only before the iterator
+        is closed.
+    multi_index
+        When the ``multi_index`` flag was used, this property
+        provides access to the index. Raises a ValueError if accessed
+        and ``has_multi_index`` is False.
+    ndim : int
+        The dimensions of the iterator.
+    nop : int
+        The number of iterator operands.
+    operands : tuple of operand(s)
+        The array(s) to be iterated over. Valid only before the iterator is
+        closed.
+    shape : tuple of ints
+        Shape tuple, the shape of the iterator.
+    value
+        Value of ``operands`` at current iteration. Normally, this is a
+        tuple of array scalars, but if the flag ``external_loop`` is used,
+        it is a tuple of one dimensional arrays.
+
+    Notes
+    -----
+    `nditer` supersedes `flatiter`.  The iterator implementation behind
+    `nditer` is also exposed by the NumPy C API.
+
+    The Python exposure supplies two iteration interfaces, one which follows
+    the Python iterator protocol, and another which mirrors the C-style
+    do-while pattern.  The native Python approach is better in most cases, but
+    if you need the coordinates or index of an iterator, use the C-style pattern.
+
+    Examples
+    --------
+    Here is how we might write an ``iter_add`` function, using the
+    Python iterator protocol:
+
+    >>> def iter_add_py(x, y, out=None):
+    ...     addop = np.add
+    ...     it = np.nditer([x, y, out], [],
+    ...                 [['readonly'], ['readonly'], ['writeonly','allocate']])
+    ...     with it:
+    ...         for (a, b, c) in it:
+    ...             addop(a, b, out=c)
+    ...         return it.operands[2]
+
+    Here is the same function, but following the C-style pattern:
+
+    >>> def iter_add(x, y, out=None):
+    ...    addop = np.add
+    ...    it = np.nditer([x, y, out], [],
+    ...                [['readonly'], ['readonly'], ['writeonly','allocate']])
+    ...    with it:
+    ...        while not it.finished:
+    ...            addop(it[0], it[1], out=it[2])
+    ...            it.iternext()
+    ...        return it.operands[2]
+
+    Here is an example outer product function:
+
+    >>> def outer_it(x, y, out=None):
+    ...     mulop = np.multiply
+    ...     it = np.nditer([x, y, out], ['external_loop'],
+    ...             [['readonly'], ['readonly'], ['writeonly', 'allocate']],
+    ...             op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
+    ...                      [-1] * x.ndim + list(range(y.ndim)),
+    ...                      None])
+    ...     with it:
+    ...         for (a, b, c) in it:
+    ...             mulop(a, b, out=c)
+    ...         return it.operands[2]
+
+    >>> a = np.arange(2)+1
+    >>> b = np.arange(3)+1
+    >>> outer_it(a,b)
+    array([[1, 2, 3],
+           [2, 4, 6]])
+
+    Here is an example function which operates like a "lambda" ufunc:
+
+    >>> def luf(lamdaexpr, *args, **kwargs):
+    ...    '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)'''
+    ...    nargs = len(args)
+    ...    op = (kwargs.get('out',None),) + args
+    ...    it = np.nditer(op, ['buffered','external_loop'],
+    ...            [['writeonly','allocate','no_broadcast']] +
+    ...                            [['readonly','nbo','aligned']]*nargs,
+    ...            order=kwargs.get('order','K'),
+    ...            casting=kwargs.get('casting','safe'),
+    ...            buffersize=kwargs.get('buffersize',0))
+    ...    while not it.finished:
+    ...        it[0] = lamdaexpr(*it[1:])
+    ...        it.iternext()
+    ...    return it.operands[0]
+
+    >>> a = np.arange(5)
+    >>> b = np.ones(5)
+    >>> luf(lambda i,j:i*i + j/2, a, b)
+    array([  0.5,   1.5,   4.5,   9.5,  16.5])
+
+    If operand flags ``"writeonly"`` or ``"readwrite"`` are used the
+    operands may be views into the original data with the
+    `WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a
+    context manager or the `nditer.close` method must be called before
+    using the result. The temporary data will be written back to the
+    original data when the `__exit__` function is called but not before:
+
+    >>> a = np.arange(6, dtype='i4')[::-2]
+    >>> with np.nditer(a, [],
+    ...        [['writeonly', 'updateifcopy']],
+    ...        casting='unsafe',
+    ...        op_dtypes=[np.dtype('f4')]) as i:
+    ...    x = i.operands[0]
+    ...    x[:] = [-1, -2, -3]
+    ...    # a still unchanged here
+    >>> a, x
+    (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
+
+    It is important to note that once the iterator is exited, dangling
+    references (like `x` in the example) may or may not share data with
+    the original data `a`. If writeback semantics were active, i.e. if
+    `x.base.flags.writebackifcopy` is `True`, then exiting the iterator
+    will sever the connection between `x` and `a`; writing to `x` will
+    no longer write to `a`. If writeback semantics are not active, then
+    `x.data` will still point at some part of `a.data`, and writing to
+    one will affect the other.
+
+    Context management and the `close` method appeared in version 1.15.0.
+
+    """)
+
+# nditer methods
+
+add_newdoc('numpy.core', 'nditer', ('copy',
+    """
+    copy()
+
+    Get a copy of the iterator in its current state.
+
+    Examples
+    --------
+    >>> x = np.arange(10)
+    >>> y = x + 1
+    >>> it = np.nditer([x, y])
+    >>> next(it)
+    (array(0), array(1))
+    >>> it2 = it.copy()
+    >>> next(it2)
+    (array(1), array(2))
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('operands',
+    """
+    operands[`Slice`]
+
+    The array(s) to be iterated over. Valid only before the iterator is closed.
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('debug_print',
+    """
+    debug_print()
+
+    Print the current state of the `nditer` instance and debug info to stdout.
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
+    """
+    enable_external_loop()
+
+    When the "external_loop" was not used during construction, but
+    is desired, this modifies the iterator to behave as if the flag
+    was specified.
+
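+    Examples
+    --------
+    A small illustration; for a contiguous 1-D array the whole array is
+    delivered as a single chunk:
+
+    >>> it = np.nditer(np.arange(6))
+    >>> it.enable_external_loop()
+    >>> next(it)
+    array([0, 1, 2, 3, 4, 5])
+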
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('iternext',
+    """
+    iternext()
+
+    Check whether iterations are left, and perform a single internal iteration
+    without returning the result.  Used in the C-style do-while pattern.
+    For an example, see `nditer`.
+
+    Returns
+    -------
+    iternext : bool
+        Whether or not there are iterations left.
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('remove_axis',
+    """
+    remove_axis(i, /)
+
+    Removes axis `i` from the iterator. Requires that the flag "multi_index"
+    be enabled.
+
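+    Examples
+    --------
+    An illustrative sketch:
+
+    >>> it = np.nditer(np.arange(6).reshape(2, 3), flags=['multi_index'])
+    >>> it.shape
+    (2, 3)
+    >>> it.remove_axis(0)
+    >>> it.shape
+    (3,)
+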
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
+    """
+    remove_multi_index()
+
+    When the "multi_index" flag was specified, this removes it, allowing
+    the internal iteration structure to be optimized further.
+
+    """))
+
+add_newdoc('numpy.core', 'nditer', ('reset',
+    """
+    reset()
+
+    Reset the iterator to its initial state.
+
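+    Examples
+    --------
+    A small illustration:
+
+    >>> it = np.nditer(np.arange(3))
+    >>> next(it), next(it)
+    (array(0), array(1))
+    >>> it.reset()
+    >>> next(it)
+    array(0)
+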
+    """))
+
+add_newdoc('numpy.core', 'nested_iters',
+    """
+    nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, \
+    order="K", casting="safe", buffersize=0)
+
+    Create nditers for use in nested loops
+
+    Create a tuple of `nditer` objects which iterate in nested loops over
+    different axes of the op argument. The first iterator is used in the
+    outermost loop, the last in the innermost loop. Advancing one will change
+    the subsequent iterators to point at its new element.
+
+    Parameters
+    ----------
+    op : ndarray or sequence of array_like
+        The array(s) to iterate over.
+
+    axes : list of list of int
+        Each item is used as an "op_axes" argument to an nditer
+
+    flags, op_flags, op_dtypes, order, casting, buffersize (optional)
+        See `nditer` parameters of the same name
+
+    Returns
+    -------
+    iters : tuple of nditer
+        An nditer for each item in `axes`, outermost first
+
+    See Also
+    --------
+    nditer
+
+    Examples
+    --------
+
+    Basic usage. Note how y is the "flattened" version of
+    [a[:, 0, :], a[:, 1, :], a[:, 2, :]] since we specified
+    the first iter's axes as [1].
+
+    >>> a = np.arange(12).reshape(2, 3, 2)
+    >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
+    >>> for x in i:
+    ...      print(i.multi_index)
+    ...      for y in j:
+    ...          print('', j.multi_index, y)
+    (0,)
+     (0, 0) 0
+     (0, 1) 1
+     (1, 0) 6
+     (1, 1) 7
+    (1,)
+     (0, 0) 2
+     (0, 1) 3
+     (1, 0) 8
+     (1, 1) 9
+    (2,)
+     (0, 0) 4
+     (0, 1) 5
+     (1, 0) 10
+     (1, 1) 11
+
+    """)
+
+add_newdoc('numpy.core', 'nditer', ('close',
+    """
+    close()
+
+    Resolve all writeback semantics in writeable operands.
+
+    .. versionadded:: 1.15.0
+
+    See Also
+    --------
+
+    :ref:`nditer-context-manager`
+
+    """))
+
+
+###############################################################################
+#
+# broadcast
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'broadcast',
+    """
+    Produce an object that mimics broadcasting.
+
+    Parameters
+    ----------
+    in1, in2, ... : array_like
+        Input parameters.
+
+    Returns
+    -------
+    b : broadcast object
+        Broadcast the input parameters against one another, and
+        return an object that encapsulates the result.
+        Amongst others, it has ``shape`` and ``nd`` properties, and
+        may be used as an iterator.
+
+    See Also
+    --------
+    broadcast_arrays
+    broadcast_to
+    broadcast_shapes
+
+    Examples
+    --------
+
+    Manually adding two vectors, using broadcasting:
+
+    >>> x = np.array([[1], [2], [3]])
+    >>> y = np.array([4, 5, 6])
+    >>> b = np.broadcast(x, y)
+
+    >>> out = np.empty(b.shape)
+    >>> out.flat = [u+v for (u,v) in b]
+    >>> out
+    array([[5.,  6.,  7.],
+           [6.,  7.,  8.],
+           [7.,  8.,  9.]])
+
+    Compare against built-in broadcasting:
+
+    >>> x + y
+    array([[5, 6, 7],
+           [6, 7, 8],
+           [7, 8, 9]])
+
+    """)
+
+# attributes
+
+add_newdoc('numpy.core', 'broadcast', ('index',
+    """
+    current index in broadcasted result
+
+    Examples
+    --------
+    >>> x = np.array([[1], [2], [3]])
+    >>> y = np.array([4, 5, 6])
+    >>> b = np.broadcast(x, y)
+    >>> b.index
+    0
+    >>> next(b), next(b), next(b)
+    ((1, 4), (1, 5), (1, 6))
+    >>> b.index
+    3
+
+    """))
+
+add_newdoc('numpy.core', 'broadcast', ('iters',
+    """
+    tuple of iterators along ``self``'s "components."
+
+    Returns a tuple of `numpy.flatiter` objects, one for each "component"
+    of ``self``.
+
+    See Also
+    --------
+    numpy.flatiter
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> row, col = b.iters
+    >>> next(row), next(col)
+    (1, 4)
+
+    """))
+
+add_newdoc('numpy.core', 'broadcast', ('ndim',
+    """
+    Number of dimensions of broadcasted result. Alias for `nd`.
+
+    .. versionadded:: 1.12.0
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> b.ndim
+    2
+
+    """))
+
+add_newdoc('numpy.core', 'broadcast', ('nd',
+    """
+    Number of dimensions of broadcasted result. For code intended for NumPy
+    1.12.0 and later the more consistent `ndim` is preferred.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> b.nd
+    2
+
+    """))
+
+add_newdoc('numpy.core', 'broadcast', ('numiter',
+    """
+    Number of iterators possessed by the broadcasted result.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> b.numiter
+    2
+
+    """))
+
+add_newdoc('numpy.core', 'broadcast', ('shape',
+    """
+    Shape of broadcasted result.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> b.shape
+    (3, 3)
+
+    """))
+
+add_newdoc('numpy.core', 'broadcast', ('size',
+    """
+    Total size of broadcasted result.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> b.size
+    9
+
+    """))
+
+add_newdoc('numpy.core', 'broadcast', ('reset',
+    """
+    reset()
+
+    Reset the broadcasted result's iterator(s).
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    None
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> y = np.array([[4], [5], [6]])
+    >>> b = np.broadcast(x, y)
+    >>> b.index
+    0
+    >>> next(b), next(b), next(b)
+    ((1, 4), (2, 4), (3, 4))
+    >>> b.index
+    3
+    >>> b.reset()
+    >>> b.index
+    0
+
+    """))
+
+###############################################################################
+#
+# numpy functions
+#
+###############################################################################
+
+add_newdoc('numpy.core.multiarray', 'array',
+    """
+    array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0,
+          like=None)
+
+    Create an array.
+
+    Parameters
+    ----------
+    object : array_like
+        An array, any object exposing the array interface, an object whose
+        ``__array__`` method returns an array, or any (nested) sequence.
+        If object is a scalar, a 0-dimensional array containing object is
+        returned.
+    dtype : data-type, optional
+        The desired data-type for the array. If not given, NumPy will try to use
+        a default ``dtype`` that can represent the values (by applying promotion
+        rules when necessary).
+    copy : bool, optional
+        If true (default), then the object is copied.  Otherwise, a copy will
+        only be made if ``__array__`` returns a copy, if obj is a nested
+        sequence, or if a copy is needed to satisfy any of the other
+        requirements (``dtype``, ``order``, etc.).
+    order : {'K', 'A', 'C', 'F'}, optional
+        Specify the memory layout of the array. If object is not an array, the
+        newly created array will be in C order (row major) unless 'F' is
+        specified, in which case it will be in Fortran order (column major).
+        If object is an array the following holds.
+
+        ===== ========= ===================================================
+        order  no copy                     copy=True
+        ===== ========= ===================================================
+        'K'   unchanged F & C order preserved, otherwise most similar order
+        'A'   unchanged F order if input is F and not C, otherwise C order
+        'C'   C order   C order
+        'F'   F order   F order
+        ===== ========= ===================================================
+
+        When ``copy=False`` and a copy is made for other reasons, the result is
+        the same as if ``copy=True``, with some exceptions for 'A', see the
+        Notes section. The default order is 'K'.
+    subok : bool, optional
+        If True, then sub-classes will be passed-through, otherwise
+        the returned array will be forced to be a base-class array (default).
+    ndmin : int, optional
+        Specifies the minimum number of dimensions that the resulting
+        array should have.  Ones will be prepended to the shape as
+        needed to meet this requirement.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        An array object satisfying the specified requirements.
+
+    See Also
+    --------
+    empty_like : Return an empty array with shape and type of input.
+    ones_like : Return an array of ones with shape and type of input.
+    zeros_like : Return an array of zeros with shape and type of input.
+    full_like : Return a new array with shape of input filled with value.
+    empty : Return a new uninitialized array.
+    ones : Return a new array setting values to one.
+    zeros : Return a new array setting values to zero.
+    full : Return a new array of given shape filled with value.
+
+
+    Notes
+    -----
+    When order is 'A' and ``object`` is an array in neither 'C' nor 'F' order,
+    and a copy is forced by a change in dtype, then the order of the result is
+    not necessarily 'C' as expected. This is likely a bug.
+
+    Examples
+    --------
+    >>> np.array([1, 2, 3])
+    array([1, 2, 3])
+
+    Upcasting:
+
+    >>> np.array([1, 2, 3.0])
+    array([ 1.,  2.,  3.])
+
+    More than one dimension:
+
+    >>> np.array([[1, 2], [3, 4]])
+    array([[1, 2],
+           [3, 4]])
+
+    Minimum dimensions 2:
+
+    >>> np.array([1, 2, 3], ndmin=2)
+    array([[1, 2, 3]])
+
+    Type provided:
+
+    >>> np.array([1, 2, 3], dtype=complex)
+    array([ 1.+0.j,  2.+0.j,  3.+0.j])
+
+    Data-type consisting of more than one element:
+
+    >>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
+    >>> x['a']
+    array([1, 3])
+
+    Creating an array from sub-classes:
+
+    >>> np.array(np.mat('1 2; 3 4'))
+    array([[1, 2],
+           [3, 4]])
+
+    >>> np.array(np.mat('1 2; 3 4'), subok=True)
+    matrix([[1, 2],
+            [3, 4]])
+
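+    How ``order`` interacts with copies (a small illustrative sketch of
+    the table above; 'K' preserves the input's Fortran order):
+
+    >>> a = np.ones((2, 3), order='F')
+    >>> np.array(a, order='K').flags['F_CONTIGUOUS']
+    True
+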
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'asarray',
+    """
+    asarray(a, dtype=None, order=None, *, like=None)
+
+    Convert the input to an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array.  This
+        includes lists, lists of tuples, tuples, tuples of tuples, tuples
+        of lists and ndarrays.
+    dtype : data-type, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Memory layout.  'A' and 'K' depend on the order of input array a.
+        'C' row-major (C-style),
+        'F' column-major (Fortran-style) memory representation.
+        'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
+        'K' (keep) preserve input order
+        Defaults to 'K'.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Array interpretation of `a`.  No copy is performed if the input
+        is already an ndarray with matching dtype and order.  If `a` is a
+        subclass of ndarray, a base class ndarray is returned.
+
+    See Also
+    --------
+    asanyarray : Similar function which passes through subclasses.
+    ascontiguousarray : Convert input to a contiguous array.
+    asfarray : Convert input to a floating point ndarray.
+    asfortranarray : Convert input to an ndarray with column-major
+                     memory order.
+    asarray_chkfinite : Similar function which checks input for NaNs and Infs.
+    fromiter : Create an array from an iterator.
+    fromfunction : Construct an array by executing a function on grid
+                   positions.
+
+    Examples
+    --------
+    Convert a list into an array:
+
+    >>> a = [1, 2]
+    >>> np.asarray(a)
+    array([1, 2])
+
+    Existing arrays are not copied:
+
+    >>> a = np.array([1, 2])
+    >>> np.asarray(a) is a
+    True
+
+    If `dtype` is set, array is copied only if dtype does not match:
+
+    >>> a = np.array([1, 2], dtype=np.float32)
+    >>> np.asarray(a, dtype=np.float32) is a
+    True
+    >>> np.asarray(a, dtype=np.float64) is a
+    False
+
+    Contrary to `asanyarray`, ndarray subclasses are not passed through:
+
+    >>> issubclass(np.recarray, np.ndarray)
+    True
+    >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
+    >>> np.asarray(a) is a
+    False
+    >>> np.asanyarray(a) is a
+    True
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'asanyarray',
+    """
+    asanyarray(a, dtype=None, order=None, *, like=None)
+
+    Convert the input to an ndarray, but pass ndarray subclasses through.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array.  This
+        includes scalars, lists, lists of tuples, tuples, tuples of tuples,
+        tuples of lists, and ndarrays.
+    dtype : data-type, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Memory layout.  'A' and 'K' depend on the order of input array a.
+        'C' row-major (C-style),
+        'F' column-major (Fortran-style) memory representation.
+        'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
+        'K' (keep) preserve input order
+        Defaults to 'C'.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray or an ndarray subclass
+        Array interpretation of `a`.  If `a` is an ndarray or a subclass
+        of ndarray, it is returned as-is and no copy is performed.
+
+    See Also
+    --------
+    asarray : Similar function which always returns ndarrays.
+    ascontiguousarray : Convert input to a contiguous array.
+    asfarray : Convert input to a floating point ndarray.
+    asfortranarray : Convert input to an ndarray with column-major
+                     memory order.
+    asarray_chkfinite : Similar function which checks input for NaNs and
+                        Infs.
+    fromiter : Create an array from an iterator.
+    fromfunction : Construct an array by executing a function on grid
+                   positions.
+
+    Examples
+    --------
+    Convert a list into an array:
+
+    >>> a = [1, 2]
+    >>> np.asanyarray(a)
+    array([1, 2])
+
+    Instances of `ndarray` subclasses are passed through as-is:
+
+    >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
+    >>> np.asanyarray(a) is a
+    True
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'ascontiguousarray',
+    """
+    ascontiguousarray(a, dtype=None, *, like=None)
+
+    Return a contiguous array (ndim >= 1) in memory (C order).
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    dtype : str or dtype object, optional
+        Data-type of returned array.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Contiguous array of same shape and content as `a`, with type `dtype`
+        if specified.
+
+    See Also
+    --------
+    asfortranarray : Convert input to an ndarray with column-major
+                     memory order.
+    require : Return an ndarray that satisfies requirements.
+    ndarray.flags : Information about the memory layout of the array.
+
+    Examples
+    --------
+    Starting with a Fortran-contiguous array:
+
+    >>> x = np.ones((2, 3), order='F')
+    >>> x.flags['F_CONTIGUOUS']
+    True
+
+    Calling ``ascontiguousarray`` makes a C-contiguous copy:
+
+    >>> y = np.ascontiguousarray(x)
+    >>> y.flags['C_CONTIGUOUS']
+    True
+    >>> np.may_share_memory(x, y)
+    False
+
+    Now, starting with a C-contiguous array:
+
+    >>> x = np.ones((2, 3), order='C')
+    >>> x.flags['C_CONTIGUOUS']
+    True
+
+    Then, calling ``ascontiguousarray`` returns the same object:
+
+    >>> y = np.ascontiguousarray(x)
+    >>> x is y
+    True
+
+    Note: This function returns an array with at least one dimension (1-d),
+    so it will not preserve 0-d arrays.
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'asfortranarray',
+    """
+    asfortranarray(a, dtype=None, *, like=None)
+
+    Return an array (ndim >= 1) laid out in Fortran order in memory.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    dtype : str or dtype object, optional
+        By default, the data-type is inferred from the input data.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        The input `a` in Fortran, or column-major, order.
+
+    See Also
+    --------
+    ascontiguousarray : Convert input to a contiguous (C order) array.
+    asanyarray : Convert input to an ndarray with either row or
+        column-major memory order.
+    require : Return an ndarray that satisfies requirements.
+    ndarray.flags : Information about the memory layout of the array.
+
+    Examples
+    --------
+    Starting with a C-contiguous array:
+
+    >>> x = np.ones((2, 3), order='C')
+    >>> x.flags['C_CONTIGUOUS']
+    True
+
+    Calling ``asfortranarray`` makes a Fortran-contiguous copy:
+
+    >>> y = np.asfortranarray(x)
+    >>> y.flags['F_CONTIGUOUS']
+    True
+    >>> np.may_share_memory(x, y)
+    False
+
+    Now, starting with a Fortran-contiguous array:
+
+    >>> x = np.ones((2, 3), order='F')
+    >>> x.flags['F_CONTIGUOUS']
+    True
+
+    Then, calling ``asfortranarray`` returns the same object:
+
+    >>> y = np.asfortranarray(x)
+    >>> x is y
+    True
+
+    Note: This function returns an array with at least one dimension (1-d),
+    so it will not preserve 0-d arrays.
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'empty',
+    """
+    empty(shape, dtype=float, order='C', *, like=None)
+
+    Return a new array of given shape and type, without initializing entries.
+
+    Parameters
+    ----------
+    shape : int or tuple of int
+        Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
+    dtype : data-type, optional
+        Desired output data-type for the array, e.g, `numpy.int8`. Default is
+        `numpy.float64`.
+    order : {'C', 'F'}, optional, default: 'C'
+        Whether to store multi-dimensional data in row-major
+        (C-style) or column-major (Fortran-style) order in
+        memory.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Array of uninitialized (arbitrary) data of the given shape, dtype, and
+        order.  Object arrays will be initialized to None.
+
+    See Also
+    --------
+    empty_like : Return an empty array with shape and type of input.
+    ones : Return a new array setting values to one.
+    zeros : Return a new array setting values to zero.
+    full : Return a new array of given shape filled with value.
+
+
+    Notes
+    -----
+    `empty`, unlike `zeros`, does not set the array values to zero,
+    and may therefore be marginally faster.  On the other hand, it requires
+    the user to manually set all the values in the array, and should be
+    used with caution.
+
+    Examples
+    --------
+    >>> np.empty([2, 2])
+    array([[ -9.74499359e+001,   6.69583040e-309],
+           [  2.13182611e-314,   3.06959433e-309]])         #uninitialized
+
+    >>> np.empty([2, 2], dtype=int)
+    array([[-1073741821, -1067949133],
+           [  496041986,    19249760]])                     #uninitialized
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'scalar',
+    """
+    scalar(dtype, obj)
+
+    Return a new scalar array of the given type initialized with obj.
+
+    This function is meant mainly for pickle support. `dtype` must be a
+    valid data-type descriptor. If `dtype` corresponds to an object
+    descriptor, then `obj` can be any object, otherwise `obj` must be a
+    string. If `obj` is not given, it will be interpreted as None for object
+    type and as zeros for all other types.
+
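+    A minimal sketch (illustrative only; this routine exists for pickle
+    support and is rarely called directly):
+
+    >>> np.core.multiarray.scalar(np.dtype('float64'), b'\\x00' * 8)
+    0.0
+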
+    """)
+
+add_newdoc('numpy.core.multiarray', 'zeros',
+    """
+    zeros(shape, dtype=float, order='C', *, like=None)
+
+    Return a new array of given shape and type, filled with zeros.
+
+    Parameters
+    ----------
+    shape : int or tuple of ints
+        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+    dtype : data-type, optional
+        The desired data-type for the array, e.g., `numpy.int8`.  Default is
+        `numpy.float64`.
+    order : {'C', 'F'}, optional, default: 'C'
+        Whether to store multi-dimensional data in row-major
+        (C-style) or column-major (Fortran-style) order in
+        memory.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Array of zeros with the given shape, dtype, and order.
+
+    See Also
+    --------
+    zeros_like : Return an array of zeros with shape and type of input.
+    empty : Return a new uninitialized array.
+    ones : Return a new array setting values to one.
+    full : Return a new array of given shape filled with value.
+
+    Examples
+    --------
+    >>> np.zeros(5)
+    array([ 0.,  0.,  0.,  0.,  0.])
+
+    >>> np.zeros((5,), dtype=int)
+    array([0, 0, 0, 0, 0])
+
+    >>> np.zeros((2, 1))
+    array([[ 0.],
+           [ 0.]])
+
+    >>> s = (2,2)
+    >>> np.zeros(s)
+    array([[ 0.,  0.],
+           [ 0.,  0.]])
+
+    >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
+    array([(0, 0), (0, 0)],
+          dtype=[('x', '<i4'), ('y', '<i4')])
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'fromstring',
+    """
+    fromstring(string, dtype=float, count=-1, *, sep, like=None)
+
+    A new 1-D array initialized from text data in a string.
+
+    Parameters
+    ----------
+    string : str
+        A string containing the data.
+    dtype : data-type, optional
+        The data type of the array; default: float.
+    count : int, optional
+        Read this number of `dtype` elements from the data.  If this is
+        negative (the default), the count will be determined from the
+        length of the data.
+    sep : str, optional
+        The string separating numbers in the data; extra whitespace between
+        elements is also ignored.
+
+        .. deprecated:: 1.14
+            Passing ``sep=''``, the default, is deprecated since it will
+            trigger the deprecated binary mode of this function, which is
+            better spelt ``frombuffer(string, dtype, count)``.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    arr : ndarray
+        The constructed array.
+
+    Raises
+    ------
+    ValueError
+        If the string is not the correct size to satisfy the requested
+        `dtype` and `count`.
+
+    See Also
+    --------
+    frombuffer, fromfile, fromiter
+
+    Examples
+    --------
+    >>> np.fromstring('1 2', dtype=int, sep=' ')
+    array([1, 2])
+    >>> np.fromstring('1, 2', dtype=int, sep=',')
+    array([1, 2])
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'compare_chararrays',
+    """
+    compare_chararrays(a1, a2, cmp, rstrip)
+
+    Performs element-wise comparison of two string arrays using the
+    comparison operator specified by `cmp`.
+
+    Parameters
+    ----------
+    a1, a2 : array_like
+        Arrays to be compared.
+    cmp : {"<", "<=", "==", ">=", ">", "!="}
+        Type of comparison.
+    rstrip : bool
+        If True, the spaces at the end of strings are removed before the
+        comparison.
+
+    Returns
+    -------
+    out : ndarray
+        Boolean output array with the same shape as `a1` and `a2`.
+
+    Raises
+    ------
+    ValueError
+        If `cmp` is not valid.
+    TypeError
+        If at least one of `a1` or `a2` is a non-string array.
+
+    Examples
+    --------
+    >>> a = np.array(["a", "b", "cde"])
+    >>> b = np.array(["a", "a", "dec"])
+    >>> np.compare_chararrays(a, b, ">", True)
+    array([False,  True, False])
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'fromiter',
+    """
+    fromiter(iter, dtype, count=-1, *, like=None)
+
+    Create a new 1-dimensional array from an iterable object.
+
+    Parameters
+    ----------
+    iter : iterable object
+        An iterable object providing data for the array.
+    dtype : data-type
+        The data-type of the returned array.
+
+        .. versionchanged:: 1.23
+            Object and subarray dtypes are now supported (note that the final
+            result is not 1-D for a subarray dtype).
+
+    count : int, optional
+        The number of items to read from *iterable*.  The default is -1,
+        which means all data is read.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        The output array.
+
+    Notes
+    -----
+    Specify `count` to improve performance.  It allows ``fromiter`` to
+    pre-allocate the output array, instead of resizing it on demand.
+
+    Examples
+    --------
+    >>> iterable = (x*x for x in range(5))
+    >>> np.fromiter(iterable, float)
+    array([  0.,   1.,   4.,   9.,  16.])
+
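+    Passing `count` gives the same result while letting ``fromiter``
+    pre-allocate the output array (a sketch of the note above):
+
+    >>> np.fromiter((x*x for x in range(5)), dtype=float, count=5)
+    array([  0.,   1.,   4.,   9.,  16.])
+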
+    A carefully constructed subarray dtype will lead to higher dimensional
+    results:
+
+    >>> iterable = ((x+1, x+2) for x in range(5))
+    >>> np.fromiter(iterable, dtype=np.dtype((int, 2)))
+    array([[1, 2],
+           [2, 3],
+           [3, 4],
+           [4, 5],
+           [5, 6]])
+
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'fromfile',
+    """
+    fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None)
+
+    Construct an array from data in a text or binary file.
+
+    A highly efficient way of reading binary data with a known data-type,
+    as well as parsing simply formatted text files.  Data written using the
+    `tofile` method can be read using this function.
+
+    Parameters
+    ----------
+    file : file or str or Path
+        Open file object or filename.
+
+        .. versionchanged:: 1.17.0
+            `pathlib.Path` objects are now accepted.
+
+    dtype : data-type
+        Data type of the returned array.
+        For binary files, it is used to determine the size and byte-order
+        of the items in the file.
+        Most builtin numeric types are supported and extension types may be supported.
+
+        .. versionadded:: 1.18.0
+            Complex dtypes.
+
+    count : int
+        Number of items to read. ``-1`` means all items (i.e., the complete
+        file).
+    sep : str
+        Separator between items if file is a text file.
+        Empty ("") separator means the file should be treated as binary.
+        Spaces (" ") in the separator match zero or more whitespace characters.
+        A separator consisting only of spaces must match at least one
+        whitespace.
+    offset : int
+        The offset (in bytes) from the file's current position. Defaults to 0.
+        Only permitted for binary files.
+
+        .. versionadded:: 1.17.0
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    See also
+    --------
+    load, save
+    ndarray.tofile
+    loadtxt : More flexible way of loading data from a text file.
+
+    Notes
+    -----
+    Do not rely on the combination of `tofile` and `fromfile` for
+    data storage, as the binary files generated are not platform
+    independent.  In particular, no byte-order or data-type information is
+    saved.  Data can be stored in the platform independent ``.npy`` format
+    using `save` and `load` instead.
+
+    Examples
+    --------
+    Construct an ndarray:
+
+    >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]),
+    ...                ('temp', float)])
+    >>> x = np.zeros((1,), dtype=dt)
+    >>> x['time']['min'] = 10; x['temp'] = 98.25
+    >>> x
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+    Save the raw data to disk:
+
+    >>> import tempfile
+    >>> fname = tempfile.mkstemp()[1]
+    >>> x.tofile(fname)
+
+    Read the raw data from disk:
+
+    >>> np.fromfile(fname, dtype=dt)
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+    The recommended way to store and load data:
+
+    >>> np.save(fname, x)
+    >>> np.load(fname + '.npy')
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'frombuffer',
+    """
+    frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None)
+
+    Interpret a buffer as a 1-dimensional array.
+
+    Parameters
+    ----------
+    buffer : buffer_like
+        An object that exposes the buffer interface.
+    dtype : data-type, optional
+        Data-type of the returned array; default: float.
+    count : int, optional
+        Number of items to read. ``-1`` means all data in the buffer.
+    offset : int, optional
+        Start reading the buffer from this offset (in bytes); default: 0.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+
+    See also
+    --------
+    ndarray.tobytes
+        Inverse of this operation, construct Python bytes from the raw data
+        bytes in the array.
+
+    Notes
+    -----
+    If the buffer has data that is not in machine byte-order, this should
+    be specified as part of the data-type, e.g.::
+
+      >>> dt = np.dtype(int)
+      >>> dt = dt.newbyteorder('>')
+      >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP
+
+    The data of the resulting array will not be byteswapped, but will be
+    interpreted correctly.
+
+    This function creates a view into the original object.  This should be safe
+    in general, but it may make sense to copy the result when the original
+    object is mutable or untrusted.
+
+    Examples
+    --------
+    >>> s = b'hello world'
+    >>> np.frombuffer(s, dtype='S1', count=5, offset=6)
+    array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1')
+
+    >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
+    array([1, 2], dtype=uint8)
+    >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
+    array([1, 2, 3], dtype=uint8)
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', 'from_dlpack',
+    """
+    from_dlpack(x, /)
+
+    Create a NumPy array from an object implementing the ``__dlpack__``
+    protocol. Generally, the returned NumPy array is a read-only view
+    of the input object. See [1]_ and [2]_ for more details.
+
+    Parameters
+    ----------
+    x : object
+        A Python object that implements the ``__dlpack__`` and
+        ``__dlpack_device__`` methods.
+
+    Returns
+    -------
+    out : ndarray
+
+    References
+    ----------
+    .. [1] Array API documentation,
+       https://data-apis.org/array-api/latest/design_topics/data_interchange.html#syntax-for-data-interchange-with-dlpack
+
+    .. [2] Python specification for DLPack,
+       https://dmlc.github.io/dlpack/latest/python_spec.html
+
+    Examples
+    --------
+    >>> import torch
+    >>> x = torch.arange(10)
+    >>> # create a view of the torch tensor "x" in NumPy
+    >>> y = np.from_dlpack(x)
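+
+    NumPy arrays implement the protocol themselves, so the function can
+    also be exercised without an external library (a sketch assuming
+    NumPy 1.23 or later):
+
+    >>> x = np.arange(5)
+    >>> y = np.from_dlpack(x)
+    >>> np.shares_memory(x, y)
+    True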
+    """)
+
+add_newdoc('numpy.core', 'fastCopyAndTranspose',
+    """
+    fastCopyAndTranspose(a)
+
+    .. deprecated:: 1.24
+
+       fastCopyAndTranspose is deprecated and will be removed. Use the copy and
+       transpose methods instead, e.g. ``arr.T.copy()``
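+
+    A short sketch of the suggested replacement:
+
+    >>> a = np.arange(6).reshape(2, 3)
+    >>> a.T.copy()  # supported spelling of fastCopyAndTranspose(a)
+    array([[0, 3],
+           [1, 4],
+           [2, 5]])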
+    """)
+
+add_newdoc('numpy.core.multiarray', 'correlate',
+    """cross_correlate(a,v, mode=0)""")
+
+add_newdoc('numpy.core.multiarray', 'arange',
+    """
+    arange([start,] stop[, step,], dtype=None, *, like=None)
+
+    Return evenly spaced values within a given interval.
+
+    ``arange`` can be called with a varying number of positional arguments:
+
+    * ``arange(stop)``: Values are generated within the half-open interval
+      ``[0, stop)`` (in other words, the interval including `start` but
+      excluding `stop`).
+    * ``arange(start, stop)``: Values are generated within the half-open
+      interval ``[start, stop)``.
+    * ``arange(start, stop, step)`` Values are generated within the half-open
+      interval ``[start, stop)``, with spacing between values given by
+      ``step``.
+
+    For integer arguments the function is roughly equivalent to the Python
+    built-in :py:class:`range`, but returns an ndarray rather than a ``range``
+    instance.
+
+    When using a non-integer step, such as 0.1, it is often better to use
+    `numpy.linspace`.
+
+    See the Warning sections below for more information.
+
+    Parameters
+    ----------
+    start : integer or real, optional
+        Start of interval.  The interval includes this value.  The default
+        start value is 0.
+    stop : integer or real
+        End of interval.  The interval does not include this value, except
+        in some cases where `step` is not an integer and floating point
+        round-off affects the length of `out`.
+    step : integer or real, optional
+        Spacing between values.  For any output `out`, this is the distance
+        between two adjacent values, ``out[i+1] - out[i]``.  The default
+        step size is 1.  If `step` is specified as a positional argument,
+        `start` must also be given.
+    dtype : dtype, optional
+        The type of the output array.  If `dtype` is not given, infer the data
+        type from the other input arguments.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    arange : ndarray
+        Array of evenly spaced values.
+
+        For floating point arguments, the length of the result is
+        ``ceil((stop - start)/step)``.  Because of floating point overflow,
+        this rule may result in the last element of `out` being greater
+        than `stop`.
+
+    Warnings
+    --------
+    The length of the output might not be numerically stable.
+
+    Another stability issue is due to the internal implementation of
+    `numpy.arange`.
+    The actual step value used to populate the array is
+    ``dtype(start + step) - dtype(start)`` and not `step`. Precision loss
+    can occur here, due to casting or due to using floating points when
+    `start` is much larger than `step`. This can lead to unexpected
+    behaviour. For example::
+
+      >>> np.arange(0, 5, 0.5, dtype=int)
+      array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+      >>> np.arange(-3, 3, 0.5, dtype=int)
+      array([-3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8])
+
+    In such cases, the use of `numpy.linspace` should be preferred.
+
+    The built-in :py:class:`range` generates :std:doc:`Python built-in integers
+    that have arbitrary size <python:c-api/long>`, while `numpy.arange`
+    produces `numpy.int32` or `numpy.int64` numbers. This may result in
+    incorrect results for large integer values::
+
+      >>> power = 40
+      >>> modulo = 10000
+      >>> x1 = [(n ** power) % modulo for n in range(8)]
+      >>> x2 = [(n ** power) % modulo for n in np.arange(8)]
+      >>> print(x1)
+      [0, 1, 7776, 8801, 6176, 625, 6576, 4001]  # correct
+      >>> print(x2)
+      [0, 1, 7776, 7185, 0, 5969, 4816, 3361]  # incorrect
+
+    See Also
+    --------
+    numpy.linspace : Evenly spaced numbers with careful handling of endpoints.
+    numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions.
+    numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
+    :ref:`how-to-partition`
+
+    Examples
+    --------
+    >>> np.arange(3)
+    array([0, 1, 2])
+    >>> np.arange(3.0)
+    array([ 0.,  1.,  2.])
+    >>> np.arange(3,7)
+    array([3, 4, 5, 6])
+    >>> np.arange(3,7,2)
+    array([3, 5])
+
+    """.replace(
+        "${ARRAY_FUNCTION_LIKE}",
+        array_function_like_doc,
+    ))
+
+add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
+    """_get_ndarray_c_version()
+
+    Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number.
+
+    """)
+
+add_newdoc('numpy.core.multiarray', '_reconstruct',
+    """_reconstruct(subtype, shape, dtype)
+
+    Construct an empty array. Used by Pickles.
+
+    """)
+
+
+add_newdoc('numpy.core.multiarray', 'set_string_function',
+    """
+    set_string_function(f, repr=1)
+
+    Internal method to set a function to be used when pretty printing arrays.
+
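+    A brief sketch (the public wrapper ``np.set_string_function`` is the
+    usual entry point; passing ``None`` restores the default):
+
+    >>> np.set_string_function(lambda a: 'array of size %d' % a.size)
+    >>> np.arange(3)
+    array of size 3
+    >>> np.set_string_function(None)
+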
+    """)
+
+add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
+    """
+    set_numeric_ops(op1=func1, op2=func2, ...)
+
+    Set numerical operators for array objects.
+
+    .. deprecated:: 1.16
+
+        For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`.
+        For ndarray subclasses, define the ``__array_ufunc__`` method and
+        override the relevant ufunc.
+
+    Parameters
+    ----------
+    op1, op2, ... : callable
+        Each ``op = func`` pair describes an operator to be replaced.
+        For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
+        addition by modulus 5 addition.
+
+    Returns
+    -------
+    saved_ops : list of callables
+        A list of all operators, stored before making replacements.
+
+    Notes
+    -----
+    .. warning::
+       Use with care!  Incorrect usage may lead to memory errors.
+
+    A function replacing an operator cannot make use of that operator.
+    For example, when replacing add, you may not use ``+``.  Instead,
+    directly call ufuncs.
+
+    Examples
+    --------
+    >>> def add_mod5(x, y):
+    ...     return np.add(x, y) % 5
+    ...
+    >>> old_funcs = np.set_numeric_ops(add=add_mod5)
+
+    >>> x = np.arange(12).reshape((3, 4))
+    >>> x + x
+    array([[0, 2, 4, 1],
+           [3, 0, 2, 4],
+           [1, 3, 0, 2]])
+
+    >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'promote_types',
+    """
+    promote_types(type1, type2)
+
+    Returns the data type with the smallest size and smallest scalar
+    kind to which both ``type1`` and ``type2`` may be safely cast.
+    The returned data type is always considered "canonical", this mainly
+    means that the promoted dtype will always be in native byte order.
+
+    This function is symmetric, but rarely associative.
+
+    Parameters
+    ----------
+    type1 : dtype or dtype specifier
+        First data type.
+    type2 : dtype or dtype specifier
+        Second data type.
+
+    Returns
+    -------
+    out : dtype
+        The promoted data type.
+
+    Notes
+    -----
+    Please see `numpy.result_type` for additional information about promotion.
+
+    .. versionadded:: 1.6.0
+
+    Starting in NumPy 1.9, the promote_types function returns a valid string
+    length when given an integer or float dtype as one argument and a string
+    dtype as another argument. Previously it always returned the input string
+    dtype, even if it wasn't long enough to store the max integer/float value
+    converted to a string.
+
+    .. versionchanged:: 1.23.0
+
+    NumPy now supports promotion for more structured dtypes.  It will now
+    remove unnecessary padding from a structure dtype and promote included
+    fields individually.
+
+    See Also
+    --------
+    result_type, dtype, can_cast
+
+    Examples
+    --------
+    >>> np.promote_types('f4', 'f8')
+    dtype('float64')
+
+    >>> np.promote_types('i8', 'f4')
+    dtype('float64')
+
+    >>> np.promote_types('>i8', '<c8')
+    dtype('complex128')
+
+    >>> np.promote_types('i4', 'S8')
+    dtype('S11')
+
+    An example of a non-associative case:
+
+    >>> p = np.promote_types
+    >>> p('S', p('i1', 'u1'))
+    dtype('S6')
+    >>> p(p('S', 'i1'), 'u1')
+    dtype('S4')
+
+    """)
+
+add_newdoc('numpy.core.multiarray', 'c_einsum',
+    """
+    c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
+           casting='safe')
+
+    *This documentation shadows that of the native python implementation of the `einsum` function,
+    except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.*
+
+    Evaluates the Einstein summation convention on the operands.
+
+    Using the Einstein summation convention, many common multi-dimensional,
+    linear algebraic array operations can be represented in a simple fashion.
+    In *implicit* mode `einsum` computes these values.
+
+    In *explicit* mode, `einsum` provides further flexibility to compute
+    other array operations that might not be considered classical Einstein
+    summation operations, by disabling, or forcing summation over specified
+    subscript labels.
+
+    See the notes and examples for clarification.
+
+    Parameters
+    ----------
+    subscripts : str
+        Specifies the subscripts for summation as comma separated list of
+        subscript labels. An implicit (classical Einstein summation)
+        calculation is performed unless the explicit indicator '->' is
+        included as well as subscript labels of the precise output form.
+    operands : list of array_like
+        These are the arrays for the operation.
+    out : ndarray, optional
+        If provided, the calculation is done into this array.
+    dtype : {data-type, None}, optional
+        If provided, forces the calculation to use the data type specified.
+        Note that you may have to also give a more liberal `casting`
+        parameter to allow the conversions. Default is None.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Controls the memory layout of the output. 'C' means it should
+        be C contiguous. 'F' means it should be Fortran contiguous,
+        'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
+        'K' means it should be as close to the layout of the inputs as
+        is possible, including arbitrarily permuted axes.
+        Default is 'K'.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur.  Setting this to
+        'unsafe' is not recommended, as it can adversely affect accumulations.
+
+          * 'no' means the data types should not be cast at all.
+          * 'equiv' means only byte-order changes are allowed.
+          * 'safe' means only casts which can preserve values are allowed.
+          * 'same_kind' means only safe casts or casts within a kind,
+            like float64 to float32, are allowed.
+          * 'unsafe' means any data conversions may be done.
+
+        Default is 'safe'.
+    optimize : {False, True, 'greedy', 'optimal'}, optional
+        Controls if intermediate optimization should occur. No optimization
+        occurs if False; True defaults to the 'greedy' algorithm.
+        Also accepts an explicit contraction list from the ``np.einsum_path``
+        function. See ``np.einsum_path`` for more details. Defaults to False.
+
+    Returns
+    -------
+    output : ndarray
+        The calculation based on the Einstein summation convention.
+
+    See Also
+    --------
+    einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    The Einstein summation convention can be used to compute
+    many multi-dimensional, linear algebraic array operations. `einsum`
+    provides a succinct way of representing these.
+
+    A non-exhaustive list of these operations,
+    which can be computed by `einsum`, is shown below along with examples:
+
+    * Trace of an array, :py:func:`numpy.trace`.
+    * Return a diagonal, :py:func:`numpy.diag`.
+    * Array axis summations, :py:func:`numpy.sum`.
+    * Transpositions and permutations, :py:func:`numpy.transpose`.
+    * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
+    * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
+    * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
+    * Tensor contractions, :py:func:`numpy.tensordot`.
+    * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
+
+    The subscripts string is a comma-separated list of subscript labels,
+    where each label refers to a dimension of the corresponding operand.
+    Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+    is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+    appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
+    view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
+    describes traditional matrix multiplication and is equivalent to
+    :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
+    operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
+    to :py:func:`np.trace(a) <numpy.trace>`.
+
+    In *implicit mode*, the chosen subscripts are important
+    since the axes of the output are reordered alphabetically.  This
+    means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+    ``np.einsum('ji', a)`` takes its transpose. Additionally,
+    ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
+    ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+    multiplication since subscript 'h' precedes subscript 'i'.
+
+    In *explicit mode* the output can be directly controlled by
+    specifying output subscript labels.  This requires the
+    identifier '->' as well as the list of output subscript labels.
+    This feature increases the flexibility of the function since
+    summing can be disabled or forced when required. The call
+    ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
+    and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
+    The difference is that `einsum` does not allow broadcasting by default.
+    Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
+    order of the output subscript labels and therefore returns matrix
+    multiplication, unlike the example above in implicit mode.
+
+    To enable and control broadcasting, use an ellipsis.  Default
+    NumPy-style broadcasting is done by adding an ellipsis
+    to the left of each term, like ``np.einsum('...ii->...i', a)``.
+    To take the trace along the first and last axes,
+    you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
+    product with the left-most indices instead of rightmost, one can do
+    ``np.einsum('ij...,jk...->ik...', a, b)``.
+
+    When there is only one operand, no axes are summed, and no output
+    parameter is provided, a view into the operand is returned instead
+    of a new array.  Thus, taking the diagonal as ``np.einsum('ii->i', a)``
+    produces a view (changed in version 1.10.0).
+
+    `einsum` also provides an alternative way to provide the subscripts
+    and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
+    If the output shape is not provided in this format `einsum` will be
+    calculated in implicit mode, otherwise it will be performed explicitly.
+    The examples below have corresponding `einsum` calls with the two
+    parameter methods.
+
+    .. versionadded:: 1.10.0
+
+    Views returned from einsum are now writeable whenever the input array
+    is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
+    have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
+    and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
+    of a 2D array.
+
+    Examples
+    --------
+    >>> a = np.arange(25).reshape(5,5)
+    >>> b = np.arange(5)
+    >>> c = np.arange(6).reshape(2,3)
+
+    Trace of a matrix:
+
+    >>> np.einsum('ii', a)
+    60
+    >>> np.einsum(a, [0,0])
+    60
+    >>> np.trace(a)
+    60
+
+    Extract the diagonal (requires explicit form):
+
+    >>> np.einsum('ii->i', a)
+    array([ 0,  6, 12, 18, 24])
+    >>> np.einsum(a, [0,0], [0])
+    array([ 0,  6, 12, 18, 24])
+    >>> np.diag(a)
+    array([ 0,  6, 12, 18, 24])
+
+    Sum over an axis (requires explicit form):
+
+    >>> np.einsum('ij->i', a)
+    array([ 10,  35,  60,  85, 110])
+    >>> np.einsum(a, [0,1], [0])
+    array([ 10,  35,  60,  85, 110])
+    >>> np.sum(a, axis=1)
+    array([ 10,  35,  60,  85, 110])
+
+    For higher dimensional arrays summing a single axis can be done with ellipsis:
+
+    >>> np.einsum('...j->...', a)
+    array([ 10,  35,  60,  85, 110])
+    >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
+    array([ 10,  35,  60,  85, 110])
+
+    Compute a matrix transpose, or reorder any number of axes:
+
+    >>> np.einsum('ji', c)
+    array([[0, 3],
+           [1, 4],
+           [2, 5]])
+    >>> np.einsum('ij->ji', c)
+    array([[0, 3],
+           [1, 4],
+           [2, 5]])
+    >>> np.einsum(c, [1,0])
+    array([[0, 3],
+           [1, 4],
+           [2, 5]])
+    >>> np.transpose(c)
+    array([[0, 3],
+           [1, 4],
+           [2, 5]])
+
+    Vector inner products:
+
+    >>> np.einsum('i,i', b, b)
+    30
+    >>> np.einsum(b, [0], b, [0])
+    30
+    >>> np.inner(b,b)
+    30
+
+    Matrix vector multiplication:
+
+    >>> np.einsum('ij,j', a, b)
+    array([ 30,  80, 130, 180, 230])
+    >>> np.einsum(a, [0,1], b, [1])
+    array([ 30,  80, 130, 180, 230])
+    >>> np.dot(a, b)
+    array([ 30,  80, 130, 180, 230])
+    >>> np.einsum('...j,j', a, b)
+    array([ 30,  80, 130, 180, 230])
+
+    Broadcasting and scalar multiplication:
+
+    >>> np.einsum('..., ...', 3, c)
+    array([[ 0,  3,  6],
+           [ 9, 12, 15]])
+    >>> np.einsum(',ij', 3, c)
+    array([[ 0,  3,  6],
+           [ 9, 12, 15]])
+    >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
+    array([[ 0,  3,  6],
+           [ 9, 12, 15]])
+    >>> np.multiply(3, c)
+    array([[ 0,  3,  6],
+           [ 9, 12, 15]])
+
+    Vector outer product:
+
+    >>> np.einsum('i,j', np.arange(2)+1, b)
+    array([[0, 1, 2, 3, 4],
+           [0, 2, 4, 6, 8]])
+    >>> np.einsum(np.arange(2)+1, [0], b, [1])
+    array([[0, 1, 2, 3, 4],
+           [0, 2, 4, 6, 8]])
+    >>> np.outer(np.arange(2)+1, b)
+    array([[0, 1, 2, 3, 4],
+           [0, 2, 4, 6, 8]])
+
+    Tensor contraction:
+
+    >>> a = np.arange(60.).reshape(3,4,5)
+    >>> b = np.arange(24.).reshape(4,3,2)
+    >>> np.einsum('ijk,jil->kl', a, b)
+    array([[ 4400.,  4730.],
+           [ 4532.,  4874.],
+           [ 4664.,  5018.],
+           [ 4796.,  5162.],
+           [ 4928.,  5306.]])
+    >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
+    array([[ 4400.,  4730.],
+           [ 4532.,  4874.],
+           [ 4664.,  5018.],
+           [ 4796.,  5162.],
+           [ 4928.,  5306.]])
+    >>> np.tensordot(a,b, axes=([1,0],[0,1]))
+    array([[ 4400.,  4730.],
+           [ 4532.,  4874.],
+           [ 4664.,  5018.],
+           [ 4796.,  5162.],
+           [ 4928.,  5306.]])
+
+    Writeable returned arrays (since version 1.10.0):
+
+    >>> a = np.zeros((3, 3))
+    >>> np.einsum('ii->i', a)[:] = 1
+    >>> a
+    array([[ 1.,  0.,  0.],
+           [ 0.,  1.,  0.],
+           [ 0.,  0.,  1.]])
+
+    Example of ellipsis use:
+
+    >>> a = np.arange(6).reshape((3,2))
+    >>> b = np.arange(12).reshape((4,3))
+    >>> np.einsum('ki,jk->ij', a, b)
+    array([[10, 28, 46, 64],
+           [13, 40, 67, 94]])
+    >>> np.einsum('ki,...k->i...', a, b)
+    array([[10, 28, 46, 64],
+           [13, 40, 67, 94]])
+    >>> np.einsum('k...,jk', a, b)
+    array([[10, 28, 46, 64],
+           [13, 40, 67, 94]])
+
+    """)
+
+
+##############################################################################
+#
+# Documentation for ndarray attributes and methods
+#
+##############################################################################
+
+
+##############################################################################
+#
+# ndarray object
+#
+##############################################################################
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray',
+    """
+    ndarray(shape, dtype=float, buffer=None, offset=0,
+            strides=None, order=None)
+
+    An array object represents a multidimensional, homogeneous array
+    of fixed-size items.  An associated data-type object describes the
+    format of each element in the array (its byte-order, how many bytes it
+    occupies in memory, whether it is an integer, a floating point number,
+    or something else, etc.)
+
+    Arrays should be constructed using `array`, `zeros` or `empty` (refer
+    to the See Also section below).  The parameters given here refer to
+    a low-level method (`ndarray(...)`) for instantiating an array.
+
+    For more information, refer to the `numpy` module and examine the
+    methods and attributes of an array.
+
+    Parameters
+    ----------
+    (for the __new__ method; see Notes below)
+
+    shape : tuple of ints
+        Shape of created array.
+    dtype : data-type, optional
+        Any object that can be interpreted as a numpy data type.
+    buffer : object exposing buffer interface, optional
+        Used to fill the array with data.
+    offset : int, optional
+        Offset of array data in buffer.
+    strides : tuple of ints, optional
+        Strides of data in memory.
+    order : {'C', 'F'}, optional
+        Row-major (C-style) or column-major (Fortran-style) order.
+
+    Attributes
+    ----------
+    T : ndarray
+        Transpose of the array.
+    data : buffer
+        The array's elements, in memory.
+    dtype : dtype object
+        Describes the format of the elements in the array.
+    flags : dict
+        Dictionary containing information related to memory use, e.g.,
+        'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
+    flat : numpy.flatiter object
+        Flattened version of the array as an iterator.  The iterator
+        allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
+        assignment examples; TODO).
+    imag : ndarray
+        Imaginary part of the array.
+    real : ndarray
+        Real part of the array.
+    size : int
+        Number of elements in the array.
+    itemsize : int
+        The memory use of each array element in bytes.
+    nbytes : int
+        The total number of bytes required to store the array data,
+        i.e., ``itemsize * size``.
+    ndim : int
+        The array's number of dimensions.
+    shape : tuple of ints
+        Shape of the array.
+    strides : tuple of ints
+        The step-size required to move from one element to the next in
+        memory. For example, a contiguous ``(3, 4)`` array of type
+        ``int16`` in C-order has strides ``(8, 2)``.  This implies that
+        to move from element to element in memory requires jumps of 2 bytes.
+        To move from row-to-row, one needs to jump 8 bytes at a time
+        (``2 * 4``).
+    ctypes : ctypes object
+        Class containing properties of the array needed for interaction
+        with ctypes.
+    base : ndarray
+        If the array is a view into another array, that array is its `base`
+        (unless that array is also a view).  The `base` array is where the
+        array data is actually stored.
+
+    See Also
+    --------
+    array : Construct an array.
+    zeros : Create an array, each element of which is zero.
+    empty : Create an array, but leave its allocated memory unchanged (i.e.,
+            it contains "garbage").
+    dtype : Create a data-type.
+    numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>`
+                           w.r.t. its `dtype.type <numpy.dtype.type>`.
+
+    Notes
+    -----
+    There are two modes of creating an array using ``__new__``:
+
+    1. If `buffer` is None, then only `shape`, `dtype`, and `order`
+       are used.
+    2. If `buffer` is an object exposing the buffer interface, then
+       all keywords are interpreted.
+
+    No ``__init__`` method is needed because the array is fully initialized
+    after the ``__new__`` method.
+
+    Examples
+    --------
+    These examples illustrate the low-level `ndarray` constructor.  Refer
+    to the `See Also` section above for easier ways of constructing an
+    ndarray.
+
+    First mode, `buffer` is None:
+
+    >>> np.ndarray(shape=(2,2), dtype=float, order='F')
+    array([[0.0e+000, 0.0e+000], # random
+           [     nan, 2.5e-323]])
+
+    Second mode:
+
+    >>> np.ndarray((2,), buffer=np.array([1,2,3]),
+    ...            offset=np.int_().itemsize,
+    ...            dtype=int) # offset = 1*itemsize, i.e. skip first element
+    array([2, 3])
+
+    """)
+
+
+##############################################################################
+#
+# ndarray attributes
+#
+##############################################################################
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
+    """Array protocol: Python side."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
+    """Array priority."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
+    """Array protocol: C-struct side."""))
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__dlpack__',
+    """a.__dlpack__(*, stream=None)
+
+    DLPack Protocol: Part of the Array API."""))
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__dlpack_device__',
+    """a.__dlpack_device__()
+
+    DLPack Protocol: Part of the Array API."""))
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
+    """
+    Base object if memory is from some other object.
+
+    Examples
+    --------
+    The base of an array that owns its memory is None:
+
+    >>> x = np.array([1,2,3,4])
+    >>> x.base is None
+    True
+
+    Slicing creates a view, whose memory is shared with x:
+
+    >>> y = x[2:]
+    >>> y.base is x
+    True
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
+    """
+    An object to simplify the interaction of the array with the ctypes
+    module.
+
+    This attribute creates an object that makes it easier to use arrays
+    when calling shared libraries with the ctypes module. The returned
+    object has, among others, data, shape, and strides attributes (see
+    Notes below) which themselves return ctypes objects that can be used
+    as arguments to a shared library.
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    c : Python object
+        Possessing attributes data, shape, strides, etc.
+
+    See Also
+    --------
+    numpy.ctypeslib
+
+    Notes
+    -----
+    Below are the public attributes of this object which were documented
+    in "Guide to NumPy" (we have omitted undocumented public attributes,
+    as well as documented private attributes):
+
+    .. autoattribute:: numpy.core._internal._ctypes.data
+        :noindex:
+
+    .. autoattribute:: numpy.core._internal._ctypes.shape
+        :noindex:
+
+    .. autoattribute:: numpy.core._internal._ctypes.strides
+        :noindex:
+
+    .. automethod:: numpy.core._internal._ctypes.data_as
+        :noindex:
+
+    .. automethod:: numpy.core._internal._ctypes.shape_as
+        :noindex:
+
+    .. automethod:: numpy.core._internal._ctypes.strides_as
+        :noindex:
+
+    If the ctypes module is not available, then the ctypes attribute
+    of array objects still returns something useful, but ctypes objects
+    are not returned and errors may be raised instead. In particular,
+    the object will still have the ``as_parameter`` attribute which will
+    return an integer equal to the data attribute.
+
+    Examples
+    --------
+    >>> import ctypes
+    >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
+    >>> x
+    array([[0, 1],
+           [2, 3]], dtype=int32)
+    >>> x.ctypes.data
+    31962608 # may vary
+    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32))
+    <__main__.LP_c_uint object at 0x7ff2fc1fc200> # may vary
+    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)).contents
+    c_uint(0)
+    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents
+    c_ulong(4294967296)
+    >>> x.ctypes.shape
+    <numpy.core._internal.c_longlong_Array_2 object at 0x7ff2fc1fce60> # may vary
+    >>> x.ctypes.strides
+    <numpy.core._internal.c_longlong_Array_2 object at 0x7ff2fc1ff320> # may vary
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
+    """Python buffer object pointing to the start of the array's data."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
+    """
+    Data-type of the array's elements.
+
+    .. warning::
+
+        Setting ``arr.dtype`` is discouraged and may be deprecated in the
+        future.  Setting will replace the ``dtype`` without modifying the
+        memory (see also `ndarray.view` and `ndarray.astype`).
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    d : numpy dtype object
+
+    See Also
+    --------
+    ndarray.astype : Cast the values contained in the array to a new data-type.
+    ndarray.view : Create a view of the same data but a different data-type.
+    numpy.dtype
+
+    Examples
+    --------
+    >>> x
+    array([[0, 1],
+           [2, 3]])
+    >>> x.dtype
+    dtype('int32')
+    >>> type(x.dtype)
+    <class 'numpy.dtype[int32]'>
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
+    """
+    The imaginary part of the array.
+
+    Examples
+    --------
+    >>> x = np.sqrt([1+0j, 0+1j])
+    >>> x.imag
+    array([ 0.        ,  0.70710678])
+    >>> x.imag.dtype
+    dtype('float64')
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
+    """
+    Length of one array element in bytes.
+
+    Examples
+    --------
+    >>> x = np.array([1,2,3], dtype=np.float64)
+    >>> x.itemsize
+    8
+    >>> x = np.array([1,2,3], dtype=np.complex128)
+    >>> x.itemsize
+    16
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
+    """
+    Information about the memory layout of the array.
+
+    Attributes
+    ----------
+    C_CONTIGUOUS (C)
+        The data is in a single, C-style contiguous segment.
+    F_CONTIGUOUS (F)
+        The data is in a single, Fortran-style contiguous segment.
+    OWNDATA (O)
+        The array owns the memory it uses or borrows it from another object.
+    WRITEABLE (W)
+        The data area can be written to.  Setting this to False locks
+        the data, making it read-only.  A view (slice, etc.) inherits WRITEABLE
+        from its base array at creation time, but a view of a writeable
+        array may be subsequently locked while the base array remains writeable.
+        (The opposite is not true, in that a view of a locked array may not
+        be made writeable.  However, currently, locking a base object does not
+        lock any views that already reference it, so under that circumstance it
+        is possible to alter the contents of a locked array via a previously
+        created writeable view onto it.)  Attempting to change a non-writeable
+        array raises a RuntimeError exception.
+    ALIGNED (A)
+        The data and all elements are aligned appropriately for the hardware.
+    WRITEBACKIFCOPY (X)
+        This array is a copy of some other array. The C-API function
+        PyArray_ResolveWritebackIfCopy must be called before deallocating
+        this array, at which point the base array will be updated with the
+        contents of this array.
+    FNC
+        F_CONTIGUOUS and not C_CONTIGUOUS.
+    FORC
+        F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
+    BEHAVED (B)
+        ALIGNED and WRITEABLE.
+    CARRAY (CA)
+        BEHAVED and C_CONTIGUOUS.
+    FARRAY (FA)
+        BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
+
+    Notes
+    -----
+    The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
+    or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
+    names are only supported in dictionary access.
+
+    Only the WRITEBACKIFCOPY, WRITEABLE, and ALIGNED flags can be
+    changed by the user, via direct assignment to the attribute or dictionary
+    entry, or by calling `ndarray.setflags`.
+
+    The array flags cannot be set arbitrarily:
+
+    - WRITEBACKIFCOPY can only be set ``False``.
+    - ALIGNED can only be set ``True`` if the data is truly aligned.
+    - WRITEABLE can only be set ``True`` if the array owns its own memory
+      or the ultimate owner of the memory exposes a writeable buffer
+      interface or is a string.
+
+    Arrays can be both C-style and Fortran-style contiguous simultaneously.
+    This is clear for 1-dimensional arrays, but can also be true for higher
+    dimensional arrays.
+
+    Even for contiguous arrays a stride for a given dimension
+    ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
+    or the array has no elements.
+    It does *not* generally hold that ``self.strides[-1] == self.itemsize``
+    for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
+    Fortran-style contiguous arrays.
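+
+    Examples
+    --------
+    A short illustration of the two access styles described above:
+
+    >>> a = np.ones((2, 3))
+    >>> a.flags['C_CONTIGUOUS']
+    True
+    >>> a.flags.writeable
+    True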
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
+    """
+    A 1-D iterator over the array.
+
+    This is a `numpy.flatiter` instance, which acts similarly to, but is not
+    a subclass of, Python's built-in iterator object.
+
+    See Also
+    --------
+    flatten : Return a copy of the array collapsed into one dimension.
+
+    flatiter
+
+    Examples
+    --------
+    >>> x = np.arange(1, 7).reshape(2, 3)
+    >>> x
+    array([[1, 2, 3],
+           [4, 5, 6]])
+    >>> x.flat[3]
+    4
+    >>> x.T
+    array([[1, 4],
+           [2, 5],
+           [3, 6]])
+    >>> x.T.flat[3]
+    5
+    >>> type(x.flat)
+    <class 'numpy.flatiter'>
+
+    An assignment example:
+
+    >>> x.flat = 3; x
+    array([[3, 3, 3],
+           [3, 3, 3]])
+    >>> x.flat[[1,4]] = 1; x
+    array([[3, 1, 3],
+           [3, 1, 3]])
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
+    """
+    Total bytes consumed by the elements of the array.
+
+    Notes
+    -----
+    Does not include memory consumed by non-element attributes of the
+    array object.
+
+    See Also
+    --------
+    sys.getsizeof
+        Memory consumed by the object itself, not including the memory of
+        its parent object in the case of a view.
+        This does include memory consumed by non-element attributes.
+
+    Examples
+    --------
+    >>> x = np.zeros((3,5,2), dtype=np.complex128)
+    >>> x.nbytes
+    480
+    >>> np.prod(x.shape) * x.itemsize
+    480
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
+    """
+    Number of array dimensions.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> x.ndim
+    1
+    >>> y = np.zeros((2, 3, 4))
+    >>> y.ndim
+    3
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
+    """
+    The real part of the array.
+
+    Examples
+    --------
+    >>> x = np.sqrt([1+0j, 0+1j])
+    >>> x.real
+    array([ 1.        ,  0.70710678])
+    >>> x.real.dtype
+    dtype('float64')
+
+    See Also
+    --------
+    numpy.real : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
+    """
+    Tuple of array dimensions.
+
+    The shape property is usually used to get the current shape of an array,
+    but may also be used to reshape the array in-place by assigning a tuple of
+    array dimensions to it.  As with `numpy.reshape`, one of the new shape
+    dimensions can be -1, in which case its value is inferred from the size of
+    the array and the remaining dimensions. Reshaping an array in-place will
+    fail if a copy is required.
+
+    .. warning::
+
+        Setting ``arr.shape`` is discouraged and may be deprecated in the
+        future.  Using `ndarray.reshape` is the preferred approach.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3, 4])
+    >>> x.shape
+    (4,)
+    >>> y = np.zeros((2, 3, 4))
+    >>> y.shape
+    (2, 3, 4)
+    >>> y.shape = (3, 8)
+    >>> y
+    array([[ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]])
+    >>> y.shape = (3, 6)
+    Traceback (most recent call last):
+      File "", line 1, in 
+    ValueError: total size of new array must be unchanged
+    >>> np.zeros((4,2))[::2].shape = (-1,)
+    Traceback (most recent call last):
+      File "", line 1, in 
+    AttributeError: Incompatible shape for in-place modification. Use
+    `.reshape()` to make a copy with the desired shape.
+
+    See Also
+    --------
+    numpy.shape : Equivalent getter function.
+    numpy.reshape : Function similar to setting ``shape``.
+    ndarray.reshape : Method similar to setting ``shape``.
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
+    """
+    Number of elements in the array.
+
+    Equal to ``np.prod(a.shape)``, i.e., the product of the array's
+    dimensions.
+
+    Notes
+    -----
+    `a.size` returns a standard arbitrary precision Python integer. This
+    may not be the case with other methods of obtaining the same value
+    (like the suggested ``np.prod(a.shape)``, which returns an instance
+    of ``np.int_``), and may be relevant if the value is used further in
+    calculations that may overflow a fixed size integer type.
+
+    Examples
+    --------
+    >>> x = np.zeros((3, 5, 2), dtype=np.complex128)
+    >>> x.size
+    30
+    >>> np.prod(x.shape)
+    30
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
+    """
+    Tuple of bytes to step in each dimension when traversing an array.
+
+    The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
+    is::
+
+        offset = sum(np.array(i) * a.strides)
+
+    A more detailed explanation of strides can be found in the
+    "ndarray.rst" file in the NumPy reference guide.
+
+    .. warning::
+
+        Setting ``arr.strides`` is discouraged and may be deprecated in the
+        future.  `numpy.lib.stride_tricks.as_strided` should be preferred
+        to create a new view of the same data in a safer way.
+
+    Notes
+    -----
+    Imagine an array of 32-bit integers (each 4 bytes)::
+
+      x = np.array([[0, 1, 2, 3, 4],
+                    [5, 6, 7, 8, 9]], dtype=np.int32)
+
+    This array is stored in memory as 40 bytes, one after the other
+    (known as a contiguous block of memory).  The strides of an array tell
+    us how many bytes we have to skip in memory to move to the next position
+    along a certain axis.  For example, we have to skip 4 bytes (1 value) to
+    move to the next column, but 20 bytes (5 values) to get to the same
+    position in the next row.  As such, the strides for the array `x` will be
+    ``(20, 4)``.
+
+    See Also
+    --------
+    numpy.lib.stride_tricks.as_strided
+
+    Examples
+    --------
+    >>> y = np.reshape(np.arange(2*3*4), (2,3,4))
+    >>> y
+    array([[[ 0,  1,  2,  3],
+            [ 4,  5,  6,  7],
+            [ 8,  9, 10, 11]],
+           [[12, 13, 14, 15],
+            [16, 17, 18, 19],
+            [20, 21, 22, 23]]])
+    >>> y.strides
+    (48, 16, 4)
+    >>> y[1,1,1]
+    17
+    >>> offset=sum(y.strides * np.array((1,1,1)))
+    >>> offset/y.itemsize
+    17
+
+    >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
+    >>> x.strides
+    (32, 4, 224, 1344)
+    >>> i = np.array([3,5,2,2])
+    >>> offset = sum(i * x.strides)
+    >>> x[3,5,2,2]
+    813
+    >>> offset / x.itemsize
+    813
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
+    """
+    View of the transposed array.
+
+    Same as ``self.transpose()``.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> a
+    array([[1, 2],
+           [3, 4]])
+    >>> a.T
+    array([[1, 3],
+           [2, 4]])
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> a
+    array([1, 2, 3, 4])
+    >>> a.T
+    array([1, 2, 3, 4])
+
+    See Also
+    --------
+    transpose
+
+    """))
+
+
+##############################################################################
+#
+# ndarray methods
+#
+##############################################################################
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
+    """ a.__array__([dtype], /)
+
+    Returns either a new reference to self if dtype is not given or a new array
+    of provided data type if dtype is different from the current dtype of the
+    array.
+
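+    Examples
+    --------
+    A small sketch of both cases:
+
+    >>> a = np.array([1, 2])
+    >>> a.__array__() is a
+    True
+    >>> a.__array__(np.float64)
+    array([1., 2.])
+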
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
+    """a.__array_finalize__(obj, /)
+
+    Present so subclasses can call super. Does nothing.
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
+    """a.__array_prepare__(array[, context], /)
+
+    Returns a view of `array` with the same type as self.
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
+    """a.__array_wrap__(array[, context], /)
+
+    Returns a view of `array` with the same type as self.
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
+    """a.__copy__()
+
+    Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
+
+    Equivalent to ``a.copy(order='K')``.
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__class_getitem__',
+    """a.__class_getitem__(item, /)
+
+    Return a parametrized wrapper around the `~numpy.ndarray` type.
+
+    .. versionadded:: 1.22
+
+    Returns
+    -------
+    alias : types.GenericAlias
+        A parametrized `~numpy.ndarray` type.
+
+    Examples
+    --------
+    >>> from typing import Any
+    >>> import numpy as np
+
+    >>> np.ndarray[Any, np.dtype[Any]]
+    numpy.ndarray[typing.Any, numpy.dtype[typing.Any]]
+
+    See Also
+    --------
+    :pep:`585` : Type hinting generics in standard collections.
+    numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>`
+                        w.r.t. its `dtype.type <numpy.dtype.type>`.
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
+    """a.__deepcopy__(memo, /)
+
+    Used if :func:`copy.deepcopy` is called on an array.
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
+    """a.__reduce__()
+
+    For pickling.
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
+    """a.__setstate__(state, /)
+
+    For unpickling.
+
+    The `state` argument must be a sequence that contains the following
+    elements:
+
+    Parameters
+    ----------
+    version : int
+        optional pickle version. If omitted defaults to 0.
+    shape : tuple
+    dtype : data-type
+    isFortran : bool
+    rawdata : string or list
+        a binary string with the data (or a list if 'a' is an object array)
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
+    """
+    a.all(axis=None, out=None, keepdims=False, *, where=True)
+
+    Returns True if all elements evaluate to True.
+
+    Refer to `numpy.all` for full documentation.
+
+    See Also
+    --------
+    numpy.all : equivalent function
+
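+    Examples
+    --------
+    For example:
+
+    >>> a = np.array([1, 2, 0])
+    >>> a.all()
+    False
+    >>> a.any()
+    True
+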
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
+    """
+    a.any(axis=None, out=None, keepdims=False, *, where=True)
+
+    Returns True if any of the elements of `a` evaluate to True.
+
+    Refer to `numpy.any` for full documentation.
+
+    See Also
+    --------
+    numpy.any : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
+    """
+    a.argmax(axis=None, out=None, *, keepdims=False)
+
+    Return indices of the maximum values along the given axis.
+
+    Refer to `numpy.argmax` for full documentation.
+
+    See Also
+    --------
+    numpy.argmax : equivalent function
+
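+    Examples
+    --------
+    For example, the index of the largest element:
+
+    >>> a = np.array([1, 3, 2])
+    >>> a.argmax()
+    1
+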
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
+    """
+    a.argmin(axis=None, out=None, *, keepdims=False)
+
+    Return indices of the minimum values along the given axis.
+
+    Refer to `numpy.argmin` for detailed documentation.
+
+    See Also
+    --------
+    numpy.argmin : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
+    """
+    a.argsort(axis=-1, kind=None, order=None)
+
+    Returns the indices that would sort this array.
+
+    Refer to `numpy.argsort` for full documentation.
+
+    See Also
+    --------
+    numpy.argsort : equivalent function
+
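+    Examples
+    --------
+    For example:
+
+    >>> a = np.array([3, 1, 2])
+    >>> a.argsort()
+    array([1, 2, 0])
+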
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
+    """
+    a.argpartition(kth, axis=-1, kind='introselect', order=None)
+
+    Returns the indices that would partition this array.
+
+    Refer to `numpy.argpartition` for full documentation.
+
+    .. versionadded:: 1.8.0
+
+    See Also
+    --------
+    numpy.argpartition : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
+    """
+    a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
+
+    Copy of the array, cast to a specified type.
+
+    Parameters
+    ----------
+    dtype : str or dtype
+        Typecode or data-type to which the array is cast.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Controls the memory layout order of the result.
+        'C' means C order, 'F' means Fortran order, 'A'
+        means 'F' order if all the arrays are Fortran contiguous,
+        'C' order otherwise, and 'K' means as close to the
+        order the array elements appear in memory as possible.
+        Default is 'K'.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur. Defaults to 'unsafe'
+        for backwards compatibility.
+
+          * 'no' means the data types should not be cast at all.
+          * 'equiv' means only byte-order changes are allowed.
+          * 'safe' means only casts which can preserve values are allowed.
+          * 'same_kind' means only safe casts or casts within a kind,
+            like float64 to float32, are allowed.
+          * 'unsafe' means any data conversions may be done.
+    subok : bool, optional
+        If True, then sub-classes will be passed-through (default), otherwise
+        the returned array will be forced to be a base-class array.
+    copy : bool, optional
+        By default, astype always returns a newly allocated array. If this
+        is set to false, and the `dtype`, `order`, and `subok`
+        requirements are satisfied, the input array is returned instead
+        of a copy.
+
+    Returns
+    -------
+    arr_t : ndarray
+        Unless `copy` is False and the other conditions for returning the input
+        array are satisfied (see description for `copy` input parameter), `arr_t`
+        is a new array of the same shape as the input array, with dtype, order
+        given by `dtype`, `order`.
+
+    Notes
+    -----
+    .. versionchanged:: 1.17.0
+       Casting between a simple data type and a structured one is possible only
+       for "unsafe" casting.  Casting to multiple fields is allowed, but
+       casting from multiple fields is not.
+
+    .. versionchanged:: 1.9.0
+       Casting from numeric to string types in 'safe' casting mode requires
+       that the string dtype length is long enough to store the max
+       integer/float value converted.
+
+    Raises
+    ------
+    ComplexWarning
+        When casting from complex to float or int. To avoid this,
+        one should use ``a.real.astype(t)``.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 2.5])
+    >>> x
+    array([1. ,  2. ,  2.5])
+
+    >>> x.astype(int)
+    array([1, 2, 2])
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
+    """
+    a.byteswap(inplace=False)
+
+    Swap the bytes of the array elements
+
+    Toggle between little-endian and big-endian data representation by
+    returning a byteswapped array, optionally swapped in-place.
+    Arrays of byte-strings are not swapped. The real and imaginary
+    parts of a complex number are swapped individually.
+
+    Parameters
+    ----------
+    inplace : bool, optional
+        If ``True``, swap bytes in-place, default is ``False``.
+
+    Returns
+    -------
+    out : ndarray
+        The byteswapped array. If `inplace` is ``True``, this is
+        a view to self.
+
+    Examples
+    --------
+    >>> A = np.array([1, 256, 8755], dtype=np.int16)
+    >>> list(map(hex, A))
+    ['0x1', '0x100', '0x2233']
+    >>> A.byteswap(inplace=True)
+    array([  256,     1, 13090], dtype=int16)
+    >>> list(map(hex, A))
+    ['0x100', '0x1', '0x3322']
+
+    Arrays of byte-strings are not swapped
+
+    >>> A = np.array([b'ceg', b'fac'])
+    >>> A.byteswap()
+    array([b'ceg', b'fac'], dtype='|S3')
+
+    ``A.newbyteorder().byteswap()`` produces an array with the same values
+    but different representation in memory
+
+    >>> A = np.array([1, 2, 3])
+    >>> A.view(np.uint8)
+    array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
+           0, 0], dtype=uint8)
+    >>> A.newbyteorder().byteswap(inplace=True)
+    array([1, 2, 3])
+    >>> A.view(np.uint8)
+    array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0,
+           0, 3], dtype=uint8)
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
+    """
+    a.choose(choices, out=None, mode='raise')
+
+    Use an index array to construct a new array from a set of choices.
+
+    Refer to `numpy.choose` for full documentation.
+
+    See Also
+    --------
+    numpy.choose : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
+    """
+    a.clip(min=None, max=None, out=None, **kwargs)
+
+    Return an array whose values are limited to ``[min, max]``.
+    One of max or min must be given.
+
+    Refer to `numpy.clip` for full documentation.
+
+    See Also
+    --------
+    numpy.clip : equivalent function
+
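+    Examples
+    --------
+    For example, limiting values to the interval ``[1, 3]``:
+
+    >>> a = np.arange(5)
+    >>> a.clip(1, 3)
+    array([1, 1, 2, 3, 3])
+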
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
+    """
+    a.compress(condition, axis=None, out=None)
+
+    Return selected slices of this array along given axis.
+
+    Refer to `numpy.compress` for full documentation.
+
+    See Also
+    --------
+    numpy.compress : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
+    """
+    a.conj()
+
+    Complex-conjugate all elements.
+
+    Refer to `numpy.conjugate` for full documentation.
+
+    See Also
+    --------
+    numpy.conjugate : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
+    """
+    a.conjugate()
+
+    Return the complex conjugate, element-wise.
+
+    Refer to `numpy.conjugate` for full documentation.
+
+    See Also
+    --------
+    numpy.conjugate : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
+    """
+    a.copy(order='C')
+
+    Return a copy of the array.
+
+    Parameters
+    ----------
+    order : {'C', 'F', 'A', 'K'}, optional
+        Controls the memory layout of the copy. 'C' means C-order,
+        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+        'C' otherwise. 'K' means match the layout of `a` as closely
+        as possible. (Note that this function and :func:`numpy.copy` are very
+        similar but have different default values for their order=
+        arguments, and this function always passes sub-classes through.)
+
+    See also
+    --------
+    numpy.copy : Similar function with different default behavior
+    numpy.copyto
+
+    Notes
+    -----
+    This function is the preferred method for creating an array copy.  The
+    function :func:`numpy.copy` is similar, but it defaults to using order 'K',
+    and will not pass sub-classes through by default.
+
+    Examples
+    --------
+    >>> x = np.array([[1,2,3],[4,5,6]], order='F')
+
+    >>> y = x.copy()
+
+    >>> x.fill(0)
+
+    >>> x
+    array([[0, 0, 0],
+           [0, 0, 0]])
+
+    >>> y
+    array([[1, 2, 3],
+           [4, 5, 6]])
+
+    >>> y.flags['C_CONTIGUOUS']
+    True
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
+    """
+    a.cumprod(axis=None, dtype=None, out=None)
+
+    Return the cumulative product of the elements along the given axis.
+
+    Refer to `numpy.cumprod` for full documentation.
+
+    See Also
+    --------
+    numpy.cumprod : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
+    """
+    a.cumsum(axis=None, dtype=None, out=None)
+
+    Return the cumulative sum of the elements along the given axis.
+
+    Refer to `numpy.cumsum` for full documentation.
+
+    See Also
+    --------
+    numpy.cumsum : equivalent function
+
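+    Examples
+    --------
+    For example:
+
+    >>> a = np.array([1, 2, 3])
+    >>> a.cumsum()
+    array([1, 3, 6])
+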
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
+    """
+    a.diagonal(offset=0, axis1=0, axis2=1)
+
+    Return specified diagonals. In NumPy 1.9 the returned array is a
+    read-only view instead of a copy as in previous NumPy versions.  In
+    a future version the read-only restriction will be removed.
+
+    Refer to :func:`numpy.diagonal` for full documentation.
+
+    See Also
+    --------
+    numpy.diagonal : equivalent function
+
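+    Examples
+    --------
+    For example:
+
+    >>> a = np.arange(4).reshape(2, 2)
+    >>> a.diagonal()
+    array([0, 3])
+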
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('dot'))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
+    """a.dump(file)
+
+    Dump a pickle of the array to the specified file.
+    The array can be read back with pickle.load or numpy.load.
+
+    Parameters
+    ----------
+    file : str or Path
+        A string naming the dump file.
+
+        .. versionchanged:: 1.17.0
+            `pathlib.Path` objects are now accepted.
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
+    """
+    a.dumps()
+
+    Returns the pickle of the array as a string.
+    pickle.loads will convert the string back to an array.
+
+    Parameters
+    ----------
+    None
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
+    """
+    a.fill(value)
+
+    Fill the array with a scalar value.
+
+    Parameters
+    ----------
+    value : scalar
+        All elements of `a` will be assigned this value.
+
+    Examples
+    --------
+    >>> a = np.array([1, 2])
+    >>> a.fill(0)
+    >>> a
+    array([0, 0])
+    >>> a = np.empty(2)
+    >>> a.fill(1)
+    >>> a
+    array([1.,  1.])
+
+    Fill expects a scalar value and always behaves the same as assigning
+    to a single array element.  The following is a rare example where this
+    distinction is important:
+
+    >>> a = np.array([None, None], dtype=object)
+    >>> a[0] = np.array(3)
+    >>> a
+    array([array(3), None], dtype=object)
+    >>> a.fill(np.array(3))
+    >>> a
+    array([array(3), array(3)], dtype=object)
+
+    Where other forms of assignments will unpack the array being assigned:
+
+    >>> a[...] = np.array(3)
+    >>> a
+    array([3, 3], dtype=object)
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
+    """
+    a.flatten(order='C')
+
+    Return a copy of the array collapsed into one dimension.
+
+    Parameters
+    ----------
+    order : {'C', 'F', 'A', 'K'}, optional
+        'C' means to flatten in row-major (C-style) order.
+        'F' means to flatten in column-major (Fortran-
+        style) order. 'A' means to flatten in column-major
+        order if `a` is Fortran *contiguous* in memory,
+        row-major order otherwise. 'K' means to flatten
+        `a` in the order the elements occur in memory.
+        The default is 'C'.
+
+    Returns
+    -------
+    y : ndarray
+        A copy of the input array, flattened to one dimension.
+
+    See Also
+    --------
+    ravel : Return a flattened array.
+    flat : A 1-D flat iterator over the array.
+
+    Examples
+    --------
+    >>> a = np.array([[1,2], [3,4]])
+    >>> a.flatten()
+    array([1, 2, 3, 4])
+    >>> a.flatten('F')
+    array([1, 3, 2, 4])
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
+    """
+    a.getfield(dtype, offset=0)
+
+    Returns a field of the given array as a certain type.
+
+    A field is a view of the array data with a given data-type. The values in
+    the view are determined by the given type and the offset into the current
+    array in bytes. The offset needs to be such that the view dtype fits in the
+    array dtype; for example an array of dtype complex128 has 16-byte elements.
+    If taking a view with a 32-bit integer (4 bytes), the offset needs to be
+    between 0 and 12 bytes.
+
+    Parameters
+    ----------
+    dtype : str or dtype
+        The data type of the view. The dtype size of the view can not be larger
+        than that of the array itself.
+    offset : int
+        Number of bytes to skip before beginning the element view.
+
+    Examples
+    --------
+    >>> x = np.diag([1.+1.j]*2)
+    >>> x[1, 1] = 2 + 4.j
+    >>> x
+    array([[1.+1.j,  0.+0.j],
+           [0.+0.j,  2.+4.j]])
+    >>> x.getfield(np.float64)
+    array([[1.,  0.],
+           [0.,  2.]])
+
+    By choosing an offset of 8 bytes we can select the complex part of the
+    array for our view:
+
+    >>> x.getfield(np.float64, offset=8)
+    array([[1.,  0.],
+           [0.,  4.]])
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
+    """
+    a.item(*args)
+
+    Copy an element of an array to a standard Python scalar and return it.
+
+    Parameters
+    ----------
+    \\*args : Arguments (variable number and type)
+
+        * none: in this case, the method only works for arrays
+          with one element (`a.size == 1`), which element is
+          copied into a standard Python scalar object and returned.
+
+        * int_type: this argument is interpreted as a flat index into
+          the array, specifying which element to copy and return.
+
+        * tuple of int_types: functions as does a single int_type argument,
+          except that the argument is interpreted as an nd-index into the
+          array.
+
+    Returns
+    -------
+    z : Standard Python scalar object
+        A copy of the specified element of the array as a suitable
+        Python scalar
+
+    Notes
+    -----
+    When the data type of `a` is longdouble or clongdouble, item() returns
+    a scalar array object because there is no available Python scalar that
+    would not lose information. Void arrays return a buffer object for item(),
+    unless fields are defined, in which case a tuple is returned.
+
+    `item` is very similar to a[args], except, instead of an array scalar,
+    a standard Python scalar is returned. This can be useful for speeding up
+    access to elements of the array and doing arithmetic on elements of the
+    array using Python's optimized math.
+
+    Examples
+    --------
+    >>> np.random.seed(123)
+    >>> x = np.random.randint(9, size=(3, 3))
+    >>> x
+    array([[2, 2, 6],
+           [1, 3, 6],
+           [1, 0, 1]])
+    >>> x.item(3)
+    1
+    >>> x.item(7)
+    0
+    >>> x.item((0, 1))
+    2
+    >>> x.item((2, 2))
+    1
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
+    """
+    a.itemset(*args)
+
+    Insert scalar into an array (scalar is cast to array's dtype, if possible)
+
+    There must be at least 1 argument, and the last argument is defined
+    as *item*.  Then, ``a.itemset(*args)`` is equivalent to but faster
+    than ``a[args] = item``.  The item should be a scalar value and `args`
+    must select a single item in the array `a`.
+
+    Parameters
+    ----------
+    \\*args : Arguments
+        If one argument: a scalar, only used in case `a` is of size 1.
+        If two arguments: the last argument is the value to be set
+        and must be a scalar, the first argument specifies a single array
+        element location. It is either an int or a tuple.
+
+    Notes
+    -----
+    Compared to indexing syntax, `itemset` provides some speed increase
+    for placing a scalar into a particular location in an `ndarray`,
+    if you must do this.  However, generally this is discouraged:
+    among other problems, it complicates the appearance of the code.
+    Also, when using `itemset` (and `item`) inside a loop, be sure
+    to assign the methods to a local variable to avoid the attribute
+    look-up at each loop iteration.
+
+    Examples
+    --------
+    >>> np.random.seed(123)
+    >>> x = np.random.randint(9, size=(3, 3))
+    >>> x
+    array([[2, 2, 6],
+           [1, 3, 6],
+           [1, 0, 1]])
+    >>> x.itemset(4, 0)
+    >>> x.itemset((2, 2), 9)
+    >>> x
+    array([[2, 2, 6],
+           [1, 0, 6],
+           [1, 0, 9]])
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
+    """
+    a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+    Return the maximum along a given axis.
+
+    Refer to `numpy.amax` for full documentation.
+
+    See Also
+    --------
+    numpy.amax : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
+    """
+    a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True)
+
+    Returns the average of the array elements along given axis.
+
+    Refer to `numpy.mean` for full documentation.
+
+    See Also
+    --------
+    numpy.mean : equivalent function
+
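+    Examples
+    --------
+    For example:
+
+    >>> a = np.array([[1., 2.], [3., 4.]])
+    >>> a.mean()
+    2.5
+    >>> a.mean(axis=0)
+    array([2., 3.])
+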
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
+    """
+    a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+    Return the minimum along a given axis.
+
+    Refer to `numpy.amin` for full documentation.
+
+    See Also
+    --------
+    numpy.amin : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
+    """
+    arr.newbyteorder(new_order='S', /)
+
+    Return the array with the same data viewed with a different byte order.
+
+    Equivalent to::
+
+        arr.view(arr.dtype.newbyteorder(new_order))
+
+    Changes are also made in all fields and sub-arrays of the array data
+    type.
+
+    Parameters
+    ----------
+    new_order : string, optional
+        Byte order to force; a value from the byte order specifications
+        below. `new_order` codes can be any of:
+
+        * 'S' - swap dtype from current to opposite endian
+        * {'<', 'little'} - little endian
+        * {'>', 'big'} - big endian
+        * {'=', 'native'} - native order, equivalent to `sys.byteorder`
+        * {'|', 'I'} - ignore (no change to byte order)
+
+        The default value ('S') results in swapping the current
+        byte order.
+
+
+    Returns
+    -------
+    new_arr : array
+        New array object with the dtype reflecting given change to the
+        byte order.
+
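+    Examples
+    --------
+    A small sketch; the values shown assume a little-endian machine:
+
+    >>> A = np.array([1, 256], dtype=np.int16)
+    >>> A.newbyteorder()
+    array([256,   1], dtype='>i2') # may vary
+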
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
+    """
+    a.nonzero()
+
+    Return the indices of the elements that are non-zero.
+
+    Refer to `numpy.nonzero` for full documentation.
+
+    See Also
+    --------
+    numpy.nonzero : equivalent function
+
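+    Examples
+    --------
+    For example:
+
+    >>> x = np.array([[1, 0], [0, 2]])
+    >>> x.nonzero()
+    (array([0, 1]), array([0, 1]))
+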
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
+    """
+    a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True)
+
+    Return the product of the array elements over the given axis
+
+    Refer to `numpy.prod` for full documentation.
+
+    See Also
+    --------
+    numpy.prod : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
+    """
+    a.ptp(axis=None, out=None, keepdims=False)
+
+    Peak to peak (maximum - minimum) value along a given axis.
+
+    Refer to `numpy.ptp` for full documentation.
+
+    See Also
+    --------
+    numpy.ptp : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
+    """
+    a.put(indices, values, mode='raise')
+
+    Set ``a.flat[n] = values[n]`` for all `n` in indices.
+
+    Refer to `numpy.put` for full documentation.
+
+    See Also
+    --------
+    numpy.put : equivalent function
+
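+    Examples
+    --------
+    For example:
+
+    >>> a = np.arange(5)
+    >>> a.put([0, 2], [-44, -55])
+    >>> a
+    array([-44,   1, -55,   3,   4])
+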
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
+    """
+    a.ravel([order])
+
+    Return a flattened array.
+
+    Refer to `numpy.ravel` for full documentation.
+
+    See Also
+    --------
+    numpy.ravel : equivalent function
+
+    ndarray.flat : a flat iterator on the array.
+
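+    Examples
+    --------
+    For example:
+
+    >>> x = np.array([[1, 2], [3, 4]])
+    >>> x.ravel()
+    array([1, 2, 3, 4])
+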
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
+    """
+    a.repeat(repeats, axis=None)
+
+    Repeat elements of an array.
+
+    Refer to `numpy.repeat` for full documentation.
+
+    See Also
+    --------
+    numpy.repeat : equivalent function
+
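+    Examples
+    --------
+    For example:
+
+    >>> a = np.array([1, 2])
+    >>> a.repeat(3)
+    array([1, 1, 1, 2, 2, 2])
+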
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
+    """
+    a.reshape(shape, order='C')
+
+    Returns an array containing the same data with a new shape.
+
+    Refer to `numpy.reshape` for full documentation.
+
+    See Also
+    --------
+    numpy.reshape : equivalent function
+
+    Notes
+    -----
+    Unlike the free function `numpy.reshape`, this method on `ndarray` allows
+    the elements of the shape parameter to be passed in as separate arguments.
+    For example, ``a.reshape(10, 11)`` is equivalent to
+    ``a.reshape((10, 11))``.
+
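+    Examples
+    --------
+    For example, both calling conventions give the same result:
+
+    >>> a = np.arange(6)
+    >>> a.reshape(2, 3)
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> a.reshape((2, 3))
+    array([[0, 1, 2],
+           [3, 4, 5]])
+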
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
+    """
+    a.resize(new_shape, refcheck=True)
+
+    Change shape and size of array in-place.
+
+    Parameters
+    ----------
+    new_shape : tuple of ints, or `n` ints
+        Shape of resized array.
+    refcheck : bool, optional
+        If False, reference count will not be checked. Default is True.
+
+    Returns
+    -------
+    None
+
+    Raises
+    ------
+    ValueError
+        If `a` does not own its own data or references or views to it exist,
+        and the data memory must be changed.
+        PyPy only: will always raise if the data memory must be changed, since
+        there is no reliable way to determine if references or views to it
+        exist.
+
+    SystemError
+        If the `order` keyword argument is specified. This behaviour is a
+        bug in NumPy.
+
+    See Also
+    --------
+    resize : Return a new array with the specified shape.
+
+    Notes
+    -----
+    This reallocates space for the data area if necessary.
+
+    Only contiguous arrays (data elements consecutive in memory) can be
+    resized.
+
+    The purpose of the reference count check is to make sure you
+    do not use this array as a buffer for another Python object and then
+    reallocate the memory. However, reference counts can increase in
+    other ways so if you are sure that you have not shared the memory
+    for this array with another Python object, then you may safely set
+    `refcheck` to False.
+
+    Examples
+    --------
+    Shrinking an array: array is flattened (in the order that the data are
+    stored in memory), resized, and reshaped:
+
+    >>> a = np.array([[0, 1], [2, 3]], order='C')
+    >>> a.resize((2, 1))
+    >>> a
+    array([[0],
+           [1]])
+
+    >>> a = np.array([[0, 1], [2, 3]], order='F')
+    >>> a.resize((2, 1))
+    >>> a
+    array([[0],
+           [2]])
+
+    Enlarging an array: as above, but missing entries are filled with zeros:
+
+    >>> b = np.array([[0, 1], [2, 3]])
+    >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
+    >>> b
+    array([[0, 1, 2],
+           [3, 0, 0]])
+
+    Referencing an array prevents resizing...
+
+    >>> c = a
+    >>> a.resize((1, 1))
+    Traceback (most recent call last):
+    ...
+    ValueError: cannot resize an array that references or is referenced ...
+
+    Unless `refcheck` is False:
+
+    >>> a.resize((1, 1), refcheck=False)
+    >>> a
+    array([[0]])
+    >>> c
+    array([[0]])
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
+    """
+    a.round(decimals=0, out=None)
+
+    Return `a` with each element rounded to the given number of decimals.
+
+    Refer to `numpy.around` for full documentation.
+
+    See Also
+    --------
+    numpy.around : equivalent function
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
+    """
+    a.searchsorted(v, side='left', sorter=None)
+
+    Find indices where elements of v should be inserted in a to maintain order.
+
+    For full documentation, see `numpy.searchsorted`
+
+    See Also
+    --------
+    numpy.searchsorted : equivalent function
+
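+    Examples
+    --------
+    For example:
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> a.searchsorted(3)
+    2
+    >>> a.searchsorted(3, side='right')
+    3
+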
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
+    """
+    a.setfield(val, dtype, offset=0)
+
+    Put a value into a specified place in a field defined by a data-type.
+
+    Place `val` into `a`'s field defined by `dtype` and beginning `offset`
+    bytes into the field.
+
+    Parameters
+    ----------
+    val : object
+        Value to be placed in field.
+    dtype : dtype object
+        Data-type of the field in which to place `val`.
+    offset : int, optional
+        The number of bytes into the field at which to place `val`.
+
+    Returns
+    -------
+    None
+
+    See Also
+    --------
+    getfield
+
+    Examples
+    --------
+    >>> x = np.eye(3)
+    >>> x.getfield(np.float64)
+    array([[1.,  0.,  0.],
+           [0.,  1.,  0.],
+           [0.,  0.,  1.]])
+    >>> x.setfield(3, np.int32)
+    >>> x.getfield(np.int32)
+    array([[3, 3, 3],
+           [3, 3, 3],
+           [3, 3, 3]], dtype=int32)
+    >>> x
+    array([[1.0e+000, 1.5e-323, 1.5e-323],
+           [1.5e-323, 1.0e+000, 1.5e-323],
+           [1.5e-323, 1.5e-323, 1.0e+000]])
+    >>> x.setfield(np.eye(3), np.int32)
+    >>> x
+    array([[1.,  0.,  0.],
+           [0.,  1.,  0.],
+           [0.,  0.,  1.]])
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
+    """
+    a.setflags(write=None, align=None, uic=None)
+
+    Set array flags WRITEABLE, ALIGNED, WRITEBACKIFCOPY,
+    respectively.
+
+    These Boolean-valued flags affect how numpy interprets the memory
+    area used by `a` (see Notes below). The ALIGNED flag can only
+    be set to True if the data is actually aligned according to the type.
+    The WRITEBACKIFCOPY flag can never be set
+    to True. The flag WRITEABLE can only be set to True if the array owns its
+    own memory, or the ultimate owner of the memory exposes a writeable buffer
+    interface, or is a string. (The exception for string is made so that
+    unpickling can be done without copying memory.)
+
+    Parameters
+    ----------
+    write : bool, optional
+        Describes whether or not `a` can be written to.
+    align : bool, optional
+        Describes whether or not `a` is aligned properly for its type.
+    uic : bool, optional
+        Describes whether or not `a` is a copy of another "base" array.
+
+    Notes
+    -----
+    Array flags provide information about how the memory area used
+    for the array is to be interpreted. There are 7 Boolean flags
+    in use, only three of which can be changed by the user:
+    WRITEBACKIFCOPY, WRITEABLE, and ALIGNED.
+
+    WRITEABLE (W) the data area can be written to;
+
+    ALIGNED (A) the data and strides are aligned appropriately for the hardware
+    (as determined by the compiler);
+
+    WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
+    by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
+    called, the base array will be updated with the contents of this array.
+
+    All flags can be accessed using the single (upper case) letter as well
+    as the full name.
+
+    Examples
+    --------
+    >>> y = np.array([[3, 1, 7],
+    ...               [2, 0, 0],
+    ...               [8, 5, 9]])
+    >>> y
+    array([[3, 1, 7],
+           [2, 0, 0],
+           [8, 5, 9]])
+    >>> y.flags
+      C_CONTIGUOUS : True
+      F_CONTIGUOUS : False
+      OWNDATA : True
+      WRITEABLE : True
+      ALIGNED : True
+      WRITEBACKIFCOPY : False
+    >>> y.setflags(write=0, align=0)
+    >>> y.flags
+      C_CONTIGUOUS : True
+      F_CONTIGUOUS : False
+      OWNDATA : True
+      WRITEABLE : False
+      ALIGNED : False
+      WRITEBACKIFCOPY : False
+    >>> y.setflags(uic=1)
+    Traceback (most recent call last):
+      File "", line 1, in 
+    ValueError: cannot set WRITEBACKIFCOPY flag to True
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
+    """
+    a.sort(axis=-1, kind=None, order=None)
+
+    Sort an array in-place. Refer to `numpy.sort` for full documentation.
+
+    Parameters
+    ----------
+    axis : int, optional
+        Axis along which to sort. Default is -1, which means sort along the
+        last axis.
+    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+        Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+        and 'mergesort' use timsort under the covers and, in general, the
+        actual implementation will vary with datatype. The 'mergesort' option
+        is retained for backwards compatibility.
+
+        .. versionchanged:: 1.15.0
+           The 'stable' option was added.
+
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc.  A single field can
+        be specified as a string, and not all fields need be specified,
+        but unspecified fields will still be used, in the order in which
+        they come up in the dtype, to break ties.
+
+    See Also
+    --------
+    numpy.sort : Return a sorted copy of an array.
+    numpy.argsort : Indirect sort.
+    numpy.lexsort : Indirect stable sort on multiple keys.
+    numpy.searchsorted : Find elements in sorted array.
+    numpy.partition: Partial sort.
+
+    Notes
+    -----
+    See `numpy.sort` for notes on the different sorting algorithms.
+
+    Examples
+    --------
+    >>> a = np.array([[1,4], [3,1]])
+    >>> a.sort(axis=1)
+    >>> a
+    array([[1, 4],
+           [1, 3]])
+    >>> a.sort(axis=0)
+    >>> a
+    array([[1, 3],
+           [1, 4]])
+
+    Use the `order` keyword to specify a field to use when sorting a
+    structured array:
+
+    >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
+    >>> a.sort(order='y')
+    >>> a
+    array([(b'c', 1), (b'a', 2)],
+          dtype=[('x', 'S1'), ('y', '<i8')])
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
+    """
+    a.partition(kth, axis=-1, kind='introselect', order=None)
+
+    Rearranges the elements in the array in such a way that the value of the
+    element in kth position is in the position it would be in a sorted array.
+    All elements smaller than the kth element are moved before this element
+    and all equal or greater are moved behind it. The ordering of the
+    elements in the two partitions is undefined.
+
+    .. versionadded:: 1.8.0
+
+    Refer to `numpy.partition` for full documentation.
+
+    See Also
+    --------
+    numpy.partition : equivalent function
+
+    Examples
+    --------
+    >>> a = np.array([3, 4, 2, 1])
+    >>> a.partition(3)
+    >>> a
+    array([2, 1, 3, 4])
+
+    >>> a.partition((1, 3))
+    >>> a
+    array([1, 2, 3, 4])
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
+    """
+    a.squeeze(axis=None)
+
+    Remove axes of length one from `a`.
+
+    Refer to `numpy.squeeze` for full documentation.
+
+    See Also
+    --------
+    numpy.squeeze : equivalent function
+
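+    Examples
+    --------
+    For example:
+
+    >>> x = np.zeros((1, 3, 1))
+    >>> x.squeeze().shape
+    (3,)
+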
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
+    """
+    a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
+
+    Returns the standard deviation of the array elements along given axis.
+
+    Refer to `numpy.std` for full documentation.
+
+    See Also
+    --------
+    numpy.std : equivalent function
+
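+    Examples
+    --------
+    For example:
+
+    >>> a = np.array([1., 3.])
+    >>> a.std()
+    1.0
+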
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
+    """
+    a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
+
+    Return the sum of the array elements over the given axis.
+
+    Refer to `numpy.sum` for full documentation.
+
+    See Also
+    --------
+    numpy.sum : equivalent function
+
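+    Examples
+    --------
+    For example:
+
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> a.sum()
+    10
+    >>> a.sum(axis=0)
+    array([4, 6])
+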
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
+    """
+    a.swapaxes(axis1, axis2)
+
+    Return a view of the array with `axis1` and `axis2` interchanged.
+
+    Refer to `numpy.swapaxes` for full documentation.
+
+    See Also
+    --------
+    numpy.swapaxes : equivalent function
+
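+    Examples
+    --------
+    For example:
+
+    >>> x = np.array([[1, 2, 3]])
+    >>> x.swapaxes(0, 1)
+    array([[1],
+           [2],
+           [3]])
+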
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
+    """
+    a.take(indices, axis=None, out=None, mode='raise')
+
+    Return an array formed from the elements of `a` at the given indices.
+
+    Refer to `numpy.take` for full documentation.
+
+    See Also
+    --------
+    numpy.take : equivalent function
+
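+    Examples
+    --------
+    For example:
+
+    >>> a = np.array([4, 3, 5, 7])
+    >>> a.take([0, 2])
+    array([4, 5])
+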
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
+    """
+    a.tofile(fid, sep="", format="%s")
+
+    Write array to a file as text or binary (default).
+
+    Data is always written in 'C' order, independent of the order of `a`.
+    The data produced by this method can be recovered using the function
+    fromfile().
+
+    Parameters
+    ----------
+    fid : file or str or Path
+        An open file object, or a string containing a filename.
+
+        .. versionchanged:: 1.17.0
+            `pathlib.Path` objects are now accepted.
+
+    sep : str
+        Separator between array items for text output.
+        If "" (empty), a binary file is written, equivalent to
+        ``file.write(a.tobytes())``.
+    format : str
+        Format string for text file output.
+        Each entry in the array is formatted to text by first converting
+        it to the closest Python type, and then using "format" % item.
+
+    Notes
+    -----
+    This is a convenience function for quick storage of array data.
+    Information on endianness and precision is lost, so this method is not a
+    good choice for files intended to archive data or transport data between
+    machines with different endianness. Some of these problems can be overcome
+    by outputting the data as text files, at the expense of speed and file
+    size.
+
+    When fid is a file object, array contents are directly written to the
+    file, bypassing the file object's ``write`` method. As a result, tofile
+    cannot be used with file objects supporting compression (e.g., GzipFile)
+    or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
+    """
+    a.tolist()
+
+    Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
+
+    Return a copy of the array data as a (nested) Python list.
+    Data items are converted to the nearest compatible builtin Python type, via
+    the `~numpy.ndarray.item` function.
+
+    If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
+    not be a list at all, but a simple Python scalar.
+
+    Parameters
+    ----------
+    none
+
+    Returns
+    -------
+    y : object, or list of object, or list of list of object, or ...
+        The possibly nested list of array elements.
+
+    Notes
+    -----
+    The array may be recreated via ``a = np.array(a.tolist())``, although this
+    may sometimes lose precision.
+
+    Examples
+    --------
+    For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
+    except that ``tolist`` changes numpy scalars to Python scalars:
+
+    >>> a = np.uint32([1, 2])
+    >>> a_list = list(a)
+    >>> a_list
+    [1, 2]
+    >>> type(a_list[0])
+    <class 'numpy.uint32'>
+    >>> a_tolist = a.tolist()
+    >>> a_tolist
+    [1, 2]
+    >>> type(a_tolist[0])
+    <class 'int'>
+
+    Additionally, for a 2D array, ``tolist`` applies recursively:
+
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> list(a)
+    [array([1, 2]), array([3, 4])]
+    >>> a.tolist()
+    [[1, 2], [3, 4]]
+
+    The base case for this recursion is a 0D array:
+
+    >>> a = np.array(1)
+    >>> list(a)
+    Traceback (most recent call last):
+      ...
+    TypeError: iteration over a 0-d array
+    >>> a.tolist()
+    1
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tobytes', """
+    a.tobytes(order='C')
+
+    Construct Python bytes containing the raw data bytes in the array.
+
+    Constructs Python bytes showing a copy of the raw contents of
+    data memory. The bytes object is produced in C-order by default.
+    This behavior is controlled by the ``order`` parameter.
+
+    .. versionadded:: 1.9.0
+
+    Parameters
+    ----------
+    order : {'C', 'F', 'A'}, optional
+        Controls the memory layout of the bytes object. 'C' means C-order,
+        'F' means F-order, 'A' (short for *Any*) means 'F' if `a` is
+        Fortran contiguous, 'C' otherwise. Default is 'C'.
+
+    Returns
+    -------
+    s : bytes
+        Python bytes exhibiting a copy of `a`'s raw data.
+
+    See also
+    --------
+    frombuffer
+        Inverse of this operation, construct a 1-dimensional array from Python
+        bytes.
+
+    Examples
+    --------
+    >>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
+    >>> x.tobytes()
+    b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
+    >>> x.tobytes('C') == x.tobytes()
+    True
+    >>> x.tobytes('F')
+    b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', r"""
+    a.tostring(order='C')
+
+    A compatibility alias for `tobytes`, with exactly the same behavior.
+
+    Despite its name, it returns `bytes` not `str`\ s.
+
+    .. deprecated:: 1.19.0
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
+    """
+    a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
+
+    Return the sum along diagonals of the array.
+
+    Refer to `numpy.trace` for full documentation.
+
+    See Also
+    --------
+    numpy.trace : equivalent function
+
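+    Examples
+    --------
+    For example:
+
+    >>> a = np.eye(3)
+    >>> a.trace()
+    3.0
+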
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
+    """
+    a.transpose(*axes)
+
+    Returns a view of the array with axes transposed.
+
+    Refer to `numpy.transpose` for full documentation.
+
+    Parameters
+    ----------
+    axes : None, tuple of ints, or `n` ints
+
+     * None or no argument: reverses the order of the axes.
+
+     * tuple of ints: `i` in the `j`-th place in the tuple means that the
+       array's `i`-th axis becomes the transposed array's `j`-th axis.
+
+     * `n` ints: same as an n-tuple of the same ints (this form is
+       intended simply as a "convenience" alternative to the tuple form).
+
+    Returns
+    -------
+    p : ndarray
+        View of the array with its axes suitably permuted.
+
+    See Also
+    --------
+    transpose : Equivalent function.
+    ndarray.T : Array property returning the array transposed.
+    ndarray.reshape : Give a new shape to an array without changing its data.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> a
+    array([[1, 2],
+           [3, 4]])
+    >>> a.transpose()
+    array([[1, 3],
+           [2, 4]])
+    >>> a.transpose((1, 0))
+    array([[1, 3],
+           [2, 4]])
+    >>> a.transpose(1, 0)
+    array([[1, 3],
+           [2, 4]])
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> a
+    array([1, 2, 3, 4])
+    >>> a.transpose()
+    array([1, 2, 3, 4])
+
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
+    """
+    a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
+
+    Returns the variance of the array elements, along given axis.
+
+    Refer to `numpy.var` for full documentation.
+
+    See Also
+    --------
+    numpy.var : equivalent function
+
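+    Examples
+    --------
+    For example:
+
+    >>> a = np.array([1., 3.])
+    >>> a.var()
+    1.0
+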
+    """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
+    """
+    a.view([dtype][, type])
+
+    New view of array with the same data.
+
+    .. note::
+        Passing None for ``dtype`` is different from omitting the parameter,
+        since the former invokes ``dtype(None)`` which is an alias for
+        ``dtype('float_')``.
+
+    Parameters
+    ----------
+    dtype : data-type or ndarray sub-class, optional
+        Data-type descriptor of the returned view, e.g., float32 or int16.
+        Omitting it results in the view having the same data-type as `a`.
+        This argument can also be specified as an ndarray sub-class, which
+        then specifies the type of the returned object (this is equivalent to
+        setting the ``type`` parameter).
+    type : Python type, optional
+        Type of the returned view, e.g., ndarray or matrix.  Again, omission
+        of the parameter results in type preservation.
+
+    Notes
+    -----
+    ``a.view()`` is used two different ways:
+
+    ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
+    of the array's memory with a different data-type.  This can cause a
+    reinterpretation of the bytes of memory.
+
+    ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
+    returns an instance of `ndarray_subclass` that looks at the same array
+    (same shape, dtype, etc.)  This does not cause a reinterpretation of the
+    memory.
+
+    For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
+    bytes per entry than the previous dtype (for example, converting a regular
+    array to a structured array), then the last axis of ``a`` must be
+    contiguous. This axis will be resized in the result.
+
+    .. versionchanged:: 1.23.0
+       Only the last axis needs to be contiguous. Previously, the entire array
+       had to be C-contiguous.
+
+    Examples
+    --------
+    >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
+
+    Viewing array data using a different type and dtype:
+
+    >>> y = x.view(dtype=np.int16, type=np.matrix)
+    >>> y
+    matrix([[513]], dtype=int16)
+    >>> print(type(y))
+    <class 'numpy.matrix'>
+
+    Creating a view on a structured array so it can be used in calculations
+
+    >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
+    >>> xv = x.view(dtype=np.int8).reshape(-1,2)
+    >>> xv
+    array([[1, 2],
+           [3, 4]], dtype=int8)
+    >>> xv.mean(0)
+    array([2.,  3.])
+
+    Making changes to the view changes the underlying array
+
+    >>> xv[0,1] = 20
+    >>> x
+    array([(1, 20), (3,  4)], dtype=[('a', 'i1'), ('b', 'i1')])
+
+    Using a view to convert an array to a recarray:
+
+    >>> z = x.view(np.recarray)
+    >>> z.a
+    array([1, 3], dtype=int8)
+
+    Views share data:
+
+    >>> x[0] = (9, 10)
+    >>> z[0]
+    (9, 10)
+
+    Views that change the dtype size (bytes per entry) should normally be
+    avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
+
+    >>> x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int16)
+    >>> y = x[:, ::2]
+    >>> y
+    array([[1, 3],
+           [4, 6]], dtype=int16)
+    >>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
+    Traceback (most recent call last):
+        ...
+    ValueError: To change to a dtype of a different size, the last axis must be contiguous
+    >>> z = y.copy()
+    >>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
+    array([[(1, 3)],
+           [(4, 6)]], dtype=[('width', '<i2'), ('length', '<i2')])
+
+    However, views that change dtype are totally fine for arrays with a
+    contiguous last axis, even if the rest of the axes are not C-contiguous:
+
+    >>> x = np.arange(2 * 3 * 4, dtype=np.int8).reshape(2, 3, 4)
+    >>> x.transpose(1, 0, 2).view(np.int16)
+    array([[[ 256,  770],
+            [3340, 3854]],
+    <BLANKLINE>
+           [[1284, 1798],
+            [4368, 4882]],
+    <BLANKLINE>
+           [[2312, 2826],
+            [5396, 5910]]], dtype=int16)
+
+    """))
+
+
+##############################################################################
+#
+# umath functions
+#
+##############################################################################
+
+add_newdoc('numpy.core.umath', 'frompyfunc',
+    """
+    frompyfunc(func, /, nin, nout, *[, identity])
+
+    Takes an arbitrary Python function and returns a NumPy ufunc.
+
+    Can be used, for example, to add broadcasting to a built-in Python
+    function (see Examples section).
+
+    Parameters
+    ----------
+    func : Python function object
+        An arbitrary Python function.
+    nin : int
+        The number of input arguments.
+    nout : int
+        The number of objects returned by `func`.
+    identity : object, optional
+        The value to use for the `~numpy.ufunc.identity` attribute of the resulting
+        object. If specified, this is equivalent to setting the underlying
+        C ``identity`` field to ``PyUFunc_IdentityValue``.
+        If omitted, the identity is set to ``PyUFunc_None``. Note that this is
+        _not_ equivalent to setting the identity to ``None``, which implies the
+        operation is reorderable.
+
+    Returns
+    -------
+    out : ufunc
+        Returns a NumPy universal function (``ufunc``) object.
+
+    See Also
+    --------
+    vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy.
+
+    Notes
+    -----
+    The returned ufunc always returns PyObject arrays.
+
+    Examples
+    --------
+    Use frompyfunc to add broadcasting to the Python function ``oct``:
+
+    >>> oct_array = np.frompyfunc(oct, 1, 1)
+    >>> oct_array(np.array((10, 30, 100)))
+    array(['0o12', '0o36', '0o144'], dtype=object)
+    >>> np.array((oct(10), oct(30), oct(100))) # for comparison
+    array(['0o12', '0o36', '0o144'], dtype='<U5')
+
+    """)
+
+add_newdoc('numpy.core.umath', 'geterrobj',
+    """
+    geterrobj()
+
+    Return the current object that defines floating-point error handling.
+
+    The error object contains all information that defines the error handling
+    behavior in NumPy. `geterrobj` is used internally by the other
+    functions that get and set error handling behavior (`geterr`, `seterr`,
+    `geterrcall`, `seterrcall`).
+
+    Returns
+    -------
+    errobj : list
+        The error object, a list containing three elements:
+        [internal numpy buffer size, error mask, error callback function].
+
+        The error mask is a single integer that holds the treatment information
+        on all four floating point errors. The information for each error type
+        is contained in three bits of the integer. If we print it in base 8, we
+        can see what treatment is set for "invalid", "under", "over", and
+        "divide" (in that order). The printed string can be interpreted with
+
+        * 0 : 'ignore'
+        * 1 : 'warn'
+        * 2 : 'raise'
+        * 3 : 'call'
+        * 4 : 'print'
+        * 5 : 'log'
+
+    See Also
+    --------
+    seterrobj, seterr, geterr, seterrcall, geterrcall
+    getbufsize, setbufsize
+
+    Notes
+    -----
+    For complete documentation of the types of floating-point exceptions and
+    treatment options, see `seterr`.
+
+    Examples
+    --------
+    >>> np.geterrobj()  # first get the defaults
+    [8192, 521, None]
+
+    >>> def err_handler(type, flag):
+    ...     print("Floating point error (%s), with flag %s" % (type, flag))
+    ...
+    >>> old_bufsize = np.setbufsize(20000)
+    >>> old_err = np.seterr(divide='raise')
+    >>> old_handler = np.seterrcall(err_handler)
+    >>> np.geterrobj()
+    [8192, 521, <function err_handler at 0x...>]
+
+    >>> old_err = np.seterr(all='ignore')
+    >>> np.base_repr(np.geterrobj()[1], 8)
+    '0'
+    >>> old_err = np.seterr(divide='warn', over='log', under='call',
+    ...                     invalid='print')
+    >>> np.base_repr(np.geterrobj()[1], 8)
+    '4351'
+
+    """)
+
+add_newdoc('numpy.core.umath', 'seterrobj',
+    """
+    seterrobj(errobj, /)
+
+    Set the object that defines floating-point error handling.
+
+    The error object contains all information that defines the error handling
+    behavior in NumPy. `seterrobj` is used internally by the other
+    functions that set error handling behavior (`seterr`, `seterrcall`).
+
+    Parameters
+    ----------
+    errobj : list
+        The error object, a list containing three elements:
+        [internal numpy buffer size, error mask, error callback function].
+
+        The error mask is a single integer that holds the treatment information
+        on all four floating point errors. The information for each error type
+        is contained in three bits of the integer. If we print it in base 8, we
+        can see what treatment is set for "invalid", "under", "over", and
+        "divide" (in that order). The printed string can be interpreted with
+
+        * 0 : 'ignore'
+        * 1 : 'warn'
+        * 2 : 'raise'
+        * 3 : 'call'
+        * 4 : 'print'
+        * 5 : 'log'
+
+    See Also
+    --------
+    geterrobj, seterr, geterr, seterrcall, geterrcall
+    getbufsize, setbufsize
+
+    Notes
+    -----
+    For complete documentation of the types of floating-point exceptions and
+    treatment options, see `seterr`.
+
+    Examples
+    --------
+    >>> old_errobj = np.geterrobj()  # first get the defaults
+    >>> old_errobj
+    [8192, 521, None]
+
+    >>> def err_handler(type, flag):
+    ...     print("Floating point error (%s), with flag %s" % (type, flag))
+    ...
+    >>> new_errobj = [20000, 12, err_handler]
+    >>> np.seterrobj(new_errobj)
+    >>> np.base_repr(12, 8)  # int for divide=4 ('print') and over=1 ('warn')
+    '14'
+    >>> np.geterr()
+    {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
+    >>> np.geterrcall() is err_handler
+    True
+
+    """)
+
+
+##############################################################################
+#
+# compiled_base functions
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'add_docstring',
+    """
+    add_docstring(obj, docstring)
+
+    Add a docstring to a built-in obj if possible.
+    If the obj already has a docstring raise a RuntimeError
+    If this routine does not know how to add a docstring to the object
+    raise a TypeError
+    """)
+
+add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
+    """
+    add_ufunc_docstring(ufunc, new_docstring)
+
+    Replace the docstring for a ufunc with new_docstring.
+    This method will only work if the current docstring for
+    the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
+
+    Parameters
+    ----------
+    ufunc : numpy.ufunc
+        A ufunc whose current doc is NULL.
+    new_docstring : string
+        The new docstring for the ufunc.
+
+    Notes
+    -----
+    This method allocates memory for new_docstring on
+    the heap. Technically this creates a memory leak, since this
+    memory will not be reclaimed until the end of the program
+    even if the ufunc itself is removed. However this will only
+    be a problem if the user is repeatedly creating ufuncs with
+    no documentation, adding documentation via add_newdoc_ufunc,
+    and then throwing away the ufunc.
+    """)
+
+add_newdoc('numpy.core.multiarray', 'get_handler_name',
+    """
+    get_handler_name(a: ndarray) -> str,None
+
+    Return the name of the memory handler used by `a`. If not provided, return
+    the name of the memory handler that will be used to allocate data for the
+    next `ndarray` in this context. May return None if `a` does not own its
+    memory, in which case you can traverse ``a.base`` for a memory handler.
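+
+    Examples
+    --------
+    A minimal sketch (assuming ``a`` owns its memory and the default
+    allocator is in use):
+
+    >>> a = np.arange(3)
+    >>> np.core.multiarray.get_handler_name(a)
+    'default_allocator'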
+    """)
+
+add_newdoc('numpy.core.multiarray', 'get_handler_version',
+    """
+    get_handler_version(a: ndarray) -> int,None
+
+    Return the version of the memory handler used by `a`. If not provided,
+    return the version of the memory handler that will be used to allocate data
+    for the next `ndarray` in this context. May return None if `a` does not own
+    its memory, in which case you can traverse ``a.base`` for a memory handler.
+    """)
+
+add_newdoc('numpy.core.multiarray', '_get_madvise_hugepage',
+    """
+    _get_madvise_hugepage() -> bool
+
+    Get use of ``madvise (2)`` MADV_HUGEPAGE support when
+    allocating the array data. Returns the currently set value.
+    See `global_state` for more information.
+    """)
+
+add_newdoc('numpy.core.multiarray', '_set_madvise_hugepage',
+    """
+    _set_madvise_hugepage(enabled: bool) -> bool
+
+    Set or unset use of ``madvise (2)`` MADV_HUGEPAGE support when
+    allocating the array data. Returns the previously set value.
+    See `global_state` for more information.
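+
+    Examples
+    --------
+    A minimal sketch; the first call's return value is platform-dependent,
+    so only the second result is stable:
+
+    >>> prev = np.core.multiarray._set_madvise_hugepage(False)
+    >>> np.core.multiarray._set_madvise_hugepage(prev)
+    False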
+    """)
+
+add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
+    """
+    format_float_OSprintf_g(val, precision)
+
+    Print a floating point scalar using the system's printf function,
+    equivalent to:
+
+        printf("%.*g", precision, val);
+
+    for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
+    method is designed to help cross-validate the format_float_* methods.
+
+    Parameters
+    ----------
+    val : python float or numpy floating scalar
+        Value to format.
+
+    precision : non-negative integer, optional
+        Precision given to printf.
+
+    Returns
+    -------
+    rep : string
+        The string representation of the floating point value
+
+    See Also
+    --------
+    format_float_scientific
+    format_float_positional
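+
+    Examples
+    --------
+    A minimal sketch (the digits come from the system printf, so the exact
+    output may vary slightly across platforms):
+
+    >>> from numpy.core._multiarray_tests import format_float_OSprintf_g
+    >>> format_float_OSprintf_g(np.float64(0.1), 20)
+    '0.10000000000000000555'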
+    """)
+
+
+##############################################################################
+#
+# Documentation for ufunc attributes and methods
+#
+##############################################################################
+
+
+##############################################################################
+#
+# ufunc object
+#
+##############################################################################
+
+add_newdoc('numpy.core', 'ufunc',
+    """
+    Functions that operate element by element on whole arrays.
+
+    To see the documentation for a specific ufunc, use `info`.  For
+    example, ``np.info(np.sin)``.  Because ufuncs are written in C
+    (for speed) and linked into Python with NumPy's ufunc facility,
+    Python's help() function finds this page whenever help() is called
+    on a ufunc.
+
+    A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`.
+
+    **Calling ufuncs:** ``op(*x[, out], where=True, **kwargs)``
+
+    Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.
+
+    The broadcasting rules are:
+
+    * Dimensions of length 1 may be prepended to either array.
+    * Arrays may be repeated along dimensions of length 1.
+
+    Parameters
+    ----------
+    *x : array_like
+        Input arrays.
+    out : ndarray, None, or tuple of ndarray and None, optional
+        Alternate array object(s) in which to put the result; if provided, it
+        must have a shape that the inputs broadcast to. A tuple of arrays
+        (possible only as a keyword argument) must have length equal to the
+        number of outputs; use None for uninitialized outputs to be
+        allocated by the ufunc.
+    where : array_like, optional
+        This condition is broadcast over the input. At locations where the
+        condition is True, the `out` array will be set to the ufunc result.
+        Elsewhere, the `out` array will retain its original value.
+        Note that if an uninitialized `out` array is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+    **kwargs
+        For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
+
+    Returns
+    -------
+    r : ndarray or tuple of ndarray
+        `r` will have the shape that the arrays in `x` broadcast to; if `out` is
+        provided, it will be returned. If not, `r` will be allocated and
+        may contain uninitialized values. If the function has more than one
+        output, then the result will be a tuple of arrays.
+
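+    Examples
+    --------
+    A minimal sketch of the ``where`` semantics described above; positions
+    where the condition is False keep the original values of ``out``:
+
+    >>> x = np.arange(4, dtype=float)
+    >>> out = np.zeros_like(x)
+    >>> np.add(x, 10, out=out, where=x > 1)
+    array([ 0.,  0., 12., 13.])
+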
+    """)
+
+
+##############################################################################
+#
+# ufunc attributes
+#
+##############################################################################
+
+add_newdoc('numpy.core', 'ufunc', ('identity',
+    """
+    The identity value.
+
+    Data attribute containing the identity element for the ufunc, if it has one.
+    If it does not, the attribute value is None.
+
+    Examples
+    --------
+    >>> np.add.identity
+    0
+    >>> np.multiply.identity
+    1
+    >>> np.power.identity
+    1
+    >>> print(np.exp.identity)
+    None
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('nargs',
+    """
+    The number of arguments.
+
+    Data attribute containing the number of arguments the ufunc takes, including
+    optional ones.
+
+    Notes
+    -----
+    Typically this value will be one more than what you might expect because all
+    ufuncs take the optional "out" argument.
+
+    Examples
+    --------
+    >>> np.add.nargs
+    3
+    >>> np.multiply.nargs
+    3
+    >>> np.power.nargs
+    3
+    >>> np.exp.nargs
+    2
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('nin',
+    """
+    The number of inputs.
+
+    Data attribute containing the number of arguments the ufunc treats as input.
+
+    Examples
+    --------
+    >>> np.add.nin
+    2
+    >>> np.multiply.nin
+    2
+    >>> np.power.nin
+    2
+    >>> np.exp.nin
+    1
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('nout',
+    """
+    The number of outputs.
+
+    Data attribute containing the number of arguments the ufunc treats as output.
+
+    Notes
+    -----
+    Since all ufuncs can take output arguments, this will always be (at least) 1.
+
+    Examples
+    --------
+    >>> np.add.nout
+    1
+    >>> np.multiply.nout
+    1
+    >>> np.power.nout
+    1
+    >>> np.exp.nout
+    1
+
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('ntypes',
+    """
+    The number of types.
+
+    The number of numerical NumPy types - of which there are 18 total - on which
+    the ufunc can operate.
+
+    See Also
+    --------
+    numpy.ufunc.types
+
+    Examples
+    --------
+    >>> np.add.ntypes
+    18
+    >>> np.multiply.ntypes
+    18
+    >>> np.power.ntypes
+    17
+    >>> np.exp.ntypes
+    7
+    >>> np.remainder.ntypes
+    14
+
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('types',
+    """
+    Returns a list with types grouped input->output.
+
+    Data attribute listing the data-type "Domain-Range" groupings the ufunc can
+    deliver. The data-types are given using the character codes.
+
+    See Also
+    --------
+    numpy.ufunc.ntypes
+
+    Examples
+    --------
+    >>> np.add.types
+    ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
+    'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
+    'GG->G', 'OO->O']
+
+    >>> np.multiply.types
+    ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
+    'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
+    'GG->G', 'OO->O']
+
+    >>> np.power.types
+    ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
+    'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
+    'OO->O']
+
+    >>> np.exp.types
+    ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
+
+    >>> np.remainder.types
+    ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
+    'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
+
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('signature',
+    """
+    Definition of the core elements a generalized ufunc operates on.
+
+    The signature determines how the dimensions of each input/output array
+    are split into core and loop dimensions:
+
+    1. Each dimension in the signature is matched to a dimension of the
+       corresponding passed-in array, starting from the end of the shape tuple.
+    2. Core dimensions assigned to the same label in the signature must have
+       exactly matching sizes, no broadcasting is performed.
+    3. The core dimensions are removed from all inputs and the remaining
+       dimensions are broadcast together, defining the loop dimensions.
+
+    Notes
+    -----
+    Generalized ufuncs are used internally in many linalg functions, and in
+    the testing suite; the examples below are taken from these.
+    For ufuncs that operate on scalars, the signature is None, which is
+    equivalent to '()' for every argument.
+
+    Examples
+    --------
+    >>> np.core.umath_tests.matrix_multiply.signature
+    '(m,n),(n,p)->(m,p)'
+    >>> np.linalg._umath_linalg.det.signature
+    '(m,m)->()'
+    >>> np.add.signature is None
+    True  # equivalent to '(),()->()'
+    """))
+
+##############################################################################
+#
+# ufunc methods
+#
+##############################################################################
+
+add_newdoc('numpy.core', 'ufunc', ('reduce',
+    """
+    reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+    Reduces `array`'s dimension by one, by applying ufunc along one axis.
+
+    Let :math:`array.shape = (N_0, ..., N_i, ..., N_{M-1})`.  Then
+    :math:`ufunc.reduce(array, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
+    the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
+    ufunc to each :math:`array[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
+    For a one-dimensional array, reduce produces results equivalent to:
+    ::
+
+     r = op.identity # op = ufunc
+     for i in range(len(A)):
+       r = op(r, A[i])
+     return r
+
+    For example, add.reduce() is equivalent to sum().
+
+    Parameters
+    ----------
+    array : array_like
+        The array to act on.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which a reduction is performed.
+        The default (`axis` = 0) is to perform a reduction over the first
+        dimension of the input array. `axis` may be negative, in
+        which case it counts from the last to the first axis.
+
+        .. versionadded:: 1.7.0
+
+        If this is None, a reduction is performed over all the axes.
+        If this is a tuple of ints, a reduction is performed on multiple
+        axes, instead of a single axis or all the axes as before.
+
+        For operations which are either not commutative or not associative,
+        doing a reduction over multiple axes is not well-defined. The
+        ufuncs do not currently raise an exception in this case, but will
+        likely do so in the future.
+    dtype : data-type code, optional
+        The type used to represent the intermediate results. Defaults
+        to the data-type of the output array if this is provided, or
+        the data-type of the input array if no output array is provided.
+    out : ndarray, None, or tuple of ndarray and None, optional
+        A location into which the result is stored. If not provided or None,
+        a freshly-allocated array is returned. For consistency with
+        ``ufunc.__call__``, if given as a keyword, this may be wrapped in a
+        1-element tuple.
+
+        .. versionchanged:: 1.13.0
+           Tuples are allowed for keyword argument.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `array`.
+
+        .. versionadded:: 1.7.0
+    initial : scalar, optional
+        The value with which to start the reduction.
+        If the ufunc has no identity or the dtype is object, this defaults
+        to None - otherwise it defaults to ufunc.identity.
+        If ``None`` is given, the first element of the reduction is used,
+        and an error is thrown if the reduction is empty.
+
+        .. versionadded:: 1.15.0
+
+    where : array_like of bool, optional
+        A boolean array which is broadcasted to match the dimensions
+        of `array`, and selects elements to include in the reduction. Note
+        that for ufuncs like ``minimum`` that do not have an identity
+        defined, one has to pass in also ``initial``.
+
+        .. versionadded:: 1.17.0
+
+    Returns
+    -------
+    r : ndarray
+        The reduced array. If `out` was supplied, `r` is a reference to it.
+
+    Examples
+    --------
+    >>> np.multiply.reduce([2,3,5])
+    30
+
+    A multi-dimensional array example:
+
+    >>> X = np.arange(8).reshape((2,2,2))
+    >>> X
+    array([[[0, 1],
+            [2, 3]],
+           [[4, 5],
+            [6, 7]]])
+    >>> np.add.reduce(X, 0)
+    array([[ 4,  6],
+           [ 8, 10]])
+    >>> np.add.reduce(X) # confirm: default axis value is 0
+    array([[ 4,  6],
+           [ 8, 10]])
+    >>> np.add.reduce(X, 1)
+    array([[ 2,  4],
+           [10, 12]])
+    >>> np.add.reduce(X, 2)
+    array([[ 1,  5],
+           [ 9, 13]])
+
+    You can use the ``initial`` keyword argument to initialize the reduction
+    with a different value, and ``where`` to select specific elements to include:
+
+    >>> np.add.reduce([10], initial=5)
+    15
+    >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
+    array([14., 14.])
+    >>> a = np.array([10., np.nan, 10])
+    >>> np.add.reduce(a, where=~np.isnan(a))
+    20.0
+
+    Allows reductions of empty arrays where they would normally fail, i.e.
+    for ufuncs without an identity.
+
+    >>> np.minimum.reduce([], initial=np.inf)
+    inf
+    >>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False])
+    array([ 1., 10.])
+    >>> np.minimum.reduce([])
+    Traceback (most recent call last):
+        ...
+    ValueError: zero-size array to reduction operation minimum which has no identity
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('accumulate',
+    """
+    accumulate(array, axis=0, dtype=None, out=None)
+
+    Accumulate the result of applying the operator to all elements.
+
+    For a one-dimensional array, accumulate produces results equivalent to::
+
+      r = np.empty(len(A))
+      t = op.identity        # op = the ufunc being applied to A's  elements
+      for i in range(len(A)):
+          t = op(t, A[i])
+          r[i] = t
+      return r
+
+    For example, add.accumulate() is equivalent to np.cumsum().
+
+    For a multi-dimensional array, accumulate is applied along only one
+    axis (axis zero by default; see Examples below) so repeated use is
+    necessary if one wants to accumulate over multiple axes.
+
+    Parameters
+    ----------
+    array : array_like
+        The array to act on.
+    axis : int, optional
+        The axis along which to apply the accumulation; default is zero.
+    dtype : data-type code, optional
+        The data-type used to represent the intermediate results. Defaults
+        to the data-type of the output array if such is provided, or the
+        data-type of the input array if no output array is provided.
+    out : ndarray, None, or tuple of ndarray and None, optional
+        A location into which the result is stored. If not provided or None,
+        a freshly-allocated array is returned. For consistency with
+        ``ufunc.__call__``, if given as a keyword, this may be wrapped in a
+        1-element tuple.
+
+        .. versionchanged:: 1.13.0
+           Tuples are allowed for keyword argument.
+
+    Returns
+    -------
+    r : ndarray
+        The accumulated values. If `out` was supplied, `r` is a reference to
+        `out`.
+
+    Examples
+    --------
+    1-D array examples:
+
+    >>> np.add.accumulate([2, 3, 5])
+    array([ 2,  5, 10])
+    >>> np.multiply.accumulate([2, 3, 5])
+    array([ 2,  6, 30])
+
+    2-D array examples:
+
+    >>> I = np.eye(2)
+    >>> I
+    array([[1.,  0.],
+           [0.,  1.]])
+
+    Accumulate along axis 0 (rows), down columns:
+
+    >>> np.add.accumulate(I, 0)
+    array([[1.,  0.],
+           [1.,  1.]])
+    >>> np.add.accumulate(I) # no axis specified = axis zero
+    array([[1.,  0.],
+           [1.,  1.]])
+
+    Accumulate along axis 1 (columns), through rows:
+
+    >>> np.add.accumulate(I, 1)
+    array([[1.,  1.],
+           [0.,  1.]])
+
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('reduceat',
+    """
+    reduceat(array, indices, axis=0, dtype=None, out=None)
+
+    Performs a (local) reduce with specified slices over a single axis.
+
+    For i in ``range(len(indices))``, `reduceat` computes
+    ``ufunc.reduce(array[indices[i]:indices[i+1]])``, which becomes the i-th
+    generalized "row" parallel to `axis` in the final result (i.e., in a
+    2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
+    `axis = 1`, it becomes the i-th column).  There are three exceptions to this:
+
+    * when ``i = len(indices) - 1`` (so for the last index),
+      ``indices[i+1] = array.shape[axis]``.
+    * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
+      simply ``array[indices[i]]``.
+    * if ``indices[i] >= len(array)`` or ``indices[i] < 0``, an error is raised.
+
+    The shape of the output depends on the size of `indices`, and may be
+    larger than `array` (this happens if ``len(indices) > array.shape[axis]``).
+
+    Parameters
+    ----------
+    array : array_like
+        The array to act on.
+    indices : array_like
+        Paired indices, comma separated (not colon), specifying slices to
+        reduce.
+    axis : int, optional
+        The axis along which to apply the reduceat.
+    dtype : data-type code, optional
+        The type used to represent the intermediate results. Defaults
+        to the data type of the output array if this is provided, or
+        the data type of the input array if no output array is provided.
+    out : ndarray, None, or tuple of ndarray and None, optional
+        A location into which the result is stored. If not provided or None,
+        a freshly-allocated array is returned. For consistency with
+        ``ufunc.__call__``, if given as a keyword, this may be wrapped in a
+        1-element tuple.
+
+        .. versionchanged:: 1.13.0
+           Tuples are allowed for keyword argument.
+
+    Returns
+    -------
+    r : ndarray
+        The reduced values. If `out` was supplied, `r` is a reference to
+        `out`.
+
+    Notes
+    -----
+    A descriptive example:
+
+    If `array` is 1-D, the function `ufunc.accumulate(array)` is the same as
+    ``ufunc.reduceat(array, indices)[::2]`` where `indices` is
+    ``range(len(array) - 1)`` with a zero placed
+    in every other element:
+    ``indices = zeros(2 * len(array) - 1)``,
+    ``indices[1::2] = range(1, len(array))``.
+
+    Don't be fooled by this attribute's name: `reduceat(array)` is not
+    necessarily smaller than `array`.
+
+    Examples
+    --------
+    To take the running sum of four successive values:
+
+    >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
+    array([ 6, 10, 14, 18])
+
+    A 2-D example:
+
+    >>> x = np.linspace(0, 15, 16).reshape(4,4)
+    >>> x
+    array([[ 0.,   1.,   2.,   3.],
+           [ 4.,   5.,   6.,   7.],
+           [ 8.,   9.,  10.,  11.],
+           [12.,  13.,  14.,  15.]])
+
+    ::
+
+     # reduce such that the result has the following five rows:
+     # [row1 + row2 + row3]
+     # [row4]
+     # [row2]
+     # [row3]
+     # [row1 + row2 + row3 + row4]
+
+    >>> np.add.reduceat(x, [0, 3, 1, 2, 0])
+    array([[12.,  15.,  18.,  21.],
+           [12.,  13.,  14.,  15.],
+           [ 4.,   5.,   6.,   7.],
+           [ 8.,   9.,  10.,  11.],
+           [24.,  28.,  32.,  36.]])
+
+    ::
+
+     # reduce such that result has the following two columns:
+     # [col1 * col2 * col3, col4]
+
+    >>> np.multiply.reduceat(x, [0, 3], 1)
+    array([[   0.,     3.],
+           [ 120.,     7.],
+           [ 720.,    11.],
+           [2184.,    15.]])
+
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('outer',
+    r"""
+    outer(A, B, /, **kwargs)
+
+    Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
+
+    Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
+    ``op.outer(A, B)`` is an array of dimension M + N such that:
+
+    .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
+       op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
+
+    For `A` and `B` one-dimensional, this is equivalent to::
+
+      r = empty((len(A), len(B)))
+      for i in range(len(A)):
+          for j in range(len(B)):
+              r[i,j] = op(A[i], B[j])  # op = ufunc in question
+
+    Parameters
+    ----------
+    A : array_like
+        First array
+    B : array_like
+        Second array
+    kwargs : any
+        Arguments to pass on to the ufunc. Typically `dtype` or `out`.
+        See `ufunc` for a comprehensive overview of all available arguments.
+
+    Returns
+    -------
+    r : ndarray
+        Output array
+
+    See Also
+    --------
+    numpy.outer : A less powerful version of ``np.multiply.outer``
+                  that `ravel`\ s all inputs to 1D. This exists
+                  primarily for compatibility with old code.
+
+    tensordot : ``np.tensordot(a, b, axes=((), ()))`` and
+                ``np.multiply.outer(a, b)`` behave same for all
+                dimensions of a and b.
+
+    Examples
+    --------
+    >>> np.multiply.outer([1, 2, 3], [4, 5, 6])
+    array([[ 4,  5,  6],
+           [ 8, 10, 12],
+           [12, 15, 18]])
+
+    A multi-dimensional example:
+
+    >>> A = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> A.shape
+    (2, 3)
+    >>> B = np.array([[1, 2, 3, 4]])
+    >>> B.shape
+    (1, 4)
+    >>> C = np.multiply.outer(A, B)
+    >>> C.shape; C
+    (2, 3, 1, 4)
+    array([[[[ 1,  2,  3,  4]],
+            [[ 2,  4,  6,  8]],
+            [[ 3,  6,  9, 12]]],
+           [[[ 4,  8, 12, 16]],
+            [[ 5, 10, 15, 20]],
+            [[ 6, 12, 18, 24]]]])
+
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('at',
+    """
+    at(a, indices, b=None, /)
+
+    Performs an unbuffered in-place operation on operand 'a' for elements
+    specified by 'indices'. For addition ufunc, this method is equivalent to
+    ``a[indices] += b``, except that results are accumulated for elements that
+    are indexed more than once. For example, ``a[[0,0]] += 1`` will only
+    increment the first element once because of buffering, whereas
+    ``add.at(a, [0,0], 1)`` will increment the first element twice.
+
+    .. versionadded:: 1.8.0
+
+    Parameters
+    ----------
+    a : array_like
+        The array to perform in place operation on.
+    indices : array_like or tuple
+        Array like index object or slice object for indexing into first
+        operand. If first operand has multiple dimensions, indices can be a
+        tuple of array like index objects or slice objects.
+    b : array_like
+        Second operand for ufuncs requiring two operands. Operand must be
+        broadcastable over first operand after indexing or slicing.
+
+    Examples
+    --------
+    Set items 0 and 1 to their negative values:
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> np.negative.at(a, [0, 1])
+    >>> a
+    array([-1, -2,  3,  4])
+
+    Increment items 0 and 1, and increment item 2 twice:
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> np.add.at(a, [0, 1, 2, 2], 1)
+    >>> a
+    array([2, 3, 5, 4])
+
+    Add items 0 and 1 in first array to second array,
+    and store results in first array:
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> b = np.array([1, 2])
+    >>> np.add.at(a, [0, 1], b)
+    >>> a
+    array([2, 4, 3, 4])
+
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('resolve_dtypes',
+    """
+    resolve_dtypes(dtypes, *, signature=None, casting=None, reduction=False)
+
+    Find the dtypes NumPy will use for the operation.  Both input and
+    output dtypes are returned and may differ from those provided.
+
+    .. note::
+
+        This function always applies NEP 50 rules since it is not provided
+        any actual values.  The Python types ``int``, ``float``, and
+        ``complex`` are thus treated as "weak" types and should be passed
+        for "untyped" Python input.
+
+    Parameters
+    ----------
+    dtypes : tuple of dtypes, None, or literal int, float, complex
+        The input dtypes for each operand.  Output operands can be
+        None, indicating that the dtype must be found.
+    signature : tuple of DTypes or None, optional
+        If given, enforces exact DType (classes) of the specific operand.
+        The ufunc ``dtype`` argument is equivalent to passing a tuple with
+        only output dtypes set.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        The casting mode when casting is necessary.  This is identical to
+        the ufunc call casting modes.
+    reduction : boolean
+        If given, the resolution assumes a reduce operation is happening
+        which slightly changes the promotion and type resolution rules.
+        `dtypes` is usually something like ``(None, np.dtype("i2"), None)``
+        for reductions (first input is also the output).
+
+        .. note::
+
+            The default casting mode is "same_kind", however, as of
+            NumPy 1.24, NumPy uses "unsafe" for reductions.
+
+    Returns
+    -------
+    dtypes : tuple of dtypes
+        The dtypes which NumPy would use for the calculation.  Note that
+        dtypes may not match the passed in ones (casting is necessary).
+
+    See Also
+    --------
+    numpy.ufunc._resolve_dtypes_and_context :
+        Similar function to this, but returns additional information which
+        give access to the core C functionality of NumPy.
+
+    Examples
+    --------
+    This API requires passing dtypes, define them for convenience:
+
+    >>> int32 = np.dtype("int32")
+    >>> float32 = np.dtype("float32")
+
+    The typical ufunc call does not pass an output dtype.  `np.add` has two
+    inputs and one output, so leave the output as ``None`` (not provided):
+
+    >>> np.add.resolve_dtypes((int32, float32, None))
+    (dtype('float64'), dtype('float64'), dtype('float64'))
+
+    The loop found uses "float64" for all operands (including the output), the
+    first input would be cast.
+
+    ``resolve_dtypes`` supports "weak" handling for Python scalars by passing
+    ``int``, ``float``, or ``complex``:
+
+    >>> np.add.resolve_dtypes((float32, float, None))
+    (dtype('float32'), dtype('float32'), dtype('float32'))
+
+    Here the Python ``float`` behaves similarly to a Python value ``0.0``
+    in a ufunc call.  (See :ref:`NEP 50 <NEP50>` for details.)
+
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('_resolve_dtypes_and_context',
+    """
+    _resolve_dtypes_and_context(dtypes, *, signature=None, casting=None, reduction=False)
+
+    See `numpy.ufunc.resolve_dtypes` for parameter information.  This
+    function is considered *unstable*.  You may use it, but the returned
+    information is NumPy version specific and expected to change.
+    Large API/ABI changes are not expected, but a new NumPy version is
+    expected to require updating code using this functionality.
+
+    This function is designed to be used in conjunction with
+    `numpy.ufunc._get_strided_loop`.  The calls are split to mirror the C API
+    and allow future improvements.
+
+    Returns
+    -------
+    dtypes : tuple of dtypes
+    call_info :
+        PyCapsule with all necessary information to get access to low level
+        C calls.  See `numpy.ufunc._get_strided_loop` for more information.
+
+    """))
+
+add_newdoc('numpy.core', 'ufunc', ('_get_strided_loop',
+    """
+    _get_strided_loop(call_info, /, *, fixed_strides=None)
+
+    This function fills in the ``call_info`` capsule to include all
+    information necessary to call the low-level strided loop from NumPy.
+
+    See notes for more information.
+
+    Parameters
+    ----------
+    call_info : PyCapsule
+        The PyCapsule returned by `numpy.ufunc._resolve_dtypes_and_context`.
+    fixed_strides : tuple of int or None, optional
+        A tuple with fixed byte strides of all input arrays.  NumPy may use
+        this information to find specialized loops, so any call must follow
+        the given stride.  Use ``None`` to indicate that the stride is not
+        known (or not fixed) for all calls.
+
+    Notes
+    -----
+    Together with `numpy.ufunc._resolve_dtypes_and_context` this function
+    gives low-level access to the NumPy ufunc loops.
+    The first function does general preparation and returns the required
+    information. It returns this as a C capsule with the version specific
+    name ``numpy_1.24_ufunc_call_info``.
+    The NumPy 1.24 ufunc call info capsule has the following layout::
+
+        typedef struct {
+            PyArrayMethod_StridedLoop *strided_loop;
+            PyArrayMethod_Context *context;
+            NpyAuxData *auxdata;
+
+            /* Flag information (expected to change) */
+            npy_bool requires_pyapi;  /* GIL is required by loop */
+
+            /* Loop doesn't set FPE flags; if not set check FPE flags */
+            npy_bool no_floatingpoint_errors;
+        } ufunc_call_info;
+
+    Note that the first call only fills in the ``context``.  The call to
+    ``_get_strided_loop`` fills in all other data.
+    Please see the ``numpy/experimental_dtype_api.h`` header for exact
+    call information; the main thing to note is that the new-style loops
+    return 0 on success, -1 on failure.  They are passed context as new
+    first input and ``auxdata`` as (replaced) last.
+
+    Only the ``strided_loop`` signature is considered guaranteed stable
+    for NumPy bug-fix releases.  All other API is tied to the experimental
+    API versioning.
+
+    The reason for the split call is that cast information is required to
+    decide what the fixed-strides will be.
+
+    NumPy ties the lifetime of the ``auxdata`` information to the capsule.
+
+    """))
+
+
+
+##############################################################################
+#
+# Documentation for dtype attributes and methods
+#
+##############################################################################
+
+##############################################################################
+#
+# dtype object
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype',
+    """
+    dtype(dtype, align=False, copy=False[, metadata])
+
+    Create a data type object.
+
+    A numpy array is homogeneous, and contains elements described by a
+    dtype object. A dtype object can be constructed from different
+    combinations of fundamental numeric types.
+
+    Parameters
+    ----------
+    dtype
+        Object to be converted to a data type object.
+    align : bool, optional
+        Add padding to the fields to match what a C compiler would output
+        for a similar C-struct. Can be ``True`` only if `dtype` is a dictionary
+        or a comma-separated string. If a struct dtype is being created,
+        this also sets a sticky alignment flag ``isalignedstruct``.
+    copy : bool, optional
+        Make a new copy of the data-type object. If ``False``, the result
+        may just be a reference to a built-in data-type object.
+    metadata : dict, optional
+        An optional dictionary with dtype metadata.
+
+    See also
+    --------
+    result_type
+
+    Examples
+    --------
+    Using array-scalar type:
+
+    >>> np.dtype(np.int16)
+    dtype('int16')
+
+    Structured type, one field named 'f1', containing int16:
+
+    >>> np.dtype([('f1', np.int16)])
+    dtype([('f1', '<i2')])
+
+    Structured type, one field named 'f1', in itself containing a structured
+    type with one field:
+
+    >>> np.dtype([('f1', [('f1', np.int16)])])
+    dtype([('f1', [('f1', '<i2')])])
+
+    Structured type, two fields: the first field contains an unsigned int, the
+    second an int32:
+
+    >>> np.dtype([('f1', np.uint64), ('f2', np.int32)])
+    dtype([('f1', '<u8'), ('f2', '<i4')])
+
+    Using array-protocol type strings:
+
+    >>> np.dtype([('a','f8'),('b','S10')])
+    dtype([('a', '<f8'), ('b', 'S10')])
+
+    Using comma-separated field formats.  The shape is (2,3):
+
+    >>> np.dtype("i4, (2,3)f8")
+    dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
+
+    Using tuples.  ``int`` is a fixed type, 3 the field's shape.  ``void``
+    is a flexible type, here of size 10:
+
+    >>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)])
+    dtype([('hello', '<i8', (3,)), ('world', 'V10')])
+
+    Subdivide ``int16`` into 2 ``int8``'s, called x and y.  0 and 1 are
+    the offsets in bytes:
+
+    >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
+    dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')]))
+
+    Using dictionaries.  Two fields named 'gender' and 'age':
+
+    >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
+    dtype([('gender', 'S1'), ('age', 'u1')])
+
+    Offsets in bytes, here 0 and 25:
+
+    >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
+    dtype([('surname', 'S25'), ('age', 'u1')])
+
+    """)
+
+##############################################################################
+#
+# dtype attributes
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
+    """
+    The required alignment (bytes) of this data-type according to the compiler.
+
+    More information is available in the C-API section of the manual.
+
+    Examples
+    --------
+
+    >>> x = np.dtype('i4')
+    >>> x.alignment
+    4
+
+    >>> x = np.dtype(float)
+    >>> x.alignment
+    8
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
+    """
+    A character indicating the byte-order of this data-type object.
+
+    One of:
+
+    ===  ==============
+    '='  native
+    '<'  little-endian
+    '>'  big-endian
+    '|'  not applicable
+    ===  ==============
+
+    All built-in data-type objects have byteorder either '=' or '|'.
+
+    Examples
+    --------
+
+    >>> dt = np.dtype('i2')
+    >>> dt.byteorder
+    '='
+    >>> # endian is not relevant for 8 bit numbers
+    >>> np.dtype('i1').byteorder
+    '|'
+    >>> # or ASCII strings
+    >>> np.dtype('S2').byteorder
+    '|'
+    >>> # Even if specific code is given, and it is native
+    >>> # '=' is the byteorder
+    >>> import sys
+    >>> sys_is_le = sys.byteorder == 'little'
+    >>> native_code = '<' if sys_is_le else '>'
+    >>> swapped_code = '>' if sys_is_le else '<'
+    >>> dt = np.dtype(native_code + 'i2')
+    >>> dt.byteorder
+    '='
+    >>> # Swapped code shows up as itself
+    >>> dt = np.dtype(swapped_code + 'i2')
+    >>> dt.byteorder == swapped_code
+    True
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('char',
+    """A unique character code for each of the 21 different built-in types.
+
+    Examples
+    --------
+
+    >>> x = np.dtype(float)
+    >>> x.char
+    'd'
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
+    """
+    `__array_interface__` description of the data-type.
+
+    The format is that required by the 'descr' key in the
+    `__array_interface__` attribute.
+
+    Warning: This attribute exists specifically for `__array_interface__`,
+    and passing it directly to `np.dtype` will not accurately reconstruct
+    some dtypes (e.g., scalar and subarray dtypes).
+
+    Examples
+    --------
+
+    >>> x = np.dtype(float)
+    >>> x.descr
+    [('', '<f8')]
+
+    >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+    >>> dt.descr
+    [('name', '<U16'), ('grades', '<f8', (2,))]
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
+    """
+    Dictionary of named fields defined for this data type, or ``None``.
+
+    The dictionary is indexed by keys that are the names of the fields.
+    Each entry in the dictionary has a tuple entry of the form
+    ``(dtype, offset)`` or potentially ``(dtype, offset, title)``. The offset
+    is limited in that it must be a C int, which is signed and usually 32
+    bits. If present, the optional title can be any object (if it is a string
+    or unicode then it will also be a key in the fields dictionary, otherwise
+    it's meta-data). Notice also that the first two elements of the tuple can
+    be passed directly as arguments to the ``ndarray.getfield`` and
+    ``ndarray.setfield`` methods.
+
+    See Also
+    --------
+    ndarray.getfield, ndarray.setfield
+
+    Examples
+    --------
+    >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+    >>> print(dt.fields)
+    {'name': (dtype('<U16'), 0), 'grades': (dtype(('<f8', (2,))), 64)}
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
+    """
+    Bit-flags describing how this data type is to be interpreted.
+
+    Bit-masks are in `numpy.core.multiarray` as the constants
+    `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
+    `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
+    of these flags is in C-API documentation; they are largely useful
+    for user-defined data-types.
+
+    The following example demonstrates that operations on this particular
+    dtype require the Python C-API.
+
+    Examples
+    --------
+
+    >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
+    >>> x.flags
+    16
+    >>> np.core.multiarray.NEEDS_PYAPI
+    16
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
+    """
+    Boolean indicating whether this dtype contains any reference-counted
+    objects in any fields or sub-dtypes.
+
+    Recall that what is actually in the ndarray memory representing
+    the Python object is the memory address of that object (a pointer).
+    Special handling may be required, and this attribute is useful for
+    distinguishing data types that may contain arbitrary Python objects
+    and data-types that won't.
+
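+    Examples
+    --------
+    A minimal sketch:
+
+    >>> np.dtype(object).hasobject
+    True
+    >>> np.dtype(np.float64).hasobject
+    False
+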
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
+    """
+    Integer indicating how this dtype relates to the built-in dtypes.
+
+    Read-only.
+
+    =  ========================================================================
+    0  if this is a structured array type, with fields
+    1  if this is a dtype compiled into numpy (such as ints, floats etc)
+    2  if the dtype is for a user-defined numpy type
+       A user-defined type uses the numpy C-API machinery to extend
+       numpy to handle a new array type. See
+       :ref:`user.user-defined-data-types` in the NumPy manual.
+    =  ========================================================================
+
+    Examples
+    --------
+    >>> dt = np.dtype('i2')
+    >>> dt.isbuiltin
+    1
+    >>> dt = np.dtype('f8')
+    >>> dt.isbuiltin
+    1
+    >>> dt = np.dtype([('field1', 'f8')])
+    >>> dt.isbuiltin
+    0
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
+    """
+    Boolean indicating whether the byte order of this dtype is native
+    to the platform.
+
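+    Examples
+    --------
+    A minimal sketch (the swapped byte-order code depends on the platform):
+
+    >>> import sys
+    >>> np.dtype('=i4').isnative
+    True
+    >>> swapped = '>' if sys.byteorder == 'little' else '<'
+    >>> np.dtype(swapped + 'i4').isnative
+    False
+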
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
+    """
+    Boolean indicating whether the dtype is a struct which maintains
+    field alignment. This flag is sticky, so when combining multiple
+    structs together, it is preserved and produces new dtypes which
+    are also aligned.
+
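+    Examples
+    --------
+    A minimal sketch:
+
+    >>> dt = np.dtype({'names': ['a', 'b'], 'formats': ['i1', 'f8']}, align=True)
+    >>> dt.isalignedstruct
+    True
+    >>> np.dtype({'names': ['a', 'b'], 'formats': ['i1', 'f8']}).isalignedstruct
+    False
+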
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
+    """
+    The element size of this data-type object.
+
+    For 18 of the 21 types this number is fixed by the data-type.
+    For the flexible data-types, this number can be anything.
+
+    Examples
+    --------
+
+    >>> arr = np.array([[1, 2], [3, 4]])
+    >>> arr.dtype
+    dtype('int64')
+    >>> arr.itemsize
+    8
+
+    >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+    >>> dt.itemsize
+    80
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
+    """
+    A character code (one of 'biufcmMOSUV') identifying the general kind of data.
+
+    =  ======================
+    b  boolean
+    i  signed integer
+    u  unsigned integer
+    f  floating-point
+    c  complex floating-point
+    m  timedelta
+    M  datetime
+    O  object
+    S  (byte-)string
+    U  Unicode
+    V  void
+    =  ======================
+
+    Examples
+    --------
+
+    >>> dt = np.dtype('i4')
+    >>> dt.kind
+    'i'
+    >>> dt = np.dtype('f8')
+    >>> dt.kind
+    'f'
+    >>> dt = np.dtype([('field1', 'f8')])
+    >>> dt.kind
+    'V'
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('metadata',
+    """
+    Either ``None`` or a readonly dictionary of metadata (mappingproxy).
+
+    The metadata field can be set using any dictionary at data-type
+    creation. NumPy currently has no uniform approach to propagating
+    metadata; although some array operations preserve it, there is no
+    guarantee that others will.
+
+    .. warning::
+
+        Although used in certain projects, this feature was long undocumented
+        and is not well supported. Some aspects of metadata propagation
+        are expected to change in the future.
+
+    Examples
+    --------
+
+    >>> dt = np.dtype(float, metadata={"key": "value"})
+    >>> dt.metadata["key"]
+    'value'
+    >>> arr = np.array([1, 2, 3], dtype=dt)
+    >>> arr.dtype.metadata
+    mappingproxy({'key': 'value'})
+
+    Adding arrays with identical datatypes currently preserves the metadata:
+
+    >>> (arr + arr).dtype.metadata
+    mappingproxy({'key': 'value'})
+
+    But if the arrays have different dtype metadata, the metadata may be
+    dropped:
+
+    >>> dt2 = np.dtype(float, metadata={"key2": "value2"})
+    >>> arr2 = np.array([3, 2, 1], dtype=dt2)
+    >>> (arr + arr2).dtype.metadata is None
+    True  # The metadata field is cleared so None is returned
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('name',
+    """
+    A bit-width name for this data-type.
+
+    Un-sized flexible data-type objects do not have this attribute.
+
+    Examples
+    --------
+
+    >>> x = np.dtype(float)
+    >>> x.name
+    'float64'
+    >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
+    >>> x.name
+    'void640'
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('names',
+    """
+    Ordered list of field names, or ``None`` if there are no fields.
+
+    The names are ordered according to increasing byte offset. This can be
+    used, for example, to walk through all of the named fields in offset order.
+
+    Examples
+    --------
+    >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+    >>> dt.names
+    ('name', 'grades')
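+
+    A minimal sketch of walking the fields in offset order, as mentioned
+    above:
+
+    >>> [dt.fields[name][1] for name in dt.names]
+    [0, 64]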
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('num',
+    """
+    A unique number for each of the 21 different built-in types.
+
+    These are roughly ordered from least-to-most precision.
+
+    Examples
+    --------
+
+    >>> dt = np.dtype(str)
+    >>> dt.num
+    19
+
+    >>> dt = np.dtype(float)
+    >>> dt.num
+    12
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
+    """
+    Shape tuple of the sub-array if this data type describes a sub-array,
+    and ``()`` otherwise.
+
+    Examples
+    --------
+
+    >>> dt = np.dtype(('i4', 4))
+    >>> dt.shape
+    (4,)
+
+    >>> dt = np.dtype(('i4', (2, 3)))
+    >>> dt.shape
+    (2, 3)
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
+    """
+    Number of dimensions of the sub-array if this data type describes a
+    sub-array, and ``0`` otherwise.
+
+    .. versionadded:: 1.13.0
+
+    Examples
+    --------
+    >>> x = np.dtype(float)
+    >>> x.ndim
+    0
+
+    >>> x = np.dtype((float, 8))
+    >>> x.ndim
+    1
+
+    >>> x = np.dtype(('i4', (3, 4)))
+    >>> x.ndim
+    2
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('str',
+    """The array-protocol typestring of this data-type object."""))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
+    """
+    Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
+    None otherwise.
+
+    The *shape* is the fixed shape of the sub-array described by this
+    data type, and *item_dtype* the data type of the array.
+
+    If a field whose dtype object has this attribute is retrieved,
+    then the extra dimensions implied by *shape* are tacked on to
+    the end of the retrieved array.
+
+    See Also
+    --------
+    dtype.base
+
+    Examples
+    --------
+    >>> x = np.dtype('8f')
+    >>> x.subdtype
+    (dtype('float32'), (8,))
+
+    >>> x = np.dtype('i2')
+    >>> x.subdtype
+    >>>
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('base',
+    """
+    Returns dtype for the base element of the subarrays,
+    regardless of their dimension or shape.
+
+    See Also
+    --------
+    dtype.subdtype
+
+    Examples
+    --------
+    >>> x = np.dtype('8f')
+    >>> x.base
+    dtype('float32')
+
+    >>> x = np.dtype('i2')
+    >>> x.base
+    dtype('int16')
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('type',
+    """The type object used to instantiate a scalar of this data-type."""))
+
+##############################################################################
+#
+# dtype methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
+    """
+    newbyteorder(new_order='S', /)
+
+    Return a new dtype with a different byte order.
+
+    Changes are also made in all fields and sub-arrays of the data type.
+
+    Parameters
+    ----------
+    new_order : string, optional
+        Byte order to force; a value from the byte order specifications
+        below.  The default value ('S') results in swapping the current
+        byte order.  `new_order` codes can be any of:
+
+        * 'S' - swap dtype from current to opposite endian
+        * {'<', 'little'} - little endian
+        * {'>', 'big'} - big endian
+        * {'=', 'native'} - native order
+        * {'|', 'I'} - ignore (no change to byte order)
+
+    Returns
+    -------
+    new_dtype : dtype
+        New dtype object with the given change to the byte order.
+
+    Notes
+    -----
+    Changes are also made in all fields and sub-arrays of the data type.
+
+    Examples
+    --------
+    >>> import sys
+    >>> sys_is_le = sys.byteorder == 'little'
+    >>> native_code = '<' if sys_is_le else '>'
+    >>> swapped_code = '>' if sys_is_le else '<'
+    >>> native_dt = np.dtype(native_code+'i2')
+    >>> swapped_dt = np.dtype(swapped_code+'i2')
+    >>> native_dt.newbyteorder('S') == swapped_dt
+    True
+    >>> native_dt.newbyteorder() == swapped_dt
+    True
+    >>> native_dt == swapped_dt.newbyteorder('S')
+    True
+    >>> native_dt == swapped_dt.newbyteorder('=')
+    True
+    >>> native_dt == swapped_dt.newbyteorder('N')
+    True
+    >>> native_dt == native_dt.newbyteorder('|')
+    True
+    >>> np.dtype('<i2') == native_dt.newbyteorder('<')
+    True
+    >>> np.dtype('<i2') == native_dt.newbyteorder('L')
+    True
+    >>> np.dtype('>i2') == native_dt.newbyteorder('>')
+    True
+    >>> np.dtype('>i2') == native_dt.newbyteorder('B')
+    True
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__class_getitem__',
+    """
+    __class_getitem__(item, /)
+
+    Return a parametrized wrapper around the `~numpy.dtype` type.
+
+    .. versionadded:: 1.22
+
+    Returns
+    -------
+    alias : types.GenericAlias
+        A parametrized `~numpy.dtype` type.
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    >>> np.dtype[np.int64]
+    numpy.dtype[numpy.int64]
+
+    See Also
+    --------
+    :pep:`585` : Type hinting generics in standard collections.
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__ge__',
+    """
+    __ge__(value, /)
+
+    Return ``self >= value``.
+
+    Equivalent to ``np.can_cast(value, self, casting="safe")``.
+
+    See Also
+    --------
+    can_cast : Returns True if cast between data types can occur according to
+               the casting rule.
+
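+    Examples
+    --------
+    A minimal sketch: a dtype compares ``>=`` to another when values of the
+    other dtype can be safely cast to it:
+
+    >>> np.dtype('float64') >= np.dtype('float32')
+    True
+    >>> np.dtype('int32') >= np.dtype('float32')
+    False
+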
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__le__',
+    """
+    __le__(value, /)
+
+    Return ``self <= value``.
+
+    Equivalent to ``np.can_cast(self, value, casting="safe")``.
+
+    See Also
+    --------
+    can_cast : Returns True if cast between data types can occur according to
+               the casting rule.
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__gt__',
+    """
+    __gt__(value, /)
+
+    Return ``self > value``.
+
+    Equivalent to
+    ``self != value and np.can_cast(value, self, casting="safe")``.
+
+    See Also
+    --------
+    can_cast : Returns True if cast between data types can occur according to
+               the casting rule.
+
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__lt__',
+    """
+    __lt__(value, /)
+
+    Return ``self < value``.
+
+    Equivalent to
+    ``self != value and np.can_cast(self, value, casting="safe")``.
+
+    See Also
+    --------
+    can_cast : Returns True if cast between data types can occur according to
+               the casting rule.
+
+    """))
+
+##############################################################################
+#
+# Datetime-related Methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar',
+    """
+    busdaycalendar(weekmask='1111100', holidays=None)
+
+    A business day calendar object that efficiently stores information
+    defining valid days for the busday family of functions.
+
+    The default valid days are Monday through Friday ("business days").
+    A busdaycalendar object can be specified with any set of weekly
+    valid days, plus an optional set of "holiday" dates that will always
+    be invalid.
+
+    Once a busdaycalendar object is created, the weekmask and holidays
+    cannot be modified.
+
+    .. versionadded:: 1.7.0
+
+    Parameters
+    ----------
+    weekmask : str or array_like of bool, optional
+        A seven-element array indicating which of Monday through Sunday are
+        valid days. May be specified as a length-seven list or array, like
+        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+        weekdays, optionally separated by white space. Valid abbreviations
+        are: Mon Tue Wed Thu Fri Sat Sun
+    holidays : array_like of datetime64[D], optional
+        An array of dates to consider as invalid dates, no matter which
+        weekday they fall upon.  Holiday dates may be specified in any
+        order, and NaT (not-a-time) dates are ignored.  This list is
+        saved in a normalized form that is suited for fast calculations
+        of valid days.
+
+    Returns
+    -------
+    out : busdaycalendar
+        A business day calendar object containing the specified
+        weekmask and holidays values.
+
+    See Also
+    --------
+    is_busday : Returns a boolean array indicating valid days.
+    busday_offset : Applies an offset counted in valid days.
+    busday_count : Counts how many valid days are in a half-open date range.
+
+    Attributes
+    ----------
+    Note: once a busdaycalendar object is created, you cannot modify the
+    weekmask or holidays.  The attributes return copies of internal data.
+    weekmask : (copy) seven-element array of bool
+    holidays : (copy) sorted array of datetime64[D]
+
+    Examples
+    --------
+    >>> # Some important days in July
+    ... bdd = np.busdaycalendar(
+    ...             holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
+    >>> # Default is Monday to Friday weekdays
+    ... bdd.weekmask
+    array([ True,  True,  True,  True,  True, False, False])
+    >>> # Any holidays already on the weekend are removed
+    ... bdd.holidays
+    array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
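+    >>> # A further sketch: weekmasks may use day-name abbreviations
+    ... np.busdaycalendar(weekmask='Mon Tue Wed').weekmask
+    array([ True,  True,  True, False, False, False, False])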
+    """)
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
+    """A copy of the seven-element boolean mask indicating valid days."""))
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
+    """A copy of the holiday array indicating additional invalid days."""))
+
+add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
+    """
+    normalize_axis_index(axis, ndim, msg_prefix=None)
+
+    Normalizes an axis index, `axis`, such that it is a valid positive index
+    into the shape of an array with `ndim` dimensions. Raises an AxisError
+    with an appropriate message if this is not possible.
+
+    Used internally by all axis-checking logic.
+
+    .. versionadded:: 1.13.0
+
+    Parameters
+    ----------
+    axis : int
+        The un-normalized index of the axis. Can be negative.
+    ndim : int
+        The number of dimensions of the array that `axis` should be normalized
+        against.
+    msg_prefix : str
+        A prefix to put before the message, typically the name of the argument.
+
+    Returns
+    -------
+    normalized_axis : int
+        The normalized axis index, such that `0 <= normalized_axis < ndim`
+
+    Raises
+    ------
+    AxisError
+        If the axis index is invalid, when `-ndim <= axis < ndim` is false.
+
+    Examples
+    --------
+    >>> normalize_axis_index(0, ndim=3)
+    0
+    >>> normalize_axis_index(1, ndim=3)
+    1
+    >>> normalize_axis_index(-1, ndim=3)
+    2
+
+    >>> normalize_axis_index(3, ndim=3)
+    Traceback (most recent call last):
+    ...
+    AxisError: axis 3 is out of bounds for array of dimension 3
+    >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
+    Traceback (most recent call last):
+    ...
+    AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
+    """)
+
+add_newdoc('numpy.core.multiarray', 'datetime_data',
+    """
+    datetime_data(dtype, /)
+
+    Get information about the step size of a date or time type.
+
+    The returned tuple can be passed as the second argument of `numpy.datetime64` and
+    `numpy.timedelta64`.
+
+    Parameters
+    ----------
+    dtype : dtype
+        The dtype object, which must be a `datetime64` or `timedelta64` type.
+
+    Returns
+    -------
+    unit : str
+        The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this
+        dtype is based.
+    count : int
+        The number of base units in a step.
+
+    Examples
+    --------
+    >>> dt_25s = np.dtype('timedelta64[25s]')
+    >>> np.datetime_data(dt_25s)
+    ('s', 25)
+    >>> np.array(10, dt_25s).astype('timedelta64[s]')
+    array(250, dtype='timedelta64[s]')
+
+    The result can be used to construct a datetime that uses the same units
+    as a timedelta
+
+    >>> np.datetime64('2010', np.datetime_data(dt_25s))
+    numpy.datetime64('2010-01-01T00:00:00','25s')
+    """)
+
+
+##############################################################################
+#
+# Documentation for `generic` attributes and methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+    """
+    Base class for numpy scalar types.
+
+    Class from which most (all?) numpy scalar types are derived.  For
+    consistency, it exposes the same API as `ndarray`, even though many of
+    the resulting attributes are either "get-only" or irrelevant for a
+    scalar.  This is the class from which it is strongly suggested that
+    users derive custom scalar types.
+
+    """)
+
+# Attributes
+
+def refer_to_array_attribute(attr, method=True):
+    docstring = """
+    Scalar {} identical to the corresponding array attribute.
+
+    Please see `ndarray.{}`.
+    """
+
+    return attr, docstring.format("method" if method else "attribute", attr)
+
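+# As a sketch of what this helper yields (whitespace in the docstring is
+# elided here; the exact formatting follows the template above):
+#
+#     >>> refer_to_array_attribute('mean')
+#     ('mean', '...Scalar method identical to the corresponding array attribute...Please see `ndarray.mean`...')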
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('T', method=False))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('base', method=False))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('data',
+    """Pointer to start of data."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
+    """Get array data-descriptor."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
+    """The integer value of flags."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
+    """A 1-D view of the scalar."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
+    """The imaginary part of the scalar."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
+    """The length of one element in bytes."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
+    """The length of the scalar in bytes."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
+    """The number of array dimensions."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('real',
+    """The real part of the scalar."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
+    """Tuple of array dimensions."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('size',
+    """The number of elements in the gentype."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
+    """Tuple of bytes steps in each dimension."""))
+
+# Methods
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('all'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('any'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('argmax'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('argmin'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('argsort'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('astype'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('byteswap'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('choose'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('clip'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('compress'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('conjugate'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('copy'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('cumprod'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('cumsum'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('diagonal'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('dump'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('dumps'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('fill'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('flatten'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('getfield'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('item'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('itemset'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('max'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('mean'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('min'))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
+    """
+    newbyteorder(new_order='S', /)
+
+    Return a new `dtype` with a different byte order.
+
+    Changes are also made in all fields and sub-arrays of the data type.
+
+    The `new_order` code can be any from the following:
+
+    * 'S' - swap dtype from current to opposite endian
+    * {'<', 'little'} - little endian
+    * {'>', 'big'} - big endian
+    * {'=', 'native'} - native order
+    * {'|', 'I'} - ignore (no change to byte order)
+
+    Parameters
+    ----------
+    new_order : str, optional
+        Byte order to force; a value from the byte order specifications
+        above.  The default value ('S') results in swapping the current
+        byte order.
+
+    Returns
+    -------
+    new_dtype : dtype
+        New `dtype` object with the given change to the byte order.
+
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('nonzero'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('prod'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('ptp'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('put'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('ravel'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('repeat'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('reshape'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('resize'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('round'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('searchsorted'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('setfield'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('setflags'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('sort'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('squeeze'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('std'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('sum'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('swapaxes'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('take'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('tofile'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('tolist'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('tostring'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('trace'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('transpose'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('var'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+           refer_to_array_attribute('view'))
+
+add_newdoc('numpy.core.numerictypes', 'number', ('__class_getitem__',
+    """
+    __class_getitem__(item, /)
+
+    Return a parametrized wrapper around the `~numpy.number` type.
+
+    .. versionadded:: 1.22
+
+    Returns
+    -------
+    alias : types.GenericAlias
+        A parametrized `~numpy.number` type.
+
+    Examples
+    --------
+    >>> from typing import Any
+    >>> import numpy as np
+
+    >>> np.signedinteger[Any]
+    numpy.signedinteger[typing.Any]
+
+    See Also
+    --------
+    :pep:`585` : Type hinting generics in standard collections.
+
+    """))
+
+##############################################################################
+#
+# Documentation for scalar type abstract base classes in type hierarchy
+#
+##############################################################################
+
+
+add_newdoc('numpy.core.numerictypes', 'number',
+    """
+    Abstract base class of all numeric scalar types.
+
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'integer',
+    """
+    Abstract base class of all integer scalar types.
+
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'signedinteger',
+    """
+    Abstract base class of all signed integer scalar types.
+
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'unsignedinteger',
+    """
+    Abstract base class of all unsigned integer scalar types.
+
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'inexact',
+    """
+    Abstract base class of all numeric scalar types with a (potentially)
+    inexact representation of the values in its range, such as
+    floating-point numbers.
+
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'floating',
+    """
+    Abstract base class of all floating-point scalar types.
+
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'complexfloating',
+    """
+    Abstract base class of all complex number scalar types that are made up of
+    floating-point numbers.
+
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'flexible',
+    """
+    Abstract base class of all scalar types without predefined length.
+    The actual size of these types depends on the specific `np.dtype`
+    instantiation.
+
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'character',
+    """
+    Abstract base class of all character string scalar types.
+
+    """)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_add_newdocs_scalars.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_add_newdocs_scalars.py
new file mode 100644
index 00000000..f9a6ad96
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_add_newdocs_scalars.py
@@ -0,0 +1,372 @@
+"""
+This file is separate from ``_add_newdocs.py`` so that it can be mocked out by
+our sphinx ``conf.py`` during doc builds, where we want to avoid showing
+platform-dependent information.
+"""
+import sys
+import os
+from numpy.core import dtype
+from numpy.core import numerictypes as _numerictypes
+from numpy.core.function_base import add_newdoc
+
+##############################################################################
+#
+# Documentation for concrete scalar classes
+#
+##############################################################################
+
+def numeric_type_aliases(aliases):
+    def type_aliases_gen():
+        for alias, doc in aliases:
+            try:
+                alias_type = getattr(_numerictypes, alias)
+            except AttributeError:
+                # The set of aliases that actually exist varies between platforms
+                pass
+            else:
+                yield (alias_type, alias, doc)
+    return list(type_aliases_gen())
+
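+# Aliases missing from this platform's build are silently skipped; a sketch
+# of the happy path:
+#
+#     >>> numeric_type_aliases([('int8', '8-bit signed integer')])
+#     [(<class 'numpy.int8'>, 'int8', '8-bit signed integer')]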
+
+possible_aliases = numeric_type_aliases([
+    ('int8', '8-bit signed integer (``-128`` to ``127``)'),
+    ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'),
+    ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'),
+    ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'),
+    ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
+    ('uint8', '8-bit unsigned integer (``0`` to ``255``)'),
+    ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'),
+    ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'),
+    ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'),
+    ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
+    ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
+    ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
+    ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
+    ('float96', '96-bit extended-precision floating-point number type'),
+    ('float128', '128-bit extended-precision floating-point number type'),
+    ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
+    ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
+    ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
+    ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
+    ])
+
+
+def _get_platform_and_machine():
+    try:
+        system, _, _, _, machine = os.uname()
+    except AttributeError:
+        system = sys.platform
+        if system == 'win32':
+            machine = os.environ.get('PROCESSOR_ARCHITEW6432', '') \
+                    or os.environ.get('PROCESSOR_ARCHITECTURE', '')
+        else:
+            machine = 'unknown'
+    return system, machine
+
+
+_system, _machine = _get_platform_and_machine()
+_doc_alias_string = f":Alias on this platform ({_system} {_machine}):"
+
+
+def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
+    # note: `:field: value` is rST syntax which renders as field lists.
+    o = getattr(_numerictypes, obj)
+
+    character_code = dtype(o).char
+    canonical_name_doc = "" if obj == o.__name__ else \
+                        f":Canonical name: `numpy.{obj}`\n    "
+    if fixed_aliases:
+        alias_doc = ''.join(f":Alias: `numpy.{alias}`\n    "
+                            for alias in fixed_aliases)
+    else:
+        alias_doc = ''
+    alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n    "
+                         for (alias_type, alias, doc) in possible_aliases if alias_type is o)
+
+    docstring = f"""
+    {doc.strip()}
+
+    :Character code: ``'{character_code}'``
+    {canonical_name_doc}{alias_doc}
+    """
+
+    add_newdoc('numpy.core.numerictypes', obj, docstring)
+
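+# The generated docstring ends in an rST field list; for ``np.double`` on a
+# typical Linux x86_64 build it would look roughly like this (a sketch,
+# platform-dependent):
+#
+#     :Character code: ``'d'``
+#     :Alias: `numpy.float_`
+#     :Alias on this platform (Linux x86_64): `numpy.float64`: 64-bit precision
+#     floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa.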
+
+add_newdoc_for_scalar_type('bool_', [],
+    """
+    Boolean type (True or False), stored as a byte.
+
+    .. warning::
+
+       The :class:`bool_` type is not a subclass of the :class:`int_` type
+       (the :class:`bool_` is not even a number type). This is different
+       than Python's default implementation of :class:`bool` as a
+       sub-class of :class:`int`.
+    """)
+
+add_newdoc_for_scalar_type('byte', [],
+    """
+    Signed integer type, compatible with C ``char``.
+    """)
+
+add_newdoc_for_scalar_type('short', [],
+    """
+    Signed integer type, compatible with C ``short``.
+    """)
+
+add_newdoc_for_scalar_type('intc', [],
+    """
+    Signed integer type, compatible with C ``int``.
+    """)
+
+add_newdoc_for_scalar_type('int_', [],
+    """
+    Signed integer type, compatible with Python `int` and C ``long``.
+    """)
+
+add_newdoc_for_scalar_type('longlong', [],
+    """
+    Signed integer type, compatible with C ``long long``.
+    """)
+
+add_newdoc_for_scalar_type('ubyte', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned char``.
+    """)
+
+add_newdoc_for_scalar_type('ushort', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned short``.
+    """)
+
+add_newdoc_for_scalar_type('uintc', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned int``.
+    """)
+
+add_newdoc_for_scalar_type('uint', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned long``.
+    """)
+
+add_newdoc_for_scalar_type('ulonglong', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned long long``.
+    """)
+
+add_newdoc_for_scalar_type('half', [],
+    """
+    Half-precision floating-point number type.
+    """)
+
+add_newdoc_for_scalar_type('single', [],
+    """
+    Single-precision floating-point number type, compatible with C ``float``.
+    """)
+
+add_newdoc_for_scalar_type('double', ['float_'],
+    """
+    Double-precision floating-point number type, compatible with Python `float`
+    and C ``double``.
+    """)
+
+add_newdoc_for_scalar_type('longdouble', ['longfloat'],
+    """
+    Extended-precision floating-point number type, compatible with C
+    ``long double`` but not necessarily with IEEE 754 quadruple-precision.
+    """)
+
+add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
+    """
+    Complex number type composed of two single-precision floating-point
+    numbers.
+    """)
+
+add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
+    """
+    Complex number type composed of two double-precision floating-point
+    numbers, compatible with Python `complex`.
+    """)
+
+add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
+    """
+    Complex number type composed of two extended-precision floating-point
+    numbers.
+    """)
+
+add_newdoc_for_scalar_type('object_', [],
+    """
+    Any Python object.
+    """)
+
+add_newdoc_for_scalar_type('str_', ['unicode_'],
+    r"""
+    A unicode string.
+
+    This type strips trailing null codepoints.
+
+    >>> s = np.str_("abc\x00")
+    >>> s
+    'abc'
+
+    Unlike the builtin `str`, this supports the :ref:`python:bufferobjects`, exposing its
+    contents as UCS4:
+
+    >>> m = memoryview(np.str_("abc"))
+    >>> m.format
+    '3w'
+    >>> m.tobytes()
+    b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
+    """)
+
+add_newdoc_for_scalar_type('bytes_', ['string_'],
+    r"""
+    A byte string.
+
+    When used in arrays, this type strips trailing null bytes.
+    """)
+
+add_newdoc_for_scalar_type('void', [],
+    r"""
+    np.void(length_or_data, /, dtype=None)
+
+    Create a new structured or unstructured void scalar.
+
+    Parameters
+    ----------
+    length_or_data : int, array-like, bytes-like, object
+       One of multiple meanings (see notes).  The length or
+       bytes data of an unstructured void.  Or alternatively,
+       the data to be stored in the new scalar when `dtype`
+       is provided.
+       This can be an array-like, in which case an array may
+       be returned.
+    dtype : dtype, optional
+        If provided, the dtype of the new scalar.  This dtype must
+        be a "void" dtype (i.e. a structured or unstructured void,
+        see also :ref:`defining-structured-types`).
+
+        .. versionadded:: 1.24
+
+    Notes
+    -----
+    For historical reasons and because void scalars can represent both
+    arbitrary byte data and structured dtypes, the void constructor
+    has three calling conventions:
+
+    1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
+       ``\0`` bytes.  The 5 can be a Python or NumPy integer.
+    2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
+       The dtype itemsize will match the byte string length, here ``"V10"``.
+    3. When a ``dtype=`` is passed the call is roughly the same as an
+       array creation.  However, a void scalar rather than array is returned.
+
+    Please see the examples which show all three different conventions.
+
+    Examples
+    --------
+    >>> np.void(5)
+    void(b'\x00\x00\x00\x00\x00')
+    >>> np.void(b'abcd')
+    void(b'\x61\x62\x63\x64')
+    >>> np.void((5, 3.2, "eggs"), dtype="i,d,S5")
+    (5, 3.2, b'eggs')  # looks like a tuple, but is `np.void`
+    >>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
+    (3, 3)  # looks like a tuple, but is `np.void`
+
+    """)
+
+add_newdoc_for_scalar_type('datetime64', [],
+    """
+    If created from a 64-bit integer, it represents an offset from
+    ``1970-01-01T00:00:00``.
+    If created from a string, the string can be in ISO 8601 date
+    or datetime format.
+
+    >>> np.datetime64(10, 'Y')
+    numpy.datetime64('1980')
+    >>> np.datetime64('1980', 'Y')
+    numpy.datetime64('1980')
+    >>> np.datetime64(10, 'D')
+    numpy.datetime64('1970-01-11')
+
+    See :ref:`arrays.datetime` for more information.
+    """)
+
+add_newdoc_for_scalar_type('timedelta64', [],
+    """
+    A timedelta stored as a 64-bit integer.
+
+    See :ref:`arrays.datetime` for more information.
+    """)
+
+add_newdoc('numpy.core.numerictypes', "integer", ('is_integer',
+    """
+    integer.is_integer() -> bool
+
+    Return ``True`` if the number is finite with integral value.
+
+    .. versionadded:: 1.22
+
+    Examples
+    --------
+    >>> np.int64(-2).is_integer()
+    True
+    >>> np.uint32(5).is_integer()
+    True
+    """))
+
+# TODO: work out how to put this on the base class, np.floating
+for float_name in ('half', 'single', 'double', 'longdouble'):
+    add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio',
+        """
+        {ftype}.as_integer_ratio() -> (int, int)
+
+        Return a pair of integers, whose ratio is exactly equal to the original
+        floating point number, and with a positive denominator.
+        Raise `OverflowError` on infinities and a `ValueError` on NaNs.
+
+        >>> np.{ftype}(10.0).as_integer_ratio()
+        (10, 1)
+        >>> np.{ftype}(0.0).as_integer_ratio()
+        (0, 1)
+        >>> np.{ftype}(-.25).as_integer_ratio()
+        (-1, 4)
+        """.format(ftype=float_name)))
+
+    add_newdoc('numpy.core.numerictypes', float_name, ('is_integer',
+        f"""
+        {float_name}.is_integer() -> bool
+
+        Return ``True`` if the floating point number is finite with integral
+        value, and ``False`` otherwise.
+
+        .. versionadded:: 1.22
+
+        Examples
+        --------
+        >>> np.{float_name}(-2.0).is_integer()
+        True
+        >>> np.{float_name}(3.2).is_integer()
+        False
+        """))
+
+for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
+        'int64', 'uint64'):
+    # Add negative examples for signed cases by checking typecode
+    add_newdoc('numpy.core.numerictypes', int_name, ('bit_count',
+        f"""
+        {int_name}.bit_count() -> int
+
+        Computes the number of 1-bits in the absolute value of the input.
+        Analogous to the builtin `int.bit_count` or ``popcount`` in C++.
+
+        Examples
+        --------
+        >>> np.{int_name}(127).bit_count()
+        7""" +
+        (f"""
+        >>> np.{int_name}(-127).bit_count()
+        7
+        """ if dtype(int_name).char.islower() else "")))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_asarray.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_asarray.py
new file mode 100644
index 00000000..a9abc5a8
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_asarray.py
@@ -0,0 +1,134 @@
+"""
+Functions in the ``as*array`` family that promote array-likes into arrays.
+
+`require` fits this category despite its name not matching this pattern.
+"""
+from .overrides import (
+    array_function_dispatch,
+    set_array_function_like_doc,
+    set_module,
+)
+from .multiarray import array, asanyarray
+
+
+__all__ = ["require"]
+
+
+POSSIBLE_FLAGS = {
+    'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
+    'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
+    'A': 'A', 'ALIGNED': 'A',
+    'W': 'W', 'WRITEABLE': 'W',
+    'O': 'O', 'OWNDATA': 'O',
+    'E': 'E', 'ENSUREARRAY': 'E'
+}
+
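+# Requirement strings are matched case-insensitively in both their short and
+# long spellings; a sketch (sorted, since sets are unordered):
+#
+#     >>> sorted(POSSIBLE_FLAGS[x.upper()] for x in ('Aligned', 'w', 'F_CONTIGUOUS'))
+#     ['A', 'F', 'W']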
+
+@set_array_function_like_doc
+@set_module('numpy')
+def require(a, dtype=None, requirements=None, *, like=None):
+    """
+    Return an ndarray of the provided type that satisfies requirements.
+
+    This function is useful to be sure that an array with the correct flags
+    is returned for passing to compiled code (perhaps through ctypes).
+
+    Parameters
+    ----------
+    a : array_like
+       The object to be converted to a type-and-requirement-satisfying array.
+    dtype : data-type
+       The required data-type. If None, preserve the current dtype. If your
+       application requires the data to be in native byteorder, include
+       a byteorder specification as a part of the dtype specification.
+    requirements : str or sequence of str
+       The requirements list can be any of the following
+
+       * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
+       * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
+       * 'ALIGNED' ('A')      - ensure a data-type aligned array
+       * 'WRITEABLE' ('W')    - ensure a writable array
+       * 'OWNDATA' ('O')      - ensure an array that owns its own data
+       * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Array with specified requirements and type if given.
+
+    See Also
+    --------
+    asarray : Convert input to an ndarray.
+    asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
+    ascontiguousarray : Convert input to a contiguous array.
+    asfortranarray : Convert input to an ndarray with column-major
+                     memory order.
+    ndarray.flags : Information about the memory layout of the array.
+
+    Notes
+    -----
+    The returned array will be guaranteed to have the listed requirements
+    by making a copy if needed.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2,3)
+    >>> x.flags
+      C_CONTIGUOUS : True
+      F_CONTIGUOUS : False
+      OWNDATA : False
+      WRITEABLE : True
+      ALIGNED : True
+      WRITEBACKIFCOPY : False
+
+    >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
+    >>> y.flags
+      C_CONTIGUOUS : False
+      F_CONTIGUOUS : True
+      OWNDATA : True
+      WRITEABLE : True
+      ALIGNED : True
+      WRITEBACKIFCOPY : False
+
+    """
+    if like is not None:
+        return _require_with_like(
+            like,
+            a,
+            dtype=dtype,
+            requirements=requirements,
+        )
+
+    if not requirements:
+        return asanyarray(a, dtype=dtype)
+
+    requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements}
+
+    if 'E' in requirements:
+        requirements.remove('E')
+        subok = False
+    else:
+        subok = True
+
+    order = 'A'
+    if requirements >= {'C', 'F'}:
+        raise ValueError('Cannot specify both "C" and "F" order')
+    elif 'F' in requirements:
+        order = 'F'
+        requirements.remove('F')
+    elif 'C' in requirements:
+        order = 'C'
+        requirements.remove('C')
+
+    arr = array(a, dtype=dtype, order=order, copy=False, subok=subok)
+
+    for prop in requirements:
+        if not arr.flags[prop]:
+            return arr.copy(order)
+    return arr
+
+
+_require_with_like = array_function_dispatch()(require)
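+# A sketch of the 'E' (ENSUREARRAY) requirement, which drops ndarray
+# subclasses such as np.matrix:
+#
+#     >>> import numpy as np
+#     >>> m = np.asmatrix('1 2; 3 4')
+#     >>> type(np.require(m, requirements=['E']))
+#     <class 'numpy.ndarray'>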
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_asarray.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_asarray.pyi
new file mode 100644
index 00000000..69d1528d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_asarray.pyi
@@ -0,0 +1,42 @@
+from collections.abc import Iterable
+from typing import Any, TypeVar, Union, overload, Literal
+
+from numpy import ndarray
+from numpy._typing import DTypeLike, _SupportsArrayFunc
+
+_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
+
+_Requirements = Literal[
+    "C", "C_CONTIGUOUS", "CONTIGUOUS",
+    "F", "F_CONTIGUOUS", "FORTRAN",
+    "A", "ALIGNED",
+    "W", "WRITEABLE",
+    "O", "OWNDATA"
+]
+_E = Literal["E", "ENSUREARRAY"]
+_RequirementsWithE = Union[_Requirements, _E]
+
+@overload
+def require(
+    a: _ArrayType,
+    dtype: None = ...,
+    requirements: None | _Requirements | Iterable[_Requirements] = ...,
+    *,
+    like: _SupportsArrayFunc = ...
+) -> _ArrayType: ...
+@overload
+def require(
+    a: object,
+    dtype: DTypeLike = ...,
+    requirements: _E | Iterable[_RequirementsWithE] = ...,
+    *,
+    like: _SupportsArrayFunc = ...
+) -> ndarray[Any, Any]: ...
+@overload
+def require(
+    a: object,
+    dtype: DTypeLike = ...,
+    requirements: None | _Requirements | Iterable[_Requirements] = ...,
+    *,
+    like: _SupportsArrayFunc = ...
+) -> ndarray[Any, Any]: ...
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_dtype.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_dtype.py
new file mode 100644
index 00000000..ff50f519
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_dtype.py
@@ -0,0 +1,369 @@
+"""
+A place for code to be called from the implementation of np.dtype
+
+String handling is much easier to do correctly in python.
+"""
+import numpy as np
+
+
+_kind_to_stem = {
+    'u': 'uint',
+    'i': 'int',
+    'c': 'complex',
+    'f': 'float',
+    'b': 'bool',
+    'V': 'void',
+    'O': 'object',
+    'M': 'datetime',
+    'm': 'timedelta',
+    'S': 'bytes',
+    'U': 'str',
+}
+
+
+def _kind_name(dtype):
+    try:
+        return _kind_to_stem[dtype.kind]
+    except KeyError as e:
+        raise RuntimeError(
+            "internal dtype error, unknown kind {!r}"
+            .format(dtype.kind)
+        ) from None
+
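+# e.g., kinds map to the stems used in type names (a sketch):
+#
+#     >>> _kind_name(np.dtype('f8'))
+#     'float'
+#     >>> _kind_name(np.dtype('m8[ms]'))
+#     'timedelta'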
+
+def __str__(dtype):
+    if dtype.fields is not None:
+        return _struct_str(dtype, include_align=True)
+    elif dtype.subdtype:
+        return _subarray_str(dtype)
+    elif issubclass(dtype.type, np.flexible) or not dtype.isnative:
+        return dtype.str
+    else:
+        return dtype.name
+
+
+def __repr__(dtype):
+    arg_str = _construction_repr(dtype, include_align=False)
+    if dtype.isalignedstruct:
+        arg_str = arg_str + ", align=True"
+    return "dtype({})".format(arg_str)
+
+
+def _unpack_field(dtype, offset, title=None):
+    """
+    Helper function to normalize the items in dtype.fields.
+
+    Call as:
+
+    dtype, offset, title = _unpack_field(*dtype.fields[name])
+    """
+    return dtype, offset, title
+
+
+def _isunsized(dtype):
+    # PyDataType_ISUNSIZED
+    return dtype.itemsize == 0
+
+
+def _construction_repr(dtype, include_align=False, short=False):
+    """
+    Creates a string repr of the dtype, excluding the 'dtype()' part
+    surrounding the object. This object may be a string, a list, or
+    a dict depending on the nature of the dtype. This
+    is the object passed as the first parameter to the dtype
+    constructor, and if no additional constructor parameters are
+    given, will reproduce the exact memory layout.
+
+    Parameters
+    ----------
+    short : bool
+        If true, this creates a shorter repr using 'kind' and 'itemsize', instead
+        of the longer type name.
+
+    include_align : bool
+        If true, this includes the 'align=True' parameter
+        inside the struct dtype construction dict when needed. Use this flag
+        if you want a proper repr string without the 'dtype()' part around it.
+
+        If false, this does not preserve the
+        'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for
+        struct arrays like the regular repr does, because the 'align'
+        flag is not part of first dtype constructor parameter. This
+        mode is intended for a full 'repr', where the 'align=True' is
+        provided as the second parameter.
+    """
+    if dtype.fields is not None:
+        return _struct_str(dtype, include_align=include_align)
+    elif dtype.subdtype:
+        return _subarray_str(dtype)
+    else:
+        return _scalar_str(dtype, short=short)
+
+
+def _scalar_str(dtype, short):
+    byteorder = _byte_order_str(dtype)
+
+    if dtype.type == np.bool_:
+        if short:
+            return "'?'"
+        else:
+            return "'bool'"
+
+    elif dtype.type == np.object_:
+        # The object reference may be different sizes on different
+        # platforms, so it should never include the itemsize here.
+        return "'O'"
+
+    elif dtype.type == np.bytes_:
+        if _isunsized(dtype):
+            return "'S'"
+        else:
+            return "'S%d'" % dtype.itemsize
+
+    elif dtype.type == np.str_:
+        if _isunsized(dtype):
+            return "'%sU'" % byteorder
+        else:
+            return "'%sU%d'" % (byteorder, dtype.itemsize / 4)
+
+    # unlike the other types, subclasses of void are preserved - but
+    # historically the repr does not actually reveal the subclass
+    elif issubclass(dtype.type, np.void):
+        if _isunsized(dtype):
+            return "'V'"
+        else:
+            return "'V%d'" % dtype.itemsize
+
+    elif dtype.type == np.datetime64:
+        return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype))
+
+    elif dtype.type == np.timedelta64:
+        return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype))
+
+    elif np.issubdtype(dtype, np.number):
+        # Short repr with endianness, like '<f8'
+        if short or dtype.byteorder not in ('=', '|'):
+            return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)
+
+        # Longer repr, like 'float64'
+        else:
+            return "'%s'" % dtype.name
+
+    else:
+        raise RuntimeError(
+            "Internal error: NumPy dtype unrecognized type number")
+
+
+def _byte_order_str(dtype):
+    """ Convert dtype byteorder to a string """
+    # hack to obtain the native and swapped byte order characters
+    swapped = np.dtype(int).newbyteorder('S')
+    native = swapped.newbyteorder('S')
+
+    byteorder = dtype.byteorder
+    if byteorder == '=':
+        return native.byteorder
+    if byteorder == 'S':
+        # TODO: this path can never be reached
+        return swapped.byteorder
+    elif byteorder == '|':
+        return ''
+    else:
+        return byteorder
+
+
+def _datetime_metadata_str(dtype):
+    # TODO: this duplicates the C metastr_to_unicode functionality
+    unit, count = np.datetime_data(dtype)
+    if unit == 'generic':
+        return ''
+    elif count == 1:
+        return '[{}]'.format(unit)
+    else:
+        return '[{}{}]'.format(count, unit)
+
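+# e.g., the count is only shown when it differs from 1 (a sketch):
+#
+#     >>> _datetime_metadata_str(np.dtype('datetime64[s]'))
+#     '[s]'
+#     >>> _datetime_metadata_str(np.dtype('timedelta64[25s]'))
+#     '[25s]'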
+
+def _struct_dict_str(dtype, includealignedflag):
+    # unpack the fields dictionary into ls
+    names = dtype.names
+    fld_dtypes = []
+    offsets = []
+    titles = []
+    for name in names:
+        fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
+        fld_dtypes.append(fld_dtype)
+        offsets.append(offset)
+        titles.append(title)
+
+    # Build up a string to make the dictionary
+
+    if np.core.arrayprint._get_legacy_print_mode() <= 121:
+        colon = ":"
+        fieldsep = ","
+    else:
+        colon = ": "
+        fieldsep = ", "
+
+    # First, the names
+    ret = "{'names'%s[" % colon
+    ret += fieldsep.join(repr(name) for name in names)
+
+    # Second, the formats
+    ret += "], 'formats'%s[" % colon
+    ret += fieldsep.join(
+        _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)
+
+    # Third, the offsets
+    ret += "], 'offsets'%s[" % colon
+    ret += fieldsep.join("%d" % offset for offset in offsets)
+
+    # Fourth, the titles
+    if any(title is not None for title in titles):
+        ret += "], 'titles'%s[" % colon
+        ret += fieldsep.join(repr(title) for title in titles)
+
+    # Fifth, the itemsize
+    ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize)
+
+    if (includealignedflag and dtype.isalignedstruct):
+        # Finally, the aligned flag
+        ret += ", 'aligned'%sTrue}" % colon
+    else:
+        ret += "}"
+
+    return ret
+
+
+def _aligned_offset(offset, alignment):
+    # round up offset:
+    return - (-offset // alignment) * alignment
+
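+# The double negation turns floor division into ceiling division (a sketch):
+#
+#     >>> _aligned_offset(5, 4)
+#     8
+#     >>> _aligned_offset(8, 4)
+#     8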
+
+def _is_packed(dtype):
+    """
+    Checks whether the structured data type in 'dtype'
+    has a simple layout, where all the fields are in order,
+    and follow each other with no alignment padding.
+
+    When this returns true, the dtype can be reconstructed
+    from a list of the field names and dtypes with no additional
+    dtype parameters.
+
+    Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
+    """
+    align = dtype.isalignedstruct
+    max_alignment = 1
+    total_offset = 0
+    for name in dtype.names:
+        fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
+
+        if align:
+            total_offset = _aligned_offset(total_offset, fld_dtype.alignment)
+            max_alignment = max(max_alignment, fld_dtype.alignment)
+
+        if fld_offset != total_offset:
+            return False
+        total_offset += fld_dtype.itemsize
+
+    if align:
+        total_offset = _aligned_offset(total_offset, max_alignment)
+
+    if total_offset != dtype.itemsize:
+        return False
+    return True
+
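+# e.g., an explicit offset that leaves a gap makes the layout unpacked
+# (a sketch):
+#
+#     >>> _is_packed(np.dtype([('a', 'u1'), ('b', '<i4')]))
+#     True
+#     >>> _is_packed(np.dtype({'names': ['a'], 'formats': ['u1'],
+#     ...                      'offsets': [2], 'itemsize': 3}))
+#     False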
+
+def _struct_list_str(dtype):
+    items = []
+    for name in dtype.names:
+        fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
+
+        item = "("
+        if title is not None:
+            item += "({!r}, {!r}), ".format(title, name)
+        else:
+            item += "{!r}, ".format(name)
+        # Special case subarray handling here
+        if fld_dtype.subdtype is not None:
+            base, shape = fld_dtype.subdtype
+            item += "{}, {}".format(
+                _construction_repr(base, short=True),
+                shape
+            )
+        else:
+            item += _construction_repr(fld_dtype, short=True)
+
+        item += ")"
+        items.append(item)
+
+    return "[" + ", ".join(items) + "]"
+
+
+def _struct_str(dtype, include_align):
+    # The list str representation can't include the 'align=' flag,
+    # so if it is requested and the struct has the aligned flag set,
+    # we must use the dict str instead.
+    if not (include_align and dtype.isalignedstruct) and _is_packed(dtype):
+        sub = _struct_list_str(dtype)
+
+    else:
+        sub = _struct_dict_str(dtype, include_align)
+
+    # If the data type isn't the default, void, show it
+    if dtype.type != np.void:
+        return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub)
+    else:
+        return sub
+
+
+def _subarray_str(dtype):
+    base, shape = dtype.subdtype
+    return "({}, {})".format(
+        _construction_repr(base, short=True),
+        shape
+    )
+
+
+def _name_includes_bit_suffix(dtype):
+    if dtype.type == np.object_:
+        # pointer size varies by system, best to omit it
+        return False
+    elif dtype.type == np.bool_:
+        # implied
+        return False
+    elif dtype.type is None:
+        return True
+    elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype):
+        # unspecified
+        return False
+    else:
+        return True
+
+
+def _name_get(dtype):
+    # provides dtype.name.__get__, documented as returning a "bit name"
+
+    if dtype.isbuiltin == 2:
+        # user dtypes don't promise to do anything special
+        return dtype.type.__name__
+
+    if dtype.kind == '\x00':
+        name = type(dtype).__name__
+    elif issubclass(dtype.type, np.void):
+        # historically, void subclasses preserve their name, eg `record64`
+        name = dtype.type.__name__
+    else:
+        name = _kind_name(dtype)
+
+    # append bit counts
+    if _name_includes_bit_suffix(dtype):
+        name += "{}".format(dtype.itemsize * 8)
+
+    # append metadata to datetimes
+    if dtype.type in (np.datetime64, np.timedelta64):
+        name += _datetime_metadata_str(dtype)
+
+    return name
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_dtype_ctypes.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_dtype_ctypes.py
new file mode 100644
index 00000000..6d7cbb24
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_dtype_ctypes.py
@@ -0,0 +1,117 @@
+"""
+Conversion from ctypes to dtype.
+
+In an ideal world, we could achieve this through the PEP3118 buffer protocol,
+something like::
+
+    def dtype_from_ctypes_type(t):
+        # needed to ensure that the shape of `t` is within memoryview.format
+        class DummyStruct(ctypes.Structure):
+            _fields_ = [('a', t)]
+
+        # empty to avoid memory allocation
+        ctype_0 = (DummyStruct * 0)()
+        mv = memoryview(ctype_0)
+
+        # convert the struct, and slice back out the field
+        return _dtype_from_pep3118(mv.format)['a']
+
+Unfortunately, this fails because:
+
+* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
+* PEP3118 cannot represent unions, but both numpy and ctypes can
+* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
+"""
+
+# We delay-import ctypes for distributions that do not include it.
+# While this module is not used unless the user passes in ctypes
+# members, it is eagerly imported from numpy/core/__init__.py.
+import numpy as np
+
+
+def _from_ctypes_array(t):
+    return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,)))
+
+
+def _from_ctypes_structure(t):
+    for item in t._fields_:
+        if len(item) > 2:
+            raise TypeError(
+                "ctypes bitfields have no dtype equivalent")
+
+    if hasattr(t, "_pack_"):
+        import ctypes
+        formats = []
+        offsets = []
+        names = []
+        current_offset = 0
+        for fname, ftyp in t._fields_:
+            names.append(fname)
+            formats.append(dtype_from_ctypes_type(ftyp))
+            # Each type has a default offset; this is platform-dependent for some types.
+            effective_pack = min(t._pack_, ctypes.alignment(ftyp))
+            current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack
+            offsets.append(current_offset)
+            current_offset += ctypes.sizeof(ftyp)
+
+        return np.dtype(dict(
+            formats=formats,
+            offsets=offsets,
+            names=names,
+            itemsize=ctypes.sizeof(t)))
+    else:
+        fields = []
+        for fname, ftyp in t._fields_:
+            fields.append((fname, dtype_from_ctypes_type(ftyp)))
+
+        # by default, ctypes structs are aligned
+        return np.dtype(fields, align=True)
+
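+# A sketch of the packed branch; ``Packed`` is a hypothetical struct, and
+# the repr assumes a little-endian platform:
+#
+#     >>> import ctypes
+#     >>> class Packed(ctypes.Structure):
+#     ...     _pack_ = 1
+#     ...     _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
+#     >>> dtype_from_ctypes_type(Packed)
+#     dtype([('a', 'u1'), ('b', '<u4')])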
+
+def _from_ctypes_scalar(t):
+    """
+    Return the dtype type with endianness included if it's the case
+    """
+    if getattr(t, '__ctype_be__', None) is t:
+        return np.dtype('>' + t._type_)
+    elif getattr(t, '__ctype_le__', None) is t:
+        return np.dtype('<' + t._type_)
+    else:
+        return np.dtype(t._type_)
+
+
+def _from_ctypes_union(t):
+    import ctypes
+    formats = []
+    offsets = []
+    names = []
+    for fname, ftyp in t._fields_:
+        names.append(fname)
+        formats.append(dtype_from_ctypes_type(ftyp))
+        offsets.append(0)  # Union fields are offset to 0
+
+    return np.dtype(dict(
+        formats=formats,
+        offsets=offsets,
+        names=names,
+        itemsize=ctypes.sizeof(t)))
+
+
+def dtype_from_ctypes_type(t):
+    """
+    Construct a dtype object from a ctypes type
+    """
+    import _ctypes
+    if issubclass(t, _ctypes.Array):
+        return _from_ctypes_array(t)
+    elif issubclass(t, _ctypes._Pointer):
+        raise TypeError("ctypes pointers have no dtype equivalent")
+    elif issubclass(t, _ctypes.Structure):
+        return _from_ctypes_structure(t)
+    elif issubclass(t, _ctypes.Union):
+        return _from_ctypes_union(t)
+    elif isinstance(getattr(t, '_type_', None), str):
+        return _from_ctypes_scalar(t)
+    else:
+        raise NotImplementedError(
+            "Unknown ctypes type {}".format(t.__name__))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_exceptions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_exceptions.py
new file mode 100644
index 00000000..87d4213a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_exceptions.py
@@ -0,0 +1,172 @@
+"""
+Various richly-typed exceptions that also help us deal with string
+formatting in Python, where it's easier.
+
+By putting the formatting in `__str__`, we also avoid paying the cost for
+users who silence the exceptions.
+"""
+from .._utils import set_module
+
+def _unpack_tuple(tup):
+    if len(tup) == 1:
+        return tup[0]
+    else:
+        return tup
+
+
+def _display_as_base(cls):
+    """
+    A decorator that makes an exception class look like its base.
+
+    We use this to hide subclasses that are implementation details - the user
+    should catch the base type, which is what the traceback will show them.
+
+    Classes decorated with this decorator are subject to removal without a
+    deprecation warning.
+    """
+    assert issubclass(cls, Exception)
+    cls.__name__ = cls.__base__.__name__
+    return cls
+
+
+class UFuncTypeError(TypeError):
+    """ Base class for all ufunc exceptions """
+    def __init__(self, ufunc):
+        self.ufunc = ufunc
+
+
+@_display_as_base
+class _UFuncNoLoopError(UFuncTypeError):
+    """ Thrown when a ufunc loop cannot be found """
+    def __init__(self, ufunc, dtypes):
+        super().__init__(ufunc)
+        self.dtypes = tuple(dtypes)
+
+    def __str__(self):
+        return (
+            "ufunc {!r} did not contain a loop with signature matching types "
+            "{!r} -> {!r}"
+        ).format(
+            self.ufunc.__name__,
+            _unpack_tuple(self.dtypes[:self.ufunc.nin]),
+            _unpack_tuple(self.dtypes[self.ufunc.nin:])
+        )
+
+
+@_display_as_base
+class _UFuncBinaryResolutionError(_UFuncNoLoopError):
+    """ Thrown when a binary resolution fails """
+    def __init__(self, ufunc, dtypes):
+        super().__init__(ufunc, dtypes)
+        assert len(self.dtypes) == 2
+
+    def __str__(self):
+        return (
+            "ufunc {!r} cannot use operands with types {!r} and {!r}"
+        ).format(
+            self.ufunc.__name__, *self.dtypes
+        )
+
+
+@_display_as_base
+class _UFuncCastingError(UFuncTypeError):
+    def __init__(self, ufunc, casting, from_, to):
+        super().__init__(ufunc)
+        self.casting = casting
+        self.from_ = from_
+        self.to = to
+
+
+@_display_as_base
+class _UFuncInputCastingError(_UFuncCastingError):
+    """ Thrown when a ufunc input cannot be casted """
+    def __init__(self, ufunc, casting, from_, to, i):
+        super().__init__(ufunc, casting, from_, to)
+        self.in_i = i
+
+    def __str__(self):
+        # only show the number if more than one input exists
+        i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else ""
+        return (
+            "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting "
+            "rule {!r}"
+        ).format(
+            self.ufunc.__name__, i_str, self.from_, self.to, self.casting
+        )
+
+
+@_display_as_base
+class _UFuncOutputCastingError(_UFuncCastingError):
+    """ Thrown when a ufunc output cannot be casted """
+    def __init__(self, ufunc, casting, from_, to, i):
+        super().__init__(ufunc, casting, from_, to)
+        self.out_i = i
+
+    def __str__(self):
+        # only show the number if more than one output exists
+        i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else ""
+        return (
+            "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting "
+            "rule {!r}"
+        ).format(
+            self.ufunc.__name__, i_str, self.from_, self.to, self.casting
+        )
+
+
+@_display_as_base
+class _ArrayMemoryError(MemoryError):
+    """ Thrown when an array cannot be allocated"""
+    def __init__(self, shape, dtype):
+        self.shape = shape
+        self.dtype = dtype
+
+    @property
+    def _total_size(self):
+        num_bytes = self.dtype.itemsize
+        for dim in self.shape:
+            num_bytes *= dim
+        return num_bytes
+
+    @staticmethod
+    def _size_to_string(num_bytes):
+        """ Convert a number of bytes into a binary size string """
+
+        # https://en.wikipedia.org/wiki/Binary_prefix
+        LOG2_STEP = 10
+        STEP = 1024
+        units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']
+
+        unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
+        unit_val = 1 << (unit_i * LOG2_STEP)
+        n_units = num_bytes / unit_val
+        del unit_val
+
+        # ensure we pick a unit that is correct after rounding
+        if round(n_units) == STEP:
+            unit_i += 1
+            n_units /= STEP
+
+        # deal with sizes so large that we don't have units for them
+        if unit_i >= len(units):
+            new_unit_i = len(units) - 1
+            n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP)
+            unit_i = new_unit_i
+
+        unit_name = units[unit_i]
+        # format with a sensible number of digits
+        if unit_i == 0:
+            # no decimal point on bytes
+            return '{:.0f} {}'.format(n_units, unit_name)
+        elif round(n_units) < 1000:
+            # 3 significant figures, if none are dropped to the left of the .
+            return '{:#.3g} {}'.format(n_units, unit_name)
+        else:
+            # just give all the digits otherwise
+            return '{:#.0f} {}'.format(n_units, unit_name)
+
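+    # e.g. (a sketch):
+    #
+    #     >>> _ArrayMemoryError._size_to_string(1234)
+    #     '1.21 KiB'
+    #     >>> _ArrayMemoryError._size_to_string(2**20)
+    #     '1.00 MiB'
+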
+    def __str__(self):
+        size_str = self._size_to_string(self._total_size)
+        return (
+            "Unable to allocate {} for an array with shape {} and data type {}"
+            .format(size_str, self.shape, self.dtype)
+        )
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_internal.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_internal.py
new file mode 100644
index 00000000..c7838588
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_internal.py
@@ -0,0 +1,935 @@
+"""
+A place for internal code
+
+Some things are more easily handled in Python.
+
+"""
+import ast
+import re
+import sys
+import warnings
+
+from ..exceptions import DTypePromotionError
+from .multiarray import dtype, array, ndarray, promote_types
+try:
+    import ctypes
+except ImportError:
+    ctypes = None
+
+IS_PYPY = sys.implementation.name == 'pypy'
+
+if sys.byteorder == 'little':
+    _nbo = '<'
+else:
+    _nbo = '>'
+
+def _makenames_list(adict, align):
+    allfields = []
+
+    for fname, obj in adict.items():
+        n = len(obj)
+        if not isinstance(obj, tuple) or n not in (2, 3):
+            raise ValueError("entry not a 2- or 3- tuple")
+        if n > 2 and obj[2] == fname:
+            continue
+        num = int(obj[1])
+        if num < 0:
+            raise ValueError("invalid offset.")
+        format = dtype(obj[0], align=align)
+        if n > 2:
+            title = obj[2]
+        else:
+            title = None
+        allfields.append((fname, format, num, title))
+    # sort by offsets
+    allfields.sort(key=lambda x: x[2])
+    names = [x[0] for x in allfields]
+    formats = [x[1] for x in allfields]
+    offsets = [x[2] for x in allfields]
+    titles = [x[3] for x in allfields]
+
+    return names, formats, offsets, titles
+
+# Called in PyArray_DescrConverter function when
+#  a dictionary without "names" and "formats"
+#  fields is used as a data-type descriptor.
+def _usefields(adict, align):
+    try:
+        names = adict[-1]
+    except KeyError:
+        names = None
+    if names is None:
+        names, formats, offsets, titles = _makenames_list(adict, align)
+    else:
+        formats = []
+        offsets = []
+        titles = []
+        for name in names:
+            res = adict[name]
+            formats.append(res[0])
+            offsets.append(res[1])
+            if len(res) > 2:
+                titles.append(res[2])
+            else:
+                titles.append(None)
+
+    return dtype({"names": names,
+                  "formats": formats,
+                  "offsets": offsets,
+                  "titles": titles}, align)
+
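+# A sketch with a plain {name: (format, offset)} descriptor, assuming a
+# little-endian platform:
+#
+#     >>> _usefields({'a': ('i4', 0), 'b': ('f8', 4)}, align=False)
+#     dtype([('a', '<i4'), ('b', '<f8')])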
+
+# construct an array_protocol descriptor list
+#  from the fields attribute of a descriptor
+# This calls itself recursively but should eventually hit
+#  a descriptor that has no fields and then return
+#  a simple typestring
+
+def _array_descr(descriptor):
+    fields = descriptor.fields
+    if fields is None:
+        subdtype = descriptor.subdtype
+        if subdtype is None:
+            if descriptor.metadata is None:
+                return descriptor.str
+            else:
+                new = descriptor.metadata.copy()
+                if new:
+                    return (descriptor.str, new)
+                else:
+                    return descriptor.str
+        else:
+            return (_array_descr(subdtype[0]), subdtype[1])
+
+    names = descriptor.names
+    ordered_fields = [fields[x] + (x,) for x in names]
+    result = []
+    offset = 0
+    for field in ordered_fields:
+        if field[1] > offset:
+            num = field[1] - offset
+            result.append(('', f'|V{num}'))
+            offset += num
+        elif field[1] < offset:
+            raise ValueError(
+                "dtype.descr is not defined for types with overlapping or "
+                "out-of-order fields")
+        if len(field) > 3:
+            name = (field[2], field[3])
+        else:
+            name = field[2]
+        if field[0].subdtype:
+            tup = (name, _array_descr(field[0].subdtype[0]),
+                   field[0].subdtype[1])
+        else:
+            tup = (name, _array_descr(field[0]))
+        offset += field[0].itemsize
+        result.append(tup)
+
+    if descriptor.itemsize > offset:
+        num = descriptor.itemsize - offset
+        result.append(('', f'|V{num}'))
+
+    return result
+
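+# e.g., round-tripping a simple structured dtype (assuming little-endian):
+#
+#     >>> _array_descr(dtype([('x', '<i4'), ('y', '<f8')]))
+#     [('x', '<i4'), ('y', '<f8')]
+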
+# Build a new array from the information in a pickle.
+# Note that the name numpy.core._internal._reconstruct is embedded in
+# pickles of ndarrays made with NumPy before release 1.0
+# so don't remove the name here, or you'll
+# break backward compatibility.
+def _reconstruct(subtype, shape, dtype):
+    return ndarray.__new__(subtype, shape, dtype)
+
+
+# format_re was originally from numarray by J. Todd Miller
+
+format_re = re.compile(r'(?P<order1>[<>|=]?)'
+                       r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
+                       r'(?P<order2>[<>|=]?)'
+                       r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
+sep_re = re.compile(r'\s*,\s*')
+space_re = re.compile(r'\s+$')
+
+# astr is a string (perhaps comma separated)
+
+_convorder = {'=': _nbo}
+
+def _commastring(astr):
+    startindex = 0
+    result = []
+    while startindex < len(astr):
+        mo = format_re.match(astr, pos=startindex)
+        try:
+            (order1, repeats, order2, dtype) = mo.groups()
+        except (TypeError, AttributeError):
+            raise ValueError(
+                f'format number {len(result)+1} of "{astr}" is not recognized'
+                ) from None
+        startindex = mo.end()
+        # Separator or ending padding
+        if startindex < len(astr):
+            if space_re.match(astr, pos=startindex):
+                startindex = len(astr)
+            else:
+                mo = sep_re.match(astr, pos=startindex)
+                if not mo:
+                    raise ValueError(
+                        'format number %d of "%s" is not recognized' %
+                        (len(result)+1, astr))
+                startindex = mo.end()
+
+        if order2 == '':
+            order = order1
+        elif order1 == '':
+            order = order2
+        else:
+            order1 = _convorder.get(order1, order1)
+            order2 = _convorder.get(order2, order2)
+            if (order1 != order2):
+                raise ValueError(
+                    'inconsistent byte-order specification %s and %s' %
+                    (order1, order2))
+            order = order1
+
+        if order in ('|', '=', _nbo):
+            order = ''
+        dtype = order + dtype
+        if (repeats == ''):
+            newitem = dtype
+        else:
+            newitem = (dtype, ast.literal_eval(repeats))
+        result.append(newitem)
+
+    return result
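+
+# A minimal sketch of _commastring on a comma-separated spec; a parenthesized
+# prefix becomes a (dtype, shape) sub-array entry:
+#
+#     >>> from numpy.core._internal import _commastring
+#     >>> _commastring('i4, (2,3)f8')
+#     ['i4', ('f8', (2, 3))]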
+
+class dummy_ctype:
+    def __init__(self, cls):
+        self._cls = cls
+    def __mul__(self, other):
+        return self
+    def __call__(self, *other):
+        return self._cls(other)
+    def __eq__(self, other):
+        return self._cls == other._cls
+    def __ne__(self, other):
+        return self._cls != other._cls
+
+def _getintp_ctype():
+    val = _getintp_ctype.cache
+    if val is not None:
+        return val
+    if ctypes is None:
+        import numpy as np
+        val = dummy_ctype(np.intp)
+    else:
+        char = dtype('p').char
+        if char == 'i':
+            val = ctypes.c_int
+        elif char == 'l':
+            val = ctypes.c_long
+        elif char == 'q':
+            val = ctypes.c_longlong
+        else:
+            val = ctypes.c_long
+    _getintp_ctype.cache = val
+    return val
+_getintp_ctype.cache = None
+
+# Used for .ctypes attribute of ndarray
+
+class _missing_ctypes:
+    def cast(self, num, obj):
+        return num.value
+
+    class c_void_p:
+        def __init__(self, ptr):
+            self.value = ptr
+
+
+class _ctypes:
+    def __init__(self, array, ptr=None):
+        self._arr = array
+
+        if ctypes:
+            self._ctypes = ctypes
+            self._data = self._ctypes.c_void_p(ptr)
+        else:
+            # fake a pointer-like object that holds onto the reference
+            self._ctypes = _missing_ctypes()
+            self._data = self._ctypes.c_void_p(ptr)
+            self._data._objects = array
+
+        if self._arr.ndim == 0:
+            self._zerod = True
+        else:
+            self._zerod = False
+
+    def data_as(self, obj):
+        """
+        Return the data pointer cast to a particular c-types object.
+        For example, calling ``self._as_parameter_`` is equivalent to
+        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
+        pointer to a ctypes array of floating-point data:
+        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.
+
+        The returned pointer will keep a reference to the array.
+        """
+        # _ctypes.cast function causes a circular reference of self._data in
+        # self._data._objects. Attributes of self._data cannot be released
+        # until gc.collect is called. Make a copy of the pointer first then let
+        # it hold the array reference. This is a workaround to circumvent the
+        # CPython bug https://bugs.python.org/issue12836
+        ptr = self._ctypes.cast(self._data, obj)
+        ptr._arr = self._arr
+        return ptr
+
+    def shape_as(self, obj):
+        """
+        Return the shape tuple as an array of some other c-types
+        type. For example: ``self.shape_as(ctypes.c_short)``.
+        """
+        if self._zerod:
+            return None
+        return (obj*self._arr.ndim)(*self._arr.shape)
+
+    def strides_as(self, obj):
+        """
+        Return the strides tuple as an array of some other
+        c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
+        """
+        if self._zerod:
+            return None
+        return (obj*self._arr.ndim)(*self._arr.strides)
+
+    @property
+    def data(self):
+        """
+        A pointer to the memory area of the array as a Python integer.
+        This memory area may contain data that is not aligned, or not in correct
+        byte-order. The memory area may not even be writeable. The array
+        flags and data-type of this array should be respected when passing this
+        attribute to arbitrary C-code to avoid trouble that can include Python
+        crashing. User Beware! The value of this attribute is exactly the same
+        as ``self.__array_interface__['data'][0]``.
+
+        Note that unlike ``data_as``, a reference will not be kept to the array:
+        code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
+        pointer to a deallocated array, and should be spelt
+        ``(a + b).ctypes.data_as(ctypes.c_void_p)``
+        """
+        return self._data.value
+
+    @property
+    def shape(self):
+        """
+        (c_intp*self.ndim): A ctypes array of length self.ndim where
+        the basetype is the C-integer corresponding to ``dtype('p')`` on this
+        platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
+        `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
+        the platform. The ctypes array contains the shape of
+        the underlying array.
+        """
+        return self.shape_as(_getintp_ctype())
+
+    @property
+    def strides(self):
+        """
+        (c_intp*self.ndim): A ctypes array of length self.ndim where
+        the basetype is the same as for the shape attribute. This ctypes array
+        contains the strides information from the underlying array. This strides
+        information is important for showing how many bytes must be jumped to
+        get to the next element in the array.
+        """
+        return self.strides_as(_getintp_ctype())
+
+    @property
+    def _as_parameter_(self):
+        """
+        Overrides the ctypes semi-magic method
+
+        Enables `c_func(some_array.ctypes)`
+        """
+        return self.data_as(ctypes.c_void_p)
+
+    # Deprecated accessors below: NumPy 1.21.0, 2021-05-18
+
+    def get_data(self):
+        """Deprecated getter for the `_ctypes.data` property.
+
+        .. deprecated:: 1.21
+        """
+        warnings.warn('"get_data" is deprecated. Use "data" instead',
+                      DeprecationWarning, stacklevel=2)
+        return self.data
+
+    def get_shape(self):
+        """Deprecated getter for the `_ctypes.shape` property.
+
+        .. deprecated:: 1.21
+        """
+        warnings.warn('"get_shape" is deprecated. Use "shape" instead',
+                      DeprecationWarning, stacklevel=2)
+        return self.shape
+
+    def get_strides(self):
+        """Deprecated getter for the `_ctypes.strides` property.
+
+        .. deprecated:: 1.21
+        """
+        warnings.warn('"get_strides" is deprecated. Use "strides" instead',
+                      DeprecationWarning, stacklevel=2)
+        return self.strides
+
+    def get_as_parameter(self):
+        """Deprecated getter for the `_ctypes._as_parameter_` property.
+
+        .. deprecated:: 1.21
+        """
+        warnings.warn(
+            '"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
+            DeprecationWarning, stacklevel=2,
+        )
+        return self._as_parameter_
+
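+# Hedged usage sketch of the public ``ndarray.ctypes`` accessor backed by
+# this class (standard NumPy/ctypes API):
+#
+#     >>> import ctypes
+#     >>> import numpy as np
+#     >>> a = np.arange(6, dtype=np.float64).reshape(2, 3)
+#     >>> p = a.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
+#     >>> p[0], p[5]
+#     (0.0, 5.0)
+#     >>> tuple(a.ctypes.shape), tuple(a.ctypes.strides)
+#     ((2, 3), (24, 8))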
+
+def _newnames(datatype, order):
+    """
+    Given a datatype and an order object, return a new names tuple, with the
+    order indicated
+    """
+    oldnames = datatype.names
+    nameslist = list(oldnames)
+    if isinstance(order, str):
+        order = [order]
+    seen = set()
+    if isinstance(order, (list, tuple)):
+        for name in order:
+            try:
+                nameslist.remove(name)
+            except ValueError:
+                if name in seen:
+                    raise ValueError(f"duplicate field name: {name}") from None
+                else:
+                    raise ValueError(f"unknown field name: {name}") from None
+            seen.add(name)
+        return tuple(list(order) + nameslist)
+    raise ValueError(f"unsupported order value: {order}")
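+
+# Sketch of _newnames reordering field names (the dtype here is illustrative):
+#
+#     >>> import numpy as np
+#     >>> dt = np.dtype([('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
+#     >>> _newnames(dt, ['b'])
+#     ('b', 'a', 'c')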
+
+def _copy_fields(ary):
+    """Return copy of structured array with padding between fields removed.
+
+    Parameters
+    ----------
+    ary : ndarray
+       Structured array from which to remove padding bytes
+
+    Returns
+    -------
+    ary_copy : ndarray
+       Copy of ary with padding bytes removed
+    """
+    dt = ary.dtype
+    copy_dtype = {'names': dt.names,
+                  'formats': [dt.fields[name][0] for name in dt.names]}
+    return array(ary, dtype=copy_dtype, copy=True)
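+
+# Illustration: an aligned struct carries padding (itemsize 8 below), which
+# _copy_fields drops by rebuilding the dtype without explicit offsets:
+#
+#     >>> import numpy as np
+#     >>> dt = np.dtype({'names': ['a', 'b'], 'formats': ['u1', 'i4']},
+#     ...               align=True)
+#     >>> dt.itemsize
+#     8
+#     >>> _copy_fields(np.zeros(2, dtype=dt)).dtype.itemsize
+#     5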
+
+def _promote_fields(dt1, dt2):
+    """ Perform type promotion for two structured dtypes.
+
+    Parameters
+    ----------
+    dt1 : structured dtype
+        First dtype.
+    dt2 : structured dtype
+        Second dtype.
+
+    Returns
+    -------
+    out : dtype
+        The promoted dtype
+
+    Notes
+    -----
+    If one of the inputs is aligned, the result will be.  The titles of
+    both descriptors must match (point to the same field).
+    """
+    # Both must be structured and have the same names in the same order
+    if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
+        raise DTypePromotionError(
+                f"field names `{dt1.names}` and `{dt2.names}` mismatch.")
+
+    # if both are identical, we can (maybe!) just return the same dtype.
+    identical = dt1 is dt2
+    new_fields = []
+    for name in dt1.names:
+        field1 = dt1.fields[name]
+        field2 = dt2.fields[name]
+        new_descr = promote_types(field1[0], field2[0])
+        identical = identical and new_descr is field1[0]
+
+        # Check that the titles match (if given):
+        if field1[2:] != field2[2:]:
+            raise DTypePromotionError(
+                    f"field titles of field '{name}' mismatch")
+        if len(field1) == 2:
+            new_fields.append((name, new_descr))
+        else:
+            new_fields.append(((field1[2], name), new_descr))
+
+    res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)
+
+    # Might as well preserve identity (and metadata) if the dtype is identical
+    # and the itemsize, offsets are also unmodified.  This could probably be
+    # sped up, but also probably just be removed entirely.
+    if identical and res.itemsize == dt1.itemsize:
+        for name in dt1.names:
+            if dt1.fields[name][1] != res.fields[name][1]:
+                return res  # the dtype changed.
+        return dt1
+
+    return res
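+
+# Illustration via the public API (structured-dtype promotion is implemented
+# by this helper; assuming a little-endian platform):
+#
+#     >>> import numpy as np
+#     >>> np.promote_types(np.dtype([('a', 'i4')]), np.dtype([('a', 'f4')]))
+#     dtype([('a', '<f8')])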
+
+
+def _getfield_is_safe(oldtype, newtype, offset):
+    """ Checks safety of getfield for object arrays.
+
+    As in _view_is_safe, we need to check that memory containing objects is not
+    reinterpreted as a non-object datatype and vice versa.
+
+    Parameters
+    ----------
+    oldtype : data-type
+        Data type of the original ndarray.
+    newtype : data-type
+        Data type of the field being accessed by ndarray.getfield
+    offset : int
+        Offset of the field being accessed by ndarray.getfield
+
+    Raises
+    ------
+    TypeError
+        If the field access is invalid
+
+    """
+    if newtype.hasobject or oldtype.hasobject:
+        if offset == 0 and newtype == oldtype:
+            return
+        if oldtype.names is not None:
+            for name in oldtype.names:
+                if (oldtype.fields[name][1] == offset and
+                        oldtype.fields[name][0] == newtype):
+                    return
+        raise TypeError("Cannot get/set field of an object array")
+    return
+
+def _view_is_safe(oldtype, newtype):
+    """ Checks safety of a view involving object arrays, for example when
+    doing::
+
+        np.zeros(10, dtype=oldtype).view(newtype)
+
+    Parameters
+    ----------
+    oldtype : data-type
+        Data type of original ndarray
+    newtype : data-type
+        Data type of the view
+
+    Raises
+    ------
+    TypeError
+        If the new type is incompatible with the old type.
+
+    """
+
+    # if the types are equivalent, there is no problem.
+    # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
+    if oldtype == newtype:
+        return
+
+    if newtype.hasobject or oldtype.hasobject:
+        raise TypeError("Cannot change data-type for object array.")
+    return
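+
+# Illustration: a view that would reinterpret object pointers is rejected:
+#
+#     >>> import numpy as np
+#     >>> np.zeros(10, dtype='i8').view('O')
+#     Traceback (most recent call last):
+#         ...
+#     TypeError: Cannot change data-type for object array.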
+
+# Given a string containing a PEP 3118 format specifier,
+# construct a NumPy dtype
+
+_pep3118_native_map = {
+    '?': '?',
+    'c': 'S1',
+    'b': 'b',
+    'B': 'B',
+    'h': 'h',
+    'H': 'H',
+    'i': 'i',
+    'I': 'I',
+    'l': 'l',
+    'L': 'L',
+    'q': 'q',
+    'Q': 'Q',
+    'e': 'e',
+    'f': 'f',
+    'd': 'd',
+    'g': 'g',
+    'Zf': 'F',
+    'Zd': 'D',
+    'Zg': 'G',
+    's': 'S',
+    'w': 'U',
+    'O': 'O',
+    'x': 'V',  # padding
+}
+_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
+
+_pep3118_standard_map = {
+    '?': '?',
+    'c': 'S1',
+    'b': 'b',
+    'B': 'B',
+    'h': 'i2',
+    'H': 'u2',
+    'i': 'i4',
+    'I': 'u4',
+    'l': 'i4',
+    'L': 'u4',
+    'q': 'i8',
+    'Q': 'u8',
+    'e': 'f2',
+    'f': 'f',
+    'd': 'd',
+    'Zf': 'F',
+    'Zd': 'D',
+    's': 'S',
+    'w': 'U',
+    'O': 'O',
+    'x': 'V',  # padding
+}
+_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
+
+_pep3118_unsupported_map = {
+    'u': 'UCS-2 strings',
+    '&': 'pointers',
+    't': 'bitfields',
+    'X': 'function pointers',
+}
+
+class _Stream:
+    def __init__(self, s):
+        self.s = s
+        self.byteorder = '@'
+
+    def advance(self, n):
+        res = self.s[:n]
+        self.s = self.s[n:]
+        return res
+
+    def consume(self, c):
+        if self.s[:len(c)] == c:
+            self.advance(len(c))
+            return True
+        return False
+
+    def consume_until(self, c):
+        if callable(c):
+            i = 0
+            while i < len(self.s) and not c(self.s[i]):
+                i = i + 1
+            return self.advance(i)
+        else:
+            i = self.s.index(c)
+            res = self.advance(i)
+            self.advance(len(c))
+            return res
+
+    @property
+    def next(self):
+        return self.s[0]
+
+    def __bool__(self):
+        return bool(self.s)
+
+
+def _dtype_from_pep3118(spec):
+    stream = _Stream(spec)
+    dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
+    return dtype
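+
+# Sketch: parsing a PEP 3118 struct spec with two standard-size,
+# little-endian fields:
+#
+#     >>> from numpy.core._internal import _dtype_from_pep3118
+#     >>> _dtype_from_pep3118('T{<i:x:d:y:}')
+#     dtype([('x', '<i4'), ('y', '<f8')])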
+
+def __dtype_from_pep3118(stream, is_subdtype):
+    field_spec = dict(
+        names=[],
+        formats=[],
+        offsets=[],
+        itemsize=0
+    )
+    offset = 0
+    common_alignment = 1
+    is_padding = False
+
+    # Parse spec
+    while stream:
+        value = None
+
+        # End of structure, bail out to upper level
+        if stream.consume('}'):
+            break
+
+        # Sub-arrays (1)
+        shape = None
+        if stream.consume('('):
+            shape = stream.consume_until(')')
+            shape = tuple(map(int, shape.split(',')))
+
+        # Byte order
+        if stream.next in ('@', '=', '<', '>', '^', '!'):
+            byteorder = stream.advance(1)
+            if byteorder == '!':
+                byteorder = '>'
+            stream.byteorder = byteorder
+
+        # Byte order characters also control native vs. standard type sizes
+        if stream.byteorder in ('@', '^'):
+            type_map = _pep3118_native_map
+            type_map_chars = _pep3118_native_typechars
+        else:
+            type_map = _pep3118_standard_map
+            type_map_chars = _pep3118_standard_typechars
+
+        # Item sizes
+        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
+        if itemsize_str:
+            itemsize = int(itemsize_str)
+        else:
+            itemsize = 1
+
+        # Data types
+        is_padding = False
+
+        if stream.consume('T{'):
+            value, align = __dtype_from_pep3118(
+                stream, is_subdtype=True)
+        elif stream.next in type_map_chars:
+            if stream.next == 'Z':
+                typechar = stream.advance(2)
+            else:
+                typechar = stream.advance(1)
+
+            is_padding = (typechar == 'x')
+            dtypechar = type_map[typechar]
+            if dtypechar in 'USV':
+                dtypechar += '%d' % itemsize
+                itemsize = 1
+            numpy_byteorder = {'@': '=', '^': '='}.get(
+                stream.byteorder, stream.byteorder)
+            value = dtype(numpy_byteorder + dtypechar)
+            align = value.alignment
+        elif stream.next in _pep3118_unsupported_map:
+            desc = _pep3118_unsupported_map[stream.next]
+            raise NotImplementedError(
+                "Unrepresentable PEP 3118 data type {!r} ({})"
+                .format(stream.next, desc))
+        else:
+            raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)
+
+        #
+        # Native alignment may require padding
+        #
+        # Here we assume that the presence of a '@' character implies
+        # that the start of the array is *already* aligned.
+        #
+        extra_offset = 0
+        if stream.byteorder == '@':
+            start_padding = (-offset) % align
+            intra_padding = (-value.itemsize) % align
+
+            offset += start_padding
+
+            if intra_padding != 0:
+                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
+                    # Inject internal padding to the end of the sub-item
+                    value = _add_trailing_padding(value, intra_padding)
+                else:
+                    # We can postpone the injection of internal padding,
+                    # as the item appears at most once
+                    extra_offset += intra_padding
+
+            # Update common alignment
+            common_alignment = _lcm(align, common_alignment)
+
+        # Convert itemsize to sub-array
+        if itemsize != 1:
+            value = dtype((value, (itemsize,)))
+
+        # Sub-arrays (2)
+        if shape is not None:
+            value = dtype((value, shape))
+
+        # Field name
+        if stream.consume(':'):
+            name = stream.consume_until(':')
+        else:
+            name = None
+
+        if not (is_padding and name is None):
+            if name is not None and name in field_spec['names']:
+                raise RuntimeError(f"Duplicate field name '{name}' in PEP3118 format")
+            field_spec['names'].append(name)
+            field_spec['formats'].append(value)
+            field_spec['offsets'].append(offset)
+
+        offset += value.itemsize
+        offset += extra_offset
+
+        field_spec['itemsize'] = offset
+
+    # extra final padding for aligned types
+    if stream.byteorder == '@':
+        field_spec['itemsize'] += (-offset) % common_alignment
+
+    # Check if this was a simple 1-item type, and unwrap it
+    if (field_spec['names'] == [None]
+            and field_spec['offsets'][0] == 0
+            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
+            and not is_subdtype):
+        ret = field_spec['formats'][0]
+    else:
+        _fix_names(field_spec)
+        ret = dtype(field_spec)
+
+    # Finished
+    return ret, common_alignment
+
+def _fix_names(field_spec):
+    """ Replace names which are None with the next unused f%d name """
+    names = field_spec['names']
+    for i, name in enumerate(names):
+        if name is not None:
+            continue
+
+        j = 0
+        while True:
+            name = f'f{j}'
+            if name not in names:
+                break
+            j = j + 1
+        names[i] = name
+
+def _add_trailing_padding(value, padding):
+    """Inject the specified number of padding bytes at the end of a dtype"""
+    if value.fields is None:
+        field_spec = dict(
+            names=['f0'],
+            formats=[value],
+            offsets=[0],
+            itemsize=value.itemsize
+        )
+    else:
+        fields = value.fields
+        names = value.names
+        field_spec = dict(
+            names=names,
+            formats=[fields[name][0] for name in names],
+            offsets=[fields[name][1] for name in names],
+            itemsize=value.itemsize
+        )
+
+    field_spec['itemsize'] += padding
+    return dtype(field_spec)
+
+def _prod(a):
+    p = 1
+    for x in a:
+        p *= x
+    return p
+
+def _gcd(a, b):
+    """Calculate the greatest common divisor of a and b"""
+    while b:
+        a, b = b, a % b
+    return a
+
+def _lcm(a, b):
+    return a // _gcd(a, b) * b
+
+def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
+    """ Format the error message for when __array_ufunc__ gives up. """
+    args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
+                            ['{}={!r}'.format(k, v)
+                             for k, v in kwargs.items()])
+    args = inputs + kwargs.get('out', ())
+    types_string = ', '.join(repr(type(arg).__name__) for arg in args)
+    return ('operand type(s) all returned NotImplemented from '
+            '__array_ufunc__({!r}, {!r}, {}): {}'
+            .format(ufunc, method, args_string, types_string))
+
+
+def array_function_errmsg_formatter(public_api, types):
+    """ Format the error message for when __array_ufunc__ gives up. """
+    func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
+    return ("no implementation found for '{}' on types that implement "
+            '__array_function__: {}'.format(func_name, list(types)))
+
+
+def _ufunc_doc_signature_formatter(ufunc):
+    """
+    Builds a signature string which resembles PEP 457
+
+    This is used to construct the first line of the docstring
+    """
+
+    # input arguments are simple
+    if ufunc.nin == 1:
+        in_args = 'x'
+    else:
+        in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))
+
+    # output arguments are both keyword or positional
+    if ufunc.nout == 0:
+        out_args = ', /, out=()'
+    elif ufunc.nout == 1:
+        out_args = ', /, out=None'
+    else:
+        out_args = '[, {positional}], / [, out={default}]'.format(
+            positional=', '.join(
+                'out{}'.format(i+1) for i in range(ufunc.nout)),
+            default=repr((None,)*ufunc.nout)
+        )
+
+    # keyword only args depend on whether this is a gufunc
+    kwargs = (
+        ", casting='same_kind'"
+        ", order='K'"
+        ", dtype=None"
+        ", subok=True"
+    )
+
+    # NOTE: gufuncs may or may not support the `axis` parameter
+    if ufunc.signature is None:
+        kwargs = f", where=True{kwargs}[, signature, extobj]"
+    else:
+        kwargs += "[, signature, extobj, axes, axis]"
+
+    # join all the parts together
+    return '{name}({in_args}{out_args}, *{kwargs})'.format(
+        name=ufunc.__name__,
+        in_args=in_args,
+        out_args=out_args,
+        kwargs=kwargs
+    )
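+
+# Example output for a binary ufunc (np.add has nin=2, nout=1, and no gufunc
+# signature):
+#
+#     >>> import numpy as np
+#     >>> _ufunc_doc_signature_formatter(np.add)
+#     "add(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj])"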
+
+
+def npy_ctypes_check(cls):
+    # determine if a class comes from ctypes, in order to work around
+    # a bug in the buffer protocol for those objects, bpo-10746
+    try:
+        # ctypes classes are new-style, so they have an __mro__. This probably
+        # fails for ctypes classes with multiple inheritance.
+        if IS_PYPY:
+            # (..., _ctypes.basics._CData, Bufferable, object)
+            ctype_base = cls.__mro__[-3]
+        else:
+            # (..., _ctypes._CData, object)
+            ctype_base = cls.__mro__[-2]
+        # right now, they're part of the _ctypes module
+        return '_ctypes' in ctype_base.__module__
+    except Exception:
+        return False
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_internal.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_internal.pyi
new file mode 100644
index 00000000..8a25ef2c
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_internal.pyi
@@ -0,0 +1,30 @@
+from typing import Any, TypeVar, overload, Generic
+import ctypes as ct
+
+from numpy import ndarray
+from numpy.ctypeslib import c_intp
+
+_CastT = TypeVar("_CastT", bound=ct._CanCastTo)  # Copied from `ctypes.cast`
+_CT = TypeVar("_CT", bound=ct._CData)
+_PT = TypeVar("_PT", bound=None | int)
+
+# TODO: Let the likes of `shape_as` and `strides_as` return `None`
+# for 0D arrays once we've got shape-support
+
+class _ctypes(Generic[_PT]):
+    @overload
+    def __new__(cls, array: ndarray[Any, Any], ptr: None = ...) -> _ctypes[None]: ...
+    @overload
+    def __new__(cls, array: ndarray[Any, Any], ptr: _PT) -> _ctypes[_PT]: ...
+    @property
+    def data(self) -> _PT: ...
+    @property
+    def shape(self) -> ct.Array[c_intp]: ...
+    @property
+    def strides(self) -> ct.Array[c_intp]: ...
+    @property
+    def _as_parameter_(self) -> ct.c_void_p: ...
+
+    def data_as(self, obj: type[_CastT]) -> _CastT: ...
+    def shape_as(self, obj: type[_CT]) -> ct.Array[_CT]: ...
+    def strides_as(self, obj: type[_CT]) -> ct.Array[_CT]: ...
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_machar.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_machar.py
new file mode 100644
index 00000000..59d71014
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_machar.py
@@ -0,0 +1,356 @@
+"""
+Machine arithmetic - determine the parameters of the
+floating-point arithmetic system
+
+Author: Pearu Peterson, September 2003
+
+"""
+__all__ = ['MachAr']
+
+from .fromnumeric import any
+from ._ufunc_config import errstate
+from .._utils import set_module
+
+# Need to speed this up...especially for longfloat
+
+# Deprecated 2021-10-20, NumPy 1.22
+@set_module('numpy')
+class MachAr:
+    """
+    Diagnosing machine parameters.
+
+    Attributes
+    ----------
+    ibeta : int
+        Radix in which numbers are represented.
+    it : int
+        Number of base-`ibeta` digits in the floating point mantissa M.
+    machep : int
+        Exponent of the smallest (most negative) power of `ibeta` that,
+        added to 1.0, gives something different from 1.0
+    eps : float
+        Floating-point number ``beta**machep`` (floating point precision)
+    negep : int
+        Exponent of the smallest power of `ibeta` that, subtracted
+        from 1.0, gives something different from 1.0.
+    epsneg : float
+        Floating-point number ``beta**negep``.
+    iexp : int
+        Number of bits in the exponent (including its sign and bias).
+    minexp : int
+        Smallest (most negative) power of `ibeta` consistent with there
+        being no leading zeros in the mantissa.
+    xmin : float
+        Floating-point number ``beta**minexp`` (the smallest [in
+        magnitude] positive floating point number with full precision).
+    maxexp : int
+        Smallest (positive) power of `ibeta` that causes overflow.
+    xmax : float
+        ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
+        usable floating value).
+    irnd : int
+        In ``range(6)``, information on what kind of rounding is done
+        in addition, and on how underflow is handled.
+    ngrd : int
+        Number of 'guard digits' used when truncating the product
+        of two mantissas to fit the representation.
+    epsilon : float
+        Same as `eps`.
+    tiny : float
+        An alias for `smallest_normal`, kept for backwards compatibility.
+    huge : float
+        Same as `xmax`.
+    precision : float
+        ``- int(-log10(eps))``
+    resolution : float
+        ``- 10**(-precision)``
+    smallest_normal : float
+        The smallest positive floating point number with 1 as leading bit in
+        the mantissa following IEEE-754. Same as `xmin`.
+    smallest_subnormal : float
+        The smallest positive floating point number with 0 as leading bit in
+        the mantissa following IEEE-754.
+
+    Parameters
+    ----------
+    float_conv : function, optional
+        Function that converts an integer or integer array to a float
+        or float array. Default is `float`.
+    int_conv : function, optional
+        Function that converts a float or float array to an integer or
+        integer array. Default is `int`.
+    float_to_float : function, optional
+        Function that converts a float array to float. Default is `float`.
+        Note that this does not seem to do anything useful in the current
+        implementation.
+    float_to_str : function, optional
+        Function that converts a single float to a string. Default is
+        ``lambda v:'%24.16e' %v``.
+    title : str, optional
+        Title that is printed in the string representation of `MachAr`.
+
+    See Also
+    --------
+    finfo : Machine limits for floating point types.
+    iinfo : Machine limits for integer types.
+
+    References
+    ----------
+    .. [1] Press, Teukolsky, Vetterling and Flannery,
+           "Numerical Recipes in C++," 2nd ed,
+           Cambridge University Press, 2002, p. 31.
+
+    """
+
+    def __init__(self, float_conv=float, int_conv=int,
+                 float_to_float=float,
+                 float_to_str=lambda v:'%24.16e' % v,
+                 title='Python floating point number'):
+        """
+
+        float_conv - convert integer to float (array)
+        int_conv   - convert float (array) to integer
+        float_to_float - convert float array to float
+        float_to_str - convert array float to str
+        title        - description of used floating point numbers
+
+        """
+        # We ignore all errors here because we are purposely triggering
+        # underflow to detect the properties of the running arch.
+        with errstate(under='ignore'):
+            self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
+
+    def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
+        max_iterN = 10000
+        msg = "Did not converge after %d tries with %s"
+        one = float_conv(1)
+        two = one + one
+        zero = one - one
+
+        # Do we really need to do this?  Aren't they 2 and 2.0?
+        # Determine ibeta and beta
+        a = one
+        for _ in range(max_iterN):
+            a = a + a
+            temp = a + one
+            temp1 = temp - a
+            if any(temp1 - one != zero):
+                break
+        else:
+            raise RuntimeError(msg % (_, one.dtype))
+        b = one
+        for _ in range(max_iterN):
+            b = b + b
+            temp = a + b
+            itemp = int_conv(temp-a)
+            if any(itemp != 0):
+                break
+        else:
+            raise RuntimeError(msg % (_, one.dtype))
+        ibeta = itemp
+        beta = float_conv(ibeta)
+
+        # Determine it and irnd
+        it = -1
+        b = one
+        for _ in range(max_iterN):
+            it = it + 1
+            b = b * beta
+            temp = b + one
+            temp1 = temp - b
+            if any(temp1 - one != zero):
+                break
+        else:
+            raise RuntimeError(msg % (_, one.dtype))
+
+        betah = beta / two
+        a = one
+        for _ in range(max_iterN):
+            a = a + a
+            temp = a + one
+            temp1 = temp - a
+            if any(temp1 - one != zero):
+                break
+        else:
+            raise RuntimeError(msg % (_, one.dtype))
+        temp = a + betah
+        irnd = 0
+        if any(temp-a != zero):
+            irnd = 1
+        tempa = a + beta
+        temp = tempa + betah
+        if irnd == 0 and any(temp-tempa != zero):
+            irnd = 2
+
+        # Determine negep and epsneg
+        negep = it + 3
+        betain = one / beta
+        a = one
+        for i in range(negep):
+            a = a * betain
+        b = a
+        for _ in range(max_iterN):
+            temp = one - a
+            if any(temp-one != zero):
+                break
+            a = a * beta
+            negep = negep - 1
+            # Prevent infinite loop on PPC with gcc 4.0:
+            if negep < 0:
+                raise RuntimeError("could not determine machine tolerance "
+                                   "for 'negep', locals() -> %s" % (locals()))
+        else:
+            raise RuntimeError(msg % (_, one.dtype))
+        negep = -negep
+        epsneg = a
+
+        # Determine machep and eps
+        machep = - it - 3
+        a = b
+
+        for _ in range(max_iterN):
+            temp = one + a
+            if any(temp-one != zero):
+                break
+            a = a * beta
+            machep = machep + 1
+        else:
+            raise RuntimeError(msg % (_, one.dtype))
+        eps = a
+
+        # Determine ngrd
+        ngrd = 0
+        temp = one + eps
+        if irnd == 0 and any(temp*one - one != zero):
+            ngrd = 1
+
+        # Determine iexp
+        i = 0
+        k = 1
+        z = betain
+        t = one + eps
+        nxres = 0
+        for _ in range(max_iterN):
+            y = z
+            z = y*y
+            a = z*one  # Check here for underflow
+            temp = z*t
+            if any(a+a == zero) or any(abs(z) >= y):
+                break
+            temp1 = temp * betain
+            if any(temp1*beta == z):
+                break
+            i = i + 1
+            k = k + k
+        else:
+            raise RuntimeError(msg % (_, one.dtype))
+        if ibeta != 10:
+            iexp = i + 1
+            mx = k + k
+        else:
+            iexp = 2
+            iz = ibeta
+            while k >= iz:
+                iz = iz * ibeta
+                iexp = iexp + 1
+            mx = iz + iz - 1
+
+        # Determine minexp and xmin
+        for _ in range(max_iterN):
+            xmin = y
+            y = y * betain
+            a = y * one
+            temp = y * t
+            if any((a + a) != zero) and any(abs(y) < xmin):
+                k = k + 1
+                temp1 = temp * betain
+                if any(temp1*beta == y) and any(temp != y):
+                    nxres = 3
+                    xmin = y
+                    break
+            else:
+                break
+        else:
+            raise RuntimeError(msg % (_, one.dtype))
+        minexp = -k
+
+        # Determine maxexp, xmax
+        if mx <= k + k - 3 and ibeta != 10:
+            mx = mx + mx
+            iexp = iexp + 1
+        maxexp = mx + minexp
+        irnd = irnd + nxres
+        if irnd >= 2:
+            maxexp = maxexp - 2
+        i = maxexp + minexp
+        if ibeta == 2 and not i:
+            maxexp = maxexp - 1
+        if i > 20:
+            maxexp = maxexp - 1
+        if any(a != y):
+            maxexp = maxexp - 2
+        xmax = one - epsneg
+        if any(xmax*one != xmax):
+            xmax = one - beta*epsneg
+        xmax = xmax / (xmin*beta*beta*beta)
+        i = maxexp + minexp + 3
+        for j in range(i):
+            if ibeta == 2:
+                xmax = xmax + xmax
+            else:
+                xmax = xmax * beta
+
+        smallest_subnormal = abs(xmin / beta ** (it))
+
+        self.ibeta = ibeta
+        self.it = it
+        self.negep = negep
+        self.epsneg = float_to_float(epsneg)
+        self._str_epsneg = float_to_str(epsneg)
+        self.machep = machep
+        self.eps = float_to_float(eps)
+        self._str_eps = float_to_str(eps)
+        self.ngrd = ngrd
+        self.iexp = iexp
+        self.minexp = minexp
+        self.xmin = float_to_float(xmin)
+        self._str_xmin = float_to_str(xmin)
+        self.maxexp = maxexp
+        self.xmax = float_to_float(xmax)
+        self._str_xmax = float_to_str(xmax)
+        self.irnd = irnd
+
+        self.title = title
+        # Commonly used parameters
+        self.epsilon = self.eps
+        self.tiny = self.xmin
+        self.huge = self.xmax
+        self.smallest_normal = self.xmin
+        self._str_smallest_normal = float_to_str(self.xmin)
+        self.smallest_subnormal = float_to_float(smallest_subnormal)
+        self._str_smallest_subnormal = float_to_str(smallest_subnormal)
+
+        import math
+        self.precision = int(-math.log10(float_to_float(self.eps)))
+        ten = two + two + two + two + two
+        resolution = ten ** (-self.precision)
+        self.resolution = float_to_float(resolution)
+        self._str_resolution = float_to_str(resolution)
+
+    def __str__(self):
+        fmt = (
+           'Machine parameters for %(title)s\n'
+           '---------------------------------------------------------------------\n'
+           'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
+           'machep=%(machep)s     eps=%(_str_eps)s (beta**machep == epsilon)\n'
+           'negep =%(negep)s  epsneg=%(_str_epsneg)s (beta**epsneg)\n'
+           'minexp=%(minexp)s   xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
+           'maxexp=%(maxexp)s    xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
+           'smallest_normal=%(smallest_normal)s    '
+           'smallest_subnormal=%(smallest_subnormal)s\n'
+           '---------------------------------------------------------------------\n'
+           )
+        return fmt % self.__dict__
+
+
+if __name__ == '__main__':
+    print(MachAr())
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_methods.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_methods.py
new file mode 100644
index 00000000..0fc070b3
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_methods.py
@@ -0,0 +1,234 @@
+"""
+Array methods which are called by both the C-code for the method
+and the Python code for the NumPy-namespace function
+
+"""
+import warnings
+from contextlib import nullcontext
+
+from numpy.core import multiarray as mu
+from numpy.core import umath as um
+from numpy.core.multiarray import asanyarray
+from numpy.core import numerictypes as nt
+from numpy.core import _exceptions
+from numpy.core._ufunc_config import _no_nep50_warning
+from numpy._globals import _NoValue
+from numpy.compat import pickle, os_fspath
+
+# save those O(100) nanoseconds!
+umr_maximum = um.maximum.reduce
+umr_minimum = um.minimum.reduce
+umr_sum = um.add.reduce
+umr_prod = um.multiply.reduce
+umr_any = um.logical_or.reduce
+umr_all = um.logical_and.reduce
+
+# Complex types to -> (2,)float view for fast-path computation in _var()
+_complex_to_float = {
+    nt.dtype(nt.csingle) : nt.dtype(nt.single),
+    nt.dtype(nt.cdouble) : nt.dtype(nt.double),
+}
+# Special case for windows: ensure double takes precedence
+if nt.dtype(nt.longdouble) != nt.dtype(nt.double):
+    _complex_to_float.update({
+        nt.dtype(nt.clongdouble) : nt.dtype(nt.longdouble),
+    })
+
+# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
+# small reductions
+def _amax(a, axis=None, out=None, keepdims=False,
+          initial=_NoValue, where=True):
+    return umr_maximum(a, axis, None, out, keepdims, initial, where)
+
+def _amin(a, axis=None, out=None, keepdims=False,
+          initial=_NoValue, where=True):
+    return umr_minimum(a, axis, None, out, keepdims, initial, where)
+
+def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
+         initial=_NoValue, where=True):
+    return umr_sum(a, axis, dtype, out, keepdims, initial, where)
+
+def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
+          initial=_NoValue, where=True):
+    return umr_prod(a, axis, dtype, out, keepdims, initial, where)
+
+def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
+    # Parsing keyword arguments is currently fairly slow, so avoid it for now
+    if where is True:
+        return umr_any(a, axis, dtype, out, keepdims)
+    return umr_any(a, axis, dtype, out, keepdims, where=where)
+
+def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
+    # Parsing keyword arguments is currently fairly slow, so avoid it for now
+    if where is True:
+        return umr_all(a, axis, dtype, out, keepdims)
+    return umr_all(a, axis, dtype, out, keepdims, where=where)
+
+def _count_reduce_items(arr, axis, keepdims=False, where=True):
+    # fast-path for the default case
+    if where is True:
+        # no boolean mask given, calculate items according to axis
+        if axis is None:
+            axis = tuple(range(arr.ndim))
+        elif not isinstance(axis, tuple):
+            axis = (axis,)
+        items = 1
+        for ax in axis:
+            items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)]
+        items = nt.intp(items)
+    else:
+        # TODO: Optimize case when `where` is broadcast along a non-reduction
+        # axis and the full sum does more work than needed.
+
+        # guarded to protect circular imports
+        from numpy.lib.stride_tricks import broadcast_to
+        # count True values in (potentially broadcasted) boolean mask
+        items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None,
+                        keepdims)
+    return items
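+
+# Illustration (values here are for a hypothetical (2, 3) array): the fast
+# path multiplies the reduced-axis lengths; a boolean `where` mask counts
+# True entries instead:
+#
+#     >>> import numpy as np
+#     >>> arr = np.zeros((2, 3))
+#     >>> _count_reduce_items(arr, axis=1)
+#     3
+#     >>> _count_reduce_items(arr, 1, where=np.array([True, False, True]))
+#     array([2, 2])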
+
+def _clip(a, min=None, max=None, out=None, **kwargs):
+    if min is None and max is None:
+        raise ValueError("One of max or min must be given")
+
+    if min is None:
+        return um.minimum(a, max, out=out, **kwargs)
+    elif max is None:
+        return um.maximum(a, min, out=out, **kwargs)
+    else:
+        return um.clip(a, min, max, out=out, **kwargs)
+
+def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
+    arr = asanyarray(a)
+
+    is_float16_result = False
+
+    rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
+    if rcount == 0 if where is True else umr_any(rcount == 0, axis=None):
+        warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)
+
+    # Cast bool, unsigned int, and int to float64 by default
+    if dtype is None:
+        if issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
+            dtype = mu.dtype('f8')
+        elif issubclass(arr.dtype.type, nt.float16):
+            dtype = mu.dtype('f4')
+            is_float16_result = True
+
+    ret = umr_sum(arr, axis, dtype, out, keepdims, where=where)
+    if isinstance(ret, mu.ndarray):
+        with _no_nep50_warning():
+            ret = um.true_divide(
+                    ret, rcount, out=ret, casting='unsafe', subok=False)
+        if is_float16_result and out is None:
+            ret = arr.dtype.type(ret)
+    elif hasattr(ret, 'dtype'):
+        if is_float16_result:
+            ret = arr.dtype.type(ret / rcount)
+        else:
+            ret = ret.dtype.type(ret / rcount)
+    else:
+        ret = ret / rcount
+
+    return ret
+
+def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
+         where=True):
+    arr = asanyarray(a)
+
+    rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
+    # Make this warning show up on top.
+    if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None):
+        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
+                      stacklevel=2)
+
+    # Cast bool, unsigned int, and int to float64 by default
+    if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
+        dtype = mu.dtype('f8')
+
+    # Compute the mean.
+    # Note that if dtype is not of inexact type then arraymean will
+    # not be either.
+    arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where)
+    # The shape of rcount has to match arrmean to not change the shape of out
+    # in broadcasting. Otherwise, it cannot be stored back to arrmean.
+    if rcount.ndim == 0:
+        # fast-path for default case when where is True
+        div = rcount
+    else:
+        # matching rcount to arrmean when where is specified as array
+        div = rcount.reshape(arrmean.shape)
+    if isinstance(arrmean, mu.ndarray):
+        with _no_nep50_warning():
+            arrmean = um.true_divide(arrmean, div, out=arrmean,
+                                     casting='unsafe', subok=False)
+    elif hasattr(arrmean, "dtype"):
+        arrmean = arrmean.dtype.type(arrmean / rcount)
+    else:
+        arrmean = arrmean / rcount
+
+    # Compute sum of squared deviations from mean
+    # Note that x may not be inexact and that we need it to be an array,
+    # not a scalar.
+    x = asanyarray(arr - arrmean)
+
+    if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
+        x = um.multiply(x, x, out=x)
+    # Fast-paths for built-in complex types
+    elif x.dtype in _complex_to_float:
+        xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))
+        um.multiply(xv, xv, out=xv)
+        x = um.add(xv[..., 0], xv[..., 1], out=x.real).real
+    # Most general case; includes handling object arrays containing imaginary
+    # numbers and complex types with non-native byteorder
+    else:
+        x = um.multiply(x, um.conjugate(x), out=x).real
+
+    ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where)
+
+    # Compute degrees of freedom and make sure it is not negative.
+    rcount = um.maximum(rcount - ddof, 0)
+
+    # divide by degrees of freedom
+    if isinstance(ret, mu.ndarray):
+        with _no_nep50_warning():
+            ret = um.true_divide(
+                    ret, rcount, out=ret, casting='unsafe', subok=False)
+    elif hasattr(ret, 'dtype'):
+        ret = ret.dtype.type(ret / rcount)
+    else:
+        ret = ret / rcount
+
+    return ret
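+
+# Sanity check via the public API, which dispatches here:
+#
+#     >>> import numpy as np
+#     >>> np.var([1.0, 2.0, 3.0, 4.0], ddof=1)
+#     1.6666666666666667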
+
+def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
+         where=True):
+    ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+               keepdims=keepdims, where=where)
+
+    if isinstance(ret, mu.ndarray):
+        ret = um.sqrt(ret, out=ret)
+    elif hasattr(ret, 'dtype'):
+        ret = ret.dtype.type(um.sqrt(ret))
+    else:
+        ret = um.sqrt(ret)
+
+    return ret
+
+def _ptp(a, axis=None, out=None, keepdims=False):
+    return um.subtract(
+        umr_maximum(a, axis, None, out, keepdims),
+        umr_minimum(a, axis, None, None, keepdims),
+        out
+    )
+
+def _dump(self, file, protocol=2):
+    if hasattr(file, 'write'):
+        ctx = nullcontext(file)
+    else:
+        ctx = open(os_fspath(file), "wb")
+    with ctx as f:
+        pickle.dump(self, f, protocol=protocol)
+
+def _dumps(self, protocol=2):
+    return pickle.dumps(self, protocol=protocol)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_multiarray_tests.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_multiarray_tests.cpython-39-darwin.so
new file mode 100755
index 00000000..0d549ca8
Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_multiarray_tests.cpython-39-darwin.so differ
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_multiarray_umath.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_multiarray_umath.cpython-39-darwin.so
new file mode 100755
index 00000000..6af727fb
Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_multiarray_umath.cpython-39-darwin.so differ
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_operand_flag_tests.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_operand_flag_tests.cpython-39-darwin.so
new file mode 100755
index 00000000..f0d8a20c
Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_operand_flag_tests.cpython-39-darwin.so differ
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_rational_tests.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_rational_tests.cpython-39-darwin.so
new file mode 100755
index 00000000..708ab15d
Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_rational_tests.cpython-39-darwin.so differ
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_simd.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_simd.cpython-39-darwin.so
new file mode 100755
index 00000000..fdcd6dfe
Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_simd.cpython-39-darwin.so differ
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_string_helpers.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_string_helpers.py
new file mode 100644
index 00000000..1f757cc0
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_string_helpers.py
@@ -0,0 +1,100 @@
+"""
+String-handling utilities to avoid locale-dependence.
+
+Used primarily to generate type name aliases.
+"""
+# "import string" is costly to import!
+# Construct the translation tables directly
+#   "A" = chr(65), "a" = chr(97)
+_all_chars = tuple(map(chr, range(256)))
+_ascii_upper = _all_chars[65:65+26]
+_ascii_lower = _all_chars[97:97+26]
+LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
+UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
+
+
+def english_lower(s):
+    """ Apply English case rules to convert ASCII strings to all lower case.
+
+    This is an internal utility function to replace calls to str.lower() such
+    that we can avoid changing behavior with changing locales. In particular,
+    Turkish has distinct dotted and dotless variants of the Latin letter "I" in
+    both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
+
+    Parameters
+    ----------
+    s : str
+
+    Returns
+    -------
+    lowered : str
+
+    Examples
+    --------
+    >>> from numpy.core.numerictypes import english_lower
+    >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
+    'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
+    >>> english_lower('')
+    ''
+    """
+    lowered = s.translate(LOWER_TABLE)
+    return lowered
+
+
+def english_upper(s):
+    """ Apply English case rules to convert ASCII strings to all upper case.
+
+    This is an internal utility function to replace calls to str.upper() such
+    that we can avoid changing behavior with changing locales. In particular,
+    Turkish has distinct dotted and dotless variants of the Latin letter "I" in
+    both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
+
+    Parameters
+    ----------
+    s : str
+
+    Returns
+    -------
+    uppered : str
+
+    Examples
+    --------
+    >>> from numpy.core.numerictypes import english_upper
+    >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
+    'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
+    >>> english_upper('')
+    ''
+    """
+    uppered = s.translate(UPPER_TABLE)
+    return uppered
+
+
+def english_capitalize(s):
+    """ Apply English case rules to convert the first character of an ASCII
+    string to upper case.
+
+    This is an internal utility function to replace calls to str.capitalize()
+    such that we can avoid changing behavior with changing locales.
+
+    Parameters
+    ----------
+    s : str
+
+    Returns
+    -------
+    capitalized : str
+
+    Examples
+    --------
+    >>> from numpy.core.numerictypes import english_capitalize
+    >>> english_capitalize('int8')
+    'Int8'
+    >>> english_capitalize('Int8')
+    'Int8'
+    >>> english_capitalize('')
+    ''
+    """
+    if s:
+        return english_upper(s[0]) + s[1:]
+    else:
+        return s
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_struct_ufunc_tests.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_struct_ufunc_tests.cpython-39-darwin.so
new file mode 100755
index 00000000..99e73a6e
Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_struct_ufunc_tests.cpython-39-darwin.so differ
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_type_aliases.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_type_aliases.py
new file mode 100644
index 00000000..38f1a099
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_type_aliases.py
@@ -0,0 +1,245 @@
+"""
+For backwards compatibility, numpy has a very large number of different naming
+conventions for the scalar types (those subclassing from `numpy.generic`).
+This file produces a convoluted set of dictionaries mapping names to types,
+and sometimes other mappings too.
+
+.. data:: allTypes
+    A dictionary of names to types that will be exposed as attributes through
+    ``np.core.numerictypes.*``
+
+.. data:: sctypeDict
+    Similar to `allTypes`, but maps a broader set of aliases to their types.
+
+.. data:: sctypes
+    A dictionary keyed by a "type group" string, providing a list of types
+    under that group.
+
+"""
+
+from numpy.compat import unicode
+from numpy.core._string_helpers import english_lower
+from numpy.core.multiarray import typeinfo, dtype
+from numpy.core._dtype import _kind_name
+
+
+sctypeDict = {}      # Contains all leaf-node scalar types with aliases
+allTypes = {}            # Collect the types we will add to the module
+
+
+# separate the actual type info from the abstract base classes
+_abstract_types = {}
+_concrete_typeinfo = {}
+for k, v in typeinfo.items():
+    # make all the keys lowercase too
+    k = english_lower(k)
+    if isinstance(v, type):
+        _abstract_types[k] = v
+    else:
+        _concrete_typeinfo[k] = v
+
+_concrete_types = {v.type for k, v in _concrete_typeinfo.items()}
+
+
+def _bits_of(obj):
+    try:
+        info = next(v for v in _concrete_typeinfo.values() if v.type is obj)
+    except StopIteration:
+        if obj in _abstract_types.values():
+            msg = "Cannot count the bits of an abstract type"
+            raise ValueError(msg) from None
+
+        # some third-party type - make a best-guess
+        return dtype(obj).itemsize * 8
+    else:
+        return info.bits
+
+
+def bitname(obj):
+    """Return a bit-width name for a given type object"""
+    bits = _bits_of(obj)
+    dt = dtype(obj)
+    char = dt.kind
+    base = _kind_name(dt)
+
+    if base == 'object':
+        bits = 0
+
+    if bits != 0:
+        char = "%s%d" % (char, bits // 8)
+
+    return base, bits, char
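+
+# Illustration: bitname maps a concrete scalar type to (kind name, bits,
+# sized character code):
+#
+#     >>> import numpy as np
+#     >>> bitname(np.float64)
+#     ('float', 64, 'f8')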
+
+
+def _add_types():
+    for name, info in _concrete_typeinfo.items():
+        # define the C name, and register typenum and typechar aliases too
+        allTypes[name] = info.type
+        sctypeDict[name] = info.type
+        sctypeDict[info.char] = info.type
+        sctypeDict[info.num] = info.type
+
+    for name, cls in _abstract_types.items():
+        allTypes[name] = cls
+_add_types()
+
+# This is the priority order used to assign the bit-sized NPY_INTxx names, which
+# must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be
+# consistent.
+# If two C types have the same size, then the earliest one in this list is used
+# as the sized name.
+_int_ctypes = ['long', 'longlong', 'int', 'short', 'byte']
+_uint_ctypes = list('u' + t for t in _int_ctypes)
+
+def _add_aliases():
+    for name, info in _concrete_typeinfo.items():
+        # these are handled by _add_integer_aliases
+        if name in _int_ctypes or name in _uint_ctypes:
+            continue
+
+        # insert bit-width version for this class (if relevant)
+        base, bit, char = bitname(info.type)
+
+        myname = "%s%d" % (base, bit)
+
+        # ensure that (c)longdouble does not overwrite the aliases assigned to
+        # (c)double
+        if name in ('longdouble', 'clongdouble') and myname in allTypes:
+            continue
+
+        # Add to the main namespace if desired:
+        if bit != 0 and base != "bool":
+            allTypes[myname] = info.type
+
+        # add forward, reverse, and string mapping to numarray
+        sctypeDict[char] = info.type
+
+        # add a mapping for the bit-width name as well
+        sctypeDict[myname] = info.type
+
+
+_add_aliases()
+
+def _add_integer_aliases():
+    seen_bits = set()
+    for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes):
+        i_info = _concrete_typeinfo[i_ctype]
+        u_info = _concrete_typeinfo[u_ctype]
+        bits = i_info.bits  # same for both
+
+        for info, charname, intname in [
+                (i_info,'i%d' % (bits//8,), 'int%d' % bits),
+                (u_info,'u%d' % (bits//8,), 'uint%d' % bits)]:
+            if bits not in seen_bits:
+                # sometimes two different types have the same number of bits
+                # if so, the one iterated over first takes precedence
+                allTypes[intname] = info.type
+                sctypeDict[intname] = info.type
+                sctypeDict[charname] = info.type
+
+        seen_bits.add(bits)
+
+_add_integer_aliases()
+
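+# Editor's note: on an LP64 platform both ``long`` and ``longlong`` are
+# 64 bits wide; because ``'long'`` comes first in _int_ctypes above, it is
+# the C type that ``int64`` and ``i8`` end up referring to.
+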
+# We use this later
+void = allTypes['void']
+
+#
+# Rework the Python names (so that float and complex and int are consistent
+#                            with Python usage)
+#
+def _set_up_aliases():
+    type_pairs = [('complex_', 'cdouble'),
+                  ('single', 'float'),
+                  ('csingle', 'cfloat'),
+                  ('singlecomplex', 'cfloat'),
+                  ('float_', 'double'),
+                  ('intc', 'int'),
+                  ('uintc', 'uint'),
+                  ('int_', 'long'),
+                  ('uint', 'ulong'),
+                  ('cfloat', 'cdouble'),
+                  ('longfloat', 'longdouble'),
+                  ('clongfloat', 'clongdouble'),
+                  ('longcomplex', 'clongdouble'),
+                  ('bool_', 'bool'),
+                  ('bytes_', 'string'),
+                  ('string_', 'string'),
+                  ('str_', 'unicode'),
+                  ('unicode_', 'unicode'),
+                  ('object_', 'object')]
+    for alias, t in type_pairs:
+        allTypes[alias] = allTypes[t]
+        sctypeDict[alias] = sctypeDict[t]
+    # Remove aliases overriding python types and modules
+    to_remove = ['object', 'int', 'float',
+                 'complex', 'bool', 'string', 'datetime', 'timedelta',
+                 'bytes', 'str']
+
+    for t in to_remove:
+        try:
+            del allTypes[t]
+            del sctypeDict[t]
+        except KeyError:
+            pass
+
+    # Additional aliases in sctypeDict that should not be exposed as attributes
+    attrs_to_remove = ['ulong']
+
+    for t in attrs_to_remove:
+        try:
+            del allTypes[t]
+        except KeyError:
+            pass
+_set_up_aliases()
+
+
+sctypes = {'int': [],
+           'uint': [],
+           'float': [],
+           'complex': [],
+           'others': [bool, object, bytes, unicode, void]}
+
+def _add_array_type(typename, bits):
+    try:
+        t = allTypes['%s%d' % (typename, bits)]
+    except KeyError:
+        pass
+    else:
+        sctypes[typename].append(t)
+
+def _set_array_types():
+    ibytes = [1, 2, 4, 8, 16, 32, 64]
+    fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
+    for bytes in ibytes:
+        bits = 8*bytes
+        _add_array_type('int', bits)
+        _add_array_type('uint', bits)
+    for bytes in fbytes:
+        bits = 8*bytes
+        _add_array_type('float', bits)
+        _add_array_type('complex', 2*bits)
+    _gi = dtype('p')
+    if _gi.type not in sctypes['int']:
+        indx = 0
+        sz = _gi.itemsize
+        _lst = sctypes['int']
+        while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
+            indx += 1
+        sctypes['int'].insert(indx, _gi.type)
+        sctypes['uint'].insert(indx, dtype('P').type)
+_set_array_types()
+
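+# Editor's note: dtype('p') above is the pointer-sized integer (np.intp).
+# On common builds it coincides with a C type already collected into
+# sctypes['int'], so the insertion branch is only taken on the rare
+# platforms where it is a distinct leaf type.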
+
+# Add additional strings to the sctypeDict
+_toadd = ['int', 'float', 'complex', 'bool', 'object',
+          'str', 'bytes', ('a', 'bytes_'),
+          ('int0', 'intp'), ('uint0', 'uintp')]
+
+for name in _toadd:
+    if isinstance(name, tuple):
+        sctypeDict[name[0]] = allTypes[name[1]]
+    else:
+        sctypeDict[name] = allTypes['%s_' % name]
+
+del _toadd, name
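+
+# Editor's illustration: the loop above makes, e.g., sctypeDict['int'] point
+# at np.int_ and sctypeDict['a'] at np.bytes_, without re-exposing those
+# names as attributes (they were stripped from allTypes in _set_up_aliases).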
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_type_aliases.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_type_aliases.pyi
new file mode 100644
index 00000000..c0b6f1a8
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_type_aliases.pyi
@@ -0,0 +1,13 @@
+from typing import Any, TypedDict
+
+from numpy import generic, signedinteger, unsignedinteger, floating, complexfloating
+
+class _SCTypes(TypedDict):
+    int: list[type[signedinteger[Any]]]
+    uint: list[type[unsignedinteger[Any]]]
+    float: list[type[floating[Any]]]
+    complex: list[type[complexfloating[Any, Any]]]
+    others: list[type]
+
+sctypeDict: dict[int | str, type[generic]]
+sctypes: _SCTypes
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_ufunc_config.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_ufunc_config.py
new file mode 100644
index 00000000..df821309
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_ufunc_config.py
@@ -0,0 +1,466 @@
+"""
+Functions for changing global ufunc configuration
+
+This provides helpers which wrap `umath.geterrobj` and `umath.seterrobj`.
+"""
+import collections.abc
+import contextlib
+import contextvars
+
+from .._utils import set_module
+from .umath import (
+    UFUNC_BUFSIZE_DEFAULT,
+    ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, ERR_DEFAULT,
+    SHIFT_DIVIDEBYZERO, SHIFT_OVERFLOW, SHIFT_UNDERFLOW, SHIFT_INVALID,
+)
+from . import umath
+
+__all__ = [
+    "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
+    "errstate", '_no_nep50_warning'
+]
+
+_errdict = {"ignore": ERR_IGNORE,
+            "warn": ERR_WARN,
+            "raise": ERR_RAISE,
+            "call": ERR_CALL,
+            "print": ERR_PRINT,
+            "log": ERR_LOG}
+
+_errdict_rev = {value: key for key, value in _errdict.items()}
+
+
+@set_module('numpy')
+def seterr(all=None, divide=None, over=None, under=None, invalid=None):
+    """
+    Set how floating-point errors are handled.
+
+    Note that operations on integer scalar types (such as `int16`) are
+    handled like floating point, and are affected by these settings.
+
+    Parameters
+    ----------
+    all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+        Set treatment for all types of floating-point errors at once:
+
+        - ignore: Take no action when the exception occurs.
+        - warn: Print a `RuntimeWarning` (via the Python `warnings` module).
+        - raise: Raise a `FloatingPointError`.
+        - call: Call a function specified using the `seterrcall` function.
+        - print: Print a warning directly to ``stdout``.
+        - log: Record error in a Log object specified by `seterrcall`.
+
+        The default is not to change the current behavior.
+    divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+        Treatment for division by zero.
+    over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+        Treatment for floating-point overflow.
+    under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+        Treatment for floating-point underflow.
+    invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+        Treatment for invalid floating-point operation.
+
+    Returns
+    -------
+    old_settings : dict
+        Dictionary containing the old settings.
+
+    See also
+    --------
+    seterrcall : Set a callback function for the 'call' mode.
+    geterr, geterrcall, errstate
+
+    Notes
+    -----
+    The floating-point exceptions are defined in the IEEE 754 standard [1]_:
+
+    - Division by zero: infinite result obtained from finite numbers.
+    - Overflow: result too large to be expressed.
+    - Underflow: result so close to zero that some precision
+      was lost.
+    - Invalid operation: result is not an expressible number, typically
+      indicates that a NaN was produced.
+
+    .. [1] https://en.wikipedia.org/wiki/IEEE_754
+
+    Examples
+    --------
+    >>> old_settings = np.seterr(all='ignore')  # seterr to known value
+    >>> np.seterr(over='raise')
+    {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
+    >>> np.seterr(**old_settings)  # reset to default
+    {'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'}
+
+    >>> np.int16(32000) * np.int16(3)
+    30464
+    >>> old_settings = np.seterr(all='warn', over='raise')
+    >>> np.int16(32000) * np.int16(3)
+    Traceback (most recent call last):
+      File "", line 1, in 
+    FloatingPointError: overflow encountered in scalar multiply
+
+    >>> old_settings = np.seterr(all='print')
+    >>> np.geterr()
+    {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}
+    >>> np.int16(32000) * np.int16(3)
+    30464
+
+    """
+
+    pyvals = umath.geterrobj()
+    old = geterr()
+
+    if divide is None:
+        divide = all or old['divide']
+    if over is None:
+        over = all or old['over']
+    if under is None:
+        under = all or old['under']
+    if invalid is None:
+        invalid = all or old['invalid']
+
+    maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
+                 (_errdict[over] << SHIFT_OVERFLOW) +
+                 (_errdict[under] << SHIFT_UNDERFLOW) +
+                 (_errdict[invalid] << SHIFT_INVALID))
+
+    pyvals[1] = maskvalue
+    umath.seterrobj(pyvals)
+    return old
+
+
+@set_module('numpy')
+def geterr():
+    """
+    Get the current way of handling floating-point errors.
+
+    Returns
+    -------
+    res : dict
+        A dictionary with keys "divide", "over", "under", and "invalid",
+        whose values are from the strings "ignore", "print", "log", "warn",
+        "raise", and "call". The keys represent possible floating-point
+        exceptions, and the values define how these exceptions are handled.
+
+    See Also
+    --------
+    geterrcall, seterr, seterrcall
+
+    Notes
+    -----
+    For complete documentation of the types of floating-point exceptions and
+    treatment options, see `seterr`.
+
+    Examples
+    --------
+    >>> np.geterr()
+    {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'}
+    >>> np.arange(3.) / np.arange(3.)
+    array([nan,  1.,  1.])
+
+    >>> oldsettings = np.seterr(all='warn', over='raise')
+    >>> np.geterr()
+    {'divide': 'warn', 'over': 'raise', 'under': 'warn', 'invalid': 'warn'}
+    >>> np.arange(3.) / np.arange(3.)
+    array([nan,  1.,  1.])
+
+    """
+    maskvalue = umath.geterrobj()[1]
+    mask = 7
+    res = {}
+    val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
+    res['divide'] = _errdict_rev[val]
+    val = (maskvalue >> SHIFT_OVERFLOW) & mask
+    res['over'] = _errdict_rev[val]
+    val = (maskvalue >> SHIFT_UNDERFLOW) & mask
+    res['under'] = _errdict_rev[val]
+    val = (maskvalue >> SHIFT_INVALID) & mask
+    res['invalid'] = _errdict_rev[val]
+    return res
+
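+# Editor's illustration of the decoding in geterr (assuming the usual
+# numeric values ERR_IGNORE == 0, ERR_WARN == 1, ERR_RAISE == 2): a mask of
+# (1 << SHIFT_DIVIDEBYZERO) | (2 << SHIFT_OVERFLOW) decodes to
+# {'divide': 'warn', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'}.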
+
+@set_module('numpy')
+def setbufsize(size):
+    """
+    Set the size of the buffer used in ufuncs.
+
+    Parameters
+    ----------
+    size : int
+        Size of buffer.
+
+    """
+    if size > 10e6:
+        raise ValueError("Buffer size, %s, is too big." % size)
+    if size < 5:
+        raise ValueError("Buffer size, %s, is too small." % size)
+    if size % 16 != 0:
+        raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
+
+    pyvals = umath.geterrobj()
+    old = getbufsize()
+    pyvals[0] = size
+    umath.seterrobj(pyvals)
+    return old
+
+
+@set_module('numpy')
+def getbufsize():
+    """
+    Return the size of the buffer used in ufuncs.
+
+    Returns
+    -------
+    getbufsize : int
+        Size of ufunc buffer in bytes.
+
+    """
+    return umath.geterrobj()[0]
+
+
+@set_module('numpy')
+def seterrcall(func):
+    """
+    Set the floating-point error callback function or log object.
+
+    There are two ways to capture floating-point error messages.  The first
+    is to set the error-handler to 'call', using `seterr`.  Then, set
+    the function to call using this function.
+
+    The second is to set the error-handler to 'log', using `seterr`.
+    Floating-point errors then trigger a call to the 'write' method of
+    the provided object.
+
+    Parameters
+    ----------
+    func : callable f(err, flag) or object with write method
+        Function to call upon floating-point errors ('call'-mode) or
+        object whose 'write' method is used to log such message ('log'-mode).
+
+        The call function takes two arguments. The first is a string describing
+        the type of error (such as "divide by zero", "overflow", "underflow",
+        or "invalid value"), and the second is the status flag.  The flag is a
+        byte, whose four least-significant bits indicate the type of error, one
+        of "divide", "over", "under", "invalid"::
+
+          [0 0 0 0 divide over under invalid]
+
+        In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
+
+        If an object is provided, its write method should take one argument,
+        a string.
+
+    Returns
+    -------
+    h : callable, log instance or None
+        The old error handler.
+
+    See Also
+    --------
+    seterr, geterr, geterrcall
+
+    Examples
+    --------
+    Callback upon error:
+
+    >>> def err_handler(type, flag):
+    ...     print("Floating point error (%s), with flag %s" % (type, flag))
+    ...
+
+    >>> saved_handler = np.seterrcall(err_handler)
+    >>> save_err = np.seterr(all='call')
+
+    >>> np.array([1, 2, 3]) / 0.0
+    Floating point error (divide by zero), with flag 1
+    array([inf, inf, inf])
+
+    >>> np.seterrcall(saved_handler)
+    <function err_handler at 0x...>
+    >>> np.seterr(**save_err)
+    {'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'}
+
+    Log error message:
+
+    >>> class Log:
+    ...     def write(self, msg):
+    ...         print("LOG: %s" % msg)
+    ...
+
+    >>> log = Log()
+    >>> saved_handler = np.seterrcall(log)
+    >>> save_err = np.seterr(all='log')
+
+    >>> np.array([1, 2, 3]) / 0.0
+    LOG: Warning: divide by zero encountered in divide
+    array([inf, inf, inf])
+
+    >>> np.seterrcall(saved_handler)
+    <numpy.core.numeric.Log object at 0x...>
+    >>> np.seterr(**save_err)
+    {'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'}
+
+    """
+    if func is not None and not isinstance(func, collections.abc.Callable):
+        if (not hasattr(func, 'write') or
+                not isinstance(func.write, collections.abc.Callable)):
+            raise ValueError("Only callable can be used as callback")
+    pyvals = umath.geterrobj()
+    old = geterrcall()
+    pyvals[2] = func
+    umath.seterrobj(pyvals)
+    return old
+
+
+@set_module('numpy')
+def geterrcall():
+    """
+    Return the current callback function used on floating-point errors.
+
+    When the error handling for a floating-point error (one of "divide",
+    "over", "under", or "invalid") is set to 'call' or 'log', the function
+    that is called or the log instance that is written to is returned by
+    `geterrcall`. This function or log instance has been set with
+    `seterrcall`.
+
+    Returns
+    -------
+    errobj : callable, log instance or None
+        The current error handler. If no handler was set through `seterrcall`,
+        ``None`` is returned.
+
+    See Also
+    --------
+    seterrcall, seterr, geterr
+
+    Notes
+    -----
+    For complete documentation of the types of floating-point exceptions and
+    treatment options, see `seterr`.
+
+    Examples
+    --------
+    >>> np.geterrcall()  # we did not yet set a handler, returns None
+
+    >>> oldsettings = np.seterr(all='call')
+    >>> def err_handler(type, flag):
+    ...     print("Floating point error (%s), with flag %s" % (type, flag))
+    >>> oldhandler = np.seterrcall(err_handler)
+    >>> np.array([1, 2, 3]) / 0.0
+    Floating point error (divide by zero), with flag 1
+    array([inf, inf, inf])
+
+    >>> cur_handler = np.geterrcall()
+    >>> cur_handler is err_handler
+    True
+
+    """
+    return umath.geterrobj()[2]
+
+
+class _unspecified:
+    pass
+
+
+_Unspecified = _unspecified()
+
+
+@set_module('numpy')
+class errstate(contextlib.ContextDecorator):
+    """
+    errstate(**kwargs)
+
+    Context manager for floating-point error handling.
+
+    Using an instance of `errstate` as a context manager allows statements in
+    that context to execute with a known error handling behavior. Upon entering
+    the context the error handling is set with `seterr` and `seterrcall`, and
+    upon exiting it is reset to what it was before.
+
+    ..  versionchanged:: 1.17.0
+        `errstate` is also usable as a function decorator, saving
+        a level of indentation if an entire function is wrapped.
+        See :py:class:`contextlib.ContextDecorator` for more information.
+
+    Parameters
+    ----------
+    kwargs : {divide, over, under, invalid}
+        Keyword arguments. The valid keywords are the possible floating-point
+        exceptions. Each keyword should have a string value that defines the
+        treatment for the particular error. Possible values are
+        {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
+
+    See Also
+    --------
+    seterr, geterr, seterrcall, geterrcall
+
+    Notes
+    -----
+    For complete documentation of the types of floating-point exceptions and
+    treatment options, see `seterr`.
+
+    Examples
+    --------
+    >>> olderr = np.seterr(all='ignore')  # Set error handling to known state.
+
+    >>> np.arange(3) / 0.
+    array([nan, inf, inf])
+    >>> with np.errstate(divide='warn'):
+    ...     np.arange(3) / 0.
+    array([nan, inf, inf])
+
+    >>> np.sqrt(-1)
+    nan
+    >>> with np.errstate(invalid='raise'):
+    ...     np.sqrt(-1)
+    Traceback (most recent call last):
+      File "", line 2, in 
+    FloatingPointError: invalid value encountered in sqrt
+
+    Outside the context the error handling behavior has not changed:
+
+    >>> np.geterr()
+    {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
+
+    """
+
+    def __init__(self, *, call=_Unspecified, **kwargs):
+        self.call = call
+        self.kwargs = kwargs
+
+    def __enter__(self):
+        self.oldstate = seterr(**self.kwargs)
+        if self.call is not _Unspecified:
+            self.oldcall = seterrcall(self.call)
+
+    def __exit__(self, *exc_info):
+        seterr(**self.oldstate)
+        if self.call is not _Unspecified:
+            seterrcall(self.oldcall)
+
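+# Editor's sketch of the decorator form mentioned in the docstring above
+# (illustrative only, not upstream code):
+#
+#     @errstate(invalid='ignore')
+#     def quiet_sqrt(x):
+#         return np.sqrt(x)   # np.sqrt(-1.) yields nan with no warning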
+
+def _setdef():
+    defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None]
+    umath.seterrobj(defval)
+
+
+# set the default values
+_setdef()
+
+
+NO_NEP50_WARNING = contextvars.ContextVar("_no_nep50_warning", default=False)
+
+@set_module('numpy')
+@contextlib.contextmanager
+def _no_nep50_warning():
+    """
+    Context manager to disable NEP 50 warnings.  This context manager is
+    only relevant if the NEP 50 warnings are enabled globally (which is not
+    thread/context safe).
+
+    This warning context manager itself is fully safe, however.
+    """
+    token = NO_NEP50_WARNING.set(True)
+    try:
+        yield
+    finally:
+        NO_NEP50_WARNING.reset(token)
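+
+# Editor's usage sketch (an assumption about how callers use this): code
+# that must not emit NEP 50 change warnings wraps the operation as
+#
+#     with _no_nep50_warning():
+#         ...  # promotion-sensitive work
+#
+# and the warning paths consult NO_NEP50_WARNING.get() before warning.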
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_ufunc_config.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_ufunc_config.pyi
new file mode 100644
index 00000000..f5650450
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_ufunc_config.pyi
@@ -0,0 +1,37 @@
+from collections.abc import Callable
+from typing import Any, Literal, TypedDict
+
+from numpy import _SupportsWrite
+
+_ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"]
+_ErrFunc = Callable[[str, int], Any]
+
+class _ErrDict(TypedDict):
+    divide: _ErrKind
+    over: _ErrKind
+    under: _ErrKind
+    invalid: _ErrKind
+
+class _ErrDictOptional(TypedDict, total=False):
+    all: None | _ErrKind
+    divide: None | _ErrKind
+    over: None | _ErrKind
+    under: None | _ErrKind
+    invalid: None | _ErrKind
+
+def seterr(
+    all: None | _ErrKind = ...,
+    divide: None | _ErrKind = ...,
+    over: None | _ErrKind = ...,
+    under: None | _ErrKind = ...,
+    invalid: None | _ErrKind = ...,
+) -> _ErrDict: ...
+def geterr() -> _ErrDict: ...
+def setbufsize(size: int) -> int: ...
+def getbufsize() -> int: ...
+def seterrcall(
+    func: None | _ErrFunc | _SupportsWrite[str]
+) -> None | _ErrFunc | _SupportsWrite[str]: ...
+def geterrcall() -> None | _ErrFunc | _SupportsWrite[str]: ...
+
+# See `numpy/__init__.pyi` for the `errstate` class and `_no_nep50_warning`
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/_umath_tests.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_umath_tests.cpython-39-darwin.so
new file mode 100755
index 00000000..2b2d2aae
Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/core/_umath_tests.cpython-39-darwin.so differ
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/arrayprint.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/arrayprint.py
new file mode 100644
index 00000000..62cd5270
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/arrayprint.py
@@ -0,0 +1,1725 @@
+"""Array printing function
+
+$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
+
+"""
+__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
+           "set_printoptions", "get_printoptions", "printoptions",
+           "format_float_positional", "format_float_scientific"]
+__docformat__ = 'restructuredtext'
+
+#
+# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>
+# last revision: 1996-3-13
+# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
+# and by Perry Greenfield 2000-4-1 for numarray
+# and by Travis Oliphant  2005-8-22 for numpy
+
+
+# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy
+# scalars but for different purposes. scalartypes.c.src has str/reprs for when
+# the scalar is printed on its own, while arrayprint.py has strs for when
+# scalars are printed inside an ndarray. Only the latter strs are currently
+# user-customizable.
+
+import functools
+import numbers
+import sys
+try:
+    from _thread import get_ident
+except ImportError:
+    from _dummy_thread import get_ident
+
+import numpy as np
+from . import numerictypes as _nt
+from .umath import absolute, isinf, isfinite, isnat
+from . import multiarray
+from .multiarray import (array, dragon4_positional, dragon4_scientific,
+                         datetime_as_string, datetime_data, ndarray,
+                         set_legacy_print_mode)
+from .fromnumeric import any
+from .numeric import concatenate, asarray, errstate
+from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
+                           flexible)
+from .overrides import array_function_dispatch, set_module
+import operator
+import warnings
+import contextlib
+
+_format_options = {
+    'edgeitems': 3,  # repr N leading and trailing items of each dimension
+    'threshold': 1000,  # total items > triggers array summarization
+    'floatmode': 'maxprec',
+    'precision': 8,  # precision of floating point representations
+    'suppress': False,  # suppress printing small floating values in exp format
+    'linewidth': 75,
+    'nanstr': 'nan',
+    'infstr': 'inf',
+    'sign': '-',
+    'formatter': None,
+    # Internally stored as an int to simplify comparisons; converted from/to
+    # str/False on the way in/out.
+    'legacy': sys.maxsize}
+
+def _make_options_dict(precision=None, threshold=None, edgeitems=None,
+                       linewidth=None, suppress=None, nanstr=None, infstr=None,
+                       sign=None, formatter=None, floatmode=None, legacy=None):
+    """
+    Make a dictionary out of the non-None arguments, plus conversion of
+    *legacy* and sanity checks.
+    """
+
+    options = {k: v for k, v in locals().items() if v is not None}
+
+    if suppress is not None:
+        options['suppress'] = bool(suppress)
+
+    modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal']
+    if floatmode not in modes + [None]:
+        raise ValueError("floatmode option must be one of " +
+                         ", ".join('"{}"'.format(m) for m in modes))
+
+    if sign not in [None, '-', '+', ' ']:
+        raise ValueError("sign option must be one of ' ', '+', or '-'")
+
+    if legacy == False:
+        options['legacy'] = sys.maxsize
+    elif legacy == '1.13':
+        options['legacy'] = 113
+    elif legacy == '1.21':
+        options['legacy'] = 121
+    elif legacy is None:
+        pass  # OK, do nothing.
+    else:
+        warnings.warn(
+            "legacy printing option can currently only be '1.13', '1.21', or "
+            "`False`", stacklevel=3)
+
+    if threshold is not None:
+        # forbid the bad threshold arg suggested by stack overflow, gh-12351
+        if not isinstance(threshold, numbers.Number):
+            raise TypeError("threshold must be numeric")
+        if np.isnan(threshold):
+            raise ValueError("threshold must be non-NAN, try "
+                             "sys.maxsize for untruncated representation")
+
+    if precision is not None:
+        # forbid the bad precision arg as suggested by issue #18254
+        try:
+            options['precision'] = operator.index(precision)
+        except TypeError as e:
+            raise TypeError('precision must be an integer') from e
+
+    return options
+
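+# Editor's illustration: _make_options_dict(precision=3, legacy='1.13')
+# would return {'precision': 3, 'legacy': 113}; the string form is turned
+# into an int here and mapped back to a string in get_printoptions below.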
+
+@set_module('numpy')
+def set_printoptions(precision=None, threshold=None, edgeitems=None,
+                     linewidth=None, suppress=None, nanstr=None, infstr=None,
+                     formatter=None, sign=None, floatmode=None, *, legacy=None):
+    """
+    Set printing options.
+
+    These options determine the way floating point numbers, arrays and
+    other NumPy objects are displayed.
+
+    Parameters
+    ----------
+    precision : int or None, optional
+        Number of digits of precision for floating point output (default 8).
+        May be None if `floatmode` is not `fixed`, to print as many digits as
+        necessary to uniquely specify the value.
+    threshold : int, optional
+        Total number of array elements which trigger summarization
+        rather than full repr (default 1000).
+        To always use the full repr without summarization, pass `sys.maxsize`.
+    edgeitems : int, optional
+        Number of array items in summary at beginning and end of
+        each dimension (default 3).
+    linewidth : int, optional
+        The number of characters per line for the purpose of inserting
+        line breaks (default 75).
+    suppress : bool, optional
+        If True, always print floating point numbers using fixed point
+        notation, in which case numbers equal to zero in the current precision
+        will print as zero.  If False, then scientific notation is used when
+        absolute value of the smallest number is < 1e-4 or the ratio of the
+        maximum absolute value to the minimum is > 1e3. The default is False.
+    nanstr : str, optional
+        String representation of floating point not-a-number (default nan).
+    infstr : str, optional
+        String representation of floating point infinity (default inf).
+    sign : string, either '-', '+', or ' ', optional
+        Controls printing of the sign of floating-point types. If '+', always
+        print the sign of positive values. If ' ', always print a space
+        (whitespace character) in the sign position of positive values.  If
+        '-', omit the sign character of positive values. (default '-')
+    formatter : dict of callables, optional
+        If not None, the keys should indicate the type(s) that the respective
+        formatting function applies to.  Callables should return a string.
+        Types that are not specified (by their corresponding keys) are handled
+        by the default formatters.  Individual types for which a formatter
+        can be set are:
+
+        - 'bool'
+        - 'int'
+        - 'timedelta' : a `numpy.timedelta64`
+        - 'datetime' : a `numpy.datetime64`
+        - 'float'
+        - 'longfloat' : 128-bit floats
+        - 'complexfloat'
+        - 'longcomplexfloat' : composed of two 128-bit floats
+        - 'numpystr' : types `numpy.bytes_` and `numpy.str_`
+        - 'object' : `np.object_` arrays
+
+        Other keys that can be used to set a group of types at once are:
+
+        - 'all' : sets all types
+        - 'int_kind' : sets 'int'
+        - 'float_kind' : sets 'float' and 'longfloat'
+        - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
+        - 'str_kind' : sets 'numpystr'
+    floatmode : str, optional
+        Controls the interpretation of the `precision` option for
+        floating-point types. Can take the following values
+        (default maxprec_equal):
+
+        * 'fixed': Always print exactly `precision` fractional digits,
+                even if this would print more or fewer digits than
+                necessary to specify the value uniquely.
+        * 'unique': Print the minimum number of fractional digits necessary
+                to represent each value uniquely. Different elements may
+                have a different number of digits. The value of the
+                `precision` option is ignored.
+        * 'maxprec': Print at most `precision` fractional digits, but if
+                an element can be uniquely represented with fewer digits
+                only print it with that many.
+        * 'maxprec_equal': Print at most `precision` fractional digits,
+                but if every element in the array can be uniquely
+                represented with an equal number of fewer digits, use that
+                many digits for all elements.
+    legacy : string or `False`, optional
+        If set to the string `'1.13'` enables 1.13 legacy printing mode. This
+        approximates numpy 1.13 print output by including a space in the sign
+        position of floats and different behavior for 0d arrays. This also
+        enables 1.21 legacy printing mode (described below).
+
+        If set to the string `'1.21'` enables 1.21 legacy printing mode. This
+        approximates numpy 1.21 print output of complex structured dtypes
+        by not inserting spaces after commas that separate fields and after
+        colons.
+
+        If set to `False`, disables legacy mode.
+
+        Unrecognized strings will be ignored with a warning for forward
+        compatibility.
+
+        .. versionadded:: 1.14.0
+        .. versionchanged:: 1.22.0
+
+    See Also
+    --------
+    get_printoptions, printoptions, set_string_function, array2string
+
+    Notes
+    -----
+    `formatter` is always reset with a call to `set_printoptions`.
+
+    Use `printoptions` as a context manager to set the values temporarily.
+
+    Examples
+    --------
+    Floating point precision can be set:
+
+    >>> np.set_printoptions(precision=4)
+    >>> np.array([1.123456789])
+    array([1.1235])
+
+    Long arrays can be summarized:
+
+    >>> np.set_printoptions(threshold=5)
+    >>> np.arange(10)
+    array([0, 1, 2, ..., 7, 8, 9])
+
+    Small results can be suppressed:
+
+    >>> eps = np.finfo(float).eps
+    >>> x = np.arange(4.)
+    >>> x**2 - (x + eps)**2
+    array([-4.9304e-32, -4.4409e-16,  0.0000e+00,  0.0000e+00])
+    >>> np.set_printoptions(suppress=True)
+    >>> x**2 - (x + eps)**2
+    array([-0., -0.,  0.,  0.])
+
+    A custom formatter can be used to display array elements as desired:
+
+    >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
+    >>> x = np.arange(3)
+    >>> x
+    array([int: 0, int: -1, int: -2])
+    >>> np.set_printoptions()  # formatter gets reset
+    >>> x
+    array([0, 1, 2])
+
+    To put back the default options, you can use:
+
+    >>> np.set_printoptions(edgeitems=3, infstr='inf',
+    ... linewidth=75, nanstr='nan', precision=8,
+    ... suppress=False, threshold=1000, formatter=None)
+
+    Also to temporarily override options, use `printoptions` as a context manager:
+
+    >>> with np.printoptions(precision=2, suppress=True, threshold=5):
+    ...     np.linspace(0, 10, 10)
+    array([ 0.  ,  1.11,  2.22, ...,  7.78,  8.89, 10.  ])
+
+    """
+    opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
+                             suppress, nanstr, infstr, sign, formatter,
+                             floatmode, legacy)
+    # formatter is always reset
+    opt['formatter'] = formatter
+    _format_options.update(opt)
+
+    # set the C variable for legacy mode
+    if _format_options['legacy'] == 113:
+        set_legacy_print_mode(113)
+        # reset the sign option in legacy mode to avoid confusion
+        _format_options['sign'] = '-'
+    elif _format_options['legacy'] == 121:
+        set_legacy_print_mode(121)
+    elif _format_options['legacy'] == sys.maxsize:
+        set_legacy_print_mode(0)
+
+
+@set_module('numpy')
+def get_printoptions():
+    """
+    Return the current print options.
+
+    Returns
+    -------
+    print_opts : dict
+        Dictionary of current print options with keys
+
+          - precision : int
+          - threshold : int
+          - edgeitems : int
+          - linewidth : int
+          - suppress : bool
+          - nanstr : str
+          - infstr : str
+          - formatter : dict of callables
+          - sign : str
+
+        For a full description of these options, see `set_printoptions`.
+
+    See Also
+    --------
+    set_printoptions, printoptions, set_string_function
+
+    """
+    opts = _format_options.copy()
+    opts['legacy'] = {
+        113: '1.13', 121: '1.21', sys.maxsize: False,
+    }[opts['legacy']]
+    return opts
+
+
+def _get_legacy_print_mode():
+    """Return the legacy print mode as an int."""
+    return _format_options['legacy']
+
+
+@set_module('numpy')
+@contextlib.contextmanager
+def printoptions(*args, **kwargs):
+    """Context manager for setting print options.
+
+    Set print options for the scope of the `with` block, and restore the old
+    options at the end. See `set_printoptions` for the full description of
+    available options.
+
+    Examples
+    --------
+
+    >>> from numpy.testing import assert_equal
+    >>> with np.printoptions(precision=2):
+    ...     np.array([2.0]) / 3
+    array([0.67])
+
+    The `as`-clause of the `with`-statement gives the current print options:
+
+    >>> with np.printoptions(precision=2) as opts:
+    ...      assert_equal(opts, np.get_printoptions())
+
+    See Also
+    --------
+    set_printoptions, get_printoptions
+
+    """
+    opts = np.get_printoptions()
+    try:
+        np.set_printoptions(*args, **kwargs)
+        yield np.get_printoptions()
+    finally:
+        np.set_printoptions(**opts)
+
+
+def _leading_trailing(a, edgeitems, index=()):
+    """
+    Keep only the N-D corners (leading and trailing edges) of an array.
+
+    Should be passed a base-class ndarray, since it makes no guarantees about
+    preserving subclasses.
+    """
+    axis = len(index)
+    if axis == a.ndim:
+        return a[index]
+
+    if a.shape[axis] > 2*edgeitems:
+        return concatenate((
+            _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
+            _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
+        ), axis=axis)
+    else:
+        return _leading_trailing(a, edgeitems, index + np.index_exp[:])
+
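+# Editor's illustration: for a = np.arange(10) and edgeitems=2 this keeps
+# only the corners, np.array([0, 1, 8, 9]); for higher dimensions the same
+# trimming is applied recursively along every axis.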
+
+def _object_format(o):
+    """ Object arrays containing lists should be printed unambiguously """
+    if type(o) is list:
+        fmt = 'list({!r})'
+    else:
+        fmt = '{!r}'
+    return fmt.format(o)
+
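+# Editor's note: e.g. _object_format([1, 2]) gives "list([1, 2])", so a list
+# element cannot be confused with a nested array when printed.
+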
+def repr_format(x):
+    return repr(x)
+
+def str_format(x):
+    return str(x)
+
+def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy,
+                    formatter, **kwargs):
+    # note: extra arguments in kwargs are ignored
+
+    # wrapped in lambdas to avoid taking a code path with the wrong type of data
+    formatdict = {
+        'bool': lambda: BoolFormat(data),
+        'int': lambda: IntegerFormat(data),
+        'float': lambda: FloatingFormat(
+            data, precision, floatmode, suppress, sign, legacy=legacy),
+        'longfloat': lambda: FloatingFormat(
+            data, precision, floatmode, suppress, sign, legacy=legacy),
+        'complexfloat': lambda: ComplexFloatingFormat(
+            data, precision, floatmode, suppress, sign, legacy=legacy),
+        'longcomplexfloat': lambda: ComplexFloatingFormat(
+            data, precision, floatmode, suppress, sign, legacy=legacy),
+        'datetime': lambda: DatetimeFormat(data, legacy=legacy),
+        'timedelta': lambda: TimedeltaFormat(data),
+        'object': lambda: _object_format,
+        'void': lambda: str_format,
+        'numpystr': lambda: repr_format}
+
+    # we need to wrap values in `formatter` in a lambda, so that the interface
+    # is the same as the above values.
+    def indirect(x):
+        return lambda: x
+
+    if formatter is not None:
+        fkeys = [k for k in formatter.keys() if formatter[k] is not None]
+        if 'all' in fkeys:
+            for key in formatdict.keys():
+                formatdict[key] = indirect(formatter['all'])
+        if 'int_kind' in fkeys:
+            for key in ['int']:
+                formatdict[key] = indirect(formatter['int_kind'])
+        if 'float_kind' in fkeys:
+            for key in ['float', 'longfloat']:
+                formatdict[key] = indirect(formatter['float_kind'])
+        if 'complex_kind' in fkeys:
+            for key in ['complexfloat', 'longcomplexfloat']:
+                formatdict[key] = indirect(formatter['complex_kind'])
+        if 'str_kind' in fkeys:
+            formatdict['numpystr'] = indirect(formatter['str_kind'])
+        for key in formatdict.keys():
+            if key in fkeys:
+                formatdict[key] = indirect(formatter[key])
+
+    return formatdict
+
+def _get_format_function(data, **options):
+    """
+    Find the right formatting function for the given dtype.
+    """
+    dtype_ = data.dtype
+    dtypeobj = dtype_.type
+    formatdict = _get_formatdict(data, **options)
+    if dtypeobj is None:
+        return formatdict["numpystr"]()
+    elif issubclass(dtypeobj, _nt.bool_):
+        return formatdict['bool']()
+    elif issubclass(dtypeobj, _nt.integer):
+        if issubclass(dtypeobj, _nt.timedelta64):
+            return formatdict['timedelta']()
+        else:
+            return formatdict['int']()
+    elif issubclass(dtypeobj, _nt.floating):
+        if issubclass(dtypeobj, _nt.longfloat):
+            return formatdict['longfloat']()
+        else:
+            return formatdict['float']()
+    elif issubclass(dtypeobj, _nt.complexfloating):
+        if issubclass(dtypeobj, _nt.clongfloat):
+            return formatdict['longcomplexfloat']()
+        else:
+            return formatdict['complexfloat']()
+    elif issubclass(dtypeobj, (_nt.str_, _nt.bytes_)):
+        return formatdict['numpystr']()
+    elif issubclass(dtypeobj, _nt.datetime64):
+        return formatdict['datetime']()
+    elif issubclass(dtypeobj, _nt.object_):
+        return formatdict['object']()
+    elif issubclass(dtypeobj, _nt.void):
+        if dtype_.names is not None:
+            return StructuredVoidFormat.from_data(data, **options)
+        else:
+            return formatdict['void']()
+    else:
+        return formatdict['numpystr']()
+
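+# Editor's note: e.g. float64 data resolves to the FloatingFormat entry
+# built in _get_formatdict; the lambda indirection above means only the
+# formatter that is actually selected ever gets constructed.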
+
+def _recursive_guard(fillvalue='...'):
+    """
+    Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs
+
+    Decorates a function such that if it calls itself with the same first
+    argument, it returns `fillvalue` instead of recursing.
+
+    Largely copied from reprlib.recursive_repr
+    """
+
+    def decorating_function(f):
+        repr_running = set()
+
+        @functools.wraps(f)
+        def wrapper(self, *args, **kwargs):
+            key = id(self), get_ident()
+            if key in repr_running:
+                return fillvalue
+            repr_running.add(key)
+            try:
+                return f(self, *args, **kwargs)
+            finally:
+                repr_running.discard(key)
+
+        return wrapper
+
+    return decorating_function
+
+
+# gracefully handle recursive calls, when object arrays contain themselves
+@_recursive_guard()
+def _array2string(a, options, separator=' ', prefix=""):
+    # The formatter __init__s in _get_format_function cannot deal with
+    # subclasses yet, and we also need to avoid recursion issues in
+    # _formatArray with subclasses which return 0d arrays in place of scalars
+    data = asarray(a)
+    if a.shape == ():
+        a = data
+
+    if a.size > options['threshold']:
+        summary_insert = "..."
+        data = _leading_trailing(data, options['edgeitems'])
+    else:
+        summary_insert = ""
+
+    # find the right formatting function for the array
+    format_function = _get_format_function(data, **options)
+
+    # skip over "["
+    next_line_prefix = " "
+    # skip over array(
+    next_line_prefix += " "*len(prefix)
+
+    lst = _formatArray(a, format_function, options['linewidth'],
+                       next_line_prefix, separator, options['edgeitems'],
+                       summary_insert, options['legacy'])
+    return lst
+
+
+def _array2string_dispatcher(
+        a, max_line_width=None, precision=None,
+        suppress_small=None, separator=None, prefix=None,
+        style=None, formatter=None, threshold=None,
+        edgeitems=None, sign=None, floatmode=None, suffix=None,
+        *, legacy=None):
+    return (a,)
+
+
+@array_function_dispatch(_array2string_dispatcher, module='numpy')
+def array2string(a, max_line_width=None, precision=None,
+                 suppress_small=None, separator=' ', prefix="",
+                 style=np._NoValue, formatter=None, threshold=None,
+                 edgeitems=None, sign=None, floatmode=None, suffix="",
+                 *, legacy=None):
+    """
+    Return a string representation of an array.
+
+    Parameters
+    ----------
+    a : ndarray
+        Input array.
+    max_line_width : int, optional
+        Inserts newlines if text is longer than `max_line_width`.
+        Defaults to ``numpy.get_printoptions()['linewidth']``.
+    precision : int or None, optional
+        Floating point precision.
+        Defaults to ``numpy.get_printoptions()['precision']``.
+    suppress_small : bool, optional
+        Represent numbers "very close" to zero as zero; default is False.
+        Very close is defined by precision: if the precision is 8, e.g.,
+        numbers smaller (in absolute value) than 5e-9 are represented as
+        zero.
+        Defaults to ``numpy.get_printoptions()['suppress']``.
+    separator : str, optional
+        Inserted between elements.
+    prefix : str, optional
+    suffix : str, optional
+        The lengths of the prefix and suffix strings are used to respectively
+        align and wrap the output. An array is typically printed as::
+
+          prefix + array2string(a) + suffix
+
+        The output is left-padded by the length of the prefix string, and
+        wrapping is forced at the column ``max_line_width - len(suffix)``.
+        Note that the contents of the prefix and suffix strings are not
+        included in the output.
+    style : _NoValue, optional
+        Has no effect, do not use.
+
+        .. deprecated:: 1.14.0
+    formatter : dict of callables, optional
+        If not None, the keys should indicate the type(s) that the respective
+        formatting function applies to.  Callables should return a string.
+        Types that are not specified (by their corresponding keys) are handled
+        by the default formatters.  Individual types for which a formatter
+        can be set are:
+
+        - 'bool'
+        - 'int'
+        - 'timedelta' : a `numpy.timedelta64`
+        - 'datetime' : a `numpy.datetime64`
+        - 'float'
+        - 'longfloat' : 128-bit floats
+        - 'complexfloat'
+        - 'longcomplexfloat' : composed of two 128-bit floats
+        - 'void' : type `numpy.void`
+        - 'numpystr' : types `numpy.bytes_` and `numpy.str_`
+
+        Other keys that can be used to set a group of types at once are:
+
+        - 'all' : sets all types
+        - 'int_kind' : sets 'int'
+        - 'float_kind' : sets 'float' and 'longfloat'
+        - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
+        - 'str_kind' : sets 'numpystr'
+    threshold : int, optional
+        Total number of array elements which trigger summarization
+        rather than full repr.
+        Defaults to ``numpy.get_printoptions()['threshold']``.
+    edgeitems : int, optional
+        Number of array items in summary at beginning and end of
+        each dimension.
+        Defaults to ``numpy.get_printoptions()['edgeitems']``.
+    sign : string, either '-', '+', or ' ', optional
+        Controls printing of the sign of floating-point types. If '+', always
+        print the sign of positive values. If ' ', always print a space
+        (whitespace character) in the sign position of positive values.  If
+        '-', omit the sign character of positive values.
+        Defaults to ``numpy.get_printoptions()['sign']``.
+    floatmode : str, optional
+        Controls the interpretation of the `precision` option for
+        floating-point types.
+        Defaults to ``numpy.get_printoptions()['floatmode']``.
+        Can take the following values:
+
+        - 'fixed': Always print exactly `precision` fractional digits,
+          even if this would print more or fewer digits than
+          necessary to specify the value uniquely.
+        - 'unique': Print the minimum number of fractional digits necessary
+          to represent each value uniquely. Different elements may
+          have a different number of digits.  The value of the
+          `precision` option is ignored.
+        - 'maxprec': Print at most `precision` fractional digits, but if
+          an element can be uniquely represented with fewer digits
+          only print it with that many.
+        - 'maxprec_equal': Print at most `precision` fractional digits,
+          but if every element in the array can be uniquely
+          represented with an equal number of fewer digits, use that
+          many digits for all elements.
+    legacy : string or `False`, optional
+        If set to the string `'1.13'` enables 1.13 legacy printing mode. This
+        approximates numpy 1.13 print output by including a space in the sign
+        position of floats and different behavior for 0d arrays. If set to
+        `False`, disables legacy mode. Unrecognized strings will be ignored
+        with a warning for forward compatibility.
+
+        .. versionadded:: 1.14.0
+
+    Returns
+    -------
+    array_str : str
+        String representation of the array.
+
+    Raises
+    ------
+    TypeError
+        if a callable in `formatter` does not return a string.
+
+    See Also
+    --------
+    array_str, array_repr, set_printoptions, get_printoptions
+
+    Notes
+    -----
+    If a formatter is specified for a certain type, the `precision` keyword is
+    ignored for that type.
+
+    This is a very flexible function; `array_repr` and `array_str` are using
+    `array2string` internally so keywords with the same name should work
+    identically in all three functions.
+
+    Examples
+    --------
+    >>> x = np.array([1e-16,1,2,3])
+    >>> np.array2string(x, precision=2, separator=',',
+    ...                       suppress_small=True)
+    '[0.,1.,2.,3.]'
+
+    >>> x  = np.arange(3.)
+    >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
+    '[0.00 1.00 2.00]'
+
+    >>> x  = np.arange(3)
+    >>> np.array2string(x, formatter={'int':lambda x: hex(x)})
+    '[0x0 0x1 0x2]'
+
+    """
+
+    overrides = _make_options_dict(precision, threshold, edgeitems,
+                                   max_line_width, suppress_small, None, None,
+                                   sign, formatter, floatmode, legacy)
+    options = _format_options.copy()
+    options.update(overrides)
+
+    if options['legacy'] <= 113:
+        if style is np._NoValue:
+            style = repr
+
+        if a.shape == () and a.dtype.names is None:
+            return style(a.item())
+    elif style is not np._NoValue:
+        # Deprecation 11-9-2017  v1.14
+        warnings.warn("'style' argument is deprecated and no longer functional"
+                      " except in 1.13 'legacy' mode",
+                      DeprecationWarning, stacklevel=2)
+
+    if options['legacy'] > 113:
+        options['linewidth'] -= len(suffix)
+
+    # treat as a null array if any of shape elements == 0
+    if a.size == 0:
+        return "[]"
+
+    return _array2string(a, options, separator, prefix)
+
+
+def _extendLine(s, line, word, line_width, next_line_prefix, legacy):
+    needs_wrap = len(line) + len(word) > line_width
+    if legacy > 113:
+        # don't wrap lines if it won't help
+        if len(line) <= len(next_line_prefix):
+            needs_wrap = False
+
+    if needs_wrap:
+        s += line.rstrip() + "\n"
+        line = next_line_prefix
+    line += word
+    return s, line
+
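+# Editor's worked example: with line_width=8, line="[1, 2, " and word="33",
+# len(line) + len(word) is 9 > 8, so the current line is flushed into ``s``
+# and the word starts a new line beginning with next_line_prefix.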
+
+def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy):
+    """
+    Extends line with nicely formatted (possibly multi-line) string ``word``.
+    """
+    words = word.splitlines()
+    if len(words) == 1 or legacy <= 113:
+        return _extendLine(s, line, word, line_width, next_line_prefix, legacy)
+
+    max_word_length = max(len(word) for word in words)
+    if (len(line) + max_word_length > line_width and
+            len(line) > len(next_line_prefix)):
+        s += line.rstrip() + '\n'
+        line = next_line_prefix + words[0]
+        indent = next_line_prefix
+    else:
+        indent = len(line)*' '
+        line += words[0]
+
+    for word in words[1::]:
+        s += line.rstrip() + '\n'
+        line = indent + word
+
+    suffix_length = max_word_length - len(words[-1])
+    line += suffix_length*' '
+
+    return s, line
+
+def _formatArray(a, format_function, line_width, next_line_prefix,
+                 separator, edge_items, summary_insert, legacy):
+    """formatArray is designed for two modes of operation:
+
+    1. Full output
+
+    2. Summarized output
+
+    """
+    def recurser(index, hanging_indent, curr_width):
+        """
+        By using this local function, we don't need to recurse with all the
+        arguments. Since this function is not created recursively, the cost is
+        not significant
+        """
+        axis = len(index)
+        axes_left = a.ndim - axis
+
+        if axes_left == 0:
+            return format_function(a[index])
+
+        # when recursing, add a space to align with the [ added, and reduce the
+        # length of the line by 1
+        next_hanging_indent = hanging_indent + ' '
+        if legacy <= 113:
+            next_width = curr_width
+        else:
+            next_width = curr_width - len(']')
+
+        a_len = a.shape[axis]
+        show_summary = summary_insert and 2*edge_items < a_len
+        if show_summary:
+            leading_items = edge_items
+            trailing_items = edge_items
+        else:
+            leading_items = 0
+            trailing_items = a_len
+
+        # stringify the array with the hanging indent on the first line too
+        s = ''
+
+        # last axis (rows) - wrap elements if they would not fit on one line
+        if axes_left == 1:
+            # the length up until the beginning of the separator / bracket
+            if legacy <= 113:
+                elem_width = curr_width - len(separator.rstrip())
+            else:
+                elem_width = curr_width - max(len(separator.rstrip()), len(']'))
+
+            line = hanging_indent
+            for i in range(leading_items):
+                word = recurser(index + (i,), next_hanging_indent, next_width)
+                s, line = _extendLine_pretty(
+                    s, line, word, elem_width, hanging_indent, legacy)
+                line += separator
+
+            if show_summary:
+                s, line = _extendLine(
+                    s, line, summary_insert, elem_width, hanging_indent, legacy)
+                if legacy <= 113:
+                    line += ", "
+                else:
+                    line += separator
+
+            for i in range(trailing_items, 1, -1):
+                word = recurser(index + (-i,), next_hanging_indent, next_width)
+                s, line = _extendLine_pretty(
+                    s, line, word, elem_width, hanging_indent, legacy)
+                line += separator
+
+            if legacy <= 113:
+                # width of the separator is not considered on 1.13
+                elem_width = curr_width
+            word = recurser(index + (-1,), next_hanging_indent, next_width)
+            s, line = _extendLine_pretty(
+                s, line, word, elem_width, hanging_indent, legacy)
+
+            s += line
+
+        # other axes - insert newlines between rows
+        else:
+            s = ''
+            line_sep = separator.rstrip() + '\n'*(axes_left - 1)
+
+            for i in range(leading_items):
+                nested = recurser(index + (i,), next_hanging_indent, next_width)
+                s += hanging_indent + nested + line_sep
+
+            if show_summary:
+                if legacy <= 113:
+                    # trailing space, fixed nbr of newlines, and fixed separator
+                    s += hanging_indent + summary_insert + ", \n"
+                else:
+                    s += hanging_indent + summary_insert + line_sep
+
+            for i in range(trailing_items, 1, -1):
+                nested = recurser(index + (-i,), next_hanging_indent,
+                                  next_width)
+                s += hanging_indent + nested + line_sep
+
+            nested = recurser(index + (-1,), next_hanging_indent, next_width)
+            s += hanging_indent + nested
+
+        # remove the hanging indent, and wrap in []
+        s = '[' + s[len(hanging_indent):] + ']'
+        return s
+
+    try:
+        # invoke the recursive part with an initial index and prefix
+        return recurser(index=(),
+                        hanging_indent=next_line_prefix,
+                        curr_width=line_width)
+    finally:
+        # recursive closures have a cyclic reference to themselves, which
+        # requires gc to collect (gh-10620). To avoid this problem, for
+        # performance and PyPy friendliness, we break the cycle:
+        recurser = None
+
+def _none_or_positive_arg(x, name):
+    if x is None:
+        return -1
+    if x < 0:
+        raise ValueError("{} must be >= 0".format(name))
+    return x
+
+class FloatingFormat:
+    """ Formatter for subtypes of np.floating """
+    def __init__(self, data, precision, floatmode, suppress_small, sign=False,
+                 *, legacy=None):
+        # for backcompatibility, accept bools
+        if isinstance(sign, bool):
+            sign = '+' if sign else '-'
+
+        self._legacy = legacy
+        if self._legacy <= 113:
+            # when not 0d, legacy does not support '-'
+            if data.shape != () and sign == '-':
+                sign = ' '
+
+        self.floatmode = floatmode
+        if floatmode == 'unique':
+            self.precision = None
+        else:
+            self.precision = precision
+
+        self.precision = _none_or_positive_arg(self.precision, 'precision')
+
+        self.suppress_small = suppress_small
+        self.sign = sign
+        self.exp_format = False
+        self.large_exponent = False
+
+        self.fillFormat(data)
+
+    def fillFormat(self, data):
+        # only the finite values are used to compute the number of digits
+        finite_vals = data[isfinite(data)]
+
+        # choose exponential mode based on the non-zero finite values:
+        abs_non_zero = absolute(finite_vals[finite_vals != 0])
+        if len(abs_non_zero) != 0:
+            max_val = np.max(abs_non_zero)
+            min_val = np.min(abs_non_zero)
+            with errstate(over='ignore'):  # division can overflow
+                if max_val >= 1.e8 or (not self.suppress_small and
+                        (min_val < 0.0001 or max_val/min_val > 1000.)):
+                    self.exp_format = True
+
+        # do a first pass of printing all the numbers, to determine sizes
+        if len(finite_vals) == 0:
+            self.pad_left = 0
+            self.pad_right = 0
+            self.trim = '.'
+            self.exp_size = -1
+            self.unique = True
+            self.min_digits = None
+        elif self.exp_format:
+            trim, unique = '.', True
+            if self.floatmode == 'fixed' or self._legacy <= 113:
+                trim, unique = 'k', False
+            strs = (dragon4_scientific(x, precision=self.precision,
+                               unique=unique, trim=trim, sign=self.sign == '+')
+                    for x in finite_vals)
+            frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs))
+            int_part, frac_part = zip(*(s.split('.') for s in frac_strs))
+            self.exp_size = max(len(s) for s in exp_strs) - 1
+
+            self.trim = 'k'
+            self.precision = max(len(s) for s in frac_part)
+            self.min_digits = self.precision
+            self.unique = unique
+
+            # for back-compat with np 1.13, use 2 spaces & sign and full prec
+            if self._legacy <= 113:
+                self.pad_left = 3
+            else:
+                # this should be only 1 or 2. Can be calculated from sign.
+                self.pad_left = max(len(s) for s in int_part)
+            # pad_right is only needed for nan length calculation
+            self.pad_right = self.exp_size + 2 + self.precision
+        else:
+            trim, unique = '.', True
+            if self.floatmode == 'fixed':
+                trim, unique = 'k', False
+            strs = (dragon4_positional(x, precision=self.precision,
+                                       fractional=True,
+                                       unique=unique, trim=trim,
+                                       sign=self.sign == '+')
+                    for x in finite_vals)
+            int_part, frac_part = zip(*(s.split('.') for s in strs))
+            if self._legacy <= 113:
+                self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part)
+            else:
+                self.pad_left = max(len(s) for s in int_part)
+            self.pad_right = max(len(s) for s in frac_part)
+            self.exp_size = -1
+            self.unique = unique
+
+            if self.floatmode in ['fixed', 'maxprec_equal']:
+                self.precision = self.min_digits = self.pad_right
+                self.trim = 'k'
+            else:
+                self.trim = '.'
+                self.min_digits = 0
+
+        if self._legacy > 113:
+            # account for sign = ' ' by adding one to pad_left
+            if self.sign == ' ' and not any(np.signbit(finite_vals)):
+                self.pad_left += 1
+
+        # if there are non-finite values, may need to increase pad_left
+        if data.size != finite_vals.size:
+            neginf = self.sign != '-' or any(data[isinf(data)] < 0)
+            nanlen = len(_format_options['nanstr'])
+            inflen = len(_format_options['infstr']) + neginf
+            offset = self.pad_right + 1  # +1 for decimal pt
+            self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset)
+
+    def __call__(self, x):
+        if not np.isfinite(x):
+            with errstate(invalid='ignore'):
+                if np.isnan(x):
+                    sign = '+' if self.sign == '+' else ''
+                    ret = sign + _format_options['nanstr']
+                else:  # isinf
+                    sign = '-' if x < 0 else '+' if self.sign == '+' else ''
+                    ret = sign + _format_options['infstr']
+                return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret
+
+        if self.exp_format:
+            return dragon4_scientific(x,
+                                      precision=self.precision,
+                                      min_digits=self.min_digits,
+                                      unique=self.unique,
+                                      trim=self.trim,
+                                      sign=self.sign == '+',
+                                      pad_left=self.pad_left,
+                                      exp_digits=self.exp_size)
+        else:
+            return dragon4_positional(x,
+                                      precision=self.precision,
+                                      min_digits=self.min_digits,
+                                      unique=self.unique,
+                                      fractional=True,
+                                      trim=self.trim,
+                                      sign=self.sign == '+',
+                                      pad_left=self.pad_left,
+                                      pad_right=self.pad_right)
+
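+# Illustration (editor's note, not part of the NumPy sources): fillFormat
+# switches to scientific notation when the largest finite magnitude is at
+# least 1e8, or when suppress_small is off and the smallest magnitude is
+# below 1e-4 or the max/min ratio exceeds 1000:
+#
+#     >>> import numpy as np
+#     >>> np.array([1e8, 1.0])
+#     array([1.e+08, 1.e+00])
+#     >>> np.array([0.1, 2000.0])
+#     array([1.e-01, 2.e+03])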
+
+@set_module('numpy')
+def format_float_scientific(x, precision=None, unique=True, trim='k',
+                            sign=False, pad_left=None, exp_digits=None,
+                            min_digits=None):
+    """
+    Format a floating-point scalar as a decimal string in scientific notation.
+
+    Provides control over rounding, trimming and padding. Uses and assumes
+    IEEE unbiased rounding. Uses the "Dragon4" algorithm.
+
+    Parameters
+    ----------
+    x : python float or numpy floating scalar
+        Value to format.
+    precision : non-negative integer or None, optional
+        Maximum number of digits to print. May be None if `unique` is
+        `True`, but must be an integer if unique is `False`.
+    unique : boolean, optional
+        If `True`, use a digit-generation strategy which gives the shortest
+        representation which uniquely identifies the floating-point number from
+        other values of the same type, by judicious rounding. If `precision`
+        is given fewer digits than necessary can be printed, or if `min_digits`
+        is given more can be printed, in which case the last digit is rounded
+        with unbiased rounding.
+        If `False`, digits are generated as if printing an infinite-precision
+        value and stopping after `precision` digits, rounding the remaining
+        value with unbiased rounding.
+    trim : one of 'k', '.', '0', '-', optional
+        Controls post-processing trimming of trailing digits, as follows:
+
+        * 'k' : keep trailing zeros, keep decimal point (no trimming)
+        * '.' : trim all trailing zeros, leave decimal point
+        * '0' : trim all but the zero before the decimal point. Insert the
+          zero if it is missing.
+        * '-' : trim trailing zeros and any trailing decimal point
+    sign : boolean, optional
+        Whether to show the sign for positive values.
+    pad_left : non-negative integer, optional
+        Pad the left side of the string with whitespace until at least that
+        many characters are to the left of the decimal point.
+    exp_digits : non-negative integer, optional
+        Pad the exponent with zeros until it contains at least this many digits.
+        If omitted, the exponent will be at least 2 digits.
+    min_digits : non-negative integer or None, optional
+        Minimum number of digits to print. This only has an effect for
+        `unique=True`. In that case more digits than necessary to uniquely
+        identify the value may be printed and rounded unbiased.
+
+        .. versionadded:: 1.21.0
+
+    Returns
+    -------
+    rep : string
+        The string representation of the floating point value
+
+    See Also
+    --------
+    format_float_positional
+
+    Examples
+    --------
+    >>> np.format_float_scientific(np.float32(np.pi))
+    '3.1415927e+00'
+    >>> s = np.float32(1.23e24)
+    >>> np.format_float_scientific(s, unique=False, precision=15)
+    '1.230000071797338e+24'
+    >>> np.format_float_scientific(s, exp_digits=4)
+    '1.23e+0024'
+    """
+    precision = _none_or_positive_arg(precision, 'precision')
+    pad_left = _none_or_positive_arg(pad_left, 'pad_left')
+    exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits')
+    min_digits = _none_or_positive_arg(min_digits, 'min_digits')
+    if min_digits > 0 and precision > 0 and min_digits > precision:
+        raise ValueError("min_digits must be less than or equal to precision")
+    return dragon4_scientific(x, precision=precision, unique=unique,
+                              trim=trim, sign=sign, pad_left=pad_left,
+                              exp_digits=exp_digits, min_digits=min_digits)
+
+
+@set_module('numpy')
+def format_float_positional(x, precision=None, unique=True,
+                            fractional=True, trim='k', sign=False,
+                            pad_left=None, pad_right=None, min_digits=None):
+    """
+    Format a floating-point scalar as a decimal string in positional notation.
+
+    Provides control over rounding, trimming and padding. Uses and assumes
+    IEEE unbiased rounding. Uses the "Dragon4" algorithm.
+
+    Parameters
+    ----------
+    x : python float or numpy floating scalar
+        Value to format.
+    precision : non-negative integer or None, optional
+        Maximum number of digits to print. May be None if `unique` is
+        `True`, but must be an integer if unique is `False`.
+    unique : boolean, optional
+        If `True`, use a digit-generation strategy which gives the shortest
+        representation which uniquely identifies the floating-point number from
+        other values of the same type, by judicious rounding. If `precision`
+        is given fewer digits than necessary can be printed, or if `min_digits`
+        is given more can be printed, in which case the last digit is rounded
+        with unbiased rounding.
+        If `False`, digits are generated as if printing an infinite-precision
+        value and stopping after `precision` digits, rounding the remaining
+        value with unbiased rounding.
+    fractional : boolean, optional
+        If `True`, the cutoffs of `precision` and `min_digits` refer to the
+        total number of digits after the decimal point, including leading
+        zeros.
+        If `False`, `precision` and `min_digits` refer to the total number of
+        significant digits, before or after the decimal point, ignoring leading
+        zeros.
+    trim : one of 'k', '.', '0', '-', optional
+        Controls post-processing trimming of trailing digits, as follows:
+
+        * 'k' : keep trailing zeros, keep decimal point (no trimming)
+        * '.' : trim all trailing zeros, leave decimal point
+        * '0' : trim all but the zero before the decimal point. Insert the
+          zero if it is missing.
+        * '-' : trim trailing zeros and any trailing decimal point
+    sign : boolean, optional
+        Whether to show the sign for positive values.
+    pad_left : non-negative integer, optional
+        Pad the left side of the string with whitespace until at least that
+        many characters are to the left of the decimal point.
+    pad_right : non-negative integer, optional
+        Pad the right side of the string with whitespace until at least that
+        many characters are to the right of the decimal point.
+    min_digits : non-negative integer or None, optional
+        Minimum number of digits to print. Only has an effect if `unique=True`
+        in which case additional digits past those necessary to uniquely
+        identify the value may be printed, rounding the last additional digit.
+
+        .. versionadded:: 1.21.0
+
+    Returns
+    -------
+    rep : string
+        The string representation of the floating point value
+
+    See Also
+    --------
+    format_float_scientific
+
+    Examples
+    --------
+    >>> np.format_float_positional(np.float32(np.pi))
+    '3.1415927'
+    >>> np.format_float_positional(np.float16(np.pi))
+    '3.14'
+    >>> np.format_float_positional(np.float16(0.3))
+    '0.3'
+    >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10)
+    '0.3000488281'
+    """
+    precision = _none_or_positive_arg(precision, 'precision')
+    pad_left = _none_or_positive_arg(pad_left, 'pad_left')
+    pad_right = _none_or_positive_arg(pad_right, 'pad_right')
+    min_digits = _none_or_positive_arg(min_digits, 'min_digits')
+    if not fractional and precision == 0:
+        raise ValueError("precision must be greater than 0 if "
+                         "fractional=False")
+    if min_digits > 0 and precision > 0 and min_digits > precision:
+        raise ValueError("min_digits must be less than or equal to precision")
+    return dragon4_positional(x, precision=precision, unique=unique,
+                              fractional=fractional, trim=trim,
+                              sign=sign, pad_left=pad_left,
+                              pad_right=pad_right, min_digits=min_digits)
+
+
+class IntegerFormat:
+    def __init__(self, data):
+        if data.size > 0:
+            max_str_len = max(len(str(np.max(data))),
+                              len(str(np.min(data))))
+        else:
+            max_str_len = 0
+        self.format = '%{}d'.format(max_str_len)
+
+    def __call__(self, x):
+        return self.format % x
+
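+# Illustration (editor's note, not part of the NumPy sources): the width comes
+# from the extreme values, so for data [5, -100] the format is '%4d' and 5
+# renders as '   5', keeping columns aligned.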
+
+class BoolFormat:
+    def __init__(self, data, **kwargs):
+        # add an extra space so " True" and "False" have the same length and
+        # array elements align nicely when printed, except in 0d arrays
+        self.truestr = ' True' if data.shape != () else 'True'
+
+    def __call__(self, x):
+        return self.truestr if x else "False"
+
+
+class ComplexFloatingFormat:
+    """ Formatter for subtypes of np.complexfloating """
+    def __init__(self, x, precision, floatmode, suppress_small,
+                 sign=False, *, legacy=None):
+        # for backcompatibility, accept bools
+        if isinstance(sign, bool):
+            sign = '+' if sign else '-'
+
+        floatmode_real = floatmode_imag = floatmode
+        if legacy <= 113:
+            floatmode_real = 'maxprec_equal'
+            floatmode_imag = 'maxprec'
+
+        self.real_format = FloatingFormat(
+            x.real, precision, floatmode_real, suppress_small,
+            sign=sign, legacy=legacy
+        )
+        self.imag_format = FloatingFormat(
+            x.imag, precision, floatmode_imag, suppress_small,
+            sign='+', legacy=legacy
+        )
+
+    def __call__(self, x):
+        r = self.real_format(x.real)
+        i = self.imag_format(x.imag)
+
+        # add the 'j' before the terminal whitespace in i
+        sp = len(i.rstrip())
+        i = i[:sp] + 'j' + i[sp:]
+
+        return r + i
+
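+# Illustration (editor's note, not part of the NumPy sources): the imaginary
+# part always carries an explicit sign, and 'j' is spliced in ahead of any
+# trailing padding, e.g. r = ' 1.5', i = '+2.  ' yields ' 1.5+2.j  '.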
+
+class _TimelikeFormat:
+    def __init__(self, data):
+        non_nat = data[~isnat(data)]
+        if len(non_nat) > 0:
+            # Max str length of non-NaT elements
+            max_str_len = max(len(self._format_non_nat(np.max(non_nat))),
+                              len(self._format_non_nat(np.min(non_nat))))
+        else:
+            max_str_len = 0
+        if len(non_nat) < data.size:
+            # data contains a NaT
+            max_str_len = max(max_str_len, 5)
+        self._format = '%{}s'.format(max_str_len)
+        self._nat = "'NaT'".rjust(max_str_len)
+
+    def _format_non_nat(self, x):
+        # override in subclass
+        raise NotImplementedError
+
+    def __call__(self, x):
+        if isnat(x):
+            return self._nat
+        else:
+            return self._format % self._format_non_nat(x)
+
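+# Illustration (editor's note, not part of the NumPy sources): the minimum
+# width of 5 used above is len("'NaT'"), so a timelike column containing NaT
+# is never narrower than the quoted NaT marker.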
+
+class DatetimeFormat(_TimelikeFormat):
+    def __init__(self, x, unit=None, timezone=None, casting='same_kind',
+                 legacy=False):
+        # Get the unit from the dtype
+        if unit is None:
+            if x.dtype.kind == 'M':
+                unit = datetime_data(x.dtype)[0]
+            else:
+                unit = 's'
+
+        if timezone is None:
+            timezone = 'naive'
+        self.timezone = timezone
+        self.unit = unit
+        self.casting = casting
+        self.legacy = legacy
+
+        # must be called after the above are configured
+        super().__init__(x)
+
+    def __call__(self, x):
+        if self.legacy <= 113:
+            return self._format_non_nat(x)
+        return super().__call__(x)
+
+    def _format_non_nat(self, x):
+        return "'%s'" % datetime_as_string(x,
+                                    unit=self.unit,
+                                    timezone=self.timezone,
+                                    casting=self.casting)
+
+
+class TimedeltaFormat(_TimelikeFormat):
+    def _format_non_nat(self, x):
+        return str(x.astype('i8'))
+
+
+class SubArrayFormat:
+    def __init__(self, format_function, **options):
+        self.format_function = format_function
+        self.threshold = options['threshold']
+        self.edge_items = options['edgeitems']
+
+    def __call__(self, a):
+        self.summary_insert = "..." if a.size > self.threshold else ""
+        return self.format_array(a)
+
+    def format_array(self, a):
+        if np.ndim(a) == 0:
+            return self.format_function(a)
+
+        if self.summary_insert and a.shape[0] > 2*self.edge_items:
+            formatted = (
+                [self.format_array(a_) for a_ in a[:self.edge_items]]
+                + [self.summary_insert]
+                + [self.format_array(a_) for a_ in a[-self.edge_items:]]
+            )
+        else:
+            formatted = [self.format_array(a_) for a_ in a]
+
+        return "[" + ", ".join(formatted) + "]"
+
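+# Illustration (editor's note, not part of the NumPy sources): once a.size
+# exceeds the threshold, only edge_items entries per axis are shown, e.g.
+# with threshold=5 and edgeitems=2 a 10-element sub-array formats roughly as
+# "[0, 1, ..., 8, 9]".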
+
+class StructuredVoidFormat:
+    """
+    Formatter for structured np.void objects.
+
+    This does not work on structured alias types like np.dtype(('i4', 'i2,i2')),
+    as alias scalars lose their field information, and the implementation
+    relies upon np.void.__getitem__.
+    """
+    def __init__(self, format_functions):
+        self.format_functions = format_functions
+
+    @classmethod
+    def from_data(cls, data, **options):
+        """
+        This is a second way to initialize StructuredVoidFormat, using the raw data
+        as input. Added to avoid changing the signature of __init__.
+        """
+        format_functions = []
+        for field_name in data.dtype.names:
+            format_function = _get_format_function(data[field_name], **options)
+            if data.dtype[field_name].shape != ():
+                format_function = SubArrayFormat(format_function, **options)
+            format_functions.append(format_function)
+        return cls(format_functions)
+
+    def __call__(self, x):
+        str_fields = [
+            format_function(field)
+            for field, format_function in zip(x, self.format_functions)
+        ]
+        if len(str_fields) == 1:
+            return "({},)".format(str_fields[0])
+        else:
+            return "({})".format(", ".join(str_fields))
+
+
+def _void_scalar_repr(x):
+    """
+    Implements the repr for structured-void scalars. It is called from the
+    scalartypes.c.src code, and is placed here because it uses the elementwise
+    formatters defined above.
+    """
+    return StructuredVoidFormat.from_data(array(x), **_format_options)(x)
+
+
+_typelessdata = [int_, float_, complex_, bool_]
+
+
+def dtype_is_implied(dtype):
+    """
+    Determine if the given dtype is implied by the representation of its values.
+
+    Parameters
+    ----------
+    dtype : dtype
+        Data type
+
+    Returns
+    -------
+    implied : bool
+        True if the dtype is implied by the representation of its values.
+
+    Examples
+    --------
+    >>> np.core.arrayprint.dtype_is_implied(int)
+    True
+    >>> np.array([1, 2, 3], int)
+    array([1, 2, 3])
+    >>> np.core.arrayprint.dtype_is_implied(np.int8)
+    False
+    >>> np.array([1, 2, 3], np.int8)
+    array([1, 2, 3], dtype=int8)
+    """
+    dtype = np.dtype(dtype)
+    if _format_options['legacy'] <= 113 and dtype.type == bool_:
+        return False
+
+    # not just void types can be structured, and names are not part of the repr
+    if dtype.names is not None:
+        return False
+
+    # should care about endianness *unless size is 1* (e.g., int8, bool)
+    if not dtype.isnative:
+        return False
+
+    return dtype.type in _typelessdata
+
+
+def dtype_short_repr(dtype):
+    """
+    Convert a dtype to a short form which evaluates to the same dtype.
+
+    The intent is roughly that the following holds
+
+    >>> from numpy import *
+    >>> dt = np.int64([1, 2]).dtype
+    >>> assert eval(dtype_short_repr(dt)) == dt
+    """
+    if type(dtype).__repr__ != np.dtype.__repr__:
+        # TODO: Custom repr for user DTypes, logic should likely move.
+        return repr(dtype)
+    if dtype.names is not None:
+        # structured dtypes give a list or tuple repr
+        return str(dtype)
+    elif issubclass(dtype.type, flexible):
+        # handle these separately so they don't give garbage like str256
+        return "'%s'" % str(dtype)
+
+    typename = dtype.name
+    if not dtype.isnative:
+        # deal with cases like dtype('<u2') that are identical to an
+        # established dtype (in this case uint16)
+        return "'%s'" % str(dtype)
+
+    return typename
+
+
+def _array_repr_implementation(
+        arr, max_line_width=None, precision=None, suppress_small=None,
+        array2string=array2string):
+    """Internal version of array_repr() that allows overriding array2string."""
+    if max_line_width is None:
+        max_line_width = _format_options['linewidth']
+
+    if type(arr) is not ndarray:
+        class_name = type(arr).__name__
+    else:
+        class_name = "array"
+
+    skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0
+
+    prefix = class_name + "("
+    suffix = ")" if skipdtype else ","
+
+    if (_format_options['legacy'] <= 113 and
+            arr.shape == () and not arr.dtype.names):
+        lst = repr(arr.item())
+    elif arr.size > 0 or arr.shape == (0,):
+        lst = array2string(arr, max_line_width, precision, suppress_small,
+                           ', ', prefix, suffix=suffix)
+    else:  # show zero-length shape unless it is (0,)
+        lst = "[], shape=%s" % (repr(arr.shape),)
+
+    arr_str = prefix + lst + suffix
+
+    if skipdtype:
+        return arr_str
+
+    dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype))
+
+    # compute whether we should put dtype on a new line: Do so if adding the
+    # dtype would extend the last line past max_line_width.
+    # Note: This line gives the correct result even when rfind returns -1.
+    last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
+    spacer = " "
+    if _format_options['legacy'] <= 113:
+        if issubclass(arr.dtype.type, flexible):
+            spacer = '\n' + ' '*len(class_name + "(")
+    elif last_line_len + len(dtype_str) + 1 > max_line_width:
+        spacer = '\n' + ' '*len(class_name + "(")
+
+    return arr_str + spacer + dtype_str
+
+
+def _array_repr_dispatcher(
+        arr, max_line_width=None, precision=None, suppress_small=None):
+    return (arr,)
+
+
+@array_function_dispatch(_array_repr_dispatcher, module='numpy')
+def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
+    """
+    Return the string representation of an array.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array.
+    max_line_width : int, optional
+        Inserts newlines if text is longer than `max_line_width`.
+        Defaults to ``numpy.get_printoptions()['linewidth']``.
+    precision : int, optional
+        Floating point precision.
+        Defaults to ``numpy.get_printoptions()['precision']``.
+    suppress_small : bool, optional
+        Represent numbers "very close" to zero as zero; default is False.
+        Very close is defined by precision: if the precision is 8, e.g.,
+        numbers smaller (in absolute value) than 5e-9 are represented as
+        zero.
+        Defaults to ``numpy.get_printoptions()['suppress']``.
+
+    Returns
+    -------
+    string : str
+      The string representation of an array.
+
+    See Also
+    --------
+    array_str, array2string, set_printoptions
+
+    Examples
+    --------
+    >>> np.array_repr(np.array([1,2]))
+    'array([1, 2])'
+    >>> np.array_repr(np.ma.array([0.]))
+    'MaskedArray([0.])'
+    >>> np.array_repr(np.array([], np.int32))
+    'array([], dtype=int32)'
+
+    >>> x = np.array([1e-6, 4e-7, 2, 3])
+    >>> np.array_repr(x, precision=6, suppress_small=True)
+    'array([0.000001,  0.      ,  2.      ,  3.      ])'
+
+    """
+    return _array_repr_implementation(
+        arr, max_line_width, precision, suppress_small)
+
+
+@_recursive_guard()
+def _guarded_repr_or_str(v):
+    if isinstance(v, bytes):
+        return repr(v)
+    return str(v)
+
+
+def _array_str_implementation(
+        a, max_line_width=None, precision=None, suppress_small=None,
+        array2string=array2string):
+    """Internal version of array_str() that allows overriding array2string."""
+    if (_format_options['legacy'] <= 113 and
+            a.shape == () and not a.dtype.names):
+        return str(a.item())
+
+    # the str of 0d arrays is a special case: It should appear like a scalar,
+    # so floats are not truncated by `precision`, and strings are not wrapped
+    # in quotes. So we return the str of the scalar value.
+    if a.shape == ():
+        # obtain a scalar and call str on it, avoiding problems for subclasses
+        # for which indexing with () returns a 0d instead of a scalar by using
+        # ndarray's getindex. Also guard against recursive 0d object arrays.
+        return _guarded_repr_or_str(np.ndarray.__getitem__(a, ()))
+
+    return array2string(a, max_line_width, precision, suppress_small, ' ', "")
+
+
+def _array_str_dispatcher(
+        a, max_line_width=None, precision=None, suppress_small=None):
+    return (a,)
+
+
+@array_function_dispatch(_array_str_dispatcher, module='numpy')
+def array_str(a, max_line_width=None, precision=None, suppress_small=None):
+    """
+    Return a string representation of the data in an array.
+
+    The data in the array is returned as a single string.  This function is
+    similar to `array_repr`, the difference being that `array_repr` also
+    returns information on the kind of array and its data type.
+
+    Parameters
+    ----------
+    a : ndarray
+        Input array.
+    max_line_width : int, optional
+        Inserts newlines if text is longer than `max_line_width`.
+        Defaults to ``numpy.get_printoptions()['linewidth']``.
+    precision : int, optional
+        Floating point precision.
+        Defaults to ``numpy.get_printoptions()['precision']``.
+    suppress_small : bool, optional
+        Represent numbers "very close" to zero as zero; default is False.
+        Very close is defined by precision: if the precision is 8, e.g.,
+        numbers smaller (in absolute value) than 5e-9 are represented as
+        zero.
+        Defaults to ``numpy.get_printoptions()['suppress']``.
+
+    See Also
+    --------
+    array2string, array_repr, set_printoptions
+
+    Examples
+    --------
+    >>> np.array_str(np.arange(3))
+    '[0 1 2]'
+
+    """
+    return _array_str_implementation(
+        a, max_line_width, precision, suppress_small)
+
+
+# needed if __array_function__ is disabled
+_array2string_impl = getattr(array2string, '__wrapped__', array2string)
+_default_array_str = functools.partial(_array_str_implementation,
+                                       array2string=_array2string_impl)
+_default_array_repr = functools.partial(_array_repr_implementation,
+                                        array2string=_array2string_impl)
+
+
+def set_string_function(f, repr=True):
+    """
+    Set a Python function to be used when pretty printing arrays.
+
+    Parameters
+    ----------
+    f : function or None
+        Function to be used to pretty print arrays. The function should expect
+        a single array argument and return a string of the representation of
+        the array. If None, the function is reset to the default NumPy function
+        to print arrays.
+    repr : bool, optional
+        If True (default), the function for pretty printing (``__repr__``)
+        is set, if False the function that returns the default string
+        representation (``__str__``) is set.
+
+    See Also
+    --------
+    set_printoptions, get_printoptions
+
+    Examples
+    --------
+    >>> def pprint(arr):
+    ...     return 'HA! - What are you going to do now?'
+    ...
+    >>> np.set_string_function(pprint)
+    >>> a = np.arange(10)
+    >>> a
+    HA! - What are you going to do now?
+    >>> _ = a
+    >>> # [0 1 2 3 4 5 6 7 8 9]
+
+    We can reset the function to the default:
+
+    >>> np.set_string_function(None)
+    >>> a
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+    `repr` affects either pretty printing or normal string representation.
+    Note that ``__repr__`` is still affected by setting ``__str__``
+    because the width of each array element in the returned string becomes
+    equal to the length of the result of ``__str__()``.
+
+    >>> x = np.arange(4)
+    >>> np.set_string_function(lambda x:'random', repr=False)
+    >>> x.__str__()
+    'random'
+    >>> x.__repr__()
+    'array([0, 1, 2, 3])'
+
+    """
+    if f is None:
+        if repr:
+            return multiarray.set_string_function(_default_array_repr, 1)
+        else:
+            return multiarray.set_string_function(_default_array_str, 0)
+    else:
+        return multiarray.set_string_function(f, repr)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/arrayprint.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/arrayprint.pyi
new file mode 100644
index 00000000..d8255387
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/arrayprint.pyi
@@ -0,0 +1,142 @@
+from types import TracebackType
+from collections.abc import Callable
+from typing import Any, Literal, TypedDict, SupportsIndex
+
+# Using a private class is by no means ideal, but it is simply a consequence
+# of a `contextlib.contextmanager`-decorated function returning an instance
+# of the aforementioned class
+from contextlib import _GeneratorContextManager
+
+from numpy import (
+    ndarray,
+    generic,
+    bool_,
+    integer,
+    timedelta64,
+    datetime64,
+    floating,
+    complexfloating,
+    void,
+    str_,
+    bytes_,
+    longdouble,
+    clongdouble,
+)
+from numpy._typing import ArrayLike, _CharLike_co, _FloatLike_co
+
+_FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"]
+
+class _FormatDict(TypedDict, total=False):
+    bool: Callable[[bool_], str]
+    int: Callable[[integer[Any]], str]
+    timedelta: Callable[[timedelta64], str]
+    datetime: Callable[[datetime64], str]
+    float: Callable[[floating[Any]], str]
+    longfloat: Callable[[longdouble], str]
+    complexfloat: Callable[[complexfloating[Any, Any]], str]
+    longcomplexfloat: Callable[[clongdouble], str]
+    void: Callable[[void], str]
+    numpystr: Callable[[_CharLike_co], str]
+    object: Callable[[object], str]
+    all: Callable[[object], str]
+    int_kind: Callable[[integer[Any]], str]
+    float_kind: Callable[[floating[Any]], str]
+    complex_kind: Callable[[complexfloating[Any, Any]], str]
+    str_kind: Callable[[_CharLike_co], str]
+
+class _FormatOptions(TypedDict):
+    precision: int
+    threshold: int
+    edgeitems: int
+    linewidth: int
+    suppress: bool
+    nanstr: str
+    infstr: str
+    formatter: None | _FormatDict
+    sign: Literal["-", "+", " "]
+    floatmode: _FloatMode
+    legacy: Literal[False, "1.13", "1.21"]
+
+def set_printoptions(
+    precision: None | SupportsIndex = ...,
+    threshold: None | int = ...,
+    edgeitems: None | int = ...,
+    linewidth: None | int = ...,
+    suppress: None | bool = ...,
+    nanstr: None | str = ...,
+    infstr: None | str = ...,
+    formatter: None | _FormatDict = ...,
+    sign: Literal[None, "-", "+", " "] = ...,
+    floatmode: None | _FloatMode = ...,
+    *,
+    legacy: Literal[None, False, "1.13", "1.21"] = ...
+) -> None: ...
+def get_printoptions() -> _FormatOptions: ...
+def array2string(
+    a: ndarray[Any, Any],
+    max_line_width: None | int = ...,
+    precision: None | SupportsIndex = ...,
+    suppress_small: None | bool = ...,
+    separator: str = ...,
+    prefix: str = ...,
+    # NOTE: With the `style` argument being deprecated,
+    # all arguments between `formatter` and `suffix` are de facto
+    # keyword-only arguments
+    *,
+    formatter: None | _FormatDict = ...,
+    threshold: None | int = ...,
+    edgeitems: None | int = ...,
+    sign: Literal[None, "-", "+", " "] = ...,
+    floatmode: None | _FloatMode = ...,
+    suffix: str = ...,
+    legacy: Literal[None, False, "1.13", "1.21"] = ...,
+) -> str: ...
+def format_float_scientific(
+    x: _FloatLike_co,
+    precision: None | int = ...,
+    unique: bool = ...,
+    trim: Literal["k", ".", "0", "-"] = ...,
+    sign: bool = ...,
+    pad_left: None | int = ...,
+    exp_digits: None | int = ...,
+    min_digits: None | int = ...,
+) -> str: ...
+def format_float_positional(
+    x: _FloatLike_co,
+    precision: None | int = ...,
+    unique: bool = ...,
+    fractional: bool = ...,
+    trim: Literal["k", ".", "0", "-"] = ...,
+    sign: bool = ...,
+    pad_left: None | int = ...,
+    pad_right: None | int = ...,
+    min_digits: None | int = ...,
+) -> str: ...
+def array_repr(
+    arr: ndarray[Any, Any],
+    max_line_width: None | int = ...,
+    precision: None | SupportsIndex = ...,
+    suppress_small: None | bool = ...,
+) -> str: ...
+def array_str(
+    a: ndarray[Any, Any],
+    max_line_width: None | int = ...,
+    precision: None | SupportsIndex = ...,
+    suppress_small: None | bool = ...,
+) -> str: ...
+def set_string_function(
+    f: None | Callable[[ndarray[Any, Any]], str], repr: bool = ...
+) -> None: ...
+def printoptions(
+    precision: None | SupportsIndex = ...,
+    threshold: None | int = ...,
+    edgeitems: None | int = ...,
+    linewidth: None | int = ...,
+    suppress: None | bool = ...,
+    nanstr: None | str = ...,
+    infstr: None | str = ...,
+    formatter: None | _FormatDict = ...,
+    sign: Literal[None, "-", "+", " "] = ...,
+    floatmode: None | _FloatMode = ...,
+    *,
+    legacy: Literal[None, False, "1.13", "1.21"] = ...
+) -> _GeneratorContextManager[_FormatOptions]: ...
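+
+# Example (editor's illustration, not part of the stub): printoptions is a
+# context manager that restores the previous options on exit:
+#
+#     with np.printoptions(precision=2):
+#         print(np.array([1.2345]))  # -> [1.23]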
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/cversions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/cversions.py
new file mode 100644
index 00000000..00159c3a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/cversions.py
@@ -0,0 +1,13 @@
+"""Simple script to compute the api hash of the current API.
+
+The API hash is defined by numpy_api_order and ufunc_api_order.
+
+"""
+from os.path import dirname
+
+from code_generators.genapi import fullapi_hash
+from code_generators.numpy_api import full_api
+
+if __name__ == '__main__':
+    curdir = dirname(__file__)
+    print(fullapi_hash(full_api))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/defchararray.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/defchararray.py
new file mode 100644
index 00000000..11c5a30b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/defchararray.py
@@ -0,0 +1,2914 @@
+"""
+This module contains a set of functions for vectorized string
+operations and methods.
+
+.. note::
+   The `chararray` class exists for backwards compatibility with
+   Numarray; it is not recommended for new development. Starting from numpy
+   1.4, if one needs arrays of strings, it is recommended to use arrays of
+   `dtype` `object_`, `bytes_` or `str_`, and use the free functions
+   in the `numpy.char` module for fast vectorized string operations.
+
+Some methods will only be available if the corresponding string method is
+available in your version of Python.
+
+The preferred alias for `defchararray` is `numpy.char`.
+
+"""
+import functools
+
+from .._utils import set_module
+from .numerictypes import (
+    bytes_, str_, integer, int_, object_, bool_, character)
+from .numeric import ndarray, compare_chararrays
+from .numeric import array as narray
+from numpy.core.multiarray import _vec_string
+from numpy.core import overrides
+from numpy.compat import asbytes
+import numpy
+
+__all__ = [
+    'equal', 'not_equal', 'greater_equal', 'less_equal',
+    'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
+    'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
+    'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
+    'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
+    'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
+    'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
+    'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
+    'array', 'asarray'
+    ]
+
+
+_globalvar = 0
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy.char')
+
+
+def _is_unicode(arr):
+    """Returns True if arr is a string or a string array with a dtype that
+    represents a unicode string, otherwise returns False.
+
+    """
+    if (isinstance(arr, str) or
+            issubclass(numpy.asarray(arr).dtype.type, str)):
+        return True
+    return False
+
+
+def _to_bytes_or_str_array(result, output_dtype_like=None):
+    """
+    Helper function to cast a result back into an array
+    with the appropriate dtype if an object array must be used
+    as an intermediary.
+    """
+    ret = numpy.asarray(result.tolist())
+    dtype = getattr(output_dtype_like, 'dtype', None)
+    if dtype is not None:
+        return ret.astype(type(dtype)(_get_num_chars(ret)), copy=False)
+    return ret
+
+
+def _clean_args(*args):
+    """
+    Helper function for delegating arguments to Python string
+    functions.
+
+    Many of the Python string operations that have optional arguments
+    do not use 'None' to indicate a default value.  In these cases,
+    we need to remove all None arguments, and those following them.
+    """
+    newargs = []
+    for chk in args:
+        if chk is None:
+            break
+        newargs.append(chk)
+    return newargs
+
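+# Example (editor's illustration, not part of the NumPy sources): arguments
+# are kept only up to the first None, so _clean_args(1, None, 2) == [1] and
+# _clean_args('x', 3) == ['x', 3].
+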
+def _get_num_chars(a):
+    """
+    Helper function that returns the number of characters per field in
+    a string or unicode array.  This is to abstract out the fact that
+    for a unicode array this is itemsize / 4.
+    """
+    if issubclass(a.dtype.type, str_):
+        return a.itemsize // 4
+    return a.itemsize
+
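+# Example (editor's illustration, not part of the NumPy sources): a '<U3'
+# array has itemsize 12 (four bytes per UCS-4 code point), so
+# _get_num_chars(np.array(['abc'])) == 3; for an 'S3' bytes array the
+# itemsize is already the character count.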
+
+def _binary_op_dispatcher(x1, x2):
+    return (x1, x2)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def equal(x1, x2):
+    """
+    Return (x1 == x2) element-wise.
+
+    Unlike `numpy.equal`, this comparison is performed by first
+    stripping whitespace characters from the end of the string.  This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    not_equal, greater_equal, less_equal, greater, less
+    """
+    return compare_chararrays(x1, x2, '==', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def not_equal(x1, x2):
+    """
+    Return (x1 != x2) element-wise.
+
+    Unlike `numpy.not_equal`, this comparison is performed by first
+    stripping whitespace characters from the end of the string.  This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, greater_equal, less_equal, greater, less
+    """
+    return compare_chararrays(x1, x2, '!=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater_equal(x1, x2):
+    """
+    Return (x1 >= x2) element-wise.
+
+    Unlike `numpy.greater_equal`, this comparison is performed by
+    first stripping whitespace characters from the end of the string.
+    This behavior is provided for backward-compatibility with
+    numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, less_equal, greater, less
+    """
+    return compare_chararrays(x1, x2, '>=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less_equal(x1, x2):
+    """
+    Return (x1 <= x2) element-wise.
+
+    Unlike `numpy.less_equal`, this comparison is performed by first
+    stripping whitespace characters from the end of the string.  This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, greater_equal, greater, less
+    """
+    return compare_chararrays(x1, x2, '<=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater(x1, x2):
+    """
+    Return (x1 > x2) element-wise.
+
+    Unlike `numpy.greater`, this comparison is performed by first
+    stripping whitespace characters from the end of the string.  This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, greater_equal, less_equal, less
+    """
+    return compare_chararrays(x1, x2, '>', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less(x1, x2):
+    """
+    Return (x1 < x2) element-wise.
+
+    Unlike `numpy.greater`, this comparison is performed by first
+    stripping whitespace characters from the end of the string.  This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, greater_equal, less_equal, greater
+    """
+    return compare_chararrays(x1, x2, '<', True)
+
+
+def _unary_op_dispatcher(a):
+    return (a,)
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def str_len(a):
+    """
+    Return len(a) element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of integers
+
+    See Also
+    --------
+    len
+
+    Examples
+    --------
+    >>> a = np.array(['Grace Hopper Conference', 'Open Source Day'])
+    >>> np.char.str_len(a)
+    array([23, 15])
+    >>> a = np.array([u'\u0420', u'\u043e'])
+    >>> np.char.str_len(a)
+    array([1, 1])
+    >>> a = np.array([['hello', 'world'], [u'\u0420', u'\u043e']])
+    >>> np.char.str_len(a)
+    array([[5, 5], [1, 1]])
+    """
+    # Note: __len__, etc. currently return ints, which are not C-integers.
+    # Generally intp would be expected for lengths, although int is sufficient
+    # due to the dtype itemsize limitation.
+    return _vec_string(a, int_, '__len__')
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def add(x1, x2):
+    """
+    Return element-wise string concatenation for two arrays of str or unicode.
+
+    Arrays `x1` and `x2` must have the same shape.
+
+    Parameters
+    ----------
+    x1 : array_like of str or unicode
+        Input array.
+    x2 : array_like of str or unicode
+        Input array.
+
+    Returns
+    -------
+    add : ndarray
+        Output array of `bytes_` or `str_` (depending on input types),
+        with the same shape as `x1` and `x2`.
+
+    """
+    arr1 = numpy.asarray(x1)
+    arr2 = numpy.asarray(x2)
+    out_size = _get_num_chars(arr1) + _get_num_chars(arr2)
+
+    if type(arr1.dtype) != type(arr2.dtype):
+        # Enforce this for now.  The long-term solution is to implement add
+        # as a ufunc.  It never worked right on Python 3: bytes + unicode gave
+        # nonsense, unicode + bytes errored, and unicode + object used the
+        # object dtype itemsize as num chars (worked on short strings).
+        # bytes + void worked but promoting void->bytes is dubious also.
+        raise TypeError(
+            "np.char.add() requires both arrays of the same dtype kind, but "
+            f"got dtypes: '{arr1.dtype}' and '{arr2.dtype}' (the few cases "
+            "where this used to work often lead to incorrect results).")
+
+    return _vec_string(arr1, type(arr1.dtype)(out_size), '__add__', (arr2,))
+
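+# Example (editor's illustration, not part of the NumPy sources): widths add,
+# so np.char.add(np.array(['ab']), np.array(['cd'])) returns
+# array(['abcd'], dtype='<U4').
+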
+def _multiply_dispatcher(a, i):
+    return (a,)
+
+
+@array_function_dispatch(_multiply_dispatcher)
+def multiply(a, i):
+    """
+    Return (a * i), that is string multiple concatenation,
+    element-wise.
+
+    Values in `i` of less than 0 are treated as 0 (which yields an
+    empty string).
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    i : array_like of ints
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input types
+    
+    Examples
+    --------
+    >>> a = np.array(["a", "b", "c"])
+    >>> np.char.multiply(a, 3)
+    array(['aaa', 'bbb', 'ccc'], dtype='<U3')
+    >>> i = np.array([1, 2, 3])
+    >>> np.char.multiply(a, i)
+    array(['a', 'bb', 'ccc'], dtype='<U3')
+    >>> np.char.multiply(np.array(['a']), i)
+    array(['a', 'aa', 'aaa'], dtype='<U3')
+    >>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))
+    >>> np.char.multiply(a, 3)
+    array([['aaa', 'bbb', 'ccc'],
+           ['ddd', 'eee', 'fff']], dtype='<U3')
+    >>> np.char.multiply(a, i)
+    array([['a', 'bb', 'ccc'],
+           ['d', 'ee', 'fff']], dtype='<U3')
+
+    """
+    a_arr = numpy.asarray(a)
+    i_arr = numpy.asarray(i)
+    if not issubclass(i_arr.dtype.type, integer):
+        raise ValueError("Can only multiply by integers")
+    out_size = _get_num_chars(a_arr) * max(int(i_arr.max()), 0)
+    return _vec_string(
+        a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,))
+
+
+def _mod_dispatcher(a, values):
+    return (a, values)
+
+
+@array_function_dispatch(_mod_dispatcher)
+def mod(a, values):
+    """
+    Return (a % i), that is pre-Python 2.6 string formatting
+    (interpolation), element-wise for a pair of array_likes of str
+    or unicode.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    values : array_like of values
+       These values will be element-wise interpolated into the string.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input types
+
+    See Also
+    --------
+    str.__mod__
+
+    """
+    return _to_bytes_or_str_array(
+        _vec_string(a, object_, '__mod__', (values,)), a)
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def capitalize(a):
+    """
+    Return a copy of `a` with only the first character of each element
+    capitalized.
+
+    Calls `str.capitalize` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+        Input array of strings to capitalize.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input
+        types
+
+    See Also
+    --------
+    str.capitalize
+
+    Examples
+    --------
+    >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c
+    array(['a1b2', '1b2a', 'b2a1', '2a1b'],
+        dtype='|S4')
+    >>> np.char.capitalize(c)
+    array(['A1b2', '1b2a', 'B2a1', '2a1b'],
+        dtype='|S4')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'capitalize')
+
+
+def _center_dispatcher(a, width, fillchar=None):
+    return (a,)
+
+
+@array_function_dispatch(_center_dispatcher)
+def center(a, width, fillchar=' '):
+    """
+    Return a copy of `a` with its elements centered in a string of
+    length `width`.
+
+    Calls `str.center` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    width : int
+        The length of the resulting strings
+    fillchar : str or unicode, optional
+        The padding character to use (default is space).
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input
+        types
+
+    See Also
+    --------
+    str.center
+    
+    Notes
+    -----
+    This function is intended to work with arrays of strings.  The
+    fill character is not applied to numeric types.
+
+    Examples
+    --------
+    >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c
+    array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='<U4')
+    >>> np.char.center(c, width=9)
+    array(['   a1b2  ', '   1b2a  ', '   b2a1  ', '   2a1b  '], dtype='<U9')
+    >>> np.char.center(c, width=9, fillchar='*')
+    array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='<U9')
+    >>> np.char.center(c, width=1)
+    array(['a', '1', 'b', '2'], dtype='<U1')
+
+    """
+    a_arr = numpy.asarray(a)
+    width_arr = numpy.asarray(width)
+    size = int(numpy.max(width_arr.flat))
+    if numpy.issubdtype(a_arr.dtype, numpy.bytes_):
+        fillchar = asbytes(fillchar)
+    return _vec_string(
+        a_arr, type(a_arr.dtype)(size), 'center', (width_arr, fillchar))
+
+
+def _count_dispatcher(a, sub, start=None, end=None):
+    return (a,)
+
+
+@array_function_dispatch(_count_dispatcher)
+def count(a, sub, start=0, end=None):
+    """
+    Returns an array with the number of non-overlapping occurrences of
+    substring `sub` in the range [`start`, `end`].
+
+    Calls `str.count` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    sub : str or unicode
+       The substring to search for.
+
+    start, end : int, optional
+       Optional arguments `start` and `end` are interpreted as slice
+       notation to specify the range in which to count.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ints.
+
+    See Also
+    --------
+    str.count
+
+    Examples
+    --------
+    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> c
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+    >>> np.char.count(c, 'A')
+    array([3, 1, 1])
+    >>> np.char.count(c, 'aA')
+    array([3, 1, 0])
+    >>> np.char.count(c, 'A', start=1, end=4)
+    array([2, 1, 1])
+    >>> np.char.count(c, 'A', start=1, end=3)
+    array([1, 0, 0])
+
+    """
+    return _vec_string(a, int_, 'count', [sub, start] + _clean_args(end))
+
+
+def _code_dispatcher(a, encoding=None, errors=None):
+    return (a,)
+
+
+@array_function_dispatch(_code_dispatcher)
+def decode(a, encoding=None, errors=None):
+    r"""
+    Calls ``bytes.decode`` element-wise.
+
+    The set of available codecs comes from the Python standard library,
+    and may be extended at runtime.  For more information, see the
+    :mod:`codecs` module.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    encoding : str, optional
+       The name of an encoding
+
+    errors : str, optional
+       Specifies how to handle encoding errors
+
+    Returns
+    -------
+    out : ndarray
+
+    See Also
+    --------
+    :py:meth:`bytes.decode`
+
+    Notes
+    -----
+    The type of the result will depend on the encoding specified.
+
+    Examples
+    --------
+    >>> c = np.array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',
+    ...               b'\x81\x82\xc2\xc1\xc2\x82\x81'])
+    >>> c
+    array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',
+           b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7')
+    >>> np.char.decode(c, encoding='cp037')
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U6')
+
+    """
+    return _to_bytes_or_str_array(
+        _vec_string(a, object_, 'decode', _clean_args(encoding, errors)))
+
+
+@array_function_dispatch(_code_dispatcher)
+def encode(a, encoding=None, errors=None):
+    """
+    Calls ``str.encode`` element-wise.
+
+    The set of available codecs comes from the Python standard library,
+    and may be extended at runtime.  For more information, see the
+    :mod:`codecs` module.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    encoding : str, optional
+       The name of an encoding
+
+    errors : str, optional
+       Specifies how to handle encoding errors
+
+    Returns
+    -------
+    out : ndarray
+
+    See Also
+    --------
+    str.encode
+
+    Notes
+    -----
+    The type of the result will depend on the encoding specified.
+
+    """
+    return _to_bytes_or_str_array(
+        _vec_string(a, object_, 'encode', _clean_args(encoding, errors)))
+
+
+def _endswith_dispatcher(a, suffix, start=None, end=None):
+    return (a,)
+
+
+@array_function_dispatch(_endswith_dispatcher)
+def endswith(a, suffix, start=0, end=None):
+    """
+    Returns a boolean array which is `True` where the string element
+    in `a` ends with `suffix`, otherwise `False`.
+
+    Calls `str.endswith` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    suffix : str
+
+    start, end : int, optional
+        With optional `start`, test beginning at that position. With
+        optional `end`, stop comparing at that position.
+
+    Returns
+    -------
+    out : ndarray
+        Outputs an array of bools.
+
+    See Also
+    --------
+    str.endswith
+
+    Examples
+    --------
+    >>> s = np.array(['foo', 'bar'])
+    >>> s[0] = 'foo'
+    >>> s[1] = 'bar'
+    >>> s
+    array(['foo', 'bar'], dtype='<U3')
+    >>> np.char.endswith(s, 'ar')
+    array([False,  True])
+    >>> np.char.endswith(s, 'a', start=1, end=2)
+    array([False,  True])
+
+    """
+    return _vec_string(
+        a, bool_, 'endswith', [suffix, start] + _clean_args(end))
+
+
+def _expandtabs_dispatcher(a, tabsize=None):
+    return (a,)
+
+
+@array_function_dispatch(_expandtabs_dispatcher)
+def expandtabs(a, tabsize=8):
+    """
+    Return a copy of each string element where all tab characters are
+    replaced by one or more spaces.
+
+    Calls `str.expandtabs` element-wise.
+
+    Return a copy of each string element where all tab characters are
+    replaced by one or more spaces, depending on the current column
+    and the given `tabsize`. The column number is reset to zero after
+    each newline occurring in the string. This doesn't understand other
+    non-printing characters or escape sequences.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+        Input array
+    tabsize : int, optional
+        Replace tabs with `tabsize` number of spaces.  If not given defaults
+        to 8 spaces.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.expandtabs
+
+    """
+    return _to_bytes_or_str_array(
+        _vec_string(a, object_, 'expandtabs', (tabsize,)), a)
+
+
+@array_function_dispatch(_count_dispatcher)
+def find(a, sub, start=0, end=None):
+    """
+    For each element, return the lowest index in the string where
+    substring `sub` is found.
+
+    Calls `str.find` element-wise.
+
+    For each element, return the lowest index in the string where
+    substring `sub` is found, such that `sub` is contained in the
+    range [`start`, `end`].
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    sub : str or unicode
+
+    start, end : int, optional
+        Optional arguments `start` and `end` are interpreted as in
+        slice notation.
+
+    Returns
+    -------
+    out : ndarray or int
+        Output array of ints.  Returns -1 if `sub` is not found.
+
+    See Also
+    --------
+    str.find
+
+    Examples
+    --------
+    >>> a = np.array(["NumPy is a Python library"])
+    >>> np.char.find(a, "Python", start=0, end=None)
+    array([11])
+
+    """
+    return _vec_string(
+        a, int_, 'find', [sub, start] + _clean_args(end))
+
+
+@array_function_dispatch(_count_dispatcher)
+def index(a, sub, start=0, end=None):
+    """
+    Like `find`, but raises `ValueError` when the substring is not found.
+
+    Calls `str.index` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    sub : str or unicode
+
+    start, end : int, optional
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ints.
+
+    See Also
+    --------
+    find, str.find
+
+    Examples
+    --------
+    >>> a = np.array(["Computer Science"])
+    >>> np.char.index(a, "Science", start=0, end=None)
+    array([9])
+
+    """
+    return _vec_string(
+        a, int_, 'index', [sub, start] + _clean_args(end))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isalnum(a):
+    """
+    Returns true for each element if all characters in the string are
+    alphanumeric and there is at least one character, false otherwise.
+
+    Calls `str.isalnum` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.isalnum
+    """
+    return _vec_string(a, bool_, 'isalnum')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isalpha(a):
+    """
+    Returns true for each element if all characters in the string are
+    alphabetic and there is at least one character, false otherwise.
+
+    Calls `str.isalpha` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.isalpha
+    """
+    return _vec_string(a, bool_, 'isalpha')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isdigit(a):
+    """
+    Returns true for each element if all characters in the string are
+    digits and there is at least one character, false otherwise.
+
+    Calls `str.isdigit` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.isdigit
+
+    Examples
+    --------
+    >>> a = np.array(['a', 'b', '0'])
+    >>> np.char.isdigit(a)
+    array([False, False,  True])
+    >>> a = np.array([['a', 'b', '0'], ['c', '1', '2']])
+    >>> np.char.isdigit(a)
+    array([[False, False,  True], [False,  True,  True]])
+    """
+    return _vec_string(a, bool_, 'isdigit')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def islower(a):
+    """
+    Returns true for each element if all cased characters in the
+    string are lowercase and there is at least one cased character,
+    false otherwise.
+
+    Calls `str.islower` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.islower
+    """
+    return _vec_string(a, bool_, 'islower')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isspace(a):
+    """
+    Returns true for each element if there are only whitespace
+    characters in the string and there is at least one character,
+    false otherwise.
+
+    Calls `str.isspace` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.isspace
+    """
+    return _vec_string(a, bool_, 'isspace')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def istitle(a):
+    """
+    Returns true for each element if the element is a titlecased
+    string and there is at least one character, false otherwise.
+
+    Call `str.istitle` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.istitle
+    """
+    return _vec_string(a, bool_, 'istitle')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isupper(a):
+    """
+    Return true for each element if all cased characters in the
+    string are uppercase and there is at least one character, false
+    otherwise.
+
+    Call `str.isupper` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.isupper
+
+    Examples
+    --------
+    >>> s = "GHC"
+    >>> np.char.isupper(s)
+    array(True)
+    >>> a = np.array(["hello", "HELLO", "Hello"])
+    >>> np.char.isupper(a)
+    array([False,  True, False])
+
+    """
+    return _vec_string(a, bool_, 'isupper')
+
+
+def _join_dispatcher(sep, seq):
+    return (sep, seq)
+
+
+@array_function_dispatch(_join_dispatcher)
+def join(sep, seq):
+    """
+    Return a string which is the concatenation of the strings in the
+    sequence `seq`.
+
+    Calls `str.join` element-wise.
+
+    Parameters
+    ----------
+    sep : array_like of str or unicode
+    seq : array_like of str or unicode
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input types
+
+    See Also
+    --------
+    str.join
+
+    Examples
+    --------
+    >>> np.char.join('-', 'osd')
+    array('o-s-d', dtype='<U5')
+
+    >>> np.char.join(['-', '.'], ['ghc', 'osd'])
+    array(['g-h-c', 'o.s.d'], dtype='<U5')
+
+    """
+    return _to_bytes_or_str_array(
+        _vec_string(sep, object_, 'join', (seq,)), seq)
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def lower(a):
+    """
+    Return an array with the elements converted to lowercase.
+
+    Calls `str.lower` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.lower
+
+    Examples
+    --------
+    >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c
+    array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
+    >>> np.char.lower(c)
+    array(['a1b c', '1bca', 'bca1'], dtype='<U5')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'lower')
+
+
+def _strip_dispatcher(a, chars=None):
+    return (a,)
+
+
+@array_function_dispatch(_strip_dispatcher)
+def lstrip(a, chars=None):
+    """
+    For each element in `a`, return a copy with the leading characters
+    removed.
+
+    Calls `str.lstrip` element-wise.
+
+    Parameters
+    ----------
+    a : array-like of str or unicode
+
+    chars : str or unicode, optional
+       The `chars` argument is a string specifying the set of
+       characters to be removed. If omitted or None, the `chars`
+       argument defaults to removing whitespace. The `chars` argument
+       is not a prefix; rather, all combinations of its values are
+       stripped.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.lstrip
+
+    Examples
+    --------
+    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> c
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+    >>> np.char.lstrip(c, 'a')
+    array(['AaAaA', '  aA  ', 'bBABba'], dtype='<U7')
+    >>> np.char.lstrip(c, 'A') # leaves c unchanged
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+    >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all()
+    ... # XXX: is this a regression? This used to return True
+    ... # np.char.lstrip(c,'') does not modify c at all.
+    False
+    >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all()
+    True
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))
+
+
+def _partition_dispatcher(a, sep):
+    return (a,)
+
+
+@array_function_dispatch(_partition_dispatcher)
+def partition(a, sep):
+    """
+    Partition each element in `a` around `sep`.
+
+    Calls `str.partition` element-wise.
+
+    For each element in `a`, split the element at the first
+    occurrence of `sep`, and return 3 strings containing the part
+    before the separator, the separator itself, and the part after
+    the separator. If the separator is not found, return 3 strings
+    containing the string itself, followed by two empty strings.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array
+    sep : {str, unicode}
+        Separator to split each string element in `a`.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type.
+        The output array will have an extra dimension with 3
+        elements per input element.
+
+    See Also
+    --------
+    str.partition
+
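+    Examples
+    --------
+    >>> # A minimal added illustration; the trailing axis holds the three
+    >>> # parts described above:
+    >>> x = np.array(['aAaAaA', 'abBABba'])
+    >>> np.char.partition(x, 'A')
+    array([['a', 'A', 'aAaA'],
+           ['abB', 'A', 'Bba']], dtype='<U4')
+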
+    """
+    return _to_bytes_or_str_array(
+        _vec_string(a, object_, 'partition', (sep,)), a)
+
+
+def _replace_dispatcher(a, old, new, count=None):
+    return (a,)
+
+
+@array_function_dispatch(_replace_dispatcher)
+def replace(a, old, new, count=None):
+    """
+    For each element in `a`, return a copy of the string with all
+    occurrences of substring `old` replaced by `new`.
+
+    Calls `str.replace` element-wise.
+
+    Parameters
+    ----------
+    a : array-like of str or unicode
+
+    old, new : str or unicode
+
+    count : int, optional
+        If the optional argument `count` is given, only the first
+        `count` occurrences are replaced.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.replace
+
+    Examples
+    --------
+    >>> a = np.array(["That is a mango", "Monkeys eat mangos"])
+    >>> np.char.replace(a, 'mango', 'banana')
+    array(['That is a banana', 'Monkeys eat bananas'], dtype='<U19')
+
+    >>> a = np.array(["The dish is fresh", "This is it"])
+    >>> np.char.replace(a, 'is', 'was')
+    array(['The dwash was fresh', 'Thwas was it'], dtype='<U19')
+
+    """
+    return _to_bytes_or_str_array(
+        _vec_string(
+            a, object_, 'replace', [old, new] + _clean_args(count)), a)
+
+
+def _split_dispatcher(a, sep=None, maxsplit=None):
+    return (a,)
+
+
+@array_function_dispatch(_strip_dispatcher)
+def rstrip(a, chars=None):
+    """
+    For each element in `a`, return a copy with the trailing
+    characters removed.
+
+    Calls `str.rstrip` element-wise.
+
+    Parameters
+    ----------
+    a : array-like of str or unicode
+
+    chars : str or unicode, optional
+       The `chars` argument is a string specifying the set of
+       characters to be removed. If omitted or None, the `chars`
+       argument defaults to removing whitespace. The `chars` argument
+       is not a suffix; rather, all combinations of its values are
+       stripped.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.rstrip
+
+    Examples
+    --------
+    >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c
+    array(['aAaAaA', 'abBABba'],
+        dtype='|S7')
+    >>> np.char.rstrip(c, b'a')
+    array(['aAaAaA', 'abBABb'],
+        dtype='|S7')
+    >>> np.char.rstrip(c, b'A')
+    array(['aAaAa', 'abBABba'],
+        dtype='|S7')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
+
+
+@array_function_dispatch(_split_dispatcher)
+def split(a, sep=None, maxsplit=None):
+    """
+    For each element in `a`, return a list of the words in the
+    string, using `sep` as the delimiter string.
+
+    Calls `str.split` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    sep : str or unicode, optional
+       If `sep` is not specified or None, any whitespace string is a
+       separator.
+
+    maxsplit : int, optional
+        If `maxsplit` is given, at most `maxsplit` splits are done.
+
+    Returns
+    -------
+    out : ndarray
+        Array of list objects
+
+    See Also
+    --------
+    str.split, rsplit
+
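+    Examples
+    --------
+    >>> # A short added illustration; the result is an object array of lists:
+    >>> np.char.split(np.array(['a b c', 'd e']))
+    array([list(['a', 'b', 'c']), list(['d', 'e'])], dtype=object)
+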
+    """
+    # This will return an array of lists of different sizes, so we
+    # leave it as an object array
+    return _vec_string(
+        a, object_, 'split', [sep] + _clean_args(maxsplit))
+
+
+def _splitlines_dispatcher(a, keepends=None):
+    return (a,)
+
+
+@array_function_dispatch(_splitlines_dispatcher)
+def splitlines(a, keepends=None):
+    """
+    For each element in `a`, return a list of the lines in the
+    element, breaking at line boundaries.
+
+    Calls `str.splitlines` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    keepends : bool, optional
+        Line breaks are not included in the resulting list unless
+        keepends is given and true.
+
+    Returns
+    -------
+    out : ndarray
+        Array of list objects
+
+    See Also
+    --------
+    str.splitlines
+
+    """
+    return _vec_string(
+        a, object_, 'splitlines', _clean_args(keepends))
+
+
+def _startswith_dispatcher(a, prefix, start=None, end=None):
+    return (a,)
+
+
+@array_function_dispatch(_startswith_dispatcher)
+def startswith(a, prefix, start=0, end=None):
+    """
+    Returns a boolean array which is `True` where the string element
+    in `a` starts with `prefix`, otherwise `False`.
+
+    Calls `str.startswith` element-wise.
+
+    Parameters
+    ----------
+    a : array_like of str or unicode
+
+    prefix : str
+
+    start, end : int, optional
+        With optional `start`, test beginning at that position. With
+        optional `end`, stop comparing at that position.
+
+    Returns
+    -------
+    out : ndarray
+        Array of booleans
+
+    See Also
+    --------
+    str.startswith
+
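+    Examples
+    --------
+    >>> # A short added illustration of the element-wise behavior:
+    >>> s = np.array(['foo_bar', 'bar_foo'])
+    >>> np.char.startswith(s, 'foo')
+    array([ True, False])
+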
+    """
+    return _vec_string(
+        a, bool_, 'startswith', [prefix, start] + _clean_args(end))
+
+
+@array_function_dispatch(_strip_dispatcher)
+def strip(a, chars=None):
+    """
+    For each element in `a`, return a copy with the leading and
+    trailing characters removed.
+
+    Calls `str.strip` element-wise.
+
+    Parameters
+    ----------
+    a : array-like of str or unicode
+
+    chars : str or unicode, optional
+       The `chars` argument is a string specifying the set of
+       characters to be removed. If omitted or None, the `chars`
+       argument defaults to removing whitespace. The `chars` argument
+       is not a prefix or suffix; rather, all combinations of its
+       values are stripped.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.strip
+
+    Examples
+    --------
+    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> c
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+    >>> np.char.strip(c)
+    array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7')
+    >>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads
+    array(['AaAaA', '  aA  ', 'bBABb'], dtype='<U7')
+    >>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails
+    array(['aAaAa', '  aA  ', 'abBABba'], dtype='<U7')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def swapcase(a):
+    """
+    Return element-wise a copy of the string with
+    uppercase characters converted to lowercase and vice versa.
+
+    Calls `str.swapcase` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.swapcase
+
+    Examples
+    --------
+    >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c
+    array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],
+        dtype='|S5')
+    >>> np.char.swapcase(c)
+    array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],
+        dtype='|S5')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'swapcase')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def title(a):
+    """
+    Return element-wise title cased version of string or unicode.
+
+    Title case words start with uppercase characters, all remaining cased
+    characters are lowercase.
+
+    Calls `str.title` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.title
+
+    Examples
+    --------
+    >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
+    array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
+        dtype='|S5')
+    >>> np.char.title(c)
+    array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
+        dtype='|S5')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'title')
+
+
+def _translate_dispatcher(a, table, deletechars=None):
+    return (a,)
+
+
+@array_function_dispatch(_translate_dispatcher)
+def translate(a, table, deletechars=None):
+    """
+    For each element in `a`, return a copy of the string where all
+    characters occurring in the optional argument `deletechars` are
+    removed, and the remaining characters have been mapped through the
+    given translation table.
+
+    Calls `str.translate` element-wise.
+
+    Parameters
+    ----------
+    a : array-like of str or unicode
+
+    table : str of length 256
+
+    deletechars : str
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.translate
+
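+    Examples
+    --------
+    >>> # A short added illustration for unicode input; `table` may be any
+    >>> # mapping accepted by `str.translate`, e.g. one from `str.maketrans`:
+    >>> np.char.translate(np.array(['abc', 'cba']), str.maketrans('a', 'z'))
+    array(['zbc', 'cbz'], dtype='<U3')
+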
+    """
+    a_arr = numpy.asarray(a)
+    if issubclass(a_arr.dtype.type, str_):
+        return _vec_string(
+            a_arr, a_arr.dtype, 'translate', (table,))
+    else:
+        return _vec_string(
+            a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def upper(a):
+    """
+    Return an array with the elements converted to uppercase.
+
+    Calls `str.upper` element-wise.
+
+    For 8-bit strings, this method is locale-dependent.
+
+    Parameters
+    ----------
+    a : array_like, {str, unicode}
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, {str, unicode}
+        Output array of str or unicode, depending on input type
+
+    See Also
+    --------
+    str.upper
+
+    Examples
+    --------
+    >>> c = np.array(['a1b c', '1bca', 'bca1']); c
+    array(['a1b c', '1bca', 'bca1'], dtype='<U5')
+    >>> np.char.upper(c)
+    array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
+
+    """
+    a_arr = numpy.asarray(a)
+    return _vec_string(a_arr, a_arr.dtype, 'upper')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isnumeric(a):
+    """
+    For each element, return True if there are only
+    numeric characters in the element.
+
+    Calls `str.isnumeric` element-wise.
+
+    Numeric characters include digit characters, and all characters
+    that have the Unicode numeric value property, e.g. ``U+2155,
+    VULGAR FRACTION ONE FIFTH``.
+
+    Parameters
+    ----------
+    a : array_like, unicode
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, bool
+        Array of booleans of same shape as `a`.
+
+    See Also
+    --------
+    str.isnumeric
+
+    Examples
+    --------
+    >>> np.char.isnumeric(['123', '123abc', '9.0', '1/4', 'VIII'])
+    array([ True, False, False, False, False])
+
+    """
+    if not _is_unicode(a):
+        raise TypeError("isnumeric is only available for Unicode strings and arrays")
+    return _vec_string(a, bool_, 'isnumeric')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isdecimal(a):
+    """
+    For each element, return True if there are only decimal
+    characters in the element.
+
+    Calls `str.isdecimal` element-wise.
+
+    Decimal characters include digit characters, and all characters
+    that can be used to form decimal-radix numbers,
+    e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.
+
+    Parameters
+    ----------
+    a : array_like, unicode
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, bool
+        Array of booleans identical in shape to `a`.
+
+    See Also
+    --------
+    str.isdecimal
+
+    Examples
+    --------
+    >>> np.char.isdecimal(['12345', '4.99', '123ABC', ''])
+    array([ True, False, False, False])
+
+    """ 
+    if not _is_unicode(a):
+        raise TypeError(
+            "isdecimal is only available for Unicode strings and arrays")
+    return _vec_string(a, bool_, 'isdecimal')
+
+
+@set_module('numpy')
+class chararray(ndarray):
+    """
+    chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
+              strides=None, order=None)
+
+    Provides a convenient view on arrays of string and unicode values.
+
+    .. note::
+       The `chararray` class exists for backwards compatibility with
+       Numarray, it is not recommended for new development. Starting from numpy
+       1.4, if one needs arrays of strings, it is recommended to use arrays of
+       `dtype` `object_`, `bytes_` or `str_`, and use the free functions
+       in the `numpy.char` module for fast vectorized string operations.
+
+    Versus a regular NumPy array of type `str` or `unicode`, this
+    class adds the following functionality:
+
+      1) values automatically have whitespace removed from the end
+         when indexed
+
+      2) comparison operators automatically remove whitespace from the
+         end when comparing values
+
+      3) vectorized string operations are provided as methods
+         (e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``)
+
+    chararrays should be created using `numpy.char.array` or
+    `numpy.char.asarray`, rather than this constructor directly.
+
+    This constructor creates the array, using `buffer` (with `offset`
+    and `strides`) if it is not ``None``. If `buffer` is ``None``, then
+    constructs a new array with `strides` in "C order", unless both
+    ``len(shape) >= 2`` and ``order='F'``, in which case `strides`
+    is in "Fortran order".
+
+    Methods
+    -------
+    astype
+    argsort
+    copy
+    count
+    decode
+    dump
+    dumps
+    encode
+    endswith
+    expandtabs
+    fill
+    find
+    flatten
+    getfield
+    index
+    isalnum
+    isalpha
+    isdecimal
+    isdigit
+    islower
+    isnumeric
+    isspace
+    istitle
+    isupper
+    item
+    join
+    ljust
+    lower
+    lstrip
+    nonzero
+    put
+    ravel
+    repeat
+    replace
+    reshape
+    resize
+    rfind
+    rindex
+    rjust
+    rsplit
+    rstrip
+    searchsorted
+    setfield
+    setflags
+    sort
+    split
+    splitlines
+    squeeze
+    startswith
+    strip
+    swapaxes
+    swapcase
+    take
+    title
+    tofile
+    tolist
+    tostring
+    translate
+    transpose
+    upper
+    view
+    zfill
+
+    Parameters
+    ----------
+    shape : tuple
+        Shape of the array.
+    itemsize : int, optional
+        Length of each array element, in number of characters. Default is 1.
+    unicode : bool, optional
+        Are the array elements of type unicode (True) or string (False).
+        Default is False.
+    buffer : object exposing the buffer interface or str, optional
+        Memory address of the start of the array data.  Default is None,
+        in which case a new array is created.
+    offset : int, optional
+        Fixed stride displacement from the beginning of an axis.
+        Default is 0. Must be >= 0.
+    strides : array_like of ints, optional
+        Strides for the array (see `ndarray.strides` for full description).
+        Default is None.
+    order : {'C', 'F'}, optional
+        The order in which the array data is stored in memory: 'C' ->
+        "row major" order (the default), 'F' -> "column major"
+        (Fortran) order.
+
+    Examples
+    --------
+    >>> charar = np.chararray((3, 3))
+    >>> charar[:] = 'a'
+    >>> charar
+    chararray([[b'a', b'a', b'a'],
+               [b'a', b'a', b'a'],
+               [b'a', b'a', b'a']], dtype='|S1')
+
+    >>> charar = np.chararray(charar.shape, itemsize=5)
+    >>> charar[:] = 'abc'
+    >>> charar
+    chararray([[b'abc', b'abc', b'abc'],
+               [b'abc', b'abc', b'abc'],
+               [b'abc', b'abc', b'abc']], dtype='|S5')
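+
+    >>> # Added illustration: trailing whitespace is stripped on indexing
+    >>> charar[:] = 'ab   '
+    >>> charar[0, 0]
+    b'ab'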
+
+    """
+    def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
+                offset=0, strides=None, order='C'):
+        global _globalvar
+
+        if unicode:
+            dtype = str_
+        else:
+            dtype = bytes_
+
+        # force itemsize to be a Python int, since using NumPy integer
+        # types results in itemsize.itemsize being used as the size of
+        # strings in the new array.
+        itemsize = int(itemsize)
+
+        if isinstance(buffer, str):
+            # unicode objects do not have the buffer interface
+            filler = buffer
+            buffer = None
+        else:
+            filler = None
+
+        _globalvar = 1
+        if buffer is None:
+            self = ndarray.__new__(subtype, shape, (dtype, itemsize),
+                                   order=order)
+        else:
+            self = ndarray.__new__(subtype, shape, (dtype, itemsize),
+                                   buffer=buffer,
+                                   offset=offset, strides=strides,
+                                   order=order)
+        if filler is not None:
+            self[...] = filler
+        _globalvar = 0
+        return self
+
+    def __array_finalize__(self, obj):
+        # The 'b' dtype char is a special case because it is used for
+        # reconstructing the array (e.g. when unpickling).
+        if not _globalvar and self.dtype.char not in 'SUbc':
+            raise ValueError("Can only create a chararray from string data.")
+
+    def __getitem__(self, obj):
+        val = ndarray.__getitem__(self, obj)
+
+        if isinstance(val, character):
+            temp = val.rstrip()
+            if len(temp) == 0:
+                val = ''
+            else:
+                val = temp
+
+        return val
+
+    # IMPLEMENTATION NOTE: Most of the methods of this class are
+    # direct delegations to the free functions in this module.
+    # However, those that return an array of strings should instead
+    # return a chararray, so some extra wrapping is required.
+
+    def __eq__(self, other):
+        """
+        Return (self == other) element-wise.
+
+        See Also
+        --------
+        equal
+        """
+        return equal(self, other)
+
+    def __ne__(self, other):
+        """
+        Return (self != other) element-wise.
+
+        See Also
+        --------
+        not_equal
+        """
+        return not_equal(self, other)
+
+    def __ge__(self, other):
+        """
+        Return (self >= other) element-wise.
+
+        See Also
+        --------
+        greater_equal
+        """
+        return greater_equal(self, other)
+
+    def __le__(self, other):
+        """
+        Return (self <= other) element-wise.
+
+        See Also
+        --------
+        less_equal
+        """
+        return less_equal(self, other)
+
+    def __gt__(self, other):
+        """
+        Return (self > other) element-wise.
+
+        See Also
+        --------
+        greater
+        """
+        return greater(self, other)
+
+    def __lt__(self, other):
+        """
+        Return (self < other) element-wise.
+
+        See Also
+        --------
+        less
+        """
+        return less(self, other)
+
+    def __add__(self, other):
+        """
+        Return (self + other), that is string concatenation,
+        element-wise for a pair of array_likes of str or unicode.
+
+        See Also
+        --------
+        add
+        """
+        return asarray(add(self, other))
+
+    def __radd__(self, other):
+        """
+        Return (other + self), that is string concatenation,
+        element-wise for a pair of array_likes of `bytes_` or `str_`.
+
+        See Also
+        --------
+        add
+        """
+        return asarray(add(numpy.asarray(other), self))
+
+    def __mul__(self, i):
+        """
+        Return (self * i), that is string multiple concatenation,
+        element-wise.
+
+        See Also
+        --------
+        multiply
+        """
+        return asarray(multiply(self, i))
+
+    def __rmul__(self, i):
+        """
+        Return (i * self), that is string multiple concatenation,
+        element-wise.
+
+        See Also
+        --------
+        multiply
+        """
+        return asarray(multiply(self, i))
+
+    def __mod__(self, i):
+        """
+        Return (self % i), that is, %-style string formatting
+        (interpolation), element-wise for a pair of array_likes of `bytes_`
+        or `str_`.
+
+        See Also
+        --------
+        mod
+        """
+        return asarray(mod(self, i))
+
+    def __rmod__(self, other):
+        return NotImplemented
+
+    def argsort(self, axis=-1, kind=None, order=None):
+        """
+        Return the indices that sort the array lexicographically.
+
+        For full documentation see `numpy.argsort`, for which this method is
+        in fact merely a "thin wrapper."
+
+        Examples
+        --------
+        >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')
+        >>> c = c.view(np.chararray); c
+        chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],
+              dtype='|S5')
+        >>> c[c.argsort()]
+        chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],
+              dtype='|S5')
+
+        """
+        return self.__array__().argsort(axis, kind, order)
+    argsort.__doc__ = ndarray.argsort.__doc__
+
+    def capitalize(self):
+        """
+        Return a copy of `self` with only the first character of each element
+        capitalized.
+
+        See Also
+        --------
+        char.capitalize
+
+        """
+        return asarray(capitalize(self))
+
+    def center(self, width, fillchar=' '):
+        """
+        Return a copy of `self` with its elements centered in a
+        string of length `width`.
+
+        See Also
+        --------
+        center
+        """
+        return asarray(center(self, width, fillchar))
+
+    def count(self, sub, start=0, end=None):
+        """
+        Returns an array with the number of non-overlapping occurrences of
+        substring `sub` in the range [`start`, `end`].
+
+        See Also
+        --------
+        char.count
+
+        """
+        return count(self, sub, start, end)
+
+    def decode(self, encoding=None, errors=None):
+        """
+        Calls ``bytes.decode`` element-wise.
+
+        See Also
+        --------
+        char.decode
+
+        """
+        return decode(self, encoding, errors)
+
+    def encode(self, encoding=None, errors=None):
+        """
+        Calls `str.encode` element-wise.
+
+        See Also
+        --------
+        char.encode
+
+        """
+        return encode(self, encoding, errors)
+
+    def endswith(self, suffix, start=0, end=None):
+        """
+        Returns a boolean array which is `True` where the string element
+        in `self` ends with `suffix`, otherwise `False`.
+
+        See Also
+        --------
+        char.endswith
+
+        """
+        return endswith(self, suffix, start, end)
+
+    def expandtabs(self, tabsize=8):
+        """
+        Return a copy of each string element where all tab characters are
+        replaced by one or more spaces.
+
+        See Also
+        --------
+        char.expandtabs
+
+        """
+        return asarray(expandtabs(self, tabsize))
+
+    def find(self, sub, start=0, end=None):
+        """
+        For each element, return the lowest index in the string where
+        substring `sub` is found.
+
+        See Also
+        --------
+        char.find
+
+        """
+        return find(self, sub, start, end)
+
+    def index(self, sub, start=0, end=None):
+        """
+        Like `find`, but raises `ValueError` when the substring is not found.
+
+        See Also
+        --------
+        char.index
+
+        """
+        return index(self, sub, start, end)
+
+    def isalnum(self):
+        """
+        Returns true for each element if all characters in the string
+        are alphanumeric and there is at least one character, false
+        otherwise.
+
+        See Also
+        --------
+        char.isalnum
+
+        """
+        return isalnum(self)
+
+    def isalpha(self):
+        """
+        Returns true for each element if all characters in the string
+        are alphabetic and there is at least one character, false
+        otherwise.
+
+        See Also
+        --------
+        char.isalpha
+
+        """
+        return isalpha(self)
+
+    def isdigit(self):
+        """
+        Returns true for each element if all characters in the string are
+        digits and there is at least one character, false otherwise.
+
+        See Also
+        --------
+        char.isdigit
+
+        """
+        return isdigit(self)
+
+    def islower(self):
+        """
+        Returns true for each element if all cased characters in the
+        string are lowercase and there is at least one cased character,
+        false otherwise.
+
+        See Also
+        --------
+        char.islower
+
+        """
+        return islower(self)
+
+    def isspace(self):
+        """
+        Returns true for each element if there are only whitespace
+        characters in the string and there is at least one character,
+        false otherwise.
+
+        See Also
+        --------
+        char.isspace
+
+        """
+        return isspace(self)
+
+    def istitle(self):
+        """
+        Returns true for each element if the element is a titlecased
+        string and there is at least one character, false otherwise.
+
+        See Also
+        --------
+        char.istitle
+
+        """
+        return istitle(self)
+
+    def isupper(self):
+        """
+        Returns true for each element if all cased characters in the
+        string are uppercase and there is at least one character, false
+        otherwise.
+
+        See Also
+        --------
+        char.isupper
+
+        """
+        return isupper(self)
+
+    def join(self, seq):
+        """
+        Return a string which is the concatenation of the strings in the
+        sequence `seq`.
+
+        See Also
+        --------
+        char.join
+
+        """
+        return join(self, seq)
+
+    def ljust(self, width, fillchar=' '):
+        """
+        Return an array with the elements of `self` left-justified in a
+        string of length `width`.
+
+        See Also
+        --------
+        char.ljust
+
+        """
+        return asarray(ljust(self, width, fillchar))
+
+    def lower(self):
+        """
+        Return an array with the elements of `self` converted to
+        lowercase.
+
+        See Also
+        --------
+        char.lower
+
+        """
+        return asarray(lower(self))
+
+    def lstrip(self, chars=None):
+        """
+        For each element in `self`, return a copy with the leading characters
+        removed.
+
+        See Also
+        --------
+        char.lstrip
+
+        """
+        return asarray(lstrip(self, chars))
+
+    def partition(self, sep):
+        """
+        Partition each element in `self` around `sep`.
+
+        See Also
+        --------
+        partition
+        """
+        return asarray(partition(self, sep))
+
+    def replace(self, old, new, count=None):
+        """
+        For each element in `self`, return a copy of the string with all
+        occurrences of substring `old` replaced by `new`.
+
+        See Also
+        --------
+        char.replace
+
+        """
+        return asarray(replace(self, old, new, count))
+
+    def rfind(self, sub, start=0, end=None):
+        """
+        For each element in `self`, return the highest index in the string
+        where substring `sub` is found, such that `sub` is contained
+        within [`start`, `end`].
+
+        See Also
+        --------
+        char.rfind
+
+        """
+        return rfind(self, sub, start, end)
+
+    def rindex(self, sub, start=0, end=None):
+        """
+        Like `rfind`, but raises `ValueError` when the substring `sub` is
+        not found.
+
+        See Also
+        --------
+        char.rindex
+
+        """
+        return rindex(self, sub, start, end)
+
+    def rjust(self, width, fillchar=' '):
+        """
+        Return an array with the elements of `self`
+        right-justified in a string of length `width`.
+
+        See Also
+        --------
+        char.rjust
+
+        """
+        return asarray(rjust(self, width, fillchar))
+
+    def rpartition(self, sep):
+        """
+        Partition each element in `self` around `sep`.
+
+        See Also
+        --------
+        rpartition
+        """
+        return asarray(rpartition(self, sep))
+
+    def rsplit(self, sep=None, maxsplit=None):
+        """
+        For each element in `self`, return a list of the words in
+        the string, using `sep` as the delimiter string.
+
+        See Also
+        --------
+        char.rsplit
+
+        """
+        return rsplit(self, sep, maxsplit)
+
+    def rstrip(self, chars=None):
+        """
+        For each element in `self`, return a copy with the trailing
+        characters removed.
+
+        See Also
+        --------
+        char.rstrip
+
+        """
+        return asarray(rstrip(self, chars))
+
+    def split(self, sep=None, maxsplit=None):
+        """
+        For each element in `self`, return a list of the words in the
+        string, using `sep` as the delimiter string.
+
+        See Also
+        --------
+        char.split
+
+        """
+        return split(self, sep, maxsplit)
+
+    def splitlines(self, keepends=None):
+        """
+        For each element in `self`, return a list of the lines in the
+        element, breaking at line boundaries.
+
+        See Also
+        --------
+        char.splitlines
+
+        """
+        return splitlines(self, keepends)
+
+    def startswith(self, prefix, start=0, end=None):
+        """
+        Returns a boolean array which is `True` where the string element
+        in `self` starts with `prefix`, otherwise `False`.
+
+        See Also
+        --------
+        char.startswith
+
+        """
+        return startswith(self, prefix, start, end)
+
+    def strip(self, chars=None):
+        """
+        For each element in `self`, return a copy with the leading and
+        trailing characters removed.
+
+        See Also
+        --------
+        char.strip
+
+        """
+        return asarray(strip(self, chars))
+
+    def swapcase(self):
+        """
+        For each element in `self`, return a copy of the string with
+        uppercase characters converted to lowercase and vice versa.
+
+        See Also
+        --------
+        char.swapcase
+
+        """
+        return asarray(swapcase(self))
+
+    def title(self):
+        """
+        For each element in `self`, return a titlecased version of the
+        string: words start with uppercase characters, all remaining cased
+        characters are lowercase.
+
+        See Also
+        --------
+        char.title
+
+        """
+        return asarray(title(self))
+
+    def translate(self, table, deletechars=None):
+        """
+        For each element in `self`, return a copy of the string where
+        all characters occurring in the optional argument
+        `deletechars` are removed, and the remaining characters have
+        been mapped through the given translation table.
+
+        See Also
+        --------
+        char.translate
+
+        """
+        return asarray(translate(self, table, deletechars))
+
+    def upper(self):
+        """
+        Return an array with the elements of `self` converted to
+        uppercase.
+
+        See Also
+        --------
+        char.upper
+
+        """
+        return asarray(upper(self))
+
+    def zfill(self, width):
+        """
+        Return the numeric string left-filled with zeros in a string of
+        length `width`.
+
+        See Also
+        --------
+        char.zfill
+
+        """
+        return asarray(zfill(self, width))
+
+    def isnumeric(self):
+        """
+        For each element in `self`, return True if there are only
+        numeric characters in the element.
+
+        See Also
+        --------
+        char.isnumeric
+
+        """
+        return isnumeric(self)
+
+    def isdecimal(self):
+        """
+        For each element in `self`, return True if there are only
+        decimal characters in the element.
+
+        See Also
+        --------
+        char.isdecimal
+
+        """
+        return isdecimal(self)
+
+
+@set_module("numpy.char")
+def array(obj, itemsize=None, copy=True, unicode=None, order=None):
+    """
+    Create a `chararray`.
+
+    .. note::
+       This class is provided for numarray backward-compatibility.
+       New code (not concerned with numarray compatibility) should use
+       arrays of type `bytes_` or `str_` and use the free functions
+       in :mod:`numpy.char <numpy.core.defchararray>` for fast
+       vectorized string operations instead.
+
+    Versus a regular NumPy array of type `str` or `unicode`, this
+    class adds the following functionality:
+
+      1) values automatically have whitespace removed from the end
+         when indexed
+
+      2) comparison operators automatically remove whitespace from the
+         end when comparing values
+
+      3) vectorized string operations are provided as methods
+         (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``)
+
+    Parameters
+    ----------
+    obj : array of str or unicode-like
+
+    itemsize : int, optional
+        `itemsize` is the number of characters per scalar in the
+        resulting array.  If `itemsize` is None, and `obj` is an
+        object array or a Python list, the `itemsize` will be
+        automatically determined.  If `itemsize` is provided and `obj`
+        is of type str or unicode, then the `obj` string will be
+        chunked into `itemsize` pieces.
+
+    copy : bool, optional
+        If true (default), then the object is copied.  Otherwise, a copy
+        will only be made if __array__ returns a copy, if obj is a
+        nested sequence, or if a copy is needed to satisfy any of the other
+        requirements (`itemsize`, unicode, `order`, etc.).
+
+    unicode : bool, optional
+        When true, the resulting `chararray` can contain Unicode
+        characters, when false only 8-bit characters.  If unicode is
+        None and `obj` is one of the following:
+
+          - a `chararray`,
+          - an ndarray of type `str` or `unicode`
+          - a Python str or unicode object,
+
+        then the unicode setting of the output array will be
+        automatically determined.
+
+    order : {'C', 'F', 'A'}, optional
+        Specify the order of the array.  If order is 'C' (default), then the
+        array will be in C-contiguous order (last-index varies the
+        fastest).  If order is 'F', then the returned array
+        will be in Fortran-contiguous order (first-index varies the
+        fastest).  If order is 'A', then the returned array may
+        be in any order (either C-, Fortran-contiguous, or even
+        discontiguous).
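+
+    Examples
+    --------
+    >>> # A short added illustration of chararray construction and its
+    >>> # vectorized infix operators:
+    >>> c = np.char.array(['hello', 'world'])
+    >>> c
+    chararray(['hello', 'world'], dtype='<U5')
+    >>> c + '!'
+    chararray(['hello!', 'world!'], dtype='<U6')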
+    """
+    if isinstance(obj, (bytes, str)):
+        if unicode is None:
+            if isinstance(obj, str):
+                unicode = True
+            else:
+                unicode = False
+
+        if itemsize is None:
+            itemsize = len(obj)
+        shape = len(obj) // itemsize
+
+        return chararray(shape, itemsize=itemsize, unicode=unicode,
+                         buffer=obj, order=order)
+
+    if isinstance(obj, (list, tuple)):
+        obj = numpy.asarray(obj)
+
+    if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
+        # If we just have a vanilla chararray, create a chararray
+        # view around it.
+        if not isinstance(obj, chararray):
+            obj = obj.view(chararray)
+
+        if itemsize is None:
+            itemsize = obj.itemsize
+            # itemsize is in 8-bit chars, so for Unicode, we need
+            # to divide by the size of a single Unicode character,
+            # which for NumPy is always 4
+            if issubclass(obj.dtype.type, str_):
+                itemsize //= 4
+
+        if unicode is None:
+            if issubclass(obj.dtype.type, str_):
+                unicode = True
+            else:
+                unicode = False
+
+        if unicode:
+            dtype = str_
+        else:
+            dtype = bytes_
+
+        if order is not None:
+            obj = numpy.asarray(obj, order=order)
+        if (copy or
+                (itemsize != obj.itemsize) or
+                (not unicode and isinstance(obj, str_)) or
+                (unicode and isinstance(obj, bytes_))):
+            obj = obj.astype((dtype, int(itemsize)))
+        return obj
+
+    if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
+        if itemsize is None:
+            # Since no itemsize was specified, convert the input array to
+            # a list so the ndarray constructor will automatically
+            # determine the itemsize for us.
+            obj = obj.tolist()
+            # Fall through to the default case
+
+    if unicode:
+        dtype = str_
+    else:
+        dtype = bytes_
+
+    if itemsize is None:
+        val = narray(obj, dtype=dtype, order=order, subok=True)
+    else:
+        val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
+    return val.view(chararray)
+
+
+@set_module("numpy.char")
+def asarray(obj, itemsize=None, unicode=None, order=None):
+    """
+    Convert the input to a `chararray`, copying the data only if
+    necessary.
+
+    Versus a regular NumPy array of type `str` or `unicode`, this
+    class adds the following functionality:
+
+      1) values automatically have whitespace removed from the end
+         when indexed
+
+      2) comparison operators automatically remove whitespace from the
+         end when comparing values
+
+      3) vectorized string operations are provided as methods
+         (e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``, ``%``)
+
+    Parameters
+    ----------
+    obj : array of str or unicode-like
+
+    itemsize : int, optional
+        `itemsize` is the number of characters per scalar in the
+        resulting array.  If `itemsize` is None, and `obj` is an
+        object array or a Python list, the `itemsize` will be
+        automatically determined.  If `itemsize` is provided and `obj`
+        is of type str or unicode, then the `obj` string will be
+        chunked into `itemsize` pieces.
+
+    unicode : bool, optional
+        When true, the resulting `chararray` can contain Unicode
+        characters, when false only 8-bit characters.  If unicode is
+        None and `obj` is one of the following:
+
+          - a `chararray`,
+          - an ndarray of type `str` or `unicode`
+          - a Python str or unicode object,
+
+        then the unicode setting of the output array will be
+        automatically determined.
+
+    order : {'C', 'F'}, optional
+        Specify the order of the array.  If order is 'C' (default), then the
+        array will be in C-contiguous order (last-index varies the
+        fastest).  If order is 'F', then the returned array
+        will be in Fortran-contiguous order (first-index varies the
+        fastest).
+    """
+    return array(obj, itemsize, copy=False,
+                 unicode=unicode, order=order)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/defchararray.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/defchararray.pyi
new file mode 100644
index 00000000..73d90bb2
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/defchararray.pyi
@@ -0,0 +1,421 @@
+from typing import (
+    Literal as L,
+    overload,
+    TypeVar,
+    Any,
+)
+
+from numpy import (
+    chararray as chararray,
+    dtype,
+    str_,
+    bytes_,
+    int_,
+    bool_,
+    object_,
+    _OrderKACF,
+)
+
+from numpy._typing import (
+    NDArray,
+    _ArrayLikeStr_co as U_co,
+    _ArrayLikeBytes_co as S_co,
+    _ArrayLikeInt_co as i_co,
+    _ArrayLikeBool_co as b_co,
+)
+
+from numpy.core.multiarray import compare_chararrays as compare_chararrays
+
+_SCT = TypeVar("_SCT", str_, bytes_)
+_CharArray = chararray[Any, dtype[_SCT]]
+
+__all__: list[str]
+
+# Comparison
+@overload
+def equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
+@overload
+def equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
+
+@overload
+def not_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
+@overload
+def not_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
+
+@overload
+def greater_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
+@overload
+def greater_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
+
+@overload
+def less_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
+@overload
+def less_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
+
+@overload
+def greater(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
+@overload
+def greater(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
+
+@overload
+def less(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
+@overload
+def less(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
+
+# String operations
+@overload
+def add(x1: U_co, x2: U_co) -> NDArray[str_]: ...
+@overload
+def add(x1: S_co, x2: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def multiply(a: U_co, i: i_co) -> NDArray[str_]: ...
+@overload
+def multiply(a: S_co, i: i_co) -> NDArray[bytes_]: ...
+
+@overload
+def mod(a: U_co, value: Any) -> NDArray[str_]: ...
+@overload
+def mod(a: S_co, value: Any) -> NDArray[bytes_]: ...
+
+@overload
+def capitalize(a: U_co) -> NDArray[str_]: ...
+@overload
+def capitalize(a: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
+@overload
+def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
+
+def decode(
+    a: S_co,
+    encoding: None | str = ...,
+    errors: None | str = ...,
+) -> NDArray[str_]: ...
+
+def encode(
+    a: U_co,
+    encoding: None | str = ...,
+    errors: None | str = ...,
+) -> NDArray[bytes_]: ...
+
+@overload
+def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ...
+@overload
+def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ...
+
+@overload
+def join(sep: U_co, seq: U_co) -> NDArray[str_]: ...
+@overload
+def join(sep: S_co, seq: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
+@overload
+def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
+
+@overload
+def lower(a: U_co) -> NDArray[str_]: ...
+@overload
+def lower(a: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ...
+@overload
+def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ...
+
+@overload
+def partition(a: U_co, sep: U_co) -> NDArray[str_]: ...
+@overload
+def partition(a: S_co, sep: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def replace(
+    a: U_co,
+    old: U_co,
+    new: U_co,
+    count: None | i_co = ...,
+) -> NDArray[str_]: ...
+@overload
+def replace(
+    a: S_co,
+    old: S_co,
+    new: S_co,
+    count: None | i_co = ...,
+) -> NDArray[bytes_]: ...
+
+@overload
+def rjust(
+    a: U_co,
+    width: i_co,
+    fillchar: U_co = ...,
+) -> NDArray[str_]: ...
+@overload
+def rjust(
+    a: S_co,
+    width: i_co,
+    fillchar: S_co = ...,
+) -> NDArray[bytes_]: ...
+
+@overload
+def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ...
+@overload
+def rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def rsplit(
+    a: U_co,
+    sep: None | U_co = ...,
+    maxsplit: None | i_co = ...,
+) -> NDArray[object_]: ...
+@overload
+def rsplit(
+    a: S_co,
+    sep: None | S_co = ...,
+    maxsplit: None | i_co = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def rstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ...
+@overload
+def rstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ...
+
+@overload
+def split(
+    a: U_co,
+    sep: None | U_co = ...,
+    maxsplit: None | i_co = ...,
+) -> NDArray[object_]: ...
+@overload
+def split(
+    a: S_co,
+    sep: None | S_co = ...,
+    maxsplit: None | i_co = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def splitlines(a: U_co, keepends: None | b_co = ...) -> NDArray[object_]: ...
+@overload
+def splitlines(a: S_co, keepends: None | b_co = ...) -> NDArray[object_]: ...
+
+@overload
+def strip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ...
+@overload
+def strip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ...
+
+@overload
+def swapcase(a: U_co) -> NDArray[str_]: ...
+@overload
+def swapcase(a: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def title(a: U_co) -> NDArray[str_]: ...
+@overload
+def title(a: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def translate(
+    a: U_co,
+    table: U_co,
+    deletechars: None | U_co = ...,
+) -> NDArray[str_]: ...
+@overload
+def translate(
+    a: S_co,
+    table: S_co,
+    deletechars: None | S_co = ...,
+) -> NDArray[bytes_]: ...
+
+@overload
+def upper(a: U_co) -> NDArray[str_]: ...
+@overload
+def upper(a: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def zfill(a: U_co, width: i_co) -> NDArray[str_]: ...
+@overload
+def zfill(a: S_co, width: i_co) -> NDArray[bytes_]: ...
+
+# String information
+@overload
+def count(
+    a: U_co,
+    sub: U_co,
+    start: i_co = ...,
+    end: None | i_co = ...,
+) -> NDArray[int_]: ...
+@overload
+def count(
+    a: S_co,
+    sub: S_co,
+    start: i_co = ...,
+    end: None | i_co = ...,
+) -> NDArray[int_]: ...
+
+@overload
+def endswith(
+    a: U_co,
+    suffix: U_co,
+    start: i_co = ...,
+    end: None | i_co = ...,
+) -> NDArray[bool_]: ...
+@overload
+def endswith(
+    a: S_co,
+    suffix: S_co,
+    start: i_co = ...,
+    end: None | i_co = ...,
+) -> NDArray[bool_]: ...
+
+@overload
+def find(
+    a: U_co,
+    sub: U_co,
+    start: i_co = ...,
+    end: None | i_co = ...,
+) -> NDArray[int_]: ...
+@overload
+def find(
+    a: S_co,
+    sub: S_co,
+    start: i_co = ...,
+    end: None | i_co = ...,
+) -> NDArray[int_]: ...
+
+@overload
+def index(
+    a: U_co,
+    sub: U_co,
+    start: i_co = ...,
+    end: None | i_co = ...,
+) -> NDArray[int_]: ...
+@overload
+def index(
+    a: S_co,
+    sub: S_co,
+    start: i_co = ...,
+    end: None | i_co = ...,
+) -> NDArray[int_]: ...
+
+def isalpha(a: U_co | S_co) -> NDArray[bool_]: ...
+def isalnum(a: U_co | S_co) -> NDArray[bool_]: ...
+def isdecimal(a: U_co | S_co) -> NDArray[bool_]: ...
+def isdigit(a: U_co | S_co) -> NDArray[bool_]: ...
+def islower(a: U_co | S_co) -> NDArray[bool_]: ...
+def isnumeric(a: U_co | S_co) -> NDArray[bool_]: ...
+def isspace(a: U_co | S_co) -> NDArray[bool_]: ...
+def istitle(a: U_co | S_co) -> NDArray[bool_]: ...
+def isupper(a: U_co | S_co) -> NDArray[bool_]: ...
+
+@overload
+def rfind(
+    a: U_co,
+    sub: U_co,
+    start: i_co = ...,
+    end: None | i_co = ...,
+) -> NDArray[int_]: ...
+@overload
+def rfind(
+    a: S_co,
+    sub: S_co,
+    start: i_co = ...,
+    end: None | i_co = ...,
+) -> NDArray[int_]: ...
+
+@overload
+def rindex(
+    a: U_co,
+    sub: U_co,
+    start: i_co = ...,
+    end: None | i_co = ...,
+) -> NDArray[int_]: ...
+@overload
+def rindex(
+    a: S_co,
+    sub: S_co,
+    start: i_co = ...,
+    end: None | i_co = ...,
+) -> NDArray[int_]: ...
+
+@overload
+def startswith(
+    a: U_co,
+    prefix: U_co,
+    start: i_co = ...,
+    end: None | i_co = ...,
+) -> NDArray[bool_]: ...
+@overload
+def startswith(
+    a: S_co,
+    prefix: S_co,
+    start: i_co = ...,
+    end: None | i_co = ...,
+) -> NDArray[bool_]: ...
+
+def str_len(A: U_co | S_co) -> NDArray[int_]: ...
+
+# Overload 1 and 2: str- or bytes-based array-likes
+# overload 3: arbitrary object with unicode=False  (-> bytes_)
+# overload 4: arbitrary object with unicode=True  (-> str_)
+@overload
+def array(
+    obj: U_co,
+    itemsize: None | int = ...,
+    copy: bool = ...,
+    unicode: L[False] = ...,
+    order: _OrderKACF = ...,
+) -> _CharArray[str_]: ...
+@overload
+def array(
+    obj: S_co,
+    itemsize: None | int = ...,
+    copy: bool = ...,
+    unicode: L[False] = ...,
+    order: _OrderKACF = ...,
+) -> _CharArray[bytes_]: ...
+@overload
+def array(
+    obj: object,
+    itemsize: None | int = ...,
+    copy: bool = ...,
+    unicode: L[False] = ...,
+    order: _OrderKACF = ...,
+) -> _CharArray[bytes_]: ...
+@overload
+def array(
+    obj: object,
+    itemsize: None | int = ...,
+    copy: bool = ...,
+    unicode: L[True] = ...,
+    order: _OrderKACF = ...,
+) -> _CharArray[str_]: ...
+
+@overload
+def asarray(
+    obj: U_co,
+    itemsize: None | int = ...,
+    unicode: L[False] = ...,
+    order: _OrderKACF = ...,
+) -> _CharArray[str_]: ...
+@overload
+def asarray(
+    obj: S_co,
+    itemsize: None | int = ...,
+    unicode: L[False] = ...,
+    order: _OrderKACF = ...,
+) -> _CharArray[bytes_]: ...
+@overload
+def asarray(
+    obj: object,
+    itemsize: None | int = ...,
+    unicode: L[False] = ...,
+    order: _OrderKACF = ...,
+) -> _CharArray[bytes_]: ...
+@overload
+def asarray(
+    obj: object,
+    itemsize: None | int = ...,
+    unicode: L[True] = ...,
+    order: _OrderKACF = ...,
+) -> _CharArray[str_]: ...
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/einsumfunc.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/einsumfunc.py
new file mode 100644
index 00000000..01966f0f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/einsumfunc.py
@@ -0,0 +1,1443 @@
+"""
+Implementation of optimized einsum.
+
+"""
+import itertools
+import operator
+
+from numpy.core.multiarray import c_einsum
+from numpy.core.numeric import asanyarray, tensordot
+from numpy.core.overrides import array_function_dispatch
+
+__all__ = ['einsum', 'einsum_path']
+
+einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
+einsum_symbols_set = set(einsum_symbols)
+
+
+def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
+    """
+    Computes the number of FLOPS in the contraction.
+
+    Parameters
+    ----------
+    idx_contraction : iterable
+        The indices involved in the contraction
+    inner : bool
+        Does this contraction require an inner product?
+    num_terms : int
+        The number of terms in a contraction
+    size_dictionary : dict
+        The size of each of the indices in idx_contraction
+
+    Returns
+    -------
+    flop_count : int
+        The total number of FLOPS required for the contraction.
+
+    Examples
+    --------
+
+    >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
+    30
+
+    >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
+    60
+
+    """
+
+    overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
+    op_factor = max(1, num_terms - 1)
+    if inner:
+        op_factor += 1
+
+    return overall_size * op_factor
+
+def _compute_size_by_dict(indices, idx_dict):
+    """
+    Computes the product of the elements in indices based on the dictionary
+    idx_dict.
+
+    Parameters
+    ----------
+    indices : iterable
+        Indices to base the product on.
+    idx_dict : dictionary
+        Dictionary of index sizes
+
+    Returns
+    -------
+    ret : int
+        The resulting product.
+
+    Examples
+    --------
+    >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
+    90
+
+    """
+    ret = 1
+    for i in indices:
+        ret *= idx_dict[i]
+    return ret
+
+
+def _find_contraction(positions, input_sets, output_set):
+    """
+    Finds the contraction for a given set of input and output sets.
+
+    Parameters
+    ----------
+    positions : iterable
+        Integer positions of terms used in the contraction.
+    input_sets : list
+        List of sets that represent the lhs side of the einsum subscript
+    output_set : set
+        Set that represents the rhs side of the overall einsum subscript
+
+    Returns
+    -------
+    new_result : set
+        The indices of the resulting contraction
+    remaining : list
+        List of sets that have not been contracted, the new set is appended to
+        the end of this list
+    idx_removed : set
+        Indices removed from the entire contraction
+    idx_contraction : set
+        The indices used in the current contraction
+
+    Examples
+    --------
+
+    # A simple dot product test case
+    >>> pos = (0, 1)
+    >>> isets = [set('ab'), set('bc')]
+    >>> oset = set('ac')
+    >>> _find_contraction(pos, isets, oset)
+    ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
+
+    # A more complex case with additional terms in the contraction
+    >>> pos = (0, 2)
+    >>> isets = [set('abd'), set('ac'), set('bdc')]
+    >>> oset = set('ac')
+    >>> _find_contraction(pos, isets, oset)
+    ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
+    """
+
+    idx_contract = set()
+    idx_remain = output_set.copy()
+    remaining = []
+    for ind, value in enumerate(input_sets):
+        if ind in positions:
+            idx_contract |= value
+        else:
+            remaining.append(value)
+            idx_remain |= value
+
+    new_result = idx_remain & idx_contract
+    idx_removed = (idx_contract - new_result)
+    remaining.append(new_result)
+
+    return (new_result, remaining, idx_removed, idx_contract)
+
+
+def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
+    """
+    Computes all possible pair contractions, sieves the results based
+    on ``memory_limit`` and returns the lowest cost path. This algorithm
+    scales factorial with respect to the elements in the list ``input_sets``.
+
+    Parameters
+    ----------
+    input_sets : list
+        List of sets that represent the lhs side of the einsum subscript
+    output_set : set
+        Set that represents the rhs side of the overall einsum subscript
+    idx_dict : dictionary
+        Dictionary of index sizes
+    memory_limit : int
+        The maximum number of elements in a temporary array
+
+    Returns
+    -------
+    path : list
+        The optimal contraction order within the memory limit constraint.
+
+    Examples
+    --------
+    >>> isets = [set('abd'), set('ac'), set('bdc')]
+    >>> oset = set()
+    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
+    >>> _optimal_path(isets, oset, idx_sizes, 5000)
+    [(0, 2), (0, 1)]
+    """
+
+    full_results = [(0, [], input_sets)]
+    for iteration in range(len(input_sets) - 1):
+        iter_results = []
+
+        # Compute all unique pairs
+        for curr in full_results:
+            cost, positions, remaining = curr
+            for con in itertools.combinations(range(len(input_sets) - iteration), 2):
+
+                # Find the contraction
+                cont = _find_contraction(con, remaining, output_set)
+                new_result, new_input_sets, idx_removed, idx_contract = cont
+
+                # Sieve the results based on memory_limit
+                new_size = _compute_size_by_dict(new_result, idx_dict)
+                if new_size > memory_limit:
+                    continue
+
+                # Build (total_cost, positions, indices_remaining)
+                total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)
+                new_pos = positions + [con]
+                iter_results.append((total_cost, new_pos, new_input_sets))
+
+        # Update combinatorial list, if we did not find anything return best
+        # path + remaining contractions
+        if iter_results:
+            full_results = iter_results
+        else:
+            path = min(full_results, key=lambda x: x[0])[1]
+            path += [tuple(range(len(input_sets) - iteration))]
+            return path
+
+    # If we have not found anything return single einsum contraction
+    if len(full_results) == 0:
+        return [tuple(range(len(input_sets)))]
+
+    path = min(full_results, key=lambda x: x[0])[1]
+    return path
+
+def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):
+    """Compute the cost (removed size + flops) and resultant indices for
+    performing the contraction specified by ``positions``.
+
+    Parameters
+    ----------
+    positions : tuple of int
+        The locations of the proposed tensors to contract.
+    input_sets : list of sets
+        The indices found on each tensors.
+    output_set : set
+        The output indices of the expression.
+    idx_dict : dict
+        Mapping of each index to its size.
+    memory_limit : int
+        The total allowed size for an intermediary tensor.
+    path_cost : int
+        The contraction cost so far.
+    naive_cost : int
+        The cost of the unoptimized expression.
+
+    Returns
+    -------
+    cost : (int, int)
+        A tuple containing the size of any indices removed, and the flop cost.
+    positions : tuple of int
+        The locations of the proposed tensors to contract.
+    new_input_sets : list of sets
+        The resulting new list of indices if this proposed contraction is performed.
+
+    """
+
+    # Find the contraction
+    contract = _find_contraction(positions, input_sets, output_set)
+    idx_result, new_input_sets, idx_removed, idx_contract = contract
+
+    # Sieve the results based on memory_limit
+    new_size = _compute_size_by_dict(idx_result, idx_dict)
+    if new_size > memory_limit:
+        return None
+
+    # Build sort tuple
+    old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions)
+    removed_size = sum(old_sizes) - new_size
+
+    # NB: removed_size used to be just the size of any removed indices i.e.:
+    #     helpers.compute_size_by_dict(idx_removed, idx_dict)
+    cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
+    sort = (-removed_size, cost)
+
+    # Sieve based on total cost as well
+    if (path_cost + cost) > naive_cost:
+        return None
+
+    # Add contraction to possible choices
+    return [sort, positions, new_input_sets]
+
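+# Worked example (illustrative only): for a plain GEMM contraction
+# 'ij,jk->ik' with every dimension of size 10, the sort tuple built above is
+#
+#     removed_size = (100 + 100) - 100 = 100
+#     cost = _flop_count({'i', 'j', 'k'}, {'j'}, 2, dims)  # ~2 * 1000 flops
+#     sort = (-removed_size, cost) = (-100, cost)
+#
+# so candidates that shrink the working set the most are preferred, with flop
+# cost breaking ties.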
+
+def _update_other_results(results, best):
+    """Update the positions and provisional input_sets of ``results`` based on
+    performing the contraction result ``best``. Remove any entries involving
+    the tensors just contracted.
+
+    Parameters
+    ----------
+    results : list
+        List of contraction results produced by ``_parse_possible_contraction``.
+    best : list
+        The best contraction of ``results``, i.e. the one that will be performed.
+
+    Returns
+    -------
+    mod_results : list
+        The list of modified results, updated with outcome of ``best`` contraction.
+    """
+
+    best_con = best[1]
+    bx, by = best_con
+    mod_results = []
+
+    for cost, (x, y), con_sets in results:
+
+        # Ignore results involving tensors just contracted
+        if x in best_con or y in best_con:
+            continue
+
+        # Update the input_sets
+        del con_sets[by - int(by > x) - int(by > y)]
+        del con_sets[bx - int(bx > x) - int(bx > y)]
+        con_sets.insert(-1, best[2][-1])
+
+        # Update the position indices
+        mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
+        mod_results.append((cost, mod_con, con_sets))
+
+    return mod_results
+
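+# Worked example (illustrative only): suppose four tensors remain and
+# ``best_con = (0, 1)``. A surviving candidate over positions (2, 3) is kept,
+# but because tensors 0 and 1 disappear and their result is appended at the
+# end, the index arithmetic above remaps its positions to (0, 1) and rebuilds
+# its provisional input_sets to match the post-contraction ordering.
+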
+def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
+    """
+    Finds the path by contracting the best pair until the input list is
+    exhausted. The best pair is found by minimizing the tuple
+    ``(-prod(indices_removed), cost)``.  What this amounts to is prioritizing
+    matrix multiplication or inner product operations, then Hadamard-like
+    operations, and finally outer operations. Outer products are limited by
+    ``memory_limit``. This algorithm scales cubically with respect to the
+    number of elements in the list ``input_sets``.
+
+    Parameters
+    ----------
+    input_sets : list
+        List of sets that represent the lhs side of the einsum subscript
+    output_set : set
+        Set that represents the rhs side of the overall einsum subscript
+    idx_dict : dictionary
+        Dictionary of index sizes
+    memory_limit : int
+        The maximum number of elements in a temporary array
+
+    Returns
+    -------
+    path : list
+        The greedy contraction order within the memory limit constraint.
+
+    Examples
+    --------
+    >>> isets = [set('abd'), set('ac'), set('bdc')]
+    >>> oset = set()
+    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
+    >>> _greedy_path(isets, oset, idx_sizes, 5000)
+    [(0, 2), (0, 1)]
+    """
+
+    # Handle trivial cases that leaked through
+    if len(input_sets) == 1:
+        return [(0,)]
+    elif len(input_sets) == 2:
+        return [(0, 1)]
+
+    # Build up a naive cost
+    contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
+    idx_result, new_input_sets, idx_removed, idx_contract = contract
+    naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)
+
+    # Initially iterate over all pairs
+    comb_iter = itertools.combinations(range(len(input_sets)), 2)
+    known_contractions = []
+
+    path_cost = 0
+    path = []
+
+    for iteration in range(len(input_sets) - 1):
+
+        # Iterate over all pairs on first step, only previously found pairs on subsequent steps
+        for positions in comb_iter:
+
+            # Always initially ignore outer products
+            if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
+                continue
+
+            result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
+                                                 naive_cost)
+            if result is not None:
+                known_contractions.append(result)
+
+        # If we do not have an inner contraction, rescan pairs including outer products
+        if len(known_contractions) == 0:
+
+            # Then check the outer products
+            for positions in itertools.combinations(range(len(input_sets)), 2):
+                result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
+                                                     path_cost, naive_cost)
+                if result is not None:
+                    known_contractions.append(result)
+
+            # If we still did not find any remaining contractions, default back to einsum-like behavior
+            if len(known_contractions) == 0:
+                path.append(tuple(range(len(input_sets))))
+                break
+
+        # Sort based on first index
+        best = min(known_contractions, key=lambda x: x[0])
+
+        # Now propagate as many unused contractions as possible to next iteration
+        known_contractions = _update_other_results(known_contractions, best)
+
+        # Next iteration only compute contractions with the new tensor
+        # All other contractions have been accounted for
+        input_sets = best[2]
+        new_tensor_pos = len(input_sets) - 1
+        comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
+
+        # Update path and total cost
+        path.append(best[1])
+        path_cost += best[0][1]
+
+    return path
+
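+# Worked example (illustrative only): for the docstring example the first
+# greedy pick is (0, 2), since fusing set('abd') (size 8) with set('bdc')
+# (size 24) leaves only {'a', 'c'} (size 3), removing 8 + 24 - 3 = 29
+# elements, more than the (0, 1) or (1, 2) pairings remove, which is why the
+# returned path starts with (0, 2).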
+
+def _can_dot(inputs, result, idx_removed):
+    """
+    Checks if we can use a BLAS (np.tensordot) call and whether it is
+    beneficial to do so.
+
+    Parameters
+    ----------
+    inputs : list of str
+        Specifies the subscripts for summation.
+    result : str
+        Resulting summation.
+    idx_removed : set
+        Indices that are removed in the summation
+
+
+    Returns
+    -------
+    type : bool
+        Returns True if BLAS should and can be used, else False.
+
+    Notes
+    -----
+    If the operation is BLAS level 1 or 2 and the data is not already
+    aligned, we default back to einsum, as the memory movement required
+    to copy is more costly than the operation itself.
+
+
+    Examples
+    --------
+
+    # Standard GEMM operation
+    >>> _can_dot(['ij', 'jk'], 'ik', set('j'))
+    True
+
+    # Can use the standard BLAS, but requires odd data movement
+    >>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
+    False
+
+    # DDOT where the memory is not aligned
+    >>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
+    False
+
+    """
+
+    # All `dot` calls remove indices
+    if len(idx_removed) == 0:
+        return False
+
+    # BLAS can only handle two operands
+    if len(inputs) != 2:
+        return False
+
+    input_left, input_right = inputs
+
+    for c in set(input_left + input_right):
+        # can't deal with repeated indices on same input or more than 2 total
+        nl, nr = input_left.count(c), input_right.count(c)
+        if (nl > 1) or (nr > 1) or (nl + nr > 2):
+            return False
+
+        # can't do implicit summation or dimension collapse e.g.
+        #     "ab,bc->c" (implicitly sum over 'a')
+        #     "ab,ca->ca" (take diagonal of 'a')
+        if nl + nr - 1 == int(c in result):
+            return False
+
+    # Build a few temporaries
+    set_left = set(input_left)
+    set_right = set(input_right)
+    keep_left = set_left - idx_removed
+    keep_right = set_right - idx_removed
+    rs = len(idx_removed)
+
+    # At this point we are a DOT, GEMV, or GEMM operation
+
+    # Handle inner products
+
+    # DDOT with aligned data
+    if input_left == input_right:
+        return True
+
+    # DDOT without aligned data (better to use einsum)
+    if set_left == set_right:
+        return False
+
+    # Handle the 4 possible (aligned) GEMV or GEMM cases
+
+    # GEMM or GEMV no transpose
+    if input_left[-rs:] == input_right[:rs]:
+        return True
+
+    # GEMM or GEMV transpose both
+    if input_left[:rs] == input_right[-rs:]:
+        return True
+
+    # GEMM or GEMV transpose right
+    if input_left[-rs:] == input_right[-rs:]:
+        return True
+
+    # GEMM or GEMV transpose left
+    if input_left[:rs] == input_right[:rs]:
+        return True
+
+    # Einsum is faster than GEMV if we have to copy data
+    if not keep_left or not keep_right:
+        return False
+
+    # We are a matrix-matrix product, but we need to copy data
+    return True
+
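+# Worked example (illustrative only): the four suffix/prefix comparisons
+# above detect aligned GEMM/GEMV layouts. With inputs ['ijk', 'jkl'] and
+# idx_removed {'j', 'k'}, rs = 2 and input_left[-2:] == input_right[:2] ==
+# 'jk', so a no-transpose GEMM applies and _can_dot returns True. By
+# contrast, _can_dot(['ijk', 'j'], 'ik', set('j')) returns False: no
+# alignment matches and keep_right is empty, so the copy a GEMV would need
+# costs more than letting einsum handle it.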
+
+def _parse_einsum_input(operands):
+    """
+    A reproduction of the C-side einsum parsing in Python.
+
+    Returns
+    -------
+    input_strings : str
+        Parsed input strings
+    output_string : str
+        Parsed output string
+    operands : list of array_like
+        The operands to use in the numpy contraction
+
+    Examples
+    --------
+    The operand list is simplified to reduce printing:
+
+    >>> np.random.seed(123)
+    >>> a = np.random.rand(4, 4)
+    >>> b = np.random.rand(4, 4, 4)
+    >>> _parse_einsum_input(('...a,...a->...', a, b))
+    ('za,xza', 'xz', [a, b]) # may vary
+
+    >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
+    ('za,xza', 'xz', [a, b]) # may vary
+    """
+
+    if len(operands) == 0:
+        raise ValueError("No input operands")
+
+    if isinstance(operands[0], str):
+        subscripts = operands[0].replace(" ", "")
+        operands = [asanyarray(v) for v in operands[1:]]
+
+        # Ensure all characters are valid
+        for s in subscripts:
+            if s in '.,->':
+                continue
+            if s not in einsum_symbols:
+                raise ValueError("Character %s is not a valid symbol." % s)
+
+    else:
+        tmp_operands = list(operands)
+        operand_list = []
+        subscript_list = []
+        for p in range(len(operands) // 2):
+            operand_list.append(tmp_operands.pop(0))
+            subscript_list.append(tmp_operands.pop(0))
+
+        output_list = tmp_operands[-1] if len(tmp_operands) else None
+        operands = [asanyarray(v) for v in operand_list]
+        subscripts = ""
+        last = len(subscript_list) - 1
+        for num, sub in enumerate(subscript_list):
+            for s in sub:
+                if s is Ellipsis:
+                    subscripts += "..."
+                else:
+                    try:
+                        s = operator.index(s)
+                    except TypeError as e:
+                        raise TypeError("For this input type lists must contain "
+                                        "either int or Ellipsis") from e
+                    subscripts += einsum_symbols[s]
+            if num != last:
+                subscripts += ","
+
+        if output_list is not None:
+            subscripts += "->"
+            for s in output_list:
+                if s is Ellipsis:
+                    subscripts += "..."
+                else:
+                    try:
+                        s = operator.index(s)
+                    except TypeError as e:
+                        raise TypeError("For this input type lists must contain "
+                                        "either int or Ellipsis") from e
+                    subscripts += einsum_symbols[s]
+    # Check for proper "->"
+    if ("-" in subscripts) or (">" in subscripts):
+        invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
+        if invalid or (subscripts.count("->") != 1):
+            raise ValueError("Subscripts can only contain one '->'.")
+
+    # Parse ellipses
+    if "." in subscripts:
+        used = subscripts.replace(".", "").replace(",", "").replace("->", "")
+        unused = list(einsum_symbols_set - set(used))
+        ellipse_inds = "".join(unused)
+        longest = 0
+
+        if "->" in subscripts:
+            input_tmp, output_sub = subscripts.split("->")
+            split_subscripts = input_tmp.split(",")
+            out_sub = True
+        else:
+            split_subscripts = subscripts.split(',')
+            out_sub = False
+
+        for num, sub in enumerate(split_subscripts):
+            if "." in sub:
+                if (sub.count(".") != 3) or (sub.count("...") != 1):
+                    raise ValueError("Invalid Ellipses.")
+
+                # Take into account numerical values
+                if operands[num].shape == ():
+                    ellipse_count = 0
+                else:
+                    ellipse_count = max(operands[num].ndim, 1)
+                    ellipse_count -= (len(sub) - 3)
+
+                if ellipse_count > longest:
+                    longest = ellipse_count
+
+                if ellipse_count < 0:
+                    raise ValueError("Ellipses lengths do not match.")
+                elif ellipse_count == 0:
+                    split_subscripts[num] = sub.replace('...', '')
+                else:
+                    rep_inds = ellipse_inds[-ellipse_count:]
+                    split_subscripts[num] = sub.replace('...', rep_inds)
+
+        subscripts = ",".join(split_subscripts)
+        if longest == 0:
+            out_ellipse = ""
+        else:
+            out_ellipse = ellipse_inds[-longest:]
+
+        if out_sub:
+            subscripts += "->" + output_sub.replace("...", out_ellipse)
+        else:
+            # Special care for outputless ellipses
+            output_subscript = ""
+            tmp_subscripts = subscripts.replace(",", "")
+            for s in sorted(set(tmp_subscripts)):
+                if s not in (einsum_symbols):
+                    raise ValueError("Character %s is not a valid symbol." % s)
+                if tmp_subscripts.count(s) == 1:
+                    output_subscript += s
+            normal_inds = ''.join(sorted(set(output_subscript) -
+                                         set(out_ellipse)))
+
+            subscripts += "->" + out_ellipse + normal_inds
+
+    # Build output string if does not exist
+    if "->" in subscripts:
+        input_subscripts, output_subscript = subscripts.split("->")
+    else:
+        input_subscripts = subscripts
+        # Build output subscripts
+        tmp_subscripts = subscripts.replace(",", "")
+        output_subscript = ""
+        for s in sorted(set(tmp_subscripts)):
+            if s not in einsum_symbols:
+                raise ValueError("Character %s is not a valid symbol." % s)
+            if tmp_subscripts.count(s) == 1:
+                output_subscript += s
+
+    # Make sure output subscripts are in the input
+    for char in output_subscript:
+        if char not in input_subscripts:
+            raise ValueError("Output character %s did not appear in the input"
+                             % char)
+
+    # Make sure number operands is equivalent to the number of terms
+    if len(input_subscripts.split(',')) != len(operands):
+        raise ValueError("Number of einsum subscripts must be equal to the "
+                         "number of operands.")
+
+    return (input_subscripts, output_subscript, operands)
+
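+# Worked example (illustrative only): the interleaved operand/sublist form is
+# converted to a subscript string via einsum_symbols. For 2-D arrays a and b,
+# _parse_einsum_input((a, [0, 1], b, [1, 2])) maps the integer labels 0, 1, 2
+# to consecutive subscript letters and returns ('ab,bc', 'ac', [a, b]) (per
+# the module's einsum_symbols ordering), the output 'ac' being inferred from
+# the labels that appear exactly once.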
+
+def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):
+    # NOTE: technically, we should only dispatch on array-like arguments, not
+    # subscripts (given as strings). But separating operands into
+    # arrays/subscripts is a little tricky/slow (given einsum's two supported
+    # signatures), so as a practical shortcut we dispatch on everything.
+    # Strings will be ignored for dispatching since they don't define
+    # __array_function__.
+    return operands
+
+
+@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
+def einsum_path(*operands, optimize='greedy', einsum_call=False):
+    """
+    einsum_path(subscripts, *operands, optimize='greedy')
+
+    Evaluates the lowest cost contraction order for an einsum expression by
+    considering the creation of intermediate arrays.
+
+    Parameters
+    ----------
+    subscripts : str
+        Specifies the subscripts for summation.
+    *operands : list of array_like
+        These are the arrays for the operation.
+    optimize : {bool, list, tuple, 'greedy', 'optimal'}
+        Choose the type of path. If a tuple is provided, the second argument is
+        assumed to be the maximum intermediate size created. If only a single
+        argument is provided the largest input or output array size is used
+        as a maximum intermediate size.
+
+        * if a list is given that starts with ``einsum_path``, uses this as the
+          contraction path
+        * if False no optimization is taken
+        * if True defaults to the 'greedy' algorithm
+        * 'optimal' An algorithm that combinatorially explores all possible
+          ways of contracting the listed tensors and chooses the least costly
+          path. Scales exponentially with the number of terms in the
+          contraction.
+        * 'greedy' An algorithm that chooses the best pair contraction
+          at each step. Effectively, this algorithm searches the largest inner,
+          Hadamard, and then outer products at each step. Scales cubically with
+          the number of terms in the contraction. Equivalent to the 'optimal'
+          path for most contractions.
+
+        Default is 'greedy'.
+
+    Returns
+    -------
+    path : list of tuples
+        A list representation of the einsum path.
+    string_repr : str
+        A printable representation of the einsum path.
+
+    Notes
+    -----
+    The resulting path indicates which terms of the input contraction should be
+    contracted first, the result of this contraction is then appended to the
+    end of the contraction list. This list can then be iterated over until all
+    intermediate contractions are complete.
+
+    See Also
+    --------
+    einsum, linalg.multi_dot
+
+    Examples
+    --------
+
+    We can begin with a chain dot example. In this case, it is optimal to
+    contract the ``b`` and ``c`` tensors first as represented by the first
+    element of the path ``(1, 2)``. The resulting tensor is added to the end
+    of the contraction and the remaining contraction ``(0, 1)`` is then
+    completed.
+
+    >>> np.random.seed(123)
+    >>> a = np.random.rand(2, 2)
+    >>> b = np.random.rand(2, 5)
+    >>> c = np.random.rand(5, 2)
+    >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
+    >>> print(path_info[0])
+    ['einsum_path', (1, 2), (0, 1)]
+    >>> print(path_info[1])
+      Complete contraction:  ij,jk,kl->il # may vary
+             Naive scaling:  4
+         Optimized scaling:  3
+          Naive FLOP count:  1.600e+02
+      Optimized FLOP count:  5.600e+01
+       Theoretical speedup:  2.857
+      Largest intermediate:  4.000e+00 elements
+    -------------------------------------------------------------------------
+    scaling                  current                                remaining
+    -------------------------------------------------------------------------
+       3                   kl,jk->jl                                ij,jl->il
+       3                   jl,ij->il                                   il->il
+
+
+    A more complex index transformation example.
+
+    >>> I = np.random.rand(10, 10, 10, 10)
+    >>> C = np.random.rand(10, 10)
+    >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
+    ...                            optimize='greedy')
+
+    >>> print(path_info[0])
+    ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
+    >>> print(path_info[1]) 
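+    # At this point (editor's note, summarizing the parsing above) path_type
+    # is one of:
+    #   False: no optimization
+    #   'greedy' or 'optimal': a named algorithm, optionally with a memory
+    #       cap supplied as a tuple such as ('greedy', 2e6)
+    #   ['einsum_path', (0, 1), ...]: an explicit, precomputed path
+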
+      Complete contraction:  ea,fb,abcd,gc,hd->efgh # may vary
+             Naive scaling:  8
+         Optimized scaling:  5
+          Naive FLOP count:  8.000e+08
+      Optimized FLOP count:  8.000e+05
+       Theoretical speedup:  1000.000
+      Largest intermediate:  1.000e+04 elements
+    --------------------------------------------------------------------------
+    scaling                  current                                remaining
+    --------------------------------------------------------------------------
+       5               abcd,ea->bcde                      fb,gc,hd,bcde->efgh
+       5               bcde,fb->cdef                         gc,hd,cdef->efgh
+       5               cdef,gc->defg                            hd,defg->efgh
+       5               defg,hd->efgh                               efgh->efgh
+    """
+
+    # Figure out what the path really is
+    path_type = optimize
+    if path_type is True:
+        path_type = 'greedy'
+    if path_type is None:
+        path_type = False
+
+    explicit_einsum_path = False
+    memory_limit = None
+
+    # No optimization or a named path algorithm
+    if (path_type is False) or isinstance(path_type, str):
+        pass
+
+    # Given an explicit path
+    elif len(path_type) and (path_type[0] == 'einsum_path'):
+        explicit_einsum_path = True
+
+    # Path tuple with memory limit
+    elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
+            isinstance(path_type[1], (int, float))):
+        memory_limit = int(path_type[1])
+        path_type = path_type[0]
+
+    else:
+        raise TypeError("Did not understand the path: %s" % str(path_type))
+
+    # Hidden option, only einsum should call this
+    einsum_call_arg = einsum_call
+
+    # Python side parsing
+    input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
+
+    # Build a few useful list and sets
+    input_list = input_subscripts.split(',')
+    input_sets = [set(x) for x in input_list]
+    output_set = set(output_subscript)
+    indices = set(input_subscripts.replace(',', ''))
+
+    # Get length of each unique dimension and ensure all dimensions are correct
+    dimension_dict = {}
+    broadcast_indices = [[] for x in range(len(input_list))]
+    for tnum, term in enumerate(input_list):
+        sh = operands[tnum].shape
+        if len(sh) != len(term):
+            raise ValueError("Einstein sum subscript %s does not contain the "
+                             "correct number of indices for operand %d."
+                             % (term, tnum))
+        for cnum, char in enumerate(term):
+            dim = sh[cnum]
+
+            # Build out broadcast indices
+            if dim == 1:
+                broadcast_indices[tnum].append(char)
+
+            if char in dimension_dict.keys():
+                # For broadcasting cases we always want the largest dim size
+                if dimension_dict[char] == 1:
+                    dimension_dict[char] = dim
+                elif dim not in (1, dimension_dict[char]):
+                    raise ValueError("Size of label '%s' for operand %d (%d) "
+                                     "does not match previous terms (%d)."
+                                     % (char, tnum, dimension_dict[char], dim))
+            else:
+                dimension_dict[char] = dim
+
+    # Convert broadcast inds to sets
+    broadcast_indices = [set(x) for x in broadcast_indices]
+
+    # Compute size of each input array plus the output array
+    size_list = [_compute_size_by_dict(term, dimension_dict)
+                 for term in input_list + [output_subscript]]
+    max_size = max(size_list)
+
+    if memory_limit is None:
+        memory_arg = max_size
+    else:
+        memory_arg = memory_limit
+
+    # Compute naive cost
+    # This isn't quite right, need to look into exactly how einsum does this
+    inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
+    naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)
+
+    # Compute the path
+    if explicit_einsum_path:
+        path = path_type[1:]
+    elif (
+        (path_type is False)
+        or (len(input_list) in [1, 2])
+        or (indices == output_set)
+    ):
+        # Nothing to be optimized, leave it to einsum
+        path = [tuple(range(len(input_list)))]
+    elif path_type == "greedy":
+        path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
+    elif path_type == "optimal":
+        path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
+    else:
+        raise KeyError("Path name %s not found" % path_type)
+
+    cost_list, scale_list, size_list, contraction_list = [], [], [], []
+
+    # Build contraction tuple (positions, gemm, einsum_str, remaining)
+    for cnum, contract_inds in enumerate(path):
+        # Make sure we remove inds from right to left
+        contract_inds = tuple(sorted(list(contract_inds), reverse=True))
+
+        contract = _find_contraction(contract_inds, input_sets, output_set)
+        out_inds, input_sets, idx_removed, idx_contract = contract
+
+        cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
+        cost_list.append(cost)
+        scale_list.append(len(idx_contract))
+        size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
+
+        bcast = set()
+        tmp_inputs = []
+        for x in contract_inds:
+            tmp_inputs.append(input_list.pop(x))
+            bcast |= broadcast_indices.pop(x)
+
+        new_bcast_inds = bcast - idx_removed
+
+        # If we're broadcasting, nix blas
+        if not len(idx_removed & bcast):
+            do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
+        else:
+            do_blas = False
+
+        # Last contraction
+        if (cnum - len(path)) == -1:
+            idx_result = output_subscript
+        else:
+            sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
+            idx_result = "".join([x[1] for x in sorted(sort_result)])
+
+        input_list.append(idx_result)
+        broadcast_indices.append(new_bcast_inds)
+        einsum_str = ",".join(tmp_inputs) + "->" + idx_result
+
+        contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
+        contraction_list.append(contraction)
+
+    opt_cost = sum(cost_list) + 1
+
+    if len(input_list) != 1:
+        # Explicit "einsum_path" is usually trusted, but we detect this kind of
+        # mistake in order to prevent from returning an intermediate value.
+        raise RuntimeError(
+            "Invalid einsum_path is specified: {} more operands has to be "
+            "contracted.".format(len(input_list) - 1))
+
+    if einsum_call_arg:
+        return (operands, contraction_list)
+
+    # Return the path along with a nice string representation
+    overall_contraction = input_subscripts + "->" + output_subscript
+    header = ("scaling", "current", "remaining")
+
+    speedup = naive_cost / opt_cost
+    max_i = max(size_list)
+
+    path_print  = "  Complete contraction:  %s\n" % overall_contraction
+    path_print += "         Naive scaling:  %d\n" % len(indices)
+    path_print += "     Optimized scaling:  %d\n" % max(scale_list)
+    path_print += "      Naive FLOP count:  %.3e\n" % naive_cost
+    path_print += "  Optimized FLOP count:  %.3e\n" % opt_cost
+    path_print += "   Theoretical speedup:  %3.3f\n" % speedup
+    path_print += "  Largest intermediate:  %.3e elements\n" % max_i
+    path_print += "-" * 74 + "\n"
+    path_print += "%6s %24s %40s\n" % header
+    path_print += "-" * 74
+
+    for n, contraction in enumerate(contraction_list):
+        inds, idx_rm, einsum_str, remaining, blas = contraction
+        remaining_str = ",".join(remaining) + "->" + output_subscript
+        path_run = (scale_list[n], einsum_str, remaining_str)
+        path_print += "\n%4d    %24s %40s" % path_run
+
+    path = ['einsum_path'] + path
+    return (path, path_print)
+
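+# Editor's sketch (illustrative, not part of the NumPy source): a returned
+# path can be applied by hand. For the chain-dot docstring example,
+# ['einsum_path', (1, 2), (0, 1)] corresponds to:
+#
+#     tmp = np.einsum('jk,kl->jl', b, c)    # contract operands 1 and 2 first
+#     out = np.einsum('ij,jl->il', a, tmp)  # then the remaining pair
+#
+# matching the "current"/"remaining" rows of the printed report.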
+
+def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
+    # Arguably we dispatch on more arguments than we really should; see note in
+    # _einsum_path_dispatcher for why.
+    yield from operands
+    yield out
+
+
+# Rewrite einsum to handle different cases
+@array_function_dispatch(_einsum_dispatcher, module='numpy')
+def einsum(*operands, out=None, optimize=False, **kwargs):
+    """
+    einsum(subscripts, *operands, out=None, dtype=None, order='K',
+           casting='safe', optimize=False)
+
+    Evaluates the Einstein summation convention on the operands.
+
+    Using the Einstein summation convention, many common multi-dimensional,
+    linear algebraic array operations can be represented in a simple fashion.
+    In *implicit* mode `einsum` computes these values.
+
+    In *explicit* mode, `einsum` provides further flexibility to compute
+    other array operations that might not be considered classical Einstein
+    summation operations, by disabling, or forcing summation over specified
+    subscript labels.
+
+    See the notes and examples for clarification.
+
+    Parameters
+    ----------
+    subscripts : str
+        Specifies the subscripts for summation as comma separated list of
+        subscript labels. An implicit (classical Einstein summation)
+        calculation is performed unless the explicit indicator '->' is
+        included as well as subscript labels of the precise output form.
+    operands : list of array_like
+        These are the arrays for the operation.
+    out : ndarray, optional
+        If provided, the calculation is done into this array.
+    dtype : {data-type, None}, optional
+        If provided, forces the calculation to use the data type specified.
+        Note that you may have to also give a more liberal `casting`
+        parameter to allow the conversions. Default is None.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Controls the memory layout of the output. 'C' means it should
+        be C contiguous. 'F' means it should be Fortran contiguous,
+        'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
+        'K' means it should be as close to the layout of the inputs as
+        is possible, including arbitrarily permuted axes.
+        Default is 'K'.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur.  Setting this to
+        'unsafe' is not recommended, as it can adversely affect accumulations.
+
+          * 'no' means the data types should not be cast at all.
+          * 'equiv' means only byte-order changes are allowed.
+          * 'safe' means only casts which can preserve values are allowed.
+          * 'same_kind' means only safe casts or casts within a kind,
+            like float64 to float32, are allowed.
+          * 'unsafe' means any data conversions may be done.
+
+        Default is 'safe'.
+    optimize : {False, True, 'greedy', 'optimal'}, optional
+        Controls if intermediate optimization should occur. No optimization
+        will occur if False and True will default to the 'greedy' algorithm.
+        Also accepts an explicit contraction list from the ``np.einsum_path``
+        function. See ``np.einsum_path`` for more details. Defaults to False.
+
+    Returns
+    -------
+    output : ndarray
+        The calculation based on the Einstein summation convention.
+
+    See Also
+    --------
+    einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
+    einops :
+        a similar verbose interface is provided by the
+        `einops <https://github.com/arogozhnikov/einops>`_ package to cover
+        additional operations: transpose, reshape/flatten, repeat/tile,
+        squeeze/unsqueeze and reductions.
+    opt_einsum :
+        `opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_
+        optimizes contraction order for einsum-like expressions
+        in a backend-agnostic manner.
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    The Einstein summation convention can be used to compute
+    many multi-dimensional, linear algebraic array operations. `einsum`
+    provides a succinct way of representing these.
+
+    A non-exhaustive list of these operations,
+    which can be computed by `einsum`, is shown below along with examples:
+
+    * Trace of an array, :py:func:`numpy.trace`.
+    * Return a diagonal, :py:func:`numpy.diag`.
+    * Array axis summations, :py:func:`numpy.sum`.
+    * Transpositions and permutations, :py:func:`numpy.transpose`.
+    * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
+    * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
+    * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
+    * Tensor contractions, :py:func:`numpy.tensordot`.
+    * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
+
+    The subscripts string is a comma-separated list of subscript labels,
+    where each label refers to a dimension of the corresponding operand.
+    Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+    is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+    appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
+    view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
+    describes traditional matrix multiplication and is equivalent to
+    :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
+    operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
+    to :py:func:`np.trace(a) <numpy.trace>`.
+
+    In *implicit mode*, the chosen subscripts are important
+    since the axes of the output are reordered alphabetically.  This
+    means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+    ``np.einsum('ji', a)`` takes its transpose. Additionally,
+    ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
+    ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+    multiplication since subscript 'h' precedes subscript 'i'.
+
+    In *explicit mode* the output can be directly controlled by
+    specifying output subscript labels.  This requires the
+    identifier '->' as well as the list of output subscript labels.
+    This feature increases the flexibility of the function since
+    summing can be disabled or forced when required. The call
+    ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
+    and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
+    The difference is that `einsum` does not allow broadcasting by default.
+    Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
+    order of the output subscript labels and therefore returns matrix
+    multiplication, unlike the example above in implicit mode.
+
+    To enable and control broadcasting, use an ellipsis.  Default
+    NumPy-style broadcasting is done by adding an ellipsis
+    to the left of each term, like ``np.einsum('...ii->...i', a)``.
+    To take the trace along the first and last axes,
+    you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
+    product with the left-most indices instead of rightmost, one can do
+    ``np.einsum('ij...,jk...->ik...', a, b)``.
+
+    When there is only one operand, no axes are summed, and no output
+    parameter is provided, a view into the operand is returned instead
+    of a new array.  Thus, taking the diagonal as ``np.einsum('ii->i', a)``
+    produces a view (changed in version 1.10.0).
+
+    `einsum` also provides an alternative way to provide the subscripts
+    and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
+    If the output shape is not provided in this format `einsum` will be
+    calculated in implicit mode, otherwise it will be performed explicitly.
+    The examples below have corresponding `einsum` calls with the two
+    parameter methods.
+
+    .. versionadded:: 1.10.0
+
+    Views returned from einsum are now writeable whenever the input array
+    is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
+    have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
+    and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
+    of a 2D array.
+
+    .. versionadded:: 1.12.0
+
+    Added the ``optimize`` argument which will optimize the contraction order
+    of an einsum expression. For a contraction with three or more operands this
+    can greatly increase the computational efficiency at the cost of a larger
+    memory footprint during computation.
+
+    Typically a 'greedy' algorithm is applied which empirical tests have shown
+    returns the optimal path in the majority of cases. In some cases 'optimal'
+    will return the superlative path through a more expensive, exhaustive search.
+    For iterative calculations it may be advisable to calculate the optimal path
+    once and reuse that path by supplying it as an argument. An example is given
+    below.
+
+    See :py:func:`numpy.einsum_path` for more details.
+
+    Examples
+    --------
+    >>> a = np.arange(25).reshape(5,5)
+    >>> b = np.arange(5)
+    >>> c = np.arange(6).reshape(2,3)
+
+    Trace of a matrix:
+
+    >>> np.einsum('ii', a)
+    60
+    >>> np.einsum(a, [0,0])
+    60
+    >>> np.trace(a)
+    60
+
+    Extract the diagonal (requires explicit form):
+
+    >>> np.einsum('ii->i', a)
+    array([ 0,  6, 12, 18, 24])
+    >>> np.einsum(a, [0,0], [0])
+    array([ 0,  6, 12, 18, 24])
+    >>> np.diag(a)
+    array([ 0,  6, 12, 18, 24])
+
+    Sum over an axis (requires explicit form):
+
+    >>> np.einsum('ij->i', a)
+    array([ 10,  35,  60,  85, 110])
+    >>> np.einsum(a, [0,1], [0])
+    array([ 10,  35,  60,  85, 110])
+    >>> np.sum(a, axis=1)
+    array([ 10,  35,  60,  85, 110])
+
+    For higher dimensional arrays summing a single axis can be done with ellipsis:
+
+    >>> np.einsum('...j->...', a)
+    array([ 10,  35,  60,  85, 110])
+    >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
+    array([ 10,  35,  60,  85, 110])
+
+    Compute a matrix transpose, or reorder any number of axes:
+
+    >>> np.einsum('ji', c)
+    array([[0, 3],
+           [1, 4],
+           [2, 5]])
+    >>> np.einsum('ij->ji', c)
+    array([[0, 3],
+           [1, 4],
+           [2, 5]])
+    >>> np.einsum(c, [1,0])
+    array([[0, 3],
+           [1, 4],
+           [2, 5]])
+    >>> np.transpose(c)
+    array([[0, 3],
+           [1, 4],
+           [2, 5]])
+
+    Vector inner products:
+
+    >>> np.einsum('i,i', b, b)
+    30
+    >>> np.einsum(b, [0], b, [0])
+    30
+    >>> np.inner(b,b)
+    30
+
+    Matrix vector multiplication:
+
+    >>> np.einsum('ij,j', a, b)
+    array([ 30,  80, 130, 180, 230])
+    >>> np.einsum(a, [0,1], b, [1])
+    array([ 30,  80, 130, 180, 230])
+    >>> np.dot(a, b)
+    array([ 30,  80, 130, 180, 230])
+    >>> np.einsum('...j,j', a, b)
+    array([ 30,  80, 130, 180, 230])
+
+    Broadcasting and scalar multiplication:
+
+    >>> np.einsum('..., ...', 3, c)
+    array([[ 0,  3,  6],
+           [ 9, 12, 15]])
+    >>> np.einsum(',ij', 3, c)
+    array([[ 0,  3,  6],
+           [ 9, 12, 15]])
+    >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
+    array([[ 0,  3,  6],
+           [ 9, 12, 15]])
+    >>> np.multiply(3, c)
+    array([[ 0,  3,  6],
+           [ 9, 12, 15]])
+
+    Vector outer product:
+
+    >>> np.einsum('i,j', np.arange(2)+1, b)
+    array([[0, 1, 2, 3, 4],
+           [0, 2, 4, 6, 8]])
+    >>> np.einsum(np.arange(2)+1, [0], b, [1])
+    array([[0, 1, 2, 3, 4],
+           [0, 2, 4, 6, 8]])
+    >>> np.outer(np.arange(2)+1, b)
+    array([[0, 1, 2, 3, 4],
+           [0, 2, 4, 6, 8]])
+
+    Tensor contraction:
+
+    >>> a = np.arange(60.).reshape(3,4,5)
+    >>> b = np.arange(24.).reshape(4,3,2)
+    >>> np.einsum('ijk,jil->kl', a, b)
+    array([[4400., 4730.],
+           [4532., 4874.],
+           [4664., 5018.],
+           [4796., 5162.],
+           [4928., 5306.]])
+    >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
+    array([[4400., 4730.],
+           [4532., 4874.],
+           [4664., 5018.],
+           [4796., 5162.],
+           [4928., 5306.]])
+    >>> np.tensordot(a,b, axes=([1,0],[0,1]))
+    array([[4400., 4730.],
+           [4532., 4874.],
+           [4664., 5018.],
+           [4796., 5162.],
+           [4928., 5306.]])
+
+    Writeable returned arrays (since version 1.10.0):
+
+    >>> a = np.zeros((3, 3))
+    >>> np.einsum('ii->i', a)[:] = 1
+    >>> a
+    array([[1., 0., 0.],
+           [0., 1., 0.],
+           [0., 0., 1.]])
+
+    Example of ellipsis use:
+
+    >>> a = np.arange(6).reshape((3,2))
+    >>> b = np.arange(12).reshape((4,3))
+    >>> np.einsum('ki,jk->ij', a, b)
+    array([[10, 28, 46, 64],
+           [13, 40, 67, 94]])
+    >>> np.einsum('ki,...k->i...', a, b)
+    array([[10, 28, 46, 64],
+           [13, 40, 67, 94]])
+    >>> np.einsum('k...,jk', a, b)
+    array([[10, 28, 46, 64],
+           [13, 40, 67, 94]])
+
+    Chained array operations. For more complicated contractions, speed ups
+    might be achieved by repeatedly computing a 'greedy' path or pre-computing the
+    'optimal' path and repeatedly applying it, using an
+    `einsum_path` insertion (since version 1.12.0). Performance improvements can be
+    particularly significant with larger arrays:
+
+    >>> a = np.ones(64).reshape(2,4,8)
+
+    Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5).
+
+    >>> for iteration in range(500):
+    ...     _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
+
+    Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
+
+    >>> for iteration in range(500):
+    ...     _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
+
+    Greedy `einsum` (faster optimal path approximation): ~160ms
+
+    >>> for iteration in range(500):
+    ...     _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
+
+    Optimal `einsum` (best usage pattern in some use cases): ~110ms
+
+    >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0]
+    >>> for iteration in range(500):
+    ...     _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
+
+    """
+    # Special handling if out is specified
+    specified_out = out is not None
+
+    # If no optimization, run pure einsum
+    if optimize is False:
+        if specified_out:
+            kwargs['out'] = out
+        return c_einsum(*operands, **kwargs)
+
+    # Check the kwargs to avoid a more cryptic error later, without having to
+    # repeat default values here
+    valid_einsum_kwargs = ['dtype', 'order', 'casting']
+    unknown_kwargs = [k for (k, v) in kwargs.items() if
+                      k not in valid_einsum_kwargs]
+    if len(unknown_kwargs):
+        raise TypeError("Did not understand the following kwargs: %s"
+                        % unknown_kwargs)
+
+    # Build the contraction list and operand
+    operands, contraction_list = einsum_path(*operands, optimize=optimize,
+                                             einsum_call=True)
+
+    # Handle order kwarg for output array, c_einsum allows mixed case
+    output_order = kwargs.pop('order', 'K')
+    if output_order.upper() == 'A':
+        if all(arr.flags.f_contiguous for arr in operands):
+            output_order = 'F'
+        else:
+            output_order = 'C'
+
+    # Start contraction loop
+    for num, contraction in enumerate(contraction_list):
+        inds, idx_rm, einsum_str, remaining, blas = contraction
+        tmp_operands = [operands.pop(x) for x in inds]
+
+        # Do we need to deal with the output?
+        handle_out = specified_out and ((num + 1) == len(contraction_list))
+
+        # Call tensordot if still possible
+        if blas:
+            # Checks have already been handled
+            input_str, results_index = einsum_str.split('->')
+            input_left, input_right = input_str.split(',')
+
+            tensor_result = input_left + input_right
+            for s in idx_rm:
+                tensor_result = tensor_result.replace(s, "")
+
+            # Find indices to contract over
+            left_pos, right_pos = [], []
+            for s in sorted(idx_rm):
+                left_pos.append(input_left.find(s))
+                right_pos.append(input_right.find(s))
+
+            # Contract!
+            new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))
+
+            # Build a new view if needed
+            if (tensor_result != results_index) or handle_out:
+                if handle_out:
+                    kwargs["out"] = out
+                new_view = c_einsum(tensor_result + '->' + results_index, new_view, **kwargs)
+
+        # Call einsum
+        else:
+            # If out was specified
+            if handle_out:
+                kwargs["out"] = out
+
+            # Do the contraction
+            new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)
+
+        # Append new items and dereference what we can
+        operands.append(new_view)
+        del tmp_operands, new_view
+
+    if specified_out:
+        return out
+    else:
+        return asanyarray(operands[0], order=output_order)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/einsumfunc.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/einsumfunc.pyi
new file mode 100644
index 00000000..ad483bb9
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/einsumfunc.pyi
@@ -0,0 +1,187 @@
+from collections.abc import Sequence
+from typing import TypeVar, Any, overload, Union, Literal
+
+from numpy import (
+    ndarray,
+    dtype,
+    bool_,
+    number,
+    _OrderKACF,
+)
+from numpy._typing import (
+    _ArrayLikeBool_co,
+    _ArrayLikeUInt_co,
+    _ArrayLikeInt_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeComplex_co,
+    _ArrayLikeObject_co,
+    _DTypeLikeBool,
+    _DTypeLikeUInt,
+    _DTypeLikeInt,
+    _DTypeLikeFloat,
+    _DTypeLikeComplex,
+    _DTypeLikeComplex_co,
+    _DTypeLikeObject,
+)
+
+_ArrayType = TypeVar(
+    "_ArrayType",
+    bound=ndarray[Any, dtype[Union[bool_, number[Any]]]],
+)
+
+_OptimizeKind = None | bool | Literal["greedy", "optimal"] | Sequence[Any]
+_CastingSafe = Literal["no", "equiv", "safe", "same_kind"]
+_CastingUnsafe = Literal["unsafe"]
+
+__all__: list[str]
+
+# TODO: Properly handle the `casting`-based combinatorics
+# TODO: We need to evaluate the content of `__subscripts` in order
+# to identify whether or not an array or scalar is returned. At a cursory
+# glance this seems like something that can quite easily be done with
+# a mypy plugin.
+# Something like `is_scalar = bool(__subscripts.partition("->")[-1])`
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: _ArrayLikeBool_co,
+    out: None = ...,
+    dtype: None | _DTypeLikeBool = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: _ArrayLikeUInt_co,
+    out: None = ...,
+    dtype: None | _DTypeLikeUInt = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: _ArrayLikeInt_co,
+    out: None = ...,
+    dtype: None | _DTypeLikeInt = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: _ArrayLikeFloat_co,
+    out: None = ...,
+    dtype: None | _DTypeLikeFloat = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: _ArrayLikeComplex_co,
+    out: None = ...,
+    dtype: None | _DTypeLikeComplex = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: Any,
+    casting: _CastingUnsafe,
+    dtype: None | _DTypeLikeComplex_co = ...,
+    out: None = ...,
+    order: _OrderKACF = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: _ArrayLikeComplex_co,
+    out: _ArrayType,
+    dtype: None | _DTypeLikeComplex_co = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> _ArrayType: ...
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: Any,
+    out: _ArrayType,
+    casting: _CastingUnsafe,
+    dtype: None | _DTypeLikeComplex_co = ...,
+    order: _OrderKACF = ...,
+    optimize: _OptimizeKind = ...,
+) -> _ArrayType: ...
+
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: _ArrayLikeObject_co,
+    out: None = ...,
+    dtype: None | _DTypeLikeObject = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: Any,
+    casting: _CastingUnsafe,
+    dtype: None | _DTypeLikeObject = ...,
+    out: None = ...,
+    order: _OrderKACF = ...,
+    optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: _ArrayLikeObject_co,
+    out: _ArrayType,
+    dtype: None | _DTypeLikeObject = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingSafe = ...,
+    optimize: _OptimizeKind = ...,
+) -> _ArrayType: ...
+@overload
+def einsum(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: Any,
+    out: _ArrayType,
+    casting: _CastingUnsafe,
+    dtype: None | _DTypeLikeObject = ...,
+    order: _OrderKACF = ...,
+    optimize: _OptimizeKind = ...,
+) -> _ArrayType: ...
+
+# NOTE: `einsum_call` is a hidden kwarg unavailable for public use.
+# It is therefore excluded from the signatures below.
+# NOTE: In practice the list consists of a `str` (first element)
+# and a variable number of integer tuples.
+def einsum_path(
+    subscripts: str | _ArrayLikeInt_co,
+    /,
+    *operands: _ArrayLikeComplex_co | _DTypeLikeObject,
+    optimize: _OptimizeKind = ...,
+) -> tuple[list[Any], str]: ...
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/fromnumeric.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/fromnumeric.py
new file mode 100644
index 00000000..69cabb33
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/fromnumeric.py
@@ -0,0 +1,3920 @@
+"""Module containing non-deprecated functions borrowed from Numeric.
+
+"""
+import functools
+import types
+import warnings
+
+import numpy as np
+from .._utils import set_module
+from . import multiarray as mu
+from . import overrides
+from . import umath as um
+from . import numerictypes as nt
+from .multiarray import asarray, array, asanyarray, concatenate
+from . import _methods
+
+_dt_ = nt.sctype2char
+
+# functions that are methods
+__all__ = [
+    'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
+    'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
+    'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
+    'max', 'min',
+    'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
+    'ravel', 'repeat', 'reshape', 'resize', 'round', 'round_',
+    'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
+    'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
+]
+
+_gentype = types.GeneratorType
+# save away Python sum
+_sum_ = sum
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
+# functions that are now methods
+def _wrapit(obj, method, *args, **kwds):
+    try:
+        wrap = obj.__array_wrap__
+    except AttributeError:
+        wrap = None
+    result = getattr(asarray(obj), method)(*args, **kwds)
+    if wrap:
+        if not isinstance(result, mu.ndarray):
+            result = asarray(result)
+        result = wrap(result)
+    return result
+
+
+def _wrapfunc(obj, method, *args, **kwds):
+    bound = getattr(obj, method, None)
+    if bound is None:
+        return _wrapit(obj, method, *args, **kwds)
+
+    try:
+        return bound(*args, **kwds)
+    except TypeError:
+        # A TypeError occurs if the object does have such a method in its
+        # class, but its signature is not identical to that of NumPy's. This
+        # situation has occurred in the case of a downstream library like
+        # 'pandas'.
+        #
+        # Call _wrapit from within the except clause to ensure a potential
+        # exception has a traceback chain.
+        return _wrapit(obj, method, *args, **kwds)
+
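+# Editor's note (illustrative): _wrapfunc is what lets functions such as
+# np.take accept plain Python sequences. np.take([4, 3, 5], [0, 2]) finds no
+# 'take' attribute on the list, so _wrapit converts it with asarray and calls
+# the ndarray method instead.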
+
+def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
+    passkwargs = {k: v for k, v in kwargs.items()
+                  if v is not np._NoValue}
+
+    if type(obj) is not mu.ndarray:
+        try:
+            reduction = getattr(obj, method)
+        except AttributeError:
+            pass
+        else:
+            # This branch is needed for reductions like any which don't
+            # support a dtype.
+            if dtype is not None:
+                return reduction(axis=axis, dtype=dtype, out=out, **passkwargs)
+            else:
+                return reduction(axis=axis, out=out, **passkwargs)
+
+    return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
+
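+# Editor's note (illustrative): _wrapreduction lets a reduction such as
+# np.sum delegate to a duck-typed object's own method. If obj is not an
+# ndarray but defines .sum(axis=..., out=...), that method is called with
+# the non-default keyword arguments; otherwise the reduction falls through
+# to ufunc.reduce.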
+
+def _take_dispatcher(a, indices, axis=None, out=None, mode=None):
+    return (a, out)
+
+
+@array_function_dispatch(_take_dispatcher)
+def take(a, indices, axis=None, out=None, mode='raise'):
+    """
+    Take elements from an array along an axis.
+
+    When axis is not None, this function does the same thing as "fancy"
+    indexing (indexing arrays using arrays); however, it can be easier to use
+    if you need elements along a given axis. A call such as
+    ``np.take(arr, indices, axis=3)`` is equivalent to
+    ``arr[:,:,:,indices,...]``.
+
+    Explained without fancy indexing, this is equivalent to the following use
+    of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
+    indices::
+
+        Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+        Nj = indices.shape
+        for ii in ndindex(Ni):
+            for jj in ndindex(Nj):
+                for kk in ndindex(Nk):
+                    out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
+
+    Parameters
+    ----------
+    a : array_like (Ni..., M, Nk...)
+        The source array.
+    indices : array_like (Nj...)
+        The indices of the values to extract.
+
+        .. versionadded:: 1.8.0
+
+        Also allow scalars for indices.
+    axis : int, optional
+        The axis over which to select values. By default, the flattened
+        input array is used.
+    out : ndarray, optional (Ni..., Nj..., Nk...)
+        If provided, the result will be placed in this array. It should
+        be of the appropriate shape and dtype. Note that `out` is always
+        buffered if `mode='raise'`; use other modes for better performance.
+    mode : {'raise', 'wrap', 'clip'}, optional
+        Specifies how out-of-bounds indices will behave.
+
+        * 'raise' -- raise an error (default)
+        * 'wrap' -- wrap around
+        * 'clip' -- clip to the range
+
+        'clip' mode means that all indices that are too large are replaced
+        by the index that addresses the last element along that axis. Note
+        that this disables indexing with negative numbers.
+
+    Returns
+    -------
+    out : ndarray (Ni..., Nj..., Nk...)
+        The returned array has the same type as `a`.
+
+    See Also
+    --------
+    compress : Take elements using a boolean mask
+    ndarray.take : equivalent method
+    take_along_axis : Take elements by matching the array and the index arrays
+
+    Notes
+    -----
+
+    By eliminating the inner loop in the description above, and using `s_` to
+    build simple slice objects, `take` can be expressed in terms of applying
+    fancy indexing to each 1-d slice::
+
+        Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+        for ii in ndindex(Ni):
+            for kk in ndindex(Nk):
+                out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices]
+
+    For this reason, it is equivalent to (but faster than) the following use
+    of `apply_along_axis`::
+
+        out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a)
+
+    Examples
+    --------
+    >>> a = [4, 3, 5, 7, 6, 8]
+    >>> indices = [0, 1, 4]
+    >>> np.take(a, indices)
+    array([4, 3, 6])
+
+    In this example, if `a` is an ndarray, "fancy" indexing can be used.
+
+    >>> a = np.array(a)
+    >>> a[indices]
+    array([4, 3, 6])
+
+    If `indices` is not one dimensional, the output also has these dimensions.
+
+    >>> np.take(a, [[0, 1], [2, 3]])
+    array([[4, 3],
+           [5, 7]])
+    """
+    return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
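+
+# Editor's note (not part of the NumPy source): an extra illustrative
+# doctest for the `axis` argument documented above; `a` is sample data.
+#
+#     >>> a = np.arange(12).reshape(3, 4)
+#     >>> np.take(a, [0, 2], axis=1)     # columns 0 and 2 of each row
+#     array([[ 0,  2],
+#            [ 4,  6],
+#            [ 8, 10]])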
+
+
+def _reshape_dispatcher(a, newshape, order=None):
+    return (a,)
+
+
+# not deprecated --- copy if necessary, view otherwise
+@array_function_dispatch(_reshape_dispatcher)
+def reshape(a, newshape, order='C'):
+    """
+    Gives a new shape to an array without changing its data.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to be reshaped.
+    newshape : int or tuple of ints
+        The new shape should be compatible with the original shape. If
+        an integer, then the result will be a 1-D array of that length.
+        One shape dimension can be -1. In this case, the value is
+        inferred from the length of the array and remaining dimensions.
+    order : {'C', 'F', 'A'}, optional
+        Read the elements of `a` using this index order, and place the
+        elements into the reshaped array using this index order.  'C'
+        means to read / write the elements using C-like index order,
+        with the last axis index changing fastest, back to the first
+        axis index changing slowest. 'F' means to read / write the
+        elements using Fortran-like index order, with the first index
+        changing fastest, and the last index changing slowest. Note that
+        the 'C' and 'F' options take no account of the memory layout of
+        the underlying array, and only refer to the order of indexing.
+        'A' means to read / write the elements in Fortran-like index
+        order if `a` is Fortran *contiguous* in memory, C-like order
+        otherwise.
+
+    Returns
+    -------
+    reshaped_array : ndarray
+        This will be a new view object if possible; otherwise, it will
+        be a copy.  Note there is no guarantee of the *memory layout* (C- or
+        Fortran- contiguous) of the returned array.
+
+    See Also
+    --------
+    ndarray.reshape : Equivalent method.
+
+    Notes
+    -----
+    It is not always possible to change the shape of an array without copying
+    the data.
+    
+    The `order` keyword gives the index ordering both for *fetching* the values
+    from `a`, and then *placing* the values into the output array.
+    For example, let's say you have an array:
+
+    >>> a = np.arange(6).reshape((3, 2))
+    >>> a
+    array([[0, 1],
+           [2, 3],
+           [4, 5]])
+
+    You can think of reshaping as first raveling the array (using the given
+    index order), then inserting the elements from the raveled array into the
+    new array using the same kind of index ordering as was used for the
+    raveling.
+
+    >>> np.reshape(a, (2, 3)) # C-like index ordering
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
+    array([[0, 4, 3],
+           [2, 1, 5]])
+    >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
+    array([[0, 4, 3],
+           [2, 1, 5]])
+
+    Examples
+    --------
+    >>> a = np.array([[1,2,3], [4,5,6]])
+    >>> np.reshape(a, 6)
+    array([1, 2, 3, 4, 5, 6])
+    >>> np.reshape(a, 6, order='F')
+    array([1, 4, 2, 5, 3, 6])
+
+    >>> np.reshape(a, (3,-1))       # the unspecified value is inferred to be 2
+    array([[1, 2],
+           [3, 4],
+           [5, 6]])
+    """
+    return _wrapfunc(a, 'reshape', newshape, order=order)
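+
+# Editor's note (not part of the NumPy source): a small check of the
+# "view if possible" behavior described in the Returns section; the names
+# `a` and `b` are illustrative only.
+#
+#     >>> a = np.arange(6)
+#     >>> b = np.reshape(a, (2, 3))
+#     >>> b.base is a                    # no copy was made
+#     True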
+
+
+def _choose_dispatcher(a, choices, out=None, mode=None):
+    yield a
+    yield from choices
+    yield out
+
+
+@array_function_dispatch(_choose_dispatcher)
+def choose(a, choices, out=None, mode='raise'):
+    """
+    Construct an array from an index array and a list of arrays to choose from.
+
+    First of all, if confused or uncertain, definitely look at the Examples -
+    in its full generality, this function is less simple than it might
+    seem from the following code description (below ndi =
+    `numpy.lib.index_tricks`):
+
+    ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
+
+    But this omits some subtleties.  Here is a fully general summary:
+
+    Given an "index" array (`a`) of integers and a sequence of ``n`` arrays
+    (`choices`), `a` and each choice array are first broadcast, as necessary,
+    to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
+    0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
+    for each ``i``.  Then, a new array with shape ``Ba.shape`` is created as
+    follows:
+
+    * if ``mode='raise'`` (the default), then, first of all, each element of
+      ``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose
+      that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)``
+      position in ``Ba`` - then the value at the same position in the new array
+      is the value in ``Bchoices[i]`` at that same position;
+
+    * if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed)
+      integer; modular arithmetic is used to map integers outside the range
+      `[0, n-1]` back into that range; and then the new array is constructed
+      as above;
+
+    * if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed)
+      integer; negative integers are mapped to 0; values greater than ``n-1``
+      are mapped to ``n-1``; and then the new array is constructed as above.
+
+    Parameters
+    ----------
+    a : int array
+        This array must contain integers in ``[0, n-1]``, where ``n`` is the
+        number of choices, unless ``mode=wrap`` or ``mode=clip``, in which
+        cases any integers are permissible.
+    choices : sequence of arrays
+        Choice arrays. `a` and all of the choices must be broadcastable to the
+        same shape.  If `choices` is itself an array (not recommended), then
+        its outermost dimension (i.e., the one corresponding to
+        ``choices.shape[0]``) is taken as defining the "sequence".
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype. Note that `out` is always
+        buffered if ``mode='raise'``; use other modes for better performance.
+    mode : {'raise' (default), 'wrap', 'clip'}, optional
+        Specifies how indices outside ``[0, n-1]`` will be treated:
+
+          * 'raise' : an exception is raised
+          * 'wrap' : value becomes value mod ``n``
+          * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
+
+    Returns
+    -------
+    merged_array : array
+        The merged result.
+
+    Raises
+    ------
+    ValueError: shape mismatch
+        If `a` and each choice array are not all broadcastable to the same
+        shape.
+
+    See Also
+    --------
+    ndarray.choose : equivalent method
+    numpy.take_along_axis : Preferable if `choices` is an array
+
+    Notes
+    -----
+    To reduce the chance of misinterpretation, even though the following
+    "abuse" is nominally supported, `choices` should neither be, nor be
+    thought of as, a single array, i.e., the outermost sequence-like container
+    should be either a list or a tuple.
+
+    Examples
+    --------
+
+    >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+    ...   [20, 21, 22, 23], [30, 31, 32, 33]]
+    >>> np.choose([2, 3, 1, 0], choices
+    ... # the first element of the result will be the first element of the
+    ... # third (2+1) "array" in choices, namely, 20; the second element
+    ... # will be the second element of the fourth (3+1) choice array, i.e.,
+    ... # 31, etc.
+    ... )
+    array([20, 31, 12,  3])
+    >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
+    array([20, 31, 12,  3])
+    >>> # because there are 4 choice arrays
+    >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
+    array([20,  1, 12,  3])
+    >>> # i.e., 0
+
+    A couple of examples illustrating how `choose` broadcasts:
+
+    >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
+    >>> choices = [-10, 10]
+    >>> np.choose(a, choices)
+    array([[ 10, -10,  10],
+           [-10,  10, -10],
+           [ 10, -10,  10]])
+
+    >>> # With thanks to Anne Archibald
+    >>> a = np.array([0, 1]).reshape((2,1,1))
+    >>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
+    >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
+    >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
+    array([[[ 1,  1,  1,  1,  1],
+            [ 2,  2,  2,  2,  2],
+            [ 3,  3,  3,  3,  3]],
+           [[-1, -2, -3, -4, -5],
+            [-1, -2, -3, -4, -5],
+            [-1, -2, -3, -4, -5]]])
+
+    """
+    return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
+
+
+def _repeat_dispatcher(a, repeats, axis=None):
+    return (a,)
+
+
+@array_function_dispatch(_repeat_dispatcher)
+def repeat(a, repeats, axis=None):
+    """
+    Repeat each element of an array after itself.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    repeats : int or array of ints
+        The number of repetitions for each element.  `repeats` is broadcasted
+        to fit the shape of the given axis.
+    axis : int, optional
+        The axis along which to repeat values.  By default, use the
+        flattened input array, and return a flat output array.
+
+    Returns
+    -------
+    repeated_array : ndarray
+        Output array which has the same shape as `a`, except along
+        the given axis.
+
+    See Also
+    --------
+    tile : Tile an array.
+    unique : Find the unique elements of an array.
+
+    Examples
+    --------
+    >>> np.repeat(3, 4)
+    array([3, 3, 3, 3])
+    >>> x = np.array([[1,2],[3,4]])
+    >>> np.repeat(x, 2)
+    array([1, 1, 2, 2, 3, 3, 4, 4])
+    >>> np.repeat(x, 3, axis=1)
+    array([[1, 1, 1, 2, 2, 2],
+           [3, 3, 3, 4, 4, 4]])
+    >>> np.repeat(x, [1, 2], axis=0)
+    array([[1, 2],
+           [3, 4],
+           [3, 4]])
+
+    """
+    return _wrapfunc(a, 'repeat', repeats, axis=axis)
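+
+# Editor's note (not part of the NumPy source): `repeats` may give a count
+# per element, including zero, which drops elements -- an illustrative
+# sketch of the broadcasting described above.
+#
+#     >>> np.repeat([1, 2, 3], [0, 1, 2])
+#     array([2, 3, 3])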
+
+
+def _put_dispatcher(a, ind, v, mode=None):
+    return (a, ind, v)
+
+
+@array_function_dispatch(_put_dispatcher)
+def put(a, ind, v, mode='raise'):
+    """
+    Replaces specified elements of an array with given values.
+
+    The indexing works on the flattened target array. `put` is roughly
+    equivalent to:
+
+    ::
+
+        a.flat[ind] = v
+
+    Parameters
+    ----------
+    a : ndarray
+        Target array.
+    ind : array_like
+        Target indices, interpreted as integers.
+    v : array_like
+        Values to place in `a` at target indices. If `v` is shorter than
+        `ind` it will be repeated as necessary.
+    mode : {'raise', 'wrap', 'clip'}, optional
+        Specifies how out-of-bounds indices will behave.
+
+        * 'raise' -- raise an error (default)
+        * 'wrap' -- wrap around
+        * 'clip' -- clip to the range
+
+        'clip' mode means that all indices that are too large are replaced
+        by the index that addresses the last element along that axis. Note
+        that this disables indexing with negative numbers. In 'raise' mode,
+        if an exception occurs the target array may still be modified.
+
+    See Also
+    --------
+    putmask, place
+    put_along_axis : Put elements by matching the array and the index arrays
+
+    Examples
+    --------
+    >>> a = np.arange(5)
+    >>> np.put(a, [0, 2], [-44, -55])
+    >>> a
+    array([-44,   1, -55,   3,   4])
+
+    >>> a = np.arange(5)
+    >>> np.put(a, 22, -5, mode='clip')
+    >>> a
+    array([ 0,  1,  2,  3, -5])
+
+    """
+    try:
+        put = a.put
+    except AttributeError as e:
+        raise TypeError("argument 1 must be numpy.ndarray, "
+                        "not {name}".format(name=type(a).__name__)) from e
+
+    return put(ind, v, mode=mode)
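+
+# Editor's note (not part of the NumPy source): an illustrative sketch of
+# 'wrap' mode, complementing the 'clip' example above; index 7 wraps to
+# 7 % 5 == 2.
+#
+#     >>> a = np.arange(5)
+#     >>> np.put(a, 7, -9, mode='wrap')
+#     >>> a
+#     array([ 0,  1, -9,  3,  4])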
+
+
+def _swapaxes_dispatcher(a, axis1, axis2):
+    return (a,)
+
+
+@array_function_dispatch(_swapaxes_dispatcher)
+def swapaxes(a, axis1, axis2):
+    """
+    Interchange two axes of an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis1 : int
+        First axis.
+    axis2 : int
+        Second axis.
+
+    Returns
+    -------
+    a_swapped : ndarray
+        For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
+        returned; otherwise a new array is created. For earlier NumPy
+        versions a view of `a` is returned only if the order of the
+        axes is changed, otherwise the input array is returned.
+
+    Examples
+    --------
+    >>> x = np.array([[1,2,3]])
+    >>> np.swapaxes(x,0,1)
+    array([[1],
+           [2],
+           [3]])
+
+    >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
+    >>> x
+    array([[[0, 1],
+            [2, 3]],
+           [[4, 5],
+            [6, 7]]])
+
+    >>> np.swapaxes(x,0,2)
+    array([[[0, 4],
+            [2, 6]],
+           [[1, 5],
+            [3, 7]]])
+
+    """
+    return _wrapfunc(a, 'swapaxes', axis1, axis2)
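+
+# Editor's note (not part of the NumPy source): swapping axes only permutes
+# the shape and strides -- a quick shape-level check as a sketch.
+#
+#     >>> np.swapaxes(np.zeros((2, 3, 4)), 0, 2).shape
+#     (4, 3, 2)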
+
+
+def _transpose_dispatcher(a, axes=None):
+    return (a,)
+
+
+@array_function_dispatch(_transpose_dispatcher)
+def transpose(a, axes=None):
+    """
+    Returns an array with axes transposed.
+
+    For a 1-D array, this returns an unchanged view of the original array, as a
+    transposed vector is simply the same vector.
+    To convert a 1-D array into a 2-D column vector, an additional dimension
+    must be added, e.g., ``np.atleast_2d(a).T`` achieves this, as does
+    ``a[:, np.newaxis]``.
+    For a 2-D array, this is the standard matrix transpose.
+    For an n-D array, if axes are given, their order indicates how the
+    axes are permuted (see Examples). If axes are not provided, then
+    ``transpose(a).shape == a.shape[::-1]``.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axes : tuple or list of ints, optional
+        If specified, it must be a tuple or list which contains a permutation
+        of [0,1,...,N-1] where N is the number of axes of `a`. The `i`'th axis
+        of the returned array will correspond to the axis numbered ``axes[i]``
+        of the input. If not specified, defaults to ``range(a.ndim)[::-1]``,
+        which reverses the order of the axes.
+
+    Returns
+    -------
+    p : ndarray
+        `a` with its axes permuted. A view is returned whenever possible.
+
+    See Also
+    --------
+    ndarray.transpose : Equivalent method.
+    moveaxis : Move axes of an array to new positions.
+    argsort : Return the indices that would sort an array.
+
+    Notes
+    -----
+    Use ``transpose(a, argsort(axes))`` to invert the transposition of tensors
+    when using the `axes` keyword argument.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> a
+    array([[1, 2],
+           [3, 4]])
+    >>> np.transpose(a)
+    array([[1, 3],
+           [2, 4]])
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> a
+    array([1, 2, 3, 4])
+    >>> np.transpose(a)
+    array([1, 2, 3, 4])
+
+    >>> a = np.ones((1, 2, 3))
+    >>> np.transpose(a, (1, 0, 2)).shape
+    (2, 1, 3)
+
+    >>> a = np.ones((2, 3, 4, 5))
+    >>> np.transpose(a).shape
+    (5, 4, 3, 2)
+
+    """
+    return _wrapfunc(a, 'transpose', axes)
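+
+# Editor's note (not part of the NumPy source): a sketch of the Notes tip
+# that ``transpose(a, argsort(axes))`` inverts a transposition.
+#
+#     >>> a = np.ones((1, 2, 3))
+#     >>> axes = (1, 2, 0)
+#     >>> np.transpose(np.transpose(a, axes), np.argsort(axes)).shape
+#     (1, 2, 3)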
+
+
+def _partition_dispatcher(a, kth, axis=None, kind=None, order=None):
+    return (a,)
+
+
+@array_function_dispatch(_partition_dispatcher)
+def partition(a, kth, axis=-1, kind='introselect', order=None):
+    """
+    Return a partitioned copy of an array.
+
+    Creates a copy of the array with its elements rearranged in such a
+    way that the value of the element in k-th position is in the position
+    the value would be in a sorted array.  In the partitioned array, all
+    elements before the k-th element are less than or equal to that
+    element, and all the elements after the k-th element are greater than
+    or equal to that element.  The ordering of the elements in the two
+    partitions is undefined.
+
+    .. versionadded:: 1.8.0
+
+    Parameters
+    ----------
+    a : array_like
+        Array to be sorted.
+    kth : int or sequence of ints
+        Element index to partition by. The k-th value of the element
+        will be in its final sorted position and all smaller elements
+        will be moved before it and all equal or greater elements behind
+        it. The order of all elements in the partitions is undefined. If
+        provided with a sequence of k-th values, it will partition all
+        elements indexed by those k-th values into their sorted position
+        at once.
+
+        .. deprecated:: 1.22.0
+            Passing booleans as index is deprecated.
+    axis : int or None, optional
+        Axis along which to sort. If None, the array is flattened before
+        sorting. The default is -1, which sorts along the last axis.
+    kind : {'introselect'}, optional
+        Selection algorithm. Default is 'introselect'.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument
+        specifies which fields to compare first, second, etc.  A single
+        field can be specified as a string.  Not all fields need be
+        specified, but unspecified fields will still be used, in the
+        order in which they come up in the dtype, to break ties.
+
+    Returns
+    -------
+    partitioned_array : ndarray
+        Array of the same type and shape as `a`.
+
+    See Also
+    --------
+    ndarray.partition : Method to sort an array in-place.
+    argpartition : Indirect partition.
+    sort : Full sorting
+
+    Notes
+    -----
+    The various selection algorithms are characterized by their average
+    speed, worst case performance, work space size, and whether they are
+    stable. A stable sort keeps items with the same key in the same
+    relative order. The available algorithms have the following
+    properties:
+
+    ================= ======= ============= ============ =======
+       kind            speed   worst case    work space  stable
+    ================= ======= ============= ============ =======
+    'introselect'        1        O(n)           0         no
+    ================= ======= ============= ============ =======
+
+    All the partition algorithms make temporary copies of the data when
+    partitioning along any but the last axis.  Consequently,
+    partitioning along the last axis is faster and uses less space than
+    partitioning along any other axis.
+
+    The sort order for complex numbers is lexicographic. If both the
+    real and imaginary parts are non-nan then the order is determined by
+    the real parts except when they are equal, in which case the order
+    is determined by the imaginary parts.
+
+    Examples
+    --------
+    >>> a = np.array([7, 1, 7, 7, 1, 5, 7, 2, 3, 2, 6, 2, 3, 0])
+    >>> p = np.partition(a, 4)
+    >>> p
+    array([0, 1, 2, 1, 2, 5, 2, 3, 3, 6, 7, 7, 7, 7])
+
+    ``p[4]`` is 2;  all elements in ``p[:4]`` are less than or equal
+    to ``p[4]``, and all elements in ``p[5:]`` are greater than or
+    equal to ``p[4]``.  The partition is::
+
+        [0, 1, 2, 1], [2], [5, 2, 3, 3, 6, 7, 7, 7, 7]
+
+    The next example shows the use of multiple values passed to `kth`.
+
+    >>> p2 = np.partition(a, (4, 8))
+    >>> p2
+    array([0, 1, 2, 1, 2, 3, 3, 2, 5, 6, 7, 7, 7, 7])
+
+    ``p2[4]`` is 2  and ``p2[8]`` is 5.  All elements in ``p2[:4]``
+    are less than or equal to ``p2[4]``, all elements in ``p2[5:8]``
+    are greater than or equal to ``p2[4]`` and less than or equal to
+    ``p2[8]``, and all elements in ``p2[9:]`` are greater than or
+    equal to ``p2[8]``.  The partition is::
+
+        [0, 1, 2, 1], [2], [3, 3, 2], [5], [6, 7, 7, 7, 7]
+    """
+    if axis is None:
+        # flatten returns (1, N) for np.matrix, so always use the last axis
+        a = asanyarray(a).flatten()
+        axis = -1
+    else:
+        a = asanyarray(a).copy(order="K")
+    a.partition(kth, axis=axis, kind=kind, order=order)
+    return a
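+
+# Editor's note (not part of the NumPy source): the partition invariant can
+# be checked directly -- the k-th element lands in its sorted position, with
+# smaller-or-equal values before it and greater-or-equal values after it.
+#
+#     >>> x = np.array([7, 1, 5, 2, 9, 3])
+#     >>> p = np.partition(x, 2)
+#     >>> p[2] == np.sort(x)[2]
+#     True
+#     >>> bool((p[:2] <= p[2]).all() and (p[3:] >= p[2]).all())
+#     True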
+
+
+def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None):
+    return (a,)
+
+
+@array_function_dispatch(_argpartition_dispatcher)
+def argpartition(a, kth, axis=-1, kind='introselect', order=None):
+    """
+    Perform an indirect partition along the given axis using the
+    algorithm specified by the `kind` keyword. It returns an array of
+    indices of the same shape as `a` that index data along the given
+    axis in partitioned order.
+
+    .. versionadded:: 1.8.0
+
+    Parameters
+    ----------
+    a : array_like
+        Array to sort.
+    kth : int or sequence of ints
+        Element index to partition by. The k-th element will be in its
+        final sorted position and all smaller elements will be moved
+        before it and all larger elements behind it. The order of all
+        elements in the partitions is undefined. If provided with a
+        sequence of k-th values, it will partition all of them into their
+        sorted position at once.
+
+        .. deprecated:: 1.22.0
+            Passing booleans as index is deprecated.
+    axis : int or None, optional
+        Axis along which to sort. The default is -1 (the last axis). If
+        None, the flattened array is used.
+    kind : {'introselect'}, optional
+        Selection algorithm. Default is 'introselect'
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument
+        specifies which fields to compare first, second, etc. A single
+        field can be specified as a string, and not all fields need be
+        specified, but unspecified fields will still be used, in the
+        order in which they come up in the dtype, to break ties.
+
+    Returns
+    -------
+    index_array : ndarray, int
+        Array of indices that partition `a` along the specified axis.
+        If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
+        More generally, ``np.take_along_axis(a, index_array, axis=axis)``
+        always yields the partitioned `a`, irrespective of dimensionality.
+
+    See Also
+    --------
+    partition : Describes partition algorithms used.
+    ndarray.partition : Inplace partition.
+    argsort : Full indirect sort.
+    take_along_axis : Apply ``index_array`` from argpartition
+                      to an array as if by calling partition.
+
+    Notes
+    -----
+    See `partition` for notes on the different selection algorithms.
+
+    Examples
+    --------
+    One dimensional array:
+
+    >>> x = np.array([3, 4, 2, 1])
+    >>> x[np.argpartition(x, 3)]
+    array([2, 1, 3, 4])
+    >>> x[np.argpartition(x, (1, 3))]
+    array([1, 2, 3, 4])
+
+    >>> x = [3, 4, 2, 1]
+    >>> np.array(x)[np.argpartition(x, 3)]
+    array([2, 1, 3, 4])
+
+    Multi-dimensional array:
+
+    >>> x = np.array([[3, 4, 2], [1, 3, 1]])
+    >>> index_array = np.argpartition(x, kth=1, axis=-1)
+    >>> np.take_along_axis(x, index_array, axis=-1)  # same as np.partition(x, kth=1)
+    array([[2, 3, 4],
+           [1, 1, 3]])
+
+    """
+    return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
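+
+# Editor's note (not part of the NumPy source): a common use is a cheap
+# "top-k" -- only the split around the k-th position is guaranteed, so the
+# selected values are sorted afterwards. Illustrative sketch only.
+#
+#     >>> x = np.array([9, 1, 8, 2, 7, 3])
+#     >>> idx = np.argpartition(x, -2)[-2:]   # indices of the two largest
+#     >>> np.sort(x[idx])
+#     array([8, 9])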
+
+
+def _sort_dispatcher(a, axis=None, kind=None, order=None):
+    return (a,)
+
+
+@array_function_dispatch(_sort_dispatcher)
+def sort(a, axis=-1, kind=None, order=None):
+    """
+    Return a sorted copy of an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to be sorted.
+    axis : int or None, optional
+        Axis along which to sort. If None, the array is flattened before
+        sorting. The default is -1, which sorts along the last axis.
+    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+        Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+        and 'mergesort' use timsort or radix sort under the covers and, in general,
+        the actual implementation will vary with data type. The 'mergesort' option
+        is retained for backwards compatibility.
+
+        .. versionchanged:: 1.15.0.
+           The 'stable' option was added.
+
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc.  A single field can
+        be specified as a string, and not all fields need be specified,
+        but unspecified fields will still be used, in the order in which
+        they come up in the dtype, to break ties.
+
+    Returns
+    -------
+    sorted_array : ndarray
+        Array of the same type and shape as `a`.
+
+    See Also
+    --------
+    ndarray.sort : Method to sort an array in-place.
+    argsort : Indirect sort.
+    lexsort : Indirect stable sort on multiple keys.
+    searchsorted : Find elements in a sorted array.
+    partition : Partial sort.
+
+    Notes
+    -----
+    The various sorting algorithms are characterized by their average speed,
+    worst case performance, work space size, and whether they are stable. A
+    stable sort keeps items with the same key in the same relative
+    order. The four algorithms implemented in NumPy have the following
+    properties:
+
+    =========== ======= ============= ============ ========
+       kind      speed   worst case    work space   stable
+    =========== ======= ============= ============ ========
+    'quicksort'    1     O(n^2)            0          no
+    'heapsort'     3     O(n*log(n))       0          no
+    'mergesort'    2     O(n*log(n))      ~n/2        yes
+    'timsort'      2     O(n*log(n))      ~n/2        yes
+    =========== ======= ============= ============ ========
+
+    .. note:: The datatype determines which of 'mergesort' or 'timsort'
+       is actually used, even if 'mergesort' is specified. User selection
+       at a finer scale is not currently available.
+
+    All the sort algorithms make temporary copies of the data when
+    sorting along any but the last axis.  Consequently, sorting along
+    the last axis is faster and uses less space than sorting along
+    any other axis.
+
+    The sort order for complex numbers is lexicographic. If both the real
+    and imaginary parts are non-nan then the order is determined by the
+    real parts except when they are equal, in which case the order is
+    determined by the imaginary parts.
+
+    Previous to numpy 1.4.0 sorting real and complex arrays containing nan
+    values led to undefined behaviour. In numpy versions >= 1.4.0 nan
+    values are sorted to the end. The extended sort order is:
+
+      * Real: [R, nan]
+      * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
+
+    where R is a non-nan real value. Complex values with the same nan
+    placements are sorted according to the non-nan part if it exists.
+    Non-nan values are sorted as before.
+
+    .. versionadded:: 1.12.0
+
+    quicksort has been changed to
+    `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
+    When sorting does not make enough progress it switches to
+    `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
+    This implementation makes quicksort O(n*log(n)) in the worst case.
+
+    'stable' automatically chooses the best stable sorting algorithm
+    for the data type being sorted.
+    It, along with 'mergesort' is currently mapped to
+    `timsort <https://en.wikipedia.org/wiki/Timsort>`_
+    or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
+    depending on the data type.
+    API forward compatibility currently limits the
+    ability to select the implementation and it is hardwired for the different
+    data types.
+
+    .. versionadded:: 1.17.0
+
+    Timsort is added for better performance on already or nearly
+    sorted data. On random data timsort is almost identical to
+    mergesort. It is now used for stable sort while quicksort is still the
+    default sort if none is chosen. For timsort details, refer to
+    `CPython listsort.txt
+    <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
+    'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
+    O(n) sort instead of O(n log n).
+
+    .. versionchanged:: 1.18.0
+
+    NaT now sorts to the end of arrays for consistency with NaN.
+
+    Examples
+    --------
+    >>> a = np.array([[1,4],[3,1]])
+    >>> np.sort(a)                # sort along the last axis
+    array([[1, 4],
+           [1, 3]])
+    >>> np.sort(a, axis=None)     # sort the flattened array
+    array([1, 1, 3, 4])
+    >>> np.sort(a, axis=0)        # sort along the first axis
+    array([[1, 1],
+           [3, 4]])
+
+    Use the `order` keyword to specify a field to use when sorting a
+    structured array:
+
+    >>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
+    >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
+    ...           ('Galahad', 1.7, 38)]
+    >>> a = np.array(values, dtype=dtype)       # create a structured array
+    >>> np.sort(a, order='height')                        # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+           ('Lancelot', 1.8999999999999999, 38)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+    Sort by age, then height if ages are equal:
+
+    >>> np.sort(a, order=['age', 'height'])               # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
+           ('Arthur', 1.8, 41)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+    """
+    if axis is None:
+        # flatten returns (1, N) for np.matrix, so always use the last axis
+        a = asanyarray(a).flatten()
+        axis = -1
+    else:
+        a = asanyarray(a).copy(order="K")
+    a.sort(axis=axis, kind=kind, order=order)
+    return a
+
+
+def _argsort_dispatcher(a, axis=None, kind=None, order=None):
+    return (a,)
+
+
+@array_function_dispatch(_argsort_dispatcher)
+def argsort(a, axis=-1, kind=None, order=None):
+    """
+    Returns the indices that would sort an array.
+
+    Perform an indirect sort along the given axis using the algorithm
+    specified by the `kind` keyword. It returns an array of indices of the
+    same shape as `a` that index data along the given axis in sorted order.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to sort.
+    axis : int or None, optional
+        Axis along which to sort.  The default is -1 (the last axis). If
+        None, the flattened array is used.
+    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+        Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+        and 'mergesort' use timsort under the covers and, in general, the
+        actual implementation will vary with data type. The 'mergesort' option
+        is retained for backwards compatibility.
+
+        .. versionchanged:: 1.15.0.
+           The 'stable' option was added.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc.  A single field can
+        be specified as a string, and not all fields need be specified,
+        but unspecified fields will still be used, in the order in which
+        they come up in the dtype, to break ties.
+
+    Returns
+    -------
+    index_array : ndarray, int
+        Array of indices that sort `a` along the specified `axis`.
+        If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
+        More generally, ``np.take_along_axis(a, index_array, axis=axis)``
+        always yields the sorted `a`, irrespective of dimensionality.
+
+    See Also
+    --------
+    sort : Describes sorting algorithms used.
+    lexsort : Indirect stable sort with multiple keys.
+    ndarray.sort : Inplace sort.
+    argpartition : Indirect partial sort.
+    take_along_axis : Apply ``index_array`` from argsort
+                      to an array as if by calling sort.
+
+    Notes
+    -----
+    See `sort` for notes on the different sorting algorithms.
+
+    As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
+    nan values. The enhanced sort order is documented in `sort`.
+
+    Examples
+    --------
+    One dimensional array:
+
+    >>> x = np.array([3, 1, 2])
+    >>> np.argsort(x)
+    array([1, 2, 0])
+
+    Two-dimensional array:
+
+    >>> x = np.array([[0, 3], [2, 2]])
+    >>> x
+    array([[0, 3],
+           [2, 2]])
+
+    >>> ind = np.argsort(x, axis=0)  # sorts along first axis (down)
+    >>> ind
+    array([[0, 1],
+           [1, 0]])
+    >>> np.take_along_axis(x, ind, axis=0)  # same as np.sort(x, axis=0)
+    array([[0, 2],
+           [2, 3]])
+
+    >>> ind = np.argsort(x, axis=1)  # sorts along last axis (across)
+    >>> ind
+    array([[0, 1],
+           [0, 1]])
+    >>> np.take_along_axis(x, ind, axis=1)  # same as np.sort(x, axis=1)
+    array([[0, 3],
+           [2, 2]])
+
+    Indices of the sorted elements of a N-dimensional array:
+
+    >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
+    >>> ind
+    (array([0, 1, 1, 0]), array([0, 0, 1, 1]))
+    >>> x[ind]  # same as np.sort(x, axis=None)
+    array([0, 2, 2, 3])
+
+    Sorting with keys:
+
+    >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
+    >>> x
+    array([(1, 0), (0, 1)],
+          dtype=[('x', '<i4'), ('y', '<i4')])
+
+    >>> np.argsort(x, order=('x','y'))
+    array([1, 0])
+
+    >>> np.argsort(x, order=('y','x'))
+    array([0, 1])
+
+    """
+    return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
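+
+# Editor's note (not part of the NumPy source): indices for a *descending*
+# sort can be obtained by negating the input (for numeric data) -- an
+# illustrative sketch.
+#
+#     >>> x = np.array([3, 1, 2])
+#     >>> np.argsort(-x)
+#     array([0, 2, 1])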
+
+
+def _argmax_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue):
+    return (a, out)
+
+
+@array_function_dispatch(_argmax_dispatcher)
+def argmax(a, axis=None, out=None, *, keepdims=np._NoValue):
+    """
+    Returns the indices of the maximum values along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        By default, the index is into the flattened array, otherwise
+        along the specified axis.
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the array.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    index_array : ndarray of ints
+        Array of indices into the array. It has the same shape as `a.shape`
+        with the dimension along `axis` removed. If `keepdims` is set to True,
+        then the size of `axis` will be 1 with the resulting array having same
+        shape as `a.shape`.
+
+    See Also
+    --------
+    ndarray.argmax, argmin
+    amax : The maximum value along a given axis.
+    unravel_index : Convert a flat index into an index tuple.
+    take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+                      from argmax to an array as if by calling max.
+
+    Notes
+    -----
+    In case of multiple occurrences of the maximum values, the indices
+    corresponding to the first occurrence are returned.
+
+    Examples
+    --------
+    >>> a = np.arange(6).reshape(2,3) + 10
+    >>> a
+    array([[10, 11, 12],
+           [13, 14, 15]])
+    >>> np.argmax(a)
+    5
+    >>> np.argmax(a, axis=0)
+    array([1, 1, 1])
+    >>> np.argmax(a, axis=1)
+    array([2, 2])
+
+    Indexes of the maximal elements of a N-dimensional array:
+
+    >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
+    >>> ind
+    (1, 2)
+    >>> a[ind]
+    15
+
+    >>> b = np.arange(6)
+    >>> b[1] = 5
+    >>> b
+    array([0, 5, 2, 3, 4, 5])
+    >>> np.argmax(b)  # Only the first occurrence is returned.
+    1
+
+    >>> x = np.array([[4,2,3], [1,0,3]])
+    >>> index_array = np.argmax(x, axis=-1)
+    >>> # Same as np.amax(x, axis=-1, keepdims=True)
+    >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+    array([[4],
+           [3]])
+    >>> # Same as np.amax(x, axis=-1)
+    >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
+    array([4, 3])
+
+    Setting `keepdims` to `True`,
+
+    >>> x = np.arange(24).reshape((2, 3, 4))
+    >>> res = np.argmax(x, axis=1, keepdims=True)
+    >>> res.shape
+    (2, 1, 4)
+    """
+    kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {}
+    return _wrapfunc(a, 'argmax', axis=axis, out=out, **kwds)
+
+
+def _argmin_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue):
+    return (a, out)
+
+
+@array_function_dispatch(_argmin_dispatcher)
+def argmin(a, axis=None, out=None, *, keepdims=np._NoValue):
+    """
+    Returns the indices of the minimum values along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        By default, the index is into the flattened array, otherwise
+        along the specified axis.
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the array.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    index_array : ndarray of ints
+        Array of indices into the array. It has the same shape as `a.shape`
+        with the dimension along `axis` removed. If `keepdims` is set to True,
+        then the size of `axis` will be 1 with the resulting array having same
+        shape as `a.shape`.
+
+    See Also
+    --------
+    ndarray.argmin, argmax
+    amin : The minimum value along a given axis.
+    unravel_index : Convert a flat index into an index tuple.
+    take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+                      from argmin to an array as if by calling min.
+
+    Notes
+    -----
+    In case of multiple occurrences of the minimum values, the indices
+    corresponding to the first occurrence are returned.
+
+    Examples
+    --------
+    >>> a = np.arange(6).reshape(2,3) + 10
+    >>> a
+    array([[10, 11, 12],
+           [13, 14, 15]])
+    >>> np.argmin(a)
+    0
+    >>> np.argmin(a, axis=0)
+    array([0, 0, 0])
+    >>> np.argmin(a, axis=1)
+    array([0, 0])
+
+    Indices of the minimum elements of a N-dimensional array:
+
+    >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
+    >>> ind
+    (0, 0)
+    >>> a[ind]
+    10
+
+    >>> b = np.arange(6) + 10
+    >>> b[4] = 10
+    >>> b
+    array([10, 11, 12, 13, 10, 15])
+    >>> np.argmin(b)  # Only the first occurrence is returned.
+    0
+
+    >>> x = np.array([[4,2,3], [1,0,3]])
+    >>> index_array = np.argmin(x, axis=-1)
+    >>> # Same as np.amin(x, axis=-1, keepdims=True)
+    >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+    array([[2],
+           [0]])
+    >>> # Same as np.amin(x, axis=-1)
+    >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
+    array([2, 0])
+
+    Setting `keepdims` to `True`,
+
+    >>> x = np.arange(24).reshape((2, 3, 4))
+    >>> res = np.argmin(x, axis=1, keepdims=True)
+    >>> res.shape
+    (2, 1, 4)
+    """
+    kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {}
+    return _wrapfunc(a, 'argmin', axis=axis, out=out, **kwds)
+
+
+def _searchsorted_dispatcher(a, v, side=None, sorter=None):
+    return (a, v, sorter)
+
+
+@array_function_dispatch(_searchsorted_dispatcher)
+def searchsorted(a, v, side='left', sorter=None):
+    """
+    Find indices where elements should be inserted to maintain order.
+
+    Find the indices into a sorted array `a` such that, if the
+    corresponding elements in `v` were inserted before the indices, the
+    order of `a` would be preserved.
+
+    Assuming that `a` is sorted:
+
+    ======  ============================
+    `side`  returned index `i` satisfies
+    ======  ============================
+    left    ``a[i-1] < v <= a[i]``
+    right   ``a[i-1] <= v < a[i]``
+    ======  ============================
+
+    Parameters
+    ----------
+    a : 1-D array_like
+        Input array. If `sorter` is None, then it must be sorted in
+        ascending order, otherwise `sorter` must be an array of indices
+        that sort it.
+    v : array_like
+        Values to insert into `a`.
+    side : {'left', 'right'}, optional
+        If 'left', the index of the first suitable location found is given.
+        If 'right', return the last such index.  If there is no suitable
+        index, return either 0 or N (where N is the length of `a`).
+    sorter : 1-D array_like, optional
+        Optional array of integer indices that sort array a into ascending
+        order. They are typically the result of argsort.
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    indices : int or array of ints
+        Array of insertion points with the same shape as `v`,
+        or an integer if `v` is a scalar.
+
+    See Also
+    --------
+    sort : Return a sorted copy of an array.
+    histogram : Produce histogram from 1-D data.
+
+    Notes
+    -----
+    Binary search is used to find the required insertion points.
+
+    As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
+    `nan` values. The enhanced sort order is documented in `sort`.
+
+    This function uses the same algorithm as the builtin python `bisect.bisect_left`
+    (``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,
+    which is also vectorized in the `v` argument.
+
+    Examples
+    --------
+    >>> np.searchsorted([1,2,3,4,5], 3)
+    2
+    >>> np.searchsorted([1,2,3,4,5], 3, side='right')
+    3
+    >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
+    array([0, 5, 1, 2])
+
+    """
+    return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
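+
+# Editor's note (not part of the NumPy source): a typical use is keeping an
+# array sorted on insertion -- a sketch using the documented 'left' default.
+#
+#     >>> a = np.array([1, 3, 5, 7])
+#     >>> np.insert(a, np.searchsorted(a, 4), 4)
+#     array([1, 3, 4, 5, 7])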
+
+
+def _resize_dispatcher(a, new_shape):
+    return (a,)
+
+
+@array_function_dispatch(_resize_dispatcher)
+def resize(a, new_shape):
+    """
+    Return a new array with the specified shape.
+
+    If the new array is larger than the original array, then the new
+    array is filled with repeated copies of `a`.  Note that this behavior
+    is different from a.resize(new_shape) which fills with zeros instead
+    of repeated copies of `a`.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to be resized.
+
+    new_shape : int or tuple of int
+        Shape of resized array.
+
+    Returns
+    -------
+    reshaped_array : ndarray
+        The new array is formed from the data in the old array, repeated
+        if necessary to fill out the required number of elements.  The
+        data are repeated iterating over the array in C-order.
+
+    See Also
+    --------
+    numpy.reshape : Reshape an array without changing the total size.
+    numpy.pad : Enlarge and pad an array.
+    numpy.repeat : Repeat elements of an array.
+    ndarray.resize : resize an array in-place.
+
+    Notes
+    -----
+    When the total size of the array does not change, `~numpy.reshape` should
+    be used.  In most other cases either indexing (to reduce the size)
+    or padding (to increase the size) may be a more appropriate solution.
+
+    Warning: This functionality does **not** consider axes separately,
+    i.e. it does not apply interpolation/extrapolation.
+    It fills the return array with the required number of elements, iterating
+    over `a` in C-order, disregarding axes (and cycling back from the start if
+    the new shape is larger).  This functionality is therefore not suitable to
+    resize images, or data where each axis represents a separate and distinct
+    entity.
+
+    Examples
+    --------
+    >>> a=np.array([[0,1],[2,3]])
+    >>> np.resize(a,(2,3))
+    array([[0, 1, 2],
+           [3, 0, 1]])
+    >>> np.resize(a,(1,4))
+    array([[0, 1, 2, 3]])
+    >>> np.resize(a,(2,4))
+    array([[0, 1, 2, 3],
+           [0, 1, 2, 3]])
+
+    """
+    if isinstance(new_shape, (int, nt.integer)):
+        new_shape = (new_shape,)
+
+    a = ravel(a)
+
+    new_size = 1
+    for dim_length in new_shape:
+        new_size *= dim_length
+        if dim_length < 0:
+            raise ValueError('all elements of `new_shape` must be non-negative')
+
+    if a.size == 0 or new_size == 0:
+        # First case must zero fill. The second would have repeats == 0.
+        return np.zeros_like(a, shape=new_shape)
+
+    repeats = -(-new_size // a.size)  # ceil division
+    a = concatenate((a,) * repeats)[:new_size]
+
+    return reshape(a, new_shape)
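+
+# Editor's note (not part of the NumPy source): the implementation above
+# cycles over `a` in C-order, so enlarging wraps back to the start -- a
+# quick illustrative check.
+#
+#     >>> np.resize(np.array([1, 2, 3]), (5,))
+#     array([1, 2, 3, 1, 2])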
+
+
+def _squeeze_dispatcher(a, axis=None):
+    return (a,)
+
+
+@array_function_dispatch(_squeeze_dispatcher)
+def squeeze(a, axis=None):
+    """
+    Remove axes of length one from `a`.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : None or int or tuple of ints, optional
+        .. versionadded:: 1.7.0
+
+        Selects a subset of the entries of length one in the
+        shape. If an axis is selected with shape entry greater than
+        one, an error is raised.
+
+    Returns
+    -------
+    squeezed : ndarray
+        The input array, but with all or a subset of the
+        dimensions of length 1 removed. This is always `a` itself
+        or a view into `a`. Note that if all axes are squeezed,
+        the result is a 0d array and not a scalar.
+
+    Raises
+    ------
+    ValueError
+        If `axis` is not None, and an axis being squeezed is not of length 1
+
+    See Also
+    --------
+    expand_dims : The inverse operation, adding entries of length one
+    reshape : Insert, remove, and combine dimensions, and resize existing ones
+
+    Examples
+    --------
+    >>> x = np.array([[[0], [1], [2]]])
+    >>> x.shape
+    (1, 3, 1)
+    >>> np.squeeze(x).shape
+    (3,)
+    >>> np.squeeze(x, axis=0).shape
+    (3, 1)
+    >>> np.squeeze(x, axis=1).shape
+    Traceback (most recent call last):
+    ...
+    ValueError: cannot select an axis to squeeze out which has size not equal to one
+    >>> np.squeeze(x, axis=2).shape
+    (1, 3)
+    >>> x = np.array([[1234]])
+    >>> x.shape
+    (1, 1)
+    >>> np.squeeze(x)
+    array(1234)  # 0d array
+    >>> np.squeeze(x).shape
+    ()
+    >>> np.squeeze(x)[()]
+    1234
+
+    """
+    try:
+        squeeze = a.squeeze
+    except AttributeError:
+        return _wrapit(a, 'squeeze', axis=axis)
+    if axis is None:
+        return squeeze()
+    else:
+        return squeeze(axis=axis)
+
+
+def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None):
+    return (a,)
+
+
+@array_function_dispatch(_diagonal_dispatcher)
+def diagonal(a, offset=0, axis1=0, axis2=1):
+    """
+    Return specified diagonals.
+
+    If `a` is 2-D, returns the diagonal of `a` with the given offset,
+    i.e., the collection of elements of the form ``a[i, i+offset]``.  If
+    `a` has more than two dimensions, then the axes specified by `axis1`
+    and `axis2` are used to determine the 2-D sub-array whose diagonal is
+    returned.  The shape of the resulting array can be determined by
+    removing `axis1` and `axis2` and appending an index to the right equal
+    to the size of the resulting diagonals.
+
+    In versions of NumPy prior to 1.7, this function always returned a new,
+    independent array containing a copy of the values in the diagonal.
+
+    In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
+    but depending on this fact is deprecated. Writing to the resulting
+    array continues to work as it used to, but a FutureWarning is issued.
+
+    Starting in NumPy 1.9 it returns a read-only view on the original array.
+    Attempting to write to the resulting array will produce an error.
+
+    In some future release, it will return a read/write view and writing to
+    the returned array will alter your original array.  The returned array
+    will have the same type as the input array.
+
+    If you don't write to the array returned by this function, then you can
+    just ignore all of the above.
+
+    If you depend on the current behavior, then we suggest copying the
+    returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead
+    of just ``np.diagonal(a)``. This will work with both past and future
+    versions of NumPy.
+
+    Parameters
+    ----------
+    a : array_like
+        Array from which the diagonals are taken.
+    offset : int, optional
+        Offset of the diagonal from the main diagonal.  Can be positive or
+        negative.  Defaults to main diagonal (0).
+    axis1 : int, optional
+        Axis to be used as the first axis of the 2-D sub-arrays from which
+        the diagonals should be taken.  Defaults to first axis (0).
+    axis2 : int, optional
+        Axis to be used as the second axis of the 2-D sub-arrays from
+        which the diagonals should be taken. Defaults to second axis (1).
+
+    Returns
+    -------
+    array_of_diagonals : ndarray
+        If `a` is 2-D, then a 1-D array containing the diagonal and of the
+        same type as `a` is returned unless `a` is a `matrix`, in which case
+        a 1-D array rather than a (2-D) `matrix` is returned in order to
+        maintain backward compatibility.
+
+        If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
+        are removed, and a new axis inserted at the end corresponding to the
+        diagonal.
+
+    Raises
+    ------
+    ValueError
+        If the dimension of `a` is less than 2.
+
+    See Also
+    --------
+    diag : MATLAB work-a-like for 1-D and 2-D arrays.
+    diagflat : Create diagonal arrays.
+    trace : Sum along diagonals.
+
+    Examples
+    --------
+    >>> a = np.arange(4).reshape(2,2)
+    >>> a
+    array([[0, 1],
+           [2, 3]])
+    >>> a.diagonal()
+    array([0, 3])
+    >>> a.diagonal(1)
+    array([1])
+
+    A 3-D example:
+
+    >>> a = np.arange(8).reshape(2,2,2); a
+    array([[[0, 1],
+            [2, 3]],
+           [[4, 5],
+            [6, 7]]])
+    >>> a.diagonal(0,  # Main diagonals of two arrays created by skipping
+    ...            0,  # across the outer(left)-most axis last and
+    ...            1)  # the "middle" (row) axis first.
+    array([[0, 6],
+           [1, 7]])
+
+    The sub-arrays whose main diagonals we just obtained; note that each
+    corresponds to fixing the right-most (column) axis, and that the
+    diagonals are "packed" in rows.
+
+    >>> a[:,:,0]  # main diagonal is [0 6]
+    array([[0, 2],
+           [4, 6]])
+    >>> a[:,:,1]  # main diagonal is [1 7]
+    array([[1, 3],
+           [5, 7]])
+
+    The anti-diagonal can be obtained by reversing the order of elements
+    using either `numpy.flipud` or `numpy.fliplr`.
+
+    >>> a = np.arange(9).reshape(3, 3)
+    >>> a
+    array([[0, 1, 2],
+           [3, 4, 5],
+           [6, 7, 8]])
+    >>> np.fliplr(a).diagonal()  # Horizontal flip
+    array([2, 4, 6])
+    >>> np.flipud(a).diagonal()  # Vertical flip
+    array([6, 4, 2])
+
+    Note that the order in which the diagonal is retrieved varies depending
+    on the flip function.
+    """
+    if isinstance(a, np.matrix):
+        # Make diagonal of matrix 1-D to preserve backward compatibility.
+        return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
+    else:
+        return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
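+
+# Editor's note (not part of the NumPy source): negative offsets select
+# diagonals below the main one -- a sketch complementing the examples above.
+#
+#     >>> a = np.arange(9).reshape(3, 3)
+#     >>> np.diagonal(a, offset=-1)      # elements a[1, 0] and a[2, 1]
+#     array([3, 7])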
+
+
+def _trace_dispatcher(
+        a, offset=None, axis1=None, axis2=None, dtype=None, out=None):
+    return (a, out)
+
+
+@array_function_dispatch(_trace_dispatcher)
+def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
+    """
+    Return the sum along diagonals of the array.
+
+    If `a` is 2-D, the sum along its diagonal with the given offset
+    is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
+
+    If `a` has more than two dimensions, then the axes specified by axis1 and
+    axis2 are used to determine the 2-D sub-arrays whose traces are returned.
+    The shape of the resulting array is the same as that of `a` with `axis1`
+    and `axis2` removed.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, from which the diagonals are taken.
+    offset : int, optional
+        Offset of the diagonal from the main diagonal. Can be both positive
+        and negative. Defaults to 0.
+    axis1, axis2 : int, optional
+        Axes to be used as the first and second axis of the 2-D sub-arrays
+        from which the diagonals should be taken. Defaults are the first two
+        axes of `a`.
+    dtype : dtype, optional
+        Determines the data-type of the returned array and of the accumulator
+        where the elements are summed. If dtype has the value None and `a` is
+        of integer type of precision less than the default integer
+        precision, then the default integer precision is used. Otherwise,
+        the precision is the same as that of `a`.
+    out : ndarray, optional
+        Array into which the output is placed. Its type is preserved and
+        it must be of the right shape to hold the output.
+
+    Returns
+    -------
+    sum_along_diagonals : ndarray
+        If `a` is 2-D, the sum along the diagonal is returned.  If `a` has
+        larger dimensions, then an array of sums along diagonals is returned.
+
+    See Also
+    --------
+    diag, diagonal, diagflat
+
+    Examples
+    --------
+    >>> np.trace(np.eye(3))
+    3.0
+    >>> a = np.arange(8).reshape((2,2,2))
+    >>> np.trace(a)
+    array([6, 8])
+
+    >>> a = np.arange(24).reshape((2,2,2,3))
+    >>> np.trace(a).shape
+    (2, 3)
+
+    """
+    if isinstance(a, np.matrix):
+        # Get trace of matrix via an array to preserve backward compatibility.
+        return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2,
+                                dtype=dtype, out=out)
+    else:
+        return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2,
+                                   dtype=dtype, out=out)
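+
+# Editor's note (not part of the NumPy source): `offset` shifts which
+# diagonal is summed -- a sketch complementing the examples above.
+#
+#     >>> a = np.arange(9).reshape(3, 3)
+#     >>> np.trace(a, offset=1)          # 1 + 5
+#     6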
+
+
+def _ravel_dispatcher(a, order=None):
+    return (a,)
+
+
+@array_function_dispatch(_ravel_dispatcher)
+def ravel(a, order='C'):
+    """Return a contiguous flattened array.
+
+    A 1-D array, containing the elements of the input, is returned.  A copy is
+    made only if needed.
+
+    As of NumPy 1.10, the returned array will have the same type as the input
+    array. (for example, a masked array will be returned for a masked array
+    input)
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.  The elements in `a` are read in the order specified by
+        `order`, and packed as a 1-D array.
+    order : {'C','F', 'A', 'K'}, optional
+
+        The elements of `a` are read using this index order. 'C' means
+        to index the elements in row-major, C-style order,
+        with the last axis index changing fastest, back to the first
+        axis index changing slowest.  'F' means to index the elements
+        in column-major, Fortran-style order, with the
+        first index changing fastest, and the last index changing
+        slowest. Note that the 'C' and 'F' options take no account of
+        the memory layout of the underlying array, and only refer to
+        the order of axis indexing.  'A' means to read the elements in
+        Fortran-like index order if `a` is Fortran *contiguous* in
+        memory, C-like order otherwise.  'K' means to read the
+        elements in the order they occur in memory, except for
+        reversing the data when strides are negative.  By default, 'C'
+        index order is used.
+
+    Returns
+    -------
+    y : array_like
+        y is a contiguous 1-D array of the same subtype as `a`,
+        with shape ``(a.size,)``.
+        Note that matrices are special cased for backward compatibility,
+        if `a` is a matrix, then y is a 1-D ndarray.
+
+    See Also
+    --------
+    ndarray.flat : 1-D iterator over an array.
+    ndarray.flatten : 1-D array copy of the elements of an array
+                      in row-major order.
+    ndarray.reshape : Change the shape of an array without changing its data.
+
+    Notes
+    -----
+    In row-major, C-style order, in two dimensions, the row index
+    varies the slowest, and the column index the quickest.  This can
+    be generalized to multiple dimensions, where row-major order
+    implies that the index along the first axis varies slowest, and
+    the index along the last quickest.  The opposite holds for
+    column-major, Fortran-style index ordering.
+
+    When a view is desired in as many cases as possible, ``arr.reshape(-1)``
+    may be preferable. However, ``ravel`` supports ``K`` in the optional
+    ``order`` argument while ``reshape`` does not.
+
+    Examples
+    --------
+    It is equivalent to ``reshape(-1, order=order)``.
+
+    >>> x = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> np.ravel(x)
+    array([1, 2, 3, 4, 5, 6])
+
+    >>> x.reshape(-1)
+    array([1, 2, 3, 4, 5, 6])
+
+    >>> np.ravel(x, order='F')
+    array([1, 4, 2, 5, 3, 6])
+
+    When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
+
+    >>> np.ravel(x.T)
+    array([1, 4, 2, 5, 3, 6])
+    >>> np.ravel(x.T, order='A')
+    array([1, 2, 3, 4, 5, 6])
+
+    When ``order`` is 'K', it will preserve orderings that are neither 'C'
+    nor 'F', but won't reverse axes:
+
+    >>> a = np.arange(3)[::-1]; a
+    array([2, 1, 0])
+    >>> a.ravel(order='C')
+    array([2, 1, 0])
+    >>> a.ravel(order='K')
+    array([2, 1, 0])
+
+    >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
+    array([[[ 0,  2,  4],
+            [ 1,  3,  5]],
+           [[ 6,  8, 10],
+            [ 7,  9, 11]]])
+    >>> a.ravel(order='C')
+    array([ 0,  2,  4,  1,  3,  5,  6,  8, 10,  7,  9, 11])
+    >>> a.ravel(order='K')
+    array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
+
+    """
+    if isinstance(a, np.matrix):
+        return asarray(a).ravel(order=order)
+    else:
+        return asanyarray(a).ravel(order=order)
+
+
+def _nonzero_dispatcher(a):
+    return (a,)
+
+
+@array_function_dispatch(_nonzero_dispatcher)
+def nonzero(a):
+    """
+    Return the indices of the elements that are non-zero.
+
+    Returns a tuple of arrays, one for each dimension of `a`,
+    containing the indices of the non-zero elements in that
+    dimension. The values in `a` are always tested and returned in
+    row-major, C-style order.
+
+    To group the indices by element, rather than dimension, use `argwhere`,
+    which returns a row for each non-zero element.
+
+    .. note::
+
+       When called on a zero-d array or scalar, ``nonzero(a)`` is treated
+       as ``nonzero(atleast_1d(a))``.
+
+       .. deprecated:: 1.17.0
+
+          Use `atleast_1d` explicitly if this behavior is deliberate.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+
+    Returns
+    -------
+    tuple_of_arrays : tuple
+        Indices of elements that are non-zero.
+
+    See Also
+    --------
+    flatnonzero :
+        Return indices that are non-zero in the flattened version of the input
+        array.
+    ndarray.nonzero :
+        Equivalent ndarray method.
+    count_nonzero :
+        Counts the number of non-zero elements in the input array.
+
+    Notes
+    -----
+    While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
+    recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
+    will correctly handle 0-d arrays.
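+
+    For instance, boolean-mask indexing also works on a 0-d array
+    (a brief illustration):
+
+    >>> x = np.array(5)
+    >>> x[x != 0]
+    array([5])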
+
+    Examples
+    --------
+    >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
+    >>> x
+    array([[3, 0, 0],
+           [0, 4, 0],
+           [5, 6, 0]])
+    >>> np.nonzero(x)
+    (array([0, 1, 2, 2]), array([0, 1, 0, 1]))
+
+    >>> x[np.nonzero(x)]
+    array([3, 4, 5, 6])
+    >>> np.transpose(np.nonzero(x))
+    array([[0, 0],
+           [1, 1],
+           [2, 0],
+           [2, 1]])
+
+    A common use for ``nonzero`` is to find the indices of an array where
+    a condition is True.  Given an array `a`, the condition ``a > 3`` is
+    a boolean array, and since False is interpreted as 0,
+    ``np.nonzero(a > 3)`` yields the indices of `a` where the condition
+    is true.
+
+    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+    >>> a > 3
+    array([[False, False, False],
+           [ True,  True,  True],
+           [ True,  True,  True]])
+    >>> np.nonzero(a > 3)
+    (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
+
+    Using this result to index `a` is equivalent to using the mask directly:
+
+    >>> a[np.nonzero(a > 3)]
+    array([4, 5, 6, 7, 8, 9])
+    >>> a[a > 3]  # prefer this spelling
+    array([4, 5, 6, 7, 8, 9])
+
+    ``nonzero`` can also be called as a method of the array.
+
+    >>> (a > 3).nonzero()
+    (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
+
+    """
+    return _wrapfunc(a, 'nonzero')
+
+
+def _shape_dispatcher(a):
+    return (a,)
+
+
+@array_function_dispatch(_shape_dispatcher)
+def shape(a):
+    """
+    Return the shape of an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+
+    Returns
+    -------
+    shape : tuple of ints
+        The elements of the shape tuple give the lengths of the
+        corresponding array dimensions.
+
+    See Also
+    --------
+    len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with
+          ``N>=1``.
+    ndarray.shape : Equivalent array method.
+
+    Examples
+    --------
+    >>> np.shape(np.eye(3))
+    (3, 3)
+    >>> np.shape([[1, 3]])
+    (1, 2)
+    >>> np.shape([0])
+    (1,)
+    >>> np.shape(0)
+    ()
+
+    >>> a = np.array([(1, 2), (3, 4), (5, 6)],
+    ...              dtype=[('x', 'i4'), ('y', 'i4')])
+    >>> np.shape(a)
+    (3,)
+    >>> a.shape
+    (3,)
+
+    """
+    try:
+        result = a.shape
+    except AttributeError:
+        result = asarray(a).shape
+    return result
+
+
+def _compress_dispatcher(condition, a, axis=None, out=None):
+    return (condition, a, out)
+
+
+@array_function_dispatch(_compress_dispatcher)
+def compress(condition, a, axis=None, out=None):
+    """
+    Return selected slices of an array along given axis.
+
+    When working along a given axis, a slice along that axis is returned in
+    `output` for each index where `condition` evaluates to True. When
+    working on a 1-D array, `compress` is equivalent to `extract`.
+
+    Parameters
+    ----------
+    condition : 1-D array of bools
+        Array that selects which entries to return. If len(condition)
+        is less than the size of `a` along the given axis, then output is
+        truncated to the length of the condition array.
+    a : array_like
+        Array from which to extract a part.
+    axis : int, optional
+        Axis along which to take slices. If None (default), work on the
+        flattened array.
+    out : ndarray, optional
+        Output array.  Its type is preserved and it must be of the right
+        shape to hold the output.
+
+    Returns
+    -------
+    compressed_array : ndarray
+        A copy of `a` without the slices along axis for which `condition`
+        is false.
+
+    See Also
+    --------
+    take, choose, diag, diagonal, select
+    ndarray.compress : Equivalent method in ndarray
+    extract : Equivalent method when working on 1-D arrays
+    :ref:`ufuncs-output-type`
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4], [5, 6]])
+    >>> a
+    array([[1, 2],
+           [3, 4],
+           [5, 6]])
+    >>> np.compress([0, 1], a, axis=0)
+    array([[3, 4]])
+    >>> np.compress([False, True, True], a, axis=0)
+    array([[3, 4],
+           [5, 6]])
+    >>> np.compress([False, True], a, axis=1)
+    array([[2],
+           [4],
+           [6]])
+
+    Working on the flattened array does not return slices along an axis but
+    selects elements.
+
+    >>> np.compress([False, True], a)
+    array([2])
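+
+    For comparison, ``extract`` gives the same result on the flattened
+    array (a brief illustration):
+
+    >>> np.extract([False, True], a)
+    array([2])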
+
+    """
+    return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
+
+
+def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs):
+    return (a, a_min, a_max)
+
+
+@array_function_dispatch(_clip_dispatcher)
+def clip(a, a_min, a_max, out=None, **kwargs):
+    """
+    Clip (limit) the values in an array.
+
+    Given an interval, values outside the interval are clipped to
+    the interval edges.  For example, if an interval of ``[0, 1]``
+    is specified, values smaller than 0 become 0, and values larger
+    than 1 become 1.
+
+    Equivalent to but faster than ``np.minimum(a_max, np.maximum(a, a_min))``.
+
+    No check is performed to ensure ``a_min < a_max``.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing elements to clip.
+    a_min, a_max : array_like or None
+        Minimum and maximum value. If ``None``, clipping is not performed on
+        the corresponding edge. Only one of `a_min` and `a_max` may be
+        ``None``. Both are broadcast against `a`.
+    out : ndarray, optional
+        The results will be placed in this array. It may be the input
+        array for in-place clipping.  `out` must be of the right shape
+        to hold the output.  Its type is preserved.
+    **kwargs
+        For other keyword-only arguments, see the
+        :ref:`ufunc docs <ufuncs.kwargs>`.
+
+        .. versionadded:: 1.17.0
+
+    Returns
+    -------
+    clipped_array : ndarray
+        An array with the elements of `a`, but where values
+        < `a_min` are replaced with `a_min`, and those > `a_max`
+        with `a_max`.
+
+    See Also
+    --------
+    :ref:`ufuncs-output-type`
+
+    Notes
+    -----
+    When `a_min` is greater than `a_max`, `clip` returns an
+    array in which all values are equal to `a_max`,
+    as shown in the second example.
+
+    Examples
+    --------
+    >>> a = np.arange(10)
+    >>> a
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+    >>> np.clip(a, 1, 8)
+    array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
+    >>> np.clip(a, 8, 1)
+    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
+    >>> np.clip(a, 3, 6, out=a)
+    array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
+    >>> a
+    array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
+    >>> a = np.arange(10)
+    >>> a
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+    >>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
+    array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
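+
+    Passing ``None`` for one of the bounds clips on the other side only
+    (a brief illustration):
+
+    >>> np.clip(a, None, 7)
+    array([0, 1, 2, 3, 4, 5, 6, 7, 7, 7])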
+
+    """
+    return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs)
+
+
+def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+                    initial=None, where=None):
+    return (a, out)
+
+
+@array_function_dispatch(_sum_dispatcher)
+def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+        initial=np._NoValue, where=np._NoValue):
+    """
+    Sum of array elements over a given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Elements to sum.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which a sum is performed.  The default,
+        axis=None, will sum all of the elements of the input array.  If
+        axis is negative it counts from the last to the first axis.
+
+        .. versionadded:: 1.7.0
+
+        If axis is a tuple of ints, a sum is performed on all of the axes
+        specified in the tuple instead of a single axis or all the axes as
+        before.
+    dtype : dtype, optional
+        The type of the returned array and of the accumulator in which the
+        elements are summed.  The dtype of `a` is used by default unless `a`
+        has an integer dtype of less precision than the default platform
+        integer.  In that case, if `a` is signed then the platform integer
+        is used while if `a` is unsigned then an unsigned integer of the
+        same precision as the platform integer is used.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must have
+        the same shape as the expected output, but the type of the output
+        values will be cast if necessary.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the `sum` method of sub-classes of
+        `ndarray`; however, any non-default value will be.  If the
+        sub-class' method does not implement `keepdims`, any
+        exceptions will be raised.
+    initial : scalar, optional
+        Starting value for the sum. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.15.0
+
+    where : array_like of bool, optional
+        Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.17.0
+
+    Returns
+    -------
+    sum_along_axis : ndarray
+        An array with the same shape as `a`, with the specified
+        axis removed.   If `a` is a 0-d array, or if `axis` is None, a scalar
+        is returned.  If an output array is specified, a reference to
+        `out` is returned.
+
+    See Also
+    --------
+    ndarray.sum : Equivalent method.
+
+    add.reduce : Equivalent functionality of `add`.
+
+    cumsum : Cumulative sum of array elements.
+
+    trapz : Integration of array values using the composite trapezoidal rule.
+
+    mean, average
+
+    Notes
+    -----
+    Arithmetic is modular when using integer types, and no error is
+    raised on overflow.
+
+    The sum of an empty array is the neutral element 0:
+
+    >>> np.sum([])
+    0.0
+
+    For floating point numbers the numerical precision of sum (and
+    ``np.add.reduce``) is in general limited by directly adding each number
+    individually to the result causing rounding errors in every step.
+    However, often numpy will use a numerically better approach (partial
+    pairwise summation) leading to improved precision in many use-cases.
+    This improved precision is always provided when no ``axis`` is given.
+    When ``axis`` is given, it will depend on which axis is summed.
+    Technically, to provide the best speed possible, the improved precision
+    is only used when the summation is along the fast axis in memory.
+    Note that the exact precision may vary depending on other parameters.
+    In contrast to NumPy, Python's ``math.fsum`` function uses a slower but
+    more precise approach to summation.
+    Especially when summing a large number of lower precision floating point
+    numbers, such as ``float32``, numerical errors can become significant.
+    In such cases it can be advisable to use ``dtype="float64"`` to use
+    a higher precision for the output.
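+
+    A rough illustration of this effect (exact values are platform
+    dependent):
+
+    >>> x = np.full(1000000, 0.1, dtype=np.float32)
+    >>> np.sum(x)
+    100000.0 # may vary
+    >>> np.sum(x, dtype=np.float64)
+    100000.00149011612 # may vary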
+
+    Examples
+    --------
+    >>> np.sum([0.5, 1.5])
+    2.0
+    >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
+    1
+    >>> np.sum([[0, 1], [0, 5]])
+    6
+    >>> np.sum([[0, 1], [0, 5]], axis=0)
+    array([0, 6])
+    >>> np.sum([[0, 1], [0, 5]], axis=1)
+    array([1, 5])
+    >>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1)
+    array([1., 5.])
+
+    If the accumulator is too small, overflow occurs:
+
+    >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
+    -128
+
+    You can also start the sum with a value other than zero:
+
+    >>> np.sum([10], initial=5)
+    15
+    """
+    if isinstance(a, _gentype):
+        # 2018-02-25, 1.15.0
+        warnings.warn(
+            "Calling np.sum(generator) is deprecated, and in the future will give a different result. "
+            "Use np.sum(np.fromiter(generator)) or the python sum builtin instead.",
+            DeprecationWarning, stacklevel=2)
+
+        res = _sum_(a)
+        if out is not None:
+            out[...] = res
+            return out
+        return res
+
+    return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
+                          initial=initial, where=where)
+
+
+def _any_dispatcher(a, axis=None, out=None, keepdims=None, *,
+                    where=np._NoValue):
+    return (a, where, out)
+
+
+@array_function_dispatch(_any_dispatcher)
+def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
+    """
+    Test whether any array element along a given axis evaluates to True.
+
+    Returns a single boolean if `axis` is ``None``.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which a logical OR reduction is performed.
+        The default (``axis=None``) is to perform a logical OR over all
+        the dimensions of the input array. `axis` may be negative, in
+        which case it counts from the last to the first axis.
+
+        .. versionadded:: 1.7.0
+
+        If this is a tuple of ints, a reduction is performed on multiple
+        axes, instead of a single axis or all the axes as before.
+    out : ndarray, optional
+        Alternate output array in which to place the result.  It must have
+        the same shape as the expected output and its type is preserved
+        (e.g., if it is of type float, then it will remain so, returning
+        1.0 for True and 0.0 for False, regardless of the type of `a`).
+        See :ref:`ufuncs-output-type` for more details.
+
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the `any` method of sub-classes of
+        `ndarray`; however, any non-default value will be.  If the
+        sub-class' method does not implement `keepdims`, any
+        exceptions will be raised.
+
+    where : array_like of bool, optional
+        Elements to include in checking for any `True` values.
+        See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    any : bool or ndarray
+        A new boolean or `ndarray` is returned unless `out` is specified,
+        in which case a reference to `out` is returned.
+
+    See Also
+    --------
+    ndarray.any : equivalent method
+
+    all : Test whether all elements along a given axis evaluate to True.
+
+    Notes
+    -----
+    Not a Number (NaN), positive infinity and negative infinity evaluate
+    to `True` because these are not equal to zero.
+
+    Examples
+    --------
+    >>> np.any([[True, False], [True, True]])
+    True
+
+    >>> np.any([[True, False], [False, False]], axis=0)
+    array([ True, False])
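+
+    With ``keepdims=True`` the reduced axis is retained with size one,
+    so the result broadcasts against the input (a brief illustration):
+
+    >>> np.any([[True, False], [False, False]], axis=1, keepdims=True)
+    array([[ True],
+           [False]])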
+
+    >>> np.any([-1, 0, 5])
+    True
+
+    >>> np.any(np.nan)
+    True
+
+    >>> np.any([[True, False], [False, False]], where=[[False], [True]])
+    False
+
+    >>> o=np.array(False)
+    >>> z=np.any([-1, 4, 5], out=o)
+    >>> z, o
+    (array(True), array(True))
+    >>> # Check now that z is a reference to o
+    >>> z is o
+    True
+    >>> id(z), id(o) # identity of z and o              # doctest: +SKIP
+    (191614240, 191614240)
+
+    """
+    return _wrapreduction(a, np.logical_or, 'any', axis, None, out,
+                          keepdims=keepdims, where=where)
+
+
+def _all_dispatcher(a, axis=None, out=None, keepdims=None, *,
+                    where=None):
+    return (a, where, out)
+
+
+@array_function_dispatch(_all_dispatcher)
+def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
+    """
+    Test whether all array elements along a given axis evaluate to True.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which a logical AND reduction is performed.
+        The default (``axis=None``) is to perform a logical AND over all
+        the dimensions of the input array. `axis` may be negative, in
+        which case it counts from the last to the first axis.
+
+        .. versionadded:: 1.7.0
+
+        If this is a tuple of ints, a reduction is performed on multiple
+        axes, instead of a single axis or all the axes as before.
+    out : ndarray, optional
+        Alternate output array in which to place the result.
+        It must have the same shape as the expected output and its
+        type is preserved (e.g., if ``dtype(out)`` is float, the result
+        will consist of 0.0's and 1.0's). See :ref:`ufuncs-output-type` for more
+        details.
+
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the `all` method of sub-classes of
+        `ndarray`; however, any non-default value will be.  If the
+        sub-class' method does not implement `keepdims`, any
+        exceptions will be raised.
+
+    where : array_like of bool, optional
+        Elements to include in checking for all `True` values.
+        See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    all : ndarray, bool
+        A new boolean or array is returned unless `out` is specified,
+        in which case a reference to `out` is returned.
+
+    See Also
+    --------
+    ndarray.all : equivalent method
+
+    any : Test whether any element along a given axis evaluates to True.
+
+    Notes
+    -----
+    Not a Number (NaN), positive infinity and negative infinity
+    evaluate to `True` because these are not equal to zero.
+
+    Examples
+    --------
+    >>> np.all([[True,False],[True,True]])
+    False
+
+    >>> np.all([[True,False],[True,True]], axis=0)
+    array([ True, False])
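+
+    With ``keepdims=True`` the reduced axis is retained with size one
+    (a brief illustration):
+
+    >>> np.all([[True, False], [True, True]], axis=1, keepdims=True)
+    array([[False],
+           [ True]])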
+
+    >>> np.all([-1, 4, 5])
+    True
+
+    >>> np.all([1.0, np.nan])
+    True
+
+    >>> np.all([[True, True], [False, True]], where=[[True], [False]])
+    True
+
+    >>> o=np.array(False)
+    >>> z=np.all([-1, 4, 5], out=o)
+    >>> id(z), id(o), z
+    (28293632, 28293632, array(True)) # may vary
+
+    """
+    return _wrapreduction(a, np.logical_and, 'all', axis, None, out,
+                          keepdims=keepdims, where=where)
+
+
+def _cumsum_dispatcher(a, axis=None, dtype=None, out=None):
+    return (a, out)
+
+
+@array_function_dispatch(_cumsum_dispatcher)
+def cumsum(a, axis=None, dtype=None, out=None):
+    """
+    Return the cumulative sum of the elements along a given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        Axis along which the cumulative sum is computed. The default
+        (None) is to compute the cumsum over the flattened array.
+    dtype : dtype, optional
+        Type of the returned array and of the accumulator in which the
+        elements are summed.  If `dtype` is not specified, it defaults
+        to the dtype of `a`, unless `a` has an integer dtype with a
+        precision less than that of the default platform integer.  In
+        that case, the default platform integer is used.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output
+        but the type will be cast if necessary. See :ref:`ufuncs-output-type` for
+        more details.
+
+    Returns
+    -------
+    cumsum_along_axis : ndarray.
+        A new array holding the result is returned unless `out` is
+        specified, in which case a reference to `out` is returned. The
+        result has the same size as `a`, and the same shape as `a` if
+        `axis` is not None or `a` is a 1-d array.
+
+    See Also
+    --------
+    sum : Sum array elements.
+    trapz : Integration of array values using the composite trapezoidal rule.
+    diff : Calculate the n-th discrete difference along given axis.
+
+    Notes
+    -----
+    Arithmetic is modular when using integer types, and no error is
+    raised on overflow.
+
+    ``cumsum(a)[-1]`` may not be equal to ``sum(a)`` for floating-point
+    values since ``sum`` may use a pairwise summation routine, reducing
+    the roundoff-error. See `sum` for more information.
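+
+    ``diff`` recovers the original elements after the first one
+    (a brief illustration):
+
+    >>> x = np.array([1, 2, 4, 8])
+    >>> np.diff(np.cumsum(x))
+    array([2, 4, 8])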
+
+    Examples
+    --------
+    >>> a = np.array([[1,2,3], [4,5,6]])
+    >>> a
+    array([[1, 2, 3],
+           [4, 5, 6]])
+    >>> np.cumsum(a)
+    array([ 1,  3,  6, 10, 15, 21])
+    >>> np.cumsum(a, dtype=float)     # specifies type of output value(s)
+    array([  1.,   3.,   6.,  10.,  15.,  21.])
+
+    >>> np.cumsum(a,axis=0)      # sum over rows for each of the 3 columns
+    array([[1, 2, 3],
+           [5, 7, 9]])
+    >>> np.cumsum(a,axis=1)      # sum over columns for each of the 2 rows
+    array([[ 1,  3,  6],
+           [ 4,  9, 15]])
+
+    ``cumsum(b)[-1]`` may not be equal to ``sum(b)``:
+
+    >>> b = np.array([1, 2e-9, 3e-9] * 1000000)
+    >>> b.cumsum()[-1]
+    1000000.0050045159
+    >>> b.sum()
+    1000000.0050000029
+
+    """
+    return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
+
+
+def _ptp_dispatcher(a, axis=None, out=None, keepdims=None):
+    return (a, out)
+
+
+@array_function_dispatch(_ptp_dispatcher)
+def ptp(a, axis=None, out=None, keepdims=np._NoValue):
+    """
+    Range of values (maximum - minimum) along an axis.
+
+    The name of the function comes from the acronym for 'peak to peak'.
+
+    .. warning::
+        `ptp` preserves the data type of the array. This means the
+        return value for an input of signed integers with n bits
+        (e.g. `np.int8`, `np.int16`, etc) is also a signed integer
+        with n bits.  In that case, peak-to-peak values greater than
+        ``2**(n-1)-1`` will be returned as negative values. An example
+        with a work-around is shown below.
+
+    Parameters
+    ----------
+    a : array_like
+        Input values.
+    axis : None or int or tuple of ints, optional
+        Axis along which to find the peaks.  By default, flatten the
+        array.  `axis` may be negative, in
+        which case it counts from the last to the first axis.
+
+        .. versionadded:: 1.15.0
+
+        If this is a tuple of ints, a reduction is performed on multiple
+        axes, instead of a single axis or all the axes as before.
+    out : array_like
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output,
+        but the type of the output values will be cast if necessary.
+
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the `ptp` method of sub-classes of
+        `ndarray`; however, any non-default value will be.  If the
+        sub-class' method does not implement `keepdims`, any
+        exceptions will be raised.
+
+    Returns
+    -------
+    ptp : ndarray or scalar
+        The range of the given array: a scalar if `axis` is None,
+        otherwise a new array holding the result along the given axis.
+
+    Examples
+    --------
+    >>> x = np.array([[4, 9, 2, 10],
+    ...               [6, 9, 7, 12]])
+
+    >>> np.ptp(x, axis=1)
+    array([8, 6])
+
+    >>> np.ptp(x, axis=0)
+    array([2, 0, 5, 2])
+
+    >>> np.ptp(x)
+    10
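+
+    This is the same as ``np.max(x) - np.min(x)``, subject to the
+    integer-overflow caveat above (a brief illustration):
+
+    >>> np.max(x) - np.min(x)
+    10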
+
+    This example shows that a negative value can be returned when
+    the input is an array of signed integers.
+
+    >>> y = np.array([[1, 127],
+    ...               [0, 127],
+    ...               [-1, 127],
+    ...               [-2, 127]], dtype=np.int8)
+    >>> np.ptp(y, axis=1)
+    array([ 126,  127, -128, -127], dtype=int8)
+
+    A work-around is to use the `view()` method to view the result as
+    unsigned integers with the same bit width:
+
+    >>> np.ptp(y, axis=1).view(np.uint8)
+    array([126, 127, 128, 129], dtype=uint8)
+
+    """
+    kwargs = {}
+    if keepdims is not np._NoValue:
+        kwargs['keepdims'] = keepdims
+    if type(a) is not mu.ndarray:
+        try:
+            ptp = a.ptp
+        except AttributeError:
+            pass
+        else:
+            return ptp(axis=axis, out=out, **kwargs)
+    return _methods._ptp(a, axis=axis, out=out, **kwargs)
+
+
+def _max_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
+                    where=None):
+    return (a, out)
+
+
+@array_function_dispatch(_max_dispatcher)
+@set_module('numpy')
+def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+         where=np._NoValue):
+    """
+    Return the maximum of an array or maximum along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which to operate.  By default, flattened input is
+        used.
+
+        .. versionadded:: 1.7.0
+
+        If this is a tuple of ints, the maximum is selected over multiple axes,
+        instead of a single axis or all the axes as before.
+    out : ndarray, optional
+        Alternative output array in which to place the result.  Must
+        be of the same shape and buffer length as the expected output.
+        See :ref:`ufuncs-output-type` for more details.
+
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the ``max`` method of sub-classes of
+        `ndarray`; however, any non-default value will be.  If the
+        sub-class' method does not implement `keepdims`, any
+        exceptions will be raised.
+
+    initial : scalar, optional
+        The minimum value of an output element. Must be present to allow
+        computation on empty slice. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.15.0
+
+    where : array_like of bool, optional
+        Elements to compare for the maximum. See `~numpy.ufunc.reduce`
+        for details.
+
+        .. versionadded:: 1.17.0
+
+    Returns
+    -------
+    max : ndarray or scalar
+        Maximum of `a`. If `axis` is None, the result is a scalar value.
+        If `axis` is an int, the result is an array of dimension
+        ``a.ndim - 1``. If `axis` is a tuple, the result is an array of 
+        dimension ``a.ndim - len(axis)``.
+
+    See Also
+    --------
+    amin :
+        The minimum value of an array along a given axis, propagating any NaNs.
+    nanmax :
+        The maximum value of an array along a given axis, ignoring any NaNs.
+    maximum :
+        Element-wise maximum of two arrays, propagating any NaNs.
+    fmax :
+        Element-wise maximum of two arrays, ignoring any NaNs.
+    argmax :
+        Return the indices of the maximum values.
+
+    nanmin, minimum, fmin
+
+    Notes
+    -----
+    NaN values are propagated, that is if at least one item is NaN, the
+    corresponding max value will be NaN as well. To ignore NaN values
+    (MATLAB behavior), please use nanmax.
+
+    Don't use `~numpy.max` for element-wise comparison of 2 arrays; when
+    ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
+    ``max(a, axis=0)``.
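+
+    For instance, comparing two rows element-wise (a brief illustration):
+
+    >>> a = np.array([[1, 5], [4, 2]])
+    >>> np.maximum(a[0], a[1])
+    array([4, 5])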
+
+    Examples
+    --------
+    >>> a = np.arange(4).reshape((2,2))
+    >>> a
+    array([[0, 1],
+           [2, 3]])
+    >>> np.max(a)           # Maximum of the flattened array
+    3
+    >>> np.max(a, axis=0)   # Maxima along the first axis
+    array([2, 3])
+    >>> np.max(a, axis=1)   # Maxima along the second axis
+    array([1, 3])
+    >>> np.max(a, where=[False, True], initial=-1, axis=0)
+    array([-1,  3])
+    >>> b = np.arange(5, dtype=float)
+    >>> b[2] = np.NaN
+    >>> np.max(b)
+    nan
+    >>> np.max(b, where=~np.isnan(b), initial=-1)
+    4.0
+    >>> np.nanmax(b)
+    4.0
+
+    You can use an initial value to compute the maximum of an empty slice, or
+    to initialize it to a different value:
+
+    >>> np.max([[-50], [10]], axis=-1, initial=0)
+    array([ 0, 10])
+
+    Notice that the initial value is used as one of the elements for which
+    the maximum is determined, unlike the ``default`` argument of Python's
+    ``max`` function, which is only used for empty iterables.
+
+    >>> np.max([5], initial=6)
+    6
+    >>> max([5], default=6)
+    5
+    """
+    return _wrapreduction(a, np.maximum, 'max', axis, None, out,
+                          keepdims=keepdims, initial=initial, where=where)
+
+
+@array_function_dispatch(_max_dispatcher)
+def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+         where=np._NoValue):
+    """
+    Return the maximum of an array or maximum along an axis.
+
+    `amax` is an alias of `~numpy.max`.
+
+    See Also
+    --------
+    max : alias of this function
+    ndarray.max : equivalent method
+    """
+    return _wrapreduction(a, np.maximum, 'max', axis, None, out,
+                          keepdims=keepdims, initial=initial, where=where)
+
+
+def _min_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
+                    where=None):
+    return (a, out)
+
+
+@array_function_dispatch(_min_dispatcher)
+def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+        where=np._NoValue):
+    """
+    Return the minimum of an array or minimum along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which to operate.  By default, flattened input is
+        used.
+
+        .. versionadded:: 1.7.0
+
+        If this is a tuple of ints, the minimum is selected over multiple axes,
+        instead of a single axis or all the axes as before.
+    out : ndarray, optional
+        Alternative output array in which to place the result.  Must
+        be of the same shape and buffer length as the expected output.
+        See :ref:`ufuncs-output-type` for more details.
+
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the ``min`` method of sub-classes of
+        `ndarray`; however, any non-default value will be.  If the
+        sub-class' method does not implement `keepdims`, any
+        exceptions will be raised.
+
+    initial : scalar, optional
+        The maximum value of an output element. Must be present to allow
+        computation on empty slice. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.15.0
+
+    where : array_like of bool, optional
+        Elements to compare for the minimum. See `~numpy.ufunc.reduce`
+        for details.
+
+        .. versionadded:: 1.17.0
+
+    Returns
+    -------
+    min : ndarray or scalar
+        Minimum of `a`. If `axis` is None, the result is a scalar value.
+        If `axis` is an int, the result is an array of dimension
+        ``a.ndim - 1``.  If `axis` is a tuple, the result is an array of 
+        dimension ``a.ndim - len(axis)``.
+
+    See Also
+    --------
+    amax :
+        The maximum value of an array along a given axis, propagating any NaNs.
+    nanmin :
+        The minimum value of an array along a given axis, ignoring any NaNs.
+    minimum :
+        Element-wise minimum of two arrays, propagating any NaNs.
+    fmin :
+        Element-wise minimum of two arrays, ignoring any NaNs.
+    argmin :
+        Return the indices of the minimum values.
+
+    nanmax, maximum, fmax
+
+    Notes
+    -----
+    NaN values are propagated, that is if at least one item is NaN, the
+    corresponding min value will be NaN as well. To ignore NaN values
+    (MATLAB behavior), please use nanmin.
+
+    Don't use `~numpy.min` for element-wise comparison of 2 arrays; when
+    ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
+    ``min(a, axis=0)``.
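+
+    For instance (a brief illustration):
+
+    >>> a = np.array([[1, 5], [4, 2]])
+    >>> np.minimum(a[0], a[1])
+    array([1, 2])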
+
+    Examples
+    --------
+    >>> a = np.arange(4).reshape((2,2))
+    >>> a
+    array([[0, 1],
+           [2, 3]])
+    >>> np.min(a)           # Minimum of the flattened array
+    0
+    >>> np.min(a, axis=0)   # Minima along the first axis
+    array([0, 1])
+    >>> np.min(a, axis=1)   # Minima along the second axis
+    array([0, 2])
+    >>> np.min(a, where=[False, True], initial=10, axis=0)
+    array([10,  1])
+
+    >>> b = np.arange(5, dtype=float)
+    >>> b[2] = np.NaN
+    >>> np.min(b)
+    nan
+    >>> np.min(b, where=~np.isnan(b), initial=10)
+    0.0
+    >>> np.nanmin(b)
+    0.0
+
+    >>> np.min([[-50], [10]], axis=-1, initial=0)
+    array([-50,   0])
+
+    Notice that the initial value is used as one of the elements for which
+    the minimum is determined, unlike the ``default`` argument of Python's
+    ``min`` function, which is only used for empty iterables; the two are
+    not the same.
+
+    >>> np.min([6], initial=5)
+    5
+    >>> min([6], default=5)
+    6
+    """
+    return _wrapreduction(a, np.minimum, 'min', axis, None, out,
+                          keepdims=keepdims, initial=initial, where=where)
+
+
+@array_function_dispatch(_min_dispatcher)
+def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+         where=np._NoValue):
+    """
+    Return the minimum of an array or minimum along an axis.
+
+    `amin` is an alias of `~numpy.min`.
+
+    See Also
+    --------
+    min : alias of this function
+    ndarray.min : equivalent method
+    """
+    return _wrapreduction(a, np.minimum, 'min', axis, None, out,
+                          keepdims=keepdims, initial=initial, where=where)
+
+
+def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+                     initial=None, where=None):
+    return (a, out)
+
+
+@array_function_dispatch(_prod_dispatcher)
+def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+         initial=np._NoValue, where=np._NoValue):
+    """
+    Return the product of array elements over a given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which a product is performed.  The default,
+        axis=None, will calculate the product of all the elements in the
+        input array. If axis is negative it counts from the last to the
+        first axis.
+
+        .. versionadded:: 1.7.0
+
+        If axis is a tuple of ints, a product is performed on all of the
+        axes specified in the tuple instead of a single axis or all the
+        axes as before.
+    dtype : dtype, optional
+        The type of the returned array, as well as of the accumulator in
+        which the elements are multiplied.  The dtype of `a` is used by
+        default unless `a` has an integer dtype of less precision than the
+        default platform integer.  In that case, if `a` is signed then the
+        platform integer is used while if `a` is unsigned then an unsigned
+        integer of the same precision as the platform integer is used.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must have
+        the same shape as the expected output, but the type of the output
+        values will be cast if necessary.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left in the
+        result as dimensions with size one. With this option, the result
+        will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the `prod` method of sub-classes of
+        `ndarray`; however, any non-default value will be.  If the
+        sub-class' method does not implement `keepdims`, any
+        exceptions will be raised.
+    initial : scalar, optional
+        The starting value for this product. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.15.0
+
+    where : array_like of bool, optional
+        Elements to include in the product. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.17.0
+
+    Returns
+    -------
+    product_along_axis : ndarray, see `dtype` parameter above.
+        An array shaped as `a` but with the specified axis removed.
+        Returns a reference to `out` if specified.
+
+    See Also
+    --------
+    ndarray.prod : equivalent method
+    :ref:`ufuncs-output-type`
+
+    Notes
+    -----
+    Arithmetic is modular when using integer types, and no error is
+    raised on overflow.  That means that, on a 32-bit platform:
+
+    >>> x = np.array([536870910, 536870910, 536870910, 536870910])
+    >>> np.prod(x)
+    16 # may vary
+
+    The product of an empty array is the neutral element 1:
+
+    >>> np.prod([])
+    1.0
+
+    Examples
+    --------
+    By default, calculate the product of all elements:
+
+    >>> np.prod([1.,2.])
+    2.0
+
+    Even when the input array is two-dimensional:
+
+    >>> a = np.array([[1., 2.], [3., 4.]])
+    >>> np.prod(a)
+    24.0
+
+    But we can also specify the axis over which to multiply:
+
+    >>> np.prod(a, axis=1)
+    array([  2.,  12.])
+    >>> np.prod(a, axis=0)
+    array([3., 8.])
+
+    Or select specific elements to include:
+
+    >>> np.prod([1., np.nan, 3.], where=[True, False, True])
+    3.0
+
+    If the type of `x` is unsigned, then the output type is
+    the unsigned platform integer:
+
+    >>> x = np.array([1, 2, 3], dtype=np.uint8)
+    >>> np.prod(x).dtype == np.uint
+    True
+
+    If `x` is of a signed integer type, then the output type
+    is the default platform integer:
+
+    >>> x = np.array([1, 2, 3], dtype=np.int8)
+    >>> np.prod(x).dtype == int
+    True
+
+    You can also start the product with a value other than one:
+
+    >>> np.prod([1, 2], initial=5)
+    10
+    """
+    return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
+                          keepdims=keepdims, initial=initial, where=where)
+
+
+def _cumprod_dispatcher(a, axis=None, dtype=None, out=None):
+    return (a, out)
+
+
+@array_function_dispatch(_cumprod_dispatcher)
+def cumprod(a, axis=None, dtype=None, out=None):
+    """
+    Return the cumulative product of elements along a given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        Axis along which the cumulative product is computed.  By default
+        the input is flattened.
+    dtype : dtype, optional
+        Type of the returned array, as well as of the accumulator in which
+        the elements are multiplied.  If *dtype* is not specified, it
+        defaults to the dtype of `a`, unless `a` has an integer dtype with
+        a precision less than that of the default platform integer.  In
+        that case, the default platform integer is used instead.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output
+        but the type of the resulting values will be cast if necessary.
+
+    Returns
+    -------
+    cumprod : ndarray
+        A new array holding the result is returned unless `out` is
+        specified, in which case a reference to out is returned.
+
+    See Also
+    --------
+    :ref:`ufuncs-output-type`
+
+    Notes
+    -----
+    Arithmetic is modular when using integer types, and no error is
+    raised on overflow.
+
+    Examples
+    --------
+    >>> a = np.array([1,2,3])
+    >>> np.cumprod(a) # intermediate results 1, 1*2
+    ...               # total product 1*2*3 = 6
+    array([1, 2, 6])
+    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> np.cumprod(a, dtype=float) # specify type of output
+    array([   1.,    2.,    6.,   24.,  120.,  720.])
+
+    The cumulative product for each column (i.e., over the rows) of `a`:
+
+    >>> np.cumprod(a, axis=0)
+    array([[ 1,  2,  3],
+           [ 4, 10, 18]])
+
+    The cumulative product for each row (i.e. over the columns) of `a`:
+
+    >>> np.cumprod(a,axis=1)
+    array([[  1,   2,   6],
+           [  4,  20, 120]])
+
+    """
+    return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
+
+
+def _ndim_dispatcher(a):
+    return (a,)
+
+
+@array_function_dispatch(_ndim_dispatcher)
+def ndim(a):
+    """
+    Return the number of dimensions of an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.  If it is not already an ndarray, a conversion is
+        attempted.
+
+    Returns
+    -------
+    number_of_dimensions : int
+        The number of dimensions in `a`.  Scalars are zero-dimensional.
+
+    See Also
+    --------
+    ndarray.ndim : equivalent method
+    shape : dimensions of array
+    ndarray.shape : dimensions of array
+
+    Examples
+    --------
+    >>> np.ndim([[1,2,3],[4,5,6]])
+    2
+    >>> np.ndim(np.array([[1,2,3],[4,5,6]]))
+    2
+    >>> np.ndim(1)
+    0
+
+    """
+    try:
+        return a.ndim
+    except AttributeError:
+        return asarray(a).ndim
+
+
+def _size_dispatcher(a, axis=None):
+    return (a,)
+
+
+@array_function_dispatch(_size_dispatcher)
+def size(a, axis=None):
+    """
+    Return the number of elements along a given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : int, optional
+        Axis along which the elements are counted.  By default, give
+        the total number of elements.
+
+    Returns
+    -------
+    element_count : int
+        Number of elements along the specified axis.
+
+    See Also
+    --------
+    shape : dimensions of array
+    ndarray.shape : dimensions of array
+    ndarray.size : number of elements in array
+
+    Examples
+    --------
+    >>> a = np.array([[1,2,3],[4,5,6]])
+    >>> np.size(a)
+    6
+    >>> np.size(a,1)
+    3
+    >>> np.size(a,0)
+    2
+
+    """
+    if axis is None:
+        try:
+            return a.size
+        except AttributeError:
+            return asarray(a).size
+    else:
+        try:
+            return a.shape[axis]
+        except AttributeError:
+            return asarray(a).shape[axis]
+
+
+def _round_dispatcher(a, decimals=None, out=None):
+    return (a, out)
+
+
+@array_function_dispatch(_round_dispatcher)
+def round(a, decimals=0, out=None):
+    """
+    Evenly round to the given number of decimals.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    decimals : int, optional
+        Number of decimal places to round to (default: 0).  If
+        decimals is negative, it specifies the number of positions to
+        the left of the decimal point.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must have
+        the same shape as the expected output, but the type of the output
+        values will be cast if necessary. See :ref:`ufuncs-output-type` for more
+        details.
+
+    Returns
+    -------
+    rounded_array : ndarray
+        An array of the same type as `a`, containing the rounded values.
+        Unless `out` was specified, a new array is created.  A reference to
+        the result is returned.
+
+        The real and imaginary parts of complex numbers are rounded
+        separately.  The result of rounding a float is a float.
+
+    See Also
+    --------
+    ndarray.round : equivalent method
+    around : an alias for this function
+    ceil, fix, floor, rint, trunc
+
+
+    Notes
+    -----
+    For values exactly halfway between rounded decimal values, NumPy
+    rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
+    -0.5 and 0.5 round to 0.0, etc.
+
+    ``np.round`` uses a fast but sometimes inexact algorithm to round
+    floating-point datatypes. For positive `decimals` it is equivalent to
+    ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has
+    error due to the inexact representation of decimal fractions in the IEEE
+    floating point standard [1]_ and errors introduced when scaling by powers
+    of ten. For instance, note the extra "1" in the following:
+
+        >>> np.round(56294995342131.5, 3)
+        56294995342131.51
+
+    If your goal is to print such values with a fixed number of decimals, it is
+    preferable to use numpy's float printing routines to limit the number of
+    printed decimals:
+
+        >>> np.format_float_positional(56294995342131.5, precision=3)
+        '56294995342131.5'
+
+    The float printing routines use an accurate but much more computationally
+    demanding algorithm to compute the number of digits after the decimal
+    point.
+
+    Alternatively, Python's builtin `round` function uses a more accurate
+    but slower algorithm for 64-bit floating point values:
+
+        >>> round(56294995342131.5, 3)
+        56294995342131.5
+        >>> np.round(16.055, 2), round(16.055, 2)  # equals 16.0549999999999997
+        (16.06, 16.05)
+
+
+    References
+    ----------
+    .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
+           https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
+
+    Examples
+    --------
+    >>> np.round([0.37, 1.64])
+    array([0., 2.])
+    >>> np.round([0.37, 1.64], decimals=1)
+    array([0.4, 1.6])
+    >>> np.round([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
+    array([0., 2., 2., 4., 4.])
+    >>> np.round([1,2,3,11], decimals=1) # ndarray of ints is returned
+    array([ 1,  2,  3, 11])
+    >>> np.round([1,2,3,11], decimals=-1)
+    array([ 0,  0,  0, 10])
+
+    """
+    return _wrapfunc(a, 'round', decimals=decimals, out=out)
+
+
+@array_function_dispatch(_round_dispatcher)
+def around(a, decimals=0, out=None):
+    """
+    Round an array to the given number of decimals.
+
+    `around` is an alias of `~numpy.round`.
+
+    See Also
+    --------
+    ndarray.round : equivalent method
+    round : alias for this function
+    ceil, fix, floor, rint, trunc
+
+    """
+    return _wrapfunc(a, 'round', decimals=decimals, out=out)
+
+
+def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, *,
+                     where=None):
+    return (a, where, out)
+
+
+@array_function_dispatch(_mean_dispatcher)
+def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *,
+         where=np._NoValue):
+    """
+    Compute the arithmetic mean along the specified axis.
+
+    Returns the average of the array elements.  The average is taken over
+    the flattened array by default, otherwise over the specified axis.
+    `float64` intermediate and return values are used for integer inputs.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose mean is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which the means are computed. The default is to
+        compute the mean of the flattened array.
+
+        .. versionadded:: 1.7.0
+
+        If this is a tuple of ints, a mean is performed over multiple axes,
+        instead of a single axis or all the axes as before.
+    dtype : data-type, optional
+        Type to use in computing the mean.  For integer inputs, the default
+        is `float64`; for floating point inputs, it is the same as the
+        input dtype.
+    out : ndarray, optional
+        Alternate output array in which to place the result.  The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary.
+        See :ref:`ufuncs-output-type` for more details.
+
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the `mean` method of sub-classes of
+        `ndarray`; however, any non-default value will be.  If the
+        sub-class' method does not implement `keepdims`, any
+        exceptions will be raised.
+
+    where : array_like of bool, optional
+        Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    m : ndarray, see dtype parameter above
+        If `out=None`, returns a new array containing the mean values,
+        otherwise a reference to the output array is returned.
+
+    See Also
+    --------
+    average : Weighted average
+    std, var, nanmean, nanstd, nanvar
+
+    Notes
+    -----
+    The arithmetic mean is the sum of the elements along the axis divided
+    by the number of elements.
+
+    Note that for floating-point input, the mean is computed using the
+    same precision the input has.  Depending on the input data, this can
+    cause the results to be inaccurate, especially for `float32` (see
+    example below).  Specifying a higher-precision accumulator using the
+    `dtype` keyword can alleviate this issue.
+
+    By default, `float16` results are computed using `float32` intermediates
+    for extra precision.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> np.mean(a)
+    2.5
+    >>> np.mean(a, axis=0)
+    array([2., 3.])
+    >>> np.mean(a, axis=1)
+    array([1.5, 3.5])
+
+    In single precision, `mean` can be inaccurate:
+
+    >>> a = np.zeros((2, 512*512), dtype=np.float32)
+    >>> a[0, :] = 1.0
+    >>> a[1, :] = 0.1
+    >>> np.mean(a)
+    0.54999924
+
+    Computing the mean in float64 is more accurate:
+
+    >>> np.mean(a, dtype=np.float64)
+    0.55000000074505806 # may vary
+
+    Specifying a where argument:
+
+    >>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]])
+    >>> np.mean(a)
+    12.0
+    >>> np.mean(a, where=[[True], [False], [False]])
+    9.0
+
+    """
+    kwargs = {}
+    if keepdims is not np._NoValue:
+        kwargs['keepdims'] = keepdims
+    if where is not np._NoValue:
+        kwargs['where'] = where
+    if type(a) is not mu.ndarray:
+        try:
+            mean = a.mean
+        except AttributeError:
+            pass
+        else:
+            return mean(axis=axis, dtype=dtype, out=out, **kwargs)
+
+    return _methods._mean(a, axis=axis, dtype=dtype,
+                          out=out, **kwargs)
+
+
+def _std_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
+                    keepdims=None, *, where=None):
+    return (a, where, out)
+
+
+@array_function_dispatch(_std_dispatcher)
+def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
+        where=np._NoValue):
+    """
+    Compute the standard deviation along the specified axis.
+
+    Returns the standard deviation, a measure of the spread of a distribution,
+    of the array elements. The standard deviation is computed for the
+    flattened array by default, otherwise over the specified axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Calculate the standard deviation of these values.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which the standard deviation is computed. The
+        default is to compute the standard deviation of the flattened array.
+
+        .. versionadded:: 1.7.0
+
+        If this is a tuple of ints, a standard deviation is performed over
+        multiple axes, instead of a single axis or all the axes as before.
+    dtype : dtype, optional
+        Type to use in computing the standard deviation. For arrays of
+        integer type the default is float64, for arrays of float types it is
+        the same as the array type.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must have
+        the same shape as the expected output but the type (of the calculated
+        values) will be cast if necessary.
+    ddof : int, optional
+        Means Delta Degrees of Freedom.  The divisor used in calculations
+        is ``N - ddof``, where ``N`` represents the number of elements.
+        By default `ddof` is zero.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the `std` method of sub-classes of
+        `ndarray`; however, any non-default value will be.  If the
+        sub-class' method does not implement `keepdims`, any
+        exceptions will be raised.
+
+    where : array_like of bool, optional
+        Elements to include in the standard deviation.
+        See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    standard_deviation : ndarray, see dtype parameter above.
+        If `out` is None, return a new array containing the standard deviation,
+        otherwise return a reference to the output array.
+
+    See Also
+    --------
+    var, mean, nanmean, nanstd, nanvar
+    :ref:`ufuncs-output-type`
+
+    Notes
+    -----
+    The standard deviation is the square root of the average of the squared
+    deviations from the mean, i.e., ``std = sqrt(mean(x))``, where
+    ``x = abs(a - a.mean())**2``.
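+
+    A quick numerical check of this formula (a brief illustration):
+
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> np.sqrt(np.mean(np.abs(a - a.mean())**2))
+    1.1180339887498949 # may vary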
+
+    The average squared deviation is typically calculated as ``x.sum() / N``,
+    where ``N = len(x)``. If, however, `ddof` is specified, the divisor
+    ``N - ddof`` is used instead. In standard statistical practice, ``ddof=1``
+    provides an unbiased estimator of the variance of the infinite population.
+    ``ddof=0`` provides a maximum likelihood estimate of the variance for
+    normally distributed variables. The standard deviation computed in this
+    function is the square root of the estimated variance, so even with
+    ``ddof=1``, it will not be an unbiased estimate of the standard deviation
+    per se.
+
+    Note that, for complex numbers, `std` takes the absolute
+    value before squaring, so that the result is always real and nonnegative.
+
+    For floating-point input, the *std* is computed using the same
+    precision the input has. Depending on the input data, this can cause
+    the results to be inaccurate, especially for float32 (see example below).
+    Specifying a higher-accuracy accumulator using the `dtype` keyword can
+    alleviate this issue.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> np.std(a)
+    1.1180339887498949 # may vary
+    >>> np.std(a, axis=0)
+    array([1.,  1.])
+    >>> np.std(a, axis=1)
+    array([0.5,  0.5])
+
+    In single precision, std() can be inaccurate:
+
+    >>> a = np.zeros((2, 512*512), dtype=np.float32)
+    >>> a[0, :] = 1.0
+    >>> a[1, :] = 0.1
+    >>> np.std(a)
+    0.45000005
+
+    Computing the standard deviation in float64 is more accurate:
+
+    >>> np.std(a, dtype=np.float64)
+    0.44999999925494177 # may vary
+
+    Specifying a where argument:
+
+    >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
+    >>> np.std(a)
+    2.614064523559687 # may vary
+    >>> np.std(a, where=[[True], [True], [False]])
+    2.0
+
+    """
+    kwargs = {}
+    if keepdims is not np._NoValue:
+        kwargs['keepdims'] = keepdims
+    if where is not np._NoValue:
+        kwargs['where'] = where
+    if type(a) is not mu.ndarray:
+        try:
+            std = a.std
+        except AttributeError:
+            pass
+        else:
+            return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
+
+    return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+                         **kwargs)
+
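+
+# Illustrative sketch (not part of the NumPy source): the effect of ``ddof``
+# on ``np.std``. With ``ddof=0`` the divisor is N; with ``ddof=1`` it is
+# N - 1, giving the usual sample standard deviation (last digits may vary):
+#
+#     >>> a = np.array([1.0, 2.0, 3.0, 4.0])
+#     >>> np.std(a)              # divisor N = 4
+#     1.118033988749895
+#     >>> np.std(a, ddof=1)      # divisor N - 1 = 3
+#     1.2909944487358056
+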
+
+def _var_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
+                    keepdims=None, *, where=None):
+    return (a, where, out)
+
+
+@array_function_dispatch(_var_dispatcher)
+def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
+        where=np._NoValue):
+    """
+    Compute the variance along the specified axis.
+
+    Returns the variance of the array elements, a measure of the spread of a
+    distribution.  The variance is computed for the flattened array by
+    default, otherwise over the specified axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose variance is desired.  If `a` is not an
+        array, a conversion is attempted.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which the variance is computed.  The default is to
+        compute the variance of the flattened array.
+
+        .. versionadded:: 1.7.0
+
+        If this is a tuple of ints, a variance is performed over multiple axes,
+        instead of a single axis or all the axes as before.
+    dtype : data-type, optional
+        Type to use in computing the variance.  For arrays of integer type
+        the default is `float64`; for arrays of float types it is the same as
+        the array type.
+    out : ndarray, optional
+        Alternate output array in which to place the result.  It must have
+        the same shape as the expected output, but the type is cast if
+        necessary.
+    ddof : int, optional
+        "Delta Degrees of Freedom": the divisor used in the calculation is
+        ``N - ddof``, where ``N`` represents the number of elements. By
+        default `ddof` is zero.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the `var` method of sub-classes of
+        `ndarray`, however any non-default value will be.  If the
+        sub-class' method does not implement `keepdims`, any
+        exceptions will be raised.
+
+    where : array_like of bool, optional
+        Elements to include in the variance. See `~numpy.ufunc.reduce` for
+        details.
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    variance : ndarray, see dtype parameter above
+        If ``out=None``, returns a new array containing the variance;
+        otherwise, a reference to the output array is returned.
+
+    See Also
+    --------
+    std, mean, nanmean, nanstd, nanvar
+    :ref:`ufuncs-output-type`
+
+    Notes
+    -----
+    The variance is the average of the squared deviations from the mean,
+    i.e.,  ``var = mean(x)``, where ``x = abs(a - a.mean())**2``.
+
+    The mean is typically calculated as ``x.sum() / N``, where ``N = len(x)``.
+    If, however, `ddof` is specified, the divisor ``N - ddof`` is used
+    instead.  In standard statistical practice, ``ddof=1`` provides an
+    unbiased estimator of the variance of a hypothetical infinite population.
+    ``ddof=0`` provides a maximum likelihood estimate of the variance for
+    normally distributed variables.
+
+    Note that for complex numbers, the absolute value is taken before
+    squaring, so that the result is always real and nonnegative.
+
+    For floating-point input, the variance is computed using the same
+    precision the input has.  Depending on the input data, this can cause
+    the results to be inaccurate, especially for `float32` (see example
+    below).  Specifying a higher-accuracy accumulator using the ``dtype``
+    keyword can alleviate this issue.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> np.var(a)
+    1.25
+    >>> np.var(a, axis=0)
+    array([1.,  1.])
+    >>> np.var(a, axis=1)
+    array([0.25,  0.25])
+
+    In single precision, var() can be inaccurate:
+
+    >>> a = np.zeros((2, 512*512), dtype=np.float32)
+    >>> a[0, :] = 1.0
+    >>> a[1, :] = 0.1
+    >>> np.var(a)
+    0.20250003
+
+    Computing the variance in float64 is more accurate:
+
+    >>> np.var(a, dtype=np.float64)
+    0.20249999932944759 # may vary
+    >>> ((1-0.55)**2 + (0.1-0.55)**2)/2
+    0.2025
+
+    Specifying a where argument:
+
+    >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
+    >>> np.var(a)
+    6.833333333333333 # may vary
+    >>> np.var(a, where=[[True], [True], [False]])
+    4.0
+
+    """
+    kwargs = {}
+    if keepdims is not np._NoValue:
+        kwargs['keepdims'] = keepdims
+    if where is not np._NoValue:
+        kwargs['where'] = where
+
+    if type(a) is not mu.ndarray:
+        try:
+            var = a.var
+
+        except AttributeError:
+            pass
+        else:
+            return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
+
+    return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+                         **kwargs)
+
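+
+# Illustrative sketch (not part of the NumPy source): ``np.var`` uses the
+# same ``ddof`` convention as ``np.std``; the variance is the squared
+# standard deviation (last digits may vary):
+#
+#     >>> a = np.array([1.0, 2.0, 3.0, 4.0])
+#     >>> np.var(a)              # sum of squared deviations (5.0) / N
+#     1.25
+#     >>> np.var(a, ddof=1)      # 5.0 / (N - 1)
+#     1.6666666666666667
+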
+
+# Aliases of other functions. They are provided with unique docstrings
+# for reference purposes only. Wherever possible, avoid using them.
+
+
+def _round__dispatcher(a, decimals=None, out=None):
+    # 2023-02-28, 1.25.0
+    warnings.warn("`round_` is deprecated as of NumPy 1.25.0, and will be "
+                  "removed in NumPy 2.0. Please use `round` instead.",
+                  DeprecationWarning, stacklevel=3)
+    return (a, out)
+
+
+@array_function_dispatch(_round__dispatcher)
+def round_(a, decimals=0, out=None):
+    """
+    Round an array to the given number of decimals.
+
+    `~numpy.round_` is a discouraged backwards-compatibility
+    alias of `~numpy.around` and `~numpy.round`.
+
+    .. deprecated:: 1.25.0
+        ``round_`` is deprecated as of NumPy 1.25.0, and will be
+        removed in NumPy 2.0. Please use `round` instead.
+
+    See Also
+    --------
+    around : equivalent function; see for details.
+    """
+    return around(a, decimals=decimals, out=out)
+
+
+def _product_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+                        initial=None, where=None):
+    # 2023-03-02, 1.25.0
+    warnings.warn("`product` is deprecated as of NumPy 1.25.0, and will be "
+                  "removed in NumPy 2.0. Please use `prod` instead.",
+                  DeprecationWarning, stacklevel=3)
+    return (a, out)
+
+
+@array_function_dispatch(_product_dispatcher, verify=False)
+def product(*args, **kwargs):
+    """
+    Return the product of array elements over a given axis.
+
+    .. deprecated:: 1.25.0
+        ``product`` is deprecated as of NumPy 1.25.0, and will be
+        removed in NumPy 2.0. Please use `prod` instead.
+
+    See Also
+    --------
+    prod : equivalent function; see for details.
+    """
+    return prod(*args, **kwargs)
+
+
+def _cumproduct_dispatcher(a, axis=None, dtype=None, out=None):
+    # 2023-03-02, 1.25.0
+    warnings.warn("`cumproduct` is deprecated as of NumPy 1.25.0, and will be "
+                  "removed in NumPy 2.0. Please use `cumprod` instead.",
+                  DeprecationWarning, stacklevel=3)
+    return (a, out)
+
+
+@array_function_dispatch(_cumproduct_dispatcher, verify=False)
+def cumproduct(*args, **kwargs):
+    """
+    Return the cumulative product over the given axis.
+
+    .. deprecated:: 1.25.0
+        ``cumproduct`` is deprecated as of NumPy 1.25.0, and will be
+        removed in NumPy 2.0. Please use `cumprod` instead.
+
+    See Also
+    --------
+    cumprod : equivalent function; see for details.
+    """
+    return cumprod(*args, **kwargs)
+
+
+def _sometrue_dispatcher(a, axis=None, out=None, keepdims=None, *,
+                         where=np._NoValue):
+    # 2023-03-02, 1.25.0
+    warnings.warn("`sometrue` is deprecated as of NumPy 1.25.0, and will be "
+                  "removed in NumPy 2.0. Please use `any` instead.",
+                  DeprecationWarning, stacklevel=3)
+    return (a, where, out)
+
+
+@array_function_dispatch(_sometrue_dispatcher, verify=False)
+def sometrue(*args, **kwargs):
+    """
+    Check whether some values are true.
+
+    Refer to `any` for full documentation.
+
+    .. deprecated:: 1.25.0
+        ``sometrue`` is deprecated as of NumPy 1.25.0, and will be
+        removed in NumPy 2.0. Please use `any` instead.
+
+    See Also
+    --------
+    any : equivalent function; see for details.
+    """
+    return any(*args, **kwargs)
+
+
+def _alltrue_dispatcher(a, axis=None, out=None, keepdims=None, *, where=None):
+    # 2023-03-02, 1.25.0
+    warnings.warn("`alltrue` is deprecated as of NumPy 1.25.0, and will be "
+                  "removed in NumPy 2.0. Please use `all` instead.",
+                  DeprecationWarning, stacklevel=3)
+    return (a, where, out)
+
+
+@array_function_dispatch(_alltrue_dispatcher, verify=False)
+def alltrue(*args, **kwargs):
+    """
+    Check if all elements of input array are true.
+
+    .. deprecated:: 1.25.0
+        ``alltrue`` is deprecated as of NumPy 1.25.0, and will be
+        removed in NumPy 2.0. Please use `all` instead.
+
+    See Also
+    --------
+    numpy.all : Equivalent function; see for details.
+    """
+    return all(*args, **kwargs)
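+
+
+# Illustrative sketch (not part of the NumPy source): migrating off the
+# deprecated aliases above. Each alias maps one-to-one onto its replacement,
+# and the old name emits a DeprecationWarning when called.
+#
+#     >>> a = np.array([[1, 2], [3, 4]])
+#     >>> np.prod(a)         # instead of np.product(a)
+#     24
+#     >>> np.any(a > 3)      # instead of np.sometrue(a > 3)
+#     True
+#     >>> np.all(a > 0)      # instead of np.alltrue(a > 0)
+#     True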
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/fromnumeric.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/fromnumeric.pyi
new file mode 100644
index 00000000..5438b270
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/fromnumeric.pyi
@@ -0,0 +1,1060 @@
+import datetime as dt
+from collections.abc import Sequence
+from typing import Union, Any, overload, TypeVar, Literal, SupportsIndex
+
+from numpy import (
+    ndarray,
+    number,
+    uint64,
+    int_,
+    int64,
+    intp,
+    float16,
+    bool_,
+    floating,
+    complexfloating,
+    object_,
+    generic,
+    _OrderKACF,
+    _OrderACF,
+    _ModeKind,
+    _PartitionKind,
+    _SortKind,
+    _SortSide,
+    _CastingKind,
+)
+from numpy._typing import (
+    DTypeLike,
+    _DTypeLike,
+    ArrayLike,
+    _ArrayLike,
+    NDArray,
+    _ShapeLike,
+    _Shape,
+    _ArrayLikeBool_co,
+    _ArrayLikeUInt_co,
+    _ArrayLikeInt_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeComplex_co,
+    _ArrayLikeObject_co,
+    _IntLike_co,
+    _BoolLike_co,
+    _ComplexLike_co,
+    _NumberLike_co,
+    _ScalarLike_co,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+_SCT_uifcO = TypeVar("_SCT_uifcO", bound=number[Any] | object_)
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+__all__: list[str]
+
+@overload
+def take(
+    a: _ArrayLike[_SCT],
+    indices: _IntLike_co,
+    axis: None = ...,
+    out: None = ...,
+    mode: _ModeKind = ...,
+) -> _SCT: ...
+@overload
+def take(
+    a: ArrayLike,
+    indices: _IntLike_co,
+    axis: None | SupportsIndex = ...,
+    out: None = ...,
+    mode: _ModeKind = ...,
+) -> Any: ...
+@overload
+def take(
+    a: _ArrayLike[_SCT],
+    indices: _ArrayLikeInt_co,
+    axis: None | SupportsIndex = ...,
+    out: None = ...,
+    mode: _ModeKind = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def take(
+    a: ArrayLike,
+    indices: _ArrayLikeInt_co,
+    axis: None | SupportsIndex = ...,
+    out: None = ...,
+    mode: _ModeKind = ...,
+) -> NDArray[Any]: ...
+@overload
+def take(
+    a: ArrayLike,
+    indices: _ArrayLikeInt_co,
+    axis: None | SupportsIndex = ...,
+    out: _ArrayType = ...,
+    mode: _ModeKind = ...,
+) -> _ArrayType: ...
+
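+# Illustrative sketch (not part of the stub): how a type checker such as
+# mypy resolves the ``take`` overloads above, assuming
+# ``x: NDArray[np.float64]``.
+#
+#     reveal_type(np.take(x, 0))         # revealed: float64 (scalar index)
+#     reveal_type(np.take(x, [0, 1]))    # revealed: ndarray[Any, dtype[float64]]
+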
+@overload
+def reshape(
+    a: _ArrayLike[_SCT],
+    newshape: _ShapeLike,
+    order: _OrderACF = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def reshape(
+    a: ArrayLike,
+    newshape: _ShapeLike,
+    order: _OrderACF = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def choose(
+    a: _IntLike_co,
+    choices: ArrayLike,
+    out: None = ...,
+    mode: _ModeKind = ...,
+) -> Any: ...
+@overload
+def choose(
+    a: _ArrayLikeInt_co,
+    choices: _ArrayLike[_SCT],
+    out: None = ...,
+    mode: _ModeKind = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def choose(
+    a: _ArrayLikeInt_co,
+    choices: ArrayLike,
+    out: None = ...,
+    mode: _ModeKind = ...,
+) -> NDArray[Any]: ...
+@overload
+def choose(
+    a: _ArrayLikeInt_co,
+    choices: ArrayLike,
+    out: _ArrayType = ...,
+    mode: _ModeKind = ...,
+) -> _ArrayType: ...
+
+@overload
+def repeat(
+    a: _ArrayLike[_SCT],
+    repeats: _ArrayLikeInt_co,
+    axis: None | SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def repeat(
+    a: ArrayLike,
+    repeats: _ArrayLikeInt_co,
+    axis: None | SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+def put(
+    a: NDArray[Any],
+    ind: _ArrayLikeInt_co,
+    v: ArrayLike,
+    mode: _ModeKind = ...,
+) -> None: ...
+
+@overload
+def swapaxes(
+    a: _ArrayLike[_SCT],
+    axis1: SupportsIndex,
+    axis2: SupportsIndex,
+) -> NDArray[_SCT]: ...
+@overload
+def swapaxes(
+    a: ArrayLike,
+    axis1: SupportsIndex,
+    axis2: SupportsIndex,
+) -> NDArray[Any]: ...
+
+@overload
+def transpose(
+    a: _ArrayLike[_SCT],
+    axes: None | _ShapeLike = ...
+) -> NDArray[_SCT]: ...
+@overload
+def transpose(
+    a: ArrayLike,
+    axes: None | _ShapeLike = ...
+) -> NDArray[Any]: ...
+
+@overload
+def partition(
+    a: _ArrayLike[_SCT],
+    kth: _ArrayLikeInt_co,
+    axis: None | SupportsIndex = ...,
+    kind: _PartitionKind = ...,
+    order: None | str | Sequence[str] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def partition(
+    a: ArrayLike,
+    kth: _ArrayLikeInt_co,
+    axis: None | SupportsIndex = ...,
+    kind: _PartitionKind = ...,
+    order: None | str | Sequence[str] = ...,
+) -> NDArray[Any]: ...
+
+def argpartition(
+    a: ArrayLike,
+    kth: _ArrayLikeInt_co,
+    axis: None | SupportsIndex = ...,
+    kind: _PartitionKind = ...,
+    order: None | str | Sequence[str] = ...,
+) -> NDArray[intp]: ...
+
+@overload
+def sort(
+    a: _ArrayLike[_SCT],
+    axis: None | SupportsIndex = ...,
+    kind: None | _SortKind = ...,
+    order: None | str | Sequence[str] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def sort(
+    a: ArrayLike,
+    axis: None | SupportsIndex = ...,
+    kind: None | _SortKind = ...,
+    order: None | str | Sequence[str] = ...,
+) -> NDArray[Any]: ...
+
+def argsort(
+    a: ArrayLike,
+    axis: None | SupportsIndex = ...,
+    kind: None | _SortKind = ...,
+    order: None | str | Sequence[str] = ...,
+) -> NDArray[intp]: ...
+
+@overload
+def argmax(
+    a: ArrayLike,
+    axis: None = ...,
+    out: None = ...,
+    *,
+    keepdims: Literal[False] = ...,
+) -> intp: ...
+@overload
+def argmax(
+    a: ArrayLike,
+    axis: None | SupportsIndex = ...,
+    out: None = ...,
+    *,
+    keepdims: bool = ...,
+) -> Any: ...
+@overload
+def argmax(
+    a: ArrayLike,
+    axis: None | SupportsIndex = ...,
+    out: _ArrayType = ...,
+    *,
+    keepdims: bool = ...,
+) -> _ArrayType: ...
+
+@overload
+def argmin(
+    a: ArrayLike,
+    axis: None = ...,
+    out: None = ...,
+    *,
+    keepdims: Literal[False] = ...,
+) -> intp: ...
+@overload
+def argmin(
+    a: ArrayLike,
+    axis: None | SupportsIndex = ...,
+    out: None = ...,
+    *,
+    keepdims: bool = ...,
+) -> Any: ...
+@overload
+def argmin(
+    a: ArrayLike,
+    axis: None | SupportsIndex = ...,
+    out: _ArrayType = ...,
+    *,
+    keepdims: bool = ...,
+) -> _ArrayType: ...
+
+@overload
+def searchsorted(
+    a: ArrayLike,
+    v: _ScalarLike_co,
+    side: _SortSide = ...,
+    sorter: None | _ArrayLikeInt_co = ...,  # 1D int array
+) -> intp: ...
+@overload
+def searchsorted(
+    a: ArrayLike,
+    v: ArrayLike,
+    side: _SortSide = ...,
+    sorter: None | _ArrayLikeInt_co = ...,  # 1D int array
+) -> NDArray[intp]: ...
+
+@overload
+def resize(
+    a: _ArrayLike[_SCT],
+    new_shape: _ShapeLike,
+) -> NDArray[_SCT]: ...
+@overload
+def resize(
+    a: ArrayLike,
+    new_shape: _ShapeLike,
+) -> NDArray[Any]: ...
+
+@overload
+def squeeze(
+    a: _SCT,
+    axis: None | _ShapeLike = ...,
+) -> _SCT: ...
+@overload
+def squeeze(
+    a: _ArrayLike[_SCT],
+    axis: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def squeeze(
+    a: ArrayLike,
+    axis: None | _ShapeLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def diagonal(
+    a: _ArrayLike[_SCT],
+    offset: SupportsIndex = ...,
+    axis1: SupportsIndex = ...,
+    axis2: SupportsIndex = ...,  # >= 2D array
+) -> NDArray[_SCT]: ...
+@overload
+def diagonal(
+    a: ArrayLike,
+    offset: SupportsIndex = ...,
+    axis1: SupportsIndex = ...,
+    axis2: SupportsIndex = ...,  # >= 2D array
+) -> NDArray[Any]: ...
+
+@overload
+def trace(
+    a: ArrayLike,  # >= 2D array
+    offset: SupportsIndex = ...,
+    axis1: SupportsIndex = ...,
+    axis2: SupportsIndex = ...,
+    dtype: DTypeLike = ...,
+    out: None = ...,
+) -> Any: ...
+@overload
+def trace(
+    a: ArrayLike,  # >= 2D array
+    offset: SupportsIndex = ...,
+    axis1: SupportsIndex = ...,
+    axis2: SupportsIndex = ...,
+    dtype: DTypeLike = ...,
+    out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+@overload
+def ravel(a: _ArrayLike[_SCT], order: _OrderKACF = ...) -> NDArray[_SCT]: ...
+@overload
+def ravel(a: ArrayLike, order: _OrderKACF = ...) -> NDArray[Any]: ...
+
+def nonzero(a: ArrayLike) -> tuple[NDArray[intp], ...]: ...
+
+def shape(a: ArrayLike) -> _Shape: ...
+
+@overload
+def compress(
+    condition: _ArrayLikeBool_co,  # 1D bool array
+    a: _ArrayLike[_SCT],
+    axis: None | SupportsIndex = ...,
+    out: None = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def compress(
+    condition: _ArrayLikeBool_co,  # 1D bool array
+    a: ArrayLike,
+    axis: None | SupportsIndex = ...,
+    out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def compress(
+    condition: _ArrayLikeBool_co,  # 1D bool array
+    a: ArrayLike,
+    axis: None | SupportsIndex = ...,
+    out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+@overload
+def clip(
+    a: _SCT,
+    a_min: None | ArrayLike,
+    a_max: None | ArrayLike,
+    out: None = ...,
+    *,
+    dtype: None = ...,
+    where: None | _ArrayLikeBool_co = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    signature: str | tuple[None | str, ...] = ...,
+    extobj: list[Any] = ...,
+    casting: _CastingKind = ...,
+) -> _SCT: ...
+@overload
+def clip(
+    a: _ScalarLike_co,
+    a_min: None | ArrayLike,
+    a_max: None | ArrayLike,
+    out: None = ...,
+    *,
+    dtype: None = ...,
+    where: None | _ArrayLikeBool_co = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    signature: str | tuple[None | str, ...] = ...,
+    extobj: list[Any] = ...,
+    casting: _CastingKind = ...,
+) -> Any: ...
+@overload
+def clip(
+    a: _ArrayLike[_SCT],
+    a_min: None | ArrayLike,
+    a_max: None | ArrayLike,
+    out: None = ...,
+    *,
+    dtype: None = ...,
+    where: None | _ArrayLikeBool_co = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    signature: str | tuple[None | str, ...] = ...,
+    extobj: list[Any] = ...,
+    casting: _CastingKind = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def clip(
+    a: ArrayLike,
+    a_min: None | ArrayLike,
+    a_max: None | ArrayLike,
+    out: None = ...,
+    *,
+    dtype: None = ...,
+    where: None | _ArrayLikeBool_co = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    signature: str | tuple[None | str, ...] = ...,
+    extobj: list[Any] = ...,
+    casting: _CastingKind = ...,
+) -> NDArray[Any]: ...
+@overload
+def clip(
+    a: ArrayLike,
+    a_min: None | ArrayLike,
+    a_max: None | ArrayLike,
+    out: _ArrayType = ...,
+    *,
+    dtype: DTypeLike,
+    where: None | _ArrayLikeBool_co = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    signature: str | tuple[None | str, ...] = ...,
+    extobj: list[Any] = ...,
+    casting: _CastingKind = ...,
+) -> Any: ...
+@overload
+def clip(
+    a: ArrayLike,
+    a_min: None | ArrayLike,
+    a_max: None | ArrayLike,
+    out: _ArrayType,
+    *,
+    dtype: DTypeLike = ...,
+    where: None | _ArrayLikeBool_co = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    signature: str | tuple[None | str, ...] = ...,
+    extobj: list[Any] = ...,
+    casting: _CastingKind = ...,
+) -> _ArrayType: ...
+
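+# Illustrative sketch (not part of the stub): the ``clip`` overloads preserve
+# a known element type and fall back to ``Any`` / ``NDArray[Any]`` for plain
+# array-likes -- assuming ``x: NDArray[np.float64]``.
+#
+#     reveal_type(np.clip(x, 0.0, 1.0))      # revealed: ndarray[Any, dtype[float64]]
+#     reveal_type(np.clip([1, 2, 3], 0, 2))  # revealed: ndarray[Any, dtype[Any]]
+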
+@overload
+def sum(
+    a: _ArrayLike[_SCT],
+    axis: None = ...,
+    dtype: None = ...,
+    out: None = ...,
+    keepdims: bool = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def sum(
+    a: ArrayLike,
+    axis: None | _ShapeLike = ...,
+    dtype: DTypeLike = ...,
+    out: None = ...,
+    keepdims: bool = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def sum(
+    a: ArrayLike,
+    axis: None | _ShapeLike = ...,
+    dtype: DTypeLike = ...,
+    out: _ArrayType = ...,
+    keepdims: bool = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+@overload
+def all(
+    a: ArrayLike,
+    axis: None = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> bool_: ...
+@overload
+def all(
+    a: ArrayLike,
+    axis: None | _ShapeLike = ...,
+    out: None = ...,
+    keepdims: bool = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def all(
+    a: ArrayLike,
+    axis: None | _ShapeLike = ...,
+    out: _ArrayType = ...,
+    keepdims: bool = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+@overload
+def any(
+    a: ArrayLike,
+    axis: None = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> bool_: ...
+@overload
+def any(
+    a: ArrayLike,
+    axis: None | _ShapeLike = ...,
+    out: None = ...,
+    keepdims: bool = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def any(
+    a: ArrayLike,
+    axis: None | _ShapeLike = ...,
+    out: _ArrayType = ...,
+    keepdims: bool = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+@overload
+def cumsum(
+    a: _ArrayLike[_SCT],
+    axis: None | SupportsIndex = ...,
+    dtype: None = ...,
+    out: None = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def cumsum(
+    a: ArrayLike,
+    axis: None | SupportsIndex = ...,
+    dtype: None = ...,
+    out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def cumsum(
+    a: ArrayLike,
+    axis: None | SupportsIndex = ...,
+    dtype: _DTypeLike[_SCT] = ...,
+    out: None = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def cumsum(
+    a: ArrayLike,
+    axis: None | SupportsIndex = ...,
+    dtype: DTypeLike = ...,
+    out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def cumsum(
+    a: ArrayLike,
+    axis: None | SupportsIndex = ...,
+    dtype: DTypeLike = ...,
+    out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+@overload
+def ptp(
+    a: _ArrayLike[_SCT],
+    axis: None = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+) -> _SCT: ...
+@overload
+def ptp(
+    a: ArrayLike,
+    axis: None | _ShapeLike = ...,
+    out: None = ...,
+    keepdims: bool = ...,
+) -> Any: ...
+@overload
+def ptp(
+    a: ArrayLike,
+    axis: None | _ShapeLike = ...,
+    out: _ArrayType = ...,
+    keepdims: bool = ...,
+) -> _ArrayType: ...
+
+@overload
+def amax(
+    a: _ArrayLike[_SCT],
+    axis: None = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def amax(
+    a: ArrayLike,
+    axis: None | _ShapeLike = ...,
+    out: None = ...,
+    keepdims: bool = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def amax(
+    a: ArrayLike,
+    axis: None | _ShapeLike = ...,
+    out: _ArrayType = ...,
+    keepdims: bool = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+@overload
+def amin(
+    a: _ArrayLike[_SCT],
+    axis: None = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def amin(
+    a: ArrayLike,
+    axis: None | _ShapeLike = ...,
+    out: None = ...,
+    keepdims: bool = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def amin(
+    a: ArrayLike,
+    axis: None | _ShapeLike = ...,
+    out: _ArrayType = ...,
+    keepdims: bool = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+# TODO: ``np.prod()``: For object arrays `initial` does not necessarily
+# have to be a numerical scalar.
+# The only requirement is that it is compatible
+# with the `.__mul__()` method(s) of the passed array's elements.
+
+# Note that the same situation holds for all wrappers around
+# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`).
+@overload
+def prod(
+    a: _ArrayLikeBool_co,
+    axis: None = ...,
+    dtype: None = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> int_: ...
+@overload
+def prod(
+    a: _ArrayLikeUInt_co,
+    axis: None = ...,
+    dtype: None = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> uint64: ...
+@overload
+def prod(
+    a: _ArrayLikeInt_co,
+    axis: None = ...,
+    dtype: None = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> int64: ...
+@overload
+def prod(
+    a: _ArrayLikeFloat_co,
+    axis: None = ...,
+    dtype: None = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> floating[Any]: ...
+@overload
+def prod(
+    a: _ArrayLikeComplex_co,
+    axis: None = ...,
+    dtype: None = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def prod(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    dtype: None = ...,
+    out: None = ...,
+    keepdims: bool = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def prod(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None = ...,
+    dtype: _DTypeLike[_SCT] = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def prod(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    dtype: None | DTypeLike = ...,
+    out: None = ...,
+    keepdims: bool = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def prod(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    dtype: None | DTypeLike = ...,
+    out: _ArrayType = ...,
+    keepdims: bool = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
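+# Illustrative sketch (not part of the stub): the TODO above in practice.
+# For object arrays, ``initial`` only needs to be ``__mul__``-compatible
+# with the elements, e.g. a fractions.Fraction:
+#
+#     >>> from fractions import Fraction
+#     >>> a = np.array([Fraction(1, 2), Fraction(2, 3)], dtype=object)
+#     >>> np.prod(a, initial=Fraction(3, 1))
+#     Fraction(1, 1)
+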
+@overload
+def cumprod(
+    a: _ArrayLikeBool_co,
+    axis: None | SupportsIndex = ...,
+    dtype: None = ...,
+    out: None = ...,
+) -> NDArray[int_]: ...
+@overload
+def cumprod(
+    a: _ArrayLikeUInt_co,
+    axis: None | SupportsIndex = ...,
+    dtype: None = ...,
+    out: None = ...,
+) -> NDArray[uint64]: ...
+@overload
+def cumprod(
+    a: _ArrayLikeInt_co,
+    axis: None | SupportsIndex = ...,
+    dtype: None = ...,
+    out: None = ...,
+) -> NDArray[int64]: ...
+@overload
+def cumprod(
+    a: _ArrayLikeFloat_co,
+    axis: None | SupportsIndex = ...,
+    dtype: None = ...,
+    out: None = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def cumprod(
+    a: _ArrayLikeComplex_co,
+    axis: None | SupportsIndex = ...,
+    dtype: None = ...,
+    out: None = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def cumprod(
+    a: _ArrayLikeObject_co,
+    axis: None | SupportsIndex = ...,
+    dtype: None = ...,
+    out: None = ...,
+) -> NDArray[object_]: ...
+@overload
+def cumprod(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | SupportsIndex = ...,
+    dtype: _DTypeLike[_SCT] = ...,
+    out: None = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def cumprod(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | SupportsIndex = ...,
+    dtype: DTypeLike = ...,
+    out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def cumprod(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | SupportsIndex = ...,
+    dtype: DTypeLike = ...,
+    out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+def ndim(a: ArrayLike) -> int: ...
+
+def size(a: ArrayLike, axis: None | int = ...) -> int: ...
+
+@overload
+def around(
+    a: _BoolLike_co,
+    decimals: SupportsIndex = ...,
+    out: None = ...,
+) -> float16: ...
+@overload
+def around(
+    a: _SCT_uifcO,
+    decimals: SupportsIndex = ...,
+    out: None = ...,
+) -> _SCT_uifcO: ...
+@overload
+def around(
+    a: _ComplexLike_co | object_,
+    decimals: SupportsIndex = ...,
+    out: None = ...,
+) -> Any: ...
+@overload
+def around(
+    a: _ArrayLikeBool_co,
+    decimals: SupportsIndex = ...,
+    out: None = ...,
+) -> NDArray[float16]: ...
+@overload
+def around(
+    a: _ArrayLike[_SCT_uifcO],
+    decimals: SupportsIndex = ...,
+    out: None = ...,
+) -> NDArray[_SCT_uifcO]: ...
+@overload
+def around(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    decimals: SupportsIndex = ...,
+    out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def around(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    decimals: SupportsIndex = ...,
+    out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+@overload
+def mean(
+    a: _ArrayLikeFloat_co,
+    axis: None = ...,
+    dtype: None = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> floating[Any]: ...
+@overload
+def mean(
+    a: _ArrayLikeComplex_co,
+    axis: None = ...,
+    dtype: None = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def mean(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    dtype: None = ...,
+    out: None = ...,
+    keepdims: bool = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def mean(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None = ...,
+    dtype: _DTypeLike[_SCT] = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def mean(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    dtype: DTypeLike = ...,
+    out: None = ...,
+    keepdims: bool = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def mean(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    dtype: DTypeLike = ...,
+    out: _ArrayType = ...,
+    keepdims: bool = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+@overload
+def std(
+    a: _ArrayLikeComplex_co,
+    axis: None = ...,
+    dtype: None = ...,
+    out: None = ...,
+    ddof: float = ...,
+    keepdims: Literal[False] = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> floating[Any]: ...
+@overload
+def std(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    dtype: None = ...,
+    out: None = ...,
+    ddof: float = ...,
+    keepdims: bool = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def std(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None = ...,
+    dtype: _DTypeLike[_SCT] = ...,
+    out: None = ...,
+    ddof: float = ...,
+    keepdims: Literal[False] = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def std(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    dtype: DTypeLike = ...,
+    out: None = ...,
+    ddof: float = ...,
+    keepdims: bool = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def std(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    dtype: DTypeLike = ...,
+    out: _ArrayType = ...,
+    ddof: float = ...,
+    keepdims: bool = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+@overload
+def var(
+    a: _ArrayLikeComplex_co,
+    axis: None = ...,
+    dtype: None = ...,
+    out: None = ...,
+    ddof: float = ...,
+    keepdims: Literal[False] = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> floating[Any]: ...
+@overload
+def var(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    dtype: None = ...,
+    out: None = ...,
+    ddof: float = ...,
+    keepdims: bool = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def var(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None = ...,
+    dtype: _DTypeLike[_SCT] = ...,
+    out: None = ...,
+    ddof: float = ...,
+    keepdims: Literal[False] = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def var(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    dtype: DTypeLike = ...,
+    out: None = ...,
+    ddof: float = ...,
+    keepdims: bool = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def var(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    dtype: DTypeLike = ...,
+    out: _ArrayType = ...,
+    ddof: float = ...,
+    keepdims: bool = ...,
+    *,
+    where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+max = amax
+min = amin
+round = around
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/function_base.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/function_base.py
new file mode 100644
index 00000000..00e4e6b0
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/function_base.py
@@ -0,0 +1,551 @@
+import functools
+import warnings
+import operator
+import types
+
+import numpy as np
+from . import numeric as _nx
+from .numeric import result_type, NaN, asanyarray, ndim
+from numpy.core.multiarray import add_docstring
+from numpy.core import overrides
+
+__all__ = ['logspace', 'linspace', 'geomspace']
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
+def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None,
+                         dtype=None, axis=None):
+    return (start, stop)
+
+
+@array_function_dispatch(_linspace_dispatcher)
+def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
+             axis=0):
+    """
+    Return evenly spaced numbers over a specified interval.
+
+    Returns `num` evenly spaced samples, calculated over the
+    interval [`start`, `stop`].
+
+    The endpoint of the interval can optionally be excluded.
+
+    .. versionchanged:: 1.16.0
+        Non-scalar `start` and `stop` are now supported.
+
+    .. versionchanged:: 1.20.0
+        Values are rounded towards ``-inf`` instead of ``0`` when an
+        integer ``dtype`` is specified. The old behavior can
+        still be obtained with ``np.linspace(start, stop, num).astype(int)``
+
+    Parameters
+    ----------
+    start : array_like
+        The starting value of the sequence.
+    stop : array_like
+        The end value of the sequence, unless `endpoint` is set to False.
+        In that case, the sequence consists of all but the last of ``num + 1``
+        evenly spaced samples, so that `stop` is excluded.  Note that the step
+        size changes when `endpoint` is False.
+    num : int, optional
+        Number of samples to generate. Default is 50. Must be non-negative.
+    endpoint : bool, optional
+        If True, `stop` is the last sample. Otherwise, it is not included.
+        Default is True.
+    retstep : bool, optional
+        If True, return (`samples`, `step`), where `step` is the spacing
+        between samples.
+    dtype : dtype, optional
+        The type of the output array.  If `dtype` is not given, the data type
+        is inferred from `start` and `stop`. The inferred dtype will never be
+        an integer; `float` is chosen even if the arguments would produce an
+        array of integers.
+
+        .. versionadded:: 1.9.0
+
+    axis : int, optional
+        The axis in the result to store the samples.  Relevant only if start
+        or stop are array-like.  By default (0), the samples will be along a
+        new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+        .. versionadded:: 1.16.0
+
+    Returns
+    -------
+    samples : ndarray
+        There are `num` equally spaced samples in the closed interval
+        ``[start, stop]`` or the half-open interval ``[start, stop)``
+        (depending on whether `endpoint` is True or False).
+    step : float, optional
+        Only returned if `retstep` is True
+
+        Size of spacing between samples.
+
+
+    See Also
+    --------
+    arange : Similar to `linspace`, but uses a step size (instead of the
+             number of samples).
+    geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
+                scale (a geometric progression).
+    logspace : Similar to `geomspace`, but with the end points specified as
+               logarithms.
+    :ref:`how-to-partition`
+
+    Examples
+    --------
+    >>> np.linspace(2.0, 3.0, num=5)
+    array([2.  , 2.25, 2.5 , 2.75, 3.  ])
+    >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
+    array([2. ,  2.2,  2.4,  2.6,  2.8])
+    >>> np.linspace(2.0, 3.0, num=5, retstep=True)
+    (array([2.  ,  2.25,  2.5 ,  2.75,  3.  ]), 0.25)
+
+    Graphical illustration:
+
+    >>> import matplotlib.pyplot as plt
+    >>> N = 8
+    >>> y = np.zeros(N)
+    >>> x1 = np.linspace(0, 10, N, endpoint=True)
+    >>> x2 = np.linspace(0, 10, N, endpoint=False)
+    >>> plt.plot(x1, y, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.plot(x2, y + 0.5, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.ylim([-0.5, 1])
+    (-0.5, 1)
+    >>> plt.show()
+
+    """
+    num = operator.index(num)
+    if num < 0:
+        raise ValueError("Number of samples, %s, must be non-negative." % num)
+    div = (num - 1) if endpoint else num
+
+    # Convert float/complex array scalars to float, gh-3504
+    # and make sure one can use variables that have an __array_interface__, gh-6634
+    start = asanyarray(start) * 1.0
+    stop  = asanyarray(stop)  * 1.0
+
+    dt = result_type(start, stop, float(num))
+    if dtype is None:
+        dtype = dt
+        integer_dtype = False
+    else:
+        integer_dtype = _nx.issubdtype(dtype, _nx.integer)
+
+    delta = stop - start
+    y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta))
+    # In-place multiplication y *= delta/div is faster, but prevents the
+    # multiplicand from overriding what class is produced, and thus prevents,
+    # e.g., use of Quantities; see gh-7142. Hence, we multiply in place only
+    # for standard scalar types.
+    if div > 0:
+        _mult_inplace = _nx.isscalar(delta)
+        step = delta / div
+        any_step_zero = (
+            step == 0 if _mult_inplace else _nx.asanyarray(step == 0).any())
+        if any_step_zero:
+            # Special handling for denormal numbers, gh-5437
+            y /= div
+            if _mult_inplace:
+                y *= delta
+            else:
+                y = y * delta
+        else:
+            if _mult_inplace:
+                y *= step
+            else:
+                y = y * step
+    else:
+        # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0)
+        # have an undefined step
+        step = NaN
+        # Multiply with delta to allow possible override of output class.
+        y = y * delta
+
+    y += start
+
+    if endpoint and num > 1:
+        y[-1, ...] = stop
+
+    if axis != 0:
+        y = _nx.moveaxis(y, 0, axis)
+
+    if integer_dtype:
+        _nx.floor(y, out=y)
+
+    if retstep:
+        return y.astype(dtype, copy=False), step
+    else:
+        return y.astype(dtype, copy=False)
+
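+
+# Illustrative sketch (not part of the NumPy source): the 1.20.0 rounding
+# change noted in the docstring -- integer dtypes now round towards ``-inf``
+# (floor) instead of truncating towards zero.
+#
+#     >>> np.linspace(-1, 1, 5)
+#     array([-1. , -0.5,  0. ,  0.5,  1. ])
+#     >>> np.linspace(-1, 1, 5, dtype=int)   # floor(-0.5) == -1
+#     array([-1, -1,  0,  0,  1])
+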
+
+def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None,
+                         dtype=None, axis=None):
+    return (start, stop, base)
+
+
+@array_function_dispatch(_logspace_dispatcher)
+def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
+             axis=0):
+    """
+    Return numbers spaced evenly on a log scale.
+
+    In linear space, the sequence starts at ``base ** start``
+    (`base` to the power of `start`) and ends with ``base ** stop``
+    (see `endpoint` below).
+
+    .. versionchanged:: 1.16.0
+        Non-scalar `start` and `stop` are now supported.
+
+    .. versionchanged:: 1.25.0
+        Non-scalar `base` is now supported.
+
+    Parameters
+    ----------
+    start : array_like
+        ``base ** start`` is the starting value of the sequence.
+    stop : array_like
+        ``base ** stop`` is the final value of the sequence, unless `endpoint`
+        is False.  In that case, ``num + 1`` values are spaced over the
+        interval in log-space, of which all but the last (a sequence of
+        length `num`) are returned.
+    num : integer, optional
+        Number of samples to generate.  Default is 50.
+    endpoint : boolean, optional
+        If true, `stop` is the last sample. Otherwise, it is not included.
+        Default is True.
+    base : array_like, optional
+        The base of the log space. The step size between the elements in
+        ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
+        Default is 10.0.
+    dtype : dtype, optional
+        The type of the output array.  If `dtype` is not given, the data type
+        is inferred from `start` and `stop`. The inferred type will never be
+        an integer; `float` is chosen even if the arguments would produce an
+        array of integers.
+    axis : int, optional
+        The axis in the result to store the samples.  Relevant only if start,
+        stop, or base are array-like.  By default (0), the samples will be
+        along a new axis inserted at the beginning. Use -1 to get an axis at
+        the end.
+
+        .. versionadded:: 1.16.0
+
+
+    Returns
+    -------
+    samples : ndarray
+        `num` samples, equally spaced on a log scale.
+
+    See Also
+    --------
+    arange : Similar to linspace, with the step size specified instead of the
+             number of samples. Note that, when used with a float endpoint, the
+             endpoint may or may not be included.
+    linspace : Similar to logspace, but with the samples uniformly distributed
+               in linear space, instead of log space.
+    geomspace : Similar to logspace, but with endpoints specified directly.
+    :ref:`how-to-partition`
+
+    Notes
+    -----
+    If base is a scalar, logspace is equivalent to the code
+
+    >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
+    ... # doctest: +SKIP
+    >>> power(base, y).astype(dtype)
+    ... # doctest: +SKIP
+
+    Examples
+    --------
+    >>> np.logspace(2.0, 3.0, num=4)
+    array([ 100.        ,  215.443469  ,  464.15888336, 1000.        ])
+    >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
+    array([100.        ,  177.827941  ,  316.22776602,  562.34132519])
+    >>> np.logspace(2.0, 3.0, num=4, base=2.0)
+    array([4.        ,  5.0396842 ,  6.34960421,  8.        ])
+    >>> np.logspace(2.0, 3.0, num=4, base=[2.0, 3.0], axis=-1)
+    array([[ 4.        ,  5.0396842 ,  6.34960421,  8.        ],
+           [ 9.        , 12.98024613, 18.72075441, 27.        ]])
+
+    Graphical illustration:
+
+    >>> import matplotlib.pyplot as plt
+    >>> N = 10
+    >>> x1 = np.logspace(0.1, 1, N, endpoint=True)
+    >>> x2 = np.logspace(0.1, 1, N, endpoint=False)
+    >>> y = np.zeros(N)
+    >>> plt.plot(x1, y, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.plot(x2, y + 0.5, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.ylim([-0.5, 1])
+    (-0.5, 1)
+    >>> plt.show()
+
+    """
+    ndmax = np.broadcast(start, stop, base).ndim
+    start, stop, base = (
+        np.array(a, copy=False, subok=True, ndmin=ndmax)
+        for a in (start, stop, base)
+    )
+    y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
+    base = np.expand_dims(base, axis=axis)
+    if dtype is None:
+        return _nx.power(base, y)
+    return _nx.power(base, y).astype(dtype, copy=False)
+
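+
+# Illustrative sketch (not part of the NumPy source): verifying the
+# scalar-base equivalence stated in the Notes section of ``logspace``.
+#
+#     >>> y = np.linspace(2.0, 3.0, num=4)
+#     >>> np.allclose(np.logspace(2.0, 3.0, num=4), np.power(10.0, y))
+#     True
+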
+
+def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,
+                          axis=None):
+    return (start, stop)
+
+
+@array_function_dispatch(_geomspace_dispatcher)
+def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
+    """
+    Return numbers spaced evenly on a log scale (a geometric progression).
+
+    This is similar to `logspace`, but with endpoints specified directly.
+    Each output sample is a constant multiple of the previous.
+
+    .. versionchanged:: 1.16.0
+        Non-scalar `start` and `stop` are now supported.
+
+    Parameters
+    ----------
+    start : array_like
+        The starting value of the sequence.
+    stop : array_like
+        The final value of the sequence, unless `endpoint` is False.
+        In that case, ``num + 1`` values are spaced over the
+        interval in log-space, of which all but the last (a sequence of
+        length `num`) are returned.
+    num : integer, optional
+        Number of samples to generate.  Default is 50.
+    endpoint : boolean, optional
+        If true, `stop` is the last sample. Otherwise, it is not included.
+        Default is True.
+    dtype : dtype, optional
+        The type of the output array.  If `dtype` is not given, the data type
+        is inferred from `start` and `stop`. The inferred dtype will never be
+        an integer; `float` is chosen even if the arguments would produce an
+        array of integers.
+    axis : int, optional
+        The axis in the result to store the samples.  Relevant only if start
+        or stop are array-like.  By default (0), the samples will be along a
+        new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+        .. versionadded:: 1.16.0
+
+    Returns
+    -------
+    samples : ndarray
+        `num` samples, equally spaced on a log scale.
+
+    See Also
+    --------
+    logspace : Similar to geomspace, but with endpoints specified using log
+               and base.
+    linspace : Similar to geomspace, but with arithmetic instead of geometric
+               progression.
+    arange : Similar to linspace, with the step size specified instead of the
+             number of samples.
+    :ref:`how-to-partition`
+
+    Notes
+    -----
+    If the inputs or dtype are complex, the output will follow a logarithmic
+    spiral in the complex plane.  (There are an infinite number of spirals
+    passing through two points; the output will follow the shortest such path.)
+
+    Examples
+    --------
+    >>> np.geomspace(1, 1000, num=4)
+    array([    1.,    10.,   100.,  1000.])
+    >>> np.geomspace(1, 1000, num=3, endpoint=False)
+    array([   1.,   10.,  100.])
+    >>> np.geomspace(1, 1000, num=4, endpoint=False)
+    array([   1.        ,    5.62341325,   31.6227766 ,  177.827941  ])
+    >>> np.geomspace(1, 256, num=9)
+    array([   1.,    2.,    4.,    8.,   16.,   32.,   64.,  128.,  256.])
+
+    Note that the above may not produce exact integers:
+
+    >>> np.geomspace(1, 256, num=9, dtype=int)
+    array([  1,   2,   4,   7,  16,  32,  63, 127, 256])
+    >>> np.around(np.geomspace(1, 256, num=9)).astype(int)
+    array([  1,   2,   4,   8,  16,  32,  64, 128, 256])
+
+    Negative, decreasing, and complex inputs are allowed:
+
+    >>> np.geomspace(1000, 1, num=4)
+    array([1000.,  100.,   10.,    1.])
+    >>> np.geomspace(-1000, -1, num=4)
+    array([-1000.,  -100.,   -10.,    -1.])
+    >>> np.geomspace(1j, 1000j, num=4)  # Straight line
+    array([0.   +1.j, 0.  +10.j, 0. +100.j, 0.+1000.j])
+    >>> np.geomspace(-1+0j, 1+0j, num=5)  # Circle
+    array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j,
+            6.12323400e-17+1.00000000e+00j,  7.07106781e-01+7.07106781e-01j,
+            1.00000000e+00+0.00000000e+00j])
+
+    Graphical illustration of `endpoint` parameter:
+
+    >>> import matplotlib.pyplot as plt
+    >>> N = 10
+    >>> y = np.zeros(N)
+    >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.axis([0.5, 2000, 0, 3])
+    [0.5, 2000, 0, 3]
+    >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
+    >>> plt.show()
+
+    """
+    start = asanyarray(start)
+    stop = asanyarray(stop)
+    if _nx.any(start == 0) or _nx.any(stop == 0):
+        raise ValueError('Geometric sequence cannot include zero')
+
+    dt = result_type(start, stop, float(num), _nx.zeros((), dtype))
+    if dtype is None:
+        dtype = dt
+    else:
+        # complex to dtype('complex128'), for instance
+        dtype = _nx.dtype(dtype)
+
+    # Promote both arguments to the same dtype in case, for instance, one is
+    # complex and another is negative and log would produce NaN otherwise.
+    # Copy since we may change things in-place further down.
+    start = start.astype(dt, copy=True)
+    stop = stop.astype(dt, copy=True)
+
+    out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt)
+    # Avoid negligible real or imaginary parts in output by rotating to
+    # positive real, calculating, then undoing rotation
+    if _nx.issubdtype(dt, _nx.complexfloating):
+        all_imag = (start.real == 0.) & (stop.real == 0.)
+        if _nx.any(all_imag):
+            start[all_imag] = start[all_imag].imag
+            stop[all_imag] = stop[all_imag].imag
+            out_sign[all_imag] = 1j
+
+    both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1)
+    if _nx.any(both_negative):
+        _nx.negative(start, out=start, where=both_negative)
+        _nx.negative(stop, out=stop, where=both_negative)
+        _nx.negative(out_sign, out=out_sign, where=both_negative)
+
+    log_start = _nx.log10(start)
+    log_stop = _nx.log10(stop)
+    result = logspace(log_start, log_stop, num=num,
+                      endpoint=endpoint, base=10.0, dtype=dtype)
+
+    # Make sure the endpoints match the start and stop arguments. This is
+    # necessary because np.exp(np.log(x)) is not necessarily equal to x.
+    if num > 0:
+        result[0] = start
+        if num > 1 and endpoint:
+            result[-1] = stop
+
+    result = out_sign * result
+
+    if axis != 0:
+        result = _nx.moveaxis(result, 0, axis)
+
+    return result.astype(dtype, copy=False)
+
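+
+# Illustrative sketch (not part of the NumPy source): for positive endpoints,
+# ``geomspace`` agrees with ``logspace`` driven by the endpoints' log10
+# values, mirroring the implementation above.
+#
+#     >>> np.allclose(np.geomspace(1, 1000, num=4),
+#     ...             np.logspace(0, 3, num=4))
+#     True
+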
+
+def _needs_add_docstring(obj):
+    """
+    Returns true if the only way to set the docstring of `obj` from python is
+    via add_docstring.
+
+    This function errs on the side of being overly conservative.
+    """
+    Py_TPFLAGS_HEAPTYPE = 1 << 9
+
+    if isinstance(obj, (types.FunctionType, types.MethodType, property)):
+        return False
+
+    if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE:
+        return False
+
+    return True
+
+
+def _add_docstring(obj, doc, warn_on_python):
+    if warn_on_python and not _needs_add_docstring(obj):
+        warnings.warn(
+            "add_newdoc was used on a pure-python object {}. "
+            "Prefer to attach it directly to the source."
+            .format(obj),
+            UserWarning,
+            stacklevel=3)
+    try:
+        add_docstring(obj, doc)
+    except Exception:
+        pass
+
+
+def add_newdoc(place, obj, doc, warn_on_python=True):
+    """
+    Add documentation to an existing object, typically one defined in C
+
+    The purpose is to allow easier editing of the docstrings without requiring
+    a re-compile. This exists primarily for internal use within numpy itself.
+
+    Parameters
+    ----------
+    place : str
+        The absolute name of the module to import from
+    obj : str
+        The name of the object to add documentation to, typically a class or
+        function name
+    doc : {str, Tuple[str, str], List[Tuple[str, str]]}
+        If a string, the documentation to apply to `obj`
+
+        If a tuple, then the first element is interpreted as an attribute of
+        `obj` and the second as the docstring to apply - ``(method, docstring)``
+
+        If a list, then each element of the list should be a tuple of length
+        two - ``[(method1, docstring1), (method2, docstring2), ...]``
+    warn_on_python : bool
+        If True, the default, emit `UserWarning` if this is used to attach
+        documentation to a pure-python object.
+
+    Notes
+    -----
+    This routine never raises an error if the docstring can't be written, but
+    will raise an error if the object being documented does not exist.
+
+    This routine cannot modify read-only docstrings, such as those
+    of new-style classes or built-in functions. Because this
+    routine never raises an error, the caller must check manually
+    that the docstrings were changed.
+
+    Since this function grabs the ``char *`` from a C-level str object and puts
+    it into the ``tp_doc`` slot of the type of `obj`, it violates a number of
+    C-API best-practices, by:
+
+    - modifying a `PyTypeObject` after calling `PyType_Ready`
+    - calling `Py_INCREF` on the str and losing the reference, so the str
+      will never be released
+
+    If possible it should be avoided.
+    """
+    new = getattr(__import__(place, globals(), {}, [obj]), obj)
+    if isinstance(doc, str):
+        _add_docstring(new, doc.strip(), warn_on_python)
+    elif isinstance(doc, tuple):
+        attr, docstring = doc
+        _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
+    elif isinstance(doc, list):
+        for attr, docstring in doc:
+            _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
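+
+
+# Illustrative sketch (not part of the NumPy source): the three ``doc`` forms
+# accepted by ``add_newdoc``; the module and attribute names here are
+# hypothetical.
+#
+#     add_newdoc('some_c_module', 'some_obj', "Docstring for some_obj.")
+#     add_newdoc('some_c_module', 'some_obj',
+#                ('method_a', "Docstring for some_obj.method_a."))
+#     add_newdoc('some_c_module', 'some_obj',
+#                [('method_a', "..."), ('method_b', "...")])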
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/function_base.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/function_base.pyi
new file mode 100644
index 00000000..2c2a277b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/function_base.pyi
@@ -0,0 +1,187 @@
+from typing import (
+    Literal as L,
+    overload,
+    Any,
+    SupportsIndex,
+    TypeVar,
+)
+
+from numpy import floating, complexfloating, generic
+from numpy._typing import (
+    NDArray,
+    DTypeLike,
+    _DTypeLike,
+    _ArrayLikeFloat_co,
+    _ArrayLikeComplex_co,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+
+__all__: list[str]
+
+@overload
+def linspace(
+    start: _ArrayLikeFloat_co,
+    stop: _ArrayLikeFloat_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    retstep: L[False] = ...,
+    dtype: None = ...,
+    axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def linspace(
+    start: _ArrayLikeComplex_co,
+    stop: _ArrayLikeComplex_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    retstep: L[False] = ...,
+    dtype: None = ...,
+    axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def linspace(
+    start: _ArrayLikeComplex_co,
+    stop: _ArrayLikeComplex_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    retstep: L[False] = ...,
+    dtype: _DTypeLike[_SCT] = ...,
+    axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def linspace(
+    start: _ArrayLikeComplex_co,
+    stop: _ArrayLikeComplex_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    retstep: L[False] = ...,
+    dtype: DTypeLike = ...,
+    axis: SupportsIndex = ...,
+) -> NDArray[Any]: ...
+@overload
+def linspace(
+    start: _ArrayLikeFloat_co,
+    stop: _ArrayLikeFloat_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    retstep: L[True] = ...,
+    dtype: None = ...,
+    axis: SupportsIndex = ...,
+) -> tuple[NDArray[floating[Any]], floating[Any]]: ...
+@overload
+def linspace(
+    start: _ArrayLikeComplex_co,
+    stop: _ArrayLikeComplex_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    retstep: L[True] = ...,
+    dtype: None = ...,
+    axis: SupportsIndex = ...,
+) -> tuple[NDArray[complexfloating[Any, Any]], complexfloating[Any, Any]]: ...
+@overload
+def linspace(
+    start: _ArrayLikeComplex_co,
+    stop: _ArrayLikeComplex_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    retstep: L[True] = ...,
+    dtype: _DTypeLike[_SCT] = ...,
+    axis: SupportsIndex = ...,
+) -> tuple[NDArray[_SCT], _SCT]: ...
+@overload
+def linspace(
+    start: _ArrayLikeComplex_co,
+    stop: _ArrayLikeComplex_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    retstep: L[True] = ...,
+    dtype: DTypeLike = ...,
+    axis: SupportsIndex = ...,
+) -> tuple[NDArray[Any], Any]: ...
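+# How the overloads above resolve (illustrative; as seen by a static type
+# checker such as mypy, not at runtime):
+#   np.linspace(0.0, 1.0, 5)                 -> NDArray[floating[Any]]
+#   np.linspace(0j, 1j, 5)                   -> NDArray[complexfloating[Any, Any]]
+#   np.linspace(0.0, 1.0, 5, retstep=True)   -> tuple[NDArray[floating[Any]],
+#                                                     floating[Any]]
+#   np.linspace(0.0, 1.0, 5, dtype=np.int64) -> NDArray[int64]
+# logspace and geomspace below follow the same pattern, minus the retstep
+# overloads.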
+
+@overload
+def logspace(
+    start: _ArrayLikeFloat_co,
+    stop: _ArrayLikeFloat_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    base: _ArrayLikeFloat_co = ...,
+    dtype: None = ...,
+    axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def logspace(
+    start: _ArrayLikeComplex_co,
+    stop: _ArrayLikeComplex_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    base: _ArrayLikeComplex_co = ...,
+    dtype: None = ...,
+    axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def logspace(
+    start: _ArrayLikeComplex_co,
+    stop: _ArrayLikeComplex_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    base: _ArrayLikeComplex_co = ...,
+    dtype: _DTypeLike[_SCT] = ...,
+    axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def logspace(
+    start: _ArrayLikeComplex_co,
+    stop: _ArrayLikeComplex_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    base: _ArrayLikeComplex_co = ...,
+    dtype: DTypeLike = ...,
+    axis: SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def geomspace(
+    start: _ArrayLikeFloat_co,
+    stop: _ArrayLikeFloat_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    dtype: None = ...,
+    axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def geomspace(
+    start: _ArrayLikeComplex_co,
+    stop: _ArrayLikeComplex_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    dtype: None = ...,
+    axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def geomspace(
+    start: _ArrayLikeComplex_co,
+    stop: _ArrayLikeComplex_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    dtype: _DTypeLike[_SCT] = ...,
+    axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def geomspace(
+    start: _ArrayLikeComplex_co,
+    stop: _ArrayLikeComplex_co,
+    num: SupportsIndex = ...,
+    endpoint: bool = ...,
+    dtype: DTypeLike = ...,
+    axis: SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+# Re-exported to `np.lib.function_base`
+def add_newdoc(
+    place: str,
+    obj: str,
+    doc: str | tuple[str, str] | list[tuple[str, str]],
+    warn_on_python: bool = ...,
+) -> None: ...
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/getlimits.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/getlimits.py
new file mode 100644
index 00000000..13414c2a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/getlimits.py
@@ -0,0 +1,735 @@
+"""Machine limits for Float32 and Float64 and (long double) if available...
+
+"""
+__all__ = ['finfo', 'iinfo']
+
+import warnings
+
+from .._utils import set_module
+from ._machar import MachAr
+from . import numeric
+from . import numerictypes as ntypes
+from .numeric import array, inf, NaN
+from .umath import log10, exp2, nextafter, isnan
+
+
+def _fr0(a):
+    """fix rank-0 --> rank-1"""
+    if a.ndim == 0:
+        a = a.copy()
+        a.shape = (1,)
+    return a
+
+
+def _fr1(a):
+    """fix rank > 0 --> rank-0"""
+    if a.size == 1:
+        a = a.copy()
+        a.shape = ()
+    return a
+
+
+class MachArLike:
+    """ Object to simulate MachAr instance """
+    def __init__(self, ftype, *, eps, epsneg, huge, tiny,
+                 ibeta, smallest_subnormal=None, **kwargs):
+        self.params = _MACHAR_PARAMS[ftype]
+        self.ftype = ftype
+        self.title = self.params['title']
+        # Parameter types same as for discovered MachAr object.
+        if not smallest_subnormal:
+            self._smallest_subnormal = nextafter(
+                self.ftype(0), self.ftype(1), dtype=self.ftype)
+        else:
+            self._smallest_subnormal = smallest_subnormal
+        self.epsilon = self.eps = self._float_to_float(eps)
+        self.epsneg = self._float_to_float(epsneg)
+        self.xmax = self.huge = self._float_to_float(huge)
+        self.xmin = self._float_to_float(tiny)
+        self.smallest_normal = self.tiny = self._float_to_float(tiny)
+        self.ibeta = self.params['itype'](ibeta)
+        self.__dict__.update(kwargs)
+        self.precision = int(-log10(self.eps))
+        self.resolution = self._float_to_float(
+            self._float_conv(10) ** (-self.precision))
+        self._str_eps = self._float_to_str(self.eps)
+        self._str_epsneg = self._float_to_str(self.epsneg)
+        self._str_xmin = self._float_to_str(self.xmin)
+        self._str_xmax = self._float_to_str(self.xmax)
+        self._str_resolution = self._float_to_str(self.resolution)
+        self._str_smallest_normal = self._float_to_str(self.xmin)
+
+    @property
+    def smallest_subnormal(self):
+        """Return the value for the smallest subnormal.
+
+        Returns
+        -------
+        smallest_subnormal : float
+            value for the smallest subnormal.
+
+        Warns
+        -----
+        UserWarning
+            If the calculated value for the smallest subnormal is zero.
+        """
+        # Check that the calculated value is not zero; if it is, emit a
+        # warning.
+        value = self._smallest_subnormal
+        if self.ftype(0) == value:
+            warnings.warn(
+                'The value of the smallest subnormal for {} type '
+                'is zero.'.format(self.ftype), UserWarning, stacklevel=2)
+
+        return self._float_to_float(value)
+
+    @property
+    def _str_smallest_subnormal(self):
+        """Return the string representation of the smallest subnormal."""
+        return self._float_to_str(self.smallest_subnormal)
+
+    def _float_to_float(self, value):
+        """Converts float to float.
+
+        Parameters
+        ----------
+        value : float
+            value to be converted.
+        """
+        return _fr1(self._float_conv(value))
+
+    def _float_conv(self, value):
+        """Converts float to conv.
+
+        Parameters
+        ----------
+        value : float
+            value to be converted.
+        """
+        return array([value], self.ftype)
+
+    def _float_to_str(self, value):
+        """Converts float to str.
+
+        Parameters
+        ----------
+        value : float
+            value to be converted.
+        """
+        return self.params['fmt'] % array(_fr0(value)[0], self.ftype)
+
+
+_convert_to_float = {
+    ntypes.csingle: ntypes.single,
+    ntypes.complex_: ntypes.float_,
+    ntypes.clongfloat: ntypes.longfloat
+    }
+
+# Parameters for creating MachAr / MachAr-like objects
+_title_fmt = 'numpy {} precision floating point number'
+_MACHAR_PARAMS = {
+    ntypes.double: dict(
+        itype = ntypes.int64,
+        fmt = '%24.16e',
+        title = _title_fmt.format('double')),
+    ntypes.single: dict(
+        itype = ntypes.int32,
+        fmt = '%15.7e',
+        title = _title_fmt.format('single')),
+    ntypes.longdouble: dict(
+        itype = ntypes.longlong,
+        fmt = '%s',
+        title = _title_fmt.format('long double')),
+    ntypes.half: dict(
+        itype = ntypes.int16,
+        fmt = '%12.5e',
+        title = _title_fmt.format('half'))}
+
+# Key to identify the floating point type.  Key is result of
+# ftype('-0.1').newbyteorder('<').tobytes()
+#
+# 20230201 - use (ftype(-1.0) / ftype(10.0)).newbyteorder('<').tobytes()
+#            instead because stold may have deficiencies on some platforms.
+# See:
+# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure
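+#
+# For example, on an IEEE-754 double (illustrative), the resulting key is the
+# little-endian byte pattern of -0.1, i.e. the pattern registered for float64
+# below:
+#
+#   >>> (np.float64(-1.0) / np.float64(10.0)).newbyteorder('<').tobytes()
+#   b'\x9a\x99\x99\x99\x99\x99\xb9\xbf'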
+
+_KNOWN_TYPES = {}
+def _register_type(machar, bytepat):
+    _KNOWN_TYPES[bytepat] = machar
+_float_ma = {}
+
+
+def _register_known_types():
+    # Known parameters for float16
+    # See docstring of MachAr class for description of parameters.
+    f16 = ntypes.float16
+    float16_ma = MachArLike(f16,
+                            machep=-10,
+                            negep=-11,
+                            minexp=-14,
+                            maxexp=16,
+                            it=10,
+                            iexp=5,
+                            ibeta=2,
+                            irnd=5,
+                            ngrd=0,
+                            eps=exp2(f16(-10)),
+                            epsneg=exp2(f16(-11)),
+                            huge=f16(65504),
+                            tiny=f16(2 ** -14))
+    _register_type(float16_ma, b'f\xae')
+    _float_ma[16] = float16_ma
+
+    # Known parameters for float32
+    f32 = ntypes.float32
+    float32_ma = MachArLike(f32,
+                            machep=-23,
+                            negep=-24,
+                            minexp=-126,
+                            maxexp=128,
+                            it=23,
+                            iexp=8,
+                            ibeta=2,
+                            irnd=5,
+                            ngrd=0,
+                            eps=exp2(f32(-23)),
+                            epsneg=exp2(f32(-24)),
+                            huge=f32((1 - 2 ** -24) * 2**128),
+                            tiny=exp2(f32(-126)))
+    _register_type(float32_ma, b'\xcd\xcc\xcc\xbd')
+    _float_ma[32] = float32_ma
+
+    # Known parameters for float64
+    f64 = ntypes.float64
+    epsneg_f64 = 2.0 ** -53.0
+    tiny_f64 = 2.0 ** -1022.0
+    float64_ma = MachArLike(f64,
+                            machep=-52,
+                            negep=-53,
+                            minexp=-1022,
+                            maxexp=1024,
+                            it=52,
+                            iexp=11,
+                            ibeta=2,
+                            irnd=5,
+                            ngrd=0,
+                            eps=2.0 ** -52.0,
+                            epsneg=epsneg_f64,
+                            huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),
+                            tiny=tiny_f64)
+    _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')
+    _float_ma[64] = float64_ma
+
+    # Known parameters for IEEE 754 128-bit binary float
+    ld = ntypes.longdouble
+    epsneg_f128 = exp2(ld(-113))
+    tiny_f128 = exp2(ld(-16382))
+    # Ignore runtime error when this is not f128
+    with numeric.errstate(all='ignore'):
+        huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4)
+    float128_ma = MachArLike(ld,
+                             machep=-112,
+                             negep=-113,
+                             minexp=-16382,
+                             maxexp=16384,
+                             it=112,
+                             iexp=15,
+                             ibeta=2,
+                             irnd=5,
+                             ngrd=0,
+                             eps=exp2(ld(-112)),
+                             epsneg=epsneg_f128,
+                             huge=huge_f128,
+                             tiny=tiny_f128)
+    # IEEE 754 128-bit binary float
+    _register_type(float128_ma,
+        b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
+    _float_ma[128] = float128_ma
+
+    # Known parameters for float80 (Intel 80-bit extended precision)
+    epsneg_f80 = exp2(ld(-64))
+    tiny_f80 = exp2(ld(-16382))
+    # Ignore runtime error when this is not f80
+    with numeric.errstate(all='ignore'):
+        huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)
+    float80_ma = MachArLike(ld,
+                            machep=-63,
+                            negep=-64,
+                            minexp=-16382,
+                            maxexp=16384,
+                            it=63,
+                            iexp=15,
+                            ibeta=2,
+                            irnd=5,
+                            ngrd=0,
+                            eps=exp2(ld(-63)),
+                            epsneg=epsneg_f80,
+                            huge=huge_f80,
+                            tiny=tiny_f80)
+    # float80, first 10 bytes containing actual storage
+    _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
+    _float_ma[80] = float80_ma
+
+    # Guessed / known parameters for double double; see:
+    # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
+# These numbers have the same exponent range as float64, but an extended
+# number of digits in the significand.
+    huge_dd = nextafter(ld(inf), ld(0), dtype=ld)
+    # As the smallest_normal in double double is so hard to calculate, we set
+    # it to NaN.
+    smallest_normal_dd = NaN
+    # Leave the same value for the smallest subnormal as double
+    smallest_subnormal_dd = ld(nextafter(0., 1.))
+    float_dd_ma = MachArLike(ld,
+                             machep=-105,
+                             negep=-106,
+                             minexp=-1022,
+                             maxexp=1024,
+                             it=105,
+                             iexp=11,
+                             ibeta=2,
+                             irnd=5,
+                             ngrd=0,
+                             eps=exp2(ld(-105)),
+                             epsneg=exp2(ld(-106)),
+                             huge=huge_dd,
+                             tiny=smallest_normal_dd,
+                             smallest_subnormal=smallest_subnormal_dd)
+    # double double; low, high order (e.g. PPC 64)
+    _register_type(float_dd_ma,
+        b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
+    # double double; high, low order (e.g. PPC 64 le)
+    _register_type(float_dd_ma,
+        b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
+    _float_ma['dd'] = float_dd_ma
+
+
+def _get_machar(ftype):
+    """ Get MachAr instance or MachAr-like instance
+
+    Get parameters for floating point type, by first trying signatures of
+    various known floating point types, then, if none match, attempting to
+    identify parameters by analysis.
+
+    Parameters
+    ----------
+    ftype : class
+        Numpy floating point type class (e.g. ``np.float64``)
+
+    Returns
+    -------
+    ma_like : instance of :class:`MachAr` or :class:`MachArLike`
+        Object giving floating point parameters for `ftype`.
+
+    Warns
+    -----
+    UserWarning
+        If the binary signature of the float type is not in the dictionary of
+        known float types.
+    """
+    params = _MACHAR_PARAMS.get(ftype)
+    if params is None:
+        raise ValueError(repr(ftype))
+    # Detect known / suspected types
+    # ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold
+    # may be deficient
+    key = (ftype(-1.0) / ftype(10.)).newbyteorder('<').tobytes()
+    ma_like = None
+    if ftype == ntypes.longdouble:
+        # Could be 80 bit == 10 byte extended precision, where last bytes can
+        # be random garbage.
+        # Compare the first 10 bytes to the pattern first, to avoid branching
+        # on the random garbage.
+        ma_like = _KNOWN_TYPES.get(key[:10])
+    if ma_like is None:
+        # see if the full key is known.
+        ma_like = _KNOWN_TYPES.get(key)
+    if ma_like is None and len(key) == 16:
+        # machine limits could be f80 masquerading as np.float128:
+        # find all keys with length 16 and make a new dict, but make the keys
+        # only 10 bytes long; the last bytes can be random garbage.
+        _kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16}
+        ma_like = _kt.get(key[:10])
+    if ma_like is not None:
+        return ma_like
+    # Fall back to parameter discovery
+    warnings.warn(
+        f'Signature {key} for {ftype} does not match any known type: '
+        'falling back to type probe function.\n'
+        'This warning indicates broken support for the dtype!',
+        UserWarning, stacklevel=2)
+    return _discovered_machar(ftype)
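+
+# Illustrative lookup flow on a typical IEEE-754 platform (an assumption, not
+# an exhaustive list):
+#   _get_machar(np.float64)     -> pre-registered MachArLike via the 8-byte key
+#   _get_machar(np.longdouble)  -> matched on the first 10 bytes (x86 float80)
+#   unknown signature           -> UserWarning, then fall back to
+#                                  _discovered_machar(ftype)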
+
+
+def _discovered_machar(ftype):
+    """ Create MachAr instance with found information on float types
+
+    TODO: ideally, MachAr should be retired completely.  We currently only
+          ever use it on systems with a broken longdouble (valgrind, WSL).
+    """
+    params = _MACHAR_PARAMS[ftype]
+    return MachAr(lambda v: array([v], ftype),
+                  lambda v: _fr0(v.astype(params['itype']))[0],
+                  lambda v: array(_fr0(v)[0], ftype),
+                  lambda v: params['fmt'] % array(_fr0(v)[0], ftype),
+                  params['title'])
+
+
+@set_module('numpy')
+class finfo:
+    """
+    finfo(dtype)
+
+    Machine limits for floating point types.
+
+    Attributes
+    ----------
+    bits : int
+        The number of bits occupied by the type.
+    dtype : dtype
+        Returns the dtype for which `finfo` returns information. For complex
+        input, the returned dtype is the associated ``float*`` dtype for its
+        real and complex components.
+    eps : float
+        The difference between 1.0 and the next smallest representable float
+        larger than 1.0. For example, for 64-bit binary floats in the IEEE-754
+        standard, ``eps = 2**-52``, approximately 2.22e-16.
+    epsneg : float
+        The difference between 1.0 and the next smallest representable float
+        less than 1.0. For example, for 64-bit binary floats in the IEEE-754
+        standard, ``epsneg = 2**-53``, approximately 1.11e-16.
+    iexp : int
+        The number of bits in the exponent portion of the floating point
+        representation.
+    machep : int
+        The exponent that yields `eps`.
+    max : floating point number of the appropriate type
+        The largest representable number.
+    maxexp : int
+        The smallest positive power of the base (2) that causes overflow.
+    min : floating point number of the appropriate type
+        The smallest representable number, typically ``-max``.
+    minexp : int
+        The most negative power of the base (2) consistent with there
+        being no leading 0's in the mantissa.
+    negep : int
+        The exponent that yields `epsneg`.
+    nexp : int
+        The number of bits in the exponent including its sign and bias.
+    nmant : int
+        The number of bits in the mantissa.
+    precision : int
+        The approximate number of decimal digits to which this kind of
+        float is precise.
+    resolution : floating point number of the appropriate type
+        The approximate decimal resolution of this type, i.e.,
+        ``10**-precision``.
+    tiny : float
+        An alias for `smallest_normal`, kept for backwards compatibility.
+    smallest_normal : float
+        The smallest positive floating point number with 1 as leading bit in
+        the mantissa following IEEE-754 (see Notes).
+    smallest_subnormal : float
+        The smallest positive floating point number with 0 as leading bit in
+        the mantissa following IEEE-754.
+
+    Parameters
+    ----------
+    dtype : float, dtype, or instance
+        Kind of floating point or complex floating point
+        data-type about which to get information.
+
+    See Also
+    --------
+    iinfo : The equivalent for integer data types.
+    spacing : The distance between a value and the nearest adjacent number
+    nextafter : The next floating point value after x1 towards x2
+
+    Notes
+    -----
+    For developers of NumPy: do not instantiate this at the module level.
+    The initial calculation of these parameters is expensive and negatively
+    impacts import times.  These objects are cached, so calling ``finfo()``
+    repeatedly inside your functions is not a problem.
+
+    Note that ``smallest_normal`` is not actually the smallest positive
+    representable value in a NumPy floating point type. As in the IEEE-754
+    standard [1]_, NumPy floating point types make use of subnormal numbers to
+    fill the gap between 0 and ``smallest_normal``. However, subnormal numbers
+    may have significantly reduced precision [2]_.
+
+    This class can also be used for complex data types. In that case, the
+    output is the same as for the corresponding real float type
+    (e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)),
+    and the reported limits apply to each of the real and imaginary components.
+
+    References
+    ----------
+    .. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008,
+           pp.1-70, 2008, http://www.doi.org/10.1109/IEEESTD.2008.4610935
+    .. [2] Wikipedia, "Denormal Numbers",
+           https://en.wikipedia.org/wiki/Denormal_number
+
+    Examples
+    --------
+    >>> np.finfo(np.float64).dtype
+    dtype('float64')
+    >>> np.finfo(np.complex64).dtype
+    dtype('float32')
+
+    """
+
+    _finfo_cache = {}
+
+    def __new__(cls, dtype):
+        try:
+            obj = cls._finfo_cache.get(dtype)  # most common path
+            if obj is not None:
+                return obj
+        except TypeError:
+            pass
+
+        if dtype is None:
+            # Deprecated in NumPy 1.25, 2023-01-16
+            warnings.warn(
+                "finfo() dtype cannot be None. This behavior will "
+                "raise an error in the future. (Deprecated in NumPy 1.25)",
+                DeprecationWarning,
+                stacklevel=2
+            )
+
+        try:
+            dtype = numeric.dtype(dtype)
+        except TypeError:
+            # In case a float instance was given
+            dtype = numeric.dtype(type(dtype))
+
+        obj = cls._finfo_cache.get(dtype)
+        if obj is not None:
+            return obj
+        dtypes = [dtype]
+        newdtype = numeric.obj2sctype(dtype)
+        if newdtype is not dtype:
+            dtypes.append(newdtype)
+            dtype = newdtype
+        if not issubclass(dtype, numeric.inexact):
+            raise ValueError("data type %r not inexact" % (dtype))
+        obj = cls._finfo_cache.get(dtype)
+        if obj is not None:
+            return obj
+        if not issubclass(dtype, numeric.floating):
+            newdtype = _convert_to_float[dtype]
+            if newdtype is not dtype:
+                # dtype changed, for example from complex128 to float64
+                dtypes.append(newdtype)
+                dtype = newdtype
+
+                obj = cls._finfo_cache.get(dtype, None)
+                if obj is not None:
+                    # the original dtype was not in the cache, but the new
+                    # dtype is in the cache. we add the original dtypes to
+                    # the cache and return the result
+                    for dt in dtypes:
+                        cls._finfo_cache[dt] = obj
+                    return obj
+        obj = object.__new__(cls)._init(dtype)
+        for dt in dtypes:
+            cls._finfo_cache[dt] = obj
+        return obj
+
+    def _init(self, dtype):
+        self.dtype = numeric.dtype(dtype)
+        machar = _get_machar(dtype)
+
+        for word in ['precision', 'iexp',
+                     'maxexp', 'minexp', 'negep',
+                     'machep']:
+            setattr(self, word, getattr(machar, word))
+        for word in ['resolution', 'epsneg', 'smallest_subnormal']:
+            setattr(self, word, getattr(machar, word).flat[0])
+        self.bits = self.dtype.itemsize * 8
+        self.max = machar.huge.flat[0]
+        self.min = -self.max
+        self.eps = machar.eps.flat[0]
+        self.nexp = machar.iexp
+        self.nmant = machar.it
+        self._machar = machar
+        self._str_tiny = machar._str_xmin.strip()
+        self._str_max = machar._str_xmax.strip()
+        self._str_epsneg = machar._str_epsneg.strip()
+        self._str_eps = machar._str_eps.strip()
+        self._str_resolution = machar._str_resolution.strip()
+        self._str_smallest_normal = machar._str_smallest_normal.strip()
+        self._str_smallest_subnormal = machar._str_smallest_subnormal.strip()
+        return self
+
+    def __str__(self):
+        fmt = (
+            'Machine parameters for %(dtype)s\n'
+            '---------------------------------------------------------------\n'
+            'precision = %(precision)3s   resolution = %(_str_resolution)s\n'
+            'machep = %(machep)6s   eps =        %(_str_eps)s\n'
+            'negep =  %(negep)6s   epsneg =     %(_str_epsneg)s\n'
+            'minexp = %(minexp)6s   tiny =       %(_str_tiny)s\n'
+            'maxexp = %(maxexp)6s   max =        %(_str_max)s\n'
+            'nexp =   %(nexp)6s   min =        -max\n'
+            'smallest_normal = %(_str_smallest_normal)s   '
+            'smallest_subnormal = %(_str_smallest_subnormal)s\n'
+            '---------------------------------------------------------------\n'
+            )
+        return fmt % self.__dict__
+
+    def __repr__(self):
+        c = self.__class__.__name__
+        d = self.__dict__.copy()
+        d['klass'] = c
+        return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
+                 " max=%(_str_max)s, dtype=%(dtype)s)") % d)
+
+    @property
+    def smallest_normal(self):
+        """Return the value for the smallest normal.
+
+        Returns
+        -------
+        smallest_normal : float
+            Value for the smallest normal.
+
+        Warns
+        -----
+        UserWarning
+            If the calculated value for the smallest normal is requested for
+            double-double.
+        """
+        # This check is necessary because the value for smallest_normal is
+        # platform dependent for longdouble types.
+        if isnan(self._machar.smallest_normal.flat[0]):
+            warnings.warn(
+                'The value of smallest normal is undefined for double double',
+                UserWarning, stacklevel=2)
+        return self._machar.smallest_normal.flat[0]
+
+    @property
+    def tiny(self):
+        """Return the value for tiny, alias of smallest_normal.
+
+        Returns
+        -------
+        tiny : float
+            Value for the smallest normal, alias of smallest_normal.
+
+        Warns
+        -----
+        UserWarning
+            If the calculated value for the smallest normal is requested for
+            double-double.
+        """
+        return self.smallest_normal
+
+
+@set_module('numpy')
+class iinfo:
+    """
+    iinfo(type)
+
+    Machine limits for integer types.
+
+    Attributes
+    ----------
+    bits : int
+        The number of bits occupied by the type.
+    dtype : dtype
+        Returns the dtype for which `iinfo` returns information.
+    min : int
+        The smallest integer expressible by the type.
+    max : int
+        The largest integer expressible by the type.
+
+    Parameters
+    ----------
+    int_type : integer type, dtype, or instance
+        The kind of integer data type to get information about.
+
+    See Also
+    --------
+    finfo : The equivalent for floating point data types.
+
+    Examples
+    --------
+    With types:
+
+    >>> ii16 = np.iinfo(np.int16)
+    >>> ii16.min
+    -32768
+    >>> ii16.max
+    32767
+    >>> ii32 = np.iinfo(np.int32)
+    >>> ii32.min
+    -2147483648
+    >>> ii32.max
+    2147483647
+
+    With instances:
+
+    >>> ii32 = np.iinfo(np.int32(10))
+    >>> ii32.min
+    -2147483648
+    >>> ii32.max
+    2147483647
+
+    """
+
+    _min_vals = {}
+    _max_vals = {}
+
+    def __init__(self, int_type):
+        try:
+            self.dtype = numeric.dtype(int_type)
+        except TypeError:
+            self.dtype = numeric.dtype(type(int_type))
+        self.kind = self.dtype.kind
+        self.bits = self.dtype.itemsize * 8
+        self.key = "%s%d" % (self.kind, self.bits)
+        if self.kind not in 'iu':
+            raise ValueError("Invalid integer data type %r." % (self.kind,))
+
+    @property
+    def min(self):
+        """Minimum value of given dtype."""
+        if self.kind == 'u':
+            return 0
+        else:
+            try:
+                val = iinfo._min_vals[self.key]
+            except KeyError:
+                val = int(-(1 << (self.bits-1)))
+                iinfo._min_vals[self.key] = val
+            return val
+
+    @property
+    def max(self):
+        """Maximum value of given dtype."""
+        try:
+            val = iinfo._max_vals[self.key]
+        except KeyError:
+            if self.kind == 'u':
+                val = int((1 << self.bits) - 1)
+            else:
+                val = int((1 << (self.bits-1)) - 1)
+            iinfo._max_vals[self.key] = val
+        return val
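+
+    # Worked example (matching the class docstring): for int16, bits = 16, so
+    # min = -(1 << 15) = -32768 and max = (1 << 15) - 1 = 32767; for uint16
+    # (kind 'u'), min = 0 and max = (1 << 16) - 1 = 65535.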
+
+    def __str__(self):
+        """String representation."""
+        fmt = (
+            'Machine parameters for %(dtype)s\n'
+            '---------------------------------------------------------------\n'
+            'min = %(min)s\n'
+            'max = %(max)s\n'
+            '---------------------------------------------------------------\n'
+            )
+        return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
+
+    def __repr__(self):
+        return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
+                                    self.min, self.max, self.dtype)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/getlimits.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/getlimits.pyi
new file mode 100644
index 00000000..da5e3c23
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/getlimits.pyi
@@ -0,0 +1,6 @@
+from numpy import (
+    finfo as finfo,
+    iinfo as iinfo,
+)
+
+__all__: list[str]
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/__multiarray_api.c b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/__multiarray_api.c
new file mode 100644
index 00000000..4fa051c1
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/__multiarray_api.c
@@ -0,0 +1,314 @@
+
+/* These pointers will be stored in the C-object for use in other
+    extension modules
+*/
+
+void *PyArray_API[] = {
+        (void *) PyArray_GetNDArrayCVersion,
+        (void *) &PyBigArray_Type,
+        (void *) &PyArray_Type,
+        (void *) &PyArrayDescr_Type,
+        (void *) &PyArrayFlags_Type,
+        (void *) &PyArrayIter_Type,
+        (void *) &PyArrayMultiIter_Type,
+        (int *) &NPY_NUMUSERTYPES,
+        (void *) &PyBoolArrType_Type,
+        (void *) &_PyArrayScalar_BoolValues,
+        (void *) &PyGenericArrType_Type,
+        (void *) &PyNumberArrType_Type,
+        (void *) &PyIntegerArrType_Type,
+        (void *) &PySignedIntegerArrType_Type,
+        (void *) &PyUnsignedIntegerArrType_Type,
+        (void *) &PyInexactArrType_Type,
+        (void *) &PyFloatingArrType_Type,
+        (void *) &PyComplexFloatingArrType_Type,
+        (void *) &PyFlexibleArrType_Type,
+        (void *) &PyCharacterArrType_Type,
+        (void *) &PyByteArrType_Type,
+        (void *) &PyShortArrType_Type,
+        (void *) &PyIntArrType_Type,
+        (void *) &PyLongArrType_Type,
+        (void *) &PyLongLongArrType_Type,
+        (void *) &PyUByteArrType_Type,
+        (void *) &PyUShortArrType_Type,
+        (void *) &PyUIntArrType_Type,
+        (void *) &PyULongArrType_Type,
+        (void *) &PyULongLongArrType_Type,
+        (void *) &PyFloatArrType_Type,
+        (void *) &PyDoubleArrType_Type,
+        (void *) &PyLongDoubleArrType_Type,
+        (void *) &PyCFloatArrType_Type,
+        (void *) &PyCDoubleArrType_Type,
+        (void *) &PyCLongDoubleArrType_Type,
+        (void *) &PyObjectArrType_Type,
+        (void *) &PyStringArrType_Type,
+        (void *) &PyUnicodeArrType_Type,
+        (void *) &PyVoidArrType_Type,
+        (void *) PyArray_SetNumericOps,
+        (void *) PyArray_GetNumericOps,
+        (void *) PyArray_INCREF,
+        (void *) PyArray_XDECREF,
+        (void *) PyArray_SetStringFunction,
+        (void *) PyArray_DescrFromType,
+        (void *) PyArray_TypeObjectFromType,
+        (void *) PyArray_Zero,
+        (void *) PyArray_One,
+        (void *) PyArray_CastToType,
+        (void *) PyArray_CastTo,
+        (void *) PyArray_CastAnyTo,
+        (void *) PyArray_CanCastSafely,
+        (void *) PyArray_CanCastTo,
+        (void *) PyArray_ObjectType,
+        (void *) PyArray_DescrFromObject,
+        (void *) PyArray_ConvertToCommonType,
+        (void *) PyArray_DescrFromScalar,
+        (void *) PyArray_DescrFromTypeObject,
+        (void *) PyArray_Size,
+        (void *) PyArray_Scalar,
+        (void *) PyArray_FromScalar,
+        (void *) PyArray_ScalarAsCtype,
+        (void *) PyArray_CastScalarToCtype,
+        (void *) PyArray_CastScalarDirect,
+        (void *) PyArray_ScalarFromObject,
+        (void *) PyArray_GetCastFunc,
+        (void *) PyArray_FromDims,
+        (void *) PyArray_FromDimsAndDataAndDescr,
+        (void *) PyArray_FromAny,
+        (void *) PyArray_EnsureArray,
+        (void *) PyArray_EnsureAnyArray,
+        (void *) PyArray_FromFile,
+        (void *) PyArray_FromString,
+        (void *) PyArray_FromBuffer,
+        (void *) PyArray_FromIter,
+        (void *) PyArray_Return,
+        (void *) PyArray_GetField,
+        (void *) PyArray_SetField,
+        (void *) PyArray_Byteswap,
+        (void *) PyArray_Resize,
+        (void *) PyArray_MoveInto,
+        (void *) PyArray_CopyInto,
+        (void *) PyArray_CopyAnyInto,
+        (void *) PyArray_CopyObject,
+        (void *) PyArray_NewCopy,
+        (void *) PyArray_ToList,
+        (void *) PyArray_ToString,
+        (void *) PyArray_ToFile,
+        (void *) PyArray_Dump,
+        (void *) PyArray_Dumps,
+        (void *) PyArray_ValidType,
+        (void *) PyArray_UpdateFlags,
+        (void *) PyArray_New,
+        (void *) PyArray_NewFromDescr,
+        (void *) PyArray_DescrNew,
+        (void *) PyArray_DescrNewFromType,
+        (void *) PyArray_GetPriority,
+        (void *) PyArray_IterNew,
+        (void *) PyArray_MultiIterNew,
+        (void *) PyArray_PyIntAsInt,
+        (void *) PyArray_PyIntAsIntp,
+        (void *) PyArray_Broadcast,
+        (void *) PyArray_FillObjectArray,
+        (void *) PyArray_FillWithScalar,
+        (void *) PyArray_CheckStrides,
+        (void *) PyArray_DescrNewByteorder,
+        (void *) PyArray_IterAllButAxis,
+        (void *) PyArray_CheckFromAny,
+        (void *) PyArray_FromArray,
+        (void *) PyArray_FromInterface,
+        (void *) PyArray_FromStructInterface,
+        (void *) PyArray_FromArrayAttr,
+        (void *) PyArray_ScalarKind,
+        (void *) PyArray_CanCoerceScalar,
+        (void *) PyArray_NewFlagsObject,
+        (void *) PyArray_CanCastScalar,
+        (void *) PyArray_CompareUCS4,
+        (void *) PyArray_RemoveSmallest,
+        (void *) PyArray_ElementStrides,
+        (void *) PyArray_Item_INCREF,
+        (void *) PyArray_Item_XDECREF,
+        (void *) PyArray_FieldNames,
+        (void *) PyArray_Transpose,
+        (void *) PyArray_TakeFrom,
+        (void *) PyArray_PutTo,
+        (void *) PyArray_PutMask,
+        (void *) PyArray_Repeat,
+        (void *) PyArray_Choose,
+        (void *) PyArray_Sort,
+        (void *) PyArray_ArgSort,
+        (void *) PyArray_SearchSorted,
+        (void *) PyArray_ArgMax,
+        (void *) PyArray_ArgMin,
+        (void *) PyArray_Reshape,
+        (void *) PyArray_Newshape,
+        (void *) PyArray_Squeeze,
+        (void *) PyArray_View,
+        (void *) PyArray_SwapAxes,
+        (void *) PyArray_Max,
+        (void *) PyArray_Min,
+        (void *) PyArray_Ptp,
+        (void *) PyArray_Mean,
+        (void *) PyArray_Trace,
+        (void *) PyArray_Diagonal,
+        (void *) PyArray_Clip,
+        (void *) PyArray_Conjugate,
+        (void *) PyArray_Nonzero,
+        (void *) PyArray_Std,
+        (void *) PyArray_Sum,
+        (void *) PyArray_CumSum,
+        (void *) PyArray_Prod,
+        (void *) PyArray_CumProd,
+        (void *) PyArray_All,
+        (void *) PyArray_Any,
+        (void *) PyArray_Compress,
+        (void *) PyArray_Flatten,
+        (void *) PyArray_Ravel,
+        (void *) PyArray_MultiplyList,
+        (void *) PyArray_MultiplyIntList,
+        (void *) PyArray_GetPtr,
+        (void *) PyArray_CompareLists,
+        (void *) PyArray_AsCArray,
+        (void *) PyArray_As1D,
+        (void *) PyArray_As2D,
+        (void *) PyArray_Free,
+        (void *) PyArray_Converter,
+        (void *) PyArray_IntpFromSequence,
+        (void *) PyArray_Concatenate,
+        (void *) PyArray_InnerProduct,
+        (void *) PyArray_MatrixProduct,
+        (void *) PyArray_CopyAndTranspose,
+        (void *) PyArray_Correlate,
+        (void *) PyArray_TypestrConvert,
+        (void *) PyArray_DescrConverter,
+        (void *) PyArray_DescrConverter2,
+        (void *) PyArray_IntpConverter,
+        (void *) PyArray_BufferConverter,
+        (void *) PyArray_AxisConverter,
+        (void *) PyArray_BoolConverter,
+        (void *) PyArray_ByteorderConverter,
+        (void *) PyArray_OrderConverter,
+        (void *) PyArray_EquivTypes,
+        (void *) PyArray_Zeros,
+        (void *) PyArray_Empty,
+        (void *) PyArray_Where,
+        (void *) PyArray_Arange,
+        (void *) PyArray_ArangeObj,
+        (void *) PyArray_SortkindConverter,
+        (void *) PyArray_LexSort,
+        (void *) PyArray_Round,
+        (void *) PyArray_EquivTypenums,
+        (void *) PyArray_RegisterDataType,
+        (void *) PyArray_RegisterCastFunc,
+        (void *) PyArray_RegisterCanCast,
+        (void *) PyArray_InitArrFuncs,
+        (void *) PyArray_IntTupleFromIntp,
+        (void *) PyArray_TypeNumFromName,
+        (void *) PyArray_ClipmodeConverter,
+        (void *) PyArray_OutputConverter,
+        (void *) PyArray_BroadcastToShape,
+        (void *) _PyArray_SigintHandler,
+        (void *) _PyArray_GetSigintBuf,
+        (void *) PyArray_DescrAlignConverter,
+        (void *) PyArray_DescrAlignConverter2,
+        (void *) PyArray_SearchsideConverter,
+        (void *) PyArray_CheckAxis,
+        (void *) PyArray_OverflowMultiplyList,
+        (void *) PyArray_CompareString,
+        (void *) PyArray_MultiIterFromObjects,
+        (void *) PyArray_GetEndianness,
+        (void *) PyArray_GetNDArrayCFeatureVersion,
+        (void *) PyArray_Correlate2,
+        (void *) PyArray_NeighborhoodIterNew,
+        (void *) &PyTimeIntegerArrType_Type,
+        (void *) &PyDatetimeArrType_Type,
+        (void *) &PyTimedeltaArrType_Type,
+        (void *) &PyHalfArrType_Type,
+        (void *) &NpyIter_Type,
+        (void *) PyArray_SetDatetimeParseFunction,
+        (void *) PyArray_DatetimeToDatetimeStruct,
+        (void *) PyArray_TimedeltaToTimedeltaStruct,
+        (void *) PyArray_DatetimeStructToDatetime,
+        (void *) PyArray_TimedeltaStructToTimedelta,
+        (void *) NpyIter_New,
+        (void *) NpyIter_MultiNew,
+        (void *) NpyIter_AdvancedNew,
+        (void *) NpyIter_Copy,
+        (void *) NpyIter_Deallocate,
+        (void *) NpyIter_HasDelayedBufAlloc,
+        (void *) NpyIter_HasExternalLoop,
+        (void *) NpyIter_EnableExternalLoop,
+        (void *) NpyIter_GetInnerStrideArray,
+        (void *) NpyIter_GetInnerLoopSizePtr,
+        (void *) NpyIter_Reset,
+        (void *) NpyIter_ResetBasePointers,
+        (void *) NpyIter_ResetToIterIndexRange,
+        (void *) NpyIter_GetNDim,
+        (void *) NpyIter_GetNOp,
+        (void *) NpyIter_GetIterNext,
+        (void *) NpyIter_GetIterSize,
+        (void *) NpyIter_GetIterIndexRange,
+        (void *) NpyIter_GetIterIndex,
+        (void *) NpyIter_GotoIterIndex,
+        (void *) NpyIter_HasMultiIndex,
+        (void *) NpyIter_GetShape,
+        (void *) NpyIter_GetGetMultiIndex,
+        (void *) NpyIter_GotoMultiIndex,
+        (void *) NpyIter_RemoveMultiIndex,
+        (void *) NpyIter_HasIndex,
+        (void *) NpyIter_IsBuffered,
+        (void *) NpyIter_IsGrowInner,
+        (void *) NpyIter_GetBufferSize,
+        (void *) NpyIter_GetIndexPtr,
+        (void *) NpyIter_GotoIndex,
+        (void *) NpyIter_GetDataPtrArray,
+        (void *) NpyIter_GetDescrArray,
+        (void *) NpyIter_GetOperandArray,
+        (void *) NpyIter_GetIterView,
+        (void *) NpyIter_GetReadFlags,
+        (void *) NpyIter_GetWriteFlags,
+        (void *) NpyIter_DebugPrint,
+        (void *) NpyIter_IterationNeedsAPI,
+        (void *) NpyIter_GetInnerFixedStrideArray,
+        (void *) NpyIter_RemoveAxis,
+        (void *) NpyIter_GetAxisStrideArray,
+        (void *) NpyIter_RequiresBuffering,
+        (void *) NpyIter_GetInitialDataPtrArray,
+        (void *) NpyIter_CreateCompatibleStrides,
+        (void *) PyArray_CastingConverter,
+        (void *) PyArray_CountNonzero,
+        (void *) PyArray_PromoteTypes,
+        (void *) PyArray_MinScalarType,
+        (void *) PyArray_ResultType,
+        (void *) PyArray_CanCastArrayTo,
+        (void *) PyArray_CanCastTypeTo,
+        (void *) PyArray_EinsteinSum,
+        (void *) PyArray_NewLikeArray,
+        (void *) PyArray_GetArrayParamsFromObject,
+        (void *) PyArray_ConvertClipmodeSequence,
+        (void *) PyArray_MatrixProduct2,
+        (void *) NpyIter_IsFirstVisit,
+        (void *) PyArray_SetBaseObject,
+        (void *) PyArray_CreateSortedStridePerm,
+        (void *) PyArray_RemoveAxesInPlace,
+        (void *) PyArray_DebugPrint,
+        (void *) PyArray_FailUnlessWriteable,
+        (void *) PyArray_SetUpdateIfCopyBase,
+        (void *) PyDataMem_NEW,
+        (void *) PyDataMem_FREE,
+        (void *) PyDataMem_RENEW,
+        (void *) PyDataMem_SetEventHook,
+        (NPY_CASTING *) &NPY_DEFAULT_ASSIGN_CASTING,
+        (void *) PyArray_MapIterSwapAxes,
+        (void *) PyArray_MapIterArray,
+        (void *) PyArray_MapIterNext,
+        (void *) PyArray_Partition,
+        (void *) PyArray_ArgPartition,
+        (void *) PyArray_SelectkindConverter,
+        (void *) PyDataMem_NEW_ZEROED,
+        (void *) PyArray_CheckAnyScalarExact,
+        (void *) PyArray_MapIterArrayCopyIfOverlap,
+        (void *) PyArray_ResolveWritebackIfCopy,
+        (void *) PyArray_SetWritebackIfCopyBase,
+        (void *) PyDataMem_SetHandler,
+        (void *) PyDataMem_GetHandler,
+        (PyObject* *) &PyDataMem_DefaultHandler
+};
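+
+/* A minimal sketch (illustrative, not part of this file) of how an extension
+   module typically gains access to this table, assuming the standard numpy
+   C-API import pattern:
+
+       #include <numpy/arrayobject.h>
+
+       PyMODINIT_FUNC
+       PyInit_mymodule(void)        // "mymodule" is a hypothetical name
+       {
+           import_array();          // resolves PyArray_API from the capsule
+                                    // exported by numpy.core._multiarray_umath
+           return PyModule_Create(&mymodule_def);
+       }
+*/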
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/__multiarray_api.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/__multiarray_api.h
new file mode 100644
index 00000000..4c626832
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/__multiarray_api.h
@@ -0,0 +1,1566 @@
+
+#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)
+
+typedef struct {
+        PyObject_HEAD
+        npy_bool obval;
+} PyBoolScalarObject;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
+extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
+extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
+
+NPY_NO_EXPORT  unsigned int PyArray_GetNDArrayCVersion \
+       (void);
+extern NPY_NO_EXPORT PyTypeObject PyBigArray_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyArray_Type;
+
+extern NPY_NO_EXPORT PyArray_DTypeMeta PyArrayDescr_TypeFull;
+#define PyArrayDescr_Type (*(PyTypeObject *)(&PyArrayDescr_TypeFull))
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type;
+
+extern NPY_NO_EXPORT int NPY_NUMUSERTYPES;
+
+extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type;
+
+extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
+
+extern NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyByteArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyShortArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyIntArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyLongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyULongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyStringArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type;
+
+NPY_NO_EXPORT  int PyArray_SetNumericOps \
+       (PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_GetNumericOps \
+       (void);
+NPY_NO_EXPORT  int PyArray_INCREF \
+       (PyArrayObject *);
+NPY_NO_EXPORT  int PyArray_XDECREF \
+       (PyArrayObject *);
+NPY_NO_EXPORT  void PyArray_SetStringFunction \
+       (PyObject *, int);
+NPY_NO_EXPORT  PyArray_Descr * PyArray_DescrFromType \
+       (int);
+NPY_NO_EXPORT  PyObject * PyArray_TypeObjectFromType \
+       (int);
+NPY_NO_EXPORT  char * PyArray_Zero \
+       (PyArrayObject *);
+NPY_NO_EXPORT  char * PyArray_One \
+       (PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CastToType \
+       (PyArrayObject *, PyArray_Descr *, int);
+NPY_NO_EXPORT  int PyArray_CastTo \
+       (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT  int PyArray_CastAnyTo \
+       (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT  int PyArray_CanCastSafely \
+       (int, int);
+NPY_NO_EXPORT  npy_bool PyArray_CanCastTo \
+       (PyArray_Descr *, PyArray_Descr *);
+NPY_NO_EXPORT  int PyArray_ObjectType \
+       (PyObject *, int);
+NPY_NO_EXPORT  PyArray_Descr * PyArray_DescrFromObject \
+       (PyObject *, PyArray_Descr *);
+NPY_NO_EXPORT  PyArrayObject ** PyArray_ConvertToCommonType \
+       (PyObject *, int *);
+NPY_NO_EXPORT  PyArray_Descr * PyArray_DescrFromScalar \
+       (PyObject *);
+NPY_NO_EXPORT  PyArray_Descr * PyArray_DescrFromTypeObject \
+       (PyObject *);
+NPY_NO_EXPORT  npy_intp PyArray_Size \
+       (PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Scalar \
+       (void *, PyArray_Descr *, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromScalar \
+       (PyObject *, PyArray_Descr *);
+NPY_NO_EXPORT  void PyArray_ScalarAsCtype \
+       (PyObject *, void *);
+NPY_NO_EXPORT  int PyArray_CastScalarToCtype \
+       (PyObject *, void *, PyArray_Descr *);
+NPY_NO_EXPORT  int PyArray_CastScalarDirect \
+       (PyObject *, PyArray_Descr *, void *, int);
+NPY_NO_EXPORT  PyObject * PyArray_ScalarFromObject \
+       (PyObject *);
+NPY_NO_EXPORT  PyArray_VectorUnaryFunc * PyArray_GetCastFunc \
+       (PyArray_Descr *, int);
+NPY_NO_EXPORT  PyObject * PyArray_FromDims \
+       (int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type));
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_FromDimsAndDataAndDescr \
+       (int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *, char *NPY_UNUSED(data));
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromAny \
+       (PyObject *, PyArray_Descr *, int, int, int, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureArray \
+       (PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureAnyArray \
+       (PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_FromFile \
+       (FILE *, PyArray_Descr *, npy_intp, char *);
+NPY_NO_EXPORT  PyObject * PyArray_FromString \
+       (char *, npy_intp, PyArray_Descr *, npy_intp, char *);
+NPY_NO_EXPORT  PyObject * PyArray_FromBuffer \
+       (PyObject *, PyArray_Descr *, npy_intp, npy_intp);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromIter \
+       (PyObject *, PyArray_Descr *, npy_intp);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_Return \
+       (PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_GetField \
+       (PyArrayObject *, PyArray_Descr *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetField \
+       (PyArrayObject *, PyArray_Descr *, int, PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Byteswap \
+       (PyArrayObject *, npy_bool);
+NPY_NO_EXPORT  PyObject * PyArray_Resize \
+       (PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order));
+NPY_NO_EXPORT  int PyArray_MoveInto \
+       (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT  int PyArray_CopyInto \
+       (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT  int PyArray_CopyAnyInto \
+       (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT  int PyArray_CopyObject \
+       (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_NewCopy \
+       (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT  PyObject * PyArray_ToList \
+       (PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_ToString \
+       (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT  int PyArray_ToFile \
+       (PyArrayObject *, FILE *, char *, char *);
+NPY_NO_EXPORT  int PyArray_Dump \
+       (PyObject *, PyObject *, int);
+NPY_NO_EXPORT  PyObject * PyArray_Dumps \
+       (PyObject *, int);
+NPY_NO_EXPORT  int PyArray_ValidType \
+       (int);
+NPY_NO_EXPORT  void PyArray_UpdateFlags \
+       (PyArrayObject *, int);
+NPY_NO_EXPORT  PyObject * PyArray_New \
+       (PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_NewFromDescr \
+       (PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *);
+NPY_NO_EXPORT  PyArray_Descr * PyArray_DescrNew \
+       (PyArray_Descr *);
+NPY_NO_EXPORT  PyArray_Descr * PyArray_DescrNewFromType \
+       (int);
+NPY_NO_EXPORT  double PyArray_GetPriority \
+       (PyObject *, double);
+NPY_NO_EXPORT  PyObject * PyArray_IterNew \
+       (PyObject *);
+NPY_NO_EXPORT  PyObject* PyArray_MultiIterNew \
+       (int, ...);
+NPY_NO_EXPORT  int PyArray_PyIntAsInt \
+       (PyObject *);
+NPY_NO_EXPORT  npy_intp PyArray_PyIntAsIntp \
+       (PyObject *);
+NPY_NO_EXPORT  int PyArray_Broadcast \
+       (PyArrayMultiIterObject *);
+NPY_NO_EXPORT  void PyArray_FillObjectArray \
+       (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT  int PyArray_FillWithScalar \
+       (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT  npy_bool PyArray_CheckStrides \
+       (int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *);
+NPY_NO_EXPORT  PyArray_Descr * PyArray_DescrNewByteorder \
+       (PyArray_Descr *, char);
+NPY_NO_EXPORT  PyObject * PyArray_IterAllButAxis \
+       (PyObject *, int *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CheckFromAny \
+       (PyObject *, PyArray_Descr *, int, int, int, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromArray \
+       (PyArrayObject *, PyArray_Descr *, int);
+NPY_NO_EXPORT  PyObject * PyArray_FromInterface \
+       (PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_FromStructInterface \
+       (PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_FromArrayAttr \
+       (PyObject *, PyArray_Descr *, PyObject *);
+NPY_NO_EXPORT  NPY_SCALARKIND PyArray_ScalarKind \
+       (int, PyArrayObject **);
+NPY_NO_EXPORT  int PyArray_CanCoerceScalar \
+       (int, int, NPY_SCALARKIND);
+NPY_NO_EXPORT  PyObject * PyArray_NewFlagsObject \
+       (PyObject *);
+NPY_NO_EXPORT  npy_bool PyArray_CanCastScalar \
+       (PyTypeObject *, PyTypeObject *);
+NPY_NO_EXPORT  int PyArray_CompareUCS4 \
+       (npy_ucs4 const *, npy_ucs4 const *, size_t);
+NPY_NO_EXPORT  int PyArray_RemoveSmallest \
+       (PyArrayMultiIterObject *);
+NPY_NO_EXPORT  int PyArray_ElementStrides \
+       (PyObject *);
+NPY_NO_EXPORT  void PyArray_Item_INCREF \
+       (char *, PyArray_Descr *);
+NPY_NO_EXPORT  void PyArray_Item_XDECREF \
+       (char *, PyArray_Descr *);
+NPY_NO_EXPORT  PyObject * PyArray_FieldNames \
+       (PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Transpose \
+       (PyArrayObject *, PyArray_Dims *);
+NPY_NO_EXPORT  PyObject * PyArray_TakeFrom \
+       (PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE);
+NPY_NO_EXPORT  PyObject * PyArray_PutTo \
+       (PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE);
+NPY_NO_EXPORT  PyObject * PyArray_PutMask \
+       (PyArrayObject *, PyObject*, PyObject*);
+NPY_NO_EXPORT  PyObject * PyArray_Repeat \
+       (PyArrayObject *, PyObject *, int);
+NPY_NO_EXPORT  PyObject * PyArray_Choose \
+       (PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE);
+NPY_NO_EXPORT  int PyArray_Sort \
+       (PyArrayObject *, int, NPY_SORTKIND);
+NPY_NO_EXPORT  PyObject * PyArray_ArgSort \
+       (PyArrayObject *, int, NPY_SORTKIND);
+NPY_NO_EXPORT  PyObject * PyArray_SearchSorted \
+       (PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_ArgMax \
+       (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_ArgMin \
+       (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Reshape \
+       (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Newshape \
+       (PyArrayObject *, PyArray_Dims *, NPY_ORDER);
+NPY_NO_EXPORT  PyObject * PyArray_Squeeze \
+       (PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_View \
+       (PyArrayObject *, PyArray_Descr *, PyTypeObject *);
+NPY_NO_EXPORT  PyObject * PyArray_SwapAxes \
+       (PyArrayObject *, int, int);
+NPY_NO_EXPORT  PyObject * PyArray_Max \
+       (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Min \
+       (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Ptp \
+       (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Mean \
+       (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Trace \
+       (PyArrayObject *, int, int, int, int, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Diagonal \
+       (PyArrayObject *, int, int, int);
+NPY_NO_EXPORT  PyObject * PyArray_Clip \
+       (PyArrayObject *, PyObject *, PyObject *, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Conjugate \
+       (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Nonzero \
+       (PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Std \
+       (PyArrayObject *, int, int, PyArrayObject *, int);
+NPY_NO_EXPORT  PyObject * PyArray_Sum \
+       (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_CumSum \
+       (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Prod \
+       (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_CumProd \
+       (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_All \
+       (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Any \
+       (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Compress \
+       (PyArrayObject *, PyObject *, int, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Flatten \
+       (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT  PyObject * PyArray_Ravel \
+       (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT  npy_intp PyArray_MultiplyList \
+       (npy_intp const *, int);
+NPY_NO_EXPORT  int PyArray_MultiplyIntList \
+       (int const *, int);
+NPY_NO_EXPORT  void * PyArray_GetPtr \
+       (PyArrayObject *, npy_intp const*);
+NPY_NO_EXPORT  int PyArray_CompareLists \
+       (npy_intp const *, npy_intp const *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(5) int PyArray_AsCArray \
+       (PyObject **, void *, npy_intp *, int, PyArray_Descr*);
+NPY_NO_EXPORT  int PyArray_As1D \
+       (PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int NPY_UNUSED(typecode));
+NPY_NO_EXPORT  int PyArray_As2D \
+       (PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode));
+NPY_NO_EXPORT  int PyArray_Free \
+       (PyObject *, void *);
+NPY_NO_EXPORT  int PyArray_Converter \
+       (PyObject *, PyObject **);
+NPY_NO_EXPORT  int PyArray_IntpFromSequence \
+       (PyObject *, npy_intp *, int);
+NPY_NO_EXPORT  PyObject * PyArray_Concatenate \
+       (PyObject *, int);
+NPY_NO_EXPORT  PyObject * PyArray_InnerProduct \
+       (PyObject *, PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_MatrixProduct \
+       (PyObject *, PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_CopyAndTranspose \
+       (PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Correlate \
+       (PyObject *, PyObject *, int);
+NPY_NO_EXPORT  int PyArray_TypestrConvert \
+       (int, int);
+NPY_NO_EXPORT  int PyArray_DescrConverter \
+       (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT  int PyArray_DescrConverter2 \
+       (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT  int PyArray_IntpConverter \
+       (PyObject *, PyArray_Dims *);
+NPY_NO_EXPORT  int PyArray_BufferConverter \
+       (PyObject *, PyArray_Chunk *);
+NPY_NO_EXPORT  int PyArray_AxisConverter \
+       (PyObject *, int *);
+NPY_NO_EXPORT  int PyArray_BoolConverter \
+       (PyObject *, npy_bool *);
+NPY_NO_EXPORT  int PyArray_ByteorderConverter \
+       (PyObject *, char *);
+NPY_NO_EXPORT  int PyArray_OrderConverter \
+       (PyObject *, NPY_ORDER *);
+NPY_NO_EXPORT  unsigned char PyArray_EquivTypes \
+       (PyArray_Descr *, PyArray_Descr *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Zeros \
+       (int, npy_intp const *, PyArray_Descr *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Empty \
+       (int, npy_intp const *, PyArray_Descr *, int);
+NPY_NO_EXPORT  PyObject * PyArray_Where \
+       (PyObject *, PyObject *, PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_Arange \
+       (double, double, double, int);
+NPY_NO_EXPORT  PyObject * PyArray_ArangeObj \
+       (PyObject *, PyObject *, PyObject *, PyArray_Descr *);
+NPY_NO_EXPORT  int PyArray_SortkindConverter \
+       (PyObject *, NPY_SORTKIND *);
+NPY_NO_EXPORT  PyObject * PyArray_LexSort \
+       (PyObject *, int);
+NPY_NO_EXPORT  PyObject * PyArray_Round \
+       (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT  unsigned char PyArray_EquivTypenums \
+       (int, int);
+NPY_NO_EXPORT  int PyArray_RegisterDataType \
+       (PyArray_Descr *);
+NPY_NO_EXPORT  int PyArray_RegisterCastFunc \
+       (PyArray_Descr *, int, PyArray_VectorUnaryFunc *);
+NPY_NO_EXPORT  int PyArray_RegisterCanCast \
+       (PyArray_Descr *, int, NPY_SCALARKIND);
+NPY_NO_EXPORT  void PyArray_InitArrFuncs \
+       (PyArray_ArrFuncs *);
+NPY_NO_EXPORT  PyObject * PyArray_IntTupleFromIntp \
+       (int, npy_intp const *);
+NPY_NO_EXPORT  int PyArray_TypeNumFromName \
+       (char const *);
+NPY_NO_EXPORT  int PyArray_ClipmodeConverter \
+       (PyObject *, NPY_CLIPMODE *);
+NPY_NO_EXPORT  int PyArray_OutputConverter \
+       (PyObject *, PyArrayObject **);
+NPY_NO_EXPORT  PyObject * PyArray_BroadcastToShape \
+       (PyObject *, npy_intp *, int);
+NPY_NO_EXPORT  void _PyArray_SigintHandler \
+       (int);
+NPY_NO_EXPORT  void* _PyArray_GetSigintBuf \
+       (void);
+NPY_NO_EXPORT  int PyArray_DescrAlignConverter \
+       (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT  int PyArray_DescrAlignConverter2 \
+       (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT  int PyArray_SearchsideConverter \
+       (PyObject *, void *);
+NPY_NO_EXPORT  PyObject * PyArray_CheckAxis \
+       (PyArrayObject *, int *, int);
+NPY_NO_EXPORT  npy_intp PyArray_OverflowMultiplyList \
+       (npy_intp const *, int);
+NPY_NO_EXPORT  int PyArray_CompareString \
+       (const char *, const char *, size_t);
+NPY_NO_EXPORT  PyObject* PyArray_MultiIterFromObjects \
+       (PyObject **, int, int, ...);
+NPY_NO_EXPORT  int PyArray_GetEndianness \
+       (void);
+NPY_NO_EXPORT  unsigned int PyArray_GetNDArrayCFeatureVersion \
+       (void);
+NPY_NO_EXPORT  PyObject * PyArray_Correlate2 \
+       (PyObject *, PyObject *, int);
+NPY_NO_EXPORT  PyObject* PyArray_NeighborhoodIterNew \
+       (PyArrayIterObject *, const npy_intp *, int, PyArrayObject*);
+extern NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject NpyIter_Type;
+
+NPY_NO_EXPORT  void PyArray_SetDatetimeParseFunction \
+       (PyObject *NPY_UNUSED(op));
+NPY_NO_EXPORT  void PyArray_DatetimeToDatetimeStruct \
+       (npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *);
+NPY_NO_EXPORT  void PyArray_TimedeltaToTimedeltaStruct \
+       (npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *);
+NPY_NO_EXPORT  npy_datetime PyArray_DatetimeStructToDatetime \
+       (NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d));
+NPY_NO_EXPORT  npy_datetime PyArray_TimedeltaStructToTimedelta \
+       (NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d));
+NPY_NO_EXPORT  NpyIter * NpyIter_New \
+       (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*);
+NPY_NO_EXPORT  NpyIter * NpyIter_MultiNew \
+       (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **);
+NPY_NO_EXPORT  NpyIter * NpyIter_AdvancedNew \
+       (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp);
+NPY_NO_EXPORT  NpyIter * NpyIter_Copy \
+       (NpyIter *);
+NPY_NO_EXPORT  int NpyIter_Deallocate \
+       (NpyIter *);
+NPY_NO_EXPORT  npy_bool NpyIter_HasDelayedBufAlloc \
+       (NpyIter *);
+NPY_NO_EXPORT  npy_bool NpyIter_HasExternalLoop \
+       (NpyIter *);
+NPY_NO_EXPORT  int NpyIter_EnableExternalLoop \
+       (NpyIter *);
+NPY_NO_EXPORT  npy_intp * NpyIter_GetInnerStrideArray \
+       (NpyIter *);
+NPY_NO_EXPORT  npy_intp * NpyIter_GetInnerLoopSizePtr \
+       (NpyIter *);
+NPY_NO_EXPORT  int NpyIter_Reset \
+       (NpyIter *, char **);
+NPY_NO_EXPORT  int NpyIter_ResetBasePointers \
+       (NpyIter *, char **, char **);
+NPY_NO_EXPORT  int NpyIter_ResetToIterIndexRange \
+       (NpyIter *, npy_intp, npy_intp, char **);
+NPY_NO_EXPORT  int NpyIter_GetNDim \
+       (NpyIter *);
+NPY_NO_EXPORT  int NpyIter_GetNOp \
+       (NpyIter *);
+NPY_NO_EXPORT  NpyIter_IterNextFunc * NpyIter_GetIterNext \
+       (NpyIter *, char **);
+NPY_NO_EXPORT  npy_intp NpyIter_GetIterSize \
+       (NpyIter *);
+NPY_NO_EXPORT  void NpyIter_GetIterIndexRange \
+       (NpyIter *, npy_intp *, npy_intp *);
+NPY_NO_EXPORT  npy_intp NpyIter_GetIterIndex \
+       (NpyIter *);
+NPY_NO_EXPORT  int NpyIter_GotoIterIndex \
+       (NpyIter *, npy_intp);
+NPY_NO_EXPORT  npy_bool NpyIter_HasMultiIndex \
+       (NpyIter *);
+NPY_NO_EXPORT  int NpyIter_GetShape \
+       (NpyIter *, npy_intp *);
+NPY_NO_EXPORT  NpyIter_GetMultiIndexFunc * NpyIter_GetGetMultiIndex \
+       (NpyIter *, char **);
+NPY_NO_EXPORT  int NpyIter_GotoMultiIndex \
+       (NpyIter *, npy_intp const *);
+NPY_NO_EXPORT  int NpyIter_RemoveMultiIndex \
+       (NpyIter *);
+NPY_NO_EXPORT  npy_bool NpyIter_HasIndex \
+       (NpyIter *);
+NPY_NO_EXPORT  npy_bool NpyIter_IsBuffered \
+       (NpyIter *);
+NPY_NO_EXPORT  npy_bool NpyIter_IsGrowInner \
+       (NpyIter *);
+NPY_NO_EXPORT  npy_intp NpyIter_GetBufferSize \
+       (NpyIter *);
+NPY_NO_EXPORT  npy_intp * NpyIter_GetIndexPtr \
+       (NpyIter *);
+NPY_NO_EXPORT  int NpyIter_GotoIndex \
+       (NpyIter *, npy_intp);
+NPY_NO_EXPORT  char ** NpyIter_GetDataPtrArray \
+       (NpyIter *);
+NPY_NO_EXPORT  PyArray_Descr ** NpyIter_GetDescrArray \
+       (NpyIter *);
+NPY_NO_EXPORT  PyArrayObject ** NpyIter_GetOperandArray \
+       (NpyIter *);
+NPY_NO_EXPORT  PyArrayObject * NpyIter_GetIterView \
+       (NpyIter *, npy_intp);
+NPY_NO_EXPORT  void NpyIter_GetReadFlags \
+       (NpyIter *, char *);
+NPY_NO_EXPORT  void NpyIter_GetWriteFlags \
+       (NpyIter *, char *);
+NPY_NO_EXPORT  void NpyIter_DebugPrint \
+       (NpyIter *);
+NPY_NO_EXPORT  npy_bool NpyIter_IterationNeedsAPI \
+       (NpyIter *);
+NPY_NO_EXPORT  void NpyIter_GetInnerFixedStrideArray \
+       (NpyIter *, npy_intp *);
+NPY_NO_EXPORT  int NpyIter_RemoveAxis \
+       (NpyIter *, int);
+NPY_NO_EXPORT  npy_intp * NpyIter_GetAxisStrideArray \
+       (NpyIter *, int);
+NPY_NO_EXPORT  npy_bool NpyIter_RequiresBuffering \
+       (NpyIter *);
+NPY_NO_EXPORT  char ** NpyIter_GetInitialDataPtrArray \
+       (NpyIter *);
+NPY_NO_EXPORT  int NpyIter_CreateCompatibleStrides \
+       (NpyIter *, npy_intp, npy_intp *);
+NPY_NO_EXPORT  int PyArray_CastingConverter \
+       (PyObject *, NPY_CASTING *);
+NPY_NO_EXPORT  npy_intp PyArray_CountNonzero \
+       (PyArrayObject *);
+NPY_NO_EXPORT  PyArray_Descr * PyArray_PromoteTypes \
+       (PyArray_Descr *, PyArray_Descr *);
+NPY_NO_EXPORT  PyArray_Descr * PyArray_MinScalarType \
+       (PyArrayObject *);
+NPY_NO_EXPORT  PyArray_Descr * PyArray_ResultType \
+       (npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[]);
+NPY_NO_EXPORT  npy_bool PyArray_CanCastArrayTo \
+       (PyArrayObject *, PyArray_Descr *, NPY_CASTING);
+NPY_NO_EXPORT  npy_bool PyArray_CanCastTypeTo \
+       (PyArray_Descr *, PyArray_Descr *, NPY_CASTING);
+NPY_NO_EXPORT  PyArrayObject * PyArray_EinsteinSum \
+       (char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_NewLikeArray \
+       (PyArrayObject *, NPY_ORDER, PyArray_Descr *, int);
+NPY_NO_EXPORT  int PyArray_GetArrayParamsFromObject \
+       (PyObject *NPY_UNUSED(op), PyArray_Descr *NPY_UNUSED(requested_dtype), npy_bool NPY_UNUSED(writeable), PyArray_Descr **NPY_UNUSED(out_dtype), int *NPY_UNUSED(out_ndim), npy_intp *NPY_UNUSED(out_dims), PyArrayObject **NPY_UNUSED(out_arr), PyObject *NPY_UNUSED(context));
+NPY_NO_EXPORT  int PyArray_ConvertClipmodeSequence \
+       (PyObject *, NPY_CLIPMODE *, int);
+NPY_NO_EXPORT  PyObject * PyArray_MatrixProduct2 \
+       (PyObject *, PyObject *, PyArrayObject*);
+NPY_NO_EXPORT  npy_bool NpyIter_IsFirstVisit \
+       (NpyIter *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetBaseObject \
+       (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT  void PyArray_CreateSortedStridePerm \
+       (int, npy_intp const *, npy_stride_sort_item *);
+NPY_NO_EXPORT  void PyArray_RemoveAxesInPlace \
+       (PyArrayObject *, const npy_bool *);
+NPY_NO_EXPORT  void PyArray_DebugPrint \
+       (PyArrayObject *);
+NPY_NO_EXPORT  int PyArray_FailUnlessWriteable \
+       (PyArrayObject *, const char *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetUpdateIfCopyBase \
+       (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT  void * PyDataMem_NEW \
+       (size_t);
+NPY_NO_EXPORT  void PyDataMem_FREE \
+       (void *);
+NPY_NO_EXPORT  void * PyDataMem_RENEW \
+       (void *, size_t);
+NPY_NO_EXPORT  PyDataMem_EventHookFunc * PyDataMem_SetEventHook \
+       (PyDataMem_EventHookFunc *, void *, void **);
+extern NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING;
+
+NPY_NO_EXPORT  void PyArray_MapIterSwapAxes \
+       (PyArrayMapIterObject *, PyArrayObject **, int);
+NPY_NO_EXPORT  PyObject * PyArray_MapIterArray \
+       (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT  void PyArray_MapIterNext \
+       (PyArrayMapIterObject *);
+NPY_NO_EXPORT  int PyArray_Partition \
+       (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND);
+NPY_NO_EXPORT  PyObject * PyArray_ArgPartition \
+       (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND);
+NPY_NO_EXPORT  int PyArray_SelectkindConverter \
+       (PyObject *, NPY_SELECTKIND *);
+NPY_NO_EXPORT  void * PyDataMem_NEW_ZEROED \
+       (size_t, size_t);
+NPY_NO_EXPORT  int PyArray_CheckAnyScalarExact \
+       (PyObject *);
+NPY_NO_EXPORT  PyObject * PyArray_MapIterArrayCopyIfOverlap \
+       (PyArrayObject *, PyObject *, int, PyArrayObject *);
+NPY_NO_EXPORT  int PyArray_ResolveWritebackIfCopy \
+       (PyArrayObject *);
+NPY_NO_EXPORT  int PyArray_SetWritebackIfCopyBase \
+       (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT  PyObject * PyDataMem_SetHandler \
+       (PyObject *);
+NPY_NO_EXPORT  PyObject * PyDataMem_GetHandler \
+       (void);
+extern NPY_NO_EXPORT PyObject* PyDataMem_DefaultHandler;
+
+
+#else
+
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
+#endif
+
+#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
+extern void **PyArray_API;
+#else
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+void **PyArray_API;
+#else
+static void **PyArray_API=NULL;
+#endif
+#endif
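Taken together, the switches in the block above implement the standard multi-file pattern from the NumPy C-API docs: exactly one translation unit owns the API table and calls import_array(), and every other unit declares the same table extern. A minimal sketch, assuming a hypothetical symbol name MYMODULE_ARRAY_API (this sketch is editorial, not part of the generated header):

/* file1.c -- the one file that calls import_array(); with
 * PY_ARRAY_UNIQUE_SYMBOL defined, the header expands the
 * definition above to `void **MYMODULE_ARRAY_API;`. */
#define PY_ARRAY_UNIQUE_SYMBOL MYMODULE_ARRAY_API
#include <numpy/arrayobject.h>

/* file2.c -- additionally defines NO_IMPORT_ARRAY, so the header
 * emits `extern void **MYMODULE_ARRAY_API;` and this unit shares
 * the table that file1.c populated at import time. */
#define NO_IMPORT_ARRAY
#define PY_ARRAY_UNIQUE_SYMBOL MYMODULE_ARRAY_API
#include <numpy/arrayobject.h>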
+
+#define PyArray_GetNDArrayCVersion \
+        (*(unsigned int (*)(void)) \
+    PyArray_API[0])
+#define PyBigArray_Type (*(PyTypeObject *)PyArray_API[1])
+#define PyArray_Type (*(PyTypeObject *)PyArray_API[2])
+#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3])
+#define PyArrayFlags_Type (*(PyTypeObject *)PyArray_API[4])
+#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5])
+#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6])
+#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7])
+#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8])
+#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9])
+#define PyGenericArrType_Type (*(PyTypeObject *)PyArray_API[10])
+#define PyNumberArrType_Type (*(PyTypeObject *)PyArray_API[11])
+#define PyIntegerArrType_Type (*(PyTypeObject *)PyArray_API[12])
+#define PySignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[13])
+#define PyUnsignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[14])
+#define PyInexactArrType_Type (*(PyTypeObject *)PyArray_API[15])
+#define PyFloatingArrType_Type (*(PyTypeObject *)PyArray_API[16])
+#define PyComplexFloatingArrType_Type (*(PyTypeObject *)PyArray_API[17])
+#define PyFlexibleArrType_Type (*(PyTypeObject *)PyArray_API[18])
+#define PyCharacterArrType_Type (*(PyTypeObject *)PyArray_API[19])
+#define PyByteArrType_Type (*(PyTypeObject *)PyArray_API[20])
+#define PyShortArrType_Type (*(PyTypeObject *)PyArray_API[21])
+#define PyIntArrType_Type (*(PyTypeObject *)PyArray_API[22])
+#define PyLongArrType_Type (*(PyTypeObject *)PyArray_API[23])
+#define PyLongLongArrType_Type (*(PyTypeObject *)PyArray_API[24])
+#define PyUByteArrType_Type (*(PyTypeObject *)PyArray_API[25])
+#define PyUShortArrType_Type (*(PyTypeObject *)PyArray_API[26])
+#define PyUIntArrType_Type (*(PyTypeObject *)PyArray_API[27])
+#define PyULongArrType_Type (*(PyTypeObject *)PyArray_API[28])
+#define PyULongLongArrType_Type (*(PyTypeObject *)PyArray_API[29])
+#define PyFloatArrType_Type (*(PyTypeObject *)PyArray_API[30])
+#define PyDoubleArrType_Type (*(PyTypeObject *)PyArray_API[31])
+#define PyLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[32])
+#define PyCFloatArrType_Type (*(PyTypeObject *)PyArray_API[33])
+#define PyCDoubleArrType_Type (*(PyTypeObject *)PyArray_API[34])
+#define PyCLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[35])
+#define PyObjectArrType_Type (*(PyTypeObject *)PyArray_API[36])
+#define PyStringArrType_Type (*(PyTypeObject *)PyArray_API[37])
+#define PyUnicodeArrType_Type (*(PyTypeObject *)PyArray_API[38])
+#define PyVoidArrType_Type (*(PyTypeObject *)PyArray_API[39])
+#define PyArray_SetNumericOps \
+        (*(int (*)(PyObject *)) \
+    PyArray_API[40])
+#define PyArray_GetNumericOps \
+        (*(PyObject * (*)(void)) \
+    PyArray_API[41])
+#define PyArray_INCREF \
+        (*(int (*)(PyArrayObject *)) \
+    PyArray_API[42])
+#define PyArray_XDECREF \
+        (*(int (*)(PyArrayObject *)) \
+    PyArray_API[43])
+#define PyArray_SetStringFunction \
+        (*(void (*)(PyObject *, int)) \
+    PyArray_API[44])
+#define PyArray_DescrFromType \
+        (*(PyArray_Descr * (*)(int)) \
+    PyArray_API[45])
+#define PyArray_TypeObjectFromType \
+        (*(PyObject * (*)(int)) \
+    PyArray_API[46])
+#define PyArray_Zero \
+        (*(char * (*)(PyArrayObject *)) \
+    PyArray_API[47])
+#define PyArray_One \
+        (*(char * (*)(PyArrayObject *)) \
+    PyArray_API[48])
+#define PyArray_CastToType \
+        (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \
+    PyArray_API[49])
+#define PyArray_CastTo \
+        (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+    PyArray_API[50])
+#define PyArray_CastAnyTo \
+        (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+    PyArray_API[51])
+#define PyArray_CanCastSafely \
+        (*(int (*)(int, int)) \
+    PyArray_API[52])
+#define PyArray_CanCastTo \
+        (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *)) \
+    PyArray_API[53])
+#define PyArray_ObjectType \
+        (*(int (*)(PyObject *, int)) \
+    PyArray_API[54])
+#define PyArray_DescrFromObject \
+        (*(PyArray_Descr * (*)(PyObject *, PyArray_Descr *)) \
+    PyArray_API[55])
+#define PyArray_ConvertToCommonType \
+        (*(PyArrayObject ** (*)(PyObject *, int *)) \
+    PyArray_API[56])
+#define PyArray_DescrFromScalar \
+        (*(PyArray_Descr * (*)(PyObject *)) \
+    PyArray_API[57])
+#define PyArray_DescrFromTypeObject \
+        (*(PyArray_Descr * (*)(PyObject *)) \
+    PyArray_API[58])
+#define PyArray_Size \
+        (*(npy_intp (*)(PyObject *)) \
+    PyArray_API[59])
+#define PyArray_Scalar \
+        (*(PyObject * (*)(void *, PyArray_Descr *, PyObject *)) \
+    PyArray_API[60])
+#define PyArray_FromScalar \
+        (*(PyObject * (*)(PyObject *, PyArray_Descr *)) \
+    PyArray_API[61])
+#define PyArray_ScalarAsCtype \
+        (*(void (*)(PyObject *, void *)) \
+    PyArray_API[62])
+#define PyArray_CastScalarToCtype \
+        (*(int (*)(PyObject *, void *, PyArray_Descr *)) \
+    PyArray_API[63])
+#define PyArray_CastScalarDirect \
+        (*(int (*)(PyObject *, PyArray_Descr *, void *, int)) \
+    PyArray_API[64])
+#define PyArray_ScalarFromObject \
+        (*(PyObject * (*)(PyObject *)) \
+    PyArray_API[65])
+#define PyArray_GetCastFunc \
+        (*(PyArray_VectorUnaryFunc * (*)(PyArray_Descr *, int)) \
+    PyArray_API[66])
+#define PyArray_FromDims \
+        (*(PyObject * (*)(int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type))) \
+    PyArray_API[67])
+#define PyArray_FromDimsAndDataAndDescr \
+        (*(PyObject * (*)(int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *, char *NPY_UNUSED(data))) \
+    PyArray_API[68])
+#define PyArray_FromAny \
+        (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \
+    PyArray_API[69])
+#define PyArray_EnsureArray \
+        (*(PyObject * (*)(PyObject *)) \
+    PyArray_API[70])
+#define PyArray_EnsureAnyArray \
+        (*(PyObject * (*)(PyObject *)) \
+    PyArray_API[71])
+#define PyArray_FromFile \
+        (*(PyObject * (*)(FILE *, PyArray_Descr *, npy_intp, char *)) \
+    PyArray_API[72])
+#define PyArray_FromString \
+        (*(PyObject * (*)(char *, npy_intp, PyArray_Descr *, npy_intp, char *)) \
+    PyArray_API[73])
+#define PyArray_FromBuffer \
+        (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp, npy_intp)) \
+    PyArray_API[74])
+#define PyArray_FromIter \
+        (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp)) \
+    PyArray_API[75])
+#define PyArray_Return \
+        (*(PyObject * (*)(PyArrayObject *)) \
+    PyArray_API[76])
+#define PyArray_GetField \
+        (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \
+    PyArray_API[77])
+#define PyArray_SetField \
+        (*(int (*)(PyArrayObject *, PyArray_Descr *, int, PyObject *)) \
+    PyArray_API[78])
+#define PyArray_Byteswap \
+        (*(PyObject * (*)(PyArrayObject *, npy_bool)) \
+    PyArray_API[79])
+#define PyArray_Resize \
+        (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order))) \
+    PyArray_API[80])
+#define PyArray_MoveInto \
+        (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+    PyArray_API[81])
+#define PyArray_CopyInto \
+        (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+    PyArray_API[82])
+#define PyArray_CopyAnyInto \
+        (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+    PyArray_API[83])
+#define PyArray_CopyObject \
+        (*(int (*)(PyArrayObject *, PyObject *)) \
+    PyArray_API[84])
+#define PyArray_NewCopy \
+        (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+    PyArray_API[85])
+#define PyArray_ToList \
+        (*(PyObject * (*)(PyArrayObject *)) \
+    PyArray_API[86])
+#define PyArray_ToString \
+        (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+    PyArray_API[87])
+#define PyArray_ToFile \
+        (*(int (*)(PyArrayObject *, FILE *, char *, char *)) \
+    PyArray_API[88])
+#define PyArray_Dump \
+        (*(int (*)(PyObject *, PyObject *, int)) \
+    PyArray_API[89])
+#define PyArray_Dumps \
+        (*(PyObject * (*)(PyObject *, int)) \
+    PyArray_API[90])
+#define PyArray_ValidType \
+        (*(int (*)(int)) \
+    PyArray_API[91])
+#define PyArray_UpdateFlags \
+        (*(void (*)(PyArrayObject *, int)) \
+    PyArray_API[92])
+#define PyArray_New \
+        (*(PyObject * (*)(PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *)) \
+    PyArray_API[93])
+#define PyArray_NewFromDescr \
+        (*(PyObject * (*)(PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *)) \
+    PyArray_API[94])
+#define PyArray_DescrNew \
+        (*(PyArray_Descr * (*)(PyArray_Descr *)) \
+    PyArray_API[95])
+#define PyArray_DescrNewFromType \
+        (*(PyArray_Descr * (*)(int)) \
+    PyArray_API[96])
+#define PyArray_GetPriority \
+        (*(double (*)(PyObject *, double)) \
+    PyArray_API[97])
+#define PyArray_IterNew \
+        (*(PyObject * (*)(PyObject *)) \
+    PyArray_API[98])
+#define PyArray_MultiIterNew \
+        (*(PyObject* (*)(int, ...)) \
+    PyArray_API[99])
+#define PyArray_PyIntAsInt \
+        (*(int (*)(PyObject *)) \
+    PyArray_API[100])
+#define PyArray_PyIntAsIntp \
+        (*(npy_intp (*)(PyObject *)) \
+    PyArray_API[101])
+#define PyArray_Broadcast \
+        (*(int (*)(PyArrayMultiIterObject *)) \
+    PyArray_API[102])
+#define PyArray_FillObjectArray \
+        (*(void (*)(PyArrayObject *, PyObject *)) \
+    PyArray_API[103])
+#define PyArray_FillWithScalar \
+        (*(int (*)(PyArrayObject *, PyObject *)) \
+    PyArray_API[104])
+#define PyArray_CheckStrides \
+        (*(npy_bool (*)(int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *)) \
+    PyArray_API[105])
+#define PyArray_DescrNewByteorder \
+        (*(PyArray_Descr * (*)(PyArray_Descr *, char)) \
+    PyArray_API[106])
+#define PyArray_IterAllButAxis \
+        (*(PyObject * (*)(PyObject *, int *)) \
+    PyArray_API[107])
+#define PyArray_CheckFromAny \
+        (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \
+    PyArray_API[108])
+#define PyArray_FromArray \
+        (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \
+    PyArray_API[109])
+#define PyArray_FromInterface \
+        (*(PyObject * (*)(PyObject *)) \
+    PyArray_API[110])
+#define PyArray_FromStructInterface \
+        (*(PyObject * (*)(PyObject *)) \
+    PyArray_API[111])
+#define PyArray_FromArrayAttr \
+        (*(PyObject * (*)(PyObject *, PyArray_Descr *, PyObject *)) \
+    PyArray_API[112])
+#define PyArray_ScalarKind \
+        (*(NPY_SCALARKIND (*)(int, PyArrayObject **)) \
+    PyArray_API[113])
+#define PyArray_CanCoerceScalar \
+        (*(int (*)(int, int, NPY_SCALARKIND)) \
+    PyArray_API[114])
+#define PyArray_NewFlagsObject \
+        (*(PyObject * (*)(PyObject *)) \
+    PyArray_API[115])
+#define PyArray_CanCastScalar \
+        (*(npy_bool (*)(PyTypeObject *, PyTypeObject *)) \
+    PyArray_API[116])
+#define PyArray_CompareUCS4 \
+        (*(int (*)(npy_ucs4 const *, npy_ucs4 const *, size_t)) \
+    PyArray_API[117])
+#define PyArray_RemoveSmallest \
+        (*(int (*)(PyArrayMultiIterObject *)) \
+    PyArray_API[118])
+#define PyArray_ElementStrides \
+        (*(int (*)(PyObject *)) \
+    PyArray_API[119])
+#define PyArray_Item_INCREF \
+        (*(void (*)(char *, PyArray_Descr *)) \
+    PyArray_API[120])
+#define PyArray_Item_XDECREF \
+        (*(void (*)(char *, PyArray_Descr *)) \
+    PyArray_API[121])
+#define PyArray_FieldNames \
+        (*(PyObject * (*)(PyObject *)) \
+    PyArray_API[122])
+#define PyArray_Transpose \
+        (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *)) \
+    PyArray_API[123])
+#define PyArray_TakeFrom \
+        (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE)) \
+    PyArray_API[124])
+#define PyArray_PutTo \
+        (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE)) \
+    PyArray_API[125])
+#define PyArray_PutMask \
+        (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject*)) \
+    PyArray_API[126])
+#define PyArray_Repeat \
+        (*(PyObject * (*)(PyArrayObject *, PyObject *, int)) \
+    PyArray_API[127])
+#define PyArray_Choose \
+        (*(PyObject * (*)(PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE)) \
+    PyArray_API[128])
+#define PyArray_Sort \
+        (*(int (*)(PyArrayObject *, int, NPY_SORTKIND)) \
+    PyArray_API[129])
+#define PyArray_ArgSort \
+        (*(PyObject * (*)(PyArrayObject *, int, NPY_SORTKIND)) \
+    PyArray_API[130])
+#define PyArray_SearchSorted \
+        (*(PyObject * (*)(PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *)) \
+    PyArray_API[131])
+#define PyArray_ArgMax \
+        (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+    PyArray_API[132])
+#define PyArray_ArgMin \
+        (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+    PyArray_API[133])
+#define PyArray_Reshape \
+        (*(PyObject * (*)(PyArrayObject *, PyObject *)) \
+    PyArray_API[134])
+#define PyArray_Newshape \
+        (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, NPY_ORDER)) \
+    PyArray_API[135])
+#define PyArray_Squeeze \
+        (*(PyObject * (*)(PyArrayObject *)) \
+    PyArray_API[136])
+#define PyArray_View \
+        (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, PyTypeObject *)) \
+    PyArray_API[137])
+#define PyArray_SwapAxes \
+        (*(PyObject * (*)(PyArrayObject *, int, int)) \
+    PyArray_API[138])
+#define PyArray_Max \
+        (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+    PyArray_API[139])
+#define PyArray_Min \
+        (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+    PyArray_API[140])
+#define PyArray_Ptp \
+        (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+    PyArray_API[141])
+#define PyArray_Mean \
+        (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+    PyArray_API[142])
+#define PyArray_Trace \
+        (*(PyObject * (*)(PyArrayObject *, int, int, int, int, PyArrayObject *)) \
+    PyArray_API[143])
+#define PyArray_Diagonal \
+        (*(PyObject * (*)(PyArrayObject *, int, int, int)) \
+    PyArray_API[144])
+#define PyArray_Clip \
+        (*(PyObject * (*)(PyArrayObject *, PyObject *, PyObject *, PyArrayObject *)) \
+    PyArray_API[145])
+#define PyArray_Conjugate \
+        (*(PyObject * (*)(PyArrayObject *, PyArrayObject *)) \
+    PyArray_API[146])
+#define PyArray_Nonzero \
+        (*(PyObject * (*)(PyArrayObject *)) \
+    PyArray_API[147])
+#define PyArray_Std \
+        (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *, int)) \
+    PyArray_API[148])
+#define PyArray_Sum \
+        (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+    PyArray_API[149])
+#define PyArray_CumSum \
+        (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+    PyArray_API[150])
+#define PyArray_Prod \
+        (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+    PyArray_API[151])
+#define PyArray_CumProd \
+        (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+    PyArray_API[152])
+#define PyArray_All \
+        (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+    PyArray_API[153])
+#define PyArray_Any \
+        (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+    PyArray_API[154])
+#define PyArray_Compress \
+        (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \
+    PyArray_API[155])
+#define PyArray_Flatten \
+        (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+    PyArray_API[156])
+#define PyArray_Ravel \
+        (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+    PyArray_API[157])
+#define PyArray_MultiplyList \
+        (*(npy_intp (*)(npy_intp const *, int)) \
+    PyArray_API[158])
+#define PyArray_MultiplyIntList \
+        (*(int (*)(int const *, int)) \
+    PyArray_API[159])
+#define PyArray_GetPtr \
+        (*(void * (*)(PyArrayObject *, npy_intp const*)) \
+    PyArray_API[160])
+#define PyArray_CompareLists \
+        (*(int (*)(npy_intp const *, npy_intp const *, int)) \
+    PyArray_API[161])
+#define PyArray_AsCArray \
+        (*(int (*)(PyObject **, void *, npy_intp *, int, PyArray_Descr*)) \
+    PyArray_API[162])
+#define PyArray_As1D \
+        (*(int (*)(PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int NPY_UNUSED(typecode))) \
+    PyArray_API[163])
+#define PyArray_As2D \
+        (*(int (*)(PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode))) \
+    PyArray_API[164])
+#define PyArray_Free \
+        (*(int (*)(PyObject *, void *)) \
+    PyArray_API[165])
+#define PyArray_Converter \
+        (*(int (*)(PyObject *, PyObject **)) \
+    PyArray_API[166])
+#define PyArray_IntpFromSequence \
+        (*(int (*)(PyObject *, npy_intp *, int)) \
+    PyArray_API[167])
+#define PyArray_Concatenate \
+        (*(PyObject * (*)(PyObject *, int)) \
+    PyArray_API[168])
+#define PyArray_InnerProduct \
+        (*(PyObject * (*)(PyObject *, PyObject *)) \
+    PyArray_API[169])
+#define PyArray_MatrixProduct \
+        (*(PyObject * (*)(PyObject *, PyObject *)) \
+    PyArray_API[170])
+#define PyArray_CopyAndTranspose \
+        (*(PyObject * (*)(PyObject *)) \
+    PyArray_API[171])
+#define PyArray_Correlate \
+        (*(PyObject * (*)(PyObject *, PyObject *, int)) \
+    PyArray_API[172])
+#define PyArray_TypestrConvert \
+        (*(int (*)(int, int)) \
+    PyArray_API[173])
+#define PyArray_DescrConverter \
+        (*(int (*)(PyObject *, PyArray_Descr **)) \
+    PyArray_API[174])
+#define PyArray_DescrConverter2 \
+        (*(int (*)(PyObject *, PyArray_Descr **)) \
+    PyArray_API[175])
+#define PyArray_IntpConverter \
+        (*(int (*)(PyObject *, PyArray_Dims *)) \
+    PyArray_API[176])
+#define PyArray_BufferConverter \
+        (*(int (*)(PyObject *, PyArray_Chunk *)) \
+    PyArray_API[177])
+#define PyArray_AxisConverter \
+        (*(int (*)(PyObject *, int *)) \
+    PyArray_API[178])
+#define PyArray_BoolConverter \
+        (*(int (*)(PyObject *, npy_bool *)) \
+    PyArray_API[179])
+#define PyArray_ByteorderConverter \
+        (*(int (*)(PyObject *, char *)) \
+    PyArray_API[180])
+#define PyArray_OrderConverter \
+        (*(int (*)(PyObject *, NPY_ORDER *)) \
+    PyArray_API[181])
+#define PyArray_EquivTypes \
+        (*(unsigned char (*)(PyArray_Descr *, PyArray_Descr *)) \
+    PyArray_API[182])
+#define PyArray_Zeros \
+        (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \
+    PyArray_API[183])
+#define PyArray_Empty \
+        (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \
+    PyArray_API[184])
+#define PyArray_Where \
+        (*(PyObject * (*)(PyObject *, PyObject *, PyObject *)) \
+    PyArray_API[185])
+#define PyArray_Arange \
+        (*(PyObject * (*)(double, double, double, int)) \
+    PyArray_API[186])
+#define PyArray_ArangeObj \
+        (*(PyObject * (*)(PyObject *, PyObject *, PyObject *, PyArray_Descr *)) \
+    PyArray_API[187])
+#define PyArray_SortkindConverter \
+        (*(int (*)(PyObject *, NPY_SORTKIND *)) \
+    PyArray_API[188])
+#define PyArray_LexSort \
+        (*(PyObject * (*)(PyObject *, int)) \
+    PyArray_API[189])
+#define PyArray_Round \
+        (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+    PyArray_API[190])
+#define PyArray_EquivTypenums \
+        (*(unsigned char (*)(int, int)) \
+    PyArray_API[191])
+#define PyArray_RegisterDataType \
+        (*(int (*)(PyArray_Descr *)) \
+    PyArray_API[192])
+#define PyArray_RegisterCastFunc \
+        (*(int (*)(PyArray_Descr *, int, PyArray_VectorUnaryFunc *)) \
+    PyArray_API[193])
+#define PyArray_RegisterCanCast \
+        (*(int (*)(PyArray_Descr *, int, NPY_SCALARKIND)) \
+    PyArray_API[194])
+#define PyArray_InitArrFuncs \
+        (*(void (*)(PyArray_ArrFuncs *)) \
+    PyArray_API[195])
+#define PyArray_IntTupleFromIntp \
+        (*(PyObject * (*)(int, npy_intp const *)) \
+    PyArray_API[196])
+#define PyArray_TypeNumFromName \
+        (*(int (*)(char const *)) \
+    PyArray_API[197])
+#define PyArray_ClipmodeConverter \
+        (*(int (*)(PyObject *, NPY_CLIPMODE *)) \
+    PyArray_API[198])
+#define PyArray_OutputConverter \
+        (*(int (*)(PyObject *, PyArrayObject **)) \
+    PyArray_API[199])
+#define PyArray_BroadcastToShape \
+        (*(PyObject * (*)(PyObject *, npy_intp *, int)) \
+    PyArray_API[200])
+#define _PyArray_SigintHandler \
+        (*(void (*)(int)) \
+    PyArray_API[201])
+#define _PyArray_GetSigintBuf \
+        (*(void* (*)(void)) \
+    PyArray_API[202])
+#define PyArray_DescrAlignConverter \
+        (*(int (*)(PyObject *, PyArray_Descr **)) \
+    PyArray_API[203])
+#define PyArray_DescrAlignConverter2 \
+        (*(int (*)(PyObject *, PyArray_Descr **)) \
+    PyArray_API[204])
+#define PyArray_SearchsideConverter \
+        (*(int (*)(PyObject *, void *)) \
+    PyArray_API[205])
+#define PyArray_CheckAxis \
+        (*(PyObject * (*)(PyArrayObject *, int *, int)) \
+    PyArray_API[206])
+#define PyArray_OverflowMultiplyList \
+        (*(npy_intp (*)(npy_intp const *, int)) \
+    PyArray_API[207])
+#define PyArray_CompareString \
+        (*(int (*)(const char *, const char *, size_t)) \
+    PyArray_API[208])
+#define PyArray_MultiIterFromObjects \
+        (*(PyObject* (*)(PyObject **, int, int, ...)) \
+    PyArray_API[209])
+#define PyArray_GetEndianness \
+        (*(int (*)(void)) \
+    PyArray_API[210])
+#define PyArray_GetNDArrayCFeatureVersion \
+        (*(unsigned int (*)(void)) \
+    PyArray_API[211])
+#define PyArray_Correlate2 \
+        (*(PyObject * (*)(PyObject *, PyObject *, int)) \
+    PyArray_API[212])
+#define PyArray_NeighborhoodIterNew \
+        (*(PyObject* (*)(PyArrayIterObject *, const npy_intp *, int, PyArrayObject*)) \
+    PyArray_API[213])
+#define PyTimeIntegerArrType_Type (*(PyTypeObject *)PyArray_API[214])
+#define PyDatetimeArrType_Type (*(PyTypeObject *)PyArray_API[215])
+#define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216])
+#define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217])
+#define NpyIter_Type (*(PyTypeObject *)PyArray_API[218])
+#define PyArray_SetDatetimeParseFunction \
+        (*(void (*)(PyObject *NPY_UNUSED(op))) \
+    PyArray_API[219])
+#define PyArray_DatetimeToDatetimeStruct \
+        (*(void (*)(npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *)) \
+    PyArray_API[220])
+#define PyArray_TimedeltaToTimedeltaStruct \
+        (*(void (*)(npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *)) \
+    PyArray_API[221])
+#define PyArray_DatetimeStructToDatetime \
+        (*(npy_datetime (*)(NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d))) \
+    PyArray_API[222])
+#define PyArray_TimedeltaStructToTimedelta \
+        (*(npy_datetime (*)(NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d))) \
+    PyArray_API[223])
+#define NpyIter_New \
+        (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \
+    PyArray_API[224])
+#define NpyIter_MultiNew \
+        (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **)) \
+    PyArray_API[225])
+#define NpyIter_AdvancedNew \
+        (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp)) \
+    PyArray_API[226])
+#define NpyIter_Copy \
+        (*(NpyIter * (*)(NpyIter *)) \
+    PyArray_API[227])
+#define NpyIter_Deallocate \
+        (*(int (*)(NpyIter *)) \
+    PyArray_API[228])
+#define NpyIter_HasDelayedBufAlloc \
+        (*(npy_bool (*)(NpyIter *)) \
+    PyArray_API[229])
+#define NpyIter_HasExternalLoop \
+        (*(npy_bool (*)(NpyIter *)) \
+    PyArray_API[230])
+#define NpyIter_EnableExternalLoop \
+        (*(int (*)(NpyIter *)) \
+    PyArray_API[231])
+#define NpyIter_GetInnerStrideArray \
+        (*(npy_intp * (*)(NpyIter *)) \
+    PyArray_API[232])
+#define NpyIter_GetInnerLoopSizePtr \
+        (*(npy_intp * (*)(NpyIter *)) \
+    PyArray_API[233])
+#define NpyIter_Reset \
+        (*(int (*)(NpyIter *, char **)) \
+    PyArray_API[234])
+#define NpyIter_ResetBasePointers \
+        (*(int (*)(NpyIter *, char **, char **)) \
+    PyArray_API[235])
+#define NpyIter_ResetToIterIndexRange \
+        (*(int (*)(NpyIter *, npy_intp, npy_intp, char **)) \
+    PyArray_API[236])
+#define NpyIter_GetNDim \
+        (*(int (*)(NpyIter *)) \
+    PyArray_API[237])
+#define NpyIter_GetNOp \
+        (*(int (*)(NpyIter *)) \
+    PyArray_API[238])
+#define NpyIter_GetIterNext \
+        (*(NpyIter_IterNextFunc * (*)(NpyIter *, char **)) \
+    PyArray_API[239])
+#define NpyIter_GetIterSize \
+        (*(npy_intp (*)(NpyIter *)) \
+    PyArray_API[240])
+#define NpyIter_GetIterIndexRange \
+        (*(void (*)(NpyIter *, npy_intp *, npy_intp *)) \
+    PyArray_API[241])
+#define NpyIter_GetIterIndex \
+        (*(npy_intp (*)(NpyIter *)) \
+    PyArray_API[242])
+#define NpyIter_GotoIterIndex \
+        (*(int (*)(NpyIter *, npy_intp)) \
+    PyArray_API[243])
+#define NpyIter_HasMultiIndex \
+        (*(npy_bool (*)(NpyIter *)) \
+    PyArray_API[244])
+#define NpyIter_GetShape \
+        (*(int (*)(NpyIter *, npy_intp *)) \
+    PyArray_API[245])
+#define NpyIter_GetGetMultiIndex \
+        (*(NpyIter_GetMultiIndexFunc * (*)(NpyIter *, char **)) \
+    PyArray_API[246])
+#define NpyIter_GotoMultiIndex \
+        (*(int (*)(NpyIter *, npy_intp const *)) \
+    PyArray_API[247])
+#define NpyIter_RemoveMultiIndex \
+        (*(int (*)(NpyIter *)) \
+    PyArray_API[248])
+#define NpyIter_HasIndex \
+        (*(npy_bool (*)(NpyIter *)) \
+    PyArray_API[249])
+#define NpyIter_IsBuffered \
+        (*(npy_bool (*)(NpyIter *)) \
+    PyArray_API[250])
+#define NpyIter_IsGrowInner \
+        (*(npy_bool (*)(NpyIter *)) \
+    PyArray_API[251])
+#define NpyIter_GetBufferSize \
+        (*(npy_intp (*)(NpyIter *)) \
+    PyArray_API[252])
+#define NpyIter_GetIndexPtr \
+        (*(npy_intp * (*)(NpyIter *)) \
+    PyArray_API[253])
+#define NpyIter_GotoIndex \
+        (*(int (*)(NpyIter *, npy_intp)) \
+    PyArray_API[254])
+#define NpyIter_GetDataPtrArray \
+        (*(char ** (*)(NpyIter *)) \
+    PyArray_API[255])
+#define NpyIter_GetDescrArray \
+        (*(PyArray_Descr ** (*)(NpyIter *)) \
+    PyArray_API[256])
+#define NpyIter_GetOperandArray \
+        (*(PyArrayObject ** (*)(NpyIter *)) \
+    PyArray_API[257])
+#define NpyIter_GetIterView \
+        (*(PyArrayObject * (*)(NpyIter *, npy_intp)) \
+    PyArray_API[258])
+#define NpyIter_GetReadFlags \
+        (*(void (*)(NpyIter *, char *)) \
+    PyArray_API[259])
+#define NpyIter_GetWriteFlags \
+        (*(void (*)(NpyIter *, char *)) \
+    PyArray_API[260])
+#define NpyIter_DebugPrint \
+        (*(void (*)(NpyIter *)) \
+    PyArray_API[261])
+#define NpyIter_IterationNeedsAPI \
+        (*(npy_bool (*)(NpyIter *)) \
+    PyArray_API[262])
+#define NpyIter_GetInnerFixedStrideArray \
+        (*(void (*)(NpyIter *, npy_intp *)) \
+    PyArray_API[263])
+#define NpyIter_RemoveAxis \
+        (*(int (*)(NpyIter *, int)) \
+    PyArray_API[264])
+#define NpyIter_GetAxisStrideArray \
+        (*(npy_intp * (*)(NpyIter *, int)) \
+    PyArray_API[265])
+#define NpyIter_RequiresBuffering \
+        (*(npy_bool (*)(NpyIter *)) \
+    PyArray_API[266])
+#define NpyIter_GetInitialDataPtrArray \
+        (*(char ** (*)(NpyIter *)) \
+    PyArray_API[267])
+#define NpyIter_CreateCompatibleStrides \
+        (*(int (*)(NpyIter *, npy_intp, npy_intp *)) \
+    PyArray_API[268])
+#define PyArray_CastingConverter \
+        (*(int (*)(PyObject *, NPY_CASTING *)) \
+    PyArray_API[269])
+#define PyArray_CountNonzero \
+        (*(npy_intp (*)(PyArrayObject *)) \
+    PyArray_API[270])
+#define PyArray_PromoteTypes \
+        (*(PyArray_Descr * (*)(PyArray_Descr *, PyArray_Descr *)) \
+    PyArray_API[271])
+#define PyArray_MinScalarType \
+        (*(PyArray_Descr * (*)(PyArrayObject *)) \
+    PyArray_API[272])
+#define PyArray_ResultType \
+        (*(PyArray_Descr * (*)(npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[])) \
+    PyArray_API[273])
+#define PyArray_CanCastArrayTo \
+        (*(npy_bool (*)(PyArrayObject *, PyArray_Descr *, NPY_CASTING)) \
+    PyArray_API[274])
+#define PyArray_CanCastTypeTo \
+        (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *, NPY_CASTING)) \
+    PyArray_API[275])
+#define PyArray_EinsteinSum \
+        (*(PyArrayObject * (*)(char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *)) \
+    PyArray_API[276])
+#define PyArray_NewLikeArray \
+        (*(PyObject * (*)(PyArrayObject *, NPY_ORDER, PyArray_Descr *, int)) \
+    PyArray_API[277])
+#define PyArray_GetArrayParamsFromObject \
+        (*(int (*)(PyObject *NPY_UNUSED(op), PyArray_Descr *NPY_UNUSED(requested_dtype), npy_bool NPY_UNUSED(writeable), PyArray_Descr **NPY_UNUSED(out_dtype), int *NPY_UNUSED(out_ndim), npy_intp *NPY_UNUSED(out_dims), PyArrayObject **NPY_UNUSED(out_arr), PyObject *NPY_UNUSED(context))) \
+    PyArray_API[278])
+#define PyArray_ConvertClipmodeSequence \
+        (*(int (*)(PyObject *, NPY_CLIPMODE *, int)) \
+    PyArray_API[279])
+#define PyArray_MatrixProduct2 \
+        (*(PyObject * (*)(PyObject *, PyObject *, PyArrayObject*)) \
+    PyArray_API[280])
+#define NpyIter_IsFirstVisit \
+        (*(npy_bool (*)(NpyIter *, int)) \
+    PyArray_API[281])
+#define PyArray_SetBaseObject \
+        (*(int (*)(PyArrayObject *, PyObject *)) \
+    PyArray_API[282])
+#define PyArray_CreateSortedStridePerm \
+        (*(void (*)(int, npy_intp const *, npy_stride_sort_item *)) \
+    PyArray_API[283])
+#define PyArray_RemoveAxesInPlace \
+        (*(void (*)(PyArrayObject *, const npy_bool *)) \
+    PyArray_API[284])
+#define PyArray_DebugPrint \
+        (*(void (*)(PyArrayObject *)) \
+    PyArray_API[285])
+#define PyArray_FailUnlessWriteable \
+        (*(int (*)(PyArrayObject *, const char *)) \
+    PyArray_API[286])
+#define PyArray_SetUpdateIfCopyBase \
+        (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+    PyArray_API[287])
+#define PyDataMem_NEW \
+        (*(void * (*)(size_t)) \
+    PyArray_API[288])
+#define PyDataMem_FREE \
+        (*(void (*)(void *)) \
+    PyArray_API[289])
+#define PyDataMem_RENEW \
+        (*(void * (*)(void *, size_t)) \
+    PyArray_API[290])
+#define PyDataMem_SetEventHook \
+        (*(PyDataMem_EventHookFunc * (*)(PyDataMem_EventHookFunc *, void *, void **)) \
+    PyArray_API[291])
+#define NPY_DEFAULT_ASSIGN_CASTING (*(NPY_CASTING *)PyArray_API[292])
+#define PyArray_MapIterSwapAxes \
+        (*(void (*)(PyArrayMapIterObject *, PyArrayObject **, int)) \
+    PyArray_API[293])
+#define PyArray_MapIterArray \
+        (*(PyObject * (*)(PyArrayObject *, PyObject *)) \
+    PyArray_API[294])
+#define PyArray_MapIterNext \
+        (*(void (*)(PyArrayMapIterObject *)) \
+    PyArray_API[295])
+#define PyArray_Partition \
+        (*(int (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \
+    PyArray_API[296])
+#define PyArray_ArgPartition \
+        (*(PyObject * (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \
+    PyArray_API[297])
+#define PyArray_SelectkindConverter \
+        (*(int (*)(PyObject *, NPY_SELECTKIND *)) \
+    PyArray_API[298])
+#define PyDataMem_NEW_ZEROED \
+        (*(void * (*)(size_t, size_t)) \
+    PyArray_API[299])
+#define PyArray_CheckAnyScalarExact \
+        (*(int (*)(PyObject *)) \
+    PyArray_API[300])
+#define PyArray_MapIterArrayCopyIfOverlap \
+        (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \
+    PyArray_API[301])
+#define PyArray_ResolveWritebackIfCopy \
+        (*(int (*)(PyArrayObject *)) \
+    PyArray_API[302])
+#define PyArray_SetWritebackIfCopyBase \
+        (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+    PyArray_API[303])
+
+#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
+#define PyDataMem_SetHandler \
+        (*(PyObject * (*)(PyObject *)) \
+    PyArray_API[304])
+#endif
+
+#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
+#define PyDataMem_GetHandler \
+        (*(PyObject * (*)(void)) \
+    PyArray_API[305])
+#endif
+#define PyDataMem_DefaultHandler (*(PyObject* *)PyArray_API[306])
+
+#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
+static int
+_import_array(void)
+{
+  int st;
+  PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
+  PyObject *c_api = NULL;
+
+  if (numpy == NULL) {
+      return -1;
+  }
+  c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
+  Py_DECREF(numpy);
+  if (c_api == NULL) {
+      return -1;
+  }
+
+  if (!PyCapsule_CheckExact(c_api)) {
+      PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
+      Py_DECREF(c_api);
+      return -1;
+  }
+  PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
+  Py_DECREF(c_api);
+  if (PyArray_API == NULL) {
+      PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
+      return -1;
+  }
+
+  /* Perform runtime check of C API version */
+  if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
+      PyErr_Format(PyExc_RuntimeError, "module compiled against "\
+             "ABI version 0x%x but this version of numpy is 0x%x", \
+             (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
+      return -1;
+  }
+  if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
+      PyErr_Format(PyExc_RuntimeError, "module compiled against "\
+             "API version 0x%x but this version of numpy is 0x%x . "\
+             "Check the section C-API incompatibility at the "\
+             "Troubleshooting ImportError section at "\
+             "https://numpy.org/devdocs/user/troubleshooting-importerror.html"\
+             "#c-api-incompatibility "\
+              "for indications on how to solve this problem .", \
+             (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
+      return -1;
+  }
+
+  /*
+   * Perform runtime check of endianness and check it matches the one set by
+   * the headers (npy_endian.h) as a safeguard
+   */
+  st = PyArray_GetEndianness();
+  if (st == NPY_CPU_UNKNOWN_ENDIAN) {
+      PyErr_SetString(PyExc_RuntimeError,
+                      "FATAL: module compiled as unknown endian");
+      return -1;
+  }
+#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
+  if (st != NPY_CPU_BIG) {
+      PyErr_SetString(PyExc_RuntimeError,
+                      "FATAL: module compiled as big endian, but "
+                      "detected different endianness at runtime");
+      return -1;
+  }
+#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
+  if (st != NPY_CPU_LITTLE) {
+      PyErr_SetString(PyExc_RuntimeError,
+                      "FATAL: module compiled as little endian, but "
+                      "detected different endianness at runtime");
+      return -1;
+  }
+#endif
+
+  return 0;
+}
+
+#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NULL; } }
+
+#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
+
+#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
+
+#endif
+
+#endif
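The import_array() macros at the end of the header above are the consumer-side entry point for everything it declares: called once from a module's init function, _import_array() resolves the _ARRAY_API capsule exported by numpy.core._multiarray_umath, points PyArray_API at it, and runs the ABI-version, feature-version, and endianness checks. A minimal sketch of that call site, assuming a hypothetical module named "example":

#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <numpy/arrayobject.h>

static struct PyModuleDef examplemodule = {
    PyModuleDef_HEAD_INIT,
    "example",  /* hypothetical module name */
    NULL,       /* no docstring */
    -1,         /* no per-module state */
    NULL        /* no methods needed for this sketch */
};

PyMODINIT_FUNC
PyInit_example(void)
{
    /* On failure, import_array() sets ImportError and returns NULL
     * from this function on our behalf (see the macro above). */
    import_array();
    return PyModule_Create(&examplemodule);
}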
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/__ufunc_api.c b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/__ufunc_api.c
new file mode 100644
index 00000000..d1b4a87b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/__ufunc_api.c
@@ -0,0 +1,50 @@
+
+/* These pointers will be stored in the C-object for use in other
+    extension modules
+*/
+
+void *PyUFunc_API[] = {
+        (void *) &PyUFunc_Type,
+        (void *) PyUFunc_FromFuncAndData,
+        (void *) PyUFunc_RegisterLoopForType,
+        (void *) PyUFunc_GenericFunction,
+        (void *) PyUFunc_f_f_As_d_d,
+        (void *) PyUFunc_d_d,
+        (void *) PyUFunc_f_f,
+        (void *) PyUFunc_g_g,
+        (void *) PyUFunc_F_F_As_D_D,
+        (void *) PyUFunc_F_F,
+        (void *) PyUFunc_D_D,
+        (void *) PyUFunc_G_G,
+        (void *) PyUFunc_O_O,
+        (void *) PyUFunc_ff_f_As_dd_d,
+        (void *) PyUFunc_ff_f,
+        (void *) PyUFunc_dd_d,
+        (void *) PyUFunc_gg_g,
+        (void *) PyUFunc_FF_F_As_DD_D,
+        (void *) PyUFunc_DD_D,
+        (void *) PyUFunc_FF_F,
+        (void *) PyUFunc_GG_G,
+        (void *) PyUFunc_OO_O,
+        (void *) PyUFunc_O_O_method,
+        (void *) PyUFunc_OO_O_method,
+        (void *) PyUFunc_On_Om,
+        (void *) PyUFunc_GetPyValues,
+        (void *) PyUFunc_checkfperr,
+        (void *) PyUFunc_clearfperr,
+        (void *) PyUFunc_getfperr,
+        (void *) PyUFunc_handlefperr,
+        (void *) PyUFunc_ReplaceLoopBySignature,
+        (void *) PyUFunc_FromFuncAndDataAndSignature,
+        (void *) PyUFunc_SetUsesArraysAsData,
+        (void *) PyUFunc_e_e,
+        (void *) PyUFunc_e_e_As_f_f,
+        (void *) PyUFunc_e_e_As_d_d,
+        (void *) PyUFunc_ee_e,
+        (void *) PyUFunc_ee_e_As_ff_f,
+        (void *) PyUFunc_ee_e_As_dd_d,
+        (void *) PyUFunc_DefaultTypeResolver,
+        (void *) PyUFunc_ValidateCasting,
+        (void *) PyUFunc_RegisterLoopForDescr,
+        (void *) PyUFunc_FromFuncAndDataAndSignatureAndIdentity
+};
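Each slot in this table pairs with a declaration in __ufunc_api.h below (slot 0 is the PyUFunc_Type object, slot 1 is PyUFunc_FromFuncAndData, and so on), so a consuming module can cast a slot back to the right function type. As an illustration of the pattern these entry points serve, here is a one-loop ufunc sketch following the classic example in the NumPy C-API docs; the logit name, loop body, and helper are illustrative, not part of this diff:

#include <math.h>
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <numpy/arrayobject.h>
#include <numpy/ufuncobject.h>

/* Inner loop with one double input and one double output. */
static void double_logit(char **args, npy_intp const *dimensions,
                         npy_intp const *steps, void *data)
{
    npy_intp i, n = dimensions[0];
    char *in = args[0], *out = args[1];
    for (i = 0; i < n; i++) {
        double x = *(double *)in;
        *(double *)out = log(x / (1.0 - x));
        in += steps[0];
        out += steps[1];
    }
}

static PyUFuncGenericFunction funcs[1] = {&double_logit};
static char types[2] = {NPY_DOUBLE, NPY_DOUBLE};
static void *func_data[1] = {NULL};

/* Registration, to be called from a module init after both
 * import_array() and import_umath() have succeeded. */
static int
register_logit(PyObject *module)
{
    PyObject *logit = PyUFunc_FromFuncAndData(
        funcs, func_data, types,
        1,              /* ntypes: one registered loop */
        1, 1,           /* nin, nout */
        PyUFunc_None,   /* no identity element */
        "logit", "logit(x) = log(x / (1 - x))", 0);
    if (logit == NULL) {
        return -1;
    }
    if (PyModule_AddObject(module, "logit", logit) < 0) {
        Py_DECREF(logit);
        return -1;
    }
    return 0;
}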
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/__ufunc_api.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/__ufunc_api.h
new file mode 100644
index 00000000..e2efe29e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/__ufunc_api.h
@@ -0,0 +1,314 @@
+
+#ifdef _UMATHMODULE
+
+extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
+
+NPY_NO_EXPORT  PyObject * PyUFunc_FromFuncAndData \
+       (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int);
+NPY_NO_EXPORT  int PyUFunc_RegisterLoopForType \
+       (PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *);
+NPY_NO_EXPORT  int PyUFunc_GenericFunction \
+       (PyUFuncObject *NPY_UNUSED(ufunc), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds), PyArrayObject **NPY_UNUSED(op));
+NPY_NO_EXPORT  void PyUFunc_f_f_As_d_d \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_d_d \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_f_f \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_g_g \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_F_F_As_D_D \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_F_F \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_D_D \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_G_G \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_O_O \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_ff_f_As_dd_d \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_ff_f \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_dd_d \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_gg_g \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_FF_F_As_DD_D \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_DD_D \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_FF_F \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_GG_G \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_OO_O \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_O_O_method \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_OO_O_method \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_On_Om \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  int PyUFunc_GetPyValues \
+       (char *, int *, int *, PyObject **);
+NPY_NO_EXPORT  int PyUFunc_checkfperr \
+       (int, PyObject *, int *);
+NPY_NO_EXPORT  void PyUFunc_clearfperr \
+       (void);
+NPY_NO_EXPORT  int PyUFunc_getfperr \
+       (void);
+NPY_NO_EXPORT  int PyUFunc_handlefperr \
+       (int, PyObject *, int, int *);
+NPY_NO_EXPORT  int PyUFunc_ReplaceLoopBySignature \
+       (PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *);
+NPY_NO_EXPORT  PyObject * PyUFunc_FromFuncAndDataAndSignature \
+       (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *);
+NPY_NO_EXPORT  int PyUFunc_SetUsesArraysAsData \
+       (void **NPY_UNUSED(data), size_t NPY_UNUSED(i));
+NPY_NO_EXPORT  void PyUFunc_e_e \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_e_e_As_f_f \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_e_e_As_d_d \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_ee_e \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_ee_e_As_ff_f \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  void PyUFunc_ee_e_As_dd_d \
+       (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT  int PyUFunc_DefaultTypeResolver \
+       (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT  int PyUFunc_ValidateCasting \
+       (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **);
+NPY_NO_EXPORT  int PyUFunc_RegisterLoopForDescr \
+       (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *);
+NPY_NO_EXPORT  PyObject * PyUFunc_FromFuncAndDataAndSignatureAndIdentity \
+       (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *);
+
+#else
+
+#if defined(PY_UFUNC_UNIQUE_SYMBOL)
+#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL
+#endif
+
+#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC)
+extern void **PyUFunc_API;
+#else
+#if defined(PY_UFUNC_UNIQUE_SYMBOL)
+void **PyUFunc_API;
+#else
+static void **PyUFunc_API=NULL;
+#endif
+#endif
+
+#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0])
+#define PyUFunc_FromFuncAndData \
+        (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int)) \
+    PyUFunc_API[1])
+#define PyUFunc_RegisterLoopForType \
+        (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *)) \
+    PyUFunc_API[2])
+#define PyUFunc_GenericFunction \
+        (*(int (*)(PyUFuncObject *NPY_UNUSED(ufunc), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds), PyArrayObject **NPY_UNUSED(op))) \
+    PyUFunc_API[3])
+#define PyUFunc_f_f_As_d_d \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[4])
+#define PyUFunc_d_d \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[5])
+#define PyUFunc_f_f \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[6])
+#define PyUFunc_g_g \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[7])
+#define PyUFunc_F_F_As_D_D \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[8])
+#define PyUFunc_F_F \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[9])
+#define PyUFunc_D_D \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[10])
+#define PyUFunc_G_G \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[11])
+#define PyUFunc_O_O \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[12])
+#define PyUFunc_ff_f_As_dd_d \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[13])
+#define PyUFunc_ff_f \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[14])
+#define PyUFunc_dd_d \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[15])
+#define PyUFunc_gg_g \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[16])
+#define PyUFunc_FF_F_As_DD_D \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[17])
+#define PyUFunc_DD_D \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[18])
+#define PyUFunc_FF_F \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[19])
+#define PyUFunc_GG_G \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[20])
+#define PyUFunc_OO_O \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[21])
+#define PyUFunc_O_O_method \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[22])
+#define PyUFunc_OO_O_method \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[23])
+#define PyUFunc_On_Om \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[24])
+#define PyUFunc_GetPyValues \
+        (*(int (*)(char *, int *, int *, PyObject **)) \
+    PyUFunc_API[25])
+#define PyUFunc_checkfperr \
+        (*(int (*)(int, PyObject *, int *)) \
+    PyUFunc_API[26])
+#define PyUFunc_clearfperr \
+        (*(void (*)(void)) \
+    PyUFunc_API[27])
+#define PyUFunc_getfperr \
+        (*(int (*)(void)) \
+    PyUFunc_API[28])
+#define PyUFunc_handlefperr \
+        (*(int (*)(int, PyObject *, int, int *)) \
+    PyUFunc_API[29])
+#define PyUFunc_ReplaceLoopBySignature \
+        (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *)) \
+    PyUFunc_API[30])
+#define PyUFunc_FromFuncAndDataAndSignature \
+        (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *)) \
+    PyUFunc_API[31])
+#define PyUFunc_SetUsesArraysAsData \
+        (*(int (*)(void **NPY_UNUSED(data), size_t NPY_UNUSED(i))) \
+    PyUFunc_API[32])
+#define PyUFunc_e_e \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[33])
+#define PyUFunc_e_e_As_f_f \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[34])
+#define PyUFunc_e_e_As_d_d \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[35])
+#define PyUFunc_ee_e \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[36])
+#define PyUFunc_ee_e_As_ff_f \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[37])
+#define PyUFunc_ee_e_As_dd_d \
+        (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+    PyUFunc_API[38])
+#define PyUFunc_DefaultTypeResolver \
+        (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \
+    PyUFunc_API[39])
+#define PyUFunc_ValidateCasting \
+        (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **)) \
+    PyUFunc_API[40])
+#define PyUFunc_RegisterLoopForDescr \
+        (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \
+    PyUFunc_API[41])
+
+#if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION
+#define PyUFunc_FromFuncAndDataAndSignatureAndIdentity \
+        (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *)) \
+    PyUFunc_API[42])
+#endif
+
+static inline int
+_import_umath(void)
+{
+  PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
+  PyObject *c_api = NULL;
+
+  if (numpy == NULL) {
+      PyErr_SetString(PyExc_ImportError,
+                      "numpy.core._multiarray_umath failed to import");
+      return -1;
+  }
+  c_api = PyObject_GetAttrString(numpy, "_UFUNC_API");
+  Py_DECREF(numpy);
+  if (c_api == NULL) {
+      PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found");
+      return -1;
+  }
+
+  if (!PyCapsule_CheckExact(c_api)) {
+      PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object");
+      Py_DECREF(c_api);
+      return -1;
+  }
+  PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL);
+  Py_DECREF(c_api);
+  if (PyUFunc_API == NULL) {
+      PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer");
+      return -1;
+  }
+  return 0;
+}
+
+#define import_umath() \
+    do {\
+        UFUNC_NOFPE\
+        if (_import_umath() < 0) {\
+            PyErr_Print();\
+            PyErr_SetString(PyExc_ImportError,\
+                    "numpy.core.umath failed to import");\
+            return NULL;\
+        }\
+    } while(0)
+
+#define import_umath1(ret) \
+    do {\
+        UFUNC_NOFPE\
+        if (_import_umath() < 0) {\
+            PyErr_Print();\
+            PyErr_SetString(PyExc_ImportError,\
+                    "numpy.core.umath failed to import");\
+            return ret;\
+        }\
+    } while(0)
+
+#define import_umath2(ret, msg) \
+    do {\
+        UFUNC_NOFPE\
+        if (_import_umath() < 0) {\
+            PyErr_Print();\
+            PyErr_SetString(PyExc_ImportError, msg);\
+            return ret;\
+        }\
+    } while(0)
+
+#define import_ufunc() \
+    do {\
+        UFUNC_NOFPE\
+        if (_import_umath() < 0) {\
+            PyErr_Print();\
+            PyErr_SetString(PyExc_ImportError,\
+                    "numpy.core.umath failed to import");\
+        }\
+    } while(0)
+
+#endif
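
For orientation, a minimal sketch of how an extension consumes the ufunc C-API above: call import_umath() in the module init, then build a ufunc with PyUFunc_FromFuncAndData. The module and loop names (square_mod, double_square) are hypothetical; only the NumPy calls are real API.

#include <Python.h>
#include <numpy/ndarraytypes.h>
#include <numpy/ufuncobject.h>

/* 1-in/1-out inner loop with the PyUFuncGenericFunction signature. */
static void
double_square(char **args, npy_intp const *dimensions,
              npy_intp const *steps, void *data)
{
    npy_intp i, n = dimensions[0];
    char *in = args[0], *out = args[1];
    for (i = 0; i < n; i++) {
        double x = *(double *)in;
        *(double *)out = x * x;
        in += steps[0];
        out += steps[1];
    }
}

static PyUFuncGenericFunction funcs[1] = {&double_square};
static char types[2] = {NPY_DOUBLE, NPY_DOUBLE};   /* one d->d loop */
static void *data[1] = {NULL};

static struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT, "square_mod", NULL, -1, NULL
};

PyMODINIT_FUNC
PyInit_square_mod(void)
{
    PyObject *m = PyModule_Create(&moduledef);
    if (m == NULL) {
        return NULL;
    }
    import_array();    /* array C-API; returns NULL from here on failure */
    import_umath();    /* fills the PyUFunc_API table used by the macros */

    PyObject *square = PyUFunc_FromFuncAndData(
            funcs, data, types, 1 /* ntypes */, 1 /* nin */, 1 /* nout */,
            PyUFunc_None, "square", "square(x) -> x*x", 0);
    PyModule_AddObject(m, "square", square);
    return m;
}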
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/_dtype_api.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/_dtype_api.h
new file mode 100644
index 00000000..39fbc500
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/_dtype_api.h
@@ -0,0 +1,408 @@
+/*
+ * DType related API shared by the (experimental) public API and internal API.
+ */
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_
+#define NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_
+
+#define __EXPERIMENTAL_DTYPE_API_VERSION 11
+
+struct PyArrayMethodObject_tag;
+
+/*
+ * Largely opaque struct for DType classes (i.e. metaclass instances).
+ * The internal definition is currently in `ndarraytypes.h` (export is a bit
+ * more complex because `PyArray_Descr` is a DTypeMeta internally but not
+ * externally).
+ */
+#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)
+
+    typedef struct PyArray_DTypeMeta_tag {
+        PyHeapTypeObject super;
+
+        /*
+        * Most DTypes will have a singleton default instance; for the
+        * parametric legacy DTypes (bytes, string, void, datetime) this
+        * may be a pointer to the *prototype* instance?
+        */
+        PyArray_Descr *singleton;
+        /* Copy of the legacy DType's type number, usually invalid. */
+        int type_num;
+
+        /* The type object of the scalar instances (may be NULL?) */
+        PyTypeObject *scalar_type;
+        /*
+        * DType flags to signal legacy, parametric, or
+        * abstract.  But plenty of space for additional information/flags.
+        */
+        npy_uint64 flags;
+
+        /*
+        * Use indirection in order to allow a fixed size for this struct.
+        * A stable ABI size makes creating a static DType less painful
+        * while also ensuring flexibility for all opaque API (with one
+        * indirection due to the pointer lookup).
+        */
+        void *dt_slots;
+        /* Allow growing (at the moment also beyond this) */
+        void *reserved[3];
+    } PyArray_DTypeMeta;
+
+#endif  /* not internal build */
+
+/*
+ * ******************************************************
+ *         ArrayMethod API (Casting and UFuncs)
+ * ******************************************************
+ */
+/*
+ * NOTE: Expected changes:
+ *       * probably split runtime and general flags into two
+ *       * should possibly not use an enum typedef, for a more stable ABI?
+ */
+typedef enum {
+    /* Flag for whether the GIL is required */
+    NPY_METH_REQUIRES_PYAPI = 1 << 0,
+    /*
+     * Some functions cannot set floating point error flags; this flag
+     * gives us the option (not requirement) to skip floating point error
+     * setup/check. No function should set error flags and ignore them
+     * since it would interfere with chaining operations (e.g. casting).
+     */
+    NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 1,
+    /* Whether the method supports unaligned access (not runtime) */
+    NPY_METH_SUPPORTS_UNALIGNED = 1 << 2,
+    /*
+     * Used for reductions to allow reordering the operation.  At this point
+     * assume that if set, it also applies to normal operations though!
+     */
+    NPY_METH_IS_REORDERABLE = 1 << 3,
+    /*
+     * Private flag for now for *logic* functions.  The logical functions
+     * `logical_or` and `logical_and` can always cast the inputs to booleans
+     * "safely" (because that is how the cast to bool is defined).
+     * @seberg: I am not sure this is the best way to handle this, so it's
+     * private for now (also it is very limited anyway).
+     * There is one "exception". NA aware dtypes cannot cast to bool
+     * (hopefully), so the `??->?` loop should error even with this flag.
+     * But a second NA fallback loop will be necessary.
+     */
+    _NPY_METH_FORCE_CAST_INPUTS = 1 << 17,
+
+    /* All flags which can change at runtime */
+    NPY_METH_RUNTIME_FLAGS = (
+            NPY_METH_REQUIRES_PYAPI |
+            NPY_METH_NO_FLOATINGPOINT_ERRORS),
+} NPY_ARRAYMETHOD_FLAGS;
+
+
+typedef struct PyArrayMethod_Context_tag {
+    /* The caller, which is typically the original ufunc.  May be NULL */
+    PyObject *caller;
+    /* The method "self".  Publicly, currently an opaque object. */
+    struct PyArrayMethodObject_tag *method;
+
+    /* Operand descriptors, filled in by resolve_descriptors */
+    PyArray_Descr **descriptors;
+    /* Structure may grow (this is harmless for DType authors) */
+} PyArrayMethod_Context;
+
+
+/*
+ * The main object for creating a new ArrayMethod. We use the typical `slots`
+ * mechanism used by the Python limited API (see below for the slot defs).
+ */
+typedef struct {
+    const char *name;
+    int nin, nout;
+    NPY_CASTING casting;
+    NPY_ARRAYMETHOD_FLAGS flags;
+    PyArray_DTypeMeta **dtypes;
+    PyType_Slot *slots;
+} PyArrayMethod_Spec;
+
+
+/*
+ * ArrayMethod slots
+ * -----------------
+ *
+ * Slot IDs for ArrayMethod creation; once fully public, IDs are fixed
+ * but can be deprecated and arbitrarily extended.
+ */
+#define NPY_METH_resolve_descriptors 1
+/* We may want to adapt the `get_loop` signature a bit: */
+#define _NPY_METH_get_loop 2
+#define NPY_METH_get_reduction_initial 3
+/* specific loops for constructions/default get_loop: */
+#define NPY_METH_strided_loop 4
+#define NPY_METH_contiguous_loop 5
+#define NPY_METH_unaligned_strided_loop 6
+#define NPY_METH_unaligned_contiguous_loop 7
+#define NPY_METH_contiguous_indexed_loop 8
+
+/*
+ * The resolve descriptors function must be able to handle NULL values for
+ * all output (but not input) `given_descrs` and fill `loop_descrs`.
+ * Return -1 on error, or 0 if the operation is not possible (without an
+ * error set).  (This may still be in flux.)
+ * Otherwise it must return the "casting safety"; for normal functions this
+ * is almost always "safe" (or even "equivalent"?).
+ *
+ * `resolve_descriptors` is optional if all output DTypes are non-parametric.
+ */
+typedef NPY_CASTING (resolve_descriptors_function)(
+        /* "method" is currently opaque (necessary e.g. to wrap Python) */
+        struct PyArrayMethodObject_tag *method,
+        /* DTypes the method was created for */
+        PyArray_DTypeMeta **dtypes,
+        /* Input descriptors (instances).  Outputs may be NULL. */
+        PyArray_Descr **given_descrs,
+        /* Exact loop descriptors to use, must not hold references on error */
+        PyArray_Descr **loop_descrs,
+        npy_intp *view_offset);
+
+
+typedef int (PyArrayMethod_StridedLoop)(PyArrayMethod_Context *context,
+        char *const *data, const npy_intp *dimensions, const npy_intp *strides,
+        NpyAuxData *transferdata);
+
+
+typedef int (get_loop_function)(
+        PyArrayMethod_Context *context,
+        int aligned, int move_references,
+        const npy_intp *strides,
+        PyArrayMethod_StridedLoop **out_loop,
+        NpyAuxData **out_transferdata,
+        NPY_ARRAYMETHOD_FLAGS *flags);
+
+/**
+ * Query an ArrayMethod for the initial value for use in reduction.
+ *
+ * @param context The arraymethod context, mainly to access the descriptors.
+ * @param reduction_is_empty Whether the reduction is empty. When it is, the
+ *     value returned may differ.  In this case it is a "default" value that
+ *     may differ from the "identity" value normally used.  For example:
+ *     - `0.0` is the default for `sum([])`.  But `-0.0` is the correct
+ *       identity otherwise as it preserves the sign for `sum([-0.0])`.
+ *     - We use no identity for object, but return the default of `0` and `1`
+ *       for the empty `sum([], dtype=object)` and `prod([], dtype=object)`.
+ *       This allows `np.sum(np.array(["a", "b"], dtype=object))` to work.
+ *     - `-inf` or `INT_MIN` for `max` is an identity, but at least `INT_MIN`
+ *       is not a good *default* when there are no items.
+ * @param initial Pointer to initial data to be filled (if possible)
+ *
+ * @returns -1, 0, or 1 indicating error, no initial value, and initial being
+ *     successfully filled.  Errors must not be given where 0 is correct, as NumPy
+ *     may call this even when not strictly necessary.
+ */
+typedef int (get_reduction_initial_function)(
+        PyArrayMethod_Context *context, npy_bool reduction_is_empty,
+        char *initial);
+
+/*
+ * The following functions are only used by the wrapping array method defined
+ * in umath/wrapping_array_method.c
+ */
+
+/*
+ * The function converts the given descriptors (passed in to
+ * `resolve_descriptors`) into the descriptors used by the wrapped loop.
+ * The new descriptors MUST be viewable with the old ones, `NULL` must be
+ * supported (for outputs) and should normally be forwarded.
+ *
+ * The function must clean up on error.
+ *
+ * NOTE: We currently assume that this translation gives "viewable" results.
+ *       I.e. there is no additional casting related to the wrapping process.
+ *       In principle that could be supported, but it is not clear it is useful.
+ *       This currently also means that e.g. alignment must apply identically
+ *       to the new dtypes.
+ *
+ * TODO: Due to the fact that `resolve_descriptors` is also used for `can_cast`
+ *       there is no way to "pass out" the result of this function.  This means
+ *       it will be called twice for every ufunc call.
+ *       (I am considering including `auxdata` as an "optional" parameter to
+ *       `resolve_descriptors`, so that it can be filled there if not NULL.)
+ */
+typedef int translate_given_descrs_func(int nin, int nout,
+        PyArray_DTypeMeta *wrapped_dtypes[],
+        PyArray_Descr *given_descrs[], PyArray_Descr *new_descrs[]);
+
+/**
+ * The function to convert the actual loop descriptors (as returned by the
+ * original `resolve_descriptors` function) to the ones the output array
+ * should use.
+ * This function must return "viewable" types; it must not mutate them in any
+ * form that would break the inner-loop logic.  Does not need to support NULL.
+ *
+ * The function must clean up on error.
+ *
+ * @param nin Number of input arguments
+ * @param nout Number of output arguments
+ * @param new_dtypes The DTypes of the output (usually probably not needed)
+ * @param given_descrs Original given_descrs to the resolver, necessary to
+ *        fetch any information related to the new dtypes from the original.
+ * @param original_descrs The `loop_descrs` returned by the wrapped loop.
+ * @param loop_descrs The output descriptors, compatible to `original_descrs`.
+ *
+ * @returns 0 on success, -1 on failure.
+ */
+typedef int translate_loop_descrs_func(int nin, int nout,
+        PyArray_DTypeMeta *new_dtypes[], PyArray_Descr *given_descrs[],
+        PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]);
+
+
+/*
+ * A traverse loop working on a single array. This is similar to the general
+ * strided-loop function, but is designed for loops that need to visit every
+ * element of a single array.
+ *
+ * Currently this is used for array clearing, via the NPY_DT_get_clear_loop
+ * API hook, and zero-filling, via the NPY_DT_get_fill_zero_loop API hook.
+ * These are most useful for handling arrays storing embedded references to
+ * python objects or heap-allocated data.
+ *
+ * The `void *traverse_context` is passed in because we may need to pass in
+ * interpreter state or similar in the future, but we don't want to pass in
+ * a full context (with pointers to dtypes, method, caller which all make
+ * no sense for a traverse function).
+ *
+ * We assume for now that this context can just be passed through in the
+ * future (for structured dtypes).
+ *
+ */
+typedef int (traverse_loop_function)(
+        void *traverse_context, PyArray_Descr *descr, char *data,
+        npy_intp size, npy_intp stride, NpyAuxData *auxdata);
+
+
+/*
+ * Simplified get_loop function specific to dtype traversal
+ *
+ * It should set the flags needed for the traversal loop and set out_loop to the
+ * loop function, which must be a valid traverse_loop_function
+ * pointer. Currently this is used for zero-filling and clearing arrays storing
+ * embedded references.
+ *
+ */
+typedef int (get_traverse_loop_function)(
+        void *traverse_context, PyArray_Descr *descr,
+        int aligned, npy_intp fixed_stride,
+        traverse_loop_function **out_loop, NpyAuxData **out_auxdata,
+        NPY_ARRAYMETHOD_FLAGS *flags);
+
+
+/*
+ * ****************************
+ *          DTYPE API
+ * ****************************
+ */
+
+#define NPY_DT_ABSTRACT 1 << 1
+#define NPY_DT_PARAMETRIC 1 << 2
+#define NPY_DT_NUMERIC 1 << 3
+
+/*
+ * These correspond to slots in the NPY_DType_Slots struct and must
+ * be in the same order as the members of that struct. If new slots
+ * get added or old slots get removed, NPY_NUM_DTYPE_SLOTS must also
+ * be updated
+ */
+
+#define NPY_DT_discover_descr_from_pyobject 1
+// this slot is considered private because its API hasn't been decided
+#define _NPY_DT_is_known_scalar_type 2
+#define NPY_DT_default_descr 3
+#define NPY_DT_common_dtype 4
+#define NPY_DT_common_instance 5
+#define NPY_DT_ensure_canonical 6
+#define NPY_DT_setitem 7
+#define NPY_DT_getitem 8
+#define NPY_DT_get_clear_loop 9
+#define NPY_DT_get_fill_zero_loop 10
+
+// These PyArray_ArrFunc slots will be deprecated and replaced eventually.
+// getitem and setitem can be defined as a performance optimization;
+// by default the user dtypes call `legacy_getitem_using_DType` and
+// `legacy_setitem_using_DType`, respectively. This functionality is
+// only supported for basic NumPy DTypes.
+
+
+// used to separate dtype slots from arrfuncs slots
+// intended only for internal use but defined here for clarity
+#define _NPY_DT_ARRFUNCS_OFFSET (1 << 10)
+
+// Cast is disabled
+// #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET
+
+#define NPY_DT_PyArray_ArrFuncs_getitem 1 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_setitem 2 + _NPY_DT_ARRFUNCS_OFFSET
+
+#define NPY_DT_PyArray_ArrFuncs_copyswapn 3 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_copyswap 4 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_compare 5 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_argmax 6 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_dotfunc 7 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_scanfunc 8 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_fromstr 9 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_nonzero 10 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_fill 11 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_fillwithscalar 12 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_sort 13 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_argsort 14 + _NPY_DT_ARRFUNCS_OFFSET
+
+// Casting related slots are disabled. See
+// https://github.com/numpy/numpy/pull/23173#discussion_r1101098163
+// #define NPY_DT_PyArray_ArrFuncs_castdict 15 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_scalarkind 16 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_cancastscalarkindto 17 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_cancastto 18 + _NPY_DT_ARRFUNCS_OFFSET
+
+// These are deprecated in NumPy 1.19, so are disabled here.
+// #define NPY_DT_PyArray_ArrFuncs_fastclip 19 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_fastputmask 20 + _NPY_DT_ARRFUNCS_OFFSET
+// #define NPY_DT_PyArray_ArrFuncs_fasttake 21 + _NPY_DT_ARRFUNCS_OFFSET
+#define NPY_DT_PyArray_ArrFuncs_argmin 22 + _NPY_DT_ARRFUNCS_OFFSET
+
+// TODO: These slots probably still need some thought, and/or a way to "grow"?
+typedef struct {
+    PyTypeObject *typeobj;    /* type of python scalar or NULL */
+    int flags;                /* flags, including parametric and abstract */
+    /* NULL terminated cast definitions. Use NULL for the newly created DType */
+    PyArrayMethod_Spec **casts;
+    PyType_Slot *slots;
+    /* Baseclass or NULL (will always subclass `np.dtype`) */
+    PyTypeObject *baseclass;
+} PyArrayDTypeMeta_Spec;
+
+
+typedef PyArray_Descr *(discover_descr_from_pyobject_function)(
+        PyArray_DTypeMeta *cls, PyObject *obj);
+
+/*
+ * Before making this public, we should decide whether it should pass
+ * the type, or allow looking at the object. A possible use-case:
+ * `np.array(np.array([0]), dtype=np.ndarray)`, which could consider
+ * arrays that are not `dtype=ndarray` "scalars".
+ */
+typedef int (is_known_scalar_type_function)(
+        PyArray_DTypeMeta *cls, PyTypeObject *obj);
+
+typedef PyArray_Descr *(default_descr_function)(PyArray_DTypeMeta *cls);
+typedef PyArray_DTypeMeta *(common_dtype_function)(
+        PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2);
+typedef PyArray_Descr *(common_instance_function)(
+        PyArray_Descr *dtype1, PyArray_Descr *dtype2);
+typedef PyArray_Descr *(ensure_canonical_function)(PyArray_Descr *dtype);
+
+/*
+ * TODO: These two functions are currently only used for experimental DType
+ *       API support.  Their relation should be "reversed": NumPy should
+ *       always use them internally.
+ *       There are open points about "casting safety" though, e.g. setting
+ *       elements is currently always unsafe.
+ */
+typedef int(setitemfunction)(PyArray_Descr *, PyObject *, char *);
+typedef PyObject *(getitemfunction)(PyArray_Descr *, char *);
+
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */
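
For orientation, a rough sketch of how the pieces above fit together: an ArrayMethod spec is assembled from the slot IDs. my_resolve_descriptors and my_strided_loop are hypothetical functions (to be defined by the DType author) matching the resolve_descriptors_function and PyArrayMethod_StridedLoop signatures.

/* Hypothetical implementations, defined elsewhere by the DType author. */
static NPY_CASTING my_resolve_descriptors(
        struct PyArrayMethodObject_tag *method, PyArray_DTypeMeta **dtypes,
        PyArray_Descr **given_descrs, PyArray_Descr **loop_descrs,
        npy_intp *view_offset);
static int my_strided_loop(PyArrayMethod_Context *context,
        char *const *data, const npy_intp *dimensions, const npy_intp *strides,
        NpyAuxData *transferdata);

static PyType_Slot my_slots[] = {
    {NPY_METH_resolve_descriptors, &my_resolve_descriptors},
    {NPY_METH_strided_loop, &my_strided_loop},
    {0, NULL}                           /* sentinel */
};

static PyArray_DTypeMeta *my_dtypes[2];   /* {input DType, output DType} */

static PyArrayMethod_Spec my_spec = {
    .name = "my_loop",
    .nin = 1,
    .nout = 1,
    .casting = NPY_SAFE_CASTING,
    .flags = NPY_METH_NO_FLOATINGPOINT_ERRORS,
    .dtypes = my_dtypes,
    .slots = my_slots,
};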
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h
new file mode 100644
index 00000000..b365cb50
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h
@@ -0,0 +1,90 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_
+#error You should not include this header directly
+#endif
+/*
+ * Private API (here for inline)
+ */
+static inline int
+_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);
+
+/*
+ * Update to next item of the iterator
+ *
+ * Note: this simply increments the coordinates vector, last dimension
+ * incremented first, i.e., for dimension 3
+ * ...
+ * -1, -1, -1
+ * -1, -1,  0
+ * -1, -1,  1
+ *  ....
+ * -1,  0, -1
+ * -1,  0,  0
+ *  ....
+ * 0,  -1, -1
+ * 0,  -1,  0
+ *  ....
+ */
+#define _UPDATE_COORD_ITER(c) \
+    wb = iter->coordinates[c] < iter->bounds[c][1]; \
+    if (wb) { \
+        iter->coordinates[c] += 1; \
+        return 0; \
+    } \
+    else { \
+        iter->coordinates[c] = iter->bounds[c][0]; \
+    }
+
+static inline int
+_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter)
+{
+    npy_intp i, wb;
+
+    for (i = iter->nd - 1; i >= 0; --i) {
+        _UPDATE_COORD_ITER(i)
+    }
+
+    return 0;
+}
+
+/*
+ * Version optimized for 2d arrays, manual loop unrolling
+ */
+static inline int
+_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
+{
+    npy_intp wb;
+
+    _UPDATE_COORD_ITER(1)
+    _UPDATE_COORD_ITER(0)
+
+    return 0;
+}
+#undef _UPDATE_COORD_ITER
+
+/*
+ * Advance to the next neighbour
+ */
+static inline int
+PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
+{
+    _PyArrayNeighborhoodIter_IncrCoord (iter);
+    iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
+
+    return 0;
+}
+
+/*
+ * Reset functions
+ */
+static inline int
+PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
+{
+    npy_intp i;
+
+    for (i = 0; i < iter->nd; ++i) {
+        iter->coordinates[i] = iter->bounds[i][0];
+    }
+    iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
+
+    return 0;
+}
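
A short sketch of the intended iteration pattern, assuming iter was constructed earlier with PyArray_NeighborhoodIterNew over a float64 array (the element type is an assumption of this example):

static double
sum_neighborhood(PyArrayNeighborhoodIterObject *iter)
{
    double acc = 0.0;
    npy_intp i;

    PyArrayNeighborhoodIter_Reset(iter);
    for (i = 0; i < iter->size; ++i) {
        acc += *(double *)iter->dataptr;    /* float64 assumption */
        PyArrayNeighborhoodIter_Next(iter);
    }
    return acc;
}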
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/_numpyconfig.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/_numpyconfig.h
new file mode 100644
index 00000000..dfbb552c
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/_numpyconfig.h
@@ -0,0 +1,32 @@
+/* #undef NPY_HAVE_ENDIAN_H */
+
+#define NPY_SIZEOF_SHORT 2
+#define NPY_SIZEOF_INT 4
+#define NPY_SIZEOF_LONG 8
+#define NPY_SIZEOF_FLOAT 4
+#define NPY_SIZEOF_COMPLEX_FLOAT 8
+#define NPY_SIZEOF_DOUBLE 8
+#define NPY_SIZEOF_COMPLEX_DOUBLE 16
+#define NPY_SIZEOF_LONGDOUBLE 8
+#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16
+#define NPY_SIZEOF_PY_INTPTR_T 8
+#define NPY_SIZEOF_OFF_T 8
+#define NPY_SIZEOF_PY_LONG_LONG 8
+#define NPY_SIZEOF_LONGLONG 8
+
+#define NPY_USE_C99_COMPLEX 1
+#define NPY_HAVE_COMPLEX_DOUBLE 1
+#define NPY_HAVE_COMPLEX_FLOAT 1
+#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
+#define NPY_USE_C99_FORMATS 1
+
+/* #undef NPY_NO_SIGNAL */
+#define NPY_NO_SMP 0
+
+#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
+#define NPY_ABI_VERSION 0x01000009
+#define NPY_API_VERSION 0x00000011
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS 1
+#endif
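
These sizes are fixed at configure time for this particular build (note the 8-byte long and 8-byte long double, typical of a 64-bit macOS target). As an illustrative sketch, extension code can branch on them at compile time; my_index_t is a hypothetical alias:

/* Normally reached via <numpy/npy_common.h> rather than included directly. */
#if NPY_SIZEOF_LONG == 8
typedef long my_index_t;        /* long is 64-bit on this build */
#else
typedef long long my_index_t;   /* e.g. 64-bit Windows, where long is 32-bit */
#endif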
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/arrayobject.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/arrayobject.h
new file mode 100644
index 00000000..da47bb09
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/arrayobject.h
@@ -0,0 +1,12 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_
+#define Py_ARRAYOBJECT_H
+
+#include "ndarrayobject.h"
+#include "npy_interrupt.h"
+
+#ifdef NPY_NO_PREFIX
+#include "noprefix.h"
+#endif
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/arrayscalars.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/arrayscalars.h
new file mode 100644
index 00000000..258bf95b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/arrayscalars.h
@@ -0,0 +1,186 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_
+
+#ifndef _MULTIARRAYMODULE
+typedef struct {
+        PyObject_HEAD
+        npy_bool obval;
+} PyBoolScalarObject;
+#endif
+
+
+typedef struct {
+        PyObject_HEAD
+        signed char obval;
+} PyByteScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        short obval;
+} PyShortScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        int obval;
+} PyIntScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        long obval;
+} PyLongScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        npy_longlong obval;
+} PyLongLongScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        unsigned char obval;
+} PyUByteScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        unsigned short obval;
+} PyUShortScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        unsigned int obval;
+} PyUIntScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        unsigned long obval;
+} PyULongScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        npy_ulonglong obval;
+} PyULongLongScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        npy_half obval;
+} PyHalfScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        float obval;
+} PyFloatScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        double obval;
+} PyDoubleScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        npy_longdouble obval;
+} PyLongDoubleScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        npy_cfloat obval;
+} PyCFloatScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        npy_cdouble obval;
+} PyCDoubleScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        npy_clongdouble obval;
+} PyCLongDoubleScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        PyObject * obval;
+} PyObjectScalarObject;
+
+typedef struct {
+        PyObject_HEAD
+        npy_datetime obval;
+        PyArray_DatetimeMetaData obmeta;
+} PyDatetimeScalarObject;
+
+typedef struct {
+        PyObject_HEAD
+        npy_timedelta obval;
+        PyArray_DatetimeMetaData obmeta;
+} PyTimedeltaScalarObject;
+
+
+typedef struct {
+        PyObject_HEAD
+        char obval;
+} PyScalarObject;
+
+#define PyStringScalarObject PyBytesObject
+typedef struct {
+        /* note that the PyObject_HEAD macro lives right here */
+        PyUnicodeObject base;
+        Py_UCS4 *obval;
+    #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION
+        char *buffer_fmt;
+    #endif
+} PyUnicodeScalarObject;
+
+
+typedef struct {
+        PyObject_VAR_HEAD
+        char *obval;
+        PyArray_Descr *descr;
+        int flags;
+        PyObject *base;
+    #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION
+        void *_buffer_info;  /* private buffer info, tagged to allow warning */
+    #endif
+} PyVoidScalarObject;
+
+/* Macros
+     PyScalarObject
+     PyArrType_Type
+   are defined in ndarrayobject.h
+*/
+
+#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0])))
+#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1])))
+#define PyArrayScalar_FromLong(i) \
+        ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)])))
+#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i)                  \
+        return Py_INCREF(PyArrayScalar_FromLong(i)), \
+                PyArrayScalar_FromLong(i)
+#define PyArrayScalar_RETURN_FALSE              \
+        return Py_INCREF(PyArrayScalar_False),  \
+                PyArrayScalar_False
+#define PyArrayScalar_RETURN_TRUE               \
+        return Py_INCREF(PyArrayScalar_True),   \
+                PyArrayScalar_True
+
+#define PyArrayScalar_New(cls) \
+        Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0)
+#define PyArrayScalar_VAL(obj, cls)             \
+        ((Py##cls##ScalarObject *)obj)->obval
+#define PyArrayScalar_ASSIGN(obj, cls, val) \
+        PyArrayScalar_VAL(obj, cls) = val
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ */
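
A small sketch of the constructor macros in use, building a NumPy float64 scalar from C; it assumes import_array() has already run so that PyDoubleArrType_Type is available.

static PyObject *
make_float64_scalar(double value)
{
    /* Expands to PyDoubleArrType_Type.tp_alloc(&PyDoubleArrType_Type, 0). */
    PyObject *scalar = PyArrayScalar_New(Double);
    if (scalar == NULL) {
        return NULL;
    }
    /* Stores `value` into ((PyDoubleScalarObject *)scalar)->obval. */
    PyArrayScalar_ASSIGN(scalar, Double, value);
    return scalar;
}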
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/experimental_dtype_api.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/experimental_dtype_api.h
new file mode 100644
index 00000000..19088dab
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/experimental_dtype_api.h
@@ -0,0 +1,365 @@
+/*
+ * This header exports the new experimental DType API as proposed in
+ * NEPs 41 to 43.  For background, please check these NEPs.  Otherwise,
+ * this header also serves as documentation for the time being.
+ *
+ * The header includes `_dtype_api.h`, which holds most definitions, while this
+ * header mainly wraps functions for public consumption.
+ *
+ * Please do not hesitate to contact @seberg with questions.  This is
+ * developed together with https://github.com/seberg/experimental_user_dtypes
+ * and those interested in experimenting are encouraged to contribute there.
+ *
+ * To use the functions defined in the header, call::
+ *
+ *     if (import_experimental_dtype_api(version) < 0) {
+ *         return NULL;
+ *     }
+ *
+ * in your module init.  (A version mismatch will be reported; just update
+ * to the correct one, and this will alert you to possible changes.)
+ *
+ * The following lists the main symbols currently exported.  Please do not
+ * hesitate to ask for help or clarification:
+ *
+ * - PyUFunc_AddLoopFromSpec:
+ *
+ *     Register a new loop for a ufunc.  This uses the `PyArrayMethod_Spec`
+ *     which must be filled in (see in-line comments).
+ *
+ * - PyUFunc_AddWrappingLoop:
+ *
+ *     Register a new loop which reuses an existing one, but modifies the
+ *     result dtypes.  Please search the internal NumPy docs for more info
+ *     at this point.  (Used for physical units dtype.)
+ *
+ * - PyUFunc_AddPromoter:
+ *
+ *     Register a new promoter for a ufunc.  A promoter is a function stored
+ *     in a PyCapsule (see in-line comments).  It is passed the operation and
+ *     requested DType signatures and can mutate it to attempt a new search
+ *     for a matching loop/promoter.
+ *     I.e. for Numba a promoter could even add the desired loop.
+ *
+ * - PyArrayInitDTypeMeta_FromSpec:
+ *
+ *     Initialize a new DType.  It must currently be a static Python C type
+ *     that is declared as `PyArray_DTypeMeta` and not `PyTypeObject`.
+ *     Further, it must subclass `np.dtype` and set its type to
+ *     `PyArrayDTypeMeta_Type` (before calling `PyType_Ready()`).
+ *
+ * - PyArray_CommonDType:
+ *
+ *     Find the common-dtype ("promotion") for two DType classes.  Similar
+ *     to `np.result_type`, but works on the classes and not instances.
+ *
+ * - PyArray_PromoteDTypeSequence:
+ *
+ *     Same as CommonDType, but works with an arbitrary number of DTypes.
+ *     This function is smarter and can often return successful and unambiguous
+ *     results when `common_dtype(common_dtype(dt1, dt2), dt3)` would
+ *     depend on the operation order or fail.  Nevertheless, DTypes should
+ *     aim to ensure that their common-dtype implementation is associative
+ *     and commutative!  (Mainly, unsigned and signed integers are not.)
+ *
+ *     For guaranteed consistent results DTypes must implement common-Dtype
+ *     "transitively".  If A promotes B and B promotes C, than A must generally
+ *     also promote C; where "promotes" means implements the promotion.
+ *     (There are some exceptions for abstract DTypes)
+ *
+ * - PyArray_GetDefaultDescr:
+ *
+ *     Given a DType class, returns the default instance (descriptor).
+ *     This is an inline function checking for `singleton` first and only
+ *     calls the `default_descr` function if necessary.
+ *
+ * - PyArray_DoubleDType, etc.:
+ *
+ *     Aliases to the DType classes for the builtin NumPy DTypes.
+ *
+ * WARNING
+ * =======
+ *
+ * By using this header, you understand that this is a fully experimental
+ * exposure.  Details are expected to change, and some options may have no
+ * effect.  (Please contact @seberg if you have questions!)
+ * If the exposure stops working, please file a bug report with NumPy.
+ * Further, a DType created using this API/header should still be expected
+ * to be incompatible with some functionality inside and outside of NumPy.
+ * In this case crashes must be expected.  Please report any such problems
+ * so that they can be fixed before final exposure.
+ * Furthermore, expect missing checks for programming errors which the final
+ * API is expected to have.
+ *
+ * Symbols with a leading underscore are likely to not be included in the
+ * first public version, if these are central to your use-case, please let
+ * us know, so that we can reconsider.
+ *
+ * "Array-like" consumer API not yet under considerations
+ * ======================================================
+ *
+ * The new DType API is designed in a way to make it potentially useful for
+ * alternative "array-like" implementations.  This will require careful
+ * exposure of details and functions and is not part of this experimental API.
+ *
+ * Brief (incompatibility) changelog
+ * =================================
+ *
+ * 2. None (only additions).
+ * 3. New `npy_intp *view_offset` argument for `resolve_descriptors`.
+ *    This replaces the `NPY_CAST_IS_VIEW` flag.  It can be set to 0 if the
+ *    operation is a view, and is pre-initialized to `NPY_MIN_INTP` indicating
+ *    that the operation is not a view.
+ */
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_
+
+#include <Python.h>
+#include "ndarraytypes.h"
+#include "_dtype_api.h"
+
+/*
+ * The contents of PyArrayMethodObject are currently opaque (is there a
+ * good way to make them be `PyObject *`?)
+ */
+typedef struct PyArrayMethodObject_tag PyArrayMethodObject;
+
+/*
+ * There must be a better way?! -- Oh well, this is experimental
+ * (my issue with it is that I cannot undef those helpers).
+ */
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+    #define NPY_EXP_DTYPE_API_CONCAT_HELPER2(x, y) x ## y
+    #define NPY_EXP_DTYPE_API_CONCAT_HELPER(arg) NPY_EXP_DTYPE_API_CONCAT_HELPER2(arg, __experimental_dtype_api_table)
+    #define __experimental_dtype_api_table NPY_EXP_DTYPE_API_CONCAT_HELPER(PY_ARRAY_UNIQUE_SYMBOL)
+#else
+    #define __experimental_dtype_api_table __experimental_dtype_api_table
+#endif
+
+/* Support for correct multi-file projects: */
+#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
+    extern void **__experimental_dtype_api_table;
+#else
+    /*
+     * Just a hack so I don't forget importing as much myself; I spent way too
+     * much time noticing it the first time around :).
+     */
+    static void
+    __not_imported(void)
+    {
+        printf("*****\nCritical error, dtype API not imported\n*****\n");
+    }
+
+    static void *__uninitialized_table[] = {
+            &__not_imported, &__not_imported, &__not_imported, &__not_imported,
+            &__not_imported, &__not_imported, &__not_imported, &__not_imported};
+
+    #if defined(PY_ARRAY_UNIQUE_SYMBOL)
+        void **__experimental_dtype_api_table = __uninitialized_table;
+    #else
+        static void **__experimental_dtype_api_table = __uninitialized_table;
+    #endif
+#endif
+
+
+typedef int _ufunc_addloop_fromspec_func(
+        PyObject *ufunc, PyArrayMethod_Spec *spec);
+/*
+ * The main ufunc registration function.  This adds a new implementation/loop
+ * to a ufunc.  It replaces `PyUFunc_RegisterLoopForType`.
+ */
+#define PyUFunc_AddLoopFromSpec \
+    (*(_ufunc_addloop_fromspec_func *)(__experimental_dtype_api_table[0]))
+
+
+/* Please see the NumPy definitions in `array_method.h` for details on these */
+typedef int translate_given_descrs_func(int nin, int nout,
+        PyArray_DTypeMeta *wrapped_dtypes[],
+        PyArray_Descr *given_descrs[], PyArray_Descr *new_descrs[]);
+typedef int translate_loop_descrs_func(int nin, int nout,
+        PyArray_DTypeMeta *new_dtypes[], PyArray_Descr *given_descrs[],
+        PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]);
+
+typedef int _ufunc_wrapping_loop_func(PyObject *ufunc_obj,
+        PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[],
+        translate_given_descrs_func *translate_given_descrs,
+        translate_loop_descrs_func *translate_loop_descrs);
+#define PyUFunc_AddWrappingLoop \
+    (*(_ufunc_wrapping_loop_func *)(__experimental_dtype_api_table[7]))
+
+/*
+ * Type of the C promoter function, which must be wrapped into a
+ * PyCapsule with name "numpy._ufunc_promoter".
+ *
+ * Note that currently the output dtypes are always NULL unless they are
+ * also part of the signature.  This is an implementation detail and could
+ * change in the future.  However, in general promoters should not have a
+ * need for output dtypes.
+ * (There are potential use-cases, these are currently unsupported.)
+ */
+typedef int promoter_function(PyObject *ufunc,
+        PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
+        PyArray_DTypeMeta *new_op_dtypes[]);
+
+/*
+ * Function to register a promoter.
+ *
+ * @param ufunc The ufunc object to register the promoter with.
+ * @param DType_tuple A Python tuple containing DTypes or None matching the
+ *        number of inputs and outputs of the ufunc.
+ * @param promoter A PyCapsule with name "numpy._ufunc_promoter" containing
+ *        a pointer to a `promoter_function`.
+ */
+typedef int _ufunc_addpromoter_func(
+        PyObject *ufunc, PyObject *DType_tuple, PyObject *promoter);
+#define PyUFunc_AddPromoter \
+    (*(_ufunc_addpromoter_func *)(__experimental_dtype_api_table[1]))
+
+#define PyArrayDTypeMeta_Type \
+    (*(PyTypeObject *)__experimental_dtype_api_table[2])
+typedef int __dtypemeta_fromspec(
+        PyArray_DTypeMeta *DType, PyArrayDTypeMeta_Spec *dtype_spec);
+/*
+ * Finalize creation of a DTypeMeta.  You must ensure that the DTypeMeta is
+ * a proper subclass.  The DTypeMeta object has additional fields compared to
+ * a normal PyTypeObject!
+ * The only (easy) creation of a new DType is to create a static Type which
+ * inherits `PyArray_DescrType`, sets its type to `PyArrayDTypeMeta_Type` and
+ * uses `PyArray_DTypeMeta` defined above as the C-structure.
+ */
+#define PyArrayInitDTypeMeta_FromSpec \
+    ((__dtypemeta_fromspec *)(__experimental_dtype_api_table[3]))
+
+
+/*
+ * *************************************
+ *          WORKING WITH DTYPES
+ * *************************************
+ */
+
+typedef PyArray_DTypeMeta *__common_dtype(
+        PyArray_DTypeMeta *DType1, PyArray_DTypeMeta *DType2);
+#define PyArray_CommonDType \
+    ((__common_dtype *)(__experimental_dtype_api_table[4]))
+
+
+typedef PyArray_DTypeMeta *__promote_dtype_sequence(
+        npy_intp num, PyArray_DTypeMeta *DTypes[]);
+#define PyArray_PromoteDTypeSequence \
+    ((__promote_dtype_sequence *)(__experimental_dtype_api_table[5]))
+
+
+typedef PyArray_Descr *__get_default_descr(
+        PyArray_DTypeMeta *DType);
+#define _PyArray_GetDefaultDescr \
+    ((__get_default_descr *)(__experimental_dtype_api_table[6]))
+
+static inline PyArray_Descr *
+PyArray_GetDefaultDescr(PyArray_DTypeMeta *DType)
+{
+    if (DType->singleton != NULL) {
+        Py_INCREF(DType->singleton);
+        return DType->singleton;
+    }
+    return _PyArray_GetDefaultDescr(DType);
+}
+
+
+/*
+ * NumPy's builtin DTypes:
+ */
+#define PyArray_BoolDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[10])
+/* Integers */
+#define PyArray_ByteDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[11])
+#define PyArray_UByteDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[12])
+#define PyArray_ShortDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[13])
+#define PyArray_UShortDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[14])
+#define PyArray_IntDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[15])
+#define PyArray_UIntDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[16])
+#define PyArray_LongDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[17])
+#define PyArray_ULongDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[18])
+#define PyArray_LongLongDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[19])
+#define PyArray_ULongLongDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[20])
+/* Integer aliases */
+#define PyArray_Int8DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[21])
+#define PyArray_UInt8DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[22])
+#define PyArray_Int16DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[23])
+#define PyArray_UInt16DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[24])
+#define PyArray_Int32DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[25])
+#define PyArray_UInt32DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[26])
+#define PyArray_Int64DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[27])
+#define PyArray_UInt64DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[28])
+#define PyArray_IntpDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[29])
+#define PyArray_UIntpDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[30])
+/* Floats */
+#define PyArray_HalfDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[31])
+#define PyArray_FloatDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[32])
+#define PyArray_DoubleDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[33])
+#define PyArray_LongDoubleDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[34])
+/* Complex */
+#define PyArray_CFloatDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[35])
+#define PyArray_CDoubleDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[36])
+#define PyArray_CLongDoubleDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[37])
+/* String/Bytes */
+#define PyArray_StringDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[38])
+#define PyArray_UnicodeDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[39])
+/* Datetime/Timedelta */
+#define PyArray_DatetimeDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[40])
+#define PyArray_TimedeltaDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[41])
+/* Object/Void */
+#define PyArray_ObjectDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[42])
+#define PyArray_VoidDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[43])
+
+/*
+ * ********************************
+ *         Initialization
+ * ********************************
+ *
+ * Import the experimental API, the version must match the one defined in
+ * the header to ensure changes are taken into account. NumPy will further
+ * runtime-check this.
+ * You must call this function to use the symbols defined in this file.
+ */
+#if !defined(NO_IMPORT) && !defined(NO_IMPORT_ARRAY)
+
+static int
+import_experimental_dtype_api(int version)
+{
+    if (version != __EXPERIMENTAL_DTYPE_API_VERSION) {
+        PyErr_Format(PyExc_RuntimeError,
+                "DType API version %d did not match header version %d. Please "
+                "update the import statement and check for API changes.",
+                version, __EXPERIMENTAL_DTYPE_API_VERSION);
+        return -1;
+    }
+    if (__experimental_dtype_api_table != __uninitialized_table) {
+        /* already imported. */
+        return 0;
+    }
+
+    PyObject *multiarray = PyImport_ImportModule("numpy.core._multiarray_umath");
+    if (multiarray == NULL) {
+        return -1;
+    }
+
+    PyObject *api = PyObject_CallMethod(multiarray,
+        "_get_experimental_dtype_api", "i", version);
+    Py_DECREF(multiarray);
+    if (api == NULL) {
+        return -1;
+    }
+    __experimental_dtype_api_table = (void **)PyCapsule_GetPointer(api,
+            "experimental_dtype_api_table");
+    Py_DECREF(api);
+
+    if (__experimental_dtype_api_table == NULL) {
+        __experimental_dtype_api_table = __uninitialized_table;
+        return -1;
+    }
+    return 0;
+}
+
+#endif  /* !defined(NO_IMPORT) && !defined(NO_IMPORT_ARRAY) */
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_ */
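
Putting the initialization story together, a module init for a user DType might look like the following sketch; moduledef, MyDType, and my_dtype_spec are hypothetical names defined elsewhere in the extension.

PyMODINIT_FUNC
PyInit__my_dtype(void)                  /* hypothetical module name */
{
    PyObject *m = PyModule_Create(&moduledef);
    if (m == NULL) {
        return NULL;
    }
    /* Must match __EXPERIMENTAL_DTYPE_API_VERSION (11 in this header). */
    if (import_experimental_dtype_api(11) < 0) {
        Py_DECREF(m);
        return NULL;
    }
    /* MyDType is a static PyArray_DTypeMeta whose type was set to
       PyArrayDTypeMeta_Type before PyType_Ready(). */
    if (PyArrayInitDTypeMeta_FromSpec(&MyDType, &my_dtype_spec) < 0) {
        Py_DECREF(m);
        return NULL;
    }
    return m;
}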
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/halffloat.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/halffloat.h
new file mode 100644
index 00000000..95040166
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/halffloat.h
@@ -0,0 +1,70 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_
+
+#include <Python.h>
+#include <numpy/npy_math.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Half-precision routines
+ */
+
+/* Conversions */
+float npy_half_to_float(npy_half h);
+double npy_half_to_double(npy_half h);
+npy_half npy_float_to_half(float f);
+npy_half npy_double_to_half(double d);
+/* Comparisons */
+int npy_half_eq(npy_half h1, npy_half h2);
+int npy_half_ne(npy_half h1, npy_half h2);
+int npy_half_le(npy_half h1, npy_half h2);
+int npy_half_lt(npy_half h1, npy_half h2);
+int npy_half_ge(npy_half h1, npy_half h2);
+int npy_half_gt(npy_half h1, npy_half h2);
+/* faster *_nonan variants for when you know h1 and h2 are not NaN */
+int npy_half_eq_nonan(npy_half h1, npy_half h2);
+int npy_half_lt_nonan(npy_half h1, npy_half h2);
+int npy_half_le_nonan(npy_half h1, npy_half h2);
+/* Miscellaneous functions */
+int npy_half_iszero(npy_half h);
+int npy_half_isnan(npy_half h);
+int npy_half_isinf(npy_half h);
+int npy_half_isfinite(npy_half h);
+int npy_half_signbit(npy_half h);
+npy_half npy_half_copysign(npy_half x, npy_half y);
+npy_half npy_half_spacing(npy_half h);
+npy_half npy_half_nextafter(npy_half x, npy_half y);
+npy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus);
+
+/*
+ * Half-precision constants
+ */
+
+#define NPY_HALF_ZERO   (0x0000u)
+#define NPY_HALF_PZERO  (0x0000u)
+#define NPY_HALF_NZERO  (0x8000u)
+#define NPY_HALF_ONE    (0x3c00u)
+#define NPY_HALF_NEGONE (0xbc00u)
+#define NPY_HALF_PINF   (0x7c00u)
+#define NPY_HALF_NINF   (0xfc00u)
+#define NPY_HALF_NAN    (0x7e00u)
+
+#define NPY_MAX_HALF    (0x7bffu)
+
+/*
+ * Bit-level conversions
+ */
+
+npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f);
+npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d);
+npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h);
+npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ */
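
For illustration, a round trip through the conversion routines; 1.5 is exactly representable in half precision, while values above the maximum (65504, NPY_MAX_HALF) saturate to infinity.

#include <stdio.h>
#include <numpy/halffloat.h>

static void
half_demo(void)
{
    npy_half h = npy_float_to_half(1.5f);

    printf("bits:  0x%04x\n", (unsigned)h);                /* 0x3e00 */
    printf("float: %f\n", npy_half_to_float(h));           /* 1.500000 */
    printf("isinf: %d\n",
           npy_half_isinf(npy_float_to_half(70000.0f)));   /* 1 (overflow) */
}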
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/ndarrayobject.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/ndarrayobject.h
new file mode 100644
index 00000000..36cfdd6f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/ndarrayobject.h
@@ -0,0 +1,251 @@
+/*
+ * DON'T INCLUDE THIS DIRECTLY.
+ */
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+#include "ndarraytypes.h"
+
+/* Includes the "function" C-API -- these are all stored in a
+   list of pointers --- one for each file
+   The two lists are concatenated into one in multiarray.
+
+   They are available as import_array()
+*/
+
+#include "__multiarray_api.h"
+
+
+/* C-API that requires previous API to be defined */
+
+#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type)
+
+#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type)
+#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type)
+
+#define PyArray_HasArrayInterfaceType(op, type, context, out)                 \
+        ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) ||    \
+         (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) ||          \
+         (((out)=PyArray_FromArrayAttr(op, type, context)) !=                 \
+          Py_NotImplemented))
+
+#define PyArray_HasArrayInterface(op, out)                                    \
+        PyArray_HasArrayInterfaceType(op, NULL, NULL, out)
+
+#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \
+                               (PyArray_NDIM((PyArrayObject *)op) == 0))
+
+#define PyArray_IsScalar(obj, cls)                                            \
+        (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type))
+
+#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) ||               \
+                                PyArray_IsZeroDim(m))
+#define PyArray_IsPythonNumber(obj)                                           \
+        (PyFloat_Check(obj) || PyComplex_Check(obj) ||                        \
+         PyLong_Check(obj) || PyBool_Check(obj))
+#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj)                       \
+              || PyArray_IsScalar((obj), Integer))
+#define PyArray_IsPythonScalar(obj)                                           \
+        (PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) ||                 \
+         PyUnicode_Check(obj))
+
+#define PyArray_IsAnyScalar(obj)                                              \
+        (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj))
+
+#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) ||           \
+                                     PyArray_CheckScalar(obj))
+
+
+#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ?                   \
+                                  Py_INCREF(m), (m) :                         \
+                                  (PyArrayObject *)(PyArray_Copy(m)))
+
+#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) &&   \
+                                  PyArray_CompareLists(PyArray_DIMS(a1),      \
+                                                       PyArray_DIMS(a2),      \
+                                                       PyArray_NDIM(a1)))
+
+#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m))
+#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m))
+#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL)
+
+#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags,   \
+                                                      NULL)
+
+#define PyArray_FROM_OT(m,type) PyArray_FromAny(m,                            \
+                                PyArray_DescrFromType(type), 0, 0, 0, NULL)
+
+#define PyArray_FROM_OTF(m, type, flags) \
+        PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \
+                        (((flags) & NPY_ARRAY_ENSURECOPY) ? \
+                         ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL)
+
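+/*
+ * Illustrative sketch (not part of the upstream header): the typical way
+ * extension code uses PyArray_FROM_OTF to obtain an aligned, native-byte-order,
+ * C-contiguous view of an arbitrary object.  The function name
+ * `sum_doubles` is hypothetical and error handling is abbreviated.
+ *
+ *     static double
+ *     sum_doubles(PyObject *obj)
+ *     {
+ *         PyArrayObject *arr = (PyArrayObject *)PyArray_FROM_OTF(
+ *                 obj, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY);
+ *         double total = 0.0;
+ *         if (arr == NULL) {
+ *             return -1.0;   (real code would propagate the Python error)
+ *         }
+ *         double *data = (double *)PyArray_DATA(arr);
+ *         for (npy_intp i = 0; i < PyArray_SIZE(arr); i++) {
+ *             total += data[i];
+ *         }
+ *         Py_DECREF(arr);
+ *         return total;
+ *     }
+ *
+ * NPY_ARRAY_IN_ARRAY is defined in ndarraytypes.h, included above.
+ */
+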
+#define PyArray_FROMANY(m, type, min, max, flags) \
+        PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \
+                        (((flags) & NPY_ARRAY_ENSURECOPY) ? \
+                         (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL)
+
+#define PyArray_ZEROS(m, dims, type, is_f_order) \
+        PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order)
+
+#define PyArray_EMPTY(m, dims, type, is_f_order) \
+        PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order)
+
+#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \
+                                           PyArray_NBYTES(obj))
+#ifndef PYPY_VERSION
+#define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt)
+#define NPY_REFCOUNT PyArray_REFCOUNT
+#endif
+#define NPY_MAX_ELSIZE (2 * NPY_SIZEOF_LONGDOUBLE)
+
+#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \
+        PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+                              max_depth, NPY_ARRAY_DEFAULT, NULL)
+
+#define PyArray_EquivArrTypes(a1, a2) \
+        PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2))
+
+#define PyArray_EquivByteorders(b1, b2) \
+        (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2)))
+
+#define PyArray_SimpleNew(nd, dims, typenum) \
+        PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL)
+
+#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \
+        PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \
+                    data, 0, NPY_ARRAY_CARRAY, NULL)
+
+#define PyArray_SimpleNewFromDescr(nd, dims, descr) \
+        PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \
+                             NULL, NULL, 0, NULL)
+
+#define PyArray_ToScalar(data, arr) \
+        PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr)
+
+
+/* These might be faster without the dereferencing of obj
+   going on inside -- of course an optimizing compiler should
+   inline the constants inside a for loop, making it a moot point
+*/
+
+#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \
+                                         (i)*PyArray_STRIDES(obj)[0]))
+
+#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \
+                                            (i)*PyArray_STRIDES(obj)[0] + \
+                                            (j)*PyArray_STRIDES(obj)[1]))
+
+#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \
+                                            (i)*PyArray_STRIDES(obj)[0] + \
+                                            (j)*PyArray_STRIDES(obj)[1] + \
+                                            (k)*PyArray_STRIDES(obj)[2]))
+
+#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \
+                                            (i)*PyArray_STRIDES(obj)[0] + \
+                                            (j)*PyArray_STRIDES(obj)[1] + \
+                                            (k)*PyArray_STRIDES(obj)[2] + \
+                                            (l)*PyArray_STRIDES(obj)[3]))
+
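+/*
+ * Illustrative sketch: element-wise access to a 2-D array with
+ * PyArray_GETPTR2, assuming the array holds NPY_DOUBLE elements.
+ * The GETPTR macros go through the strides, so this also works for
+ * non-contiguous (e.g. sliced or transposed) arrays.
+ *
+ *     for (npy_intp i = 0; i < PyArray_DIM(arr, 0); i++) {
+ *         for (npy_intp j = 0; j < PyArray_DIM(arr, 1); j++) {
+ *             double v = *(double *)PyArray_GETPTR2(arr, i, j);
+ *             ...
+ *         }
+ *     }
+ */
+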
+static inline void
+PyArray_DiscardWritebackIfCopy(PyArrayObject *arr)
+{
+    PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;
+    if (fa && fa->base) {
+        if (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY) {
+            PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE);
+            Py_DECREF(fa->base);
+            fa->base = NULL;
+            PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY);
+        }
+    }
+}
+
+#define PyArray_DESCR_REPLACE(descr) do { \
+                PyArray_Descr *_new_; \
+                _new_ = PyArray_DescrNew(descr); \
+                Py_XDECREF(descr); \
+                descr = _new_; \
+        } while(0)
+
+/* Copy should always return contiguous array */
+#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER)
+
+#define PyArray_FromObject(op, type, min_depth, max_depth) \
+        PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+                              max_depth, NPY_ARRAY_BEHAVED | \
+                                         NPY_ARRAY_ENSUREARRAY, NULL)
+
+#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \
+        PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+                              max_depth, NPY_ARRAY_DEFAULT | \
+                                         NPY_ARRAY_ENSUREARRAY, NULL)
+
+#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \
+        PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+                        max_depth, NPY_ARRAY_ENSURECOPY | \
+                                   NPY_ARRAY_DEFAULT | \
+                                   NPY_ARRAY_ENSUREARRAY, NULL)
+
+#define PyArray_Cast(mp, type_num)                                            \
+        PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0)
+
+#define PyArray_Take(ap, items, axis)                                         \
+        PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE)
+
+#define PyArray_Put(ap, items, values)                                        \
+        PyArray_PutTo(ap, items, values, NPY_RAISE)
+
+/* Compatibility with old Numeric stuff -- don't use in new code */
+
+#define PyArray_FromDimsAndData(nd, d, type, data)                            \
+        PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type),   \
+                                        data)
+
+
+/*
+   Check to see if this key in the dictionary is the "title"
+   entry of the tuple (i.e. a duplicate dictionary entry in the fields
+   dict).
+*/
+
+static inline int
+NPY_TITLE_KEY_check(PyObject *key, PyObject *value)
+{
+    PyObject *title;
+    if (PyTuple_Size(value) != 3) {
+        return 0;
+    }
+    title = PyTuple_GetItem(value, 2);
+    if (key == title) {
+        return 1;
+    }
+#ifdef PYPY_VERSION
+    /*
+     * On PyPy, dictionary keys do not always preserve object identity.
+     * Fall back to comparison by value.
+     */
+    if (PyUnicode_Check(title) && PyUnicode_Check(key)) {
+        return PyUnicode_Compare(title, key) == 0 ? 1 : 0;
+    }
+#endif
+    return 0;
+}
+
+/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." */
+#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value)))
+
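+/*
+ * Illustrative sketch: when walking the fields dict of a structured dtype,
+ * title aliases are usually skipped so that each field is visited once.
+ * `descr` is assumed to be a PyArray_Descr with a non-NULL fields dict.
+ *
+ *     PyObject *key, *value;
+ *     Py_ssize_t pos = 0;
+ *     while (PyDict_Next(descr->fields, &pos, &key, &value)) {
+ *         if (NPY_TITLE_KEY(key, value)) {
+ *             continue;
+ *         }
+ *         ... key is a genuine field name ...
+ *     }
+ */
+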
+#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1)
+#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1)
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/ndarraytypes.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/ndarraytypes.h
new file mode 100644
index 00000000..742ba526
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/ndarraytypes.h
@@ -0,0 +1,1945 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_
+
+#include "npy_common.h"
+#include "npy_endian.h"
+#include "npy_cpu.h"
+#include "utils.h"
+
+#define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN
+
+/* Only use thread if configured in config and python supports it */
+#if defined WITH_THREAD && !NPY_NO_SMP
+        #define NPY_ALLOW_THREADS 1
+#else
+        #define NPY_ALLOW_THREADS 0
+#endif
+
+#ifndef __has_extension
+#define __has_extension(x) 0
+#endif
+
+#if !defined(_NPY_NO_DEPRECATIONS) && \
+    ((defined(__GNUC__)&& __GNUC__ >= 6) || \
+     __has_extension(attribute_deprecated_with_message))
+#define NPY_ATTR_DEPRECATE(text) __attribute__ ((deprecated (text)))
+#else
+#define NPY_ATTR_DEPRECATE(text)
+#endif
+
+/*
+ * There are several places in the code where an array of dimensions
+ * is allocated statically.  This is the size of that static
+ * allocation.
+ *
+ * The array creation itself could have arbitrary dimensions but all
+ * the places where static allocation is used would need to be changed
+ * to dynamic (including inside of several structures)
+ */
+
+#define NPY_MAXDIMS 32
+#define NPY_MAXARGS 32
+
+/* Used for Converter Functions "O&" code in ParseTuple */
+#define NPY_FAIL 0
+#define NPY_SUCCEED 1
+
+
+enum NPY_TYPES {    NPY_BOOL=0,
+                    NPY_BYTE, NPY_UBYTE,
+                    NPY_SHORT, NPY_USHORT,
+                    NPY_INT, NPY_UINT,
+                    NPY_LONG, NPY_ULONG,
+                    NPY_LONGLONG, NPY_ULONGLONG,
+                    NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE,
+                    NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE,
+                    NPY_OBJECT=17,
+                    NPY_STRING, NPY_UNICODE,
+                    NPY_VOID,
+                    /*
+                     * New 1.6 types appended, may be integrated
+                     * into the above in 2.0.
+                     */
+                    NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF,
+
+                    NPY_NTYPES,
+                    NPY_NOTYPE,
+                    NPY_CHAR NPY_ATTR_DEPRECATE("Use NPY_STRING"),
+                    NPY_USERDEF=256,  /* leave room for characters */
+
+                    /* The number of types not including the new 1.6 types */
+                    NPY_NTYPES_ABI_COMPATIBLE=21
+};
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma deprecated(NPY_CHAR)
+#endif
+
+/* basetype array priority */
+#define NPY_PRIORITY 0.0
+
+/* default subtype priority */
+#define NPY_SUBTYPE_PRIORITY 1.0
+
+/* default scalar priority */
+#define NPY_SCALAR_PRIORITY -1000000.0
+
+/* How many floating point types are there (excluding half) */
+#define NPY_NUM_FLOATTYPE 3
+
+/*
+ * These characters correspond to the array type and the struct
+ * module
+ */
+
+enum NPY_TYPECHAR {
+        NPY_BOOLLTR = '?',
+        NPY_BYTELTR = 'b',
+        NPY_UBYTELTR = 'B',
+        NPY_SHORTLTR = 'h',
+        NPY_USHORTLTR = 'H',
+        NPY_INTLTR = 'i',
+        NPY_UINTLTR = 'I',
+        NPY_LONGLTR = 'l',
+        NPY_ULONGLTR = 'L',
+        NPY_LONGLONGLTR = 'q',
+        NPY_ULONGLONGLTR = 'Q',
+        NPY_HALFLTR = 'e',
+        NPY_FLOATLTR = 'f',
+        NPY_DOUBLELTR = 'd',
+        NPY_LONGDOUBLELTR = 'g',
+        NPY_CFLOATLTR = 'F',
+        NPY_CDOUBLELTR = 'D',
+        NPY_CLONGDOUBLELTR = 'G',
+        NPY_OBJECTLTR = 'O',
+        NPY_STRINGLTR = 'S',
+        NPY_STRINGLTR2 = 'a',
+        NPY_UNICODELTR = 'U',
+        NPY_VOIDLTR = 'V',
+        NPY_DATETIMELTR = 'M',
+        NPY_TIMEDELTALTR = 'm',
+        NPY_CHARLTR = 'c',
+
+        /*
+         * No Descriptor, just a define -- this lets
+         * Python users specify an array of integers
+         * large enough to hold a pointer on the
+         * platform
+         */
+        NPY_INTPLTR = 'p',
+        NPY_UINTPLTR = 'P',
+
+        /*
+         * These are for dtype 'kinds', not dtype 'typecodes',
+         * which is what the entries above are.
+         */
+        NPY_GENBOOLLTR ='b',
+        NPY_SIGNEDLTR = 'i',
+        NPY_UNSIGNEDLTR = 'u',
+        NPY_FLOATINGLTR = 'f',
+        NPY_COMPLEXLTR = 'c'
+};
+
+/*
+ * Changing this may break Numpy API compatibility
+ * due to changing offsets in PyArray_ArrFuncs, so be
+ * careful. Here we have reused the mergesort slot for
+ * any kind of stable sort, the actual implementation will
+ * depend on the data type.
+ */
+typedef enum {
+        NPY_QUICKSORT=0,
+        NPY_HEAPSORT=1,
+        NPY_MERGESORT=2,
+        NPY_STABLESORT=2,
+} NPY_SORTKIND;
+#define NPY_NSORTS (NPY_STABLESORT + 1)
+
+
+typedef enum {
+        NPY_INTROSELECT=0
+} NPY_SELECTKIND;
+#define NPY_NSELECTS (NPY_INTROSELECT + 1)
+
+
+typedef enum {
+        NPY_SEARCHLEFT=0,
+        NPY_SEARCHRIGHT=1
+} NPY_SEARCHSIDE;
+#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1)
+
+
+typedef enum {
+        NPY_NOSCALAR=-1,
+        NPY_BOOL_SCALAR,
+        NPY_INTPOS_SCALAR,
+        NPY_INTNEG_SCALAR,
+        NPY_FLOAT_SCALAR,
+        NPY_COMPLEX_SCALAR,
+        NPY_OBJECT_SCALAR
+} NPY_SCALARKIND;
+#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1)
+
+/* For specifying array memory layout or iteration order */
+typedef enum {
+        /* Fortran order if inputs are all Fortran, C otherwise */
+        NPY_ANYORDER=-1,
+        /* C order */
+        NPY_CORDER=0,
+        /* Fortran order */
+        NPY_FORTRANORDER=1,
+        /* An order as close to the inputs as possible */
+        NPY_KEEPORDER=2
+} NPY_ORDER;
+
+/* For specifying allowed casting in operations which support it */
+typedef enum {
+        _NPY_ERROR_OCCURRED_IN_CAST = -1,
+        /* Only allow identical types */
+        NPY_NO_CASTING=0,
+        /* Allow identical and byte swapped types */
+        NPY_EQUIV_CASTING=1,
+        /* Only allow safe casts */
+        NPY_SAFE_CASTING=2,
+        /* Allow safe casts or casts within the same kind */
+        NPY_SAME_KIND_CASTING=3,
+        /* Allow any casts */
+        NPY_UNSAFE_CASTING=4,
+} NPY_CASTING;
+
+typedef enum {
+        NPY_CLIP=0,
+        NPY_WRAP=1,
+        NPY_RAISE=2
+} NPY_CLIPMODE;
+
+typedef enum {
+        NPY_VALID=0,
+        NPY_SAME=1,
+        NPY_FULL=2
+} NPY_CORRELATEMODE;
+
+/* The special not-a-time (NaT) value */
+#define NPY_DATETIME_NAT NPY_MIN_INT64
+
+/*
+ * Upper bound on the length of a DATETIME ISO 8601 string
+ *   YEAR: 21 (64-bit year)
+ *   MONTH: 3
+ *   DAY: 3
+ *   HOURS: 3
+ *   MINUTES: 3
+ *   SECONDS: 3
+ *   ATTOSECONDS: 1 + 3*6
+ *   TIMEZONE: 6 (e.g. "+06:00", matching the 6 in the macro below)
+ *   NULL TERMINATOR: 1
+ */
+#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 + 3*6 + 6 + 1)
+
+/* The FR in the unit names stands for frequency */
+typedef enum {
+        /* Force signed enum type, must be -1 for code compatibility */
+        NPY_FR_ERROR = -1,      /* error or undetermined */
+
+        /* Start of valid units */
+        NPY_FR_Y = 0,           /* Years */
+        NPY_FR_M = 1,           /* Months */
+        NPY_FR_W = 2,           /* Weeks */
+        /* Gap where 1.6 NPY_FR_B (value 3) was */
+        NPY_FR_D = 4,           /* Days */
+        NPY_FR_h = 5,           /* hours */
+        NPY_FR_m = 6,           /* minutes */
+        NPY_FR_s = 7,           /* seconds */
+        NPY_FR_ms = 8,          /* milliseconds */
+        NPY_FR_us = 9,          /* microseconds */
+        NPY_FR_ns = 10,         /* nanoseconds */
+        NPY_FR_ps = 11,         /* picoseconds */
+        NPY_FR_fs = 12,         /* femtoseconds */
+        NPY_FR_as = 13,         /* attoseconds */
+        NPY_FR_GENERIC = 14     /* unbound units, can convert to anything */
+} NPY_DATETIMEUNIT;
+
+/*
+ * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS
+ * is technically one more than the actual number of units.
+ */
+#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1)
+#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC
+
+/*
+ * Business day conventions for mapping invalid business
+ * days to valid business days.
+ */
+typedef enum {
+    /* Go forward in time to the following business day. */
+    NPY_BUSDAY_FORWARD,
+    NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD,
+    /* Go backward in time to the preceding business day. */
+    NPY_BUSDAY_BACKWARD,
+    NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD,
+    /*
+     * Go forward in time to the following business day, unless it
+     * crosses a month boundary, in which case go backward
+     */
+    NPY_BUSDAY_MODIFIEDFOLLOWING,
+    /*
+     * Go backward in time to the preceding business day, unless it
+     * crosses a month boundary, in which case go forward.
+     */
+    NPY_BUSDAY_MODIFIEDPRECEDING,
+    /* Produce a NaT for non-business days. */
+    NPY_BUSDAY_NAT,
+    /* Raise an exception for non-business days. */
+    NPY_BUSDAY_RAISE
+} NPY_BUSDAY_ROLL;
+
+/************************************************************
+ * NumPy Auxiliary Data for inner loops, sort functions, etc.
+ ************************************************************/
+
+/*
+ * When creating an auxiliary data struct, this should always appear
+ * as the first member, like this:
+ *
+ * typedef struct {
+ *     NpyAuxData base;
+ *     double constant;
+ * } constant_multiplier_aux_data;
+ */
+typedef struct NpyAuxData_tag NpyAuxData;
+
+/* Function pointers for freeing or cloning auxiliary data */
+typedef void (NpyAuxData_FreeFunc) (NpyAuxData *);
+typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *);
+
+struct NpyAuxData_tag {
+    NpyAuxData_FreeFunc *free;
+    NpyAuxData_CloneFunc *clone;
+    /* To allow for a bit of expansion without breaking the ABI */
+    void *reserved[2];
+};
+
+/* Macros to use for freeing and cloning auxiliary data */
+#define NPY_AUXDATA_FREE(auxdata) \
+    do { \
+        if ((auxdata) != NULL) { \
+            (auxdata)->free(auxdata); \
+        } \
+    } while(0)
+#define NPY_AUXDATA_CLONE(auxdata) \
+    ((auxdata)->clone(auxdata))
+
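+/*
+ * Sketch (illustrative only): free and clone implementations for the
+ * constant_multiplier_aux_data example shown earlier, using the
+ * PyArray_malloc/PyArray_free wrappers defined further down in this header.
+ *
+ *     static void
+ *     constant_multiplier_free(NpyAuxData *data)
+ *     {
+ *         PyArray_free(data);
+ *     }
+ *
+ *     static NpyAuxData *
+ *     constant_multiplier_clone(NpyAuxData *data)
+ *     {
+ *         constant_multiplier_aux_data *ret =
+ *                 PyArray_malloc(sizeof(constant_multiplier_aux_data));
+ *         if (ret != NULL) {
+ *             memcpy(ret, data, sizeof(constant_multiplier_aux_data));
+ *         }
+ *         return (NpyAuxData *)ret;
+ *     }
+ *
+ * A new instance then sets base.free and base.clone to these functions
+ * (and should zero the reserved fields).
+ */
+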
+#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr);
+#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr);
+
+/*
+ * Macros to define how array and dimension/strides data are
+ * allocated. These should be made private.
+ */
+
+#define NPY_USE_PYMEM 1
+
+
+#if NPY_USE_PYMEM == 1
+/* use the Raw versions which are safe to call with the GIL released */
+#define PyArray_malloc PyMem_RawMalloc
+#define PyArray_free PyMem_RawFree
+#define PyArray_realloc PyMem_RawRealloc
+#else
+#define PyArray_malloc malloc
+#define PyArray_free free
+#define PyArray_realloc realloc
+#endif
+
+/* Dimensions and strides */
+#define PyDimMem_NEW(size)                                         \
+    ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp)))
+
+#define PyDimMem_FREE(ptr) PyArray_free(ptr)
+
+#define PyDimMem_RENEW(ptr,size)                                   \
+        ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp)))
+
+/* forward declaration */
+struct _PyArray_Descr;
+
+/* These must deal with unaligned and swapped data if necessary */
+typedef PyObject * (PyArray_GetItemFunc) (void *, void *);
+typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *);
+
+typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp,
+                                     npy_intp, int, void *);
+
+typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *);
+typedef npy_bool (PyArray_NonzeroFunc)(void *, void *);
+
+
+/*
+ * These assume aligned and notswapped data -- a buffer will be used
+ * beforehand, or contiguous data will be obtained
+ */
+
+typedef int (PyArray_CompareFunc)(const void *, const void *, void *);
+typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *);
+
+typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *,
+                               npy_intp, void *);
+
+typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *,
+                                       void *);
+
+/*
+ * XXX the ignore argument should be removed next time the API version
+ * is bumped. It used to be the separator.
+ */
+typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr,
+                               char *ignore, struct _PyArray_Descr *);
+typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr,
+                                  struct _PyArray_Descr *);
+
+typedef int (PyArray_FillFunc)(void *, npy_intp, void *);
+
+typedef int (PyArray_SortFunc)(void *, npy_intp, void *);
+typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *);
+typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp,
+                                    npy_intp *, npy_intp *,
+                                    void *);
+typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp,
+                                       npy_intp *, npy_intp *,
+                                       void *);
+
+typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *);
+
+typedef int (PyArray_ScalarKindFunc)(void *);
+
+typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min,
+                                    void *max, void *out);
+typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in,
+                                       void *values, npy_intp nv);
+typedef int  (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray,
+                                       npy_intp nindarray, npy_intp n_outer,
+                                       npy_intp m_middle, npy_intp nelem,
+                                       NPY_CLIPMODE clipmode);
+
+typedef struct {
+        npy_intp *ptr;
+        int len;
+} PyArray_Dims;
+
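+/*
+ * Illustrative sketch: PyArray_Dims is the target structure for the "O&"
+ * converter PyArray_IntpConverter when parsing a shape argument.  Error
+ * handling abbreviated; the converter allocates ptr, which the caller
+ * frees with PyDimMem_FREE (defined above in this header).
+ *
+ *     PyArray_Dims shape = {NULL, 0};
+ *     if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter, &shape)) {
+ *         return NULL;
+ *     }
+ *     ... use shape.ptr[0] through shape.ptr[shape.len - 1] ...
+ *     PyDimMem_FREE(shape.ptr);
+ */
+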
+typedef struct {
+        /*
+         * Functions to cast to most other standard types
+         * Can have some NULL entries. The types
+         * DATETIME, TIMEDELTA, and HALF go into the castdict
+         * even though they are built-in.
+         */
+        PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE];
+
+        /* The next four functions *cannot* be NULL */
+
+        /*
+         * Functions to get and set items with standard Python types
+         * -- not array scalars
+         */
+        PyArray_GetItemFunc *getitem;
+        PyArray_SetItemFunc *setitem;
+
+        /*
+         * Copy and/or swap data.  Memory areas may not overlap
+         * Use memmove first if they might
+         */
+        PyArray_CopySwapNFunc *copyswapn;
+        PyArray_CopySwapFunc *copyswap;
+
+        /*
+         * Function to compare items
+         * Can be NULL
+         */
+        PyArray_CompareFunc *compare;
+
+        /*
+         * Function to select largest
+         * Can be NULL
+         */
+        PyArray_ArgFunc *argmax;
+
+        /*
+         * Function to compute dot product
+         * Can be NULL
+         */
+        PyArray_DotFunc *dotfunc;
+
+        /*
+         * Function to scan an ASCII file and
+         * place a single value plus possible separator
+         * Can be NULL
+         */
+        PyArray_ScanFunc *scanfunc;
+
+        /*
+         * Function to read a single value from a string
+         * and adjust the pointer; Can be NULL
+         */
+        PyArray_FromStrFunc *fromstr;
+
+        /*
+         * Function to determine if data is zero or not.
+         * If NULL, a default version is
+         * used at registration time.
+         */
+        PyArray_NonzeroFunc *nonzero;
+
+        /*
+         * Used for arange. Should return 0 on success
+         * and -1 on failure.
+         * Can be NULL.
+         */
+        PyArray_FillFunc *fill;
+
+        /*
+         * Function to fill arrays with scalar values
+         * Can be NULL
+         */
+        PyArray_FillWithScalarFunc *fillwithscalar;
+
+        /*
+         * Sorting functions
+         * Can be NULL
+         */
+        PyArray_SortFunc *sort[NPY_NSORTS];
+        PyArray_ArgSortFunc *argsort[NPY_NSORTS];
+
+        /*
+         * Dictionary of additional casting functions
+         * PyArray_VectorUnaryFuncs
+         * which can be populated to support casting
+         * to other registered types. Can be NULL
+         */
+        PyObject *castdict;
+
+        /*
+         * Functions useful for generalizing
+         * the casting rules.
+         * Can be NULL;
+         */
+        PyArray_ScalarKindFunc *scalarkind;
+        int **cancastscalarkindto;
+        int *cancastto;
+
+        PyArray_FastClipFunc *fastclip;
+        PyArray_FastPutmaskFunc *fastputmask;
+        PyArray_FastTakeFunc *fasttake;
+
+        /*
+         * Function to select smallest
+         * Can be NULL
+         */
+        PyArray_ArgFunc *argmin;
+
+} PyArray_ArrFuncs;
+
+/* The item must be reference counted when it is inserted or extracted. */
+#define NPY_ITEM_REFCOUNT   0x01
+/* Same as needing REFCOUNT */
+#define NPY_ITEM_HASOBJECT  0x01
+/* Convert to list for pickling */
+#define NPY_LIST_PICKLE     0x02
+/* The item is a POINTER  */
+#define NPY_ITEM_IS_POINTER 0x04
+/* memory needs to be initialized for this data-type */
+#define NPY_NEEDS_INIT      0x08
+/* operations need the Python C-API, so don't give up the thread (GIL). */
+#define NPY_NEEDS_PYAPI     0x10
+/* Use f.getitem when extracting elements of this data-type */
+#define NPY_USE_GETITEM     0x20
+/* Use f.setitem when creating a 0-d array from this data-type.*/
+#define NPY_USE_SETITEM     0x40
+/* A sticky flag specifically for structured arrays */
+#define NPY_ALIGNED_STRUCT  0x80
+
+/*
+ * These are inherited by the global data-type if any data-types in the
+ * fields have them
+ */
+#define NPY_FROM_FIELDS    (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \
+                            NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI)
+
+#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \
+                                NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \
+                                NPY_NEEDS_INIT | NPY_NEEDS_PYAPI)
+
+#define PyDataType_FLAGCHK(dtype, flag) \
+        (((dtype)->flags & (flag)) == (flag))
+
+#define PyDataType_REFCHK(dtype) \
+        PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)
+
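+/*
+ * Illustrative: PyDataType_REFCHK is the usual guard before doing
+ * reference-count handling of array items, e.g.:
+ *
+ *     if (PyDataType_REFCHK(PyArray_DESCR(arr))) {
+ *         ... items contain Python object references, so they must be
+ *             INCREF'd/DECREF'd when copied or overwritten ...
+ *     }
+ */
+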
+typedef struct _PyArray_Descr {
+        PyObject_HEAD
+        /*
+         * the type object representing an
+         * instance of this type -- should not
+         * be two type_numbers with the same type
+         * object.
+         */
+        PyTypeObject *typeobj;
+        /* kind for this type */
+        char kind;
+        /* unique-character representing this type */
+        char type;
+        /*
+         * '>' (big), '<' (little), '|'
+         * (not-applicable), or '=' (native).
+         */
+        char byteorder;
+        /* flags describing data type */
+        char flags;
+        /* number representing this type */
+        int type_num;
+        /* element size (itemsize) for this type */
+        int elsize;
+        /* alignment needed for this type */
+        int alignment;
+        /*
+         * Non-NULL if this type
+         * is an array (C-contiguous)
+         * of some other type
+         */
+        struct _arr_descr *subarray;
+        /*
+         * The fields dictionary for this type
+         * For statically defined descr this
+         * is always Py_None
+         */
+        PyObject *fields;
+        /*
+         * An ordered tuple of field names or NULL
+         * if no fields are defined
+         */
+        PyObject *names;
+        /*
+         * a table of functions specific for each
+         * basic data descriptor
+         */
+        PyArray_ArrFuncs *f;
+        /* Metadata about this dtype */
+        PyObject *metadata;
+        /*
+         * Metadata specific to the C implementation
+         * of the particular dtype. This was added
+         * for NumPy 1.7.0.
+         */
+        NpyAuxData *c_metadata;
+        /* Cached hash value (-1 if not yet computed).
+         * This was added for NumPy 2.0.0.
+         */
+        npy_hash_t hash;
+} PyArray_Descr;
+
+typedef struct _arr_descr {
+        PyArray_Descr *base;
+        PyObject *shape;       /* a tuple */
+} PyArray_ArrayDescr;
+
+/*
+ * Memory handler structure for array data.
+ */
+/* The declaration of free differs from PyMemAllocatorEx */
+typedef struct {
+    void *ctx;
+    void* (*malloc) (void *ctx, size_t size);
+    void* (*calloc) (void *ctx, size_t nelem, size_t elsize);
+    void* (*realloc) (void *ctx, void *ptr, size_t new_size);
+    void (*free) (void *ctx, void *ptr, size_t size);
+    /*
+     * This is the end of the version=1 struct. Only add new fields after
+     * this line
+     */
+} PyDataMemAllocator;
+
+typedef struct {
+    char name[127];  /* with the version byte below, a multiple of 64 to keep the struct aligned */
+    uint8_t version; /* currently 1 */
+    PyDataMemAllocator allocator;
+} PyDataMem_Handler;
+
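+/*
+ * Sketch (illustrative, assuming the PyDataMem_SetHandler API that
+ * accompanies this struct since NumPy 1.22): a pass-through handler that
+ * routes array-data allocation through the C library.  The ctx slot is
+ * unused here.
+ *
+ *     static void *my_malloc(void *ctx, size_t sz) { return malloc(sz); }
+ *     static void *my_calloc(void *ctx, size_t n, size_t e) { return calloc(n, e); }
+ *     static void *my_realloc(void *ctx, void *p, size_t sz) { return realloc(p, sz); }
+ *     static void my_free(void *ctx, void *p, size_t sz) { free(p); }
+ *
+ *     static PyDataMem_Handler my_handler = {
+ *         "my_handler",
+ *         1,
+ *         { NULL, my_malloc, my_calloc, my_realloc, my_free }
+ *     };
+ *
+ * The handler is installed by wrapping it in a PyCapsule and passing the
+ * capsule to PyDataMem_SetHandler().
+ */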
+
+/*
+ * The main array object structure.
+ *
+ * It has been recommended to use the inline functions defined below
+ * (PyArray_DATA and friends) to access fields here for a number of
+ * releases. Direct access to the members themselves is deprecated.
+ * To ensure that your code does not use deprecated access,
+ * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ * (or NPY_1_8_API_VERSION or higher as required).
+ */
+/* This struct will be moved to a private header in a future release */
+typedef struct tagPyArrayObject_fields {
+    PyObject_HEAD
+    /* Pointer to the raw data buffer */
+    char *data;
+    /* The number of dimensions, also called 'ndim' */
+    int nd;
+    /* The size in each dimension, also called 'shape' */
+    npy_intp *dimensions;
+    /*
+     * Number of bytes to jump to get to the
+     * next element in each dimension
+     */
+    npy_intp *strides;
+    /*
+     * This object is decref'd upon
+     * deletion of array. Except in the
+     * case of WRITEBACKIFCOPY which has
+     * special handling.
+     *
+     * For views it points to the original
+     * array, collapsed so no chains of
+     * views occur.
+     *
+     * For creation from buffer object it
+     * points to an object that should be
+     * decref'd on deletion
+     *
+     * For WRITEBACKIFCOPY flag this is an
+     * array to-be-updated upon calling
+     * PyArray_ResolveWritebackIfCopy
+     */
+    PyObject *base;
+    /* Pointer to type structure */
+    PyArray_Descr *descr;
+    /* Flags describing array -- see below */
+    int flags;
+    /* For weak references */
+    PyObject *weakreflist;
+#if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION
+    void *_buffer_info;  /* private buffer info, tagged to allow warning */
+#endif
+    /*
+     * For malloc/calloc/realloc/free per object
+     */
+#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
+    PyObject *mem_handler;
+#endif
+} PyArrayObject_fields;
+
+/*
+ * To hide the implementation details, we only expose
+ * the Python struct HEAD.
+ */
+#if !defined(NPY_NO_DEPRECATED_API) || \
+    (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION)
+/*
+ * Can't put this in npy_deprecated_api.h like the others.
+ * PyArrayObject field access is deprecated as of NumPy 1.7.
+ */
+typedef PyArrayObject_fields PyArrayObject;
+#else
+typedef struct tagPyArrayObject {
+        PyObject_HEAD
+} PyArrayObject;
+#endif
+
+/*
+ * Removed 2020-Nov-25, NumPy 1.20
+ * #define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields))
+ *
+ * The above macro was removed as it gave a false sense of a stable ABI
+ * with respect to the structures size.  If you require a runtime constant,
+ * you can use `PyArray_Type.tp_basicsize` instead.  Otherwise, please
+ * see the PyArrayObject documentation or ask the NumPy developers for
+ * information on how to correctly replace the macro in a way that is
+ * compatible with multiple NumPy versions.
+ */
+
+
+/* Array Flags Object */
+typedef struct PyArrayFlagsObject {
+        PyObject_HEAD
+        PyObject *arr;
+        int flags;
+} PyArrayFlagsObject;
+
+/* Mirrors buffer object to ptr */
+
+typedef struct {
+        PyObject_HEAD
+        PyObject *base;
+        void *ptr;
+        npy_intp len;
+        int flags;
+} PyArray_Chunk;
+
+typedef struct {
+    NPY_DATETIMEUNIT base;
+    int num;
+} PyArray_DatetimeMetaData;
+
+typedef struct {
+    NpyAuxData base;
+    PyArray_DatetimeMetaData meta;
+} PyArray_DatetimeDTypeMetaData;
+
+/*
+ * This structure contains an exploded view of a date-time value.
+ * NaT is represented by year == NPY_DATETIME_NAT.
+ */
+typedef struct {
+        npy_int64 year;
+        npy_int32 month, day, hour, min, sec, us, ps, as;
+} npy_datetimestruct;
+
+/* This is not used internally. */
+typedef struct {
+        npy_int64 day;
+        npy_int32 sec, us, ps, as;
+} npy_timedeltastruct;
+
+typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
+
+/*
+ * Means c-style contiguous (last index varies the fastest). The data
+ * elements right after each other.
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_C_CONTIGUOUS    0x0001
+
+/*
+ * Set if array is a contiguous Fortran array: the first index varies
+ * the fastest in memory (strides array is reverse of C-contiguous
+ * array)
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_F_CONTIGUOUS    0x0002
+
+/*
+ * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a
+ * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with
+ * more than one dimension can be C_CONTIGUOUS and F_CONTIGUOUS
+ * at the same time if they have either zero or one element.
+ * A higher dimensional array always has the same contiguity flags as
+ * `array.squeeze()`; dimensions with `array.shape[dimension] == 1` are
+ * effectively ignored when checking for contiguity.
+ */
+
+/*
+ * If set, the array owns the data: it will be free'd when the array
+ * is deleted.
+ *
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_OWNDATA         0x0004
+
+/*
+ * An array never has the next four set; they're only used as parameter
+ * flags to the various FromAny functions
+ *
+ * This flag may be requested in constructor functions.
+ */
+
+/* Cause a cast to occur regardless of whether or not it is safe. */
+#define NPY_ARRAY_FORCECAST       0x0010
+
+/*
+ * Always copy the array. Returned arrays are always CONTIGUOUS,
+ * ALIGNED, and WRITEABLE. See also: NPY_ARRAY_ENSURENOCOPY = 0x4000.
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ENSURECOPY      0x0020
+
+/*
+ * Make sure the returned array is a base-class ndarray
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ENSUREARRAY     0x0040
+
+/*
+ * Make sure that the strides are in units of the element size. Needed
+ * for some operations with record-arrays.
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ELEMENTSTRIDES  0x0080
+
+/*
+ * Array data is aligned on the appropriate memory address for the type
+ * stored according to how the compiler would align things (e.g., an
+ * array of integers (4 bytes each) starts on a memory address that's
+ * a multiple of 4)
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_ALIGNED         0x0100
+
+/*
+ * Array data has the native endianness
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_NOTSWAPPED      0x0200
+
+/*
+ * Array data is writeable
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_WRITEABLE       0x0400
+
+/*
+ * If this flag is set, then base contains a pointer to an array of
+ * the same size that should be updated with the current contents of
+ * this array when PyArray_ResolveWritebackIfCopy is called.
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000
+
+/*
+ * No copy may be made while converting from an object/array (result is a view)
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ENSURENOCOPY 0x4000
+
+/*
+ * NOTE: there are also internal flags defined in multiarray/arrayobject.h,
+ * which start at bit 31 and work down.
+ */
+
+#define NPY_ARRAY_BEHAVED      (NPY_ARRAY_ALIGNED | \
+                                NPY_ARRAY_WRITEABLE)
+#define NPY_ARRAY_BEHAVED_NS   (NPY_ARRAY_ALIGNED | \
+                                NPY_ARRAY_WRITEABLE | \
+                                NPY_ARRAY_NOTSWAPPED)
+#define NPY_ARRAY_CARRAY       (NPY_ARRAY_C_CONTIGUOUS | \
+                                NPY_ARRAY_BEHAVED)
+#define NPY_ARRAY_CARRAY_RO    (NPY_ARRAY_C_CONTIGUOUS | \
+                                NPY_ARRAY_ALIGNED)
+#define NPY_ARRAY_FARRAY       (NPY_ARRAY_F_CONTIGUOUS | \
+                                NPY_ARRAY_BEHAVED)
+#define NPY_ARRAY_FARRAY_RO    (NPY_ARRAY_F_CONTIGUOUS | \
+                                NPY_ARRAY_ALIGNED)
+#define NPY_ARRAY_DEFAULT      (NPY_ARRAY_CARRAY)
+#define NPY_ARRAY_IN_ARRAY     (NPY_ARRAY_CARRAY_RO)
+#define NPY_ARRAY_OUT_ARRAY    (NPY_ARRAY_CARRAY)
+#define NPY_ARRAY_INOUT_ARRAY  (NPY_ARRAY_CARRAY)
+#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \
+                                NPY_ARRAY_WRITEBACKIFCOPY)
+#define NPY_ARRAY_IN_FARRAY    (NPY_ARRAY_FARRAY_RO)
+#define NPY_ARRAY_OUT_FARRAY   (NPY_ARRAY_FARRAY)
+#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY)
+#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \
+                                NPY_ARRAY_WRITEBACKIFCOPY)
+
+#define NPY_ARRAY_UPDATE_ALL   (NPY_ARRAY_C_CONTIGUOUS | \
+                                NPY_ARRAY_F_CONTIGUOUS | \
+                                NPY_ARRAY_ALIGNED)
+
+/* This flag is for the array interface, not PyArrayObject */
+#define NPY_ARR_HAS_DESCR  0x0800
+
+
+
+
+/*
+ * Size of internal buffers used for alignment. Make BUFSIZE a multiple
+ * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned
+ */
+#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble))
+#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000)
+#define NPY_BUFSIZE 8192
+/* buffer stress test size: */
+/*#define NPY_BUFSIZE 17*/
+
+#define PyArray_MAX(a,b) (((a)>(b))?(a):(b))
+#define PyArray_MIN(a,b) (((a)<(b))?(a):(b))
+#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \
+                               ((p).real < (q).real)))
+#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \
+                               ((p).real > (q).real)))
+#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \
+                               ((p).real <= (q).real)))
+#define PyArray_CGE(p,q) ((((p).real==(q).real) ? ((p).imag >= (q).imag) : \
+                               ((p).real >= (q).real)))
+#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag))
+#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag))
+
+/*
+ * C API: consists of Macros and functions.  The MACROS are defined
+ * here.
+ */
+
+
+#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS)
+#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS((m), NPY_ARRAY_WRITEABLE)
+#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS((m), NPY_ARRAY_ALIGNED)
+
+#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS)
+#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_F_CONTIGUOUS)
+
+/* the variable is used in some places, so always define it */
+#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL;
+#if NPY_ALLOW_THREADS
+#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS
+#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS
+#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0);
+#define NPY_END_THREADS   do { if (_save) \
+                { PyEval_RestoreThread(_save); _save = NULL;} } while (0);
+#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if ((loop_size) > 500) \
+                { _save = PyEval_SaveThread();} } while (0);
+
+#define NPY_BEGIN_THREADS_DESCR(dtype) \
+        do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \
+                NPY_BEGIN_THREADS;} while (0);
+
+#define NPY_END_THREADS_DESCR(dtype) \
+        do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \
+                NPY_END_THREADS; } while (0);
+
+#define NPY_ALLOW_C_API_DEF  PyGILState_STATE __save__;
+#define NPY_ALLOW_C_API      do {__save__ = PyGILState_Ensure();} while (0);
+#define NPY_DISABLE_C_API    do {PyGILState_Release(__save__);} while (0);
+#else
+#define NPY_BEGIN_ALLOW_THREADS
+#define NPY_END_ALLOW_THREADS
+#define NPY_BEGIN_THREADS
+#define NPY_END_THREADS
+#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size)
+#define NPY_BEGIN_THREADS_DESCR(dtype)
+#define NPY_END_THREADS_DESCR(dtype)
+#define NPY_ALLOW_C_API_DEF
+#define NPY_ALLOW_C_API
+#define NPY_DISABLE_C_API
+#endif
+
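+/*
+ * Typical usage pattern for the macros above (illustrative):
+ *
+ *     NPY_BEGIN_THREADS_DEF;
+ *     NPY_BEGIN_THREADS_DESCR(dtype);
+ *     ... long-running loop that does not touch the Python C-API ...
+ *     NPY_END_THREADS_DESCR(dtype);
+ *
+ * The _DESCR variants take a descriptor because they only release the GIL
+ * when the dtype does not require the Python API (NPY_NEEDS_PYAPI unset);
+ * when NPY_ALLOW_THREADS is 0 they all compile away to nothing.
+ */
+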
+/**********************************
+ * The nditer object, added in 1.6
+ **********************************/
+
+/* The actual structure of the iterator is an internal detail */
+typedef struct NpyIter_InternalOnly NpyIter;
+
+/* Iterator function pointers that may be specialized */
+typedef int (NpyIter_IterNextFunc)(NpyIter *iter);
+typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter,
+                                      npy_intp *outcoords);
+
+/*** Global flags that may be passed to the iterator constructors ***/
+
+/* Track an index representing C order */
+#define NPY_ITER_C_INDEX                    0x00000001
+/* Track an index representing Fortran order */
+#define NPY_ITER_F_INDEX                    0x00000002
+/* Track a multi-index */
+#define NPY_ITER_MULTI_INDEX                0x00000004
+/* User code external to the iterator does the 1-dimensional innermost loop */
+#define NPY_ITER_EXTERNAL_LOOP              0x00000008
+/* Convert all the operands to a common data type */
+#define NPY_ITER_COMMON_DTYPE               0x00000010
+/* Operands may hold references, requiring API access during iteration */
+#define NPY_ITER_REFS_OK                    0x00000020
+/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */
+#define NPY_ITER_ZEROSIZE_OK                0x00000040
+/* Permits reductions (size-0 stride with dimension size > 1) */
+#define NPY_ITER_REDUCE_OK                  0x00000080
+/* Enables sub-range iteration */
+#define NPY_ITER_RANGED                     0x00000100
+/* Enables buffering */
+#define NPY_ITER_BUFFERED                   0x00000200
+/* When buffering is enabled, grows the inner loop if possible */
+#define NPY_ITER_GROWINNER                  0x00000400
+/* Delay allocation of buffers until first Reset* call */
+#define NPY_ITER_DELAY_BUFALLOC             0x00000800
+/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */
+#define NPY_ITER_DONT_NEGATE_STRIDES        0x00001000
+/*
+ * If output operands overlap with other operands (based on heuristics that
+ * have false positives but no false negatives), make temporary copies to
+ * eliminate overlap.
+ */
+#define NPY_ITER_COPY_IF_OVERLAP            0x00002000
+
+/*** Per-operand flags that may be passed to the iterator constructors ***/
+
+/* The operand will be read from and written to */
+#define NPY_ITER_READWRITE                  0x00010000
+/* The operand will only be read from */
+#define NPY_ITER_READONLY                   0x00020000
+/* The operand will only be written to */
+#define NPY_ITER_WRITEONLY                  0x00040000
+/* The operand's data must be in native byte order */
+#define NPY_ITER_NBO                        0x00080000
+/* The operand's data must be aligned */
+#define NPY_ITER_ALIGNED                    0x00100000
+/* The operand's data must be contiguous (within the inner loop) */
+#define NPY_ITER_CONTIG                     0x00200000
+/* The operand may be copied to satisfy requirements */
+#define NPY_ITER_COPY                       0x00400000
+/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */
+#define NPY_ITER_UPDATEIFCOPY               0x00800000
+/* Allocate the operand if it is NULL */
+#define NPY_ITER_ALLOCATE                   0x01000000
+/* If an operand is allocated, don't use any subtype */
+#define NPY_ITER_NO_SUBTYPE                 0x02000000
+/* This is a virtual array slot, operand is NULL but temporary data is there */
+#define NPY_ITER_VIRTUAL                    0x04000000
+/* Require that the dimension match the iterator dimensions exactly */
+#define NPY_ITER_NO_BROADCAST               0x08000000
+/* A mask is being used on this array, affects buffer -> array copy */
+#define NPY_ITER_WRITEMASKED                0x10000000
+/* This array is the mask for all WRITEMASKED operands */
+#define NPY_ITER_ARRAYMASK                  0x20000000
+/* Assume iterator order data access for COPY_IF_OVERLAP */
+#define NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE 0x40000000
+
+#define NPY_ITER_GLOBAL_FLAGS               0x0000ffff
+#define NPY_ITER_PER_OP_FLAGS               0xffff0000
+
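+/*
+ * Illustrative sketch: a buffered, external-loop iteration over a single
+ * read-only operand using the flags above (see the NpyIter API for the
+ * full protocol; error handling abbreviated).
+ *
+ *     NpyIter *iter = NpyIter_New(arr,
+ *             NPY_ITER_READONLY | NPY_ITER_EXTERNAL_LOOP | NPY_ITER_BUFFERED,
+ *             NPY_KEEPORDER, NPY_NO_CASTING, NULL);
+ *     NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
+ *     char **dataptr = NpyIter_GetDataPtrArray(iter);
+ *     npy_intp *strideptr = NpyIter_GetInnerStrideArray(iter);
+ *     npy_intp *sizeptr = NpyIter_GetInnerLoopSizePtr(iter);
+ *     do {
+ *         char *data = *dataptr;
+ *         for (npy_intp i = 0; i < *sizeptr; i++, data += *strideptr) {
+ *             ... process the element at data ...
+ *         }
+ *     } while (iternext(iter));
+ *     NpyIter_Deallocate(iter);
+ */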
+
+/*****************************
+ * Basic iterator object
+ *****************************/
+
+/* FWD declaration */
+typedef struct PyArrayIterObject_tag PyArrayIterObject;
+
+/*
+ * type of the function which translates a set of coordinates to a
+ * pointer to the data
+ */
+typedef char* (*npy_iter_get_dataptr_t)(
+        PyArrayIterObject* iter, const npy_intp*);
+
+struct PyArrayIterObject_tag {
+        PyObject_HEAD
+        int               nd_m1;            /* number of dimensions - 1 */
+        npy_intp          index, size;
+        npy_intp          coordinates[NPY_MAXDIMS];/* N-dimensional loop */
+        npy_intp          dims_m1[NPY_MAXDIMS];    /* ao->dimensions - 1 */
+        npy_intp          strides[NPY_MAXDIMS];    /* ao->strides or fake */
+        npy_intp          backstrides[NPY_MAXDIMS];/* how far to jump back */
+        npy_intp          factors[NPY_MAXDIMS];     /* shape factors */
+        PyArrayObject     *ao;
+        char              *dataptr;        /* pointer to current item*/
+        npy_bool          contiguous;
+
+        npy_intp          bounds[NPY_MAXDIMS][2];
+        npy_intp          limits[NPY_MAXDIMS][2];
+        npy_intp          limits_sizes[NPY_MAXDIMS];
+        npy_iter_get_dataptr_t translate;
+} ;
+
+
+/* Iterator API */
+#define PyArrayIter_Check(op) PyObject_TypeCheck((op), &PyArrayIter_Type)
+
+#define _PyAIT(it) ((PyArrayIterObject *)(it))
+#define PyArray_ITER_RESET(it) do { \
+        _PyAIT(it)->index = 0; \
+        _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
+        memset(_PyAIT(it)->coordinates, 0, \
+               (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \
+} while (0)
+
+#define _PyArray_ITER_NEXT1(it) do { \
+        (it)->dataptr += _PyAIT(it)->strides[0]; \
+        (it)->coordinates[0]++; \
+} while (0)
+
+#define _PyArray_ITER_NEXT2(it) do { \
+        if ((it)->coordinates[1] < (it)->dims_m1[1]) { \
+                (it)->coordinates[1]++; \
+                (it)->dataptr += (it)->strides[1]; \
+        } \
+        else { \
+                (it)->coordinates[1] = 0; \
+                (it)->coordinates[0]++; \
+                (it)->dataptr += (it)->strides[0] - \
+                        (it)->backstrides[1]; \
+        } \
+} while (0)
+
+#define PyArray_ITER_NEXT(it) do { \
+        _PyAIT(it)->index++; \
+        if (_PyAIT(it)->nd_m1 == 0) { \
+                _PyArray_ITER_NEXT1(_PyAIT(it)); \
+        } \
+        else if (_PyAIT(it)->contiguous) \
+                _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \
+        else if (_PyAIT(it)->nd_m1 == 1) { \
+                _PyArray_ITER_NEXT2(_PyAIT(it)); \
+        } \
+        else { \
+                int __npy_i; \
+                for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \
+                        if (_PyAIT(it)->coordinates[__npy_i] < \
+                            _PyAIT(it)->dims_m1[__npy_i]) { \
+                                _PyAIT(it)->coordinates[__npy_i]++; \
+                                _PyAIT(it)->dataptr += \
+                                        _PyAIT(it)->strides[__npy_i]; \
+                                break; \
+                        } \
+                        else { \
+                                _PyAIT(it)->coordinates[__npy_i] = 0; \
+                                _PyAIT(it)->dataptr -= \
+                                        _PyAIT(it)->backstrides[__npy_i]; \
+                        } \
+                } \
+        } \
+} while (0)
+
+#define PyArray_ITER_GOTO(it, destination) do { \
+        int __npy_i; \
+        _PyAIT(it)->index = 0; \
+        _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
+        for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \
+                if (destination[__npy_i] < 0) { \
+                        destination[__npy_i] += \
+                                _PyAIT(it)->dims_m1[__npy_i]+1; \
+                } \
+                _PyAIT(it)->dataptr += destination[__npy_i] * \
+                        _PyAIT(it)->strides[__npy_i]; \
+                _PyAIT(it)->coordinates[__npy_i] = \
+                        destination[__npy_i]; \
+                _PyAIT(it)->index += destination[__npy_i] * \
+                        ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \
+                          _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \
+        } \
+} while (0)
+
+#define PyArray_ITER_GOTO1D(it, ind) do { \
+        int __npy_i; \
+        npy_intp __npy_ind = (npy_intp)(ind); \
+        if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \
+        _PyAIT(it)->index = __npy_ind; \
+        if (_PyAIT(it)->nd_m1 == 0) { \
+                _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \
+                        __npy_ind * _PyAIT(it)->strides[0]; \
+        } \
+        else if (_PyAIT(it)->contiguous) \
+                _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \
+                        __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \
+        else { \
+                _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
+                for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \
+                     __npy_i++) { \
+                        _PyAIT(it)->coordinates[__npy_i] = \
+                                (__npy_ind / _PyAIT(it)->factors[__npy_i]); \
+                        _PyAIT(it)->dataptr += \
+                                (__npy_ind / _PyAIT(it)->factors[__npy_i]) \
+                                * _PyAIT(it)->strides[__npy_i]; \
+                        __npy_ind %= _PyAIT(it)->factors[__npy_i]; \
+                } \
+        } \
+} while (0)
+
+#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr))
+
+#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size)
+
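+/*
+ * Illustrative sketch: the canonical flat-iteration loop built from the
+ * macros above, using PyArray_IterNew from the multiarray API and
+ * assuming an NPY_DOUBLE array:
+ *
+ *     PyObject *it = PyArray_IterNew((PyObject *)arr);
+ *     while (PyArray_ITER_NOTDONE(it)) {
+ *         double v = *(double *)PyArray_ITER_DATA(it);
+ *         ...
+ *         PyArray_ITER_NEXT(it);
+ *     }
+ *     Py_DECREF(it);
+ */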
+
+/*
+ * Any object passed to PyArray_Broadcast must be binary compatible
+ * with this structure.
+ */
+
+typedef struct {
+        PyObject_HEAD
+        int                  numiter;                 /* number of iters */
+        npy_intp             size;                    /* broadcasted size */
+        npy_intp             index;                   /* current index */
+        int                  nd;                      /* number of dims */
+        npy_intp             dimensions[NPY_MAXDIMS]; /* dimensions */
+        PyArrayIterObject    *iters[NPY_MAXARGS];     /* iterators */
+} PyArrayMultiIterObject;
+
+#define _PyMIT(m) ((PyArrayMultiIterObject *)(m))
+#define PyArray_MultiIter_RESET(multi) do {                                   \
+        int __npy_mi;                                                         \
+        _PyMIT(multi)->index = 0;                                             \
+        for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter;  __npy_mi++) {    \
+                PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]);           \
+        }                                                                     \
+} while (0)
+
+#define PyArray_MultiIter_NEXT(multi) do {                                    \
+        int __npy_mi;                                                         \
+        _PyMIT(multi)->index++;                                               \
+        for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter;   __npy_mi++) {   \
+                PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]);            \
+        }                                                                     \
+} while (0)
+
+#define PyArray_MultiIter_GOTO(multi, dest) do {                            \
+        int __npy_mi;                                                       \
+        for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) {   \
+                PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest);    \
+        }                                                                   \
+        _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index;              \
+} while (0)
+
+#define PyArray_MultiIter_GOTO1D(multi, ind) do {                          \
+        int __npy_mi;                                                      \
+        for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) {  \
+                PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind);  \
+        }                                                                  \
+        _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index;             \
+} while (0)
+
+#define PyArray_MultiIter_DATA(multi, i)                \
+        ((void *)(_PyMIT(multi)->iters[i]->dataptr))
+
+#define PyArray_MultiIter_NEXTi(multi, i)               \
+        PyArray_ITER_NEXT(_PyMIT(multi)->iters[i])
+
+#define PyArray_MultiIter_NOTDONE(multi)                \
+        (_PyMIT(multi)->index < _PyMIT(multi)->size)
+
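+/*
+ * Illustrative sketch: iterating two arrays together with broadcasting,
+ * via PyArray_MultiIterNew and the macros above (NPY_DOUBLE assumed):
+ *
+ *     PyArrayMultiIterObject *mit =
+ *             (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, a, b);
+ *     while (PyArray_MultiIter_NOTDONE(mit)) {
+ *         double x = *(double *)PyArray_MultiIter_DATA(mit, 0);
+ *         double y = *(double *)PyArray_MultiIter_DATA(mit, 1);
+ *         ...
+ *         PyArray_MultiIter_NEXT(mit);
+ *     }
+ *     Py_DECREF(mit);
+ */
+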
+/*
+ * Store the information needed for fancy-indexing over an array. The
+ * fields are slightly unordered to keep consec, dataptr and subspace
+ * where they were originally.
+ */
+typedef struct {
+        PyObject_HEAD
+        /*
+         * Multi-iterator portion --- needs to be present in this
+         * order to work with PyArray_Broadcast
+         */
+
+        int                   numiter;                 /* number of index-array
+                                                          iterators */
+        npy_intp              size;                    /* size of broadcasted
+                                                          result */
+        npy_intp              index;                   /* current index */
+        int                   nd;                      /* number of dims */
+        npy_intp              dimensions[NPY_MAXDIMS]; /* dimensions */
+        NpyIter               *outer;                  /* index objects
+                                                          iterator */
+        void                  *unused[NPY_MAXDIMS - 2];
+        PyArrayObject         *array;
+        /* Flat iterator for the indexed array. Kept solely for compatibility. */
+        PyArrayIterObject     *ait;
+
+        /*
+         * Subspace array. For binary compatibility (was an iterator,
+         * but only the check for NULL should be used).
+         */
+        PyArrayObject         *subspace;
+
+        /*
+         * if subspace iteration, then this is the array of axes in
+         * the underlying array represented by the index objects
+         */
+        int                   iteraxes[NPY_MAXDIMS];
+        npy_intp              fancy_strides[NPY_MAXDIMS];
+
+        /* pointer when all fancy indices are 0 */
+        char                  *baseoffset;
+
+        /*
+         * after binding consec denotes at which axis the fancy axes
+         * are inserted.
+         */
+        int                   consec;
+        char                  *dataptr;
+
+        int                   nd_fancy;
+        npy_intp              fancy_dims[NPY_MAXDIMS];
+
+        /*
+         * Whether the iterator (any of the iterators) requires API.  This is
+         * unused by NumPy itself; ArrayMethod flags are more precise.
+         */
+        int                   needs_api;
+
+        /*
+         * Extra op information.
+         */
+        PyArrayObject         *extra_op;
+        PyArray_Descr         *extra_op_dtype;         /* desired dtype */
+        npy_uint32            *extra_op_flags;         /* Iterator flags */
+
+        NpyIter               *extra_op_iter;
+        NpyIter_IterNextFunc  *extra_op_next;
+        char                  **extra_op_ptrs;
+
+        /*
+         * Information about the iteration state.
+         */
+        NpyIter_IterNextFunc  *outer_next;
+        char                  **outer_ptrs;
+        npy_intp              *outer_strides;
+
+        /*
+         * Information about the subspace iterator.
+         */
+        NpyIter               *subspace_iter;
+        NpyIter_IterNextFunc  *subspace_next;
+        char                  **subspace_ptrs;
+        npy_intp              *subspace_strides;
+
+        /* Count for the external loop (whichever it is) for API iteration */
+        npy_intp              iter_count;
+
+} PyArrayMapIterObject;
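+
+/*
+ * For illustration only (not part of the original header): this struct
+ * backs fancy indexing at the Python level, e.g.
+ *
+ *   a[ind1, :, ind2]    # ind1/ind2 are integer or boolean index arrays
+ *
+ * where the index arrays are broadcast together (the multi-iterator
+ * portion above) and the remaining slice axes form the subspace.
+ */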
+
+enum {
+    NPY_NEIGHBORHOOD_ITER_ZERO_PADDING,
+    NPY_NEIGHBORHOOD_ITER_ONE_PADDING,
+    NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING,
+    NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING,
+    NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING
+};
+
+typedef struct {
+    PyObject_HEAD
+
+    /*
+     * PyArrayIterObject part: keep this in this exact order
+     */
+    int               nd_m1;            /* number of dimensions - 1 */
+    npy_intp          index, size;
+    npy_intp          coordinates[NPY_MAXDIMS];/* N-dimensional loop */
+    npy_intp          dims_m1[NPY_MAXDIMS];    /* ao->dimensions - 1 */
+    npy_intp          strides[NPY_MAXDIMS];    /* ao->strides or fake */
+    npy_intp          backstrides[NPY_MAXDIMS];/* how far to jump back */
+    npy_intp          factors[NPY_MAXDIMS];     /* shape factors */
+    PyArrayObject     *ao;
+    char              *dataptr;        /* pointer to current item*/
+    npy_bool          contiguous;
+
+    npy_intp          bounds[NPY_MAXDIMS][2];
+    npy_intp          limits[NPY_MAXDIMS][2];
+    npy_intp          limits_sizes[NPY_MAXDIMS];
+    npy_iter_get_dataptr_t translate;
+
+    /*
+     * New members
+     */
+    npy_intp nd;
+
+    /* Dimensions is the dimension of the array */
+    npy_intp dimensions[NPY_MAXDIMS];
+
+    /*
+     * Neighborhood points coordinates are computed relatively to the
+     * point pointed by _internal_iter
+     */
+    PyArrayIterObject* _internal_iter;
+    /*
+     * To keep a reference to the representation of the constant value
+     * for constant padding
+     */
+    char* constant;
+
+    int mode;
+} PyArrayNeighborhoodIterObject;
+
+/*
+ * Neighborhood iterator API
+ */
+
+/* General: those work for any mode */
+static inline int
+PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter);
+static inline int
+PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter);
+#if 0
+static inline int
+PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter);
+#endif
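+
+/*
+ * For illustration only (not part of the original header): a minimal
+ * sketch of the usual iteration loop over a hypothetical, already
+ * constructed PyArrayNeighborhoodIterObject *iter:
+ *
+ *   npy_intp i;
+ *   PyArrayNeighborhoodIter_Reset(iter);
+ *   for (i = 0; i < iter->size; i++) {
+ *       // iter->dataptr -> current neighborhood point, padded per iter->mode
+ *       PyArrayNeighborhoodIter_Next(iter);
+ *   }
+ */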
+
+/*
+ * Include inline implementations - functions defined there are not
+ * considered public API
+ */
+#define NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_
+#include "_neighborhood_iterator_imp.h"
+#undef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_
+
+
+
+/* The default array type */
+#define NPY_DEFAULT_TYPE NPY_DOUBLE
+
+/*
+ * All sorts of useful ways to look into a PyArrayObject. It is recommended
+ * to use PyArrayObject * objects instead of always casting from PyObject *,
+ * for improved type checking.
+ *
+ * In many cases here the macro versions of the accessors are deprecated,
+ * but can't be immediately changed to inline functions because the
+ * preexisting macros accept PyObject * and do automatic casts. Inline
+ * functions accepting PyArrayObject * provide some compile-time
+ * checking of correctness when working with these objects in C.
+ */
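+
+/*
+ * For illustration only (not part of the original header): a hedged sketch
+ * of the recommended access pattern through the typed accessors, assuming
+ * `obj` is a PyObject * already verified to be an ndarray:
+ *
+ *   PyArrayObject *arr = (PyArrayObject *)obj;
+ *   npy_intp n = PyArray_DIM(arr, 0);
+ *   char *data = PyArray_BYTES(arr);
+ *   for (npy_intp i = 0; i < n; i++) {
+ *       char *item = data + i * PyArray_STRIDE(arr, 0);
+ *       // use strides rather than assuming contiguity
+ *   }
+ */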
+
+#define PyArray_ISONESEGMENT(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \
+                                 PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS))
+
+#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \
+                             (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS)))
+
+#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? \
+                               NPY_ARRAY_F_CONTIGUOUS : 0))
+
+#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API))
+/*
+ * Changing access macros into functions, to allow for future hiding
+ * of the internal memory layout. This later hiding will allow the 2.x series
+ * to change the internal representation of arrays without affecting
+ * ABI compatibility.
+ */
+
+static inline int
+PyArray_NDIM(const PyArrayObject *arr)
+{
+    return ((PyArrayObject_fields *)arr)->nd;
+}
+
+static inline void *
+PyArray_DATA(PyArrayObject *arr)
+{
+    return ((PyArrayObject_fields *)arr)->data;
+}
+
+static inline char *
+PyArray_BYTES(PyArrayObject *arr)
+{
+    return ((PyArrayObject_fields *)arr)->data;
+}
+
+static inline npy_intp *
+PyArray_DIMS(PyArrayObject *arr)
+{
+    return ((PyArrayObject_fields *)arr)->dimensions;
+}
+
+static inline npy_intp *
+PyArray_STRIDES(PyArrayObject *arr)
+{
+    return ((PyArrayObject_fields *)arr)->strides;
+}
+
+static inline npy_intp
+PyArray_DIM(const PyArrayObject *arr, int idim)
+{
+    return ((PyArrayObject_fields *)arr)->dimensions[idim];
+}
+
+static inline npy_intp
+PyArray_STRIDE(const PyArrayObject *arr, int istride)
+{
+    return ((PyArrayObject_fields *)arr)->strides[istride];
+}
+
+static inline NPY_RETURNS_BORROWED_REF PyObject *
+PyArray_BASE(PyArrayObject *arr)
+{
+    return ((PyArrayObject_fields *)arr)->base;
+}
+
+static inline NPY_RETURNS_BORROWED_REF PyArray_Descr *
+PyArray_DESCR(PyArrayObject *arr)
+{
+    return ((PyArrayObject_fields *)arr)->descr;
+}
+
+static inline int
+PyArray_FLAGS(const PyArrayObject *arr)
+{
+    return ((PyArrayObject_fields *)arr)->flags;
+}
+
+static inline npy_intp
+PyArray_ITEMSIZE(const PyArrayObject *arr)
+{
+    return ((PyArrayObject_fields *)arr)->descr->elsize;
+}
+
+static inline int
+PyArray_TYPE(const PyArrayObject *arr)
+{
+    return ((PyArrayObject_fields *)arr)->descr->type_num;
+}
+
+static inline int
+PyArray_CHKFLAGS(const PyArrayObject *arr, int flags)
+{
+    return (PyArray_FLAGS(arr) & flags) == flags;
+}
+
+static inline PyObject *
+PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr)
+{
+    return ((PyArrayObject_fields *)arr)->descr->f->getitem(
+                                        (void *)itemptr, (PyArrayObject *)arr);
+}
+
+/*
+ * SETITEM should only be used if it is known that the value is a scalar
+ * and of a type understood by the array's dtype.
+ * Use `PyArray_Pack` if the value may be of a different dtype.
+ */
+static inline int
+PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v)
+{
+    return ((PyArrayObject_fields *)arr)->descr->f->setitem(v, itemptr, arr);
+}
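+
+/*
+ * For illustration only (not part of the original header): a sketch of the
+ * GETITEM/SETITEM pair on a hypothetical element pointer `itemptr`:
+ *
+ *   PyObject *val = PyArray_GETITEM(arr, itemptr);   // new reference
+ *   if (val == NULL) { goto fail; }
+ *   if (PyArray_SETITEM(arr, itemptr, val) < 0) { goto fail; }
+ *   Py_DECREF(val);
+ *
+ * As the note above says, SETITEM assumes the value already matches the
+ * array's dtype; use PyArray_Pack when it may not.
+ */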
+
+#else
+
+/* These macros are deprecated as of NumPy 1.7. */
+#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd)
+#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data)
+#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data)
+#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions)
+#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides)
+#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n])
+#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n])
+#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base)
+#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr)
+#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags)
+#define PyArray_CHKFLAGS(m, FLAGS) \
+        ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS))
+#define PyArray_ITEMSIZE(obj) \
+                    (((PyArrayObject_fields *)(obj))->descr->elsize)
+#define PyArray_TYPE(obj) \
+                    (((PyArrayObject_fields *)(obj))->descr->type_num)
+#define PyArray_GETITEM(obj,itemptr) \
+        PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \
+                                     (PyArrayObject *)(obj))
+
+#define PyArray_SETITEM(obj,itemptr,v) \
+        PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \
+                                     (char *)(itemptr), \
+                                     (PyArrayObject *)(obj))
+#endif
+
+static inline PyArray_Descr *
+PyArray_DTYPE(PyArrayObject *arr)
+{
+    return ((PyArrayObject_fields *)arr)->descr;
+}
+
+static inline npy_intp *
+PyArray_SHAPE(PyArrayObject *arr)
+{
+    return ((PyArrayObject_fields *)arr)->dimensions;
+}
+
+/*
+ * Enables the specified array flags. Does no checking,
+ * assumes you know what you're doing.
+ */
+static inline void
+PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags)
+{
+    ((PyArrayObject_fields *)arr)->flags |= flags;
+}
+
+/*
+ * Clears the specified array flags. Does no checking,
+ * assumes you know what you're doing.
+ */
+static inline void
+PyArray_CLEARFLAGS(PyArrayObject *arr, int flags)
+{
+    ((PyArrayObject_fields *)arr)->flags &= ~flags;
+}
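+
+/*
+ * For illustration only (not part of the original header): because these
+ * helpers do no checking, they are only safe when the flag change matches
+ * reality, e.g. deliberately making an array read-only:
+ *
+ *   PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEABLE);
+ *   assert(!PyArray_CHKFLAGS(arr, NPY_ARRAY_WRITEABLE));
+ */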
+
+#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
+    static inline NPY_RETURNS_BORROWED_REF PyObject *
+    PyArray_HANDLER(PyArrayObject *arr)
+    {
+        return ((PyArrayObject_fields *)arr)->mem_handler;
+    }
+#endif
+
+#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL)
+
+#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) ||   \
+                                 ((type) == NPY_USHORT) ||     \
+                                 ((type) == NPY_UINT) ||       \
+                                 ((type) == NPY_ULONG) ||      \
+                                 ((type) == NPY_ULONGLONG))
+
+#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) ||      \
+                               ((type) == NPY_SHORT) ||        \
+                               ((type) == NPY_INT) ||          \
+                               ((type) == NPY_LONG) ||         \
+                               ((type) == NPY_LONGLONG))
+
+#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) &&     \
+                                ((type) <= NPY_ULONGLONG))
+
+#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \
+                              ((type) <= NPY_LONGDOUBLE)) || \
+                              ((type) == NPY_HALF))
+
+#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \
+                                  ((type) == NPY_HALF))
+
+#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) ||    \
+                                  ((type) == NPY_UNICODE))
+
+#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) &&   \
+                                ((type) <= NPY_CLONGDOUBLE))
+
+#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) ||      \
+                                  ((type) == NPY_DOUBLE) ||    \
+                                  ((type) == NPY_CDOUBLE) ||   \
+                                  ((type) == NPY_BOOL) ||      \
+                                  ((type) == NPY_OBJECT ))
+
+#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) &&  \
+                                    ((type) <=NPY_VOID))
+
+#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) &&  \
+                                    ((type) <=NPY_TIMEDELTA))
+
+#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \
+                                   ((type) < NPY_USERDEF+     \
+                                    NPY_NUMUSERTYPES))
+
+#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) ||  \
+                                    PyTypeNum_ISUSERDEF(type))
+
+#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT)
+
+
+#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num )
+#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL)
+#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL)
+#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \
+                                      !PyDataType_HASFIELDS(dtype))
+#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0)
+
+#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj))
+#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj))
+#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj))
+#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj))
+#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj))
+#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj))
+#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj))
+#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj))
+#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj))
+#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))
+#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj))
+#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj))
+#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj))
+#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj))
+#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj))
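+
+/*
+ * For illustration only (not part of the original header): the predicates
+ * above nest as type number -> descriptor -> array, so for a given array
+ * these three checks are equivalent:
+ *
+ *   PyTypeNum_ISINTEGER(PyArray_TYPE(arr))
+ *   PyDataType_ISINTEGER(PyArray_DESCR(arr))
+ *   PyArray_ISINTEGER(arr)
+ */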
+
+    /*
+     * FIXME: This should check for a flag on the data-type that
+     * states whether or not it is variable length, because the
+     * ISFLEXIBLE check is hard-coded to the built-in data-types.
+     */
+#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))
+
+#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj))
+
+
+#define NPY_LITTLE '<'
+#define NPY_BIG '>'
+#define NPY_NATIVE '='
+#define NPY_SWAP 's'
+#define NPY_IGNORE '|'
+
+#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
+#define NPY_NATBYTE NPY_BIG
+#define NPY_OPPBYTE NPY_LITTLE
+#else
+#define NPY_NATBYTE NPY_LITTLE
+#define NPY_OPPBYTE NPY_BIG
+#endif
+
+#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE)
+#define PyArray_IsNativeByteOrder PyArray_ISNBO
+#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder)
+#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m))
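+
+/*
+ * For illustration only (not part of the original header): a common pattern
+ * is to reject or convert non-native data before touching it directly:
+ *
+ *   if (PyArray_ISBYTESWAPPED(arr)) {
+ *       // bytes are stored in the opposite order; byteswap or bail out
+ *   }
+ */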
+
+#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) &&       \
+                                    PyArray_ISNOTSWAPPED(m))
+
+#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY)
+#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO)
+#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY)
+#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO)
+#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED)
+#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED)
+
+
+#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder)
+#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d))
+
+/************************************************************
+ * A struct used by PyArray_CreateSortedStridePerm, new in 1.7.
+ ************************************************************/
+
+typedef struct {
+    npy_intp perm, stride;
+} npy_stride_sort_item;
+
+/************************************************************
+ * This is the form of the struct that's stored in the
+ * PyCapsule returned by an array's __array_struct__ attribute. See
+ * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full
+ * documentation.
+ ************************************************************/
+typedef struct {
+    int two;              /*
+                           * contains the integer 2 as a sanity
+                           * check
+                           */
+
+    int nd;               /* number of dimensions */
+
+    char typekind;        /*
+                           * kind in array --- character code of
+                           * typestr
+                           */
+
+    int itemsize;         /* size of each element */
+
+    int flags;            /*
+                           * how the data should be interpreted. Valid
+                           * flags are CONTIGUOUS (1), F_CONTIGUOUS (2),
+                           * ALIGNED (0x100), NOTSWAPPED (0x200), and
+                           * WRITEABLE (0x400).  ARR_HAS_DESCR (0x800)
+                           * states that arrdescr field is present in
+                           * structure
+                           */
+
+    npy_intp *shape;       /*
+                            * A length-nd array of shape
+                            * information
+                            */
+
+    npy_intp *strides;    /* A length-nd array of stride information */
+
+    void *data;           /* A pointer to the first element of the array */
+
+    PyObject *descr;      /*
+                           * A list of fields or NULL (ignored if flags
+                           * does not have ARR_HAS_DESCR flag set)
+                           */
+} PyArrayInterface;
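+
+/*
+ * For illustration only (not part of the original header): a hedged sketch
+ * of consuming the interface from an object's __array_struct__ capsule:
+ *
+ *   PyObject *cap = PyObject_GetAttrString(obj, "__array_struct__");
+ *   if (cap != NULL && PyCapsule_CheckExact(cap)) {
+ *       PyArrayInterface *inter =
+ *               (PyArrayInterface *)PyCapsule_GetPointer(cap, NULL);
+ *       if (inter != NULL && inter->two == 2) {
+ *           // inter->nd, inter->shape, inter->strides and inter->data
+ *           // describe the array; keep cap alive while using them
+ *       }
+ *   }
+ *   Py_XDECREF(cap);   // only once the data is no longer needed
+ */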
+
+/*
+ * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions.
+ * See the documentation for PyDataMem_SetEventHook.
+ */
+typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size,
+                                       void *user_data);
+
+
+/*
+ * PyArray_DTypeMeta related definitions.
+ *
+ * As of now, this API is preliminary and will be extended as necessary.
+ */
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
+    /*
+     * The Structures defined in this block are currently considered
+     * private API and may change without warning!
+     * Part of this (at least the size) is expected to be public API without
+     * further modifications.
+     */
+    /* TODO: Make this definition public in the API, as soon as its settled */
+    NPY_NO_EXPORT extern PyTypeObject PyArrayDTypeMeta_Type;
+
+    /*
+     * While NumPy DTypes would not need to be heap types the plan is to
+     * make DTypes available in Python at which point they will be heap types.
+     * Since we also wish to add fields to the DType class, this looks like
+     * a typical instance definition, but with PyHeapTypeObject instead of
+     * only the PyObject_HEAD.
+         * This must only be exposed after extremely careful consideration,
+         * since it is a fairly complex construct which may be better left
+         * open to refactoring.
+     */
+    typedef struct {
+        PyHeapTypeObject super;
+
+        /*
+         * Most DTypes will have a singleton default instance, for the
+         * parametric legacy DTypes (bytes, string, void, datetime) this
+         * may be a pointer to the *prototype* instance?
+         */
+        PyArray_Descr *singleton;
+        /* Copy of the legacy DTypes type number, usually invalid. */
+        int type_num;
+
+        /* The type object of the scalar instances (may be NULL?) */
+        PyTypeObject *scalar_type;
+        /*
+         * DType flags to signal legacy, parametric, or
+         * abstract.  But plenty of space for additional information/flags.
+         */
+        npy_uint64 flags;
+
+        /*
+         * Use indirection in order to allow a fixed size for this struct.
+         * A stable ABI size makes creating a static DType less painful
+         * while also ensuring flexibility for all opaque API (with one
+         * indirection due to the pointer lookup).
+         */
+        void *dt_slots;
+        void *reserved[3];
+    } PyArray_DTypeMeta;
+
+#endif  /* NPY_INTERNAL_BUILD */
+
+
+/*
+ * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files
+ * npy_*_*_deprecated_api.h are only included from here and nowhere else.
+ */
+#ifdef NPY_DEPRECATED_INCLUDES
+#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES."
+#endif
+#define NPY_DEPRECATED_INCLUDES
+#if !defined(NPY_NO_DEPRECATED_API) || \
+    (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION)
+#include "npy_1_7_deprecated_api.h"
+#endif
+/*
+ * There is no file npy_1_8_deprecated_api.h since there are no additional
+ * deprecated API features in NumPy 1.8.
+ *
+ * Note to maintainers: insert code like the following in future NumPy
+ * versions.
+ *
+ * #if !defined(NPY_NO_DEPRECATED_API) || \
+ *     (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION)
+ * #include "npy_1_9_deprecated_api.h"
+ * #endif
+ */
+#undef NPY_DEPRECATED_INCLUDES
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/noprefix.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/noprefix.h
new file mode 100644
index 00000000..cea5b0d4
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/noprefix.h
@@ -0,0 +1,211 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NOPREFIX_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NOPREFIX_H_
+
+/*
+ * You can directly include noprefix.h as a backward
+ * compatibility measure
+ */
+#ifndef NPY_NO_PREFIX
+#include "ndarrayobject.h"
+#include "npy_interrupt.h"
+#endif
+
+#define SIGSETJMP   NPY_SIGSETJMP
+#define SIGLONGJMP  NPY_SIGLONGJMP
+#define SIGJMP_BUF  NPY_SIGJMP_BUF
+
+#define MAX_DIMS NPY_MAXDIMS
+
+#define longlong    npy_longlong
+#define ulonglong   npy_ulonglong
+#define Bool        npy_bool
+#define longdouble  npy_longdouble
+#define byte        npy_byte
+
+#ifndef _BSD_SOURCE
+#define ushort      npy_ushort
+#define uint        npy_uint
+#define ulong       npy_ulong
+#endif
+
+#define ubyte       npy_ubyte
+#define ushort      npy_ushort
+#define uint        npy_uint
+#define ulong       npy_ulong
+#define cfloat      npy_cfloat
+#define cdouble     npy_cdouble
+#define clongdouble npy_clongdouble
+#define Int8        npy_int8
+#define UInt8       npy_uint8
+#define Int16       npy_int16
+#define UInt16      npy_uint16
+#define Int32       npy_int32
+#define UInt32      npy_uint32
+#define Int64       npy_int64
+#define UInt64      npy_uint64
+#define Int128      npy_int128
+#define UInt128     npy_uint128
+#define Int256      npy_int256
+#define UInt256     npy_uint256
+#define Float16     npy_float16
+#define Complex32   npy_complex32
+#define Float32     npy_float32
+#define Complex64   npy_complex64
+#define Float64     npy_float64
+#define Complex128  npy_complex128
+#define Float80     npy_float80
+#define Complex160  npy_complex160
+#define Float96     npy_float96
+#define Complex192  npy_complex192
+#define Float128    npy_float128
+#define Complex256  npy_complex256
+#define intp        npy_intp
+#define uintp       npy_uintp
+#define datetime    npy_datetime
+#define timedelta   npy_timedelta
+
+#define SIZEOF_LONGLONG         NPY_SIZEOF_LONGLONG
+#define SIZEOF_INTP             NPY_SIZEOF_INTP
+#define SIZEOF_UINTP            NPY_SIZEOF_UINTP
+#define SIZEOF_HALF             NPY_SIZEOF_HALF
+#define SIZEOF_LONGDOUBLE       NPY_SIZEOF_LONGDOUBLE
+#define SIZEOF_DATETIME         NPY_SIZEOF_DATETIME
+#define SIZEOF_TIMEDELTA        NPY_SIZEOF_TIMEDELTA
+
+#define LONGLONG_FMT NPY_LONGLONG_FMT
+#define ULONGLONG_FMT NPY_ULONGLONG_FMT
+#define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX
+#define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX
+
+#define MAX_INT8 127
+#define MIN_INT8 -128
+#define MAX_UINT8 255
+#define MAX_INT16 32767
+#define MIN_INT16 -32768
+#define MAX_UINT16 65535
+#define MAX_INT32 2147483647
+#define MIN_INT32 (-MAX_INT32 - 1)
+#define MAX_UINT32 4294967295U
+#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807)
+#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1))
+#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615)
+#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864)
+#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1))
+#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728)
+#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967)
+#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1))
+#define MAX_UINT256 ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935)
+
+#define MAX_BYTE NPY_MAX_BYTE
+#define MIN_BYTE NPY_MIN_BYTE
+#define MAX_UBYTE NPY_MAX_UBYTE
+#define MAX_SHORT NPY_MAX_SHORT
+#define MIN_SHORT NPY_MIN_SHORT
+#define MAX_USHORT NPY_MAX_USHORT
+#define MAX_INT   NPY_MAX_INT
+#define MIN_INT   NPY_MIN_INT
+#define MAX_UINT  NPY_MAX_UINT
+#define MAX_LONG  NPY_MAX_LONG
+#define MIN_LONG  NPY_MIN_LONG
+#define MAX_ULONG  NPY_MAX_ULONG
+#define MAX_LONGLONG NPY_MAX_LONGLONG
+#define MIN_LONGLONG NPY_MIN_LONGLONG
+#define MAX_ULONGLONG NPY_MAX_ULONGLONG
+#define MIN_DATETIME NPY_MIN_DATETIME
+#define MAX_DATETIME NPY_MAX_DATETIME
+#define MIN_TIMEDELTA NPY_MIN_TIMEDELTA
+#define MAX_TIMEDELTA NPY_MAX_TIMEDELTA
+
+#define BITSOF_BOOL       NPY_BITSOF_BOOL
+#define BITSOF_CHAR       NPY_BITSOF_CHAR
+#define BITSOF_SHORT      NPY_BITSOF_SHORT
+#define BITSOF_INT        NPY_BITSOF_INT
+#define BITSOF_LONG       NPY_BITSOF_LONG
+#define BITSOF_LONGLONG   NPY_BITSOF_LONGLONG
+#define BITSOF_HALF       NPY_BITSOF_HALF
+#define BITSOF_FLOAT      NPY_BITSOF_FLOAT
+#define BITSOF_DOUBLE     NPY_BITSOF_DOUBLE
+#define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE
+#define BITSOF_DATETIME   NPY_BITSOF_DATETIME
+#define BITSOF_TIMEDELTA   NPY_BITSOF_TIMEDELTA
+
+#define _pya_malloc PyArray_malloc
+#define _pya_free PyArray_free
+#define _pya_realloc PyArray_realloc
+
+#define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF
+#define BEGIN_THREADS     NPY_BEGIN_THREADS
+#define END_THREADS       NPY_END_THREADS
+#define ALLOW_C_API_DEF   NPY_ALLOW_C_API_DEF
+#define ALLOW_C_API       NPY_ALLOW_C_API
+#define DISABLE_C_API     NPY_DISABLE_C_API
+
+#define PY_FAIL NPY_FAIL
+#define PY_SUCCEED NPY_SUCCEED
+
+#ifndef TRUE
+#define TRUE NPY_TRUE
+#endif
+
+#ifndef FALSE
+#define FALSE NPY_FALSE
+#endif
+
+#define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT
+
+#define CONTIGUOUS         NPY_CONTIGUOUS
+#define C_CONTIGUOUS       NPY_C_CONTIGUOUS
+#define FORTRAN            NPY_FORTRAN
+#define F_CONTIGUOUS       NPY_F_CONTIGUOUS
+#define OWNDATA            NPY_OWNDATA
+#define FORCECAST          NPY_FORCECAST
+#define ENSURECOPY         NPY_ENSURECOPY
+#define ENSUREARRAY        NPY_ENSUREARRAY
+#define ELEMENTSTRIDES     NPY_ELEMENTSTRIDES
+#define ALIGNED            NPY_ALIGNED
+#define NOTSWAPPED         NPY_NOTSWAPPED
+#define WRITEABLE          NPY_WRITEABLE
+#define WRITEBACKIFCOPY    NPY_ARRAY_WRITEBACKIFCOPY
+#define ARR_HAS_DESCR      NPY_ARR_HAS_DESCR
+#define BEHAVED            NPY_BEHAVED
+#define BEHAVED_NS         NPY_BEHAVED_NS
+#define CARRAY             NPY_CARRAY
+#define CARRAY_RO          NPY_CARRAY_RO
+#define FARRAY             NPY_FARRAY
+#define FARRAY_RO          NPY_FARRAY_RO
+#define DEFAULT            NPY_DEFAULT
+#define IN_ARRAY           NPY_IN_ARRAY
+#define OUT_ARRAY          NPY_OUT_ARRAY
+#define INOUT_ARRAY        NPY_INOUT_ARRAY
+#define IN_FARRAY          NPY_IN_FARRAY
+#define OUT_FARRAY         NPY_OUT_FARRAY
+#define INOUT_FARRAY       NPY_INOUT_FARRAY
+#define UPDATE_ALL         NPY_UPDATE_ALL
+
+#define OWN_DATA          NPY_OWNDATA
+#define BEHAVED_FLAGS     NPY_BEHAVED
+#define BEHAVED_FLAGS_NS  NPY_BEHAVED_NS
+#define CARRAY_FLAGS_RO   NPY_CARRAY_RO
+#define CARRAY_FLAGS      NPY_CARRAY
+#define FARRAY_FLAGS      NPY_FARRAY
+#define FARRAY_FLAGS_RO   NPY_FARRAY_RO
+#define DEFAULT_FLAGS     NPY_DEFAULT
+#define UPDATE_ALL_FLAGS  NPY_UPDATE_ALL_FLAGS
+
+#ifndef MIN
+#define MIN PyArray_MIN
+#endif
+#ifndef MAX
+#define MAX PyArray_MAX
+#endif
+#define MAX_INTP NPY_MAX_INTP
+#define MIN_INTP NPY_MIN_INTP
+#define MAX_UINTP NPY_MAX_UINTP
+#define INTP_FMT NPY_INTP_FMT
+
+#ifndef PYPY_VERSION
+#define REFCOUNT PyArray_REFCOUNT
+#define MAX_ELSIZE NPY_MAX_ELSIZE
+#endif
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NOPREFIX_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h
new file mode 100644
index 00000000..6455d40d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h
@@ -0,0 +1,124 @@
+#ifndef NPY_DEPRECATED_INCLUDES
+#error "Should never include npy_*_*_deprecated_api directly."
+#endif
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_
+
+/* Emit a warning if the user did not specifically request the old API */
+#ifndef NPY_NO_DEPRECATED_API
+#if defined(_WIN32)
+#define _WARN___STR2__(x) #x
+#define _WARN___STR1__(x) _WARN___STR2__(x)
+#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: "
+#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \
+                         "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION")
+#else
+#warning "Using deprecated NumPy API, disable it with " \
+         "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION"
+#endif
+#endif
+
+/*
+ * This header exists to collect all dangerous/deprecated NumPy API
+ * as of NumPy 1.7.
+ *
+ * This is an attempt to remove bad API, the proliferation of macros,
+ * and namespace pollution currently produced by the NumPy headers.
+ */
+
+/* These array flags are deprecated as of NumPy 1.7 */
+#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS
+#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS
+
+/*
+ * The consistent NPY_ARRAY_* names which don't pollute the NPY_*
+ * namespace were added in NumPy 1.7.
+ *
+ * These versions of the carray flags are deprecated, but
+ * probably should only be removed after two releases instead of one.
+ */
+#define NPY_C_CONTIGUOUS   NPY_ARRAY_C_CONTIGUOUS
+#define NPY_F_CONTIGUOUS   NPY_ARRAY_F_CONTIGUOUS
+#define NPY_OWNDATA        NPY_ARRAY_OWNDATA
+#define NPY_FORCECAST      NPY_ARRAY_FORCECAST
+#define NPY_ENSURECOPY     NPY_ARRAY_ENSURECOPY
+#define NPY_ENSUREARRAY    NPY_ARRAY_ENSUREARRAY
+#define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES
+#define NPY_ALIGNED        NPY_ARRAY_ALIGNED
+#define NPY_NOTSWAPPED     NPY_ARRAY_NOTSWAPPED
+#define NPY_WRITEABLE      NPY_ARRAY_WRITEABLE
+#define NPY_BEHAVED        NPY_ARRAY_BEHAVED
+#define NPY_BEHAVED_NS     NPY_ARRAY_BEHAVED_NS
+#define NPY_CARRAY         NPY_ARRAY_CARRAY
+#define NPY_CARRAY_RO      NPY_ARRAY_CARRAY_RO
+#define NPY_FARRAY         NPY_ARRAY_FARRAY
+#define NPY_FARRAY_RO      NPY_ARRAY_FARRAY_RO
+#define NPY_DEFAULT        NPY_ARRAY_DEFAULT
+#define NPY_IN_ARRAY       NPY_ARRAY_IN_ARRAY
+#define NPY_OUT_ARRAY      NPY_ARRAY_OUT_ARRAY
+#define NPY_INOUT_ARRAY    NPY_ARRAY_INOUT_ARRAY
+#define NPY_IN_FARRAY      NPY_ARRAY_IN_FARRAY
+#define NPY_OUT_FARRAY     NPY_ARRAY_OUT_FARRAY
+#define NPY_INOUT_FARRAY   NPY_ARRAY_INOUT_FARRAY
+#define NPY_UPDATE_ALL     NPY_ARRAY_UPDATE_ALL
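+
+/*
+ * For illustration only (not part of the original header): migrating off
+ * the deprecated names is a mechanical rename, e.g.
+ *
+ *   old:  PyArray_FROM_OTF(obj, NPY_DOUBLE, NPY_IN_ARRAY);
+ *   new:  PyArray_FROM_OTF(obj, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY);
+ */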
+
+/* This way of accessing the default type is deprecated as of NumPy 1.7 */
+#define PyArray_DEFAULT NPY_DEFAULT_TYPE
+
+/* These DATETIME bits aren't used internally */
+#define PyDataType_GetDatetimeMetaData(descr)                                 \
+    ((descr->metadata == NULL) ? NULL :                                       \
+        ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer(                   \
+                PyDict_GetItemString(                                         \
+                    descr->metadata, NPY_METADATA_DTSTR), NULL))))
+
+/*
+ * Deprecated as of NumPy 1.7, this kind of shortcut doesn't
+ * belong in the public API.
+ */
+#define NPY_AO PyArrayObject
+
+/*
+ * Deprecated as of NumPy 1.7, an all-lowercase macro doesn't
+ * belong in the public API.
+ */
+#define fortran fortran_
+
+/*
+ * Deprecated as of NumPy 1.7, as it is a namespace-polluting
+ * macro.
+ */
+#define FORTRAN_IF PyArray_FORTRAN_IF
+
+/* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */
+#define NPY_METADATA_DTSTR "__timeunit__"
+
+/*
+ * Deprecated as of NumPy 1.7.
+ * The reasoning:
+ *  - These are for datetime, but there's no datetime "namespace".
+ *  - They just turn NPY_STR_<x> into "<x>", which is just
+ *    making something simple be indirected.
+ */
+#define NPY_STR_Y "Y"
+#define NPY_STR_M "M"
+#define NPY_STR_W "W"
+#define NPY_STR_D "D"
+#define NPY_STR_h "h"
+#define NPY_STR_m "m"
+#define NPY_STR_s "s"
+#define NPY_STR_ms "ms"
+#define NPY_STR_us "us"
+#define NPY_STR_ns "ns"
+#define NPY_STR_ps "ps"
+#define NPY_STR_fs "fs"
+#define NPY_STR_as "as"
+
+/*
+ * The macros in old_defines.h are Deprecated as of NumPy 1.7 and will be
+ * removed in the next major release.
+ */
+#include "old_defines.h"
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_3kcompat.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_3kcompat.h
new file mode 100644
index 00000000..62fde943
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_3kcompat.h
@@ -0,0 +1,595 @@
+/*
+ * This is a convenience header file providing compatibility utilities
+ * for supporting different minor versions of Python 3.
+ * It was originally used to support the transition from Python 2,
+ * hence the "3k" naming.
+ *
+ * If you want to use this for your own projects, it's recommended to make a
+ * copy of it. Although the stuff below is unlikely to change, we don't provide
+ * strong backwards compatibility guarantees at the moment.
+ */
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_
+
+#include <Python.h>
+#include <stdio.h>
+
+#ifndef NPY_PY3K
+#define NPY_PY3K 1
+#endif
+
+#include "numpy/npy_common.h"
+#include "numpy/ndarrayobject.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * PyInt -> PyLong
+ */
+
+
+/*
+ * This is a renamed copy of the Python non-limited API function _PyLong_AsInt. It is
+ * included here because it is missing from the PyPy API. It completes the PyLong_As*
+ * group of functions and can be useful in replacing PyInt_Check.
+ */
+static inline int
+Npy__PyLong_AsInt(PyObject *obj)
+{
+    int overflow;
+    long result = PyLong_AsLongAndOverflow(obj, &overflow);
+
+    /* INT_MAX and INT_MIN are defined in Python.h */
+    if (overflow || result > INT_MAX || result < INT_MIN) {
+        /* XXX: could be cute and give a different
+           message for overflow == -1 */
+        PyErr_SetString(PyExc_OverflowError,
+                        "Python int too large to convert to C int");
+        return -1;
+    }
+    return (int)result;
+}
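+
+/*
+ * For illustration only (not part of the original header): error handling
+ * mirrors the PyLong_As* family, where -1 can also be a valid result:
+ *
+ *   int v = Npy__PyLong_AsInt(obj);
+ *   if (v == -1 && PyErr_Occurred()) {
+ *       return NULL;   // overflow or not an int
+ *   }
+ */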
+
+
+#if defined(NPY_PY3K)
+/* Return True only if the long fits in a C long */
+static inline int PyInt_Check(PyObject *op) {
+    int overflow = 0;
+    if (!PyLong_Check(op)) {
+        return 0;
+    }
+    PyLong_AsLongAndOverflow(op, &overflow);
+    return (overflow == 0);
+}
+
+
+#define PyInt_FromLong PyLong_FromLong
+#define PyInt_AsLong PyLong_AsLong
+#define PyInt_AS_LONG PyLong_AsLong
+#define PyInt_AsSsize_t PyLong_AsSsize_t
+#define PyNumber_Int PyNumber_Long
+
+/* NOTE:
+ *
+ * Since the PyLong type is very different from the fixed-range PyInt,
+ * we don't define PyInt_Type -> PyLong_Type.
+ */
+#endif /* NPY_PY3K */
+
+/* Py3 changes PySlice_GetIndicesEx' first argument's type to PyObject* */
+#ifdef NPY_PY3K
+#  define NpySlice_GetIndicesEx PySlice_GetIndicesEx
+#else
+#  define NpySlice_GetIndicesEx(op, nop, start, end, step, slicelength) \
+    PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength)
+#endif
+
+#if PY_VERSION_HEX < 0x030900a4
+    /* Introduced in https://github.com/python/cpython/commit/d2ec81a8c99796b51fb8c49b77a7fe369863226f */
+    #define Py_SET_TYPE(obj, type) ((Py_TYPE(obj) = (type)), (void)0)
+    /* Introduced in https://github.com/python/cpython/commit/b10dc3e7a11fcdb97e285882eba6da92594f90f9 */
+    #define Py_SET_SIZE(obj, size) ((Py_SIZE(obj) = (size)), (void)0)
+    /* Introduced in https://github.com/python/cpython/commit/c86a11221df7e37da389f9c6ce6e47ea22dc44ff */
+    #define Py_SET_REFCNT(obj, refcnt) ((Py_REFCNT(obj) = (refcnt)), (void)0)
+#endif
+
+
+#define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
+
+/*
+ * PyString -> PyBytes
+ */
+
+#if defined(NPY_PY3K)
+
+#define PyString_Type PyBytes_Type
+#define PyString_Check PyBytes_Check
+#define PyStringObject PyBytesObject
+#define PyString_FromString PyBytes_FromString
+#define PyString_FromStringAndSize PyBytes_FromStringAndSize
+#define PyString_AS_STRING PyBytes_AS_STRING
+#define PyString_AsStringAndSize PyBytes_AsStringAndSize
+#define PyString_FromFormat PyBytes_FromFormat
+#define PyString_Concat PyBytes_Concat
+#define PyString_ConcatAndDel PyBytes_ConcatAndDel
+#define PyString_AsString PyBytes_AsString
+#define PyString_GET_SIZE PyBytes_GET_SIZE
+#define PyString_Size PyBytes_Size
+
+#define PyUString_Type PyUnicode_Type
+#define PyUString_Check PyUnicode_Check
+#define PyUStringObject PyUnicodeObject
+#define PyUString_FromString PyUnicode_FromString
+#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize
+#define PyUString_FromFormat PyUnicode_FromFormat
+#define PyUString_Concat PyUnicode_Concat2
+#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel
+#define PyUString_GET_SIZE PyUnicode_GET_SIZE
+#define PyUString_Size PyUnicode_Size
+#define PyUString_InternFromString PyUnicode_InternFromString
+#define PyUString_Format PyUnicode_Format
+
+#define PyBaseString_Check(obj) (PyUnicode_Check(obj))
+
+#else
+
+#define PyBytes_Type PyString_Type
+#define PyBytes_Check PyString_Check
+#define PyBytesObject PyStringObject
+#define PyBytes_FromString PyString_FromString
+#define PyBytes_FromStringAndSize PyString_FromStringAndSize
+#define PyBytes_AS_STRING PyString_AS_STRING
+#define PyBytes_AsStringAndSize PyString_AsStringAndSize
+#define PyBytes_FromFormat PyString_FromFormat
+#define PyBytes_Concat PyString_Concat
+#define PyBytes_ConcatAndDel PyString_ConcatAndDel
+#define PyBytes_AsString PyString_AsString
+#define PyBytes_GET_SIZE PyString_GET_SIZE
+#define PyBytes_Size PyString_Size
+
+#define PyUString_Type PyString_Type
+#define PyUString_Check PyString_Check
+#define PyUStringObject PyStringObject
+#define PyUString_FromString PyString_FromString
+#define PyUString_FromStringAndSize PyString_FromStringAndSize
+#define PyUString_FromFormat PyString_FromFormat
+#define PyUString_Concat PyString_Concat
+#define PyUString_ConcatAndDel PyString_ConcatAndDel
+#define PyUString_GET_SIZE PyString_GET_SIZE
+#define PyUString_Size PyString_Size
+#define PyUString_InternFromString PyString_InternFromString
+#define PyUString_Format PyString_Format
+
+#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj))
+
+#endif /* NPY_PY3K */
+
+/*
+ * Macros to protect CRT calls against instant termination when passed an
+ * invalid parameter (https://bugs.python.org/issue23524).
+ */
+#if defined _MSC_VER && _MSC_VER >= 1900
+
+#include <stdlib.h>
+
+extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler;
+#define NPY_BEGIN_SUPPRESS_IPH { _invalid_parameter_handler _Py_old_handler = \
+    _set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler);
+#define NPY_END_SUPPRESS_IPH _set_thread_local_invalid_parameter_handler(_Py_old_handler); }
+
+#else
+
+#define NPY_BEGIN_SUPPRESS_IPH
+#define NPY_END_SUPPRESS_IPH
+
+#endif /* _MSC_VER >= 1900 */
+
+
+static inline void
+PyUnicode_ConcatAndDel(PyObject **left, PyObject *right)
+{
+    Py_SETREF(*left, PyUnicode_Concat(*left, right));
+    Py_DECREF(right);
+}
+
+static inline void
+PyUnicode_Concat2(PyObject **left, PyObject *right)
+{
+    Py_SETREF(*left, PyUnicode_Concat(*left, right));
+}
+
+/*
+ * PyFile_* compatibility
+ */
+
+/*
+ * Get a FILE* handle to the file represented by the Python object
+ */
+static inline FILE*
+npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos)
+{
+    int fd, fd2, unbuf;
+    Py_ssize_t fd2_tmp;
+    PyObject *ret, *os, *io, *io_raw;
+    npy_off_t pos;
+    FILE *handle;
+
+    /* For Python 2 PyFileObject, use PyFile_AsFile */
+#if !defined(NPY_PY3K)
+    if (PyFile_Check(file)) {
+        return PyFile_AsFile(file);
+    }
+#endif
+
+    /* Flush first to ensure things end up in the file in the correct order */
+    ret = PyObject_CallMethod(file, "flush", "");
+    if (ret == NULL) {
+        return NULL;
+    }
+    Py_DECREF(ret);
+    fd = PyObject_AsFileDescriptor(file);
+    if (fd == -1) {
+        return NULL;
+    }
+
+    /*
+     * The handle needs to be dup'd because we have to call fclose
+     * at the end
+     */
+    os = PyImport_ImportModule("os");
+    if (os == NULL) {
+        return NULL;
+    }
+    ret = PyObject_CallMethod(os, "dup", "i", fd);
+    Py_DECREF(os);
+    if (ret == NULL) {
+        return NULL;
+    }
+    fd2_tmp = PyNumber_AsSsize_t(ret, PyExc_IOError);
+    Py_DECREF(ret);
+    if (fd2_tmp == -1 && PyErr_Occurred()) {
+        return NULL;
+    }
+    if (fd2_tmp < INT_MIN || fd2_tmp > INT_MAX) {
+        PyErr_SetString(PyExc_IOError,
+                        "Getting an 'int' from os.dup() failed");
+        return NULL;
+    }
+    fd2 = (int)fd2_tmp;
+
+    /* Convert to FILE* handle */
+#ifdef _WIN32
+    NPY_BEGIN_SUPPRESS_IPH
+    handle = _fdopen(fd2, mode);
+    NPY_END_SUPPRESS_IPH
+#else
+    handle = fdopen(fd2, mode);
+#endif
+    if (handle == NULL) {
+        PyErr_SetString(PyExc_IOError,
+                        "Getting a FILE* from a Python file object via "
+                        "_fdopen failed. If you built NumPy, you probably "
+                        "linked with the wrong debug/release runtime");
+        return NULL;
+    }
+
+    /* Record the original raw file handle position */
+    *orig_pos = npy_ftell(handle);
+    if (*orig_pos == -1) {
+        /* The io module is needed to determine if buffering is used */
+        io = PyImport_ImportModule("io");
+        if (io == NULL) {
+            fclose(handle);
+            return NULL;
+        }
+        /* File object instances of RawIOBase are unbuffered */
+        io_raw = PyObject_GetAttrString(io, "RawIOBase");
+        Py_DECREF(io);
+        if (io_raw == NULL) {
+            fclose(handle);
+            return NULL;
+        }
+        unbuf = PyObject_IsInstance(file, io_raw);
+        Py_DECREF(io_raw);
+        if (unbuf == 1) {
+            /* Succeed if the IO is unbuffered */
+            return handle;
+        }
+        else {
+            PyErr_SetString(PyExc_IOError, "obtaining file position failed");
+            fclose(handle);
+            return NULL;
+        }
+    }
+
+    /* Seek raw handle to the Python-side position */
+    ret = PyObject_CallMethod(file, "tell", "");
+    if (ret == NULL) {
+        fclose(handle);
+        return NULL;
+    }
+    pos = PyLong_AsLongLong(ret);
+    Py_DECREF(ret);
+    if (PyErr_Occurred()) {
+        fclose(handle);
+        return NULL;
+    }
+    if (npy_fseek(handle, pos, SEEK_SET) == -1) {
+        PyErr_SetString(PyExc_IOError, "seeking file failed");
+        fclose(handle);
+        return NULL;
+    }
+    return handle;
+}
+
+/*
+ * Close the dup-ed file handle, and seek the Python one to the current position
+ */
+static inline int
+npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos)
+{
+    int fd, unbuf;
+    PyObject *ret, *io, *io_raw;
+    npy_off_t position;
+
+    /* For Python 2 PyFileObject, do nothing */
+#if !defined(NPY_PY3K)
+    if (PyFile_Check(file)) {
+        return 0;
+    }
+#endif
+
+    position = npy_ftell(handle);
+
+    /* Close the FILE* handle */
+    fclose(handle);
+
+    /*
+     * Restore original file handle position, in order to not confuse
+     * Python-side data structures
+     */
+    fd = PyObject_AsFileDescriptor(file);
+    if (fd == -1) {
+        return -1;
+    }
+
+    if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) {
+
+        /* The io module is needed to determine if buffering is used */
+        io = PyImport_ImportModule("io");
+        if (io == NULL) {
+            return -1;
+        }
+        /* File object instances of RawIOBase are unbuffered */
+        io_raw = PyObject_GetAttrString(io, "RawIOBase");
+        Py_DECREF(io);
+        if (io_raw == NULL) {
+            return -1;
+        }
+        unbuf = PyObject_IsInstance(file, io_raw);
+        Py_DECREF(io_raw);
+        if (unbuf == 1) {
+            /* Succeed if the IO is unbuffered */
+            return 0;
+        }
+        else {
+            PyErr_SetString(PyExc_IOError, "seeking file failed");
+            return -1;
+        }
+    }
+
+    if (position == -1) {
+        PyErr_SetString(PyExc_IOError, "obtaining file position failed");
+        return -1;
+    }
+
+    /* Seek Python-side handle to the FILE* handle position */
+    ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0);
+    if (ret == NULL) {
+        return -1;
+    }
+    Py_DECREF(ret);
+    return 0;
+}
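+
+/*
+ * For illustration only (not part of the original header): Dup2/DupClose2
+ * are intended to be used as a pair around C-level I/O on a Python file
+ * object:
+ *
+ *   npy_off_t orig_pos;
+ *   FILE *fp = npy_PyFile_Dup2(file, "rb", &orig_pos);
+ *   if (fp == NULL) { return NULL; }
+ *   // ... fread/fseek on fp ...
+ *   if (npy_PyFile_DupClose2(file, fp, orig_pos) < 0) { return NULL; }
+ */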
+
+static inline int
+npy_PyFile_Check(PyObject *file)
+{
+    int fd;
+    /* For Python 2, check if it is a PyFileObject */
+#if !defined(NPY_PY3K)
+    if (PyFile_Check(file)) {
+        return 1;
+    }
+#endif
+    fd = PyObject_AsFileDescriptor(file);
+    if (fd == -1) {
+        PyErr_Clear();
+        return 0;
+    }
+    return 1;
+}
+
+static inline PyObject*
+npy_PyFile_OpenFile(PyObject *filename, const char *mode)
+{
+    PyObject *open;
+    open = PyDict_GetItemString(PyEval_GetBuiltins(), "open");
+    if (open == NULL) {
+        return NULL;
+    }
+    return PyObject_CallFunction(open, "Os", filename, mode);
+}
+
+static inline int
+npy_PyFile_CloseFile(PyObject *file)
+{
+    PyObject *ret;
+
+    ret = PyObject_CallMethod(file, "close", NULL);
+    if (ret == NULL) {
+        return -1;
+    }
+    Py_DECREF(ret);
+    return 0;
+}
+
+
+/* This is a copy of _PyErr_ChainExceptions
+ */
+static inline void
+npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb)
+{
+    if (exc == NULL)
+        return;
+
+    if (PyErr_Occurred()) {
+        /* only py3 supports this anyway */
+        #ifdef NPY_PY3K
+            PyObject *exc2, *val2, *tb2;
+            PyErr_Fetch(&exc2, &val2, &tb2);
+            PyErr_NormalizeException(&exc, &val, &tb);
+            if (tb != NULL) {
+                PyException_SetTraceback(val, tb);
+                Py_DECREF(tb);
+            }
+            Py_DECREF(exc);
+            PyErr_NormalizeException(&exc2, &val2, &tb2);
+            PyException_SetContext(val2, val);
+            PyErr_Restore(exc2, val2, tb2);
+        #endif
+    }
+    else {
+        PyErr_Restore(exc, val, tb);
+    }
+}
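+
+/*
+ * For illustration only (not part of the original header): the canonical
+ * pattern is to fetch a pending error, run cleanup that may itself raise,
+ * and then chain the two:
+ *
+ *   PyObject *exc, *val, *tb;
+ *   PyErr_Fetch(&exc, &val, &tb);
+ *   // ... cleanup that may set a new exception ...
+ *   npy_PyErr_ChainExceptions(exc, val, tb);
+ */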
+
+
+/* This is a copy of _PyErr_ChainExceptions, with:
+ *  - a minimal implementation for python 2
+ *  - __cause__ used instead of __context__
+ */
+static inline void
+npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb)
+{
+    if (exc == NULL)
+        return;
+
+    if (PyErr_Occurred()) {
+        /* only py3 supports this anyway */
+        #ifdef NPY_PY3K
+            PyObject *exc2, *val2, *tb2;
+            PyErr_Fetch(&exc2, &val2, &tb2);
+            PyErr_NormalizeException(&exc, &val, &tb);
+            if (tb != NULL) {
+                PyException_SetTraceback(val, tb);
+                Py_DECREF(tb);
+            }
+            Py_DECREF(exc);
+            PyErr_NormalizeException(&exc2, &val2, &tb2);
+            PyException_SetCause(val2, val);
+            PyErr_Restore(exc2, val2, tb2);
+        #endif
+    }
+    else {
+        PyErr_Restore(exc, val, tb);
+    }
+}
+
+/*
+ * PyObject_Cmp
+ */
+#if defined(NPY_PY3K)
+static inline int
+PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp)
+{
+    int v;
+    v = PyObject_RichCompareBool(i1, i2, Py_LT);
+    if (v == 1) {
+        *cmp = -1;
+        return 1;
+    }
+    else if (v == -1) {
+        return -1;
+    }
+
+    v = PyObject_RichCompareBool(i1, i2, Py_GT);
+    if (v == 1) {
+        *cmp = 1;
+        return 1;
+    }
+    else if (v == -1) {
+        return -1;
+    }
+
+    v = PyObject_RichCompareBool(i1, i2, Py_EQ);
+    if (v == 1) {
+        *cmp = 0;
+        return 1;
+    }
+    else {
+        *cmp = 0;
+        return -1;
+    }
+}
+#endif
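+
+/*
+ * For illustration only (not part of the original header): the shim returns
+ * 1 on success and -1 on error, with the ordering left in *cmp:
+ *
+ *   int cmp;
+ *   if (PyObject_Cmp(a, b, &cmp) < 0) { return NULL; }
+ *   // cmp is -1, 0 or 1 for a < b, a == b, a > b
+ */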
+
+/*
+ * PyCObject functions adapted to PyCapsules.
+ *
+ * The main job here is to get rid of the improved error handling
+ * of PyCapsules. It's a shame...
+ */
+static inline PyObject *
+NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
+{
+    PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
+    if (ret == NULL) {
+        PyErr_Clear();
+    }
+    return ret;
+}
+
+static inline PyObject *
+NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *))
+{
+    PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor);
+    if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) {
+        PyErr_Clear();
+        Py_DECREF(ret);
+        ret = NULL;
+    }
+    return ret;
+}
+
+static inline void *
+NpyCapsule_AsVoidPtr(PyObject *obj)
+{
+    void *ret = PyCapsule_GetPointer(obj, NULL);
+    if (ret == NULL) {
+        PyErr_Clear();
+    }
+    return ret;
+}
+
+static inline void *
+NpyCapsule_GetDesc(PyObject *obj)
+{
+    return PyCapsule_GetContext(obj);
+}
+
+static inline int
+NpyCapsule_Check(PyObject *ptr)
+{
+    return PyCapsule_CheckExact(ptr);
+}
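+
+/*
+ * For illustration only (not part of the original header): a round trip
+ * through the capsule helpers, with a hypothetical payload and destructor:
+ *
+ *   static void my_dtor(PyObject *cap) {
+ *       free(PyCapsule_GetPointer(cap, NULL));
+ *   }
+ *   ...
+ *   PyObject *cap = NpyCapsule_FromVoidPtr(ptr, my_dtor);
+ *   void *back = NpyCapsule_AsVoidPtr(cap);   // == ptr on success
+ */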
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_common.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_common.h
new file mode 100644
index 00000000..9e98f8ef
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_common.h
@@ -0,0 +1,1086 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_
+
+/* need Python.h for npy_intp, npy_uintp */
+#include <Python.h>
+
+/* numpyconfig.h is auto-generated */
+#include "numpyconfig.h"
+#ifdef HAVE_NPY_CONFIG_H
+#include <npy_config.h>
+#endif
+
+/*
+ * using static inline modifiers when defining npy_math functions
+ * allows the compiler to make optimizations when possible
+ */
+#ifndef NPY_INLINE_MATH
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
+    #define NPY_INLINE_MATH 1
+#else
+    #define NPY_INLINE_MATH 0
+#endif
+#endif
+
+/*
+ * gcc does not unroll even with -O3
+ * use with care, unrolling on modern cpus rarely speeds things up
+ */
+#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS
+#define NPY_GCC_UNROLL_LOOPS \
+    __attribute__((optimize("unroll-loops")))
+#else
+#define NPY_GCC_UNROLL_LOOPS
+#endif
+
+/* highest gcc optimization level, enabled autovectorizer */
+#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3
+#define NPY_GCC_OPT_3 __attribute__((optimize("O3")))
+#else
+#define NPY_GCC_OPT_3
+#endif
+
+/*
+ * mark an argument (starting from 1) that must not be NULL and is not checked
+ * DO NOT USE IF FUNCTION CHECKS FOR NULL!! the compiler will remove the check
+ */
+#ifdef HAVE_ATTRIBUTE_NONNULL
+#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n)))
+#else
+#define NPY_GCC_NONNULL(n)
+#endif
+
+/*
+ * give a hint to the compiler which branch is more likely or unlikely
+ * to occur, e.g. rare error cases:
+ *
+ * if (NPY_UNLIKELY(failure == 0))
+ *    return NULL;
+ *
+ * the double !! is to cast the expression (e.g. NULL) to a boolean required by
+ * the intrinsic
+ */
+#ifdef HAVE___BUILTIN_EXPECT
+#define NPY_LIKELY(x) __builtin_expect(!!(x), 1)
+#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define NPY_LIKELY(x) (x)
+#define NPY_UNLIKELY(x) (x)
+#endif
+
+#ifdef HAVE___BUILTIN_PREFETCH
+/* unlike _mm_prefetch also works on non-x86 */
+#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc))
+#else
+#ifdef NPY_HAVE_SSE
+/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */
+#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \
+                                             (loc == 1 ? _MM_HINT_T2 : \
+                                              (loc == 2 ? _MM_HINT_T1 : \
+                                               (loc == 3 ? _MM_HINT_T0 : -1))))
+#else
+#define NPY_PREFETCH(x, rw,loc)
+#endif
+#endif
+
+/* `NPY_INLINE` kept for backwards compatibility; use `inline` instead */
+#if defined(_MSC_VER) && !defined(__clang__)
+    #define NPY_INLINE __inline
+/* clang included here to handle clang-cl on Windows */
+#elif defined(__GNUC__) || defined(__clang__)
+    #if defined(__STRICT_ANSI__)
+         #define NPY_INLINE __inline__
+    #else
+         #define NPY_INLINE inline
+    #endif
+#else
+    #define NPY_INLINE
+#endif
+
+#ifdef _MSC_VER
+    #define NPY_FINLINE static __forceinline
+#elif defined(__GNUC__)
+    #define NPY_FINLINE static inline __attribute__((always_inline))
+#else
+    #define NPY_FINLINE static
+#endif
+
+#if defined(_MSC_VER)
+    #define NPY_NOINLINE static __declspec(noinline)
+#elif defined(__GNUC__) || defined(__clang__)
+    #define NPY_NOINLINE static __attribute__((noinline))
+#else
+    #define NPY_NOINLINE static
+#endif
+
+#ifdef HAVE___THREAD
+    #define NPY_TLS __thread
+#else
+    #ifdef HAVE___DECLSPEC_THREAD_
+        #define NPY_TLS __declspec(thread)
+    #else
+        #define NPY_TLS
+    #endif
+#endif
+
+#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE
+  #define NPY_RETURNS_BORROWED_REF \
+    __attribute__((cpychecker_returns_borrowed_ref))
+#else
+  #define NPY_RETURNS_BORROWED_REF
+#endif
+
+#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE
+  #define NPY_STEALS_REF_TO_ARG(n) \
+   __attribute__((cpychecker_steals_reference_to_arg(n)))
+#else
+ #define NPY_STEALS_REF_TO_ARG(n)
+#endif
+
+/* 64 bit file position support, also on win-amd64. Issue gh-2256 */
+#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \
+    defined(__MINGW32__) || defined(__MINGW64__)
+    #include <io.h>
+
+    #define npy_fseek _fseeki64
+    #define npy_ftell _ftelli64
+    #define npy_lseek _lseeki64
+    #define npy_off_t npy_int64
+
+    #if NPY_SIZEOF_INT == 8
+        #define NPY_OFF_T_PYFMT "i"
+    #elif NPY_SIZEOF_LONG == 8
+        #define NPY_OFF_T_PYFMT "l"
+    #elif NPY_SIZEOF_LONGLONG == 8
+        #define NPY_OFF_T_PYFMT "L"
+    #else
+        #error Unsupported size for type off_t
+    #endif
+#else
+#ifdef HAVE_FSEEKO
+    #define npy_fseek fseeko
+#else
+    #define npy_fseek fseek
+#endif
+#ifdef HAVE_FTELLO
+    #define npy_ftell ftello
+#else
+    #define npy_ftell ftell
+#endif
+    #include <sys/types.h>
+    #ifndef _WIN32
+        #include <unistd.h>
+    #endif
+    #define npy_lseek lseek
+    #define npy_off_t off_t
+
+    #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT
+        #define NPY_OFF_T_PYFMT "h"
+    #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT
+        #define NPY_OFF_T_PYFMT "i"
+    #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG
+        #define NPY_OFF_T_PYFMT "l"
+    #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG
+        #define NPY_OFF_T_PYFMT "L"
+    #else
+        #error Unsupported size for type off_t
+    #endif
+#endif
+
+/* enums for detected endianness */
+enum {
+        NPY_CPU_UNKNOWN_ENDIAN,
+        NPY_CPU_LITTLE,
+        NPY_CPU_BIG
+};
+
+/*
+ * This is to typedef npy_intp to the appropriate pointer size for this
+ * platform.  Py_intptr_t, Py_uintptr_t are defined in pyport.h.
+ */
+typedef Py_intptr_t npy_intp;
+typedef Py_uintptr_t npy_uintp;
+
+/*
+ * Define sizes that were not defined in numpyconfig.h.
+ */
+#define NPY_SIZEOF_CHAR 1
+#define NPY_SIZEOF_BYTE 1
+#define NPY_SIZEOF_DATETIME 8
+#define NPY_SIZEOF_TIMEDELTA 8
+#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T
+#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T
+#define NPY_SIZEOF_HALF 2
+#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT
+#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE
+#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE
+
+#ifdef constchar
+#undef constchar
+#endif
+
+#define NPY_SSIZE_T_PYFMT "n"
+#define constchar char
+
+/* NPY_INTP_FMT Note:
+ *      Unlike the other NPY_*_FMT macros, which are used with PyOS_snprintf,
+ *      NPY_INTP_FMT is used with PyErr_Format and PyUnicode_FromFormat. Those
+ *      functions use different formatting codes that are portably specified
+ *      according to the Python documentation. See issue gh-2388.
+ */
+#if NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_INT
+        #define NPY_INTP NPY_INT
+        #define NPY_UINTP NPY_UINT
+        #define PyIntpArrType_Type PyIntArrType_Type
+        #define PyUIntpArrType_Type PyUIntArrType_Type
+        #define NPY_MAX_INTP NPY_MAX_INT
+        #define NPY_MIN_INTP NPY_MIN_INT
+        #define NPY_MAX_UINTP NPY_MAX_UINT
+        #define NPY_INTP_FMT "d"
+#elif NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONG
+        #define NPY_INTP NPY_LONG
+        #define NPY_UINTP NPY_ULONG
+        #define PyIntpArrType_Type PyLongArrType_Type
+        #define PyUIntpArrType_Type PyULongArrType_Type
+        #define NPY_MAX_INTP NPY_MAX_LONG
+        #define NPY_MIN_INTP NPY_MIN_LONG
+        #define NPY_MAX_UINTP NPY_MAX_ULONG
+        #define NPY_INTP_FMT "ld"
+#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONGLONG)
+        #define NPY_INTP NPY_LONGLONG
+        #define NPY_UINTP NPY_ULONGLONG
+        #define PyIntpArrType_Type PyLongLongArrType_Type
+        #define PyUIntpArrType_Type PyULongLongArrType_Type
+        #define NPY_MAX_INTP NPY_MAX_LONGLONG
+        #define NPY_MIN_INTP NPY_MIN_LONGLONG
+        #define NPY_MAX_UINTP NPY_MAX_ULONGLONG
+        #define NPY_INTP_FMT "lld"
+#endif
+
+/*
+ * We can only use C99 formats for npy_intp if it is the same as
+ * intptr_t, hence the condition on HAVE_UINTPTR_T.
+ */
+#if (NPY_USE_C99_FORMATS) == 1 \
+        && (defined HAVE_UINTPTR_T) \
+        && (defined HAVE_INTTYPES_H)
+        #include <inttypes.h>
+        #undef NPY_INTP_FMT
+        #define NPY_INTP_FMT PRIdPTR
+#endif
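+
+/*
+ * Usage sketch (illustrative): per the note above, NPY_INTP_FMT is
+ * spliced into PyErr_Format / PyUnicode_FromFormat format strings.
+ *
+ *   npy_intp n = -1;
+ *   PyErr_Format(PyExc_ValueError,
+ *                "invalid length %" NPY_INTP_FMT, n);
+ */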
+
+
+/*
+ * Some platforms don't define bool, long long, or long double.
+ * Handle that here.
+ */
+#define NPY_BYTE_FMT "hhd"
+#define NPY_UBYTE_FMT "hhu"
+#define NPY_SHORT_FMT "hd"
+#define NPY_USHORT_FMT "hu"
+#define NPY_INT_FMT "d"
+#define NPY_UINT_FMT "u"
+#define NPY_LONG_FMT "ld"
+#define NPY_ULONG_FMT "lu"
+#define NPY_HALF_FMT "g"
+#define NPY_FLOAT_FMT "g"
+#define NPY_DOUBLE_FMT "g"
+
+
+#ifdef PY_LONG_LONG
+typedef PY_LONG_LONG npy_longlong;
+typedef unsigned PY_LONG_LONG npy_ulonglong;
+#  ifdef _MSC_VER
+#    define NPY_LONGLONG_FMT         "I64d"
+#    define NPY_ULONGLONG_FMT        "I64u"
+#  else
+#    define NPY_LONGLONG_FMT         "lld"
+#    define NPY_ULONGLONG_FMT        "llu"
+#  endif
+#  ifdef _MSC_VER
+#    define NPY_LONGLONG_SUFFIX(x)   (x##i64)
+#    define NPY_ULONGLONG_SUFFIX(x)  (x##Ui64)
+#  else
+#    define NPY_LONGLONG_SUFFIX(x)   (x##LL)
+#    define NPY_ULONGLONG_SUFFIX(x)  (x##ULL)
+#  endif
+#else
+typedef long npy_longlong;
+typedef unsigned long npy_ulonglong;
+#  define NPY_LONGLONG_SUFFIX(x)  (x##L)
+#  define NPY_ULONGLONG_SUFFIX(x) (x##UL)
+#endif
+
+
+typedef unsigned char npy_bool;
+#define NPY_FALSE 0
+#define NPY_TRUE 1
+/*
+ * `NPY_SIZEOF_LONGDOUBLE` isn't always equal to sizeof(long double).
+ * In certain cases it may be forced to equal sizeof(double), even
+ * against the compiler's implementation, and the same goes for
+ * `complex long double`.
+ *
+ * Therefore, avoid `long double` and use `npy_longdouble` instead;
+ * when it comes to standard math functions, make sure to use the
+ * double version when `NPY_SIZEOF_LONGDOUBLE` == `NPY_SIZEOF_DOUBLE`.
+ * For example:
+ *   npy_longdouble *ptr, x;
+ *   #if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
+ *       npy_longdouble r = modf(x, ptr);
+ *   #else
+ *       npy_longdouble r = modfl(x, ptr);
+ *   #endif
+ *
+ * See https://github.com/numpy/numpy/issues/20348
+ */
+#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
+    #define NPY_LONGDOUBLE_FMT "g"
+    typedef double npy_longdouble;
+#else
+    #define NPY_LONGDOUBLE_FMT "Lg"
+    typedef long double npy_longdouble;
+#endif
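+
+/*
+ * Usage sketch (illustrative): pair npy_longdouble with its matching
+ * format macro so printing works whether or not it aliases double.
+ *
+ *   char buf[64];
+ *   npy_longdouble v = 2.5;
+ *   PyOS_snprintf(buf, sizeof(buf), "%" NPY_LONGDOUBLE_FMT, v);
+ */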
+
+#ifndef Py_USING_UNICODE
+#error Must use Python with unicode enabled.
+#endif
+
+
+typedef signed char npy_byte;
+typedef unsigned char npy_ubyte;
+typedef unsigned short npy_ushort;
+typedef unsigned int npy_uint;
+typedef unsigned long npy_ulong;
+
+/* These are for completeness */
+typedef char npy_char;
+typedef short npy_short;
+typedef int npy_int;
+typedef long npy_long;
+typedef float npy_float;
+typedef double npy_double;
+
+typedef Py_hash_t npy_hash_t;
+#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP
+
+/*
+ * Disabling C99 complex usage: a lot of C code in numpy/scipy relies on
+ * being able to do .real/.imag.  That code will have to be converted first.
+ */
+#if 0
+#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_DOUBLE)
+typedef complex npy_cdouble;
+#else
+typedef struct { double real, imag; } npy_cdouble;
+#endif
+
+#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_FLOAT)
+typedef complex float npy_cfloat;
+#else
+typedef struct { float real, imag; } npy_cfloat;
+#endif
+
+#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_LONG_DOUBLE)
+typedef complex long double npy_clongdouble;
+#else
+typedef struct {npy_longdouble real, imag;} npy_clongdouble;
+#endif
+#endif
+#if NPY_SIZEOF_COMPLEX_DOUBLE != 2 * NPY_SIZEOF_DOUBLE
+#error npy_cdouble definition is not compatible with C99 complex definition ! \
+        Please contact NumPy maintainers and give detailed information about your \
+        compiler and platform
+#endif
+typedef struct { double real, imag; } npy_cdouble;
+
+#if NPY_SIZEOF_COMPLEX_FLOAT != 2 * NPY_SIZEOF_FLOAT
+#error npy_cfloat definition is not compatible with C99 complex definition ! \
+        Please contact NumPy maintainers and give detailed information about your \
+        compiler and platform
+#endif
+typedef struct { float real, imag; } npy_cfloat;
+
+#if NPY_SIZEOF_COMPLEX_LONGDOUBLE != 2 * NPY_SIZEOF_LONGDOUBLE
+#error npy_clongdouble definition is not compatible with C99 complex definition ! \
+        Please contact NumPy maintainers and give detailed information about your \
+        compiler and platform
+#endif
+typedef struct { npy_longdouble real, imag; } npy_clongdouble;
+
+/*
+ * numarray-style bit-width typedefs
+ */
+#define NPY_MAX_INT8 127
+#define NPY_MIN_INT8 -128
+#define NPY_MAX_UINT8 255
+#define NPY_MAX_INT16 32767
+#define NPY_MIN_INT16 -32768
+#define NPY_MAX_UINT16 65535
+#define NPY_MAX_INT32 2147483647
+#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1)
+#define NPY_MAX_UINT32 4294967295U
+#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807)
+#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615)
+#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864)
+#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728)
+#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967)
+#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935)
+#define NPY_MIN_DATETIME NPY_MIN_INT64
+#define NPY_MAX_DATETIME NPY_MAX_INT64
+#define NPY_MIN_TIMEDELTA NPY_MIN_INT64
+#define NPY_MAX_TIMEDELTA NPY_MAX_INT64
+
+        /* We need to find the number of bits for each type and make
+           definitions accordingly.
+
+           C states that sizeof(char) == 1 by definition, so the sizeof
+           keyword alone won't help.  Python itself also uses
+           sizeof(char) quite a bit, and by definition it is always 1.
+
+           Idea: use CHAR_BIT, which tells us how many bits there are
+           per character.
+        */
+
+        /* Include platform definitions -- These are in the C89/90 standard */
+#include <limits.h>
+#define NPY_MAX_BYTE SCHAR_MAX
+#define NPY_MIN_BYTE SCHAR_MIN
+#define NPY_MAX_UBYTE UCHAR_MAX
+#define NPY_MAX_SHORT SHRT_MAX
+#define NPY_MIN_SHORT SHRT_MIN
+#define NPY_MAX_USHORT USHRT_MAX
+#define NPY_MAX_INT   INT_MAX
+#ifndef INT_MIN
+#define INT_MIN (-INT_MAX - 1)
+#endif
+#define NPY_MIN_INT   INT_MIN
+#define NPY_MAX_UINT  UINT_MAX
+#define NPY_MAX_LONG  LONG_MAX
+#define NPY_MIN_LONG  LONG_MIN
+#define NPY_MAX_ULONG  ULONG_MAX
+
+#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT)
+#define NPY_BITSOF_CHAR CHAR_BIT
+#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT)
+#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT)
+#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT)
+#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT)
+#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT)
+#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT)
+#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT)
+#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT)
+#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT)
+#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT)
+#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT)
+#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT)
+
+#if NPY_BITSOF_LONG == 8
+#define NPY_INT8 NPY_LONG
+#define NPY_UINT8 NPY_ULONG
+        typedef long npy_int8;
+        typedef unsigned long npy_uint8;
+#define PyInt8ScalarObject PyLongScalarObject
+#define PyInt8ArrType_Type PyLongArrType_Type
+#define PyUInt8ScalarObject PyULongScalarObject
+#define PyUInt8ArrType_Type PyULongArrType_Type
+#define NPY_INT8_FMT NPY_LONG_FMT
+#define NPY_UINT8_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 16
+#define NPY_INT16 NPY_LONG
+#define NPY_UINT16 NPY_ULONG
+        typedef long npy_int16;
+        typedef unsigned long npy_uint16;
+#define PyInt16ScalarObject PyLongScalarObject
+#define PyInt16ArrType_Type PyLongArrType_Type
+#define PyUInt16ScalarObject PyULongScalarObject
+#define PyUInt16ArrType_Type PyULongArrType_Type
+#define NPY_INT16_FMT NPY_LONG_FMT
+#define NPY_UINT16_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 32
+#define NPY_INT32 NPY_LONG
+#define NPY_UINT32 NPY_ULONG
+        typedef long npy_int32;
+        typedef unsigned long npy_uint32;
+        typedef unsigned long npy_ucs4;
+#define PyInt32ScalarObject PyLongScalarObject
+#define PyInt32ArrType_Type PyLongArrType_Type
+#define PyUInt32ScalarObject PyULongScalarObject
+#define PyUInt32ArrType_Type PyULongArrType_Type
+#define NPY_INT32_FMT NPY_LONG_FMT
+#define NPY_UINT32_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 64
+#define NPY_INT64 NPY_LONG
+#define NPY_UINT64 NPY_ULONG
+        typedef long npy_int64;
+        typedef unsigned long npy_uint64;
+#define PyInt64ScalarObject PyLongScalarObject
+#define PyInt64ArrType_Type PyLongArrType_Type
+#define PyUInt64ScalarObject PyULongScalarObject
+#define PyUInt64ArrType_Type PyULongArrType_Type
+#define NPY_INT64_FMT NPY_LONG_FMT
+#define NPY_UINT64_FMT NPY_ULONG_FMT
+#define MyPyLong_FromInt64 PyLong_FromLong
+#define MyPyLong_AsInt64 PyLong_AsLong
+#elif NPY_BITSOF_LONG == 128
+#define NPY_INT128 NPY_LONG
+#define NPY_UINT128 NPY_ULONG
+        typedef long npy_int128;
+        typedef unsigned long npy_uint128;
+#define PyInt128ScalarObject PyLongScalarObject
+#define PyInt128ArrType_Type PyLongArrType_Type
+#define PyUInt128ScalarObject PyULongScalarObject
+#define PyUInt128ArrType_Type PyULongArrType_Type
+#define NPY_INT128_FMT NPY_LONG_FMT
+#define NPY_UINT128_FMT NPY_ULONG_FMT
+#endif
+
+#if NPY_BITSOF_LONGLONG == 8
+#  ifndef NPY_INT8
+#    define NPY_INT8 NPY_LONGLONG
+#    define NPY_UINT8 NPY_ULONGLONG
+        typedef npy_longlong npy_int8;
+        typedef npy_ulonglong npy_uint8;
+#    define PyInt8ScalarObject PyLongLongScalarObject
+#    define PyInt8ArrType_Type PyLongLongArrType_Type
+#    define PyUInt8ScalarObject PyULongLongScalarObject
+#    define PyUInt8ArrType_Type PyULongLongArrType_Type
+#define NPY_INT8_FMT NPY_LONGLONG_FMT
+#define NPY_UINT8_FMT NPY_ULONGLONG_FMT
+#  endif
+#  define NPY_MAX_LONGLONG NPY_MAX_INT8
+#  define NPY_MIN_LONGLONG NPY_MIN_INT8
+#  define NPY_MAX_ULONGLONG NPY_MAX_UINT8
+#elif NPY_BITSOF_LONGLONG == 16
+#  ifndef NPY_INT16
+#    define NPY_INT16 NPY_LONGLONG
+#    define NPY_UINT16 NPY_ULONGLONG
+        typedef npy_longlong npy_int16;
+        typedef npy_ulonglong npy_uint16;
+#    define PyInt16ScalarObject PyLongLongScalarObject
+#    define PyInt16ArrType_Type PyLongLongArrType_Type
+#    define PyUInt16ScalarObject PyULongLongScalarObject
+#    define PyUInt16ArrType_Type PyULongLongArrType_Type
+#define NPY_INT16_FMT NPY_LONGLONG_FMT
+#define NPY_UINT16_FMT NPY_ULONGLONG_FMT
+#  endif
+#  define NPY_MAX_LONGLONG NPY_MAX_INT16
+#  define NPY_MIN_LONGLONG NPY_MIN_INT16
+#  define NPY_MAX_ULONGLONG NPY_MAX_UINT16
+#elif NPY_BITSOF_LONGLONG == 32
+#  ifndef NPY_INT32
+#    define NPY_INT32 NPY_LONGLONG
+#    define NPY_UINT32 NPY_ULONGLONG
+        typedef npy_longlong npy_int32;
+        typedef npy_ulonglong npy_uint32;
+        typedef npy_ulonglong npy_ucs4;
+#    define PyInt32ScalarObject PyLongLongScalarObject
+#    define PyInt32ArrType_Type PyLongLongArrType_Type
+#    define PyUInt32ScalarObject PyULongLongScalarObject
+#    define PyUInt32ArrType_Type PyULongLongArrType_Type
+#define NPY_INT32_FMT NPY_LONGLONG_FMT
+#define NPY_UINT32_FMT NPY_ULONGLONG_FMT
+#  endif
+#  define NPY_MAX_LONGLONG NPY_MAX_INT32
+#  define NPY_MIN_LONGLONG NPY_MIN_INT32
+#  define NPY_MAX_ULONGLONG NPY_MAX_UINT32
+#elif NPY_BITSOF_LONGLONG == 64
+#  ifndef NPY_INT64
+#    define NPY_INT64 NPY_LONGLONG
+#    define NPY_UINT64 NPY_ULONGLONG
+        typedef npy_longlong npy_int64;
+        typedef npy_ulonglong npy_uint64;
+#    define PyInt64ScalarObject PyLongLongScalarObject
+#    define PyInt64ArrType_Type PyLongLongArrType_Type
+#    define PyUInt64ScalarObject PyULongLongScalarObject
+#    define PyUInt64ArrType_Type PyULongLongArrType_Type
+#define NPY_INT64_FMT NPY_LONGLONG_FMT
+#define NPY_UINT64_FMT NPY_ULONGLONG_FMT
+#    define MyPyLong_FromInt64 PyLong_FromLongLong
+#    define MyPyLong_AsInt64 PyLong_AsLongLong
+#  endif
+#  define NPY_MAX_LONGLONG NPY_MAX_INT64
+#  define NPY_MIN_LONGLONG NPY_MIN_INT64
+#  define NPY_MAX_ULONGLONG NPY_MAX_UINT64
+#elif NPY_BITSOF_LONGLONG == 128
+#  ifndef NPY_INT128
+#    define NPY_INT128 NPY_LONGLONG
+#    define NPY_UINT128 NPY_ULONGLONG
+        typedef npy_longlong npy_int128;
+        typedef npy_ulonglong npy_uint128;
+#    define PyInt128ScalarObject PyLongLongScalarObject
+#    define PyInt128ArrType_Type PyLongLongArrType_Type
+#    define PyUInt128ScalarObject PyULongLongScalarObject
+#    define PyUInt128ArrType_Type PyULongLongArrType_Type
+#define NPY_INT128_FMT NPY_LONGLONG_FMT
+#define NPY_UINT128_FMT NPY_ULONGLONG_FMT
+#  endif
+#  define NPY_MAX_LONGLONG NPY_MAX_INT128
+#  define NPY_MIN_LONGLONG NPY_MIN_INT128
+#  define NPY_MAX_ULONGLONG NPY_MAX_UINT128
+#elif NPY_BITSOF_LONGLONG == 256
+#  define NPY_INT256 NPY_LONGLONG
+#  define NPY_UINT256 NPY_ULONGLONG
+        typedef npy_longlong npy_int256;
+        typedef npy_ulonglong npy_uint256;
+#  define PyInt256ScalarObject PyLongLongScalarObject
+#  define PyInt256ArrType_Type PyLongLongArrType_Type
+#  define PyUInt256ScalarObject PyULongLongScalarObject
+#  define PyUInt256ArrType_Type PyULongLongArrType_Type
+#define NPY_INT256_FMT NPY_LONGLONG_FMT
+#define NPY_UINT256_FMT NPY_ULONGLONG_FMT
+#  define NPY_MAX_LONGLONG NPY_MAX_INT256
+#  define NPY_MIN_LONGLONG NPY_MIN_INT256
+#  define NPY_MAX_ULONGLONG NPY_MAX_UINT256
+#endif
+
+#if NPY_BITSOF_INT == 8
+#ifndef NPY_INT8
+#define NPY_INT8 NPY_INT
+#define NPY_UINT8 NPY_UINT
+        typedef int npy_int8;
+        typedef unsigned int npy_uint8;
+#    define PyInt8ScalarObject PyIntScalarObject
+#    define PyInt8ArrType_Type PyIntArrType_Type
+#    define PyUInt8ScalarObject PyUIntScalarObject
+#    define PyUInt8ArrType_Type PyUIntArrType_Type
+#define NPY_INT8_FMT NPY_INT_FMT
+#define NPY_UINT8_FMT NPY_UINT_FMT
+#endif
+#elif NPY_BITSOF_INT == 16
+#ifndef NPY_INT16
+#define NPY_INT16 NPY_INT
+#define NPY_UINT16 NPY_UINT
+        typedef int npy_int16;
+        typedef unsigned int npy_uint16;
+#    define PyInt16ScalarObject PyIntScalarObject
+#    define PyInt16ArrType_Type PyIntArrType_Type
+    define PyUInt16ScalarObject PyUIntScalarObject
+    define PyUInt16ArrType_Type PyUIntArrType_Type
+#define NPY_INT16_FMT NPY_INT_FMT
+#define NPY_UINT16_FMT NPY_UINT_FMT
+#endif
+#elif NPY_BITSOF_INT == 32
+#ifndef NPY_INT32
+#define NPY_INT32 NPY_INT
+#define NPY_UINT32 NPY_UINT
+        typedef int npy_int32;
+        typedef unsigned int npy_uint32;
+        typedef unsigned int npy_ucs4;
+#    define PyInt32ScalarObject PyIntScalarObject
+#    define PyInt32ArrType_Type PyIntArrType_Type
+#    define PyUInt32ScalarObject PyUIntScalarObject
+#    define PyUInt32ArrType_Type PyUIntArrType_Type
+#define NPY_INT32_FMT NPY_INT_FMT
+#define NPY_UINT32_FMT NPY_UINT_FMT
+#endif
+#elif NPY_BITSOF_INT == 64
+#ifndef NPY_INT64
+#define NPY_INT64 NPY_INT
+#define NPY_UINT64 NPY_UINT
+        typedef int npy_int64;
+        typedef unsigned int npy_uint64;
+#    define PyInt64ScalarObject PyIntScalarObject
+#    define PyInt64ArrType_Type PyIntArrType_Type
+#    define PyUInt64ScalarObject PyUIntScalarObject
+#    define PyUInt64ArrType_Type PyUIntArrType_Type
+#define NPY_INT64_FMT NPY_INT_FMT
+#define NPY_UINT64_FMT NPY_UINT_FMT
+#    define MyPyLong_FromInt64 PyLong_FromLong
+#    define MyPyLong_AsInt64 PyLong_AsLong
+#endif
+#elif NPY_BITSOF_INT == 128
+#ifndef NPY_INT128
+#define NPY_INT128 NPY_INT
+#define NPY_UINT128 NPY_UINT
+        typedef int npy_int128;
+        typedef unsigned int npy_uint128;
+#    define PyInt128ScalarObject PyIntScalarObject
+#    define PyInt128ArrType_Type PyIntArrType_Type
+#    define PyUInt128ScalarObject PyUIntScalarObject
+#    define PyUInt128ArrType_Type PyUIntArrType_Type
+#define NPY_INT128_FMT NPY_INT_FMT
+#define NPY_UINT128_FMT NPY_UINT_FMT
+#endif
+#endif
+
+#if NPY_BITSOF_SHORT == 8
+#ifndef NPY_INT8
+#define NPY_INT8 NPY_SHORT
+#define NPY_UINT8 NPY_USHORT
+        typedef short npy_int8;
+        typedef unsigned short npy_uint8;
+#    define PyInt8ScalarObject PyShortScalarObject
+#    define PyInt8ArrType_Type PyShortArrType_Type
+#    define PyUInt8ScalarObject PyUShortScalarObject
+#    define PyUInt8ArrType_Type PyUShortArrType_Type
+#define NPY_INT8_FMT NPY_SHORT_FMT
+#define NPY_UINT8_FMT NPY_USHORT_FMT
+#endif
+#elif NPY_BITSOF_SHORT == 16
+#ifndef NPY_INT16
+#define NPY_INT16 NPY_SHORT
+#define NPY_UINT16 NPY_USHORT
+        typedef short npy_int16;
+        typedef unsigned short npy_uint16;
+#    define PyInt16ScalarObject PyShortScalarObject
+#    define PyInt16ArrType_Type PyShortArrType_Type
+#    define PyUInt16ScalarObject PyUShortScalarObject
+#    define PyUInt16ArrType_Type PyUShortArrType_Type
+#define NPY_INT16_FMT NPY_SHORT_FMT
+#define NPY_UINT16_FMT NPY_USHORT_FMT
+#endif
+#elif NPY_BITSOF_SHORT == 32
+#ifndef NPY_INT32
+#define NPY_INT32 NPY_SHORT
+#define NPY_UINT32 NPY_USHORT
+        typedef short npy_int32;
+        typedef unsigned short npy_uint32;
+        typedef unsigned short npy_ucs4;
+#    define PyInt32ScalarObject PyShortScalarObject
+#    define PyInt32ArrType_Type PyShortArrType_Type
+#    define PyUInt32ScalarObject PyUShortScalarObject
+#    define PyUInt32ArrType_Type PyUShortArrType_Type
+#define NPY_INT32_FMT NPY_SHORT_FMT
+#define NPY_UINT32_FMT NPY_USHORT_FMT
+#endif
+#elif NPY_BITSOF_SHORT == 64
+#ifndef NPY_INT64
+#define NPY_INT64 NPY_SHORT
+#define NPY_UINT64 NPY_USHORT
+        typedef short npy_int64;
+        typedef unsigned short npy_uint64;
+#    define PyInt64ScalarObject PyShortScalarObject
+#    define PyInt64ArrType_Type PyShortArrType_Type
+#    define PyUInt64ScalarObject PyUShortScalarObject
+#    define PyUInt64ArrType_Type PyUShortArrType_Type
+#define NPY_INT64_FMT NPY_SHORT_FMT
+#define NPY_UINT64_FMT NPY_USHORT_FMT
+#    define MyPyLong_FromInt64 PyLong_FromLong
+#    define MyPyLong_AsInt64 PyLong_AsLong
+#endif
+#elif NPY_BITSOF_SHORT == 128
+#ifndef NPY_INT128
+#define NPY_INT128 NPY_SHORT
+#define NPY_UINT128 NPY_USHORT
+        typedef short npy_int128;
+        typedef unsigned short npy_uint128;
+#    define PyInt128ScalarObject PyShortScalarObject
+#    define PyInt128ArrType_Type PyShortArrType_Type
+#    define PyUInt128ScalarObject PyUShortScalarObject
+#    define PyUInt128ArrType_Type PyUShortArrType_Type
+#define NPY_INT128_FMT NPY_SHORT_FMT
+#define NPY_UINT128_FMT NPY_USHORT_FMT
+#endif
+#endif
+
+
+#if NPY_BITSOF_CHAR == 8
+#ifndef NPY_INT8
+#define NPY_INT8 NPY_BYTE
+#define NPY_UINT8 NPY_UBYTE
+        typedef signed char npy_int8;
+        typedef unsigned char npy_uint8;
+#    define PyInt8ScalarObject PyByteScalarObject
+#    define PyInt8ArrType_Type PyByteArrType_Type
+#    define PyUInt8ScalarObject PyUByteScalarObject
+#    define PyUInt8ArrType_Type PyUByteArrType_Type
+#define NPY_INT8_FMT NPY_BYTE_FMT
+#define NPY_UINT8_FMT NPY_UBYTE_FMT
+#endif
+#elif NPY_BITSOF_CHAR == 16
+#ifndef NPY_INT16
+#define NPY_INT16 NPY_BYTE
+#define NPY_UINT16 NPY_UBYTE
+        typedef signed char npy_int16;
+        typedef unsigned char npy_uint16;
+#    define PyInt16ScalarObject PyByteScalarObject
+#    define PyInt16ArrType_Type PyByteArrType_Type
+#    define PyUInt16ScalarObject PyUByteScalarObject
+#    define PyUInt16ArrType_Type PyUByteArrType_Type
+#define NPY_INT16_FMT NPY_BYTE_FMT
+#define NPY_UINT16_FMT NPY_UBYTE_FMT
+#endif
+#elif NPY_BITSOF_CHAR == 32
+#ifndef NPY_INT32
+#define NPY_INT32 NPY_BYTE
+#define NPY_UINT32 NPY_UBYTE
+        typedef signed char npy_int32;
+        typedef unsigned char npy_uint32;
+        typedef unsigned char npy_ucs4;
+#    define PyInt32ScalarObject PyByteScalarObject
+#    define PyInt32ArrType_Type PyByteArrType_Type
+#    define PyUInt32ScalarObject PyUByteScalarObject
+#    define PyUInt32ArrType_Type PyUByteArrType_Type
+#define NPY_INT32_FMT NPY_BYTE_FMT
+#define NPY_UINT32_FMT NPY_UBYTE_FMT
+#endif
+#elif NPY_BITSOF_CHAR == 64
+#ifndef NPY_INT64
+#define NPY_INT64 NPY_BYTE
+#define NPY_UINT64 NPY_UBYTE
+        typedef signed char npy_int64;
+        typedef unsigned char npy_uint64;
+#    define PyInt64ScalarObject PyByteScalarObject
+#    define PyInt64ArrType_Type PyByteArrType_Type
+#    define PyUInt64ScalarObject PyUByteScalarObject
+#    define PyUInt64ArrType_Type PyUByteArrType_Type
+#define NPY_INT64_FMT NPY_BYTE_FMT
+#define NPY_UINT64_FMT NPY_UBYTE_FMT
+#    define MyPyLong_FromInt64 PyLong_FromLong
+#    define MyPyLong_AsInt64 PyLong_AsLong
+#endif
+#elif NPY_BITSOF_CHAR == 128
+#ifndef NPY_INT128
+#define NPY_INT128 NPY_BYTE
+#define NPY_UINT128 NPY_UBYTE
+        typedef signed char npy_int128;
+        typedef unsigned char npy_uint128;
+#    define PyInt128ScalarObject PyByteScalarObject
+#    define PyInt128ArrType_Type PyByteArrType_Type
+#    define PyUInt128ScalarObject PyUByteScalarObject
+#    define PyUInt128ArrType_Type PyUByteArrType_Type
+#define NPY_INT128_FMT NPY_BYTE_FMT
+#define NPY_UINT128_FMT NPY_UBYTE_FMT
+#endif
+#endif
+
+
+
+#if NPY_BITSOF_DOUBLE == 32
+#ifndef NPY_FLOAT32
+#define NPY_FLOAT32 NPY_DOUBLE
+#define NPY_COMPLEX64 NPY_CDOUBLE
+        typedef double npy_float32;
+        typedef npy_cdouble npy_complex64;
+#    define PyFloat32ScalarObject PyDoubleScalarObject
+#    define PyComplex64ScalarObject PyCDoubleScalarObject
+#    define PyFloat32ArrType_Type PyDoubleArrType_Type
+#    define PyComplex64ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 64
+#ifndef NPY_FLOAT64
+#define NPY_FLOAT64 NPY_DOUBLE
+#define NPY_COMPLEX128 NPY_CDOUBLE
+        typedef double npy_float64;
+        typedef npy_cdouble npy_complex128;
+#    define PyFloat64ScalarObject PyDoubleScalarObject
+#    define PyComplex128ScalarObject PyCDoubleScalarObject
+#    define PyFloat64ArrType_Type PyDoubleArrType_Type
+#    define PyComplex128ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 80
+#ifndef NPY_FLOAT80
+#define NPY_FLOAT80 NPY_DOUBLE
+#define NPY_COMPLEX160 NPY_CDOUBLE
+        typedef double npy_float80;
+        typedef npy_cdouble npy_complex160;
+#    define PyFloat80ScalarObject PyDoubleScalarObject
+#    define PyComplex160ScalarObject PyCDoubleScalarObject
+#    define PyFloat80ArrType_Type PyDoubleArrType_Type
+#    define PyComplex160ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 96
+#ifndef NPY_FLOAT96
+#define NPY_FLOAT96 NPY_DOUBLE
+#define NPY_COMPLEX192 NPY_CDOUBLE
+        typedef double npy_float96;
+        typedef npy_cdouble npy_complex192;
+#    define PyFloat96ScalarObject PyDoubleScalarObject
+#    define PyComplex192ScalarObject PyCDoubleScalarObject
+#    define PyFloat96ArrType_Type PyDoubleArrType_Type
+#    define PyComplex192ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 128
+#ifndef NPY_FLOAT128
+#define NPY_FLOAT128 NPY_DOUBLE
+#define NPY_COMPLEX256 NPY_CDOUBLE
+        typedef double npy_float128;
+        typedef npy_cdouble npy_complex256;
+#    define PyFloat128ScalarObject PyDoubleScalarObject
+#    define PyComplex256ScalarObject PyCDoubleScalarObject
+#    define PyFloat128ArrType_Type PyDoubleArrType_Type
+#    define PyComplex256ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT
+#endif
+#endif
+
+
+
+#if NPY_BITSOF_FLOAT == 32
+#ifndef NPY_FLOAT32
+#define NPY_FLOAT32 NPY_FLOAT
+#define NPY_COMPLEX64 NPY_CFLOAT
+        typedef float npy_float32;
+        typedef npy_cfloat npy_complex64;
+#    define PyFloat32ScalarObject PyFloatScalarObject
+#    define PyComplex64ScalarObject PyCFloatScalarObject
+#    define PyFloat32ArrType_Type PyFloatArrType_Type
+#    define PyComplex64ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT32_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 64
+#ifndef NPY_FLOAT64
+#define NPY_FLOAT64 NPY_FLOAT
+#define NPY_COMPLEX128 NPY_CFLOAT
+        typedef float npy_float64;
+        typedef npy_cfloat npy_complex128;
+#    define PyFloat64ScalarObject PyFloatScalarObject
+#    define PyComplex128ScalarObject PyCFloatScalarObject
+#    define PyFloat64ArrType_Type PyFloatArrType_Type
+#    define PyComplex128ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT64_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 80
+#ifndef NPY_FLOAT80
+#define NPY_FLOAT80 NPY_FLOAT
+#define NPY_COMPLEX160 NPY_CFLOAT
+        typedef float npy_float80;
+        typedef npy_cfloat npy_complex160;
+#    define PyFloat80ScalarObject PyFloatScalarObject
+#    define PyComplex160ScalarObject PyCFloatScalarObject
+#    define PyFloat80ArrType_Type PyFloatArrType_Type
+#    define PyComplex160ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT80_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 96
+#ifndef NPY_FLOAT96
+#define NPY_FLOAT96 NPY_FLOAT
+#define NPY_COMPLEX192 NPY_CFLOAT
+        typedef float npy_float96;
+        typedef npy_cfloat npy_complex192;
+#    define PyFloat96ScalarObject PyFloatScalarObject
+#    define PyComplex192ScalarObject PyCFloatScalarObject
+#    define PyFloat96ArrType_Type PyFloatArrType_Type
+#    define PyComplex192ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT96_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 128
+#ifndef NPY_FLOAT128
+#define NPY_FLOAT128 NPY_FLOAT
+#define NPY_COMPLEX256 NPY_CFLOAT
+        typedef float npy_float128;
+        typedef npy_cfloat npy_complex256;
+#    define PyFloat128ScalarObject PyFloatScalarObject
+#    define PyComplex256ScalarObject PyCFloatScalarObject
+#    define PyFloat128ArrType_Type PyFloatArrType_Type
+#    define PyComplex256ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT128_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT
+#endif
+#endif
+
+/* half/float16 isn't a floating-point type in C */
+#define NPY_FLOAT16 NPY_HALF
+typedef npy_uint16 npy_half;
+typedef npy_half npy_float16;
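+
+/*
+ * Illustrative note: an npy_half holds raw IEEE 754 binary16 bits, so
+ * values are built and inspected as bit patterns; conversions and
+ * arithmetic go through the npy_half_* helpers in numpy/halffloat.h.
+ *
+ *   npy_half one  = 0x3C00;   // 1.0 in binary16
+ *   npy_half nanh = 0x7E00;   // a quiet NaN in binary16
+ */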
+
+#if NPY_BITSOF_LONGDOUBLE == 32
+#ifndef NPY_FLOAT32
+#define NPY_FLOAT32 NPY_LONGDOUBLE
+#define NPY_COMPLEX64 NPY_CLONGDOUBLE
+        typedef npy_longdouble npy_float32;
+        typedef npy_clongdouble npy_complex64;
+#    define PyFloat32ScalarObject PyLongDoubleScalarObject
+#    define PyComplex64ScalarObject PyCLongDoubleScalarObject
+#    define PyFloat32ArrType_Type PyLongDoubleArrType_Type
+#    define PyComplex64ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 64
+#ifndef NPY_FLOAT64
+#define NPY_FLOAT64 NPY_LONGDOUBLE
+#define NPY_COMPLEX128 NPY_CLONGDOUBLE
+        typedef npy_longdouble npy_float64;
+        typedef npy_clongdouble npy_complex128;
+#    define PyFloat64ScalarObject PyLongDoubleScalarObject
+#    define PyComplex128ScalarObject PyCLongDoubleScalarObject
+#    define PyFloat64ArrType_Type PyLongDoubleArrType_Type
+#    define PyComplex128ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 80
+#ifndef NPY_FLOAT80
+#define NPY_FLOAT80 NPY_LONGDOUBLE
+#define NPY_COMPLEX160 NPY_CLONGDOUBLE
+        typedef npy_longdouble npy_float80;
+        typedef npy_clongdouble npy_complex160;
+#    define PyFloat80ScalarObject PyLongDoubleScalarObject
+#    define PyComplex160ScalarObject PyCLongDoubleScalarObject
+#    define PyFloat80ArrType_Type PyLongDoubleArrType_Type
+#    define PyComplex160ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 96
+#ifndef NPY_FLOAT96
+#define NPY_FLOAT96 NPY_LONGDOUBLE
+#define NPY_COMPLEX192 NPY_CLONGDOUBLE
+        typedef npy_longdouble npy_float96;
+        typedef npy_clongdouble npy_complex192;
+#    define PyFloat96ScalarObject PyLongDoubleScalarObject
+#    define PyComplex192ScalarObject PyCLongDoubleScalarObject
+#    define PyFloat96ArrType_Type PyLongDoubleArrType_Type
+#    define PyComplex192ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 128
+#ifndef NPY_FLOAT128
+#define NPY_FLOAT128 NPY_LONGDOUBLE
+#define NPY_COMPLEX256 NPY_CLONGDOUBLE
+        typedef npy_longdouble npy_float128;
+        typedef npy_clongdouble npy_complex256;
+#    define PyFloat128ScalarObject PyLongDoubleScalarObject
+#    define PyComplex256ScalarObject PyCLongDoubleScalarObject
+#    define PyFloat128ArrType_Type PyLongDoubleArrType_Type
+#    define PyComplex256ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 256
+#define NPY_FLOAT256 NPY_LONGDOUBLE
+#define NPY_COMPLEX512 NPY_CLONGDOUBLE
+        typedef npy_longdouble npy_float256;
+        typedef npy_clongdouble npy_complex512;
+#    define PyFloat256ScalarObject PyLongDoubleScalarObject
+#    define PyComplex512ScalarObject PyCLongDoubleScalarObject
+#    define PyFloat256ArrType_Type PyLongDoubleArrType_Type
+#    define PyComplex512ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT
+#endif
+
+/* datetime typedefs */
+typedef npy_int64 npy_timedelta;
+typedef npy_int64 npy_datetime;
+#define NPY_DATETIME_FMT NPY_INT64_FMT
+#define NPY_TIMEDELTA_FMT NPY_INT64_FMT
+
+/* End of typedefs for numarray style bit-width names */
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_cpu.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_cpu.h
new file mode 100644
index 00000000..a19f8e6b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_cpu.h
@@ -0,0 +1,129 @@
+/*
+ * This sets (target) CPU-specific macros:
+ *      - Possible values:
+ *              NPY_CPU_X86
+ *              NPY_CPU_AMD64
+ *              NPY_CPU_PPC
+ *              NPY_CPU_PPC64
+ *              NPY_CPU_PPC64LE
+ *              NPY_CPU_SPARC
+ *              NPY_CPU_S390
+ *              NPY_CPU_IA64
+ *              NPY_CPU_HPPA
+ *              NPY_CPU_ALPHA
+ *              NPY_CPU_ARMEL
+ *              NPY_CPU_ARMEB
+ *              NPY_CPU_SH_LE
+ *              NPY_CPU_SH_BE
+ *              NPY_CPU_ARCEL
+ *              NPY_CPU_ARCEB
+ *              NPY_CPU_RISCV64
+ *              NPY_CPU_LOONGARCH
+ *              NPY_CPU_WASM
+ */
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_
+
+#include "numpyconfig.h"
+
+#if defined( __i386__ ) || defined(i386) || defined(_M_IX86)
+    /*
+     * __i386__ is defined by gcc and Intel compiler on Linux,
+     * _M_IX86 by VS compiler,
+     * i386 by Sun compilers on opensolaris at least
+     */
+    #define NPY_CPU_X86
+#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64)
+    /*
+     * both __x86_64__ and __amd64__ are defined by gcc
+     * __x86_64 defined by sun compiler on opensolaris at least
+     * _M_AMD64 defined by MS compiler
+     */
+    #define NPY_CPU_AMD64
+#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
+    #define NPY_CPU_PPC64LE
+#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)
+    #define NPY_CPU_PPC64
+#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC)
+    /*
+     * __ppc__ is defined by gcc; __powerpc__ has also been seen in the
+     * wild, and _ARCH_PPC is used by at least gcc on AIX.
+     * Since __powerpc__ and _ARCH_PPC are also defined on PPC64, check
+     * for those specifically first before defaulting to ppc.
+     */
+    #define NPY_CPU_PPC
+#elif defined(__sparc__) || defined(__sparc)
+    /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */
+    #define NPY_CPU_SPARC
+#elif defined(__s390__)
+    #define NPY_CPU_S390
+#elif defined(__ia64)
+    #define NPY_CPU_IA64
+#elif defined(__hppa)
+    #define NPY_CPU_HPPA
+#elif defined(__alpha__)
+    #define NPY_CPU_ALPHA
+#elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM64)
+    /* _M_ARM64 is defined in MSVC for ARM64 compilation on Windows */
+    #if defined(__ARMEB__) || defined(__AARCH64EB__)
+        #if defined(__ARM_32BIT_STATE)
+            #define NPY_CPU_ARMEB_AARCH32
+        #elif defined(__ARM_64BIT_STATE)
+            #define NPY_CPU_ARMEB_AARCH64
+        #else
+            #define NPY_CPU_ARMEB
+        #endif
+    #elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
+        #if defined(__ARM_32BIT_STATE)
+            #define NPY_CPU_ARMEL_AARCH32
+        #elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) || defined(__AARCH64EL__)
+            #define NPY_CPU_ARMEL_AARCH64
+        #else
+            #define NPY_CPU_ARMEL
+        #endif
+    #else
+        #error Unknown ARM CPU, please report this to numpy maintainers with \
+            information about your platform (OS, CPU and compiler)
+    #endif
+#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
+    #define NPY_CPU_SH_LE
+#elif defined(__sh__) && defined(__BIG_ENDIAN__)
+    #define NPY_CPU_SH_BE
+#elif defined(__MIPSEL__)
+    #define NPY_CPU_MIPSEL
+#elif defined(__MIPSEB__)
+    #define NPY_CPU_MIPSEB
+#elif defined(__or1k__)
+    #define NPY_CPU_OR1K
+#elif defined(__mc68000__)
+    #define NPY_CPU_M68K
+#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
+    #define NPY_CPU_ARCEL
+#elif defined(__arc__) && defined(__BIG_ENDIAN__)
+    #define NPY_CPU_ARCEB
+#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64
+    #define NPY_CPU_RISCV64
+#elif defined(__loongarch__)
+    #define NPY_CPU_LOONGARCH
+#elif defined(__EMSCRIPTEN__)
+    /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */
+    #define NPY_CPU_WASM
+#else
+    #error Unknown CPU, please report this to numpy maintainers with \
+    information about your platform (OS, CPU and compiler)
+#endif
+
+/*
+ * Except for the architectures listed below, memory access is limited to
+ * the natural alignment of data types; otherwise it may lead to a bus
+ * error or a performance regression.
+ * For more details about unaligned access, see
+ * https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt.
+ */
+#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__)
+    #define NPY_ALIGNMENT_REQUIRED 0
+#endif
+#ifndef NPY_ALIGNMENT_REQUIRED
+    #define NPY_ALIGNMENT_REQUIRED 1
+#endif
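+
+/*
+ * Usage sketch (illustrative): guard unaligned loads behind the flag.
+ * `load_u32` is a hypothetical helper; it assumes <stdint.h> and
+ * <string.h> are available.
+ *
+ *   static uint32_t load_u32(const char *p) {
+ *   #if NPY_ALIGNMENT_REQUIRED
+ *       uint32_t v; memcpy(&v, p, sizeof v); return v;
+ *   #else
+ *       return *(const uint32_t *)p;
+ *   #endif
+ *   }
+ */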
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_endian.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_endian.h
new file mode 100644
index 00000000..5e58a7f5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_endian.h
@@ -0,0 +1,77 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_
+
+/*
+ * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in
+ * endian.h
+ */
+
+#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H)
+    /* Use endian.h if available */
+
+    #if defined(NPY_HAVE_ENDIAN_H)
+    #include <endian.h>
+    #elif defined(NPY_HAVE_SYS_ENDIAN_H)
+    #include <sys/endian.h>
+    #endif
+
+    #if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN)
+        #define NPY_BYTE_ORDER    BYTE_ORDER
+        #define NPY_LITTLE_ENDIAN LITTLE_ENDIAN
+        #define NPY_BIG_ENDIAN    BIG_ENDIAN
+    #elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN)
+        #define NPY_BYTE_ORDER    _BYTE_ORDER
+        #define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN
+        #define NPY_BIG_ENDIAN    _BIG_ENDIAN
+    #elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
+        #define NPY_BYTE_ORDER    __BYTE_ORDER
+        #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN
+        #define NPY_BIG_ENDIAN    __BIG_ENDIAN
+    #endif
+#endif
+
+#ifndef NPY_BYTE_ORDER
+    /* Set endianness info using target CPU */
+    #include "npy_cpu.h"
+
+    #define NPY_LITTLE_ENDIAN 1234
+    #define NPY_BIG_ENDIAN 4321
+
+    #if defined(NPY_CPU_X86)                  \
+            || defined(NPY_CPU_AMD64)         \
+            || defined(NPY_CPU_IA64)          \
+            || defined(NPY_CPU_ALPHA)         \
+            || defined(NPY_CPU_ARMEL)         \
+            || defined(NPY_CPU_ARMEL_AARCH32) \
+            || defined(NPY_CPU_ARMEL_AARCH64) \
+            || defined(NPY_CPU_SH_LE)         \
+            || defined(NPY_CPU_MIPSEL)        \
+            || defined(NPY_CPU_PPC64LE)       \
+            || defined(NPY_CPU_ARCEL)         \
+            || defined(NPY_CPU_RISCV64)       \
+            || defined(NPY_CPU_LOONGARCH)     \
+            || defined(NPY_CPU_WASM)
+        #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
+
+    #elif defined(NPY_CPU_PPC)                \
+            || defined(NPY_CPU_SPARC)         \
+            || defined(NPY_CPU_S390)          \
+            || defined(NPY_CPU_HPPA)          \
+            || defined(NPY_CPU_PPC64)         \
+            || defined(NPY_CPU_ARMEB)         \
+            || defined(NPY_CPU_ARMEB_AARCH32) \
+            || defined(NPY_CPU_ARMEB_AARCH64) \
+            || defined(NPY_CPU_SH_BE)         \
+            || defined(NPY_CPU_MIPSEB)        \
+            || defined(NPY_CPU_OR1K)          \
+            || defined(NPY_CPU_M68K)          \
+            || defined(NPY_CPU_ARCEB)
+        #define NPY_BYTE_ORDER NPY_BIG_ENDIAN
+
+    #else
+        #error Unknown CPU: can not set endianness
+    #endif
+
+#endif
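+
+/*
+ * Usage sketch (illustrative): branch on the detected byte order at
+ * compile time, e.g. when serializing little-endian data.
+ *
+ *   #if NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
+ *       // bytes are already in wire order
+ *   #else
+ *       // byte-swap before writing
+ *   #endif
+ */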
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_interrupt.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_interrupt.h
new file mode 100644
index 00000000..69a0374d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_interrupt.h
@@ -0,0 +1,56 @@
+/*
+ * This API is only provided because it is part of publicly exported
+ * headers. Its use is considered DEPRECATED, and it will be removed
+ * eventually.
+ * (This includes the _PyArray_SigintHandler and _PyArray_GetSigintBuf
+ * functions, which are, however, public API and not headers.)
+ *
+ * Instead of using these non-threadsafe macros, periodically calling
+ * `PyErr_CheckSignals()` or `PyOS_InterruptOccurred()` will work.
+ * Both of these require holding the GIL, although CPython could add a
+ * version of `PyOS_InterruptOccurred()` which does not.  Such a version
+ * actually exists as private API in Python 3.10 and was backported to
+ * 3.9 and 3.8; see also https://bugs.python.org/issue41037 and
+ * https://github.com/python/cpython/pull/20599.
+ */
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_
+
+#ifndef NPY_NO_SIGNAL
+
+#include <setjmp.h>
+#include <signal.h>
+
+#ifndef sigsetjmp
+
+#define NPY_SIGSETJMP(arg1, arg2) setjmp(arg1)
+#define NPY_SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2)
+#define NPY_SIGJMP_BUF jmp_buf
+
+#else
+
+#define NPY_SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2)
+#define NPY_SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2)
+#define NPY_SIGJMP_BUF sigjmp_buf
+
+#endif
+
+#    define NPY_SIGINT_ON {                                             \
+                   PyOS_sighandler_t _npy_sig_save;                     \
+                   _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \
+                   if (NPY_SIGSETJMP(*((NPY_SIGJMP_BUF *)_PyArray_GetSigintBuf()), \
+                                 1) == 0) {                             \
+
+#    define NPY_SIGINT_OFF }                                      \
+        PyOS_setsig(SIGINT, _npy_sig_save);                       \
+        }
+
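+/*
+ * Usage sketch (illustrative): the pair brackets an interruptible
+ * region; remember this API is deprecated and not thread-safe.
+ *
+ *   NPY_SIGINT_ON
+ *   // ... long-running loop that Ctrl-C may abort ...
+ *   NPY_SIGINT_OFF
+ */
+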
+#else  /* NPY_NO_SIGNAL  */
+
+#define NPY_SIGINT_ON
+#define NPY_SIGINT_OFF
+
+#endif  /* NPY_NO_SIGNAL */
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_math.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_math.h
new file mode 100644
index 00000000..2fcd41eb
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_math.h
@@ -0,0 +1,563 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_
+
+#include <numpy/npy_common.h>
+
+#include <math.h>
+
+/* By adding static inline specifiers to npy_math function definitions when
+   appropriate, the compiler is given the opportunity to optimize them. */
+#if NPY_INLINE_MATH
+#define NPY_INPLACE static inline
+#else
+#define NPY_INPLACE
+#endif
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * NAN- and INFINITY-like macros (same behavior as glibc for NAN, same as
+ * C99 for INFINITY).
+ *
+ * XXX: we should test whether INFINITY and NAN are available on the platform.
+ */
+static inline float __npy_inff(void)
+{
+    const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL};
+    return __bint.__f;
+}
+
+static inline float __npy_nanf(void)
+{
+    const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL};
+    return __bint.__f;
+}
+
+static inline float __npy_pzerof(void)
+{
+    const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL};
+    return __bint.__f;
+}
+
+static inline float __npy_nzerof(void)
+{
+    const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL};
+    return __bint.__f;
+}
+
+#define NPY_INFINITYF __npy_inff()
+#define NPY_NANF __npy_nanf()
+#define NPY_PZEROF __npy_pzerof()
+#define NPY_NZEROF __npy_nzerof()
+
+#define NPY_INFINITY ((npy_double)NPY_INFINITYF)
+#define NPY_NAN ((npy_double)NPY_NANF)
+#define NPY_PZERO ((npy_double)NPY_PZEROF)
+#define NPY_NZERO ((npy_double)NPY_NZEROF)
+
+#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF)
+#define NPY_NANL ((npy_longdouble)NPY_NANF)
+#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF)
+#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF)
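+
+/*
+ * Usage sketch (illustrative): these constants give bit-exact IEEE 754
+ * special values without depending on C99 <math.h> macros.
+ *
+ *   double d = NPY_NAN;       // quiet NaN
+ *   double z = NPY_NZERO;     // negative zero
+ *   float  f = NPY_INFINITYF; // +infinity as binary32
+ */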
+
+/*
+ * Useful constants
+ */
+#define NPY_E         2.718281828459045235360287471352662498  /* e */
+#define NPY_LOG2E     1.442695040888963407359924681001892137  /* log_2 e */
+#define NPY_LOG10E    0.434294481903251827651128918916605082  /* log_10 e */
+#define NPY_LOGE2     0.693147180559945309417232121458176568  /* log_e 2 */
+#define NPY_LOGE10    2.302585092994045684017991454684364208  /* log_e 10 */
+#define NPY_PI        3.141592653589793238462643383279502884  /* pi */
+#define NPY_PI_2      1.570796326794896619231321691639751442  /* pi/2 */
+#define NPY_PI_4      0.785398163397448309615660845819875721  /* pi/4 */
+#define NPY_1_PI      0.318309886183790671537767526745028724  /* 1/pi */
+#define NPY_2_PI      0.636619772367581343075535053490057448  /* 2/pi */
+#define NPY_EULER     0.577215664901532860606512090082402431  /* Euler constant */
+#define NPY_SQRT2     1.414213562373095048801688724209698079  /* sqrt(2) */
+#define NPY_SQRT1_2   0.707106781186547524400844362104849039  /* 1/sqrt(2) */
+
+#define NPY_Ef        2.718281828459045235360287471352662498F /* e */
+#define NPY_LOG2Ef    1.442695040888963407359924681001892137F /* log_2 e */
+#define NPY_LOG10Ef   0.434294481903251827651128918916605082F /* log_10 e */
+#define NPY_LOGE2f    0.693147180559945309417232121458176568F /* log_e 2 */
+#define NPY_LOGE10f   2.302585092994045684017991454684364208F /* log_e 10 */
+#define NPY_PIf       3.141592653589793238462643383279502884F /* pi */
+#define NPY_PI_2f     1.570796326794896619231321691639751442F /* pi/2 */
+#define NPY_PI_4f     0.785398163397448309615660845819875721F /* pi/4 */
+#define NPY_1_PIf     0.318309886183790671537767526745028724F /* 1/pi */
+#define NPY_2_PIf     0.636619772367581343075535053490057448F /* 2/pi */
+#define NPY_EULERf    0.577215664901532860606512090082402431F /* Euler constant */
+#define NPY_SQRT2f    1.414213562373095048801688724209698079F /* sqrt(2) */
+#define NPY_SQRT1_2f  0.707106781186547524400844362104849039F /* 1/sqrt(2) */
+
+#define NPY_El        2.718281828459045235360287471352662498L /* e */
+#define NPY_LOG2El    1.442695040888963407359924681001892137L /* log_2 e */
+#define NPY_LOG10El   0.434294481903251827651128918916605082L /* log_10 e */
+#define NPY_LOGE2l    0.693147180559945309417232121458176568L /* log_e 2 */
+#define NPY_LOGE10l   2.302585092994045684017991454684364208L /* log_e 10 */
+#define NPY_PIl       3.141592653589793238462643383279502884L /* pi */
+#define NPY_PI_2l     1.570796326794896619231321691639751442L /* pi/2 */
+#define NPY_PI_4l     0.785398163397448309615660845819875721L /* pi/4 */
+#define NPY_1_PIl     0.318309886183790671537767526745028724L /* 1/pi */
+#define NPY_2_PIl     0.636619772367581343075535053490057448L /* 2/pi */
+#define NPY_EULERl    0.577215664901532860606512090082402431L /* Euler constant */
+#define NPY_SQRT2l    1.414213562373095048801688724209698079L /* sqrt(2) */
+#define NPY_SQRT1_2l  0.707106781186547524400844362104849039L /* 1/sqrt(2) */
+
+/*
+ * Integer functions.
+ */
+NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b);
+NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b);
+
+NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b);
+NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b);
+NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b);
+NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b);
+NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b);
+NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b);
+
+NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b);
+NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b);
+NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b);
+NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b);
+NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b);
+NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b);
+
+NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b);
+NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b);
+NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b);
+NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b);
+NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b);
+NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b);
+NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b);
+NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b);
+NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b);
+NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b);
+
+NPY_INPLACE uint8_t npy_popcountuhh(npy_ubyte a);
+NPY_INPLACE uint8_t npy_popcountuh(npy_ushort a);
+NPY_INPLACE uint8_t npy_popcountu(npy_uint a);
+NPY_INPLACE uint8_t npy_popcountul(npy_ulong a);
+NPY_INPLACE uint8_t npy_popcountull(npy_ulonglong a);
+NPY_INPLACE uint8_t npy_popcounthh(npy_byte a);
+NPY_INPLACE uint8_t npy_popcounth(npy_short a);
+NPY_INPLACE uint8_t npy_popcount(npy_int a);
+NPY_INPLACE uint8_t npy_popcountl(npy_long a);
+NPY_INPLACE uint8_t npy_popcountll(npy_longlong a);
+
+/*
+ * C99 double math funcs that need fixups or are blocklist-able
+ */
+NPY_INPLACE double npy_sin(double x);
+NPY_INPLACE double npy_cos(double x);
+NPY_INPLACE double npy_tan(double x);
+NPY_INPLACE double npy_hypot(double x, double y);
+NPY_INPLACE double npy_log2(double x);
+NPY_INPLACE double npy_atan2(double x, double y);
+
+/* Mandatory C99 double math funcs, no blocklisting or fixups */
+/* defined for legacy reasons, should be deprecated at some point */
+#define npy_sinh sinh
+#define npy_cosh cosh
+#define npy_tanh tanh
+#define npy_asin asin
+#define npy_acos acos
+#define npy_atan atan
+#define npy_log log
+#define npy_log10 log10
+#define npy_cbrt cbrt
+#define npy_fabs fabs
+#define npy_ceil ceil
+#define npy_fmod fmod
+#define npy_floor floor
+#define npy_expm1 expm1
+#define npy_log1p log1p
+#define npy_acosh acosh
+#define npy_asinh asinh
+#define npy_atanh atanh
+#define npy_rint rint
+#define npy_trunc trunc
+#define npy_exp2 exp2
+#define npy_frexp frexp
+#define npy_ldexp ldexp
+#define npy_copysign copysign
+#define npy_exp exp
+#define npy_sqrt sqrt
+#define npy_pow pow
+#define npy_modf modf
+#define npy_nextafter nextafter
+
+double npy_spacing(double x);
+
+/*
+ * IEEE 754 fpu handling
+ */
+
+/* Use builtins to avoid function calls in tight loops.
+ * Only available if npy_config.h is available (= numpy's own build). */
+#ifdef HAVE___BUILTIN_ISNAN
+    #define npy_isnan(x) __builtin_isnan(x)
+#else
+    #define npy_isnan(x) isnan(x)
+#endif
+
+
+/* only available if npy_config.h is available (= numpy's own build) */
+#ifdef HAVE___BUILTIN_ISFINITE
+    #define npy_isfinite(x) __builtin_isfinite(x)
+#else
+    #define npy_isfinite(x) isfinite((x))
+#endif
+
+/* only available if npy_config.h is available (= numpy's own build) */
+#ifdef HAVE___BUILTIN_ISINF
+    #define npy_isinf(x) __builtin_isinf(x)
+#else
+    #define npy_isinf(x) isinf((x))
+#endif
+
+#define npy_signbit(x) signbit((x))
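+
+/*
+ * Usage sketch (illustrative): classify a value with the portable
+ * wrappers declared above (check NaN first, since NaN carries a sign
+ * bit too).  `compute` is a hypothetical input source.
+ *
+ *   double x = compute();
+ *   if (npy_isnan(x))        { ... }   // not-a-number
+ *   else if (npy_isinf(x))   { ... }   // +/- infinity
+ *   else if (npy_signbit(x)) { ... }   // negative (including -0.0)
+ */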
+
+/*
+ * float C99 math funcs that need fixups or are blocklist-able
+ */
+NPY_INPLACE float npy_sinf(float x);
+NPY_INPLACE float npy_cosf(float x);
+NPY_INPLACE float npy_tanf(float x);
+NPY_INPLACE float npy_expf(float x);
+NPY_INPLACE float npy_sqrtf(float x);
+NPY_INPLACE float npy_hypotf(float x, float y);
+NPY_INPLACE float npy_log2f(float x);
+NPY_INPLACE float npy_atan2f(float x, float y);
+NPY_INPLACE float npy_powf(float x, float y);
+NPY_INPLACE float npy_modff(float x, float* y);
+
+/* Mandatory C99 float math funcs, no blocklisting or fixups */
+/* defined for legacy reasons, should be deprecated at some point */
+
+#define npy_sinhf sinhf
+#define npy_coshf coshf
+#define npy_tanhf tanhf
+#define npy_asinf asinf
+#define npy_acosf acosf
+#define npy_atanf atanf
+#define npy_logf logf
+#define npy_log10f log10f
+#define npy_cbrtf cbrtf
+#define npy_fabsf fabsf
+#define npy_ceilf ceilf
+#define npy_fmodf fmodf
+#define npy_floorf floorf
+#define npy_expm1f expm1f
+#define npy_log1pf log1pf
+#define npy_asinhf asinhf
+#define npy_acoshf acoshf
+#define npy_atanhf atanhf
+#define npy_rintf rintf
+#define npy_truncf truncf
+#define npy_exp2f exp2f
+#define npy_frexpf frexpf
+#define npy_ldexpf ldexpf
+#define npy_copysignf copysignf
+#define npy_nextafterf nextafterf
+
+float npy_spacingf(float x);
+
+/*
+ * long double C99 math funcs that need fixups or are blocklist-able
+ */
+NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y);
+
+/* Mandatory C99 long double math funcs, no blocklisting or fixups */
+/* defined for legacy reasons, should be deprecated at some point */
+#define npy_sinhl sinhl
+#define npy_coshl coshl
+#define npy_tanhl tanhl
+#define npy_fabsl fabsl
+#define npy_floorl floorl
+#define npy_ceill ceill
+#define npy_rintl rintl
+#define npy_truncl truncl
+#define npy_cbrtl cbrtl
+#define npy_log10l log10l
+#define npy_logl logl
+#define npy_expm1l expm1l
+#define npy_asinl asinl
+#define npy_acosl acosl
+#define npy_atanl atanl
+#define npy_asinhl asinhl
+#define npy_acoshl acoshl
+#define npy_atanhl atanhl
+#define npy_log1pl log1pl
+#define npy_exp2l exp2l
+#define npy_fmodl fmodl
+#define npy_frexpl frexpl
+#define npy_ldexpl ldexpl
+#define npy_copysignl copysignl
+#define npy_nextafterl nextafterl
+
+npy_longdouble npy_spacingl(npy_longdouble x);
+
+/*
+ * Non-standard functions
+ */
+NPY_INPLACE double npy_deg2rad(double x);
+NPY_INPLACE double npy_rad2deg(double x);
+NPY_INPLACE double npy_logaddexp(double x, double y);
+NPY_INPLACE double npy_logaddexp2(double x, double y);
+NPY_INPLACE double npy_divmod(double x, double y, double *modulus);
+NPY_INPLACE double npy_heaviside(double x, double h0);
+
+NPY_INPLACE float npy_deg2radf(float x);
+NPY_INPLACE float npy_rad2degf(float x);
+NPY_INPLACE float npy_logaddexpf(float x, float y);
+NPY_INPLACE float npy_logaddexp2f(float x, float y);
+NPY_INPLACE float npy_divmodf(float x, float y, float *modulus);
+NPY_INPLACE float npy_heavisidef(float x, float h0);
+
+NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y,
+                           npy_longdouble *modulus);
+NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0);
+
+#define npy_degrees npy_rad2deg
+#define npy_degreesf npy_rad2degf
+#define npy_degreesl npy_rad2degl
+
+#define npy_radians npy_deg2rad
+#define npy_radiansf npy_deg2radf
+#define npy_radiansl npy_deg2radl
+
+/*
+ * Complex declarations
+ */
+
+/*
+ * C99 specifies that complex numbers have the same representation as
+ * an array of two elements, where the first element is the real part
+ * and the second element is the imaginary part.
+ */
+#define __NPY_CPACK_IMP(x, y, type, ctype)   \
+    union {                                  \
+        ctype z;                             \
+        type a[2];                           \
+    } z1;                                    \
+                                             \
+    z1.a[0] = (x);                           \
+    z1.a[1] = (y);                           \
+                                             \
+    return z1.z;
+
+static inline npy_cdouble npy_cpack(double x, double y)
+{
+    __NPY_CPACK_IMP(x, y, double, npy_cdouble);
+}
+
+static inline npy_cfloat npy_cpackf(float x, float y)
+{
+    __NPY_CPACK_IMP(x, y, float, npy_cfloat);
+}
+
+static inline npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)
+{
+    __NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble);
+}
+#undef __NPY_CPACK_IMP
+
+/*
+ * Same remark as above, but in the other direction: extract first/second
+ * member of complex number, assuming a C99-compatible representation
+ *
+ * These are defined as static inline, such that a reasonable compiler would
+ * most likely compile them to one or two instructions (on CISC at least)
+ */
+#define __NPY_CEXTRACT_IMP(z, index, type, ctype)   \
+    union {                                         \
+        ctype z;                                    \
+        type a[2];                                  \
+    } __z_repr;                                     \
+    __z_repr.z = z;                                 \
+                                                    \
+    return __z_repr.a[index];
+
+static inline double npy_creal(npy_cdouble z)
+{
+    __NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble);
+}
+
+static inline double npy_cimag(npy_cdouble z)
+{
+    __NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble);
+}
+
+static inline float npy_crealf(npy_cfloat z)
+{
+    __NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat);
+}
+
+static inline float npy_cimagf(npy_cfloat z)
+{
+    __NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat);
+}
+
+static inline npy_longdouble npy_creall(npy_clongdouble z)
+{
+    __NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble);
+}
+
+static inline npy_longdouble npy_cimagl(npy_clongdouble z)
+{
+    __NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble);
+}
+#undef __NPY_CEXTRACT_IMP
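+/*
+ * Minimal usage sketch for the pack/extract helpers above (illustrative
+ * only, assuming a hypothetical caller that includes this header; npy_cabs
+ * is declared further below):
+ *
+ *     npy_cdouble z = npy_cpack(3.0, -4.0);   // z = 3 - 4i
+ *     double re = npy_creal(z);               // 3.0
+ *     double im = npy_cimag(z);               // -4.0
+ *     double r  = npy_cabs(z);                // 5.0
+ */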
+
+/*
+ * Double precision complex functions
+ */
+double npy_cabs(npy_cdouble z);
+double npy_carg(npy_cdouble z);
+
+npy_cdouble npy_cexp(npy_cdouble z);
+npy_cdouble npy_clog(npy_cdouble z);
+npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y);
+
+npy_cdouble npy_csqrt(npy_cdouble z);
+
+npy_cdouble npy_ccos(npy_cdouble z);
+npy_cdouble npy_csin(npy_cdouble z);
+npy_cdouble npy_ctan(npy_cdouble z);
+
+npy_cdouble npy_ccosh(npy_cdouble z);
+npy_cdouble npy_csinh(npy_cdouble z);
+npy_cdouble npy_ctanh(npy_cdouble z);
+
+npy_cdouble npy_cacos(npy_cdouble z);
+npy_cdouble npy_casin(npy_cdouble z);
+npy_cdouble npy_catan(npy_cdouble z);
+
+npy_cdouble npy_cacosh(npy_cdouble z);
+npy_cdouble npy_casinh(npy_cdouble z);
+npy_cdouble npy_catanh(npy_cdouble z);
+
+/*
+ * Single precision complex functions
+ */
+float npy_cabsf(npy_cfloat z);
+float npy_cargf(npy_cfloat z);
+
+npy_cfloat npy_cexpf(npy_cfloat z);
+npy_cfloat npy_clogf(npy_cfloat z);
+npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y);
+
+npy_cfloat npy_csqrtf(npy_cfloat z);
+
+npy_cfloat npy_ccosf(npy_cfloat z);
+npy_cfloat npy_csinf(npy_cfloat z);
+npy_cfloat npy_ctanf(npy_cfloat z);
+
+npy_cfloat npy_ccoshf(npy_cfloat z);
+npy_cfloat npy_csinhf(npy_cfloat z);
+npy_cfloat npy_ctanhf(npy_cfloat z);
+
+npy_cfloat npy_cacosf(npy_cfloat z);
+npy_cfloat npy_casinf(npy_cfloat z);
+npy_cfloat npy_catanf(npy_cfloat z);
+
+npy_cfloat npy_cacoshf(npy_cfloat z);
+npy_cfloat npy_casinhf(npy_cfloat z);
+npy_cfloat npy_catanhf(npy_cfloat z);
+
+
+/*
+ * Extended precision complex functions
+ */
+npy_longdouble npy_cabsl(npy_clongdouble z);
+npy_longdouble npy_cargl(npy_clongdouble z);
+
+npy_clongdouble npy_cexpl(npy_clongdouble z);
+npy_clongdouble npy_clogl(npy_clongdouble z);
+npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y);
+
+npy_clongdouble npy_csqrtl(npy_clongdouble z);
+
+npy_clongdouble npy_ccosl(npy_clongdouble z);
+npy_clongdouble npy_csinl(npy_clongdouble z);
+npy_clongdouble npy_ctanl(npy_clongdouble z);
+
+npy_clongdouble npy_ccoshl(npy_clongdouble z);
+npy_clongdouble npy_csinhl(npy_clongdouble z);
+npy_clongdouble npy_ctanhl(npy_clongdouble z);
+
+npy_clongdouble npy_cacosl(npy_clongdouble z);
+npy_clongdouble npy_casinl(npy_clongdouble z);
+npy_clongdouble npy_catanl(npy_clongdouble z);
+
+npy_clongdouble npy_cacoshl(npy_clongdouble z);
+npy_clongdouble npy_casinhl(npy_clongdouble z);
+npy_clongdouble npy_catanhl(npy_clongdouble z);
+
+
+/*
+ * Functions that set the floating point error
+ * status word.
+ */
+
+/*
+ * platform-dependent code translates floating point
+ * status to an integer sum of these values
+ */
+#define NPY_FPE_DIVIDEBYZERO  1
+#define NPY_FPE_OVERFLOW      2
+#define NPY_FPE_UNDERFLOW     4
+#define NPY_FPE_INVALID       8
+
+int npy_clear_floatstatus_barrier(char*);
+int npy_get_floatstatus_barrier(char*);
+/*
+ * Use caution with these - clang and gcc 8.1 are known to reorder calls
+ * to this form of the function, which can defeat the check. The _barrier
+ * form of the call is preferable, where the argument is
+ * (char*)&local_variable
+ */
+int npy_clear_floatstatus(void);
+int npy_get_floatstatus(void);
+
+void npy_set_floatstatus_divbyzero(void);
+void npy_set_floatstatus_overflow(void);
+void npy_set_floatstatus_underflow(void);
+void npy_set_floatstatus_invalid(void);
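+/*
+ * Sketch of the _barrier pattern recommended above (the computation
+ * `x = 1.0 / y` is a hypothetical example, not part of the API):
+ *
+ *     double x, y = 0.0;
+ *     npy_clear_floatstatus_barrier((char*)&x);
+ *     x = 1.0 / y;
+ *     if (npy_get_floatstatus_barrier((char*)&x) & NPY_FPE_DIVIDEBYZERO) {
+ *         // handle the divide-by-zero condition
+ *     }
+ */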
+
+#ifdef __cplusplus
+}
+#endif
+
+#if NPY_INLINE_MATH
+#include "npy_math_internal.h"
+#endif
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h
new file mode 100644
index 00000000..39658c0b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h
@@ -0,0 +1,20 @@
+/*
+ * This include file is provided for inclusion in Cython *.pyx files where
+ * one would like to define the NPY_NO_DEPRECATED_API macro. It can be
+ * included by
+ *
+ * cdef extern from "npy_no_deprecated_api.h": pass
+ *
+ */
+#ifndef NPY_NO_DEPRECATED_API
+
+/* put this check here since there may be multiple includes in C extensions. */
+#if defined(NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_) || \
+    defined(NUMPY_CORE_INCLUDE_NUMPY_NPY_DEPRECATED_API_H) || \
+    defined(NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_)
+#error "npy_no_deprecated_api.h" must be first among numpy includes.
+#else
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#endif
+
+#endif  /* NPY_NO_DEPRECATED_API */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_os.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_os.h
new file mode 100644
index 00000000..0ce5d78b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/npy_os.h
@@ -0,0 +1,42 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_
+
+#if defined(linux) || defined(__linux) || defined(__linux__)
+    #define NPY_OS_LINUX
+#elif defined(__FreeBSD__) || defined(__NetBSD__) || \
+            defined(__OpenBSD__) || defined(__DragonFly__)
+    #define NPY_OS_BSD
+    #ifdef __FreeBSD__
+        #define NPY_OS_FREEBSD
+    #elif defined(__NetBSD__)
+        #define NPY_OS_NETBSD
+    #elif defined(__OpenBSD__)
+        #define NPY_OS_OPENBSD
+    #elif defined(__DragonFly__)
+        #define NPY_OS_DRAGONFLY
+    #endif
+#elif defined(sun) || defined(__sun)
+    #define NPY_OS_SOLARIS
+#elif defined(__CYGWIN__)
+    #define NPY_OS_CYGWIN
+/* We are on Windows.*/
+#elif defined(_WIN32)
+  /* We are using MinGW (64-bit or 32-bit)*/
+  #if defined(__MINGW32__) || defined(__MINGW64__)
+    #define NPY_OS_MINGW
+  /* Otherwise, if _WIN64 is defined, we are targeting 64-bit Windows*/
+  #elif defined(_WIN64)
+    #define NPY_OS_WIN64
+  /* Otherwise assume we are targeting 32-bit Windows*/
+  #else
+    #define NPY_OS_WIN32
+  #endif
+#elif defined(__APPLE__)
+    #define NPY_OS_DARWIN
+#elif defined(__HAIKU__)
+    #define NPY_OS_HAIKU
+#else
+    #define NPY_OS_UNKNOWN
+#endif
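+/*
+ * Example consumer (a sketch; any of the NPY_OS_* macros above can be
+ * tested the same way):
+ *
+ *     #ifdef NPY_OS_LINUX
+ *         // Linux-specific code path
+ *     #endif
+ */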
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/numpyconfig.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/numpyconfig.h
new file mode 100644
index 00000000..1c25aa5f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/numpyconfig.h
@@ -0,0 +1,138 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_
+
+#include "_numpyconfig.h"
+
+/*
+ * On Mac OS X, because there is only one configuration stage for all the archs
+ * in universal builds, any macro which depends on the arch needs to be
+ * hardcoded.
+ *
+ * Note that distutils/pip will attempt a universal2 build when Python itself
+ * is built as universal2, hence this hardcoding is needed even if we do not
+ * support universal2 wheels anymore (see gh-22796).
+ * This code block can be removed after we have dropped the setup.py based
+ * build completely.
+ */
+#ifdef __APPLE__
+    #undef NPY_SIZEOF_LONG
+    #undef NPY_SIZEOF_PY_INTPTR_T
+
+    #ifdef __LP64__
+        #define NPY_SIZEOF_LONG         8
+        #define NPY_SIZEOF_PY_INTPTR_T  8
+    #else
+        #define NPY_SIZEOF_LONG         4
+        #define NPY_SIZEOF_PY_INTPTR_T  4
+    #endif
+
+    #undef NPY_SIZEOF_LONGDOUBLE
+    #undef NPY_SIZEOF_COMPLEX_LONGDOUBLE
+    #ifdef HAVE_LDOUBLE_IEEE_DOUBLE_LE
+      #undef HAVE_LDOUBLE_IEEE_DOUBLE_LE
+    #endif
+    #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE
+      #undef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE
+    #endif
+
+    #if defined(__arm64__)
+        #define NPY_SIZEOF_LONGDOUBLE         8
+        #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16
+        #define HAVE_LDOUBLE_IEEE_DOUBLE_LE 1
+    #elif defined(__x86_64)
+        #define NPY_SIZEOF_LONGDOUBLE         16
+        #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
+        #define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE 1
+    #elif defined (__i386)
+        #define NPY_SIZEOF_LONGDOUBLE         12
+        #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 24
+    #elif defined(__ppc__) || defined (__ppc64__)
+        #define NPY_SIZEOF_LONGDOUBLE         16
+        #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
+    #else
+        #error "unknown architecture"
+    #endif
+#endif
+
+
+/**
+ * To help with both NPY_TARGET_VERSION and the NPY_NO_DEPRECATED_API macro,
+ * we include API version numbers for specific versions of NumPy.
+ * To exclude all API that was deprecated as of 1.7, add the following before
+ * #including any NumPy headers:
+ *   #define NPY_NO_DEPRECATED_API  NPY_1_7_API_VERSION
+ * The same is true for NPY_TARGET_VERSION, although NumPy will default to
+ * a backwards compatible build anyway.
+ */
+#define NPY_1_7_API_VERSION 0x00000007
+#define NPY_1_8_API_VERSION 0x00000008
+#define NPY_1_9_API_VERSION 0x00000009
+#define NPY_1_10_API_VERSION 0x0000000a
+#define NPY_1_11_API_VERSION 0x0000000a
+#define NPY_1_12_API_VERSION 0x0000000a
+#define NPY_1_13_API_VERSION 0x0000000b
+#define NPY_1_14_API_VERSION 0x0000000c
+#define NPY_1_15_API_VERSION 0x0000000c
+#define NPY_1_16_API_VERSION 0x0000000d
+#define NPY_1_17_API_VERSION 0x0000000d
+#define NPY_1_18_API_VERSION 0x0000000d
+#define NPY_1_19_API_VERSION 0x0000000d
+#define NPY_1_20_API_VERSION 0x0000000e
+#define NPY_1_21_API_VERSION 0x0000000e
+#define NPY_1_22_API_VERSION 0x0000000f
+#define NPY_1_23_API_VERSION 0x00000010
+#define NPY_1_24_API_VERSION 0x00000010
+#define NPY_1_25_API_VERSION 0x00000011
+
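+/*
+ * For example, an extension targeting the NumPy 1.22 API while building
+ * against newer headers could do (a sketch, mirroring the
+ * NPY_NO_DEPRECATED_API example above):
+ *
+ *     #define NPY_TARGET_VERSION NPY_1_22_API_VERSION
+ *     #include <numpy/arrayobject.h>
+ */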
+
+/*
+ * Binary compatibility version number.  This number is increased
+ * whenever the C-API is changed such that binary compatibility is
+ * broken, i.e. whenever a recompile of extension modules is needed.
+ */
+#define NPY_VERSION NPY_ABI_VERSION
+
+/*
+ * Minor API version we are compiling to be compatible with.  The version
+ * number is always increased when the API changes via `NPY_API_VERSION`
+ * (and should maybe just track the NumPy version).
+ *
+ * If we have an internal build, we always target the current version of
+ * course.
+ *
+ * For downstream users, we default to an older version to provide them with
+ * maximum compatibility by default.  Downstream can choose to extend that
+ * default, or narrow it down if they wish to use newer API.  If you adjust
+ * this, consider the Python version support (example for 1.25.x):
+ *
+ * NumPy 1.25.x supports Python:                     3.9  3.10  3.11  (3.12)
+ * NumPy 1.19.x supports Python:      3.6  3.7  3.8  3.9
+ * NumPy 1.17.x supports Python: 3.5  3.6  3.7  3.8
+ * NumPy 1.15.x supports Python: ...  3.6  3.7
+ *
+ * Users of the stable ABI may wish to target the last Python that is not
+ * end of life.  This would be 3.8 at NumPy 1.25 release time.
+ * 1.17 as default was the choice of oldest-supported-numpy at the time and
+ * has in practice no limit (compared to 1.19).  Even earlier becomes legacy.
+ */
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
+    /* NumPy internal build, always use current version. */
+    #define NPY_FEATURE_VERSION NPY_API_VERSION
+#elif defined(NPY_TARGET_VERSION) && NPY_TARGET_VERSION
+    /* user provided a target version, use it */
+    #define NPY_FEATURE_VERSION NPY_TARGET_VERSION
+#else
+    /* Use the default (increase when dropping Python 3.9 support) */
+    #define NPY_FEATURE_VERSION NPY_1_19_API_VERSION
+#endif
+
+/* Sanity check the (requested) feature version */
+#if NPY_FEATURE_VERSION > NPY_API_VERSION
+    #error "NPY_TARGET_VERSION higher than NumPy headers!"
+#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION
+    /* No support for irrelevant old targets, no need for error, but warn. */
+    #warning "Requested NumPy target lower than supported NumPy 1.15."
+#endif
+
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/old_defines.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/old_defines.h
new file mode 100644
index 00000000..b3fa6775
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/old_defines.h
@@ -0,0 +1,187 @@
+/* This header is deprecated as of NumPy 1.7 */
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_
+
+#if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION
+#error The header "old_defines.h" is deprecated as of NumPy 1.7.
+#endif
+
+#define NDARRAY_VERSION NPY_VERSION
+
+#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE
+#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE
+#define PyArray_BUFSIZE NPY_BUFSIZE
+
+#define PyArray_PRIORITY NPY_PRIORITY
+#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY
+#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE
+
+#define NPY_MAX PyArray_MAX
+#define NPY_MIN PyArray_MIN
+
+#define PyArray_TYPES       NPY_TYPES
+#define PyArray_BOOL        NPY_BOOL
+#define PyArray_BYTE        NPY_BYTE
+#define PyArray_UBYTE       NPY_UBYTE
+#define PyArray_SHORT       NPY_SHORT
+#define PyArray_USHORT      NPY_USHORT
+#define PyArray_INT         NPY_INT
+#define PyArray_UINT        NPY_UINT
+#define PyArray_LONG        NPY_LONG
+#define PyArray_ULONG       NPY_ULONG
+#define PyArray_LONGLONG    NPY_LONGLONG
+#define PyArray_ULONGLONG   NPY_ULONGLONG
+#define PyArray_HALF        NPY_HALF
+#define PyArray_FLOAT       NPY_FLOAT
+#define PyArray_DOUBLE      NPY_DOUBLE
+#define PyArray_LONGDOUBLE  NPY_LONGDOUBLE
+#define PyArray_CFLOAT      NPY_CFLOAT
+#define PyArray_CDOUBLE     NPY_CDOUBLE
+#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE
+#define PyArray_OBJECT      NPY_OBJECT
+#define PyArray_STRING      NPY_STRING
+#define PyArray_UNICODE     NPY_UNICODE
+#define PyArray_VOID        NPY_VOID
+#define PyArray_DATETIME    NPY_DATETIME
+#define PyArray_TIMEDELTA   NPY_TIMEDELTA
+#define PyArray_NTYPES      NPY_NTYPES
+#define PyArray_NOTYPE      NPY_NOTYPE
+#define PyArray_CHAR        NPY_CHAR
+#define PyArray_USERDEF     NPY_USERDEF
+#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES
+
+#define PyArray_INTP        NPY_INTP
+#define PyArray_UINTP       NPY_UINTP
+
+#define PyArray_INT8    NPY_INT8
+#define PyArray_UINT8   NPY_UINT8
+#define PyArray_INT16   NPY_INT16
+#define PyArray_UINT16  NPY_UINT16
+#define PyArray_INT32   NPY_INT32
+#define PyArray_UINT32  NPY_UINT32
+
+#ifdef NPY_INT64
+#define PyArray_INT64   NPY_INT64
+#define PyArray_UINT64  NPY_UINT64
+#endif
+
+#ifdef NPY_INT128
+#define PyArray_INT128 NPY_INT128
+#define PyArray_UINT128 NPY_UINT128
+#endif
+
+#ifdef NPY_FLOAT16
+#define PyArray_FLOAT16  NPY_FLOAT16
+#define PyArray_COMPLEX32  NPY_COMPLEX32
+#endif
+
+#ifdef NPY_FLOAT80
+#define PyArray_FLOAT80  NPY_FLOAT80
+#define PyArray_COMPLEX160  NPY_COMPLEX160
+#endif
+
+#ifdef NPY_FLOAT96
+#define PyArray_FLOAT96  NPY_FLOAT96
+#define PyArray_COMPLEX192  NPY_COMPLEX192
+#endif
+
+#ifdef NPY_FLOAT128
+#define PyArray_FLOAT128  NPY_FLOAT128
+#define PyArray_COMPLEX256  NPY_COMPLEX256
+#endif
+
+#define PyArray_FLOAT32    NPY_FLOAT32
+#define PyArray_COMPLEX64  NPY_COMPLEX64
+#define PyArray_FLOAT64    NPY_FLOAT64
+#define PyArray_COMPLEX128 NPY_COMPLEX128
+
+
+#define PyArray_TYPECHAR        NPY_TYPECHAR
+#define PyArray_BOOLLTR         NPY_BOOLLTR
+#define PyArray_BYTELTR         NPY_BYTELTR
+#define PyArray_UBYTELTR        NPY_UBYTELTR
+#define PyArray_SHORTLTR        NPY_SHORTLTR
+#define PyArray_USHORTLTR       NPY_USHORTLTR
+#define PyArray_INTLTR          NPY_INTLTR
+#define PyArray_UINTLTR         NPY_UINTLTR
+#define PyArray_LONGLTR         NPY_LONGLTR
+#define PyArray_ULONGLTR        NPY_ULONGLTR
+#define PyArray_LONGLONGLTR     NPY_LONGLONGLTR
+#define PyArray_ULONGLONGLTR    NPY_ULONGLONGLTR
+#define PyArray_HALFLTR         NPY_HALFLTR
+#define PyArray_FLOATLTR        NPY_FLOATLTR
+#define PyArray_DOUBLELTR       NPY_DOUBLELTR
+#define PyArray_LONGDOUBLELTR   NPY_LONGDOUBLELTR
+#define PyArray_CFLOATLTR       NPY_CFLOATLTR
+#define PyArray_CDOUBLELTR      NPY_CDOUBLELTR
+#define PyArray_CLONGDOUBLELTR  NPY_CLONGDOUBLELTR
+#define PyArray_OBJECTLTR       NPY_OBJECTLTR
+#define PyArray_STRINGLTR       NPY_STRINGLTR
+#define PyArray_STRINGLTR2      NPY_STRINGLTR2
+#define PyArray_UNICODELTR      NPY_UNICODELTR
+#define PyArray_VOIDLTR         NPY_VOIDLTR
+#define PyArray_DATETIMELTR     NPY_DATETIMELTR
+#define PyArray_TIMEDELTALTR    NPY_TIMEDELTALTR
+#define PyArray_CHARLTR         NPY_CHARLTR
+#define PyArray_INTPLTR         NPY_INTPLTR
+#define PyArray_UINTPLTR        NPY_UINTPLTR
+#define PyArray_GENBOOLLTR      NPY_GENBOOLLTR
+#define PyArray_SIGNEDLTR       NPY_SIGNEDLTR
+#define PyArray_UNSIGNEDLTR     NPY_UNSIGNEDLTR
+#define PyArray_FLOATINGLTR     NPY_FLOATINGLTR
+#define PyArray_COMPLEXLTR      NPY_COMPLEXLTR
+
+#define PyArray_QUICKSORT   NPY_QUICKSORT
+#define PyArray_HEAPSORT    NPY_HEAPSORT
+#define PyArray_MERGESORT   NPY_MERGESORT
+#define PyArray_SORTKIND    NPY_SORTKIND
+#define PyArray_NSORTS      NPY_NSORTS
+
+#define PyArray_NOSCALAR       NPY_NOSCALAR
+#define PyArray_BOOL_SCALAR    NPY_BOOL_SCALAR
+#define PyArray_INTPOS_SCALAR  NPY_INTPOS_SCALAR
+#define PyArray_INTNEG_SCALAR  NPY_INTNEG_SCALAR
+#define PyArray_FLOAT_SCALAR   NPY_FLOAT_SCALAR
+#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR
+#define PyArray_OBJECT_SCALAR  NPY_OBJECT_SCALAR
+#define PyArray_SCALARKIND     NPY_SCALARKIND
+#define PyArray_NSCALARKINDS   NPY_NSCALARKINDS
+
+#define PyArray_ANYORDER     NPY_ANYORDER
+#define PyArray_CORDER       NPY_CORDER
+#define PyArray_FORTRANORDER NPY_FORTRANORDER
+#define PyArray_ORDER        NPY_ORDER
+
+#define PyDescr_ISBOOL      PyDataType_ISBOOL
+#define PyDescr_ISUNSIGNED  PyDataType_ISUNSIGNED
+#define PyDescr_ISSIGNED    PyDataType_ISSIGNED
+#define PyDescr_ISINTEGER   PyDataType_ISINTEGER
+#define PyDescr_ISFLOAT     PyDataType_ISFLOAT
+#define PyDescr_ISNUMBER    PyDataType_ISNUMBER
+#define PyDescr_ISSTRING    PyDataType_ISSTRING
+#define PyDescr_ISCOMPLEX   PyDataType_ISCOMPLEX
+#define PyDescr_ISPYTHON    PyDataType_ISPYTHON
+#define PyDescr_ISFLEXIBLE  PyDataType_ISFLEXIBLE
+#define PyDescr_ISUSERDEF   PyDataType_ISUSERDEF
+#define PyDescr_ISEXTENDED  PyDataType_ISEXTENDED
+#define PyDescr_ISOBJECT    PyDataType_ISOBJECT
+#define PyDescr_HASFIELDS   PyDataType_HASFIELDS
+
+#define PyArray_LITTLE NPY_LITTLE
+#define PyArray_BIG NPY_BIG
+#define PyArray_NATIVE NPY_NATIVE
+#define PyArray_SWAP NPY_SWAP
+#define PyArray_IGNORE NPY_IGNORE
+
+#define PyArray_NATBYTE NPY_NATBYTE
+#define PyArray_OPPBYTE NPY_OPPBYTE
+
+#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE
+
+#define PyArray_USE_PYMEM NPY_USE_PYMEM
+
+#define PyArray_RemoveLargest PyArray_RemoveSmallest
+
+#define PyArray_UCS4 npy_ucs4
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/random/LICENSE.txt b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/random/LICENSE.txt
new file mode 100644
index 00000000..d72a7c38
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/random/LICENSE.txt
@@ -0,0 +1,21 @@
+  zlib License
+  ------------
+
+  Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>
+  Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com>
+
+  This software is provided 'as-is', without any express or implied
+  warranty.  In no event will the authors be held liable for any damages
+  arising from the use of this software.
+
+  Permission is granted to anyone to use this software for any purpose,
+  including commercial applications, and to alter it and redistribute it
+  freely, subject to the following restrictions:
+
+  1. The origin of this software must not be misrepresented; you must not
+     claim that you wrote the original software. If you use this software
+     in a product, an acknowledgment in the product documentation would be
+     appreciated but is not required.
+  2. Altered source versions must be plainly marked as such, and must not be
+     misrepresented as being the original software.
+  3. This notice may not be removed or altered from any source distribution.
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/random/bitgen.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/random/bitgen.h
new file mode 100644
index 00000000..162dd5c5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/random/bitgen.h
@@ -0,0 +1,20 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_
+
+#pragma once
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+/* Must match the declaration in numpy/random/<any>.pxd */
+
+typedef struct bitgen {
+  void *state;
+  uint64_t (*next_uint64)(void *st);
+  uint32_t (*next_uint32)(void *st);
+  double (*next_double)(void *st);
+  uint64_t (*next_raw)(void *st);
+} bitgen_t;
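+/*
+ * A bit generator is wired up by pointing these callbacks at an arbitrary
+ * PRNG state. A minimal sketch using a toy xorshift64 generator (purely
+ * illustrative; not a NumPy-provided generator):
+ *
+ *     typedef struct { uint64_t s; } toy_state;
+ *
+ *     static uint64_t toy_next64(void *st) {
+ *         uint64_t *s = &((toy_state *)st)->s;
+ *         *s ^= *s << 13; *s ^= *s >> 7; *s ^= *s << 17;
+ *         return *s;
+ *     }
+ *     static uint32_t toy_next32(void *st) {
+ *         return (uint32_t)(toy_next64(st) >> 32);
+ *     }
+ *     static double toy_nextd(void *st) {
+ *         return (toy_next64(st) >> 11) * (1.0 / 9007199254740992.0);
+ *     }
+ *
+ *     toy_state state = { 0x9E3779B97F4A7C15ULL };
+ *     bitgen_t bg = { &state, toy_next64, toy_next32, toy_nextd, toy_next64 };
+ */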
+
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/random/distributions.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/random/distributions.h
new file mode 100644
index 00000000..e7fa4bd0
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/random/distributions.h
@@ -0,0 +1,209 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+#include "numpy/npy_common.h"
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "numpy/npy_math.h"
+#include "numpy/random/bitgen.h"
+
+/*
+ * RAND_INT_TYPE is used to share integer generators with RandomState which
+ * used long in place of int64_t. If changing a distribution that uses
+ * RAND_INT_TYPE, then the original unmodified copy must be retained for
+ * use in RandomState by copying to the legacy distributions source file.
+ */
+#ifdef NP_RANDOM_LEGACY
+#define RAND_INT_TYPE long
+#define RAND_INT_MAX LONG_MAX
+#else
+#define RAND_INT_TYPE int64_t
+#define RAND_INT_MAX INT64_MAX
+#endif
+
+#ifdef _MSC_VER
+#define DECLDIR __declspec(dllexport)
+#else
+#define DECLDIR extern
+#endif
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? x : y)
+#define MAX(x, y) (((x) > (y)) ? x : y)
+#endif
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846264338328
+#endif
+
+typedef struct s_binomial_t {
+  int has_binomial; /* !=0: following parameters initialized for binomial */
+  double psave;
+  RAND_INT_TYPE nsave;
+  double r;
+  double q;
+  double fm;
+  RAND_INT_TYPE m;
+  double p1;
+  double xm;
+  double xl;
+  double xr;
+  double c;
+  double laml;
+  double lamr;
+  double p2;
+  double p3;
+  double p4;
+} binomial_t;
+
+DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state);
+DECLDIR double random_standard_uniform(bitgen_t *bitgen_state);
+DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *);
+
+DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state);
+DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state);
+DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state);
+DECLDIR uint64_t random_uint(bitgen_t *bitgen_state);
+
+DECLDIR double random_standard_exponential(bitgen_t *bitgen_state);
+DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state);
+DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *);
+DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *);
+
+DECLDIR double random_standard_normal(bitgen_t *bitgen_state);
+DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state);
+DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *);
+DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape);
+DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape);
+
+DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale);
+
+DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale);
+DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale);
+
+DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale);
+DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range);
+DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b);
+DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df);
+DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden);
+DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state);
+DECLDIR double random_pareto(bitgen_t *bitgen_state, double a);
+DECLDIR double random_weibull(bitgen_t *bitgen_state, double a);
+DECLDIR double random_power(bitgen_t *bitgen_state, double a);
+DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma);
+DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode);
+DECLDIR double random_standard_t(bitgen_t *bitgen_state, double df);
+DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
+                                           double nonc);
+DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
+                                   double dfden, double nonc);
+DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale);
+DECLDIR double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa);
+DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode,
+                                 double right);
+
+DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam);
+DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n,
+                                 double p);
+
+DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p,
+                                int64_t n, binomial_t *binomial);
+
+DECLDIR int64_t random_logseries(bitgen_t *bitgen_state, double p);
+DECLDIR int64_t random_geometric(bitgen_t *bitgen_state, double p);
+DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p);
+DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a);
+DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state,
+                                      int64_t good, int64_t bad, int64_t sample);
+DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max);
+
+/* Generate random uint64 numbers in closed interval [off, off + rng]. */
+DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off,
+                                       uint64_t rng, uint64_t mask,
+                                       bool use_masked);
+
+/* Generate random uint32 numbers in closed interval [off, off + rng]. */
+DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
+                                                uint32_t off, uint32_t rng,
+                                                uint32_t mask, bool use_masked,
+                                                int *bcnt, uint32_t *buf);
+DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
+                                                uint16_t off, uint16_t rng,
+                                                uint16_t mask, bool use_masked,
+                                                int *bcnt, uint32_t *buf);
+DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off,
+                                              uint8_t rng, uint8_t mask,
+                                              bool use_masked, int *bcnt,
+                                              uint32_t *buf);
+DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off,
+                                              npy_bool rng, npy_bool mask,
+                                              bool use_masked, int *bcnt,
+                                              uint32_t *buf);
+
+DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off,
+                                        uint64_t rng, npy_intp cnt,
+                                        bool use_masked, uint64_t *out);
+DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off,
+                                        uint32_t rng, npy_intp cnt,
+                                        bool use_masked, uint32_t *out);
+DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off,
+                                        uint16_t rng, npy_intp cnt,
+                                        bool use_masked, uint16_t *out);
+DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off,
+                                       uint8_t rng, npy_intp cnt,
+                                       bool use_masked, uint8_t *out);
+DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off,
+                                      npy_bool rng, npy_intp cnt,
+                                      bool use_masked, npy_bool *out);
+
+DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix,
+                                double *pix, npy_intp d, binomial_t *binomial);
+
+/* multivariate hypergeometric, "count" method */
+DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
+                              int64_t total,
+                              size_t num_colors, int64_t *colors,
+                              int64_t nsample,
+                              size_t num_variates, int64_t *variates);
+
+/* multivariate hypergeometric, "marginals" method */
+DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
+                                   int64_t total,
+                                   size_t num_colors, int64_t *colors,
+                                   int64_t nsample,
+                                   size_t num_variates, int64_t *variates);
+
+/* Common to legacy-distributions.c and distributions.c but not exported */
+
+RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state,
+                                   RAND_INT_TYPE n,
+                                   double p,
+                                   binomial_t *binomial);
+RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state,
+                                        RAND_INT_TYPE n,
+                                        double p,
+                                        binomial_t *binomial);
+double random_loggam(double x);
+static inline double next_double(bitgen_t *bitgen_state) {
+    return bitgen_state->next_double(bitgen_state->state);
+}
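+/*
+ * With a filled-in bitgen_t (see numpy/random/bitgen.h), variates are drawn
+ * by passing its address to the generators above, e.g. (a sketch):
+ *
+ *     binomial_t binom = {0};
+ *     double  u = random_standard_uniform(&bg);
+ *     double  n = random_normal(&bg, 0.0, 1.0);
+ *     int64_t k = random_binomial(&bg, 0.5, 100, &binom);
+ */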
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/random/libdivide.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/random/libdivide.h
new file mode 100644
index 00000000..f4eb8039
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/random/libdivide.h
@@ -0,0 +1,2079 @@
+// libdivide.h - Optimized integer division
+// https://libdivide.com
+//
+// Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>
+// Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com>
+//
+// libdivide is dual-licensed under the Boost or zlib licenses.
+// You may use libdivide under the terms of either of these.
+// See LICENSE.txt for more details.
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
+
+#define LIBDIVIDE_VERSION "3.0"
+#define LIBDIVIDE_VERSION_MAJOR 3
+#define LIBDIVIDE_VERSION_MINOR 0
+
+#include <stdint.h>
+
+#if defined(__cplusplus)
+    #include <cstdlib>
+    #include <cstdio>
+    #include <type_traits>
+#else
+    #include <stdlib.h>
+    #include <stdio.h>
+#endif
+
+#if defined(LIBDIVIDE_AVX512)
+    #include <immintrin.h>
+#elif defined(LIBDIVIDE_AVX2)
+    #include <immintrin.h>
+#elif defined(LIBDIVIDE_SSE2)
+    #include <emmintrin.h>
+#endif
+
+#if defined(_MSC_VER)
+    #include <intrin.h>
+    // disable warning C4146: unary minus operator applied
+    // to unsigned type, result still unsigned
+    #pragma warning(disable: 4146)
+    #define LIBDIVIDE_VC
+#endif
+
+#if !defined(__has_builtin)
+    #define __has_builtin(x) 0
+#endif
+
+#if defined(__SIZEOF_INT128__)
+    #define HAS_INT128_T
+    // clang-cl on Windows does not yet support 128-bit division
+    #if !(defined(__clang__) && defined(LIBDIVIDE_VC))
+        #define HAS_INT128_DIV
+    #endif
+#endif
+
+#if defined(__x86_64__) || defined(_M_X64)
+    #define LIBDIVIDE_X86_64
+#endif
+
+#if defined(__i386__)
+    #define LIBDIVIDE_i386
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+    #define LIBDIVIDE_GCC_STYLE_ASM
+#endif
+
+#if defined(__cplusplus) || defined(LIBDIVIDE_VC)
+    #define LIBDIVIDE_FUNCTION __FUNCTION__
+#else
+    #define LIBDIVIDE_FUNCTION __func__
+#endif
+
+#define LIBDIVIDE_ERROR(msg) \
+    do { \
+        fprintf(stderr, "libdivide.h:%d: %s(): Error: %s\n", \
+            __LINE__, LIBDIVIDE_FUNCTION, msg); \
+        abort(); \
+    } while (0)
+
+#if defined(LIBDIVIDE_ASSERTIONS_ON)
+    #define LIBDIVIDE_ASSERT(x) \
+        do { \
+            if (!(x)) { \
+                fprintf(stderr, "libdivide.h:%d: %s(): Assertion failed: %s\n", \
+                    __LINE__, LIBDIVIDE_FUNCTION, #x); \
+                abort(); \
+            } \
+        } while (0)
+#else
+    #define LIBDIVIDE_ASSERT(x)
+#endif
+
+#ifdef __cplusplus
+namespace libdivide {
+#endif
+
+// pack divider structs to prevent compilers from padding.
+// This reduces memory usage by up to 43% when using a large
+// array of libdivide dividers and improves performance
+// by up to 10% because of reduced memory bandwidth.
+#pragma pack(push, 1)
+
+struct libdivide_u32_t {
+    uint32_t magic;
+    uint8_t more;
+};
+
+struct libdivide_s32_t {
+    int32_t magic;
+    uint8_t more;
+};
+
+struct libdivide_u64_t {
+    uint64_t magic;
+    uint8_t more;
+};
+
+struct libdivide_s64_t {
+    int64_t magic;
+    uint8_t more;
+};
+
+struct libdivide_u32_branchfree_t {
+    uint32_t magic;
+    uint8_t more;
+};
+
+struct libdivide_s32_branchfree_t {
+    int32_t magic;
+    uint8_t more;
+};
+
+struct libdivide_u64_branchfree_t {
+    uint64_t magic;
+    uint8_t more;
+};
+
+struct libdivide_s64_branchfree_t {
+    int64_t magic;
+    uint8_t more;
+};
+
+#pragma pack(pop)
+
+// Explanation of the "more" field:
+//
+// * Bits 0-5 is the shift value (for shift path or mult path).
+// * Bit 6 is the add indicator for mult path.
+// * Bit 7 is set if the divisor is negative. We use bit 7 as the negative
+//   divisor indicator so that we can efficiently use sign extension to
+//   create a bitmask with all bits set to 1 (if the divisor is negative)
+//   or 0 (if the divisor is positive).
+//
+// u32: [0-4] shift value
+//      [5] ignored
+//      [6] add indicator
+//      magic number of 0 indicates shift path
+//
+// s32: [0-4] shift value
+//      [5] ignored
+//      [6] add indicator
+//      [7] indicates negative divisor
+//      magic number of 0 indicates shift path
+//
+// u64: [0-5] shift value
+//      [6] add indicator
+//      magic number of 0 indicates shift path
+//
+// s64: [0-5] shift value
+//      [6] add indicator
+//      [7] indicates negative divisor
+//      magic number of 0 indicates shift path
+//
+// In s32 and s64 branchfull modes, the magic number is negated according to
+// whether the divisor is negated. In branchfree strategy, it is not negated.
+
+enum {
+    LIBDIVIDE_32_SHIFT_MASK = 0x1F,
+    LIBDIVIDE_64_SHIFT_MASK = 0x3F,
+    LIBDIVIDE_ADD_MARKER = 0x40,
+    LIBDIVIDE_NEGATIVE_DIVISOR = 0x80
+};
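+// For example, given a struct libdivide_u32_t d, the shift and the add
+// indicator described above are recovered as (a sketch):
+//
+//     uint8_t shift = d.more & LIBDIVIDE_32_SHIFT_MASK;
+//     int has_add   = (d.more & LIBDIVIDE_ADD_MARKER) != 0;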
+
+static inline struct libdivide_s32_t libdivide_s32_gen(int32_t d);
+static inline struct libdivide_u32_t libdivide_u32_gen(uint32_t d);
+static inline struct libdivide_s64_t libdivide_s64_gen(int64_t d);
+static inline struct libdivide_u64_t libdivide_u64_gen(uint64_t d);
+
+static inline struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d);
+static inline struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d);
+static inline struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d);
+static inline struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d);
+
+static inline int32_t  libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom);
+static inline uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom);
+static inline int64_t  libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom);
+static inline uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom);
+
+static inline int32_t  libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom);
+static inline uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom);
+static inline int64_t  libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom);
+static inline uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom);
+
+static inline int32_t  libdivide_s32_recover(const struct libdivide_s32_t *denom);
+static inline uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom);
+static inline int64_t  libdivide_s64_recover(const struct libdivide_s64_t *denom);
+static inline uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom);
+
+static inline int32_t  libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom);
+static inline uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom);
+static inline int64_t  libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom);
+static inline uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom);
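+// Typical round trip through this API (a sketch; the payoff comes from
+// reusing one precomputed divider across many divisions):
+//
+//     struct libdivide_u32_t d = libdivide_u32_gen(7);
+//     uint32_t q    = libdivide_u32_do(100, &d);    // 14
+//     uint32_t back = libdivide_u32_recover(&d);    // 7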
+
+//////// Internal Utility Functions
+
+static inline uint32_t libdivide_mullhi_u32(uint32_t x, uint32_t y) {
+    uint64_t xl = x, yl = y;
+    uint64_t rl = xl * yl;
+    return (uint32_t)(rl >> 32);
+}
+
+static inline int32_t libdivide_mullhi_s32(int32_t x, int32_t y) {
+    int64_t xl = x, yl = y;
+    int64_t rl = xl * yl;
+    // needs to be arithmetic shift
+    return (int32_t)(rl >> 32);
+}
+
+static inline uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) {
+#if defined(LIBDIVIDE_VC) && \
+    defined(LIBDIVIDE_X86_64)
+    return __umulh(x, y);
+#elif defined(HAS_INT128_T)
+    __uint128_t xl = x, yl = y;
+    __uint128_t rl = xl * yl;
+    return (uint64_t)(rl >> 64);
+#else
+    // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
+    uint32_t mask = 0xFFFFFFFF;
+    uint32_t x0 = (uint32_t)(x & mask);
+    uint32_t x1 = (uint32_t)(x >> 32);
+    uint32_t y0 = (uint32_t)(y & mask);
+    uint32_t y1 = (uint32_t)(y >> 32);
+    uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0);
+    uint64_t x0y1 = x0 * (uint64_t)y1;
+    uint64_t x1y0 = x1 * (uint64_t)y0;
+    uint64_t x1y1 = x1 * (uint64_t)y1;
+    uint64_t temp = x1y0 + x0y0_hi;
+    uint64_t temp_lo = temp & mask;
+    uint64_t temp_hi = temp >> 32;
+
+    return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32);
+#endif
+}
+
+static inline int64_t libdivide_mullhi_s64(int64_t x, int64_t y) {
+#if defined(LIBDIVIDE_VC) && \
+    defined(LIBDIVIDE_X86_64)
+    return __mulh(x, y);
+#elif defined(HAS_INT128_T)
+    __int128_t xl = x, yl = y;
+    __int128_t rl = xl * yl;
+    return (int64_t)(rl >> 64);
+#else
+    // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
+    uint32_t mask = 0xFFFFFFFF;
+    uint32_t x0 = (uint32_t)(x & mask);
+    uint32_t y0 = (uint32_t)(y & mask);
+    int32_t x1 = (int32_t)(x >> 32);
+    int32_t y1 = (int32_t)(y >> 32);
+    uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0);
+    int64_t t = x1 * (int64_t)y0 + x0y0_hi;
+    int64_t w1 = x0 * (int64_t)y1 + (t & mask);
+
+    return x1 * (int64_t)y1 + (t >> 32) + (w1 >> 32);
+#endif
+}
+
+static inline int32_t libdivide_count_leading_zeros32(uint32_t val) {
+#if defined(__GNUC__) || \
+    __has_builtin(__builtin_clz)
+    // Fast way to count leading zeros
+    return __builtin_clz(val);
+#elif defined(LIBDIVIDE_VC)
+    unsigned long result;
+    if (_BitScanReverse(&result, val)) {
+        return 31 - result;
+    }
+    return 0;
+#else
+    if (val == 0)
+        return 32;
+    int32_t result = 8;
+    uint32_t hi = 0xFFU << 24;
+    while ((val & hi) == 0) {
+        hi >>= 8;
+        result += 8;
+    }
+    while (val & hi) {
+        result -= 1;
+        hi <<= 1;
+    }
+    return result;
+#endif
+}
+
+static inline int32_t libdivide_count_leading_zeros64(uint64_t val) {
+#if defined(__GNUC__) || \
+    __has_builtin(__builtin_clzll)
+    // Fast way to count leading zeros
+    return __builtin_clzll(val);
+#elif defined(LIBDIVIDE_VC) && defined(_WIN64)
+    unsigned long result;
+    if (_BitScanReverse64(&result, val)) {
+        return 63 - result;
+    }
+    return 0;
+#else
+    uint32_t hi = val >> 32;
+    uint32_t lo = val & 0xFFFFFFFF;
+    if (hi != 0) return libdivide_count_leading_zeros32(hi);
+    return 32 + libdivide_count_leading_zeros32(lo);
+#endif
+}
+
+// libdivide_64_div_32_to_32: divides a 64-bit uint {u1, u0} by a 32-bit
+// uint {v}. The result must fit in 32 bits.
+// Returns the quotient directly and the remainder in *r
+static inline uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) {
+#if (defined(LIBDIVIDE_i386) || defined(LIBDIVIDE_X86_64)) && \
+     defined(LIBDIVIDE_GCC_STYLE_ASM)
+    uint32_t result;
+    __asm__("divl %[v]"
+            : "=a"(result), "=d"(*r)
+            : [v] "r"(v), "a"(u0), "d"(u1)
+            );
+    return result;
+#else
+    uint64_t n = ((uint64_t)u1 << 32) | u0;
+    uint32_t result = (uint32_t)(n / v);
+    *r = (uint32_t)(n - result * (uint64_t)v);
+    return result;
+#endif
+}
+
+// libdivide_128_div_64_to_64: divides a 128-bit uint {u1, u0} by a 64-bit
+// uint {v}. The result must fit in 64 bits.
+// Returns the quotient directly and the remainder in *r
+static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) {
+#if defined(LIBDIVIDE_X86_64) && \
+    defined(LIBDIVIDE_GCC_STYLE_ASM)
+    uint64_t result;
+    __asm__("divq %[v]"
+            : "=a"(result), "=d"(*r)
+            : [v] "r"(v), "a"(u0), "d"(u1)
+            );
+    return result;
+#elif defined(HAS_INT128_T) && \
+      defined(HAS_INT128_DIV)
+    __uint128_t n = ((__uint128_t)u1 << 64) | u0;
+    uint64_t result = (uint64_t)(n / v);
+    *r = (uint64_t)(n - result * (__uint128_t)v);
+    return result;
+#else
+    // Code taken from Hacker's Delight:
+    // http://www.hackersdelight.org/HDcode/divlu.c.
+    // License permits inclusion here per:
+    // http://www.hackersdelight.org/permissions.htm
+
+    const uint64_t b = (1ULL << 32); // Number base (32 bits)
+    uint64_t un1, un0; // Norm. dividend LSD's
+    uint64_t vn1, vn0; // Norm. divisor digits
+    uint64_t q1, q0; // Quotient digits
+    uint64_t un64, un21, un10; // Dividend digit pairs
+    uint64_t rhat; // A remainder
+    int32_t s; // Shift amount for norm
+
+    // If overflow, set rem. to an impossible value,
+    // and return the largest possible quotient
+    if (u1 >= v) {
+        *r = (uint64_t) -1;
+        return (uint64_t) -1;
+    }
+
+    // count leading zeros
+    s = libdivide_count_leading_zeros64(v);
+    if (s > 0) {
+        // Normalize divisor
+        v = v << s;
+        un64 = (u1 << s) | (u0 >> (64 - s));
+        un10 = u0 << s; // Shift dividend left
+    } else {
+        // Avoid undefined behavior of (u0 >> 64).
+        // The behavior is undefined if the right operand is
+        // negative, or greater than or equal to the length
+        // in bits of the promoted left operand.
+        un64 = u1;
+        un10 = u0;
+    }
+
+    // Break divisor up into two 32-bit digits
+    vn1 = v >> 32;
+    vn0 = v & 0xFFFFFFFF;
+
+    // Break right half of dividend into two digits
+    un1 = un10 >> 32;
+    un0 = un10 & 0xFFFFFFFF;
+
+    // Compute the first quotient digit, q1
+    q1 = un64 / vn1;
+    rhat = un64 - q1 * vn1;
+
+    while (q1 >= b || q1 * vn0 > b * rhat + un1) {
+        q1 = q1 - 1;
+        rhat = rhat + vn1;
+        if (rhat >= b)
+            break;
+    }
+
+    // Multiply and subtract
+    un21 = un64 * b + un1 - q1 * v;
+
+    // Compute the second quotient digit
+    q0 = un21 / vn1;
+    rhat = un21 - q0 * vn1;
+
+    while (q0 >= b || q0 * vn0 > b * rhat + un0) {
+        q0 = q0 - 1;
+        rhat = rhat + vn1;
+        if (rhat >= b)
+            break;
+    }
+
+    *r = (un21 * b + un0 - q0 * v) >> s;
+    return q1 * b + q0;
+#endif
+}
+
+// Bitshift a u128 in place, left (signed_shift > 0) or right (signed_shift < 0)
+static inline void libdivide_u128_shift(uint64_t *u1, uint64_t *u0, int32_t signed_shift) {
+    if (signed_shift > 0) {
+        uint32_t shift = signed_shift;
+        *u1 <<= shift;
+        *u1 |= *u0 >> (64 - shift);
+        *u0 <<= shift;
+    }
+    else if (signed_shift < 0) {
+        uint32_t shift = -signed_shift;
+        *u0 >>= shift;
+        *u0 |= *u1 << (64 - shift);
+        *u1 >>= shift;
+    }
+}
+
+// Computes a 128 / 128 -> 64 bit division, with a 128 bit remainder.
+static uint64_t libdivide_128_div_128_to_64(uint64_t u_hi, uint64_t u_lo, uint64_t v_hi, uint64_t v_lo, uint64_t *r_hi, uint64_t *r_lo) {
+#if defined(HAS_INT128_T) && \
+    defined(HAS_INT128_DIV)
+    __uint128_t ufull = u_hi;
+    __uint128_t vfull = v_hi;
+    ufull = (ufull << 64) | u_lo;
+    vfull = (vfull << 64) | v_lo;
+    uint64_t res = (uint64_t)(ufull / vfull);
+    __uint128_t remainder = ufull - (vfull * res);
+    *r_lo = (uint64_t)remainder;
+    *r_hi = (uint64_t)(remainder >> 64);
+    return res;
+#else
+    // Adapted from "Unsigned Doubleword Division" in Hacker's Delight
+    // We want to compute u / v
+    typedef struct { uint64_t hi; uint64_t lo; } u128_t;
+    u128_t u = {u_hi, u_lo};
+    u128_t v = {v_hi, v_lo};
+
+    if (v.hi == 0) {
+        // divisor v is a 64 bit value, so we just need one 128/64 division
+        // Note that we are simpler than Hacker's Delight here, because we know
+        // the quotient fits in 64 bits whereas Hacker's Delight demands a full
+        // 128 bit quotient
+        *r_hi = 0;
+        return libdivide_128_div_64_to_64(u.hi, u.lo, v.lo, r_lo);
+    }
+    // Here v >= 2**64
+    // We know that v.hi != 0, so count leading zeros is OK
+    // We have 0 <= n <= 63
+    uint32_t n = libdivide_count_leading_zeros64(v.hi);
+
+    // Normalize the divisor so its MSB is 1
+    u128_t v1t = v;
+    libdivide_u128_shift(&v1t.hi, &v1t.lo, n);
+    uint64_t v1 = v1t.hi; // i.e. v1 = v1t >> 64
+
+    // To ensure no overflow
+    u128_t u1 = u;
+    libdivide_u128_shift(&u1.hi, &u1.lo, -1);
+
+    // Get quotient from divide unsigned insn.
+    uint64_t rem_ignored;
+    uint64_t q1 = libdivide_128_div_64_to_64(u1.hi, u1.lo, v1, &rem_ignored);
+
+    // Undo normalization and division of u by 2.
+    u128_t q0 = {0, q1};
+    libdivide_u128_shift(&q0.hi, &q0.lo, n);
+    libdivide_u128_shift(&q0.hi, &q0.lo, -63);
+
+    // Make q0 correct or too small by 1
+    // Equivalent to `if (q0 != 0) q0 = q0 - 1;`
+    if (q0.hi != 0 || q0.lo != 0) {
+        q0.hi -= (q0.lo == 0); // borrow
+        q0.lo -= 1;
+    }
+
+    // Now q0 is correct.
+    // Compute q0 * v as q0v
+    // = (q0.hi << 64 + q0.lo) * (v.hi << 64 + v.lo)
+    // = (q0.hi * v.hi << 128) + (q0.hi * v.lo << 64) +
+    //   (q0.lo * v.hi <<  64) + q0.lo * v.lo)
+    // Each term is 128 bit
+    // High half of full product (upper 128 bits!) are dropped
+    u128_t q0v = {0, 0};
+    q0v.hi = q0.hi*v.lo + q0.lo*v.hi + libdivide_mullhi_u64(q0.lo, v.lo);
+    q0v.lo = q0.lo*v.lo;
+
+    // Compute u - q0v as u_q0v
+    // This is the remainder
+    u128_t u_q0v = u;
+    u_q0v.hi -= q0v.hi + (u.lo < q0v.lo); // second term is borrow
+    u_q0v.lo -= q0v.lo;
+
+    // Check if u_q0v >= v
+    // This checks if our remainder is larger than the divisor
+    if ((u_q0v.hi > v.hi) ||
+        (u_q0v.hi == v.hi && u_q0v.lo >= v.lo)) {
+        // Increment q0
+        q0.lo += 1;
+        q0.hi += (q0.lo == 0); // carry
+
+        // Subtract v from remainder
+        u_q0v.hi -= v.hi + (u_q0v.lo < v.lo);
+        u_q0v.lo -= v.lo;
+    }
+
+    *r_hi = u_q0v.hi;
+    *r_lo = u_q0v.lo;
+
+    LIBDIVIDE_ASSERT(q0.hi == 0);
+    return q0.lo;
+#endif
+}
+
+////////// UINT32
+
+static inline struct libdivide_u32_t libdivide_internal_u32_gen(uint32_t d, int branchfree) {
+    if (d == 0) {
+        LIBDIVIDE_ERROR("divider must be != 0");
+    }
+
+    struct libdivide_u32_t result;
+    uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(d);
+
+    // Power of 2
+    if ((d & (d - 1)) == 0) {
+        // We need to subtract 1 from the shift value in case of an unsigned
+        // branchfree divider because there is a hardcoded right shift by 1
+        // in its division algorithm. Because of this we also need to add back
+        // 1 in its recovery algorithm.
+        result.magic = 0;
+        result.more = (uint8_t)(floor_log_2_d - (branchfree != 0));
+    } else {
+        uint8_t more;
+        uint32_t rem, proposed_m;
+        proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem);
+
+        LIBDIVIDE_ASSERT(rem > 0 && rem < d);
+        const uint32_t e = d - rem;
+
+        // This power works if e < 2**floor_log_2_d.
+        if (!branchfree && (e < (1U << floor_log_2_d))) {
+            // This power works
+            more = floor_log_2_d;
+        } else {
+            // We have to use the general 33-bit algorithm.  We need to compute
+            // (2**power) / d. However, we already have (2**(power-1))/d and
+            // its remainder.  By doubling both, and then correcting the
+            // remainder, we can compute the larger division.
+            // don't care about overflow here - in fact, we expect it
+            proposed_m += proposed_m;
+            const uint32_t twice_rem = rem + rem;
+            if (twice_rem >= d || twice_rem < rem) proposed_m += 1;
+            more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+        }
+        result.magic = 1 + proposed_m;
+        result.more = more;
+        // result.more's shift should in general be ceil_log_2_d. But if we
+        // used the smaller power, we subtract one from the shift because we're
+        // using the smaller power. If we're using the larger power, we
+        // subtract one from the shift because it's taken care of by the add
+        // indicator. So floor_log_2_d happens to be correct in both cases.
+    }
+    return result;
+}
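+// Worked example for d = 7 (hand-checked against the code above):
+// floor_log_2_d = 2; 2^34 / 7 gives proposed_m = 2454267026 with rem = 2,
+// so e = 5, which is not < 2^2, and the general 33-bit path is taken:
+// proposed_m doubles (wrapping) to 613566756, twice_rem = 4 adds nothing,
+// so magic = 613566757 (0x24924925) and more = 2 | LIBDIVIDE_ADD_MARKER.
+// Dividing 100: the high multiply gives q = 14, then
+// t = ((100 - 14) >> 1) + 14 = 57, and 57 >> 2 = 14, as expected.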
+
+struct libdivide_u32_t libdivide_u32_gen(uint32_t d) {
+    return libdivide_internal_u32_gen(d, 0);
+}
+
+struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) {
+    if (d == 1) {
+        LIBDIVIDE_ERROR("branchfree divider must be != 1");
+    }
+    struct libdivide_u32_t tmp = libdivide_internal_u32_gen(d, 1);
+    struct libdivide_u32_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_32_SHIFT_MASK)};
+    return ret;
+}
+
+uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return numer >> more;
+    }
+    else {
+        uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            uint32_t t = ((numer - q) >> 1) + q;
+            return t >> (more & LIBDIVIDE_32_SHIFT_MASK);
+        }
+        else {
+            // All upper bits are 0,
+            // don't need to mask them off.
+            return q >> more;
+        }
+    }
+}
+
+uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom) {
+    uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
+    uint32_t t = ((numer - q) >> 1) + q;
+    return t >> denom->more;
+}
+
+uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+    if (!denom->magic) {
+        return 1U << shift;
+    } else if (!(more & LIBDIVIDE_ADD_MARKER)) {
+        // We compute q = n/d = n*m / 2^(32 + shift)
+        // Therefore we have d = 2^(32 + shift) / m
+        // We need to ceil it.
+        // We know d is not a power of 2, so m is not a power of 2,
+        // so we can just add 1 to the floor
+        uint32_t hi_dividend = 1U << shift;
+        uint32_t rem_ignored;
+        return 1 + libdivide_64_div_32_to_32(hi_dividend, 0, denom->magic, &rem_ignored);
+    } else {
+        // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
+        // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
+        // Also note that shift may be as high as 31, so shift + 1 will
+        // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
+        // then double the quotient and remainder.
+        uint64_t half_n = 1ULL << (32 + shift);
+        uint64_t d = (1ULL << 32) | denom->magic;
+        // Note that the quotient is guaranteed <= 32 bits, but the remainder
+        // may need 33!
+        uint32_t half_q = (uint32_t)(half_n / d);
+        uint64_t rem = half_n % d;
+        // We computed 2^(32+shift)/(m+2^32)
+        // Need to double it, and then add 1 to the quotient if doubling the
+        // remainder would increase the quotient.
+        // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
+        uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
+
+        // We rounded down in gen (hence +1)
+        return full_q + 1;
+    }
+}
+
+uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+    if (!denom->magic) {
+        return 1U << (shift + 1);
+    } else {
+        // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
+        // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
+        // Also note that shift may be as high as 31, so shift + 1 will
+        // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
+        // then double the quotient and remainder.
+        uint64_t half_n = 1ULL << (32 + shift);
+        uint64_t d = (1ULL << 32) | denom->magic;
+        // Note that the quotient is guaranteed <= 32 bits, but the remainder
+        // may need 33!
+        uint32_t half_q = (uint32_t)(half_n / d);
+        uint64_t rem = half_n % d;
+        // We computed 2^(32+shift)/(m+2^32)
+        // Need to double it, and then add 1 to the quotient if doubling the
+        // remainder would increase the quotient.
+        // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
+        uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
+
+        // We rounded down in gen (hence +1)
+        return full_q + 1;
+    }
+}
+
+/////////// UINT64
+
+static inline struct libdivide_u64_t libdivide_internal_u64_gen(uint64_t d, int branchfree) {
+    if (d == 0) {
+        LIBDIVIDE_ERROR("divider must be != 0");
+    }
+
+    struct libdivide_u64_t result;
+    uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(d);
+
+    // Power of 2
+    if ((d & (d - 1)) == 0) {
+        // We need to subtract 1 from the shift value in case of an unsigned
+        // branchfree divider because there is a hardcoded right shift by 1
+        // in its division algorithm. Because of this we also need to add back
+        // 1 in its recovery algorithm.
+        result.magic = 0;
+        result.more = (uint8_t)(floor_log_2_d - (branchfree != 0));
+    } else {
+        uint64_t proposed_m, rem;
+        uint8_t more;
+        // (1 << (64 + floor_log_2_d)) / d
+        proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem);
+
+        LIBDIVIDE_ASSERT(rem > 0 && rem < d);
+        const uint64_t e = d - rem;
+
+        // This power works if e < 2**floor_log_2_d.
+        if (!branchfree && e < (1ULL << floor_log_2_d)) {
+            // This power works
+            more = floor_log_2_d;
+        } else {
+            // We have to use the general 65-bit algorithm.  We need to compute
+            // (2**power) / d. However, we already have (2**(power-1))/d and
+            // its remainder. By doubling both, and then correcting the
+            // remainder, we can compute the larger division.
+            // don't care about overflow here - in fact, we expect it
+            proposed_m += proposed_m;
+            const uint64_t twice_rem = rem + rem;
+            if (twice_rem >= d || twice_rem < rem) proposed_m += 1;
+            more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+        }
+        result.magic = 1 + proposed_m;
+        result.more = more;
+        // result.more's shift should in general be ceil_log_2_d. But if we
+        // used the smaller power, we subtract one from the shift because we're
+        // using the smaller power. If we're using the larger power, we
+        // subtract one from the shift because it's taken care of by the add
+        // indicator. So floor_log_2_d happens to be correct in both cases,
+        // which is why we do it outside of the if statement.
+    }
+    return result;
+}
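+
+// Editor's worked example (illustrative, not upstream): for d = 7 we get
+// floor_log_2_d = 2 and rem = 2^66 mod 7 = 1, so e = 6 >= 2^2 and the
+// general 65-bit path is taken, yielding magic = 0x2492492492492493 and
+// more = LIBDIVIDE_ADD_MARKER | 2. Dividing n = 21 then runs:
+// q = mullhi(magic, 21) = 3; t = ((21 - 3) >> 1) + 3 = 12; 12 >> 2 = 3.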
+
+struct libdivide_u64_t libdivide_u64_gen(uint64_t d) {
+    return libdivide_internal_u64_gen(d, 0);
+}
+
+struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) {
+    if (d == 1) {
+        LIBDIVIDE_ERROR("branchfree divider must be != 1");
+    }
+    struct libdivide_u64_t tmp = libdivide_internal_u64_gen(d, 1);
+    struct libdivide_u64_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_64_SHIFT_MASK)};
+    return ret;
+}
+
+uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return numer >> more;
+    }
+    else {
+        uint64_t q = libdivide_mullhi_u64(denom->magic, numer);
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            uint64_t t = ((numer - q) >> 1) + q;
+            return t >> (more & LIBDIVIDE_64_SHIFT_MASK);
+        }
+        else {
+            // All upper bits are 0,
+            // don't need to mask them off.
+            return q >> more;
+        }
+    }
+}
+
+uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom) {
+    uint64_t q = libdivide_mullhi_u64(denom->magic, numer);
+    uint64_t t = ((numer - q) >> 1) + q;
+    return t >> denom->more;
+}
+
+uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+
+    if (!denom->magic) {
+        return 1ULL << shift;
+    } else if (!(more & LIBDIVIDE_ADD_MARKER)) {
+        // We compute q = n/d = n*m / 2^(64 + shift)
+        // Therefore we have d = 2^(64 + shift) / m
+        // We need to ceil it.
+        // We know d is not a power of 2, so m is not a power of 2,
+        // so we can just add 1 to the floor
+        uint64_t hi_dividend = 1ULL << shift;
+        uint64_t rem_ignored;
+        return 1 + libdivide_128_div_64_to_64(hi_dividend, 0, denom->magic, &rem_ignored);
+    } else {
+        // Here we wish to compute d = 2^(64+shift+1)/(m+2^64).
+        // Notice (m + 2^64) is a 65 bit number. This gets hairy. See
+        // libdivide_u32_recover for more on what we do here.
+        // TODO: do something better than 128 bit math
+
+        // Full n is a (potentially) 129 bit value
+        // half_n is a 128 bit value
+        // Compute the hi half of half_n. Low half is 0.
+        uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0;
+        // d is a 65 bit value. The high bit is always set to 1.
+        const uint64_t d_hi = 1, d_lo = denom->magic;
+        // Note that the quotient is guaranteed <= 64 bits,
+        // but the remainder may need 65!
+        uint64_t r_hi, r_lo;
+        uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo);
+        // We computed 2^(64+shift)/(m+2^64)
+        // Double the remainder ('dr') and check if that is larger than d
+        // Note that d is a 65 bit value, so r_hi is at most 1, and so
+        // r_hi + r_hi cannot overflow
+        uint64_t dr_lo = r_lo + r_lo;
+        uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry
+        int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo);
+        uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0);
+        return full_q + 1;
+    }
+}
+
+uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+
+    if (!denom->magic) {
+        return 1ULL << (shift + 1);
+    } else {
+        // Here we wish to compute d = 2^(64+shift+1)/(m+2^64).
+        // Notice (m + 2^64) is a 65 bit number. This gets hairy. See
+        // libdivide_u32_recover for more on what we do here.
+        // TODO: do something better than 128 bit math
+
+        // Full n is a (potentially) 129 bit value
+        // half_n is a 128 bit value
+        // Compute the hi half of half_n. Low half is 0.
+        uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0;
+        // d is a 65 bit value. The high bit is always set to 1.
+        const uint64_t d_hi = 1, d_lo = denom->magic;
+        // Note that the quotient is guaranteed <= 64 bits,
+        // but the remainder may need 65!
+        uint64_t r_hi, r_lo;
+        uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo);
+        // We computed 2^(64+shift)/(m+2^64)
+        // Double the remainder ('dr') and check if that is larger than d
+        // Note that d is a 65 bit value, so r_hi is at most 1, and so
+        // r_hi + r_hi cannot overflow
+        uint64_t dr_lo = r_lo + r_lo;
+        uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry
+        int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo);
+        uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0);
+        return full_q + 1;
+    }
+}
+
+/////////// SINT32
+
+static inline struct libdivide_s32_t libdivide_internal_s32_gen(int32_t d, int branchfree) {
+    if (d == 0) {
+        LIBDIVIDE_ERROR("divider must be != 0");
+    }
+
+    struct libdivide_s32_t result;
+
+    // If d is a power of 2, or the negative of a power of 2, we have to use a shift.
+    // This is especially important because the magic algorithm fails for -1.
+    // To check if d is a power of 2 or its inverse, it suffices to check
+    // whether its absolute value has exactly one bit set. This works even for
+    // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set
+    // and is a power of 2.
+    uint32_t ud = (uint32_t)d;
+    uint32_t absD = (d < 0) ? -ud : ud;
+    uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(absD);
+    // check if exactly one bit is set,
+    // don't care if absD is 0 since that's divide by zero
+    if ((absD & (absD - 1)) == 0) {
+        // Branchfree and normal paths are exactly the same
+        result.magic = 0;
+        result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+    } else {
+        LIBDIVIDE_ASSERT(floor_log_2_d >= 1);
+
+        uint8_t more;
+        // the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word
+        // is 0 and the high word is 2**(floor_log_2_d - 1)
+        uint32_t rem, proposed_m;
+        proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem);
+        const uint32_t e = absD - rem;
+
+        // We are going to start with a power of floor_log_2_d - 1.
+        // This works if e < 2**floor_log_2_d.
+        if (!branchfree && e < (1U << floor_log_2_d)) {
+            // This power works
+            more = floor_log_2_d - 1;
+        } else {
+            // We need to go one higher. This should not make proposed_m
+            // overflow, but it will make it negative when interpreted as an
+            // int32_t.
+            proposed_m += proposed_m;
+            const uint32_t twice_rem = rem + rem;
+            if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
+            more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+        }
+
+        proposed_m += 1;
+        int32_t magic = (int32_t)proposed_m;
+
+        // Mark if we are negative. Note we only negate the magic number in the
+        // branchfull case.
+        if (d < 0) {
+            more |= LIBDIVIDE_NEGATIVE_DIVISOR;
+            if (!branchfree) {
+                magic = -magic;
+            }
+        }
+
+        result.more = more;
+        result.magic = magic;
+    }
+    return result;
+}
+
+struct libdivide_s32_t libdivide_s32_gen(int32_t d) {
+    return libdivide_internal_s32_gen(d, 0);
+}
+
+struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) {
+    struct libdivide_s32_t tmp = libdivide_internal_s32_gen(d, 1);
+    struct libdivide_s32_branchfree_t result = {tmp.magic, tmp.more};
+    return result;
+}
+
+int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+    if (!denom->magic) {
+        uint32_t sign = (int8_t)more >> 7;
+        uint32_t mask = (1U << shift) - 1;
+        uint32_t uq = numer + ((numer >> 31) & mask);
+        int32_t q = (int32_t)uq;
+        q >>= shift;
+        q = (q ^ sign) - sign;
+        return q;
+    } else {
+        uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer);
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // must be arithmetic shift and then sign extend
+            int32_t sign = (int8_t)more >> 7;
+            // q += (more < 0 ? -numer : numer)
+            // cast required to avoid UB
+            uq += ((uint32_t)numer ^ sign) - sign;
+        }
+        int32_t q = (int32_t)uq;
+        q >>= shift;
+        q += (q < 0);
+        return q;
+    }
+}
+
+int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+    // must be arithmetic shift and then sign extend
+    int32_t sign = (int8_t)more >> 7;
+    int32_t magic = denom->magic;
+    int32_t q = libdivide_mullhi_s32(magic, numer);
+    q += numer;
+
+    // If q is non-negative, we have nothing to do
+    // If q is negative, we want to add either (2**shift)-1 if d is a power of
+    // 2, or (2**shift) if it is not a power of 2
+    uint32_t is_power_of_2 = (magic == 0);
+    uint32_t q_sign = (uint32_t)(q >> 31);
+    q += q_sign & ((1U << shift) - is_power_of_2);
+
+    // Now arithmetic right shift
+    q >>= shift;
+    // Negate if needed
+    q = (q ^ sign) - sign;
+
+    return q;
+}
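+
+// Editor's sketch (illustrative, not upstream): signed branchfree dividers
+// accept negative divisors; the sign lives in 'more' rather than being
+// branched on. Names are illustrative only.
+#if 0
+static void example_s32_branchfree(int32_t *buf, size_t len) {
+    struct libdivide_s32_branchfree_t dm3 = libdivide_s32_branchfree_gen(-3);
+    for (size_t i = 0; i < len; i++) {
+        buf[i] = libdivide_s32_branchfree_do(buf[i], &dm3);   // buf[i] /= -3
+    }
+}
+#endif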
+
+int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+    if (!denom->magic) {
+        uint32_t absD = 1U << shift;
+        if (more & LIBDIVIDE_NEGATIVE_DIVISOR) {
+            absD = -absD;
+        }
+        return (int32_t)absD;
+    } else {
+        // Unsigned math is much easier
+        // We negate the magic number only in the branchfull case, and we don't
+        // know which case we're in. However we have enough information to
+        // determine the correct sign of the magic number. The divisor was
+        // negative if LIBDIVIDE_NEGATIVE_DIVISOR is set. If ADD_MARKER is set,
+        // the magic number's sign is opposite that of the divisor.
+        // We want to compute the positive magic number.
+        int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR);
+        int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER)
+            ? denom->magic > 0 : denom->magic < 0;
+
+        // Handle the power of 2 case (including branchfree)
+        if (denom->magic == 0) {
+            int32_t result = 1U << shift;
+            return negative_divisor ? -result : result;
+        }
+
+        uint32_t d = (uint32_t)(magic_was_negated ? -denom->magic : denom->magic);
+        uint64_t n = 1ULL << (32 + shift); // this shift cannot exceed 30
+        uint32_t q = (uint32_t)(n / d);
+        int32_t result = (int32_t)q;
+        result += 1;
+        return negative_divisor ? -result : result;
+    }
+}
+
+int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom) {
+    return libdivide_s32_recover((const struct libdivide_s32_t *)denom);
+}
+
+///////////// SINT64
+
+static inline struct libdivide_s64_t libdivide_internal_s64_gen(int64_t d, int branchfree) {
+    if (d == 0) {
+        LIBDIVIDE_ERROR("divider must be != 0");
+    }
+
+    struct libdivide_s64_t result;
+
+    // If d is a power of 2, or the negative of a power of 2, we have to use a shift.
+    // This is especially important because the magic algorithm fails for -1.
+    // To check if d is a power of 2 or its inverse, it suffices to check
+    // whether its absolute value has exactly one bit set.  This works even for
+    // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set
+    // and is a power of 2.
+    uint64_t ud = (uint64_t)d;
+    uint64_t absD = (d < 0) ? -ud : ud;
+    uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(absD);
+    // check if exactly one bit is set,
+    // don't care if absD is 0 since that's divide by zero
+    if ((absD & (absD - 1)) == 0) {
+        // Branchfree and non-branchfree cases are the same
+        result.magic = 0;
+        result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+    } else {
+        // the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word
+        // is 0 and the high word is 2**(floor_log_2_d - 1)
+        uint8_t more;
+        uint64_t rem, proposed_m;
+        proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem);
+        const uint64_t e = absD - rem;
+
+        // We are going to start with a power of floor_log_2_d - 1.
+        // This works if e < 2**floor_log_2_d.
+        if (!branchfree && e < (1ULL << floor_log_2_d)) {
+            // This power works
+            more = floor_log_2_d - 1;
+        } else {
+            // We need to go one higher. This should not make proposed_m
+            // overflow, but it will make it negative when interpreted as an
+            // int64_t.
+            proposed_m += proposed_m;
+            const uint64_t twice_rem = rem + rem;
+            if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
+            // Note that we only set the LIBDIVIDE_NEGATIVE_DIVISOR bit if we
+            // also set ADD_MARKER; this is an annoying optimization that
+            // enables algorithm #4 to avoid the mask. However, we always set
+            // it in the branchfree case.
+            more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+        }
+        proposed_m += 1;
+        int64_t magic = (int64_t)proposed_m;
+
+        // Mark if we are negative
+        if (d < 0) {
+            more |= LIBDIVIDE_NEGATIVE_DIVISOR;
+            if (!branchfree) {
+                magic = -magic;
+            }
+        }
+
+        result.more = more;
+        result.magic = magic;
+    }
+    return result;
+}
+
+struct libdivide_s64_t libdivide_s64_gen(int64_t d) {
+    return libdivide_internal_s64_gen(d, 0);
+}
+
+struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) {
+    struct libdivide_s64_t tmp = libdivide_internal_s64_gen(d, 1);
+    struct libdivide_s64_branchfree_t ret = {tmp.magic, tmp.more};
+    return ret;
+}
+
+int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+
+    if (!denom->magic) { // shift path
+        uint64_t mask = (1ULL << shift) - 1;
+        uint64_t uq = numer + ((numer >> 63) & mask);
+        int64_t q = (int64_t)uq;
+        q >>= shift;
+        // must be arithmetic shift and then sign-extend
+        int64_t sign = (int8_t)more >> 7;
+        q = (q ^ sign) - sign;
+        return q;
+    } else {
+        uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer);
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // must be arithmetic shift and then sign extend
+            int64_t sign = (int8_t)more >> 7;
+            // q += (more < 0 ? -numer : numer)
+            // cast required to avoid UB
+            uq += ((uint64_t)numer ^ sign) - sign;
+        }
+        int64_t q = (int64_t)uq;
+        q >>= shift;
+        q += (q < 0);
+        return q;
+    }
+}
+
+int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+    // must be arithmetic shift and then sign extend
+    int64_t sign = (int8_t)more >> 7;
+    int64_t magic = denom->magic;
+    int64_t q = libdivide_mullhi_s64(magic, numer);
+    q += numer;
+
+    // If q is non-negative, we have nothing to do.
+    // If q is negative, we want to add either (2**shift)-1 if d is a power of
+    // 2, or (2**shift) if it is not a power of 2.
+    uint64_t is_power_of_2 = (magic == 0);
+    uint64_t q_sign = (uint64_t)(q >> 63);
+    q += q_sign & ((1ULL << shift) - is_power_of_2);
+
+    // Arithmetic right shift
+    q >>= shift;
+    // Negate if needed
+    q = (q ^ sign) - sign;
+
+    return q;
+}
+
+int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+    if (denom->magic == 0) { // shift path
+        uint64_t absD = 1ULL << shift;
+        if (more & LIBDIVIDE_NEGATIVE_DIVISOR) {
+            absD = -absD;
+        }
+        return (int64_t)absD;
+    } else {
+        // Unsigned math is much easier
+        int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR);
+        int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER)
+            ? denom->magic > 0 : denom->magic < 0;
+
+        uint64_t d = (uint64_t)(magic_was_negated ? -denom->magic : denom->magic);
+        uint64_t n_hi = 1ULL << shift, n_lo = 0;
+        uint64_t rem_ignored;
+        uint64_t q = libdivide_128_div_64_to_64(n_hi, n_lo, d, &rem_ignored);
+        int64_t result = (int64_t)(q + 1);
+        if (negative_divisor) {
+            result = -result;
+        }
+        return result;
+    }
+}
+
+int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom) {
+    return libdivide_s64_recover((const struct libdivide_s64_t *)denom);
+}
+
+#if defined(LIBDIVIDE_AVX512)
+
+static inline __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom);
+static inline __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom);
+static inline __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom);
+static inline __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom);
+
+static inline __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom);
+static inline __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom);
+static inline __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom);
+static inline __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom);
+
+//////// Internal Utility Functions
+
+static inline __m512i libdivide_s64_signbits(__m512i v) {
+    return _mm512_srai_epi64(v, 63);
+}
+
+static inline __m512i libdivide_s64_shift_right_vector(__m512i v, int amt) {
+    return _mm512_srai_epi64(v, amt);
+}
+
+// Here, b is assumed to contain one 32-bit value repeated.
+static inline __m512i libdivide_mullhi_u32_vector(__m512i a, __m512i b) {
+    __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epu32(a, b), 32);
+    __m512i a1X3X = _mm512_srli_epi64(a, 32);
+    __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0);
+    __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epu32(a1X3X, b), mask);
+    return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// b is one 32-bit value repeated.
+static inline __m512i libdivide_mullhi_s32_vector(__m512i a, __m512i b) {
+    __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epi32(a, b), 32);
+    __m512i a1X3X = _mm512_srli_epi64(a, 32);
+    __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0);
+    __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epi32(a1X3X, b), mask);
+    return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// Here, y is assumed to contain one 64-bit value repeated.
+// https://stackoverflow.com/a/28827013
+static inline __m512i libdivide_mullhi_u64_vector(__m512i x, __m512i y) {
+    __m512i lomask = _mm512_set1_epi64(0xffffffff);
+    __m512i xh = _mm512_shuffle_epi32(x, (_MM_PERM_ENUM) 0xB1);
+    __m512i yh = _mm512_shuffle_epi32(y, (_MM_PERM_ENUM) 0xB1);
+    __m512i w0 = _mm512_mul_epu32(x, y);
+    __m512i w1 = _mm512_mul_epu32(x, yh);
+    __m512i w2 = _mm512_mul_epu32(xh, y);
+    __m512i w3 = _mm512_mul_epu32(xh, yh);
+    __m512i w0h = _mm512_srli_epi64(w0, 32);
+    __m512i s1 = _mm512_add_epi64(w1, w0h);
+    __m512i s1l = _mm512_and_si512(s1, lomask);
+    __m512i s1h = _mm512_srli_epi64(s1, 32);
+    __m512i s2 = _mm512_add_epi64(w2, s1l);
+    __m512i s2h = _mm512_srli_epi64(s2, 32);
+    __m512i hi = _mm512_add_epi64(w3, s1h);
+            hi = _mm512_add_epi64(hi, s2h);
+
+    return hi;
+}
+
+// y is one 64-bit value repeated.
+static inline __m512i libdivide_mullhi_s64_vector(__m512i x, __m512i y) {
+    __m512i p = libdivide_mullhi_u64_vector(x, y);
+    __m512i t1 = _mm512_and_si512(libdivide_s64_signbits(x), y);
+    __m512i t2 = _mm512_and_si512(libdivide_s64_signbits(y), x);
+    p = _mm512_sub_epi64(p, t1);
+    p = _mm512_sub_epi64(p, t2);
+    return p;
+}
+
+////////// UINT32
+
+__m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm512_srli_epi32(numers, more);
+    }
+    else {
+        __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint32_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+            __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q);
+            return _mm512_srli_epi32(t, shift);
+        }
+        else {
+            return _mm512_srli_epi32(q, more);
+        }
+    }
+}
+
+__m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom) {
+    __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic));
+    __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q);
+    return _mm512_srli_epi32(t, denom->more);
+}
+
+////////// UINT64
+
+__m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm512_srli_epi64(numers, more);
+    }
+    else {
+        __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint32_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+            __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q);
+            return _mm512_srli_epi64(t, shift);
+        }
+        else {
+            return _mm512_srli_epi64(q, more);
+        }
+    }
+}
+
+__m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom) {
+    __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic));
+    __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q);
+    return _mm512_srli_epi64(t, denom->more);
+}
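+
+// Editor's sketch (illustrative, not upstream): each AVX512 call divides 16
+// packed uint32_t lanes by one precomputed divider. Names are illustrative.
+#if 0
+static void example_u32_avx512(uint32_t *buf, size_t len) {   // len % 16 == 0
+    struct libdivide_u32_t d9 = libdivide_u32_gen(9);
+    for (size_t i = 0; i < len; i += 16) {
+        __m512i v = _mm512_loadu_si512((const void *)(buf + i));
+        _mm512_storeu_si512((void *)(buf + i), libdivide_u32_do_vector(v, &d9));
+    }
+}
+#endif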
+
+////////// SINT32
+
+__m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+        uint32_t mask = (1U << shift) - 1;
+        __m512i roundToZeroTweak = _mm512_set1_epi32(mask);
+        // q = numer + ((numer >> 31) & roundToZeroTweak);
+        __m512i q = _mm512_add_epi32(numers, _mm512_and_si512(_mm512_srai_epi32(numers, 31), roundToZeroTweak));
+        q = _mm512_srai_epi32(q, shift);
+        __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+        // q = (q ^ sign) - sign;
+        q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign);
+        return q;
+    }
+    else {
+        __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+             // must be arithmetic shift
+            __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+             // q += ((numer ^ sign) - sign);
+            q = _mm512_add_epi32(q, _mm512_sub_epi32(_mm512_xor_si512(numers, sign), sign));
+        }
+        // q >>= shift
+        q = _mm512_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
+        q = _mm512_add_epi32(q, _mm512_srli_epi32(q, 31)); // q += (q < 0)
+        return q;
+    }
+}
+
+__m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom) {
+    int32_t magic = denom->magic;
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+     // must be arithmetic shift
+    __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+    __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(magic));
+    q = _mm512_add_epi32(q, numers); // q += numers
+
+    // If q is non-negative, we have nothing to do
+    // If q is negative, we want to add either (2**shift)-1 if d is
+    // a power of 2, or (2**shift) if it is not a power of 2
+    uint32_t is_power_of_2 = (magic == 0);
+    __m512i q_sign = _mm512_srai_epi32(q, 31); // q_sign = q >> 31
+    __m512i mask = _mm512_set1_epi32((1U << shift) - is_power_of_2);
+    q = _mm512_add_epi32(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask)
+    q = _mm512_srai_epi32(q, shift); // q >>= shift
+    q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign
+    return q;
+}
+
+////////// SINT64
+
+__m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom) {
+    uint8_t more = denom->more;
+    int64_t magic = denom->magic;
+    if (magic == 0) { // shift path
+        uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+        uint64_t mask = (1ULL << shift) - 1;
+        __m512i roundToZeroTweak = _mm512_set1_epi64(mask);
+        // q = numer + ((numer >> 63) & roundToZeroTweak);
+        __m512i q = _mm512_add_epi64(numers, _mm512_and_si512(libdivide_s64_signbits(numers), roundToZeroTweak));
+        q = libdivide_s64_shift_right_vector(q, shift);
+        __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+         // q = (q ^ sign) - sign;
+        q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign);
+        return q;
+    }
+    else {
+        __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // must be arithmetic shift
+            __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+            // q += ((numer ^ sign) - sign);
+            q = _mm512_add_epi64(q, _mm512_sub_epi64(_mm512_xor_si512(numers, sign), sign));
+        }
+        // q >>= denom->mult_path.shift
+        q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
+        q = _mm512_add_epi64(q, _mm512_srli_epi64(q, 63)); // q += (q < 0)
+        return q;
+    }
+}
+
+__m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom) {
+    int64_t magic = denom->magic;
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+    // must be arithmetic shift
+    __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+
+     // libdivide_mullhi_s64(numers, magic);
+    __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic));
+    q = _mm512_add_epi64(q, numers); // q += numers
+
+    // If q is non-negative, we have nothing to do.
+    // If q is negative, we want to add either (2**shift)-1 if d is
+    // a power of 2, or (2**shift) if it is not a power of 2.
+    uint32_t is_power_of_2 = (magic == 0);
+    __m512i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+    __m512i mask = _mm512_set1_epi64((1ULL << shift) - is_power_of_2);
+    q = _mm512_add_epi64(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask)
+    q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+    q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign
+    return q;
+}
+
+#elif defined(LIBDIVIDE_AVX2)
+
+static inline __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom);
+static inline __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom);
+static inline __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom);
+static inline __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom);
+
+static inline __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom);
+static inline __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom);
+static inline __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom);
+static inline __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom);
+
+//////// Internal Utility Functions
+
+// Implementation of _mm256_srai_epi64(v, 63) (from AVX512).
+static inline __m256i libdivide_s64_signbits(__m256i v) {
+    __m256i hiBitsDuped = _mm256_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1));
+    __m256i signBits = _mm256_srai_epi32(hiBitsDuped, 31);
+    return signBits;
+}
+
+// Implementation of _mm256_srai_epi64 (from AVX512).
+static inline __m256i libdivide_s64_shift_right_vector(__m256i v, int amt) {
+    const int b = 64 - amt;
+    __m256i m = _mm256_set1_epi64x(1ULL << (b - 1));
+    __m256i x = _mm256_srli_epi64(v, amt);
+    __m256i result = _mm256_sub_epi64(_mm256_xor_si256(x, m), m);
+    return result;
+}
+
+// Here, b is assumed to contain one 32-bit value repeated.
+static inline __m256i libdivide_mullhi_u32_vector(__m256i a, __m256i b) {
+    __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epu32(a, b), 32);
+    __m256i a1X3X = _mm256_srli_epi64(a, 32);
+    __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0);
+    __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epu32(a1X3X, b), mask);
+    return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// b is one 32-bit value repeated.
+static inline __m256i libdivide_mullhi_s32_vector(__m256i a, __m256i b) {
+    __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epi32(a, b), 32);
+    __m256i a1X3X = _mm256_srli_epi64(a, 32);
+    __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0);
+    __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epi32(a1X3X, b), mask);
+    return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// Here, y is assumed to contain one 64-bit value repeated.
+// https://stackoverflow.com/a/28827013
+static inline __m256i libdivide_mullhi_u64_vector(__m256i x, __m256i y) {
+    __m256i lomask = _mm256_set1_epi64x(0xffffffff);
+    __m256i xh = _mm256_shuffle_epi32(x, 0xB1);        // x0l, x0h, x1l, x1h
+    __m256i yh = _mm256_shuffle_epi32(y, 0xB1);        // y0l, y0h, y1l, y1h
+    __m256i w0 = _mm256_mul_epu32(x, y);               // x0l*y0l, x1l*y1l
+    __m256i w1 = _mm256_mul_epu32(x, yh);              // x0l*y0h, x1l*y1h
+    __m256i w2 = _mm256_mul_epu32(xh, y);              // x0h*y0l, x1h*y1l
+    __m256i w3 = _mm256_mul_epu32(xh, yh);             // x0h*y0h, x1h*y1h
+    __m256i w0h = _mm256_srli_epi64(w0, 32);
+    __m256i s1 = _mm256_add_epi64(w1, w0h);
+    __m256i s1l = _mm256_and_si256(s1, lomask);
+    __m256i s1h = _mm256_srli_epi64(s1, 32);
+    __m256i s2 = _mm256_add_epi64(w2, s1l);
+    __m256i s2h = _mm256_srli_epi64(s2, 32);
+    __m256i hi = _mm256_add_epi64(w3, s1h);
+            hi = _mm256_add_epi64(hi, s2h);
+
+    return hi;
+}
+
+// y is one 64-bit value repeated.
+static inline __m256i libdivide_mullhi_s64_vector(__m256i x, __m256i y) {
+    __m256i p = libdivide_mullhi_u64_vector(x, y);
+    __m256i t1 = _mm256_and_si256(libdivide_s64_signbits(x), y);
+    __m256i t2 = _mm256_and_si256(libdivide_s64_signbits(y), x);
+    p = _mm256_sub_epi64(p, t1);
+    p = _mm256_sub_epi64(p, t2);
+    return p;
+}
+
+////////// UINT32
+
+__m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm256_srli_epi32(numers, more);
+    }
+    else {
+        __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint32_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+            __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q);
+            return _mm256_srli_epi32(t, shift);
+        }
+        else {
+            return _mm256_srli_epi32(q, more);
+        }
+    }
+}
+
+__m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom) {
+    __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic));
+    __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q);
+    return _mm256_srli_epi32(t, denom->more);
+}
+
+////////// UINT64
+
+__m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm256_srli_epi64(numers, more);
+    }
+    else {
+        __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint32_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+            __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q);
+            return _mm256_srli_epi64(t, shift);
+        }
+        else {
+            return _mm256_srli_epi64(q, more);
+        }
+    }
+}
+
+__m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom) {
+    __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic));
+    __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q);
+    return _mm256_srli_epi64(t, denom->more);
+}
+
+////////// SINT32
+
+__m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+        uint32_t mask = (1U << shift) - 1;
+        __m256i roundToZeroTweak = _mm256_set1_epi32(mask);
+        // q = numer + ((numer >> 31) & roundToZeroTweak);
+        __m256i q = _mm256_add_epi32(numers, _mm256_and_si256(_mm256_srai_epi32(numers, 31), roundToZeroTweak));
+        q = _mm256_srai_epi32(q, shift);
+        __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+        // q = (q ^ sign) - sign;
+        q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign);
+        return q;
+    }
+    else {
+        __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+             // must be arithmetic shift
+            __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+             // q += ((numer ^ sign) - sign);
+            q = _mm256_add_epi32(q, _mm256_sub_epi32(_mm256_xor_si256(numers, sign), sign));
+        }
+        // q >>= shift
+        q = _mm256_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
+        q = _mm256_add_epi32(q, _mm256_srli_epi32(q, 31)); // q += (q < 0)
+        return q;
+    }
+}
+
+__m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom) {
+    int32_t magic = denom->magic;
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+     // must be arithmetic shift
+    __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+    __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(magic));
+    q = _mm256_add_epi32(q, numers); // q += numers
+
+    // If q is non-negative, we have nothing to do
+    // If q is negative, we want to add either (2**shift)-1 if d is
+    // a power of 2, or (2**shift) if it is not a power of 2
+    uint32_t is_power_of_2 = (magic == 0);
+    __m256i q_sign = _mm256_srai_epi32(q, 31); // q_sign = q >> 31
+    __m256i mask = _mm256_set1_epi32((1U << shift) - is_power_of_2);
+    q = _mm256_add_epi32(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask)
+    q = _mm256_srai_epi32(q, shift); // q >>= shift
+    q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign
+    return q;
+}
+
+////////// SINT64
+
+__m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom) {
+    uint8_t more = denom->more;
+    int64_t magic = denom->magic;
+    if (magic == 0) { // shift path
+        uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+        uint64_t mask = (1ULL << shift) - 1;
+        __m256i roundToZeroTweak = _mm256_set1_epi64x(mask);
+        // q = numer + ((numer >> 63) & roundToZeroTweak);
+        __m256i q = _mm256_add_epi64(numers, _mm256_and_si256(libdivide_s64_signbits(numers), roundToZeroTweak));
+        q = libdivide_s64_shift_right_vector(q, shift);
+        __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+         // q = (q ^ sign) - sign;
+        q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign);
+        return q;
+    }
+    else {
+        __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // must be arithmetic shift
+            __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+            // q += ((numer ^ sign) - sign);
+            q = _mm256_add_epi64(q, _mm256_sub_epi64(_mm256_xor_si256(numers, sign), sign));
+        }
+        // q >>= denom->mult_path.shift
+        q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
+        q = _mm256_add_epi64(q, _mm256_srli_epi64(q, 63)); // q += (q < 0)
+        return q;
+    }
+}
+
+__m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom) {
+    int64_t magic = denom->magic;
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+    // must be arithmetic shift
+    __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+
+     // libdivide_mullhi_s64(numers, magic);
+    __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic));
+    q = _mm256_add_epi64(q, numers); // q += numers
+
+    // If q is non-negative, we have nothing to do.
+    // If q is negative, we want to add either (2**shift)-1 if d is
+    // a power of 2, or (2**shift) if it is not a power of 2.
+    uint32_t is_power_of_2 = (magic == 0);
+    __m256i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+    __m256i mask = _mm256_set1_epi64x((1ULL << shift) - is_power_of_2);
+    q = _mm256_add_epi64(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask)
+    q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+    q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign
+    return q;
+}
+
+#elif defined(LIBDIVIDE_SSE2)
+
+static inline __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom);
+static inline __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom);
+static inline __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom);
+static inline __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom);
+
+static inline __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom);
+static inline __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom);
+static inline __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom);
+static inline __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom);
+
+//////// Internal Utility Functions
+
+// Implementation of _mm_srai_epi64(v, 63) (from AVX512).
+static inline __m128i libdivide_s64_signbits(__m128i v) {
+    __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1));
+    __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31);
+    return signBits;
+}
+
+// Implementation of _mm_srai_epi64 (from AVX512).
+static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) {
+    const int b = 64 - amt;
+    __m128i m = _mm_set1_epi64x(1ULL << (b - 1));
+    __m128i x = _mm_srli_epi64(v, amt);
+    __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m);
+    return result;
+}
+
+// Here, b is assumed to contain one 32-bit value repeated.
+static inline __m128i libdivide_mullhi_u32_vector(__m128i a, __m128i b) {
+    __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32);
+    __m128i a1X3X = _mm_srli_epi64(a, 32);
+    __m128i mask = _mm_set_epi32(-1, 0, -1, 0);
+    __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), mask);
+    return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// SSE2 does not have a signed multiplication instruction, but we can convert
+// unsigned to signed pretty efficiently. Again, b is just a 32 bit value
+// repeated four times.
+static inline __m128i libdivide_mullhi_s32_vector(__m128i a, __m128i b) {
+    __m128i p = libdivide_mullhi_u32_vector(a, b);
+    // t1 = (a >> 31) & y, arithmetic shift
+    __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b);
+    __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a);
+    p = _mm_sub_epi32(p, t1);
+    p = _mm_sub_epi32(p, t2);
+    return p;
+}
+
+// Here, y is assumed to contain one 64-bit value repeated.
+// https://stackoverflow.com/a/28827013
+static inline __m128i libdivide_mullhi_u64_vector(__m128i x, __m128i y) {
+    __m128i lomask = _mm_set1_epi64x(0xffffffff);
+    __m128i xh = _mm_shuffle_epi32(x, 0xB1);        // x0l, x0h, x1l, x1h
+    __m128i yh = _mm_shuffle_epi32(y, 0xB1);        // y0l, y0h, y1l, y1h
+    __m128i w0 = _mm_mul_epu32(x, y);               // x0l*y0l, x1l*y1l
+    __m128i w1 = _mm_mul_epu32(x, yh);              // x0l*y0h, x1l*y1h
+    __m128i w2 = _mm_mul_epu32(xh, y);               // x0h*y0l, x1h*y1l
+    __m128i w3 = _mm_mul_epu32(xh, yh);             // x0h*y0h, x1h*y1h
+    __m128i w0h = _mm_srli_epi64(w0, 32);
+    __m128i s1 = _mm_add_epi64(w1, w0h);
+    __m128i s1l = _mm_and_si128(s1, lomask);
+    __m128i s1h = _mm_srli_epi64(s1, 32);
+    __m128i s2 = _mm_add_epi64(w2, s1l);
+    __m128i s2h = _mm_srli_epi64(s2, 32);
+    __m128i hi = _mm_add_epi64(w3, s1h);
+            hi = _mm_add_epi64(hi, s2h);
+
+    return hi;
+}
+
+// y is one 64-bit value repeated.
+static inline __m128i libdivide_mullhi_s64_vector(__m128i x, __m128i y) {
+    __m128i p = libdivide_mullhi_u64_vector(x, y);
+    __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y);
+    __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x);
+    p = _mm_sub_epi64(p, t1);
+    p = _mm_sub_epi64(p, t2);
+    return p;
+}
+
+////////// UINT32
+
+__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm_srli_epi32(numers, more);
+    }
+    else {
+        __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint32_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+            __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
+            return _mm_srli_epi32(t, shift);
+        }
+        else {
+            return _mm_srli_epi32(q, more);
+        }
+    }
+}
+
+__m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom) {
+    __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic));
+    __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
+    return _mm_srli_epi32(t, denom->more);
+}
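+
+// Editor's sketch (illustrative, not upstream): the same pattern with 4
+// lanes under SSE2. Names are illustrative only.
+#if 0
+static void example_u32_sse2(uint32_t *buf, size_t len) {   // len % 4 == 0
+    struct libdivide_u32_t d9 = libdivide_u32_gen(9);
+    for (size_t i = 0; i < len; i += 4) {
+        __m128i v = _mm_loadu_si128((const __m128i *)(buf + i));
+        _mm_storeu_si128((__m128i *)(buf + i), libdivide_u32_do_vector(v, &d9));
+    }
+}
+#endif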
+
+////////// UINT64
+
+__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm_srli_epi64(numers, more);
+    }
+    else {
+        __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint32_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+            __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
+            return _mm_srli_epi64(t, shift);
+        }
+        else {
+            return _mm_srli_epi64(q, more);
+        }
+    }
+}
+
+__m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom) {
+    __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic));
+    __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
+    return _mm_srli_epi64(t, denom->more);
+}
+
+////////// SINT32
+
+__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+        uint32_t mask = (1U << shift) - 1;
+        __m128i roundToZeroTweak = _mm_set1_epi32(mask);
+        // q = numer + ((numer >> 31) & roundToZeroTweak);
+        __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak));
+        q = _mm_srai_epi32(q, shift);
+        __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+        // q = (q ^ sign) - sign;
+        q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign);
+        return q;
+    }
+    else {
+        __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+             // must be arithmetic shift
+            __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+             // q += ((numer ^ sign) - sign);
+            q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign));
+        }
+        // q >>= shift
+        q = _mm_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
+        q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0)
+        return q;
+    }
+}
+
+__m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom) {
+    int32_t magic = denom->magic;
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+     // must be arithmetic shift
+    __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+    __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(magic));
+    q = _mm_add_epi32(q, numers); // q += numers
+
+    // If q is non-negative, we have nothing to do
+    // If q is negative, we want to add either (2**shift)-1 if d is
+    // a power of 2, or (2**shift) if it is not a power of 2
+    uint32_t is_power_of_2 = (magic == 0);
+    __m128i q_sign = _mm_srai_epi32(q, 31); // q_sign = q >> 31
+    __m128i mask = _mm_set1_epi32((1U << shift) - is_power_of_2);
+    q = _mm_add_epi32(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask)
+    q = _mm_srai_epi32(q, shift); // q >>= shift
+    q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign
+    return q;
+}
+
+////////// SINT64
+
+__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom) {
+    uint8_t more = denom->more;
+    int64_t magic = denom->magic;
+    if (magic == 0) { // shift path
+        uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+        uint64_t mask = (1ULL << shift) - 1;
+        __m128i roundToZeroTweak = _mm_set1_epi64x(mask);
+        // q = numer + ((numer >> 63) & roundToZeroTweak);
+        __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak));
+        q = libdivide_s64_shift_right_vector(q, shift);
+        __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+         // q = (q ^ sign) - sign;
+        q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign);
+        return q;
+    }
+    else {
+        __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // must be arithmetic shift
+            __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+            // q += ((numer ^ sign) - sign);
+            q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign));
+        }
+        // q >>= denom->mult_path.shift
+        q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
+        q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0)
+        return q;
+    }
+}
+
+__m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom) {
+    int64_t magic = denom->magic;
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+    // must be arithmetic shift
+    __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+
+     // libdivide_mullhi_s64(numers, magic);
+    __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic));
+    q = _mm_add_epi64(q, numers); // q += numers
+
+    // If q is non-negative, we have nothing to do.
+    // If q is negative, we want to add either (2**shift)-1 if d is
+    // a power of 2, or (2**shift) if it is not a power of 2.
+    uint32_t is_power_of_2 = (magic == 0);
+    __m128i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+    __m128i mask = _mm_set1_epi64x((1ULL << shift) - is_power_of_2);
+    q = _mm_add_epi64(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask)
+    q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+    q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign
+    return q;
+}
+
+#endif
+
+/////////// C++ stuff
+
+#ifdef __cplusplus
+
+// The C++ divider class is templated on both an integer type
+// (like uint64_t) and an algorithm type.
+// * BRANCHFULL is the default algorithm type.
+// * BRANCHFREE is the branchfree algorithm type.
+enum {
+    BRANCHFULL,
+    BRANCHFREE
+};
+
+#if defined(LIBDIVIDE_AVX512)
+    #define LIBDIVIDE_VECTOR_TYPE __m512i
+#elif defined(LIBDIVIDE_AVX2)
+    #define LIBDIVIDE_VECTOR_TYPE __m256i
+#elif defined(LIBDIVIDE_SSE2)
+    #define LIBDIVIDE_VECTOR_TYPE __m128i
+#endif
+
+#if !defined(LIBDIVIDE_VECTOR_TYPE)
+    #define LIBDIVIDE_DIVIDE_VECTOR(ALGO)
+#else
+    #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+        LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { \
+            return libdivide_##ALGO##_do_vector(n, &denom); \
+        }
+#endif
+
+// The DISPATCHER_GEN() macro generates C++ methods (for the given integer
+// and algorithm types) that redirect to libdivide's C API.
+#define DISPATCHER_GEN(T, ALGO) \
+    libdivide_##ALGO##_t denom; \
+    dispatcher() { } \
+    dispatcher(T d) \
+        : denom(libdivide_##ALGO##_gen(d)) \
+    { } \
+    T divide(T n) const { \
+        return libdivide_##ALGO##_do(n, &denom); \
+    } \
+    LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+    T recover() const { \
+        return libdivide_##ALGO##_recover(&denom); \
+    }
+
+// The dispatcher selects a specific division algorithm for a given
+// type and ALGO using partial template specialization.
+template<bool IS_INTEGRAL, bool IS_SIGNED, int SIZEOF, int ALGO> struct dispatcher { };
+
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFULL> { DISPATCHER_GEN(int32_t, s32) };
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFREE> { DISPATCHER_GEN(int32_t, s32_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFULL> { DISPATCHER_GEN(uint32_t, u32) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFREE> { DISPATCHER_GEN(uint32_t, u32_branchfree) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFULL> { DISPATCHER_GEN(int64_t, s64) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFREE> { DISPATCHER_GEN(int64_t, s64_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFULL> { DISPATCHER_GEN(uint64_t, u64) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFREE> { DISPATCHER_GEN(uint64_t, u64_branchfree) };
+
+// This is the main divider class for use by the user (C++ API).
+// The actual division algorithm is selected using the dispatcher struct
+// based on the integer and algorithm template parameters.
+template<typename T, int ALGO = BRANCHFULL>
+class divider {
+public:
+    // We leave the default constructor empty so that creating
+    // an array of dividers and then initializing them
+    // later doesn't slow us down.
+    divider() { }
+
+    // Constructor that takes the divisor as a parameter
+    divider(T d) : div(d) { }
+
+    // Divides n by the divisor
+    T divide(T n) const {
+        return div.divide(n);
+    }
+
+    // Recovers the divisor, returns the value that was
+    // used to initialize this divider object.
+    T recover() const {
+        return div.recover();
+    }
+
+    bool operator==(const divider<T, ALGO>& other) const {
+        return div.denom.magic == other.div.denom.magic &&
+               div.denom.more == other.div.denom.more;
+    }
+
+    bool operator!=(const divider<T, ALGO>& other) const {
+        return !(*this == other);
+    }
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+    // Treats the vector as packed integer values with the same type as
+    // the divider (e.g. s32, u32, s64, u64) and divides each of
+    // them by the divider, returning the packed quotients.
+    LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const {
+        return div.divide(n);
+    }
+#endif
+
+private:
+    // Storage for the actual divisor
+    dispatcher<std::is_integral<T>::value,
+               std::is_signed<T>::value, sizeof(T), ALGO> div;
+};
+
+// Overload of operator / for scalar division
+template<typename T, int ALGO>
+T operator/(T n, const divider<T, ALGO>& div) {
+    return div.divide(n);
+}
+
+// Overload of operator /= for scalar division
+template<typename T, int ALGO>
+T& operator/=(T& n, const divider<T, ALGO>& div) {
+    n = div.divide(n);
+    return n;
+}
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+    // Overload of operator / for vector division
+    template<typename T, int ALGO>
+    LIBDIVIDE_VECTOR_TYPE operator/(LIBDIVIDE_VECTOR_TYPE n, const divider<T, ALGO>& div) {
+        return div.divide(n);
+    }
+    // Overload of operator /= for vector division
+    template<typename T, int ALGO>
+    LIBDIVIDE_VECTOR_TYPE& operator/=(LIBDIVIDE_VECTOR_TYPE& n, const divider<T, ALGO>& div) {
+        n = div.divide(n);
+        return n;
+    }
+#endif
+
+// libdivide::branchfree_divider
+template <typename T>
+using branchfree_divider = divider<T, BRANCHFREE>;
+
+}  // namespace libdivide
+
+#endif  // __cplusplus
+
+#endif  // NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
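
Note on libdivide.h above: the library replaces division by a runtime
constant with a precomputed multiply-and-shift, and the C++ `divider`
class wraps the C generators stitched together by DISPATCHER_GEN. Below
is a minimal Python sketch of the underlying "round-up" magic-number
scheme only; libdivide's actual generators additionally handle powers of
two, signed operands, and the branchfree variants:

    # Divide by a fixed d with one multiply and one shift, verified
    # exhaustively at 8-bit width.  m = ceil(2**p / d) with
    # p = bits + ceil(log2(d)) is the classic round-up scheme.
    def make_divider(d, bits=8):
        l = (d - 1).bit_length()       # ceil(log2(d)) for d >= 1
        p = bits + l
        m = (2 ** p + d - 1) // d      # ceil(2**p / d), the "magic" number
        return lambda n: (n * m) >> p

    for d in range(1, 2 ** 8):
        div = make_divider(d)
        assert all(div(n) == n // d for n in range(2 ** 8)), d
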
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/ufuncobject.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/ufuncobject.h
new file mode 100644
index 00000000..9e00f2e5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/ufuncobject.h
@@ -0,0 +1,359 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_
+
+#include <numpy/npy_math.h>
+#include <numpy/npy_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The legacy generic inner loop for a standard element-wise or
+ * generalized ufunc.
+ */
+typedef void (*PyUFuncGenericFunction)
+            (char **args,
+             npy_intp const *dimensions,
+             npy_intp const *strides,
+             void *innerloopdata);
+
+/*
+ * The most generic one-dimensional inner loop for
+ * a masked standard element-wise ufunc. "Masked" here means that it skips
+ * doing calculations on any items for which the maskptr array has a true
+ * value.
+ */
+typedef void (PyUFunc_MaskedStridedInnerLoopFunc)(
+                char **dataptrs, npy_intp *strides,
+                char *maskptr, npy_intp mask_stride,
+                npy_intp count,
+                NpyAuxData *innerloopdata);
+
+/* Forward declaration for the type resolver and loop selector typedefs */
+struct _tagPyUFuncObject;
+
+/*
+ * Given the operands for calling a ufunc, should determine the
+ * calculation input and output data types and return an inner loop function.
+ * This function should validate that the casting rule is being followed,
+ * and fail if it is not.
+ *
+ * For backwards compatibility, the regular type resolution function does not
+ * support auxiliary data with object semantics. The type resolution call
+ * which returns a masked generic function returns a standard NpyAuxData
+ * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros
+ * work.
+ *
+ * ufunc:             The ufunc object.
+ * casting:           The 'casting' parameter provided to the ufunc.
+ * operands:          An array of length (ufunc->nin + ufunc->nout),
+ *                    with the output parameters possibly NULL.
+ * type_tup:          Either NULL, or the type_tup passed to the ufunc.
+ * out_dtypes:        An array which should be populated with new
+ *                    references to (ufunc->nin + ufunc->nout) new
+ *                    dtypes, one for each input and output. These
+ *                    dtypes should all be in native-endian format.
+ *
+ * Should return 0 on success, -1 on failure (with exception set),
+ * or -2 if Py_NotImplemented should be returned.
+ */
+typedef int (PyUFunc_TypeResolutionFunc)(
+                                struct _tagPyUFuncObject *ufunc,
+                                NPY_CASTING casting,
+                                PyArrayObject **operands,
+                                PyObject *type_tup,
+                                PyArray_Descr **out_dtypes);
+
+/*
+ * Legacy loop selector. (This should NOT normally be used and we can expect
+ * that only the `PyUFunc_DefaultLegacyInnerLoopSelector` is ever set).
+ * However, unlike the masked version, it probably still works.
+ *
+ * ufunc:             The ufunc object.
+ * dtypes:            An array which has been populated with dtypes,
+ *                    in most cases by the type resolution function
+ *                    for the same ufunc.
+ * out_innerloop:     Should be populated with the correct ufunc inner
+ *                    loop for the given type.
+ * out_innerloopdata: Should be populated with the void* data to
+ *                    be passed into the out_innerloop function.
+ * out_needs_api:     If the inner loop needs to use the Python API,
+ *                    should set this to 1, otherwise should leave
+ *                    this untouched.
+ */
+typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)(
+                            struct _tagPyUFuncObject *ufunc,
+                            PyArray_Descr **dtypes,
+                            PyUFuncGenericFunction *out_innerloop,
+                            void **out_innerloopdata,
+                            int *out_needs_api);
+
+
+typedef struct _tagPyUFuncObject {
+        PyObject_HEAD
+        /*
+         * nin: Number of inputs
+         * nout: Number of outputs
+         * nargs: Always nin + nout (Why is it stored?)
+         */
+        int nin, nout, nargs;
+
+        /*
+         * Identity for reduction, any of PyUFunc_One, PyUFunc_Zero
+         * PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone,
+         * PyUFunc_IdentityValue.
+         */
+        int identity;
+
+        /* Array of one-dimensional core loops */
+        PyUFuncGenericFunction *functions;
+        /* Array of funcdata that gets passed into the functions */
+        void **data;
+        /* The number of elements in 'functions' and 'data' */
+        int ntypes;
+
+        /* Used to be unused field 'check_return' */
+        int reserved1;
+
+        /* The name of the ufunc */
+        const char *name;
+
+        /* Array of type numbers, of size ('nargs' * 'ntypes') */
+        char *types;
+
+        /* Documentation string */
+        const char *doc;
+
+        void *ptr;
+        PyObject *obj;
+        PyObject *userloops;
+
+        /* generalized ufunc parameters */
+
+        /* 0 for scalar ufunc; 1 for generalized ufunc */
+        int core_enabled;
+        /* number of distinct dimension names in signature */
+        int core_num_dim_ix;
+
+        /*
+         * dimension indices of input/output argument k are stored in
+         * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1]
+         */
+
+        /* numbers of core dimensions of each argument */
+        int *core_num_dims;
+        /*
+         * dimension indices in a flattened form; indices
+         * are in the range of [0,core_num_dim_ix)
+         */
+        int *core_dim_ixs;
+        /*
+         * positions of 1st core dimensions of each
+         * argument in core_dim_ixs, equivalent to cumsum(core_num_dims)
+         */
+        int *core_offsets;
+        /* signature string for printing purpose */
+        char *core_signature;
+
+        /*
+         * A function which resolves the types and fills an array
+         * with the dtypes for the inputs and outputs.
+         */
+        PyUFunc_TypeResolutionFunc *type_resolver;
+        /*
+         * A function which returns an inner loop written for
+         * NumPy 1.6 and earlier ufuncs. This is for backwards
+         * compatibility, and may be NULL if inner_loop_selector
+         * is specified.
+         */
+        PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector;
+        /*
+         * This was blocked off to be the "new" inner loop selector in 1.7,
+         * but this was never implemented. (This is also why the above
+         * selector is called the "legacy" selector.)
+         */
+        #ifndef Py_LIMITED_API
+            vectorcallfunc vectorcall;
+        #else
+            void *vectorcall;
+        #endif
+
+        /* Was previously the `PyUFunc_MaskedInnerLoopSelectionFunc` */
+        void *_always_null_previously_masked_innerloop_selector;
+
+        /*
+         * List of flags for each operand when ufunc is called by nditer object.
+         * These flags will be used in addition to the default flags for each
+         * operand set by nditer object.
+         */
+        npy_uint32 *op_flags;
+
+        /*
+         * List of global flags used when ufunc is called by nditer object.
+         * These flags will be used in addition to the default global flags
+         * set by nditer object.
+         */
+        npy_uint32 iter_flags;
+
+        /* New in NPY_API_VERSION 0x0000000D and above */
+    #if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION
+        /*
+         * for each core_num_dim_ix distinct dimension names,
+         * the possible "frozen" size (-1 if not frozen).
+         */
+        npy_intp *core_dim_sizes;
+
+        /*
+         * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags
+         */
+        npy_uint32 *core_dim_flags;
+
+        /* Identity for reduction, when identity == PyUFunc_IdentityValue */
+        PyObject *identity_value;
+    #endif  /* NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION */
+
+        /* New in NPY_API_VERSION 0x0000000F and above */
+    #if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
+        /* New private fields related to dispatching */
+        void *_dispatch_cache;
+        /* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */
+        PyObject *_loops;
+    #endif
+} PyUFuncObject;
+
+#include "arrayobject.h"
+/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */
+/* the core dimension's size will be determined by the operands. */
+#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002
+/* the core dimension may be absent */
+#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004
+/* flags inferred during execution */
+#define UFUNC_CORE_DIM_MISSING 0x00040000
+
+#define UFUNC_ERR_IGNORE 0
+#define UFUNC_ERR_WARN   1
+#define UFUNC_ERR_RAISE  2
+#define UFUNC_ERR_CALL   3
+#define UFUNC_ERR_PRINT  4
+#define UFUNC_ERR_LOG    5
+
+        /* Python side integer mask */
+
+#define UFUNC_MASK_DIVIDEBYZERO 0x07
+#define UFUNC_MASK_OVERFLOW 0x3f
+#define UFUNC_MASK_UNDERFLOW 0x1ff
+#define UFUNC_MASK_INVALID 0xfff
+
+#define UFUNC_SHIFT_DIVIDEBYZERO 0
+#define UFUNC_SHIFT_OVERFLOW     3
+#define UFUNC_SHIFT_UNDERFLOW    6
+#define UFUNC_SHIFT_INVALID      9
+
+
+#define UFUNC_OBJ_ISOBJECT      1
+#define UFUNC_OBJ_NEEDS_API     2
+
+   /* Default user error mode */
+#define UFUNC_ERR_DEFAULT                               \
+        (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) +  \
+        (UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) +      \
+        (UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID)
+
+#if NPY_ALLOW_THREADS
+#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0);
+#define NPY_LOOP_END_THREADS   do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0);
+#else
+#define NPY_LOOP_BEGIN_THREADS
+#define NPY_LOOP_END_THREADS
+#endif
+
+/*
+ * UFunc has unit of 0, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_Zero 0
+/*
+ * UFunc has unit of 1, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_One 1
+/*
+ * UFunc has unit of -1, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once. Intended for
+ * bitwise_and reduction.
+ */
+#define PyUFunc_MinusOne 2
+/*
+ * UFunc has no unit, and the order of operations cannot be reordered.
+ * This case does not allow reduction with multiple axes at once.
+ */
+#define PyUFunc_None -1
+/*
+ * UFunc has no unit, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_ReorderableNone -2
+/*
+ * UFunc unit is an identity_value, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_IdentityValue -3
+
+
+#define UFUNC_REDUCE 0
+#define UFUNC_ACCUMULATE 1
+#define UFUNC_REDUCEAT 2
+#define UFUNC_OUTER 3
+
+
+typedef struct {
+        int nin;
+        int nout;
+        PyObject *callable;
+} PyUFunc_PyFuncData;
+
+/* A linked-list of function information for
+   user-defined 1-d loops.
+ */
+typedef struct _loop1d_info {
+        PyUFuncGenericFunction func;
+        void *data;
+        int *arg_types;
+        struct _loop1d_info *next;
+        int nargs;
+        PyArray_Descr **arg_dtypes;
+} PyUFunc_Loop1d;
+
+
+#include "__ufunc_api.h"
+
+#define UFUNC_PYVALS_NAME "UFUNC_PYVALS"
+
+/*
+ * THESE MACROS ARE DEPRECATED.
+ * Use npy_set_floatstatus_* in the npymath library.
+ */
+#define UFUNC_FPE_DIVIDEBYZERO  NPY_FPE_DIVIDEBYZERO
+#define UFUNC_FPE_OVERFLOW      NPY_FPE_OVERFLOW
+#define UFUNC_FPE_UNDERFLOW     NPY_FPE_UNDERFLOW
+#define UFUNC_FPE_INVALID       NPY_FPE_INVALID
+
+#define generate_divbyzero_error() npy_set_floatstatus_divbyzero()
+#define generate_overflow_error() npy_set_floatstatus_overflow()
+
+  /* Make sure it gets defined if it isn't already */
+#ifndef UFUNC_NOFPE
+/* Clear the floating point exception default of Borland C++ */
+#if defined(__BORLANDC__)
+#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM);
+#else
+#define UFUNC_NOFPE
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ */
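
Note on the UFUNC_MASK_* / UFUNC_SHIFT_* constants above: the Python-side
error mode is a single integer holding one three-bit UFUNC_ERR_* policy
per floating-point error category. A small Python sketch of that packing
(the pack/unpack helpers are illustrative, not NumPy API):

    # Three bits per category, at the UFUNC_SHIFT_* offsets above.
    SHIFTS = {'divide': 0, 'over': 3, 'under': 6, 'invalid': 9}
    ERR = {'ignore': 0, 'warn': 1, 'raise': 2, 'call': 3, 'print': 4, 'log': 5}

    def pack_errmode(**policies):
        mode = 0
        for category, policy in policies.items():
            mode |= ERR[policy] << SHIFTS[category]
        return mode

    def unpack_errmode(mode):
        names = {v: k for k, v in ERR.items()}
        return {c: names[(mode >> s) & 0x7] for c, s in SHIFTS.items()}

    # UFUNC_ERR_DEFAULT warns on divide-by-zero, overflow and invalid and
    # ignores underflow: (1 << 0) + (1 << 3) + (1 << 9) == 521
    assert pack_errmode(divide='warn', over='warn', invalid='warn') == 521
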
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/utils.h b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/utils.h
new file mode 100644
index 00000000..97f06092
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/include/numpy/utils.h
@@ -0,0 +1,37 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_
+
+#ifndef __COMP_NPY_UNUSED
+    #if defined(__GNUC__)
+        #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+    #elif defined(__ICC)
+        #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+    #elif defined(__clang__)
+        #define __COMP_NPY_UNUSED __attribute__ ((unused))
+    #else
+        #define __COMP_NPY_UNUSED
+    #endif
+#endif
+
+#if defined(__GNUC__) || defined(__ICC) || defined(__clang__)
+    #define NPY_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
+#elif defined(_MSC_VER)
+    #define NPY_DECL_ALIGNED(x) __declspec(align(x))
+#else
+    #define NPY_DECL_ALIGNED(x)
+#endif
+
+/* Use this to tag a variable as not used. It will remove unused variable
+ * warnings on supported platforms (see __COMP_NPY_UNUSED) and mangle the
+ * variable to avoid accidental use */
+#define NPY_UNUSED(x) __NPY_UNUSED_TAGGED ## x __COMP_NPY_UNUSED
+#define NPY_EXPAND(x) x
+
+#define NPY_STRINGIFY(x) #x
+#define NPY_TOSTRING(x) NPY_STRINGIFY(x)
+
+#define NPY_CAT__(a, b) a ## b
+#define NPY_CAT_(a, b) NPY_CAT__(a, b)
+#define NPY_CAT(a, b) NPY_CAT_(a, b)
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/lib/libnpymath.a b/dbdpy-env/lib/python3.9/site-packages/numpy/core/lib/libnpymath.a
new file mode 100644
index 00000000..c5d8bb63
Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/core/lib/libnpymath.a differ
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini b/dbdpy-env/lib/python3.9/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini
new file mode 100644
index 00000000..5840f5e1
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini
@@ -0,0 +1,12 @@
+[meta]
+Name = mlib
+Description = Math library used with this version of numpy
+Version = 1.0
+
+[default]
+Libs=-lm
+Cflags=
+
+[msvc]
+Libs=m.lib
+Cflags=
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini b/dbdpy-env/lib/python3.9/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini
new file mode 100644
index 00000000..3e465ad2
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini
@@ -0,0 +1,20 @@
+[meta]
+Name=npymath
+Description=Portable, core math library implementing C99 standard
+Version=0.1
+
+[variables]
+pkgname=numpy.core
+prefix=${pkgdir}
+libdir=${prefix}/lib
+includedir=${prefix}/include
+
+[default]
+Libs=-L${libdir} -lnpymath
+Cflags=-I${includedir}
+Requires=mlib
+
+[msvc]
+Libs=/LIBPATH:${libdir} npymath.lib
+Cflags=/INCLUDE:${includedir}
+Requires=mlib
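
Note on the two .ini files above: they are npy-pkg-config entries read by
numpy.distutils, which is how C extensions historically discovered the
flags needed to link against libnpymath.a. A typical lookup via the
legacy numpy.distutils mechanism (deprecated; newer NumPy versions build
with Meson):

    # Query the npymath entry installed above.
    from numpy.distutils.misc_util import get_info

    info = get_info('npymath')
    # Roughly: {'libraries': ['npymath'], 'library_dirs': [...],
    #           'include_dirs': [...], 'define_macros': []} -- suitable
    # for passing to a setuptools/distutils Extension definition.
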
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/memmap.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/memmap.py
new file mode 100644
index 00000000..79c69545
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/memmap.py
@@ -0,0 +1,338 @@
+from contextlib import nullcontext
+
+import numpy as np
+from .._utils import set_module
+from .numeric import uint8, ndarray, dtype
+from numpy.compat import os_fspath, is_pathlib_path
+
+__all__ = ['memmap']
+
+dtypedescr = dtype
+valid_filemodes = ["r", "c", "r+", "w+"]
+writeable_filemodes = ["r+", "w+"]
+
+mode_equivalents = {
+    "readonly":"r",
+    "copyonwrite":"c",
+    "readwrite":"r+",
+    "write":"w+"
+    }
+
+
+@set_module('numpy')
+class memmap(ndarray):
+    """Create a memory-map to an array stored in a *binary* file on disk.
+
+    Memory-mapped files are used for accessing small segments of large files
+    on disk, without reading the entire file into memory.  NumPy's
+    memmap's are array-like objects.  This differs from Python's ``mmap``
+    module, which uses file-like objects.
+
+    This subclass of ndarray has some unpleasant interactions with
+    some operations, because it doesn't quite fit properly as a subclass.
+    An alternative to using this subclass is to create the ``mmap``
+    object yourself, then create an ndarray with ndarray.__new__ directly,
+    passing the object created in its 'buffer=' parameter.
+
+    This class may at some point be turned into a factory function
+    which returns a view into an mmap buffer.
+
+    Flush the memmap instance to write the changes to the file. Currently there
+    is no API to close the underlying ``mmap``. It is tricky to ensure the
+    resource is actually closed, since it may be shared between different
+    memmap instances.
+
+
+    Parameters
+    ----------
+    filename : str, file-like object, or pathlib.Path instance
+        The file name or file object to be used as the array data buffer.
+    dtype : data-type, optional
+        The data-type used to interpret the file contents.
+        Default is `uint8`.
+    mode : {'r+', 'r', 'w+', 'c'}, optional
+        The file is opened in this mode:
+
+        +------+-------------------------------------------------------------+
+        | 'r'  | Open existing file for reading only.                        |
+        +------+-------------------------------------------------------------+
+        | 'r+' | Open existing file for reading and writing.                 |
+        +------+-------------------------------------------------------------+
+        | 'w+' | Create or overwrite existing file for reading and writing.  |
+        |      | If ``mode == 'w+'`` then `shape` must also be specified.    |
+        +------+-------------------------------------------------------------+
+        | 'c'  | Copy-on-write: assignments affect data in memory, but       |
+        |      | changes are not saved to disk.  The file on disk is         |
+        |      | read-only.                                                  |
+        +------+-------------------------------------------------------------+
+
+        Default is 'r+'.
+    offset : int, optional
+        In the file, array data starts at this offset. Since `offset` is
+        measured in bytes, it should normally be a multiple of the byte-size
+        of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
+        file are valid; the file will be extended to accommodate the
+        additional data. By default, ``memmap`` will start at the beginning of
+        the file, even if ``filename`` is a file pointer ``fp`` and
+        ``fp.tell() != 0``.
+    shape : tuple, optional
+        The desired shape of the array. If ``mode == 'r'`` and the number
+        of remaining bytes after `offset` is not a multiple of the byte-size
+        of `dtype`, you must specify `shape`. By default, the returned array
+        will be 1-D with the number of elements determined by file size
+        and data-type.
+    order : {'C', 'F'}, optional
+        Specify the order of the ndarray memory layout:
+        :term:`row-major`, C-style or :term:`column-major`,
+        Fortran-style.  This only has an effect if the shape is
+        greater than 1-D.  The default order is 'C'.
+
+    Attributes
+    ----------
+    filename : str or pathlib.Path instance
+        Path to the mapped file.
+    offset : int
+        Offset position in the file.
+    mode : str
+        File mode.
+
+    Methods
+    -------
+    flush
+        Flush any changes in memory to file on disk.
+        When you delete a memmap object, flush is called first to write
+        changes to disk.
+
+
+    See also
+    --------
+    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
+
+    Notes
+    -----
+    The memmap object can be used anywhere an ndarray is accepted.
+    Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
+    ``True``.
+
+    Memory-mapped files cannot be larger than 2GB on 32-bit systems.
+
+    When a memmap causes a file to be created or extended beyond its
+    current size in the filesystem, the contents of the new part are
+    unspecified. On systems with POSIX filesystem semantics, the extended
+    part will be filled with zero bytes.
+
+    Examples
+    --------
+    >>> data = np.arange(12, dtype='float32')
+    >>> data.resize((3,4))
+
+    This example uses a temporary file so that doctest doesn't write
+    files to your directory. You would use a 'normal' filename.
+
+    >>> from tempfile import mkdtemp
+    >>> import os.path as path
+    >>> filename = path.join(mkdtemp(), 'newfile.dat')
+
+    Create a memmap with dtype and shape that matches our data:
+
+    >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
+    >>> fp
+    memmap([[0., 0., 0., 0.],
+            [0., 0., 0., 0.],
+            [0., 0., 0., 0.]], dtype=float32)
+
+    Write data to memmap array:
+
+    >>> fp[:] = data[:]
+    >>> fp
+    memmap([[  0.,   1.,   2.,   3.],
+            [  4.,   5.,   6.,   7.],
+            [  8.,   9.,  10.,  11.]], dtype=float32)
+
+    >>> fp.filename == path.abspath(filename)
+    True
+
+    Flush memory changes to disk in order to read them back:
+
+    >>> fp.flush()
+
+    Load the memmap and verify data was stored:
+
+    >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
+    >>> newfp
+    memmap([[  0.,   1.,   2.,   3.],
+            [  4.,   5.,   6.,   7.],
+            [  8.,   9.,  10.,  11.]], dtype=float32)
+
+    Read-only memmap:
+
+    >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
+    >>> fpr.flags.writeable
+    False
+
+    Copy-on-write memmap:
+
+    >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
+    >>> fpc.flags.writeable
+    True
+
+    It's possible to assign to a copy-on-write array, but values are only
+    written into the memory copy of the array, and not written to disk:
+
+    >>> fpc
+    memmap([[  0.,   1.,   2.,   3.],
+            [  4.,   5.,   6.,   7.],
+            [  8.,   9.,  10.,  11.]], dtype=float32)
+    >>> fpc[0,:] = 0
+    >>> fpc
+    memmap([[  0.,   0.,   0.,   0.],
+            [  4.,   5.,   6.,   7.],
+            [  8.,   9.,  10.,  11.]], dtype=float32)
+
+    File on disk is unchanged:
+
+    >>> fpr
+    memmap([[  0.,   1.,   2.,   3.],
+            [  4.,   5.,   6.,   7.],
+            [  8.,   9.,  10.,  11.]], dtype=float32)
+
+    Offset into a memmap:
+
+    >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
+    >>> fpo
+    memmap([  4.,   5.,   6.,   7.,   8.,   9.,  10.,  11.], dtype=float32)
+
+    """
+
+    __array_priority__ = -100.0
+
+    def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
+                shape=None, order='C'):
+        # Import here to minimize 'import numpy' overhead
+        import mmap
+        import os.path
+        try:
+            mode = mode_equivalents[mode]
+        except KeyError as e:
+            if mode not in valid_filemodes:
+                raise ValueError(
+                    "mode must be one of {!r} (got {!r})"
+                    .format(valid_filemodes + list(mode_equivalents.keys()), mode)
+                ) from None
+
+        if mode == 'w+' and shape is None:
+            raise ValueError("shape must be given if mode == 'w+'")
+
+        if hasattr(filename, 'read'):
+            f_ctx = nullcontext(filename)
+        else:
+            f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b')
+
+        with f_ctx as fid:
+            fid.seek(0, 2)
+            flen = fid.tell()
+            descr = dtypedescr(dtype)
+            _dbytes = descr.itemsize
+
+            if shape is None:
+                bytes = flen - offset
+                if bytes % _dbytes:
+                    raise ValueError("Size of available data is not a "
+                            "multiple of the data-type size.")
+                size = bytes // _dbytes
+                shape = (size,)
+            else:
+                if not isinstance(shape, tuple):
+                    shape = (shape,)
+                size = np.intp(1)  # avoid default choice of np.int_, which might overflow
+                for k in shape:
+                    size *= k
+
+            bytes = int(offset + size*_dbytes)
+
+            if mode in ('w+', 'r+') and flen < bytes:
+                fid.seek(bytes - 1, 0)
+                fid.write(b'\0')
+                fid.flush()
+
+            if mode == 'c':
+                acc = mmap.ACCESS_COPY
+            elif mode == 'r':
+                acc = mmap.ACCESS_READ
+            else:
+                acc = mmap.ACCESS_WRITE
+
+            start = offset - offset % mmap.ALLOCATIONGRANULARITY
+            bytes -= start
+            array_offset = offset - start
+            mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
+
+            self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
+                                   offset=array_offset, order=order)
+            self._mmap = mm
+            self.offset = offset
+            self.mode = mode
+
+            if is_pathlib_path(filename):
+                # special case - if we were constructed with a pathlib.Path,
+                # then filename is a path object, not a string
+                self.filename = filename.resolve()
+            elif hasattr(fid, "name") and isinstance(fid.name, str):
+                # py3 returns int for TemporaryFile().name
+                self.filename = os.path.abspath(fid.name)
+            # same as memmap copies (e.g. memmap + 1)
+            else:
+                self.filename = None
+
+        return self
+
+    def __array_finalize__(self, obj):
+        if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):
+            self._mmap = obj._mmap
+            self.filename = obj.filename
+            self.offset = obj.offset
+            self.mode = obj.mode
+        else:
+            self._mmap = None
+            self.filename = None
+            self.offset = None
+            self.mode = None
+
+    def flush(self):
+        """
+        Write any changes in the array to the file on disk.
+
+        For further information, see `memmap`.
+
+        Parameters
+        ----------
+        None
+
+        See Also
+        --------
+        memmap
+
+        """
+        if self.base is not None and hasattr(self.base, 'flush'):
+            self.base.flush()
+
+    def __array_wrap__(self, arr, context=None):
+        arr = super().__array_wrap__(arr, context)
+
+        # Return a memmap if a memmap was given as the output of the
+        # ufunc. Leave the arr class unchanged if self is not a memmap
+        # to keep original memmap subclasses behavior
+        if self is arr or type(self) is not memmap:
+            return arr
+        # Return scalar instead of 0d memmap, e.g. for np.sum with
+        # axis=None
+        if arr.shape == ():
+            return arr[()]
+        # Return ndarray otherwise
+        return arr.view(np.ndarray)
+
+    def __getitem__(self, index):
+        res = super().__getitem__(index)
+        if type(res) is memmap and res._mmap is None:
+            return res.view(type=ndarray)
+        return res
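
Note on memmap.__new__ above: mmap.mmap requires its offset argument to
be a multiple of mmap.ALLOCATIONGRANULARITY, so the requested offset is
split into an aligned mapping start plus a small remaining offset into
the buffer. A sketch of that arithmetic, with the granularity value
assumed to be 4096 for illustration:

    GRANULARITY = 4096                        # assumed for the example
    offset = 10000                            # caller-requested byte offset
    start = offset - offset % GRANULARITY     # aligned mmap offset: 8192
    array_offset = offset - start             # offset into the buffer: 1808
    assert start % GRANULARITY == 0
    assert start + array_offset == offset
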
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/memmap.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/memmap.pyi
new file mode 100644
index 00000000..03c6b772
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/memmap.pyi
@@ -0,0 +1,3 @@
+from numpy import memmap as memmap
+
+__all__: list[str]
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/multiarray.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/multiarray.py
new file mode 100644
index 00000000..d1128334
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/multiarray.py
@@ -0,0 +1,1715 @@
+"""
+Create the numpy.core.multiarray namespace for backward compatibility. In v1.16
+the multiarray and umath c-extension modules were merged into a single
+_multiarray_umath extension module. So we replicate the old namespace
+by importing from the extension module.
+
+"""
+
+import functools
+from . import overrides
+from . import _multiarray_umath
+from ._multiarray_umath import *  # noqa: F403
+# These imports are needed for backward compatibility,
+# do not change them. issue gh-15518
+# _get_ndarray_c_version is semi-public, on purpose not added to __all__
+from ._multiarray_umath import (
+    fastCopyAndTranspose, _flagdict, from_dlpack, _place, _reconstruct,
+    _vec_string, _ARRAY_API, _monotonicity, _get_ndarray_c_version,
+    _get_madvise_hugepage, _set_madvise_hugepage,
+    _get_promotion_state, _set_promotion_state, _using_numpy2_behavior
+    )
+
+__all__ = [
+    '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
+    'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
+    'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
+    'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP',
+    '_flagdict', 'from_dlpack', '_place', '_reconstruct', '_vec_string',
+    '_monotonicity', 'add_docstring', 'arange', 'array', 'asarray',
+    'asanyarray', 'ascontiguousarray', 'asfortranarray', 'bincount',
+    'broadcast', 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
+    'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
+    'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
+    'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
+    'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
+    'frombuffer', 'fromfile', 'fromiter', 'fromstring',
+    'get_handler_name', 'get_handler_version', 'inner', 'interp',
+    'interp_complex', 'is_busday', 'lexsort', 'matmul', 'may_share_memory',
+    'min_scalar_type', 'ndarray', 'nditer', 'nested_iters',
+    'normalize_axis_index', 'packbits', 'promote_types', 'putmask',
+    'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function',
+    'set_legacy_print_mode', 'set_numeric_ops', 'set_string_function',
+    'set_typeDict', 'shares_memory', 'tracemalloc_domain', 'typeinfo',
+    'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros',
+    '_get_promotion_state', '_set_promotion_state', '_using_numpy2_behavior']
+
+# For backward compatibility, make sure pickle imports these functions from here
+_reconstruct.__module__ = 'numpy.core.multiarray'
+scalar.__module__ = 'numpy.core.multiarray'
+
+
+from_dlpack.__module__ = 'numpy'
+arange.__module__ = 'numpy'
+array.__module__ = 'numpy'
+asarray.__module__ = 'numpy'
+asanyarray.__module__ = 'numpy'
+ascontiguousarray.__module__ = 'numpy'
+asfortranarray.__module__ = 'numpy'
+datetime_data.__module__ = 'numpy'
+empty.__module__ = 'numpy'
+frombuffer.__module__ = 'numpy'
+fromfile.__module__ = 'numpy'
+fromiter.__module__ = 'numpy'
+frompyfunc.__module__ = 'numpy'
+fromstring.__module__ = 'numpy'
+geterrobj.__module__ = 'numpy'
+may_share_memory.__module__ = 'numpy'
+nested_iters.__module__ = 'numpy'
+promote_types.__module__ = 'numpy'
+set_numeric_ops.__module__ = 'numpy'
+seterrobj.__module__ = 'numpy'
+zeros.__module__ = 'numpy'
+_get_promotion_state.__module__ = 'numpy'
+_set_promotion_state.__module__ = 'numpy'
+_using_numpy2_behavior.__module__ = 'numpy'
+
+
+# We can't verify dispatcher signatures because NumPy's C functions don't
+# support introspection.
+array_function_from_c_func_and_dispatcher = functools.partial(
+    overrides.array_function_from_dispatcher,
+    module='numpy', docs_from_dispatcher=True, verify=False)
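+
+# NOTE: each of the decorated functions below is only a *dispatcher*: its
+# body returns the array-like arguments that should be searched for an
+# ``__array_function__`` override, while the docstring and the actual
+# implementation come from the corresponding C function.  A minimal sketch
+# of the pattern (illustrative only -- not the real numpy.core.overrides
+# machinery):
+#
+#     def make_dispatchable(c_func):
+#         def decorator(dispatcher):
+#             def public(*args, **kwargs):
+#                 for arg in dispatcher(*args, **kwargs):
+#                     handler = getattr(type(arg), '__array_function__', None)
+#                     if handler is not None:
+#                         return handler(arg, public, (type(arg),),
+#                                        args, kwargs)
+#                 return c_func(*args, **kwargs)   # no override: call C func
+#             public.__doc__ = dispatcher.__doc__  # docs_from_dispatcher=True
+#             return public
+#         return decorator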
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
+def empty_like(prototype, dtype=None, order=None, subok=None, shape=None):
+    """
+    empty_like(prototype, dtype=None, order='K', subok=True, shape=None)
+
+    Return a new array with the same shape and type as a given array.
+
+    Parameters
+    ----------
+    prototype : array_like
+        The shape and data-type of `prototype` define these same attributes
+        of the returned array.
+    dtype : data-type, optional
+        Overrides the data type of the result.
+
+        .. versionadded:: 1.6.0
+    order : {'C', 'F', 'A', or 'K'}, optional
+        Overrides the memory layout of the result. 'C' means C-order,
+        'F' means F-order, 'A' means 'F' if `prototype` is Fortran
+        contiguous, 'C' otherwise. 'K' means match the layout of `prototype`
+        as closely as possible.
+
+        .. versionadded:: 1.6.0
+    subok : bool, optional.
+        If True, then the newly created array will use the sub-class
+        type of `prototype`, otherwise it will be a base-class array. Defaults
+        to True.
+    shape : int or sequence of ints, optional.
+        Overrides the shape of the result. If order='K' and the number of
+        dimensions is unchanged, will try to keep order, otherwise,
+        order='C' is implied.
+
+        .. versionadded:: 1.17.0
+
+    Returns
+    -------
+    out : ndarray
+        Array of uninitialized (arbitrary) data with the same
+        shape and type as `prototype`.
+
+    See Also
+    --------
+    ones_like : Return an array of ones with shape and type of input.
+    zeros_like : Return an array of zeros with shape and type of input.
+    full_like : Return a new array with shape of input filled with value.
+    empty : Return a new uninitialized array.
+
+    Notes
+    -----
+    This function does *not* initialize the returned array; to do that use
+    `zeros_like` or `ones_like` instead.  It may be marginally faster than
+    the functions that do set the array values.
+
+    Examples
+    --------
+    >>> a = ([1,2,3], [4,5,6])                         # a is array-like
+    >>> np.empty_like(a)
+    array([[-1073741821, -1073741821,           3],    # uninitialized
+           [          0,           0, -1073741821]])
+    >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
+    >>> np.empty_like(a)
+    array([[ -2.00000715e+000,   1.48219694e-323,  -2.00000572e+000], # uninitialized
+           [  4.38791518e-305,  -2.00000715e+000,   4.17269252e-309]])
+
+    """
+    return (prototype,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
+def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):
+    """
+    concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting="same_kind")
+
+    Join a sequence of arrays along an existing axis.
+
+    Parameters
+    ----------
+    a1, a2, ... : sequence of array_like
+        The arrays must have the same shape, except in the dimension
+        corresponding to `axis` (the first, by default).
+    axis : int, optional
+        The axis along which the arrays will be joined.  If axis is None,
+        arrays are flattened before use.  Default is 0.
+    out : ndarray, optional
+        If provided, the destination to place the result. The shape must be
+        correct, matching that of what concatenate would have returned if no
+        out argument were specified.
+    dtype : str or dtype
+        If provided, the destination array will have this dtype. Cannot be
+        provided together with `out`.
+
+        .. versionadded:: 1.20.0
+
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur. Defaults to 'same_kind'.
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    res : ndarray
+        The concatenated array.
+
+    See Also
+    --------
+    ma.concatenate : Concatenate function that preserves input masks.
+    array_split : Split an array into multiple sub-arrays of equal or
+                  near-equal size.
+    split : Split array into a list of multiple sub-arrays of equal size.
+    hsplit : Split array into multiple sub-arrays horizontally (column wise).
+    vsplit : Split array into multiple sub-arrays vertically (row wise).
+    dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
+    stack : Stack a sequence of arrays along a new axis.
+    block : Assemble arrays from blocks.
+    hstack : Stack arrays in sequence horizontally (column wise).
+    vstack : Stack arrays in sequence vertically (row wise).
+    dstack : Stack arrays in sequence depth wise (along third dimension).
+    column_stack : Stack 1-D arrays as columns into a 2-D array.
+
+    Notes
+    -----
+    When one or more of the arrays to be concatenated is a MaskedArray,
+    this function will return a MaskedArray object instead of an ndarray,
+    but the input masks are *not* preserved. In cases where a MaskedArray
+    is expected as input, use the ma.concatenate function from the masked
+    array module instead.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> b = np.array([[5, 6]])
+    >>> np.concatenate((a, b), axis=0)
+    array([[1, 2],
+           [3, 4],
+           [5, 6]])
+    >>> np.concatenate((a, b.T), axis=1)
+    array([[1, 2, 5],
+           [3, 4, 6]])
+    >>> np.concatenate((a, b), axis=None)
+    array([1, 2, 3, 4, 5, 6])
+
+    This function will not preserve masking of MaskedArray inputs.
+
+    >>> a = np.ma.arange(3)
+    >>> a[1] = np.ma.masked
+    >>> b = np.arange(2, 5)
+    >>> a
+    masked_array(data=[0, --, 2],
+                 mask=[False,  True, False],
+           fill_value=999999)
+    >>> b
+    array([2, 3, 4])
+    >>> np.concatenate([a, b])
+    masked_array(data=[0, 1, 2, 2, 3, 4],
+                 mask=False,
+           fill_value=999999)
+    >>> np.ma.concatenate([a, b])
+    masked_array(data=[0, --, 2, 2, 3, 4],
+                 mask=[False,  True, False, False, False, False],
+           fill_value=999999)
+
+    """
+    if out is not None:
+        # optimize for the typical case where only arrays is provided
+        arrays = list(arrays)
+        arrays.append(out)
+    return arrays
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
+def inner(a, b):
+    """
+    inner(a, b, /)
+
+    Inner product of two arrays.
+
+    Ordinary inner product of vectors for 1-D arrays (without complex
+    conjugation), in higher dimensions a sum product over the last axes.
+
+    Parameters
+    ----------
+    a, b : array_like
+        If `a` and `b` are nonscalar, their last dimensions must match.
+
+    Returns
+    -------
+    out : ndarray
+        If `a` and `b` are both
+        scalars or both 1-D arrays then a scalar is returned; otherwise
+        an array is returned.
+        ``out.shape = (*a.shape[:-1], *b.shape[:-1])``
+
+    Raises
+    ------
+    ValueError
+        If both `a` and `b` are nonscalar and their last dimensions have
+        different sizes.
+
+    See Also
+    --------
+    tensordot : Sum products over arbitrary axes.
+    dot : Generalised matrix product, using second last dimension of `b`.
+    einsum : Einstein summation convention.
+
+    Notes
+    -----
+    For vectors (1-D arrays) it computes the ordinary inner-product::
+
+        np.inner(a, b) = sum(a[:]*b[:])
+
+    More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``::
+
+        np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
+
+    or explicitly::
+
+        np.inner(a, b)[i0,...,ir-2,j0,...,js-2]
+             = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:])
+
+    In addition `a` or `b` may be scalars, in which case::
+
+       np.inner(a,b) = a*b
+
+    Examples
+    --------
+    Ordinary inner product for vectors:
+
+    >>> a = np.array([1,2,3])
+    >>> b = np.array([0,1,0])
+    >>> np.inner(a, b)
+    2
+
+    Some multidimensional examples:
+
+    >>> a = np.arange(24).reshape((2,3,4))
+    >>> b = np.arange(4)
+    >>> c = np.inner(a, b)
+    >>> c.shape
+    (2, 3)
+    >>> c
+    array([[ 14,  38,  62],
+           [ 86, 110, 134]])
+
+    >>> a = np.arange(2).reshape((1,1,2))
+    >>> b = np.arange(6).reshape((3,2))
+    >>> c = np.inner(a, b)
+    >>> c.shape
+    (1, 1, 3)
+    >>> c
+    array([[[1, 3, 5]]])
+
+    An example where `b` is a scalar:
+
+    >>> np.inner(np.eye(2), 7)
+    array([[7., 0.],
+           [0., 7.]])
+
+    """
+    return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
+def where(condition, x=None, y=None):
+    """
+    where(condition, [x, y], /)
+
+    Return elements chosen from `x` or `y` depending on `condition`.
+
+    .. note::
+        When only `condition` is provided, this function is a shorthand for
+        ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
+        preferred, as it behaves correctly for subclasses. The rest of this
+        documentation covers only the case where all three arguments are
+        provided.
+
+    Parameters
+    ----------
+    condition : array_like, bool
+        Where True, yield `x`, otherwise yield `y`.
+    x, y : array_like
+        Values from which to choose. `x`, `y` and `condition` need to be
+        broadcastable to some shape.
+
+    Returns
+    -------
+    out : ndarray
+        An array with elements from `x` where `condition` is True, and elements
+        from `y` elsewhere.
+
+    See Also
+    --------
+    choose
+    nonzero : The function that is called when x and y are omitted
+
+    Notes
+    -----
+    If all the arrays are 1-D, `where` is equivalent to::
+
+        [xv if c else yv
+         for c, xv, yv in zip(condition, x, y)]
+
+    Examples
+    --------
+    >>> a = np.arange(10)
+    >>> a
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+    >>> np.where(a < 5, a, 10*a)
+    array([ 0,  1,  2,  3,  4, 50, 60, 70, 80, 90])
+
+    This can be used on multidimensional arrays too:
+
+    >>> np.where([[True, False], [True, True]],
+    ...          [[1, 2], [3, 4]],
+    ...          [[9, 8], [7, 6]])
+    array([[1, 8],
+           [3, 4]])
+
+    The shapes of x, y, and the condition are broadcast together:
+
+    >>> x, y = np.ogrid[:3, :4]
+    >>> np.where(x < y, x, 10 + y)  # both x and 10+y are broadcast
+    array([[10,  0,  0,  0],
+           [10, 11,  1,  1],
+           [10, 11, 12,  2]])
+
+    >>> a = np.array([[0, 1, 2],
+    ...               [0, 2, 4],
+    ...               [0, 3, 6]])
+    >>> np.where(a < 4, a, -1)  # -1 is broadcast
+    array([[ 0,  1,  2],
+           [ 0,  2, -1],
+           [ 0,  3, -1]])
+    """
+    return (condition, x, y)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
+def lexsort(keys, axis=None):
+    """
+    lexsort(keys, axis=-1)
+
+    Perform an indirect stable sort using a sequence of keys.
+
+    Given multiple sorting keys, which can be interpreted as columns in a
+    spreadsheet, lexsort returns an array of integer indices that describes
+    the sort order by multiple columns. The last key in the sequence is used
+    for the primary sort order, the second-to-last key for the secondary sort
+    order, and so on. The keys argument must be a sequence of objects that
+    can be converted to arrays of the same shape. If a 2D array is provided
+    for the keys argument, its rows are interpreted as the sorting keys and
+    sorting is according to the last row, second last row etc.
+
+    Parameters
+    ----------
+    keys : (k, N) array or tuple containing k (N,)-shaped sequences
+        The `k` different "columns" to be sorted.  The last column (or row if
+        `keys` is a 2D array) is the primary sort key.
+    axis : int, optional
+        Axis to be indirectly sorted.  By default, sort over the last axis.
+
+    Returns
+    -------
+    indices : (N,) ndarray of ints
+        Array of indices that sort the keys along the specified axis.
+
+    See Also
+    --------
+    argsort : Indirect sort.
+    ndarray.sort : In-place sort.
+    sort : Return a sorted copy of an array.
+
+    Examples
+    --------
+    Sort names: first by surname, then by name.
+
+    >>> surnames =    ('Hertz',    'Galilei', 'Hertz')
+    >>> first_names = ('Heinrich', 'Galileo', 'Gustav')
+    >>> ind = np.lexsort((first_names, surnames))
+    >>> ind
+    array([1, 2, 0])
+
+    >>> [surnames[i] + ", " + first_names[i] for i in ind]
+    ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
+
+    Sort two columns of numbers:
+
+    >>> a = [1,5,1,4,3,4,4] # First column
+    >>> b = [9,4,0,4,0,2,1] # Second column
+    >>> ind = np.lexsort((b,a)) # Sort by a, then by b
+    >>> ind
+    array([2, 0, 4, 6, 5, 3, 1])
+
+    >>> [(a[i],b[i]) for i in ind]
+    [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
+
+    Note that sorting is first according to the elements of ``a``.
+    Secondary sorting is according to the elements of ``b``.
+
+    A normal ``argsort`` would have yielded:
+
+    >>> [(a[i],b[i]) for i in np.argsort(a)]
+    [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
+
+    Structured arrays are sorted lexically by ``argsort``:
+
+    >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
+    ...              dtype=np.dtype([('x', int), ('y', int)]))
+
+    >>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
+    array([2, 0, 4, 6, 5, 3, 1])
+
+    """
+    if isinstance(keys, tuple):
+        return keys
+    else:
+        return (keys,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
+def can_cast(from_, to, casting=None):
+    """
+    can_cast(from_, to, casting='safe')
+
+    Returns True if cast between data types can occur according to the
+    casting rule.  If from is a scalar or array scalar, also returns
+    True if the scalar value can be cast without overflow or truncation
+    to an integer.
+
+    Parameters
+    ----------
+    from_ : dtype, dtype specifier, scalar, or array
+        Data type, scalar, or array to cast from.
+    to : dtype or dtype specifier
+        Data type to cast to.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur.
+
+          * 'no' means the data types should not be cast at all.
+          * 'equiv' means only byte-order changes are allowed.
+          * 'safe' means only casts which can preserve values are allowed.
+          * 'same_kind' means only safe casts or casts within a kind,
+            like float64 to float32, are allowed.
+          * 'unsafe' means any data conversions may be done.
+
+    Returns
+    -------
+    out : bool
+        True if cast can occur according to the casting rule.
+
+    Notes
+    -----
+    .. versionchanged:: 1.17.0
+       Casting between a simple data type and a structured one is possible only
+       for "unsafe" casting.  Casting to multiple fields is allowed, but
+       casting from multiple fields is not.
+
+    .. versionchanged:: 1.9.0
+       Casting from numeric to string types in 'safe' casting mode requires
+       that the string dtype length is long enough to store the maximum
+       integer/float value converted.
+
+    See also
+    --------
+    dtype, result_type
+
+    Examples
+    --------
+    Basic examples
+
+    >>> np.can_cast(np.int32, np.int64)
+    True
+    >>> np.can_cast(np.float64, complex)
+    True
+    >>> np.can_cast(complex, float)
+    False
+
+    >>> np.can_cast('i8', 'f8')
+    True
+    >>> np.can_cast('i8', 'f4')
+    False
+    >>> np.can_cast('i4', 'S4')
+    False
+
+    Casting scalars
+
+    >>> np.can_cast(100, 'i1')
+    True
+    >>> np.can_cast(150, 'i1')
+    False
+    >>> np.can_cast(150, 'u1')
+    True
+
+    >>> np.can_cast(3.5e100, np.float32)
+    False
+    >>> np.can_cast(1000.0, np.float32)
+    True
+
+    Array scalar checks the value, array does not
+
+    >>> np.can_cast(np.array(1000.0), np.float32)
+    True
+    >>> np.can_cast(np.array([1000.0]), np.float32)
+    False
+
+    Using the casting rules
+
+    >>> np.can_cast('i8', 'i8', 'no')
+    True
+    >>> np.can_cast('<i8', '>i8', 'no')
+    False
+
+    >>> np.can_cast('<i8', '>i8', 'equiv')
+    True
+    >>> np.can_cast('<i4', '>i8', 'equiv')
+    False
+
+    >>> np.can_cast('<i4', '>i8', 'safe')
+    True
+    >>> np.can_cast('<i8', '>i4', 'safe')
+    False
+
+    >>> np.can_cast('<i8', '>i4', 'same_kind')
+    True
+    >>> np.can_cast('<i8', '>u4', 'same_kind')
+    False
+
+    >>> np.can_cast('<i8', '>u4', 'unsafe')
+    True
+
+    """
+    return (from_,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
+def min_scalar_type(a):
+    """
+    min_scalar_type(a, /)
+
+    For scalar ``a``, returns the data type with the smallest size
+    and smallest scalar kind which can hold its value.  For non-scalar
+    array ``a``, returns the vector's dtype unmodified.
+
+    Floating point values are not demoted to integers,
+    and complex values are not demoted to floats.
+
+    Parameters
+    ----------
+    a : scalar or array_like
+        The value whose minimal data type is to be found.
+
+    Returns
+    -------
+    out : dtype
+        The minimal data type.
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    See Also
+    --------
+    result_type, promote_types, dtype, can_cast
+
+    Examples
+    --------
+    >>> np.min_scalar_type(10)
+    dtype('uint8')
+
+    >>> np.min_scalar_type(-260)
+    dtype('int16')
+
+    >>> np.min_scalar_type(3.1)
+    dtype('float16')
+
+    >>> np.min_scalar_type(1e50)
+    dtype('float64')
+
+    >>> np.min_scalar_type(np.arange(4,dtype='f8'))
+    dtype('float64')
+
+    """
+    return (a,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
+def result_type(*arrays_and_dtypes):
+    """
+    result_type(*arrays_and_dtypes)
+
+    Returns the type that results from applying the NumPy
+    type promotion rules to the arguments.
+
+    Type promotion in NumPy works similarly to the rules in languages
+    like C++, with some slight differences.  When both scalars and
+    arrays are used, the array's type takes precedence and the actual value
+    of the scalar is taken into account.
+
+    For example, calculating 3*a, where a is an array of 32-bit floats,
+    intuitively should result in a 32-bit float output.  If the 3 is a
+    32-bit integer, the NumPy rules indicate it can't convert losslessly
+    into a 32-bit float, so a 64-bit float should be the result type.
+    By examining the value of the constant, '3', we see that it fits in
+    an 8-bit integer, which can be cast losslessly into the 32-bit float.
+
+    Parameters
+    ----------
+    arrays_and_dtypes : list of arrays and dtypes
+        The operands of some operation whose result type is needed.
+
+    Returns
+    -------
+    out : dtype
+        The result type.
+
+    See also
+    --------
+    dtype, promote_types, min_scalar_type, can_cast
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    The specific algorithm used is as follows.
+
+    Categories are determined by first checking which of boolean,
+    integer (int/uint), or floating point (float/complex) the maximum
+    kind of all the arrays and the scalars are.
+
+    If there are only scalars or the maximum category of the scalars
+    is higher than the maximum category of the arrays,
+    the data types are combined with :func:`promote_types`
+    to produce the return value.
+
+    Otherwise, `min_scalar_type` is called on each scalar, and
+    the resulting data types are all combined with :func:`promote_types`
+    to produce the return value.
+
+    The set of int values is not a subset of the uint values for types
+    with the same number of bits, something not reflected in
+    :func:`min_scalar_type`, but handled as a special case in `result_type`.
+
+    Examples
+    --------
+    >>> np.result_type(3, np.arange(7, dtype='i1'))
+    dtype('int8')
+
+    >>> np.result_type('i4', 'c8')
+    dtype('complex128')
+
+    >>> np.result_type(3.0, -2)
+    dtype('float64')
+
+    """
+    return arrays_and_dtypes
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
+def dot(a, b, out=None):
+    """
+    dot(a, b, out=None)
+
+    Dot product of two arrays. Specifically,
+
+    - If both `a` and `b` are 1-D arrays, it is inner product of vectors
+      (without complex conjugation).
+
+    - If both `a` and `b` are 2-D arrays, it is matrix multiplication,
+      but using :func:`matmul` or ``a @ b`` is preferred.
+
+    - If either `a` or `b` is 0-D (scalar), it is equivalent to
+      :func:`multiply` and using ``numpy.multiply(a, b)`` or ``a * b`` is
+      preferred.
+
+    - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
+      the last axis of `a` and `b`.
+
+    - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
+      sum product over the last axis of `a` and the second-to-last axis of
+      `b`::
+
+        dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
+
+    It uses an optimized BLAS library when possible (see `numpy.linalg`).
+
+    Parameters
+    ----------
+    a : array_like
+        First argument.
+    b : array_like
+        Second argument.
+    out : ndarray, optional
+        Output argument. This must have the exact kind that would be returned
+        if it was not used. In particular, it must have the right type, must be
+        C-contiguous, and its dtype must be the dtype that would be returned
+        for `dot(a,b)`. This is a performance feature. Therefore, if these
+        conditions are not met, an exception is raised, instead of attempting
+        to be flexible.
+
+    Returns
+    -------
+    output : ndarray
+        Returns the dot product of `a` and `b`.  If `a` and `b` are both
+        scalars or both 1-D arrays then a scalar is returned; otherwise
+        an array is returned.
+        If `out` is given, then it is returned.
+
+    Raises
+    ------
+    ValueError
+        If the last dimension of `a` is not the same size as
+        the second-to-last dimension of `b`.
+
+    See Also
+    --------
+    vdot : Complex-conjugating dot product.
+    tensordot : Sum products over arbitrary axes.
+    einsum : Einstein summation convention.
+    matmul : '@' operator as method with out parameter.
+    linalg.multi_dot : Chained dot product.
+
+    Examples
+    --------
+    >>> np.dot(3, 4)
+    12
+
+    Neither argument is complex-conjugated:
+
+    >>> np.dot([2j, 3j], [2j, 3j])
+    (-13+0j)
+
+    For 2-D arrays it is the matrix product:
+
+    >>> a = [[1, 0], [0, 1]]
+    >>> b = [[4, 1], [2, 2]]
+    >>> np.dot(a, b)
+    array([[4, 1],
+           [2, 2]])
+
+    >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
+    >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
+    >>> np.dot(a, b)[2,3,2,1,2,2]
+    499128
+    >>> sum(a[2,3,2,:] * b[1,2,:,2])
+    499128
+
+    """
+    return (a, b, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
+def vdot(a, b):
+    """
+    vdot(a, b, /)
+
+    Return the dot product of two vectors.
+
+    The vdot(`a`, `b`) function handles complex numbers differently than
+    dot(`a`, `b`).  If the first argument is complex the complex conjugate
+    of the first argument is used for the calculation of the dot product.
+
+    Note that `vdot` handles multidimensional arrays differently than `dot`:
+    it does *not* perform a matrix product, but flattens input arguments
+    to 1-D vectors first. Consequently, it should only be used for vectors.
+
+    Parameters
+    ----------
+    a : array_like
+        If `a` is complex the complex conjugate is taken before calculation
+        of the dot product.
+    b : array_like
+        Second argument to the dot product.
+
+    Returns
+    -------
+    output : ndarray
+        Dot product of `a` and `b`.  Can be an int, float, or
+        complex depending on the types of `a` and `b`.
+
+    See Also
+    --------
+    dot : Return the dot product without using the complex conjugate of the
+          first argument.
+
+    Examples
+    --------
+    >>> a = np.array([1+2j,3+4j])
+    >>> b = np.array([5+6j,7+8j])
+    >>> np.vdot(a, b)
+    (70-8j)
+    >>> np.vdot(b, a)
+    (70+8j)
+
+    Note that higher-dimensional arrays are flattened!
+
+    >>> a = np.array([[1, 4], [5, 6]])
+    >>> b = np.array([[4, 1], [2, 2]])
+    >>> np.vdot(a, b)
+    30
+    >>> np.vdot(b, a)
+    30
+    >>> 1*4 + 4*1 + 5*2 + 6*2
+    30
+
+    """
+    return (a, b)
+
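+# [Editor's sketch, not part of the upstream NumPy source] The conjugation
+# described above is what makes ``vdot(a, a)`` the squared Euclidean norm of
+# a complex vector, which plain ``dot`` does not compute:
+#
+# >>> a = np.array([1+2j, 3+4j])
+# >>> np.vdot(a, a)                   # conj(a) . a == |a|**2
+# (30+0j)
+# >>> np.dot(a, a)                    # no conjugation, not a norm
+# (-10+28j)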
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
+def bincount(x, weights=None, minlength=None):
+    """
+    bincount(x, /, weights=None, minlength=0)
+
+    Count number of occurrences of each value in array of non-negative ints.
+
+    The number of bins (of size 1) is one larger than the largest value in
+    `x`. If `minlength` is specified, there will be at least this number
+    of bins in the output array (though it will be longer if necessary,
+    depending on the contents of `x`).
+    Each bin gives the number of occurrences of its index value in `x`.
+    If `weights` is specified the input array is weighted by it, i.e. if a
+    value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
+    of ``out[n] += 1``.
+
+    Parameters
+    ----------
+    x : array_like, 1 dimension, nonnegative ints
+        Input array.
+    weights : array_like, optional
+        Weights, array of the same shape as `x`.
+    minlength : int, optional
+        A minimum number of bins for the output array.
+
+        .. versionadded:: 1.6.0
+
+    Returns
+    -------
+    out : ndarray of ints
+        The result of binning the input array.
+        The length of `out` is equal to ``np.amax(x)+1``, or to `minlength`
+        if that is larger.
+
+    Raises
+    ------
+    ValueError
+        If the input is not 1-dimensional, or contains elements with negative
+        values, or if `minlength` is negative.
+    TypeError
+        If the type of the input is float or complex.
+
+    See Also
+    --------
+    histogram, digitize, unique
+
+    Examples
+    --------
+    >>> np.bincount(np.arange(5))
+    array([1, 1, 1, 1, 1])
+    >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
+    array([1, 3, 1, 1, 0, 0, 0, 1])
+
+    >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
+    >>> np.bincount(x).size == np.amax(x)+1
+    True
+
+    The input array needs to be of integer dtype, otherwise a
+    TypeError is raised:
+
+    >>> np.bincount(np.arange(5, dtype=float))
+    Traceback (most recent call last):
+      ...
+    TypeError: Cannot cast array data from dtype('float64') to dtype('int64')
+    according to the rule 'safe'
+
+    A possible use of ``bincount`` is to perform sums over
+    variable-size chunks of an array, using the ``weights`` keyword.
+
+    >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
+    >>> x = np.array([0, 1, 1, 2, 2, 2])
+    >>> np.bincount(x,  weights=w)
+    array([ 0.3,  0.7,  1.1])
+
+    """
+    return (x, weights)
+
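+# [Editor's sketch, not part of the upstream NumPy source] The `minlength`
+# parameter documented above guarantees a lower bound on the output length,
+# which is useful when counting batches that may not contain the largest
+# category:
+#
+# >>> np.bincount(np.array([0, 1, 1]), minlength=5)
+# array([1, 2, 0, 0, 0])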
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
+def ravel_multi_index(multi_index, dims, mode=None, order=None):
+    """
+    ravel_multi_index(multi_index, dims, mode='raise', order='C')
+
+    Converts a tuple of index arrays into an array of flat
+    indices, applying boundary modes to the multi-index.
+
+    Parameters
+    ----------
+    multi_index : tuple of array_like
+        A tuple of integer arrays, one array for each dimension.
+    dims : tuple of ints
+        The shape of array into which the indices from ``multi_index`` apply.
+    mode : {'raise', 'wrap', 'clip'}, optional
+        Specifies how out-of-bounds indices are handled.  Can specify
+        either one mode or a tuple of modes, one mode per index.
+
+        * 'raise' -- raise an error (default)
+        * 'wrap' -- wrap around
+        * 'clip' -- clip to the range
+
+        In 'clip' mode, a negative index which would normally
+        wrap will clip to 0 instead.
+    order : {'C', 'F'}, optional
+        Determines whether the multi-index should be viewed as
+        indexing in row-major (C-style) or column-major
+        (Fortran-style) order.
+
+    Returns
+    -------
+    raveled_indices : ndarray
+        An array of indices into the flattened version of an array
+        of dimensions ``dims``.
+
+    See Also
+    --------
+    unravel_index
+
+    Notes
+    -----
+    .. versionadded:: 1.6.0
+
+    Examples
+    --------
+    >>> arr = np.array([[3,6,6],[4,5,1]])
+    >>> np.ravel_multi_index(arr, (7,6))
+    array([22, 41, 37])
+    >>> np.ravel_multi_index(arr, (7,6), order='F')
+    array([31, 41, 13])
+    >>> np.ravel_multi_index(arr, (4,6), mode='clip')
+    array([22, 23, 19])
+    >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
+    array([12, 13, 13])
+
+    >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
+    1621
+    """
+    return multi_index
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
+def unravel_index(indices, shape=None, order=None):
+    """
+    unravel_index(indices, shape, order='C')
+
+    Converts a flat index or array of flat indices into a tuple
+    of coordinate arrays.
+
+    Parameters
+    ----------
+    indices : array_like
+        An integer array whose elements are indices into the flattened
+        version of an array of dimensions ``shape``. Before version 1.6.0,
+        this function accepted just one index value.
+    shape : tuple of ints
+        The shape of the array to use for unraveling ``indices``.
+
+        .. versionchanged:: 1.16.0
+            Renamed from ``dims`` to ``shape``.
+
+    order : {'C', 'F'}, optional
+        Determines whether the indices should be viewed as indexing in
+        row-major (C-style) or column-major (Fortran-style) order.
+
+        .. versionadded:: 1.6.0
+
+    Returns
+    -------
+    unraveled_coords : tuple of ndarray
+        Each array in the tuple has the same shape as the ``indices``
+        array.
+
+    See Also
+    --------
+    ravel_multi_index
+
+    Examples
+    --------
+    >>> np.unravel_index([22, 41, 37], (7,6))
+    (array([3, 6, 6]), array([4, 5, 1]))
+    >>> np.unravel_index([31, 41, 13], (7,6), order='F')
+    (array([3, 6, 6]), array([4, 5, 1]))
+
+    >>> np.unravel_index(1621, (6,7,8,9))
+    (3, 1, 4, 1)
+
+    """
+    return (indices,)
+
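+# [Editor's sketch, not part of the upstream NumPy source] For in-bounds
+# indices, `unravel_index` and `ravel_multi_index` are inverses, so flat
+# indices round-trip through coordinate form:
+#
+# >>> flat = np.ravel_multi_index((np.array([3, 6, 6]),
+# ...                              np.array([4, 5, 1])), (7, 6))
+# >>> flat
+# array([22, 41, 37])
+# >>> np.unravel_index(flat, (7, 6))
+# (array([3, 6, 6]), array([4, 5, 1]))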
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
+def copyto(dst, src, casting=None, where=None):
+    """
+    copyto(dst, src, casting='same_kind', where=True)
+
+    Copies values from one array to another, broadcasting as necessary.
+
+    Raises a TypeError if the `casting` rule is violated, and if
+    `where` is provided, it selects which elements to copy.
+
+    .. versionadded:: 1.7.0
+
+    Parameters
+    ----------
+    dst : ndarray
+        The array into which values are copied.
+    src : array_like
+        The array from which values are copied.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur when copying.
+
+          * 'no' means the data types should not be cast at all.
+          * 'equiv' means only byte-order changes are allowed.
+          * 'safe' means only casts which can preserve values are allowed.
+          * 'same_kind' means only safe casts or casts within a kind,
+            like float64 to float32, are allowed.
+          * 'unsafe' means any data conversions may be done.
+    where : array_like of bool, optional
+        A boolean array which is broadcasted to match the dimensions
+        of `dst`, and selects elements to copy from `src` to `dst`
+        wherever it contains the value True.
+
+    Examples
+    --------
+    >>> A = np.array([4, 5, 6])
+    >>> B = [1, 2, 3]
+    >>> np.copyto(A, B)
+    >>> A
+    array([1, 2, 3])
+
+    >>> A = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> B = [[4, 5, 6], [7, 8, 9]]
+    >>> np.copyto(A, B)
+    >>> A
+    array([[4, 5, 6],
+           [7, 8, 9]])
+
+    """
+    return (dst, src, where)
+
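+# [Editor's sketch, not part of the upstream NumPy source] The `where`
+# argument described above gives a masked, in-place assignment; combined
+# with broadcasting of `src`, it behaves like a guarded fill:
+#
+# >>> A = np.array([10, 20, 30, 40])
+# >>> np.copyto(A, 0, where=A > 25)   # scalar src broadcast over the mask
+# >>> A
+# array([10, 20,  0,  0])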
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
+def putmask(a, /, mask, values):
+    """
+    putmask(a, /, mask, values)
+
+    Changes elements of an array based on conditional and input values.
+
+    Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
+
+    If `values` is not the same size as `a` and `mask` then it will repeat.
+    This gives behavior different from ``a[mask] = values``.
+
+    Parameters
+    ----------
+    a : ndarray
+        Target array.
+    mask : array_like
+        Boolean mask array. It has to be the same shape as `a`.
+    values : array_like
+        Values to put into `a` where `mask` is True. If `values` is smaller
+        than `a` it will be repeated.
+
+    See Also
+    --------
+    place, put, take, copyto
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> np.putmask(x, x>2, x**2)
+    >>> x
+    array([[ 0,  1,  2],
+           [ 9, 16, 25]])
+
+    If `values` is smaller than `a` it is repeated:
+
+    >>> x = np.arange(5)
+    >>> np.putmask(x, x>1, [-33, -44])
+    >>> x
+    array([  0,   1, -33, -44, -33])
+
+    """
+    return (a, mask, values)
+
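+# [Editor's sketch, not part of the upstream NumPy source] The contrast with
+# ``a[mask] = values`` mentioned above: boolean-mask assignment requires a
+# scalar or a value array matching the number of selected elements:
+#
+# >>> y = np.arange(5)
+# >>> y[y > 1] = -99                  # a scalar broadcasts fine
+# >>> y
+# array([  0,   1, -99, -99, -99])
+#
+# whereas ``y[y > 1] = [-33, -44]`` raises ValueError (2 values for 3
+# masked slots), the very case `putmask` handles by repeating `values`.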
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
+def packbits(a, axis=None, bitorder='big'):
+    """
+    packbits(a, /, axis=None, bitorder='big')
+
+    Packs the elements of a binary-valued array into bits in a uint8 array.
+
+    The result is padded to full bytes by inserting zero bits at the end.
+
+    Parameters
+    ----------
+    a : array_like
+        An array of integers or booleans whose elements should be packed to
+        bits.
+    axis : int, optional
+        The dimension over which bit-packing is done.
+        ``None`` implies packing the flattened array.
+    bitorder : {'big', 'little'}, optional
+        The order of the input bits. 'big' will mimic bin(val),
+        ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will
+        reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``.
+        Defaults to 'big'.
+
+        .. versionadded:: 1.17.0
+
+    Returns
+    -------
+    packed : ndarray
+        Array of type uint8 whose elements represent bits corresponding to the
+        logical (0 or nonzero) value of the input elements. The shape of
+        `packed` has the same number of dimensions as the input (unless `axis`
+        is None, in which case the output is 1-D).
+
+    See Also
+    --------
+    unpackbits: Unpacks elements of a uint8 array into a binary-valued output
+                array.
+
+    Examples
+    --------
+    >>> a = np.array([[[1,0,1],
+    ...                [0,1,0]],
+    ...               [[1,1,0],
+    ...                [0,0,1]]])
+    >>> b = np.packbits(a, axis=-1)
+    >>> b
+    array([[[160],
+            [ 64]],
+           [[192],
+            [ 32]]], dtype=uint8)
+
+    Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
+    and 32 = 0010 0000.
+
+    """
+    return (a,)
+
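+# [Editor's sketch, not part of the upstream NumPy source] The `bitorder`
+# parameter documented above flips which end of the byte the first input
+# bit lands in:
+#
+# >>> np.packbits([1, 1, 0, 0, 0, 0, 0, 0])                    # 'big'
+# array([192], dtype=uint8)
+# >>> np.packbits([1, 1, 0, 0, 0, 0, 0, 0], bitorder='little')
+# array([3], dtype=uint8)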
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
+def unpackbits(a, axis=None, count=None, bitorder='big'):
+    """
+    unpackbits(a, /, axis=None, count=None, bitorder='big')
+
+    Unpacks elements of a uint8 array into a binary-valued output array.
+
+    Each element of `a` represents a bit-field that should be unpacked
+    into a binary-valued output array. The shape of the output array is
+    either 1-D (if `axis` is ``None``) or the same shape as the input
+    array with unpacking done along the axis specified.
+
+    Parameters
+    ----------
+    a : ndarray, uint8 type
+       Input array.
+    axis : int, optional
+        The dimension over which bit-unpacking is done.
+        ``None`` implies unpacking the flattened array.
+    count : int or None, optional
+        The number of elements to unpack along `axis`, provided as a way
+        of undoing the effect of packing a size that is not a multiple
+        of eight. A non-negative number means to only unpack `count`
+        bits. A negative number means to trim off that many bits from
+        the end. ``None`` means to unpack the entire array (the
+        default). Counts larger than the available number of bits will
+        add zero padding to the output. Negative counts must not
+        exceed the available number of bits.
+
+        .. versionadded:: 1.17.0
+
+    bitorder : {'big', 'little'}, optional
+        The order of the returned bits. 'big' will mimic bin(val),
+        ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse
+        the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``.
+        Defaults to 'big'.
+
+        .. versionadded:: 1.17.0
+
+    Returns
+    -------
+    unpacked : ndarray, uint8 type
+       The elements are binary-valued (0 or 1).
+
+    See Also
+    --------
+    packbits : Packs the elements of a binary-valued array into bits in
+               a uint8 array.
+
+    Examples
+    --------
+    >>> a = np.array([[2], [7], [23]], dtype=np.uint8)
+    >>> a
+    array([[ 2],
+           [ 7],
+           [23]], dtype=uint8)
+    >>> b = np.unpackbits(a, axis=1)
+    >>> b
+    array([[0, 0, 0, 0, 0, 0, 1, 0],
+           [0, 0, 0, 0, 0, 1, 1, 1],
+           [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
+    >>> c = np.unpackbits(a, axis=1, count=-3)
+    >>> c
+    array([[0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0],
+           [0, 0, 0, 1, 0]], dtype=uint8)
+
+    >>> p = np.packbits(b, axis=0)
+    >>> np.unpackbits(p, axis=0)
+    array([[0, 0, 0, 0, 0, 0, 1, 0],
+           [0, 0, 0, 0, 0, 1, 1, 1],
+           [0, 0, 0, 1, 0, 1, 1, 1],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
+    >>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0]))
+    True
+
+    """
+    return (a,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
+def shares_memory(a, b, max_work=None):
+    """
+    shares_memory(a, b, /, max_work=None)
+
+    Determine if two arrays share memory.
+
+    .. warning::
+
+       This function can be exponentially slow for some inputs, unless
+       `max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``.
+       If in doubt, use `numpy.may_share_memory` instead.
+
+    Parameters
+    ----------
+    a, b : ndarray
+        Input arrays
+    max_work : int, optional
+        Effort to spend on solving the overlap problem (maximum number
+        of candidate solutions to consider). The following special
+        values are recognized:
+
+        max_work=MAY_SHARE_EXACT  (default)
+            The problem is solved exactly. In this case, the function returns
+            True only if there is an element shared between the arrays. Finding
+            the exact solution may take extremely long in some cases.
+        max_work=MAY_SHARE_BOUNDS
+            Only the memory bounds of a and b are checked.
+
+    Raises
+    ------
+    numpy.exceptions.TooHardError
+        Exceeded max_work.
+
+    Returns
+    -------
+    out : bool
+
+    See Also
+    --------
+    may_share_memory
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3, 4])
+    >>> np.shares_memory(x, np.array([5, 6, 7]))
+    False
+    >>> np.shares_memory(x[::2], x)
+    True
+    >>> np.shares_memory(x[::2], x[1::2])
+    False
+
+    Checking whether two arrays share memory is NP-complete, and
+    runtime may increase exponentially in the number of
+    dimensions. Hence, `max_work` should generally be set to a finite
+    number, as it is possible to construct examples that take
+    extremely long to run:
+
+    >>> from numpy.lib.stride_tricks import as_strided
+    >>> x = np.zeros([192163377], dtype=np.int8)
+    >>> x1 = as_strided(x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049))
+    >>> x2 = as_strided(x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1))
+    >>> np.shares_memory(x1, x2, max_work=1000)
+    Traceback (most recent call last):
+    ...
+    numpy.exceptions.TooHardError: Exceeded max_work
+
+    Running ``np.shares_memory(x1, x2)`` without `max_work` set takes
+    around 1 minute for this case. It is possible to find problems
+    that take still significantly longer.
+
+    """
+    return (a, b)
+
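+# [Editor's sketch, not part of the upstream NumPy source] With
+# ``max_work=MAY_SHARE_BOUNDS`` only the address ranges are compared, so
+# interleaved views that never touch the same element still report True;
+# the cheap check trades precision for guaranteed speed:
+#
+# >>> x = np.arange(8)
+# >>> np.shares_memory(x[::2], x[1::2])                        # exact
+# False
+# >>> np.shares_memory(x[::2], x[1::2], max_work=np.MAY_SHARE_BOUNDS)
+# True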
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
+def may_share_memory(a, b, max_work=None):
+    """
+    may_share_memory(a, b, /, max_work=None)
+
+    Determine if two arrays might share memory.
+
+    A return of True does not necessarily mean that the two arrays
+    share any element.  It just means that they *might*.
+
+    Only the memory bounds of a and b are checked by default.
+
+    Parameters
+    ----------
+    a, b : ndarray
+        Input arrays
+    max_work : int, optional
+        Effort to spend on solving the overlap problem.  See
+        `shares_memory` for details.  Default for ``may_share_memory``
+        is to do a bounds check.
+
+    Returns
+    -------
+    out : bool
+
+    See Also
+    --------
+    shares_memory
+
+    Examples
+    --------
+    >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
+    False
+    >>> x = np.zeros([3, 4])
+    >>> np.may_share_memory(x[:,0], x[:,1])
+    True
+
+    """
+    return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
+def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
+    """
+    is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
+
+    Calculates which of the given dates are valid days, and which are not.
+
+    .. versionadded:: 1.7.0
+
+    Parameters
+    ----------
+    dates : array_like of datetime64[D]
+        The array of dates to process.
+    weekmask : str or array_like of bool, optional
+        A seven-element array indicating which of Monday through Sunday are
+        valid days. May be specified as a length-seven list or array, like
+        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+        weekdays, optionally separated by white space. Valid abbreviations
+        are: Mon Tue Wed Thu Fri Sat Sun
+    holidays : array_like of datetime64[D], optional
+        An array of dates to consider as invalid dates.  They may be
+        specified in any order, and NaT (not-a-time) dates are ignored.
+        This list is saved in a normalized form that is suited for
+        fast calculations of valid days.
+    busdaycal : busdaycalendar, optional
+        A `busdaycalendar` object which specifies the valid days. If this
+        parameter is provided, neither weekmask nor holidays may be
+        provided.
+    out : array of bool, optional
+        If provided, this array is filled with the result.
+
+    Returns
+    -------
+    out : array of bool
+        An array with the same shape as ``dates``, containing True for
+        each valid day, and False for each invalid day.
+
+    See Also
+    --------
+    busdaycalendar : An object that specifies a custom set of valid days.
+    busday_offset : Applies an offset counted in valid days.
+    busday_count : Counts how many valid days are in a half-open date range.
+
+    Examples
+    --------
+    >>> # The weekdays are Friday, Saturday, and Monday
+    ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
+    ...                 holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
+    array([False, False,  True])
+    """
+    return (dates, weekmask, holidays, out)
+
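+# [Editor's sketch, not part of the upstream NumPy source] A custom
+# `weekmask` redefines which weekdays count as valid; 2011-07-02 was a
+# Saturday, so it is a valid day only under a Mon-Sat mask:
+#
+# >>> np.is_busday('2011-07-02')                # default Mon-Fri mask
+# False
+# >>> np.is_busday('2011-07-02', weekmask='1111110')
+# True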
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
+def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
+                  busdaycal=None, out=None):
+    """
+    busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
+
+    First adjusts the date to fall on a valid day according to
+    the ``roll`` rule, then applies offsets to the given dates
+    counted in valid days.
+
+    .. versionadded:: 1.7.0
+
+    Parameters
+    ----------
+    dates : array_like of datetime64[D]
+        The array of dates to process.
+    offsets : array_like of int
+        The array of offsets, which is broadcast with ``dates``.
+    roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
+        How to treat dates that do not fall on a valid day. The default
+        is 'raise'.
+
+          * 'raise' means to raise an exception for an invalid day.
+          * 'nat' means to return a NaT (not-a-time) for an invalid day.
+          * 'forward' and 'following' mean to take the first valid day
+            later in time.
+          * 'backward' and 'preceding' mean to take the first valid day
+            earlier in time.
+          * 'modifiedfollowing' means to take the first valid day
+            later in time unless it is across a month boundary, in which
+            case to take the first valid day earlier in time.
+          * 'modifiedpreceding' means to take the first valid day
+            earlier in time unless it is across a month boundary, in which
+            case to take the first valid day later in time.
+    weekmask : str or array_like of bool, optional
+        A seven-element array indicating which of Monday through Sunday are
+        valid days. May be specified as a length-seven list or array, like
+        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+        weekdays, optionally separated by white space. Valid abbreviations
+        are: Mon Tue Wed Thu Fri Sat Sun
+    holidays : array_like of datetime64[D], optional
+        An array of dates to consider as invalid dates.  They may be
+        specified in any order, and NaT (not-a-time) dates are ignored.
+        This list is saved in a normalized form that is suited for
+        fast calculations of valid days.
+    busdaycal : busdaycalendar, optional
+        A `busdaycalendar` object which specifies the valid days. If this
+        parameter is provided, neither weekmask nor holidays may be
+        provided.
+    out : array of datetime64[D], optional
+        If provided, this array is filled with the result.
+
+    Returns
+    -------
+    out : array of datetime64[D]
+        An array with a shape from broadcasting ``dates`` and ``offsets``
+        together, containing the dates with offsets applied.
+
+    See Also
+    --------
+    busdaycalendar : An object that specifies a custom set of valid days.
+    is_busday : Returns a boolean array indicating valid days.
+    busday_count : Counts how many valid days are in a half-open date range.
+
+    Examples
+    --------
+    >>> # First business day in October 2011 (not accounting for holidays)
+    ... np.busday_offset('2011-10', 0, roll='forward')
+    numpy.datetime64('2011-10-03')
+    >>> # Last business day in February 2012 (not accounting for holidays)
+    ... np.busday_offset('2012-03', -1, roll='forward')
+    numpy.datetime64('2012-02-29')
+    >>> # Third Wednesday in January 2011
+    ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
+    numpy.datetime64('2011-01-19')
+    >>> # 2012 Mother's Day in Canada and the U.S.
+    ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
+    numpy.datetime64('2012-05-13')
+
+    >>> # First business day on or after a date
+    ... np.busday_offset('2011-03-20', 0, roll='forward')
+    numpy.datetime64('2011-03-21')
+    >>> np.busday_offset('2011-03-22', 0, roll='forward')
+    numpy.datetime64('2011-03-22')
+    >>> # First business day after a date
+    ... np.busday_offset('2011-03-20', 1, roll='backward')
+    numpy.datetime64('2011-03-21')
+    >>> np.busday_offset('2011-03-22', 1, roll='backward')
+    numpy.datetime64('2011-03-23')
+    """
+    return (dates, offsets, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
+def busday_count(begindates, enddates, weekmask=None, holidays=None,
+                 busdaycal=None, out=None):
+    """
+    busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
+
+    Counts the number of valid days between `begindates` and
+    `enddates`, not including the day of `enddates`.
+
+    If ``enddates`` specifies a date value that is earlier than the
+    corresponding ``begindates`` date value, the count will be negative.
+
+    .. versionadded:: 1.7.0
+
+    Parameters
+    ----------
+    begindates : array_like of datetime64[D]
+        The array of the first dates for counting.
+    enddates : array_like of datetime64[D]
+        The array of the end dates for counting, which are excluded
+        from the count themselves.
+    weekmask : str or array_like of bool, optional
+        A seven-element array indicating which of Monday through Sunday are
+        valid days. May be specified as a length-seven list or array, like
+        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+        weekdays, optionally separated by white space. Valid abbreviations
+        are: Mon Tue Wed Thu Fri Sat Sun
+    holidays : array_like of datetime64[D], optional
+        An array of dates to consider as invalid dates.  They may be
+        specified in any order, and NaT (not-a-time) dates are ignored.
+        This list is saved in a normalized form that is suited for
+        fast calculations of valid days.
+    busdaycal : busdaycalendar, optional
+        A `busdaycalendar` object which specifies the valid days. If this
+        parameter is provided, neither weekmask nor holidays may be
+        provided.
+    out : array of int, optional
+        If provided, this array is filled with the result.
+
+    Returns
+    -------
+    out : array of int
+        An array with a shape from broadcasting ``begindates`` and ``enddates``
+        together, containing the number of valid days between
+        the begin and end dates.
+
+    See Also
+    --------
+    busdaycalendar : An object that specifies a custom set of valid days.
+    is_busday : Returns a boolean array indicating valid days.
+    busday_offset : Applies an offset counted in valid days.
+
+    Examples
+    --------
+    >>> # Number of weekdays in January 2011
+    ... np.busday_count('2011-01', '2011-02')
+    21
+    >>> # Number of weekdays in 2011
+    ... np.busday_count('2011', '2012')
+    260
+    >>> # Number of Saturdays in 2011
+    ... np.busday_count('2011', '2012', weekmask='Sat')
+    53
+    """
+    return (begindates, enddates, weekmask, holidays, out)
+
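+# [Editor's sketch, not part of the upstream NumPy source] As noted above,
+# reversing the range makes the count negative, mirroring the 21 weekdays
+# of January 2011 from the example:
+#
+# >>> np.busday_count('2011-02', '2011-01')
+# -21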
+
+@array_function_from_c_func_and_dispatcher(
+    _multiarray_umath.datetime_as_string)
+def datetime_as_string(arr, unit=None, timezone=None, casting=None):
+    """
+    datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
+
+    Convert an array of datetimes into an array of strings.
+
+    Parameters
+    ----------
+    arr : array_like of datetime64
+        The array of UTC timestamps to format.
+    unit : str
+        One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
+    timezone : {'naive', 'UTC', 'local'} or tzinfo
+        Timezone information to use when displaying the datetime. If 'UTC', end
+        with a Z to indicate UTC time. If 'local', convert to the local timezone
+        first, and suffix with a +-#### timezone offset. If a tzinfo object,
+        then do as with 'local', but use the specified timezone.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
+        Casting to allow when changing between datetime units.
+
+    Returns
+    -------
+    str_arr : ndarray
+        An array of strings the same shape as `arr`.
+
+    Examples
+    --------
+    >>> import pytz
+    >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
+    >>> d
+    array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
+           '2002-10-27T07:30'], dtype='datetime64[m]')
+
+    Setting the timezone to UTC shows the same information, but with a Z suffix
+
+    >>> np.datetime_as_string(d, timezone='UTC')
+    array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
+           '2002-10-27T07:30Z'], dtype='<U35')
+
+    Note that we picked datetimes that cross a DST boundary, so the local
+    offset changes partway through the array
+
+    >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
+    array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
+           '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
+
+    Passing in a unit will change the precision
+
+    >>> np.datetime_as_string(d, unit='h')
+    array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
+          dtype='<U32')
+    >>> np.datetime_as_string(d, unit='s')
+    array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
+           '2002-10-27T07:30:00'], dtype='<U38')
+
+    'casting' can be used to specify whether precision can be changed
+
+    >>> np.datetime_as_string(d, unit='h', casting='safe')
+    Traceback (most recent call last):
+        ...
+    TypeError: Cannot create a datetime string as units 'h' from a NumPy
+    datetime with units 'm' according to the rule 'safe'
+    """
+    return (arr,)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/multiarray.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/multiarray.pyi
new file mode 100644
index 00000000..dc05f812
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/multiarray.pyi
@@ -0,0 +1,1022 @@
+# TODO: Sort out any and all missing functions in this namespace
+
+import os
+import datetime as dt
+from collections.abc import Sequence, Callable, Iterable
+from typing import (
+    Literal as L,
+    Any,
+    overload,
+    TypeVar,
+    SupportsIndex,
+    final,
+    Final,
+    Protocol,
+    ClassVar,
+)
+
+from numpy import (
+    # Re-exports
+    busdaycalendar as busdaycalendar,
+    broadcast as broadcast,
+    dtype as dtype,
+    ndarray as ndarray,
+    nditer as nditer,
+
+    # The rest
+    ufunc,
+    str_,
+    bool_,
+    uint8,
+    intp,
+    int_,
+    float64,
+    timedelta64,
+    datetime64,
+    generic,
+    unsignedinteger,
+    signedinteger,
+    floating,
+    complexfloating,
+    _OrderKACF,
+    _OrderCF,
+    _CastingKind,
+    _ModeKind,
+    _SupportsBuffer,
+    _IOProtocol,
+    _CopyMode,
+    _NDIterFlagsKind,
+    _NDIterOpFlagsKind,
+)
+
+from numpy._typing import (
+    # Shapes
+    _ShapeLike,
+
+    # DTypes
+    DTypeLike,
+    _DTypeLike,
+
+    # Arrays
+    NDArray,
+    ArrayLike,
+    _ArrayLike,
+    _SupportsArrayFunc,
+    _NestedSequence,
+    _ArrayLikeBool_co,
+    _ArrayLikeUInt_co,
+    _ArrayLikeInt_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeComplex_co,
+    _ArrayLikeTD64_co,
+    _ArrayLikeDT64_co,
+    _ArrayLikeObject_co,
+    _ArrayLikeStr_co,
+    _ArrayLikeBytes_co,
+    _ScalarLike_co,
+    _IntLike_co,
+    _FloatLike_co,
+    _TD64Like_co,
+)
+
+_T_co = TypeVar("_T_co", covariant=True)
+_T_contra = TypeVar("_T_contra", contravariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+# Valid time units
+_UnitKind = L[
+    "Y",
+    "M",
+    "D",
+    "h",
+    "m",
+    "s",
+    "ms",
+    "us", "μs",
+    "ns",
+    "ps",
+    "fs",
+    "as",
+]
+_RollKind = L[  # `raise` is deliberately excluded
+    "nat",
+    "forward",
+    "following",
+    "backward",
+    "preceding",
+    "modifiedfollowing",
+    "modifiedpreceding",
+]
+
+class _SupportsLenAndGetItem(Protocol[_T_contra, _T_co]):
+    def __len__(self) -> int: ...
+    def __getitem__(self, key: _T_contra, /) -> _T_co: ...
+
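+# [Editor's note, not part of the upstream stub] Structurally, any object
+# with ``__len__`` and an integer-keyed ``__getitem__`` satisfies this
+# protocol, so e.g. a plain ``list[NDArray[Any]]`` type-checks against
+# ``_SupportsLenAndGetItem[int, ArrayLike]`` in the `concatenate` overloads
+# further below.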
+__all__: list[str]
+
+ALLOW_THREADS: Final[int]  # 0 or 1 (system-specific)
+BUFSIZE: L[8192]
+CLIP: L[0]
+WRAP: L[1]
+RAISE: L[2]
+MAXDIMS: L[32]
+MAY_SHARE_BOUNDS: L[0]
+MAY_SHARE_EXACT: L[-1]
+tracemalloc_domain: L[389047]
+
+@overload
+def empty_like(
+    prototype: _ArrayType,
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> _ArrayType: ...
+@overload
+def empty_like(
+    prototype: _ArrayLike[_SCT],
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def empty_like(
+    prototype: object,
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[Any]: ...
+@overload
+def empty_like(
+    prototype: Any,
+    dtype: _DTypeLike[_SCT],
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def empty_like(
+    prototype: Any,
+    dtype: DTypeLike,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def array(
+    object: _ArrayType,
+    dtype: None = ...,
+    *,
+    copy: bool | _CopyMode = ...,
+    order: _OrderKACF = ...,
+    subok: L[True],
+    ndmin: int = ...,
+    like: None | _SupportsArrayFunc = ...,
+) -> _ArrayType: ...
+@overload
+def array(
+    object: _ArrayLike[_SCT],
+    dtype: None = ...,
+    *,
+    copy: bool | _CopyMode = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    ndmin: int = ...,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def array(
+    object: object,
+    dtype: None = ...,
+    *,
+    copy: bool | _CopyMode = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    ndmin: int = ...,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def array(
+    object: Any,
+    dtype: _DTypeLike[_SCT],
+    *,
+    copy: bool | _CopyMode = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    ndmin: int = ...,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def array(
+    object: Any,
+    dtype: DTypeLike,
+    *,
+    copy: bool | _CopyMode = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    ndmin: int = ...,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def zeros(
+    shape: _ShapeLike,
+    dtype: None = ...,
+    order: _OrderCF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def zeros(
+    shape: _ShapeLike,
+    dtype: _DTypeLike[_SCT],
+    order: _OrderCF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def zeros(
+    shape: _ShapeLike,
+    dtype: DTypeLike,
+    order: _OrderCF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def empty(
+    shape: _ShapeLike,
+    dtype: None = ...,
+    order: _OrderCF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def empty(
+    shape: _ShapeLike,
+    dtype: _DTypeLike[_SCT],
+    order: _OrderCF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def empty(
+    shape: _ShapeLike,
+    dtype: DTypeLike,
+    order: _OrderCF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def unravel_index(  # type: ignore[misc]
+    indices: _IntLike_co,
+    shape: _ShapeLike,
+    order: _OrderCF = ...,
+) -> tuple[intp, ...]: ...
+@overload
+def unravel_index(
+    indices: _ArrayLikeInt_co,
+    shape: _ShapeLike,
+    order: _OrderCF = ...,
+) -> tuple[NDArray[intp], ...]: ...
+
+@overload
+def ravel_multi_index(  # type: ignore[misc]
+    multi_index: Sequence[_IntLike_co],
+    dims: Sequence[SupportsIndex],
+    mode: _ModeKind | tuple[_ModeKind, ...] = ...,
+    order: _OrderCF = ...,
+) -> intp: ...
+@overload
+def ravel_multi_index(
+    multi_index: Sequence[_ArrayLikeInt_co],
+    dims: Sequence[SupportsIndex],
+    mode: _ModeKind | tuple[_ModeKind, ...] = ...,
+    order: _OrderCF = ...,
+) -> NDArray[intp]: ...
+
+# NOTE: Allow any sequence of array-like objects
+@overload
+def concatenate(  # type: ignore[misc]
+    arrays: _ArrayLike[_SCT],
+    /,
+    axis: None | SupportsIndex = ...,
+    out: None = ...,
+    *,
+    dtype: None = ...,
+    casting: None | _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def concatenate(  # type: ignore[misc]
+    arrays: _SupportsLenAndGetItem[int, ArrayLike],
+    /,
+    axis: None | SupportsIndex = ...,
+    out: None = ...,
+    *,
+    dtype: None = ...,
+    casting: None | _CastingKind = ...
+) -> NDArray[Any]: ...
+@overload
+def concatenate(  # type: ignore[misc]
+    arrays: _SupportsLenAndGetItem[int, ArrayLike],
+    /,
+    axis: None | SupportsIndex = ...,
+    out: None = ...,
+    *,
+    dtype: _DTypeLike[_SCT],
+    casting: None | _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def concatenate(  # type: ignore[misc]
+    arrays: _SupportsLenAndGetItem[int, ArrayLike],
+    /,
+    axis: None | SupportsIndex = ...,
+    out: None = ...,
+    *,
+    dtype: DTypeLike,
+    casting: None | _CastingKind = ...
+) -> NDArray[Any]: ...
+@overload
+def concatenate(
+    arrays: _SupportsLenAndGetItem[int, ArrayLike],
+    /,
+    axis: None | SupportsIndex = ...,
+    out: _ArrayType = ...,
+    *,
+    dtype: DTypeLike = ...,
+    casting: None | _CastingKind = ...
+) -> _ArrayType: ...
+
+def inner(
+    a: ArrayLike,
+    b: ArrayLike,
+    /,
+) -> Any: ...
+
+@overload
+def where(
+    condition: ArrayLike,
+    /,
+) -> tuple[NDArray[intp], ...]: ...
+@overload
+def where(
+    condition: ArrayLike,
+    x: ArrayLike,
+    y: ArrayLike,
+    /,
+) -> NDArray[Any]: ...
+
+def lexsort(
+    keys: ArrayLike,
+    axis: None | SupportsIndex = ...,
+) -> Any: ...
+
+def can_cast(
+    from_: ArrayLike | DTypeLike,
+    to: DTypeLike,
+    casting: None | _CastingKind = ...,
+) -> bool: ...
+
+def min_scalar_type(
+    a: ArrayLike, /,
+) -> dtype[Any]: ...
+
+def result_type(
+    *arrays_and_dtypes: ArrayLike | DTypeLike,
+) -> dtype[Any]: ...
+
+@overload
+def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ...
+@overload
+def dot(a: ArrayLike, b: ArrayLike, out: _ArrayType) -> _ArrayType: ...
+
+@overload
+def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> bool_: ...  # type: ignore[misc]
+@overload
+def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger[Any]: ...  # type: ignore[misc]
+@overload
+def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger[Any]: ... # type: ignore[misc]
+@overload
+def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating[Any]: ...  # type: ignore[misc]
+@overload
+def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating[Any, Any]: ...  # type: ignore[misc]
+@overload
+def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ...
+@overload
+def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ...
+@overload
+def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ...
+
+def bincount(
+    x: ArrayLike,
+    /,
+    weights: None | ArrayLike = ...,
+    minlength: SupportsIndex = ...,
+) -> NDArray[intp]: ...
+
+def copyto(
+    dst: NDArray[Any],
+    src: ArrayLike,
+    casting: None | _CastingKind = ...,
+    where: None | _ArrayLikeBool_co = ...,
+) -> None: ...
+
+def putmask(
+    a: NDArray[Any],
+    /,
+    mask: _ArrayLikeBool_co,
+    values: ArrayLike,
+) -> None: ...
+
+def packbits(
+    a: _ArrayLikeInt_co,
+    /,
+    axis: None | SupportsIndex = ...,
+    bitorder: L["big", "little"] = ...,
+) -> NDArray[uint8]: ...
+
+def unpackbits(
+    a: _ArrayLike[uint8],
+    /,
+    axis: None | SupportsIndex = ...,
+    count: None | SupportsIndex = ...,
+    bitorder: L["big", "little"] = ...,
+) -> NDArray[uint8]: ...
+
+def shares_memory(
+    a: object,
+    b: object,
+    /,
+    max_work: None | int = ...,
+) -> bool: ...
+
+def may_share_memory(
+    a: object,
+    b: object,
+    /,
+    max_work: None | int = ...,
+) -> bool: ...
+
+@overload
+def asarray(
+    a: _ArrayLike[_SCT],
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asarray(
+    a: object,
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def asarray(
+    a: Any,
+    dtype: _DTypeLike[_SCT],
+    order: _OrderKACF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asarray(
+    a: Any,
+    dtype: DTypeLike,
+    order: _OrderKACF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def asanyarray(
+    a: _ArrayType,  # Preserve subclass-information
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> _ArrayType: ...
+@overload
+def asanyarray(
+    a: _ArrayLike[_SCT],
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asanyarray(
+    a: object,
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def asanyarray(
+    a: Any,
+    dtype: _DTypeLike[_SCT],
+    order: _OrderKACF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asanyarray(
+    a: Any,
+    dtype: DTypeLike,
+    order: _OrderKACF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def ascontiguousarray(
+    a: _ArrayLike[_SCT],
+    dtype: None = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def ascontiguousarray(
+    a: object,
+    dtype: None = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def ascontiguousarray(
+    a: Any,
+    dtype: _DTypeLike[_SCT],
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def ascontiguousarray(
+    a: Any,
+    dtype: DTypeLike,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def asfortranarray(
+    a: _ArrayLike[_SCT],
+    dtype: None = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asfortranarray(
+    a: object,
+    dtype: None = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def asfortranarray(
+    a: Any,
+    dtype: _DTypeLike[_SCT],
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asfortranarray(
+    a: Any,
+    dtype: DTypeLike,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+# In practice `list[Any]` is list with an int, int and a valid
+# `np.seterrcall()` object
+def geterrobj() -> list[Any]: ...
+def seterrobj(errobj: list[Any], /) -> None: ...
+
+def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype[Any]: ...
+
+# `sep` is a de facto mandatory argument, as its default value is deprecated
+@overload
+def fromstring(
+    string: str | bytes,
+    dtype: None = ...,
+    count: SupportsIndex = ...,
+    *,
+    sep: str,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def fromstring(
+    string: str | bytes,
+    dtype: _DTypeLike[_SCT],
+    count: SupportsIndex = ...,
+    *,
+    sep: str,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def fromstring(
+    string: str | bytes,
+    dtype: DTypeLike,
+    count: SupportsIndex = ...,
+    *,
+    sep: str,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+def frompyfunc(
+    func: Callable[..., Any], /,
+    nin: SupportsIndex,
+    nout: SupportsIndex,
+    *,
+    identity: Any = ...,
+) -> ufunc: ...
+
+@overload
+def fromfile(
+    file: str | bytes | os.PathLike[Any] | _IOProtocol,
+    dtype: None = ...,
+    count: SupportsIndex = ...,
+    sep: str = ...,
+    offset: SupportsIndex = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def fromfile(
+    file: str | bytes | os.PathLike[Any] | _IOProtocol,
+    dtype: _DTypeLike[_SCT],
+    count: SupportsIndex = ...,
+    sep: str = ...,
+    offset: SupportsIndex = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def fromfile(
+    file: str | bytes | os.PathLike[Any] | _IOProtocol,
+    dtype: DTypeLike,
+    count: SupportsIndex = ...,
+    sep: str = ...,
+    offset: SupportsIndex = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def fromiter(
+    iter: Iterable[Any],
+    dtype: _DTypeLike[_SCT],
+    count: SupportsIndex = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def fromiter(
+    iter: Iterable[Any],
+    dtype: DTypeLike,
+    count: SupportsIndex = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def frombuffer(
+    buffer: _SupportsBuffer,
+    dtype: None = ...,
+    count: SupportsIndex = ...,
+    offset: SupportsIndex = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def frombuffer(
+    buffer: _SupportsBuffer,
+    dtype: _DTypeLike[_SCT],
+    count: SupportsIndex = ...,
+    offset: SupportsIndex = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def frombuffer(
+    buffer: _SupportsBuffer,
+    dtype: DTypeLike,
+    count: SupportsIndex = ...,
+    offset: SupportsIndex = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def arange(  # type: ignore[misc]
+    stop: _IntLike_co,
+    /, *,
+    dtype: None = ...,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def arange(  # type: ignore[misc]
+    start: _IntLike_co,
+    stop: _IntLike_co,
+    step: _IntLike_co = ...,
+    dtype: None = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def arange(  # type: ignore[misc]
+    stop: _FloatLike_co,
+    /, *,
+    dtype: None = ...,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def arange(  # type: ignore[misc]
+    start: _FloatLike_co,
+    stop: _FloatLike_co,
+    step: _FloatLike_co = ...,
+    dtype: None = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def arange(
+    stop: _TD64Like_co,
+    /, *,
+    dtype: None = ...,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def arange(
+    start: _TD64Like_co,
+    stop: _TD64Like_co,
+    step: _TD64Like_co = ...,
+    dtype: None = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def arange(  # both start and stop must always be specified for datetime64
+    start: datetime64,
+    stop: datetime64,
+    step: datetime64 = ...,
+    dtype: None = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[datetime64]: ...
+@overload
+def arange(
+    stop: Any,
+    /, *,
+    dtype: _DTypeLike[_SCT],
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def arange(
+    start: Any,
+    stop: Any,
+    step: Any = ...,
+    dtype: _DTypeLike[_SCT] = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def arange(
+    stop: Any, /,
+    *,
+    dtype: DTypeLike,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def arange(
+    start: Any,
+    stop: Any,
+    step: Any = ...,
+    dtype: DTypeLike = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+def datetime_data(
+    dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /,
+) -> tuple[str, int]: ...
+
+# The datetime functions perform unsafe casts to `datetime64[D]`,
+# so a lot of different argument types are allowed here
+
+@overload
+def busday_count(  # type: ignore[misc]
+    begindates: _ScalarLike_co | dt.date,
+    enddates: _ScalarLike_co | dt.date,
+    weekmask: ArrayLike = ...,
+    holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+    busdaycal: None | busdaycalendar = ...,
+    out: None = ...,
+) -> int_: ...
+@overload
+def busday_count(  # type: ignore[misc]
+    begindates: ArrayLike | dt.date | _NestedSequence[dt.date],
+    enddates: ArrayLike | dt.date | _NestedSequence[dt.date],
+    weekmask: ArrayLike = ...,
+    holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+    busdaycal: None | busdaycalendar = ...,
+    out: None = ...,
+) -> NDArray[int_]: ...
+@overload
+def busday_count(
+    begindates: ArrayLike | dt.date | _NestedSequence[dt.date],
+    enddates: ArrayLike | dt.date | _NestedSequence[dt.date],
+    weekmask: ArrayLike = ...,
+    holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+    busdaycal: None | busdaycalendar = ...,
+    out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+# `roll="raise"` is (more or less?) equivalent to `casting="safe"`
+@overload
+def busday_offset(  # type: ignore[misc]
+    dates: datetime64 | dt.date,
+    offsets: _TD64Like_co | dt.timedelta,
+    roll: L["raise"] = ...,
+    weekmask: ArrayLike = ...,
+    holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+    busdaycal: None | busdaycalendar = ...,
+    out: None = ...,
+) -> datetime64: ...
+@overload
+def busday_offset(  # type: ignore[misc]
+    dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date],
+    offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta],
+    roll: L["raise"] = ...,
+    weekmask: ArrayLike = ...,
+    holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+    busdaycal: None | busdaycalendar = ...,
+    out: None = ...,
+) -> NDArray[datetime64]: ...
+@overload
+def busday_offset(  # type: ignore[misc]
+    dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date],
+    offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta],
+    roll: L["raise"] = ...,
+    weekmask: ArrayLike = ...,
+    holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+    busdaycal: None | busdaycalendar = ...,
+    out: _ArrayType = ...,
+) -> _ArrayType: ...
+@overload
+def busday_offset(  # type: ignore[misc]
+    dates: _ScalarLike_co | dt.date,
+    offsets: _ScalarLike_co | dt.timedelta,
+    roll: _RollKind,
+    weekmask: ArrayLike = ...,
+    holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+    busdaycal: None | busdaycalendar = ...,
+    out: None = ...,
+) -> datetime64: ...
+@overload
+def busday_offset(  # type: ignore[misc]
+    dates: ArrayLike | dt.date | _NestedSequence[dt.date],
+    offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta],
+    roll: _RollKind,
+    weekmask: ArrayLike = ...,
+    holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+    busdaycal: None | busdaycalendar = ...,
+    out: None = ...,
+) -> NDArray[datetime64]: ...
+@overload
+def busday_offset(
+    dates: ArrayLike | dt.date | _NestedSequence[dt.date],
+    offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta],
+    roll: _RollKind,
+    weekmask: ArrayLike = ...,
+    holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+    busdaycal: None | busdaycalendar = ...,
+    out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+@overload
+def is_busday(  # type: ignore[misc]
+    dates: _ScalarLike_co | dt.date,
+    weekmask: ArrayLike = ...,
+    holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+    busdaycal: None | busdaycalendar = ...,
+    out: None = ...,
+) -> bool_: ...
+@overload
+def is_busday(  # type: ignore[misc]
+    dates: ArrayLike | _NestedSequence[dt.date],
+    weekmask: ArrayLike = ...,
+    holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+    busdaycal: None | busdaycalendar = ...,
+    out: None = ...,
+) -> NDArray[bool_]: ...
+@overload
+def is_busday(
+    dates: ArrayLike | _NestedSequence[dt.date],
+    weekmask: ArrayLike = ...,
+    holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+    busdaycal: None | busdaycalendar = ...,
+    out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+@overload
+def datetime_as_string(  # type: ignore[misc]
+    arr: datetime64 | dt.date,
+    unit: None | L["auto"] | _UnitKind = ...,
+    timezone: L["naive", "UTC", "local"] | dt.tzinfo = ...,
+    casting: _CastingKind = ...,
+) -> str_: ...
+@overload
+def datetime_as_string(
+    arr: _ArrayLikeDT64_co | _NestedSequence[dt.date],
+    unit: None | L["auto"] | _UnitKind = ...,
+    timezone: L["naive", "UTC", "local"] | dt.tzinfo = ...,
+    casting: _CastingKind = ...,
+) -> NDArray[str_]: ...
+
+@overload
+def compare_chararrays(
+    a1: _ArrayLikeStr_co,
+    a2: _ArrayLikeStr_co,
+    cmp: L["<", "<=", "==", ">=", ">", "!="],
+    rstrip: bool,
+) -> NDArray[bool_]: ...
+@overload
+def compare_chararrays(
+    a1: _ArrayLikeBytes_co,
+    a2: _ArrayLikeBytes_co,
+    cmp: L["<", "<=", "==", ">=", ">", "!="],
+    rstrip: bool,
+) -> NDArray[bool_]: ...
+
+def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ...
+
+_GetItemKeys = L[
+    "C", "CONTIGUOUS", "C_CONTIGUOUS",
+    "F", "FORTRAN", "F_CONTIGUOUS",
+    "W", "WRITEABLE",
+    "B", "BEHAVED",
+    "O", "OWNDATA",
+    "A", "ALIGNED",
+    "X", "WRITEBACKIFCOPY",
+    "CA", "CARRAY",
+    "FA", "FARRAY",
+    "FNC",
+    "FORC",
+]
+_SetItemKeys = L[
+    "A", "ALIGNED",
+    "W", "WRITEABLE",
+    "X", "WRITEBACKIFCOPY",
+]
+
+@final
+class flagsobj:
+    __hash__: ClassVar[None]  # type: ignore[assignment]
+    aligned: bool
+    # NOTE: deprecated
+    # updateifcopy: bool
+    writeable: bool
+    writebackifcopy: bool
+    @property
+    def behaved(self) -> bool: ...
+    @property
+    def c_contiguous(self) -> bool: ...
+    @property
+    def carray(self) -> bool: ...
+    @property
+    def contiguous(self) -> bool: ...
+    @property
+    def f_contiguous(self) -> bool: ...
+    @property
+    def farray(self) -> bool: ...
+    @property
+    def fnc(self) -> bool: ...
+    @property
+    def forc(self) -> bool: ...
+    @property
+    def fortran(self) -> bool: ...
+    @property
+    def num(self) -> int: ...
+    @property
+    def owndata(self) -> bool: ...
+    def __getitem__(self, key: _GetItemKeys) -> bool: ...
+    def __setitem__(self, key: _SetItemKeys, value: bool) -> None: ...
+
+def nested_iters(
+    op: ArrayLike | Sequence[ArrayLike],
+    axes: Sequence[Sequence[SupportsIndex]],
+    flags: None | Sequence[_NDIterFlagsKind] = ...,
+    op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ...,
+    op_dtypes: DTypeLike | Sequence[DTypeLike] = ...,
+    order: _OrderKACF = ...,
+    casting: _CastingKind = ...,
+    buffersize: SupportsIndex = ...,
+) -> tuple[nditer, ...]: ...
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/numeric.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/numeric.py
new file mode 100644
index 00000000..91ac3f86
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/numeric.py
@@ -0,0 +1,2530 @@
+import functools
+import itertools
+import operator
+import sys
+import warnings
+import numbers
+import builtins
+
+import numpy as np
+from . import multiarray
+from .multiarray import (
+    fastCopyAndTranspose, ALLOW_THREADS,
+    BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
+    WRAP, arange, array, asarray, asanyarray, ascontiguousarray,
+    asfortranarray, broadcast, can_cast, compare_chararrays,
+    concatenate, copyto, dot, dtype, empty,
+    empty_like, flatiter, frombuffer, from_dlpack, fromfile, fromiter,
+    fromstring, inner, lexsort, matmul, may_share_memory,
+    min_scalar_type, ndarray, nditer, nested_iters, promote_types,
+    putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
+    zeros, normalize_axis_index, _get_promotion_state, _set_promotion_state,
+    _using_numpy2_behavior)
+
+from . import overrides
+from . import umath
+from . import shape_base
+from .overrides import set_array_function_like_doc, set_module
+from .umath import (multiply, invert, sin, PINF, NAN)
+from . import numerictypes
+from .numerictypes import longlong, intc, int_, float_, complex_, bool_
+from ..exceptions import ComplexWarning, TooHardError, AxisError
+from ._ufunc_config import errstate, _no_nep50_warning
+
+bitwise_not = invert
+ufunc = type(sin)
+newaxis = None
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+    'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
+    'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray',
+    'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
+    'fromstring', 'fromfile', 'frombuffer', 'from_dlpack', 'where',
+    'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
+    'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
+    'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
+    'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
+    'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian',
+    'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
+    'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones',
+    'identity', 'allclose', 'compare_chararrays', 'putmask',
+    'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
+    'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
+    'BUFSIZE', 'ALLOW_THREADS', 'full', 'full_like',
+    'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
+    'MAY_SHARE_EXACT', '_get_promotion_state', '_set_promotion_state',
+    '_using_numpy2_behavior']
+
+
+def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
+    return (a,)
+
+
+@array_function_dispatch(_zeros_like_dispatcher)
+def zeros_like(a, dtype=None, order='K', subok=True, shape=None):
+    """
+    Return an array of zeros with the same shape and type as a given array.
+
+    Parameters
+    ----------
+    a : array_like
+        The shape and data-type of `a` define these same attributes of
+        the returned array.
+    dtype : data-type, optional
+        Overrides the data type of the result.
+
+        .. versionadded:: 1.6.0
+    order : {'C', 'F', 'A', or 'K'}, optional
+        Overrides the memory layout of the result. 'C' means C-order,
+        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+        'C' otherwise. 'K' means match the layout of `a` as closely
+        as possible.
+
+        .. versionadded:: 1.6.0
+    subok : bool, optional
+        If True, then the newly created array will use the sub-class
+        type of `a`, otherwise it will be a base-class array. Defaults
+        to True.
+    shape : int or sequence of ints, optional
+        Overrides the shape of the result. If order='K' and the number of
+        dimensions is unchanged, will try to keep order, otherwise,
+        order='C' is implied.
+
+        .. versionadded:: 1.17.0
+
+    Returns
+    -------
+    out : ndarray
+        Array of zeros with the same shape and type as `a`.
+
+    See Also
+    --------
+    empty_like : Return an empty array with shape and type of input.
+    ones_like : Return an array of ones with shape and type of input.
+    full_like : Return a new array with shape of input filled with value.
+    zeros : Return a new array setting values to zero.
+
+    Examples
+    --------
+    >>> x = np.arange(6)
+    >>> x = x.reshape((2, 3))
+    >>> x
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> np.zeros_like(x)
+    array([[0, 0, 0],
+           [0, 0, 0]])
+
+    >>> y = np.arange(3, dtype=float)
+    >>> y
+    array([0., 1., 2.])
+    >>> np.zeros_like(y)
+    array([0.,  0.,  0.])
+
+    """
+    res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
+    # needed instead of a 0 to get same result as zeros for string dtypes
+    z = zeros(1, dtype=res.dtype)
+    multiarray.copyto(res, z, casting='unsafe')
+    return res
+
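+# Illustration (not in upstream NumPy): the copyto-from-a-zeros-scalar step
+# above matters for string dtypes, where broadcasting the Python int 0 would
+# produce the string '0' instead of the empty string that `zeros` returns:
+#
+#     >>> np.zeros_like(np.array(['ab', 'cd']))
+#     array(['', ''], dtype='<U2')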
+
+@set_array_function_like_doc
+@set_module('numpy')
+def ones(shape, dtype=None, order='C', *, like=None):
+    """
+    Return a new array of given shape and type, filled with ones.
+
+    Parameters
+    ----------
+    shape : int or sequence of ints
+        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+    dtype : data-type, optional
+        The desired data-type for the array, e.g., `numpy.int8`.  Default is
+        `numpy.float64`.
+    order : {'C', 'F'}, optional, default: C
+        Whether to store multi-dimensional data in row-major
+        (C-style) or column-major (Fortran-style) order in
+        memory.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Array of ones with the given shape, dtype, and order.
+
+    See Also
+    --------
+    ones_like : Return an array of ones with shape and type of input.
+    empty : Return a new uninitialized array.
+    zeros : Return a new array setting values to zero.
+    full : Return a new array of given shape filled with value.
+
+    Examples
+    --------
+    >>> np.ones(5)
+    array([1., 1., 1., 1., 1.])
+
+    >>> np.ones((5,), dtype=int)
+    array([1, 1, 1, 1, 1])
+
+    >>> np.ones((2, 1))
+    array([[1.],
+           [1.]])
+
+    >>> s = (2,2)
+    >>> np.ones(s)
+    array([[1.,  1.],
+           [1.,  1.]])
+
+    """
+    if like is not None:
+        return _ones_with_like(like, shape, dtype=dtype, order=order)
+
+    a = empty(shape, dtype, order)
+    multiarray.copyto(a, 1, casting='unsafe')
+    return a
+
+
+_ones_with_like = array_function_dispatch()(ones)
+
+
+def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
+    return (a,)
+
+
+@array_function_dispatch(_ones_like_dispatcher)
+def ones_like(a, dtype=None, order='K', subok=True, shape=None):
+    """
+    Return an array of ones with the same shape and type as a given array.
+
+    Parameters
+    ----------
+    a : array_like
+        The shape and data-type of `a` define these same attributes of
+        the returned array.
+    dtype : data-type, optional
+        Overrides the data type of the result.
+
+        .. versionadded:: 1.6.0
+    order : {'C', 'F', 'A', or 'K'}, optional
+        Overrides the memory layout of the result. 'C' means C-order,
+        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+        'C' otherwise. 'K' means match the layout of `a` as closely
+        as possible.
+
+        .. versionadded:: 1.6.0
+    subok : bool, optional
+        If True, then the newly created array will use the sub-class
+        type of `a`, otherwise it will be a base-class array. Defaults
+        to True.
+    shape : int or sequence of ints, optional
+        Overrides the shape of the result. If order='K' and the number of
+        dimensions is unchanged, will try to keep order, otherwise,
+        order='C' is implied.
+
+        .. versionadded:: 1.17.0
+
+    Returns
+    -------
+    out : ndarray
+        Array of ones with the same shape and type as `a`.
+
+    See Also
+    --------
+    empty_like : Return an empty array with shape and type of input.
+    zeros_like : Return an array of zeros with shape and type of input.
+    full_like : Return a new array with shape of input filled with value.
+    ones : Return a new array setting values to one.
+
+    Examples
+    --------
+    >>> x = np.arange(6)
+    >>> x = x.reshape((2, 3))
+    >>> x
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> np.ones_like(x)
+    array([[1, 1, 1],
+           [1, 1, 1]])
+
+    >>> y = np.arange(3, dtype=float)
+    >>> y
+    array([0., 1., 2.])
+    >>> np.ones_like(y)
+    array([1.,  1.,  1.])
+
+    """
+    res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
+    multiarray.copyto(res, 1, casting='unsafe')
+    return res
+
+
+def _full_dispatcher(shape, fill_value, dtype=None, order=None, *, like=None):
+    return (like,)
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def full(shape, fill_value, dtype=None, order='C', *, like=None):
+    """
+    Return a new array of given shape and type, filled with `fill_value`.
+
+    Parameters
+    ----------
+    shape : int or sequence of ints
+        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+    fill_value : scalar or array_like
+        Fill value.
+    dtype : data-type, optional
+        The desired data-type for the array.  The default, None, means
+        ``np.array(fill_value).dtype``.
+    order : {'C', 'F'}, optional
+        Whether to store multidimensional data in C- or Fortran-contiguous
+        (row- or column-wise) order in memory.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Array of `fill_value` with the given shape, dtype, and order.
+
+    See Also
+    --------
+    full_like : Return a new array with shape of input filled with value.
+    empty : Return a new uninitialized array.
+    ones : Return a new array setting values to one.
+    zeros : Return a new array setting values to zero.
+
+    Examples
+    --------
+    >>> np.full((2, 2), np.inf)
+    array([[inf, inf],
+           [inf, inf]])
+    >>> np.full((2, 2), 10)
+    array([[10, 10],
+           [10, 10]])
+
+    >>> np.full((2, 2), [1, 2])
+    array([[1, 2],
+           [1, 2]])
+
+    """
+    if like is not None:
+        return _full_with_like(
+                like, shape, fill_value, dtype=dtype, order=order)
+
+    if dtype is None:
+        fill_value = asarray(fill_value)
+        dtype = fill_value.dtype
+    a = empty(shape, dtype, order)
+    multiarray.copyto(a, fill_value, casting='unsafe')
+    return a
+
+
+_full_with_like = array_function_dispatch()(full)
+
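+# Illustration (not in upstream NumPy): when `dtype` is omitted, `full`
+# infers it from `fill_value` via `asarray`, so the fill value's type
+# decides the array's dtype:
+#
+#     >>> np.full(3, 7.0).dtype
+#     dtype('float64')
+#     >>> np.full(3, True).dtype
+#     dtype('bool')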
+
+def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None):
+    return (a,)
+
+
+@array_function_dispatch(_full_like_dispatcher)
+def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
+    """
+    Return a full array with the same shape and type as a given array.
+
+    Parameters
+    ----------
+    a : array_like
+        The shape and data-type of `a` define these same attributes of
+        the returned array.
+    fill_value : array_like
+        Fill value.
+    dtype : data-type, optional
+        Overrides the data type of the result.
+    order : {'C', 'F', 'A', or 'K'}, optional
+        Overrides the memory layout of the result. 'C' means C-order,
+        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+        'C' otherwise. 'K' means match the layout of `a` as closely
+        as possible.
+    subok : bool, optional
+        If True, then the newly created array will use the sub-class
+        type of `a`, otherwise it will be a base-class array. Defaults
+        to True.
+    shape : int or sequence of ints, optional
+        Overrides the shape of the result. If order='K' and the number of
+        dimensions is unchanged, will try to keep order, otherwise,
+        order='C' is implied.
+
+        .. versionadded:: 1.17.0
+
+    Returns
+    -------
+    out : ndarray
+        Array of `fill_value` with the same shape and type as `a`.
+
+    See Also
+    --------
+    empty_like : Return an empty array with shape and type of input.
+    ones_like : Return an array of ones with shape and type of input.
+    zeros_like : Return an array of zeros with shape and type of input.
+    full : Return a new array of given shape filled with value.
+
+    Examples
+    --------
+    >>> x = np.arange(6, dtype=int)
+    >>> np.full_like(x, 1)
+    array([1, 1, 1, 1, 1, 1])
+    >>> np.full_like(x, 0.1)
+    array([0, 0, 0, 0, 0, 0])
+    >>> np.full_like(x, 0.1, dtype=np.double)
+    array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
+    >>> np.full_like(x, np.nan, dtype=np.double)
+    array([nan, nan, nan, nan, nan, nan])
+
+    >>> y = np.arange(6, dtype=np.double)
+    >>> np.full_like(y, 0.1)
+    array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
+
+    >>> y = np.zeros([2, 2, 3], dtype=int)
+    >>> np.full_like(y, [0, 0, 255])
+    array([[[  0,   0, 255],
+            [  0,   0, 255]],
+           [[  0,   0, 255],
+            [  0,   0, 255]]])
+    """
+    res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
+    multiarray.copyto(res, fill_value, casting='unsafe')
+    return res
+
+
+def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None):
+    return (a,)
+
+
+@array_function_dispatch(_count_nonzero_dispatcher)
+def count_nonzero(a, axis=None, *, keepdims=False):
+    """
+    Counts the number of non-zero values in the array ``a``.
+
+    The word "non-zero" is in reference to the Python 2.x
+    built-in method ``__nonzero__()`` (renamed ``__bool__()``
+    in Python 3.x) of Python objects that tests an object's
+    "truthfulness". For example, any number is considered
+    truthful if it is nonzero, whereas any string is considered
+    truthful if it is not the empty string. Thus, this function
+    (recursively) counts how many elements in ``a`` (and in
+    sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()``
+    method evaluated to ``True``.
+
+    Parameters
+    ----------
+    a : array_like
+        The array for which to count non-zeros.
+    axis : int or tuple, optional
+        Axis or tuple of axes along which to count non-zeros.
+        Default is None, meaning that non-zeros will be counted
+        along a flattened version of ``a``.
+
+        .. versionadded:: 1.12.0
+
+    keepdims : bool, optional
+        If this is set to True, the axes that are counted are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        .. versionadded:: 1.19.0
+
+    Returns
+    -------
+    count : int or array of int
+        Number of non-zero values in the array along a given axis;
+        if no axis is given, the total number of non-zero values
+        in the array is returned.
+
+    See Also
+    --------
+    nonzero : Return the coordinates of all the non-zero values.
+
+    Examples
+    --------
+    >>> np.count_nonzero(np.eye(4))
+    4
+    >>> a = np.array([[0, 1, 7, 0],
+    ...               [3, 0, 2, 19]])
+    >>> np.count_nonzero(a)
+    5
+    >>> np.count_nonzero(a, axis=0)
+    array([1, 1, 2, 1])
+    >>> np.count_nonzero(a, axis=1)
+    array([2, 3])
+    >>> np.count_nonzero(a, axis=1, keepdims=True)
+    array([[2],
+           [3]])
+    """
+    if axis is None and not keepdims:
+        return multiarray.count_nonzero(a)
+
+    a = asanyarray(a)
+
+    # TODO: this works around .astype(bool) not working properly (gh-9847)
+    if np.issubdtype(a.dtype, np.character):
+        a_bool = a != a.dtype.type()
+    else:
+        a_bool = a.astype(np.bool_, copy=False)
+
+    return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims)
+
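+# Illustration (not in upstream NumPy): the character-dtype branch above
+# compares against the dtype's zero value (the empty string), so non-empty
+# strings count as non-zero when an axis is given:
+#
+#     >>> np.count_nonzero(np.array([['a', ''], ['b', 'c']]), axis=0)
+#     array([2, 1])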
+
+@set_module('numpy')
+def isfortran(a):
+    """
+    Check if the array is Fortran contiguous but *not* C contiguous.
+
+    This function is obsolete and, because of changes due to relaxed stride
+    checking, its return value for the same array may differ for versions
+    of NumPy >= 1.10.0 and previous versions. If you only want to check if an
+    array is Fortran contiguous use ``a.flags.f_contiguous`` instead.
+
+    Parameters
+    ----------
+    a : ndarray
+        Input array.
+
+    Returns
+    -------
+    isfortran : bool
+        Returns True if the array is Fortran contiguous but *not* C contiguous.
+
+    Examples
+    --------
+
+    np.array allows you to specify whether the array is written in C-contiguous
+    order (last index varies the fastest), or FORTRAN-contiguous order in
+    memory (first index varies the fastest).
+
+    >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
+    >>> a
+    array([[1, 2, 3],
+           [4, 5, 6]])
+    >>> np.isfortran(a)
+    False
+
+    >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F')
+    >>> b
+    array([[1, 2, 3],
+           [4, 5, 6]])
+    >>> np.isfortran(b)
+    True
+
+    The transpose of a C-ordered array is a FORTRAN-ordered array.
+
+    >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
+    >>> a
+    array([[1, 2, 3],
+           [4, 5, 6]])
+    >>> np.isfortran(a)
+    False
+    >>> b = a.T
+    >>> b
+    array([[1, 4],
+           [2, 5],
+           [3, 6]])
+    >>> np.isfortran(b)
+    True
+
+    C-ordered arrays evaluate as False even if they are also FORTRAN-ordered.
+
+    >>> np.isfortran(np.array([1, 2], order='F'))
+    False
+
+    """
+    return a.flags.fnc
+
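+# Illustration (not in upstream NumPy): ``flags.fnc`` is shorthand for
+# "F-contiguous and not C-contiguous", which is why 1-D arrays (which are
+# both at once) return False:
+#
+#     >>> a = np.ones((2, 3), order='F')
+#     >>> a.flags.f_contiguous, a.flags.c_contiguous, np.isfortran(a)
+#     (True, False, True)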
+
+def _argwhere_dispatcher(a):
+    return (a,)
+
+
+@array_function_dispatch(_argwhere_dispatcher)
+def argwhere(a):
+    """
+    Find the indices of array elements that are non-zero, grouped by element.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+
+    Returns
+    -------
+    index_array : (N, a.ndim) ndarray
+        Indices of elements that are non-zero. Indices are grouped by element.
+        This array will have shape ``(N, a.ndim)`` where ``N`` is the number of
+        non-zero items.
+
+    See Also
+    --------
+    where, nonzero
+
+    Notes
+    -----
+    ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``,
+    but produces a result of the correct shape for a 0D array.
+
+    The output of ``argwhere`` is not suitable for indexing arrays.
+    For this purpose use ``nonzero(a)`` instead.
+
+    Examples
+    --------
+    >>> x = np.arange(6).reshape(2,3)
+    >>> x
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> np.argwhere(x>1)
+    array([[0, 2],
+           [1, 0],
+           [1, 1],
+           [1, 2]])
+
+    """
+    # nonzero does not behave well on 0d, so promote to 1d
+    if np.ndim(a) == 0:
+        a = shape_base.atleast_1d(a)
+        # then remove the added dimension
+        return argwhere(a)[:,:0]
+    return transpose(nonzero(a))
+
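+# Illustration (not in upstream NumPy): the 0-d promotion above preserves
+# the documented ``(N, a.ndim)`` result shape, with ``a.ndim`` equal to 0:
+#
+#     >>> np.argwhere(np.array(1)).shape
+#     (1, 0)
+#     >>> np.argwhere(np.array(0)).shape
+#     (0, 0)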
+
+def _flatnonzero_dispatcher(a):
+    return (a,)
+
+
+@array_function_dispatch(_flatnonzero_dispatcher)
+def flatnonzero(a):
+    """
+    Return indices that are non-zero in the flattened version of a.
+
+    This is equivalent to ``np.nonzero(np.ravel(a))[0]``.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+
+    Returns
+    -------
+    res : ndarray
+        Output array, containing the indices of the elements of ``a.ravel()``
+        that are non-zero.
+
+    See Also
+    --------
+    nonzero : Return the indices of the non-zero elements of the input array.
+    ravel : Return a 1-D array containing the elements of the input array.
+
+    Examples
+    --------
+    >>> x = np.arange(-2, 3)
+    >>> x
+    array([-2, -1,  0,  1,  2])
+    >>> np.flatnonzero(x)
+    array([0, 1, 3, 4])
+
+    Use the indices of the non-zero elements as an index array to extract
+    these elements:
+
+    >>> x.ravel()[np.flatnonzero(x)]
+    array([-2, -1,  1,  2])
+
+    """
+    return np.nonzero(np.ravel(a))[0]
+
+
+def _correlate_dispatcher(a, v, mode=None):
+    return (a, v)
+
+
+@array_function_dispatch(_correlate_dispatcher)
+def correlate(a, v, mode='valid'):
+    r"""
+    Cross-correlation of two 1-dimensional sequences.
+
+    This function computes the correlation as generally defined in signal
+    processing texts:
+
+    .. math:: c_k = \sum_n a_{n+k} \cdot \overline{v}_n
+
+    with a and v sequences being zero-padded where necessary and
+    :math:`\overline x` denoting complex conjugation.
+
+    Parameters
+    ----------
+    a, v : array_like
+        Input sequences.
+    mode : {'valid', 'same', 'full'}, optional
+        Refer to the `convolve` docstring.  Note that the default
+        is 'valid', unlike `convolve`, which uses 'full'.
+    old_behavior : bool
+        `old_behavior` was removed in NumPy 1.10. If you need the old
+        behavior, use `multiarray.correlate`.
+
+    Returns
+    -------
+    out : ndarray
+        Discrete cross-correlation of `a` and `v`.
+
+    See Also
+    --------
+    convolve : Discrete, linear convolution of two one-dimensional sequences.
+    multiarray.correlate : Old, no conjugate, version of correlate.
+    scipy.signal.correlate : uses the FFT, which has superior performance on large arrays.
+
+    Notes
+    -----
+    The definition of correlation above is not unique and sometimes correlation
+    may be defined differently. Another common definition is:
+
+    .. math:: c'_k = \sum_n a_{n} \cdot \overline{v_{n+k}}
+
+    which is related to :math:`c_k` by :math:`c'_k = c_{-k}`.
+
+    `numpy.correlate` may perform slowly on large arrays (e.g. ``n = 1e5``)
+    because it does not use the FFT to compute the convolution; in that case,
+    `scipy.signal.correlate` might be preferable.
+
+
+    Examples
+    --------
+    >>> np.correlate([1, 2, 3], [0, 1, 0.5])
+    array([3.5])
+    >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
+    array([2. ,  3.5,  3. ])
+    >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
+    array([0.5,  2. ,  3.5,  3. ,  0. ])
+
+    Using complex sequences:
+
+    >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full')
+    array([ 0.5-0.5j,  1.0+0.j ,  1.5-1.5j,  3.0-1.j ,  0.0+0.j ])
+
+    Note that you get the time reversed, complex conjugated result
+    (:math:`\overline{c_{-k}}`) when the two input sequences a and v change
+    places:
+
+    >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full')
+    array([ 0.0+0.j ,  3.0+1.j ,  1.5+1.5j,  1.0+0.j ,  0.5+0.5j])
+
+    """
+    return multiarray.correlate2(a, v, mode)
+
+
+def _convolve_dispatcher(a, v, mode=None):
+    return (a, v)
+
+
+@array_function_dispatch(_convolve_dispatcher)
+def convolve(a, v, mode='full'):
+    """
+    Returns the discrete, linear convolution of two one-dimensional sequences.
+
+    The convolution operator is often seen in signal processing, where it
+    models the effect of a linear time-invariant system on a signal [1]_.  In
+    probability theory, the sum of two independent random variables is
+    distributed according to the convolution of their individual
+    distributions.
+
+    If `v` is longer than `a`, the arrays are swapped before computation.
+
+    Parameters
+    ----------
+    a : (N,) array_like
+        First one-dimensional input array.
+    v : (M,) array_like
+        Second one-dimensional input array.
+    mode : {'full', 'valid', 'same'}, optional
+        'full':
+          By default, mode is 'full'.  This returns the convolution
+          at each point of overlap, with an output shape of (N+M-1,). At
+          the end-points of the convolution, the signals do not overlap
+          completely, and boundary effects may be seen.
+
+        'same':
+          Mode 'same' returns output of length ``max(M, N)``.  Boundary
+          effects are still visible.
+
+        'valid':
+          Mode 'valid' returns output of length
+          ``max(M, N) - min(M, N) + 1``.  The convolution product is only given
+          for points where the signals overlap completely.  Values outside
+          the signal boundary have no effect.
+
+    Returns
+    -------
+    out : ndarray
+        Discrete, linear convolution of `a` and `v`.
+
+    See Also
+    --------
+    scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
+                               Transform.
+    scipy.linalg.toeplitz : Used to construct the convolution operator.
+    polymul : Polynomial multiplication. Same output as convolve, but also
+              accepts poly1d objects as input.
+
+    Notes
+    -----
+    The discrete convolution operation is defined as
+
+    .. math:: (a * v)_n = \\sum_{m = -\\infty}^{\\infty} a_m v_{n - m}
+
+    It can be shown that a convolution :math:`x(t) * y(t)` in time/space
+    is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
+    domain, after appropriate padding (padding is necessary to prevent
+    circular convolution).  Since multiplication is more efficient (faster)
+    than convolution, the function `scipy.signal.fftconvolve` exploits the
+    FFT to calculate the convolution of large data-sets.
+
+    References
+    ----------
+    .. [1] Wikipedia, "Convolution",
+        https://en.wikipedia.org/wiki/Convolution
+
+    Examples
+    --------
+    Note how the convolution operator flips the second array
+    before "sliding" the two across one another:
+
+    >>> np.convolve([1, 2, 3], [0, 1, 0.5])
+    array([0. , 1. , 2.5, 4. , 1.5])
+
+    Only return the middle values of the convolution.
+    Contains boundary effects, where zeros are taken
+    into account:
+
+    >>> np.convolve([1,2,3],[0,1,0.5], 'same')
+    array([1. ,  2.5,  4. ])
+
+    The two arrays are of the same length, so there
+    is only one position where they completely overlap:
+
+    >>> np.convolve([1,2,3],[0,1,0.5], 'valid')
+    array([2.5])
+
+    """
+    a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1)
+    if len(v) > len(a):
+        a, v = v, a
+    if len(a) == 0:
+        raise ValueError('a cannot be empty')
+    if len(v) == 0:
+        raise ValueError('v cannot be empty')
+    return multiarray.correlate(a, v[::-1], mode)
+
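+# Illustration (not in upstream NumPy): as the last line above shows,
+# convolution is correlation with the second sequence reversed (and, for
+# real inputs, no conjugation):
+#
+#     >>> np.array_equal(np.convolve([1, 2, 3], [0, 1, 0.5]),
+#     ...                np.correlate([1, 2, 3], [0.5, 1, 0], mode='full'))
+#     True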
+
+def _outer_dispatcher(a, b, out=None):
+    return (a, b, out)
+
+
+@array_function_dispatch(_outer_dispatcher)
+def outer(a, b, out=None):
+    """
+    Compute the outer product of two vectors.
+
+    Given two vectors `a` and `b` of length ``M`` and ``N``, respectively,
+    the outer product [1]_ is::
+
+      [[a_0*b_0  a_0*b_1 ... a_0*b_{N-1} ]
+       [a_1*b_0    .
+       [ ...          .
+       [a_{M-1}*b_0            a_{M-1}*b_{N-1} ]]
+
+    Parameters
+    ----------
+    a : (M,) array_like
+        First input vector.  Input is flattened if
+        not already 1-dimensional.
+    b : (N,) array_like
+        Second input vector.  Input is flattened if
+        not already 1-dimensional.
+    out : (M, N) ndarray, optional
+        A location where the result is stored.
+
+        .. versionadded:: 1.9.0
+
+    Returns
+    -------
+    out : (M, N) ndarray
+        ``out[i, j] = a[i] * b[j]``
+
+    See also
+    --------
+    inner
+    einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
+    ufunc.outer : A generalization to dimensions other than 1D and other
+                  operations. ``np.multiply.outer(a.ravel(), b.ravel())``
+                  is the equivalent.
+    tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))``
+                is the equivalent.
+
+    References
+    ----------
+    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
+           ed., Baltimore, MD, Johns Hopkins University Press, 1996,
+           pg. 8.
+
+    Examples
+    --------
+    Make a (*very* coarse) grid for computing a Mandelbrot set:
+
+    >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
+    >>> rl
+    array([[-2., -1.,  0.,  1.,  2.],
+           [-2., -1.,  0.,  1.,  2.],
+           [-2., -1.,  0.,  1.,  2.],
+           [-2., -1.,  0.,  1.,  2.],
+           [-2., -1.,  0.,  1.,  2.]])
+    >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
+    >>> im
+    array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
+           [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
+           [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
+           [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
+           [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
+    >>> grid = rl + im
+    >>> grid
+    array([[-2.+2.j, -1.+2.j,  0.+2.j,  1.+2.j,  2.+2.j],
+           [-2.+1.j, -1.+1.j,  0.+1.j,  1.+1.j,  2.+1.j],
+           [-2.+0.j, -1.+0.j,  0.+0.j,  1.+0.j,  2.+0.j],
+           [-2.-1.j, -1.-1.j,  0.-1.j,  1.-1.j,  2.-1.j],
+           [-2.-2.j, -1.-2.j,  0.-2.j,  1.-2.j,  2.-2.j]])
+
+    An example using a "vector" of letters:
+
+    >>> x = np.array(['a', 'b', 'c'], dtype=object)
+    >>> np.outer(x, [1, 2, 3])
+    array([['a', 'aa', 'aaa'],
+           ['b', 'bb', 'bbb'],
+           ['c', 'cc', 'ccc']], dtype=object)
+
+    """
+    a = asarray(a)
+    b = asarray(b)
+    return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out)
+
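+# Illustration (not in upstream NumPy): the implementation above is plain
+# broadcasting of a column against a row, so it is equivalent to:
+#
+#     >>> a, b = np.arange(3), np.arange(4)
+#     >>> np.array_equal(np.outer(a, b), a.reshape(-1, 1) * b)
+#     True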
+
+def _tensordot_dispatcher(a, b, axes=None):
+    return (a, b)
+
+
+@array_function_dispatch(_tensordot_dispatcher)
+def tensordot(a, b, axes=2):
+    """
+    Compute tensor dot product along specified axes.
+
+    Given two tensors, `a` and `b`, and an array_like object containing
+    two array_like objects, ``(a_axes, b_axes)``, sum the products of
+    `a`'s and `b`'s elements (components) over the axes specified by
+    ``a_axes`` and ``b_axes``. The third argument can be a single non-negative
+    integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions
+    of `a` and the first ``N`` dimensions of `b` are summed over.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Tensors to "dot".
+
+    axes : int or (2,) array_like
+        * integer_like
+          If an int N, sum over the last N axes of `a` and the first N axes
+          of `b` in order. The sizes of the corresponding axes must match.
+        * (2,) array_like
+          Or, a list of axes to be summed over, first sequence applying to `a`,
+          second to `b`. Both sequences must be of the same length.
+
+    Returns
+    -------
+    output : ndarray
+        The tensor dot product of the input.
+
+    See Also
+    --------
+    dot, einsum
+
+    Notes
+    -----
+    Three common use cases are:
+        * ``axes = 0`` : tensor product :math:`a\\otimes b`
+        * ``axes = 1`` : tensor dot product :math:`a\\cdot b`
+        * ``axes = 2`` : (default) tensor double contraction :math:`a:b`
+
+    When `axes` is integer_like, the sequence for evaluation will be: first
+    the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
+    (N-1)th axis in `b` last.
+
+    When there is more than one axis to sum over - and they are not the last
+    (first) axes of `a` (`b`) - the argument `axes` should consist of
+    two sequences of the same length, with the first axis to sum over given
+    first in both sequences, the second axis second, and so forth.
+
+    The shape of the result consists of the non-contracted axes of the
+    first tensor, followed by the non-contracted axes of the second.
+
+    Examples
+    --------
+    A "traditional" example:
+
+    >>> a = np.arange(60.).reshape(3,4,5)
+    >>> b = np.arange(24.).reshape(4,3,2)
+    >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
+    >>> c.shape
+    (5, 2)
+    >>> c
+    array([[4400., 4730.],
+           [4532., 4874.],
+           [4664., 5018.],
+           [4796., 5162.],
+           [4928., 5306.]])
+    >>> # A slower but equivalent way of computing the same...
+    >>> d = np.zeros((5,2))
+    >>> for i in range(5):
+    ...   for j in range(2):
+    ...     for k in range(3):
+    ...       for n in range(4):
+    ...         d[i,j] += a[k,n,i] * b[n,k,j]
+    >>> c == d
+    array([[ True,  True],
+           [ True,  True],
+           [ True,  True],
+           [ True,  True],
+           [ True,  True]])
+
+    An extended example taking advantage of the overloading of + and \\*:
+
+    >>> a = np.array(range(1, 9))
+    >>> a.shape = (2, 2, 2)
+    >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
+    >>> A.shape = (2, 2)
+    >>> a; A
+    array([[[1, 2],
+            [3, 4]],
+           [[5, 6],
+            [7, 8]]])
+    array([['a', 'b'],
+           ['c', 'd']], dtype=object)
+
+    >>> np.tensordot(a, A) # third argument default is 2 for double-contraction
+    array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object)
+
+    >>> np.tensordot(a, A, 1)
+    array([[['acc', 'bdd'],
+            ['aaacccc', 'bbbdddd']],
+           [['aaaaacccccc', 'bbbbbdddddd'],
+            ['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object)
+
+    >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.)
+    array([[[[['a', 'b'],
+              ['c', 'd']],
+              ...
+
+    >>> np.tensordot(a, A, (0, 1))
+    array([[['abbbbb', 'cddddd'],
+            ['aabbbbbb', 'ccdddddd']],
+           [['aaabbbbbbb', 'cccddddddd'],
+            ['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object)
+
+    >>> np.tensordot(a, A, (2, 1))
+    array([[['abb', 'cdd'],
+            ['aaabbbb', 'cccdddd']],
+           [['aaaaabbbbbb', 'cccccdddddd'],
+            ['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object)
+
+    >>> np.tensordot(a, A, ((0, 1), (0, 1)))
+    array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object)
+
+    >>> np.tensordot(a, A, ((2, 1), (1, 0)))
+    array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object)
+
+    """
+    try:
+        iter(axes)
+    except Exception:
+        axes_a = list(range(-axes, 0))
+        axes_b = list(range(0, axes))
+    else:
+        axes_a, axes_b = axes
+    try:
+        na = len(axes_a)
+        axes_a = list(axes_a)
+    except TypeError:
+        axes_a = [axes_a]
+        na = 1
+    try:
+        nb = len(axes_b)
+        axes_b = list(axes_b)
+    except TypeError:
+        axes_b = [axes_b]
+        nb = 1
+
+    a, b = asarray(a), asarray(b)
+    as_ = a.shape
+    nda = a.ndim
+    bs = b.shape
+    ndb = b.ndim
+    equal = True
+    if na != nb:
+        equal = False
+    else:
+        for k in range(na):
+            if as_[axes_a[k]] != bs[axes_b[k]]:
+                equal = False
+                break
+            if axes_a[k] < 0:
+                axes_a[k] += nda
+            if axes_b[k] < 0:
+                axes_b[k] += ndb
+    if not equal:
+        raise ValueError("shape-mismatch for sum")
+
+    # Move the axes to sum over to the end of "a"
+    # and to the front of "b"
+    notin = [k for k in range(nda) if k not in axes_a]
+    newaxes_a = notin + axes_a
+    N2 = 1
+    for axis in axes_a:
+        N2 *= as_[axis]
+    newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2)
+    olda = [as_[axis] for axis in notin]
+
+    notin = [k for k in range(ndb) if k not in axes_b]
+    newaxes_b = axes_b + notin
+    N2 = 1
+    for axis in axes_b:
+        N2 *= bs[axis]
+    newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin])))
+    oldb = [bs[axis] for axis in notin]
+
+    at = a.transpose(newaxes_a).reshape(newshape_a)
+    bt = b.transpose(newaxes_b).reshape(newshape_b)
+    res = dot(at, bt)
+    return res.reshape(olda + oldb)
+
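+# Illustration (not in upstream NumPy): the transpose/reshape steps above
+# reduce every tensordot to a single 2-D `dot`; e.g. contracting one axis
+# is a reshaped matrix product:
+#
+#     >>> a = np.arange(24.).reshape(2, 3, 4)
+#     >>> b = np.arange(8.).reshape(4, 2)
+#     >>> np.allclose(np.tensordot(a, b, axes=1),
+#     ...             (a.reshape(6, 4) @ b).reshape(2, 3, 2))
+#     True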
+
+def _roll_dispatcher(a, shift, axis=None):
+    return (a,)
+
+
+@array_function_dispatch(_roll_dispatcher)
+def roll(a, shift, axis=None):
+    """
+    Roll array elements along a given axis.
+
+    Elements that roll beyond the last position are re-introduced at
+    the first.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    shift : int or tuple of ints
+        The number of places by which elements are shifted.  If a tuple,
+        then `axis` must be a tuple of the same size, and each of the
+        given axes is shifted by the corresponding number.  If an int
+        while `axis` is a tuple of ints, then the same value is used for
+        all given axes.
+    axis : int or tuple of ints, optional
+        Axis or axes along which elements are shifted.  By default, the
+        array is flattened before shifting, after which the original
+        shape is restored.
+
+    Returns
+    -------
+    res : ndarray
+        Output array, with the same shape as `a`.
+
+    See Also
+    --------
+    rollaxis : Roll the specified axis backwards, until it lies in a
+               given position.
+
+    Notes
+    -----
+    .. versionadded:: 1.12.0
+
+    Supports rolling over multiple dimensions simultaneously.
+
+    Examples
+    --------
+    >>> x = np.arange(10)
+    >>> np.roll(x, 2)
+    array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
+    >>> np.roll(x, -2)
+    array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
+
+    >>> x2 = np.reshape(x, (2, 5))
+    >>> x2
+    array([[0, 1, 2, 3, 4],
+           [5, 6, 7, 8, 9]])
+    >>> np.roll(x2, 1)
+    array([[9, 0, 1, 2, 3],
+           [4, 5, 6, 7, 8]])
+    >>> np.roll(x2, -1)
+    array([[1, 2, 3, 4, 5],
+           [6, 7, 8, 9, 0]])
+    >>> np.roll(x2, 1, axis=0)
+    array([[5, 6, 7, 8, 9],
+           [0, 1, 2, 3, 4]])
+    >>> np.roll(x2, -1, axis=0)
+    array([[5, 6, 7, 8, 9],
+           [0, 1, 2, 3, 4]])
+    >>> np.roll(x2, 1, axis=1)
+    array([[4, 0, 1, 2, 3],
+           [9, 5, 6, 7, 8]])
+    >>> np.roll(x2, -1, axis=1)
+    array([[1, 2, 3, 4, 0],
+           [6, 7, 8, 9, 5]])
+    >>> np.roll(x2, (1, 1), axis=(1, 0))
+    array([[9, 5, 6, 7, 8],
+           [4, 0, 1, 2, 3]])
+    >>> np.roll(x2, (2, 1), axis=(1, 0))
+    array([[8, 9, 5, 6, 7],
+           [3, 4, 0, 1, 2]])
+
+    """
+    a = asanyarray(a)
+    if axis is None:
+        return roll(a.ravel(), shift, 0).reshape(a.shape)
+
+    else:
+        axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)
+        broadcasted = broadcast(shift, axis)
+        if broadcasted.ndim > 1:
+            raise ValueError(
+                "'shift' and 'axis' should be scalars or 1D sequences")
+        shifts = {ax: 0 for ax in range(a.ndim)}
+        for sh, ax in broadcasted:
+            shifts[ax] += sh
+
+        rolls = [((slice(None), slice(None)),)] * a.ndim
+        for ax, offset in shifts.items():
+            offset %= a.shape[ax] or 1  # If `a` is empty, nothing matters.
+            if offset:
+                # (original, result), (original, result)
+                rolls[ax] = ((slice(None, -offset), slice(offset, None)),
+                             (slice(-offset, None), slice(None, offset)))
+
+        result = empty_like(a)
+        for indices in itertools.product(*rolls):
+            arr_index, res_index = zip(*indices)
+            result[res_index] = a[arr_index]
+
+        return result
+
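+# Illustration (not in upstream NumPy): for a single axis the slice pairs
+# built above amount to two block copies; rolling [0, 1, 2, 3, 4] by 2
+# assigns result[2:] = a[:-2] and result[:2] = a[-2:]:
+#
+#     >>> np.roll(np.arange(5), 2)
+#     array([3, 4, 0, 1, 2])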
+
+def _rollaxis_dispatcher(a, axis, start=None):
+    return (a,)
+
+
+@array_function_dispatch(_rollaxis_dispatcher)
+def rollaxis(a, axis, start=0):
+    """
+    Roll the specified axis backwards, until it lies in a given position.
+
+    This function continues to be supported for backward compatibility, but you
+    should prefer `moveaxis`. The `moveaxis` function was added in NumPy
+    1.11.
+
+    Parameters
+    ----------
+    a : ndarray
+        Input array.
+    axis : int
+        The axis to be rolled. The positions of the other axes do not
+        change relative to one another.
+    start : int, optional
+        When ``start <= axis``, the axis is rolled back until it lies in
+        this position. When ``start > axis``, the axis is rolled until it
+        lies before this position. The default, 0, results in a "complete"
+        roll. The following table describes how negative values of ``start``
+        are interpreted:
+
+        .. table::
+           :align: left
+
+           +-------------------+----------------------+
+           |     ``start``     | Normalized ``start`` |
+           +===================+======================+
+           | ``-(arr.ndim+1)`` | raise ``AxisError``  |
+           +-------------------+----------------------+
+           | ``-arr.ndim``     | 0                    |
+           +-------------------+----------------------+
+           | |vdots|           | |vdots|              |
+           +-------------------+----------------------+
+           | ``-1``            | ``arr.ndim-1``       |
+           +-------------------+----------------------+
+           | ``0``             | ``0``                |
+           +-------------------+----------------------+
+           | |vdots|           | |vdots|              |
+           +-------------------+----------------------+
+           | ``arr.ndim``      | ``arr.ndim``         |
+           +-------------------+----------------------+
+           | ``arr.ndim + 1``  | raise ``AxisError``  |
+           +-------------------+----------------------+
+
+        .. |vdots|   unicode:: U+22EE .. Vertical Ellipsis
+
+    Returns
+    -------
+    res : ndarray
+        For NumPy >= 1.10.0 a view of `a` is always returned. For earlier
+        NumPy versions a view of `a` is returned only if the order of the
+        axes is changed, otherwise the input array is returned.
+
+    See Also
+    --------
+    moveaxis : Move array axes to new positions.
+    roll : Roll the elements of an array by a number of positions along a
+        given axis.
+
+    Examples
+    --------
+    >>> a = np.ones((3,4,5,6))
+    >>> np.rollaxis(a, 3, 1).shape
+    (3, 6, 4, 5)
+    >>> np.rollaxis(a, 2).shape
+    (5, 3, 4, 6)
+    >>> np.rollaxis(a, 1, 4).shape
+    (3, 5, 6, 4)
+
+    """
+    n = a.ndim
+    axis = normalize_axis_index(axis, n)
+    if start < 0:
+        start += n
+    msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
+    if not (0 <= start < n + 1):
+        raise AxisError(msg % ('start', -n, 'start', n + 1, start))
+    if axis < start:
+        # it's been removed
+        start -= 1
+    if axis == start:
+        return a[...]
+    axes = list(range(0, n))
+    axes.remove(axis)
+    axes.insert(start, axis)
+    return a.transpose(axes)
+
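+# Illustration (not in upstream NumPy): with the default ``start=0`` the
+# whole call is equivalent to ``moveaxis(a, axis, 0)``:
+#
+#     >>> a = np.ones((3, 4, 5, 6))
+#     >>> np.rollaxis(a, 2).shape == np.moveaxis(a, 2, 0).shape
+#     True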
+
+def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
+    """
+    Normalizes an axis argument into a tuple of non-negative integer axes.
+
+    This handles shorthands such as ``1`` and converts them to ``(1,)``,
+    as well as performing the handling of negative indices covered by
+    `normalize_axis_index`.
+
+    By default, this forbids axes from being specified multiple times.
+
+    Used internally by multi-axis-checking logic.
+
+    .. versionadded:: 1.13.0
+
+    Parameters
+    ----------
+    axis : int, iterable of int
+        The un-normalized index or indices of the axis.
+    ndim : int
+        The number of dimensions of the array that `axis` should be normalized
+        against.
+    argname : str, optional
+        A prefix to put before the error message, typically the name of the
+        argument.
+    allow_duplicate : bool, optional
+        If False, the default, disallow an axis from being specified twice.
+
+    Returns
+    -------
+    normalized_axes : tuple of int
+        The normalized axis index, such that `0 <= normalized_axis < ndim`
+
+    Raises
+    ------
+    AxisError
+        If any axis provided is out of range
+    ValueError
+        If an axis is repeated
+
+    See also
+    --------
+    normalize_axis_index : normalizing a single scalar axis
+    """
+    # Optimization to speed-up the most common cases.
+    if type(axis) not in (tuple, list):
+        try:
+            axis = [operator.index(axis)]
+        except TypeError:
+            pass
+    # Going via an iterator directly is slower than via list comprehension.
+    axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
+    if not allow_duplicate and len(set(axis)) != len(axis):
+        if argname:
+            raise ValueError('repeated axis in `{}` argument'.format(argname))
+        else:
+            raise ValueError('repeated axis')
+    return axis
+
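+# Illustration (not in upstream NumPy): scalars become 1-tuples and negative
+# axes are wrapped against ``ndim``:
+#
+#     >>> normalize_axis_tuple(-1, 3)
+#     (2,)
+#     >>> normalize_axis_tuple((0, -2), 3)
+#     (0, 1)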
+
+def _moveaxis_dispatcher(a, source, destination):
+    return (a,)
+
+
+@array_function_dispatch(_moveaxis_dispatcher)
+def moveaxis(a, source, destination):
+    """
+    Move axes of an array to new positions.
+
+    Other axes remain in their original order.
+
+    .. versionadded:: 1.11.0
+
+    Parameters
+    ----------
+    a : np.ndarray
+        The array whose axes should be reordered.
+    source : int or sequence of int
+        Original positions of the axes to move. These must be unique.
+    destination : int or sequence of int
+        Destination positions for each of the original axes. These must also be
+        unique.
+
+    Returns
+    -------
+    result : np.ndarray
+        Array with moved axes. This array is a view of the input array.
+
+    See Also
+    --------
+    transpose : Permute the dimensions of an array.
+    swapaxes : Interchange two axes of an array.
+
+    Examples
+    --------
+    >>> x = np.zeros((3, 4, 5))
+    >>> np.moveaxis(x, 0, -1).shape
+    (4, 5, 3)
+    >>> np.moveaxis(x, -1, 0).shape
+    (5, 3, 4)
+
+    These all achieve the same result:
+
+    >>> np.transpose(x).shape
+    (5, 4, 3)
+    >>> np.swapaxes(x, 0, -1).shape
+    (5, 4, 3)
+    >>> np.moveaxis(x, [0, 1], [-1, -2]).shape
+    (5, 4, 3)
+    >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
+    (5, 4, 3)
+
+    """
+    try:
+        # allow duck-array types if they define transpose
+        transpose = a.transpose
+    except AttributeError:
+        a = asarray(a)
+        transpose = a.transpose
+
+    source = normalize_axis_tuple(source, a.ndim, 'source')
+    destination = normalize_axis_tuple(destination, a.ndim, 'destination')
+    if len(source) != len(destination):
+        raise ValueError('`source` and `destination` arguments must have '
+                         'the same number of elements')
+
+    order = [n for n in range(a.ndim) if n not in source]
+
+    for dest, src in sorted(zip(destination, source)):
+        order.insert(dest, src)
+
+    result = transpose(order)
+    return result
+
+
+def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None):
+    return (a, b)
+
+
+@array_function_dispatch(_cross_dispatcher)
+def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
+    """
+    Return the cross product of two (arrays of) vectors.
+
+    The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
+    to both `a` and `b`.  If `a` and `b` are arrays of vectors, the vectors
+    are defined by the last axis of `a` and `b` by default, and these axes
+    can have dimensions 2 or 3.  Where the dimension of either `a` or `b` is
+    2, the third component of the input vector is assumed to be zero and the
+    cross product calculated accordingly.  In cases where both input vectors
+    have dimension 2, the z-component of the cross product is returned.
+
+    Parameters
+    ----------
+    a : array_like
+        Components of the first vector(s).
+    b : array_like
+        Components of the second vector(s).
+    axisa : int, optional
+        Axis of `a` that defines the vector(s).  By default, the last axis.
+    axisb : int, optional
+        Axis of `b` that defines the vector(s).  By default, the last axis.
+    axisc : int, optional
+        Axis of `c` containing the cross product vector(s).  Ignored if
+        both input vectors have dimension 2, as the return is scalar.
+        By default, the last axis.
+    axis : int, optional
+        If defined, the axis of `a`, `b` and `c` that defines the vector(s)
+        and cross product(s).  Overrides `axisa`, `axisb` and `axisc`.
+
+    Returns
+    -------
+    c : ndarray
+        Vector cross product(s).
+
+    Raises
+    ------
+    ValueError
+        When the dimension of the vector(s) in `a` and/or `b` does not
+        equal 2 or 3.
+
+    See Also
+    --------
+    inner : Inner product
+    outer : Outer product.
+    ix_ : Construct index arrays.
+
+    Notes
+    -----
+    .. versionadded:: 1.9.0
+
+    Supports full broadcasting of the inputs.
+
+    Examples
+    --------
+    Vector cross-product.
+
+    >>> x = [1, 2, 3]
+    >>> y = [4, 5, 6]
+    >>> np.cross(x, y)
+    array([-3,  6, -3])
+
+    One vector with dimension 2.
+
+    >>> x = [1, 2]
+    >>> y = [4, 5, 6]
+    >>> np.cross(x, y)
+    array([12, -6, -3])
+
+    Equivalently:
+
+    >>> x = [1, 2, 0]
+    >>> y = [4, 5, 6]
+    >>> np.cross(x, y)
+    array([12, -6, -3])
+
+    Both vectors with dimension 2.
+
+    >>> x = [1,2]
+    >>> y = [4,5]
+    >>> np.cross(x, y)
+    array(-3)
+
+    Multiple vector cross-products. Note that the direction of the cross
+    product vector is defined by the *right-hand rule*.
+
+    >>> x = np.array([[1,2,3], [4,5,6]])
+    >>> y = np.array([[4,5,6], [1,2,3]])
+    >>> np.cross(x, y)
+    array([[-3,  6, -3],
+           [ 3, -6,  3]])
+
+    The orientation of `c` can be changed using the `axisc` keyword.
+
+    >>> np.cross(x, y, axisc=0)
+    array([[-3,  3],
+           [ 6, -6],
+           [-3,  3]])
+
+    Change the vector definition of `x` and `y` using `axisa` and `axisb`.
+
+    >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
+    >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
+    >>> np.cross(x, y)
+    array([[ -6,  12,  -6],
+           [  0,   0,   0],
+           [  6, -12,   6]])
+    >>> np.cross(x, y, axisa=0, axisb=0)
+    array([[-24,  48, -24],
+           [-30,  60, -30],
+           [-36,  72, -36]])
+
+    """
+    if axis is not None:
+        axisa, axisb, axisc = (axis,) * 3
+    a = asarray(a)
+    b = asarray(b)
+    # Check axisa and axisb are within bounds
+    axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa')
+    axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
+
+    # Move working axis to the end of the shape
+    a = moveaxis(a, axisa, -1)
+    b = moveaxis(b, axisb, -1)
+    msg = ("incompatible dimensions for cross product\n"
+           "(dimension must be 2 or 3)")
+    if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
+        raise ValueError(msg)
+
+    # Create the output array
+    shape = broadcast(a[..., 0], b[..., 0]).shape
+    if a.shape[-1] == 3 or b.shape[-1] == 3:
+        shape += (3,)
+        # Check axisc is within bounds
+        axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc')
+    dtype = promote_types(a.dtype, b.dtype)
+    cp = empty(shape, dtype)
+
+    # recast arrays as dtype
+    a = a.astype(dtype)
+    b = b.astype(dtype)
+
+    # create local aliases for readability
+    a0 = a[..., 0]
+    a1 = a[..., 1]
+    if a.shape[-1] == 3:
+        a2 = a[..., 2]
+    b0 = b[..., 0]
+    b1 = b[..., 1]
+    if b.shape[-1] == 3:
+        b2 = b[..., 2]
+    if cp.ndim != 0 and cp.shape[-1] == 3:
+        cp0 = cp[..., 0]
+        cp1 = cp[..., 1]
+        cp2 = cp[..., 2]
+
+    if a.shape[-1] == 2:
+        if b.shape[-1] == 2:
+            # a0 * b1 - a1 * b0
+            multiply(a0, b1, out=cp)
+            cp -= a1 * b0
+            return cp
+        else:
+            assert b.shape[-1] == 3
+            # cp0 = a1 * b2 - 0  (a2 = 0)
+            # cp1 = 0 - a0 * b2  (a2 = 0)
+            # cp2 = a0 * b1 - a1 * b0
+            multiply(a1, b2, out=cp0)
+            multiply(a0, b2, out=cp1)
+            negative(cp1, out=cp1)
+            multiply(a0, b1, out=cp2)
+            cp2 -= a1 * b0
+    else:
+        assert a.shape[-1] == 3
+        if b.shape[-1] == 3:
+            # cp0 = a1 * b2 - a2 * b1
+            # cp1 = a2 * b0 - a0 * b2
+            # cp2 = a0 * b1 - a1 * b0
+            multiply(a1, b2, out=cp0)
+            tmp = array(a2 * b1)
+            cp0 -= tmp
+            multiply(a2, b0, out=cp1)
+            multiply(a0, b2, out=tmp)
+            cp1 -= tmp
+            multiply(a0, b1, out=cp2)
+            multiply(a1, b0, out=tmp)
+            cp2 -= tmp
+        else:
+            assert b.shape[-1] == 2
+            # cp0 = 0 - a2 * b1  (b2 = 0)
+            # cp1 = a2 * b0 - 0  (b2 = 0)
+            # cp2 = a0 * b1 - a1 * b0
+            multiply(a2, b1, out=cp0)
+            negative(cp0, out=cp0)
+            multiply(a2, b0, out=cp1)
+            multiply(a0, b1, out=cp2)
+            cp2 -= a1 * b0
+
+    return moveaxis(cp, -1, axisc)
+
+
+little_endian = (sys.byteorder == 'little')
+
+
+@set_module('numpy')
+def indices(dimensions, dtype=int, sparse=False):
+    """
+    Return an array representing the indices of a grid.
+
+    Compute an array where the subarrays contain index values 0, 1, ...
+    varying only along the corresponding axis.
+
+    Parameters
+    ----------
+    dimensions : sequence of ints
+        The shape of the grid.
+    dtype : dtype, optional
+        Data type of the result.
+    sparse : boolean, optional
+        Return a sparse representation of the grid instead of a dense
+        representation. Default is False.
+
+        .. versionadded:: 1.17
+
+    Returns
+    -------
+    grid : one ndarray or tuple of ndarrays
+        If sparse is False:
+            Returns one array of grid indices,
+            ``grid.shape = (len(dimensions),) + tuple(dimensions)``.
+        If sparse is True:
+            Returns a tuple of arrays, with
+            ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)``, where
+            ``dimensions[i]`` is in the ``i``-th position.
+
+    See Also
+    --------
+    mgrid, ogrid, meshgrid
+
+    Notes
+    -----
+    The output shape in the dense case is obtained by prepending the number
+    of dimensions in front of the tuple of dimensions, i.e. if `dimensions`
+    is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is
+    ``(N, r0, ..., rN-1)``.
+
+    The subarray ``grid[k]`` contains the N-D array of indices along the
+    ``k``-th axis. Explicitly::
+
+        grid[k, i0, i1, ..., iN-1] = ik
+
+    Examples
+    --------
+    >>> grid = np.indices((2, 3))
+    >>> grid.shape
+    (2, 2, 3)
+    >>> grid[0]        # row indices
+    array([[0, 0, 0],
+           [1, 1, 1]])
+    >>> grid[1]        # column indices
+    array([[0, 1, 2],
+           [0, 1, 2]])
+
+    The indices can be used as an index into an array.
+
+    >>> x = np.arange(20).reshape(5, 4)
+    >>> row, col = np.indices((2, 3))
+    >>> x[row, col]
+    array([[0, 1, 2],
+           [4, 5, 6]])
+
+    Note that it would be more straightforward in the above example to
+    extract the required elements directly with ``x[:2, :3]``.
+
+    If `sparse` is set to True, the grid will be returned in a sparse
+    representation.
+
+    >>> i, j = np.indices((2, 3), sparse=True)
+    >>> i.shape
+    (2, 1)
+    >>> j.shape
+    (1, 3)
+    >>> i        # row indices
+    array([[0],
+           [1]])
+    >>> j        # column indices
+    array([[0, 1, 2]])
+
+    """
+    dimensions = tuple(dimensions)
+    N = len(dimensions)
+    shape = (1,)*N
+    if sparse:
+        res = tuple()
+    else:
+        res = empty((N,)+dimensions, dtype=dtype)
+    for i, dim in enumerate(dimensions):
+        idx = arange(dim, dtype=dtype).reshape(
+            shape[:i] + (dim,) + shape[i+1:]
+        )
+        if sparse:
+            res = res + (idx,)
+        else:
+            res[i] = idx
+    return res
+
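+# Illustration (not in upstream NumPy): the sparse form returns the same
+# grids, just not broadcast out to full size:
+#
+#     >>> i, j = np.indices((2, 3), sparse=True)
+#     >>> np.array_equal(np.indices((2, 3))[0], np.broadcast_to(i, (2, 3)))
+#     True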
+
+@set_array_function_like_doc
+@set_module('numpy')
+def fromfunction(function, shape, *, dtype=float, like=None, **kwargs):
+    """
+    Construct an array by executing a function over each coordinate.
+
+    The resulting array therefore has a value ``fn(x, y, z)`` at
+    coordinate ``(x, y, z)``.
+
+    Parameters
+    ----------
+    function : callable
+        The function is called with N parameters, where N is the rank of
+        `shape`.  Each parameter represents the coordinates of the array
+        varying along a specific axis.  For example, if `shape`
+        were ``(2, 2)``, then the parameters would be
+        ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])``
+    shape : (N,) tuple of ints
+        Shape of the output array, which also determines the shape of
+        the coordinate arrays passed to `function`.
+    dtype : data-type, optional
+        Data-type of the coordinate arrays passed to `function`.
+        By default, `dtype` is float.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    fromfunction : any
+        The result of the call to `function` is passed back directly.
+        Therefore the shape of `fromfunction` is completely determined by
+        `function`.  If `function` returns a scalar value, the shape of
+        `fromfunction` would not match the `shape` parameter.
+
+    See Also
+    --------
+    indices, meshgrid
+
+    Notes
+    -----
+    Keywords other than `dtype` and `like` are passed to `function`.
+
+    Examples
+    --------
+    >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float)
+    array([[0., 0.],
+           [1., 1.]])
+
+    >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float)
+    array([[0., 1.],
+           [0., 1.]])
+
+    >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
+    array([[ True, False, False],
+           [False,  True, False],
+           [False, False,  True]])
+
+    >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
+    array([[0, 1, 2],
+           [1, 2, 3],
+           [2, 3, 4]])
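+
+    Keyword arguments other than `dtype` and `like` are forwarded to
+    `function`:
+
+    >>> np.fromfunction(lambda i, j, k: i + j + k, (2, 2), k=10.0)
+    array([[10., 11.],
+           [11., 12.]])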
+
+    """
+    if like is not None:
+        return _fromfunction_with_like(
+                like, function, shape, dtype=dtype, **kwargs)
+
+    args = indices(shape, dtype=dtype)
+    return function(*args, **kwargs)
+
+
+_fromfunction_with_like = array_function_dispatch()(fromfunction)
+
+
+def _frombuffer(buf, dtype, shape, order):
+    return frombuffer(buf, dtype=dtype).reshape(shape, order=order)
+
+
+@set_module('numpy')
+def isscalar(element):
+    """
+    Returns True if the type of `element` is a scalar type.
+
+    Parameters
+    ----------
+    element : any
+        Input argument, can be of any type and shape.
+
+    Returns
+    -------
+    val : bool
+        True if `element` is a scalar type, False if it is not.
+
+    See Also
+    --------
+    ndim : Get the number of dimensions of an array
+
+    Notes
+    -----
+    If you need a stricter way to identify a *numerical* scalar, use
+    ``isinstance(x, numbers.Number)``, as that returns ``False`` for most
+    non-numerical elements such as strings.
+
+    In most cases ``np.ndim(x) == 0`` should be used instead of this function,
+    as that will also return true for 0d arrays. This is how numpy overloads
+    functions in the style of the ``dx`` arguments to `gradient` and the ``bins``
+    argument to `histogram`. Some key differences:
+
+    +--------------------------------------+---------------+-------------------+
+    | x                                    |``isscalar(x)``|``np.ndim(x) == 0``|
+    +======================================+===============+===================+
+    | PEP 3141 numeric objects (including  | ``True``      | ``True``          |
+    | builtins)                            |               |                   |
+    +--------------------------------------+---------------+-------------------+
+    | builtin string and buffer objects    | ``True``      | ``True``          |
+    +--------------------------------------+---------------+-------------------+
+    | other builtin objects, like          | ``False``     | ``True``          |
+    | `pathlib.Path`, `Exception`,         |               |                   |
+    | the result of `re.compile`           |               |                   |
+    +--------------------------------------+---------------+-------------------+
+    | third-party objects like             | ``False``     | ``True``          |
+    | `matplotlib.figure.Figure`           |               |                   |
+    +--------------------------------------+---------------+-------------------+
+    | zero-dimensional numpy arrays        | ``False``     | ``True``          |
+    +--------------------------------------+---------------+-------------------+
+    | other numpy arrays                   | ``False``     | ``False``         |
+    +--------------------------------------+---------------+-------------------+
+    | `list`, `tuple`, and other sequence  | ``False``     | ``False``         |
+    | objects                              |               |                   |
+    +--------------------------------------+---------------+-------------------+
+
+    Examples
+    --------
+    >>> np.isscalar(3.1)
+    True
+    >>> np.isscalar(np.array(3.1))
+    False
+    >>> np.isscalar([3.1])
+    False
+    >>> np.isscalar(False)
+    True
+    >>> np.isscalar('numpy')
+    True
+
+    NumPy supports PEP 3141 numbers:
+
+    >>> from fractions import Fraction
+    >>> np.isscalar(Fraction(5, 17))
+    True
+    >>> from numbers import Number
+    >>> np.isscalar(Number())
+    True
+
+    """
+    return (isinstance(element, generic)
+            or type(element) in ScalarType
+            or isinstance(element, numbers.Number))
+
+
+@set_module('numpy')
+def binary_repr(num, width=None):
+    """
+    Return the binary representation of the input number as a string.
+
+    For negative numbers, if width is not given, a minus sign is added to the
+    front. If width is given, the two's complement of the number is
+    returned, with respect to that width.
+
+    In a two's-complement system negative numbers are represented by the two's
+    complement of the absolute value. This is the most common method of
+    representing signed integers on computers [1]_. An N-bit two's-complement
+    system can represent every integer in the range
+    :math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
+
+    Parameters
+    ----------
+    num : int
+        Only an integer decimal number can be used.
+    width : int, optional
+        The length of the returned string if `num` is positive, or the length
+        of the two's complement if `num` is negative, provided that `width` is
+        at least a sufficient number of bits for `num` to be represented in the
+        designated form.
+
+        If the `width` value is insufficient, it will be ignored, and `num` will
+        be returned in binary (`num` > 0) or two's complement (`num` < 0) form
+        with its width equal to the minimum number of bits needed to represent
+        the number in the designated form. This behavior is deprecated and will
+        later raise an error.
+
+        .. deprecated:: 1.12.0
+
+    Returns
+    -------
+    bin : str
+        Binary representation of `num` or two's complement of `num`.
+
+    See Also
+    --------
+    base_repr: Return a string representation of a number in the given base
+               system.
+    bin: Python's built-in binary representation generator of an integer.
+
+    Notes
+    -----
+    `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
+    faster.
+
+    References
+    ----------
+    .. [1] Wikipedia, "Two's complement",
+        https://en.wikipedia.org/wiki/Two's_complement
+
+    Examples
+    --------
+    >>> np.binary_repr(3)
+    '11'
+    >>> np.binary_repr(-3)
+    '-11'
+    >>> np.binary_repr(3, width=4)
+    '0011'
+
+    The two's complement is returned when the input number is negative and
+    width is specified:
+
+    >>> np.binary_repr(-3, width=3)
+    '101'
+    >>> np.binary_repr(-3, width=5)
+    '11101'
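+
+    The most negative value representable in a given width occupies all of
+    its bits:
+
+    >>> np.binary_repr(-128, width=8)
+    '10000000'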
+
+    """
+    def warn_if_insufficient(width, binwidth):
+        if width is not None and width < binwidth:
+            warnings.warn(
+                "Insufficient bit width provided. This behavior "
+                "will raise an error in the future.", DeprecationWarning,
+                stacklevel=3)
+
+    # Ensure that num is a Python integer to avoid overflow or unwanted
+    # casts to floating point.
+    num = operator.index(num)
+
+    if num == 0:
+        return '0' * (width or 1)
+
+    elif num > 0:
+        binary = bin(num)[2:]
+        binwidth = len(binary)
+        outwidth = (binwidth if width is None
+                    else builtins.max(binwidth, width))
+        warn_if_insufficient(width, binwidth)
+        return binary.zfill(outwidth)
+
+    else:
+        if width is None:
+            return '-' + bin(-num)[2:]
+
+        else:
+            poswidth = len(bin(-num)[2:])
+
+            # See gh-8679: remove extra digit
+            # for numbers at boundaries.
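+            # e.g. -8: bin(8) has 4 digits, but the 4-bit two's
+            # complement '1000' already encodes -8.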
+            if 2**(poswidth - 1) == -num:
+                poswidth -= 1
+
+            twocomp = 2**(poswidth + 1) + num
+            binary = bin(twocomp)[2:]
+            binwidth = len(binary)
+
+            outwidth = builtins.max(binwidth, width)
+            warn_if_insufficient(width, binwidth)
+            return '1' * (outwidth - binwidth) + binary
+
+
+@set_module('numpy')
+def base_repr(number, base=2, padding=0):
+    """
+    Return a string representation of a number in the given base system.
+
+    Parameters
+    ----------
+    number : int
+        The value to convert. Positive and negative values are handled.
+    base : int, optional
+        Convert `number` to the `base` number system. The valid range is 2-36,
+        the default value is 2.
+    padding : int, optional
+        Number of zeros padded on the left. Default is 0 (no padding).
+
+    Returns
+    -------
+    out : str
+        String representation of `number` in `base` system.
+
+    See Also
+    --------
+    binary_repr : Faster version of `base_repr` for base 2.
+
+    Examples
+    --------
+    >>> np.base_repr(5)
+    '101'
+    >>> np.base_repr(6, 5)
+    '11'
+    >>> np.base_repr(7, base=5, padding=3)
+    '00012'
+
+    >>> np.base_repr(10, base=16)
+    'A'
+    >>> np.base_repr(32, base=16)
+    '20'
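+
+    Negative numbers are prefixed with a minus sign rather than encoded in
+    two's complement:
+
+    >>> np.base_repr(-12, base=16)
+    '-C'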
+
+    """
+    digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+    if base > len(digits):
+        raise ValueError("Bases greater than 36 not handled in base_repr.")
+    elif base < 2:
+        raise ValueError("Bases less than 2 not handled in base_repr.")
+
+    num = abs(number)
+    res = []
+    while num:
+        res.append(digits[num % base])
+        num //= base
+    if padding:
+        res.append('0' * padding)
+    if number < 0:
+        res.append('-')
+    return ''.join(reversed(res or '0'))
+
+
+# These are all essentially abbreviations
+# These might wind up in a special abbreviations module
+
+
+def _maketup(descr, val):
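+    # Recursively build a nested tuple of `val` that mirrors the field
+    # structure of the dtype described by `descr`.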
+    dt = dtype(descr)
+    # Place val in all scalar tuples:
+    fields = dt.fields
+    if fields is None:
+        return val
+    else:
+        res = [_maketup(fields[name][0], val) for name in dt.names]
+        return tuple(res)
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def identity(n, dtype=None, *, like=None):
+    """
+    Return the identity array.
+
+    The identity array is a square array with ones on
+    the main diagonal.
+
+    Parameters
+    ----------
+    n : int
+        Number of rows (and columns) in `n` x `n` output.
+    dtype : data-type, optional
+        Data-type of the output.  Defaults to ``float``.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        `n` x `n` array with its main diagonal set to one,
+        and all other elements 0.
+
+    Examples
+    --------
+    >>> np.identity(3)
+    array([[1.,  0.,  0.],
+           [0.,  1.,  0.],
+           [0.,  0.,  1.]])
+
+    """
+    if like is not None:
+        return _identity_with_like(like, n, dtype=dtype)
+
+    from numpy import eye
+    return eye(n, dtype=dtype, like=like)
+
+
+_identity_with_like = array_function_dispatch()(identity)
+
+
+def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
+    return (a, b)
+
+
+@array_function_dispatch(_allclose_dispatcher)
+def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
+    """
+    Returns True if two arrays are element-wise equal within a tolerance.
+
+    The tolerance values are positive, typically very small numbers.  The
+    relative difference (`rtol` * abs(`b`)) and the absolute difference
+    `atol` are added together to compare against the absolute difference
+    between `a` and `b`.
+
+    NaNs are treated as equal if they are in the same place and if
+    ``equal_nan=True``.  Infs are treated as equal if they are in the same
+    place and of the same sign in both arrays.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Input arrays to compare.
+    rtol : float
+        The relative tolerance parameter (see Notes).
+    atol : float
+        The absolute tolerance parameter (see Notes).
+    equal_nan : bool
+        Whether to compare NaN's as equal.  If True, NaN's in `a` will be
+        considered equal to NaN's in `b` in the output array.
+
+        .. versionadded:: 1.10.0
+
+    Returns
+    -------
+    allclose : bool
+        Returns True if the two arrays are equal within the given
+        tolerance; False otherwise.
+
+    See Also
+    --------
+    isclose, all, any, equal
+
+    Notes
+    -----
+    If the following equation is element-wise True, then allclose returns
+    True.
+
+     absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
+
+    The above equation is not symmetric in `a` and `b`, so that
+    ``allclose(a, b)`` might be different from ``allclose(b, a)`` in
+    some rare cases.
+
+    The comparison of `a` and `b` uses standard broadcasting, which
+    means that `a` and `b` need not have the same shape in order for
+    ``allclose(a, b)`` to evaluate to True.  The same is true for
+    `equal` but not `array_equal`.
+
+    `allclose` is not defined for non-numeric data types.
+    `bool` is considered a numeric data-type for this purpose.
+
+    Examples
+    --------
+    >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
+    False
+    >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
+    True
+    >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
+    False
+    >>> np.allclose([1.0, np.nan], [1.0, np.nan])
+    False
+    >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
+    True
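+
+    Because the comparison uses broadcasting, the inputs may have
+    different shapes:
+
+    >>> np.allclose(np.ones((2, 3)), 1.0)
+    True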
+
+    """
+    res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
+    return bool(res)
+
+
+def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
+    return (a, b)
+
+
+@array_function_dispatch(_isclose_dispatcher)
+def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
+    """
+    Returns a boolean array where two arrays are element-wise equal within a
+    tolerance.
+
+    The tolerance values are positive, typically very small numbers.  The
+    relative difference (`rtol` * abs(`b`)) and the absolute difference
+    `atol` are added together to compare against the absolute difference
+    between `a` and `b`.
+
+    .. warning:: The default `atol` is not appropriate for comparing numbers
+                 that are much smaller than one (see Notes).
+
+    Parameters
+    ----------
+    a, b : array_like
+        Input arrays to compare.
+    rtol : float
+        The relative tolerance parameter (see Notes).
+    atol : float
+        The absolute tolerance parameter (see Notes).
+    equal_nan : bool
+        Whether to compare NaN's as equal.  If True, NaN's in `a` will be
+        considered equal to NaN's in `b` in the output array.
+
+    Returns
+    -------
+    y : array_like
+        Returns a boolean array of where `a` and `b` are equal within the
+        given tolerance. If both `a` and `b` are scalars, returns a single
+        boolean value.
+
+    See Also
+    --------
+    allclose
+    math.isclose
+
+    Notes
+    -----
+    .. versionadded:: 1.7.0
+
+    For finite values, isclose uses the following equation to test whether
+    two floating point values are equivalent.
+
+     absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
+
+    Unlike the built-in `math.isclose`, the above equation is not symmetric
+    in `a` and `b` -- it assumes `b` is the reference value -- so that
+    `isclose(a, b)` might be different from `isclose(b, a)`. Furthermore,
+    the default value of atol is not zero, and is used to determine what
+    small values should be considered close to zero. The default value is
+    appropriate for expected values of order unity: if the expected values
+    are significantly smaller than one, it can result in false positives.
+    `atol` should be carefully selected for the use case at hand. A zero value
+    for `atol` will result in `False` if either `a` or `b` is zero.
+
+    `isclose` is not defined for non-numeric data types.
+    `bool` is considered a numeric data-type for this purpose.
+
+    Examples
+    --------
+    >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])
+    array([ True, False])
+    >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9])
+    array([ True, True])
+    >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9])
+    array([False,  True])
+    >>> np.isclose([1.0, np.nan], [1.0, np.nan])
+    array([ True, False])
+    >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
+    array([ True, True])
+    >>> np.isclose([1e-8, 1e-7], [0.0, 0.0])
+    array([ True, False])
+    >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0)
+    array([False, False])
+    >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0])
+    array([ True,  True])
+    >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0)
+    array([False,  True])
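+
+    Scalar inputs yield a single boolean:
+
+    >>> np.isclose(1.0, 1.0 + 1e-9)
+    True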
+    """
+    def within_tol(x, y, atol, rtol):
+        with errstate(invalid='ignore'), _no_nep50_warning():
+            return less_equal(abs(x-y), atol + rtol * abs(y))
+
+    x = asanyarray(a)
+    y = asanyarray(b)
+
+    # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
+    # This will cause casting of x later. Also, make sure to allow subclasses
+    # (e.g., for numpy.ma).
+    # NOTE: We explicitly allow timedelta, which used to work. This could
+    #       possibly be deprecated. See also gh-18286.
+    #       timedelta works if `atol` is an integer or also a timedelta.
+    #       Although, the default tolerances are unlikely to be useful
+    if y.dtype.kind != "m":
+        dt = multiarray.result_type(y, 1.)
+        y = asanyarray(y, dtype=dt)
+
+    xfin = isfinite(x)
+    yfin = isfinite(y)
+    if all(xfin) and all(yfin):
+        return within_tol(x, y, atol, rtol)
+    else:
+        finite = xfin & yfin
+        cond = zeros_like(finite, subok=True)
+        # Because we're using boolean indexing, x & y must be the same shape.
+        # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
+        # lib.stride_tricks, though, so we can't import it here.
+        x = x * ones_like(cond)
+        y = y * ones_like(cond)
+        # Avoid subtraction with infinite/nan values...
+        cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
+        # Check for equality of infinite values...
+        cond[~finite] = (x[~finite] == y[~finite])
+        if equal_nan:
+            # Make NaN == NaN
+            both_nan = isnan(x) & isnan(y)
+
+            # Needed to treat masked arrays correctly; assigning the
+            # literal True would not preserve the mask.
+            cond[both_nan] = both_nan[both_nan]
+
+        return cond[()]  # Flatten 0d arrays to scalars
+
+
+def _array_equal_dispatcher(a1, a2, equal_nan=None):
+    return (a1, a2)
+
+
+@array_function_dispatch(_array_equal_dispatcher)
+def array_equal(a1, a2, equal_nan=False):
+    """
+    True if two arrays have the same shape and elements, False otherwise.
+
+    Parameters
+    ----------
+    a1, a2 : array_like
+        Input arrays.
+    equal_nan : bool
+        Whether to compare NaN's as equal. If the dtype of a1 and a2 is
+        complex, values will be considered equal if either the real or the
+        imaginary component of a given value is ``nan``.
+
+        .. versionadded:: 1.19.0
+
+    Returns
+    -------
+    b : bool
+        Returns True if the arrays are equal.
+
+    See Also
+    --------
+    allclose: Returns True if two arrays are element-wise equal within a
+              tolerance.
+    array_equiv: Returns True if input arrays are shape consistent and all
+                 elements equal.
+
+    Examples
+    --------
+    >>> np.array_equal([1, 2], [1, 2])
+    True
+    >>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
+    True
+    >>> np.array_equal([1, 2], [1, 2, 3])
+    False
+    >>> np.array_equal([1, 2], [1, 4])
+    False
+    >>> a = np.array([1, np.nan])
+    >>> np.array_equal(a, a)
+    False
+    >>> np.array_equal(a, a, equal_nan=True)
+    True
+
+    When ``equal_nan`` is True, complex values with nan components are
+    considered equal if either the real *or* the imaginary components are nan.
+
+    >>> a = np.array([1 + 1j])
+    >>> b = a.copy()
+    >>> a.real = np.nan
+    >>> b.imag = np.nan
+    >>> np.array_equal(a, b, equal_nan=True)
+    True
+    """
+    try:
+        a1, a2 = asarray(a1), asarray(a2)
+    except Exception:
+        return False
+    if a1.shape != a2.shape:
+        return False
+    if not equal_nan:
+        return bool(asarray(a1 == a2).all())
+    # Handling NaN values if equal_nan is True
+    a1nan, a2nan = isnan(a1), isnan(a2)
+    # NaN's occur at different locations
+    if not (a1nan == a2nan).all():
+        return False
+    # Shapes of a1, a2 and masks are guaranteed to be consistent by this point
+    return bool(asarray(a1[~a1nan] == a2[~a1nan]).all())
+
+
+def _array_equiv_dispatcher(a1, a2):
+    return (a1, a2)
+
+
+@array_function_dispatch(_array_equiv_dispatcher)
+def array_equiv(a1, a2):
+    """
+    Returns True if input arrays are shape consistent and all elements equal.
+
+    Shape consistent means they are either the same shape, or one input array
+    can be broadcasted to create the same shape as the other one.
+
+    Parameters
+    ----------
+    a1, a2 : array_like
+        Input arrays.
+
+    Returns
+    -------
+    out : bool
+        True if equivalent, False otherwise.
+
+    Examples
+    --------
+    >>> np.array_equiv([1, 2], [1, 2])
+    True
+    >>> np.array_equiv([1, 2], [1, 3])
+    False
+
+    Showing the shape equivalence:
+
+    >>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
+    True
+    >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
+    False
+
+    >>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
+    False
+
+    """
+    try:
+        a1, a2 = asarray(a1), asarray(a2)
+    except Exception:
+        return False
+    try:
+        multiarray.broadcast(a1, a2)
+    except Exception:
+        return False
+
+    return bool(asarray(a1 == a2).all())
+
+
+Inf = inf = infty = Infinity = PINF
+nan = NaN = NAN
+False_ = bool_(False)
+True_ = bool_(True)
+
+
+def extend_all(module):
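+    # Append names exported by `module` that are not already in __all__.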
+    existing = set(__all__)
+    mall = getattr(module, '__all__')
+    for a in mall:
+        if a not in existing:
+            __all__.append(a)
+
+
+from .umath import *
+from .numerictypes import *
+from . import fromnumeric
+from .fromnumeric import *
+from . import arrayprint
+from .arrayprint import *
+from . import _asarray
+from ._asarray import *
+from . import _ufunc_config
+from ._ufunc_config import *
+extend_all(fromnumeric)
+extend_all(umath)
+extend_all(numerictypes)
+extend_all(arrayprint)
+extend_all(_asarray)
+extend_all(_ufunc_config)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/numeric.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/numeric.pyi
new file mode 100644
index 00000000..fc10bb88
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/numeric.pyi
@@ -0,0 +1,660 @@
+import sys
+from collections.abc import Callable, Sequence
+from typing import (
+    Any,
+    overload,
+    TypeVar,
+    Literal,
+    SupportsAbs,
+    SupportsIndex,
+    NoReturn,
+)
+if sys.version_info >= (3, 10):
+    from typing import TypeGuard
+else:
+    from typing_extensions import TypeGuard
+
+from numpy import (
+    ComplexWarning as ComplexWarning,
+    generic,
+    unsignedinteger,
+    signedinteger,
+    floating,
+    complexfloating,
+    bool_,
+    int_,
+    intp,
+    float64,
+    timedelta64,
+    object_,
+    _OrderKACF,
+    _OrderCF,
+)
+
+from numpy._typing import (
+    ArrayLike,
+    NDArray,
+    DTypeLike,
+    _ShapeLike,
+    _DTypeLike,
+    _ArrayLike,
+    _SupportsArrayFunc,
+    _ScalarLike_co,
+    _ArrayLikeBool_co,
+    _ArrayLikeUInt_co,
+    _ArrayLikeInt_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeComplex_co,
+    _ArrayLikeTD64_co,
+    _ArrayLikeObject_co,
+    _ArrayLikeUnknown,
+)
+
+_T = TypeVar("_T")
+_SCT = TypeVar("_SCT", bound=generic)
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+_CorrelateMode = Literal["valid", "same", "full"]
+
+__all__: list[str]
+
+@overload
+def zeros_like(
+    a: _ArrayType,
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    subok: Literal[True] = ...,
+    shape: None = ...,
+) -> _ArrayType: ...
+@overload
+def zeros_like(
+    a: _ArrayLike[_SCT],
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def zeros_like(
+    a: object,
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[Any]: ...
+@overload
+def zeros_like(
+    a: Any,
+    dtype: _DTypeLike[_SCT],
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def zeros_like(
+    a: Any,
+    dtype: DTypeLike,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def ones(
+    shape: _ShapeLike,
+    dtype: None = ...,
+    order: _OrderCF = ...,
+    *,
+    like: _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def ones(
+    shape: _ShapeLike,
+    dtype: _DTypeLike[_SCT],
+    order: _OrderCF = ...,
+    *,
+    like: _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def ones(
+    shape: _ShapeLike,
+    dtype: DTypeLike,
+    order: _OrderCF = ...,
+    *,
+    like: _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def ones_like(
+    a: _ArrayType,
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    subok: Literal[True] = ...,
+    shape: None = ...,
+) -> _ArrayType: ...
+@overload
+def ones_like(
+    a: _ArrayLike[_SCT],
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def ones_like(
+    a: object,
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[Any]: ...
+@overload
+def ones_like(
+    a: Any,
+    dtype: _DTypeLike[_SCT],
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def ones_like(
+    a: Any,
+    dtype: DTypeLike,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def full(
+    shape: _ShapeLike,
+    fill_value: Any,
+    dtype: None = ...,
+    order: _OrderCF = ...,
+    *,
+    like: _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def full(
+    shape: _ShapeLike,
+    fill_value: Any,
+    dtype: _DTypeLike[_SCT],
+    order: _OrderCF = ...,
+    *,
+    like: _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def full(
+    shape: _ShapeLike,
+    fill_value: Any,
+    dtype: DTypeLike,
+    order: _OrderCF = ...,
+    *,
+    like: _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def full_like(
+    a: _ArrayType,
+    fill_value: Any,
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    subok: Literal[True] = ...,
+    shape: None = ...,
+) -> _ArrayType: ...
+@overload
+def full_like(
+    a: _ArrayLike[_SCT],
+    fill_value: Any,
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def full_like(
+    a: object,
+    fill_value: Any,
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[Any]: ...
+@overload
+def full_like(
+    a: Any,
+    fill_value: Any,
+    dtype: _DTypeLike[_SCT],
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def full_like(
+    a: Any,
+    fill_value: Any,
+    dtype: DTypeLike,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: None | _ShapeLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def count_nonzero(
+    a: ArrayLike,
+    axis: None = ...,
+    *,
+    keepdims: Literal[False] = ...,
+) -> int: ...
+@overload
+def count_nonzero(
+    a: ArrayLike,
+    axis: _ShapeLike = ...,
+    *,
+    keepdims: bool = ...,
+) -> Any: ...  # TODO: np.intp or ndarray[np.intp]
+
+def isfortran(a: NDArray[Any] | generic) -> bool: ...
+
+def argwhere(a: ArrayLike) -> NDArray[intp]: ...
+
+def flatnonzero(a: ArrayLike) -> NDArray[intp]: ...
+
+@overload
+def correlate(
+    a: _ArrayLikeUnknown,
+    v: _ArrayLikeUnknown,
+    mode: _CorrelateMode = ...,
+) -> NDArray[Any]: ...
+@overload
+def correlate(
+    a: _ArrayLikeBool_co,
+    v: _ArrayLikeBool_co,
+    mode: _CorrelateMode = ...,
+) -> NDArray[bool_]: ...
+@overload
+def correlate(
+    a: _ArrayLikeUInt_co,
+    v: _ArrayLikeUInt_co,
+    mode: _CorrelateMode = ...,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def correlate(
+    a: _ArrayLikeInt_co,
+    v: _ArrayLikeInt_co,
+    mode: _CorrelateMode = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def correlate(
+    a: _ArrayLikeFloat_co,
+    v: _ArrayLikeFloat_co,
+    mode: _CorrelateMode = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def correlate(
+    a: _ArrayLikeComplex_co,
+    v: _ArrayLikeComplex_co,
+    mode: _CorrelateMode = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def correlate(
+    a: _ArrayLikeTD64_co,
+    v: _ArrayLikeTD64_co,
+    mode: _CorrelateMode = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def correlate(
+    a: _ArrayLikeObject_co,
+    v: _ArrayLikeObject_co,
+    mode: _CorrelateMode = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def convolve(
+    a: _ArrayLikeUnknown,
+    v: _ArrayLikeUnknown,
+    mode: _CorrelateMode = ...,
+) -> NDArray[Any]: ...
+@overload
+def convolve(
+    a: _ArrayLikeBool_co,
+    v: _ArrayLikeBool_co,
+    mode: _CorrelateMode = ...,
+) -> NDArray[bool_]: ...
+@overload
+def convolve(
+    a: _ArrayLikeUInt_co,
+    v: _ArrayLikeUInt_co,
+    mode: _CorrelateMode = ...,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def convolve(
+    a: _ArrayLikeInt_co,
+    v: _ArrayLikeInt_co,
+    mode: _CorrelateMode = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def convolve(
+    a: _ArrayLikeFloat_co,
+    v: _ArrayLikeFloat_co,
+    mode: _CorrelateMode = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def convolve(
+    a: _ArrayLikeComplex_co,
+    v: _ArrayLikeComplex_co,
+    mode: _CorrelateMode = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def convolve(
+    a: _ArrayLikeTD64_co,
+    v: _ArrayLikeTD64_co,
+    mode: _CorrelateMode = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def convolve(
+    a: _ArrayLikeObject_co,
+    v: _ArrayLikeObject_co,
+    mode: _CorrelateMode = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def outer(
+    a: _ArrayLikeUnknown,
+    b: _ArrayLikeUnknown,
+    out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def outer(
+    a: _ArrayLikeBool_co,
+    b: _ArrayLikeBool_co,
+    out: None = ...,
+) -> NDArray[bool_]: ...
+@overload
+def outer(
+    a: _ArrayLikeUInt_co,
+    b: _ArrayLikeUInt_co,
+    out: None = ...,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def outer(
+    a: _ArrayLikeInt_co,
+    b: _ArrayLikeInt_co,
+    out: None = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def outer(
+    a: _ArrayLikeFloat_co,
+    b: _ArrayLikeFloat_co,
+    out: None = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def outer(
+    a: _ArrayLikeComplex_co,
+    b: _ArrayLikeComplex_co,
+    out: None = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def outer(
+    a: _ArrayLikeTD64_co,
+    b: _ArrayLikeTD64_co,
+    out: None = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def outer(
+    a: _ArrayLikeObject_co,
+    b: _ArrayLikeObject_co,
+    out: None = ...,
+) -> NDArray[object_]: ...
+@overload
+def outer(
+    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+    b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+    out: _ArrayType,
+) -> _ArrayType: ...
+
+@overload
+def tensordot(
+    a: _ArrayLikeUnknown,
+    b: _ArrayLikeUnknown,
+    axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[Any]: ...
+@overload
+def tensordot(
+    a: _ArrayLikeBool_co,
+    b: _ArrayLikeBool_co,
+    axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[bool_]: ...
+@overload
+def tensordot(
+    a: _ArrayLikeUInt_co,
+    b: _ArrayLikeUInt_co,
+    axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def tensordot(
+    a: _ArrayLikeInt_co,
+    b: _ArrayLikeInt_co,
+    axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def tensordot(
+    a: _ArrayLikeFloat_co,
+    b: _ArrayLikeFloat_co,
+    axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def tensordot(
+    a: _ArrayLikeComplex_co,
+    b: _ArrayLikeComplex_co,
+    axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def tensordot(
+    a: _ArrayLikeTD64_co,
+    b: _ArrayLikeTD64_co,
+    axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def tensordot(
+    a: _ArrayLikeObject_co,
+    b: _ArrayLikeObject_co,
+    axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def roll(
+    a: _ArrayLike[_SCT],
+    shift: _ShapeLike,
+    axis: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def roll(
+    a: ArrayLike,
+    shift: _ShapeLike,
+    axis: None | _ShapeLike = ...,
+) -> NDArray[Any]: ...
+
+def rollaxis(
+    a: NDArray[_SCT],
+    axis: int,
+    start: int = ...,
+) -> NDArray[_SCT]: ...
+
+def moveaxis(
+    a: NDArray[_SCT],
+    source: _ShapeLike,
+    destination: _ShapeLike,
+) -> NDArray[_SCT]: ...
+
+@overload
+def cross(
+    a: _ArrayLikeUnknown,
+    b: _ArrayLikeUnknown,
+    axisa: int = ...,
+    axisb: int = ...,
+    axisc: int = ...,
+    axis: None | int = ...,
+) -> NDArray[Any]: ...
+@overload
+def cross(
+    a: _ArrayLikeBool_co,
+    b: _ArrayLikeBool_co,
+    axisa: int = ...,
+    axisb: int = ...,
+    axisc: int = ...,
+    axis: None | int = ...,
+) -> NoReturn: ...
+@overload
+def cross(
+    a: _ArrayLikeUInt_co,
+    b: _ArrayLikeUInt_co,
+    axisa: int = ...,
+    axisb: int = ...,
+    axisc: int = ...,
+    axis: None | int = ...,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def cross(
+    a: _ArrayLikeInt_co,
+    b: _ArrayLikeInt_co,
+    axisa: int = ...,
+    axisb: int = ...,
+    axisc: int = ...,
+    axis: None | int = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def cross(
+    a: _ArrayLikeFloat_co,
+    b: _ArrayLikeFloat_co,
+    axisa: int = ...,
+    axisb: int = ...,
+    axisc: int = ...,
+    axis: None | int = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def cross(
+    a: _ArrayLikeComplex_co,
+    b: _ArrayLikeComplex_co,
+    axisa: int = ...,
+    axisb: int = ...,
+    axisc: int = ...,
+    axis: None | int = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def cross(
+    a: _ArrayLikeObject_co,
+    b: _ArrayLikeObject_co,
+    axisa: int = ...,
+    axisb: int = ...,
+    axisc: int = ...,
+    axis: None | int = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def indices(
+    dimensions: Sequence[int],
+    dtype: type[int] = ...,
+    sparse: Literal[False] = ...,
+) -> NDArray[int_]: ...
+@overload
+def indices(
+    dimensions: Sequence[int],
+    dtype: type[int] = ...,
+    sparse: Literal[True] = ...,
+) -> tuple[NDArray[int_], ...]: ...
+@overload
+def indices(
+    dimensions: Sequence[int],
+    dtype: _DTypeLike[_SCT],
+    sparse: Literal[False] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def indices(
+    dimensions: Sequence[int],
+    dtype: _DTypeLike[_SCT],
+    sparse: Literal[True],
+) -> tuple[NDArray[_SCT], ...]: ...
+@overload
+def indices(
+    dimensions: Sequence[int],
+    dtype: DTypeLike,
+    sparse: Literal[False] = ...,
+) -> NDArray[Any]: ...
+@overload
+def indices(
+    dimensions: Sequence[int],
+    dtype: DTypeLike,
+    sparse: Literal[True],
+) -> tuple[NDArray[Any], ...]: ...
+
+def fromfunction(
+    function: Callable[..., _T],
+    shape: Sequence[int],
+    *,
+    dtype: DTypeLike = ...,
+    like: _SupportsArrayFunc = ...,
+    **kwargs: Any,
+) -> _T: ...
+
+def isscalar(element: object) -> TypeGuard[
+    generic | bool | int | float | complex | str | bytes | memoryview
+]: ...
+
+def binary_repr(num: SupportsIndex, width: None | int = ...) -> str: ...
+
+def base_repr(
+    number: SupportsAbs[float],
+    base: float = ...,
+    padding: SupportsIndex = ...,
+) -> str: ...
+
+@overload
+def identity(
+    n: int,
+    dtype: None = ...,
+    *,
+    like: _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def identity(
+    n: int,
+    dtype: _DTypeLike[_SCT],
+    *,
+    like: _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def identity(
+    n: int,
+    dtype: DTypeLike,
+    *,
+    like: _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+def allclose(
+    a: ArrayLike,
+    b: ArrayLike,
+    rtol: float = ...,
+    atol: float = ...,
+    equal_nan: bool = ...,
+) -> bool: ...
+
+@overload
+def isclose(
+    a: _ScalarLike_co,
+    b: _ScalarLike_co,
+    rtol: float = ...,
+    atol: float = ...,
+    equal_nan: bool = ...,
+) -> bool_: ...
+@overload
+def isclose(
+    a: ArrayLike,
+    b: ArrayLike,
+    rtol: float = ...,
+    atol: float = ...,
+    equal_nan: bool = ...,
+) -> NDArray[bool_]: ...
+
+def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ...
+
+def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ...
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/numerictypes.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/numerictypes.py
new file mode 100644
index 00000000..aea41bc2
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/numerictypes.py
@@ -0,0 +1,689 @@
+"""
+numerictypes: Define the numeric type objects
+
+This module is designed so "from numerictypes import \\*" is safe.
+Exported symbols include:
+
+  Dictionary with all registered number types (including aliases):
+    sctypeDict
+
+  Type objects (not all will be available, depends on platform):
+      see variable sctypes for which ones you have
+
+    Bit-width names
+
+    int8 int16 int32 int64 int128
+    uint8 uint16 uint32 uint64 uint128
+    float16 float32 float64 float96 float128 float256
+    complex32 complex64 complex128 complex192 complex256 complex512
+    datetime64 timedelta64
+
+    c-based names
+
+    bool_
+
+    object_
+
+    void, str_, unicode_
+
+    byte, ubyte,
+    short, ushort
+    intc, uintc,
+    intp, uintp,
+    int_, uint,
+    longlong, ulonglong,
+
+    single, csingle,
+    float_, complex_,
+    longfloat, clongfloat,
+
+   As part of the type-hierarchy:    xx -- is bit-width
+
+   generic
+     +-> bool_                                  (kind=b)
+     +-> number
+     |   +-> integer
+     |   |   +-> signedinteger     (intxx)      (kind=i)
+     |   |   |     byte
+     |   |   |     short
+     |   |   |     intc
+     |   |   |     intp
+     |   |   |     int_
+     |   |   |     longlong
+     |   |   \\-> unsignedinteger  (uintxx)     (kind=u)
+     |   |         ubyte
+     |   |         ushort
+     |   |         uintc
+     |   |         uintp
+     |   |         uint_
+     |   |         ulonglong
+     |   +-> inexact
+     |       +-> floating          (floatxx)    (kind=f)
+     |       |     half
+     |       |     single
+     |       |     float_          (double)
+     |       |     longfloat
+     |       \\-> complexfloating  (complexxx)  (kind=c)
+     |             csingle         (singlecomplex)
+     |             complex_        (cfloat, cdouble)
+     |             clongfloat      (longcomplex)
+     +-> flexible
+     |   +-> character
+     |   |     str_     (string_, bytes_)       (kind=S)    [Python 2]
+     |   |     unicode_                         (kind=U)    [Python 2]
+     |   |
+     |   |     bytes_   (string_)               (kind=S)    [Python 3]
+     |   |     str_     (unicode_)              (kind=U)    [Python 3]
+     |   |
+     |   \\-> void                              (kind=V)
+     \\-> object_ (not used much)               (kind=O)
+
+"""
+import numbers
+import warnings
+
+from .multiarray import (
+        ndarray, array, dtype, datetime_data, datetime_as_string,
+        busday_offset, busday_count, is_busday, busdaycalendar
+        )
+from .._utils import set_module
+
+# we add more at the bottom
+__all__ = ['sctypeDict', 'sctypes',
+           'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
+           'maximum_sctype', 'issctype', 'typecodes', 'find_common_type',
+           'issubdtype', 'datetime_data', 'datetime_as_string',
+           'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
+           ]
+
+# we don't need all these imports, but we need to keep them for compatibility
+# for users using np.core.numerictypes.UPPER_TABLE
+from ._string_helpers import (
+    english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE
+)
+
+from ._type_aliases import (
+    sctypeDict,
+    allTypes,
+    bitname,
+    sctypes,
+    _concrete_types,
+    _concrete_typeinfo,
+    _bits_of,
+)
+from ._dtype import _kind_name
+
+# we don't export these for import *, but we do want them accessible
+# as numerictypes.bool, etc.
+from builtins import bool, int, float, complex, object, str, bytes
+from numpy.compat import long, unicode
+
+
+# We use this later
+generic = allTypes['generic']
+
+genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
+                   'int32', 'uint32', 'int64', 'uint64', 'int128',
+                   'uint128', 'float16',
+                   'float32', 'float64', 'float80', 'float96', 'float128',
+                   'float256',
+                   'complex32', 'complex64', 'complex128', 'complex160',
+                   'complex192', 'complex256', 'complex512', 'object']
+
+@set_module('numpy')
+def maximum_sctype(t):
+    """
+    Return the scalar type of highest precision of the same kind as the input.
+
+    Parameters
+    ----------
+    t : dtype or dtype specifier
+        The input data type. This can be a `dtype` object or an object that
+        is convertible to a `dtype`.
+
+    Returns
+    -------
+    out : dtype
+        The highest precision data type of the same kind (`dtype.kind`) as `t`.
+
+    See Also
+    --------
+    obj2sctype, mintypecode, sctype2char
+    dtype
+
+    Examples
+    --------
+    >>> np.maximum_sctype(int)
+    <class 'numpy.int64'>
+    >>> np.maximum_sctype(np.uint8)
+    <class 'numpy.uint64'>
+    >>> np.maximum_sctype(complex)
+    <class 'numpy.complex256'> # may vary
+
+    >>> np.maximum_sctype(str)
+    <class 'numpy.str_'>
+
+    >>> np.maximum_sctype('i2')
+    <class 'numpy.int64'>
+    >>> np.maximum_sctype('f4')
+    <class 'numpy.float128'> # may vary
+
+    """
+    g = obj2sctype(t)
+    if g is None:
+        return t
+    t = g
+    base = _kind_name(dtype(t))
+    if base in sctypes:
+        return sctypes[base][-1]
+    else:
+        return t
+
+
+@set_module('numpy')
+def issctype(rep):
+    """
+    Determines whether the given object represents a scalar data-type.
+
+    Parameters
+    ----------
+    rep : any
+        If `rep` is an instance of a scalar dtype, True is returned. If not,
+        False is returned.
+
+    Returns
+    -------
+    out : bool
+        Boolean result of check whether `rep` is a scalar dtype.
+
+    See Also
+    --------
+    issubsctype, issubdtype, obj2sctype, sctype2char
+
+    Examples
+    --------
+    >>> np.issctype(np.int32)
+    True
+    >>> np.issctype(list)
+    False
+    >>> np.issctype(1.1)
+    False
+
+    Strings are also a scalar type:
+
+    >>> np.issctype(np.dtype('str'))
+    True
+
+    """
+    if not isinstance(rep, (type, dtype)):
+        return False
+    try:
+        res = obj2sctype(rep)
+        if res and res != object_:
+            return True
+        return False
+    except Exception:
+        return False
+
+
+@set_module('numpy')
+def obj2sctype(rep, default=None):
+    """
+    Return the scalar dtype or NumPy equivalent of Python type of an object.
+
+    Parameters
+    ----------
+    rep : any
+        The object of which the type is returned.
+    default : any, optional
+        If given, this is returned for objects whose types can not be
+        determined. If not given, None is returned for those objects.
+
+    Returns
+    -------
+    dtype : dtype or Python type
+        The data type of `rep`.
+
+    See Also
+    --------
+    sctype2char, issctype, issubsctype, issubdtype, maximum_sctype
+
+    Examples
+    --------
+    >>> np.obj2sctype(np.int32)
+    <class 'numpy.int32'>
+    >>> np.obj2sctype(np.array([1., 2.]))
+    <class 'numpy.float64'>
+    >>> np.obj2sctype(np.array([1.j]))
+    <class 'numpy.complex128'>
+
+    >>> np.obj2sctype(dict)
+    <class 'numpy.object_'>
+    >>> np.obj2sctype('string')
+
+    >>> np.obj2sctype(1, default=list)
+    <class 'list'>
+
+    """
+    # prevent abstract classes being upcast
+    if isinstance(rep, type) and issubclass(rep, generic):
+        return rep
+    # extract dtype from arrays
+    if isinstance(rep, ndarray):
+        return rep.dtype.type
+    # fall back on dtype to convert
+    try:
+        res = dtype(rep)
+    except Exception:
+        return default
+    else:
+        return res.type
+
+
+@set_module('numpy')
+def issubclass_(arg1, arg2):
+    """
+    Determine if a class is a subclass of a second class.
+
+    `issubclass_` is equivalent to the Python built-in ``issubclass``,
+    except that it returns False instead of raising a TypeError if one
+    of the arguments is not a class.
+
+    Parameters
+    ----------
+    arg1 : class
+        Input class. True is returned if `arg1` is a subclass of `arg2`.
+    arg2 : class or tuple of classes.
+        Input class. If a tuple of classes, True is returned if `arg1` is a
+        subclass of any of the tuple elements.
+
+    Returns
+    -------
+    out : bool
+        Whether `arg1` is a subclass of `arg2` or not.
+
+    See Also
+    --------
+    issubsctype, issubdtype, issctype
+
+    Examples
+    --------
+    >>> np.issubclass_(np.int32, int)
+    False
+    >>> np.issubclass_(np.int32, float)
+    False
+    >>> np.issubclass_(np.float64, float)
+    True
+
+    """
+    try:
+        return issubclass(arg1, arg2)
+    except TypeError:
+        return False
+
+
+@set_module('numpy')
+def issubsctype(arg1, arg2):
+    """
+    Determine if the first argument is a subclass of the second argument.
+
+    Parameters
+    ----------
+    arg1, arg2 : dtype or dtype specifier
+        Data-types.
+
+    Returns
+    -------
+    out : bool
+        The result.
+
+    See Also
+    --------
+    issctype, issubdtype, obj2sctype
+
+    Examples
+    --------
+    >>> np.issubsctype('S8', str)
+    False
+    >>> np.issubsctype(np.array([1]), int)
+    True
+    >>> np.issubsctype(np.array([1]), float)
+    False
+
+    """
+    return issubclass(obj2sctype(arg1), obj2sctype(arg2))
+
+
+@set_module('numpy')
+def issubdtype(arg1, arg2):
+    r"""
+    Returns True if first argument is a typecode lower/equal in type hierarchy.
+
+    This is like the builtin :func:`issubclass`, but for `dtype`\ s.
+
+    Parameters
+    ----------
+    arg1, arg2 : dtype_like
+        `dtype` or object coercible to one
+
+    Returns
+    -------
+    out : bool
+
+    See Also
+    --------
+    :ref:`arrays.scalars` : Overview of the numpy type hierarchy.
+    issubsctype, issubclass_
+
+    Examples
+    --------
+    `issubdtype` can be used to check the type of arrays:
+
+    >>> ints = np.array([1, 2, 3], dtype=np.int32)
+    >>> np.issubdtype(ints.dtype, np.integer)
+    True
+    >>> np.issubdtype(ints.dtype, np.floating)
+    False
+
+    >>> floats = np.array([1, 2, 3], dtype=np.float32)
+    >>> np.issubdtype(floats.dtype, np.integer)
+    False
+    >>> np.issubdtype(floats.dtype, np.floating)
+    True
+
+    Similar types of different sizes are not subdtypes of each other:
+
+    >>> np.issubdtype(np.float64, np.float32)
+    False
+    >>> np.issubdtype(np.float32, np.float64)
+    False
+
+    but both are subtypes of `floating`:
+
+    >>> np.issubdtype(np.float64, np.floating)
+    True
+    >>> np.issubdtype(np.float32, np.floating)
+    True
+
+    For convenience, dtype-like objects are allowed too:
+
+    >>> np.issubdtype('S1', np.string_)
+    True
+    >>> np.issubdtype('i4', np.signedinteger)
+    True
+
+    """
+    if not issubclass_(arg1, generic):
+        arg1 = dtype(arg1).type
+    if not issubclass_(arg2, generic):
+        arg2 = dtype(arg2).type
+
+    return issubclass(arg1, arg2)
+
+
+# This dictionary allows look up based on any alias for an array data-type
+class _typedict(dict):
+    """
+    Base object for a dictionary for look-up with any alias for an array dtype.
+
+    Instances of `_typedict` cannot be used as dictionaries directly;
+    first they have to be populated.
+
+    """
+
+    def __getitem__(self, obj):
+        return dict.__getitem__(self, obj2sctype(obj))
+
+nbytes = _typedict()
+_alignment = _typedict()
+_maxvals = _typedict()
+_minvals = _typedict()
+def _construct_lookups():
+    for name, info in _concrete_typeinfo.items():
+        obj = info.type
+        nbytes[obj] = info.bits // 8
+        _alignment[obj] = info.alignment
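+        # Integer typeinfo entries also carry max/min values; other
+        # kinds do not, so record None for them.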
+        if len(info) > 5:
+            _maxvals[obj] = info.max
+            _minvals[obj] = info.min
+        else:
+            _maxvals[obj] = None
+            _minvals[obj] = None
+
+_construct_lookups()
+
+
+@set_module('numpy')
+def sctype2char(sctype):
+    """
+    Return the string representation of a scalar dtype.
+
+    Parameters
+    ----------
+    sctype : scalar dtype or object
+        If a scalar dtype, the corresponding string character is
+        returned. If an object, `sctype2char` tries to infer its scalar type
+        and then return the corresponding string character.
+
+    Returns
+    -------
+    typechar : str
+        The string character corresponding to the scalar type.
+
+    Raises
+    ------
+    ValueError
+        If `sctype` is an object for which the type can not be inferred.
+
+    See Also
+    --------
+    obj2sctype, issctype, issubsctype, mintypecode
+
+    Examples
+    --------
+    >>> for sctype in [np.int32, np.double, np.complex_, np.string_, np.ndarray]:
+    ...     print(np.sctype2char(sctype))
+    l # may vary
+    d
+    D
+    S
+    O
+
+    >>> x = np.array([1., 2-1.j])
+    >>> np.sctype2char(x)
+    'D'
+    >>> np.sctype2char(list)
+    'O'
+
+    """
+    sctype = obj2sctype(sctype)
+    if sctype is None:
+        raise ValueError("unrecognized type")
+    if sctype not in _concrete_types:
+        # for compatibility
+        raise KeyError(sctype)
+    return dtype(sctype).char
+
+# Create dictionary of casting functions that wrap sequences
+# indexed by type or type character
+cast = _typedict()
+for key in _concrete_types:
+    cast[key] = lambda x, k=key: array(x, copy=False).astype(k)
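+# For example, cast[np.int32]([1.5, 2.5]) returns array([1, 2], dtype=int32).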
+
+
+def _scalar_type_key(typ):
+    """A ``key`` function for `sorted`."""
+    dt = dtype(typ)
+    return (dt.kind.lower(), dt.itemsize)
+
+
+ScalarType = [int, float, complex, bool, bytes, str, memoryview]
+ScalarType += sorted(_concrete_types, key=_scalar_type_key)
+ScalarType = tuple(ScalarType)
+
+
+# Now add the types we've determined to this module
+for key in allTypes:
+    globals()[key] = allTypes[key]
+    __all__.append(key)
+
+del key
+
+typecodes = {'Character':'c',
+             'Integer':'bhilqp',
+             'UnsignedInteger':'BHILQP',
+             'Float':'efdg',
+             'Complex':'FDG',
+             'AllInteger':'bBhHiIlLqQpP',
+             'AllFloat':'efdgFDG',
+             'Datetime': 'Mm',
+             'All':'?bhilqpBHILQPefdgFDGSUVOMm'}
+
+# backwards compatibility --- deprecated name
+# Formal deprecation: Numpy 1.20.0, 2020-10-19 (see numpy/__init__.py)
+typeDict = sctypeDict
+
+# b -> boolean
+# u -> unsigned integer
+# i -> signed integer
+# f -> floating point
+# c -> complex
+# M -> datetime
+# m -> timedelta
+# S -> string
+# U -> Unicode string
+# V -> record
+# O -> Python object
+_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm']
+
+__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O'
+__len_test_types = len(__test_types)
+
+# Keep incrementing until a common type both can be coerced to
+#  is found.  Otherwise, return None
+def _find_common_coerce(a, b):
+    if a > b:
+        return a
+    try:
+        thisind = __test_types.index(a.char)
+    except ValueError:
+        return None
+    return _can_coerce_all([a, b], start=thisind)
+
+# Find a data-type that all data-types in a list can be coerced to
+def _can_coerce_all(dtypelist, start=0):
+    N = len(dtypelist)
+    if N == 0:
+        return None
+    if N == 1:
+        return dtypelist[0]
+    thisind = start
+    while thisind < __len_test_types:
+        newdtype = dtype(__test_types[thisind])
+        numcoerce = len([x for x in dtypelist if newdtype >= x])
+        if numcoerce == N:
+            return newdtype
+        thisind += 1
+    return None
+
+def _register_types():
+    numbers.Integral.register(integer)
+    numbers.Complex.register(inexact)
+    numbers.Real.register(floating)
+    numbers.Number.register(number)
+
+_register_types()
+
+
+@set_module('numpy')
+def find_common_type(array_types, scalar_types):
+    """
+    Determine common type following standard coercion rules.
+
+    .. deprecated:: NumPy 1.25
+
+        This function is deprecated, use `numpy.promote_types` or
+        `numpy.result_type` instead.  To achieve semantics for the
+        `scalar_types` argument, use `numpy.result_type` and pass the Python
+        values `0`, `0.0`, or `0j`.
+        This will give the same results in almost all cases.
+        More information and rare exceptions can be found in the
+        `NumPy 1.25 release notes
+        <https://numpy.org/doc/stable/release/1.25.0-notes.html>`_.
+
+    Parameters
+    ----------
+    array_types : sequence
+        A list of dtypes or dtype convertible objects representing arrays.
+    scalar_types : sequence
+        A list of dtypes or dtype convertible objects representing scalars.
+
+    Returns
+    -------
+    datatype : dtype
+        The common data type, which is the maximum of `array_types` ignoring
+        `scalar_types`, unless the maximum of `scalar_types` is of a
+        different kind (`dtype.kind`). If the kind is not understood, then
+        None is returned.
+
+    See Also
+    --------
+    dtype, common_type, can_cast, mintypecode
+
+    Examples
+    --------
+    >>> np.find_common_type([], [np.int64, np.float32, complex])
+    dtype('complex128')
+    >>> np.find_common_type([np.int64, np.float32], [])
+    dtype('float64')
+
+    The standard casting rules ensure that a scalar cannot up-cast an
+    array unless the scalar is of a fundamentally different kind of data
+    (i.e. under a different hierarchy in the data type hierarchy) than
+    the array:
+
+    >>> np.find_common_type([np.float32], [np.int64, np.float64])
+    dtype('float32')
+
+    Complex is of a different type, so it up-casts the float in the
+    `array_types` argument:
+
+    >>> np.find_common_type([np.float32], [complex])
+    dtype('complex128')
+
+    Type specifier strings are convertible to dtypes and can therefore
+    be used instead of dtypes:
+
+    >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8'])
+    dtype('complex128')
+
+    """
+    # Deprecated 2022-11-07, NumPy 1.25
+    warnings.warn(
+            "np.find_common_type is deprecated.  Please use `np.result_type` "
+            "or `np.promote_types`.\n"
+            "See https://numpy.org/devdocs/release/1.25.0-notes.html and the "
+            "docs for more information.  (Deprecated NumPy 1.25)",
+            DeprecationWarning, stacklevel=2)
+
+    array_types = [dtype(x) for x in array_types]
+    scalar_types = [dtype(x) for x in scalar_types]
+
+    maxa = _can_coerce_all(array_types)
+    maxsc = _can_coerce_all(scalar_types)
+
+    if maxa is None:
+        return maxsc
+
+    if maxsc is None:
+        return maxa
+
+    try:
+        index_a = _kind_list.index(maxa.kind)
+        index_sc = _kind_list.index(maxsc.kind)
+    except ValueError:
+        return None
+
+    if index_sc > index_a:
+        return _find_common_coerce(maxsc, maxa)
+    else:
+        return maxa
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/numerictypes.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/numerictypes.pyi
new file mode 100644
index 00000000..d05861b2
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/numerictypes.pyi
@@ -0,0 +1,156 @@
+import sys
+import types
+from collections.abc import Iterable
+from typing import (
+    Literal as L,
+    Union,
+    overload,
+    Any,
+    TypeVar,
+    Protocol,
+    TypedDict,
+)
+
+from numpy import (
+    ndarray,
+    dtype,
+    generic,
+    bool_,
+    ubyte,
+    ushort,
+    uintc,
+    uint,
+    ulonglong,
+    byte,
+    short,
+    intc,
+    int_,
+    longlong,
+    half,
+    single,
+    double,
+    longdouble,
+    csingle,
+    cdouble,
+    clongdouble,
+    datetime64,
+    timedelta64,
+    object_,
+    str_,
+    bytes_,
+    void,
+)
+
+from numpy.core._type_aliases import (
+    sctypeDict as sctypeDict,
+    sctypes as sctypes,
+)
+
+from numpy._typing import DTypeLike, ArrayLike, _DTypeLike
+
+_T = TypeVar("_T")
+_SCT = TypeVar("_SCT", bound=generic)
+
+class _CastFunc(Protocol):
+    def __call__(
+        self, x: ArrayLike, k: DTypeLike = ...
+    ) -> ndarray[Any, dtype[Any]]: ...
+
+class _TypeCodes(TypedDict):
+    Character: L['c']
+    Integer: L['bhilqp']
+    UnsignedInteger: L['BHILQP']
+    Float: L['efdg']
+    Complex: L['FDG']
+    AllInteger: L['bBhHiIlLqQpP']
+    AllFloat: L['efdgFDG']
+    Datetime: L['Mm']
+    All: L['?bhilqpBHILQPefdgFDGSUVOMm']
+
+class _typedict(dict[type[generic], _T]):
+    def __getitem__(self, key: DTypeLike) -> _T: ...
+
+if sys.version_info >= (3, 10):
+    _TypeTuple = Union[
+        type[Any],
+        types.UnionType,
+        tuple[Union[type[Any], types.UnionType, tuple[Any, ...]], ...],
+    ]
+else:
+    _TypeTuple = Union[
+        type[Any],
+        tuple[Union[type[Any], tuple[Any, ...]], ...],
+    ]
+
+__all__: list[str]
+
+@overload
+def maximum_sctype(t: _DTypeLike[_SCT]) -> type[_SCT]: ...
+@overload
+def maximum_sctype(t: DTypeLike) -> type[Any]: ...
+
+@overload
+def issctype(rep: dtype[Any] | type[Any]) -> bool: ...
+@overload
+def issctype(rep: object) -> L[False]: ...
+
+@overload
+def obj2sctype(rep: _DTypeLike[_SCT], default: None = ...) -> None | type[_SCT]: ...
+@overload
+def obj2sctype(rep: _DTypeLike[_SCT], default: _T) -> _T | type[_SCT]: ...
+@overload
+def obj2sctype(rep: DTypeLike, default: None = ...) -> None | type[Any]: ...
+@overload
+def obj2sctype(rep: DTypeLike, default: _T) -> _T | type[Any]: ...
+@overload
+def obj2sctype(rep: object, default: None = ...) -> None: ...
+@overload
+def obj2sctype(rep: object, default: _T) -> _T: ...
+
+@overload
+def issubclass_(arg1: type[Any], arg2: _TypeTuple) -> bool: ...
+@overload
+def issubclass_(arg1: object, arg2: object) -> L[False]: ...
+
+def issubsctype(arg1: DTypeLike, arg2: DTypeLike) -> bool: ...
+
+def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> bool: ...
+
+def sctype2char(sctype: DTypeLike) -> str: ...
+
+cast: _typedict[_CastFunc]
+nbytes: _typedict[int]
+typecodes: _TypeCodes
+ScalarType: tuple[
+    type[int],
+    type[float],
+    type[complex],
+    type[bool],
+    type[bytes],
+    type[str],
+    type[memoryview],
+    type[bool_],
+    type[csingle],
+    type[cdouble],
+    type[clongdouble],
+    type[half],
+    type[single],
+    type[double],
+    type[longdouble],
+    type[byte],
+    type[short],
+    type[intc],
+    type[int_],
+    type[longlong],
+    type[timedelta64],
+    type[datetime64],
+    type[object_],
+    type[bytes_],
+    type[str_],
+    type[ubyte],
+    type[ushort],
+    type[uintc],
+    type[uint],
+    type[ulonglong],
+    type[void],
+]
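For orientation, the lookup tables typed in this stub behave like this at runtime (a quick illustration, not part of the diff):

    import numpy as np

    np.typecodes['AllInteger']     # 'bBhHiIlLqQpP'
    np.nbytes[np.float64]          # 8 -- _typedict accepts any DTypeLike key
    np.sctype2char(np.complex128)  # 'D'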
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/overrides.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/overrides.py
new file mode 100644
index 00000000..6403e65b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/overrides.py
@@ -0,0 +1,181 @@
+"""Implementation of __array_function__ overrides from NEP-18."""
+import collections
+import functools
+import os
+
+from .._utils import set_module
+from .._utils._inspect import getargspec
+from numpy.core._multiarray_umath import (
+    add_docstring,  _get_implementing_args, _ArrayFunctionDispatcher)
+
+
+ARRAY_FUNCTIONS = set()
+
+array_function_like_doc = (
+    """like : array_like, optional
+        Reference object to allow the creation of arrays which are not
+        NumPy arrays. If an array-like passed in as ``like`` supports
+        the ``__array_function__`` protocol, the result will be defined
+        by it. In this case, it ensures the creation of an array object
+        compatible with that passed in via this argument."""
+)
+
+def set_array_function_like_doc(public_api):
+    if public_api.__doc__ is not None:
+        public_api.__doc__ = public_api.__doc__.replace(
+            "${ARRAY_FUNCTION_LIKE}",
+            array_function_like_doc,
+        )
+    return public_api
+
+
+add_docstring(
+    _ArrayFunctionDispatcher,
+    """
+    Class to wrap functions with checks for __array_function__ overrides.
+
+    All arguments are required, and can only be passed by position.
+
+    Parameters
+    ----------
+    dispatcher : function or None
+        The dispatcher function that returns a single sequence-like object
+        of all relevant arguments.  It must have the same signature (except
+        the default values) as the actual implementation.
+        If ``None``, this is a ``like=`` dispatcher and the
+        ``_ArrayFunctionDispatcher`` must be called with ``like`` as the
+        first (additional and positional) argument.
+    implementation : function
+        Function that implements the operation on NumPy arrays without
+        overrides.  Arguments passed when calling the ``_ArrayFunctionDispatcher``
+        will be forwarded to this (and the ``dispatcher``) as if using
+        ``*args, **kwargs``.
+
+    Attributes
+    ----------
+    _implementation : function
+        The original implementation passed in.
+    """)
+
+
+# exposed for testing purposes; used internally by _ArrayFunctionDispatcher
+add_docstring(
+    _get_implementing_args,
+    """
+    Collect arguments on which to call __array_function__.
+
+    Parameters
+    ----------
+    relevant_args : iterable of array-like
+        Iterable of possibly array-like arguments to check for
+        __array_function__ methods.
+
+    Returns
+    -------
+    Sequence of arguments with __array_function__ methods, in the order in
+    which they should be called.
+    """)
+
+
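+# A minimal sketch of the protocol these helpers serve (hypothetical
+# `MyArray`, shown as a comment so it is not executed at import time):
+#
+#     class MyArray:
+#         def __array_function__(self, func, types, args, kwargs):
+#             if func is np.sum:
+#                 return "my sum"
+#             return NotImplemented
+#
+# _get_implementing_args([MyArray(), np.arange(3)]) would collect just the
+# MyArray instance, since plain ndarrays use the default implementation.
+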
+ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')
+
+
+def verify_matching_signatures(implementation, dispatcher):
+    """Verify that a dispatcher function has the right signature."""
+    implementation_spec = ArgSpec(*getargspec(implementation))
+    dispatcher_spec = ArgSpec(*getargspec(dispatcher))
+
+    if (implementation_spec.args != dispatcher_spec.args or
+            implementation_spec.varargs != dispatcher_spec.varargs or
+            implementation_spec.keywords != dispatcher_spec.keywords or
+            (bool(implementation_spec.defaults) !=
+             bool(dispatcher_spec.defaults)) or
+            (implementation_spec.defaults is not None and
+             len(implementation_spec.defaults) !=
+             len(dispatcher_spec.defaults))):
+        raise RuntimeError('implementation and dispatcher for %s have '
+                           'different function signatures' % implementation)
+
+    if implementation_spec.defaults is not None:
+        if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults):
+            raise RuntimeError('dispatcher functions can only use None for '
+                               'default argument values')
+
+
+def array_function_dispatch(dispatcher=None, module=None, verify=True,
+                            docs_from_dispatcher=False):
+    """Decorator for adding dispatch with the __array_function__ protocol.
+
+    See NEP-18 for example usage.
+
+    Parameters
+    ----------
+    dispatcher : callable or None
+        Function that when called like ``dispatcher(*args, **kwargs)`` with
+        arguments from the NumPy function call returns an iterable of
+        array-like arguments to check for ``__array_function__``.
+
+        If `None`, the first argument is used as the single `like=` argument
+        and not passed on.  A function implementing `like=` must call its
+        dispatcher with `like` as the first non-keyword argument.
+    module : str, optional
+        __module__ attribute to set on new function, e.g., ``module='numpy'``.
+        By default, module is copied from the decorated function.
+    verify : bool, optional
+        If True, verify that the signatures of the dispatcher and decorated
+        function match exactly: all required and optional arguments
+        should appear in order with the same names, but the default values for
+        all optional arguments should be ``None``. Only disable verification
+        if the dispatcher's signature needs to deviate for some particular
+        reason, e.g., because the function has a signature like
+        ``func(*args, **kwargs)``.
+    docs_from_dispatcher : bool, optional
+        If True, copy docs from the dispatcher function onto the dispatched
+        function, rather than from the implementation. This is useful for
+        functions defined in C, which otherwise don't have docstrings.
+
+    Returns
+    -------
+    Function suitable for decorating the implementation of a NumPy function.
+
+    """
+    def decorator(implementation):
+        if verify:
+            if dispatcher is not None:
+                verify_matching_signatures(implementation, dispatcher)
+            else:
+                # Using __code__ directly, similar to verify_matching_signatures
+                co = implementation.__code__
+                last_arg = co.co_argcount + co.co_kwonlyargcount - 1
+                last_arg = co.co_varnames[last_arg]
+                if last_arg != "like" or co.co_kwonlyargcount == 0:
+                    raise RuntimeError(
+                        "__array_function__ expects `like=` to be the last "
+                        "argument and a keyword-only argument. "
+                        f"{implementation} does not seem to comply.")
+
+        if docs_from_dispatcher:
+            add_docstring(implementation, dispatcher.__doc__)
+
+        public_api = _ArrayFunctionDispatcher(dispatcher, implementation)
+        public_api = functools.wraps(implementation)(public_api)
+
+        if module is not None:
+            public_api.__module__ = module
+
+        ARRAY_FUNCTIONS.add(public_api)
+
+        return public_api
+
+    return decorator
+
+
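+# Usage sketch for the decorator above (hypothetical `my_func`, assuming
+# only the behaviour documented in its docstring):
+#
+#     def _my_func_dispatcher(a, out=None):
+#         return (a, out)
+#
+#     @array_function_dispatch(_my_func_dispatcher, module='numpy')
+#     def my_func(a, out=None):
+#         ...
+#
+# The dispatcher mirrors the signature of my_func but defaults every
+# optional argument to None, as verify_matching_signatures requires.
+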
+def array_function_from_dispatcher(
+        implementation, module=None, verify=True, docs_from_dispatcher=True):
+    """Like array_function_dispatcher, but with function arguments flipped."""
+
+    def decorator(dispatcher):
+        return array_function_dispatch(
+            dispatcher, module, verify=verify,
+            docs_from_dispatcher=docs_from_dispatcher)(implementation)
+    return decorator
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/records.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/records.py
new file mode 100644
index 00000000..0fb49e8f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/records.py
@@ -0,0 +1,1099 @@
+"""
+Record Arrays
+=============
+Record arrays expose the fields of structured arrays as properties.
+
+Most commonly, ndarrays contain elements of a single type, e.g. floats,
+integers, bools etc.  However, it is possible for elements to be combinations
+of these using structured types, such as::
+
+  >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', np.int64), ('y', np.float64)])
+  >>> a
+  array([(1, 2.), (1, 2.)], dtype=[('x', '<i8'), ('y', '<f8')])
+
+  >>> a['x']
+  array([1, 1])
+
+  >>> a['y']
+  array([2., 2.])
+
+Record arrays allow us to access fields as properties::
+
+  >>> ar = np.rec.array(a)
+
+  >>> ar.x
+  array([1, 1])
+
+  >>> ar.y
+  array([2., 2.])
+
+"""
+import warnings
+from collections import Counter
+from contextlib import nullcontext
+
+from .._utils import set_module
+from . import numeric as sb
+from . import numerictypes as nt
+from numpy.compat import os_fspath
+from .arrayprint import _get_legacy_print_mode
+
+# All of the functions allow formats to be a dtype
+__all__ = [
+    'record', 'recarray', 'format_parser',
+    'fromarrays', 'fromrecords', 'fromstring', 'fromfile', 'array',
+]
+
+
+ndarray = sb.ndarray
+
+_byteorderconv = {'b':'>',
+                  'l':'<',
+                  'n':'=',
+                  'B':'>',
+                  'L':'<',
+                  'N':'=',
+                  'S':'s',
+                  's':'s',
+                  '>':'>',
+                  '<':'<',
+                  '=':'=',
+                  '|':'|',
+                  'I':'|',
+                  'i':'|'}
+
+# formats regular expression
+# allows multidimensional spec with a tuple syntax in front
+# of the letter code '(2,3)f4' and ' (  2 ,  3  )  f4  '
+# are equally allowed
+
+numfmt = nt.sctypeDict
+
+
+def find_duplicate(list):
+    """Find duplication in a list, return a list of duplicated elements"""
+    return [
+        item
+        for item, counts in Counter(list).items()
+        if counts > 1
+    ]
+
+
+@set_module('numpy')
+class format_parser:
+    """
+    Class to convert formats, names, titles description to a dtype.
+
+    After constructing the format_parser object, the dtype attribute is
+    the converted data-type:
+    ``dtype = format_parser(formats, names, titles).dtype``
+
+    Attributes
+    ----------
+    dtype : dtype
+        The converted data-type.
+
+    Parameters
+    ----------
+    formats : str or list of str
+        The format description, either specified as a string with
+        comma-separated format descriptions in the form ``'f8, i4, a5'``, or
+        a list of format description strings  in the form
+        ``['f8', 'i4', 'a5']``.
+    names : str or list/tuple of str
+        The field names, either specified as a comma-separated string in the
+        form ``'col1, col2, col3'``, or as a list or tuple of strings in the
+        form ``['col1', 'col2', 'col3']``.
+        An empty list can be used, in that case default field names
+        ('f0', 'f1', ...) are used.
+    titles : sequence
+        Sequence of title strings. An empty list can be used to leave titles
+        out.
+    aligned : bool, optional
+        If True, align the fields by padding as the C-compiler would.
+        Default is False.
+    byteorder : str, optional
+        If specified, all the fields will be changed to the
+        provided byte-order.  Otherwise, the default byte-order is
+        used. For all available string specifiers, see `dtype.newbyteorder`.
+
+    See Also
+    --------
+    dtype, typename, sctype2char
+
+    Examples
+    --------
+    >>> np.format_parser(['<f8', '<i4', '<a5'], ['col1', 'col2', 'col3'],
+    ...                  ['T1', 'T2', 'T3']).dtype
+    dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'), (('T3', 'col3'), 'S5')])
+
+    `names` and/or `titles` can be empty lists. If `titles` is an empty list,
+    titles will simply not appear. If `names` is empty, default field names
+    will be used.
+
+    >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
+    ...                  []).dtype
+    dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', 'S5')])
+    >>> np.format_parser(['<f8', '<i4', '<a5'], [], []).dtype
+    dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', 'S5')])
+
+    """
+
+    def __init__(self, formats, names, titles, aligned=False, byteorder=None):
+        self._parseFormats(formats, aligned)
+        self._setfieldnames(names, titles)
+        self._createdtype(byteorder)
+
+    def _parseFormats(self, formats, aligned=False):
+        """ Parse the field formats """
+
+        if formats is None:
+            raise ValueError("Need formats argument")
+
+        if isinstance(formats, list):
+            dtype = sb.dtype(
+                [
+                    ('f{}'.format(i), format_)
+                    for i, format_ in enumerate(formats)
+                ],
+                aligned,
+            )
+        else:
+            dtype = sb.dtype(formats, aligned)
+        fields = dtype.fields
+        if fields is None:
+            dtype = sb.dtype([('f1', dtype)], aligned)
+            fields = dtype.fields
+        keys = dtype.names
+        self._f_formats = [fields[key][0] for key in keys]
+        self._offsets = [fields[key][1] for key in keys]
+        self._nfields = len(keys)
+
+    def _setfieldnames(self, names, titles):
+        """convert input field names into a list and assign to the _names
+        attribute """
+
+        if names:
+            if type(names) in [list, tuple]:
+                pass
+            elif isinstance(names, str):
+                names = names.split(',')
+            else:
+                raise NameError("illegal input names %s" % repr(names))
+
+            self._names = [n.strip() for n in names[:self._nfields]]
+        else:
+            self._names = []
+
+        # if the names are not specified, they will be assigned as
+        #  "f0, f1, f2,..."
+        # if not enough names are specified, they will be assigned as "f%d"
+        # starting from the number of specified names
+        self._names += ['f%d' % i for i in range(len(self._names),
+                                                 self._nfields)]
+
+        # check for redundant names
+        _dup = find_duplicate(self._names)
+        if _dup:
+            raise ValueError("Duplicate field names: %s" % _dup)
+
+        if titles:
+            self._titles = [n.strip() for n in titles[:self._nfields]]
+        else:
+            self._titles = []
+            titles = []
+
+        if self._nfields > len(titles):
+            self._titles += [None] * (self._nfields - len(titles))
+
+    def _createdtype(self, byteorder):
+        dtype = sb.dtype({
+            'names': self._names,
+            'formats': self._f_formats,
+            'offsets': self._offsets,
+            'titles': self._titles,
+        })
+        if byteorder is not None:
+            byteorder = _byteorderconv[byteorder[0]]
+            dtype = dtype.newbyteorder(byteorder)
+
+        self.dtype = dtype
+
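+# For example (a sketch, not part of the vendored source): forcing
+# big-endian fields,
+#     format_parser(['i4', 'f8'], names='x,y', titles=[], byteorder='>').dtype
+# yields dtype([('x', '>i4'), ('y', '>f8')]).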
+
+class record(nt.void):
+    """A data-type scalar that allows field access as attribute lookup.
+    """
+
+    # manually set name and module so that this class's type shows up
+    # as numpy.record when printed
+    __name__ = 'record'
+    __module__ = 'numpy'
+
+    def __repr__(self):
+        if _get_legacy_print_mode() <= 113:
+            return self.__str__()
+        return super().__repr__()
+
+    def __str__(self):
+        if _get_legacy_print_mode() <= 113:
+            return str(self.item())
+        return super().__str__()
+
+    def __getattribute__(self, attr):
+        if attr in ('setfield', 'getfield', 'dtype'):
+            return nt.void.__getattribute__(self, attr)
+        try:
+            return nt.void.__getattribute__(self, attr)
+        except AttributeError:
+            pass
+        fielddict = nt.void.__getattribute__(self, 'dtype').fields
+        res = fielddict.get(attr, None)
+        if res:
+            obj = self.getfield(*res[:2])
+            # if it has fields return a record,
+            # otherwise return the object
+            try:
+                dt = obj.dtype
+            except AttributeError:
+                #happens if field is Object type
+                return obj
+            if dt.names is not None:
+                return obj.view((self.__class__, obj.dtype))
+            return obj
+        else:
+            raise AttributeError("'record' object has no "
+                    "attribute '%s'" % attr)
+
+    def __setattr__(self, attr, val):
+        if attr in ('setfield', 'getfield', 'dtype'):
+            raise AttributeError("Cannot set '%s' attribute" % attr)
+        fielddict = nt.void.__getattribute__(self, 'dtype').fields
+        res = fielddict.get(attr, None)
+        if res:
+            return self.setfield(val, *res[:2])
+        else:
+            if getattr(self, attr, None):
+                return nt.void.__setattr__(self, attr, val)
+            else:
+                raise AttributeError("'record' object has no "
+                        "attribute '%s'" % attr)
+
+    def __getitem__(self, indx):
+        obj = nt.void.__getitem__(self, indx)
+
+        # copy behavior of record.__getattribute__,
+        if isinstance(obj, nt.void) and obj.dtype.names is not None:
+            return obj.view((self.__class__, obj.dtype))
+        else:
+            # return a single element
+            return obj
+
+    def pprint(self):
+        """Pretty-print all fields."""
+        # pretty-print all fields
+        names = self.dtype.names
+        maxlen = max(len(name) for name in names)
+        fmt = '%% %ds: %%s' % maxlen
+        rows = [fmt % (name, getattr(self, name)) for name in names]
+        return "\n".join(rows)
+
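+# For instance (sketch): a record scalar taken from a structured array,
+#     >>> r = np.rec.array([(1, 2.0)], dtype=[('x', 'i8'), ('y', 'f8')])[0]
+#     >>> r.x
+#     1
+#     >>> print(r.pprint())
+#     x: 1
+#     y: 2.0
+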
+# The recarray is almost identical to a standard array (which supports
+#   named fields already)  The biggest difference is that it can use
+#   attribute-lookup to find the fields and it is constructed using
+#   a record.
+
+# If byteorder is given it forces a particular byteorder on all
+#  the fields (and any subfields)
+
+class recarray(ndarray):
+    """Construct an ndarray that allows field access using attributes.
+
+    Arrays may have a data-type containing fields, analogous
+    to columns in a spreadsheet.  An example is ``[(x, int), (y, float)]``,
+    where each entry in the array is a pair of ``(int, float)``.  Normally,
+    these attributes are accessed using dictionary lookups such as ``arr['x']``
+    and ``arr['y']``.  Record arrays allow the fields to be accessed as members
+    of the array, using ``arr.x`` and ``arr.y``.
+
+    Parameters
+    ----------
+    shape : tuple
+        Shape of output array.
+    dtype : data-type, optional
+        The desired data-type.  By default, the data-type is determined
+        from `formats`, `names`, `titles`, `aligned` and `byteorder`.
+    formats : list of data-types, optional
+        A list containing the data-types for the different columns, e.g.
+        ``['i4', 'f8', 'i4']``.  `formats` does *not* support the new
+        convention of using types directly, i.e. ``(int, float, int)``.
+        Note that `formats` must be a list, not a tuple.
+        Given that `formats` is somewhat limited, we recommend specifying
+        `dtype` instead.
+    names : tuple of str, optional
+        The name of each column, e.g. ``('x', 'y', 'z')``.
+    buf : buffer, optional
+        By default, a new array is created of the given shape and data-type.
+        If `buf` is specified and is an object exposing the buffer interface,
+        the array will use the memory from the existing buffer.  In this case,
+        the `offset` and `strides` keywords are available.
+
+    Other Parameters
+    ----------------
+    titles : tuple of str, optional
+        Aliases for column names.  For example, if `names` were
+        ``('x', 'y', 'z')`` and `titles` is
+        ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then
+        ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.
+    byteorder : {'<', '>', '='}, optional
+        Byte-order for all fields.
+    aligned : bool, optional
+        Align the fields in memory as the C-compiler would.
+    strides : tuple of ints, optional
+        Buffer (`buf`) is interpreted according to these strides (strides
+        define how many bytes each array element, row, column, etc.
+        occupy in memory).
+    offset : int, optional
+        Start reading buffer (`buf`) from this offset onwards.
+    order : {'C', 'F'}, optional
+        Row-major (C-style) or column-major (Fortran-style) order.
+
+    Returns
+    -------
+    rec : recarray
+        Empty array of the given shape and type.
+
+    See Also
+    --------
+    core.records.fromrecords : Construct a record array from data.
+    record : fundamental data-type for `recarray`.
+    format_parser : determine a data-type from formats, names, titles.
+
+    Notes
+    -----
+    This constructor can be compared to ``empty``: it creates a new record
+    array but does not fill it with data.  To create a record array from data,
+    use one of the following methods:
+
+    1. Create a standard ndarray and convert it to a record array,
+       using ``arr.view(np.recarray)``
+    2. Use the `buf` keyword.
+    3. Use `np.rec.fromrecords`.
+
+    Examples
+    --------
+    Create an array with two fields, ``x`` and ``y``:
+
+    >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '<f8'), ('y', np.int64)])
+    >>> x
+    array([(1., 2), (3., 4)], dtype=[('x', '<f8'), ('y', '<i8')])
+
+    >>> x['x']
+    array([1., 3.])
+
+    View the array as a record array:
+
+    >>> x = x.view(np.recarray)
+
+    >>> x.x
+    array([1., 3.])
+
+    >>> x.y
+    array([2, 4])
+
+    Create a new, empty record array:
+
+    >>> np.recarray((2,),
+    ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP
+    rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),
+           (3471280, 1.2134086255804012e-316, 0)],
+          dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])
+
+    """
+
+    # manually set name and module so that this class's type shows
+    # up as "numpy.recarray" when printed
+    __name__ = 'recarray'
+    __module__ = 'numpy'
+
+    def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
+                formats=None, names=None, titles=None,
+                byteorder=None, aligned=False, order='C'):
+
+        if dtype is not None:
+            descr = sb.dtype(dtype)
+        else:
+            descr = format_parser(formats, names, titles, aligned, byteorder).dtype
+
+        if buf is None:
+            self = ndarray.__new__(subtype, shape, (record, descr), order=order)
+        else:
+            self = ndarray.__new__(subtype, shape, (record, descr),
+                                   buffer=buf, offset=offset,
+                                   strides=strides, order=order)
+        return self
+
+    def __array_finalize__(self, obj):
+        if self.dtype.type is not record and self.dtype.names is not None:
+            # if self.dtype is not np.record, invoke __setattr__ which will
+            # convert it to a record if it is a void dtype.
+            self.dtype = self.dtype
+
+    def __getattribute__(self, attr):
+        # See if ndarray has this attr, and return it if so. (note that this
+        # means a field with the same name as an ndarray attr cannot be
+        # accessed by attribute).
+        try:
+            return object.__getattribute__(self, attr)
+        except AttributeError:  # attr must be a fieldname
+            pass
+
+        # look for a field with this name
+        fielddict = ndarray.__getattribute__(self, 'dtype').fields
+        try:
+            res = fielddict[attr][:2]
+        except (TypeError, KeyError) as e:
+            raise AttributeError("recarray has no attribute %s" % attr) from e
+        obj = self.getfield(*res)
+
+        # At this point obj will always be a recarray, since (see
+        # PyArray_GetField) the type of obj is inherited. Next, if obj.dtype
+        # is non-structured, convert it to an ndarray. Then if obj is
+        # structured with void type convert it to the same dtype.type (eg to
+        # preserve numpy.record type if present), since nested structured
+        # fields do not inherit type. Don't do this for non-void structures.
+        if obj.dtype.names is not None:
+            if issubclass(obj.dtype.type, nt.void):
+                return obj.view(dtype=(self.dtype.type, obj.dtype))
+            return obj
+        else:
+            return obj.view(ndarray)
+
+    # Save the dictionary.
+    # If the attr is a field name and not in the saved dictionary,
+    # undo any "setting" of the attribute and do a setfield.
+    # Thus, you can't create attributes on-the-fly that are field names.
+    def __setattr__(self, attr, val):
+        # Automatically convert (void) structured types to records
+        # (but not non-void structures, subarrays, or non-structured voids)
+        if attr == 'dtype' and issubclass(val.type, nt.void) and \
+                val.names is not None:
+            val = sb.dtype((record, val))
+
+        newattr = attr not in self.__dict__
+        try:
+            ret = object.__setattr__(self, attr, val)
+        except Exception:
+            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
+            if attr not in fielddict:
+                raise
+        else:
+            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
+            if attr not in fielddict:
+                return ret
+            if newattr:
+                # We just added this one or this setattr worked on an
+                # internal attribute.
+                try:
+                    object.__delattr__(self, attr)
+                except Exception:
+                    return ret
+        try:
+            res = fielddict[attr][:2]
+        except (TypeError, KeyError) as e:
+            raise AttributeError(
+                "record array has no attribute %s" % attr
+            ) from e
+        return self.setfield(val, *res)
+
+    def __getitem__(self, indx):
+        obj = super().__getitem__(indx)
+
+        # copy behavior of getattr, except that here
+        # we might also be returning a single element
+        if isinstance(obj, ndarray):
+            if obj.dtype.names is not None:
+                obj = obj.view(type(self))
+                if issubclass(obj.dtype.type, nt.void):
+                    return obj.view(dtype=(self.dtype.type, obj.dtype))
+                return obj
+            else:
+                return obj.view(type=ndarray)
+        else:
+            # return a single element
+            return obj
+
+    def __repr__(self):
+        repr_dtype = self.dtype
+        if self.dtype.type is record or not issubclass(self.dtype.type, nt.void):
+            # If this is a full record array (has numpy.record dtype),
+            # or if it has a scalar (non-void) dtype with no records,
+            # represent it using the rec.array function. Since rec.array
+            # converts dtype to a numpy.record for us, convert back
+            # to non-record before printing
+            if repr_dtype.type is record:
+                repr_dtype = sb.dtype((nt.void, repr_dtype))
+            prefix = "rec.array("
+            fmt = 'rec.array(%s,%sdtype=%s)'
+        else:
+            # otherwise represent it using np.array plus a view
+            # This should only happen if the user is playing
+            # strange games with dtypes.
+            prefix = "array("
+            fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)'
+
+        # get data/shape string. logic taken from numpy.ndarray.__repr__
+        if self.size > 0 or self.shape == (0,):
+            lst = sb.array2string(
+                self, separator=', ', prefix=prefix, suffix=',')
+        else:
+            # show zero-length shape unless it is (0,)
+            lst = "[], shape=%s" % (repr(self.shape),)
+
+        lf = '\n'+' '*len(prefix)
+        if _get_legacy_print_mode() <= 113:
+            lf = ' ' + lf  # trailing space
+        return fmt % (lst, lf, repr_dtype)
+
+    def field(self, attr, val=None):
+        if isinstance(attr, int):
+            names = ndarray.__getattribute__(self, 'dtype').names
+            attr = names[attr]
+
+        fielddict = ndarray.__getattribute__(self, 'dtype').fields
+
+        res = fielddict[attr][:2]
+
+        if val is None:
+            obj = self.getfield(*res)
+            if obj.dtype.names is not None:
+                return obj
+            return obj.view(ndarray)
+        else:
+            return self.setfield(val, *res)
+
+
+def _deprecate_shape_0_as_None(shape):
+    if shape == 0:
+        warnings.warn(
+            "Passing `shape=0` to have the shape be inferred is deprecated, "
+            "and in future will be equivalent to `shape=(0,)`. To infer "
+            "the shape and suppress this warning, pass `shape=None` instead.",
+            FutureWarning, stacklevel=3)
+        return None
+    else:
+        return shape
+
+
+@set_module("numpy.rec")
+def fromarrays(arrayList, dtype=None, shape=None, formats=None,
+               names=None, titles=None, aligned=False, byteorder=None):
+    """Create a record array from a (flat) list of arrays
+
+    Parameters
+    ----------
+    arrayList : list or tuple
+        List of array-like objects (such as lists, tuples,
+        and ndarrays).
+    dtype : data-type, optional
+        valid dtype for all arrays
+    shape : int or tuple of ints, optional
+        Shape of the resulting array. If not provided, inferred from
+        ``arrayList[0]``.
+    formats, names, titles, aligned, byteorder :
+        If `dtype` is ``None``, these arguments are passed to
+        `numpy.format_parser` to construct a dtype. See that function for
+        detailed documentation.
+
+    Returns
+    -------
+    np.recarray
+        Record array consisting of given arrayList columns.
+
+    Examples
+    --------
+    >>> x1=np.array([1,2,3,4])
+    >>> x2=np.array(['a','dd','xyz','12'])
+    >>> x3=np.array([1.1,2,3,4])
+    >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
+    >>> print(r[1])
+    (2, 'dd', 2.0) # may vary
+    >>> x1[1]=34
+    >>> r.a
+    array([1, 2, 3, 4])
+
+    >>> x1 = np.array([1, 2, 3, 4])
+    >>> x2 = np.array(['a', 'dd', 'xyz', '12'])
+    >>> x3 = np.array([1.1, 2, 3,4])
+    >>> r = np.core.records.fromarrays(
+    ...     [x1, x2, x3],
+    ...     dtype=np.dtype([('a', np.int32), ('b', 'S3'), ('c', np.float32)]))
+    >>> r
+    rec.array([(1, b'a', 1.1), (2, b'dd', 2. ), (3, b'xyz', 3. ),
+               (4, b'12', 4. )],
+              dtype=[('a', '<i4'), ('b', 'S3'), ('c', '<f4')])
+    """
+
+    arrayList = [sb.asarray(x) for x in arrayList]
+
+    # NumPy 1.19.0, 2020-01-01
+    shape = _deprecate_shape_0_as_None(shape)
+
+    if shape is None:
+        shape = arrayList[0].shape
+    elif isinstance(shape, int):
+        shape = (shape,)
+
+    if formats is None and dtype is None:
+        # go through each object in the list to see if it is an ndarray
+        # and determine the formats.
+        formats = [obj.dtype for obj in arrayList]
+
+    if dtype is not None:
+        descr = sb.dtype(dtype)
+    else:
+        descr = format_parser(formats, names, titles, aligned, byteorder).dtype
+    _names = descr.names
+
+    # Determine shape from data-type.
+    if len(descr) != len(arrayList):
+        raise ValueError("mismatch between the number of fields "
+                         "and the number of arrays")
+
+    d0 = descr[0].shape
+    nn = len(d0)
+    if nn > 0:
+        shape = shape[:-nn]
+
+    _array = recarray(shape, descr)
+
+    # populate the record array (makes a copy)
+    for k, obj in enumerate(arrayList):
+        nn = descr[k].ndim
+        testshape = obj.shape[:obj.ndim - nn]
+        name = _names[k]
+        if testshape != shape:
+            raise ValueError(f'array-shape mismatch in array {k} ("{name}")')
+
+        _array[name] = obj
+
+    return _array
+
+
+@set_module("numpy.rec")
+def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
+                titles=None, aligned=False, byteorder=None):
+    """Create a recarray from a list of records in text form.
+
+    Parameters
+    ----------
+    recList : sequence
+        data in the same field may be heterogeneous - they will be promoted
+        to the highest data type.
+    dtype : data-type, optional
+        valid dtype for all arrays
+    shape : int or tuple of ints, optional
+        shape of each array.
+    formats, names, titles, aligned, byteorder :
+        If `dtype` is ``None``, these arguments are passed to
+        `numpy.format_parser` to construct a dtype. See that function for
+        detailed documentation.
+
+        If both `formats` and `dtype` are None, then this will auto-detect
+        formats. Use list of tuples rather than list of lists for faster
+        processing.
+
+    Returns
+    -------
+    np.recarray
+        record array consisting of given recList rows.
+
+    Examples
+    --------
+    >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
+    ... names='col1,col2,col3')
+    >>> print(r[0])
+    (456, 'dbe', 1.2)
+    >>> r.col1
+    array([456,   2])
+    >>> r.col2
+    array(['dbe', 'de'], dtype='<U3')
+    >>> import pickle
+    >>> pickle.loads(pickle.dumps(r))
+    rec.array([(456, 'dbe', 1.2), (  2, 'de', 1.3)],
+              dtype=[('col1', '<i8'), ('col2', '<U3'), ('col3', '<f8')])
+    """
+
+    if formats is None and dtype is None:  # slower
+        obj = sb.array(recList, dtype=object)
+        arrlist = [sb.array(obj[..., i].tolist())
+                   for i in range(obj.shape[-1])]
+        return fromarrays(arrlist, formats=formats, shape=shape, names=names,
+                          titles=titles, aligned=aligned, byteorder=byteorder)
+
+    if dtype is not None:
+        descr = sb.dtype((record, dtype))
+    else:
+        descr = format_parser(formats, names, titles, aligned, byteorder).dtype
+
+    try:
+        retval = sb.array(recList, dtype=descr)
+    except (TypeError, ValueError):
+        # NumPy 1.19.0, 2020-01-01
+        shape = _deprecate_shape_0_as_None(shape)
+        if shape is None:
+            shape = len(recList)
+        if isinstance(shape, int):
+            shape = (shape,)
+        if len(shape) > 1:
+            raise ValueError("Can only deal with 1-d array.")
+        _array = recarray(shape, descr)
+        for k in range(_array.size):
+            _array[k] = tuple(recList[k])
+        # list of lists instead of list of tuples ?
+        # 2018-02-07, 1.14.1
+        warnings.warn(
+            "fromrecords expected a list of tuples, may have received a list "
+            "of lists instead. In the future that will raise an error",
+            FutureWarning, stacklevel=2)
+        return _array
+    else:
+        if shape is not None and retval.shape != shape:
+            retval.shape = shape
+
+    res = retval.view(recarray)
+
+    return res
+
+
+@set_module("numpy.rec")
+def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
+               names=None, titles=None, aligned=False, byteorder=None):
+    r"""Create a record array from binary data
+
+    Note that despite the name of this function it does not accept `str`
+    instances.
+
+    Parameters
+    ----------
+    datastring : bytes-like
+        Buffer of binary data
+    dtype : data-type, optional
+        Valid dtype for all arrays
+    shape : int or tuple of ints, optional
+        Shape of each array.
+    offset : int, optional
+        Position in the buffer to start reading from.
+    formats, names, titles, aligned, byteorder :
+        If `dtype` is ``None``, these arguments are passed to
+        `numpy.format_parser` to construct a dtype. See that function for
+        detailed documentation.
+
+
+    Returns
+    -------
+    np.recarray
+        Record array view into the data in datastring. This will be readonly
+        if `datastring` is readonly.
+
+    See Also
+    --------
+    numpy.frombuffer
+
+    Examples
+    --------
+    >>> a = b'\x01\x02\x03abc'
+    >>> np.core.records.fromstring(a, dtype='u1,u1,u1,S3')
+    rec.array([(1, 2, 3, b'abc')],
+            dtype=[('f0', 'u1'), ('f1', 'u1'), ('f2', 'u1'), ('f3', 'S3')])
+
+    >>> grades_dtype = [('Name', (np.str_, 10)), ('Marks', np.float64),
+    ...                 ('GradeLevel', np.int32)]
+    >>> grades_array = np.array([('Sam', 33.3, 3), ('Mike', 44.4, 5),
+    ...                         ('Aadi', 66.6, 6)], dtype=grades_dtype)
+    >>> np.core.records.fromstring(grades_array.tobytes(), dtype=grades_dtype)
+    rec.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), ('Aadi', 66.6, 6)],
+            dtype=[('Name', '<U10'), ('Marks', '<f8'), ('GradeLevel', '<i4')])
+
+    >>> s = '\x01\x02\x03abc'
+    >>> np.core.records.fromstring(s, dtype='u1,u1,u1,S3')
+    Traceback (most recent call last):
+       ...
+    TypeError: a bytes-like object is required, not 'str'
+    """
+
+    if dtype is None and formats is None:
+        raise TypeError("fromstring() needs a 'dtype' or 'formats' argument")
+
+    if dtype is not None:
+        descr = sb.dtype(dtype)
+    else:
+        descr = format_parser(formats, names, titles, aligned, byteorder).dtype
+
+    itemsize = descr.itemsize
+
+    # NumPy 1.19.0, 2020-01-01
+    shape = _deprecate_shape_0_as_None(shape)
+
+    if shape in (None, -1):
+        shape = (len(datastring) - offset) // itemsize
+
+    _array = recarray(shape, descr, buf=datastring, offset=offset)
+    return _array
+
+def get_remaining_size(fd):
+    pos = fd.tell()
+    try:
+        fd.seek(0, 2)
+        return fd.tell() - pos
+    finally:
+        fd.seek(pos, 0)
+
+
+@set_module("numpy.rec")
+def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
+             names=None, titles=None, aligned=False, byteorder=None):
+    """Create an array from binary file data
+
+    Parameters
+    ----------
+    fd : str or file type
+        If file is a string or a path-like object then that file is opened,
+        else it is assumed to be a file object. The file object must
+        support random access (i.e. it must have tell and seek methods).
+    dtype : data-type, optional
+        valid dtype for all arrays
+    shape : int or tuple of ints, optional
+        shape of each array.
+    offset : int, optional
+        Position in the file to start reading from.
+    formats, names, titles, aligned, byteorder :
+        If `dtype` is ``None``, these arguments are passed to
+        `numpy.format_parser` to construct a dtype. See that function for
+        detailed documentation
+
+    Returns
+    -------
+    np.recarray
+        record array consisting of data enclosed in file.
+
+    Examples
+    --------
+    >>> from tempfile import TemporaryFile
+    >>> a = np.empty(10,dtype='f8,i4,a5')
+    >>> a[5] = (0.5,10,'abcde')
+    >>>
+    >>> fd=TemporaryFile()
+    >>> a = a.newbyteorder('<')
+    >>> a.tofile(fd)
+    >>>
+    >>> _ = fd.seek(0)
+    >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
+    ... byteorder='<')
+    >>> print(r[5])
+    (0.5, 10, 'abcde')
+    >>> r.shape
+    (10,)
+    """
+
+    if dtype is None and formats is None:
+        raise TypeError("fromfile() needs a 'dtype' or 'formats' argument")
+
+    # NumPy 1.19.0, 2020-01-01
+    shape = _deprecate_shape_0_as_None(shape)
+
+    if shape is None:
+        shape = (-1,)
+    elif isinstance(shape, int):
+        shape = (shape,)
+
+    if hasattr(fd, 'readinto'):
+        # GH issue 2504. fd supports io.RawIOBase or io.BufferedIOBase interface.
+        # Example of fd: gzip, BytesIO, BufferedReader
+        # file already opened
+        ctx = nullcontext(fd)
+    else:
+        # open file
+        ctx = open(os_fspath(fd), 'rb')
+
+    with ctx as fd:
+        if offset > 0:
+            fd.seek(offset, 1)
+        size = get_remaining_size(fd)
+
+        if dtype is not None:
+            descr = sb.dtype(dtype)
+        else:
+            descr = format_parser(formats, names, titles, aligned, byteorder).dtype
+
+        itemsize = descr.itemsize
+
+        shapeprod = sb.array(shape).prod(dtype=nt.intp)
+        shapesize = shapeprod * itemsize
+        if shapesize < 0:
+            shape = list(shape)
+            shape[shape.index(-1)] = size // -shapesize
+            shape = tuple(shape)
+            shapeprod = sb.array(shape).prod(dtype=nt.intp)
+
+        nbytes = shapeprod * itemsize
+
+        if nbytes > size:
+            raise ValueError(
+                    "Not enough bytes left in file for specified shape and type")
+
+        # create the array
+        _array = recarray(shape, descr)
+        nbytesread = fd.readinto(_array.data)
+        if nbytesread != nbytes:
+            raise OSError("Didn't read as many bytes as expected")
+
+    return _array
+
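+# Because fromfile only needs readinto/seek/tell, an in-memory buffer
+# works as well (sketch, not part of the vendored source):
+#     import io
+#     buf = io.BytesIO(np.zeros(4, dtype='f8,i4').tobytes())
+#     r = np.core.records.fromfile(buf, formats='f8,i4', shape=4)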
+
+@set_module("numpy.rec")
+def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
+          names=None, titles=None, aligned=False, byteorder=None, copy=True):
+    """
+    Construct a record array from a wide variety of objects.
+
+    A general-purpose record array constructor that dispatches to the
+    appropriate `recarray` creation function based on the inputs (see Notes).
+
+    Parameters
+    ----------
+    obj : any
+        Input object. See Notes for details on how various input types are
+        treated.
+    dtype : data-type, optional
+        Valid dtype for array.
+    shape : int or tuple of ints, optional
+        Shape of each array.
+    offset : int, optional
+        Position in the file or buffer to start reading from.
+    strides : tuple of ints, optional
+        Buffer (`buf`) is interpreted according to these strides (strides
+        define how many bytes each array element, row, column, etc.
+        occupy in memory).
+    formats, names, titles, aligned, byteorder :
+        If `dtype` is ``None``, these arguments are passed to
+        `numpy.format_parser` to construct a dtype. See that function for
+        detailed documentation.
+    copy : bool, optional
+        Whether to copy the input object (True), or to use a reference instead.
+        This option only applies when the input is an ndarray or recarray.
+        Defaults to True.
+
+    Returns
+    -------
+    np.recarray
+        Record array created from the specified object.
+
+    Notes
+    -----
+    If `obj` is ``None``, then call the `~numpy.recarray` constructor. If
+    `obj` is a string, then call the `fromstring` constructor. If `obj` is a
+    list or a tuple, then if the first object is an `~numpy.ndarray`, call
+    `fromarrays`, otherwise call `fromrecords`. If `obj` is a
+    `~numpy.recarray`, then make a copy of the data in the recarray
+    (if ``copy=True``) and use the new formats, names, and titles. If `obj`
+    is a file, then call `fromfile`. Finally, if obj is an `ndarray`, then
+    return ``obj.view(recarray)``, making a copy of the data if ``copy=True``.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+    >>> a
+    array([[1, 2, 3],
+           [4, 5, 6],
+           [7, 8, 9]])
+
+    >>> np.core.records.array(a)
+    rec.array([[1, 2, 3],
+               [4, 5, 6],
+               [7, 8, 9]],
+        dtype=int32)
+
+    >>> b = [(1, 1), (2, 4), (3, 9)]
+    >>> c = np.core.records.array(b, formats = ['i2', 'f2'], names = ('x', 'y'))
+    >>> c
+    rec.array([(1, 1.0), (2, 4.0), (3, 9.0)],
+              dtype=[('x', '<i2'), ('y', '<f2')])
+    >>> c.x
+    rec.array([1, 2, 3], dtype=int16)
+
+    >>> c.y
+    rec.array([ 1.0,  4.0,  9.0], dtype=float16)
+
+    >>> r = np.rec.array(['abc','def'], names=['col1','col2'])
+    >>> print(r.col1)
+    abc
+
+    >>> r.col1
+    array('abc', dtype='<U3')
+
+    >>> r.col2
+    array('def', dtype='<U3')
+    """
+
+    if ((isinstance(obj, (type(None), str)) or hasattr(obj, 'readinto')) and
+            formats is None and dtype is None):
+        raise ValueError("Must define formats (or dtype) if object is "
+                         "None, string, or an open file")
+
+    kwds = {}
+    if dtype is not None:
+        dtype = sb.dtype(dtype)
+    elif formats is not None:
+        dtype = format_parser(formats, names, titles,
+                              aligned, byteorder).dtype
+    else:
+        kwds = {'formats': formats,
+                'names': names,
+                'titles': titles,
+                'aligned': aligned,
+                'byteorder': byteorder
+                }
+
+    if obj is None:
+        if shape is None:
+            raise ValueError("Must define a shape if obj is None")
+        return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)
+
+    elif isinstance(obj, bytes):
+        return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)
+
+    elif isinstance(obj, (list, tuple)):
+        if isinstance(obj[0], (tuple, list)):
+            return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
+        else:
+            return fromarrays(obj, dtype=dtype, shape=shape, **kwds)
+
+    elif isinstance(obj, recarray):
+        if dtype is not None and (obj.dtype != dtype):
+            new = obj.view(dtype)
+        else:
+            new = obj
+        if copy:
+            new = new.copy()
+        return new
+
+    elif hasattr(obj, 'readinto'):
+        return fromfile(obj, dtype=dtype, shape=shape, offset=offset)
+
+    elif isinstance(obj, ndarray):
+        if dtype is not None and (obj.dtype != dtype):
+            new = obj.view(dtype)
+        else:
+            new = obj
+        if copy:
+            new = new.copy()
+        return new.view(recarray)
+
+    else:
+        interface = getattr(obj, "__array_interface__", None)
+        if interface is None or not isinstance(interface, dict):
+            raise ValueError("Unknown input type")
+        obj = sb.array(obj)
+        if dtype is not None and (obj.dtype != dtype):
+            obj = obj.view(dtype)
+        return obj.view(recarray)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/records.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/records.pyi
new file mode 100644
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/records.pyi
+import os
+from collections.abc import Sequence, Iterable
+from typing import (
+    Any,
+    TypeVar,
+    overload,
+    Protocol,
+)
+
+from numpy import (
+    format_parser as format_parser,
+    record as record,
+    recarray as recarray,
+    dtype,
+    generic,
+    void,
+)
+
+from numpy._typing import (
+    ArrayLike,
+    DTypeLike,
+    NDArray,
+    _ShapeLike,
+    _ByteOrder,
+    _ArrayLikeVoid_co,
+    _NestedSequence,
+    _SupportsBuffer,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+
+_RecArray = recarray[Any, dtype[_SCT]]
+
+class _SupportsReadInto(Protocol):
+    def seek(self, offset: int, whence: int, /) -> object: ...
+    def tell(self, /) -> int: ...
+    def readinto(self, buffer: memoryview, /) -> int: ...
+
+__all__: list[str]
+
+@overload
+def fromarrays(
+    arrayList: Iterable[ArrayLike],
+    dtype: DTypeLike = ...,
+    shape: None | _ShapeLike = ...,
+    formats: None = ...,
+    names: None = ...,
+    titles: None = ...,
+    aligned: bool = ...,
+    byteorder: None = ...,
+) -> _RecArray[Any]: ...
+@overload
+def fromarrays(
+    arrayList: Iterable[ArrayLike],
+    dtype: None = ...,
+    shape: None | _ShapeLike = ...,
+    *,
+    formats: DTypeLike,
+    names: None | str | Sequence[str] = ...,
+    titles: None | str | Sequence[str] = ...,
+    aligned: bool = ...,
+    byteorder: None | _ByteOrder = ...,
+) -> _RecArray[record]: ...
+
+@overload
+def fromrecords(
+    recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]],
+    dtype: DTypeLike = ...,
+    shape: None | _ShapeLike = ...,
+    formats: None = ...,
+    names: None = ...,
+    titles: None = ...,
+    aligned: bool = ...,
+    byteorder: None = ...,
+) -> _RecArray[record]: ...
+@overload
+def fromrecords(
+    recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]],
+    dtype: None = ...,
+    shape: None | _ShapeLike = ...,
+    *,
+    formats: DTypeLike,
+    names: None | str | Sequence[str] = ...,
+    titles: None | str | Sequence[str] = ...,
+    aligned: bool = ...,
+    byteorder: None | _ByteOrder = ...,
+) -> _RecArray[record]: ...
+
+@overload
+def fromstring(
+    datastring: _SupportsBuffer,
+    dtype: DTypeLike,
+    shape: None | _ShapeLike = ...,
+    offset: int = ...,
+    formats: None = ...,
+    names: None = ...,
+    titles: None = ...,
+    aligned: bool = ...,
+    byteorder: None = ...,
+) -> _RecArray[record]: ...
+@overload
+def fromstring(
+    datastring: _SupportsBuffer,
+    dtype: None = ...,
+    shape: None | _ShapeLike = ...,
+    offset: int = ...,
+    *,
+    formats: DTypeLike,
+    names: None | str | Sequence[str] = ...,
+    titles: None | str | Sequence[str] = ...,
+    aligned: bool = ...,
+    byteorder: None | _ByteOrder = ...,
+) -> _RecArray[record]: ...
+
+@overload
+def fromfile(
+    fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto,
+    dtype: DTypeLike,
+    shape: None | _ShapeLike = ...,
+    offset: int = ...,
+    formats: None = ...,
+    names: None = ...,
+    titles: None = ...,
+    aligned: bool = ...,
+    byteorder: None = ...,
+) -> _RecArray[Any]: ...
+@overload
+def fromfile(
+    fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto,
+    dtype: None = ...,
+    shape: None | _ShapeLike = ...,
+    offset: int = ...,
+    *,
+    formats: DTypeLike,
+    names: None | str | Sequence[str] = ...,
+    titles: None | str | Sequence[str] = ...,
+    aligned: bool = ...,
+    byteorder: None | _ByteOrder = ...,
+) -> _RecArray[record]: ...
+
+@overload
+def array(
+    obj: _SCT | NDArray[_SCT],
+    dtype: None = ...,
+    shape: None | _ShapeLike = ...,
+    offset: int = ...,
+    formats: None = ...,
+    names: None = ...,
+    titles: None = ...,
+    aligned: bool = ...,
+    byteorder: None = ...,
+    copy: bool = ...,
+) -> _RecArray[_SCT]: ...
+@overload
+def array(
+    obj: ArrayLike,
+    dtype: DTypeLike,
+    shape: None | _ShapeLike = ...,
+    offset: int = ...,
+    formats: None = ...,
+    names: None = ...,
+    titles: None = ...,
+    aligned: bool = ...,
+    byteorder: None = ...,
+    copy: bool = ...,
+) -> _RecArray[Any]: ...
+@overload
+def array(
+    obj: ArrayLike,
+    dtype: None = ...,
+    shape: None | _ShapeLike = ...,
+    offset: int = ...,
+    *,
+    formats: DTypeLike,
+    names: None | str | Sequence[str] = ...,
+    titles: None | str | Sequence[str] = ...,
+    aligned: bool = ...,
+    byteorder: None | _ByteOrder = ...,
+    copy: bool = ...,
+) -> _RecArray[record]: ...
+@overload
+def array(
+    obj: None,
+    dtype: DTypeLike,
+    shape: _ShapeLike,
+    offset: int = ...,
+    formats: None = ...,
+    names: None = ...,
+    titles: None = ...,
+    aligned: bool = ...,
+    byteorder: None = ...,
+    copy: bool = ...,
+) -> _RecArray[Any]: ...
+@overload
+def array(
+    obj: None,
+    dtype: None = ...,
+    *,
+    shape: _ShapeLike,
+    offset: int = ...,
+    formats: DTypeLike,
+    names: None | str | Sequence[str] = ...,
+    titles: None | str | Sequence[str] = ...,
+    aligned: bool = ...,
+    byteorder: None | _ByteOrder = ...,
+    copy: bool = ...,
+) -> _RecArray[record]: ...
+@overload
+def array(
+    obj: _SupportsReadInto,
+    dtype: DTypeLike,
+    shape: None | _ShapeLike = ...,
+    offset: int = ...,
+    formats: None = ...,
+    names: None = ...,
+    titles: None = ...,
+    aligned: bool = ...,
+    byteorder: None = ...,
+    copy: bool = ...,
+) -> _RecArray[Any]: ...
+@overload
+def array(
+    obj: _SupportsReadInto,
+    dtype: None = ...,
+    shape: None | _ShapeLike = ...,
+    offset: int = ...,
+    *,
+    formats: DTypeLike,
+    names: None | str | Sequence[str] = ...,
+    titles: None | str | Sequence[str] = ...,
+    aligned: bool = ...,
+    byteorder: None | _ByteOrder = ...,
+    copy: bool = ...,
+) -> _RecArray[record]: ...
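The overload pattern above is consistent across these constructors: a concrete scalar type (or a buffer, file, or None source with an explicit dtype) yields `_RecArray[Any]` or `_RecArray[_SCT]`, while the `formats=` fallback is always typed `_RecArray[record]`. Roughly, under these stubs (a sketch, not part of the diff):

    import numpy as np

    r = np.rec.fromarrays([[1, 2], [1.5, 2.5]], formats='i8,f8')
    # matched by the second fromarrays overload: dtype is None and formats
    # is given, so type checkers see recarray[Any, dtype[record]]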
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/shape_base.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/shape_base.py
new file mode 100644
index 00000000..250fffd4
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/shape_base.py
@@ -0,0 +1,923 @@
+__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
+           'stack', 'vstack']
+
+import functools
+import itertools
+import operator
+import warnings
+
+from . import numeric as _nx
+from . import overrides
+from .multiarray import array, asanyarray, normalize_axis_index
+from . import fromnumeric as _from_nx
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
+def _atleast_1d_dispatcher(*arys):
+    return arys
+
+
+@array_function_dispatch(_atleast_1d_dispatcher)
+def atleast_1d(*arys):
+    """
+    Convert inputs to arrays with at least one dimension.
+
+    Scalar inputs are converted to 1-dimensional arrays, whilst
+    higher-dimensional inputs are preserved.
+
+    Parameters
+    ----------
+    arys1, arys2, ... : array_like
+        One or more input arrays.
+
+    Returns
+    -------
+    ret : ndarray
+        An array, or list of arrays, each with ``a.ndim >= 1``.
+        Copies are made only if necessary.
+
+    See Also
+    --------
+    atleast_2d, atleast_3d
+
+    Examples
+    --------
+    >>> np.atleast_1d(1.0)
+    array([1.])
+
+    >>> x = np.arange(9.0).reshape(3,3)
+    >>> np.atleast_1d(x)
+    array([[0., 1., 2.],
+           [3., 4., 5.],
+           [6., 7., 8.]])
+    >>> np.atleast_1d(x) is x
+    True
+
+    >>> np.atleast_1d(1, [3, 4])
+    [array([1]), array([3, 4])]
+
+    """
+    res = []
+    for ary in arys:
+        ary = asanyarray(ary)
+        if ary.ndim == 0:
+            result = ary.reshape(1)
+        else:
+            result = ary
+        res.append(result)
+    if len(res) == 1:
+        return res[0]
+    else:
+        return res
+
+
+def _atleast_2d_dispatcher(*arys):
+    return arys
+
+
+@array_function_dispatch(_atleast_2d_dispatcher)
+def atleast_2d(*arys):
+    """
+    View inputs as arrays with at least two dimensions.
+
+    Parameters
+    ----------
+    arys1, arys2, ... : array_like
+        One or more array-like sequences.  Non-array inputs are converted
+        to arrays.  Arrays that already have two or more dimensions are
+        preserved.
+
+    Returns
+    -------
+    res, res2, ... : ndarray
+        An array, or list of arrays, each with ``a.ndim >= 2``.
+        Copies are avoided where possible, and views with two or more
+        dimensions are returned.
+
+    See Also
+    --------
+    atleast_1d, atleast_3d
+
+    Examples
+    --------
+    >>> np.atleast_2d(3.0)
+    array([[3.]])
+
+    >>> x = np.arange(3.0)
+    >>> np.atleast_2d(x)
+    array([[0., 1., 2.]])
+    >>> np.atleast_2d(x).base is x
+    True
+
+    >>> np.atleast_2d(1, [1, 2], [[1, 2]])
+    [array([[1]]), array([[1, 2]]), array([[1, 2]])]
+
+    """
+    res = []
+    for ary in arys:
+        ary = asanyarray(ary)
+        if ary.ndim == 0:
+            result = ary.reshape(1, 1)
+        elif ary.ndim == 1:
+            result = ary[_nx.newaxis, :]
+        else:
+            result = ary
+        res.append(result)
+    if len(res) == 1:
+        return res[0]
+    else:
+        return res
+
+
+def _atleast_3d_dispatcher(*arys):
+    return arys
+
+
+@array_function_dispatch(_atleast_3d_dispatcher)
+def atleast_3d(*arys):
+    """
+    View inputs as arrays with at least three dimensions.
+
+    Parameters
+    ----------
+    arys1, arys2, ... : array_like
+        One or more array-like sequences.  Non-array inputs are converted to
+        arrays.  Arrays that already have three or more dimensions are
+        preserved.
+
+    Returns
+    -------
+    res1, res2, ... : ndarray
+        An array, or list of arrays, each with ``a.ndim >= 3``.  Copies are
+        avoided where possible, and views with three or more dimensions are
+        returned.  For example, a 1-D array of shape ``(N,)`` becomes a view
+        of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
+        view of shape ``(M, N, 1)``.
+
+    See Also
+    --------
+    atleast_1d, atleast_2d
+
+    Examples
+    --------
+    >>> np.atleast_3d(3.0)
+    array([[[3.]]])
+
+    >>> x = np.arange(3.0)
+    >>> np.atleast_3d(x).shape
+    (1, 3, 1)
+
+    >>> x = np.arange(12.0).reshape(4,3)
+    >>> np.atleast_3d(x).shape
+    (4, 3, 1)
+    >>> np.atleast_3d(x).base is x.base  # x is a reshape, so not base itself
+    True
+
+    >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
+    ...     print(arr, arr.shape) # doctest: +SKIP
+    ...
+    [[[1]
+      [2]]] (1, 2, 1)
+    [[[1]
+      [2]]] (1, 2, 1)
+    [[[1 2]]] (1, 1, 2)
+
+    """
+    res = []
+    for ary in arys:
+        ary = asanyarray(ary)
+        if ary.ndim == 0:
+            result = ary.reshape(1, 1, 1)
+        elif ary.ndim == 1:
+            result = ary[_nx.newaxis, :, _nx.newaxis]
+        elif ary.ndim == 2:
+            result = ary[:, :, _nx.newaxis]
+        else:
+            result = ary
+        res.append(result)
+    if len(res) == 1:
+        return res[0]
+    else:
+        return res
+
+
+def _arrays_for_stack_dispatcher(arrays):
+    if not hasattr(arrays, "__getitem__"):
+        raise TypeError('arrays to stack must be passed as a "sequence" type '
+                        'such as list or tuple.')
+
+    return tuple(arrays)
+
+
+def _vhstack_dispatcher(tup, *, dtype=None, casting=None):
+    return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_vhstack_dispatcher)
+def vstack(tup, *, dtype=None, casting="same_kind"):
+    """
+    Stack arrays in sequence vertically (row wise).
+
+    This is equivalent to concatenation along the first axis after 1-D arrays
+    of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
+    `vsplit`.
+
+    This function makes most sense for arrays with up to 3 dimensions. For
+    instance, for pixel-data with a height (first axis), width (second axis),
+    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+    `block` provide more general stacking and concatenation operations.
+
+    ``np.row_stack`` is an alias for `vstack`. They are the same function.
+
+    Parameters
+    ----------
+    tup : sequence of ndarrays
+        The arrays must have the same shape along all but the first axis.
+        1-D arrays must have the same length.
+
+    dtype : str or dtype
+        If provided, the destination array will have this dtype. Cannot be
+        provided together with `out`.
+
+        .. versionadded:: 1.24
+
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur. Defaults to 'same_kind'.
+
+        .. versionadded:: 1.24
+
+    Returns
+    -------
+    stacked : ndarray
+        The array formed by stacking the given arrays, will be at least 2-D.
+
+    See Also
+    --------
+    concatenate : Join a sequence of arrays along an existing axis.
+    stack : Join a sequence of arrays along a new axis.
+    block : Assemble an nd-array from nested lists of blocks.
+    hstack : Stack arrays in sequence horizontally (column wise).
+    dstack : Stack arrays in sequence depth wise (along third axis).
+    column_stack : Stack 1-D arrays as columns into a 2-D array.
+    vsplit : Split an array into multiple sub-arrays vertically (row-wise).
+
+    Examples
+    --------
+    >>> a = np.array([1, 2, 3])
+    >>> b = np.array([4, 5, 6])
+    >>> np.vstack((a,b))
+    array([[1, 2, 3],
+           [4, 5, 6]])
+
+    >>> a = np.array([[1], [2], [3]])
+    >>> b = np.array([[4], [5], [6]])
+    >>> np.vstack((a,b))
+    array([[1],
+           [2],
+           [3],
+           [4],
+           [5],
+           [6]])
+
+    """
+    arrs = atleast_2d(*tup)
+    if not isinstance(arrs, list):
+        arrs = [arrs]
+    return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)
+
+
+@array_function_dispatch(_vhstack_dispatcher)
+def hstack(tup, *, dtype=None, casting="same_kind"):
+    """
+    Stack arrays in sequence horizontally (column wise).
+
+    This is equivalent to concatenation along the second axis, except for 1-D
+    arrays where it concatenates along the first axis. Rebuilds arrays divided
+    by `hsplit`.
+
+    This function makes most sense for arrays with up to 3 dimensions. For
+    instance, for pixel-data with a height (first axis), width (second axis),
+    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+    `block` provide more general stacking and concatenation operations.
+
+    Parameters
+    ----------
+    tup : sequence of ndarrays
+        The arrays must have the same shape along all but the second axis,
+        except 1-D arrays which can be any length.
+
+    dtype : str or dtype
+        If provided, the destination array will have this dtype. Cannot be
+        provided together with `out`.
+
+        .. versionadded:: 1.24
+
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur. Defaults to 'same_kind'.
+
+        .. versionadded:: 1.24
+
+    Returns
+    -------
+    stacked : ndarray
+        The array formed by stacking the given arrays.
+
+    See Also
+    --------
+    concatenate : Join a sequence of arrays along an existing axis.
+    stack : Join a sequence of arrays along a new axis.
+    block : Assemble an nd-array from nested lists of blocks.
+    vstack : Stack arrays in sequence vertically (row wise).
+    dstack : Stack arrays in sequence depth wise (along third axis).
+    column_stack : Stack 1-D arrays as columns into a 2-D array.
+    hsplit : Split an array into multiple sub-arrays horizontally (column-wise).
+
+    Examples
+    --------
+    >>> a = np.array((1,2,3))
+    >>> b = np.array((4,5,6))
+    >>> np.hstack((a,b))
+    array([1, 2, 3, 4, 5, 6])
+    >>> a = np.array([[1],[2],[3]])
+    >>> b = np.array([[4],[5],[6]])
+    >>> np.hstack((a,b))
+    array([[1, 4],
+           [2, 5],
+           [3, 6]])
+
+    """
+    arrs = atleast_1d(*tup)
+    if not isinstance(arrs, list):
+        arrs = [arrs]
+    # As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
+    if arrs and arrs[0].ndim == 1:
+        return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)
+    else:
+        return _nx.concatenate(arrs, 1, dtype=dtype, casting=casting)
+
+
+def _stack_dispatcher(arrays, axis=None, out=None, *,
+                      dtype=None, casting=None):
+    arrays = _arrays_for_stack_dispatcher(arrays)
+    if out is not None:
+        # optimize for the typical case where only arrays is provided
+        arrays = list(arrays)
+        arrays.append(out)
+    return arrays
+
+
+@array_function_dispatch(_stack_dispatcher)
+def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"):
+    """
+    Join a sequence of arrays along a new axis.
+
+    The ``axis`` parameter specifies the index of the new axis in the
+    dimensions of the result. For example, if ``axis=0`` it will be the first
+    dimension and if ``axis=-1`` it will be the last dimension.
+
+    .. versionadded:: 1.10.0
+
+    Parameters
+    ----------
+    arrays : sequence of array_like
+        Each array must have the same shape.
+
+    axis : int, optional
+        The axis in the result array along which the input arrays are stacked.
+
+    out : ndarray, optional
+        If provided, the destination to place the result. The shape must be
+        correct, matching that of what stack would have returned if no
+        out argument were specified.
+
+    dtype : str or dtype
+        If provided, the destination array will have this dtype. Cannot be
+        provided together with `out`.
+
+        .. versionadded:: 1.24
+
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur. Defaults to 'same_kind'.
+
+        .. versionadded:: 1.24
+
+
+    Returns
+    -------
+    stacked : ndarray
+        The stacked array has one more dimension than the input arrays.
+
+    See Also
+    --------
+    concatenate : Join a sequence of arrays along an existing axis.
+    block : Assemble an nd-array from nested lists of blocks.
+    split : Split array into a list of multiple sub-arrays of equal size.
+
+    Examples
+    --------
+    >>> arrays = [np.random.randn(3, 4) for _ in range(10)]
+    >>> np.stack(arrays, axis=0).shape
+    (10, 3, 4)
+
+    >>> np.stack(arrays, axis=1).shape
+    (3, 10, 4)
+
+    >>> np.stack(arrays, axis=2).shape
+    (3, 4, 10)
+
+    >>> a = np.array([1, 2, 3])
+    >>> b = np.array([4, 5, 6])
+    >>> np.stack((a, b))
+    array([[1, 2, 3],
+           [4, 5, 6]])
+
+    >>> np.stack((a, b), axis=-1)
+    array([[1, 4],
+           [2, 5],
+           [3, 6]])
+
+    """
+    arrays = [asanyarray(arr) for arr in arrays]
+    if not arrays:
+        raise ValueError('need at least one array to stack')
+
+    shapes = {arr.shape for arr in arrays}
+    if len(shapes) != 1:
+        raise ValueError('all input arrays must have the same shape')
+
+    result_ndim = arrays[0].ndim + 1
+    axis = normalize_axis_index(axis, result_ndim)
+
+    sl = (slice(None),) * axis + (_nx.newaxis,)
+    expanded_arrays = [arr[sl] for arr in arrays]
+    return _nx.concatenate(expanded_arrays, axis=axis, out=out,
+                           dtype=dtype, casting=casting)
+
+
+# Internal functions to eliminate the overhead of repeated dispatch in one of
+# the two possible paths inside np.block.
+# Use getattr to protect against __array_function__ being disabled.
+_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
+_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
+_concatenate = getattr(_from_nx.concatenate,
+                       '__wrapped__', _from_nx.concatenate)
+
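+# The same getattr pattern works anywhere dispatch overhead matters
+# (sketch): `getattr(np.size, '__wrapped__', np.size)` retrieves the
+# undecorated implementation, falling back to the public function when
+# __array_function__ support is compiled out.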
+
+def _block_format_index(index):
+    """
+    Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
+    """
+    idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
+    return 'arrays' + idx_str
+
+
+def _block_check_depths_match(arrays, parent_index=[]):
+    """
+    Recursive function checking that the depths of nested lists in `arrays`
+    all match. Mismatch raises a ValueError as described in the block
+    docstring below.
+
+    The entire index (rather than just the depth) needs to be calculated
+    for each innermost list, in case an error needs to be raised, so that
+    the index of the offending list can be printed as part of the error.
+
+    Parameters
+    ----------
+    arrays : nested list of arrays
+        The arrays to check
+    parent_index : list of int
+        The full index of `arrays` within the nested lists passed to
+        `_block_check_depths_match` at the top of the recursion.
+
+    Returns
+    -------
+    first_index : list of int
+        The full index of an element from the bottom of the nesting in
+        `arrays`. If any element at the bottom is an empty list, this will
+        refer to it, and the last index along the empty axis will be None.
+    max_arr_ndim : int
+        The maximum of the ndims of the arrays nested in `arrays`.
+    final_size : int
+        The number of elements in the final array. This is used to motivate
+        the choice of algorithm, based on benchmarking wisdom.
+
+    """
+    if type(arrays) is tuple:
+        # not strictly necessary, but saves us from:
+        #  - more than one way to do things - no point treating tuples like
+        #    lists
+        #  - horribly confusing behaviour that results when tuples are
+        #    treated like ndarray
+        raise TypeError(
+            '{} is a tuple. '
+            'Only lists can be used to arrange blocks, and np.block does '
+            'not allow implicit conversion from tuple to ndarray.'.format(
+                _block_format_index(parent_index)
+            )
+        )
+    elif type(arrays) is list and len(arrays) > 0:
+        idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
+                      for i, arr in enumerate(arrays))
+
+        first_index, max_arr_ndim, final_size = next(idxs_ndims)
+        for index, ndim, size in idxs_ndims:
+            final_size += size
+            if ndim > max_arr_ndim:
+                max_arr_ndim = ndim
+            if len(index) != len(first_index):
+                raise ValueError(
+                    "List depths are mismatched. First element was at depth "
+                    "{}, but there is an element at depth {} ({})".format(
+                        len(first_index),
+                        len(index),
+                        _block_format_index(index)
+                    )
+                )
+            # propagate our flag that indicates an empty list at the bottom
+            if index[-1] is None:
+                first_index = index
+
+        return first_index, max_arr_ndim, final_size
+    elif type(arrays) is list and len(arrays) == 0:
+        # We've 'bottomed out' on an empty list
+        return parent_index + [None], 0, 0
+    else:
+        # We've 'bottomed out' - arrays is either a scalar or an array
+        size = _size(arrays)
+        return parent_index, _ndim(arrays), size
+
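+# A sketch of what the depth check returns (illustrative only): for a nested
+# list ``[[a, b]]`` with 2-D blocks of sizes 4 and 6, the innermost index,
+# the maximum ndim, and the total size come back as:
+#
+#     >>> a = np.ones((2, 2)); b = np.ones((2, 3))
+#     >>> _block_check_depths_match([[a, b]])
+#     ([0, 0], 2, 10)
+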
+
+def _atleast_nd(a, ndim):
+    # Ensures `a` has at least `ndim` dimensions by prepending
+    # ones to `a.shape` as necessary
+    return array(a, ndmin=ndim, copy=False, subok=True)
+
+
+def _accumulate(values):
+    return list(itertools.accumulate(values))
+
+
+def _concatenate_shapes(shapes, axis):
+    """Given array shapes, return the resulting shape and slices prefixes.
+
+    These help in nested concatenation.
+
+    Returns
+    -------
+    shape : tuple of int
+        This tuple satisfies::
+
+            shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
+            shape == concatenate(arrs, axis).shape
+
+    slice_prefixes : tuple of (slice(start, end),)
+        For a list of arrays being concatenated, this returns the slice
+        in the larger array at `axis` that needs to be sliced into.
+
+        For example, the following holds::
+
+            ret = concatenate([a, b, c], axis)
+            _, (sl_a, sl_b, sl_c) = _concatenate_shapes(
+                [a.shape, b.shape, c.shape], axis)
+
+            ret[(slice(None),) * axis + sl_a] == a
+            ret[(slice(None),) * axis + sl_b] == b
+            ret[(slice(None),) * axis + sl_c] == c
+
+        These are called slice prefixes since they are used in the recursive
+        blocking algorithm to compute the left-most slices during the
+        recursion. Therefore, they must be prepended to the rest of the slice
+        that was computed deeper in the recursion.
+
+        These are returned as tuples so that they can quickly be added to an
+        existing slice tuple without creating a new tuple every time.
+
+    """
+    # Cache a result that will be reused.
+    shape_at_axis = [shape[axis] for shape in shapes]
+
+    # Take a shape, any shape
+    first_shape = shapes[0]
+    first_shape_pre = first_shape[:axis]
+    first_shape_post = first_shape[axis+1:]
+
+    if any(shape[:axis] != first_shape_pre or
+           shape[axis+1:] != first_shape_post for shape in shapes):
+        raise ValueError(
+            'Mismatched array shapes in block along axis {}.'.format(axis))
+
+    shape = first_shape_pre + (sum(shape_at_axis),) + first_shape_post
+
+    offsets_at_axis = _accumulate(shape_at_axis)
+    slice_prefixes = [(slice(start, end),)
+                      for start, end in zip([0] + offsets_at_axis,
+                                            offsets_at_axis)]
+    return shape, slice_prefixes
+
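+# A worked example of the shape/slice-prefix computation above (illustrative
+# sketch): concatenating shapes (2, 3) and (2, 4) along axis 1 yields the
+# combined shape and the per-array prefix slices.
+#
+#     >>> _concatenate_shapes([(2, 3), (2, 4)], axis=1)
+#     ((2, 7), [(slice(0, 3),), (slice(3, 7),)])
+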
+
+def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
+    """
+    Returns the shape of the final array, along with a list
+    of slices and a list of arrays that can be used for assignment inside the
+    new array
+
+    Parameters
+    ----------
+    arrays : nested list of arrays
+        The arrays to check
+    max_depth : int
+        The depth of the nested lists.
+    result_ndim : int
+        The number of dimensions in the final array.
+
+    Returns
+    -------
+    shape : tuple of int
+        The shape that the final array will take on.
+    slices : list of tuple of slices
+        The slices into the full array required for assignment. These are
+        required to be prepended with ``(Ellipsis, )`` to obtain the correct
+        final index.
+    arrays : list of ndarray
+        The data to assign to each slice of the full array.
+
+    """
+    if depth < max_depth:
+        shapes, slices, arrays = zip(
+            *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
+              for arr in arrays])
+
+        axis = result_ndim - max_depth + depth
+        shape, slice_prefixes = _concatenate_shapes(shapes, axis)
+
+        # Prepend the slice prefix and flatten the slices
+        slices = [slice_prefix + the_slice
+                  for slice_prefix, inner_slices in zip(slice_prefixes, slices)
+                  for the_slice in inner_slices]
+
+        # Flatten the array list
+        arrays = functools.reduce(operator.add, arrays)
+
+        return shape, slices, arrays
+    else:
+        # We've 'bottomed out' - arrays is either a scalar or an array
+        # type(arrays) is not list
+        # Return the slice and the array inside a list to be consistent with
+        # the recursive case.
+        arr = _atleast_nd(arrays, result_ndim)
+        return arr.shape, [()], [arr]
+
+
+def _block(arrays, max_depth, result_ndim, depth=0):
+    """
+    Internal implementation of block based on repeated concatenation.
+    `arrays` is the argument passed to block. `max_depth` is the depth of
+    nested lists within `arrays` and `result_ndim` is the greatest of the
+    dimensions of the arrays in `arrays` and the depth of the lists in
+    `arrays` (see the block docstring for details).
+    """
+    if depth < max_depth:
+        arrs = [_block(arr, max_depth, result_ndim, depth+1)
+                for arr in arrays]
+        return _concatenate(arrs, axis=-(max_depth-depth))
+    else:
+        # We've 'bottomed out' - arrays is either a scalar or an array
+        # type(arrays) is not list
+        return _atleast_nd(arrays, result_ndim)
+
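+# Illustration of the bottom-up concatenation order (a hypothetical 2x2 case,
+# not part of the original module): with max_depth == 2, _block joins the
+# inner lists along axis -1 first, then the outer list along axis -2, i.e.
+# roughly:
+#
+#     np.concatenate([np.concatenate([a, b], axis=-1),
+#                     np.concatenate([c, d], axis=-1)], axis=-2)
+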
+
+def _block_dispatcher(arrays):
+    # Use type(...) is list to match the behavior of np.block(), which special
+    # cases list specifically rather than allowing for generic iterables or
+    # tuple. Also, we know that list.__array_function__ will never exist.
+    if type(arrays) is list:
+        for subarrays in arrays:
+            yield from _block_dispatcher(subarrays)
+    else:
+        yield arrays
+
+
+@array_function_dispatch(_block_dispatcher)
+def block(arrays):
+    """
+    Assemble an nd-array from nested lists of blocks.
+
+    Blocks in the innermost lists are concatenated (see `concatenate`) along
+    the last dimension (-1), then these are concatenated along the
+    second-last dimension (-2), and so on until the outermost list is reached.
+
+    Blocks can be of any dimension, but will not be broadcast using the normal
+    rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
+    the same for all blocks. This is primarily useful for working with scalars,
+    and means that code like ``np.block([v, 1])`` is valid, where
+    ``v.ndim == 1``.
+
+    When the nested list is two levels deep, this allows block matrices to be
+    constructed from their components.
+
+    .. versionadded:: 1.13.0
+
+    Parameters
+    ----------
+    arrays : nested list of array_like or scalars (but not tuples)
+        If passed a single ndarray or scalar (a nested list of depth 0), this
+        is returned unmodified (and not copied).
+
+        Element shapes must match along the appropriate axes (without
+        broadcasting), but leading 1s will be prepended to the shape as
+        necessary to make the dimensions match.
+
+    Returns
+    -------
+    block_array : ndarray
+        The array assembled from the given blocks.
+
+        The dimensionality of the output is equal to the greatest of:
+        * the dimensionality of all the inputs
+        * the depth to which the input list is nested
+
+    Raises
+    ------
+    ValueError
+        * If list depths are mismatched - for instance, ``[[a, b], c]`` is
+          illegal, and should be spelt ``[[a, b], [c]]``
+        * If lists are empty - for instance, ``[[a, b], []]``
+
+    See Also
+    --------
+    concatenate : Join a sequence of arrays along an existing axis.
+    stack : Join a sequence of arrays along a new axis.
+    vstack : Stack arrays in sequence vertically (row wise).
+    hstack : Stack arrays in sequence horizontally (column wise).
+    dstack : Stack arrays in sequence depth wise (along third axis).
+    column_stack : Stack 1-D arrays as columns into a 2-D array.
+    vsplit : Split an array into multiple sub-arrays vertically (row-wise).
+
+    Notes
+    -----
+
+    When called with only scalars, ``np.block`` is equivalent to an ndarray
+    call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
+    ``np.array([[1, 2], [3, 4]])``.
+
+    This function does not enforce that the blocks lie on a fixed grid.
+    ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::
+
+        AAAbb
+        AAAbb
+        cccDD
+
+    But is also allowed to produce, for some ``a, b, c, d``::
+
+        AAAbb
+        AAAbb
+        cDDDD
+
+    Since concatenation happens along the last axis first, `block` is *not*
+    capable of producing the following directly::
+
+        AAAbb
+        cccbb
+        cccDD
+
+    Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
+    equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.
+
+    Examples
+    --------
+    The most common use of this function is to build a block matrix
+
+    >>> A = np.eye(2) * 2
+    >>> B = np.eye(3) * 3
+    >>> np.block([
+    ...     [A,               np.zeros((2, 3))],
+    ...     [np.ones((3, 2)), B               ]
+    ... ])
+    array([[2., 0., 0., 0., 0.],
+           [0., 2., 0., 0., 0.],
+           [1., 1., 3., 0., 0.],
+           [1., 1., 0., 3., 0.],
+           [1., 1., 0., 0., 3.]])
+
+    With a list of depth 1, `block` can be used as `hstack`
+
+    >>> np.block([1, 2, 3])              # hstack([1, 2, 3])
+    array([1, 2, 3])
+
+    >>> a = np.array([1, 2, 3])
+    >>> b = np.array([4, 5, 6])
+    >>> np.block([a, b, 10])             # hstack([a, b, 10])
+    array([ 1,  2,  3,  4,  5,  6, 10])
+
+    >>> A = np.ones((2, 2), int)
+    >>> B = 2 * A
+    >>> np.block([A, B])                 # hstack([A, B])
+    array([[1, 1, 2, 2],
+           [1, 1, 2, 2]])
+
+    With a list of depth 2, `block` can be used in place of `vstack`:
+
+    >>> a = np.array([1, 2, 3])
+    >>> b = np.array([4, 5, 6])
+    >>> np.block([[a], [b]])             # vstack([a, b])
+    array([[1, 2, 3],
+           [4, 5, 6]])
+
+    >>> A = np.ones((2, 2), int)
+    >>> B = 2 * A
+    >>> np.block([[A], [B]])             # vstack([A, B])
+    array([[1, 1],
+           [1, 1],
+           [2, 2],
+           [2, 2]])
+
+    It can also be used in place of `atleast_1d` and `atleast_2d`
+
+    >>> a = np.array(0)
+    >>> b = np.array([1])
+    >>> np.block([a])                    # atleast_1d(a)
+    array([0])
+    >>> np.block([b])                    # atleast_1d(b)
+    array([1])
+
+    >>> np.block([[a]])                  # atleast_2d(a)
+    array([[0]])
+    >>> np.block([[b]])                  # atleast_2d(b)
+    array([[1]])
+
+
+    """
+    arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)
+
+    # It was found through benchmarking that making an array of final size
+    # around 256x256 was faster by straight concatenation on an
+    # i7-7700HQ processor with dual-channel 2400 MHz RAM.
+    # The dtype used did not seem to matter much.
+    #
+    # A 2D array using repeated concatenation requires 2 copies of the array.
+    #
+    # The fastest algorithm will depend on the ratio of CPU power to memory
+    # speed.
+    # One can monitor the results of the benchmark
+    # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
+    # to tune this parameter until a C version of the `_block_info_recursion`
+    # algorithm is implemented which would likely be faster than the python
+    # version.
+    if list_ndim * final_size > (2 * 512 * 512):
+        return _block_slicing(arrays, list_ndim, result_ndim)
+    else:
+        return _block_concatenate(arrays, list_ndim, result_ndim)
+
+
+# These helper functions are mostly used for testing.
+# They allow us to write tests that directly call `_block_slicing`
+# or `_block_concatenate` without having to block large arrays to force the
+# benchmarking heuristic to take the desired path.
+def _block_setup(arrays):
+    """
+    Returns
+    (`arrays`, list_ndim, result_ndim, final_size)
+    """
+    bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
+    list_ndim = len(bottom_index)
+    if bottom_index and bottom_index[-1] is None:
+        raise ValueError(
+            'List at {} cannot be empty'.format(
+                _block_format_index(bottom_index)
+            )
+        )
+    result_ndim = max(arr_ndim, list_ndim)
+    return arrays, list_ndim, result_ndim, final_size
+
+
+def _block_slicing(arrays, list_ndim, result_ndim):
+    shape, slices, arrays = _block_info_recursion(
+        arrays, list_ndim, result_ndim)
+    dtype = _nx.result_type(*[arr.dtype for arr in arrays])
+
+    # Test preferring F only in the case that all input arrays are F
+    F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
+    C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
+    order = 'F' if F_order and not C_order else 'C'
+    result = _nx.empty(shape=shape, dtype=dtype, order=order)
+    # Note: in a C implementation, the function
+    # PyArray_CreateMultiSortedStridePerm could be used for more advanced
+    # guessing of the desired order.
+
+    for the_slice, arr in zip(slices, arrays):
+        result[(Ellipsis,) + the_slice] = arr
+    return result
+
+
+def _block_concatenate(arrays, list_ndim, result_ndim):
+    result = _block(arrays, list_ndim, result_ndim)
+    if list_ndim == 0:
+        # Catch an edge case where _block returns a view because
+        # `arrays` is a single numpy array and not a list of numpy arrays.
+        # This might copy scalars or lists twice, but this isn't a likely
+        # usecase for those interested in performance
+        result = result.copy()
+    return result
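+
+
+# Both code paths should produce identical results; a quick sanity sketch
+# (illustrative only, using the private helpers defined above):
+#
+#     >>> arrays = [[np.ones((2, 2)), np.zeros((2, 2))]]
+#     >>> _, list_ndim, result_ndim, _ = _block_setup(arrays)
+#     >>> np.array_equal(_block_slicing(arrays, list_ndim, result_ndim),
+#     ...                _block_concatenate(arrays, list_ndim, result_ndim))
+#     True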
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/shape_base.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/core/shape_base.pyi
new file mode 100644
index 00000000..10116f1e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/shape_base.pyi
@@ -0,0 +1,123 @@
+from collections.abc import Sequence
+from typing import TypeVar, overload, Any, SupportsIndex
+
+from numpy import generic, _CastingKind
+from numpy._typing import (
+    NDArray,
+    ArrayLike,
+    DTypeLike,
+    _ArrayLike,
+    _DTypeLike,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+__all__: list[str]
+
+@overload
+def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
+@overload
+def atleast_1d(arys: ArrayLike, /) -> NDArray[Any]: ...
+@overload
+def atleast_1d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
+
+@overload
+def atleast_2d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
+@overload
+def atleast_2d(arys: ArrayLike, /) -> NDArray[Any]: ...
+@overload
+def atleast_2d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
+
+@overload
+def atleast_3d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
+@overload
+def atleast_3d(arys: ArrayLike, /) -> NDArray[Any]: ...
+@overload
+def atleast_3d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
+
+@overload
+def vstack(
+    tup: Sequence[_ArrayLike[_SCT]],
+    *,
+    dtype: None = ...,
+    casting: _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def vstack(
+    tup: Sequence[ArrayLike],
+    *,
+    dtype: _DTypeLike[_SCT],
+    casting: _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def vstack(
+    tup: Sequence[ArrayLike],
+    *,
+    dtype: DTypeLike = ...,
+    casting: _CastingKind = ...
+) -> NDArray[Any]: ...
+
+@overload
+def hstack(
+    tup: Sequence[_ArrayLike[_SCT]],
+    *,
+    dtype: None = ...,
+    casting: _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def hstack(
+    tup: Sequence[ArrayLike],
+    *,
+    dtype: _DTypeLike[_SCT],
+    casting: _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def hstack(
+    tup: Sequence[ArrayLike],
+    *,
+    dtype: DTypeLike = ...,
+    casting: _CastingKind = ...
+) -> NDArray[Any]: ...
+
+@overload
+def stack(
+    arrays: Sequence[_ArrayLike[_SCT]],
+    axis: SupportsIndex = ...,
+    out: None = ...,
+    *,
+    dtype: None = ...,
+    casting: _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def stack(
+    arrays: Sequence[ArrayLike],
+    axis: SupportsIndex = ...,
+    out: None = ...,
+    *,
+    dtype: _DTypeLike[_SCT],
+    casting: _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def stack(
+    arrays: Sequence[ArrayLike],
+    axis: SupportsIndex = ...,
+    out: None = ...,
+    *,
+    dtype: DTypeLike = ...,
+    casting: _CastingKind = ...
+) -> NDArray[Any]: ...
+@overload
+def stack(
+    arrays: Sequence[ArrayLike],
+    axis: SupportsIndex = ...,
+    out: _ArrayType = ...,
+    *,
+    dtype: DTypeLike = ...,
+    casting: _CastingKind = ...
+) -> _ArrayType: ...
+
+@overload
+def block(arrays: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def block(arrays: ArrayLike) -> NDArray[Any]: ...
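+
+# A sketch of how the ``stack`` overloads above resolve under a type checker
+# (illustrative; assumes ``import numpy as np`` and a ``Sequence`` value):
+#
+#     arrs: Sequence[NDArray[np.float64]]
+#     reveal_type(stack(arrs))                  # NDArray[float64]
+#     reveal_type(stack(arrs, dtype=np.int64))  # NDArray[int64]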
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/_locales.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/_locales.py
new file mode 100644
index 00000000..b1dc55a9
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/_locales.py
@@ -0,0 +1,74 @@
+"""Provide class for testing in French locale
+
+"""
+import sys
+import locale
+
+import pytest
+
+__all__ = ['CommaDecimalPointLocale']
+
+
+def find_comma_decimal_point_locale():
+    """See if platform has a decimal point as comma locale.
+
+    Find a locale that uses a comma instead of a period as the
+    decimal point.
+
+    Returns
+    -------
+    old_locale : str
+        Locale when the function was called.
+    new_locale : {str, None}
+        First comma-decimal locale found, or None if none was found.
+
+    """
+    if sys.platform == 'win32':
+        locales = ['FRENCH']
+    else:
+        locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']
+
+    old_locale = locale.getlocale(locale.LC_NUMERIC)
+    new_locale = None
+    try:
+        for loc in locales:
+            try:
+                locale.setlocale(locale.LC_NUMERIC, loc)
+                new_locale = loc
+                break
+            except locale.Error:
+                pass
+    finally:
+        locale.setlocale(locale.LC_NUMERIC, locale=old_locale)
+    return old_locale, new_locale
+
+
+class CommaDecimalPointLocale:
+    """Sets LC_NUMERIC to a locale with comma as decimal point.
+
+    Classes derived from this class have setup and teardown methods that run
+    tests with locale.LC_NUMERIC set to a locale where commas (',') are used as
+    the decimal point instead of periods ('.'). On exit the locale is restored
+    to the initial locale. It also serves as a context manager with the same
+    effect. If no such locale is available, the test is skipped.
+
+    .. versionadded:: 1.15.0
+
+    """
+    (cur_locale, tst_locale) = find_comma_decimal_point_locale()
+
+    def setup_method(self):
+        if self.tst_locale is None:
+            pytest.skip("No French locale available")
+        locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
+
+    def teardown_method(self):
+        locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
+
+    def __enter__(self):
+        if self.tst_locale is None:
+            pytest.skip("No French locale available")
+        locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
+
+    def __exit__(self, type, value, traceback):
+        locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
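+
+
+# A usage sketch (hypothetical test class, not part of this module): deriving
+# from CommaDecimalPointLocale runs each test method under the comma-decimal
+# locale, and the context-manager form scopes it to a block:
+#
+#     class TestLocaleIndependentRepr(CommaDecimalPointLocale):
+#         def test_float_repr(self):
+#             assert repr(1.5) == '1.5'  # repr must not follow LC_NUMERIC
+#
+#     with CommaDecimalPointLocale():
+#         ...  # LC_NUMERIC uses ',' as the decimal point here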
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/cython/checks.pyx b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/cython/checks.pyx
new file mode 100644
index 00000000..c5529ee8
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/cython/checks.pyx
@@ -0,0 +1,35 @@
+#cython: language_level=3
+
+"""
+Functions in this module give python-space wrappers for cython functions
+exposed in numpy/__init__.pxd, so they can be tested in test_cython.py
+"""
+cimport numpy as cnp
+cnp.import_array()
+
+
+def is_td64(obj):
+    return cnp.is_timedelta64_object(obj)
+
+
+def is_dt64(obj):
+    return cnp.is_datetime64_object(obj)
+
+
+def get_dt64_value(obj):
+    return cnp.get_datetime64_value(obj)
+
+
+def get_td64_value(obj):
+    return cnp.get_timedelta64_value(obj)
+
+
+def get_dt64_unit(obj):
+    return cnp.get_datetime64_unit(obj)
+
+
+def is_integer(obj):
+    return isinstance(obj, (cnp.integer, int))
+
+def conv_intp(cnp.intp_t val):
+    return val
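+
+# Hypothetical usage once the extension is built (see the accompanying
+# setup.py / meson.build); illustrative only:
+#
+#     >>> import checks, numpy as np
+#     >>> checks.is_td64(np.timedelta64(1, 'D'))
+#     True
+#     >>> checks.conv_intp(42)
+#     42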
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/cython/meson.build b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/cython/meson.build
new file mode 100644
index 00000000..836b74ac
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/cython/meson.build
@@ -0,0 +1,36 @@
+project('checks', 'c', 'cython')
+
+py = import('python').find_installation(pure: false)
+
+cc = meson.get_compiler('c')
+cy = meson.get_compiler('cython')
+
+if not cy.version().version_compare('>=0.29.35')
+  error('tests require Cython >= 0.29.35')
+endif
+
+npy_include_path = run_command(py, [
+    '-c',
+    'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include()))'
+    ], check: true).stdout().strip()
+
+npy_path = run_command(py, [
+    '-c',
+    'import os; os.chdir(".."); import numpy; print(os.path.dirname(numpy.__file__).removesuffix("numpy"))'
+    ], check: true).stdout().strip()
+
+# TODO: This is a hack due to gh-25135, where cython may not find the right
+#       __init__.pxd file.
+add_project_arguments('-I', npy_path, language : 'cython')
+
+py.extension_module(
+    'checks',
+    'checks.pyx',
+    install: false,
+    c_args: [
+      '-DNPY_NO_DEPRECATED_API=0',  # Cython still uses old NumPy C API
+      # Require 1.25+ to test datetime additions
+      '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION',
+    ],
+    include_directories: [npy_include_path],
+)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/cython/setup.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/cython/setup.py
new file mode 100644
index 00000000..6e34aa77
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/cython/setup.py
@@ -0,0 +1,25 @@
+"""
+Provide python-space access to the functions exposed in numpy/__init__.pxd
+for testing.
+"""
+
+import numpy as np
+from distutils.core import setup
+from Cython.Build import cythonize
+from setuptools.extension import Extension
+import os
+
+macros = [("NPY_NO_DEPRECATED_API", 0)]
+
+checks = Extension(
+    "checks",
+    sources=[os.path.join('.', "checks.pyx")],
+    include_dirs=[np.get_include()],
+    define_macros=macros,
+)
+
+extensions = [checks]
+
+setup(
+    ext_modules=cythonize(extensions)
+)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/limited_api.c b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/limited_api.c
new file mode 100644
index 00000000..698c54c5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/limited_api.c
@@ -0,0 +1,17 @@
+#define Py_LIMITED_API 0x03060000
+
+#include <Python.h>
+#include <numpy/arrayobject.h>
+#include <numpy/ufuncobject.h>
+
+static PyModuleDef moduledef = {
+    .m_base = PyModuleDef_HEAD_INIT,
+    .m_name = "limited_api"
+};
+
+PyMODINIT_FUNC PyInit_limited_api(void)
+{
+    import_array();
+    import_umath();
+    return PyModule_Create(&moduledef);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/setup.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/setup.py
new file mode 100644
index 00000000..18747dc8
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/setup.py
@@ -0,0 +1,22 @@
+"""
+Build an example package using the limited Python C API.
+"""
+
+import numpy as np
+from setuptools import setup, Extension
+import os
+
+macros = [("NPY_NO_DEPRECATED_API", 0), ("Py_LIMITED_API", "0x03060000")]
+
+limited_api = Extension(
+    "limited_api",
+    sources=[os.path.join('.', "limited_api.c")],
+    include_dirs=[np.get_include()],
+    define_macros=macros,
+)
+
+extensions = [limited_api]
+
+setup(
+    ext_modules=extensions
+)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test__exceptions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test__exceptions.py
new file mode 100644
index 00000000..10b87e05
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test__exceptions.py
@@ -0,0 +1,88 @@
+"""
+Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
+"""
+
+import pickle
+
+import pytest
+import numpy as np
+
+_ArrayMemoryError = np.core._exceptions._ArrayMemoryError
+_UFuncNoLoopError = np.core._exceptions._UFuncNoLoopError
+
+class TestArrayMemoryError:
+    def test_pickling(self):
+        """ Test that _ArrayMemoryError can be pickled """
+        error = _ArrayMemoryError((1023,), np.dtype(np.uint8))
+        res = pickle.loads(pickle.dumps(error))
+        assert res._total_size == error._total_size
+
+    def test_str(self):
+        e = _ArrayMemoryError((1023,), np.dtype(np.uint8))
+        str(e)  # not crashing is enough
+
+    # testing these properties is easier than testing the full string repr
+    def test__size_to_string(self):
+        """ Test e._size_to_string """
+        f = _ArrayMemoryError._size_to_string
+        Ki = 1024
+        assert f(0) == '0 bytes'
+        assert f(1) == '1 bytes'
+        assert f(1023) == '1023 bytes'
+        assert f(Ki) == '1.00 KiB'
+        assert f(Ki+1) == '1.00 KiB'
+        assert f(10*Ki) == '10.0 KiB'
+        assert f(int(999.4*Ki)) == '999. KiB'
+        assert f(int(1023.4*Ki)) == '1023. KiB'
+        assert f(int(1023.5*Ki)) == '1.00 MiB'
+        assert f(Ki*Ki) == '1.00 MiB'
+
+        # 1023.9999 MiB should round to 1 GiB
+        assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
+        assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
+        # larger than sys.maxsize, adding larger prefixes isn't going to help
+        # anyway.
+        assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB'
+
+    def test__total_size(self):
+        """ Test e._total_size """
+        e = _ArrayMemoryError((1,), np.dtype(np.uint8))
+        assert e._total_size == 1
+
+        e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
+        assert e._total_size == 1024
+
+
+class TestUFuncNoLoopError:
+    def test_pickling(self):
+        """ Test that _UFuncNoLoopError can be pickled """
+        assert isinstance(pickle.dumps(_UFuncNoLoopError), bytes)
+
+
+@pytest.mark.parametrize("args", [
+    (2, 1, None),
+    (2, 1, "test_prefix"),
+    ("test message",),
+])
+class TestAxisError:
+    def test_attr(self, args):
+        """Validate attribute types."""
+        exc = np.AxisError(*args)
+        if len(args) == 1:
+            assert exc.axis is None
+            assert exc.ndim is None
+        else:
+            axis, ndim, *_ = args
+            assert exc.axis == axis
+            assert exc.ndim == ndim
+
+    def test_pickling(self, args):
+        """Test that `AxisError` can be pickled."""
+        exc = np.AxisError(*args)
+        exc2 = pickle.loads(pickle.dumps(exc))
+
+        assert type(exc) is type(exc2)
+        for name in ("axis", "ndim", "args"):
+            attr1 = getattr(exc, name)
+            attr2 = getattr(exc2, name)
+            assert attr1 == attr2, name
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_abc.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_abc.py
new file mode 100644
index 00000000..8b12d07a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_abc.py
@@ -0,0 +1,54 @@
+from numpy.testing import assert_
+
+import numbers
+
+import numpy as np
+from numpy.core.numerictypes import sctypes
+
+class TestABC:
+    def test_abstract(self):
+        assert_(issubclass(np.number, numbers.Number))
+
+        assert_(issubclass(np.inexact, numbers.Complex))
+        assert_(issubclass(np.complexfloating, numbers.Complex))
+        assert_(issubclass(np.floating, numbers.Real))
+
+        assert_(issubclass(np.integer, numbers.Integral))
+        assert_(issubclass(np.signedinteger, numbers.Integral))
+        assert_(issubclass(np.unsignedinteger, numbers.Integral))
+
+    def test_floats(self):
+        for t in sctypes['float']:
+            assert_(isinstance(t(), numbers.Real),
+                    f"{t.__name__} is not instance of Real")
+            assert_(issubclass(t, numbers.Real),
+                    f"{t.__name__} is not subclass of Real")
+            assert_(not isinstance(t(), numbers.Rational),
+                    f"{t.__name__} is instance of Rational")
+            assert_(not issubclass(t, numbers.Rational),
+                    f"{t.__name__} is subclass of Rational")
+
+    def test_complex(self):
+        for t in sctypes['complex']:
+            assert_(isinstance(t(), numbers.Complex),
+                    f"{t.__name__} is not instance of Complex")
+            assert_(issubclass(t, numbers.Complex),
+                    f"{t.__name__} is not subclass of Complex")
+            assert_(not isinstance(t(), numbers.Real),
+                    f"{t.__name__} is instance of Real")
+            assert_(not issubclass(t, numbers.Real),
+                    f"{t.__name__} is subclass of Real")
+
+    def test_int(self):
+        for t in sctypes['int']:
+            assert_(isinstance(t(), numbers.Integral),
+                    f"{t.__name__} is not instance of Integral")
+            assert_(issubclass(t, numbers.Integral),
+                    f"{t.__name__} is not subclass of Integral")
+
+    def test_uint(self):
+        for t in sctypes['uint']:
+            assert_(isinstance(t(), numbers.Integral),
+                    f"{t.__name__} is not instance of Integral")
+            assert_(issubclass(t, numbers.Integral),
+                    f"{t.__name__} is not subclass of Integral")
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_api.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_api.py
new file mode 100644
index 00000000..0d922869
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_api.py
@@ -0,0 +1,615 @@
+import sys
+
+import numpy as np
+from numpy.core._rational_tests import rational
+import pytest
+from numpy.testing import (
+     assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
+     HAS_REFCOUNT
+    )
+
+
+def test_array_array():
+    tobj = type(object)
+    ones11 = np.ones((1, 1), np.float64)
+    tndarray = type(ones11)
+    # Test is_ndarray
+    assert_equal(np.array(ones11, dtype=np.float64), ones11)
+    if HAS_REFCOUNT:
+        old_refcount = sys.getrefcount(tndarray)
+        np.array(ones11)
+        assert_equal(old_refcount, sys.getrefcount(tndarray))
+
+    # test None
+    assert_equal(np.array(None, dtype=np.float64),
+                 np.array(np.nan, dtype=np.float64))
+    if HAS_REFCOUNT:
+        old_refcount = sys.getrefcount(tobj)
+        np.array(None, dtype=np.float64)
+        assert_equal(old_refcount, sys.getrefcount(tobj))
+
+    # test scalar
+    assert_equal(np.array(1.0, dtype=np.float64),
+                 np.ones((), dtype=np.float64))
+    if HAS_REFCOUNT:
+        old_refcount = sys.getrefcount(np.float64)
+        np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
+        assert_equal(old_refcount, sys.getrefcount(np.float64))
+
+    # test string
+    S2 = np.dtype((bytes, 2))
+    S3 = np.dtype((bytes, 3))
+    S5 = np.dtype((bytes, 5))
+    assert_equal(np.array(b"1.0", dtype=np.float64),
+                 np.ones((), dtype=np.float64))
+    assert_equal(np.array(b"1.0").dtype, S3)
+    assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3)
+    assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1."))
+    assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5))
+
+    # test string
+    U2 = np.dtype((str, 2))
+    U3 = np.dtype((str, 3))
+    U5 = np.dtype((str, 5))
+    assert_equal(np.array("1.0", dtype=np.float64),
+                 np.ones((), dtype=np.float64))
+    assert_equal(np.array("1.0").dtype, U3)
+    assert_equal(np.array("1.0", dtype=str).dtype, U3)
+    assert_equal(np.array("1.0", dtype=U2), np.array(str("1.")))
+    assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5))
+
+    builtins = getattr(__builtins__, '__dict__', __builtins__)
+    assert_(hasattr(builtins, 'get'))
+
+    # test memoryview
+    dat = np.array(memoryview(b'1.0'), dtype=np.float64)
+    assert_equal(dat, [49.0, 46.0, 48.0])
+    assert_(dat.dtype.type is np.float64)
+
+    dat = np.array(memoryview(b'1.0'))
+    assert_equal(dat, [49, 46, 48])
+    assert_(dat.dtype.type is np.uint8)
+
+    # test array interface
+    a = np.array(100.0, dtype=np.float64)
+    o = type("o", (object,),
+             dict(__array_interface__=a.__array_interface__))
+    assert_equal(np.array(o, dtype=np.float64), a)
+
+    # test array_struct interface
+    a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
+                 dtype=[('f0', int), ('f1', float), ('f2', str)])
+    o = type("o", (object,),
+             dict(__array_struct__=a.__array_struct__))
+    ## wasn't what I expected... is np.array(o) supposed to equal a?
+    ## instead we get an array([...], dtype=">V18")
+    assert_equal(bytes(np.array(o).data), bytes(a.data))
+
+    # test array
+    o = type("o", (object,),
+             dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))()
+    assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64))
+
+    # test recursion
+    nested = 1.5
+    for i in range(np.MAXDIMS):
+        nested = [nested]
+
+    # no error
+    np.array(nested)
+
+    # Exceeds recursion limit
+    assert_raises(ValueError, np.array, [nested], dtype=np.float64)
+
+    # Try with lists...
+    # float32
+    assert_equal(np.array([None] * 10, dtype=np.float32),
+                 np.full((10,), np.nan, dtype=np.float32))
+    assert_equal(np.array([[None]] * 10, dtype=np.float32),
+                 np.full((10, 1), np.nan, dtype=np.float32))
+    assert_equal(np.array([[None] * 10], dtype=np.float32),
+                 np.full((1, 10), np.nan, dtype=np.float32))
+    assert_equal(np.array([[None] * 10] * 10, dtype=np.float32),
+                 np.full((10, 10), np.nan, dtype=np.float32))
+    # float64
+    assert_equal(np.array([None] * 10, dtype=np.float64),
+                 np.full((10,), np.nan, dtype=np.float64))
+    assert_equal(np.array([[None]] * 10, dtype=np.float64),
+                 np.full((10, 1), np.nan, dtype=np.float64))
+    assert_equal(np.array([[None] * 10], dtype=np.float64),
+                 np.full((1, 10), np.nan, dtype=np.float64))
+    assert_equal(np.array([[None] * 10] * 10, dtype=np.float64),
+                 np.full((10, 10), np.nan, dtype=np.float64))
+
+    assert_equal(np.array([1.0] * 10, dtype=np.float64),
+                 np.ones((10,), dtype=np.float64))
+    assert_equal(np.array([[1.0]] * 10, dtype=np.float64),
+                 np.ones((10, 1), dtype=np.float64))
+    assert_equal(np.array([[1.0] * 10], dtype=np.float64),
+                 np.ones((1, 10), dtype=np.float64))
+    assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64),
+                 np.ones((10, 10), dtype=np.float64))
+
+    # Try with tuples
+    assert_equal(np.array((None,) * 10, dtype=np.float64),
+                 np.full((10,), np.nan, dtype=np.float64))
+    assert_equal(np.array([(None,)] * 10, dtype=np.float64),
+                 np.full((10, 1), np.nan, dtype=np.float64))
+    assert_equal(np.array([(None,) * 10], dtype=np.float64),
+                 np.full((1, 10), np.nan, dtype=np.float64))
+    assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64),
+                 np.full((10, 10), np.nan, dtype=np.float64))
+
+    assert_equal(np.array((1.0,) * 10, dtype=np.float64),
+                 np.ones((10,), dtype=np.float64))
+    assert_equal(np.array([(1.0,)] * 10, dtype=np.float64),
+                 np.ones((10, 1), dtype=np.float64))
+    assert_equal(np.array([(1.0,) * 10], dtype=np.float64),
+                 np.ones((1, 10), dtype=np.float64))
+    assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
+                 np.ones((10, 10), dtype=np.float64))
+
+@pytest.mark.parametrize("array", [True, False])
+def test_array_impossible_casts(array):
+    # All builtin types can be forcibly cast, at least theoretically,
+    # but user dtypes cannot necessarily.
+    rt = rational(1, 2)
+    if array:
+        rt = np.array(rt)
+    with assert_raises(TypeError):
+        np.array(rt, dtype="M8")
+
+
+# TODO: remove when fastCopyAndTranspose deprecation expires
+@pytest.mark.parametrize("a",
+    (
+        np.array(2),  # 0D array
+        np.array([3, 2, 7, 0]),  # 1D array
+        np.arange(6).reshape(2, 3)  # 2D array
+    ),
+)
+def test_fastCopyAndTranspose(a):
+    with pytest.deprecated_call():
+        b = np.fastCopyAndTranspose(a)
+        assert_equal(b, a.T)
+        assert b.flags.owndata
+
+
+def test_array_astype():
+    a = np.arange(6, dtype='f4').reshape(2, 3)
+    # Default behavior: allows unsafe casts, keeps memory layout,
+    #                   always copies.
+    b = a.astype('i4')
+    assert_equal(a, b)
+    assert_equal(b.dtype, np.dtype('i4'))
+    assert_equal(a.strides, b.strides)
+    b = a.T.astype('i4')
+    assert_equal(a.T, b)
+    assert_equal(b.dtype, np.dtype('i4'))
+    assert_equal(a.T.strides, b.strides)
+    b = a.astype('f4')
+    assert_equal(a, b)
+    assert_(not (a is b))
+
+    # copy=False parameter can sometimes skip a copy
+    b = a.astype('f4', copy=False)
+    assert_(a is b)
+
+    # order parameter allows overriding of the memory layout,
+    # forcing a copy if the layout is wrong
+    b = a.astype('f4', order='F', copy=False)
+    assert_equal(a, b)
+    assert_(not (a is b))
+    assert_(b.flags.f_contiguous)
+
+    b = a.astype('f4', order='C', copy=False)
+    assert_equal(a, b)
+    assert_(a is b)
+    assert_(b.flags.c_contiguous)
+
+    # casting parameter allows catching bad casts
+    b = a.astype('c8', casting='safe')
+    assert_equal(a, b)
+    assert_equal(b.dtype, np.dtype('c8'))
+
+    assert_raises(TypeError, a.astype, 'i4', casting='safe')
+
+    # subok=False passes through a non-subclassed array
+    b = a.astype('f4', subok=0, copy=False)
+    assert_(a is b)
+
+    class MyNDArray(np.ndarray):
+        pass
+
+    a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)
+
+    # subok=True passes through a subclass
+    b = a.astype('f4', subok=True, copy=False)
+    assert_(a is b)
+
+    # subok=True is default, and creates a subtype on a cast
+    b = a.astype('i4', copy=False)
+    assert_equal(a, b)
+    assert_equal(type(b), MyNDArray)
+
+    # subok=False never returns a subclass
+    b = a.astype('f4', subok=False, copy=False)
+    assert_equal(a, b)
+    assert_(not (a is b))
+    assert_(type(b) is not MyNDArray)
+
+    # Make sure converting from string object to fixed length string
+    # does not truncate.
+    a = np.array([b'a'*100], dtype='O')
+    b = a.astype('S')
+    assert_equal(a, b)
+    assert_equal(b.dtype, np.dtype('S100'))
+    a = np.array(['a'*100], dtype='O')
+    b = a.astype('U')
+    assert_equal(a, b)
+    assert_equal(b.dtype, np.dtype('U100'))
+
+    # Same test as above but for strings shorter than 64 characters
+    a = np.array([b'a'*10], dtype='O')
+    b = a.astype('S')
+    assert_equal(a, b)
+    assert_equal(b.dtype, np.dtype('S10'))
+    a = np.array(['a'*10], dtype='O')
+    b = a.astype('U')
+    assert_equal(a, b)
+    assert_equal(b.dtype, np.dtype('U10'))
+
+    a = np.array(123456789012345678901234567890, dtype='O').astype('S')
+    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
+    a = np.array(123456789012345678901234567890, dtype='O').astype('U')
+    assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))
+
+    a = np.array([123456789012345678901234567890], dtype='O').astype('S')
+    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
+    a = np.array([123456789012345678901234567890], dtype='O').astype('U')
+    assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))
+
+    a = np.array(123456789012345678901234567890, dtype='S')
+    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
+    a = np.array(123456789012345678901234567890, dtype='U')
+    assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))
+
+    a = np.array('a\u0140', dtype='U')
+    b = np.ndarray(buffer=a, dtype='uint32', shape=2)
+    assert_(b.size == 2)
+
+    a = np.array([1000], dtype='i4')
+    assert_raises(TypeError, a.astype, 'S1', casting='safe')
+
+    a = np.array(1000, dtype='i4')
+    assert_raises(TypeError, a.astype, 'U1', casting='safe')
+
+    # gh-24023
+    assert_raises(TypeError, a.astype)
+
+@pytest.mark.parametrize("dt", ["S", "U"])
+def test_array_astype_to_string_discovery_empty(dt):
+    # See also gh-19085
+    arr = np.array([""], dtype=object)
+    # Note: the itemsize comes from the `0 -> 1` logic, which may change.
+    # The important part of the test is rather that it does not error.
+    assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize
+
+    # check the same thing for `np.can_cast` (since it accepts arrays)
+    assert np.can_cast(arr, dt, casting="unsafe")
+    assert not np.can_cast(arr, dt, casting="same_kind")
+    # as well as for the object as a descriptor:
+    assert np.can_cast("O", dt, casting="unsafe")
+
+@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"])
+def test_array_astype_to_void(dt):
+    dt = np.dtype(dt)
+    arr = np.array([], dtype=dt)
+    assert arr.astype("V").dtype.itemsize == dt.itemsize
+
+def test_object_array_astype_to_void():
+    # This is different to `test_array_astype_to_void` as object arrays
+    # are inspected.  The default void is "V8" (8 is the length of double)
+    arr = np.array([], dtype="O").astype("V")
+    assert arr.dtype == "V8"
+
+@pytest.mark.parametrize("t",
+    np.sctypes['uint'] + np.sctypes['int'] + np.sctypes['float']
+)
+def test_array_astype_warning(t):
+    # test ComplexWarning when casting from complex to float or int
+    a = np.array(10, dtype=np.complex_)
+    assert_warns(np.ComplexWarning, a.astype, t)
+
+@pytest.mark.parametrize(["dtype", "out_dtype"],
+        [(np.bytes_, np.bool_),
+         (np.str_, np.bool_),
+         (np.dtype("S10,S9"), np.dtype("?,?"))])
+def test_string_to_boolean_cast(dtype, out_dtype):
+    """
+    Currently, for `astype`, strings are cast to booleans effectively by
+    calling `bool(int(string))`. This is not consistent (see gh-9875) and
+    will eventually be deprecated.
+    """
+    arr = np.array(["10", "10\0\0\0", "0\0\0", "0"], dtype=dtype)
+    expected = np.array([True, True, False, False], dtype=out_dtype)
+    assert_array_equal(arr.astype(out_dtype), expected)
+
+@pytest.mark.parametrize(["dtype", "out_dtype"],
+        [(np.bytes_, np.bool_),
+         (np.str_, np.bool_),
+         (np.dtype("S10,S9"), np.dtype("?,?"))])
+def test_string_to_boolean_cast_errors(dtype, out_dtype):
+    """
+    These currently error out, since cast to integers fails, but should not
+    error out in the future.
+    """
+    for invalid in ["False", "True", "", "\0", "non-empty"]:
+        arr = np.array([invalid], dtype=dtype)
+        with assert_raises(ValueError):
+            arr.astype(out_dtype)
+
+@pytest.mark.parametrize("str_type", [str, bytes, np.str_, np.unicode_])
+@pytest.mark.parametrize("scalar_type",
+        [np.complex64, np.complex128, np.clongdouble])
+def test_string_to_complex_cast(str_type, scalar_type):
+    value = scalar_type(b"1+3j")
+    assert scalar_type(value) == 1+3j
+    assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j
+    assert np.array(value).astype(scalar_type)[()] == 1+3j
+    arr = np.zeros(1, dtype=scalar_type)
+    arr[0] = value
+    assert arr[0] == 1+3j
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+def test_none_to_nan_cast(dtype):
+    # Note that at the time of writing this test, the scalar constructors
+    # reject None
+    arr = np.zeros(1, dtype=dtype)
+    arr[0] = None
+    assert np.isnan(arr)[0]
+    assert np.isnan(np.array(None, dtype=dtype))[()]
+    assert np.isnan(np.array([None], dtype=dtype))[0]
+    assert np.isnan(np.array(None).astype(dtype))[()]
+
+def test_copyto_fromscalar():
+    a = np.arange(6, dtype='f4').reshape(2, 3)
+
+    # Simple copy
+    np.copyto(a, 1.5)
+    assert_equal(a, 1.5)
+    np.copyto(a.T, 2.5)
+    assert_equal(a, 2.5)
+
+    # Where-masked copy
+    mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
+    np.copyto(a, 3.5, where=mask)
+    assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
+    mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
+    np.copyto(a.T, 4.5, where=mask)
+    assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
+
+def test_copyto():
+    a = np.arange(6, dtype='i4').reshape(2, 3)
+
+    # Simple copy
+    np.copyto(a, [[3, 1, 5], [6, 2, 1]])
+    assert_equal(a, [[3, 1, 5], [6, 2, 1]])
+
+    # Overlapping copy should work
+    np.copyto(a[:, :2], a[::-1, 1::-1])
+    assert_equal(a, [[2, 6, 5], [1, 3, 1]])
+
+    # Defaults to 'same_kind' casting
+    assert_raises(TypeError, np.copyto, a, 1.5)
+
+    # Force a copy with 'unsafe' casting, truncating 1.5 to 1
+    np.copyto(a, 1.5, casting='unsafe')
+    assert_equal(a, 1)
+
+    # Copying with a mask
+    np.copyto(a, 3, where=[True, False, True])
+    assert_equal(a, [[3, 1, 3], [3, 1, 3]])
+
+    # Casting rule still applies with a mask
+    assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True])
+
+    # Lists of integer 0's and 1's are ok too
+    np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
+    assert_equal(a, [[3, 4, 4], [4, 1, 3]])
+
+    # Overlapping copy with mask should work
+    np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]])
+    assert_equal(a, [[3, 4, 4], [4, 3, 3]])
+
+    # 'dst' must be an array
+    assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
+
+def test_copyto_permut():
+    # test explicit overflow case
+    pad = 500
+    l = [True] * pad + [True, True, True, True]
+    r = np.zeros(len(l)-pad)
+    d = np.ones(len(l)-pad)
+    mask = np.array(l)[pad:]
+    np.copyto(r, d, where=mask[::-1])
+
+    # test all permutations of possible masks; 9 should be sufficient for
+    # the current 4-byte unrolled code
+    power = 9
+    d = np.ones(power)
+    for i in range(2**power):
+        r = np.zeros(power)
+        l = [(i & x) != 0 for x in range(power)]
+        mask = np.array(l)
+        np.copyto(r, d, where=mask)
+        assert_array_equal(r == 1, l)
+        assert_equal(r.sum(), sum(l))
+
+        r = np.zeros(power)
+        np.copyto(r, d, where=mask[::-1])
+        assert_array_equal(r == 1, l[::-1])
+        assert_equal(r.sum(), sum(l))
+
+        r = np.zeros(power)
+        np.copyto(r[::2], d[::2], where=mask[::2])
+        assert_array_equal(r[::2] == 1, l[::2])
+        assert_equal(r[::2].sum(), sum(l[::2]))
+
+        r = np.zeros(power)
+        np.copyto(r[::2], d[::2], where=mask[::-2])
+        assert_array_equal(r[::2] == 1, l[::-2])
+        assert_equal(r[::2].sum(), sum(l[::-2]))
+
+        for c in [0xFF, 0x7F, 0x02, 0x10]:
+            r = np.zeros(power)
+            mask = np.array(l)
+            imask = np.array(l).view(np.uint8)
+            imask[mask != 0] = c
+            np.copyto(r, d, where=mask)
+            assert_array_equal(r == 1, l)
+            assert_equal(r.sum(), sum(l))
+
+    r = np.zeros(power)
+    np.copyto(r, d, where=True)
+    assert_equal(r.sum(), r.size)
+    r = np.ones(power)
+    d = np.zeros(power)
+    np.copyto(r, d, where=False)
+    assert_equal(r.sum(), r.size)
+
+def test_copy_order():
+    a = np.arange(24).reshape(2, 1, 3, 4)
+    b = a.copy(order='F')
+    c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3)
+
+    def check_copy_result(x, y, ccontig, fcontig, strides=False):
+        assert_(not (x is y))
+        assert_equal(x, y)
+        assert_equal(x.flags.c_contiguous, ccontig)
+        assert_equal(x.flags.f_contiguous, fcontig)
+
+    # Validate the initial state of a, b, and c
+    assert_(a.flags.c_contiguous)
+    assert_(not a.flags.f_contiguous)
+    assert_(not b.flags.c_contiguous)
+    assert_(b.flags.f_contiguous)
+    assert_(not c.flags.c_contiguous)
+    assert_(not c.flags.f_contiguous)
+
+    # Copy with order='C'
+    res = a.copy(order='C')
+    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+    res = b.copy(order='C')
+    check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
+    res = c.copy(order='C')
+    check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
+    res = np.copy(a, order='C')
+    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+    res = np.copy(b, order='C')
+    check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
+    res = np.copy(c, order='C')
+    check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
+
+    # Copy with order='F'
+    res = a.copy(order='F')
+    check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
+    res = b.copy(order='F')
+    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+    res = c.copy(order='F')
+    check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
+    res = np.copy(a, order='F')
+    check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
+    res = np.copy(b, order='F')
+    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+    res = np.copy(c, order='F')
+    check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
+
+    # Copy with order='K'
+    res = a.copy(order='K')
+    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+    res = b.copy(order='K')
+    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+    res = c.copy(order='K')
+    check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
+    res = np.copy(a, order='K')
+    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+    res = np.copy(b, order='K')
+    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+    res = np.copy(c, order='K')
+    check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
+
+def test_contiguous_flags():
+    a = np.ones((4, 4, 1))[::2,:,:]
+    a.strides = a.strides[:2] + (-123,)
+    b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4)
+
+    def check_contig(a, ccontig, fcontig):
+        assert_(a.flags.c_contiguous == ccontig)
+        assert_(a.flags.f_contiguous == fcontig)
+
+    # Check if new arrays are correct:
+    check_contig(a, False, False)
+    check_contig(b, False, False)
+    check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
+    check_contig(np.array([[[1], [2]]], order='F'), True, True)
+    check_contig(np.empty((2, 2)), True, False)
+    check_contig(np.empty((2, 2), order='F'), False, True)
+
+    # Check that np.array creates correct contiguous flags:
+    check_contig(np.array(a, copy=False), False, False)
+    check_contig(np.array(a, copy=False, order='C'), True, False)
+    check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True)
+
+    # Check slicing update of flags:
+    check_contig(a[0], True, True)
+    check_contig(a[None, ::4, ..., None], True, True)
+    check_contig(b[0, 0, ...], False, True)
+    check_contig(b[:, :, 0:0, :, :], True, True)
+
+    # Test ravel and squeeze.
+    check_contig(a.ravel(), True, True)
+    check_contig(np.ones((1, 3, 1)).squeeze(), True, True)
+
+def test_broadcast_arrays():
+    # Test user defined dtypes
+    a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
+    b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
+    result = np.broadcast_arrays(a, b)
+    assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
+    assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
+
+@pytest.mark.parametrize(["shape", "fill_value", "expected_output"],
+        [((2, 2), [5.0,  6.0], np.array([[5.0, 6.0], [5.0, 6.0]])),
+         ((3, 2), [1.0,  2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0,  2.0]]))])
+def test_full_from_list(shape, fill_value, expected_output):
+    output = np.full(shape, fill_value)
+    assert_equal(output, expected_output)
+
+def test_astype_copyflag():
+    # test the various copyflag options
+    arr = np.arange(10, dtype=np.intp)
+
+    res_true = arr.astype(np.intp, copy=True)
+    assert not np.may_share_memory(arr, res_true)
+    res_always = arr.astype(np.intp, copy=np._CopyMode.ALWAYS)
+    assert not np.may_share_memory(arr, res_always)
+
+    res_false = arr.astype(np.intp, copy=False)
+    # `res_false is arr` currently, but check `may_share_memory`.
+    assert np.may_share_memory(arr, res_false)
+    res_if_needed = arr.astype(np.intp, copy=np._CopyMode.IF_NEEDED)
+    # `res_if_needed is arr` currently, but check `may_share_memory`.
+    assert np.may_share_memory(arr, res_if_needed)
+
+    res_never = arr.astype(np.intp, copy=np._CopyMode.NEVER)
+    assert np.may_share_memory(arr, res_never)
+
+    # Simple tests for when a copy is necessary:
+    res_false = arr.astype(np.float64, copy=False)
+    assert_array_equal(res_false, arr)
+    res_if_needed = arr.astype(np.float64,
+                               copy=np._CopyMode.IF_NEEDED)
+    assert_array_equal(res_if_needed, arr)
+    assert_raises(ValueError, arr.astype, np.float64,
+                  copy=np._CopyMode.NEVER)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_argparse.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_argparse.py
new file mode 100644
index 00000000..fae22702
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_argparse.py
@@ -0,0 +1,62 @@
+"""
+Tests for the private NumPy argument parsing functionality.
+They mainly exist to ensure good test coverage of the weirder cases in one
+place, without having to exercise them on actual numpy functions.
+
+The test function is defined in C to be equivalent to (errors may not always
+match exactly, and could be adjusted):
+
+    def func(arg1, /, arg2, *, arg3):
+        i = integer(arg1)  # reproducing the 'i' parsing in Python.
+        return None
+"""
+
+import pytest
+
+import numpy as np
+from numpy.core._multiarray_tests import argparse_example_function as func
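+import operator
+
+
+# Editorial sketch (not part of the original module): a rough pure-Python
+# equivalent of the C-level `func` imported above; `_py_func` is an
+# illustrative name only, and the real C parser's error messages differ.
+def _py_func(arg1, /, arg2, *, arg3=None):
+    operator.index(arg1)  # like the 'i' parsing: rejects floats, accepts ints
+    return None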
+
+
+def test_invalid_integers():
+    with pytest.raises(TypeError,
+            match="integer argument expected, got float"):
+        func(1.)
+    with pytest.raises(OverflowError):
+        func(2**100)
+
+
+def test_missing_arguments():
+    with pytest.raises(TypeError,
+            match="missing required positional argument 0"):
+        func()
+    with pytest.raises(TypeError,
+            match="missing required positional argument 0"):
+        func(arg2=1, arg3=4)
+    with pytest.raises(TypeError,
+            match=r"missing required argument \'arg2\' \(pos 1\)"):
+        func(1, arg3=5)
+
+
+def test_too_many_positional():
+    # the second argument is positional but can be passed as keyword.
+    with pytest.raises(TypeError,
+            match="takes from 2 to 3 positional arguments but 4 were given"):
+        func(1, 2, 3, 4)
+
+
+def test_multiple_values():
+    with pytest.raises(TypeError,
+            match=r"given by name \('arg2'\) and position \(position 1\)"):
+        func(1, 2, arg2=3)
+
+
+def test_string_fallbacks():
+    # We can (currently?) use numpy strings to test the "slow" fallbacks
+    # that should normally not be taken due to string interning.
+    arg2 = np.str_("arg2")
+    missing_arg = np.str_("missing_arg")
+    func(1, **{arg2: 3})
+    with pytest.raises(TypeError,
+            match="got an unexpected keyword argument 'missing_arg'"):
+        func(2, **{missing_arg: 3})
+
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_array_coercion.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_array_coercion.py
new file mode 100644
index 00000000..629bfce5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_array_coercion.py
@@ -0,0 +1,898 @@
+"""
+Tests for array coercion, mainly through testing `np.array` results directly.
+Note that other such tests exist, e.g., in `test_api.py` and many corner-cases
+are tested (sometimes indirectly) elsewhere.
+"""
+
+from itertools import permutations, product
+
+import pytest
+from pytest import param
+
+import numpy as np
+from numpy.core._rational_tests import rational
+from numpy.core._multiarray_umath import _discover_array_parameters
+
+from numpy.testing import (
+    assert_array_equal, assert_warns, IS_PYPY)
+
+
+def arraylikes():
+    """
+    Generator for functions converting an array into various array-likes.
+    """
+    # base array:
+    def ndarray(a):
+        return a
+
+    yield param(ndarray, id="ndarray")
+
+    # subclass:
+    class MyArr(np.ndarray):
+        pass
+
+    def subclass(a):
+        return a.view(MyArr)
+
+    yield subclass
+
+    class _SequenceLike:
+        # Older NumPy versions sometimes cared whether a protocol array was
+        # also sequence-like.  This shouldn't matter, but keep it for now
+        # for __array__ and not the others.
+        def __len__(self):
+            raise TypeError
+
+        def __getitem__(self):
+            raise TypeError
+
+    # __array__ protocol
+    class ArrayDunder(_SequenceLike):
+        def __init__(self, a):
+            self.a = a
+
+        def __array__(self, dtype=None):
+            return self.a
+
+    yield param(ArrayDunder, id="__array__")
+
+    # memory-view
+    yield param(memoryview, id="memoryview")
+
+    # Array-interface
+    class ArrayInterface:
+        def __init__(self, a):
+            self.a = a  # need to hold on to keep interface valid
+            self.__array_interface__ = a.__array_interface__
+
+    yield param(ArrayInterface, id="__array_interface__")
+
+    # Array-Struct
+    class ArrayStruct:
+        def __init__(self, a):
+            self.a = a  # need to hold on to keep struct valid
+            self.__array_struct__ = a.__array_struct__
+
+    yield param(ArrayStruct, id="__array_struct__")
+
+
+def scalar_instances(times=True, extended_precision=True, user_dtype=True):
+    # Hard-coded list of scalar instances.
+    # Floats:
+    yield param(np.sqrt(np.float16(5)), id="float16")
+    yield param(np.sqrt(np.float32(5)), id="float32")
+    yield param(np.sqrt(np.float64(5)), id="float64")
+    if extended_precision:
+        yield param(np.sqrt(np.longdouble(5)), id="longdouble")
+
+    # Complex:
+    yield param(np.sqrt(np.complex64(2+3j)), id="complex64")
+    yield param(np.sqrt(np.complex128(2+3j)), id="complex128")
+    if extended_precision:
+        yield param(np.sqrt(np.longcomplex(2+3j)), id="clongdouble")
+
+    # Bool:
+    # XFAIL: Bool should be added, but has some bad properties when it
+    # comes to strings, see also gh-9875
+    # yield param(np.bool_(0), id="bool")
+
+    # Integers:
+    yield param(np.int8(2), id="int8")
+    yield param(np.int16(2), id="int16")
+    yield param(np.int32(2), id="int32")
+    yield param(np.int64(2), id="int64")
+
+    yield param(np.uint8(2), id="uint8")
+    yield param(np.uint16(2), id="uint16")
+    yield param(np.uint32(2), id="uint32")
+    yield param(np.uint64(2), id="uint64")
+
+    # Rational:
+    if user_dtype:
+        yield param(rational(1, 2), id="rational")
+
+    # Cannot create a structured void scalar directly:
+    structured = np.array([(1, 3)], "i,i")[0]
+    assert isinstance(structured, np.void)
+    assert structured.dtype == np.dtype("i,i")
+    yield param(structured, id="structured")
+
+    if times:
+        # Datetimes and timedelta
+        yield param(np.timedelta64(2), id="timedelta64[generic]")
+        yield param(np.timedelta64(23, "s"), id="timedelta64[s]")
+        yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)")
+
+        yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)")
+        yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]")
+
+    # Strings and unstructured void:
+    yield param(np.bytes_(b"1234"), id="bytes")
+    yield param(np.str_("2345"), id="unicode")
+    yield param(np.void(b"4321"), id="unstructured_void")
+
+
+def is_parametric_dtype(dtype):
+    """Returns True if the dtype is a parametric legacy dtype (itemsize
+    is 0, or a datetime without units)
+    """
+    if dtype.itemsize == 0:
+        return True
+    if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
+        if dtype.name.endswith("64"):
+            # Generic time units
+            return True
+    return False
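+
+
+# Editorial sketch: concrete inputs for the predicate above -- flexible
+# dtypes with itemsize 0 and unitless datetimes count as parametric.
+def _parametric_dtype_examples():
+    assert is_parametric_dtype(np.dtype("S"))        # itemsize 0
+    assert is_parametric_dtype(np.dtype("M8"))       # generic datetime64
+    assert not is_parametric_dtype(np.dtype("M8[s]"))
+    assert not is_parametric_dtype(np.dtype("f8"))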
+
+
+class TestStringDiscovery:
+    @pytest.mark.parametrize("obj",
+            [object(), 1.2, 10**43, None, "string"],
+            ids=["object", "1.2", "10**43", "None", "string"])
+    def test_basic_stringlength(self, obj):
+        length = len(str(obj))
+        expected = np.dtype(f"S{length}")
+
+        assert np.array(obj, dtype="S").dtype == expected
+        assert np.array([obj], dtype="S").dtype == expected
+
+        # A nested array is also discovered correctly
+        arr = np.array(obj, dtype="O")
+        assert np.array(arr, dtype="S").dtype == expected
+        # Also if we use the dtype class
+        assert np.array(arr, dtype=type(expected)).dtype == expected
+        # Check that .astype() behaves identical
+        assert arr.astype("S").dtype == expected
+        # The DType class is accepted by `.astype()`
+        assert arr.astype(type(np.dtype("S"))).dtype == expected
+
+    @pytest.mark.parametrize("obj",
+            [object(), 1.2, 10**43, None, "string"],
+            ids=["object", "1.2", "10**43", "None", "string"])
+    def test_nested_arrays_stringlength(self, obj):
+        length = len(str(obj))
+        expected = np.dtype(f"S{length}")
+        arr = np.array(obj, dtype="O")
+        assert np.array([arr, arr], dtype="S").dtype == expected
+
+    @pytest.mark.parametrize("arraylike", arraylikes())
+    def test_unpack_first_level(self, arraylike):
+        # We unpack exactly one level of array likes
+        obj = np.array([None])
+        obj[0] = np.array(1.2)
+        # the length of the included item, not of the float dtype
+        length = len(str(obj[0]))
+        expected = np.dtype(f"S{length}")
+
+        obj = arraylike(obj)
+        # casting to string usually calls str(obj)
+        arr = np.array([obj], dtype="S")
+        assert arr.shape == (1, 1)
+        assert arr.dtype == expected
+
+
+class TestScalarDiscovery:
+    def test_void_special_case(self):
+        # Void dtypes with structures discover tuples as elements
+        arr = np.array((1, 2, 3), dtype="i,i,i")
+        assert arr.shape == ()
+        arr = np.array([(1, 2, 3)], dtype="i,i,i")
+        assert arr.shape == (1,)
+
+    def test_char_special_case(self):
+        arr = np.array("string", dtype="c")
+        assert arr.shape == (6,)
+        assert arr.dtype.char == "c"
+        arr = np.array(["string"], dtype="c")
+        assert arr.shape == (1, 6)
+        assert arr.dtype.char == "c"
+
+    def test_char_special_case_deep(self):
+        # Check that the character special case errors correctly if the
+        # array is too deep:
+        nested = ["string"]  # 2 dimensions (due to string being sequence)
+        for i in range(np.MAXDIMS - 2):
+            nested = [nested]
+
+        arr = np.array(nested, dtype='c')
+        assert arr.shape == (1,) * (np.MAXDIMS - 1) + (6,)
+        with pytest.raises(ValueError):
+            np.array([nested], dtype="c")
+
+    def test_unknown_object(self):
+        arr = np.array(object())
+        assert arr.shape == ()
+        assert arr.dtype == np.dtype("O")
+
+    @pytest.mark.parametrize("scalar", scalar_instances())
+    def test_scalar(self, scalar):
+        arr = np.array(scalar)
+        assert arr.shape == ()
+        assert arr.dtype == scalar.dtype
+
+        arr = np.array([[scalar, scalar]])
+        assert arr.shape == (1, 2)
+        assert arr.dtype == scalar.dtype
+
+    # In addition to strings, this test also runs into a corner case
+    # with datetime promotion (the difference is the promotion order).
+    @pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning")
+    def test_scalar_promotion(self):
+        for sc1, sc2 in product(scalar_instances(), scalar_instances()):
+            sc1, sc2 = sc1.values[0], sc2.values[0]
+            # test all combinations:
+            try:
+                arr = np.array([sc1, sc2])
+            except (TypeError, ValueError):
+                # The promotion between two times can fail
+                # XFAIL (ValueError): Some object casts are currently undefined
+                continue
+            assert arr.shape == (2,)
+            try:
+                dt1, dt2 = sc1.dtype, sc2.dtype
+                expected_dtype = np.promote_types(dt1, dt2)
+                assert arr.dtype == expected_dtype
+            except TypeError:
+                # Will currently always go to object dtype
+                assert arr.dtype == np.dtype("O")
+
+    @pytest.mark.parametrize("scalar", scalar_instances())
+    def test_scalar_coercion(self, scalar):
+        # This tests various scalar coercion paths, mainly for the numerical
+        # types. It includes some paths not directly related to `np.array`.
+        if isinstance(scalar, np.inexact):
+            # Ensure we have a full-precision number if available
+            scalar = type(scalar)((scalar * 2)**0.5)
+
+        if type(scalar) is rational:
+            # Rational generally fails due to a missing cast. In the future
+            # object casts should automatically be defined based on `setitem`.
+            pytest.xfail("Rational to object cast is undefined currently.")
+
+        # Use casting from object:
+        arr = np.array(scalar, dtype=object).astype(scalar.dtype)
+
+        # Test various ways to create an array containing this scalar:
+        arr1 = np.array(scalar).reshape(1)
+        arr2 = np.array([scalar])
+        arr3 = np.empty(1, dtype=scalar.dtype)
+        arr3[0] = scalar
+        arr4 = np.empty(1, dtype=scalar.dtype)
+        arr4[:] = [scalar]
+        # All of these methods should yield the same results
+        assert_array_equal(arr, arr1)
+        assert_array_equal(arr, arr2)
+        assert_array_equal(arr, arr3)
+        assert_array_equal(arr, arr4)
+
+    @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
+    @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
+    @pytest.mark.parametrize("cast_to", scalar_instances())
+    def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
+        """
+        Test that in most cases:
+           * `np.array(scalar, dtype=dtype)`
+           * `np.empty((), dtype=dtype)[()] = scalar`
+           * `np.array(scalar).astype(dtype)`
+        should behave the same.  The only exceptions are parametric dtypes
+        (mainly datetime/timedelta without unit) and void without fields.
+        """
+        dtype = cast_to.dtype  # use to parametrize only the target dtype
+
+        for scalar in scalar_instances(times=False):
+            scalar = scalar.values[0]
+
+            if dtype.type == np.void:
+                if scalar.dtype.fields is not None and dtype.fields is None:
+                    # Here, coercion to "V6" works, but the cast fails.
+                    # Since the types are identical, SETITEM takes care of
+                    # this, but has different rules than the cast.
+                    with pytest.raises(TypeError):
+                        np.array(scalar).astype(dtype)
+                    np.array(scalar, dtype=dtype)
+                    np.array([scalar], dtype=dtype)
+                    continue
+
+            # The main test: we first try to use casting and, if it succeeds,
+            # continue below testing that things are the same; otherwise we
+            # test that the alternative paths at least also fail.
+            try:
+                cast = np.array(scalar).astype(dtype)
+            except (TypeError, ValueError, RuntimeError):
+                # coercion should also raise (error type may change)
+                with pytest.raises(Exception):
+                    np.array(scalar, dtype=dtype)
+
+                if (isinstance(scalar, rational) and
+                        np.issubdtype(dtype, np.signedinteger)):
+                    return
+
+                with pytest.raises(Exception):
+                    np.array([scalar], dtype=dtype)
+                # assignment should also raise
+                res = np.zeros((), dtype=dtype)
+                with pytest.raises(Exception):
+                    res[()] = scalar
+
+                return
+
+            # Non-error path:
+            arr = np.array(scalar, dtype=dtype)
+            assert_array_equal(arr, cast)
+            # assignment behaves the same
+            ass = np.zeros((), dtype=dtype)
+            ass[()] = scalar
+            assert_array_equal(ass, cast)
+
+    @pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100])
+    def test_pyscalar_subclasses(self, pyscalar):
+        """NumPy arrays are read/write, which means that anything but
+        invariant behaviour is on thin ice.  However, we are currently happy
+        to discover subclasses of Python float, int, complex the same as the
+        base classes.
+        This should potentially be deprecated.
+        """
+        class MyScalar(type(pyscalar)):
+            pass
+
+        res = np.array(MyScalar(pyscalar))
+        expected = np.array(pyscalar)
+        assert_array_equal(res, expected)
+
+    @pytest.mark.parametrize("dtype_char", np.typecodes["All"])
+    def test_default_dtype_instance(self, dtype_char):
+        if dtype_char in "SU":
+            dtype = np.dtype(dtype_char + "1")
+        elif dtype_char == "V":
+            # Legacy behaviour was to use V8. The reason was float64 being the
+            # default dtype and that having 8 bytes.
+            dtype = np.dtype("V8")
+        else:
+            dtype = np.dtype(dtype_char)
+
+        discovered_dtype, _ = _discover_array_parameters([], type(dtype))
+
+        assert discovered_dtype == dtype
+        assert discovered_dtype.itemsize == dtype.itemsize
+
+    @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
+    @pytest.mark.parametrize(["scalar", "error"],
+            [(np.float64(np.nan), ValueError),
+             (np.array(-1).astype(np.ulonglong)[()], OverflowError)])
+    def test_scalar_to_int_coerce_does_not_cast(self, dtype, scalar, error):
+        """
+        Signed integers are currently different in that they do not cast other
+        NumPy scalars, but instead use scalar.__int__(). The hardcoded
+        exception to this rule is `np.array(scalar, dtype=integer)`.
+        """
+        dtype = np.dtype(dtype)
+
+        # This is a special case using casting logic. It warns for the NaN
+        # but allows the cast (giving undefined behaviour).
+        with np.errstate(invalid="ignore"):
+            coerced = np.array(scalar, dtype=dtype)
+            cast = np.array(scalar).astype(dtype)
+        assert_array_equal(coerced, cast)
+
+        # However these fail:
+        with pytest.raises(error):
+            np.array([scalar], dtype=dtype)
+        with pytest.raises(error):
+            cast[()] = scalar
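+
+
+def _coercion_paths_demo():
+    # Editorial sketch of the equivalence the tests above exercise:
+    # direct coercion, empty-array assignment, and casting normally agree.
+    scalar = np.float32(1.5)
+    coerced = np.array(scalar, dtype="f8")
+    assigned = np.empty((), dtype="f8")
+    assigned[()] = scalar
+    cast = np.array(scalar).astype("f8")
+    assert coerced == assigned == cast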
+
+
+class TestTimeScalars:
+    @pytest.mark.parametrize("dtype", [np.int64, np.float32])
+    @pytest.mark.parametrize("scalar",
+            [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"),
+             param(np.timedelta64(123, "s"), id="timedelta64[s]"),
+             param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"),
+             param(np.datetime64(1, "D"), id="datetime64[D]")],)
+    def test_coercion_basic(self, dtype, scalar):
+        # Note the `[scalar]` is there because np.array(scalar) uses stricter
+        # `scalar.__int__()` rules for backward compatibility right now.
+        arr = np.array([scalar], dtype=dtype)
+        cast = np.array(scalar).astype(dtype)
+        assert_array_equal(arr, cast)
+
+        ass = np.ones((), dtype=dtype)
+        if issubclass(dtype, np.integer):
+            with pytest.raises(TypeError):
+                # raises, as would np.array([scalar], dtype=dtype), this is
+                # conversion from times, but behaviour of integers.
+                ass[()] = scalar
+        else:
+            ass[()] = scalar
+            assert_array_equal(ass, cast)
+
+    @pytest.mark.parametrize("dtype", [np.int64, np.float32])
+    @pytest.mark.parametrize("scalar",
+            [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"),
+             param(np.timedelta64(12, "generic"), id="timedelta64[generic]")])
+    def test_coercion_timedelta_convert_to_number(self, dtype, scalar):
+        # Only "ns" and "generic" timedeltas can be converted to numbers
+        # so these are slightly special.
+        arr = np.array(scalar, dtype=dtype)
+        cast = np.array(scalar).astype(dtype)
+        ass = np.ones((), dtype=dtype)
+        ass[()] = scalar  # unlike other units, this does not raise
+
+        assert_array_equal(arr, cast)
+        assert_array_equal(ass, cast)
+
+    @pytest.mark.parametrize("dtype", ["S6", "U6"])
+    @pytest.mark.parametrize(["val", "unit"],
+            [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
+    def test_coercion_assignment_datetime(self, val, unit, dtype):
+        # String from datetime64 assignment is currently special cased to
+        # never use casting.  This is because casting will error in this
+        # case, and traditionally in most cases the behaviour is maintained
+        # like this.  (`np.array(scalar, dtype="U6")` would have failed before)
+        # TODO: This discrepancy _should_ be resolved, either by relaxing the
+        #       cast, or by deprecating the first part.
+        scalar = np.datetime64(val, unit)
+        dtype = np.dtype(dtype)
+        cut_string = dtype.type(str(scalar)[:6])
+
+        arr = np.array(scalar, dtype=dtype)
+        assert arr[()] == cut_string
+        ass = np.ones((), dtype=dtype)
+        ass[()] = scalar
+        assert ass[()] == cut_string
+
+        with pytest.raises(RuntimeError):
+            # However, unlike the above assignment (which uses
+            # `str(scalar)[:6]` because it is handled by the string DType
+            # and not by casting), the explicit cast fails:
+            np.array(scalar).astype(dtype)
+
+
+    @pytest.mark.parametrize(["val", "unit"],
+            [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
+    def test_coercion_assignment_timedelta(self, val, unit):
+        scalar = np.timedelta64(val, unit)
+
+        # Unlike datetime64, timedelta allows the unsafe cast:
+        np.array(scalar, dtype="S6")
+        cast = np.array(scalar).astype("S6")
+        ass = np.ones((), dtype="S6")
+        ass[()] = scalar
+        expected = scalar.astype("S")[:6]
+        assert cast[()] == expected
+        assert ass[()] == expected
+
+
+class TestNested:
+    def test_nested_simple(self):
+        initial = [1.2]
+        nested = initial
+        for i in range(np.MAXDIMS - 1):
+            nested = [nested]
+
+        arr = np.array(nested, dtype="float64")
+        assert arr.shape == (1,) * np.MAXDIMS
+        with pytest.raises(ValueError):
+            np.array([nested], dtype="float64")
+
+        with pytest.raises(ValueError, match=".*would exceed the maximum"):
+            np.array([nested])  # user must ask for `object` explicitly
+
+        arr = np.array([nested], dtype=object)
+        assert arr.dtype == np.dtype("O")
+        assert arr.shape == (1,) * np.MAXDIMS
+        assert arr.item() is initial
+
+    def test_pathological_self_containing(self):
+        # Test that this also works for two nested sequences
+        l = []
+        l.append(l)
+        arr = np.array([l, l, l], dtype=object)
+        assert arr.shape == (3,) + (1,) * (np.MAXDIMS - 1)
+
+        # Also check a ragged case:
+        arr = np.array([l, [None], l], dtype=object)
+        assert arr.shape == (3, 1)
+
+    @pytest.mark.parametrize("arraylike", arraylikes())
+    def test_nested_arraylikes(self, arraylike):
+        # We try storing an array like into an array, but the array-like
+        # will have too many dimensions.  This means the shape discovery
+        # decides that the array-like must be treated as an object (a special
+        # case of ragged discovery).  The result will be an array with one
+        # dimension less than the maximum dimensions, and the array being
+        # assigned to it (which does work for object or if `float(arraylike)`
+        # works).
+        initial = arraylike(np.ones((1, 1)))
+
+        nested = initial
+        for i in range(np.MAXDIMS - 1):
+            nested = [nested]
+
+        with pytest.raises(ValueError, match=".*would exceed the maximum"):
+            # It will refuse to assign the array-like into the result array
+            np.array(nested, dtype="float64")
+
+        # If this is object, we end up assigning a (1, 1) array into (1,)
+        # (due to running out of dimensions), this is currently supported but
+        # a special case which is not ideal.
+        arr = np.array(nested, dtype=object)
+        assert arr.shape == (1,) * np.MAXDIMS
+        assert arr.item() == np.array(initial).item()
+
+    @pytest.mark.parametrize("arraylike", arraylikes())
+    def test_uneven_depth_ragged(self, arraylike):
+        arr = np.arange(4).reshape((2, 2))
+        arr = arraylike(arr)
+
+        # Array is ragged in the second dimension already:
+        out = np.array([arr, [arr]], dtype=object)
+        assert out.shape == (2,)
+        assert out[0] is arr
+        assert type(out[1]) is list
+
+        # Array is ragged in the third dimension:
+        with pytest.raises(ValueError):
+            # This is a broadcast error during assignment, because
+            # the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails.
+            np.array([arr, [arr, arr]], dtype=object)
+
+    def test_empty_sequence(self):
+        arr = np.array([[], [1], [[1]]], dtype=object)
+        assert arr.shape == (3,)
+
+        # The empty sequence stops further dimension discovery, so the
+        # result shape will be (0,) which leads to an error during:
+        with pytest.raises(ValueError):
+            np.array([[], np.empty((0, 1))], dtype=object)
+
+    def test_array_of_different_depths(self):
+        # When multiple arrays (or array-likes) are included in a
+        # sequence and have different depths, we currently discover
+        # as many dimensions as they share. (see also gh-17224)
+        arr = np.zeros((3, 2))
+        mismatch_first_dim = np.zeros((1, 2))
+        mismatch_second_dim = np.zeros((3, 3))
+
+        dtype, shape = _discover_array_parameters(
+            [arr, mismatch_second_dim], dtype=np.dtype("O"))
+        assert shape == (2, 3)
+
+        dtype, shape = _discover_array_parameters(
+            [arr, mismatch_first_dim], dtype=np.dtype("O"))
+        assert shape == (2,)
+        # The second case is currently supported because the arrays
+        # can be stored as objects:
+        res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O"))
+        assert res[0] is arr
+        assert res[1] is mismatch_first_dim
+
+
+class TestBadSequences:
+    # These are tests for bad objects passed into `np.array`; in general
+    # these have undefined behaviour.  In the old code they partially worked,
+    # whereas now they fail.  We could (and maybe should) create a copy
+    # of all sequences to be safe against bad actors.
+
+    def test_growing_list(self):
+        # List to coerce, `mylist` will append to it during coercion
+        obj = []
+        class mylist(list):
+            def __len__(self):
+                obj.append([1, 2])
+                return super().__len__()
+
+        obj.append(mylist([1, 2]))
+
+        with pytest.raises(RuntimeError):
+            np.array(obj)
+
+    # Note: We do not test a shrinking list.  These do very evil things,
+    #       and the only way to fix them would be to copy all sequences
+    #       (which may be a real option in the future).
+
+    def test_mutated_list(self):
+        # List to coerce, `mylist` will mutate the first element
+        obj = []
+        class mylist(list):
+            def __len__(self):
+                obj[0] = [2, 3]  # replace with a different list.
+                return super().__len__()
+
+        obj.append([2, 3])
+        obj.append(mylist([1, 2]))
+        # Does not crash:
+        np.array(obj)
+
+    def test_replace_0d_array(self):
+        # List to coerce; `baditem` will mutate the first element
+        obj = []
+        class baditem:
+            def __len__(self):
+                obj[0][0] = 2  # replace with a different list.
+                raise ValueError("not actually a sequence!")
+
+            def __getitem__(self):
+                pass
+
+        # Runs into a corner case in the new code: the `array(2)` is cached,
+        # so replacing it invalidates the cache.
+        obj.append([np.array(2), baditem()])
+        with pytest.raises(RuntimeError):
+            np.array(obj)
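+
+
+def _defensive_copy_sketch(seq):
+    # Editorial sketch of the option discussed above: snapshotting a
+    # sequence before coercion shields np.array from later mutation.
+    return np.array(list(seq))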
+
+
+class TestArrayLikes:
+    @pytest.mark.parametrize("arraylike", arraylikes())
+    def test_0d_object_special_case(self, arraylike):
+        arr = np.array(0.)
+        obj = arraylike(arr)
+        # A single array-like is always converted:
+        res = np.array(obj, dtype=object)
+        assert_array_equal(arr, res)
+
+        # But a single 0-D nested array-like never:
+        res = np.array([obj], dtype=object)
+        assert res[0] is obj
+
+    @pytest.mark.parametrize("arraylike", arraylikes())
+    @pytest.mark.parametrize("arr", [np.array(0.), np.arange(4)])
+    def test_object_assignment_special_case(self, arraylike, arr):
+        obj = arraylike(arr)
+        empty = np.arange(1, dtype=object)
+        empty[:] = [obj]
+        assert empty[0] is obj
+
+    def test_0d_generic_special_case(self):
+        class ArraySubclass(np.ndarray):
+            def __float__(self):
+                raise TypeError("e.g. quantities raise on this")
+
+        arr = np.array(0.)
+        obj = arr.view(ArraySubclass)
+        res = np.array(obj)
+        # The subclass is simply cast:
+        assert_array_equal(arr, res)
+
+        # If the 0-D array-like is included, __float__ is currently
+        # guaranteed to be used.  We may want to change that; quantities
+        # and masked arrays half make use of this.
+        with pytest.raises(TypeError):
+            np.array([obj])
+
+        # The same holds for memoryview:
+        obj = memoryview(arr)
+        res = np.array(obj)
+        assert_array_equal(arr, res)
+        with pytest.raises(ValueError):
+            # The error type does not matter much here.
+            np.array([obj])
+
+    def test_arraylike_classes(self):
+        # The classes of array-likes should generally be acceptable to be
+        # stored inside a numpy (object) array.  This tests all of the
+        # special attributes (since all are checked during coercion).
+        arr = np.array(np.int64)
+        assert arr[()] is np.int64
+        arr = np.array([np.int64])
+        assert arr[0] is np.int64
+
+        # This also works for properties/unbound methods:
+        class ArrayLike:
+            @property
+            def __array_interface__(self):
+                pass
+
+            @property
+            def __array_struct__(self):
+                pass
+
+            def __array__(self):
+                pass
+
+        arr = np.array(ArrayLike)
+        assert arr[()] is ArrayLike
+        arr = np.array([ArrayLike])
+        assert arr[0] is ArrayLike
+
+    @pytest.mark.skipif(
+            np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform")
+    def test_too_large_array_error_paths(self):
+        """Test the error paths, including for memory leaks"""
+        arr = np.array(0, dtype="uint8")
+        # Guarantees that a contiguous copy won't work:
+        arr = np.broadcast_to(arr, 2**62)
+
+        for i in range(5):
+            # repeat, to ensure caching cannot have an effect:
+            with pytest.raises(MemoryError):
+                np.array(arr)
+            with pytest.raises(MemoryError):
+                np.array([arr])
+
+    @pytest.mark.parametrize("attribute",
+        ["__array_interface__", "__array__", "__array_struct__"])
+    @pytest.mark.parametrize("error", [RecursionError, MemoryError])
+    def test_bad_array_like_attributes(self, attribute, error):
+        # RecursionError and MemoryError are considered fatal. All errors
+        # (except AttributeError) should probably be raised in the future,
+        # but shapely made use of it, so it will require a deprecation.
+
+        class BadInterface:
+            def __getattr__(self, attr):
+                if attr == attribute:
+                    raise error
+                super().__getattr__(attr)
+
+        with pytest.raises(error):
+            np.array(BadInterface())
+
+    @pytest.mark.parametrize("error", [RecursionError, MemoryError])
+    def test_bad_array_like_bad_length(self, error):
+        # RecursionError and MemoryError are considered "critical" in
+        # sequences. We could expand this more generally though. (NumPy 1.20)
+        class BadSequence:
+            def __len__(self):
+                raise error
+            def __getitem__(self):
+                # must have getitem to be a Sequence
+                return 1
+
+        with pytest.raises(error):
+            np.array(BadSequence())
+
+
+class TestAsArray:
+    """Test expected behaviors of ``asarray``."""
+
+    def test_dtype_identity(self):
+        """Confirm the intended behavior of the *dtype* kwarg.
+
+        The result of ``asarray()`` should have the dtype provided through the
+        keyword argument, when used. This forces unique array handles to be
+        produced for unique np.dtype objects, but for equivalent dtypes the
+        underlying data (the base object) is shared with the original array
+        object.
+
+        Ref https://github.com/numpy/numpy/issues/1468
+        """
+        int_array = np.array([1, 2, 3], dtype='i')
+        assert np.asarray(int_array) is int_array
+
+        # The character code resolves to the singleton dtype object provided
+        # by the numpy package.
+        assert np.asarray(int_array, dtype='i') is int_array
+
+        # Derive a dtype from np.dtype('i'), but add a metadata object to force
+        # the dtype to be distinct.
+        unequal_type = np.dtype('i', metadata={'spam': True})
+        annotated_int_array = np.asarray(int_array, dtype=unequal_type)
+        assert annotated_int_array is not int_array
+        assert annotated_int_array.base is int_array
+        # Create an equivalent descriptor with a new and distinct dtype
+        # instance.
+        equivalent_requirement = np.dtype('i', metadata={'spam': True})
+        annotated_int_array_alt = np.asarray(annotated_int_array,
+                                             dtype=equivalent_requirement)
+        assert unequal_type == equivalent_requirement
+        assert unequal_type is not equivalent_requirement
+        assert annotated_int_array_alt is not annotated_int_array
+        assert annotated_int_array_alt.dtype is equivalent_requirement
+
+        # Check the same logic for a pair of C types whose equivalence may vary
+        # between computing environments.
+        # Find an equivalent pair.
+        integer_type_codes = ('i', 'l', 'q')
+        integer_dtypes = [np.dtype(code) for code in integer_type_codes]
+        typeA = None
+        typeB = None
+        for typeA, typeB in permutations(integer_dtypes, r=2):
+            if typeA == typeB:
+                assert typeA is not typeB
+                break
+        assert isinstance(typeA, np.dtype) and isinstance(typeB, np.dtype)
+
+        # These ``asarray()`` calls may produce a new view or a copy,
+        # but never the same object.
+        long_int_array = np.asarray(int_array, dtype='l')
+        long_long_int_array = np.asarray(int_array, dtype='q')
+        assert long_int_array is not int_array
+        assert long_long_int_array is not int_array
+        assert np.asarray(long_int_array, dtype='q') is not long_int_array
+        array_a = np.asarray(int_array, dtype=typeA)
+        assert typeA == typeB
+        assert typeA is not typeB
+        assert array_a.dtype is typeA
+        assert array_a is not np.asarray(array_a, dtype=typeB)
+        assert np.asarray(array_a, dtype=typeB).dtype is typeB
+        assert array_a is np.asarray(array_a, dtype=typeB).base
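+
+
+def _asarray_identity_demo():
+    # Editorial sketch of the dtype-identity rule tested above: asarray is
+    # a no-op only when no dtype conversion is required.
+    base = np.array([1, 2, 3], dtype="i")
+    assert np.asarray(base) is base
+    assert np.asarray(base, dtype="f") is not base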
+
+
+class TestSpecialAttributeLookupFailure:
+    # An exception was raised while fetching the attribute
+
+    class WeirdArrayLike:
+        @property
+        def __array__(self):
+            raise RuntimeError("oops!")
+
+    class WeirdArrayInterface:
+        @property
+        def __array_interface__(self):
+            raise RuntimeError("oops!")
+
+    def test_deprecated(self):
+        with pytest.raises(RuntimeError):
+            np.array(self.WeirdArrayLike())
+        with pytest.raises(RuntimeError):
+            np.array(self.WeirdArrayInterface())
+
+
+def test_subarray_from_array_construction():
+    # Arrays are more complex, since they "broadcast" on success:
+    arr = np.array([1, 2])
+
+    res = arr.astype("(2)i,")
+    assert_array_equal(res, [[1, 1], [2, 2]])
+
+    res = np.array(arr, dtype="(2)i,")
+
+    assert_array_equal(res, [[1, 1], [2, 2]])
+
+    res = np.array([[(1,), (2,)], arr], dtype="(2)i,")
+    assert_array_equal(res, [[[1, 1], [2, 2]], [[1, 1], [2, 2]]])
+
+    # Also try a multi-dimensional example:
+    arr = np.arange(5 * 2).reshape(5, 2)
+    expected = np.broadcast_to(arr[:, :, np.newaxis, np.newaxis], (5, 2, 2, 2))
+
+    res = arr.astype("(2,2)f")
+    assert_array_equal(res, expected)
+
+    res = np.array(arr, dtype="(2,2)f")
+    assert_array_equal(res, expected)
+
+
+def test_empty_string():
+    # Empty strings are unfortunately often converted to S1 and we need to
+    # make sure we are filling the S1 and not the (possibly) detected S0
+    # result.  This should likely just return S0; if not, maybe the decision
+    # to return S1 should be moved.
+    res = np.array([""] * 10, dtype="S")
+    assert_array_equal(res, np.array("\0", "S1"))
+    assert res.dtype == "S1"
+
+    arr = np.array([""] * 10, dtype=object)
+
+    res = arr.astype("S")
+    assert_array_equal(res, b"")
+    assert res.dtype == "S1"
+
+    res = np.array(arr, dtype="S")
+    assert_array_equal(res, b"")
+    # TODO: This is arguably weird/wrong, but seems old:
+    assert res.dtype == f"S{np.dtype('O').itemsize}"
+
+    res = np.array([[""] * 10, arr], dtype="S")
+    assert_array_equal(res, b"")
+    assert res.shape == (2, 10)
+    assert res.dtype == "S1"
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_array_interface.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_array_interface.py
new file mode 100644
index 00000000..16c719c5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_array_interface.py
@@ -0,0 +1,219 @@
+import sys
+import pytest
+import numpy as np
+from numpy.testing import extbuild
+
+
+@pytest.fixture
+def get_module(tmp_path):
+    """ Some code to generate data and manage the temporary buffers used
+    when sharing data with numpy via the array interface protocol.
+    """
+
+    if not sys.platform.startswith('linux'):
+        pytest.skip('link fails on cygwin')
+
+    prologue = '''
+        #include <Python.h>
+        #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+        #include <numpy/arrayobject.h>
+        #include <stdio.h>
+        #include <math.h>
+
+        NPY_NO_EXPORT
+        void delete_array_struct(PyObject *cap) {
+
+            /* get the array interface structure */
+            PyArrayInterface *inter = (PyArrayInterface*)
+                PyCapsule_GetPointer(cap, NULL);
+
+            /* get the buffer by which data was shared */
+            double *ptr = (double*)PyCapsule_GetContext(cap);
+
+            /* for the purposes of the regression test set the elements
+               to nan */
+            for (npy_intp i = 0; i < inter->shape[0]; ++i)
+                ptr[i] = nan("");
+
+            /* free the shared buffer */
+            free(ptr);
+
+            /* free the array interface structure */
+            free(inter->shape);
+            free(inter);
+
+            fprintf(stderr, "delete_array_struct\\ncap = %ld inter = %ld"
+                " ptr = %ld\\n", (long)cap, (long)inter, (long)ptr);
+        }
+        '''
+
+    functions = [
+        ("new_array_struct", "METH_VARARGS", """
+
+            long long n_elem = 0;
+            double value = 0.0;
+
+            if (!PyArg_ParseTuple(args, "Ld", &n_elem, &value)) {
+                Py_RETURN_NONE;
+            }
+
+            /* allocate and initialize the data to share with numpy */
+            long long n_bytes = n_elem*sizeof(double);
+            double *data = (double*)malloc(n_bytes);
+
+            if (!data) {
+                PyErr_Format(PyExc_MemoryError,
+                    "Failed to malloc %lld bytes", n_bytes);
+
+                Py_RETURN_NONE;
+            }
+
+            for (long long i = 0; i < n_elem; ++i) {
+                data[i] = value;
+            }
+
+            /* calculate the shape and stride */
+            int nd = 1;
+
+            npy_intp *ss = (npy_intp*)malloc(2*nd*sizeof(npy_intp));
+            npy_intp *shape = ss;
+            npy_intp *stride = ss + nd;
+
+            shape[0] = n_elem;
+            stride[0] = sizeof(double);
+
+            /* construct the array interface */
+            PyArrayInterface *inter = (PyArrayInterface*)
+                malloc(sizeof(PyArrayInterface));
+
+            memset(inter, 0, sizeof(PyArrayInterface));
+
+            inter->two = 2;
+            inter->nd = nd;
+            inter->typekind = 'f';
+            inter->itemsize = sizeof(double);
+            inter->shape = shape;
+            inter->strides = stride;
+            inter->data = data;
+            inter->flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_NOTSWAPPED |
+                           NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS;
+
+            /* package into a capsule */
+            PyObject *cap = PyCapsule_New(inter, NULL, delete_array_struct);
+
+            /* save the pointer to the data */
+            PyCapsule_SetContext(cap, data);
+
+            fprintf(stderr, "new_array_struct\\ncap = %ld inter = %ld"
+                " ptr = %ld\\n", (long)cap, (long)inter, (long)data);
+
+            return cap;
+        """)
+        ]
+
+    more_init = "import_array();"
+
+    try:
+        import array_interface_testing
+        return array_interface_testing
+    except ImportError:
+        pass
+
+    # if it does not exist, build and load it
+    return extbuild.build_and_import_extension('array_interface_testing',
+                                               functions,
+                                               prologue=prologue,
+                                               include_dirs=[np.get_include()],
+                                               build_dir=tmp_path,
+                                               more_init=more_init)
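+
+
+class _ArrayStructHolder:
+    # Editorial sketch (not used by the test below): the minimal Python-side
+    # shape of an object sharing data through the C-level protocol -- it only
+    # needs to expose an `__array_struct__` capsule, here borrowed from an
+    # existing array that must be kept alive.
+    def __init__(self, arr):
+        self._arr = arr  # keep a reference so the capsule stays valid
+        self.__array_struct__ = arr.__array_struct__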
+
+
+# FIXME: numpy.testing.extbuild uses `numpy.distutils`, so this won't work on
+# Python 3.12 and up.
+@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils")
+@pytest.mark.slow
+def test_cstruct(get_module):
+
+    class data_source:
+        """
+        This class is for testing the timing of the PyCapsule destructor
+        invoked when numpy releases its reference to the shared data as part
+        of the numpy array interface protocol. If the PyCapsule destructor is
+        called early, the shared data is freed and invalid memory accesses
+        will occur.
+        """
+
+        def __init__(self, size, value):
+            self.size = size
+            self.value = value
+
+        @property
+        def __array_struct__(self):
+            return get_module.new_array_struct(self.size, self.value)
+
+    # write to the same stream as the C code
+    stderr = sys.__stderr__
+
+    # used to validate the shared data.
+    expected_value = -3.1415
+    multiplier = -10000.0
+
+    # create some data to share with numpy via the array interface
+    # assign the data an expected value.
+    stderr.write(' ---- create an object to share data ---- \n')
+    buf = data_source(256, expected_value)
+    stderr.write(' ---- OK!\n\n')
+
+    # share the data
+    stderr.write(' ---- share data via the array interface protocol ---- \n')
+    arr = np.array(buf, copy=False)
+    stderr.write('arr.__array_interface__ = %s\n' % (
+                 str(arr.__array_interface__)))
+    stderr.write('arr.base = %s\n' % (str(arr.base)))
+    stderr.write(' ---- OK!\n\n')
+
+    # release the source of the shared data. This will not release the data
+    # that was shared with numpy; that is done in the PyCapsule destructor.
+    stderr.write(' ---- destroy the object that shared data ---- \n')
+    buf = None
+    stderr.write(' ---- OK!\n\n')
+
+    # check that we got the expected data. If the PyCapsule destructor we
+    # defined was prematurely called, this test will fail because our
+    # destructor sets the elements of the array to NaN before freeing the
+    # buffer. Reading the values here may also cause a SEGV.
+    assert np.allclose(arr, expected_value)
+
+    # read the data. If the PyCapsule destructor we defined was prematurely
+    # called, reading the values here may cause a SEGV and will be reported
+    # as invalid reads by valgrind.
+    stderr.write(' ---- read shared data ---- \n')
+    stderr.write('arr = %s\n' % (str(arr)))
+    stderr.write(' ---- OK!\n\n')
+
+    # write to the shared buffer. If the shared data was prematurely deleted,
+    # this may cause a SEGV and valgrind will report invalid writes.
+    stderr.write(' ---- modify shared data ---- \n')
+    arr *= multiplier
+    expected_value *= multiplier
+    stderr.write('arr.__array_interface__ = %s\n' % (
+                 str(arr.__array_interface__)))
+    stderr.write('arr.base = %s\n' % (str(arr.base)))
+    stderr.write(' ---- OK!\n\n')
+
+    # read the data. If the shared data was prematurely deleted, this
+    # may cause a SEGV and valgrind will report invalid reads.
+    stderr.write(' ---- read modified shared data ---- \n')
+    stderr.write('arr = %s\n' % (str(arr)))
+    stderr.write(' ---- OK!\n\n')
+
+    # check that we got the expected data. If the PyCapsule destructor we
+    # defined was prematurely called, this test will fail because our
+    # destructor sets the elements of the array to NaN before freeing the
+    # buffer. Reading the values here may also cause a SEGV.
+    assert np.allclose(arr, expected_value)
+
+    # free the shared data; the PyCapsule destructor should run here
+    stderr.write(' ---- free shared data ---- \n')
+    arr = None
+    stderr.write(' ---- OK!\n\n')
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_arraymethod.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_arraymethod.py
new file mode 100644
index 00000000..4fd4d555
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_arraymethod.py
@@ -0,0 +1,85 @@
+"""
+This file tests the generic aspects of ArrayMethod.  At the time of writing
+this is private API; once public API is added, it may be tested here as well.
+"""
+
+from __future__ import annotations
+
+import sys
+import types
+from typing import Any
+
+import pytest
+
+import numpy as np
+from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
+
+
+class TestResolveDescriptors:
+    # Test mainly error paths of the resolve_descriptors function;
+    # note that the `casting_unittests` tests exercise the non-error paths.
+
+    # Casting implementations are the main/only current user:
+    method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
+
+    @pytest.mark.parametrize("args", [
+        (True,),  # Not a tuple.
+        ((None,),),  # Too few elements
+        ((None, None, None),),  # Too many
+        ((None, None),),  # Input dtype is None, which is invalid.
+        ((np.dtype("d"), True),),  # Output dtype is not a dtype
+        ((np.dtype("f"), None),),  # Input dtype does not match method
+    ])
+    def test_invalid_arguments(self, args):
+        with pytest.raises(TypeError):
+            self.method._resolve_descriptors(*args)
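+
+
+def _resolve_descriptors_call_sketch():
+    # Editorial sketch of a valid call (private API, so only the argument
+    # shape is shown): a (input_dtype, output_dtype) tuple whose input
+    # matches the method's source dtype.
+    method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
+    return method._resolve_descriptors((np.dtype("d"), np.dtype("f")))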
+
+
+class TestSimpleStridedCall:
+    # Test mainly error paths of the _simple_strided_call function;
+    # note that the `casting_unittests` tests exercise the non-error paths.
+
+    # Casting implementations are the main/only current user:
+    method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
+
+    @pytest.mark.parametrize(["args", "error"], [
+        ((True,), TypeError),  # Not a tuple
+        (((None,),), TypeError),  # Too few elements
+        ((None, None), TypeError),  # Inputs are not arrays.
+        (((None, None, None),), TypeError),  # Too many
+        (((np.arange(3), np.arange(3)),), TypeError),  # Incorrect dtypes
+        (((np.ones(3, dtype=">d"), np.ones(3, dtype="<f")),),
+         TypeError),  # Does not support byte-swapping
+        (((np.ones((2, 2), dtype="d"), np.ones((2, 2), dtype="f")),),
+         ValueError),  # not 1-D
+        (((np.ones(3, dtype="d"), np.ones(4, dtype="f")),),
+         ValueError),  # different lengths
+    ])
+    def test_invalid_arguments(self, args, error):
+        # This is private API, which may be modified freely
+        with pytest.raises(error):
+            self.method._simple_strided_call(*args)
+
+
+@pytest.mark.parametrize(
+    "cls", [np.ndarray, np.recarray, np.chararray, np.matrix, np.memmap]
+)
+class TestClassGetItem:
+    def test_class_getitem(self, cls: type[np.ndarray]) -> None:
+        """Test `ndarray.__class_getitem__`."""
+        alias = cls[Any, Any]
+        assert isinstance(alias, types.GenericAlias)
+        assert alias.__origin__ is cls
+
+    @pytest.mark.parametrize("arg_len", range(4))
+    def test_subscript_tup(self, cls: type[np.ndarray], arg_len: int) -> None:
+        arg_tup = (Any,) * arg_len
+        if arg_len in (1, 2):
+            assert cls[arg_tup]
+        else:
+            match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
+            with pytest.raises(TypeError, match=match):
+                cls[arg_tup]
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_arrayprint.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_arrayprint.py
new file mode 100644
index 00000000..6796b407
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_arrayprint.py
@@ -0,0 +1,1047 @@
+import sys
+import gc
+from hypothesis import given
+from hypothesis.extra import numpy as hynp
+import pytest
+
+import numpy as np
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
+    assert_raises_regex,
+    )
+from numpy.core.arrayprint import _typelessdata
+import textwrap
+
+class TestArrayRepr:
+    def test_nan_inf(self):
+        x = np.array([np.nan, np.inf])
+        assert_equal(repr(x), 'array([nan, inf])')
+
+    def test_subclass(self):
+        class sub(np.ndarray): pass
+
+        # one dimensional
+        x1d = np.array([1, 2]).view(sub)
+        assert_equal(repr(x1d), 'sub([1, 2])')
+
+        # two dimensional
+        x2d = np.array([[1, 2], [3, 4]]).view(sub)
+        assert_equal(repr(x2d),
+            'sub([[1, 2],\n'
+            '     [3, 4]])')
+
+        # two dimensional with flexible dtype
+        xstruct = np.ones((2, 2), dtype=[('a', '<i4')]).view(sub)
+        assert_equal(repr(xstruct),
+            "sub([[(1,), (1,)],\n"
+            "     [(1,), (1,)]], dtype=[('a', '<i4')])"
+        )
+
+    def test_0d_object_subclass(self):
+        # make sure that subclasses which return 0d arrays from __getitem__
+        # (which is a bug, but might not be easy to fix) don't cause
+        # infinite recursion in repr
+        class sub(np.ndarray):
+            def __new__(cls, inp):
+                obj = np.asarray(inp).view(cls)
+                return obj
+
+            def __getitem__(self, ind):
+                ret = super().__getitem__(ind)
+                return sub(ret)
+
+        x = sub(1)
+        assert_equal(repr(x), 'sub(1)')
+        assert_equal(str(x), '1')
+
+        # check it works properly with object arrays too
+        x = sub(None)
+        assert_equal(repr(x), 'sub(None, dtype=object)')
+        assert_equal(str(x), 'None')
+
+        # two 0d-subclass objects containing each other
+        x = sub(None)
+        y = sub(None)
+        x[()] = y
+        y[()] = x
+        assert_equal(repr(x),
+            'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
+        assert_equal(str(x), '...')
+        x[()] = 0  # resolve circular references for garbage collector
+
+        # nested 0d-subclass-object
+        x = sub(None)
+        x[()] = sub(None)
+        assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
+        assert_equal(str(x), 'None')
+
+        # gh-10663
+        class DuckCounter(np.ndarray):
+            def __getitem__(self, item):
+                result = super().__getitem__(item)
+                if not isinstance(result, DuckCounter):
+                    result = result[...].view(DuckCounter)
+                return result
+
+            def to_string(self):
+                return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
+
+            def __str__(self):
+                if self.shape == ():
+                    return self.to_string()
+                else:
+                    fmt = {'all': lambda x: x.to_string()}
+                    return np.array2string(self, formatter=fmt)
+
+        dc = np.arange(5).view(DuckCounter)
+        assert_equal(str(dc), "[zero one two many many]")
+        assert_equal(str(dc[0]), "zero")
+
+    def test_self_containing(self):
+        arr0d = np.array(None)
+        arr0d[()] = arr0d
+        assert_equal(repr(arr0d),
+            'array(array(..., dtype=object), dtype=object)')
+        arr0d[()] = 0  # resolve recursion for garbage collector
+
+        arr1d = np.array([None, None])
+        arr1d[1] = arr1d
+        assert_equal(repr(arr1d),
+            'array([None, array(..., dtype=object)], dtype=object)')
+        arr1d[1] = 0  # resolve recursion for garbage collector
+
+        first = np.array(None)
+        second = np.array(None)
+        first[()] = second
+        second[()] = first
+        assert_equal(repr(first),
+            'array(array(array(..., dtype=object), dtype=object), dtype=object)')
+        first[()] = 0  # resolve circular references for garbage collector
+
+    def test_containing_list(self):
+        # printing square brackets directly would be ambiguous
+        arr1d = np.array([None, None])
+        arr1d[0] = [1, 2]
+        arr1d[1] = [3]
+        assert_equal(repr(arr1d),
+            'array([list([1, 2]), list([3])], dtype=object)')
+
+    def test_void_scalar_recursion(self):
+        # gh-9345
+        repr(np.void(b'test'))  # RecursionError ?
+
+    def test_fieldless_structured(self):
+        # gh-10366
+        no_fields = np.dtype([])
+        arr_no_fields = np.empty(4, dtype=no_fields)
+        assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
+
+
+class TestComplexArray:
+    def test_str(self):
+        rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
+        cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
+        dtypes = [np.complex64, np.cdouble, np.clongdouble]
+        actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
+        wanted = [
+            '[0.+0.j]',    '[0.+0.j]',    '[0.+0.j]',
+            '[0.+1.j]',    '[0.+1.j]',    '[0.+1.j]',
+            '[0.-1.j]',    '[0.-1.j]',    '[0.-1.j]',
+            '[0.+infj]',   '[0.+infj]',   '[0.+infj]',
+            '[0.-infj]',   '[0.-infj]',   '[0.-infj]',
+            '[0.+nanj]',   '[0.+nanj]',   '[0.+nanj]',
+            '[1.+0.j]',    '[1.+0.j]',    '[1.+0.j]',
+            '[1.+1.j]',    '[1.+1.j]',    '[1.+1.j]',
+            '[1.-1.j]',    '[1.-1.j]',    '[1.-1.j]',
+            '[1.+infj]',   '[1.+infj]',   '[1.+infj]',
+            '[1.-infj]',   '[1.-infj]',   '[1.-infj]',
+            '[1.+nanj]',   '[1.+nanj]',   '[1.+nanj]',
+            '[-1.+0.j]',   '[-1.+0.j]',   '[-1.+0.j]',
+            '[-1.+1.j]',   '[-1.+1.j]',   '[-1.+1.j]',
+            '[-1.-1.j]',   '[-1.-1.j]',   '[-1.-1.j]',
+            '[-1.+infj]',  '[-1.+infj]',  '[-1.+infj]',
+            '[-1.-infj]',  '[-1.-infj]',  '[-1.-infj]',
+            '[-1.+nanj]',  '[-1.+nanj]',  '[-1.+nanj]',
+            '[inf+0.j]',   '[inf+0.j]',   '[inf+0.j]',
+            '[inf+1.j]',   '[inf+1.j]',   '[inf+1.j]',
+            '[inf-1.j]',   '[inf-1.j]',   '[inf-1.j]',
+            '[inf+infj]',  '[inf+infj]',  '[inf+infj]',
+            '[inf-infj]',  '[inf-infj]',  '[inf-infj]',
+            '[inf+nanj]',  '[inf+nanj]',  '[inf+nanj]',
+            '[-inf+0.j]',  '[-inf+0.j]',  '[-inf+0.j]',
+            '[-inf+1.j]',  '[-inf+1.j]',  '[-inf+1.j]',
+            '[-inf-1.j]',  '[-inf-1.j]',  '[-inf-1.j]',
+            '[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
+            '[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
+            '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
+            '[nan+0.j]',   '[nan+0.j]',   '[nan+0.j]',
+            '[nan+1.j]',   '[nan+1.j]',   '[nan+1.j]',
+            '[nan-1.j]',   '[nan-1.j]',   '[nan-1.j]',
+            '[nan+infj]',  '[nan+infj]',  '[nan+infj]',
+            '[nan-infj]',  '[nan-infj]',  '[nan-infj]',
+            '[nan+nanj]',  '[nan+nanj]',  '[nan+nanj]']
+
+        for res, val in zip(actual, wanted):
+            assert_equal(res, val)
+
+class TestArray2String:
+    def test_basic(self):
+        """Basic test of array2string."""
+        a = np.arange(3)
+        assert_(np.array2string(a) == '[0 1 2]')
+        assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
+        assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
+
+    def test_unexpected_kwarg(self):
+        # ensure that an appropriate TypeError
+        # is raised when array2string receives
+        # an unexpected kwarg
+
+        with assert_raises_regex(TypeError, 'nonsense'):
+            np.array2string(np.array([1, 2, 3]),
+                            nonsense=None)
+
+    def test_format_function(self):
+        """Test custom format function for each element in array."""
+        def _format_function(x):
+            if np.abs(x) < 1:
+                return '.'
+            elif np.abs(x) < 2:
+                return 'o'
+            else:
+                return 'O'
+
+        x = np.arange(3)
+        x_hex = "[0x0 0x1 0x2]"
+        x_oct = "[0o0 0o1 0o2]"
+        assert_(np.array2string(x, formatter={'all':_format_function}) ==
+                "[. o O]")
+        assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
+                "[. o O]")
+        assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
+                "[0.0000 1.0000 2.0000]")
+        assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
+                x_hex)
+        assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
+                x_oct)
+
+        x = np.arange(3.)
+        assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
+                "[0.00 1.00 2.00]")
+        assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
+                "[0.00 1.00 2.00]")
+
+        s = np.array(['abc', 'def'])
+        assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
+                '[abcabc defdef]')
+
+    def test_structure_format_mixed(self):
+        dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+        x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
+        assert_equal(np.array2string(x),
+                "[('Sarah', [8., 7.]) ('John', [6., 7.])]")
+
+        np.set_printoptions(legacy='1.13')
+        try:
+            # for issue #5692
+            A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
+            A[5:].fill(np.datetime64('NaT'))
+            assert_equal(
+                np.array2string(A),
+                textwrap.dedent("""\
+                [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
+                 ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
+                 ('NaT',) ('NaT',) ('NaT',)]""")
+            )
+        finally:
+            np.set_printoptions(legacy=False)
+
+        # same again, but with non-legacy behavior
+        assert_equal(
+            np.array2string(A),
+            textwrap.dedent("""\
+            [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
+             ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
+             ('1970-01-01T00:00:00',) (                'NaT',)
+             (                'NaT',) (                'NaT',)
+             (                'NaT',) (                'NaT',)]""")
+        )
+
+        # and again, with timedeltas
+        A = np.full(10, 123456, dtype=[("A", "m8[s]")])
+        A[5:].fill(np.datetime64('NaT'))
+        assert_equal(
+            np.array2string(A),
+            textwrap.dedent("""\
+            [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
+             ( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
+        )
+
+    def test_structure_format_int(self):
+        # See #8160
+        struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
+        assert_equal(np.array2string(struct_int),
+                "[([  1,  -1],) ([123,   1],)]")
+        struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
+                dtype=[('B', 'i4', (2, 2))])
+        assert_equal(np.array2string(struct_2dint),
+                "[([[ 0,  1], [ 2,  3]],) ([[12,  0], [ 0,  0]],)]")
+
+    def test_structure_format_float(self):
+        # See #8172
+        array_scalar = np.array(
+                (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
+        assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
+
+    def test_unstructured_void_repr(self):
+        a = np.array([27, 91, 50, 75,  7, 65, 10,  8,
+                      27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8')
+        assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
+        assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
+        assert_equal(repr(a),
+            r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
+            r"       b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
+
+        assert_equal(eval(repr(a), vars(np)), a)
+        assert_equal(eval(repr(a[0]), vars(np)), a[0])
+
+    def test_edgeitems_kwarg(self):
+        # previously the global print options would take precedence over the kwarg
+        arr = np.zeros(3, int)
+        assert_equal(
+            np.array2string(arr, edgeitems=1, threshold=0),
+            "[0 ... 0]"
+        )
+
+    def test_summarize_1d(self):
+        A = np.arange(1001)
+        strA = '[   0    1    2 ...  998  999 1000]'
+        assert_equal(str(A), strA)
+
+        reprA = 'array([   0,    1,    2, ...,  998,  999, 1000])'
+        assert_equal(repr(A), reprA)
+
+    def test_summarize_2d(self):
+        A = np.arange(1002).reshape(2, 501)
+        strA = '[[   0    1    2 ...  498  499  500]\n' \
+               ' [ 501  502  503 ...  999 1000 1001]]'
+        assert_equal(str(A), strA)
+
+        reprA = 'array([[   0,    1,    2, ...,  498,  499,  500],\n' \
+                '       [ 501,  502,  503, ...,  999, 1000, 1001]])'
+        assert_equal(repr(A), reprA)
+
+    def test_summarize_structure(self):
+        B = np.ones(2002, dtype=">i8").view([('i', ">i8", (2, 1001))])
+        strB = "[([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)]"
+        assert_equal(str(B), strB)
+
+        reprB = (
+            "array([([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)],\n"
+            "      dtype=[('i', '>i8', (2, 1001))])"
+        )
+        assert_equal(repr(B), reprB)
+
+    @pytest.mark.parametrize("dtype", "bhilqpBHILQPefdgFDG")
+    @pytest.mark.parametrize("value", [0, 1])
+    def test_dtype_endianness_repr(self, dtype, value):
+        # The repr must make native vs. non-native byte order visible
+        # whenever the dtype is wider than one byte.
+        native_dtype = np.dtype(dtype)
+        non_native_dtype = native_dtype.newbyteorder()
+        non_native_repr = repr(np.array([value], non_native_dtype))
+        native_repr = repr(np.array([value], native_dtype))
+        if non_native_dtype.itemsize > 1:
+            # if the type is >1 byte, the non-native endian version
+            # must show endianness.
+            assert non_native_repr != native_repr
+            assert f"dtype='{non_native_dtype.byteorder}" in non_native_repr
+
+    def test_linewidth_repr(self):
+        a = np.full(7, fill_value=2)
+        np.set_printoptions(linewidth=17)
+        assert_equal(
+            repr(a),
+            textwrap.dedent("""\
+            array([2, 2, 2,
+                   2, 2, 2,
+                   2])""")
+        )
+        np.set_printoptions(linewidth=17, legacy='1.13')
+        assert_equal(
+            repr(a),
+            textwrap.dedent("""\
+            array([2, 2, 2,
+                   2, 2, 2, 2])""")
+        )
+
+        a = np.full(8, fill_value=2)
+
+        np.set_printoptions(linewidth=18, legacy=False)
+        assert_equal(
+            repr(a),
+            textwrap.dedent("""\
+            array([2, 2, 2,
+                   2, 2, 2,
+                   2, 2])""")
+        )
+
+        np.set_printoptions(linewidth=18, legacy='1.13')
+        assert_equal(
+            repr(a),
+            textwrap.dedent("""\
+            array([2, 2, 2, 2,
+                   2, 2, 2, 2])""")
+        )
+
+    def test_linewidth_str(self):
+        a = np.full(18, fill_value=2)
+        np.set_printoptions(linewidth=18)
+        assert_equal(
+            str(a),
+            textwrap.dedent("""\
+            [2 2 2 2 2 2 2 2
+             2 2 2 2 2 2 2 2
+             2 2]""")
+        )
+        np.set_printoptions(linewidth=18, legacy='1.13')
+        assert_equal(
+            str(a),
+            textwrap.dedent("""\
+            [2 2 2 2 2 2 2 2 2
+             2 2 2 2 2 2 2 2 2]""")
+        )
+
+    def test_edgeitems(self):
+        np.set_printoptions(edgeitems=1, threshold=1)
+        a = np.arange(27).reshape((3, 3, 3))
+        assert_equal(
+            repr(a),
+            textwrap.dedent("""\
+            array([[[ 0, ...,  2],
+                    ...,
+                    [ 6, ...,  8]],
+
+                   ...,
+
+                   [[18, ..., 20],
+                    ...,
+                    [24, ..., 26]]])""")
+        )
+
+        b = np.zeros((3, 3, 1, 1))
+        assert_equal(
+            repr(b),
+            textwrap.dedent("""\
+            array([[[[0.]],
+
+                    ...,
+
+                    [[0.]]],
+
+
+                   ...,
+
+
+                   [[[0.]],
+
+                    ...,
+
+                    [[0.]]]])""")
+        )
+
+        # 1.13 had extra trailing spaces, and was missing newlines
+        np.set_printoptions(legacy='1.13')
+
+        assert_equal(
+            repr(a),
+            textwrap.dedent("""\
+            array([[[ 0, ...,  2],
+                    ..., 
+                    [ 6, ...,  8]],
+
+                   ..., 
+                   [[18, ..., 20],
+                    ..., 
+                    [24, ..., 26]]])""")
+        )
+
+        assert_equal(
+            repr(b),
+            textwrap.dedent("""\
+            array([[[[ 0.]],
+
+                    ..., 
+                    [[ 0.]]],
+
+
+                   ..., 
+                   [[[ 0.]],
+
+                    ..., 
+                    [[ 0.]]]])""")
+        )
+
+    def test_edgeitems_structured(self):
+        np.set_printoptions(edgeitems=1, threshold=1)
+        A = np.arange(5*2*3, dtype="<i8").view([('i', "<i8", (5, 2, 3))])
+        reprA = (
+            "array([([[[ 0, ...,  2], [ 3, ...,  5]], ..., "
+            "[[24, ..., 26], [27, ..., 29]]],)],\n"
+            "      dtype=[('i', '<i8', (5, 2, 3))])"
+        )
+        assert_equal(repr(A), reprA)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_casting_floatingpoint_errors.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_casting_floatingpoint_errors.py
new file mode 100644
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_casting_floatingpoint_errors.py
+import pytest
+from pytest import param
+
+import numpy as np
+from numpy.testing import IS_WASM
+
+
+def values_and_dtypes():
+    """
+    Generate value+dtype pairs that generate floating point errors during
+    casts.  The invalid casts to integers will generate "invalid" value
+    warnings, the float casts all generate "overflow".
+    """
+    # Casting to float16:
+    yield param(70000, "float16", id="int-to-f2")
+    yield param("70000", "float16", id="str-to-f2")
+    yield param(70000.0, "float16", id="float-to-f2")
+    yield param(np.longdouble(70000.), "float16", id="longdouble-to-f2")
+    yield param(np.float64(70000.), "float16", id="double-to-f2")
+    yield param(np.float32(70000.), "float16", id="single-to-f2")
+
+    # Casting to float32:
+    yield param(10**100, "float32", id="int-to-f4")
+    yield param(1e300, "float32", id="float-to-f4")
+    yield param(np.longdouble(1e300), "float32", id="longdouble-to-f4")
+    yield param(np.float64(1e300), "float32", id="double-to-f4")
+
+    # Casting to float64:
+    # If longdouble is double-double, its max can round down to the double
+    # max, so correct by one double spacing:
+    max_ld = np.finfo(np.longdouble).max
+    spacing = np.spacing(np.nextafter(np.finfo("f8").max, 0))
+    if max_ld - spacing > np.finfo("f8").max:
+        yield param(np.finfo(np.longdouble).max, "float64",
+                    id="longdouble-to-f8")
+
+    # Casting to complex64:
+    yield param(2e300, "complex64", id="float-to-c8")
+    yield param(2e300+0j, "complex64", id="complex-to-c8")
+    yield param(2e300j, "complex64", id="complex-to-c8")
+    yield param(np.longdouble(2e300), "complex64", id="longdouble-to-c8")
+
+    # Invalid float to integer casts:
+    with np.errstate(over="ignore"):
+        for to_dt in np.typecodes["AllInteger"]:
+            for value in [np.inf, np.nan]:
+                for from_dt in np.typecodes["AllFloat"]:
+                    from_dt = np.dtype(from_dt)
+                    from_val = from_dt.type(value)
+
+                    yield param(from_val, to_dt, id=f"{from_val}-to-{to_dt}")
+
+
+def check_operations(dtype, value):
+    """
+    There are many dedicated paths in NumPy which cast and should check for
+    floating point errors which occurred during those casts.
+    """
+    if dtype.kind != 'i':
+        # These assignments use the stricter setitem logic:
+        def assignment():
+            arr = np.empty(3, dtype=dtype)
+            arr[0] = value
+
+        yield assignment
+
+        def fill():
+            arr = np.empty(3, dtype=dtype)
+            arr.fill(value)
+
+        yield fill
+
+    def copyto_scalar():
+        arr = np.empty(3, dtype=dtype)
+        np.copyto(arr, value, casting="unsafe")
+
+    yield copyto_scalar
+
+    def copyto():
+        arr = np.empty(3, dtype=dtype)
+        np.copyto(arr, np.array([value, value, value]), casting="unsafe")
+
+    yield copyto
+
+    def copyto_scalar_masked():
+        arr = np.empty(3, dtype=dtype)
+        np.copyto(arr, value, casting="unsafe",
+                  where=[True, False, True])
+
+    yield copyto_scalar_masked
+
+    def copyto_masked():
+        arr = np.empty(3, dtype=dtype)
+        np.copyto(arr, np.array([value, value, value]), casting="unsafe",
+                  where=[True, False, True])
+
+    yield copyto_masked
+
+    def direct_cast():
+        np.array([value, value, value]).astype(dtype)
+
+    yield direct_cast
+
+    def direct_cast_nd_strided():
+        arr = np.full((5, 5, 5), fill_value=value)[:, ::2, :]
+        arr.astype(dtype)
+
+    yield direct_cast_nd_strided
+
+    def boolean_array_assignment():
+        arr = np.empty(3, dtype=dtype)
+        arr[[True, False, True]] = np.array([value, value])
+
+    yield boolean_array_assignment
+
+    def integer_array_assignment():
+        arr = np.empty(3, dtype=dtype)
+        values = np.array([value, value])
+
+        arr[[0, 1]] = values
+
+    yield integer_array_assignment
+
+    def integer_array_assignment_with_subspace():
+        arr = np.empty((5, 3), dtype=dtype)
+        values = np.array([value, value, value])
+
+        arr[[0, 2]] = values
+
+    yield integer_array_assignment_with_subspace
+
+    def flat_assignment():
+        arr = np.empty((3,), dtype=dtype)
+        values = np.array([value, value, value])
+        arr.flat[:] = values
+
+    yield flat_assignment
+
+@pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
+@pytest.mark.parametrize(["value", "dtype"], values_and_dtypes())
+@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
+def test_floatingpoint_errors_casting(dtype, value):
+    dtype = np.dtype(dtype)
+    for operation in check_operations(dtype, value):
+        match = "invalid" if dtype.kind in 'iu' else "overflow"
+        with pytest.warns(RuntimeWarning, match=match):
+            operation()
+
+        with np.errstate(all="raise"):
+            with pytest.raises(FloatingPointError, match=match):
+                operation()
+
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_casting_unittests.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_casting_unittests.py
new file mode 100644
index 00000000..a49d876d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_casting_unittests.py
@@ -0,0 +1,819 @@
+"""
+The tests exercise the casting machinery in a more low-level manner.
+The reason is mostly to test a new implementation of the casting machinery.
+
+Unlike most tests in NumPy, these are closer to unit tests than
+integration tests.
+"""
+
+import pytest
+import textwrap
+import enum
+import random
+import ctypes
+
+import numpy as np
+from numpy.lib.stride_tricks import as_strided
+
+from numpy.testing import assert_array_equal
+from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
+
+
+# Simple skips object, parametric and long double (unsupported by struct)
+simple_dtypes = "?bhilqBHILQefdFD"
+if np.dtype("l").itemsize != np.dtype("q").itemsize:
+    # Remove l and L, the table was generated with 64bit linux in mind.
+    simple_dtypes = simple_dtypes.replace("l", "").replace("L", "")
+simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes]
+
+
+def simple_dtype_instances():
+    for dtype_class in simple_dtypes:
+        dt = dtype_class()
+        yield pytest.param(dt, id=str(dt))
+        if dt.byteorder != "|":
+            dt = dt.newbyteorder()
+            yield pytest.param(dt, id=str(dt))
+
+
+def get_expected_stringlength(dtype):
+    """Returns the string length when casting the basic dtypes to strings.
+    """
+    if dtype == np.bool_:
+        return 5
+    if dtype.kind in "iu":
+        if dtype.itemsize == 1:
+            length = 3
+        elif dtype.itemsize == 2:
+            length = 5
+        elif dtype.itemsize == 4:
+            length = 10
+        elif dtype.itemsize == 8:
+            length = 20
+        else:
+            raise AssertionError(f"did not find expected length for {dtype}")
+
+        if dtype.kind == "i":
+            length += 1  # adds one character for the sign
+
+        return length
+
+    # Note: Can't do dtype comparison for longdouble on windows
+    if dtype.char == "g":
+        return 48
+    elif dtype.char == "G":
+        return 48 * 2
+    elif dtype.kind == "f":
+        return 32  # also for half apparently.
+    elif dtype.kind == "c":
+        return 32 * 2
+
+    raise AssertionError(f"did not find expected length for {dtype}")
+
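+# A worked example of the rules above: an int64 needs at most 20 digits plus
+# one sign character, so get_expected_stringlength(np.dtype("i8")) == 21 and
+# casting "i8" to a string therefore defaults to an "S21"/"U21" result.
+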
+
+class Casting(enum.IntEnum):
+    no = 0
+    equiv = 1
+    safe = 2
+    same_kind = 3
+    unsafe = 4
+
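+# Casting is an IntEnum, so safety levels are ordered and can be compared
+# directly; e.g. `casting <= Casting.safe` is True exactly for the "no",
+# "equiv" and "safe" levels.  The tests below rely on this ordering.
+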
+
+def _get_cancast_table():
+    table = textwrap.dedent("""
+        X ? b h i l q B H I L Q e f d g F D G S U V O M m
+        ? # = = = = = = = = = = = = = = = = = = = = = . =
+        b . # = = = = . . . . . = = = = = = = = = = = . =
+        h . ~ # = = = . . . . . ~ = = = = = = = = = = . =
+        i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =
+        l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
+        q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
+        B . ~ = = = = # = = = = = = = = = = = = = = = . =
+        H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . =
+        I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . =
+        L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
+        Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
+        e . . . . . . . . . . . # = = = = = = = = = = . .
+        f . . . . . . . . . . . ~ # = = = = = = = = = . .
+        d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . .
+        g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . .
+        F . . . . . . . . . . . . . . . # = = = = = = . .
+        D . . . . . . . . . . . . . . . ~ # = = = = = . .
+        G . . . . . . . . . . . . . . . ~ ~ # = = = = . .
+        S . . . . . . . . . . . . . . . . . . # = = = . .
+        U . . . . . . . . . . . . . . . . . . . # = = . .
+        V . . . . . . . . . . . . . . . . . . . . # = . .
+        O . . . . . . . . . . . . . . . . . . . . = # . .
+        M . . . . . . . . . . . . . . . . . . . . = = # .
+        m . . . . . . . . . . . . . . . . . . . . = = . #
+        """).strip().split("\n")
+    dtypes = [type(np.dtype(c)) for c in table[0][2::2]]
+
+    convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
+                    "=": Casting.safe, "#": Casting.equiv,
+                    " ": -1}
+
+    cancast = {}
+    for from_dt, row in zip(dtypes, table[1:]):
+        cancast[from_dt] = {}
+        for to_dt, c in zip(dtypes, row[2::2]):
+            cancast[from_dt][to_dt] = convert_cast[c]
+
+    return cancast
+
+CAST_TABLE = _get_cancast_table()
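+
+# Reading the table: for example, int64 ("q") to float32 ("f") is only a
+# same-kind cast, while float32 to float64 ("d") is a safe cast:
+#
+#     assert CAST_TABLE[type(np.dtype("q"))][type(np.dtype("f"))] \
+#            == Casting.same_kind
+#     assert CAST_TABLE[type(np.dtype("f"))][type(np.dtype("d"))] \
+#            == Casting.safe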
+
+
+class TestChanges:
+    """
+    These test cases exercise some behaviour changes
+    """
+    @pytest.mark.parametrize("string", ["S", "U"])
+    @pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
+    def test_float_to_string(self, floating, string):
+        assert np.can_cast(floating, string)
+        # 100 is long enough to hold any formatted floating
+        assert np.can_cast(floating, f"{string}100")
+
+    def test_to_void(self):
+        # In general, we consider casts to unstructured void safe:
+        assert np.can_cast("d", "V")
+        assert np.can_cast("S20", "V")
+
+        # Do not consider it a safe cast if the void is too small:
+        assert not np.can_cast("d", "V1")
+        assert not np.can_cast("S20", "V1")
+        assert not np.can_cast("U1", "V1")
+        # Structured to unstructured is just like any other:
+        assert np.can_cast("d,i", "V", casting="same_kind")
+        # Unstructured void to unstructured is actually no cast at all:
+        assert np.can_cast("V3", "V", casting="no")
+        assert np.can_cast("V0", "V", casting="no")
+
+
+class TestCasting:
+    size = 1500  # Best kept larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
+
+    def get_data(self, dtype1, dtype2):
+        if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
+            length = self.size // dtype1.itemsize
+        else:
+            length = self.size // dtype2.itemsize
+
+        # Assume that the base array is well enough aligned for all inputs.
+        arr1 = np.empty(length, dtype=dtype1)
+        assert arr1.flags.c_contiguous
+        assert arr1.flags.aligned
+
+        values = [random.randrange(-128, 128) for _ in range(length)]
+
+        for i, value in enumerate(values):
+            # Use item assignment to ensure this is not using casting:
+            if value < 0 and dtype1.kind == "u":
+                # Manually rollover unsigned integers (-1 -> int.max)
+                value = value + np.iinfo(dtype1).max + 1
+            arr1[i] = value
+
+        if dtype2 is None:
+            if dtype1.char == "?":
+                values = [bool(v) for v in values]
+            return arr1, values
+
+        if dtype2.char == "?":
+            values = [bool(v) for v in values]
+
+        arr2 = np.empty(length, dtype=dtype2)
+        assert arr2.flags.c_contiguous
+        assert arr2.flags.aligned
+
+        for i, value in enumerate(values):
+            # Use item assignment to ensure this is not using casting:
+            if value < 0 and dtype2.kind == "u":
+                # Manually rollover unsigned integers (-1 -> int.max)
+                value = value + np.iinfo(dtype2).max + 1
+            arr2[i] = value
+
+        return arr1, arr2, values
+
+    def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
+        """
+        Returns a copy of arr1 that may be non-contiguous or unaligned, and a
+        matching array for arr2 (although not a copy).
+        """
+        if contig:
+            stride1 = arr1.dtype.itemsize
+            stride2 = arr2.dtype.itemsize
+        elif aligned:
+            stride1 = 2 * arr1.dtype.itemsize
+            stride2 = 2 * arr2.dtype.itemsize
+        else:
+            stride1 = arr1.dtype.itemsize + 1
+            stride2 = arr2.dtype.itemsize + 1
+
+        max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1
+        max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1
+        from_bytes = np.zeros(max_size1, dtype=np.uint8)
+        to_bytes = np.zeros(max_size2, dtype=np.uint8)
+
+        # Sanity check that the above is large enough:
+        assert stride1 * len(arr1) <= from_bytes.nbytes
+        assert stride2 * len(arr2) <= to_bytes.nbytes
+
+        if aligned:
+            new1 = as_strided(from_bytes[:-1].view(arr1.dtype),
+                              arr1.shape, (stride1,))
+            new2 = as_strided(to_bytes[:-1].view(arr2.dtype),
+                              arr2.shape, (stride2,))
+        else:
+            new1 = as_strided(from_bytes[1:].view(arr1.dtype),
+                              arr1.shape, (stride1,))
+            new2 = as_strided(to_bytes[1:].view(arr2.dtype),
+                              arr2.shape, (stride2,))
+
+        new1[...] = arr1
+
+        if not contig:
+            # Ensure we did not overwrite bytes that should not be written:
+            offset = arr1.dtype.itemsize if aligned else 0
+            buf = from_bytes[offset::stride1].tobytes()
+            assert buf.count(b"\0") == len(buf)
+
+        if contig:
+            assert new1.flags.c_contiguous
+            assert new2.flags.c_contiguous
+        else:
+            assert not new1.flags.c_contiguous
+            assert not new2.flags.c_contiguous
+
+        if aligned:
+            assert new1.flags.aligned
+            assert new2.flags.aligned
+        else:
+            assert not new1.flags.aligned or new1.dtype.alignment == 1
+            assert not new2.flags.aligned or new2.dtype.alignment == 1
+
+        return new1, new2
+
+    @pytest.mark.parametrize("from_Dt", simple_dtypes)
+    def test_simple_cancast(self, from_Dt):
+        for to_Dt in simple_dtypes:
+            cast = get_castingimpl(from_Dt, to_Dt)
+
+            for from_dt in [from_Dt(), from_Dt().newbyteorder()]:
+                default = cast._resolve_descriptors((from_dt, None))[1][1]
+                assert default == to_Dt()
+                del default
+
+                for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
+                    casting, (from_res, to_res), view_off = (
+                            cast._resolve_descriptors((from_dt, to_dt)))
+                    assert type(from_res) == from_Dt
+                    assert type(to_res) == to_Dt
+                    if view_off is not None:
+                        # If a view is acceptable, this is "no" casting
+                        # and byte order must be matching.
+                        assert casting == Casting.no
+                        # The above table lists this as "equivalent"
+                        assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
+                        # Note that to_res may not be the same as from_dt
+                        assert from_res.isnative == to_res.isnative
+                    else:
+                        if from_Dt == to_Dt:
+                            # Note that to_res may not be the same as from_dt
+                            assert from_res.isnative != to_res.isnative
+                        assert casting == CAST_TABLE[from_Dt][to_Dt]
+
+                    if from_Dt is to_Dt:
+                        assert from_dt is from_res
+                        assert to_dt is to_res
+
+
+    @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
+    @pytest.mark.parametrize("from_dt", simple_dtype_instances())
+    def test_simple_direct_casts(self, from_dt):
+        """
+        This test checks numeric direct casts for dtypes supported also by the
+        struct module (plus complex).  It tries to test a wide range of
+        inputs, but skips over possibly undefined behaviour (e.g. int rollover).
+        Longdouble and CLongdouble are tested, but only using double precision.
+
+        If this test creates issues, it should possibly just be simplified
+        or even removed (checking whether unaligned/non-contiguous casts give
+        the same results is useful, though).
+        """
+        for to_dt in simple_dtype_instances():
+            to_dt = to_dt.values[0]
+            cast = get_castingimpl(type(from_dt), type(to_dt))
+
+            casting, (from_res, to_res), view_off = cast._resolve_descriptors(
+                (from_dt, to_dt))
+
+            if from_res is not from_dt or to_res is not to_dt:
+                # Do not test this case, it is handled in multiple steps,
+                # each of which should be tested individually.
+                return
+
+            safe = casting <= Casting.safe
+            del from_res, to_res, casting
+
+            arr1, arr2, values = self.get_data(from_dt, to_dt)
+
+            cast._simple_strided_call((arr1, arr2))
+
+            # Check via python list
+            assert arr2.tolist() == values
+
+            # Check that the same results are achieved for strided loops
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+            # Check if alignment makes a difference, but only if supported
+            # and only if the alignment can be wrong
+            if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
+                    not cast._supports_unaligned):
+                return
+
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+            del arr1_o, arr2_o, cast
+
+    @pytest.mark.parametrize("from_Dt", simple_dtypes)
+    def test_numeric_to_times(self, from_Dt):
+        # We currently only implement contiguous loops, so only need to
+        # test those.
+        from_dt = from_Dt()
+
+        time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
+                       np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
+        for time_dt in time_dtypes:
+            cast = get_castingimpl(type(from_dt), type(time_dt))
+
+            casting, (from_res, to_res), view_off = cast._resolve_descriptors(
+                (from_dt, time_dt))
+
+            assert from_res is from_dt
+            assert to_res is time_dt
+            del from_res, to_res
+
+            assert casting & CAST_TABLE[from_Dt][type(time_dt)]
+            assert view_off is None
+
+            int64_dt = np.dtype(np.int64)
+            arr1, arr2, values = self.get_data(from_dt, int64_dt)
+            arr2 = arr2.view(time_dt)
+            arr2[...] = np.datetime64("NaT")
+
+            if time_dt == np.dtype("M8"):
+                # This is a bit of a strange path, and could probably be removed
+                arr1[-1] = 0  # ensure at least one value is not NaT
+
+                # The cast currently succeeds, but the values are invalid:
+                cast._simple_strided_call((arr1, arr2))
+                with pytest.raises(ValueError):
+                    str(arr2[-1])  # e.g. conversion to string fails
+                return
+
+            cast._simple_strided_call((arr1, arr2))
+
+            assert [int(v) for v in arr2.tolist()] == values
+
+            # Check that the same results are achieved for strided loops
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+    @pytest.mark.parametrize(
+            ["from_dt", "to_dt", "expected_casting", "expected_view_off",
+             "nom", "denom"],
+            [("M8[ns]", None, Casting.no, 0, 1, 1),
+             (str(np.dtype("M8[ns]").newbyteorder()), None,
+                  Casting.equiv, None, 1, 1),
+             ("M8", "M8[ms]", Casting.safe, 0, 1, 1),
+             # should be invalid cast:
+             ("M8[ms]", "M8", Casting.unsafe, None, 1, 1),
+             ("M8[5ms]", "M8[5ms]", Casting.no, 0, 1, 1),
+             ("M8[ns]", "M8[ms]", Casting.same_kind, None, 1, 10**6),
+             ("M8[ms]", "M8[ns]", Casting.safe, None, 10**6, 1),
+             ("M8[ms]", "M8[7ms]", Casting.same_kind, None, 1, 7),
+             ("M8[4D]", "M8[1M]", Casting.same_kind, None, None,
+                  # give full values based on NumPy 1.19.x
+                  [-2**63, 0, -1, 1314, -1315, 564442610]),
+             ("m8[ns]", None, Casting.no, 0, 1, 1),
+             (str(np.dtype("m8[ns]").newbyteorder()), None,
+                  Casting.equiv, None, 1, 1),
+             ("m8", "m8[ms]", Casting.safe, 0, 1, 1),
+             # should be invalid cast:
+             ("m8[ms]", "m8", Casting.unsafe, None, 1, 1),
+             ("m8[5ms]", "m8[5ms]", Casting.no, 0, 1, 1),
+             ("m8[ns]", "m8[ms]", Casting.same_kind, None, 1, 10**6),
+             ("m8[ms]", "m8[ns]", Casting.safe, None, 10**6, 1),
+             ("m8[ms]", "m8[7ms]", Casting.same_kind, None, 1, 7),
+             ("m8[4D]", "m8[1M]", Casting.unsafe, None, None,
+                  # give full values based on NumPy 1.19.x
+                  [-2**63, 0, 0, 1314, -1315, 564442610])])
+    def test_time_to_time(self, from_dt, to_dt,
+                          expected_casting, expected_view_off,
+                          nom, denom):
+        from_dt = np.dtype(from_dt)
+        if to_dt is not None:
+            to_dt = np.dtype(to_dt)
+
+        # Test a few values for casting (results generated with NumPy 1.19)
+        values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32])
+        values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
+        assert values.dtype.byteorder == from_dt.byteorder
+        assert np.isnat(values.view(from_dt)[0])
+
+        DType = type(from_dt)
+        cast = get_castingimpl(DType, DType)
+        casting, (from_res, to_res), view_off = cast._resolve_descriptors(
+                (from_dt, to_dt))
+        assert from_res is from_dt
+        assert to_res is to_dt or to_dt is None
+        assert casting == expected_casting
+        assert view_off == expected_view_off
+
+        if nom is not None:
+            expected_out = (values * nom // denom).view(to_res)
+            expected_out[0] = "NaT"
+        else:
+            expected_out = np.empty_like(values)
+            expected_out[...] = denom
+            expected_out = expected_out.view(to_dt)
+
+        orig_arr = values.view(from_dt)
+        orig_out = np.empty_like(expected_out)
+
+        if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):
+            # Casting from non-generic to generic units is an error and should
+            # probably be reported as an invalid cast earlier.
+            with pytest.raises(ValueError):
+                cast._simple_strided_call((orig_arr, orig_out))
+            return
+
+        for aligned in [True, False]:
+            for contig in [True, False]:
+                arr, out = self.get_data_variation(
+                        orig_arr, orig_out, aligned, contig)
+                out[...] = 0
+                cast._simple_strided_call((arr, out))
+                assert_array_equal(out.view("int64"), expected_out.view("int64"))
+
+    def string_with_modified_length(self, dtype, change_length):
+        fact = 1 if dtype.char == "S" else 4
+        length = dtype.itemsize // fact + change_length
+        return np.dtype(f"{dtype.byteorder}{dtype.char}{length}")
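+
+    # For example, on a little-endian build, string_with_modified_length maps
+    # np.dtype("U8") with change_length=+1 to np.dtype("<U9") (same byte
+    # order and kind, one extra character).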
+
+    @pytest.mark.parametrize("other_DT", simple_dtypes)
+    @pytest.mark.parametrize("string_char", ["S", "U"])
+    def test_string_cancast(self, other_DT, string_char):
+        fact = 1 if string_char == "S" else 4
+
+        string_DT = type(np.dtype(string_char))
+        cast = get_castingimpl(other_DT, string_DT)
+
+        other_dt = other_DT()
+        expected_length = get_expected_stringlength(other_dt)
+        string_dt = np.dtype(f"{string_char}{expected_length}")
+
+        safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
+                (other_dt, None))
+        assert res_dt.itemsize == expected_length * fact
+        assert safety == Casting.safe  # we consider casts to string "safe"
+        assert view_off is None
+        assert isinstance(res_dt, string_DT)
+
+        # These casts currently implement changing the string length, so
+        # check the cast-safety for too long/fixed string lengths:
+        for change_length in [-1, 0, 1]:
+            if change_length >= 0:
+                expected_safety = Casting.safe
+            else:
+                expected_safety = Casting.same_kind
+
+            to_dt = self.string_with_modified_length(string_dt, change_length)
+            safety, (_, res_dt), view_off = cast._resolve_descriptors(
+                    (other_dt, to_dt))
+            assert res_dt is to_dt
+            assert safety == expected_safety
+            assert view_off is None
+
+        # The opposite direction is always considered unsafe:
+        cast = get_castingimpl(string_DT, other_DT)
+
+        safety, _, view_off = cast._resolve_descriptors((string_dt, other_dt))
+        assert safety == Casting.unsafe
+        assert view_off is None
+
+        cast = get_castingimpl(string_DT, other_DT)
+        safety, (_, res_dt), view_off = cast._resolve_descriptors(
+            (string_dt, None))
+        assert safety == Casting.unsafe
+        assert view_off is None
+        assert other_dt is res_dt  # returns the singleton for simple dtypes
+
+    @pytest.mark.parametrize("string_char", ["S", "U"])
+    @pytest.mark.parametrize("other_dt", simple_dtype_instances())
+    def test_simple_string_casts_roundtrip(self, other_dt, string_char):
+        """
+        Tests casts from and to string by checking the roundtripping property.
+
+        The test also covers some string to string casts (but not all).
+
+        If this test creates issues, it should possibly just be simplified
+        or even removed (checking whether unaligned/non-contiguous casts give
+        the same results is useful, though).
+        """
+        string_DT = type(np.dtype(string_char))
+
+        cast = get_castingimpl(type(other_dt), string_DT)
+        cast_back = get_castingimpl(string_DT, type(other_dt))
+        _, (res_other_dt, string_dt), _ = cast._resolve_descriptors(
+                (other_dt, None))
+
+        if res_other_dt is not other_dt:
+            # do not support non-native byteorder, skip test in that case
+            assert other_dt.byteorder != res_other_dt.byteorder
+            return
+
+        orig_arr, values = self.get_data(other_dt, None)
+        str_arr = np.zeros(len(orig_arr), dtype=string_dt)
+        string_dt_short = self.string_with_modified_length(string_dt, -1)
+        str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
+        string_dt_long = self.string_with_modified_length(string_dt, 1)
+        str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
+
+        assert not cast._supports_unaligned  # if support is added, should test
+        assert not cast_back._supports_unaligned
+
+        for contig in [True, False]:
+            other_arr, str_arr = self.get_data_variation(
+                orig_arr, str_arr, True, contig)
+            _, str_arr_short = self.get_data_variation(
+                orig_arr, str_arr_short.copy(), True, contig)
+            _, str_arr_long = self.get_data_variation(
+                orig_arr, str_arr_long, True, contig)
+
+            cast._simple_strided_call((other_arr, str_arr))
+
+            cast._simple_strided_call((other_arr, str_arr_short))
+            assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
+
+            cast._simple_strided_call((other_arr, str_arr_long))
+            assert_array_equal(str_arr, str_arr_long)
+
+            if other_dt.kind == "b":
+                # Booleans do not roundtrip
+                continue
+
+            other_arr[...] = 0
+            cast_back._simple_strided_call((str_arr, other_arr))
+            assert_array_equal(orig_arr, other_arr)
+
+            other_arr[...] = 0
+            cast_back._simple_strided_call((str_arr_long, other_arr))
+            assert_array_equal(orig_arr, other_arr)
+
+    @pytest.mark.parametrize("other_dt", ["S8", "U8"])
+    @pytest.mark.parametrize("string_char", ["S", "U"])
+    def test_string_to_string_cancast(self, other_dt, string_char):
+        other_dt = np.dtype(other_dt)
+
+        fact = 1 if string_char == "S" else 4
+        div = 1 if other_dt.char == "S" else 4
+
+        string_DT = type(np.dtype(string_char))
+        cast = get_castingimpl(type(other_dt), string_DT)
+
+        expected_length = other_dt.itemsize // div
+        string_dt = np.dtype(f"{string_char}{expected_length}")
+
+        safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
+                (other_dt, None))
+        assert res_dt.itemsize == expected_length * fact
+        assert isinstance(res_dt, string_DT)
+
+        expected_view_off = None
+        if other_dt.char == string_char:
+            if other_dt.isnative:
+                expected_safety = Casting.no
+                expected_view_off = 0
+            else:
+                expected_safety = Casting.equiv
+        elif string_char == "U":
+            expected_safety = Casting.safe
+        else:
+            expected_safety = Casting.unsafe
+
+        assert view_off == expected_view_off
+        assert expected_safety == safety
+
+        for change_length in [-1, 0, 1]:
+            to_dt = self.string_with_modified_length(string_dt, change_length)
+            safety, (_, res_dt), view_off = cast._resolve_descriptors(
+                    (other_dt, to_dt))
+
+            assert res_dt is to_dt
+            if change_length <= 0:
+                assert view_off == expected_view_off
+            else:
+                assert view_off is None
+            if expected_safety == Casting.unsafe:
+                assert safety == expected_safety
+            elif change_length < 0:
+                assert safety == Casting.same_kind
+            elif change_length == 0:
+                assert safety == expected_safety
+            elif change_length > 0:
+                assert safety == Casting.safe
+
+    @pytest.mark.parametrize("order1", [">", "<"])
+    @pytest.mark.parametrize("order2", [">", "<"])
+    def test_unicode_byteswapped_cast(self, order1, order2):
+        # Very specific tests (not using the castingimpl directly)
+        # that check unicode byteswaps, including for unaligned array data.
+        dtype1 = np.dtype(f"{order1}U30")
+        dtype2 = np.dtype(f"{order2}U30")
+        data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
+        data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
+        if dtype1.alignment != 1:
+            # alignment should always be >1, but skip the check if not
+            assert not data1.flags.aligned
+            assert not data2.flags.aligned
+
+        element = "this is a ünicode string‽"
+        data1[()] = element
+        # Test both `data1` and `data1.copy()`  (which should be aligned)
+        for data in [data1, data1.copy()]:
+            data2[...] = data1
+            assert data2[()] == element
+            assert data2.copy()[()] == element
+
+    def test_void_to_string_special_case(self):
+        # Cover a small special case in void to string casting that could
+        # probably just as well be turned into an error (compare
+        # `test_object_to_parametric_internal_error` below).
+        assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
+        assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
+
+    def test_object_to_parametric_internal_error(self):
+        # We reject casting from object to a parametric type, without
+        # figuring out the correct instance first.
+        object_dtype = type(np.dtype(object))
+        other_dtype = type(np.dtype(str))
+        cast = get_castingimpl(object_dtype, other_dtype)
+        with pytest.raises(TypeError,
+                    match="casting from object to the parametric DType"):
+            cast._resolve_descriptors((np.dtype("O"), None))
+
+    @pytest.mark.parametrize("dtype", simple_dtype_instances())
+    def test_object_and_simple_resolution(self, dtype):
+        # Simple test to exercise the cast when no instance is specified
+        object_dtype = type(np.dtype(object))
+        cast = get_castingimpl(object_dtype, type(dtype))
+
+        safety, (_, res_dt), view_off = cast._resolve_descriptors(
+                (np.dtype("O"), dtype))
+        assert safety == Casting.unsafe
+        assert view_off is None
+        assert res_dt is dtype
+
+        safety, (_, res_dt), view_off = cast._resolve_descriptors(
+                (np.dtype("O"), None))
+        assert safety == Casting.unsafe
+        assert view_off is None
+        assert res_dt == dtype.newbyteorder("=")
+
+    @pytest.mark.parametrize("dtype", simple_dtype_instances())
+    def test_simple_to_object_resolution(self, dtype):
+        # Simple test to exercise the cast when no instance is specified
+        object_dtype = type(np.dtype(object))
+        cast = get_castingimpl(type(dtype), object_dtype)
+
+        safety, (_, res_dt), view_off = cast._resolve_descriptors(
+                (dtype, None))
+        assert safety == Casting.safe
+        assert view_off is None
+        assert res_dt is np.dtype("O")
+
+    @pytest.mark.parametrize("casting", ["no", "unsafe"])
+    def test_void_and_structured_with_subarray(self, casting):
+        # test case corresponding to gh-19325
+        dtype = np.dtype([("foo", " casts may succeed or fail, but a NULL'ed array must
+        # behave the same as one filled with None's.
+        arr_normal = np.array([None] * 5)
+        arr_NULLs = np.empty_like(arr_normal)
+        ctypes.memset(arr_NULLs.ctypes.data, 0, arr_NULLs.nbytes)
+        # If the check fails (maybe it should) the test would lose its purpose:
+        assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes
+
+        try:
+            expected = arr_normal.astype(dtype)
+        except TypeError:
+            with pytest.raises(TypeError):
+                arr_NULLs.astype(dtype)
+        else:
+            assert_array_equal(expected, arr_NULLs.astype(dtype))
+
+    @pytest.mark.parametrize("dtype",
+            np.typecodes["AllInteger"] + np.typecodes["AllFloat"])
+    def test_nonstandard_bool_to_other(self, dtype):
+        # simple test for casting bool_ to numeric types, which should not
+        # expose the detail that NumPy bools can sometimes take values other
+        # than 0 and 1.  See also gh-19514.
+        nonstandard_bools = np.array([0, 3, -7], dtype=np.int8).view(bool)
+        res = nonstandard_bools.astype(dtype)
+        expected = [0, 1, 1]
+        assert_array_equal(res, expected)
+
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_conversion_utils.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_conversion_utils.py
new file mode 100644
index 00000000..c602eba4
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_conversion_utils.py
@@ -0,0 +1,208 @@
+"""
+Tests for numpy/core/src/multiarray/conversion_utils.c
+"""
+import re
+import sys
+
+import pytest
+
+import numpy as np
+import numpy.core._multiarray_tests as mt
+from numpy.testing import assert_warns, IS_PYPY
+
+
+class StringConverterTestCase:
+    allow_bytes = True
+    case_insensitive = True
+    exact_match = False
+    warn = True
+
+    def _check_value_error(self, val):
+        pattern = r'\(got {}\)'.format(re.escape(repr(val)))
+        with pytest.raises(ValueError, match=pattern) as exc:
+            self.conv(val)
+
+    def _check_conv_assert_warn(self, val, expected):
+        if self.warn:
+            with assert_warns(DeprecationWarning) as exc:
+                assert self.conv(val) == expected
+        else:
+            assert self.conv(val) == expected
+
+    def _check(self, val, expected):
+        """Takes valid non-deprecated inputs for converters,
+        runs converters on inputs, checks correctness of outputs,
+        warnings and errors"""
+        assert self.conv(val) == expected
+
+        if self.allow_bytes:
+            assert self.conv(val.encode('ascii')) == expected
+        else:
+            with pytest.raises(TypeError):
+                self.conv(val.encode('ascii'))
+
+        if len(val) != 1:
+            if self.exact_match:
+                self._check_value_error(val[:1])
+                self._check_value_error(val + '\0')
+            else:
+                self._check_conv_assert_warn(val[:1], expected)
+
+        if self.case_insensitive:
+            if val != val.lower():
+                self._check_conv_assert_warn(val.lower(), expected)
+            if val != val.upper():
+                self._check_conv_assert_warn(val.upper(), expected)
+        else:
+            if val != val.lower():
+                self._check_value_error(val.lower())
+            if val != val.upper():
+                self._check_value_error(val.upper())
+
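+    # Subclasses set `conv` and the class flags above; e.g.
+    # TestByteorderConverter below calls self._check('big', 'NPY_BIG'), which
+    # also exercises conv(b'big') because allow_bytes is True.
+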
+    def test_wrong_type(self):
+        # common cases which apply to all the below
+        with pytest.raises(TypeError):
+            self.conv({})
+        with pytest.raises(TypeError):
+            self.conv([])
+
+    def test_wrong_value(self):
+        # nonsense strings
+        self._check_value_error('')
+        self._check_value_error('\N{greek small letter pi}')
+
+        if self.allow_bytes:
+            self._check_value_error(b'')
+            # bytes which can't be converted to strings via utf8
+            self._check_value_error(b"\xFF")
+        if self.exact_match:
+            self._check_value_error("there's no way this is supported")
+
+
+class TestByteorderConverter(StringConverterTestCase):
+    """ Tests of PyArray_ByteorderConverter """
+    conv = mt.run_byteorder_converter
+    warn = False
+
+    def test_valid(self):
+        for s in ['big', '>']:
+            self._check(s, 'NPY_BIG')
+        for s in ['little', '<']:
+            self._check(s, 'NPY_LITTLE')
+        for s in ['native', '=']:
+            self._check(s, 'NPY_NATIVE')
+        for s in ['ignore', '|']:
+            self._check(s, 'NPY_IGNORE')
+        for s in ['swap']:
+            self._check(s, 'NPY_SWAP')
+
+
+class TestSortkindConverter(StringConverterTestCase):
+    """ Tests of PyArray_SortkindConverter """
+    conv = mt.run_sortkind_converter
+    warn = False
+
+    def test_valid(self):
+        self._check('quicksort', 'NPY_QUICKSORT')
+        self._check('heapsort', 'NPY_HEAPSORT')
+        self._check('mergesort', 'NPY_STABLESORT')  # alias
+        self._check('stable', 'NPY_STABLESORT')
+
+
+class TestSelectkindConverter(StringConverterTestCase):
+    """ Tests of PyArray_SelectkindConverter """
+    conv = mt.run_selectkind_converter
+    case_insensitive = False
+    exact_match = True
+
+    def test_valid(self):
+        self._check('introselect', 'NPY_INTROSELECT')
+
+
+class TestSearchsideConverter(StringConverterTestCase):
+    """ Tests of PyArray_SearchsideConverter """
+    conv = mt.run_searchside_converter
+    def test_valid(self):
+        self._check('left', 'NPY_SEARCHLEFT')
+        self._check('right', 'NPY_SEARCHRIGHT')
+
+
+class TestOrderConverter(StringConverterTestCase):
+    """ Tests of PyArray_OrderConverter """
+    conv = mt.run_order_converter
+    warn = False
+
+    def test_valid(self):
+        self._check('c', 'NPY_CORDER')
+        self._check('f', 'NPY_FORTRANORDER')
+        self._check('a', 'NPY_ANYORDER')
+        self._check('k', 'NPY_KEEPORDER')
+
+    def test_flatten_invalid_order(self):
+        # invalid after gh-14596
+        with pytest.raises(ValueError):
+            self.conv('Z')
+        for order in [False, True, 0, 8]:
+            with pytest.raises(TypeError):
+                self.conv(order)
+
+
+class TestClipmodeConverter(StringConverterTestCase):
+    """ Tests of PyArray_ClipmodeConverter """
+    conv = mt.run_clipmode_converter
+    def test_valid(self):
+        self._check('clip', 'NPY_CLIP')
+        self._check('wrap', 'NPY_WRAP')
+        self._check('raise', 'NPY_RAISE')
+
+        # integer values allowed here
+        assert self.conv(np.CLIP) == 'NPY_CLIP'
+        assert self.conv(np.WRAP) == 'NPY_WRAP'
+        assert self.conv(np.RAISE) == 'NPY_RAISE'
+
+
+class TestCastingConverter(StringConverterTestCase):
+    """ Tests of PyArray_CastingConverter """
+    conv = mt.run_casting_converter
+    case_insensitive = False
+    exact_match = True
+
+    def test_valid(self):
+        self._check("no", "NPY_NO_CASTING")
+        self._check("equiv", "NPY_EQUIV_CASTING")
+        self._check("safe", "NPY_SAFE_CASTING")
+        self._check("same_kind", "NPY_SAME_KIND_CASTING")
+        self._check("unsafe", "NPY_UNSAFE_CASTING")
+
+
+class TestIntpConverter:
+    """ Tests of PyArray_IntpConverter """
+    conv = mt.run_intp_converter
+
+    def test_basic(self):
+        assert self.conv(1) == (1,)
+        assert self.conv((1, 2)) == (1, 2)
+        assert self.conv([1, 2]) == (1, 2)
+        assert self.conv(()) == ()
+
+    def test_none(self):
+        # once the warning expires, this will raise TypeError
+        with pytest.warns(DeprecationWarning):
+            assert self.conv(None) == ()
+
+    @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+            reason="PyPy bug in error formatting")
+    def test_float(self):
+        with pytest.raises(TypeError):
+            self.conv(1.0)
+        with pytest.raises(TypeError):
+            self.conv([1, 1.0])
+
+    def test_too_large(self):
+        with pytest.raises(ValueError):
+            self.conv(2**64)
+
+    def test_too_many_dims(self):
+        assert self.conv([1]*32) == (1,)*32
+        with pytest.raises(ValueError):
+            self.conv([1]*33)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_cpu_dispatcher.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_cpu_dispatcher.py
new file mode 100644
index 00000000..41a60d5c
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_cpu_dispatcher.py
@@ -0,0 +1,43 @@
+from numpy.core._multiarray_umath import __cpu_features__, __cpu_baseline__, __cpu_dispatch__
+from numpy.core import _umath_tests
+from numpy.testing import assert_equal
+
+def test_dispatcher():
+    """
+    Testing the utilities of the CPU dispatcher
+    """
+    targets = (
+        "SSE2", "SSE41", "AVX2",
+        "VSX", "VSX2", "VSX3",
+        "NEON", "ASIMD", "ASIMDHP",
+        "VX", "VXE"
+    )
+    highest_sfx = "" # no suffix for the baseline
+    all_sfx = []
+    for feature in reversed(targets):
+        # Skip baseline features: by default `CCompilerOpt` does not generate
+        # separate objects for the baseline; it combines all of them into one
+        # object via the 'baseline' option in the configuration statements.
+        if feature in __cpu_baseline__:
+            continue
+        # check compiler and running machine support
+        if feature not in __cpu_dispatch__ or not __cpu_features__[feature]:
+            continue
+
+        if not highest_sfx:
+            highest_sfx = "_" + feature
+        all_sfx.append("func" + "_" + feature)
+
+    test = _umath_tests.test_dispatch()
+    assert_equal(test["func"], "func" + highest_sfx)
+    assert_equal(test["var"], "var"  + highest_sfx)
+
+    if highest_sfx:
+        assert_equal(test["func_xb"], "func" + highest_sfx)
+        assert_equal(test["var_xb"], "var"  + highest_sfx)
+    else:
+        assert_equal(test["func_xb"], "nobase")
+        assert_equal(test["var_xb"], "nobase")
+
+    all_sfx.append("func") # add the baseline
+    assert_equal(test["all"], all_sfx)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_cpu_features.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_cpu_features.py
new file mode 100644
index 00000000..48ab30a4
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_cpu_features.py
@@ -0,0 +1,404 @@
+import sys, platform, re, pytest
+from numpy.core._multiarray_umath import (
+    __cpu_features__,
+    __cpu_baseline__,
+    __cpu_dispatch__,
+)
+import numpy as np
+import subprocess
+import pathlib
+import os
+
+def assert_features_equal(actual, desired, fname):
+    __tracebackhide__ = True  # Hide traceback for py.test
+    actual, desired = str(actual), str(desired)
+    if actual == desired:
+        return
+    detected = str(__cpu_features__).replace("'", "")
+    try:
+        with open("/proc/cpuinfo") as fd:
+            cpuinfo = fd.read(2048)
+    except Exception as err:
+        cpuinfo = str(err)
+
+    try:
+        auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
+        auxv = auxv.decode()
+    except Exception as err:
+        auxv = str(err)
+
+    import textwrap
+    error_report = textwrap.indent(
+"""
+###########################################
+### Extra debugging information
+###########################################
+-------------------------------------------
+--- NumPy Detections
+-------------------------------------------
+%s
+-------------------------------------------
+--- SYS / CPUINFO
+-------------------------------------------
+%s....
+-------------------------------------------
+--- SYS / AUXV
+-------------------------------------------
+%s
+""" % (detected, cpuinfo, auxv), prefix='\r')
+
+    raise AssertionError((
+        "Failure Detection\n"
+        " NAME: '%s'\n"
+        " ACTUAL: %s\n"
+        " DESIRED: %s\n"
+        "%s"
+    ) % (fname, actual, desired, error_report))
+
+def _text_to_list(txt):
+    out = txt.strip("][\n").replace("'", "").split(', ')
+    return None if out[0] == "" else out
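+
+# e.g. _text_to_list("['SSE41', 'AVX2']\n") returns ['SSE41', 'AVX2'], while
+# _text_to_list("[]\n") returns None.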
+
+class AbstractTest:
+    features = []
+    features_groups = {}
+    features_map = {}
+    features_flags = set()
+
+    def load_flags(self):
+        # a hook
+        pass
+    def test_features(self):
+        self.load_flags()
+        for gname, features in self.features_groups.items():
+            test_features = [self.cpu_have(f) for f in features]
+            assert_features_equal(__cpu_features__.get(gname), all(test_features), gname)
+
+        for feature_name in self.features:
+            cpu_have = self.cpu_have(feature_name)
+            npy_have = __cpu_features__.get(feature_name)
+            assert_features_equal(npy_have, cpu_have, feature_name)
+
+    def cpu_have(self, feature_name):
+        map_names = self.features_map.get(feature_name, feature_name)
+        if isinstance(map_names, str):
+            return map_names in self.features_flags
+        for f in map_names:
+            if f in self.features_flags:
+                return True
+        return False
+
+    def load_flags_cpuinfo(self, magic_key):
+        self.features_flags = self.get_cpuinfo_item(magic_key)
+
+    def get_cpuinfo_item(self, magic_key):
+        values = set()
+        with open('/proc/cpuinfo') as fd:
+            for line in fd:
+                if not line.startswith(magic_key):
+                    continue
+                flags_value = [s.strip() for s in line.split(':', 1)]
+                if len(flags_value) == 2:
+                    values = values.union(flags_value[1].upper().split())
+        return values
+
+    def load_flags_auxv(self):
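+        # LD_SHOW_AUXV=1 makes the glibc dynamic loader print the auxiliary
+        # vector, including a line like "AT_HWCAP:  sse2 avx2 ...", which is
+        # parsed into upper-cased feature flags below.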
+        auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
+        for at in auxv.split(b'\n'):
+            if not at.startswith(b"AT_HWCAP"):
+                continue
+            hwcap_value = [s.strip() for s in at.split(b':', 1)]
+            if len(hwcap_value) == 2:
+                self.features_flags = self.features_flags.union(
+                    hwcap_value[1].upper().decode().split()
+                )
+
+@pytest.mark.skipif(
+    sys.platform == 'emscripten',
+    reason= (
+        "The subprocess module is not available on WASM platforms and"
+        " therefore this test class cannot be properly executed."
+    ),
+)
+class TestEnvPrivation:
+    cwd = pathlib.Path(__file__).parent.resolve()
+    env = os.environ.copy()
+    _enable = os.environ.pop('NPY_ENABLE_CPU_FEATURES', None)
+    _disable = os.environ.pop('NPY_DISABLE_CPU_FEATURES', None)
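+    # Popped at class-definition time so the copies of os.environ made in
+    # setup_method() start without either variable set.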
+    SUBPROCESS_ARGS = dict(cwd=cwd, capture_output=True, text=True, check=True)
+    unavailable_feats = [
+        feat for feat in __cpu_dispatch__ if not __cpu_features__[feat]
+    ]
+    UNAVAILABLE_FEAT = (
+        None if len(unavailable_feats) == 0
+        else unavailable_feats[0]
+    )
+    BASELINE_FEAT = None if len(__cpu_baseline__) == 0 else __cpu_baseline__[0]
+    SCRIPT = """
+def main():
+    from numpy.core._multiarray_umath import __cpu_features__, __cpu_dispatch__
+
+    detected = [feat for feat in __cpu_dispatch__ if __cpu_features__[feat]]
+    print(detected)
+
+if __name__ == "__main__":
+    main()
+    """
+
+    @pytest.fixture(autouse=True)
+    def setup_class(self, tmp_path_factory):
+        file = tmp_path_factory.mktemp("runtime_test_script")
+        file /= "_runtime_detect.py"
+        file.write_text(self.SCRIPT)
+        self.file = file
+        return
+
+    def _run(self):
+        return subprocess.run(
+            [sys.executable, self.file],
+            env=self.env,
+            **self.SUBPROCESS_ARGS,
+            )
+
+    # Helper function mimicking pytest.raises for subprocess calls
+    def _expect_error(
+        self,
+        msg,
+        err_type,
+        no_error_msg="Failed to generate error"
+    ):
+        try:
+            self._run()
+        except subprocess.CalledProcessError as e:
+            assertion_message = f"Expected: {msg}\nGot: {e.stderr}"
+            assert re.search(msg, e.stderr), assertion_message
+
+            assertion_message = (
+                f"Expected error of type: {err_type}; see full "
+                f"error:\n{e.stderr}"
+            )
+            assert re.search(err_type, e.stderr), assertion_message
+        else:
+            assert False, no_error_msg
+
+    def setup_method(self):
+        """Ensure that the environment is reset"""
+        self.env = os.environ.copy()
+        return
+
+    def test_runtime_feature_selection(self):
+        """
+        Ensure that when selecting `NPY_ENABLE_CPU_FEATURES`, only the
+        features exactly specified are dispatched.
+        """
+
+        # Capture runtime-enabled features
+        out = self._run()
+        non_baseline_features = _text_to_list(out.stdout)
+
+        if non_baseline_features is None:
+            pytest.skip(
+                "No dispatchable features outside of baseline detected."
+            )
+        feature = non_baseline_features[0]
+
+        # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is
+        # specified
+        self.env['NPY_ENABLE_CPU_FEATURES'] = feature
+        out = self._run()
+        enabled_features = _text_to_list(out.stdout)
+
+        # Ensure that only one feature is enabled, and it is exactly the one
+        # specified by `NPY_ENABLE_CPU_FEATURES`
+        assert set(enabled_features) == {feature}
+
+        if len(non_baseline_features) < 2:
+            pytest.skip("Only one non-baseline feature detected.")
+        # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is
+        # specified
+        self.env['NPY_ENABLE_CPU_FEATURES'] = ",".join(non_baseline_features)
+        out = self._run()
+        enabled_features = _text_to_list(out.stdout)
+
+        # Ensure that both features are enabled, and they are exactly the ones
+        # specified by `NPY_ENABLE_CPU_FEATURES`
+        assert set(enabled_features) == set(non_baseline_features)
+        return
+
+    @pytest.mark.parametrize("enabled, disabled",
+    [
+        ("feature", "feature"),
+        ("feature", "same"),
+    ])
+    def test_both_enable_disable_set(self, enabled, disabled):
+        """
+        Ensure that when both environment variables are set then an
+        ImportError is thrown
+        """
+        self.env['NPY_ENABLE_CPU_FEATURES'] = enabled
+        self.env['NPY_DISABLE_CPU_FEATURES'] = disabled
+        msg = "Both NPY_DISABLE_CPU_FEATURES and NPY_ENABLE_CPU_FEATURES"
+        err_type = "ImportError"
+        self._expect_error(msg, err_type)
+
+    @pytest.mark.skipif(
+        not __cpu_dispatch__,
+        reason=(
+            "NPY_*_CPU_FEATURES only parsed if "
+            "`__cpu_dispatch__` is non-empty"
+        )
+    )
+    @pytest.mark.parametrize("action", ["ENABLE", "DISABLE"])
+    def test_variable_too_long(self, action):
+        """
+        Test that an error is thrown if the environment variables are too long
+        to be processed. Current limit is 1024, but this may change later.
+        """
+        MAX_VAR_LENGTH = 1024
+        # Actual length is MAX_VAR_LENGTH + 1 due to null-termination
+        self.env[f'NPY_{action}_CPU_FEATURES'] = "t" * MAX_VAR_LENGTH
+        msg = (
+            f"Length of environment variable 'NPY_{action}_CPU_FEATURES' is "
+            f"{MAX_VAR_LENGTH + 1}, only {MAX_VAR_LENGTH} accepted"
+        )
+        err_type = "RuntimeError"
+        self._expect_error(msg, err_type)
+
+    @pytest.mark.skipif(
+        not __cpu_dispatch__,
+        reason=(
+            "NPY_*_CPU_FEATURES only parsed if "
+            "`__cpu_dispatch__` is non-empty"
+        )
+    )
+    def test_impossible_feature_disable(self):
+        """
+        Test that a RuntimeError is thrown if an impossible feature-disabling
+        request is made. This includes disabling a baseline feature.
+        """
+
+        if self.BASELINE_FEAT is None:
+            pytest.skip("There are no unavailable features to test with")
+        bad_feature = self.BASELINE_FEAT
+        self.env['NPY_DISABLE_CPU_FEATURES'] = bad_feature
+        msg = (
+            f"You cannot disable CPU feature '{bad_feature}', since it is "
+            "part of the baseline optimizations"
+        )
+        err_type = "RuntimeError"
+        self._expect_error(msg, err_type)
+
+    def test_impossible_feature_enable(self):
+        """
+        Test that a RuntimeError is thrown if an impossible feature-enabling
+        request is made. This includes enabling a feature not supported by
+        the machine, even when it is listed alongside a baseline feature.
+        """
+
+        if self.UNAVAILABLE_FEAT is None:
+            pytest.skip("There are no unavailable features to test with")
+        bad_feature = self.UNAVAILABLE_FEAT
+        self.env['NPY_ENABLE_CPU_FEATURES'] = bad_feature
+        msg = (
+            f"You cannot enable CPU features \\({bad_feature}\\), since "
+            "they are not supported by your machine."
+        )
+        err_type = "RuntimeError"
+        self._expect_error(msg, err_type)
+
+        # Ensure that only the bad feature gets reported
+        feats = f"{bad_feature}, {self.BASELINE_FEAT}"
+        self.env['NPY_ENABLE_CPU_FEATURES'] = feats
+        msg = (
+            f"You cannot enable CPU features \\({bad_feature}\\), since they "
+            "are not supported by your machine."
+        )
+        self._expect_error(msg, err_type)
+
+is_linux = sys.platform.startswith('linux')
+is_cygwin = sys.platform.startswith('cygwin')
+machine  = platform.machine()
+is_x86   = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE)
+@pytest.mark.skipif(
+    not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86"
+)
+class Test_X86_Features(AbstractTest):
+    features = [
+        "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42",
+        "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD",
+        "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ",
+        "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA",
+        "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", "AVX512FP16",
+    ]
+    features_groups = dict(
+        AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"],
+        AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS",
+                      "AVX5124VNNIW", "AVX512VPOPCNTDQ"],
+        AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"],
+        AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"],
+        AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
+                      "AVX512VBMI"],
+        AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
+                      "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"],
+        AVX512_SPR = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ",
+                      "AVX512VL", "AVX512IFMA", "AVX512VBMI", "AVX512VNNI",
+                      "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ",
+                      "AVX512FP16"],
+    )
+    features_map = dict(
+        SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA",
+        AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2",
+        AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ",
+        AVX512FP16="AVX512_FP16",
+    )
+    def load_flags(self):
+        self.load_flags_cpuinfo("flags")
+
+is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE)
+@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power")
+class Test_POWER_Features(AbstractTest):
+    features = ["VSX", "VSX2", "VSX3", "VSX4"]
+    features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00", VSX4="ARCH_3_1")
+
+    def load_flags(self):
+        self.load_flags_auxv()
+
+
+is_zarch = re.match("^(s390x)", machine, re.IGNORECASE)
+@pytest.mark.skipif(not is_linux or not is_zarch,
+                    reason="Only for Linux and IBM Z")
+class Test_ZARCH_Features(AbstractTest):
+    features = ["VX", "VXE", "VXE2"]
+
+    def load_flags(self):
+        self.load_flags_auxv()
+
+
+is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE)
+@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM")
+class Test_ARM_Features(AbstractTest):
+    features = [
+        "NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM"
+    ]
+    features_groups = dict(
+        NEON_FP16  = ["NEON", "HALF"],
+        NEON_VFPV4 = ["NEON", "VFPV4"],
+    )
+    def load_flags(self):
+        self.load_flags_cpuinfo("Features")
+        arch = self.get_cpuinfo_item("CPU architecture")
+        # in case the virtual filesystem of an aarch64 kernel is mounted
+        is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0
+        if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8:
+            self.features_map = dict(
+                NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD"
+            )
+        else:
+            self.features_map = dict(
+                # The ELF auxiliary vector and /proc/cpuinfo on Linux (armv8
+                # aarch32 kernels) don't provide information about ASIMD, so we
+                # assume ASIMD is supported if the kernel reports any one of
+                # the following ARMv8 features.
+                ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32")
+            )
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_custom_dtypes.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_custom_dtypes.py
new file mode 100644
index 00000000..da6a4bd5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_custom_dtypes.py
@@ -0,0 +1,253 @@
+import pytest
+
+import numpy as np
+from numpy.testing import assert_array_equal
+from numpy.core._multiarray_umath import (
+    _discover_array_parameters as discover_array_params, _get_sfloat_dtype)
+
+
+SF = _get_sfloat_dtype()
+
+
+class TestSFloat:
+    def _get_array(self, scaling, aligned=True):
+        if not aligned:
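+            # Slice one byte off a uint8 buffer so the float64 view below is
+            # deliberately misaligned, exercising the unaligned code paths.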
+            a = np.empty(3*8 + 1, dtype=np.uint8)[1:]
+            a = a.view(np.float64)
+            a[:] = [1., 2., 3.]
+        else:
+            a = np.array([1., 2., 3.])
+
+        a *= 1./scaling  # the casting code also uses the reciprocal.
+        return a.view(SF(scaling))
+
+    def test_sfloat_rescaled(self):
+        sf = SF(1.)
+        sf2 = sf.scaled_by(2.)
+        assert sf2.get_scaling() == 2.
+        sf6 = sf2.scaled_by(3.)
+        assert sf6.get_scaling() == 6.
+
+    def test_class_discovery(self):
+        # This does not test much, since we always discover the scaling as 1.
+        # But most of NumPy (when writing) does not understand DType classes
+        dt, _ = discover_array_params([1., 2., 3.], dtype=SF)
+        assert dt == SF(1.)
+
+    @pytest.mark.parametrize("scaling", [1., -1., 2.])
+    def test_scaled_float_from_floats(self, scaling):
+        a = np.array([1., 2., 3.], dtype=SF(scaling))
+
+        assert a.dtype.get_scaling() == scaling
+        assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
+
+    def test_repr(self):
+        # Check the repr, mainly to cover the code paths:
+        assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)"
+
+    def test_dtype_name(self):
+        assert SF(1.).name == "_ScaledFloatTestDType64"
+
+    @pytest.mark.parametrize("scaling", [1., -1., 2.])
+    def test_sfloat_from_float(self, scaling):
+        a = np.array([1., 2., 3.]).astype(dtype=SF(scaling))
+
+        assert a.dtype.get_scaling() == scaling
+        assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
+
+    @pytest.mark.parametrize("aligned", [True, False])
+    @pytest.mark.parametrize("scaling", [1., -1., 2.])
+    def test_sfloat_getitem(self, aligned, scaling):
+        a = self._get_array(1., aligned)
+        assert a.tolist() == [1., 2., 3.]
+
+    @pytest.mark.parametrize("aligned", [True, False])
+    def test_sfloat_casts(self, aligned):
+        a = self._get_array(1., aligned)
+
+        assert np.can_cast(a, SF(-1.), casting="equiv")
+        assert not np.can_cast(a, SF(-1.), casting="no")
+        na = a.astype(SF(-1.))
+        assert_array_equal(-1 * na.view(np.float64), a.view(np.float64))
+
+        assert np.can_cast(a, SF(2.), casting="same_kind")
+        assert not np.can_cast(a, SF(2.), casting="safe")
+        a2 = a.astype(SF(2.))
+        assert_array_equal(2 * a2.view(np.float64), a.view(np.float64))
+
+    @pytest.mark.parametrize("aligned", [True, False])
+    def test_sfloat_cast_internal_errors(self, aligned):
+        a = self._get_array(2e300, aligned)
+
+        with pytest.raises(TypeError,
+                match="error raised inside the core-loop: non-finite factor!"):
+            a.astype(SF(2e-300))
+
+    def test_sfloat_promotion(self):
+        assert np.result_type(SF(2.), SF(3.)) == SF(3.)
+        assert np.result_type(SF(3.), SF(2.)) == SF(3.)
+        # Float64 -> SF(1.) and then promotes normally, so both of this work:
+        assert np.result_type(SF(3.), np.float64) == SF(3.)
+        assert np.result_type(np.float64, SF(0.5)) == SF(1.)
+
+        # Test an undefined promotion:
+        with pytest.raises(TypeError):
+            np.result_type(SF(1.), np.int64)
+
+    def test_basic_multiply(self):
+        a = self._get_array(2.)
+        b = self._get_array(4.)
+
+        res = a * b
+        # multiplies dtype scaling and content separately:
+        assert res.dtype.get_scaling() == 8.
+        expected_view = a.view(np.float64) * b.view(np.float64)
+        assert_array_equal(res.view(np.float64), expected_view)
+
+    def test_possible_and_impossible_reduce(self):
+        # For reductions to work, the first and last operand must have the
+        # same dtype.  For this parametric DType that is not necessarily true.
+        a = self._get_array(2.)
+        # Addition reduction works (as of writing it requires passing
+        # `initial` because setting a scaled float from the default `0` fails).
+        res = np.add.reduce(a, initial=0.)
+        assert res == a.astype(np.float64).sum()
+
+        # But each multiplication changes the factor, so a reduction is not
+        # possible (the relaxed version of the old refusal to handle any
+        # flexible dtype).
+        with pytest.raises(TypeError,
+                match="the resolved dtypes are not compatible"):
+            np.multiply.reduce(a)
+
+    def test_basic_ufunc_at(self):
+        float_a = np.array([1., 2., 3.])
+        b = self._get_array(2.)
+
+        float_b = b.view(np.float64).copy()
+        np.multiply.at(float_b, [1, 1, 1], float_a)
+        np.multiply.at(b, [1, 1, 1], float_a)
+
+        assert_array_equal(b.view(np.float64), float_b)
+
+    def test_basic_multiply_promotion(self):
+        float_a = np.array([1., 2., 3.])
+        b = self._get_array(2.)
+
+        res1 = float_a * b
+        res2 = b * float_a
+
+        # one factor is one, so we get the factor of b:
+        assert res1.dtype == res2.dtype == b.dtype
+        expected_view = float_a * b.view(np.float64)
+        assert_array_equal(res1.view(np.float64), expected_view)
+        assert_array_equal(res2.view(np.float64), expected_view)
+
+        # Check that promotion works when `out` is used:
+        np.multiply(b, float_a, out=res2)
+        with pytest.raises(TypeError):
+            # The promoter accepts this (maybe it should not), but the SFloat
+            # result cannot be cast to integer:
+            np.multiply(b, float_a, out=np.arange(3))
+
+    def test_basic_addition(self):
+        a = self._get_array(2.)
+        b = self._get_array(4.)
+
+        res = a + b
+        # addition uses the type promotion rules for the result:
+        assert res.dtype == np.result_type(a.dtype, b.dtype)
+        expected_view = (a.astype(res.dtype).view(np.float64) +
+                         b.astype(res.dtype).view(np.float64))
+        assert_array_equal(res.view(np.float64), expected_view)
+
+    def test_addition_cast_safety(self):
+        """The addition method is special for the scaled float, because it
+        includes the "cast" between different factors, thus cast-safety
+        is influenced by the implementation.
+        """
+        a = self._get_array(2.)
+        b = self._get_array(-2.)
+        c = self._get_array(3.)
+
+        # sign change is "equiv":
+        np.add(a, b, casting="equiv")
+        with pytest.raises(TypeError):
+            np.add(a, b, casting="no")
+
+        # Different factor is "same_kind" (default) so check that "safe" fails
+        with pytest.raises(TypeError):
+            np.add(a, c, casting="safe")
+
+        # Check that casting the output fails also (done by the ufunc here)
+        with pytest.raises(TypeError):
+            np.add(a, a, out=c, casting="safe")
+
+    @pytest.mark.parametrize("ufunc",
+            [np.logical_and, np.logical_or, np.logical_xor])
+    def test_logical_ufuncs_casts_to_bool(self, ufunc):
+        a = self._get_array(2.)
+        a[0] = 0.  # make sure first element is considered False.
+
+        float_equiv = a.astype(float)
+        expected = ufunc(float_equiv, float_equiv)
+        res = ufunc(a, a)
+        assert_array_equal(res, expected)
+
+        # also check that the same works for reductions:
+        expected = ufunc.reduce(float_equiv)
+        res = ufunc.reduce(a)
+        assert_array_equal(res, expected)
+
+        # The output casting does not match the bool, bool -> bool loop:
+        with pytest.raises(TypeError):
+            ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv")
+
+    def test_wrapped_and_wrapped_reductions(self):
+        a = self._get_array(2.)
+        float_equiv = a.astype(float)
+
+        expected = np.hypot(float_equiv, float_equiv)
+        res = np.hypot(a, a)
+        assert res.dtype == a.dtype
+        res_float = res.view(np.float64) * 2
+        assert_array_equal(res_float, expected)
+
+        # Also check reduction (keepdims, due to incorrect getitem)
+        res = np.hypot.reduce(a, keepdims=True)
+        assert res.dtype == a.dtype
+        expected = np.hypot.reduce(float_equiv, keepdims=True)
+        assert res.view(np.float64) * 2 == expected
+
+    def test_astype_class(self):
+        # Very simple test that we accept `.astype()` also on the class.
+        # ScaledFloat always returns the default descriptor, but it does
+        # check the relevant code paths.
+        arr = np.array([1., 2., 3.], dtype=object)
+
+        res = arr.astype(SF)  # passing the class itself
+        expected = arr.astype(SF(1.))  # above will have discovered 1. scaling
+        assert_array_equal(res.view(np.float64), expected.view(np.float64))
+
+    def test_creation_class(self):
+        arr1 = np.array([1., 2., 3.], dtype=SF)
+        assert arr1.dtype == SF(1.)
+        arr2 = np.array([1., 2., 3.], dtype=SF(1.))
+        assert_array_equal(arr1.view(np.float64), arr2.view(np.float64))
+
+
+def test_type_pickle():
+    # can't actually unpickle, but we can pickle (if in namespace)
+    import pickle
+
+    np._ScaledFloatTestDType = SF
+
+    s = pickle.dumps(SF)
+    res = pickle.loads(s)
+    assert res is SF
+
+    del np._ScaledFloatTestDType
+
+
+def test_is_numeric():
+    assert SF._is_numeric
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_cython.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_cython.py
new file mode 100644
index 00000000..0e0d00c2
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_cython.py
@@ -0,0 +1,135 @@
+import os
+import shutil
+import subprocess
+import sys
+import pytest
+
+import numpy as np
+from numpy.testing import IS_WASM
+
+# This import is copied from random.tests.test_extending
+try:
+    import cython
+    from Cython.Compiler.Version import version as cython_version
+except ImportError:
+    cython = None
+else:
+    from numpy._utils import _pep440
+
+    # Cython 0.29.30 is required for Python 3.11 and there are
+    # other fixes in the 0.29 series that are needed even for earlier
+    # Python versions.
+    # Note: keep in sync with the one in pyproject.toml
+    required_version = "0.29.30"
+    if _pep440.parse(cython_version) < _pep440.Version(required_version):
+        # too old or wrong cython, skip the test
+        cython = None
+
+pytestmark = pytest.mark.skipif(cython is None, reason="requires cython")
+
+
+@pytest.fixture(scope='module')
+def install_temp(tmpdir_factory):
+    # Based in part on test_cython from random.tests.test_extending
+    if IS_WASM:
+        pytest.skip("No subprocess")
+
+    srcdir = os.path.join(os.path.dirname(__file__), 'examples', 'cython')
+    build_dir = tmpdir_factory.mktemp("cython_test") / "build"
+    os.makedirs(build_dir, exist_ok=True)
+    try:
+        subprocess.check_call(["meson", "--version"])
+    except FileNotFoundError:
+        pytest.skip("No usable 'meson' found")
+    if sys.platform == "win32":
+        subprocess.check_call(["meson", "setup",
+                               "--buildtype=release",
+                               "--vsenv", str(srcdir)],
+                              cwd=build_dir,
+                              )
+    else:
+        subprocess.check_call(["meson", "setup", str(srcdir)],
+                              cwd=build_dir
+                              )
+    subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir)
+
+    sys.path.append(str(build_dir))
+
+def test_is_timedelta64_object(install_temp):
+    import checks
+
+    assert checks.is_td64(np.timedelta64(1234))
+    assert checks.is_td64(np.timedelta64(1234, "ns"))
+    assert checks.is_td64(np.timedelta64("NaT", "ns"))
+
+    assert not checks.is_td64(1)
+    assert not checks.is_td64(None)
+    assert not checks.is_td64("foo")
+    assert not checks.is_td64(np.datetime64("now", "s"))
+
+
+def test_is_datetime64_object(install_temp):
+    import checks
+
+    assert checks.is_dt64(np.datetime64(1234, "ns"))
+    assert checks.is_dt64(np.datetime64("NaT", "ns"))
+
+    assert not checks.is_dt64(1)
+    assert not checks.is_dt64(None)
+    assert not checks.is_dt64("foo")
+    assert not checks.is_dt64(np.timedelta64(1234))
+
+
+def test_get_datetime64_value(install_temp):
+    import checks
+
+    dt64 = np.datetime64("2016-01-01", "ns")
+
+    result = checks.get_dt64_value(dt64)
+    expected = dt64.view("i8")
+
+    assert result == expected
+
+
+def test_get_timedelta64_value(install_temp):
+    import checks
+
+    td64 = np.timedelta64(12345, "h")
+
+    result = checks.get_td64_value(td64)
+    expected = td64.view("i8")
+
+    assert result == expected
+
+
+def test_get_datetime64_unit(install_temp):
+    import checks
+
+    dt64 = np.datetime64("2016-01-01", "ns")
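+    # The expected values below are NPY_DATETIMEUNIT enum codes; assuming the
+    # NumPy 1.26 numbering (which keeps a gap where business days used to
+    # live), 10 corresponds to NPY_FR_ns and 5 to NPY_FR_h.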
+    result = checks.get_dt64_unit(dt64)
+    expected = 10
+    assert result == expected
+
+    td64 = np.timedelta64(12345, "h")
+    result = checks.get_dt64_unit(td64)
+    expected = 5
+    assert result == expected
+
+
+def test_abstract_scalars(install_temp):
+    import checks
+
+    assert checks.is_integer(1)
+    assert checks.is_integer(np.int8(1))
+    assert checks.is_integer(np.uint64(1))
+
+def test_conv_intp(install_temp):
+    import checks
+
+    class myint:
+        def __int__(self):
+            return 3
+
+    # These conversions pass via `__int__`, not `__index__`:
+    assert checks.conv_intp(3.) == 3
+    assert checks.conv_intp(myint()) == 3
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_datetime.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_datetime.py
new file mode 100644
index 00000000..547ebf9d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_datetime.py
@@ -0,0 +1,2569 @@
+
+import numpy
+import numpy as np
+import datetime
+import pytest
+from numpy.testing import (
+    IS_WASM,
+    assert_, assert_equal, assert_raises, assert_warns, suppress_warnings,
+    assert_raises_regex, assert_array_equal,
+    )
+from numpy.compat import pickle
+
+# Use pytz to test out various time zones if available
+try:
+    from pytz import timezone as tz
+    _has_pytz = True
+except ImportError:
+    _has_pytz = False
+
+try:
+    RecursionError
+except NameError:
+    RecursionError = RuntimeError  # python < 3.5
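+# (a no-op on Python >= 3.5, where RecursionError is a builtin)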
+
+
+class TestDateTime:
+    def test_datetime_dtype_creation(self):
+        for unit in ['Y', 'M', 'W', 'D',
+                     'h', 'm', 's', 'ms', 'us',
+                     'μs',  # alias for us
+                     'ns', 'ps', 'fs', 'as']:
+            dt1 = np.dtype('M8[750%s]' % unit)
+            assert_(dt1 == np.dtype('datetime64[750%s]' % unit))
+            dt2 = np.dtype('m8[%s]' % unit)
+            assert_(dt2 == np.dtype('timedelta64[%s]' % unit))
+
+        # Generic units shouldn't add [] to the end
+        assert_equal(str(np.dtype("M8")), "datetime64")
+
+        # Should be possible to specify the endianness
+        assert_equal(np.dtype("=M8"), np.dtype("M8"))
+        assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]"))
+        assert_(np.dtype(">M8") == np.dtype("M8") or
+                np.dtype("M8[D]") == np.dtype("M8[D]") or
+                np.dtype("M8") != np.dtype("m8") == np.dtype("m8") or
+                np.dtype("m8[D]") == np.dtype("m8[D]") or
+                np.dtype("m8") != np.dtype(" Scalars
+        assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))
+        assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))
+        assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))
+        assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))
+        assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))
+
+        # Arrays -> Scalars
+        assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))
+        assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))
+        assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))
+        assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
+        assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))
+
+        # NaN -> NaT
+        nan = np.array([np.nan] * 8)
+        fnan = nan.astype('f')
+        lnan = nan.astype('g')
+        cnan = nan.astype('D')
+        cfnan = nan.astype('F')
+        clnan = nan.astype('G')
+
+        nat = np.array([np.datetime64('NaT')] * 8)
+        assert_equal(nan.astype('M8[ns]'), nat)
+        assert_equal(fnan.astype('M8[ns]'), nat)
+        assert_equal(lnan.astype('M8[ns]'), nat)
+        assert_equal(cnan.astype('M8[ns]'), nat)
+        assert_equal(cfnan.astype('M8[ns]'), nat)
+        assert_equal(clnan.astype('M8[ns]'), nat)
+
+        nat = np.array([np.timedelta64('NaT')] * 8)
+        assert_equal(nan.astype('timedelta64[ns]'), nat)
+        assert_equal(fnan.astype('timedelta64[ns]'), nat)
+        assert_equal(lnan.astype('timedelta64[ns]'), nat)
+        assert_equal(cnan.astype('timedelta64[ns]'), nat)
+        assert_equal(cfnan.astype('timedelta64[ns]'), nat)
+        assert_equal(clnan.astype('timedelta64[ns]'), nat)
+
+    def test_days_creation(self):
+        assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),
+                (1600-1970)*365 - (1972-1600)//4 + 3 - 365)
+        assert_equal(np.array('1600', dtype='M8[D]').astype('i8'),
+                (1600-1970)*365 - (1972-1600)//4 + 3)
+        assert_equal(np.array('1601', dtype='M8[D]').astype('i8'),
+                (1600-1970)*365 - (1972-1600)//4 + 3 + 366)
+        assert_equal(np.array('1900', dtype='M8[D]').astype('i8'),
+                (1900-1970)*365 - (1970-1900)//4)
+        assert_equal(np.array('1901', dtype='M8[D]').astype('i8'),
+                (1900-1970)*365 - (1970-1900)//4 + 365)
+        assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1)
+        assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1)
+        assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365)
+        assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365)
+        assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365)
+        assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365)
+        assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1)
+        assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1)
+        assert_equal(np.array('2000', dtype='M8[D]').astype('i8'),
+                 (2000 - 1970)*365 + (2000 - 1972)//4)
+        assert_equal(np.array('2001', dtype='M8[D]').astype('i8'),
+                 (2000 - 1970)*365 + (2000 - 1972)//4 + 366)
+        assert_equal(np.array('2400', dtype='M8[D]').astype('i8'),
+                 (2400 - 1970)*365 + (2400 - 1972)//4 - 3)
+        assert_equal(np.array('2401', dtype='M8[D]').astype('i8'),
+                 (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366)
+
+        assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'),
+                (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28)
+        assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'),
+                (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29)
+        assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'),
+                 (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28)
+        assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),
+                 (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29)
+        assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'),
+                 (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21)
+
+    def test_days_to_pydate(self):
+        assert_equal(np.array('1599', dtype='M8[D]').astype('O'),
+                    datetime.date(1599, 1, 1))
+        assert_equal(np.array('1600', dtype='M8[D]').astype('O'),
+                    datetime.date(1600, 1, 1))
+        assert_equal(np.array('1601', dtype='M8[D]').astype('O'),
+                    datetime.date(1601, 1, 1))
+        assert_equal(np.array('1900', dtype='M8[D]').astype('O'),
+                    datetime.date(1900, 1, 1))
+        assert_equal(np.array('1901', dtype='M8[D]').astype('O'),
+                    datetime.date(1901, 1, 1))
+        assert_equal(np.array('2000', dtype='M8[D]').astype('O'),
+                    datetime.date(2000, 1, 1))
+        assert_equal(np.array('2001', dtype='M8[D]').astype('O'),
+                    datetime.date(2001, 1, 1))
+        assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'),
+                    datetime.date(1600, 2, 29))
+        assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'),
+                    datetime.date(1600, 3, 1))
+        assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'),
+                    datetime.date(2001, 3, 22))
+
+    def test_dtype_comparison(self):
+        assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]')))
+        assert_(np.dtype('M8[us]') != np.dtype('M8[ms]'))
+        assert_(np.dtype('M8[2D]') != np.dtype('M8[D]'))
+        assert_(np.dtype('M8[D]') != np.dtype('M8[2D]'))
+
+    def test_pydatetime_creation(self):
+        a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]')
+        assert_equal(a[0], a[1])
+        a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]')
+        assert_equal(a[0], a[1])
+        a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]')
+        assert_equal(a[0], a[1])
+        # Will fail if the date changes at exactly the wrong moment
+        a = np.array(['today', datetime.date.today()], dtype='M8[D]')
+        assert_equal(a[0], a[1])
+        # datetime.datetime.now() returns local time, not UTC
+        #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]')
+        #assert_equal(a[0], a[1])
+
+        # we can give a datetime.date time units
+        assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'),
+                     np.array(np.datetime64('1960-03-12T00:00:00')))
+
+    def test_datetime_string_conversion(self):
+        a = ['2011-03-16', '1920-01-01', '2013-05-19']
+        str_a = np.array(a, dtype='S')
+        uni_a = np.array(a, dtype='U')
+        dt_a = np.array(a, dtype='M')
+
+        # String to datetime
+        assert_equal(dt_a, str_a.astype('M'))
+        assert_equal(dt_a.dtype, str_a.astype('M').dtype)
+        dt_b = np.empty_like(dt_a)
+        dt_b[...] = str_a
+        assert_equal(dt_a, dt_b)
+
+        # Datetime to string
+        assert_equal(str_a, dt_a.astype('S0'))
+        str_b = np.empty_like(str_a)
+        str_b[...] = dt_a
+        assert_equal(str_a, str_b)
+
+        # Unicode to datetime
+        assert_equal(dt_a, uni_a.astype('M'))
+        assert_equal(dt_a.dtype, uni_a.astype('M').dtype)
+        dt_b = np.empty_like(dt_a)
+        dt_b[...] = uni_a
+        assert_equal(dt_a, dt_b)
+
+        # Datetime to unicode
+        assert_equal(uni_a, dt_a.astype('U'))
+        uni_b = np.empty_like(uni_a)
+        uni_b[...] = dt_a
+        assert_equal(uni_a, uni_b)
+
+        # Datetime to long string - gh-9712
+        assert_equal(str_a, dt_a.astype((np.bytes_, 128)))
+        str_b = np.empty(str_a.shape, dtype=(np.bytes_, 128))
+        str_b[...] = dt_a
+        assert_equal(str_a, str_b)
+
+    @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"])
+    def test_time_byteswapping(self, time_dtype):
+        times = np.array(["2017", "NaT"], dtype=time_dtype)
+        times_swapped = times.astype(times.dtype.newbyteorder())
+        assert_array_equal(times, times_swapped)
+
+        unswapped = times_swapped.view(np.int64).newbyteorder()
+        assert_array_equal(unswapped, times.view(np.int64))
+
+    @pytest.mark.parametrize(["time1", "time2"],
+            [("M8[s]", "M8[D]"), ("m8[s]", "m8[ns]")])
+    def test_time_byteswapped_cast(self, time1, time2):
+        dtype1 = np.dtype(time1)
+        dtype2 = np.dtype(time2)
+        times = np.array(["2017", "NaT"], dtype=dtype1)
+        expected = times.astype(dtype2)
+
+        # Test that every byte-swapping combination also returns the same
+        # results (previous tests check that this comparison works fine).
+        res = times.astype(dtype1.newbyteorder()).astype(dtype2)
+        assert_array_equal(res, expected)
+        res = times.astype(dtype2.newbyteorder())
+        assert_array_equal(res, expected)
+        res = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder())
+        assert_array_equal(res, expected)
+
+    @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"])
+    @pytest.mark.parametrize("str_dtype", ["U", "S"])
+    def test_datetime_conversions_byteorders(self, str_dtype, time_dtype):
+        times = np.array(["2017", "NaT"], dtype=time_dtype)
+        # Unfortunately, timedelta does not roundtrip:
+        from_strings = np.array(["2017", "NaT"], dtype=str_dtype)
+        to_strings = times.astype(str_dtype)  # assume this is correct
+
+        # Check that conversion from times to string works if src is swapped:
+        times_swapped = times.astype(times.dtype.newbyteorder())
+        res = times_swapped.astype(str_dtype)
+        assert_array_equal(res, to_strings)
+        # And also if both are swapped:
+        res = times_swapped.astype(to_strings.dtype.newbyteorder())
+        assert_array_equal(res, to_strings)
+        # only destination is swapped:
+        res = times.astype(to_strings.dtype.newbyteorder())
+        assert_array_equal(res, to_strings)
+
+        # Check that conversion from string to times works if src is swapped:
+        from_strings_swapped = from_strings.astype(
+                from_strings.dtype.newbyteorder())
+        res = from_strings_swapped.astype(time_dtype)
+        assert_array_equal(res, times)
+        # And if both are swapped:
+        res = from_strings_swapped.astype(times.dtype.newbyteorder())
+        assert_array_equal(res, times)
+        # Only destination is swapped:
+        res = from_strings.astype(times.dtype.newbyteorder())
+        assert_array_equal(res, times)
+
+    def test_datetime_array_str(self):
+        a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
+        assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']")
+
+        a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')
+        assert_equal(np.array2string(a, separator=', ',
+                    formatter={'datetime': lambda x:
+                            "'%s'" % np.datetime_as_string(x, timezone='UTC')}),
+                     "['2011-03-16T13:55Z', '1920-01-01T03:12Z']")
+
+        # Check that one NaT doesn't corrupt subsequent entries
+        a = np.array(['2010', 'NaT', '2030']).astype('M')
+        assert_equal(str(a), "['2010'  'NaT' '2030']")
+
+    def test_timedelta_array_str(self):
+        a = np.array([-1, 0, 100], dtype='m')
+        assert_equal(str(a), "[ -1   0 100]")
+        a = np.array(['NaT', 'NaT'], dtype='m')
+        assert_equal(str(a), "['NaT' 'NaT']")
+        # Check right-alignment with NaTs
+        a = np.array([-1, 'NaT', 0], dtype='m')
+        assert_equal(str(a), "[   -1 'NaT'     0]")
+        a = np.array([-1, 'NaT', 1234567], dtype='m')
+        assert_equal(str(a), "[     -1   'NaT' 1234567]")
+
+        # Test with other byteorder:
+        a = np.array([-1, 'NaT', 1234567], dtype='>m')
+        assert_equal(str(a), "[     -1   'NaT' 1234567]")
+        a = np.array([-1, 'NaT', 1234567], dtype='<m')
+        assert_equal(str(a), "[     -1   'NaT' 1234567]")
+
+    def test_pickle(self):
+        # Check that loading pickles from 1.6 works
+        pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
+              b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \
+              b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
+        assert_equal(pickle.loads(pkl), np.dtype('>M8[us]'))
+
+    def test_setstate(self):
+        "Verify that datetime dtype __setstate__ can handle bad arguments"
+        dt = np.dtype('>M8[us]')
+        assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
+        assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+        assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
+        assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+
+    def test_dtype_promotion(self):
+        # datetime <op> datetime computes the metadata gcd
+        # timedelta <op> timedelta computes the metadata gcd
+        for mM in ['m', 'M']:
+            assert_equal(
+                np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')),
+                np.dtype(mM+'8[2Y]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')),
+                np.dtype(mM+'8[3Y]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')),
+                np.dtype(mM+'8[2M]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')),
+                np.dtype(mM+'8[1D]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')),
+                np.dtype(mM+'8[s]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')),
+                np.dtype(mM+'8[7s]'))
+        # timedelta <op> timedelta raises when there is no reasonable gcd
+        assert_raises(TypeError, np.promote_types,
+                            np.dtype('m8[Y]'), np.dtype('m8[D]'))
+        assert_raises(TypeError, np.promote_types,
+                            np.dtype('m8[M]'), np.dtype('m8[W]'))
+        # timedelta and float cannot be safely cast with each other
+        assert_raises(TypeError, np.promote_types, "float32", "m8")
+        assert_raises(TypeError, np.promote_types, "m8", "float32")
+        assert_raises(TypeError, np.promote_types, "uint64", "m8")
+        assert_raises(TypeError, np.promote_types, "m8", "uint64")
+
+        # timedelta <op> timedelta may overflow with big unit ranges
+        assert_raises(OverflowError, np.promote_types,
+                            np.dtype('m8[W]'), np.dtype('m8[fs]'))
+        assert_raises(OverflowError, np.promote_types,
+                            np.dtype('m8[s]'), np.dtype('m8[as]'))
+
+    def test_cast_overflow(self):
+        # gh-4486
+        def cast():
+            numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("datetime64[%s]',
+                                      'timedelta64[%s]'])
+    def test_isfinite_isinf_isnan_units(self, unit, dstr):
+        '''check isfinite, isinf, isnan for all units of M, m dtypes
+        '''
+        arr_val = [123, -321, "NaT"]
+        arr = np.array(arr_val, dtype=dstr % unit)
+        pos = np.array([True, True,  False])
+        neg = np.array([False, False,  True])
+        false = np.array([False, False,  False])
+        assert_equal(np.isfinite(arr), pos)
+        assert_equal(np.isinf(arr), false)
+        assert_equal(np.isnan(arr), neg)
+
+    def test_assert_equal(self):
+        assert_raises(AssertionError, assert_equal,
+                np.datetime64('nat'), np.timedelta64('nat'))
+
+    def test_corecursive_input(self):
+        # construct a co-recursive list
+        a, b = [], []
+        a.append(b)
+        b.append(a)
+        obj_arr = np.array([None])
+        obj_arr[0] = a
+
+        # At some point this caused a stack overflow (gh-11154). Now raises
+        # ValueError since the nested list cannot be converted to a datetime.
+        assert_raises(ValueError, obj_arr.astype, 'M8')
+        assert_raises(ValueError, obj_arr.astype, 'm8')
+
+    @pytest.mark.parametrize("shape", [(), (1,)])
+    def test_discovery_from_object_array(self, shape):
+        arr = np.array("2020-10-10", dtype=object).reshape(shape)
+        res = np.array("2020-10-10", dtype="M8").reshape(shape)
+        assert res.dtype == np.dtype("M8[D]")
+        assert_equal(arr.astype("M8"), res)
+        arr[...] = np.bytes_("2020-10-10")  # try a numpy string type
+        assert_equal(arr.astype("M8"), res)
+        arr = arr.astype("S")
+        assert_equal(arr.astype("S").astype("M8"), res)
+
+    @pytest.mark.parametrize("time_unit", [
+        "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as",
+        # compound units
+        "10D", "2M",
+    ])
+    def test_limit_symmetry(self, time_unit):
+        """
+        Dates should have symmetric limits around the unix epoch at +/-np.int64
+        """
+        epoch = np.datetime64(0, time_unit)
+        latest = np.datetime64(np.iinfo(np.int64).max, time_unit)
+        earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit)
+
+        # above should not have overflowed
+        assert earliest < epoch < latest
+
+    @pytest.mark.parametrize("time_unit", [
+        "Y", "M",
+        pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")),
+        "D", "h", "m",
+        "s", "ms", "us", "ns", "ps", "fs", "as",
+        pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")),
+    ])
+    @pytest.mark.parametrize("sign", [-1, 1])
+    def test_limit_str_roundtrip(self, time_unit, sign):
+        """
+        Limits should roundtrip when converted to strings.
+
+        This tests the conversion to and from npy_datetimestruct.
+        """
+        # TODO: add absolute (gold standard) time span limit strings
+        limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit)
+
+        # Convert to string and back. Explicit unit needed since the day and
+        # week reprs are not distinguishable.
+        limit_via_str = np.datetime64(str(limit), time_unit)
+        assert limit_via_str == limit
+
+
+class TestDateTimeData:
+
+    def test_basic(self):
+        a = np.array(['1980-03-23'], dtype=np.datetime64)
+        assert_equal(np.datetime_data(a.dtype), ('D', 1))
+
+    def test_bytes(self):
+        # byte units are converted to unicode
+        dt = np.datetime64('2000', (b'ms', 5))
+        assert np.datetime_data(dt.dtype) == ('ms', 5)
+
+        dt = np.datetime64('2000', b'5ms')
+        assert np.datetime_data(dt.dtype) == ('ms', 5)
+
+    def test_non_ascii(self):
+        # μs is normalized to us
+        dt = np.datetime64('2000', ('μs', 5))
+        assert np.datetime_data(dt.dtype) == ('us', 5)
+
+        dt = np.datetime64('2000', '5μs')
+        assert np.datetime_data(dt.dtype) == ('us', 5)
+
+
+def test_comparisons_return_not_implemented():
+    # GH#17017
+
+    class custom:
+        __array_priority__ = 10000
+
+    obj = custom()
+
+    dt = np.datetime64('2000', 'ns')
+    td = dt - dt
+
+    for item in [dt, td]:
+        assert item.__eq__(obj) is NotImplemented
+        assert item.__ne__(obj) is NotImplemented
+        assert item.__le__(obj) is NotImplemented
+        assert item.__lt__(obj) is NotImplemented
+        assert item.__ge__(obj) is NotImplemented
+        assert item.__gt__(obj) is NotImplemented
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_defchararray.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_defchararray.py
new file mode 100644
index 00000000..39699f45
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_defchararray.py
@@ -0,0 +1,686 @@
+import pytest
+
+import numpy as np
+from numpy.core.multiarray import _vec_string
+from numpy.testing import (
+    assert_, assert_equal, assert_array_equal, assert_raises,
+    assert_raises_regex
+    )
+
+kw_unicode_true = {'unicode': True}  # make 2to3 work properly
+kw_unicode_false = {'unicode': False}
+
+class TestBasic:
+    def test_from_object_array(self):
+        A = np.array([['abc', 2],
+                      ['long   ', '0123456789']], dtype='O')
+        B = np.char.array(A)
+        assert_equal(B.dtype.itemsize, 10)
+        assert_array_equal(B, [[b'abc', b'2'],
+                               [b'long', b'0123456789']])
+
+    def test_from_object_array_unicode(self):
+        A = np.array([['abc', 'Sigma \u03a3'],
+                      ['long   ', '0123456789']], dtype='O')
+        assert_raises(ValueError, np.char.array, (A,))
+        B = np.char.array(A, **kw_unicode_true)
+        assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
+        assert_array_equal(B, [['abc', 'Sigma \u03a3'],
+                               ['long', '0123456789']])
+
+    def test_from_string_array(self):
+        A = np.array([[b'abc', b'foo'],
+                      [b'long   ', b'0123456789']])
+        assert_equal(A.dtype.type, np.bytes_)
+        B = np.char.array(A)
+        assert_array_equal(B, A)
+        assert_equal(B.dtype, A.dtype)
+        assert_equal(B.shape, A.shape)
+        B[0, 0] = 'changed'
+        assert_(B[0, 0] != A[0, 0])
+        C = np.char.asarray(A)
+        assert_array_equal(C, A)
+        assert_equal(C.dtype, A.dtype)
+        C[0, 0] = 'changed again'
+        assert_(C[0, 0] != B[0, 0])
+        assert_(C[0, 0] == A[0, 0])
+
+    def test_from_unicode_array(self):
+        A = np.array([['abc', 'Sigma \u03a3'],
+                      ['long   ', '0123456789']])
+        assert_equal(A.dtype.type, np.str_)
+        B = np.char.array(A)
+        assert_array_equal(B, A)
+        assert_equal(B.dtype, A.dtype)
+        assert_equal(B.shape, A.shape)
+        B = np.char.array(A, **kw_unicode_true)
+        assert_array_equal(B, A)
+        assert_equal(B.dtype, A.dtype)
+        assert_equal(B.shape, A.shape)
+
+        def fail():
+            np.char.array(A, **kw_unicode_false)
+
+        assert_raises(UnicodeEncodeError, fail)
+
+    def test_unicode_upconvert(self):
+        A = np.char.array(['abc'])
+        B = np.char.array(['\u03a3'])
+        assert_(issubclass((A + B).dtype.type, np.str_))
+
+    def test_from_string(self):
+        A = np.char.array(b'abc')
+        assert_equal(len(A), 1)
+        assert_equal(len(A[0]), 3)
+        assert_(issubclass(A.dtype.type, np.bytes_))
+
+    def test_from_unicode(self):
+        A = np.char.array('\u03a3')
+        assert_equal(len(A), 1)
+        assert_equal(len(A[0]), 1)
+        assert_equal(A.itemsize, 4)
+        assert_(issubclass(A.dtype.type, np.str_))
+
+class TestVecString:
+    def test_non_existent_method(self):
+
+        def fail():
+            _vec_string('a', np.bytes_, 'bogus')
+
+        assert_raises(AttributeError, fail)
+
+    def test_non_string_array(self):
+
+        def fail():
+            _vec_string(1, np.bytes_, 'strip')
+
+        assert_raises(TypeError, fail)
+
+    def test_invalid_args_tuple(self):
+
+        def fail():
+            _vec_string(['a'], np.bytes_, 'strip', 1)
+
+        assert_raises(TypeError, fail)
+
+    def test_invalid_type_descr(self):
+
+        def fail():
+            _vec_string(['a'], 'BOGUS', 'strip')
+
+        assert_raises(TypeError, fail)
+
+    def test_invalid_function_args(self):
+
+        def fail():
+            _vec_string(['a'], np.bytes_, 'strip', (1,))
+
+        assert_raises(TypeError, fail)
+
+    def test_invalid_result_type(self):
+
+        def fail():
+            _vec_string(['a'], np.int_, 'strip')
+
+        assert_raises(TypeError, fail)
+
+    def test_broadcast_error(self):
+
+        def fail():
+            _vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],))
+
+        assert_raises(ValueError, fail)
+
+
+class TestWhitespace:
+    def setup_method(self):
+        self.A = np.array([['abc ', '123  '],
+                           ['789 ', 'xyz ']]).view(np.chararray)
+        self.B = np.array([['abc', '123'],
+                           ['789', 'xyz']]).view(np.chararray)
+
+    def test1(self):
+        assert_(np.all(self.A == self.B))
+        assert_(np.all(self.A >= self.B))
+        assert_(np.all(self.A <= self.B))
+        assert_(not np.any(self.A > self.B))
+        assert_(not np.any(self.A < self.B))
+        assert_(not np.any(self.A != self.B))
+
+class TestChar:
+    def setup_method(self):
+        self.A = np.array('abc1', dtype='c').view(np.chararray)
+
+    def test_it(self):
+        assert_equal(self.A.shape, (4,))
+        assert_equal(self.A.upper()[:2].tobytes(), b'AB')
+
+class TestComparisons:
+    def setup_method(self):
+        self.A = np.array([['abc', '123'],
+                           ['789', 'xyz']]).view(np.chararray)
+        self.B = np.array([['efg', '123  '],
+                           ['051', 'tuv']]).view(np.chararray)
+
+    def test_not_equal(self):
+        assert_array_equal((self.A != self.B), [[True, False], [True, True]])
+
+    def test_equal(self):
+        assert_array_equal((self.A == self.B), [[False, True], [False, False]])
+
+    def test_greater_equal(self):
+        assert_array_equal((self.A >= self.B), [[False, True], [True, True]])
+
+    def test_less_equal(self):
+        assert_array_equal((self.A <= self.B), [[True, True], [False, False]])
+
+    def test_greater(self):
+        assert_array_equal((self.A > self.B), [[False, False], [True, True]])
+
+    def test_less(self):
+        assert_array_equal((self.A < self.B), [[True, False], [False, False]])
+
+    def test_type(self):
+        out1 = np.char.equal(self.A, self.B)
+        out2 = np.char.equal('a', 'a')
+        assert_(isinstance(out1, np.ndarray))
+        assert_(isinstance(out2, np.ndarray))
+
+class TestComparisonsMixed1(TestComparisons):
+    """Ticket #1276"""
+
+    def setup_method(self):
+        TestComparisons.setup_method(self)
+        self.B = np.array([['efg', '123  '],
+                           ['051', 'tuv']], np.str_).view(np.chararray)
+
+class TestComparisonsMixed2(TestComparisons):
+    """Ticket #1276"""
+
+    def setup_method(self):
+        TestComparisons.setup_method(self)
+        self.A = np.array([['abc', '123'],
+                           ['789', 'xyz']], np.str_).view(np.chararray)
+
+class TestInformation:
+    def setup_method(self):
+        self.A = np.array([[' abc ', ''],
+                           ['12345', 'MixedCase'],
+                           ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
+        self.B = np.array([[' \u03a3 ', ''],
+                           ['12345', 'MixedCase'],
+                           ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
+
+    def test_len(self):
+        assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
+        assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]])
+        assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]])
+
+    def test_count(self):
+        assert_(issubclass(self.A.count('').dtype.type, np.integer))
+        assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]])
+        assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]])
+        # Python doesn't seem to like counting NULL characters
+        # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]])
+        assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])
+        assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]])
+        assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]])
+        # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]])
+
+    def test_endswith(self):
+        assert_(issubclass(self.A.endswith('').dtype.type, np.bool_))
+        assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]])
+        assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])
+
+        def fail():
+            self.A.endswith('3', 'fdjk')
+
+        assert_raises(TypeError, fail)
+
+    def test_find(self):
+        assert_(issubclass(self.A.find('a').dtype.type, np.integer))
+        assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]])
+        assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]])
+        assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
+        assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]])
+
+    def test_index(self):
+
+        def fail():
+            self.A.index('a')
+
+        assert_raises(ValueError, fail)
+        assert_(np.char.index('abcba', 'b') == 1)
+        assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))
+
+    def test_isalnum(self):
+        assert_(issubclass(self.A.isalnum().dtype.type, np.bool_))
+        assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]])
+
+    def test_isalpha(self):
+        assert_(issubclass(self.A.isalpha().dtype.type, np.bool_))
+        assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]])
+
+    def test_isdigit(self):
+        assert_(issubclass(self.A.isdigit().dtype.type, np.bool_))
+        assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]])
+
+    def test_islower(self):
+        assert_(issubclass(self.A.islower().dtype.type, np.bool_))
+        assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]])
+
+    def test_isspace(self):
+        assert_(issubclass(self.A.isspace().dtype.type, np.bool_))
+        assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]])
+
+    def test_istitle(self):
+        assert_(issubclass(self.A.istitle().dtype.type, np.bool_))
+        assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]])
+
+    def test_isupper(self):
+        assert_(issubclass(self.A.isupper().dtype.type, np.bool_))
+        assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]])
+
+    def test_rfind(self):
+        assert_(issubclass(self.A.rfind('a').dtype.type, np.integer))
+        assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])
+        assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])
+        assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
+        assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])
+
+    def test_rindex(self):
+
+        def fail():
+            self.A.rindex('a')
+
+        assert_raises(ValueError, fail)
+        assert_(np.char.rindex('abcba', 'b') == 3)
+        assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))
+
+    def test_startswith(self):
+        assert_(issubclass(self.A.startswith('').dtype.type, np.bool_))
+        assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]])
+        assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])
+
+        def fail():
+            self.A.startswith('3', 'fdjk')
+
+        assert_raises(TypeError, fail)
+
+
+class TestMethods:
+    def setup_method(self):
+        self.A = np.array([[' abc ', ''],
+                           ['12345', 'MixedCase'],
+                           ['123 \t 345 \0 ', 'UPPER']],
+                          dtype='S').view(np.chararray)
+        self.B = np.array([[' \u03a3 ', ''],
+                           ['12345', 'MixedCase'],
+                           ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
+
+    def test_capitalize(self):
+        tgt = [[b' abc ', b''],
+               [b'12345', b'Mixedcase'],
+               [b'123 \t 345 \0 ', b'Upper']]
+        assert_(issubclass(self.A.capitalize().dtype.type, np.bytes_))
+        assert_array_equal(self.A.capitalize(), tgt)
+
+        tgt = [[' \u03c3 ', ''],
+               ['12345', 'Mixedcase'],
+               ['123 \t 345 \0 ', 'Upper']]
+        assert_(issubclass(self.B.capitalize().dtype.type, np.str_))
+        assert_array_equal(self.B.capitalize(), tgt)
+
+    def test_center(self):
+        assert_(issubclass(self.A.center(10).dtype.type, np.bytes_))
+        C = self.A.center([10, 20])
+        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
+
+        C = self.A.center(20, b'#')
+        assert_(np.all(C.startswith(b'#')))
+        assert_(np.all(C.endswith(b'#')))
+
+        C = np.char.center(b'FOO', [[10, 20], [15, 8]])
+        tgt = [[b'   FOO    ', b'        FOO         '],
+               [b'      FOO      ', b'  FOO   ']]
+        assert_(issubclass(C.dtype.type, np.bytes_))
+        assert_array_equal(C, tgt)
+
+    def test_decode(self):
+        A = np.char.array([b'\\u03a3'])
+        assert_(A.decode('unicode-escape')[0] == '\u03a3')
+
+    def test_encode(self):
+        B = self.B.encode('unicode_escape')
+        assert_(B[0][0] == str(' \\u03a3 ').encode('latin1'))
+
+    def test_expandtabs(self):
+        T = self.A.expandtabs()
+        assert_(T[2, 0] == b'123      345 \0')
+
+    def test_join(self):
+        # NOTE: list(b'123') == [49, 50, 51]
+        #       so that b','.join(b'123') results in an error on Python 3
+        A0 = self.A.decode('ascii')
+
+        A = np.char.join([',', '#'], A0)
+        assert_(issubclass(A.dtype.type, np.str_))
+        tgt = np.array([[' ,a,b,c, ', ''],
+                        ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
+                        ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])
+        assert_array_equal(np.char.join([',', '#'], A0), tgt)
+
+    def test_ljust(self):
+        assert_(issubclass(self.A.ljust(10).dtype.type, np.bytes_))
+
+        C = self.A.ljust([10, 20])
+        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
+
+        C = self.A.ljust(20, b'#')
+        assert_array_equal(C.startswith(b'#'), [
+                [False, True], [False, False], [False, False]])
+        assert_(np.all(C.endswith(b'#')))
+
+        C = np.char.ljust(b'FOO', [[10, 20], [15, 8]])
+        tgt = [[b'FOO       ', b'FOO                 '],
+               [b'FOO            ', b'FOO     ']]
+        assert_(issubclass(C.dtype.type, np.bytes_))
+        assert_array_equal(C, tgt)
+
+    def test_lower(self):
+        tgt = [[b' abc ', b''],
+               [b'12345', b'mixedcase'],
+               [b'123 \t 345 \0 ', b'upper']]
+        assert_(issubclass(self.A.lower().dtype.type, np.bytes_))
+        assert_array_equal(self.A.lower(), tgt)
+
+        tgt = [[' \u03c3 ', ''],
+               ['12345', 'mixedcase'],
+               ['123 \t 345 \0 ', 'upper']]
+        assert_(issubclass(self.B.lower().dtype.type, np.str_))
+        assert_array_equal(self.B.lower(), tgt)
+
+    def test_lstrip(self):
+        tgt = [[b'abc ', b''],
+               [b'12345', b'MixedCase'],
+               [b'123 \t 345 \0 ', b'UPPER']]
+        assert_(issubclass(self.A.lstrip().dtype.type, np.bytes_))
+        assert_array_equal(self.A.lstrip(), tgt)
+
+        tgt = [[b' abc', b''],
+               [b'2345', b'ixedCase'],
+               [b'23 \t 345 \x00', b'UPPER']]
+        assert_array_equal(self.A.lstrip([b'1', b'M']), tgt)
+
+        tgt = [['\u03a3 ', ''],
+               ['12345', 'MixedCase'],
+               ['123 \t 345 \0 ', 'UPPER']]
+        assert_(issubclass(self.B.lstrip().dtype.type, np.str_))
+        assert_array_equal(self.B.lstrip(), tgt)
+
+    def test_partition(self):
+        P = self.A.partition([b'3', b'M'])
+        tgt = [[(b' abc ', b'', b''), (b'', b'', b'')],
+               [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
+               [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]]
+        assert_(issubclass(P.dtype.type, np.bytes_))
+        assert_array_equal(P, tgt)
+
+    def test_replace(self):
+        R = self.A.replace([b'3', b'a'],
+                           [b'##########', b'@'])
+        tgt = [[b' abc ', b''],
+               [b'12##########45', b'MixedC@se'],
+               [b'12########## \t ##########45 \x00', b'UPPER']]
+        assert_(issubclass(R.dtype.type, np.bytes_))
+        assert_array_equal(R, tgt)
+
+    def test_rjust(self):
+        assert_(issubclass(self.A.rjust(10).dtype.type, np.bytes_))
+
+        C = self.A.rjust([10, 20])
+        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
+
+        C = self.A.rjust(20, b'#')
+        assert_(np.all(C.startswith(b'#')))
+        assert_array_equal(C.endswith(b'#'),
+                           [[False, True], [False, False], [False, False]])
+
+        C = np.char.rjust(b'FOO', [[10, 20], [15, 8]])
+        tgt = [[b'       FOO', b'                 FOO'],
+               [b'            FOO', b'     FOO']]
+        assert_(issubclass(C.dtype.type, np.bytes_))
+        assert_array_equal(C, tgt)
+
+    def test_rpartition(self):
+        P = self.A.rpartition([b'3', b'M'])
+        tgt = [[(b'', b'', b' abc '), (b'', b'', b'')],
+               [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
+               [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]]
+        assert_(issubclass(P.dtype.type, np.bytes_))
+        assert_array_equal(P, tgt)
+
+    def test_rsplit(self):
+        A = self.A.rsplit(b'3')
+        tgt = [[[b' abc '], [b'']],
+               [[b'12', b'45'], [b'MixedCase']],
+               [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
+        assert_(issubclass(A.dtype.type, np.object_))
+        assert_equal(A.tolist(), tgt)
+
+    def test_rstrip(self):
+        assert_(issubclass(self.A.rstrip().dtype.type, np.bytes_))
+
+        tgt = [[b' abc', b''],
+               [b'12345', b'MixedCase'],
+               [b'123 \t 345', b'UPPER']]
+        assert_array_equal(self.A.rstrip(), tgt)
+
+        tgt = [[b' abc ', b''],
+               [b'1234', b'MixedCase'],
+               [b'123 \t 345 \x00', b'UPP']
+               ]
+        assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt)
+
+        tgt = [[' \u03a3', ''],
+               ['12345', 'MixedCase'],
+               ['123 \t 345', 'UPPER']]
+        assert_(issubclass(self.B.rstrip().dtype.type, np.str_))
+        assert_array_equal(self.B.rstrip(), tgt)
+
+    def test_strip(self):
+        tgt = [[b'abc', b''],
+               [b'12345', b'MixedCase'],
+               [b'123 \t 345', b'UPPER']]
+        assert_(issubclass(self.A.strip().dtype.type, np.bytes_))
+        assert_array_equal(self.A.strip(), tgt)
+
+        tgt = [[b' abc ', b''],
+               [b'234', b'ixedCas'],
+               [b'23 \t 345 \x00', b'UPP']]
+        assert_array_equal(self.A.strip([b'15', b'EReM']), tgt)
+
+        tgt = [['\u03a3', ''],
+               ['12345', 'MixedCase'],
+               ['123 \t 345', 'UPPER']]
+        assert_(issubclass(self.B.strip().dtype.type, np.str_))
+        assert_array_equal(self.B.strip(), tgt)
+
+    def test_split(self):
+        A = self.A.split(b'3')
+        tgt = [
+               [[b' abc '], [b'']],
+               [[b'12', b'45'], [b'MixedCase']],
+               [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
+        assert_(issubclass(A.dtype.type, np.object_))
+        assert_equal(A.tolist(), tgt)
+
+    def test_splitlines(self):
+        A = np.char.array(['abc\nfds\nwer']).splitlines()
+        assert_(issubclass(A.dtype.type, np.object_))
+        assert_(A.shape == (1,))
+        assert_(len(A[0]) == 3)
+
+    def test_swapcase(self):
+        tgt = [[b' ABC ', b''],
+               [b'12345', b'mIXEDcASE'],
+               [b'123 \t 345 \0 ', b'upper']]
+        assert_(issubclass(self.A.swapcase().dtype.type, np.bytes_))
+        assert_array_equal(self.A.swapcase(), tgt)
+
+        tgt = [[' \u03c3 ', ''],
+               ['12345', 'mIXEDcASE'],
+               ['123 \t 345 \0 ', 'upper']]
+        assert_(issubclass(self.B.swapcase().dtype.type, np.str_))
+        assert_array_equal(self.B.swapcase(), tgt)
+
+    def test_title(self):
+        tgt = [[b' Abc ', b''],
+               [b'12345', b'Mixedcase'],
+               [b'123 \t 345 \0 ', b'Upper']]
+        assert_(issubclass(self.A.title().dtype.type, np.bytes_))
+        assert_array_equal(self.A.title(), tgt)
+
+        tgt = [[' \u03a3 ', ''],
+               ['12345', 'Mixedcase'],
+               ['123 \t 345 \0 ', 'Upper']]
+        assert_(issubclass(self.B.title().dtype.type, np.str_))
+        assert_array_equal(self.B.title(), tgt)
+
+    def test_upper(self):
+        tgt = [[b' ABC ', b''],
+               [b'12345', b'MIXEDCASE'],
+               [b'123 \t 345 \0 ', b'UPPER']]
+        assert_(issubclass(self.A.upper().dtype.type, np.bytes_))
+        assert_array_equal(self.A.upper(), tgt)
+
+        tgt = [[' \u03a3 ', ''],
+               ['12345', 'MIXEDCASE'],
+               ['123 \t 345 \0 ', 'UPPER']]
+        assert_(issubclass(self.B.upper().dtype.type, np.str_))
+        assert_array_equal(self.B.upper(), tgt)
+
+    def test_isnumeric(self):
+
+        def fail():
+            self.A.isnumeric()
+
+        assert_raises(TypeError, fail)
+        assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
+        assert_array_equal(self.B.isnumeric(), [
+                [False, False], [True, False], [False, False]])
+
+    def test_isdecimal(self):
+
+        def fail():
+            self.A.isdecimal()
+
+        assert_raises(TypeError, fail)
+        assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
+        assert_array_equal(self.B.isdecimal(), [
+                [False, False], [True, False], [False, False]])
+
+
+class TestOperations:
+    def setup_method(self):
+        self.A = np.array([['abc', '123'],
+                           ['789', 'xyz']]).view(np.chararray)
+        self.B = np.array([['efg', '456'],
+                           ['051', 'tuv']]).view(np.chararray)
+
+    def test_add(self):
+        AB = np.array([['abcefg', '123456'],
+                       ['789051', 'xyztuv']]).view(np.chararray)
+        assert_array_equal(AB, (self.A + self.B))
+        assert_(len((self.A + self.B)[0][0]) == 6)
+
+    def test_radd(self):
+        QA = np.array([['qabc', 'q123'],
+                       ['q789', 'qxyz']]).view(np.chararray)
+        assert_array_equal(QA, ('q' + self.A))
+
+    def test_mul(self):
+        A = self.A
+        for r in (2, 3, 5, 7, 197):
+            Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
+                           [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
+
+            assert_array_equal(Ar, (self.A * r))
+
+        for ob in [object(), 'qrs']:
+            with assert_raises_regex(ValueError,
+                                     'Can only multiply by integers'):
+                A*ob
+
+    def test_rmul(self):
+        A = self.A
+        for r in (2, 3, 5, 7, 197):
+            Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
+                           [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
+            assert_array_equal(Ar, (r * self.A))
+
+        for ob in [object(), 'qrs']:
+            with assert_raises_regex(ValueError,
+                                     'Can only multiply by integers'):
+                ob * A
+
+    def test_mod(self):
+        """Ticket #856"""
+        F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray)
+        C = np.array([[3, 7], [19, 1]])
+        FC = np.array([['3', '7.000000'],
+                       ['19', '1']]).view(np.chararray)
+        assert_array_equal(FC, F % C)
+
+        A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray)
+        A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray)
+        assert_array_equal(A1, (A % 1))
+
+        A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray)
+        assert_array_equal(A2, (A % [[1, 2], [3, 4]]))
+
+    def test_rmod(self):
+        assert_(("%s" % self.A) == str(self.A))
+        assert_(("%r" % self.A) == repr(self.A))
+
+        for ob in [42, object()]:
+            with assert_raises_regex(
+                    TypeError, "unsupported operand type.* and 'chararray'"):
+                ob % self.A
+
+    def test_slice(self):
+        """Regression test for https://github.com/numpy/numpy/issues/5982"""
+
+        arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']],
+                       dtype='S4').view(np.chararray)
+        sl1 = arr[:]
+        assert_array_equal(sl1, arr)
+        assert_(sl1.base is arr)
+        assert_(sl1.base.base is arr.base)
+
+        sl2 = arr[:, :]
+        assert_array_equal(sl2, arr)
+        assert_(sl2.base is arr)
+        assert_(sl2.base.base is arr.base)
+
+        assert_(arr[0, 0] == b'abc')
+
+
+def test_empty_indexing():
+    """Regression test for ticket 1948."""
+    # Check that indexing a chararray with an empty list/array returns an
+    # empty chararray instead of a chararray with a single empty string in it.
+    s = np.chararray((4,))
+    assert_(s[[]].size == 0)
+
+
+@pytest.mark.parametrize(["dt1", "dt2"],
+        [("S", "U"), ("U", "S"), ("S", "O"), ("U", "O"),
+         ("S", "d"), ("S", "V")])
+def test_add_types(dt1, dt2):
+    arr1 = np.array([1234234], dtype=dt1)
+    # If the following fails, e.g. use a number and test "V" explicitly
+    arr2 = np.array([b"423"], dtype=dt2)
+    with pytest.raises(TypeError,
+            match=f".*same dtype kind.*{arr1.dtype}.*{arr2.dtype}"):
+        np.char.add(arr1, arr2)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_deprecations.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_deprecations.py
new file mode 100644
index 00000000..3ada39e9
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_deprecations.py
@@ -0,0 +1,817 @@
+"""
+Tests related to deprecation warnings. Also a convenient place
+to document how deprecations should eventually be turned into errors.
+
+"""
+import datetime
+import operator
+import warnings
+import pytest
+import tempfile
+import re
+import sys
+
+import numpy as np
+from numpy.testing import (
+    assert_raises, assert_warns, assert_, assert_array_equal, SkipTest,
+    KnownFailureException, break_cycles,
+    )
+
+from numpy.core._multiarray_tests import fromstring_null_term_c_api
+
+try:
+    import pytz
+    _has_pytz = True
+except ImportError:
+    _has_pytz = False
+
+
+class _DeprecationTestCase:
+    # Just a warning: the warnings module uses re.match, so the start of
+    # this message must match.
+    message = ''
+    warning_cls = DeprecationWarning
+
+    def setup_method(self):
+        self.warn_ctx = warnings.catch_warnings(record=True)
+        self.log = self.warn_ctx.__enter__()
+
+        # Do *not* ignore other DeprecationWarnings. Ignoring warnings
+        # can give very confusing results because of
+        # https://bugs.python.org/issue4180 and it is probably simplest to
+        # try to keep the tests cleanly giving only the right warning type.
+        # (While checking, the filters set to "error" target only the
+        # expected message, so other warnings are ignored there anyway.)
+        # We still have them show up, because otherwise they would be raised.
+        warnings.filterwarnings("always", category=self.warning_cls)
+        warnings.filterwarnings("always", message=self.message,
+                                category=self.warning_cls)
+
+    def teardown_method(self):
+        self.warn_ctx.__exit__()
+
+    def assert_deprecated(self, function, num=1, ignore_others=False,
+                          function_fails=False,
+                          exceptions=np._NoValue,
+                          args=(), kwargs={}):
+        """Test if DeprecationWarnings are given and raised.
+
+        This first checks if the function when called gives `num`
+        DeprecationWarnings, after that it tries to raise these
+        DeprecationWarnings and compares them with `exceptions`.
+        The exceptions can be different for cases where this code path
+        is simply not anticipated and the exception is replaced.
+
+        Parameters
+        ----------
+        function : callable
+            The function to test
+        num : int
+            Number of DeprecationWarnings to expect. This should normally be 1.
+        ignore_others : bool
+            Whether warnings of the wrong type should be ignored (note that
+            the message is not checked)
+        function_fails : bool
+            If the function would normally fail, setting this will check for
+            warnings inside a try/except block.
+        exceptions : Exception or tuple of Exceptions
+            Exception to expect when turning the warnings into an error.
+            The default checks for DeprecationWarnings. If exceptions is
+            empty the function is expected to run successfully.
+        args : tuple
+            Arguments for `function`
+        kwargs : dict
+            Keyword arguments for `function`
+        """
+        __tracebackhide__ = True  # Hide traceback for py.test
+
+        # reset the log
+        self.log[:] = []
+
+        if exceptions is np._NoValue:
+            exceptions = (self.warning_cls,)
+
+        try:
+            function(*args, **kwargs)
+        except (Exception if function_fails else tuple()):
+            pass
+
+        # Count warnings of the expected class; flag any unexpected ones.
+        num_found = 0
+        for warning in self.log:
+            if warning.category is self.warning_cls:
+                num_found += 1
+            elif not ignore_others:
+                raise AssertionError(
+                        "expected %s but got: %s" %
+                        (self.warning_cls.__name__, warning.category))
+        if num is not None and num_found != num:
+            msg = "%i warnings found but %i expected." % (len(self.log), num)
+            lst = [str(w) for w in self.log]
+            raise AssertionError("\n".join([msg] + lst))
+
+        with warnings.catch_warnings():
+            warnings.filterwarnings("error", message=self.message,
+                                    category=self.warning_cls)
+            try:
+                function(*args, **kwargs)
+                if exceptions != tuple():
+                    raise AssertionError(
+                            "No error raised during function call")
+            except exceptions:
+                if exceptions == tuple():
+                    raise AssertionError(
+                            "Error raised during function call")
+
+    def assert_not_deprecated(self, function, args=(), kwargs={}):
+        """Test that warnings are not raised.
+
+        This is just a shorthand for:
+
+        self.assert_deprecated(function, num=0, ignore_others=True,
+                        exceptions=tuple(), args=args, kwargs=kwargs)
+        """
+        self.assert_deprecated(function, num=0, ignore_others=True,
+                        exceptions=tuple(), args=args, kwargs=kwargs)
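+
+    # Illustrative usage sketch (not part of the original suite): a subclass
+    # points `message`/`warning_cls` at the deprecation under test and hands
+    # `assert_deprecated` a callable, e.g.
+    #
+    #     class TestHypotheticalDeprecation(_DeprecationTestCase):
+    #         message = "old_api is deprecated"
+    #
+    #         def test_it(self):
+    #             self.assert_deprecated(old_api, args=(1,))
+    #
+    # where `old_api` is a hypothetical stand-in for the deprecated callable.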
+
+
+class _VisibleDeprecationTestCase(_DeprecationTestCase):
+    warning_cls = np.VisibleDeprecationWarning
+
+
+class TestDatetime64Timezone(_DeprecationTestCase):
+    """Parsing of datetime64 with timezones deprecated in 1.11.0, because
+    datetime64 is now timezone-naive rather than UTC-only.
+
+    It will be quite a while before we can remove this, because, at the very
+    least, a lot of existing code uses the 'Z' modifier to avoid conversion
+    from local time to UTC, even if it otherwise handles time in a
+    timezone-naive fashion.
+    """
+    def test_string(self):
+        self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
+        self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))
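+        # The timezone-naive form, e.g. np.datetime64('2000-01-01T00'),
+        # still parses without a warning.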
+
+    @pytest.mark.skipif(not _has_pytz,
+                        reason="The pytz module is not available.")
+    def test_datetime(self):
+        tz = pytz.timezone('US/Eastern')
+        dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
+        self.assert_deprecated(np.datetime64, args=(dt,))
+
+
+class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
+    """Assigning the 'data' attribute of an ndarray is unsafe as pointed
+     out in gh-7093. Eventually, such assignment should NOT be allowed, but
+     in the interests of maintaining backwards compatibility, only a Deprecation-
+     Warning will be raised instead for the time being to give developers time to
+     refactor relevant code.
+    """
+
+    def test_data_attr_assignment(self):
+        a = np.arange(10)
+        b = np.linspace(0, 1, 10)
+
+        self.message = ("Assigning the 'data' attribute is an "
+                        "inherently unsafe operation and will "
+                        "be removed in the future.")
+        self.assert_deprecated(a.__setattr__, args=('data', b.data))
+
+
+class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase):
+    """
+    If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
+    represent the number in base 2 (positive) or 2's complement (negative) form,
+    the function used to silently ignore the parameter and return a representation
+    using the minimal number of bits needed for the form in question. Such behavior
+    is now considered unsafe from a user perspective and will raise an error in the future.
+    """
+
+    def test_insufficient_width_positive(self):
+        args = (10,)
+        kwargs = {'width': 2}
+
+        self.message = ("Insufficient bit width provided. This behavior "
+                        "will raise an error in the future.")
+        self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
+
+    def test_insufficient_width_negative(self):
+        args = (-5,)
+        kwargs = {'width': 2}
+
+        self.message = ("Insufficient bit width provided. This behavior "
+                        "will raise an error in the future.")
+        self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
+
+
+class TestDTypeAttributeIsDTypeDeprecation(_DeprecationTestCase):
+    # Deprecated 2021-01-05, NumPy 1.21
+    message = r".*`.dtype` attribute"
+
+    def test_deprecation_dtype_attribute_is_dtype(self):
+        class dt:
+            dtype = "f8"
+
+        class vdt(np.void):
+            dtype = "f,f"
+
+        self.assert_deprecated(lambda: np.dtype(dt))
+        self.assert_deprecated(lambda: np.dtype(dt()))
+        self.assert_deprecated(lambda: np.dtype(vdt))
+        self.assert_deprecated(lambda: np.dtype(vdt(1)))
+
+
+class TestTestDeprecated:
+    def test_assert_deprecated(self):
+        test_case_instance = _DeprecationTestCase()
+        test_case_instance.setup_method()
+        assert_raises(AssertionError,
+                      test_case_instance.assert_deprecated,
+                      lambda: None)
+
+        def foo():
+            warnings.warn("foo", category=DeprecationWarning, stacklevel=2)
+
+        test_case_instance.assert_deprecated(foo)
+        test_case_instance.teardown_method()
+
+
+class TestNonNumericConjugate(_DeprecationTestCase):
+    """
+    Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes,
+    which conflicts with the error behavior of np.conjugate.
+    """
+    def test_conjugate(self):
+        for a in np.array(5), np.array(5j):
+            self.assert_not_deprecated(a.conjugate)
+        for a in (np.array('s'), np.array('2016', 'M'),
+                np.array((1, 2), [('a', int), ('b', int)])):
+            self.assert_deprecated(a.conjugate)
+
+
+class TestNPY_CHAR(_DeprecationTestCase):
+    # 2017-05-03, 1.13.0
+    def test_npy_char_deprecation(self):
+        from numpy.core._multiarray_tests import npy_char_deprecation
+        self.assert_deprecated(npy_char_deprecation)
+        assert_(npy_char_deprecation() == 'S1')
+
+
+class TestPyArray_AS1D(_DeprecationTestCase):
+    def test_npy_pyarrayas1d_deprecation(self):
+        from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation
+        assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation)
+
+
+class TestPyArray_AS2D(_DeprecationTestCase):
+    def test_npy_pyarrayas2d_deprecation(self):
+        from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation
+        assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)
+
+
+class TestDatetimeEvent(_DeprecationTestCase):
+    # 2017-08-11, 1.14.0
+    def test_3_tuple(self):
+        for cls in (np.datetime64, np.timedelta64):
+            # two valid uses - (unit, num) and (unit, num, den, None)
+            self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
+            self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))
+
+            # trying to use the event argument, removed in 1.7.0, is deprecated
+            # it used to be a uint8
+            self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
+            self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
+            self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
+            self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
+
+
+class TestTruthTestingEmptyArrays(_DeprecationTestCase):
+    # 2017-09-25, 1.14.0
+    message = '.*truth value of an empty array is ambiguous.*'
+
+    def test_1d(self):
+        self.assert_deprecated(bool, args=(np.array([]),))
+
+    def test_2d(self):
+        self.assert_deprecated(bool, args=(np.zeros((1, 0)),))
+        self.assert_deprecated(bool, args=(np.zeros((0, 1)),))
+        self.assert_deprecated(bool, args=(np.zeros((0, 0)),))
+
+
+class TestBincount(_DeprecationTestCase):
+    # 2017-06-01, 1.14.0
+    def test_bincount_minlength(self):
+        self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
+
+
+class TestGeneratorSum(_DeprecationTestCase):
+    # 2018-02-25, 1.15.0
+    def test_generator_sum(self):
+        self.assert_deprecated(np.sum, args=((i for i in range(5)),))
+
+
+class TestFromstring(_DeprecationTestCase):
+    # 2017-10-19, 1.14
+    def test_fromstring(self):
+        self.assert_deprecated(np.fromstring, args=('\x00'*80,))
+
+
+class TestFromStringAndFileInvalidData(_DeprecationTestCase):
+    # 2019-06-08, 1.17.0
+    # Tests should be moved to real tests when deprecation is done.
+    message = "string or file could not be read to its end"
+
+    @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
+    def test_deprecate_unparsable_data_file(self, invalid_str):
+        x = np.array([1.51, 2, 3.51, 4], dtype=float)
+
+        with tempfile.TemporaryFile(mode="w") as f:
+            x.tofile(f, sep=',', format='%.2f')
+            f.write(invalid_str)
+
+            f.seek(0)
+            self.assert_deprecated(lambda: np.fromfile(f, sep=","))
+            f.seek(0)
+            self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5))
+            # Should not raise:
+            with warnings.catch_warnings():
+                warnings.simplefilter("error", DeprecationWarning)
+                f.seek(0)
+                res = np.fromfile(f, sep=",", count=4)
+                assert_array_equal(res, x)
+
+    @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
+    def test_deprecate_unparsable_string(self, invalid_str):
+        x = np.array([1.51, 2, 3.51, 4], dtype=float)
+        x_str = "1.51,2,3.51,4{}".format(invalid_str)
+
+        self.assert_deprecated(lambda: np.fromstring(x_str, sep=","))
+        self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5))
+
+        # The C-level API can use 0-terminated strings rather than
+        # fixed-size ones, so test that as well:
+        bytestr = x_str.encode("ascii")
+        self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr))
+
+        with assert_warns(DeprecationWarning):
+            # This is slightly strange: fromstring leaves data potentially
+            # uninitialized (it would be good to error once everything has
+            # been read but count is larger than the actual data).
+            res = np.fromstring(x_str, sep=",", count=5)
+            assert_array_equal(res[:-1], x)
+
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", DeprecationWarning)
+
+            # Should not raise:
+            res = np.fromstring(x_str, sep=",", count=4)
+            assert_array_equal(res, x)
+
+
+class Test_GetSet_NumericOps(_DeprecationTestCase):
+    # 2018-09-20, 1.16.0
+    def test_get_numeric_ops(self):
+        from numpy.core._multiarray_tests import getset_numericops
+        self.assert_deprecated(getset_numericops, num=2)
+
+        # empty kwargs prevents any state actually changing which would break
+        # other tests.
+        self.assert_deprecated(np.set_numeric_ops, kwargs={})
+        assert_raises(ValueError, np.set_numeric_ops, add='abc')
+
+
+class TestShape1Fields(_DeprecationTestCase):
+    warning_cls = FutureWarning
+
+    # 2019-05-20, 1.17.0
+    def test_shape_1_fields(self):
+        self.assert_deprecated(np.dtype, args=([('a', int, 1)],))
+
+
+class TestNonZero(_DeprecationTestCase):
+    # 2019-05-26, 1.17.0
+    def test_zerod(self):
+        self.assert_deprecated(lambda: np.nonzero(np.array(0)))
+        self.assert_deprecated(lambda: np.nonzero(np.array(1)))
+
+
+class TestToString(_DeprecationTestCase):
+    # 2020-03-06 1.19.0
+    message = re.escape("tostring() is deprecated. Use tobytes() instead.")
+
+    def test_tostring(self):
+        arr = np.array(list(b"test\xFF"), dtype=np.uint8)
+        self.assert_deprecated(arr.tostring)
+
+    def test_tostring_matches_tobytes(self):
+        arr = np.array(list(b"test\xFF"), dtype=np.uint8)
+        b = arr.tobytes()
+        with assert_warns(DeprecationWarning):
+            s = arr.tostring()
+        assert s == b
+
+
+class TestDTypeCoercion(_DeprecationTestCase):
+    # 2020-02-06 1.19.0
+    message = "Converting .* to a dtype .*is deprecated"
+    deprecated_types = [
+        # The builtin scalar super types:
+        np.generic, np.flexible, np.number,
+        np.inexact, np.floating, np.complexfloating,
+        np.integer, np.unsignedinteger, np.signedinteger,
+        # character is a deprecated S1 special case:
+        np.character,
+    ]
+
+    def test_dtype_coercion(self):
+        for scalar_type in self.deprecated_types:
+            self.assert_deprecated(np.dtype, args=(scalar_type,))
+
+    def test_array_construction(self):
+        for scalar_type in self.deprecated_types:
+            self.assert_deprecated(np.array, args=([], scalar_type,))
+
+    def test_not_deprecated(self):
+        # All specific types are not deprecated:
+        for group in np.sctypes.values():
+            for scalar_type in group:
+                self.assert_not_deprecated(np.dtype, args=(scalar_type,))
+
+        for scalar_type in [type, dict, list, tuple]:
+            # Typical python types are coerced to object currently:
+            self.assert_not_deprecated(np.dtype, args=(scalar_type,))
+
+
+class BuiltInRoundComplexDType(_DeprecationTestCase):
+    # 2020-03-31 1.19.0
+    deprecated_types = [np.csingle, np.cdouble, np.clongdouble]
+    not_deprecated_types = [
+        np.int8, np.int16, np.int32, np.int64,
+        np.uint8, np.uint16, np.uint32, np.uint64,
+        np.float16, np.float32, np.float64,
+    ]
+
+    def test_deprecated(self):
+        for scalar_type in self.deprecated_types:
+            scalar = scalar_type(0)
+            self.assert_deprecated(round, args=(scalar,))
+            self.assert_deprecated(round, args=(scalar, 0))
+            self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0})
+
+    def test_not_deprecated(self):
+        for scalar_type in self.not_deprecated_types:
+            scalar = scalar_type(0)
+            self.assert_not_deprecated(round, args=(scalar,))
+            self.assert_not_deprecated(round, args=(scalar, 0))
+            self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0})
+
+
+class TestIncorrectAdvancedIndexWithEmptyResult(_DeprecationTestCase):
+    # 2020-05-27, NumPy 1.20.0
+    message = "Out of bound index found. This was previously ignored.*"
+
+    @pytest.mark.parametrize("index", [([3, 0],), ([0, 0], [3, 0])])
+    def test_empty_subspace(self, index):
+        # Test for both a single and multiple advanced indices. These
+        # will raise an IndexError in the future.
+        arr = np.ones((2, 2, 0))
+        self.assert_deprecated(arr.__getitem__, args=(index,))
+        self.assert_deprecated(arr.__setitem__, args=(index, 0.))
+
+        # for this array, the subspace is only empty after applying the slice
+        arr2 = np.ones((2, 2, 1))
+        index2 = (slice(0, 0),) + index
+        self.assert_deprecated(arr2.__getitem__, args=(index2,))
+        self.assert_deprecated(arr2.__setitem__, args=(index2, 0.))
+
+    def test_empty_index_broadcast_not_deprecated(self):
+        arr = np.ones((2, 2, 2))
+
+        index = ([[3], [2]], [])  # broadcast to an empty result.
+        self.assert_not_deprecated(arr.__getitem__, args=(index,))
+        self.assert_not_deprecated(arr.__setitem__,
+                                   args=(index, np.empty((2, 0, 2))))
+
+
+class TestNonExactMatchDeprecation(_DeprecationTestCase):
+    # 2020-04-22
+    def test_non_exact_match(self):
+        arr = np.array([[3, 6, 6], [4, 5, 1]])
+        # misspelt mode check
+        self.assert_deprecated(lambda: np.ravel_multi_index(arr, (7, 6), mode='Cilp'))
+        # using completely different word with first character as R
+        self.assert_deprecated(lambda: np.searchsorted(arr[0], 4, side='Random'))
+
+
+class TestMatrixInOuter(_DeprecationTestCase):
+    # 2020-05-13 NumPy 1.20.0
+    message = (r"add.outer\(\) was passed a numpy matrix as "
+               r"(first|second) argument.")
+
+    def test_deprecated(self):
+        arr = np.array([1, 2, 3])
+        m = np.array([1, 2, 3]).view(np.matrix)
+        self.assert_deprecated(np.add.outer, args=(m, m), num=2)
+        self.assert_deprecated(np.add.outer, args=(arr, m))
+        self.assert_deprecated(np.add.outer, args=(m, arr))
+        self.assert_not_deprecated(np.add.outer, args=(arr, arr))
+
+
+class FlatteningConcatenateUnsafeCast(_DeprecationTestCase):
+    # NumPy 1.20, 2020-09-03
+    message = "concatenate with `axis=None` will use same-kind casting"
+
+    def test_deprecated(self):
+        self.assert_deprecated(np.concatenate,
+                args=(([0.], [1.]),),
+                kwargs=dict(axis=None, out=np.empty(2, dtype=np.int64)))
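+        # axis=None flattens the inputs before concatenating; writing the
+        # float result into the int64 `out` above is an unsafe cast, which
+        # is what triggers the same-kind casting warning.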
+
+    def test_not_deprecated(self):
+        self.assert_not_deprecated(np.concatenate,
+                args=(([0.], [1.]),),
+                kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64),
+                        'casting': "unsafe"})
+
+        with assert_raises(TypeError):
+            # Tests should notice if the deprecation warning is given first...
+            np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64),
+                           casting="same_kind")
+
+
+class TestDeprecatedUnpickleObjectScalar(_DeprecationTestCase):
+    # Deprecated 2020-11-24, NumPy 1.20
+    """
+    Technically, it should be impossible to create numpy object scalars,
+    but there was an unpickle path that would in theory allow it. That
+    path is invalid and must lead to the warning.
+    """
+    message = "Unpickling a scalar with object dtype is deprecated."
+
+    def test_deprecated(self):
+        ctor = np.core.multiarray.scalar
+        self.assert_deprecated(lambda: ctor(np.dtype("O"), 1))
+
+
+class TestSingleElementSignature(_DeprecationTestCase):
+    # Deprecated 2021-04-01, NumPy 1.21
+    message = r"The use of a length 1"
+
+    def test_deprecated(self):
+        self.assert_deprecated(lambda: np.add(1, 2, signature="d"))
+        self.assert_deprecated(lambda: np.add(1, 2, sig=(np.dtype("l"),)))
+
+
+class TestCtypesGetter(_DeprecationTestCase):
+    # Deprecated 2021-05-18, Numpy 1.21.0
+    warning_cls = DeprecationWarning
+    ctypes = np.array([1]).ctypes
+
+    @pytest.mark.parametrize(
+        "name", ["get_data", "get_shape", "get_strides", "get_as_parameter"]
+    )
+    def test_deprecated(self, name: str) -> None:
+        func = getattr(self.ctypes, name)
+        self.assert_deprecated(lambda: func())
+
+    @pytest.mark.parametrize(
+        "name", ["data", "shape", "strides", "_as_parameter_"]
+    )
+    def test_not_deprecated(self, name: str) -> None:
+        self.assert_not_deprecated(lambda: getattr(self.ctypes, name))
+
+
+PARTITION_DICT = {
+    "partition method": np.arange(10).partition,
+    "argpartition method": np.arange(10).argpartition,
+    "partition function": lambda kth: np.partition(np.arange(10), kth),
+    "argpartition function": lambda kth: np.argpartition(np.arange(10), kth),
+}
+
+
+@pytest.mark.parametrize("func", PARTITION_DICT.values(), ids=PARTITION_DICT)
+class TestPartitionBoolIndex(_DeprecationTestCase):
+    # Deprecated 2021-09-29, NumPy 1.22
+    warning_cls = DeprecationWarning
+    message = "Passing booleans as partition index is deprecated"
+
+    def test_deprecated(self, func):
+        self.assert_deprecated(lambda: func(True))
+        self.assert_deprecated(lambda: func([False, True]))
+
+    def test_not_deprecated(self, func):
+        self.assert_not_deprecated(lambda: func(1))
+        self.assert_not_deprecated(lambda: func([0, 1]))
+
+
+class TestMachAr(_DeprecationTestCase):
+    # Deprecated 2022-11-22, NumPy 1.25
+    warning_cls = DeprecationWarning
+
+    def test_deprecated_module(self):
+        self.assert_deprecated(lambda: getattr(np.core, "MachAr"))
+
+
+class TestQuantileInterpolationDeprecation(_DeprecationTestCase):
+    # Deprecated 2021-11-08, NumPy 1.22
+    @pytest.mark.parametrize("func",
+        [np.percentile, np.quantile, np.nanpercentile, np.nanquantile])
+    def test_deprecated(self, func):
+        self.assert_deprecated(
+            lambda: func([0., 1.], 0., interpolation="linear"))
+        self.assert_deprecated(
+            lambda: func([0., 1.], 0., interpolation="nearest"))
+
+    @pytest.mark.parametrize("func",
+            [np.percentile, np.quantile, np.nanpercentile, np.nanquantile])
+    def test_both_passed(self, func):
+        with warnings.catch_warnings():
+            # catch the DeprecationWarning so that it does not raise:
+            warnings.simplefilter("always", DeprecationWarning)
+            with pytest.raises(TypeError):
+                func([0., 1.], 0., interpolation="nearest", method="nearest")
+
+
+class TestMemEventHook(_DeprecationTestCase):
+    # Deprecated 2021-11-18, NumPy 1.23
+    def test_mem_seteventhook(self):
+        # The actual tests are within the C code in
+        # multiarray/_multiarray_tests.c.src
+        import numpy.core._multiarray_tests as ma_tests
+        with pytest.warns(DeprecationWarning,
+                          match='PyDataMem_SetEventHook is deprecated'):
+            ma_tests.test_pydatamem_seteventhook_start()
+        # force an allocation and free of a numpy array
+        # needs to be larger than the limit of the small-memory cache in ctors.c
+        a = np.zeros(1000)
+        del a
+        break_cycles()
+        with pytest.warns(DeprecationWarning,
+                          match='PyDataMem_SetEventHook is deprecated'):
+            ma_tests.test_pydatamem_seteventhook_end()
+
+
+class TestArrayFinalizeNone(_DeprecationTestCase):
+    message = "Setting __array_finalize__ = None"
+
+    def test_use_none_is_deprecated(self):
+        # Deprecated way that ndarray itself showed nothing needs finalizing.
+        class NoFinalize(np.ndarray):
+            __array_finalize__ = None
+
+        self.assert_deprecated(lambda: np.array(1).view(NoFinalize))
+
+
+class TestAxisNotMAXDIMS(_DeprecationTestCase):
+    # Deprecated 2022-01-08, NumPy 1.23
+    message = r"Using `axis=32` \(MAXDIMS\) is deprecated"
+
+    def test_deprecated(self):
+        a = np.zeros((1,)*32)
+        self.assert_deprecated(lambda: np.repeat(a, 1, axis=np.MAXDIMS))
+
+
+class TestLoadtxtParseIntsViaFloat(_DeprecationTestCase):
+    # Deprecated 2022-07-03, NumPy 1.23
+    # This test can be removed without replacement after the deprecation.
+    # The tests:
+    #   * numpy/lib/tests/test_loadtxt.py::test_integer_signs
+    #   * lib/tests/test_loadtxt.py::test_implicit_cast_float_to_int_fails
+    # Have a warning filter that needs to be removed.
+    message = r"loadtxt\(\): Parsing an integer via a float is deprecated.*"
+
+    @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+    def test_deprecated_warning(self, dtype):
+        with pytest.warns(DeprecationWarning, match=self.message):
+            np.loadtxt(["10.5"], dtype=dtype)
+
+    @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+    def test_deprecated_raised(self, dtype):
+        # The DeprecationWarning is chained when raised, so test manually:
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", DeprecationWarning)
+            try:
+                np.loadtxt(["10.5"], dtype=dtype)
+            except ValueError as e:
+                assert isinstance(e.__cause__, DeprecationWarning)
+
+
+class TestScalarConversion(_DeprecationTestCase):
+    # 2023-01-02, 1.25.0
+    def test_float_conversion(self):
+        self.assert_deprecated(float, args=(np.array([3.14]),))
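+        # Only size-1 arrays with ndim > 0 are deprecated here; converting
+        # a 0-d array, e.g. float(np.array(3.14)), remains valid.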
+
+    def test_behaviour(self):
+        b = np.array([[3.14]])
+        c = np.zeros(5)
+        with pytest.warns(DeprecationWarning):
+            c[0] = b
+
+
+class TestPyIntConversion(_DeprecationTestCase):
+    message = r".*stop allowing conversion of out-of-bound.*"
+
+    @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+    def test_deprecated_scalar(self, dtype):
+        dtype = np.dtype(dtype)
+        info = np.iinfo(dtype)
+
+        # Cover the most common creation paths (all end up in the
+        # same place):
+        def scalar(value, dtype):
+            dtype.type(value)
+
+        def assign(value, dtype):
+            arr = np.array([0, 0, 0], dtype=dtype)
+            arr[2] = value
+
+        def create(value, dtype):
+            np.array([value], dtype=dtype)
+
+        for creation_func in [scalar, assign, create]:
+            try:
+                self.assert_deprecated(
+                        lambda: creation_func(info.min - 1, dtype))
+            except OverflowError:
+                pass  # OverflowErrors always happened also before and are OK.
+
+            try:
+                self.assert_deprecated(
+                        lambda: creation_func(info.max + 1, dtype))
+            except OverflowError:
+                pass  # OverflowErrors always happened also before and are OK.
+
+
+class TestDeprecatedGlobals(_DeprecationTestCase):
+    # Deprecated 2022-11-17, NumPy 1.24
+    def test_type_aliases(self):
+        # from builtins
+        self.assert_deprecated(lambda: np.bool8)
+        self.assert_deprecated(lambda: np.int0)
+        self.assert_deprecated(lambda: np.uint0)
+        self.assert_deprecated(lambda: np.bytes0)
+        self.assert_deprecated(lambda: np.str0)
+        self.assert_deprecated(lambda: np.object0)
+
+
+@pytest.mark.parametrize("name",
+        ["bool", "long", "ulong", "str", "bytes", "object"])
+def test_future_scalar_attributes(name):
+    # FutureWarning added 2022-11-17, NumPy 1.24.
+    assert name not in dir(np)  # we may decide never to add them
+    with pytest.warns(FutureWarning,
+            match=f"In the future .*{name}"):
+        assert not hasattr(np, name)
+
+    # Unfortunately, they are currently still valid via `np.dtype()`
+    np.dtype(name)
+    name in np.sctypeDict  # not asserted; membership is merely exercised here
+
+
+# Ignore the above future attribute warning for this test.
+@pytest.mark.filterwarnings("ignore:In the future:FutureWarning")
+class TestRemovedGlobals:
+    # Removed 2023-01-12, NumPy 1.24.0
+    # Not a deprecation, but the large error was added to aid those who missed
+    # the previous deprecation, and should be removed on a similar
+    # (or faster) timeline.
+    @pytest.mark.parametrize("name",
+            ["object", "bool", "float", "complex", "str", "int"])
+    def test_attributeerror_includes_info(self, name):
+        msg = f".*\n`np.{name}` was a deprecated alias for the builtin"
+        with pytest.raises(AttributeError, match=msg):
+            getattr(np, name)
+
+
+class TestDeprecatedFinfo(_DeprecationTestCase):
+    # Deprecated in NumPy 1.25, 2023-01-16
+    def test_deprecated_none(self):
+        self.assert_deprecated(np.finfo, args=(None,))
+
+
+class TestFromnumeric(_DeprecationTestCase):
+    # 2023-02-28, 1.25.0
+    def test_round_(self):
+        self.assert_deprecated(lambda: np.round_(np.array([1.5, 2.5, 3.5])))
+
+    # 2023-03-02, 1.25.0
+    def test_cumproduct(self):
+        self.assert_deprecated(lambda: np.cumproduct(np.array([1, 2, 3])))
+
+    # 2023-03-02, 1.25.0
+    def test_product(self):
+        self.assert_deprecated(lambda: np.product(np.array([1, 2, 3])))
+
+    # 2023-03-02, 1.25.0
+    def test_sometrue(self):
+        self.assert_deprecated(lambda: np.sometrue(np.array([True, False])))
+
+    # 2023-03-02, 1.25.0
+    def test_alltrue(self):
+        self.assert_deprecated(lambda: np.alltrue(np.array([True, False])))
+
+
+class TestMathAlias(_DeprecationTestCase):
+    # Deprecated in Numpy 1.25, 2023-04-06
+    def test_deprecated_np_math(self):
+        self.assert_deprecated(lambda: np.math)
+
+    def test_deprecated_np_lib_math(self):
+        self.assert_deprecated(lambda: np.lib.math)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_dlpack.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_dlpack.py
new file mode 100644
index 00000000..49249bc6
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_dlpack.py
@@ -0,0 +1,124 @@
+import sys
+import pytest
+
+import numpy as np
+from numpy.testing import assert_array_equal, IS_PYPY
+
+
+class TestDLPack:
+    @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
+    def test_dunder_dlpack_refcount(self):
+        x = np.arange(5)
+        y = x.__dlpack__()
+        assert sys.getrefcount(x) == 3
+        del y
+        assert sys.getrefcount(x) == 2
+
+    def test_dunder_dlpack_stream(self):
+        x = np.arange(5)
+        x.__dlpack__(stream=None)
+
+        with pytest.raises(RuntimeError):
+            x.__dlpack__(stream=1)
+
+    def test_strides_not_multiple_of_itemsize(self):
+        dt = np.dtype([('int', np.int32), ('char', np.int8)])
+        y = np.zeros((5,), dtype=dt)
+        z = y['int']
+
+        with pytest.raises(BufferError):
+            np.from_dlpack(z)
+
+    @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
+    def test_from_dlpack_refcount(self):
+        x = np.arange(5)
+        y = np.from_dlpack(x)
+        assert sys.getrefcount(x) == 3
+        del y
+        assert sys.getrefcount(x) == 2
+
+    @pytest.mark.parametrize("dtype", [
+        np.bool_,
+        np.int8, np.int16, np.int32, np.int64,
+        np.uint8, np.uint16, np.uint32, np.uint64,
+        np.float16, np.float32, np.float64,
+        np.complex64, np.complex128
+    ])
+    def test_dtype_passthrough(self, dtype):
+        x = np.arange(5).astype(dtype)
+        y = np.from_dlpack(x)
+
+        assert y.dtype == x.dtype
+        assert_array_equal(x, y)
+
+    def test_invalid_dtype(self):
+        x = np.asarray(np.datetime64('2021-05-27'))
+
+        with pytest.raises(BufferError):
+            np.from_dlpack(x)
+
+    def test_invalid_byte_swapping(self):
+        dt = np.dtype('=i8').newbyteorder()
+        x = np.arange(5, dtype=dt)
+
+        with pytest.raises(BufferError):
+            np.from_dlpack(x)
+
+    def test_non_contiguous(self):
+        x = np.arange(25).reshape((5, 5))
+
+        y1 = x[0]
+        assert_array_equal(y1, np.from_dlpack(y1))
+
+        y2 = x[:, 0]
+        assert_array_equal(y2, np.from_dlpack(y2))
+
+        y3 = x[1, :]
+        assert_array_equal(y3, np.from_dlpack(y3))
+
+        y4 = x[1]
+        assert_array_equal(y4, np.from_dlpack(y4))
+
+        y5 = np.diagonal(x).copy()
+        assert_array_equal(y5, np.from_dlpack(y5))
+
+    @pytest.mark.parametrize("ndim", range(33))
+    def test_higher_dims(self, ndim):
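+        # ndim runs from 0 to 32, the NPY_MAXDIMS limit in NumPy 1.x.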
+        shape = (1,) * ndim
+        x = np.zeros(shape, dtype=np.float64)
+
+        assert shape == np.from_dlpack(x).shape
+
+    def test_dlpack_device(self):
+        x = np.arange(5)
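+        # (1, 0) is (kDLCPU, device_id=0) in DLPack's device enumeration.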
+        assert x.__dlpack_device__() == (1, 0)
+        y = np.from_dlpack(x)
+        assert y.__dlpack_device__() == (1, 0)
+        z = y[::2]
+        assert z.__dlpack_device__() == (1, 0)
+
+    def dlpack_deleter_exception(self):
+        x = np.arange(5)
+        _ = x.__dlpack__()
+        raise RuntimeError
+
+    def test_dlpack_destructor_exception(self):
+        with pytest.raises(RuntimeError):
+            self.dlpack_deleter_exception()
+
+    def test_readonly(self):
+        x = np.arange(5)
+        x.flags.writeable = False
+        with pytest.raises(BufferError):
+            x.__dlpack__()
+
+    def test_ndim0(self):
+        x = np.array(1.0)
+        y = np.from_dlpack(x)
+        assert_array_equal(x, y)
+
+    def test_size1dims_arrays(self):
+        x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4),
+                       buffer=np.ones(1000, dtype=np.uint8), order='F')
+        y = np.from_dlpack(x)
+        assert_array_equal(x, y)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_dtype.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_dtype.py
new file mode 100644
index 00000000..ac155b67
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_dtype.py
@@ -0,0 +1,1906 @@
+import sys
+import operator
+import pytest
+import ctypes
+import gc
+import types
+from typing import Any
+
+import numpy as np
+import numpy.dtypes
+from numpy.core._rational_tests import rational
+from numpy.core._multiarray_tests import create_custom_field_dtype
+from numpy.testing import (
+    assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT,
+    IS_PYSTON, _OLD_PROMOTION)
+from numpy.compat import pickle
+from itertools import permutations
+import random
+
+import hypothesis
+from hypothesis.extra import numpy as hynp
+
+
+def assert_dtype_equal(a, b):
+    assert_equal(a, b)
+    assert_equal(hash(a), hash(b),
+                 "two equivalent types do not hash to the same value !")
+
+def assert_dtype_not_equal(a, b):
+    assert_(a != b)
+    assert_(hash(a) != hash(b),
+            "two different types hash to the same value !")
+
+class TestBuiltin:
+    @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
+                                   np.compat.unicode])
+    def test_run(self, t):
+        """Only test hash runs at all."""
+        dt = np.dtype(t)
+        hash(dt)
+
+    @pytest.mark.parametrize('t', [int, float])
+    def test_dtype(self, t):
+        # Make sure equivalent byte-order chars hash the same (e.g. < and =
+        # on little-endian systems).
+        dt = np.dtype(t)
+        dt2 = dt.newbyteorder("<")
+        dt3 = dt.newbyteorder(">")
+        if dt == dt2:
+            assert_(dt.byteorder != dt2.byteorder, "bogus test")
+            assert_dtype_equal(dt, dt2)
+        else:
+            assert_(dt.byteorder != dt3.byteorder, "bogus test")
+            assert_dtype_equal(dt, dt3)
+
+    def test_equivalent_dtype_hashing(self):
+        # Make sure equivalent dtypes with different type num hash equal
+        uintp = np.dtype(np.uintp)
+        if uintp.itemsize == 4:
+            left = uintp
+            right = np.dtype(np.uint32)
+        else:
+            left = uintp
+            right = np.dtype(np.ulonglong)
+        assert_(left == right)
+        assert_(hash(left) == hash(right))
+
+    def test_invalid_types(self):
+        # Make sure invalid type strings raise an error
+
+        assert_raises(TypeError, np.dtype, 'O3')
+        assert_raises(TypeError, np.dtype, 'O5')
+        assert_raises(TypeError, np.dtype, 'O7')
+        assert_raises(TypeError, np.dtype, 'b3')
+        assert_raises(TypeError, np.dtype, 'h4')
+        assert_raises(TypeError, np.dtype, 'I5')
+        assert_raises(TypeError, np.dtype, 'e3')
+        assert_raises(TypeError, np.dtype, 'f5')
+
+        if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
+            assert_raises(TypeError, np.dtype, 'g12')
+        elif np.dtype('g').itemsize == 12:
+            assert_raises(TypeError, np.dtype, 'g16')
+
+        if np.dtype('l').itemsize == 8:
+            assert_raises(TypeError, np.dtype, 'l4')
+            assert_raises(TypeError, np.dtype, 'L4')
+        else:
+            assert_raises(TypeError, np.dtype, 'l8')
+            assert_raises(TypeError, np.dtype, 'L8')
+
+        if np.dtype('q').itemsize == 8:
+            assert_raises(TypeError, np.dtype, 'q4')
+            assert_raises(TypeError, np.dtype, 'Q4')
+        else:
+            assert_raises(TypeError, np.dtype, 'q8')
+            assert_raises(TypeError, np.dtype, 'Q8')
+
+    def test_richcompare_invalid_dtype_equality(self):
+        # Make sure objects that cannot be converted to valid
+        # dtypes result in False/True when compared to valid dtypes.
+        # Here 7 cannot be converted to a dtype; no exception should be raised.
+
+        assert not np.dtype(np.int32) == 7, "dtype richcompare failed for =="
+        assert np.dtype(np.int32) != 7, "dtype richcompare failed for !="
+
+    @pytest.mark.parametrize(
+        'operation',
+        [operator.le, operator.lt, operator.ge, operator.gt])
+    def test_richcompare_invalid_dtype_comparison(self, operation):
+        # Make sure TypeError is raised for comparison operators
+        # for invalid dtypes. Here 7 is an invalid dtype.
+
+        with pytest.raises(TypeError):
+            operation(np.dtype(np.int32), 7)
+
+    @pytest.mark.parametrize("dtype",
+             ['Bool', 'Bytes0', 'Complex32', 'Complex64',
+              'Datetime64', 'Float16', 'Float32', 'Float64',
+              'Int8', 'Int16', 'Int32', 'Int64',
+              'Object0', 'Str0', 'Timedelta64',
+              'UInt8', 'UInt16', 'Uint32', 'UInt32',
+              'Uint64', 'UInt64', 'Void0',
+              "Float128", "Complex128"])
+    def test_numeric_style_types_are_invalid(self, dtype):
+        with assert_raises(TypeError):
+            np.dtype(dtype)
+
+    def test_remaining_dtypes_with_bad_bytesize(self):
+        # The np.<name> aliases were deprecated; these probably should be too
+        assert np.dtype("int0") is np.dtype("intp")
+        assert np.dtype("uint0") is np.dtype("uintp")
+        assert np.dtype("bool8") is np.dtype("bool")
+        assert np.dtype("bytes0") is np.dtype("bytes")
+        assert np.dtype("str0") is np.dtype("str")
+        assert np.dtype("object0") is np.dtype("object")
+
+    @pytest.mark.parametrize(
+        'value',
+        ['m8', 'M8', 'datetime64', 'timedelta64',
+         'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
+         '>f', '<f8', '=c8'])
+    def test_dtype_bytes_str_equivalence(self, value):
+        bytes_value = value.encode('ascii')
+        from_bytes = np.dtype(bytes_value)
+        from_str = np.dtype(value)
+        assert_dtype_equal(from_bytes, from_str)
+
+
+@pytest.mark.skipif(
+    sys.version_info >= (3, 12),
+    reason="Python 3.12 has immortal refcounts, this test will no longer "
+           "work. See gh-23986"
+)
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+class TestStructuredObjectRefcounting:
+    """These tests cover various uses of complicated structured types which
+    include objects and thus require reference counting.
+    """
+    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+                             iter_struct_object_dtypes())
+    @pytest.mark.parametrize(["creation_func", "creation_obj"], [
+        pytest.param(np.empty, None,
+             # None is probably used for too many things
+             marks=pytest.mark.skip("unreliable due to python's behaviour")),
+        (np.ones, 1),
+        (np.zeros, 0)])
+    def test_structured_object_create_delete(self, dt, pat, count, singleton,
+                                             creation_func, creation_obj):
+        """Structured object reference counting in creation and deletion"""
+        # The test assumes that 0, 1, and None are singletons.
+        gc.collect()
+        before = sys.getrefcount(creation_obj)
+        arr = creation_func(3, dt)
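+        # Each of the 3 array elements stores `count` references to the fill
+        # object, so its refcount should grow by exactly count * 3.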
+
+        now = sys.getrefcount(creation_obj)
+        assert now - before == count * 3
+        del arr
+        now = sys.getrefcount(creation_obj)
+        assert now == before
+
+    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+                             iter_struct_object_dtypes())
+    def test_structured_object_item_setting(self, dt, pat, count, singleton):
+        """Structured object reference counting for simple item setting"""
+        one = 1
+
+        gc.collect()
+        before = sys.getrefcount(singleton)
+        arr = np.array([pat] * 3, dt)
+        assert sys.getrefcount(singleton) - before == count * 3
+        # Fill with `1` and check that it was replaced correctly:
+        before2 = sys.getrefcount(one)
+        arr[...] = one
+        after2 = sys.getrefcount(one)
+        assert after2 - before2 == count * 3
+        del arr
+        gc.collect()
+        assert sys.getrefcount(one) == before2
+        assert sys.getrefcount(singleton) == before
+
+    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+                             iter_struct_object_dtypes())
+    @pytest.mark.parametrize(
+        ['shape', 'index', 'items_changed'],
+        [((3,), ([0, 2],), 2),
+         ((3, 2), ([0, 2], slice(None)), 4),
+         ((3, 2), ([0, 2], [1]), 2),
+         ((3,), ([True, False, True]), 2)])
+    def test_structured_object_indexing(self, shape, index, items_changed,
+                                        dt, pat, count, singleton):
+        """Structured object reference counting for advanced indexing."""
+        # Use two small negative values (they should be singletons, but are
+        # less likely to run into race conditions).  This failed in some
+        # threaded environments when using 0 and 1.  If it fails again, the
+        # explicit checks should be removed in favour of relying on the
+        # `pytest-leaks` reference count checker only.
+        val0 = -4
+        val1 = -5
+
+        arr = np.full(shape, val0, dt)
+
+        gc.collect()
+        before_val0 = sys.getrefcount(val0)
+        before_val1 = sys.getrefcount(val1)
+        # Test item getting:
+        part = arr[index]
+        after_val0 = sys.getrefcount(val0)
+        assert after_val0 - before_val0 == count * items_changed
+        del part
+        # Test item setting:
+        arr[index] = val1
+        gc.collect()
+        after_val0 = sys.getrefcount(val0)
+        after_val1 = sys.getrefcount(val1)
+        assert before_val0 - after_val0 == count * items_changed
+        assert after_val1 - before_val1 == count * items_changed
+
+    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+                             iter_struct_object_dtypes())
+    def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
+        """Structured object reference counting for specialized functions.
+        The older functions such as take and repeat use different code paths
+        than item setting (at the time of writing).
+        """
+        indices = [0, 1]
+
+        arr = np.array([pat] * 3, dt)
+        gc.collect()
+        before = sys.getrefcount(singleton)
+        res = arr.take(indices)
+        after = sys.getrefcount(singleton)
+        assert after - before == count * 2
+        new = res.repeat(10)
+        gc.collect()
+        after_repeat = sys.getrefcount(singleton)
+        assert after_repeat - after == count * 2 * 10
+
+
+class TestStructuredDtypeSparseFields:
+    """Tests subarray fields which contain sparse dtypes so that
+    not all memory is used by the dtype work. Such dtype's should
+    leave the underlying memory unchanged.
+    """
+    dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],
+                             'offsets':[0, 4]}, (2, 3))])
+    sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],
+                                    'offsets':[4]}, (2, 3))])
+
+    def test_sparse_field_assignment(self):
+        arr = np.zeros(3, self.dtype)
+        sparse_arr = arr.view(self.sparse_dtype)
+
+        sparse_arr[...] = np.finfo(np.float32).max
+        # dtype is reduced when accessing the field, so shape is (3, 2, 3):
+        assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
+
+    def test_sparse_field_assignment_fancy(self):
+        # Fancy assignment goes to the copyswap function for complex types:
+        arr = np.zeros(3, self.dtype)
+        sparse_arr = arr.view(self.sparse_dtype)
+
+        sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max
+        # dtype is reduced when accessing the field, so shape is (3, 2, 3):
+        assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
+
+
+class TestMonsterType:
+    """Test deeply nested subtypes."""
+
+    def test1(self):
+        simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
+            'titles': ['Red pixel', 'Blue pixel']})
+        a = np.dtype([('yo', int), ('ye', simple1),
+            ('yi', np.dtype((int, (3, 2))))])
+        b = np.dtype([('yo', int), ('ye', simple1),
+            ('yi', np.dtype((int, (3, 2))))])
+        assert_dtype_equal(a, b)
+
+        c = np.dtype([('yo', int), ('ye', simple1),
+            ('yi', np.dtype((a, (3, 2))))])
+        d = np.dtype([('yo', int), ('ye', simple1),
+            ('yi', np.dtype((a, (3, 2))))])
+        assert_dtype_equal(c, d)
+
+    @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
+    def test_list_recursion(self):
+        l = list()
+        l.append(('f', l))
+        with pytest.raises(RecursionError):
+            np.dtype(l)
+
+    @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
+    def test_tuple_recursion(self):
+        d = np.int32
+        for i in range(100000):
+            d = (d, (1,))
+        with pytest.raises(RecursionError):
+            np.dtype(d)
+
+    @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
+    def test_dict_recursion(self):
+        d = dict(names=['self'], formats=[None], offsets=[0])
+        d['formats'][0] = d
+        with pytest.raises(RecursionError):
+            np.dtype(d)
+
+
+class TestMetadata:
+    def test_no_metadata(self):
+        d = np.dtype(int)
+        assert_(d.metadata is None)
+
+    def test_metadata_takes_dict(self):
+        d = np.dtype(int, metadata={'datum': 1})
+        assert_(d.metadata == {'datum': 1})
+
+    def test_metadata_rejects_nondict(self):
+        assert_raises(TypeError, np.dtype, int, metadata='datum')
+        assert_raises(TypeError, np.dtype, int, metadata=1)
+        assert_raises(TypeError, np.dtype, int, metadata=None)
+
+    def test_nested_metadata(self):
+        d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
+        assert_(d['a'].metadata == {'datum': 1})
+
+    def test_base_metadata_copied(self):
+        d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
+        assert_(d.metadata == {'datum': 1})
+
+class TestString:
+    def test_complex_dtype_str(self):
+        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
+                                ('rtile', '>f4', (64, 36))], (3,)),
+                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
+                                   ('bright', '>f4', (8, 36))])])
+        assert_equal(str(dt),
+                     "[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
+                     "('rtile', '>f4', (64, 36))], (3,)), "
+                     "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
+                     "('bright', '>f4', (8, 36))])]")
+
+        # If the sticky aligned flag is set to True, it makes the
+        # str() function use a dict representation with an 'aligned' flag
+        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
+                                ('rtile', '>f4', (64, 36))],
+                                (3,)),
+                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
+                                   ('bright', '>f4', (8, 36))])],
+                       align=True)
+        assert_equal(str(dt),
+                    "{'names': ['top', 'bottom'],"
+                    " 'formats': [([('tiles', ('>f4', (64, 64)), (1,)), "
+                                   "('rtile', '>f4', (64, 36))], (3,)), "
+                                  "[('bleft', ('>f4', (8, 64)), (1,)), "
+                                   "('bright', '>f4', (8, 36))]],"
+                    " 'offsets': [0, 76800],"
+                    " 'itemsize': 80000,"
+                    " 'aligned': True}")
+        with np.printoptions(legacy='1.21'):
+            assert_equal(str(dt),
+                        "{'names':['top','bottom'], "
+                         "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
+                                      "('rtile', '>f4', (64, 36))], (3,)),"
+                                     "[('bleft', ('>f4', (8, 64)), (1,)), "
+                                      "('bright', '>f4', (8, 36))]], "
+                         "'offsets':[0,76800], "
+                         "'itemsize':80000, "
+                         "'aligned':True}")
+        assert_equal(np.dtype(eval(str(dt))), dt)
+
+        dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
+                        'offsets': [0, 1, 2],
+                        'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
+        assert_equal(str(dt),
+                    "[(('Red pixel', 'r'), 'u1'), "
+                    "(('Green pixel', 'g'), 'u1'), "
+                    "(('Blue pixel', 'b'), 'u1')]")
+
+        dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
+                       'formats': ['<u4', 'u1', 'u1', 'u1'],
+                       'offsets': [0, 0, 1, 2],
+                       'titles': ['Color', 'Red pixel',
+                                  'Green pixel', 'Blue pixel']})
+        assert_equal(str(dt),
+                    "{'names': ['rgba', 'r', 'g', 'b'],"
+                    " 'formats': ['<u4', 'u1', 'u1', 'u1'],"
+                    " 'offsets': [0, 0, 1, 2],"
+                    " 'titles': ['Color', 'Red pixel', 'Green pixel', "
+                    "'Blue pixel'], 'itemsize': 4}")
+
+    def test_complex_dtype_repr(self):
+        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
+                                ('rtile', '>f4', (64, 36))], (3,)),
+                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
+                                   ('bright', '>f4', (8, 36))])])
+        assert_equal(repr(dt),
+                     "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
+                     "('rtile', '>f4', (64, 36))], (3,)), "
+                     "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
+                     "('bright', '>f4', (8, 36))])])")
+
+        dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
+                        'offsets': [0, 1, 2],
+                        'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
+                        align=True)
+        assert_equal(repr(dt),
+                    "dtype([(('Red pixel', 'r'), 'u1'), "
+                    "(('Green pixel', 'g'), 'u1'), "
+                    "(('Blue pixel', 'b'), 'u1')], align=True)")
+
+    def test_repr_structured_not_packed(self):
+        dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
+                       'formats': ['<u4', 'u1', 'u1', 'u1'],
+                       'offsets': [0, 0, 1, 2],
+                       'titles': ['Color', 'Red pixel',
+                                  'Green pixel', 'Blue pixel']}, align=True)
+        assert_equal(repr(dt),
+                    "dtype({'names': ['rgba', 'r', 'g', 'b'],"
+                    " 'formats': ['<u4', 'u1', 'u1', 'u1'],"
+                    " 'offsets': [0, 0, 1, 2],"
+                    " 'titles': ['Color', 'Red pixel', 'Green pixel', "
+                    "'Blue pixel'], 'itemsize': 4}, align=True)")
+
+
+class TestDTypeMakeCanonical:
+    def test_simple(self):
+        dt = np.dtype(">i4")
+        assert np.result_type(dt).isnative
+        assert np.result_type(dt).num == dt.num
+
+        # dtype with empty space:
+        struct_dt = np.dtype(">i4,i1,<i8", align=True)
+        assert np.result_type(struct_dt).isnative
+
+
+class TestFromCTypes:
+    @staticmethod
+    def check(ctype, dtype):
+        dtype = np.dtype(dtype)
+        assert_equal(np.dtype(ctype), dtype)
+        assert_equal(np.dtype(ctype()), dtype)
+
+    def test_big_endian_structure_packed(self):
+        class BigEndStruct(ctypes.BigEndianStructure):
+            _fields_ = [
+                ('a', ctypes.c_float * 1 * 2),
+                ('b', ctypes.c_uint32)
+            ]
+            _pack_ = 1
+        expected = np.dtype([('a', '>f4', (2, 1)), ('b', '>u4')])
+        self.check(BigEndStruct, expected)
+
+    def test_little_endian_structure_packed(self):
+        class LittleEndStruct(ctypes.LittleEndianStructure):
+            _fields_ = [
+                ('one', ctypes.c_uint8),
+                ('two', ctypes.c_uint32)
+            ]
+            _pack_ = 1
+        expected = np.dtype([('one', 'u1'), ('two', '<u4')])
+        self.check(LittleEndStruct, expected)
+
+    def test_big_endian_structure(self):
+        class PaddedStruct(ctypes.BigEndianStructure):
+            _fields_ = [
+                ('a', ctypes.c_uint8),
+                ('b', ctypes.c_uint16)
+            ]
+        expected = np.dtype([
+            ('a', '>B'),
+            ('b', '>H')
+        ], align=True)
+        self.check(PaddedStruct, expected)
+
+    def test_simple_endian_types(self):
+        self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
+        self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
+        self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
+        self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
+
+    all_types = set(np.typecodes['All'])
+    all_pairs = permutations(all_types, 2)
+
+    @pytest.mark.parametrize("pair", all_pairs)
+    def test_pairs(self, pair):
+        """
+        Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')]
+        Example: np.dtype('d,I') -> dtype([('f0', '<f8'), ('f1', '<u4')])
+        """
+        pair_type = np.dtype('{},{}'.format(*pair))
+        expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])
+        assert_equal(pair_type, expected)
+
+
+class TestClassGetItem:
+    def test_dtype(self) -> None:
+        alias = np.dtype[Any]
+        assert isinstance(alias, types.GenericAlias)
+        assert alias.__origin__ is np.dtype
+
+    @pytest.mark.parametrize("code", np.typecodes["All"])
+    def test_dtype_subclass(self, code: str) -> None:
+        cls = type(np.dtype(code))
+        alias = cls[Any]
+        assert isinstance(alias, types.GenericAlias)
+        assert alias.__origin__ is cls
+
+    @pytest.mark.parametrize("arg_len", range(4))
+    def test_subscript_tuple(self, arg_len: int) -> None:
+        arg_tup = (Any,) * arg_len
+        if arg_len == 1:
+            assert np.dtype[arg_tup]
+        else:
+            with pytest.raises(TypeError):
+                np.dtype[arg_tup]
+
+    def test_subscript_scalar(self) -> None:
+        assert np.dtype[Any]
+
+
+def test_result_type_integers_and_unitless_timedelta64():
+    # Regression test for gh-20077.  The following call of `result_type`
+    # used to cause a segmentation fault.
+    td = np.timedelta64(4)
+    result = np.result_type(0, td)
+    assert_dtype_equal(result, td.dtype)
+
+
+def test_creating_dtype_with_dtype_class_errors():
+    # Regression test for #25031, calling `np.dtype` with itself segfaulted.
+    with pytest.raises(TypeError, match="Cannot convert np.dtype into a"):
+        np.array(np.ones(10), dtype=np.dtype)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_einsum.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_einsum.py
new file mode 100644
index 00000000..702be248
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_einsum.py
@@ -0,0 +1,1248 @@
+import itertools
+import sys
+import platform
+
+import pytest
+
+import numpy as np
+from numpy.testing import (
+    assert_, assert_equal, assert_array_equal, assert_almost_equal,
+    assert_raises, suppress_warnings, assert_raises_regex, assert_allclose
+    )
+
+try:
+    COMPILERS = np.show_config(mode="dicts")["Compilers"]
+    USING_CLANG_CL = COMPILERS["c"]["name"] == "clang-cl"
+except TypeError:
+    USING_CLANG_CL = False
+
+# Setup for optimize einsum
+chars = 'abcdefghij'
+sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3])
+global_size_dict = dict(zip(chars, sizes))
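+# Each subscript letter gets a fixed dimension size so optimize_compare()
+# below can build random operands with mutually compatible shapes (zip stops
+# at the shorter sequence, so the final size entry is unused).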
+
+
+class TestEinsum:
+    def test_einsum_errors(self):
+        for do_opt in [True, False]:
+            # Need enough arguments
+            assert_raises(ValueError, np.einsum, optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "", optimize=do_opt)
+
+            # subscripts must be a string
+            assert_raises(TypeError, np.einsum, 0, 0, optimize=do_opt)
+
+            # out parameter must be an array
+            assert_raises(TypeError, np.einsum, "", 0, out='test',
+                          optimize=do_opt)
+
+            # order parameter must be a valid order
+            assert_raises(ValueError, np.einsum, "", 0, order='W',
+                          optimize=do_opt)
+
+            # casting parameter must be a valid casting
+            assert_raises(ValueError, np.einsum, "", 0, casting='blah',
+                          optimize=do_opt)
+
+            # dtype parameter must be a valid dtype
+            assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type',
+                          optimize=do_opt)
+
+            # other keyword arguments are rejected
+            assert_raises(TypeError, np.einsum, "", 0, bad_arg=0,
+                          optimize=do_opt)
+
+            # issue 4528 revealed a segfault with this call
+            assert_raises(TypeError, np.einsum, *(None,)*63, optimize=do_opt)
+
+            # number of operands must match count in subscripts string
+            assert_raises(ValueError, np.einsum, "", 0, 0, optimize=do_opt)
+            assert_raises(ValueError, np.einsum, ",", 0, [0], [0],
+                          optimize=do_opt)
+            assert_raises(ValueError, np.einsum, ",", [0], optimize=do_opt)
+
+            # can't have more subscripts than dimensions in the operand
+            assert_raises(ValueError, np.einsum, "i", 0, optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "ij", [0, 0], optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "...i", 0, optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "i...j", [0, 0], optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "i...", 0, optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "ij...", [0, 0], optimize=do_opt)
+
+            # invalid ellipsis
+            assert_raises(ValueError, np.einsum, "i..", [0, 0], optimize=do_opt)
+            assert_raises(ValueError, np.einsum, ".i...", [0, 0], optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "j->..j", [0, 0], optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "j->.j...", [0, 0], optimize=do_opt)
+
+            # invalid subscript character
+            assert_raises(ValueError, np.einsum, "i%...", [0, 0], optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "...j$", [0, 0], optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "i->&", [0, 0], optimize=do_opt)
+
+            # output subscripts must appear in input
+            assert_raises(ValueError, np.einsum, "i->ij", [0, 0], optimize=do_opt)
+
+            # output subscripts may only be specified once
+            assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]],
+                          optimize=do_opt)
+
+            # dimensions must match when being collapsed
+            assert_raises(ValueError, np.einsum, "ii",
+                          np.arange(6).reshape(2, 3), optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "ii->i",
+                          np.arange(6).reshape(2, 3), optimize=do_opt)
+
+            # broadcasting to new dimensions must be enabled explicitly
+            assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3),
+                          optimize=do_opt)
+            assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
+                          out=np.arange(4).reshape(2, 2), optimize=do_opt)
+            with assert_raises_regex(ValueError, "'b'"):
+                # gh-11221 - 'c' erroneously appeared in the error message
+                a = np.ones((3, 3, 4, 5, 6))
+                b = np.ones((3, 4, 5))
+                np.einsum('aabcb,abc', a, b)
+
+            # Check order kwarg, asanyarray allows 1d to pass through
+            assert_raises(ValueError, np.einsum, "i->i", np.arange(6).reshape(-1, 1),
+                          optimize=do_opt, order='d')
+
+    def test_einsum_object_errors(self):
+        # Exceptions created by object arithmetic should
+        # successfully propagate
+
+        class CustomException(Exception):
+            pass
+
+        class DestructoBox:
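+            # Arithmetic succeeds until the accumulated value reaches the
+            # `destruct` threshold, then raises CustomException; this lets
+            # each test fail at a chosen point inside the einsum inner loops.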
+
+            def __init__(self, value, destruct):
+                self._val = value
+                self._destruct = destruct
+
+            def __add__(self, other):
+                tmp = self._val + other._val
+                if tmp >= self._destruct:
+                    raise CustomException
+                else:
+                    self._val = tmp
+                    return self
+
+            def __radd__(self, other):
+                if other == 0:
+                    return self
+                else:
+                    return self.__add__(other)
+
+            def __mul__(self, other):
+                tmp = self._val * other._val
+                if tmp >= self._destruct:
+                    raise CustomException
+                else:
+                    self._val = tmp
+                    return self
+
+            def __rmul__(self, other):
+                if other == 0:
+                    return self
+                else:
+                    return self.__mul__(other)
+
+        a = np.array([DestructoBox(i, 5) for i in range(1, 10)],
+                     dtype='object').reshape(3, 3)
+
+        # raised from unbuffered_loop_nop1_ndim2
+        assert_raises(CustomException, np.einsum, "ij->i", a)
+
+        # raised from unbuffered_loop_nop1_ndim3
+        b = np.array([DestructoBox(i, 100) for i in range(0, 27)],
+                     dtype='object').reshape(3, 3, 3)
+        assert_raises(CustomException, np.einsum, "i...k->...", b)
+
+        # raised from unbuffered_loop_nop2_ndim2
+        b = np.array([DestructoBox(i, 55) for i in range(1, 4)],
+                     dtype='object')
+        assert_raises(CustomException, np.einsum, "ij, j", a, b)
+
+        # raised from unbuffered_loop_nop2_ndim3
+        assert_raises(CustomException, np.einsum, "ij, jh", a, a)
+
+        # raised from PyArray_EinsteinSum
+        assert_raises(CustomException, np.einsum, "ij->", a)
+
+    def test_einsum_views(self):
+        # pass-through
+        for do_opt in [True, False]:
+            a = np.arange(6)
+            a.shape = (2, 3)
+
+            b = np.einsum("...", a, optimize=do_opt)
+            assert_(b.base is a)
+
+            b = np.einsum(a, [Ellipsis], optimize=do_opt)
+            assert_(b.base is a)
+
+            b = np.einsum("ij", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, a)
+
+            b = np.einsum(a, [0, 1], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, a)
+
+            # output is writeable whenever input is writeable
+            b = np.einsum("...", a, optimize=do_opt)
+            assert_(b.flags['WRITEABLE'])
+            a.flags['WRITEABLE'] = False
+            b = np.einsum("...", a, optimize=do_opt)
+            assert_(not b.flags['WRITEABLE'])
+
+            # transpose
+            a = np.arange(6)
+            a.shape = (2, 3)
+
+            b = np.einsum("ji", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, a.T)
+
+            b = np.einsum(a, [1, 0], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, a.T)
+
+            # diagonal
+            a = np.arange(9)
+            a.shape = (3, 3)
+
+            b = np.einsum("ii->i", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [a[i, i] for i in range(3)])
+
+            b = np.einsum(a, [0, 0], [0], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [a[i, i] for i in range(3)])
+
+            # diagonal with various ways of broadcasting an additional dimension
+            a = np.arange(27)
+            a.shape = (3, 3, 3)
+
+            b = np.einsum("...ii->...i", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
+
+            b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
+
+            b = np.einsum("ii...->...i", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [[x[i, i] for i in range(3)]
+                             for x in a.transpose(2, 0, 1)])
+
+            b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [[x[i, i] for i in range(3)]
+                             for x in a.transpose(2, 0, 1)])
+
+            b = np.einsum("...ii->i...", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [a[:, i, i] for i in range(3)])
+
+            b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [a[:, i, i] for i in range(3)])
+
+            b = np.einsum("jii->ij", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [a[:, i, i] for i in range(3)])
+
+            b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [a[:, i, i] for i in range(3)])
+
+            b = np.einsum("ii...->i...", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
+
+            b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
+
+            b = np.einsum("i...i->i...", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
+
+            b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
+
+            b = np.einsum("i...i->...i", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [[x[i, i] for i in range(3)]
+                             for x in a.transpose(1, 0, 2)])
+
+            b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [[x[i, i] for i in range(3)]
+                             for x in a.transpose(1, 0, 2)])
+
+            # triple diagonal
+            a = np.arange(27)
+            a.shape = (3, 3, 3)
+
+            b = np.einsum("iii->i", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [a[i, i, i] for i in range(3)])
+
+            b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, [a[i, i, i] for i in range(3)])
+
+            # swap axes
+            a = np.arange(24)
+            a.shape = (2, 3, 4)
+
+            b = np.einsum("ijk->jik", a, optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, a.swapaxes(0, 1))
+
+            b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt)
+            assert_(b.base is a)
+            assert_equal(b, a.swapaxes(0, 1))
+
+    @np._no_nep50_warning()
+    def check_einsum_sums(self, dtype, do_opt=False):
+        dtype = np.dtype(dtype)
+        # Check various sums.  Does many sizes to exercise unrolled loops.
+
+        # sum(a, axis=-1)
+        for n in range(1, 17):
+            a = np.arange(n, dtype=dtype)
+            b = np.sum(a, axis=-1)
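+            # np.sum promotes small integer types, so cast the reference back
+            # to the dtype under test for an exact comparison (plain Python
+            # objects from object arrays have no astype, hence the guard):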
+            if hasattr(b, 'astype'):
+                b = b.astype(dtype)
+            assert_equal(np.einsum("i->", a, optimize=do_opt), b)
+            assert_equal(np.einsum(a, [0], [], optimize=do_opt), b)
+
+        for n in range(1, 17):
+            a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
+            b = np.sum(a, axis=-1)
+            if hasattr(b, 'astype'):
+                b = b.astype(dtype)
+            assert_equal(np.einsum("...i->...", a, optimize=do_opt), b)
+            assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt), b)
+
+        # sum(a, axis=0)
+        for n in range(1, 17):
+            a = np.arange(2*n, dtype=dtype).reshape(2, n)
+            b = np.sum(a, axis=0)
+            if hasattr(b, 'astype'):
+                b = b.astype(dtype)
+            assert_equal(np.einsum("i...->...", a, optimize=do_opt), b)
+            assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b)
+
+        for n in range(1, 17):
+            a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
+            b = np.sum(a, axis=0)
+            if hasattr(b, 'astype'):
+                b = b.astype(dtype)
+            assert_equal(np.einsum("i...->...", a, optimize=do_opt), b)
+            assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b)
+
+        # trace(a)
+        for n in range(1, 17):
+            a = np.arange(n*n, dtype=dtype).reshape(n, n)
+            b = np.trace(a)
+            if hasattr(b, 'astype'):
+                b = b.astype(dtype)
+            assert_equal(np.einsum("ii", a, optimize=do_opt), b)
+            assert_equal(np.einsum(a, [0, 0], optimize=do_opt), b)
+
+            # gh-15961: should accept numpy int64 type in subscript list
+            np_array = np.asarray([0, 0])
+            assert_equal(np.einsum(a, np_array, optimize=do_opt), b)
+            assert_equal(np.einsum(a, list(np_array), optimize=do_opt), b)
+
+        # multiply(a, b)
+        assert_equal(np.einsum("..., ...", 3, 4), 12)  # scalar case
+        for n in range(1, 17):
+            a = np.arange(3 * n, dtype=dtype).reshape(3, n)
+            b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
+            assert_equal(np.einsum("..., ...", a, b, optimize=do_opt),
+                         np.multiply(a, b))
+            assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt),
+                         np.multiply(a, b))
+
+        # inner(a,b)
+        for n in range(1, 17):
+            a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
+            b = np.arange(n, dtype=dtype)
+            assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b))
+            assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt),
+                         np.inner(a, b))
+
+        for n in range(1, 11):
+            a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2)
+            b = np.arange(n, dtype=dtype)
+            assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt),
+                         np.inner(a.T, b.T).T)
+            assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt),
+                         np.inner(a.T, b.T).T)
+
+        # outer(a,b)
+        for n in range(1, 17):
+            a = np.arange(3, dtype=dtype)+1
+            b = np.arange(n, dtype=dtype)+1
+            assert_equal(np.einsum("i,j", a, b, optimize=do_opt),
+                         np.outer(a, b))
+            assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt),
+                         np.outer(a, b))
+
+        # Suppress the complex warnings for the 'as f8' tests
+        with suppress_warnings() as sup:
+            sup.filter(np.ComplexWarning)
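+            # (the 'f8' output casts discard imaginary parts for the complex
+            # dtypes under test, which would otherwise emit ComplexWarning)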
+
+            # matvec(a,b) / a.dot(b) where a is matrix, b is vector
+            for n in range(1, 17):
+                a = np.arange(4*n, dtype=dtype).reshape(4, n)
+                b = np.arange(n, dtype=dtype)
+                assert_equal(np.einsum("ij, j", a, b, optimize=do_opt),
+                             np.dot(a, b))
+                assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt),
+                             np.dot(a, b))
+
+                c = np.arange(4, dtype=dtype)
+                np.einsum("ij,j", a, b, out=c,
+                          dtype='f8', casting='unsafe', optimize=do_opt)
+                assert_equal(c,
+                             np.dot(a.astype('f8'),
+                                    b.astype('f8')).astype(dtype))
+                c[...] = 0
+                np.einsum(a, [0, 1], b, [1], out=c,
+                          dtype='f8', casting='unsafe', optimize=do_opt)
+                assert_equal(c,
+                             np.dot(a.astype('f8'),
+                                    b.astype('f8')).astype(dtype))
+
+            for n in range(1, 17):
+                a = np.arange(4*n, dtype=dtype).reshape(4, n)
+                b = np.arange(n, dtype=dtype)
+                assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt),
+                             np.dot(b.T, a.T))
+                assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt),
+                             np.dot(b.T, a.T))
+
+                c = np.arange(4, dtype=dtype)
+                np.einsum("ji,j", a.T, b.T, out=c,
+                          dtype='f8', casting='unsafe', optimize=do_opt)
+                assert_equal(c,
+                             np.dot(b.T.astype('f8'),
+                                    a.T.astype('f8')).astype(dtype))
+                c[...] = 0
+                np.einsum(a.T, [1, 0], b.T, [1], out=c,
+                          dtype='f8', casting='unsafe', optimize=do_opt)
+                assert_equal(c,
+                             np.dot(b.T.astype('f8'),
+                                    a.T.astype('f8')).astype(dtype))
+
+            # matmat(a,b) / a.dot(b) where a is matrix, b is matrix
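+            # (skip 'f2' for n >= 8: the integer-valued results then exceed
+            # float16's 11-bit significand and would no longer be exact)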
+            for n in range(1, 17):
+                if n < 8 or dtype != 'f2':
+                    a = np.arange(4*n, dtype=dtype).reshape(4, n)
+                    b = np.arange(n*6, dtype=dtype).reshape(n, 6)
+                    assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt),
+                                 np.dot(a, b))
+                    assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt),
+                                 np.dot(a, b))
+
+            for n in range(1, 17):
+                a = np.arange(4*n, dtype=dtype).reshape(4, n)
+                b = np.arange(n*6, dtype=dtype).reshape(n, 6)
+                c = np.arange(24, dtype=dtype).reshape(4, 6)
+                np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe',
+                          optimize=do_opt)
+                assert_equal(c,
+                             np.dot(a.astype('f8'),
+                                    b.astype('f8')).astype(dtype))
+                c[...] = 0
+                np.einsum(a, [0, 1], b, [1, 2], out=c,
+                          dtype='f8', casting='unsafe', optimize=do_opt)
+                assert_equal(c,
+                             np.dot(a.astype('f8'),
+                                    b.astype('f8')).astype(dtype))
+
+            # matrix triple product (note this is not currently an efficient
+            # way to multiply 3 matrices)
+            a = np.arange(12, dtype=dtype).reshape(3, 4)
+            b = np.arange(20, dtype=dtype).reshape(4, 5)
+            c = np.arange(30, dtype=dtype).reshape(5, 6)
+            if dtype != 'f2':
+                assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt),
+                             a.dot(b).dot(c))
+                assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3],
+                                       optimize=do_opt), a.dot(b).dot(c))
+
+            d = np.arange(18, dtype=dtype).reshape(3, 6)
+            np.einsum("ij,jk,kl", a, b, c, out=d,
+                      dtype='f8', casting='unsafe', optimize=do_opt)
+            tgt = a.astype('f8').dot(b.astype('f8'))
+            tgt = tgt.dot(c.astype('f8')).astype(dtype)
+            assert_equal(d, tgt)
+
+            d[...] = 0
+            np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d,
+                      dtype='f8', casting='unsafe', optimize=do_opt)
+            tgt = a.astype('f8').dot(b.astype('f8'))
+            tgt = tgt.dot(c.astype('f8')).astype(dtype)
+            assert_equal(d, tgt)
+
+            # tensordot(a, b)
+            if np.dtype(dtype) != np.dtype('f2'):
+                a = np.arange(60, dtype=dtype).reshape(3, 4, 5)
+                b = np.arange(24, dtype=dtype).reshape(4, 3, 2)
+                assert_equal(np.einsum("ijk, jil -> kl", a, b),
+                             np.tensordot(a, b, axes=([1, 0], [0, 1])))
+                assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]),
+                             np.tensordot(a, b, axes=([1, 0], [0, 1])))
+
+                c = np.arange(10, dtype=dtype).reshape(5, 2)
+                np.einsum("ijk,jil->kl", a, b, out=c,
+                          dtype='f8', casting='unsafe', optimize=do_opt)
+                assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
+                             axes=([1, 0], [0, 1])).astype(dtype))
+                c[...] = 0
+                np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c,
+                          dtype='f8', casting='unsafe', optimize=do_opt)
+                assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
+                             axes=([1, 0], [0, 1])).astype(dtype))
+
+        # logical_and(logical_and(a!=0, b!=0), c!=0)
+        neg_val = -2 if dtype.kind != "u" else np.iinfo(dtype).max - 1
+        a = np.array([1,   3,   neg_val, 0,  12,  13,   0,   1], dtype=dtype)
+        b = np.array([0,   3.5, 0., neg_val,  0,   1,    3,   12], dtype=dtype)
+        c = np.array([True, True, False, True, True, False, True, True])
+
+        assert_equal(np.einsum("i,i,i->i", a, b, c,
+                     dtype='?', casting='unsafe', optimize=do_opt),
+                     np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
+        assert_equal(np.einsum(a, [0], b, [0], c, [0], [0],
+                     dtype='?', casting='unsafe'),
+                     np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
+
+        a = np.arange(9, dtype=dtype)
+        assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a))
+        assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a))
+        assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a))
+        assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a))
+
+        # Various stride0, contiguous, and SSE aligned variants
+        for n in range(1, 25):
+            a = np.arange(n, dtype=dtype)
+            if np.dtype(dtype).itemsize > 1:
+                assert_equal(np.einsum("...,...", a, a, optimize=do_opt),
+                             np.multiply(a, a))
+                assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a))
+                assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a)
+                assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a)
+                assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a))
+                assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a))
+
+                assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt),
+                             np.multiply(a[1:], a[:-1]))
+                assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt),
+                             np.dot(a[1:], a[:-1]))
+                assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:])
+                assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:])
+                assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt),
+                             2*np.sum(a[1:]))
+                assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt),
+                             2*np.sum(a[1:]))
+
+        # An object array, summed as the data type
+        a = np.arange(9, dtype=object)
+
+        b = np.einsum("i->", a, dtype=dtype, casting='unsafe')
+        assert_equal(b, np.sum(a))
+        if hasattr(b, "dtype"):
+            # Can be a python object when dtype is object
+            assert_equal(b.dtype, np.dtype(dtype))
+
+        b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe')
+        assert_equal(b, np.sum(a))
+        if hasattr(b, "dtype"):
+            # Can be a python object when dtype is object
+            assert_equal(b.dtype, np.dtype(dtype))
+
+        # A case which was failing (ticket #1885)
+        p = np.arange(2) + 1
+        q = np.arange(4).reshape(2, 2) + 3
+        r = np.arange(4).reshape(2, 2) + 7
+        assert_equal(np.einsum('z,mz,zm->', p, q, r), 253)
+
+        # singleton dimensions broadcast (gh-10343)
+        p = np.ones((10,2))
+        q = np.ones((1,2))
+        assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
+                           np.einsum('ij,ij->j', p, q, optimize=False))
+        assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
+                           [10.] * 2)
+
+        # a blas-compatible contraction broadcasting case which was failing
+        # for optimize=True (ticket #10930)
+        x = np.array([2., 3.])
+        y = np.array([4.])
+        assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.)
+        assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.)
+
+        # all-ones array was bypassing bug (ticket #10930)
+        p = np.ones((1, 5)) / 2
+        q = np.ones((5, 5)) / 2
+        for optimize in (True, False):
+            assert_array_equal(np.einsum("...ij,...jk->...ik", p, p,
+                                         optimize=optimize),
+                               np.einsum("...ij,...jk->...ik", p, q,
+                                         optimize=optimize))
+            assert_array_equal(np.einsum("...ij,...jk->...ik", p, q,
+                                         optimize=optimize),
+                               np.full((1, 5), 1.25))
+
+        # Cases which were failing (gh-10899)
+        x = np.eye(2, dtype=dtype)
+        y = np.ones(2, dtype=dtype)
+        assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize),
+                           [2.])  # contig_contig_outstride0_two
+        assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize),
+                           [2.])  # stride0_contig_outstride0_two
+        assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize),
+                           [2.])  # contig_stride0_outstride0_two
+
+    def test_einsum_sums_int8(self):
+        if (
+                (sys.platform == 'darwin' and platform.machine() == 'x86_64')
+                or
+                USING_CLANG_CL
+        ):
+            pytest.xfail('Fails on macOS x86-64 and when using clang-cl '
+                         'with Meson, see gh-23838')
+        self.check_einsum_sums('i1')
+
+    def test_einsum_sums_uint8(self):
+        if (
+                (sys.platform == 'darwin' and platform.machine() == 'x86_64')
+                or
+                USING_CLANG_CL
+        ):
+            pytest.xfail('Fails on macOS x86-64 and when using clang-cl '
+                         'with Meson, see gh-23838')
+        self.check_einsum_sums('u1')
+
+    def test_einsum_sums_int16(self):
+        self.check_einsum_sums('i2')
+
+    def test_einsum_sums_uint16(self):
+        self.check_einsum_sums('u2')
+
+    def test_einsum_sums_int32(self):
+        self.check_einsum_sums('i4')
+        self.check_einsum_sums('i4', True)
+
+    def test_einsum_sums_uint32(self):
+        self.check_einsum_sums('u4')
+        self.check_einsum_sums('u4', True)
+
+    def test_einsum_sums_int64(self):
+        self.check_einsum_sums('i8')
+
+    def test_einsum_sums_uint64(self):
+        self.check_einsum_sums('u8')
+
+    def test_einsum_sums_float16(self):
+        self.check_einsum_sums('f2')
+
+    def test_einsum_sums_float32(self):
+        self.check_einsum_sums('f4')
+
+    def test_einsum_sums_float64(self):
+        self.check_einsum_sums('f8')
+        self.check_einsum_sums('f8', True)
+
+    def test_einsum_sums_longdouble(self):
+        self.check_einsum_sums(np.longdouble)
+
+    def test_einsum_sums_cfloat64(self):
+        self.check_einsum_sums('c8')
+        self.check_einsum_sums('c8', True)
+
+    def test_einsum_sums_cfloat128(self):
+        self.check_einsum_sums('c16')
+
+    def test_einsum_sums_clongdouble(self):
+        self.check_einsum_sums(np.clongdouble)
+
+    def test_einsum_sums_object(self):
+        self.check_einsum_sums('object')
+        self.check_einsum_sums('object', True)
+
+    def test_einsum_misc(self):
+        # This call used to crash because of a bug in
+        # PyArray_AssignZero
+        a = np.ones((1, 2))
+        b = np.ones((2, 2, 1))
+        assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
+        assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]])
+
+        # Regression test for issue #10369 (test unicode inputs with Python 2)
+        assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
+        assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20)
+        assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4],
+                               optimize='greedy'), 20)
+
+        # The iterator had an issue with buffering this reduction
+        a = np.ones((5, 12, 4, 2, 3), np.int64)
+        b = np.ones((5, 12, 11), np.int64)
+        assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b),
+                     np.einsum('ijklm,ijn->', a, b))
+        assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True),
+                     np.einsum('ijklm,ijn->', a, b, optimize=True))
+
+        # Issue #2027, was a problem in the contiguous 3-argument
+        # inner loop implementation
+        a = np.arange(1, 3)
+        b = np.arange(1, 5).reshape(2, 2)
+        c = np.arange(1, 9).reshape(4, 2)
+        assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
+                     [[[1,  3], [3,  9], [5, 15], [7, 21]],
+                     [[8, 16], [16, 32], [24, 48], [32, 64]]])
+        assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True),
+                     [[[1,  3], [3,  9], [5, 15], [7, 21]],
+                     [[8, 16], [16, 32], [24, 48], [32, 64]]])
+
+        # Ensure explicitly setting out=None does not cause an error
+        # see issue gh-15776 and issue gh-15256
+        assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]])
+
+    def test_object_loop(self):
+
+        class Mult:
+            def __mul__(self, other):
+                return 42
+
+        objMult = np.array([Mult()])
+        objNULL = np.ndarray(buffer = b'\0' * np.intp(0).itemsize, shape=1, dtype=object)
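+        # The zero-filled buffer leaves the single object slot as a NULL
+        # pointer; einsum has to raise rather than dereference it.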
+
+        with pytest.raises(TypeError):
+            np.einsum("i,j", [1], objNULL)
+        with pytest.raises(TypeError):
+            np.einsum("i,j", objNULL, [1])
+        assert np.einsum("i,j", objMult, objMult) == 42
+
+    def test_subscript_range(self):
+        # Issue #7741: make sure that all letters of the Latin alphabet
+        # (both uppercase and lowercase) can be used when creating a
+        # subscript from arrays
+        a = np.ones((2, 3))
+        b = np.ones((3, 4))
+        np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False)
+        np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False)
+        np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False)
+        assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False))
+        assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False))
+
+    def test_einsum_broadcast(self):
+        # Issue #2455: the change in ellipsis handling removed the 'middle
+        # broadcast' error and uses only the 'RIGHT' iteration in
+        # prepare_op_axes, which adds automatic broadcasting on the left
+        # where it belongs; broadcasting on the right has to be explicit.
+        # We need to test the optimized parsing as well.
+
+        A = np.arange(2 * 3 * 4).reshape(2, 3, 4)
+        B = np.arange(3)
+        ref = np.einsum('ijk,j->ijk', A, B, optimize=False)
+        for opt in [True, False]:
+            assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref)
+            assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref)
+            assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref)  # used to raise error
+
+        A = np.arange(12).reshape((4, 3))
+        B = np.arange(6).reshape((3, 2))
+        ref = np.einsum('ik,kj->ij', A, B, optimize=False)
+        for opt in [True, False]:
+            assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref)
+            assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref)
+            assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref)  # used to raise error
+            assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref)  # used to raise error
+
+        dims = [2, 3, 4, 5]
+        a = np.arange(np.prod(dims)).reshape(dims)
+        v = np.arange(dims[2])
+        ref = np.einsum('ijkl,k->ijl', a, v, optimize=False)
+        for opt in [True, False]:
+            assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref)
+            assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref)  # used to raise error
+            assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref)
+
+        J, K, M = 160, 160, 120
+        A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M)
+        B = np.arange(J * K * M * 3).reshape(J, K, M, 3)
+        ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False)
+        for opt in [True, False]:
+            assert_equal(np.einsum('...lmn,lmno->...o', A, B,
+                                   optimize=opt), ref)  # used to raise error
+
+    def test_einsum_fixedstridebug(self):
+        # Issue #4485 obscure einsum bug
+        # This case revealed a bug in nditer where it reported a stride
+        # as 'fixed' (0) when it was in fact not fixed during processing
+        # (0 or 4). The reason for the bug was that the check for a fixed
+        # stride was using the information from the 2D inner loop reuse
+        # to restrict the iteration dimensions it had to validate to be
+        # the same, but that 2D inner loop reuse logic is only triggered
+        # during the buffer copying step, and hence it was invalid to
+        # rely on those values. The fix is to check all the dimensions
+        # of the stride in question, which in the test case reveals that
+        # the stride is not fixed.
+        #
+        # NOTE: This test is triggered by the fact that the default
+        #       buffersize used by einsum is 8192; 3*2731 = 8193 is larger
+        #       than that and results in a mismatch between the buffering
+        #       and the striding for operand A.
+        A = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
+        B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16)
+        es = np.einsum('cl, cpx->lpx',  A,  B)
+        tp = np.tensordot(A,  B,  axes=(0,  0))
+        assert_equal(es,  tp)
+        # The following is the original test case from the bug report,
+        # made repeatable by changing random arrays to aranges.
+        A = np.arange(3 * 3).reshape(3, 3).astype(np.float64)
+        B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32)
+        es = np.einsum('cl, cpxy->lpxy',  A, B)
+        tp = np.tensordot(A, B,  axes=(0, 0))
+        assert_equal(es, tp)
+
+    def test_einsum_fixed_collapsingbug(self):
+        # Issue #5147.
+        # The bug only occurred when the output argument of einsum was used.
+        x = np.random.normal(0, 1, (5, 5, 5, 5))
+        y1 = np.zeros((5, 5))
+        np.einsum('aabb->ab', x, out=y1)
+        idx = np.arange(5)
+        y2 = x[idx[:, None], idx[:, None], idx, idx]
+        assert_equal(y1, y2)
+
+    def test_einsum_failed_on_p9_and_s390x(self):
+        # Issues gh-14692 and gh-12689
+        # Bug with signed vs unsigned char errored on power9 and s390x Linux
+        tensor = np.random.random_sample((10, 10, 10, 10))
+        x = np.einsum('ijij->', tensor)
+        y = tensor.trace(axis1=0, axis2=2).trace()
+        assert_allclose(x, y)
+
+    def test_einsum_all_contig_non_contig_output(self):
+        # Issue gh-5907, tests that the all contiguous special case
+        # actually checks the contiguity of the output
+        x = np.ones((5, 5))
+        out = np.ones(10)[::2]
+        correct_base = np.ones(10)
+        correct_base[::2] = 5
+        # Always worked (inner iteration is done with 0-stride):
+        np.einsum('mi,mi,mi->m', x, x, x, out=out)
+        assert_array_equal(out.base, correct_base)
+        # Example 1:
+        out = np.ones(10)[::2]
+        np.einsum('im,im,im->m', x, x, x, out=out)
+        assert_array_equal(out.base, correct_base)
+        # Example 2, buffering causes x to be contiguous but
+        # special cases do not catch the operation before:
+        out = np.ones((2, 2, 2))[..., 0]
+        correct_base = np.ones((2, 2, 2))
+        correct_base[..., 0] = 2
+        x = np.ones((2, 2), np.float32)
+        np.einsum('ij,jk->ik', x, x, out=out)
+        assert_array_equal(out.base, correct_base)
+
+    @pytest.mark.parametrize("dtype",
+             np.typecodes["AllFloat"] + np.typecodes["AllInteger"])
+    def test_different_paths(self, dtype):
+        # Test originally added to cover broken float16 path: gh-20305
+        # Likely most are covered elsewhere, at least partially.
+        dtype = np.dtype(dtype)
+        # Simple test, designed to exercise most specialized code paths,
+        # note the +0.5 for floats.  This makes sure we use a float value
+        # where the results must be exact.
+        arr = (np.arange(7) + 0.5).astype(dtype)
+        scalar = np.array(2, dtype=dtype)
+
+        # contig -> scalar:
+        res = np.einsum('i->', arr)
+        assert res == arr.sum()
+        # contig, contig -> contig:
+        res = np.einsum('i,i->i', arr, arr)
+        assert_array_equal(res, arr * arr)
+        # noncontig, noncontig -> contig:
+        res = np.einsum('i,i->i', arr.repeat(2)[::2], arr.repeat(2)[::2])
+        assert_array_equal(res, arr * arr)
+        # contig + contig -> scalar
+        assert np.einsum('i,i->', arr, arr) == (arr * arr).sum()
+        # contig + scalar -> contig (with out)
+        out = np.ones(7, dtype=dtype)
+        res = np.einsum('i,->i', arr, dtype.type(2), out=out)
+        assert_array_equal(res, arr * dtype.type(2))
+        # scalar + contig -> contig (with out)
+        res = np.einsum(',i->i', scalar, arr)
+        assert_array_equal(res, arr * dtype.type(2))
+        # scalar + contig -> scalar
+        res = np.einsum(',i->', scalar, arr)
+        # Use einsum to compare to not have difference due to sum round-offs:
+        assert res == np.einsum('i->', scalar * arr)
+        # contig + scalar -> scalar
+        res = np.einsum('i,->', arr, scalar)
+        # Use einsum to compare to not have difference due to sum round-offs:
+        assert res == np.einsum('i->', scalar * arr)
+        # contig + contig + contig -> scalar
+        arr = np.array([0.5, 0.5, 0.25, 4.5, 3.], dtype=dtype)
+        res = np.einsum('i,i,i->', arr, arr, arr)
+        assert_array_equal(res, (arr * arr * arr).sum())
+        # four arrays:
+        res = np.einsum('i,i,i,i->', arr, arr, arr, arr)
+        assert_array_equal(res, (arr * arr * arr * arr).sum())
+
+    def test_small_boolean_arrays(self):
+        # See gh-5946.
+        # Use array of True embedded in False.
+        a = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
+        a[...] = True
+        out = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
+        tgt = np.ones((2, 1, 1), dtype=np.bool_)
+        res = np.einsum('...ij,...jk->...ik', a, a, out=out)
+        assert_equal(res, tgt)
+
+    def test_out_is_res(self):
+        a = np.arange(9).reshape(3, 3)
+        res = np.einsum('...ij,...jk->...ik', a, a, out=a)
+        assert res is a
+
+    def optimize_compare(self, subscripts, operands=None):
+        # Tests all paths of the optimization function against
+        # conventional einsum
+        if operands is None:
+            args = [subscripts]
+            terms = subscripts.split('->')[0].split(',')
+            for term in terms:
+                dims = [global_size_dict[x] for x in term]
+                args.append(np.random.rand(*dims))
+        else:
+            args = [subscripts] + operands
+
+        noopt = np.einsum(*args, optimize=False)
+        opt = np.einsum(*args, optimize='greedy')
+        assert_almost_equal(opt, noopt)
+        opt = np.einsum(*args, optimize='optimal')
+        assert_almost_equal(opt, noopt)
+
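+    def test_optimize_compare_usage_sketch(self):
+        # Illustrative sketch (editor addition, not upstream numpy) of the
+        # helper above: optimize=False, 'greedy' and 'optimal' must agree
+        # numerically, here spelled out for one matmul-like contraction.
+        a = np.random.rand(3, 4)
+        b = np.random.rand(4, 5)
+        noopt = np.einsum('ab,bc->ac', a, b, optimize=False)
+        assert_almost_equal(np.einsum('ab,bc->ac', a, b, optimize='greedy'),
+                            noopt)
+        assert_almost_equal(np.einsum('ab,bc->ac', a, b, optimize='optimal'),
+                            noopt)
+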
+    def test_hadamard_like_products(self):
+        # Hadamard outer products
+        self.optimize_compare('a,ab,abc->abc')
+        self.optimize_compare('a,b,ab->ab')
+
+    def test_index_transformations(self):
+        # Simple index transformation cases
+        self.optimize_compare('ea,fb,gc,hd,abcd->efgh')
+        self.optimize_compare('ea,fb,abcd,gc,hd->efgh')
+        self.optimize_compare('abcd,ea,fb,gc,hd->efgh')
+
+    def test_complex(self):
+        # Long test cases
+        self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+        self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+        self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac')
+        self.optimize_compare('abhe,hidj,jgba,hiab,gab')
+        self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac')
+        self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad')
+        self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb')
+        self.optimize_compare('bdhe,acad,hiab,agac,hibd')
+
+    def test_collapse(self):
+        # Inner products
+        self.optimize_compare('ab,ab,c->')
+        self.optimize_compare('ab,ab,c->c')
+        self.optimize_compare('ab,ab,cd,cd->')
+        self.optimize_compare('ab,ab,cd,cd->ac')
+        self.optimize_compare('ab,ab,cd,cd->cd')
+        self.optimize_compare('ab,ab,cd,cd,ef,ef->')
+
+    def test_expand(self):
+        # Outer products
+        self.optimize_compare('ab,cd,ef->abcdef')
+        self.optimize_compare('ab,cd,ef->acdf')
+        self.optimize_compare('ab,cd,de->abcde')
+        self.optimize_compare('ab,cd,de->be')
+        self.optimize_compare('ab,bcd,cd->abcd')
+        self.optimize_compare('ab,bcd,cd->abd')
+
+    def test_edge_cases(self):
+        # Difficult edge cases for optimization
+        self.optimize_compare('eb,cb,fb->cef')
+        self.optimize_compare('dd,fb,be,cdb->cef')
+        self.optimize_compare('bca,cdb,dbf,afc->')
+        self.optimize_compare('dcc,fce,ea,dbf->ab')
+        self.optimize_compare('fdf,cdd,ccd,afe->ae')
+        self.optimize_compare('abcd,ad')
+        self.optimize_compare('ed,fcd,ff,bcf->be')
+        self.optimize_compare('baa,dcf,af,cde->be')
+        self.optimize_compare('bd,db,eac->ace')
+        self.optimize_compare('fff,fae,bef,def->abd')
+        self.optimize_compare('efc,dbc,acf,fd->abe')
+        self.optimize_compare('ba,ac,da->bcd')
+
+    def test_inner_product(self):
+        # Inner products
+        self.optimize_compare('ab,ab')
+        self.optimize_compare('ab,ba')
+        self.optimize_compare('abc,abc')
+        self.optimize_compare('abc,bac')
+        self.optimize_compare('abc,cba')
+
+    def test_random_cases(self):
+        # Randomly built test cases
+        self.optimize_compare('aab,fa,df,ecc->bde')
+        self.optimize_compare('ecb,fef,bad,ed->ac')
+        self.optimize_compare('bcf,bbb,fbf,fc->')
+        self.optimize_compare('bb,ff,be->e')
+        self.optimize_compare('bcb,bb,fc,fff->')
+        self.optimize_compare('fbb,dfd,fc,fc->')
+        self.optimize_compare('afd,ba,cc,dc->bf')
+        self.optimize_compare('adb,bc,fa,cfc->d')
+        self.optimize_compare('bbd,bda,fc,db->acf')
+        self.optimize_compare('dba,ead,cad->bce')
+        self.optimize_compare('aef,fbc,dca->bde')
+
+    def test_combined_views_mapping(self):
+        # gh-10792
+        a = np.arange(9).reshape(1, 1, 3, 1, 3)
+        b = np.einsum('bbcdc->d', a)
+        assert_equal(b, [12])
+
+    def test_broadcasting_dot_cases(self):
+        # Ensures broadcasting cases are not mistaken for GEMM
+
+        a = np.random.rand(1, 5, 4)
+        b = np.random.rand(4, 6)
+        c = np.random.rand(5, 6)
+        d = np.random.rand(10)
+
+        self.optimize_compare('ijk,kl,jl', operands=[a, b, c])
+        self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d])
+
+        e = np.random.rand(1, 1, 5, 4)
+        f = np.random.rand(7, 7)
+        self.optimize_compare('abjk,kl,jl', operands=[e, b, c])
+        self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f])
+
+        # Edge case found in gh-11308
+        g = np.arange(64).reshape(2, 4, 8)
+        self.optimize_compare('obk,ijk->ioj', operands=[g, g])
+
+    def test_output_order(self):
+        # Ensure output order is respected for optimize cases; the below
+        # contraction should yield a reshaped tensor view
+        # gh-16415
+
+        a = np.ones((2, 3, 5), order='F')
+        b = np.ones((4, 3), order='F')
+
+        for opt in [True, False]:
+            tmp = np.einsum('...ft,mf->...mt', a, b, order='a', optimize=opt)
+            assert_(tmp.flags.f_contiguous)
+
+            tmp = np.einsum('...ft,mf->...mt', a, b, order='f', optimize=opt)
+            assert_(tmp.flags.f_contiguous)
+
+            tmp = np.einsum('...ft,mf->...mt', a, b, order='c', optimize=opt)
+            assert_(tmp.flags.c_contiguous)
+
+            tmp = np.einsum('...ft,mf->...mt', a, b, order='k', optimize=opt)
+            assert_(tmp.flags.c_contiguous is False)
+            assert_(tmp.flags.f_contiguous is False)
+
+            tmp = np.einsum('...ft,mf->...mt', a, b, optimize=opt)
+            assert_(tmp.flags.c_contiguous is False)
+            assert_(tmp.flags.f_contiguous is False)
+
+        c = np.ones((4, 3), order='C')
+        for opt in [True, False]:
+            tmp = np.einsum('...ft,mf->...mt', a, c, order='a', optimize=opt)
+            assert_(tmp.flags.c_contiguous)
+
+        d = np.ones((2, 3, 5), order='C')
+        for opt in [True, False]:
+            tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt)
+            assert_(tmp.flags.c_contiguous)
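+
+    def test_output_order_sketch(self):
+        # Illustrative sketch (editor addition, not upstream numpy):
+        # order='C'/'F' force the output layout checked above, regardless
+        # of the input layout.
+        a = np.ones((2, 3), order='F')
+        assert_(np.einsum('ij->ij', a, order='C').flags.c_contiguous)
+        assert_(np.einsum('ij->ij', a, order='F').flags.f_contiguous)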
+
+class TestEinsumPath:
+    def build_operands(self, string, size_dict=global_size_dict):
+        # Builds random operands with dimensions taken from size_dict
+        operands = [string]
+        terms = string.split('->')[0].split(',')
+        for term in terms:
+            dims = [size_dict[x] for x in term]
+            operands.append(np.random.rand(*dims))
+
+        return operands
+
+    def assert_path_equal(self, comp, benchmark):
+        # Checks that two contraction paths (lists of tuples) are equivalent
+        ret = (len(comp) == len(benchmark))
+        assert_(ret)
+        for pos in range(len(comp) - 1):
+            ret &= isinstance(comp[pos + 1], tuple)
+            ret &= (comp[pos + 1] == benchmark[pos + 1])
+        assert_(ret)
+
+    def test_memory_constraints(self):
+        # Ensure memory constraints are satisfied
+
+        outer_test = self.build_operands('a,b,c->abc')
+
+        path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
+
+        path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
+
+        long_test = self.build_operands('acdf,jbje,gihb,hfac')
+        path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+        path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+    def test_long_paths(self):
+        # Long complex cases
+
+        # Long test 1
+        long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+        path, path_str = np.einsum_path(*long_test1, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path',
+                                      (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*long_test1, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path',
+                                      (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
+
+        # Long test 2
+        long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb')
+        path, path_str = np.einsum_path(*long_test2, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path',
+                                      (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*long_test2, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path',
+                                      (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)])
+
+    def test_edge_paths(self):
+        # Difficult edge cases
+
+        # Edge test1
+        edge_test1 = self.build_operands('eb,cb,fb->cef')
+        path, path_str = np.einsum_path(*edge_test1, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test1, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
+
+        # Edge test2
+        edge_test2 = self.build_operands('dd,fb,be,cdb->cef')
+        path, path_str = np.einsum_path(*edge_test2, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test2, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
+
+        # Edge test3
+        edge_test3 = self.build_operands('bca,cdb,dbf,afc->')
+        path, path_str = np.einsum_path(*edge_test3, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test3, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+        # Edge test4
+        edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab')
+        path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+        # Edge test5
+        edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->',
+                                         size_dict={"a": 20, "b": 20, "c": 20, "d": 20})
+        path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+        path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+    def test_path_type_input(self):
+        # Test explicit path handling
+        path_test = self.build_operands('dcc,fce,ea,dbf->ab')
+
+        path, path_str = np.einsum_path(*path_test, optimize=False)
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+        path, path_str = np.einsum_path(*path_test, optimize=True)
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
+
+        exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)]
+        path, path_str = np.einsum_path(*path_test, optimize=exp_path)
+        self.assert_path_equal(path, exp_path)
+
+        # Double check einsum works on the input path
+        noopt = np.einsum(*path_test, optimize=False)
+        opt = np.einsum(*path_test, optimize=exp_path)
+        assert_almost_equal(noopt, opt)
+
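+    def test_path_format_sketch(self):
+        # Illustrative sketch (editor addition, not upstream numpy): each
+        # tuple in a path lists the operand positions contracted at that
+        # step; contracted operands are removed and the intermediate is
+        # appended to the end of the operand list.
+        ops = self.build_operands('ab,bc,cd->ad')
+        path, _ = np.einsum_path(*ops, optimize='optimal')
+        assert_(path[0] == 'einsum_path')
+        assert_(all(isinstance(step, tuple) for step in path[1:]))
+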
+    def test_path_type_input_internal_trace(self):
+        #gh-20962
+        path_test = self.build_operands('cab,cdd->ab')
+        exp_path = ['einsum_path', (1,), (0, 1)]
+
+        path, path_str = np.einsum_path(*path_test, optimize=exp_path)
+        self.assert_path_equal(path, exp_path)
+
+        # Double check einsum works on the input path
+        noopt = np.einsum(*path_test, optimize=False)
+        opt = np.einsum(*path_test, optimize=exp_path)
+        assert_almost_equal(noopt, opt)
+
+    def test_path_type_input_invalid(self):
+        path_test = self.build_operands('ab,bc,cd,de->ae')
+        exp_path = ['einsum_path', (2, 3), (0, 1)]
+        assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
+        assert_raises(
+            RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
+
+        path_test = self.build_operands('a,a,a->a')
+        exp_path = ['einsum_path', (1,), (0, 1)]
+        assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
+        assert_raises(
+            RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
+
+    def test_spaces(self):
+        #gh-10794
+        arr = np.array([[1]])
+        for sp in itertools.product(['', ' '], repeat=4):
+            # no error for any spacing
+            np.einsum('{}...a{}->{}...a{}'.format(*sp), arr)
+
+def test_overlap():
+    a = np.arange(9, dtype=int).reshape(3, 3)
+    b = np.arange(9, dtype=int).reshape(3, 3)
+    d = np.dot(a, b)
+    # sanity check
+    c = np.einsum('ij,jk->ik', a, b)
+    assert_equal(c, d)
+    #gh-10080, out overlaps one of the operands
+    c = np.einsum('ij,jk->ik', a, b, out=b)
+    assert_equal(c, d)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_errstate.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_errstate.py
new file mode 100644
index 00000000..3a5647f6
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_errstate.py
@@ -0,0 +1,61 @@
+import pytest
+import sysconfig
+
+import numpy as np
+from numpy.testing import assert_, assert_raises, IS_WASM
+
+# The floating point emulation on ARM EABI systems lacking a hardware FPU is
+# known to be buggy. This is an attempt to identify these hosts. It may not
+# catch all possible cases, but it catches the known cases of gh-413 and
+# gh-15562.
+hosttype = sysconfig.get_config_var('HOST_GNU_TYPE')
+arm_softfloat = False if hosttype is None else hosttype.endswith('gnueabi')
+
+class TestErrstate:
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.skipif(arm_softfloat,
+                        reason='platform/cpu issue with FPU (gh-413,-15562)')
+    def test_invalid(self):
+        with np.errstate(all='raise', under='ignore'):
+            a = -np.arange(3)
+            # This should work
+            with np.errstate(invalid='ignore'):
+                np.sqrt(a)
+            # While this should fail!
+            with assert_raises(FloatingPointError):
+                np.sqrt(a)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.skipif(arm_softfloat,
+                        reason='platform/cpu issue with FPU (gh-15562)')
+    def test_divide(self):
+        with np.errstate(all='raise', under='ignore'):
+            a = -np.arange(3)
+            # This should work
+            with np.errstate(divide='ignore'):
+                a // 0
+            # While this should fail!
+            with assert_raises(FloatingPointError):
+                a // 0
+            # As should this, see gh-15562
+            with assert_raises(FloatingPointError):
+                a // a
+
+    def test_errcall(self):
+        def foo(*args):
+            print(args)
+
+        olderrcall = np.geterrcall()
+        with np.errstate(call=foo):
+            assert_(np.geterrcall() is foo, 'call is not foo')
+            with np.errstate(call=None):
+                assert_(np.geterrcall() is None, 'call is not None')
+        assert_(np.geterrcall() is olderrcall, 'call is not olderrcall')
+
+    def test_errstate_decorator(self):
+        @np.errstate(all='ignore')
+        def foo():
+            a = -np.arange(3)
+            a // 0
+
+        foo()
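+
+    def test_errstate_restored_sketch(self):
+        # Illustrative sketch (editor addition, not upstream numpy):
+        # errstate saves the error settings on entry and restores them on
+        # exit, in both the context-manager and decorator forms above.
+        before = np.geterr()
+        with np.errstate(divide='ignore'):
+            assert_(np.geterr()['divide'] == 'ignore')
+        assert_(np.geterr() == before)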
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_extint128.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_extint128.py
new file mode 100644
index 00000000..3b64915f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_extint128.py
@@ -0,0 +1,219 @@
+import itertools
+import contextlib
+import operator
+import pytest
+
+import numpy as np
+import numpy.core._multiarray_tests as mt
+
+from numpy.testing import assert_raises, assert_equal
+
+
+INT64_MAX = np.iinfo(np.int64).max
+INT64_MIN = np.iinfo(np.int64).min
+INT64_MID = 2**32
+
+# int128 is not two's complement, the sign bit is separate
+INT128_MAX = 2**128 - 1
+INT128_MIN = -INT128_MAX
+INT128_MID = 2**64
+
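+# Illustrative sketch (editor addition, not upstream numpy): with a
+# separate sign bit the range is symmetric, so INT128_MIN is -(2**128 - 1)
+# rather than the two's-complement -(2**127).
+assert INT128_MIN == -INT128_MAX
+assert INT128_MIN < -(2**127)
+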
+INT64_VALUES = (
+    [INT64_MIN + j for j in range(20)] +
+    [INT64_MAX - j for j in range(20)] +
+    [INT64_MID + j for j in range(-20, 20)] +
+    [2*INT64_MID + j for j in range(-20, 20)] +
+    [INT64_MID//2 + j for j in range(-20, 20)] +
+    list(range(-70, 70))
+)
+
+INT128_VALUES = (
+    [INT128_MIN + j for j in range(20)] +
+    [INT128_MAX - j for j in range(20)] +
+    [INT128_MID + j for j in range(-20, 20)] +
+    [2*INT128_MID + j for j in range(-20, 20)] +
+    [INT128_MID//2 + j for j in range(-20, 20)] +
+    list(range(-70, 70)) +
+    [False]  # negative zero
+)
+
+INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0]
+
+
+@contextlib.contextmanager
+def exc_iter(*args):
+    """
+    Iterate over Cartesian product of *args, and if an exception is raised,
+    add information of the current iterate.
+    """
+
+    value = [None]
+
+    def iterate():
+        for v in itertools.product(*args):
+            value[0] = v
+            yield v
+
+    try:
+        yield iterate()
+    except Exception:
+        import traceback
+        msg = "At: %r\n%s" % (repr(value[0]),
+                              traceback.format_exc())
+        raise AssertionError(msg)
+
+
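+def test_exc_iter_reports_iterate_sketch():
+    # Illustrative sketch (editor addition, not upstream numpy): a failure
+    # inside the loop is re-raised as an AssertionError whose message
+    # names the offending iterate.
+    with assert_raises(AssertionError):
+        with exc_iter([1], [0]) as it:
+            for a, b in it:
+                a // b  # ZeroDivisionError -> AssertionError("At: (1, 0)")
+
+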
+def test_safe_binop():
+    # Test checked arithmetic routines
+
+    ops = [
+        (operator.add, 1),
+        (operator.sub, 2),
+        (operator.mul, 3)
+    ]
+
+    with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it:
+        for xop, a, b in it:
+            pyop, op = xop
+            c = pyop(a, b)
+
+            if not (INT64_MIN <= c <= INT64_MAX):
+                assert_raises(OverflowError, mt.extint_safe_binop, a, b, op)
+            else:
+                d = mt.extint_safe_binop(a, b, op)
+                if c != d:
+                    # assert_equal is slow
+                    assert_equal(d, c)
+
+
+def test_to_128():
+    with exc_iter(INT64_VALUES) as it:
+        for a, in it:
+            b = mt.extint_to_128(a)
+            if a != b:
+                assert_equal(b, a)
+
+
+def test_to_64():
+    with exc_iter(INT128_VALUES) as it:
+        for a, in it:
+            if not (INT64_MIN <= a <= INT64_MAX):
+                assert_raises(OverflowError, mt.extint_to_64, a)
+            else:
+                b = mt.extint_to_64(a)
+                if a != b:
+                    assert_equal(b, a)
+
+
+def test_mul_64_64():
+    with exc_iter(INT64_VALUES, INT64_VALUES) as it:
+        for a, b in it:
+            c = a * b
+            d = mt.extint_mul_64_64(a, b)
+            if c != d:
+                assert_equal(d, c)
+
+
+def test_add_128():
+    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+        for a, b in it:
+            c = a + b
+            if not (INT128_MIN <= c <= INT128_MAX):
+                assert_raises(OverflowError, mt.extint_add_128, a, b)
+            else:
+                d = mt.extint_add_128(a, b)
+                if c != d:
+                    assert_equal(d, c)
+
+
+def test_sub_128():
+    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+        for a, b in it:
+            c = a - b
+            if not (INT128_MIN <= c <= INT128_MAX):
+                assert_raises(OverflowError, mt.extint_sub_128, a, b)
+            else:
+                d = mt.extint_sub_128(a, b)
+                if c != d:
+                    assert_equal(d, c)
+
+
+def test_neg_128():
+    with exc_iter(INT128_VALUES) as it:
+        for a, in it:
+            b = -a
+            c = mt.extint_neg_128(a)
+            if b != c:
+                assert_equal(c, b)
+
+
+def test_shl_128():
+    with exc_iter(INT128_VALUES) as it:
+        for a, in it:
+            if a < 0:
+                b = -(((-a) << 1) & (2**128-1))
+            else:
+                b = (a << 1) & (2**128-1)
+            c = mt.extint_shl_128(a)
+            if b != c:
+                assert_equal(c, b)
+
+
+def test_shr_128():
+    with exc_iter(INT128_VALUES) as it:
+        for a, in it:
+            if a < 0:
+                b = -((-a) >> 1)
+            else:
+                b = a >> 1
+            c = mt.extint_shr_128(a)
+            if b != c:
+                assert_equal(c, b)
+
+
+def test_gt_128():
+    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+        for a, b in it:
+            c = a > b
+            d = mt.extint_gt_128(a, b)
+            if c != d:
+                assert_equal(d, c)
+
+
+@pytest.mark.slow
+def test_divmod_128_64():
+    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
+        for a, b in it:
+            if a >= 0:
+                c, cr = divmod(a, b)
+            else:
+                c, cr = divmod(-a, b)
+                c = -c
+                cr = -cr
+
+            d, dr = mt.extint_divmod_128_64(a, b)
+
+            if c != d or cr != dr or b*d + dr != a:
+                assert_equal(d, c)
+                assert_equal(dr, cr)
+                assert_equal(b*d + dr, a)
+
+
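+# Illustrative sketch (editor addition, not upstream numpy): Python's
+# divmod floors toward -inf while the extint routine truncates toward
+# zero; the sign handling in test_divmod_128_64 above converts floored
+# divmod of |a| into the truncated convention.
+assert divmod(-7, 2) == (-4, 1)            # floored (Python semantics)
+assert (-(7 // 2), -(7 % 2)) == (-3, -1)   # truncated, as emulated above
+
+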
+def test_floordiv_128_64():
+    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
+        for a, b in it:
+            c = a // b
+            d = mt.extint_floordiv_128_64(a, b)
+
+            if c != d:
+                assert_equal(d, c)
+
+
+def test_ceildiv_128_64():
+    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
+        for a, b in it:
+            c = (a + b - 1) // b
+            d = mt.extint_ceildiv_128_64(a, b)
+
+            if c != d:
+                assert_equal(d, c)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_function_base.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_function_base.py
new file mode 100644
index 00000000..79f1ecfc
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_function_base.py
@@ -0,0 +1,446 @@
+import pytest
+from numpy import (
+    logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
+    ndarray, sqrt, nextafter, stack, errstate
+    )
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
+    )
+
+
+class PhysicalQuantity(float):
+    def __new__(cls, value):
+        return float.__new__(cls, value)
+
+    def __add__(self, x):
+        assert_(isinstance(x, PhysicalQuantity))
+        return PhysicalQuantity(float(x) + float(self))
+    __radd__ = __add__
+
+    def __sub__(self, x):
+        assert_(isinstance(x, PhysicalQuantity))
+        return PhysicalQuantity(float(self) - float(x))
+
+    def __rsub__(self, x):
+        assert_(isinstance(x, PhysicalQuantity))
+        return PhysicalQuantity(float(x) - float(self))
+
+    def __mul__(self, x):
+        return PhysicalQuantity(float(x) * float(self))
+    __rmul__ = __mul__
+
+    def __div__(self, x):
+        return PhysicalQuantity(float(self) / float(x))
+
+    def __rdiv__(self, x):
+        return PhysicalQuantity(float(x) / float(self))
+
+
+class PhysicalQuantity2(ndarray):
+    __array_priority__ = 10
+
+
+class TestLogspace:
+
+    def test_basic(self):
+        y = logspace(0, 6)
+        assert_(len(y) == 50)
+        y = logspace(0, 6, num=100)
+        assert_(y[-1] == 10 ** 6)
+        y = logspace(0, 6, endpoint=False)
+        assert_(y[-1] < 10 ** 6)
+        y = logspace(0, 6, num=7)
+        assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
+
+    def test_start_stop_array(self):
+        start = array([0., 1.])
+        stop = array([6., 7.])
+        t1 = logspace(start, stop, 6)
+        t2 = stack([logspace(_start, _stop, 6)
+                    for _start, _stop in zip(start, stop)], axis=1)
+        assert_equal(t1, t2)
+        t3 = logspace(start, stop[0], 6)
+        t4 = stack([logspace(_start, stop[0], 6)
+                    for _start in start], axis=1)
+        assert_equal(t3, t4)
+        t5 = logspace(start, stop, 6, axis=-1)
+        assert_equal(t5, t2.T)
+
+    @pytest.mark.parametrize("axis", [0, 1, -1])
+    def test_base_array(self, axis: int):
+        start = 1
+        stop = 2
+        num = 6
+        base = array([1, 2])
+        t1 = logspace(start, stop, num=num, base=base, axis=axis)
+        t2 = stack(
+            [logspace(start, stop, num=num, base=_base) for _base in base],
+            axis=(axis + 1) % t1.ndim,
+        )
+        assert_equal(t1, t2)
+
+    @pytest.mark.parametrize("axis", [0, 1, -1])
+    def test_stop_base_array(self, axis: int):
+        start = 1
+        stop = array([2, 3])
+        num = 6
+        base = array([1, 2])
+        t1 = logspace(start, stop, num=num, base=base, axis=axis)
+        t2 = stack(
+            [logspace(start, _stop, num=num, base=_base)
+             for _stop, _base in zip(stop, base)],
+            axis=(axis + 1) % t1.ndim,
+        )
+        assert_equal(t1, t2)
+
+    def test_dtype(self):
+        y = logspace(0, 6, dtype='float32')
+        assert_equal(y.dtype, dtype('float32'))
+        y = logspace(0, 6, dtype='float64')
+        assert_equal(y.dtype, dtype('float64'))
+        y = logspace(0, 6, dtype='int32')
+        assert_equal(y.dtype, dtype('int32'))
+
+    def test_physical_quantities(self):
+        a = PhysicalQuantity(1.0)
+        b = PhysicalQuantity(5.0)
+        assert_equal(logspace(a, b), logspace(1.0, 5.0))
+
+    def test_subclass(self):
+        a = array(1).view(PhysicalQuantity2)
+        b = array(7).view(PhysicalQuantity2)
+        ls = logspace(a, b)
+        assert type(ls) is PhysicalQuantity2
+        assert_equal(ls, logspace(1.0, 7.0))
+        ls = logspace(a, b, 1)
+        assert type(ls) is PhysicalQuantity2
+        assert_equal(ls, logspace(1.0, 7.0, 1))
+
+
+class TestGeomspace:
+
+    def test_basic(self):
+        y = geomspace(1, 1e6)
+        assert_(len(y) == 50)
+        y = geomspace(1, 1e6, num=100)
+        assert_(y[-1] == 10 ** 6)
+        y = geomspace(1, 1e6, endpoint=False)
+        assert_(y[-1] < 10 ** 6)
+        y = geomspace(1, 1e6, num=7)
+        assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
+
+        y = geomspace(8, 2, num=3)
+        assert_allclose(y, [8, 4, 2])
+        assert_array_equal(y.imag, 0)
+
+        y = geomspace(-1, -100, num=3)
+        assert_array_equal(y, [-1, -10, -100])
+        assert_array_equal(y.imag, 0)
+
+        y = geomspace(-100, -1, num=3)
+        assert_array_equal(y, [-100, -10, -1])
+        assert_array_equal(y.imag, 0)
+
+    def test_boundaries_match_start_and_stop_exactly(self):
+        # make sure that the boundaries of the returned array exactly
+        # equal 'start' and 'stop' - this isn't obvious because
+        # np.exp(np.log(x)) isn't necessarily exactly equal to x
+        start = 0.3
+        stop = 20.3
+
+        y = geomspace(start, stop, num=1)
+        assert_equal(y[0], start)
+
+        y = geomspace(start, stop, num=1, endpoint=False)
+        assert_equal(y[0], start)
+
+        y = geomspace(start, stop, num=3)
+        assert_equal(y[0], start)
+        assert_equal(y[-1], stop)
+
+        y = geomspace(start, stop, num=3, endpoint=False)
+        assert_equal(y[0], start)
+
+    def test_nan_interior(self):
+        with errstate(invalid='ignore'):
+            y = geomspace(-3, 3, num=4)
+
+        assert_equal(y[0], -3.0)
+        assert_(isnan(y[1:-1]).all())
+        assert_equal(y[3], 3.0)
+
+        with errstate(invalid='ignore'):
+            y = geomspace(-3, 3, num=4, endpoint=False)
+
+        assert_equal(y[0], -3.0)
+        assert_(isnan(y[1:]).all())
+
+    def test_complex(self):
+        # Purely imaginary
+        y = geomspace(1j, 16j, num=5)
+        assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
+        assert_array_equal(y.real, 0)
+
+        y = geomspace(-4j, -324j, num=5)
+        assert_allclose(y, [-4j, -12j, -36j, -108j, -324j])
+        assert_array_equal(y.real, 0)
+
+        y = geomspace(1+1j, 1000+1000j, num=4)
+        assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j])
+
+        y = geomspace(-1+1j, -1000+1000j, num=4)
+        assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j])
+
+        # Logarithmic spirals
+        y = geomspace(-1, 1, num=3, dtype=complex)
+        assert_allclose(y, [-1, 1j, +1])
+
+        y = geomspace(0+3j, -3+0j, 3)
+        assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
+        y = geomspace(0+3j, 3+0j, 3)
+        assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j])
+        y = geomspace(-3+0j, 0-3j, 3)
+        assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j])
+        y = geomspace(0+3j, -3+0j, 3)
+        assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
+        y = geomspace(-2-3j, 5+7j, 7)
+        assert_allclose(y, [-2-3j, -0.29058977-4.15771027j,
+                            2.08885354-4.34146838j, 4.58345529-3.16355218j,
+                            6.41401745-0.55233457j, 6.75707386+3.11795092j,
+                            5+7j])
+
+        # Type promotion should prevent the -5 from becoming a NaN
+        y = geomspace(3j, -5, 2)
+        assert_allclose(y, [3j, -5])
+        y = geomspace(-5, 3j, 2)
+        assert_allclose(y, [-5, 3j])
+
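+    def test_log_equivalence_sketch(self):
+        # Illustrative sketch (editor addition, not upstream numpy): for
+        # positive real endpoints geomspace is numerically close to
+        # exponentiating a linspace of the exponents, which is why the
+        # complex cases above trace logarithmic spirals.
+        y = geomspace(1.0, 1000.0, num=4)
+        assert_allclose(y, 10.0 ** linspace(0.0, 3.0, num=4))
+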
+    def test_dtype(self):
+        y = geomspace(1, 1e6, dtype='float32')
+        assert_equal(y.dtype, dtype('float32'))
+        y = geomspace(1, 1e6, dtype='float64')
+        assert_equal(y.dtype, dtype('float64'))
+        y = geomspace(1, 1e6, dtype='int32')
+        assert_equal(y.dtype, dtype('int32'))
+
+        # Native types
+        y = geomspace(1, 1e6, dtype=float)
+        assert_equal(y.dtype, dtype('float_'))
+        y = geomspace(1, 1e6, dtype=complex)
+        assert_equal(y.dtype, dtype('complex'))
+
+    def test_start_stop_array_scalar(self):
+        lim1 = array([120, 100], dtype="int8")
+        lim2 = array([-120, -100], dtype="int8")
+        lim3 = array([1200, 1000], dtype="uint16")
+        t1 = geomspace(lim1[0], lim1[1], 5)
+        t2 = geomspace(lim2[0], lim2[1], 5)
+        t3 = geomspace(lim3[0], lim3[1], 5)
+        t4 = geomspace(120.0, 100.0, 5)
+        t5 = geomspace(-120.0, -100.0, 5)
+        t6 = geomspace(1200.0, 1000.0, 5)
+
+        # t3 uses float32, t6 uses float64
+        assert_allclose(t1, t4, rtol=1e-2)
+        assert_allclose(t2, t5, rtol=1e-2)
+        assert_allclose(t3, t6, rtol=1e-5)
+
+    def test_start_stop_array(self):
+        # Try to use all special cases.
+        start = array([1.e0, 32., 1j, -4j, 1+1j, -1])
+        stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1])
+        t1 = geomspace(start, stop, 5)
+        t2 = stack([geomspace(_start, _stop, 5)
+                    for _start, _stop in zip(start, stop)], axis=1)
+        assert_equal(t1, t2)
+        t3 = geomspace(start, stop[0], 5)
+        t4 = stack([geomspace(_start, stop[0], 5)
+                    for _start in start], axis=1)
+        assert_equal(t3, t4)
+        t5 = geomspace(start, stop, 5, axis=-1)
+        assert_equal(t5, t2.T)
+
+    def test_physical_quantities(self):
+        a = PhysicalQuantity(1.0)
+        b = PhysicalQuantity(5.0)
+        assert_equal(geomspace(a, b), geomspace(1.0, 5.0))
+
+    def test_subclass(self):
+        a = array(1).view(PhysicalQuantity2)
+        b = array(7).view(PhysicalQuantity2)
+        gs = geomspace(a, b)
+        assert type(gs) is PhysicalQuantity2
+        assert_equal(gs, geomspace(1.0, 7.0))
+        gs = geomspace(a, b, 1)
+        assert type(gs) is PhysicalQuantity2
+        assert_equal(gs, geomspace(1.0, 7.0, 1))
+
+    def test_bounds(self):
+        assert_raises(ValueError, geomspace, 0, 10)
+        assert_raises(ValueError, geomspace, 10, 0)
+        assert_raises(ValueError, geomspace, 0, 0)
+
+
+class TestLinspace:
+
+    def test_basic(self):
+        y = linspace(0, 10)
+        assert_(len(y) == 50)
+        y = linspace(2, 10, num=100)
+        assert_(y[-1] == 10)
+        y = linspace(2, 10, endpoint=False)
+        assert_(y[-1] < 10)
+        assert_raises(ValueError, linspace, 0, 10, num=-1)
+
+    def test_corner(self):
+        y = list(linspace(0, 1, 1))
+        assert_(y == [0.0], y)
+        assert_raises(TypeError, linspace, 0, 1, num=2.5)
+
+    def test_type(self):
+        t1 = linspace(0, 1, 0).dtype
+        t2 = linspace(0, 1, 1).dtype
+        t3 = linspace(0, 1, 2).dtype
+        assert_equal(t1, t2)
+        assert_equal(t2, t3)
+
+    def test_dtype(self):
+        y = linspace(0, 6, dtype='float32')
+        assert_equal(y.dtype, dtype('float32'))
+        y = linspace(0, 6, dtype='float64')
+        assert_equal(y.dtype, dtype('float64'))
+        y = linspace(0, 6, dtype='int32')
+        assert_equal(y.dtype, dtype('int32'))
+
+    def test_start_stop_array_scalar(self):
+        lim1 = array([-120, 100], dtype="int8")
+        lim2 = array([120, -100], dtype="int8")
+        lim3 = array([1200, 1000], dtype="uint16")
+        t1 = linspace(lim1[0], lim1[1], 5)
+        t2 = linspace(lim2[0], lim2[1], 5)
+        t3 = linspace(lim3[0], lim3[1], 5)
+        t4 = linspace(-120.0, 100.0, 5)
+        t5 = linspace(120.0, -100.0, 5)
+        t6 = linspace(1200.0, 1000.0, 5)
+        assert_equal(t1, t4)
+        assert_equal(t2, t5)
+        assert_equal(t3, t6)
+
+    def test_start_stop_array(self):
+        start = array([-120, 120], dtype="int8")
+        stop = array([100, -100], dtype="int8")
+        t1 = linspace(start, stop, 5)
+        t2 = stack([linspace(_start, _stop, 5)
+                    for _start, _stop in zip(start, stop)], axis=1)
+        assert_equal(t1, t2)
+        t3 = linspace(start, stop[0], 5)
+        t4 = stack([linspace(_start, stop[0], 5)
+                    for _start in start], axis=1)
+        assert_equal(t3, t4)
+        t5 = linspace(start, stop, 5, axis=-1)
+        assert_equal(t5, t2.T)
+
+    def test_complex(self):
+        lim1 = linspace(1 + 2j, 3 + 4j, 5)
+        t1 = array([1.0+2.j, 1.5+2.5j,  2.0+3j, 2.5+3.5j, 3.0+4j])
+        lim2 = linspace(1j, 10, 5)
+        t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j])
+        assert_equal(lim1, t1)
+        assert_equal(lim2, t2)
+
+    def test_physical_quantities(self):
+        a = PhysicalQuantity(0.0)
+        b = PhysicalQuantity(1.0)
+        assert_equal(linspace(a, b), linspace(0.0, 1.0))
+
+    def test_subclass(self):
+        a = array(0).view(PhysicalQuantity2)
+        b = array(1).view(PhysicalQuantity2)
+        ls = linspace(a, b)
+        assert type(ls) is PhysicalQuantity2
+        assert_equal(ls, linspace(0.0, 1.0))
+        ls = linspace(a, b, 1)
+        assert type(ls) is PhysicalQuantity2
+        assert_equal(ls, linspace(0.0, 1.0, 1))
+
+    def test_array_interface(self):
+        # Regression test for https://github.com/numpy/numpy/pull/6659
+        # Ensure that start/stop can be objects that implement
+        # __array_interface__ and are convertible to numeric scalars
+
+        class Arrayish:
+            """
+            A generic object that supports the __array_interface__ and hence
+            can in principle be converted to a numeric scalar, but is not
+            otherwise recognized as numeric, but also happens to support
+            multiplication by floats.
+
+            Data should be an object that implements the buffer interface,
+            and contains at least 4 bytes.
+            """
+
+            def __init__(self, data):
+                self._data = data
+
+            @property
+            def __array_interface__(self):
+                return {'shape': (), 'typestr': '<f4',
+                        'data': self._data, 'version': 3}
+
+            def __mul__(self, other):
+                # For the purposes of this test any multiplication is an
+                # identity operation
+                return self
+
+        one = Arrayish(array(1, dtype='<f4'))
+        five = Arrayish(array(5, dtype='<f4'))
+
+        assert_equal(linspace(one, five), linspace(1, 5))
[hunk truncated: the remainder of test_function_base.py and the diff header
for the new test_getlimits.py were lost in extraction; only the tail of its
finfo plausibility check survives]
+        assert_(info.nmant > 1)
+        assert_(info.minexp < -1)
+        assert_(info.maxexp > 1)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_half.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_half.py
new file mode 100644
index 00000000..fbc1bf6a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_half.py
@@ -0,0 +1,572 @@
+import platform
+import pytest
+
+import numpy as np
+from numpy import uint16, float16, float32, float64
+from numpy.testing import assert_, assert_equal, _OLD_PROMOTION, IS_WASM
+
+
+def assert_raises_fpe(strmatch, callable, *args, **kwargs):
+    try:
+        callable(*args, **kwargs)
+    except FloatingPointError as exc:
+        assert_(str(exc).find(strmatch) >= 0,
+                "Did not raise floating point %s error" % strmatch)
+    else:
+        assert_(False,
+                "Did not raise floating point %s error" % strmatch)
+
+class TestHalf:
+    def setup_method(self):
+        # An array of all possible float16 values
+        self.all_f16 = np.arange(0x10000, dtype=uint16)
+        self.all_f16.dtype = float16
+
+        # A NaN value can cause an invalid FP exception if hardware FP is used
+        with np.errstate(invalid='ignore'):
+            self.all_f32 = np.array(self.all_f16, dtype=float32)
+            self.all_f64 = np.array(self.all_f16, dtype=float64)
+
+        # An array of all non-NaN float16 values, in sorted order
+        self.nonan_f16 = np.concatenate(
+                                (np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
+                                 np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
+        self.nonan_f16.dtype = float16
+        self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
+        self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
+
+        # An array of all finite float16 values, in sorted order
+        self.finite_f16 = self.nonan_f16[1:-1]
+        self.finite_f32 = self.nonan_f32[1:-1]
+        self.finite_f64 = self.nonan_f64[1:-1]
+
+    def test_half_conversions(self):
+        """Checks that all 16-bit values survive conversion
+           to/from 32-bit and 64-bit float"""
+        # Because the underlying routines preserve the NaN bits, every
+        # value is preserved when converting to/from other floats.
+
+        # Convert from float32 back to float16
+        with np.errstate(invalid='ignore'):
+            b = np.array(self.all_f32, dtype=float16)
+        # avoid testing NaNs, whose bit patterns may differ between Q/SNaNs
+        b_nn = b == b
+        assert_equal(self.all_f16[b_nn].view(dtype=uint16),
+                     b[b_nn].view(dtype=uint16))
+
+        # Convert from float64 back to float16
+        with np.errstate(invalid='ignore'):
+            b = np.array(self.all_f64, dtype=float16)
+        b_nn = b == b
+        assert_equal(self.all_f16[b_nn].view(dtype=uint16),
+                     b[b_nn].view(dtype=uint16))
+
+        # Convert float16 to longdouble and back
+        # This doesn't necessarily preserve the extra NaN bits,
+        # so exclude NaNs.
+        a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
+        b = np.array(a_ld, dtype=float16)
+        assert_equal(self.nonan_f16.view(dtype=uint16),
+                     b.view(dtype=uint16))
+
+        # Check the range for which all integers can be represented
+        i_int = np.arange(-2048, 2049)
+        i_f16 = np.array(i_int, dtype=float16)
+        j = np.array(i_f16, dtype=int)
+        assert_equal(i_int, j)
+
+    @pytest.mark.parametrize("string_dt", ["S", "U"])
+    def test_half_conversion_to_string(self, string_dt):
+        # Currently uses S/U32 (which is sufficient for float32)
+        expected_dt = np.dtype(f"{string_dt}32")
+        assert np.promote_types(np.float16, string_dt) == expected_dt
+        assert np.promote_types(string_dt, np.float16) == expected_dt
+
+        arr = np.ones(3, dtype=np.float16).astype(string_dt)
+        assert arr.dtype == expected_dt
+
+    @pytest.mark.parametrize("string_dt", ["S", "U"])
+    def test_half_conversion_from_string(self, string_dt):
+        string = np.array("3.1416", dtype=string_dt)
+        assert string.astype(np.float16) == np.array(3.1416, dtype=np.float16)
+
+    @pytest.mark.parametrize("offset", [None, "up", "down"])
+    @pytest.mark.parametrize("shift", [None, "up", "down"])
+    @pytest.mark.parametrize("float_t", [np.float32, np.float64])
+    @np._no_nep50_warning()
+    def test_half_conversion_rounding(self, float_t, shift, offset):
+        # Assumes that round to even is used during casting.
+        max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16)
+
+        # Test all (positive) finite numbers, denormals are most interesting
+        # however:
+        f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16)
+        f16s_float = f16s_patterns.view(np.float16).astype(float_t)
+
+        # Shift the values by half a bit up or down (or do not shift):
+        if shift == "up":
+            f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:]
+        elif shift == "down":
+            f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1]
+        else:
+            f16s_float = f16s_float[1:-1]
+
+        # Increase the float by a minimal value:
+        if offset == "up":
+            f16s_float = np.nextafter(f16s_float, float_t(np.inf))
+        elif offset == "down":
+            f16s_float = np.nextafter(f16s_float, float_t(-np.inf))
+
+        # Convert back to float16 and its bit pattern:
+        res_patterns = f16s_float.astype(np.float16).view(np.uint16)
+
+        # The above calculation tries the original values, or the exact
+        # midpoints between adjacent float16 values.  It then offsets them
+        # further by as little as possible.  If no offset occurs, "round to
+        # even" logic will be necessary; an arbitrarily small offset should
+        # always cause normal up/down rounding.
+
+        # Calculate the expected pattern:
+        cmp_patterns = f16s_patterns[1:-1].copy()
+
+        if shift == "down" and offset != "up":
+            shift_pattern = -1
+        elif shift == "up" and offset != "down":
+            shift_pattern = 1
+        else:
+            # There cannot be a shift: either shift is None, so rounding
+            # returns the original value, or the offset cancels the shift.
+            shift_pattern = 0
+
+        # If rounding occurs, is it normal rounding or round to even?
+        if offset is None:
+            # Round to even occurs, modify only non-even, cast to allow + (-1)
+            cmp_patterns[0::2].view(np.int16)[...] += shift_pattern
+        else:
+            cmp_patterns.view(np.int16)[...] += shift_pattern
+
+        assert_equal(res_patterns, cmp_patterns)
+
+    @pytest.mark.parametrize(["float_t", "uint_t", "bits"],
+                             [(np.float32, np.uint32, 23),
+                              (np.float64, np.uint64, 52)])
+    def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits):
+        # Test specifically that all bits are considered when deciding
+        # whether round to even should occur (i.e. no bits are lost at the
+        # end).  Compare also gh-12721.  The most bits can get lost for the
+        # smallest denormal:
+        smallest_value = np.uint16(1).view(np.float16).astype(float_t)
+        assert smallest_value == 2**-24
+
+        # Will be rounded to zero based on round to even rule:
+        rounded_to_zero = smallest_value / float_t(2)
+        assert rounded_to_zero.astype(np.float16) == 0
+
+        # The significand will be all 0 for the float_t, test that we do not
+        # lose the lower ones of these:
+        for i in range(bits):
+            # slightly increasing the value should make it round up:
+            larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i)
+            larger_value = larger_pattern.view(float_t)
+            assert larger_value.astype(np.float16) == smallest_value
+
+    def test_nans_infs(self):
+        with np.errstate(all='ignore'):
+            # Check some of the ufuncs
+            assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
+            assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
+            assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
+            assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
+            assert_equal(np.spacing(float16(65504)), np.inf)
+
+            # Check comparisons of all values with NaN
+            nan = float16(np.nan)
+
+            assert_(not (self.all_f16 == nan).any())
+            assert_(not (nan == self.all_f16).any())
+
+            assert_((self.all_f16 != nan).all())
+            assert_((nan != self.all_f16).all())
+
+            assert_(not (self.all_f16 < nan).any())
+            assert_(not (nan < self.all_f16).any())
+
+            assert_(not (self.all_f16 <= nan).any())
+            assert_(not (nan <= self.all_f16).any())
+
+            assert_(not (self.all_f16 > nan).any())
+            assert_(not (nan > self.all_f16).any())
+
+            assert_(not (self.all_f16 >= nan).any())
+            assert_(not (nan >= self.all_f16).any())
+
+    def test_half_values(self):
+        """Confirms a small number of known half values"""
+        a = np.array([1.0, -1.0,
+                      2.0, -2.0,
+                      0.0999755859375, 0.333251953125,  # 1/10, 1/3
+                      65504, -65504,           # Maximum magnitude
+                      2.0**(-14), -2.0**(-14),  # Minimum normal
+                      2.0**(-24), -2.0**(-24),  # Minimum subnormal
+                      0, -1/1e1000,            # Signed zeros
+                      np.inf, -np.inf])
+        b = np.array([0x3c00, 0xbc00,
+                      0x4000, 0xc000,
+                      0x2e66, 0x3555,
+                      0x7bff, 0xfbff,
+                      0x0400, 0x8400,
+                      0x0001, 0x8001,
+                      0x0000, 0x8000,
+                      0x7c00, 0xfc00], dtype=uint16)
+        b.dtype = float16
+        assert_equal(a, b)
+
+    def test_half_rounding(self):
+        """Checks that rounding when converting to half is correct"""
+        a = np.array([2.0**-25 + 2.0**-35,  # Rounds to minimum subnormal
+                      2.0**-25,       # Underflows to zero (nearest even mode)
+                      2.0**-26,       # Underflows to zero
+                      1.0+2.0**-11 + 2.0**-16,  # rounds to 1.0+2**(-10)
+                      1.0+2.0**-11,   # rounds to 1.0 (nearest even mode)
+                      1.0+2.0**-12,   # rounds to 1.0
+                      65519,          # rounds to 65504
+                      65520],         # rounds to inf
+                      dtype=float64)
+        rounded = [2.0**-24,
+                   0.0,
+                   0.0,
+                   1.0+2.0**(-10),
+                   1.0,
+                   1.0,
+                   65504,
+                   np.inf]
+
+        # Check float64->float16 rounding
+        with np.errstate(over="ignore"):
+            b = np.array(a, dtype=float16)
+        assert_equal(b, rounded)
+
+        # Check float32->float16 rounding
+        a = np.array(a, dtype=float32)
+        with np.errstate(over="ignore"):
+            b = np.array(a, dtype=float16)
+        assert_equal(b, rounded)
+
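+    def test_round_to_even_sketch(self):
+        # Illustrative sketch (editor addition, not upstream numpy) of the
+        # "nearest even" cases above: an exact tie rounds to the neighbour
+        # whose significand bit pattern is even.
+        eps = 2.0**-10  # float16 spacing just above 1.0
+        assert_equal(float16(1.0 + eps/2), float16(1.0))
+        assert_equal(float16(1.0 + 1.5*eps), float16(1.0 + 2*eps))
+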
+    def test_half_correctness(self):
+        """Take every finite float16, and check the casting functions with
+           a manual conversion."""
+
+        # Create an array of all finite float16s
+        a_bits = self.finite_f16.view(dtype=uint16)
+
+        # Convert to 64-bit float manually
+        a_sgn = (-1.0)**((a_bits & 0x8000) >> 15)
+        a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15
+        a_man = (a_bits & 0x03ff) * 2.0**(-10)
+        # Implicit bit of normalized floats
+        a_man[a_exp != -15] += 1
+        # Denormalized exponent is -14
+        a_exp[a_exp == -15] = -14
+
+        a_manual = a_sgn * a_man * 2.0**a_exp
+
+        a32_fail = np.nonzero(self.finite_f32 != a_manual)[0]
+        if len(a32_fail) != 0:
+            bad_index = a32_fail[0]
+            assert_equal(self.finite_f32, a_manual,
+                 "First non-equal is half value 0x%x -> %g != %g" %
+                            (a_bits[bad_index],
+                             self.finite_f32[bad_index],
+                             a_manual[bad_index]))
+
+        a64_fail = np.nonzero(self.finite_f64 != a_manual)[0]
+        if len(a64_fail) != 0:
+            bad_index = a64_fail[0]
+            assert_equal(self.finite_f64, a_manual,
+                 "First non-equal is half value 0x%x -> %g != %g" %
+                            (a_bits[bad_index],
+                             self.finite_f64[bad_index],
+                             a_manual[bad_index]))
+
+    def test_half_ordering(self):
+        """Make sure comparisons are working right"""
+
+        # All non-NaN float16 values in reverse order
+        a = self.nonan_f16[::-1].copy()
+
+        # 32-bit float copy
+        b = np.array(a, dtype=float32)
+
+        # Should sort the same
+        a.sort()
+        b.sort()
+        assert_equal(a, b)
+
+        # Comparisons should work
+        assert_((a[:-1] <= a[1:]).all())
+        assert_(not (a[:-1] > a[1:]).any())
+        assert_((a[1:] >= a[:-1]).all())
+        assert_(not (a[1:] < a[:-1]).any())
+        # All != except for +/-0
+        assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2)
+        assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2)
+
+    def test_half_funcs(self):
+        """Test the various ArrFuncs"""
+
+        # fill
+        assert_equal(np.arange(10, dtype=float16),
+                     np.arange(10, dtype=float32))
+
+        # fillwithscalar
+        a = np.zeros((5,), dtype=float16)
+        a.fill(1)
+        assert_equal(a, np.ones((5,), dtype=float16))
+
+        # nonzero and copyswap
+        a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16)
+        assert_equal(a.nonzero()[0],
+                     [2, 5, 6])
+        a = a.byteswap()
+        a = a.view(a.dtype.newbyteorder())
+        assert_equal(a.nonzero()[0],
+                     [2, 5, 6])
+
+        # dot
+        a = np.arange(0, 10, 0.5, dtype=float16)
+        b = np.ones((20,), dtype=float16)
+        assert_equal(np.dot(a, b),
+                     95)
+
+        # argmax
+        a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
+        assert_equal(a.argmax(),
+                     4)
+        a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
+        assert_equal(a.argmax(),
+                     5)
+
+        # getitem
+        a = np.arange(10, dtype=float16)
+        for i in range(10):
+            assert_equal(a.item(i), i)
+
+    def test_spacing_nextafter(self):
+        """Test np.spacing and np.nextafter"""
+        # All non-negative finite #'s
+        a = np.arange(0x7c00, dtype=uint16)
+        hinf = np.array((np.inf,), dtype=float16)
+        hnan = np.array((np.nan,), dtype=float16)
+        a_f16 = a.view(dtype=float16)
+
+        assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])
+
+        assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
+        assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
+        assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
+
+        assert_equal(np.nextafter(hinf, a_f16), a_f16[-1])
+        assert_equal(np.nextafter(-hinf, a_f16), -a_f16[-1])
+
+        assert_equal(np.nextafter(hinf, hinf), hinf)
+        assert_equal(np.nextafter(hinf, -hinf), a_f16[-1])
+        assert_equal(np.nextafter(-hinf, hinf), -a_f16[-1])
+        assert_equal(np.nextafter(-hinf, -hinf), -hinf)
+
+        assert_equal(np.nextafter(a_f16, hnan), hnan[0])
+        assert_equal(np.nextafter(hnan, a_f16), hnan[0])
+
+        assert_equal(np.nextafter(hnan, hnan), hnan)
+        assert_equal(np.nextafter(hinf, hnan), hnan)
+        assert_equal(np.nextafter(hnan, hinf), hnan)
+
+        # switch to negatives
+        a |= 0x8000
+
+        assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
+        assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])
+
+        assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
+        assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
+        assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
+
+        assert_equal(np.nextafter(hinf, a_f16), -a_f16[-1])
+        assert_equal(np.nextafter(-hinf, a_f16), a_f16[-1])
+
+        assert_equal(np.nextafter(a_f16, hnan), hnan[0])
+        assert_equal(np.nextafter(hnan, a_f16), hnan[0])
+
+    def test_half_ufuncs(self):
+        """Test the various ufuncs"""
+
+        a = np.array([0, 1, 2, 4, 2], dtype=float16)
+        b = np.array([-2, 5, 1, 4, 3], dtype=float16)
+        c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)
+
+        assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
+        assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
+        assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
+        assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])
+
+        assert_equal(np.equal(a, b), [False, False, False, True, False])
+        assert_equal(np.not_equal(a, b), [True, True, True, False, True])
+        assert_equal(np.less(a, b), [False, True, False, False, True])
+        assert_equal(np.less_equal(a, b), [False, True, False, True, True])
+        assert_equal(np.greater(a, b), [True, False, True, False, False])
+        assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
+        assert_equal(np.logical_and(a, b), [False, True, True, True, True])
+        assert_equal(np.logical_or(a, b), [True, True, True, True, True])
+        assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
+        assert_equal(np.logical_not(a), [True, False, False, False, False])
+
+        assert_equal(np.isnan(c), [False, False, False, True, False])
+        assert_equal(np.isinf(c), [False, False, True, False, False])
+        assert_equal(np.isfinite(c), [True, True, False, False, True])
+        assert_equal(np.signbit(b), [True, False, False, False, False])
+
+        assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
+
+        assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
+
+        x = np.maximum(b, c)
+        assert_(np.isnan(x[3]))
+        x[3] = 0
+        assert_equal(x, [0, 5, 1, 0, 6])
+
+        assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
+
+        x = np.minimum(b, c)
+        assert_(np.isnan(x[3]))
+        x[3] = 0
+        assert_equal(x, [-2, -1, -np.inf, 0, 3])
+
+        assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
+        assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
+        assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
+        assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])
+
+        assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
+        assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
+        assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
+        assert_equal(np.square(b), [4, 25, 1, 16, 9])
+        assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
+        assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
+        assert_equal(np.conjugate(b), b)
+        assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
+        assert_equal(np.negative(b), [2, -5, -1, -4, -3])
+        assert_equal(np.positive(b), b)
+        assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
+        assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
+        assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
+        assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
+
+    @np._no_nep50_warning()
+    def test_half_coercion(self, weak_promotion):
+        """Test that half gets coerced properly with the other types"""
+        a16 = np.array((1,), dtype=float16)
+        a32 = np.array((1,), dtype=float32)
+        b16 = float16(1)
+        b32 = float32(1)
+
+        assert np.power(a16, 2).dtype == float16
+        assert np.power(a16, 2.0).dtype == float16
+        assert np.power(a16, b16).dtype == float16
+        expected_dt = float32 if weak_promotion else float16
+        assert np.power(a16, b32).dtype == expected_dt
+        assert np.power(a16, a16).dtype == float16
+        assert np.power(a16, a32).dtype == float32
+
+        expected_dt = float16 if weak_promotion else float64
+        assert np.power(b16, 2).dtype == expected_dt
+        assert np.power(b16, 2.0).dtype == expected_dt
+        assert np.power(b16, b16).dtype == float16
+        assert np.power(b16, b32).dtype == float32
+        assert np.power(b16, a16).dtype == float16
+        assert np.power(b16, a32).dtype == float32
+
+        assert np.power(a32, a16).dtype == float32
+        assert np.power(a32, b16).dtype == float32
+        expected_dt = float32 if weak_promotion else float16
+        assert np.power(b32, a16).dtype == expected_dt
+        assert np.power(b32, b16).dtype == float32
+
+    @pytest.mark.skipif(platform.machine() == "armv5tel",
+                        reason="See gh-413.")
+    @pytest.mark.skipif(IS_WASM,
+                        reason="fp exceptions don't work in wasm.")
+    def test_half_fpe(self):
+        with np.errstate(all='raise'):
+            sx16 = np.array((1e-4,), dtype=float16)
+            bx16 = np.array((1e4,), dtype=float16)
+            sy16 = float16(1e-4)
+            by16 = float16(1e4)
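+            # float16 reference points: smallest subnormal 2.0**-24,
+            # smallest normal 2.0**-14, largest finite value 65504; the
+            # products and quotients below land outside these bounds.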
+
+            # Underflow errors
+            assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16)
+            assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16)
+            assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16)
+            assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16)
+            assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16)
+            assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16)
+            assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16)
+            assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16)
+            assert_raises_fpe('underflow', lambda a, b:a/b,
+                                             float16(2.**-14), float16(2**11))
+            assert_raises_fpe('underflow', lambda a, b:a/b,
+                                             float16(-2.**-14), float16(2**11))
+            assert_raises_fpe('underflow', lambda a, b:a/b,
+                                             float16(2.**-14+2**-24), float16(2))
+            assert_raises_fpe('underflow', lambda a, b:a/b,
+                                             float16(-2.**-14-2**-24), float16(2))
+            assert_raises_fpe('underflow', lambda a, b:a/b,
+                                             float16(2.**-14+2**-23), float16(4))
+
+            # Overflow errors
+            assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16)
+            assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16)
+            assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16)
+            assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16)
+            assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16)
+            assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16)
+            assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16)
+            assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16)
+            assert_raises_fpe('overflow', lambda a, b:a+b,
+                                             float16(65504), float16(17))
+            assert_raises_fpe('overflow', lambda a, b:a-b,
+                                             float16(-65504), float16(17))
+            assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf))
+            assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf))
+            assert_raises_fpe('overflow', np.spacing, float16(65504))
+
+            # Invalid value errors
+            assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf))
+            assert_raises_fpe('invalid', np.spacing, float16(np.inf))
+            assert_raises_fpe('invalid', np.spacing, float16(np.nan))
+
+            # These should not raise
+            float16(65472)+float16(32)
+            float16(2**-13)/float16(2)
+            float16(2**-14)/float16(2**10)
+            np.spacing(float16(-65504))
+            np.nextafter(float16(65504), float16(-np.inf))
+            np.nextafter(float16(-65504), float16(np.inf))
+            np.nextafter(float16(np.inf), float16(0))
+            np.nextafter(float16(-np.inf), float16(0))
+            np.nextafter(float16(0), float16(np.nan))
+            np.nextafter(float16(np.nan), float16(0))
+            float16(2**-14)/float16(2**10)
+            float16(-2**-14)/float16(2**10)
+            float16(2**-14+2**-23)/float16(2)
+            float16(-2**-14-2**-23)/float16(2)
+
+    def test_half_array_interface(self):
+        """Test that half is compatible with __array_interface__"""
+        class Dummy:
+            pass
+
+        a = np.ones((1,), dtype=float16)
+        b = Dummy()
+        b.__array_interface__ = a.__array_interface__
+        c = np.array(b)
+        assert_(c.dtype == float16)
+        assert_equal(a, c)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_hashtable.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_hashtable.py
new file mode 100644
index 00000000..bace4c05
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_hashtable.py
@@ -0,0 +1,30 @@
+import pytest
+
+import random
+from numpy.core._multiarray_tests import identityhash_tester
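+# identityhash_tester exercises NumPy's internal identity-keyed hash table.
+# A plain dict is a valid reference implementation here, because bare
+# object() instances compare by identity anyway.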
+
+
+@pytest.mark.parametrize("key_length", [1, 3, 6])
+@pytest.mark.parametrize("length", [1, 16, 2000])
+def test_identity_hashtable(key_length, length):
+    # use a 20 object pool for everything (duplicates will happen)
+    pool = [object() for i in range(20)]
+    keys_vals = []
+    for i in range(length):
+        keys = tuple(random.choices(pool, k=key_length))
+        keys_vals.append((keys, random.choice(pool)))
+
+    dictionary = dict(keys_vals)
+
+    # add a random item at the end:
+    keys_vals.append(random.choice(keys_vals))
+    # the expected one could be different with duplicates:
+    expected = dictionary[keys_vals[-1][0]]
+
+    res = identityhash_tester(key_length, keys_vals, replace=True)
+    assert res is expected
+
+    # check that ensuring one duplicate definitely raises:
+    keys_vals.insert(0, keys_vals[-2])
+    with pytest.raises(RuntimeError):
+        identityhash_tester(key_length, keys_vals)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_indexerrors.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_indexerrors.py
new file mode 100644
index 00000000..a0e9a8c5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_indexerrors.py
@@ -0,0 +1,133 @@
+import numpy as np
+from numpy.testing import (
+        assert_raises, assert_raises_regex,
+        )
+
+
+class TestIndexErrors:
+    '''Tests to exercise index errors not covered by other tests.'''
+
+    def test_arraytypes_fasttake(self):
+        'take from a 0-length dimension'
+        x = np.empty((2, 3, 0, 4))
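+        # No index is valid into a 0-length axis, and mode='wrap'/'clip'
+        # cannot help because there is nothing to wrap or clip to.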
+        assert_raises(IndexError, x.take, [0], axis=2)
+        assert_raises(IndexError, x.take, [1], axis=2)
+        assert_raises(IndexError, x.take, [0], axis=2, mode='wrap')
+        assert_raises(IndexError, x.take, [0], axis=2, mode='clip')
+
+    def test_take_from_object(self):
+        # Check exception taking from object array
+        d = np.zeros(5, dtype=object)
+        assert_raises(IndexError, d.take, [6])
+
+        # Check exception taking from an array with a 0-sized dimension
+        d = np.zeros((5, 0), dtype=object)
+        assert_raises(IndexError, d.take, [1], axis=1)
+        assert_raises(IndexError, d.take, [0], axis=1)
+        assert_raises(IndexError, d.take, [0])
+        assert_raises(IndexError, d.take, [0], mode='wrap')
+        assert_raises(IndexError, d.take, [0], mode='clip')
+
+    def test_multiindex_exceptions(self):
+        a = np.empty(5, dtype=object)
+        assert_raises(IndexError, a.item, 20)
+        a = np.empty((5, 0), dtype=object)
+        assert_raises(IndexError, a.item, (0, 0))
+
+        a = np.empty(5, dtype=object)
+        assert_raises(IndexError, a.itemset, 20, 0)
+        a = np.empty((5, 0), dtype=object)
+        assert_raises(IndexError, a.itemset, (0, 0), 0)
+
+    def test_put_exceptions(self):
+        a = np.zeros((5, 5))
+        assert_raises(IndexError, a.put, 100, 0)
+        a = np.zeros((5, 5), dtype=object)
+        assert_raises(IndexError, a.put, 100, 0)
+        a = np.zeros((5, 5, 0))
+        assert_raises(IndexError, a.put, 100, 0)
+        a = np.zeros((5, 5, 0), dtype=object)
+        assert_raises(IndexError, a.put, 100, 0)
+
+    def test_iterators_exceptions(self):
+        "cases in iterators.c"
+        def assign(obj, ind, val):
+            obj[ind] = val
+
+        a = np.zeros([1, 2, 3])
+        assert_raises(IndexError, lambda: a[0, 5, None, 2])
+        assert_raises(IndexError, lambda: a[0, 5, 0, 2])
+        assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1))
+        assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2),  1))
+
+        a = np.zeros([1, 0, 3])
+        assert_raises(IndexError, lambda: a[0, 0, None, 2])
+        assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1))
+
+        a = np.zeros([1, 2, 3])
+        assert_raises(IndexError, lambda: a.flat[10])
+        assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
+        a = np.zeros([1, 0, 3])
+        assert_raises(IndexError, lambda: a.flat[10])
+        assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
+
+        a = np.zeros([1, 2, 3])
+        assert_raises(IndexError, lambda: a.flat[np.array(10)])
+        assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
+        a = np.zeros([1, 0, 3])
+        assert_raises(IndexError, lambda: a.flat[np.array(10)])
+        assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
+
+        a = np.zeros([1, 2, 3])
+        assert_raises(IndexError, lambda: a.flat[np.array([10])])
+        assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
+        a = np.zeros([1, 0, 3])
+        assert_raises(IndexError, lambda: a.flat[np.array([10])])
+        assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
+
+    def test_mapping(self):
+        "cases from mapping.c"
+
+        def assign(obj, ind, val):
+            obj[ind] = val
+
+        a = np.zeros((0, 10))
+        assert_raises(IndexError, lambda: a[12])
+
+        a = np.zeros((3, 5))
+        assert_raises(IndexError, lambda: a[(10, 20)])
+        assert_raises(IndexError, lambda: assign(a, (10, 20), 1))
+        a = np.zeros((3, 0))
+        assert_raises(IndexError, lambda: a[(1, 0)])
+        assert_raises(IndexError, lambda: assign(a, (1, 0), 1))
+
+        a = np.zeros((10,))
+        assert_raises(IndexError, lambda: assign(a, 10, 1))
+        a = np.zeros((0,))
+        assert_raises(IndexError, lambda: assign(a, 10, 1))
+
+        a = np.zeros((3, 5))
+        assert_raises(IndexError, lambda: a[(1, [1, 20])])
+        assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1))
+        a = np.zeros((3, 0))
+        assert_raises(IndexError, lambda: a[(1, [0, 1])])
+        assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1))
+
+    def test_mapping_error_message(self):
+        a = np.zeros((3, 5))
+        index = (1, 2, 3, 4, 5)
+        assert_raises_regex(
+                IndexError,
+                "too many indices for array: "
+                "array is 2-dimensional, but 5 were indexed",
+                lambda: a[index])
+
+    def test_methods(self):
+        "cases from methods.c"
+
+        a = np.zeros((3, 3))
+        assert_raises(IndexError, lambda: a.item(100))
+        assert_raises(IndexError, lambda: a.itemset(100, 1))
+        a = np.zeros((0, 3))
+        assert_raises(IndexError, lambda: a.item(100))
+        assert_raises(IndexError, lambda: a.itemset(100, 1))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_indexing.py
new file mode 100644
index 00000000..04293670
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_indexing.py
@@ -0,0 +1,1417 @@
+import sys
+import warnings
+import functools
+import operator
+
+import pytest
+
+import numpy as np
+from numpy.core._multiarray_tests import array_indexing
+from itertools import product
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_raises_regex,
+    assert_array_equal, assert_warns, HAS_REFCOUNT, IS_WASM
+    )
+
+
+class TestIndexing:
+    def test_index_no_floats(self):
+        a = np.array([[[5]]])
+
+        assert_raises(IndexError, lambda: a[0.0])
+        assert_raises(IndexError, lambda: a[0, 0.0])
+        assert_raises(IndexError, lambda: a[0.0, 0])
+        assert_raises(IndexError, lambda: a[0.0,:])
+        assert_raises(IndexError, lambda: a[:, 0.0])
+        assert_raises(IndexError, lambda: a[:, 0.0,:])
+        assert_raises(IndexError, lambda: a[0.0,:,:])
+        assert_raises(IndexError, lambda: a[0, 0, 0.0])
+        assert_raises(IndexError, lambda: a[0.0, 0, 0])
+        assert_raises(IndexError, lambda: a[0, 0.0, 0])
+        assert_raises(IndexError, lambda: a[-1.4])
+        assert_raises(IndexError, lambda: a[0, -1.4])
+        assert_raises(IndexError, lambda: a[-1.4, 0])
+        assert_raises(IndexError, lambda: a[-1.4,:])
+        assert_raises(IndexError, lambda: a[:, -1.4])
+        assert_raises(IndexError, lambda: a[:, -1.4,:])
+        assert_raises(IndexError, lambda: a[-1.4,:,:])
+        assert_raises(IndexError, lambda: a[0, 0, -1.4])
+        assert_raises(IndexError, lambda: a[-1.4, 0, 0])
+        assert_raises(IndexError, lambda: a[0, -1.4, 0])
+        assert_raises(IndexError, lambda: a[0.0:, 0.0])
+        assert_raises(IndexError, lambda: a[0.0:, 0.0,:])
+
+    def test_slicing_no_floats(self):
+        a = np.array([[5]])
+
+        # start as float.
+        assert_raises(TypeError, lambda: a[0.0:])
+        assert_raises(TypeError, lambda: a[0:, 0.0:2])
+        assert_raises(TypeError, lambda: a[0.0::2, :0])
+        assert_raises(TypeError, lambda: a[0.0:1:2,:])
+        assert_raises(TypeError, lambda: a[:, 0.0:])
+        # stop as float.
+        assert_raises(TypeError, lambda: a[:0.0])
+        assert_raises(TypeError, lambda: a[:0, 1:2.0])
+        assert_raises(TypeError, lambda: a[:0.0:2, :0])
+        assert_raises(TypeError, lambda: a[:0.0,:])
+        assert_raises(TypeError, lambda: a[:, 0:4.0:2])
+        # step as float.
+        assert_raises(TypeError, lambda: a[::1.0])
+        assert_raises(TypeError, lambda: a[0:, :2:2.0])
+        assert_raises(TypeError, lambda: a[1::4.0, :0])
+        assert_raises(TypeError, lambda: a[::5.0,:])
+        assert_raises(TypeError, lambda: a[:, 0:4:2.0])
+        # mixed.
+        assert_raises(TypeError, lambda: a[1.0:2:2.0])
+        assert_raises(TypeError, lambda: a[1.0::2.0])
+        assert_raises(TypeError, lambda: a[0:, :2.0:2.0])
+        assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])
+        assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:])
+        assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])
+        # a zero float step still raises TypeError, like any other float step.
+        assert_raises(TypeError, lambda: a[::0.0])
+
+    def test_index_no_array_to_index(self):
+        # No non-scalar arrays.
+        a = np.array([[[1]]])
+
+        assert_raises(TypeError, lambda: a[a:a:a])
+
+    def test_none_index(self):
+        # `None` index adds newaxis
+        a = np.array([1, 2, 3])
+        assert_equal(a[None], a[np.newaxis])
+        assert_equal(a[None].ndim, a.ndim + 1)
+
+    def test_empty_tuple_index(self):
+        # Empty tuple index creates a view
+        a = np.array([1, 2, 3])
+        assert_equal(a[()], a)
+        assert_(a[()].base is a)
+        a = np.array(0)
+        assert_(isinstance(a[()], np.int_))
+
+    def test_void_scalar_empty_tuple(self):
+        s = np.zeros((), dtype='V4')
+        assert_equal(s[()].dtype, s.dtype)
+        assert_equal(s[()], s)
+        assert_equal(type(s[...]), np.ndarray)
+
+    def test_same_kind_index_casting(self):
+        # Indexes should be cast with same-kind casting rather than safe
+        # casting, even if that is somewhat unsafe. So test various
+        # different code paths.
+        index = np.arange(5)
+        u_index = index.astype(np.uintp)
+        arr = np.arange(10)
+
+        assert_array_equal(arr[index], arr[u_index])
+        arr[u_index] = np.arange(5)
+        assert_array_equal(arr, np.arange(10))
+
+        arr = np.arange(10).reshape(5, 2)
+        assert_array_equal(arr[index], arr[u_index])
+
+        arr[u_index] = np.arange(5)[:,None]
+        assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))
+
+        arr = np.arange(25).reshape(5, 5)
+        assert_array_equal(arr[u_index, u_index], arr[index, index])
+
+    def test_empty_fancy_index(self):
+        # Empty list index creates an empty array
+        # with the same dtype (but with weird shape)
+        a = np.array([1, 2, 3])
+        assert_equal(a[[]], [])
+        assert_equal(a[[]].dtype, a.dtype)
+
+        b = np.array([], dtype=np.intp)
+        assert_equal(a[b], [])
+        assert_equal(a[b].dtype, a.dtype)
+
+        b = np.array([])
+        assert_raises(IndexError, a.__getitem__, b)
+
+    def test_ellipsis_index(self):
+        a = np.array([[1, 2, 3],
+                      [4, 5, 6],
+                      [7, 8, 9]])
+        assert_(a[...] is not a)
+        assert_equal(a[...], a)
+        # `a[...]` was `a` in numpy <1.9.
+        assert_(a[...].base is a)
+
+        # Slicing with ellipsis can skip an
+        # arbitrary number of dimensions
+        assert_equal(a[0, ...], a[0])
+        assert_equal(a[0, ...], a[0,:])
+        assert_equal(a[..., 0], a[:, 0])
+
+        # Slicing with ellipsis always results
+        # in an array, not a scalar
+        assert_equal(a[0, ..., 1], np.array(2))
+
+        # Assignment with `(Ellipsis,)` on 0-d arrays
+        b = np.array(1)
+        b[(Ellipsis,)] = 2
+        assert_equal(b, 2)
+
+    def test_single_int_index(self):
+        # Single integer index selects one row
+        a = np.array([[1, 2, 3],
+                      [4, 5, 6],
+                      [7, 8, 9]])
+
+        assert_equal(a[0], [1, 2, 3])
+        assert_equal(a[-1], [7, 8, 9])
+
+        # Index out of bounds produces IndexError
+        assert_raises(IndexError, a.__getitem__, 1 << 30)
+        # Index overflow produces IndexError
+        assert_raises(IndexError, a.__getitem__, 1 << 64)
+
+    def test_single_bool_index(self):
+        # Single boolean index
+        a = np.array([[1, 2, 3],
+                      [4, 5, 6],
+                      [7, 8, 9]])
+
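+        # A 0-d boolean index adds a new leading axis: True selects the
+        # whole array (like a[None]), False selects an empty slice of it.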
+        assert_equal(a[np.array(True)], a[None])
+        assert_equal(a[np.array(False)], a[None][0:0])
+
+    def test_boolean_shape_mismatch(self):
+        arr = np.ones((5, 4, 3))
+
+        index = np.array([True])
+        assert_raises(IndexError, arr.__getitem__, index)
+
+        index = np.array([False] * 6)
+        assert_raises(IndexError, arr.__getitem__, index)
+
+        index = np.zeros((4, 4), dtype=bool)
+        assert_raises(IndexError, arr.__getitem__, index)
+
+        assert_raises(IndexError, arr.__getitem__, (slice(None), index))
+
+    def test_boolean_indexing_onedim(self):
+        # Indexing a 2-dimensional array with a
+        # boolean array of length one
+        a = np.array([[ 0.,  0.,  0.]])
+        b = np.array([ True], dtype=bool)
+        assert_equal(a[b], a)
+        # boolean assignment
+        a[b] = 1.
+        assert_equal(a, [[1., 1., 1.]])
+
+    def test_boolean_assignment_value_mismatch(self):
+        # A boolean assignment should fail when the shape of the values
+        # cannot be broadcast to the subscription. (see also gh-3458)
+        a = np.arange(4)
+
+        def f(a, v):
+            a[a > -1] = v
+
+        assert_raises(ValueError, f, a, [])
+        assert_raises(ValueError, f, a, [1, 2, 3])
+        assert_raises(ValueError, f, a[:1], [1, 2, 3])
+
+    def test_boolean_assignment_needs_api(self):
+        # See also gh-7666
+        # This caused a segfault on Python 2 due to the GIL not being
+        # held when the iterator does not need it, but the transfer function
+        # does
+        arr = np.zeros(1000)
+        indx = np.zeros(1000, dtype=bool)
+        indx[:100] = True
+        arr[indx] = np.ones(100, dtype=object)
+
+        expected = np.zeros(1000)
+        expected[:100] = 1
+        assert_array_equal(arr, expected)
+
+    def test_boolean_indexing_twodim(self):
+        # Indexing a 2-dimensional array with a
+        # 2-dimensional boolean array
+        a = np.array([[1, 2, 3],
+                      [4, 5, 6],
+                      [7, 8, 9]])
+        b = np.array([[ True, False,  True],
+                      [False,  True, False],
+                      [ True, False,  True]])
+        assert_equal(a[b], [1, 3, 5, 7, 9])
+        assert_equal(a[b[1]], [[4, 5, 6]])
+        assert_equal(a[b[0]], a[b[2]])
+
+        # boolean assignment
+        a[b] = 0
+        assert_equal(a, [[0, 2, 0],
+                         [4, 0, 6],
+                         [0, 8, 0]])
+
+    def test_boolean_indexing_list(self):
+        # Regression test for #13715. It's a use-after-free bug which the
+        # test won't directly catch, but it will show up in valgrind.
+        a = np.array([1, 2, 3])
+        b = [True, False, True]
+        # Two variants of the test because the first takes a fast path
+        assert_equal(a[b], [1, 3])
+        assert_equal(a[None, b], [[1, 3]])
+
+    def test_reverse_strides_and_subspace_bufferinit(self):
+        # This tests that the strides are not reversed for simple and
+        # subspace fancy indexing.
+        a = np.ones(5)
+        b = np.zeros(5, dtype=np.intp)[::-1]
+        c = np.arange(5)[::-1]
+
+        a[b] = c
+        # If the strides are not reversed, the 0 in the arange comes last.
+        assert_equal(a[0], 0)
+
+        # This also tests that the subspace buffer is initialized:
+        a = np.ones((5, 2))
+        c = np.arange(10).reshape(5, 2)[::-1]
+        a[b, :] = c
+        assert_equal(a[0], [0, 1])
+
+    def test_reversed_strides_result_allocation(self):
+        # Test a bug when calculating the output strides for a result array
+        # when the subspace size was 1 (and test other cases as well)
+        a = np.arange(10)[:, None]
+        i = np.arange(10)[::-1]
+        assert_array_equal(a[i], a[i.copy('C')])
+
+        a = np.arange(20).reshape(-1, 2)
+        # repeat the check with a 2-d result array
+        assert_array_equal(a[i], a[i.copy('C')])
+
+    def test_uncontiguous_subspace_assignment(self):
+        # During development there was a bug activating a skip logic
+        # based on ndim instead of size.
+        a = np.full((3, 4, 2), -1)
+        b = np.full((3, 4, 2), -1)
+
+        a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
+        b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()
+
+        assert_equal(a, b)
+
+    def test_too_many_fancy_indices_special_case(self):
+        # Just documents behaviour; this is a small limitation.
+        a = np.ones((1,) * 32)  # 32 is NPY_MAXDIMS
+        assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)
+
+    def test_scalar_array_bool(self):
+        # NumPy bools can be used as boolean index (python ones as of yet not)
+        a = np.array(1)
+        assert_equal(a[np.bool_(True)], a[np.array(True)])
+        assert_equal(a[np.bool_(False)], a[np.array(False)])
+
+        # After deprecating bools as integers:
+        #a = np.array([0,1,2])
+        #assert_equal(a[True, :], a[None, :])
+        #assert_equal(a[:, True], a[:, None])
+        #
+        #assert_(not np.may_share_memory(a, a[True, :]))
+
+    def test_everything_returns_views(self):
+        # Before `...` would return a itself.
+        a = np.arange(5)
+
+        assert_(a is not a[()])
+        assert_(a is not a[...])
+        assert_(a is not a[:])
+
+    def test_broaderrors_indexing(self):
+        a = np.zeros((5, 5))
+        assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
+        assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)
+
+    def test_trivial_fancy_out_of_bounds(self):
+        a = np.zeros(5)
+        ind = np.ones(20, dtype=np.intp)
+        ind[-1] = 10
+        assert_raises(IndexError, a.__getitem__, ind)
+        assert_raises(IndexError, a.__setitem__, ind, 0)
+        ind = np.ones(20, dtype=np.intp)
+        ind[0] = 11
+        assert_raises(IndexError, a.__getitem__, ind)
+        assert_raises(IndexError, a.__setitem__, ind, 0)
+
+    def test_trivial_fancy_not_possible(self):
+        # Test that the fast path for trivial assignment is not incorrectly
+        # used when the index is not contiguous or 1D, see also gh-11467.
+        a = np.arange(6)
+        idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]
+        assert_array_equal(a[idx], idx)
+
+        # this case must not go into the fast path; note that idx is
+        # a non-contiguous, non-1-D array here.
+        a[idx] = -1
+        res = np.arange(6)
+        res[0] = -1
+        res[3] = -1
+        assert_array_equal(a, res)
+
+    def test_nonbaseclass_values(self):
+        class SubClass(np.ndarray):
+            def __array_finalize__(self, old):
+                # Have array finalize do funny things
+                self.fill(99)
+
+        a = np.zeros((5, 5))
+        s = a.copy().view(type=SubClass)
+        s.fill(1)
+
+        a[[0, 1, 2, 3, 4], :] = s
+        assert_((a == 1).all())
+
+        # Subspace is last, so transposing might want to finalize
+        a[:, [0, 1, 2, 3, 4]] = s
+        assert_((a == 1).all())
+
+        a.fill(0)
+        a[...] = s
+        assert_((a == 1).all())
+
+    def test_array_like_values(self):
+        # Similar to the above test, but use a memoryview instead
+        a = np.zeros((5, 5))
+        s = np.arange(25, dtype=np.float64).reshape(5, 5)
+
+        a[[0, 1, 2, 3, 4], :] = memoryview(s)
+        assert_array_equal(a, s)
+
+        a[:, [0, 1, 2, 3, 4]] = memoryview(s)
+        assert_array_equal(a, s)
+
+        a[...] = memoryview(s)
+        assert_array_equal(a, s)
+
+    def test_subclass_writeable(self):
+        d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],
+                         dtype=[('target', 'S20'), ('V_mag', '>f4')])
+        ind = np.array([False,  True,  True], dtype=bool)
+        assert_(d[ind].flags.writeable)
+        ind = np.array([0, 1])
+        assert_(d[ind].flags.writeable)
+        assert_(d[...].flags.writeable)
+        assert_(d[0].flags.writeable)
+
+    def test_memory_order(self):
+        # This is not necessary to preserve. Memory layouts for
+        # more complex indices are not as simple.
+        a = np.arange(10)
+        b = np.arange(10).reshape(5,2).T
+        assert_(a[b].flags.f_contiguous)
+
+        # Takes a different implementation branch:
+        a = a.reshape(-1, 1)
+        assert_(a[b, 0].flags.f_contiguous)
+
+    def test_scalar_return_type(self):
+        # Full scalar indices should return scalars and object
+        # arrays should not call PyArray_Return on their items
+        class Zero:
+            # The most basic valid indexing
+            def __index__(self):
+                return 0
+
+        z = Zero()
+
+        class ArrayLike:
+            # Simple array, should behave like the array
+            def __array__(self):
+                return np.array(0)
+
+        a = np.zeros(())
+        assert_(isinstance(a[()], np.float_))
+        a = np.zeros(1)
+        assert_(isinstance(a[z], np.float_))
+        a = np.zeros((1, 1))
+        assert_(isinstance(a[z, np.array(0)], np.float_))
+        assert_(isinstance(a[z, ArrayLike()], np.float_))
+
+        # And object arrays do not call it too often:
+        b = np.array(0)
+        a = np.array(0, dtype=object)
+        a[()] = b
+        assert_(isinstance(a[()], np.ndarray))
+        a = np.array([b, None])
+        assert_(isinstance(a[z], np.ndarray))
+        a = np.array([[b, None]])
+        assert_(isinstance(a[z, np.array(0)], np.ndarray))
+        assert_(isinstance(a[z, ArrayLike()], np.ndarray))
+
+    def test_small_regressions(self):
+        # Reference count of intp for index checks
+        a = np.array([0])
+        if HAS_REFCOUNT:
+            refcount = sys.getrefcount(np.dtype(np.intp))
+        # item setting always checks indices in separate function:
+        a[np.array([0], dtype=np.intp)] = 1
+        a[np.array([0], dtype=np.uint8)] = 1
+        assert_raises(IndexError, a.__setitem__,
+                      np.array([1], dtype=np.intp), 1)
+        assert_raises(IndexError, a.__setitem__,
+                      np.array([1], dtype=np.uint8), 1)
+
+        if HAS_REFCOUNT:
+            assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
+
+    def test_unaligned(self):
+        v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
+        d = v.view(np.dtype("S8"))
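+        # Slicing one byte off the front misaligns the buffer relative to
+        # the 8-byte "S8" itemsize, making d an unaligned destination.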
+        # unaligned source
+        x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
+        x = x.view(np.dtype("S8"))
+        x[...] = np.array("b" * 8, dtype="S")
+        b = np.arange(d.size)
+        #trivial
+        assert_equal(d[b], d)
+        d[b] = x
+        # nontrivial
+        # unaligned index array
+        b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
+        b = b.view(np.intp)[:d.size]
+        b[...] = np.arange(d.size)
+        assert_equal(d[b.astype(np.int16)], d)
+        d[b.astype(np.int16)] = x
+        # boolean
+        d[b % 2 == 0]
+        d[b % 2 == 0] = x[::2]
+
+    def test_tuple_subclass(self):
+        arr = np.ones((5, 5))
+
+        # A tuple subclass should also be an nd-index
+        class TupleSubclass(tuple):
+            pass
+        index = ([1], [1])
+        index = TupleSubclass(index)
+        assert_(arr[index].shape == (1,))
+        # Unlike the non nd-index:
+        assert_(arr[index,].shape != (1,))
+
+    def test_broken_sequence_not_nd_index(self):
+        # See gh-5063:
+        # If we have an object which claims to be a sequence, but fails
+        # on item getting, this should not be converted to an nd-index (tuple)
+        # If this object happens to be a valid index otherwise, it should work
+        # This object here is very dubious and probably bad though:
+        class SequenceLike:
+            def __index__(self):
+                return 0
+
+            def __len__(self):
+                return 1
+
+            def __getitem__(self, item):
+                raise IndexError('Not possible')
+
+        arr = np.arange(10)
+        assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
+
+        # also test that field indexing does not segfault
+        # for a similar reason, by indexing a structured array
+        arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')])
+        assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
+
+    def test_indexing_array_weird_strides(self):
+        # See also gh-6221
+        # the shapes used here come from the issue and create the correct
+        # size for the iterator buffering size.
+        x = np.ones(10)
+        x2 = np.ones((10, 2))
+        ind = np.arange(10)[:, None, None, None]
+        ind = np.broadcast_to(ind, (10, 55, 4, 4))
+
+        # single advanced index case
+        assert_array_equal(x[ind], x[ind.copy()])
+        # higher dimensional advanced index
+        zind = np.zeros(4, dtype=np.intp)
+        assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
+
+    def test_indexing_array_negative_strides(self):
+        # From gh-8264,
+        # core dumps if negative strides are used in iteration
+        arro = np.zeros((4, 4))
+        arr = arro[::-1, ::-1]
+
+        slices = (slice(None), [0, 1, 2, 3])
+        arr[slices] = 10
+        assert_array_equal(arr, 10.)
+
+    def test_character_assignment(self):
+        # This is an example of a function going through CopyObject, which
+        # used to have an untested special path for scalars
+        # (the character special dtype case, which should probably be deprecated)
+        arr = np.zeros((1, 5), dtype="c")
+        arr[0] = np.str_("asdfg")  # must assign as a sequence
+        assert_array_equal(arr[0], np.array("asdfg", dtype="c"))
+        assert arr[0, 1] == b"s"  # make sure not all were set to "a" for both
+
+    @pytest.mark.parametrize("index",
+            [True, False, np.array([0])])
+    @pytest.mark.parametrize("num", [32, 40])
+    @pytest.mark.parametrize("original_ndim", [1, 32])
+    def test_too_many_advanced_indices(self, index, num, original_ndim):
+        # These are limitations based on the number of arguments we can process.
+        # For `num=32` (and all boolean cases) the result is actually defined,
+        # but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons.
+        arr = np.ones((1,) * original_ndim)
+        with pytest.raises(IndexError):
+            arr[(index,) * num]
+        with pytest.raises(IndexError):
+            arr[(index,) * num] = 1.
+
+    @pytest.mark.skipif(IS_WASM, reason="no threading")
+    def test_structured_advanced_indexing(self):
+        # Test that copyswap(n) used by integer array indexing is threadsafe
+        # for structured datatypes, see gh-15387. This test can behave randomly.
+        from concurrent.futures import ThreadPoolExecutor
+
+        # Create a deeply nested dtype to make a failure more likely:
+        dt = np.dtype([("", "f8")])
+        dt = np.dtype([("", dt)] * 2)
+        dt = np.dtype([("", dt)] * 2)
+        # The array should be large enough to likely run into threading issues
+        arr = np.random.uniform(size=(6000, 8)).view(dt)[:, 0]
+
+        rng = np.random.default_rng()
+        def func(arr):
+            indx = rng.integers(0, len(arr), size=6000, dtype=np.intp)
+            arr[indx]
+
+        tpe = ThreadPoolExecutor(max_workers=8)
+        futures = [tpe.submit(func, arr) for _ in range(10)]
+        for f in futures:
+            f.result()
+
+        assert arr.dtype is dt
+
+    def test_nontuple_ndindex(self):
+        a = np.arange(25).reshape((5, 5))
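+        # A plain list of integers acts as a fancy index along the first
+        # axis; a list containing slices is not converted into a tuple
+        # index, so a[[slice(None)]] raises instead of aliasing a[:].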
+        assert_equal(a[[0, 1]], np.array([a[0], a[1]]))
+        assert_equal(a[[0, 1], [0, 1]], np.array([0, 6]))
+        assert_raises(IndexError, a.__getitem__, [slice(None)])
+
+
+class TestFieldIndexing:
+    def test_scalar_return_type(self):
+        # Field access on an array should return an array, even if it
+        # is 0-d.
+        a = np.zeros((), [('a','f8')])
+        assert_(isinstance(a['a'], np.ndarray))
+        assert_(isinstance(a[['a']], np.ndarray))
+
+
+class TestBroadcastedAssignments:
+    def assign(self, a, ind, val):
+        a[ind] = val
+        return a
+
+    def test_prepending_ones(self):
+        a = np.zeros((3, 2))
+
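+        # Values with extra leading length-1 axes broadcast cleanly against
+        # the (3, 2) target in each of the variants below.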
+        a[...] = np.ones((1, 3, 2))
+        # Fancy with subspace with and without transpose
+        a[[0, 1, 2], :] = np.ones((1, 3, 2))
+        a[:, [0, 1]] = np.ones((1, 3, 2))
+        # Fancy without subspace (with broadcasting)
+        a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))
+
+    def test_prepend_not_one(self):
+        assign = self.assign
+        s_ = np.s_
+        a = np.zeros(5)
+
+        # Too large and not only ones.
+        assert_raises(ValueError, assign, a, s_[...],  np.ones((2, 1)))
+        assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
+        assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1)))
+
+    def test_simple_broadcasting_errors(self):
+        assign = self.assign
+        s_ = np.s_
+        a = np.zeros((5, 1))
+
+        assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
+        assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))
+        assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
+        assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))
+        assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))
+
+    @pytest.mark.parametrize("index", [
+            (..., [1, 2], slice(None)),
+            ([0, 1], ..., 0),
+            (..., [1, 2], [1, 2])])
+    def test_broadcast_error_reports_correct_shape(self, index):
+        values = np.zeros((100, 100))  # will never broadcast below
+
+        arr = np.zeros((3, 4, 5, 6, 7))
+        # We currently report without any spaces (could be changed)
+        shape_str = str(arr[index].shape).replace(" ", "")
+
+        with pytest.raises(ValueError) as e:
+            arr[index] = values
+
+        assert str(e.value).endswith(shape_str)
+
+    def test_index_is_larger(self):
+        # Simple case of fancy index broadcasting of the index.
+        a = np.zeros((5, 5))
+        a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]
+
+        assert_((a[:3, :3] == [2, 3, 4]).all())
+
+    def test_broadcast_subspace(self):
+        a = np.zeros((100, 100))
+        v = np.arange(100)[:,None]
+        b = np.arange(100)[::-1]
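+        # Row b[k] == 99-k receives the broadcast row v[k], so reading the
+        # result back reversed must reproduce v.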
+        a[b] = v
+        assert_((a[::-1] == v).all())
+
+
+class TestSubclasses:
+    def test_basic(self):
+        # Test that indexing in various ways produces SubClass instances,
+        # and that the base is set up correctly: the original subclass
+        # instance for views, and a new ndarray for advanced/boolean indexing
+        # where a copy was made (latter a regression test for gh-11983).
+        class SubClass(np.ndarray):
+            pass
+
+        a = np.arange(5)
+        s = a.view(SubClass)
+        s_slice = s[:3]
+        assert_(type(s_slice) is SubClass)
+        assert_(s_slice.base is s)
+        assert_array_equal(s_slice, a[:3])
+
+        s_fancy = s[[0, 1, 2]]
+        assert_(type(s_fancy) is SubClass)
+        assert_(s_fancy.base is not s)
+        assert_(type(s_fancy.base) is np.ndarray)
+        assert_array_equal(s_fancy, a[[0, 1, 2]])
+        assert_array_equal(s_fancy.base, a[[0, 1, 2]])
+
+        s_bool = s[s > 0]
+        assert_(type(s_bool) is SubClass)
+        assert_(s_bool.base is not s)
+        assert_(type(s_bool.base) is np.ndarray)
+        assert_array_equal(s_bool, a[a > 0])
+        assert_array_equal(s_bool.base, a[a > 0])
+
+    def test_fancy_on_read_only(self):
+        # Test that fancy indexing on read-only SubClass does not make a
+        # read-only copy (gh-14132)
+        class SubClass(np.ndarray):
+            pass
+
+        a = np.arange(5)
+        s = a.view(SubClass)
+        s.flags.writeable = False
+        s_fancy = s[[0, 1, 2]]
+        assert_(s_fancy.flags.writeable)
+
+
+    def test_finalize_gets_full_info(self):
+        # Array finalize should be called on the filled array.
+        class SubClass(np.ndarray):
+            def __array_finalize__(self, old):
+                self.finalize_status = np.array(self)
+                self.old = old
+
+        s = np.arange(10).view(SubClass)
+        new_s = s[:3]
+        assert_array_equal(new_s.finalize_status, new_s)
+        assert_array_equal(new_s.old, s)
+
+        new_s = s[[0,1,2,3]]
+        assert_array_equal(new_s.finalize_status, new_s)
+        assert_array_equal(new_s.old, s)
+
+        new_s = s[s > 0]
+        assert_array_equal(new_s.finalize_status, new_s)
+        assert_array_equal(new_s.old, s)
+
+
+class TestFancyIndexingCast:
+    def test_boolean_index_cast_assign(self):
+        # Setup the boolean index and float arrays.
+        shape = (8, 63)
+        bool_index = np.zeros(shape).astype(bool)
+        bool_index[0, 1] = True
+        zero_array = np.zeros(shape)
+
+        # Assigning float is fine.
+        zero_array[bool_index] = np.array([1])
+        assert_equal(zero_array[0, 1], 1)
+
+        # Fancy indexing works, although we get a cast warning.
+        assert_warns(np.ComplexWarning,
+                     zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))
+        assert_equal(zero_array[0, 1], 2)  # No complex part
+
+        # Cast complex to float, throwing away the imaginary portion.
+        assert_warns(np.ComplexWarning,
+                     zero_array.__setitem__, bool_index, np.array([1j]))
+        assert_equal(zero_array[0, 1], 0)
+
+class TestFancyIndexingEquivalence:
+    def test_object_assign(self):
+        # Check that the field and object special case using copyto is active.
+        # The right hand side cannot be converted to an array here.
+        a = np.arange(5, dtype=object)
+        b = a.copy()
+        a[:3] = [1, (1,2), 3]
+        b[[0, 1, 2]] = [1, (1,2), 3]
+        assert_array_equal(a, b)
+
+        # test same for subspace fancy indexing
+        b = np.arange(5, dtype=object)[None, :]
+        b[[0], :3] = [[1, (1,2), 3]]
+        assert_array_equal(a, b[0])
+
+        # Check that swapping of axes works.
+        # There was a bug that made the later assignment throw a ValueError
+        # due to an incorrectly transposed temporary right hand side (gh-5714)
+        b = b.T
+        b[:3, [0]] = [[1], [(1,2)], [3]]
+        assert_array_equal(a, b[:, 0])
+
+        # Another test for the memory order of the subspace
+        arr = np.ones((3, 4, 5), dtype=object)
+        # Equivalent slicing assignment for comparison
+        cmp_arr = arr.copy()
+        cmp_arr[:1, ...] = [[[1], [2], [3], [4]]]
+        arr[[0], ...] = [[[1], [2], [3], [4]]]
+        assert_array_equal(arr, cmp_arr)
+        arr = arr.copy('F')
+        arr[[0], ...] = [[[1], [2], [3], [4]]]
+        assert_array_equal(arr, cmp_arr)
+
+    def test_cast_equivalence(self):
+        # Yes, normal slicing uses unsafe casting.
+        a = np.arange(5)
+        b = a.copy()
+
+        a[:3] = np.array(['2', '-3', '-1'])
+        b[[0, 2, 1]] = np.array(['2', '-1', '-3'])
+        assert_array_equal(a, b)
+
+        # test the same for subspace fancy indexing
+        b = np.arange(5)[None, :]
+        b[[0], :3] = np.array([['2', '-3', '-1']])
+        assert_array_equal(a, b[0])
+
+
+class TestMultiIndexingAutomated:
+    """
+    These tests use code to mimic the C-Code indexing for selection.
+
+    NOTE:
+
+        * This still lacks tests for complex item setting.
+        * If you change behavior of indexing, you might want to modify
+          these tests to try more combinations.
+        * Behavior was written to match numpy version 1.8. (though a
+          first version matched 1.7.)
+        * Only tuple indices are supported by the mimicking code.
+          (and tested as of writing this)
+        * Error types should match most of the time as long as there
+          is only one error. For multiple errors, what gets raised
+          will usually not be the same one. They are *not* tested.
+
+    Update 2016-11-30: It is probably not worth maintaining this test
+    indefinitely and it can be dropped if maintenance becomes a burden.
+
+    """
+
+    def setup_method(self):
+        self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
+        self.b = np.empty((3, 0, 5, 6))
+        self.complex_indices = ['skip', Ellipsis,
+            0,
+            # Boolean indices, up to 3-d for some special cases of eating up
+            # dimensions, also need to test all False
+            np.array([True, False, False]),
+            np.array([[True, False], [False, True]]),
+            np.array([[[False, False], [False, False]]]),
+            # Some slices:
+            slice(-5, 5, 2),
+            slice(1, 1, 100),
+            slice(4, -1, -2),
+            slice(None, None, -3),
+            # Some Fancy indexes:
+            np.empty((0, 1, 1), dtype=np.intp),  # empty and can be broadcast
+            np.array([0, 1, -2]),
+            np.array([[2], [0], [1]]),
+            np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
+            np.array([2, -1], dtype=np.int8),
+            np.zeros([1]*31, dtype=int),  # triggers a too-large result array.
+            np.array([0., 1.])]  # invalid datatype
+        # Some simpler indices that still cover a bit more
+        self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),
+                               'skip']
+        # Very simple ones to fill the rest:
+        self.fill_indices = [slice(None, None), 0]
+
+    def _get_multi_index(self, arr, indices):
+        """Mimic multi dimensional indexing.
+
+        Parameters
+        ----------
+        arr : ndarray
+            Array to be indexed.
+        indices : tuple of index objects
+
+        Returns
+        -------
+        out : ndarray
+            An array equivalent to the indexing operation (but always a copy).
+            `arr[indices]` should be identical.
+        no_copy : bool
+            Whether the indexing operation requires a copy. If this is `True`,
+            `np.may_share_memory(arr, arr[indices])` should be `True` (with
+            some exceptions for scalars and possibly 0-d arrays).
+
+        Notes
+        -----
+        While the function may mostly match the errors of normal indexing this
+        is generally not the case.
+        """
+        in_indices = list(indices)
+        indices = []
+        # if False, this is a fancy or boolean index
+        no_copy = True
+        # number of fancy/scalar indexes that are not consecutive
+        num_fancy = 0
+        # number of dimensions indexed by a "fancy" index
+        fancy_dim = 0
+        # NOTE: This is a funny twist (and probably OK to change).
+        # The boolean array has illegal indexes, but this is
+        # allowed if the broadcast fancy-indices are 0-sized.
+        # This variable is to catch that case.
+        error_unless_broadcast_to_empty = False
+
+        # We need to handle Ellipsis and make arrays from indices, also
+        # check if this is fancy indexing (set no_copy).
+        ndim = 0
+        ellipsis_pos = None  # define here mostly to replace all but first.
+        for i, indx in enumerate(in_indices):
+            if indx is None:
+                continue
+            if isinstance(indx, np.ndarray) and indx.dtype == bool:
+                no_copy = False
+                if indx.ndim == 0:
+                    raise IndexError
+                # boolean indices can have higher dimensions
+                ndim += indx.ndim
+                fancy_dim += indx.ndim
+                continue
+            if indx is Ellipsis:
+                if ellipsis_pos is None:
+                    ellipsis_pos = i
+                    continue  # do not increment ndim counter
+                raise IndexError
+            if isinstance(indx, slice):
+                ndim += 1
+                continue
+            if not isinstance(indx, np.ndarray):
+                # This could be open for changes in numpy.
+                # numpy should maybe raise an error if casting to intp
+                # is not safe. It rejects np.array([1., 2.]) but not
+                # [1., 2.] as index (same for e.g. np.take).
+                # (Note the importance of empty lists if changing this here)
+                try:
+                    indx = np.array(indx, dtype=np.intp)
+                except ValueError:
+                    raise IndexError
+                in_indices[i] = indx
+            elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
+                raise IndexError('arrays used as indices must be of '
+                                 'integer (or boolean) type')
+            if indx.ndim != 0:
+                no_copy = False
+            ndim += 1
+            fancy_dim += 1
+
+        if arr.ndim - ndim < 0:
+            # we can't take more dimensions than we have, not even for 0-d
+            # arrays, since a[()] makes sense but a[(),] does not. We will
+            # raise an error later on, unless a broadcasting error occurs
+            # first.
+            raise IndexError
+
+        if ndim == 0 and None not in in_indices:
+            # We have no indexes, or only an Ellipsis; either is legal.
+            return arr.copy(), no_copy
+
+        if ellipsis_pos is not None:
+            in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *
+                                                       (arr.ndim - ndim))
+
+        for ax, indx in enumerate(in_indices):
+            if isinstance(indx, slice):
+                # convert to an index array
+                indx = np.arange(*indx.indices(arr.shape[ax]))
+                indices.append(['s', indx])
+                continue
+            elif indx is None:
+                # this is like taking a slice with one element from a new axis:
+                indices.append(['n', np.array([0], dtype=np.intp)])
+                arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))
+                continue
+            if isinstance(indx, np.ndarray) and indx.dtype == bool:
+                if indx.shape != arr.shape[ax:ax+indx.ndim]:
+                    raise IndexError
+
+                try:
+                    flat_indx = np.ravel_multi_index(np.nonzero(indx),
+                                    arr.shape[ax:ax+indx.ndim], mode='raise')
+                except Exception:
+                    error_unless_broadcast_to_empty = True
+                    # fill with 0s instead, and raise error later
+                    flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
+                # concatenate axis into a single one:
+                if indx.ndim != 0:
+                    arr = arr.reshape((arr.shape[:ax]
+                                  + (np.prod(arr.shape[ax:ax+indx.ndim]),)
+                                  + arr.shape[ax+indx.ndim:]))
+                    indx = flat_indx
+                else:
+                    # This could be changed, a 0-d boolean index can
+                    # make sense (even outside the 0-d indexed array case)
+                    # Note that originally this could be interpreted as an
+                    # integer in the full integer special case.
+                    raise IndexError
+            else:
+                # If the index is a singleton, the bounds check is done
+                # before the broadcasting. This used to be different in <1.9
+                if indx.ndim == 0:
+                    if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
+                        raise IndexError
+            if indx.ndim == 0:
+                # The index is a scalar. This used to be two fold, but if
+                # fancy indexing was active, the check was done later,
+                # possibly after broadcasting it away (1.7. or earlier).
+                # Now it is always done.
+                if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
+                    raise IndexError
+            if (len(indices) > 0 and
+                    indices[-1][0] == 'f' and
+                    ax != ellipsis_pos):
+                # NOTE: There could still have been a 0-sized Ellipsis
+                # between them. Checked that with ellipsis_pos.
+                indices[-1].append(indx)
+            else:
+                # We have a fancy index that is not after an existing one.
+                # NOTE: A 0-d array triggers this as well, while one may
+                # expect it to not trigger it, since a scalar would not be
+                # considered fancy indexing.
+                num_fancy += 1
+                indices.append(['f', indx])
+
+        if num_fancy > 1 and not no_copy:
+            # We have to flush the fancy indexes left
+            new_indices = indices[:]
+            axes = list(range(arr.ndim))
+            fancy_axes = []
+            new_indices.insert(0, ['f'])
+            ni = 0
+            ai = 0
+            for indx in indices:
+                ni += 1
+                if indx[0] == 'f':
+                    new_indices[0].extend(indx[1:])
+                    del new_indices[ni]
+                    ni -= 1
+                    for ax in range(ai, ai + len(indx[1:])):
+                        fancy_axes.append(ax)
+                        axes.remove(ax)
+                ai += len(indx) - 1  # axis we are at
+            indices = new_indices
+            # and now we need to transpose arr:
+            arr = arr.transpose(*(fancy_axes + axes))
+
+        # We only have one 'f' index now and arr is transposed accordingly.
+        # Now handle newaxis by reshaping...
+        ax = 0
+        for indx in indices:
+            if indx[0] == 'f':
+                if len(indx) == 1:
+                    continue
+                # First of all, reshape arr to combine fancy axes into one:
+                orig_shape = arr.shape
+                orig_slice = orig_shape[ax:ax + len(indx[1:])]
+                arr = arr.reshape((arr.shape[:ax]
+                                    + (np.prod(orig_slice).astype(int),)
+                                    + arr.shape[ax + len(indx[1:]):]))
+
+                # Check if broadcasting works
+                res = np.broadcast(*indx[1:])
+                # unfortunately the indices might be out of bounds. So check
+                # that first, and use mode='wrap' then. However only if
+                # there are any indices...
+                if res.size != 0:
+                    if error_unless_broadcast_to_empty:
+                        raise IndexError
+                    for _indx, _size in zip(indx[1:], orig_slice):
+                        if _indx.size == 0:
+                            continue
+                        if np.any(_indx >= _size) or np.any(_indx < -_size):
+                            raise IndexError
+                if len(indx[1:]) == len(orig_slice):
+                    if np.prod(orig_slice) == 0:
+                        # Work around for a crash or IndexError with 'wrap'
+                        # in some 0-sized cases.
+                        try:
+                            mi = np.ravel_multi_index(indx[1:], orig_slice,
+                                                      mode='raise')
+                        except Exception:
+                            # This happens with 0-sized orig_slice (sometimes?);
+                            # there it is a ValueError, but indexing raises:
+                            raise IndexError('invalid index into 0-sized')
+                    else:
+                        mi = np.ravel_multi_index(indx[1:], orig_slice,
+                                                  mode='wrap')
+                else:
+                    # Maybe never happens...
+                    raise ValueError
+                arr = arr.take(mi.ravel(), axis=ax)
+                try:
+                    arr = arr.reshape((arr.shape[:ax]
+                                        + mi.shape
+                                        + arr.shape[ax+1:]))
+                except ValueError:
+                    # too many dimensions, probably
+                    raise IndexError
+                ax += mi.ndim
+                continue
+
+            # If we are here, we have a 1D array for take:
+            arr = arr.take(indx[1], axis=ax)
+            ax += 1
+
+        return arr, no_copy
+
+    def _check_multi_index(self, arr, index):
+        """Check a multi index item getting and simple setting.
+
+        Parameters
+        ----------
+        arr : ndarray
+            Array to be indexed, must be a reshaped arange.
+        index : tuple of indexing objects
+            Index being tested.
+        """
+        # Test item getting
+        try:
+            mimic_get, no_copy = self._get_multi_index(arr, index)
+        except Exception as e:
+            if HAS_REFCOUNT:
+                prev_refcount = sys.getrefcount(arr)
+            assert_raises(type(e), arr.__getitem__, index)
+            assert_raises(type(e), arr.__setitem__, index, 0)
+            if HAS_REFCOUNT:
+                assert_equal(prev_refcount, sys.getrefcount(arr))
+            return
+
+        self._compare_index_result(arr, index, mimic_get, no_copy)
+
+    def _check_single_index(self, arr, index):
+        """Check a single index item getting and simple setting.
+
+        Parameters
+        ----------
+        arr : ndarray
+            Array to be indexed, must be an arange.
+        index : indexing object
+            Index being tested. Must be a single index and not a tuple
+            of indexing objects (see also `_check_multi_index`).
+        """
+        try:
+            mimic_get, no_copy = self._get_multi_index(arr, (index,))
+        except Exception as e:
+            if HAS_REFCOUNT:
+                prev_refcount = sys.getrefcount(arr)
+            assert_raises(type(e), arr.__getitem__, index)
+            assert_raises(type(e), arr.__setitem__, index, 0)
+            if HAS_REFCOUNT:
+                assert_equal(prev_refcount, sys.getrefcount(arr))
+            return
+
+        self._compare_index_result(arr, index, mimic_get, no_copy)
+
+    def _compare_index_result(self, arr, index, mimic_get, no_copy):
+        """Compare mimicked result to indexing result.
+        """
+        arr = arr.copy()
+        indexed_arr = arr[index]
+        assert_array_equal(indexed_arr, mimic_get)
+        # Check if we got a view, unless it's a 0-sized or 0-d array
+        # (then it's not a view, and that does not matter).
+        if indexed_arr.size != 0 and indexed_arr.ndim != 0:
+            assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
+            # Check reference count of the original array
+            if HAS_REFCOUNT:
+                if no_copy:
+                    # refcount increases by one:
+                    assert_equal(sys.getrefcount(arr), 3)
+                else:
+                    assert_equal(sys.getrefcount(arr), 2)
+
+        # Test non-broadcast setitem:
+        b = arr.copy()
+        b[index] = mimic_get + 1000
+        if b.size == 0:
+            return  # nothing to compare here...
+        if no_copy and indexed_arr.ndim != 0:
+            # change indexed_arr in-place to manipulate original:
+            indexed_arr += 1000
+            assert_array_equal(arr, b)
+            return
+        # Use the fact that the array is originally an arange:
+        arr.flat[indexed_arr.ravel()] += 1000
+        assert_array_equal(arr, b)
+
+    def test_boolean(self):
+        a = np.array(5)
+        assert_equal(a[np.array(True)], 5)
+        a[np.array(True)] = 1
+        assert_equal(a, 1)
+        # NOTE: This is different from normal broadcasting, as
+        # arr[boolean_array] works like a multi-index, i.e. it is
+        # aligned to the left. This is probably correct for
+        # consistency with arr[boolean_array,]; also, no broadcasting
+        # is done at all.
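+        # A boolean index consumes as many leading axes as it has
+        # dimensions: a mask of arr.shape consumes all of them, a mask of
+        # arr.shape[:-1] leaves the last axis intact, and a mask with an
+        # extra leading dimension has too many dimensions and must fail.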
+        self._check_multi_index(
+            self.a, (np.zeros_like(self.a, dtype=bool),))
+        self._check_multi_index(
+            self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
+        self._check_multi_index(
+            self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
+
+    def test_multidim(self):
+        # Automatically test combinations with a complex index in the 2nd
+        # (or 1st) position and simple indices in one other position.
+        with warnings.catch_warnings():
+            # This is so that np.array(True) is not accepted in a full integer
+            # index, when running the file separately.
+            warnings.filterwarnings('error', '', DeprecationWarning)
+            warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)
+
+            def isskip(idx):
+                return isinstance(idx, str) and idx == "skip"
+
+            for simple_pos in [0, 2, 3]:
+                tocheck = [self.fill_indices, self.complex_indices,
+                           self.fill_indices, self.fill_indices]
+                tocheck[simple_pos] = self.simple_indices
+                for index in product(*tocheck):
+                    index = tuple(i for i in index if not isskip(i))
+                    self._check_multi_index(self.a, index)
+                    self._check_multi_index(self.b, index)
+
+        # Check very simple item getting:
+        self._check_multi_index(self.a, (0, 0, 0, 0))
+        self._check_multi_index(self.b, (0, 0, 0, 0))
+        # Also check (simple cases of) too many indices:
+        assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))
+        assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)
+        assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))
+        assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)
+
+    def test_1d(self):
+        a = np.arange(10)
+        for index in self.complex_indices:
+            self._check_single_index(a, index)
+
+
+class TestFloatNonIntegerArgument:
+    """
+    These test that ``TypeError`` is raised when you try to use
+    non-integers as arguments for indexing and slicing, e.g. ``a[0.0:5]``
+    and ``a[0.5]``, or in other functions like ``array.reshape(1., -1)``.
+
+    """
+    def test_valid_indexing(self):
+        # These should raise no errors.
+        a = np.array([[[5]]])
+
+        a[np.array([0])]
+        a[[0, 0]]
+        a[:, [0, 0]]
+        a[:, 0, :]
+        a[:, :, :]
+
+    def test_valid_slicing(self):
+        # These should raise no errors.
+        a = np.array([[[5]]])
+
+        a[::]
+        a[0:]
+        a[:2]
+        a[0:2]
+        a[::2]
+        a[1::2]
+        a[:2:2]
+        a[1:2:2]
+
+    def test_non_integer_argument_errors(self):
+        a = np.array([[5]])
+
+        assert_raises(TypeError, np.reshape, a, (1., 1., -1))
+        assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
+        assert_raises(TypeError, np.take, a, [0], 1.)
+        assert_raises(TypeError, np.take, a, [0], np.float64(1.))
+
+    def test_non_integer_sequence_multiplication(self):
+        # NumPy scalar sequence multiply should not work with non-integers
+        def mult(a, b):
+            return a * b
+
+        assert_raises(TypeError, mult, [1], np.float_(3))
+        # following should be OK
+        mult([1], np.int_(3))
+
+    def test_reduce_axis_float_index(self):
+        d = np.zeros((3, 3, 3))
+        assert_raises(TypeError, np.min, d, 0.5)
+        assert_raises(TypeError, np.min, d, (0.5, 1))
+        assert_raises(TypeError, np.min, d, (1, 2.2))
+        assert_raises(TypeError, np.min, d, (.2, 1.2))
+
+
+class TestBooleanIndexing:
+    # Using a boolean as integer argument/indexing is an error.
+    def test_bool_as_int_argument_errors(self):
+        a = np.array([[[1]]])
+
+        assert_raises(TypeError, np.reshape, a, (True, -1))
+        assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
+        # Note that operator.index(np.array(True)) does not work; a 0-d
+        # boolean array is thus rejected outright, while the boolean
+        # scalar is merely deprecated (with a different message):
+        assert_raises(TypeError, operator.index, np.array(True))
+        assert_warns(DeprecationWarning, operator.index, np.True_)
+        assert_raises(TypeError, np.take, a, [0], False)
+
+    def test_boolean_indexing_weirdness(self):
+        # Weird boolean indexing things
+        a = np.ones((2, 3, 4))
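+        # A 0-d boolean consumes no axis: it acts as an advanced index
+        # with one element (True) or none (False) that broadcasts against
+        # the other advanced indices.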
+        assert a[False, True, ...].shape == (0, 2, 3, 4)
+        assert a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2)
+        assert_raises(IndexError, lambda: a[False, [0, 1], ...])
+
+    def test_boolean_indexing_fast_path(self):
+        # These used to either give the wrong error, or incorrectly give no
+        # error.
+        a = np.ones((3, 3))
+
+        # This used to incorrectly work (and give an array of shape (0,))
+        idx1 = np.array([[False]*9])
+        assert_raises_regex(IndexError,
+            "boolean index did not match indexed array along dimension 0; "
+            "dimension is 3 but corresponding boolean dimension is 1",
+            lambda: a[idx1])
+
+        # This used to incorrectly give a ValueError:
+        # "operands could not be broadcast together"
+        idx2 = np.array([[False]*8 + [True]])
+        assert_raises_regex(IndexError,
+            "boolean index did not match indexed array along dimension 0; "
+            "dimension is 3 but corresponding boolean dimension is 1",
+            lambda: a[idx2])
+
+        # This case behaves as it always did; the above two should work
+        # the same way.
+        idx3 = np.array([[False]*10])
+        assert_raises_regex(IndexError,
+            "boolean index did not match indexed array along dimension 0; "
+            "dimension is 3 but corresponding boolean dimension is 1",
+            lambda: a[idx3])
+
+        # This used to give ValueError: non-broadcastable operand
+        a = np.ones((1, 1, 2))
+        idx = np.array([[[True], [False]]])
+        assert_raises_regex(IndexError,
+            "boolean index did not match indexed array along dimension 1; "
+            "dimension is 1 but corresponding boolean dimension is 2",
+            lambda: a[idx])
+
+
+class TestArrayToIndexDeprecation:
+    """Creating an index from a non-0-D array is an error.
+
+    """
+    def test_array_to_index_error(self):
+        # These were never valid indices, so no deprecation period is
+        # involved; the raising itself is effectively tested above.
+        a = np.array([[[1]]])
+
+        assert_raises(TypeError, operator.index, np.array([1]))
+        assert_raises(TypeError, np.reshape, a, (a, -1))
+        assert_raises(TypeError, np.take, a, [0], a)
+
+
+class TestNonIntegerArrayLike:
+    """Tests that array-likes are only valid if they can be safely cast
+    to integer.
+
+    For instance, lists give IndexError when they cannot be safely cast to
+    an integer.
+
+    """
+    def test_basic(self):
+        a = np.arange(10)
+
+        assert_raises(IndexError, a.__getitem__, [0.5, 1.5])
+        assert_raises(IndexError, a.__getitem__, (['1', '2'],))
+
+        # The following is valid
+        a.__getitem__([])
+
+
+class TestMultipleEllipsisError:
+    """An index can only have a single ellipsis.
+
+    """
+    def test_basic(self):
+        a = np.arange(10)
+        assert_raises(IndexError, lambda: a[..., ...])
+        assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))
+        assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
+
+
+class TestCApiAccess:
+    def test_getitem(self):
+        subscript = functools.partial(array_indexing, 0)
+
+        # 0-d arrays don't work:
+        assert_raises(IndexError, subscript, np.ones(()), 0)
+        # Out of bound values:
+        assert_raises(IndexError, subscript, np.ones(10), 11)
+        assert_raises(IndexError, subscript, np.ones(10), -11)
+        assert_raises(IndexError, subscript, np.ones((10, 10)), 11)
+        assert_raises(IndexError, subscript, np.ones((10, 10)), -11)
+
+        a = np.arange(10)
+        assert_array_equal(a[4], subscript(a, 4))
+        a = a.reshape(5, 2)
+        assert_array_equal(a[-4], subscript(a, -4))
+
+    def test_setitem(self):
+        assign = functools.partial(array_indexing, 1)
+
+        # Deletion is impossible:
+        assert_raises(ValueError, assign, np.ones(10), 0)
+        # 0-d arrays don't work:
+        assert_raises(IndexError, assign, np.ones(()), 0, 0)
+        # Out of bound values:
+        assert_raises(IndexError, assign, np.ones(10), 11, 0)
+        assert_raises(IndexError, assign, np.ones(10), -11, 0)
+        assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)
+        assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)
+
+        a = np.arange(10)
+        assign(a, 4, 10)
+        assert_(a[4] == 10)
+
+        a = a.reshape(5, 2)
+        assign(a, 4, 10)
+        assert_array_equal(a[-1], [10, 10])
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_item_selection.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_item_selection.py
new file mode 100644
index 00000000..5660ef58
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_item_selection.py
@@ -0,0 +1,165 @@
+import sys
+
+import pytest
+
+import numpy as np
+from numpy.testing import (
+    assert_, assert_raises, assert_array_equal, HAS_REFCOUNT
+    )
+
+
+class TestTake:
+    def test_simple(self):
+        a = [[1, 2], [3, 4]]
+        a_str = [[b'1', b'2'], [b'3', b'4']]
+        modes = ['raise', 'wrap', 'clip']
+        indices = [-1, 4]
+        index_arrays = [np.empty(0, dtype=np.intp),
+                        np.empty(tuple(), dtype=np.intp),
+                        np.empty((1, 1), dtype=np.intp)]
+        real_indices = {'raise': {-1: 1, 4: IndexError},
+                        'wrap': {-1: 1, 4: 0},
+                        'clip': {-1: 0, 4: 1}}
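+        # For an axis of length 2: 'raise' allows negative wrap-around
+        # (-1 -> 1) but rejects 4, 'wrap' takes the index modulo the axis
+        # length, and 'clip' clamps the index into [0, 1].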
+        # Currently all types but object use the same generated functions,
+        # so it should not be necessary to test them all. However, also
+        # test a non-refcounted struct in addition to object, whose size
+        # hits the default (non-specialized) path.
+        types = int, object, np.dtype([('', 'i2', 3)])
+        for t in types:
+            # Constructing ta works, even if the array contents may be odd
+            # when the buffer interface is used
+            ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t)
+            tresult = list(ta.T.copy())
+            for index_array in index_arrays:
+                if index_array.size != 0:
+                    tresult[0].shape = (2,) + index_array.shape
+                    tresult[1].shape = (2,) + index_array.shape
+                for mode in modes:
+                    for index in indices:
+                        real_index = real_indices[mode][index]
+                        if real_index is IndexError and index_array.size != 0:
+                            index_array.put(0, index)
+                            assert_raises(IndexError, ta.take, index_array,
+                                          mode=mode, axis=1)
+                        elif index_array.size != 0:
+                            index_array.put(0, index)
+                            res = ta.take(index_array, mode=mode, axis=1)
+                            assert_array_equal(res, tresult[real_index])
+                        else:
+                            res = ta.take(index_array, mode=mode, axis=1)
+                            assert_(res.shape == (2,) + index_array.shape)
+
+    def test_refcounting(self):
+        objects = [object() for i in range(10)]
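+        # Each object should end up referenced only by `objects` itself;
+        # sys.getrefcount reports 3 below because the generator variable
+        # and getrefcount's own argument add two temporary references.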
+        for mode in ('raise', 'clip', 'wrap'):
+            a = np.array(objects)
+            b = np.array([2, 2, 4, 5, 3, 5])
+            a.take(b, out=a[:6], mode=mode)
+            del a
+            if HAS_REFCOUNT:
+                assert_(all(sys.getrefcount(o) == 3 for o in objects))
+            # Non-contiguous example:
+            a = np.array(objects * 2)[::2]
+            a.take(b, out=a[:6], mode=mode)
+            del a
+            if HAS_REFCOUNT:
+                assert_(all(sys.getrefcount(o) == 3 for o in objects))
+
+    def test_unicode_mode(self):
+        d = np.arange(10)
+        k = b'\xc3\xa4'.decode("UTF8")
+        assert_raises(ValueError, d.take, 5, mode=k)
+
+    def test_empty_partition(self):
+        # In reference to github issue #6530
+        a_original = np.array([0, 2, 4, 6, 8, 10])
+        a = a_original.copy()
+
+        # An empty partition should be a successful no-op
+        a.partition(np.array([], dtype=np.int16))
+
+        assert_array_equal(a, a_original)
+
+    def test_empty_argpartition(self):
+        # In reference to github issue #6530
+        a = np.array([0, 2, 4, 6, 8, 10])
+        a = a.argpartition(np.array([], dtype=np.int16))
+
+        b = np.array([0, 1, 2, 3, 4, 5])
+        assert_array_equal(a, b)
+
+
+class TestPutMask:
+    @pytest.mark.parametrize("dtype", list(np.typecodes["All"]) + ["i,O"])
+    def test_simple(self, dtype):
+        if dtype.lower() == "m":
+            dtype += "8[ns]"
+
+        # putmask does not care about the value length (shorter value
+        # arrays are simply repeated)
+        vals = np.arange(1001).astype(dtype=dtype)
+
+        mask = np.random.randint(2, size=1000).astype(bool)
+        # Use vals.dtype in case of flexible dtype (i.e. string)
+        arr = np.zeros(1000, dtype=vals.dtype)
+        zeros = arr.copy()
+
+        np.putmask(arr, mask, vals)
+        assert_array_equal(arr[mask], vals[:len(mask)][mask])
+        assert_array_equal(arr[~mask], zeros[~mask])
+
+    @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"])
+    @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"])
+    def test_empty(self, dtype, mode):
+        arr = np.zeros(1000, dtype=dtype)
+        arr_copy = arr.copy()
+        mask = np.random.randint(2, size=1000).astype(bool)
+
+        # Allowing empty values like this is weird...
+        np.put(arr, mask, [])
+        assert_array_equal(arr, arr_copy)
+
+
+class TestPut:
+    @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"])
+    @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"])
+    def test_simple(self, dtype, mode):
+        if dtype.lower() == "m":
+            dtype += "8[ns]"
+
+        # put does not care about the value length (shorter value arrays
+        # are repeated as necessary)
+        vals = np.arange(1001).astype(dtype=dtype)
+
+        # Use vals.dtype in case of flexible dtype (i.e. string)
+        arr = np.zeros(1000, dtype=vals.dtype)
+        zeros = arr.copy()
+
+        if mode == "clip":
+            # Special case: index 0 and the last index are "reserved" for
+            # the clip test
+            indx = np.random.permutation(len(arr) - 2)[:-500] + 1
+
+            indx[-1] = 0
+            indx[-2] = len(arr) - 1
+            indx_put = indx.copy()
+            indx_put[-1] = -1389
+            indx_put[-2] = 1321
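+            # These out-of-range values clip onto the reserved slots:
+            # -1389 -> 0 and 1321 -> len(arr) - 1 == 999.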
+        else:
+            # Avoid duplicates (for simplicity) and fill half only
+            indx = np.random.permutation(len(arr) - 3)[:-500]
+            indx_put = indx
+            if mode == "wrap":
+                indx_put = indx_put + len(arr)
+
+        np.put(arr, indx_put, vals, mode=mode)
+        assert_array_equal(arr[indx], vals[:len(indx)])
+        untouched = np.ones(len(arr), dtype=bool)
+        untouched[indx] = False
+        assert_array_equal(arr[untouched], zeros[:untouched.sum()])
+
+    @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"])
+    @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"])
+    def test_empty(self, dtype, mode):
+        arr = np.zeros(1000, dtype=dtype)
+        arr_copy = arr.copy()
+
+        # Allowing empty values like this is weird...
+        np.put(arr, [1, 2, 3], [])
+        assert_array_equal(arr, arr_copy)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_limited_api.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_limited_api.py
new file mode 100644
index 00000000..725de19b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_limited_api.py
@@ -0,0 +1,44 @@
+import os
+import shutil
+import subprocess
+import sys
+import sysconfig
+import pytest
+
+from numpy.testing import IS_WASM
+
+
+@pytest.mark.skipif(IS_WASM, reason="Can't start subprocess")
+@pytest.mark.xfail(
+    sysconfig.get_config_var("Py_DEBUG"),
+    reason=(
+        "Py_LIMITED_API is incompatible with Py_DEBUG, Py_TRACE_REFS, "
+        "and Py_REF_DEBUG"
+    ),
+)
+def test_limited_api(tmp_path):
+    """Test building a third-party C extension with the limited API."""
+    # Based in part on test_cython from random.tests.test_extending
+
+    here = os.path.dirname(__file__)
+    ext_dir = os.path.join(here, "examples", "limited_api")
+
+    cytest = str(tmp_path / "limited_api")
+
+    shutil.copytree(ext_dir, cytest)
+    # build the examples and "install" them into a temporary directory
+
+    install_log = str(tmp_path / "tmp_install_log.txt")
+    subprocess.check_output(
+        [
+            sys.executable,
+            "setup.py",
+            "build",
+            "install",
+            "--prefix", str(tmp_path / "installdir"),
+            "--single-version-externally-managed",
+            "--record",
+            install_log,
+        ],
+        cwd=cytest,
+    )
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_longdouble.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_longdouble.py
new file mode 100644
index 00000000..45721950
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_longdouble.py
@@ -0,0 +1,395 @@
+import warnings
+import platform
+import pytest
+
+import numpy as np
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_warns, assert_array_equal,
+    temppath, IS_MUSL
+    )
+from numpy.core.tests._locales import CommaDecimalPointLocale
+
+
+LD_INFO = np.finfo(np.longdouble)
+longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
+
+
+_o = 1 + LD_INFO.eps
+string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o)))
+del _o
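+# True when repr() followed by parsing does not round-trip the smallest
+# representable increment above 1, i.e. the C runtime lacks an accurate
+# strtold; several tests below are skipped in that case.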
+
+
+def test_scalar_extraction():
+    """Confirm that extracting a value doesn't convert to python float"""
+    o = 1 + LD_INFO.eps
+    a = np.array([o, o, o])
+    assert_equal(a[1], o)
+
+
+# Conversions string -> long double
+
+# 0.1 is not exactly representable in base-2 floating point.
+repr_precision = len(repr(np.longdouble(0.1)))
+# The +2 in the skip condition below comes from the macro block starting
+# around line 842 in scalartypes.c.src.
+
+
+@pytest.mark.skipif(IS_MUSL,
+                    reason="test flaky on musllinux")
+@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision,
+                    reason="repr precision not enough to show eps")
+def test_repr_roundtrip():
+    # We will only see eps in repr if within printing precision.
+    o = 1 + LD_INFO.eps
+    assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o))
+
+
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
+def test_repr_roundtrip_bytes():
+    o = 1 + LD_INFO.eps
+    assert_equal(np.longdouble(repr(o).encode("ascii")), o)
+
+
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
+@pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes))
+def test_array_and_stringlike_roundtrip(strtype):
+    """
+    Test that string representations of long-double roundtrip both
+    for array casting and scalar coercion, see also gh-15608.
+    """
+    o = 1 + LD_INFO.eps
+
+    if strtype in (np.bytes_, bytes):
+        o_str = strtype(repr(o).encode("ascii"))
+    else:
+        o_str = strtype(repr(o))
+
+    # Test that `o` is correctly coerced from the string-like
+    assert o == np.longdouble(o_str)
+
+    # Test that arrays also roundtrip correctly:
+    o_strarr = np.asarray([o] * 3, dtype=strtype)
+    assert (o == o_strarr.astype(np.longdouble)).all()
+
+    # And array coercion and casting to string give the same as scalar repr:
+    assert (o_strarr == o_str).all()
+    assert (np.asarray([o] * 3).astype(strtype) == o_str).all()
+
+
+def test_bogus_string():
+    assert_raises(ValueError, np.longdouble, "spam")
+    assert_raises(ValueError, np.longdouble, "1.0 flub")
+
+
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
+def test_fromstring():
+    o = 1 + LD_INFO.eps
+    s = (" " + repr(o))*5
+    a = np.array([o]*5)
+    assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a,
+                 err_msg="reading '%s'" % s)
+
+
+def test_fromstring_complex():
+    for ctype in ["complex", "cdouble", "cfloat"]:
+        # Check spacing between separator
+        assert_equal(np.fromstring("1, 2 ,  3  ,4", sep=",", dtype=ctype),
+                     np.array([1., 2., 3., 4.]))
+        # Real component not specified
+        assert_equal(np.fromstring("1j, -2j,  3j, 4e1j", sep=",", dtype=ctype),
+                     np.array([1.j, -2.j, 3.j, 40.j]))
+        # Both components specified
+        assert_equal(np.fromstring("1+1j,2-2j, -3+3j,  -4e1+4j", sep=",", dtype=ctype),
+                     np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
+        # Spaces at wrong places
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","),
+                         np.array([1.]))
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","),
+                         np.array([1.]))
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","),
+                         np.array([1.]))
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1+j", dtype=ctype, sep=","),
+                         np.array([1.]))
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1+", dtype=ctype, sep=","),
+                         np.array([1.]))
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","),
+                         np.array([1j]))
+
+
+def test_fromstring_bogus():
+    with assert_warns(DeprecationWarning):
+        assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
+                     np.array([1., 2., 3.]))
+
+
+def test_fromstring_empty():
+    with assert_warns(DeprecationWarning):
+        assert_equal(np.fromstring("xxxxx", sep="x"),
+                     np.array([]))
+
+
+def test_fromstring_missing():
+    with assert_warns(DeprecationWarning):
+        assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
+                     np.array([1]))
+
+
+class TestFileBased:
+
+    ldbl = 1 + LD_INFO.eps
+    tgt = np.array([ldbl]*5)
+    out = ''.join([repr(t) + '\n' for t in tgt])
+
+    def test_fromfile_bogus(self):
+        with temppath() as path:
+            with open(path, 'w') as f:
+                f.write("1. 2. 3. flop 4.\n")
+
+            with assert_warns(DeprecationWarning):
+                res = np.fromfile(path, dtype=float, sep=" ")
+        assert_equal(res, np.array([1., 2., 3.]))
+
+    def test_fromfile_complex(self):
+        for ctype in ["complex", "cdouble", "cfloat"]:
+            # Check spacing between separator and only real component specified
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1, 2 ,  3  ,4\n")
+
+                res = np.fromfile(path, dtype=ctype, sep=",")
+            assert_equal(res, np.array([1., 2., 3., 4.]))
+
+            # Real component not specified
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1j, -2j,  3j, 4e1j\n")
+
+                res = np.fromfile(path, dtype=ctype, sep=",")
+            assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j]))
+
+            # Both components specified
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1+1j,2-2j, -3+3j,  -4e1+4j\n")
+
+                res = np.fromfile(path, dtype=ctype, sep=",")
+            assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
+
+            # Spaces at wrong places
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1+2 j,3\n")
+
+                with assert_warns(DeprecationWarning):
+                    res = np.fromfile(path, dtype=ctype, sep=",")
+            assert_equal(res, np.array([1.]))
+
+            # Spaces at wrong places
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1+ 2j,3\n")
+
+                with assert_warns(DeprecationWarning):
+                    res = np.fromfile(path, dtype=ctype, sep=",")
+            assert_equal(res, np.array([1.]))
+
+            # Spaces at wrong places
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1 +2j,3\n")
+
+                with assert_warns(DeprecationWarning):
+                    res = np.fromfile(path, dtype=ctype, sep=",")
+            assert_equal(res, np.array([1.]))
+
+            # Spaces at wrong places
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1+j\n")
+
+                with assert_warns(DeprecationWarning):
+                    res = np.fromfile(path, dtype=ctype, sep=",")
+            assert_equal(res, np.array([1.]))
+
+            # Spaces at wrong places
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1+\n")
+
+                with assert_warns(DeprecationWarning):
+                    res = np.fromfile(path, dtype=ctype, sep=",")
+            assert_equal(res, np.array([1.]))
+
+            # Spaces at wrong places
+            with temppath() as path:
+                with open(path, 'w') as f:
+                    f.write("1j+1\n")
+
+                with assert_warns(DeprecationWarning):
+                    res = np.fromfile(path, dtype=ctype, sep=",")
+            assert_equal(res, np.array([1.j]))
+
+    @pytest.mark.skipif(string_to_longdouble_inaccurate,
+                        reason="Need strtold_l")
+    def test_fromfile(self):
+        with temppath() as path:
+            with open(path, 'w') as f:
+                f.write(self.out)
+            res = np.fromfile(path, dtype=np.longdouble, sep="\n")
+        assert_equal(res, self.tgt)
+
+    @pytest.mark.skipif(string_to_longdouble_inaccurate,
+                        reason="Need strtold_l")
+    def test_genfromtxt(self):
+        with temppath() as path:
+            with open(path, 'w') as f:
+                f.write(self.out)
+            res = np.genfromtxt(path, dtype=np.longdouble)
+        assert_equal(res, self.tgt)
+
+    @pytest.mark.skipif(string_to_longdouble_inaccurate,
+                        reason="Need strtold_l")
+    def test_loadtxt(self):
+        with temppath() as path:
+            with open(path, 'w') as f:
+                f.write(self.out)
+            res = np.loadtxt(path, dtype=np.longdouble)
+        assert_equal(res, self.tgt)
+
+    @pytest.mark.skipif(string_to_longdouble_inaccurate,
+                        reason="Need strtold_l")
+    def test_tofile_roundtrip(self):
+        with temppath() as path:
+            self.tgt.tofile(path, sep=" ")
+            res = np.fromfile(path, dtype=np.longdouble, sep=" ")
+        assert_equal(res, self.tgt)
+
+
+# Conversions long double -> string
+
+
+def test_repr_exact():
+    o = 1 + LD_INFO.eps
+    assert_(repr(o) != '1')
+
+
+@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+                    reason="Need strtold_l")
+def test_format():
+    o = 1 + LD_INFO.eps
+    assert_("{0:.40g}".format(o) != '1')
+
+
+@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+                    reason="Need strtold_l")
+def test_percent():
+    o = 1 + LD_INFO.eps
+    assert_("%.40g" % o != '1')
+
+
+@pytest.mark.skipif(longdouble_longer_than_double,
+                    reason="array repr problem")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+                    reason="Need strtold_l")
+def test_array_repr():
+    o = 1 + LD_INFO.eps
+    a = np.array([o])
+    b = np.array([1], dtype=np.longdouble)
+    if not np.all(a != b):
+        raise ValueError("precision loss creating arrays")
+    assert_(repr(a) != repr(b))
+
+#
+# Locale tests: scalar types formatting should be independent of the locale
+#
+
+class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
+
+    def test_repr_roundtrip_foreign(self):
+        o = 1.5
+        assert_equal(o, np.longdouble(repr(o)))
+
+    def test_fromstring_foreign_repr(self):
+        f = 1.234
+        a = np.fromstring(repr(f), dtype=float, sep=" ")
+        assert_equal(a[0], f)
+
+    def test_fromstring_best_effort_float(self):
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
+                         np.array([1.]))
+
+    def test_fromstring_best_effort(self):
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
+                         np.array([1.]))
+
+    def test_fromstring_foreign(self):
+        s = "1.234"
+        a = np.fromstring(s, dtype=np.longdouble, sep=" ")
+        assert_equal(a[0], np.longdouble(s))
+
+    def test_fromstring_foreign_sep(self):
+        a = np.array([1, 2, 3, 4])
+        b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",")
+        assert_array_equal(a, b)
+
+    def test_fromstring_foreign_value(self):
+        with assert_warns(DeprecationWarning):
+            b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
+            assert_array_equal(b[0], 1)
+
+
+@pytest.mark.parametrize("int_val", [
+    # cases discussed in gh-10723
+    # and gh-9968
+    2 ** 1024, 0])
+def test_longdouble_from_int(int_val):
+    # for issue gh-9968
+    str_val = str(int_val)
+    # we'll expect a RuntimeWarning on platforms
+    # with np.longdouble equivalent to np.double
+    # for large integer input
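+    # (2 ** 1024 is just above the largest double, ~1.798e308, so the
+    # conversion overflows to inf in that case.)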
+    with warnings.catch_warnings(record=True) as w:
+        warnings.filterwarnings('always', '', RuntimeWarning)
+        # can be inf==inf on some platforms
+        assert np.longdouble(int_val) == np.longdouble(str_val)
+        # we can't directly compare the int and
+        # max longdouble value on all platforms
+        if np.allclose(np.finfo(np.longdouble).max,
+                       np.finfo(np.double).max) and w:
+            assert w[0].category is RuntimeWarning
+
+@pytest.mark.parametrize("bool_val", [
+    True, False])
+def test_longdouble_from_bool(bool_val):
+    assert np.longdouble(bool_val) == np.longdouble(int(bool_val))
+
+
+@pytest.mark.skipif(
+    not (IS_MUSL and platform.machine() == "x86_64"),
+    reason="only need to run on musllinux_x86_64"
+)
+def test_musllinux_x86_64_signature():
+    # this test may fail if you're emulating musllinux_x86_64 on a different
+    # architecture, but should pass natively.
+    known_sigs = [b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf']
+    sig = (np.longdouble(-1.0) / np.longdouble(10.0)
+           ).newbyteorder('<').tobytes()[:10]
+    assert sig in known_sigs
+
+
+def test_eps_positive():
+    # np.finfo('g').eps should be positive on all platforms. If this isn't true
+    # then something may have gone wrong with the MachArLike, e.g. if
+    # np.core.getlimits._discovered_machar didn't work properly
+    assert np.finfo(np.longdouble).eps > 0.
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_machar.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_machar.py
new file mode 100644
index 00000000..3a66ec51
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_machar.py
@@ -0,0 +1,30 @@
+"""
+Test machar. Given recent changes to hardcode type data, we might want to get
+rid of both MachAr and this test at some point.
+
+"""
+import pytest
+
+from numpy.core._machar import MachAr
+import numpy.core.numerictypes as ntypes
+from numpy import errstate, array
+
+
+class TestMachAr:
+    def _run_machar_highprec(self):
+        # Instantiate MachAr instance with high enough precision to cause
+        # underflow
+        try:
+            hiprec = ntypes.float96
+            MachAr(lambda v: array(v, hiprec))
+        except AttributeError:
+            # No ntypes.float96 available on this platform: skip the check.
+            pytest.skip("no ntypes.float96 available on this platform")
+
+    def test_underflow(self):
+        # Regression test for #759:
+        # instantiating MachAr for dtype = np.float96 raised a spurious
+        # warning.
+        with errstate(all='raise'):
+            try:
+                self._run_machar_highprec()
+            except FloatingPointError as e:
+                msg = "Caught %s exception, should not have been raised." % e
+                raise AssertionError(msg)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_mem_overlap.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_mem_overlap.py
new file mode 100644
index 00000000..1fd4c4d4
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_mem_overlap.py
@@ -0,0 +1,931 @@
+import itertools
+import pytest
+
+import numpy as np
+from numpy.core._multiarray_tests import solve_diophantine, internal_overlap
+from numpy.core import _umath_tests
+from numpy.lib.stride_tricks import as_strided
+from numpy.testing import (
+    assert_, assert_raises, assert_equal, assert_array_equal
+    )
+
+
+ndims = 2
+size = 10
+shape = tuple([size] * ndims)
+
+MAY_SHARE_BOUNDS = 0
+MAY_SHARE_EXACT = -1
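+# max_work semantics for np.may_share_memory / np.shares_memory: a bound
+# of 0 checks only the memory extents, while -1 solves the exact overlap
+# problem, however much work that takes.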
+
+
+def _indices_for_nelems(nelems):
+    """Return slices selecting nelems elements, for steps 1 and 2 in both
+    directions (or a single int index when nelems == 0)."""
+
+    if nelems == 0:
+        return [size // 2]  # int index
+
+    res = []
+    for step in (1, 2):
+        for sign in (-1, 1):
+            start = size // 2 - nelems * step * sign // 2
+            stop = start + nelems * step * sign
+            res.append(slice(start, stop, step * sign))
+
+    return res
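+# For example, with size = 10 and nelems = 2 the loop above yields
+# slice(6, 4, -1), slice(4, 6, 1), slice(7, 3, -2) and slice(3, 7, 2).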
+
+
+def _indices_for_axis():
+    """Returns (src, dst) pairs of indices."""
+
+    res = []
+    for nelems in (0, 2, 3):
+        ind = _indices_for_nelems(nelems)
+        res.extend(itertools.product(ind, ind))  # all assignments of size "nelems"
+
+    return res
+
+
+def _indices(ndims):
+    """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs."""
+
+    ind = _indices_for_axis()
+    return itertools.product(ind, repeat=ndims)
+
+
+def _check_assignment(srcidx, dstidx):
+    """Check assignment arr[dstidx] = arr[srcidx] works."""
+
+    arr = np.arange(np.prod(shape)).reshape(shape)
+
+    cpy = arr.copy()
+
+    cpy[dstidx] = arr[srcidx]
+    arr[dstidx] = arr[srcidx]
+
+    assert_(np.all(arr == cpy),
+            'assigning arr[%s] = arr[%s]' % (dstidx, srcidx))
+
+
+def test_overlapping_assignments():
+    # Test automatically generated assignments which overlap in memory.
+
+    inds = _indices(ndims)
+
+    for ind in inds:
+        srcidx = tuple([a[0] for a in ind])
+        dstidx = tuple([a[1] for a in ind])
+
+        _check_assignment(srcidx, dstidx)
+
+
+@pytest.mark.slow
+def test_diophantine_fuzz():
+    # Fuzz test the diophantine solver
+    rng = np.random.RandomState(1234)
+
+    max_int = np.iinfo(np.intp).max
+
+    for ndim in range(10):
+        feasible_count = 0
+        infeasible_count = 0
+
+        min_count = 500//(ndim + 1)
+
+        while min(feasible_count, infeasible_count) < min_count:
+            # Ensure big and small integer problems
+            A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6
+            U_max = rng.randint(0, 11, dtype=np.intp)**6
+
+            A_max = min(max_int, A_max)
+            U_max = min(max_int-1, U_max)
+
+            A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp))
+                      for j in range(ndim))
+            U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp))
+                      for j in range(ndim))
+
+            b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U)))
+            b = int(rng.randint(-1, b_ub+2, dtype=np.intp))
+
+            if ndim == 0 and feasible_count < min_count:
+                b = 0
+
+            X = solve_diophantine(A, U, b)
+
+            if X is None:
+                # Check the simplified decision problem agrees
+                X_simplified = solve_diophantine(A, U, b, simplify=1)
+                assert_(X_simplified is None, (A, U, b, X_simplified))
+
+                # Check no solution exists (provided the problem is
+                # small enough so that brute force checking doesn't
+                # take too long)
+                ranges = tuple(range(0, a*ub+1, a) for a, ub in zip(A, U))
+
+                size = 1
+                for r in ranges:
+                    size *= len(r)
+                if size < 100000:
+                    assert_(not any(sum(w) == b for w in itertools.product(*ranges)))
+                    infeasible_count += 1
+            else:
+                # Check the simplified decision problem agrees
+                X_simplified = solve_diophantine(A, U, b, simplify=1)
+                assert_(X_simplified is not None, (A, U, b, X_simplified))
+
+                # Check validity
+                assert_(sum(a*x for a, x in zip(A, X)) == b)
+                assert_(all(0 <= x <= ub for x, ub in zip(X, U)))
+                feasible_count += 1
+
+
+def test_diophantine_overflow():
+    # Smoke test integer overflow detection
+    max_intp = np.iinfo(np.intp).max
+    max_int64 = np.iinfo(np.int64).max
+
+    if max_int64 <= max_intp:
+        # Check that the algorithm works internally in 128-bit;
+        # solving this problem requires large intermediate numbers
+        A = (max_int64//2, max_int64//2 - 10)
+        U = (max_int64//2, max_int64//2 - 10)
+        b = 2*(max_int64//2) - 10
+
+        assert_equal(solve_diophantine(A, U, b), (1, 1))
+
+
+def check_may_share_memory_exact(a, b):
+    got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)
+
+    assert_equal(np.may_share_memory(a, b),
+                 np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS))
+
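+    # Determine the ground truth by observation: zero both views, then
+    # write 1 through `a`; if anything in `b` changed, the views overlap.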
+    a.fill(0)
+    b.fill(0)
+    a.fill(1)
+    exact = b.any()
+
+    err_msg = ""
+    if got != exact:
+        err_msg = "    " + "\n    ".join([
+            "base_a - base_b = %r" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],),
+            "shape_a = %r" % (a.shape,),
+            "shape_b = %r" % (b.shape,),
+            "strides_a = %r" % (a.strides,),
+            "strides_b = %r" % (b.strides,),
+            "size_a = %r" % (a.size,),
+            "size_b = %r" % (b.size,)
+        ])
+
+    assert_equal(got, exact, err_msg=err_msg)
+
+
+def test_may_share_memory_manual():
+    # Manual test cases for may_share_memory
+
+    # Base arrays
+    xs0 = [
+        np.zeros([13, 21, 23, 22], dtype=np.int8),
+        np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:,:,::2,:]
+    ]
+
+    # Generate all negative stride combinations
+    xs = []
+    for x in xs0:
+        for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)):
+            xp = x[ss]
+            xs.append(xp)
+
+    for x in xs:
+        # The default is a simple extent check
+        assert_(np.may_share_memory(x[:,0,:], x[:,1,:]))
+        assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None))
+
+        # Exact checks
+        check_may_share_memory_exact(x[:,0,:], x[:,1,:])
+        check_may_share_memory_exact(x[:,::7], x[:,3::3])
+
+        try:
+            xp = x.ravel()
+            if xp.flags.owndata:
+                continue
+            xp = xp.view(np.int16)
+        except ValueError:
+            continue
+
+        # 0-size arrays cannot overlap
+        check_may_share_memory_exact(x.ravel()[6:6],
+                                     xp.reshape(13, 21, 23, 11)[:,::7])
+
+        # Test itemsize is dealt with
+        check_may_share_memory_exact(x[:,::7],
+                                     xp.reshape(13, 21, 23, 11))
+        check_may_share_memory_exact(x[:,::7],
+                                     xp.reshape(13, 21, 23, 11)[:,3::3])
+        check_may_share_memory_exact(x.ravel()[6:7],
+                                     xp.reshape(13, 21, 23, 11)[:,::7])
+
+    # Check unit size
+    x = np.zeros([1], dtype=np.int8)
+    check_may_share_memory_exact(x, x)
+    check_may_share_memory_exact(x, x.copy())
+
+
+def iter_random_view_pairs(x, same_steps=True, equal_size=False):
+    rng = np.random.RandomState(1234)
+
+    if equal_size and same_steps:
+        raise ValueError("equal_size and same_steps cannot both be set")
+
+    def random_slice(n, step):
+        start = rng.randint(0, n+1, dtype=np.intp)
+        stop = rng.randint(start, n+1, dtype=np.intp)
+        if rng.randint(0, 2, dtype=np.intp) == 0:
+            stop, start = start, stop
+            step *= -1
+        return slice(start, stop, step)
+
+    def random_slice_fixed_size(n, step, size):
+        start = rng.randint(0, n+1 - size*step)
+        stop = start + (size-1)*step + 1
+        if rng.randint(0, 2) == 0:
+            stop, start = start-1, stop-1
+            if stop < 0:
+                stop = None
+            step *= -1
+        return slice(start, stop, step)
+
+    # First a few regular views
+    yield x, x
+    for j in range(1, 7, 3):
+        yield x[j:], x[:-j]
+        yield x[...,j:], x[...,:-j]
+
+    # An array with zero stride internal overlap
+    strides = list(x.strides)
+    strides[0] = 0
+    xp = as_strided(x, shape=x.shape, strides=strides)
+    yield x, xp
+    yield xp, xp
+
+    # An array with non-zero stride internal overlap
+    strides = list(x.strides)
+    if strides[0] > 1:
+        strides[0] = 1
+    xp = as_strided(x, shape=x.shape, strides=strides)
+    yield x, xp
+    yield xp, xp
+
+    # Then discontiguous views
+    while True:
+        steps = tuple(rng.randint(1, 11, dtype=np.intp)
+                      if rng.randint(0, 5, dtype=np.intp) == 0 else 1
+                      for j in range(x.ndim))
+        s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
+
+        t1 = np.arange(x.ndim)
+        rng.shuffle(t1)
+
+        if equal_size:
+            t2 = t1
+        else:
+            t2 = np.arange(x.ndim)
+            rng.shuffle(t2)
+
+        a = x[s1]
+
+        if equal_size:
+            if a.size == 0:
+                continue
+
+            steps2 = tuple(rng.randint(1, max(2, p//(1+pa)))
+                           if rng.randint(0, 5) == 0 else 1
+                           for p, s, pa in zip(x.shape, s1, a.shape))
+            s2 = tuple(random_slice_fixed_size(p, s, pa)
+                       for p, s, pa in zip(x.shape, steps2, a.shape))
+        elif same_steps:
+            steps2 = steps
+        else:
+            steps2 = tuple(rng.randint(1, 11, dtype=np.intp)
+                           if rng.randint(0, 5, dtype=np.intp) == 0 else 1
+                           for j in range(x.ndim))
+
+        if not equal_size:
+            s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2))
+
+        a = a.transpose(t1)
+        b = x[s2].transpose(t2)
+
+        yield a, b
+
+
+def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count):
+    # Check that overlap problems with common strides are solved with
+    # little work.
+    x = np.zeros([17,34,71,97], dtype=np.int16)
+
+    feasible = 0
+    infeasible = 0
+
+    pair_iter = iter_random_view_pairs(x, same_steps)
+
+    while min(feasible, infeasible) < min_count:
+        a, b = next(pair_iter)
+
+        bounds_overlap = np.may_share_memory(a, b)
+        may_share_answer = np.may_share_memory(a, b)
+        easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b))
+        exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)
+
+        if easy_answer != exact_answer:
+            # assert_equal is slow...
+            assert_equal(easy_answer, exact_answer)
+
+        if may_share_answer != bounds_overlap:
+            assert_equal(may_share_answer, bounds_overlap)
+
+        if bounds_overlap:
+            if exact_answer:
+                feasible += 1
+            else:
+                infeasible += 1
+
+
+@pytest.mark.slow
+def test_may_share_memory_easy_fuzz():
+    # Check that overlap problems with common strides are always
+    # solved with little work.
+
+    check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1,
+                                     same_steps=True,
+                                     min_count=2000)
+
+
+@pytest.mark.slow
+def test_may_share_memory_harder_fuzz():
+    # Overlap problems with not necessarily common strides take more
+    # work.
+    #
+    # The work bound below can't be reduced much. Harder problems may
+    # also exist but go undetected here, as the set of problems comes
+    # from an RNG.
+
+    check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2,
+                                     same_steps=False,
+                                     min_count=2000)
+
+
+def test_shares_memory_api():
+    x = np.zeros([4, 5, 6], dtype=np.int8)
+
+    assert_equal(np.shares_memory(x, x), True)
+    assert_equal(np.shares_memory(x, x.copy()), False)
+
+    a = x[:,::2,::3]
+    b = x[:,::3,::2]
+    assert_equal(np.shares_memory(a, b), True)
+    assert_equal(np.shares_memory(a, b, max_work=None), True)
+    assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1)
+
+
+def test_may_share_memory_bad_max_work():
+    x = np.zeros([1])
+    assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100)
+    assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100)
+
+
+def test_internal_overlap_diophantine():
+    def check(A, U, exists=None):
+        X = solve_diophantine(A, U, 0, require_ub_nontrivial=1)
+
+        if exists is None:
+            exists = (X is not None)
+
+        if X is not None:
+            assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U)))
+            assert_(all(0 <= x <= u for x, u in zip(X, U)))
+            assert_(any(x != u//2 for x, u in zip(X, U)))
+
+        if exists:
+            assert_(X is not None, repr(X))
+        else:
+            assert_(X is None, repr(X))
+
+    # Smoke tests
+    check((3, 2), (2*2, 3*2), exists=True)
+    check((3*2, 2), (15*2, (3-1)*2), exists=False)
+
+
+def test_internal_overlap_slices():
+    # Slicing an array never generates internal overlap
+
+    x = np.zeros([17,34,71,97], dtype=np.int16)
+
+    rng = np.random.RandomState(1234)
+
+    def random_slice(n, step):
+        start = rng.randint(0, n+1, dtype=np.intp)
+        stop = rng.randint(start, n+1, dtype=np.intp)
+        if rng.randint(0, 2, dtype=np.intp) == 0:
+            stop, start = start, stop
+            step *= -1
+        return slice(start, stop, step)
+
+    cases = 0
+    min_count = 5000
+
+    while cases < min_count:
+        steps = tuple(rng.randint(1, 11, dtype=np.intp)
+                      if rng.randint(0, 5, dtype=np.intp) == 0 else 1
+                      for j in range(x.ndim))
+        t1 = np.arange(x.ndim)
+        rng.shuffle(t1)
+        s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
+        a = x[s1].transpose(t1)
+
+        assert_(not internal_overlap(a))
+        cases += 1
+
+
+def check_internal_overlap(a, manual_expected=None):
+    got = internal_overlap(a)
+
+    # Brute-force check
+    m = set()
+    ranges = tuple(range(n) for n in a.shape)
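+    # Visit every element's byte offset; a repeated offset means two
+    # index tuples alias the same address (the loop's else-branch runs
+    # only when no break occurred, i.e. no duplicate was found).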
+    for v in itertools.product(*ranges):
+        offset = sum(s*w for s, w in zip(a.strides, v))
+        if offset in m:
+            expected = True
+            break
+        else:
+            m.add(offset)
+    else:
+        expected = False
+
+    # Compare
+    if got != expected:
+        assert_equal(got, expected, err_msg=repr((a.strides, a.shape)))
+    if manual_expected is not None and expected != manual_expected:
+        assert_equal(expected, manual_expected)
+    return got
+
+
+def test_internal_overlap_manual():
+    # Stride tricks can construct arrays with internal overlap
+
+    # We don't care about memory bounds; the array is never actually
+    # read from or written to
+    x = np.arange(1).astype(np.int8)
+
+    # Check low-dimensional special cases
+
+    check_internal_overlap(x, False) # 1-dim
+    check_internal_overlap(x.reshape([]), False) # 0-dim
+
+    a = as_strided(x, strides=(3, 4), shape=(4, 4))
+    check_internal_overlap(a, False)
+
+    a = as_strided(x, strides=(3, 4), shape=(5, 4))
+    check_internal_overlap(a, True)
+
+    a = as_strided(x, strides=(0,), shape=(0,))
+    check_internal_overlap(a, False)
+
+    a = as_strided(x, strides=(0,), shape=(1,))
+    check_internal_overlap(a, False)
+
+    a = as_strided(x, strides=(0,), shape=(2,))
+    check_internal_overlap(a, True)
+
+    a = as_strided(x, strides=(0, -9993), shape=(87, 22))
+    check_internal_overlap(a, True)
+
+    a = as_strided(x, strides=(0, -9993), shape=(1, 22))
+    check_internal_overlap(a, False)
+
+    a = as_strided(x, strides=(0, -9993), shape=(0, 22))
+    check_internal_overlap(a, False)
+
+
+def test_internal_overlap_fuzz():
+    # Fuzz check; the brute-force check is fairly slow
+
+    x = np.arange(1).astype(np.int8)
+
+    overlap = 0
+    no_overlap = 0
+    min_count = 100
+
+    rng = np.random.RandomState(1234)
+
+    while min(overlap, no_overlap) < min_count:
+        ndim = rng.randint(1, 4, dtype=np.intp)
+
+        strides = tuple(rng.randint(-1000, 1000, dtype=np.intp)
+                        for j in range(ndim))
+        shape = tuple(rng.randint(1, 30, dtype=np.intp)
+                      for j in range(ndim))
+
+        a = as_strided(x, strides=strides, shape=shape)
+        result = check_internal_overlap(a)
+
+        if result:
+            overlap += 1
+        else:
+            no_overlap += 1
+
+
+def test_non_ndarray_inputs():
+    # Regression check for gh-5604
+
+    class MyArray:
+        def __init__(self, data):
+            self.data = data
+
+        @property
+        def __array_interface__(self):
+            return self.data.__array_interface__
+
+    class MyArray2:
+        def __init__(self, data):
+            self.data = data
+
+        def __array__(self):
+            return self.data
+
+    for cls in [MyArray, MyArray2]:
+        x = np.arange(5)
+
+        assert_(np.may_share_memory(cls(x[::2]), x[1::2]))
+        assert_(not np.shares_memory(cls(x[::2]), x[1::2]))
+
+        assert_(np.shares_memory(cls(x[1::3]), x[::2]))
+        assert_(np.may_share_memory(cls(x[1::3]), x[::2]))
+
+
+def view_element_first_byte(x):
+    """Construct an array viewing the first byte of each element of `x`"""
+    from numpy.lib.stride_tricks import DummyArray
+    interface = dict(x.__array_interface__)
+    interface['typestr'] = '|b1'
+    interface['descr'] = [('', '|b1')]
+    return np.asarray(DummyArray(interface, x))
+
+
+def assert_copy_equivalent(operation, args, out, **kwargs):
+    """
+    Check that operation(*args, out=out) produces results
+    equivalent to out[...] = operation(*args, out=out.copy())
+    """
+
+    kwargs['out'] = out
+    kwargs2 = dict(kwargs)
+    kwargs2['out'] = out.copy()
+
+    out_orig = out.copy()
+    out[...] = operation(*args, **kwargs2)
+    expected = out.copy()
+    out[...] = out_orig
+
+    got = operation(*args, **kwargs).copy()
+
+    if (got != expected).any():
+        assert_equal(got, expected)
+
+
+class TestUFunc:
+    """
+    Test ufunc call memory overlap handling
+    """
+
+    def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16,
+                             count=5000):
+        shapes = [7, 13, 8, 21, 29, 32]
+
+        rng = np.random.RandomState(1234)
+
+        for ndim in range(1, 6):
+            x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype)
+
+            it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
+
+            min_count = count // (ndim + 1)**2
+
+            overlapping = 0
+            while overlapping < min_count:
+                a, b = next(it)
+
+                a_orig = a.copy()
+                b_orig = b.copy()
+
+                if get_out_axis_size is None:
+                    assert_copy_equivalent(operation, [a], out=b)
+
+                    if np.shares_memory(a, b):
+                        overlapping += 1
+                else:
+                    for axis in itertools.chain(range(ndim), [None]):
+                        a[...] = a_orig
+                        b[...] = b_orig
+
+                        # Determine size for reduction axis (None if scalar)
+                        outsize, scalarize = get_out_axis_size(a, b, axis)
+                        if outsize == 'skip':
+                            continue
+
+                        # Slice b to get an output array of the correct size
+                        sl = [slice(None)] * ndim
+                        if axis is None:
+                            if outsize is None:
+                                sl = [slice(0, 1)] + [0]*(ndim - 1)
+                            else:
+                                sl = [slice(0, outsize)] + [0]*(ndim - 1)
+                        else:
+                            if outsize is None:
+                                k = b.shape[axis]//2
+                                if ndim == 1:
+                                    sl[axis] = slice(k, k + 1)
+                                else:
+                                    sl[axis] = k
+                            else:
+                                assert b.shape[axis] >= outsize
+                                sl[axis] = slice(0, outsize)
+                        b_out = b[tuple(sl)]
+
+                        if scalarize:
+                            b_out = b_out.reshape([])
+
+                        if np.shares_memory(a, b_out):
+                            overlapping += 1
+
+                        # Check result
+                        assert_copy_equivalent(operation, [a], out=b_out, axis=axis)
+
+    @pytest.mark.slow
+    def test_unary_ufunc_call_fuzz(self):
+        self.check_unary_fuzz(np.invert, None, np.int16)
+
+    @pytest.mark.slow
+    def test_unary_ufunc_call_complex_fuzz(self):
+        # Complex typically has a smaller alignment than itemsize
+        self.check_unary_fuzz(np.negative, None, np.complex128, count=500)
+
+    def test_binary_ufunc_accumulate_fuzz(self):
+        def get_out_axis_size(a, b, axis):
+            if axis is None:
+                if a.ndim == 1:
+                    return a.size, False
+                else:
+                    return 'skip', False  # accumulate doesn't support this
+            else:
+                return a.shape[axis], False
+
+        self.check_unary_fuzz(np.add.accumulate, get_out_axis_size,
+                              dtype=np.int16, count=500)
+
+    def test_binary_ufunc_reduce_fuzz(self):
+        def get_out_axis_size(a, b, axis):
+            return None, (axis is None or a.ndim == 1)
+
+        self.check_unary_fuzz(np.add.reduce, get_out_axis_size,
+                              dtype=np.int16, count=500)
+
+    def test_binary_ufunc_reduceat_fuzz(self):
+        def get_out_axis_size(a, b, axis):
+            if axis is None:
+                if a.ndim == 1:
+                    return a.size, False
+                else:
+                    return 'skip', False  # reduceat doesn't support this
+            else:
+                return a.shape[axis], False
+
+        def do_reduceat(a, out, axis):
+            if axis is None:
+                size = len(a)
+                step = size//len(out)
+            else:
+                size = a.shape[axis]
+                step = a.shape[axis] // out.shape[axis]
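+            # e.g. size == 10 with an output of length 5 gives step == 2, so
+            # idx == [0, 2, 4, 6, 8] and each output slot reduces two elements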
+            idx = np.arange(0, size, step)
+            return np.add.reduceat(a, idx, out=out, axis=axis)
+
+        self.check_unary_fuzz(do_reduceat, get_out_axis_size,
+                              dtype=np.int16, count=500)
+
+    def test_binary_ufunc_reduceat_manual(self):
+        def check(ufunc, a, ind, out):
+            c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy())
+            c2 = ufunc.reduceat(a, ind, out=out)
+            assert_array_equal(c1, c2)
+
+        # Exactly same input/output arrays
+        a = np.arange(10000, dtype=np.int16)
+        check(np.add, a, a[::-1].copy(), a)
+
+        # Overlap with index
+        a = np.arange(10000, dtype=np.int16)
+        check(np.add, a, a[::-1], a)
+
+    @pytest.mark.slow
+    def test_unary_gufunc_fuzz(self):
+        shapes = [7, 13, 8, 21, 29, 32]
+        gufunc = _umath_tests.euclidean_pdist
+
+        rng = np.random.RandomState(1234)
+
+        for ndim in range(2, 6):
+            x = rng.rand(*shapes[:ndim])
+
+            it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
+
+            min_count = 500 // (ndim + 1)**2
+
+            overlapping = 0
+            while overlapping < min_count:
+                a, b = next(it)
+
+                if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2:
+                    continue
+
+                # Trim the shapes so that euclidean_pdist accepts them
+                if b.shape[-1] > b.shape[-2]:
+                    b = b[..., 0, :]
+                else:
+                    b = b[..., :, 0]
+
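+                # n points yield p = n*(n-1)/2 pairwise distances,
+                # e.g. n == 5 rows -> p == 10 outputs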
+                n = a.shape[-2]
+                p = n * (n - 1) // 2
+                if p <= b.shape[-1] and p > 0:
+                    b = b[..., :p]
+                else:
+                    n = max(2, int(np.sqrt(b.shape[-1])) // 2)
+                    p = n * (n - 1) // 2
+                    a = a[..., :n, :]
+                    b = b[..., :p]
+
+                # Call
+                if np.shares_memory(a, b):
+                    overlapping += 1
+
+                with np.errstate(over='ignore', invalid='ignore'):
+                    assert_copy_equivalent(gufunc, [a], out=b)
+
+    def test_ufunc_at_manual(self):
+        def check(ufunc, a, ind, b=None):
+            a0 = a.copy()
+            if b is None:
+                ufunc.at(a0, ind.copy())
+                c1 = a0.copy()
+                ufunc.at(a, ind)
+                c2 = a.copy()
+            else:
+                ufunc.at(a0, ind.copy(), b.copy())
+                c1 = a0.copy()
+                ufunc.at(a, ind, b)
+                c2 = a.copy()
+            assert_array_equal(c1, c2)
+
+        # Overlap with index
+        a = np.arange(10000, dtype=np.int16)
+        check(np.invert, a[::-1], a)
+
+        # Overlap with second data array
+        a = np.arange(100, dtype=np.int16)
+        ind = np.arange(0, 100, 2, dtype=np.int16)
+        check(np.add, a, ind, a[25:75])
+
+    def test_unary_ufunc_1d_manual(self):
+        # Exercise ufunc fast-paths (that avoid creation of an `np.nditer`)
+
+        def check(a, b):
+            a_orig = a.copy()
+            b_orig = b.copy()
+
+            b0 = b.copy()
+            c1 = ufunc(a, out=b0)
+            c2 = ufunc(a, out=b)
+            assert_array_equal(c1, c2)
+
+            # Trigger "fancy ufunc loop" code path
+            mask = view_element_first_byte(b).view(np.bool_)
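+            # The mask is itself a view of b's storage, so the where=
+            # argument below also overlaps the output.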
+
+            a[...] = a_orig
+            b[...] = b_orig
+            c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy()
+
+            a[...] = a_orig
+            b[...] = b_orig
+            c2 = ufunc(a, out=b, where=mask.copy()).copy()
+
+            # Also, mask overlapping with output
+            a[...] = a_orig
+            b[...] = b_orig
+            c3 = ufunc(a, out=b, where=mask).copy()
+
+            assert_array_equal(c1, c2)
+            assert_array_equal(c1, c3)
+
+        dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32,
+                  np.float64, np.complex64, np.complex128]
+        dtypes = [np.dtype(x) for x in dtypes]
+
+        for dtype in dtypes:
+            if np.issubdtype(dtype, np.integer):
+                ufunc = np.invert
+            else:
+                ufunc = np.reciprocal
+
+            n = 1000
+            k = 10
+            indices = [
+                np.index_exp[:n],
+                np.index_exp[k:k+n],
+                np.index_exp[n-1::-1],
+                np.index_exp[k+n-1:k-1:-1],
+                np.index_exp[:2*n:2],
+                np.index_exp[k:k+2*n:2],
+                np.index_exp[2*n-1::-2],
+                np.index_exp[k+2*n-1:k-1:-2],
+            ]
+
+            for xi, yi in itertools.product(indices, indices):
+                v = np.arange(1, 1 + n*2 + k, dtype=dtype)
+                x = v[xi]
+                y = v[yi]
+
+                with np.errstate(all='ignore'):
+                    check(x, y)
+
+                    # Scalar cases
+                    check(x[:1], y)
+                    check(x[-1:], y)
+                    check(x[:1].reshape([]), y)
+                    check(x[-1:].reshape([]), y)
+
+    def test_unary_ufunc_where_same(self):
+        # Check behavior at wheremask overlap
+        ufunc = np.invert
+
+        def check(a, out, mask):
+            c1 = ufunc(a, out=out.copy(), where=mask.copy())
+            c2 = ufunc(a, out=out, where=mask)
+            assert_array_equal(c1, c2)
+
+        # Check behavior with same input and output arrays
+        x = np.arange(100).astype(np.bool_)
+        check(x, x, x)
+        check(x, x.copy(), x)
+        check(x, x, x.copy())
+
+    @pytest.mark.slow
+    def test_binary_ufunc_1d_manual(self):
+        ufunc = np.add
+
+        def check(a, b, c):
+            c0 = c.copy()
+            c1 = ufunc(a, b, out=c0)
+            c2 = ufunc(a, b, out=c)
+            assert_array_equal(c1, c2)
+
+        for dtype in [np.int8, np.int16, np.int32, np.int64,
+                      np.float32, np.float64, np.complex64, np.complex128]:
+            # Check different data dependency orders
+
+            n = 1000
+            k = 10
+
+            indices = []
+            for p in [1, 2]:
+                indices.extend([
+                    np.index_exp[:p*n:p],
+                    np.index_exp[k:k+p*n:p],
+                    np.index_exp[p*n-1::-p],
+                    np.index_exp[k+p*n-1:k-1:-p],
+                ])
+
+            for x, y, z in itertools.product(indices, indices, indices):
+                v = np.arange(6*n).astype(dtype)
+                x = v[x]
+                y = v[y]
+                z = v[z]
+
+                check(x, y, z)
+
+                # Scalar cases
+                check(x[:1], y, z)
+                check(x[-1:], y, z)
+                check(x[:1].reshape([]), y, z)
+                check(x[-1:].reshape([]), y, z)
+                check(x, y[:1], z)
+                check(x, y[-1:], z)
+                check(x, y[:1].reshape([]), z)
+                check(x, y[-1:].reshape([]), z)
+
+    def test_inplace_op_simple_manual(self):
+        rng = np.random.RandomState(1234)
+        x = rng.rand(200, 200)  # bigger than bufsize
+
+        x += x.T
+        assert_array_equal(x - x.T, 0)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_mem_policy.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_mem_policy.py
new file mode 100644
index 00000000..a381fa1d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_mem_policy.py
@@ -0,0 +1,443 @@
+import asyncio
+import gc
+import os
+import pytest
+import numpy as np
+import threading
+import warnings
+from numpy.testing import extbuild, assert_warns, IS_WASM
+import sys
+
+
+# FIXME: numpy.testing.extbuild uses `numpy.distutils`, so this won't work on
+# Python 3.12 and up. It's an internal test utility, so for now we just skip
+# these tests.
+
+
+@pytest.fixture
+def get_module(tmp_path):
+    """ Add a memory policy that returns a false pointer 64 bytes into the
+    actual allocation, and fill the prefix with some text. Then check at each
+    memory manipulation that the prefix exists, to make sure all alloc/realloc/
+    free/calloc go via the functions here.
+    """
+    if sys.platform.startswith('cygwin'):
+        pytest.skip('link fails on cygwin')
+    if IS_WASM:
+        pytest.skip("Can't build module inside Wasm")
+    functions = [
+        ("get_default_policy", "METH_NOARGS", """
+             Py_INCREF(PyDataMem_DefaultHandler);
+             return PyDataMem_DefaultHandler;
+         """),
+        ("set_secret_data_policy", "METH_NOARGS", """
+             PyObject *secret_data =
+                 PyCapsule_New(&secret_data_handler, "mem_handler", NULL);
+             if (secret_data == NULL) {
+                 return NULL;
+             }
+             PyObject *old = PyDataMem_SetHandler(secret_data);
+             Py_DECREF(secret_data);
+             return old;
+         """),
+        ("set_old_policy", "METH_O", """
+             PyObject *old;
+             if (args != NULL && PyCapsule_CheckExact(args)) {
+                 old = PyDataMem_SetHandler(args);
+             }
+             else {
+                 old = PyDataMem_SetHandler(NULL);
+             }
+             return old;
+         """),
+        ("get_array", "METH_NOARGS", """
+            char *buf = (char *)malloc(20);
+            npy_intp dims[1];
+            dims[0] = 20;
+            PyArray_Descr *descr =  PyArray_DescrNewFromType(NPY_UINT8);
+            return PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, NULL,
+                                        buf, NPY_ARRAY_WRITEABLE, NULL);
+         """),
+        ("set_own", "METH_O", """
+            if (!PyArray_Check(args)) {
+                PyErr_SetString(PyExc_ValueError,
+                             "need an ndarray");
+                return NULL;
+            }
+            PyArray_ENABLEFLAGS((PyArrayObject*)args, NPY_ARRAY_OWNDATA);
+            // Maybe try this too?
+            // PyArray_BASE(PyArrayObject *)args) = NULL;
+            Py_RETURN_NONE;
+         """),
+        ("get_array_with_base", "METH_NOARGS", """
+            char *buf = (char *)malloc(20);
+            npy_intp dims[1];
+            dims[0] = 20;
+            PyArray_Descr *descr =  PyArray_DescrNewFromType(NPY_UINT8);
+            PyObject *arr = PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims,
+                                                 NULL, buf,
+                                                 NPY_ARRAY_WRITEABLE, NULL);
+            if (arr == NULL) return NULL;
+            PyObject *obj = PyCapsule_New(buf, "buf capsule",
+                                          (PyCapsule_Destructor)&warn_on_free);
+            if (obj == NULL) {
+                Py_DECREF(arr);
+                return NULL;
+            }
+            if (PyArray_SetBaseObject((PyArrayObject *)arr, obj) < 0) {
+                Py_DECREF(arr);
+                Py_DECREF(obj);
+                return NULL;
+            }
+            return arr;
+
+         """),
+    ]
+    prologue = '''
+        #define NPY_TARGET_VERSION NPY_1_22_API_VERSION
+        #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+        #include <numpy/arrayobject.h>
+        /*
+         * This struct allows the dynamic configuration of the allocator funcs
+         * of the `secret_data_allocator`. It is provided here for
+         * demonstration purposes, as a valid `ctx` use-case scenario.
+         */
+        typedef struct {
+            void *(*malloc)(size_t);
+            void *(*calloc)(size_t, size_t);
+            void *(*realloc)(void *, size_t);
+            void (*free)(void *);
+        } SecretDataAllocatorFuncs;
+
+        NPY_NO_EXPORT void *
+        shift_alloc(void *ctx, size_t sz) {
+            SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
+            char *real = (char *)funcs->malloc(sz + 64);
+            if (real == NULL) {
+                return NULL;
+            }
+            snprintf(real, 64, "originally allocated %ld", (unsigned long)sz);
+            return (void *)(real + 64);
+        }
+        NPY_NO_EXPORT void *
+        shift_zero(void *ctx, size_t sz, size_t cnt) {
+            SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
+            char *real = (char *)funcs->calloc(sz + 64, cnt);
+            if (real == NULL) {
+                return NULL;
+            }
+            snprintf(real, 64, "originally allocated %ld via zero",
+                     (unsigned long)sz);
+            return (void *)(real + 64);
+        }
+        NPY_NO_EXPORT void
+        shift_free(void *ctx, void * p, npy_uintp sz) {
+            SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
+            if (p == NULL) {
+                return;
+            }
+            char *real = (char *)p - 64;
+            if (strncmp(real, "originally allocated", 20) != 0) {
+                fprintf(stdout, "uh-oh, unmatched shift_free, "
+                        "no appropriate prefix\\n");
+                /* Make C runtime crash by calling free on the wrong address */
+                funcs->free((char *)p + 10);
+                /* funcs->free(real); */
+            }
+            else {
+                npy_uintp i = (npy_uintp)atoi(real + 20);
+                if (i != sz) {
+                    fprintf(stderr, "uh-oh, unmatched shift_free"
+                            "(ptr, %lu) but allocated %lu\\n",
+                            (unsigned long)sz, (unsigned long)i);
+                    /* This happens in some places, only print */
+                    funcs->free(real);
+                }
+                else {
+                    funcs->free(real);
+                }
+            }
+        }
+        NPY_NO_EXPORT void *
+        shift_realloc(void *ctx, void * p, npy_uintp sz) {
+            SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
+            if (p != NULL) {
+                char *real = (char *)p - 64;
+                if (strncmp(real, "originally allocated", 20) != 0) {
+                    fprintf(stdout, "uh-oh, unmatched shift_realloc\\n");
+                    return realloc(p, sz);
+                }
+                return (void *)((char *)funcs->realloc(real, sz + 64) + 64);
+            }
+            else {
+                char *real = (char *)funcs->realloc(p, sz + 64);
+                if (real == NULL) {
+                    return NULL;
+                }
+                snprintf(real, 64, "originally allocated "
+                         "%ld  via realloc", (unsigned long)sz);
+                return (void *)(real + 64);
+            }
+        }
+        /* As an example, we use the standard {m|c|re}alloc/free funcs. */
+        static SecretDataAllocatorFuncs secret_data_handler_ctx = {
+            malloc,
+            calloc,
+            realloc,
+            free
+        };
+        static PyDataMem_Handler secret_data_handler = {
+            "secret_data_allocator",
+            1,
+            {
+                &secret_data_handler_ctx, /* ctx */
+                shift_alloc,              /* malloc */
+                shift_zero,               /* calloc */
+                shift_realloc,            /* realloc */
+                shift_free                /* free */
+            }
+        };
+        void warn_on_free(void *capsule) {
+            PyErr_WarnEx(PyExc_UserWarning, "in warn_on_free", 1);
+            void * obj = PyCapsule_GetPointer(capsule,
+                                              PyCapsule_GetName(capsule));
+            free(obj);
+        }
+        '''
+    more_init = "import_array();"
+    try:
+        import mem_policy
+        return mem_policy
+    except ImportError:
+        pass
+    # if it does not exist, build and load it
+    return extbuild.build_and_import_extension('mem_policy',
+                                               functions,
+                                               prologue=prologue,
+                                               include_dirs=[np.get_include()],
+                                               build_dir=tmp_path,
+                                               more_init=more_init)
+
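+# Memory layout produced by the shift_* handlers above (illustrative):
+#
+#     real allocation:  | 64-byte text prefix | bytes handed to NumPy ... |
+#                       ^ funcs->malloc result  ^ pointer NumPy sees
+#
+# Every free/realloc walks 64 bytes back and verifies the prefix text, which
+# proves the call was routed through this handler rather than the default.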
+
+@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils")
+def test_set_policy(get_module):
+
+    get_handler_name = np.core.multiarray.get_handler_name
+    get_handler_version = np.core.multiarray.get_handler_version
+    orig_policy_name = get_handler_name()
+
+    a = np.arange(10).reshape((2, 5))  # a doesn't own its own data
+    assert get_handler_name(a) is None
+    assert get_handler_version(a) is None
+    assert get_handler_name(a.base) == orig_policy_name
+    assert get_handler_version(a.base) == 1
+
+    orig_policy = get_module.set_secret_data_policy()
+
+    b = np.arange(10).reshape((2, 5))  # b doesn't own its own data
+    assert get_handler_name(b) is None
+    assert get_handler_version(b) is None
+    assert get_handler_name(b.base) == 'secret_data_allocator'
+    assert get_handler_version(b.base) == 1
+
+    if orig_policy_name == 'default_allocator':
+        get_module.set_old_policy(None)  # tests PyDataMem_SetHandler(NULL)
+        assert get_handler_name() == 'default_allocator'
+    else:
+        get_module.set_old_policy(orig_policy)
+        assert get_handler_name() == orig_policy_name
+
+
+@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils")
+def test_default_policy_singleton(get_module):
+    get_handler_name = np.core.multiarray.get_handler_name
+
+    # set the policy to default
+    orig_policy = get_module.set_old_policy(None)
+
+    assert get_handler_name() == 'default_allocator'
+
+    # re-set the policy to default
+    def_policy_1 = get_module.set_old_policy(None)
+
+    assert get_handler_name() == 'default_allocator'
+
+    # set the policy to original
+    def_policy_2 = get_module.set_old_policy(orig_policy)
+
+    # since default policy is a singleton,
+    # these should be the same object
+    assert def_policy_1 is def_policy_2 is get_module.get_default_policy()
+
+
+@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils")
+def test_policy_propagation(get_module):
+    # The memory policy goes hand-in-hand with flags.owndata
+
+    class MyArr(np.ndarray):
+        pass
+
+    get_handler_name = np.core.multiarray.get_handler_name
+    orig_policy_name = get_handler_name()
+    a = np.arange(10).view(MyArr).reshape((2, 5))
+    assert get_handler_name(a) is None
+    assert a.flags.owndata is False
+
+    assert get_handler_name(a.base) is None
+    assert a.base.flags.owndata is False
+
+    assert get_handler_name(a.base.base) == orig_policy_name
+    assert a.base.base.flags.owndata is True
+
+
+async def concurrent_context1(get_module, orig_policy_name, event):
+    if orig_policy_name == 'default_allocator':
+        get_module.set_secret_data_policy()
+        assert np.core.multiarray.get_handler_name() == 'secret_data_allocator'
+    else:
+        get_module.set_old_policy(None)
+        assert np.core.multiarray.get_handler_name() == 'default_allocator'
+    event.set()
+
+
+async def concurrent_context2(get_module, orig_policy_name, event):
+    await event.wait()
+    # the policy is not affected by changes in parallel contexts
+    assert np.core.multiarray.get_handler_name() == orig_policy_name
+    # change policy in the child context
+    if orig_policy_name == 'default_allocator':
+        get_module.set_secret_data_policy()
+        assert np.core.multiarray.get_handler_name() == 'secret_data_allocator'
+    else:
+        get_module.set_old_policy(None)
+        assert np.core.multiarray.get_handler_name() == 'default_allocator'
+
+
+async def async_test_context_locality(get_module):
+    orig_policy_name = np.core.multiarray.get_handler_name()
+
+    event = asyncio.Event()
+    # the child contexts inherit the parent policy
+    concurrent_task1 = asyncio.create_task(
+        concurrent_context1(get_module, orig_policy_name, event))
+    concurrent_task2 = asyncio.create_task(
+        concurrent_context2(get_module, orig_policy_name, event))
+    await concurrent_task1
+    await concurrent_task2
+
+    # the parent context is not affected by child policy changes
+    assert np.core.multiarray.get_handler_name() == orig_policy_name
+
+
+@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils")
+def test_context_locality(get_module):
+    if (sys.implementation.name == 'pypy'
+            and sys.pypy_version_info[:3] < (7, 3, 6)):
+        pytest.skip('no context-locality support in PyPy < 7.3.6')
+    asyncio.run(async_test_context_locality(get_module))
+
+
+def concurrent_thread1(get_module, event):
+    get_module.set_secret_data_policy()
+    assert np.core.multiarray.get_handler_name() == 'secret_data_allocator'
+    event.set()
+
+
+def concurrent_thread2(get_module, event):
+    event.wait()
+    # the policy is not affected by changes in parallel threads
+    assert np.core.multiarray.get_handler_name() == 'default_allocator'
+    # change policy in the child thread
+    get_module.set_secret_data_policy()
+
+
+@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils")
+def test_thread_locality(get_module):
+    orig_policy_name = np.core.multiarray.get_handler_name()
+
+    event = threading.Event()
+    # the child threads do not inherit the parent policy
+    concurrent_task1 = threading.Thread(target=concurrent_thread1,
+                                        args=(get_module, event))
+    concurrent_task2 = threading.Thread(target=concurrent_thread2,
+                                        args=(get_module, event))
+    concurrent_task1.start()
+    concurrent_task2.start()
+    concurrent_task1.join()
+    concurrent_task2.join()
+
+    # the parent thread is not affected by child policy changes
+    assert np.core.multiarray.get_handler_name() == orig_policy_name
+
+
+@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils")
+@pytest.mark.skip(reason="too slow, see gh-23975")
+def test_new_policy(get_module):
+    a = np.arange(10)
+    orig_policy_name = np.core.multiarray.get_handler_name(a)
+
+    orig_policy = get_module.set_secret_data_policy()
+
+    b = np.arange(10)
+    assert np.core.multiarray.get_handler_name(b) == 'secret_data_allocator'
+
+    # test array manipulation. This is slow
+    if orig_policy_name == 'default_allocator':
+        # when the np.core.test tests recurse into this test, the
+        # policy will be set so this "if" will be false, preventing
+        # infinite recursion
+        #
+        # if needed, debug this by
+        # - running tests with -- -s (to not capture stdout/stderr)
+        # - setting verbose=2
+        # - setting extra_argv=['-vv'] here
+        assert np.core.test('full', verbose=1, extra_argv=[])
+        # also try the ma tests, the pickling test is quite tricky
+        assert np.ma.test('full', verbose=1, extra_argv=[])
+
+    get_module.set_old_policy(orig_policy)
+
+    c = np.arange(10)
+    assert np.core.multiarray.get_handler_name(c) == orig_policy_name
+
+
+@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils")
+@pytest.mark.xfail(sys.implementation.name == "pypy",
+                   reason=("bad interaction between getenv and "
+                           "os.environ inside pytest"))
+@pytest.mark.parametrize("policy", ["0", "1", None])
+def test_switch_owner(get_module, policy):
+    a = get_module.get_array()
+    assert np.core.multiarray.get_handler_name(a) is None
+    get_module.set_own(a)
+
+    if policy is None:
+        # See what we expect to be set based on the env variable
+        policy = os.getenv("NUMPY_WARN_IF_NO_MEM_POLICY", "0") == "1"
+        oldval = None
+    else:
+        policy = policy == "1"
+        oldval = np.core._multiarray_umath._set_numpy_warn_if_no_mem_policy(
+            policy)
+    try:
+        # The policy should be NULL, so we have to assume we can call
+        # "free".  A warning is given if the policy == "1"
+        if policy:
+            with assert_warns(RuntimeWarning) as w:
+                del a
+                gc.collect()
+        else:
+            del a
+            gc.collect()
+
+    finally:
+        if oldval is not None:
+            np.core._multiarray_umath._set_numpy_warn_if_no_mem_policy(oldval)
+
+
+@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils")
+def test_owner_is_base(get_module):
+    a = get_module.get_array_with_base()
+    with pytest.warns(UserWarning, match='warn_on_free'):
+        del a
+        gc.collect()
+        gc.collect()
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_memmap.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_memmap.py
new file mode 100644
index 00000000..ad074b31
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_memmap.py
@@ -0,0 +1,215 @@
+import sys
+import os
+import mmap
+import pytest
+from pathlib import Path
+from tempfile import NamedTemporaryFile, TemporaryFile
+
+from numpy import (
+    memmap, sum, average, prod, ndarray, isscalar, add, subtract, multiply)
+
+from numpy import arange, allclose, asarray
+from numpy.testing import (
+    assert_, assert_equal, assert_array_equal, suppress_warnings, IS_PYPY,
+    break_cycles
+    )
+
+class TestMemmap:
+    def setup_method(self):
+        self.tmpfp = NamedTemporaryFile(prefix='mmap')
+        self.shape = (3, 4)
+        self.dtype = 'float32'
+        self.data = arange(12, dtype=self.dtype)
+        self.data.resize(self.shape)
+
+    def teardown_method(self):
+        self.tmpfp.close()
+        self.data = None
+        if IS_PYPY:
+            break_cycles()
+            break_cycles()
+
+    def test_roundtrip(self):
+        # Write data to file
+        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+                    shape=self.shape)
+        fp[:] = self.data[:]
+        del fp  # Test __del__ machinery, which handles cleanup
+
+        # Read data back from file
+        newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
+                       shape=self.shape)
+        assert_(allclose(self.data, newfp))
+        assert_array_equal(self.data, newfp)
+        assert_equal(newfp.flags.writeable, False)
+
+    def test_open_with_filename(self, tmp_path):
+        tmpname = tmp_path / 'mmap'
+        fp = memmap(tmpname, dtype=self.dtype, mode='w+',
+                       shape=self.shape)
+        fp[:] = self.data[:]
+        del fp
+
+    def test_unnamed_file(self):
+        with TemporaryFile() as f:
+            fp = memmap(f, dtype=self.dtype, shape=self.shape)
+            del fp
+
+    def test_attributes(self):
+        offset = 1
+        mode = "w+"
+        fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
+                    shape=self.shape, offset=offset)
+        assert_equal(offset, fp.offset)
+        assert_equal(mode, fp.mode)
+        del fp
+
+    def test_filename(self, tmp_path):
+        tmpname = tmp_path / "mmap"
+        fp = memmap(tmpname, dtype=self.dtype, mode='w+',
+                       shape=self.shape)
+        abspath = Path(os.path.abspath(tmpname))
+        fp[:] = self.data[:]
+        assert_equal(abspath, fp.filename)
+        b = fp[:1]
+        assert_equal(abspath, b.filename)
+        del b
+        del fp
+
+    def test_path(self, tmp_path):
+        tmpname = tmp_path / "mmap"
+        fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
+                       shape=self.shape)
+        # os.path.realpath does not resolve symlinks on Windows
+        # see: https://bugs.python.org/issue9949
+        # use Path.resolve, just as memmap class does internally
+        abspath = str(Path(tmpname).resolve())
+        fp[:] = self.data[:]
+        assert_equal(abspath, str(fp.filename.resolve()))
+        b = fp[:1]
+        assert_equal(abspath, str(b.filename.resolve()))
+        del b
+        del fp
+
+    def test_filename_fileobj(self):
+        fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
+                    shape=self.shape)
+        assert_equal(fp.filename, self.tmpfp.name)
+
+    @pytest.mark.skipif(sys.platform == 'gnu0',
+                        reason="Known to fail on hurd")
+    def test_flush(self):
+        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+                    shape=self.shape)
+        fp[:] = self.data[:]
+        assert_equal(fp[0], self.data[0])
+        fp.flush()
+
+    def test_del(self):
+        # Make sure a view does not delete the underlying mmap
+        fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+                    shape=self.shape)
+        fp_base[0] = 5
+        fp_view = fp_base[0:1]
+        assert_equal(fp_view[0], 5)
+        del fp_view
+        # Should still be able to access and assign values after
+        # deleting the view
+        assert_equal(fp_base[0], 5)
+        fp_base[0] = 6
+        assert_equal(fp_base[0], 6)
+
+    def test_arithmetic_drops_references(self):
+        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+                    shape=self.shape)
+        tmp = (fp + 10)
+        if isinstance(tmp, memmap):
+            assert_(tmp._mmap is not fp._mmap)
+
+    def test_indexing_drops_references(self):
+        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+                    shape=self.shape)
+        tmp = fp[(1, 2), (2, 3)]
+        if isinstance(tmp, memmap):
+            assert_(tmp._mmap is not fp._mmap)
+
+    def test_slicing_keeps_references(self):
+        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+                    shape=self.shape)
+        assert_(fp[:2, :2]._mmap is fp._mmap)
+
+    def test_view(self):
+        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
+        new1 = fp.view()
+        new2 = new1.view()
+        assert_(new1.base is fp)
+        assert_(new2.base is fp)
+        new_array = asarray(fp)
+        assert_(new_array.base is fp)
+
+    def test_ufunc_return_ndarray(self):
+        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
+        fp[:] = self.data
+
+        with suppress_warnings() as sup:
+            sup.filter(FutureWarning, "np.average currently does not preserve")
+            for unary_op in [sum, average, prod]:
+                result = unary_op(fp)
+                assert_(isscalar(result))
+                assert_(result.__class__ is self.data[0, 0].__class__)
+
+                assert_(unary_op(fp, axis=0).__class__ is ndarray)
+                assert_(unary_op(fp, axis=1).__class__ is ndarray)
+
+        for binary_op in [add, subtract, multiply]:
+            assert_(binary_op(fp, self.data).__class__ is ndarray)
+            assert_(binary_op(self.data, fp).__class__ is ndarray)
+            assert_(binary_op(fp, fp).__class__ is ndarray)
+
+        fp += 1
+        assert fp.__class__ is memmap
+        add(fp, 1, out=fp)
+        assert fp.__class__ is memmap
+
+    def test_getitem(self):
+        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
+        fp[:] = self.data
+
+        assert_(fp[1:, :-1].__class__ is memmap)
+        # Fancy indexing returns a copy that is not memmapped
+        assert_(fp[[0, 1]].__class__ is ndarray)
+
+    def test_memmap_subclass(self):
+        class MemmapSubClass(memmap):
+            pass
+
+        fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape)
+        fp[:] = self.data
+
+        # We keep previous behavior for subclasses of memmap, i.e. the
+        # ufunc and __getitem__ output is never turned into a ndarray
+        assert_(sum(fp, axis=0).__class__ is MemmapSubClass)
+        assert_(sum(fp).__class__ is MemmapSubClass)
+        assert_(fp[1:, :-1].__class__ is MemmapSubClass)
+        assert fp[[0, 1]].__class__ is MemmapSubClass
+
+    def test_mmap_offset_greater_than_allocation_granularity(self):
+        size = 5 * mmap.ALLOCATIONGRANULARITY
+        offset = mmap.ALLOCATIONGRANULARITY + 1
+        fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
+        assert_(fp.offset == offset)
+
+    def test_no_shape(self):
+        self.tmpfp.write(b'a'*16)
+        mm = memmap(self.tmpfp, dtype='float64')
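+        # 16 bytes of data / 8-byte float64 itemsize -> inferred shape (2,)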
+        assert_equal(mm.shape, (2,))
+
+    def test_empty_array(self):
+        # gh-12653
+        with pytest.raises(ValueError, match='empty file'):
+            memmap(self.tmpfp, shape=(0,4), mode='w+')
+
+        self.tmpfp.write(b'\0')
+
+        # ok now the file is not empty
+        memmap(self.tmpfp, shape=(0,4), mode='w+')
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_multiarray.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_multiarray.py
new file mode 100644
index 00000000..ace40049
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_multiarray.py
@@ -0,0 +1,10054 @@
+from __future__ import annotations
+
+import collections.abc
+import tempfile
+import sys
+import warnings
+import operator
+import io
+import itertools
+import functools
+import ctypes
+import os
+import gc
+import re
+import weakref
+import pytest
+from contextlib import contextmanager
+
+from numpy.compat import pickle
+
+import pathlib
+import builtins
+from decimal import Decimal
+import mmap
+
+import numpy as np
+import numpy.core._multiarray_tests as _multiarray_tests
+from numpy.core._rational_tests import rational
+from numpy.testing import (
+    assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
+    assert_array_equal, assert_raises_regex, assert_array_almost_equal,
+    assert_allclose, IS_PYPY, IS_PYSTON, HAS_REFCOUNT, assert_array_less,
+    runstring, temppath, suppress_warnings, break_cycles, _SUPPORTS_SVE,
+    )
+from numpy.testing._private.utils import requires_memory, _no_tracing
+from numpy.core.tests._locales import CommaDecimalPointLocale
+from numpy.lib.recfunctions import repack_fields
+from numpy.core.multiarray import _get_ndarray_c_version
+
+# Need to test an object that does not fully implement math interface
+from datetime import timedelta, datetime
+
+
+def assert_arg_sorted(arr, arg):
+    # resulting array should be sorted and arg values should be unique
+    assert_equal(arr[arg], np.sort(arr))
+    assert_equal(np.sort(arg), np.arange(len(arg)))
+
+
+def _aligned_zeros(shape, dtype=float, order="C", align=None):
+    """
+    Allocate a new ndarray with aligned memory.
+
+    The ndarray is guaranteed aligned to `align`, but *not* to twice that
+    alignment. E.g., align=4 guarantees 4-byte alignment while ruling out
+    8-byte alignment. If align=None, dtype.alignment is used."""
+    dtype = np.dtype(dtype)
+    if dtype == np.dtype(object):
+        # Can't do this, fall back to standard allocation (which
+        # should always be sufficiently aligned)
+        if align is not None:
+            raise ValueError("object array alignment not supported")
+        return np.zeros(shape, dtype=dtype, order=order)
+    if align is None:
+        align = dtype.alignment
+    if not hasattr(shape, '__len__'):
+        shape = (shape,)
+    size = functools.reduce(operator.mul, shape) * dtype.itemsize
+    buf = np.empty(size + 2*align + 1, np.uint8)
+
+    ptr = buf.__array_interface__['data'][0]
+    # Advance to the next `align` boundary; if that boundary is also a
+    # 2*align boundary, step one more `align` so the promise of "aligned to
+    # align but not to 2*align" holds for any starting pointer.
+    offset = -ptr % align
+    if (ptr + offset) % (2*align) == 0:
+        offset += align
+
+    # Note: slices producing 0-size arrays do not necessarily change
+    # data pointer --- so we use and allocate size+1
+    buf = buf[offset:offset+size+1][:-1]
+    buf.fill(0)
+    data = np.ndarray(shape, dtype, buf, order=order)
+    return data
+
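+# A quick self-check of the helper's contract (illustrative; relies only on
+# the guarantees documented in the docstring above):
+#
+#     a = _aligned_zeros(16, dtype=np.uint8, align=4)
+#     ptr = a.__array_interface__['data'][0]
+#     assert ptr % 4 == 0 and ptr % 8 != 0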
+
+class TestFlags:
+    def setup_method(self):
+        self.a = np.arange(10)
+
+    def test_writeable(self):
+        mydict = locals()
+        self.a.flags.writeable = False
+        assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
+        assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
+        self.a.flags.writeable = True
+        self.a[0] = 5
+        self.a[0] = 0
+
+    def test_writeable_any_base(self):
+        # Ensure that any base being writeable is sufficient to change flag;
+        # this is especially interesting for arrays from an array interface.
+        arr = np.arange(10)
+
+        class subclass(np.ndarray):
+            pass
+
+        # Create subclass so base will not be collapsed, this is OK to change
+        view1 = arr.view(subclass)
+        view2 = view1[...]
+        arr.flags.writeable = False
+        view2.flags.writeable = False
+        view2.flags.writeable = True  # Can be set to True again.
+
+        arr = np.arange(10)
+
+        class frominterface:
+            def __init__(self, arr):
+                self.arr = arr
+                self.__array_interface__ = arr.__array_interface__
+
+        view1 = np.asarray(frominterface(arr))
+        view2 = view1[...]
+        view2.flags.writeable = False
+        view2.flags.writeable = True
+
+        view1.flags.writeable = False
+        view2.flags.writeable = False
+        with assert_raises(ValueError):
+            # Must assume not writeable, since only base is not:
+            view2.flags.writeable = True
+
+    def test_writeable_from_readonly(self):
+        # gh-9440 - make sure fromstring, from buffer on readonly buffers
+        # set writeable False
+        data = b'\x00' * 100
+        vals = np.frombuffer(data, 'B')
+        assert_raises(ValueError, vals.setflags, write=True)
+        types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
+        values = np.core.records.fromstring(data, types)
+        vals = values['vals']
+        assert_raises(ValueError, vals.setflags, write=True)
+
+    def test_writeable_from_buffer(self):
+        data = bytearray(b'\x00' * 100)
+        vals = np.frombuffer(data, 'B')
+        assert_(vals.flags.writeable)
+        vals.setflags(write=False)
+        assert_(vals.flags.writeable is False)
+        vals.setflags(write=True)
+        assert_(vals.flags.writeable)
+        types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
+        values = np.core.records.fromstring(data, types)
+        vals = values['vals']
+        assert_(vals.flags.writeable)
+        vals.setflags(write=False)
+        assert_(vals.flags.writeable is False)
+        vals.setflags(write=True)
+        assert_(vals.flags.writeable)
+
+    @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies")
+    def test_writeable_pickle(self):
+        import pickle
+        # Small arrays will be copied without setting base.
+        # See condition for using PyArray_SetBaseObject in
+        # array_setstate.
+        a = np.arange(1000)
+        for v in range(pickle.HIGHEST_PROTOCOL):
+            vals = pickle.loads(pickle.dumps(a, v))
+            assert_(vals.flags.writeable)
+            assert_(isinstance(vals.base, bytes))
+
+    def test_writeable_from_c_data(self):
+        # Test that the writeable flag can be changed for an array wrapping
+        # low level C-data, but not owning its data.
+        # Also see that this is deprecated to change from python.
+        from numpy.core._multiarray_tests import get_c_wrapping_array
+
+        arr_writeable = get_c_wrapping_array(True)
+        assert not arr_writeable.flags.owndata
+        assert arr_writeable.flags.writeable
+        view = arr_writeable[...]
+
+        # Toggling the writeable flag works on the view:
+        view.flags.writeable = False
+        assert not view.flags.writeable
+        view.flags.writeable = True
+        assert view.flags.writeable
+        # Flag can be unset on the arr_writeable:
+        arr_writeable.flags.writeable = False
+
+        arr_readonly = get_c_wrapping_array(False)
+        assert not arr_readonly.flags.owndata
+        assert not arr_readonly.flags.writeable
+
+        for arr in [arr_writeable, arr_readonly]:
+            view = arr[...]
+            view.flags.writeable = False  # make sure it is readonly
+            arr.flags.writeable = False
+            assert not arr.flags.writeable
+
+            with assert_raises(ValueError):
+                view.flags.writeable = True
+
+            with warnings.catch_warnings():
+                warnings.simplefilter("error", DeprecationWarning)
+                with assert_raises(DeprecationWarning):
+                    arr.flags.writeable = True
+
+            with assert_warns(DeprecationWarning):
+                arr.flags.writeable = True
+
+    def test_warnonwrite(self):
+        a = np.arange(10)
+        a.flags._warn_on_write = True
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always')
+            a[1] = 10
+            a[2] = 10
+            # only warn once
+            assert_(len(w) == 1)
+
+    @pytest.mark.parametrize(["flag", "flag_value", "writeable"],
+            [("writeable", True, True),
+             # Delete _warn_on_write after deprecation and simplify
+             # the parameterization:
+             ("_warn_on_write", True, False),
+             ("writeable", False, False)])
+    def test_readonly_flag_protocols(self, flag, flag_value, writeable):
+        a = np.arange(10)
+        setattr(a.flags, flag, flag_value)
+
+        class MyArr():
+            __array_struct__ = a.__array_struct__
+
+        assert memoryview(a).readonly is not writeable
+        assert a.__array_interface__['data'][1] is not writeable
+        assert np.asarray(MyArr()).flags.writeable is writeable
+
+    def test_otherflags(self):
+        assert_equal(self.a.flags.carray, True)
+        assert_equal(self.a.flags['C'], True)
+        assert_equal(self.a.flags.farray, False)
+        assert_equal(self.a.flags.behaved, True)
+        assert_equal(self.a.flags.fnc, False)
+        assert_equal(self.a.flags.forc, True)
+        assert_equal(self.a.flags.owndata, True)
+        assert_equal(self.a.flags.writeable, True)
+        assert_equal(self.a.flags.aligned, True)
+        assert_equal(self.a.flags.writebackifcopy, False)
+        assert_equal(self.a.flags['X'], False)
+        assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
+
+    def test_string_align(self):
+        a = np.zeros(4, dtype=np.dtype('|S4'))
+        assert_(a.flags.aligned)
+        # not power of two are accessed byte-wise and thus considered aligned
+        a = np.zeros(5, dtype=np.dtype('|S4'))
+        assert_(a.flags.aligned)
+
+    def test_void_align(self):
+        a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
+        assert_(a.flags.aligned)
+
+
+class TestHash:
+    # see #3793
+    def test_int(self):
+        for st, ut, s in [(np.int8, np.uint8, 8),
+                          (np.int16, np.uint16, 16),
+                          (np.int32, np.uint32, 32),
+                          (np.int64, np.uint64, 64)]:
+            for i in range(1, s):
+                assert_equal(hash(st(-2**i)), hash(-2**i),
+                             err_msg="%r: -2**%d" % (st, i))
+                assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
+                             err_msg="%r: 2**%d" % (st, i - 1))
+                assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
+                             err_msg="%r: 2**%d - 1" % (st, i))
+
+                i = max(i - 1, 1)
+                assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
+                             err_msg="%r: 2**%d" % (ut, i - 1))
+                assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
+                             err_msg="%r: 2**%d - 1" % (ut, i))
+
+
+class TestAttributes:
+    def setup_method(self):
+        self.one = np.arange(10)
+        self.two = np.arange(20).reshape(4, 5)
+        self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
+
+    def test_attributes(self):
+        assert_equal(self.one.shape, (10,))
+        assert_equal(self.two.shape, (4, 5))
+        assert_equal(self.three.shape, (2, 5, 6))
+        self.three.shape = (10, 3, 2)
+        assert_equal(self.three.shape, (10, 3, 2))
+        self.three.shape = (2, 5, 6)
+        assert_equal(self.one.strides, (self.one.itemsize,))
+        num = self.two.itemsize
+        assert_equal(self.two.strides, (5*num, num))
+        num = self.three.itemsize
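+        # C-contiguous (2, 5, 6) array: strides in elements are (5*6, 6, 1)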
+        assert_equal(self.three.strides, (30*num, 6*num, num))
+        assert_equal(self.one.ndim, 1)
+        assert_equal(self.two.ndim, 2)
+        assert_equal(self.three.ndim, 3)
+        num = self.two.itemsize
+        assert_equal(self.two.size, 20)
+        assert_equal(self.two.nbytes, 20*num)
+        assert_equal(self.two.itemsize, self.two.dtype.itemsize)
+        assert_equal(self.two.base, np.arange(20))
+
+    def test_dtypeattr(self):
+        assert_equal(self.one.dtype, np.dtype(np.int_))
+        assert_equal(self.three.dtype, np.dtype(np.float_))
+        assert_equal(self.one.dtype.char, 'l')
+        assert_equal(self.three.dtype.char, 'd')
+        assert_(self.three.dtype.str[0] in '<>')
+        assert_equal(self.one.dtype.str[1], 'i')
+        assert_equal(self.three.dtype.str[1], 'f')
+
+    def test_int_subclassing(self):
+        # Regression test for https://github.com/numpy/numpy/pull/3526
+
+        numpy_int = np.int_(0)
+
+        # int_ doesn't inherit from Python int, because it's not fixed-width
+        assert_(not isinstance(numpy_int, int))
+
+    def test_stridesattr(self):
+        x = self.one
+
+        def make_array(size, offset, strides):
+            return np.ndarray(size, buffer=x, dtype=int,
+                              offset=offset*x.itemsize,
+                              strides=strides*x.itemsize)
+
+        assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
+        assert_raises(ValueError, make_array, 4, 4, -2)
+        assert_raises(ValueError, make_array, 4, 2, -1)
+        assert_raises(ValueError, make_array, 8, 3, 1)
+        assert_equal(make_array(8, 3, 0), np.array([3]*8))
+        # Check behavior reported in gh-2503:
+        assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
+        make_array(0, 0, 10)
+
+    def test_set_stridesattr(self):
+        x = self.one
+
+        def make_array(size, offset, strides):
+            try:
+                r = np.ndarray([size], dtype=int, buffer=x,
+                               offset=offset*x.itemsize)
+            except Exception as e:
+                raise RuntimeError(e)
+            r.strides = strides * x.itemsize
+            return r
+
+        assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
+        assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
+        assert_raises(ValueError, make_array, 4, 4, -2)
+        assert_raises(ValueError, make_array, 4, 2, -1)
+        assert_raises(RuntimeError, make_array, 8, 3, 1)
+        # Check that the true extent of the array is used.
+        # Test relies on as_strided base not exposing a buffer.
+        x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
+
+        def set_strides(arr, strides):
+            arr.strides = strides
+
+        assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
+
+        # Test for offset calculations:
+        x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
+                                                    shape=(10,), strides=(-1,))
+        assert_raises(ValueError, set_strides, x[::-1], -1)
+        a = x[::-1]
+        a.strides = 1
+        a[::2].strides = 2
+
+        # test 0d
+        arr_0d = np.array(0)
+        arr_0d.strides = ()
+        assert_raises(TypeError, set_strides, arr_0d, None)
+
+    def test_fill(self):
+        for t in "?bhilqpBHILQPfdgFDGO":
+            x = np.empty((3, 2, 1), t)
+            y = np.empty((3, 2, 1), t)
+            x.fill(1)
+            y[...] = 1
+            assert_equal(x, y)
+
+    def test_fill_max_uint64(self):
+        x = np.empty((3, 2, 1), dtype=np.uint64)
+        y = np.empty((3, 2, 1), dtype=np.uint64)
+        value = 2**64 - 1
+        y[...] = value
+        x.fill(value)
+        assert_array_equal(x, y)
+
+    def test_fill_struct_array(self):
+        # Filling from a scalar
+        x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
+        x.fill(x[0])
+        assert_equal(x['f1'][1], x['f1'][0])
+        # Filling from a tuple that can be converted
+        # to a scalar
+        x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
+        x.fill((3.5, -2))
+        assert_array_equal(x['a'], [3.5, 3.5])
+        assert_array_equal(x['b'], [-2, -2])
+
+    def test_fill_readonly(self):
+        # gh-22922
+        a = np.zeros(11)
+        a.setflags(write=False)
+        with pytest.raises(ValueError, match=".*read-only"):
+            a.fill(0)
+
+
+class TestArrayConstruction:
+    def test_array(self):
+        d = np.ones(6)
+        r = np.array([d, d])
+        assert_equal(r, np.ones((2, 6)))
+
+        d = np.ones(6)
+        tgt = np.ones((2, 6))
+        r = np.array([d, d])
+        assert_equal(r, tgt)
+        tgt[1] = 2
+        r = np.array([d, d + 1])
+        assert_equal(r, tgt)
+
+        d = np.ones(6)
+        r = np.array([[d, d]])
+        assert_equal(r, np.ones((1, 2, 6)))
+
+        d = np.ones(6)
+        r = np.array([[d, d], [d, d]])
+        assert_equal(r, np.ones((2, 2, 6)))
+
+        d = np.ones((6, 6))
+        r = np.array([d, d])
+        assert_equal(r, np.ones((2, 6, 6)))
+
+        d = np.ones((6, ))
+        r = np.array([[d, d + 1], d + 2], dtype=object)
+        assert_equal(len(r), 2)
+        assert_equal(r[0], [d, d + 1])
+        assert_equal(r[1], d + 2)
+
+        tgt = np.ones((2, 3), dtype=bool)
+        tgt[0, 2] = False
+        tgt[1, 0:2] = False
+        r = np.array([[True, True, False], [False, False, True]])
+        assert_equal(r, tgt)
+        r = np.array([[True, False], [True, False], [False, True]])
+        assert_equal(r, tgt.T)
+
+    def test_array_empty(self):
+        assert_raises(TypeError, np.array)
+
+    def test_0d_array_shape(self):
+        assert np.ones(np.array(3)).shape == (3,)
+
+    def test_array_copy_false(self):
+        d = np.array([1, 2, 3])
+        e = np.array(d, copy=False)
+        d[1] = 3
+        assert_array_equal(e, [1, 3, 3])
+        e = np.array(d, copy=False, order='F')
+        d[1] = 4
+        assert_array_equal(e, [1, 4, 3])
+        e[2] = 7
+        assert_array_equal(d, [1, 4, 7])
+
+    def test_array_copy_true(self):
+        d = np.array([[1,2,3], [1, 2, 3]])
+        e = np.array(d, copy=True)
+        d[0, 1] = 3
+        e[0, 2] = -7
+        assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
+        assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
+        e = np.array(d, copy=True, order='F')
+        d[0, 1] = 5
+        e[0, 2] = 7
+        assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
+        assert_array_equal(d, [[1, 5, 3], [1,2,3]])
+
+    def test_array_cont(self):
+        d = np.ones(10)[::2]
+        assert_(np.ascontiguousarray(d).flags.c_contiguous)
+        assert_(np.ascontiguousarray(d).flags.f_contiguous)
+        assert_(np.asfortranarray(d).flags.c_contiguous)
+        assert_(np.asfortranarray(d).flags.f_contiguous)
+        d = np.ones((10, 10))[::2,::2]
+        assert_(np.ascontiguousarray(d).flags.c_contiguous)
+        assert_(np.asfortranarray(d).flags.f_contiguous)
+
+    @pytest.mark.parametrize("func",
+            [np.array,
+             np.asarray,
+             np.asanyarray,
+             np.ascontiguousarray,
+             np.asfortranarray])
+    def test_bad_arguments_error(self, func):
+        with pytest.raises(TypeError):
+            func(3, dtype="bad dtype")
+        with pytest.raises(TypeError):
+            func()  # missing arguments
+        with pytest.raises(TypeError):
+            func(1, 2, 3, 4, 5, 6, 7, 8)  # too many arguments
+
+    @pytest.mark.parametrize("func",
+            [np.array,
+             np.asarray,
+             np.asanyarray,
+             np.ascontiguousarray,
+             np.asfortranarray])
+    def test_array_as_keyword(self, func):
+        # This should likely be made positional only, but do not change
+        # the name accidentally.
+        if func is np.array:
+            func(object=3)
+        else:
+            func(a=3)
+
+
+class TestAssignment:
+    def test_assignment_broadcasting(self):
+        a = np.arange(6).reshape(2, 3)
+
+        # Broadcasting the input to the output
+        a[...] = np.arange(3)
+        assert_equal(a, [[0, 1, 2], [0, 1, 2]])
+        a[...] = np.arange(2).reshape(2, 1)
+        assert_equal(a, [[0, 0, 0], [1, 1, 1]])
+
+        # For compatibility with <= 1.5, a limited version of broadcasting
+        # the output to the input.
+        #
+        # This behavior is inconsistent with NumPy broadcasting
+        # in general, because it only uses one of the two broadcasting
+        # rules (adding a new "1" dimension to the left of the shape),
+        # applied to the output instead of an input. In NumPy 2.0, this kind
+        # of broadcasting assignment will likely be disallowed.
+        a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
+        assert_equal(a, [[5, 4, 3], [2, 1, 0]])
+        # The other type of broadcasting would require a reduction operation.
+
+        def assign(a, b):
+            a[...] = b
+
+        assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
+
+    def test_assignment_errors(self):
+        # Address issue #2276
+        class C:
+            pass
+        a = np.zeros(1)
+
+        def assign(v):
+            a[0] = v
+
+        assert_raises((AttributeError, TypeError), assign, C())
+        assert_raises(ValueError, assign, [1])
+
+    def test_unicode_assignment(self):
+        # gh-5049
+        from numpy.core.numeric import set_string_function
+
+        @contextmanager
+        def inject_str(s):
+            """ replace ndarray.__str__ temporarily """
+            set_string_function(lambda x: s, repr=False)
+            try:
+                yield
+            finally:
+                set_string_function(None, repr=False)
+
+        a1d = np.array(['test'])
+        a0d = np.array('done')
+        with inject_str('bad'):
+            a1d[0] = a0d  # previously this would invoke __str__
+        assert_equal(a1d[0], 'done')
+
+        # this would crash for the same reason
+        np.array([np.array('\xe5\xe4\xf6')])
+
+    def test_stringlike_empty_list(self):
+        # gh-8902
+        u = np.array(['done'])
+        b = np.array([b'done'])
+
+        class bad_sequence:
+            def __getitem__(self): pass
+            def __len__(self): raise RuntimeError
+
+        assert_raises(ValueError, operator.setitem, u, 0, [])
+        assert_raises(ValueError, operator.setitem, b, 0, [])
+
+        assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
+        assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
+
+    def test_longdouble_assignment(self):
+        # only relevant if longdouble is larger than float
+        # we're looking for loss of precision
+
+        for dtype in (np.longdouble, np.longcomplex):
+            # gh-8902
+            tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
+            tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
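+            # where longdouble is genuinely wider than double, both values
+            # underflow to 0.0 in float64, so any assignment path that
+            # routed through double precision would be caught below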
+
+            # construction
+            tiny1d = np.array([tinya])
+            assert_equal(tiny1d[0], tinya)
+
+            # scalar = scalar
+            tiny1d[0] = tinyb
+            assert_equal(tiny1d[0], tinyb)
+
+            # 0d = scalar
+            tiny1d[0, ...] = tinya
+            assert_equal(tiny1d[0], tinya)
+
+            # 0d = 0d
+            tiny1d[0, ...] = tinyb[...]
+            assert_equal(tiny1d[0], tinyb)
+
+            # scalar = 0d
+            tiny1d[0] = tinyb[...]
+            assert_equal(tiny1d[0], tinyb)
+
+            arr = np.array([np.array(tinya)])
+            assert_equal(arr[0], tinya)
+
+    def test_cast_to_string(self):
+        # cast to str should do "str(scalar)", not "str(scalar.item())"
+        # Example: In python2, str(float) is truncated, so we want to avoid
+        # str(np.float64(...).item()) as this would incorrectly truncate.
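+        # str() of a float64 produces at most 17 significant digits (the
+        # shortest exact round-trip), hence the shortened bytes below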
+        a = np.zeros(1, dtype='S20')
+        a[:] = np.array(['1.12345678901234567890'], dtype='f8')
+        assert_equal(a[0], b"1.1234567890123457")
+
+
+class TestDtypedescr:
+    def test_construction(self):
+        d1 = np.dtype('i4')
+        assert_equal(d1, np.dtype(np.int32))
+        d2 = np.dtype('f8')
+        assert_equal(d2, np.dtype(np.float64))
+
+    def test_byteorders(self):
+        assert_(np.dtype('<i4') != np.dtype('>i4'))
+        assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
+
+    def test_structured_non_void(self):
+        fields = [('a', '<i2'), ('b', '<i2')]
+        dt_int = np.dtype(('i4', fields))
+        assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
+
+        # gh-9821
+        arr_int = np.zeros(4, dt_int)
+        assert_equal(repr(arr_int),
+            "array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
+
+
+class TestStructured:
+    def test_structured_comparisons_with_promotion(self):
+        # Check that structured arrays can be compared so long as their
+        # dtypes promote fine:
+        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
+        b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
+        assert_equal(a == b, [False, True])
+        assert_equal(a != b, [True, False])
+
+        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>f8'), ('b', '<i8')])
+        b = np.array([(5, 43), (10, 1)], dtype=[('a', '<f8'), ('b', '>i8')])
+        assert_equal(a == b, [False, True])
+        assert_equal(a != b, [True, False])
+
+        # Including with embedded subarray dtype (although subarray comparison
+        # itself may still be a bit weird and compare the raw data)
+        a = np.array([(5, 42), (10, 1)], dtype=[('a', '10>f8'), ('b', '5<i8')])
+        b = np.array([(5, 43), (10, 1)], dtype=[('a', '10<f8'), ('b', '5>i8')])
+        assert_equal(a == b, [False, True])
+        assert_equal(a != b, [True, False])
+
+    @pytest.mark.parametrize("op", [
+            operator.eq, lambda x, y: operator.eq(y, x),
+            operator.ne, lambda x, y: operator.ne(y, x)])
+    def test_void_comparison_failures(self, op):
+        # In principle, one could decide to return an array of False for some
+        # if comparisons are impossible.  But right now we return TypeError
+        # when "void" dtype are involved.
+        x = np.zeros(3, dtype=[('a', 'i1')])
+        y = np.zeros(3)
+        # Cannot compare non-structured to structured:
+        with pytest.raises(TypeError):
+            op(x, y)
+
+        # Added title prevents promotion, but casts are OK:
+        y = np.zeros(3, dtype=[(('title', 'a'), 'i1')])
+        assert np.can_cast(y.dtype, x.dtype)
+        with pytest.raises(TypeError):
+            op(x, y)
+
+        x = np.zeros(3, dtype="V7")
+        y = np.zeros(3, dtype="V8")
+        with pytest.raises(TypeError):
+            op(x, y)
+
+    def test_casting(self):
+        # Check that casting a structured array to change its byte order
+        # works
+        a = np.array([(1,)], dtype=[('a', '<i4')])
+        assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
+        b = a.astype([('a', '>i4')])
+        assert_equal(b, a.byteswap().newbyteorder())
+        assert_equal(a['a'][0], b['a'][0])
+
+        # Check that equality comparison works on structured arrays if
+        # they are 'equiv'-castable
+        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
+        b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
+        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
+        assert_equal(a == b, [True, True])
+
+        # Check that 'equiv' casting can change byte order
+        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
+        c = a.astype(b.dtype, casting='equiv')
+        assert_equal(a == c, [True, True])
+
+        # Check that 'safe' casting can change byte order and up-cast
+        # fields
+        t = [('a', '<i8'), ('b', '>f8')]
+        assert_(np.can_cast(a.dtype, t, casting='safe'))
+        c = a.astype(t, casting='safe')
+        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
+                     [True, True])
+
+        # Check that 'same_kind' casting can change byte order and
+        # change field widths within a "kind"
+        t = [('a', '<i4'), ('b', '>f4')]
+        assert_(np.can_cast(a.dtype, t, casting='same_kind'))
+        c = a.astype(t, casting='same_kind')
+        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
+                     [True, True])
+
+        # Check that casting fails if the casting rule should fail on
+        # any of the fields
+        t = [('a', '>i8'), ('b', '<f4')]
+        assert_(not np.can_cast(a.dtype, t, casting='safe'))
+        t = [('a', '>i2'), ('b', '<f8')]
+        assert_(not np.can_cast(a.dtype, t, casting='equiv'))
+        t = [('a', '>i8'), ('b', '<i2')]
+        assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
+
+        # Check that non-'unsafe' casting can't change the set of field names
+        for casting in ['no', 'safe', 'equiv', 'same_kind']:
+            t = [('a', '>i4')]
+            assert_(not np.can_cast(a.dtype, t, casting=casting))
+            t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
+            assert_(not np.can_cast(a.dtype, t, casting=casting))
+
+    def test_structured_cast_promotion_fieldorder(self):
+        # gh-15494
+        # dtypes with different field names are not promotable
+        A = ("a", "<i8")
+        B = ("b", ">i8")
+        ab = np.array([(1, 2)], dtype=[A, B])
+        ba = np.array([(1, 2)], dtype=[B, A])
+        assert_raises(TypeError, np.concatenate, ab, ba)
+        assert_raises(TypeError, np.result_type, ab.dtype, ba.dtype)
+        assert_raises(TypeError, np.promote_types, ab.dtype, ba.dtype)
+
+        # dtypes with same field names/order but different memory offsets
+        # and byte-order are promotable to packed nbo.
+        assert_equal(np.promote_types(ab.dtype, ba[['a', 'b']].dtype),
+                     repack_fields(ab.dtype.newbyteorder('N')))
+
+        # gh-13667
+        # dtypes with different fieldnames but castable field types are castable
+        assert_equal(np.can_cast(ab.dtype, ba.dtype), True)
+        assert_equal(ab.astype(ba.dtype).dtype, ba.dtype)
+        assert_equal(np.can_cast('f8,i8', [('f0', 'f8'), ('f1', 'i8')]), True)
+        assert_equal(np.can_cast('f8,i8', [('f1', 'f8'), ('f0', 'i8')]), True)
+        assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')]), False)
+        assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')],
+                                 casting='unsafe'), True)
+
+        ab[:] = ba  # make sure assignment still works
+
+        # tests of type-promotion of corresponding fields
+        dt1 = np.dtype([("", "i4")])
+        dt2 = np.dtype([("", "i8")])
+        assert_equal(np.promote_types(dt1, dt2), np.dtype([('f0', 'i8')]))
+        assert_equal(np.promote_types(dt2, dt1), np.dtype([('f0', 'i8')]))
+        assert_raises(TypeError, np.promote_types, dt1, np.dtype([("", "V3")]))
+        assert_equal(np.promote_types('i4,f8', 'i8,f4'),
+                     np.dtype([('f0', 'i8'), ('f1', 'f8')]))
+        # test nested case
+        dt1nest = np.dtype([("", dt1)])
+        dt2nest = np.dtype([("", dt2)])
+        assert_equal(np.promote_types(dt1nest, dt2nest),
+                     np.dtype([('f0', np.dtype([('f0', 'i8')]))]))
+
+        # note that offsets are lost when promoting:
+        dt = np.dtype({'names': ['x'], 'formats': ['i4'], 'offsets': [8]})
+        a = np.ones(3, dtype=dt)
+        assert_equal(np.concatenate([a, a]).dtype, np.dtype([('x', 'i4')]))
+
+    @pytest.mark.parametrize("dtype_dict", [
+            dict(names=["a", "b"], formats=["i4", "f"], itemsize=100),
+            dict(names=["a", "b"], formats=["i4", "f"],
+                 offsets=[0, 12])])
+    @pytest.mark.parametrize("align", [True, False])
+    def test_structured_promotion_packs(self, dtype_dict, align):
+        # Structured dtypes are packed when promoted (we consider the packed
+        # form to be "canonical"), so there is no extra padding.
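+        # (e.g. the explicit itemsize=100 / offsets above collapse back to
+        # the canonical packed layout once the dtype is promoted)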
+        dtype = np.dtype(dtype_dict, align=align)
+        # Remove non "canonical" dtype options:
+        dtype_dict.pop("itemsize", None)
+        dtype_dict.pop("offsets", None)
+        expected = np.dtype(dtype_dict, align=align)
+
+        res = np.promote_types(dtype, dtype)
+        assert res.itemsize == expected.itemsize
+        assert res.fields == expected.fields
+
+        # But the "expected" one, should just be returned unchanged:
+        res = np.promote_types(expected, expected)
+        assert res is expected
+
+    def test_structured_asarray_is_view(self):
+        # A scalar viewing an array preserves its view even when creating a
+        # new array. This test documents the behaviour; it may not be the
+        # desired behaviour.
+        arr = np.array([1], dtype="i,i")
+        scalar = arr[0]
+        assert not scalar.flags.owndata  # view into the array
+        assert np.asarray(scalar).base is scalar
+        # But never when a dtype is passed in:
+        assert np.asarray(scalar, dtype=scalar.dtype).base is None
+        # A scalar which owns its data does not have this property.
+        # It is not easy to create one; one way is to use pickle:
+        scalar = pickle.loads(pickle.dumps(scalar))
+        assert scalar.flags.owndata
+        assert np.asarray(scalar).base is None
+
+
+class TestBool:
+    def test_test_interning(self):
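+        # np.bool_(0) and np.bool_(False) both return the interned
+        # singleton np.False_, which is what makes the `is` checks valid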
+        a0 = np.bool_(0)
+        b0 = np.bool_(False)
+        assert_(a0 is b0)
+        a1 = np.bool_(1)
+        b1 = np.bool_(True)
+        assert_(a1 is b1)
+        assert_(np.array([True])[0] is a1)
+        assert_(np.array(True)[()] is a1)
+
+    def test_sum(self):
+        d = np.ones(101, dtype=bool)
+        assert_equal(d.sum(), d.size)
+        assert_equal(d[::2].sum(), d[::2].size)
+        assert_equal(d[::-2].sum(), d[::-2].size)
+
+        d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
+        assert_equal(d.sum(), d.size)
+        assert_equal(d[::2].sum(), d[::2].size)
+        assert_equal(d[::-2].sum(), d[::-2].size)
+
+    def check_count_nonzero(self, power, length):
+        powers = [2 ** i for i in range(length)]
+        for i in range(2**power):
+            l = [(i & x) != 0 for x in powers]
+            a = np.array(l, dtype=bool)
+            c = builtins.sum(l)
+            assert_equal(np.count_nonzero(a), c)
+            av = a.view(np.uint8)
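+            # rewrite the raw bytes so True is stored as values other than
+            # 0x01; count_nonzero must treat any nonzero byte as set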
+            av *= 3
+            assert_equal(np.count_nonzero(a), c)
+            av *= 4
+            assert_equal(np.count_nonzero(a), c)
+            av[av != 0] = 0xFF
+            assert_equal(np.count_nonzero(a), c)
+
+    def test_count_nonzero(self):
+        # check all 12 bit combinations in a length 17 array
+        # covers most cases of the 16 byte unrolled code
+        self.check_count_nonzero(12, 17)
+
+    @pytest.mark.slow
+    def test_count_nonzero_all(self):
+        # check all combinations in a length 17 array
+        # covers all cases of the 16 byte unrolled code
+        self.check_count_nonzero(17, 17)
+
+    def test_count_nonzero_unaligned(self):
+        # prevent mistakes as e.g. gh-4060
+        for o in range(7):
+            a = np.zeros((18,), dtype=bool)[o+1:]
+            a[:o] = True
+            assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
+            a = np.ones((18,), dtype=bool)[o+1:]
+            a[:o] = False
+            assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
+
+    def _test_cast_from_flexible(self, dtype):
+        # empty string -> false
+        for n in range(3):
+            v = np.array(b'', (dtype, n))
+            assert_equal(bool(v), False)
+            assert_equal(bool(v[()]), False)
+            assert_equal(v.astype(bool), False)
+            assert_(isinstance(v.astype(bool), np.ndarray))
+            assert_(v[()].astype(bool) is np.False_)
+
+        # anything else -> true
+        for n in range(1, 4):
+            for val in [b'a', b'0', b' ']:
+                v = np.array(val, (dtype, n))
+                assert_equal(bool(v), True)
+                assert_equal(bool(v[()]), True)
+                assert_equal(v.astype(bool), True)
+                assert_(isinstance(v.astype(bool), np.ndarray))
+                assert_(v[()].astype(bool) is np.True_)
+
+    def test_cast_from_void(self):
+        self._test_cast_from_flexible(np.void)
+
+    @pytest.mark.xfail(reason="See gh-9847")
+    def test_cast_from_unicode(self):
+        self._test_cast_from_flexible(np.str_)
+
+    @pytest.mark.xfail(reason="See gh-9847")
+    def test_cast_from_bytes(self):
+        self._test_cast_from_flexible(np.bytes_)
+
+
+class TestZeroSizeFlexible:
+    @staticmethod
+    def _zeros(shape, dtype=str):
+        dtype = np.dtype(dtype)
+        if dtype == np.void:
+            return np.zeros(shape, dtype=(dtype, 0))
+
+        # not constructable directly
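+        # a zero-width field inside a structured dtype is permitted, and
+        # extracting that field yields an array whose itemsize really is 0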
+        dtype = np.dtype([('x', dtype, 0)])
+        return np.zeros(shape, dtype=dtype)['x']
+
+    def test_create(self):
+        zs = self._zeros(10, bytes)
+        assert_equal(zs.itemsize, 0)
+        zs = self._zeros(10, np.void)
+        assert_equal(zs.itemsize, 0)
+        zs = self._zeros(10, str)
+        assert_equal(zs.itemsize, 0)
+
+    def _test_sort_partition(self, name, kinds, **kwargs):
+        # Previously, these would all hang
+        for dt in [bytes, np.void, str]:
+            zs = self._zeros(10, dt)
+            sort_method = getattr(zs, name)
+            sort_func = getattr(np, name)
+            for kind in kinds:
+                sort_method(kind=kind, **kwargs)
+                sort_func(zs, kind=kind, **kwargs)
+
+    def test_sort(self):
+        self._test_sort_partition('sort', kinds='qhs')
+
+    def test_argsort(self):
+        self._test_sort_partition('argsort', kinds='qhs')
+
+    def test_partition(self):
+        self._test_sort_partition('partition', kinds=['introselect'], kth=2)
+
+    def test_argpartition(self):
+        self._test_sort_partition('argpartition', kinds=['introselect'], kth=2)
+
+    def test_resize(self):
+        # previously an error
+        for dt in [bytes, np.void, str]:
+            zs = self._zeros(10, dt)
+            zs.resize(25)
+            zs.resize((10, 10))
+
+    def test_view(self):
+        for dt in [bytes, np.void, str]:
+            zs = self._zeros(10, dt)
+
+            # viewing as itself should be allowed
+            assert_equal(zs.view(dt).dtype, np.dtype(dt))
+
+            # viewing as any non-empty type gives an empty result
+            assert_equal(zs.view((dt, 1)).shape, (0,))
+
+    def test_dumps(self):
+        zs = self._zeros(10, int)
+        assert_equal(zs, pickle.loads(zs.dumps()))
+
+    def test_pickle(self):
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            for dt in [bytes, np.void, str]:
+                zs = self._zeros(10, dt)
+                p = pickle.dumps(zs, protocol=proto)
+                zs2 = pickle.loads(p)
+
+                assert_equal(zs.dtype, zs2.dtype)
+
+    def test_pickle_empty(self):
+        """Checking if an empty array pickled and un-pickled will not cause a
+        segmentation fault"""
+        arr = np.array([]).reshape(999999, 0)
+        pk_dmp = pickle.dumps(arr)
+        pk_load = pickle.loads(pk_dmp)
+
+        assert pk_load.size == 0
+
+    @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
+                        reason="requires pickle protocol 5")
+    def test_pickle_with_buffercallback(self):
+        array = np.arange(10)
+        buffers = []
+        bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
+                                    protocol=5)
+        array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
+        # when using pickle protocol 5 with buffer callbacks,
+        # array_from_buffer is reconstructed from a buffer holding a view
+        # to the initial array's data, so modifying an element in array
+        # should modify it in array_from_buffer too.
+        array[0] = -1
+        assert array_from_buffer[0] == -1, array_from_buffer[0]
+
+
+class TestMethods:
+
+    sort_kinds = ['quicksort', 'heapsort', 'stable']
+
+    def test_all_where(self):
+        a = np.array([[True, False, True],
+                      [False, False, False],
+                      [True, True, True]])
+        wh_full = np.array([[True, False, True],
+                            [False, False, False],
+                            [True, False, True]])
+        wh_lower = np.array([[False],
+                             [False],
+                             [True]])
+        for _ax in [0, None]:
+            assert_equal(a.all(axis=_ax, where=wh_lower),
+                        np.all(a[wh_lower[:,0],:], axis=_ax))
+            assert_equal(np.all(a, axis=_ax, where=wh_lower),
+                         a[wh_lower[:,0],:].all(axis=_ax))
+
+        assert_equal(a.all(where=wh_full), True)
+        assert_equal(np.all(a, where=wh_full), True)
+        assert_equal(a.all(where=False), True)
+        assert_equal(np.all(a, where=False), True)
+
+    def test_any_where(self):
+        a = np.array([[True, False, True],
+                      [False, False, False],
+                      [True, True, True]])
+        wh_full = np.array([[False, True, False],
+                            [True, True, True],
+                            [False, False, False]])
+        wh_middle = np.array([[False],
+                              [True],
+                              [False]])
+        for _ax in [0, None]:
+            assert_equal(a.any(axis=_ax, where=wh_middle),
+                         np.any(a[wh_middle[:,0],:], axis=_ax))
+            assert_equal(np.any(a, axis=_ax, where=wh_middle),
+                         a[wh_middle[:,0],:].any(axis=_ax))
+        assert_equal(a.any(where=wh_full), False)
+        assert_equal(np.any(a, where=wh_full), False)
+        assert_equal(a.any(where=False), False)
+        assert_equal(np.any(a, where=False), False)
+
+    def test_compress(self):
+        tgt = [[5, 6, 7, 8, 9]]
+        arr = np.arange(10).reshape(2, 5)
+        out = arr.compress([0, 1], axis=0)
+        assert_equal(out, tgt)
+
+        tgt = [[1, 3], [6, 8]]
+        out = arr.compress([0, 1, 0, 1, 0], axis=1)
+        assert_equal(out, tgt)
+
+        tgt = [[1], [6]]
+        arr = np.arange(10).reshape(2, 5)
+        out = arr.compress([0, 1], axis=1)
+        assert_equal(out, tgt)
+
+        arr = np.arange(10).reshape(2, 5)
+        out = arr.compress([0, 1])
+        assert_equal(out, 1)
+
+    def test_choose(self):
+        x = 2*np.ones((3,), dtype=int)
+        y = 3*np.ones((3,), dtype=int)
+        x2 = 2*np.ones((2, 3), dtype=int)
+        y2 = 3*np.ones((2, 3), dtype=int)
+        ind = np.array([0, 0, 1])
+
+        A = ind.choose((x, y))
+        assert_equal(A, [2, 2, 3])
+
+        A = ind.choose((x2, y2))
+        assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+
+        A = ind.choose((x, y2))
+        assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+
+        oned = np.ones(1)
+        # gh-12031, caused SEGFAULT
+        assert_raises(TypeError, oned.choose, np.void(0), [oned])
+
+        out = np.array(0)
+        ret = np.choose(np.array(1), [10, 20, 30], out=out)
+        assert out is ret
+        assert_equal(out[()], 20)
+
+        # gh-6272 check overlap on out
+        x = np.arange(5)
+        y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap')
+        assert_equal(y, np.array([0, 1, 2]))
+
+    def test_prod(self):
+        ba = [1, 2, 10, 11, 6, 5, 4]
+        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
+
+        for ctype in [np.int16, np.uint16, np.int32, np.uint32,
+                      np.float32, np.float64, np.complex64, np.complex128]:
+            a = np.array(ba, ctype)
+            a2 = np.array(ba2, ctype)
+            if ctype in ['1', 'b']:
+                assert_raises(ArithmeticError, a.prod)
+                assert_raises(ArithmeticError, a2.prod, axis=1)
+            else:
+                assert_equal(a.prod(axis=0), 26400)
+                assert_array_equal(a2.prod(axis=0),
+                                   np.array([50, 36, 84, 180], ctype))
+                assert_array_equal(a2.prod(axis=-1),
+                                   np.array([24, 1890, 600], ctype))
+
+    @pytest.mark.parametrize('dtype', [None, object])
+    def test_repeat(self, dtype):
+        m = np.array([1, 2, 3, 4, 5, 6], dtype=dtype)
+        m_rect = m.reshape((2, 3))
+
+        A = m.repeat([1, 3, 2, 1, 1, 2])
+        assert_equal(A, [1, 2, 2, 2, 3,
+                         3, 4, 5, 6, 6])
+
+        A = m.repeat(2)
+        assert_equal(A, [1, 1, 2, 2, 3, 3,
+                         4, 4, 5, 5, 6, 6])
+
+        A = m_rect.repeat([2, 1], axis=0)
+        assert_equal(A, [[1, 2, 3],
+                         [1, 2, 3],
+                         [4, 5, 6]])
+
+        A = m_rect.repeat([1, 3, 2], axis=1)
+        assert_equal(A, [[1, 2, 2, 2, 3, 3],
+                         [4, 5, 5, 5, 6, 6]])
+
+        A = m_rect.repeat(2, axis=0)
+        assert_equal(A, [[1, 2, 3],
+                         [1, 2, 3],
+                         [4, 5, 6],
+                         [4, 5, 6]])
+
+        A = m_rect.repeat(2, axis=1)
+        assert_equal(A, [[1, 1, 2, 2, 3, 3],
+                         [4, 4, 5, 5, 6, 6]])
+
+    def test_reshape(self):
+        arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
+
+        tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
+        assert_equal(arr.reshape(2, 6), tgt)
+
+        tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
+        assert_equal(arr.reshape(3, 4), tgt)
+
+        tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
+        assert_equal(arr.reshape((3, 4), order='F'), tgt)
+
+        tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
+        assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
+
+    def test_round(self):
+        def check_round(arr, expected, *round_args):
+            assert_equal(arr.round(*round_args), expected)
+            # With output array
+            out = np.zeros_like(arr)
+            res = arr.round(*round_args, out=out)
+            assert_equal(out, expected)
+            assert out is res
+
+        check_round(np.array([1.2, 1.5]), [1, 2])
+        check_round(np.array(1.5), 2)
+        check_round(np.array([12.2, 15.5]), [10, 20], -1)
+        check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
+        # Complex rounding
+        check_round(np.array([4.5 + 1.5j]), [4 + 2j])
+        check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
+
+    def test_squeeze(self):
+        a = np.array([[[1], [2], [3]]])
+        assert_equal(a.squeeze(), [1, 2, 3])
+        assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
+        assert_raises(ValueError, a.squeeze, axis=(1,))
+        assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
+
+    def test_transpose(self):
+        a = np.array([[1, 2], [3, 4]])
+        assert_equal(a.transpose(), [[1, 3], [2, 4]])
+        assert_raises(ValueError, lambda: a.transpose(0))
+        assert_raises(ValueError, lambda: a.transpose(0, 0))
+        assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
+
+    def test_sort(self):
+        # test ordering for floats and complex containing nans. It is only
+        # necessary to check the less-than comparison, so sorts that
+        # only follow the insertion sort path are sufficient. We only
+        # test doubles and complex doubles as the logic is the same.
+
+        # check doubles
+        msg = "Test real sort order with nans"
+        a = np.array([np.nan, 1, 0])
+        b = np.sort(a)
+        assert_equal(b, a[::-1], msg)
+        # check complex
+        msg = "Test complex sort order with nans"
+        a = np.zeros(9, dtype=np.complex128)
+        a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
+        a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
+        b = np.sort(a)
+        assert_equal(b, a[::-1], msg)
+
+    # all c scalar sorts use the same code with different types
+    # so it suffices to run a quick check with one type. The number
+    # of sorted items must be greater than ~50 to check the actual
+    # algorithm because quick and merge sort fall over to insertion
+    # sort for small arrays.
+
+    @pytest.mark.parametrize('dtype', [np.uint8, np.uint16, np.uint32, np.uint64,
+                                       np.float16, np.float32, np.float64,
+                                       np.longdouble])
+    def test_sort_unsigned(self, dtype):
+        a = np.arange(101, dtype=dtype)
+        b = a[::-1].copy()
+        for kind in self.sort_kinds:
+            msg = "scalar sort, kind=%s" % kind
+            c = a.copy()
+            c.sort(kind=kind)
+            assert_equal(c, a, msg)
+            c = b.copy()
+            c.sort(kind=kind)
+            assert_equal(c, a, msg)
+
+    @pytest.mark.parametrize('dtype',
+                             [np.int8, np.int16, np.int32, np.int64, np.float16,
+                              np.float32, np.float64, np.longdouble])
+    def test_sort_signed(self, dtype):
+        a = np.arange(-50, 51, dtype=dtype)
+        b = a[::-1].copy()
+        for kind in self.sort_kinds:
+            msg = "scalar sort, kind=%s" % (kind)
+            c = a.copy()
+            c.sort(kind=kind)
+            assert_equal(c, a, msg)
+            c = b.copy()
+            c.sort(kind=kind)
+            assert_equal(c, a, msg)
+
+    @pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longdouble])
+    @pytest.mark.parametrize('part', ['real', 'imag'])
+    def test_sort_complex(self, part, dtype):
+        # test complex sorts. These use the same code as the scalars
+        # but the compare function differs.
+        cdtype = {
+            np.single: np.csingle,
+            np.double: np.cdouble,
+            np.longdouble: np.clongdouble,
+        }[dtype]
+        a = np.arange(-50, 51, dtype=dtype)
+        b = a[::-1].copy()
+        ai = (a * (1+1j)).astype(cdtype)
+        bi = (b * (1+1j)).astype(cdtype)
+        setattr(ai, part, 1)
+        setattr(bi, part, 1)
+        for kind in self.sort_kinds:
+            msg = "complex sort, %s part == 1, kind=%s" % (part, kind)
+            c = ai.copy()
+            c.sort(kind=kind)
+            assert_equal(c, ai, msg)
+            c = bi.copy()
+            c.sort(kind=kind)
+            assert_equal(c, ai, msg)
+
+    def test_sort_complex_byte_swapping(self):
+        # test sorting of complex arrays requiring byte-swapping, gh-5441
+        for endianness in '<>':
+            for dt in np.typecodes['Complex']:
+                arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
+                c = arr.copy()
+                c.sort()
+                msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
+                assert_equal(c, arr, msg)
+
+    @pytest.mark.parametrize('dtype', [np.bytes_, np.str_])
+    def test_sort_string(self, dtype):
+        # np.array will perform the encoding to bytes for us in the bytes test
+        a = np.array(['aaaaaaaa' + chr(i) for i in range(101)], dtype=dtype)
+        b = a[::-1].copy()
+        for kind in self.sort_kinds:
+            msg = "kind=%s" % kind
+            c = a.copy()
+            c.sort(kind=kind)
+            assert_equal(c, a, msg)
+            c = b.copy()
+            c.sort(kind=kind)
+            assert_equal(c, a, msg)
+
+    def test_sort_object(self):
+        # test object array sorts.
+        a = np.empty((101,), dtype=object)
+        a[:] = list(range(101))
+        b = a[::-1]
+        for kind in ['q', 'h', 'm']:
+            msg = "kind=%s" % kind
+            c = a.copy()
+            c.sort(kind=kind)
+            assert_equal(c, a, msg)
+            c = b.copy()
+            c.sort(kind=kind)
+            assert_equal(c, a, msg)
+
+    @pytest.mark.parametrize("dt", [
+            np.dtype([('f', float), ('i', int)]),
+            np.dtype([('f', float), ('i', object)])])
+    @pytest.mark.parametrize("step", [1, 2])
+    def test_sort_structured(self, dt, step):
+        # test record array sorts.
+        a = np.array([(i, i) for i in range(101*step)], dtype=dt)
+        b = a[::-1]
+        for kind in ['q', 'h', 'm']:
+            msg = "kind=%s" % kind
+            c = a.copy()[::step]
+            indx = c.argsort(kind=kind)
+            c.sort(kind=kind)
+            assert_equal(c, a[::step], msg)
+            assert_equal(a[::step][indx], a[::step], msg)
+            c = b.copy()[::step]
+            indx = c.argsort(kind=kind)
+            c.sort(kind=kind)
+            assert_equal(c, a[step-1::step], msg)
+            assert_equal(b[::step][indx], a[step-1::step], msg)
+
+    @pytest.mark.parametrize('dtype', ['datetime64[D]', 'timedelta64[D]'])
+    def test_sort_time(self, dtype):
+        # test datetime64 and timedelta64 sorts.
+        a = np.arange(0, 101, dtype=dtype)
+        b = a[::-1]
+        for kind in ['q', 'h', 'm']:
+            msg = "kind=%s" % kind
+            c = a.copy()
+            c.sort(kind=kind)
+            assert_equal(c, a, msg)
+            c = b.copy()
+            c.sort(kind=kind)
+            assert_equal(c, a, msg)
+
+    def test_sort_axis(self):
+        # check axis handling. This should be the same for all type
+        # specific sorts, so we only check it for one type and one kind
+        a = np.array([[3, 2], [1, 0]])
+        b = np.array([[1, 0], [3, 2]])
+        c = np.array([[2, 3], [0, 1]])
+        d = a.copy()
+        d.sort(axis=0)
+        assert_equal(d, b, "test sort with axis=0")
+        d = a.copy()
+        d.sort(axis=1)
+        assert_equal(d, c, "test sort with axis=1")
+        d = a.copy()
+        d.sort()
+        assert_equal(d, c, "test sort with default axis")
+
+    def test_sort_size_0(self):
+        # check axis handling for multidimensional empty arrays
+        a = np.array([])
+        a.shape = (3, 2, 1, 0)
+        for axis in range(-a.ndim, a.ndim):
+            msg = 'test empty array sort with axis={0}'.format(axis)
+            assert_equal(np.sort(a, axis=axis), a, msg)
+        msg = 'test empty array sort with axis=None'
+        assert_equal(np.sort(a, axis=None), a.ravel(), msg)
+
+    def test_sort_bad_ordering(self):
+        # test generic class with bogus ordering,
+        # should not segfault.
+        class Boom:
+            def __lt__(self, other):
+                return True
+
+        a = np.array([Boom()] * 100, dtype=object)
+        for kind in self.sort_kinds:
+            msg = "kind=%s" % kind
+            c = a.copy()
+            c.sort(kind=kind)
+            assert_equal(c, a, msg)
+
+    def test_void_sort(self):
+        # gh-8210 - previously segfaulted
+        for i in range(4):
+            rand = np.random.randint(256, size=4000, dtype=np.uint8)
+            arr = rand.view('V4')
+            arr[::-1].sort()
+
+        dt = np.dtype([('val', 'i4', (1,))])
+        for i in range(4):
+            rand = np.random.randint(256, size=4000, dtype=np.uint8)
+            arr = rand.view(dt)
+            arr[::-1].sort()
+
+    def test_sort_raises(self):
+        #gh-9404
+        arr = np.array([0, datetime.now(), 1], dtype=object)
+        for kind in self.sort_kinds:
+            assert_raises(TypeError, arr.sort, kind=kind)
+        #gh-3879
+        class Raiser:
+            def raises_anything(*args, **kwargs):
+                raise TypeError("SOMETHING ERRORED")
+            __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
+        arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
+        np.random.shuffle(arr)
+        for kind in self.sort_kinds:
+            assert_raises(TypeError, arr.sort, kind=kind)
+
+    def test_sort_degraded(self):
+        # test degraded dataset would take minutes to run with normal qsort
+        d = np.arange(1000000)
+        do = d.copy()
+        x = d
+        # create a median of 3 killer where each median is the sorted second
+        # last element of the quicksort partition
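+        # np.sort's quicksort is an introsort that falls back to heapsort
+        # when partitioning degrades, so this still completes quickly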
+        while x.size > 3:
+            mid = x.size // 2
+            x[mid], x[-2] = x[-2], x[mid]
+            x = x[:-2]
+
+        assert_equal(np.sort(d), do)
+        assert_equal(d[np.argsort(d)], do)
+
+    def test_copy(self):
+        def assert_fortran(arr):
+            assert_(arr.flags.fortran)
+            assert_(arr.flags.f_contiguous)
+            assert_(not arr.flags.c_contiguous)
+
+        def assert_c(arr):
+            assert_(not arr.flags.fortran)
+            assert_(not arr.flags.f_contiguous)
+            assert_(arr.flags.c_contiguous)
+
+        a = np.empty((2, 2), order='F')
+        # Test copying a Fortran array
+        assert_c(a.copy())
+        assert_c(a.copy('C'))
+        assert_fortran(a.copy('F'))
+        assert_fortran(a.copy('A'))
+
+        # Now test starting with a C array.
+        a = np.empty((2, 2), order='C')
+        assert_c(a.copy())
+        assert_c(a.copy('C'))
+        assert_fortran(a.copy('F'))
+        assert_c(a.copy('A'))
+
+    @pytest.mark.parametrize("dtype", ['O', np.int32, 'i,O'])
+    def test__deepcopy__(self, dtype):
+        # Force the entry of NULLs into array
+        a = np.empty(4, dtype=dtype)
+        ctypes.memset(a.ctypes.data, 0, a.nbytes)
+
+        # Ensure no error is raised, see gh-21833
+        b = a.__deepcopy__({})
+
+        a[0] = 42
+        with pytest.raises(AssertionError):
+            assert_array_equal(a, b)
+
+    def test__deepcopy__catches_failure(self):
+        class MyObj:
+            def __deepcopy__(self, *args, **kwargs):
+                raise RuntimeError
+
+        arr = np.array([1, MyObj(), 3], dtype='O')
+        with pytest.raises(RuntimeError):
+            arr.__deepcopy__({})
+
+    def test_sort_order(self):
+        # Test sorting an array with fields
+        x1 = np.array([21, 32, 14])
+        x2 = np.array(['my', 'first', 'name'])
+        x3 = np.array([3.1, 4.5, 6.2])
+        r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
+
+        r.sort(order=['id'])
+        assert_equal(r.id, np.array([14, 21, 32]))
+        assert_equal(r.word, np.array(['name', 'my', 'first']))
+        assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
+
+        r.sort(order=['word'])
+        assert_equal(r.id, np.array([32, 21, 14]))
+        assert_equal(r.word, np.array(['first', 'my', 'name']))
+        assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
+
+        r.sort(order=['number'])
+        assert_equal(r.id, np.array([21, 32, 14]))
+        assert_equal(r.word, np.array(['my', 'first', 'name']))
+        assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
+
+        assert_raises_regex(ValueError, 'duplicate',
+            lambda: r.sort(order=['id', 'id']))
+
+        if sys.byteorder == 'little':
+            strtype = '>i2'
+        else:
+            strtype = '<i2'
+        mydtype = [('name', 'U5'), ('col2', strtype)]
+        r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
+                     dtype=mydtype)
+        r.sort(order='col2')
+        assert_equal(r['col2'], [1, 3, 255, 258])
+        assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
+                                 dtype=mydtype))
+
+    def test_argsort(self):
+        # test argsort of complex arrays requiring byte-swapping, gh-5441
+        for endianness in '<>':
+            for dt in np.typecodes['Complex']:
+                arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
+                msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
+                assert_equal(arr.argsort(),
+                             np.arange(len(arr), dtype=np.intp), msg)
+
+        # test string argsorts.
+        s = 'aaaaaaaa'
+        a = np.array([s + chr(i) for i in range(101)])
+        b = a[::-1].copy()
+        r = np.arange(101)
+        rr = r[::-1]
+        for kind in self.sort_kinds:
+            msg = "string argsort, kind=%s" % kind
+            assert_equal(a.copy().argsort(kind=kind), r, msg)
+            assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+        # test unicode argsorts.
+        s = 'aaaaaaaa'
+        a = np.array([s + chr(i) for i in range(101)], dtype=np.str_)
+        b = a[::-1]
+        r = np.arange(101)
+        rr = r[::-1]
+        for kind in self.sort_kinds:
+            msg = "unicode argsort, kind=%s" % kind
+            assert_equal(a.copy().argsort(kind=kind), r, msg)
+            assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+        # test object array argsorts.
+        a = np.empty((101,), dtype=object)
+        a[:] = list(range(101))
+        b = a[::-1]
+        r = np.arange(101)
+        rr = r[::-1]
+        for kind in self.sort_kinds:
+            msg = "object argsort, kind=%s" % kind
+            assert_equal(a.copy().argsort(kind=kind), r, msg)
+            assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+        # test structured array argsorts.
+        dt = np.dtype([('f', float), ('i', int)])
+        a = np.array([(i, i) for i in range(101)], dtype=dt)
+        b = a[::-1]
+        r = np.arange(101)
+        rr = r[::-1]
+        for kind in self.sort_kinds:
+            msg = "structured array argsort, kind=%s" % kind
+            assert_equal(a.copy().argsort(kind=kind), r, msg)
+            assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+        # test datetime64 argsorts.
+        a = np.arange(0, 101, dtype='datetime64[D]')
+        b = a[::-1]
+        r = np.arange(101)
+        rr = r[::-1]
+        for kind in ['q', 'h', 'm']:
+            msg = "datetime64 argsort, kind=%s" % kind
+            assert_equal(a.copy().argsort(kind=kind), r, msg)
+            assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+        # test timedelta64 argsorts.
+        a = np.arange(0, 101, dtype='timedelta64[D]')
+        b = a[::-1]
+        r = np.arange(101)
+        rr = r[::-1]
+        for kind in ['q', 'h', 'm']:
+            msg = "timedelta64 argsort, kind=%s" % kind
+            assert_equal(a.copy().argsort(kind=kind), r, msg)
+            assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+        # check axis handling. This should be the same for all type
+        # specific argsorts, so we only check it for one type and one kind
+        a = np.array([[3, 2], [1, 0]])
+        b = np.array([[1, 1], [0, 0]])
+        c = np.array([[1, 0], [1, 0]])
+        assert_equal(a.copy().argsort(axis=0), b)
+        assert_equal(a.copy().argsort(axis=1), c)
+        assert_equal(a.copy().argsort(), c)
+
+        # check axis handling for multidimensional empty arrays
+        a = np.array([])
+        a.shape = (3, 2, 1, 0)
+        for axis in range(-a.ndim, a.ndim):
+            msg = 'test empty array argsort with axis={0}'.format(axis)
+            assert_equal(np.argsort(a, axis=axis),
+                         np.zeros_like(a, dtype=np.intp), msg)
+        msg = 'test empty array argsort with axis=None'
+        assert_equal(np.argsort(a, axis=None),
+                     np.zeros_like(a.ravel(), dtype=np.intp), msg)
+
+        # check that stable argsorts are stable
+        r = np.arange(100)
+        # scalars
+        a = np.zeros(100)
+        assert_equal(a.argsort(kind='m'), r)
+        # complex
+        a = np.zeros(100, dtype=complex)
+        assert_equal(a.argsort(kind='m'), r)
+        # string
+        a = np.array(['aaaaaaaaa' for i in range(100)])
+        assert_equal(a.argsort(kind='m'), r)
+        # unicode
+        a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.str_)
+        assert_equal(a.argsort(kind='m'), r)
+
+    def test_sort_unicode_kind(self):
+        d = np.arange(10)
+        k = b'\xc3\xa4'.decode("UTF8")
+        assert_raises(ValueError, d.sort, kind=k)
+        assert_raises(ValueError, d.argsort, kind=k)
+
+    @pytest.mark.parametrize('a', [
+        np.array([0, 1, np.nan], dtype=np.float16),
+        np.array([0, 1, np.nan], dtype=np.float32),
+        np.array([0, 1, np.nan]),
+    ])
+    def test_searchsorted_floats(self, a):
+        # test for floats arrays containing nans. Explicitly test
+        # half, single, and double precision floats to verify that
+        # the NaN-handling is correct.
+        msg = "Test real (%s) searchsorted with nans, side='l'" % a.dtype
+        b = a.searchsorted(a, side='left')
+        assert_equal(b, np.arange(3), msg)
+        msg = "Test real (%s) searchsorted with nans, side='r'" % a.dtype
+        b = a.searchsorted(a, side='right')
+        assert_equal(b, np.arange(1, 4), msg)
+        # check keyword arguments
+        a.searchsorted(v=1)
+        x = np.array([0, 1, np.nan], dtype='float32')
+        y = np.searchsorted(x, x[-1])
+        assert_equal(y, 2)
+
+    def test_searchsorted_complex(self):
+        # test for complex arrays containing nans.
+        # The search sorted routines use the compare functions for the
+        # array type, so this checks if that is consistent with the sort
+        # order.
+        # check double complex
+        a = np.zeros(9, dtype=np.complex128)
+        a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
+        a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
+        msg = "Test complex searchsorted with nans, side='l'"
+        b = a.searchsorted(a, side='left')
+        assert_equal(b, np.arange(9), msg)
+        msg = "Test complex searchsorted with nans, side='r'"
+        b = a.searchsorted(a, side='right')
+        assert_equal(b, np.arange(1, 10), msg)
+        msg = "Test searchsorted with little endian, side='l'"
+        a = np.array([0, 128], dtype='<i4')
+        b = a.searchsorted(np.array(128, dtype='<i4'))
+        assert_equal(b, 1, msg)
+        msg = "Test searchsorted with big endian, side='l'"
+        a = np.array([0, 128], dtype='>i4')
+        b = a.searchsorted(np.array(128, dtype='>i4'))
+        assert_equal(b, 1, msg)
+
+    def test_partition(self):
+        at = assert_
+        aae = assert_array_equal
+        for k in ("introselect",):
+            for dt in [np.int32, np.uint32, np.float32]:
+                d = np.arange(17, dtype=dt)
+                np.random.shuffle(d)
+                d1 = np.tile(np.arange(17, dtype=dt), (4, 1))
+                map(np.random.shuffle, d1)
+                d0 = np.transpose(d1)
+                for i in range(d.size):
+                    p = np.partition(d1, i, axis=1, kind=k)
+                    aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
+                    # array_less does not seem to work right
+                    at((p[:, :i].T <= p[:, i]).all(),
+                       msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
+                    at((p[:, i + 1:].T > p[:, i]).all(),
+                       msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
+                    aae(p, d1[np.arange(d1.shape[0])[:, None],
+                        np.argpartition(d1, i, axis=1, kind=k)])
+
+                    p = np.partition(d0, i, axis=0, kind=k)
+                    aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
+                    # array_less does not seem to work right
+                    at((p[:i, :] <= p[i, :]).all(),
+                       msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
+                    at((p[i + 1:, :] > p[i, :]).all(),
+                       msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
+                    aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
+                        np.arange(d0.shape[1])[None, :]])
+
+                    # check inplace
+                    dc = d.copy()
+                    dc.partition(i, kind=k)
+                    assert_equal(dc, np.partition(d, i, kind=k))
+                    dc = d0.copy()
+                    dc.partition(i, axis=0, kind=k)
+                    assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
+                    dc = d1.copy()
+                    dc.partition(i, axis=1, kind=k)
+                    assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
+
+    def assert_partitioned(self, d, kth):
+        prev = 0
+        for k in np.sort(kth):
+            assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
+            assert_((d[k:] >= d[k]).all(),
+                    msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
+            prev = k + 1
+
+    def test_partition_iterative(self):
+        d = np.arange(17)
+        kth = (0, 1, 2, 429, 231)
+        assert_raises(ValueError, d.partition, kth)
+        assert_raises(ValueError, d.argpartition, kth)
+        d = np.arange(10).reshape((2, 5))
+        assert_raises(ValueError, d.partition, kth, axis=0)
+        assert_raises(ValueError, d.partition, kth, axis=1)
+        assert_raises(ValueError, np.partition, d, kth, axis=1)
+        assert_raises(ValueError, np.partition, d, kth, axis=None)
+
+        d = np.array([3, 4, 2, 1])
+        p = np.partition(d, (0, 3))
+        self.assert_partitioned(p, (0, 3))
+        self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
+
+        assert_array_equal(p, np.partition(d, (-3, -1)))
+        assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
+
+        d = np.arange(17)
+        np.random.shuffle(d)
+        d.partition(range(d.size))
+        assert_array_equal(np.arange(17), d)
+        np.random.shuffle(d)
+        assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
+
+        # test unsorted kth
+        d = np.arange(17)
+        np.random.shuffle(d)
+        keys = np.array([1, 3, 8, -2])
+        np.random.shuffle(d)
+        p = np.partition(d, keys)
+        self.assert_partitioned(p, keys)
+        p = d[np.argpartition(d, keys)]
+        self.assert_partitioned(p, keys)
+        np.random.shuffle(keys)
+        assert_array_equal(np.partition(d, keys), p)
+        assert_array_equal(d[np.argpartition(d, keys)], p)
+
+        # equal kth
+        d = np.arange(20)[::-1]
+        self.assert_partitioned(np.partition(d, [5]*4), [5])
+        self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
+                                [5]*4 + [6, 13])
+        self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
+        self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
+                                [5]*4 + [6, 13])
+
+        d = np.arange(12)
+        np.random.shuffle(d)
+        d1 = np.tile(np.arange(12), (4, 1))
+        map(np.random.shuffle, d1)
+        d0 = np.transpose(d1)
+
+        kth = (1, 6, 7, -1)
+        p = np.partition(d1, kth, axis=1)
+        pa = d1[np.arange(d1.shape[0])[:, None],
+                d1.argpartition(kth, axis=1)]
+        assert_array_equal(p, pa)
+        for i in range(d1.shape[0]):
+            self.assert_partitioned(p[i,:], kth)
+        p = np.partition(d0, kth, axis=0)
+        pa = d0[np.argpartition(d0, kth, axis=0),
+                np.arange(d0.shape[1])[None,:]]
+        assert_array_equal(p, pa)
+        for i in range(d0.shape[1]):
+            self.assert_partitioned(p[:, i], kth)
+
+    def test_partition_cdtype(self):
+        d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+                      ('Lancelot', 1.9, 38)],
+                     dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+        tgt = np.sort(d, order=['age', 'height'])
+        assert_array_equal(np.partition(d, range(d.size),
+                                        order=['age', 'height']),
+                           tgt)
+        assert_array_equal(d[np.argpartition(d, range(d.size),
+                                             order=['age', 'height'])],
+                           tgt)
+
+
+class TestBinop:
+    def test_ufunc_binop_interaction(self):
+        # Python method name (without underscores)
+        #   -> (numpy ufunc, has_in_place_version, preferred_dtype)
+        ops = {
+            'add':      (np.add, True, float),
+            'sub':      (np.subtract, True, float),
+            'mul':      (np.multiply, True, float),
+            'truediv':  (np.true_divide, True, float),
+            'floordiv': (np.floor_divide, True, float),
+            'mod':      (np.remainder, True, float),
+            'divmod':   (np.divmod, False, float),
+            'pow':      (np.power, True, int),
+            'lshift':   (np.left_shift, True, int),
+            'rshift':   (np.right_shift, True, int),
+            'and':      (np.bitwise_and, True, int),
+            'xor':      (np.bitwise_xor, True, int),
+            'or':       (np.bitwise_or, True, int),
+            'matmul':   (np.matmul, True, float),
+            # 'ge':       (np.less_equal, False),
+            # 'gt':       (np.less, False),
+            # 'le':       (np.greater_equal, False),
+            # 'lt':       (np.greater, False),
+            # 'eq':       (np.equal, False),
+            # 'ne':       (np.not_equal, False),
+        }
+
+        class Coerced(Exception):
+            pass
+
+        def array_impl(self):
+            raise Coerced
+
+        def op_impl(self, other):
+            return "forward"
+
+        def rop_impl(self, other):
+            return "reverse"
+
+        def iop_impl(self, other):
+            return "in-place"
+
+        def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
+            return ("__array_ufunc__", ufunc, method, args, kwargs)
+
+        # Create an object with the given base, in the given module, with a
+        # bunch of placeholder __op__ methods, and optionally a
+        # __array_ufunc__ and __array_priority__.
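+        # (built via eval so the class's __module__ picks up the faked
+        # __name__ from eval_namespace rather than this test module)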
+        def make_obj(base, array_priority=False, array_ufunc=False,
+                     alleged_module="__main__"):
+            class_namespace = {"__array__": array_impl}
+            if array_priority is not False:
+                class_namespace["__array_priority__"] = array_priority
+            for op in ops:
+                class_namespace["__{0}__".format(op)] = op_impl
+                class_namespace["__r{0}__".format(op)] = rop_impl
+                class_namespace["__i{0}__".format(op)] = iop_impl
+            if array_ufunc is not False:
+                class_namespace["__array_ufunc__"] = array_ufunc
+            eval_namespace = {"base": base,
+                              "class_namespace": class_namespace,
+                              "__name__": alleged_module,
+                              }
+            MyType = eval("type('MyType', (base,), class_namespace)",
+                          eval_namespace)
+            if issubclass(MyType, np.ndarray):
+                # Use this range to avoid special case weirdnesses around
+                # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
+                return np.arange(3, 7).reshape(2, 2).view(MyType)
+            else:
+                return MyType()
+
+        def check(obj, binop_override_expected, ufunc_override_expected,
+                  inplace_override_expected, check_scalar=True):
+            for op, (ufunc, has_inplace, dtype) in ops.items():
+                err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
+                           % (op, ufunc, has_inplace, dtype))
+                check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
+                if check_scalar:
+                    check_objs.append(check_objs[0][0])
+                for arr in check_objs:
+                    arr_method = getattr(arr, "__{0}__".format(op))
+
+                    def first_out_arg(result):
+                        if op == "divmod":
+                            assert_(isinstance(result, tuple))
+                            return result[0]
+                        else:
+                            return result
+
+                    # arr __op__ obj
+                    if binop_override_expected:
+                        assert_equal(arr_method(obj), NotImplemented, err_msg)
+                    elif ufunc_override_expected:
+                        assert_equal(arr_method(obj)[0], "__array_ufunc__",
+                                     err_msg)
+                    else:
+                        if (isinstance(obj, np.ndarray) and
+                            (type(obj).__array_ufunc__ is
+                             np.ndarray.__array_ufunc__)):
+                            # __array__ gets ignored
+                            res = first_out_arg(arr_method(obj))
+                            assert_(res.__class__ is obj.__class__, err_msg)
+                        else:
+                            assert_raises((TypeError, Coerced),
+                                          arr_method, obj, err_msg=err_msg)
+                    # obj __op__ arr
+                    arr_rmethod = getattr(arr, "__r{0}__".format(op))
+                    if ufunc_override_expected:
+                        res = arr_rmethod(obj)
+                        assert_equal(res[0], "__array_ufunc__",
+                                     err_msg=err_msg)
+                        assert_equal(res[1], ufunc, err_msg=err_msg)
+                    else:
+                        if (isinstance(obj, np.ndarray) and
+                                (type(obj).__array_ufunc__ is
+                                 np.ndarray.__array_ufunc__)):
+                            # __array__ gets ignored
+                            res = first_out_arg(arr_rmethod(obj))
+                            assert_(res.__class__ is obj.__class__, err_msg)
+                        else:
+                            # __array_ufunc__ = "asdf" creates a TypeError
+                            assert_raises((TypeError, Coerced),
+                                          arr_rmethod, obj, err_msg=err_msg)
+
+                    # arr __iop__ obj
+                    # array scalars don't have in-place operators
+                    if has_inplace and isinstance(arr, np.ndarray):
+                        arr_imethod = getattr(arr, "__i{0}__".format(op))
+                        if inplace_override_expected:
+                            assert_equal(arr_method(obj), NotImplemented,
+                                         err_msg=err_msg)
+                        elif ufunc_override_expected:
+                            res = arr_imethod(obj)
+                            assert_equal(res[0], "__array_ufunc__", err_msg)
+                            assert_equal(res[1], ufunc, err_msg)
+                            assert_(type(res[-1]["out"]) is tuple, err_msg)
+                            assert_(res[-1]["out"][0] is arr, err_msg)
+                        else:
+                            if (isinstance(obj, np.ndarray) and
+                                    (type(obj).__array_ufunc__ is
+                                    np.ndarray.__array_ufunc__)):
+                                # __array__ gets ignored
+                                assert_(arr_imethod(obj) is arr, err_msg)
+                            else:
+                                assert_raises((TypeError, Coerced),
+                                              arr_imethod, obj,
+                                              err_msg=err_msg)
+
+                    op_fn = getattr(operator, op, None)
+                    if op_fn is None:
+                        op_fn = getattr(operator, op + "_", None)
+                    if op_fn is None:
+                        op_fn = getattr(builtins, op)
+                    assert_equal(op_fn(obj, arr), "forward", err_msg)
+                    if not isinstance(obj, np.ndarray):
+                        if binop_override_expected:
+                            assert_equal(op_fn(arr, obj), "reverse", err_msg)
+                        elif ufunc_override_expected:
+                            assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
+                                         err_msg)
+                    if ufunc_override_expected:
+                        assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
+                                     err_msg)
+
+        # No array priority, no array_ufunc -> nothing called
+        check(make_obj(object), False, False, False)
+        # Negative array priority, no array_ufunc -> nothing called
+        # (has to be very negative, because scalar priority is -1000000.0)
+        check(make_obj(object, array_priority=-2**30), False, False, False)
+        # Positive array priority, no array_ufunc -> binops and iops only
+        check(make_obj(object, array_priority=1), True, False, True)
+        # ndarray ignores array_priority for ndarray subclasses
+        check(make_obj(np.ndarray, array_priority=1), False, False, False,
+              check_scalar=False)
+        # Positive array_priority and array_ufunc -> array_ufunc only
+        check(make_obj(object, array_priority=1,
+                       array_ufunc=array_ufunc_impl), False, True, False)
+        check(make_obj(np.ndarray, array_priority=1,
+                       array_ufunc=array_ufunc_impl), False, True, False)
+        # array_ufunc set to None -> defer binops only
+        check(make_obj(object, array_ufunc=None), True, False, False)
+        check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
+              check_scalar=False)
+
+    @pytest.mark.parametrize("priority", [None, "runtime error"])
+    def test_ufunc_binop_bad_array_priority(self, priority):
+        # Mainly checks that this does not crash.  The second array has a lower
+        # priority than -1 ("error value").  If the __radd__ actually exists,
+        # bad things can happen (I think via the scalar paths).
+        # In principle both of these can probably just be errors in the future.
+        class BadPriority:
+            @property
+            def __array_priority__(self):
+                if priority == "runtime error":
+                    raise RuntimeError("RuntimeError in __array_priority__!")
+                return priority
+
+            def __radd__(self, other):
+                return "result"
+
+        class LowPriority(np.ndarray):
+            __array_priority__ = -1000
+
+        # Priority failure uses the same as scalars (smaller -1000).  So the
+        # LowPriority wins with 'result' for each element (inner operation).
+        res = np.arange(3).view(LowPriority) + BadPriority()
+        assert res.shape == (3,)
+        assert res[0] == 'result'
+
+    def test_ufunc_override_normalize_signature(self):
+        # gh-5674
+        class SomeClass:
+            def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+                return kw
+
+        a = SomeClass()
+        kw = np.add(a, [1])
+        assert_('sig' not in kw and 'signature' not in kw)
+        kw = np.add(a, [1], sig='ii->i')
+        assert_('sig' not in kw and 'signature' in kw)
+        assert_equal(kw['signature'], 'ii->i')
+        kw = np.add(a, [1], signature='ii->i')
+        assert_('sig' not in kw and 'signature' in kw)
+        assert_equal(kw['signature'], 'ii->i')
+
+    def test_array_ufunc_index(self):
+        # Check that index is set appropriately, also if only an output
+        # is passed on (latter is another regression tests for github bug 4753)
+        # This also checks implicitly that 'out' is always a tuple.
+        class CheckIndex:
+            def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+                for i, a in enumerate(inputs):
+                    if a is self:
+                        return i
+                # calls below mean we must be in an output.
+                for j, a in enumerate(kw['out']):
+                    if a is self:
+                        return (j,)
+
+        a = CheckIndex()
+        dummy = np.arange(2.)
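+        # CheckIndex returns the plain position i when it matches an input
+        # and a tuple (j,) when it matches an entry of kw['out'], so each
+        # assertion below pins down exactly where `a` ended up.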
+        # 1 input, 1 output
+        assert_equal(np.sin(a), 0)
+        assert_equal(np.sin(dummy, a), (0,))
+        assert_equal(np.sin(dummy, out=a), (0,))
+        assert_equal(np.sin(dummy, out=(a,)), (0,))
+        assert_equal(np.sin(a, a), 0)
+        assert_equal(np.sin(a, out=a), 0)
+        assert_equal(np.sin(a, out=(a,)), 0)
+        # 1 input, 2 outputs
+        assert_equal(np.modf(dummy, a), (0,))
+        assert_equal(np.modf(dummy, None, a), (1,))
+        assert_equal(np.modf(dummy, dummy, a), (1,))
+        assert_equal(np.modf(dummy, out=(a, None)), (0,))
+        assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
+        assert_equal(np.modf(dummy, out=(None, a)), (1,))
+        assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
+        assert_equal(np.modf(a, out=(dummy, a)), 0)
+        with assert_raises(TypeError):
+            # Out argument must be tuple, since there are multiple outputs
+            np.modf(dummy, out=a)
+
+        assert_raises(ValueError, np.modf, dummy, out=(a,))
+
+        # 2 inputs, 1 output
+        assert_equal(np.add(a, dummy), 0)
+        assert_equal(np.add(dummy, a), 1)
+        assert_equal(np.add(dummy, dummy, a), (0,))
+        assert_equal(np.add(dummy, a, a), 1)
+        assert_equal(np.add(dummy, dummy, out=a), (0,))
+        assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
+        assert_equal(np.add(a, dummy, out=a), 0)
+
+    def test_out_override(self):
+        # regression test for github bug 4753
+        class OutClass(np.ndarray):
+            def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+                if 'out' in kw:
+                    tmp_kw = kw.copy()
+                    tmp_kw.pop('out')
+                    func = getattr(ufunc, method)
+                    kw['out'][0][...] = func(*inputs, **tmp_kw)
+
+        A = np.array([0]).view(OutClass)
+        B = np.array([5])
+        C = np.array([6])
+        np.multiply(C, B, A)
+        assert_equal(A[0], 30)
+        assert_(isinstance(A, OutClass))
+        A[0] = 0
+        np.multiply(C, B, out=A)
+        assert_equal(A[0], 30)
+        assert_(isinstance(A, OutClass))
+
+    def test_pow_override_with_errors(self):
+        # regression test for gh-9112
+        class PowerOnly(np.ndarray):
+            def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+                if ufunc is not np.power:
+                    raise NotImplementedError
+                return "POWER!"
+        # explicit cast to float, to ensure the fast power path is taken.
+        a = np.array(5., dtype=np.float64).view(PowerOnly)
+        assert_equal(a ** 2.5, "POWER!")
+        with assert_raises(NotImplementedError):
+            a ** 0.5
+        with assert_raises(NotImplementedError):
+            a ** 0
+        with assert_raises(NotImplementedError):
+            a ** 1
+        with assert_raises(NotImplementedError):
+            a ** -1
+        with assert_raises(NotImplementedError):
+            a ** 2
+
+    def test_pow_array_object_dtype(self):
+        # test pow on arrays of object dtype
+        class SomeClass:
+            def __init__(self, num=None):
+                self.num = num
+
+            # want to ensure a fast pow path is not taken
+            def __mul__(self, other):
+                raise AssertionError('__mul__ should not be called')
+
+            def __div__(self, other):
+                raise AssertionError('__div__ should not be called')
+
+            def __pow__(self, exp):
+                return SomeClass(num=self.num ** exp)
+
+            def __eq__(self, other):
+                if isinstance(other, SomeClass):
+                    return self.num == other.num
+
+            __rpow__ = __pow__
+
+        def pow_for(exp, arr):
+            return np.array([x ** exp for x in arr])
+
+        obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
+
+        assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
+        assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
+        assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
+        assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
+        assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
+
+    def test_pos_array_ufunc_override(self):
+        class A(np.ndarray):
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                return getattr(ufunc, method)(*[i.view(np.ndarray) for
+                                                i in inputs], **kwargs)
+        tst = np.array('foo').view(A)
+        with assert_raises(TypeError):
+            +tst
+
+
+class TestTemporaryElide:
+    # elision is only triggered on relatively large arrays
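+    #
+    # Informal background sketch: for an expression like ``d + d + d`` the
+    # intermediate ``d + d`` has a reference count of 1, so NumPy may reuse
+    # its buffer for the second addition instead of allocating a new array.
+    # The tests below exercise cases where this optimization must *not*
+    # kick in (extension calls, casts, broadcasts, read-only memory).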
+
+    def test_extension_incref_elide(self):
+        # test extension (e.g. cython) calling PyNumber_* slots without
+        # increasing the reference counts
+        #
+        # def incref_elide(a):
+        #    d = input.copy() # refcount 1
+        #    return d, d + d # PyNumber_Add without increasing refcount
+        from numpy.core._multiarray_tests import incref_elide
+        d = np.ones(100000)
+        orig, res = incref_elide(d)
+        d + d
+        # the returned original must not have been modified, i.e. the add
+        # must not have been elided into an inplace operation
+        assert_array_equal(orig, d)
+        assert_array_equal(res, d + d)
+
+    def test_extension_incref_elide_stack(self):
+        # scanning if the refcount == 1 object is on the python stack to check
+        # that we are called directly from python is flawed as object may still
+        # be above the stack pointer and we have no access to the top of it
+        #
+        # def incref_elide_l(d):
+        #    return l[4] + l[4] # PyNumber_Add without increasing refcount
+        from numpy.core._multiarray_tests import incref_elide_l
+        # padding with 1 makes sure the object on the stack is not overwritten
+        l = [1, 1, 1, 1, np.ones(100000)]
+        res = incref_elide_l(l)
+        # the returned original must not have been modified, i.e. the add
+        # must not have been elided into an inplace operation
+        assert_array_equal(l[4], np.ones(100000))
+        assert_array_equal(res, l[4] + l[4])
+
+    def test_temporary_with_cast(self):
+        # check that we don't elide into a temporary which would need casting
+        d = np.ones(200000, dtype=np.int64)
+        assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
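+        # (2**222 does not fit in an int64, so the result decays to object
+        # dtype and the int64 temporary cannot simply be reused)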
+
+        r = ((d + d) / 2)
+        assert_equal(r.dtype, np.dtype('f8'))
+
+        r = np.true_divide((d + d), 2)
+        assert_equal(r.dtype, np.dtype('f8'))
+
+        r = ((d + d) / 2.)
+        assert_equal(r.dtype, np.dtype('f8'))
+
+        r = ((d + d) // 2)
+        assert_equal(r.dtype, np.dtype(np.int64))
+
+        # commutative elision into the astype result
+        f = np.ones(100000, dtype=np.float32)
+        assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
+
+        # no elision into lower type
+        d = f.astype(np.float64)
+        assert_equal(((f + f) + d).dtype, d.dtype)
+        l = np.ones(100000, dtype=np.longdouble)
+        assert_equal(((d + d) + l).dtype, l.dtype)
+
+        # test unary abs with different output dtype
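+        # (abs of a complex array yields floats of half the itemsize,
+        # e.g. complex64 -> 'f4', hence c.itemsize // 2 below)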
+        for dt in (np.complex64, np.complex128, np.clongdouble):
+            c = np.ones(100000, dtype=dt)
+            r = abs(c * 2.0)
+            assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
+
+    def test_elide_broadcast(self):
+        # test that there is no elision on broadcast to a higher dimension;
+        # this only triggers the elision code path in debug mode, since
+        # triggering it in normal mode would need a 256kb matching dimension
+        # and therefore a lot of memory
+        d = np.ones((2000, 1), dtype=int)
+        b = np.ones((2000), dtype=bool)
+        r = (1 - d) + b
+        assert_equal(r, 1)
+        assert_equal(r.shape, (2000, 2000))
+
+    def test_elide_scalar(self):
+        # check inplace op does not create ndarray from scalars
+        a = np.bool_()
+        assert_(type(~(a & a)) is np.bool_)
+
+    def test_elide_scalar_readonly(self):
+        # The imaginary part of a real array is read-only. This needs to go
+        # through fast_scalar_power, which is only called for powers of
+        # +1, -1, 0, 0.5, and 2, so use 2. Elision also needs a valid
+        # refcount, which the imaginary part of a real array provides.
+        # Should not error.
+        a = np.empty(100000, dtype=np.float64)
+        a.imag ** 2
+
+    def test_elide_readonly(self):
+        # don't try to elide readonly temporaries
+        r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
+        assert_equal(r, 0)
+
+    def test_elide_updateifcopy(self):
+        a = np.ones(2**20)[::2]
+        b = a.flat.__array__() + 1
+        del b
+        assert_equal(a, 1)
+
+
+class TestCAPI:
+    def test_IsPythonScalar(self):
+        from numpy.core._multiarray_tests import IsPythonScalar
+        assert_(IsPythonScalar(b'foobar'))
+        assert_(IsPythonScalar(1))
+        assert_(IsPythonScalar(2**80))
+        assert_(IsPythonScalar(2.))
+        assert_(IsPythonScalar("a"))
+
+    @pytest.mark.parametrize("converter",
+             [_multiarray_tests.run_scalar_intp_converter,
+              _multiarray_tests.run_scalar_intp_from_sequence])
+    def test_intp_sequence_converters(self, converter):
+        # Test simple values (-1 is special for error return paths)
+        assert converter(10) == (10,)
+        assert converter(-1) == (-1,)
+        # A 0-D array looks a bit like a sequence but must take the integer
+        # path:
+        assert converter(np.array(123)) == (123,)
+        # Test simple sequences (intp_from_sequence only supports length 1):
+        assert converter((10,)) == (10,)
+        assert converter(np.array([11])) == (11,)
+
+    @pytest.mark.parametrize("converter",
+             [_multiarray_tests.run_scalar_intp_converter,
+              _multiarray_tests.run_scalar_intp_from_sequence])
+    @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+            reason="PyPy bug in error formatting")
+    def test_intp_sequence_converters_errors(self, converter):
+        with pytest.raises(TypeError,
+                match="expected a sequence of integers or a single integer, "):
+            converter(object())
+        with pytest.raises(TypeError,
+                match="expected a sequence of integers or a single integer, "
+                      "got '32.0'"):
+            converter(32.)
+        with pytest.raises(TypeError,
+                match="'float' object cannot be interpreted as an integer"):
+            converter([32.])
+        with pytest.raises(ValueError,
+                match="Maximum allowed dimension"):
+            # These converters currently convert overflows to a ValueError
+            converter(2**64)
+
+
+class TestSubscripting:
+    def test_test_zero_rank(self):
+        x = np.array([1, 2, 3])
+        assert_(isinstance(x[0], np.int_))
+        assert_(type(x[0, ...]) is np.ndarray)
+
+
+class TestPickling:
+    @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
+                        reason=('this tests the error message when trying to '
+                                'use protocol 5 although it is not available'))
+    def test_correct_protocol5_error_message(self):
+        array = np.arange(10)
+
+        # requesting an unavailable protocol raises ValueError; only the
+        # generic message prefix is matched, the full text is an assumption
+        with pytest.raises(ValueError, match='pickle protocol'):
+            pickle.dumps(array, protocol=5)
+
+    def test_record_array_with_object_dtype(self):
+        my_object = object()
+
+        arr_with_object = np.array(
+                [(my_object, 1, 2.0)],
+                dtype=[('a', object), ('b', int), ('c', float)])
+        arr_without_object = np.array(
+                [('xxx', 1, 2.0)],
+                dtype=[('a', str), ('b', int), ('c', float)])
+
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            depickled_arr_with_object = pickle.loads(
+                    pickle.dumps(arr_with_object, protocol=proto))
+            depickled_arr_without_object = pickle.loads(
+                    pickle.dumps(arr_without_object, protocol=proto))
+
+            assert_equal(arr_with_object.dtype,
+                         depickled_arr_with_object.dtype)
+            assert_equal(arr_without_object.dtype,
+                         depickled_arr_without_object.dtype)
+
+    @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
+                        reason="requires pickle protocol 5")
+    def test_f_contiguous_array(self):
+        f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')
+        buffers = []
+
+        # When using pickle protocol 5, Fortran-contiguous arrays can be
+        # serialized using out-of-band buffers
+        bytes_string = pickle.dumps(f_contiguous_array, protocol=5,
+                                    buffer_callback=buffers.append)
+
+        assert len(buffers) > 0
+
+        depickled_f_contiguous_array = pickle.loads(bytes_string,
+                                                    buffers=buffers)
+
+        assert_equal(f_contiguous_array, depickled_f_contiguous_array)
+
+    def test_non_contiguous_array(self):
+        non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]
+        assert not non_contiguous_array.flags.c_contiguous
+        assert not non_contiguous_array.flags.f_contiguous
+
+        # make sure non-contiguous arrays can be pickled-depickled
+        # using any protocol
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            depickled_non_contiguous_array = pickle.loads(
+                    pickle.dumps(non_contiguous_array, protocol=proto))
+
+            assert_equal(non_contiguous_array, depickled_non_contiguous_array)
+
+    def test_roundtrip(self):
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            carray = np.array([[2, 9], [7, 0], [3, 8]])
+            DATA = [
+                carray,
+                np.transpose(carray),
+                np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
+                                                   ('c', float)])
+            ]
+
+            refs = [weakref.ref(a) for a in DATA]
+            for a in DATA:
+                assert_equal(
+                        a, pickle.loads(pickle.dumps(a, protocol=proto)),
+                        err_msg="%r" % a)
+            del a, DATA, carray
+            break_cycles()
+            # check for reference leaks (gh-12793)
+            for ref in refs:
+                assert ref() is None
+
+    def _loads(self, obj):
+        return pickle.loads(obj, encoding='latin1')
+
+    # version 0 pickles, using protocol=2 to pickle
+    # version 0 doesn't have a version field
+    def test_version0_int8(self):
+        s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
+        a = np.array([1, 2, 3, 4], dtype=np.int8)
+        p = self._loads(s)
+        assert_equal(a, p)
+
+    def test_version0_float32(self):
+        s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
+        a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
+        p = self._loads(s)
+        assert_equal(a, p)
+
+
+class TestStringCompare:
+    def test_string(self):
+        g1 = np.array(["This", "is", "example"])
+        g2 = np.array(["This", "was", "example"])
+        assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
+        assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
+        assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
+        assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
+        assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
+        assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
+
+    def test_mixed(self):
+        g1 = np.array(["spam", "spa", "spammer", "and eggs"])
+        g2 = "spam"
+        assert_array_equal(g1 == g2, [x == g2 for x in g1])
+        assert_array_equal(g1 != g2, [x != g2 for x in g1])
+        assert_array_equal(g1 < g2, [x < g2 for x in g1])
+        assert_array_equal(g1 > g2, [x > g2 for x in g1])
+        assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
+        assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
+
+    def test_unicode(self):
+        g1 = np.array(["This", "is", "example"])
+        g2 = np.array(["This", "was", "example"])
+        assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
+        assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
+        assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
+        assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
+        assert_array_equal(g1 < g2,  [g1[i] < g2[i] for i in [0, 1, 2]])
+        assert_array_equal(g1 > g2,  [g1[i] > g2[i] for i in [0, 1, 2]])
+
+class TestArgmaxArgminCommon:
+
+    sizes = [(), (3,), (3, 2), (2, 3),
+             (3, 3), (2, 3, 4), (4, 3, 2),
+             (1, 2, 3, 4), (2, 3, 4, 1),
+             (3, 4, 1, 2), (4, 1, 2, 3),
+             (64,), (128,), (256,)]
+
+    @pytest.mark.parametrize("size, axis", itertools.chain(*[[(size, axis)
+        for axis in list(range(-len(size), len(size))) + [None]]
+        for size in sizes]))
+    @pytest.mark.parametrize('method', [np.argmax, np.argmin])
+    def test_np_argmin_argmax_keepdims(self, size, axis, method):
+
+        arr = np.random.normal(size=size)
+
+        # contiguous arrays
+        if axis is None:
+            new_shape = [1 for _ in range(len(size))]
+        else:
+            new_shape = list(size)
+            new_shape[axis] = 1
+        new_shape = tuple(new_shape)
+
+        _res_orig = method(arr, axis=axis)
+        res_orig = _res_orig.reshape(new_shape)
+        res = method(arr, axis=axis, keepdims=True)
+        assert_equal(res, res_orig)
+        assert_(res.shape == new_shape)
+        outarray = np.empty(res.shape, dtype=res.dtype)
+        res1 = method(arr, axis=axis, out=outarray,
+                            keepdims=True)
+        assert_(res1 is outarray)
+        assert_equal(res, outarray)
+
+        if len(size) > 0:
+            wrong_shape = list(new_shape)
+            if axis is not None:
+                wrong_shape[axis] = 2
+            else:
+                wrong_shape[0] = 2
+            wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
+            with pytest.raises(ValueError):
+                method(arr.T, axis=axis,
+                        out=wrong_outarray, keepdims=True)
+
+        # non-contiguous arrays
+        if axis is None:
+            new_shape = [1 for _ in range(len(size))]
+        else:
+            new_shape = list(size)[::-1]
+            new_shape[axis] = 1
+        new_shape = tuple(new_shape)
+
+        _res_orig = method(arr.T, axis=axis)
+        res_orig = _res_orig.reshape(new_shape)
+        res = method(arr.T, axis=axis, keepdims=True)
+        assert_equal(res, res_orig)
+        assert_(res.shape == new_shape)
+        outarray = np.empty(new_shape[::-1], dtype=res.dtype)
+        outarray = outarray.T
+        res1 = method(arr.T, axis=axis, out=outarray,
+                            keepdims=True)
+        assert_(res1 is outarray)
+        assert_equal(res, outarray)
+
+        if len(size) > 0:
+            # for a non-zero sized array, an out array with one
+            # dimension fewer should raise an error
+            with pytest.raises(ValueError):
+                method(arr[0], axis=axis,
+                        out=outarray, keepdims=True)
+
+        if len(size) > 0:
+            wrong_shape = list(new_shape)
+            if axis is not None:
+                wrong_shape[axis] = 2
+            else:
+                wrong_shape[0] = 2
+            wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
+            with pytest.raises(ValueError):
+                method(arr.T, axis=axis,
+                        out=wrong_outarray, keepdims=True)
+
+    @pytest.mark.parametrize('method', ['max', 'min'])
+    def test_all(self, method):
+        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
+        arg_method = getattr(a, 'arg' + method)
+        val_method = getattr(a, method)
+        for i in range(a.ndim):
+            a_maxmin = val_method(i)
+            aarg_maxmin = arg_method(i)
+            axes = list(range(a.ndim))
+            axes.remove(i)
+            assert_(np.all(a_maxmin == aarg_maxmin.choose(
+                                        *a.transpose(i, *axes))))
+
+    @pytest.mark.parametrize('method', ['argmax', 'argmin'])
+    def test_output_shape(self, method):
+        # see also gh-616
+        a = np.ones((10, 5))
+        arg_method = getattr(a, method)
+        # Check some simple shape mismatches
+        out = np.ones(11, dtype=np.int_)
+        assert_raises(ValueError, arg_method, -1, out)
+
+        out = np.ones((2, 5), dtype=np.int_)
+        assert_raises(ValueError, arg_method, -1, out)
+
+        # these could possibly be relaxed (even the previous case used to be
+        # allowed)
+        out = np.ones((1, 10), dtype=np.int_)
+        assert_raises(ValueError, arg_method, -1, out)
+
+        out = np.ones(10, dtype=np.int_)
+        arg_method(-1, out=out)
+        assert_equal(out, arg_method(-1))
+
+    @pytest.mark.parametrize('ndim', [0, 1])
+    @pytest.mark.parametrize('method', ['argmax', 'argmin'])
+    def test_ret_is_out(self, ndim, method):
+        a = np.ones((4,) + (256,)*ndim)
+        arg_method = getattr(a, method)
+        out = np.empty((256,)*ndim, dtype=np.intp)
+        ret = arg_method(axis=0, out=out)
+        assert ret is out
+
+    @pytest.mark.parametrize('np_array, method, idx, val',
+        [(np.zeros, 'argmax', 5942, "as"),
+         (np.ones, 'argmin', 6001, "0")])
+    def test_unicode(self, np_array, method, idx, val):
+        d = np_array(6031, dtype='<U9')
+        arg_method = getattr(d, method)
+        d[idx] = val
+        assert_equal(arg_method(), idx)
+
+
+class TestClip:
+    def _check_range(self, x, cmin, cmax):
+        assert_(np.all(x >= cmin))
+        assert_(np.all(x <= cmax))
+
+    def _clip_type(self, type_group, array_max,
+                   clip_min, clip_max, inplace=False,
+                   expected_min=None, expected_max=None):
+        if expected_min is None:
+            expected_min = clip_min
+        if expected_max is None:
+            expected_max = clip_max
+
+        for T in np.sctypes[type_group]:
+            if sys.byteorder == 'little':
+                byte_orders = ['=', '>']
+            else:
+                byte_orders = ['<', '=']
+
+            for byteorder in byte_orders:
+                dtype = np.dtype(T).newbyteorder(byteorder)
+
+                x = (np.random.random(1000) * array_max).astype(dtype)
+                if inplace:
+                    # The tests that call us pass clip_min and clip_max that
+                    # might not fit in the destination dtype. They were written
+                    # assuming the previous unsafe casting, which now must be
+                    # passed explicitly to avoid a warning.
+                    x.clip(clip_min, clip_max, x, casting='unsafe')
+                else:
+                    x = x.clip(clip_min, clip_max)
+                    byteorder = '='
+
+                if x.dtype.byteorder == '|':
+                    byteorder = '|'
+                assert_equal(x.dtype.byteorder, byteorder)
+                self._check_range(x, expected_min, expected_max)
+        return x
+
+    def test_basic(self):
+        for inplace in [False, True]:
+            self._clip_type(
+                'float', 1024, -12.8, 100.2, inplace=inplace)
+            self._clip_type(
+                'float', 1024, 0, 0, inplace=inplace)
+
+            self._clip_type(
+                'int', 1024, -120, 100, inplace=inplace)
+            self._clip_type(
+                'int', 1024, 0, 0, inplace=inplace)
+
+            self._clip_type(
+                'uint', 1024, 0, 0, inplace=inplace)
+            self._clip_type(
+                'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
+
+    def test_record_array(self):
+        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
+                       dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
+        y = rec['x'].clip(-0.3, 0.5)
+        self._check_range(y, -0.3, 0.5)
+
+    def test_max_or_min(self):
+        val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
+        x = val.clip(3)
+        assert_(np.all(x >= 3))
+        x = val.clip(min=3)
+        assert_(np.all(x >= 3))
+        x = val.clip(max=4)
+        assert_(np.all(x <= 4))
+
+    def test_nan(self):
+        input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
+        result = input_arr.clip(-1, 1)
+        expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
+        assert_array_equal(result, expected)
+
+
+class TestCompress:
+    def test_axis(self):
+        tgt = [[5, 6, 7, 8, 9]]
+        arr = np.arange(10).reshape(2, 5)
+        out = np.compress([0, 1], arr, axis=0)
+        assert_equal(out, tgt)
+
+        tgt = [[1, 3], [6, 8]]
+        out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
+        assert_equal(out, tgt)
+
+    def test_truncate(self):
+        tgt = [[1], [6]]
+        arr = np.arange(10).reshape(2, 5)
+        out = np.compress([0, 1], arr, axis=1)
+        assert_equal(out, tgt)
+
+    def test_flatten(self):
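+        # without an axis argument, np.compress operates on the flattened
+        # array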
+        arr = np.arange(10).reshape(2, 5)
+        out = np.compress([0, 1], arr)
+        assert_equal(out, 1)
+
+
+class TestPutmask:
+    def tst_basic(self, x, T, mask, val):
+        np.putmask(x, mask, val)
+        assert_equal(x[mask], np.array(val, T))
+
+    def test_ip_types(self):
+        unchecked_types = [bytes, str, np.void]
+
+        x = np.random.random(1000)*100
+        mask = x < 40
+
+        for val in [-100, 0, 15]:
+            for types in np.sctypes.values():
+                for T in types:
+                    if T not in unchecked_types:
+                        if val < 0 and np.dtype(T).kind == "u":
+                            val = np.iinfo(T).max - 99
+                        self.tst_basic(x.copy().astype(T), T, mask, val)
+
+            # Also test a string dtype with an atypical length
+            dt = np.dtype("S3")
+            self.tst_basic(x.astype(dt), dt.type, mask, dt.type(val)[:3])
+
+    def test_mask_size(self):
+        assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
+
+    @pytest.mark.parametrize('dtype', ('>i4', '<i4'))
+    def test_byteorder(self, dtype):
+        x = np.array([1, 2, 3], dtype)
+        np.putmask(x, [True, False, True], -1)
+        assert_array_equal(x, [-1, 2, -1])
+
+    def test_record_array(self):
+        # note mixed byteorder
+        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
+                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
+        np.putmask(rec['x'], [True, False], 10)
+        assert_array_equal(rec['x'], [10, 5])
+        assert_array_equal(rec['y'], [2, 4])
+        assert_array_equal(rec['z'], [3, 3])
+
+    def test_overlaps(self):
+        # gh-6272 check overlap
+        x = np.array([True, False, True, False])
+        np.putmask(x[1:4], [True, True, True], x[:3])
+        assert_array_equal(x, np.array([True, True, False, True]))
+
+    def test_writeable(self):
+        a = np.arange(5)
+        a.flags.writeable = False
+
+        with pytest.raises(ValueError):
+            np.putmask(a, a >= 2, 3)
+
+    def test_kwargs(self):
+        x = np.array([0, 0])
+        np.putmask(x, [0, 1], [-1, -2])
+        assert_array_equal(x, [0, -2])
+
+        x = np.array([0, 0])
+        np.putmask(x, mask=[0, 1], values=[-1, -2])
+        assert_array_equal(x, [0, -2])
+
+        x = np.array([0, 0])
+        np.putmask(x, values=[-1, -2],  mask=[0, 1])
+        assert_array_equal(x, [0, -2])
+
+        with pytest.raises(TypeError):
+            np.putmask(a=x, values=[-1, -2],  mask=[0, 1])
+
+
+class TestTake:
+    def tst_basic(self, x):
+        ind = list(range(x.shape[0]))
+        assert_array_equal(x.take(ind, axis=0), x)
+
+    def test_ip_types(self):
+        unchecked_types = [bytes, str, np.void]
+
+        x = np.random.random(24)*100
+        x.shape = 2, 3, 4
+        for types in np.sctypes.values():
+            for T in types:
+                if T not in unchecked_types:
+                    self.tst_basic(x.copy().astype(T))
+
+            # Also test a string dtype with an atypical length
+            self.tst_basic(x.astype("S3"))
+
+    def test_raise(self):
+        x = np.random.random(24)*100
+        x.shape = 2, 3, 4
+        assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
+        assert_raises(IndexError, x.take, [-3], axis=0)
+        assert_array_equal(x.take([-1], axis=0)[0], x[1])
+
+    def test_clip(self):
+        x = np.random.random(24)*100
+        x.shape = 2, 3, 4
+        assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
+        assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
+
+    def test_wrap(self):
+        x = np.random.random(24)*100
+        x.shape = 2, 3, 4
+        assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
+        assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
+        assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
+
+    @pytest.mark.parametrize('dtype', ('>i4', '<i4'))
+    def test_byteorder(self, dtype):
+        x = np.array([1, 2, 3], dtype)
+        assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
+
+    def test_record_array(self):
+        # note mixed byteorder
+        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
+                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
+        rec1 = rec.take([1])
+        assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
+
+
+class TestIO:
+    """Test tofile, fromfile, tobytes, and fromstring"""
+
+    @pytest.fixture()
+    def x(self):
+        shape = (2, 4, 3)
+        rand = np.random.random
+        x = rand(shape) + rand(shape).astype(complex) * 1j
+        x[0, :, 1] = [np.nan, np.inf, -np.inf, np.nan]
+        return x
+
+    @pytest.fixture(params=["string", "path_obj"])
+    def tmp_filename(self, request, tmp_path):
+        # cover both a filename given as a string and one given as a
+        # pathlib object
+        filename = tmp_path / "file"
+        if request.param == "string":
+            filename = str(filename)
+        return filename
+
+    def test_largish_file(self, tmp_filename):
+        # check the fallocate path on files > 16MB
+        d = np.zeros(4 * 1024 ** 2)
+        d.tofile(tmp_filename)
+        assert_equal(os.path.getsize(tmp_filename), d.nbytes)
+        assert_array_equal(d, np.fromfile(tmp_filename))
+        # check offset
+        with open(tmp_filename, "r+b") as f:
+            f.seek(d.nbytes)
+            d.tofile(f)
+            assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
+        # check append mode (gh-8329)
+        open(tmp_filename, "w").close()  # delete file contents
+        with open(tmp_filename, "ab") as f:
+            d.tofile(f)
+        assert_array_equal(d, np.fromfile(tmp_filename))
+        with open(tmp_filename, "ab") as f:
+            d.tofile(f)
+        assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
+
+    def test_io_open_buffered_fromfile(self, x, tmp_filename):
+        # gh-6632
+        x.tofile(tmp_filename)
+        with io.open(tmp_filename, 'rb', buffering=-1) as f:
+            y = np.fromfile(f, dtype=x.dtype)
+        assert_array_equal(y, x.flat)
+
+    def test_file_position_after_fromfile(self, tmp_filename):
+        # gh-4118
+        sizes = [io.DEFAULT_BUFFER_SIZE//8,
+                 io.DEFAULT_BUFFER_SIZE,
+                 io.DEFAULT_BUFFER_SIZE*8]
+
+        for size in sizes:
+            with open(tmp_filename, 'wb') as f:
+                f.seek(size-1)
+                f.write(b'\0')
+
+            for mode in ['rb', 'r+b']:
+                err_msg = "%d %s" % (size, mode)
+
+                with open(tmp_filename, mode) as f:
+                    f.read(2)
+                    np.fromfile(f, dtype=np.float64, count=1)
+                    pos = f.tell()
+                assert_equal(pos, 10, err_msg=err_msg)
+
+    def test_file_position_after_tofile(self, tmp_filename):
+        # gh-4118
+        sizes = [io.DEFAULT_BUFFER_SIZE//8,
+                 io.DEFAULT_BUFFER_SIZE,
+                 io.DEFAULT_BUFFER_SIZE*8]
+
+        for size in sizes:
+            err_msg = "%d" % (size,)
+
+            with open(tmp_filename, 'wb') as f:
+                f.seek(size-1)
+                f.write(b'\0')
+                f.seek(10)
+                f.write(b'12')
+                np.array([0], dtype=np.float64).tofile(f)
+                pos = f.tell()
+            assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
+
+            with open(tmp_filename, 'r+b') as f:
+                f.read(2)
+                f.seek(0, 1)  # seek between read&write required by ANSI C
+                np.array([0], dtype=np.float64).tofile(f)
+                pos = f.tell()
+            assert_equal(pos, 10, err_msg=err_msg)
+
+    def test_load_object_array_fromfile(self, tmp_filename):
+        # gh-12300
+        with open(tmp_filename, 'w') as f:
+            # Ensure we have a file with consistent contents
+            pass
+
+        with open(tmp_filename, 'rb') as f:
+            assert_raises_regex(ValueError, "Cannot read into object array",
+                                np.fromfile, f, dtype=object)
+
+        assert_raises_regex(ValueError, "Cannot read into object array",
+                            np.fromfile, tmp_filename, dtype=object)
+
+    def test_fromfile_offset(self, x, tmp_filename):
+        with open(tmp_filename, 'wb') as f:
+            x.tofile(f)
+
+        with open(tmp_filename, 'rb') as f:
+            y = np.fromfile(f, dtype=x.dtype, offset=0)
+            assert_array_equal(y, x.flat)
+
+        with open(tmp_filename, 'rb') as f:
+            count_items = len(x.flat) // 8
+            offset_items = len(x.flat) // 4
+            offset_bytes = x.dtype.itemsize * offset_items
+            y = np.fromfile(
+                f, dtype=x.dtype, count=count_items, offset=offset_bytes
+            )
+            assert_array_equal(
+                y, x.flat[offset_items:offset_items+count_items]
+            )
+
+            # subsequent seeks should stack
+            offset_bytes = x.dtype.itemsize
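+            # the offset is applied relative to the current file position,
+            # so one extra item is skipped, hence the +1 in the slice below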
+            z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes)
+            assert_array_equal(z, x.flat[offset_items+count_items+1:])
+
+        with open(tmp_filename, 'wb') as f:
+            x.tofile(f, sep=",")
+
+        with open(tmp_filename, 'rb') as f:
+            assert_raises_regex(
+                    TypeError,
+                    "'offset' argument only permitted for binary files",
+                    np.fromfile, tmp_filename, dtype=x.dtype,
+                    sep=",", offset=1)
+
+    @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t")
+    def test_fromfile_bad_dup(self, x, tmp_filename):
+        def dup_str(fd):
+            return 'abc'
+
+        def dup_bigint(fd):
+            return 2**68
+
+        old_dup = os.dup
+        try:
+            with open(tmp_filename, 'wb') as f:
+                x.tofile(f)
+                for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)):
+                    os.dup = dup
+                    assert_raises(exc, np.fromfile, f)
+        finally:
+            os.dup = old_dup
+
+    def _check_from(self, s, value, filename, **kw):
+        if 'sep' not in kw:
+            y = np.frombuffer(s, **kw)
+        else:
+            y = np.fromstring(s, **kw)
+        assert_array_equal(y, value)
+
+        with open(filename, 'wb') as f:
+            f.write(s)
+        y = np.fromfile(filename, **kw)
+        assert_array_equal(y, value)
+
+    @pytest.fixture(params=["period", "comma"])
+    def decimal_sep_localization(self, request):
+        """
+        Including this fixture in a test will automatically
+        execute it with both types of decimal separator.
+
+        So::
+
+            def test_decimal(decimal_sep_localization):
+                pass
+
+        is equivalent to the following two tests::
+
+            def test_decimal_period_separator():
+                pass
+
+            def test_decimal_comma_separator():
+                with CommaDecimalPointLocale():
+                    pass
+        """
+        if request.param == "period":
+            yield
+        elif request.param == "comma":
+            with CommaDecimalPointLocale():
+                yield
+        else:
+            assert False, request.param
+
+    def test_nan(self, tmp_filename, decimal_sep_localization):
+        self._check_from(
+            b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
+            [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
+            tmp_filename,
+            sep=' ')
+
+    def test_inf(self, tmp_filename, decimal_sep_localization):
+        self._check_from(
+            b"inf +inf -inf infinity -Infinity iNfInItY -inF",
+            [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
+            tmp_filename,
+            sep=' ')
+
+    def test_numbers(self, tmp_filename, decimal_sep_localization):
+        self._check_from(
+            b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
+            [1.234, -1.234, .3, .3e55, -123133.1231e+133],
+            tmp_filename,
+            sep=' ')
+
+    def test_binary(self, tmp_filename):
+        self._check_from(
+            b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
+            np.array([1, 2, 3, 4]),
+            tmp_filename,
+            dtype='<f4')
+
+
+class TestFromBuffer:
+    @pytest.mark.parametrize('byteorder', ['<', '>'])
+    @pytest.mark.parametrize('dtype', [float, int, complex])
+    def test_basic(self, byteorder, dtype):
+        dt = np.dtype(dtype).newbyteorder(byteorder)
+        x = (np.random.random((4, 7)) * 5).astype(dt)
+        buf = x.tobytes()
+        assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
+
+    @pytest.mark.parametrize("obj", [np.arange(10), b"12345678"])
+    def test_array_base(self, obj):
+        # Objects (including NumPy arrays) that do not use the
+        # `release_buffer` slot should be used directly as the base object.
+        # See also gh-21612
+        new = np.frombuffer(obj)
+        assert new.base is obj
+
+    def test_empty(self):
+        assert_array_equal(np.frombuffer(b''), np.array([]))
+
+    @pytest.mark.skipif(IS_PYPY,
+            reason="PyPy's memoryview currently does not track exports. See: "
+                   "https://foss.heptapod.net/pypy/pypy/-/issues/3724")
+    def test_mmap_close(self):
+        # The old buffer protocol was not safe for some things that the new
+        # one is.  But `frombuffer` always used the old one for a long time.
+        # Checks that it is safe with the new one (using memoryviews)
+        with tempfile.TemporaryFile(mode='wb') as tmp:
+            tmp.write(b"asdf")
+            tmp.flush()
+            mm = mmap.mmap(tmp.fileno(), 0)
+            arr = np.frombuffer(mm, dtype=np.uint8)
+            with pytest.raises(BufferError):
+                mm.close()  # cannot close while array uses the buffer
+            del arr
+            mm.close()
+
+class TestFlat:
+    def setup_method(self):
+        a0 = np.arange(20.0)
+        a = a0.reshape(4, 5)
+        a0.shape = (4, 5)
+        a.flags.writeable = False
+        self.a = a
+        self.b = a[::2, ::2]
+        self.a0 = a0
+        self.b0 = a0[::2, ::2]
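+        # a (contiguous) and b (strided) have the writeable flag cleared,
+        # while a0/b0 are the equivalent writeable arrays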
+
+    def test_contiguous(self):
+        testpassed = False
+        try:
+            self.a.flat[12] = 100.0
+        except ValueError:
+            testpassed = True
+        assert_(testpassed)
+        assert_(self.a.flat[12] == 12.0)
+
+    def test_discontiguous(self):
+        testpassed = False
+        try:
+            self.b.flat[4] = 100.0
+        except ValueError:
+            testpassed = True
+        assert_(testpassed)
+        assert_(self.b.flat[4] == 12.0)
+
+    def test___array__(self):
+        c = self.a.flat.__array__()
+        d = self.b.flat.__array__()
+        e = self.a0.flat.__array__()
+        f = self.b0.flat.__array__()
+
+        assert_(c.flags.writeable is False)
+        assert_(d.flags.writeable is False)
+        assert_(e.flags.writeable is True)
+        assert_(f.flags.writeable is False)
+        assert_(c.flags.writebackifcopy is False)
+        assert_(d.flags.writebackifcopy is False)
+        assert_(e.flags.writebackifcopy is False)
+        assert_(f.flags.writebackifcopy is False)
+
+    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+    def test_refcount(self):
+        # includes regression test for reference count error gh-13165
+        inds = [np.intp(0), np.array([True]*self.a.size), np.array([0]), None]
+        indtype = np.dtype(np.intp)
+        rc_indtype = sys.getrefcount(indtype)
+        for ind in inds:
+            rc_ind = sys.getrefcount(ind)
+            for _ in range(100):
+                try:
+                    self.a.flat[ind]
+                except IndexError:
+                    pass
+            assert_(abs(sys.getrefcount(ind) - rc_ind) < 50)
+            assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50)
+
+    def test_index_getset(self):
+        it = np.arange(10).reshape(2, 1, 5).flat
+        with pytest.raises(AttributeError):
+            it.index = 10
+
+        for _ in it:
+            pass
+        # Check the value of `.index` is updated correctly (see also gh-19153)
+        # If the type was incorrect, this would show up on big-endian machines
+        assert it.index == it.base.size
+
+
+class TestResize:
+
+    @_no_tracing
+    def test_basic(self):
+        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
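+        # PyPy has no reference counts, so refcheck must be disabled there;
+        # the same pattern repeats in the resize tests below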
+        if IS_PYPY:
+            x.resize((5, 5), refcheck=False)
+        else:
+            x.resize((5, 5))
+        assert_array_equal(x.flat[:9],
+                np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
+        assert_array_equal(x[9:].flat, 0)
+
+    def test_check_reference(self):
+        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+        y = x
+        assert_raises(ValueError, x.resize, (5, 1))
+        del y  # avoid pyflakes unused variable warning.
+
+    @_no_tracing
+    def test_int_shape(self):
+        x = np.eye(3)
+        if IS_PYPY:
+            x.resize(3, refcheck=False)
+        else:
+            x.resize(3)
+        assert_array_equal(x, np.eye(3)[0,:])
+
+    def test_none_shape(self):
+        x = np.eye(3)
+        x.resize(None)
+        assert_array_equal(x, np.eye(3))
+        x.resize()
+        assert_array_equal(x, np.eye(3))
+
+    def test_0d_shape(self):
+        # do it multiple times to test it does not break the alloc cache (gh-9216)
+        for i in range(10):
+            x = np.empty((1,))
+            x.resize(())
+            assert_equal(x.shape, ())
+            assert_equal(x.size, 1)
+            x = np.empty(())
+            x.resize((1,))
+            assert_equal(x.shape, (1,))
+            assert_equal(x.size, 1)
+
+    def test_invalid_arguments(self):
+        assert_raises(TypeError, np.eye(3).resize, 'hi')
+        assert_raises(ValueError, np.eye(3).resize, -1)
+        assert_raises(TypeError, np.eye(3).resize, order=1)
+        assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
+
+    @_no_tracing
+    def test_freeform_shape(self):
+        x = np.eye(3)
+        if IS_PYPY:
+            x.resize(3, 2, 1, refcheck=False)
+        else:
+            x.resize(3, 2, 1)
+        assert_(x.shape == (3, 2, 1))
+
+    @_no_tracing
+    def test_zeros_appended(self):
+        x = np.eye(3)
+        if IS_PYPY:
+            x.resize(2, 3, 3, refcheck=False)
+        else:
+            x.resize(2, 3, 3)
+        assert_array_equal(x[0], np.eye(3))
+        assert_array_equal(x[1], np.zeros((3, 3)))
+
+    @_no_tracing
+    def test_obj_obj(self):
+        # check memory is initialized on resize, gh-4857
+        a = np.ones(10, dtype=[('k', object, 2)])
+        if IS_PYPY:
+            a.resize(15, refcheck=False)
+        else:
+            a.resize(15,)
+        assert_equal(a.shape, (15,))
+        assert_array_equal(a['k'][-5:], 0)
+        assert_array_equal(a['k'][:-5], 1)
+
+    def test_empty_view(self):
+        # check that sizes containing a zero don't trigger a reallocation for
+        # already empty arrays
+        x = np.zeros((10, 0), int)
+        x_view = x[...]
+        x_view.resize((0, 10))
+        x_view.resize((0, 100))
+
+    def test_check_weakref(self):
+        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+        xref = weakref.ref(x)
+        assert_raises(ValueError, x.resize, (5, 1))
+        del xref  # avoid pyflakes unused variable warning.
+
+
+class TestRecord:
+    def test_field_rename(self):
+        dt = np.dtype([('f', float), ('i', int)])
+        dt.names = ['p', 'q']
+        assert_equal(dt.names, ['p', 'q'])
+
+    def test_multiple_field_name_occurrence(self):
+        def test_dtype_init():
+            np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
+
+        # Error raised when multiple fields have the same name
+        assert_raises(ValueError, test_dtype_init)
+
+    def test_bytes_fields(self):
+        # Bytes are not allowed in field names and not recognized in titles
+        # on Py3
+        assert_raises(TypeError, np.dtype, [(b'a', int)])
+        assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
+
+        dt = np.dtype([((b'a', 'b'), int)])
+        assert_raises(TypeError, dt.__getitem__, b'a')
+
+        x = np.array([(1,), (2,), (3,)], dtype=dt)
+        assert_raises(IndexError, x.__getitem__, b'a')
+
+        y = x[0]
+        assert_raises(IndexError, y.__getitem__, b'a')
+
+    def test_multiple_field_name_unicode(self):
+        def test_dtype_unicode():
+            np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")])
+
+        # Error raised when multiple fields have the same name (unicode included)
+        assert_raises(ValueError, test_dtype_unicode)
+
+    def test_fromarrays_unicode(self):
+        # A single name string provided to fromarrays() is allowed to be unicode
+        # on both Python 2 and 3:
+        x = np.core.records.fromarrays(
+            [[0], [1]], names='a,b', formats='i4,i4')
+        assert_equal(x['a'][0], 0)
+        assert_equal(x['b'][0], 1)
+
+    def test_unicode_order(self):
+        # Test that we can sort with order as a unicode field name in both Python 2 and
+        # 3:
+        name = 'b'
+        x = np.array([1, 3, 2], dtype=[(name, int)])
+        x.sort(order=name)
+        assert_equal(x['b'], np.array([1, 2, 3]))
+
+    def test_field_names(self):
+        # Test unicode and 8-bit / byte strings can be used
+        a = np.zeros((1,), dtype=[('f1', 'i4'),
+                                  ('f2', 'i4'),
+                                  ('f3', [('sf1', 'i4')])])
+        # byte string indexing fails gracefully
+        assert_raises(IndexError, a.__setitem__, b'f1', 1)
+        assert_raises(IndexError, a.__getitem__, b'f1')
+        assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
+        assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
+        b = a.copy()
+        fn1 = str('f1')
+        b[fn1] = 1
+        assert_equal(b[fn1], 1)
+        fnn = str('not at all')
+        assert_raises(ValueError, b.__setitem__, fnn, 1)
+        assert_raises(ValueError, b.__getitem__, fnn)
+        b[0][fn1] = 2
+        assert_equal(b[fn1], 2)
+        # Subfield
+        assert_raises(ValueError, b[0].__setitem__, fnn, 1)
+        assert_raises(ValueError, b[0].__getitem__, fnn)
+        # Subfield
+        fn3 = str('f3')
+        sfn1 = str('sf1')
+        b[fn3][sfn1] = 1
+        assert_equal(b[fn3][sfn1], 1)
+        assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
+        assert_raises(ValueError, b[fn3].__getitem__, fnn)
+        # multiple subfields
+        fn2 = str('f2')
+        b[fn2] = 3
+
+        assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
+        assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
+        assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
+
+        # non-ascii unicode field indexing is well behaved
+        assert_raises(ValueError, a.__setitem__, '\u03e0', 1)
+        assert_raises(ValueError, a.__getitem__, '\u03e0')
+
+    def test_record_hash(self):
+        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
+        a.flags.writeable = False
+        b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
+        b.flags.writeable = False
+        c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
+        c.flags.writeable = False
+        assert_(hash(a[0]) == hash(a[1]))
+        assert_(hash(a[0]) == hash(b[0]))
+        assert_(hash(a[0]) != hash(b[1]))
+        assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
+
+    def test_record_no_hash(self):
+        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
+        assert_raises(TypeError, hash, a[0])
+
+    def test_empty_structure_creation(self):
+        # make sure these do not raise errors (gh-5631)
+        np.array([()], dtype={'names': [], 'formats': [],
+                           'offsets': [], 'itemsize': 12})
+        np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
+                                           'offsets': [], 'itemsize': 12})
+
+    def test_multifield_indexing_view(self):
+        a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
+        v = a[['a', 'c']]
+        assert_(v.base is a)
+        assert_(v.dtype == np.dtype({'names': ['a', 'c'],
+                                     'formats': ['i4', 'u4'],
+                                     'offsets': [0, 8]}))
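+        # the multi-field view keeps the original field offsets, so 'c'
+        # stays at offset 8 and the bytes of 'b' become padding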
+        v[:] = (4,5)
+        assert_equal(a[0].item(), (4, 1, 5))
+
+class TestView:
+    def test_basic(self):
+        x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
+                     dtype=[('r', np.int8), ('g', np.int8),
+                            ('b', np.int8), ('a', np.int8)])
+        # We must be specific about the endianness here:
+        y = x.view(dtype='<i4')
+        # ... and again without the keyword.
+        z = x.view('<i4')
+        assert_array_equal(y, z)
+        assert_array_equal(y, [67305985, 134678021])
+
+
+class TestStats:
+
+    funcs = [_mean, _var, _std]
+
+    def setup_method(self):
+        np.random.seed(range(3))
+        self.rmat = np.random.random((4, 5))
+        self.cmat = self.rmat + 1j * self.rmat
+        self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
+        self.omat = self.omat.reshape(4, 5)
+
+    def test_ddof_too_big(self):
+        dim = self.rmat.shape[1]
+        for f in [_var, _std]:
+            for ddof in range(dim, dim + 2):
+                with warnings.catch_warnings(record=True) as w:
+                    warnings.simplefilter('always')
+                    res = f(self.rmat, axis=1, ddof=ddof)
+                    assert_(not (res < 0).any())
+                    assert_(len(w) > 0)
+                    assert_(issubclass(w[0].category, RuntimeWarning))
+
+    def test_empty(self):
+        A = np.zeros((0, 3))
+        for f in self.funcs:
+            for axis in [0, None]:
+                with warnings.catch_warnings(record=True) as w:
+                    warnings.simplefilter('always')
+                    assert_(np.isnan(f(A, axis=axis)).all())
+                    assert_(len(w) > 0)
+                    assert_(issubclass(w[0].category, RuntimeWarning))
+            for axis in [1]:
+                with warnings.catch_warnings(record=True) as w:
+                    warnings.simplefilter('always')
+                    assert_equal(f(A, axis=axis), np.zeros([]))
+
+    def test_mean_values(self):
+        for mat in [self.rmat, self.cmat, self.omat]:
+            for axis in [0, 1]:
+                tgt = mat.sum(axis=axis)
+                res = _mean(mat, axis=axis) * mat.shape[axis]
+                assert_almost_equal(res, tgt)
+            for axis in [None]:
+                tgt = mat.sum(axis=axis)
+                res = _mean(mat, axis=axis) * np.prod(mat.shape)
+                assert_almost_equal(res, tgt)
+
+    def test_mean_float16(self):
+        # This fails if the sum inside mean is done in float16 instead
+        # of float32.
+        assert_(_mean(np.ones(100000, dtype='float16')) == 1)
+
+    def test_mean_axis_error(self):
+        # Ensure that AxisError is raised instead of IndexError when axis is
+        # out of bounds, see gh-15817.
+        with assert_raises(np.exceptions.AxisError):
+            np.arange(10).mean(axis=2)
+
+    def test_mean_where(self):
+        a = np.arange(16).reshape((4, 4))
+        wh_full = np.array([[False, True, False, True],
+                            [True, False, True, False],
+                            [True, True, False, False],
+                            [False, False, True, True]])
+        wh_partial = np.array([[False],
+                               [True],
+                               [True],
+                               [False]])
+        _cases = [(1, True, [1.5, 5.5, 9.5, 13.5]),
+                  (0, wh_full, [6., 5., 10., 9.]),
+                  (1, wh_full, [2., 5., 8.5, 14.5]),
+                  (0, wh_partial, [6., 7., 8., 9.])]
+        for _ax, _wh, _res in _cases:
+            assert_allclose(a.mean(axis=_ax, where=_wh),
+                            np.array(_res))
+            assert_allclose(np.mean(a, axis=_ax, where=_wh),
+                            np.array(_res))
+
+        a3d = np.arange(16).reshape((2, 2, 4))
+        _wh_partial = np.array([False, True, True, False])
+        _res = [[1.5, 5.5], [9.5, 13.5]]
+        assert_allclose(a3d.mean(axis=2, where=_wh_partial),
+                        np.array(_res))
+        assert_allclose(np.mean(a3d, axis=2, where=_wh_partial),
+                        np.array(_res))
+
+        with pytest.warns(RuntimeWarning) as w:
+            assert_allclose(a.mean(axis=1, where=wh_partial),
+                            np.array([np.nan, 5.5, 9.5, np.nan]))
+        with pytest.warns(RuntimeWarning) as w:
+            assert_equal(a.mean(where=False), np.nan)
+        with pytest.warns(RuntimeWarning) as w:
+            assert_equal(np.mean(a, where=False), np.nan)
+
+    def test_var_values(self):
+        for mat in [self.rmat, self.cmat, self.omat]:
+            for axis in [0, 1, None]:
+                msqr = _mean(mat * mat.conj(), axis=axis)
+                mean = _mean(mat, axis=axis)
+                tgt = msqr - mean * mean.conjugate()
+                res = _var(mat, axis=axis)
+                assert_almost_equal(res, tgt)
+
+    @pytest.mark.parametrize(('complex_dtype', 'ndec'), (
+        ('complex64', 6),
+        ('complex128', 7),
+        ('clongdouble', 7),
+    ))
+    def test_var_complex_values(self, complex_dtype, ndec):
+        # Test fast-paths for every builtin complex type
+        for axis in [0, 1, None]:
+            mat = self.cmat.copy().astype(complex_dtype)
+            msqr = _mean(mat * mat.conj(), axis=axis)
+            mean = _mean(mat, axis=axis)
+            tgt = msqr - mean * mean.conjugate()
+            res = _var(mat, axis=axis)
+            assert_almost_equal(res, tgt, decimal=ndec)
+
+    def test_var_dimensions(self):
+        # _var paths for complex number introduce additions on views that
+        # increase dimensions. Ensure this generalizes to higher dims
+        mat = np.stack([self.cmat]*3)
+        for axis in [0, 1, 2, -1, None]:
+            msqr = _mean(mat * mat.conj(), axis=axis)
+            mean = _mean(mat, axis=axis)
+            tgt = msqr - mean * mean.conjugate()
+            res = _var(mat, axis=axis)
+            assert_almost_equal(res, tgt)
+
+    def test_var_complex_byteorder(self):
+        # Test that var fast-path does not cause failures for complex arrays
+        # with non-native byteorder
+        cmat = self.cmat.copy().astype('complex128')
+        cmat_swapped = cmat.astype(cmat.dtype.newbyteorder())
+        assert_almost_equal(cmat.var(), cmat_swapped.var())
+
+    def test_var_axis_error(self):
+        # Ensure that AxisError is raised instead of IndexError when axis is
+        # out of bounds, see gh-15817.
+        with assert_raises(np.exceptions.AxisError):
+            np.arange(10).var(axis=2)
+
+    def test_var_where(self):
+        a = np.arange(25).reshape((5, 5))
+        wh_full = np.array([[False, True, False, True, True],
+                            [True, False, True, True, False],
+                            [True, True, False, False, True],
+                            [False, True, True, False, True],
+                            [True, False, True, True, False]])
+        wh_partial = np.array([[False],
+                               [True],
+                               [True],
+                               [False],
+                               [True]])
+        _cases = [(0, True, [50., 50., 50., 50., 50.]),
+                  (1, True, [2., 2., 2., 2., 2.])]
+        for _ax, _wh, _res in _cases:
+            assert_allclose(a.var(axis=_ax, where=_wh),
+                            np.array(_res))
+            assert_allclose(np.var(a, axis=_ax, where=_wh),
+                            np.array(_res))
+
+        a3d = np.arange(16).reshape((2, 2, 4))
+        _wh_partial = np.array([False, True, True, False])
+        _res = [[0.25, 0.25], [0.25, 0.25]]
+        assert_allclose(a3d.var(axis=2, where=_wh_partial),
+                        np.array(_res))
+        assert_allclose(np.var(a3d, axis=2, where=_wh_partial),
+                        np.array(_res))
+
+        assert_allclose(np.var(a, axis=1, where=wh_full),
+                        np.var(a[wh_full].reshape((5, 3)), axis=1))
+        assert_allclose(np.var(a, axis=0, where=wh_partial),
+                        np.var(a[wh_partial[:,0]], axis=0))
+        with pytest.warns(RuntimeWarning) as w:
+            assert_equal(a.var(where=False), np.nan)
+        with pytest.warns(RuntimeWarning) as w:
+            assert_equal(np.var(a, where=False), np.nan)
+
+    def test_std_values(self):
+        for mat in [self.rmat, self.cmat, self.omat]:
+            for axis in [0, 1, None]:
+                tgt = np.sqrt(_var(mat, axis=axis))
+                res = _std(mat, axis=axis)
+                assert_almost_equal(res, tgt)
+
+    def test_std_where(self):
+        a = np.arange(25).reshape((5,5))[::-1]
+        whf = np.array([[False, True, False, True, True],
+                        [True, False, True, False, True],
+                        [True, True, False, True, False],
+                        [True, False, True, True, False],
+                        [False, True, False, True, True]])
+        whp = np.array([[False],
+                        [False],
+                        [True],
+                        [True],
+                        [False]])
+        _cases = [
+            (0, True, 7.07106781 * np.ones(5)),
+            (1, True, 1.41421356 * np.ones(5)),
+            (0, whf,
+             np.array([4.0824829, 8.16496581, 5., 7.39509973, 8.49836586])),
+            (0, whp, 2.5 * np.ones(5)),
+        ]
+        for _ax, _wh, _res in _cases:
+            assert_allclose(a.std(axis=_ax, where=_wh), _res)
+            assert_allclose(np.std(a, axis=_ax, where=_wh), _res)
+
+        a3d = np.arange(16).reshape((2, 2, 4))
+        _wh_partial = np.array([False, True, True, False])
+        _res = [[0.5, 0.5], [0.5, 0.5]]
+        assert_allclose(a3d.std(axis=2, where=_wh_partial),
+                        np.array(_res))
+        assert_allclose(np.std(a3d, axis=2, where=_wh_partial),
+                        np.array(_res))
+
+        assert_allclose(a.std(axis=1, where=whf),
+                        np.std(a[whf].reshape((5, 3)), axis=1))
+        assert_allclose(np.std(a, axis=1, where=whf),
+                        (a[whf].reshape((5, 3))).std(axis=1))
+        assert_allclose(a.std(axis=0, where=whp),
+                        np.std(a[whp[:, 0]], axis=0))
+        assert_allclose(np.std(a, axis=0, where=whp),
+                        (a[whp[:, 0]]).std(axis=0))
+        with pytest.warns(RuntimeWarning) as w:
+            assert_equal(a.std(where=False), np.nan)
+        with pytest.warns(RuntimeWarning) as w:
+            assert_equal(np.std(a, where=False), np.nan)
+
+    def test_subclass(self):
+        class TestArray(np.ndarray):
+            def __new__(cls, data, info):
+                result = np.array(data)
+                result = result.view(cls)
+                result.info = info
+                return result
+
+            def __array_finalize__(self, obj):
+                self.info = getattr(obj, "info", '')
+
+        dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
+        res = dat.mean(1)
+        assert_(res.info == dat.info)
+        res = dat.std(1)
+        assert_(res.info == dat.info)
+        res = dat.var(1)
+        assert_(res.info == dat.info)
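+
+
+# A minimal illustrative sketch (not part of the upstream suite): the
+# variance checks above rely on the identity var(x) = E[|x|^2] - |E[x]|^2,
+# which holds for complex data as well.
+def _demo_var_identity():
+    rng = np.random.default_rng(0)
+    x = rng.normal(size=100) + 1j * rng.normal(size=100)
+    msqr = np.mean(x * x.conjugate())
+    mean = np.mean(x)
+    assert np.isclose(x.var(), (msqr - mean * mean.conjugate()).real)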
+
+
+class TestVdot:
+    def test_basic(self):
+        dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
+        dt_complex = np.typecodes['Complex']
+
+        # test real
+        a = np.eye(3)
+        for dt in dt_numeric + 'O':
+            b = a.astype(dt)
+            res = np.vdot(b, b)
+            assert_(np.isscalar(res))
+            assert_equal(np.vdot(b, b), 3)
+
+        # test complex
+        a = np.eye(3) * 1j
+        for dt in dt_complex + 'O':
+            b = a.astype(dt)
+            res = np.vdot(b, b)
+            assert_(np.isscalar(res))
+            assert_equal(np.vdot(b, b), 3)
+
+        # test boolean
+        b = np.eye(3, dtype=bool)
+        res = np.vdot(b, b)
+        assert_(np.isscalar(res))
+        assert_equal(np.vdot(b, b), True)
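+
+    # A minimal illustrative sketch (not part of the upstream suite): unlike
+    # ``np.dot``, ``np.vdot`` conjugates its first argument and flattens
+    # higher-dimensional inputs.
+    @staticmethod
+    def _demo_vdot_conjugates():
+        a = np.array([1 + 2j, 3 + 4j])
+        assert np.vdot(a, a) == np.sum(a.conjugate() * a)  # 30+0j
+        assert np.dot(a, a) != np.vdot(a, a)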
+
+    def test_vdot_array_order(self):
+        a = np.array([[1, 2], [3, 4]], order='C')
+        b = np.array([[1, 2], [3, 4]], order='F')
+        res = np.vdot(a, a)
+
+        # integer arrays are exact
+        assert_equal(np.vdot(a, b), res)
+        assert_equal(np.vdot(b, a), res)
+        assert_equal(np.vdot(b, b), res)
+
+    def test_vdot_uncontiguous(self):
+        for size in [2, 1000]:
+            # Different sizes match different branches in vdot.
+            a = np.zeros((size, 2, 2))
+            b = np.zeros((size, 2, 2))
+            a[:, 0, 0] = np.arange(size)
+            b[:, 0, 0] = np.arange(size) + 1
+            # Make a and b uncontiguous:
+            a = a[..., 0]
+            b = b[..., 0]
+
+            assert_equal(np.vdot(a, b),
+                         np.vdot(a.flatten(), b.flatten()))
+            assert_equal(np.vdot(a, b.copy()),
+                         np.vdot(a.flatten(), b.flatten()))
+            assert_equal(np.vdot(a.copy(), b),
+                         np.vdot(a.flatten(), b.flatten()))
+            assert_equal(np.vdot(a.copy('F'), b),
+                         np.vdot(a.flatten(), b.flatten()))
+            assert_equal(np.vdot(a, b.copy('F')),
+                         np.vdot(a.flatten(), b.flatten()))
+
+
+class TestDot:
+    def setup_method(self):
+        np.random.seed(128)
+        self.A = np.random.rand(4, 2)
+        self.b1 = np.random.rand(2, 1)
+        self.b2 = np.random.rand(2)
+        self.b3 = np.random.rand(1, 2)
+        self.b4 = np.random.rand(4)
+        self.N = 7
+
+    def test_dotmatmat(self):
+        A = self.A
+        res = np.dot(A.transpose(), A)
+        tgt = np.array([[1.45046013, 0.86323640],
+                        [0.86323640, 0.84934569]])
+        assert_almost_equal(res, tgt, decimal=self.N)
+
+    def test_dotmatvec(self):
+        A, b1 = self.A, self.b1
+        res = np.dot(A, b1)
+        tgt = np.array([[0.32114320], [0.04889721],
+                        [0.15696029], [0.33612621]])
+        assert_almost_equal(res, tgt, decimal=self.N)
+
+    def test_dotmatvec2(self):
+        A, b2 = self.A, self.b2
+        res = np.dot(A, b2)
+        tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
+        assert_almost_equal(res, tgt, decimal=self.N)
+
+    def test_dotvecmat(self):
+        A, b4 = self.A, self.b4
+        res = np.dot(b4, A)
+        tgt = np.array([1.23495091, 1.12222648])
+        assert_almost_equal(res, tgt, decimal=self.N)
+
+    def test_dotvecmat2(self):
+        b3, A = self.b3, self.A
+        res = np.dot(b3, A.transpose())
+        tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
+        assert_almost_equal(res, tgt, decimal=self.N)
+
+    def test_dotvecmat3(self):
+        A, b4 = self.A, self.b4
+        res = np.dot(A.transpose(), b4)
+        tgt = np.array([1.23495091, 1.12222648])
+        assert_almost_equal(res, tgt, decimal=self.N)
+
+    def test_dotvecvecouter(self):
+        b1, b3 = self.b1, self.b3
+        res = np.dot(b1, b3)
+        tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
+        assert_almost_equal(res, tgt, decimal=self.N)
+
+    def test_dotvecvecinner(self):
+        b1, b3 = self.b1, self.b3
+        res = np.dot(b3, b1)
+        tgt = np.array([[0.23129668]])
+        assert_almost_equal(res, tgt, decimal=self.N)
+
+    def test_dotcolumnvect1(self):
+        b1 = np.ones((3, 1))
+        b2 = [5.3]
+        res = np.dot(b1, b2)
+        tgt = np.array([5.3, 5.3, 5.3])
+        assert_almost_equal(res, tgt, decimal=self.N)
+
+    def test_dotcolumnvect2(self):
+        b1 = np.ones((3, 1)).transpose()
+        b2 = [6.2]
+        res = np.dot(b2, b1)
+        tgt = np.array([6.2, 6.2, 6.2])
+        assert_almost_equal(res, tgt, decimal=self.N)
+
+    def test_dotvecscalar(self):
+        np.random.seed(100)
+        b1 = np.random.rand(1, 1)
+        b2 = np.random.rand(1, 4)
+        res = np.dot(b1, b2)
+        tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
+        assert_almost_equal(res, tgt, decimal=self.N)
+
+    def test_dotvecscalar2(self):
+        np.random.seed(100)
+        b1 = np.random.rand(4, 1)
+        b2 = np.random.rand(1, 1)
+        res = np.dot(b1, b2)
+        tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]])
+        assert_almost_equal(res, tgt, decimal=self.N)
+
+    def test_all(self):
+        dims = [(), (1,), (1, 1)]
+        dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
+        for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
+            b1 = np.zeros(dim1)
+            b2 = np.zeros(dim2)
+            res = np.dot(b1, b2)
+            tgt = np.zeros(dim)
+            assert_(res.shape == tgt.shape)
+            assert_almost_equal(res, tgt, decimal=self.N)
+
+    def test_vecobject(self):
+        class Vec:
+            def __init__(self, sequence=None):
+                if sequence is None:
+                    sequence = []
+                self.array = np.array(sequence)
+
+            def __add__(self, other):
+                out = Vec()
+                out.array = self.array + other.array
+                return out
+
+            def __sub__(self, other):
+                out = Vec()
+                out.array = self.array - other.array
+                return out
+
+            def __mul__(self, other):  # with scalar
+                out = Vec(self.array.copy())
+                out.array *= other
+                return out
+
+            def __rmul__(self, other):
+                return self*other
+
+        U_non_cont = np.transpose([[1., 1.], [1., 2.]])
+        U_cont = np.ascontiguousarray(U_non_cont)
+        x = np.array([Vec([1., 0.]), Vec([0., 1.])])
+        zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
+        zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
+        assert_equal(zeros[0].array, zeros_test[0].array)
+        assert_equal(zeros[1].array, zeros_test[1].array)
+
+    def test_dot_2args(self):
+        from numpy.core.multiarray import dot
+
+        a = np.array([[1, 2], [3, 4]], dtype=float)
+        b = np.array([[1, 0], [1, 1]], dtype=float)
+        c = np.array([[3, 2], [7, 4]], dtype=float)
+
+        d = dot(a, b)
+        assert_allclose(c, d)
+
+    def test_dot_3args(self):
+        from numpy.core.multiarray import dot
+
+        np.random.seed(22)
+        f = np.random.random_sample((1024, 16))
+        v = np.random.random_sample((16, 32))
+
+        r = np.empty((1024, 32))
+        for i in range(12):
+            dot(f, v, r)
+        if HAS_REFCOUNT:
+            assert_equal(sys.getrefcount(r), 2)
+        r2 = dot(f, v, out=None)
+        assert_array_equal(r2, r)
+        assert_(r is dot(f, v, out=r))
+
+        v = v[:, 0].copy()  # v.shape == (16,)
+        r = r[:, 0].copy()  # r.shape == (1024,)
+        r2 = dot(f, v)
+        assert_(r is dot(f, v, r))
+        assert_array_equal(r2, r)
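+
+    # A minimal illustrative sketch (not part of the upstream suite): the
+    # three-argument form writes into a preallocated buffer and hands the
+    # very same buffer back, avoiding an extra allocation.
+    @staticmethod
+    def _demo_dot_out_buffer():
+        a = np.ones((4, 3))
+        b = np.ones((3, 2))
+        out = np.empty((4, 2))
+        assert np.dot(a, b, out=out) is out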
+
+    def test_dot_3args_errors(self):
+        from numpy.core.multiarray import dot
+
+        np.random.seed(22)
+        f = np.random.random_sample((1024, 16))
+        v = np.random.random_sample((16, 32))
+
+        r = np.empty((1024, 31))
+        assert_raises(ValueError, dot, f, v, r)
+
+        r = np.empty((1024,))
+        assert_raises(ValueError, dot, f, v, r)
+
+        r = np.empty((32,))
+        assert_raises(ValueError, dot, f, v, r)
+
+        r = np.empty((32, 1024))
+        assert_raises(ValueError, dot, f, v, r)
+        assert_raises(ValueError, dot, f, v, r.T)
+
+        r = np.empty((1024, 64))
+        assert_raises(ValueError, dot, f, v, r[:, ::2])
+        assert_raises(ValueError, dot, f, v, r[:, :32])
+
+        r = np.empty((1024, 32), dtype=np.float32)
+        assert_raises(ValueError, dot, f, v, r)
+
+        r = np.empty((1024, 32), dtype=int)
+        assert_raises(ValueError, dot, f, v, r)
+
+    def test_dot_out_result(self):
+        x = np.ones((), dtype=np.float16)
+        y = np.ones((5,), dtype=np.float16)
+        z = np.zeros((5,), dtype=np.float16)
+        res = x.dot(y, out=z)
+        assert np.array_equal(res, y)
+        assert np.array_equal(z, y)
+
+    def test_dot_out_aliasing(self):
+        x = np.ones((), dtype=np.float16)
+        y = np.ones((5,), dtype=np.float16)
+        z = np.zeros((5,), dtype=np.float16)
+        res = x.dot(y, out=z)
+        z[0] = 2
+        assert np.array_equal(res, z)
+
+    def test_dot_array_order(self):
+        a = np.array([[1, 2], [3, 4]], order='C')
+        b = np.array([[1, 2], [3, 4]], order='F')
+        res = np.dot(a, a)
+
+        # integer arrays are exact
+        assert_equal(np.dot(a, b), res)
+        assert_equal(np.dot(b, a), res)
+        assert_equal(np.dot(b, b), res)
+
+    def test_accelerate_framework_sgemv_fix(self):
+
+        def aligned_array(shape, align, dtype, order='C'):
+            d = dtype(0)
+            N = np.prod(shape)
+            tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
+            address = tmp.__array_interface__["data"][0]
+            for offset in range(align):
+                if (address + offset) % align == 0:
+                    break
+            tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
+            return tmp.reshape(shape, order=order)
+
+        def as_aligned(arr, align, dtype, order='C'):
+            aligned = aligned_array(arr.shape, align, dtype, order)
+            aligned[:] = arr[:]
+            return aligned
+
+        def assert_dot_close(A, X, desired):
+            assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
+
+        m = aligned_array(100, 15, np.float32)
+        s = aligned_array((100, 100), 15, np.float32)
+        np.dot(s, m)  # this will always segfault if the bug is present
+
+        testdata = itertools.product((15, 32), (10000,), (200, 89), ('C', 'F'))
+        for align, m, n, a_order in testdata:
+            # Calculation in double precision
+            A_d = np.random.rand(m, n)
+            X_d = np.random.rand(n)
+            desired = np.dot(A_d, X_d)
+            # Calculation with aligned single precision
+            A_f = as_aligned(A_d, align, np.float32, order=a_order)
+            X_f = as_aligned(X_d, align, np.float32)
+            assert_dot_close(A_f, X_f, desired)
+            # Strided A rows
+            A_d_2 = A_d[::2]
+            desired = np.dot(A_d_2, X_d)
+            A_f_2 = A_f[::2]
+            assert_dot_close(A_f_2, X_f, desired)
+            # Strided A columns, strided X vector
+            A_d_22 = A_d_2[:, ::2]
+            X_d_2 = X_d[::2]
+            desired = np.dot(A_d_22, X_d_2)
+            A_f_22 = A_f_2[:, ::2]
+            X_f_2 = X_f[::2]
+            assert_dot_close(A_f_22, X_f_2, desired)
+            # Check the strides are as expected
+            if a_order == 'F':
+                assert_equal(A_f_22.strides, (8, 8 * m))
+            else:
+                assert_equal(A_f_22.strides, (8 * n, 8))
+            assert_equal(X_f_2.strides, (8,))
+            # Strides in A rows + cols only
+            X_f_2c = as_aligned(X_f_2, align, np.float32)
+            assert_dot_close(A_f_22, X_f_2c, desired)
+            # Strides just in A cols
+            A_d_12 = A_d[:, ::2]
+            desired = np.dot(A_d_12, X_d_2)
+            A_f_12 = A_f[:, ::2]
+            assert_dot_close(A_f_12, X_f_2c, desired)
+            # Strides in A cols and X
+            assert_dot_close(A_f_12, X_f_2, desired)
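+
+    # A minimal illustrative sketch (not part of the upstream suite): the
+    # aligned_array helper above over-allocates a byte buffer and slices
+    # from the first offset whose address is a multiple of the requested
+    # alignment.
+    @staticmethod
+    def _demo_manual_alignment():
+        buf = np.zeros(64 + 16, dtype=np.uint8)
+        addr = buf.__array_interface__["data"][0]
+        off = (-addr) % 16
+        view = buf[off:off + 64].view(np.float32)
+        assert view.__array_interface__["data"][0] % 16 == 0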
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize("dtype", [np.float64, np.complex128])
+    @requires_memory(free_bytes=18e9)  # the complex case needs ~18 GB free
+    def test_huge_vectordot(self, dtype):
+        # Large vector multiplications are chunked with 32bit BLAS
+        # Test that the chunking does the right thing, see also gh-22262
+        data = np.ones(2**30+100, dtype=dtype)
+        res = np.dot(data, data)
+        assert res == 2**30+100
+
+    def test_dtype_discovery_fails(self):
+        # See gh-14247, error checking was missing for failed dtype discovery
+        class BadObject:
+            def __array__(self):
+                raise TypeError("just this tiny mint leaf")
+
+        with pytest.raises(TypeError):
+            np.dot(BadObject(), BadObject())
+
+        with pytest.raises(TypeError):
+            np.dot(3.0, BadObject())
+
+
+class MatmulCommon:
+    """Common tests for '@' operator and numpy.matmul.
+
+    """
+    # Should work with these types; "O" (object) is included.
+    types = "?bhilqBHILQefdgFDGO"
+
+    def test_exceptions(self):
+        dims = [
+            ((1,), (2,)),            # mismatched vector vector
+            ((2, 1,), (2,)),         # mismatched matrix vector
+            ((2,), (1, 2)),          # mismatched vector matrix
+            ((1, 2), (3, 1)),        # mismatched matrix matrix
+            ((1,), ()),              # vector scalar
+            ((), (1,)),              # scalar vector
+            ((1, 1), ()),            # matrix scalar
+            ((), (1, 1)),            # scalar matrix
+            ((2, 2, 1), (3, 1, 2)),  # cannot broadcast
+            ]
+
+        for dt, (dm1, dm2) in itertools.product(self.types, dims):
+            a = np.ones(dm1, dtype=dt)
+            b = np.ones(dm2, dtype=dt)
+            assert_raises(ValueError, self.matmul, a, b)
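+
+    # A minimal illustrative sketch (not part of the upstream suite): unlike
+    # ``np.dot``, matmul rejects scalar operands outright instead of
+    # broadcasting them.
+    @staticmethod
+    def _demo_matmul_rejects_scalars():
+        assert np.dot(2, np.ones(3))[0] == 2.0  # dot accepts scalars
+        try:
+            np.matmul(2, np.ones(3))
+        except (ValueError, TypeError):
+            pass
+        else:
+            raise AssertionError("matmul should reject scalar operands")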
+
+    def test_shapes(self):
+        dims = [
+            ((1, 1), (2, 1, 1)),     # broadcast first argument
+            ((2, 1, 1), (1, 1)),     # broadcast second argument
+            ((2, 1, 1), (2, 1, 1)),  # matrix stack sizes match
+            ]
+
+        for dt, (dm1, dm2) in itertools.product(self.types, dims):
+            a = np.ones(dm1, dtype=dt)
+            b = np.ones(dm2, dtype=dt)
+            res = self.matmul(a, b)
+            assert_(res.shape == (2, 1, 1))
+
+        # vector vector returns scalars.
+        for dt in self.types:
+            a = np.ones((2,), dtype=dt)
+            b = np.ones((2,), dtype=dt)
+            c = self.matmul(a, b)
+            assert_(np.array(c).shape == ())
+
+    def test_result_types(self):
+        mat = np.ones((1, 1))
+        vec = np.ones((1,))
+        for dt in self.types:
+            m = mat.astype(dt)
+            v = vec.astype(dt)
+            for arg in [(m, v), (v, m), (m, m)]:
+                res = self.matmul(*arg)
+                assert_(res.dtype == dt)
+
+            # vector vector returns scalars
+            if dt != "O":
+                res = self.matmul(v, v)
+                assert_(type(res) is np.dtype(dt).type)
+
+    def test_scalar_output(self):
+        vec1 = np.array([2])
+        vec2 = np.array([3, 4]).reshape(1, -1)
+        tgt = np.array([6, 8])
+        for dt in self.types[1:]:
+            v1 = vec1.astype(dt)
+            v2 = vec2.astype(dt)
+            res = self.matmul(v1, v2)
+            assert_equal(res, tgt)
+            res = self.matmul(v2.T, v1)
+            assert_equal(res, tgt)
+
+        # boolean type
+        vec = np.array([True, True], dtype='?').reshape(1, -1)
+        res = self.matmul(vec[:, 0], vec)
+        assert_equal(res, True)
+
+    def test_vector_vector_values(self):
+        vec1 = np.array([1, 2])
+        vec2 = np.array([3, 4]).reshape(-1, 1)
+        tgt1 = np.array([11])
+        tgt2 = np.array([[3, 6], [4, 8]])
+        for dt in self.types[1:]:
+            v1 = vec1.astype(dt)
+            v2 = vec2.astype(dt)
+            res = self.matmul(v1, v2)
+            assert_equal(res, tgt1)
+            # no broadcast, we must make v1 into a 2d ndarray
+            res = self.matmul(v2, v1.reshape(1, -1))
+            assert_equal(res, tgt2)
+
+        # boolean type
+        vec = np.array([True, True], dtype='?')
+        res = self.matmul(vec, vec)
+        assert_equal(res, True)
+
+    def test_vector_matrix_values(self):
+        vec = np.array([1, 2])
+        mat1 = np.array([[1, 2], [3, 4]])
+        mat2 = np.stack([mat1]*2, axis=0)
+        tgt1 = np.array([7, 10])
+        tgt2 = np.stack([tgt1]*2, axis=0)
+        for dt in self.types[1:]:
+            v = vec.astype(dt)
+            m1 = mat1.astype(dt)
+            m2 = mat2.astype(dt)
+            res = self.matmul(v, m1)
+            assert_equal(res, tgt1)
+            res = self.matmul(v, m2)
+            assert_equal(res, tgt2)
+
+        # boolean type
+        vec = np.array([True, False])
+        mat1 = np.array([[True, False], [False, True]])
+        mat2 = np.stack([mat1]*2, axis=0)
+        tgt1 = np.array([True, False])
+        tgt2 = np.stack([tgt1]*2, axis=0)
+
+        res = self.matmul(vec, mat1)
+        assert_equal(res, tgt1)
+        res = self.matmul(vec, mat2)
+        assert_equal(res, tgt2)
+
+    def test_matrix_vector_values(self):
+        vec = np.array([1, 2])
+        mat1 = np.array([[1, 2], [3, 4]])
+        mat2 = np.stack([mat1]*2, axis=0)
+        tgt1 = np.array([5, 11])
+        tgt2 = np.stack([tgt1]*2, axis=0)
+        for dt in self.types[1:]:
+            v = vec.astype(dt)
+            m1 = mat1.astype(dt)
+            m2 = mat2.astype(dt)
+            res = self.matmul(m1, v)
+            assert_equal(res, tgt1)
+            res = self.matmul(m2, v)
+            assert_equal(res, tgt2)
+
+        # boolean type
+        vec = np.array([True, False])
+        mat1 = np.array([[True, False], [False, True]])
+        mat2 = np.stack([mat1]*2, axis=0)
+        tgt1 = np.array([True, False])
+        tgt2 = np.stack([tgt1]*2, axis=0)
+
+        res = self.matmul(mat1, vec)
+        assert_equal(res, tgt1)
+        res = self.matmul(mat2, vec)
+        assert_equal(res, tgt2)
+
+    def test_matrix_matrix_values(self):
+        mat1 = np.array([[1, 2], [3, 4]])
+        mat2 = np.array([[1, 0], [1, 1]])
+        mat12 = np.stack([mat1, mat2], axis=0)
+        mat21 = np.stack([mat2, mat1], axis=0)
+        tgt11 = np.array([[7, 10], [15, 22]])
+        tgt12 = np.array([[3, 2], [7, 4]])
+        tgt21 = np.array([[1, 2], [4, 6]])
+        tgt12_21 = np.stack([tgt12, tgt21], axis=0)
+        tgt11_12 = np.stack((tgt11, tgt12), axis=0)
+        tgt11_21 = np.stack((tgt11, tgt21), axis=0)
+        for dt in self.types[1:]:
+            m1 = mat1.astype(dt)
+            m2 = mat2.astype(dt)
+            m12 = mat12.astype(dt)
+            m21 = mat21.astype(dt)
+
+            # matrix @ matrix
+            res = self.matmul(m1, m2)
+            assert_equal(res, tgt12)
+            res = self.matmul(m2, m1)
+            assert_equal(res, tgt21)
+
+            # stacked @ matrix
+            res = self.matmul(m12, m1)
+            assert_equal(res, tgt11_21)
+
+            # matrix @ stacked
+            res = self.matmul(m1, m12)
+            assert_equal(res, tgt11_12)
+
+            # stacked @ stacked
+            res = self.matmul(m12, m21)
+            assert_equal(res, tgt12_21)
+
+        # boolean type
+        m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
+        m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
+        m12 = np.stack([m1, m2], axis=0)
+        m21 = np.stack([m2, m1], axis=0)
+        tgt11 = m1
+        tgt12 = m1
+        tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
+        tgt12_21 = np.stack([tgt12, tgt21], axis=0)
+        tgt11_12 = np.stack((tgt11, tgt12), axis=0)
+        tgt11_21 = np.stack((tgt11, tgt21), axis=0)
+
+        # matrix @ matrix
+        res = self.matmul(m1, m2)
+        assert_equal(res, tgt12)
+        res = self.matmul(m2, m1)
+        assert_equal(res, tgt21)
+
+        # stacked @ matrix
+        res = self.matmul(m12, m1)
+        assert_equal(res, tgt11_21)
+
+        # matrix @ stacked
+        res = self.matmul(m1, m12)
+        assert_equal(res, tgt11_12)
+
+        # stacked @ stacked
+        res = self.matmul(m12, m21)
+        assert_equal(res, tgt12_21)
+
+
+class TestMatmul(MatmulCommon):
+    matmul = np.matmul
+
+    def test_out_arg(self):
+        a = np.ones((5, 2), dtype=float)
+        b = np.array([[1, 3], [5, 7]], dtype=float)
+        tgt = np.dot(a, b)
+
+        # test as positional argument
+        msg = "out positional argument"
+        out = np.zeros((5, 2), dtype=float)
+        self.matmul(a, b, out)
+        assert_array_equal(out, tgt, err_msg=msg)
+
+        # test as keyword argument
+        msg = "out keyword argument"
+        out = np.zeros((5, 2), dtype=float)
+        self.matmul(a, b, out=out)
+        assert_array_equal(out, tgt, err_msg=msg)
+
+        # test out with not allowed type cast (safe casting)
+        msg = "Cannot cast ufunc .* output"
+        out = np.zeros((5, 2), dtype=np.int32)
+        assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out)
+
+        # test out with type upcast to complex
+        out = np.zeros((5, 2), dtype=np.complex128)
+        c = self.matmul(a, b, out=out)
+        assert_(c is out)
+        with suppress_warnings() as sup:
+            sup.filter(np.ComplexWarning, '')
+            c = c.astype(tgt.dtype)
+        assert_array_equal(c, tgt)
+
+    def test_empty_out(self):
+        # Check that the output cannot be broadcast, so that it cannot be
+        # size zero when the outer dimensions (iterator size) have size zero.
+        arr = np.ones((0, 1, 1))
+        out = np.ones((1, 1, 1))
+        assert self.matmul(arr, arr).shape == (0, 1, 1)
+
+        with pytest.raises(ValueError, match=r"non-broadcastable"):
+            self.matmul(arr, arr, out=out)
+
+    def test_out_contiguous(self):
+        a = np.ones((5, 2), dtype=float)
+        b = np.array([[1, 3], [5, 7]], dtype=float)
+        v = np.array([1, 3], dtype=float)
+        tgt = np.dot(a, b)
+        tgt_mv = np.dot(a, v)
+
+        # test out non-contiguous
+        out = np.ones((5, 2, 2), dtype=float)
+        c = self.matmul(a, b, out=out[..., 0])
+        assert c.base is out
+        assert_array_equal(c, tgt)
+        c = self.matmul(a, v, out=out[:, 0, 0])
+        assert_array_equal(c, tgt_mv)
+        c = self.matmul(v, a.T, out=out[:, 0, 0])
+        assert_array_equal(c, tgt_mv)
+
+        # test out contiguous in only last dim
+        out = np.ones((10, 2), dtype=float)
+        c = self.matmul(a, b, out=out[::2, :])
+        assert_array_equal(c, tgt)
+
+        # test transposes of out, args
+        out = np.ones((5, 2), dtype=float)
+        c = self.matmul(b.T, a.T, out=out.T)
+        assert_array_equal(out, tgt)
+
+    m1 = np.arange(15.).reshape(5, 3)
+    m2 = np.arange(21.).reshape(3, 7)
+    m3 = np.arange(30.).reshape(5, 6)[:, ::2]  # non-contiguous
+    vc = np.arange(10.)
+    vr = np.arange(6.)
+    m0 = np.zeros((3, 0))
+
+    @pytest.mark.parametrize('args', (
+            # matrix-matrix
+            (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()),
+            # matrix-matrix-transpose, contiguous and non
+            (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T),
+            (m3, m3.T), (m3.T, m3),
+            # matrix-matrix non-contiguous
+            (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T),
+            # vector-matrix, matrix-vector, contiguous
+            (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T),
+            # vector-matrix, matrix-vector, vector non-contiguous
+            (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T),
+            # vector-matrix, matrix-vector, matrix non-contiguous
+            (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T),
+            # vector-matrix, matrix-vector, both non-contiguous
+            (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T),
+            # size == 0
+            (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T),
+        ))
+    def test_dot_equivalent(self, args):
+        r1 = np.matmul(*args)
+        r2 = np.dot(*args)
+        assert_equal(r1, r2)
+
+        r3 = np.matmul(args[0].copy(), args[1].copy())
+        assert_equal(r1, r3)
+
+    def test_matmul_object(self):
+        import fractions
+
+        f = np.vectorize(fractions.Fraction)
+
+        def random_ints():
+            return np.random.randint(1, 1000, size=(10, 3, 3))
+        M1 = f(random_ints(), random_ints())
+        M2 = f(random_ints(), random_ints())
+
+        M3 = self.matmul(M1, M2)
+
+        [N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]]
+
+        assert_allclose(N3, self.matmul(N1, N2))
+
+    def test_matmul_object_type_scalar(self):
+        from fractions import Fraction as F
+        v = np.array([F(2, 3), F(5, 7)])
+        res = self.matmul(v, v)
+        assert_(type(res) is F)
+
+    def test_matmul_empty(self):
+        a = np.empty((3, 0), dtype=object)
+        b = np.empty((0, 3), dtype=object)
+        c = np.zeros((3, 3))
+        assert_array_equal(np.matmul(a, b), c)
+
+    def test_matmul_exception_multiply(self):
+        # test that matmul fails if `__mul__` is missing
+        class add_not_multiply:
+            def __add__(self, other):
+                return self
+        a = np.full((3,3), add_not_multiply())
+        with assert_raises(TypeError):
+            b = np.matmul(a, a)
+
+    def test_matmul_exception_add(self):
+        # test that matmul fails if `__add__` is missing
+        class multiply_not_add:
+            def __mul__(self, other):
+                return self
+        a = np.full((3,3), multiply_not_add())
+        with assert_raises(TypeError):
+            b = np.matmul(a, a)
+
+    def test_matmul_bool(self):
+        # gh-14439
+        a = np.array([[1, 0], [1, 1]], dtype=bool)
+        assert np.max(a.view(np.uint8)) == 1
+        b = np.matmul(a, a)
+        # matmul with boolean output should always be 0, 1
+        assert np.max(b.view(np.uint8)) == 1
+
+        rg = np.random.default_rng(np.random.PCG64(43))
+        d = rg.integers(2, size=4*5, dtype=np.int8)
+        d = d.reshape(4, 5) > 0
+        out1 = np.matmul(d, d.reshape(5, 4))
+        out2 = np.dot(d, d.reshape(5, 4))
+        assert_equal(out1, out2)
+
+        c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool))
+        assert not np.any(c)
+
+
+class TestMatmulOperator(MatmulCommon):
+    import operator
+    matmul = operator.matmul
+
+    def test_array_priority_override(self):
+
+        class A:
+            __array_priority__ = 1000
+
+            def __matmul__(self, other):
+                return "A"
+
+            def __rmatmul__(self, other):
+                return "A"
+
+        a = A()
+        b = np.ones(2)
+        assert_equal(self.matmul(a, b), "A")
+        assert_equal(self.matmul(b, a), "A")
+
+    def test_matmul_raises(self):
+        assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5))
+        assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))
+        assert_raises(TypeError, self.matmul, np.arange(10), np.void(b'abc'))
+
+
+class TestMatmulInplace:
+    DTYPES = {}
+    for i in MatmulCommon.types:
+        for j in MatmulCommon.types:
+            if np.can_cast(j, i):
+                DTYPES[f"{i}-{j}"] = (np.dtype(i), np.dtype(j))
+
+    @pytest.mark.parametrize("dtype1,dtype2", DTYPES.values(), ids=DTYPES)
+    def test_basic(self, dtype1: np.dtype, dtype2: np.dtype) -> None:
+        a = np.arange(10).reshape(5, 2).astype(dtype1)
+        a_id = id(a)
+        b = np.ones((2, 2), dtype=dtype2)
+
+        ref = a @ b
+        a @= b
+
+        assert id(a) == a_id
+        assert a.dtype == dtype1
+        assert a.shape == (5, 2)
+        if dtype1.kind in "fc":
+            np.testing.assert_allclose(a, ref)
+        else:
+            np.testing.assert_array_equal(a, ref)
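+
+    # A minimal illustrative sketch (not part of the upstream suite):
+    # ``a @= b`` reuses ``a``'s buffer, so the identity and dtype of ``a``
+    # survive, unlike rebinding with ``a = a @ b``.
+    @staticmethod
+    def _demo_inplace_reuses_buffer():
+        a = np.arange(4.0).reshape(2, 2)
+        buf = a
+        a @= np.eye(2)
+        assert a is buf and a.dtype == np.float64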
+
+    SHAPES = {
+        "2d_large": ((10**5, 10), (10, 10)),
+        "3d_large": ((10**4, 10, 10), (1, 10, 10)),
+        "1d": ((3,), (3,)),
+        "2d_1d": ((3, 3), (3,)),
+        "1d_2d": ((3,), (3, 3)),
+        "2d_broadcast": ((3, 3), (3, 1)),
+        "2d_broadcast_reverse": ((1, 3), (3, 3)),
+        "3d_broadcast1": ((3, 3, 3), (1, 3, 1)),
+        "3d_broadcast2": ((3, 3, 3), (1, 3, 3)),
+        "3d_broadcast3": ((3, 3, 3), (3, 3, 1)),
+        "3d_broadcast_reverse1": ((1, 3, 3), (3, 3, 3)),
+        "3d_broadcast_reverse2": ((3, 1, 3), (3, 3, 3)),
+        "3d_broadcast_reverse3": ((1, 1, 3), (3, 3, 3)),
+    }
+
+    @pytest.mark.parametrize("a_shape,b_shape", SHAPES.values(), ids=SHAPES)
+    def test_shapes(self, a_shape: tuple[int, ...], b_shape: tuple[int, ...]):
+        a_size = np.prod(a_shape)
+        a = np.arange(a_size).reshape(a_shape).astype(np.float64)
+        a_id = id(a)
+
+        b_size = np.prod(b_shape)
+        b = np.arange(b_size).reshape(b_shape)
+
+        ref = a @ b
+        if ref.shape != a_shape:
+            with pytest.raises(ValueError):
+                a @= b
+            return
+        a @= b
+
+        assert id(a) == a_id
+        assert a.dtype.type == np.float64
+        assert a.shape == a_shape
+        np.testing.assert_allclose(a, ref)
+
+
+def test_matmul_axes():
+    a = np.arange(3*4*5).reshape(3, 4, 5)
+    c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
+    assert c.shape == (3, 4, 4)
+    d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])
+    assert d.shape == (4, 4, 3)
+    e = np.swapaxes(d, 0, 2)
+    assert_array_equal(e, c)
+    f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])
+    assert f.shape == (4, 5)
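+
+
+# A minimal illustrative sketch (not part of the upstream suite): in the call
+# above, ``axes=[(-2, -1), (-1, -2), (1, 2)]`` maps the second operand's core
+# axes in reverse order, which is the same as transposing its last two axes.
+def _demo_matmul_axes_equivalence():
+    a = np.arange(3 * 4 * 5).reshape(3, 4, 5)
+    c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
+    assert np.array_equal(c, a @ a.swapaxes(-1, -2))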
+
+
+class TestInner:
+
+    def test_inner_type_mismatch(self):
+        c = 1.
+        A = np.array((1, 1), dtype='i,i')
+
+        assert_raises(TypeError, np.inner, c, A)
+        assert_raises(TypeError, np.inner, A, c)
+
+    def test_inner_scalar_and_vector(self):
+        for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+            sca = np.array(3, dtype=dt)[()]
+            vec = np.array([1, 2], dtype=dt)
+            desired = np.array([3, 6], dtype=dt)
+            assert_equal(np.inner(vec, sca), desired)
+            assert_equal(np.inner(sca, vec), desired)
+
+    def test_vecself(self):
+        # Ticket 844.
+        # Inner product of a vector with itself used to segfault or give a
+        # meaningless result.
+        a = np.zeros(shape=(1, 80), dtype=np.float64)
+        p = np.inner(a, a)
+        assert_almost_equal(p, 0, decimal=14)
+
+    def test_inner_product_with_various_contiguities(self):
+        # github issue 6532
+        for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+            # check an inner product involving a matrix transpose
+            A = np.array([[1, 2], [3, 4]], dtype=dt)
+            B = np.array([[1, 3], [2, 4]], dtype=dt)
+            C = np.array([1, 1], dtype=dt)
+            desired = np.array([4, 6], dtype=dt)
+            assert_equal(np.inner(A.T, C), desired)
+            assert_equal(np.inner(C, A.T), desired)
+            assert_equal(np.inner(B, C), desired)
+            assert_equal(np.inner(C, B), desired)
+            # check a matrix product
+            desired = np.array([[7, 10], [15, 22]], dtype=dt)
+            assert_equal(np.inner(A, B), desired)
+            # check the syrk vs. gemm paths
+            desired = np.array([[5, 11], [11, 25]], dtype=dt)
+            assert_equal(np.inner(A, A), desired)
+            assert_equal(np.inner(A, A.copy()), desired)
+            # check an inner product involving an aliased and reversed view
+            a = np.arange(5).astype(dt)
+            b = a[::-1]
+            desired = np.array(10, dtype=dt).item()
+            assert_equal(np.inner(b, a), desired)
+
+    def test_3d_tensor(self):
+        for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+            a = np.arange(24).reshape(2, 3, 4).astype(dt)
+            b = np.arange(24, 48).reshape(2, 3, 4).astype(dt)
+            desired = np.array(
+                [[[[ 158,  182,  206],
+                   [ 230,  254,  278]],
+
+                  [[ 566,  654,  742],
+                   [ 830,  918, 1006]],
+
+                  [[ 974, 1126, 1278],
+                   [1430, 1582, 1734]]],
+
+                 [[[1382, 1598, 1814],
+                   [2030, 2246, 2462]],
+
+                  [[1790, 2070, 2350],
+                   [2630, 2910, 3190]],
+
+                  [[2198, 2542, 2886],
+                   [3230, 3574, 3918]]]]
+            ).astype(dt)
+            assert_equal(np.inner(a, b), desired)
+            assert_equal(np.inner(b, a).transpose(2, 3, 0, 1), desired)
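+
+    # A minimal illustrative sketch (not part of the upstream suite): for
+    # multidimensional inputs, ``np.inner`` contracts only the last axis of
+    # each operand, i.e. an einsum over that axis.
+    @staticmethod
+    def _demo_inner_is_einsum():
+        a = np.arange(24).reshape(2, 3, 4)
+        b = np.arange(24, 48).reshape(2, 3, 4)
+        assert np.array_equal(np.inner(a, b),
+                              np.einsum('ijk,lmk->ijlm', a, b))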
+
+
+class TestChoose:
+    def setup_method(self):
+        self.x = 2*np.ones((3,), dtype=int)
+        self.y = 3*np.ones((3,), dtype=int)
+        self.x2 = 2*np.ones((2, 3), dtype=int)
+        self.y2 = 3*np.ones((2, 3), dtype=int)
+        self.ind = [0, 0, 1]
+
+    def test_basic(self):
+        A = np.choose(self.ind, (self.x, self.y))
+        assert_equal(A, [2, 2, 3])
+
+    def test_broadcast1(self):
+        A = np.choose(self.ind, (self.x2, self.y2))
+        assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+
+    def test_broadcast2(self):
+        A = np.choose(self.ind, (self.x, self.y2))
+        assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+
+    @pytest.mark.parametrize("ops",
+        [(1000, np.array([1], dtype=np.uint8)),
+         (-1, np.array([1], dtype=np.uint8)),
+         (1., np.float32(3)),
+         (1., np.array([3], dtype=np.float32))],)
+    def test_output_dtype(self, ops):
+        expected_dt = np.result_type(*ops)
+        assert np.choose([0], ops).dtype == expected_dt
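+
+    # A minimal illustrative sketch (not part of the upstream suite):
+    # ``np.choose(ind, choices)`` builds out[i] = choices[ind[i]][i],
+    # broadcasting the choice arrays against the index array.
+    @staticmethod
+    def _demo_choose_semantics():
+        ind = np.array([0, 1, 0])
+        choices = (np.array([10, 20, 30]), np.array([40, 50, 60]))
+        assert np.array_equal(np.choose(ind, choices), [10, 50, 30])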
+
+
+class TestRepeat:
+    def setup_method(self):
+        self.m = np.array([1, 2, 3, 4, 5, 6])
+        self.m_rect = self.m.reshape((2, 3))
+
+    def test_basic(self):
+        A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
+        assert_equal(A, [1, 2, 2, 2, 3,
+                         3, 4, 5, 6, 6])
+
+    def test_broadcast1(self):
+        A = np.repeat(self.m, 2)
+        assert_equal(A, [1, 1, 2, 2, 3, 3,
+                         4, 4, 5, 5, 6, 6])
+
+    def test_axis_spec(self):
+        A = np.repeat(self.m_rect, [2, 1], axis=0)
+        assert_equal(A, [[1, 2, 3],
+                         [1, 2, 3],
+                         [4, 5, 6]])
+
+        A = np.repeat(self.m_rect, [1, 3, 2], axis=1)
+        assert_equal(A, [[1, 2, 2, 2, 3, 3],
+                         [4, 5, 5, 5, 6, 6]])
+
+    def test_broadcast2(self):
+        A = np.repeat(self.m_rect, 2, axis=0)
+        assert_equal(A, [[1, 2, 3],
+                         [1, 2, 3],
+                         [4, 5, 6],
+                         [4, 5, 6]])
+
+        A = np.repeat(self.m_rect, 2, axis=1)
+        assert_equal(A, [[1, 1, 2, 2, 3, 3],
+                         [4, 4, 5, 5, 6, 6]])
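+
+    # A minimal illustrative sketch (not part of the upstream suite):
+    # without ``axis``, np.repeat flattens its input first.
+    @staticmethod
+    def _demo_repeat_flattens():
+        m = np.array([[1, 2], [3, 4]])
+        assert np.array_equal(np.repeat(m, 2), [1, 1, 2, 2, 3, 3, 4, 4])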
+
+
+# TODO: test for multidimensional
+NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
+
+
+@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object'])
+class TestNeighborhoodIter:
+    # Simple, 2d tests
+    def test_simple2d(self, dt):
+        # Test zero and one padding for simple data type
+        x = np.array([[0, 1], [2, 3]], dtype=dt)
+        r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
+             np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
+             np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
+             np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator(
+                x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero'])
+        assert_array_equal(l, r)
+
+        r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
+             np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
+             np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
+             np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator(
+                x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one'])
+        assert_array_equal(l, r)
+
+        r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
+             np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
+             np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
+             np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator(
+                x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])
+        assert_array_equal(l, r)
+
+        # Test with start in the middle
+        r = [np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
+             np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator(
+                x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'], 2)
+        assert_array_equal(l, r)
+
+    def test_mirror2d(self, dt):
+        x = np.array([[0, 1], [2, 3]], dtype=dt)
+        r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
+             np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
+             np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
+             np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator(
+                x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror'])
+        assert_array_equal(l, r)
+
+    # Simple, 1d tests
+    def test_simple(self, dt):
+        # Test padding with constant values
+        x = np.linspace(1, 5, 5).astype(dt)
+        r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
+        l = _multiarray_tests.test_neighborhood_iterator(
+                x, [-1, 1], x[0], NEIGH_MODE['zero'])
+        assert_array_equal(l, r)
+
+        r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
+        l = _multiarray_tests.test_neighborhood_iterator(
+                x, [-1, 1], x[0], NEIGH_MODE['one'])
+        assert_array_equal(l, r)
+
+        r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
+        l = _multiarray_tests.test_neighborhood_iterator(
+                x, [-1, 1], x[4], NEIGH_MODE['constant'])
+        assert_array_equal(l, r)
+
+    # Test mirror modes
+    def test_mirror(self, dt):
+        x = np.linspace(1, 5, 5).astype(dt)
+        r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
+                [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
+        l = _multiarray_tests.test_neighborhood_iterator(
+                x, [-2, 2], x[1], NEIGH_MODE['mirror'])
+        assert_([i.dtype == dt for i in l])
+        assert_array_equal(l, r)
+
+    # Circular mode
+    def test_circular(self, dt):
+        x = np.linspace(1, 5, 5).astype(dt)
+        r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
+                [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
+        l = _multiarray_tests.test_neighborhood_iterator(
+                x, [-2, 2], x[0], NEIGH_MODE['circular'])
+        assert_array_equal(l, r)
+
+
+# Test stacking neighborhood iterators
+class TestStackedNeighborhoodIter:
+    # Simple, 1d test: stacking 2 constant-padded neigh iterators
+    def test_simple_const(self):
+        dt = np.float64
+        # Test zero and one padding for simple data type
+        x = np.array([1, 2, 3], dtype=dt)
+        r = [np.array([0], dtype=dt),
+             np.array([0], dtype=dt),
+             np.array([1], dtype=dt),
+             np.array([2], dtype=dt),
+             np.array([3], dtype=dt),
+             np.array([0], dtype=dt),
+             np.array([0], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator_oob(
+                x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero'])
+        assert_array_equal(l, r)
+
+        r = [np.array([1, 0, 1], dtype=dt),
+             np.array([0, 1, 2], dtype=dt),
+             np.array([1, 2, 3], dtype=dt),
+             np.array([2, 3, 0], dtype=dt),
+             np.array([3, 0, 1], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator_oob(
+                x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one'])
+        assert_array_equal(l, r)
+
+    # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
+    # mirror padding
+    def test_simple_mirror(self):
+        dt = np.float64
+        # Stacking zero on top of mirror
+        x = np.array([1, 2, 3], dtype=dt)
+        r = [np.array([0, 1, 1], dtype=dt),
+             np.array([1, 1, 2], dtype=dt),
+             np.array([1, 2, 3], dtype=dt),
+             np.array([2, 3, 3], dtype=dt),
+             np.array([3, 3, 0], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator_oob(
+                x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero'])
+        assert_array_equal(l, r)
+
+        # Stacking mirror on top of zero
+        x = np.array([1, 2, 3], dtype=dt)
+        r = [np.array([1, 0, 0], dtype=dt),
+             np.array([0, 0, 1], dtype=dt),
+             np.array([0, 1, 2], dtype=dt),
+             np.array([1, 2, 3], dtype=dt),
+             np.array([2, 3, 0], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator_oob(
+                x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror'])
+        assert_array_equal(l, r)
+
+        # Stacking mirror on top of zero: 2nd
+        x = np.array([1, 2, 3], dtype=dt)
+        r = [np.array([0, 1, 2], dtype=dt),
+             np.array([1, 2, 3], dtype=dt),
+             np.array([2, 3, 0], dtype=dt),
+             np.array([3, 0, 0], dtype=dt),
+             np.array([0, 0, 3], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator_oob(
+                x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror'])
+        assert_array_equal(l, r)
+
+        # Stacking mirror on top of zero: 3rd
+        x = np.array([1, 2, 3], dtype=dt)
+        r = [np.array([1, 0, 0, 1, 2], dtype=dt),
+             np.array([0, 0, 1, 2, 3], dtype=dt),
+             np.array([0, 1, 2, 3, 0], dtype=dt),
+             np.array([1, 2, 3, 0, 0], dtype=dt),
+             np.array([2, 3, 0, 0, 3], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator_oob(
+                x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror'])
+        assert_array_equal(l, r)
+
+    # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
+    # circular padding
+    def test_simple_circular(self):
+        dt = np.float64
+        # Stacking zero on top of circular
+        x = np.array([1, 2, 3], dtype=dt)
+        r = [np.array([0, 3, 1], dtype=dt),
+             np.array([3, 1, 2], dtype=dt),
+             np.array([1, 2, 3], dtype=dt),
+             np.array([2, 3, 1], dtype=dt),
+             np.array([3, 1, 0], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator_oob(
+                x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero'])
+        assert_array_equal(l, r)
+
+        # Stacking circular on top of zero
+        x = np.array([1, 2, 3], dtype=dt)
+        r = [np.array([3, 0, 0], dtype=dt),
+             np.array([0, 0, 1], dtype=dt),
+             np.array([0, 1, 2], dtype=dt),
+             np.array([1, 2, 3], dtype=dt),
+             np.array([2, 3, 0], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator_oob(
+                x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular'])
+        assert_array_equal(l, r)
+
+        # Stacking circular on top of zero: 2nd
+        x = np.array([1, 2, 3], dtype=dt)
+        r = [np.array([0, 1, 2], dtype=dt),
+             np.array([1, 2, 3], dtype=dt),
+             np.array([2, 3, 0], dtype=dt),
+             np.array([3, 0, 0], dtype=dt),
+             np.array([0, 0, 1], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator_oob(
+                x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular'])
+        assert_array_equal(l, r)
+
+        # Stacking circular on top of zero: 3rd
+        x = np.array([1, 2, 3], dtype=dt)
+        r = [np.array([3, 0, 0, 1, 2], dtype=dt),
+             np.array([0, 0, 1, 2, 3], dtype=dt),
+             np.array([0, 1, 2, 3, 0], dtype=dt),
+             np.array([1, 2, 3, 0, 0], dtype=dt),
+             np.array([2, 3, 0, 0, 1], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator_oob(
+                x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular'])
+        assert_array_equal(l, r)
+
+    # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
+    # being strictly within the array
+    def test_simple_strict_within(self):
+        dt = np.float64
+        # Stacking zero on top of zero, first neighborhood strictly inside the
+        # array
+        x = np.array([1, 2, 3], dtype=dt)
+        r = [np.array([1, 2, 3, 0], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator_oob(
+                x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero'])
+        assert_array_equal(l, r)
+
+        # Stacking mirror on top of zero, first neighborhood strictly inside the
+        # array
+        x = np.array([1, 2, 3], dtype=dt)
+        r = [np.array([1, 2, 3, 3], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator_oob(
+                x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror'])
+        assert_array_equal(l, r)
+
+        # Stacking mirror on top of zero, first neighborhood strictly inside the
+        # array
+        x = np.array([1, 2, 3], dtype=dt)
+        r = [np.array([1, 2, 3, 1], dtype=dt)]
+        l = _multiarray_tests.test_neighborhood_iterator_oob(
+                x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular'])
+        assert_array_equal(l, r)
+
+
+class TestWarnings:
+
+    def test_complex_warning(self):
+        x = np.array([1, 2])
+        y = np.array([1-2j, 1+2j])
+
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", np.ComplexWarning)
+            assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
+            assert_equal(x, [1, 2])
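+
+    # A minimal illustrative sketch (not part of the upstream suite): the
+    # warning exists because the assignment silently discards the imaginary
+    # part.
+    @staticmethod
+    def _demo_imaginary_part_lost():
+        x = np.zeros(2)
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", np.ComplexWarning)
+            x[:] = np.array([1 - 2j, 1 + 2j])
+        assert np.array_equal(x, [1.0, 1.0])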
+
+
+class TestMinScalarType:
+
+    def test_unsigned_shortshort(self):
+        dt = np.min_scalar_type(2**8-1)
+        wanted = np.dtype('uint8')
+        assert_equal(wanted, dt)
+
+    def test_unsigned_short(self):
+        dt = np.min_scalar_type(2**16-1)
+        wanted = np.dtype('uint16')
+        assert_equal(wanted, dt)
+
+    def test_unsigned_int(self):
+        dt = np.min_scalar_type(2**32-1)
+        wanted = np.dtype('uint32')
+        assert_equal(wanted, dt)
+
+    def test_unsigned_longlong(self):
+        dt = np.min_scalar_type(2**63-1)
+        wanted = np.dtype('uint64')
+        assert_equal(wanted, dt)
+
+    def test_object(self):
+        dt = np.min_scalar_type(2**64)
+        wanted = np.dtype('O')
+        assert_equal(wanted, dt)
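+
+    # A minimal illustrative sketch (not part of the upstream suite): for
+    # negative values the smallest *signed* type is chosen instead.
+    @staticmethod
+    def _demo_signed_values():
+        assert np.min_scalar_type(-1) == np.dtype('int8')
+        assert np.min_scalar_type(-2**15) == np.dtype('int16')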
+
+
+from numpy.core._internal import _dtype_from_pep3118
+
+
+class TestPEP3118Dtype:
+    def _check(self, spec, wanted):
+        dt = np.dtype(wanted)
+        actual = _dtype_from_pep3118(spec)
+        assert_equal(actual, dt,
+                     err_msg="spec %r != dtype %r" % (spec, wanted))
+
+    def test_native_padding(self):
+        align = np.dtype('i').alignment
+        for j in range(8):
+            if j == 0:
+                s = 'bi'
+            else:
+                s = 'b%dxi' % j
+            self._check('@'+s, {'f0': ('i1', 0),
+                                'f1': ('i', align*(1 + j//align))})
+            self._check('='+s, {'f0': ('i1', 0),
+                                'f1': ('i', 1+j)})
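+
+    # A minimal illustrative sketch (not part of the upstream suite): '@'
+    # requests native alignment (padding before the int), while '=' packs
+    # the fields contiguously, so the aligned struct is never smaller.
+    @staticmethod
+    def _demo_alignment_modes():
+        aligned = _dtype_from_pep3118('@bi')
+        packed = _dtype_from_pep3118('=bi')
+        assert aligned.itemsize >= packed.itemsize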
+
+    def test_native_padding_2(self):
+        # Native padding should work also for structs and sub-arrays
+        self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
+        self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
+
+    def test_trailing_padding(self):
+        # Trailing padding should be included, *and*, the item size
+        # should match the alignment if in aligned mode
+        align = np.dtype('i').alignment
+        size = np.dtype('i').itemsize
+
+        def aligned(n):
+            return align*(1 + (n-1)//align)
+
+        base = dict(formats=['i'], names=['f0'])
+
+        self._check('ix',    dict(itemsize=aligned(size + 1), **base))
+        self._check('ixx',   dict(itemsize=aligned(size + 2), **base))
+        self._check('ixxx',  dict(itemsize=aligned(size + 3), **base))
+        self._check('ixxxx', dict(itemsize=aligned(size + 4), **base))
+        self._check('i7x',   dict(itemsize=aligned(size + 7), **base))
+
+        self._check('^ix',    dict(itemsize=size + 1, **base))
+        self._check('^ixx',   dict(itemsize=size + 2, **base))
+        self._check('^ixxx',  dict(itemsize=size + 3, **base))
+        self._check('^ixxxx', dict(itemsize=size + 4, **base))
+        self._check('^i7x',   dict(itemsize=size + 7, **base))
+
+    def test_native_padding_3(self):
+        dt = np.dtype(
+                [('a', 'b'), ('b', 'i'),
+                    ('sub', np.dtype('b,i')), ('c', 'i')],
+                align=True)
+        self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
+
+        dt = np.dtype(
+                [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
+                    ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
+        self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
+
+    def test_padding_with_array_inside_struct(self):
+        dt = np.dtype(
+                [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
+                    ('d', 'i')],
+                align=True)
+        self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
+
+    def test_byteorder_inside_struct(self):
+        # The byte order after @T{=i} should be '=', not '@'.
+        # Check this by noting the absence of native alignment.
+        self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
+                                 'f1': ('i', 5)})
+
+    def test_intra_padding(self):
+        # Natively aligned sub-arrays may require some internal padding
+        align = np.dtype('i').alignment
+        size = np.dtype('i').itemsize
+
+        def aligned(n):
+            return (align*(1 + (n-1)//align))
+
+        self._check('(3)T{ix}', (dict(
+            names=['f0'],
+            formats=['i'],
+            offsets=[0],
+            itemsize=aligned(size + 1)
+        ), (3,)))
+
+    def test_char_vs_string(self):
+        dt = np.dtype('c')
+        self._check('c', dt)
+
+        dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
+        self._check('4c4s', dt)
+
+    def test_field_order(self):
+        # gh-9053 - previously, we relied on dictionary key order
+        self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')])
+        self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')])
+
+    def test_unnamed_fields(self):
+        self._check('ii',     [('f0', 'i'), ('f1', 'i')])
+        self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])
+
+        self._check('i', 'i')
+        self._check('i:f0:', [('f0', 'i')])
+
+
+class TestNewBufferProtocol:
+    """ Test PEP3118 buffers """
+
+    def _check_roundtrip(self, obj):
+        obj = np.asarray(obj)
+        x = memoryview(obj)
+        y = np.asarray(x)
+        y2 = np.array(x)
+        assert_(not y.flags.owndata)
+        assert_(y2.flags.owndata)
+
+        assert_equal(y.dtype, obj.dtype)
+        assert_equal(y.shape, obj.shape)
+        assert_array_equal(obj, y)
+
+        assert_equal(y2.dtype, obj.dtype)
+        assert_equal(y2.shape, obj.shape)
+        assert_array_equal(obj, y2)
+
+    def test_roundtrip(self):
+        x = np.array([1, 2, 3, 4, 5], dtype='i4')
+        self._check_roundtrip(x)
+
+        x = np.array([[1, 2], [3, 4]], dtype=np.float64)
+        self._check_roundtrip(x)
+
+        x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
+        self._check_roundtrip(x)
+
+        dt = [('a', 'b'),
+              ('b', 'h'),
+              ('c', 'i'),
+              ('d', 'l'),
+              ('dx', 'q'),
+              ('e', 'B'),
+              ('f', 'H'),
+              ('g', 'I'),
+              ('h', 'L'),
+              ('hx', 'Q'),
+              ('i', np.single),
+              ('j', np.double),
+              ('k', np.longdouble),
+              ('ix', np.csingle),
+              ('jx', np.cdouble),
+              ('kx', np.clongdouble),
+              ('l', 'S4'),
+              ('m', 'U4'),
+              ('n', 'V3'),
+              ('o', '?'),
+              ('p', np.half),
+              ]
+        x = np.array(
+                [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                    b'aaaa', 'bbbb', b'xxx', True, 1.0)],
+                dtype=dt)
+        self._check_roundtrip(x)
+
+        x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
+        self._check_roundtrip(x)
+
+        x = np.array([1, 2, 3], dtype='>i2')
+        self._check_roundtrip(x)
+
+        x = np.array([1, 2, 3], dtype='<i2')
+        self._check_roundtrip(x)
+
+    def test_roundtrip_single_types(self):
+        for typ in np.sctypeDict.values():
+            dtype = np.dtype(typ)
+
+            if dtype.char in 'Mm':
+                # datetimes cannot be used in buffers
+                continue
+            if dtype.char == 'V':
+                # skipped because "V" is not a valid format
+                continue
+
+            x = np.zeros(4, dtype=dtype)
+            self._check_roundtrip(x)
+
+            if dtype.char not in 'qQgG':
+                dt = dtype.newbyteorder('<')
+                x = np.zeros(4, dtype=dt)
+                self._check_roundtrip(x)
+
+                dt = dtype.newbyteorder('>')
+                x = np.zeros(4, dtype=dt)
+                self._check_roundtrip(x)
+
+    def test_roundtrip_scalar(self):
+        # Issue #4015.
+        self._check_roundtrip(0)
+
+    def test_invalid_buffer_format(self):
+        # datetime64 cannot be used fully in a buffer yet
+        # Should be fixed in the next Numpy major release
+        dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+        a = np.empty(3, dt)
+        assert_raises((ValueError, BufferError), memoryview, a)
+        assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))
+
+    def test_export_simple_1d(self):
+        x = np.array([1, 2, 3, 4, 5], dtype='i')
+        y = memoryview(x)
+        assert_equal(y.format, 'i')
+        assert_equal(y.shape, (5,))
+        assert_equal(y.ndim, 1)
+        assert_equal(y.strides, (4,))
+        assert_equal(y.suboffsets, ())
+        assert_equal(y.itemsize, 4)
+
+    def test_export_simple_nd(self):
+        x = np.array([[1, 2], [3, 4]], dtype=np.float64)
+        y = memoryview(x)
+        assert_equal(y.format, 'd')
+        assert_equal(y.shape, (2, 2))
+        assert_equal(y.ndim, 2)
+        assert_equal(y.strides, (16, 8))
+        assert_equal(y.suboffsets, ())
+        assert_equal(y.itemsize, 8)
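+
+    # A minimal illustrative sketch (not part of the upstream suite):
+    # memoryview strides come straight from the array, expressed in bytes
+    # per step along each axis.
+    @staticmethod
+    def _demo_strides_in_bytes():
+        x = np.zeros((2, 3), dtype=np.int32)
+        m = memoryview(x)
+        assert m.strides == (12, 4)  # one row = 3 * 4 bytes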
+
+    def test_export_discontiguous(self):
+        x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
+        y = memoryview(x)
+        assert_equal(y.format, 'f')
+        assert_equal(y.shape, (3, 3))
+        assert_equal(y.ndim, 2)
+        assert_equal(y.strides, (36, 4))
+        assert_equal(y.suboffsets, ())
+        assert_equal(y.itemsize, 4)
+
+    def test_export_record(self):
+        dt = [('a', 'b'),
+              ('b', 'h'),
+              ('c', 'i'),
+              ('d', 'l'),
+              ('dx', 'q'),
+              ('e', 'B'),
+              ('f', 'H'),
+              ('g', 'I'),
+              ('h', 'L'),
+              ('hx', 'Q'),
+              ('i', np.single),
+              ('j', np.double),
+              ('k', np.longdouble),
+              ('ix', np.csingle),
+              ('jx', np.cdouble),
+              ('kx', np.clongdouble),
+              ('l', 'S4'),
+              ('m', 'U4'),
+              ('n', 'V3'),
+              ('o', '?'),
+              ('p', np.half),
+              ]
+        x = np.array(
+                [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                    b'aaaa', 'bbbb', b'   ', True, 1.0)],
+                dtype=dt)
+        y = memoryview(x)
+        assert_equal(y.shape, (1,))
+        assert_equal(y.ndim, 1)
+        assert_equal(y.suboffsets, ())
+
+        sz = sum([np.dtype(b).itemsize for a, b in dt])
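+        # the buffer export is packed, so the struct itemsize is simply the
+        # sum of the individual field sizes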
+        if np.dtype('l').itemsize == 4:
+            assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
+        else:
+            assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
+        # Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides
+        if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
+            assert_equal(y.strides, (sz,))
+        assert_equal(y.itemsize, sz)
+
+    def test_export_subarray(self):
+        x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
+        y = memoryview(x)
+        assert_equal(y.format, 'T{(2,2)i:a:}')
+        assert_equal(y.shape, ())
+        assert_equal(y.ndim, 0)
+        assert_equal(y.strides, ())
+        assert_equal(y.suboffsets, ())
+        assert_equal(y.itemsize, 16)
+
+    def test_export_endian(self):
+        x = np.array([1, 2, 3], dtype='>i')
+        y = memoryview(x)
+        if sys.byteorder == 'little':
+            assert_equal(y.format, '>i')
+        else:
+            assert_equal(y.format, 'i')
+
+        x = np.array([1, 2, 3], dtype='<i')
+        y = memoryview(x)
+        if sys.byteorder == 'little':
+            assert_equal(y.format, 'i')
+        else:
+            assert_equal(y.format, '<i')
+
+
+class TestConversion:
+    def test_array_scalar_relational_operation(self):
+        # All integer
+        for dt1 in np.typecodes['AllInteger']:
+            assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
+            assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
+
+            for dt2 in np.typecodes['AllInteger']:
+                assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
+                        "type %s and %s failed" % (dt1, dt2))
+                assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
+                        "type %s and %s failed" % (dt1, dt2))
+
+        # Unsigned integers
+        for dt1 in 'BHILQP':
+            assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
+            assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
+            assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
+
+            # Unsigned vs signed
+            for dt2 in 'bhilqp':
+                assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
+                        "type %s and %s failed" % (dt1, dt2))
+                assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
+                        "type %s and %s failed" % (dt1, dt2))
+                assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
+                        "type %s and %s failed" % (dt1, dt2))
+
+        # Signed integers and floats
+        for dt1 in 'bhlqp' + np.typecodes['Float']:
+            assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
+            assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
+            assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
+
+            for dt2 in 'bhlqp' + np.typecodes['Float']:
+                assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
+                        "type %s and %s failed" % (dt1, dt2))
+                assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
+                        "type %s and %s failed" % (dt1, dt2))
+                assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
+                        "type %s and %s failed" % (dt1, dt2))
+
+    def test_to_bool_scalar(self):
+        assert_equal(bool(np.array([False])), False)
+        assert_equal(bool(np.array([True])), True)
+        assert_equal(bool(np.array([[42]])), True)
+        assert_raises(ValueError, bool, np.array([1, 2]))
+
+        class NotConvertible:
+            def __bool__(self):
+                raise NotImplementedError
+
+        assert_raises(NotImplementedError, bool, np.array(NotConvertible()))
+        assert_raises(NotImplementedError, bool, np.array([NotConvertible()]))
+        if IS_PYSTON:
+            pytest.skip("Pyston disables recursion checking")
+
+        self_containing = np.array([None])
+        self_containing[0] = self_containing
+
+        Error = RecursionError
+
+        assert_raises(Error, bool, self_containing)  # previously stack overflow
+        self_containing[0] = None  # resolve circular reference
+
+    def test_to_int_scalar(self):
+        # gh-9972 means that these aren't always the same
+        int_funcs = (int, lambda x: x.__int__())
+        for int_func in int_funcs:
+            assert_equal(int_func(np.array(0)), 0)
+            with assert_warns(DeprecationWarning):
+                assert_equal(int_func(np.array([1])), 1)
+            with assert_warns(DeprecationWarning):
+                assert_equal(int_func(np.array([[42]])), 42)
+            assert_raises(TypeError, int_func, np.array([1, 2]))
+
+            # gh-9972
+            assert_equal(4, int_func(np.array('4')))
+            assert_equal(5, int_func(np.bytes_(b'5')))
+            assert_equal(6, int_func(np.str_('6')))
+
+            # The delegation of int() to __trunc__ was deprecated in
+            # Python 3.11.
+            if sys.version_info < (3, 11):
+                class HasTrunc:
+                    def __trunc__(self):
+                        return 3
+                assert_equal(3, int_func(np.array(HasTrunc())))
+                with assert_warns(DeprecationWarning):
+                    assert_equal(3, int_func(np.array([HasTrunc()])))
+            else:
+                pass
+
+            class NotConvertible:
+                def __int__(self):
+                    raise NotImplementedError
+            assert_raises(NotImplementedError,
+                int_func, np.array(NotConvertible()))
+            with assert_warns(DeprecationWarning):
+                assert_raises(NotImplementedError,
+                    int_func, np.array([NotConvertible()]))
+
+
+class TestWhere:
+    def test_basic(self):
+        dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128,
+               np.longdouble, np.clongdouble]
+        for dt in dts:
+            c = np.ones(53, dtype=bool)
+            assert_equal(np.where( c, dt(0), dt(1)), dt(0))
+            assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
+            assert_equal(np.where(True, dt(0), dt(1)), dt(0))
+            assert_equal(np.where(False, dt(0), dt(1)), dt(1))
+            d = np.ones_like(c).astype(dt)
+            e = np.zeros_like(d)
+            r = d.astype(dt)
+            c[7] = False
+            r[7] = e[7]
+            assert_equal(np.where(c, e, e), e)
+            assert_equal(np.where(c, d, e), r)
+            assert_equal(np.where(c, d, e[0]), r)
+            assert_equal(np.where(c, d[0], e), r)
+            assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
+            assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
+            assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
+            assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
+            assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
+            assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
+            assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
+
+    def test_exotic(self):
+        # object
+        assert_array_equal(np.where(True, None, None), np.array(None))
+        # zero sized
+        m = np.array([], dtype=bool).reshape(0, 3)
+        b = np.array([], dtype=np.float64).reshape(0, 3)
+        assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
+
+        # object cast
+        d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
+                      0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
+                      1.267, 0.229, -1.39, 0.487])
+        nan = float('NaN')
+        e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
+                     'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
+                     dtype=object)
+        m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,
+                      0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)
+
+        r = e[:]
+        r[np.where(m)] = d[np.where(m)]
+        assert_array_equal(np.where(m, d, e), r)
+
+        r = e[:]
+        r[np.where(~m)] = d[np.where(~m)]
+        assert_array_equal(np.where(m, e, d), r)
+
+        assert_array_equal(np.where(m, e, e), e)
+
+        # minimal dtype result with NaN scalar (e.g. required by pandas)
+        d = np.array([1., 2.], dtype=np.float32)
+        e = float('NaN')
+        assert_equal(np.where(True, d, e).dtype, np.float32)
+        e = float('Infinity')
+        assert_equal(np.where(True, d, e).dtype, np.float32)
+        e = float('-Infinity')
+        assert_equal(np.where(True, d, e).dtype, np.float32)
+        # also check upcast
+        e = float(1e150)
+        assert_equal(np.where(True, d, e).dtype, np.float64)
+
+    def test_ndim(self):
+        c = [True, False]
+        a = np.zeros((2, 25))
+        b = np.ones((2, 25))
+        r = np.where(np.array(c)[:,np.newaxis], a, b)
+        assert_array_equal(r[0], a[0])
+        assert_array_equal(r[1], b[0])
+
+        a = a.T
+        b = b.T
+        r = np.where(c, a, b)
+        assert_array_equal(r[:,0], a[:,0])
+        assert_array_equal(r[:,1], b[:,0])
+
+    def test_dtype_mix(self):
+        c = np.array([False, True, False, False, False, False, True, False,
+                     False, False, True, False])
+        a = np.uint32(1)
+        b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
+                      dtype=np.float64)
+        r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
+                     dtype=np.float64)
+        assert_equal(np.where(c, a, b), r)
+
+        a = a.astype(np.float32)
+        b = b.astype(np.int64)
+        assert_equal(np.where(c, a, b), r)
+
+        # non bool mask
+        c = c.astype(int)
+        c[c != 0] = 34242324
+        assert_equal(np.where(c, a, b), r)
+        # invert
+        tmpmask = c != 0
+        c[c == 0] = 41247212
+        c[tmpmask] = 0
+        assert_equal(np.where(c, b, a), r)
+
+    def test_foreign(self):
+        c = np.array([False, True, False, False, False, False, True, False,
+                     False, False, True, False])
+        r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
+                     dtype=np.float64)
+        a = np.ones(1, dtype='>i4')
+        b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
+                     dtype=np.float64)
+        assert_equal(np.where(c, a, b), r)
+
+        b = b.astype('>f8')
+        assert_equal(np.where(c, a, b), r)
+
+        a = a.astype('i4')
+        assert_equal(np.where(c, a, b), r)
+
+    def test_error(self):
+        c = [True, True]
+        a = np.ones((4, 5))
+        b = np.ones((5, 5))
+        assert_raises(ValueError, np.where, c, a, a)
+        assert_raises(ValueError, np.where, c[0], a, b)
+
+    def test_string(self):
+        # gh-4778 check strings are properly filled with nulls
+        a = np.array("abc")
+        b = np.array("x" * 753)
+        assert_equal(np.where(True, a, b), "abc")
+        assert_equal(np.where(False, b, a), "abc")
+
+        # check native datatype sized strings
+        a = np.array("abcd")
+        b = np.array("x" * 8)
+        assert_equal(np.where(True, a, b), "abcd")
+        assert_equal(np.where(False, b, a), "abcd")
+
+    def test_empty_result(self):
+        # pass empty where result through an assignment which reads the data of
+        # empty arrays, error detectable with valgrind, see gh-8922
+        x = np.zeros((1, 1))
+        ibad = np.vstack(np.where(x == 99.))
+        assert_array_equal(ibad,
+                           np.atleast_2d(np.array([[],[]], dtype=np.intp)))
+
+    def test_largedim(self):
+        # invalid read regression gh-9304
+        shape = [10, 2, 3, 4, 5, 6]
+        np.random.seed(2)
+        array = np.random.rand(*shape)
+
+        for i in range(10):
+            benchmark = array.nonzero()
+            result = array.nonzero()
+            assert_array_equal(benchmark, result)
+
+    def test_kwargs(self):
+        a = np.zeros(1)
+        with assert_raises(TypeError):
+            np.where(a, x=a, y=a)
+
+
+if not IS_PYPY:
+    # sys.getsizeof() is not valid on PyPy
+    class TestSizeOf:
+
+        def test_empty_array(self):
+            x = np.array([])
+            assert_(sys.getsizeof(x) > 0)
+
+        def check_array(self, dtype):
+            elem_size = dtype(0).itemsize
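+            # getsizeof includes the ndarray header as well as the data
+            # buffer, so it must strictly exceed length * elem_size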
+
+            for length in [10, 50, 100, 500]:
+                x = np.arange(length, dtype=dtype)
+                assert_(sys.getsizeof(x) > length * elem_size)
+
+        def test_array_int32(self):
+            self.check_array(np.int32)
+
+        def test_array_int64(self):
+            self.check_array(np.int64)
+
+        def test_array_float32(self):
+            self.check_array(np.float32)
+
+        def test_array_float64(self):
+            self.check_array(np.float64)
+
+        def test_view(self):
+            d = np.ones(100)
+            assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
+
+        def test_reshape(self):
+            d = np.ones(100)
+            assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
+
+        @_no_tracing
+        def test_resize(self):
+            d = np.ones(100)
+            old = sys.getsizeof(d)
+            d.resize(50)
+            assert_(old > sys.getsizeof(d))
+            d.resize(150)
+            assert_(old < sys.getsizeof(d))
+
+        def test_error(self):
+            d = np.ones(100)
+            assert_raises(TypeError, d.__sizeof__, "a")
+
+
+class TestHashing:
+
+    def test_arrays_not_hashable(self):
+        x = np.ones(3)
+        assert_raises(TypeError, hash, x)
+
+    def test_collections_hashable(self):
+        x = np.array([])
+        assert_(not isinstance(x, collections.abc.Hashable))
+
+
+class TestArrayPriority:
+    # This will go away when __array_priority__ is settled, meanwhile
+    # it serves to check unintended changes.
+    op = operator
+    binary_ops = [
+        op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
+        op.and_, op.or_, op.xor, op.lshift, op.rshift, op.gt,
+        op.ge, op.lt, op.le, op.ne, op.eq
+        ]
+
+    class Foo(np.ndarray):
+        __array_priority__ = 100.
+
+        def __new__(cls, *args, **kwargs):
+            return np.array(*args, **kwargs).view(cls)
+
+    class Bar(np.ndarray):
+        __array_priority__ = 101.
+
+        def __new__(cls, *args, **kwargs):
+            return np.array(*args, **kwargs).view(cls)
+
+    class Other:
+        __array_priority__ = 1000.
+
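+        # every binary op returns a fresh Other(); because Other has the
+        # highest __array_priority__, ndarray defers to it on both sides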
+        def _all(self, other):
+            return self.__class__()
+
+        __add__ = __radd__ = _all
+        __sub__ = __rsub__ = _all
+        __mul__ = __rmul__ = _all
+        __pow__ = __rpow__ = _all
+        __div__ = __rdiv__ = _all
+        __mod__ = __rmod__ = _all
+        __truediv__ = __rtruediv__ = _all
+        __floordiv__ = __rfloordiv__ = _all
+        __and__ = __rand__ = _all
+        __xor__ = __rxor__ = _all
+        __or__ = __ror__ = _all
+        __lshift__ = __rlshift__ = _all
+        __rshift__ = __rrshift__ = _all
+        __eq__ = _all
+        __ne__ = _all
+        __gt__ = _all
+        __ge__ = _all
+        __lt__ = _all
+        __le__ = _all
+
+    def test_ndarray_subclass(self):
+        a = np.array([1, 2])
+        b = self.Bar([1, 2])
+        for f in self.binary_ops:
+            msg = repr(f)
+            assert_(isinstance(f(a, b), self.Bar), msg)
+            assert_(isinstance(f(b, a), self.Bar), msg)
+
+    def test_ndarray_other(self):
+        a = np.array([1, 2])
+        b = self.Other()
+        for f in self.binary_ops:
+            msg = repr(f)
+            assert_(isinstance(f(a, b), self.Other), msg)
+            assert_(isinstance(f(b, a), self.Other), msg)
+
+    def test_subclass_subclass(self):
+        a = self.Foo([1, 2])
+        b = self.Bar([1, 2])
+        for f in self.binary_ops:
+            msg = repr(f)
+            assert_(isinstance(f(a, b), self.Bar), msg)
+            assert_(isinstance(f(b, a), self.Bar), msg)
+
+    def test_subclass_other(self):
+        a = self.Foo([1, 2])
+        b = self.Other()
+        for f in self.binary_ops:
+            msg = repr(f)
+            assert_(isinstance(f(a, b), self.Other), msg)
+            assert_(isinstance(f(b, a), self.Other), msg)
+
+
+class TestBytestringArrayNonzero:
+
+    def test_empty_bstring_array_is_falsey(self):
+        assert_(not np.array([''], dtype=str))
+
+    def test_whitespace_bstring_array_is_falsey(self):
+        a = np.array(['spam'], dtype=str)
+        a[0] = '  \0\0'
+        assert_(not a)
+
+    def test_all_null_bstring_array_is_falsey(self):
+        a = np.array(['spam'], dtype=str)
+        a[0] = '\0\0\0\0'
+        assert_(not a)
+
+    def test_null_inside_bstring_array_is_truthy(self):
+        a = np.array(['spam'], dtype=str)
+        a[0] = ' \0 \0'
+        assert_(a)
+
+
+class TestUnicodeEncoding:
+    """
+    Tests for encoding-related bugs, such as UCS2 vs UCS4, round-tripping
+    issues, etc.
+    """
+    def test_round_trip(self):
+        """ Tests that GETITEM, SETITEM, and PyArray_Scalar roundtrip """
+        # gh-15363
+        arr = np.zeros(shape=(), dtype="U1")
+        for i in range(1, sys.maxunicode + 1):
+            expected = chr(i)
+            arr[()] = expected
+            assert arr[()] == expected
+            assert arr.item() == expected
+
+    def test_assign_scalar(self):
+        # gh-3258
+        l = np.array(['aa', 'bb'])
+        l[:] = np.str_('cc')
+        assert_equal(l, ['cc', 'cc'])
+
+    def test_fill_scalar(self):
+        # gh-7227
+        l = np.array(['aa', 'bb'])
+        l.fill(np.str_('cc'))
+        assert_equal(l, ['cc', 'cc'])
+
+
+class TestUnicodeArrayNonzero:
+
+    def test_empty_ustring_array_is_falsey(self):
+        assert_(not np.array([''], dtype=np.str_))
+
+    def test_whitespace_ustring_array_is_falsey(self):
+        a = np.array(['eggs'], dtype=np.str_)
+        a[0] = '  \0\0'
+        assert_(not a)
+
+    def test_all_null_ustring_array_is_falsey(self):
+        a = np.array(['eggs'], dtype=np.str_)
+        a[0] = '\0\0\0\0'
+        assert_(not a)
+
+    def test_null_inside_ustring_array_is_truthy(self):
+        a = np.array(['eggs'], dtype=np.str_)
+        a[0] = ' \0 \0'
+        assert_(a)
+
+
+class TestFormat:
+
+    def test_0d(self):
+        a = np.array(np.pi)
+        assert_equal('{:0.3g}'.format(a), '3.14')
+        assert_equal('{:0.3g}'.format(a[()]), '3.14')
+
+    def test_1d_no_format(self):
+        a = np.array([np.pi])
+        assert_equal('{}'.format(a), str(a))
+
+    def test_1d_format(self):
+        # until gh-5543, ensure that the behaviour matches what it used to be
+        a = np.array([np.pi])
+        assert_raises(TypeError, '{:30}'.format, a)
+
+from numpy.testing import IS_PYPY
+
+class TestCTypes:
+
+    def test_ctypes_is_available(self):
+        test_arr = np.array([[1, 2, 3], [4, 5, 6]])
+
+        assert_equal(ctypes, test_arr.ctypes._ctypes)
+        assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
+
+    def test_ctypes_is_not_available(self):
+        from numpy.core import _internal
+        _internal.ctypes = None
+        try:
+            test_arr = np.array([[1, 2, 3], [4, 5, 6]])
+
+            assert_(isinstance(test_arr.ctypes._ctypes,
+                               _internal._missing_ctypes))
+            assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
+        finally:
+            _internal.ctypes = ctypes
+
+    def _make_readonly(x):
+        x.flags.writeable = False
+        return x
+
+    @pytest.mark.parametrize('arr', [
+        np.array([1, 2, 3]),
+        np.array([['one', 'two'], ['three', 'four']]),
+        np.array((1, 2), dtype='i4,i4'),
+        np.zeros((2,), dtype=
+            np.dtype(dict(
+                formats=['<i4', '<i4'],
+                names=['a', 'b'],
+                offsets=[0, 2],
+                itemsize=6
+            ))
+        ),
+        np.array([None], dtype=object),
+        np.array([]),
+        np.empty((0, 0)),
+        _make_readonly(np.array([1, 2, 3])),
+    ], ids=[
+        '1d',
+        '2d',
+        'structured',
+        'overlapping',
+        'object',
+        'empty',
+        'empty-2d',
+        'readonly'
+    ])
+    def test_ctypes_data_as_holds_reference(self, arr):
+        # gh-9647
+        # create a copy so that pytest.mark.parametrize is not holding a
+        # reference to the original array
+        arr = arr.copy()
+
+        arr_ref = weakref.ref(arr)
+
+        ctypes_ptr = arr.ctypes.data_as(ctypes.c_char_p)
+
+        # `ctypes_ptr` should hold onto `arr`
+        del arr
+        break_cycles()
+        assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
+
+        # but when the `ctypes_ptr` object dies, so should `arr`
+        del ctypes_ptr
+        if IS_PYPY:
+            # Pypy does not recycle arr objects immediately. Trigger gc to
+            # release arr. Cpython uses refcounts. An explicit call to gc
+            # should not be needed here.
+            break_cycles()
+        assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
+
+
+class TestWritebackIfCopy:
+    # all these tests use the WRITEBACKIFCOPY mechanism
+    def test_insert_noncontiguous(self):
+        a = np.arange(6).reshape(2,3).T # force non-c-contiguous
+        # uses arr_insert
+        np.place(a, a>2, [44, 55])
+        assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))
+        # hit one of the failing paths
+        assert_raises(ValueError, np.place, a, a>20, [])
+
+    def test_put_noncontiguous(self):
+        a = np.arange(6).reshape(2,3).T # force non-c-contiguous
+        np.put(a, [0, 2], [44, 55])
+        assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]]))
+
+    def test_putmask_noncontiguous(self):
+        a = np.arange(6).reshape(2,3).T # force non-c-contiguous
+        # uses arr_putmask
+        np.putmask(a, a>2, a**2)
+        assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]]))
+
+    def test_take_mode_raise(self):
+        a = np.arange(6, dtype='int')
+        out = np.empty(2, dtype='int')
+        np.take(a, [0, 2], out=out, mode='raise')
+        assert_equal(out, np.array([0, 2]))
+
+    def test_choose_mod_raise(self):
+        a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]])
+        out = np.empty((3,3), dtype='int')
+        choices = [-10, 10]
+        np.choose(a, choices, out=out, mode='raise')
+        assert_equal(out, np.array([[ 10, -10,  10],
+                                    [-10,  10, -10],
+                                    [ 10, -10,  10]]))
+
+    def test_flatiter__array__(self):
+        a = np.arange(9).reshape(3,3)
+        b = a.T.flat
+        c = b.__array__()
+        # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics
+        del c
+
+    def test_dot_out(self):
+        # if HAVE_CBLAS, will use WRITEBACKIFCOPY
+        a = np.arange(9, dtype=float).reshape(3,3)
+        b = np.dot(a, a, out=a)
+        assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]]))
+
+    def test_view_assign(self):
+        from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve
+
+        arr = np.arange(9).reshape(3, 3).T
+        arr_wb = npy_create_writebackifcopy(arr)
+        assert_(arr_wb.flags.writebackifcopy)
+        assert_(arr_wb.base is arr)
+        arr_wb[...] = -100
+        npy_resolve(arr_wb)
+        # arr changes after resolve, even though we assigned to arr_wb
+        assert_equal(arr, -100)
+        # after resolve, the two arrays no longer reference each other
+        assert_(arr_wb.ctypes.data != 0)
+        assert_equal(arr_wb.base, None)
+        # assigning to arr_wb does not get transferred to arr
+        arr_wb[...] = 100
+        assert_equal(arr, -100)
+
+    @pytest.mark.leaks_references(
+            reason="increments self in dealloc; ignore since deprecated path.")
+    def test_dealloc_warning(self):
+        with suppress_warnings() as sup:
+            sup.record(RuntimeWarning)
+            arr = np.arange(9).reshape(3, 3)
+            v = arr.T
+            _multiarray_tests.npy_abuse_writebackifcopy(v)
+            assert len(sup.log) == 1
+
+    def test_view_discard_refcount(self):
+        from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard
+
+        arr = np.arange(9).reshape(3, 3).T
+        orig = arr.copy()
+        if HAS_REFCOUNT:
+            arr_cnt = sys.getrefcount(arr)
+        arr_wb = npy_create_writebackifcopy(arr)
+        assert_(arr_wb.flags.writebackifcopy)
+        assert_(arr_wb.base is arr)
+        arr_wb[...] = -100
+        npy_discard(arr_wb)
+        # arr remains unchanged after discard
+        assert_equal(arr, orig)
+        # after discard, the two arrays no longer reference each other
+        assert_(arr_wb.ctypes.data != 0)
+        assert_equal(arr_wb.base, None)
+        if HAS_REFCOUNT:
+            assert_equal(arr_cnt, sys.getrefcount(arr))
+        # assigning to arr_wb does not get transferred to arr
+        arr_wb[...] = 100
+        assert_equal(arr, orig)
+
+
+class TestArange:
+    def test_infinite(self):
+        assert_raises_regex(
+            ValueError, "size exceeded",
+            np.arange, 0, np.inf
+        )
+
+    def test_nan_step(self):
+        assert_raises_regex(
+            ValueError, "cannot compute length",
+            np.arange, 0, 1, np.nan
+        )
+
+    def test_zero_step(self):
+        assert_raises(ZeroDivisionError, np.arange, 0, 10, 0)
+        assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0)
+
+        # empty range
+        assert_raises(ZeroDivisionError, np.arange, 0, 0, 0)
+        assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0)
+
+    def test_require_range(self):
+        assert_raises(TypeError, np.arange)
+        assert_raises(TypeError, np.arange, step=3)
+        assert_raises(TypeError, np.arange, dtype='int64')
+        assert_raises(TypeError, np.arange, start=4)
+
+    def test_start_stop_kwarg(self):
+        keyword_stop = np.arange(stop=3)
+        keyword_zerotostop = np.arange(start=0, stop=3)
+        keyword_start_stop = np.arange(start=3, stop=9)
+
+        assert len(keyword_stop) == 3
+        assert len(keyword_zerotostop) == 3
+        assert len(keyword_start_stop) == 6
+        assert_array_equal(keyword_stop, keyword_zerotostop)
+
+    def test_arange_booleans(self):
+        # Arange makes some sense for booleans and works up to length 2.
+        # But it is weird since `arange(2, 4, dtype=bool)` works.
+        # Arguably, much or all of this could be deprecated/removed.
+        res = np.arange(False, dtype=bool)
+        assert_array_equal(res, np.array([], dtype="bool"))
+
+        res = np.arange(True, dtype="bool")
+        assert_array_equal(res, [False])
+
+        res = np.arange(2, dtype="bool")
+        assert_array_equal(res, [False, True])
+
+        # This case is especially weird, but drops out without special case:
+        res = np.arange(6, 8, dtype="bool")
+        assert_array_equal(res, [True, True])
+
+        with pytest.raises(TypeError):
+            np.arange(3, dtype="bool")
+
+    @pytest.mark.parametrize("dtype", ["S3", "U", "5i"])
+    def test_rejects_bad_dtypes(self, dtype):
+        dtype = np.dtype(dtype)
+        DType_name = re.escape(str(type(dtype)))
+        with pytest.raises(TypeError,
+                match=rf"arange\(\) not supported for inputs .* {DType_name}"):
+            np.arange(2, dtype=dtype)
+
+    def test_rejects_strings(self):
+        # Explicitly test error for strings which may call "b" - "a":
+        DType_name = re.escape(str(type(np.array("a").dtype)))
+        with pytest.raises(TypeError,
+                match=rf"arange\(\) not supported for inputs .* {DType_name}"):
+            np.arange("a", "b")
+
+    def test_byteswapped(self):
+        res_be = np.arange(1, 1000, dtype=">i4")
+        res_le = np.arange(1, 1000, dtype="<i4")
+        assert res_be.dtype == ">i4"
+        assert res_le.dtype == "<i4"
+        assert_array_equal(res_le, res_be)
+
+
+@pytest.mark.parametrize("op", [
+        operator.eq, operator.ne, operator.le, operator.lt, operator.ge,
+        operator.gt])
+def test_comparisons_forwards_error(op):
+    class NotArray:
+        def __array__(self):
+            raise TypeError("run you fools")
+
+    with pytest.raises(TypeError, match="run you fools"):
+        op(np.arange(2), NotArray())
+
+    with pytest.raises(TypeError, match="run you fools"):
+        op(NotArray(), np.arange(2))
+
+
+def test_richcompare_scalar_boolean_singleton_return():
+    # These are currently guaranteed to be the boolean singletons, but maybe
+    # returning NumPy booleans would also be OK:
+    assert (np.array(0) == "a") is False
+    assert (np.array(0) != "a") is True
+    assert (np.int16(0) == "a") is False
+    assert (np.int16(0) != "a") is True
+
+
+@pytest.mark.parametrize("op", [
+        operator.eq, operator.ne, operator.le, operator.lt, operator.ge,
+        operator.gt])
+def test_ragged_comparison_fails(op):
+    # This needs to convert the internal array to True/False, which fails:
+    a = np.array([1, np.array([1, 2, 3])], dtype=object)
+    b = np.array([1, np.array([1, 2, 3])], dtype=object)
+
+    with pytest.raises(ValueError, match="The truth value.*ambiguous"):
+        op(a, b)
+
+
+@pytest.mark.parametrize(
+    ["fun", "npfun"],
+    [
+        (_multiarray_tests.npy_cabs, np.absolute),
+        (_multiarray_tests.npy_carg, np.angle)
+    ]
+)
+@pytest.mark.parametrize("x", [1, np.inf, -np.inf, np.nan])
+@pytest.mark.parametrize("y", [1, np.inf, -np.inf, np.nan])
+@pytest.mark.parametrize("test_dtype", np.complexfloating.__subclasses__())
+def test_npymath_complex(fun, npfun, x, y, test_dtype):
+    # Smoketest npymath functions
+    z = test_dtype(complex(x, y))
+    with np.errstate(invalid='ignore'):
+        # Fallback implementations may emit a warning for +-inf (see gh-24876):
+        #     RuntimeWarning: invalid value encountered in absolute
+        got = fun(z)
+        expected = npfun(z)
+        assert_allclose(got, expected)
+
+
+def test_npymath_real():
+    # Smoketest npymath functions
+    from numpy.core._multiarray_tests import (
+        npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh)
+
+    funcs = {npy_log10: np.log10,
+             npy_cosh: np.cosh,
+             npy_sinh: np.sinh,
+             npy_tan: np.tan,
+             npy_tanh: np.tanh}
+    vals = (1, np.inf, -np.inf, np.nan)
+    types = (np.float32, np.float64, np.longdouble)
+
+    with np.errstate(all='ignore'):
+        for fun, npfun in funcs.items():
+            for x, t in itertools.product(vals, types):
+                z = t(x)
+                got = fun(z)
+                expected = npfun(z)
+                assert_allclose(got, expected)
+
+def test_uintalignment_and_alignment():
+    # alignment code needs to satisfy these requirements:
+    #  1. numpy structs match C struct layout
+    #  2. ufuncs/casting is safe wrt aligned access
+    #  3. copy code is safe wrt "uint aligned" access
+    #
+    # Complex types are the main problem, whose alignment may not be the same
+    # as their "uint alignment".
+    #
+    # This test might only fail on certain platforms, where uint64 alignment
+    # is not equal to complex64 alignment. The last two checks will only fail
+    # when compiled with DEBUG=1.
+
+    d1 = np.dtype('u1,c8', align=True)
+    d2 = np.dtype('u4,c8', align=True)
+    d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True)
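+    # these aligned structs place complex64 after integers of different
+    # sizes, so a mismatch between uint alignment and complex alignment
+    # would show up in the itemsize/alignment checks below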
+
+    assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True)
+    assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True)
+    assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False)
+
+    # check that C struct matches numpy struct size
+    s = _multiarray_tests.get_struct_alignments()
+    for d, (alignment, size) in zip([d1,d2,d3], s):
+        assert_equal(d.alignment, alignment)
+        assert_equal(d.itemsize, size)
+
+    # check that ufuncs don't complain in debug mode
+    # (this is probably OK if the aligned flag is true above)
+    src = np.zeros((2,2), dtype=d1)['f1']  # 4-byte aligned, often
+    np.exp(src)  # assert fails?
+
+    # check that copy code doesn't complain in debug mode
+    dst = np.zeros((2,2), dtype='c8')
+    dst[:,1] = src[:,1]  # assert in lowlevel_strided_loops fails?
+
+class TestAlignment:
+    # adapted from scipy._lib.tests.test__util.test__aligned_zeros
+    # Checks that unusual memory alignments don't trip up numpy.
+    # In particular, check RELAXED_STRIDES don't trip alignment assertions in
+    # NDEBUG mode for size-0 arrays (gh-12503)
+
+    def check(self, shape, dtype, order, align):
+        err_msg = repr((shape, dtype, order, align))
+        x = _aligned_zeros(shape, dtype, order, align=align)
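+        # _aligned_zeros forces the data pointer to the requested alignment
+        # (the dtype's natural alignment when align is None)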
+        if align is None:
+            align = np.dtype(dtype).alignment
+        assert_equal(x.__array_interface__['data'][0] % align, 0)
+        if hasattr(shape, '__len__'):
+            assert_equal(x.shape, shape, err_msg)
+        else:
+            assert_equal(x.shape, (shape,), err_msg)
+        assert_equal(x.dtype, dtype)
+        if order == "C":
+            assert_(x.flags.c_contiguous, err_msg)
+        elif order == "F":
+            if x.size > 0:
+                assert_(x.flags.f_contiguous, err_msg)
+        elif order is None:
+            assert_(x.flags.c_contiguous, err_msg)
+        else:
+            raise ValueError()
+
+    def test_various_alignments(self):
+        for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]:
+            for n in [0, 1, 3, 11]:
+                for order in ["C", "F", None]:
+                    for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']:
+                        if dtype == 'O':
+                            # object dtype can't be misaligned
+                            continue
+                        for shape in [n, (1, 2, 3, n)]:
+                            self.check(shape, np.dtype(dtype), order, align)
+
+    def test_strided_loop_alignments(self):
+        # particularly test that complex64 and float128 use right alignment
+        # code-paths, since these are particularly problematic. It is useful to
+        # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run.
+        for align in [1, 2, 4, 8, 12, 16, None]:
+            xf64 = _aligned_zeros(3, np.float64)
+
+            xc64 = _aligned_zeros(3, np.complex64, align=align)
+            xf128 = _aligned_zeros(3, np.longdouble, align=align)
+
+            # test casting, both to and from misaligned
+            with suppress_warnings() as sup:
+                sup.filter(np.ComplexWarning, "Casting complex values")
+                xc64.astype('f8')
+            xf64.astype(np.complex64)
+            test = xc64 + xf64
+
+            xf128.astype('f8')
+            xf64.astype(np.longdouble)
+            test = xf128 + xf64
+
+            test = xf128 + xc64
+
+            # test copy, both to and from misaligned
+            # contig copy
+            xf64[:] = xf64.copy()
+            xc64[:] = xc64.copy()
+            xf128[:] = xf128.copy()
+            # strided copy
+            xf64[::2] = xf64[::2].copy()
+            xc64[::2] = xc64[::2].copy()
+            xf128[::2] = xf128[::2].copy()
+
+def test_getfield():
+    a = np.arange(32, dtype='uint16')
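+    # with a byte offset of 0 or 1, getfield views the low or high byte of
+    # each uint16; which one holds the value depends on machine endianness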
+    if sys.byteorder == 'little':
+        i = 0
+        j = 1
+    else:
+        i = 1
+        j = 0
+    b = a.getfield('int8', i)
+    assert_equal(b, a)
+    b = a.getfield('int8', j)
+    assert_equal(b, 0)
+    pytest.raises(ValueError, a.getfield, 'uint8', -1)
+    pytest.raises(ValueError, a.getfield, 'uint8', 16)
+    pytest.raises(ValueError, a.getfield, 'uint64', 0)
+
+
+class TestViewDtype:
+    """
+    Verify that making a view of a non-contiguous array works as expected.
+    """
+    def test_smaller_dtype_multiple(self):
+        # x is non-contiguous
+        x = np.arange(10, dtype='<i4')[::2]
+        with pytest.raises(ValueError,
+                           match='the last axis must be contiguous'):
+            x.view('<i2')
+        expected = [[0, 0], [2, 0], [4, 0], [6, 0], [8, 0]]
+        assert_array_equal(x[:, np.newaxis].view('<i2'), expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_nditer.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_nditer.py
new file mode 100644
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_nditer.py
+import sys
+import pytest
+
+import numpy as np
+import numpy.core._multiarray_tests as _multiarray_tests
+from numpy import array, arange, nditer, all
+from numpy.testing import (
+    assert_, assert_equal, assert_array_equal, assert_raises,
+    HAS_REFCOUNT, suppress_warnings, break_cycles
+    )
+
+
+def iter_multi_index(i):
+    # collect the multi-index at each iteration step
+    ret = []
+    while not i.finished:
+        ret.append(i.multi_index)
+        i.iternext()
+    return ret
+
+def iter_indices(i):
+    # collect the tracked (C or F) index at each iteration step
+    ret = []
+    while not i.finished:
+        ret.append(i.index)
+        i.iternext()
+    return ret
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_iter_refcount():
+    # Make sure the iterator doesn't leak
+
+    # Basic
+    a = arange(6)
+    dt = np.dtype('f4').newbyteorder()
+    rc_a = sys.getrefcount(a)
+    rc_dt = sys.getrefcount(dt)
+    with nditer(a, [],
+                [['readwrite', 'updateifcopy']],
+                casting='unsafe',
+                op_dtypes=[dt]) as it:
+        assert_(not it.iterationneedsapi)
+        assert_(sys.getrefcount(a) > rc_a)
+        assert_(sys.getrefcount(dt) > rc_dt)
+    # del 'it'
+    it = None
+    assert_equal(sys.getrefcount(a), rc_a)
+    assert_equal(sys.getrefcount(dt), rc_dt)
+
+    # With a copy
+    a = arange(6, dtype='f4')
+    dt = np.dtype('f4')
+    rc_a = sys.getrefcount(a)
+    rc_dt = sys.getrefcount(dt)
+    it = nditer(a, [],
+                [['readwrite']],
+                op_dtypes=[dt])
+    rc2_a = sys.getrefcount(a)
+    rc2_dt = sys.getrefcount(dt)
+    it2 = it.copy()
+    assert_(sys.getrefcount(a) > rc2_a)
+    assert_(sys.getrefcount(dt) > rc2_dt)
+    it = None
+    assert_equal(sys.getrefcount(a), rc2_a)
+    assert_equal(sys.getrefcount(dt), rc2_dt)
+    it2 = None
+    assert_equal(sys.getrefcount(a), rc_a)
+    assert_equal(sys.getrefcount(dt), rc_dt)
+
+    del it2  # avoid pyflakes unused variable warning
+
+def test_iter_best_order():
+    # The iterator should always find the iteration order
+    # with increasing memory addresses
+
+    # Test the ordering for 1-D to 5-D shapes
+    for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+        a = arange(np.prod(shape))
+        # Test each combination of positive and negative strides
+        for dirs in range(2**len(shape)):
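+            # bit k of 'dirs' selects a reversed slice for axis k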
+            dirs_index = [slice(None)]*len(shape)
+            for bit in range(len(shape)):
+                if ((2**bit) & dirs):
+                    dirs_index[bit] = slice(None, None, -1)
+            dirs_index = tuple(dirs_index)
+
+            aview = a.reshape(shape)[dirs_index]
+            # C-order
+            i = nditer(aview, [], [['readonly']])
+            assert_equal([x for x in i], a)
+            # Fortran-order
+            i = nditer(aview.T, [], [['readonly']])
+            assert_equal([x for x in i], a)
+            # Other order
+            if len(shape) > 2:
+                i = nditer(aview.swapaxes(0, 1), [], [['readonly']])
+                assert_equal([x for x in i], a)
+
+def test_iter_c_order():
+    # Test forcing C order
+
+    # Test the ordering for 1-D to 5-D shapes
+    for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+        a = arange(np.prod(shape))
+        # Test each combination of positive and negative strides
+        for dirs in range(2**len(shape)):
+            dirs_index = [slice(None)]*len(shape)
+            for bit in range(len(shape)):
+                if ((2**bit) & dirs):
+                    dirs_index[bit] = slice(None, None, -1)
+            dirs_index = tuple(dirs_index)
+
+            aview = a.reshape(shape)[dirs_index]
+            # C-order
+            i = nditer(aview, order='C')
+            assert_equal([x for x in i], aview.ravel(order='C'))
+            # Fortran-order
+            i = nditer(aview.T, order='C')
+            assert_equal([x for x in i], aview.T.ravel(order='C'))
+            # Other order
+            if len(shape) > 2:
+                i = nditer(aview.swapaxes(0, 1), order='C')
+                assert_equal([x for x in i],
+                                    aview.swapaxes(0, 1).ravel(order='C'))
+
+def test_iter_f_order():
+    # Test forcing F order
+
+    # Test the ordering for 1-D to 5-D shapes
+    for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+        a = arange(np.prod(shape))
+        # Test each combination of positive and negative strides
+        for dirs in range(2**len(shape)):
+            dirs_index = [slice(None)]*len(shape)
+            for bit in range(len(shape)):
+                if ((2**bit) & dirs):
+                    dirs_index[bit] = slice(None, None, -1)
+            dirs_index = tuple(dirs_index)
+
+            aview = a.reshape(shape)[dirs_index]
+            # C-order
+            i = nditer(aview, order='F')
+            assert_equal([x for x in i], aview.ravel(order='F'))
+            # Fortran-order
+            i = nditer(aview.T, order='F')
+            assert_equal([x for x in i], aview.T.ravel(order='F'))
+            # Other order
+            if len(shape) > 2:
+                i = nditer(aview.swapaxes(0, 1), order='F')
+                assert_equal([x for x in i],
+                                    aview.swapaxes(0, 1).ravel(order='F'))
+
+def test_iter_c_or_f_order():
+    # Test forcing any contiguous (C or F) order
+
+    # Test the ordering for 1-D to 5-D shapes
+    for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+        a = arange(np.prod(shape))
+        # Test each combination of positive and negative strides
+        for dirs in range(2**len(shape)):
+            dirs_index = [slice(None)]*len(shape)
+            for bit in range(len(shape)):
+                if ((2**bit) & dirs):
+                    dirs_index[bit] = slice(None, None, -1)
+            dirs_index = tuple(dirs_index)
+
+            aview = a.reshape(shape)[dirs_index]
+            # C-order
+            i = nditer(aview, order='A')
+            assert_equal([x for x in i], aview.ravel(order='A'))
+            # Fortran-order
+            i = nditer(aview.T, order='A')
+            assert_equal([x for x in i], aview.T.ravel(order='A'))
+            # Other order
+            if len(shape) > 2:
+                i = nditer(aview.swapaxes(0, 1), order='A')
+                assert_equal([x for x in i],
+                                    aview.swapaxes(0, 1).ravel(order='A'))
+
+def test_nditer_multi_index_set():
+    # Test the multi_index set
+    a = np.arange(6).reshape(2, 3)
+    it = np.nditer(a, flags=['multi_index'])
+
+    # Position the iterator at (0, 2): the first two elements of a[0]
+    # are skipped
+    it.multi_index = (0, 2,)
+
+    assert_equal([i for i in it], [2, 3, 4, 5])
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_nditer_multi_index_set_refcount():
+    # Test if the reference count on index variable is decreased
+
+    index = 0
+    i = np.nditer(np.array([111, 222, 333, 444]), flags=['multi_index'])
+
+    start_count = sys.getrefcount(index)
+    i.multi_index = (index,)
+    end_count = sys.getrefcount(index)
+
+    assert_equal(start_count, end_count)
+
+def test_iter_best_order_multi_index_1d():
+    # The multi-indices should be correct with any reordering
+
+    a = arange(4)
+    # 1D order
+    i = nditer(a, ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)])
+    # 1D reversed order
+    i = nditer(a[::-1], ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)])
+
+def test_iter_best_order_multi_index_2d():
+    # The multi-indices should be correct with any reordering
+
+    a = arange(6)
+    # 2D C-order
+    i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)])
+    # 2D Fortran-order
+    i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)])
+    # 2D reversed C-order
+    i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)])
+    i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)])
+    i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)])
+    # 2D reversed Fortran-order
+    i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)])
+    i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
+                                                   ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)])
+    i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
+                                                   ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)])
+
+def test_iter_best_order_multi_index_3d():
+    # The multi-indices should be correct with any reordering
+
+    a = arange(12)
+    # 3D C-order
+    i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i),
+                            [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1),
+                             (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)])
+    # 3D Fortran-order
+    i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i),
+                            [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0),
+                             (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)])
+    # 3D reversed C-order
+    i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i),
+                            [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1),
+                             (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)])
+    i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i),
+                            [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1),
+                             (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)])
+    i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i),
+                            [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0),
+                             (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)])
+    # 3D reversed Fortran-order
+    i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
+                                                    ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i),
+                            [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0),
+                             (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)])
+    i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
+                                                    ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i),
+                            [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0),
+                             (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)])
+    i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
+                                                    ['multi_index'], [['readonly']])
+    assert_equal(iter_multi_index(i),
+                            [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1),
+                             (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)])
+
+def test_iter_best_order_c_index_1d():
+    # The C index should be correct with any reordering
+
+    a = arange(4)
+    # 1D order
+    i = nditer(a, ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i), [0, 1, 2, 3])
+    # 1D reversed order
+    i = nditer(a[::-1], ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i), [3, 2, 1, 0])
+
+def test_iter_best_order_c_index_2d():
+    # The C index should be correct with any reordering
+
+    a = arange(6)
+    # 2D C-order
+    i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5])
+    # 2D Fortran-order
+    i = nditer(a.reshape(2, 3).copy(order='F'),
+                                    ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5])
+    # 2D reversed C-order
+    i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2])
+    i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3])
+    i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0])
+    # 2D reversed Fortran-order
+    i = nditer(a.reshape(2, 3).copy(order='F')[::-1],
+                                    ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2])
+    i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
+                                    ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3])
+    i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
+                                    ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0])
+
+def test_iter_best_order_c_index_3d():
+    # The C index should be correct with any reordering
+
+    a = arange(12)
+    # 3D C-order
+    i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+    # 3D Fortran-order
+    i = nditer(a.reshape(2, 3, 2).copy(order='F'),
+                                    ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11])
+    # 3D reversed C-order
+    i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5])
+    i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7])
+    i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10])
+    # 3D reversed Fortran-order
+    i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
+                                    ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5])
+    i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
+                                    ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7])
+    i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
+                                    ['c_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10])
+
+def test_iter_best_order_f_index_1d():
+    # The Fortran index should be correct with any reordering
+
+    a = arange(4)
+    # 1D order
+    i = nditer(a, ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i), [0, 1, 2, 3])
+    # 1D reversed order
+    i = nditer(a[::-1], ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i), [3, 2, 1, 0])
+
+def test_iter_best_order_f_index_2d():
+    # The Fortran index should be correct with any reordering
+
+    a = arange(6)
+    # 2D C-order
+    i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5])
+    # 2D Fortran-order
+    i = nditer(a.reshape(2, 3).copy(order='F'),
+                                    ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5])
+    # 2D reversed C-order
+    i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4])
+    i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1])
+    i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0])
+    # 2D reversed Fortran-order
+    i = nditer(a.reshape(2, 3).copy(order='F')[::-1],
+                                    ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4])
+    i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
+                                    ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1])
+    i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
+                                    ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0])
+
+def test_iter_best_order_f_index_3d():
+    # The Fortran index should be correct with any reordering
+
+    a = arange(12)
+    # 3D C-order
+    i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11])
+    # 3D Fortran-order
+    i = nditer(a.reshape(2, 3, 2).copy(order='F'),
+                                    ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+    # 3D reversed C-order
+    i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10])
+    i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7])
+    i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5])
+    # 3D reversed Fortran-order
+    i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
+                                    ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10])
+    i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
+                                    ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7])
+    i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
+                                    ['f_index'], [['readonly']])
+    assert_equal(iter_indices(i),
+                            [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5])
+
+def test_iter_no_inner_full_coalesce():
+    # Check no_inner iterators which coalesce into a single inner loop
+
+    for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+        size = np.prod(shape)
+        a = arange(size)
+        # Test each combination of forward and backwards indexing
+        for dirs in range(2**len(shape)):
+            dirs_index = [slice(None)]*len(shape)
+            for bit in range(len(shape)):
+                if ((2**bit) & dirs):
+                    dirs_index[bit] = slice(None, None, -1)
+            dirs_index = tuple(dirs_index)
+
+            aview = a.reshape(shape)[dirs_index]
+            # C-order
+            i = nditer(aview, ['external_loop'], [['readonly']])
+            assert_equal(i.ndim, 1)
+            assert_equal(i[0].shape, (size,))
+            # Fortran-order
+            i = nditer(aview.T, ['external_loop'], [['readonly']])
+            assert_equal(i.ndim, 1)
+            assert_equal(i[0].shape, (size,))
+            # Other order
+            if len(shape) > 2:
+                i = nditer(aview.swapaxes(0, 1),
+                                    ['external_loop'], [['readonly']])
+                assert_equal(i.ndim, 1)
+                assert_equal(i[0].shape, (size,))
+
+def test_iter_no_inner_dim_coalescing():
+    # Check no_inner iterators whose dimensions may not coalesce completely
+
+    # Skipping the last element in a dimension prevents coalescing
+    # with the next-bigger dimension
+    a = arange(24).reshape(2, 3, 4)[:,:, :-1]
+    i = nditer(a, ['external_loop'], [['readonly']])
+    assert_equal(i.ndim, 2)
+    assert_equal(i[0].shape, (3,))
+    a = arange(24).reshape(2, 3, 4)[:, :-1,:]
+    i = nditer(a, ['external_loop'], [['readonly']])
+    assert_equal(i.ndim, 2)
+    assert_equal(i[0].shape, (8,))
+    a = arange(24).reshape(2, 3, 4)[:-1,:,:]
+    i = nditer(a, ['external_loop'], [['readonly']])
+    assert_equal(i.ndim, 1)
+    assert_equal(i[0].shape, (12,))
+
+    # Even with lots of 1-sized dimensions, should still coalesce
+    a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1)
+    i = nditer(a, ['external_loop'], [['readonly']])
+    assert_equal(i.ndim, 1)
+    assert_equal(i[0].shape, (24,))
+
+def test_iter_dim_coalescing():
+    # Check that the correct number of dimensions are coalesced
+
+    # Tracking a multi-index disables coalescing
+    a = arange(24).reshape(2, 3, 4)
+    i = nditer(a, ['multi_index'], [['readonly']])
+    assert_equal(i.ndim, 3)
+
+    # A tracked index can allow coalescing if it's compatible with the array
+    a3d = arange(24).reshape(2, 3, 4)
+    i = nditer(a3d, ['c_index'], [['readonly']])
+    assert_equal(i.ndim, 1)
+    i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']])
+    assert_equal(i.ndim, 3)
+    i = nditer(a3d.T, ['c_index'], [['readonly']])
+    assert_equal(i.ndim, 3)
+    i = nditer(a3d.T, ['f_index'], [['readonly']])
+    assert_equal(i.ndim, 1)
+    i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']])
+    assert_equal(i.ndim, 3)
+
+    # When C or F order is forced, coalescing may still occur
+    a3d = arange(24).reshape(2, 3, 4)
+    i = nditer(a3d, order='C')
+    assert_equal(i.ndim, 1)
+    i = nditer(a3d.T, order='C')
+    assert_equal(i.ndim, 3)
+    i = nditer(a3d, order='F')
+    assert_equal(i.ndim, 3)
+    i = nditer(a3d.T, order='F')
+    assert_equal(i.ndim, 1)
+    i = nditer(a3d, order='A')
+    assert_equal(i.ndim, 1)
+    i = nditer(a3d.T, order='A')
+    assert_equal(i.ndim, 1)
+
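+# Illustrative sketch (not an upstream helper): a tracked C index can only be
+# coalesced when it advances by one per element in memory order, which holds
+# for a C-order array but not for its transpose.
+def _sketch_index_coalescing():
+    a = np.arange(6).reshape(2, 3)
+    assert np.nditer(a, ['c_index']).ndim == 1    # coalesced to one axis
+    assert np.nditer(a.T, ['c_index']).ndim == 2  # axes must stay separate
+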
+def test_iter_broadcasting():
+    # Standard NumPy broadcasting rules
+
+    # 1D with scalar
+    i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2)
+    assert_equal(i.itersize, 6)
+    assert_equal(i.shape, (6,))
+
+    # 2D with scalar
+    i = nditer([arange(6).reshape(2, 3), np.int32(2)],
+                        ['multi_index'], [['readonly']]*2)
+    assert_equal(i.itersize, 6)
+    assert_equal(i.shape, (2, 3))
+    # 2D with 1D
+    i = nditer([arange(6).reshape(2, 3), arange(3)],
+                        ['multi_index'], [['readonly']]*2)
+    assert_equal(i.itersize, 6)
+    assert_equal(i.shape, (2, 3))
+    i = nditer([arange(2).reshape(2, 1), arange(3)],
+                        ['multi_index'], [['readonly']]*2)
+    assert_equal(i.itersize, 6)
+    assert_equal(i.shape, (2, 3))
+    # 2D with 2D
+    i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)],
+                        ['multi_index'], [['readonly']]*2)
+    assert_equal(i.itersize, 6)
+    assert_equal(i.shape, (2, 3))
+
+    # 3D with scalar
+    i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)],
+                        ['multi_index'], [['readonly']]*2)
+    assert_equal(i.itersize, 24)
+    assert_equal(i.shape, (4, 2, 3))
+    # 3D with 1D
+    i = nditer([arange(3), arange(24).reshape(4, 2, 3)],
+                        ['multi_index'], [['readonly']]*2)
+    assert_equal(i.itersize, 24)
+    assert_equal(i.shape, (4, 2, 3))
+    i = nditer([arange(3), arange(8).reshape(4, 2, 1)],
+                        ['multi_index'], [['readonly']]*2)
+    assert_equal(i.itersize, 24)
+    assert_equal(i.shape, (4, 2, 3))
+    # 3D with 2D
+    i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)],
+                        ['multi_index'], [['readonly']]*2)
+    assert_equal(i.itersize, 24)
+    assert_equal(i.shape, (4, 2, 3))
+    i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)],
+                        ['multi_index'], [['readonly']]*2)
+    assert_equal(i.itersize, 24)
+    assert_equal(i.shape, (4, 2, 3))
+    i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)],
+                        ['multi_index'], [['readonly']]*2)
+    assert_equal(i.itersize, 24)
+    assert_equal(i.shape, (4, 2, 3))
+    # 3D with 3D
+    i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3),
+                        arange(4).reshape(4, 1, 1)],
+                        ['multi_index'], [['readonly']]*3)
+    assert_equal(i.itersize, 24)
+    assert_equal(i.shape, (4, 2, 3))
+    i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)],
+                        ['multi_index'], [['readonly']]*2)
+    assert_equal(i.itersize, 24)
+    assert_equal(i.shape, (4, 2, 3))
+    i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)],
+                        ['multi_index'], [['readonly']]*2)
+    assert_equal(i.itersize, 24)
+    assert_equal(i.shape, (4, 2, 3))
+
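+# Illustrative sketch (not an upstream helper): the iteration shape is the
+# ordinary broadcast of the operand shapes, the same answer np.broadcast gives.
+def _sketch_broadcast_iter_shape():
+    x, y = np.zeros((2, 1)), np.zeros(3)
+    it = np.nditer([x, y], ['multi_index'], [['readonly']]*2)
+    assert it.shape == np.broadcast(x, y).shape == (2, 3)
+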
+def test_iter_itershape():
+    # Check that allocated outputs work with a specified shape
+    a = np.arange(6, dtype='i2').reshape(2, 3)
+    i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
+                            op_axes=[[0, 1, None], None],
+                            itershape=(-1, -1, 4))
+    assert_equal(i.operands[1].shape, (2, 3, 4))
+    assert_equal(i.operands[1].strides, (24, 8, 2))
+
+    i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']],
+                            op_axes=[[0, 1, None], None],
+                            itershape=(-1, -1, 4))
+    assert_equal(i.operands[1].shape, (3, 2, 4))
+    assert_equal(i.operands[1].strides, (8, 24, 2))
+
+    i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']],
+                            order='F',
+                            op_axes=[[0, 1, None], None],
+                            itershape=(-1, -1, 4))
+    assert_equal(i.operands[1].shape, (3, 2, 4))
+    assert_equal(i.operands[1].strides, (2, 6, 12))
+
+    # If we specify 1 in the itershape, it shouldn't allow broadcasting
+    # of that dimension to a bigger value
+    assert_raises(ValueError, nditer, [a, None], [],
+                            [['readonly'], ['writeonly', 'allocate']],
+                            op_axes=[[0, 1, None], None],
+                            itershape=(-1, 1, 4))
+    # Regression test: with itershape given but no op_axes, the internal
+    # op_axes are NULLed correctly
+    i = np.nditer([np.ones(2), None, None], itershape=(2,))
+
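+# Illustrative sketch (not an upstream helper): -1 entries in itershape adopt
+# the matching operand axis, while concrete entries add new axes that only the
+# allocated output receives.
+def _sketch_itershape_allocate():
+    a = np.zeros((2, 3), dtype='f8')
+    it = np.nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
+                   op_axes=[[0, 1, None], None], itershape=(-1, -1, 5))
+    assert it.operands[1].shape == (2, 3, 5)
+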
+def test_iter_broadcasting_errors():
+    # Check that errors are thrown for bad broadcasting shapes
+
+    # 1D with 1D
+    assert_raises(ValueError, nditer, [arange(2), arange(3)],
+                    [], [['readonly']]*2)
+    # 2D with 1D
+    assert_raises(ValueError, nditer,
+                    [arange(6).reshape(2, 3), arange(2)],
+                    [], [['readonly']]*2)
+    # 2D with 2D
+    assert_raises(ValueError, nditer,
+                    [arange(6).reshape(2, 3), arange(9).reshape(3, 3)],
+                    [], [['readonly']]*2)
+    assert_raises(ValueError, nditer,
+                    [arange(6).reshape(2, 3), arange(4).reshape(2, 2)],
+                    [], [['readonly']]*2)
+    # 3D with 3D
+    assert_raises(ValueError, nditer,
+                    [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)],
+                    [], [['readonly']]*2)
+    assert_raises(ValueError, nditer,
+                    [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)],
+                    [], [['readonly']]*2)
+
+    # Verify that the error message mentions the right shapes
+    try:
+        nditer([arange(2).reshape(1, 2, 1),
+                arange(3).reshape(1, 3),
+                arange(6).reshape(2, 3)],
+               [],
+               [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']])
+        raise AssertionError('Should have raised a broadcast error')
+    except ValueError as e:
+        msg = str(e)
+        # The message should contain the shape of the 3rd operand
+        assert_(msg.find('(2,3)') >= 0,
+                'Message "%s" doesn\'t contain operand shape (2,3)' % msg)
+        # The message should contain the broadcast shape
+        assert_(msg.find('(1,2,3)') >= 0,
+                'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg)
+
+    try:
+        nditer([arange(6).reshape(2, 3), arange(2)],
+               [],
+               [['readonly'], ['readonly']],
+               op_axes=[[0, 1], [0, np.newaxis]],
+               itershape=(4, 3))
+        raise AssertionError('Should have raised a broadcast error')
+    except ValueError as e:
+        msg = str(e)
+        # The message should contain "shape->remappedshape" for each operand
+        assert_(msg.find('(2,3)->(2,3)') >= 0,
+            'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg)
+        assert_(msg.find('(2,)->(2,newaxis)') >= 0,
+                ('Message "%s" doesn\'t contain remapped operand shape ' +
+                '(2,)->(2,newaxis)') % msg)
+        # The message should contain the itershape parameter
+        assert_(msg.find('(4,3)') >= 0,
+                'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg)
+
+    try:
+        nditer([np.zeros((2, 1, 1)), np.zeros((2,))],
+               [],
+               [['writeonly', 'no_broadcast'], ['readonly']])
+        raise AssertionError('Should have raised a broadcast error')
+    except ValueError as e:
+        msg = str(e)
+        # The message should contain the shape of the bad operand
+        assert_(msg.find('(2,1,1)') >= 0,
+            'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg)
+        # The message should contain the broadcast shape
+        assert_(msg.find('(2,1,2)') >= 0,
+                'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg)
+
+def test_iter_flags_errors():
+    # Check that bad combinations of flags produce errors
+
+    a = arange(6)
+
+    # Not enough operands
+    assert_raises(ValueError, nditer, [], [], [])
+    # Too many operands
+    assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100)
+    # Bad global flag
+    assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']])
+    # Bad op flag
+    assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']])
+    # Bad order parameter
+    assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G')
+    # Bad casting parameter
+    assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon')
+    # op_flags must match ops
+    assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2)
+    # Cannot track both a C and an F index
+    assert_raises(ValueError, nditer, a,
+                ['c_index', 'f_index'], [['readonly']])
+    # Inner iteration and multi-indices/indices are incompatible
+    assert_raises(ValueError, nditer, a,
+                ['external_loop', 'multi_index'], [['readonly']])
+    assert_raises(ValueError, nditer, a,
+                ['external_loop', 'c_index'], [['readonly']])
+    assert_raises(ValueError, nditer, a,
+                ['external_loop', 'f_index'], [['readonly']])
+    # Must specify exactly one of readwrite/readonly/writeonly per operand
+    assert_raises(ValueError, nditer, a, [], [[]])
+    assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']])
+    assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']])
+    assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']])
+    assert_raises(ValueError, nditer, a,
+                [], [['readonly', 'writeonly', 'readwrite']])
+    # Python scalars are always readonly
+    assert_raises(TypeError, nditer, 1.5, [], [['writeonly']])
+    assert_raises(TypeError, nditer, 1.5, [], [['readwrite']])
+    # Array scalars are always readonly
+    assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']])
+    assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']])
+    # Check readonly array
+    a.flags.writeable = False
+    assert_raises(ValueError, nditer, a, [], [['writeonly']])
+    assert_raises(ValueError, nditer, a, [], [['readwrite']])
+    a.flags.writeable = True
+    # Multi-indices available only with the multi_index flag
+    i = nditer(arange(6), [], [['readonly']])
+    assert_raises(ValueError, lambda i:i.multi_index, i)
+    # Index available only with an index flag
+    assert_raises(ValueError, lambda i:i.index, i)
+    # GotoCoords and GotoIndex incompatible with buffering or no_inner
+
+    def assign_multi_index(i):
+        i.multi_index = (0,)
+
+    def assign_index(i):
+        i.index = 0
+
+    def assign_iterindex(i):
+        i.iterindex = 0
+
+    def assign_iterrange(i):
+        i.iterrange = (0, 1)
+    i = nditer(arange(6), ['external_loop'])
+    assert_raises(ValueError, assign_multi_index, i)
+    assert_raises(ValueError, assign_index, i)
+    assert_raises(ValueError, assign_iterindex, i)
+    assert_raises(ValueError, assign_iterrange, i)
+    i = nditer(arange(6), ['buffered'])
+    assert_raises(ValueError, assign_multi_index, i)
+    assert_raises(ValueError, assign_index, i)
+    assert_raises(ValueError, assign_iterrange, i)
+    # Can't iterate if size is zero
+    assert_raises(ValueError, nditer, np.array([]))
+
+def test_iter_slice():
+    a, b, c = np.arange(3), np.arange(3), np.arange(3.)
+    i = nditer([a, b, c], [], ['readwrite'])
+    with i:
+        i[0:2] = (3, 3)
+        assert_equal(a, [3, 1, 2])
+        assert_equal(b, [3, 1, 2])
+        assert_equal(c, [0, 1, 2])
+        i[1] = 12
+        assert_equal(i[0:2], [3, 12])
+
+def test_iter_assign_mapping():
+    a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
+    it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
+                       casting='same_kind', op_dtypes=[np.dtype('f4')])
+    with it:
+        it.operands[0][...] = 3
+        it.operands[0][...] = 14
+    assert_equal(a, 14)
+    it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
+                       casting='same_kind', op_dtypes=[np.dtype('f4')])
+    with it:
+        x = it.operands[0][-1:1]
+        x[...] = 14
+        it.operands[0][...] = -1234
+    assert_equal(a, -1234)
+    # check for no warnings on dealloc
+    x = None
+    it = None
+
+def test_iter_nbo_align_contig():
+    # Check that byte order, alignment, and contig changes work
+
+    # Byte order change by requesting a specific dtype
+    a = np.arange(6, dtype='f4')
+    au = a.byteswap().newbyteorder()
+    assert_(a.dtype.byteorder != au.dtype.byteorder)
+    i = nditer(au, [], [['readwrite', 'updateifcopy']],
+                        casting='equiv',
+                        op_dtypes=[np.dtype('f4')])
+    with i:
+        # context manager triggers WRITEBACKIFCOPY on i at exit
+        assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
+        assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
+        assert_equal(i.operands[0], a)
+        i.operands[0][:] = 2
+    assert_equal(au, [2]*6)
+    del i  # should not raise a warning
+    # Byte order change by requesting NBO
+    a = np.arange(6, dtype='f4')
+    au = a.byteswap().newbyteorder()
+    assert_(a.dtype.byteorder != au.dtype.byteorder)
+    with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']],
+                        casting='equiv') as i:
+        # context manager triggers WRITEBACKIFCOPY on i at exit
+        assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
+        assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
+        assert_equal(i.operands[0], a)
+        i.operands[0][:] = 12345
+        i.operands[0][:] = 2
+    assert_equal(au, [2]*6)
+
+    # Unaligned input
+    a = np.zeros((6*4+1,), dtype='i1')[1:]
+    a.dtype = 'f4'
+    a[:] = np.arange(6, dtype='f4')
+    assert_(not a.flags.aligned)
+    # Without 'aligned', shouldn't copy
+    i = nditer(a, [], [['readonly']])
+    assert_(not i.operands[0].flags.aligned)
+    assert_equal(i.operands[0], a)
+    # With 'aligned', should make a copy
+    with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i:
+        assert_(i.operands[0].flags.aligned)
+        # context manager triggers WRITEBACKIFCOPY on i at exit
+        assert_equal(i.operands[0], a)
+        i.operands[0][:] = 3
+    assert_equal(a, [3]*6)
+
+    # Discontiguous input
+    a = arange(12)
+    # If it is contiguous, shouldn't copy
+    i = nditer(a[:6], [], [['readonly']])
+    assert_(i.operands[0].flags.contiguous)
+    assert_equal(i.operands[0], a[:6])
+    # If it isn't contiguous, should buffer
+    i = nditer(a[::2], ['buffered', 'external_loop'],
+                        [['readonly', 'contig']],
+                        buffersize=10)
+    assert_(i[0].flags.contiguous)
+    assert_equal(i[0], a[::2])
+
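+# Illustrative sketch (not an upstream helper): the 'contig' request on a
+# strided view is satisfied through buffering, so every delivered chunk is
+# contiguous even though the operand itself is never copied wholesale.
+def _sketch_contig_buffers():
+    a = np.arange(10)[::2]
+    it = np.nditer(a, ['buffered', 'external_loop'],
+                   [['readonly', 'contig']], buffersize=3)
+    assert all(chunk.flags.contiguous for chunk in it)
+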
+def test_iter_array_cast():
+    # Check that arrays are cast as requested
+
+    # No cast 'f4' -> 'f4'
+    a = np.arange(6, dtype='f4').reshape(2, 3)
+    i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')])
+    with i:
+        assert_equal(i.operands[0], a)
+        assert_equal(i.operands[0].dtype, np.dtype('f4'))
+
+    # Byte-order cast '<f4' -> '>f4'
+    a = np.arange(6, dtype='<f4').reshape(2, 3)
+    with nditer(a, [], [['readwrite', 'updateifcopy']],
+            casting='equiv',
+            op_dtypes=[np.dtype('>f4')]) as i:
+        assert_equal(i.operands[0], a)
+        assert_equal(i.operands[0].dtype, np.dtype('>f4'))
+
+    # Safe case 'f4' -> 'f8'
+    a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2)
+    i = nditer(a, [], [['readonly', 'copy']],
+            casting='safe',
+            op_dtypes=[np.dtype('f8')])
+    assert_equal(i.operands[0], a)
+    assert_equal(i.operands[0].dtype, np.dtype('f8'))
+    # The memory layout of the temporary should match a (a is (48,4,16))
+    # except negative strides get flipped to positive strides.
+    assert_equal(i.operands[0].strides, (96, 8, 32))
+    a = a[::-1,:, ::-1]
+    i = nditer(a, [], [['readonly', 'copy']],
+            casting='safe',
+            op_dtypes=[np.dtype('f8')])
+    assert_equal(i.operands[0], a)
+    assert_equal(i.operands[0].dtype, np.dtype('f8'))
+    assert_equal(i.operands[0].strides, (96, 8, 32))
+
+    # Same-kind cast 'f8' -> 'f4' -> 'f8'
+    a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
+    with nditer(a, [],
+            [['readwrite', 'updateifcopy']],
+            casting='same_kind',
+            op_dtypes=[np.dtype('f4')]) as i:
+        assert_equal(i.operands[0], a)
+        assert_equal(i.operands[0].dtype, np.dtype('f4'))
+        assert_equal(i.operands[0].strides, (4, 16, 48))
+        # Check that WRITEBACKIFCOPY is activated at exit
+        i.operands[0][2, 1, 1] = -12.5
+        assert_(a[2, 1, 1] != -12.5)
+    assert_equal(a[2, 1, 1], -12.5)
+
+    a = np.arange(6, dtype='i4')[::-2]
+    with nditer(a, [],
+            [['writeonly', 'updateifcopy']],
+            casting='unsafe',
+            op_dtypes=[np.dtype('f4')]) as i:
+        assert_equal(i.operands[0].dtype, np.dtype('f4'))
+        # Even though the stride was negative in 'a', it
+        # becomes positive in the temporary
+        assert_equal(i.operands[0].strides, (4,))
+        i.operands[0][:] = [1, 2, 3]
+    assert_equal(a, [1, 2, 3])
+
+def test_iter_array_cast_errors():
+    # Check that invalid casts are caught
+
+    # Need to enable copying for casts to occur
+    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+                [['readonly']], op_dtypes=[np.dtype('f8')])
+    # Also need to allow casting for casts to occur
+    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+                [['readonly', 'copy']], casting='no',
+                op_dtypes=[np.dtype('f8')])
+    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+                [['readonly', 'copy']], casting='equiv',
+                op_dtypes=[np.dtype('f8')])
+    assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
+                [['writeonly', 'updateifcopy']],
+                casting='no',
+                op_dtypes=[np.dtype('f4')])
+    assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
+                [['writeonly', 'updateifcopy']],
+                casting='equiv',
+                op_dtypes=[np.dtype('f4')])
+    # '<f4' -> '>f4' should not work with casting='no'
+    assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [],
+                [['readonly', 'copy']], casting='no',
+                op_dtypes=[np.dtype('>f4')])
+    # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't
+    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+                [['readwrite', 'updateifcopy']],
+                casting='safe',
+                op_dtypes=[np.dtype('f8')])
+    assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
+                [['readwrite', 'updateifcopy']],
+                casting='safe',
+                op_dtypes=[np.dtype('f4')])
+    # 'f4' -> 'i4' is neither a safe nor a same-kind cast
+    assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+                [['readonly', 'copy']],
+                casting='same_kind',
+                op_dtypes=[np.dtype('i4')])
+    assert_raises(TypeError, nditer, arange(2, dtype='i4'), [],
+                [['writeonly', 'updateifcopy']],
+                casting='same_kind',
+                op_dtypes=[np.dtype('f4')])
+
+def test_iter_scalar_cast():
+    # Check that scalars are cast as requested
+
+    # No cast 'f4' -> 'f4'
+    i = nditer(np.float32(2.5), [], [['readonly']],
+                    op_dtypes=[np.dtype('f4')])
+    assert_equal(i.dtypes[0], np.dtype('f4'))
+    assert_equal(i.value.dtype, np.dtype('f4'))
+    assert_equal(i.value, 2.5)
+    # Safe cast 'f4' -> 'f8'
+    i = nditer(np.float32(2.5), [],
+                    [['readonly', 'copy']],
+                    casting='safe',
+                    op_dtypes=[np.dtype('f8')])
+    assert_equal(i.dtypes[0], np.dtype('f8'))
+    assert_equal(i.value.dtype, np.dtype('f8'))
+    assert_equal(i.value, 2.5)
+    # Same-kind cast 'f8' -> 'f4'
+    i = nditer(np.float64(2.5), [],
+                    [['readonly', 'copy']],
+                    casting='same_kind',
+                    op_dtypes=[np.dtype('f4')])
+    assert_equal(i.dtypes[0], np.dtype('f4'))
+    assert_equal(i.value.dtype, np.dtype('f4'))
+    assert_equal(i.value, 2.5)
+    # Unsafe cast 'f8' -> 'i4'
+    i = nditer(np.float64(3.0), [],
+                    [['readonly', 'copy']],
+                    casting='unsafe',
+                    op_dtypes=[np.dtype('i4')])
+    assert_equal(i.dtypes[0], np.dtype('i4'))
+    assert_equal(i.value.dtype, np.dtype('i4'))
+    assert_equal(i.value, 3)
+    # Readonly scalars may be cast even without setting COPY or BUFFERED
+    i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')])
+    assert_equal(i[0].dtype, np.dtype('f8'))
+    assert_equal(i[0], 3.)
+
+def test_iter_scalar_cast_errors():
+    # Check that invalid casts are caught
+
+    # Need to allow copying/buffering for write casts of scalars to occur
+    assert_raises(TypeError, nditer, np.float32(2), [],
+                [['readwrite']], op_dtypes=[np.dtype('f8')])
+    assert_raises(TypeError, nditer, 2.5, [],
+                [['readwrite']], op_dtypes=[np.dtype('f4')])
+    # 'f8' -> 'f4' isn't a safe cast if the value would overflow
+    assert_raises(TypeError, nditer, np.float64(1e60), [],
+                [['readonly']],
+                casting='safe',
+                op_dtypes=[np.dtype('f4')])
+    # 'f4' -> 'i4' is neither a safe nor a same-kind cast
+    assert_raises(TypeError, nditer, np.float32(2), [],
+                [['readonly']],
+                casting='same_kind',
+                op_dtypes=[np.dtype('i4')])
+
+def test_iter_object_arrays_basic():
+    # Check that object arrays work
+
+    obj = {'a':3,'b':'d'}
+    a = np.array([[1, 2, 3], None, obj, None], dtype='O')
+    if HAS_REFCOUNT:
+        rc = sys.getrefcount(obj)
+
+    # Need to allow references for object arrays
+    assert_raises(TypeError, nditer, a)
+    if HAS_REFCOUNT:
+        assert_equal(sys.getrefcount(obj), rc)
+
+    i = nditer(a, ['refs_ok'], ['readonly'])
+    vals = [x_[()] for x_ in i]
+    assert_equal(np.array(vals, dtype='O'), a)
+    vals, i, x = [None]*3
+    if HAS_REFCOUNT:
+        assert_equal(sys.getrefcount(obj), rc)
+
+    i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
+                        ['readonly'], order='C')
+    assert_(i.iterationneedsapi)
+    vals = [x_[()] for x_ in i]
+    assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F'))
+    vals, i, x = [None]*3
+    if HAS_REFCOUNT:
+        assert_equal(sys.getrefcount(obj), rc)
+
+    i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
+                        ['readwrite'], order='C')
+    with i:
+        for x in i:
+            x[...] = None
+        vals, i, x = [None]*3
+    if HAS_REFCOUNT:
+        assert_(sys.getrefcount(obj) == rc-1)
+    assert_equal(a, np.array([None]*4, dtype='O'))
+
+def test_iter_object_arrays_conversions():
+    # Conversions to/from objects
+    a = np.arange(6, dtype='O')
+    i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
+                    casting='unsafe', op_dtypes='i4')
+    with i:
+        for x in i:
+            x[...] += 1
+    assert_equal(a, np.arange(6)+1)
+
+    a = np.arange(6, dtype='i4')
+    i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
+                    casting='unsafe', op_dtypes='O')
+    with i:
+        for x in i:
+            x[...] += 1
+    assert_equal(a, np.arange(6)+1)
+
+    # Non-contiguous object array
+    a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')])
+    a = a['a']
+    a[:] = np.arange(6)
+    i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
+                    casting='unsafe', op_dtypes='i4')
+    with i:
+        for x in i:
+            x[...] += 1
+    assert_equal(a, np.arange(6)+1)
+
+    # Non-contiguous value array
+    a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')])
+    a = a['a']
+    a[:] = np.arange(6) + 98172488
+    i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
+                    casting='unsafe', op_dtypes='O')
+    with i:
+        ob = i[0][()]
+        if HAS_REFCOUNT:
+            rc = sys.getrefcount(ob)
+        for x in i:
+            x[...] += 1
+    if HAS_REFCOUNT:
+        assert_(sys.getrefcount(ob) == rc-1)
+    assert_equal(a, np.arange(6)+98172489)
+
+def test_iter_common_dtype():
+    # Check that the iterator finds a common data type correctly
+
+    i = nditer([array([3], dtype='f4'), array([0], dtype='f8')],
+                    ['common_dtype'],
+                    [['readonly', 'copy']]*2,
+                    casting='safe')
+    assert_equal(i.dtypes[0], np.dtype('f8'))
+    assert_equal(i.dtypes[1], np.dtype('f8'))
+    i = nditer([array([3], dtype='i4'), array([0], dtype='f4')],
+                    ['common_dtype'],
+                    [['readonly', 'copy']]*2,
+                    casting='safe')
+    assert_equal(i.dtypes[0], np.dtype('f8'))
+    assert_equal(i.dtypes[1], np.dtype('f8'))
+    i = nditer([array([3], dtype='f4'), array(0, dtype='f8')],
+                    ['common_dtype'],
+                    [['readonly', 'copy']]*2,
+                    casting='same_kind')
+    assert_equal(i.dtypes[0], np.dtype('f4'))
+    assert_equal(i.dtypes[1], np.dtype('f4'))
+    i = nditer([array([3], dtype='u4'), array(0, dtype='i4')],
+                    ['common_dtype'],
+                    [['readonly', 'copy']]*2,
+                    casting='safe')
+    assert_equal(i.dtypes[0], np.dtype('u4'))
+    assert_equal(i.dtypes[1], np.dtype('u4'))
+    i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')],
+                    ['common_dtype'],
+                    [['readonly', 'copy']]*2,
+                    casting='safe')
+    assert_equal(i.dtypes[0], np.dtype('i8'))
+    assert_equal(i.dtypes[1], np.dtype('i8'))
+    i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'),
+                 array([2j], dtype='c8'), array([9], dtype='f8')],
+                    ['common_dtype'],
+                    [['readonly', 'copy']]*4,
+                    casting='safe')
+    assert_equal(i.dtypes[0], np.dtype('c16'))
+    assert_equal(i.dtypes[1], np.dtype('c16'))
+    assert_equal(i.dtypes[2], np.dtype('c16'))
+    assert_equal(i.dtypes[3], np.dtype('c16'))
+    assert_equal(i.value, (3, -12, 2j, 9))
+
+    # When allocating outputs, other outputs aren't factored in
+    i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [],
+                    [['readonly', 'copy'],
+                     ['writeonly', 'allocate'],
+                     ['writeonly']],
+                    casting='safe')
+    assert_equal(i.dtypes[0], np.dtype('i4'))
+    assert_equal(i.dtypes[1], np.dtype('i4'))
+    assert_equal(i.dtypes[2], np.dtype('c16'))
+    # But, if common data types are requested, they are
+    i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')],
+                    ['common_dtype'],
+                    [['readonly', 'copy'],
+                     ['writeonly', 'allocate'],
+                     ['writeonly']],
+                    casting='safe')
+    assert_equal(i.dtypes[0], np.dtype('c16'))
+    assert_equal(i.dtypes[1], np.dtype('c16'))
+    assert_equal(i.dtypes[2], np.dtype('c16'))
+
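+# Illustrative sketch (not an upstream helper): 'common_dtype' follows the
+# same promotion table as np.result_type; the 0-d operands above differ only
+# because value-based casting applies to them.
+def _sketch_common_dtype_promotion():
+    assert np.result_type(np.dtype('u4'), np.dtype('i4')) == np.dtype('i8')
+    assert np.result_type(np.dtype('f4'), np.dtype('c8')) == np.dtype('c8')
+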
+def test_iter_copy_if_overlap():
+    # Ensure the iterator makes copies on read/write overlap, if requested
+
+    # Copy not needed, 1 op
+    for flag in ['readonly', 'writeonly', 'readwrite']:
+        a = arange(10)
+        i = nditer([a], ['copy_if_overlap'], [[flag]])
+        with i:
+            assert_(i.operands[0] is a)
+
+    # Copy needed, 2 ops, read-write overlap
+    x = arange(10)
+    a = x[1:]
+    b = x[:-1]
+    with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
+        assert_(not np.shares_memory(*i.operands))
+
+    # Copy not needed with elementwise, 2 ops, exactly the same arrays
+    x = arange(10)
+    a = x
+    b = x
+    i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'],
+                                             ['readwrite', 'overlap_assume_elementwise']])
+    with i:
+        assert_(i.operands[0] is a and i.operands[1] is b)
+    with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
+        assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b))
+
+    # Copy not needed, 2 ops, no overlap
+    x = arange(10)
+    a = x[::2]
+    b = x[1::2]
+    i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']])
+    assert_(i.operands[0] is a and i.operands[1] is b)
+
+    # Copy needed, 2 ops, read-write overlap
+    x = arange(4, dtype=np.int8)
+    a = x[3:]
+    b = x.view(np.int32)[:1]
+    with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i:
+        assert_(not np.shares_memory(*i.operands))
+
+    # Copy needed, 3 ops, read-write overlap
+    for flag in ['writeonly', 'readwrite']:
+        x = np.ones([10, 10])
+        a = x
+        b = x.T
+        c = x
+        with nditer([a, b, c], ['copy_if_overlap'],
+                   [['readonly'], ['readonly'], [flag]]) as i:
+            a2, b2, c2 = i.operands
+            assert_(not np.shares_memory(a2, c2))
+            assert_(not np.shares_memory(b2, c2))
+
+    # Copy not needed, 3 ops, read-only overlap
+    x = np.ones([10, 10])
+    a = x
+    b = x.T
+    c = x
+    i = nditer([a, b, c], ['copy_if_overlap'],
+               [['readonly'], ['readonly'], ['readonly']])
+    a2, b2, c2 = i.operands
+    assert_(a is a2)
+    assert_(b is b2)
+    assert_(c is c2)
+
+    # Copy not needed, 3 ops, read-only overlap
+    x = np.ones([10, 10])
+    a = x
+    b = np.ones([10, 10])
+    c = x.T
+    i = nditer([a, b, c], ['copy_if_overlap'],
+               [['readonly'], ['writeonly'], ['readonly']])
+    a2, b2, c2 = i.operands
+    assert_(a is a2)
+    assert_(b is b2)
+    assert_(c is c2)
+
+    # Copy not needed, 3 ops, write-only overlap
+    x = np.arange(7)
+    a = x[:3]
+    b = x[3:6]
+    c = x[4:7]
+    i = nditer([a, b, c], ['copy_if_overlap'],
+               [['readonly'], ['writeonly'], ['writeonly']])
+    a2, b2, c2 = i.operands
+    assert_(a is a2)
+    assert_(b is b2)
+    assert_(c is c2)
+
+def test_iter_op_axes():
+    # Check that custom axes work
+
+    # Reverse the axes
+    a = arange(6).reshape(2, 3)
+    i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]])
+    assert_(all([x == y for (x, y) in i]))
+    a = arange(24).reshape(2, 3, 4)
+    i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None])
+    assert_(all([x == y for (x, y) in i]))
+
+    # Broadcast 1D to any dimension
+    a = arange(1, 31).reshape(2, 3, 5)
+    b = arange(1, 3)
+    i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]])
+    assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel())
+    b = arange(1, 4)
+    i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]])
+    assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel())
+    b = arange(1, 6)
+    i = nditer([a, b], [], [['readonly']]*2,
+                            op_axes=[None, [np.newaxis, np.newaxis, 0]])
+    assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel())
+
+    # Inner product-style broadcasting
+    a = arange(24).reshape(2, 3, 4)
+    b = arange(40).reshape(5, 2, 4)
+    i = nditer([a, b], ['multi_index'], [['readonly']]*2,
+                            op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]])
+    assert_equal(i.shape, (2, 3, 5, 2))
+
+    # Matrix product-style broadcasting
+    a = arange(12).reshape(3, 4)
+    b = arange(20).reshape(4, 5)
+    i = nditer([a, b], ['multi_index'], [['readonly']]*2,
+                            op_axes=[[0, -1], [-1, 1]])
+    assert_equal(i.shape, (3, 5))
+
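+# Illustrative sketch (not an upstream helper): -1 in op_axes means "this
+# operand does not occupy that iterator axis", which is all an outer product
+# needs; each 1-D input spans one axis of the (3, 5) iteration space.
+def _sketch_op_axes_outer_product():
+    a, b = np.arange(3), np.arange(5)
+    it = np.nditer([a, b], ['multi_index'], [['readonly']]*2,
+                   op_axes=[[0, -1], [-1, 1]])
+    assert it.shape == (3, 5)
+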
+def test_iter_op_axes_errors():
+    # Check that custom axes throw errors for bad inputs
+
+    # Wrong number of items in op_axes
+    a = arange(6).reshape(2, 3)
+    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+                                    op_axes=[[0], [1], [0]])
+    # Out of bounds items in op_axes
+    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+                                    op_axes=[[2, 1], [0, 1]])
+    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+                                    op_axes=[[0, 1], [2, -1]])
+    # Duplicate items in op_axes
+    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+                                    op_axes=[[0, 0], [0, 1]])
+    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+                                    op_axes=[[0, 1], [1, 1]])
+
+    # Different sized arrays in op_axes
+    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+                                    op_axes=[[0, 1], [0, 1, 0]])
+
+    # Non-broadcastable dimensions in the result
+    assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+                                    op_axes=[[0, 1], [1, 0]])
+
+def test_iter_copy():
+    # Check that copying the iterator works correctly
+    a = arange(24).reshape(2, 3, 4)
+
+    # Simple iterator
+    i = nditer(a)
+    j = i.copy()
+    assert_equal([x[()] for x in i], [x[()] for x in j])
+
+    i.iterindex = 3
+    j = i.copy()
+    assert_equal([x[()] for x in i], [x[()] for x in j])
+
+    # Buffered iterator
+    i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3)
+    j = i.copy()
+    assert_equal([x[()] for x in i], [x[()] for x in j])
+
+    i.iterindex = 3
+    j = i.copy()
+    assert_equal([x[()] for x in i], [x[()] for x in j])
+
+    i.iterrange = (3, 9)
+    j = i.copy()
+    assert_equal([x[()] for x in i], [x[()] for x in j])
+
+    i.iterrange = (2, 18)
+    next(i)
+    next(i)
+    j = i.copy()
+    assert_equal([x[()] for x in i], [x[()] for x in j])
+
+    # Casting iterator
+    with nditer(a, ['buffered'], order='F', casting='unsafe',
+                op_dtypes='f8', buffersize=5) as i:
+        j = i.copy()
+    assert_equal([x[()] for x in j], a.ravel(order='F'))
+
+    a = arange(24, dtype='<i4').reshape(2, 3, 4).T
+    with nditer(a, ['buffered'], order='F', casting='unsafe',
+                op_dtypes='>f8', buffersize=5) as i:
+        j = i.copy()
+    assert_equal([x[()] for x in j], a.ravel(order='F'))
+
+
+def test_iter_copy_casts_structured():
+    # Test a complicated structured dtype for the legacy cast internal checks
+    # It includes a structured -> unstructured (any to object), and many other
+    # casts, which cause this to require all steps in the casting machinery
+    # one level down as well as the iterator copy (which uses NpyAuxData clone)
+    in_dtype = np.dtype([("a", np.dtype("i,")),
+                         ("b", np.dtype(">i,<i,>d,S17,>d,(3)f,O,i1"))])
+    out_dtype = np.dtype([("a", np.dtype("O")),
+                          ("b", np.dtype(">i,>i,S17,>d,>U3,(3)d,i1,O"))])
+    arr = np.ones(1000, dtype=in_dtype)
+
+    it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"],
+                   op_dtypes=[out_dtype], casting="unsafe")
+    it_copy = it.copy()
+
+    res1 = next(it)
+    del it
+    res2 = next(it_copy)
+    del it_copy
+
+    expected = arr["a"].astype(out_dtype["a"])
+    assert_array_equal(res1["a"], expected)
+    assert_array_equal(res2["a"], expected)
+
+    for field in in_dtype["b"].names:
+        # Note that the .base avoids the subarray field
+        expected = arr["b"][field].astype(out_dtype["b"][field].base)
+        assert_array_equal(res1["b"][field], expected)
+        assert_array_equal(res2["b"][field], expected)
+
+
+def test_iter_copy_casts_structured2():
+    # Similar to the above, this is a fairly arcane test to cover internals
+    in_dtype = np.dtype([("a", np.dtype("O,O")),
+                         ("b", np.dtype("(5)O,(3)O,(1,)O,(1,)i,(1,)O"))])
+    out_dtype = np.dtype([("a", np.dtype("O")),
+                          ("b", np.dtype("O,(3)i,(4)O,(4)O,(4)i"))])
+
+    arr = np.ones(1, dtype=in_dtype)
+    it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"],
+                   op_dtypes=[out_dtype], casting="unsafe")
+    it_copy = it.copy()
+
+    res1 = next(it)
+    del it
+    res2 = next(it_copy)
+    del it_copy
+
+    # Array of two structured scalars:
+    for res in res1, res2:
+        # Cast to tuple by getitem; this behaviour is odd and may change:
+        assert type(res["a"][0]) == tuple
+        assert res["a"][0] == (1, 1)
+
+    for res in res1, res2:
+        assert_array_equal(res["b"]["f0"][0], np.ones(5, dtype=object))
+        assert_array_equal(res["b"]["f1"], np.ones((1, 3), dtype="i"))
+        assert res["b"]["f2"].shape == (1, 4)
+        assert_array_equal(res["b"]["f2"][0], np.ones(4, dtype=object))
+        assert_array_equal(res["b"]["f3"][0], np.ones(4, dtype=object))
+        assert_array_equal(res["b"]["f4"][0], np.ones(4, dtype="i"))
+
+
+def test_iter_allocate_output_simple():
+    # Check that the iterator will properly allocate outputs
+
+    # Simple case
+    a = arange(6)
+    i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
+                        op_dtypes=[None, np.dtype('f4')])
+    assert_equal(i.operands[1].shape, a.shape)
+    assert_equal(i.operands[1].dtype, np.dtype('f4'))
+
+def test_iter_allocate_output_buffered_readwrite():
+    # Allocated output with buffering + delay_bufalloc
+
+    a = arange(6)
+    i = nditer([a, None], ['buffered', 'delay_bufalloc'],
+                        [['readonly'], ['allocate', 'readwrite']])
+    with i:
+        i.operands[1][:] = 1
+        i.reset()
+        for x in i:
+            x[1][...] += x[0][...]
+        assert_equal(i.operands[1], a+1)
+
+def test_iter_allocate_output_itorder():
+    # The allocated output should match the iteration order
+
+    # C-order input, best iteration order
+    a = arange(6, dtype='i4').reshape(2, 3)
+    i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
+                        op_dtypes=[None, np.dtype('f4')])
+    assert_equal(i.operands[1].shape, a.shape)
+    assert_equal(i.operands[1].strides, a.strides)
+    assert_equal(i.operands[1].dtype, np.dtype('f4'))
+    # F-order input, best iteration order
+    a = arange(24, dtype='i4').reshape(2, 3, 4).T
+    i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
+                        op_dtypes=[None, np.dtype('f4')])
+    assert_equal(i.operands[1].shape, a.shape)
+    assert_equal(i.operands[1].strides, a.strides)
+    assert_equal(i.operands[1].dtype, np.dtype('f4'))
+    # Non-contiguous input, C iteration order
+    a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1)
+    i = nditer([a, None], [],
+                        [['readonly'], ['writeonly', 'allocate']],
+                        order='C',
+                        op_dtypes=[None, np.dtype('f4')])
+    assert_equal(i.operands[1].shape, a.shape)
+    assert_equal(i.operands[1].strides, (32, 16, 4))
+    assert_equal(i.operands[1].dtype, np.dtype('f4'))
+
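+# Illustrative sketch (not an upstream helper): without a forced order, an
+# allocated output inherits the input's memory layout, so a Fortran-ordered
+# input yields a Fortran-ordered result.
+def _sketch_allocated_output_layout():
+    a = np.asfortranarray(np.zeros((2, 3), dtype='f8'))
+    it = np.nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']])
+    assert it.operands[1].flags.f_contiguous
+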
+def test_iter_allocate_output_opaxes():
+    # Specifying op_axes should work
+
+    a = arange(24, dtype='i4').reshape(2, 3, 4)
+    i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']],
+                        op_dtypes=[np.dtype('u4'), None],
+                        op_axes=[[1, 2, 0], None])
+    assert_equal(i.operands[0].shape, (4, 2, 3))
+    assert_equal(i.operands[0].strides, (4, 48, 16))
+    assert_equal(i.operands[0].dtype, np.dtype('u4'))
+
+def test_iter_allocate_output_types_promotion():
+    # Check type promotion of automatic outputs
+
+    i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [],
+                    [['readonly']]*2+[['writeonly', 'allocate']])
+    assert_equal(i.dtypes[2], np.dtype('f8'))
+    i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [],
+                    [['readonly']]*2+[['writeonly', 'allocate']])
+    assert_equal(i.dtypes[2], np.dtype('f8'))
+    i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [],
+                    [['readonly']]*2+[['writeonly', 'allocate']])
+    assert_equal(i.dtypes[2], np.dtype('f4'))
+    i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [],
+                    [['readonly']]*2+[['writeonly', 'allocate']])
+    assert_equal(i.dtypes[2], np.dtype('u4'))
+    i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [],
+                    [['readonly']]*2+[['writeonly', 'allocate']])
+    assert_equal(i.dtypes[2], np.dtype('i8'))
+
+def test_iter_allocate_output_types_byte_order():
+    # Verify the rules for byte order changes
+
+    # When there's just one input, the output type exactly matches
+    a = array([3], dtype='u4').newbyteorder()
+    i = nditer([a, None], [],
+                    [['readonly'], ['writeonly', 'allocate']])
+    assert_equal(i.dtypes[0], i.dtypes[1])
+    # With two or more inputs, the output type is in native byte order
+    i = nditer([a, a, None], [],
+                    [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+    assert_(i.dtypes[0] != i.dtypes[2])
+    assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2])
+
+def test_iter_allocate_output_types_scalar():
+    # If the inputs are all scalars, the output should be a scalar
+
+    i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [],
+                [['writeonly', 'allocate']] + [['readonly']]*4)
+    assert_equal(i.operands[0].dtype, np.dtype('complex128'))
+    assert_equal(i.operands[0].ndim, 0)
+
+def test_iter_allocate_output_subtype():
+    # Make sure that the subtype with priority wins
+    class MyNDArray(np.ndarray):
+        __array_priority__ = 15
+
+    # subclass vs ndarray
+    a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
+    b = np.arange(4).reshape(2, 2).T
+    i = nditer([a, b, None], [],
+               [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+    assert_equal(type(a), type(i.operands[2]))
+    assert_(type(b) is not type(i.operands[2]))
+    assert_equal(i.operands[2].shape, (2, 2))
+
+    # If subtypes are disabled, we should get back an ndarray.
+    i = nditer([a, b, None], [],
+               [['readonly'], ['readonly'],
+                ['writeonly', 'allocate', 'no_subtype']])
+    assert_equal(type(b), type(i.operands[2]))
+    assert_(type(a) is not type(i.operands[2]))
+    assert_equal(i.operands[2].shape, (2, 2))
+
+def test_iter_allocate_output_errors():
+    # Check that the iterator will throw errors for bad output allocations
+
+    # Need an input if no output data type is specified
+    a = arange(6)
+    assert_raises(TypeError, nditer, [a, None], [],
+                        [['writeonly'], ['writeonly', 'allocate']])
+    # Allocated output should be flagged for writing
+    assert_raises(ValueError, nditer, [a, None], [],
+                        [['readonly'], ['allocate', 'readonly']])
+    # Allocated output can't have buffering without delayed bufalloc
+    assert_raises(ValueError, nditer, [a, None], ['buffered'],
+                                            ['allocate', 'readwrite'])
+    # Must specify dtype if there are no inputs (cannot promote existing
+    # ones; maybe this should use the 'f4' below, but historically it does not.)
+    assert_raises(TypeError, nditer, [None, None], [],
+                        [['writeonly', 'allocate'],
+                         ['writeonly', 'allocate']],
+                        op_dtypes=[None, np.dtype('f4')])
+    # If using op_axes, must specify all the axes
+    a = arange(24, dtype='i4').reshape(2, 3, 4)
+    assert_raises(ValueError, nditer, [a, None], [],
+                        [['readonly'], ['writeonly', 'allocate']],
+                        op_dtypes=[None, np.dtype('f4')],
+                        op_axes=[None, [0, np.newaxis, 1]])
+    # If using op_axes, the axes must be within bounds
+    assert_raises(ValueError, nditer, [a, None], [],
+                        [['readonly'], ['writeonly', 'allocate']],
+                        op_dtypes=[None, np.dtype('f4')],
+                        op_axes=[None, [0, 3, 1]])
+    # If using op_axes, there can't be duplicates
+    assert_raises(ValueError, nditer, [a, None], [],
+                        [['readonly'], ['writeonly', 'allocate']],
+                        op_dtypes=[None, np.dtype('f4')],
+                        op_axes=[None, [0, 2, 1, 0]])
+    # In a reduction, not all axes need to be specified, but a hole
+    # (np.newaxis) in op_axes is an error.
+    a = arange(24, dtype='i4').reshape(2, 3, 4)
+    assert_raises(ValueError, nditer, [a, None], ["reduce_ok"],
+                        [['readonly'], ['readwrite', 'allocate']],
+                        op_dtypes=[None, np.dtype('f4')],
+                        op_axes=[None, [0, np.newaxis, 2]])
+
+def test_all_allocated():
+    # When no output and no shape is given, `()` is used as shape.
+    i = np.nditer([None], op_dtypes=["int64"])
+    assert i.operands[0].shape == ()
+    assert i.dtypes == (np.dtype("int64"),)
+
+    i = np.nditer([None], op_dtypes=["int64"], itershape=(2, 3, 4))
+    assert i.operands[0].shape == (2, 3, 4)
+
+def test_iter_remove_axis():
+    a = arange(24).reshape(2, 3, 4)
+
+    i = nditer(a, ['multi_index'])
+    i.remove_axis(1)
+    assert_equal([x for x in i], a[:, 0,:].ravel())
+
+    a = a[::-1,:,:]
+    i = nditer(a, ['multi_index'])
+    i.remove_axis(0)
+    assert_equal([x for x in i], a[0,:,:].ravel())
+
+def test_iter_remove_multi_index_inner_loop():
+    # Check that removing multi-index support works
+
+    a = arange(24).reshape(2, 3, 4)
+
+    i = nditer(a, ['multi_index'])
+    assert_equal(i.ndim, 3)
+    assert_equal(i.shape, (2, 3, 4))
+    assert_equal(i.itviews[0].shape, (2, 3, 4))
+
+    # Removing the multi-index tracking causes all dimensions to coalesce
+    before = [x for x in i]
+    i.remove_multi_index()
+    after = [x for x in i]
+
+    assert_equal(before, after)
+    assert_equal(i.ndim, 1)
+    assert_raises(ValueError, lambda i:i.shape, i)
+    assert_equal(i.itviews[0].shape, (24,))
+
+    # Removing the inner loop means there's just one iteration
+    i.reset()
+    assert_equal(i.itersize, 24)
+    assert_equal(i[0].shape, tuple())
+    i.enable_external_loop()
+    assert_equal(i.itersize, 24)
+    assert_equal(i[0].shape, (24,))
+    assert_equal(i.value, arange(24))
+
+def test_iter_iterindex():
+    # Make sure iterindex works
+
+    buffersize = 5
+    a = arange(24).reshape(4, 3, 2)
+    for flags in ([], ['buffered']):
+        i = nditer(a, flags, buffersize=buffersize)
+        assert_equal(iter_iterindices(i), list(range(24)))
+        i.iterindex = 2
+        assert_equal(iter_iterindices(i), list(range(2, 24)))
+
+        i = nditer(a, flags, order='F', buffersize=buffersize)
+        assert_equal(iter_iterindices(i), list(range(24)))
+        i.iterindex = 5
+        assert_equal(iter_iterindices(i), list(range(5, 24)))
+
+        i = nditer(a[::-1], flags, order='F', buffersize=buffersize)
+        assert_equal(iter_iterindices(i), list(range(24)))
+        i.iterindex = 9
+        assert_equal(iter_iterindices(i), list(range(9, 24)))
+
+        i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize)
+        assert_equal(iter_iterindices(i), list(range(24)))
+        i.iterindex = 13
+        assert_equal(iter_iterindices(i), list(range(13, 24)))
+
+        i = nditer(a[::1, ::-1], flags, buffersize=buffersize)
+        assert_equal(iter_iterindices(i), list(range(24)))
+        i.iterindex = 23
+        assert_equal(iter_iterindices(i), list(range(23, 24)))
+        i.reset()
+        i.iterindex = 2
+        assert_equal(iter_iterindices(i), list(range(2, 24)))
+
+def test_iter_iterrange():
+    # Make sure getting and resetting the iterrange works
+
+    buffersize = 5
+    a = arange(24, dtype='i4').reshape(4, 3, 2)
+    a_fort = a.ravel(order='F')
+
+    i = nditer(a, ['ranged'], ['readonly'], order='F',
+                buffersize=buffersize)
+    assert_equal(i.iterrange, (0, 24))
+    assert_equal([x[()] for x in i], a_fort)
+    for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
+        i.iterrange = r
+        assert_equal(i.iterrange, r)
+        assert_equal([x[()] for x in i], a_fort[r[0]:r[1]])
+
+    i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F',
+                op_dtypes='f8', buffersize=buffersize)
+    assert_equal(i.iterrange, (0, 24))
+    assert_equal([x[()] for x in i], a_fort)
+    for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
+        i.iterrange = r
+        assert_equal(i.iterrange, r)
+        assert_equal([x[()] for x in i], a_fort[r[0]:r[1]])
+
+    def get_array(i):
+        val = np.array([], dtype='f8')
+        for x in i:
+            val = np.concatenate((val, x))
+        return val
+
+    i = nditer(a, ['ranged', 'buffered', 'external_loop'],
+                ['readonly'], order='F',
+                op_dtypes='f8', buffersize=buffersize)
+    assert_equal(i.iterrange, (0, 24))
+    assert_equal(get_array(i), a_fort)
+    for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
+        i.iterrange = r
+        assert_equal(i.iterrange, r)
+        assert_equal(get_array(i), a_fort[r[0]:r[1]])
+
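+# Illustrative sketch (not an upstream helper): disjoint iterranges partition
+# the iteration space, the primitive for handing slices of a single iteration
+# to separate workers.
+def _sketch_iterrange_partition():
+    a = np.arange(10)
+    seen = []
+    for start, stop in [(0, 4), (4, 10)]:
+        it = np.nditer(a, ['ranged'], [['readonly']])
+        it.iterrange = (start, stop)
+        seen.extend(int(x) for x in it)
+    assert seen == list(range(10))
+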
+def test_iter_buffering():
+    # Test buffering with several buffer sizes and types
+    arrays = []
+    # F-order swapped array
+    arrays.append(np.arange(24,
+                    dtype='c16').reshape(2, 3, 4).T.newbyteorder().byteswap())
+    # Contiguous 1-dimensional array
+    arrays.append(np.arange(10, dtype='f4'))
+    # Unaligned array
+    a = np.zeros((4*16+1,), dtype='i1')[1:]
+    a.dtype = 'i4'
+    a[:] = np.arange(16, dtype='i4')
+    arrays.append(a)
+    # 4-D F-order array
+    arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T)
+    for a in arrays:
+        for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024):
+            vals = []
+            i = nditer(a, ['buffered', 'external_loop'],
+                           [['readonly', 'nbo', 'aligned']],
+                           order='C',
+                           casting='equiv',
+                           buffersize=buffersize)
+            while not i.finished:
+                assert_(i[0].size <= buffersize)
+                vals.append(i[0].copy())
+                i.iternext()
+            assert_equal(np.concatenate(vals), a.ravel(order='C'))
+
+def test_iter_write_buffering():
+    # Test that buffering of writes is working
+
+    # F-order swapped array
+    a = np.arange(24).reshape(2, 3, 4).T.newbyteorder().byteswap()
+    i = nditer(a, ['buffered'],
+                   [['readwrite', 'nbo', 'aligned']],
+                   casting='equiv',
+                   order='C',
+                   buffersize=16)
+    x = 0
+    with i:
+        while not i.finished:
+            i[0] = x
+            x += 1
+            i.iternext()
+    assert_equal(a.ravel(order='C'), np.arange(24))
+
+def test_iter_buffering_delayed_alloc():
+    # Test that delaying buffer allocation works
+
+    a = np.arange(6)
+    b = np.arange(1, dtype='f4')
+    i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'],
+                    ['readwrite'],
+                    casting='unsafe',
+                    op_dtypes='f4')
+    assert_(i.has_delayed_bufalloc)
+    assert_raises(ValueError, lambda i:i.multi_index, i)
+    assert_raises(ValueError, lambda i:i[0], i)
+    assert_raises(ValueError, lambda i:i[0:2], i)
+
+    def assign_iter(i):
+        i[0] = 0
+    assert_raises(ValueError, assign_iter, i)
+
+    i.reset()
+    assert_(not i.has_delayed_bufalloc)
+    assert_equal(i.multi_index, (0,))
+    with i:
+        assert_equal(i[0], 0)
+        i[1] = 1
+        assert_equal(i[0:2], [0, 1])
+        assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6)))
+
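+# A minimal reduction sketch (not an upstream helper), mirroring the pattern
+# verified above: the output must be initialized before any buffer is filled,
+# which is exactly the window 'delay_bufalloc' plus reset() opens.
+def _sketch_buffered_reduction():
+    a = np.arange(6).reshape(2, 3)
+    it = np.nditer([a, None], ['reduce_ok', 'buffered', 'delay_bufalloc'],
+                   [['readonly'], ['readwrite', 'allocate']],
+                   op_axes=[None, [0, -1]])
+    with it:
+        it.operands[1][...] = 0  # safe: buffers are not allocated yet
+        it.reset()
+        for x, y in it:
+            y[...] += x
+        res = it.operands[1].copy()
+    assert res.tolist() == [3, 12]  # row sums of a
+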
+def test_iter_buffered_cast_simple():
+    # Test that buffering can handle a simple cast
+
+    a = np.arange(10, dtype='f4')
+    i = nditer(a, ['buffered', 'external_loop'],
+                   [['readwrite', 'nbo', 'aligned']],
+                   casting='same_kind',
+                   op_dtypes=[np.dtype('f8')],
+                   buffersize=3)
+    with i:
+        for v in i:
+            v[...] *= 2
+
+    assert_equal(a, 2*np.arange(10, dtype='f4'))
+
+def test_iter_buffered_cast_byteswapped():
+    # Test that buffering can handle a cast which requires swap->cast->swap
+
+    a = np.arange(10, dtype='f4').newbyteorder().byteswap()
+    i = nditer(a, ['buffered', 'external_loop'],
+                   [['readwrite', 'nbo', 'aligned']],
+                   casting='same_kind',
+                   op_dtypes=[np.dtype('f8').newbyteorder()],
+                   buffersize=3)
+    with i:
+        for v in i:
+            v[...] *= 2
+
+    assert_equal(a, 2*np.arange(10, dtype='f4'))
+
+    with suppress_warnings() as sup:
+        sup.filter(np.ComplexWarning)
+
+        a = np.arange(10, dtype='f8').newbyteorder().byteswap()
+        i = nditer(a, ['buffered', 'external_loop'],
+                       [['readwrite', 'nbo', 'aligned']],
+                       casting='unsafe',
+                       op_dtypes=[np.dtype('c8').newbyteorder()],
+                       buffersize=3)
+        with i:
+            for v in i:
+                v[...] *= 2
+
+        assert_equal(a, 2*np.arange(10, dtype='f8'))
+
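+# Illustrative sketch (not an upstream helper): with 'buffered' + 'nbo' the
+# loop body only ever sees native-endian values; the swap back to the array's
+# byte order happens when the buffer is written back.
+def _sketch_nonnative_writeback():
+    a = np.arange(4, dtype=np.dtype('f4').newbyteorder())
+    it = np.nditer(a, ['buffered'], [['readwrite', 'nbo', 'aligned']],
+                   casting='equiv')
+    with it:
+        for v in it:
+            assert v.dtype.byteorder == '='  # native inside the loop
+            v[...] += 1
+    assert a.tolist() == [1.0, 2.0, 3.0, 4.0]
+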
+def test_iter_buffered_cast_byteswapped_complex():
+    # Test that buffering can handle a cast which requires swap->cast->copy
+
+    a = np.arange(10, dtype='c8').newbyteorder().byteswap()
+    a += 2j
+    i = nditer(a, ['buffered', 'external_loop'],
+                   [['readwrite', 'nbo', 'aligned']],
+                   casting='same_kind',
+                   op_dtypes=[np.dtype('c16')],
+                   buffersize=3)
+    with i:
+        for v in i:
+            v[...] *= 2
+    assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
+
+    a = np.arange(10, dtype='c8')
+    a += 2j
+    i = nditer(a, ['buffered', 'external_loop'],
+                   [['readwrite', 'nbo', 'aligned']],
+                   casting='same_kind',
+                   op_dtypes=[np.dtype('c16').newbyteorder()],
+                   buffersize=3)
+    with i:
+        for v in i:
+            v[...] *= 2
+    assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
+
+    a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap()
+    a += 2j
+    i = nditer(a, ['buffered', 'external_loop'],
+                   [['readwrite', 'nbo', 'aligned']],
+                   casting='same_kind',
+                   op_dtypes=[np.dtype('c16')],
+                   buffersize=3)
+    with i:
+        for v in i:
+            v[...] *= 2
+    assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j)
+
+    a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap()
+    i = nditer(a, ['buffered', 'external_loop'],
+                   [['readwrite', 'nbo', 'aligned']],
+                   casting='same_kind',
+                   op_dtypes=[np.dtype('f4')],
+                   buffersize=7)
+    with i:
+        for v in i:
+            v[...] *= 2
+    assert_equal(a, 2*np.arange(10, dtype=np.longdouble))
+
+def test_iter_buffered_cast_structured_type():
+    # Tests buffering of structured types
+
+    # simple -> struct type (duplicates the value)
+    sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
+    a = np.arange(3, dtype='f4') + 0.5
+    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+                    casting='unsafe',
+                    op_dtypes=sdt)
+    vals = [np.array(x) for x in i]
+    assert_equal(vals[0]['a'], 0.5)
+    assert_equal(vals[0]['b'], 0)
+    assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
+    assert_equal(vals[0]['d'], 0.5)
+    assert_equal(vals[1]['a'], 1.5)
+    assert_equal(vals[1]['b'], 1)
+    assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
+    assert_equal(vals[1]['d'], 1.5)
+    assert_equal(vals[0].dtype, np.dtype(sdt))
+
+    # object -> struct type
+    sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
+    a = np.zeros((3,), dtype='O')
+    a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5)
+    a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5)
+    a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5)
+    if HAS_REFCOUNT:
+        rc = sys.getrefcount(a[0])
+    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+                    casting='unsafe',
+                    op_dtypes=sdt)
+    vals = [x.copy() for x in i]
+    assert_equal(vals[0]['a'], 0.5)
+    assert_equal(vals[0]['b'], 0)
+    assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
+    assert_equal(vals[0]['d'], 0.5)
+    assert_equal(vals[1]['a'], 1.5)
+    assert_equal(vals[1]['b'], 1)
+    assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
+    assert_equal(vals[1]['d'], 1.5)
+    assert_equal(vals[0].dtype, np.dtype(sdt))
+    vals, i, x = [None]*3
+    if HAS_REFCOUNT:
+        assert_equal(sys.getrefcount(a[0]), rc)
+
+    # single-field struct type -> simple
+    sdt = [('a', 'f4')]
+    a = np.array([(5.5,), (8,)], dtype=sdt)
+    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+                    casting='unsafe',
+                    op_dtypes='i4')
+    assert_equal([x_[()] for x_ in i], [5, 8])
+
+    # make sure multi-field struct type -> simple doesn't work
+    sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
+    a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
+    assert_raises(TypeError, lambda: (
+        nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+               casting='unsafe',
+               op_dtypes='i4')))
+
+    # struct type -> struct type (field-wise copy)
+    sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
+    sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')]
+    a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
+    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+                    casting='unsafe',
+                    op_dtypes=sdt2)
+    assert_equal(i[0].dtype, np.dtype(sdt2))
+    assert_equal([np.array(x_) for x_ in i],
+                 [np.array((1, 2, 3), dtype=sdt2),
+                  np.array((4, 5, 6), dtype=sdt2)])
+
+
+def test_iter_buffered_cast_structured_type_failure_with_cleanup():
+    # make sure struct type -> struct type with different
+    # number of fields fails
+    sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
+    sdt2 = [('b', 'O'), ('a', 'f8')]
+    a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
+
+    for intent in ["readwrite", "readonly", "writeonly"]:
+        # This test was initially designed to test an error at a different
+        # place, but will now raise earlier due to the cast not being possible:
+        # `assert np.can_cast(a.dtype, sdt2, casting="unsafe")` fails.
+        # Without a faulty DType, there is probably no reliable
+        # way to get the initial tested behaviour.
+        simple_arr = np.array([1, 2], dtype="i,i")  # requires clean up
+        with pytest.raises(TypeError):
+            nditer((simple_arr, a), ['buffered', 'refs_ok'], [intent, intent],
+                   casting='unsafe', op_dtypes=["f,f", sdt2])
+
+
+def test_buffered_cast_error_paths():
+    with pytest.raises(ValueError):
+        # Casting the `S1` input to the `i` buffer fails ("a" is not a number)
+        np.nditer((np.array("a", dtype="S1"),), op_dtypes=["i"],
+                  casting="unsafe", flags=["buffered"])
+
+    # The `i` operand is buffered as `S1`; flushing the buffer back fails
+    it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"],
+                   op_flags=["writeonly"], casting="unsafe", flags=["buffered"])
+    with pytest.raises(ValueError):
+        with it:
+            buf = next(it)
+            buf[...] = "a"  # cannot be converted to int.
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.")
+def test_buffered_cast_error_paths_unraisable():
+    # The following gives an unraisable error. Pytest sometimes captures that
+    # (depending on the Python and/or pytest version). So with Python>=3.8 this can
+    # probably be cleaned out in the future to check for
+    # pytest.PytestUnraisableExceptionWarning:
+    code = textwrap.dedent("""
+        import numpy as np
+    
+        it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"],
+                       op_flags=["writeonly"], casting="unsafe", flags=["buffered"])
+        buf = next(it)
+        buf[...] = "a"
+        del buf, it  # Flushing only happens during deallocate right now.
+        """)
+    res = subprocess.check_output([sys.executable, "-c", code],
+                                  stderr=subprocess.STDOUT, text=True)
+    assert "ValueError" in res
+
+
+def test_iter_buffered_cast_subarray():
+    # Tests buffering of subarrays
+
+    # one element -> many (copies it to all)
+    sdt1 = [('a', 'f4')]
+    sdt2 = [('a', 'f8', (3, 2, 2))]
+    a = np.zeros((6,), dtype=sdt1)
+    a['a'] = np.arange(6)
+    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+                    casting='unsafe',
+                    op_dtypes=sdt2)
+    assert_equal(i[0].dtype, np.dtype(sdt2))
+    for x, count in zip(i, list(range(6))):
+        assert_(np.all(x['a'] == count))
+
+    # one element -> many -> back (copies it to all)
+    sdt1 = [('a', 'O', (1, 1))]
+    sdt2 = [('a', 'O', (3, 2, 2))]
+    a = np.zeros((6,), dtype=sdt1)
+    a['a'][:, 0, 0] = np.arange(6)
+    i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
+                    casting='unsafe',
+                    op_dtypes=sdt2)
+    with i:
+        assert_equal(i[0].dtype, np.dtype(sdt2))
+        count = 0
+        for x in i:
+            assert_(np.all(x['a'] == count))
+            x['a'][0] += 2
+            count += 1
+    assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2)
+
+    # many -> one element -> back (copies just element 0)
+    sdt1 = [('a', 'O', (3, 2, 2))]
+    sdt2 = [('a', 'O', (1,))]
+    a = np.zeros((6,), dtype=sdt1)
+    a['a'][:, 0, 0, 0] = np.arange(6)
+    i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
+                    casting='unsafe',
+                    op_dtypes=sdt2)
+    with i:
+        assert_equal(i[0].dtype, np.dtype(sdt2))
+        count = 0
+        for x in i:
+            assert_equal(x['a'], count)
+            x['a'] += 2
+            count += 1
+    assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2)
+
+    # many -> one element -> back (copies just element 0)
+    sdt1 = [('a', 'f8', (3, 2, 2))]
+    sdt2 = [('a', 'O', (1,))]
+    a = np.zeros((6,), dtype=sdt1)
+    a['a'][:, 0, 0, 0] = np.arange(6)
+    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+                    casting='unsafe',
+                    op_dtypes=sdt2)
+    assert_equal(i[0].dtype, np.dtype(sdt2))
+    count = 0
+    for x in i:
+        assert_equal(x['a'], count)
+        count += 1
+
+    # many -> one element (copies just element 0)
+    sdt1 = [('a', 'O', (3, 2, 2))]
+    sdt2 = [('a', 'f4', (1,))]
+    a = np.zeros((6,), dtype=sdt1)
+    a['a'][:, 0, 0, 0] = np.arange(6)
+    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+                    casting='unsafe',
+                    op_dtypes=sdt2)
+    assert_equal(i[0].dtype, np.dtype(sdt2))
+    count = 0
+    for x in i:
+        assert_equal(x['a'], count)
+        count += 1
+
+    # many -> matching shape (straightforward copy)
+    sdt1 = [('a', 'O', (3, 2, 2))]
+    sdt2 = [('a', 'f4', (3, 2, 2))]
+    a = np.zeros((6,), dtype=sdt1)
+    a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2)
+    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+                    casting='unsafe',
+                    op_dtypes=sdt2)
+    assert_equal(i[0].dtype, np.dtype(sdt2))
+    count = 0
+    for x in i:
+        assert_equal(x['a'], a[count]['a'])
+        count += 1
+
+    # vector -> smaller vector (truncates)
+    sdt1 = [('a', 'f8', (6,))]
+    sdt2 = [('a', 'f4', (2,))]
+    a = np.zeros((6,), dtype=sdt1)
+    a['a'] = np.arange(6*6).reshape(6, 6)
+    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+                    casting='unsafe',
+                    op_dtypes=sdt2)
+    assert_equal(i[0].dtype, np.dtype(sdt2))
+    count = 0
+    for x in i:
+        assert_equal(x['a'], a[count]['a'][:2])
+        count += 1
+
+    # vector -> bigger vector (pads with zeros)
+    sdt1 = [('a', 'f8', (2,))]
+    sdt2 = [('a', 'f4', (6,))]
+    a = np.zeros((6,), dtype=sdt1)
+    a['a'] = np.arange(6*2).reshape(6, 2)
+    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+                    casting='unsafe',
+                    op_dtypes=sdt2)
+    assert_equal(i[0].dtype, np.dtype(sdt2))
+    count = 0
+    for x in i:
+        assert_equal(x['a'][:2], a[count]['a'])
+        assert_equal(x['a'][2:], [0, 0, 0, 0])
+        count += 1
+
+    # vector -> matrix (broadcasts)
+    sdt1 = [('a', 'f8', (2,))]
+    sdt2 = [('a', 'f4', (2, 2))]
+    a = np.zeros((6,), dtype=sdt1)
+    a['a'] = np.arange(6*2).reshape(6, 2)
+    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+                    casting='unsafe',
+                    op_dtypes=sdt2)
+    assert_equal(i[0].dtype, np.dtype(sdt2))
+    count = 0
+    for x in i:
+        assert_equal(x['a'][0], a[count]['a'])
+        assert_equal(x['a'][1], a[count]['a'])
+        count += 1
+
+    # vector -> matrix (broadcasts and zero-pads)
+    sdt1 = [('a', 'f8', (2, 1))]
+    sdt2 = [('a', 'f4', (3, 2))]
+    a = np.zeros((6,), dtype=sdt1)
+    a['a'] = np.arange(6*2).reshape(6, 2, 1)
+    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+                    casting='unsafe',
+                    op_dtypes=sdt2)
+    assert_equal(i[0].dtype, np.dtype(sdt2))
+    count = 0
+    for x in i:
+        assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
+        assert_equal(x['a'][:2, 1], a[count]['a'][:, 0])
+        assert_equal(x['a'][2,:], [0, 0])
+        count += 1
+
+    # matrix -> matrix (truncates and zero-pads)
+    sdt1 = [('a', 'f8', (2, 3))]
+    sdt2 = [('a', 'f4', (3, 2))]
+    a = np.zeros((6,), dtype=sdt1)
+    a['a'] = np.arange(6*2*3).reshape(6, 2, 3)
+    i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+                    casting='unsafe',
+                    op_dtypes=sdt2)
+    assert_equal(i[0].dtype, np.dtype(sdt2))
+    count = 0
+    for x in i:
+        assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
+        assert_equal(x['a'][:2, 1], a[count]['a'][:, 1])
+        assert_equal(x['a'][2,:], [0, 0])
+        count += 1
+
+def test_iter_buffering_badwriteback():
+    # Writing back from a buffer cannot combine elements
+
+    # a needs write buffering, but has a broadcast dimension
+    a = np.arange(6).reshape(2, 3, 1)
+    b = np.arange(12).reshape(2, 3, 2)
+    assert_raises(ValueError, nditer, [a, b],
+                  ['buffered', 'external_loop'],
+                  [['readwrite'], ['writeonly']],
+                  order='C')
+
+    # But if a is readonly, it's fine
+    nditer([a, b], ['buffered', 'external_loop'],
+           [['readonly'], ['writeonly']],
+           order='C')
+
+    # If a has just one element, it's fine too (constant 0 stride, a reduction)
+    a = np.arange(1).reshape(1, 1, 1)
+    nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'],
+           [['readwrite'], ['writeonly']],
+           order='C')
+
+    # check that it fails on other dimensions too
+    a = np.arange(6).reshape(1, 3, 2)
+    assert_raises(ValueError, nditer, [a, b],
+                  ['buffered', 'external_loop'],
+                  [['readwrite'], ['writeonly']],
+                  order='C')
+    a = np.arange(4).reshape(2, 1, 2)
+    assert_raises(ValueError, nditer, [a, b],
+                  ['buffered', 'external_loop'],
+                  [['readwrite'], ['writeonly']],
+                  order='C')
+
+def test_iter_buffering_string():
+    # Safe casting disallows shrinking strings
+    a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_)
+    assert_equal(a.dtype, np.dtype('S4'))
+    assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
+                  op_dtypes='S2')
+    i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6')
+    assert_equal(i[0], b'abc')
+    assert_equal(i[0].dtype, np.dtype('S6'))
+
+    a = np.array(['abc', 'a', 'abcd'], dtype=np.str_)
+    assert_equal(a.dtype, np.dtype('U4'))
+    assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
+                    op_dtypes='U2')
+    i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6')
+    assert_equal(i[0], 'abc')
+    assert_equal(i[0].dtype, np.dtype('U6'))
+
+def test_iter_buffering_growinner():
+    # Test that the inner loop grows when no buffering is needed
+    a = np.arange(30)
+    i = nditer(a, ['buffered', 'growinner', 'external_loop'],
+                           buffersize=5)
+    # Should end up with just one inner loop here
+    assert_equal(i[0].size, a.size)
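+
+# A contrast sketch (editor's addition, not part of the original suite):
+# without 'growinner' the buffered external loop stays capped at
+# `buffersize`, handing out six chunks of five instead of one chunk of 30.
+def _growinner_contrast_sketch():
+    a = np.arange(30)
+    i = nditer(a, ['buffered', 'external_loop'], buffersize=5)
+    assert_equal([x.size for x in i], [5] * 6)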
+
+
+@pytest.mark.slow
+def test_iter_buffered_reduce_reuse():
+    # large enough array for all views, including negative strides.
+    a = np.arange(2*3**5)[3**5:3**5+1]
+    flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok']
+    op_flags = [('readonly',), ('readwrite', 'allocate')]
+    op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]]
+    # wrong dtype to force buffering
+    op_dtypes = [float, a.dtype]
+
+    def get_params():
+        for xs in range(-3**2, 3**2 + 1):
+            for ys in range(xs, 3**2 + 1):
+                for op_axes in op_axes_list:
+                    # The last stride belongs to the reduced axis and is the
+                    # inner stride, so its value is not important for this test.
+                    strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize)
+                    arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides)
+
+                    for skip in [0, 1]:
+                        yield arr, op_axes, skip
+
+    for arr, op_axes, skip in get_params():
+        nditer2 = np.nditer([arr.copy(), None],
+                            op_axes=op_axes, flags=flags, op_flags=op_flags,
+                            op_dtypes=op_dtypes)
+        with nditer2:
+            nditer2.operands[-1][...] = 0
+            nditer2.reset()
+            nditer2.iterindex = skip
+
+            for (a2_in, b2_in) in nditer2:
+                b2_in += a2_in.astype(np.int_)
+
+            comp_res = nditer2.operands[-1]
+
+        for bufsize in range(0, 3**3):
+            nditer1 = np.nditer([arr, None],
+                                op_axes=op_axes, flags=flags, op_flags=op_flags,
+                                buffersize=bufsize, op_dtypes=op_dtypes)
+            with nditer1:
+                nditer1.operands[-1][...] = 0
+                nditer1.reset()
+                nditer1.iterindex = skip
+
+                for (a1_in, b1_in) in nditer1:
+                    b1_in += a1_in.astype(np.int_)
+
+                res = nditer1.operands[-1]
+            assert_array_equal(res, comp_res)
+
+
+def test_iter_no_broadcast():
+    # Test that the no_broadcast flag works
+    a = np.arange(24).reshape(2, 3, 4)
+    b = np.arange(6).reshape(2, 3, 1)
+    c = np.arange(12).reshape(3, 4)
+
+    nditer([a, b, c], [],
+           [['readonly', 'no_broadcast'],
+            ['readonly'], ['readonly']])
+    assert_raises(ValueError, nditer, [a, b, c], [],
+                  [['readonly'], ['readonly', 'no_broadcast'], ['readonly']])
+    assert_raises(ValueError, nditer, [a, b, c], [],
+                  [['readonly'], ['readonly'], ['readonly', 'no_broadcast']])
+
+
+class TestIterNested:
+
+    def test_basic(self):
+        # Test nested iteration basic usage
+        a = arange(12).reshape(2, 3, 2)
+
+        i, j = np.nested_iters(a, [[0], [1, 2]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
+
+        i, j = np.nested_iters(a, [[0, 1], [2]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
+
+        i, j = np.nested_iters(a, [[0, 2], [1]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+    def test_reorder(self):
+        # Test nested iteration basic usage
+        a = arange(12).reshape(2, 3, 2)
+
+        # In 'K' order (default), it gets reordered
+        i, j = np.nested_iters(a, [[0], [2, 1]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
+
+        i, j = np.nested_iters(a, [[1, 0], [2]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
+
+        i, j = np.nested_iters(a, [[2, 0], [1]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+        # In 'C' order, it doesn't
+        i, j = np.nested_iters(a, [[0], [2, 1]], order='C')
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]])
+
+        i, j = np.nested_iters(a, [[1, 0], [2]], order='C')
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]])
+
+        i, j = np.nested_iters(a, [[2, 0], [1]], order='C')
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]])
+
+    def test_flip_axes(self):
+        # Test nested iteration with negative axes
+        a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1]
+
+        # In 'K' order (default), the axes all get flipped
+        i, j = np.nested_iters(a, [[0], [1, 2]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
+
+        i, j = np.nested_iters(a, [[0, 1], [2]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
+
+        i, j = np.nested_iters(a, [[0, 2], [1]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+        # In 'C' order, flipping axes is disabled
+        i, j = np.nested_iters(a, [[0], [1, 2]], order='C')
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]])
+
+        i, j = np.nested_iters(a, [[0, 1], [2]], order='C')
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]])
+
+        i, j = np.nested_iters(a, [[0, 2], [1]], order='C')
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]])
+
+    def test_broadcast(self):
+        # Test nested iteration with broadcasting
+        a = arange(2).reshape(2, 1)
+        b = arange(3).reshape(1, 3)
+
+        i, j = np.nested_iters([a, b], [[0], [1]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]])
+
+        i, j = np.nested_iters([a, b], [[1], [0]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]])
+
+    def test_dtype_copy(self):
+        # Test nested iteration with a copy to change dtype
+
+        # copy
+        a = arange(6, dtype='i4').reshape(2, 3)
+        i, j = np.nested_iters(a, [[0], [1]],
+                            op_flags=['readonly', 'copy'],
+                            op_dtypes='f8')
+        assert_equal(j[0].dtype, np.dtype('f8'))
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
+        vals = None
+
+        # writebackifcopy - using context manager
+        a = arange(6, dtype='f4').reshape(2, 3)
+        i, j = np.nested_iters(a, [[0], [1]],
+                            op_flags=['readwrite', 'updateifcopy'],
+                            casting='same_kind',
+                            op_dtypes='f8')
+        with i, j:
+            assert_equal(j[0].dtype, np.dtype('f8'))
+            for x in i:
+                for y in j:
+                    y[...] += 1
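+            # The cast copies were updated in place; writeback to `a` only
+            # happens when the context manager exits, as checked below.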
+            assert_equal(a, [[0, 1, 2], [3, 4, 5]])
+        assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+        # writebackifcopy - using close()
+        a = arange(6, dtype='f4').reshape(2, 3)
+        i, j = np.nested_iters(a, [[0], [1]],
+                            op_flags=['readwrite', 'updateifcopy'],
+                            casting='same_kind',
+                            op_dtypes='f8')
+        assert_equal(j[0].dtype, np.dtype('f8'))
+        for x in i:
+            for y in j:
+                y[...] += 1
+        assert_equal(a, [[0, 1, 2], [3, 4, 5]])
+        i.close()
+        j.close()
+        assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+    def test_dtype_buffered(self):
+        # Test nested iteration with buffering to change dtype
+
+        a = arange(6, dtype='f4').reshape(2, 3)
+        i, j = np.nested_iters(a, [[0], [1]],
+                            flags=['buffered'],
+                            op_flags=['readwrite'],
+                            casting='same_kind',
+                            op_dtypes='f8')
+        assert_equal(j[0].dtype, np.dtype('f8'))
+        for x in i:
+            for y in j:
+                y[...] += 1
+        assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+    def test_0d(self):
+        a = np.arange(12).reshape(2, 3, 2)
+        i, j = np.nested_iters(a, [[], [1, 0, 2]])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])
+
+        i, j = np.nested_iters(a, [[1, 0, 2], []])
+        vals = [list(j) for _ in i]
+        assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]])
+
+        i, j, k = np.nested_iters(a, [[2, 0], [], [1]])
+        vals = []
+        for x in i:
+            for y in j:
+                vals.append([z for z in k])
+        assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+    def test_iter_nested_iters_dtype_buffered(self):
+        # Test nested iteration with buffering to change dtype
+
+        a = arange(6, dtype='f4').reshape(2, 3)
+        i, j = np.nested_iters(a, [[0], [1]],
+                            flags=['buffered'],
+                            op_flags=['readwrite'],
+                            casting='same_kind',
+                            op_dtypes='f8')
+        with i, j:
+            assert_equal(j[0].dtype, np.dtype('f8'))
+            for x in i:
+                for y in j:
+                    y[...] += 1
+        assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+def test_iter_reduction_error():
+
+    a = np.arange(6)
+    assert_raises(ValueError, nditer, [a, None], [],
+                    [['readonly'], ['readwrite', 'allocate']],
+                    op_axes=[[0], [-1]])
+
+    a = np.arange(6).reshape(2, 3)
+    assert_raises(ValueError, nditer, [a, None], ['external_loop'],
+                    [['readonly'], ['readwrite', 'allocate']],
+                    op_axes=[[0, 1], [-1, -1]])
+
+def test_iter_reduction():
+    # Test doing reductions with the iterator
+
+    a = np.arange(6)
+    i = nditer([a, None], ['reduce_ok'],
+                    [['readonly'], ['readwrite', 'allocate']],
+                    op_axes=[[0], [-1]])
+    # Need to initialize the output operand to the addition unit
+    with i:
+        i.operands[1][...] = 0
+        # Do the reduction
+        for x, y in i:
+            y[...] += x
+        # Since no axes were specified, should have allocated a scalar
+        assert_equal(i.operands[1].ndim, 0)
+        assert_equal(i.operands[1], np.sum(a))
+
+    a = np.arange(6).reshape(2, 3)
+    i = nditer([a, None], ['reduce_ok', 'external_loop'],
+                    [['readonly'], ['readwrite', 'allocate']],
+                    op_axes=[[0, 1], [-1, -1]])
+    # Need to initialize the output operand to the addition unit
+    with i:
+        i.operands[1][...] = 0
+        # Reduction shape/strides for the output
+        assert_equal(i[1].shape, (6,))
+        assert_equal(i[1].strides, (0,))
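+        # (the lone output element is visited once per input element, so
+        # it is exposed as a zero-strided view of length 6)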
+        # Do the reduction
+        for x, y in i:
+            # Use a for loop instead of ``y[...] += x``
+            # (equivalent to ``y[...] = y[...].copy() + x``),
+            # because y has the zero strides that we use for the reduction
+            for j in range(len(y)):
+                y[j] += x[j]
+        # Since no axes were specified, should have allocated a scalar
+        assert_equal(i.operands[1].ndim, 0)
+        assert_equal(i.operands[1], np.sum(a))
+
+    # This is a tricky reduction case for the buffering double loop
+    # to handle
+    a = np.ones((2, 3, 5))
+    it1 = nditer([a, None], ['reduce_ok', 'external_loop'],
+                    [['readonly'], ['readwrite', 'allocate']],
+                    op_axes=[None, [0, -1, 1]])
+    it2 = nditer([a, None], ['reduce_ok', 'external_loop',
+                            'buffered', 'delay_bufalloc'],
+                    [['readonly'], ['readwrite', 'allocate']],
+                    op_axes=[None, [0, -1, 1]], buffersize=10)
+    with it1, it2:
+        it1.operands[1].fill(0)
+        it2.operands[1].fill(0)
+        it2.reset()
+        for x in it1:
+            x[1][...] += x[0]
+        for x in it2:
+            x[1][...] += x[0]
+        assert_equal(it1.operands[1], it2.operands[1])
+        assert_equal(it2.operands[1].sum(), a.size)
+
+def test_iter_buffering_reduction():
+    # Test doing buffered reductions with the iterator
+
+    a = np.arange(6)
+    b = np.array(0., dtype='f8').byteswap().newbyteorder()
+    i = nditer([a, b], ['reduce_ok', 'buffered'],
+                    [['readonly'], ['readwrite', 'nbo']],
+                    op_axes=[[0], [-1]])
+    with i:
+        assert_equal(i[1].dtype, np.dtype('f8'))
+        assert_(i[1].dtype != b.dtype)
+        # Do the reduction
+        for x, y in i:
+            y[...] += x
+    # b is 0-d (its op_axes entry was [-1]), so it accumulated the full sum
+    assert_equal(b, np.sum(a))
+
+    a = np.arange(6).reshape(2, 3)
+    b = np.array([0, 0], dtype='f8').byteswap().newbyteorder()
+    i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'],
+                    [['readonly'], ['readwrite', 'nbo']],
+                    op_axes=[[0, 1], [0, -1]])
+    # Reduction shape/strides for the output
+    with i:
+        assert_equal(i[1].shape, (3,))
+        assert_equal(i[1].strides, (0,))
+        # Do the reduction
+        for x, y in i:
+            # Use a for loop instead of ``y[...] += x``
+            # (equivalent to ``y[...] = y[...].copy() + x``),
+            # because y has the zero strides that we use for the reduction
+            for j in range(len(y)):
+                y[j] += x[j]
+    assert_equal(b, np.sum(a, axis=1))
+
+    # Iterator inner double loop was wrong on this one
+    p = np.arange(2) + 1
+    it = np.nditer([p, None],
+            ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'],
+            [['readonly'], ['readwrite', 'allocate']],
+            op_axes=[[-1, 0], [-1, -1]],
+            itershape=(2, 2))
+    with it:
+        it.operands[1].fill(0)
+        it.reset()
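+        # op_axes [-1, 0] maps p's only axis to the second iterator axis,
+        # so itershape (2, 2) broadcasts it to [[1, 2], [1, 2]]; the
+        # external loop exposes that flattened as [1, 2, 1, 2]: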
+        assert_equal(it[0], [1, 2, 1, 2])
+
+    # Iterator inner loop should take argument contiguity into account
+    x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0)
+    x[...] = np.arange(x.size).reshape(x.shape)
+    y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4)
+    y_base_copy = y_base.copy()
+    y = y_base[::2,:,None]
+
+    it = np.nditer([y, x],
+                   ['buffered', 'external_loop', 'reduce_ok'],
+                   [['readwrite'], ['readonly']])
+    with it:
+        for a, b in it:
+            a.fill(2)
+
+    assert_equal(y_base[1::2], y_base_copy[1::2])
+    assert_equal(y_base[::2], 2)
+
+def test_iter_buffering_reduction_reuse_reduce_loops():
+    # There was a bug triggering reuse of the reduce loop inappropriately,
+    # which caused processing to happen in unnecessarily small chunks
+    # and overran the buffer.
+
+    a = np.zeros((2, 7))
+    b = np.zeros((1, 7))
+    it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'],
+                    op_flags=[['readonly'], ['readwrite']],
+                    buffersize=5)
+
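+    # The reduce loop cannot run past the 7-element row boundary, so a
+    # buffersize of 5 should split each of the two rows into chunks of
+    # 5 and 2 rather than always filling the buffer.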
+    with it:
+        bufsizes = [x.shape[0] for x, y in it]
+    assert_equal(bufsizes, [5, 2, 5, 2])
+    assert_equal(sum(bufsizes), a.size)
+
+def test_iter_writemasked_badinput():
+    a = np.zeros((2, 3))
+    b = np.zeros((3,))
+    m = np.array([[True, True, False], [False, True, False]])
+    m2 = np.array([True, True, False])
+    m3 = np.array([0, 1, 1], dtype='u1')
+    mbad1 = np.array([0, 1, 1], dtype='i1')
+    mbad2 = np.array([0, 1, 1], dtype='f4')
+
+    # Need an 'arraymask' if any operand is 'writemasked'
+    assert_raises(ValueError, nditer, [a, m], [],
+                    [['readwrite', 'writemasked'], ['readonly']])
+
+    # A 'writemasked' operand must not be readonly
+    assert_raises(ValueError, nditer, [a, m], [],
+                    [['readonly', 'writemasked'], ['readonly', 'arraymask']])
+
+    # 'writemasked' and 'arraymask' may not be used together
+    assert_raises(ValueError, nditer, [a, m], [],
+                    [['readonly'], ['readwrite', 'arraymask', 'writemasked']])
+
+    # 'arraymask' may only be specified once
+    assert_raises(ValueError, nditer, [a, m, m2], [],
+                    [['readwrite', 'writemasked'],
+                     ['readonly', 'arraymask'],
+                     ['readonly', 'arraymask']])
+
+    # An 'arraymask' with nothing 'writemasked' also doesn't make sense
+    assert_raises(ValueError, nditer, [a, m], [],
+                    [['readwrite'], ['readonly', 'arraymask']])
+
+    # A writemasked reduction requires a similarly smaller mask
+    assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'],
+                    [['readonly'],
+                     ['readwrite', 'writemasked'],
+                     ['readonly', 'arraymask']])
+    # But this should work with a smaller/equal mask to the reduction operand
+    np.nditer([a, b, m2], ['reduce_ok'],
+                    [['readonly'],
+                     ['readwrite', 'writemasked'],
+                     ['readonly', 'arraymask']])
+    # The arraymask itself cannot be a reduction
+    assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'],
+                    [['readonly'],
+                     ['readwrite', 'writemasked'],
+                     ['readwrite', 'arraymask']])
+
+    # A uint8 mask is ok too
+    np.nditer([a, m3], ['buffered'],
+                    [['readwrite', 'writemasked'],
+                     ['readonly', 'arraymask']],
+                    op_dtypes=['f4', None],
+                    casting='same_kind')
+    # An int8 mask isn't ok
+    assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'],
+                    [['readwrite', 'writemasked'],
+                     ['readonly', 'arraymask']],
+                    op_dtypes=['f4', None],
+                    casting='same_kind')
+    # A float32 mask isn't ok
+    assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'],
+                    [['readwrite', 'writemasked'],
+                     ['readonly', 'arraymask']],
+                    op_dtypes=['f4', None],
+                    casting='same_kind')
+
+
+def _is_buffered(iterator):
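+    # A buffered iterator cannot provide consistent views of its operands,
+    # so accessing `itviews` raises ValueError; that failure is the probe.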
+    try:
+        iterator.itviews
+    except ValueError:
+        return True
+    return False
+
+@pytest.mark.parametrize("a",
+        [np.zeros((3,), dtype='f8'),
+         np.zeros((9876, 3*5), dtype='f8')[::2, :],
+         np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :],
+         # Also test with the last dimension strided (so it does not fit if
+         # there is repeated access)
+         np.zeros((9,), dtype='f8')[::3],
+         np.zeros((9876, 3*10), dtype='f8')[::2, ::5],
+         np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]])
+def test_iter_writemasked(a):
+    # Note, the slicing above is to ensure that nditer cannot combine multiple
+    # axes into one.  The repetition is just to make things a bit more
+    # interesting.
+    shape = a.shape
+    reps = shape[-1] // 3
+    msk = np.empty(shape, dtype=bool)
+    msk[...] = [True, True, False] * reps
+
+    # When buffering is unused, 'writemasked' effectively does nothing.
+    # It's up to the user of the iterator to obey the requested semantics.
+    it = np.nditer([a, msk], [],
+                [['readwrite', 'writemasked'],
+                 ['readonly', 'arraymask']])
+    with it:
+        for x, m in it:
+            x[...] = 1
+    # Because we violated the semantics, all the values became 1
+    assert_equal(a, np.broadcast_to([1, 1, 1] * reps, shape))
+
+    # Even if buffering is enabled, we still may be accessing the array
+    # directly.
+    it = np.nditer([a, msk], ['buffered'],
+                [['readwrite', 'writemasked'],
+                 ['readonly', 'arraymask']])
+    # @seberg: I honestly don't currently understand why a "buffered" iterator
+    # would end up not using a buffer for the small array here at least when
+    # "writemasked" is used, that seems confusing...  Check by testing for
+    # actual memory overlap!
+    is_buffered = True
+    with it:
+        for x, m in it:
+            x[...] = 2.5
+            if np.may_share_memory(x, a):
+                is_buffered = False
+
+    if not is_buffered:
+        # Because we violated the semantics, all the values became 2.5
+        assert_equal(a, np.broadcast_to([2.5, 2.5, 2.5] * reps, shape))
+    else:
+        # For large sizes, the iterator may be buffered:
+        assert_equal(a, np.broadcast_to([2.5, 2.5, 1] * reps, shape))
+        a[...] = 2.5
+
+    # If buffering is definitely happening, for instance because of
+    # a cast, only the items selected by the mask will be copied back from
+    # the buffer.
+    it = np.nditer([a, msk], ['buffered'],
+                [['readwrite', 'writemasked'],
+                 ['readonly', 'arraymask']],
+                op_dtypes=['i8', None],
+                casting='unsafe')
+    with it:
+        for x, m in it:
+            x[...] = 3
+    # Even though we violated the semantics, only the selected values
+    # were copied back
+    assert_equal(a, np.broadcast_to([3, 3, 2.5] * reps, shape))
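+
+# Editor's sketch (an addition, not from the original suite): the same
+# writemasked contract in isolation, assuming an i8 cast that forces
+# buffering, so only the masked items are copied back from the buffer.
+def _writemasked_buffered_sketch():
+    data = np.zeros(3)
+    msk = np.array([True, False, True])
+    it = np.nditer([data, msk], ['buffered'],
+                   [['readwrite', 'writemasked'],
+                    ['readonly', 'arraymask']],
+                   op_dtypes=['i8', None], casting='unsafe')
+    with it:
+        for x, m in it:
+            x[...] = 7
+    assert_equal(data, [7., 0., 7.])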
+
+
+@pytest.mark.parametrize(["mask", "mask_axes"], [
+        # Allocated operand (only broadcasts with -1)
+        (None, [-1, 0]),
+        # Reduction along the first dimension (with and without op_axes)
+        (np.zeros((1, 4), dtype="bool"), [0, 1]),
+        (np.zeros((1, 4), dtype="bool"), None),
+        # Test 0-D and -1 op_axes
+        (np.zeros(4, dtype="bool"), [-1, 0]),
+        (np.zeros((), dtype="bool"), [-1, -1]),
+        (np.zeros((), dtype="bool"), None)])
+def test_iter_writemasked_broadcast_error(mask, mask_axes):
+    # This assumes that a readwrite mask makes sense. This is likely not the
+    # case and should simply be deprecated.
+    arr = np.zeros((3, 4))
+    itflags = ["reduce_ok"]
+    mask_flags = ["arraymask", "readwrite", "allocate"]
+    a_flags = ["writeonly", "writemasked"]
+    if mask_axes is None:
+        op_axes = None
+    else:
+        op_axes = [mask_axes, [0, 1]]
+
+    with assert_raises(ValueError):
+        np.nditer((mask, arr), flags=itflags, op_flags=[mask_flags, a_flags],
+                  op_axes=op_axes)
+
+
+def test_iter_writemasked_decref():
+    # force casting (to make it interesting) by using a structured dtype.
+    arr = np.arange(10000).astype(">i,O")
+    original = arr.copy()
+    mask = np.random.randint(0, 2, size=10000).astype(bool)
+
+    it = np.nditer([arr, mask], ['buffered', "refs_ok"],
+                   [['readwrite', 'writemasked'],
+                    ['readonly', 'arraymask']],
+                   op_dtypes=[" string -> longdouble` for the
+        # conversion.  But Python may refuse `str(int)` for huge ints.
+        # In that case, RuntimeWarning would be correct, but conversion
+        # fails earlier (seems to happen on 32bit linux, possibly only debug).
+        if dtype in "gG":
+            try:
+                str(too_big_int)
+            except ValueError:
+                pytest.skip("`huge_int -> string -> longdouble` failed")
+
+        # Otherwise, we overflow to infinity:
+        with pytest.warns(RuntimeWarning):
+            res = scalar_type(1) + too_big_int
+        assert res.dtype == dtype
+        assert res == np.inf
+
+        with pytest.warns(RuntimeWarning):
+            # We force the dtype here, since windows may otherwise pick the
+            # double instead of the longdouble loop.  That leads to slightly
+            # different results (conversion of the int fails as above).
+            res = np.add(np.array(1, dtype=dtype), too_big_int, dtype=dtype)
+        assert res.dtype == dtype
+        assert res == np.inf
+
+
+@pytest.mark.parametrize("op", [operator.add, operator.pow, operator.eq])
+def test_weak_promotion_scalar_path(op):
+    # Some additional paths exercising the weak scalars.
+    np._set_promotion_state("weak")
+
+    # Integer path:
+    res = op(np.uint8(3), 5)
+    assert res == op(3, 5)
+    assert res.dtype == np.uint8 or res.dtype == bool
+
+    with pytest.raises(OverflowError):
+        op(np.uint8(3), 1000)
+
+    # Float path:
+    res = op(np.float32(3), 5.)
+    assert res == op(3., 5.)
+    assert res.dtype == np.float32 or res.dtype == bool
+
+
+def test_nep50_complex_promotion():
+    np._set_promotion_state("weak")
+
+    with pytest.warns(RuntimeWarning, match=".*overflow"):
+        res = np.complex64(3) + complex(2**300)
+
+    assert type(res) == np.complex64
+
+
+def test_nep50_integer_conversion_errors():
+    # Do not worry about warnings here (auto-fixture will reset).
+    np._set_promotion_state("weak")
+    # Implementation for error paths is mostly missing (as of writing)
+    with pytest.raises(OverflowError, match=".*uint8"):
+        np.array([1], np.uint8) + 300
+
+    with pytest.raises(OverflowError, match=".*uint8"):
+        np.uint8(1) + 300
+
+    # Error message depends on platform (maybe unsigned int or unsigned long)
+    with pytest.raises(OverflowError,
+            match="Python integer -1 out of bounds for uint8"):
+        np.uint8(1) + -1
+
+
+def test_nep50_integer_regression():
+    # Test the old integer promotion rules.  When the integer is too large,
+    # we need to keep using the old-style promotion.
+    np._set_promotion_state("legacy")
+    arr = np.array(1)
+    assert (arr + 2**63).dtype == np.float64
+    assert (arr[()] + 2**63).dtype == np.float64
+
+
+def test_nep50_with_axisconcatenator():
+    # I promised in the 1.25 release notes that this will be an error in
+    # the future;  test this (NEP 50 opt-in makes the deprecation an error).
+    np._set_promotion_state("weak")
+
+    with pytest.raises(OverflowError):
+        np.r_[np.arange(5, dtype=np.int8), 255]
+
+
+@pytest.mark.parametrize("ufunc", [np.add, np.power])
+@pytest.mark.parametrize("state", ["weak", "weak_and_warn"])
+def test_nep50_huge_integers(ufunc, state):
+    # Very large integers are complicated, because they go to uint64 or
+    # object dtype.  This test covers a few possible paths (some of which
+    # cannot give the NEP 50 warnings).
+    np._set_promotion_state(state)
+
+    with pytest.raises(OverflowError):
+        ufunc(np.int64(0), 2**63)  # 2**63 too large for int64
+
+    if state == "weak_and_warn":
+        with pytest.warns(UserWarning,
+                match="result dtype changed.*float64.*uint64"):
+            with pytest.raises(OverflowError):
+                ufunc(np.uint64(0), 2**64)
+    else:
+        with pytest.raises(OverflowError):
+            ufunc(np.uint64(0), 2**64)  # 2**64 cannot be represented by uint64
+
+    # However, 2**63 can be represented by uint64 (and that is used):
+    if state == "weak_and_warn":
+        with pytest.warns(UserWarning,
+                match="result dtype changed.*float64.*uint64"):
+            res = ufunc(np.uint64(1), 2**63)
+    else:
+        res = ufunc(np.uint64(1), 2**63)
+
+    assert res.dtype == np.uint64
+    assert res == ufunc(1, 2**63, dtype=object)
+
+    # The following paths fail to warn correctly about the change:
+    with pytest.raises(OverflowError):
+        ufunc(np.int64(1), 2**63)  # np.array(2**63) would go to uint
+
+    with pytest.raises(OverflowError):
+        ufunc(np.int64(1), 2**100)  # np.array(2**100) would go to object
+
+    # Legacy promotion would go via object dtype (and thus return a Python
+    # float); under weak promotion we get a NumPy float64:
+    res = ufunc(1.0, 2**100)
+    assert isinstance(res, np.float64)
+
+
+def test_nep50_in_concat_and_choose():
+    np._set_promotion_state("weak_and_warn")
+
+    with pytest.warns(UserWarning, match="result dtype changed"):
+        res = np.concatenate([np.float32(1), 1.], axis=None)
+    assert res.dtype == "float32"
+
+    with pytest.warns(UserWarning, match="result dtype changed"):
+        res = np.choose(1, [np.float32(1), 1.])
+    assert res.dtype == "float32"
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_numeric.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_numeric.py
new file mode 100644
index 00000000..d2d041f7
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_numeric.py
@@ -0,0 +1,3586 @@
+import sys
+import warnings
+import itertools
+import platform
+import pytest
+import math
+from decimal import Decimal
+
+import numpy as np
+from numpy.core import umath
+from numpy.random import rand, randint, randn
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_raises_regex,
+    assert_array_equal, assert_almost_equal, assert_array_almost_equal,
+    assert_warns, assert_array_max_ulp, HAS_REFCOUNT, IS_WASM
+    )
+from numpy.core._rational_tests import rational
+
+from hypothesis import given, strategies as st
+from hypothesis.extra import numpy as hynp
+
+
+class TestResize:
+    def test_copies(self):
+        A = np.array([[1, 2], [3, 4]])
+        Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
+        assert_equal(np.resize(A, (2, 4)), Ar1)
+
+        Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
+        assert_equal(np.resize(A, (4, 2)), Ar2)
+
+        Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
+        assert_equal(np.resize(A, (4, 3)), Ar3)
+
+    def test_repeats(self):
+        A = np.array([1, 2, 3])
+        Ar1 = np.array([[1, 2, 3, 1], [2, 3, 1, 2]])
+        assert_equal(np.resize(A, (2, 4)), Ar1)
+
+        Ar2 = np.array([[1, 2], [3, 1], [2, 3], [1, 2]])
+        assert_equal(np.resize(A, (4, 2)), Ar2)
+
+        Ar3 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])
+        assert_equal(np.resize(A, (4, 3)), Ar3)
+
+    def test_zeroresize(self):
+        A = np.array([[1, 2], [3, 4]])
+        Ar = np.resize(A, (0,))
+        assert_array_equal(Ar, np.array([]))
+        assert_equal(A.dtype, Ar.dtype)
+
+        Ar = np.resize(A, (0, 2))
+        assert_equal(Ar.shape, (0, 2))
+
+        Ar = np.resize(A, (2, 0))
+        assert_equal(Ar.shape, (2, 0))
+
+    def test_reshape_from_zero(self):
+        # See also gh-6740
+        A = np.zeros(0, dtype=[('a', np.float32)])
+        Ar = np.resize(A, (2, 1))
+        assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
+        assert_equal(A.dtype, Ar.dtype)
+
+    def test_negative_resize(self):
+        A = np.arange(0, 10, dtype=np.float32)
+        new_shape = (-10, -1)
+        with pytest.raises(ValueError, match=r"negative"):
+            np.resize(A, new_shape=new_shape)
+
+    def test_subclass(self):
+        class MyArray(np.ndarray):
+            __array_priority__ = 1.
+
+        my_arr = np.array([1]).view(MyArray)
+        assert type(np.resize(my_arr, 5)) is MyArray
+        assert type(np.resize(my_arr, 0)) is MyArray
+
+        my_arr = np.array([]).view(MyArray)
+        assert type(np.resize(my_arr, 5)) is MyArray
+
+
+class TestNonarrayArgs:
+    # check that functions wrap non-array arguments in arrays
+    def test_choose(self):
+        choices = [[0, 1, 2],
+                   [3, 4, 5],
+                   [5, 6, 7]]
+        tgt = [5, 1, 5]
+        a = [2, 0, 1]
+
+        out = np.choose(a, choices)
+        assert_equal(out, tgt)
+
+    def test_clip(self):
+        arr = [-1, 5, 2, 3, 10, -4, -9]
+        out = np.clip(arr, 2, 7)
+        tgt = [2, 5, 2, 3, 7, 2, 2]
+        assert_equal(out, tgt)
+
+    def test_compress(self):
+        arr = [[0, 1, 2, 3, 4],
+               [5, 6, 7, 8, 9]]
+        tgt = [[5, 6, 7, 8, 9]]
+        out = np.compress([0, 1], arr, axis=0)
+        assert_equal(out, tgt)
+
+    def test_count_nonzero(self):
+        arr = [[0, 1, 7, 0, 0],
+               [3, 0, 0, 2, 19]]
+        tgt = np.array([2, 3])
+        out = np.count_nonzero(arr, axis=1)
+        assert_equal(out, tgt)
+
+    def test_cumproduct(self):
+        A = [[1, 2, 3], [4, 5, 6]]
+        with assert_warns(DeprecationWarning):
+            expected = np.array([1, 2, 6, 24, 120, 720])
+            assert_(np.all(np.cumproduct(A) == expected))
+
+    def test_diagonal(self):
+        a = [[0, 1, 2, 3],
+             [4, 5, 6, 7],
+             [8, 9, 10, 11]]
+        out = np.diagonal(a)
+        tgt = [0, 5, 10]
+
+        assert_equal(out, tgt)
+
+    def test_mean(self):
+        A = [[1, 2, 3], [4, 5, 6]]
+        assert_(np.mean(A) == 3.5)
+        assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
+        assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
+
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', RuntimeWarning)
+            assert_(np.isnan(np.mean([])))
+            assert_(w[0].category is RuntimeWarning)
+
+    def test_ptp(self):
+        a = [3, 4, 5, 10, -3, -5, 6.0]
+        assert_equal(np.ptp(a, axis=0), 15.0)
+
+    def test_prod(self):
+        arr = [[1, 2, 3, 4],
+               [5, 6, 7, 9],
+               [10, 3, 4, 5]]
+        tgt = [24, 1890, 600]
+
+        assert_equal(np.prod(arr, axis=-1), tgt)
+
+    def test_ravel(self):
+        a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
+        tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
+        assert_equal(np.ravel(a), tgt)
+
+    def test_repeat(self):
+        a = [1, 2, 3]
+        tgt = [1, 1, 2, 2, 3, 3]
+
+        out = np.repeat(a, 2)
+        assert_equal(out, tgt)
+
+    def test_reshape(self):
+        arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
+        tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
+        assert_equal(np.reshape(arr, (2, 6)), tgt)
+
+    def test_round(self):
+        arr = [1.56, 72.54, 6.35, 3.25]
+        tgt = [1.6, 72.5, 6.4, 3.2]
+        assert_equal(np.around(arr, decimals=1), tgt)
+        s = np.float64(1.)
+        assert_(isinstance(s.round(), np.float64))
+        assert_equal(s.round(), 1.)
+
+    @pytest.mark.parametrize('dtype', [
+        np.int8, np.int16, np.int32, np.int64,
+        np.uint8, np.uint16, np.uint32, np.uint64,
+        np.float16, np.float32, np.float64,
+    ])
+    def test_dunder_round(self, dtype):
+        s = dtype(1)
+        assert_(isinstance(round(s), int))
+        assert_(isinstance(round(s, None), int))
+        assert_(isinstance(round(s, ndigits=None), int))
+        assert_equal(round(s), 1)
+        assert_equal(round(s, None), 1)
+        assert_equal(round(s, ndigits=None), 1)
+
+    @pytest.mark.parametrize('val, ndigits', [
+        pytest.param(2**31 - 1, -1,
+            marks=pytest.mark.xfail(reason="Out of range of int32")
+        ),
+        (2**31 - 1, 1-math.ceil(math.log10(2**31 - 1))),
+        (2**31 - 1, -math.ceil(math.log10(2**31 - 1)))
+    ])
+    def test_dunder_round_edgecases(self, val, ndigits):
+        assert_equal(round(val, ndigits), round(np.int32(val), ndigits))
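+        # 2**31 - 1 = 2147483647 has ten digits, so the ndigits above are
+        # -9 and -10: rounding at, and just past, the leading digit.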
+
+    def test_dunder_round_accuracy(self):
+        f = np.float64(5.1 * 10**73)
+        assert_(isinstance(round(f, -73), np.float64))
+        assert_array_max_ulp(round(f, -73), 5.0 * 10**73)
+        assert_(isinstance(round(f, ndigits=-73), np.float64))
+        assert_array_max_ulp(round(f, ndigits=-73), 5.0 * 10**73)
+
+        i = np.int64(501)
+        assert_(isinstance(round(i, -2), np.int64))
+        assert_array_max_ulp(round(i, -2), 500)
+        assert_(isinstance(round(i, ndigits=-2), np.int64))
+        assert_array_max_ulp(round(i, ndigits=-2), 500)
+
+    @pytest.mark.xfail(raises=AssertionError, reason="gh-15896")
+    def test_round_py_consistency(self):
+        f = 5.1 * 10**73
+        assert_equal(round(np.float64(f), -73), round(f, -73))
+
+    def test_searchsorted(self):
+        arr = [-8, -5, -1, 3, 6, 10]
+        out = np.searchsorted(arr, 0)
+        assert_equal(out, 3)
+
+    def test_size(self):
+        A = [[1, 2, 3], [4, 5, 6]]
+        assert_(np.size(A) == 6)
+        assert_(np.size(A, 0) == 2)
+        assert_(np.size(A, 1) == 3)
+
+    def test_squeeze(self):
+        A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
+        assert_equal(np.squeeze(A).shape, (3, 3))
+        assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))
+        assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))
+        assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))
+        assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))
+        assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))
+        assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))
+        assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))
+        assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))
+
+    def test_std(self):
+        A = [[1, 2, 3], [4, 5, 6]]
+        assert_almost_equal(np.std(A), 1.707825127659933)
+        assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
+        assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
+
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', RuntimeWarning)
+            assert_(np.isnan(np.std([])))
+            assert_(w[0].category is RuntimeWarning)
+
+    def test_swapaxes(self):
+        tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
+        a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
+        out = np.swapaxes(a, 0, 2)
+        assert_equal(out, tgt)
+
+    def test_sum(self):
+        m = [[1, 2, 3],
+             [4, 5, 6],
+             [7, 8, 9]]
+        tgt = [[6], [15], [24]]
+        out = np.sum(m, axis=1, keepdims=True)
+
+        assert_equal(tgt, out)
+
+    def test_take(self):
+        tgt = [2, 3, 5]
+        indices = [1, 2, 4]
+        a = [1, 2, 3, 4, 5]
+
+        out = np.take(a, indices)
+        assert_equal(out, tgt)
+
+    def test_trace(self):
+        c = [[1, 2], [3, 4], [5, 6]]
+        assert_equal(np.trace(c), 5)
+
+    def test_transpose(self):
+        arr = [[1, 2], [3, 4], [5, 6]]
+        tgt = [[1, 3, 5], [2, 4, 6]]
+        assert_equal(np.transpose(arr, (1, 0)), tgt)
+
+    def test_var(self):
+        A = [[1, 2, 3], [4, 5, 6]]
+        assert_almost_equal(np.var(A), 2.9166666666666665)
+        assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
+        assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
+
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', RuntimeWarning)
+            assert_(np.isnan(np.var([])))
+            assert_(w[0].category is RuntimeWarning)
+
+        B = np.array([None, 0])
+        B[0] = 1j
+        assert_almost_equal(np.var(B), 0.25)
+
+
+class TestIsscalar:
+    def test_isscalar(self):
+        assert_(np.isscalar(3.1))
+        assert_(np.isscalar(np.int16(12345)))
+        assert_(np.isscalar(False))
+        assert_(np.isscalar('numpy'))
+        assert_(not np.isscalar([3.1]))
+        assert_(not np.isscalar(None))
+
+        # PEP 3141
+        from fractions import Fraction
+        assert_(np.isscalar(Fraction(5, 17)))
+        from numbers import Number
+        assert_(np.isscalar(Number()))
+
+
+class TestBoolScalar:
+    def test_logical(self):
+        f = np.False_
+        t = np.True_
+        s = "xyz"
+        assert_((t and s) is s)
+        assert_((f and s) is f)
+
+    def test_bitwise_or(self):
+        f = np.False_
+        t = np.True_
+        assert_((t | t) is t)
+        assert_((f | t) is t)
+        assert_((t | f) is t)
+        assert_((f | f) is f)
+
+    def test_bitwise_and(self):
+        f = np.False_
+        t = np.True_
+        assert_((t & t) is t)
+        assert_((f & t) is f)
+        assert_((t & f) is f)
+        assert_((f & f) is f)
+
+    def test_bitwise_xor(self):
+        f = np.False_
+        t = np.True_
+        assert_((t ^ t) is f)
+        assert_((f ^ t) is t)
+        assert_((t ^ f) is t)
+        assert_((f ^ f) is f)
+
+
+class TestBoolArray:
+    def setup_method(self):
+        # offset for simd tests
+        self.t = np.array([True] * 41, dtype=bool)[1::]
+        self.f = np.array([False] * 41, dtype=bool)[1::]
+        self.o = np.array([False] * 42, dtype=bool)[2::]
+        self.nm = self.f.copy()
+        self.im = self.t.copy()
+        self.nm[3] = True
+        self.nm[-2] = True
+        self.im[3] = False
+        self.im[-2] = False
+
+    def test_all_any(self):
+        assert_(self.t.all())
+        assert_(self.t.any())
+        assert_(not self.f.all())
+        assert_(not self.f.any())
+        assert_(self.nm.any())
+        assert_(self.im.any())
+        assert_(not self.nm.all())
+        assert_(not self.im.all())
+        # check bad element in all positions
+        for i in range(256 - 7):
+            d = np.array([False] * 256, dtype=bool)[7::]
+            d[i] = True
+            assert_(np.any(d))
+            e = np.array([True] * 256, dtype=bool)[7::]
+            e[i] = False
+            assert_(not np.all(e))
+            assert_array_equal(e, ~d)
+        # big array test for blocked libc loops
+        for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
+            d = np.array([False] * 100043, dtype=bool)
+            d[i] = True
+            assert_(np.any(d), msg="%r" % i)
+            e = np.array([True] * 100043, dtype=bool)
+            e[i] = False
+            assert_(not np.all(e), msg="%r" % i)
+
+    def test_logical_not_abs(self):
+        assert_array_equal(~self.t, self.f)
+        assert_array_equal(np.abs(~self.t), self.f)
+        assert_array_equal(np.abs(~self.f), self.t)
+        assert_array_equal(np.abs(self.f), self.f)
+        assert_array_equal(~np.abs(self.f), self.t)
+        assert_array_equal(~np.abs(self.t), self.f)
+        assert_array_equal(np.abs(~self.nm), self.im)
+        np.logical_not(self.t, out=self.o)
+        assert_array_equal(self.o, self.f)
+        np.abs(self.t, out=self.o)
+        assert_array_equal(self.o, self.t)
+
+    def test_logical_and_or_xor(self):
+        assert_array_equal(self.t | self.t, self.t)
+        assert_array_equal(self.f | self.f, self.f)
+        assert_array_equal(self.t | self.f, self.t)
+        assert_array_equal(self.f | self.t, self.t)
+        np.logical_or(self.t, self.t, out=self.o)
+        assert_array_equal(self.o, self.t)
+        assert_array_equal(self.t & self.t, self.t)
+        assert_array_equal(self.f & self.f, self.f)
+        assert_array_equal(self.t & self.f, self.f)
+        assert_array_equal(self.f & self.t, self.f)
+        np.logical_and(self.t, self.t, out=self.o)
+        assert_array_equal(self.o, self.t)
+        assert_array_equal(self.t ^ self.t, self.f)
+        assert_array_equal(self.f ^ self.f, self.f)
+        assert_array_equal(self.t ^ self.f, self.t)
+        assert_array_equal(self.f ^ self.t, self.t)
+        np.logical_xor(self.t, self.t, out=self.o)
+        assert_array_equal(self.o, self.f)
+
+        assert_array_equal(self.nm & self.t, self.nm)
+        assert_array_equal(self.im & self.f, False)
+        assert_array_equal(self.nm & True, self.nm)
+        assert_array_equal(self.im & False, self.f)
+        assert_array_equal(self.nm | self.t, self.t)
+        assert_array_equal(self.im | self.f, self.im)
+        assert_array_equal(self.nm | True, self.t)
+        assert_array_equal(self.im | False, self.im)
+        assert_array_equal(self.nm ^ self.t, self.im)
+        assert_array_equal(self.im ^ self.f, self.im)
+        assert_array_equal(self.nm ^ True, self.im)
+        assert_array_equal(self.im ^ False, self.im)
+
+
+class TestBoolCmp:
+    def setup_method(self):
+        self.f = np.ones(256, dtype=np.float32)
+        self.ef = np.ones(self.f.size, dtype=bool)
+        self.d = np.ones(128, dtype=np.float64)
+        self.ed = np.ones(self.d.size, dtype=bool)
+        # generate values for all permutations of 256-bit SIMD vectors
+        s = 0
+        for i in range(32):
+            self.f[s:s+8] = [i & 2**x for x in range(8)]
+            self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
+            s += 8
+        s = 0
+        for i in range(16):
+            self.d[s:s+4] = [i & 2**x for x in range(4)]
+            self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
+            s += 4
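+        # lane x of each group holds bit x of the loop index, covering many
+        # zero/nonzero lane mixtures within one SIMD vector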
+
+        self.nf = self.f.copy()
+        self.nd = self.d.copy()
+        self.nf[self.ef] = np.nan
+        self.nd[self.ed] = np.nan
+
+        self.inff = self.f.copy()
+        self.infd = self.d.copy()
+        self.inff[::3][self.ef[::3]] = np.inf
+        self.infd[::3][self.ed[::3]] = np.inf
+        self.inff[1::3][self.ef[1::3]] = -np.inf
+        self.infd[1::3][self.ed[1::3]] = -np.inf
+        self.inff[2::3][self.ef[2::3]] = np.nan
+        self.infd[2::3][self.ed[2::3]] = np.nan
+        self.efnonan = self.ef.copy()
+        self.efnonan[2::3] = False
+        self.ednonan = self.ed.copy()
+        self.ednonan[2::3] = False
+
+        self.signf = self.f.copy()
+        self.signd = self.d.copy()
+        self.signf[self.ef] *= -1.
+        self.signd[self.ed] *= -1.
+        self.signf[1::6][self.ef[1::6]] = -np.inf
+        self.signd[1::6][self.ed[1::6]] = -np.inf
+        self.signf[3::6][self.ef[3::6]] = -np.nan
+        self.signd[3::6][self.ed[3::6]] = -np.nan
+        self.signf[4::6][self.ef[4::6]] = -0.
+        self.signd[4::6][self.ed[4::6]] = -0.
+
+    def test_float(self):
+        # offset for alignment test
+        for i in range(4):
+            assert_array_equal(self.f[i:] > 0, self.ef[i:])
+            assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
+            assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
+            assert_array_equal(-self.f[i:] < 0, self.ef[i:])
+            assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
+            r = self.f[i:] != 0
+            assert_array_equal(r, self.ef[i:])
+            r2 = self.f[i:] != np.zeros_like(self.f[i:])
+            r3 = 0 != self.f[i:]
+            assert_array_equal(r, r2)
+            assert_array_equal(r, r3)
+            # check bool == 0x1
+            assert_array_equal(r.view(np.int8), r.astype(np.int8))
+            assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
+            assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
+
+            # isnan on amd64 takes the same code path
+            assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
+            assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
+            assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
+            assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
+            assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
+
+    def test_double(self):
+        # offset for alignment test
+        for i in range(2):
+            assert_array_equal(self.d[i:] > 0, self.ed[i:])
+            assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
+            assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
+            assert_array_equal(-self.d[i:] < 0, self.ed[i:])
+            assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
+            r = self.d[i:] != 0
+            assert_array_equal(r, self.ed[i:])
+            r2 = self.d[i:] != np.zeros_like(self.d[i:])
+            r3 = 0 != self.d[i:]
+            assert_array_equal(r, r2)
+            assert_array_equal(r, r3)
+            # check bool == 0x1
+            assert_array_equal(r.view(np.int8), r.astype(np.int8))
+            assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
+            assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
+
+            # isnan on amd64 takes the same code path
+            assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
+            assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
+            assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
+            assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
+            assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
+
+
+class TestSeterr:
+    def test_default(self):
+        err = np.geterr()
+        assert_equal(err,
+                     dict(divide='warn',
+                          invalid='warn',
+                          over='warn',
+                          under='ignore')
+                     )
+
+    def test_set(self):
+        with np.errstate():
+            err = np.seterr()
+            old = np.seterr(divide='print')
+            assert_(err == old)
+            new = np.seterr()
+            assert_(new['divide'] == 'print')
+            np.seterr(over='raise')
+            assert_(np.geterr()['over'] == 'raise')
+            assert_(new['divide'] == 'print')
+            np.seterr(**old)
+            assert_(np.geterr() == old)
+
+    @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
+    @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
+    def test_divide_err(self):
+        with np.errstate(divide='raise'):
+            with assert_raises(FloatingPointError):
+                np.array([1.]) / np.array([0.])
+
+            np.seterr(divide='ignore')
+            np.array([1.]) / np.array([0.])
+
+    @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
+    def test_errobj(self):
+        olderrobj = np.geterrobj()
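+        # the legacy errobj is a 3-item list [bufsize, errmask, callback];
+        # 20000 below is the ufunc buffer size, and the callback is the hook
+        # invoked by the 'call'/'log' error modes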
+        self.called = 0
+        try:
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter("always")
+                with np.errstate(divide='warn'):
+                    np.seterrobj([20000, 1, None])
+                    np.array([1.]) / np.array([0.])
+                    assert_equal(len(w), 1)
+
+            def log_err(*args):
+                self.called += 1
+                extobj_err = args
+                assert_(len(extobj_err) == 2)
+                assert_("divide" in extobj_err[0])
+
+            with np.errstate(divide='ignore'):
+                np.seterrobj([20000, 3, log_err])
+                np.array([1.]) / np.array([0.])
+            assert_equal(self.called, 1)
+
+            np.seterrobj(olderrobj)
+            with np.errstate(divide='ignore'):
+                np.divide(1., 0., extobj=[20000, 3, log_err])
+            assert_equal(self.called, 2)
+        finally:
+            np.seterrobj(olderrobj)
+            del self.called
+
+    def test_errobj_noerrmask(self):
+        # errmask = 0 has a special code path for the default
+        olderrobj = np.geterrobj()
+        try:
+            # set errobj to something non default
+            np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,
+                         umath.ERR_DEFAULT + 1, None])
+            # call a ufunc
+            np.isnan(np.array([6]))
+            # same with the default, repeated many times to flush any
+            # pre-existing stack in the code
+            for i in range(10000):
+                np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,
+                             None])
+            np.isnan(np.array([6]))
+        finally:
+            np.seterrobj(olderrobj)
+
+
+class TestFloatExceptions:
+    def assert_raises_fpe(self, fpeerr, flop, x, y):
+        ftype = type(x)
+        try:
+            flop(x, y)
+            assert_(False,
+                    "Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
+        except FloatingPointError as exc:
+            assert_(str(exc).find(fpeerr) >= 0,
+                    "Type %s raised wrong fpe error '%s'." % (ftype, exc))
+
+    def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
+        # Check that fpe exception is raised.
+        #
+        # Given a floating operation `flop` and two scalar values, check that
+        # the operation raises the floating point exception specified by
+        # `fpeerr`. Tests all variants with 0-d array scalars as well.
+
+        self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
+        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
+        self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
+        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
+
+    # Test for all real and complex float types
+    @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
+    @pytest.mark.parametrize("typecode", np.typecodes["AllFloat"])
+    def test_floating_exceptions(self, typecode):
+        if 'bsd' in sys.platform and typecode in 'gG':
+            pytest.skip(reason="Fallback impl for (c)longdouble may not raise "
+                               "FPE errors as expected on BSD OSes, "
+                               "see gh-24876, gh-23379")
+
+        # Test basic arithmetic function errors
+        with np.errstate(all='raise'):
+            ftype = np.obj2sctype(typecode)
+            if np.dtype(ftype).kind == 'f':
+                # Get some extreme values for the type
+                fi = np.finfo(ftype)
+                ft_tiny = fi._machar.tiny
+                ft_max = fi.max
+                ft_eps = fi.eps
+                underflow = 'underflow'
+                divbyzero = 'divide by zero'
+            else:
+                # 'c', complex, corresponding real dtype
+                rtype = type(ftype(0).real)
+                fi = np.finfo(rtype)
+                ft_tiny = ftype(fi._machar.tiny)
+                ft_max = ftype(fi.max)
+                ft_eps = ftype(fi.eps)
+                # The complex types raise different exceptions
+                underflow = ''
+                divbyzero = ''
+            overflow = 'overflow'
+            invalid = 'invalid'
+
+            # The value of tiny for double-double is NaN, so we skip the
+            # underflow asserts in that case
+            if not np.isnan(ft_tiny):
+                self.assert_raises_fpe(underflow,
+                                    lambda a, b: a/b, ft_tiny, ft_max)
+                self.assert_raises_fpe(underflow,
+                                    lambda a, b: a*b, ft_tiny, ft_tiny)
+            self.assert_raises_fpe(overflow,
+                                   lambda a, b: a*b, ft_max, ftype(2))
+            self.assert_raises_fpe(overflow,
+                                   lambda a, b: a/b, ft_max, ftype(0.5))
+            self.assert_raises_fpe(overflow,
+                                   lambda a, b: a+b, ft_max, ft_max*ft_eps)
+            self.assert_raises_fpe(overflow,
+                                   lambda a, b: a-b, -ft_max, ft_max*ft_eps)
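+            # note: ft_max*ft_eps is on the order of one ULP at ft_max, so
+            # the add/subtract cases above round past the largest finite
+            # value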
+            self.assert_raises_fpe(overflow,
+                                   np.power, ftype(2), ftype(2**fi.nexp))
+            self.assert_raises_fpe(divbyzero,
+                                   lambda a, b: a/b, ftype(1), ftype(0))
+            self.assert_raises_fpe(
+                invalid, lambda a, b: a/b, ftype(np.inf), ftype(np.inf)
+            )
+            self.assert_raises_fpe(invalid,
+                                   lambda a, b: a/b, ftype(0), ftype(0))
+            self.assert_raises_fpe(
+                invalid, lambda a, b: a-b, ftype(np.inf), ftype(np.inf)
+            )
+            self.assert_raises_fpe(
+                invalid, lambda a, b: a+b, ftype(np.inf), ftype(-np.inf)
+            )
+            self.assert_raises_fpe(invalid,
+                                   lambda a, b: a*b, ftype(0), ftype(np.inf))
+
+    @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
+    def test_warnings(self):
+        # test warning code path
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            with np.errstate(all="warn"):
+                np.divide(1, 0.)
+                assert_equal(len(w), 1)
+                assert_("divide by zero" in str(w[0].message))
+                np.array(1e300) * np.array(1e300)
+                assert_equal(len(w), 2)
+                assert_("overflow" in str(w[-1].message))
+                np.array(np.inf) - np.array(np.inf)
+                assert_equal(len(w), 3)
+                assert_("invalid value" in str(w[-1].message))
+                np.array(1e-300) * np.array(1e-300)
+                assert_equal(len(w), 4)
+                assert_("underflow" in str(w[-1].message))
+
+
+class TestTypes:
+    def check_promotion_cases(self, promote_func):
+        # tests that the scalars get coerced correctly.
+        b = np.bool_(0)
+        i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
+        u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)
+        f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)
+        c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)
+
+        # coercion within the same kind
+        assert_equal(promote_func(i8, i16), np.dtype(np.int16))
+        assert_equal(promote_func(i32, i8), np.dtype(np.int32))
+        assert_equal(promote_func(i16, i64), np.dtype(np.int64))
+        assert_equal(promote_func(u8, u32), np.dtype(np.uint32))
+        assert_equal(promote_func(f32, f64), np.dtype(np.float64))
+        assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))
+        assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))
+        assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
+        assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))
+        assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))
+
+        # coercion between kinds
+        assert_equal(promote_func(b, i32), np.dtype(np.int32))
+        assert_equal(promote_func(b, u8), np.dtype(np.uint8))
+        assert_equal(promote_func(i8, u8), np.dtype(np.int16))
+        assert_equal(promote_func(u8, i32), np.dtype(np.int32))
+        assert_equal(promote_func(i64, u32), np.dtype(np.int64))
+        assert_equal(promote_func(u64, i32), np.dtype(np.float64))
+        assert_equal(promote_func(i32, f32), np.dtype(np.float64))
+        assert_equal(promote_func(i64, f32), np.dtype(np.float64))
+        assert_equal(promote_func(f32, i16), np.dtype(np.float32))
+        assert_equal(promote_func(f32, u32), np.dtype(np.float64))
+        assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
+        assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
+        assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))
+
+        # coercion between scalars and 1-D arrays
+        assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
+        assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
+        assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
+        assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))
+        assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
+        assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))
+        assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))
+        assert_equal(promote_func(np.int32(-1), np.array([u64])),
+                     np.dtype(np.float64))
+        assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
+        assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))
+        assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))
+        assert_equal(promote_func(fld, np.array([c64])),
+                     np.dtype(np.complex64))
+        assert_equal(promote_func(c64, np.array([f64])),
+                     np.dtype(np.complex128))
+        assert_equal(promote_func(np.complex64(3j), np.array([f64])),
+                     np.dtype(np.complex128))
+
+        # coercion between scalars and 1-D arrays, where
+        # the scalar has greater kind than the array
+        assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
+        assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
+        assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))
+        assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
+        assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))
+
+        # uint and int are treated as the same "kind" for
+        # the purposes of array-scalar promotion.
+        assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))
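+        # e.g. under these legacy value-based rules,
+        # (np.int32(3) + np.array([4], dtype=np.uint16)).dtype stays uint16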
+
+        # float and complex are treated as the same "kind" for
+        # the purposes of array-scalar promotion, so that you can do
+        # (0j + float32array) to get a complex64 array instead of
+        # a complex128 array.
+        assert_equal(promote_func(np.array([f32]), c128),
+                     np.dtype(np.complex64))
+
+    def test_coercion(self):
+        def res_type(a, b):
+            return np.add(a, b).dtype
+
+        self.check_promotion_cases(res_type)
+
+        # Use-case: float/complex scalar * bool/int8 array
+        #           shouldn't narrow the float/complex type
+        for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
+            b = 1.234 * a
+            assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
+            b = np.longdouble(1.234) * a
+            assert_equal(b.dtype, np.dtype(np.longdouble),
+                         "array type %s" % a.dtype)
+            b = np.float64(1.234) * a
+            assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
+            b = np.float32(1.234) * a
+            assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
+            b = np.float16(1.234) * a
+            assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
+
+            b = 1.234j * a
+            assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
+            b = np.clongdouble(1.234j) * a
+            assert_equal(b.dtype, np.dtype(np.clongdouble),
+                         "array type %s" % a.dtype)
+            b = np.complex128(1.234j) * a
+            assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
+            b = np.complex64(1.234j) * a
+            assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
+
+        # The following use-case is problematic, and to resolve its
+        # tricky side-effects requires more changes.
+        #
+        # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
+        #            a float32, shouldn't promote to float64
+        #
+        # a = np.array([1.0, 1.5], dtype=np.float32)
+        # t = np.array([True, False])
+        # b = t*a
+        # assert_equal(b, [1.0, 0.0])
+        # assert_equal(b.dtype, np.dtype('f4'))
+        # b = (1-t)*a
+        # assert_equal(b, [0.0, 1.5])
+        # assert_equal(b.dtype, np.dtype('f4'))
+        #
+        # Probably ~t (bitwise negation) is more proper to use here,
+        # but this is arguably less intuitive to understand at a glance, and
+        # would fail if 't' is actually an integer array instead of boolean:
+        #
+        # b = (~t)*a
+        # assert_equal(b, [0.0, 1.5])
+        # assert_equal(b.dtype, np.dtype('f4'))
+
+    def test_result_type(self):
+        self.check_promotion_cases(np.result_type)
+        assert_(np.result_type(None) == np.dtype(None))
+
+    def test_promote_types_endian(self):
+        # promote_types should always return native-endian types
+        assert_equal(np.promote_types('<i8', '>i8'), np.dtype('i8'))
+
+        assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
+        assert_equal(np.promote_types('<U16', '>i8'), np.dtype('U21'))
+        assert_equal(np.promote_types('<S5', '>U8'), np.dtype('U8'))
+        assert_equal(np.promote_types('<U8', '>S5'), np.dtype('U8'))
+        assert_equal(np.promote_types('<U8', '>U5'), np.dtype('U8'))
+
+        assert_equal(np.promote_types('<M8', '>M8'), np.dtype('M8'))
+        assert_equal(np.promote_types('<m8', '>m8'), np.dtype('m8'))
+
+    def test_can_cast_and_promote_usertypes(self):
+        # The rational type defines safe casting for signed integers and
+        # booleans. Rational itself *does* cast safely to double.
+        # (rational does not actually cast to all signed integers, e.g.
+        # int64 can be both long and longlong and it registers only the first)
+        valid_types = ["int8", "int16", "int32", "int64", "bool"]
+        invalid_types = "BHILQP" + "FDG" + "mM" + "f" + "V"
+
+        rational_dt = np.dtype(rational)
+        for numpy_dtype in valid_types:
+            numpy_dtype = np.dtype(numpy_dtype)
+            assert np.can_cast(numpy_dtype, rational_dt)
+            assert np.promote_types(numpy_dtype, rational_dt) is rational_dt
+
+        for numpy_dtype in invalid_types:
+            numpy_dtype = np.dtype(numpy_dtype)
+            assert not np.can_cast(numpy_dtype, rational_dt)
+            with pytest.raises(TypeError):
+                np.promote_types(numpy_dtype, rational_dt)
+
+        double_dt = np.dtype("double")
+        assert np.can_cast(rational_dt, double_dt)
+        assert np.promote_types(double_dt, rational_dt) is double_dt
+
+    @pytest.mark.parametrize("swap", ["", "swap"])
+    @pytest.mark.parametrize("string_dtype", ["U", "S"])
+    def test_promote_types_strings(self, swap, string_dtype):
+        if swap == "swap":
+            promote_types = lambda a, b: np.promote_types(b, a)
+        else:
+            promote_types = np.promote_types
+
+        S = string_dtype
+
+        # Promote numeric with unsized string:
+        assert_equal(promote_types('bool', S), np.dtype(S+'5'))
+        assert_equal(promote_types('b', S), np.dtype(S+'4'))
+        assert_equal(promote_types('u1', S), np.dtype(S+'3'))
+        assert_equal(promote_types('u2', S), np.dtype(S+'5'))
+        assert_equal(promote_types('u4', S), np.dtype(S+'10'))
+        assert_equal(promote_types('u8', S), np.dtype(S+'20'))
+        assert_equal(promote_types('i1', S), np.dtype(S+'4'))
+        assert_equal(promote_types('i2', S), np.dtype(S+'6'))
+        assert_equal(promote_types('i4', S), np.dtype(S+'11'))
+        assert_equal(promote_types('i8', S), np.dtype(S+'21'))
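+        # (the widths track the longest value as text: 'False' needs 5
+        # chars and '-32768' needs 6; 'i8' is allotted one char more than
+        # the 20-char '-9223372036854775808')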
+        # Promote numeric with sized string:
+        assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5'))
+        assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30'))
+        assert_equal(promote_types('b', S+'1'), np.dtype(S+'4'))
+        assert_equal(promote_types('b', S+'30'), np.dtype(S+'30'))
+        assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3'))
+        assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30'))
+        assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5'))
+        assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30'))
+        assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10'))
+        assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30'))
+        assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20'))
+        assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30'))
+        # Promote with object:
+        assert_equal(promote_types('O', S+'30'), np.dtype('O'))
+
+    @pytest.mark.parametrize(["dtype1", "dtype2"],
+            [[np.dtype("V6"), np.dtype("V10")],  # mismatch shape
+             # Mismatching names:
+             [np.dtype([("name1", "i8")]), np.dtype([("name2", "i8")])],
+            ])
+    def test_invalid_void_promotion(self, dtype1, dtype2):
+        with pytest.raises(TypeError):
+            np.promote_types(dtype1, dtype2)
+
+    @pytest.mark.parametrize(["dtype1", "dtype2"],
+            [[np.dtype("V10"), np.dtype("V10")],
+             [np.dtype([("name1", "i8")]),
+              np.dtype([("name1", np.dtype("i8").newbyteorder())])],
+             [np.dtype("i8,i8"), np.dtype("i8,>i8")],
+             [np.dtype("i8,i8"), np.dtype("i4,i4")],
+            ])
+    def test_valid_void_promotion(self, dtype1, dtype2):
+        assert np.promote_types(dtype1, dtype2) == dtype1
+
+    @pytest.mark.parametrize("dtype",
+            list(np.typecodes["All"]) +
+            ["i,i", "10i", "S3", "S100", "U3", "U100", rational])
+    def test_promote_identical_types_metadata(self, dtype):
+        # The same type passed in twice to promote types always
+        # preserves metadata
+        metadata = {1: 1}
+        dtype = np.dtype(dtype, metadata=metadata)
+
+        res = np.promote_types(dtype, dtype)
+        assert res.metadata == dtype.metadata
+
+        # promoting a byte-swapped dtype with itself yields the native
+        # dtype (metadata handling is checked below):
+        dtype = dtype.newbyteorder()
+        if dtype.isnative:
+            # The type does not have byte swapping
+            return
+
+        res = np.promote_types(dtype, dtype)
+
+        # Metadata is (currently) generally lost on byte-swapping (except
+        # for unicode).
+        if dtype.char != "U":
+            assert res.metadata is None
+        else:
+            assert res.metadata == metadata
+        assert res.isnative
+
+    @pytest.mark.slow
+    @pytest.mark.filterwarnings('ignore:Promotion of numbers:FutureWarning')
+    @pytest.mark.parametrize(["dtype1", "dtype2"],
+            itertools.product(
+                list(np.typecodes["All"]) +
+                ["i,i", "S3", "S100", "U3", "U100", rational],
+                repeat=2))
+    def test_promote_types_metadata(self, dtype1, dtype2):
+        """Metadata handling in promotion does not appear formalized
+        right now in NumPy. This test should thus be considered to
+        document behaviour, rather than test the correct definition of it.
+
+        This test is very ugly, it was useful for rewriting part of the
+        promotion, but probably should eventually be replaced/deleted
+        (i.e. when metadata handling in promotion is better defined).
+        """
+        metadata1 = {1: 1}
+        metadata2 = {2: 2}
+        dtype1 = np.dtype(dtype1, metadata=metadata1)
+        dtype2 = np.dtype(dtype2, metadata=metadata2)
+
+        try:
+            res = np.promote_types(dtype1, dtype2)
+        except TypeError:
+            # Promotion failed, this test only checks metadata
+            return
+
+        if res.char not in "USV" or res.names is not None or res.shape != ():
+            # All except string dtypes (and unstructured void) lose metadata
+            # on promotion (unless both dtypes are identical).
+            # At some point structured ones did not, but were restrictive.
+            assert res.metadata is None
+        elif res == dtype1:
+            # If one of the inputs is the result, it is usually returned
+            # unchanged:
+            assert res is dtype1
+        elif res == dtype2:
+            # dtype1 may have been cast to the same type/kind as dtype2.
+            # If the resulting dtype is identical we currently pick the cast
+            # version of dtype1, which lost the metadata:
+            if np.promote_types(dtype1, dtype2.kind) == dtype2:
+                assert res.metadata is None
+            else:
+                assert res.metadata == metadata2
+        else:
+            assert res.metadata is None
+
+        # Try again for byteswapped version
+        dtype1 = dtype1.newbyteorder()
+        assert dtype1.metadata == metadata1
+        res_bs = np.promote_types(dtype1, dtype2)
+        assert res_bs == res
+        assert res_bs.metadata == res.metadata
+
+    def test_can_cast(self):
+        assert_(np.can_cast(np.int32, np.int64))
+        assert_(np.can_cast(np.float64, complex))
+        assert_(not np.can_cast(complex, float))
+
+        assert_(np.can_cast('i8', 'f8'))
+        assert_(not np.can_cast('i8', 'f4'))
+        assert_(np.can_cast('i4', 'S11'))
+
+        assert_(np.can_cast('i8', 'i8', 'no'))
+        assert_(not np.can_cast('<i8', '>i8', 'no'))
+
+        assert_(np.can_cast('<i8', '>i8', 'equiv'))
+        assert_(not np.can_cast('<i4', '>i8', 'equiv'))
+
+        assert_(np.can_cast('<i4', '>i8', 'safe'))
+        assert_(not np.can_cast('<i8', '>i4', 'safe'))
+
+        assert_(np.can_cast('<i8', '>i4', 'same_kind'))
+        assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
+
+        assert_(np.can_cast('<i8', '>u4', 'unsafe'))
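+        # casting strictness is ordered 'no' < 'equiv' < 'safe' <
+        # 'same_kind' < 'unsafe'; each mode permits everything the stricter
+        # ones do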
+
+        assert_(np.can_cast('bool', 'S5'))
+        assert_(not np.can_cast('bool', 'S4'))
+
+        assert_(np.can_cast('b', 'S4'))
+        assert_(not np.can_cast('b', 'S3'))
+
+        assert_(np.can_cast('u1', 'S3'))
+        assert_(not np.can_cast('u1', 'S2'))
+        assert_(np.can_cast('u2', 'S5'))
+        assert_(not np.can_cast('u2', 'S4'))
+        assert_(np.can_cast('u4', 'S10'))
+        assert_(not np.can_cast('u4', 'S9'))
+        assert_(np.can_cast('u8', 'S20'))
+        assert_(not np.can_cast('u8', 'S19'))
+
+        assert_(np.can_cast('i1', 'S4'))
+        assert_(not np.can_cast('i1', 'S3'))
+        assert_(np.can_cast('i2', 'S6'))
+        assert_(not np.can_cast('i2', 'S5'))
+        assert_(np.can_cast('i4', 'S11'))
+        assert_(not np.can_cast('i4', 'S10'))
+        assert_(np.can_cast('i8', 'S21'))
+        assert_(not np.can_cast('i8', 'S20'))
+
+        assert_(np.can_cast('bool', 'U5'))
+        assert_(not np.can_cast('bool', 'U4'))
+
+        assert_(np.can_cast('b', 'U4'))
+        assert_(not np.can_cast('b', 'U3'))
+
+        assert_(np.can_cast('u1', 'U3'))
+        assert_(not np.can_cast('u1', 'U2'))
+        assert_(np.can_cast('u2', 'U5'))
+        assert_(not np.can_cast('u2', 'U4'))
+        assert_(np.can_cast('u4', 'U10'))
+        assert_(not np.can_cast('u4', 'U9'))
+        assert_(np.can_cast('u8', 'U20'))
+        assert_(not np.can_cast('u8', 'U19'))
+
+        assert_(np.can_cast('i1', 'U4'))
+        assert_(not np.can_cast('i1', 'U3'))
+        assert_(np.can_cast('i2', 'U6'))
+        assert_(not np.can_cast('i2', 'U5'))
+        assert_(np.can_cast('i4', 'U11'))
+        assert_(not np.can_cast('i4', 'U10'))
+        assert_(np.can_cast('i8', 'U21'))
+        assert_(not np.can_cast('i8', 'U20'))
+
+        assert_raises(TypeError, np.can_cast, 'i4', None)
+        assert_raises(TypeError, np.can_cast, None, 'i4')
+
+        # Also test keyword arguments
+        assert_(np.can_cast(from_=np.int32, to=np.int64))
+
+    def test_can_cast_simple_to_structured(self):
+        # Non-structured can only be cast to structured in 'unsafe' mode.
+        assert_(not np.can_cast('i4', 'i4,i4'))
+        assert_(not np.can_cast('i4', 'i4,i2'))
+        assert_(np.can_cast('i4', 'i4,i4', casting='unsafe'))
+        assert_(np.can_cast('i4', 'i4,i2', casting='unsafe'))
+        # Even if there is just a single field which is OK.
+        assert_(not np.can_cast('i2', [('f1', 'i4')]))
+        assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind'))
+        assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe'))
+        # It should be the same for recursive structured or subarrays.
+        assert_(not np.can_cast('i2', [('f1', 'i4,i4')]))
+        assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe'))
+        assert_(not np.can_cast('i2', [('f1', '(2,3)i4')]))
+        assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe'))
+
+    def test_can_cast_structured_to_simple(self):
+        # Need unsafe casting for structured to simple.
+        assert_(not np.can_cast([('f1', 'i4')], 'i4'))
+        assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe'))
+        assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe'))
+        # Since it is unclear what is being cast, multiple fields to
+        # single should not work even for unsafe casting.
+        assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe'))
+        # But a single field inside a single field is OK.
+        assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4'))
+        assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe'))
+        # And a subarray is fine too - it will just take the first element
+        # (arguably not very consistently; might also take the first field).
+        assert_(not np.can_cast([('f0', '(3,)i4')], 'i4'))
+        assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe'))
+        # But a structured subarray with multiple fields should fail.
+        assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4',
+                                casting='unsafe'))
+
+    def test_can_cast_values(self):
+        # gh-5917
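+        # passing a concrete Python value (rather than a type) makes
+        # can_cast check whether that value itself fits in the target dtype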
+        for dt in np.sctypes['int'] + np.sctypes['uint']:
+            ii = np.iinfo(dt)
+            assert_(np.can_cast(ii.min, dt))
+            assert_(np.can_cast(ii.max, dt))
+            assert_(not np.can_cast(ii.min - 1, dt))
+            assert_(not np.can_cast(ii.max + 1, dt))
+
+        for dt in np.sctypes['float']:
+            fi = np.finfo(dt)
+            assert_(np.can_cast(fi.min, dt))
+            assert_(np.can_cast(fi.max, dt))
+
+
+# Custom exception class to test exception propagation in fromiter
+class NIterError(Exception):
+    pass
+
+
+class TestFromiter:
+    def makegen(self):
+        return (x**2 for x in range(24))
+
+    def test_types(self):
+        ai32 = np.fromiter(self.makegen(), np.int32)
+        ai64 = np.fromiter(self.makegen(), np.int64)
+        af = np.fromiter(self.makegen(), float)
+        assert_(ai32.dtype == np.dtype(np.int32))
+        assert_(ai64.dtype == np.dtype(np.int64))
+        assert_(af.dtype == np.dtype(float))
+
+    def test_lengths(self):
+        expected = np.array(list(self.makegen()))
+        a = np.fromiter(self.makegen(), int)
+        a20 = np.fromiter(self.makegen(), int, 20)
+        assert_(len(a) == len(expected))
+        assert_(len(a20) == 20)
+        assert_raises(ValueError, np.fromiter,
+                          self.makegen(), int, len(expected) + 10)
+
+    def test_values(self):
+        expected = np.array(list(self.makegen()))
+        a = np.fromiter(self.makegen(), int)
+        a20 = np.fromiter(self.makegen(), int, 20)
+        assert_(np.all(a == expected, axis=0))
+        assert_(np.all(a20 == expected[:20], axis=0))
+
+    def load_data(self, n, eindex):
+        # Utility method for the issue 2592 tests.
+        # Raise an exception at the desired index in the iterator.
+        for e in range(n):
+            if e == eindex:
+                raise NIterError('error at index %s' % eindex)
+            yield e
+
+    @pytest.mark.parametrize("dtype", [int, object])
+    @pytest.mark.parametrize(["count", "error_index"], [(10, 5), (10, 9)])
+    def test_2592(self, count, error_index, dtype):
+        # Test iteration exceptions are correctly raised. The data/generator
+        # has `count` elements but errors at `error_index`
+        iterable = self.load_data(count, error_index)
+        with pytest.raises(NIterError):
+            np.fromiter(iterable, dtype=dtype, count=count)
+
+    @pytest.mark.parametrize("dtype", ["S", "S0", "V0", "U0"])
+    def test_empty_not_structured(self, dtype):
+        # Note, "S0" could be allowed at some point, so long "S" (without
+        # any length) is rejected.
+        with pytest.raises(ValueError, match="Must specify length"):
+            np.fromiter([], dtype=dtype)
+
+    @pytest.mark.parametrize(["dtype", "data"],
+            [("d", [1, 2, 3, 4, 5, 6, 7, 8, 9]),
+             ("O", [1, 2, 3, 4, 5, 6, 7, 8, 9]),
+             ("i,O", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]),
+             # subarray dtypes (important because their dimensions end up
+             # in the result array's dimensions):
+             ("2i", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]),
+             (np.dtype(("O", (2, 3))),
+              [((1, 2, 3), (3, 4, 5)), ((3, 2, 1), (5, 4, 3))])])
+    @pytest.mark.parametrize("length_hint", [0, 1])
+    def test_growth_and_complicated_dtypes(self, dtype, data, length_hint):
+        dtype = np.dtype(dtype)
+
+        data = data * 100  # make sure we realloc a bit
+
+        class MyIter:
+            # Class/example from gh-15789
+            def __length_hint__(self):
+                # only required to be an estimate, this is legal
+                return length_hint  # 0 or 1
+
+            def __iter__(self):
+                return iter(data)
+
+        res = np.fromiter(MyIter(), dtype=dtype)
+        expected = np.array(data, dtype=dtype)
+
+        assert_array_equal(res, expected)
+
+    def test_empty_result(self):
+        class MyIter:
+            def __length_hint__(self):
+                return 10
+
+            def __iter__(self):
+                return iter([])  # actual iterator is empty.
+
+        res = np.fromiter(MyIter(), dtype="d")
+        assert res.shape == (0,)
+        assert res.dtype == "d"
+
+    def test_too_few_items(self):
+        msg = "iterator too short: Expected 10 but iterator had only 3 items."
+        with pytest.raises(ValueError, match=msg):
+            np.fromiter([1, 2, 3], count=10, dtype=int)
+
+    def test_failed_itemsetting(self):
+        with pytest.raises(TypeError):
+            np.fromiter([1, None, 3], dtype=int)
+
+        # The following manages to hit somewhat trickier code paths:
+        iterable = ((2, 3, 4) for i in range(5))
+        with pytest.raises(ValueError):
+            np.fromiter(iterable, dtype=np.dtype((int, 2)))
+
+
+class TestNonzero:
+    def test_nonzero_trivial(self):
+        assert_equal(np.count_nonzero(np.array([])), 0)
+        assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
+        assert_equal(np.nonzero(np.array([])), ([],))
+
+        assert_equal(np.count_nonzero(np.array([0])), 0)
+        assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0)
+        assert_equal(np.nonzero(np.array([0])), ([],))
+
+        assert_equal(np.count_nonzero(np.array([1])), 1)
+        assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1)
+        assert_equal(np.nonzero(np.array([1])), ([0],))
+
+    def test_nonzero_zerod(self):
+        assert_equal(np.count_nonzero(np.array(0)), 0)
+        assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0)
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.nonzero(np.array(0)), ([],))
+
+        assert_equal(np.count_nonzero(np.array(1)), 1)
+        assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1)
+        with assert_warns(DeprecationWarning):
+            assert_equal(np.nonzero(np.array(1)), ([0],))
+
+    def test_nonzero_onedim(self):
+        x = np.array([1, 0, 2, -1, 0, 0, 8])
+        assert_equal(np.count_nonzero(x), 4)
+        assert_equal(np.count_nonzero(x), 4)
+        assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
+
+        # x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)],
+        #              dtype=[('a', 'i4'), ('b', 'i2')])
+        x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)],
+                     dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')])
+        assert_equal(np.count_nonzero(x['a']), 3)
+        assert_equal(np.count_nonzero(x['b']), 4)
+        assert_equal(np.count_nonzero(x['c']), 3)
+        assert_equal(np.count_nonzero(x['d']), 4)
+        assert_equal(np.nonzero(x['a']), ([0, 2, 3],))
+        assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],))
+
+    def test_nonzero_twodim(self):
+        x = np.array([[0, 1, 0], [2, 0, 3]])
+        assert_equal(np.count_nonzero(x.astype('i1')), 3)
+        assert_equal(np.count_nonzero(x.astype('i2')), 3)
+        assert_equal(np.count_nonzero(x.astype('i4')), 3)
+        assert_equal(np.count_nonzero(x.astype('i8')), 3)
+        assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))
+
+        x = np.eye(3)
+        assert_equal(np.count_nonzero(x.astype('i1')), 3)
+        assert_equal(np.count_nonzero(x.astype('i2')), 3)
+        assert_equal(np.count_nonzero(x.astype('i4')), 3)
+        assert_equal(np.count_nonzero(x.astype('i8')), 3)
+        assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))
+
+        x = np.array([[(0, 1), (0, 0), (1, 11)],
+                   [(1, 1), (1, 0), (0, 0)],
+                   [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])
+        assert_equal(np.count_nonzero(x['a']), 4)
+        assert_equal(np.count_nonzero(x['b']), 5)
+        assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))
+        assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))
+
+        assert_(not x['a'].T.flags.aligned)
+        assert_equal(np.count_nonzero(x['a'].T), 4)
+        assert_equal(np.count_nonzero(x['b'].T), 5)
+        assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))
+        assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))
+
+    def test_sparse(self):
+        # test special sparse condition boolean code path
+        for i in range(20):
+            c = np.zeros(200, dtype=bool)
+            c[i::20] = True
+            assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))
+
+            c = np.zeros(400, dtype=bool)
+            c[10 + i:20 + i] = True
+            c[20 + i*2] = True
+            assert_equal(np.nonzero(c)[0],
+                         np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2])))
+
+    def test_return_type(self):
+        class C(np.ndarray):
+            pass
+
+        for view in (C, np.ndarray):
+            for nd in range(1, 4):
+                shape = tuple(range(2, 2+nd))
+                x = np.arange(np.prod(shape)).reshape(shape).view(view)
+                for nzx in (np.nonzero(x), x.nonzero()):
+                    for nzx_i in nzx:
+                        assert_(type(nzx_i) is np.ndarray)
+                        assert_(nzx_i.flags.writeable)
+
+    def test_count_nonzero_axis(self):
+        # Basic check of functionality
+        m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
+
+        expected = np.array([1, 1, 1, 1, 1])
+        assert_equal(np.count_nonzero(m, axis=0), expected)
+
+        expected = np.array([2, 3])
+        assert_equal(np.count_nonzero(m, axis=1), expected)
+
+        assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1))
+        assert_raises(TypeError, np.count_nonzero, m, axis='foo')
+        assert_raises(np.AxisError, np.count_nonzero, m, axis=3)
+        assert_raises(TypeError, np.count_nonzero,
+                      m, axis=np.array([[1], [2]]))
+
+    def test_count_nonzero_axis_all_dtypes(self):
+        # More thorough test that the axis argument is respected
+        # for all dtypes and responds correctly when presented with
+        # either integer or tuple arguments for axis
+        msg = "Mismatch for dtype: %s"
+
+        def assert_equal_w_dt(a, b, err_msg):
+            assert_equal(a.dtype, b.dtype, err_msg=err_msg)
+            assert_equal(a, b, err_msg=err_msg)
+
+        for dt in np.typecodes['All']:
+            err_msg = msg % (np.dtype(dt).name,)
+
+            if dt != 'V':
+                if dt != 'M':
+                    m = np.zeros((3, 3), dtype=dt)
+                    n = np.ones(1, dtype=dt)
+
+                    m[0, 0] = n[0]
+                    m[1, 0] = n[0]
+
+                else:  # np.zeros doesn't work for np.datetime64
+                    m = np.array(['1970-01-01'] * 9)
+                    m = m.reshape((3, 3))
+
+                    m[0, 0] = '1970-01-12'
+                    m[1, 0] = '1970-01-12'
+                    m = m.astype(dt)
+
+                expected = np.array([2, 0, 0], dtype=np.intp)
+                assert_equal_w_dt(np.count_nonzero(m, axis=0),
+                                  expected, err_msg=err_msg)
+
+                expected = np.array([1, 1, 0], dtype=np.intp)
+                assert_equal_w_dt(np.count_nonzero(m, axis=1),
+                                  expected, err_msg=err_msg)
+
+                expected = np.array(2)
+                assert_equal(np.count_nonzero(m, axis=(0, 1)),
+                             expected, err_msg=err_msg)
+                assert_equal(np.count_nonzero(m, axis=None),
+                             expected, err_msg=err_msg)
+                assert_equal(np.count_nonzero(m),
+                             expected, err_msg=err_msg)
+
+            if dt == 'V':
+                # There are no 'nonzero' objects for np.void, so the testing
+                # setup is slightly different for this dtype
+                m = np.array([np.void(1)] * 6).reshape((2, 3))
+
+                expected = np.array([0, 0, 0], dtype=np.intp)
+                assert_equal_w_dt(np.count_nonzero(m, axis=0),
+                                  expected, err_msg=err_msg)
+
+                expected = np.array([0, 0], dtype=np.intp)
+                assert_equal_w_dt(np.count_nonzero(m, axis=1),
+                                  expected, err_msg=err_msg)
+
+                expected = np.array(0)
+                assert_equal(np.count_nonzero(m, axis=(0, 1)),
+                             expected, err_msg=err_msg)
+                assert_equal(np.count_nonzero(m, axis=None),
+                             expected, err_msg=err_msg)
+                assert_equal(np.count_nonzero(m),
+                             expected, err_msg=err_msg)
+
+    def test_count_nonzero_axis_consistent(self):
+        # Check that the axis behaviour for valid axes in
+        # non-special cases is consistent (and therefore
+        # correct) by checking it against an integer array
+        # that is then cast to the generic object dtype
+        from itertools import combinations, permutations
+
+        axis = (0, 1, 2, 3)
+        size = (5, 5, 5, 5)
+        msg = "Mismatch for axis: %s"
+
+        rng = np.random.RandomState(1234)
+        m = rng.randint(-100, 100, size=size)
+        n = m.astype(object)
+
+        for length in range(len(axis)):
+            for combo in combinations(axis, length):
+                for perm in permutations(combo):
+                    assert_equal(
+                        np.count_nonzero(m, axis=perm),
+                        np.count_nonzero(n, axis=perm),
+                        err_msg=msg % (perm,))
+
+    def test_countnonzero_axis_empty(self):
+        a = np.array([[0, 0, 1], [1, 0, 1]])
+        assert_equal(np.count_nonzero(a, axis=()), a.astype(bool))
+
+    def test_countnonzero_keepdims(self):
+        a = np.array([[0, 0, 1, 0],
+                      [0, 3, 5, 0],
+                      [7, 9, 2, 0]])
+        assert_equal(np.count_nonzero(a, axis=0, keepdims=True),
+                     [[1, 2, 3, 0]])
+        assert_equal(np.count_nonzero(a, axis=1, keepdims=True),
+                     [[1], [2], [3]])
+        assert_equal(np.count_nonzero(a, keepdims=True),
+                     [[6]])
+
+    def test_array_method(self):
+        # Tests that the array method
+        # call to nonzero works
+        m = np.array([[1, 0, 0], [4, 0, 6]])
+        tgt = [[0, 1, 1], [0, 0, 2]]
+
+        assert_equal(m.nonzero(), tgt)
+
+    def test_nonzero_invalid_object(self):
+        # gh-9295
+        a = np.array([np.array([1, 2]), 3], dtype=object)
+        assert_raises(ValueError, np.nonzero, a)
+
+        class BoolErrors:
+            def __bool__(self):
+                raise ValueError("Not allowed")
+
+        assert_raises(ValueError, np.nonzero, np.array([BoolErrors()]))
+
+    def test_nonzero_sideeffect_safety(self):
+        # gh-13631
+        class FalseThenTrue:
+            _val = False
+            def __bool__(self):
+                try:
+                    return self._val
+                finally:
+                    self._val = True
+
+        class TrueThenFalse:
+            _val = True
+            def __bool__(self):
+                try:
+                    return self._val
+                finally:
+                    self._val = False
+
+        # result grows on the second pass
+        a = np.array([True, FalseThenTrue()])
+        assert_raises(RuntimeError, np.nonzero, a)
+
+        a = np.array([[True], [FalseThenTrue()]])
+        assert_raises(RuntimeError, np.nonzero, a)
+
+        # result shrinks on the second pass
+        a = np.array([False, TrueThenFalse()])
+        assert_raises(RuntimeError, np.nonzero, a)
+
+        a = np.array([[False], [TrueThenFalse()]])
+        assert_raises(RuntimeError, np.nonzero, a)
+
+    def test_nonzero_sideffects_structured_void(self):
+        # Checks that structured void does not mutate alignment flag of
+        # original array.
+        arr = np.zeros(5, dtype="i1,i8,i8")  # `ones` may short-circuit
+        assert arr.flags.aligned  # structs are considered "aligned"
+        assert not arr["f2"].flags.aligned
+        # make sure that nonzero/count_nonzero do not flip the flag:
+        np.nonzero(arr)
+        assert arr.flags.aligned
+        np.count_nonzero(arr)
+        assert arr.flags.aligned
+
+    def test_nonzero_exception_safe(self):
+        # gh-13930
+
+        class ThrowsAfter:
+            def __init__(self, iters):
+                self.iters_left = iters
+
+            def __bool__(self):
+                if self.iters_left == 0:
+                    raise ValueError("called `iters` times")
+
+                self.iters_left -= 1
+                return True
+
+        """
+        Test that a ValueError is raised instead of a SystemError
+
+        If the __bool__ function is called after the error state is set,
+        Python (cpython) will raise a SystemError.
+        """
+
+        # assert that an exception in first pass is handled correctly
+        a = np.array([ThrowsAfter(5)]*10)
+        assert_raises(ValueError, np.nonzero, a)
+
+        # raise exception in second pass for 1-dimensional loop
+        a = np.array([ThrowsAfter(15)]*10)
+        assert_raises(ValueError, np.nonzero, a)
+
+        # raise exception in second pass for n-dimensional loop
+        a = np.array([[ThrowsAfter(15)]]*10)
+        assert_raises(ValueError, np.nonzero, a)
+
+    @pytest.mark.skipif(IS_WASM, reason="wasm doesn't have threads")
+    def test_structured_threadsafety(self):
+        # Nonzero (and some other functions) should be threadsafe for
+        # structured datatypes, see gh-15387. This test can behave randomly.
+        from concurrent.futures import ThreadPoolExecutor
+
+        # Create a deeply nested dtype to make a failure more likely:
+        dt = np.dtype([("", "f8")])
+        dt = np.dtype([("", dt)])
+        dt = np.dtype([("", dt)] * 2)
+        # The array should be large enough to likely run into threading issues
+        arr = np.random.uniform(size=(5000, 4)).view(dt)[:, 0]
+        def func(arr):
+            arr.nonzero()
+
+        tpe = ThreadPoolExecutor(max_workers=8)
+        futures = [tpe.submit(func, arr) for _ in range(10)]
+        for f in futures:
+            f.result()
+
+        assert arr.dtype is dt
+
+
+class TestIndex:
+    def test_boolean(self):
+        a = rand(3, 5, 8)
+        V = rand(5, 8)
+        g1 = randint(0, 5, size=15)
+        g2 = randint(0, 8, size=15)
+        V[g1, g2] = -V[g1, g2]
+        assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all())
+
+    def test_boolean_edgecase(self):
+        a = np.array([], dtype='int32')
+        b = np.array([], dtype='bool')
+        c = a[b]
+        assert_equal(c, [])
+        assert_equal(c.dtype, np.dtype('int32'))
+
+
+class TestBinaryRepr:
+    def test_zero(self):
+        assert_equal(np.binary_repr(0), '0')
+
+    def test_positive(self):
+        assert_equal(np.binary_repr(10), '1010')
+        assert_equal(np.binary_repr(12522),
+                     '11000011101010')
+        assert_equal(np.binary_repr(10736848),
+                     '101000111101010011010000')
+
+    def test_negative(self):
+        assert_equal(np.binary_repr(-1), '-1')
+        assert_equal(np.binary_repr(-10), '-1010')
+        assert_equal(np.binary_repr(-12522),
+                     '-11000011101010')
+        assert_equal(np.binary_repr(-10736848),
+                     '-101000111101010011010000')
+
+    def test_sufficient_width(self):
+        assert_equal(np.binary_repr(0, width=5), '00000')
+        assert_equal(np.binary_repr(10, width=7), '0001010')
+        assert_equal(np.binary_repr(-5, width=7), '1111011')
+
+    def test_neg_width_boundaries(self):
+        # see gh-8670
+
+        # Ensure that the example in the issue does not
+        # break before proceeding to a more thorough test.
+        assert_equal(np.binary_repr(-128, width=8), '10000000')
+
+        for width in range(1, 11):
+            num = -2**(width - 1)
+            exp = '1' + (width - 1) * '0'
+            assert_equal(np.binary_repr(num, width=width), exp)
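+        # (-2**(width-1) is the most negative value representable in `width`
+        # bits; its two's-complement pattern is a 1 followed by zeros)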
+
+    def test_large_neg_int64(self):
+        # See gh-14289.
+        assert_equal(np.binary_repr(np.int64(-2**62), width=64),
+                     '11' + '0'*62)
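+        # in 64-bit two's complement, -2**62 == 2**64 - 2**62 == 3 * 2**62,
+        # i.e. '11' followed by 62 zero bits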
+
+
+class TestBaseRepr:
+    def test_base3(self):
+        assert_equal(np.base_repr(3**5, 3), '100000')
+
+    def test_positive(self):
+        assert_equal(np.base_repr(12, 10), '12')
+        assert_equal(np.base_repr(12, 10, 4), '000012')
+        assert_equal(np.base_repr(12, 4), '30')
+        assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW')
+
+    def test_negative(self):
+        assert_equal(np.base_repr(-12, 10), '-12')
+        assert_equal(np.base_repr(-12, 10, 4), '-000012')
+        assert_equal(np.base_repr(-12, 4), '-30')
+
+    def test_base_range(self):
+        with assert_raises(ValueError):
+            np.base_repr(1, 1)
+        with assert_raises(ValueError):
+            np.base_repr(1, 37)
+
+
+class TestArrayComparisons:
+    def test_array_equal(self):
+        res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
+        assert_(res)
+        assert_(type(res) is bool)
+        res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3]))
+        assert_(not res)
+        assert_(type(res) is bool)
+        res = np.array_equal(np.array([1, 2]), np.array([3, 4]))
+        assert_(not res)
+        assert_(type(res) is bool)
+        res = np.array_equal(np.array([1, 2]), np.array([1, 3]))
+        assert_(not res)
+        assert_(type(res) is bool)
+        res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1'))
+        assert_(res)
+        assert_(type(res) is bool)
+        res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'),
+                             np.array([('a', 1)], dtype='S1,u4'))
+        assert_(res)
+        assert_(type(res) is bool)
+
+    def test_array_equal_equal_nan(self):
+        # Test array_equal with equal_nan kwarg
+        a1 = np.array([1, 2, np.nan])
+        a2 = np.array([1, np.nan, 2])
+        a3 = np.array([1, 2, np.inf])
+
+        # equal_nan=False by default
+        assert_(not np.array_equal(a1, a1))
+        assert_(np.array_equal(a1, a1, equal_nan=True))
+        assert_(not np.array_equal(a1, a2, equal_nan=True))
+        # nan's not conflated with inf's
+        assert_(not np.array_equal(a1, a3, equal_nan=True))
+        # 0-D arrays
+        a = np.array(np.nan)
+        assert_(not np.array_equal(a, a))
+        assert_(np.array_equal(a, a, equal_nan=True))
+        # Non-float dtype - equal_nan should have no effect
+        a = np.array([1, 2, 3], dtype=int)
+        assert_(np.array_equal(a, a))
+        assert_(np.array_equal(a, a, equal_nan=True))
+        # Multi-dimensional array
+        a = np.array([[0, 1], [np.nan, 1]])
+        assert_(not np.array_equal(a, a))
+        assert_(np.array_equal(a, a, equal_nan=True))
+        # Complex values
+        a, b = [np.array([1 + 1j])]*2
+        a.real, b.imag = np.nan, np.nan
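+        # note: `a` and `b` are the same object here, so its single element
+        # gets NaN in both its real and imaginary parts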
+        assert_(not np.array_equal(a, b, equal_nan=False))
+        assert_(np.array_equal(a, b, equal_nan=True))
+
+    def test_none_compares_elementwise(self):
+        a = np.array([None, 1, None], dtype=object)
+        assert_equal(a == None, [True, False, True])
+        assert_equal(a != None, [False, True, False])
+
+        a = np.ones(3)
+        assert_equal(a == None, [False, False, False])
+        assert_equal(a != None, [True, True, True])
+
+    def test_array_equiv(self):
+        res = np.array_equiv(np.array([1, 2]), np.array([1, 2]))
+        assert_(res)
+        assert_(type(res) is bool)
+        res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3]))
+        assert_(not res)
+        assert_(type(res) is bool)
+        res = np.array_equiv(np.array([1, 2]), np.array([3, 4]))
+        assert_(not res)
+        assert_(type(res) is bool)
+        res = np.array_equiv(np.array([1, 2]), np.array([1, 3]))
+        assert_(not res)
+        assert_(type(res) is bool)
+
+        res = np.array_equiv(np.array([1, 1]), np.array([1]))
+        assert_(res)
+        assert_(type(res) is bool)
+        res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]]))
+        assert_(res)
+        assert_(type(res) is bool)
+        res = np.array_equiv(np.array([1, 2]), np.array([2]))
+        assert_(not res)
+        assert_(type(res) is bool)
+        res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]]))
+        assert_(not res)
+        assert_(type(res) is bool)
+        res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
+        assert_(not res)
+        assert_(type(res) is bool)
+
+    @pytest.mark.parametrize("dtype", ["V0", "V3", "V10"])
+    def test_compare_unstructured_voids(self, dtype):
+        zeros = np.zeros(3, dtype=dtype)
+
+        assert_array_equal(zeros, zeros)
+        assert not (zeros != zeros).any()
+
+        if dtype == "V0":
+            # Can't test != of actually different data
+            return
+
+        nonzeros = np.array([b"1", b"2", b"3"], dtype=dtype)
+
+        assert not (zeros == nonzeros).any()
+        assert (zeros != nonzeros).all()
+
+
+def assert_array_strict_equal(x, y):
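+    # Stricter than assert_array_equal: also require matching memory-layout
+    # flags and native/non-native byte order.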
+    assert_array_equal(x, y)
+    # Check flags; 32-bit arches typically don't provide 16-byte alignment
+    if ((x.dtype.alignment <= 8 or
+            np.intp().dtype.itemsize != 4) and
+            sys.platform != 'win32'):
+        assert_(x.flags == y.flags)
+    else:
+        assert_(x.flags.owndata == y.flags.owndata)
+        assert_(x.flags.writeable == y.flags.writeable)
+        assert_(x.flags.c_contiguous == y.flags.c_contiguous)
+        assert_(x.flags.f_contiguous == y.flags.f_contiguous)
+        assert_(x.flags.writebackifcopy == y.flags.writebackifcopy)
+    # check endianness
+    assert_(x.dtype.isnative == y.dtype.isnative)
+
+
+class TestClip:
+    def setup_method(self):
+        self.nr = 5
+        self.nc = 3
+
+    def fastclip(self, a, m, M, out=None, **kwargs):
+        return a.clip(m, M, out=out, **kwargs)
+
+    def clip(self, a, m, M, out=None):
+        # use a.choose to verify fastclip result
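+        # selector is 0 where m <= a <= M (keep a), 1 where a < m (take m),
+        # and 2 where a > M (take M), so choose picks the clipped value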
+        selector = np.less(a, m) + 2*np.greater(a, M)
+        return selector.choose((a, m, M), out=out)
+
+    # Handy functions
+    def _generate_data(self, n, m):
+        return randn(n, m)
+
+    def _generate_data_complex(self, n, m):
+        return randn(n, m) + 1.j * rand(n, m)
+
+    def _generate_flt_data(self, n, m):
+        return (randn(n, m)).astype(np.float32)
+
+    def _neg_byteorder(self, a):
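+        # Return a copy of `a` with the non-native byte order.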
+        a = np.asarray(a)
+        if sys.byteorder == 'little':
+            a = a.astype(a.dtype.newbyteorder('>'))
+        else:
+            a = a.astype(a.dtype.newbyteorder('<'))
+        return a
+
+    def _generate_non_native_data(self, n, m):
+        data = randn(n, m)
+        data = self._neg_byteorder(data)
+        assert_(not data.dtype.isnative)
+        return data
+
+    def _generate_int_data(self, n, m):
+        return (10 * rand(n, m)).astype(np.int64)
+
+    def _generate_int32_data(self, n, m):
+        return (10 * rand(n, m)).astype(np.int32)
+
+    # Now the real test cases
+
+    @pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO')
+    def test_ones_pathological(self, dtype):
+        # for preservation of behavior described in
+        # gh-12519; amin > amax behavior may still change
+        # in the future
+        arr = np.ones(10, dtype=dtype)
+        expected = np.zeros(10, dtype=dtype)
+        actual = np.clip(arr, 1, 0)
+        if dtype == 'O':
+            assert actual.tolist() == expected.tolist()
+        else:
+            assert_equal(actual, expected)
+
+    def test_simple_double(self):
+        # Test native double input with scalar min/max.
+        a = self._generate_data(self.nr, self.nc)
+        m = 0.1
+        M = 0.6
+        ac = self.fastclip(a, m, M)
+        act = self.clip(a, m, M)
+        assert_array_strict_equal(ac, act)
+
+    def test_simple_int(self):
+        # Test native int input with scalar min/max.
+        a = self._generate_int_data(self.nr, self.nc)
+        a = a.astype(int)
+        m = -2
+        M = 4
+        ac = self.fastclip(a, m, M)
+        act = self.clip(a, m, M)
+        assert_array_strict_equal(ac, act)
+
+    def test_array_double(self):
+        # Test native double input with array min/max.
+        a = self._generate_data(self.nr, self.nc)
+        m = np.zeros(a.shape)
+        M = m + 0.5
+        ac = self.fastclip(a, m, M)
+        act = self.clip(a, m, M)
+        assert_array_strict_equal(ac, act)
+
+    def test_simple_nonnative(self):
+        # Test non native double input with scalar min/max.
+        a = self._generate_non_native_data(self.nr, self.nc)
+        m = -0.5
+        M = 0.6
+        ac = self.fastclip(a, m, M)
+        act = self.clip(a, m, M)
+        assert_array_equal(ac, act)
+
+        # Test native double input with non native double scalar min/max.
+        a = self._generate_data(self.nr, self.nc)
+        m = -0.5
+        M = self._neg_byteorder(0.6)
+        assert_(not M.dtype.isnative)
+        ac = self.fastclip(a, m, M)
+        act = self.clip(a, m, M)
+        assert_array_equal(ac, act)
+
+    def test_simple_complex(self):
+        # Test native complex input with native double scalar min/max.
+        a = 3 * self._generate_data_complex(self.nr, self.nc)
+        m = -0.5
+        M = 1.
+        ac = self.fastclip(a, m, M)
+        act = self.clip(a, m, M)
+        assert_array_strict_equal(ac, act)
+
+        # Test native input with complex double scalar min/max.
+        a = 3 * self._generate_data(self.nr, self.nc)
+        m = -0.5 + 1.j
+        M = 1. + 2.j
+        ac = self.fastclip(a, m, M)
+        act = self.clip(a, m, M)
+        assert_array_strict_equal(ac, act)
+
+    def test_clip_complex(self):
+        # Address Issue gh-5354 for clipping complex arrays
+        # Test native complex input without explicit min/max
+        # i.e., either min=None or max=None
+        a = np.ones(10, dtype=complex)
+        m = a.min()
+        M = a.max()
+        am = self.fastclip(a, m, None)
+        aM = self.fastclip(a, None, M)
+        assert_array_strict_equal(am, a)
+        assert_array_strict_equal(aM, a)
+
+    def test_clip_non_contig(self):
+        # Test clip for non-contiguous native input and native scalar min/max.
+        a = self._generate_data(self.nr * 2, self.nc * 3)
+        a = a[::2, ::3]
+        assert_(not a.flags['F_CONTIGUOUS'])
+        assert_(not a.flags['C_CONTIGUOUS'])
+        ac = self.fastclip(a, -1.6, 1.7)
+        act = self.clip(a, -1.6, 1.7)
+        assert_array_strict_equal(ac, act)
+
+    def test_simple_out(self):
+        # Test native double input with scalar min/max.
+        a = self._generate_data(self.nr, self.nc)
+        m = -0.5
+        M = 0.6
+        ac = np.zeros(a.shape)
+        act = np.zeros(a.shape)
+        self.fastclip(a, m, M, ac)
+        self.clip(a, m, M, act)
+        assert_array_strict_equal(ac, act)
+
+    @pytest.mark.parametrize("casting", [None, "unsafe"])
+    def test_simple_int32_inout(self, casting):
+        # Test native int32 input with double min/max and int32 out.
+        a = self._generate_int32_data(self.nr, self.nc)
+        m = np.float64(0)
+        M = np.float64(2)
+        ac = np.zeros(a.shape, dtype=np.int32)
+        act = ac.copy()
+        if casting is None:
+            with pytest.raises(TypeError):
+                self.fastclip(a, m, M, ac, casting=casting)
+        else:
+            # explicitly passing "unsafe" will silence warning
+            self.fastclip(a, m, M, ac, casting=casting)
+            self.clip(a, m, M, act)
+            assert_array_strict_equal(ac, act)
+
+    def test_simple_int64_out(self):
+        # Test native int32 input with int32 scalar min/max and int64 out.
+        a = self._generate_int32_data(self.nr, self.nc)
+        m = np.int32(-1)
+        M = np.int32(1)
+        ac = np.zeros(a.shape, dtype=np.int64)
+        act = ac.copy()
+        self.fastclip(a, m, M, ac)
+        self.clip(a, m, M, act)
+        assert_array_strict_equal(ac, act)
+
+    def test_simple_int64_inout(self):
+        # Test native int32 input with double array min/max and int32 out.
+        a = self._generate_int32_data(self.nr, self.nc)
+        m = np.zeros(a.shape, np.float64)
+        M = np.float64(1)
+        ac = np.zeros(a.shape, dtype=np.int32)
+        act = ac.copy()
+        self.fastclip(a, m, M, out=ac, casting="unsafe")
+        self.clip(a, m, M, act)
+        assert_array_strict_equal(ac, act)
+
+    def test_simple_int32_out(self):
+        # Test native double input with scalar min/max and int out.
+        a = self._generate_data(self.nr, self.nc)
+        m = -1.0
+        M = 2.0
+        ac = np.zeros(a.shape, dtype=np.int32)
+        act = ac.copy()
+        self.fastclip(a, m, M, out=ac, casting="unsafe")
+        self.clip(a, m, M, act)
+        assert_array_strict_equal(ac, act)
+
+    def test_simple_inplace_01(self):
+        # Test native double input with array min/max in-place.
+        a = self._generate_data(self.nr, self.nc)
+        ac = a.copy()
+        m = np.zeros(a.shape)
+        M = 1.0
+        self.fastclip(a, m, M, a)
+        self.clip(a, m, M, ac)
+        assert_array_strict_equal(a, ac)
+
+    def test_simple_inplace_02(self):
+        # Test native double input with scalar min/max in-place.
+        a = self._generate_data(self.nr, self.nc)
+        ac = a.copy()
+        m = -0.5
+        M = 0.6
+        self.fastclip(a, m, M, a)
+        self.clip(ac, m, M, ac)
+        assert_array_strict_equal(a, ac)
+
+    def test_noncontig_inplace(self):
+        # Test non-contiguous double input with double scalar min/max in-place.
+        a = self._generate_data(self.nr * 2, self.nc * 3)
+        a = a[::2, ::3]
+        assert_(not a.flags['F_CONTIGUOUS'])
+        assert_(not a.flags['C_CONTIGUOUS'])
+        ac = a.copy()
+        m = -0.5
+        M = 0.6
+        self.fastclip(a, m, M, a)
+        self.clip(ac, m, M, ac)
+        assert_array_equal(a, ac)
+
+    def test_type_cast_01(self):
+        # Test native double input with scalar min/max.
+        a = self._generate_data(self.nr, self.nc)
+        m = -0.5
+        M = 0.6
+        ac = self.fastclip(a, m, M)
+        act = self.clip(a, m, M)
+        assert_array_strict_equal(ac, act)
+
+    def test_type_cast_02(self):
+        # Test native int32 input with int32 scalar min/max.
+        a = self._generate_int_data(self.nr, self.nc)
+        a = a.astype(np.int32)
+        m = -2
+        M = 4
+        ac = self.fastclip(a, m, M)
+        act = self.clip(a, m, M)
+        assert_array_strict_equal(ac, act)
+
+    def test_type_cast_03(self):
+        # Test native int32 input with float64 scalar min/max.
+        a = self._generate_int32_data(self.nr, self.nc)
+        m = -2
+        M = 4
+        ac = self.fastclip(a, np.float64(m), np.float64(M))
+        act = self.clip(a, np.float64(m), np.float64(M))
+        assert_array_strict_equal(ac, act)
+
+    def test_type_cast_04(self):
+        # Test native int32 input with float32 scalar min/max.
+        a = self._generate_int32_data(self.nr, self.nc)
+        m = np.float32(-2)
+        M = np.float32(4)
+        act = self.fastclip(a, m, M)
+        ac = self.clip(a, m, M)
+        assert_array_strict_equal(ac, act)
+
+    def test_type_cast_05(self):
+        # Test native int32 with double arrays min/max.
+        a = self._generate_int_data(self.nr, self.nc)
+        m = -0.5
+        M = 1.
+        ac = self.fastclip(a, m * np.zeros(a.shape), M)
+        act = self.clip(a, m * np.zeros(a.shape), M)
+        assert_array_strict_equal(ac, act)
+
+    def test_type_cast_06(self):
+        # Test native with NON native scalar min/max.
+        a = self._generate_data(self.nr, self.nc)
+        m = 0.5
+        m_s = self._neg_byteorder(m)
+        M = 1.
+        act = self.clip(a, m_s, M)
+        ac = self.fastclip(a, m_s, M)
+        assert_array_strict_equal(ac, act)
+
+    def test_type_cast_07(self):
+        # Test NON native with native array min/max.
+        a = self._generate_data(self.nr, self.nc)
+        m = -0.5 * np.ones(a.shape)
+        M = 1.
+        a_s = self._neg_byteorder(a)
+        assert_(not a_s.dtype.isnative)
+        act = a_s.clip(m, M)
+        ac = self.fastclip(a_s, m, M)
+        assert_array_strict_equal(ac, act)
+
+    def test_type_cast_08(self):
+        # Test NON native with native scalar min/max.
+        a = self._generate_data(self.nr, self.nc)
+        m = -0.5
+        M = 1.
+        a_s = self._neg_byteorder(a)
+        assert_(not a_s.dtype.isnative)
+        ac = self.fastclip(a_s, m, M)
+        act = a_s.clip(m, M)
+        assert_array_strict_equal(ac, act)
+
+    def test_type_cast_09(self):
+        # Test native with NON native array min/max.
+        a = self._generate_data(self.nr, self.nc)
+        m = -0.5 * np.ones(a.shape)
+        M = 1.
+        m_s = self._neg_byteorder(m)
+        assert_(not m_s.dtype.isnative)
+        ac = self.fastclip(a, m_s, M)
+        act = self.clip(a, m_s, M)
+        assert_array_strict_equal(ac, act)
+
+    def test_type_cast_10(self):
+        # Test native int64 input with float32 scalar min/max and float32 out.
+        a = self._generate_int_data(self.nr, self.nc)
+        b = np.zeros(a.shape, dtype=np.float32)
+        m = np.float32(-0.5)
+        M = np.float32(1)
+        act = self.clip(a, m, M, out=b)
+        ac = self.fastclip(a, m, M, out=b)
+        assert_array_strict_equal(ac, act)
+
+    def test_type_cast_11(self):
+        # Test non-native input with native scalar min/max and non-native out
+        a = self._generate_non_native_data(self.nr, self.nc)
+        b = a.copy()
+        b = b.astype(b.dtype.newbyteorder('>'))
+        bt = b.copy()
+        m = -0.5
+        M = 1.
+        self.fastclip(a, m, M, out=b)
+        self.clip(a, m, M, out=bt)
+        assert_array_strict_equal(b, bt)
+
+    def test_type_cast_12(self):
+        # Test native int64 input with int32 scalar min/max and float32 out
+        a = self._generate_int_data(self.nr, self.nc)
+        b = np.zeros(a.shape, dtype=np.float32)
+        m = np.int32(0)
+        M = np.int32(1)
+        act = self.clip(a, m, M, out=b)
+        ac = self.fastclip(a, m, M, out=b)
+        assert_array_strict_equal(ac, act)
+
+    def test_clip_with_out_simple(self):
+        # Test native double input with scalar min/max
+        a = self._generate_data(self.nr, self.nc)
+        m = -0.5
+        M = 0.6
+        ac = np.zeros(a.shape)
+        act = np.zeros(a.shape)
+        self.fastclip(a, m, M, ac)
+        self.clip(a, m, M, act)
+        assert_array_strict_equal(ac, act)
+
+    def test_clip_with_out_simple2(self):
+        # Test native int32 input with double min/max and int32 out
+        a = self._generate_int32_data(self.nr, self.nc)
+        m = np.float64(0)
+        M = np.float64(2)
+        ac = np.zeros(a.shape, dtype=np.int32)
+        act = ac.copy()
+        self.fastclip(a, m, M, out=ac, casting="unsafe")
+        self.clip(a, m, M, act)
+        assert_array_strict_equal(ac, act)
+
+    def test_clip_with_out_simple_int32(self):
+        # Test native int32 input with int32 scalar min/max and int64 out
+        a = self._generate_int32_data(self.nr, self.nc)
+        m = np.int32(-1)
+        M = np.int32(1)
+        ac = np.zeros(a.shape, dtype=np.int64)
+        act = ac.copy()
+        self.fastclip(a, m, M, ac)
+        self.clip(a, m, M, act)
+        assert_array_strict_equal(ac, act)
+
+    def test_clip_with_out_array_int32(self):
+        # Test native int32 input with double array min/max and int32 out
+        a = self._generate_int32_data(self.nr, self.nc)
+        m = np.zeros(a.shape, np.float64)
+        M = np.float64(1)
+        ac = np.zeros(a.shape, dtype=np.int32)
+        act = ac.copy()
+        self.fastclip(a, m, M, out=ac, casting="unsafe")
+        self.clip(a, m, M, act)
+        assert_array_strict_equal(ac, act)
+
+    def test_clip_with_out_array_outint32(self):
+        # Test native double input with scalar min/max and int out
+        a = self._generate_data(self.nr, self.nc)
+        m = -1.0
+        M = 2.0
+        ac = np.zeros(a.shape, dtype=np.int32)
+        act = ac.copy()
+        self.fastclip(a, m, M, out=ac, casting="unsafe")
+        self.clip(a, m, M, act)
+        assert_array_strict_equal(ac, act)
+
+    def test_clip_with_out_transposed(self):
+        # Test that the out argument works when transposed
+        a = np.arange(16).reshape(4, 4)
+        out = np.empty_like(a).T
+        a.clip(4, 10, out=out)
+        expected = self.clip(a, 4, 10)
+        assert_array_equal(out, expected)
+
+    def test_clip_with_out_memory_overlap(self):
+        # Test that the out argument works when it has memory overlap
+        a = np.arange(16).reshape(4, 4)
+        ac = a.copy()
+        a[:-1].clip(4, 10, out=a[1:])
+        expected = self.clip(ac[:-1], 4, 10)
+        assert_array_equal(a[1:], expected)
+
+    def test_clip_inplace_array(self):
+        # Test native double input with array min/max
+        a = self._generate_data(self.nr, self.nc)
+        ac = a.copy()
+        m = np.zeros(a.shape)
+        M = 1.0
+        self.fastclip(a, m, M, a)
+        self.clip(a, m, M, ac)
+        assert_array_strict_equal(a, ac)
+
+    def test_clip_inplace_simple(self):
+        # Test native double input with scalar min/max
+        a = self._generate_data(self.nr, self.nc)
+        ac = a.copy()
+        m = -0.5
+        M = 0.6
+        self.fastclip(a, m, M, a)
+        self.clip(a, m, M, ac)
+        assert_array_strict_equal(a, ac)
+
+    def test_clip_func_takes_out(self):
+        # Ensure that the clip() function takes an out= argument.
+        a = self._generate_data(self.nr, self.nc)
+        ac = a.copy()
+        m = -0.5
+        M = 0.6
+        a2 = np.clip(a, m, M, out=a)
+        self.clip(a, m, M, ac)
+        assert_array_strict_equal(a2, ac)
+        assert_(a2 is a)
+
+    def test_clip_nan(self):
+        d = np.arange(7.)
+        assert_equal(d.clip(min=np.nan), np.nan)
+        assert_equal(d.clip(max=np.nan), np.nan)
+        assert_equal(d.clip(min=np.nan, max=np.nan), np.nan)
+        assert_equal(d.clip(min=-2, max=np.nan), np.nan)
+        assert_equal(d.clip(min=np.nan, max=10), np.nan)
+
+    def test_object_clip(self):
+        a = np.arange(10, dtype=object)
+        actual = np.clip(a, 1, 5)
+        expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5])
+        assert actual.tolist() == expected.tolist()
+
+    def test_clip_all_none(self):
+        a = np.arange(10, dtype=object)
+        with assert_raises_regex(ValueError, 'max or min'):
+            np.clip(a, None, None)
+
+    def test_clip_invalid_casting(self):
+        a = np.arange(10, dtype=object)
+        with assert_raises_regex(ValueError,
+                                 'casting must be one of'):
+            self.fastclip(a, 1, 8, casting="garbage")
+
+    @pytest.mark.parametrize("amin, amax", [
+        # two scalars
+        (1, 0),
+        # mix scalar and array
+        (1, np.zeros(10)),
+        # two arrays
+        (np.ones(10), np.zeros(10)),
+        ])
+    def test_clip_value_min_max_flip(self, amin, amax):
+        a = np.arange(10, dtype=np.int64)
+        # requirement from ufunc_docstrings.py
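+        # when amin > amax this pins every element to amax, since
+        # maximum(a, amin) >= amin and the outer minimum then returns amax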
+        expected = np.minimum(np.maximum(a, amin), amax)
+        actual = np.clip(a, amin, amax)
+        assert_equal(actual, expected)
+
+    @pytest.mark.parametrize("arr, amin, amax, exp", [
+        # for a bug in npy_ObjectClip, based on a
+        # case produced by hypothesis
+        (np.zeros(10, dtype=np.int64),
+         0,
+         -2**64+1,
+         np.full(10, -2**64+1, dtype=object)),
+        # for bugs in NPY_TIMEDELTA_MAX, based on a case
+        # produced by hypothesis
+        (np.zeros(10, dtype='m8') - 1,
+         0,
+         0,
+         np.zeros(10, dtype='m8')),
+    ])
+    def test_clip_problem_cases(self, arr, amin, amax, exp):
+        actual = np.clip(arr, amin, amax)
+        assert_equal(actual, exp)
+
+    @pytest.mark.parametrize("arr, amin, amax", [
+        # problematic scalar nan case from hypothesis
+        (np.zeros(10, dtype=np.int64),
+         np.array(np.nan),
+         np.zeros(10, dtype=np.int32)),
+    ])
+    def test_clip_scalar_nan_propagation(self, arr, amin, amax):
+        # enforcement of scalar nan propagation for comparisons
+        # called through clip()
+        expected = np.minimum(np.maximum(arr, amin), amax)
+        actual = np.clip(arr, amin, amax)
+        assert_equal(actual, expected)
+
+    @pytest.mark.xfail(reason="propagation doesn't match spec")
+    @pytest.mark.parametrize("arr, amin, amax", [
+        (np.array([1] * 10, dtype='m8'),
+         np.timedelta64('NaT'),
+         np.zeros(10, dtype=np.int32)),
+    ])
+    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
+    def test_NaT_propagation(self, arr, amin, amax):
+        # NOTE: the expected function spec doesn't
+        # propagate NaT, but clip() now does
+        expected = np.minimum(np.maximum(arr, amin), amax)
+        actual = np.clip(arr, amin, amax)
+        assert_equal(actual, expected)
+
+    @given(
+        data=st.data(),
+        arr=hynp.arrays(
+            dtype=hynp.integer_dtypes() | hynp.floating_dtypes(),
+            shape=hynp.array_shapes()
+        )
+    )
+    def test_clip_property(self, data, arr):
+        """A property-based test using Hypothesis.
+
+        This aims for maximum generality: it could in principle generate *any*
+        valid inputs to np.clip, and in practice generates much more varied
+        inputs than human testers come up with.
+
+        Because many of the inputs have tricky dependencies - compatible dtypes
+        and mutually-broadcastable shapes - we use the `st.data()` strategy to
+        draw values *inside* the test function, from strategies we construct
+        based on previous values.  An alternative would be to define a custom
+        strategy with `@st.composite`, but until we have duplicated code,
+        inline is fine.
+
+        That accounts for most of the function; the actual test is just three
+        lines to calculate and compare actual vs expected results!
+        """
+        numeric_dtypes = hynp.integer_dtypes() | hynp.floating_dtypes()
+        # Generate shapes for the bounds which can be broadcast with each other
+        # and with the base shape.  Below, we might decide to use scalar bounds,
+        # but it's clearer to generate these shapes unconditionally in advance.
+        in_shapes, result_shape = data.draw(
+            hynp.mutually_broadcastable_shapes(
+                num_shapes=2, base_shape=arr.shape
+            )
+        )
+        # Scalar `nan` is deprecated due to the differing behaviour it shows.
+        s = numeric_dtypes.flatmap(
+            lambda x: hynp.from_dtype(x, allow_nan=False))
+        amin = data.draw(s | hynp.arrays(dtype=numeric_dtypes,
+            shape=in_shapes[0], elements={"allow_nan": False}))
+        amax = data.draw(s | hynp.arrays(dtype=numeric_dtypes,
+            shape=in_shapes[1], elements={"allow_nan": False}))
+
+        # Then calculate our result and expected result and check that they're
+        # equal!  See gh-12519 and gh-19457 for discussion deciding on this
+        # property and the result_type argument.
+        result = np.clip(arr, amin, amax)
+        t = np.result_type(arr, amin, amax)
+        expected = np.minimum(amax, np.maximum(arr, amin, dtype=t), dtype=t)
+        assert result.dtype == t
+        assert_array_equal(result, expected)
+
+
+class TestAllclose:
+    rtol = 1e-5
+    atol = 1e-8
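+    # np.allclose(a, b) is np.isclose(a, b).all(): both apply the rule
+    # |a - b| <= atol + rtol * |b| to finite values.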
+
+    def setup_method(self):
+        self.olderr = np.seterr(invalid='ignore')
+
+    def teardown_method(self):
+        np.seterr(**self.olderr)
+
+    def tst_allclose(self, x, y):
+        assert_(np.allclose(x, y), "%s and %s not close" % (x, y))
+
+    def tst_not_allclose(self, x, y):
+        assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y))
+
+    def test_ip_allclose(self):
+        # Parametric test factory.
+        arr = np.array([100, 1000])
+        aran = np.arange(125).reshape((5, 5, 5))
+
+        atol = self.atol
+        rtol = self.rtol
+
+        data = [([1, 0], [1, 0]),
+                ([atol], [0]),
+                ([1], [1+rtol+atol]),
+                (arr, arr + arr*rtol),
+                (arr, arr + arr*rtol + atol*2),
+                (aran, aran + aran*rtol),
+                (np.inf, np.inf),
+                (np.inf, [np.inf])]
+
+        for (x, y) in data:
+            self.tst_allclose(x, y)
+
+    def test_ip_not_allclose(self):
+        # Parametric test factory.
+        aran = np.arange(125).reshape((5, 5, 5))
+
+        atol = self.atol
+        rtol = self.rtol
+
+        data = [([np.inf, 0], [1, np.inf]),
+                ([np.inf, 0], [1, 0]),
+                ([np.inf, np.inf], [1, np.inf]),
+                ([np.inf, np.inf], [1, 0]),
+                ([-np.inf, 0], [np.inf, 0]),
+                ([np.nan, 0], [np.nan, 0]),
+                ([atol*2], [0]),
+                ([1], [1+rtol+atol*2]),
+                (aran, aran + aran*atol + atol*2),
+                (np.array([np.inf, 1]), np.array([0, np.inf]))]
+
+        for (x, y) in data:
+            self.tst_not_allclose(x, y)
+
+    def test_no_parameter_modification(self):
+        x = np.array([np.inf, 1])
+        y = np.array([0, np.inf])
+        np.allclose(x, y)
+        assert_array_equal(x, np.array([np.inf, 1]))
+        assert_array_equal(y, np.array([0, np.inf]))
+
+    def test_min_int(self):
+        # Could cause problems because abs(min_int) == min_int
+        min_int = np.iinfo(np.int_).min
+        a = np.array([min_int], dtype=np.int_)
+        assert_(np.allclose(a, a))
+
+    def test_equalnan(self):
+        x = np.array([1.0, np.nan])
+        assert_(np.allclose(x, x, equal_nan=True))
+
+    def test_return_class_is_ndarray(self):
+        # Issue gh-6475
+        # Check that allclose does not preserve subtypes
+        class Foo(np.ndarray):
+            def __new__(cls, *args, **kwargs):
+                return np.array(*args, **kwargs).view(cls)
+
+        a = Foo([1])
+        assert_(type(np.allclose(a, a)) is bool)
+
+
+class TestIsclose:
+    rtol = 1e-5
+    atol = 1e-8
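+    # Note the closeness test is asymmetric - rtol scales |b| only - so
+    # isclose(a, b) and isclose(b, a) can differ for large values.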
+
+    def _setup(self):
+        atol = self.atol
+        rtol = self.rtol
+        arr = np.array([100, 1000])
+        aran = np.arange(125).reshape((5, 5, 5))
+
+        self.all_close_tests = [
+                ([1, 0], [1, 0]),
+                ([atol], [0]),
+                ([1], [1 + rtol + atol]),
+                (arr, arr + arr*rtol),
+                (arr, arr + arr*rtol + atol),
+                (aran, aran + aran*rtol),
+                (np.inf, np.inf),
+                (np.inf, [np.inf]),
+                ([np.inf, -np.inf], [np.inf, -np.inf]),
+                ]
+        self.none_close_tests = [
+                ([np.inf, 0], [1, np.inf]),
+                ([np.inf, -np.inf], [1, 0]),
+                ([np.inf, np.inf], [1, -np.inf]),
+                ([np.inf, np.inf], [1, 0]),
+                ([np.nan, 0], [np.nan, -np.inf]),
+                ([atol*2], [0]),
+                ([1], [1 + rtol + atol*2]),
+                (aran, aran + rtol*1.1*aran + atol*1.1),
+                (np.array([np.inf, 1]), np.array([0, np.inf])),
+                ]
+        self.some_close_tests = [
+                ([np.inf, 0], [np.inf, atol*2]),
+                ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]),
+                (np.arange(3), [0, 1, 2.1]),
+                (np.nan, [np.nan, np.nan, np.nan]),
+                ([0], [atol, np.inf, -np.inf, np.nan]),
+                (0, [atol, np.inf, -np.inf, np.nan]),
+                ]
+        self.some_close_results = [
+                [True, False],
+                [True, False, False],
+                [True, True, False],
+                [False, False, False],
+                [True, False, False, False],
+                [True, False, False, False],
+                ]
+
+    def test_ip_isclose(self):
+        self._setup()
+        tests = self.some_close_tests
+        results = self.some_close_results
+        for (x, y), result in zip(tests, results):
+            assert_array_equal(np.isclose(x, y), result)
+
+    def tst_all_isclose(self, x, y):
+        assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y))
+
+    def tst_none_isclose(self, x, y):
+        msg = "%s and %s shouldn't be close"
+        assert_(not np.any(np.isclose(x, y)), msg % (x, y))
+
+    def tst_isclose_allclose(self, x, y):
+        msg = "isclose.all() and allclose aren't same for %s and %s"
+        msg2 = "isclose and allclose aren't same for %s and %s"
+        if np.isscalar(x) and np.isscalar(y):
+            assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y))
+        else:
+            assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y))
+
+    def test_ip_all_isclose(self):
+        self._setup()
+        for (x, y) in self.all_close_tests:
+            self.tst_all_isclose(x, y)
+
+    def test_ip_none_isclose(self):
+        self._setup()
+        for (x, y) in self.none_close_tests:
+            self.tst_none_isclose(x, y)
+
+    def test_ip_isclose_allclose(self):
+        self._setup()
+        tests = (self.all_close_tests + self.none_close_tests +
+                 self.some_close_tests)
+        for (x, y) in tests:
+            self.tst_isclose_allclose(x, y)
+
+    def test_equal_nan(self):
+        assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True])
+        arr = np.array([1.0, np.nan])
+        assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True])
+
+    def test_masked_arrays(self):
+        # Make sure to test the output type when arguments are interchanged.
+
+        x = np.ma.masked_where([True, True, False], np.arange(3))
+        assert_(type(x) is type(np.isclose(2, x)))
+        assert_(type(x) is type(np.isclose(x, 2)))
+
+        x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan])
+        assert_(type(x) is type(np.isclose(np.inf, x)))
+        assert_(type(x) is type(np.isclose(x, np.inf)))
+
+        x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
+        y = np.isclose(np.nan, x, equal_nan=True)
+        assert_(type(x) is type(y))
+        # Ensure that the mask isn't modified...
+        assert_array_equal([True, True, False], y.mask)
+        y = np.isclose(x, np.nan, equal_nan=True)
+        assert_(type(x) is type(y))
+        # Ensure that the mask isn't modified...
+        assert_array_equal([True, True, False], y.mask)
+
+        x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
+        y = np.isclose(x, x, equal_nan=True)
+        assert_(type(x) is type(y))
+        # Ensure that the mask isn't modified...
+        assert_array_equal([True, True, False], y.mask)
+
+    def test_scalar_return(self):
+        assert_(np.isscalar(np.isclose(1, 1)))
+
+    def test_no_parameter_modification(self):
+        x = np.array([np.inf, 1])
+        y = np.array([0, np.inf])
+        np.isclose(x, y)
+        assert_array_equal(x, np.array([np.inf, 1]))
+        assert_array_equal(y, np.array([0, np.inf]))
+
+    def test_non_finite_scalar(self):
+        # GH7014, when two scalars are compared the output should also be a
+        # scalar
+        assert_(np.isclose(np.inf, -np.inf) is np.False_)
+        assert_(np.isclose(0, np.inf) is np.False_)
+        assert_(type(np.isclose(0, np.inf)) is np.bool_)
+
+    def test_timedelta(self):
+        # Allclose currently works for timedelta64 as long as `atol` is
+        # an integer or also a timedelta64
+        a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]")
+        assert np.isclose(a, a, atol=0, equal_nan=True).all()
+        assert np.isclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True).all()
+        assert np.allclose(a, a, atol=0, equal_nan=True)
+        assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True)
+
+
+class TestStdVar:
+    def setup_method(self):
+        self.A = np.array([1, -1, 1, -1])
+        self.real_var = 1
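+        # A has mean 0 and sum(A**2) == len(A), so the population variance
+        # (ddof=0) is exactly 1; ddof=k rescales the divisor to len(A) - k.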
+
+    def test_basic(self):
+        assert_almost_equal(np.var(self.A), self.real_var)
+        assert_almost_equal(np.std(self.A)**2, self.real_var)
+
+    def test_scalars(self):
+        assert_equal(np.var(1), 0)
+        assert_equal(np.std(1), 0)
+
+    def test_ddof1(self):
+        assert_almost_equal(np.var(self.A, ddof=1),
+                            self.real_var * len(self.A) / (len(self.A) - 1))
+        assert_almost_equal(np.std(self.A, ddof=1)**2,
+                            self.real_var*len(self.A) / (len(self.A) - 1))
+
+    def test_ddof2(self):
+        assert_almost_equal(np.var(self.A, ddof=2),
+                            self.real_var * len(self.A) / (len(self.A) - 2))
+        assert_almost_equal(np.std(self.A, ddof=2)**2,
+                            self.real_var * len(self.A) / (len(self.A) - 2))
+
+    def test_out_scalar(self):
+        d = np.arange(10)
+        out = np.array(0.)
+        r = np.std(d, out=out)
+        assert_(r is out)
+        assert_array_equal(r, out)
+        r = np.var(d, out=out)
+        assert_(r is out)
+        assert_array_equal(r, out)
+        r = np.mean(d, out=out)
+        assert_(r is out)
+        assert_array_equal(r, out)
+
+
+class TestStdVarComplex:
+    def test_basic(self):
+        A = np.array([1, 1.j, -1, -1.j])
+        real_var = 1
+        assert_almost_equal(np.var(A), real_var)
+        assert_almost_equal(np.std(A)**2, real_var)
+
+    def test_scalars(self):
+        assert_equal(np.var(1j), 0)
+        assert_equal(np.std(1j), 0)
+
+
+class TestCreationFuncs:
+    # Test ones, zeros, empty and full.
+
+    def setup_method(self):
+        dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
+        # Flexible dtypes (void, bytes, str) have zero-sized placeholder forms
+        # ('V0', 'S0', 'U0'); replace them with explicitly sized variants below.
+        variable_sized = {tp for tp in dtypes if tp.str.endswith('0')}
+        self.dtypes = sorted(dtypes - variable_sized |
+                             {np.dtype(tp.str.replace("0", str(i)))
+                              for tp in variable_sized for i in range(1, 10)},
+                             key=lambda dtype: dtype.str)
+        self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
+        self.ndims = 10
+
+    def check_function(self, func, fill_value=None):
+        par = ((0, 1, 2),
+               range(self.ndims),
+               self.orders,
+               self.dtypes)
+        fill_kwarg = {}
+        if fill_value is not None:
+            fill_kwarg = {'fill_value': fill_value}
+
+        for size, ndims, order, dtype in itertools.product(*par):
+            shape = ndims * [size]
+
+            # do not fill void type
+            if fill_kwarg and dtype.str.startswith('|V'):
+                continue
+
+            arr = func(shape, order=order, dtype=dtype,
+                       **fill_kwarg)
+
+            assert_equal(arr.dtype, dtype)
+            assert_(getattr(arr.flags, self.orders[order]))
+
+            if fill_value is not None:
+                if dtype.str.startswith('|S'):
+                    val = str(fill_value)
+                else:
+                    val = fill_value
+                assert_equal(arr, dtype.type(val))
+
+    def test_zeros(self):
+        self.check_function(np.zeros)
+
+    def test_ones(self):
+        self.check_function(np.ones)
+
+    def test_empty(self):
+        self.check_function(np.empty)
+
+    def test_full(self):
+        self.check_function(np.full, 0)
+        self.check_function(np.full, 1)
+
+    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+    def test_for_reference_leak(self):
+        # Make sure we have an object for reference
+        dim = 1
+        beg = sys.getrefcount(dim)
+        np.zeros([dim]*10)
+        assert_(sys.getrefcount(dim) == beg)
+        np.ones([dim]*10)
+        assert_(sys.getrefcount(dim) == beg)
+        np.empty([dim]*10)
+        assert_(sys.getrefcount(dim) == beg)
+        np.full([dim]*10, 0)
+        assert_(sys.getrefcount(dim) == beg)
+
+
+class TestLikeFuncs:
+    '''Test ones_like, zeros_like, empty_like and full_like'''
+
+    def setup_method(self):
+        self.data = [
+                # Array scalars
+                (np.array(3.), None),
+                (np.array(3), 'f8'),
+                # 1D arrays
+                (np.arange(6, dtype='f4'), None),
+                (np.arange(6), 'c16'),
+                # 2D C-layout arrays
+                (np.arange(6).reshape(2, 3), None),
+                (np.arange(6).reshape(3, 2), 'i1'),
+                # 2D F-layout arrays
+                (np.arange(6).reshape((2, 3), order='F'), None),
+                (np.arange(6).reshape((3, 2), order='F'), 'i1'),
+                # 3D C-layout arrays
+                (np.arange(24).reshape(2, 3, 4), None),
+                (np.arange(24).reshape(4, 3, 2), 'f4'),
+                # 3D F-layout arrays
+                (np.arange(24).reshape((2, 3, 4), order='F'), None),
+                (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
+                # 3D non-C/F-layout arrays
+                (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
+                (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
+                     ]
+        self.shapes = [(), (5,), (5, 6), (5, 6, 7)]
+
+    def compare_array_value(self, dz, value, fill_value):
+        if value is not None:
+            if fill_value:
+                # Conversion is close to what np.full_like uses, but we
+                # may want to convert directly in the future, which may
+                # result in errors (where this does not).
+                z = np.array(value).astype(dz.dtype)
+                assert_(np.all(dz == z))
+            else:
+                assert_(np.all(dz == value))
+
+    def check_like_function(self, like_function, value, fill_value=False):
+        if fill_value:
+            fill_kwarg = {'fill_value': value}
+        else:
+            fill_kwarg = {}
+        for d, dtype in self.data:
+            # default (K) order, dtype
+            dz = like_function(d, dtype=dtype, **fill_kwarg)
+            assert_equal(dz.shape, d.shape)
+            assert_equal(np.array(dz.strides)*d.dtype.itemsize,
+                         np.array(d.strides)*dz.dtype.itemsize)
+            assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
+            assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
+            if dtype is None:
+                assert_equal(dz.dtype, d.dtype)
+            else:
+                assert_equal(dz.dtype, np.dtype(dtype))
+            self.compare_array_value(dz, value, fill_value)
+
+            # C order, default dtype
+            dz = like_function(d, order='C', dtype=dtype, **fill_kwarg)
+            assert_equal(dz.shape, d.shape)
+            assert_(dz.flags.c_contiguous)
+            if dtype is None:
+                assert_equal(dz.dtype, d.dtype)
+            else:
+                assert_equal(dz.dtype, np.dtype(dtype))
+            self.compare_array_value(dz, value, fill_value)
+
+            # F order, default dtype
+            dz = like_function(d, order='F', dtype=dtype, **fill_kwarg)
+            assert_equal(dz.shape, d.shape)
+            assert_(dz.flags.f_contiguous)
+            if dtype is None:
+                assert_equal(dz.dtype, d.dtype)
+            else:
+                assert_equal(dz.dtype, np.dtype(dtype))
+            self.compare_array_value(dz, value, fill_value)
+
+            # A order
+            dz = like_function(d, order='A', dtype=dtype, **fill_kwarg)
+            assert_equal(dz.shape, d.shape)
+            if d.flags.f_contiguous:
+                assert_(dz.flags.f_contiguous)
+            else:
+                assert_(dz.flags.c_contiguous)
+            if dtype is None:
+                assert_equal(dz.dtype, d.dtype)
+            else:
+                assert_equal(dz.dtype, np.dtype(dtype))
+            self.compare_array_value(dz, value, fill_value)
+
+            # Test the 'shape' parameter
+            for s in self.shapes:
+                for o in 'CFA':
+                    sz = like_function(d, dtype=dtype, shape=s, order=o,
+                                       **fill_kwarg)
+                    assert_equal(sz.shape, s)
+                    if dtype is None:
+                        assert_equal(sz.dtype, d.dtype)
+                    else:
+                        assert_equal(sz.dtype, np.dtype(dtype))
+                    if o == 'C' or (o == 'A' and d.flags.c_contiguous):
+                        assert_(sz.flags.c_contiguous)
+                    elif o == 'F' or (o == 'A' and d.flags.f_contiguous):
+                        assert_(sz.flags.f_contiguous)
+                    self.compare_array_value(sz, value, fill_value)
+
+                if (d.ndim != len(s)):
+                    assert_equal(np.argsort(like_function(d, dtype=dtype,
+                                                          shape=s, order='K',
+                                                          **fill_kwarg).strides),
+                                 np.argsort(np.empty(s, dtype=dtype,
+                                                     order='C').strides))
+                else:
+                    assert_equal(np.argsort(like_function(d, dtype=dtype,
+                                                          shape=s, order='K',
+                                                          **fill_kwarg).strides),
+                                 np.argsort(d.strides))
+
+        # Test the 'subok' parameter
+        class MyNDArray(np.ndarray):
+            pass
+
+        a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
+
+        b = like_function(a, **fill_kwarg)
+        assert_(type(b) is MyNDArray)
+
+        b = like_function(a, subok=False, **fill_kwarg)
+        assert_(type(b) is not MyNDArray)
+
+    def test_ones_like(self):
+        self.check_like_function(np.ones_like, 1)
+
+    def test_zeros_like(self):
+        self.check_like_function(np.zeros_like, 0)
+
+    def test_empty_like(self):
+        self.check_like_function(np.empty_like, None)
+
+    def test_filled_like(self):
+        self.check_like_function(np.full_like, 0, True)
+        self.check_like_function(np.full_like, 1, True)
+        self.check_like_function(np.full_like, 1000, True)
+        self.check_like_function(np.full_like, 123.456, True)
+        # Inf to integer casts cause invalid-value errors: ignore them.
+        with np.errstate(invalid="ignore"):
+            self.check_like_function(np.full_like, np.inf, True)
+
+    @pytest.mark.parametrize('likefunc', [np.empty_like, np.full_like,
+                                          np.zeros_like, np.ones_like])
+    @pytest.mark.parametrize('dtype', [str, bytes])
+    def test_dtype_str_bytes(self, likefunc, dtype):
+        # Regression test for gh-19860
+        a = np.arange(16).reshape(2, 8)
+        b = a[:, ::2]  # Ensure b is not contiguous.
+        kwargs = {'fill_value': ''} if likefunc == np.full_like else {}
+        result = likefunc(b, dtype=dtype, **kwargs)
+        if dtype == str:
+            assert result.strides == (16, 4)
+        else:
+            # dtype is bytes
+            assert result.strides == (4, 1)
+
+
+class TestCorrelate:
+    def _setup(self, dt):
+        self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
+        self.xs = np.arange(1, 20)[::3]
+        self.y = np.array([-1, -2, -3], dtype=dt)
+        self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt)
+        self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt)
+        self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt)
+        self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt)
+        self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt)
+        self.zs = np.array([-3., -14., -30., -48., -66., -84.,
+                           -102., -54., -19.], dtype=dt)
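+        # Expected values follow 'full'-mode correlation, which slides y
+        # across x *without* reversing it: z1[0] = x[0]*y[2] = -3,
+        # z1[1] = x[0]*y[1] + x[1]*y[2] = -8, and so on.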
+
+    def test_float(self):
+        self._setup(float)
+        z = np.correlate(self.x, self.y, 'full')
+        assert_array_almost_equal(z, self.z1)
+        z = np.correlate(self.x, self.y[:-1], 'full')
+        assert_array_almost_equal(z, self.z1_4)
+        z = np.correlate(self.y, self.x, 'full')
+        assert_array_almost_equal(z, self.z2)
+        z = np.correlate(self.x[::-1], self.y, 'full')
+        assert_array_almost_equal(z, self.z1r)
+        z = np.correlate(self.y, self.x[::-1], 'full')
+        assert_array_almost_equal(z, self.z2r)
+        z = np.correlate(self.xs, self.y, 'full')
+        assert_array_almost_equal(z, self.zs)
+
+    def test_object(self):
+        self._setup(Decimal)
+        z = np.correlate(self.x, self.y, 'full')
+        assert_array_almost_equal(z, self.z1)
+        z = np.correlate(self.y, self.x, 'full')
+        assert_array_almost_equal(z, self.z2)
+
+    def test_no_overwrite(self):
+        d = np.ones(100)
+        k = np.ones(3)
+        np.correlate(d, k)
+        assert_array_equal(d, np.ones(100))
+        assert_array_equal(k, np.ones(3))
+
+    def test_complex(self):
+        x = np.array([1, 2, 3, 4+1j], dtype=complex)
+        y = np.array([-1, -2j, 3+1j], dtype=complex)
+        r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex)
+        r_z = r_z[::-1].conjugate()
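+        # np.correlate conjugates its second argument, so correlate(y, x)
+        # equals the reversed conjugate of correlate(x, y) - hence the flip.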
+        z = np.correlate(y, x, mode='full')
+        assert_array_almost_equal(z, r_z)
+
+    def test_zero_size(self):
+        with pytest.raises(ValueError):
+            np.correlate(np.array([]), np.ones(1000), mode='full')
+        with pytest.raises(ValueError):
+            np.correlate(np.ones(1000), np.array([]), mode='full')
+
+    def test_mode(self):
+        d = np.ones(100)
+        k = np.ones(3)
+        default_mode = np.correlate(d, k, mode='valid')
+        with assert_warns(DeprecationWarning):
+            valid_mode = np.correlate(d, k, mode='v')
+        assert_array_equal(valid_mode, default_mode)
+        # integer mode
+        with assert_raises(ValueError):
+            np.correlate(d, k, mode=-1)
+        assert_array_equal(np.correlate(d, k, mode=0), valid_mode)
+        # illegal arguments
+        with assert_raises(TypeError):
+            np.correlate(d, k, mode=None)
+
+
+class TestConvolve:
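+    # Unlike np.correlate, np.convolve reverses its second argument before
+    # sliding it across the first.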
+    def test_object(self):
+        d = [1.] * 100
+        k = [1.] * 3
+        assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3))
+
+    def test_no_overwrite(self):
+        d = np.ones(100)
+        k = np.ones(3)
+        np.convolve(d, k)
+        assert_array_equal(d, np.ones(100))
+        assert_array_equal(k, np.ones(3))
+
+    def test_mode(self):
+        d = np.ones(100)
+        k = np.ones(3)
+        default_mode = np.convolve(d, k, mode='full')
+        with assert_warns(DeprecationWarning):
+            full_mode = np.convolve(d, k, mode='f')
+        assert_array_equal(full_mode, default_mode)
+        # integer mode
+        with assert_raises(ValueError):
+            np.convolve(d, k, mode=-1)
+        assert_array_equal(np.convolve(d, k, mode=2), full_mode)
+        # illegal arguments
+        with assert_raises(TypeError):
+            np.convolve(d, k, mode=None)
+
+
+class TestArgwhere:
+
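+    # np.argwhere(x) returns the indices of nonzero entries as an
+    # (N, x.ndim) array, one row per hit - equivalent to
+    # np.transpose(np.nonzero(x)).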
+    @pytest.mark.parametrize('nd', [0, 1, 2])
+    def test_nd(self, nd):
+        # get an nd array with multiple elements in every dimension
+        x = np.empty((2,)*nd, bool)
+
+        # none
+        x[...] = False
+        assert_equal(np.argwhere(x).shape, (0, nd))
+
+        # only one
+        x[...] = False
+        x.flat[0] = True
+        assert_equal(np.argwhere(x).shape, (1, nd))
+
+        # all but one
+        x[...] = True
+        x.flat[0] = False
+        assert_equal(np.argwhere(x).shape, (x.size - 1, nd))
+
+        # all
+        x[...] = True
+        assert_equal(np.argwhere(x).shape, (x.size, nd))
+
+    def test_2D(self):
+        x = np.arange(6).reshape((2, 3))
+        assert_array_equal(np.argwhere(x > 1),
+                           [[0, 2],
+                            [1, 0],
+                            [1, 1],
+                            [1, 2]])
+
+    def test_list(self):
+        assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
+
+
+class TestStringFunction:
+
+    def test_set_string_function(self):
+        a = np.array([1])
+        np.set_string_function(lambda x: "FOO", repr=True)
+        assert_equal(repr(a), "FOO")
+        np.set_string_function(None, repr=True)
+        assert_equal(repr(a), "array([1])")
+
+        np.set_string_function(lambda x: "FOO", repr=False)
+        assert_equal(str(a), "FOO")
+        np.set_string_function(None, repr=False)
+        assert_equal(str(a), "[1]")
+
+
+class TestRoll:
+    def test_roll1d(self):
+        x = np.arange(10)
+        xr = np.roll(x, 2)
+        assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))
+
+    def test_roll2d(self):
+        x2 = np.reshape(np.arange(10), (2, 5))
+        x2r = np.roll(x2, 1)
+        assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]))
+
+        x2r = np.roll(x2, 1, axis=0)
+        assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+        x2r = np.roll(x2, 1, axis=1)
+        assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+        # Roll multiple axes at once.
+        x2r = np.roll(x2, 1, axis=(0, 1))
+        assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
+
+        x2r = np.roll(x2, (1, 0), axis=(0, 1))
+        assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+        x2r = np.roll(x2, (-1, 0), axis=(0, 1))
+        assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+        x2r = np.roll(x2, (0, 1), axis=(0, 1))
+        assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+        x2r = np.roll(x2, (0, -1), axis=(0, 1))
+        assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]))
+
+        x2r = np.roll(x2, (1, 1), axis=(0, 1))
+        assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
+
+        x2r = np.roll(x2, (-1, -1), axis=(0, 1))
+        assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]]))
+
+        # Roll the same axis multiple times.
+        x2r = np.roll(x2, 1, axis=(0, 0))
+        assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]))
+
+        x2r = np.roll(x2, 1, axis=(1, 1))
+        assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]]))
+
+        # Roll more than one turn in either direction.
+        x2r = np.roll(x2, 6, axis=1)
+        assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+        x2r = np.roll(x2, -4, axis=1)
+        assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+    def test_roll_empty(self):
+        x = np.array([])
+        assert_equal(np.roll(x, 1), np.array([]))
+
+
+class TestRollaxis:
+
+    # expected shape indexed by (axis, start) for array of
+    # shape (1, 2, 3, 4)
+    tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),
+                (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),
+                (0, 4): (2, 3, 4, 1),
+                (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),
+                (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),
+                (1, 4): (1, 3, 4, 2),
+                (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),
+                (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),
+                (2, 4): (1, 2, 4, 3),
+                (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),
+                (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),
+                (3, 4): (1, 2, 3, 4)}
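+    # np.rollaxis(a, axis, start) rolls `axis` backwards until it lies
+    # before position `start`; e.g. (axis=2, start=0) on (1, 2, 3, 4)
+    # brings axis 2 to the front, giving (3, 1, 2, 4).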
+
+    def test_exceptions(self):
+        a = np.arange(1*2*3*4).reshape(1, 2, 3, 4)
+        assert_raises(np.AxisError, np.rollaxis, a, -5, 0)
+        assert_raises(np.AxisError, np.rollaxis, a, 0, -5)
+        assert_raises(np.AxisError, np.rollaxis, a, 4, 0)
+        assert_raises(np.AxisError, np.rollaxis, a, 0, 5)
+
+    def test_results(self):
+        a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
+        aind = np.indices(a.shape)
+        assert_(a.flags['OWNDATA'])
+        for (i, j) in self.tgtshape:
+            # positive axis, positive start
+            res = np.rollaxis(a, axis=i, start=j)
+            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+            assert_(np.all(res[i0, i1, i2, i3] == a))
+            assert_(res.shape == self.tgtshape[(i, j)], str((i, j)))
+            assert_(not res.flags['OWNDATA'])
+
+            # negative axis, positive start
+            ip = i + 1
+            res = np.rollaxis(a, axis=-ip, start=j)
+            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+            assert_(np.all(res[i0, i1, i2, i3] == a))
+            assert_(res.shape == self.tgtshape[(4 - ip, j)])
+            assert_(not res.flags['OWNDATA'])
+
+            # positive axis, negative start
+            jp = j + 1 if j < 4 else j
+            res = np.rollaxis(a, axis=i, start=-jp)
+            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+            assert_(np.all(res[i0, i1, i2, i3] == a))
+            assert_(res.shape == self.tgtshape[(i, 4 - jp)])
+            assert_(not res.flags['OWNDATA'])
+
+            # negative axis, negative start
+            ip = i + 1
+            jp = j + 1 if j < 4 else j
+            res = np.rollaxis(a, axis=-ip, start=-jp)
+            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+            assert_(np.all(res[i0, i1, i2, i3] == a))
+            assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
+            assert_(not res.flags['OWNDATA'])
+
+
+class TestMoveaxis:
+    def test_move_to_end(self):
+        x = np.random.randn(5, 6, 7)
+        for source, expected in [(0, (6, 7, 5)),
+                                 (1, (5, 7, 6)),
+                                 (2, (5, 6, 7)),
+                                 (-1, (5, 6, 7))]:
+            actual = np.moveaxis(x, source, -1).shape
+            assert_equal(actual, expected)
+
+    def test_move_new_position(self):
+        x = np.random.randn(1, 2, 3, 4)
+        for source, destination, expected in [
+                (0, 1, (2, 1, 3, 4)),
+                (1, 2, (1, 3, 2, 4)),
+                (1, -1, (1, 3, 4, 2)),
+                ]:
+            actual = np.moveaxis(x, source, destination).shape
+            assert_equal(actual, expected)
+
+    def test_preserve_order(self):
+        x = np.zeros((1, 2, 3, 4))
+        for source, destination in [
+                (0, 0),
+                (3, -1),
+                (-1, 3),
+                ([0, -1], [0, -1]),
+                ([2, 0], [2, 0]),
+                (range(4), range(4)),
+                ]:
+            actual = np.moveaxis(x, source, destination).shape
+            assert_equal(actual, (1, 2, 3, 4))
+
+    def test_move_multiples(self):
+        x = np.zeros((0, 1, 2, 3))
+        for source, destination, expected in [
+                ([0, 1], [2, 3], (2, 3, 0, 1)),
+                ([2, 3], [0, 1], (2, 3, 0, 1)),
+                ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),
+                ([3, 0], [1, 0], (0, 3, 1, 2)),
+                ([0, 3], [0, 1], (0, 3, 1, 2)),
+                ]:
+            actual = np.moveaxis(x, source, destination).shape
+            assert_equal(actual, expected)
+
+    def test_errors(self):
+        x = np.random.randn(1, 2, 3)
+        assert_raises_regex(np.AxisError, 'source.*out of bounds',
+                            np.moveaxis, x, 3, 0)
+        assert_raises_regex(np.AxisError, 'source.*out of bounds',
+                            np.moveaxis, x, -4, 0)
+        assert_raises_regex(np.AxisError, 'destination.*out of bounds',
+                            np.moveaxis, x, 0, 5)
+        assert_raises_regex(ValueError, 'repeated axis in `source`',
+                            np.moveaxis, x, [0, 0], [0, 1])
+        assert_raises_regex(ValueError, 'repeated axis in `destination`',
+                            np.moveaxis, x, [0, 1], [1, 1])
+        assert_raises_regex(ValueError, 'must have the same number',
+                            np.moveaxis, x, 0, [0, 1])
+        assert_raises_regex(ValueError, 'must have the same number',
+                            np.moveaxis, x, [0, 1], [0])
+
+    def test_array_likes(self):
+        x = np.ma.zeros((1, 2, 3))
+        result = np.moveaxis(x, 0, 0)
+        assert_equal(x.shape, result.shape)
+        assert_(isinstance(result, np.ma.MaskedArray))
+
+        x = [1, 2, 3]
+        result = np.moveaxis(x, 0, 0)
+        assert_equal(x, list(result))
+        assert_(isinstance(result, np.ndarray))
+
+
+class TestCross:
+    def test_2x2(self):
+        u = [1, 2]
+        v = [3, 4]
+        z = -2
+        cp = np.cross(u, v)
+        assert_equal(cp, z)
+        cp = np.cross(v, u)
+        assert_equal(cp, -z)
+
+    def test_2x3(self):
+        u = [1, 2]
+        v = [3, 4, 5]
+        z = np.array([10, -5, -2])
+        cp = np.cross(u, v)
+        assert_equal(cp, z)
+        cp = np.cross(v, u)
+        assert_equal(cp, -z)
+
+    def test_3x3(self):
+        u = [1, 2, 3]
+        v = [4, 5, 6]
+        z = np.array([-3, 6, -3])
+        cp = np.cross(u, v)
+        assert_equal(cp, z)
+        cp = np.cross(v, u)
+        assert_equal(cp, -z)
+
+    def test_broadcasting(self):
+        # Ticket #2624 (Trac #2032)
+        u = np.tile([1, 2], (11, 1))
+        v = np.tile([3, 4], (11, 1))
+        z = -2
+        assert_equal(np.cross(u, v), z)
+        assert_equal(np.cross(v, u), -z)
+        assert_equal(np.cross(u, u), 0)
+
+        u = np.tile([1, 2], (11, 1)).T
+        v = np.tile([3, 4, 5], (11, 1))
+        z = np.tile([10, -5, -2], (11, 1))
+        assert_equal(np.cross(u, v, axisa=0), z)
+        assert_equal(np.cross(v, u.T), -z)
+        assert_equal(np.cross(v, v), 0)
+
+        u = np.tile([1, 2, 3], (11, 1)).T
+        v = np.tile([3, 4], (11, 1)).T
+        z = np.tile([-12, 9, -2], (11, 1))
+        assert_equal(np.cross(u, v, axisa=0, axisb=0), z)
+        assert_equal(np.cross(v.T, u.T), -z)
+        assert_equal(np.cross(u.T, u.T), 0)
+
+        u = np.tile([1, 2, 3], (5, 1))
+        v = np.tile([4, 5, 6], (5, 1)).T
+        z = np.tile([-3, 6, -3], (5, 1))
+        assert_equal(np.cross(u, v, axisb=0), z)
+        assert_equal(np.cross(v.T, u), -z)
+        assert_equal(np.cross(u, u), 0)
+
+    def test_broadcasting_shapes(self):
+        u = np.ones((2, 1, 3))
+        v = np.ones((5, 3))
+        assert_equal(np.cross(u, v).shape, (2, 5, 3))
+        u = np.ones((10, 3, 5))
+        v = np.ones((2, 5))
+        assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))
+        assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=2)
+        assert_raises(np.AxisError, np.cross, u, v, axisa=3, axisb=0)
+        u = np.ones((10, 3, 5, 7))
+        v = np.ones((5, 7, 2))
+        assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7))
+        assert_raises(np.AxisError, np.cross, u, v, axisa=-5, axisb=2)
+        assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=-4)
+        # gh-5885
+        u = np.ones((3, 4, 2))
+        for axisc in range(-2, 2):
+            assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))
+
+    def test_uint8_int32_mixed_dtypes(self):
+        # regression test for gh-19138
+        u = np.array([[195, 8, 9]], np.uint8)
+        v = np.array([250, 166, 68], np.int32)
+        z = np.array([[950, 11010, -30370]], dtype=np.int32)
+        assert_equal(np.cross(v, u), z)
+        assert_equal(np.cross(u, v), -z)
+
+
+def test_outer_out_param():
+    arr1 = np.ones((5,))
+    arr2 = np.ones((2,))
+    arr3 = np.linspace(-2, 2, 5)
+    out1 = np.ndarray(shape=(5, 5))
+    out2 = np.ndarray(shape=(2, 5))
+    res1 = np.outer(arr1, arr3, out1)
+    assert_equal(res1, out1)
+    assert_equal(np.outer(arr2, arr3, out2), out2)
+
+
+class TestIndices:
+
+    def test_simple(self):
+        [x, y] = np.indices((4, 3))
+        assert_array_equal(x, np.array([[0, 0, 0],
+                                        [1, 1, 1],
+                                        [2, 2, 2],
+                                        [3, 3, 3]]))
+        assert_array_equal(y, np.array([[0, 1, 2],
+                                        [0, 1, 2],
+                                        [0, 1, 2],
+                                        [0, 1, 2]]))
+
+    def test_single_input(self):
+        [x] = np.indices((4,))
+        assert_array_equal(x, np.array([0, 1, 2, 3]))
+
+        [x] = np.indices((4,), sparse=True)
+        assert_array_equal(x, np.array([0, 1, 2, 3]))
+
+    def test_scalar_input(self):
+        assert_array_equal([], np.indices(()))
+        assert_array_equal([], np.indices((), sparse=True))
+        assert_array_equal([[]], np.indices((0,)))
+        assert_array_equal([[]], np.indices((0,), sparse=True))
+
+    def test_sparse(self):
+        [x, y] = np.indices((4, 3), sparse=True)
+        assert_array_equal(x, np.array([[0], [1], [2], [3]]))
+        assert_array_equal(y, np.array([[0, 1, 2]]))
+
+    @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
+    @pytest.mark.parametrize("dims", [(), (0,), (4, 3)])
+    def test_return_type(self, dtype, dims):
+        inds = np.indices(dims, dtype=dtype)
+        assert_(inds.dtype == dtype)
+
+        for arr in np.indices(dims, dtype=dtype, sparse=True):
+            assert_(arr.dtype == dtype)
+
+
+class TestRequire:
+    flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',
+                  'F', 'F_CONTIGUOUS', 'FORTRAN',
+                  'A', 'ALIGNED',
+                  'W', 'WRITEABLE',
+                  'O', 'OWNDATA']
+
+    def generate_all_false(self, dtype):
+        arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)])
+        arr.setflags(write=False)
+        a = arr['a']
+        assert_(not a.flags['C'])
+        assert_(not a.flags['F'])
+        assert_(not a.flags['O'])
+        assert_(not a.flags['W'])
+        assert_(not a.flags['A'])
+        return a
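+    # Note: 'a' is a strided, read-only field view into 'arr'; the leading
+    # one-byte 'junk' field breaks alignment and contiguity, and the view
+    # neither owns its data nor is writeable, so every flag checked is False.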
+
+    def set_and_check_flag(self, flag, dtype, arr):
+        if dtype is None:
+            dtype = arr.dtype
+        b = np.require(arr, dtype, [flag])
+        assert_(b.flags[flag])
+        assert_(b.dtype == dtype)
+
+        # a further call to np.require ought to return the same array
+        # unless OWNDATA is specified.
+        c = np.require(b, None, [flag])
+        if flag[0] != 'O':
+            assert_(c is b)
+        else:
+            assert_(c.flags[flag])
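+
+    # A doctest-style sketch of the pass-through behaviour exercised above
+    # (assuming a plain array that already satisfies the requirement):
+    #
+    #     >>> a = np.arange(4.0)              # C-contiguous ndarray
+    #     >>> np.require(a, None, ['C']) is a
+    #     True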
+
+    def test_require_each(self):
+        id = ['f8', 'i4']
+        fd = [None, 'f8', 'c16']
+        for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names):
+            a = self.generate_all_false(idtype)
+            self.set_and_check_flag(flag, fdtype, a)
+
+    def test_unknown_requirement(self):
+        a = self.generate_all_false('f8')
+        assert_raises(KeyError, np.require, a, None, 'Q')
+
+    def test_non_array_input(self):
+        a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O'])
+        assert_(a.flags['O'])
+        assert_(a.flags['C'])
+        assert_(a.flags['A'])
+        assert_(a.dtype == 'i4')
+        assert_equal(a, [1, 2, 3, 4])
+
+    def test_C_and_F_simul(self):
+        a = self.generate_all_false('f8')
+        assert_raises(ValueError, np.require, a, None, ['C', 'F'])
+
+    def test_ensure_array(self):
+        class ArraySubclass(np.ndarray):
+            pass
+
+        a = ArraySubclass((2, 2))
+        b = np.require(a, None, ['E'])
+        assert_(type(b) is np.ndarray)
+
+    def test_preserve_subtype(self):
+        class ArraySubclass(np.ndarray):
+            pass
+
+        for flag in self.flag_names:
+            a = ArraySubclass((2, 2))
+            self.set_and_check_flag(flag, None, a)
+
+
+class TestBroadcast:
+    def test_broadcast_in_args(self):
+        # gh-5881
+        arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
+                np.empty((5, 1, 7))]
+        mits = [np.broadcast(*arrs),
+                np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])),
+                np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])),
+                np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])),
+                np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]
+        for mit in mits:
+            assert_equal(mit.shape, (5, 6, 7))
+            assert_equal(mit.ndim, 3)
+            assert_equal(mit.nd, 3)
+            assert_equal(mit.numiter, 4)
+            for a, ia in zip(arrs, mit.iters):
+                assert_(a is ia.base)
+
+    def test_broadcast_single_arg(self):
+        # gh-6899
+        arrs = [np.empty((5, 6, 7))]
+        mit = np.broadcast(*arrs)
+        assert_equal(mit.shape, (5, 6, 7))
+        assert_equal(mit.ndim, 3)
+        assert_equal(mit.nd, 3)
+        assert_equal(mit.numiter, 1)
+        assert_(arrs[0] is mit.iters[0].base)
+
+    def test_number_of_arguments(self):
+        arr = np.empty((5,))
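+        # np.broadcast is limited to 32 arrays (NPY_MAXARGS in NumPy 1.x);
+        # the loop below probes both sides of that boundary.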
+        for j in range(35):
+            arrs = [arr] * j
+            if j > 32:
+                assert_raises(ValueError, np.broadcast, *arrs)
+            else:
+                mit = np.broadcast(*arrs)
+                assert_equal(mit.numiter, j)
+
+    def test_broadcast_error_kwargs(self):
+        # gh-13455
+        arrs = [np.empty((5, 6, 7))]
+        mit  = np.broadcast(*arrs)
+        mit2 = np.broadcast(*arrs, **{})
+        assert_equal(mit.shape, mit2.shape)
+        assert_equal(mit.ndim, mit2.ndim)
+        assert_equal(mit.nd, mit2.nd)
+        assert_equal(mit.numiter, mit2.numiter)
+        assert_(mit.iters[0].base is mit2.iters[0].base)
+
+        assert_raises(ValueError, np.broadcast, 1, **{'x': 1})
+
+    def test_shape_mismatch_error_message(self):
+        with pytest.raises(ValueError, match=r"arg 0 with shape \(1, 3\) and "
+                                             r"arg 2 with shape \(2,\)"):
+            np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7])
+
+
+class TestKeepdims:
+
+    class sub_array(np.ndarray):
+        def sum(self, axis=None, dtype=None, out=None):
+            return np.ndarray.sum(self, axis, dtype, out, keepdims=True)
+
+    def test_raise(self):
+        sub_class = self.sub_array
+        x = np.arange(30).view(sub_class)
+        assert_raises(TypeError, np.sum, x, keepdims=True)
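+        # np.sum forwards keepdims=True to the subclass's sum(), whose
+        # signature does not accept it, hence the TypeError.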
+
+
+class TestTensordot:
+
+    def test_zero_dimension(self):
+        # Test resolution to issue #5663
+        a = np.ndarray((3, 0))
+        b = np.ndarray((0, 4))
+        td = np.tensordot(a, b, (1, 0))
+        assert_array_equal(td, np.dot(a, b))
+        assert_array_equal(td, np.einsum('ij,jk', a, b))
+
+    def test_zero_dimensional(self):
+        # gh-12130
+        arr_0d = np.array(1)
+        ret = np.tensordot(arr_0d, arr_0d, ([], []))  # contracting no axes is well defined
+        assert_array_equal(ret, arr_0d)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_numerictypes.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_numerictypes.py
new file mode 100644
index 00000000..bab5bf24
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_numerictypes.py
@@ -0,0 +1,570 @@
+import sys
+import itertools
+
+import pytest
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_raises, IS_PYPY
+
+# This is the structure of the table used for plain objects:
+#
+# +-+-+-+
+# |x|y|z|
+# +-+-+-+
+
+# Structure of a plain array description:
+Pdescr = [
+    ('x', 'i4', (2,)),
+    ('y', 'f8', (2, 2)),
+    ('z', 'u1')]
+
+# A plain list of tuples with values for testing:
+PbufferT = [
+    # x     y                  z
+    ([3, 2], [[6., 4.], [6., 4.]], 8),
+    ([4, 3], [[7., 5.], [7., 5.]], 9),
+    ]
+
+
+# This is the structure of the table used for nested objects (DON'T PANIC!):
+#
+# +-+---------------------------------+-----+----------+-+-+
+# |x|Info                             |color|info      |y|z|
+# | +-----+--+----------------+----+--+     +----+-----+ | |
+# | |value|y2|Info2           |name|z2|     |Name|Value| | |
+# | |     |  +----+-----+--+--+    |  |     |    |     | | |
+# | |     |  |name|value|y3|z3|    |  |     |    |     | | |
+# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
+#
+
+# The corresponding nested array description:
+Ndescr = [
+    ('x', 'i4', (2,)),
+    ('Info', [
+        ('value', 'c16'),
+        ('y2', 'f8'),
+        ('Info2', [
+            ('name', 'S2'),
+            ('value', 'c16', (2,)),
+            ('y3', 'f8', (2,)),
+            ('z3', 'u4', (2,))]),
+        ('name', 'S2'),
+        ('z2', 'b1')]),
+    ('color', 'S2'),
+    ('info', [
+        ('Name', 'U8'),
+        ('Value', 'c16')]),
+    ('y', 'f8', (2, 2)),
+    ('z', 'u1')]
+
+NbufferT = [
+    # x     Info                                                color info        y                  z
+    #       value y2 Info2                            name z2         Name Value
+    #                name   value    y3       z3
+    ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True),
+     b'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8),
+    ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False),
+     b'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9),
+    ]
+
+
+byteorder = {'little': '<', 'big': '>'}[sys.byteorder]
+
+def normalize_descr(descr):
+    "Normalize a description adding the platform byteorder."
+
+    out = []
+    for item in descr:
+        dtype = item[1]
+        if isinstance(dtype, str):
+            if dtype[0] not in ['|', '<', '>']:
+                onebyte = dtype[1:] == "1"
+                if onebyte or dtype[0] in ['S', 'V', 'b']:
+                    dtype = "|" + dtype
+                else:
+                    dtype = byteorder + dtype
+            if len(item) > 2 and np.prod(item[2]) > 1:
+                nitem = (item[0], dtype, item[2])
+            else:
+                nitem = (item[0], dtype)
+            out.append(nitem)
+        elif isinstance(dtype, list):
+            nested = normalize_descr(dtype)
+            out.append((item[0], nested))
+        else:
+            raise ValueError("Expected a str or list and got %s" %
+                             (type(dtype)))
+    return out
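+
+# For reference, a doctest-style sketch (assuming a little-endian platform)
+# of what normalize_descr yields for the plain description above:
+#
+#     >>> normalize_descr(Pdescr)
+#     [('x', '<i4', (2,)), ('y', '<f8', (2, 2)), ('z', '|u1')]
+#
+# which matches np.zeros((), dtype=Pdescr).dtype.descr.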
+
+
+############################################################
+#    Creation tests
+############################################################
+
+class CreateZeros:
+    """Check the creation of heterogeneous arrays zero-valued"""
+
+    def test_zeros0D(self):
+        """Check creation of 0-dimensional objects"""
+        h = np.zeros((), dtype=self._descr)
+        assert_(normalize_descr(self._descr) == h.dtype.descr)
+        assert_(h.dtype.fields['x'][0].name[:4] == 'void')
+        assert_(h.dtype.fields['x'][0].char == 'V')
+        assert_(h.dtype.fields['x'][0].type == np.void)
+        # A small check that data is ok
+        assert_equal(h['z'], np.zeros((), dtype='u1'))
+
+    def test_zerosSD(self):
+        """Check creation of single-dimensional objects"""
+        h = np.zeros((2,), dtype=self._descr)
+        assert_(normalize_descr(self._descr) == h.dtype.descr)
+        assert_(h.dtype['y'].name[:4] == 'void')
+        assert_(h.dtype['y'].char == 'V')
+        assert_(h.dtype['y'].type == np.void)
+        # A small check that data is ok
+        assert_equal(h['z'], np.zeros((2,), dtype='u1'))
+
+    def test_zerosMD(self):
+        """Check creation of multi-dimensional objects"""
+        h = np.zeros((2, 3), dtype=self._descr)
+        assert_(normalize_descr(self._descr) == h.dtype.descr)
+        assert_(h.dtype['z'].name == 'uint8')
+        assert_(h.dtype['z'].char == 'B')
+        assert_(h.dtype['z'].type == np.uint8)
+        # A small check that data is ok
+        assert_equal(h['z'], np.zeros((2, 3), dtype='u1'))
+
+
+class TestCreateZerosPlain(CreateZeros):
+    """Check the creation of heterogeneous arrays zero-valued (plain)"""
+    _descr = Pdescr
+
+class TestCreateZerosNested(CreateZeros):
+    """Check the creation of heterogeneous arrays zero-valued (nested)"""
+    _descr = Ndescr
+
+
+class CreateValues:
+    """Check the creation of heterogeneous arrays with values"""
+
+    def test_tuple(self):
+        """Check creation from tuples"""
+        h = np.array(self._buffer, dtype=self._descr)
+        assert_(normalize_descr(self._descr) == h.dtype.descr)
+        if self.multiple_rows:
+            assert_(h.shape == (2,))
+        else:
+            assert_(h.shape == ())
+
+    def test_list_of_tuple(self):
+        """Check creation from list of tuples"""
+        h = np.array([self._buffer], dtype=self._descr)
+        assert_(normalize_descr(self._descr) == h.dtype.descr)
+        if self.multiple_rows:
+            assert_(h.shape == (1, 2))
+        else:
+            assert_(h.shape == (1,))
+
+    def test_list_of_list_of_tuple(self):
+        """Check creation from list of list of tuples"""
+        h = np.array([[self._buffer]], dtype=self._descr)
+        assert_(normalize_descr(self._descr) == h.dtype.descr)
+        if self.multiple_rows:
+            assert_(h.shape == (1, 1, 2))
+        else:
+            assert_(h.shape == (1, 1))
+
+
+class TestCreateValuesPlainSingle(CreateValues):
+    """Check the creation of heterogeneous arrays (plain, single row)"""
+    _descr = Pdescr
+    multiple_rows = 0
+    _buffer = PbufferT[0]
+
+class TestCreateValuesPlainMultiple(CreateValues):
+    """Check the creation of heterogeneous arrays (plain, multiple rows)"""
+    _descr = Pdescr
+    multiple_rows = 1
+    _buffer = PbufferT
+
+class TestCreateValuesNestedSingle(CreateValues):
+    """Check the creation of heterogeneous arrays (nested, single row)"""
+    _descr = Ndescr
+    multiple_rows = 0
+    _buffer = NbufferT[0]
+
+class TestCreateValuesNestedMultiple(CreateValues):
+    """Check the creation of heterogeneous arrays (nested, multiple rows)"""
+    _descr = Ndescr
+    multiple_rows = 1
+    _buffer = NbufferT
+
+
+############################################################
+#    Reading tests
+############################################################
+
+class ReadValuesPlain:
+    """Check the reading of values in heterogeneous arrays (plain)"""
+
+    def test_access_fields(self):
+        h = np.array(self._buffer, dtype=self._descr)
+        if not self.multiple_rows:
+            assert_(h.shape == ())
+            assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
+            assert_equal(h['y'], np.array(self._buffer[1], dtype='f8'))
+            assert_equal(h['z'], np.array(self._buffer[2], dtype='u1'))
+        else:
+            assert_(len(h) == 2)
+            assert_equal(h['x'], np.array([self._buffer[0][0],
+                                             self._buffer[1][0]], dtype='i4'))
+            assert_equal(h['y'], np.array([self._buffer[0][1],
+                                             self._buffer[1][1]], dtype='f8'))
+            assert_equal(h['z'], np.array([self._buffer[0][2],
+                                             self._buffer[1][2]], dtype='u1'))
+
+
+class TestReadValuesPlainSingle(ReadValuesPlain):
+    """Check the creation of heterogeneous arrays (plain, single row)"""
+    _descr = Pdescr
+    multiple_rows = 0
+    _buffer = PbufferT[0]
+
+class TestReadValuesPlainMultiple(ReadValuesPlain):
+    """Check the values of heterogeneous arrays (plain, multiple rows)"""
+    _descr = Pdescr
+    multiple_rows = 1
+    _buffer = PbufferT
+
+class ReadValuesNested:
+    """Check the reading of values in heterogeneous arrays (nested)"""
+
+    def test_access_top_fields(self):
+        """Check reading the top fields of a nested array"""
+        h = np.array(self._buffer, dtype=self._descr)
+        if not self.multiple_rows:
+            assert_(h.shape == ())
+            assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
+            assert_equal(h['y'], np.array(self._buffer[4], dtype='f8'))
+            assert_equal(h['z'], np.array(self._buffer[5], dtype='u1'))
+        else:
+            assert_(len(h) == 2)
+            assert_equal(h['x'], np.array([self._buffer[0][0],
+                                           self._buffer[1][0]], dtype='i4'))
+            assert_equal(h['y'], np.array([self._buffer[0][4],
+                                           self._buffer[1][4]], dtype='f8'))
+            assert_equal(h['z'], np.array([self._buffer[0][5],
+                                           self._buffer[1][5]], dtype='u1'))
+
+    def test_nested1_accessors(self):
+        """Check reading the nested fields of a nested array (1st level)"""
+        h = np.array(self._buffer, dtype=self._descr)
+        if not self.multiple_rows:
+            assert_equal(h['Info']['value'],
+                         np.array(self._buffer[1][0], dtype='c16'))
+            assert_equal(h['Info']['y2'],
+                         np.array(self._buffer[1][1], dtype='f8'))
+            assert_equal(h['info']['Name'],
+                         np.array(self._buffer[3][0], dtype='U2'))
+            assert_equal(h['info']['Value'],
+                         np.array(self._buffer[3][1], dtype='c16'))
+        else:
+            assert_equal(h['Info']['value'],
+                         np.array([self._buffer[0][1][0],
+                                self._buffer[1][1][0]],
+                                dtype='c16'))
+            assert_equal(h['Info']['y2'],
+                         np.array([self._buffer[0][1][1],
+                                self._buffer[1][1][1]],
+                                dtype='f8'))
+            assert_equal(h['info']['Name'],
+                         np.array([self._buffer[0][3][0],
+                                self._buffer[1][3][0]],
+                               dtype='U2'))
+            assert_equal(h['info']['Value'],
+                         np.array([self._buffer[0][3][1],
+                                self._buffer[1][3][1]],
+                               dtype='c16'))
+
+    def test_nested2_accessors(self):
+        """Check reading the nested fields of a nested array (2nd level)"""
+        h = np.array(self._buffer, dtype=self._descr)
+        if not self.multiple_rows:
+            assert_equal(h['Info']['Info2']['value'],
+                         np.array(self._buffer[1][2][1], dtype='c16'))
+            assert_equal(h['Info']['Info2']['z3'],
+                         np.array(self._buffer[1][2][3], dtype='u4'))
+        else:
+            assert_equal(h['Info']['Info2']['value'],
+                         np.array([self._buffer[0][1][2][1],
+                                self._buffer[1][1][2][1]],
+                               dtype='c16'))
+            assert_equal(h['Info']['Info2']['z3'],
+                         np.array([self._buffer[0][1][2][3],
+                                self._buffer[1][1][2][3]],
+                               dtype='u4'))
+
+    def test_nested1_descriptor(self):
+        """Check access nested descriptors of a nested array (1st level)"""
+        h = np.array(self._buffer, dtype=self._descr)
+        assert_(h.dtype['Info']['value'].name == 'complex128')
+        assert_(h.dtype['Info']['y2'].name == 'float64')
+        assert_(h.dtype['info']['Name'].name == 'str256')
+        assert_(h.dtype['info']['Value'].name == 'complex128')
+
+    def test_nested2_descriptor(self):
+        """Check access nested descriptors of a nested array (2nd level)"""
+        h = np.array(self._buffer, dtype=self._descr)
+        assert_(h.dtype['Info']['Info2']['value'].name == 'void256')
+        assert_(h.dtype['Info']['Info2']['z3'].name == 'void64')
+
+
+class TestReadValuesNestedSingle(ReadValuesNested):
+    """Check the values of heterogeneous arrays (nested, single row)"""
+    _descr = Ndescr
+    multiple_rows = False
+    _buffer = NbufferT[0]
+
+class TestReadValuesNestedMultiple(ReadValuesNested):
+    """Check the values of heterogeneous arrays (nested, multiple rows)"""
+    _descr = Ndescr
+    multiple_rows = True
+    _buffer = NbufferT
+
+class TestEmptyField:
+    def test_assign(self):
+        a = np.arange(10, dtype=np.float32)
+        a.dtype = [("int",   "<0i4"), ("float", "<2f4")]
+        assert_(a['int'].shape == (5, 0))
+        assert_(a['float'].shape == (5, 2))
+
+class TestCommonType:
+    def test_scalar_loses1(self):
+        with pytest.warns(DeprecationWarning, match="np.find_common_type"):
+            res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
+        assert_(res == 'f4')
+
+    def test_scalar_loses2(self):
+        with pytest.warns(DeprecationWarning, match="np.find_common_type"):
+            res = np.find_common_type(['f4', 'f4'], ['i8'])
+        assert_(res == 'f4')
+
+    def test_scalar_wins(self):
+        with pytest.warns(DeprecationWarning, match="np.find_common_type"):
+            res = np.find_common_type(['f4', 'f4', 'i2'], ['c8'])
+        assert_(res == 'c8')
+
+    def test_scalar_wins2(self):
+        with pytest.warns(DeprecationWarning, match="np.find_common_type"):
+            res = np.find_common_type(['u4', 'i4', 'i4'], ['f4'])
+        assert_(res == 'f8')
+
+    def test_scalar_wins3(self):  # doesn't go up to 'f16' on purpose
+        with pytest.warns(DeprecationWarning, match="np.find_common_type"):
+            res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
+        assert_(res == 'f8')
+
+class TestMultipleFields:
+    def setup_method(self):
+        self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
+
+    def _bad_call(self):
+        return self.ary['f0', 'f1']
+
+    def test_no_tuple(self):
+        assert_raises(IndexError, self._bad_call)
+
+    def test_return(self):
+        res = self.ary[['f0', 'f2']].tolist()
+        assert_(res == [(1, 3), (5, 7)])
+
+
+class TestIsSubDType:
+    # scalar types can be promoted into dtypes
+    wrappers = [np.dtype, lambda x: x]
+
+    def test_both_abstract(self):
+        assert_(np.issubdtype(np.floating, np.inexact))
+        assert_(not np.issubdtype(np.inexact, np.floating))
+
+    def test_same(self):
+        for cls in (np.float32, np.int32):
+            for w1, w2 in itertools.product(self.wrappers, repeat=2):
+                assert_(np.issubdtype(w1(cls), w2(cls)))
+
+    def test_subclass(self):
+        # note we cannot promote floating to a dtype, as it would turn into a
+        # concrete type
+        for w in self.wrappers:
+            assert_(np.issubdtype(w(np.float32), np.floating))
+            assert_(np.issubdtype(w(np.float64), np.floating))
+
+    def test_subclass_backwards(self):
+        for w in self.wrappers:
+            assert_(not np.issubdtype(np.floating, w(np.float32)))
+            assert_(not np.issubdtype(np.floating, w(np.float64)))
+
+    def test_sibling_class(self):
+        for w1, w2 in itertools.product(self.wrappers, repeat=2):
+            assert_(not np.issubdtype(w1(np.float32), w2(np.float64)))
+            assert_(not np.issubdtype(w1(np.float64), w2(np.float32)))
+
+    def test_nondtype_nonscalartype(self):
+        # See gh-14619 and gh-9505 which introduced the deprecation to fix
+        # this. These tests are directly taken from gh-9505
+        assert not np.issubdtype(np.float32, 'float64')
+        assert not np.issubdtype(np.float32, 'f8')
+        assert not np.issubdtype(np.int32, str)
+        assert not np.issubdtype(np.int32, 'int64')
+        assert not np.issubdtype(np.str_, 'void')
+        # for the following the correct spellings are
+        # np.integer, np.floating, or np.complexfloating respectively:
+        assert not np.issubdtype(np.int8, int)  # np.int8 is never np.int_
+        assert not np.issubdtype(np.float32, float)
+        assert not np.issubdtype(np.complex64, complex)
+        assert not np.issubdtype(np.float32, "float")
+        assert not np.issubdtype(np.float64, "f")
+
+        # Test the same for the correct first datatype and abstract one
+        # in the case of int, float, complex:
+        assert np.issubdtype(np.float64, 'float64')
+        assert np.issubdtype(np.float64, 'f8')
+        assert np.issubdtype(np.str_, str)
+        assert np.issubdtype(np.int64, 'int64')
+        assert np.issubdtype(np.void, 'void')
+        assert np.issubdtype(np.int8, np.integer)
+        assert np.issubdtype(np.float32, np.floating)
+        assert np.issubdtype(np.complex64, np.complexfloating)
+        assert np.issubdtype(np.float64, "float")
+        assert np.issubdtype(np.float32, "f")
+
+
+class TestSctypeDict:
+    def test_longdouble(self):
+        assert_(np.sctypeDict['f8'] is not np.longdouble)
+        assert_(np.sctypeDict['c16'] is not np.clongdouble)
+
+    def test_ulong(self):
+        # Test that 'ulong' behaves like 'long'. np.sctypeDict['long'] is an
+        # alias for np.int_, but np.long is not supported for historical
+        # reasons (gh-21063)
+        assert_(np.sctypeDict['ulong'] is np.uint)
+        with pytest.warns(FutureWarning):
+            # We will probably allow this in the future:
+            assert not hasattr(np, 'ulong')
+
+class TestBitName:
+    def test_abstract(self):
+        assert_raises(ValueError, np.core.numerictypes.bitname, np.floating)
+
+
+class TestMaximumSctype:
+
+    # note that parametrizing with sctypes['int'] and similar would skip types
+    # with the same size (gh-11923)
+
+    @pytest.mark.parametrize('t', [np.byte, np.short, np.intc, np.int_, np.longlong])
+    def test_int(self, t):
+        assert_equal(np.maximum_sctype(t), np.sctypes['int'][-1])
+
+    @pytest.mark.parametrize('t', [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong])
+    def test_uint(self, t):
+        assert_equal(np.maximum_sctype(t), np.sctypes['uint'][-1])
+
+    @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble])
+    def test_float(self, t):
+        assert_equal(np.maximum_sctype(t), np.sctypes['float'][-1])
+
+    @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble])
+    def test_complex(self, t):
+        assert_equal(np.maximum_sctype(t), np.sctypes['complex'][-1])
+
+    @pytest.mark.parametrize('t', [np.bool_, np.object_, np.str_, np.bytes_,
+                                   np.void])
+    def test_other(self, t):
+        assert_equal(np.maximum_sctype(t), t)
+
+
+class Test_sctype2char:
+    # This function is old enough that we're really just documenting the quirks
+    # at this point.
+
+    def test_scalar_type(self):
+        assert_equal(np.sctype2char(np.double), 'd')
+        assert_equal(np.sctype2char(np.int_), 'l')
+        assert_equal(np.sctype2char(np.str_), 'U')
+        assert_equal(np.sctype2char(np.bytes_), 'S')
+
+    def test_other_type(self):
+        assert_equal(np.sctype2char(float), 'd')
+        assert_equal(np.sctype2char(list), 'O')
+        assert_equal(np.sctype2char(np.ndarray), 'O')
+
+    def test_third_party_scalar_type(self):
+        from numpy.core._rational_tests import rational
+        assert_raises(KeyError, np.sctype2char, rational)
+        assert_raises(KeyError, np.sctype2char, rational(1))
+
+    def test_array_instance(self):
+        assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd')
+
+    def test_abstract_type(self):
+        assert_raises(KeyError, np.sctype2char, np.floating)
+
+    def test_non_type(self):
+        assert_raises(ValueError, np.sctype2char, 1)
+
+@pytest.mark.parametrize("rep, expected", [
+    (np.int32, True),
+    (list, False),
+    (1.1, False),
+    (str, True),
+    (np.dtype(np.float64), True),
+    (np.dtype((np.int16, (3, 4))), True),
+    (np.dtype([('a', np.int8)]), True),
+    ])
+def test_issctype(rep, expected):
+    # ensure proper identification of scalar
+    # data-types by issctype()
+    actual = np.issctype(rep)
+    assert_equal(actual, expected)
+
+
+@pytest.mark.skipif(sys.flags.optimize > 1,
+                    reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
+@pytest.mark.xfail(IS_PYPY,
+                   reason="PyPy cannot modify tp_doc after PyType_Ready")
+class TestDocStrings:
+    def test_platform_dependent_aliases(self):
+        if np.int64 is np.int_:
+            assert_('int64' in np.int_.__doc__)
+        elif np.int64 is np.longlong:
+            assert_('int64' in np.longlong.__doc__)
+
+
+class TestScalarTypeNames:
+    # gh-9799
+
+    numeric_types = [
+        np.byte, np.short, np.intc, np.int_, np.longlong,
+        np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong,
+        np.half, np.single, np.double, np.longdouble,
+        np.csingle, np.cdouble, np.clongdouble,
+    ]
+
+    def test_names_are_unique(self):
+        # none of the above may be aliases for each other
+        assert len(set(self.numeric_types)) == len(self.numeric_types)
+
+        # names must be unique
+        names = [t.__name__ for t in self.numeric_types]
+        assert len(set(names)) == len(names)
+
+    @pytest.mark.parametrize('t', numeric_types)
+    def test_names_reflect_attributes(self, t):
+        """ Test that names correspond to where the type is under ``np.`` """
+        assert getattr(np, t.__name__) is t
+
+    @pytest.mark.parametrize('t', numeric_types)
+    def test_names_are_understood_by_dtype(self, t):
+        """ Test the dtype constructor maps names back to the type """
+        assert np.dtype(t.__name__).type is t
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_numpy_2_0_compat.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_numpy_2_0_compat.py
new file mode 100644
index 00000000..5224261f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_numpy_2_0_compat.py
@@ -0,0 +1,48 @@
+from os import path
+import pickle
+
+import numpy as np
+
+
+class TestNumPy2Compatibility:
+
+    data_dir = path.join(path.dirname(__file__), "data")
+    filename = path.join(data_dir, "numpy_2_0_array.pkl")
+
+    def test_importable__core_stubs(self):
+        """
+        Checks if stubs for `numpy._core` are importable.
+        """
+        from numpy._core.multiarray import _reconstruct
+        from numpy._core.umath import cos
+        from numpy._core._multiarray_umath import exp
+        from numpy._core._internal import ndarray
+        from numpy._core._dtype import _construction_repr
+        from numpy._core._dtype_ctypes import dtype_from_ctypes_type
+
+    def test_unpickle_numpy_2_0_file(self):
+        """
+        Checks that NumPy 1.26 and pickle are able to load pickles
+        created with NumPy 2.0 without errors/warnings.
+        """
+        with open(self.filename, mode="rb") as file:
+            content = file.read()
+
+        # Let's make sure that the pickle object we're loading
+        # was built with NumPy 2.0.
+        assert b"numpy._core.multiarray" in content
+
+        arr = pickle.loads(content, encoding="latin1")
+
+        assert isinstance(arr, np.ndarray)
+        assert arr.shape == (73,) and arr.dtype == np.float64
+
+    def test_numpy_load_numpy_2_0_file(self):
+        """
+        Checks that `numpy.load` for NumPy 1.26 is able to load pickles
+        created with NumPy 2.0 without errors/warnings.
+        """
+        arr = np.load(self.filename, encoding="latin1", allow_pickle=True)
+
+        assert isinstance(arr, np.ndarray)
+        assert arr.shape == (73,) and arr.dtype == np.float64
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_overrides.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_overrides.py
new file mode 100644
index 00000000..5924358e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_overrides.py
@@ -0,0 +1,759 @@
+import inspect
+import sys
+import os
+import tempfile
+from io import StringIO
+from unittest import mock
+
+import numpy as np
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_raises_regex)
+from numpy.core.overrides import (
+    _get_implementing_args, array_function_dispatch,
+    verify_matching_signatures)
+from numpy.compat import pickle
+import pytest
+
+
+def _return_not_implemented(self, *args, **kwargs):
+    return NotImplemented
+
+
+# need to define this at the top level to test pickling
+@array_function_dispatch(lambda array: (array,))
+def dispatched_one_arg(array):
+    """Docstring."""
+    return 'original'
+
+
+@array_function_dispatch(lambda array1, array2: (array1, array2))
+def dispatched_two_arg(array1, array2):
+    """Docstring."""
+    return 'original'
+
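+# A brief sketch of the dispatch wiring above: the lambda passed to
+# array_function_dispatch is the *dispatcher*, returning the arguments to
+# scan for __array_function__; the decorated body is the default
+# implementation. An object overriding the protocol, e.g.
+#
+#     class Duck:
+#         def __array_function__(self, func, types, args, kwargs):
+#             return 'mine'
+#
+#     dispatched_one_arg(Duck())   # -> 'mine'
+#
+# bypasses the original body entirely.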
+
+class TestGetImplementingArgs:
+
+    def test_ndarray(self):
+        array = np.array(1)
+
+        args = _get_implementing_args([array])
+        assert_equal(list(args), [array])
+
+        args = _get_implementing_args([array, array])
+        assert_equal(list(args), [array])
+
+        args = _get_implementing_args([array, 1])
+        assert_equal(list(args), [array])
+
+        args = _get_implementing_args([1, array])
+        assert_equal(list(args), [array])
+
+    def test_ndarray_subclasses(self):
+
+        class OverrideSub(np.ndarray):
+            __array_function__ = _return_not_implemented
+
+        class NoOverrideSub(np.ndarray):
+            pass
+
+        array = np.array(1).view(np.ndarray)
+        override_sub = np.array(1).view(OverrideSub)
+        no_override_sub = np.array(1).view(NoOverrideSub)
+
+        args = _get_implementing_args([array, override_sub])
+        assert_equal(list(args), [override_sub, array])
+
+        args = _get_implementing_args([array, no_override_sub])
+        assert_equal(list(args), [no_override_sub, array])
+
+        args = _get_implementing_args(
+            [override_sub, no_override_sub])
+        assert_equal(list(args), [override_sub, no_override_sub])
+
+    def test_ndarray_and_duck_array(self):
+
+        class Other:
+            __array_function__ = _return_not_implemented
+
+        array = np.array(1)
+        other = Other()
+
+        args = _get_implementing_args([other, array])
+        assert_equal(list(args), [other, array])
+
+        args = _get_implementing_args([array, other])
+        assert_equal(list(args), [array, other])
+
+    def test_ndarray_subclass_and_duck_array(self):
+
+        class OverrideSub(np.ndarray):
+            __array_function__ = _return_not_implemented
+
+        class Other:
+            __array_function__ = _return_not_implemented
+
+        array = np.array(1)
+        subarray = np.array(1).view(OverrideSub)
+        other = Other()
+
+        assert_equal(_get_implementing_args([array, subarray, other]),
+                     [subarray, array, other])
+        assert_equal(_get_implementing_args([array, other, subarray]),
+                     [subarray, array, other])
+
+    def test_many_duck_arrays(self):
+
+        class A:
+            __array_function__ = _return_not_implemented
+
+        class B(A):
+            __array_function__ = _return_not_implemented
+
+        class C(A):
+            __array_function__ = _return_not_implemented
+
+        class D:
+            __array_function__ = _return_not_implemented
+
+        a = A()
+        b = B()
+        c = C()
+        d = D()
+
+        assert_equal(_get_implementing_args([1]), [])
+        assert_equal(_get_implementing_args([a]), [a])
+        assert_equal(_get_implementing_args([a, 1]), [a])
+        assert_equal(_get_implementing_args([a, a, a]), [a])
+        assert_equal(_get_implementing_args([a, d, a]), [a, d])
+        assert_equal(_get_implementing_args([a, b]), [b, a])
+        assert_equal(_get_implementing_args([b, a]), [b, a])
+        assert_equal(_get_implementing_args([a, b, c]), [b, c, a])
+        assert_equal(_get_implementing_args([a, c, b]), [c, b, a])
+
+    def test_too_many_duck_arrays(self):
+        namespace = dict(__array_function__=_return_not_implemented)
+        types = [type('A' + str(i), (object,), namespace) for i in range(33)]
+        relevant_args = [t() for t in types]
+
+        actual = _get_implementing_args(relevant_args[:32])
+        assert_equal(actual, relevant_args[:32])
+
+        with assert_raises_regex(TypeError, 'distinct argument types'):
+            _get_implementing_args(relevant_args)
+
+
+class TestNDArrayArrayFunction:
+
+    def test_method(self):
+
+        class Other:
+            __array_function__ = _return_not_implemented
+
+        class NoOverrideSub(np.ndarray):
+            pass
+
+        class OverrideSub(np.ndarray):
+            __array_function__ = _return_not_implemented
+
+        array = np.array([1])
+        other = Other()
+        no_override_sub = array.view(NoOverrideSub)
+        override_sub = array.view(OverrideSub)
+
+        result = array.__array_function__(func=dispatched_two_arg,
+                                          types=(np.ndarray,),
+                                          args=(array, 1.), kwargs={})
+        assert_equal(result, 'original')
+
+        result = array.__array_function__(func=dispatched_two_arg,
+                                          types=(np.ndarray, Other),
+                                          args=(array, other), kwargs={})
+        assert_(result is NotImplemented)
+
+        result = array.__array_function__(func=dispatched_two_arg,
+                                          types=(np.ndarray, NoOverrideSub),
+                                          args=(array, no_override_sub),
+                                          kwargs={})
+        assert_equal(result, 'original')
+
+        result = array.__array_function__(func=dispatched_two_arg,
+                                          types=(np.ndarray, OverrideSub),
+                                          args=(array, override_sub),
+                                          kwargs={})
+        assert_equal(result, 'original')
+
+        with assert_raises_regex(TypeError, 'no implementation found'):
+            np.concatenate((array, other))
+
+        expected = np.concatenate((array, array))
+        result = np.concatenate((array, no_override_sub))
+        assert_equal(result, expected.view(NoOverrideSub))
+        result = np.concatenate((array, override_sub))
+        assert_equal(result, expected.view(OverrideSub))
+
+    def test_no_wrapper(self):
+        # This shouldn't happen unless a user intentionally calls
+        # __array_function__ with invalid arguments, but check that we raise
+        # an appropriate error all the same.
+        array = np.array(1)
+        func = lambda x: x
+        with assert_raises_regex(AttributeError, '_implementation'):
+            array.__array_function__(func=func, types=(np.ndarray,),
+                                     args=(array,), kwargs={})
+
+
+class TestArrayFunctionDispatch:
+
+    def test_pickle(self):
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            roundtripped = pickle.loads(
+                    pickle.dumps(dispatched_one_arg, protocol=proto))
+            assert_(roundtripped is dispatched_one_arg)
+
+    def test_name_and_docstring(self):
+        assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg')
+        if sys.flags.optimize < 2:
+            assert_equal(dispatched_one_arg.__doc__, 'Docstring.')
+
+    def test_interface(self):
+
+        class MyArray:
+            def __array_function__(self, func, types, args, kwargs):
+                return (self, func, types, args, kwargs)
+
+        original = MyArray()
+        (obj, func, types, args, kwargs) = dispatched_one_arg(original)
+        assert_(obj is original)
+        assert_(func is dispatched_one_arg)
+        assert_equal(set(types), {MyArray})
+        # assert_equal uses the overloaded np.iscomplexobj() internally
+        assert_(args == (original,))
+        assert_equal(kwargs, {})
+
+    def test_not_implemented(self):
+
+        class MyArray:
+            def __array_function__(self, func, types, args, kwargs):
+                return NotImplemented
+
+        array = MyArray()
+        with assert_raises_regex(TypeError, 'no implementation found'):
+            dispatched_one_arg(array)
+
+    def test_where_dispatch(self):
+
+        class DuckArray:
+            def __array_function__(self, ufunc, method, *inputs, **kwargs):
+                return "overridden"
+
+        array = np.array(1)
+        duck_array = DuckArray()
+
+        result = np.std(array, where=duck_array)
+
+        assert_equal(result, "overridden")
+
+
+class TestVerifyMatchingSignatures:
+
+    def test_verify_matching_signatures(self):
+
+        verify_matching_signatures(lambda x: 0, lambda x: 0)
+        verify_matching_signatures(lambda x=None: 0, lambda x=None: 0)
+        verify_matching_signatures(lambda x=1: 0, lambda x=None: 0)
+
+        with assert_raises(RuntimeError):
+            verify_matching_signatures(lambda a: 0, lambda b: 0)
+        with assert_raises(RuntimeError):
+            verify_matching_signatures(lambda x: 0, lambda x=None: 0)
+        with assert_raises(RuntimeError):
+            verify_matching_signatures(lambda x=None: 0, lambda y=None: 0)
+        with assert_raises(RuntimeError):
+            verify_matching_signatures(lambda x=1: 0, lambda y=1: 0)
+
+    def test_array_function_dispatch(self):
+
+        with assert_raises(RuntimeError):
+            @array_function_dispatch(lambda x: (x,))
+            def f(y):
+                pass
+
+        # should not raise
+        @array_function_dispatch(lambda x: (x,), verify=False)
+        def f(y):
+            pass
+
+
+def _new_duck_type_and_implements():
+    """Create a duck array type and implements functions."""
+    HANDLED_FUNCTIONS = {}
+
+    class MyArray:
+        def __array_function__(self, func, types, args, kwargs):
+            if func not in HANDLED_FUNCTIONS:
+                return NotImplemented
+            if not all(issubclass(t, MyArray) for t in types):
+                return NotImplemented
+            return HANDLED_FUNCTIONS[func](*args, **kwargs)
+
+    def implements(numpy_function):
+        """Register an __array_function__ implementations."""
+        def decorator(func):
+            HANDLED_FUNCTIONS[numpy_function] = func
+            return func
+        return decorator
+
+    return (MyArray, implements)
+
+
+class TestArrayFunctionImplementation:
+
+    def test_one_arg(self):
+        MyArray, implements = _new_duck_type_and_implements()
+
+        @implements(dispatched_one_arg)
+        def _(array):
+            return 'myarray'
+
+        assert_equal(dispatched_one_arg(1), 'original')
+        assert_equal(dispatched_one_arg(MyArray()), 'myarray')
+
+    def test_optional_args(self):
+        MyArray, implements = _new_duck_type_and_implements()
+
+        @array_function_dispatch(lambda array, option=None: (array,))
+        def func_with_option(array, option='default'):
+            return option
+
+        @implements(func_with_option)
+        def my_array_func_with_option(array, new_option='myarray'):
+            return new_option
+
+        # we don't need to implement every option on __array_function__
+        # implementations
+        assert_equal(func_with_option(1), 'default')
+        assert_equal(func_with_option(1, option='extra'), 'extra')
+        assert_equal(func_with_option(MyArray()), 'myarray')
+        with assert_raises(TypeError):
+            func_with_option(MyArray(), option='extra')
+
+        # but new options on implementations can't be used
+        result = my_array_func_with_option(MyArray(), new_option='yes')
+        assert_equal(result, 'yes')
+        with assert_raises(TypeError):
+            func_with_option(MyArray(), new_option='no')
+
+    def test_not_implemented(self):
+        MyArray, implements = _new_duck_type_and_implements()
+
+        @array_function_dispatch(lambda array: (array,), module='my')
+        def func(array):
+            return array
+
+        array = np.array(1)
+        assert_(func(array) is array)
+        assert_equal(func.__module__, 'my')
+
+        with assert_raises_regex(
+                TypeError, "no implementation found for 'my.func'"):
+            func(MyArray())
+
+    @pytest.mark.parametrize("name", ["concatenate", "mean", "asarray"])
+    def test_signature_error_message_simple(self, name):
+        func = getattr(np, name)
+        try:
+            # all of these functions need an argument:
+            func()
+        except TypeError as e:
+            exc = e
+
+        assert exc.args[0].startswith(f"{name}()")
+
+    def test_signature_error_message(self):
+        # The dispatcher function will be named "_dispatcher", but the
+        # TypeError raised below should show the name as "func"
+        def _dispatcher():
+            return ()
+
+        @array_function_dispatch(_dispatcher)
+        def func():
+            pass
+
+        try:
+            func._implementation(bad_arg=3)
+        except TypeError as e:
+            expected_exception = e
+
+        try:
+            func(bad_arg=3)
+            raise AssertionError("must fail")
+        except TypeError as exc:
+            if exc.args[0].startswith("_dispatcher"):
+                # We replace the qualname currently, but it used `__name__`
+                # (relevant functions have the same name and qualname anyway)
+                pytest.skip("Python version is not using __qualname__ for "
+                            "TypeError formatting.")
+
+            assert exc.args == expected_exception.args
+
+    @pytest.mark.parametrize("value", [234, "this func is not replaced"])
+    def test_dispatcher_error(self, value):
+        # If the dispatcher raises an error, we must not attempt to mutate it
+        error = TypeError(value)
+
+        def dispatcher():
+            raise error
+
+        @array_function_dispatch(dispatcher)
+        def func():
+            return 3
+
+        try:
+            func()
+            raise AssertionError("must fail")
+        except TypeError as exc:
+            assert exc is error  # unmodified exception
+
+    def test_properties(self):
+        # Check that str and repr are sensible
+        func = dispatched_two_arg
+        assert str(func) == str(func._implementation)
+        repr_no_id = repr(func).split("at ")[0]
+        repr_no_id_impl = repr(func._implementation).split("at ")[0]
+        assert repr_no_id == repr_no_id_impl
+
+    @pytest.mark.parametrize("func", [
+            lambda x, y: 0,  # no like argument
+            lambda like=None: 0,  # not keyword only
+            lambda *, like=None, a=3: 0,  # not last (not that it matters)
+        ])
+    def test_bad_like_sig(self, func):
+        # We sanity check the signature, and these should fail.
+        with pytest.raises(RuntimeError):
+            array_function_dispatch()(func)
+
+    def test_bad_like_passing(self):
+        # Cover internal sanity check for passing like as first positional arg
+        def func(*, like=None):
+            pass
+
+        func_with_like = array_function_dispatch()(func)
+        with pytest.raises(TypeError):
+            func_with_like()
+        with pytest.raises(TypeError):
+            func_with_like(like=234)
+
+    def test_too_many_args(self):
+        # Mainly a unit-test to increase coverage
+        objs = []
+        for i in range(40):
+            class MyArr:
+                def __array_function__(self, *args, **kwargs):
+                    return NotImplemented
+
+            objs.append(MyArr())
+
+        def _dispatch(*args):
+            return args
+
+        @array_function_dispatch(_dispatch)
+        def func(*args):
+            pass
+
+        with pytest.raises(TypeError, match="maximum number"):
+            func(*objs)
+
+
+class TestNDArrayMethods:
+
+    def test_repr(self):
+        # gh-12162: should still be defined even if __array_function__ doesn't
+        # implement np.array_repr()
+
+        class MyArray(np.ndarray):
+            def __array_function__(*args, **kwargs):
+                return NotImplemented
+
+        array = np.array(1).view(MyArray)
+        assert_equal(repr(array), 'MyArray(1)')
+        assert_equal(str(array), '1')
+
+
+class TestNumPyFunctions:
+
+    def test_set_module(self):
+        assert_equal(np.sum.__module__, 'numpy')
+        assert_equal(np.char.equal.__module__, 'numpy.char')
+        assert_equal(np.fft.fft.__module__, 'numpy.fft')
+        assert_equal(np.linalg.solve.__module__, 'numpy.linalg')
+
+    def test_inspect_sum(self):
+        signature = inspect.signature(np.sum)
+        assert_('axis' in signature.parameters)
+
+    def test_override_sum(self):
+        MyArray, implements = _new_duck_type_and_implements()
+
+        @implements(np.sum)
+        def _(array):
+            return 'yes'
+
+        assert_equal(np.sum(MyArray()), 'yes')
+
+    def test_sum_on_mock_array(self):
+
+        # We need a proxy for mocks because __array_function__ is only looked
+        # up in the class dict
+        class ArrayProxy:
+            def __init__(self, value):
+                self.value = value
+            def __array_function__(self, *args, **kwargs):
+                return self.value.__array_function__(*args, **kwargs)
+            def __array__(self, *args, **kwargs):
+                return self.value.__array__(*args, **kwargs)
+
+        proxy = ArrayProxy(mock.Mock(spec=ArrayProxy))
+        proxy.value.__array_function__.return_value = 1
+        result = np.sum(proxy)
+        assert_equal(result, 1)
+        proxy.value.__array_function__.assert_called_once_with(
+            np.sum, (ArrayProxy,), (proxy,), {})
+        proxy.value.__array__.assert_not_called()
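+        # As with other special methods, __array_function__ is looked up on
+        # type(obj) rather than the instance, which is why the proxy forwards
+        # it at class level instead of setting it on the mock instance.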
+
+    def test_sum_forwarding_implementation(self):
+
+        class MyArray(np.ndarray):
+
+            def sum(self, axis, out):
+                return 'summed'
+
+            def __array_function__(self, func, types, args, kwargs):
+                return super().__array_function__(func, types, args, kwargs)
+
+        # note: the internal implementation of np.sum() calls the .sum() method
+        array = np.array(1).view(MyArray)
+        assert_equal(np.sum(array), 'summed')
+
+
+class TestArrayLike:
+    def setup_method(self):
+        class MyArray:
+            def __init__(self, function=None):
+                self.function = function
+
+            def __array_function__(self, func, types, args, kwargs):
+                assert func is getattr(np, func.__name__)
+                try:
+                    my_func = getattr(self, func.__name__)
+                except AttributeError:
+                    return NotImplemented
+                return my_func(*args, **kwargs)
+
+        self.MyArray = MyArray
+
+        class MyNoArrayFunctionArray:
+            def __init__(self, function=None):
+                self.function = function
+
+        self.MyNoArrayFunctionArray = MyNoArrayFunctionArray
+
+    def add_method(self, name, arr_class, enable_value_error=False):
+        def _definition(*args, **kwargs):
+            # Check that `like=` isn't propagated downstream
+            assert 'like' not in kwargs
+
+            if enable_value_error and 'value_error' in kwargs:
+                raise ValueError
+
+            return arr_class(getattr(arr_class, name))
+        setattr(arr_class, name, _definition)
+
+    def func_args(*args, **kwargs):
+        return args, kwargs
+
+    def test_array_like_not_implemented(self):
+        self.add_method('array', self.MyArray)
+
+        ref = self.MyArray.array()
+
+        with assert_raises_regex(TypeError, 'no implementation found'):
+            array_like = np.asarray(1, like=ref)
+
+    _array_tests = [
+        ('array', *func_args((1,))),
+        ('asarray', *func_args((1,))),
+        ('asanyarray', *func_args((1,))),
+        ('ascontiguousarray', *func_args((2, 3))),
+        ('asfortranarray', *func_args((2, 3))),
+        ('require', *func_args((np.arange(6).reshape(2, 3),),
+                               requirements=['A', 'F'])),
+        ('empty', *func_args((1,))),
+        ('full', *func_args((1,), 2)),
+        ('ones', *func_args((1,))),
+        ('zeros', *func_args((1,))),
+        ('arange', *func_args(3)),
+        ('frombuffer', *func_args(b'\x00' * 8, dtype=int)),
+        ('fromiter', *func_args(range(3), dtype=int)),
+        ('fromstring', *func_args('1,2', dtype=int, sep=',')),
+        ('loadtxt', *func_args(lambda: StringIO('0 1\n2 3'))),
+        ('genfromtxt', *func_args(lambda: StringIO('1,2.1'),
+                                  dtype=[('int', 'i8'), ('float', 'f8')],
+                                  delimiter=',')),
+    ]
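+    # (StringIO sources above are wrapped in lambdas so each parametrized
+    # call can build a fresh stream; see the ``a() if callable(a) else a``
+    # unpacking in the tests below.)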
+
+    @pytest.mark.parametrize('function, args, kwargs', _array_tests)
+    @pytest.mark.parametrize('numpy_ref', [True, False])
+    def test_array_like(self, function, args, kwargs, numpy_ref):
+        self.add_method('array', self.MyArray)
+        self.add_method(function, self.MyArray)
+        np_func = getattr(np, function)
+        my_func = getattr(self.MyArray, function)
+
+        if numpy_ref is True:
+            ref = np.array(1)
+        else:
+            ref = self.MyArray.array()
+
+        like_args = tuple(a() if callable(a) else a for a in args)
+        array_like = np_func(*like_args, **kwargs, like=ref)
+
+        if numpy_ref is True:
+            assert type(array_like) is np.ndarray
+
+            np_args = tuple(a() if callable(a) else a for a in args)
+            np_arr = np_func(*np_args, **kwargs)
+
+            # Special-case np.empty to ensure values match
+            if function == "empty":
+                np_arr.fill(1)
+                array_like.fill(1)
+
+            assert_equal(array_like, np_arr)
+        else:
+            assert type(array_like) is self.MyArray
+            assert array_like.function is my_func
+
+    @pytest.mark.parametrize('function, args, kwargs', _array_tests)
+    @pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"])
+    def test_no_array_function_like(self, function, args, kwargs, ref):
+        self.add_method('array', self.MyNoArrayFunctionArray)
+        self.add_method(function, self.MyNoArrayFunctionArray)
+        np_func = getattr(np, function)
+
+        # Instantiate ref if it's the MyNoArrayFunctionArray class
+        if ref == "MyNoArrayFunctionArray":
+            ref = self.MyNoArrayFunctionArray.array()
+
+        like_args = tuple(a() if callable(a) else a for a in args)
+
+        with assert_raises_regex(TypeError,
+                'The `like` argument must be an array-like that implements'):
+            np_func(*like_args, **kwargs, like=ref)
+
+    @pytest.mark.parametrize('numpy_ref', [True, False])
+    def test_array_like_fromfile(self, numpy_ref):
+        self.add_method('array', self.MyArray)
+        self.add_method("fromfile", self.MyArray)
+
+        if numpy_ref is True:
+            ref = np.array(1)
+        else:
+            ref = self.MyArray.array()
+
+        data = np.random.random(5)
+
+        with tempfile.TemporaryDirectory() as tmpdir:
+            fname = os.path.join(tmpdir, "testfile")
+            data.tofile(fname)
+
+            array_like = np.fromfile(fname, like=ref)
+            if numpy_ref is True:
+                assert type(array_like) is np.ndarray
+                np_res = np.fromfile(fname, like=ref)
+                assert_equal(np_res, data)
+                assert_equal(array_like, np_res)
+            else:
+                assert type(array_like) is self.MyArray
+                assert array_like.function is self.MyArray.fromfile
+
+    def test_exception_handling(self):
+        self.add_method('array', self.MyArray, enable_value_error=True)
+
+        ref = self.MyArray.array()
+
+        with assert_raises(TypeError):
+            # Raises the error about `value_error` being invalid first
+            np.array(1, value_error=True, like=ref)
+
+    @pytest.mark.parametrize('function, args, kwargs', _array_tests)
+    def test_like_as_none(self, function, args, kwargs):
+        self.add_method('array', self.MyArray)
+        self.add_method(function, self.MyArray)
+        np_func = getattr(np, function)
+
+        like_args = tuple(a() if callable(a) else a for a in args)
+        # Re-evaluate the lambdas so loadtxt and genfromtxt get a fresh,
+        # unconsumed StringIO for the expected-value call as well.
+        like_args_exp = tuple(a() if callable(a) else a for a in args)
+
+        array_like = np_func(*like_args, **kwargs, like=None)
+        expected = np_func(*like_args_exp, **kwargs)
+        # Special-case np.empty to ensure values match
+        if function == "empty":
+            array_like.fill(1)
+            expected.fill(1)
+        assert_equal(array_like, expected)
+
+
+def test_function_like():
+    # We provide a `__get__` implementation, make sure it works
+    assert type(np.mean) is np.core._multiarray_umath._ArrayFunctionDispatcher 
+
+    class MyClass:
+        def __array__(self):
+            # valid argument to mean:
+            return np.arange(3)
+
+        func1 = staticmethod(np.mean)
+        func2 = np.mean
+        func3 = classmethod(np.mean)
+
+    m = MyClass()
+    assert m.func1([10]) == 10
+    assert m.func2() == 1  # mean of the arange
+    with pytest.raises(TypeError, match="unsupported operand type"):
+        # Tries to operate on the class
+        m.func3()
+
+    # Manual binding also works (the above may shortcut):
+    bound = np.mean.__get__(m, MyClass)
+    assert bound() == 1
+
+    bound = np.mean.__get__(None, MyClass)  # unbound actually
+    assert bound([10]) == 10
+
+    bound = np.mean.__get__(MyClass)  # classmethod
+    with pytest.raises(TypeError, match="unsupported operand type"):
+        bound()
+
+
+def test_scipy_trapz_support_shim():
+    # SciPy 1.10 and earlier "clone" trapz in this way, so we have a
+    # support shim in place: https://github.com/scipy/scipy/issues/17811
+    # That should be removed eventually.  This test copies what SciPy does.
+    # Hopefully removable 1 year after SciPy 1.11; shim added to NumPy 1.25.
+    import types
+    import functools
+
+    def _copy_func(f):
+        # Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)
+        g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__,
+                               argdefs=f.__defaults__, closure=f.__closure__)
+        g = functools.update_wrapper(g, f)
+        g.__kwdefaults__ = f.__kwdefaults__
+        return g
+
+    trapezoid = _copy_func(np.trapz)
+
+    assert np.trapz([1, 2]) == trapezoid([1, 2])
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_print.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_print.py
new file mode 100644
index 00000000..162686ee
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_print.py
@@ -0,0 +1,202 @@
+import sys
+
+import pytest
+
+import numpy as np
+from numpy.testing import assert_, assert_equal, IS_MUSL
+from numpy.core.tests._locales import CommaDecimalPointLocale
+
+
+from io import StringIO
+
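+# Expected str() output for the IEEE special values used throughout this file.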
+_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'}
+
+
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_float_types(tp):
+    """ Check formatting.
+
+        This is only for the str function, and only for simple types.
+        The precision of np.float32 and np.longdouble aren't the same as the
+        python float precision.
+
+    """
+    for x in [0, 1, -1, 1e20]:
+        assert_equal(str(tp(x)), str(float(x)),
+                     err_msg='Failed str formatting for type %s' % tp)
+
+    if tp(1e16).itemsize > 4:
+        assert_equal(str(tp(1e16)), str(float('1e16')),
+                     err_msg='Failed str formatting for type %s' % tp)
+    else:
+        ref = '1e+16'
+        assert_equal(str(tp(1e16)), ref,
+                     err_msg='Failed str formatting for type %s' % tp)
+
+
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_nan_inf_float(tp):
+    """ Check formatting of nan & inf.
+
+        This is only for the str function, and only for simple types.
+        The precision of np.float32 and np.longdouble aren't the same as the
+        python float precision.
+
+    """
+    for x in [np.inf, -np.inf, np.nan]:
+        assert_equal(str(tp(x)), _REF[x],
+                     err_msg='Failed str formatting for type %s' % tp)
+
+
+@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_types(tp):
+    """Check formatting of complex types.
+
+        This is only for the str function, and only for simple types.
+        The precision of np.float32 and np.longdouble aren't the same as the
+        python float precision.
+
+    """
+    for x in [0, 1, -1, 1e20]:
+        assert_equal(str(tp(x)), str(complex(x)),
+                     err_msg='Failed str formatting for type %s' % tp)
+        assert_equal(str(tp(x*1j)), str(complex(x*1j)),
+                     err_msg='Failed str formatting for type %s' % tp)
+        assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)),
+                     err_msg='Failed str formatting for type %s' % tp)
+
+    if tp(1e16).itemsize > 8:
+        assert_equal(str(tp(1e16)), str(complex(1e16)),
+                     err_msg='Failed str formatting for type %s' % tp)
+    else:
+        ref = '(1e+16+0j)'
+        assert_equal(str(tp(1e16)), ref,
+                     err_msg='Failed str formatting for type %s' % tp)
+
+
+@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_inf_nan(dtype):
+    """Check inf/nan formatting of complex types."""
+    TESTS = {
+        complex(np.inf, 0): "(inf+0j)",
+        complex(0, np.inf): "infj",
+        complex(-np.inf, 0): "(-inf+0j)",
+        complex(0, -np.inf): "-infj",
+        complex(np.inf, 1): "(inf+1j)",
+        complex(1, np.inf): "(1+infj)",
+        complex(-np.inf, 1): "(-inf+1j)",
+        complex(1, -np.inf): "(1-infj)",
+        complex(np.nan, 0): "(nan+0j)",
+        complex(0, np.nan): "nanj",
+        complex(-np.nan, 0): "(nan+0j)",
+        complex(0, -np.nan): "nanj",
+        complex(np.nan, 1): "(nan+1j)",
+        complex(1, np.nan): "(1+nanj)",
+        complex(-np.nan, 1): "(nan+1j)",
+        complex(1, -np.nan): "(1+nanj)",
+    }
+    for c, s in TESTS.items():
+        assert_equal(str(dtype(c)), s)
+
+
+# print tests
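+# The helper below redirects sys.stdout into StringIO buffers and compares
+# what print() emits for the numpy scalar with the plain Python reference.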
+def _test_redirected_print(x, tp, ref=None):
+    file = StringIO()
+    file_tp = StringIO()
+    stdout = sys.stdout
+    try:
+        sys.stdout = file_tp
+        print(tp(x))
+        sys.stdout = file
+        if ref:
+            print(ref)
+        else:
+            print(x)
+    finally:
+        sys.stdout = stdout
+
+    assert_equal(file.getvalue(), file_tp.getvalue(),
+                 err_msg='print failed for type %s' % tp)
+
+
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_float_type_print(tp):
+    """Check formatting when using print """
+    for x in [0, 1, -1, 1e20]:
+        _test_redirected_print(float(x), tp)
+
+    for x in [np.inf, -np.inf, np.nan]:
+        _test_redirected_print(float(x), tp, _REF[x])
+
+    if tp(1e16).itemsize > 4:
+        _test_redirected_print(float(1e16), tp)
+    else:
+        ref = '1e+16'
+        _test_redirected_print(float(1e16), tp, ref)
+
+
+@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_type_print(tp):
+    """Check formatting when using print """
+    # We do not create complex with inf/nan directly because the feature is
+    # missing in python < 2.6
+    for x in [0, 1, -1, 1e20]:
+        _test_redirected_print(complex(x), tp)
+
+    if tp(1e16).itemsize > 8:
+        _test_redirected_print(complex(1e16), tp)
+    else:
+        ref = '(1e+16+0j)'
+        _test_redirected_print(complex(1e16), tp, ref)
+
+    _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)')
+    _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)')
+    _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)')
+
+
+def test_scalar_format():
+    """Test the str.format method with NumPy scalar types"""
+    tests = [('{0}', True, np.bool_),
+            ('{0}', False, np.bool_),
+            ('{0:d}', 130, np.uint8),
+            ('{0:d}', 50000, np.uint16),
+            ('{0:d}', 3000000000, np.uint32),
+            ('{0:d}', 15000000000000000000, np.uint64),
+            ('{0:d}', -120, np.int8),
+            ('{0:d}', -30000, np.int16),
+            ('{0:d}', -2000000000, np.int32),
+            ('{0:d}', -7000000000000000000, np.int64),
+            ('{0:g}', 1.5, np.float16),
+            ('{0:g}', 1.5, np.float32),
+            ('{0:g}', 1.5, np.float64),
+            ('{0:g}', 1.5, np.longdouble),
+            ('{0:g}', 1.5+0.5j, np.complex64),
+            ('{0:g}', 1.5+0.5j, np.complex128),
+            ('{0:g}', 1.5+0.5j, np.clongdouble)]
+
+    for (fmat, val, valtype) in tests:
+        try:
+            assert_equal(fmat.format(val), fmat.format(valtype(val)),
+                    "failed with val %s, type %s" % (val, valtype))
+        except ValueError as e:
+            assert_(False,
+               "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" %
+                            (fmat, repr(val), repr(valtype), str(e)))
+
+
+#
+# Locale tests: scalar types formatting should be independent of the locale
+#
+
+class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
+
+    def test_locale_single(self):
+        assert_equal(str(np.float32(1.2)), str(float(1.2)))
+
+    def test_locale_double(self):
+        assert_equal(str(np.double(1.2)), str(float(1.2)))
+
+    @pytest.mark.skipif(IS_MUSL,
+                        reason="test flaky on musllinux")
+    def test_locale_longdouble(self):
+        assert_equal(str(np.longdouble('1.2')), str(float(1.2)))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_protocols.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_protocols.py
new file mode 100644
index 00000000..55a2bcf7
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_protocols.py
@@ -0,0 +1,44 @@
+import pytest
+import warnings
+import numpy as np
+
+
+@pytest.mark.filterwarnings("error")
+def test_getattr_warning():
+    # issue gh-14735: make sure we clear only getattr errors, and let warnings
+    # through
+    class Wrapper:
+        def __init__(self, array):
+            self.array = array
+
+        def __len__(self):
+            return len(self.array)
+
+        def __getitem__(self, item):
+            return type(self)(self.array[item])
+
+        def __getattr__(self, name):
+            if name.startswith("__array_"):
+                warnings.warn("object got converted", UserWarning, stacklevel=1)
+
+            return getattr(self.array, name)
+
+        def __repr__(self):
+            return "".format(self=self)
+
+    array = Wrapper(np.arange(10))
+    with pytest.raises(UserWarning, match="object got converted"):
+        np.asarray(array)
+
+
+def test_array_called():
+    class Wrapper:
+        val = '0' * 100
+        def __array__(self, result=None):
+            return np.array([self.val], dtype=object)
+
+
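+    # np.array(wrapped, dtype=str) first calls Wrapper.__array__ and then
+    # casts the 100-character object string to a fixed-width unicode dtype,
+    # hence 'U100'.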
+    wrapped = Wrapper()
+    arr = np.array(wrapped, dtype=str)
+    assert arr.dtype == 'U100'
+    assert arr[0] == Wrapper.val
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_records.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_records.py
new file mode 100644
index 00000000..a76ae2d9
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_records.py
@@ -0,0 +1,520 @@
+import collections.abc
+import textwrap
+from io import BytesIO
+from os import path
+from pathlib import Path
+import pytest
+
+import numpy as np
+from numpy.testing import (
+    assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
+    assert_raises, temppath,
+    )
+from numpy.compat import pickle
+
+
+class TestFromrecords:
+    def test_fromrecords(self):
+        r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
+                            names='col1,col2,col3')
+        assert_equal(r[0].item(), (456, 'dbe', 1.2))
+        assert_equal(r['col1'].dtype.kind, 'i')
+        assert_equal(r['col2'].dtype.kind, 'U')
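+        # 'dbe' is the widest string at 3 characters; unicode characters
+        # occupy 4 bytes each, so the itemsize checked below is 12.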
+        assert_equal(r['col2'].dtype.itemsize, 12)
+        assert_equal(r['col3'].dtype.kind, 'f')
+
+    def test_fromrecords_0len(self):
+        """ Verify fromrecords works with a 0-length input """
+        dtype = [('a', float), ('b', float)]
+        r = np.rec.fromrecords([], dtype=dtype)
+        assert_equal(r.shape, (0,))
+
+    def test_fromrecords_2d(self):
+        data = [
+            [(1, 2), (3, 4), (5, 6)],
+            [(6, 5), (4, 3), (2, 1)]
+        ]
+        expected_a = [[1, 3, 5], [6, 4, 2]]
+        expected_b = [[2, 4, 6], [5, 3, 1]]
+
+        # try with dtype
+        r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)])
+        assert_equal(r1['a'], expected_a)
+        assert_equal(r1['b'], expected_b)
+
+        # try with names
+        r2 = np.rec.fromrecords(data, names=['a', 'b'])
+        assert_equal(r2['a'], expected_a)
+        assert_equal(r2['b'], expected_b)
+
+        assert_equal(r1, r2)
+
+    def test_method_array(self):
+        r = np.rec.array(b'abcdefg' * 100, formats='i2,a3,i4', shape=3, byteorder='big')
+        assert_equal(r[1].item(), (25444, b'efg', 1633837924))
+
+    def test_method_array2(self):
+        r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
+                     (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
+        assert_equal(r[1].item(), (2, 22.0, b'b'))
+
+    def test_recarray_slices(self):
+        r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
+                     (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
+        assert_equal(r[1::2][1].item(), (4, 44.0, b'd'))
+
+    def test_recarray_fromarrays(self):
+        x1 = np.array([1, 2, 3, 4])
+        x2 = np.array(['a', 'dd', 'xyz', '12'])
+        x3 = np.array([1.1, 2, 3, 4])
+        r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
+        assert_equal(r[1].item(), (2, 'dd', 2.0))
+        x1[1] = 34
+        assert_equal(r.a, np.array([1, 2, 3, 4]))
+
+    def test_recarray_fromfile(self):
+        data_dir = path.join(path.dirname(__file__), 'data')
+        filename = path.join(data_dir, 'recarray_from_file.fits')
+        fd = open(filename, 'rb')
+        fd.seek(2880 * 2)
+        r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
+        fd.seek(2880 * 2)
+        r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big')
+        fd.seek(2880 * 2)
+        bytes_array = BytesIO()
+        bytes_array.write(fd.read())
+        bytes_array.seek(0)
+        r3 = np.rec.fromfile(bytes_array, formats='f8,i4,a5', shape=3, byteorder='big')
+        fd.close()
+        assert_equal(r1, r2)
+        assert_equal(r2, r3)
+
+    def test_recarray_from_obj(self):
+        count = 10
+        a = np.zeros(count, dtype='O')
+        b = np.zeros(count, dtype='f8')
+        c = np.zeros(count, dtype='f8')
+        for i in range(len(a)):
+            a[i] = list(range(1, 10))
+
+        mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
+        for i in range(len(a)):
+            assert_((mine.date[i] == list(range(1, 10))))
+            assert_((mine.data1[i] == 0.0))
+            assert_((mine.data2[i] == 0.0))
+
+    def test_recarray_repr(self):
+        a = np.array([(1, 0.1), (2, 0.2)],
+                     dtype=[('foo', '<i4'), ('bar', '<f8')])
+        a = np.rec.array(a)
+        assert_(repr(a).startswith('rec.array'))
+
+    def test_endian_bool_indexing(self):
+        # Ticket #105
+        a = np.arange(10., dtype='>f8')
+        b = np.arange(10., dtype='<f8')
+        xa = np.where((a > 2) & (a < 6))
+        xb = np.where((b > 2) & (b < 6))
+        ya = ((a > 2) & (a < 6))
+        yb = ((b > 2) & (b < 6))
+        assert_array_almost_equal(xa, ya.nonzero())
+        assert_array_almost_equal(xb, yb.nonzero())
+        assert_(np.all(a[ya] > 0.5))
+        assert_(np.all(b[yb] > 0.5))
+
+    def test_endian_where(self):
+        # GitHub issue #369
+        net = np.zeros(3, dtype='>f4')
+        net[1] = 0.00458849
+        net[2] = 0.605202
+        max_net = net.max()
+        test = np.where(net <= 0., max_net, net)
+        correct = np.array([ 0.60520202,  0.00458849,  0.60520202])
+        assert_array_almost_equal(test, correct)
+
+    def test_endian_recarray(self):
+        # Ticket #2185
+        dt = np.dtype([
+               ('head', '>u4'),
+               ('data', '>u4', 2),
+            ])
+        buf = np.recarray(1, dtype=dt)
+        buf[0]['head'] = 1
+        buf[0]['data'][:] = [1, 1]
+
+        h = buf[0]['head']
+        d = buf[0]['data'][0]
+        buf[0]['head'] = h
+        buf[0]['data'][0] = d
+        assert_(buf[0]['head'] == 1)
+
+    def test_mem_dot(self):
+        # Ticket #106
+        x = np.random.randn(0, 1)
+        y = np.random.randn(10, 1)
+        # Dummy array to detect bad memory access:
+        _z = np.ones(10)
+        _dummy = np.empty((0, 10))
+        z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
+        np.dot(x, np.transpose(y), out=z)
+        assert_equal(_z, np.ones(10))
+        # Do the same for the built-in dot:
+        np.core.multiarray.dot(x, np.transpose(y), out=z)
+        assert_equal(_z, np.ones(10))
+
+    def test_arange_endian(self):
+        # Ticket #111
+        ref = np.arange(10)
+        x = np.arange(10, dtype='<f8')
+        assert_array_equal(ref, x)
+        x = np.arange(10, dtype='>f8')
+        assert_array_equal(ref, x)
+
+    def test_method_args(self):
+        # Make sure methods and functions have same default axis
+        # keyword and arguments
+        funcs1 = ['argmax', 'argmin', 'sum', 'any', 'all', 'cumsum',
+                  'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
+                  'round', 'min', 'max', 'argsort', 'sort']
+        funcs2 = ['compress', 'take', 'repeat']
+
+        for func in funcs1:
+            arr = np.random.rand(8, 7)
+            arr2 = arr.copy()
+            res1 = getattr(arr, func)()
+            res2 = getattr(np, func)(arr2)
+            if res1 is None:
+                res1 = arr
+
+            if res1.dtype.kind in 'uib':
+                assert_((res1 == res2).all(), func)
+            else:
+                assert_(abs(res1-res2).max() < 1e-8, func)
+
+        for func in funcs2:
+            arr1 = np.random.rand(8, 7)
+            arr2 = np.random.rand(8, 7)
+            res1 = None
+            if func == 'compress':
+                arr1 = arr1.ravel()
+                res1 = getattr(arr2, func)(arr1)
+            else:
+                arr2 = (15*arr2).astype(int).ravel()
+            if res1 is None:
+                res1 = getattr(arr1, func)(arr2)
+            res2 = getattr(np, func)(arr1, arr2)
+            assert_(abs(res1-res2).max() < 1e-8, func)
+
+    def test_mem_lexsort_strings(self):
+        # Ticket #298
+        lst = ['abc', 'cde', 'fgh']
+        np.lexsort((lst,))
+
+    def test_fancy_index(self):
+        # Ticket #302
+        x = np.array([1, 2])[np.array([0])]
+        assert_equal(x.shape, (1,))
+
+    def test_recarray_copy(self):
+        # Ticket #312
+        dt = [('x', np.int16), ('y', np.float64)]
+        ra = np.array([(1, 2.3)], dtype=dt)
+        rb = np.rec.array(ra, dtype=dt)
+        rb['x'] = 2.
+        assert_(ra['x'] != rb['x'])
+
+    def test_rec_fromarray(self):
+        # Ticket #322
+        x1 = np.array([[1, 2], [3, 4], [5, 6]])
+        x2 = np.array(['a', 'dd', 'xyz'])
+        x3 = np.array([1.1, 2, 3])
+        np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
+
+    def test_object_array_assign(self):
+        x = np.empty((2, 2), object)
+        x.flat[2] = (1, 2, 3)
+        assert_equal(x.flat[2], (1, 2, 3))
+
+    def test_ndmin_float64(self):
+        # Ticket #324
+        x = np.array([1, 2, 3], dtype=np.float64)
+        assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
+        assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
+
+    def test_ndmin_order(self):
+        # Issue #465 and related checks
+        assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
+        assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
+        assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
+        assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
+
+    def test_mem_axis_minimization(self):
+        # Ticket #327
+        data = np.arange(5)
+        data = np.add.outer(data, data)
+
+    def test_mem_float_imag(self):
+        # Ticket #330
+        np.float64(1.0).imag
+
+    def test_dtype_tuple(self):
+        # Ticket #334
+        assert_(np.dtype('i4') == np.dtype(('i4', ())))
+
+    def test_dtype_posttuple(self):
+        # Ticket #335
+        np.dtype([('col1', '()i4')])
+
+    def test_numeric_carray_compare(self):
+        # Ticket #341
+        assert_equal(np.array(['X'], 'c'), b'X')
+
+    def test_string_array_size(self):
+        # Ticket #342
+        assert_raises(ValueError,
+                              np.array, [['X'], ['X', 'X', 'X']], '|S1')
+
+    def test_dtype_repr(self):
+        # Ticket #344
+        dt1 = np.dtype(('uint32', 2))
+        dt2 = np.dtype(('uint32', (2,)))
+        assert_equal(dt1.__repr__(), dt2.__repr__())
+
+    def test_reshape_order(self):
+        # Make sure reshape order works.
+        a = np.arange(6).reshape(2, 3, order='F')
+        assert_equal(a, [[0, 2, 4], [1, 3, 5]])
+        a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
+        b = a[:, 1]
+        assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
+
+    def test_reshape_zero_strides(self):
+        # Issue #380, test reshaping of zero strided arrays
+        a = np.ones(1)
+        a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
+        assert_(a.reshape(5, 1).strides[0] == 0)
+
+    def test_reshape_zero_size(self):
+        # GitHub Issue #2700, setting shape failed for 0-sized arrays
+        a = np.ones((0, 2))
+        a.shape = (-1, 2)
+
+    # Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides.
+    # With NPY_RELAXED_STRIDES_DEBUG the test becomes superfluous.
+    @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
+                        reason="Using relaxed stride debug")
+    def test_reshape_trailing_ones_strides(self):
+        # GitHub issue gh-2949, bad strides for trailing ones of new shape
+        a = np.zeros(12, dtype=np.int32)[::2]  # not contiguous
+        strides_c = (16, 8, 8, 8)
+        strides_f = (8, 24, 48, 48)
+        assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
+        assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
+        assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
+
+    def test_repeat_discont(self):
+        # Ticket #352
+        a = np.arange(12).reshape(4, 3)[:, 2]
+        assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
+
+    def test_array_index(self):
+        # Make sure optimization is not called in this case.
+        a = np.array([1, 2, 3])
+        a2 = np.array([[1, 2, 3]])
+        assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])
+
+    def test_object_argmax(self):
+        a = np.array([1, 2, 3], dtype=object)
+        assert_(a.argmax() == 2)
+
+    def test_recarray_fields(self):
+        # Ticket #372
+        dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
+        dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
+        for a in [np.array([(1, 2), (3, 4)], "i4,i4"),
+                  np.rec.array([(1, 2), (3, 4)], "i4,i4"),
+                  np.rec.array([(1, 2), (3, 4)]),
+                  np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"),
+                  np.rec.fromarrays([(1, 2), (3, 4)])]:
+            assert_(a.dtype in [dt0, dt1])
+
+    def test_random_shuffle(self):
+        # Ticket #374
+        a = np.arange(5).reshape((5, 1))
+        b = a.copy()
+        np.random.shuffle(b)
+        assert_equal(np.sort(b, axis=0), a)
+
+    def test_refcount_vdot(self):
+        # Changeset #3443
+        _assert_valid_refcount(np.vdot)
+
+    def test_startswith(self):
+        ca = np.char.array(['Hi', 'There'])
+        assert_equal(ca.startswith('H'), [True, False])
+
+    def test_noncommutative_reduce_accumulate(self):
+        # Ticket #413
+        tosubtract = np.arange(5)
+        todivide = np.array([2.0, 0.5, 0.25])
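+        # Evaluated left to right: 0-1-2-3-4 == -10 and 2.0/0.5/0.25 == 16.0.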
+        assert_equal(np.subtract.reduce(tosubtract), -10)
+        assert_equal(np.divide.reduce(todivide), 16.0)
+        assert_array_equal(np.subtract.accumulate(tosubtract),
+            np.array([0, -1, -3, -6, -10]))
+        assert_array_equal(np.divide.accumulate(todivide),
+            np.array([2., 4., 16.]))
+
+    def test_convolve_empty(self):
+        # Convolve should raise an error for empty input array.
+        assert_raises(ValueError, np.convolve, [], [1])
+        assert_raises(ValueError, np.convolve, [1], [])
+
+    def test_multidim_byteswap(self):
+        # Ticket #449
+        r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
+        assert_array_equal(r.byteswap(),
+                           np.array([(256, (0, 256, 512))], r.dtype))
+
+    def test_string_NULL(self):
+        # Changeset 3557
+        assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
+                     'a\x00\x0b\x0c')
+
+    def test_junk_in_string_fields_of_recarray(self):
+        # Ticket #483
+        r = np.array([[b'abc']], dtype=[('var1', '|S20')])
+        assert_(asbytes(r['var1'][0][0]) == b'abc')
+
+    def test_take_output(self):
+        # Ensure that 'take' honours output parameter.
+        x = np.arange(12).reshape((3, 4))
+        a = np.take(x, [0, 2], axis=1)
+        b = np.zeros_like(a)
+        np.take(x, [0, 2], axis=1, out=b)
+        assert_array_equal(a, b)
+
+    def test_take_object_fail(self):
+        # Issue gh-3001
+        d = 123.
+        a = np.array([d, 1], dtype=object)
+        if HAS_REFCOUNT:
+            ref_d = sys.getrefcount(d)
+        try:
+            a.take([0, 100])
+        except IndexError:
+            pass
+        if HAS_REFCOUNT:
+            assert_(ref_d == sys.getrefcount(d))
+
+    def test_array_str_64bit(self):
+        # Ticket #501
+        s = np.array([1, np.nan], dtype=np.float64)
+        with np.errstate(all='raise'):
+            np.array_str(s)  # Should succeed
+
+    def test_frompyfunc_endian(self):
+        # Ticket #503
+        from math import radians
+        uradians = np.frompyfunc(radians, 1, 1)
+        big_endian = np.array([83.4, 83.5], dtype='>f8')
+        little_endian = np.array([83.4, 83.5], dtype='<f8')
+        assert_almost_equal(uradians(big_endian).astype(float),
+                            uradians(little_endian).astype(float))
+
+    def test_object_casting(self):
+        # This used to trigger the object-type version of
+        # the bitwise_or operation, because float64 -> object
+        # casting succeeds
+        def rs():
+            x = np.ones([484, 286])
+            y = np.zeros([484, 286])
+            x |= y
+
+        assert_raises(TypeError, rs)
+
+    def test_unicode_scalar(self):
+        # Ticket #600
+        x = np.array(["DROND", "DROND1"], dtype="U6")
+        el = x[1]
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            new = pickle.loads(pickle.dumps(el, protocol=proto))
+            assert_equal(new, el)
+
+    def test_arange_non_native_dtype(self):
+        # Ticket #616
+        for T in ('>f4', '<f4'):
+            dt = np.dtype(T)
+            assert_equal(np.arange(0, dtype=dt).dtype, dt)
+            assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
+            assert_equal(np.arange(5, dtype=dt).dtype, dt)
+
+    def test_bool_flat_indexing_invalid_nr_elements(self):
+        s = np.ones(10, dtype=float)
+        x = np.array((15,), dtype=float)
+
+        def ia(x, s, v):
+            x[(s > 0)] = v
+
+        assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))
+        assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))
+
+        # Old special case (different code path):
+        assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
+        assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
+
+    def test_mem_scalar_indexing(self):
+        # Ticket #603
+        x = np.array([0], dtype=float)
+        index = np.array(0, dtype=np.int32)
+        x[index]
+
+    def test_binary_repr_0_width(self):
+        assert_equal(np.binary_repr(0, width=3), '000')
+
+    def test_fromstring(self):
+        assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
+                     [12, 9, 9])
+
+    def test_searchsorted_variable_length(self):
+        x = np.array(['a', 'aa', 'b'])
+        y = np.array(['d', 'e'])
+        assert_equal(x.searchsorted(y), [3, 3])
+
+    def test_string_argsort_with_zeros(self):
+        # Check argsort for strings containing zeros.
+        x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
+        assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
+        assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
+
+    def test_string_sort_with_zeros(self):
+        # Check sort for strings containing zeros.
+        x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
+        y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2")
+        assert_array_equal(np.sort(x, kind="q"), y)
+
+    def test_copy_detection_zero_dim(self):
+        # Ticket #658
+        np.indices((0, 3, 4)).T.reshape(-1, 3)
+
+    def test_flat_byteorder(self):
+        # Ticket #657
+        x = np.arange(10)
+        assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
+        assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
+
+    def test_flat_index_byteswap(self):
+        for dt in (np.dtype('<i4'), np.dtype('>i4')):
+            x = np.array([-1, 0, 1], dtype=dt)
+            assert_equal(x.flat[0].dtype, x[0].dtype)
+
+    def test_copy_detection_corner_case(self):
+        # Ticket #658
+        np.indices((0, 3, 4)).T.reshape(-1, 3)
+
+    # Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides.
+    # With NPY_RELAXED_STRIDES_DEBUG the test becomes superfluous,
+    # 0-sized reshape itself is tested elsewhere.
+    @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
+                        reason="Using relaxed stride debug")
+    def test_copy_detection_corner_case2(self):
+        # Ticket #771: strides are not set correctly when reshaping 0-sized
+        # arrays
+        b = np.indices((0, 3, 4)).T.reshape(-1, 3)
+        assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
+
+    def test_object_array_refcounting(self):
+        # Ticket #633
+        if not hasattr(sys, 'getrefcount'):
+            return
+
+        # NB. this is probably CPython-specific
+
+        cnt = sys.getrefcount
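+        # sys.getrefcount reports one extra reference (its own argument),
+        # which cancels out because only deltas are compared below.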
+
+        a = object()
+        b = object()
+        c = object()
+
+        cnt0_a = cnt(a)
+        cnt0_b = cnt(b)
+        cnt0_c = cnt(c)
+
+        # -- 0d -> 1-d broadcast slice assignment
+
+        arr = np.zeros(5, dtype=np.object_)
+
+        arr[:] = a
+        assert_equal(cnt(a), cnt0_a + 5)
+
+        arr[:] = b
+        assert_equal(cnt(a), cnt0_a)
+        assert_equal(cnt(b), cnt0_b + 5)
+
+        arr[:2] = c
+        assert_equal(cnt(b), cnt0_b + 3)
+        assert_equal(cnt(c), cnt0_c + 2)
+
+        del arr
+
+        # -- 1-d -> 2-d broadcast slice assignment
+
+        arr = np.zeros((5, 2), dtype=np.object_)
+        arr0 = np.zeros(2, dtype=np.object_)
+
+        arr0[0] = a
+        assert_(cnt(a) == cnt0_a + 1)
+        arr0[1] = b
+        assert_(cnt(b) == cnt0_b + 1)
+
+        arr[:, :] = arr0
+        assert_(cnt(a) == cnt0_a + 6)
+        assert_(cnt(b) == cnt0_b + 6)
+
+        arr[:, 0] = None
+        assert_(cnt(a) == cnt0_a + 1)
+
+        del arr, arr0
+
+        # -- 2-d copying + flattening
+
+        arr = np.zeros((5, 2), dtype=np.object_)
+
+        arr[:, 0] = a
+        arr[:, 1] = b
+        assert_(cnt(a) == cnt0_a + 5)
+        assert_(cnt(b) == cnt0_b + 5)
+
+        arr2 = arr.copy()
+        assert_(cnt(a) == cnt0_a + 10)
+        assert_(cnt(b) == cnt0_b + 10)
+
+        arr2 = arr[:, 0].copy()
+        assert_(cnt(a) == cnt0_a + 10)
+        assert_(cnt(b) == cnt0_b + 5)
+
+        arr2 = arr.flatten()
+        assert_(cnt(a) == cnt0_a + 10)
+        assert_(cnt(b) == cnt0_b + 10)
+
+        del arr, arr2
+
+        # -- concatenate, repeat, take, choose
+
+        arr1 = np.zeros((5, 1), dtype=np.object_)
+        arr2 = np.zeros((5, 1), dtype=np.object_)
+
+        arr1[...] = a
+        arr2[...] = b
+        assert_(cnt(a) == cnt0_a + 5)
+        assert_(cnt(b) == cnt0_b + 5)
+
+        tmp = np.concatenate((arr1, arr2))
+        assert_(cnt(a) == cnt0_a + 5 + 5)
+        assert_(cnt(b) == cnt0_b + 5 + 5)
+
+        tmp = arr1.repeat(3, axis=0)
+        assert_(cnt(a) == cnt0_a + 5 + 3*5)
+
+        tmp = arr1.take([1, 2, 3], axis=0)
+        assert_(cnt(a) == cnt0_a + 5 + 3)
+
+        x = np.array([[0], [1], [0], [1], [1]], int)
+        tmp = x.choose(arr1, arr2)
+        assert_(cnt(a) == cnt0_a + 5 + 2)
+        assert_(cnt(b) == cnt0_b + 5 + 3)
+
+        del tmp  # Avoid pyflakes unused variable warning
+
+    def test_mem_custom_float_to_array(self):
+        # Ticket 702
+        class MyFloat:
+            def __float__(self):
+                return 1.0
+
+        tmp = np.atleast_1d([MyFloat()])
+        tmp.astype(float)  # Should succeed
+
+    def test_object_array_refcount_self_assign(self):
+        # Ticket #711
+        class VictimObject:
+            deleted = False
+
+            def __del__(self):
+                self.deleted = True
+
+        d = VictimObject()
+        arr = np.zeros(5, dtype=np.object_)
+        arr[:] = d
+        del d
+        arr[:] = arr  # refcount of 'd' might hit zero here
+        assert_(not arr[0].deleted)
+        arr[:] = arr  # trying to induce a segfault by doing it again...
+        assert_(not arr[0].deleted)
+
+    def test_mem_fromiter_invalid_dtype_string(self):
+        x = [1, 2, 3]
+        assert_raises(ValueError,
+                              np.fromiter, [xi for xi in x], dtype='S')
+
+    def test_reduce_big_object_array(self):
+        # Ticket #713
+        oldsize = np.setbufsize(10*16)
+        a = np.array([None]*161, object)
+        assert_(not np.any(a))
+        np.setbufsize(oldsize)
+
+    def test_mem_0d_array_index(self):
+        # Ticket #714
+        np.zeros(10)[np.array(0)]
+
+    def test_nonnative_endian_fill(self):
+        # Non-native endian arrays were incorrectly filled with scalars
+        # before r5034.
+        if sys.byteorder == 'little':
+            dtype = np.dtype('>i4')
+        else:
+            dtype = np.dtype('<i4')
+        x = np.empty([1], dtype=dtype)
+        x.fill(1)
+        assert_equal(x, np.array([1], dtype=dtype))
+
+    def test_blasdot_uninitialized_memory(self):
+        # Ticket #950
+        for m in [0, 1, 2]:
+            for n in [0, 1, 2]:
+                for k in range(3):
+                    # Try to ensure that x->data contains non-zero floats
+                    x = np.array([123456789e199], dtype=np.float64)
+                    if IS_PYPY:
+                        x.resize((m, 0), refcheck=False)
+                    else:
+                        x.resize((m, 0))
+                    y = np.array([123456789e199], dtype=np.float64)
+                    if IS_PYPY:
+                        y.resize((0, n), refcheck=False)
+                    else:
+                        y.resize((0, n))
+
+                    # `dot` should just return zero (m, n) matrix
+                    z = np.dot(x, y)
+                    assert_(np.all(z == 0))
+                    assert_(z.shape == (m, n))
+
+    def test_zeros(self):
+        # Regression test for #1061.
+        # Set a size which cannot fit into a 64 bits signed integer
+        sz = 2 ** 64
+        with assert_raises_regex(ValueError,
+                                 'Maximum allowed dimension exceeded'):
+            np.empty(sz)
+
+    def test_huge_arange(self):
+        # Regression test for #1062.
+        # Set a size which cannot fit into a 64 bits signed integer
+        sz = 2 ** 64
+        with assert_raises_regex(ValueError,
+                                 'Maximum allowed size exceeded'):
+            np.arange(sz)
+            assert_(np.size == sz)
+
+    def test_fromiter_bytes(self):
+        # Ticket #1058
+        a = np.fromiter(list(range(10)), dtype='b')
+        b = np.fromiter(list(range(10)), dtype='B')
+        assert_(np.all(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+        assert_(np.all(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+
+    def test_array_from_sequence_scalar_array(self):
+        # Ticket #1078: segfaults when creating an array with a sequence of
+        # 0d arrays.
+        a = np.array((np.ones(2), np.array(2)), dtype=object)
+        assert_equal(a.shape, (2,))
+        assert_equal(a.dtype, np.dtype(object))
+        assert_equal(a[0], np.ones(2))
+        assert_equal(a[1], np.array(2))
+
+        a = np.array(((1,), np.array(1)), dtype=object)
+        assert_equal(a.shape, (2,))
+        assert_equal(a.dtype, np.dtype(object))
+        assert_equal(a[0], (1,))
+        assert_equal(a[1], np.array(1))
+
+    def test_array_from_sequence_scalar_array2(self):
+        # Ticket #1081: weird array with strange input...
+        t = np.array([np.array([]), np.array(0, object)], dtype=object)
+        assert_equal(t.shape, (2,))
+        assert_equal(t.dtype, np.dtype(object))
+
+    def test_array_too_big(self):
+        # Ticket #1080.
+        assert_raises(ValueError, np.zeros, [975]*7, np.int8)
+        assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
+
+    def test_dtype_keyerrors_(self):
+        # Ticket #1106.
+        dt = np.dtype([('f1', np.uint)])
+        assert_raises(KeyError, dt.__getitem__, "f2")
+        assert_raises(IndexError, dt.__getitem__, 1)
+        assert_raises(TypeError, dt.__getitem__, 0.0)
+
+    def test_lexsort_buffer_length(self):
+        # Ticket #1217, don't segfault.
+        a = np.ones(100, dtype=np.int8)
+        b = np.ones(100, dtype=np.int32)
+        i = np.lexsort((a[::-1], b))
+        assert_equal(i, np.arange(100, dtype=int))
+
+    def test_object_array_to_fixed_string(self):
+        # Ticket #1235.
+        a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
+        b = np.array(a, dtype=(np.str_, 8))
+        assert_equal(a, b)
+        c = np.array(a, dtype=(np.str_, 5))
+        assert_equal(c, np.array(['abcde', 'ijklm']))
+        d = np.array(a, dtype=(np.str_, 12))
+        assert_equal(a, d)
+        e = np.empty((2, ), dtype=(np.str_, 8))
+        e[:] = a[:]
+        assert_equal(a, e)
+
+    def test_unicode_to_string_cast(self):
+        # Ticket #1240.
+        a = np.array([['abc', '\u03a3'],
+                      ['asdf', 'erw']],
+                     dtype='U')
+        assert_raises(UnicodeEncodeError, np.array, a, 'S4')
+
+    def test_unicode_to_string_cast_error(self):
+        # gh-15790
+        a = np.array(['\x80'] * 129, dtype='U3')
+        assert_raises(UnicodeEncodeError, np.array, a, 'S')
+        b = a.reshape(3, 43)[:-1, :-1]
+        assert_raises(UnicodeEncodeError, np.array, b, 'S')
+
+    def test_mixed_string_byte_array_creation(self):
+        a = np.array(['1234', b'123'])
+        assert_(a.itemsize == 16)
+        a = np.array([b'123', '1234'])
+        assert_(a.itemsize == 16)
+        a = np.array(['1234', b'123', '12345'])
+        assert_(a.itemsize == 20)
+        a = np.array([b'123', '1234', b'12345'])
+        assert_(a.itemsize == 20)
+        a = np.array([b'123', '1234', b'1234'])
+        assert_(a.itemsize == 16)
+
+    def test_misaligned_objects_segfault(self):
+        # Ticket #1198 and #1267
+        a1 = np.zeros((10,), dtype='O,c')
+        a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
+        a1['f0'] = a2
+        repr(a1)
+        np.argmax(a1['f0'])
+        a1['f0'][1] = "FOO"
+        a1['f0'] = "FOO"
+        np.array(a1['f0'], dtype='S')
+        np.nonzero(a1['f0'])
+        a1.sort()
+        copy.deepcopy(a1)
+
+    def test_misaligned_scalars_segfault(self):
+        # Ticket #1267
+        s1 = np.array(('a', 'Foo'), dtype='c,O')
+        s2 = np.array(('b', 'Bar'), dtype='c,O')
+        s1['f1'] = s2['f1']
+        s1['f1'] = 'Baz'
+
+    def test_misaligned_dot_product_objects(self):
+        # Ticket #1267
+        # This didn't require a fix, but it's worth testing anyway, because
+        # it may fail if .dot stops enforcing the arrays to be BEHAVED
+        a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
+        b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
+        np.dot(a['f0'], b['f0'])
+
+    def test_byteswap_complex_scalar(self):
+        # Ticket #1259 and gh-441
+        for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
+            z = np.array([2.2-1.1j], dtype)
+            x = z[0]  # always native-endian
+            y = x.byteswap()
+            if x.dtype.byteorder == z.dtype.byteorder:
+                # little-endian machine
+                assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder()))
+            else:
+                # big-endian machine
+                assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype))
+            # double check real and imaginary parts:
+            assert_equal(x.real, y.real.byteswap())
+            assert_equal(x.imag, y.imag.byteswap())
+
+    def test_structured_arrays_with_objects1(self):
+        # Ticket #1299
+        stra = 'aaaa'
+        strb = 'bbbb'
+        x = np.array([[(0, stra), (1, strb)]], 'i8,O')
+        x[x.nonzero()] = x.ravel()[:1]
+        assert_(x[0, 1] == x[0, 0])
+
+    @pytest.mark.skipif(
+        sys.version_info >= (3, 12),
+        reason="Python 3.12 has immortal refcounts, this test no longer works."
+    )
+    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+    def test_structured_arrays_with_objects2(self):
+        # Ticket #1299 second test
+        stra = 'aaaa'
+        strb = 'bbbb'
+        numb = sys.getrefcount(strb)
+        numa = sys.getrefcount(stra)
+        x = np.array([[(0, stra), (1, strb)]], 'i8,O')
+        x[x.nonzero()] = x.ravel()[:1]
+        assert_(sys.getrefcount(strb) == numb)
+        assert_(sys.getrefcount(stra) == numa + 2)
+
+    def test_duplicate_title_and_name(self):
+        # Ticket #1254
+        dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
+        assert_raises(ValueError, np.dtype, dtspec)
+
+    def test_signed_integer_division_overflow(self):
+        # Ticket #1317.
+        def test_type(t):
+            min = np.array([np.iinfo(t).min])
+            min //= -1
+
+        with np.errstate(over="ignore"):
+            for t in (np.int8, np.int16, np.int32, np.int64, int):
+                test_type(t)
+
+    def test_buffer_hashlib(self):
+        from hashlib import sha256
+
+        x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
+        assert_equal(
+            sha256(x).hexdigest(),
+            '4636993d3e1da4e9d6b8f87b79e8f7c6d018580d52661950eabc3845c5897a4d')
+
+    def test_0d_string_scalar(self):
+        # Bug #1436; the following should succeed
+        np.asarray('x', '>c')
+
+    def test_log1p_compiler_shenanigans(self):
+        # Check if log1p is behaving on 32 bit intel systems.
+        assert_(np.isfinite(np.log1p(np.exp2(-53))))
+
+    def test_fromiter_comparison(self):
+        a = np.fromiter(list(range(10)), dtype='b')
+        b = np.fromiter(list(range(10)), dtype='B')
+        assert_(np.all(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+        assert_(np.all(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+
+    def test_fromstring_crash(self):
+        # Ticket #1345: the following should not cause a crash
+        with assert_warns(DeprecationWarning):
+            np.fromstring(b'aa, aa, 1.0', sep=',')
+
+    def test_ticket_1539(self):
+        dtypes = [x for x in np.sctypeDict.values()
+                  if (issubclass(x, np.number)
+                      and not issubclass(x, np.timedelta64))]
+        a = np.array([], np.bool_)  # not x[0] because it is unordered
+        failures = []
+
+        for x in dtypes:
+            b = a.astype(x)
+            for y in dtypes:
+                c = a.astype(y)
+                try:
+                    d = np.dot(b, c)
+                except TypeError:
+                    failures.append((x, y))
+                else:
+                    if d != 0:
+                        failures.append((x, y))
+        if failures:
+            raise AssertionError("Failures: %r" % failures)
+
+    def test_ticket_1538(self):
+        x = np.finfo(np.float32)
+        for name in 'eps epsneg max min resolution tiny'.split():
+            assert_equal(type(getattr(x, name)), np.float32,
+                         err_msg=name)
+
+    def test_ticket_1434(self):
+        # Check that the out= argument in var and std has an effect
+        data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
+        out = np.zeros((3,))
+
+        ret = data.var(axis=1, out=out)
+        assert_(ret is out)
+        assert_array_equal(ret, data.var(axis=1))
+
+        ret = data.std(axis=1, out=out)
+        assert_(ret is out)
+        assert_array_equal(ret, data.std(axis=1))
+
+    def test_complex_nan_maximum(self):
+        cnan = complex(0, np.nan)
+        assert_equal(np.maximum(1, cnan), cnan)
+
+    def test_subclass_int_tuple_assignment(self):
+        # ticket #1563
+        class Subclass(np.ndarray):
+            def __new__(cls, i):
+                return np.ones((i,)).view(cls)
+
+        x = Subclass(5)
+        x[(0,)] = 2  # shouldn't raise an exception
+        assert_equal(x[0], 2)
+
+    def test_ufunc_no_unnecessary_views(self):
+        # ticket #1548
+        class Subclass(np.ndarray):
+            pass
+        x = np.array([1, 2, 3]).view(Subclass)
+        y = np.add(x, x, x)
+        assert_equal(id(x), id(y))
+
+    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+    def test_take_refcount(self):
+        # ticket #939
+        a = np.arange(16, dtype=float)
+        a.shape = (4, 4)
+        lut = np.ones((5 + 3, 4), float)
+        rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
+        c1 = sys.getrefcount(rgba)
+        try:
+            lut.take(a, axis=0, mode='clip', out=rgba)
+        except TypeError:
+            pass
+        c2 = sys.getrefcount(rgba)
+        assert_equal(c1, c2)
+
+    def test_fromfile_tofile_seeks(self):
+        # On Python 3, tofile/fromfile used to get (#1610) the Python
+        # file handle out of sync
+        f0 = tempfile.NamedTemporaryFile()
+        f = f0.file
+        f.write(np.arange(255, dtype='u1').tobytes())
+
+        f.seek(20)
+        ret = np.fromfile(f, count=4, dtype='u1')
+        assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
+        assert_equal(f.tell(), 24)
+
+        f.seek(40)
+        np.array([1, 2, 3], dtype='u1').tofile(f)
+        assert_equal(f.tell(), 43)
+
+        f.seek(40)
+        data = f.read(3)
+        assert_equal(data, b"\x01\x02\x03")
+
+        f.seek(80)
+        f.read(4)
+        data = np.fromfile(f, dtype='u1', count=4)
+        assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
+
+        f.close()
+
+    def test_complex_scalar_warning(self):
+        for tp in [np.csingle, np.cdouble, np.clongdouble]:
+            x = tp(1+2j)
+            assert_warns(np.ComplexWarning, float, x)
+            with suppress_warnings() as sup:
+                sup.filter(np.ComplexWarning)
+                assert_equal(float(x), float(x.real))
+
+    def test_complex_scalar_complex_cast(self):
+        for tp in [np.csingle, np.cdouble, np.clongdouble]:
+            x = tp(1+2j)
+            assert_equal(complex(x), 1+2j)
+
+    def test_complex_boolean_cast(self):
+        # Ticket #2218
+        for tp in [np.csingle, np.cdouble, np.clongdouble]:
+            x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)
+            assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))
+            assert_(np.any(x))
+            assert_(np.all(x[1:]))
+
+    def test_uint_int_conversion(self):
+        x = 2**64 - 1
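+        # 2**64 - 1 is the largest value a uint64 can hold; converting the
+        # scalar back with int() must round-trip it exactly.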
+        assert_equal(int(np.uint64(x)), x)
+
+    def test_duplicate_field_names_assign(self):
+        ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
+        ra.dtype.names = ('f1', 'f2')
+        repr(ra)  # should not cause a segmentation fault
+        assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
+
+    def test_eq_string_and_object_array(self):
+        # From e-mail thread "__eq__ with str and object" (Keith Goodman)
+        a1 = np.array(['a', 'b'], dtype=object)
+        a2 = np.array(['a', 'c'])
+        assert_array_equal(a1 == a2, [True, False])
+        assert_array_equal(a2 == a1, [True, False])
+
+    def test_nonzero_byteswap(self):
+        a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
+        a.dtype = np.float32
+        assert_equal(a.nonzero()[0], [1])
+        a = a.byteswap().newbyteorder()
+        assert_equal(a.nonzero()[0], [1])  # [0] if nonzero() ignores swap
+
+    def test_find_common_type_boolean(self):
+        # Ticket #1695
+        with pytest.warns(DeprecationWarning, match="np.find_common_type"):
+            res = np.find_common_type([], ['?', '?'])
+        assert res == '?'
+
+    def test_empty_mul(self):
+        a = np.array([1.])
+        a[1:1] *= 2
+        assert_equal(a, [1.])
+
+    def test_array_side_effect(self):
+        # The second use of itemsize was throwing an exception because in
+        # ctors.c, discover_itemsize was calling PyObject_Length without
+        # checking the return code.  This failed to get the length of the
+        # number 2, and the exception hung around until something checked
+        # PyErr_Occurred() and returned an error.
+        assert_equal(np.dtype('S10').itemsize, 10)
+        np.array([['abc', 2], ['long   ', '0123456789']], dtype=np.bytes_)
+        assert_equal(np.dtype('S10').itemsize, 10)
+
+    def test_any_float(self):
+        # all and any for floats
+        a = np.array([0.1, 0.9])
+        assert_(np.any(a))
+        assert_(np.all(a))
+
+    def test_large_float_sum(self):
+        a = np.arange(10000, dtype='f')
+        assert_equal(a.sum(dtype='d'), a.astype('d').sum())
+
+    def test_ufunc_casting_out(self):
+        a = np.array(1.0, dtype=np.float32)
+        b = np.array(1.0, dtype=np.float64)
+        c = np.array(1.0, dtype=np.float32)
+        np.add(a, b, out=c)
+        assert_equal(c, 2.0)
+
+    def test_array_scalar_contiguous(self):
+        # Array scalars are both C and Fortran contiguous
+        assert_(np.array(1.0).flags.c_contiguous)
+        assert_(np.array(1.0).flags.f_contiguous)
+        assert_(np.array(np.float32(1.0)).flags.c_contiguous)
+        assert_(np.array(np.float32(1.0)).flags.f_contiguous)
+
+    def test_squeeze_contiguous(self):
+        # Similar to GitHub issue #387
+        a = np.zeros((1, 2)).squeeze()
+        b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze()
+        assert_(a.flags.c_contiguous)
+        assert_(a.flags.f_contiguous)
+        assert_(b.flags.f_contiguous)
+
+    def test_squeeze_axis_handling(self):
+        # Issue #10779
+        # Ensure proper handling of objects
+        # that don't support axis specification
+        # when squeezing
+
+        class OldSqueeze(np.ndarray):
+
+            def __new__(cls,
+                        input_array):
+                obj = np.asarray(input_array).view(cls)
+                return obj
+
+            # it is perfectly reasonable that prior
+            # to numpy version 1.7.0 a subclass of ndarray
+            # might have been created that did not expect
+            # squeeze to have an axis argument
+            # NOTE: this example is somewhat artificial;
+            # it is designed to simulate an old API
+            # expectation to guard against regression
+            def squeeze(self):
+                return super().squeeze()
+
+        oldsqueeze = OldSqueeze(np.array([[1],[2],[3]]))
+
+        # if no axis argument is specified the old API
+        # expectation should give the correct result
+        assert_equal(np.squeeze(oldsqueeze),
+                     np.array([1,2,3]))
+
+        # likewise, axis=None should work perfectly well
+        # with the old API expectation
+        assert_equal(np.squeeze(oldsqueeze, axis=None),
+                     np.array([1,2,3]))
+
+        # however, specification of any particular axis
+        # should raise a TypeError in the context of the
+        # old API specification, even when using a valid
+        # axis specification like 1 for this array
+        with assert_raises(TypeError):
+            # this would silently succeed for array
+            # subclasses / objects that did not support
+            # squeeze axis argument handling before fixing
+            # Issue #10779
+            np.squeeze(oldsqueeze, axis=1)
+
+        # check for the same behavior when using an invalid
+        # axis specification -- in this case axis=0 does not
+        # have size 1, but the priority should be to raise
+        # a TypeError for the axis argument and NOT a
+        # ValueError for squeezing a non-empty dimension
+        with assert_raises(TypeError):
+            np.squeeze(oldsqueeze, axis=0)
+
+        # the new API knows how to handle the axis
+        # argument and will return a ValueError if
+        # attempting to squeeze an axis that is not
+        # of length 1
+        with assert_raises(ValueError):
+            np.squeeze(np.array([[1],[2],[3]]), axis=0)
+
+    def test_reduce_contiguous(self):
+        # GitHub issue #387
+        a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
+        b = np.add.reduce(np.zeros((2, 1, 2)), 1)
+        assert_(a.flags.c_contiguous)
+        assert_(a.flags.f_contiguous)
+        assert_(b.flags.c_contiguous)
+
+    @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
+    def test_object_array_self_reference(self):
+        # Object arrays with references to themselves can cause problems
+        a = np.array(0, dtype=object)
+        a[()] = a
+        assert_raises(RecursionError, int, a)
+        assert_raises(RecursionError, float, a)
+        a[()] = None
+
+    @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
+    def test_object_array_circular_reference(self):
+        # Test the same for a circular reference.
+        a = np.array(0, dtype=object)
+        b = np.array(0, dtype=object)
+        a[()] = b
+        b[()] = a
+        assert_raises(RecursionError, int, a)
+        # NumPy has no tp_traverse currently, so circular references
+        # cannot be detected. So resolve it:
+        a[()] = None
+
+        # This was causing a to become like the above
+        a = np.array(0, dtype=object)
+        a[...] += 1
+        assert_equal(a, 1)
+
+    def test_object_array_nested(self):
+        # but is fine with a reference to a different array
+        a = np.array(0, dtype=object)
+        b = np.array(0, dtype=object)
+        a[()] = b
+        assert_equal(int(a), int(0))
+        assert_equal(float(a), float(0))
+
+    def test_object_array_self_copy(self):
+        # An object array being copied into itself was DECREF'ed before
+        # being INCREF'ed, which caused segmentation faults (gh-3787)
+        a = np.array(object(), dtype=object)
+        np.copyto(a, a)
+        if HAS_REFCOUNT:
+            assert_(sys.getrefcount(a[()]) == 2)
+        a[()].__class__  # will segfault if object was deleted
+
+    def test_zerosize_accumulate(self):
+        "Ticket #1733"
+        x = np.array([[42, 0]], dtype=np.uint32)
+        assert_equal(np.add.accumulate(x[:-1, 0]), [])
+
+    def test_objectarray_setfield(self):
+        # Setfield should not overwrite Object fields with non-Object data
+        x = np.array([1, 2, 3], dtype=object)
+        assert_raises(TypeError, x.setfield, 4, np.int32, 0)
+
+    def test_setting_rank0_string(self):
+        "Ticket #1736"
+        s1 = b"hello1"
+        s2 = b"hello2"
+        a = np.zeros((), dtype="S10")
+        a[()] = s1
+        assert_equal(a, np.array(s1))
+        a[()] = np.array(s2)
+        assert_equal(a, np.array(s2))
+
+        a = np.zeros((), dtype='f4')
+        a[()] = 3
+        assert_equal(a, np.array(3))
+        a[()] = np.array(4)
+        assert_equal(a, np.array(4))
+
+    def test_string_astype(self):
+        "Ticket #1748"
+        s1 = b'black'
+        s2 = b'white'
+        s3 = b'other'
+        a = np.array([[s1], [s2], [s3]])
+        assert_equal(a.dtype, np.dtype('S5'))
+        b = a.astype(np.dtype('S0'))
+        assert_equal(b.dtype, np.dtype('S5'))
+
+    def test_ticket_1756(self):
+        # Ticket #1756
+        s = b'0123456789abcdef'
+        a = np.array([s]*5)
+        for i in range(1, 17):
+            a1 = np.array(a, "|S%d" % i)
+            a2 = np.array([s[:i]]*5)
+            assert_equal(a1, a2)
+
+    def test_fields_strides(self):
+        "gh-2355"
+        r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
+        assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
+        assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
+        assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
+        assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
+
+    def test_alignment_update(self):
+        # Check that alignment flag is updated on stride setting
+        a = np.arange(10)
+        assert_(a.flags.aligned)
+        a.strides = 3
+        assert_(not a.flags.aligned)
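+        # Why this holds (reading aid): a 3-byte stride cannot keep every
+        # element on a boundary matching the 4- or 8-byte integer itemsize,
+        # so the aligned flag must drop to False.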
+
+    def test_ticket_1770(self):
+        "Should not segfault on python 3k"
+        import numpy as np
+        try:
+            a = np.zeros((1,), dtype=[('f1', 'f')])
+            a['f1'] = 1
+            a['f2'] = 1
+        except ValueError:
+            pass
+        except Exception:
+            raise AssertionError
+
+    def test_ticket_1608(self):
+        "x.flat shouldn't modify data"
+        x = np.array([[1, 2], [3, 4]]).T
+        np.array(x.flat)
+        assert_equal(x, [[1, 3], [2, 4]])
+
+    def test_pickle_string_overwrite(self):
+        import re
+
+        data = np.array([1], dtype='b')
+        blob = pickle.dumps(data, protocol=1)
+        data = pickle.loads(blob)
+
+        # Check that loads does not clobber interned strings
+        s = re.sub("a(.)", "\x01\\1", "a_")
+        assert_equal(s[0], "\x01")
+        data[0] = 0x6a
+        s = re.sub("a(.)", "\x01\\1", "a_")
+        assert_equal(s[0], "\x01")
+
+    def test_pickle_bytes_overwrite(self):
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            data = np.array([1], dtype='b')
+            data = pickle.loads(pickle.dumps(data, protocol=proto))
+            data[0] = 0x7d
+            bytestring = "\x01  ".encode('ascii')
+            assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
+
+    def test_pickle_py2_array_latin1_hack(self):
+        # Check that unpickling hacks in Py3 that support
+        # encoding='latin1' work correctly.
+
+        # Python2 output for pickle.dumps(numpy.array([129], dtype='b'))
+        data = (b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
+                b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
+                b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
+                b"p13\ntp14\nb.")
+        # This should work:
+        result = pickle.loads(data, encoding='latin1')
+        assert_array_equal(result, np.array([129]).astype('b'))
+        # Should not segfault:
+        assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
+
+    def test_pickle_py2_scalar_latin1_hack(self):
+        # Check that the scalar unpickling hack in Py3 that supports
+        # encoding='latin1' works correctly.
+
+        # Python2 output for pickle.dumps(...)
+        datas = [
+            # (original, python2_pickle, koi8r_validity)
+            (np.str_('\u6bd2'),
+             (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
+              b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
+              b"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
+             'invalid'),
+
+            (np.float64(9e123),
+             (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
+              b"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
+              b"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
+             'invalid'),
+
+            (np.bytes_(b'\x9c'),  # different 8-bit code point in KOI8-R vs latin1
+             (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
+              b"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
+              b"tp8\nRp9\n."),
+             'different'),
+        ]
+        for original, data, koi8r_validity in datas:
+            result = pickle.loads(data, encoding='latin1')
+            assert_equal(result, original)
+
+            # Decoding under a non-latin1 encoding (e.g. KOI8-R) can
+            # produce bad results, but should not segfault.
+            if koi8r_validity == 'different':
+                # Unicode code points happen to lie within latin1,
+                # but are different in koi8-r, resulting in silently
+                # bogus results
+                result = pickle.loads(data, encoding='koi8-r')
+                assert_(result != original)
+            elif koi8r_validity == 'invalid':
+                # Unicode code points outside latin1, so decoding
+                # results in an encoding exception
+                assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
+            else:
+                raise ValueError(koi8r_validity)
+
+    def test_structured_type_to_object(self):
+        a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
+        a_obj = np.empty((2,), dtype=object)
+        a_obj[0] = (0, 1)
+        a_obj[1] = (3, 2)
+        # astype records -> object
+        assert_equal(a_rec.astype(object), a_obj)
+        # '=' records -> object
+        b = np.empty_like(a_obj)
+        b[...] = a_rec
+        assert_equal(b, a_obj)
+        # '=' object -> records
+        b = np.empty_like(a_rec)
+        b[...] = a_obj
+        assert_equal(b, a_rec)
+
+    def test_assign_obj_listoflists(self):
+        # Ticket # 1870
+        # The inner list should get assigned to the object elements
+        a = np.zeros(4, dtype=object)
+        b = a.copy()
+        a[0] = [1]
+        a[1] = [2]
+        a[2] = [3]
+        a[3] = [4]
+        b[...] = [[1], [2], [3], [4]]
+        assert_equal(a, b)
+        # The first dimension should get broadcast
+        a = np.zeros((2, 2), dtype=object)
+        a[...] = [[1, 2]]
+        assert_equal(a, [[1, 2], [1, 2]])
+
+    @pytest.mark.slow_pypy
+    def test_memoryleak(self):
+        # Ticket #1917 - ensure that array data doesn't leak
+        for i in range(1000):
+            # 100MB times 1000 would give 100GB of memory usage if it leaks
+            a = np.empty((100000000,), dtype='i1')
+            del a
+
+    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+    def test_ufunc_reduce_memoryleak(self):
+        a = np.arange(6)
+        acnt = sys.getrefcount(a)
+        np.add.reduce(a)
+        assert_equal(sys.getrefcount(a), acnt)
+
+    def test_search_sorted_invalid_arguments(self):
+        # Ticket #2021, should not segfault.
+        x = np.arange(0, 4, dtype='datetime64[D]')
+        assert_raises(TypeError, x.searchsorted, 1)
+
+    def test_string_truncation(self):
+        # Ticket #1990 - Data can be truncated in creation of an array from a
+        # mixed sequence of numeric values and strings (gh-2583)
+        for val in [True, 1234, 123.4, complex(1, 234)]:
+            for tostr, dtype in [(asunicode, "U"), (asbytes, "S")]:
+                b = np.array([val, tostr('xx')], dtype=dtype)
+                assert_equal(tostr(b[0]), tostr(val))
+                b = np.array([tostr('xx'), val], dtype=dtype)
+                assert_equal(tostr(b[1]), tostr(val))
+
+                # test also with longer strings
+                b = np.array([val, tostr('xxxxxxxxxx')], dtype=dtype)
+                assert_equal(tostr(b[0]), tostr(val))
+                b = np.array([tostr('xxxxxxxxxx'), val], dtype=dtype)
+                assert_equal(tostr(b[1]), tostr(val))
+
+    def test_string_truncation_ucs2(self):
+        # Ticket #2081. Python compiled with two byte unicode
+        # can lead to truncation if itemsize is not properly
+        # adjusted for NumPy's four byte unicode.
+        a = np.array(['abcd'])
+        assert_equal(a.dtype.itemsize, 16)
+
+    def test_unique_stable(self):
+        # Ticket #2063 must always choose stable sort for argsort to
+        # get consistent results
+        v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
+        res = np.unique(v, return_index=True)
+        tgt = (np.array([0, 1, 2]), np.array([ 0,  5, 11]))
+        assert_equal(res, tgt)
+
+    def test_unicode_alloc_dealloc_match(self):
+        # Ticket #1578, the mismatch only showed up when running
+        # python-debug for python versions >= 2.7, and then as
+        # a core dump and error message.
+        a = np.array(['abc'], dtype=np.str_)[0]
+        del a
+
+    def test_refcount_error_in_clip(self):
+        # Ticket #1588
+        a = np.zeros((2,), dtype='>i2').clip(min=0)
+        x = a + a
+        # This used to segfault:
+        y = str(x)
+        # Check the final string:
+        assert_(y == "[0 0]")
+
+    def test_searchsorted_wrong_dtype(self):
+        # Ticket #2189, it used to segfault, so we check that it raises the
+        # proper exception.
+        a = np.array([('a', 1)], dtype='S1, int')
+        assert_raises(TypeError, np.searchsorted, a, 1.2)
+        # Ticket #2066, similar problem:
+        dtype = np.format_parser(['i4', 'i4'], [], [])
+        a = np.recarray((2,), dtype)
+        a[...] = [(1, 2), (3, 4)]
+        assert_raises(TypeError, np.searchsorted, a, 1)
+
+    def test_complex64_alignment(self):
+        # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
+        dtt = np.complex64
+        arr = np.arange(10, dtype=dtt)
+        # 2D array
+        arr2 = np.reshape(arr, (2, 5))
+        # Fortran write followed by (C or F) read caused bus error
+        data_str = arr2.tobytes('F')
+        data_back = np.ndarray(arr2.shape,
+                              arr2.dtype,
+                              buffer=data_str,
+                              order='F')
+        assert_array_equal(arr2, data_back)
+
+    def test_structured_count_nonzero(self):
+        arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
+        count = np.count_nonzero(arr)
+        assert_equal(count, 0)
+
+    def test_copymodule_preserves_f_contiguity(self):
+        a = np.empty((2, 2), order='F')
+        b = copy.copy(a)
+        c = copy.deepcopy(a)
+        assert_(b.flags.fortran)
+        assert_(b.flags.f_contiguous)
+        assert_(c.flags.fortran)
+        assert_(c.flags.f_contiguous)
+
+    def test_fortran_order_buffer(self):
+        import numpy as np
+        a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
+        arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
+        arr2 = np.array([[['H', 'e', 'l', 'l', 'o'],
+                          ['F', 'o', 'o', 'b', '']]])
+        assert_array_equal(arr, arr2)
+
+    def test_assign_from_sequence_error(self):
+        # Ticket #4024.
+        arr = np.array([1, 2, 3])
+        assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
+        arr.__setitem__(slice(None), [9])
+        assert_equal(arr, [9, 9, 9])
+
+    def test_format_on_flex_array_element(self):
+        # Ticket #4369.
+        dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
+        arr = np.array([('2000-01-01', 1)], dt)
+        formatted = '{0}'.format(arr[0])
+        assert_equal(formatted, str(arr[0]))
+
+    @pytest.mark.parametrize("protocol",
+                             range(2, pickle.HIGHEST_PROTOCOL + 1))
+    def test_pickle_module(self, protocol):
+        # gh-12837
+        arr = np.arange(10)
+        s = pickle.dumps(arr, protocol=protocol)
+        if protocol == 5 and len(arr.shape) > 0:
+            # unpickling ndarray goes through _frombuffer for protocol 5
+            assert b'numpy.core.numeric' in s
+        else:
+            assert b'numpy.core.multiarray' in s
+
+    def test_object_casting_errors(self):
+        # gh-11993 update to ValueError (see gh-16909), since strings can in
+        # principle be converted to complex, but this string cannot.
+        arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
+        assert_raises(ValueError, arr.astype, 'c8')
+
+    def test_ediff1d_casting(self):
+        # gh-12711
+        x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
+        res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
+        assert_equal(res, [-99,   1,   2,   3,  -7,  88,  99])
+
+        # The use of safe casting means that 1<<20 is cast unsafely; an
+        # error may be better, but currently there is no mechanism for it.
+        res = np.ediff1d(x, to_begin=(1<<20), to_end=(1<<20))
+        assert_equal(res, [0,   1,   2,   3,  -7,  0])
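+
+        # For context (illustrative): 1 << 20 does not fit in int16, and the
+        # unsafe cast keeps only the low 16 bits, which are all zero:
+        #     np.array(1 << 20).astype(np.int16)   # -> array(0, dtype=int16)
+        # hence the 0 boundary values in the expected result above.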
+
+    def test_pickle_datetime64_array(self):
+        # gh-12745 (would fail with pickle5 installed)
+        d = np.datetime64('2015-07-04 12:59:59.50', 'ns')
+        arr = np.array([d])
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            dumped = pickle.dumps(arr, protocol=proto)
+            assert_equal(pickle.loads(dumped), arr)
+
+    def test_bad_array_interface(self):
+        class T:
+            __array_interface__ = {}
+
+        with assert_raises(ValueError):
+            np.array([T()])
+
+    def test_2d__array__shape(self):
+        class T:
+            def __array__(self):
+                return np.ndarray(shape=(0,0))
+
+            # Make sure __array__ is used instead of Sequence methods.
+            def __iter__(self):
+                return iter([])
+
+            def __getitem__(self, idx):
+                raise AssertionError("__getitem__ was called")
+
+            def __len__(self):
+                return 0
+
+
+        t = T()
+        # gh-13659, would raise in broadcasting [x=t for x in result]
+        arr = np.array([t])
+        assert arr.shape == (1, 0, 0)
+
+    @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
+    def test_to_ctypes(self):
+        # gh-14214
+        arr = np.zeros((2 ** 31 + 1,), 'b')
+        assert arr.size * arr.itemsize > 2 ** 31
+        c_arr = np.ctypeslib.as_ctypes(arr)
+        assert_equal(c_arr._length_, arr.size)
+
+    def test_complex_conversion_error(self):
+        # gh-17068
+        with pytest.raises(TypeError, match=r"Unable to convert dtype.*"):
+            complex(np.array("now", np.datetime64))
+
+    def test__array_interface__descr(self):
+        # gh-17068
+        dt = np.dtype(dict(names=['a', 'b'],
+                           offsets=[0, 0],
+                           formats=[np.int64, np.int64]))
+        descr = np.array((1, 1), dtype=dt).__array_interface__['descr']
+        assert descr == [('', '|V8')]  # instead of [(b'', '|V8')]
+
+    @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
+    @requires_memory(free_bytes=9e9)
+    def test_dot_big_stride(self):
+        # gh-17111
+        # blas stride = stride//itemsize > int32 max
+        int32_max = np.iinfo(np.int32).max
+        n = int32_max + 3
+        a = np.empty([n], dtype=np.float32)
+        b = a[::n-1]
+        b[...] = 1
+        assert b.strides[0] > int32_max * b.dtype.itemsize
+        assert np.dot(b, b) == 2.0
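+        # Reading aid: a[::n-1] selects exactly two elements (indices 0 and
+        # n-1), both set to 1, so np.dot(b, b) == 1*1 + 1*1 == 2.0 no matter
+        # how large the byte stride is.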
+
+    def test_frompyfunc_name(self):
+        # name conversion was failing for python 3 strings
+        # resulting in the default '?' name. Also test utf-8
+        # encoding using non-ascii name.
+        def cassé(x):
+            return x
+
+        f = np.frompyfunc(cassé, 1, 1)
+        assert str(f) == "<ufunc 'cassé (vectorized)'>"
+
+    @pytest.mark.parametrize("operation", [
+        'add', 'subtract', 'multiply', 'floor_divide',
+        'conjugate', 'fmod', 'square', 'reciprocal',
+        'power', 'absolute', 'negative', 'positive',
+        'greater', 'greater_equal', 'less',
+        'less_equal', 'equal', 'not_equal', 'logical_and',
+        'logical_not', 'logical_or', 'bitwise_and', 'bitwise_or',
+        'bitwise_xor', 'invert', 'left_shift', 'right_shift',
+        'gcd', 'lcm'
+        ]
+    )
+    @pytest.mark.parametrize("order", [
+        ('b->', 'B->'),
+        ('h->', 'H->'),
+        ('i->', 'I->'),
+        ('l->', 'L->'),
+        ('q->', 'Q->'),
+        ]
+    )
+    def test_ufunc_order(self, operation, order):
+        # gh-18075
+        # Ensure signed types before unsigned
+        def get_idx(string, str_lst):
+            for i, s in enumerate(str_lst):
+                if string in s:
+                    return i
+            raise ValueError(f"{string} not in list")
+        types = getattr(np, operation).types
+        assert get_idx(order[0], types) < get_idx(order[1], types), (
+                f"Unexpected types order of ufunc in {operation}"
+                f"for {order}. Possible fix: Use signed before unsigned"
+                "in generate_umath.py")
+
+    def test_nonbool_logical(self):
+        # gh-22845
+        # create two arrays with bit patterns that do not overlap.
+        # needs to be large enough to test both SIMD and scalar paths
+        size = 100
+        a = np.frombuffer(b'\x01' * size, dtype=np.bool_)
+        b = np.frombuffer(b'\x80' * size, dtype=np.bool_)
+        expected = np.ones(size, dtype=np.bool_)
+        assert_array_equal(np.logical_and(a, b), expected)
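+        # Reading aid: any nonzero byte pattern is truthy as a boolean, so
+        # both views are all-True arrays; the regression was a bitwise path
+        # computing b'\x01' & b'\x80' == 0, i.e. wrongly all-False.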
+
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalar_ctors.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalar_ctors.py
new file mode 100644
index 00000000..da976d64
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalar_ctors.py
@@ -0,0 +1,186 @@
+"""
+Test the scalar constructors, which also do type-coercion
+"""
+import pytest
+
+import numpy as np
+from numpy.testing import (
+    assert_equal, assert_almost_equal, assert_warns,
+    )
+
+class TestFromString:
+    def test_floating(self):
+        # Ticket #640, floats from string
+        fsingle = np.single('1.234')
+        fdouble = np.double('1.234')
+        flongdouble = np.longdouble('1.234')
+        assert_almost_equal(fsingle, 1.234)
+        assert_almost_equal(fdouble, 1.234)
+        assert_almost_equal(flongdouble, 1.234)
+
+    def test_floating_overflow(self):
+        """ Strings containing an unrepresentable float overflow """
+        fhalf = np.half('1e10000')
+        assert_equal(fhalf, np.inf)
+        fsingle = np.single('1e10000')
+        assert_equal(fsingle, np.inf)
+        fdouble = np.double('1e10000')
+        assert_equal(fdouble, np.inf)
+        flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000')
+        assert_equal(flongdouble, np.inf)
+
+        fhalf = np.half('-1e10000')
+        assert_equal(fhalf, -np.inf)
+        fsingle = np.single('-1e10000')
+        assert_equal(fsingle, -np.inf)
+        fdouble = np.double('-1e10000')
+        assert_equal(fdouble, -np.inf)
+        flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000')
+        assert_equal(flongdouble, -np.inf)
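+
+        # For scale (well-known limits): the largest finite float16 is 65504
+        # and the largest finite float64 is about 1.8e308, so '1e10000'
+        # overflows every type tested here to infinity.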
+
+
+class TestExtraArgs:
+    def test_superclass(self):
+        # try both positional and keyword arguments
+        s = np.str_(b'\\x61', encoding='unicode-escape')
+        assert s == 'a'
+        s = np.str_(b'\\x61', 'unicode-escape')
+        assert s == 'a'
+
+        # previously this would return '\\xx'
+        with pytest.raises(UnicodeDecodeError):
+            np.str_(b'\\xx', encoding='unicode-escape')
+        with pytest.raises(UnicodeDecodeError):
+            np.str_(b'\\xx', 'unicode-escape')
+
+        # superclass fails, but numpy succeeds
+        assert np.bytes_(-2) == b'-2'
+
+    def test_datetime(self):
+        dt = np.datetime64('2000-01', ('M', 2))
+        assert np.datetime_data(dt) == ('M', 2)
+
+        with pytest.raises(TypeError):
+            np.datetime64('2000', garbage=True)
+
+    def test_bool(self):
+        with pytest.raises(TypeError):
+            np.bool_(False, garbage=True)
+
+    def test_void(self):
+        with pytest.raises(TypeError):
+            np.void(b'test', garbage=True)
+
+
+class TestFromInt:
+    def test_intp(self):
+        # Ticket #99
+        assert_equal(1024, np.intp(1024))
+
+    def test_uint64_from_negative(self):
+        with pytest.warns(DeprecationWarning):
+            assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
+
+
+int_types = [np.byte, np.short, np.intc, np.int_, np.longlong]
+uint_types = [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong]
+float_types = [np.half, np.single, np.double, np.longdouble]
+cfloat_types = [np.csingle, np.cdouble, np.clongdouble]
+
+
+class TestArrayFromScalar:
+    """ gh-15467 """
+
+    def _do_test(self, t1, t2):
+        x = t1(2)
+        arr = np.array(x, dtype=t2)
+        # type should be preserved exactly
+        if t2 is None:
+            assert arr.dtype.type is t1
+        else:
+            assert arr.dtype.type is t2
+
+    @pytest.mark.parametrize('t1', int_types + uint_types)
+    @pytest.mark.parametrize('t2', int_types + uint_types + [None])
+    def test_integers(self, t1, t2):
+        return self._do_test(t1, t2)
+
+    @pytest.mark.parametrize('t1', float_types)
+    @pytest.mark.parametrize('t2', float_types + [None])
+    def test_reals(self, t1, t2):
+        return self._do_test(t1, t2)
+
+    @pytest.mark.parametrize('t1', cfloat_types)
+    @pytest.mark.parametrize('t2', cfloat_types + [None])
+    def test_complex(self, t1, t2):
+        return self._do_test(t1, t2)
+
+
+@pytest.mark.parametrize("length",
+        [5, np.int8(5), np.array(5, dtype=np.uint16)])
+def test_void_via_length(length):
+    res = np.void(length)
+    assert type(res) is np.void
+    assert res.item() == b"\0" * 5
+    assert res.dtype == "V5"
+
+@pytest.mark.parametrize("bytes_",
+        [b"spam", np.array(567.)])
+def test_void_from_byteslike(bytes_):
+    res = np.void(bytes_)
+    expected = bytes(bytes_)
+    assert type(res) is np.void
+    assert res.item() == expected
+
+    # Passing dtype can extend it (this is how filling works)
+    res = np.void(bytes_, dtype="V100")
+    assert type(res) is np.void
+    assert res.item()[:len(expected)] == expected
+    assert res.item()[len(expected):] == b"\0" * (res.nbytes - len(expected))
+    # As well as shorten:
+    res = np.void(bytes_, dtype="V4")
+    assert type(res) is np.void
+    assert res.item() == expected[:4]
+
+def test_void_arraylike_trumps_byteslike():
+    # The memoryview is converted as an array-like of shape (18,)
+    # rather than a single bytes-like of that length.
+    m = memoryview(b"just one mintleaf?")
+    res = np.void(m)
+    assert type(res) is np.ndarray
+    assert res.dtype == "V1"
+    assert res.shape == (18,)
+
+def test_void_dtype_arg():
+    # Basic test for the dtype argument (positional and keyword)
+    res = np.void((1, 2), dtype="i,i")
+    assert res.item() == (1, 2)
+    res = np.void((2, 3), "i,i")
+    assert res.item() == (2, 3)
+
+@pytest.mark.parametrize("data",
+        [5, np.int8(5), np.array(5, dtype=np.uint16)])
+def test_void_from_integer_with_dtype(data):
+    # The "length" meaning is ignored, rather data is used:
+    res = np.void(data, dtype="i,i")
+    assert type(res) is np.void
+    assert res.dtype == "i,i"
+    assert res["f0"] == 5 and res["f1"] == 5
+
+def test_void_from_structure():
+    dtype = np.dtype([('s', [('f', 'f8'), ('u', 'U1')]), ('i', 'i2')])
+    data = np.array(((1., 'a'), 2), dtype=dtype)
+    res = np.void(data[()], dtype=dtype)
+    assert type(res) is np.void
+    assert res.dtype == dtype
+    assert res == data[()]
+
+def test_void_bad_dtype():
+    with pytest.raises(TypeError,
+            match="void: descr must be a `void.*int64"):
+        np.void(4, dtype="i8")
+
+    # Subarray dtype (with shape (4,)) is rejected:
+    with pytest.raises(TypeError,
+            match=r"void: descr must be a `void.*\(4,\)"):
+        np.void(4, dtype="4i")
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalar_methods.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalar_methods.py
new file mode 100644
index 00000000..18a7bc82
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalar_methods.py
@@ -0,0 +1,204 @@
+"""
+Test scalar methods, such as float.as_integer_ratio(), .is_integer(),
+int.bit_count(), and class-getitem support
+"""
+import fractions
+import platform
+import types
+from typing import Any, Type
+
+import pytest
+import numpy as np
+
+from numpy.testing import assert_equal, assert_raises, IS_MUSL
+
+
+class TestAsIntegerRatio:
+    # derived in part from the cpython test "test_floatasratio"
+
+    @pytest.mark.parametrize("ftype", [
+        np.half, np.single, np.double, np.longdouble])
+    @pytest.mark.parametrize("f, ratio", [
+        (0.875, (7, 8)),
+        (-0.875, (-7, 8)),
+        (0.0, (0, 1)),
+        (11.5, (23, 2)),
+        ])
+    def test_small(self, ftype, f, ratio):
+        assert_equal(ftype(f).as_integer_ratio(), ratio)
+
+    @pytest.mark.parametrize("ftype", [
+        np.half, np.single, np.double, np.longdouble])
+    def test_simple_fractions(self, ftype):
+        R = fractions.Fraction
+        assert_equal(R(0, 1),
+                     R(*ftype(0.0).as_integer_ratio()))
+        assert_equal(R(5, 2),
+                     R(*ftype(2.5).as_integer_ratio()))
+        assert_equal(R(1, 2),
+                     R(*ftype(0.5).as_integer_ratio()))
+        assert_equal(R(-2100, 1),
+                     R(*ftype(-2100.0).as_integer_ratio()))
+
+    @pytest.mark.parametrize("ftype", [
+        np.half, np.single, np.double, np.longdouble])
+    def test_errors(self, ftype):
+        assert_raises(OverflowError, ftype('inf').as_integer_ratio)
+        assert_raises(OverflowError, ftype('-inf').as_integer_ratio)
+        assert_raises(ValueError, ftype('nan').as_integer_ratio)
+
+    def test_against_known_values(self):
+        R = fractions.Fraction
+        assert_equal(R(1075, 512),
+                     R(*np.half(2.1).as_integer_ratio()))
+        assert_equal(R(-1075, 512),
+                     R(*np.half(-2.1).as_integer_ratio()))
+        assert_equal(R(4404019, 2097152),
+                     R(*np.single(2.1).as_integer_ratio()))
+        assert_equal(R(-4404019, 2097152),
+                     R(*np.single(-2.1).as_integer_ratio()))
+        assert_equal(R(4728779608739021, 2251799813685248),
+                     R(*np.double(2.1).as_integer_ratio()))
+        assert_equal(R(-4728779608739021, 2251799813685248),
+                     R(*np.double(-2.1).as_integer_ratio()))
+        # longdouble is platform dependent
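+        # Reading aid: the denominators above are exact powers of two (e.g.
+        # 2251799813685248 == 2**51) because as_integer_ratio() returns the
+        # exact value of the stored binary float; 2.1 itself has no exact
+        # binary representation.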
+
+    @pytest.mark.parametrize("ftype, frac_vals, exp_vals", [
+        # dtype test cases generated using hypothesis
+        # first five generated cases per dtype
+        (np.half, [0.0, 0.01154830649280303, 0.31082276347447274,
+                   0.527350517124794, 0.8308562335072596],
+                  [0, 1, 0, -8, 12]),
+        (np.single, [0.0, 0.09248576989263226, 0.8160498218131407,
+                     0.17389442853722373, 0.7956044195067877],
+                    [0, 12, 10, 17, -26]),
+        (np.double, [0.0, 0.031066908499895136, 0.5214135908877832,
+                     0.45780736035689296, 0.5906586745934036],
+                    [0, -801, 51, 194, -653]),
+        pytest.param(
+            np.longdouble,
+            [0.0, 0.20492557202724854, 0.4277180662199366, 0.9888085019891495,
+             0.9620175814461964],
+            [0, -7400, 14266, -7822, -8721],
+            marks=[
+                pytest.mark.skipif(
+                    np.finfo(np.double) == np.finfo(np.longdouble),
+                    reason="long double is same as double"),
+                pytest.mark.skipif(
+                    platform.machine().startswith("ppc"),
+                    reason="IBM double double"),
+            ]
+        )
+    ])
+    def test_roundtrip(self, ftype, frac_vals, exp_vals):
+        for frac, exp in zip(frac_vals, exp_vals):
+            f = np.ldexp(ftype(frac), exp)
+            assert f.dtype == ftype
+            n, d = f.as_integer_ratio()
+
+            try:
+                nf = np.longdouble(n)
+                df = np.longdouble(d)
+                if not np.isfinite(df):
+                    raise OverflowError
+            except (OverflowError, RuntimeWarning):
+                # the values may not fit in any float type
+                pytest.skip("longdouble too small on this platform")
+
+            assert_equal(nf / df, f, "{}/{}".format(n, d))
+
+
+class TestIsInteger:
+    @pytest.mark.parametrize("str_value", ["inf", "nan"])
+    @pytest.mark.parametrize("code", np.typecodes["Float"])
+    def test_special(self, code: str, str_value: str) -> None:
+        cls = np.dtype(code).type
+        value = cls(str_value)
+        assert not value.is_integer()
+
+    @pytest.mark.parametrize(
+        "code", np.typecodes["Float"] + np.typecodes["AllInteger"]
+    )
+    def test_true(self, code: str) -> None:
+        float_array = np.arange(-5, 5).astype(code)
+        for value in float_array:
+            assert value.is_integer()
+
+    @pytest.mark.parametrize("code", np.typecodes["Float"])
+    def test_false(self, code: str) -> None:
+        float_array = np.arange(-5, 5).astype(code)
+        float_array *= 1.1
+        for value in float_array:
+            if value == 0:
+                continue
+            assert not value.is_integer()
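+
+        # Illustrative (verifiable in a REPL):
+        #     np.float64(2.0).is_integer()   # -> True
+        #     np.float64(2.2).is_integer()   # -> False
+        # Zero is skipped above because 0 * 1.1 is exactly 0, which *is*
+        # an integer.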
+
+
+class TestClassGetItem:
+    @pytest.mark.parametrize("cls", [
+        np.number,
+        np.integer,
+        np.inexact,
+        np.unsignedinteger,
+        np.signedinteger,
+        np.floating,
+    ])
+    def test_abc(self, cls: Type[np.number]) -> None:
+        alias = cls[Any]
+        assert isinstance(alias, types.GenericAlias)
+        assert alias.__origin__ is cls
+
+    def test_abc_complexfloating(self) -> None:
+        alias = np.complexfloating[Any, Any]
+        assert isinstance(alias, types.GenericAlias)
+        assert alias.__origin__ is np.complexfloating
+
+    @pytest.mark.parametrize("arg_len", range(4))
+    def test_abc_complexfloating_subscript_tuple(self, arg_len: int) -> None:
+        arg_tup = (Any,) * arg_len
+        if arg_len in (1, 2):
+            assert np.complexfloating[arg_tup]
+        else:
+            match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
+            with pytest.raises(TypeError, match=match):
+                np.complexfloating[arg_tup]
+
+    @pytest.mark.parametrize("cls", [np.generic, np.flexible, np.character])
+    def test_abc_non_numeric(self, cls: Type[np.generic]) -> None:
+        with pytest.raises(TypeError):
+            cls[Any]
+
+    @pytest.mark.parametrize("code", np.typecodes["All"])
+    def test_concrete(self, code: str) -> None:
+        cls = np.dtype(code).type
+        with pytest.raises(TypeError):
+            cls[Any]
+
+    @pytest.mark.parametrize("arg_len", range(4))
+    def test_subscript_tuple(self, arg_len: int) -> None:
+        arg_tup = (Any,) * arg_len
+        if arg_len == 1:
+            assert np.number[arg_tup]
+        else:
+            with pytest.raises(TypeError):
+                np.number[arg_tup]
+
+    def test_subscript_scalar(self) -> None:
+        assert np.number[Any]
+
+
+class TestBitCount:
+    # derived in part from the cpython test "test_bit_count"
+
+    @pytest.mark.parametrize("itype", np.sctypes['int']+np.sctypes['uint'])
+    def test_small(self, itype):
+        for a in range(max(np.iinfo(itype).min, 0), 128):
+            msg = f"Smoke test for {itype}({a}).bit_count()"
+            assert itype(a).bit_count() == bin(a).count("1"), msg
+
+    def test_bit_count(self):
+        for exp in [10, 17, 63]:
+            a = 2**exp
+            assert np.uint64(a).bit_count() == 1
+            assert np.uint64(a - 1).bit_count() == exp
+            assert np.uint64(a ^ 63).bit_count() == 7
+            assert np.uint64((a - 1) ^ 510).bit_count() == exp - 8
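+
+        # Reading aid: 2**exp has a single set bit; 2**exp - 1 sets the exp
+        # low bits; XOR with 63 (six low set bits) adds six more to the
+        # single bit of 2**exp; XOR with 510 (bits 1-8) clears eight of the
+        # low bits of 2**exp - 1.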
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarbuffer.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarbuffer.py
new file mode 100644
index 00000000..31b0494c
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarbuffer.py
@@ -0,0 +1,153 @@
+"""
+Test scalar buffer interface adheres to PEP 3118
+"""
+import numpy as np
+from numpy.core._rational_tests import rational
+from numpy.core._multiarray_tests import get_buffer_info
+import pytest
+
+from numpy.testing import assert_, assert_equal, assert_raises
+
+# PEP3118 format strings for native (standard alignment and byteorder) types
+scalars_and_codes = [
+    (np.bool_, '?'),
+    (np.byte, 'b'),
+    (np.short, 'h'),
+    (np.intc, 'i'),
+    (np.int_, 'l'),
+    (np.longlong, 'q'),
+    (np.ubyte, 'B'),
+    (np.ushort, 'H'),
+    (np.uintc, 'I'),
+    (np.uint, 'L'),
+    (np.ulonglong, 'Q'),
+    (np.half, 'e'),
+    (np.single, 'f'),
+    (np.double, 'd'),
+    (np.longdouble, 'g'),
+    (np.csingle, 'Zf'),
+    (np.cdouble, 'Zd'),
+    (np.clongdouble, 'Zg'),
+]
+scalars_only, codes_only = zip(*scalars_and_codes)
+
+
+class TestScalarPEP3118:
+
+    @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
+    def test_scalar_match_array(self, scalar):
+        x = scalar()
+        a = np.array([], dtype=np.dtype(scalar))
+        mv_x = memoryview(x)
+        mv_a = memoryview(a)
+        assert_equal(mv_x.format, mv_a.format)
+
+    @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
+    def test_scalar_dim(self, scalar):
+        x = scalar()
+        mv_x = memoryview(x)
+        assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize)
+        assert_equal(mv_x.ndim, 0)
+        assert_equal(mv_x.shape, ())
+        assert_equal(mv_x.strides, ())
+        assert_equal(mv_x.suboffsets, ())
+
+    @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only)
+    def test_scalar_code_and_properties(self, scalar, code):
+        x = scalar()
+        expected = dict(strides=(), itemsize=x.dtype.itemsize, ndim=0,
+                        shape=(), format=code, readonly=True)
+
+        mv_x = memoryview(x)
+        assert self._as_dict(mv_x) == expected
+
+    @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
+    def test_scalar_buffers_readonly(self, scalar):
+        x = scalar()
+        with pytest.raises(BufferError, match="scalar buffer is readonly"):
+            get_buffer_info(x, ["WRITABLE"])
+
+    def test_void_scalar_structured_data(self):
+        dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+        x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()]
+        assert_(isinstance(x, np.void))
+        mv_x = memoryview(x)
+        expected_size = 16 * np.dtype((np.str_, 1)).itemsize
+        expected_size += 2 * np.dtype(np.float64).itemsize
+        assert_equal(mv_x.itemsize, expected_size)
+        assert_equal(mv_x.ndim, 0)
+        assert_equal(mv_x.shape, ())
+        assert_equal(mv_x.strides, ())
+        assert_equal(mv_x.suboffsets, ())
+
+        # check scalar format string against ndarray format string
+        a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
+        assert_(isinstance(a, np.ndarray))
+        mv_a = memoryview(a)
+        assert_equal(mv_x.itemsize, mv_a.itemsize)
+        assert_equal(mv_x.format, mv_a.format)
+
+        # Check that we do not allow writeable buffer export (technically
+        # we could allow it sometimes here...)
+        with pytest.raises(BufferError, match="scalar buffer is readonly"):
+            get_buffer_info(x, ["WRITABLE"])
+
+    def _as_dict(self, m):
+        return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize,
+                    ndim=m.ndim, format=m.format, readonly=m.readonly)
+
+    def test_datetime_memoryview(self):
+        # gh-11656
+        # Values verified with v1.13.3, shape is not () as in test_scalar_dim
+
+        dt1 = np.datetime64('2016-01-01')
+        dt2 = np.datetime64('2017-01-01')
+        expected = dict(strides=(1,), itemsize=1, ndim=1, shape=(8,),
+                        format='B', readonly=True)
+        v = memoryview(dt1)
+        assert self._as_dict(v) == expected
+
+        v = memoryview(dt2 - dt1)
+        assert self._as_dict(v) == expected
+
+        dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+        a = np.empty(1, dt)
+        # Fails to create a PEP 3118 valid buffer
+        assert_raises((ValueError, BufferError), memoryview, a[0])
+
+        # Check that we do not allow writeable buffer export
+        with pytest.raises(BufferError, match="scalar buffer is readonly"):
+            get_buffer_info(dt1, ["WRITABLE"])
+
+    @pytest.mark.parametrize('s', [
+        pytest.param("\x32\x32", id="ascii"),
+        pytest.param("\uFE0F\uFE0F", id="basic multilingual"),
+        pytest.param("\U0001f4bb\U0001f4bb", id="non-BMP"),
+    ])
+    def test_str_ucs4(self, s):
+        s = np.str_(s)  # only our subclass implements the buffer protocol
+
+        # all the same, characters always encode as ucs4
+        expected = dict(strides=(), itemsize=8, ndim=0, shape=(), format='2w',
+                        readonly=True)
+
+        v = memoryview(s)
+        assert self._as_dict(v) == expected
+
+        # integers of the platform-appropriate endianness
+        code_points = np.frombuffer(v, dtype='i4')
+
+        assert_equal(code_points, [ord(c) for c in s])
+
+        # Check that we do not allow writeable buffer export
+        with pytest.raises(BufferError, match="scalar buffer is readonly"):
+            get_buffer_info(s, ["WRITABLE"])
+
+    def test_user_scalar_fails_buffer(self):
+        r = rational(1)
+        with assert_raises(TypeError):
+            memoryview(r)
+
+        # Check that we do not allow writeable buffer export
+        with pytest.raises(BufferError, match="scalar buffer is readonly"):
+            get_buffer_info(r, ["WRITABLE"])
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarinherit.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarinherit.py
new file mode 100644
index 00000000..f9c574d5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarinherit.py
@@ -0,0 +1,98 @@
+""" Test printing of scalar types.
+
+"""
+import pytest
+
+import numpy as np
+from numpy.testing import assert_, assert_raises
+
+
+class A:
+    pass
+class B(A, np.float64):
+    pass
+
+class C(B):
+    pass
+class D(C, B):
+    pass
+
+class B0(np.float64, A):
+    pass
+class C0(B0):
+    pass
+
+class HasNew:
+    def __new__(cls, *args, **kwargs):
+        return cls, args, kwargs
+
+class B1(np.float64, HasNew):
+    pass
+
+
+class TestInherit:
+    def test_init(self):
+        x = B(1.0)
+        assert_(str(x) == '1.0')
+        y = C(2.0)
+        assert_(str(y) == '2.0')
+        z = D(3.0)
+        assert_(str(z) == '3.0')
+
+    def test_init2(self):
+        x = B0(1.0)
+        assert_(str(x) == '1.0')
+        y = C0(2.0)
+        assert_(str(y) == '2.0')
+
+    def test_gh_15395(self):
+        # HasNew is the second base, so `np.float64` should have priority
+        x = B1(1.0)
+        assert_(str(x) == '1.0')
+
+        # previously caused RecursionError!?
+        with pytest.raises(TypeError):
+            B1(1.0, 2.0)
+
+
+class TestCharacter:
+    def test_char_radd(self):
+        # GH issue 9620, reached gentype_add and raised a TypeError
+        np_s = np.bytes_('abc')
+        np_u = np.str_('abc')
+        s = b'def'
+        u = 'def'
+        assert_(np_s.__radd__(np_s) is NotImplemented)
+        assert_(np_s.__radd__(np_u) is NotImplemented)
+        assert_(np_s.__radd__(s) is NotImplemented)
+        assert_(np_s.__radd__(u) is NotImplemented)
+        assert_(np_u.__radd__(np_s) is NotImplemented)
+        assert_(np_u.__radd__(np_u) is NotImplemented)
+        assert_(np_u.__radd__(s) is NotImplemented)
+        assert_(np_u.__radd__(u) is NotImplemented)
+        assert_(s + np_s == b'defabc')
+        assert_(u + np_u == 'defabc')
+
+        class MyStr(str, np.generic):
+            # would segfault
+            pass
+
+        with assert_raises(TypeError):
+            # Previously worked, but gave completely wrong result
+            ret = s + MyStr('abc')
+
+        class MyBytes(bytes, np.generic):
+            # would segfault
+            pass
+
+        ret = s + MyBytes(b'abc')
+        assert type(ret) is type(s)
+        assert ret == b"defabc"
+
+    def test_char_repeat(self):
+        np_s = np.bytes_('abc')
+        np_u = np.str_('abc')
+        res_s = b'abc' * 5
+        res_u = 'abc' * 5
+        assert_(np_s * 5 == res_s)
+        assert_(np_u * 5 == res_u)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarmath.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarmath.py
new file mode 100644
index 00000000..9977c8b1
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarmath.py
@@ -0,0 +1,1100 @@
+import contextlib
+import sys
+import warnings
+import itertools
+import operator
+import platform
+from numpy._utils import _pep440
+import pytest
+from hypothesis import given, settings
+from hypothesis.strategies import sampled_from
+from hypothesis.extra import numpy as hynp
+
+import numpy as np
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_almost_equal,
+    assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
+    assert_warns, _SUPPORTS_SVE,
+    )
+
+try:
+    COMPILERS = np.show_config(mode="dicts")["Compilers"]
+    USING_CLANG_CL = COMPILERS["c"]["name"] == "clang-cl"
+except TypeError:
+    USING_CLANG_CL = False
+
+types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
+         np.int_, np.uint, np.longlong, np.ulonglong,
+         np.single, np.double, np.longdouble, np.csingle,
+         np.cdouble, np.clongdouble]
+
+floating_types = np.floating.__subclasses__()
+complex_floating_types = np.complexfloating.__subclasses__()
+
+objecty_things = [object(), None]
+
+reasonable_operators_for_scalars = [
+    operator.lt, operator.le, operator.eq, operator.ne, operator.ge,
+    operator.gt, operator.add, operator.floordiv, operator.mod,
+    operator.mul, operator.pow, operator.sub, operator.truediv,
+]
+
+
+# This compares scalarmath against ufuncs.
+
+class TestTypes:
+    def test_types(self):
+        for atype in types:
+            a = atype(1)
+            assert_(a == 1, "error with %r: got %r" % (atype, a))
+
+    def test_type_add(self):
+        # list of types
+        for k, atype in enumerate(types):
+            a_scalar = atype(3)
+            a_array = np.array([3], dtype=atype)
+            for l, btype in enumerate(types):
+                b_scalar = btype(1)
+                b_array = np.array([1], dtype=btype)
+                c_scalar = a_scalar + b_scalar
+                c_array = a_array + b_array
+                # It was comparing the type numbers, but the new ufunc
+                # function-finding mechanism finds the lowest function
+                # to which both inputs can be cast - which produces 'l'
+                # when you do 'q' + 'b'.  The old function finding mechanism
+                # skipped ahead based on the first argument, but that
+                # does not produce properly symmetric results...
+                assert_equal(c_scalar.dtype, c_array.dtype,
+                           "error with types (%d/'%c' + %d/'%c')" %
+                            (k, np.dtype(atype).char, l, np.dtype(btype).char))
+
+    def test_type_create(self):
+        for k, atype in enumerate(types):
+            a = np.array([1, 2, 3], atype)
+            b = atype([1, 2, 3])
+            assert_equal(a, b)
+
+    def test_leak(self):
+        # test leak of scalar objects
+        # a leak would show up in valgrind as ~2.6MB of still-reachable
+        # memory
+        for i in range(200000):
+            np.add(1, 1)
+
+
+def check_ufunc_scalar_equivalence(op, arr1, arr2):
+    scalar1 = arr1[()]
+    scalar2 = arr2[()]
+    assert isinstance(scalar1, np.generic)
+    assert isinstance(scalar2, np.generic)
+
+    if arr1.dtype.kind == "c" or arr2.dtype.kind == "c":
+        comp_ops = {operator.ge, operator.gt, operator.le, operator.lt}
+        if op in comp_ops and (np.isnan(scalar1) or np.isnan(scalar2)):
+            pytest.xfail("complex comp ufuncs use sort-order, scalars do not.")
+    if op == operator.pow and arr2.item() in [-1, 0, 0.5, 1, 2]:
+        # array**scalar special case can have different result dtype
+        # (Other powers may have issues also, but are not hit here.)
+        # TODO: It would be nice to resolve this issue.
+        pytest.skip("array**2 can have incorrect/weird result dtype")
+
+    # ignore fpe's since they may just mismatch for integers anyway.
+    with warnings.catch_warnings(), np.errstate(all="ignore"):
+        # Comparison DeprecationWarnings are replacing errors (2022-03):
+        warnings.simplefilter("error", DeprecationWarning)
+        try:
+            res = op(arr1, arr2)
+        except Exception as e:
+            with pytest.raises(type(e)):
+                op(scalar1, scalar2)
+        else:
+            scalar_res = op(scalar1, scalar2)
+            assert_array_equal(scalar_res, res, strict=True)
+
+
+@pytest.mark.slow
+@settings(max_examples=10000, deadline=2000)
+@given(sampled_from(reasonable_operators_for_scalars),
+       hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()),
+       hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()))
+def test_array_scalar_ufunc_equivalence(op, arr1, arr2):
+    """
+    This is a thorough test attempting to cover important promotion paths
+    and ensuring that arrays and scalars stay as aligned as possible.
+    However, if it creates troubles, it should maybe just be removed.
+    """
+    check_ufunc_scalar_equivalence(op, arr1, arr2)
+
+
+@pytest.mark.slow
+@given(sampled_from(reasonable_operators_for_scalars),
+       hynp.scalar_dtypes(), hynp.scalar_dtypes())
+def test_array_scalar_ufunc_dtypes(op, dt1, dt2):
+    # Same as above, but don't worry about sampling weird values so that we
+    # do not have to sample as much
+    arr1 = np.array(2, dtype=dt1)
+    arr2 = np.array(3, dtype=dt2)  # some powers do weird things.
+
+    check_ufunc_scalar_equivalence(op, arr1, arr2)
+
+
+@pytest.mark.parametrize("fscalar", [np.float16, np.float32])
+def test_int_float_promotion_truediv(fscalar):
+    # Promotion for mixed int and float32/float16 must not go to float64
+    i = np.int8(1)
+    f = fscalar(1)
+    expected = np.result_type(i, f)
+    assert (i / f).dtype == expected
+    assert (f / i).dtype == expected
+    # But normal int / int true division goes to float64:
+    assert (i / i).dtype == np.dtype("float64")
+    # For int16, the result has to be at least float32 (takes ufunc path):
+    assert (np.int16(1) / f).dtype == np.dtype("float32")
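+
+    # Reading aid (illustrative): the promotion rules used above can be
+    # queried directly:
+    #     np.result_type(np.int8, np.float16)   # -> dtype('float16')
+    #     np.result_type(np.int16, np.float16)  # -> dtype('float32')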
+
+
+class TestBaseMath:
+    @pytest.mark.xfail(_SUPPORTS_SVE, reason="gh-22982")
+    def test_blocked(self):
+        # test alignments offsets for simd instructions
+        # alignments for vz + 2 * (vs - 1) + 1
+        for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
+            for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
+                                                            type='binary',
+                                                            max_size=sz):
+                exp1 = np.ones_like(inp1)
+                inp1[...] = np.ones_like(inp1)
+                inp2[...] = np.zeros_like(inp2)
+                assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
+                assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
+                assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
+
+                np.add(inp1, inp2, out=out)
+                assert_almost_equal(out, exp1, err_msg=msg)
+
+                inp2[...] += np.arange(inp2.size, dtype=dt) + 1
+                assert_almost_equal(np.square(inp2),
+                                    np.multiply(inp2, inp2),  err_msg=msg)
+                # skip true divide for ints
+                if dt != np.int32:
+                    assert_almost_equal(np.reciprocal(inp2),
+                                        np.divide(1, inp2),  err_msg=msg)
+
+                inp1[...] = np.ones_like(inp1)
+                np.add(inp1, 2, out=out)
+                assert_almost_equal(out, exp1 + 2, err_msg=msg)
+                inp2[...] = np.ones_like(inp2)
+                np.add(2, inp2, out=out)
+                assert_almost_equal(out, exp1 + 2, err_msg=msg)
+
+    def test_lower_align(self):
+        # check data that is not aligned to element size
+        # i.e. doubles are aligned to 4 bytes on i386
+        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+        o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+        assert_almost_equal(d + d, d * 2)
+        np.add(d, d, out=o)
+        np.add(np.ones_like(d), d, out=o)
+        np.add(d, np.ones_like(d), out=o)
+        np.add(np.ones_like(d), d)
+        np.add(d, np.ones_like(d))
+
+
+class TestPower:
+    def test_small_types(self):
+        for t in [np.int8, np.int16, np.float16]:
+            a = t(3)
+            b = a ** 4
+            assert_(b == 81, "error with %r: got %r" % (t, b))
+
+    def test_large_types(self):
+        for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
+            a = t(51)
+            b = a ** 4
+            msg = "error with %r: got %r" % (t, b)
+            if np.issubdtype(t, np.integer):
+                assert_(b == 6765201, msg)
+            else:
+                assert_almost_equal(b, 6765201, err_msg=msg)
+
+    def test_integers_to_negative_integer_power(self):
+        # Note that the combination of uint64 with a signed integer
+        # has common type np.float64. The other combinations should all
+        # raise a ValueError for integer ** negative integer.
+        exp = [np.array(-1, dt)[()] for dt in 'bhilq']
+
+        # 1 ** -1 possible special case
+        base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
+        for i1, i2 in itertools.product(base, exp):
+            if i1.dtype != np.uint64:
+                assert_raises(ValueError, operator.pow, i1, i2)
+            else:
+                res = operator.pow(i1, i2)
+                assert_(res.dtype.type is np.float64)
+                assert_almost_equal(res, 1.)
+
+        # -1 ** -1 possible special case
+        base = [np.array(-1, dt)[()] for dt in 'bhilq']
+        for i1, i2 in itertools.product(base, exp):
+            if i1.dtype != np.uint64:
+                assert_raises(ValueError, operator.pow, i1, i2)
+            else:
+                res = operator.pow(i1, i2)
+                assert_(res.dtype.type is np.float64)
+                assert_almost_equal(res, -1.)
+
+        # 2 ** -1 perhaps generic
+        base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']
+        for i1, i2 in itertools.product(base, exp):
+            if i1.dtype != np.uint64:
+                assert_raises(ValueError, operator.pow, i1, i2)
+            else:
+                res = operator.pow(i1, i2)
+                assert_(res.dtype.type is np.float64)
+                assert_almost_equal(res, .5)
+
+    def test_mixed_types(self):
+        typelist = [np.int8, np.int16, np.float16,
+                    np.float32, np.float64, np.int8,
+                    np.int16, np.int32, np.int64]
+        for t1 in typelist:
+            for t2 in typelist:
+                a = t1(3)
+                b = t2(2)
+                result = a**b
+                msg = ("error with %r and %r:"
+                       "got %r, expected %r") % (t1, t2, result, 9)
+                if np.issubdtype(np.dtype(result), np.integer):
+                    assert_(result == 9, msg)
+                else:
+                    assert_almost_equal(result, 9, err_msg=msg)
+
+    def test_modular_power(self):
+        # modular power is not implemented, so ensure it errors
+        a = 5
+        b = 4
+        c = 10
+        expected = pow(a, b, c)  # noqa: F841
+        for t in (np.int32, np.float32, np.complex64):
+            # note that 3-operand power only dispatches on the first argument
+            assert_raises(TypeError, operator.pow, t(a), b, c)
+            assert_raises(TypeError, operator.pow, np.array(t(a)), b, c)
+
+
+def floordiv_and_mod(x, y):
+    return (x // y, x % y)
+
+
+def _signs(dt):
+    if dt in np.typecodes['UnsignedInteger']:
+        return (+1,)
+    else:
+        return (+1, -1)
+
+
+class TestModulus:
+
+    def test_modulus_basic(self):
+        dt = np.typecodes['AllInteger'] + np.typecodes['Float']
+        for op in [floordiv_and_mod, divmod]:
+            for dt1, dt2 in itertools.product(dt, dt):
+                for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
+                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+                    msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+                    a = np.array(sg1*71, dtype=dt1)[()]
+                    b = np.array(sg2*19, dtype=dt2)[()]
+                    div, rem = op(a, b)
+                    assert_equal(div*b + rem, a, err_msg=msg)
+                    if sg2 == -1:
+                        assert_(b < rem <= 0, msg)
+                    else:
+                        assert_(b > rem >= 0, msg)
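+
+        # Reading aid: like Python, NumPy uses floor semantics here, so the
+        # remainder takes the sign of the divisor, e.g.
+        #     divmod(71, -19) == (-4, -5)   # since -4 * -19 + -5 == 71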
+
+    def test_float_modulus_exact(self):
+        # test that float results are exact for small integers. This also
+        # holds for the same integers scaled by powers of two.
+        nlst = list(range(-127, 0))
+        plst = list(range(1, 128))
+        dividend = nlst + [0] + plst
+        divisor = nlst + plst
+        arg = list(itertools.product(dividend, divisor))
+        tgt = list(divmod(*t) for t in arg)
+
+        a, b = np.array(arg, dtype=int).T
+        # convert exact integer results from Python to float so that
+        # signed zero can be used, it is checked.
+        tgtdiv, tgtrem = np.array(tgt, dtype=float).T
+        tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
+        tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
+
+        for op in [floordiv_and_mod, divmod]:
+            for dt in np.typecodes['Float']:
+                msg = 'op: %s, dtype: %s' % (op.__name__, dt)
+                fa = a.astype(dt)
+                fb = b.astype(dt)
+                # use list comprehension so a_ and b_ are scalars
+                div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
+                assert_equal(div, tgtdiv, err_msg=msg)
+                assert_equal(rem, tgtrem, err_msg=msg)
+
+    def test_float_modulus_roundoff(self):
+        # gh-6127
+        dt = np.typecodes['Float']
+        for op in [floordiv_and_mod, divmod]:
+            for dt1, dt2 in itertools.product(dt, dt):
+                for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
+                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+                    msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+                    a = np.array(sg1*78*6e-8, dtype=dt1)[()]
+                    b = np.array(sg2*6e-8, dtype=dt2)[()]
+                    div, rem = op(a, b)
+                    # Equal assertion should hold when fmod is used
+                    assert_equal(div*b + rem, a, err_msg=msg)
+                    if sg2 == -1:
+                        assert_(b < rem <= 0, msg)
+                    else:
+                        assert_(b > rem >= 0, msg)
+
+    def test_float_modulus_corner_cases(self):
+        # Check remainder magnitude.
+        for dt in np.typecodes['Float']:
+            b = np.array(1.0, dtype=dt)
+            a = np.nextafter(np.array(0.0, dtype=dt), -b)
+            rem = operator.mod(a, b)
+            assert_(rem <= b, 'dt: %s' % dt)
+            rem = operator.mod(-a, -b)
+            assert_(rem >= -b, 'dt: %s' % dt)
+
+        # Check nans, inf
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in remainder")
+            sup.filter(RuntimeWarning, "divide by zero encountered in remainder")
+            sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide")
+            sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
+            sup.filter(RuntimeWarning, "invalid value encountered in divmod")
+            for dt in np.typecodes['Float']:
+                fone = np.array(1.0, dtype=dt)
+                fzer = np.array(0.0, dtype=dt)
+                finf = np.array(np.inf, dtype=dt)
+                fnan = np.array(np.nan, dtype=dt)
+                rem = operator.mod(fone, fzer)
+                assert_(np.isnan(rem), 'dt: %s' % dt)
+                # MSVC 2008 returns NaN here, so disable the check.
+                #rem = operator.mod(fone, finf)
+                #assert_(rem == fone, 'dt: %s' % dt)
+                rem = operator.mod(fone, fnan)
+                assert_(np.isnan(rem), 'dt: %s' % dt)
+                rem = operator.mod(finf, fone)
+                assert_(np.isnan(rem), 'dt: %s' % dt)
+                for op in [floordiv_and_mod, divmod]:
+                    div, mod = op(fone, fzer)
+                    assert_(np.isinf(div))
+                    assert_(np.isnan(mod))
+
+    def test_inplace_floordiv_handling(self):
+        # issue gh-12927
+        # this only applies to in-place floordiv //=, because the output type
+        # promotes to float which does not fit
+        a = np.array([1, 2], np.int64)
+        b = np.array([1, 2], np.uint64)
+        with pytest.raises(TypeError,
+                match=r"Cannot cast ufunc 'floor_divide' output from"):
+            a //= b
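+        # Background (a sketch): np.result_type(np.int64, np.uint64) is
+        # float64, and casting the float64 floor_divide result back to int64
+        # would violate the default 'same_kind' casting rule, hence the
+        # TypeError.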
+
+
+class TestComplexDivision:
+    def test_zero_division(self):
+        with np.errstate(all="ignore"):
+            for t in [np.complex64, np.complex128]:
+                a = t(0.0)
+                b = t(1.0)
+                assert_(np.isinf(b/a))
+                b = t(complex(np.inf, np.inf))
+                assert_(np.isinf(b/a))
+                b = t(complex(np.inf, np.nan))
+                assert_(np.isinf(b/a))
+                b = t(complex(np.nan, np.inf))
+                assert_(np.isinf(b/a))
+                b = t(complex(np.nan, np.nan))
+                assert_(np.isnan(b/a))
+                b = t(0.)
+                assert_(np.isnan(b/a))
+
+    def test_signed_zeros(self):
+        with np.errstate(all="ignore"):
+            for t in [np.complex64, np.complex128]:
+                # tupled (numerator, denominator, expected)
+                # for testing as expected == numerator/denominator
+                data = (
+                    (( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)),
+                    (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
+                    (( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)),
+                    (( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)),
+                    (( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)),
+                    (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
+                    ((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
+                    ((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0))
+                )
+                for cases in data:
+                    n = cases[0]
+                    d = cases[1]
+                    ex = cases[2]
+                    result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
+                    # check real and imag parts separately to avoid comparison
+                    # in array context, which does not account for signed zeros
+                    assert_equal(result.real, ex[0])
+                    assert_equal(result.imag, ex[1])
+
+    def test_branches(self):
+        with np.errstate(all="ignore"):
+            for t in [np.complex64, np.complex128]:
+                # tupled (numerator, denominator, expected)
+                # for testing as expected == numerator/denominator
+                data = list()
+
+                # trigger branch: real(fabs(denom)) > imag(fabs(denom))
+                # followed by else condition as neither are == 0
+                data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0)))
+
+                # trigger branch: real(fabs(denom)) > imag(fabs(denom))
+                # followed by if condition as both are == 0
+                # is performed in test_zero_division(), so this is skipped
+
+                # trigger else if branch: real(fabs(denom)) < imag(fabs(denom))
+                data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0)))
+
+                for cases in data:
+                    n = cases[0]
+                    d = cases[1]
+                    ex = cases[2]
+                    result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
+                    # check real and imag parts separately to avoid comparison
+                    # in array context, which does not account for signed zeros
+                    assert_equal(result.real, ex[0])
+                    assert_equal(result.imag, ex[1])
+
+
+class TestConversion:
+    def test_int_from_long(self):
+        l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
+        li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
+        for T in [None, np.float64, np.int64]:
+            a = np.array(l, dtype=T)
+            assert_equal([int(_m) for _m in a], li)
+
+        a = np.array(l[:3], dtype=np.uint64)
+        assert_equal([int(_m) for _m in a], li[:3])
+
+    def test_iinfo_long_values(self):
+        for code in 'bBhH':
+            with pytest.warns(DeprecationWarning):
+                res = np.array(np.iinfo(code).max + 1, dtype=code)
+            tgt = np.iinfo(code).min
+            assert_(res == tgt)
+
+        for code in np.typecodes['AllInteger']:
+            res = np.array(np.iinfo(code).max, dtype=code)
+            tgt = np.iinfo(code).max
+            assert_(res == tgt)
+
+        for code in np.typecodes['AllInteger']:
+            res = np.dtype(code).type(np.iinfo(code).max)
+            tgt = np.iinfo(code).max
+            assert_(res == tgt)
+
+    def test_int_raise_behaviour(self):
+        def overflow_error_func(dtype):
+            dtype(np.iinfo(dtype).max + 1)
+
+        for code in [np.int_, np.uint, np.longlong, np.ulonglong]:
+            assert_raises(OverflowError, overflow_error_func, code)
+
+    def test_int_from_infinite_longdouble(self):
+        # gh-627
+        x = np.longdouble(np.inf)
+        assert_raises(OverflowError, int, x)
+        with suppress_warnings() as sup:
+            sup.record(np.ComplexWarning)
+            x = np.clongdouble(np.inf)
+            assert_raises(OverflowError, int, x)
+            assert_equal(len(sup.log), 1)
+
+    @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)")
+    def test_int_from_infinite_longdouble___int__(self):
+        x = np.longdouble(np.inf)
+        assert_raises(OverflowError, x.__int__)
+        with suppress_warnings() as sup:
+            sup.record(np.ComplexWarning)
+            x = np.clongdouble(np.inf)
+            assert_raises(OverflowError, x.__int__)
+            assert_equal(len(sup.log), 1)
+
+    @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+                        reason="long double is same as double")
+    @pytest.mark.skipif(platform.machine().startswith("ppc"),
+                        reason="IBM double double")
+    def test_int_from_huge_longdouble(self):
+        # Produce a longdouble that would overflow a double,
+        # use exponent that avoids bug in Darwin pow function.
+        exp = np.finfo(np.double).maxexp - 1
+        huge_ld = 2 * 1234 * np.longdouble(2) ** exp
+        huge_i = 2 * 1234 * 2 ** exp
+        assert_(huge_ld != np.inf)
+        assert_equal(int(huge_ld), huge_i)
+
+    def test_int_from_longdouble(self):
+        x = np.longdouble(1.5)
+        assert_equal(int(x), 1)
+        x = np.longdouble(-10.5)
+        assert_equal(int(x), -10)
+
+    def test_numpy_scalar_relational_operators(self):
+        # All integer
+        for dt1 in np.typecodes['AllInteger']:
+            assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))
+            assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))
+
+            for dt2 in np.typecodes['AllInteger']:
+                assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()],
+                        "type %s and %s failed" % (dt1, dt2))
+                assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()],
+                        "type %s and %s failed" % (dt1, dt2))
+
+        #Unsigned integers
+        for dt1 in 'BHILQP':
+            assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
+            assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
+            assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
+
+            #unsigned vs signed
+            for dt2 in 'bhilqp':
+                assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
+                        "type %s and %s failed" % (dt1, dt2))
+                assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
+                        "type %s and %s failed" % (dt1, dt2))
+                assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()],
+                        "type %s and %s failed" % (dt1, dt2))
+
+        #Signed integers and floats
+        for dt1 in 'bhlqp' + np.typecodes['Float']:
+            assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
+            assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
+            assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
+
+            for dt2 in 'bhlqp' + np.typecodes['Float']:
+                assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
+                        "type %s and %s failed" % (dt1, dt2))
+                assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
+                        "type %s and %s failed" % (dt1, dt2))
+                assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()],
+                        "type %s and %s failed" % (dt1, dt2))
+
+    def test_scalar_comparison_to_none(self):
+        # Scalars should just return False and not give a warning.
+        # The comparisons are flagged by pep8, ignore that.
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', FutureWarning)
+            assert_(not np.float32(1) == None)
+            assert_(not np.str_('test') == None)
+            # This is dubious (see below):
+            assert_(not np.datetime64('NaT') == None)
+
+            assert_(np.float32(1) != None)
+            assert_(np.str_('test') != None)
+            # This is dubious (see below):
+            assert_(np.datetime64('NaT') != None)
+        assert_(len(w) == 0)
+
+        # For documentation purposes, this is why the datetime is dubious.
+        # At the time of deprecation this was no behaviour change, but
+        # it has to be considered when the deprecations are done.
+        assert_(np.equal(np.datetime64('NaT'), None))
+
+
+#class TestRepr:
+#    def test_repr(self):
+#        for t in types:
+#            val = t(1197346475.0137341)
+#            val_repr = repr(val)
+#            val2 = eval(val_repr)
+#            assert_equal( val, val2 )
+
+
+class TestRepr:
+    def _test_type_repr(self, t):
+        finfo = np.finfo(t)
+        last_fraction_bit_idx = finfo.nexp + finfo.nmant
+        last_exponent_bit_idx = finfo.nexp
+        storage_bytes = np.dtype(t).itemsize*8
+        # could add some more types to the list below
+        for which in ['small denorm', 'small norm']:
+            # Values from https://en.wikipedia.org/wiki/IEEE_754
+            constr = np.array([0x00]*storage_bytes, dtype=np.uint8)
+            if which == 'small denorm':
+                byte = last_fraction_bit_idx // 8
+                bytebit = 7-(last_fraction_bit_idx % 8)
+                constr[byte] = 1 << bytebit
+            elif which == 'small norm':
+                byte = last_exponent_bit_idx // 8
+                bytebit = 7-(last_exponent_bit_idx % 8)
+                constr[byte] = 1 << bytebit
+            else:
+                raise ValueError('hmm')
+            val = constr.view(t)[0]
+            val_repr = repr(val)
+            val2 = t(eval(val_repr))
+            if not (val2 == 0 and val < 1e-100):
+                assert_equal(val, val2)
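+        # Intended values (a sketch, not original commentary): with only the
+        # lowest mantissa bit set this is the smallest subnormal (2**-149 for
+        # float32); with only the lowest exponent bit set it is the smallest
+        # normal (2**-126).  The assertion only relies on the repr
+        # round-trip, not on the exact value constructed.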
+
+    def test_float_repr(self):
+        # The long double test cannot work, because eval goes through a
+        # Python float.
+        for t in [np.float32, np.float64]:
+            self._test_type_repr(t)
+
+
+if not IS_PYPY:
+    # sys.getsizeof() is not valid on PyPy
+    class TestSizeOf:
+
+        def test_equal_nbytes(self):
+            for type in types:
+                x = type(0)
+                assert_(sys.getsizeof(x) > x.nbytes)
+
+        def test_error(self):
+            d = np.float32()
+            assert_raises(TypeError, d.__sizeof__, "a")
+
+
+class TestMultiply:
+    def test_seq_repeat(self):
+        # Test that basic sequences get repeated when multiplied with
+        # NumPy integers, and that errors are raised when multiplied with
+        # others. Some of this behaviour may be controversial and could be
+        # open for change.
+        accepted_types = set(np.typecodes["AllInteger"])
+        deprecated_types = {'?'}
+        forbidden_types = (
+            set(np.typecodes["All"]) - accepted_types - deprecated_types)
+        forbidden_types -= {'V'}  # can't default-construct void scalars
+
+        for seq_type in (list, tuple):
+            seq = seq_type([1, 2, 3])
+            for numpy_type in accepted_types:
+                i = np.dtype(numpy_type).type(2)
+                assert_equal(seq * i, seq * int(i))
+                assert_equal(i * seq, int(i) * seq)
+
+            for numpy_type in deprecated_types:
+                i = np.dtype(numpy_type).type()
+                assert_equal(
+                    assert_warns(DeprecationWarning, operator.mul, seq, i),
+                    seq * int(i))
+                assert_equal(
+                    assert_warns(DeprecationWarning, operator.mul, i, seq),
+                    int(i) * seq)
+
+            for numpy_type in forbidden_types:
+                i = np.dtype(numpy_type).type()
+                assert_raises(TypeError, operator.mul, seq, i)
+                assert_raises(TypeError, operator.mul, i, seq)
+
+    def test_no_seq_repeat_basic_array_like(self):
+        # Test that an array-like which does not know how to be multiplied
+        # does not attempt sequence repeat (which would raise a TypeError).
+        # See also gh-7428.
+        class ArrayLike:
+            def __init__(self, arr):
+                self.arr = arr
+            def __array__(self):
+                return self.arr
+
+        # Test for simple ArrayLike above and memoryviews (original report)
+        for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))):
+            assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.))
+            assert_array_equal(np.float32(3.) * arr_like, np.full(3, 3.))
+            assert_array_equal(arr_like * np.int_(3), np.full(3, 3))
+            assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))
+
+
+class TestNegative:
+    def test_exceptions(self):
+        a = np.ones((), dtype=np.bool_)[()]
+        assert_raises(TypeError, operator.neg, a)
+
+    def test_result(self):
+        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning)
+            for dt in types:
+                a = np.ones((), dtype=dt)[()]
+                if dt in np.typecodes['UnsignedInteger']:
+                    st = np.dtype(dt).type
+                    max = st(np.iinfo(dt).max)
+                    assert_equal(operator.neg(a), max)
+                else:
+                    assert_equal(operator.neg(a) + a, 0)
+
+
+class TestSubtract:
+    def test_exceptions(self):
+        a = np.ones((), dtype=np.bool_)[()]
+        assert_raises(TypeError, operator.sub, a, a)
+
+    def test_result(self):
+        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning)
+            for dt in types:
+                a = np.ones((), dtype=dt)[()]
+                assert_equal(operator.sub(a, a), 0)
+
+
+class TestAbs:
+    def _test_abs_func(self, absfunc, test_dtype):
+        x = test_dtype(-1.5)
+        assert_equal(absfunc(x), 1.5)
+        x = test_dtype(0.0)
+        res = absfunc(x)
+        # assert_equal() checks zero signedness
+        assert_equal(res, 0.0)
+        x = test_dtype(-0.0)
+        res = absfunc(x)
+        assert_equal(res, 0.0)
+
+        x = test_dtype(np.finfo(test_dtype).max)
+        assert_equal(absfunc(x), x.real)
+
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning)
+            x = test_dtype(np.finfo(test_dtype).tiny)
+            assert_equal(absfunc(x), x.real)
+
+        x = test_dtype(np.finfo(test_dtype).min)
+        assert_equal(absfunc(x), -x.real)
+
+    @pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
+    def test_builtin_abs(self, dtype):
+        if (
+                sys.platform == "cygwin" and dtype == np.clongdouble and
+                (
+                    _pep440.parse(platform.release().split("-")[0])
+                    < _pep440.Version("3.3.0")
+                )
+        ):
+            pytest.xfail(
+                reason="absl is computed in double precision on cygwin < 3.3"
+            )
+        self._test_abs_func(abs, dtype)
+
+    @pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
+    def test_numpy_abs(self, dtype):
+        if (
+                sys.platform == "cygwin" and dtype == np.clongdouble and
+                (
+                    _pep440.parse(platform.release().split("-")[0])
+                    < _pep440.Version("3.3.0")
+                )
+        ):
+            pytest.xfail(
+                reason="absl is computed in double precision on cygwin < 3.3"
+            )
+        self._test_abs_func(np.abs, dtype)
+
+
+class TestBitShifts:
+
+    @pytest.mark.parametrize('type_code', np.typecodes['AllInteger'])
+    @pytest.mark.parametrize('op',
+        [operator.rshift, operator.lshift], ids=['>>', '<<'])
+    def test_shift_all_bits(self, type_code, op):
+        """Shifts where the shift amount is the width of the type or wider """
+        if (
+                USING_CLANG_CL and
+                type_code in ("l", "L") and
+                op is operator.lshift
+        ):
+            pytest.xfail("Failing on clang-cl builds")
+        # gh-2449
+        dt = np.dtype(type_code)
+        nbits = dt.itemsize * 8
+        for val in [5, -5]:
+            for shift in [nbits, nbits + 4]:
+                val_scl = np.array(val).astype(dt)[()]
+                shift_scl = dt.type(shift)
+                res_scl = op(val_scl, shift_scl)
+                if val_scl < 0 and op is operator.rshift:
+                    # sign bit is preserved
+                    assert_equal(res_scl, -1)
+                else:
+                    assert_equal(res_scl, 0)
+
+                # Result on scalars should be the same as on arrays
+                val_arr = np.array([val_scl]*32, dtype=dt)
+                shift_arr = np.array([shift]*32, dtype=dt)
+                res_arr = op(val_arr, shift_arr)
+                assert_equal(res_arr, res_scl)
+
+
+class TestHash:
+    @pytest.mark.parametrize("type_code", np.typecodes['AllInteger'])
+    def test_integer_hashes(self, type_code):
+        scalar = np.dtype(type_code).type
+        for i in range(128):
+            assert hash(i) == hash(scalar(i))
+
+    @pytest.mark.parametrize("type_code", np.typecodes['AllFloat'])
+    def test_float_and_complex_hashes(self, type_code):
+        scalar = np.dtype(type_code).type
+        for val in [np.pi, np.inf, 3, 6.]:
+            numpy_val = scalar(val)
+            # Cast back to Python, in case the NumPy scalar has less precision
+            if numpy_val.dtype.kind == 'c':
+                val = complex(numpy_val)
+            else:
+                val = float(numpy_val)
+            assert val == numpy_val
+            assert hash(val) == hash(numpy_val)
+
+        if hash(float(np.nan)) != hash(float(np.nan)):
+            # If Python distinguishes different NaNs we do so too (gh-18833)
+            assert hash(scalar(np.nan)) != hash(scalar(np.nan))
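+        # Context (a sketch): Python requires objects that compare equal to
+        # hash equal, even across types, so hash(np.float64(3.0)) must match
+        # hash(3.0) and hash(3).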
+
+    @pytest.mark.parametrize("type_code", np.typecodes['Complex'])
+    def test_complex_hashes(self, type_code):
+        # Test some complex valued hashes specifically:
+        scalar = np.dtype(type_code).type
+        for val in [np.pi+1j, np.inf-3j, 3j, 6.+1j]:
+            numpy_val = scalar(val)
+            assert hash(complex(numpy_val)) == hash(numpy_val)
+
+
+@contextlib.contextmanager
+def recursionlimit(n):
+    o = sys.getrecursionlimit()
+    try:
+        sys.setrecursionlimit(n)
+        yield
+    finally:
+        sys.setrecursionlimit(o)
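+# Why the limit is lowered (our reading, not original commentary): a broken
+# reflected-operator implementation can ping-pong between the object and the
+# scalar; a low limit turns that into a quick RecursionError instead of
+# exhausting the C stack.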
+
+
+@given(sampled_from(objecty_things),
+       sampled_from(reasonable_operators_for_scalars),
+       sampled_from(types))
+def test_operator_object_left(o, op, type_):
+    try:
+        with recursionlimit(200):
+            op(o, type_(1))
+    except TypeError:
+        pass
+
+
+@given(sampled_from(objecty_things),
+       sampled_from(reasonable_operators_for_scalars),
+       sampled_from(types))
+def test_operator_object_right(o, op, type_):
+    try:
+        with recursionlimit(200):
+            op(type_(1), o)
+    except TypeError:
+        pass
+
+
+@given(sampled_from(reasonable_operators_for_scalars),
+       sampled_from(types),
+       sampled_from(types))
+def test_operator_scalars(op, type1, type2):
+    try:
+        op(type1(1), type2(1))
+    except TypeError:
+        pass
+
+
+@pytest.mark.parametrize("op", reasonable_operators_for_scalars)
+@pytest.mark.parametrize("val", [None, 2**64])
+def test_longdouble_inf_loop(op, val):
+    # Note: The 2**64 value will pass once NEP 50 is adopted.
+    try:
+        op(np.longdouble(3), val)
+    except TypeError:
+        pass
+    try:
+        op(val, np.longdouble(3))
+    except TypeError:
+        pass
+
+
+@pytest.mark.parametrize("op", reasonable_operators_for_scalars)
+@pytest.mark.parametrize("val", [None, 2**64])
+def test_clongdouble_inf_loop(op, val):
+    # Note: The 2**64 value will pass once NEP 50 is adopted.
+    try:
+        op(np.clongdouble(3), val)
+    except TypeError:
+        pass
+    try:
+        op(val, np.clongdouble(3))
+    except TypeError:
+        pass
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+@pytest.mark.parametrize("operation", [
+        lambda min, max: max + max,
+        lambda min, max: min - max,
+        lambda min, max: max * max], ids=["+", "-", "*"])
+def test_scalar_integer_operation_overflow(dtype, operation):
+    st = np.dtype(dtype).type
+    min = st(np.iinfo(dtype).min)
+    max = st(np.iinfo(dtype).max)
+
+    with pytest.warns(RuntimeWarning, match="overflow encountered"):
+        operation(min, max)
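+    # Illustrative sketch of the wraparound behind the warning: for int8,
+    # np.int8(127) + np.int8(127) wraps to np.int8(-2) alongside the
+    # "overflow encountered" RuntimeWarning.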
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
+@pytest.mark.parametrize("operation", [
+        lambda min, neg_1: -min,
+        lambda min, neg_1: abs(min),
+        lambda min, neg_1: min * neg_1,
+        pytest.param(lambda min, neg_1: min // neg_1,
+            marks=pytest.mark.skip(reason="broken on some platforms"))],
+        ids=["neg", "abs", "*", "//"])
+def test_scalar_signed_integer_overflow(dtype, operation):
+    # The minimum signed integer can "overflow" for some additional operations
+    st = np.dtype(dtype).type
+    min = st(np.iinfo(dtype).min)
+    neg_1 = st(-1)
+
+    with pytest.warns(RuntimeWarning, match="overflow encountered"):
+        operation(min, neg_1)
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["UnsignedInteger"])
+def test_scalar_unsigned_integer_overflow(dtype):
+    val = np.dtype(dtype).type(8)
+    with pytest.warns(RuntimeWarning, match="overflow encountered"):
+        -val
+
+    zero = np.dtype(dtype).type(0)
+    -zero  # does not warn
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+@pytest.mark.parametrize("operation", [
+        lambda val, zero: val // zero,
+        lambda val, zero: val % zero, ], ids=["//", "%"])
+def test_scalar_integer_operation_divbyzero(dtype, operation):
+    st = np.dtype(dtype).type
+    val = st(100)
+    zero = st(0)
+
+    with pytest.warns(RuntimeWarning, match="divide by zero"):
+        operation(val, zero)
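+    # For context (a sketch): the integer division by zero does not raise;
+    # it emits the RuntimeWarning and returns 0 for both // and %.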
+
+
+ops_with_names = [
+    ("__lt__", "__gt__", operator.lt, True),
+    ("__le__", "__ge__", operator.le, True),
+    ("__eq__", "__eq__", operator.eq, True),
+    # Note __op__ and __rop__ may be identical here:
+    ("__ne__", "__ne__", operator.ne, True),
+    ("__gt__", "__lt__", operator.gt, True),
+    ("__ge__", "__le__", operator.ge, True),
+    ("__floordiv__", "__rfloordiv__", operator.floordiv, False),
+    ("__truediv__", "__rtruediv__", operator.truediv, False),
+    ("__add__", "__radd__", operator.add, False),
+    ("__mod__", "__rmod__", operator.mod, False),
+    ("__mul__", "__rmul__", operator.mul, False),
+    ("__pow__", "__rpow__", operator.pow, False),
+    ("__sub__", "__rsub__", operator.sub, False),
+]
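+# Each entry is (__op__, __rop__, callable, is_comparison); for comparisons
+# the "reflected" slot holds the mirrored comparison (e.g. __lt__/__gt__)
+# rather than an __r*__ name.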
+
+
+@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
+@pytest.mark.parametrize("sctype", [np.float32, np.float64, np.longdouble])
+def test_subclass_deferral(sctype, __op__, __rop__, op, cmp):
+    """
+    This test covers scalar subclass deferral.  Note that this is exceedingly
+    complicated, especially since it tends to fall back to the array paths and
+    these additionally add the "array priority" mechanism.
+
+    The behaviour was modified subtly in 1.22 (to make it closer to how Python
+    scalars work).  Due to its complexity, and because subclassing NumPy
+    scalars is probably a bad idea to begin with, there is probably room
+    for adjustments here.
+    """
+    class myf_simple1(sctype):
+        pass
+
+    class myf_simple2(sctype):
+        pass
+
+    def op_func(self, other):
+        return __op__
+
+    def rop_func(self, other):
+        return __rop__
+
+    myf_op = type("myf_op", (sctype,), {__op__: op_func, __rop__: rop_func})
+
+    # inheritance has to override, or this is correctly lost:
+    res = op(myf_simple1(1), myf_simple2(2))
+    assert type(res) == sctype or type(res) == np.bool_
+    assert op(myf_simple1(1), myf_simple2(2)) == op(1, 2)  # inherited
+
+    # Two independent subclasses do not really define an order.  This could
+    # be attempted, but we do not since Python's `int` does neither:
+    assert op(myf_op(1), myf_simple1(2)) == __op__
+    assert op(myf_simple1(1), myf_op(2)) == op(1, 2)  # inherited
+
+
+def test_longdouble_complex():
+    # Simple test to check longdouble and complex combinations, since these
+    # need to go through promotion, which longdouble needs to be careful about.
+    x = np.longdouble(1)
+    assert x + 1j == 1+1j
+    assert 1j + x == 1+1j
+
+
+@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
+@pytest.mark.parametrize("subtype", [float, int, complex, np.float16])
+@np._no_nep50_warning()
+def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp):
+    def op_func(self, other):
+        return __op__
+
+    def rop_func(self, other):
+        return __rop__
+
+    # Check that deferring is indicated using `__array_ufunc__`:
+    myt = type("myt", (subtype,),
+               {__op__: op_func, __rop__: rop_func, "__array_ufunc__": None})
+
+    # Just like normally, we should never presume we can modify the float.
+    assert op(myt(1), np.float64(2)) == __op__
+    assert op(np.float64(1), myt(2)) == __rop__
+
+    if op in {operator.mod, operator.floordiv} and subtype == complex:
+        return  # modulo/floor division not supported for complex; do not test.
+
+    if __rop__ == __op__:
+        return
+
+    # When no deferring is indicated, subclasses are handled normally.
+    myt = type("myt", (subtype,), {__rop__: rop_func})
+
+    # Check for float32, as a float subclass float64 may behave differently
+    res = op(myt(1), np.float16(2))
+    expected = op(subtype(1), np.float16(2))
+    assert res == expected
+    assert type(res) == type(expected)
+    res = op(np.float32(2), myt(1))
+    expected = op(np.float32(2), subtype(1))
+    assert res == expected
+    assert type(res) == type(expected)
+
+    # Same check for longdouble:
+    res = op(myt(1), np.longdouble(2))
+    expected = op(subtype(1), np.longdouble(2))
+    assert res == expected
+    assert type(res) == type(expected)
+    res = op(np.longdouble(2), myt(1))
+    expected = op(np.longdouble(2), subtype(1))
+    assert res == expected
+    assert type(res) == type(expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarprint.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarprint.py
new file mode 100644
index 00000000..98d1f4aa
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_scalarprint.py
@@ -0,0 +1,382 @@
+""" Test printing of scalar types.
+
+"""
+import code
+import platform
+import pytest
+import sys
+
+from tempfile import TemporaryFile
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_raises, IS_MUSL
+
+class TestRealScalars:
+    def test_str(self):
+        svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
+        styps = [np.float16, np.float32, np.float64, np.longdouble]
+        wanted = [
+             ['0.0',  '0.0',  '0.0',  '0.0' ],
+             ['-0.0', '-0.0', '-0.0', '-0.0'],
+             ['1.0',  '1.0',  '1.0',  '1.0' ],
+             ['-1.0', '-1.0', '-1.0', '-1.0'],
+             ['inf',  'inf',  'inf',  'inf' ],
+             ['-inf', '-inf', '-inf', '-inf'],
+             ['nan',  'nan',  'nan',  'nan']]
+
+        for wants, val in zip(wanted, svals):
+            for want, styp in zip(wants, styps):
+                msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val))
+                assert_equal(str(styp(val)), want, err_msg=msg)
+
+    def test_scalar_cutoffs(self):
+        # test that both the str and repr of np.float64 behaves
+        # like python floats in python3.
+        def check(v):
+            assert_equal(str(np.float64(v)), str(v))
+            assert_equal(str(np.float64(v)), repr(v))
+            assert_equal(repr(np.float64(v)), repr(v))
+            assert_equal(repr(np.float64(v)), str(v))
+
+        # check we use the same number of significant digits
+        check(1.12345678901234567890)
+        check(0.0112345678901234567890)
+
+        # check switch from scientific output to positional and back
+        check(1e-5)
+        check(1e-4)
+        check(1e15)
+        check(1e16)
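+        # For reference (a sketch): Python prints 1e15 positionally as
+        # '1000000000000000.0' but 1e16 scientifically as '1e+16', and
+        # 1e-5 scientifically but 1e-4 positionally; np.float64 is expected
+        # to match on both sides of each switch.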
+
+    def test_py2_float_print(self):
+        # gh-10753
+        # In python2, the python float type implements an obsolete method
+        # tp_print, which overrides tp_repr and tp_str when using "print" to
+        # output to a "real file" (ie, not a StringIO). Make sure we don't
+        # inherit it.
+        x = np.double(0.1999999999999)
+        with TemporaryFile('r+t') as f:
+            print(x, file=f)
+            f.seek(0)
+            output = f.read()
+        assert_equal(output, str(x) + '\n')
+        # In python2 the value float('0.1999999999999') prints with reduced
+        # precision as '0.2', but we want numpy's np.double('0.1999999999999')
+        # to print the unique value, '0.1999999999999'.
+
+        # gh-11031
+        # Only in the python2 interactive shell and when stdout is a "real"
+        # file, the output of the last command is printed to stdout without
+        # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print
+        # x` are potentially different. Make sure they are the same. The only
+        # way I found to get prompt-like output is using an actual prompt from
+        # the 'code' module. Again, must use tempfile to get a "real" file.
+
+        # dummy user-input which enters one line and then ctrl-Ds.
+        def userinput():
+            yield 'np.sqrt(2)'
+            raise EOFError
+        gen = userinput()
+        input_func = lambda prompt="": next(gen)
+
+        with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe:
+            orig_stdout, orig_stderr = sys.stdout, sys.stderr
+            sys.stdout, sys.stderr = fo, fe
+
+            code.interact(local={'np': np}, readfunc=input_func, banner='')
+
+            sys.stdout, sys.stderr = orig_stdout, orig_stderr
+
+            fo.seek(0)
+            capture = fo.read().strip()
+
+        assert_equal(capture, repr(np.sqrt(2)))
+
+    def test_dragon4(self):
+        # these tests are adapted from Ryan Juckett's dragon4 implementation,
+        # see dragon4.c for details.
+
+        fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k)
+        fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k)
+        fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k)
+        fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k)
+
+        preckwd = lambda prec: {'unique': False, 'precision': prec}
+
+        assert_equal(fpos32('1.0'), "1.")
+        assert_equal(fsci32('1.0'), "1.e+00")
+        assert_equal(fpos32('10.234'), "10.234")
+        assert_equal(fpos32('-10.234'), "-10.234")
+        assert_equal(fsci32('10.234'), "1.0234e+01")
+        assert_equal(fsci32('-10.234'), "-1.0234e+01")
+        assert_equal(fpos32('1000.0'), "1000.")
+        assert_equal(fpos32('1.0', precision=0), "1.")
+        assert_equal(fsci32('1.0', precision=0), "1.e+00")
+        assert_equal(fpos32('10.234', precision=0), "10.")
+        assert_equal(fpos32('-10.234', precision=0), "-10.")
+        assert_equal(fsci32('10.234', precision=0), "1.e+01")
+        assert_equal(fsci32('-10.234', precision=0), "-1.e+01")
+        assert_equal(fpos32('10.234', precision=2), "10.23")
+        assert_equal(fsci32('-10.234', precision=2), "-1.02e+01")
+        assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)),
+                            '9.9999999999999995e-08')
+        assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)),
+                            '9.8813129168249309e-324')
+        assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)),
+                            '9.9999999999999694e-311')
+
+
+        # test rounding
+        # 3.1415927410 is closest float32 to np.pi
+        assert_equal(fpos32('3.14159265358979323846', **preckwd(10)),
+                            "3.1415927410")
+        assert_equal(fsci32('3.14159265358979323846', **preckwd(10)),
+                            "3.1415927410e+00")
+        assert_equal(fpos64('3.14159265358979323846', **preckwd(10)),
+                            "3.1415926536")
+        assert_equal(fsci64('3.14159265358979323846', **preckwd(10)),
+                            "3.1415926536e+00")
+        # 299792448 is closest float32 to 299792458
+        assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000")
+        assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08")
+        assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000")
+        assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08")
+
+        assert_equal(fpos32('3.14159265358979323846', **preckwd(25)),
+                            "3.1415927410125732421875000")
+        assert_equal(fpos64('3.14159265358979323846', **preckwd(50)),
+                         "3.14159265358979311599796346854418516159057617187500")
+        assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793")
+
+
+        # smallest numbers
+        assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149),
+                    "0.00000000000000000000000000000000000000000000140129846432"
+                    "4817070923729583289916131280261941876515771757068283889791"
+                    "08268586060148663818836212158203125")
+
+        assert_equal(fpos64(5e-324, unique=False, precision=1074),
+                    "0.00000000000000000000000000000000000000000000000000000000"
+                    "0000000000000000000000000000000000000000000000000000000000"
+                    "0000000000000000000000000000000000000000000000000000000000"
+                    "0000000000000000000000000000000000000000000000000000000000"
+                    "0000000000000000000000000000000000000000000000000000000000"
+                    "0000000000000000000000000000000000049406564584124654417656"
+                    "8792868221372365059802614324764425585682500675507270208751"
+                    "8652998363616359923797965646954457177309266567103559397963"
+                    "9877479601078187812630071319031140452784581716784898210368"
+                    "8718636056998730723050006387409153564984387312473397273169"
+                    "6151400317153853980741262385655911710266585566867681870395"
+                    "6031062493194527159149245532930545654440112748012970999954"
+                    "1931989409080416563324524757147869014726780159355238611550"
+                    "1348035264934720193790268107107491703332226844753335720832"
+                    "4319360923828934583680601060115061698097530783422773183292"
+                    "4790498252473077637592724787465608477820373446969953364701"
+                    "7972677717585125660551199131504891101451037862738167250955"
+                    "8373897335989936648099411642057026370902792427675445652290"
+                    "87538682506419718265533447265625")
+
+        # largest numbers
+        f32x = np.finfo(np.float32).max
+        assert_equal(fpos32(f32x, **preckwd(0)),
+                    "340282346638528859811704183484516925440.")
+        assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)),
+                    "1797693134862315708145274237317043567980705675258449965989"
+                    "1747680315726078002853876058955863276687817154045895351438"
+                    "2464234321326889464182768467546703537516986049910576551282"
+                    "0762454900903893289440758685084551339423045832369032229481"
+                    "6580855933212334827479782620414472316873817718091929988125"
+                    "0404026184124858368.")
+        # Warning: In unique mode only the integer digits necessary for
+        # uniqueness are computed, the rest are 0.
+        assert_equal(fpos32(f32x),
+                    "340282350000000000000000000000000000000.")
+
+        # Further tests of zero-padding vs rounding in different combinations
+        # of unique, fractional, precision, min_digits
+        # precision can only reduce digits, not add them.
+        # min_digits can only extend digits, not reduce them.
+        assert_equal(fpos32(f32x, unique=True, fractional=True, precision=0),
+                    "340282350000000000000000000000000000000.")
+        assert_equal(fpos32(f32x, unique=True, fractional=True, precision=4),
+                    "340282350000000000000000000000000000000.")
+        assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=0),
+                    "340282346638528859811704183484516925440.")
+        assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=4),
+                    "340282346638528859811704183484516925440.0000")
+        assert_equal(fpos32(f32x, unique=True, fractional=True,
+                                    min_digits=4, precision=4),
+                    "340282346638528859811704183484516925440.0000")
+        assert_raises(ValueError, fpos32, f32x, unique=True, fractional=False,
+                                          precision=0)
+        assert_equal(fpos32(f32x, unique=True, fractional=False, precision=4),
+                    "340300000000000000000000000000000000000.")
+        assert_equal(fpos32(f32x, unique=True, fractional=False, precision=20),
+                    "340282350000000000000000000000000000000.")
+        assert_equal(fpos32(f32x, unique=True, fractional=False, min_digits=4),
+                    "340282350000000000000000000000000000000.")
+        assert_equal(fpos32(f32x, unique=True, fractional=False,
+                                  min_digits=20),
+                    "340282346638528859810000000000000000000.")
+        assert_equal(fpos32(f32x, unique=True, fractional=False,
+                                  min_digits=15),
+                    "340282346638529000000000000000000000000.")
+        assert_equal(fpos32(f32x, unique=False, fractional=False, precision=4),
+                    "340300000000000000000000000000000000000.")
+        # test that unique rounding is preserved when precision is supplied
+        # but no extra digits need to be printed (gh-18609)
+        a = np.float64.fromhex('-1p-97')
+        assert_equal(fsci64(a, unique=True), '-6.310887241768095e-30')
+        assert_equal(fsci64(a, unique=False, precision=15),
+                     '-6.310887241768094e-30')
+        assert_equal(fsci64(a, unique=True, precision=15),
+                     '-6.310887241768095e-30')
+        assert_equal(fsci64(a, unique=True, min_digits=15),
+                     '-6.310887241768095e-30')
+        assert_equal(fsci64(a, unique=True, precision=15, min_digits=15),
+                     '-6.310887241768095e-30')
+        # add/remove digits in unique mode with unbiased rounding
+        assert_equal(fsci64(a, unique=True, precision=14),
+                     '-6.31088724176809e-30')
+        assert_equal(fsci64(a, unique=True, min_digits=16),
+                     '-6.3108872417680944e-30')
+        assert_equal(fsci64(a, unique=True, precision=16),
+                     '-6.310887241768095e-30')
+        assert_equal(fsci64(a, unique=True, min_digits=14),
+                     '-6.310887241768095e-30')
+        # test min_digits in unique mode with different rounding cases
+        assert_equal(fsci64('1e120', min_digits=3), '1.000e+120')
+        assert_equal(fsci64('1e100', min_digits=3), '1.000e+100')
+
+        # test trailing zeros
+        assert_equal(fpos32('1.0', unique=False, precision=3), "1.000")
+        assert_equal(fpos64('1.0', unique=False, precision=3), "1.000")
+        assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00")
+        assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00")
+        assert_equal(fpos32('1.5', unique=False, precision=3), "1.500")
+        assert_equal(fpos64('1.5', unique=False, precision=3), "1.500")
+        assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00")
+        assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00")
+        # gh-10713
+        assert_equal(fpos64('324', unique=False, precision=5,
+                                   fractional=False), "324.00")
+
+    def test_dragon4_interface(self):
+        tps = [np.float16, np.float32, np.float64]
+        # test is flaky for musllinux on np.float128
+        if hasattr(np, 'float128') and not IS_MUSL:
+            tps.append(np.float128)
+
+        fpos = np.format_float_positional
+        fsci = np.format_float_scientific
+
+        for tp in tps:
+            # test padding
+            assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), "   1.    ")
+            assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), "  -1.    ")
+            assert_equal(fpos(tp('-10.2'),
+                         pad_left=4, pad_right=4), " -10.2   ")
+
+            # test exp_digits
+            assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001")
+
+            # test fixed (non-unique) mode
+            assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000")
+            assert_equal(fsci(tp('1.0'), unique=False, precision=4),
+                         "1.0000e+00")
+
+            # test trimming
+            # trim of 'k' or '.' only affects non-unique mode, since unique
+            # mode will not output trailing 0s.
+            assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'),
+                         "1.0000")
+
+            assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'),
+                         "1.")
+            assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'),
+                         "1.2" if tp != np.float16 else "1.2002")
+
+            assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'),
+                         "1.0")
+            assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'),
+                         "1.2" if tp != np.float16 else "1.2002")
+            assert_equal(fpos(tp('1.'), trim='0'), "1.0")
+
+            assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'),
+                         "1")
+            assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'),
+                         "1.2" if tp != np.float16 else "1.2002")
+            assert_equal(fpos(tp('1.'), trim='-'), "1")
+            assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1")
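+            # Summary of the trim modes exercised above (a sketch): 'k' keeps
+            # trailing zeros, '.' trims zeros but keeps the point ("1."),
+            # '0' leaves one zero after the point ("1.0"), and '-' strips
+            # both the zeros and the point ("1").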
+
+    @pytest.mark.skipif(not platform.machine().startswith("ppc64"),
+                        reason="only applies to ppc float128 values")
+    def test_ppc64_ibm_double_double128(self):
+        # check that the precision decreases once we get into the subnormal
+        # range. Unlike float64, this starts around 1e-292 instead of 1e-308,
+        # which happens when the first double is normal and the second is
+        # subnormal.
+        x = np.float128('2.123123123123123123123123123123123e-286')
+        got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)]
+        expected = [
+            "1.06156156156156156156156156156157e-286",
+            "1.06156156156156156156156156156158e-287",
+            "1.06156156156156156156156156156159e-288",
+            "1.0615615615615615615615615615616e-289",
+            "1.06156156156156156156156156156157e-290",
+            "1.06156156156156156156156156156156e-291",
+            "1.0615615615615615615615615615616e-292",
+            "1.0615615615615615615615615615615e-293",
+            "1.061561561561561561561561561562e-294",
+            "1.06156156156156156156156156155e-295",
+            "1.0615615615615615615615615616e-296",
+            "1.06156156156156156156156156e-297",
+            "1.06156156156156156156156157e-298",
+            "1.0615615615615615615615616e-299",
+            "1.06156156156156156156156e-300",
+            "1.06156156156156156156155e-301",
+            "1.0615615615615615615616e-302",
+            "1.061561561561561561562e-303",
+            "1.06156156156156156156e-304",
+            "1.0615615615615615618e-305",
+            "1.06156156156156156e-306",
+            "1.06156156156156157e-307",
+            "1.0615615615615616e-308",
+            "1.06156156156156e-309",
+            "1.06156156156157e-310",
+            "1.0615615615616e-311",
+            "1.06156156156e-312",
+            "1.06156156154e-313",
+            "1.0615615616e-314",
+            "1.06156156e-315",
+            "1.06156155e-316",
+            "1.061562e-317",
+            "1.06156e-318",
+            "1.06155e-319",
+            "1.0617e-320",
+            "1.06e-321",
+            "1.04e-322",
+            "1e-323",
+            "0.0",
+            "0.0"]
+        assert_equal(got, expected)
+
+        # Note: we follow glibc behavior, but it (or gcc) might not be right.
+        # In particular we can get two values that print the same but are not
+        # equal:
+        a = np.float128('2')/np.float128('3')
+        b = np.float128(str(a))
+        assert_equal(str(a), str(b))
+        assert_(a != b)
+
+    def test_float32_roundtrip(self):
+        # gh-9360
+        x = np.float32(1024 - 2**-14)
+        y = np.float32(1024 - 2**-13)
+        assert_(repr(x) != repr(y))
+        assert_equal(np.float32(repr(x)), x)
+        assert_equal(np.float32(repr(y)), y)
+
+    def test_float64_vs_python(self):
+        # gh-2643, gh-6136, gh-6908
+        assert_equal(repr(np.float64(0.1)), repr(0.1))
+        assert_(repr(np.float64(0.20000000000000004)) != repr(0.2))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_shape_base.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_shape_base.py
new file mode 100644
index 00000000..0428b95a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_shape_base.py
@@ -0,0 +1,825 @@
+import pytest
+import numpy as np
+from numpy.core import (
+    array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
+    newaxis, concatenate, stack
+    )
+from numpy.core.shape_base import (_block_dispatcher, _block_setup,
+                                   _block_concatenate, _block_slicing)
+from numpy.testing import (
+    assert_, assert_raises, assert_array_equal, assert_equal,
+    assert_raises_regex, assert_warns, IS_PYPY
+    )
+
+
+class TestAtleast1d:
+    def test_0D_array(self):
+        a = array(1)
+        b = array(2)
+        res = [atleast_1d(a), atleast_1d(b)]
+        desired = [array([1]), array([2])]
+        assert_array_equal(res, desired)
+
+    def test_1D_array(self):
+        a = array([1, 2])
+        b = array([2, 3])
+        res = [atleast_1d(a), atleast_1d(b)]
+        desired = [array([1, 2]), array([2, 3])]
+        assert_array_equal(res, desired)
+
+    def test_2D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        res = [atleast_1d(a), atleast_1d(b)]
+        desired = [a, b]
+        assert_array_equal(res, desired)
+
+    def test_3D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        a = array([a, a])
+        b = array([b, b])
+        res = [atleast_1d(a), atleast_1d(b)]
+        desired = [a, b]
+        assert_array_equal(res, desired)
+
+    def test_r1array(self):
+        """ Test to make sure equivalent Travis O's r1array function
+        """
+        assert_(atleast_1d(3).shape == (1,))
+        assert_(atleast_1d(3j).shape == (1,))
+        assert_(atleast_1d(3.0).shape == (1,))
+        assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
+
+
+class TestAtleast2d:
+    def test_0D_array(self):
+        a = array(1)
+        b = array(2)
+        res = [atleast_2d(a), atleast_2d(b)]
+        desired = [array([[1]]), array([[2]])]
+        assert_array_equal(res, desired)
+
+    def test_1D_array(self):
+        a = array([1, 2])
+        b = array([2, 3])
+        res = [atleast_2d(a), atleast_2d(b)]
+        desired = [array([[1, 2]]), array([[2, 3]])]
+        assert_array_equal(res, desired)
+
+    def test_2D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        res = [atleast_2d(a), atleast_2d(b)]
+        desired = [a, b]
+        assert_array_equal(res, desired)
+
+    def test_3D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        a = array([a, a])
+        b = array([b, b])
+        res = [atleast_2d(a), atleast_2d(b)]
+        desired = [a, b]
+        assert_array_equal(res, desired)
+
+    def test_r2array(self):
+        """ Test to make sure equivalent Travis O's r2array function
+        """
+        assert_(atleast_2d(3).shape == (1, 1))
+        assert_(atleast_2d([3j, 1]).shape == (1, 2))
+        assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
+
+
+class TestAtleast3d:
+    def test_0D_array(self):
+        a = array(1)
+        b = array(2)
+        res = [atleast_3d(a), atleast_3d(b)]
+        desired = [array([[[1]]]), array([[[2]]])]
+        assert_array_equal(res, desired)
+
+    def test_1D_array(self):
+        a = array([1, 2])
+        b = array([2, 3])
+        res = [atleast_3d(a), atleast_3d(b)]
+        desired = [array([[[1], [2]]]), array([[[2], [3]]])]
+        assert_array_equal(res, desired)
+
+    def test_2D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        res = [atleast_3d(a), atleast_3d(b)]
+        desired = [a[:,:, newaxis], b[:,:, newaxis]]
+        assert_array_equal(res, desired)
+
+    def test_3D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        a = array([a, a])
+        b = array([b, b])
+        res = [atleast_3d(a), atleast_3d(b)]
+        desired = [a, b]
+        assert_array_equal(res, desired)
+
+
+class TestHstack:
+    def test_non_iterable(self):
+        assert_raises(TypeError, hstack, 1)
+
+    def test_empty_input(self):
+        assert_raises(ValueError, hstack, ())
+
+    def test_0D_array(self):
+        a = array(1)
+        b = array(2)
+        res = hstack([a, b])
+        desired = array([1, 2])
+        assert_array_equal(res, desired)
+
+    def test_1D_array(self):
+        a = array([1])
+        b = array([2])
+        res = hstack([a, b])
+        desired = array([1, 2])
+        assert_array_equal(res, desired)
+
+    def test_2D_array(self):
+        a = array([[1], [2]])
+        b = array([[1], [2]])
+        res = hstack([a, b])
+        desired = array([[1, 1], [2, 2]])
+        assert_array_equal(res, desired)
+
+    def test_generator(self):
+        with pytest.raises(TypeError, match="arrays to stack must be"):
+            hstack((np.arange(3) for _ in range(2)))
+        with pytest.raises(TypeError, match="arrays to stack must be"):
+            hstack(map(lambda x: x, np.ones((3, 2))))
+
+    def test_casting_and_dtype(self):
+        a = np.array([1, 2, 3])
+        b = np.array([2.5, 3.5, 4.5])
+        res = np.hstack((a, b), casting="unsafe", dtype=np.int64)
+        expected_res = np.array([1, 2, 3, 2, 3, 4])
+        assert_array_equal(res, expected_res)
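+        # Note (a sketch): casting="unsafe" truncates toward zero, which is
+        # why 2.5, 3.5 and 4.5 land as 2, 3 and 4 in the int64 result.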
+
+    def test_casting_and_dtype_type_error(self):
+        a = np.array([1, 2, 3])
+        b = np.array([2.5, 3.5, 4.5])
+        with pytest.raises(TypeError):
+            hstack((a, b), casting="safe", dtype=np.int64)
+
+
+class TestVstack:
+    def test_non_iterable(self):
+        assert_raises(TypeError, vstack, 1)
+
+    def test_empty_input(self):
+        assert_raises(ValueError, vstack, ())
+
+    def test_0D_array(self):
+        a = array(1)
+        b = array(2)
+        res = vstack([a, b])
+        desired = array([[1], [2]])
+        assert_array_equal(res, desired)
+
+    def test_1D_array(self):
+        a = array([1])
+        b = array([2])
+        res = vstack([a, b])
+        desired = array([[1], [2]])
+        assert_array_equal(res, desired)
+
+    def test_2D_array(self):
+        a = array([[1], [2]])
+        b = array([[1], [2]])
+        res = vstack([a, b])
+        desired = array([[1], [2], [1], [2]])
+        assert_array_equal(res, desired)
+
+    def test_2D_array2(self):
+        a = array([1, 2])
+        b = array([1, 2])
+        res = vstack([a, b])
+        desired = array([[1, 2], [1, 2]])
+        assert_array_equal(res, desired)
+
+    def test_generator(self):
+        with pytest.raises(TypeError, match="arrays to stack must be"):
+            vstack((np.arange(3) for _ in range(2)))
+
+    def test_casting_and_dtype(self):
+        a = np.array([1, 2, 3])
+        b = np.array([2.5, 3.5, 4.5])
+        res = np.vstack((a, b), casting="unsafe", dtype=np.int64)
+        expected_res = np.array([[1, 2, 3], [2, 3, 4]])
+        assert_array_equal(res, expected_res)
+
+    def test_casting_and_dtype_type_error(self):
+        a = np.array([1, 2, 3])
+        b = np.array([2.5, 3.5, 4.5])
+        with pytest.raises(TypeError):
+            vstack((a, b), casting="safe", dtype=np.int64)
+
+
+class TestConcatenate:
+    def test_returns_copy(self):
+        a = np.eye(3)
+        b = np.concatenate([a])
+        b[0, 0] = 2
+        assert b[0, 0] != a[0, 0]
+
+    def test_exceptions(self):
+        # test axis must be in bounds
+        for ndim in [1, 2, 3]:
+            a = np.ones((1,)*ndim)
+            np.concatenate((a, a), axis=0)  # OK
+            assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)
+            assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))
+
+        # Scalars cannot be concatenated
+        assert_raises(ValueError, concatenate, (0,))
+        assert_raises(ValueError, concatenate, (np.array(0),))
+
+        # dimensionality must match
+        assert_raises_regex(
+            ValueError,
+            r"all the input arrays must have same number of dimensions, but "
+            r"the array at index 0 has 1 dimension\(s\) and the array at "
+            r"index 1 has 2 dimension\(s\)",
+            np.concatenate, (np.zeros(1), np.zeros((1, 1))))
+
+        # test shapes must match except for concatenation axis
+        a = np.ones((1, 2, 3))
+        b = np.ones((2, 2, 3))
+        axis = list(range(3))
+        for i in range(3):
+            np.concatenate((a, b), axis=axis[0])  # OK
+            assert_raises_regex(
+                ValueError,
+                "all the input array dimensions except for the concatenation axis "
+                "must match exactly, but along dimension {}, the array at "
+                "index 0 has size 1 and the array at index 1 has size 2"
+                .format(i),
+                np.concatenate, (a, b), axis=axis[1])
+            assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
+            a = np.moveaxis(a, -1, 0)
+            b = np.moveaxis(b, -1, 0)
+            axis.append(axis.pop(0))
+
+        # No arrays to concatenate raises ValueError
+        assert_raises(ValueError, concatenate, ())
+
+    def test_concatenate_axis_None(self):
+        a = np.arange(4, dtype=np.float64).reshape((2, 2))
+        b = list(range(3))
+        c = ['x']
+        r = np.concatenate((a, a), axis=None)
+        assert_equal(r.dtype, a.dtype)
+        assert_equal(r.ndim, 1)
+        r = np.concatenate((a, b), axis=None)
+        assert_equal(r.size, a.size + len(b))
+        assert_equal(r.dtype, a.dtype)
+        r = np.concatenate((a, b, c), axis=None, dtype="U")
+        d = array(['0.0', '1.0', '2.0', '3.0',
+                   '0', '1', '2', 'x'])
+        assert_array_equal(r, d)
+
+        out = np.zeros(a.size + len(b))
+        r = np.concatenate((a, b), axis=None)
+        rout = np.concatenate((a, b), axis=None, out=out)
+        assert_(out is rout)
+        assert_equal(r, rout)
+
+    def test_large_concatenate_axis_None(self):
+        # When no axis is given, concatenate uses flattened versions.
+        # This also had a bug with many arrays (see gh-5979).
+        x = np.arange(1, 100)
+        r = np.concatenate(x, None)
+        assert_array_equal(x, r)
+
+        # This should probably be deprecated:
+        r = np.concatenate(x, 100)  # axis is >= MAXDIMS
+        assert_array_equal(x, r)
+
+    def test_concatenate(self):
+        # Test concatenate function
+        # One sequence returns unmodified (but as array)
+        r4 = list(range(4))
+        assert_array_equal(concatenate((r4,)), r4)
+        # Any sequence
+        assert_array_equal(concatenate((tuple(r4),)), r4)
+        assert_array_equal(concatenate((array(r4),)), r4)
+        # 1D default concatenation
+        r3 = list(range(3))
+        assert_array_equal(concatenate((r4, r3)), r4 + r3)
+        # Mixed sequence types
+        assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
+        assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
+        # Explicit axis specification
+        assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
+        # Including negative
+        assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
+        # 2D
+        a23 = array([[10, 11, 12], [13, 14, 15]])
+        a13 = array([[0, 1, 2]])
+        res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
+        assert_array_equal(concatenate((a23, a13)), res)
+        assert_array_equal(concatenate((a23, a13), 0), res)
+        assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
+        assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
+        # Arrays must match shape
+        assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
+        # 3D
+        res = arange(2 * 3 * 7).reshape((2, 3, 7))
+        a0 = res[..., :4]
+        a1 = res[..., 4:6]
+        a2 = res[..., 6:]
+        assert_array_equal(concatenate((a0, a1, a2), 2), res)
+        assert_array_equal(concatenate((a0, a1, a2), -1), res)
+        assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
+
+        out = res.copy()
+        rout = concatenate((a0, a1, a2), 2, out=out)
+        assert_(out is rout)
+        assert_equal(res, rout)
+
+    @pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython")
+    def test_operator_concat(self):
+        import operator
+        a = array([1, 2])
+        b = array([3, 4])
+        n = [1, 2]
+        res = array([1, 2, 3, 4])
+        assert_raises(TypeError, operator.concat, a, b)
+        assert_raises(TypeError, operator.concat, a, n)
+        assert_raises(TypeError, operator.concat, n, a)
+        assert_raises(TypeError, operator.concat, a, 1)
+        assert_raises(TypeError, operator.concat, 1, a)
+
+    def test_bad_out_shape(self):
+        a = array([1, 2])
+        b = array([3, 4])
+
+        assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
+        assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
+        assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
+        concatenate((a, b), out=np.empty(4))
+
+    @pytest.mark.parametrize("axis", [None, 0])
+    @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8", "S4"])
+    @pytest.mark.parametrize("casting",
+            ['no', 'equiv', 'safe', 'same_kind', 'unsafe'])
+    def test_out_and_dtype(self, axis, out_dtype, casting):
+        # Compare usage of `out=out` with `dtype=out.dtype`
+        out = np.empty(4, dtype=out_dtype)
+        to_concat = (array([1.1, 2.2]), array([3.3, 4.4]))
+
+        if not np.can_cast(to_concat[0], out_dtype, casting=casting):
+            with assert_raises(TypeError):
+                concatenate(to_concat, out=out, axis=axis, casting=casting)
+            with assert_raises(TypeError):
+                concatenate(to_concat, dtype=out.dtype,
+                            axis=axis, casting=casting)
+        else:
+            res_out = concatenate(to_concat, out=out,
+                                  axis=axis, casting=casting)
+            res_dtype = concatenate(to_concat, dtype=out.dtype,
+                                    axis=axis, casting=casting)
+            assert res_out is out
+            assert_array_equal(out, res_dtype)
+            assert res_dtype.dtype == out_dtype
+
+        with assert_raises(TypeError):
+            concatenate(to_concat, out=out, dtype=out_dtype, axis=axis)
+
+    @pytest.mark.parametrize("axis", [None, 0])
+    @pytest.mark.parametrize("string_dt", ["S", "U", "S0", "U0"])
+    @pytest.mark.parametrize("arrs",
+            [([0.],), ([0.], [1]), ([0], ["string"], [1.])])
+    def test_dtype_with_promotion(self, arrs, string_dt, axis):
+        # Note that U0 and S0 should be deprecated eventually and changed to
+        # actually give the empty string result (together with `np.array`)
+        res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe")
+        # The actual dtype should be identical to a cast (of a double array):
+        assert res.dtype == np.array(1.).astype(string_dt).dtype
+
+    @pytest.mark.parametrize("axis", [None, 0])
+    def test_string_dtype_does_not_inspect(self, axis):
+        with pytest.raises(TypeError):
+            np.concatenate(([None], [1]), dtype="S", axis=axis)
+        with pytest.raises(TypeError):
+            np.concatenate(([None], [1]), dtype="U", axis=axis)
+
+    @pytest.mark.parametrize("axis", [None, 0])
+    def test_subarray_error(self, axis):
+        with pytest.raises(TypeError, match=".*subarray dtype"):
+            np.concatenate(([1], [1]), dtype="(2,)i", axis=axis)
+
+
+def test_stack():
+    # non-iterable input
+    assert_raises(TypeError, stack, 1)
+
+    # 0d input
+    for input_ in [(1, 2, 3),
+                   [np.int32(1), np.int32(2), np.int32(3)],
+                   [np.array(1), np.array(2), np.array(3)]]:
+        assert_array_equal(stack(input_), [1, 2, 3])
+    # 1d input examples
+    a = np.array([1, 2, 3])
+    b = np.array([4, 5, 6])
+    r1 = array([[1, 2, 3], [4, 5, 6]])
+    assert_array_equal(np.stack((a, b)), r1)
+    assert_array_equal(np.stack((a, b), axis=1), r1.T)
+    # all input types
+    assert_array_equal(np.stack(list([a, b])), r1)
+    assert_array_equal(np.stack(array([a, b])), r1)
+    # all shapes for 1d input
+    arrays = [np.random.randn(3) for _ in range(10)]
+    axes = [0, 1, -1, -2]
+    expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]
+    for axis, expected_shape in zip(axes, expected_shapes):
+        assert_equal(np.stack(arrays, axis).shape, expected_shape)
+    assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2)
+    assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3)
+    # all shapes for 2d input
+    arrays = [np.random.randn(3, 4) for _ in range(10)]
+    axes = [0, 1, 2, -1, -2, -3]
+    expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
+                       (3, 4, 10), (3, 10, 4), (10, 3, 4)]
+    for axis, expected_shape in zip(axes, expected_shapes):
+        assert_equal(np.stack(arrays, axis).shape, expected_shape)
+    # empty arrays
+    assert_(stack([[], [], []]).shape == (3, 0))
+    assert_(stack([[], [], []], axis=1).shape == (0, 3))
+    # out
+    out = np.zeros_like(r1)
+    np.stack((a, b), out=out)
+    assert_array_equal(out, r1)
+    # edge cases
+    assert_raises_regex(ValueError, 'need at least one array', stack, [])
+    assert_raises_regex(ValueError, 'must have the same shape',
+                        stack, [1, np.arange(3)])
+    assert_raises_regex(ValueError, 'must have the same shape',
+                        stack, [np.arange(3), 1])
+    assert_raises_regex(ValueError, 'must have the same shape',
+                        stack, [np.arange(3), 1], axis=1)
+    assert_raises_regex(ValueError, 'must have the same shape',
+                        stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
+    assert_raises_regex(ValueError, 'must have the same shape',
+                        stack, [np.arange(2), np.arange(3)])
+
+    # do not accept generators
+    with pytest.raises(TypeError, match="arrays to stack must be"):
+        stack((x for x in range(3)))
+
+    # casting and dtype test
+    a = np.array([1, 2, 3])
+    b = np.array([2.5, 3.5, 4.5])
+    res = np.stack((a, b), axis=1, casting="unsafe", dtype=np.int64)
+    expected_res = np.array([[1, 2], [2, 3], [3, 4]])
+    assert_array_equal(res, expected_res)
+    # casting and dtype with TypeError
+    with assert_raises(TypeError):
+        stack((a, b), dtype=np.int64, axis=1, casting="safe")
+
+
+@pytest.mark.parametrize("axis", [0])
+@pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8"])
+@pytest.mark.parametrize("casting",
+                         ['no', 'equiv', 'safe', 'same_kind', 'unsafe'])
+def test_stack_out_and_dtype(axis, out_dtype, casting):
+    to_concat = (array([1, 2]), array([3, 4]))
+    res = array([[1, 2], [3, 4]])
+    out = np.zeros_like(res)
+
+    if not np.can_cast(to_concat[0], out_dtype, casting=casting):
+        with assert_raises(TypeError):
+            stack(to_concat, dtype=out_dtype,
+                  axis=axis, casting=casting)
+    else:
+        res_out = stack(to_concat, out=out,
+                        axis=axis, casting=casting)
+        res_dtype = stack(to_concat, dtype=out_dtype,
+                          axis=axis, casting=casting)
+        assert res_out is out
+        assert_array_equal(out, res_dtype)
+        assert res_dtype.dtype == out_dtype
+
+    with assert_raises(TypeError):
+        stack(to_concat, out=out, dtype=out_dtype, axis=axis)
+
+
+class TestBlock:
+    @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
+    def block(self, request):
+        # Blocking small arrays and large arrays go through different paths;
+        # which algorithm is used depends on the number of element copies
+        # required. We define a test fixture that forces most tests to go
+        # through both code paths.
+        # Ultimately, this should be removed if a single algorithm is found
+        # to be faster for both small and large arrays.
+        def _block_force_concatenate(arrays):
+            arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
+            return _block_concatenate(arrays, list_ndim, result_ndim)
+
+        def _block_force_slicing(arrays):
+            arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
+            return _block_slicing(arrays, list_ndim, result_ndim)
+
+        if request.param == 'force_concatenate':
+            return _block_force_concatenate
+        elif request.param == 'force_slicing':
+            return _block_force_slicing
+        elif request.param == 'block':
+            return block
+        else:
+            raise ValueError('Unknown blocking request. There is a typo in the tests.')
+
+    def test_returns_copy(self, block):
+        a = np.eye(3)
+        b = block(a)
+        b[0, 0] = 2
+        assert b[0, 0] != a[0, 0]
+
+    def test_block_total_size_estimate(self, block):
+        _, _, _, total_size = _block_setup([1])
+        assert total_size == 1
+
+        _, _, _, total_size = _block_setup([[1]])
+        assert total_size == 1
+
+        _, _, _, total_size = _block_setup([[1, 1]])
+        assert total_size == 2
+
+        _, _, _, total_size = _block_setup([[1], [1]])
+        assert total_size == 2
+
+        _, _, _, total_size = _block_setup([[1, 2], [3, 4]])
+        assert total_size == 4
+
+    def test_block_simple_row_wise(self, block):
+        a_2d = np.ones((2, 2))
+        b_2d = 2 * a_2d
+        desired = np.array([[1, 1, 2, 2],
+                            [1, 1, 2, 2]])
+        result = block([a_2d, b_2d])
+        assert_equal(desired, result)
+
+    def test_block_simple_column_wise(self, block):
+        a_2d = np.ones((2, 2))
+        b_2d = 2 * a_2d
+        expected = np.array([[1, 1],
+                             [1, 1],
+                             [2, 2],
+                             [2, 2]])
+        result = block([[a_2d], [b_2d]])
+        assert_equal(expected, result)
+
+    def test_block_with_1d_arrays_row_wise(self, block):
+        # 1-D vectors are treated as row arrays
+        a = np.array([1, 2, 3])
+        b = np.array([2, 3, 4])
+        expected = np.array([1, 2, 3, 2, 3, 4])
+        result = block([a, b])
+        assert_equal(expected, result)
+
+    def test_block_with_1d_arrays_multiple_rows(self, block):
+        a = np.array([1, 2, 3])
+        b = np.array([2, 3, 4])
+        expected = np.array([[1, 2, 3, 2, 3, 4],
+                             [1, 2, 3, 2, 3, 4]])
+        result = block([[a, b], [a, b]])
+        assert_equal(expected, result)
+
+    def test_block_with_1d_arrays_column_wise(self, block):
+        # 1-D vectors are treated as row arrays
+        a_1d = np.array([1, 2, 3])
+        b_1d = np.array([2, 3, 4])
+        expected = np.array([[1, 2, 3],
+                             [2, 3, 4]])
+        result = block([[a_1d], [b_1d]])
+        assert_equal(expected, result)
+
+    def test_block_mixed_1d_and_2d(self, block):
+        a_2d = np.ones((2, 2))
+        b_1d = np.array([2, 2])
+        result = block([[a_2d], [b_1d]])
+        expected = np.array([[1, 1],
+                             [1, 1],
+                             [2, 2]])
+        assert_equal(expected, result)
+
+    def test_block_complicated(self, block):
+        # a bit more complicated
+        one_2d = np.array([[1, 1, 1]])
+        two_2d = np.array([[2, 2, 2]])
+        three_2d = np.array([[3, 3, 3, 3, 3, 3]])
+        four_1d = np.array([4, 4, 4, 4, 4, 4])
+        five_0d = np.array(5)
+        six_1d = np.array([6, 6, 6, 6, 6])
+        zero_2d = np.zeros((2, 6))
+
+        expected = np.array([[1, 1, 1, 2, 2, 2],
+                             [3, 3, 3, 3, 3, 3],
+                             [4, 4, 4, 4, 4, 4],
+                             [5, 6, 6, 6, 6, 6],
+                             [0, 0, 0, 0, 0, 0],
+                             [0, 0, 0, 0, 0, 0]])
+
+        result = block([[one_2d, two_2d],
+                        [three_2d],
+                        [four_1d],
+                        [five_0d, six_1d],
+                        [zero_2d]])
+        assert_equal(result, expected)
+
+    def test_nested(self, block):
+        one = np.array([1, 1, 1])
+        two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
+        three = np.array([3, 3, 3])
+        four = np.array([4, 4, 4])
+        five = np.array(5)
+        six = np.array([6, 6, 6, 6, 6])
+        zero = np.zeros((2, 6))
+
+        result = block([
+            [
+                block([
+                   [one],
+                   [three],
+                   [four]
+                ]),
+                two
+            ],
+            [five, six],
+            [zero]
+        ])
+        expected = np.array([[1, 1, 1, 2, 2, 2],
+                             [3, 3, 3, 2, 2, 2],
+                             [4, 4, 4, 2, 2, 2],
+                             [5, 6, 6, 6, 6, 6],
+                             [0, 0, 0, 0, 0, 0],
+                             [0, 0, 0, 0, 0, 0]])
+
+        assert_equal(result, expected)
+
+    def test_3d(self, block):
+        a000 = np.ones((2, 2, 2), int) * 1
+
+        a100 = np.ones((3, 2, 2), int) * 2
+        a010 = np.ones((2, 3, 2), int) * 3
+        a001 = np.ones((2, 2, 3), int) * 4
+
+        a011 = np.ones((2, 3, 3), int) * 5
+        a101 = np.ones((3, 2, 3), int) * 6
+        a110 = np.ones((3, 3, 2), int) * 7
+
+        a111 = np.ones((3, 3, 3), int) * 8
+
+        result = block([
+            [
+                [a000, a001],
+                [a010, a011],
+            ],
+            [
+                [a100, a101],
+                [a110, a111],
+            ]
+        ])
+        expected = array([[[1, 1, 4, 4, 4],
+                           [1, 1, 4, 4, 4],
+                           [3, 3, 5, 5, 5],
+                           [3, 3, 5, 5, 5],
+                           [3, 3, 5, 5, 5]],
+
+                          [[1, 1, 4, 4, 4],
+                           [1, 1, 4, 4, 4],
+                           [3, 3, 5, 5, 5],
+                           [3, 3, 5, 5, 5],
+                           [3, 3, 5, 5, 5]],
+
+                          [[2, 2, 6, 6, 6],
+                           [2, 2, 6, 6, 6],
+                           [7, 7, 8, 8, 8],
+                           [7, 7, 8, 8, 8],
+                           [7, 7, 8, 8, 8]],
+
+                          [[2, 2, 6, 6, 6],
+                           [2, 2, 6, 6, 6],
+                           [7, 7, 8, 8, 8],
+                           [7, 7, 8, 8, 8],
+                           [7, 7, 8, 8, 8]],
+
+                          [[2, 2, 6, 6, 6],
+                           [2, 2, 6, 6, 6],
+                           [7, 7, 8, 8, 8],
+                           [7, 7, 8, 8, 8],
+                           [7, 7, 8, 8, 8]]])
+
+        assert_array_equal(result, expected)
+
+    def test_block_with_mismatched_shape(self, block):
+        a = np.array([0, 0])
+        b = np.eye(2)
+        assert_raises(ValueError, block, [a, b])
+        assert_raises(ValueError, block, [b, a])
+
+        to_block = [[np.ones((2,3)), np.ones((2,2))],
+                    [np.ones((2,2)), np.ones((2,2))]]
+        assert_raises(ValueError, block, to_block)
+
+    def test_no_lists(self, block):
+        assert_equal(block(1),         np.array(1))
+        assert_equal(block(np.eye(3)), np.eye(3))
+
+    def test_invalid_nesting(self, block):
+        msg = 'depths are mismatched'
+        assert_raises_regex(ValueError, msg, block, [1, [2]])
+        assert_raises_regex(ValueError, msg, block, [1, []])
+        assert_raises_regex(ValueError, msg, block, [[1], 2])
+        assert_raises_regex(ValueError, msg, block, [[], 2])
+        assert_raises_regex(ValueError, msg, block, [
+            [[1], [2]],
+            [[3, 4]],
+            [5]  # missing brackets
+        ])
+
+    def test_empty_lists(self, block):
+        assert_raises_regex(ValueError, 'empty', block, [])
+        assert_raises_regex(ValueError, 'empty', block, [[]])
+        assert_raises_regex(ValueError, 'empty', block, [[1], []])
+
+    def test_tuple(self, block):
+        assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))
+        assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])
+
+    def test_different_ndims(self, block):
+        a = 1.
+        b = 2 * np.ones((1, 2))
+        c = 3 * np.ones((1, 1, 3))
+
+        result = block([a, b, c])
+        expected = np.array([[[1., 2., 2., 3., 3., 3.]]])
+
+        assert_equal(result, expected)
+
+    def test_different_ndims_depths(self, block):
+        a = 1.
+        b = 2 * np.ones((1, 2))
+        c = 3 * np.ones((1, 2, 3))
+
+        result = block([[a, b], [c]])
+        expected = np.array([[[1., 2., 2.],
+                              [3., 3., 3.],
+                              [3., 3., 3.]]])
+
+        assert_equal(result, expected)
+
+    def test_block_memory_order(self, block):
+        # 3D
+        arr_c = np.zeros((3,)*3, order='C')
+        arr_f = np.zeros((3,)*3, order='F')
+
+        b_c = [[[arr_c, arr_c],
+                [arr_c, arr_c]],
+               [[arr_c, arr_c],
+                [arr_c, arr_c]]]
+
+        b_f = [[[arr_f, arr_f],
+                [arr_f, arr_f]],
+               [[arr_f, arr_f],
+                [arr_f, arr_f]]]
+
+        assert block(b_c).flags['C_CONTIGUOUS']
+        assert block(b_f).flags['F_CONTIGUOUS']
+
+        arr_c = np.zeros((3, 3), order='C')
+        arr_f = np.zeros((3, 3), order='F')
+        # 2D
+        b_c = [[arr_c, arr_c],
+               [arr_c, arr_c]]
+
+        b_f = [[arr_f, arr_f],
+               [arr_f, arr_f]]
+
+        assert block(b_c).flags['C_CONTIGUOUS']
+        assert block(b_f).flags['F_CONTIGUOUS']
+
+
+def test_block_dispatcher():
+    class ArrayLike:
+        pass
+    a = ArrayLike()
+    b = ArrayLike()
+    c = ArrayLike()
+    assert_equal(list(_block_dispatcher(a)), [a])
+    assert_equal(list(_block_dispatcher([a])), [a])
+    assert_equal(list(_block_dispatcher([a, b])), [a, b])
+    assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
+    # don't recurse into non-lists
+    assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_simd.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_simd.py
new file mode 100644
index 00000000..92b56744
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_simd.py
@@ -0,0 +1,1333 @@
+# NOTE: Please avoid the use of numpy.testing since NPYV intrinsics
+# may be involved in its functionality.
+import pytest, math, re
+import itertools
+import operator
+from numpy.core._simd import targets, clear_floatstatus, get_floatstatus
+from numpy.core._multiarray_umath import __cpu_baseline__
+
+def check_floatstatus(divbyzero=False, overflow=False,
+                      underflow=False, invalid=False,
+                      all=False):
+    #define NPY_FPE_DIVIDEBYZERO  1
+    #define NPY_FPE_OVERFLOW      2
+    #define NPY_FPE_UNDERFLOW     4
+    #define NPY_FPE_INVALID       8
+    err = get_floatstatus()
+    ret = (all or divbyzero) and (err & 1) != 0
+    ret |= (all or overflow) and (err & 2) != 0
+    ret |= (all or underflow) and (err & 4) != 0
+    ret |= (all or invalid) and (err & 8) != 0
+    return ret
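+
+# Minimal usage sketch, assuming an intrinsic raised divide-by-zero at the
+# C level (`raise_divbyzero` is a hypothetical stand-in, not a real intrinsic):
+#
+#     clear_floatstatus()
+#     raise_divbyzero()
+#     assert check_floatstatus(divbyzero=True)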
+
+class _Test_Utility:
+    # submodule of the desired SIMD extension, e.g. targets["AVX512F"]
+    npyv = None
+    # the current data type suffix e.g. 's8'
+    sfx  = None
+    # target name can be 'baseline' or one or more of CPU features
+    target_name = None
+
+    def __getattr__(self, attr):
+        """
+        To call NPV intrinsics without the attribute 'npyv' and
+        auto suffixing intrinsics according to class attribute 'sfx'
+        """
+        return getattr(self.npyv, attr + "_" + self.sfx)
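+    # e.g. with sfx == "f32", self.mul(a, b) resolves to self.npyv.mul_f32(a, b)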
+
+    def _x2(self, intrin_name):
+        return getattr(self.npyv, f"{intrin_name}_{self.sfx}x2")
+
+    def _data(self, start=None, count=None, reverse=False):
+        """
+        Create list of consecutive numbers according to number of vector's lanes.
+        """
+        if start is None:
+            start = 1
+        if count is None:
+            count = self.nlanes
+        rng = range(start, start + count)
+        if reverse:
+            rng = reversed(rng)
+        if self._is_fp():
+            return [x / 1.0 for x in rng]
+        return list(rng)
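+    # e.g. on a 4-lane vector, _data() -> [1, 2, 3, 4] and
+    # _data(reverse=True) -> [4, 3, 2, 1] (as floats for 'f' suffixes)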
+
+    def _is_unsigned(self):
+        return self.sfx[0] == 'u'
+
+    def _is_signed(self):
+        return self.sfx[0] == 's'
+
+    def _is_fp(self):
+        return self.sfx[0] == 'f'
+
+    def _scalar_size(self):
+        return int(self.sfx[1:])
+
+    def _int_clip(self, seq):
+        if self._is_fp():
+            return seq
+        max_int = self._int_max()
+        min_int = self._int_min()
+        return [min(max(v, min_int), max_int) for v in seq]
+
+    def _int_max(self):
+        if self._is_fp():
+            return None
+        max_u = self._to_unsigned(self.setall(-1))[0]
+        if self._is_signed():
+            return max_u // 2
+        return max_u
+
+    def _int_min(self):
+        if self._is_fp():
+            return None
+        if self._is_unsigned():
+            return 0
+        return -(self._int_max() + 1)
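+    # e.g. for sfx "s8": _int_max() == 127, _int_min() == -128;
+    # for sfx "u8": _int_max() == 255, _int_min() == 0.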
+
+    def _true_mask(self):
+        max_unsig = getattr(self.npyv, "setall_u" + self.sfx[1:])(-1)
+        return max_unsig[0]
+
+    def _to_unsigned(self, vector):
+        if isinstance(vector, (list, tuple)):
+            return getattr(self.npyv, "load_u" + self.sfx[1:])(vector)
+        else:
+            sfx = vector.__name__.replace("npyv_", "")
+            if sfx[0] == "b":
+                cvt_intrin = "cvt_u{0}_b{0}"
+            else:
+                cvt_intrin = "reinterpret_u{0}_{1}"
+            return getattr(self.npyv, cvt_intrin.format(sfx[1:], sfx))(vector)
+
+    def _pinfinity(self):
+        return float("inf")
+
+    def _ninfinity(self):
+        return -float("inf")
+
+    def _nan(self):
+        return float("nan")
+
+    def _cpu_features(self):
+        target = self.target_name
+        if target == "baseline":
+            target = __cpu_baseline__
+        else:
+            target = target.split('__') # multi-target separator
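+            # e.g. "SSE42__AVX2" -> ["SSE42", "AVX2"]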
+        return ' '.join(target)
+
+class _SIMD_BOOL(_Test_Utility):
+    """
+    To test all boolean vector types at once
+    """
+    def _nlanes(self):
+        return getattr(self.npyv, "nlanes_u" + self.sfx[1:])
+
+    def _data(self, start=None, count=None, reverse=False):
+        true_mask = self._true_mask()
+        rng = range(self._nlanes())
+        if reverse:
+            rng = reversed(rng)
+        return [true_mask if x % 2 else 0 for x in rng]
+
+    def _load_b(self, data):
+        len_str = self.sfx[1:]
+        load = getattr(self.npyv, "load_u" + len_str)
+        cvt = getattr(self.npyv, f"cvt_b{len_str}_u{len_str}")
+        return cvt(load(data))
+
+    def test_operators_logical(self):
+        """
+        Logical operations for boolean types.
+        Test intrinsics:
+            npyv_xor_##SFX, npyv_and_##SFX, npyv_or_##SFX, npyv_not_##SFX,
+            npyv_andc_b8, npyv_orc_b8, npyv_xnor_b8
+        """
+        data_a = self._data()
+        data_b = self._data(reverse=True)
+        vdata_a = self._load_b(data_a)
+        vdata_b = self._load_b(data_b)
+
+        data_and = [a & b for a, b in zip(data_a, data_b)]
+        vand = getattr(self, "and")(vdata_a, vdata_b)
+        assert vand == data_and
+
+        data_or = [a | b for a, b in zip(data_a, data_b)]
+        vor = getattr(self, "or")(vdata_a, vdata_b)
+        assert vor == data_or
+
+        data_xor = [a ^ b for a, b in zip(data_a, data_b)]
+        vxor = getattr(self, "xor")(vdata_a, vdata_b)
+        assert vxor == data_xor
+
+        vnot = getattr(self, "not")(vdata_a)
+        assert vnot == data_b
+
+        # among the boolean types, andc, orc and xnor only support b8
+        if self.sfx != "b8":
+            return
+
+        data_andc = [(a & ~b) & 0xFF for a, b in zip(data_a, data_b)]
+        vandc = getattr(self, "andc")(vdata_a, vdata_b)
+        assert data_andc == vandc
+
+        data_orc = [(a | ~b) & 0xFF for a, b in zip(data_a, data_b)]
+        vorc = getattr(self, "orc")(vdata_a, vdata_b)
+        assert data_orc == vorc
+
+        data_xnor = [~(a ^ b) & 0xFF for a, b in zip(data_a, data_b)]
+        vxnor = getattr(self, "xnor")(vdata_a, vdata_b)
+        assert data_xnor == vxnor
+
+    def test_tobits(self):
+        data2bits = lambda data: sum([int(x != 0) << i for i, x in enumerate(data, 0)])
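+        # e.g. for 4 lanes, [m, 0, m, m] (m = true mask) -> 0b1101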
+        for data in (self._data(), self._data(reverse=True)):
+            vdata = self._load_b(data)
+            data_bits = data2bits(data)
+            tobits = self.tobits(vdata)
+            bin_tobits = bin(tobits)
+            assert bin_tobits == bin(data_bits)
+
+    def test_pack(self):
+        """
+        Pack multiple vectors into one
+        Test intrinsics:
+            npyv_pack_b8_b16
+            npyv_pack_b8_b32
+            npyv_pack_b8_b64
+        """
+        if self.sfx not in ("b16", "b32", "b64"):
+            return
+        # create the vectors
+        data = self._data()
+        rdata = self._data(reverse=True)
+        vdata = self._load_b(data)
+        vrdata = self._load_b(rdata)
+        pack_simd = getattr(self.npyv, f"pack_b8_{self.sfx}")
+        # For the scalar reference, concatenate the input lists into a single
+        # list (spack) and mask each element to keep only its first byte.
+        if self.sfx == "b16":
+            spack = [(i & 0xFF) for i in (list(rdata) + list(data))]
+            vpack = pack_simd(vrdata, vdata)
+        elif self.sfx == "b32":
+            spack = [(i & 0xFF) for i in (2*list(rdata) + 2*list(data))]
+            vpack = pack_simd(vrdata, vrdata, vdata, vdata)
+        elif self.sfx == "b64":
+            spack = [(i & 0xFF) for i in (4*list(rdata) + 4*list(data))]
+            vpack = pack_simd(vrdata, vrdata, vrdata, vrdata,
+                               vdata,  vdata,  vdata,  vdata)
+        assert vpack == spack
+
+    @pytest.mark.parametrize("intrin", ["any", "all"])
+    @pytest.mark.parametrize("data", (
+        [-1, 0],
+        [0, -1],
+        [-1],
+        [0]
+    ))
+    def test_operators_crosstest(self, intrin, data):
+        """
+        Test intrinsics:
+            npyv_any_##SFX
+            npyv_all_##SFX
+        """
+        data_a = self._load_b(data * self._nlanes())
+        func = eval(intrin)
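+        # func is the Python builtin any/all, used as the scalar reference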
+        intrin = getattr(self, intrin)
+        desired = func(data_a)
+        simd = intrin(data_a)
+        assert bool(simd) == bool(desired)
+
+class _SIMD_INT(_Test_Utility):
+    """
+    To test all integer vector types at once
+    """
+    def test_operators_shift(self):
+        if self.sfx in ("u8", "s8"):
+            return
+
+        data_a = self._data(self._int_max() - self.nlanes)
+        data_b = self._data(self._int_min(), reverse=True)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+        for count in range(self._scalar_size()):
+            # load to cast
+            data_shl_a = self.load([a << count for a in data_a])
+            # left shift
+            shl = self.shl(vdata_a, count)
+            assert shl == data_shl_a
+            # load to cast
+            data_shr_a = self.load([a >> count for a in data_a])
+            # right shift
+            shr = self.shr(vdata_a, count)
+            assert shr == data_shr_a
+
+        # immediate shift counts of zero, or at/above the scalar width, are
+        # not applicable, so test only the range 1..scalar_size-1
+        for count in range(1, self._scalar_size()):
+            # load to cast
+            data_shl_a = self.load([a << count for a in data_a])
+            # left shift by an immediate constant
+            shli = self.shli(vdata_a, count)
+            assert shli == data_shl_a
+            # load to cast
+            data_shr_a = self.load([a >> count for a in data_a])
+            # right shift by an immediate constant
+            shri = self.shri(vdata_a, count)
+            assert shri == data_shr_a
+
+    def test_arithmetic_subadd_saturated(self):
+        if self.sfx in ("u32", "s32", "u64", "s64"):
+            return
+
+        data_a = self._data(self._int_max() - self.nlanes)
+        data_b = self._data(self._int_min(), reverse=True)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+        data_adds = self._int_clip([a + b for a, b in zip(data_a, data_b)])
+        adds = self.adds(vdata_a, vdata_b)
+        assert adds == data_adds
+
+        data_subs = self._int_clip([a - b for a, b in zip(data_a, data_b)])
+        subs = self.subs(vdata_a, vdata_b)
+        assert subs == data_subs
+
+    def test_math_max_min(self):
+        data_a = self._data()
+        data_b = self._data(self.nlanes)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+        data_max = [max(a, b) for a, b in zip(data_a, data_b)]
+        simd_max = self.max(vdata_a, vdata_b)
+        assert simd_max == data_max
+
+        data_min = [min(a, b) for a, b in zip(data_a, data_b)]
+        simd_min = self.min(vdata_a, vdata_b)
+        assert simd_min == data_min
+
+    @pytest.mark.parametrize("start", [-100, -10000, 0, 100, 10000])
+    def test_reduce_max_min(self, start):
+        """
+        Test intrinsics:
+            npyv_reduce_max_##sfx
+            npyv_reduce_min_##sfx
+        """
+        vdata_a = self.load(self._data(start))
+        assert self.reduce_max(vdata_a) == max(vdata_a)
+        assert self.reduce_min(vdata_a) == min(vdata_a)
+
+
+class _SIMD_FP32(_Test_Utility):
+    """
+    To only test single precision
+    """
+    def test_conversions(self):
+        """
+        Round to nearest even integer; assumes the CPU control register is
+        set to round-to-nearest.
+        Test intrinsics:
+            npyv_round_s32_##SFX
+        """
+        features = self._cpu_features()
+        if not self.npyv.simd_f64 and re.match(r".*(NEON|ASIMD)", features):
+            # emulating round-to-nearest-even is very costly on Armv7,
+            # so halves are rounded away from zero instead,
+            # e.g. 0.5 -> 1, -0.5 -> -1
+            _round = lambda v: int(v + (0.5 if v >= 0 else -0.5))
+        else:
+            _round = round
+        vdata_a = self.load(self._data())
+        vdata_a = self.sub(vdata_a, self.setall(0.5))
+        data_round = [_round(x) for x in vdata_a]
+        vround = self.round_s32(vdata_a)
+        assert vround == data_round
+
+class _SIMD_FP64(_Test_Utility):
+    """
+    To only test double precision
+    """
+    def test_conversions(self):
+        """
+        Round to nearest even integer; assumes the CPU control register is
+        set to round-to-nearest.
+        Test intrinsics:
+            npyv_round_s32_##SFX
+        """
+        vdata_a = self.load(self._data())
+        vdata_a = self.sub(vdata_a, self.setall(0.5))
+        vdata_b = self.mul(vdata_a, self.setall(-1.5))
+        data_round = [round(x) for x in list(vdata_a) + list(vdata_b)]
+        vround = self.round_s32(vdata_a, vdata_b)
+        assert vround == data_round
+
+class _SIMD_FP(_Test_Utility):
+    """
+    To test all float vector types at once
+    """
+    def test_arithmetic_fused(self):
+        vdata_a, vdata_b, vdata_c = [self.load(self._data())]*3
+        vdata_cx2 = self.add(vdata_c, vdata_c)
+        # multiply and add, a*b + c
+        data_fma = self.load([a * b + c for a, b, c in zip(vdata_a, vdata_b, vdata_c)])
+        fma = self.muladd(vdata_a, vdata_b, vdata_c)
+        assert fma == data_fma
+        # multiply and subtract, a*b - c
+        fms = self.mulsub(vdata_a, vdata_b, vdata_c)
+        data_fms = self.sub(data_fma, vdata_cx2)
+        assert fms == data_fms
+        # negate multiply and add, -(a*b) + c
+        nfma = self.nmuladd(vdata_a, vdata_b, vdata_c)
+        data_nfma = self.sub(vdata_cx2, data_fma)
+        assert nfma == data_nfma
+        # negate multiply and subtract, -(a*b) - c
+        nfms = self.nmulsub(vdata_a, vdata_b, vdata_c)
+        data_nfms = self.mul(data_fma, self.setall(-1))
+        assert nfms == data_nfms
+        # multiply, add for odd elements and subtract even elements.
+        # (a * b) -+ c
+        fmas = list(self.muladdsub(vdata_a, vdata_b, vdata_c))
+        assert fmas[0::2] == list(data_fms)[0::2]
+        assert fmas[1::2] == list(data_fma)[1::2]
+
+    def test_abs(self):
+        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
+        data = self._data()
+        vdata = self.load(self._data())
+
+        abs_cases = ((-0, 0), (ninf, pinf), (pinf, pinf), (nan, nan))
+        for case, desired in abs_cases:
+            data_abs = [desired]*self.nlanes
+            vabs = self.abs(self.setall(case))
+            assert vabs == pytest.approx(data_abs, nan_ok=True)
+
+        vabs = self.abs(self.mul(vdata, self.setall(-1)))
+        assert vabs == data
+
+    def test_sqrt(self):
+        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
+        data = self._data()
+        vdata = self.load(self._data())
+
+        sqrt_cases = ((-0.0, -0.0), (0.0, 0.0), (-1.0, nan), (ninf, nan), (pinf, pinf))
+        for case, desired in sqrt_cases:
+            data_sqrt = [desired]*self.nlanes
+            sqrt  = self.sqrt(self.setall(case))
+            assert sqrt == pytest.approx(data_sqrt, nan_ok=True)
+
+        data_sqrt = self.load([math.sqrt(x) for x in data]) # load to truncate precision
+        sqrt = self.sqrt(vdata)
+        assert sqrt == data_sqrt
+
+    def test_square(self):
+        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
+        data = self._data()
+        vdata = self.load(self._data())
+        # square
+        square_cases = ((nan, nan), (pinf, pinf), (ninf, pinf))
+        for case, desired in square_cases:
+            data_square = [desired]*self.nlanes
+            square  = self.square(self.setall(case))
+            assert square == pytest.approx(data_square, nan_ok=True)
+
+        data_square = [x*x for x in data]
+        square = self.square(vdata)
+        assert square == data_square
+
+    @pytest.mark.parametrize("intrin, func", [("ceil", math.ceil),
+    ("trunc", math.trunc), ("floor", math.floor), ("rint", round)])
+    def test_rounding(self, intrin, func):
+        """
+        Test intrinsics:
+            npyv_rint_##SFX
+            npyv_ceil_##SFX
+            npyv_trunc_##SFX
+            npyv_floor_##SFX
+        """
+        intrin_name = intrin
+        intrin = getattr(self, intrin)
+        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
+        # special cases
+        round_cases = ((nan, nan), (pinf, pinf), (ninf, ninf))
+        for case, desired in round_cases:
+            data_round = [desired]*self.nlanes
+            _round = intrin(self.setall(case))
+            assert _round == pytest.approx(data_round, nan_ok=True)
+
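+        # sweep magnitudes up to 2**20 with non-integral weights so the
+        # rounding modes differ on the fractional lanes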
+        for x in range(0, 2**20, 256**2):
+            for w in (-1.05, -1.10, -1.15, 1.05, 1.10, 1.15):
+                data = self.load([(x+a)*w for a in range(self.nlanes)])
+                data_round = [func(x) for x in data]
+                _round = intrin(data)
+                assert _round == data_round
+
+        # test large numbers
+        for i in (
+            1.1529215045988576e+18, 4.6116860183954304e+18,
+            5.902958103546122e+20, 2.3611832414184488e+21
+        ):
+            x = self.setall(i)
+            y = intrin(x)
+            data_round = [func(n) for n in x]
+            assert y == data_round
+
+        # signed zero
+        if intrin_name == "floor":
+            data_szero = (-0.0,)
+        else:
+            data_szero = (-0.0, -0.25, -0.30, -0.45, -0.5)
+
+        for w in data_szero:
+            _round = self._to_unsigned(intrin(self.setall(w)))
+            data_round = self._to_unsigned(self.setall(-0.0))
+            assert _round == data_round
+
+    @pytest.mark.parametrize("intrin", [
+        "max", "maxp", "maxn", "min", "minp", "minn"
+    ])
+    def test_max_min(self, intrin):
+        """
+        Test intrinsics:
+            npyv_max_##sfx
+            npyv_maxp_##sfx
+            npyv_maxn_##sfx
+            npyv_min_##sfx
+            npyv_minp_##sfx
+            npyv_minn_##sfx
+            npyv_reduce_max_##sfx
+            npyv_reduce_maxp_##sfx
+            npyv_reduce_maxn_##sfx
+            npyv_reduce_min_##sfx
+            npyv_reduce_minp_##sfx
+            npyv_reduce_minn_##sfx
+        """
+        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
+        chk_nan = {"xp": 1, "np": 1, "nn": 2, "xn": 2}.get(intrin[-2:], 0)
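+        # The trailing two letters select the NaN policy: the "p" variants
+        # (maxp/minp) prefer the non-NaN operand (1), the "n" variants
+        # (maxn/minn) propagate NaN (2); plain max/min do no NaN checks (0).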
+        func = eval(intrin[:3])
+        reduce_intrin = getattr(self, "reduce_" + intrin)
+        intrin = getattr(self, intrin)
+        hf_nlanes = self.nlanes//2
+
+        cases = (
+            ([0.0, -0.0], [-0.0, 0.0]),
+            ([10, -10],  [10, -10]),
+            ([pinf, 10], [10, ninf]),
+            ([10, pinf], [ninf, 10]),
+            ([10, -10], [10, -10]),
+            ([-10, 10], [-10, 10])
+        )
+        for op1, op2 in cases:
+            vdata_a = self.load(op1*hf_nlanes)
+            vdata_b = self.load(op2*hf_nlanes)
+            data = func(vdata_a, vdata_b)
+            simd = intrin(vdata_a, vdata_b)
+            assert simd == data
+            data = func(vdata_a)
+            simd = reduce_intrin(vdata_a)
+            assert simd == data
+
+        if not chk_nan:
+            return
+        if chk_nan == 1:
+            test_nan = lambda a, b: (
+                b if math.isnan(a) else a if math.isnan(b) else b
+            )
+        else:
+            test_nan = lambda a, b: (
+                nan if math.isnan(a) or math.isnan(b) else b
+            )
+        cases = (
+            (nan, 10),
+            (10, nan),
+            (nan, pinf),
+            (pinf, nan),
+            (nan, nan)
+        )
+        for op1, op2 in cases:
+            vdata_ab = self.load([op1, op2]*hf_nlanes)
+            data = test_nan(op1, op2)
+            simd = reduce_intrin(vdata_ab)
+            assert simd == pytest.approx(data, nan_ok=True)
+            vdata_a = self.setall(op1)
+            vdata_b = self.setall(op2)
+            data = [data] * self.nlanes
+            simd = intrin(vdata_a, vdata_b)
+            assert simd == pytest.approx(data, nan_ok=True)
+
+    def test_reciprocal(self):
+        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
+        data = self._data()
+        vdata = self.load(self._data())
+
+        recip_cases = ((nan, nan), (pinf, 0.0), (ninf, -0.0), (0.0, pinf), (-0.0, ninf))
+        for case, desired in recip_cases:
+            data_recip = [desired]*self.nlanes
+            recip = self.recip(self.setall(case))
+            assert recip == pytest.approx(data_recip, nan_ok=True)
+
+        data_recip = self.load([1/x for x in data]) # load to truncate precision
+        recip = self.recip(vdata)
+        assert recip == data_recip
+
+    def test_special_cases(self):
+        """
+        Compare Not NaN. Test intrinsics:
+            npyv_notnan_##SFX
+        """
+        nnan = self.notnan(self.setall(self._nan()))
+        assert nnan == [0]*self.nlanes
+
+    @pytest.mark.parametrize("intrin_name", [
+        "rint", "trunc", "ceil", "floor"
+    ])
+    def test_unary_invalid_fpexception(self, intrin_name):
+        intrin = getattr(self, intrin_name)
+        for d in [float("nan"), float("inf"), -float("inf")]:
+            v = self.setall(d)
+            clear_floatstatus()
+            intrin(v)
+            assert not check_floatstatus(invalid=True)
+
+    @pytest.mark.parametrize('py_comp,np_comp', [
+        (operator.lt, "cmplt"),
+        (operator.le, "cmple"),
+        (operator.gt, "cmpgt"),
+        (operator.ge, "cmpge"),
+        (operator.eq, "cmpeq"),
+        (operator.ne, "cmpneq")
+    ])
+    def test_comparison_with_nan(self, py_comp, np_comp):
+        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
+        mask_true = self._true_mask()
+
+        def to_bool(vector):
+            return [lane == mask_true for lane in vector]
+
+        intrin = getattr(self, np_comp)
+        cmp_cases = ((0, nan), (nan, 0), (nan, nan), (pinf, nan),
+                     (ninf, nan), (-0.0, +0.0))
+        for case_operand1, case_operand2 in cmp_cases:
+            data_a = [case_operand1]*self.nlanes
+            data_b = [case_operand2]*self.nlanes
+            vdata_a = self.setall(case_operand1)
+            vdata_b = self.setall(case_operand2)
+            vcmp = to_bool(intrin(vdata_a, vdata_b))
+            data_cmp = [py_comp(a, b) for a, b in zip(data_a, data_b)]
+            assert vcmp == data_cmp
+
+    @pytest.mark.parametrize("intrin", ["any", "all"])
+    @pytest.mark.parametrize("data", (
+        [float("nan"), 0],
+        [0, float("nan")],
+        [float("nan"), 1],
+        [1, float("nan")],
+        [float("nan"), float("nan")],
+        [0.0, -0.0],
+        [-0.0, 0.0],
+        [1.0, -0.0]
+    ))
+    def test_operators_crosstest(self, intrin, data):
+        """
+        Test intrinsics:
+            npyv_any_##SFX
+            npyv_all_##SFX
+        """
+        data_a = self.load(data * self.nlanes)
+        func = eval(intrin)
+        intrin = getattr(self, intrin)
+        desired = func(data_a)
+        simd = intrin(data_a)
+        assert bool(simd) == bool(desired)
+
+class _SIMD_ALL(_Test_Utility):
+    """
+    To test all vector types at once
+    """
+    def test_memory_load(self):
+        data = self._data()
+        # unaligned load
+        load_data = self.load(data)
+        assert load_data == data
+        # aligned load
+        loada_data = self.loada(data)
+        assert loada_data == data
+        # stream load
+        loads_data = self.loads(data)
+        assert loads_data == data
+        # load lower part
+        loadl = self.loadl(data)
+        loadl_half = list(loadl)[:self.nlanes//2]
+        data_half = data[:self.nlanes//2]
+        assert loadl_half == data_half
+        assert loadl != data # detect overflow
+
+    def test_memory_store(self):
+        data = self._data()
+        vdata = self.load(data)
+        # unaligned store
+        store = [0] * self.nlanes
+        self.store(store, vdata)
+        assert store == data
+        # aligned store
+        store_a = [0] * self.nlanes
+        self.storea(store_a, vdata)
+        assert store_a == data
+        # stream store
+        store_s = [0] * self.nlanes
+        self.stores(store_s, vdata)
+        assert store_s == data
+        # store lower part
+        store_l = [0] * self.nlanes
+        self.storel(store_l, vdata)
+        assert store_l[:self.nlanes//2] == data[:self.nlanes//2]
+        assert store_l != vdata # detect overflow
+        # store higher part
+        store_h = [0] * self.nlanes
+        self.storeh(store_h, vdata)
+        assert store_h[:self.nlanes//2] == data[self.nlanes//2:]
+        assert store_h != vdata  # detect overflow
+
+    @pytest.mark.parametrize("intrin, elsizes, scale, fill", [
+        ("self.load_tillz, self.load_till", (32, 64), 1, [0xffff]),
+        ("self.load2_tillz, self.load2_till", (32, 64), 2, [0xffff, 0x7fff]),
+    ])
+    def test_memory_partial_load(self, intrin, elsizes, scale, fill):
+        if self._scalar_size() not in elsizes:
+            return
+        npyv_load_tillz, npyv_load_till = eval(intrin)
+        data = self._data()
+        lanes = list(range(1, self.nlanes + 1))
+        lanes += [self.nlanes**2, self.nlanes**4] # test out of range
+        for n in lanes:
+            load_till = npyv_load_till(data, n, *fill)
+            load_tillz = npyv_load_tillz(data, n)
+            n *= scale
+            data_till = data[:n] + fill * ((self.nlanes-n) // scale)
+            assert load_till == data_till
+            data_tillz = data[:n] + [0] * (self.nlanes-n)
+            assert load_tillz == data_tillz
+
+    @pytest.mark.parametrize("intrin, elsizes, scale", [
+        ("self.store_till", (32, 64), 1),
+        ("self.store2_till", (32, 64), 2),
+    ])
+    def test_memory_partial_store(self, intrin, elsizes, scale):
+        if self._scalar_size() not in elsizes:
+            return
+        npyv_store_till = eval(intrin)
+        data = self._data()
+        data_rev = self._data(reverse=True)
+        vdata = self.load(data)
+        lanes = list(range(1, self.nlanes + 1))
+        lanes += [self.nlanes**2, self.nlanes**4]
+        for n in lanes:
+            data_till = data_rev.copy()
+            data_till[:n*scale] = data[:n*scale]
+            store_till = self._data(reverse=True)
+            npyv_store_till(store_till, n, vdata)
+            assert store_till == data_till
+
+    @pytest.mark.parametrize("intrin, elsizes, scale", [
+        ("self.loadn", (32, 64), 1),
+        ("self.loadn2", (32, 64), 2),
+    ])
+    def test_memory_noncont_load(self, intrin, elsizes, scale):
+        if self._scalar_size() not in elsizes:
+            return
+        npyv_loadn = eval(intrin)
+        for stride in range(-64, 64):
+            if stride < 0:
+                data = self._data(stride, -stride*self.nlanes)
+                data_stride = list(itertools.chain(
+                    *zip(*[data[-i::stride] for i in range(scale, 0, -1)])
+                ))
+            elif stride == 0:
+                data = self._data()
+                data_stride = data[0:scale] * (self.nlanes//scale)
+            else:
+                data = self._data(count=stride*self.nlanes)
+                data_stride = list(itertools.chain(
+                    *zip(*[data[i::stride] for i in range(scale)]))
+                )
+            data_stride = self.load(data_stride)  # cast unsigned
+            loadn = npyv_loadn(data, stride)
+            assert loadn == data_stride
+
+    @pytest.mark.parametrize("intrin, elsizes, scale, fill", [
+        ("self.loadn_tillz, self.loadn_till", (32, 64), 1, [0xffff]),
+        ("self.loadn2_tillz, self.loadn2_till", (32, 64), 2, [0xffff, 0x7fff]),
+    ])
+    def test_memory_noncont_partial_load(self, intrin, elsizes, scale, fill):
+        if self._scalar_size() not in elsizes:
+            return
+        npyv_loadn_tillz, npyv_loadn_till = eval(intrin)
+        lanes = list(range(1, self.nlanes + 1))
+        lanes += [self.nlanes**2, self.nlanes**4]
+        for stride in range(-64, 64):
+            if stride < 0:
+                data = self._data(stride, -stride*self.nlanes)
+                data_stride = list(itertools.chain(
+                    *zip(*[data[-i::stride] for i in range(scale, 0, -1)])
+                ))
+            elif stride == 0:
+                data = self._data()
+                data_stride = data[0:scale] * (self.nlanes//scale)
+            else:
+                data = self._data(count=stride*self.nlanes)
+                data_stride = list(itertools.chain(
+                    *zip(*[data[i::stride] for i in range(scale)])
+                ))
+            data_stride = list(self.load(data_stride))  # cast unsigned
+            for n in lanes:
+                nscale = n * scale
+                llanes = self.nlanes - nscale
+                data_stride_till = (
+                    data_stride[:nscale] + fill * (llanes//scale)
+                )
+                loadn_till = npyv_loadn_till(data, stride, n, *fill)
+                assert loadn_till == data_stride_till
+                data_stride_tillz = data_stride[:nscale] + [0] * llanes
+                loadn_tillz = npyv_loadn_tillz(data, stride, n)
+                assert loadn_tillz == data_stride_tillz
+
+    @pytest.mark.parametrize("intrin, elsizes, scale", [
+        ("self.storen", (32, 64), 1),
+        ("self.storen2", (32, 64), 2),
+    ])
+    def test_memory_noncont_store(self, intrin, elsizes, scale):
+        if self._scalar_size() not in elsizes:
+            return
+        npyv_storen = eval(intrin)
+        data = self._data()
+        vdata = self.load(data)
+        hlanes = self.nlanes // scale
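+        # hlanes = number of scale-sized element groups per vector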
+        for stride in range(1, 64):
+            data_storen = [0xff] * stride * self.nlanes
+            for s in range(0, hlanes*stride, stride):
+                i = (s//stride)*scale
+                data_storen[s:s+scale] = data[i:i+scale]
+            storen = [0xff] * stride * self.nlanes
+            storen += [0x7f]*64
+            npyv_storen(storen, stride, vdata)
+            assert storen[:-64] == data_storen
+            assert storen[-64:] == [0x7f]*64  # detect overflow
+
+        for stride in range(-64, 0):
+            data_storen = [0xff] * -stride * self.nlanes
+            for s in range(0, hlanes*stride, stride):
+                i = (s//stride)*scale
+                data_storen[s-scale:s or None] = data[i:i+scale]
+            storen = [0x7f]*64
+            storen += [0xff] * -stride * self.nlanes
+            npyv_storen(storen, stride, vdata)
+            assert storen[64:] == data_storen
+            assert storen[:64] == [0x7f]*64  # detect overflow
+        # stride 0
+        data_storen = [0x7f] * self.nlanes
+        storen = data_storen.copy()
+        data_storen[0:scale] = data[-scale:]
+        npyv_storen(storen, 0, vdata)
+        assert storen == data_storen
+
+    @pytest.mark.parametrize("intrin, elsizes, scale", [
+        ("self.storen_till", (32, 64), 1),
+        ("self.storen2_till", (32, 64), 2),
+    ])
+    def test_memory_noncont_partial_store(self, intrin, elsizes, scale):
+        if self._scalar_size() not in elsizes:
+            return
+        npyv_storen_till = eval(intrin)
+        data = self._data()
+        vdata = self.load(data)
+        lanes = list(range(1, self.nlanes + 1))
+        lanes += [self.nlanes**2, self.nlanes**4]
+        hlanes = self.nlanes // scale
+        for stride in range(1, 64):
+            for n in lanes:
+                data_till = [0xff] * stride * self.nlanes
+                tdata = data[:n*scale] + [0xff] * (self.nlanes-n*scale)
+                for s in range(0, hlanes*stride, stride)[:n]:
+                    i = (s//stride)*scale
+                    data_till[s:s+scale] = tdata[i:i+scale]
+                storen_till = [0xff] * stride * self.nlanes
+                storen_till += [0x7f]*64
+                npyv_storen_till(storen_till, stride, n, vdata)
+                assert storen_till[:-64] == data_till
+                assert storen_till[-64:] == [0x7f]*64  # detect overflow
+
+        for stride in range(-64, 0):
+            for n in lanes:
+                data_till = [0xff] * -stride * self.nlanes
+                tdata = data[:n*scale] + [0xff] * (self.nlanes-n*scale)
+                for s in range(0, hlanes*stride, stride)[:n]:
+                    i = (s//stride)*scale
+                    data_till[s-scale:s or None] = tdata[i:i+scale]
+                storen_till = [0x7f]*64
+                storen_till += [0xff] * -stride * self.nlanes
+                npyv_storen_till(storen_till, stride, n, vdata)
+                assert storen_till[64:] == data_till
+                assert storen_till[:64] == [0x7f]*64  # detect overflow
+
+        # stride 0
+        for n in lanes:
+            data_till = [0x7f] * self.nlanes
+            storen_till = data_till.copy()
+            data_till[0:scale] = data[:n*scale][-scale:]
+            npyv_storen_till(storen_till, 0, n, vdata)
+            assert storen_till == data_till
+
+    @pytest.mark.parametrize("intrin, table_size, elsize", [
+        ("self.lut32", 32, 32),
+        ("self.lut16", 16, 64)
+    ])
+    def test_lut(self, intrin, table_size, elsize):
+        """
+        Test lookup table intrinsics:
+            npyv_lut32_##sfx
+            npyv_lut16_##sfx
+        """
+        if elsize != self._scalar_size():
+            return
+        intrin = eval(intrin)
+        idx_intrin = getattr(self.npyv, f"setall_u{elsize}")
+        table = range(0, table_size)
+        for i in table:
+            broadi = self.setall(i)
+            idx = idx_intrin(i)
+            lut = intrin(table, idx)
+            assert lut == broadi
+
+    def test_misc(self):
+        broadcast_zero = self.zero()
+        assert broadcast_zero == [0] * self.nlanes
+        for i in range(1, 10):
+            broadcasti = self.setall(i)
+            assert broadcasti == [i] * self.nlanes
+
+        data_a, data_b = self._data(), self._data(reverse=True)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+        # The Python level of npyv_set_* doesn't support ignoring extra
+        # specified lanes or filling non-specified lanes with zero.
+        vset = self.set(*data_a)
+        assert vset == data_a
+        # The Python level of npyv_setf_* doesn't support ignoring extra
+        # specified lanes or filling non-specified lanes with the specified
+        # fill scalar.
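+        # (at the C level, npyv_setf_u8(0xff, 1, 2, ...) would fill any
+        # remaining lanes with the first argument, 0xff)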
+        vsetf = self.setf(10, *data_a)
+        assert vsetf == data_a
+
+        # We're only testing the sanity of _simd's type-vector;
+        # the reinterpret* intrinsics themselves are tested by the compiler
+        # during the build of the _simd module.
+        sfxes = ["u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64"]
+        if self.npyv.simd_f64:
+            sfxes.append("f64")
+        if self.npyv.simd_f32:
+            sfxes.append("f32")
+        for sfx in sfxes:
+            vec_name = getattr(self, "reinterpret_" + sfx)(vdata_a).__name__
+            assert vec_name == "npyv_" + sfx
+
+        # select & mask operations
+        select_a = self.select(self.cmpeq(self.zero(), self.zero()), vdata_a, vdata_b)
+        assert select_a == data_a
+        select_b = self.select(self.cmpneq(self.zero(), self.zero()), vdata_a, vdata_b)
+        assert select_b == data_b
+
+        # test extract elements
+        assert self.extract0(vdata_b) == vdata_b[0]
+
+        # cleanup intrinsic is only used with AVX for
+        # zeroing registers to avoid the AVX-SSE transition penalty,
+        # so nothing to test here
+        self.npyv.cleanup()
+
+    def test_reorder(self):
+        data_a, data_b  = self._data(), self._data(reverse=True)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+        # lower half part
+        data_a_lo = data_a[:self.nlanes//2]
+        data_b_lo = data_b[:self.nlanes//2]
+        # higher half part
+        data_a_hi = data_a[self.nlanes//2:]
+        data_b_hi = data_b[self.nlanes//2:]
+        # combine two lower parts
+        combinel = self.combinel(vdata_a, vdata_b)
+        assert combinel == data_a_lo + data_b_lo
+        # combine two higher parts
+        combineh = self.combineh(vdata_a, vdata_b)
+        assert combineh == data_a_hi + data_b_hi
+        # combine x2
+        combine = self.combine(vdata_a, vdata_b)
+        assert combine == (data_a_lo + data_b_lo, data_a_hi + data_b_hi)
+
+        # zip(interleave)
+        data_zipl = self.load([
+            v for p in zip(data_a_lo, data_b_lo) for v in p
+        ])
+        data_ziph = self.load([
+            v for p in zip(data_a_hi, data_b_hi) for v in p
+        ])
+        vzip = self.zip(vdata_a, vdata_b)
+        assert vzip == (data_zipl, data_ziph)
+        vzip = [0]*self.nlanes*2
+        self._x2("store")(vzip, (vdata_a, vdata_b))
+        assert vzip == list(data_zipl) + list(data_ziph)
+
+        # unzip(deinterleave)
+        unzip = self.unzip(data_zipl, data_ziph)
+        assert unzip == (data_a, data_b)
+        unzip = self._x2("load")(list(data_zipl) + list(data_ziph))
+        assert unzip == (data_a, data_b)
+
+    def test_reorder_rev64(self):
+        # Reverse elements of each 64-bit lane
+        ssize = self._scalar_size()
+        if ssize == 64:
+            return
+        data_rev64 = [
+            y for x in range(0, self.nlanes, 64//ssize)
+              for y in reversed(range(x, x + 64//ssize))
+        ]
+        rev64 = self.rev64(self.load(range(self.nlanes)))
+        assert rev64 == data_rev64
+
+    def test_reorder_permi128(self):
+        """
+        Test permuting elements for each 128-bit lane.
+        npyv_permi128_##sfx
+        """
+        ssize = self._scalar_size()
+        if ssize < 32:
+            return
+        data = self.load(self._data())
+        permn = 128//ssize
+        permd = permn-1
+        nlane128 = self.nlanes//permn
+        shfl = [0, 1] if ssize == 64 else [0, 2, 4, 6]
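+        # decode `i` as a packed immediate: each bit-field (1 bit per index
+        # for 64-bit lanes, 2 bits for 32-bit lanes) selects the source
+        # element within its 128-bit lane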
+        for i in range(permn):
+            indices = [(i >> shf) & permd for shf in shfl]
+            vperm = self.permi128(data, *indices)
+            data_vperm = [
+                data[j + (e & -permn)]
+                for e, j in enumerate(indices*nlane128)
+            ]
+            assert vperm == data_vperm
+
+    @pytest.mark.parametrize('func, intrin', [
+        (operator.lt, "cmplt"),
+        (operator.le, "cmple"),
+        (operator.gt, "cmpgt"),
+        (operator.ge, "cmpge"),
+        (operator.eq, "cmpeq")
+    ])
+    def test_operators_comparison(self, func, intrin):
+        if self._is_fp():
+            data_a = self._data()
+        else:
+            data_a = self._data(self._int_max() - self.nlanes)
+        data_b = self._data(self._int_min(), reverse=True)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+        intrin = getattr(self, intrin)
+
+        mask_true = self._true_mask()
+        def to_bool(vector):
+            return [lane == mask_true for lane in vector]
+
+        data_cmp = [func(a, b) for a, b in zip(data_a, data_b)]
+        cmp = to_bool(intrin(vdata_a, vdata_b))
+        assert cmp == data_cmp
+
+    def test_operators_logical(self):
+        if self._is_fp():
+            data_a = self._data()
+        else:
+            data_a = self._data(self._int_max() - self.nlanes)
+        data_b = self._data(self._int_min(), reverse=True)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+        if self._is_fp():
+            data_cast_a = self._to_unsigned(vdata_a)
+            data_cast_b = self._to_unsigned(vdata_b)
+            cast, cast_data = self._to_unsigned, self._to_unsigned
+        else:
+            data_cast_a, data_cast_b = data_a, data_b
+            cast, cast_data = lambda a: a, self.load
+
+        data_xor = cast_data([a ^ b for a, b in zip(data_cast_a, data_cast_b)])
+        vxor = cast(self.xor(vdata_a, vdata_b))
+        assert vxor == data_xor
+
+        data_or  = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)])
+        vor  = cast(getattr(self, "or")(vdata_a, vdata_b))
+        assert vor == data_or
+
+        data_and = cast_data([a & b for a, b in zip(data_cast_a, data_cast_b)])
+        vand = cast(getattr(self, "and")(vdata_a, vdata_b))
+        assert vand == data_and
+
+        data_not = cast_data([~a for a in data_cast_a])
+        vnot = cast(getattr(self, "not")(vdata_a))
+        assert vnot == data_not
+
+        if self.sfx not in ("u8",):
+            return
+        data_andc = [a & ~b for a, b in zip(data_cast_a, data_cast_b)]
+        vandc = cast(getattr(self, "andc")(vdata_a, vdata_b))
+        assert vandc == data_andc
+
+    @pytest.mark.parametrize("intrin", ["any", "all"])
+    @pytest.mark.parametrize("data", (
+        [1, 2, 3, 4],
+        [-1, -2, -3, -4],
+        [0, 1, 2, 3, 4],
+        [0x7f, 0x7fff, 0x7fffffff, 0x7fffffffffffffff],
+        [0, -1, -2, -3, 4],
+        [0],
+        [1],
+        [-1]
+    ))
+    def test_operators_crosstest(self, intrin, data):
+        """
+        Test intrinsics:
+            npyv_any_##SFX
+            npyv_all_##SFX
+        """
+        data_a = self.load(data * self.nlanes)
+        func = eval(intrin)
+        intrin = getattr(self, intrin)
+        desired = func(data_a)
+        simd = intrin(data_a)
+        assert bool(simd) == desired
+
+    def test_conversion_boolean(self):
+        bsfx = "b" + self.sfx[1:]
+        to_boolean = getattr(self.npyv, "cvt_%s_%s" % (bsfx, self.sfx))
+        from_boolean = getattr(self.npyv, "cvt_%s_%s" % (self.sfx, bsfx))
+
+        false_vb = to_boolean(self.setall(0))
+        true_vb  = self.cmpeq(self.setall(0), self.setall(0))
+        assert false_vb != true_vb
+
+        false_vsfx = from_boolean(false_vb)
+        true_vsfx = from_boolean(true_vb)
+        assert false_vsfx != true_vsfx
+
+    def test_conversion_expand(self):
+        """
+        Test expand intrinsics:
+            npyv_expand_u16_u8
+            npyv_expand_u32_u16
+        """
+        if self.sfx not in ("u8", "u16"):
+            return
+        totype = self.sfx[0]+str(int(self.sfx[1:])*2)
+        expand = getattr(self.npyv, f"expand_{totype}_{self.sfx}")
+        # close enough to the edge to detect any deviation
+        data  = self._data(self._int_max() - self.nlanes)
+        vdata = self.load(data)
+        edata = expand(vdata)
+        # lower half part
+        data_lo = data[:self.nlanes//2]
+        # higher half part
+        data_hi = data[self.nlanes//2:]
+        assert edata == (data_lo, data_hi)
+
+    def test_arithmetic_subadd(self):
+        if self._is_fp():
+            data_a = self._data()
+        else:
+            data_a = self._data(self._int_max() - self.nlanes)
+        data_b = self._data(self._int_min(), reverse=True)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+        # non-saturated
+        data_add = self.load([a + b for a, b in zip(data_a, data_b)]) # load to cast
+        add  = self.add(vdata_a, vdata_b)
+        assert add == data_add
+        data_sub  = self.load([a - b for a, b in zip(data_a, data_b)])
+        sub  = self.sub(vdata_a, vdata_b)
+        assert sub == data_sub
+
+    def test_arithmetic_mul(self):
+        if self.sfx in ("u64", "s64"):
+            return
+
+        if self._is_fp():
+            data_a = self._data()
+        else:
+            data_a = self._data(self._int_max() - self.nlanes)
+        data_b = self._data(self._int_min(), reverse=True)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+        data_mul = self.load([a * b for a, b in zip(data_a, data_b)])
+        mul = self.mul(vdata_a, vdata_b)
+        assert mul == data_mul
+
+    def test_arithmetic_div(self):
+        if not self._is_fp():
+            return
+
+        data_a, data_b = self._data(), self._data(reverse=True)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+        # load to truncate f64 to precision of f32
+        data_div = self.load([a / b for a, b in zip(data_a, data_b)])
+        div = self.div(vdata_a, vdata_b)
+        assert div == data_div
+
+    def test_arithmetic_intdiv(self):
+        """
+        Test integer division intrinsics:
+            npyv_divisor_##sfx
+            npyv_divc_##sfx
+        """
+        if self._is_fp():
+            return
+
+        int_min = self._int_min()
+        def trunc_div(a, d):
+            """
+            Divide towards zero; works with large integers > 2^53
+            and wraps around on overflow, as C does.
+            """
+            if d == -1 and a == int_min:
+                return a
+            sign_a, sign_d = a < 0, d < 0
+            if a == 0 or sign_a == sign_d:
+                return a // d
+            return (a + sign_d - sign_a) // d + 1
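+        # e.g. trunc_div(-7, 2) == -3 (truncation toward zero), whereas
+        # Python's floor division gives -7 // 2 == -4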
+
+        data = [1, -int_min]  # to test overflow
+        data += range(0, 2**8, 2**5)
+        data += range(0, 2**8, 2**5-1)
+        bsize = self._scalar_size()
+        if bsize > 8:
+            data += range(2**8, 2**16, 2**13)
+            data += range(2**8, 2**16, 2**13-1)
+        if bsize > 16:
+            data += range(2**16, 2**32, 2**29)
+            data += range(2**16, 2**32, 2**29-1)
+        if bsize > 32:
+            data += range(2**32, 2**64, 2**61)
+            data += range(2**32, 2**64, 2**61-1)
+        # negate
+        data += [-x for x in data]
+        for dividend, divisor in itertools.product(data, data):
+            divisor = self.setall(divisor)[0]  # cast
+            if divisor == 0:
+                continue
+            dividend = self.load(self._data(dividend))
+            data_divc = [trunc_div(a, divisor) for a in dividend]
+            divisor_parms = self.divisor(divisor)
+            divc = self.divc(dividend, divisor_parms)
+            assert divc == data_divc
+
+    def test_arithmetic_reduce_sum(self):
+        """
+        Test reduce sum intrinsics:
+            npyv_sum_##sfx
+        """
+        if self.sfx not in ("u32", "u64", "f32", "f64"):
+            return
+        # reduce sum
+        data = self._data()
+        vdata = self.load(data)
+
+        data_sum = sum(data)
+        vsum = self.sum(vdata)
+        assert vsum == data_sum
+
+    def test_arithmetic_reduce_sumup(self):
+        """
+        Test extend reduce sum intrinsics:
+            npyv_sumup_##sfx
+        """
+        if self.sfx not in ("u8", "u16"):
+            return
+        rdata = (0, self.nlanes, self._int_min(), self._int_max()-self.nlanes)
+        for r in rdata:
+            data = self._data(r)
+            vdata = self.load(data)
+            data_sum = sum(data)
+            vsum = self.sumup(vdata)
+            assert vsum == data_sum
+
+    def test_mask_conditional(self):
+        """
+        Conditional addition and subtraction for all supported data types.
+        Test intrinsics:
+            npyv_ifadd_##SFX, npyv_ifsub_##SFX
+        """
+        vdata_a = self.load(self._data())
+        vdata_b = self.load(self._data(reverse=True))
+        true_mask  = self.cmpeq(self.zero(), self.zero())
+        false_mask = self.cmpneq(self.zero(), self.zero())
+
+        data_sub = self.sub(vdata_b, vdata_a)
+        ifsub = self.ifsub(true_mask, vdata_b, vdata_a, vdata_b)
+        assert ifsub == data_sub
+        ifsub = self.ifsub(false_mask, vdata_a, vdata_b, vdata_b)
+        assert ifsub == vdata_b
+
+        data_add = self.add(vdata_b, vdata_a)
+        ifadd = self.ifadd(true_mask, vdata_b, vdata_a, vdata_b)
+        assert ifadd == data_add
+        ifadd = self.ifadd(false_mask, vdata_a, vdata_b, vdata_b)
+        assert ifadd == vdata_b
+
+        if not self._is_fp():
+            return
+        data_div = self.div(vdata_b, vdata_a)
+        ifdiv = self.ifdiv(true_mask, vdata_b, vdata_a, vdata_b)
+        assert ifdiv == data_div
+        ifdivz = self.ifdivz(true_mask, vdata_b, vdata_a)
+        assert ifdivz == data_div
+        ifdiv = self.ifdiv(false_mask, vdata_a, vdata_b, vdata_b)
+        assert ifdiv == vdata_b
+        ifdivz = self.ifdivz(false_mask, vdata_a, vdata_b)
+        assert ifdivz == self.zero()
+
+bool_sfx = ("b8", "b16", "b32", "b64")
+int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64")
+fp_sfx  = ("f32", "f64")
+all_sfx = int_sfx + fp_sfx
+tests_registry = {
+    bool_sfx: _SIMD_BOOL,
+    int_sfx : _SIMD_INT,
+    fp_sfx  : _SIMD_FP,
+    ("f32",): _SIMD_FP32,
+    ("f64",): _SIMD_FP64,
+    all_sfx : _SIMD_ALL
+}
+for target_name, npyv in targets.items():
+    simd_width = npyv.simd if npyv else ''
+    pretty_name = target_name.split('__') # multi-target separator
+    if len(pretty_name) > 1:
+        # multi-target
+        pretty_name = f"({' '.join(pretty_name)})"
+    else:
+        pretty_name = pretty_name[0]
+
+    skip = ""
+    skip_sfx = dict()
+    if not npyv:
+        skip = f"target '{pretty_name}' isn't supported by current machine"
+    elif not npyv.simd:
+        skip = f"target '{pretty_name}' isn't supported by NPYV"
+    else:
+        if not npyv.simd_f32:
+            skip_sfx["f32"] = f"target '{pretty_name}' "\
+                               "doesn't support single-precision"
+        if not npyv.simd_f64:
+            skip_sfx["f64"] = f"target '{pretty_name}' doesn't"\
+                               "support double-precision"
+
+    for sfxes, cls in tests_registry.items():
+        for sfx in sfxes:
+            skip_m = skip_sfx.get(sfx, skip)
+            inhr = (cls,)
+            attr = dict(npyv=targets[target_name], sfx=sfx, target_name=target_name)
+            tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr)
+            if skip_m:
+                pytest.mark.skip(reason=skip_m)(tcls)
+            globals()[tcls.__name__] = tcls
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_simd_module.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_simd_module.py
new file mode 100644
index 00000000..4fbaa9f3
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_simd_module.py
@@ -0,0 +1,101 @@
+import pytest
+from numpy.core._simd import targets
+"""
+This test unit only checks the sanity of common functionality, so it is
+enough to take one submodule, representing any of the enabled SIMD
+extensions, to run the tests on; a second submodule is required only for
+the single check on mixing data types across submodules.
+"""
+npyvs = [npyv_mod for npyv_mod in targets.values() if npyv_mod and npyv_mod.simd]
+npyv, npyv2 = (npyvs + [None, None])[:2]
+
+unsigned_sfx = ["u8", "u16", "u32", "u64"]
+signed_sfx = ["s8", "s16", "s32", "s64"]
+fp_sfx = []
+if npyv and npyv.simd_f32:
+    fp_sfx.append("f32")
+if npyv and npyv.simd_f64:
+    fp_sfx.append("f64")
+
+int_sfx = unsigned_sfx + signed_sfx
+all_sfx = int_sfx + fp_sfx
+
+@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support")
+class Test_SIMD_MODULE:
+
+    @pytest.mark.parametrize('sfx', all_sfx)
+    def test_num_lanes(self, sfx):
+        nlanes = getattr(npyv, "nlanes_" + sfx)
+        vector = getattr(npyv, "setall_" + sfx)(1)
+        assert len(vector) == nlanes
+
+    @pytest.mark.parametrize('sfx', all_sfx)
+    def test_type_name(self, sfx):
+        vector = getattr(npyv, "setall_" + sfx)(1)
+        assert vector.__name__ == "npyv_" + sfx
+
+    def test_raises(self):
+        a, b = [npyv.setall_u32(1)]*2
+        for sfx in all_sfx:
+            vcb = lambda intrin: getattr(npyv, f"{intrin}_{sfx}")
+            pytest.raises(TypeError, vcb("add"), a)
+            pytest.raises(TypeError, vcb("add"), a, b, a)
+            pytest.raises(TypeError, vcb("setall"))
+            pytest.raises(TypeError, vcb("setall"), [1])
+            pytest.raises(TypeError, vcb("load"), 1)
+            pytest.raises(ValueError, vcb("load"), [1])
+            pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a))
+
+    @pytest.mark.skipif(not npyv2, reason=(
+        "could not find a second SIMD extension with NPYV support"
+    ))
+    def test_nomix(self):
+        # mix among submodules isn't allowed
+        a = npyv.setall_u32(1)
+        a2 = npyv2.setall_u32(1)
+        pytest.raises(TypeError, npyv.add_u32, a2, a2)
+        pytest.raises(TypeError, npyv2.add_u32, a, a)
+
+    @pytest.mark.parametrize('sfx', unsigned_sfx)
+    def test_unsigned_overflow(self, sfx):
+        nlanes = getattr(npyv, "nlanes_" + sfx)
+        maxu = (1 << int(sfx[1:])) - 1
+        maxu_72 = (1 << 72) - 1
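+        # values wider than the lane (72 bits here) must truncate to the
+        # lane width, i.e. reduce modulo 2**width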
+        lane = getattr(npyv, "setall_" + sfx)(maxu_72)[0]
+        assert lane == maxu
+        lanes = getattr(npyv, "load_" + sfx)([maxu_72] * nlanes)
+        assert lanes == [maxu] * nlanes
+        lane = getattr(npyv, "setall_" + sfx)(-1)[0]
+        assert lane == maxu
+        lanes = getattr(npyv, "load_" + sfx)([-1] * nlanes)
+        assert lanes == [maxu] * nlanes
+
+    @pytest.mark.parametrize('sfx', signed_sfx)
+    def test_signed_overflow(self, sfx):
+        nlanes = getattr(npyv, "nlanes_" + sfx)
+        maxs_72 = (1 << 71) - 1
+        lane = getattr(npyv, "setall_" + sfx)(maxs_72)[0]
+        assert lane == -1
+        lanes = getattr(npyv, "load_" + sfx)([maxs_72] * nlanes)
+        assert lanes == [-1] * nlanes
+        mins_72 = -1 << 71
+        lane = getattr(npyv, "setall_" + sfx)(mins_72)[0]
+        assert lane == 0
+        lanes = getattr(npyv, "load_" + sfx)([mins_72] * nlanes)
+        assert lanes == [0] * nlanes
+
+    def test_truncate_f32(self):
+        if not npyv.simd_f32:
+            pytest.skip("F32 isn't support by the SIMD extension")
+        f32 = npyv.setall_f32(0.1)[0]
+        assert f32 != 0.1
+        assert round(f32, 1) == 0.1
+
+    def test_compare(self):
+        data_range = range(0, npyv.nlanes_u32)
+        vdata = npyv.load_u32(data_range)
+        assert vdata == list(data_range)
+        assert vdata == tuple(data_range)
+        for i in data_range:
+            assert vdata[i] == data_range[i]
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_strings.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_strings.py
new file mode 100644
index 00000000..42f775e8
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_strings.py
@@ -0,0 +1,99 @@
+import pytest
+
+import operator
+import numpy as np
+
+from numpy.testing import assert_array_equal
+
+
+COMPARISONS = [
+    (operator.eq, np.equal, "=="),
+    (operator.ne, np.not_equal, "!="),
+    (operator.lt, np.less, "<"),
+    (operator.le, np.less_equal, "<="),
+    (operator.gt, np.greater, ">"),
+    (operator.ge, np.greater_equal, ">="),
+]
+
+
+@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
+def test_mixed_string_comparison_ufuncs_fail(op, ufunc, sym):
+    arr_string = np.array(["a", "b"], dtype="S")
+    arr_unicode = np.array(["a", "c"], dtype="U")
+
+    with pytest.raises(TypeError, match="did not contain a loop"):
+        ufunc(arr_string, arr_unicode)
+
+    with pytest.raises(TypeError, match="did not contain a loop"):
+        ufunc(arr_unicode, arr_string)
+
+@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
+def test_mixed_string_comparisons_ufuncs_with_cast(op, ufunc, sym):
+    arr_string = np.array(["a", "b"], dtype="S")
+    arr_unicode = np.array(["a", "c"], dtype="U")
+
+    # While there is no loop, manual casting is acceptable:
+    res1 = ufunc(arr_string, arr_unicode, signature="UU->?", casting="unsafe")
+    res2 = ufunc(arr_string, arr_unicode, signature="SS->?", casting="unsafe")
+
+    expected = op(arr_string.astype('U'), arr_unicode)
+    assert_array_equal(res1, expected)
+    assert_array_equal(res2, expected)
+
+
+@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
+@pytest.mark.parametrize("dtypes", [
+        ("S2", "S2"), ("S2", "S10"),
+        ("U1"), (">U1", ">U1"),
+        ("U10")])
+@pytest.mark.parametrize("aligned", [True, False])
+def test_string_comparisons(op, ufunc, sym, dtypes, aligned):
+    # ensure native byte-order for the first view to stay within unicode range
+    native_dt = np.dtype(dtypes[0]).newbyteorder("=")
+    arr = np.arange(2**15).view(native_dt).astype(dtypes[0])
+    if not aligned:
+        # Make `arr` unaligned:
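+        # (offsetting the view by one byte from an aligned allocation
+        # guarantees misalignment for any itemsize > 1)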
+        new = np.zeros(arr.nbytes + 1, dtype=np.uint8)[1:].view(dtypes[0])
+        new[...] = arr
+        arr = new
+
+    arr2 = arr.astype(dtypes[1], copy=True)
+    np.random.shuffle(arr2)
+    arr[0] = arr2[0]  # make sure one matches
+
+    expected = [op(d1, d2) for d1, d2 in zip(arr.tolist(), arr2.tolist())]
+    assert_array_equal(op(arr, arr2), expected)
+    assert_array_equal(ufunc(arr, arr2), expected)
+    assert_array_equal(np.compare_chararrays(arr, arr2, sym, False), expected)
+
+    expected = [op(d2, d1) for d1, d2 in zip(arr.tolist(), arr2.tolist())]
+    assert_array_equal(op(arr2, arr), expected)
+    assert_array_equal(ufunc(arr2, arr), expected)
+    assert_array_equal(np.compare_chararrays(arr2, arr, sym, False), expected)
+
+
+@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
+@pytest.mark.parametrize("dtypes", [
+        ("S2", "S2"), ("S2", "S10"), ("U10")])
+def test_string_comparisons_empty(op, ufunc, sym, dtypes):
+    arr = np.empty((1, 0, 1, 5), dtype=dtypes[0])
+    arr2 = np.empty((100, 1, 0, 1), dtype=dtypes[1])
+
+    expected = np.empty(np.broadcast_shapes(arr.shape, arr2.shape), dtype=bool)
+    assert_array_equal(op(arr, arr2), expected)
+    assert_array_equal(ufunc(arr, arr2), expected)
+    assert_array_equal(np.compare_chararrays(arr, arr2, sym, False), expected)
+
+
+@pytest.mark.parametrize("str_dt", ["S", "U"])
+@pytest.mark.parametrize("float_dt", np.typecodes["AllFloat"])
+def test_float_to_string_cast(str_dt, float_dt):
+    float_dt = np.dtype(float_dt)
+    fi = np.finfo(float_dt)
+    arr = np.array([np.nan, np.inf, -np.inf, fi.max, fi.min], dtype=float_dt)
+    expected = ["nan", "inf", "-inf", repr(fi.max), repr(fi.min)]
+    if float_dt.kind == 'c':
+        expected = [f"({r}+0j)" for r in expected]
+
+    res = arr.astype(str_dt)
+    assert_array_equal(res, np.array(expected, dtype=str_dt))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_ufunc.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_ufunc.py
new file mode 100644
index 00000000..9fbc4b2d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_ufunc.py
@@ -0,0 +1,2996 @@
+import warnings
+import itertools
+import sys
+import ctypes as ct
+
+import pytest
+from pytest import param
+
+import numpy as np
+import numpy.core._umath_tests as umt
+import numpy.linalg._umath_linalg as uml
+import numpy.core._operand_flag_tests as opflag_tests
+import numpy.core._rational_tests as _rational_tests
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_array_equal,
+    assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
+    assert_allclose, HAS_REFCOUNT, suppress_warnings, IS_WASM, IS_PYPY,
+    )
+from numpy.testing._private.utils import requires_memory
+from numpy.compat import pickle
+
+
+UNARY_UFUNCS = [obj for obj in np.core.umath.__dict__.values()
+                    if isinstance(obj, np.ufunc)]
+UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types]
+
+
+class TestUfuncKwargs:
+    def test_kwarg_exact(self):
+        assert_raises(TypeError, np.add, 1, 2, castingx='safe')
+        assert_raises(TypeError, np.add, 1, 2, dtypex=int)
+        assert_raises(TypeError, np.add, 1, 2, extobjx=[4096])
+        assert_raises(TypeError, np.add, 1, 2, outx=None)
+        assert_raises(TypeError, np.add, 1, 2, sigx='ii->i')
+        assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i')
+        assert_raises(TypeError, np.add, 1, 2, subokx=False)
+        assert_raises(TypeError, np.add, 1, 2, wherex=[True])
+
+    def test_sig_signature(self):
+        assert_raises(TypeError, np.add, 1, 2, sig='ii->i',
+                      signature='ii->i')
+
+    def test_sig_dtype(self):
+        assert_raises(TypeError, np.add, 1, 2, sig='ii->i',
+                      dtype=int)
+        assert_raises(TypeError, np.add, 1, 2, signature='ii->i',
+                      dtype=int)
+
+    def test_extobj_refcount(self):
+        # Should not segfault with USE_DEBUG.
+        assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True)
+
+
+class TestUfuncGenericLoops:
+    """Test generic loops.
+
+    The loops to be tested are:
+
+        PyUFunc_ff_f_As_dd_d
+        PyUFunc_ff_f
+        PyUFunc_dd_d
+        PyUFunc_gg_g
+        PyUFunc_FF_F_As_DD_D
+        PyUFunc_DD_D
+        PyUFunc_FF_F
+        PyUFunc_GG_G
+        PyUFunc_OO_O
+        PyUFunc_OO_O_method
+        PyUFunc_f_f_As_d_d
+        PyUFunc_d_d
+        PyUFunc_f_f
+        PyUFunc_g_g
+        PyUFunc_F_F_As_D_D
+        PyUFunc_F_F
+        PyUFunc_D_D
+        PyUFunc_G_G
+        PyUFunc_O_O
+        PyUFunc_O_O_method
+        PyUFunc_On_Om
+
+    Where:
+
+        f -- float
+        d -- double
+        g -- long double
+        F -- complex float
+        D -- complex double
+        G -- complex long double
+        O -- python object
+
+    It is difficult to ensure that each of these loops is entered from the
+    Python level, as the special-cased loops are a moving target and the
+    corresponding types are architecture-dependent. We probably need to
+    define C-level testing ufuncs to get at them. For the time being, I've
+    just looked at the signatures registered in the build directory to find
+    relevant functions.
+
+    """
+    np_dtypes = [
+        (np.single, np.single), (np.single, np.double),
+        (np.csingle, np.csingle), (np.csingle, np.cdouble),
+        (np.double, np.double), (np.longdouble, np.longdouble),
+        (np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)]
+
+    @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
+    def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1):
+        xs = np.full(10, input_dtype(x), dtype=output_dtype)
+        ys = f(xs)[::2]
+        assert_allclose(ys, y)
+        assert_equal(ys.dtype, output_dtype)
+
+    def f2(x, y):
+        return x**y
+
+    @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
+    def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1):
+        xs = np.full(10, input_dtype(x), dtype=output_dtype)
+        ys = f(xs, xs)[::2]
+        assert_allclose(ys, y)
+        assert_equal(ys.dtype, output_dtype)
+
+    # class to use in testing object method loops
+    class foo:
+        def conjugate(self):
+            return np.bool_(1)
+
+        def logical_xor(self, obj):
+            return np.bool_(1)
+
+    def test_unary_PyUFunc_O_O(self):
+        x = np.ones(10, dtype=object)
+        assert_(np.all(np.abs(x) == 1))
+
+    def test_unary_PyUFunc_O_O_method_simple(self, foo=foo):
+        x = np.full(10, foo(), dtype=object)
+        assert_(np.all(np.conjugate(x) == True))
+
+    def test_binary_PyUFunc_OO_O(self):
+        x = np.ones(10, dtype=object)
+        assert_(np.all(np.add(x, x) == 2))
+
+    def test_binary_PyUFunc_OO_O_method(self, foo=foo):
+        x = np.full(10, foo(), dtype=object)
+        assert_(np.all(np.logical_xor(x, x)))
+
+    def test_binary_PyUFunc_On_Om_method(self, foo=foo):
+        x = np.full((10, 2, 3), foo(), dtype=object)
+        assert_(np.all(np.logical_xor(x, x)))
+
+    def test_python_complex_conjugate(self):
+        # The conjugate ufunc should fall back to calling the method:
+        arr = np.array([1+2j, 3-4j], dtype="O")
+        assert isinstance(arr[0], complex)
+        res = np.conjugate(arr)
+        assert res.dtype == np.dtype("O")
+        assert_array_equal(res, np.array([1-2j, 3+4j], dtype="O"))
+
+    @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS)
+    def test_unary_PyUFunc_O_O_method_full(self, ufunc):
+        """Compare the result of the object loop with non-object one"""
+        val = np.float64(np.pi/4)
+
+        class MyFloat(np.float64):
+            def __getattr__(self, attr):
+                try:
+                    return super().__getattr__(attr)
+                except AttributeError:
+                    return lambda: getattr(np.core.umath, attr)(val)
+
+        # Use 0-D arrays, to ensure the same element call
+        num_arr = np.array(val, dtype=np.float64)
+        obj_arr = np.array(MyFloat(val), dtype="O")
+
+        with np.errstate(all="raise"):
+            try:
+                res_num = ufunc(num_arr)
+            except Exception as exc:
+                with assert_raises(type(exc)):
+                    ufunc(obj_arr)
+            else:
+                res_obj = ufunc(obj_arr)
+                assert_array_almost_equal(res_num.astype("O"), res_obj)
+
+
+def _pickleable_module_global():
+    pass
+
+
+class TestUfunc:
+    def test_pickle(self):
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            assert_(pickle.loads(pickle.dumps(np.sin,
+                                              protocol=proto)) is np.sin)
+
+            # Check that ufunc not defined in the top level numpy namespace
+            # such as numpy.core._rational_tests.test_add can also be pickled
+            res = pickle.loads(pickle.dumps(_rational_tests.test_add,
+                                            protocol=proto))
+            assert_(res is _rational_tests.test_add)
+
+    def test_pickle_withstring(self):
+        astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n"
+                   b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
+        assert_(pickle.loads(astring) is np.cos)
+
+    @pytest.mark.skipif(IS_PYPY, reason="'is' check does not work on PyPy")
+    def test_pickle_name_is_qualname(self):
+        # This tests that a simplification of our ufunc pickle code will
+        # lead to allowing qualnames as names.  Future ufuncs should
+        # possibly add a specific qualname, or a hook into pickling instead
+        # (dask+numba may benefit).
+        _pickleable_module_global.ufunc = umt._pickleable_module_global_ufunc
+        obj = pickle.loads(pickle.dumps(_pickleable_module_global.ufunc))
+        assert obj is umt._pickleable_module_global_ufunc
+
+    def test_reduceat_shifting_sum(self):
+        L = 6
+        x = np.arange(L)
+        idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel()
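+        # consecutive index pairs (k, k + 2) make reduceat sum the windows
+        # x[k:k+2]; the [::2] stride keeps only those windowed sums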
+        assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7])
+
+    def test_all_ufunc(self):
+        """Try to check presence and results of all ufuncs.
+
+        The list of ufuncs comes from generate_umath.py and is as follows:
+
+        =====  ====  =============  ===============  ========================
+        done   args   function        types                notes
+        =====  ====  =============  ===============  ========================
+        n      1     conjugate      nums + O
+        n      1     absolute       nums + O         complex -> real
+        n      1     negative       nums + O
+        n      1     sign           nums + O         -> int
+        n      1     invert         bool + ints + O  flts raise an error
+        n      1     degrees        real + M         cmplx raise an error
+        n      1     radians        real + M         cmplx raise an error
+        n      1     arccos         flts + M
+        n      1     arccosh        flts + M
+        n      1     arcsin         flts + M
+        n      1     arcsinh        flts + M
+        n      1     arctan         flts + M
+        n      1     arctanh        flts + M
+        n      1     cos            flts + M
+        n      1     sin            flts + M
+        n      1     tan            flts + M
+        n      1     cosh           flts + M
+        n      1     sinh           flts + M
+        n      1     tanh           flts + M
+        n      1     exp            flts + M
+        n      1     expm1          flts + M
+        n      1     log            flts + M
+        n      1     log10          flts + M
+        n      1     log1p          flts + M
+        n      1     sqrt           flts + M         real x < 0 raises error
+        n      1     ceil           real + M
+        n      1     trunc          real + M
+        n      1     floor          real + M
+        n      1     fabs           real + M
+        n      1     rint           flts + M
+        n      1     isnan          flts             -> bool
+        n      1     isinf          flts             -> bool
+        n      1     isfinite       flts             -> bool
+        n      1     signbit        real             -> bool
+        n      1     modf           real             -> (frac, int)
+        n      1     logical_not    bool + nums + M  -> bool
+        n      2     left_shift     ints + O         flts raise an error
+        n      2     right_shift    ints + O         flts raise an error
+        n      2     add            bool + nums + O  boolean + is ||
+        n      2     subtract       bool + nums + O  boolean - is ^
+        n      2     multiply       bool + nums + O  boolean * is &
+        n      2     divide         nums + O
+        n      2     floor_divide   nums + O
+        n      2     true_divide    nums + O         bBhH -> f, iIlLqQ -> d
+        n      2     fmod           nums + M
+        n      2     power          nums + O
+        n      2     greater        bool + nums + O  -> bool
+        n      2     greater_equal  bool + nums + O  -> bool
+        n      2     less           bool + nums + O  -> bool
+        n      2     less_equal     bool + nums + O  -> bool
+        n      2     equal          bool + nums + O  -> bool
+        n      2     not_equal      bool + nums + O  -> bool
+        n      2     logical_and    bool + nums + M  -> bool
+        n      2     logical_or     bool + nums + M  -> bool
+        n      2     logical_xor    bool + nums + M  -> bool
+        n      2     maximum        bool + nums + O
+        n      2     minimum        bool + nums + O
+        n      2     bitwise_and    bool + ints + O  flts raise an error
+        n      2     bitwise_or     bool + ints + O  flts raise an error
+        n      2     bitwise_xor    bool + ints + O  flts raise an error
+        n      2     arctan2        real + M
+        n      2     remainder      ints + real + O
+        n      2     hypot          real + M
+        =====  ====  =============  ===============  ========================
+
+        Types other than those listed will be accepted, but they are cast to
+        the smallest compatible type for which the function is defined. The
+        casting rules are:
+
+        bool -> int8 -> float32
+        ints -> double
+
+        """
+        pass
+
+    # from include/numpy/ufuncobject.h
+    size_inferred = 2
+    can_ignore = 4
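+    # (the values of the UFUNC_CORE_DIM_SIZE_INFERRED and
+    # UFUNC_CORE_DIM_CAN_IGNORE flag bits, respectively)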
+    def test_signature0(self):
+        # the arguments to test_signature are: nin, nout, core_signature
+        enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+            2, 1, "(i),(i)->()")
+        assert_equal(enabled, 1)
+        assert_equal(num_dims, (1,  1,  0))
+        assert_equal(ixs, (0, 0))
+        assert_equal(flags, (self.size_inferred,))
+        assert_equal(sizes, (-1,))
+
+    def test_signature1(self):
+        # empty core signature; treat as plain ufunc (with trivial core)
+        enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+            2, 1, "(),()->()")
+        assert_equal(enabled, 0)
+        assert_equal(num_dims, (0,  0,  0))
+        assert_equal(ixs, ())
+        assert_equal(flags, ())
+        assert_equal(sizes, ())
+
+    def test_signature2(self):
+        # more complicated names for variables
+        enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+            2, 1, "(i1,i2),(J_1)->(_kAB)")
+        assert_equal(enabled, 1)
+        assert_equal(num_dims, (2, 1, 1))
+        assert_equal(ixs, (0, 1, 2, 3))
+        assert_equal(flags, (self.size_inferred,)*4)
+        assert_equal(sizes, (-1, -1, -1, -1))
+
+    def test_signature3(self):
+        enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+            2, 1, "(i1, i12),   (J_1)->(i12, i2)")
+        assert_equal(enabled, 1)
+        assert_equal(num_dims, (2, 1, 2))
+        assert_equal(ixs, (0, 1, 2, 1, 3))
+        assert_equal(flags, (self.size_inferred,)*4)
+        assert_equal(sizes, (-1, -1, -1, -1))
+
+    def test_signature4(self):
+        # matrix_multiply signature from _umath_tests
+        enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+            2, 1, "(n,k),(k,m)->(n,m)")
+        assert_equal(enabled, 1)
+        assert_equal(num_dims, (2, 2, 2))
+        assert_equal(ixs, (0, 1, 1, 2, 0, 2))
+        assert_equal(flags, (self.size_inferred,)*3)
+        assert_equal(sizes, (-1, -1, -1))
+
+    def test_signature5(self):
+        # matmul signature from _umath_tests
+        enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+            2, 1, "(n?,k),(k,m?)->(n?,m?)")
+        assert_equal(enabled, 1)
+        assert_equal(num_dims, (2, 2, 2))
+        assert_equal(ixs, (0, 1, 1, 2, 0, 2))
+        assert_equal(flags, (self.size_inferred | self.can_ignore,
+                             self.size_inferred,
+                             self.size_inferred | self.can_ignore))
+        assert_equal(sizes, (-1, -1, -1))
+
+    def test_signature6(self):
+        enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+            1, 1, "(3)->()")
+        assert_equal(enabled, 1)
+        assert_equal(num_dims, (1, 0))
+        assert_equal(ixs, (0,))
+        assert_equal(flags, (0,))
+        assert_equal(sizes, (3,))
+
+    def test_signature7(self):
+        enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+            3, 1, "(3),(03,3),(n)->(9)")
+        assert_equal(enabled, 1)
+        assert_equal(num_dims, (1, 2, 1, 1))
+        assert_equal(ixs, (0, 0, 0, 1, 2))
+        assert_equal(flags, (0, self.size_inferred, 0))
+        assert_equal(sizes, (3, -1, 9))
+
+    def test_signature8(self):
+        enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+            3, 1, "(3?),(3?,3?),(n)->(9)")
+        assert_equal(enabled, 1)
+        assert_equal(num_dims, (1, 2, 1, 1))
+        assert_equal(ixs, (0, 0, 0, 1, 2))
+        assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
+        assert_equal(sizes, (3, -1, 9))
+
+    def test_signature9(self):
+        enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+            1, 1, "(  3)  -> ( )")
+        assert_equal(enabled, 1)
+        assert_equal(num_dims, (1, 0))
+        assert_equal(ixs, (0,))
+        assert_equal(flags, (0,))
+        assert_equal(sizes, (3,))
+
+    def test_signature10(self):
+        enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+            3, 1, "( 3? ) , (3? ,  3?) ,(n )-> ( 9)")
+        assert_equal(enabled, 1)
+        assert_equal(num_dims, (1, 2, 1, 1))
+        assert_equal(ixs, (0, 0, 0, 1, 2))
+        assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
+        assert_equal(sizes, (3, -1, 9))
+
+    def test_signature_failure_extra_parenthesis(self):
+        with assert_raises(ValueError):
+            umt.test_signature(2, 1, "((i)),(i)->()")
+
+    def test_signature_failure_mismatching_parenthesis(self):
+        with assert_raises(ValueError):
+            umt.test_signature(2, 1, "(i),)i(->()")
+
+    def test_signature_failure_signature_missing_input_arg(self):
+        with assert_raises(ValueError):
+            umt.test_signature(2, 1, "(i),->()")
+
+    def test_signature_failure_signature_missing_output_arg(self):
+        with assert_raises(ValueError):
+            umt.test_signature(2, 2, "(i),(i)->()")
+
+    def test_get_signature(self):
+        assert_equal(umt.inner1d.signature, "(i),(i)->()")
+
+    def test_forced_sig(self):
+        a = 0.5*np.arange(3, dtype='f8')
+        assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
+        with pytest.warns(DeprecationWarning):
+            assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
+        assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
+        with pytest.warns(DeprecationWarning):
+            assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'),
+                         [0, 0, 1])
+        assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
+                                            casting='unsafe'), [0, 0, 1])
+
+        b = np.zeros((3,), dtype='f8')
+        np.add(a, 0.5, out=b)
+        assert_equal(b, [0.5, 1, 1.5])
+        b[:] = 0
+        with pytest.warns(DeprecationWarning):
+            np.add(a, 0.5, sig='i', out=b, casting='unsafe')
+        assert_equal(b, [0, 0, 1])
+        b[:] = 0
+        np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
+        assert_equal(b, [0, 0, 1])
+        b[:] = 0
+        with pytest.warns(DeprecationWarning):
+            np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
+        assert_equal(b, [0, 0, 1])
+        b[:] = 0
+        np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
+        assert_equal(b, [0, 0, 1])
+
+    def test_signature_all_None(self):
+        # signature all None is an acceptable alternative (since 1.21)
+        # to not providing a signature.
+        res1 = np.add([3], [4], sig=(None, None, None))
+        res2 = np.add([3], [4])
+        assert_array_equal(res1, res2)
+        res1 = np.maximum([3], [4], sig=(None, None, None))
+        res2 = np.maximum([3], [4])
+        assert_array_equal(res1, res2)
+
+        with pytest.raises(TypeError):
+            # special case that would be deprecated anyway, so it errors:
+            np.add(3, 4, signature=(None,))
+
+    def test_signature_dtype_type(self):
+        # Since that will be the normal behaviour (past NumPy 1.21)
+        # we do support the types already:
+        float_dtype = type(np.dtype(np.float64))
+        np.add(3, 4, signature=(float_dtype, float_dtype, None))
+
+    @pytest.mark.parametrize("get_kwarg", [
+            lambda dt: dict(dtype=dt),
+            lambda dt: dict(signature=(dt, None, None))])
+    def test_signature_dtype_instances_allowed(self, get_kwarg):
+        # We allow certain dtype instances when there is a clear singleton
+        # and the given one is equivalent; mainly for backcompat.
+        int64 = np.dtype("int64")
+        int64_2 = pickle.loads(pickle.dumps(int64))
+        # Relies on pickling behavior; if the assert fails, just remove the test...
+        assert int64 is not int64_2
+
+        assert np.add(1, 2, **get_kwarg(int64_2)).dtype == int64
+        td = np.timedelta64(2, "s")
+        assert np.add(td, td, **get_kwarg("m8")).dtype == "m8[s]"
+
+    @pytest.mark.parametrize("get_kwarg", [
+            param(lambda x: dict(dtype=x), id="dtype"),
+            param(lambda x: dict(signature=(x, None, None)), id="signature")])
+    def test_signature_dtype_instances_not_allowed(self, get_kwarg):
+        msg = "The `dtype` and `signature` arguments to ufuncs"
+
+        with pytest.raises(TypeError, match=msg):
+            np.add(3, 5, **get_kwarg(np.dtype("int64").newbyteorder()))
+        with pytest.raises(TypeError, match=msg):
+            np.add(3, 5, **get_kwarg(np.dtype("m8[ns]")))
+        with pytest.raises(TypeError, match=msg):
+            np.add(3, 5, **get_kwarg("m8[ns]"))
+
+    @pytest.mark.parametrize("casting", ["unsafe", "same_kind", "safe"])
+    def test_partial_signature_mismatch(self, casting):
+        # If the second argument matches already, no need to specify it:
+        res = np.ldexp(np.float32(1.), np.int_(2), dtype="d")
+        assert res.dtype == "d"
+        res = np.ldexp(np.float32(1.), np.int_(2), signature=(None, None, "d"))
+        assert res.dtype == "d"
+
+        # ldexp only has a loop for long input as second argument, overriding
+        # the output cannot help with that (no matter the casting)
+        with pytest.raises(TypeError):
+            np.ldexp(1., np.uint64(3), dtype="d")
+        with pytest.raises(TypeError):
+            np.ldexp(1., np.uint64(3), signature=(None, None, "d"))
+
+    def test_partial_signature_mismatch_with_cache(self):
+        with pytest.raises(TypeError):
+            np.add(np.float16(1), np.uint64(2), sig=("e", "d", None))
+        # Ensure e,d->None is in the dispatching cache (double loop)
+        np.add(np.float16(1), np.float64(2))
+        # The error must still be raised:
+        with pytest.raises(TypeError):
+            np.add(np.float16(1), np.uint64(2), sig=("e", "d", None))
+
+    def test_use_output_signature_for_all_arguments(self):
+        # Test that providing only `dtype=` or `signature=(None, None, dtype)`
+        # is sufficient if falling back to a homogeneous signature works.
+        # In this case, the `intp, intp -> intp` loop is chosen.
+        res = np.power(1.5, 2.8, dtype=np.intp, casting="unsafe")
+        assert res == 1  # the cast happens first.
+        res = np.power(1.5, 2.8, signature=(None, None, np.intp),
+                       casting="unsafe")
+        assert res == 1
+        with pytest.raises(TypeError):
+            # the unsafe casting would normally cause errors though:
+            np.power(1.5, 2.8, dtype=np.intp)
+
+    def test_signature_errors(self):
+        with pytest.raises(TypeError,
+                    match="the signature object to ufunc must be a string or"):
+            np.add(3, 4, signature=123.)  # neither a string nor a tuple
+
+        with pytest.raises(ValueError):
+            # bad symbols that do not translate to dtypes
+            np.add(3, 4, signature="%^->#")
+
+        with pytest.raises(ValueError):
+            np.add(3, 4, signature=b"ii-i")  # incomplete and byte string
+
+        with pytest.raises(ValueError):
+            np.add(3, 4, signature="ii>i")  # incomplete string
+
+        with pytest.raises(ValueError):
+            np.add(3, 4, signature=(None, "f8"))  # bad length
+
+        with pytest.raises(UnicodeDecodeError):
+            np.add(3, 4, signature=b"\xff\xff->i")
+
+    def test_forced_dtype_times(self):
+        # Signatures only set the type numbers (not the actual loop dtypes)
+        # so using `M` in a signature/dtype should generally work:
+        a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='>M8[D]')
+        np.maximum(a, a, dtype="M")
+        np.maximum.reduce(a, dtype="M")
+
+        arr = np.arange(10, dtype="m8[s]")
+        np.add(arr, arr, dtype="m")
+        np.maximum(arr, arr, dtype="m")
+
+    @pytest.mark.parametrize("ufunc", [np.add, np.sqrt])
+    def test_cast_safety(self, ufunc):
+        """Basic test for the safest casts, because ufuncs inner loops can
+        indicate a cast-safety as well (which is normally always "no").
+        """
+        def call_ufunc(arr, **kwargs):
+            return ufunc(*(arr,) * ufunc.nin, **kwargs)
+
+        arr = np.array([1., 2., 3.], dtype=np.float32)
+        arr_bs = arr.astype(arr.dtype.newbyteorder())
+        expected = call_ufunc(arr)
+        # Normally, a "no" cast:
+        res = call_ufunc(arr, casting="no")
+        assert_array_equal(expected, res)
+        # Byte-swapping is not allowed with "no" though:
+        with pytest.raises(TypeError):
+            call_ufunc(arr_bs, casting="no")
+
+        # But is allowed with "equiv":
+        res = call_ufunc(arr_bs, casting="equiv")
+        assert_array_equal(expected, res)
+
+        # Casting to float64 is safe, but not equiv:
+        with pytest.raises(TypeError):
+            call_ufunc(arr_bs, dtype=np.float64, casting="equiv")
+
+        # but it is safe cast:
+        res = call_ufunc(arr_bs, dtype=np.float64, casting="safe")
+        expected = call_ufunc(arr.astype(np.float64))  # upcast
+        assert_array_equal(expected, res)
+
+    def test_true_divide(self):
+        a = np.array(10)
+        b = np.array(20)
+        tgt = np.array(0.5)
+
+        for tc in 'bhilqBHILQefdgFDG':
+            dt = np.dtype(tc)
+            aa = a.astype(dt)
+            bb = b.astype(dt)
+
+            # Check result value and dtype.
+            for x, y in itertools.product([aa, -aa], [bb, -bb]):
+
+                # Check with no output type specified
+                if tc in 'FDG':
+                    tgt = complex(x)/complex(y)
+                else:
+                    tgt = float(x)/float(y)
+
+                res = np.true_divide(x, y)
+                rtol = max(np.finfo(res).resolution, 1e-15)
+                assert_allclose(res, tgt, rtol=rtol)
+
+                if tc in 'bhilqBHILQ':
+                    assert_(res.dtype.name == 'float64')
+                else:
+                    assert_(res.dtype.name == dt.name)
+
+                # Check with output type specified.  This also checks for the
+                # incorrect casts in issue gh-3484 because the unary '-' does
+                # not change types, even for unsigned types. Hence casts in the
+                # ufunc from signed to unsigned and vice versa will lead to
+                # errors in the values.
+                for tcout in 'bhilqBHILQ':
+                    dtout = np.dtype(tcout)
+                    assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
+
+                for tcout in 'efdg':
+                    dtout = np.dtype(tcout)
+                    if tc in 'FDG':
+                        # Casting complex to float is not allowed
+                        assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
+                    else:
+                        tgt = float(x)/float(y)
+                        rtol = max(np.finfo(dtout).resolution, 1e-15)
+                        # The value of tiny for double-double is NaN
+                        with suppress_warnings() as sup:
+                            sup.filter(UserWarning)
+                            if not np.isnan(np.finfo(dtout).tiny):
+                                atol = max(np.finfo(dtout).tiny, 3e-308)
+                            else:
+                                atol = 3e-308
+                        # Some test values result in invalid for float16
+                        # and the cast to it may overflow to inf.
+                        with np.errstate(invalid='ignore', over='ignore'):
+                            res = np.true_divide(x, y, dtype=dtout)
+                        if not np.isfinite(res) and tcout == 'e':
+                            continue
+                        assert_allclose(res, tgt, rtol=rtol, atol=atol)
+                        assert_(res.dtype.name == dtout.name)
+
+                for tcout in 'FDG':
+                    dtout = np.dtype(tcout)
+                    tgt = complex(x)/complex(y)
+                    rtol = max(np.finfo(dtout).resolution, 1e-15)
+                    # The value of tiny for double-double is NaN
+                    with suppress_warnings() as sup:
+                        sup.filter(UserWarning)
+                        if not np.isnan(np.finfo(dtout).tiny):
+                            atol = max(np.finfo(dtout).tiny, 3e-308)
+                        else:
+                            atol = 3e-308
+                    res = np.true_divide(x, y, dtype=dtout)
+                    if not np.isfinite(res):
+                        continue
+                    assert_allclose(res, tgt, rtol=rtol, atol=atol)
+                    assert_(res.dtype.name == dtout.name)
+
+        # Check booleans
+        a = np.ones((), dtype=np.bool_)
+        res = np.true_divide(a, a)
+        assert_(res == 1.0)
+        assert_(res.dtype.name == 'float64')
+        res = np.true_divide(~a, a)
+        assert_(res == 0.0)
+        assert_(res.dtype.name == 'float64')
+
+    def test_sum_stability(self):
+        a = np.ones(500, dtype=np.float32)
+        assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4)
+
+        a = np.ones(500, dtype=np.float64)
+        assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_sum(self):
+        for dt in (int, np.float16, np.float32, np.float64, np.longdouble):
+            for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
+                      128, 1024, 1235):
+                # warning if sum overflows, which it does in float16
+                with warnings.catch_warnings(record=True) as w:
+                    warnings.simplefilter("always", RuntimeWarning)
+
+                    tgt = dt(v * (v + 1) / 2)
+                    overflow = not np.isfinite(tgt)
+                    assert_equal(len(w), 1 * overflow)
+
+                    d = np.arange(1, v + 1, dtype=dt)
+
+                    assert_almost_equal(np.sum(d), tgt)
+                    assert_equal(len(w), 2 * overflow)
+
+                    assert_almost_equal(np.sum(d[::-1]), tgt)
+                    assert_equal(len(w), 3 * overflow)
+
+            d = np.ones(500, dtype=dt)
+            assert_almost_equal(np.sum(d[::2]), 250.)
+            assert_almost_equal(np.sum(d[1::2]), 250.)
+            assert_almost_equal(np.sum(d[::3]), 167.)
+            assert_almost_equal(np.sum(d[1::3]), 167.)
+            assert_almost_equal(np.sum(d[::-2]), 250.)
+            assert_almost_equal(np.sum(d[-1::-2]), 250.)
+            assert_almost_equal(np.sum(d[::-3]), 167.)
+            assert_almost_equal(np.sum(d[-1::-3]), 167.)
+            # sum with first reduction entry != 0
+            d = np.ones((1,), dtype=dt)
+            d += d
+            assert_almost_equal(d, 2.)
+
+    def test_sum_complex(self):
+        for dt in (np.complex64, np.complex128, np.clongdouble):
+            for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
+                      128, 1024, 1235):
+                tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j)
+                d = np.empty(v, dtype=dt)
+                d.real = np.arange(1, v + 1)
+                d.imag = -np.arange(1, v + 1)
+                assert_almost_equal(np.sum(d), tgt)
+                assert_almost_equal(np.sum(d[::-1]), tgt)
+
+            d = np.ones(500, dtype=dt) + 1j
+            assert_almost_equal(np.sum(d[::2]), 250. + 250j)
+            assert_almost_equal(np.sum(d[1::2]), 250. + 250j)
+            assert_almost_equal(np.sum(d[::3]), 167. + 167j)
+            assert_almost_equal(np.sum(d[1::3]), 167. + 167j)
+            assert_almost_equal(np.sum(d[::-2]), 250. + 250j)
+            assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j)
+            assert_almost_equal(np.sum(d[::-3]), 167. + 167j)
+            assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j)
+            # sum with first reduction entry != 0
+            d = np.ones((1,), dtype=dt) + 1j
+            d += d
+            assert_almost_equal(d, 2. + 2j)
+
+    def test_sum_initial(self):
+        # Integer, single axis
+        assert_equal(np.sum([3], initial=2), 5)
+
+        # Floating point
+        assert_almost_equal(np.sum([0.2], initial=0.1), 0.3)
+
+        # Multiple non-adjacent axes
+        assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2),
+                     [12, 12, 12])
+
+    def test_sum_where(self):
+        # More extensive tests done in test_reduction_with_where.
+        assert_equal(np.sum([[1., 2.], [3., 4.]], where=[True, False]), 4.)
+        assert_equal(np.sum([[1., 2.], [3., 4.]], axis=0, initial=5.,
+                            where=[True, False]), [9., 5.])
+
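+    # Illustrative sketch (not part of the original suite): how initial= and
+    # where= compose in np.sum. Elements excluded by `where` are simply not
+    # accumulated, while `initial` seeds every output element.
+    def _sketch_sum_initial_where(self):
+        x = np.array([[1., 2.], [3., 4.]])
+        # Only column 0 participates; each row sum starts from 10.
+        res = np.sum(x, axis=1, initial=10., where=[True, False])
+        assert_array_equal(res, [11., 13.])
+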
+    def test_inner1d(self):
+        a = np.arange(6).reshape((2, 3))
+        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
+        a = np.arange(6)
+        assert_array_equal(umt.inner1d(a, a), np.sum(a*a))
+
+    def test_broadcast(self):
+        msg = "broadcast"
+        a = np.arange(4).reshape((2, 1, 2))
+        b = np.arange(4).reshape((1, 2, 2))
+        assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
+        msg = "extend & broadcast loop dimensions"
+        b = np.arange(4).reshape((2, 2))
+        assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
+        # Broadcast in core dimensions should fail
+        a = np.arange(8).reshape((4, 2))
+        b = np.arange(4).reshape((4, 1))
+        assert_raises(ValueError, umt.inner1d, a, b)
+        # Extend core dimensions should fail
+        a = np.arange(8).reshape((4, 2))
+        b = np.array(7)
+        assert_raises(ValueError, umt.inner1d, a, b)
+        # Broadcast should fail
+        a = np.arange(2).reshape((2, 1, 1))
+        b = np.arange(3).reshape((3, 1, 1))
+        assert_raises(ValueError, umt.inner1d, a, b)
+
+        # Writing to a broadcasted array with overlap should warn, gh-2705
+        a = np.arange(2)
+        b = np.arange(4).reshape((2, 2))
+        u, v = np.broadcast_arrays(a, b)
+        assert_equal(u.strides[0], 0)
+        x = u + v
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            u += v
+            assert_equal(len(w), 1)
+            assert_(x[0, 0] != u[0, 0])
+
+        # Output reduction should not be allowed.
+        # See gh-15139
+        a = np.arange(6).reshape(3, 2)
+        b = np.ones(2)
+        out = np.empty(())
+        assert_raises(ValueError, umt.inner1d, a, b, out)
+        out2 = np.empty(3)
+        c = umt.inner1d(a, b, out2)
+        assert_(c is out2)
+
+    def test_out_broadcasts(self):
+        # For ufuncs and gufuncs (not for reductions), we currently allow
+        # the output to cause broadcasting of the input arrays, both along
+        # dimensions with shape 1 and along dimensions which do not exist
+        # at all in the inputs.
+        arr = np.arange(3).reshape(1, 3)
+        out = np.empty((5, 4, 3))
+        np.add(arr, arr, out=out)
+        assert (out == np.arange(3) * 2).all()
+
+        # The same holds for gufuncs (gh-16484)
+        umt.inner1d(arr, arr, out=out)
+        # the result would be just a scalar `5`, but is broadcast fully:
+        assert (out == 5).all()
+
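+    # Minimal sketch of the behaviour described above, using plain np.add
+    # (the helper name is illustrative only): the output may add dimensions,
+    # broadcasting the inputs up to its shape.
+    def _sketch_out_broadcasts(self):
+        arr = np.arange(3.)          # shape (3,)
+        out = np.empty((2, 3))       # extra leading dimension
+        np.add(arr, 0., out=out)     # inputs are broadcast against out
+        assert (out == arr).all()
+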
+    @pytest.mark.parametrize(["arr", "out"], [
+                ([2], np.empty(())),
+                ([1, 2], np.empty(1)),
+                (np.ones((4, 3)), np.empty((4, 1)))],
+            ids=["(1,)->()", "(2,)->(1,)", "(4, 3)->(4, 1)"])
+    def test_out_broadcast_errors(self, arr, out):
+        # Output is (currently) allowed to broadcast inputs, but it cannot be
+        # smaller than the actual result.
+        with pytest.raises(ValueError, match="non-broadcastable"):
+            np.positive(arr, out=out)
+
+        with pytest.raises(ValueError, match="non-broadcastable"):
+            np.add(np.ones(()), arr, out=out)
+
+    def test_type_cast(self):
+        msg = "type cast"
+        a = np.arange(6, dtype='short').reshape((2, 3))
+        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
+                           err_msg=msg)
+        msg = "type cast on one argument"
+        a = np.arange(6).reshape((2, 3))
+        b = a + 0.1
+        assert_array_almost_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1),
+                                  err_msg=msg)
+
+    def test_endian(self):
+        msg = "big endian"
+        a = np.arange(6, dtype='>i4').reshape((2, 3))
+        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
+                           err_msg=msg)
+        msg = "little endian"
+        a = np.arange(6, dtype='<i4').reshape((2, 3))
+        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
+                           err_msg=msg)
+
+    def test_axes_argument(self):
+        # inner1d signature: '(i),(i)->()'
+        inner1d = umt.inner1d
+        a = np.arange(27.).reshape((3, 3, 3))
+        b = np.arange(10., 19.).reshape((3, 1, 3))
+        # basic tests on inputs (outputs tested below with matrix_multiply).
+        c = inner1d(a, b)
+        assert_array_equal(c, (a * b).sum(-1))
+        # default
+        c = inner1d(a, b, axes=[(-1,), (-1,), ()])
+        assert_array_equal(c, (a * b).sum(-1))
+        # integers ok for single axis.
+        c = inner1d(a, b, axes=[-1, -1, ()])
+        assert_array_equal(c, (a * b).sum(-1))
+        # mix fine
+        c = inner1d(a, b, axes=[(-1,), -1, ()])
+        assert_array_equal(c, (a * b).sum(-1))
+        # can omit last axis.
+        c = inner1d(a, b, axes=[-1, -1])
+        assert_array_equal(c, (a * b).sum(-1))
+        # can pass in other types of integer (with __index__ protocol)
+        c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
+        assert_array_equal(c, (a * b).sum(-1))
+        # swap some axes
+        c = inner1d(a, b, axes=[0, 0])
+        assert_array_equal(c, (a * b).sum(0))
+        c = inner1d(a, b, axes=[0, 2])
+        assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
+        # Check errors for improperly constructed axes arguments.
+        # should have list.
+        assert_raises(TypeError, inner1d, a, b, axes=-1)
+        # needs enough elements
+        assert_raises(ValueError, inner1d, a, b, axes=[-1])
+        # should pass in indices.
+        assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0])
+        assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1])
+        assert_raises(TypeError, inner1d, a, b, axes=[None, 1])
+        # cannot pass an index unless there is only one dimension
+        # (output is wrong in this case)
+        assert_raises(np.AxisError, inner1d, a, b, axes=[-1, -1, -1])
+        # or pass in generally the wrong number of axes
+        assert_raises(np.AxisError, inner1d, a, b, axes=[-1, -1, (-1,)])
+        assert_raises(np.AxisError, inner1d, a, b, axes=[-1, (-2, -1), ()])
+        # axes need to have same length.
+        assert_raises(ValueError, inner1d, a, b, axes=[0, 1])
+
+        # matrix_multiply signature: '(m,n),(n,p)->(m,p)'
+        mm = umt.matrix_multiply
+        a = np.arange(12).reshape((2, 3, 2))
+        b = np.arange(8).reshape((2, 2, 2, 1)) + 1
+        # Sanity check.
+        c = mm(a, b)
+        assert_array_equal(c, np.matmul(a, b))
+        # Default axes.
+        c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)])
+        assert_array_equal(c, np.matmul(a, b))
+        # Default with explicit axes.
+        c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)])
+        assert_array_equal(c, np.matmul(a, b))
+        # swap some axes.
+        c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)])
+        assert_array_equal(c, np.matmul(a.transpose(1, 0, 2),
+                                        b.transpose(0, 3, 1, 2)))
+        # Default with output array.
+        c = np.empty((2, 2, 3, 1))
+        d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)])
+        assert_(c is d)
+        assert_array_equal(c, np.matmul(a, b))
+        # Transposed output array
+        c = np.empty((1, 2, 2, 3))
+        d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)])
+        assert_(c is d)
+        assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2))
+        # Check errors for improperly constructed axes arguments.
+        # wrong argument
+        assert_raises(TypeError, mm, a, b, axis=1)
+        # axes should be list
+        assert_raises(TypeError, mm, a, b, axes=1)
+        assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1)))
+        # list needs to have right length
+        assert_raises(ValueError, mm, a, b, axes=[])
+        assert_raises(ValueError, mm, a, b, axes=[(-2, -1)])
+        # list should not contain None, or lists
+        assert_raises(TypeError, mm, a, b, axes=[None, None, None])
+        assert_raises(TypeError,
+                      mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]])
+        assert_raises(TypeError,
+                      mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]])
+        assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None])
+        # single integers are AxisErrors if more are required
+        assert_raises(np.AxisError, mm, a, b, axes=[-1, -1, -1])
+        assert_raises(np.AxisError, mm, a, b, axes=[(-2, -1), (-2, -1), -1])
+        # tuples should not have duplicated values
+        assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)])
+        # arrays should have enough axes.
+        z = np.zeros((2, 2))
+        assert_raises(ValueError, mm, z, z[0])
+        assert_raises(ValueError, mm, z, z, out=z[:, 0])
+        assert_raises(ValueError, mm, z[1], z, axes=[0, 1])
+        assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1])
+        # Regular ufuncs should not accept axes.
+        assert_raises(TypeError, np.add, 1., 1., axes=[0])
+        # should be able to deal with bad unrelated kwargs.
+        assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True)
+
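+    # Sketch using np.matmul, which also accepts the gufunc axes= argument:
+    # the list holds one tuple of core dimensions per operand, inputs first,
+    # output last (the helper name below is illustrative only).
+    def _sketch_axes_with_matmul(self):
+        a = np.arange(12.).reshape(3, 4)
+        b = np.arange(20.).reshape(4, 5)
+        c = np.matmul(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)])
+        assert_array_equal(c, a @ b)
+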
+    def test_axis_argument(self):
+        # inner1d signature: '(i),(i)->()'
+        inner1d = umt.inner1d
+        a = np.arange(27.).reshape((3, 3, 3))
+        b = np.arange(10., 19.).reshape((3, 1, 3))
+        c = inner1d(a, b)
+        assert_array_equal(c, (a * b).sum(-1))
+        c = inner1d(a, b, axis=-1)
+        assert_array_equal(c, (a * b).sum(-1))
+        out = np.zeros_like(c)
+        d = inner1d(a, b, axis=-1, out=out)
+        assert_(d is out)
+        assert_array_equal(d, c)
+        c = inner1d(a, b, axis=0)
+        assert_array_equal(c, (a * b).sum(0))
+        # Sanity checks on innerwt and cumsum.
+        a = np.arange(6).reshape((2, 3))
+        b = np.arange(10, 16).reshape((2, 3))
+        w = np.arange(20, 26).reshape((2, 3))
+        assert_array_equal(umt.innerwt(a, b, w, axis=0),
+                           np.sum(a * b * w, axis=0))
+        assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0))
+        assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1))
+        out = np.empty_like(a)
+        b = umt.cumsum(a, out=out, axis=0)
+        assert_(out is b)
+        assert_array_equal(b, np.cumsum(a, axis=0))
+        b = umt.cumsum(a, out=out, axis=1)
+        assert_(out is b)
+        assert_array_equal(b, np.cumsum(a, axis=-1))
+        # Check errors.
+        # Cannot pass in both axis and axes.
+        assert_raises(TypeError, inner1d, a, b, axis=0, axes=[0, 0])
+        # Not an integer.
+        assert_raises(TypeError, inner1d, a, b, axis=[0])
+        # more than 1 core dimensions.
+        mm = umt.matrix_multiply
+        assert_raises(TypeError, mm, a, b, axis=1)
+        # Output wrong size in axis.
+        out = np.empty((1, 2, 3), dtype=a.dtype)
+        assert_raises(ValueError, umt.cumsum, a, out=out, axis=0)
+        # Regular ufuncs should not accept axis.
+        assert_raises(TypeError, np.add, 1., 1., axis=0)
+
+    def test_keepdims_argument(self):
+        # inner1d signature: '(i),(i)->()'
+        inner1d = umt.inner1d
+        a = np.arange(27.).reshape((3, 3, 3))
+        b = np.arange(10., 19.).reshape((3, 1, 3))
+        c = inner1d(a, b)
+        assert_array_equal(c, (a * b).sum(-1))
+        c = inner1d(a, b, keepdims=False)
+        assert_array_equal(c, (a * b).sum(-1))
+        c = inner1d(a, b, keepdims=True)
+        assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+        out = np.zeros_like(c)
+        d = inner1d(a, b, keepdims=True, out=out)
+        assert_(d is out)
+        assert_array_equal(d, c)
+        # Now combined with axis and axes.
+        c = inner1d(a, b, axis=-1, keepdims=False)
+        assert_array_equal(c, (a * b).sum(-1, keepdims=False))
+        c = inner1d(a, b, axis=-1, keepdims=True)
+        assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+        c = inner1d(a, b, axis=0, keepdims=False)
+        assert_array_equal(c, (a * b).sum(0, keepdims=False))
+        c = inner1d(a, b, axis=0, keepdims=True)
+        assert_array_equal(c, (a * b).sum(0, keepdims=True))
+        c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False)
+        assert_array_equal(c, (a * b).sum(-1))
+        c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True)
+        assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+        c = inner1d(a, b, axes=[0, 0], keepdims=False)
+        assert_array_equal(c, (a * b).sum(0))
+        c = inner1d(a, b, axes=[0, 0, 0], keepdims=True)
+        assert_array_equal(c, (a * b).sum(0, keepdims=True))
+        c = inner1d(a, b, axes=[0, 2], keepdims=False)
+        assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
+        c = inner1d(a, b, axes=[0, 2], keepdims=True)
+        assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
+                                                             keepdims=True))
+        c = inner1d(a, b, axes=[0, 2, 2], keepdims=True)
+        assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
+                                                             keepdims=True))
+        c = inner1d(a, b, axes=[0, 2, 0], keepdims=True)
+        assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True))
+        # Hardly useful, but should work.
+        c = inner1d(a, b, axes=[0, 2, 1], keepdims=True)
+        assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1))
+                           .sum(1, keepdims=True))
+        # Check with two core dimensions.
+        a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
+        expected = uml.det(a)
+        c = uml.det(a, keepdims=False)
+        assert_array_equal(c, expected)
+        c = uml.det(a, keepdims=True)
+        assert_array_equal(c, expected[:, np.newaxis, np.newaxis])
+        a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
+        expected_s, expected_l = uml.slogdet(a)
+        cs, cl = uml.slogdet(a, keepdims=False)
+        assert_array_equal(cs, expected_s)
+        assert_array_equal(cl, expected_l)
+        cs, cl = uml.slogdet(a, keepdims=True)
+        assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis])
+        assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis])
+        # Sanity check on innerwt.
+        a = np.arange(6).reshape((2, 3))
+        b = np.arange(10, 16).reshape((2, 3))
+        w = np.arange(20, 26).reshape((2, 3))
+        assert_array_equal(umt.innerwt(a, b, w, keepdims=True),
+                           np.sum(a * b * w, axis=-1, keepdims=True))
+        assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True),
+                           np.sum(a * b * w, axis=0, keepdims=True))
+        # Check errors.
+        # Not a boolean
+        assert_raises(TypeError, inner1d, a, b, keepdims='true')
+        # More than 1 core dimension, and core output dimensions.
+        mm = umt.matrix_multiply
+        assert_raises(TypeError, mm, a, b, keepdims=True)
+        assert_raises(TypeError, mm, a, b, keepdims=False)
+        # Regular ufuncs should not accept keepdims.
+        assert_raises(TypeError, np.add, 1., 1., keepdims=False)
+
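+    # Sketch: the gufunc keepdims checks above mirror keepdims on an
+    # ordinary reduction, where the reduced axis is kept with length one.
+    def _sketch_keepdims_reduce(self):
+        a = np.arange(6.).reshape(2, 3)
+        assert np.add.reduce(a, axis=1, keepdims=True).shape == (2, 1)
+        assert np.add.reduce(a, axis=1, keepdims=False).shape == (2,)
+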
+    def test_innerwt(self):
+        a = np.arange(6).reshape((2, 3))
+        b = np.arange(10, 16).reshape((2, 3))
+        w = np.arange(20, 26).reshape((2, 3))
+        assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
+        a = np.arange(100, 124).reshape((2, 3, 4))
+        b = np.arange(200, 224).reshape((2, 3, 4))
+        w = np.arange(300, 324).reshape((2, 3, 4))
+        assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
+
+    def test_innerwt_empty(self):
+        """Test generalized ufunc with zero-sized operands"""
+        a = np.array([], dtype='f8')
+        b = np.array([], dtype='f8')
+        w = np.array([], dtype='f8')
+        assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
+
+    def test_cross1d(self):
+        """Test with fixed-sized signature."""
+        a = np.eye(3)
+        assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3)))
+        out = np.zeros((3, 3))
+        result = umt.cross1d(a[0], a, out)
+        assert_(result is out)
+        assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1])))
+        assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4))
+        assert_raises(ValueError, umt.cross1d, a, np.arange(4.))
+        # Wrong output core dimension.
+        assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4)))
+        # Wrong output broadcast dimension (see gh-15139).
+        assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros(3))
+
+    def test_can_ignore_signature(self):
+        # Comparing the effects of ? in signature:
+        # matrix_multiply: (m,n),(n,p)->(m,p)    # all must be there.
+        # matmul:        (m?,n),(n,p?)->(m?,p?)  # allow missing m, p.
+        mat = np.arange(12).reshape((2, 3, 2))
+        single_vec = np.arange(2)
+        col_vec = single_vec[:, np.newaxis]
+        col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1
+        # matrix @ single column vector with proper dimension
+        mm_col_vec = umt.matrix_multiply(mat, col_vec)
+        # matmul does the same thing
+        matmul_col_vec = umt.matmul(mat, col_vec)
+        assert_array_equal(matmul_col_vec, mm_col_vec)
+        # matrix @ vector without dimension making it a column vector.
+        # matrix multiply fails -> missing core dim.
+        assert_raises(ValueError, umt.matrix_multiply, mat, single_vec)
+        # matmul mimicker passes, and returns a vector.
+        matmul_col = umt.matmul(mat, single_vec)
+        assert_array_equal(matmul_col, mm_col_vec.squeeze())
+        # Now with a column array: same as for column vector,
+        # broadcasting sensibly.
+        mm_col_vec = umt.matrix_multiply(mat, col_vec_array)
+        matmul_col_vec = umt.matmul(mat, col_vec_array)
+        assert_array_equal(matmul_col_vec, mm_col_vec)
+        # As above, but for row vector
+        single_vec = np.arange(3)
+        row_vec = single_vec[np.newaxis, :]
+        row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1
+        # row vector @ matrix
+        mm_row_vec = umt.matrix_multiply(row_vec, mat)
+        matmul_row_vec = umt.matmul(row_vec, mat)
+        assert_array_equal(matmul_row_vec, mm_row_vec)
+        # single row vector @ matrix
+        assert_raises(ValueError, umt.matrix_multiply, single_vec, mat)
+        matmul_row = umt.matmul(single_vec, mat)
+        assert_array_equal(matmul_row, mm_row_vec.squeeze())
+        # row vector array @ matrix
+        mm_row_vec = umt.matrix_multiply(row_vec_array, mat)
+        matmul_row_vec = umt.matmul(row_vec_array, mat)
+        assert_array_equal(matmul_row_vec, mm_row_vec)
+        # Now for vector combinations
+        # row vector @ column vector
+        col_vec = row_vec.T
+        col_vec_array = row_vec_array.swapaxes(-2, -1)
+        mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec)
+        matmul_row_col_vec = umt.matmul(row_vec, col_vec)
+        assert_array_equal(matmul_row_col_vec, mm_row_col_vec)
+        # single row vector @ single col vector
+        assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec)
+        matmul_row_col = umt.matmul(single_vec, single_vec)
+        assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze())
+        # row vector array @ column vector array
+        mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array)
+        matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array)
+        assert_array_equal(matmul_row_col_array, mm_row_col_array)
+        # Finally, check that things are *not* squeezed if one gives an
+        # output.
+        out = np.zeros_like(mm_row_col_array)
+        out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out)
+        assert_array_equal(out, mm_row_col_array)
+        out[:] = 0
+        out = umt.matmul(row_vec_array, col_vec_array, out=out)
+        assert_array_equal(out, mm_row_col_array)
+        # And check one cannot put missing dimensions back.
+        out = np.zeros_like(mm_row_col_vec)
+        assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec,
+                      out)
+        # But fine for matmul, since it is just a broadcast.
+        out = umt.matmul(single_vec, single_vec, out)
+        assert_array_equal(out, mm_row_col_vec.squeeze())
+
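+    # Sketch: np.matmul itself uses the flexible '(n?,k),(k,m?)->(n?,m?)'
+    # signature, so a 1-d operand has its missing core dimension inserted
+    # and then removed from the result, as exercised above.
+    def _sketch_flexible_core_dims(self):
+        mat = np.arange(6.).reshape(3, 2)
+        vec = np.arange(2.)
+        assert np.matmul(mat, vec).shape == (3,)       # m? squeezed away
+        assert np.matmul(mat, vec[:, None]).shape == (3, 1)
+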
+    def test_matrix_multiply(self):
+        self.compare_matrix_multiply_results(np.int64)
+        self.compare_matrix_multiply_results(np.double)
+
+    def test_matrix_multiply_umath_empty(self):
+        res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0)))
+        assert_array_equal(res, np.zeros((0, 0)))
+        res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10)))
+        assert_array_equal(res, np.zeros((10, 10)))
+
+    def compare_matrix_multiply_results(self, tp):
+        d1 = np.array(np.random.rand(2, 3, 4), dtype=tp)
+        d2 = np.array(np.random.rand(2, 3, 4), dtype=tp)
+        msg = "matrix multiply on type %s" % d1.dtype.name
+
+        def permute_n(n):
+            if n == 1:
+                return ([0],)
+            ret = ()
+            base = permute_n(n-1)
+            for perm in base:
+                for i in range(n):
+                    new = perm + [n-1]
+                    new[n-1] = new[i]
+                    new[i] = n-1
+                    ret += (new,)
+            return ret
+
+        def slice_n(n):
+            if n == 0:
+                return ((),)
+            ret = ()
+            base = slice_n(n-1)
+            for sl in base:
+                ret += (sl+(slice(None),),)
+                ret += (sl+(slice(0, 1),),)
+            return ret
+
+        def broadcastable(s1, s2):
+            return s1 == s2 or s1 == 1 or s2 == 1
+
+        permute_3 = permute_n(3)
+        slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,)
+
+        ref = True
+        for p1 in permute_3:
+            for p2 in permute_3:
+                for s1 in slice_3:
+                    for s2 in slice_3:
+                        a1 = d1.transpose(p1)[s1]
+                        a2 = d2.transpose(p2)[s2]
+                        ref = ref and a1.base is not None
+                        ref = ref and a2.base is not None
+                        if (a1.shape[-1] == a2.shape[-2] and
+                                broadcastable(a1.shape[0], a2.shape[0])):
+                            assert_array_almost_equal(
+                                umt.matrix_multiply(a1, a2),
+                                np.sum(a2[..., np.newaxis].swapaxes(-3, -1) *
+                                       a1[..., np.newaxis,:], axis=-1),
+                                err_msg=msg + ' %s %s' % (str(a1.shape),
+                                                          str(a2.shape)))
+
+        assert_equal(ref, True, err_msg="reference check")
+
+    def test_euclidean_pdist(self):
+        a = np.arange(12, dtype=float).reshape(4, 3)
+        out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype)
+        umt.euclidean_pdist(a, out)
+        b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1))
+        b = b[~np.tri(a.shape[0], dtype=bool)]
+        assert_almost_equal(out, b)
+        # An output array is required to determine p with signature (n,d)->(p)
+        assert_raises(ValueError, umt.euclidean_pdist, a)
+
+    def test_cumsum(self):
+        a = np.arange(10)
+        result = umt.cumsum(a)
+        assert_array_equal(result, a.cumsum())
+
+    def test_object_logical(self):
+        a = np.array([3, None, True, False, "test", ""], dtype=object)
+        assert_equal(np.logical_or(a, None),
+                        np.array([x or None for x in a], dtype=object))
+        assert_equal(np.logical_or(a, True),
+                        np.array([x or True for x in a], dtype=object))
+        assert_equal(np.logical_or(a, 12),
+                        np.array([x or 12 for x in a], dtype=object))
+        assert_equal(np.logical_or(a, "blah"),
+                        np.array([x or "blah" for x in a], dtype=object))
+
+        assert_equal(np.logical_and(a, None),
+                        np.array([x and None for x in a], dtype=object))
+        assert_equal(np.logical_and(a, True),
+                        np.array([x and True for x in a], dtype=object))
+        assert_equal(np.logical_and(a, 12),
+                        np.array([x and 12 for x in a], dtype=object))
+        assert_equal(np.logical_and(a, "blah"),
+                        np.array([x and "blah" for x in a], dtype=object))
+
+        assert_equal(np.logical_not(a),
+                        np.array([not x for x in a], dtype=object))
+
+        assert_equal(np.logical_or.reduce(a), 3)
+        assert_equal(np.logical_and.reduce(a), None)
+
+    def test_object_comparison(self):
+        class HasComparisons:
+            def __eq__(self, other):
+                return '=='
+
+        arr0d = np.array(HasComparisons())
+        assert_equal(arr0d == arr0d, True)
+        assert_equal(np.equal(arr0d, arr0d), True)  # normal behavior is a cast
+
+        arr1d = np.array([HasComparisons()])
+        assert_equal(arr1d == arr1d, np.array([True]))
+        assert_equal(np.equal(arr1d, arr1d), np.array([True]))  # normal behavior is a cast
+        assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['==']))
+
+    def test_object_array_reduction(self):
+        # Reductions on object arrays
+        a = np.array(['a', 'b', 'c'], dtype=object)
+        assert_equal(np.sum(a), 'abc')
+        assert_equal(np.max(a), 'c')
+        assert_equal(np.min(a), 'a')
+        a = np.array([True, False, True], dtype=object)
+        assert_equal(np.sum(a), 2)
+        assert_equal(np.prod(a), 0)
+        assert_equal(np.any(a), True)
+        assert_equal(np.all(a), False)
+        assert_equal(np.max(a), True)
+        assert_equal(np.min(a), False)
+        assert_equal(np.array([[1]], dtype=object).sum(), 1)
+        assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2])
+        assert_equal(np.array([1], dtype=object).sum(initial=1), 2)
+        assert_equal(np.array([[1], [2, 3]], dtype=object)
+                     .sum(initial=[0], where=[False, True]), [0, 2, 3])
+
+    def test_object_array_accumulate_inplace(self):
+        # Checks that in-place accumulates work, see also gh-7402
+        arr = np.ones(4, dtype=object)
+        arr[:] = [[1] for i in range(4)]
+        # Twice reproduced also for tuples:
+        np.add.accumulate(arr, out=arr)
+        np.add.accumulate(arr, out=arr)
+        assert_array_equal(arr,
+                           np.array([[1]*i for i in [1, 3, 6, 10]], dtype=object),
+                          )
+
+        # And the same if the axis argument is used
+        arr = np.ones((2, 4), dtype=object)
+        arr[0, :] = [[2] for i in range(4)]
+        np.add.accumulate(arr, out=arr, axis=-1)
+        np.add.accumulate(arr, out=arr, axis=-1)
+        assert_array_equal(arr[0, :],
+                           np.array([[2]*i for i in [1, 3, 6, 10]], dtype=object),
+                          )
+
+    def test_object_array_accumulate_failure(self):
+        # Typical accumulation on object works as expected:
+        res = np.add.accumulate(np.array([1, 0, 2], dtype=object))
+        assert_array_equal(res, np.array([1, 1, 3], dtype=object))
+        # But errors are propagated from the inner-loop if they occur:
+        with pytest.raises(TypeError):
+            np.add.accumulate([1, None, 2])
+
+    def test_object_array_reduceat_inplace(self):
+        # Checks that in-place reduceats work, see also gh-7465
+        arr = np.empty(4, dtype=object)
+        arr[:] = [[1] for i in range(4)]
+        out = np.empty(4, dtype=object)
+        out[:] = [[1] for i in range(4)]
+        np.add.reduceat(arr, np.arange(4), out=arr)
+        np.add.reduceat(arr, np.arange(4), out=arr)
+        assert_array_equal(arr, out)
+
+        # And the same if the axis argument is used
+        arr = np.ones((2, 4), dtype=object)
+        arr[0, :] = [[2] for i in range(4)]
+        out = np.ones((2, 4), dtype=object)
+        out[0, :] = [[2] for i in range(4)]
+        np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
+        np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
+        assert_array_equal(arr, out)
+
+    def test_object_array_reduceat_failure(self):
+        # Reduceat works as expected when no invalid operation occurs (None is
+        # not involved in an operation here)
+        res = np.add.reduceat(np.array([1, None, 2], dtype=object), [1, 2])
+        assert_array_equal(res, np.array([None, 2], dtype=object))
+        # But errors when None would be involved in an operation:
+        with pytest.raises(TypeError):
+            np.add.reduceat([1, None, 2], [0, 2])
+
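+    # Sketch of reduceat's slicing rule: output i is
+    # ufunc.reduce(a[indices[i]:indices[i+1]]), falling back to a bare
+    # a[indices[i]] when indices[i] >= indices[i+1].
+    def _sketch_reduceat_slices(self):
+        a = np.arange(8)
+        res = np.add.reduceat(a, [0, 4, 6])
+        assert_array_equal(res, [0 + 1 + 2 + 3, 4 + 5, 6 + 7])
+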
+    def test_zerosize_reduction(self):
+        # Test with default dtype and object dtype
+        for a in [[], np.array([], dtype=object)]:
+            assert_equal(np.sum(a), 0)
+            assert_equal(np.prod(a), 1)
+            assert_equal(np.any(a), False)
+            assert_equal(np.all(a), True)
+            assert_raises(ValueError, np.max, a)
+            assert_raises(ValueError, np.min, a)
+
+    def test_axis_out_of_bounds(self):
+        a = np.array([False, False])
+        assert_raises(np.AxisError, a.all, axis=1)
+        a = np.array([False, False])
+        assert_raises(np.AxisError, a.all, axis=-2)
+
+        a = np.array([False, False])
+        assert_raises(np.AxisError, a.any, axis=1)
+        a = np.array([False, False])
+        assert_raises(np.AxisError, a.any, axis=-2)
+
+    def test_scalar_reduction(self):
+        # The functions 'sum', 'prod', etc allow specifying axis=0
+        # even for scalars
+        assert_equal(np.sum(3, axis=0), 3)
+        assert_equal(np.prod(3.5, axis=0), 3.5)
+        assert_equal(np.any(True, axis=0), True)
+        assert_equal(np.all(False, axis=0), False)
+        assert_equal(np.max(3, axis=0), 3)
+        assert_equal(np.min(2.5, axis=0), 2.5)
+
+        # Check scalar behaviour for ufuncs without an identity
+        assert_equal(np.power.reduce(3), 3)
+
+        # Make sure that scalars are coming out from this operation
+        assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32)
+        assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32)
+        assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32)
+        assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32)
+
+        # check if scalars/0-d arrays get cast
+        assert_(type(np.any(0, axis=0)) is np.bool_)
+
+        # assert that 0-d arrays get wrapped
+        class MyArray(np.ndarray):
+            pass
+        a = np.array(1).view(MyArray)
+        assert_(type(np.any(a)) is MyArray)
+
+    def test_casting_out_param(self):
+        # Test that it's possible to do casts on output
+        a = np.ones((200, 100), np.int64)
+        b = np.ones((200, 100), np.int64)
+        c = np.ones((200, 100), np.float64)
+        np.add(a, b, out=c)
+        assert_equal(c, 2)
+
+        a = np.zeros(65536)
+        b = np.zeros(65536, dtype=np.float32)
+        np.subtract(a, 0, out=b)
+        assert_equal(b, 0)
+
+    def test_where_param(self):
+        # Test that the where= ufunc parameter works with regular arrays
+        a = np.arange(7)
+        b = np.ones(7)
+        c = np.zeros(7)
+        np.add(a, b, out=c, where=(a % 2 == 1))
+        assert_equal(c, [0, 2, 0, 4, 0, 6, 0])
+
+        a = np.arange(4).reshape(2, 2) + 2
+        np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]])
+        assert_equal(a, [[2, 27], [16, 5]])
+        # Broadcasting the where= parameter
+        np.subtract(a, 2, out=a, where=[True, False])
+        assert_equal(a, [[0, 27], [14, 5]])
+
+    def test_where_param_buffer_output(self):
+        # Exercises where= together with an output that requires casting
+        # (and hence buffering), which relies on masking in the nditer.
+
+        # With casting on output
+        a = np.ones(10, np.int64)
+        b = np.ones(10, np.int64)
+        c = 1.5 * np.ones(10, np.float64)
+        np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
+        assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5])
+
+    def test_where_param_alloc(self):
+        # With casting and allocated output
+        a = np.array([1], dtype=np.int64)
+        m = np.array([True], dtype=bool)
+        assert_equal(np.sqrt(a, where=m), [1])
+
+        # No casting and allocated output
+        a = np.array([1], dtype=np.float64)
+        m = np.array([True], dtype=bool)
+        assert_equal(np.sqrt(a, where=m), [1])
+
+    def test_where_with_broadcasting(self):
+        # See gh-17198
+        a = np.random.random((5000, 4))
+        b = np.random.random((5000, 1))
+
+        where = a > 0.3
+        out = np.full_like(a, 0)
+        np.less(a, b, where=where, out=out)
+        b_where = np.broadcast_to(b, a.shape)[where]
+        assert_array_equal((a[where] < b_where), out[where].astype(bool))
+        assert not out[~where].any()  # outside mask, out remains all 0
+
+    def check_identityless_reduction(self, a):
+        # np.minimum.reduce is an identityless reduction
+
+        # Verify that it sees the zero at various positions
+        a[...] = 1
+        a[1, 0, 0] = 0
+        assert_equal(np.minimum.reduce(a, axis=None), 0)
+        assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
+        assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
+        assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0])
+        assert_equal(np.minimum.reduce(a, axis=0),
+                                    [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
+        assert_equal(np.minimum.reduce(a, axis=1),
+                                    [[1, 1, 1, 1], [0, 1, 1, 1]])
+        assert_equal(np.minimum.reduce(a, axis=2),
+                                    [[1, 1, 1], [0, 1, 1]])
+        assert_equal(np.minimum.reduce(a, axis=()), a)
+
+        a[...] = 1
+        a[0, 1, 0] = 0
+        assert_equal(np.minimum.reduce(a, axis=None), 0)
+        assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
+        assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1])
+        assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
+        assert_equal(np.minimum.reduce(a, axis=0),
+                                    [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]])
+        assert_equal(np.minimum.reduce(a, axis=1),
+                                    [[0, 1, 1, 1], [1, 1, 1, 1]])
+        assert_equal(np.minimum.reduce(a, axis=2),
+                                    [[1, 0, 1], [1, 1, 1]])
+        assert_equal(np.minimum.reduce(a, axis=()), a)
+
+        a[...] = 1
+        a[0, 0, 1] = 0
+        assert_equal(np.minimum.reduce(a, axis=None), 0)
+        assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1])
+        assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
+        assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
+        assert_equal(np.minimum.reduce(a, axis=0),
+                                    [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
+        assert_equal(np.minimum.reduce(a, axis=1),
+                                    [[1, 0, 1, 1], [1, 1, 1, 1]])
+        assert_equal(np.minimum.reduce(a, axis=2),
+                                    [[0, 1, 1], [1, 1, 1]])
+        assert_equal(np.minimum.reduce(a, axis=()), a)
+
+    @requires_memory(6 * 1024**3)
+    @pytest.mark.skipif(sys.maxsize < 2**32,
+            reason="test array too large for 32bit platform")
+    def test_identityless_reduction_huge_array(self):
+        # Regression test for gh-20921 (copying identity incorrectly failed)
+        arr = np.zeros((2, 2**31), 'uint8')
+        arr[:, 0] = [1, 3]
+        arr[:, -1] = [4, 1]
+        res = np.maximum.reduce(arr, axis=0)
+        del arr
+        assert res[0] == 3
+        assert res[-1] == 4
+
+    def test_identityless_reduction_corder(self):
+        a = np.empty((2, 3, 4), order='C')
+        self.check_identityless_reduction(a)
+
+    def test_identityless_reduction_forder(self):
+        a = np.empty((2, 3, 4), order='F')
+        self.check_identityless_reduction(a)
+
+    def test_identityless_reduction_otherorder(self):
+        a = np.empty((2, 4, 3), order='C').swapaxes(1, 2)
+        self.check_identityless_reduction(a)
+
+    def test_identityless_reduction_noncontig(self):
+        a = np.empty((3, 5, 4), order='C').swapaxes(1, 2)
+        a = a[1:, 1:, 1:]
+        self.check_identityless_reduction(a)
+
+    def test_identityless_reduction_noncontig_unaligned(self):
+        a = np.empty((3*4*5*8 + 1,), dtype='i1')
+        a = a[1:].view(dtype='f8')
+        a.shape = (3, 4, 5)
+        a = a[1:, 1:, 1:]
+        self.check_identityless_reduction(a)
+
+    def test_reduce_identity_depends_on_loop(self):
+        """
+        The type of the result should always depend on the selected loop, not
+        necessarily the output (only relevant for object arrays).
+        """
+        # For an object loop, the default value 0 with type int is used:
+        assert type(np.add.reduce([], dtype=object)) is int
+        out = np.array(None, dtype=object)
+        # When the loop is float64 but `out` is object this does not happen,
+        # the result is float64 cast to object (which gives Python `float`).
+        np.add.reduce([], out=out, dtype=np.float64)
+        assert type(out[()]) is float
+
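+    # Sketch of the same dichotomy with the default loop: without
+    # dtype=object, the float64 loop's identity is used, not Python's 0.
+    def _sketch_identity_depends_on_loop(self):
+        assert type(np.add.reduce([], dtype=object)) is int
+        assert type(np.add.reduce([])) is np.float64
+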
+    def test_initial_reduction(self):
+        # np.minimum.reduce is an identityless reduction
+
+        # For cases like np.maximum(np.abs(...), initial=0)
+        # More generally, a supremum over non-negative numbers.
+        assert_equal(np.maximum.reduce([], initial=0), 0)
+
+        # For cases like reduction of an empty array over the reals.
+        assert_equal(np.minimum.reduce([], initial=np.inf), np.inf)
+        assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf)
+
+        # Random tests
+        assert_equal(np.minimum.reduce([5], initial=4), 4)
+        assert_equal(np.maximum.reduce([4], initial=5), 5)
+        assert_equal(np.maximum.reduce([5], initial=4), 5)
+        assert_equal(np.minimum.reduce([4], initial=5), 4)
+
+        # Check initial=None raises ValueError for both types of ufunc reductions
+        assert_raises(ValueError, np.minimum.reduce, [], initial=None)
+        assert_raises(ValueError, np.add.reduce, [], initial=None)
+        # Also in the somewhat special object case:
+        with pytest.raises(ValueError):
+            np.add.reduce([], initial=None, dtype=object)
+
+        # Check that np._NoValue gives default behavior.
+        assert_equal(np.add.reduce([], initial=np._NoValue), 0)
+
+        # Check that initial kwarg behaves as intended for dtype=object
+        a = np.array([10], dtype=object)
+        res = np.add.reduce(a, initial=5)
+        assert_equal(res, 15)
+
+    def test_empty_reduction_and_identity(self):
+        arr = np.zeros((0, 5))
+        # OK, since the reduction itself is *not* empty, the result is defined
+        assert np.true_divide.reduce(arr, axis=1).shape == (0,)
+        # Not OK, the reduction itself is empty and we have no identity
+        with pytest.raises(ValueError):
+            np.true_divide.reduce(arr, axis=0)
+
+        # Test that an empty reduction also fails if the result is empty
+        arr = np.zeros((0, 0, 5))
+        with pytest.raises(ValueError):
+            np.true_divide.reduce(arr, axis=1)
+
+        # Division reduction makes sense with `initial=1` (empty or not):
+        res = np.true_divide.reduce(arr, axis=1, initial=1)
+        assert_array_equal(res, np.ones((0, 5)))
+
+    @pytest.mark.parametrize('axis', (0, 1, None))
+    @pytest.mark.parametrize('where', (np.array([False, True, True]),
+                                       np.array([[True], [False], [True]]),
+                                       np.array([[True, False, False],
+                                                 [False, True, False],
+                                                 [False, True, True]])))
+    def test_reduction_with_where(self, axis, where):
+        a = np.arange(9.).reshape(3, 3)
+        a_copy = a.copy()
+        a_check = np.zeros_like(a)
+        np.positive(a, out=a_check, where=where)
+
+        res = np.add.reduce(a, axis=axis, where=where)
+        check = a_check.sum(axis)
+        assert_equal(res, check)
+        # Check we do not overwrite elements of a internally.
+        assert_array_equal(a, a_copy)
+
+    @pytest.mark.parametrize(('axis', 'where'),
+                             ((0, np.array([True, False, True])),
+                              (1, [True, True, False]),
+                              (None, True)))
+    @pytest.mark.parametrize('initial', (-np.inf, 5.))
+    def test_reduction_with_where_and_initial(self, axis, where, initial):
+        a = np.arange(9.).reshape(3, 3)
+        a_copy = a.copy()
+        a_check = np.full(a.shape, -np.inf)
+        np.positive(a, out=a_check, where=where)
+
+        res = np.maximum.reduce(a, axis=axis, where=where, initial=initial)
+        check = a_check.max(axis, initial=initial)
+        assert_equal(res, check)
+
+    def test_reduction_where_initial_needed(self):
+        a = np.arange(9.).reshape(3, 3)
+        m = [False, True, False]
+        assert_raises(ValueError, np.maximum.reduce, a, where=m)
+
+    def test_identityless_reduction_nonreorderable(self):
+        a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
+
+        res = np.divide.reduce(a, axis=0)
+        assert_equal(res, [8.0, 4.0, 8.0])
+
+        res = np.divide.reduce(a, axis=1)
+        assert_equal(res, [2.0, 8.0])
+
+        res = np.divide.reduce(a, axis=())
+        assert_equal(res, a)
+
+        assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1))
+
+    def test_reduce_zero_axis(self):
+        # If we have an n x m array and do a reduction with axis=1, then we
+        # are doing n reductions, and each reduction takes an m-element array.
+        # For a reduction operation without an identity:
+        #   n > 0, m > 0: fine
+        #   n = 0, m > 0: fine, doing 0 reductions of m-element arrays
+        #   n > 0, m = 0: can't reduce a 0-element array, ValueError
+        #   n = 0, m = 0: can't reduce a 0-element array, ValueError (for
+        #     consistency with the above case)
+        # This test doesn't actually look at return values; it just checks
+        # that we get an error in exactly those cases where we expect one,
+        # and assumes the calculations themselves are done correctly.
+
+        def ok(f, *args, **kwargs):
+            f(*args, **kwargs)
+
+        def err(f, *args, **kwargs):
+            assert_raises(ValueError, f, *args, **kwargs)
+
+        def t(expect, func, n, m):
+            expect(func, np.zeros((n, m)), axis=1)
+            expect(func, np.zeros((m, n)), axis=0)
+            expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
+            expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
+            expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
+            expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
+            expect(func, np.zeros((m // 3, m // 3, m // 3,
+                                  n // 2, n // 2)),
+                                 axis=(0, 1, 2))
+            # Check what happens if the inner (resp. outer) dimensions are a
+            # mix of zero and non-zero:
+            expect(func, np.zeros((10, m, n)), axis=(0, 1))
+            expect(func, np.zeros((10, n, m)), axis=(0, 2))
+            expect(func, np.zeros((m, 10, n)), axis=0)
+            expect(func, np.zeros((10, m, n)), axis=1)
+            expect(func, np.zeros((10, n, m)), axis=2)
+
+        # np.maximum is just an arbitrary ufunc with no reduction identity
+        assert_equal(np.maximum.identity, None)
+        t(ok, np.maximum.reduce, 30, 30)
+        t(ok, np.maximum.reduce, 0, 30)
+        t(err, np.maximum.reduce, 30, 0)
+        t(err, np.maximum.reduce, 0, 0)
+        err(np.maximum.reduce, [])
+        np.maximum.reduce(np.zeros((0, 0)), axis=())
+
+        # all of the combinations are fine for a reduction that has an
+        # identity
+        t(ok, np.add.reduce, 30, 30)
+        t(ok, np.add.reduce, 0, 30)
+        t(ok, np.add.reduce, 30, 0)
+        t(ok, np.add.reduce, 0, 0)
+        np.add.reduce([])
+        np.add.reduce(np.zeros((0, 0)), axis=())
+
+        # OTOH, accumulate always makes sense for any combination of n and m,
+        # because it maps an m-element array to an m-element array. These
+        # tests are simpler because accumulate doesn't accept multiple axes.
+        for uf in (np.maximum, np.add):
+            uf.accumulate(np.zeros((30, 0)), axis=0)
+            uf.accumulate(np.zeros((0, 30)), axis=0)
+            uf.accumulate(np.zeros((30, 30)), axis=0)
+            uf.accumulate(np.zeros((0, 0)), axis=0)
+
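+    # Sketch of the n/m cases above with plain NumPy: zero reductions of
+    # m-element arrays are fine, but any reduction over zero elements needs
+    # an identity or an explicit initial=.
+    def _sketch_zero_size_reduce(self):
+        assert np.maximum.reduce(np.zeros((0, 3)), axis=1).shape == (0,)
+        assert_raises(ValueError, np.maximum.reduce, np.zeros((3, 0)), axis=1)
+        res = np.maximum.reduce(np.zeros((3, 0)), axis=1, initial=-np.inf)
+        assert_array_equal(res, np.full(3, -np.inf))
+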
+    def test_safe_casting(self):
+        # In old versions of numpy, in-place operations used the 'unsafe'
+        # casting rules. In versions >= 1.10, 'same_kind' is the default,
+        # and an exception is raised instead of a warning when 'same_kind'
+        # is not satisfied.
+        a = np.array([1, 2, 3], dtype=int)
+        # Non-in-place addition is fine
+        assert_array_equal(assert_no_warnings(np.add, a, 1.1),
+                           [2.1, 3.1, 4.1])
+        assert_raises(TypeError, np.add, a, 1.1, out=a)
+
+        def add_inplace(a, b):
+            a += b
+
+        assert_raises(TypeError, add_inplace, a, 1.1)
+        # Make sure that explicitly overriding the exception is allowed:
+        assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
+        assert_array_equal(a, [2, 3, 4])
+
+    def test_ufunc_custom_out(self):
+        # Test ufunc with built in input types and custom output type
+
+        a = np.array([0, 1, 2], dtype='i8')
+        b = np.array([0, 1, 2], dtype='i8')
+        c = np.empty(3, dtype=_rational_tests.rational)
+
+        # Output must be specified so numpy knows what
+        # ufunc signature to look for
+        result = _rational_tests.test_add(a, b, c)
+        target = np.array([0, 2, 4], dtype=_rational_tests.rational)
+        assert_equal(result, target)
+
+        # The new resolution means that we can (usually) find custom loops
+        # as long as they match exactly:
+        result = _rational_tests.test_add(a, b)
+        assert_equal(result, target)
+
+        # This works even more generally, so long the default common-dtype
+        # promoter works out:
+        result = _rational_tests.test_add(a, b.astype(np.uint16), out=c)
+        assert_equal(result, target)
+
+        # But, it can be fooled, e.g. (use scalars, which forces legacy
+        # type resolution to kick in, which then fails):
+        with assert_raises(TypeError):
+            _rational_tests.test_add(a, np.uint16(2))
+
+    def test_operand_flags(self):
+        a = np.arange(16, dtype='l').reshape(4, 4)
+        b = np.arange(9, dtype='l').reshape(3, 3)
+        opflag_tests.inplace_add(a[:-1, :-1], b)
+        assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7],
+            [14, 16, 18, 11], [12, 13, 14, 15]], dtype='l'))
+
+        a = np.array(0)
+        opflag_tests.inplace_add(a, 3)
+        assert_equal(a, 3)
+        opflag_tests.inplace_add(a, [3, 4])
+        assert_equal(a, 10)
+
+    def test_struct_ufunc(self):
+        import numpy.core._struct_ufunc_tests as struct_ufunc
+
+        a = np.array([(1, 2, 3)], dtype='u8,u8,u8')
+        b = np.array([(1, 2, 3)], dtype='u8,u8,u8')
+
+        result = struct_ufunc.add_triplet(a, b)
+        assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
+        assert_raises(RuntimeError, struct_ufunc.register_fail)
+
+    def test_custom_ufunc(self):
+        a = np.array(
+            [_rational_tests.rational(1, 2),
+             _rational_tests.rational(1, 3),
+             _rational_tests.rational(1, 4)],
+            dtype=_rational_tests.rational)
+        b = np.array(
+            [_rational_tests.rational(1, 2),
+             _rational_tests.rational(1, 3),
+             _rational_tests.rational(1, 4)],
+            dtype=_rational_tests.rational)
+
+        result = _rational_tests.test_add_rationals(a, b)
+        expected = np.array(
+            [_rational_tests.rational(1),
+             _rational_tests.rational(2, 3),
+             _rational_tests.rational(1, 2)],
+            dtype=_rational_tests.rational)
+        assert_equal(result, expected)
+
+    def test_custom_ufunc_forced_sig(self):
+        # gh-9351 - looking for a non-first userloop would previously hang
+        with assert_raises(TypeError):
+            np.multiply(_rational_tests.rational(1), 1,
+                        signature=(_rational_tests.rational, int, None))
+
+    def test_custom_array_like(self):
+
+        class MyThing:
+            __array_priority__ = 1000
+
+            rmul_count = 0
+            getitem_count = 0
+
+            def __init__(self, shape):
+                self.shape = shape
+
+            def __len__(self):
+                return self.shape[0]
+
+            def __getitem__(self, i):
+                MyThing.getitem_count += 1
+                if not isinstance(i, tuple):
+                    i = (i,)
+                if len(i) > self.ndim:
+                    raise IndexError("boo")
+
+                return MyThing(self.shape[len(i):])
+
+            def __rmul__(self, other):
+                MyThing.rmul_count += 1
+                return self
+
+        np.float64(5)*MyThing((3, 3))
+        assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
+        assert_(MyThing.getitem_count <= 2, MyThing.getitem_count)
+
+    @pytest.mark.parametrize("a", (
+                             np.arange(10, dtype=int),
+                             np.arange(10, dtype=_rational_tests.rational),
+                             ))
+    def test_ufunc_at_basic(self, a):
+
+        aa = a.copy()
+        np.add.at(aa, [2, 5, 2], 1)
+        assert_equal(aa, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9])
+
+        with pytest.raises(ValueError):
+            # missing second operand
+            np.add.at(aa, [2, 5, 3])
+
+        aa = a.copy()
+        np.negative.at(aa, [2, 5, 3])
+        assert_equal(aa, [0, 1, -2, -3, 4, -5, 6, 7, 8, 9])
+
+        aa = a.copy()
+        b = np.array([100, 100, 100])
+        np.add.at(aa, [2, 5, 2], b)
+        assert_equal(aa, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9])
+
+        with pytest.raises(ValueError):
+            # extraneous second operand
+            np.negative.at(a, [2, 5, 3], [1, 2, 3])
+
+        with pytest.raises(ValueError):
+            # second operand cannot be converted to an array
+            np.add.at(a, [2, 5, 3], [[1, 2], 1])
+
+    # ufuncs with indexed loops for performance in ufunc.at
+    indexed_ufuncs = [np.add, np.subtract, np.multiply, np.floor_divide,
+                      np.maximum, np.minimum, np.fmax, np.fmin]
+
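+    # Sketch of the semantics these indexed loops accelerate: ufunc.at is
+    # unbuffered, so repeated indices all take effect, unlike the fancy
+    # indexing form `a[idx] += vals`, which writes each index only once.
+    def _sketch_at_repeated_indices(self):
+        a = np.zeros(3)
+        a[[0, 0]] += 1.             # buffered: the increment lands once
+        assert a[0] == 1.
+        np.add.at(a, [0, 0], 1.)    # unbuffered: both increments land
+        assert a[0] == 3.
+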
+    @pytest.mark.parametrize(
+                "typecode", np.typecodes['AllInteger'] + np.typecodes['Float'])
+    @pytest.mark.parametrize("ufunc", indexed_ufuncs)
+    def test_ufunc_at_inner_loops(self, typecode, ufunc):
+        if ufunc is np.floor_divide and typecode in np.typecodes['AllInteger']:
+            # Avoid divide-by-zero and inf for integer divide
+            a = np.ones(100, dtype=typecode)
+            indx = np.random.randint(100, size=30, dtype=np.intp)
+            vals = np.arange(1, 31, dtype=typecode)
+        else:
+            a = np.ones(1000, dtype=typecode)
+            indx = np.random.randint(1000, size=3000, dtype=np.intp)
+            vals = np.arange(3000, dtype=typecode)
+        atag = a.copy()
+        # Do the calculation twice and compare the answers
+        with warnings.catch_warnings(record=True) as w_at:
+            warnings.simplefilter('always')
+            ufunc.at(a, indx, vals)
+        with warnings.catch_warnings(record=True) as w_loop:
+            warnings.simplefilter('always')
+            for i, v in zip(indx, vals):
+                # Make sure all the work happens inside the ufunc
+                # in order to duplicate error/warning handling
+                ufunc(atag[i], v, out=atag[i:i+1], casting="unsafe")
+        assert_equal(atag, a)
+        # If w_loop warned, make sure w_at warned as well
+        if len(w_loop) > 0:
+            assert len(w_at) > 0
+            assert w_at[0].category == w_loop[0].category
+            assert str(w_at[0].message)[:10] == str(w_loop[0].message)[:10]
+
+    @pytest.mark.parametrize("typecode", np.typecodes['Complex'])
+    @pytest.mark.parametrize("ufunc", [np.add, np.subtract, np.multiply])
+    def test_ufunc_at_inner_loops_complex(self, typecode, ufunc):
+        a = np.ones(10, dtype=typecode)
+        indx = np.concatenate([np.ones(6, dtype=np.intp),
+                               np.full(18, 4, dtype=np.intp)])
+        value = a.dtype.type(1j)
+        ufunc.at(a, indx, value)
+        expected = np.ones_like(a)
+        if ufunc is np.multiply:
+            expected[1] = expected[4] = -1
+        else:
+            expected[1] += 6 * (value if ufunc is np.add else -value)
+            expected[4] += 18 * (value if ufunc is np.add else -value)
+
+        assert_array_equal(a, expected)
+
+    def test_ufunc_at_ellipsis(self):
+        # Make sure the indexed loop check does not choke on iters
+        # with subspaces
+        arr = np.zeros(5)
+        np.add.at(arr, slice(None), np.ones(5))
+        assert_array_equal(arr, np.ones(5))
+
+    def test_ufunc_at_negative(self):
+        arr = np.ones(5, dtype=np.int32)
+        indx = np.arange(5)
+        umt.indexed_negative.at(arr, indx)
+        # If it is [-1, -1, -1, -100, 0] then the regular strided loop was used
+        assert np.all(arr == [-1, -1, -1, -200, -1])
+
+    def test_ufunc_at_large(self):
+        # issue gh-23457
+        indices = np.zeros(8195, dtype=np.int16)
+        b = np.zeros(8195, dtype=float)
+        b[0] = 10
+        b[1] = 5
+        b[8192:] = 100
+        a = np.zeros(1, dtype=float)
+        np.add.at(a, indices, b)
+        assert a[0] == b.sum()
+
+    def test_cast_index_fastpath(self):
+        arr = np.zeros(10)
+        values = np.ones(100000)
+        # index must be cast, which may be buffered in chunks:
+        index = np.zeros(len(values), dtype=np.uint8)
+        np.add.at(arr, index, values)
+        assert arr[0] == len(values)
+
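+    # Added sketch (assumes the default build where np.BUFSIZE == 8192):
+    # the uint8 index above must be cast to intp, and casts are buffered
+    # in np.BUFSIZE-element chunks, so an index longer than one buffer
+    # exercises the chunked path.
+    def _demo_cast_index_chunking(self):
+        index = np.zeros(np.BUFSIZE + 100, dtype=np.uint8)
+        arr = np.zeros(10)
+        np.add.at(arr, index, 1.0)
+        assert arr[0] == np.BUFSIZE + 100
+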
+    @pytest.mark.parametrize("value", [
+        np.ones(1), np.ones(()), np.float64(1.), 1.])
+    def test_ufunc_at_scalar_value_fastpath(self, value):
+        arr = np.zeros(1000)
+        # index must be cast, which may be buffered in chunks:
+        index = np.repeat(np.arange(1000), 2)
+        np.add.at(arr, index, value)
+        assert_array_equal(arr, np.full_like(arr, 2 * value))
+
+    def test_ufunc_at_multiD(self):
+        a = np.arange(9).reshape(3, 3)
+        b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
+        np.add.at(a, (slice(None), [1, 2, 1]), b)
+        assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]])
+
+        a = np.arange(27).reshape(3, 3, 3)
+        b = np.array([100, 200, 300])
+        np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b)
+        assert_equal(a,
+            [[[0, 401, 202],
+              [3, 404, 205],
+              [6, 407, 208]],
+
+             [[9, 410, 211],
+              [12, 413, 214],
+              [15, 416, 217]],
+
+             [[18, 419, 220],
+              [21, 422, 223],
+              [24, 425, 226]]])
+
+        a = np.arange(9).reshape(3, 3)
+        b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
+        np.add.at(a, ([1, 2, 1], slice(None)), b)
+        assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]])
+
+        a = np.arange(27).reshape(3, 3, 3)
+        b = np.array([100, 200, 300])
+        np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b)
+        assert_equal(a,
+            [[[0,  1,  2],
+              [203, 404, 605],
+              [106, 207, 308]],
+
+             [[9,  10, 11],
+              [212, 413, 614],
+              [115, 216, 317]],
+
+             [[18, 19, 20],
+              [221, 422, 623],
+              [124, 225, 326]]])
+
+        a = np.arange(9).reshape(3, 3)
+        b = np.array([100, 200, 300])
+        np.add.at(a, (0, [1, 2, 1]), b)
+        assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]])
+
+        a = np.arange(27).reshape(3, 3, 3)
+        b = np.array([100, 200, 300])
+        np.add.at(a, ([1, 2, 1], 0, slice(None)), b)
+        assert_equal(a,
+            [[[0,  1,  2],
+              [3,  4,  5],
+              [6,  7,  8]],
+
+             [[209, 410, 611],
+              [12,  13, 14],
+              [15,  16, 17]],
+
+             [[118, 219, 320],
+              [21,  22, 23],
+              [24,  25, 26]]])
+
+        a = np.arange(27).reshape(3, 3, 3)
+        b = np.array([100, 200, 300])
+        np.add.at(a, (slice(None), slice(None), slice(None)), b)
+        assert_equal(a,
+            [[[100, 201, 302],
+              [103, 204, 305],
+              [106, 207, 308]],
+
+             [[109, 210, 311],
+              [112, 213, 314],
+              [115, 216, 317]],
+
+             [[118, 219, 320],
+              [121, 222, 323],
+              [124, 225, 326]]])
+
+    def test_ufunc_at_0D(self):
+        a = np.array(0)
+        np.add.at(a, (), 1)
+        assert_equal(a, 1)
+
+        assert_raises(IndexError, np.add.at, a, 0, 1)
+        assert_raises(IndexError, np.add.at, a, [], 1)
+
+    def test_ufunc_at_dtypes(self):
+        # Test mixed dtypes
+        a = np.arange(10)
+        np.power.at(a, [1, 2, 3, 2], 3.5)
+        assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9]))
+
+    def test_ufunc_at_boolean(self):
+        # Test boolean indexing and boolean ufuncs
+        a = np.arange(10)
+        index = a % 2 == 0
+        np.equal.at(a, index, [0, 2, 4, 6, 8])
+        assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9])
+
+        # Test unary operator
+        a = np.arange(10, dtype='u4')
+        np.invert.at(a, [2, 5, 2])
+        assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9])
+
+    def test_ufunc_at_advanced(self):
+        # Test empty subspace
+        orig = np.arange(4)
+        a = orig[:, None][:, 0:0]
+        np.add.at(a, [0, 1], 3)
+        assert_array_equal(orig, np.arange(4))
+
+        # Test with swapped byte order
+        index = np.array([1, 2, 1], np.dtype('i').newbyteorder())
+        values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder())
+        np.add.at(values, index, 3)
+        assert_array_equal(values, [1, 8, 6, 4])
+
+        # Test exception thrown
+        values = np.array(['a', 1], dtype=object)
+        assert_raises(TypeError, np.add.at, values, [0, 1], 1)
+        assert_array_equal(values, np.array(['a', 1], dtype=object))
+
+        # Test multiple output ufuncs raise error, gh-5665
+        assert_raises(ValueError, np.modf.at, np.arange(10), [1])
+
+        # Test maximum
+        a = np.array([1, 2, 3])
+        np.maximum.at(a, [0], 0)
+        assert_equal(a, np.array([1, 2, 3]))
+
+    @pytest.mark.parametrize("dtype",
+            np.typecodes['AllInteger'] + np.typecodes['Float'])
+    @pytest.mark.parametrize("ufunc",
+            [np.add, np.subtract, np.divide, np.minimum, np.maximum])
+    def test_at_negative_indexes(self, dtype, ufunc):
+        a = np.arange(0, 10).astype(dtype)
+        indxs = np.array([-1, 1, -1, 2]).astype(np.intp)
+        vals = np.array([1, 5, 2, 10], dtype=a.dtype)
+
+        expected = a.copy()
+        for i, v in zip(indxs, vals):
+            expected[i] = ufunc(expected[i], v)
+
+        ufunc.at(a, indxs, vals)
+        assert_array_equal(a, expected)
+        assert np.all(indxs == [-1, 1, -1, 2])
+
+    def test_at_not_none_signature(self):
+        # Test ufuncs with non-trivial signature raise a TypeError
+        a = np.ones((2, 2, 2))
+        b = np.ones((1, 2, 2))
+        assert_raises(TypeError, np.matmul.at, a, [0], b)
+
+        a = np.array([[[1, 2], [3, 4]]])
+        assert_raises(TypeError, np.linalg._umath_linalg.det.at, a, [0])
+
+    def test_at_no_loop_for_op(self):
+        # str dtype does not have a ufunc loop for np.add
+        arr = np.ones(10, dtype=str)
+        with pytest.raises(np.core._exceptions._UFuncNoLoopError):
+            np.add.at(arr, [0, 1], [0, 1])
+
+    def test_at_output_casting(self):
+        arr = np.array([-1])
+        np.equal.at(arr, [0], [0])
+        assert arr[0] == 0
+
+    def test_at_broadcast_failure(self):
+        arr = np.arange(5)
+        with pytest.raises(ValueError):
+            np.add.at(arr, [0, 1], [1, 2, 3])
+
+
+    def test_reduce_arguments(self):
+        f = np.add.reduce
+        d = np.ones((5,2), dtype=int)
+        o = np.ones((2,), dtype=d.dtype)
+        r = o * 5
+        assert_equal(f(d), r)
+        # a, axis=0, dtype=None, out=None, keepdims=False
+        assert_equal(f(d, axis=0), r)
+        assert_equal(f(d, 0), r)
+        assert_equal(f(d, 0, dtype=None), r)
+        assert_equal(f(d, 0, dtype='i'), r)
+        assert_equal(f(d, 0, 'i'), r)
+        assert_equal(f(d, 0, None), r)
+        assert_equal(f(d, 0, None, out=None), r)
+        assert_equal(f(d, 0, None, out=o), r)
+        assert_equal(f(d, 0, None, o), r)
+        assert_equal(f(d, 0, None, None), r)
+        assert_equal(f(d, 0, None, None, keepdims=False), r)
+        assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))
+        assert_equal(f(d, 0, None, None, False, 0), r)
+        assert_equal(f(d, 0, None, None, False, initial=0), r)
+        assert_equal(f(d, 0, None, None, False, 0, True), r)
+        assert_equal(f(d, 0, None, None, False, 0, where=True), r)
+        # multiple keywords
+        assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
+        assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
+        assert_equal(f(d, 0, None, out=None, keepdims=False), r)
+        assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0,
+                       where=True), r)
+
+        # too little
+        assert_raises(TypeError, f)
+        # too much
+        assert_raises(TypeError, f, d, 0, None, None, False, 0, True, 1)
+        # invalid axis
+        assert_raises(TypeError, f, d, "invalid")
+        assert_raises(TypeError, f, d, axis="invalid")
+        assert_raises(TypeError, f, d, axis="invalid", dtype=None,
+                      keepdims=True)
+        # invalid dtype
+        assert_raises(TypeError, f, d, 0, "invalid")
+        assert_raises(TypeError, f, d, dtype="invalid")
+        assert_raises(TypeError, f, d, dtype="invalid", out=None)
+        # invalid out
+        assert_raises(TypeError, f, d, 0, None, "invalid")
+        assert_raises(TypeError, f, d, out="invalid")
+        assert_raises(TypeError, f, d, out="invalid", dtype=None)
+        # keepdims is coerced to bool, so there is no invalid value to test:
+        # assert_raises(TypeError, f, d, 0, None, None, "invalid")
+        # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None)
+        # invalid mix
+        assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid",
+                     out=None)
+
+        # invalid keyword
+        assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0)
+        assert_raises(TypeError, f, d, invalid=0)
+        assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid",
+                      out=None)
+        assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True,
+                      out=None, invalid=0)
+        assert_raises(TypeError, f, d, axis=0, dtype=None,
+                      out=None, invalid=0)
+
+    def test_structured_equal(self):
+        # https://github.com/numpy/numpy/issues/4855
+
+        class MyA(np.ndarray):
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                return getattr(ufunc, method)(*(input.view(np.ndarray)
+                                              for input in inputs), **kwargs)
+        a = np.arange(12.).reshape(4,3)
+        ra = a.view(dtype=('f8,f8,f8')).squeeze()
+        mra = ra.view(MyA)
+
+        target = np.array([ True, False, False, False], dtype=bool)
+        assert_equal(np.all(target == (mra == ra[0])), True)
+
+    def test_scalar_equal(self):
+        # Scalar comparisons should always work, without deprecation warnings.
+        # even when the ufunc fails.
+        a = np.array(0.)
+        b = np.array('a')
+        assert_(a != b)
+        assert_(b != a)
+        assert_(not (a == b))
+        assert_(not (b == a))
+
+    def test_NotImplemented_not_returned(self):
+        # See gh-5964 and gh-2091. Some of these functions are not operator
+        # related and were fixed for other reasons in the past.
+        binary_funcs = [
+            np.power, np.add, np.subtract, np.multiply, np.divide,
+            np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
+            np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
+            np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
+            np.maximum, np.minimum, np.mod,
+            np.greater, np.greater_equal, np.less, np.less_equal,
+            np.equal, np.not_equal]
+
+        a = np.array('1')
+        b = 1
+        c = np.array([1., 2.])
+        for f in binary_funcs:
+            assert_raises(TypeError, f, a, b)
+            assert_raises(TypeError, f, c, a)
+
+    @pytest.mark.parametrize("ufunc",
+             [np.logical_and, np.logical_or])  # logical_xor object loop is bad
+    @pytest.mark.parametrize("signature",
+             [(None, None, object), (object, None, None),
+              (None, object, None)])
+    def test_logical_ufuncs_object_signatures(self, ufunc, signature):
+        a = np.array([True, None, False], dtype=object)
+        res = ufunc(a, a, signature=signature)
+        assert res.dtype == object
+
+    @pytest.mark.parametrize("ufunc",
+            [np.logical_and, np.logical_or, np.logical_xor])
+    @pytest.mark.parametrize("signature",
+                 [(bool, None, object), (object, None, bool),
+                  (None, object, bool)])
+    def test_logical_ufuncs_mixed_object_signatures(self, ufunc, signature):
+        # Most mixed signatures fail (except those with bool out, e.g. `OO->?`)
+        a = np.array([True, None, False])
+        with pytest.raises(TypeError):
+            ufunc(a, a, signature=signature)
+
+    @pytest.mark.parametrize("ufunc",
+            [np.logical_and, np.logical_or, np.logical_xor])
+    def test_logical_ufuncs_support_anything(self, ufunc):
+        # The logical ufuncs support even input that can't be promoted:
+        a = np.array(b'1', dtype="V3")
+        c = np.array([1., 2.])
+        assert_array_equal(ufunc(a, c), ufunc([True, True], True))
+        assert ufunc.reduce(a) == True
+        # check that the output has no effect:
+        out = np.zeros(2, dtype=np.int32)
+        expected = ufunc([True, True], True).astype(out.dtype)
+        assert_array_equal(ufunc(a, c, out=out), expected)
+        out = np.zeros((), dtype=np.int32)
+        assert ufunc.reduce(a, out=out) == True
+        # Last check: test reduction when out and a match (the complexity
+        # here is that "i,i->?" may seem to match, but must not be picked).
+        a = np.array([3], dtype="i")
+        out = np.zeros((), dtype=a.dtype)
+        assert ufunc.reduce(a, out=out) == 1
+
+    @pytest.mark.parametrize("ufunc",
+            [np.logical_and, np.logical_or, np.logical_xor])
+    def test_logical_ufuncs_reject_string(self, ufunc):
+        """
+        Logical ufuncs are normally well defined by working with the boolean
+        equivalent, i.e. casting all inputs to bools should work.
+
+        However, casting strings to bools is *currently* weird, because it
+        actually uses `bool(int(str))`.  Thus we explicitly reject strings.
+        This test should succeed (and can probably just be removed) as soon as
+        string to bool casts are well defined in NumPy.
+        """
+        with pytest.raises(TypeError, match="contain a loop with signature"):
+            ufunc(["1"], ["3"])
+        with pytest.raises(TypeError, match="contain a loop with signature"):
+            ufunc.reduce(["1", "2", "0"])
+
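+    # Added worked example of the weirdness the docstring describes: a
+    # cast through `bool(int(str))` would make "0" falsy, even though
+    # every non-empty Python string is truthy.
+    def _demo_string_truthiness_mismatch(self):
+        assert bool("0") is True        # Python truthiness
+        assert bool(int("0")) is False  # what the cast would compute
+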
+    @pytest.mark.parametrize("ufunc",
+             [np.logical_and, np.logical_or, np.logical_xor])
+    def test_logical_ufuncs_out_cast_check(self, ufunc):
+        a = np.array('1')
+        c = np.array([1., 2.])
+        out = a.copy()
+        with pytest.raises(TypeError):
+            # It would be safe, but not equiv casting:
+            ufunc(a, c, out=out, casting="equiv")
+
+    def test_reducelike_byteorder_resolution(self):
+        # See gh-20699, byte-order changes need some extra care in the type
+        # resolution to make the following succeed:
+        arr_be = np.arange(10, dtype=">i8")
+        arr_le = np.arange(10, dtype="<i8")
+
+        assert np.add.reduce(arr_be) == np.add.reduce(arr_le)
+        assert np.add.reduceat(arr_be, [1]) == np.add.reduceat(arr_le, [1])
+
+
+@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
+                                if isinstance(getattr(np, x), np.ufunc)])
+def test_ufunc_types(ufunc):
+    '''
+    Check that each ufunc loop returns the output dtypes advertised in
+    `ufunc.types`, skipping the object and bool loops.
+    '''
+    for typ in ufunc.types:
+        # types is a list of strings like ii->i
+        if 'O' in typ or '?' in typ:
+            continue
+        inp, out = typ.split('->')
+        args = [np.ones((3, 3), t) for t in inp]
+        with warnings.catch_warnings(record=True):
+            warnings.filterwarnings("always")
+            res = ufunc(*args)
+        if isinstance(res, tuple):
+            outs = tuple(out)
+            assert len(res) == len(outs)
+            for r, t in zip(res, outs):
+                assert r.dtype == np.dtype(t)
+        else:
+            assert res.dtype == np.dtype(out)
+
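+# Added sketch: entries of `ufunc.types` are typecode strings mapping
+# input typecodes to output typecodes, e.g. 'dd->d' for the float64 loop
+# of np.add; the loop above splits each entry on '->'.
+def _demo_types_strings():
+    assert 'dd->d' in np.add.types
+    inp, out = 'dd->d'.split('->')
+    assert inp == 'dd' and out == 'd'
+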
+@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
+                                if isinstance(getattr(np, x), np.ufunc)])
+@np._no_nep50_warning()
+def test_ufunc_noncontiguous(ufunc):
+    '''
+    Check that contiguous and non-contiguous calls to ufuncs
+    have the same results for values in range(1, 7)
+    '''
+    for typ in ufunc.types:
+        # types is a list of strings like ii->i
+        if any(set('O?mM') & set(typ)):
+            # bool, object, datetime are too irregular for this simple test
+            continue
+        inp, out = typ.split('->')
+        args_c = [np.empty(6, t) for t in inp]
+        args_n = [np.empty(18, t)[::3] for t in inp]
+        for a in args_c:
+            a.flat = range(1,7)
+        for a in args_n:
+            a.flat = range(1,7)
+        with warnings.catch_warnings(record=True):
+            warnings.filterwarnings("always")
+            res_c = ufunc(*args_c)
+            res_n = ufunc(*args_n)
+        if len(out) == 1:
+            res_c = (res_c,)
+            res_n = (res_n,)
+        for c_ar, n_ar in zip(res_c, res_n):
+            dt = c_ar.dtype
+            if np.issubdtype(dt, np.floating):
+                # for floating point results allow a small tolerance in
+                # comparisons, since different algorithms (libm vs.
+                # intrinsics) can be used for different input strides
+                res_eps = np.finfo(dt).eps
+                tol = 2*res_eps
+                assert_allclose(c_ar, n_ar, atol=tol, rtol=tol)
+            else:
+                assert_equal(c_ar, n_ar)
+
+
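+# Added sketch: a non-contiguous operand is simply a strided view such
+# as np.empty(18)[::3]; a different SIMD/libm code path may be selected
+# for it, which is why the comparison above allows a 2*eps tolerance.
+def _demo_strided_view():
+    base = np.arange(18.0)
+    view = base[::3]
+    assert not view.flags['C_CONTIGUOUS']
+    assert view.strides[0] == 3 * base.itemsize
+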
+@pytest.mark.parametrize('ufunc', [np.sign, np.equal])
+def test_ufunc_warn_with_nan(ufunc):
+    # issue gh-15127
+    # test that calling certain ufuncs with a non-standard `nan` value does not
+    # emit a warning
+    # `b` holds a 64 bit signaling nan: the most significant bit of the
+    # significand is zero.
+    b = np.array([0x7ff0000000000001], 'i8').view('f8')
+    assert np.isnan(b)
+    if ufunc.nin == 1:
+        ufunc(b)
+    elif ufunc.nin == 2:
+        ufunc(b, b.copy())
+    else:
+        raise ValueError('ufunc with more than 2 inputs')
+
+
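+# Added sketch of the bit layout used above: a float64 NaN has all
+# exponent bits set, and the top significand bit distinguishes quiet
+# NaNs (bit set, 0x7ff8...) from signaling NaNs (bit clear, as in the
+# test above).
+def _demo_nan_bit_patterns():
+    quiet = np.array([0x7ff8000000000000], 'i8').view('f8')
+    signaling = np.array([0x7ff0000000000001], 'i8').view('f8')
+    assert np.isnan(quiet) and np.isnan(signaling)
+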
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_ufunc_out_casterrors():
+    # Tests that casting errors are correctly reported and buffers are
+    # cleared.
+    # The following array can be added to itself as an object array, but
+    # the result cannot be cast to an integer output:
+    value = 123  # relies on python cache (leak-check will still find it)
+    arr = np.array([value] * int(np.BUFSIZE * 1.5) +
+                   ["string"] +
+                   [value] * int(1.5 * np.BUFSIZE), dtype=object)
+    out = np.ones(len(arr), dtype=np.intp)
+
+    count = sys.getrefcount(value)
+    with pytest.raises(ValueError):
+        # Output casting failure:
+        np.add(arr, arr, out=out, casting="unsafe")
+
+    assert count == sys.getrefcount(value)
+    # output is unchanged after the error, this shows that the iteration
+    # was aborted (this is not necessarily defined behaviour)
+    assert out[-1] == 1
+
+    with pytest.raises(ValueError):
+        # Input casting failure:
+        np.add(arr, arr, out=out, dtype=np.intp, casting="unsafe")
+
+    assert count == sys.getrefcount(value)
+    # output is unchanged after the error, this shows that the iteration
+    # was aborted (this is not necessarily defined behaviour)
+    assert out[-1] == 1
+
+
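+# Added sketch of the refcount pattern used above: CPython caches the
+# int 123, so any reference leaked by a failing ufunc call shows up as
+# a refcount delta on the cached object.
+def _demo_refcount_leak_check():
+    value = 123
+    before = sys.getrefcount(value)
+    tmp = [value] * 10   # ten temporary references
+    del tmp              # dropped again
+    assert sys.getrefcount(value) == before
+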
+@pytest.mark.parametrize("bad_offset", [0, int(np.BUFSIZE * 1.5)])
+def test_ufunc_input_casterrors(bad_offset):
+    value = 123
+    arr = np.array([value] * bad_offset +
+                   ["string"] +
+                   [value] * int(1.5 * np.BUFSIZE), dtype=object)
+    with pytest.raises(ValueError):
+        # Force cast inputs, but the buffered cast of `arr` to intp fails:
+        np.add(arr, arr, dtype=np.intp, casting="unsafe")
+
+
+@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+@pytest.mark.parametrize("bad_offset", [0, int(np.BUFSIZE * 1.5)])
+def test_ufunc_input_floatingpoint_error(bad_offset):
+    value = 123
+    arr = np.array([value] * bad_offset +
+                   [np.nan] +
+                   [value] * int(1.5 * np.BUFSIZE))
+    with np.errstate(invalid="raise"), pytest.raises(FloatingPointError):
+        # Force cast inputs, but the buffered cast of `arr` to intp fails:
+        np.add(arr, arr, dtype=np.intp, casting="unsafe")
+
+
+def test_trivial_loop_invalid_cast():
+    # This tests the fast-path "invalid cast", see gh-19904.
+    with pytest.raises(TypeError,
+            match="cast ufunc 'add' input 0"):
+        # the void dtype definitely cannot cast to double:
+        np.add(np.array(1, "i,i"), 3, signature="dd->d")
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+@pytest.mark.parametrize("offset",
+        [0, np.BUFSIZE//2, int(1.5*np.BUFSIZE)])
+def test_reduce_casterrors(offset):
+    # Test reporting of casting errors in reductions, we test various
+    # offsets to where the casting error will occur, since these may occur
+    # at different places during the reduction procedure. For example
+    # the first item may be special.
+    value = 123  # relies on python cache (leak-check will still find it)
+    arr = np.array([value] * offset +
+                   ["string"] +
+                   [value] * int(1.5 * np.BUFSIZE), dtype=object)
+    out = np.array(-1, dtype=np.intp)
+
+    count = sys.getrefcount(value)
+    with pytest.raises(ValueError, match="invalid literal"):
+        # This is an unsafe cast, but we currently always allow that.
+        # Note that the double loop is picked, but the cast fails.
+        # `initial=None` disables the use of an identity here to test failures
+        # while copying the first values path (not used when identity exists).
+        np.add.reduce(arr, dtype=np.intp, out=out, initial=None)
+    assert count == sys.getrefcount(value)
+    # If an error occurred during casting, the operation is done at most until
+    # the error occurs (the result of which would be `value * offset`) and -1
+    # if the error happened immediately.
+    # This does not define behaviour, the output is invalid and thus undefined
+    assert out[()] < value * offset
+
+
+def test_object_reduce_cleanup_on_failure():
+    # Test cleanup, including of the initial value (manually provided or not)
+    with pytest.raises(TypeError):
+        np.add.reduce([1, 2, None], initial=4)
+
+    with pytest.raises(TypeError):
+        np.add.reduce([1, 2, None])
+
+
+@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+@pytest.mark.parametrize("method",
+        [np.add.accumulate, np.add.reduce,
+         pytest.param(lambda x: np.add.reduceat(x, [0]), id="reduceat"),
+         pytest.param(lambda x: np.log.at(x, [2]), id="at")])
+def test_ufunc_methods_floaterrors(method):
+    # adding inf and -inf (or taking log(-inf)) creates an invalid float and warns
+    arr = np.array([np.inf, 0, -np.inf])
+    with np.errstate(all="warn"):
+        with pytest.warns(RuntimeWarning, match="invalid value"):
+            method(arr)
+
+    arr = np.array([np.inf, 0, -np.inf])
+    with np.errstate(all="raise"):
+        with pytest.raises(FloatingPointError):
+            method(arr)
+
+
+def _check_neg_zero(value):
+    if value != 0.0:
+        return False
+    if not np.signbit(value.real):
+        return False
+    if value.dtype.kind == "c":
+        return np.signbit(value.imag)
+    return True
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+def test_addition_negative_zero(dtype):
+    dtype = np.dtype(dtype)
+    if dtype.kind == "c":
+        neg_zero = dtype.type(complex(-0.0, -0.0))
+    else:
+        neg_zero = dtype.type(-0.0)
+
+    arr = np.array(neg_zero)
+    arr2 = np.array(neg_zero)
+
+    assert _check_neg_zero(arr + arr2)
+    # In-place ops may end up on a different path (reduce path) see gh-21211
+    arr += arr2
+    assert _check_neg_zero(arr)
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+@pytest.mark.parametrize("use_initial", [True, False])
+def test_addition_reduce_negative_zero(dtype, use_initial):
+    dtype = np.dtype(dtype)
+    if dtype.kind == "c":
+        neg_zero = dtype.type(complex(-0.0, -0.0))
+    else:
+        neg_zero = dtype.type(-0.0)
+
+    kwargs = {}
+    if use_initial:
+        kwargs["initial"] = neg_zero
+    else:
+        pytest.xfail("-0. propagation in sum currently requires initial")
+
+    # Test various lengths, in case SIMD paths or chunking play a role.
+    # 150 extends beyond the pairwise blocksize; probably not important.
+    for i in range(0, 150):
+        arr = np.array([neg_zero] * i, dtype=dtype)
+        res = np.sum(arr, **kwargs)
+        if i > 0 or use_initial:
+            assert _check_neg_zero(res)
+        else:
+            # `sum([])` should probably be 0.0 and not -0.0 like `sum([-0.0])`
+            assert not np.signbit(res.real)
+            assert not np.signbit(res.imag)
+
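+# Added worked example: under IEEE-754 round-to-nearest, addition yields
+# -0.0 only when both operands are -0.0, which is what _check_neg_zero
+# verifies for the sums above.
+def _demo_signed_zero_addition():
+    assert np.signbit(np.float64(-0.0) + np.float64(-0.0))
+    assert not np.signbit(np.float64(-0.0) + np.float64(0.0))
+    assert not np.signbit(np.float64(0.0) + np.float64(0.0))
+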
+class TestLowlevelAPIAccess:
+    def test_resolve_dtypes_basic(self):
+        # Basic test for dtype resolution:
+        i4 = np.dtype("i4")
+        f4 = np.dtype("f4")
+        f8 = np.dtype("f8")
+
+        r = np.add.resolve_dtypes((i4, f4, None))
+        assert r == (f8, f8, f8)
+
+        # Signature uses the same logic to parse as ufunc (less strict)
+        # the following is "same-kind" casting so works:
+        r = np.add.resolve_dtypes((
+                i4, i4, None), signature=(None, None, "f4"))
+        assert r == (f4, f4, f4)
+
+        # Check NEP 50 "weak" promotion also:
+        r = np.add.resolve_dtypes((f4, int, None))
+        assert r == (f4, f4, f4)
+
+        with pytest.raises(TypeError):
+            np.add.resolve_dtypes((i4, f4, None), casting="no")
+
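+    # Added usage sketch: resolve_dtypes lets a caller pre-compute the
+    # dtypes a call would use, e.g. to allocate the output buffer before
+    # invoking the ufunc with an explicit `out=`.
+    def _demo_resolve_dtypes_preallocate(self):
+        arr = np.arange(3, dtype="i4")
+        _, _, out_dt = np.add.resolve_dtypes((arr.dtype, arr.dtype, None))
+        out = np.empty(arr.shape, dtype=out_dt)
+        np.add(arr, arr, out=out)
+        assert_array_equal(out, [0, 2, 4])
+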
+    def test_weird_dtypes(self):
+        S0 = np.dtype("S0")
+        # S0 is often converted by NumPy to S1, but not here:
+        r = np.equal.resolve_dtypes((S0, S0, None))
+        assert r == (S0, S0, np.dtype(bool))
+
+        # Subarray dtypes are weird and may not work fully, we preserve them
+        # leading to a TypeError (currently no equal loop for void/structured)
+        dts = np.dtype("10i")
+        with pytest.raises(TypeError):
+            np.equal.resolve_dtypes((dts, dts, None))
+
+    def test_resolve_dtypes_reduction_all_dtypes_set(self):
+        # Requesting all three dtypes together with reduction=True is
+        # not currently supported:
+        i4 = np.dtype("i4")
+        with pytest.raises(NotImplementedError):
+            np.add.resolve_dtypes((i4, i4, i4), reduction=True)
+
+    @pytest.mark.parametrize("dtypes", [
+            (np.dtype("i"), np.dtype("i")),
+            (None, np.dtype("i"), np.dtype("f")),
+            (np.dtype("i"), None, np.dtype("f")),
+            ("i4", "i4", None)])
+    def test_resolve_dtypes_errors(self, dtypes):
+        with pytest.raises(TypeError):
+            np.add.resolve_dtypes(dtypes)
+
+    def test_resolve_dtypes_reduction(self):
+        i2 = np.dtype("i2")
+        long_ = np.dtype("long")
+        # Check special addition resolution:
+        res = np.add.resolve_dtypes((None, i2, None), reduction=True)
+        assert res == (long_, long_, long_)
+
+    def test_resolve_dtypes_reduction_errors(self):
+        i2 = np.dtype("i2")
+
+        with pytest.raises(TypeError):
+            np.add.resolve_dtypes((None, i2, i2))
+
+        with pytest.raises(TypeError):
+            np.add.signature((None, None, "i4"))
+
+    @pytest.mark.skipif(not hasattr(ct, "pythonapi"),
+            reason="`ctypes.pythonapi` required for capsule unpacking.")
+    def test_loop_access(self):
+        # This is a basic test for the full strided loop access
+        data_t = ct.ARRAY(ct.c_char_p, 2)
+        dim_t = ct.ARRAY(ct.c_ssize_t, 1)
+        strides_t = ct.ARRAY(ct.c_ssize_t, 2)
+        strided_loop_t = ct.CFUNCTYPE(
+                ct.c_int, ct.c_void_p, data_t, dim_t, strides_t, ct.c_void_p)
+
+        class call_info_t(ct.Structure):
+            _fields_ = [
+                ("strided_loop", strided_loop_t),
+                ("context", ct.c_void_p),
+                ("auxdata", ct.c_void_p),
+                ("requires_pyapi", ct.c_byte),
+                ("no_floatingpoint_errors", ct.c_byte),
+            ]
+
+        i4 = np.dtype("i4")
+        dt, call_info_obj = np.negative._resolve_dtypes_and_context((i4, i4))
+        assert dt == (i4, i4)  # can be used without casting
+
+        # Fill in the rest of the information:
+        np.negative._get_strided_loop(call_info_obj)
+
+        ct.pythonapi.PyCapsule_GetPointer.restype = ct.c_void_p
+        call_info = ct.pythonapi.PyCapsule_GetPointer(
+                ct.py_object(call_info_obj),
+                ct.c_char_p(b"numpy_1.24_ufunc_call_info"))
+
+        call_info = ct.cast(call_info, ct.POINTER(call_info_t)).contents
+
+        arr = np.arange(10, dtype=i4)
+        call_info.strided_loop(
+                call_info.context,
+                data_t(arr.ctypes.data, arr.ctypes.data),
+                arr.ctypes.shape,  # is a C-array with 10 here
+                strides_t(arr.ctypes.strides[0], arr.ctypes.strides[0]),
+                call_info.auxdata)
+
+        # We just directly called the negative inner-loop in-place:
+        assert_array_equal(arr, -np.arange(10, dtype=i4))
+
+    @pytest.mark.parametrize("strides", [1, (1, 2, 3), (1, "2")])
+    def test__get_strided_loop_errors_bad_strides(self, strides):
+        i4 = np.dtype("i4")
+        dt, call_info = np.negative._resolve_dtypes_and_context((i4, i4))
+
+        with pytest.raises(TypeError, match="fixed_strides.*tuple.*or None"):
+            np.negative._get_strided_loop(call_info, fixed_strides=strides)
+
+    def test__get_strided_loop_errors_bad_call_info(self):
+        i4 = np.dtype("i4")
+        dt, call_info = np.negative._resolve_dtypes_and_context((i4, i4))
+
+        with pytest.raises(ValueError, match="PyCapsule"):
+            np.negative._get_strided_loop("not the capsule!")
+
+        with pytest.raises(TypeError, match=".*incompatible context"):
+            np.add._get_strided_loop(call_info)
+
+        np.negative._get_strided_loop(call_info)
+        with pytest.raises(TypeError):
+            # cannot call it a second time:
+            np.negative._get_strided_loop(call_info)
+
+    def test_long_arrays(self):
+        t = np.zeros((1029, 917), dtype=np.single)
+        t[0][0] = 1
+        t[28][414] = 1
+        tc = np.cos(t)
+        assert_equal(tc[0][0], tc[28][414])
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_umath.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_umath.py
new file mode 100644
index 00000000..963e740d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_umath.py
@@ -0,0 +1,4743 @@
+import platform
+import warnings
+import fnmatch
+import itertools
+import pytest
+import sys
+import os
+import operator
+from fractions import Fraction
+from functools import reduce
+from collections import namedtuple
+
+import numpy.core.umath as ncu
+from numpy.core import _umath_tests as ncu_tests
+import numpy as np
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_raises_regex,
+    assert_array_equal, assert_almost_equal, assert_array_almost_equal,
+    assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings,
+    _gen_alignment_data, assert_array_almost_equal_nulp, IS_WASM, IS_MUSL,
+    IS_PYPY
+    )
+from numpy.testing._private.utils import _glibc_older_than
+
+UFUNCS = [obj for obj in np.core.umath.__dict__.values()
+          if isinstance(obj, np.ufunc)]
+
+UFUNCS_UNARY = [
+    uf for uf in UFUNCS if uf.nin == 1
+]
+UFUNCS_UNARY_FP = [
+    uf for uf in UFUNCS_UNARY if 'f->f' in uf.types
+]
+
+UFUNCS_BINARY = [
+    uf for uf in UFUNCS if uf.nin == 2
+]
+UFUNCS_BINARY_ACC = [
+    uf for uf in UFUNCS_BINARY if hasattr(uf, "accumulate") and uf.nout == 1
+]
+
+def interesting_binop_operands(val1, val2, dtype):
+    """
+    Helper to create "interesting" operands to cover common code paths:
+    * scalar inputs
+    * only first "values" is an array (e.g. scalar division fast-paths)
+    * Longer array (SIMD) placing the value of interest at different positions
+    * Oddly strided arrays which may not be SIMD compatible
+
+    It does not attempt to cover unaligned access or mixed dtypes.
+    These are normally handled by the casting/buffering machinery.
+
+    This is a generator rather than a pytest fixture, since a fixture
+    would only provide a single set of operands per test.
+    """
+    fill_value = 1  # could be a parameter, but maybe not an optional one?
+
+    arr1 = np.full(10003, dtype=dtype, fill_value=fill_value)
+    arr2 = np.full(10003, dtype=dtype, fill_value=fill_value)
+
+    arr1[0] = val1
+    arr2[0] = val2
+
+    extractor = lambda res: res
+    yield arr1[0], arr2[0], extractor, "scalars"
+
+    extractor = lambda res: res
+    yield arr1[0, ...], arr2[0, ...], extractor, "scalar-arrays"
+
+    # reset array values to fill_value:
+    arr1[0] = fill_value
+    arr2[0] = fill_value
+
+    for pos in [0, 1, 2, 3, 4, 5, -1, -2, -3, -4]:
+        arr1[pos] = val1
+        arr2[pos] = val2
+
+        extractor = lambda res: res[pos]
+        yield arr1, arr2, extractor, f"off-{pos}"
+        yield arr1, arr2[pos], extractor, f"off-{pos}-with-scalar"
+
+        arr1[pos] = fill_value
+        arr2[pos] = fill_value
+
+    for stride in [-1, 113]:
+        op1 = arr1[::stride]
+        op2 = arr2[::stride]
+        op1[10] = val1
+        op2[10] = val2
+
+        extractor = lambda res: res[10]
+        yield op1, op2, extractor, f"stride-{stride}"
+
+        op1[10] = fill_value
+        op2[10] = fill_value
+
+
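+# Added usage sketch (an assumption about the intended use): a test
+# consumes the generator one operand layout at a time, using `extractor`
+# to pull the value of interest back out of the result.
+def _demo_interesting_binop_operands():
+    for op1, op2, extractor, tag in interesting_binop_operands(
+            3, 4, np.dtype(np.float64)):
+        res = np.add(op1, op2)
+        assert extractor(res) == 7, tag
+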
+def on_powerpc():
+    """ True if we are running on a Power PC platform."""
+    return platform.processor() == 'powerpc' or \
+           platform.machine().startswith('ppc')
+
+
+def bad_arcsinh():
+    """The blocklisted trig functions are not accurate on aarch64/PPC for
+    complex256. Rather than dig through the actual problem, skip the
+    test. This should be fixed when we can move past glibc 2.17,
+    which is the version in manylinux2014.
+    """
+    if platform.machine() == 'aarch64':
+        x = 1.78e-10
+    elif on_powerpc():
+        x = 2.16e-10
+    else:
+        return False
+    v1 = np.arcsinh(np.float128(x))
+    v2 = np.arcsinh(np.complex256(x)).real
+    # The eps for float128 is about 1e-33, so this is way bigger
+    return abs((v1 / v2) - 1.0) > 1e-23
+
+
+class _FilterInvalids:
+    def setup_method(self):
+        self.olderr = np.seterr(invalid='ignore')
+
+    def teardown_method(self):
+        np.seterr(**self.olderr)
+
+
+class TestConstants:
+    def test_pi(self):
+        assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
+
+    def test_e(self):
+        assert_allclose(ncu.e, 2.718281828459045, 1e-15)
+
+    def test_euler_gamma(self):
+        assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
+
+
+class TestOut:
+    def test_out_subok(self):
+        for subok in (True, False):
+            a = np.array(0.5)
+            o = np.empty(())
+
+            r = np.add(a, 2, o, subok=subok)
+            assert_(r is o)
+            r = np.add(a, 2, out=o, subok=subok)
+            assert_(r is o)
+            r = np.add(a, 2, out=(o,), subok=subok)
+            assert_(r is o)
+
+            d = np.array(5.7)
+            o1 = np.empty(())
+            o2 = np.empty((), dtype=np.int32)
+
+            r1, r2 = np.frexp(d, o1, None, subok=subok)
+            assert_(r1 is o1)
+            r1, r2 = np.frexp(d, None, o2, subok=subok)
+            assert_(r2 is o2)
+            r1, r2 = np.frexp(d, o1, o2, subok=subok)
+            assert_(r1 is o1)
+            assert_(r2 is o2)
+
+            r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
+            assert_(r1 is o1)
+            r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
+            assert_(r2 is o2)
+            r1, r2 = np.frexp(d, out=(o1, o2), subok=subok)
+            assert_(r1 is o1)
+            assert_(r2 is o2)
+
+            with assert_raises(TypeError):
+                # Out argument must be tuple, since there are multiple outputs.
+                r1, r2 = np.frexp(d, out=o1, subok=subok)
+
+            assert_raises(TypeError, np.add, a, 2, o, o, subok=subok)
+            assert_raises(TypeError, np.add, a, 2, o, out=o, subok=subok)
+            assert_raises(TypeError, np.add, a, 2, None, out=o, subok=subok)
+            assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok)
+            assert_raises(ValueError, np.add, a, 2, out=(), subok=subok)
+            assert_raises(TypeError, np.add, a, 2, [], subok=subok)
+            assert_raises(TypeError, np.add, a, 2, out=[], subok=subok)
+            assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok)
+            o.flags.writeable = False
+            assert_raises(ValueError, np.add, a, 2, o, subok=subok)
+            assert_raises(ValueError, np.add, a, 2, out=o, subok=subok)
+            assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok)
+
+    def test_out_wrap_subok(self):
+        class ArrayWrap(np.ndarray):
+            __array_priority__ = 10
+
+            def __new__(cls, arr):
+                return np.asarray(arr).view(cls).copy()
+
+            def __array_wrap__(self, arr, context):
+                return arr.view(type(self))
+
+        for subok in (True, False):
+            a = ArrayWrap([0.5])
+
+            r = np.add(a, 2, subok=subok)
+            if subok:
+                assert_(isinstance(r, ArrayWrap))
+            else:
+                assert_(type(r) == np.ndarray)
+
+            r = np.add(a, 2, None, subok=subok)
+            if subok:
+                assert_(isinstance(r, ArrayWrap))
+            else:
+                assert_(type(r) == np.ndarray)
+
+            r = np.add(a, 2, out=None, subok=subok)
+            if subok:
+                assert_(isinstance(r, ArrayWrap))
+            else:
+                assert_(type(r) == np.ndarray)
+
+            r = np.add(a, 2, out=(None,), subok=subok)
+            if subok:
+                assert_(isinstance(r, ArrayWrap))
+            else:
+                assert_(type(r) == np.ndarray)
+
+            d = ArrayWrap([5.7])
+            o1 = np.empty((1,))
+            o2 = np.empty((1,), dtype=np.int32)
+
+            r1, r2 = np.frexp(d, o1, subok=subok)
+            if subok:
+                assert_(isinstance(r2, ArrayWrap))
+            else:
+                assert_(type(r2) == np.ndarray)
+
+            r1, r2 = np.frexp(d, o1, None, subok=subok)
+            if subok:
+                assert_(isinstance(r2, ArrayWrap))
+            else:
+                assert_(type(r2) == np.ndarray)
+
+            r1, r2 = np.frexp(d, None, o2, subok=subok)
+            if subok:
+                assert_(isinstance(r1, ArrayWrap))
+            else:
+                assert_(type(r1) == np.ndarray)
+
+            r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
+            if subok:
+                assert_(isinstance(r2, ArrayWrap))
+            else:
+                assert_(type(r2) == np.ndarray)
+
+            r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
+            if subok:
+                assert_(isinstance(r1, ArrayWrap))
+            else:
+                assert_(type(r1) == np.ndarray)
+
+            with assert_raises(TypeError):
+                # Out argument must be tuple, since there are multiple outputs.
+                r1, r2 = np.frexp(d, out=o1, subok=subok)
+
+
+class TestComparisons:
+
+    @pytest.mark.parametrize('dtype', np.sctypes['uint'] + np.sctypes['int'] +
+                             np.sctypes['float'] + [np.bool_])
+    @pytest.mark.parametrize('py_comp,np_comp', [
+        (operator.lt, np.less),
+        (operator.le, np.less_equal),
+        (operator.gt, np.greater),
+        (operator.ge, np.greater_equal),
+        (operator.eq, np.equal),
+        (operator.ne, np.not_equal)
+    ])
+    def test_comparison_functions(self, dtype, py_comp, np_comp):
+        # Initialize input arrays
+        if dtype == np.bool_:
+            a = np.random.choice(a=[False, True], size=1000)
+            b = np.random.choice(a=[False, True], size=1000)
+            scalar = True
+        else:
+            a = np.random.randint(low=1, high=10, size=1000).astype(dtype)
+            b = np.random.randint(low=1, high=10, size=1000).astype(dtype)
+            scalar = 5
+        np_scalar = np.dtype(dtype).type(scalar)
+        a_lst = a.tolist()
+        b_lst = b.tolist()
+
+        # (Binary) Comparison (x1=array, x2=array)
+        comp_b = np_comp(a, b).view(np.uint8)
+        comp_b_list = [int(py_comp(x, y)) for x, y in zip(a_lst, b_lst)]
+
+        # (Scalar1) Comparison (x1=scalar, x2=array)
+        comp_s1 = np_comp(np_scalar, b).view(np.uint8)
+        comp_s1_list = [int(py_comp(scalar, x)) for x in b_lst]
+
+        # (Scalar2) Comparison (x1=array, x2=scalar)
+        comp_s2 = np_comp(a, np_scalar).view(np.uint8)
+        comp_s2_list = [int(py_comp(x, scalar)) for x in a_lst]
+
+        # Sequence: Binary, Scalar1 and Scalar2
+        assert_(comp_b.tolist() == comp_b_list,
+            f"Failed comparison ({py_comp.__name__})")
+        assert_(comp_s1.tolist() == comp_s1_list,
+            f"Failed comparison ({py_comp.__name__})")
+        assert_(comp_s2.tolist() == comp_s2_list,
+            f"Failed comparison ({py_comp.__name__})")
+
+    def test_ignore_object_identity_in_equal(self):
+        # Check comparing identical objects whose comparison
+        # is not a simple boolean, e.g., arrays that are compared elementwise.
+        a = np.array([np.array([1, 2, 3]), None], dtype=object)
+        assert_raises(ValueError, np.equal, a, a)
+
+        # Check error raised when comparing identical non-comparable objects.
+        class FunkyType:
+            def __eq__(self, other):
+                raise TypeError("I won't compare")
+
+        a = np.array([FunkyType()])
+        assert_raises(TypeError, np.equal, a, a)
+
+        # Check identity doesn't override comparison mismatch.
+        a = np.array([np.nan], dtype=object)
+        assert_equal(np.equal(a, a), [False])
+
+    def test_ignore_object_identity_in_not_equal(self):
+        # Check comparing identical objects whose comparison
+        # is not a simple boolean, e.g., arrays that are compared elementwise.
+        a = np.array([np.array([1, 2, 3]), None], dtype=object)
+        assert_raises(ValueError, np.not_equal, a, a)
+
+        # Check error raised when comparing identical non-comparable objects.
+        class FunkyType:
+            def __ne__(self, other):
+                raise TypeError("I won't compare")
+
+        a = np.array([FunkyType()])
+        assert_raises(TypeError, np.not_equal, a, a)
+
+        # Check identity doesn't override comparison mismatch.
+        a = np.array([np.nan], dtype=object)
+        assert_equal(np.not_equal(a, a), [True])
+
+    def test_error_in_equal_reduce(self):
+        # gh-20929
+        # make sure np.equal.reduce raises a TypeError if an array is passed
+        # without specifying the dtype
+        a = np.array([0, 0])
+        assert_equal(np.equal.reduce(a, dtype=bool), True)
+        assert_raises(TypeError, np.equal.reduce, a)
+
+    def test_object_dtype(self):
+        assert np.equal(1, [1], dtype=object).dtype == object
+        assert np.equal(1, [1], signature=(None, None, "O")).dtype == object
+
+    def test_object_nonbool_dtype_error(self):
+        # bool output dtype is fine of course:
+        assert np.equal(1, [1], dtype=bool).dtype == bool
+
+        # but the following examples do not have a matching loop:
+        with pytest.raises(TypeError, match="No loop matching"):
+            np.equal(1, 1, dtype=np.int64)
+
+        with pytest.raises(TypeError, match="No loop matching"):
+            np.equal(1, 1, sig=(None, None, "l"))
+
+    @pytest.mark.parametrize("dtypes", ["qQ", "Qq"])
+    @pytest.mark.parametrize('py_comp, np_comp', [
+        (operator.lt, np.less),
+        (operator.le, np.less_equal),
+        (operator.gt, np.greater),
+        (operator.ge, np.greater_equal),
+        (operator.eq, np.equal),
+        (operator.ne, np.not_equal)
+    ])
+    @pytest.mark.parametrize("vals", [(2**60, 2**60+1), (2**60+1, 2**60)])
+    def test_large_integer_direct_comparison(
+            self, dtypes, py_comp, np_comp, vals):
+        # Note that float(2**60) + 1 == float(2**60).
+        a1 = np.array([2**60], dtype=dtypes[0])
+        a2 = np.array([2**60 + 1], dtype=dtypes[1])
+        expected = py_comp(2**60, 2**60+1)
+
+        assert py_comp(a1, a2) == expected
+        assert np_comp(a1, a2) == expected
+        # Also check the scalars:
+        s1 = a1[0]
+        s2 = a2[0]
+        assert isinstance(s1, np.integer)
+        assert isinstance(s2, np.integer)
+        # The Python operator here is mainly interesting:
+        assert py_comp(s1, s2) == expected
+        assert np_comp(s1, s2) == expected
+
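+    # Added worked example of the note above: float64 has a 53-bit
+    # significand, so casting both values to float makes them compare
+    # equal, while NumPy's integer comparison keeps them distinct.
+    def _demo_float_cannot_distinguish(self):
+        assert float(2**60) == float(2**60 + 1)
+        assert np.int64(2**60) != np.int64(2**60 + 1)
+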
+    @pytest.mark.parametrize("dtype", np.typecodes['UnsignedInteger'])
+    @pytest.mark.parametrize('py_comp_func, np_comp_func', [
+        (operator.lt, np.less),
+        (operator.le, np.less_equal),
+        (operator.gt, np.greater),
+        (operator.ge, np.greater_equal),
+        (operator.eq, np.equal),
+        (operator.ne, np.not_equal)
+    ])
+    @pytest.mark.parametrize("flip", [True, False])
+    def test_unsigned_signed_direct_comparison(
+            self, dtype, py_comp_func, np_comp_func, flip):
+        if flip:
+            py_comp = lambda x, y: py_comp_func(y, x)
+            np_comp = lambda x, y: np_comp_func(y, x)
+        else:
+            py_comp = py_comp_func
+            np_comp = np_comp_func
+
+        arr = np.array([np.iinfo(dtype).max], dtype=dtype)
+        expected = py_comp(int(arr[0]), -1)
+
+        assert py_comp(arr, -1) == expected
+        assert np_comp(arr, -1) == expected
+        scalar = arr[0]
+        assert isinstance(scalar, np.integer)
+        # The Python operator here is mainly interesting:
+        assert py_comp(scalar, -1) == expected
+        assert np_comp(scalar, -1) == expected
+
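+    # Added worked example: NumPy compares mixed signed/unsigned
+    # operands by value rather than by wraparound, so the maximum uint64
+    # compares greater than -1 instead of "equal" to it.
+    def _demo_unsigned_vs_negative(self):
+        umax = np.uint64(np.iinfo(np.uint64).max)
+        assert umax > -1
+        assert not (umax == -1)
+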
+
+class TestAdd:
+    def test_reduce_alignment(self):
+        # gh-9876
+        # make sure arrays with weird strides work with the optimizations in
+        # pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at a
+        # 4 byte offset, even though its itemsize is 8.
+        a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)])
+        a['a'] = -1
+        assert_equal(a['b'].sum(), 0)
+
+
+class TestDivision:
+    def test_division_int(self):
+        # int division should follow Python
+        x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
+        if 5 / 10 == 0.5:
+            assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
+                                   -0.05, -0.1, -0.9, -1, -1.2])
+        else:
+            assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
+        assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
+        assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize("dtype,ex_val", itertools.product(
+        np.sctypes['int'] + np.sctypes['uint'], (
+            (
+                # dividend
+                "np.array(range(fo.max-lsize, fo.max)).astype(dtype),"
+                # divisors
+                "np.arange(lsize).astype(dtype),"
+                # scalar divisors
+                "range(15)"
+            ),
+            (
+                # dividend
+                "np.arange(fo.min, fo.min+lsize).astype(dtype),"
+                # divisors
+                "np.arange(lsize//-2, lsize//2).astype(dtype),"
+                # scalar divisors
+                "range(fo.min, fo.min + 15)"
+            ), (
+                # dividend
+                "np.array(range(fo.max-lsize, fo.max)).astype(dtype),"
+                # divisors
+                "np.arange(lsize).astype(dtype),"
+                # scalar divisors
+                "[1,3,9,13,neg, fo.min+1, fo.min//2, fo.max//3, fo.max//4]"
+            )
+        )
+    ))
+    def test_division_int_boundary(self, dtype, ex_val):
+        fo = np.iinfo(dtype)
+        neg = -1 if fo.min < 0 else 1
+        # Large enough to test SIMD loops and remainder elements
+        lsize = 512 + 7
+        a, b, divisors = eval(ex_val)
+        a_lst, b_lst = a.tolist(), b.tolist()
+
+        c_div = lambda n, d: (
+            0 if d == 0 else (
+                fo.min if (n and n == fo.min and d == -1) else n//d
+            )
+        )
+        with np.errstate(divide='ignore'):
+            ac = a.copy()
+            ac //= b
+            div_ab = a // b
+        div_lst = [c_div(x, y) for x, y in zip(a_lst, b_lst)]
+
+        msg = "Integer arrays floor division check (//)"
+        assert all(div_ab == div_lst), msg
+        msg_eq = "Integer arrays floor division check (//=)"
+        assert all(ac == div_lst), msg_eq
+
+        for divisor in divisors:
+            ac = a.copy()
+            with np.errstate(divide='ignore', over='ignore'):
+                div_a = a // divisor
+                ac //= divisor
+            div_lst = [c_div(i, divisor) for i in a_lst]
+
+            assert all(div_a == div_lst), msg
+            assert all(ac == div_lst), msg_eq
+
+        with np.errstate(divide='raise', over='raise'):
+            if 0 in b:
+                # Verify overflow case
+                with pytest.raises(FloatingPointError,
+                        match="divide by zero encountered in floor_divide"):
+                    a // b
+            else:
+                a // b
+            if fo.min and fo.min in a:
+                with pytest.raises(FloatingPointError,
+                        match='overflow encountered in floor_divide'):
+                    a // -1
+            elif fo.min:
+                a // -1
+            with pytest.raises(FloatingPointError,
+                    match="divide by zero encountered in floor_divide"):
+                a // 0
+            with pytest.raises(FloatingPointError,
+                    match="divide by zero encountered in floor_divide"):
+                ac = a.copy()
+                ac //= 0
+
+            np.array([], dtype=dtype) // 0
+
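+    # Added worked example of the overflow case modelled by c_div above:
+    # the most negative int has no positive counterpart, so min // -1
+    # wraps back to min (and raises when over='raise' is set).
+    def _demo_int_min_floordiv_overflow(self):
+        fo = np.iinfo(np.int32)
+        with np.errstate(over='ignore'):
+            res = np.array([fo.min], dtype=np.int32) // -1
+        assert res[0] == fo.min
+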
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize("dtype,ex_val", itertools.product(
+        np.sctypes['int'] + np.sctypes['uint'], (
+            "np.array([fo.max, 1, 2, 1, 1, 2, 3], dtype=dtype)",
+            "np.array([fo.min, 1, -2, 1, 1, 2, -3]).astype(dtype)",
+            "np.arange(fo.min, fo.min+(100*10), 10, dtype=dtype)",
+            "np.array(range(fo.max-(100*7), fo.max, 7)).astype(dtype)",
+        )
+    ))
+    def test_division_int_reduce(self, dtype, ex_val):
+        fo = np.iinfo(dtype)
+        a = eval(ex_val)
+        lst = a.tolist()
+        c_div = lambda n, d: (
+            0 if d == 0 or (n and n == fo.min and d == -1) else n//d
+        )
+
+        with np.errstate(divide='ignore'):
+            div_a = np.floor_divide.reduce(a)
+        div_lst = reduce(c_div, lst)
+        msg = "Reduce floor integer division check"
+        assert div_a == div_lst, msg
+
+        with np.errstate(divide='raise', over='raise'):
+            with pytest.raises(FloatingPointError,
+                    match="divide by zero encountered in reduce"):
+                np.floor_divide.reduce(np.arange(-100, 100).astype(dtype))
+            if fo.min:
+                with pytest.raises(FloatingPointError,
+                        match='overflow encountered in reduce'):
+                    np.floor_divide.reduce(
+                        np.array([fo.min, 1, -1], dtype=dtype)
+                    )
+
+    @pytest.mark.parametrize(
+            "dividend,divisor,quotient",
+            [(np.timedelta64(2,'Y'), np.timedelta64(2,'M'), 12),
+             (np.timedelta64(2,'Y'), np.timedelta64(-2,'M'), -12),
+             (np.timedelta64(-2,'Y'), np.timedelta64(2,'M'), -12),
+             (np.timedelta64(-2,'Y'), np.timedelta64(-2,'M'), 12),
+             (np.timedelta64(2,'M'), np.timedelta64(-2,'Y'), -1),
+             (np.timedelta64(2,'Y'), np.timedelta64(0,'M'), 0),
+             (np.timedelta64(2,'Y'), 2, np.timedelta64(1,'Y')),
+             (np.timedelta64(2,'Y'), -2, np.timedelta64(-1,'Y')),
+             (np.timedelta64(-2,'Y'), 2, np.timedelta64(-1,'Y')),
+             (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')),
+             (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')),
+             (np.timedelta64(-2,'Y'), -3, np.timedelta64(0,'Y')),
+             (np.timedelta64(-2,'Y'), 0, np.timedelta64('Nat','Y')),
+            ])
+    def test_division_int_timedelta(self, dividend, divisor, quotient):
+        # If either divisor is 0 or quotient is Nat, check for division by 0
+        if divisor and (isinstance(quotient, int) or not np.isnat(quotient)):
+            msg = "Timedelta floor division check"
+            assert dividend // divisor == quotient, msg
+
+            # Test for arrays as well
+            msg = "Timedelta arrays floor division check"
+            dividend_array = np.array([dividend]*5)
+            quotient_array = np.array([quotient]*5)
+            assert all(dividend_array // divisor == quotient_array), msg
+        else:
+            if IS_WASM:
+                pytest.skip("fp errors don't work in wasm")
+            with np.errstate(divide='raise', invalid='raise'):
+                with pytest.raises(FloatingPointError):
+                    dividend // divisor
+
+    def test_division_complex(self):
+        # check that implementation is correct
+        msg = "Complex division implementation check"
+        x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
+        assert_almost_equal(x**2/x, x, err_msg=msg)
+        # check overflow, underflow
+        msg = "Complex division overflow/underflow check"
+        x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
+        y = x**2/x
+        assert_almost_equal(y/x, [1, 1], err_msg=msg)
+
+    def test_zero_division_complex(self):
+        with np.errstate(invalid="ignore", divide="ignore"):
+            x = np.array([0.0], dtype=np.complex128)
+            y = 1.0/x
+            assert_(np.isinf(y)[0])
+            y = complex(np.inf, np.nan)/x
+            assert_(np.isinf(y)[0])
+            y = complex(np.nan, np.inf)/x
+            assert_(np.isinf(y)[0])
+            y = complex(np.inf, np.inf)/x
+            assert_(np.isinf(y)[0])
+            y = 0.0/x
+            assert_(np.isnan(y)[0])
+
+    def test_floor_division_complex(self):
+        # check that floor division, divmod and remainder raises type errors
+        x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
+        with pytest.raises(TypeError):
+            x // 7
+        with pytest.raises(TypeError):
+            np.divmod(x, 7)
+        with pytest.raises(TypeError):
+            np.remainder(x, 7)
+
+    def test_floor_division_signed_zero(self):
+        # Check that the sign bit is correctly set when dividing positive and
+        # negative zero by one.
+        x = np.zeros(10)
+        assert_equal(np.signbit(x//1), 0)
+        assert_equal(np.signbit((-x)//1), 1)
+
+    @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"),
+            reason="gh-22982")
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize('dtype', np.typecodes['Float'])
+    def test_floor_division_errors(self, dtype):
+        fnan = np.array(np.nan, dtype=dtype)
+        fone = np.array(1.0, dtype=dtype)
+        fzer = np.array(0.0, dtype=dtype)
+        finf = np.array(np.inf, dtype=dtype)
+        # divide by zero error check
+        with np.errstate(divide='raise', invalid='ignore'):
+            assert_raises(FloatingPointError, np.floor_divide, fone, fzer)
+        with np.errstate(divide='ignore', invalid='raise'):
+            np.floor_divide(fone, fzer)
+
+        # The following already contain a NaN and should not warn
+        with np.errstate(all='raise'):
+            np.floor_divide(fnan, fone)
+            np.floor_divide(fone, fnan)
+            np.floor_divide(fnan, fzer)
+            np.floor_divide(fzer, fnan)
+
+    @pytest.mark.parametrize('dtype', np.typecodes['Float'])
+    def test_floor_division_corner_cases(self, dtype):
+        # test corner cases like 1.0//0.0 for errors and return vals
+        x = np.zeros(10, dtype=dtype)
+        y = np.ones(10, dtype=dtype)
+        fnan = np.array(np.nan, dtype=dtype)
+        fone = np.array(1.0, dtype=dtype)
+        fzer = np.array(0.0, dtype=dtype)
+        finf = np.array(np.inf, dtype=dtype)
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in floor_divide")
+            div = np.floor_divide(fnan, fone)
+            assert np.isnan(div), "dtype: %s, div: %s" % (dtype, div)
+            div = np.floor_divide(fone, fnan)
+            assert np.isnan(div), "dtype: %s, div: %s" % (dtype, div)
+            div = np.floor_divide(fnan, fzer)
+            assert np.isnan(div), "dtype: %s, div: %s" % (dtype, div)
+        # verify 1.0//0.0 computations return inf
+        with np.errstate(divide='ignore'):
+            z = np.floor_divide(y, x)
+            assert_(np.isinf(z).all())
+
+def floor_divide_and_remainder(x, y):
+    return (np.floor_divide(x, y), np.remainder(x, y))
+
+
+def _signs(dt):
+    if dt in np.typecodes['UnsignedInteger']:
+        return (+1,)
+    else:
+        return (+1, -1)
+
+
+class TestRemainder:
+
+    def test_remainder_basic(self):
+        dt = np.typecodes['AllInteger'] + np.typecodes['Float']
+        for op in [floor_divide_and_remainder, np.divmod]:
+            for dt1, dt2 in itertools.product(dt, dt):
+                for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
+                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+                    msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+                    a = np.array(sg1*71, dtype=dt1)
+                    b = np.array(sg2*19, dtype=dt2)
+                    div, rem = op(a, b)
+                    assert_equal(div*b + rem, a, err_msg=msg)
+                    if sg2 == -1:
+                        assert_(b < rem <= 0, msg)
+                    else:
+                        assert_(b > rem >= 0, msg)
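+
+    # An illustrative contrast (an addition for clarity, not upstream):
+    # the floor-based np.remainder takes the divisor's sign, while the
+    # C-style np.fmod keeps the dividend's sign.
+    def test_remainder_vs_fmod_sign_sketch(self):
+        assert_equal(np.remainder(-7, 3), 2)   # sign of the divisor
+        assert_equal(np.fmod(-7, 3), -1)       # sign of the dividend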
+
+    def test_float_remainder_exact(self):
+        # test that float results are exact for small integers. This also
+        # holds for the same integers scaled by powers of two.
+        nlst = list(range(-127, 0))
+        plst = list(range(1, 128))
+        dividend = nlst + [0] + plst
+        divisor = nlst + plst
+        arg = list(itertools.product(dividend, divisor))
+        tgt = list(divmod(*t) for t in arg)
+
+        a, b = np.array(arg, dtype=int).T
+        # Convert the exact integer results from Python to float so that
+        # signed zeros can be used; they are checked below.
+        tgtdiv, tgtrem = np.array(tgt, dtype=float).T
+        tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
+        tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
+
+        for op in [floor_divide_and_remainder, np.divmod]:
+            for dt in np.typecodes['Float']:
+                msg = 'op: %s, dtype: %s' % (op.__name__, dt)
+                fa = a.astype(dt)
+                fb = b.astype(dt)
+                div, rem = op(fa, fb)
+                assert_equal(div, tgtdiv, err_msg=msg)
+                assert_equal(rem, tgtrem, err_msg=msg)
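+
+    # A signed-zero sketch (illustrative, not upstream): Python's integer
+    # divmod has no -0, but the float quotient keeps the sign of the
+    # operands, e.g. 0.0 // -3.0 is -0.0, which is what the np.where
+    # fixups above encode.
+    def test_float_quotient_signed_zero_sketch(self):
+        assert_(np.signbit(np.floor_divide(np.float64(0.0), -3.0)))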
+
+    def test_float_remainder_roundoff(self):
+        # gh-6127
+        dt = np.typecodes['Float']
+        for op in [floor_divide_and_remainder, np.divmod]:
+            for dt1, dt2 in itertools.product(dt, dt):
+                for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
+                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+                    msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+                    a = np.array(sg1*78*6e-8, dtype=dt1)
+                    b = np.array(sg2*6e-8, dtype=dt2)
+                    div, rem = op(a, b)
+                    # Equal assertion should hold when fmod is used
+                    assert_equal(div*b + rem, a, err_msg=msg)
+                    if sg2 == -1:
+                        assert_(b < rem <= 0, msg)
+                    else:
+                        assert_(b > rem >= 0, msg)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.xfail(sys.platform.startswith("darwin"),
+            reason="macOS seems not to give the correct 'invalid' warning "
+                   "for `fmod`; hopefully other platforms always do.")
+    @pytest.mark.parametrize('dtype', np.typecodes['Float'])
+    def test_float_divmod_errors(self, dtype):
+        # Check valid errors raised for divmod and remainder
+        fzero = np.array(0.0, dtype=dtype)
+        fone = np.array(1.0, dtype=dtype)
+        finf = np.array(np.inf, dtype=dtype)
+        fnan = np.array(np.nan, dtype=dtype)
+        # Since divmod combines the divide and remainder ops, it can set
+        # both the divide-by-zero and invalid flags.
+        with np.errstate(divide='raise', invalid='ignore'):
+            assert_raises(FloatingPointError, np.divmod, fone, fzero)
+        with np.errstate(divide='ignore', invalid='raise'):
+            assert_raises(FloatingPointError, np.divmod, fone, fzero)
+        with np.errstate(invalid='raise'):
+            assert_raises(FloatingPointError, np.divmod, fzero, fzero)
+        with np.errstate(invalid='raise'):
+            assert_raises(FloatingPointError, np.divmod, finf, finf)
+        with np.errstate(divide='ignore', invalid='raise'):
+            assert_raises(FloatingPointError, np.divmod, finf, fzero)
+        with np.errstate(divide='raise', invalid='ignore'):
+            # inf / 0 does not set any flags, only the modulo creates a NaN
+            np.divmod(finf, fzero)
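+
+    # A value-level sketch of the flag split noted above (illustrative,
+    # not upstream): inf // 0 is inf without the divide-by-zero flag,
+    # while inf % 0 is NaN and sets the invalid flag instead.
+    def test_divmod_inf_zero_values_sketch(self):
+        with np.errstate(invalid='ignore'):
+            div, rem = np.divmod(np.float64(np.inf), np.float64(0.0))
+        assert_(np.isinf(div))
+        assert_(np.isnan(rem))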
+
+    @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"),
+            reason="gh-22982")
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.xfail(sys.platform.startswith("darwin"),
+           reason="macOS seems not to give the correct 'invalid' warning "
+                  "for `fmod`; hopefully other platforms always do.")
+    @pytest.mark.parametrize('dtype', np.typecodes['Float'])
+    @pytest.mark.parametrize('fn', [np.fmod, np.remainder])
+    def test_float_remainder_errors(self, dtype, fn):
+        fzero = np.array(0.0, dtype=dtype)
+        fone = np.array(1.0, dtype=dtype)
+        finf = np.array(np.inf, dtype=dtype)
+        fnan = np.array(np.nan, dtype=dtype)
+
+        # The following already contain a NaN and should not warn.
+        with np.errstate(all='raise'):
+            with pytest.raises(FloatingPointError,
+                    match="invalid value"):
+                fn(fone, fzero)
+            fn(fnan, fzero)
+            fn(fzero, fnan)
+            fn(fone, fnan)
+            fn(fnan, fone)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_float_remainder_overflow(self):
+        a = np.finfo(np.float64).tiny
+        with np.errstate(over='ignore', invalid='ignore'):
+            div, mod = np.divmod(4, a)
+            assert_(np.isinf(div))
+            assert_(mod == 0)
+        with np.errstate(over='raise', invalid='ignore'):
+            assert_raises(FloatingPointError, np.divmod, 4, a)
+        with np.errstate(invalid='raise', over='ignore'):
+            assert_raises(FloatingPointError, np.divmod, 4, a)
+
+    def test_float_divmod_corner_cases(self):
+        # check nan cases
+        for dt in np.typecodes['Float']:
+            fnan = np.array(np.nan, dtype=dt)
+            fone = np.array(1.0, dtype=dt)
+            fzer = np.array(0.0, dtype=dt)
+            finf = np.array(np.inf, dtype=dt)
+            with suppress_warnings() as sup:
+                sup.filter(RuntimeWarning, "invalid value encountered in divmod")
+                sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
+                div, rem = np.divmod(fone, fzer)
+                assert_(np.isinf(div), 'dt: %s, div: %s' % (dt, div))
+                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+                div, rem = np.divmod(fzer, fzer)
+                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+                assert_(np.isnan(div), 'dt: %s, div: %s' % (dt, div))
+                div, rem = np.divmod(finf, finf)
+                assert_(np.isnan(div), 'dt: %s, div: %s' % (dt, div))
+                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+                div, rem = np.divmod(finf, fzer)
+                assert_(np.isinf(div), 'dt: %s, div: %s' % (dt, div))
+                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+                div, rem = np.divmod(fnan, fone)
+                assert_(np.isnan(rem), "dt: %s, rem: %s" % (dt, rem))
+                assert_(np.isnan(div), "dt: %s, div: %s" % (dt, div))
+                div, rem = np.divmod(fone, fnan)
+                assert_(np.isnan(rem), "dt: %s, rem: %s" % (dt, rem))
+                assert_(np.isnan(div), "dt: %s, div: %s" % (dt, div))
+                div, rem = np.divmod(fnan, fzer)
+                assert_(np.isnan(rem), "dt: %s, rem: %s" % (dt, rem))
+                assert_(np.isnan(div), "dt: %s, div: %s" % (dt, div))
+
+    def test_float_remainder_corner_cases(self):
+        # Check remainder magnitude.
+        for dt in np.typecodes['Float']:
+            fone = np.array(1.0, dtype=dt)
+            fzer = np.array(0.0, dtype=dt)
+            fnan = np.array(np.nan, dtype=dt)
+            b = np.array(1.0, dtype=dt)
+            a = np.nextafter(np.array(0.0, dtype=dt), -b)
+            rem = np.remainder(a, b)
+            assert_(rem <= b, 'dt: %s' % dt)
+            rem = np.remainder(-a, -b)
+            assert_(rem >= -b, 'dt: %s' % dt)
+
+        # Check nans, inf
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in remainder")
+            sup.filter(RuntimeWarning, "invalid value encountered in fmod")
+            for dt in np.typecodes['Float']:
+                fone = np.array(1.0, dtype=dt)
+                fzer = np.array(0.0, dtype=dt)
+                finf = np.array(np.inf, dtype=dt)
+                fnan = np.array(np.nan, dtype=dt)
+                rem = np.remainder(fone, fzer)
+                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+                # MSVC 2008 returns NaN here, so disable the check.
+                #rem = np.remainder(fone, finf)
+                #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))
+                rem = np.remainder(finf, fone)
+                fmod = np.fmod(finf, fone)
+                assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
+                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+                rem = np.remainder(finf, finf)
+                fmod = np.fmod(finf, fone)
+                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+                assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
+                rem = np.remainder(finf, fzer)
+                fmod = np.fmod(finf, fzer)
+                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+                assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
+                rem = np.remainder(fone, fnan)
+                fmod = np.fmod(fone, fnan)
+                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+                assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
+                rem = np.remainder(fnan, fzer)
+                fmod = np.fmod(fnan, fzer)
+                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+                assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
+                rem = np.remainder(fnan, fone)
+                fmod = np.fmod(fnan, fone)
+                assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+                assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
+
+
+class TestDivisionIntegerOverflowsAndDivideByZero:
+    result_type = namedtuple('result_type',
+            ['nocast', 'casted'])
+    helper_lambdas = {
+        'zero': lambda dtype: 0,
+        'min': lambda dtype: np.iinfo(dtype).min,
+        'neg_min': lambda dtype: -np.iinfo(dtype).min,
+        'min-zero': lambda dtype: (np.iinfo(dtype).min, 0),
+        'neg_min-zero': lambda dtype: (-np.iinfo(dtype).min, 0),
+    }
+    overflow_results = {
+        np.remainder: result_type(
+            helper_lambdas['zero'], helper_lambdas['zero']),
+        np.fmod: result_type(
+            helper_lambdas['zero'], helper_lambdas['zero']),
+        operator.mod: result_type(
+            helper_lambdas['zero'], helper_lambdas['zero']),
+        operator.floordiv: result_type(
+            helper_lambdas['min'], helper_lambdas['neg_min']),
+        np.floor_divide: result_type(
+            helper_lambdas['min'], helper_lambdas['neg_min']),
+        np.divmod: result_type(
+            helper_lambdas['min-zero'], helper_lambdas['neg_min-zero'])
+    }
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
+    def test_signed_division_overflow(self, dtype):
+        to_check = interesting_binop_operands(np.iinfo(dtype).min, -1, dtype)
+        for op1, op2, extractor, operand_identifier in to_check:
+            with pytest.warns(RuntimeWarning, match="overflow encountered"):
+                res = op1 // op2
+
+            assert res.dtype == op1.dtype
+            assert extractor(res) == np.iinfo(op1.dtype).min
+
+            # Remainder is well defined though, and does not warn:
+            res = op1 % op2
+            assert res.dtype == op1.dtype
+            assert extractor(res) == 0
+            # Check fmod as well:
+            res = np.fmod(op1, op2)
+            assert extractor(res) == 0
+
+            # Divmod warns for the division part:
+            with pytest.warns(RuntimeWarning, match="overflow encountered"):
+                res1, res2 = np.divmod(op1, op2)
+
+            assert res1.dtype == res2.dtype == op1.dtype
+            assert extractor(res1) == np.iinfo(op1.dtype).min
+            assert extractor(res2) == 0
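+
+    # A minimal sketch of the wrap-around above (illustrative, not
+    # upstream): -iinfo(int8).min == 128 is not representable in two's
+    # complement int8, so the quotient wraps back to the minimum value.
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_min_over_minus_one_sketch(self):
+        a = np.array([np.iinfo(np.int8).min], dtype=np.int8)
+        with pytest.warns(RuntimeWarning, match="overflow encountered"):
+            res = a // np.int8(-1)
+        assert_equal(res, [np.iinfo(np.int8).min])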
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+    def test_divide_by_zero(self, dtype):
+        # Note that the return value cannot be well defined here, but NumPy
+        # currently uses 0 consistently.  This could be changed.
+        to_check = interesting_binop_operands(1, 0, dtype)
+        for op1, op2, extractor, operand_identifier in to_check:
+            with pytest.warns(RuntimeWarning, match="divide by zero"):
+                res = op1 // op2
+
+            assert res.dtype == op1.dtype
+            assert extractor(res) == 0
+
+            with pytest.warns(RuntimeWarning, match="divide by zero"):
+                res1, res2 = np.divmod(op1, op2)
+
+            assert res1.dtype == res2.dtype == op1.dtype
+            assert extractor(res1) == 0
+            assert extractor(res2) == 0
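+
+    # A value sketch of the convention above (illustrative, not upstream):
+    # integer division by zero warns and currently yields 0 rather than
+    # raising.
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_int_divide_by_zero_value_sketch(self):
+        with pytest.warns(RuntimeWarning, match="divide by zero"):
+            res = np.array([1, 2], dtype=np.int32) // 0
+        assert_equal(res, [0, 0])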
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize("dividend_dtype",
+            np.sctypes['int'])
+    @pytest.mark.parametrize("divisor_dtype",
+            np.sctypes['int'])
+    @pytest.mark.parametrize("operation",
+            [np.remainder, np.fmod, np.divmod, np.floor_divide,
+             operator.mod, operator.floordiv])
+    @np.errstate(divide='warn', over='warn')
+    def test_overflows(self, dividend_dtype, divisor_dtype, operation):
+        # The SIMD path processes elements in multiples of the vector
+        # register size and falls back to the default implementation for
+        # the leftover elements; the array sizes below are chosen to
+        # exercise both paths.
+        arrays = [np.array([np.iinfo(dividend_dtype).min]*i,
+                           dtype=dividend_dtype) for i in range(1, 129)]
+        divisor = np.array([-1], dtype=divisor_dtype)
+        # If the divisor is a larger type than the dividend (the `else`
+        # case), the result is promoted to a type wider than the dividend
+        # and `divmod` / `floor_divide` do not overflow.
+        if np.dtype(dividend_dtype).itemsize >= np.dtype(
+                divisor_dtype).itemsize and operation in (
+                        np.divmod, np.floor_divide, operator.floordiv):
+            with pytest.warns(
+                    RuntimeWarning,
+                    match="overflow encountered in"):
+                result = operation(
+                            dividend_dtype(np.iinfo(dividend_dtype).min),
+                            divisor_dtype(-1)
+                        )
+                assert result == self.overflow_results[operation].nocast(
+                        dividend_dtype)
+
+            # Arrays
+            for a in arrays:
+                # For divmod the result is a (2, n) stack of quotient and
+                # remainder, so flatten it column-wise ('f') to compare
+                # against the flattened expected values.
+                with pytest.warns(
+                        RuntimeWarning,
+                        match="overflow encountered in"):
+                    result = np.array(operation(a, divisor)).flatten('f')
+                    expected_array = np.array(
+                            [self.overflow_results[operation].nocast(
+                                dividend_dtype)]*len(a)).flatten()
+                    assert_array_equal(result, expected_array)
+        else:
+            # Scalars
+            result = operation(
+                        dividend_dtype(np.iinfo(dividend_dtype).min),
+                        divisor_dtype(-1)
+                    )
+            assert result == self.overflow_results[operation].casted(
+                    dividend_dtype)
+
+            # Arrays
+            for a in arrays:
+                # See above comment on flatten
+                result = np.array(operation(a, divisor)).flatten('f')
+                expected_array = np.array(
+                        [self.overflow_results[operation].casted(
+                            dividend_dtype)]*len(a)).flatten()
+                assert_array_equal(result, expected_array)
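+
+    # A promotion sketch (illustrative, not upstream): with a divisor
+    # dtype wider than the dividend's, the result type can represent
+    # -iinfo(int8).min == 128, so nothing wraps and no warning fires.
+    def test_promoted_division_no_overflow_sketch(self):
+        a = np.array([np.iinfo(np.int8).min], dtype=np.int8)
+        res = a // np.array([-1], dtype=np.int64)
+        assert res.dtype == np.int64
+        assert_equal(res, [128])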
+
+
+class TestCbrt:
+    def test_cbrt_scalar(self):
+        assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5)
+
+    def test_cbrt(self):
+        x = np.array([1., 2., -3., np.inf, -np.inf])
+        assert_almost_equal(np.cbrt(x**3), x)
+
+        assert_(np.isnan(np.cbrt(np.nan)))
+        assert_equal(np.cbrt(np.inf), np.inf)
+        assert_equal(np.cbrt(-np.inf), -np.inf)
+
+
+class TestPower:
+    def test_power_float(self):
+        x = np.array([1., 2., 3.])
+        assert_equal(x**0, [1., 1., 1.])
+        assert_equal(x**1, x)
+        assert_equal(x**2, [1., 4., 9.])
+        y = x.copy()
+        y **= 2
+        assert_equal(y, [1., 4., 9.])
+        assert_almost_equal(x**(-1), [1., 0.5, 1./3])
+        assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
+
+        for out, inp, msg in _gen_alignment_data(dtype=np.float32,
+                                                 type='unary',
+                                                 max_size=11):
+            exp = [ncu.sqrt(i) for i in inp]
+            assert_almost_equal(inp**(0.5), exp, err_msg=msg)
+            np.sqrt(inp, out=out)
+            assert_equal(out, exp, err_msg=msg)
+
+        for out, inp, msg in _gen_alignment_data(dtype=np.float64,
+                                                 type='unary',
+                                                 max_size=7):
+            exp = [ncu.sqrt(i) for i in inp]
+            assert_almost_equal(inp**(0.5), exp, err_msg=msg)
+            np.sqrt(inp, out=out)
+            assert_equal(out, exp, err_msg=msg)
+
+    def test_power_complex(self):
+        x = np.array([1+2j, 2+3j, 3+4j])
+        assert_equal(x**0, [1., 1., 1.])
+        assert_equal(x**1, x)
+        assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
+        assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
+        assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
+        assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
+        assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
+        assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
+                                      (-117-44j)/15625])
+        assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
+                                       ncu.sqrt(3+4j)])
+        norm = 1./((x**14)[0])
+        assert_almost_equal(x**14 * norm,
+                [i * norm for i in [-76443+16124j, 23161315+58317492j,
+                                    5583548873 + 2465133864j]])
+
+        # Ticket #836
+        def assert_complex_equal(x, y):
+            assert_array_equal(x.real, y.real)
+            assert_array_equal(x.imag, y.imag)
+
+        for z in [complex(0, np.inf), complex(1, np.inf)]:
+            z = np.array([z], dtype=np.complex_)
+            with np.errstate(invalid="ignore"):
+                assert_complex_equal(z**1, z)
+                assert_complex_equal(z**2, z*z)
+                assert_complex_equal(z**3, z*z*z)
+
+    def test_power_zero(self):
+        # ticket #1271
+        zero = np.array([0j])
+        one = np.array([1+0j])
+        cnan = np.array([complex(np.nan, np.nan)])
+        # FIXME cinf not tested.
+        #cinf = np.array([complex(np.inf, 0)])
+
+        def assert_complex_equal(x, y):
+            x, y = np.asarray(x), np.asarray(y)
+            assert_array_equal(x.real, y.real)
+            assert_array_equal(x.imag, y.imag)
+
+        # positive powers
+        for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
+            assert_complex_equal(np.power(zero, p), zero)
+
+        # zero power
+        assert_complex_equal(np.power(zero, 0), one)
+        with np.errstate(invalid="ignore"):
+            assert_complex_equal(np.power(zero, 0+1j), cnan)
+
+            # negative power
+            for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
+                assert_complex_equal(np.power(zero, -p), cnan)
+            assert_complex_equal(np.power(zero, -1+0.2j), cnan)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_zero_power_nonzero(self):
+        # Testing 0**(non-zero), see issue 18378
+        zero = np.array([0.0+0.0j])
+        cnan = np.array([complex(np.nan, np.nan)])
+
+        def assert_complex_equal(x, y):
+            assert_array_equal(x.real, y.real)
+            assert_array_equal(x.imag, y.imag)
+
+        # Complex powers with positive real part will not generate a warning
+        assert_complex_equal(np.power(zero, 1+4j), zero)
+        assert_complex_equal(np.power(zero, 2-3j), zero)
+        # Testing zero values when the real part is greater than zero
+        assert_complex_equal(np.power(zero, 1+1j), zero)
+        assert_complex_equal(np.power(zero, 1+0j), zero)
+        assert_complex_equal(np.power(zero, 1-1j), zero)
+        # Complex powers with negative or zero real part (provided the
+        # imaginary part is not zero) will generate a NaN and hence a
+        # RuntimeWarning
+        with pytest.warns(expected_warning=RuntimeWarning) as r:
+            assert_complex_equal(np.power(zero, -1+1j), cnan)
+            assert_complex_equal(np.power(zero, -2-3j), cnan)
+            assert_complex_equal(np.power(zero, -7+0j), cnan)
+            assert_complex_equal(np.power(zero, 0+1j), cnan)
+            assert_complex_equal(np.power(zero, 0-1j), cnan)
+        assert len(r) == 5
+
+    def test_fast_power(self):
+        x = np.array([1, 2, 3], np.int16)
+        res = x**2.0
+        assert_((x**2.00001).dtype is res.dtype)
+        assert_array_equal(res, [1, 4, 9])
+        # check the inplace operation on the casted copy doesn't mess with x
+        assert_(not np.may_share_memory(res, x))
+        assert_array_equal(x, [1, 2, 3])
+
+        # Check that the fast path ignores 1-element arrays that are not 0-d
+        res = x ** np.array([[[2]]])
+        assert_equal(res.shape, (1, 1, 3))
+
+    def test_integer_power(self):
+        a = np.array([15, 15], 'i8')
+        b = np.power(a, a)
+        assert_equal(b, [437893890380859375, 437893890380859375])
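+
+    # A cross-check sketch (illustrative, not upstream): Python's
+    # unbounded integers give the same value, confirming that 15**15
+    # still fits in int64.
+    def test_integer_power_matches_python_sketch(self):
+        assert_equal(np.power(np.int64(15), np.int64(15)), 15**15)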
+
+    def test_integer_power_with_integer_zero_exponent(self):
+        dtypes = np.typecodes['Integer']
+        for dt in dtypes:
+            arr = np.arange(-10, 10, dtype=dt)
+            assert_equal(np.power(arr, 0), np.ones_like(arr))
+
+        dtypes = np.typecodes['UnsignedInteger']
+        for dt in dtypes:
+            arr = np.arange(10, dtype=dt)
+            assert_equal(np.power(arr, 0), np.ones_like(arr))
+
+    def test_integer_power_of_1(self):
+        dtypes = np.typecodes['AllInteger']
+        for dt in dtypes:
+            arr = np.arange(10, dtype=dt)
+            assert_equal(np.power(1, arr), np.ones_like(arr))
+
+    def test_integer_power_of_zero(self):
+        dtypes = np.typecodes['AllInteger']
+        for dt in dtypes:
+            arr = np.arange(1, 10, dtype=dt)
+            assert_equal(np.power(0, arr), np.zeros_like(arr))
+
+    def test_integer_to_negative_power(self):
+        dtypes = np.typecodes['Integer']
+        for dt in dtypes:
+            a = np.array([0, 1, 2, 3], dtype=dt)
+            b = np.array([0, 1, 2, -3], dtype=dt)
+            one = np.array(1, dtype=dt)
+            minusone = np.array(-1, dtype=dt)
+            assert_raises(ValueError, np.power, a, b)
+            assert_raises(ValueError, np.power, a, minusone)
+            assert_raises(ValueError, np.power, one, b)
+            assert_raises(ValueError, np.power, one, minusone)
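+
+    # A workaround sketch (illustrative, not upstream): a floating-point
+    # exponent, or np.float_power, sidesteps the ValueError by computing
+    # in floating point.
+    def test_negative_power_float_exponent_sketch(self):
+        assert_almost_equal(np.power(2, -1.0), 0.5)
+        assert_almost_equal(np.float_power(2, -1), 0.5)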
+
+    def test_float_to_inf_power(self):
+        for dt in [np.float32, np.float64]:
+            a = np.array([1, 1, 2, 2, -2, -2, np.inf, -np.inf], dt)
+            b = np.array([np.inf, -np.inf, np.inf, -np.inf,
+                                np.inf, -np.inf, np.inf, -np.inf], dt)
+            r = np.array([1, 1, np.inf, 0, np.inf, 0, np.inf, 0], dt)
+            assert_equal(np.power(a, b), r)
+
+
+class TestFloat_power:
+    def test_type_conversion(self):
+        arg_type = '?bhilBHILefdgFDG'
+        res_type = 'ddddddddddddgDDG'
+        for dtin, dtout in zip(arg_type, res_type):
+            msg = "dtin: %s, dtout: %s" % (dtin, dtout)
+            arg = np.ones(1, dtype=dtin)
+            res = np.float_power(arg, arg)
+            assert_(res.dtype.name == np.dtype(dtout).name, msg)
+
+
+class TestLog2:
+    @pytest.mark.parametrize('dt', ['f', 'd', 'g'])
+    def test_log2_values(self, dt):
+        x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
+        y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+        xf = np.array(x, dtype=dt)
+        yf = np.array(y, dtype=dt)
+        assert_almost_equal(np.log2(xf), yf)
+
+    @pytest.mark.parametrize("i", range(1, 65))
+    def test_log2_ints(self, i):
+        # a good log2 implementation should compute these exactly;
+        # it might fail on an OS with a low-quality libm
+        v = np.log2(2.**i)
+        assert_equal(v, float(i), err_msg='at exponent %d' % i)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_log2_special(self):
+        assert_equal(np.log2(1.), 0.)
+        assert_equal(np.log2(np.inf), np.inf)
+        assert_(np.isnan(np.log2(np.nan)))
+
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', RuntimeWarning)
+            assert_(np.isnan(np.log2(-1.)))
+            assert_(np.isnan(np.log2(-np.inf)))
+            assert_equal(np.log2(0.), -np.inf)
+            assert_(w[0].category is RuntimeWarning)
+            assert_(w[1].category is RuntimeWarning)
+            assert_(w[2].category is RuntimeWarning)
+
+
+class TestExp2:
+    def test_exp2_values(self):
+        x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
+        y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+        for dt in ['f', 'd', 'g']:
+            xf = np.array(x, dtype=dt)
+            yf = np.array(y, dtype=dt)
+            assert_almost_equal(np.exp2(yf), xf)
+
+
+class TestLogAddExp2(_FilterInvalids):
+    # Need test for intermediate precisions
+    def test_logaddexp2_values(self):
+        x = [1, 2, 3, 4, 5]
+        y = [5, 4, 3, 2, 1]
+        z = [6, 6, 6, 6, 6]
+        for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
+            xf = np.log2(np.array(x, dtype=dt))
+            yf = np.log2(np.array(y, dtype=dt))
+            zf = np.log2(np.array(z, dtype=dt))
+            assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_)
+
+    def test_logaddexp2_range(self):
+        x = [1000000, -1000000, 1000200, -1000200]
+        y = [1000200, -1000200, 1000000, -1000000]
+        z = [1000200, -1000000, 1000200, -1000000]
+        for dt in ['f', 'd', 'g']:
+            logxf = np.array(x, dtype=dt)
+            logyf = np.array(y, dtype=dt)
+            logzf = np.array(z, dtype=dt)
+            assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
+
+    def test_inf(self):
+        inf = np.inf
+        x = [inf, -inf,  inf, -inf, inf, 1,  -inf,  1]
+        y = [inf,  inf, -inf, -inf, 1,   inf, 1,   -inf]
+        z = [inf,  inf,  inf, -inf, inf, inf, 1,    1]
+        with np.errstate(invalid='raise'):
+            for dt in ['f', 'd', 'g']:
+                logxf = np.array(x, dtype=dt)
+                logyf = np.array(y, dtype=dt)
+                logzf = np.array(z, dtype=dt)
+                assert_equal(np.logaddexp2(logxf, logyf), logzf)
+
+    def test_nan(self):
+        assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
+        assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
+        assert_(np.isnan(np.logaddexp2(np.nan, 0)))
+        assert_(np.isnan(np.logaddexp2(0, np.nan)))
+        assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
+
+    def test_reduce(self):
+        assert_equal(np.logaddexp2.identity, -np.inf)
+        assert_equal(np.logaddexp2.reduce([]), -np.inf)
+        assert_equal(np.logaddexp2.reduce([-np.inf]), -np.inf)
+        assert_equal(np.logaddexp2.reduce([-np.inf, 0]), 0)
+
+
+class TestLog:
+    def test_log_values(self):
+        x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
+        y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+        for dt in ['f', 'd', 'g']:
+            log2_ = 0.69314718055994530943
+            xf = np.array(x, dtype=dt)
+            yf = np.array(y, dtype=dt)*log2_
+            assert_almost_equal(np.log(xf), yf)
+
+        # test aliasing (issue #17761)
+        x = np.array([2, 0.937500, 3, 0.947500, 1.054697])
+        xf = np.log(x)
+        assert_almost_equal(np.log(x, out=x), xf)
+
+        # test log() of max for dtype does not raise
+        for dt in ['f', 'd', 'g']:
+            try:
+                with np.errstate(all='raise'):
+                    x = np.finfo(dt).max
+                    np.log(x)
+            except FloatingPointError as exc:
+                if dt == 'g' and IS_MUSL:
+                    # FloatingPointError is known to occur on longdouble
+                    # for musllinux_x86_64, where x is very large
+                    pytest.skip(
+                        "Overflow has occurred for"
+                        " np.log(np.finfo(np.longdouble).max)"
+                    )
+                else:
+                    raise exc
+
+    def test_log_strides(self):
+        np.random.seed(42)
+        strides = np.array([-4,-3,-2,-1,1,2,3,4])
+        sizes = np.arange(2,100)
+        for ii in sizes:
+            x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0,size=ii))
+            x_special = x_f64.copy()
+            x_special[3:-1:4] = 1.0
+            y_true = np.log(x_f64)
+            y_special = np.log(x_special)
+            for jj in strides:
+                assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2)
+                assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2)
+
+class TestExp:
+    def test_exp_values(self):
+        x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
+        y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+        for dt in ['f', 'd', 'g']:
+            log2_ = 0.69314718055994530943
+            xf = np.array(x, dtype=dt)
+            yf = np.array(y, dtype=dt)*log2_
+            assert_almost_equal(np.exp(yf), xf)
+
+    def test_exp_strides(self):
+        np.random.seed(42)
+        strides = np.array([-4,-3,-2,-1,1,2,3,4])
+        sizes = np.arange(2,100)
+        for ii in sizes:
+            x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1,size=ii))
+            y_true = np.exp(x_f64)
+            for jj in strides:
+                assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2)
+
+class TestSpecialFloats:
+    def test_exp_values(self):
+        with np.errstate(under='raise', over='raise'):
+            x = [np.nan,  np.nan, np.inf, 0.]
+            y = [np.nan, -np.nan, np.inf, -np.inf]
+            for dt in ['e', 'f', 'd', 'g']:
+                xf = np.array(x, dtype=dt)
+                yf = np.array(y, dtype=dt)
+                assert_equal(np.exp(yf), xf)
+
+    # See: https://github.com/numpy/numpy/issues/19192
+    @pytest.mark.xfail(
+        _glibc_older_than("2.17"),
+        reason="Older glibc versions may not raise appropriate FP exceptions"
+    )
+    def test_exp_exceptions(self):
+        with np.errstate(over='raise'):
+            assert_raises(FloatingPointError, np.exp, np.float16(11.0899))
+            assert_raises(FloatingPointError, np.exp, np.float32(100.))
+            assert_raises(FloatingPointError, np.exp, np.float32(1E19))
+            assert_raises(FloatingPointError, np.exp, np.float64(800.))
+            assert_raises(FloatingPointError, np.exp, np.float64(1E19))
+
+        with np.errstate(under='raise'):
+            assert_raises(FloatingPointError, np.exp, np.float16(-17.5))
+            assert_raises(FloatingPointError, np.exp, np.float32(-1000.))
+            assert_raises(FloatingPointError, np.exp, np.float32(-1E19))
+            assert_raises(FloatingPointError, np.exp, np.float64(-1000.))
+            assert_raises(FloatingPointError, np.exp, np.float64(-1E19))
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_log_values(self):
+        with np.errstate(all='ignore'):
+            x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan]
+            y = [np.nan, -np.nan, np.inf, -np.inf, 0.0, -1.0]
+            y1p = [np.nan, -np.nan, np.inf, -np.inf, -1.0, -2.0]
+            for dt in ['e', 'f', 'd', 'g']:
+                xf = np.array(x, dtype=dt)
+                yf = np.array(y, dtype=dt)
+                yf1p = np.array(y1p, dtype=dt)
+                assert_equal(np.log(yf), xf)
+                assert_equal(np.log2(yf), xf)
+                assert_equal(np.log10(yf), xf)
+                assert_equal(np.log1p(yf1p), xf)
+
+        with np.errstate(divide='raise'):
+            for dt in ['e', 'f', 'd']:
+                assert_raises(FloatingPointError, np.log,
+                              np.array(0.0, dtype=dt))
+                assert_raises(FloatingPointError, np.log2,
+                              np.array(0.0, dtype=dt))
+                assert_raises(FloatingPointError, np.log10,
+                              np.array(0.0, dtype=dt))
+                assert_raises(FloatingPointError, np.log1p,
+                              np.array(-1.0, dtype=dt))
+
+        with np.errstate(invalid='raise'):
+            for dt in ['e', 'f', 'd']:
+                assert_raises(FloatingPointError, np.log,
+                              np.array(-np.inf, dtype=dt))
+                assert_raises(FloatingPointError, np.log,
+                              np.array(-1.0, dtype=dt))
+                assert_raises(FloatingPointError, np.log2,
+                              np.array(-np.inf, dtype=dt))
+                assert_raises(FloatingPointError, np.log2,
+                              np.array(-1.0, dtype=dt))
+                assert_raises(FloatingPointError, np.log10,
+                              np.array(-np.inf, dtype=dt))
+                assert_raises(FloatingPointError, np.log10,
+                              np.array(-1.0, dtype=dt))
+                assert_raises(FloatingPointError, np.log1p,
+                              np.array(-np.inf, dtype=dt))
+                assert_raises(FloatingPointError, np.log1p,
+                              np.array(-2.0, dtype=dt))
+
+        # See https://github.com/numpy/numpy/issues/18005
+        with assert_no_warnings():
+            a = np.array(1e9, dtype='float32')
+            np.log(a)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize('dtype', ['e', 'f', 'd', 'g'])
+    def test_sincos_values(self, dtype):
+        with np.errstate(all='ignore'):
+            x = [np.nan, np.nan, np.nan, np.nan]
+            y = [np.nan, -np.nan, np.inf, -np.inf]
+            xf = np.array(x, dtype=dtype)
+            yf = np.array(y, dtype=dtype)
+            assert_equal(np.sin(yf), xf)
+            assert_equal(np.cos(yf), xf)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.xfail(
+        sys.platform.startswith("darwin"),
+        reason="underflow is triggered for scalar 'sin'"
+    )
+    def test_sincos_underflow(self):
+        with np.errstate(under='raise'):
+            underflow_trigger = np.array(
+                float.fromhex("0x1.f37f47a03f82ap-511"),
+                dtype=np.float64
+            )
+            np.sin(underflow_trigger)
+            np.cos(underflow_trigger)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize('callable', [np.sin, np.cos])
+    @pytest.mark.parametrize('dtype', ['e', 'f', 'd'])
+    @pytest.mark.parametrize('value', [np.inf, -np.inf])
+    def test_sincos_errors(self, callable, dtype, value):
+        with np.errstate(invalid='raise'):
+            assert_raises(FloatingPointError, callable,
+                np.array([value], dtype=dtype))
+
+    @pytest.mark.parametrize('callable', [np.sin, np.cos])
+    @pytest.mark.parametrize('dtype', ['f', 'd'])
+    @pytest.mark.parametrize('stride', [-1, 1, 2, 4, 5])
+    def test_sincos_overlaps(self, callable, dtype, stride):
+        N = 100
+        M = N // abs(stride)
+        rng = np.random.default_rng(42)
+        x = rng.standard_normal(N, dtype)
+        y = callable(x[::stride])
+        callable(x[::stride], out=x[:M])
+        assert_equal(x[:M], y)
+
+    @pytest.mark.parametrize('dt', ['e', 'f', 'd', 'g'])
+    def test_sqrt_values(self, dt):
+        with np.errstate(all='ignore'):
+            x = [np.nan, np.nan, np.inf, np.nan, 0.]
+            y = [np.nan, -np.nan, np.inf, -np.inf, 0.]
+            xf = np.array(x, dtype=dt)
+            yf = np.array(y, dtype=dt)
+            assert_equal(np.sqrt(yf), xf)
+
+        # with np.errstate(invalid='raise'):
+        #     assert_raises(
+        #         FloatingPointError, np.sqrt, np.array(-100., dtype=dt)
+        #     )
+
+    def test_abs_values(self):
+        x = [np.nan,  np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0]
+        y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0]
+        for dt in ['e', 'f', 'd', 'g']:
+            xf = np.array(x, dtype=dt)
+            yf = np.array(y, dtype=dt)
+            assert_equal(np.abs(yf), xf)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_square_values(self):
+        x = [np.nan,  np.nan, np.inf, np.inf]
+        y = [np.nan, -np.nan, np.inf, -np.inf]
+        with np.errstate(all='ignore'):
+            for dt in ['e', 'f', 'd', 'g']:
+                xf = np.array(x, dtype=dt)
+                yf = np.array(y, dtype=dt)
+                assert_equal(np.square(yf), xf)
+
+        with np.errstate(over='raise'):
+            assert_raises(FloatingPointError, np.square,
+                          np.array(1E3, dtype='e'))
+            assert_raises(FloatingPointError, np.square,
+                          np.array(1E32, dtype='f'))
+            assert_raises(FloatingPointError, np.square,
+                          np.array(1E200, dtype='d'))
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_reciprocal_values(self):
+        with np.errstate(all='ignore'):
+            x = [np.nan,  np.nan, 0.0, -0.0, np.inf, -np.inf]
+            y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.]
+            for dt in ['e', 'f', 'd', 'g']:
+                xf = np.array(x, dtype=dt)
+                yf = np.array(y, dtype=dt)
+                assert_equal(np.reciprocal(yf), xf)
+
+        with np.errstate(divide='raise'):
+            for dt in ['e', 'f', 'd', 'g']:
+                assert_raises(FloatingPointError, np.reciprocal,
+                              np.array(-0.0, dtype=dt))
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_tan(self):
+        with np.errstate(all='ignore'):
+            in_ = [np.nan, -np.nan, 0.0, -0.0, np.inf, -np.inf]
+            out = [np.nan, np.nan, 0.0, -0.0, np.nan, np.nan]
+            for dt in ['e', 'f', 'd']:
+                in_arr = np.array(in_, dtype=dt)
+                out_arr = np.array(out, dtype=dt)
+                assert_equal(np.tan(in_arr), out_arr)
+
+        with np.errstate(invalid='raise'):
+            for dt in ['e', 'f', 'd']:
+                assert_raises(FloatingPointError, np.tan,
+                              np.array(np.inf, dtype=dt))
+                assert_raises(FloatingPointError, np.tan,
+                              np.array(-np.inf, dtype=dt))
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_arcsincos(self):
+        with np.errstate(all='ignore'):
+            in_ = [np.nan, -np.nan, np.inf, -np.inf]
+            out = [np.nan, np.nan, np.nan, np.nan]
+            for dt in ['e', 'f', 'd']:
+                in_arr = np.array(in_, dtype=dt)
+                out_arr = np.array(out, dtype=dt)
+                assert_equal(np.arcsin(in_arr), out_arr)
+                assert_equal(np.arccos(in_arr), out_arr)
+
+        for callable in [np.arcsin, np.arccos]:
+            for value in [np.inf, -np.inf, 2.0, -2.0]:
+                for dt in ['e', 'f', 'd']:
+                    with np.errstate(invalid='raise'):
+                        assert_raises(FloatingPointError, callable,
+                                      np.array(value, dtype=dt))
+
+    def test_arctan(self):
+        with np.errstate(all='ignore'):
+            in_ = [np.nan, -np.nan]
+            out = [np.nan, np.nan]
+            for dt in ['e', 'f', 'd']:
+                in_arr = np.array(in_, dtype=dt)
+                out_arr = np.array(out, dtype=dt)
+                assert_equal(np.arctan(in_arr), out_arr)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_sinh(self):
+        in_ = [np.nan, -np.nan, np.inf, -np.inf]
+        out = [np.nan, np.nan, np.inf, -np.inf]
+        for dt in ['e', 'f', 'd']:
+            in_arr = np.array(in_, dtype=dt)
+            out_arr = np.array(out, dtype=dt)
+            assert_equal(np.sinh(in_arr), out_arr)
+
+        with np.errstate(over='raise'):
+            assert_raises(FloatingPointError, np.sinh,
+                          np.array(12.0, dtype='e'))
+            assert_raises(FloatingPointError, np.sinh,
+                          np.array(120.0, dtype='f'))
+            assert_raises(FloatingPointError, np.sinh,
+                          np.array(1200.0, dtype='d'))
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.skipif('bsd' in sys.platform,
+            reason="fallback implementation may not raise, see gh-2487")
+    def test_cosh(self):
+        in_ = [np.nan, -np.nan, np.inf, -np.inf]
+        out = [np.nan, np.nan, np.inf, np.inf]
+        for dt in ['e', 'f', 'd']:
+            in_arr = np.array(in_, dtype=dt)
+            out_arr = np.array(out, dtype=dt)
+            assert_equal(np.cosh(in_arr), out_arr)
+
+        with np.errstate(over='raise'):
+            assert_raises(FloatingPointError, np.cosh,
+                          np.array(12.0, dtype='e'))
+            assert_raises(FloatingPointError, np.cosh,
+                          np.array(120.0, dtype='f'))
+            assert_raises(FloatingPointError, np.cosh,
+                          np.array(1200.0, dtype='d'))
+
+    def test_tanh(self):
+        in_ = [np.nan, -np.nan, np.inf, -np.inf]
+        out = [np.nan, np.nan, 1.0, -1.0]
+        for dt in ['e', 'f', 'd']:
+            in_arr = np.array(in_, dtype=dt)
+            out_arr = np.array(out, dtype=dt)
+            assert_equal(np.tanh(in_arr), out_arr)
+
+    def test_arcsinh(self):
+        in_ = [np.nan, -np.nan, np.inf, -np.inf]
+        out = [np.nan, np.nan, np.inf, -np.inf]
+        for dt in ['e', 'f', 'd']:
+            in_arr = np.array(in_, dtype=dt)
+            out_arr = np.array(out, dtype=dt)
+            assert_equal(np.arcsinh(in_arr), out_arr)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_arccosh(self):
+        with np.errstate(all='ignore'):
+            in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, 0.0]
+            out = [np.nan, np.nan, np.inf, np.nan, 0.0, np.nan]
+            for dt in ['e', 'f', 'd']:
+                in_arr = np.array(in_, dtype=dt)
+                out_arr = np.array(out, dtype=dt)
+                assert_equal(np.arccosh(in_arr), out_arr)
+
+        for value in [0.0, -np.inf]:
+            with np.errstate(invalid='raise'):
+                for dt in ['e', 'f', 'd']:
+                    assert_raises(FloatingPointError, np.arccosh,
+                                  np.array(value, dtype=dt))
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_arctanh(self):
+        with np.errstate(all='ignore'):
+            in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, -1.0, 2.0]
+            out = [np.nan, np.nan, np.nan, np.nan, np.inf, -np.inf, np.nan]
+            for dt in ['e', 'f', 'd']:
+                in_arr = np.array(in_, dtype=dt)
+                out_arr = np.array(out, dtype=dt)
+                assert_equal(np.arctanh(in_arr), out_arr)
+
+        for value in [1.01, np.inf, -np.inf, 1.0, -1.0]:
+            with np.errstate(invalid='raise', divide='raise'):
+                for dt in ['e', 'f', 'd']:
+                    assert_raises(FloatingPointError, np.arctanh,
+                                  np.array(value, dtype=dt))
+
+        # Make sure the glibc < 2.18 atanh implementation is not used,
+        # see issue 25087
+        assert np.signbit(np.arctanh(-1j).real)
+
+    # See: https://github.com/numpy/numpy/issues/20448
+    @pytest.mark.xfail(
+        _glibc_older_than("2.17"),
+        reason="Older glibc versions may not raise appropriate FP exceptions"
+    )
+    def test_exp2(self):
+        with np.errstate(all='ignore'):
+            in_ = [np.nan, -np.nan, np.inf, -np.inf]
+            out = [np.nan, np.nan, np.inf, 0.0]
+            for dt in ['e', 'f', 'd']:
+                in_arr = np.array(in_, dtype=dt)
+                out_arr = np.array(out, dtype=dt)
+                assert_equal(np.exp2(in_arr), out_arr)
+
+        for value in [2000.0, -2000.0]:
+            with np.errstate(over='raise', under='raise'):
+                for dt in ['e', 'f', 'd']:
+                    assert_raises(FloatingPointError, np.exp2,
+                                  np.array(value, dtype=dt))
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_expm1(self):
+        with np.errstate(all='ignore'):
+            in_ = [np.nan, -np.nan, np.inf, -np.inf]
+            out = [np.nan, np.nan, np.inf, -1.0]
+            for dt in ['e', 'f', 'd']:
+                in_arr = np.array(in_, dtype=dt)
+                out_arr = np.array(out, dtype=dt)
+                assert_equal(np.expm1(in_arr), out_arr)
+
+        for value in [200.0, 2000.0]:
+            with np.errstate(over='raise'):
+                for dt in ['e', 'f']:
+                    assert_raises(FloatingPointError, np.expm1,
+                                  np.array(value, dtype=dt))
+
+    # test to ensure no spurious FP exceptions are raised due to SIMD
+    INF_INVALID_ERR = [
+        np.cos, np.sin, np.tan, np.arccos, np.arcsin, np.spacing, np.arctanh
+    ]
+    NEG_INVALID_ERR = [
+        np.log, np.log2, np.log10, np.log1p, np.sqrt, np.arccosh,
+        np.arctanh
+    ]
+    ONE_INVALID_ERR = [
+        np.arctanh,
+    ]
+    LTONE_INVALID_ERR = [
+        np.arccosh,
+    ]
+    BYZERO_ERR = [
+        np.log, np.log2, np.log10, np.reciprocal, np.arccosh
+    ]
+
+    @pytest.mark.skipif(sys.platform == "win32" and sys.maxsize < 2**31 + 1,
+                        reason='failures on 32-bit Python, see FIXME below')
+    @pytest.mark.parametrize("ufunc", UFUNCS_UNARY_FP)
+    @pytest.mark.parametrize("dtype", ('e', 'f', 'd'))
+    @pytest.mark.parametrize("data, escape", (
+        ([0.03], LTONE_INVALID_ERR),
+        ([0.03]*32, LTONE_INVALID_ERR),
+        # neg
+        ([-1.0], NEG_INVALID_ERR),
+        ([-1.0]*32, NEG_INVALID_ERR),
+        # flat
+        ([1.0], ONE_INVALID_ERR),
+        ([1.0]*32, ONE_INVALID_ERR),
+        # zero
+        ([0.0], BYZERO_ERR),
+        ([0.0]*32, BYZERO_ERR),
+        ([-0.0], BYZERO_ERR),
+        ([-0.0]*32, BYZERO_ERR),
+        # nan
+        ([0.5, 0.5, 0.5, np.nan], LTONE_INVALID_ERR),
+        ([0.5, 0.5, 0.5, np.nan]*32, LTONE_INVALID_ERR),
+        ([np.nan, 1.0, 1.0, 1.0], ONE_INVALID_ERR),
+        ([np.nan, 1.0, 1.0, 1.0]*32, ONE_INVALID_ERR),
+        ([np.nan], []),
+        ([np.nan]*32, []),
+        # inf
+        ([0.5, 0.5, 0.5, np.inf], INF_INVALID_ERR + LTONE_INVALID_ERR),
+        ([0.5, 0.5, 0.5, np.inf]*32, INF_INVALID_ERR + LTONE_INVALID_ERR),
+        ([np.inf, 1.0, 1.0, 1.0], INF_INVALID_ERR),
+        ([np.inf, 1.0, 1.0, 1.0]*32, INF_INVALID_ERR),
+        ([np.inf], INF_INVALID_ERR),
+        ([np.inf]*32, INF_INVALID_ERR),
+        # ninf
+        ([0.5, 0.5, 0.5, -np.inf],
+         NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR),
+        ([0.5, 0.5, 0.5, -np.inf]*32,
+         NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR),
+        ([-np.inf, 1.0, 1.0, 1.0], NEG_INVALID_ERR + INF_INVALID_ERR),
+        ([-np.inf, 1.0, 1.0, 1.0]*32, NEG_INVALID_ERR + INF_INVALID_ERR),
+        ([-np.inf], NEG_INVALID_ERR + INF_INVALID_ERR),
+        ([-np.inf]*32, NEG_INVALID_ERR + INF_INVALID_ERR),
+    ))
+    def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape):
+        if escape and ufunc in escape:
+            return
+        # FIXME: NAN raises FP invalid exception:
+        #  - ceil/float16 on MSVC:32-bit
+        #  - spacing/float16 on almost all platforms
+        # FIXME: skipped on MSVC:32-bit during switch to Meson, 10 cases fail
+        #        when SIMD support not present / disabled
+        if ufunc in (np.spacing, np.ceil) and dtype == 'e':
+            return
+        array = np.array(data, dtype=dtype)
+        with assert_no_warnings():
+            ufunc(array)
+
+    @pytest.mark.parametrize("dtype", ('e', 'f', 'd'))
+    def test_divide_spurious_fpexception(self, dtype):
+        dt = np.dtype(dtype)
+        dt_info = np.finfo(dt)
+        subnorm = dt_info.smallest_subnormal
+        # Verify the fix for a bug caused by filling the remaining lanes of
+        # the partially loaded dividend SIMD vector with ones, which raised
+        # a spurious overflow warning when the divisor was subnormal.
+        # See https://github.com/numpy/numpy/issues/25097
+        with assert_no_warnings():
+            np.zeros(128 + 1, dtype=dt) / subnorm
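+
+    # A value sketch for the regression above (illustrative, not
+    # upstream): 0 / smallest_subnormal is exactly 0, so no flag should
+    # ever be set no matter how the trailing SIMD lanes are filled.
+    def test_zero_over_subnormal_value_sketch(self):
+        subnorm = np.finfo(np.float64).smallest_subnormal
+        assert_equal(np.float64(0.0) / subnorm, 0.0)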
+
+class TestFPClass:
+    @pytest.mark.parametrize("stride", [-5, -4, -3, -2, -1, 1,
+                                2, 4, 5, 6, 7, 8, 9, 10])
+    def test_fpclass(self, stride):
+        arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d')
+        arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f')
+        nan     = np.array([True, True, False, False, False, False, False, False, False, False])
+        inf     = np.array([False, False, True, True, False, False, False, False, False, False])
+        sign    = np.array([False, True, False, True, True, False, True, False, False, True])
+        finite  = np.array([False, False, False, False, True, True, True, True, True, True])
+        assert_equal(np.isnan(arr_f32[::stride]), nan[::stride])
+        assert_equal(np.isnan(arr_f64[::stride]), nan[::stride])
+        assert_equal(np.isinf(arr_f32[::stride]), inf[::stride])
+        assert_equal(np.isinf(arr_f64[::stride]), inf[::stride])
+        assert_equal(np.signbit(arr_f32[::stride]), sign[::stride])
+        assert_equal(np.signbit(arr_f64[::stride]), sign[::stride])
+        assert_equal(np.isfinite(arr_f32[::stride]), finite[::stride])
+        assert_equal(np.isfinite(arr_f64[::stride]), finite[::stride])
+
+    @pytest.mark.parametrize("dtype", ['d', 'f'])
+    def test_fp_noncontiguous(self, dtype):
+        data = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0,
+                            1.0, -0.0, 0.0, 2.2251e-308,
+                            -2.2251e-308], dtype=dtype)
+        nan = np.array([True, True, False, False, False, False,
+                            False, False, False, False])
+        inf = np.array([False, False, True, True, False, False,
+                            False, False, False, False])
+        sign = np.array([False, True, False, True, True, False,
+                            True, False, False, True])
+        finite = np.array([False, False, False, False, True, True,
+                            True, True, True, True])
+        out = np.ndarray(data.shape, dtype='bool')
+        ncontig_in = data[1::3]
+        ncontig_out = out[1::3]
+        contig_in = np.array(ncontig_in)
+        assert_equal(ncontig_in.flags.c_contiguous, False)
+        assert_equal(ncontig_out.flags.c_contiguous, False)
+        assert_equal(contig_in.flags.c_contiguous, True)
+        # ncontig in, ncontig out
+        assert_equal(np.isnan(ncontig_in, out=ncontig_out), nan[1::3])
+        assert_equal(np.isinf(ncontig_in, out=ncontig_out), inf[1::3])
+        assert_equal(np.signbit(ncontig_in, out=ncontig_out), sign[1::3])
+        assert_equal(np.isfinite(ncontig_in, out=ncontig_out), finite[1::3])
+        # contig in, ncontig out
+        assert_equal(np.isnan(contig_in, out=ncontig_out), nan[1::3])
+        assert_equal(np.isinf(contig_in, out=ncontig_out), inf[1::3])
+        assert_equal(np.signbit(contig_in, out=ncontig_out), sign[1::3])
+        assert_equal(np.isfinite(contig_in, out=ncontig_out), finite[1::3])
+        # ncontig in, contig out
+        assert_equal(np.isnan(ncontig_in), nan[1::3])
+        assert_equal(np.isinf(ncontig_in), inf[1::3])
+        assert_equal(np.signbit(ncontig_in), sign[1::3])
+        assert_equal(np.isfinite(ncontig_in), finite[1::3])
+        # contig in, contig out, nd stride
+        data_split = np.array(np.array_split(data, 2))
+        nan_split = np.array(np.array_split(nan, 2))
+        inf_split = np.array(np.array_split(inf, 2))
+        sign_split = np.array(np.array_split(sign, 2))
+        finite_split = np.array(np.array_split(finite, 2))
+        assert_equal(np.isnan(data_split), nan_split)
+        assert_equal(np.isinf(data_split), inf_split)
+        assert_equal(np.signbit(data_split), sign_split)
+        assert_equal(np.isfinite(data_split), finite_split)
+
+class TestLDExp:
+    @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
+    @pytest.mark.parametrize("dtype", ['f', 'd'])
+    def test_ldexp(self, dtype, stride):
+        mant = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype)
+        exp  = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i')
+        out  = np.zeros(8, dtype=dtype)
+        assert_equal(np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), np.ones(8, dtype=dtype)[::stride])
+        assert_equal(out[::stride], np.ones(8, dtype=dtype)[::stride])
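+
+    # A round-trip sketch (illustrative, not upstream): frexp and ldexp
+    # are exact inverses for finite values, with mant * 2**exp
+    # reconstructing the input bit-for-bit.
+    def test_ldexp_frexp_roundtrip_sketch(self):
+        x = np.array([0.1, 1.5, -3.75], dtype='d')
+        mant, exp = np.frexp(x)
+        assert_equal(np.ldexp(mant, exp), x)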
+
+class TestFRExp:
+    @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
+    @pytest.mark.parametrize("dtype", ['f', 'd'])
+    @pytest.mark.xfail(IS_MUSL, reason="gh23048")
+    @pytest.mark.skipif(not sys.platform.startswith('linux'),
+                        reason="np.frexp gives different answers for NaN/inf on Windows and Linux")
+    def test_frexp(self, dtype, stride):
+        arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype)
+        mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype)
+        exp_true  = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i')
+        out_mant  = np.ones(8, dtype=dtype)
+        out_exp   = 2*np.ones(8, dtype='i')
+        mant, exp = np.frexp(arr[::stride], out=(out_mant[::stride], out_exp[::stride]))
+        assert_equal(mant_true[::stride], mant)
+        assert_equal(exp_true[::stride], exp)
+        assert_equal(out_mant[::stride], mant_true[::stride])
+        assert_equal(out_exp[::stride], exp_true[::stride])
+
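+# Editorial sketch (hypothetical helper, not from the upstream suite): the two
+# classes above exercise frexp/ldexp, which are exact inverses for finite
+# values. Assumes numpy is available as np, as in this module's imports.
+def _sketch_frexp_ldexp_roundtrip():
+    x = np.array([0.1, 1.0, 3.5, 1024.0])
+    mant, exp = np.frexp(x)       # x == mant * 2**exp with 0.5 <= |mant| < 1
+    assert np.all(np.ldexp(mant, exp) == x)   # scaling by powers of 2 is exact
+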
+# func : [maxulperror, low, high]
+avx_ufuncs = {'sqrt'        :[1,  0.,   100.],
+              'absolute'    :[0, -100., 100.],
+              'reciprocal'  :[1,  1.,   100.],
+              'square'      :[1, -100., 100.],
+              'rint'        :[0, -100., 100.],
+              'floor'       :[0, -100., 100.],
+              'ceil'        :[0, -100., 100.],
+              'trunc'       :[0, -100., 100.]}
+
+class TestAVXUfuncs:
+    def test_avx_based_ufunc(self):
+        strides = np.array([-4,-3,-2,-1,1,2,3,4])
+        np.random.seed(42)
+        for func, prop in avx_ufuncs.items():
+            maxulperr = prop[0]
+            minval = prop[1]
+            maxval = prop[2]
+            # various array sizes to ensure masking in AVX is tested
+            for size in range(1,32):
+                myfunc = getattr(np, func)
+                x_f32 = np.float32(np.random.uniform(low=minval, high=maxval,
+                    size=size))
+                x_f64 = np.float64(x_f32)
+                x_f128 = np.longdouble(x_f32)
+                y_true128 = myfunc(x_f128)
+                if maxulperr == 0:
+                    assert_equal(myfunc(x_f32), np.float32(y_true128))
+                    assert_equal(myfunc(x_f64), np.float64(y_true128))
+                else:
+                    assert_array_max_ulp(myfunc(x_f32), np.float32(y_true128),
+                            maxulp=maxulperr)
+                    assert_array_max_ulp(myfunc(x_f64), np.float64(y_true128),
+                            maxulp=maxulperr)
+                # various strides to test gather instruction
+                if size > 1:
+                    y_true32 = myfunc(x_f32)
+                    y_true64 = myfunc(x_f64)
+                    for jj in strides:
+                        assert_equal(myfunc(x_f64[::jj]), y_true64[::jj])
+                        assert_equal(myfunc(x_f32[::jj]), y_true32[::jj])
+
+class TestAVXFloat32Transcendental:
+    def test_exp_float32(self):
+        np.random.seed(42)
+        x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000))
+        x_f64 = np.float64(x_f32)
+        assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3)
+
+    def test_log_float32(self):
+        np.random.seed(42)
+        x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000))
+        x_f64 = np.float64(x_f32)
+        assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4)
+
+    def test_sincos_float32(self):
+        np.random.seed(42)
+        N = 1000000
+        M = np.int_(N/20)
+        index = np.random.randint(low=0, high=N, size=M)
+        x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N))
+        if not _glibc_older_than("2.17"):
+            # test coverage for elements > 117435.992f for which glibc is used
+            # this is known to be problematic on old glibc, so skip it there
+            x_f32[index] = np.float32(10E+10*np.random.rand(M))
+        x_f64 = np.float64(x_f32)
+        assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2)
+        assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2)
+        # test aliasing (issue #17761)
+        tx_f32 = x_f32.copy()
+        assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2)
+        assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2)
+
+    def test_strided_float32(self):
+        np.random.seed(42)
+        strides = np.array([-4,-3,-2,-1,1,2,3,4])
+        sizes = np.arange(2,100)
+        for ii in sizes:
+            x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii))
+            x_f32_large = x_f32.copy()
+            x_f32_large[3:-1:4] = 120000.0
+            exp_true = np.exp(x_f32)
+            log_true = np.log(x_f32)
+            sin_true = np.sin(x_f32_large)
+            cos_true = np.cos(x_f32_large)
+            for jj in strides:
+                assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2)
+                assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2)
+                assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2)
+                assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2)
+
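+# Editorial sketch (hypothetical helper, not from the upstream suite): the
+# maxulp bounds above count units in the last place; np.spacing gives the
+# size of one such unit at a given value.
+def _sketch_one_ulp():
+    x = np.float32(1.0)
+    assert np.spacing(x) == np.float32(2.0 ** -23)    # 1 ulp just above 1.0
+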
+class TestLogAddExp(_FilterInvalids):
+    def test_logaddexp_values(self):
+        x = [1, 2, 3, 4, 5]
+        y = [5, 4, 3, 2, 1]
+        z = [6, 6, 6, 6, 6]
+        for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
+            xf = np.log(np.array(x, dtype=dt))
+            yf = np.log(np.array(y, dtype=dt))
+            zf = np.log(np.array(z, dtype=dt))
+            assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
+
+    def test_logaddexp_range(self):
+        x = [1000000, -1000000, 1000200, -1000200]
+        y = [1000200, -1000200, 1000000, -1000000]
+        z = [1000200, -1000000, 1000200, -1000000]
+        for dt in ['f', 'd', 'g']:
+            logxf = np.array(x, dtype=dt)
+            logyf = np.array(y, dtype=dt)
+            logzf = np.array(z, dtype=dt)
+            assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
+
+    def test_inf(self):
+        inf = np.inf
+        x = [inf, -inf,  inf, -inf, inf, 1,  -inf,  1]
+        y = [inf,  inf, -inf, -inf, 1,   inf, 1,   -inf]
+        z = [inf,  inf,  inf, -inf, inf, inf, 1,    1]
+        with np.errstate(invalid='raise'):
+            for dt in ['f', 'd', 'g']:
+                logxf = np.array(x, dtype=dt)
+                logyf = np.array(y, dtype=dt)
+                logzf = np.array(z, dtype=dt)
+                assert_equal(np.logaddexp(logxf, logyf), logzf)
+
+    def test_nan(self):
+        assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
+        assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
+        assert_(np.isnan(np.logaddexp(np.nan, 0)))
+        assert_(np.isnan(np.logaddexp(0, np.nan)))
+        assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
+
+    def test_reduce(self):
+        assert_equal(np.logaddexp.identity, -np.inf)
+        assert_equal(np.logaddexp.reduce([]), -np.inf)
+
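+# Editorial sketch (hypothetical helper, not from the upstream suite): why
+# logaddexp exists -- the naive log(exp(x) + exp(y)) overflows long before
+# the stable formulation does.
+def _sketch_logaddexp_stability():
+    big = np.float64(710.0)                        # np.exp(710.0) overflows
+    with np.errstate(over='ignore'):
+        naive = np.log(np.exp(big) + np.exp(big))  # inf
+    assert np.isinf(naive)
+    assert np.isclose(np.logaddexp(big, big), big + np.log(2))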
+
+class TestLog1p:
+    def test_log1p(self):
+        assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
+        assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
+
+    def test_special(self):
+        with np.errstate(invalid="ignore", divide="ignore"):
+            assert_equal(ncu.log1p(np.nan), np.nan)
+            assert_equal(ncu.log1p(np.inf), np.inf)
+            assert_equal(ncu.log1p(-1.), -np.inf)
+            assert_equal(ncu.log1p(-2.), np.nan)
+            assert_equal(ncu.log1p(-np.inf), np.nan)
+
+
+class TestExpm1:
+    def test_expm1(self):
+        assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
+        assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
+
+    def test_special(self):
+        assert_equal(ncu.expm1(np.inf), np.inf)
+        assert_equal(ncu.expm1(0.), 0.)
+        assert_equal(ncu.expm1(-0.), -0.)
+        assert_equal(ncu.expm1(np.inf), np.inf)
+        assert_equal(ncu.expm1(-np.inf), -1.)
+
+    def test_complex(self):
+        x = np.asarray(1e-12)
+        assert_allclose(x, ncu.expm1(x))
+        x = x.astype(np.complex128)
+        assert_allclose(x, ncu.expm1(x))
+
+
+class TestHypot:
+    def test_simple(self):
+        assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
+        assert_almost_equal(ncu.hypot(0, 0), 0)
+
+    def test_reduce(self):
+        assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0)
+        assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0)
+        assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0)
+        assert_equal(ncu.hypot.reduce([]), 0.0)
+
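+# Editorial sketch (hypothetical helper, not from the upstream suite): hypot
+# computes sqrt(x**2 + y**2) without letting the intermediate squares
+# overflow, which the naive expression does.
+def _sketch_hypot_no_overflow():
+    x, y = np.float64(3e300), np.float64(4e300)
+    with np.errstate(over='ignore'):
+        naive = np.sqrt(x**2 + y**2)        # x**2 overflows to inf
+    assert np.isinf(naive)
+    assert np.isclose(np.hypot(x, y), 5e300)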
+
+def assert_hypot_isnan(x, y):
+    with np.errstate(invalid='ignore'):
+        assert_(np.isnan(ncu.hypot(x, y)),
+                "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)))
+
+
+def assert_hypot_isinf(x, y):
+    with np.errstate(invalid='ignore'):
+        assert_(np.isinf(ncu.hypot(x, y)),
+                "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
+
+
+class TestHypotSpecialValues:
+    def test_nan_outputs(self):
+        assert_hypot_isnan(np.nan, np.nan)
+        assert_hypot_isnan(np.nan, 1)
+
+    def test_nan_outputs2(self):
+        assert_hypot_isinf(np.nan, np.inf)
+        assert_hypot_isinf(np.inf, np.nan)
+        assert_hypot_isinf(np.inf, 0)
+        assert_hypot_isinf(0, np.inf)
+        assert_hypot_isinf(np.inf, np.inf)
+        assert_hypot_isinf(np.inf, 23.0)
+
+    def test_no_fpe(self):
+        assert_no_warnings(ncu.hypot, np.inf, 0)
+
+
+def assert_arctan2_isnan(x, y):
+    assert_(np.isnan(ncu.arctan2(x, y)), "arctan2(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y)))
+
+
+def assert_arctan2_ispinf(x, y):
+    assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan2(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y)))
+
+
+def assert_arctan2_isninf(x, y):
+    assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan2(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y)))
+
+
+def assert_arctan2_ispzero(x, y):
+    assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan2(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y)))
+
+
+def assert_arctan2_isnzero(x, y):
+    assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan2(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
+
+
+class TestArctan2SpecialValues:
+    def test_one_one(self):
+        # atan2(1, 1) returns pi/4.
+        assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
+        assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
+        assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
+
+    def test_zero_nzero(self):
+        # atan2(+-0, -0) returns +-pi.
+        assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi)
+        assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi)
+
+    def test_zero_pzero(self):
+        # atan2(+-0, +0) returns +-0.
+        assert_arctan2_ispzero(np.PZERO, np.PZERO)
+        assert_arctan2_isnzero(np.NZERO, np.PZERO)
+
+    def test_zero_negative(self):
+        # atan2(+-0, x) returns +-pi for x < 0.
+        assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi)
+        assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi)
+
+    def test_zero_positive(self):
+        # atan2(+-0, x) returns +-0 for x > 0.
+        assert_arctan2_ispzero(np.PZERO, 1)
+        assert_arctan2_isnzero(np.NZERO, 1)
+
+    def test_positive_zero(self):
+        # atan2(y, +-0) returns +pi/2 for y > 0.
+        assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi)
+        assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi)
+
+    def test_negative_zero(self):
+        # atan2(y, +-0) returns -pi/2 for y < 0.
+        assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi)
+        assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi)
+
+    def test_any_ninf(self):
+        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
+        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
+        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
+
+    def test_any_pinf(self):
+        # atan2(+-y, +infinity) returns +-0 for finite y > 0.
+        assert_arctan2_ispzero(1, np.inf)
+        assert_arctan2_isnzero(-1, np.inf)
+
+    def test_inf_any(self):
+        # atan2(+-infinity, x) returns +-pi/2 for finite x.
+        assert_almost_equal(ncu.arctan2( np.inf, 1),  0.5 * np.pi)
+        assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
+
+    def test_inf_ninf(self):
+        # atan2(+-infinity, -infinity) returns +-3*pi/4.
+        assert_almost_equal(ncu.arctan2( np.inf, -np.inf),  0.75 * np.pi)
+        assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
+
+    def test_inf_pinf(self):
+        # atan2(+-infinity, +infinity) returns +-pi/4.
+        assert_almost_equal(ncu.arctan2( np.inf, np.inf),  0.25 * np.pi)
+        assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
+
+    def test_nan_any(self):
+        # atan2(nan, x) returns nan for any x, including inf
+        assert_arctan2_isnan(np.nan, np.inf)
+        assert_arctan2_isnan(np.inf, np.nan)
+        assert_arctan2_isnan(np.nan, np.nan)
+
+
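+# Editorial sketch (hypothetical helper, not from the upstream suite): the
+# special values above all come down to arctan2 keeping the signs of both
+# arguments, which the ratio passed to plain arctan erases.
+def _sketch_arctan2_quadrant():
+    assert np.arctan(-1.0 / -1.0) == np.arctan(1.0 / 1.0)      # quadrant lost
+    assert np.isclose(np.arctan2(1.0, 1.0), 0.25 * np.pi)
+    assert np.isclose(np.arctan2(-1.0, -1.0), -0.75 * np.pi)   # quadrant kept
+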
+class TestLdexp:
+    def _check_ldexp(self, tp):
+        assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
+                                      np.array(3, tp)), 16.)
+        assert_almost_equal(ncu.ldexp(np.array(2., np.float64),
+                                      np.array(3, tp)), 16.)
+        assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble),
+                                      np.array(3, tp)), 16.)
+
+    def test_ldexp(self):
+        # The default Python int type should work
+        assert_almost_equal(ncu.ldexp(2., 3),  16.)
+        # The following int types should all be accepted
+        self._check_ldexp(np.int8)
+        self._check_ldexp(np.int16)
+        self._check_ldexp(np.int32)
+        self._check_ldexp('i')
+        self._check_ldexp('l')
+
+    def test_ldexp_overflow(self):
+        # silence warning emitted on overflow
+        with np.errstate(over="ignore"):
+            imax = np.iinfo(np.dtype('l')).max
+            imin = np.iinfo(np.dtype('l')).min
+            assert_equal(ncu.ldexp(2., imax), np.inf)
+            assert_equal(ncu.ldexp(2., imin), 0)
+
+
+class TestMaximum(_FilterInvalids):
+    def test_reduce(self):
+        dflt = np.typecodes['AllFloat']
+        dint = np.typecodes['AllInteger']
+        seq1 = np.arange(11)
+        seq2 = seq1[::-1]
+        func = np.maximum.reduce
+        for dt in dint:
+            tmp1 = seq1.astype(dt)
+            tmp2 = seq2.astype(dt)
+            assert_equal(func(tmp1), 10)
+            assert_equal(func(tmp2), 10)
+        for dt in dflt:
+            tmp1 = seq1.astype(dt)
+            tmp2 = seq2.astype(dt)
+            assert_equal(func(tmp1), 10)
+            assert_equal(func(tmp2), 10)
+            tmp1[::2] = np.nan
+            tmp2[::2] = np.nan
+            assert_equal(func(tmp1), np.nan)
+            assert_equal(func(tmp2), np.nan)
+
+    def test_reduce_complex(self):
+        assert_equal(np.maximum.reduce([1, 2j]), 1)
+        assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j)
+
+    def test_float_nans(self):
+        nan = np.nan
+        arg1 = np.array([0,   nan, nan])
+        arg2 = np.array([nan, 0,   nan])
+        out = np.array([nan, nan, nan])
+        assert_equal(np.maximum(arg1, arg2), out)
+
+    def test_object_nans(self):
+        # Multiple checks to give this a chance to
+        # fail if cmp is used instead of rich compare.
+        # Failure cannot be guaranteed.
+        for i in range(1):
+            x = np.array(float('nan'), object)
+            y = 1.0
+            z = np.array(float('nan'), object)
+            assert_(np.maximum(x, y) == 1.0)
+            assert_(np.maximum(z, y) == 1.0)
+
+    def test_complex_nans(self):
+        nan = np.nan
+        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
+            arg1 = np.array([0, cnan, cnan], dtype=complex)
+            arg2 = np.array([cnan, 0, cnan], dtype=complex)
+            out = np.array([nan, nan, nan], dtype=complex)
+            assert_equal(np.maximum(arg1, arg2), out)
+
+    def test_object_array(self):
+        arg1 = np.arange(5, dtype=object)
+        arg2 = arg1 + 1
+        assert_equal(np.maximum(arg1, arg2), arg2)
+
+    def test_strided_array(self):
+        arr1 = np.array([-4.0, 1.0, 10.0,  0.0, np.nan, -np.nan, np.inf, -np.inf])
+        arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0,    np.nan, 1.0,    -3.0])
+        maxtrue  = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0])
+        out = np.ones(8)
+        out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0])
+        assert_equal(np.maximum(arr1,arr2), maxtrue)
+        assert_equal(np.maximum(arr1[::2],arr2[::2]), maxtrue[::2])
+        assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0]))
+        assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan]))
+        assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan]))
+        assert_equal(out, out_maxtrue)
+
+    def test_precision(self):
+        dtypes = [np.float16, np.float32, np.float64, np.longdouble]
+
+        for dt in dtypes:
+            dtmin = np.finfo(dt).min
+            dtmax = np.finfo(dt).max
+            d1 = dt(0.1)
+            d1_next = np.nextafter(d1, np.inf)
+
+            test_cases = [
+                # v1    v2          expected
+                (dtmin, -np.inf,    dtmin),
+                (dtmax, -np.inf,    dtmax),
+                (d1,    d1_next,    d1_next),
+                (dtmax, np.nan,     np.nan),
+            ]
+
+            for v1, v2, expected in test_cases:
+                assert_equal(np.maximum([v1], [v2]), [expected])
+                assert_equal(np.maximum.reduce([v1, v2]), expected)
+
+
+class TestMinimum(_FilterInvalids):
+    def test_reduce(self):
+        dflt = np.typecodes['AllFloat']
+        dint = np.typecodes['AllInteger']
+        seq1 = np.arange(11)
+        seq2 = seq1[::-1]
+        func = np.minimum.reduce
+        for dt in dint:
+            tmp1 = seq1.astype(dt)
+            tmp2 = seq2.astype(dt)
+            assert_equal(func(tmp1), 0)
+            assert_equal(func(tmp2), 0)
+        for dt in dflt:
+            tmp1 = seq1.astype(dt)
+            tmp2 = seq2.astype(dt)
+            assert_equal(func(tmp1), 0)
+            assert_equal(func(tmp2), 0)
+            tmp1[::2] = np.nan
+            tmp2[::2] = np.nan
+            assert_equal(func(tmp1), np.nan)
+            assert_equal(func(tmp2), np.nan)
+
+    def test_reduce_complex(self):
+        assert_equal(np.minimum.reduce([1, 2j]), 2j)
+        assert_equal(np.minimum.reduce([1+3j, 2j]), 2j)
+
+    def test_float_nans(self):
+        nan = np.nan
+        arg1 = np.array([0,   nan, nan])
+        arg2 = np.array([nan, 0,   nan])
+        out = np.array([nan, nan, nan])
+        assert_equal(np.minimum(arg1, arg2), out)
+
+    def test_object_nans(self):
+        # Multiple checks to give this a chance to
+        # fail if cmp is used instead of rich compare.
+        # Failure cannot be guaranteed.
+        for i in range(1):
+            x = np.array(float('nan'), object)
+            y = 1.0
+            z = np.array(float('nan'), object)
+            assert_(np.minimum(x, y) == 1.0)
+            assert_(np.minimum(z, y) == 1.0)
+
+    def test_complex_nans(self):
+        nan = np.nan
+        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
+            arg1 = np.array([0, cnan, cnan], dtype=complex)
+            arg2 = np.array([cnan, 0, cnan], dtype=complex)
+            out = np.array([nan, nan, nan], dtype=complex)
+            assert_equal(np.minimum(arg1, arg2), out)
+
+    def test_object_array(self):
+        arg1 = np.arange(5, dtype=object)
+        arg2 = arg1 + 1
+        assert_equal(np.minimum(arg1, arg2), arg1)
+
+    def test_strided_array(self):
+        arr1 = np.array([-4.0, 1.0, 10.0,  0.0, np.nan, -np.nan, np.inf, -np.inf])
+        arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0,    np.nan, 1.0,    -3.0])
+        mintrue  = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf])
+        out = np.ones(8)
+        out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0])
+        assert_equal(np.minimum(arr1,arr2), mintrue)
+        assert_equal(np.minimum(arr1[::2],arr2[::2]), mintrue[::2])
+        assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0]))
+        assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan]))
+        assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-4.0, 1.0, np.nan]))
+        assert_equal(out, out_mintrue)
+
+    def test_precision(self):
+        dtypes = [np.float16, np.float32, np.float64, np.longdouble]
+
+        for dt in dtypes:
+            dtmin = np.finfo(dt).min
+            dtmax = np.finfo(dt).max
+            d1 = dt(0.1)
+            d1_next = np.nextafter(d1, np.inf)
+
+            test_cases = [
+                # v1    v2          expected
+                (dtmin, np.inf,     dtmin),
+                (dtmax, np.inf,     dtmax),
+                (d1,    d1_next,    d1),
+                (dtmin, np.nan,     np.nan),
+            ]
+
+            for v1, v2, expected in test_cases:
+                assert_equal(np.minimum([v1], [v2]), [expected])
+                assert_equal(np.minimum.reduce([v1, v2]), expected)
+
+
+class TestFmax(_FilterInvalids):
+    def test_reduce(self):
+        dflt = np.typecodes['AllFloat']
+        dint = np.typecodes['AllInteger']
+        seq1 = np.arange(11)
+        seq2 = seq1[::-1]
+        func = np.fmax.reduce
+        for dt in dint:
+            tmp1 = seq1.astype(dt)
+            tmp2 = seq2.astype(dt)
+            assert_equal(func(tmp1), 10)
+            assert_equal(func(tmp2), 10)
+        for dt in dflt:
+            tmp1 = seq1.astype(dt)
+            tmp2 = seq2.astype(dt)
+            assert_equal(func(tmp1), 10)
+            assert_equal(func(tmp2), 10)
+            tmp1[::2] = np.nan
+            tmp2[::2] = np.nan
+            assert_equal(func(tmp1), 9)
+            assert_equal(func(tmp2), 9)
+
+    def test_reduce_complex(self):
+        assert_equal(np.fmax.reduce([1, 2j]), 1)
+        assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j)
+
+    def test_float_nans(self):
+        nan = np.nan
+        arg1 = np.array([0,   nan, nan])
+        arg2 = np.array([nan, 0,   nan])
+        out = np.array([0,   0,   nan])
+        assert_equal(np.fmax(arg1, arg2), out)
+
+    def test_complex_nans(self):
+        nan = np.nan
+        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
+            arg1 = np.array([0, cnan, cnan], dtype=complex)
+            arg2 = np.array([cnan, 0, cnan], dtype=complex)
+            out = np.array([0,    0, nan], dtype=complex)
+            assert_equal(np.fmax(arg1, arg2), out)
+
+    def test_precision(self):
+        dtypes = [np.float16, np.float32, np.float64, np.longdouble]
+
+        for dt in dtypes:
+            dtmin = np.finfo(dt).min
+            dtmax = np.finfo(dt).max
+            d1 = dt(0.1)
+            d1_next = np.nextafter(d1, np.inf)
+
+            test_cases = [
+                # v1    v2          expected
+                (dtmin, -np.inf,    dtmin),
+                (dtmax, -np.inf,    dtmax),
+                (d1,    d1_next,    d1_next),
+                (dtmax, np.nan,     dtmax),
+            ]
+
+            for v1, v2, expected in test_cases:
+                assert_equal(np.fmax([v1], [v2]), [expected])
+                assert_equal(np.fmax.reduce([v1, v2]), expected)
+
+
+class TestFmin(_FilterInvalids):
+    def test_reduce(self):
+        dflt = np.typecodes['AllFloat']
+        dint = np.typecodes['AllInteger']
+        seq1 = np.arange(11)
+        seq2 = seq1[::-1]
+        func = np.fmin.reduce
+        for dt in dint:
+            tmp1 = seq1.astype(dt)
+            tmp2 = seq2.astype(dt)
+            assert_equal(func(tmp1), 0)
+            assert_equal(func(tmp2), 0)
+        for dt in dflt:
+            tmp1 = seq1.astype(dt)
+            tmp2 = seq2.astype(dt)
+            assert_equal(func(tmp1), 0)
+            assert_equal(func(tmp2), 0)
+            tmp1[::2] = np.nan
+            tmp2[::2] = np.nan
+            assert_equal(func(tmp1), 1)
+            assert_equal(func(tmp2), 1)
+
+    def test_reduce_complex(self):
+        assert_equal(np.fmin.reduce([1, 2j]), 2j)
+        assert_equal(np.fmin.reduce([1+3j, 2j]), 2j)
+
+    def test_float_nans(self):
+        nan = np.nan
+        arg1 = np.array([0,   nan, nan])
+        arg2 = np.array([nan, 0,   nan])
+        out = np.array([0,   0,   nan])
+        assert_equal(np.fmin(arg1, arg2), out)
+
+    def test_complex_nans(self):
+        nan = np.nan
+        for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
+            arg1 = np.array([0, cnan, cnan], dtype=complex)
+            arg2 = np.array([cnan, 0, cnan], dtype=complex)
+            out = np.array([0,    0, nan], dtype=complex)
+            assert_equal(np.fmin(arg1, arg2), out)
+
+    def test_precision(self):
+        dtypes = [np.float16, np.float32, np.float64, np.longdouble]
+
+        for dt in dtypes:
+            dtmin = np.finfo(dt).min
+            dtmax = np.finfo(dt).max
+            d1 = dt(0.1)
+            d1_next = np.nextafter(d1, np.inf)
+
+            test_cases = [
+                # v1    v2          expected
+                (dtmin, np.inf,     dtmin),
+                (dtmax, np.inf,     dtmax),
+                (d1,    d1_next,    d1),
+                (dtmin, np.nan,     dtmin),
+            ]
+
+            for v1, v2, expected in test_cases:
+                assert_equal(np.fmin([v1], [v2]), [expected])
+                assert_equal(np.fmin.reduce([v1, v2]), expected)
+
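+# Editorial sketch (hypothetical helper, not from the upstream suite): the
+# four classes above pin down the NaN contract -- maximum/minimum propagate
+# NaNs while fmax/fmin skip them where possible.
+def _sketch_nan_contract():
+    a = np.array([1.0, np.nan])
+    b = np.array([np.nan, 2.0])
+    with np.errstate(invalid='ignore'):
+        assert np.isnan(np.maximum(a, b)).all()      # NaNs propagate
+        assert (np.fmax(a, b) == [1.0, 2.0]).all()   # NaNs ignored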
+
+class TestBool:
+    def test_exceptions(self):
+        a = np.ones(1, dtype=np.bool_)
+        assert_raises(TypeError, np.negative, a)
+        assert_raises(TypeError, np.positive, a)
+        assert_raises(TypeError, np.subtract, a, a)
+
+    def test_truth_table_logical(self):
+        # 2, 3 and 4 serve as true values
+        input1 = [0, 0, 3, 2]
+        input2 = [0, 4, 0, 2]
+
+        typecodes = (np.typecodes['AllFloat']
+                     + np.typecodes['AllInteger']
+                     + '?')     # boolean
+        for dtype in map(np.dtype, typecodes):
+            arg1 = np.asarray(input1, dtype=dtype)
+            arg2 = np.asarray(input2, dtype=dtype)
+
+            # OR
+            out = [False, True, True, True]
+            for func in (np.logical_or, np.maximum):
+                assert_equal(func(arg1, arg2).astype(bool), out)
+            # AND
+            out = [False, False, False, True]
+            for func in (np.logical_and, np.minimum):
+                assert_equal(func(arg1, arg2).astype(bool), out)
+            # XOR
+            out = [False, True, True, False]
+            for func in (np.logical_xor, np.not_equal):
+                assert_equal(func(arg1, arg2).astype(bool), out)
+
+    def test_truth_table_bitwise(self):
+        arg1 = [False, False, True, True]
+        arg2 = [False, True, False, True]
+
+        out = [False, True, True, True]
+        assert_equal(np.bitwise_or(arg1, arg2), out)
+
+        out = [False, False, False, True]
+        assert_equal(np.bitwise_and(arg1, arg2), out)
+
+        out = [False, True, True, False]
+        assert_equal(np.bitwise_xor(arg1, arg2), out)
+
+    def test_reduce(self):
+        none = np.array([0, 0, 0, 0], bool)
+        some = np.array([1, 0, 1, 1], bool)
+        every = np.array([1, 1, 1, 1], bool)
+        empty = np.array([], bool)
+
+        arrs = [none, some, every, empty]
+
+        for arr in arrs:
+            assert_equal(np.logical_and.reduce(arr), all(arr))
+
+        for arr in arrs:
+            assert_equal(np.logical_or.reduce(arr), any(arr))
+
+        for arr in arrs:
+            assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1)
+
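+# Editorial sketch (hypothetical helper, not from the upstream suite):
+# logical_xor.reduce computes parity, which is what the loop above asserts
+# via arr.sum() % 2.
+def _sketch_xor_parity():
+    arr = np.array([True, True, True, False])
+    assert np.logical_xor.reduce(arr) == (arr.sum() % 2 == 1)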
+
+class TestBitwiseUFuncs:
+
+    bitwise_types = [np.dtype(c) for c in '?' + 'bBhHiIlLqQ' + 'O']
+
+    def test_values(self):
+        for dt in self.bitwise_types:
+            zeros = np.array([0], dtype=dt)
+            ones = np.array([-1]).astype(dt)
+            msg = "dt = '%s'" % dt.char
+
+            assert_equal(np.bitwise_not(zeros), ones, err_msg=msg)
+            assert_equal(np.bitwise_not(ones), zeros, err_msg=msg)
+
+            assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg)
+            assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg)
+            assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg)
+            assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg)
+
+            assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg)
+            assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg)
+            assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg)
+            assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg)
+
+            assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg)
+            assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg)
+            assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg)
+            assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg)
+
+    def test_types(self):
+        for dt in self.bitwise_types:
+            zeros = np.array([0], dtype=dt)
+            ones = np.array([-1]).astype(dt)
+            msg = "dt = '%s'" % dt.char
+
+            assert_(np.bitwise_not(zeros).dtype == dt, msg)
+            assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg)
+            assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg)
+            assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg)
+
+    def test_identity(self):
+        assert_(np.bitwise_or.identity == 0, 'bitwise_or')
+        assert_(np.bitwise_xor.identity == 0, 'bitwise_xor')
+        assert_(np.bitwise_and.identity == -1, 'bitwise_and')
+
+    def test_reduction(self):
+        binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and)
+
+        for dt in self.bitwise_types:
+            zeros = np.array([0], dtype=dt)
+            ones = np.array([-1]).astype(dt)
+            for f in binary_funcs:
+                msg = "dt: '%s', f: '%s'" % (dt, f)
+                assert_equal(f.reduce(zeros), zeros, err_msg=msg)
+                assert_equal(f.reduce(ones), ones, err_msg=msg)
+
+        # Test empty reduction, no object dtype
+        for dt in self.bitwise_types[:-1]:
+            # No object array types
+            empty = np.array([], dtype=dt)
+            for f in binary_funcs:
+                msg = "dt: '%s', f: '%s'" % (dt, f)
+                tgt = np.array(f.identity).astype(dt)
+                res = f.reduce(empty)
+                assert_equal(res, tgt, err_msg=msg)
+                assert_(res.dtype == tgt.dtype, msg)
+
+        # Empty object arrays use the identity.  Note that the types may
+        # differ: the actual type used is determined by the assign_identity
+        # function and is not the same as the type returned by the identity
+        # method.
+        for f in binary_funcs:
+            msg = "f: '%s'" % (f,)
+            empty = np.array([], dtype=object)
+            tgt = f.identity
+            res = f.reduce(empty)
+            assert_equal(res, tgt, err_msg=msg)
+
+        # Non-empty object arrays do not use the identity
+        for f in binary_funcs:
+            msg = "f: '%s'" % (f,)
+            btype = np.array([True], dtype=object)
+            assert_(type(f.reduce(btype)) is bool, msg)
+
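+# Editorial sketch (hypothetical helper, not from the upstream suite): an
+# empty reduce falls back to the ufunc identity checked above, so reducing
+# nothing under bitwise_and yields "all bits set" for the output dtype.
+def _sketch_empty_reduce_identity():
+    empty = np.array([], dtype=np.uint8)
+    assert np.bitwise_or.reduce(empty) == 0      # identity 0
+    assert np.bitwise_and.reduce(empty) == 255   # identity -1 cast to uint8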
+
+class TestInt:
+    def test_logical_not(self):
+        x = np.ones(10, dtype=np.int16)
+        o = np.ones(10 * 2, dtype=bool)
+        tgt = o.copy()
+        tgt[::2] = False
+        os = o[::2]
+        assert_array_equal(np.logical_not(x, out=os), False)
+        assert_array_equal(o, tgt)
+
+
+class TestFloatingPoint:
+    def test_floating_point(self):
+        assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
+
+
+class TestDegrees:
+    def test_degrees(self):
+        assert_almost_equal(ncu.degrees(np.pi), 180.0)
+        assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
+
+
+class TestRadians:
+    def test_radians(self):
+        assert_almost_equal(ncu.radians(180.0), np.pi)
+        assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
+
+
+class TestHeaviside:
+    def test_heaviside(self):
+        x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])
+        expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])
+        expected1 = expectedhalf.copy()
+        expected1[0, 2] = 1
+
+        h = ncu.heaviside(x, 0.5)
+        assert_equal(h, expectedhalf)
+
+        h = ncu.heaviside(x, 1.0)
+        assert_equal(h, expected1)
+
+        x = x.astype(np.float32)
+
+        h = ncu.heaviside(x, np.float32(0.5))
+        assert_equal(h, expectedhalf.astype(np.float32))
+
+        h = ncu.heaviside(x, np.float32(1.0))
+        assert_equal(h, expected1.astype(np.float32))
+
+
+class TestSign:
+    def test_sign(self):
+        a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
+        out = np.zeros(a.shape)
+        tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
+
+        with np.errstate(invalid='ignore'):
+            res = ncu.sign(a)
+            assert_equal(res, tgt)
+            res = ncu.sign(a, out)
+            assert_equal(res, tgt)
+            assert_equal(out, tgt)
+
+    def test_sign_dtype_object(self):
+        # In reference to github issue #6229
+
+        foo = np.array([-.1, 0, .1])
+        a = np.sign(foo.astype(object))
+        b = np.sign(foo)
+
+        assert_array_equal(a, b)
+
+    def test_sign_dtype_nan_object(self):
+        # In reference to github issue #6229
+        def test_nan():
+            foo = np.array([np.nan])
+            # FIXME: a not used
+            a = np.sign(foo.astype(object))
+
+        assert_raises(TypeError, test_nan)
+
+class TestMinMax:
+    def test_minmax_blocked(self):
+        # simd tests on max/min, test all alignments, slow but important
+        # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
+        for dt, sz in [(np.float32, 15), (np.float64, 7)]:
+            for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
+                                                     max_size=sz):
+                for i in range(inp.size):
+                    inp[:] = np.arange(inp.size, dtype=dt)
+                    inp[i] = np.nan
+                    emsg = lambda: '%r\n%s' % (inp, msg)
+                    with suppress_warnings() as sup:
+                        sup.filter(RuntimeWarning,
+                                   "invalid value encountered in reduce")
+                        assert_(np.isnan(inp.max()), msg=emsg)
+                        assert_(np.isnan(inp.min()), msg=emsg)
+
+                    inp[i] = 1e10
+                    assert_equal(inp.max(), 1e10, err_msg=msg)
+                    inp[i] = -1e10
+                    assert_equal(inp.min(), -1e10, err_msg=msg)
+
+    def test_lower_align(self):
+        # check data that is not aligned to element size
+        # i.e. doubles are aligned to 4 bytes on i386
+        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+        assert_equal(d.max(), d[0])
+        assert_equal(d.min(), d[0])
+
+    def test_reduce_reorder(self):
+        # gh-10370, gh-11029: some compilers reorder the call to
+        # npy_getfloatstatus and put it before the call to an intrinsic
+        # function that causes invalid status to be set. Also make sure
+        # warnings are not emitted.
+        for n in (2, 4, 8, 16, 32):
+            for dt in (np.float32, np.float16, np.complex64):
+                for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):
+                    assert_equal(np.min(r), np.nan)
+
+    def test_minimize_no_warns(self):
+        a = np.minimum(np.nan, 1)
+        assert_equal(a, np.nan)
+
+
+class TestAbsoluteNegative:
+    def test_abs_neg_blocked(self):
+        # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
+        for dt, sz in [(np.float32, 11), (np.float64, 5)]:
+            for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
+                                                     max_size=sz):
+                tgt = [ncu.absolute(i) for i in inp]
+                np.absolute(inp, out=out)
+                assert_equal(out, tgt, err_msg=msg)
+                assert_((out >= 0).all())
+
+                tgt = [-1*(i) for i in inp]
+                np.negative(inp, out=out)
+                assert_equal(out, tgt, err_msg=msg)
+
+                for v in [np.nan, -np.inf, np.inf]:
+                    for i in range(inp.size):
+                        d = np.arange(inp.size, dtype=dt)
+                        inp[:] = -d
+                        inp[i] = v
+                        d[i] = -v if v == -np.inf else v
+                        assert_array_equal(np.abs(inp), d, err_msg=msg)
+                        np.abs(inp, out=out)
+                        assert_array_equal(out, d, err_msg=msg)
+
+                        assert_array_equal(-inp, -1*inp, err_msg=msg)
+                        d = -1 * inp
+                        np.negative(inp, out=out)
+                        assert_array_equal(out, d, err_msg=msg)
+
+    def test_lower_align(self):
+        # check data that is not aligned to element size
+        # i.e. doubles are aligned to 4 bytes on i386
+        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+        assert_equal(np.abs(d), d)
+        assert_equal(np.negative(d), -d)
+        np.negative(d, out=d)
+        np.negative(np.ones_like(d), out=d)
+        np.abs(d, out=d)
+        np.abs(np.ones_like(d), out=d)
+
+    @pytest.mark.parametrize("dtype", ['d', 'f', 'int32', 'int64'])
+    @pytest.mark.parametrize("big", [True, False])
+    def test_noncontiguous(self, dtype, big):
+        data = np.array([-1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.5, 2.5, -6,
+                            6, -2.2251e-308, -8, 10], dtype=dtype)
+        expect = np.array([1.0, -1.0, 0.0, -0.0, -2.2251e-308, 2.5, -2.5, 6,
+                            -6, 2.2251e-308, 8, -10], dtype=dtype)
+        if big:
+            data = np.repeat(data, 10)
+            expect = np.repeat(expect, 10)
+        out = np.ndarray(data.shape, dtype=dtype)
+        ncontig_in = data[1::2]
+        ncontig_out = out[1::2]
+        contig_in = np.array(ncontig_in)
+        # contig in, contig out
+        assert_array_equal(np.negative(contig_in), expect[1::2])
+        # contig in, ncontig out
+        assert_array_equal(np.negative(contig_in, out=ncontig_out),
+                                expect[1::2])
+        # ncontig in, contig out
+        assert_array_equal(np.negative(ncontig_in), expect[1::2])
+        # ncontig in, ncontig out
+        assert_array_equal(np.negative(ncontig_in, out=ncontig_out),
+                                expect[1::2])
+        # contig in, contig out, nd stride
+        data_split = np.array(np.array_split(data, 2))
+        expect_split = np.array(np.array_split(expect, 2))
+        assert_equal(np.negative(data_split), expect_split)
+
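+# Editorial sketch (hypothetical helper, not from the upstream suite): the
+# expected arrays above rely on negation flipping the sign of zero, which is
+# observable only through signbit.
+def _sketch_negative_zero():
+    z = np.negative(np.array([0.0, -0.0]))
+    assert np.signbit(z).tolist() == [True, False]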
+
+class TestPositive:
+    def test_valid(self):
+        valid_dtypes = [int, float, complex, object]
+        for dtype in valid_dtypes:
+            x = np.arange(5, dtype=dtype)
+            result = np.positive(x)
+            assert_equal(x, result, err_msg=str(dtype))
+
+    def test_invalid(self):
+        with assert_raises(TypeError):
+            np.positive(True)
+        with assert_raises(TypeError):
+            np.positive(np.datetime64('2000-01-01'))
+        with assert_raises(TypeError):
+            np.positive(np.array(['foo'], dtype=str))
+        with assert_raises(TypeError):
+            np.positive(np.array(['bar'], dtype=object))
+
+
+class TestSpecialMethods:
+    def test_wrap(self):
+
+        class with_wrap:
+            def __array__(self):
+                return np.zeros(1)
+
+            def __array_wrap__(self, arr, context):
+                r = with_wrap()
+                r.arr = arr
+                r.context = context
+                return r
+
+        a = with_wrap()
+        x = ncu.minimum(a, a)
+        assert_equal(x.arr, np.zeros(1))
+        func, args, i = x.context
+        assert_(func is ncu.minimum)
+        assert_equal(len(args), 2)
+        assert_equal(args[0], a)
+        assert_equal(args[1], a)
+        assert_equal(i, 0)
+
+    def test_wrap_and_prepare_out(self):
+        # Calling convention for out should not affect how special methods are
+        # called
+
+        class StoreArrayPrepareWrap(np.ndarray):
+            _wrap_args = None
+            _prepare_args = None
+            def __new__(cls):
+                return np.zeros(()).view(cls)
+            def __array_wrap__(self, obj, context):
+                self._wrap_args = context[1]
+                return obj
+            def __array_prepare__(self, obj, context):
+                self._prepare_args = context[1]
+                return obj
+            @property
+            def args(self):
+                # We need to ensure these are fetched at the same time, before
+                # any other ufuncs are called by the assertions
+                return (self._prepare_args, self._wrap_args)
+            def __repr__(self):
+                return "a"  # for short test output
+
+        def do_test(f_call, f_expected):
+            a = StoreArrayPrepareWrap()
+            f_call(a)
+            p, w = a.args
+            expected = f_expected(a)
+            try:
+                assert_equal(p, expected)
+                assert_equal(w, expected)
+            except AssertionError as e:
+                # assert_equal produces truly useless error messages
+                raise AssertionError("\n".join([
+                    "Bad arguments passed in ufunc call",
+                    " expected:              {}".format(expected),
+                    " __array_prepare__ got: {}".format(p),
+                    " __array_wrap__ got:    {}".format(w)
+                ]))
+
+        # method not on the out argument
+        do_test(lambda a: np.add(a, 0),              lambda a: (a, 0))
+        do_test(lambda a: np.add(a, 0, None),        lambda a: (a, 0))
+        do_test(lambda a: np.add(a, 0, out=None),    lambda a: (a, 0))
+        do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0))
+
+        # method on the out argument
+        do_test(lambda a: np.add(0, 0, a),           lambda a: (0, 0, a))
+        do_test(lambda a: np.add(0, 0, out=a),       lambda a: (0, 0, a))
+        do_test(lambda a: np.add(0, 0, out=(a,)),    lambda a: (0, 0, a))
+
+        # Also check the where mask handling:
+        do_test(lambda a: np.add(a, 0, where=False), lambda a: (a, 0))
+        do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a))
+
+    def test_wrap_with_iterable(self):
+        # test fix for bug #1026:
+
+        class with_wrap(np.ndarray):
+            __array_priority__ = 10
+
+            def __new__(cls):
+                return np.asarray(1).view(cls).copy()
+
+            def __array_wrap__(self, arr, context):
+                return arr.view(type(self))
+
+        a = with_wrap()
+        x = ncu.multiply(a, (1, 2, 3))
+        assert_(isinstance(x, with_wrap))
+        assert_array_equal(x, np.array((1, 2, 3)))
+
+    def test_priority_with_scalar(self):
+        # test fix for bug #826:
+
+        class A(np.ndarray):
+            __array_priority__ = 10
+
+            def __new__(cls):
+                return np.asarray(1.0, 'float64').view(cls).copy()
+
+        a = A()
+        x = np.float64(1)*a
+        assert_(isinstance(x, A))
+        assert_array_equal(x, np.array(1))
+
+    def test_old_wrap(self):
+
+        class with_wrap:
+            def __array__(self):
+                return np.zeros(1)
+
+            def __array_wrap__(self, arr):
+                r = with_wrap()
+                r.arr = arr
+                return r
+
+        a = with_wrap()
+        x = ncu.minimum(a, a)
+        assert_equal(x.arr, np.zeros(1))
+
+    def test_priority(self):
+
+        class A:
+            def __array__(self):
+                return np.zeros(1)
+
+            def __array_wrap__(self, arr, context):
+                r = type(self)()
+                r.arr = arr
+                r.context = context
+                return r
+
+        class B(A):
+            __array_priority__ = 20.
+
+        class C(A):
+            __array_priority__ = 40.
+
+        x = np.zeros(1)
+        a = A()
+        b = B()
+        c = C()
+        f = ncu.minimum
+        assert_(type(f(x, x)) is np.ndarray)
+        assert_(type(f(x, a)) is A)
+        assert_(type(f(x, b)) is B)
+        assert_(type(f(x, c)) is C)
+        assert_(type(f(a, x)) is A)
+        assert_(type(f(b, x)) is B)
+        assert_(type(f(c, x)) is C)
+
+        assert_(type(f(a, a)) is A)
+        assert_(type(f(a, b)) is B)
+        assert_(type(f(b, a)) is B)
+        assert_(type(f(b, b)) is B)
+        assert_(type(f(b, c)) is C)
+        assert_(type(f(c, b)) is C)
+        assert_(type(f(c, c)) is C)
+
+        assert_(type(ncu.exp(a)) is A)
+        assert_(type(ncu.exp(b)) is B)
+        assert_(type(ncu.exp(c)) is C)
+
+    def test_failing_wrap(self):
+
+        class A:
+            def __array__(self):
+                return np.zeros(2)
+
+            def __array_wrap__(self, arr, context):
+                raise RuntimeError
+
+        a = A()
+        assert_raises(RuntimeError, ncu.maximum, a, a)
+        assert_raises(RuntimeError, ncu.maximum.reduce, a)
+
+    def test_failing_out_wrap(self):
+
+        singleton = np.array([1.0])
+
+        class Ok(np.ndarray):
+            def __array_wrap__(self, obj):
+                return singleton
+
+        class Bad(np.ndarray):
+            def __array_wrap__(self, obj):
+                raise RuntimeError
+
+        ok = np.empty(1).view(Ok)
+        bad = np.empty(1).view(Bad)
+        # double-free (segfault) of "ok" if "bad" raises an exception
+        for i in range(10):
+            assert_raises(RuntimeError, ncu.frexp, 1, ok, bad)
+
+    def test_none_wrap(self):
+        # Tests that issue #8507 is resolved. Previously, this would segfault
+
+        class A:
+            def __array__(self):
+                return np.zeros(1)
+
+            def __array_wrap__(self, arr, context=None):
+                return None
+
+        a = A()
+        assert_equal(ncu.maximum(a, a), None)
+
+    def test_default_prepare(self):
+
+        class with_wrap:
+            __array_priority__ = 10
+
+            def __array__(self):
+                return np.zeros(1)
+
+            def __array_wrap__(self, arr, context):
+                return arr
+
+        a = with_wrap()
+        x = ncu.minimum(a, a)
+        assert_equal(x, np.zeros(1))
+        assert_equal(type(x), np.ndarray)
+
+    @pytest.mark.parametrize("use_where", [True, False])
+    def test_prepare(self, use_where):
+
+        class with_prepare(np.ndarray):
+            __array_priority__ = 10
+
+            def __array_prepare__(self, arr, context):
+                # make sure we can return a new array
+                return np.array(arr).view(type=with_prepare)
+
+        a = np.array(1).view(type=with_prepare)
+        if use_where:
+            x = np.add(a, a, where=np.array(True))
+        else:
+            x = np.add(a, a)
+        assert_equal(x, np.array(2))
+        assert_equal(type(x), with_prepare)
+
+    @pytest.mark.parametrize("use_where", [True, False])
+    def test_prepare_out(self, use_where):
+
+        class with_prepare(np.ndarray):
+            __array_priority__ = 10
+
+            def __array_prepare__(self, arr, context):
+                return np.array(arr).view(type=with_prepare)
+
+        a = np.array([1]).view(type=with_prepare)
+        if use_where:
+            x = np.add(a, a, a, where=[True])
+        else:
+            x = np.add(a, a, a)
+        # Returned array is new, because of the strange
+        # __array_prepare__ above
+        assert_(not np.shares_memory(x, a))
+        assert_equal(x, np.array([2]))
+        assert_equal(type(x), with_prepare)
+
+    def test_failing_prepare(self):
+
+        class A:
+            def __array__(self):
+                return np.zeros(1)
+
+            def __array_prepare__(self, arr, context=None):
+                raise RuntimeError
+
+        a = A()
+        assert_raises(RuntimeError, ncu.maximum, a, a)
+        assert_raises(RuntimeError, ncu.maximum, a, a, where=False)
+
+    def test_array_too_many_args(self):
+
+        class A:
+            def __array__(self, dtype, context):
+                return np.zeros(1)
+
+        a = A()
+        assert_raises_regex(TypeError, '2 required positional', np.sum, a)
+
+    def test_ufunc_override(self):
+        # check override works even with instance with high priority.
+        class A:
+            def __array_ufunc__(self, func, method, *inputs, **kwargs):
+                return self, func, method, inputs, kwargs
+
+        class MyNDArray(np.ndarray):
+            __array_priority__ = 100
+
+        a = A()
+        b = np.array([1]).view(MyNDArray)
+        res0 = np.multiply(a, b)
+        res1 = np.multiply(b, b, out=a)
+
+        # self
+        assert_equal(res0[0], a)
+        assert_equal(res1[0], a)
+        assert_equal(res0[1], np.multiply)
+        assert_equal(res1[1], np.multiply)
+        assert_equal(res0[2], '__call__')
+        assert_equal(res1[2], '__call__')
+        assert_equal(res0[3], (a, b))
+        assert_equal(res1[3], (b, b))
+        assert_equal(res0[4], {})
+        assert_equal(res1[4], {'out': (a,)})
+
+    def test_ufunc_override_mro(self):
+
+        # Some multi arg functions for testing.
+        def tres_mul(a, b, c):
+            return a * b * c
+
+        def quatro_mul(a, b, c, d):
+            return a * b * c * d
+
+        # Make these into ufuncs.
+        three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1)
+        four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1)
+
+        class A:
+            def __array_ufunc__(self, func, method, *inputs, **kwargs):
+                return "A"
+
+        class ASub(A):
+            def __array_ufunc__(self, func, method, *inputs, **kwargs):
+                return "ASub"
+
+        class B:
+            def __array_ufunc__(self, func, method, *inputs, **kwargs):
+                return "B"
+
+        class C:
+            def __init__(self):
+                self.count = 0
+
+            def __array_ufunc__(self, func, method, *inputs, **kwargs):
+                self.count += 1
+                return NotImplemented
+
+        class CSub(C):
+            def __array_ufunc__(self, func, method, *inputs, **kwargs):
+                self.count += 1
+                return NotImplemented
+
+        a = A()
+        a_sub = ASub()
+        b = B()
+        c = C()
+
+        # Standard
+        res = np.multiply(a, a_sub)
+        assert_equal(res, "ASub")
+        res = np.multiply(a_sub, b)
+        assert_equal(res, "ASub")
+
+        # With 1 NotImplemented
+        res = np.multiply(c, a)
+        assert_equal(res, "A")
+        assert_equal(c.count, 1)
+        # Check our counter works, so we can trust tests below.
+        res = np.multiply(c, a)
+        assert_equal(c.count, 2)
+
+        # Both NotImplemented.
+        c = C()
+        c_sub = CSub()
+        assert_raises(TypeError, np.multiply, c, c_sub)
+        assert_equal(c.count, 1)
+        assert_equal(c_sub.count, 1)
+        c.count = c_sub.count = 0
+        assert_raises(TypeError, np.multiply, c_sub, c)
+        assert_equal(c.count, 1)
+        assert_equal(c_sub.count, 1)
+        c.count = 0
+        assert_raises(TypeError, np.multiply, c, c)
+        assert_equal(c.count, 1)
+        c.count = 0
+        assert_raises(TypeError, np.multiply, 2, c)
+        assert_equal(c.count, 1)
+
+        # Ternary testing.
+        assert_equal(three_mul_ufunc(a, 1, 2), "A")
+        assert_equal(three_mul_ufunc(1, a, 2), "A")
+        assert_equal(three_mul_ufunc(1, 2, a), "A")
+
+        assert_equal(three_mul_ufunc(a, a, 6), "A")
+        assert_equal(three_mul_ufunc(a, 2, a), "A")
+        assert_equal(three_mul_ufunc(a, 2, b), "A")
+        assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub")
+        assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub")
+        c.count = 0
+        assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub")
+        assert_equal(c.count, 1)
+        c.count = 0
+        assert_equal(three_mul_ufunc(1, a_sub, c), "ASub")
+        assert_equal(c.count, 0)
+
+        c.count = 0
+        assert_equal(three_mul_ufunc(a, b, c), "A")
+        assert_equal(c.count, 0)
+        c_sub.count = 0
+        assert_equal(three_mul_ufunc(a, b, c_sub), "A")
+        assert_equal(c_sub.count, 0)
+        assert_equal(three_mul_ufunc(1, 2, b), "B")
+
+        assert_raises(TypeError, three_mul_ufunc, 1, 2, c)
+        assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c)
+        assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3)
+
+        # Quaternary testing.
+        assert_equal(four_mul_ufunc(a, 1, 2, 3), "A")
+        assert_equal(four_mul_ufunc(1, a, 2, 3), "A")
+        assert_equal(four_mul_ufunc(1, 1, a, 3), "A")
+        assert_equal(four_mul_ufunc(1, 1, 2, a), "A")
+
+        assert_equal(four_mul_ufunc(a, b, 2, 3), "A")
+        assert_equal(four_mul_ufunc(1, a, 2, b), "A")
+        assert_equal(four_mul_ufunc(b, 1, a, 3), "B")
+        assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub")
+        assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub")
+
+        c = C()
+        c_sub = CSub()
+        assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c)
+        assert_equal(c.count, 1)
+        c.count = 0
+        assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c)
+        assert_equal(c_sub.count, 1)
+        assert_equal(c.count, 1)
+        c2 = C()
+        c.count = c_sub.count = 0
+        assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2)
+        assert_equal(c_sub.count, 1)
+        assert_equal(c.count, 1)
+        assert_equal(c2.count, 0)
+        c.count = c2.count = c_sub.count = 0
+        assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c)
+        assert_equal(c_sub.count, 1)
+        assert_equal(c.count, 0)
+        assert_equal(c2.count, 1)
+
+    def test_ufunc_override_methods(self):
+
+        class A:
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                return self, ufunc, method, inputs, kwargs
+
+        # __call__
+        a = A()
+        with assert_raises(TypeError):
+            np.multiply.__call__(1, a, foo='bar', answer=42)
+        res = np.multiply.__call__(1, a, subok='bar', where=42)
+        assert_equal(res[0], a)
+        assert_equal(res[1], np.multiply)
+        assert_equal(res[2], '__call__')
+        assert_equal(res[3], (1, a))
+        assert_equal(res[4], {'subok': 'bar', 'where': 42})
+
+        # __call__, wrong args
+        assert_raises(TypeError, np.multiply, a)
+        assert_raises(TypeError, np.multiply, a, a, a, a)
+        assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a')
+        assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0])
+
+        # reduce, positional args
+        res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0')
+        assert_equal(res[0], a)
+        assert_equal(res[1], np.multiply)
+        assert_equal(res[2], 'reduce')
+        assert_equal(res[3], (a,))
+        assert_equal(res[4], {'dtype':'dtype0',
+                              'out': ('out0',),
+                              'keepdims': 'keep0',
+                              'axis': 'axis0'})
+
+        # reduce, kwargs
+        res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0',
+                                 keepdims='keep0', initial='init0',
+                                 where='where0')
+        assert_equal(res[0], a)
+        assert_equal(res[1], np.multiply)
+        assert_equal(res[2], 'reduce')
+        assert_equal(res[3], (a,))
+        assert_equal(res[4], {'dtype':'dtype0',
+                              'out': ('out0',),
+                              'keepdims': 'keep0',
+                              'axis': 'axis0',
+                              'initial': 'init0',
+                              'where': 'where0'})
+
+        # reduce, output equal to None removed, but not other explicit ones,
+        # even if they are at their default value.
+        res = np.multiply.reduce(a, 0, None, None, False)
+        assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
+        res = np.multiply.reduce(a, out=None, axis=0, keepdims=True)
+        assert_equal(res[4], {'axis': 0, 'keepdims': True})
+        res = np.multiply.reduce(a, None, out=(None,), dtype=None)
+        assert_equal(res[4], {'axis': None, 'dtype': None})
+        res = np.multiply.reduce(a, 0, None, None, False, 2, True)
+        assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
+                              'initial': 2, 'where': True})
+        # np._NoValue ignored for initial
+        res = np.multiply.reduce(a, 0, None, None, False,
+                                 np._NoValue, True)
+        assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
+                              'where': True})
+        # None kept for initial, True for where.
+        res = np.multiply.reduce(a, 0, None, None, False, None, True)
+        assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
+                              'initial': None, 'where': True})
+
+        # reduce, wrong args
+        assert_raises(ValueError, np.multiply.reduce, a, out=())
+        assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1'))
+        assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0')
+
+        # accumulate, pos args
+        res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0')
+        assert_equal(res[0], a)
+        assert_equal(res[1], np.multiply)
+        assert_equal(res[2], 'accumulate')
+        assert_equal(res[3], (a,))
+        assert_equal(res[4], {'dtype':'dtype0',
+                              'out': ('out0',),
+                              'axis': 'axis0'})
+
+        # accumulate, kwargs
+        res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0',
+                                     out='out0')
+        assert_equal(res[0], a)
+        assert_equal(res[1], np.multiply)
+        assert_equal(res[2], 'accumulate')
+        assert_equal(res[3], (a,))
+        assert_equal(res[4], {'dtype':'dtype0',
+                              'out': ('out0',),
+                              'axis': 'axis0'})
+
+        # accumulate, output equal to None removed.
+        res = np.multiply.accumulate(a, 0, None, None)
+        assert_equal(res[4], {'axis': 0, 'dtype': None})
+        res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1')
+        assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'})
+        res = np.multiply.accumulate(a, None, out=(None,), dtype=None)
+        assert_equal(res[4], {'axis': None, 'dtype': None})
+
+        # accumulate, wrong args
+        assert_raises(ValueError, np.multiply.accumulate, a, out=())
+        assert_raises(ValueError, np.multiply.accumulate, a,
+                      out=('out0', 'out1'))
+        assert_raises(TypeError, np.multiply.accumulate, a,
+                      'axis0', axis='axis0')
+
+        # reduceat, pos args
+        res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0')
+        assert_equal(res[0], a)
+        assert_equal(res[1], np.multiply)
+        assert_equal(res[2], 'reduceat')
+        assert_equal(res[3], (a, [4, 2]))
+        assert_equal(res[4], {'dtype':'dtype0',
+                              'out': ('out0',),
+                              'axis': 'axis0'})
+
+        # reduceat, kwargs
+        res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0',
+                                   out='out0')
+        assert_equal(res[0], a)
+        assert_equal(res[1], np.multiply)
+        assert_equal(res[2], 'reduceat')
+        assert_equal(res[3], (a, [4, 2]))
+        assert_equal(res[4], {'dtype':'dtype0',
+                              'out': ('out0',),
+                              'axis': 'axis0'})
+
+        # reduceat, output equal to None removed.
+        res = np.multiply.reduceat(a, [4, 2], 0, None, None)
+        assert_equal(res[4], {'axis': 0, 'dtype': None})
+        res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt')
+        assert_equal(res[4], {'axis': None, 'dtype': 'dt'})
+        res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,))
+        assert_equal(res[4], {'axis': None, 'dtype': None})
+
+        # reduceat, wrong args
+        assert_raises(ValueError, np.multiply.reduceat, a, [4, 2], out=())
+        assert_raises(ValueError, np.multiply.reduceat, a, [4, 2],
+                      out=('out0', 'out1'))
+        assert_raises(TypeError, np.multiply.reduceat, a, [4, 2],
+                      'axis0', axis='axis0')
+
+        # outer
+        res = np.multiply.outer(a, 42)
+        assert_equal(res[0], a)
+        assert_equal(res[1], np.multiply)
+        assert_equal(res[2], 'outer')
+        assert_equal(res[3], (a, 42))
+        assert_equal(res[4], {})
+
+        # outer, wrong args
+        assert_raises(TypeError, np.multiply.outer, a)
+        assert_raises(TypeError, np.multiply.outer, a, a, a, a)
+        assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a')
+
+        # at
+        res = np.multiply.at(a, [4, 2], 'b0')
+        assert_equal(res[0], a)
+        assert_equal(res[1], np.multiply)
+        assert_equal(res[2], 'at')
+        assert_equal(res[3], (a, [4, 2], 'b0'))
+
+        # at, wrong args
+        assert_raises(TypeError, np.multiply.at, a)
+        assert_raises(TypeError, np.multiply.at, a, a, a, a)
+
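+    # A minimal sketch (relying only on the behavior asserted above) of how
+    # arguments reach __array_ufunc__ already normalized:
+    #
+    #     class Recorder:
+    #         def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+    #             return method, kwargs
+    #
+    #     np.multiply.reduce(Recorder(), axis=0, out=None)
+    #     # -> ('reduce', {'axis': 0})    # the default out=None is dropped
+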
+    def test_ufunc_override_out(self):
+
+        class A:
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                return kwargs
+
+        class B:
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                return kwargs
+
+        a = A()
+        b = B()
+        res0 = np.multiply(a, b, 'out_arg')
+        res1 = np.multiply(a, b, out='out_arg')
+        res2 = np.multiply(2, b, 'out_arg')
+        res3 = np.multiply(3, b, out='out_arg')
+        res4 = np.multiply(a, 4, 'out_arg')
+        res5 = np.multiply(a, 5, out='out_arg')
+
+        assert_equal(res0['out'][0], 'out_arg')
+        assert_equal(res1['out'][0], 'out_arg')
+        assert_equal(res2['out'][0], 'out_arg')
+        assert_equal(res3['out'][0], 'out_arg')
+        assert_equal(res4['out'][0], 'out_arg')
+        assert_equal(res5['out'][0], 'out_arg')
+
+        # ufuncs with multiple outputs: modf and frexp.
+        res6 = np.modf(a, 'out0', 'out1')
+        res7 = np.frexp(a, 'out0', 'out1')
+        assert_equal(res6['out'][0], 'out0')
+        assert_equal(res6['out'][1], 'out1')
+        assert_equal(res7['out'][0], 'out0')
+        assert_equal(res7['out'][1], 'out1')
+
+        # While we're at it, check that default output is never passed on.
+        assert_(np.sin(a, None) == {})
+        assert_(np.sin(a, out=None) == {})
+        assert_(np.sin(a, out=(None,)) == {})
+        assert_(np.modf(a, None) == {})
+        assert_(np.modf(a, None, None) == {})
+        assert_(np.modf(a, out=(None, None)) == {})
+        with assert_raises(TypeError):
+            # Out argument must be tuple, since there are multiple outputs.
+            np.modf(a, out=None)
+
+        # don't give positional and output argument, or too many arguments.
+        # wrong number of arguments in the tuple is an error too.
+        assert_raises(TypeError, np.multiply, a, b, 'one', out='two')
+        assert_raises(TypeError, np.multiply, a, b, 'one', 'two')
+        assert_raises(ValueError, np.multiply, a, b, out=('one', 'two'))
+        assert_raises(TypeError, np.multiply, a, out=())
+        assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three'))
+        assert_raises(TypeError, np.modf, a, 'one', 'two', 'three')
+        assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))
+        assert_raises(ValueError, np.modf, a, out=('one',))
+
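+    # A sketch of the `out` normalization exercised above: whether outputs
+    # are passed positionally or by keyword, __array_ufunc__ receives them
+    # as a tuple:
+    #
+    #     a = A()                              # A as defined in the test above
+    #     np.multiply(a, 1, out='o')['out']    # -> ('o',)
+    #     np.modf(a, 'o0', 'o1')['out']        # -> ('o0', 'o1')
+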
+    def test_ufunc_override_where(self):
+
+        class OverriddenArrayOld(np.ndarray):
+
+            def _unwrap(self, objs):
+                cls = type(self)
+                result = []
+                for obj in objs:
+                    if isinstance(obj, cls):
+                        obj = np.array(obj)
+                    elif type(obj) != np.ndarray:
+                        return NotImplemented
+                    result.append(obj)
+                return result
+
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+
+                inputs = self._unwrap(inputs)
+                if inputs is NotImplemented:
+                    return NotImplemented
+
+                kwargs = kwargs.copy()
+                if "out" in kwargs:
+                    kwargs["out"] = self._unwrap(kwargs["out"])
+                    if kwargs["out"] is NotImplemented:
+                        return NotImplemented
+
+                r = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
+                if r is not NotImplemented:
+                    r = r.view(type(self))
+
+                return r
+
+        class OverriddenArrayNew(OverriddenArrayOld):
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+
+                kwargs = kwargs.copy()
+                if "where" in kwargs:
+                    kwargs["where"] = self._unwrap((kwargs["where"], ))
+                    if kwargs["where"] is NotImplemented:
+                        return NotImplemented
+                    else:
+                        kwargs["where"] = kwargs["where"][0]
+
+                r = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
+                if r is not NotImplemented:
+                    r = r.view(type(self))
+
+                return r
+
+        ufunc = np.negative
+
+        array = np.array([1, 2, 3])
+        where = np.array([True, False, True])
+        expected = ufunc(array, where=where)
+
+        with pytest.raises(TypeError):
+            ufunc(array, where=where.view(OverriddenArrayOld))
+
+        result_1 = ufunc(
+            array,
+            where=where.view(OverriddenArrayNew)
+        )
+        assert isinstance(result_1, OverriddenArrayNew)
+        assert np.all(np.array(result_1) == expected, where=where)
+
+        result_2 = ufunc(
+            array.view(OverriddenArrayNew),
+            where=where.view(OverriddenArrayNew)
+        )
+        assert isinstance(result_2, OverriddenArrayNew)
+        assert np.all(np.array(result_2) == expected, where=where)
+
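+    # For reference, the plain-ndarray `where` behavior matched above (a
+    # sketch; masked-out positions keep whatever is already in `out`):
+    #
+    #     out = np.zeros(3, dtype=int)
+    #     np.negative([1, 2, 3], out=out, where=[True, False, True])
+    #     # -> array([-1,  0, -3])
+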
+    def test_ufunc_override_exception(self):
+
+        class A:
+            def __array_ufunc__(self, *a, **kwargs):
+                raise ValueError("oops")
+
+        a = A()
+        assert_raises(ValueError, np.negative, 1, out=a)
+        assert_raises(ValueError, np.negative, a)
+        assert_raises(ValueError, np.divide, 1., a)
+
+    def test_ufunc_override_not_implemented(self):
+
+        class A:
+            def __array_ufunc__(self, *args, **kwargs):
+                return NotImplemented
+
+        msg = ("operand type(s) all returned NotImplemented from "
+               "__array_ufunc__(, '__call__', <*>): 'A'")
+        with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+            np.negative(A())
+
+        msg = ("operand type(s) all returned NotImplemented from "
+               "__array_ufunc__(, '__call__', <*>, , "
+               "out=(1,)): 'A', 'object', 'int'")
+        with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+            np.add(A(), object(), out=1)
+
+    def test_ufunc_override_disabled(self):
+
+        class OptOut:
+            __array_ufunc__ = None
+
+        opt_out = OptOut()
+
+        # ufuncs always raise
+        msg = "operand 'OptOut' does not support ufuncs"
+        with assert_raises_regex(TypeError, msg):
+            np.add(opt_out, 1)
+        with assert_raises_regex(TypeError, msg):
+            np.add(1, opt_out)
+        with assert_raises_regex(TypeError, msg):
+            np.negative(opt_out)
+
+        # opt-outs still hold even when other arguments have pathological
+        # __array_ufunc__ implementations
+
+        class GreedyArray:
+            def __array_ufunc__(self, *args, **kwargs):
+                return self
+
+        greedy = GreedyArray()
+        assert_(np.negative(greedy) is greedy)
+        with assert_raises_regex(TypeError, msg):
+            np.add(greedy, opt_out)
+        with assert_raises_regex(TypeError, msg):
+            np.add(greedy, 1, out=opt_out)
+
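+    # Note that ``__array_ufunc__ = None`` opts a class out of the
+    # arithmetic operator protocol as well, not just direct ufunc calls --
+    # a sketch:
+    #
+    #     np.arange(3) + OptOut()    # raises TypeError
+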
+    def test_gufunc_override(self):
+        # gufuncs are just ufunc instances, but follow a different path,
+        # so check that __array_ufunc__ overrides them properly.
+        class A:
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                return self, ufunc, method, inputs, kwargs
+
+        inner1d = ncu_tests.inner1d
+        a = A()
+        res = inner1d(a, a)
+        assert_equal(res[0], a)
+        assert_equal(res[1], inner1d)
+        assert_equal(res[2], '__call__')
+        assert_equal(res[3], (a, a))
+        assert_equal(res[4], {})
+
+        res = inner1d(1, 1, out=a)
+        assert_equal(res[0], a)
+        assert_equal(res[1], inner1d)
+        assert_equal(res[2], '__call__')
+        assert_equal(res[3], (1, 1))
+        assert_equal(res[4], {'out': (a,)})
+
+        # wrong number of arguments in the tuple is an error too.
+        assert_raises(TypeError, inner1d, a, out='two')
+        assert_raises(TypeError, inner1d, a, a, 'one', out='two')
+        assert_raises(TypeError, inner1d, a, a, 'one', 'two')
+        assert_raises(ValueError, inner1d, a, a, out=('one', 'two'))
+        assert_raises(ValueError, inner1d, a, a, out=())
+
+    def test_ufunc_override_with_super(self):
+        # NOTE: this class is used in doc/source/user/basics.subclassing.rst
+        # if you make any changes here, do update it there too.
+        class A(np.ndarray):
+            def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
+                args = []
+                in_no = []
+                for i, input_ in enumerate(inputs):
+                    if isinstance(input_, A):
+                        in_no.append(i)
+                        args.append(input_.view(np.ndarray))
+                    else:
+                        args.append(input_)
+
+                outputs = out
+                out_no = []
+                if outputs:
+                    out_args = []
+                    for j, output in enumerate(outputs):
+                        if isinstance(output, A):
+                            out_no.append(j)
+                            out_args.append(output.view(np.ndarray))
+                        else:
+                            out_args.append(output)
+                    kwargs['out'] = tuple(out_args)
+                else:
+                    outputs = (None,) * ufunc.nout
+
+                info = {}
+                if in_no:
+                    info['inputs'] = in_no
+                if out_no:
+                    info['outputs'] = out_no
+
+                results = super().__array_ufunc__(ufunc, method,
+                                                  *args, **kwargs)
+                if results is NotImplemented:
+                    return NotImplemented
+
+                if method == 'at':
+                    if isinstance(inputs[0], A):
+                        inputs[0].info = info
+                    return
+
+                if ufunc.nout == 1:
+                    results = (results,)
+
+                results = tuple((np.asarray(result).view(A)
+                                 if output is None else output)
+                                for result, output in zip(results, outputs))
+                if results and isinstance(results[0], A):
+                    results[0].info = info
+
+                return results[0] if len(results) == 1 else results
+
+        class B:
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                if any(isinstance(input_, A) for input_ in inputs):
+                    return "A!"
+                else:
+                    return NotImplemented
+
+        d = np.arange(5.)
+        # 1 input, 1 output
+        a = np.arange(5.).view(A)
+        b = np.sin(a)
+        check = np.sin(d)
+        assert_(np.all(check == b))
+        assert_equal(b.info, {'inputs': [0]})
+        b = np.sin(d, out=(a,))
+        assert_(np.all(check == b))
+        assert_equal(b.info, {'outputs': [0]})
+        assert_(b is a)
+        a = np.arange(5.).view(A)
+        b = np.sin(a, out=a)
+        assert_(np.all(check == b))
+        assert_equal(b.info, {'inputs': [0], 'outputs': [0]})
+
+        # 1 input, 2 outputs
+        a = np.arange(5.).view(A)
+        b1, b2 = np.modf(a)
+        assert_equal(b1.info, {'inputs': [0]})
+        b1, b2 = np.modf(d, out=(None, a))
+        assert_(b2 is a)
+        assert_equal(b1.info, {'outputs': [1]})
+        a = np.arange(5.).view(A)
+        b = np.arange(5.).view(A)
+        c1, c2 = np.modf(a, out=(a, b))
+        assert_(c1 is a)
+        assert_(c2 is b)
+        assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]})
+
+        # 2 input, 1 output
+        a = np.arange(5.).view(A)
+        b = np.arange(5.).view(A)
+        c = np.add(a, b, out=a)
+        assert_(c is a)
+        assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]})
+        # some tests with a non-ndarray subclass
+        a = np.arange(5.)
+        b = B()
+        assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
+        assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
+        assert_raises(TypeError, np.add, a, b)
+        a = a.view(A)
+        assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
+        assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!")
+        assert_(np.add(a, b) == "A!")
+        # regression check for gh-9102 -- tests ufunc.reduce implicitly.
+        d = np.array([[1, 2, 3], [1, 2, 3]])
+        a = d.view(A)
+        c = a.any()
+        check = d.any()
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        c = a.max()
+        check = d.max()
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        b = np.array(0).view(A)
+        c = a.max(out=b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        check = a.max(axis=0)
+        b = np.zeros_like(check).view(A)
+        c = a.max(axis=0, out=b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        # simple explicit tests of reduce, accumulate, reduceat
+        check = np.add.reduce(d, axis=1)
+        c = np.add.reduce(a, axis=1)
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        b = np.zeros_like(c)
+        c = np.add.reduce(a, 1, None, b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        check = np.add.accumulate(d, axis=0)
+        c = np.add.accumulate(a, axis=0)
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        b = np.zeros_like(c)
+        c = np.add.accumulate(a, 0, None, b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        indices = [0, 2, 1]
+        check = np.add.reduceat(d, indices, axis=1)
+        c = np.add.reduceat(a, indices, axis=1)
+        assert_equal(c, check)
+        assert_equal(c.info, {'inputs': [0]})
+        b = np.zeros_like(c)
+        c = np.add.reduceat(a, indices, 1, None, b)
+        assert_equal(c, check)
+        assert_(c is b)
+        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+        # and a few tests for at
+        d = np.array([[1, 2, 3], [1, 2, 3]])
+        check = d.copy()
+        a = d.copy().view(A)
+        np.add.at(check, ([0, 1], [0, 2]), 1.)
+        np.add.at(a, ([0, 1], [0, 2]), 1.)
+        assert_equal(a, check)
+        assert_equal(a.info, {'inputs': [0]})
+        b = np.array(1.).view(A)
+        a = d.copy().view(A)
+        np.add.at(a, ([0, 1], [0, 2]), b)
+        assert_equal(a, check)
+        assert_equal(a.info, {'inputs': [0, 2]})
+
+    def test_array_ufunc_direct_call(self):
+        # This is mainly a regression test for gh-24023 (shouldn't segfault)
+        a = np.array(1)
+        with pytest.raises(TypeError):
+            a.__array_ufunc__()
+
+        # No kwargs means kwargs may be NULL on the C-level
+        with pytest.raises(TypeError):
+            a.__array_ufunc__(1, 2)
+
+        # And the same with a valid call:
+        res = a.__array_ufunc__(np.add, "__call__", a, a)
+        assert_array_equal(res, a + a)
+
+class TestChoose:
+    def test_mixed(self):
+        c = np.array([True, True])
+        a = np.array([True, True])
+        assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))
+
+
+class TestRationalFunctions:
+    def test_lcm(self):
+        self._test_lcm_inner(np.int16)
+        self._test_lcm_inner(np.uint16)
+
+    def test_lcm_object(self):
+        self._test_lcm_inner(np.object_)
+
+    def test_gcd(self):
+        self._test_gcd_inner(np.int16)
+        self._test_gcd_inner(np.uint16)
+
+    def test_gcd_object(self):
+        self._test_gcd_inner(np.object_)
+
+    def _test_lcm_inner(self, dtype):
+        # basic use
+        a = np.array([12, 120], dtype=dtype)
+        b = np.array([20, 200], dtype=dtype)
+        assert_equal(np.lcm(a, b), [60, 600])
+
+        if not issubclass(dtype, np.unsignedinteger):
+            # negatives are ignored
+            a = np.array([12, -12,  12, -12], dtype=dtype)
+            b = np.array([20,  20, -20, -20], dtype=dtype)
+            assert_equal(np.lcm(a, b), [60]*4)
+
+        # reduce
+        a = np.array([3, 12, 20], dtype=dtype)
+        assert_equal(np.lcm.reduce(a), 60)
+
+        # broadcasting, and a test including 0
+        a = np.arange(6).astype(dtype)
+        b = 20
+        assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20])
+
+    def _test_gcd_inner(self, dtype):
+        # basic use
+        a = np.array([12, 120], dtype=dtype)
+        b = np.array([20, 200], dtype=dtype)
+        assert_equal(np.gcd(a, b), [4, 40])
+
+        if not issubclass(dtype, np.unsignedinteger):
+            # negatives are ignored
+            a = np.array([12, -12,  12, -12], dtype=dtype)
+            b = np.array([20,  20, -20, -20], dtype=dtype)
+            assert_equal(np.gcd(a, b), [4]*4)
+
+        # reduce
+        a = np.array([15, 25, 35], dtype=dtype)
+        assert_equal(np.gcd.reduce(a), 5)
+
+        # broadcasting, and a test including 0
+        a = np.arange(6).astype(dtype)
+        b = 20
+        assert_equal(np.gcd(a, b), [20,  1,  2,  1,  4,  5])
+
+    def test_lcm_overflow(self):
+        # verify that lcm does not overflow even though the naive product a*b would
+        big = np.int32(np.iinfo(np.int32).max // 11)
+        a = 2*big
+        b = 5*big
+        assert_equal(np.lcm(a, b), 10*big)
+
+    def test_gcd_overflow(self):
+        for dtype in (np.int32, np.int64):
+            # verify that we don't overflow when taking abs(x)
+            # not relevant for lcm, where the result is unrepresentable anyway
+            a = dtype(np.iinfo(dtype).min)  # negative power of two
+            q = -(a // 4)
+            assert_equal(np.gcd(a,  q*3), q)
+            assert_equal(np.gcd(a, -q*3), q)
+
+    def test_decimal(self):
+        from decimal import Decimal
+        a = np.array([1,  1, -1, -1]) * Decimal('0.20')
+        b = np.array([1, -1,  1, -1]) * Decimal('0.12')
+
+        assert_equal(np.gcd(a, b), 4*[Decimal('0.04')])
+        assert_equal(np.lcm(a, b), 4*[Decimal('0.60')])
+
+    def test_float(self):
+        # not well-defined on float due to rounding errors
+        assert_raises(TypeError, np.gcd, 0.3, 0.4)
+        assert_raises(TypeError, np.lcm, 0.3, 0.4)
+
+    def test_builtin_long(self):
+        # sanity check that array coercion is alright for builtin longs
+        assert_equal(np.array(2**200).item(), 2**200)
+
+        # expressed as prime factors
+        a = np.array(2**100 * 3**5)
+        b = np.array([2**100 * 5**7, 2**50 * 3**10])
+        assert_equal(np.gcd(a, b), [2**100,               2**50 * 3**5])
+        assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10])
+
+        assert_equal(np.gcd(2**100, 3**100), 1)
+
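+# A worked instance of the identity the lcm/gcd tests above rely on:
+# gcd(a, b) * lcm(a, b) == |a * b|.  For the sample pair used there,
+# gcd(12, 20) = 4 and lcm(12, 20) = 60, and 4 * 60 == 240 == 12 * 20.
+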
+
+class TestRoundingFunctions:
+
+    def test_object_direct(self):
+        """ test direct implementation of these magic methods """
+        class C:
+            def __floor__(self):
+                return 1
+            def __ceil__(self):
+                return 2
+            def __trunc__(self):
+                return 3
+
+        arr = np.array([C(), C()])
+        assert_equal(np.floor(arr), [1, 1])
+        assert_equal(np.ceil(arr),  [2, 2])
+        assert_equal(np.trunc(arr), [3, 3])
+
+    def test_object_indirect(self):
+        """ test implementations via __float__ """
+        class C:
+            def __float__(self):
+                return -2.5
+
+        arr = np.array([C(), C()])
+        assert_equal(np.floor(arr), [-3, -3])
+        assert_equal(np.ceil(arr),  [-2, -2])
+        with pytest.raises(TypeError):
+            np.trunc(arr)  # consistent with math.trunc
+
+    def test_fraction(self):
+        f = Fraction(-4, 3)
+        assert_equal(np.floor(f), -2)
+        assert_equal(np.ceil(f), -1)
+        assert_equal(np.trunc(f), -1)
+
+
+class TestComplexFunctions:
+    funcs = [np.arcsin,  np.arccos,  np.arctan, np.arcsinh, np.arccosh,
+             np.arctanh, np.sin,     np.cos,    np.tan,     np.exp,
+             np.exp2,    np.log,     np.sqrt,   np.log10,   np.log2,
+             np.log1p]
+
+    def test_it(self):
+        for f in self.funcs:
+            if f is np.arccosh:
+                x = 1.5
+            else:
+                x = .5
+            fr = f(x)
+            fz = f(complex(x))
+            assert_almost_equal(fz.real, fr, err_msg='real part %s' % f)
+            assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f)
+
+    @pytest.mark.xfail(IS_MUSL, reason="gh23049")
+    @pytest.mark.xfail(IS_WASM, reason="doesn't work")
+    def test_precisions_consistent(self):
+        z = 1 + 1j
+        for f in self.funcs:
+            fcf = f(np.csingle(z))
+            fcd = f(np.cdouble(z))
+            fcl = f(np.clongdouble(z))
+            assert_almost_equal(fcf, fcd, decimal=6, err_msg='fch-fcd %s' % f)
+            assert_almost_equal(fcl, fcd, decimal=15, err_msg='fch-fcl %s' % f)
+
+    @pytest.mark.xfail(IS_MUSL, reason="gh23049")
+    @pytest.mark.xfail(IS_WASM, reason="doesn't work")
+    def test_branch_cuts(self):
+        # check branch cuts and continuity on them
+        _check_branch_cut(np.log,   -0.5, 1j, 1, -1, True)
+        _check_branch_cut(np.log2,  -0.5, 1j, 1, -1, True)
+        _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True)
+        _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True)
+        _check_branch_cut(np.sqrt,  -0.5, 1j, 1, -1, True)
+
+        _check_branch_cut(np.arcsin, [ -2, 2],   [1j, 1j], 1, -1, True)
+        _check_branch_cut(np.arccos, [ -2, 2],   [1j, 1j], 1, -1, True)
+        _check_branch_cut(np.arctan, [0-2j, 2j],  [1,  1], -1, 1, True)
+
+        _check_branch_cut(np.arcsinh, [0-2j,  2j], [1,   1], -1, 1, True)
+        _check_branch_cut(np.arccosh, [ -1, 0.5], [1j,  1j], 1, -1, True)
+        _check_branch_cut(np.arctanh, [ -2,   2], [1j, 1j], 1, -1, True)
+
+        # check against bogus branch cuts: assert continuity between quadrants
+        _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1,  1], 1, 1)
+        _check_branch_cut(np.arccos, [0-2j, 2j], [ 1,  1], 1, 1)
+        _check_branch_cut(np.arctan, [ -2,  2], [1j, 1j], 1, 1)
+
+        _check_branch_cut(np.arcsinh, [ -2,  2, 0], [1j, 1j, 1], 1, 1)
+        _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1,  1,  1j], 1, 1)
+        _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1,  1,  1j], 1, 1)
+
+    @pytest.mark.xfail(IS_MUSL, reason="gh23049")
+    @pytest.mark.xfail(IS_WASM, reason="doesn't work")
+    def test_branch_cuts_complex64(self):
+        # check branch cuts and continuity on them
+        _check_branch_cut(np.log,   -0.5, 1j, 1, -1, True, np.complex64)
+        _check_branch_cut(np.log2,  -0.5, 1j, 1, -1, True, np.complex64)
+        _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64)
+        _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64)
+        _check_branch_cut(np.sqrt,  -0.5, 1j, 1, -1, True, np.complex64)
+
+        _check_branch_cut(np.arcsin, [ -2, 2],   [1j, 1j], 1, -1, True, np.complex64)
+        _check_branch_cut(np.arccos, [ -2, 2],   [1j, 1j], 1, -1, True, np.complex64)
+        _check_branch_cut(np.arctan, [0-2j, 2j],  [1,  1], -1, 1, True, np.complex64)
+
+        _check_branch_cut(np.arcsinh, [0-2j,  2j], [1,   1], -1, 1, True, np.complex64)
+        _check_branch_cut(np.arccosh, [ -1, 0.5], [1j,  1j], 1, -1, True, np.complex64)
+        _check_branch_cut(np.arctanh, [ -2,   2], [1j, 1j], 1, -1, True, np.complex64)
+
+        # check against bogus branch cuts: assert continuity between quadrants
+        _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1,  1], 1, 1, False, np.complex64)
+        _check_branch_cut(np.arccos, [0-2j, 2j], [ 1,  1], 1, 1, False, np.complex64)
+        _check_branch_cut(np.arctan, [ -2,  2], [1j, 1j], 1, 1, False, np.complex64)
+
+        _check_branch_cut(np.arcsinh, [ -2,  2, 0], [1j, 1j, 1], 1, 1, False, np.complex64)
+        _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1,  1,  1j], 1, 1, False, np.complex64)
+        _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1,  1,  1j], 1, 1, False, np.complex64)
+
+    def test_against_cmath(self):
+        import cmath
+
+        points = [-1-1j, -1+1j, +1-1j, +1+1j]
+        name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
+                    'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
+        atol = 4*np.finfo(complex).eps
+        for func in self.funcs:
+            fname = func.__name__.split('.')[-1]
+            cname = name_map.get(fname, fname)
+            try:
+                cfunc = getattr(cmath, cname)
+            except AttributeError:
+                continue
+            for p in points:
+                a = complex(func(np.complex_(p)))
+                b = cfunc(p)
+                assert_(
+                    abs(a - b) < atol,
+                    "%s %s: %s; cmath: %s" % (fname, p, a, b)
+                )
+
+    @pytest.mark.xfail(
+        # manylinux2014 uses glibc2.17
+        _glibc_older_than("2.18"),
+        reason="Older glibc versions are imprecise (maybe passes with SIMD?)"
+    )
+    @pytest.mark.xfail(IS_MUSL, reason="gh23049")
+    @pytest.mark.xfail(IS_WASM, reason="doesn't work")
+    @pytest.mark.parametrize('dtype', [np.complex64, np.complex_, np.longcomplex])
+    def test_loss_of_precision(self, dtype):
+        """Check loss of precision in complex arc* functions"""
+
+        # Check against known-good functions
+
+        info = np.finfo(dtype)
+        real_dtype = dtype(0.).real.dtype
+        eps = info.eps
+
+        def check(x, rtol):
+            x = x.astype(real_dtype)
+
+            z = x.astype(dtype)
+            d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1)
+            assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
+                                      'arcsinh'))
+
+            z = (1j*x).astype(dtype)
+            d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1)
+            assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
+                                      'arcsin'))
+
+            z = x.astype(dtype)
+            d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1)
+            assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
+                                      'arctanh'))
+
+            z = (1j*x).astype(dtype)
+            d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1)
+            assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
+                                      'arctan'))
+
+        # The switchover was chosen as 1e-3; hence there can be up to
+        # ~eps/1e-3 of relative cancellation error before it
+
+        x_series = np.logspace(-20, -3.001, 200)
+        x_basic = np.logspace(-2.999, 0, 10, endpoint=False)
+
+        if dtype is np.longcomplex:
+            if bad_arcsinh():
+                pytest.skip("Trig functions of np.longcomplex values known "
+                            "to be inaccurate on aarch64 and PPC for some "
+                            "compilation configurations.")
+            # It's not guaranteed that the system-provided arc functions
+            # are accurate down to a few epsilons (e.g. on 64-bit Linux),
+            # so give more leeway for long complex tests here:
+            check(x_series, 50.0*eps)
+        else:
+            check(x_series, 2.1*eps)
+        check(x_basic, 2.0*eps/1e-3)
+
+        # Check a few points
+
+        z = np.array([1e-5*(1+1j)], dtype=dtype)
+        p = 9.999999999333333333e-6 + 1.000000000066666666e-5j
+        d = np.absolute(1-np.arctanh(z)/p)
+        assert_(np.all(d < 1e-15))
+
+        p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j
+        d = np.absolute(1-np.arcsinh(z)/p)
+        assert_(np.all(d < 1e-15))
+
+        p = 9.999999999333333333e-6j + 1.000000000066666666e-5
+        d = np.absolute(1-np.arctan(z)/p)
+        assert_(np.all(d < 1e-15))
+
+        p = 1.0000000000333333333e-5j + 9.999999999666666667e-6
+        d = np.absolute(1-np.arcsin(z)/p)
+        assert_(np.all(d < 1e-15))
+
+        # Check continuity across switchover points
+
+        def check(func, z0, d=1):
+            z0 = np.asarray(z0, dtype=dtype)
+            zp = z0 + abs(z0) * d * eps * 2
+            zm = z0 - abs(z0) * d * eps * 2
+            assert_(np.all(zp != zm), (zp, zm))
+
+            # NB: the cancellation error at the switchover is at least eps
+            good = (abs(func(zp) - func(zm)) < 2*eps)
+            assert_(np.all(good), (func, z0[~good]))
+
+        for func in (np.arcsinh, np.arcsin, np.arctanh, np.arctan):
+            pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in (-1e-3, 0, 1e-3)
+                   if rp != 0 or ip != 0]
+            check(func, pts, 1)
+            check(func, pts, 1j)
+            check(func, pts, 1+1j)
+
+    @np.errstate(all="ignore")
+    def test_promotion_corner_cases(self):
+        for func in self.funcs:
+            assert func(np.float16(1)).dtype == np.float16
+            # Integer to low precision float promotion is a dubious choice:
+            assert func(np.uint8(1)).dtype == np.float16
+            assert func(np.int16(1)).dtype == np.float32
+
+
+class TestAttributes:
+    def test_attributes(self):
+        add = ncu.add
+        assert_equal(add.__name__, 'add')
+        assert_(add.ntypes >= 18)  # don't fail if types added
+        assert_('ii->i' in add.types)
+        assert_equal(add.nin, 2)
+        assert_equal(add.nout, 1)
+        assert_equal(add.identity, 0)
+
+    def test_doc(self):
+        # don't bother checking the long list of kwargs, which are likely to
+        # change
+        assert_(ncu.add.__doc__.startswith(
+            "add(x1, x2, /, out=None, *, where=True"))
+        assert_(ncu.frexp.__doc__.startswith(
+            "frexp(x[, out1, out2], / [, out=(None, None)], *, where=True"))
+
+
+class TestSubclass:
+
+    def test_subclass_op(self):
+
+        class simple(np.ndarray):
+            def __new__(subtype, shape):
+                self = np.ndarray.__new__(subtype, shape, dtype=object)
+                self.fill(0)
+                return self
+
+        a = simple((3, 4))
+        assert_equal(a+a, a)
+
+
+class TestFrompyfunc:
+
+    def test_identity(self):
+        def mul(a, b):
+            return a * b
+
+        # with identity=value
+        mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=1)
+        assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
+        assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1)
+        assert_equal(mul_ufunc.reduce([]), 1)
+
+        # with identity=None (reorderable)
+        mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=None)
+        assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
+        assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1)
+        assert_raises(ValueError, lambda: mul_ufunc.reduce([]))
+
+        # with no identity (not reorderable)
+        mul_ufunc = np.frompyfunc(mul, nin=2, nout=1)
+        assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
+        assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)))
+        assert_raises(ValueError, lambda: mul_ufunc.reduce([]))
+
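+# Note: np.frompyfunc always produces object-dtype results, so the
+# reductions above compare plain Python numbers.  A quick sketch:
+#
+#     mul = np.frompyfunc(lambda a, b: a * b, nin=2, nout=1)
+#     mul([1, 2], [3, 4])    # -> array([3, 8], dtype=object)
+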
+
+def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
+                      dtype=complex):
+    """
+    Check for a branch cut in a function.
+
+    Assert that `x0` lies on a branch cut of function `f` and `f` is
+    continuous from the direction `dx`.
+
+    Parameters
+    ----------
+    f : func
+        Function to check
+    x0 : array-like
+        Point on branch cut
+    dx : array-like
+        Direction to check continuity in
+    re_sign, im_sign : {1, -1}
+        Change of sign of the real or imaginary part expected
+    sig_zero_ok : bool
+        Whether to check if the branch cut respects signed zero (if applicable)
+    dtype : dtype
+        Dtype to check (should be complex)
+
+    """
+    x0 = np.atleast_1d(x0).astype(dtype)
+    dx = np.atleast_1d(dx).astype(dtype)
+
+    if np.dtype(dtype).char == 'F':
+        scale = np.finfo(dtype).eps * 1e2
+        atol = np.float32(1e-2)
+    else:
+        scale = np.finfo(dtype).eps * 1e3
+        atol = 1e-4
+
+    y0 = f(x0)
+    yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx))
+    ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx))
+
+    assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp))
+    assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp))
+    assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))
+    assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))
+
+    if sig_zero_ok:
+        # check that signed zeros also work as a displacement
+        jr = (x0.real == 0) & (dx.real != 0)
+        ji = (x0.imag == 0) & (dx.imag != 0)
+        if np.any(jr):
+            x = x0[jr]
+            x.real = np.NZERO
+            ym = f(x)
+            assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym))
+            assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym))
+
+        if np.any(ji):
+            x = x0[ji]
+            x.imag = np.NZERO
+            ym = f(x)
+            assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym))
+            assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym))
+
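+# For intuition, the sign flip _check_branch_cut measures, sketched for
+# np.sqrt (whose cut lies along the negative real axis):
+#
+#     np.sqrt(complex(-1, 1e-12))     # ~ 5e-13 + 1j  (just above the cut)
+#     np.sqrt(complex(-1, -1e-12))    # ~ 5e-13 - 1j  (imaginary part flips)
+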
+def test_copysign():
+    assert_(np.copysign(1, -1) == -1)
+    with np.errstate(divide="ignore"):
+        assert_(1 / np.copysign(0, -1) < 0)
+        assert_(1 / np.copysign(0, 1) > 0)
+    assert_(np.signbit(np.copysign(np.nan, -1)))
+    assert_(not np.signbit(np.copysign(np.nan, 1)))
+
+def _test_nextafter(t):
+    one = t(1)
+    two = t(2)
+    zero = t(0)
+    eps = np.finfo(t).eps
+    assert_(np.nextafter(one, two) - one == eps)
+    assert_(np.nextafter(one, zero) - one < 0)
+    assert_(np.isnan(np.nextafter(np.nan, one)))
+    assert_(np.isnan(np.nextafter(one, np.nan)))
+    assert_(np.nextafter(one, one) == one)
+
+def test_nextafter():
+    return _test_nextafter(np.float64)
+
+
+def test_nextafterf():
+    return _test_nextafter(np.float32)
+
+
+@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+                    reason="long double is same as double")
+@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"),
+                    reason="IBM double double")
+def test_nextafterl():
+    return _test_nextafter(np.longdouble)
+
+
+def test_nextafter_0():
+    for t, direction in itertools.product(np.sctypes['float'], (1, -1)):
+        # The value of tiny for double double is NaN, so we need to skip the
+        # assert in that case
+        with suppress_warnings() as sup:
+            sup.filter(UserWarning)
+            if not np.isnan(np.finfo(t).tiny):
+                tiny = np.finfo(t).tiny
+                assert_(
+                    0. < direction * np.nextafter(t(0), t(direction)) < tiny)
+        assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0)
+
+def _test_spacing(t):
+    one = t(1)
+    eps = np.finfo(t).eps
+    nan = t(np.nan)
+    inf = t(np.inf)
+    with np.errstate(invalid='ignore'):
+        assert_equal(np.spacing(one), eps)
+        assert_(np.isnan(np.spacing(nan)))
+        assert_(np.isnan(np.spacing(inf)))
+        assert_(np.isnan(np.spacing(-inf)))
+        assert_(np.spacing(t(1e30)) != 0)
+
+def test_spacing():
+    return _test_spacing(np.float64)
+
+def test_spacingf():
+    return _test_spacing(np.float32)
+
+
+@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+                    reason="long double is same as double")
+@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"),
+                    reason="IBM double double")
+def test_spacingl():
+    return _test_spacing(np.longdouble)
+
+def test_spacing_gfortran():
+    # Reference values from the following Fortran program, built with
+    # gfortran 4.3.3 on 32-bit Linux:
+    #       PROGRAM test_spacing
+    #        INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37)
+    #        INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200)
+    #
+    #        WRITE(*,*) spacing(0.00001_DBL)
+    #        WRITE(*,*) spacing(1.0_DBL)
+    #        WRITE(*,*) spacing(1000._DBL)
+    #        WRITE(*,*) spacing(10500._DBL)
+    #
+    #        WRITE(*,*) spacing(0.00001_SGL)
+    #        WRITE(*,*) spacing(1.0_SGL)
+    #        WRITE(*,*) spacing(1000._SGL)
+    #        WRITE(*,*) spacing(10500._SGL)
+    #       END PROGRAM
+    ref = {np.float64: [1.69406589450860068E-021,
+                        2.22044604925031308E-016,
+                        1.13686837721616030E-013,
+                        1.81898940354585648E-012],
+           np.float32: [9.09494702E-13,
+                        1.19209290E-07,
+                        6.10351563E-05,
+                        9.76562500E-04]}
+
+    for dt, dec_ in zip([np.float32, np.float64], (10, 20)):
+        x = np.array([1e-5, 1, 1000, 10500], dtype=dt)
+        assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_)
+
+def test_nextafter_vs_spacing():
+    # XXX: spacing does not handle long double yet
+    for t in [np.float32, np.float64]:
+        for _f in [1, 1e-5, 1000]:
+            f = t(_f)
+            f1 = t(_f + 1)
+            assert_(np.nextafter(f, f1) - f == np.spacing(f))
+
+def test_pos_nan():
+    """Check np.nan is a positive nan."""
+    assert_(np.signbit(np.nan) == 0)
+
+def test_reduceat():
+    """Test bug in reduceat when structured arrays are not copied."""
+    db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)])
+    a = np.empty([100], dtype=db)
+    a['name'] = 'Simple'
+    a['time'] = 10
+    a['value'] = 100
+    indx = [0, 7, 15, 25]
+
+    h2 = []
+    val1 = indx[0]
+    for val2 in indx[1:]:
+        h2.append(np.add.reduce(a['value'][val1:val2]))
+        val1 = val2
+    h2.append(np.add.reduce(a['value'][val1:]))
+    h2 = np.array(h2)
+
+    # test buffered -- this should work
+    h1 = np.add.reduceat(a['value'], indx)
+    assert_array_almost_equal(h1, h2)
+
+    # This is when the error occurs.
+    # test no buffer
+    np.setbufsize(32)
+    h1 = np.add.reduceat(a['value'], indx)
+    np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT)
+    assert_array_almost_equal(h1, h2)
+
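+# For reference, a sketch of the reduceat semantics the loop above mirrors:
+# each index opens a slice that runs to the next index (or to the end):
+#
+#     np.add.reduceat(np.arange(8), [0, 4, 6])
+#     # -> array([ 6,  9, 13])    # sums over [0:4], [4:6], [6:]
+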
+def test_reduceat_empty():
+    """Reduceat should work with empty arrays"""
+    indices = np.array([], 'i4')
+    x = np.array([], 'f8')
+    result = np.add.reduceat(x, indices)
+    assert_equal(result.dtype, x.dtype)
+    assert_equal(result.shape, (0,))
+    # Another case with a slightly different zero-sized shape
+    x = np.ones((5, 2))
+    result = np.add.reduceat(x, [], axis=0)
+    assert_equal(result.dtype, x.dtype)
+    assert_equal(result.shape, (0, 2))
+    result = np.add.reduceat(x, [], axis=1)
+    assert_equal(result.dtype, x.dtype)
+    assert_equal(result.shape, (5, 0))
+
+def test_complex_nan_comparisons():
+    nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)]
+    fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1),
+            complex(1, 1), complex(-1, -1), complex(0, 0)]
+
+    with np.errstate(invalid='ignore'):
+        for x in nans + fins:
+            x = np.array([x])
+            for y in nans + fins:
+                y = np.array([y])
+
+                if np.isfinite(x) and np.isfinite(y):
+                    continue
+
+                assert_equal(x < y, False, err_msg="%r < %r" % (x, y))
+                assert_equal(x > y, False, err_msg="%r > %r" % (x, y))
+                assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y))
+                assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y))
+                assert_equal(x == y, False, err_msg="%r == %r" % (x, y))
+
+
+def test_rint_big_int():
+    # np.rint bug for large integer values on Windows 32-bit and MKL
+    # https://github.com/numpy/numpy/issues/6685
+    val = 4607998452777363968
+    # This is exactly representable in floating point
+    assert_equal(val, int(float(val)))
+    # Rint should not change the value
+    assert_equal(val, np.rint(val))
+
+
+@pytest.mark.parametrize('ftype', [np.float32, np.float64])
+def test_memoverlap_accumulate(ftype):
+    # Reproduces bug https://github.com/numpy/numpy/issues/15597
+    arr = np.array([0.61, 0.60, 0.77, 0.41, 0.19], dtype=ftype)
+    out_max = np.array([0.61, 0.61, 0.77, 0.77, 0.77], dtype=ftype)
+    out_min = np.array([0.61, 0.60, 0.60, 0.41, 0.19], dtype=ftype)
+    assert_equal(np.maximum.accumulate(arr), out_max)
+    assert_equal(np.minimum.accumulate(arr), out_min)
+
+@pytest.mark.parametrize("ufunc, dtype", [
+    (ufunc, t[0])
+    for ufunc in UFUNCS_BINARY_ACC
+    for t in ufunc.types
+    if t[-1] == '?' and t[0] not in 'DFGMmO'
+])
+def test_memoverlap_accumulate_cmp(ufunc, dtype):
+    if ufunc.signature:
+        pytest.skip('For generic signatures only')
+    for size in (2, 8, 32, 64, 128, 256):
+        arr = np.array([0, 1, 1]*size, dtype=dtype)
+        acc = ufunc.accumulate(arr, dtype='?')
+        acc_u8 = acc.view(np.uint8)
+        exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=np.uint8)
+        assert_equal(exp, acc_u8)
+
+@pytest.mark.parametrize("ufunc, dtype", [
+    (ufunc, t[0])
+    for ufunc in UFUNCS_BINARY_ACC
+    for t in ufunc.types
+    if t[0] == t[1] and t[0] == t[-1] and t[0] not in 'DFGMmO?'
+])
+def test_memoverlap_accumulate_symmetric(ufunc, dtype):
+    if ufunc.signature:
+        pytest.skip('For generic signatures only')
+    with np.errstate(all='ignore'):
+        for size in (2, 8, 32, 64, 128, 256):
+            arr = np.array([0, 1, 2]*size).astype(dtype)
+            acc = ufunc.accumulate(arr, dtype=dtype)
+            exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=dtype)
+            assert_equal(exp, acc)
+
+def test_signaling_nan_exceptions():
+    with assert_no_warnings():
+        a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff')
+        np.isnan(a)
+
+@pytest.mark.parametrize("arr", [
+    np.arange(2),
+    np.matrix([0, 1]),
+    np.matrix([[0, 1], [2, 5]]),
+    ])
+def test_outer_subclass_preserve(arr):
+    # for gh-8661
+    class foo(np.ndarray): pass
+    actual = np.multiply.outer(arr.view(foo), arr.view(foo))
+    assert actual.__class__.__name__ == 'foo'
+
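+# outer pairs every element of the first operand with every element of the
+# second, so the result shape is the concatenation of the operand shapes --
+# a quick sketch:
+#
+#     np.multiply.outer([1, 2], [10, 20, 30]).shape    # -> (2, 3)
+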
+def test_outer_bad_subclass():
+    class BadArr1(np.ndarray):
+        def __array_finalize__(self, obj):
+            # The outer call reshapes to 3 dims, try to do a bad reshape.
+            if self.ndim == 3:
+                self.shape = self.shape + (1,)
+
+        def __array_prepare__(self, obj, context=None):
+            return obj
+
+    class BadArr2(np.ndarray):
+        def __array_finalize__(self, obj):
+            if isinstance(obj, BadArr2):
+                # outer inserts 1-sized dims. In that case disturb them.
+                if self.shape[-1] == 1:
+                    self.shape = self.shape[::-1]
+
+        def __array_prepare__(self, obj, context=None):
+            return obj
+
+    for cls in [BadArr1, BadArr2]:
+        arr = np.ones((2, 3)).view(cls)
+        with assert_raises(TypeError) as a:
+            # The first array gets reshaped (not the second one)
+            np.add.outer(arr, [1, 2])
+
+        # This actually works, since we only see the reshaping error:
+        arr = np.ones((2, 3)).view(cls)
+        assert type(np.add.outer([1, 2], arr)) is cls
+
+def test_outer_exceeds_maxdims():
+    deep = np.ones((1,) * 17)
+    with assert_raises(ValueError):
+        np.add.outer(deep, deep)
+
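+# (ndarray supports at most 32 dimensions in this NumPy version, so the
+# 17 + 17 = 34 dimensions that outer would produce here must raise.)
+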
+def test_bad_legacy_ufunc_silent_errors():
+    # legacy ufuncs can't report errors and NumPy can't check if the GIL
+    # is released.  So NumPy has to check after the GIL is released just to
+    # cover all bases.  `np.power` uses, or used to use, this path.
+    arr = np.arange(3).astype(np.float64)
+
+    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+        ncu_tests.always_error(arr, arr)
+
+    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+        # not contiguous means the fast-path cannot be taken
+        non_contig = arr.repeat(20).reshape(-1, 6)[:, ::2]
+        ncu_tests.always_error(non_contig, arr)
+
+    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+        ncu_tests.always_error.outer(arr, arr)
+
+    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+        ncu_tests.always_error.reduce(arr)
+
+    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+        ncu_tests.always_error.reduceat(arr, [0, 1])
+
+    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+        ncu_tests.always_error.accumulate(arr)
+
+    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+        ncu_tests.always_error.at(arr, [0, 1, 2], arr)
+
+
+@pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]])
+def test_bad_legacy_gufunc_silent_errors(x1):
+    # Verify that an exception raised in a gufunc loop propagates correctly.
+    # The signature of always_error_gufunc is '(i),()->()'.
+    with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+        ncu_tests.always_error_gufunc(x1, 0.0)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_umath_accuracy.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_umath_accuracy.py
new file mode 100644
index 00000000..6ee4d2fe
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_umath_accuracy.py
@@ -0,0 +1,75 @@
+import numpy as np
+import os
+from os import path
+import sys
+import pytest
+from ctypes import c_longlong, c_double, c_float, c_int, cast, pointer, POINTER
+from numpy.testing import assert_array_max_ulp
+from numpy.testing._private.utils import _glibc_older_than
+from numpy.core._multiarray_umath import __cpu_features__
+
+UNARY_UFUNCS = [obj for obj in np.core.umath.__dict__.values() if
+        isinstance(obj, np.ufunc)]
+UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types]
+UNARY_OBJECT_UFUNCS.remove(getattr(np, 'invert'))
+
+IS_AVX = __cpu_features__.get('AVX512F', False) or \
+        (__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False))
+# only run on linux with AVX, also avoid old glibc (numpy/numpy#20448).
+runtest = (sys.platform.startswith('linux')
+           and IS_AVX and not _glibc_older_than("2.17"))
+platform_skip = pytest.mark.skipif(not runtest,
+                                   reason="avoid testing inconsistent platform "
+                                   "library implementations")
+
+# hex-string-to-float conversion, adapted from:
+# https://stackoverflow.com/questions/1592158/convert-hex-to-float
+def convert(s, datatype="np.float32"):
+    i = int(s, 16)                   # convert from hex to a Python int
+    if (datatype == "np.float64"):
+        cp = pointer(c_longlong(i))           # make this into a c long long integer
+        fp = cast(cp, POINTER(c_double))  # cast the int pointer to a double pointer
+    else:
+        cp = pointer(c_int(i))           # make this into a c integer
+        fp = cast(cp, POINTER(c_float))  # cast the int pointer to a float pointer
+
+    return fp.contents.value         # dereference the pointer, get the float
+
+str_to_float = np.vectorize(convert)
+
+class TestAccuracy:
+    @platform_skip
+    def test_validate_transcendentals(self):
+        with np.errstate(all='ignore'):
+            data_dir = path.join(path.dirname(__file__), 'data')
+            files = os.listdir(data_dir)
+            files = list(filter(lambda f: f.endswith('.csv'), files))
+            for filename in files:
+                filepath = path.join(data_dir, filename)
+                with open(filepath) as fid:
+                    file_without_comments = (r for r in fid if not r[0] in ('$', '#'))
+                    data = np.genfromtxt(file_without_comments,
+                                         dtype=('|S39','|S39','|S39',int),
+                                         names=('type','input','output','ulperr'),
+                                         delimiter=',',
+                                         skip_header=1)
+                    npname = path.splitext(filename)[0].split('-')[3]
+                    npfunc = getattr(np, npname)
+                    for datatype in np.unique(data['type']):
+                        data_subset = data[data['type'] == datatype]
+                        inval  = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype))
+                        outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype))
+                        perm = np.random.permutation(len(inval))
+                        inval = inval[perm]
+                        outval = outval[perm]
+                        maxulperr = data_subset['ulperr'].max()
+                        assert_array_max_ulp(npfunc(inval), outval, maxulperr)
+
+    @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS)
+    def test_validate_fp16_transcendentals(self, ufunc):
+        with np.errstate(all='ignore'):
+            arr = np.arange(65536, dtype=np.int16)
+            datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16)
+            datafp32 = datafp16.astype(np.float32)
+            assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32),
+                    maxulp=1, dtype=np.float16)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_umath_complex.py b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_umath_complex.py
new file mode 100644
index 00000000..e5430058
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/core/tests/test_umath_complex.py
@@ -0,0 +1,622 @@
+import sys
+import platform
+import pytest
+
+import numpy as np
+# import the c-extension module directly since _arg is not exported via umath
+import numpy.core._multiarray_umath as ncu
+from numpy.testing import (
+    assert_raises, assert_equal, assert_array_equal, assert_almost_equal, assert_array_max_ulp
+    )
+
+# TODO: branch cuts (use Pauli code)
+# TODO: conj 'symmetry'
+# TODO: FPU exceptions
+
+# At least on Windows the results of many complex functions are not conforming
+# to the C99 standard. See ticket 1574.
+# Ditto for Solaris (ticket 1642) and OS X on PowerPC.
+# FIXME: this will probably change when we require full C99 compatibility
+with np.errstate(all='ignore'):
+    functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0)
+                            or (np.log(complex(np.NZERO, 0)).imag != np.pi))
+# TODO: replace with a check on whether platform-provided C99 funcs are used
+xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)
+
+# TODO: this can become an xfail once the generator functions are removed.
+platform_skip = pytest.mark.skipif(xfail_complex_tests,
+                                   reason="Inadequate C99 complex support")
+
+
+
+class TestCexp:
+    def test_simple(self):
+        check = check_complex_value
+        f = np.exp
+
+        check(f, 1, 0, np.exp(1), 0, False)
+        check(f, 0, 1, np.cos(1), np.sin(1), False)
+
+        ref = np.exp(1) * complex(np.cos(1), np.sin(1))
+        check(f, 1, 1, ref.real, ref.imag, False)
+
+    @platform_skip
+    def test_special_values(self):
+        # C99: Section G 6.3.1
+
+        check = check_complex_value
+        f = np.exp
+
+        # cexp(+-0 + 0i) is 1 + 0i
+        check(f, np.PZERO, 0, 1, 0, False)
+        check(f, np.NZERO, 0, 1, 0, False)
+
+        # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU
+        # exception
+        check(f,  1, np.inf, np.nan, np.nan)
+        check(f, -1, np.inf, np.nan, np.nan)
+        check(f,  0, np.inf, np.nan, np.nan)
+
+        # cexp(inf + 0i) is inf + 0i
+        check(f,  np.inf, 0, np.inf, 0)
+
+        # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y
+        check(f,  -np.inf, 1, np.PZERO, np.PZERO)
+        check(f,  -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO)
+
+        # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y
+        check(f,  np.inf, 1, np.inf, np.inf)
+        check(f,  np.inf, 0.75 * np.pi, -np.inf, np.inf)
+
+        # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
+        def _check_ninf_inf(dummy):
+            msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(-np.inf, np.inf)))
+                if z.real != 0 or z.imag != 0:
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_ninf_inf(None)
+
+        # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex.
+        def _check_inf_inf(dummy):
+            msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(np.inf, np.inf)))
+                if not np.isinf(z.real) or not np.isnan(z.imag):
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_inf_inf(None)
+
+        # cexp(-inf + nan i) is +-0 +- 0i
+        def _check_ninf_nan(dummy):
+            msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(-np.inf, np.nan)))
+                if z.real != 0 or z.imag != 0:
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_ninf_nan(None)
+
+        # cexp(inf + nan i) is +-inf + nan
+        def _check_inf_nan(dummy):
+            msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)"
+            with np.errstate(invalid='ignore'):
+                z = f(np.array(complex(np.inf, np.nan)))
+                if not np.isinf(z.real) or not np.isnan(z.imag):
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_inf_nan(None)
+
+        # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU
+        # ex)
+        check(f, np.nan, 1, np.nan, np.nan)
+        check(f, np.nan, -1, np.nan, np.nan)
+
+        check(f, np.nan,  np.inf, np.nan, np.nan)
+        check(f, np.nan, -np.inf, np.nan, np.nan)
+
+        # cexp(nan + nani) is nan + nani
+        check(f, np.nan, np.nan, np.nan, np.nan)
+
+    # TODO: this can become an xfail once the generator functions are removed.
+    @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms")
+    def test_special_values2(self):
+        # XXX: most implementations get it wrong here (including glibc <= 2.10)
+        # cexp(nan + 0i) is nan + 0i
+        check = check_complex_value
+        f = np.exp
+
+        check(f, np.nan, 0, np.nan, 0)
+
+class TestClog:
+    def test_simple(self):
+        x = np.array([1+0j, 1+2j])
+        y_r = np.log(np.abs(x)) + 1j * np.angle(x)
+        y = np.log(x)
+        assert_almost_equal(y, y_r)
+
+    @platform_skip
+    @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
+    def test_special_values(self):
+        xl = []
+        yl = []
+
+        # From C99 std (Sec 6.3.2)
+        # XXX: check exceptions raised
+        # --- raise for invalid fails.
+
+        # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
+        # floating-point exception.
+        with np.errstate(divide='raise'):
+            x = np.array([np.NZERO], dtype=complex)
+            y = complex(-np.inf, np.pi)
+            assert_raises(FloatingPointError, np.log, x)
+        with np.errstate(divide='ignore'):
+            assert_almost_equal(np.log(x), y)
+
+        xl.append(x)
+        yl.append(y)
+
+        # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
+        # floating-point exception.
+        with np.errstate(divide='raise'):
+            x = np.array([0], dtype=complex)
+            y = complex(-np.inf, 0)
+            assert_raises(FloatingPointError, np.log, x)
+        with np.errstate(divide='ignore'):
+            assert_almost_equal(np.log(x), y)
+
+        xl.append(x)
+        yl.append(y)
+
+        # clog(x + i inf) returns +inf + i pi/2, for finite x.
+        x = np.array([complex(1, np.inf)], dtype=complex)
+        y = complex(np.inf, 0.5 * np.pi)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        x = np.array([complex(-1, np.inf)], dtype=complex)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(x + iNaN) returns NaN + iNaN and optionally raises the
+        # 'invalid' floating-point exception, for finite x.
+        with np.errstate(invalid='raise'):
+            x = np.array([complex(1., np.nan)], dtype=complex)
+            y = complex(np.nan, np.nan)
+            #assert_raises(FloatingPointError, np.log, x)
+        with np.errstate(invalid='ignore'):
+            assert_almost_equal(np.log(x), y)
+
+        xl.append(x)
+        yl.append(y)
+
+        with np.errstate(invalid='raise'):
+            x = np.array([np.inf + 1j * np.nan], dtype=complex)
+            #assert_raises(FloatingPointError, np.log, x)
+        with np.errstate(invalid='ignore'):
+            assert_almost_equal(np.log(x), y)
+
+        xl.append(x)
+        yl.append(y)
+
+        # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y.
+        x = np.array([-np.inf + 1j], dtype=complex)
+        y = complex(np.inf, np.pi)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y.
+        x = np.array([np.inf + 1j], dtype=complex)
+        y = complex(np.inf, 0)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(- inf + i inf) returns +inf + i3pi /4.
+        x = np.array([complex(-np.inf, np.inf)], dtype=complex)
+        y = complex(np.inf, 0.75 * np.pi)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(+ inf + i inf) returns +inf + ipi /4.
+        x = np.array([complex(np.inf, np.inf)], dtype=complex)
+        y = complex(np.inf, 0.25 * np.pi)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(+/- inf + iNaN) returns +inf + iNaN.
+        x = np.array([complex(np.inf, np.nan)], dtype=complex)
+        y = complex(np.inf, np.nan)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        x = np.array([complex(-np.inf, np.nan)], dtype=complex)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(NaN + iy) returns NaN + iNaN and optionally raises the
+        # 'invalid' floating-point exception, for finite y.
+        x = np.array([complex(np.nan, 1)], dtype=complex)
+        y = complex(np.nan, np.nan)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(NaN + i inf) returns +inf + iNaN.
+        x = np.array([complex(np.nan, np.inf)], dtype=complex)
+        y = complex(np.inf, np.nan)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(NaN + iNaN) returns NaN + iNaN.
+        x = np.array([complex(np.nan, np.nan)], dtype=complex)
+        y = complex(np.nan, np.nan)
+        assert_almost_equal(np.log(x), y)
+        xl.append(x)
+        yl.append(y)
+
+        # clog(conj(z)) = conj(clog(z)).
+        xa = np.array(xl, dtype=complex)
+        ya = np.array(yl, dtype=complex)
+        with np.errstate(divide='ignore'):
+            for i in range(len(xa)):
+                assert_almost_equal(np.log(xa[i].conj()), ya[i].conj())
+
+
+class TestCsqrt:
+
+    def test_simple(self):
+        # sqrt(1)
+        check_complex_value(np.sqrt, 1, 0, 1, 0)
+
+        # sqrt(1i)
+        rres = 0.5*np.sqrt(2)
+        ires = rres
+        check_complex_value(np.sqrt, 0, 1, rres, ires, False)
+
+        # sqrt(-1)
+        check_complex_value(np.sqrt, -1, 0, 0, 1)
+
+    def test_simple_conjugate(self):
+        ref = np.conj(np.sqrt(complex(1, 1)))
+
+        def f(z):
+            return np.sqrt(np.conj(z))
+
+        check_complex_value(f, 1, 1, ref.real, ref.imag, False)
+
+    #def test_branch_cut(self):
+    #    _check_branch_cut(f, -1, 0, 1, -1)
+
+    @platform_skip
+    def test_special_values(self):
+        # C99: Sec G 6.4.2
+
+        check = check_complex_value
+        f = np.sqrt
+
+        # csqrt(+-0 + 0i) is 0 + 0i
+        check(f, np.PZERO, 0, 0, 0)
+        check(f, np.NZERO, 0, 0, 0)
+
+        # csqrt(x + infi) is inf + infi for any x (including NaN)
+        check(f,  1, np.inf, np.inf, np.inf)
+        check(f, -1, np.inf, np.inf, np.inf)
+
+        check(f, np.PZERO, np.inf, np.inf, np.inf)
+        check(f, np.NZERO, np.inf, np.inf, np.inf)
+        check(f,   np.inf, np.inf, np.inf, np.inf)
+        check(f,  -np.inf, np.inf, np.inf, np.inf)
+        check(f,  -np.nan, np.inf, np.inf, np.inf)
+
+        # csqrt(x + nani) is nan + nani for any finite x
+        check(f,  1, np.nan, np.nan, np.nan)
+        check(f, -1, np.nan, np.nan, np.nan)
+        check(f,  0, np.nan, np.nan, np.nan)
+
+        # csqrt(-inf + yi) is +0 + infi for any finite y > 0
+        check(f, -np.inf, 1, np.PZERO, np.inf)
+
+        # csqrt(inf + yi) is +inf + 0i for any finite y > 0
+        check(f, np.inf, 1, np.inf, np.PZERO)
+
+        # csqrt(-inf + nani) is nan +- infi (both +infi and -infi are valid)
+        def _check_ninf_nan(dummy):
+            msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)"
+            z = np.sqrt(np.array(complex(-np.inf, np.nan)))
+            # FIXME: ugly workaround for isinf bug.
+            with np.errstate(invalid='ignore'):
+                if not (np.isnan(z.real) and np.isinf(z.imag)):
+                    raise AssertionError(msgform % (z.real, z.imag))
+
+        _check_ninf_nan(None)
+
+        # csqrt(+inf + nani) is inf + nani
+        check(f, np.inf, np.nan, np.inf, np.nan)
+
+        # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x
+        # + nani)
+        check(f, np.nan,       0, np.nan, np.nan)
+        check(f, np.nan,       1, np.nan, np.nan)
+        check(f, np.nan,  np.nan, np.nan, np.nan)
+
+        # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch
+        # cuts first)
+
+class TestCpow:
+    def setup_method(self):
+        self.olderr = np.seterr(invalid='ignore')
+
+    def teardown_method(self):
+        np.seterr(**self.olderr)
+
+    def test_simple(self):
+        x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
+        y_r = x ** 2
+        y = np.power(x, 2)
+        assert_almost_equal(y, y_r)
+
+    def test_scalar(self):
+        x = np.array([1, 1j,         2,  2.5+.37j, np.inf, np.nan])
+        y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j,      2,      3])
+        lx = list(range(len(x)))
+
+        # Hardcode the expected `builtins.complex` values,
+        # as complex exponentiation is broken as of bpo-44698
+        p_r = [
+            1+0j,
+            0.20787957635076193+0j,
+            0.35812203996480685+0.6097119028618724j,
+            0.12659112128185032+0.48847676699581527j,
+            complex(np.inf, np.nan),
+            complex(np.nan, np.nan),
+        ]
+
+        n_r = [x[i] ** y[i] for i in lx]
+        for i in lx:
+            assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
+
+    def test_array(self):
+        x = np.array([1, 1j,         2,  2.5+.37j, np.inf, np.nan])
+        y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j,      2,      3])
+        lx = list(range(len(x)))
+
+        # Hardcode the expected `builtins.complex` values,
+        # as complex exponentiation is broken as of bpo-44698
+        p_r = [
+            1+0j,
+            0.20787957635076193+0j,
+            0.35812203996480685+0.6097119028618724j,
+            0.12659112128185032+0.48847676699581527j,
+            complex(np.inf, np.nan),
+            complex(np.nan, np.nan),
+        ]
+
+        n_r = x ** y
+        for i in lx:
+            assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
+
+class TestCabs:
+    def setup_method(self):
+        self.olderr = np.seterr(invalid='ignore')
+
+    def teardown_method(self):
+        np.seterr(**self.olderr)
+
+    def test_simple(self):
+        x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
+        y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan])
+        y = np.abs(x)
+        assert_almost_equal(y, y_r)
+
+    def test_fabs(self):
+        # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs)
+        x = np.array([1+0j], dtype=complex)
+        assert_array_equal(np.abs(x), np.real(x))
+
+        x = np.array([complex(1, np.NZERO)], dtype=complex)
+        assert_array_equal(np.abs(x), np.real(x))
+
+        x = np.array([complex(np.inf, np.NZERO)], dtype=complex)
+        assert_array_equal(np.abs(x), np.real(x))
+
+        x = np.array([complex(np.nan, np.NZERO)], dtype=complex)
+        assert_array_equal(np.abs(x), np.real(x))
+
+    def test_cabs_inf_nan(self):
+        x, y = [], []
+
+        # cabs(+-nan + nani) returns nan
+        x.append(np.nan)
+        y.append(np.nan)
+        check_real_value(np.abs,  np.nan, np.nan, np.nan)
+
+        x.append(np.nan)
+        y.append(-np.nan)
+        check_real_value(np.abs, -np.nan, np.nan, np.nan)
+
+        # According to the C99 standard, if exactly one of the real/imaginary
+        # parts is inf and the other is nan, then cabs should return inf
+        x.append(np.inf)
+        y.append(np.nan)
+        check_real_value(np.abs,  np.inf, np.nan, np.inf)
+
+        x.append(-np.inf)
+        y.append(np.nan)
+        check_real_value(np.abs, -np.inf, np.nan, np.inf)
+
+        # cabs(conj(z)) == conj(cabs(z)) (= cabs(z))
+        def f(a):
+            return np.abs(np.conj(a))
+
+        def g(a, b):
+            return np.abs(complex(a, b))
+
+        xa = np.array(x, dtype=complex)
+        assert len(xa) == len(x) == len(y)
+        for xi, yi in zip(x, y):
+            ref = g(xi, yi)
+            check_real_value(f, xi, yi, ref)
+
+class TestCarg:
+    def test_simple(self):
+        check_real_value(ncu._arg, 1, 0, 0, False)
+        check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False)
+
+        check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False)
+        check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
+
+    # TODO: this can become an xfail once the generator functions are removed.
+    @pytest.mark.skip(
+        reason="Complex arithmetic with signed zero fails on most platforms")
+    def test_zero(self):
+        # carg(-0 +- 0i) returns +- pi
+        check_real_value(ncu._arg, np.NZERO, np.PZERO,  np.pi, False)
+        check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False)
+
+        # carg(+0 +- 0i) returns +- 0
+        check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
+        check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO)
+
+        # carg(x +- 0i) returns +- 0 for x > 0
+        check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False)
+        check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False)
+
+        # carg(x +- 0i) returns +- pi for x < 0
+        check_real_value(ncu._arg, -1, np.PZERO,  np.pi, False)
+        check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False)
+
+        # carg(+- 0 + yi) returns pi/2 for y > 0
+        check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False)
+        check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False)
+
+        # carg(+- 0 + yi) returns -pi/2 for y < 0
+        check_real_value(ncu._arg, np.PZERO, -1, 0.5 * np.pi, False)
+        check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False)
+
+    #def test_branch_cuts(self):
+    #    _check_branch_cut(ncu._arg, -1, 1j, -1, 1)
+
+    def test_special_values(self):
+        # carg(-np.inf +- yi) returns +-pi for finite y > 0
+        check_real_value(ncu._arg, -np.inf,  1,  np.pi, False)
+        check_real_value(ncu._arg, -np.inf, -1, -np.pi, False)
+
+        # carg(np.inf +- yi) returns +-0 for finite y > 0
+        check_real_value(ncu._arg, np.inf,  1, np.PZERO, False)
+        check_real_value(ncu._arg, np.inf, -1, np.NZERO, False)
+
+        # carg(x +- np.infi) returns +-pi/2 for finite x
+        check_real_value(ncu._arg, 1,  np.inf,  0.5 * np.pi, False)
+        check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False)
+
+        # carg(-np.inf +- np.infi) returns +-3pi/4
+        check_real_value(ncu._arg, -np.inf,  np.inf,  0.75 * np.pi, False)
+        check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False)
+
+        # carg(np.inf +- np.infi) returns +-pi/4
+        check_real_value(ncu._arg, np.inf,  np.inf,  0.25 * np.pi, False)
+        check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False)
+
+        # carg(x + yi) returns np.nan if x or y is nan
+        check_real_value(ncu._arg, np.nan,      0, np.nan, False)
+        check_real_value(ncu._arg,      0, np.nan, np.nan, False)
+
+        check_real_value(ncu._arg, np.nan, np.inf, np.nan, False)
+        check_real_value(ncu._arg, np.inf, np.nan, np.nan, False)
+
+
+def check_real_value(f, x1, y1, x, exact=True):
+    z1 = np.array([complex(x1, y1)])
+    if exact:
+        assert_equal(f(z1), x)
+    else:
+        assert_almost_equal(f(z1), x)
+
+
+def check_complex_value(f, x1, y1, x2, y2, exact=True):
+    z1 = np.array([complex(x1, y1)])
+    z2 = complex(x2, y2)
+    with np.errstate(invalid='ignore'):
+        if exact:
+            assert_equal(f(z1), z2)
+        else:
+            assert_almost_equal(f(z1), z2)
+
+class TestSpecialComplexAVX:
+    @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
+    @pytest.mark.parametrize("astype", [np.complex64, np.complex128])
+    def test_array(self, stride, astype):
+        arr = np.array([complex(np.nan , np.nan),
+                        complex(np.nan , np.inf),
+                        complex(np.inf , np.nan),
+                        complex(np.inf , np.inf),
+                        complex(0.     , np.inf),
+                        complex(np.inf , 0.),
+                        complex(0.     , 0.),
+                        complex(0.     , np.nan),
+                        complex(np.nan , 0.)], dtype=astype)
+        abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype)
+        sq_true = np.array([complex(np.nan,  np.nan),
+                            complex(np.nan,  np.nan),
+                            complex(np.nan,  np.nan),
+                            complex(np.nan,  np.inf),
+                            complex(-np.inf, np.nan),
+                            complex(np.inf,  np.nan),
+                            complex(0.,     0.),
+                            complex(np.nan, np.nan),
+                            complex(np.nan, np.nan)], dtype=astype)
+        with np.errstate(invalid='ignore'):
+            assert_equal(np.abs(arr[::stride]), abs_true[::stride])
+            assert_equal(np.square(arr[::stride]), sq_true[::stride])
+
+class TestComplexAbsoluteAVX:
+    @pytest.mark.parametrize("arraysize", [1,2,3,4,5,6,7,8,9,10,11,13,15,17,18,19])
+    @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4])
+    @pytest.mark.parametrize("astype", [np.complex64, np.complex128])
+    # test to ensure masking and strides work as intended in the AVX implementation
+    def test_array(self, arraysize, stride, astype):
+        arr = np.ones(arraysize, dtype=astype)
+        abs_true = np.ones(arraysize, dtype=arr.real.dtype)
+        assert_equal(np.abs(arr[::stride]), abs_true[::stride])
+
+# Testcase taken as is from https://github.com/numpy/numpy/issues/16660
+class TestComplexAbsoluteMixedDTypes:
+    @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4])
+    @pytest.mark.parametrize("astype", [np.complex64, np.complex128])
+    @pytest.mark.parametrize("func", ['abs', 'square', 'conjugate'])
+
+    def test_array(self, stride, astype, func):
+        # structured dtype with a complex field embedded among other field
+        # types (layout modeled on gh-16660; the values below are
+        # representative placeholders, not the original test data)
+        dtype = [('template_id', '<i8'), ('bank_chisq', '<f4'),
+                 ('mycomplex', astype), ('time_index', '<i8')]
+        vec = np.zeros(5, dtype=dtype)
+        vec['mycomplex'] = [1+1j, 2-2j, 3+3j, 4-4j, 5+5j]
+        myfunc = getattr(np, func)
+        a = vec['mycomplex']
+        b = vec['mycomplex'].copy()
+        # the strided field view and its contiguous copy must agree to 1 ULP
+        assert_array_max_ulp(myfunc(a[::stride]).real, myfunc(b[::stride]).real, 1)
+        assert_array_max_ulp(myfunc(a[::stride]).imag, myfunc(b[::stride]).imag, 1)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ctypeslib.py b/dbdpy-env/lib/python3.9/site-packages/numpy/ctypeslib.py
new file mode 100644
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ctypeslib.py
+"""
+============================
+``ctypes`` Utility Functions
+============================
+
+Examples
+--------
+Load the C library:
+
+>>> _lib = np.ctypeslib.load_library('libmystuff', '.')     #doctest: +SKIP
+
+Our result type: an ndarray that must be of type double, 1-dimensional,
+and C-contiguous in memory:
+
+>>> array_1d_double = np.ctypeslib.ndpointer(
+...                          dtype=np.double,
+...                          ndim=1, flags='CONTIGUOUS')    #doctest: +SKIP
+
+Our C-function typically takes an array and updates its values
+in-place.  For example::
+
+    void foo_func(double* x, int length)
+    {
+        int i;
+        for (i = 0; i < length; i++) {
+            x[i] = i*i;
+        }
+    }
+
+We wrap it using:
+
+>>> _lib.foo_func.restype = None                      #doctest: +SKIP
+>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
+
+Then, we're ready to call ``foo_func``:
+
+>>> out = np.empty(15, dtype=np.double)
+>>> _lib.foo_func(out, len(out))                #doctest: +SKIP
+
+"""
+__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array',
+           'as_ctypes_type']
+
+import os
+from numpy import (
+    integer, ndarray, dtype as _dtype, asarray, frombuffer
+)
+from numpy.core.multiarray import _flagdict, flagsobj
+
+try:
+    import ctypes
+except ImportError:
+    ctypes = None
+
+if ctypes is None:
+    def _dummy(*args, **kwds):
+        """
+        Dummy object that raises an ImportError if ctypes is not available.
+
+        Raises
+        ------
+        ImportError
+            If ctypes is not available.
+
+        """
+        raise ImportError("ctypes is not available.")
+    load_library = _dummy
+    as_ctypes = _dummy
+    as_array = _dummy
+    from numpy import intp as c_intp
+    _ndptr_base = object
+else:
+    import numpy.core._internal as nic
+    c_intp = nic._getintp_ctype()
+    del nic
+    _ndptr_base = ctypes.c_void_p
+
+    # Adapted from Albert Strasheim
+    def load_library(libname, loader_path):
+        """
+        It is possible to load a library using
+
+        >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP
+
+        But there are cross-platform considerations, such as library file extensions,
+        plus the fact that Windows will just load the first library it finds with that name.
+        NumPy supplies the load_library function as a convenience.
+
+        .. versionchanged:: 1.20.0
+            Allow libname and loader_path to take any
+            :term:`python:path-like object`.
+
+        Parameters
+        ----------
+        libname : path-like
+            Name of the library, which can have 'lib' as a prefix,
+            but without an extension.
+        loader_path : path-like
+            Where the library can be found.
+
+        Returns
+        -------
+        ctypes.cdll[libpath] : library object
+           A ctypes library object
+
+        Raises
+        ------
+        OSError
+            If there is no library with the expected extension, or the
+            library is defective and cannot be loaded.
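+
+        Examples
+        --------
+        A minimal sketch; the library name and location are illustrative:
+
+        >>> _lib = np.ctypeslib.load_library('libmystuff', '.')  # doctest: +SKIP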
+        """
+        # Convert path-like objects into strings
+        libname = os.fsdecode(libname)
+        loader_path = os.fsdecode(loader_path)
+
+        ext = os.path.splitext(libname)[1]
+        if not ext:
+            import sys
+            import sysconfig
+            # Try to load library with platform-specific name, otherwise
+            # default to libname.[so|dll|dylib].  Sometimes, these files are
+            # built erroneously on non-linux platforms.
+            base_ext = ".so"
+            if sys.platform.startswith("darwin"):
+                base_ext = ".dylib"
+            elif sys.platform.startswith("win"):
+                base_ext = ".dll"
+            libname_ext = [libname + base_ext]
+            so_ext = sysconfig.get_config_var("EXT_SUFFIX")
+            if not so_ext == base_ext:
+                libname_ext.insert(0, libname + so_ext)
+        else:
+            libname_ext = [libname]
+
+        loader_path = os.path.abspath(loader_path)
+        if not os.path.isdir(loader_path):
+            libdir = os.path.dirname(loader_path)
+        else:
+            libdir = loader_path
+
+        for ln in libname_ext:
+            libpath = os.path.join(libdir, ln)
+            if os.path.exists(libpath):
+                try:
+                    return ctypes.cdll[libpath]
+                except OSError:
+                    ## defective lib file
+                    raise
+        ## if no successful return in the libname_ext loop:
+        raise OSError("no file with expected extension")
+
+
+def _num_fromflags(flaglist):
+    num = 0
+    for val in flaglist:
+        num += _flagdict[val]
+    return num
+
+_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
+              'OWNDATA', 'WRITEBACKIFCOPY']
+def _flags_fromnum(num):
+    res = []
+    for key in _flagnames:
+        value = _flagdict[key]
+        if (num & value):
+            res.append(key)
+    return res
+
+
+class _ndptr(_ndptr_base):
+    @classmethod
+    def from_param(cls, obj):
+        if not isinstance(obj, ndarray):
+            raise TypeError("argument must be an ndarray")
+        if cls._dtype_ is not None \
+               and obj.dtype != cls._dtype_:
+            raise TypeError("array must have data type %s" % cls._dtype_)
+        if cls._ndim_ is not None \
+               and obj.ndim != cls._ndim_:
+            raise TypeError("array must have %d dimension(s)" % cls._ndim_)
+        if cls._shape_ is not None \
+               and obj.shape != cls._shape_:
+            raise TypeError("array must have shape %s" % str(cls._shape_))
+        if cls._flags_ is not None \
+               and ((obj.flags.num & cls._flags_) != cls._flags_):
+            raise TypeError("array must have flags %s" %
+                    _flags_fromnum(cls._flags_))
+        return obj.ctypes
+
+
+class _concrete_ndptr(_ndptr):
+    """
+    Like _ndptr, but with `_shape_` and `_dtype_` specified.
+
+    Notably, this means the pointer has enough information to reconstruct
+    the array, which is not generally true.
+    """
+    def _check_retval_(self):
+        """
+        This method is called when this class is used as the .restype
+        attribute for a shared-library function, to automatically wrap the
+        pointer into an array.
+        """
+        return self.contents
+
+    @property
+    def contents(self):
+        """
+        Get an ndarray viewing the data pointed to by this pointer.
+
+        This mirrors the `contents` attribute of a normal ctypes pointer
+        """
+        full_dtype = _dtype((self._dtype_, self._shape_))
+        full_ctype = ctypes.c_char * full_dtype.itemsize
+        buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents
+        return frombuffer(buffer, dtype=full_dtype).squeeze(axis=0)
+
+
+# Factory for an array-checking class with from_param defined for
+#  use with ctypes argtypes mechanism
+_pointer_type_cache = {}
+def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
+    """
+    Array-checking restype/argtypes.
+
+    An ndpointer instance is used to describe an ndarray in restypes
+    and argtypes specifications.  This approach is more flexible than
+    using, for example, ``POINTER(c_double)``, since several restrictions
+    can be specified, which are verified upon calling the ctypes function.
+    These include data type, number of dimensions, shape and flags.  If a
+    given array does not satisfy the specified restrictions,
+    a ``TypeError`` is raised.
+
+    Parameters
+    ----------
+    dtype : data-type, optional
+        Array data-type.
+    ndim : int, optional
+        Number of array dimensions.
+    shape : tuple of ints, optional
+        Array shape.
+    flags : str or tuple of str
+        Array flags; may be one or more of:
+
+          - C_CONTIGUOUS / C / CONTIGUOUS
+          - F_CONTIGUOUS / F / FORTRAN
+          - OWNDATA / O
+          - WRITEABLE / W
+          - ALIGNED / A
+          - WRITEBACKIFCOPY / X
+
+    Returns
+    -------
+    klass : ndpointer type object
+        A type object, which is an ``_ndptr`` instance containing
+        dtype, ndim, shape and flags information.
+
+    Raises
+    ------
+    TypeError
+        If a given array does not satisfy the specified restrictions.
+
+    Examples
+    --------
+    >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
+    ...                                                  ndim=1,
+    ...                                                  flags='C_CONTIGUOUS')]
+    ... #doctest: +SKIP
+    >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
+    ... #doctest: +SKIP
+
+    """
+
+    # normalize dtype to an Optional[dtype]
+    if dtype is not None:
+        dtype = _dtype(dtype)
+
+    # normalize flags to an Optional[int]
+    num = None
+    if flags is not None:
+        if isinstance(flags, str):
+            flags = flags.split(',')
+        elif isinstance(flags, (int, integer)):
+            num = flags
+            flags = _flags_fromnum(num)
+        elif isinstance(flags, flagsobj):
+            num = flags.num
+            flags = _flags_fromnum(num)
+        if num is None:
+            try:
+                flags = [x.strip().upper() for x in flags]
+            except Exception as e:
+                raise TypeError("invalid flags specification") from e
+            num = _num_fromflags(flags)
+
+    # normalize shape to an Optional[tuple]
+    if shape is not None:
+        try:
+            shape = tuple(shape)
+        except TypeError:
+            # single integer -> 1-tuple
+            shape = (shape,)
+
+    cache_key = (dtype, ndim, shape, num)
+
+    try:
+        return _pointer_type_cache[cache_key]
+    except KeyError:
+        pass
+
+    # produce a name for the new type
+    if dtype is None:
+        name = 'any'
+    elif dtype.names is not None:
+        name = str(id(dtype))
+    else:
+        name = dtype.str
+    if ndim is not None:
+        name += "_%dd" % ndim
+    if shape is not None:
+        name += "_"+"x".join(str(x) for x in shape)
+    if flags is not None:
+        name += "_"+"_".join(flags)
+
+    if dtype is not None and shape is not None:
+        base = _concrete_ndptr
+    else:
+        base = _ndptr
+
+    klass = type("ndpointer_%s"%name, (base,),
+                 {"_dtype_": dtype,
+                  "_shape_" : shape,
+                  "_ndim_" : ndim,
+                  "_flags_" : num})
+    _pointer_type_cache[cache_key] = klass
+    return klass
+
+
+if ctypes is not None:
+    def _ctype_ndarray(element_type, shape):
+        """ Create an ndarray of the given element type and shape """
+        for dim in shape[::-1]:
+            element_type = dim * element_type
+            # prevent the type name from including np.ctypeslib
+            element_type.__module__ = None
+        return element_type
+
+
+    def _get_scalar_type_map():
+        """
+        Return a dictionary mapping native endian scalar dtype to ctypes types
+        """
+        ct = ctypes
+        simple_types = [
+            ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
+            ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
+            ct.c_float, ct.c_double,
+            ct.c_bool,
+        ]
+        return {_dtype(ctype): ctype for ctype in simple_types}
+
+
+    _scalar_type_map = _get_scalar_type_map()
+
+
+    def _ctype_from_dtype_scalar(dtype):
+        # swapping twice ensures that `=` is promoted to <, >, or |
+        dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S')
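+        # e.g. on a little-endian machine '=i4' ends up as '<i4', while '|i1'
+        # keeps its "not applicable" byte order marker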
+        dtype_native = dtype.newbyteorder('=')
+        try:
+            ctype = _scalar_type_map[dtype_native]
+        except KeyError as e:
+            raise NotImplementedError(
+                "Converting {!r} to a ctypes type".format(dtype)
+            ) from None
+
+        if dtype_with_endian.byteorder == '>':
+            ctype = ctype.__ctype_be__
+        elif dtype_with_endian.byteorder == '<':
+            ctype = ctype.__ctype_le__
+
+        return ctype
+
+
+    def _ctype_from_dtype_subarray(dtype):
+        element_dtype, shape = dtype.subdtype
+        ctype = _ctype_from_dtype(element_dtype)
+        return _ctype_ndarray(ctype, shape)
+
+
+    def _ctype_from_dtype_structured(dtype):
+        # extract offsets of each field
+        field_data = []
+        for name in dtype.names:
+            field_dtype, offset = dtype.fields[name][:2]
+            field_data.append((offset, name, _ctype_from_dtype(field_dtype)))
+
+        # ctypes doesn't care about field order
+        field_data = sorted(field_data, key=lambda f: f[0])
+
+        if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data):
+            # union, if multiple fields all at address 0
+            size = 0
+            _fields_ = []
+            for offset, name, ctype in field_data:
+                _fields_.append((name, ctype))
+                size = max(size, ctypes.sizeof(ctype))
+
+            # pad to the right size
+            if dtype.itemsize != size:
+                _fields_.append(('', ctypes.c_char * dtype.itemsize))
+
+            # we inserted manual padding, so always `_pack_`
+            return type('union', (ctypes.Union,), dict(
+                _fields_=_fields_,
+                _pack_=1,
+                __module__=None,
+            ))
+        else:
+            last_offset = 0
+            _fields_ = []
+            for offset, name, ctype in field_data:
+                padding = offset - last_offset
+                if padding < 0:
+                    raise NotImplementedError("Overlapping fields")
+                if padding > 0:
+                    _fields_.append(('', ctypes.c_char * padding))
+
+                _fields_.append((name, ctype))
+                last_offset = offset + ctypes.sizeof(ctype)
+
+
+            padding = dtype.itemsize - last_offset
+            if padding > 0:
+                _fields_.append(('', ctypes.c_char * padding))
+
+            # we inserted manual padding, so always `_pack_`
+            return type('struct', (ctypes.Structure,), dict(
+                _fields_=_fields_,
+                _pack_=1,
+                __module__=None,
+            ))
+
+
+    def _ctype_from_dtype(dtype):
+        if dtype.fields is not None:
+            return _ctype_from_dtype_structured(dtype)
+        elif dtype.subdtype is not None:
+            return _ctype_from_dtype_subarray(dtype)
+        else:
+            return _ctype_from_dtype_scalar(dtype)
+
+
+    def as_ctypes_type(dtype):
+        r"""
+        Convert a dtype into a ctypes type.
+
+        Parameters
+        ----------
+        dtype : dtype
+            The dtype to convert
+
+        Returns
+        -------
+        ctype
+            A ctype scalar, union, array, or struct
+
+        Raises
+        ------
+        NotImplementedError
+            If the conversion is not possible
+
+        Notes
+        -----
+        This function does not losslessly round-trip in either direction.
+
+        ``np.dtype(as_ctypes_type(dt))`` will:
+
+         - insert padding fields
+         - reorder fields to be sorted by offset
+         - discard field titles
+
+        ``as_ctypes_type(np.dtype(ctype))`` will:
+
+         - discard the class names of `ctypes.Structure`\ s and
+           `ctypes.Union`\ s
+         - convert single-element `ctypes.Union`\ s into single-element
+           `ctypes.Structure`\ s
+         - insert padding fields
+
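+        Examples
+        --------
+        A minimal sketch of typical conversions (results shown for a common
+        little-endian platform; the exact ctypes aliases may vary):
+
+        >>> np.ctypeslib.as_ctypes_type(np.dtype('i4'))  # doctest: +SKIP
+        <class 'ctypes.c_int'>
+        >>> np.ctypeslib.as_ctypes_type(np.dtype('f8'))  # doctest: +SKIP
+        <class 'ctypes.c_double'>
+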
+        """
+        return _ctype_from_dtype(_dtype(dtype))
+
+
+    def as_array(obj, shape=None):
+        """
+        Create a numpy array from a ctypes array or POINTER.
+
+        The numpy array shares the memory with the ctypes object.
+
+        The shape parameter must be given if converting from a ctypes POINTER.
+        The shape parameter is ignored if converting from a ctypes array.
+        """
+        if isinstance(obj, ctypes._Pointer):
+            # convert pointers to an array of the desired shape
+            if shape is None:
+                raise TypeError(
+                    'as_array() requires a shape argument when called on a '
+                    'pointer')
+            p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape))
+            obj = ctypes.cast(obj, p_arr_type).contents
+
+        return asarray(obj)
+
+
+    def as_ctypes(obj):
+        """Create and return a ctypes object from a numpy array.  Actually
+        anything that exposes the __array_interface__ is accepted."""
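+        # e.g. as_ctypes(np.zeros(3)) returns a (c_double * 3) instance that
+        # shares memory with the array (illustrative)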
+        ai = obj.__array_interface__
+        if ai["strides"]:
+            raise TypeError("strided arrays not supported")
+        if ai["version"] != 3:
+            raise TypeError("only __array_interface__ version 3 supported")
+        addr, readonly = ai["data"]
+        if readonly:
+            raise TypeError("readonly arrays unsupported")
+
+        # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows
+        # dtype.itemsize (gh-14214)
+        ctype_scalar = as_ctypes_type(ai["typestr"])
+        result_type = _ctype_ndarray(ctype_scalar, ai["shape"])
+        result = result_type.from_address(addr)
+        result.__keep = obj
+        return result
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ctypeslib.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/ctypeslib.pyi
new file mode 100644
index 00000000..3edf98e1
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ctypeslib.pyi
@@ -0,0 +1,251 @@
+# NOTE: Numpy's mypy plugin is used for importing the correct
+# platform-specific `ctypes._SimpleCData[int]` sub-type
+from ctypes import c_int64 as _c_intp
+
+import os
+import sys
+import ctypes
+from collections.abc import Iterable, Sequence
+from typing import (
+    Literal as L,
+    Any,
+    Union,
+    TypeVar,
+    Generic,
+    overload,
+    ClassVar,
+)
+
+from numpy import (
+    ndarray,
+    dtype,
+    generic,
+    bool_,
+    byte,
+    short,
+    intc,
+    int_,
+    longlong,
+    ubyte,
+    ushort,
+    uintc,
+    uint,
+    ulonglong,
+    single,
+    double,
+    longdouble,
+    void,
+)
+from numpy.core._internal import _ctypes
+from numpy.core.multiarray import flagsobj
+from numpy._typing import (
+    # Arrays
+    NDArray,
+    _ArrayLike,
+
+    # Shapes
+    _ShapeLike,
+
+    # DTypes
+    DTypeLike,
+    _DTypeLike,
+    _VoidDTypeLike,
+    _BoolCodes,
+    _UByteCodes,
+    _UShortCodes,
+    _UIntCCodes,
+    _UIntCodes,
+    _ULongLongCodes,
+    _ByteCodes,
+    _ShortCodes,
+    _IntCCodes,
+    _IntCodes,
+    _LongLongCodes,
+    _SingleCodes,
+    _DoubleCodes,
+    _LongDoubleCodes,
+)
+
+# TODO: Add a proper `_Shape` bound once we've got variadic typevars
+_DType = TypeVar("_DType", bound=dtype[Any])
+_DTypeOptional = TypeVar("_DTypeOptional", bound=None | dtype[Any])
+_SCT = TypeVar("_SCT", bound=generic)
+
+_FlagsKind = L[
+    'C_CONTIGUOUS', 'CONTIGUOUS', 'C',
+    'F_CONTIGUOUS', 'FORTRAN', 'F',
+    'ALIGNED', 'A',
+    'WRITEABLE', 'W',
+    'OWNDATA', 'O',
+    'WRITEBACKIFCOPY', 'X',
+]
+
+# TODO: Add a shape typevar once we have variadic typevars (PEP 646)
+class _ndptr(ctypes.c_void_p, Generic[_DTypeOptional]):
+    # In practice these 4 classvars are defined in the dynamic class
+    # returned by `ndpointer`
+    _dtype_: ClassVar[_DTypeOptional]
+    _shape_: ClassVar[None]
+    _ndim_: ClassVar[None | int]
+    _flags_: ClassVar[None | list[_FlagsKind]]
+
+    @overload
+    @classmethod
+    def from_param(cls: type[_ndptr[None]], obj: ndarray[Any, Any]) -> _ctypes[Any]: ...
+    @overload
+    @classmethod
+    def from_param(cls: type[_ndptr[_DType]], obj: ndarray[Any, _DType]) -> _ctypes[Any]: ...
+
+class _concrete_ndptr(_ndptr[_DType]):
+    _dtype_: ClassVar[_DType]
+    _shape_: ClassVar[tuple[int, ...]]
+    @property
+    def contents(self) -> ndarray[Any, _DType]: ...
+
+def load_library(
+    libname: str | bytes | os.PathLike[str] | os.PathLike[bytes],
+    loader_path: str | bytes | os.PathLike[str] | os.PathLike[bytes],
+) -> ctypes.CDLL: ...
+
+__all__: list[str]
+
+c_intp = _c_intp
+
+@overload
+def ndpointer(
+    dtype: None = ...,
+    ndim: int = ...,
+    shape: None | _ShapeLike = ...,
+    flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
+) -> type[_ndptr[None]]: ...
+@overload
+def ndpointer(
+    dtype: _DTypeLike[_SCT],
+    ndim: int = ...,
+    *,
+    shape: _ShapeLike,
+    flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
+) -> type[_concrete_ndptr[dtype[_SCT]]]: ...
+@overload
+def ndpointer(
+    dtype: DTypeLike,
+    ndim: int = ...,
+    *,
+    shape: _ShapeLike,
+    flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
+) -> type[_concrete_ndptr[dtype[Any]]]: ...
+@overload
+def ndpointer(
+    dtype: _DTypeLike[_SCT],
+    ndim: int = ...,
+    shape: None = ...,
+    flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
+) -> type[_ndptr[dtype[_SCT]]]: ...
+@overload
+def ndpointer(
+    dtype: DTypeLike,
+    ndim: int = ...,
+    shape: None = ...,
+    flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
+) -> type[_ndptr[dtype[Any]]]: ...
+
+@overload
+def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[bool_] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ...
+@overload
+def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ...
+@overload
+def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ...
+@overload
+def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ...
+@overload
+def as_ctypes_type(dtype: _IntCodes | _DTypeLike[int_] | type[int | ctypes.c_long]) -> type[ctypes.c_long]: ...
+@overload
+def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ...
+@overload
+def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ...
+@overload
+def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ...
+@overload
+def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ...
+@overload
+def as_ctypes_type(dtype: _UIntCodes | _DTypeLike[uint] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ...
+@overload
+def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ...
+@overload
+def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ...
+@overload
+def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ...
+@overload
+def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ...
+@overload
+def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ...  # `ctypes.Union` or `ctypes.Structure`
+@overload
+def as_ctypes_type(dtype: str) -> type[Any]: ...
+
+@overload
+def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ...
+@overload
+def as_array(obj: _ArrayLike[_SCT], shape: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
+@overload
+def as_array(obj: object, shape: None | _ShapeLike = ...) -> NDArray[Any]: ...
+
+@overload
+def as_ctypes(obj: bool_) -> ctypes.c_bool: ...
+@overload
+def as_ctypes(obj: byte) -> ctypes.c_byte: ...
+@overload
+def as_ctypes(obj: short) -> ctypes.c_short: ...
+@overload
+def as_ctypes(obj: intc) -> ctypes.c_int: ...
+@overload
+def as_ctypes(obj: int_) -> ctypes.c_long: ...
+@overload
+def as_ctypes(obj: longlong) -> ctypes.c_longlong: ...
+@overload
+def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ...
+@overload
+def as_ctypes(obj: ushort) -> ctypes.c_ushort: ...
+@overload
+def as_ctypes(obj: uintc) -> ctypes.c_uint: ...
+@overload
+def as_ctypes(obj: uint) -> ctypes.c_ulong: ...
+@overload
+def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ...
+@overload
+def as_ctypes(obj: single) -> ctypes.c_float: ...
+@overload
+def as_ctypes(obj: double) -> ctypes.c_double: ...
+@overload
+def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ...
+@overload
+def as_ctypes(obj: void) -> Any: ...  # `ctypes.Union` or `ctypes.Structure`
+@overload
+def as_ctypes(obj: NDArray[bool_]) -> ctypes.Array[ctypes.c_bool]: ...
+@overload
+def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ...
+@overload
+def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ...
+@overload
+def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ...
+@overload
+def as_ctypes(obj: NDArray[int_]) -> ctypes.Array[ctypes.c_long]: ...
+@overload
+def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ...
+@overload
+def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ...
+@overload
+def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ...
+@overload
+def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ...
+@overload
+def as_ctypes(obj: NDArray[uint]) -> ctypes.Array[ctypes.c_ulong]: ...
+@overload
+def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ...
+@overload
+def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ...
+@overload
+def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ...
+@overload
+def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ...
+@overload
+def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ...  # `ctypes.Union` or `ctypes.Structure`
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/__init__.py
new file mode 100644
index 00000000..f74ed4d3
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/__init__.py
@@ -0,0 +1,64 @@
+"""
+An enhanced distutils, providing support for Fortran compilers, for BLAS,
+LAPACK and other common libraries for numerical computing, and more.
+
+Public submodules are::
+
+    misc_util
+    system_info
+    cpu_info
+    log
+    exec_command
+
+For details, please see the *Packaging* and *NumPy Distutils User Guide*
+sections of the NumPy Reference Guide.
+
+For configuring the preference for and location of libraries like BLAS and
+LAPACK, and for setting include paths and similar build options, please see
+``site.cfg.example`` in the root of the NumPy repository or sdist.
+
+"""
+
+import warnings
+
+# Must import local ccompiler ASAP in order to get
+# customized CCompiler.spawn effective.
+from . import ccompiler
+from . import unixccompiler
+
+from .npy_pkg_config import *
+
+warnings.warn("\n\n"
+    "  `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n"
+    "  of the deprecation of `distutils` itself. It will be removed for\n"
+    "  Python >= 3.12. For older Python versions it will remain present.\n"
+    "  It is recommended to use `setuptools < 60.0` for those Python versions.\n"
+    "  For more details, see:\n"
+    "    https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n",
+    DeprecationWarning, stacklevel=2
+)
+del warnings
+
+# If numpy is installed, add distutils.test()
+try:
+    from . import __config__
+    # Normally numpy is installed if the above import works, but an interrupted
+    # in-place build could also have left a __config__.py.  In that case the
+    # next import may still fail, so keep it inside the try block.
+    from numpy._pytesttester import PytestTester
+    test = PytestTester(__name__)
+    del PytestTester
+except ImportError:
+    pass
+
+
+def customized_fcompiler(plat=None, compiler=None):
+    from numpy.distutils.fcompiler import new_fcompiler
+    c = new_fcompiler(plat=plat, compiler=compiler)
+    c.customize()
+    return c
+
+def customized_ccompiler(plat=None, compiler=None, verbose=1):
+    c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose)
+    c.customize('')
+    return c
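+
+# Typical interactive use (illustrative):
+#
+#   from numpy.distutils import customized_ccompiler
+#   cc = customized_ccompiler()
+#   print(cc.compiler_so)   # the customized compile command line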
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/__init__.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/__init__.pyi
new file mode 100644
index 00000000..3938d68d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/__init__.pyi
@@ -0,0 +1,4 @@
+from typing import Any
+
+# TODO: remove when the full numpy namespace is defined
+def __getattr__(name: str) -> Any: ...
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/__pycache__/conv_template.cpython-39.pyc b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/__pycache__/conv_template.cpython-39.pyc
new file mode 100644
index 00000000..9c7c312a
Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/__pycache__/conv_template.cpython-39.pyc differ
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/_shell_utils.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/_shell_utils.py
new file mode 100644
index 00000000..82abd5f4
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/_shell_utils.py
@@ -0,0 +1,91 @@
+"""
+Helper functions for interacting with the shell, and consuming shell-style
+parameters provided in config files.
+"""
+import os
+import shlex
+import subprocess
+try:
+    from shlex import quote
+except ImportError:
+    from pipes import quote
+
+__all__ = ['WindowsParser', 'PosixParser', 'NativeParser']
+
+
+class CommandLineParser:
+    """
+    An object that knows how to split and join command-line arguments.
+
+    It must be true that ``argv == split(join(argv))`` for all ``argv``.
+    The reverse needn't be true - `join(split(cmd))` may result in the addition
+    or removal of unnecessary escaping.
+    """
+    @staticmethod
+    def join(argv):
+        """ Join a list of arguments into a command line string """
+        raise NotImplementedError
+
+    @staticmethod
+    def split(cmd):
+        """ Split a command line string into a list of arguments """
+        raise NotImplementedError
+
+
+class WindowsParser:
+    """
+    The parsing behavior used by `subprocess.call("string")` on Windows, which
+    matches the Microsoft C/C++ runtime.
+
+    Note that this is _not_ the behavior of cmd.
+    """
+    @staticmethod
+    def join(argv):
+        # note that list2cmdline is specific to the windows syntax
+        return subprocess.list2cmdline(argv)
+
+    @staticmethod
+    def split(cmd):
+        import ctypes  # guarded import for systems without ctypes
+        try:
+            ctypes.windll
+        except AttributeError:
+            raise NotImplementedError
+
+        # Windows has special parsing rules for the executable (no quotes),
+        # that we do not care about - insert a dummy element
+        if not cmd:
+            return []
+        cmd = 'dummy ' + cmd
+
+        CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
+        CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p)
+        CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int))
+
+        nargs = ctypes.c_int()
+        lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs))
+        args = [lpargs[i] for i in range(nargs.value)]
+        assert not ctypes.windll.kernel32.LocalFree(lpargs)
+
+        # strip the element we inserted
+        assert args[0] == "dummy"
+        return args[1:]
+
+
+class PosixParser:
+    """
+    The parsing behavior used by `subprocess.call("string", shell=True)` on Posix.
+    """
+    @staticmethod
+    def join(argv):
+        return ' '.join(quote(arg) for arg in argv)
+
+    @staticmethod
+    def split(cmd):
+        return shlex.split(cmd, posix=True)
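+
+# A quick illustration of the round-trip guarantee (POSIX rules):
+#
+#   PosixParser.split('gcc -I "dir with spaces" -O2')
+#     -> ['gcc', '-I', 'dir with spaces', '-O2']
+#   PosixParser.join(['a b', 'c'])
+#     -> "'a b' c"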
+
+
+if os.name == 'nt':
+    NativeParser = WindowsParser
+elif os.name == 'posix':
+    NativeParser = PosixParser
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/armccompiler.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/armccompiler.py
new file mode 100644
index 00000000..afba7eb3
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/armccompiler.py
@@ -0,0 +1,26 @@
+from distutils.unixccompiler import UnixCCompiler
+
+class ArmCCompiler(UnixCCompiler):
+
+    """
+    Arm compiler.
+    """
+
+    compiler_type = 'arm'
+    cc_exe = 'armclang'
+    cxx_exe = 'armclang++'
+
+    def __init__(self, verbose=0, dry_run=0, force=0):
+        UnixCCompiler.__init__(self, verbose, dry_run, force)
+        cc_compiler = self.cc_exe
+        cxx_compiler = self.cxx_exe
+        self.set_executables(compiler=cc_compiler +
+                                      ' -O3 -fPIC',
+                             compiler_so=cc_compiler +
+                                         ' -O3 -fPIC',
+                             compiler_cxx=cxx_compiler +
+                                          ' -O3 -fPIC',
+                             linker_exe=cc_compiler +
+                                        ' -lamath',
+                             linker_so=cc_compiler +
+                                       ' -lamath -shared')
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/ccompiler.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/ccompiler.py
new file mode 100644
index 00000000..40f495fc
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/ccompiler.py
@@ -0,0 +1,826 @@
+import os
+import re
+import sys
+import platform
+import shlex
+import time
+import subprocess
+from copy import copy
+from pathlib import Path
+from distutils import ccompiler
+from distutils.ccompiler import (
+    compiler_class, gen_lib_options, get_default_compiler, new_compiler,
+    CCompiler
+)
+from distutils.errors import (
+    DistutilsExecError, DistutilsModuleError, DistutilsPlatformError,
+    CompileError, UnknownFileError
+)
+from distutils.sysconfig import customize_compiler
+from distutils.version import LooseVersion
+
+from numpy.distutils import log
+from numpy.distutils.exec_command import (
+    filepath_from_subprocess_output, forward_bytes_to_stdout
+)
+from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
+                                      get_num_build_jobs, \
+                                      _commandline_dep_string, \
+                                      sanitize_cxx_flags
+
+# globals for parallel build management
+import threading
+
+_job_semaphore = None
+_global_lock = threading.Lock()
+_processing_files = set()
+
+
+def _needs_build(obj, cc_args, extra_postargs, pp_opts):
+    """
+    Check if an object needs to be rebuilt based on its dependencies.
+
+    Parameters
+    ----------
+    obj : str
+        object file
+
+    Returns
+    -------
+    bool
+    """
+    # defined in unixcompiler.py
+    dep_file = obj + '.d'
+    if not os.path.exists(dep_file):
+        return True
+
+    # dep_file is a makefile containing 'object: dependencies'
+    # formatted like posix shell (spaces escaped, \ line continuations)
+    # the last line contains the compiler commandline arguments as some
+    # projects may compile an extension multiple times with different
+    # arguments
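+    # An illustrative dep_file (the exact paths depend on the build):
+    #
+    #   build/foo.o: foo.c foo.h \
+    #    numpy/core/include/numpy/ndarraytypes.h
+    #   # <last line: the recorded compiler command line>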
+    with open(dep_file) as f:
+        lines = f.readlines()
+
+    cmdline = _commandline_dep_string(cc_args, extra_postargs, pp_opts)
+    last_cmdline = lines[-1]
+    if last_cmdline != cmdline:
+        return True
+
+    contents = ''.join(lines[:-1])
+    deps = [x for x in shlex.split(contents, posix=True)
+            if x != "\n" and not x.endswith(":")]
+
+    try:
+        t_obj = os.stat(obj).st_mtime
+
+        # check if any of the dependencies is newer than the object
+        # the dependencies includes the source used to create the object
+        for f in deps:
+            if os.stat(f).st_mtime > t_obj:
+                return True
+    except OSError:
+        # no object counts as newer (shouldn't happen if dep_file exists)
+        return True
+
+    return False
+
+
+def replace_method(klass, method_name, func):
+    # Py3k does not have unbound method anymore, MethodType does not work
+    m = lambda self, *args, **kw: func(self, *args, **kw)
+    setattr(klass, method_name, m)
+
+
+######################################################################
+## Methods that subclasses may redefine. But don't call these methods:
+## they are private to the CCompiler class and may return unexpected
+## results if used elsewhere. So, you have been warned.
+
+def CCompiler_find_executables(self):
+    """
+    Does nothing here, but is called by the get_version method and can be
+    overridden by subclasses. In particular it is redefined in the `FCompiler`
+    class where more documentation can be found.
+
+    """
+    pass
+
+
+replace_method(CCompiler, 'find_executables', CCompiler_find_executables)
+
+
+# Using customized CCompiler.spawn.
+def CCompiler_spawn(self, cmd, display=None, env=None):
+    """
+    Execute a command in a sub-process.
+
+    Parameters
+    ----------
+    cmd : str
+        The command to execute.
+    display : str or sequence of str, optional
+        The text to add to the log file kept by `numpy.distutils`.
+        If not given, `display` is equal to `cmd`.
+    env : a dictionary for environment variables, optional
+
+    Returns
+    -------
+    None
+
+    Raises
+    ------
+    DistutilsExecError
+        If the command failed, i.e. the exit status was not 0.
+
+    """
+    env = env if env is not None else dict(os.environ)
+    if display is None:
+        display = cmd
+        if is_sequence(display):
+            display = ' '.join(list(display))
+    log.info(display)
+    try:
+        if self.verbose:
+            subprocess.check_output(cmd, env=env)
+        else:
+            subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
+    except subprocess.CalledProcessError as exc:
+        o = exc.output
+        s = exc.returncode
+    except OSError as e:
+        # OSError doesn't have the same hooks for the exception
+        # output, and exec_command() historically would use an
+        # empty string for EnvironmentError (the base class of
+        # OSError), i.e. o = b''. But that would leave the end-user
+        # with no useful information, so embed the exception message
+        # instead.
+        o = f"\n\n{e}\n\n\n"
+        try:
+            o = o.encode(sys.stdout.encoding)
+        except AttributeError:
+            o = o.encode('utf8')
+        # status previously used by exec_command() for parent
+        # of OSError
+        s = 127
+    else:
+        # return here on success so that only the caught-exception
+        # paths fall through to the error-handling code after the
+        # try / except block
+        return None
+
+    if is_sequence(cmd):
+        cmd = ' '.join(list(cmd))
+
+    if self.verbose:
+        forward_bytes_to_stdout(o)
+
+    if re.search(b'Too many open files', o):
+        msg = '\nTry rerunning setup command until build succeeds.'
+    else:
+        msg = ''
+    raise DistutilsExecError('Command "%s" failed with exit status %d%s' %
+                            (cmd, s, msg))
+
+replace_method(CCompiler, 'spawn', CCompiler_spawn)
+
+def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
+    """
+    Return the name of the object files for the given source files.
+
+    Parameters
+    ----------
+    source_filenames : list of str
+        The list of paths to source files. Paths can be either relative or
+        absolute, this is handled transparently.
+    strip_dir : bool, optional
+        Whether to strip the directory from the returned paths. If True,
+        the file name prepended by `output_dir` is returned. Default is False.
+    output_dir : str, optional
+        If given, this path is prepended to the returned paths to the
+        object files.
+
+    Returns
+    -------
+    obj_names : list of str
+        The list of paths to the object files corresponding to the source
+        files in `source_filenames`.
+
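+    Examples
+    --------
+    An illustrative call (hypothetical paths; the object extension is
+    platform-dependent, ``.o`` is assumed here)::
+
+        compiler.object_filenames(['src/foo.c'], output_dir='build')
+        # -> ['build/src/foo.o']
+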
+    """
+    if output_dir is None:
+        output_dir = ''
+    obj_names = []
+    for src_name in source_filenames:
+        base, ext = os.path.splitext(os.path.normpath(src_name))
+        base = os.path.splitdrive(base)[1] # Chop off the drive
+        base = base[os.path.isabs(base):]  # If abs, chop off leading /
+        if base.startswith('..'):
+            # Resolve starting relative path components, middle ones
+            # (if any) have been handled by os.path.normpath above.
+            i = base.rfind('..')+2
+            d = base[:i]
+            d = os.path.basename(os.path.abspath(d))
+            base = d + base[i:]
+        if ext not in self.src_extensions:
+            raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name))
+        if strip_dir:
+            base = os.path.basename(base)
+        obj_name = os.path.join(output_dir, base + self.obj_extension)
+        obj_names.append(obj_name)
+    return obj_names
+
+replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames)
+
+def CCompiler_compile(self, sources, output_dir=None, macros=None,
+                      include_dirs=None, debug=0, extra_preargs=None,
+                      extra_postargs=None, depends=None):
+    """
+    Compile one or more source files.
+
+    Please refer to the Python distutils API reference for more details.
+
+    Parameters
+    ----------
+    sources : list of str
+        A list of filenames
+    output_dir : str, optional
+        Path to the output directory.
+    macros : list of tuples
+        A list of macro definitions.
+    include_dirs : list of str, optional
+        The directories to add to the default include file search path for
+        this compilation only.
+    debug : bool, optional
+        Whether or not to output debug symbols in or alongside the object
+        file(s).
+    extra_preargs, extra_postargs : list of str, optional
+        Extra pre- and post-compilation arguments passed to the compiler.
+    depends : list of str, optional
+        A list of file names that all targets depend on.
+
+    Returns
+    -------
+    objects : list of str
+        A list of object file names, one per source file `sources`.
+
+    Raises
+    ------
+    CompileError
+        If compilation fails.
+
+    """
+    global _job_semaphore
+
+    jobs = get_num_build_jobs()
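+    # get_num_build_jobs() honours the `--parallel`/`-j` build option and,
+    # in NumPy, the NPY_NUM_BUILD_JOBS environment variable.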
+
+    # set up a semaphore so the number of compile jobs is not exceeded when
+    # builds are parallelized at the extension level (python >= 3.5)
+    with _global_lock:
+        if _job_semaphore is None:
+            _job_semaphore = threading.Semaphore(jobs)
+
+    if not sources:
+        return []
+    from numpy.distutils.fcompiler import (FCompiler,
+                                           FORTRAN_COMMON_FIXED_EXTENSIONS,
+                                           has_f90_header)
+    if isinstance(self, FCompiler):
+        display = []
+        for fc in ['f77', 'f90', 'fix']:
+            fcomp = getattr(self, 'compiler_'+fc)
+            if fcomp is None:
+                continue
+            display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp)))
+        display = '\n'.join(display)
+    else:
+        ccomp = self.compiler_so
+        display = "C compiler: %s\n" % (' '.join(ccomp),)
+    log.info(display)
+    macros, objects, extra_postargs, pp_opts, build = \
+            self._setup_compile(output_dir, macros, include_dirs, sources,
+                                depends, extra_postargs)
+    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
+    display = "compile options: '%s'" % (' '.join(cc_args))
+    if extra_postargs:
+        display += "\nextra options: '%s'" % (' '.join(extra_postargs))
+    log.info(display)
+
+    def single_compile(args):
+        obj, (src, ext) = args
+        if not _needs_build(obj, cc_args, extra_postargs, pp_opts):
+            return
+
+        # check if we are currently already processing the same object
+        # happens when using the same source in multiple extensions
+        while True:
+            # need explicit lock as there is no atomic check and add with GIL
+            with _global_lock:
+                # file not being worked on, start working
+                if obj not in _processing_files:
+                    _processing_files.add(obj)
+                    break
+            # wait for the processing to end
+            time.sleep(0.1)
+
+        try:
+            # acquire a slot from our job semaphore and build
+            with _job_semaphore:
+                self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
+        finally:
+            # register being done processing
+            with _global_lock:
+                _processing_files.remove(obj)
+
+
+    if isinstance(self, FCompiler):
+        objects_to_build = list(build.keys())
+        f77_objects, other_objects = [], []
+        for obj in objects:
+            if obj in objects_to_build:
+                src, ext = build[obj]
+                if self.compiler_type == 'absoft':
+                    obj = cyg2win32(obj)
+                    src = cyg2win32(src)
+                if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \
+                   and not has_f90_header(src):
+                    f77_objects.append((obj, (src, ext)))
+                else:
+                    other_objects.append((obj, (src, ext)))
+
+        # f77 objects can be built in parallel
+        build_items = f77_objects
+        # build f90 modules serially; module files are generated during
+        # compilation and may be used by files later in the list, so the
+        # ordering is important
+        for o in other_objects:
+            single_compile(o)
+    else:
+        build_items = build.items()
+
+    if len(build) > 1 and jobs > 1:
+        # build parallel
+        from concurrent.futures import ThreadPoolExecutor
+        with ThreadPoolExecutor(jobs) as pool:
+            res = pool.map(single_compile, build_items)
+        list(res)  # access result to raise errors
+    else:
+        # build serial
+        for o in build_items:
+            single_compile(o)
+
+    # Return *all* object filenames, not just the ones we just built.
+    return objects
+
+replace_method(CCompiler, 'compile', CCompiler_compile)
+
+def CCompiler_customize_cmd(self, cmd, ignore=()):
+    """
+    Customize compiler using distutils command.
+
+    Parameters
+    ----------
+    cmd : class instance
+        An instance inheriting from `distutils.cmd.Command`.
+    ignore : sequence of str, optional
+        List of `CCompiler` commands (without ``'set_'``) that should not be
+        altered. Strings that are checked for are:
+        ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs',
+        'rpath', 'link_objects')``.
+
+    Returns
+    -------
+    None
+
+    """
+    log.info('customize %s using %s' % (self.__class__.__name__,
+                                        cmd.__class__.__name__))
+
+    if (
+        hasattr(self, 'compiler') and
+        'clang' in self.compiler[0] and
+        not (platform.machine() == 'arm64' and sys.platform == 'darwin')
+    ):
+        # clang defaults to a non-strict floating error point model.
+        # However, '-ftrapping-math' is not currently supported (2023-04-08)
+        # for macosx_arm64.
+        # Since NumPy and most Python libs give warnings for these, override:
+        self.compiler.append('-ftrapping-math')
+        self.compiler_so.append('-ftrapping-math')
+
+    def allow(attr):
+        return getattr(cmd, attr, None) is not None and attr not in ignore
+
+    if allow('include_dirs'):
+        self.set_include_dirs(cmd.include_dirs)
+    if allow('define'):
+        for (name, value) in cmd.define:
+            self.define_macro(name, value)
+    if allow('undef'):
+        for macro in cmd.undef:
+            self.undefine_macro(macro)
+    if allow('libraries'):
+        self.set_libraries(self.libraries + cmd.libraries)
+    if allow('library_dirs'):
+        self.set_library_dirs(self.library_dirs + cmd.library_dirs)
+    if allow('rpath'):
+        self.set_runtime_library_dirs(cmd.rpath)
+    if allow('link_objects'):
+        self.set_link_objects(cmd.link_objects)
+
+replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)
+
+def _compiler_to_string(compiler):
+    props = []
+    mx = 0
+    keys = list(compiler.executables.keys())
+    for key in ['version', 'libraries', 'library_dirs',
+                'object_switch', 'compile_switch',
+                'include_dirs', 'define', 'undef', 'rpath', 'link_objects']:
+        if key not in keys:
+            keys.append(key)
+    for key in keys:
+        if hasattr(compiler, key):
+            v = getattr(compiler, key)
+            mx = max(mx, len(key))
+            props.append((key, repr(v)))
+    fmt = '%-' + repr(mx+1) + 's = %s'
+    lines = [fmt % prop for prop in props]
+    return '\n'.join(lines)
+
+def CCompiler_show_customization(self):
+    """
+    Print the compiler customizations to stdout.
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    None
+
+    Notes
+    -----
+    Printing is only done if the distutils log threshold is < 2.
+
+    """
+    try:
+        self.get_version()
+    except Exception:
+        pass
+    if log._global_log.threshold < 2:
+        print('*'*80)
+        print(self.__class__)
+        print(_compiler_to_string(self))
+        print('*'*80)
+
+replace_method(CCompiler, 'show_customization', CCompiler_show_customization)
+
+def CCompiler_customize(self, dist, need_cxx=0):
+    """
+    Do any platform-specific customization of a compiler instance.
+
+    This method calls `distutils.sysconfig.customize_compiler` for
+    platform-specific customization, as well as optionally remove a flag
+    to suppress spurious warnings in case C++ code is being compiled.
+
+    Parameters
+    ----------
+    dist : object
+        This parameter is not used for anything.
+    need_cxx : bool, optional
+        Whether or not C++ has to be compiled. If so (True), the
+        ``"-Wstrict-prototypes"`` option is removed to prevent spurious
+        warnings. Default is False.
+
+    Returns
+    -------
+    None
+
+    Notes
+    -----
+    All the default options used by distutils can be extracted with::
+
+      from distutils import sysconfig
+      sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
+                                'CCSHARED', 'LDSHARED', 'SO')
+
+    """
+    # See FCompiler.customize for suggested usage.
+    log.info('customize %s' % (self.__class__.__name__))
+    customize_compiler(self)
+    if need_cxx:
+        # In general, distutils uses -Wstrict-prototypes, but this option is
+        # not valid for C++ code, only for C.  Remove it if it's there to
+        # avoid a spurious warning on every compilation.
+        try:
+            self.compiler_so.remove('-Wstrict-prototypes')
+        except (AttributeError, ValueError):
+            pass
+
+        if hasattr(self, 'compiler') and 'cc' in self.compiler[0]:
+            if not self.compiler_cxx:
+                if self.compiler[0].startswith('gcc'):
+                    a, b = 'gcc', 'g++'
+                else:
+                    a, b = 'cc', 'c++'
+                self.compiler_cxx = [self.compiler[0].replace(a, b)]\
+                                    + self.compiler[1:]
+        else:
+            if hasattr(self, 'compiler'):
+                log.warn("#### %s #######" % (self.compiler,))
+            if not hasattr(self, 'compiler_cxx'):
+                log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__)
+
+
+    # check if compiler supports gcc style automatic dependencies
+    # run on every extension so skip for known good compilers
+    if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or
+                                      'g++' in self.compiler[0] or
+                                      'clang' in self.compiler[0]):
+        self._auto_depends = True
+    elif os.name == 'posix':
+        import tempfile
+        import shutil
+        tmpdir = tempfile.mkdtemp()
+        try:
+            fn = os.path.join(tmpdir, "file.c")
+            with open(fn, "w") as f:
+                f.write("int a;\n")
+            self.compile([fn], output_dir=tmpdir,
+                         extra_preargs=['-MMD', '-MF', fn + '.d'])
+            self._auto_depends = True
+        except CompileError:
+            self._auto_depends = False
+        finally:
+            shutil.rmtree(tmpdir)
+
+    return
+
+replace_method(CCompiler, 'customize', CCompiler_customize)
+
+def simple_version_match(pat=r'[-.\d]+', ignore='', start=''):
+    """
+    Simple matching of version numbers, for use in CCompiler and FCompiler.
+
+    Parameters
+    ----------
+    pat : str, optional
+        A regular expression matching version numbers.
+        Default is ``r'[-.\\d]+'``.
+    ignore : str, optional
+        A regular expression matching patterns to skip.
+        Default is ``''``, in which case nothing is skipped.
+    start : str, optional
+        A regular expression matching the start of where to start looking
+        for version numbers.
+        Default is ``''``, in which case searching is started at the
+        beginning of the version string given to `matcher`.
+
+    Returns
+    -------
+    matcher : callable
+        A function that is appropriate to use as the ``.version_match``
+        attribute of a `CCompiler` class. `matcher` takes a single parameter,
+        a version string.
+
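+    Examples
+    --------
+    An illustrative use (`None` stands in for the compiler instance)::
+
+        matcher = simple_version_match(start=r'gcc')
+        matcher(None, 'gcc (GCC) 9.4.0')
+        # -> '9.4.0'
+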
+    """
+    def matcher(self, version_string):
+        # version string may appear in the second line, so getting rid
+        # of new lines:
+        version_string = version_string.replace('\n', ' ')
+        pos = 0
+        if start:
+            m = re.match(start, version_string)
+            if not m:
+                return None
+            pos = m.end()
+        while True:
+            m = re.search(pat, version_string[pos:])
+            if not m:
+                return None
+            if ignore and re.match(ignore, m.group(0)):
+                pos = m.end()
+                continue
+            break
+        return m.group(0)
+    return matcher
+
+def CCompiler_get_version(self, force=False, ok_status=[0]):
+    """
+    Return compiler version, or None if compiler is not available.
+
+    Parameters
+    ----------
+    force : bool, optional
+        If True, force a new determination of the version, even if the
+        compiler already has a version attribute. Default is False.
+    ok_status : list of int, optional
+        The list of status values returned by the version look-up process
+        for which a version string is returned. If the status value is not
+        in `ok_status`, None is returned. Default is ``[0]``.
+
+    Returns
+    -------
+    version : str or None
+        Version string, in the format of `distutils.version.LooseVersion`.
+
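+    Examples
+    --------
+    A hypothetical run against a GCC-based toolchain::
+
+        cc.get_version()
+        # -> LooseVersion('9.4.0'), or None if the compiler is unavailable
+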
+    """
+    if not force and hasattr(self, 'version'):
+        return self.version
+    self.find_executables()
+    try:
+        version_cmd = self.version_cmd
+    except AttributeError:
+        return None
+    if not version_cmd or not version_cmd[0]:
+        return None
+    try:
+        matcher = self.version_match
+    except AttributeError:
+        try:
+            pat = self.version_pattern
+        except AttributeError:
+            return None
+        def matcher(version_string):
+            m = re.match(pat, version_string)
+            if not m:
+                return None
+            version = m.group('version')
+            return version
+
+    try:
+        output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError as exc:
+        output = exc.output
+        status = exc.returncode
+    except OSError:
+        # match the historical returns for a parent
+        # exception class caught by exec_command()
+        status = 127
+        output = b''
+    else:
+        # output isn't actually a filepath but we do this
+        # for now to match previous distutils behavior
+        output = filepath_from_subprocess_output(output)
+        status = 0
+
+    version = None
+    if status in ok_status:
+        version = matcher(output)
+        if version:
+            version = LooseVersion(version)
+    self.version = version
+    return version
+
+replace_method(CCompiler, 'get_version', CCompiler_get_version)
+
+def CCompiler_cxx_compiler(self):
+    """
+    Return the C++ compiler.
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    cxx : class instance
+        The C++ compiler, as a `CCompiler` instance.
+
+    """
+    if self.compiler_type in ('msvc', 'intelw', 'intelemw'):
+        return self
+
+    cxx = copy(self)
+    cxx.compiler_so = [cxx.compiler_cxx[0]] + \
+                      sanitize_cxx_flags(cxx.compiler_so[1:])
+    if (sys.platform.startswith(('aix', 'os400')) and
+            'ld_so_aix' in cxx.linker_so[0]):
+        # AIX needs the ld_so_aix script included with Python
+        cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \
+                        + cxx.linker_so[2:]
+    if sys.platform.startswith('os400'):
+        # This is required by IBM i 7.4 and earlier for PRId64 in printf() calls.
+        cxx.compiler_so.append('-D__STDC_FORMAT_MACROS')
+        # This works around a bug in gcc 10.3, which fails to handle TLS init.
+        cxx.compiler_so.append('-fno-extern-tls-init')
+        cxx.linker_so.append('-fno-extern-tls-init')
+    else:
+        cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:]
+    return cxx
+
+replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)
+
+compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler',
+                           "Intel C Compiler for 32-bit applications")
+compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler',
+                            "Intel C Itanium Compiler for Itanium-based applications")
+compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler',
+                             "Intel C Compiler for 64-bit applications")
+compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW',
+                            "Intel C Compiler for 32-bit applications on Windows")
+compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW',
+                              "Intel C Compiler for 64-bit applications on Windows")
+compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',
+                            "PathScale Compiler for SiCortex-based applications")
+compiler_class['arm'] = ('armccompiler', 'ArmCCompiler',
+                            "Arm C Compiler")
+compiler_class['fujitsu'] = ('fujitsuccompiler', 'FujitsuCCompiler',
+                            "Fujitsu C Compiler")
+
+ccompiler._default_compilers += (('linux.*', 'intel'),
+                                 ('linux.*', 'intele'),
+                                 ('linux.*', 'intelem'),
+                                 ('linux.*', 'pathcc'),
+                                 ('nt', 'intelw'),
+                                 ('nt', 'intelemw'))
+
+if sys.platform == 'win32':
+    compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',
+                                 "Mingw32 port of GNU C Compiler for Win32"\
+                                 "(for MSC built Python)")
+    if mingw32():
+        # On windows platforms, we want to default to mingw32 (gcc)
+        # because msvc can't build blitz stuff.
+        log.info('Setting mingw32 as default compiler for nt.')
+        ccompiler._default_compilers = (('nt', 'mingw32'),) \
+                                       + ccompiler._default_compilers
+
+
+_distutils_new_compiler = new_compiler
+def new_compiler (plat=None,
+                  compiler=None,
+                  verbose=None,
+                  dry_run=0,
+                  force=0):
+    # Try first C compilers from numpy.distutils.
+    if verbose is None:
+        verbose = log.get_threshold() <= log.INFO
+    if plat is None:
+        plat = os.name
+    try:
+        if compiler is None:
+            compiler = get_default_compiler(plat)
+        (module_name, class_name, long_description) = compiler_class[compiler]
+    except KeyError:
+        msg = "don't know how to compile C/C++ code on platform '%s'" % plat
+        if compiler is not None:
+            msg = msg + " with '%s' compiler" % compiler
+        raise DistutilsPlatformError(msg)
+    module_name = "numpy.distutils." + module_name
+    try:
+        __import__(module_name)
+    except ImportError as e:
+        msg = str(e)
+        log.info('%s in numpy.distutils; trying from distutils',
+                 str(msg))
+        module_name = module_name[6:]
+        try:
+            __import__(module_name)
+        except ImportError as e:
+            msg = str(e)
+            raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \
+                  module_name)
+    try:
+        module = sys.modules[module_name]
+        klass = vars(module)[class_name]
+    except KeyError:
+        raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " +
+               "in module '%s'") % (class_name, module_name))
+    compiler = klass(None, dry_run, force)
+    compiler.verbose = verbose
+    log.debug('new_compiler returns %s' % (klass))
+    return compiler
+
+ccompiler.new_compiler = new_compiler
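+# For example (hypothetical): after the reassignment above,
+# new_compiler(compiler='intelem') resolves to
+# numpy.distutils.intelccompiler.IntelEM64TCCompiler instead of raising the
+# stock distutils "don't know how to compile" error.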
+
+_distutils_gen_lib_options = gen_lib_options
+def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
+    # the version of this function provided by CPython allows the following
+    # to return lists, which are unpacked automatically:
+    # - compiler.runtime_library_dir_option
+    # our version extends the behavior to:
+    # - compiler.library_dir_option
+    # - compiler.library_option
+    # - compiler.find_library_file
+    r = _distutils_gen_lib_options(compiler, library_dirs,
+                                   runtime_library_dirs, libraries)
+    lib_opts = []
+    for i in r:
+        if is_sequence(i):
+            lib_opts.extend(list(i))
+        else:
+            lib_opts.append(i)
+    return lib_opts
+ccompiler.gen_lib_options = gen_lib_options
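+# For example (hypothetical): if compiler.runtime_library_dir_option() returns
+# the list ['-Wl,-rpath', '-Wl,/opt/lib'], both items are spliced into the flat
+# option list instead of being appended as one nested list.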
+
+# Also fix up the various compiler modules, which do
+# from distutils.ccompiler import gen_lib_options
+# Don't bother with mwerks, as we don't support Classic Mac.
+for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:
+    _m = sys.modules.get('distutils.' + _cc + 'compiler')
+    if _m is not None:
+        setattr(_m, 'gen_lib_options', gen_lib_options)
+
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/ccompiler_opt.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/ccompiler_opt.py
new file mode 100644
index 00000000..37a5368b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/ccompiler_opt.py
@@ -0,0 +1,2668 @@
+"""Provides the `CCompilerOpt` class, used for handling the CPU/hardware
+optimization, starting from parsing the command arguments, to managing the
+relation between the CPU baseline and dispatch-able features,
+also generating the required C headers and ending with compiling
+the sources with proper compiler's flags.
+
+`CCompilerOpt` doesn't provide runtime detection for the CPU features,
+instead only focuses on the compiler side, but it creates abstract C headers
+that can be used later for the final runtime dispatching process."""
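+
+# A typical usage sketch (hypothetical names; assumes this module's
+# `new_ccompiler_opt` helper and an existing `CCompiler` instance `cc`):
+#
+#   opt = new_ccompiler_opt(cc, dispatch_hpath,
+#                           cpu_baseline="min", cpu_dispatch="max")
+#   objects += opt.try_dispatch(dispatch_sources)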
+
+import atexit
+import inspect
+import os
+import pprint
+import re
+import subprocess
+import textwrap
+
+class _Config:
+    """An abstract class holds all configurable attributes of `CCompilerOpt`,
+    these class attributes can be used to change the default behavior
+    of `CCompilerOpt` in order to fit other requirements.
+
+    Attributes
+    ----------
+    conf_nocache : bool
+        Set True to disable memory and file cache.
+        Default is False.
+
+    conf_noopt : bool
+        Set True to force optimization to be disabled;
+        in this case `CCompilerOpt` still generates all
+        expected headers in order not to break the build.
+        Default is False.
+
+    conf_cache_factors : list
+        Add extra factors to the primary caching factors. The caching factors
+        are used to determine whether changes have happened that require
+        discarding the cache and rebuilding it. The primary factors are the
+        arguments of `CCompilerOpt` and `CCompiler`'s properties (type, flags, etc.).
+        Default is a list of two items: the time of last modification of
+        `ccompiler_opt` and the value of the attribute "conf_noopt".
+
+    conf_tmp_path : str
+        The path of the temporary directory. Default is an auto-created
+        temporary directory via ``tempfile.mkdtemp()``.
+
+    conf_check_path : str
+        The path of the testing files. Each added CPU feature must have a
+        **C** source file that contains at least one intrinsic or instruction
+        related to this feature, so it can be tested against the compiler.
+        Default is ``./distutils/checks``.
+
+    conf_target_groups : dict
+        Extra tokens that can be reached from dispatch-able sources through
+        the special mark ``@targets``. Default is an empty dictionary.
+
+        **Notes**:
+            - case-insensitive for tokens and group names
+            - the '#' sign must appear at the beginning of the group name and only within ``@targets``
+
+        **Example**:
+            .. code-block:: console
+
+                $ "@targets #avx_group other_tokens" > group_inside.c
+
+            >>> CCompilerOpt.conf_target_groups["avx_group"] = \\
+            "$werror $maxopt avx2 avx512f avx512_skx"
+            >>> cco = CCompilerOpt(cc_instance)
+            >>> cco.try_dispatch(["group_inside.c"])
+
+    conf_c_prefix : str
+        The prefix of public C definitions. Default is ``"NPY_"``.
+
+    conf_c_prefix_ : str
+        The prefix of internal C definitions. Default is ``"NPY__"``.
+
+    conf_cc_flags : dict
+        Nested dictionaries defining several compiler flags
+        linked to some major functions; the main key
+        represents the compiler name and sub-keys represent
+        flag names. The default already covers all supported
+        **C** compilers.
+
+        Sub-keys explained as follows:
+
+        "native": str or None
+            used by argument option `native`, to detect the current
+            machine's support via the compiler.
+        "werror": str or None
+            used to treat warnings as errors when testing CPU features
+            against the compiler, and also for the target policy `$werror`
+            via dispatch-able sources.
+        "maxopt": str or None
+            used for the target policy '$maxopt'; the value should
+            contain the maximum acceptable optimization level for the
+            compiler, e.g. `'-O3'` in gcc.
+
+        **Notes**:
+            * case-sensitive for compiler names and flags
+            * use a space to separate multiple flags
+            * every flag is tested against the compiler and will be skipped
+              if it's not applicable.
+
+    conf_min_features : dict
+        A dictionary defining the CPU features used for
+        argument option `'min'`; the key represents the CPU architecture
+        name, e.g. `'x86'`. Default values provide the best effort
+        across a wide range of user platforms.
+
+        **Note**: case-sensitive for architecture names.
+
+    conf_features : dict
+        Nested dictionaries used for identifying the CPU features.
+        The primary key represents a feature name or a group name
+        that gathers several features. Default values cover all
+        supported features but without the major options like "flags";
+        these undefined options are handled by the method `conf_features_partial()`.
+        The default value covers almost all CPU features for *X86*, *IBM/Power64*
+        and *ARM 7/8*.
+
+        Sub-keys explained as follows:
+
+        "implies" : str or list, optional,
+            List of CPU feature names to be implied by it,
+            the feature name must be defined within `conf_features`.
+            Default is None.
+
+        "flags": str or list, optional
+            List of compiler flags. Default is None.
+
+        "detect": str or list, optional
+            List of CPU feature names that are required to be detected
+            at runtime. By default, it's the feature name, or the features
+            in "group" if it's specified.
+
+        "implies_detect": bool, optional
+            If True, all "detect" of implied features will be combined.
+            Default is True. see `feature_detect()`.
+
+        "group": str or list, optional
+            Same as "implies" but doesn't require the feature name to be
+            defined within `conf_features`.
+
+        "interest": int, required
+            a key for sorting CPU features
+
+        "headers": str or list, optional
+            intrinsics C header file
+
+        "disable": str, optional
+            Force-disable the feature; the string value should contain the
+            reason for disabling it.
+
+        "autovec": bool or None, optional
+            True or False to declare whether the CPU feature can be
+            auto-vectorized by the compiler.
+            By default (None), it is treated as True if the feature contains at
+            least one applicable flag. See `feature_can_autovec()`.
+
+        "extra_checks": str or list, optional
+            Extra test case names for the CPU feature that need to be tested
+            against the compiler.
+
+            Each test case must have a C file named ``extra_xxxx.c``, where
+            ``xxxx`` is the case name in lower case, under 'conf_check_path'.
+            It should contain at least one intrinsic or function related to the test case.
+
+            If the compiler is able to successfully compile the C file, then `CCompilerOpt`
+            will add a C ``#define`` for it into the main dispatch header, e.g.
+            ``#define {conf_c_prefix}_XXXX`` where ``XXXX`` is the case name in upper case.
+
+        **NOTES**:
+            * a space can be used as a separator with options that support "str or list"
+            * case-sensitive for all values; feature names must be in upper case.
+            * if flags aren't applicable, they will be skipped rather than disabling the
+              CPU feature
+            * the CPU feature will be disabled if the compiler fails to compile
+              the test file
+    """
+    conf_nocache = False
+    conf_noopt = False
+    conf_cache_factors = None
+    conf_tmp_path = None
+    conf_check_path = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)), "checks"
+    )
+    conf_target_groups = {}
+    conf_c_prefix = 'NPY_'
+    conf_c_prefix_ = 'NPY__'
+    conf_cc_flags = dict(
+        gcc = dict(
+            # native should always fail on arm and ppc64,
+            # native usually works only with x86
+            native = '-march=native',
+            opt = '-O3',
+            werror = '-Werror',
+        ),
+        clang = dict(
+            native = '-march=native',
+            opt = "-O3",
+            # One of the following flags needs to be applicable for Clang to
+            # guarantee the sanity of the testing process, however in certain
+            # cases `-Werror` gets skipped during the availability test due to
+            # "unused arguments" warnings.
+            # see https://github.com/numpy/numpy/issues/19624
+            werror = '-Werror=switch -Werror',
+        ),
+        icc = dict(
+            native = '-xHost',
+            opt = '-O3',
+            werror = '-Werror',
+        ),
+        iccw = dict(
+            native = '/QxHost',
+            opt = '/O3',
+            werror = '/Werror',
+        ),
+        msvc = dict(
+            native = None,
+            opt = '/O2',
+            werror = '/WX',
+        ),
+        fcc = dict(
+            native = '-mcpu=a64fx',
+            opt = None,
+            werror = None,
+        )
+    )
+    conf_min_features = dict(
+        x86 = "SSE SSE2",
+        x64 = "SSE SSE2 SSE3",
+        ppc64 = '', # play it safe
+        ppc64le = "VSX VSX2",
+        s390x = '',
+        armhf = '', # play it safe
+        aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD"
+    )
+    conf_features = dict(
+        # X86
+        SSE = dict(
+            interest=1, headers="xmmintrin.h",
+            # enabling SSE without SSE2 is useless; also,
+            # it's non-optional for x86_64
+            implies="SSE2"
+        ),
+        SSE2   = dict(interest=2, implies="SSE", headers="emmintrin.h"),
+        SSE3   = dict(interest=3, implies="SSE2", headers="pmmintrin.h"),
+        SSSE3  = dict(interest=4, implies="SSE3", headers="tmmintrin.h"),
+        SSE41  = dict(interest=5, implies="SSSE3", headers="smmintrin.h"),
+        POPCNT = dict(interest=6, implies="SSE41", headers="popcntintrin.h"),
+        SSE42  = dict(interest=7, implies="POPCNT"),
+        AVX    = dict(
+            interest=8, implies="SSE42", headers="immintrin.h",
+            implies_detect=False
+        ),
+        XOP    = dict(interest=9, implies="AVX", headers="x86intrin.h"),
+        FMA4   = dict(interest=10, implies="AVX", headers="x86intrin.h"),
+        F16C   = dict(interest=11, implies="AVX"),
+        FMA3   = dict(interest=12, implies="F16C"),
+        AVX2   = dict(interest=13, implies="F16C"),
+        AVX512F = dict(
+            interest=20, implies="FMA3 AVX2", implies_detect=False,
+            extra_checks="AVX512F_REDUCE"
+        ),
+        AVX512CD = dict(interest=21, implies="AVX512F"),
+        AVX512_KNL = dict(
+            interest=40, implies="AVX512CD", group="AVX512ER AVX512PF",
+            detect="AVX512_KNL", implies_detect=False
+        ),
+        AVX512_KNM = dict(
+            interest=41, implies="AVX512_KNL",
+            group="AVX5124FMAPS AVX5124VNNIW AVX512VPOPCNTDQ",
+            detect="AVX512_KNM", implies_detect=False
+        ),
+        AVX512_SKX = dict(
+            interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ",
+            detect="AVX512_SKX", implies_detect=False,
+            extra_checks="AVX512BW_MASK AVX512DQ_MASK"
+        ),
+        AVX512_CLX = dict(
+            interest=43, implies="AVX512_SKX", group="AVX512VNNI",
+            detect="AVX512_CLX"
+        ),
+        AVX512_CNL = dict(
+            interest=44, implies="AVX512_SKX", group="AVX512IFMA AVX512VBMI",
+            detect="AVX512_CNL", implies_detect=False
+        ),
+        AVX512_ICL = dict(
+            interest=45, implies="AVX512_CLX AVX512_CNL",
+            group="AVX512VBMI2 AVX512BITALG AVX512VPOPCNTDQ",
+            detect="AVX512_ICL", implies_detect=False
+        ),
+        AVX512_SPR = dict(
+            interest=46, implies="AVX512_ICL", group="AVX512FP16",
+            detect="AVX512_SPR", implies_detect=False
+        ),
+        # IBM/Power
+        ## Power7/ISA 2.06
+        VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"),
+        ## Power8/ISA 2.07
+        VSX2 = dict(interest=2, implies="VSX", implies_detect=False),
+        ## Power9/ISA 3.00
+        VSX3 = dict(interest=3, implies="VSX2", implies_detect=False,
+                    extra_checks="VSX3_HALF_DOUBLE"),
+        ## Power10/ISA 3.1
+        VSX4 = dict(interest=4, implies="VSX3", implies_detect=False,
+                    extra_checks="VSX4_MMA"),
+        # IBM/Z
+        ## VX(z13) support
+        VX = dict(interest=1, headers="vecintrin.h"),
+        ## Vector-Enhancements Facility
+        VXE = dict(interest=2, implies="VX", implies_detect=False),
+        ## Vector-Enhancements Facility 2
+        VXE2 = dict(interest=3, implies="VXE", implies_detect=False),
+        # ARM
+        NEON  = dict(interest=1, headers="arm_neon.h"),
+        NEON_FP16 = dict(interest=2, implies="NEON"),
+        ## FMA
+        NEON_VFPV4 = dict(interest=3, implies="NEON_FP16"),
+        ## Advanced SIMD
+        ASIMD = dict(interest=4, implies="NEON_FP16 NEON_VFPV4", implies_detect=False),
+        ## ARMv8.2 half-precision & vector arithm
+        ASIMDHP = dict(interest=5, implies="ASIMD"),
+        ## ARMv8.2 dot product
+        ASIMDDP = dict(interest=6, implies="ASIMD"),
+        ## ARMv8.2 Single & half-precision Multiply
+        ASIMDFHM = dict(interest=7, implies="ASIMDHP"),
+    )
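+    # For instance (per the table above): AVX2 implies F16C, which implies
+    # AVX, which implies SSE42, and so on down the chain to SSE2/SSE.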
+    def conf_features_partial(self):
+        """Return a dictionary of supported CPU features by the platform,
+        and accumulate the rest of undefined options in `conf_features`,
+        the returned dict has same rules and notes in
+        class attribute `conf_features`, also its override
+        any options that been set in 'conf_features'.
+        """
+        if self.cc_noopt:
+            # optimization is disabled
+            return {}
+
+        on_x86 = self.cc_on_x86 or self.cc_on_x64
+        is_unix = self.cc_is_gcc or self.cc_is_clang or self.cc_is_fcc
+
+        if on_x86 and is_unix: return dict(
+            SSE    = dict(flags="-msse"),
+            SSE2   = dict(flags="-msse2"),
+            SSE3   = dict(flags="-msse3"),
+            SSSE3  = dict(flags="-mssse3"),
+            SSE41  = dict(flags="-msse4.1"),
+            POPCNT = dict(flags="-mpopcnt"),
+            SSE42  = dict(flags="-msse4.2"),
+            AVX    = dict(flags="-mavx"),
+            F16C   = dict(flags="-mf16c"),
+            XOP    = dict(flags="-mxop"),
+            FMA4   = dict(flags="-mfma4"),
+            FMA3   = dict(flags="-mfma"),
+            AVX2   = dict(flags="-mavx2"),
+            AVX512F = dict(flags="-mavx512f -mno-mmx"),
+            AVX512CD = dict(flags="-mavx512cd"),
+            AVX512_KNL = dict(flags="-mavx512er -mavx512pf"),
+            AVX512_KNM = dict(
+                flags="-mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq"
+            ),
+            AVX512_SKX = dict(flags="-mavx512vl -mavx512bw -mavx512dq"),
+            AVX512_CLX = dict(flags="-mavx512vnni"),
+            AVX512_CNL = dict(flags="-mavx512ifma -mavx512vbmi"),
+            AVX512_ICL = dict(
+                flags="-mavx512vbmi2 -mavx512bitalg -mavx512vpopcntdq"
+            ),
+            AVX512_SPR = dict(flags="-mavx512fp16"),
+        )
+        if on_x86 and self.cc_is_icc: return dict(
+            SSE    = dict(flags="-msse"),
+            SSE2   = dict(flags="-msse2"),
+            SSE3   = dict(flags="-msse3"),
+            SSSE3  = dict(flags="-mssse3"),
+            SSE41  = dict(flags="-msse4.1"),
+            POPCNT = {},
+            SSE42  = dict(flags="-msse4.2"),
+            AVX    = dict(flags="-mavx"),
+            F16C   = {},
+            XOP    = dict(disable="Intel Compiler doesn't support it"),
+            FMA4   = dict(disable="Intel Compiler doesn't support it"),
+            # Intel Compiler doesn't support AVX2 or FMA3 independently
+            FMA3 = dict(
+                implies="F16C AVX2", flags="-march=core-avx2"
+            ),
+            AVX2 = dict(implies="FMA3", flags="-march=core-avx2"),
+            # Intel Compiler doesn't support AVX512F or AVX512CD independently
+            AVX512F = dict(
+                implies="AVX2 AVX512CD", flags="-march=common-avx512"
+            ),
+            AVX512CD = dict(
+                implies="AVX2 AVX512F", flags="-march=common-avx512"
+            ),
+            AVX512_KNL = dict(flags="-xKNL"),
+            AVX512_KNM = dict(flags="-xKNM"),
+            AVX512_SKX = dict(flags="-xSKYLAKE-AVX512"),
+            AVX512_CLX = dict(flags="-xCASCADELAKE"),
+            AVX512_CNL = dict(flags="-xCANNONLAKE"),
+            AVX512_ICL = dict(flags="-xICELAKE-CLIENT"),
+            AVX512_SPR = dict(disable="Not supported yet")
+        )
+        if on_x86 and self.cc_is_iccw: return dict(
+            SSE    = dict(flags="/arch:SSE"),
+            SSE2   = dict(flags="/arch:SSE2"),
+            SSE3   = dict(flags="/arch:SSE3"),
+            SSSE3  = dict(flags="/arch:SSSE3"),
+            SSE41  = dict(flags="/arch:SSE4.1"),
+            POPCNT = {},
+            SSE42  = dict(flags="/arch:SSE4.2"),
+            AVX    = dict(flags="/arch:AVX"),
+            F16C   = {},
+            XOP    = dict(disable="Intel Compiler doesn't support it"),
+            FMA4   = dict(disable="Intel Compiler doesn't support it"),
+            # Intel Compiler doesn't support FMA3 or AVX2 independently
+            FMA3 = dict(
+                implies="F16C AVX2", flags="/arch:CORE-AVX2"
+            ),
+            AVX2 = dict(
+                implies="FMA3", flags="/arch:CORE-AVX2"
+            ),
+            # Intel Compiler doesn't support AVX512F or AVX512CD independently
+            AVX512F = dict(
+                implies="AVX2 AVX512CD", flags="/Qx:COMMON-AVX512"
+            ),
+            AVX512CD = dict(
+                implies="AVX2 AVX512F", flags="/Qx:COMMON-AVX512"
+            ),
+            AVX512_KNL = dict(flags="/Qx:KNL"),
+            AVX512_KNM = dict(flags="/Qx:KNM"),
+            AVX512_SKX = dict(flags="/Qx:SKYLAKE-AVX512"),
+            AVX512_CLX = dict(flags="/Qx:CASCADELAKE"),
+            AVX512_CNL = dict(flags="/Qx:CANNONLAKE"),
+            AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT"),
+            AVX512_SPR = dict(disable="Not supported yet")
+        )
+        if on_x86 and self.cc_is_msvc: return dict(
+            SSE = dict(flags="/arch:SSE") if self.cc_on_x86 else {},
+            SSE2 = dict(flags="/arch:SSE2") if self.cc_on_x86 else {},
+            SSE3   = {},
+            SSSE3  = {},
+            SSE41  = {},
+            POPCNT = dict(headers="nmmintrin.h"),
+            SSE42  = {},
+            AVX    = dict(flags="/arch:AVX"),
+            F16C   = {},
+            XOP    = dict(headers="ammintrin.h"),
+            FMA4   = dict(headers="ammintrin.h"),
+            # MSVC doesn't support FMA3 or AVX2 independently
+            FMA3 = dict(
+                implies="F16C AVX2", flags="/arch:AVX2"
+            ),
+            AVX2 = dict(
+                implies="F16C FMA3", flags="/arch:AVX2"
+            ),
+            # MSVC doesn't support AVX512F or AVX512CD independently,
+            # always generating instructions belonging to (VL/VW/DQ)
+            AVX512F = dict(
+                implies="AVX2 AVX512CD AVX512_SKX", flags="/arch:AVX512"
+            ),
+            AVX512CD = dict(
+                implies="AVX512F AVX512_SKX", flags="/arch:AVX512"
+            ),
+            AVX512_KNL = dict(
+                disable="MSVC compiler doesn't support it"
+            ),
+            AVX512_KNM = dict(
+                disable="MSVC compiler doesn't support it"
+            ),
+            AVX512_SKX = dict(flags="/arch:AVX512"),
+            AVX512_CLX = {},
+            AVX512_CNL = {},
+            AVX512_ICL = {},
+            AVX512_SPR = dict(
+                disable="MSVC compiler doesn't support it"
+            )
+        )
+
+        on_power = self.cc_on_ppc64le or self.cc_on_ppc64
+        if on_power:
+            partial = dict(
+                VSX = dict(
+                    implies=("VSX2" if self.cc_on_ppc64le else ""),
+                    flags="-mvsx"
+                ),
+                VSX2 = dict(
+                    flags="-mcpu=power8", implies_detect=False
+                ),
+                VSX3 = dict(
+                    flags="-mcpu=power9 -mtune=power9", implies_detect=False
+                ),
+                VSX4 = dict(
+                    flags="-mcpu=power10 -mtune=power10", implies_detect=False
+                )
+            )
+            if self.cc_is_clang:
+                partial["VSX"]["flags"]  = "-maltivec -mvsx"
+                partial["VSX2"]["flags"] = "-mcpu=power8"
+                partial["VSX3"]["flags"] = "-mcpu=power9"
+                partial["VSX4"]["flags"] = "-mcpu=power10"
+
+            return partial
+
+        on_zarch = self.cc_on_s390x
+        if on_zarch:
+            partial = dict(
+                VX = dict(
+                    flags="-march=arch11 -mzvector"
+                ),
+                VXE = dict(
+                    flags="-march=arch12", implies_detect=False
+                ),
+                VXE2 = dict(
+                    flags="-march=arch13", implies_detect=False
+                )
+            )
+
+            return partial
+
+
+        if self.cc_on_aarch64 and is_unix: return dict(
+            NEON = dict(
+                implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True
+            ),
+            NEON_FP16 = dict(
+                implies="NEON NEON_VFPV4 ASIMD", autovec=True
+            ),
+            NEON_VFPV4 = dict(
+                implies="NEON NEON_FP16 ASIMD", autovec=True
+            ),
+            ASIMD = dict(
+                implies="NEON NEON_FP16 NEON_VFPV4", autovec=True
+            ),
+            ASIMDHP = dict(
+                flags="-march=armv8.2-a+fp16"
+            ),
+            ASIMDDP = dict(
+                flags="-march=armv8.2-a+dotprod"
+            ),
+            ASIMDFHM = dict(
+                flags="-march=armv8.2-a+fp16fml"
+            ),
+        )
+        if self.cc_on_armhf and is_unix: return dict(
+            NEON = dict(
+                flags="-mfpu=neon"
+            ),
+            NEON_FP16 = dict(
+                flags="-mfpu=neon-fp16 -mfp16-format=ieee"
+            ),
+            NEON_VFPV4 = dict(
+                flags="-mfpu=neon-vfpv4",
+            ),
+            ASIMD = dict(
+                flags="-mfpu=neon-fp-armv8 -march=armv8-a+simd",
+            ),
+            ASIMDHP = dict(
+                flags="-march=armv8.2-a+fp16"
+            ),
+            ASIMDDP = dict(
+                flags="-march=armv8.2-a+dotprod",
+            ),
+            ASIMDFHM = dict(
+                flags="-march=armv8.2-a+fp16fml"
+            )
+        )
+        # TODO: ARM MSVC
+        return {}
+
+    def __init__(self):
+        if self.conf_tmp_path is None:
+            import shutil
+            import tempfile
+            tmp = tempfile.mkdtemp()
+            def rm_temp():
+                try:
+                    shutil.rmtree(tmp)
+                except OSError:
+                    pass
+            atexit.register(rm_temp)
+            self.conf_tmp_path = tmp
+
+        if self.conf_cache_factors is None:
+            self.conf_cache_factors = [
+                os.path.getmtime(__file__),
+                self.conf_nocache
+            ]
+
+class _Distutils:
+    """A helper class that provides a collection of fundamental methods
+    implemented on top of Python and NumPy Distutils.
+
+    The idea behind this class is to gather all methods that may
+    need to be overridden when 'CCompilerOpt' is reused in an environment
+    different from NumPy's.
+
+    Parameters
+    ----------
+    ccompiler : `CCompiler`
+        The compiler instance returned from `distutils.ccompiler.new_compiler()`.
+    """
+    def __init__(self, ccompiler):
+        self._ccompiler = ccompiler
+
+    def dist_compile(self, sources, flags, ccompiler=None, **kwargs):
+        """Wrap CCompiler.compile()"""
+        assert(isinstance(sources, list))
+        assert(isinstance(flags, list))
+        flags = kwargs.pop("extra_postargs", []) + flags
+        if not ccompiler:
+            ccompiler = self._ccompiler
+
+        return ccompiler.compile(sources, extra_postargs=flags, **kwargs)
+
+    def dist_test(self, source, flags, macros=[]):
+        """Return True if 'CCompiler.compile()' able to compile
+        a source file with certain flags.
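+
+        For example (hypothetical)::
+
+            self.dist_test('checks/cpu_avx2.c', ['-mavx2'])
+            # -> True only if the compiler accepts both the flag and the source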
+        """
+        assert(isinstance(source, str))
+        from distutils.errors import CompileError
+        cc = self._ccompiler
+        bk_spawn = getattr(cc, 'spawn', None)
+        if bk_spawn:
+            cc_type = getattr(self._ccompiler, "compiler_type", "")
+            if cc_type in ("msvc",):
+                setattr(cc, 'spawn', self._dist_test_spawn_paths)
+            else:
+                setattr(cc, 'spawn', self._dist_test_spawn)
+        test = False
+        try:
+            self.dist_compile(
+                [source], flags, macros=macros, output_dir=self.conf_tmp_path
+            )
+            test = True
+        except CompileError as e:
+            self.dist_log(str(e), stderr=True)
+        if bk_spawn:
+            setattr(cc, 'spawn', bk_spawn)
+        return test
+
+    def dist_info(self):
+        """
+        Return a tuple containing info about (platform, compiler, extra_args),
+        required by the abstract class '_CCompiler' for discovering the
+        platform environment. This is also used as a cache factor in order
+        to detect any changes happening from outside.
+        """
+        if hasattr(self, "_dist_info"):
+            return self._dist_info
+
+        cc_type = getattr(self._ccompiler, "compiler_type", '')
+        if cc_type in ("intelem", "intelemw"):
+            platform = "x86_64"
+        elif cc_type in ("intel", "intelw", "intele"):
+            platform = "x86"
+        else:
+            from distutils.util import get_platform
+            platform = get_platform()
+
+        cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", ''))
+        if not cc_type or cc_type == "unix":
+            if hasattr(cc_info, "__iter__"):
+                compiler = cc_info[0]
+            else:
+                compiler = str(cc_info)
+        else:
+            compiler = cc_type
+
+        if hasattr(cc_info, "__iter__") and len(cc_info) > 1:
+            extra_args = ' '.join(cc_info[1:])
+        else:
+            extra_args  = os.environ.get("CFLAGS", "")
+            extra_args += os.environ.get("CPPFLAGS", "")
+
+        self._dist_info = (platform, compiler, extra_args)
+        return self._dist_info
+
+    @staticmethod
+    def dist_error(*args):
+        """Raise a compiler error"""
+        from distutils.errors import CompileError
+        raise CompileError(_Distutils._dist_str(*args))
+
+    @staticmethod
+    def dist_fatal(*args):
+        """Raise a distutils error"""
+        from distutils.errors import DistutilsError
+        raise DistutilsError(_Distutils._dist_str(*args))
+
+    @staticmethod
+    def dist_log(*args, stderr=False):
+        """Print a console message"""
+        from numpy.distutils import log
+        out = _Distutils._dist_str(*args)
+        if stderr:
+            log.warn(out)
+        else:
+            log.info(out)
+
+    @staticmethod
+    def dist_load_module(name, path):
+        """Load a module from file, required by the abstract class '_Cache'."""
+        from .misc_util import exec_mod_from_location
+        try:
+            return exec_mod_from_location(name, path)
+        except Exception as e:
+            _Distutils.dist_log(e, stderr=True)
+        return None
+
+    @staticmethod
+    def _dist_str(*args):
+        """Return a string to print by log and errors."""
+        def to_str(arg):
+            if not isinstance(arg, str) and hasattr(arg, '__iter__'):
+                ret = []
+                for a in arg:
+                    ret.append(to_str(a))
+                return '('+ ' '.join(ret) + ')'
+            return str(arg)
+
+        stack = inspect.stack()[2]
+        start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno)
+        out = ' '.join([
+            to_str(a)
+            for a in (*args,)
+        ])
+        return start + out
+
+    def _dist_test_spawn_paths(self, cmd, display=None):
+        """
+        Fix the MSVC SDK environment path the same way distutils does;
+        without it we get "c1: fatal error C1356: unable to find mspdbcore.dll".
+        """
+        if not hasattr(self._ccompiler, "_paths"):
+            self._dist_test_spawn(cmd)
+            return
+        old_path = os.getenv("path")
+        try:
+            os.environ["path"] = self._ccompiler._paths
+            self._dist_test_spawn(cmd)
+        finally:
+            os.environ["path"] = old_path
+
+    _dist_warn_regex = re.compile(
+        # intel and msvc compilers don't raise
+        # fatal errors when flags are wrong or unsupported
+        ".*("
+        "warning D9002|"  # msvc, it should be work with any language.
+        "invalid argument for option" # intel
+        ").*"
+    )
+    @staticmethod
+    def _dist_test_spawn(cmd, display=None):
+        try:
+            o = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
+                                        text=True)
+            if o and re.match(_Distutils._dist_warn_regex, o):
+                _Distutils.dist_error(
+                    "Flags in command", cmd ,"aren't supported by the compiler"
+                    ", output -> \n%s" % o
+                )
+        except subprocess.CalledProcessError as exc:
+            o = exc.output
+            s = exc.returncode
+        except OSError as e:
+            o = e
+            s = 127
+        else:
+            return None
+        _Distutils.dist_error(
+            "Command", cmd, "failed with exit status %d output -> \n%s" % (
+            s, o
+        ))
+
+_share_cache = {}
+class _Cache:
+    """An abstract class handles caching functionality, provides two
+    levels of caching, in-memory by share instances attributes among
+    each other and by store attributes into files.
+
+    **Note**:
+        any attributes that start with ``_`` or ``conf_`` will be ignored.
+
+    Parameters
+    ----------
+    cache_path : str or None
+        The path of the cache file; if None, file-based caching is disabled.
+
+    *factors :
+        The caching factors to utilize in addition to `conf_cache_factors`.
+
+    Attributes
+    ----------
+    cache_private : set
+        Holds the attributes that should be skipped by the "in-memory cache".
+
+    cache_infile : bool
+        Set while initializing this class, to indicate whether the cache
+        could be loaded from the file specified by 'cache_path'.
+    """
+
+    # skip attributes from cache
+    _cache_ignore = re.compile("^(_|conf_)")
+
+    def __init__(self, cache_path=None, *factors):
+        self.cache_me = {}
+        self.cache_private = set()
+        self.cache_infile = False
+        self._cache_path = None
+
+        if self.conf_nocache:
+            self.dist_log("cache is disabled by `Config`")
+            return
+
+        self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors)
+        self._cache_path = cache_path
+        if cache_path:
+            if os.path.exists(cache_path):
+                self.dist_log("load cache from file ->", cache_path)
+                cache_mod = self.dist_load_module("cache", cache_path)
+                if not cache_mod:
+                    self.dist_log(
+                        "unable to load the cache file as a module",
+                        stderr=True
+                    )
+                elif not hasattr(cache_mod, "hash") or \
+                     not hasattr(cache_mod, "data"):
+                    self.dist_log("invalid cache file", stderr=True)
+                elif self._cache_hash == cache_mod.hash:
+                    self.dist_log("hit the file cache")
+                    for attr, val in cache_mod.data.items():
+                        setattr(self, attr, val)
+                    self.cache_infile = True
+                else:
+                    self.dist_log("miss the file cache")
+
+        if not self.cache_infile:
+            other_cache = _share_cache.get(self._cache_hash)
+            if other_cache:
+                self.dist_log("hit the memory cache")
+                for attr, val in other_cache.__dict__.items():
+                    if attr in other_cache.cache_private or \
+                               re.match(self._cache_ignore, attr):
+                        continue
+                    setattr(self, attr, val)
+
+        _share_cache[self._cache_hash] = self
+        atexit.register(self.cache_flush)
+
+    def __del__(self):
+        for h, o in _share_cache.items():
+            if o == self:
+                _share_cache.pop(h)
+                break
+
+    def cache_flush(self):
+        """
+        Force update the cache.
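+
+        A sketch of the written cache-file layout (the hash value below is
+        hypothetical); the file is an importable Python module::
+
+            # AUTOGENERATED DON'T EDIT
+            hash = 3121867232
+            data = {...}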
+        """
+        if not self._cache_path:
+            return
+        # TODO: don't write if the cache doesn't change
+        self.dist_log("write cache to path ->", self._cache_path)
+        cdict = self.__dict__.copy()
+        for attr in self.__dict__.keys():
+            if re.match(self._cache_ignore, attr):
+                cdict.pop(attr)
+
+        d = os.path.dirname(self._cache_path)
+        if not os.path.exists(d):
+            os.makedirs(d)
+
+        repr_dict = pprint.pformat(cdict, compact=True)
+        with open(self._cache_path, "w") as f:
+            f.write(textwrap.dedent("""\
+            # AUTOGENERATED DON'T EDIT
+            # Please make changes to the code generator \
+            (distutils/ccompiler_opt.py)
+            hash = {}
+            data = \\
+            """).format(self._cache_hash))
+            f.write(repr_dict)
+
+    def cache_hash(self, *factors):
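+        """
+        Compute a 32-bit hash of the string form of the given factors,
+        using the sdbm string-hash recurrence
+        ``hash = ord(c) + (hash << 6) + (hash << 16) - hash``,
+        masked to 32 bits.
+        """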
+        # is there a built-in non-crypto hash?
+        # sdbm
+        chash = 0
+        for f in factors:
+            for char in str(f):
+                chash  = ord(char) + (chash << 6) + (chash << 16) - chash
+                chash &= 0xFFFFFFFF
+        return chash
+
+    @staticmethod
+    def me(cb):
+        """
+        A static method that can be used as a decorator to
+        dynamically cache the results of certain methods.
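+
+        A minimal sketch of the intended use (as applied to methods such as
+        `cc_test_flags` below)::
+
+            @_Cache.me
+            def cc_test_flags(self, flags):
+                ...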
+        """
+        def cache_wrap_me(self, *args, **kwargs):
+            # good for normal args
+            cache_key = str((
+                cb.__name__, *args, *kwargs.keys(), *kwargs.values()
+            ))
+            if cache_key in self.cache_me:
+                return self.cache_me[cache_key]
+            ccb = cb(self, *args, **kwargs)
+            self.cache_me[cache_key] = ccb
+            return ccb
+        return cache_wrap_me
+
+class _CCompiler:
+    """A helper class for `CCompilerOpt` containing all utilities that
+    related to the fundamental compiler's functions.
+
+    Attributes
+    ----------
+    cc_on_x86 : bool
+        True when the target architecture is 32-bit x86
+    cc_on_x64 : bool
+        True when the target architecture is 64-bit x86
+    cc_on_ppc64 : bool
+        True when the target architecture is 64-bit big-endian powerpc
+    cc_on_ppc64le : bool
+        True when the target architecture is 64-bit little-endian powerpc
+    cc_on_s390x : bool
+        True when the target architecture is IBM/ZARCH on linux
+    cc_on_armhf : bool
+        True when the target architecture is 32-bit ARMv7+
+    cc_on_aarch64 : bool
+        True when the target architecture is 64-bit Armv8-a+
+    cc_on_noarch : bool
+        True when the target architecture is unknown or not supported
+    cc_is_gcc : bool
+        True if the compiler is GNU or
+        if the compiler is unknown
+    cc_is_clang : bool
+        True if the compiler is Clang
+    cc_is_icc : bool
+        True if the compiler is Intel compiler (unix like)
+    cc_is_iccw : bool
+        True if the compiler is Intel compiler (msvc like)
+    cc_is_msvc : bool
+        True if the compiler is Microsoft Visual C++
+    cc_is_nocc : bool
+        True if the compiler isn't supported directly,
+        Note: that causes a fall-back to gcc
+    cc_has_debug : bool
+        True if the compiler has debug flags
+    cc_has_native : bool
+        True if the compiler has native flags
+    cc_noopt : bool
+        True if the compiler has definition 'DISABLE_OPT*',
+        or 'cc_on_noarch' is True
+    cc_march : str
+        The target architecture name, or "unknown" if
+        the architecture isn't supported
+    cc_name : str
+        The compiler name, or "unknown" if the compiler isn't supported
+    cc_flags : dict
+        Dictionary containing the initialized flags of `_Config.conf_cc_flags`
+    """
+    def __init__(self):
+        if hasattr(self, "cc_is_cached"):
+            return
+        #      attr            regex        compiler-expression
+        detect_arch = (
+            ("cc_on_x64",      ".*(x|x86_|amd)64.*", ""),
+            ("cc_on_x86",      ".*(win32|x86|i386|i686).*", ""),
+            ("cc_on_ppc64le",  ".*(powerpc|ppc)64(el|le).*|.*powerpc.*",
+                                          "defined(__powerpc64__) && "
+                                          "defined(__LITTLE_ENDIAN__)"),
+            ("cc_on_ppc64",    ".*(powerpc|ppc).*|.*powerpc.*",
+                                          "defined(__powerpc64__) && "
+                                          "defined(__BIG_ENDIAN__)"),
+            ("cc_on_aarch64",  ".*(aarch64|arm64).*", ""),
+            ("cc_on_armhf",    ".*arm.*", "defined(__ARM_ARCH_7__) || "
+                                          "defined(__ARM_ARCH_7A__)"),
+            ("cc_on_s390x",    ".*s390x.*", ""),
+            # undefined platform
+            ("cc_on_noarch",   "", ""),
+        )
+        detect_compiler = (
+            ("cc_is_gcc",     r".*(gcc|gnu\-g).*", ""),
+            ("cc_is_clang",    ".*clang.*", ""),
+            # intel msvc like
+            ("cc_is_iccw",     ".*(intelw|intelemw|iccw).*", ""),
+            ("cc_is_icc",      ".*(intel|icc).*", ""),  # intel unix like
+            ("cc_is_msvc",     ".*msvc.*", ""),
+            ("cc_is_fcc",     ".*fcc.*", ""),
+            # an undefined compiler will be treated as gcc
+            ("cc_is_nocc",     "", ""),
+        )
+        detect_args = (
+           ("cc_has_debug",  ".*(O0|Od|ggdb|coverage|debug:full).*", ""),
+           ("cc_has_native",
+                ".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", ""),
+           # in case the class runs with -DNPY_DISABLE_OPTIMIZATION
+           ("cc_noopt", ".*DISABLE_OPT.*", ""),
+        )
+
+        dist_info = self.dist_info()
+        platform, compiler_info, extra_args = dist_info
+        # set False to all attrs
+        for section in (detect_arch, detect_compiler, detect_args):
+            for attr, rgex, cexpr in section:
+                setattr(self, attr, False)
+
+        for detect, searchin in ((detect_arch, platform),
+                                 (detect_compiler, compiler_info)):
+            for attr, rgex, cexpr in detect:
+                if rgex and not re.match(rgex, searchin, re.IGNORECASE):
+                    continue
+                if cexpr and not self.cc_test_cexpr(cexpr):
+                    continue
+                setattr(self, attr, True)
+                break
+
+        for attr, rgex, cexpr in detect_args:
+            if rgex and not re.match(rgex, extra_args, re.IGNORECASE):
+                continue
+            if cexpr and not self.cc_test_cexpr(cexpr):
+                continue
+            setattr(self, attr, True)
+
+        if self.cc_on_noarch:
+            self.dist_log(
+                "unable to detect CPU architecture which lead to disable the optimization. "
+                f"check dist_info:<<\n{dist_info}\n>>",
+                stderr=True
+            )
+            self.cc_noopt = True
+
+        if self.conf_noopt:
+            self.dist_log("Optimization is disabled by the Config", stderr=True)
+            self.cc_noopt = True
+
+        if self.cc_is_nocc:
+            """
+            mingw can be treated as a gcc, and also xlc even if it based on clang,
+            but still has the same gcc optimization flags.
+            """
+            self.dist_log(
+                "unable to detect compiler type which leads to treating it as GCC. "
+                "this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC."
+                f"check dist_info:<<\n{dist_info}\n>>",
+                stderr=True
+            )
+            self.cc_is_gcc = True
+
+        self.cc_march = "unknown"
+        for arch in ("x86", "x64", "ppc64", "ppc64le",
+                     "armhf", "aarch64", "s390x"):
+            if getattr(self, "cc_on_" + arch):
+                self.cc_march = arch
+                break
+
+        self.cc_name = "unknown"
+        for name in ("gcc", "clang", "iccw", "icc", "msvc", "fcc"):
+            if getattr(self, "cc_is_" + name):
+                self.cc_name = name
+                break
+
+        self.cc_flags = {}
+        compiler_flags = self.conf_cc_flags.get(self.cc_name)
+        if compiler_flags is None:
+            self.dist_fatal(
+                "undefined flag for compiler '%s', "
+                "leave an empty dict instead" % self.cc_name
+            )
+        for name, flags in compiler_flags.items():
+            self.cc_flags[name] = nflags = []
+            if flags:
+                assert(isinstance(flags, str))
+                flags = flags.split()
+                for f in flags:
+                    if self.cc_test_flags([f]):
+                        nflags.append(f)
+
+        self.cc_is_cached = True
+
+    @_Cache.me
+    def cc_test_flags(self, flags):
+        """
+        Returns True if the compiler supports 'flags'.
+        """
+        assert(isinstance(flags, list))
+        self.dist_log("testing flags", flags)
+        test_path = os.path.join(self.conf_check_path, "test_flags.c")
+        test = self.dist_test(test_path, flags)
+        if not test:
+            self.dist_log("testing failed", stderr=True)
+        return test
+
+    @_Cache.me
+    def cc_test_cexpr(self, cexpr, flags=[]):
+        """
+        Same as the above but supports compile-time expressions.
+        """
+        self.dist_log("testing compiler expression", cexpr)
+        test_path = os.path.join(self.conf_tmp_path, "npy_dist_test_cexpr.c")
+        with open(test_path, "w") as fd:
+            fd.write(textwrap.dedent(f"""\
+               #if !({cexpr})
+                   #error "unsupported expression"
+               #endif
+               int dummy;
+            """))
+        test = self.dist_test(test_path, flags)
+        if not test:
+            self.dist_log("testing failed", stderr=True)
+        return test
+
+    def cc_normalize_flags(self, flags):
+        """
+        Remove the conflicts caused by gathering implied feature flags.
+
+        Parameters
+        ----------
+        'flags' list, compiler flags
+            flags should be sorted from the lowest to the highest interest.
+
+        Returns
+        -------
+        list, filtered from any conflicts.
+
+        Examples
+        --------
+        >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod'])
+        ['-march=armv8.2-a+fp16+dotprod']
+
+        >>> self.cc_normalize_flags(
+            ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2']
+        )
+        ['-march=core-avx2']
+        """
+        assert(isinstance(flags, list))
+        if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc:
+            return self._cc_normalize_unix(flags)
+
+        if self.cc_is_msvc or self.cc_is_iccw:
+            return self._cc_normalize_win(flags)
+        return flags
+
+    _cc_normalize_unix_mrgx = re.compile(
+        # 1- to check the highest of
+        r"^(-mcpu=|-march=|-x[A-Z0-9\-])"
+    )
+    _cc_normalize_unix_frgx = re.compile(
+        # 2- to remove any flags that start with
+        # -march, -mcpu, -x(INTEL) and '-m' without '='
+        r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]|-m[a-z0-9\-\.]*.$))|"
+        # exclude:
+        r"(?:-mzvector)"
+    )
+    _cc_normalize_unix_krgx = re.compile(
+        # 3- keep only the highest of
+        r"^(-mfpu|-mtune)"
+    )
+    _cc_normalize_arch_ver = re.compile(
+        r"[0-9.]"
+    )
+    def _cc_normalize_unix(self, flags):
+        def ver_flags(f):
+            #        arch ver  subflag
+            # -march=armv8.2-a+fp16fml
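+            # e.g. ver_flags("-march=armv8.2-a+fp16") returns
+            #      (8.2, "-march=armv8.2-a", ["fp16"])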
+            tokens = f.split('+')
+            ver = float('0' + ''.join(
+                re.findall(self._cc_normalize_arch_ver, tokens[0])
+            ))
+            return ver, tokens[0], tokens[1:]
+
+        if len(flags) <= 1:
+            return flags
+        # get the highest matched flag
+        for i, cur_flag in enumerate(reversed(flags)):
+            if not re.match(self._cc_normalize_unix_mrgx, cur_flag):
+                continue
+            lower_flags = flags[:-(i+1)]
+            upper_flags = flags[-i:]
+            filtered = list(filter(
+                self._cc_normalize_unix_frgx.search, lower_flags
+            ))
+            # gather subflags
+            ver, arch, subflags = ver_flags(cur_flag)
+            if ver > 0 and len(subflags) > 0:
+                for xflag in lower_flags:
+                    xver, _, xsubflags = ver_flags(xflag)
+                    if ver == xver:
+                        subflags = xsubflags + subflags
+                cur_flag = arch + '+' + '+'.join(subflags)
+
+            flags = filtered + [cur_flag]
+            if i > 0:
+                flags += upper_flags
+            break
+
+        # to remove overridable flags
+        final_flags = []
+        matched = set()
+        for f in reversed(flags):
+            match = re.match(self._cc_normalize_unix_krgx, f)
+            if not match:
+                pass
+            elif match[0] in matched:
+                continue
+            else:
+                matched.add(match[0])
+            final_flags.insert(0, f)
+        return final_flags
+
+    _cc_normalize_win_frgx = re.compile(
+        r"^(?!(/arch\:|/Qx\:))"
+    )
+    _cc_normalize_win_mrgx = re.compile(
+        r"^(/arch|/Qx:)"
+    )
+    def _cc_normalize_win(self, flags):
+        for i, f in enumerate(reversed(flags)):
+            if not re.match(self._cc_normalize_win_mrgx, f):
+                continue
+            i += 1
+            return list(filter(
+                self._cc_normalize_win_frgx.search, flags[:-i]
+            )) + flags[-i:]
+        return flags
+
+class _Feature:
+    """A helper class for `CCompilerOpt` that managing CPU features.
+
+    Attributes
+    ----------
+    feature_supported : dict
+        Dictionary containing all CPU features supported
+        by the platform, according to the specified values in attribute
+        `_Config.conf_features` and `_Config.conf_features_partial()`
+
+    feature_min : set
+        The minimum support of CPU features, according to
+        the specified values in attribute `_Config.conf_min_features`.
+    """
+    def __init__(self):
+        if hasattr(self, "feature_is_cached"):
+            return
+        self.feature_supported = pfeatures = self.conf_features_partial()
+        for feature_name in list(pfeatures.keys()):
+            feature  = pfeatures[feature_name]
+            cfeature = self.conf_features[feature_name]
+            feature.update({
+                k:v for k,v in cfeature.items() if k not in feature
+            })
+            disabled = feature.get("disable")
+            if disabled is not None:
+                pfeatures.pop(feature_name)
+                self.dist_log(
+                    "feature '%s' is disabled," % feature_name,
+                    disabled, stderr=True
+                )
+                continue
+            # list is used internally for these options
+            for option in (
+                "implies", "group", "detect", "headers", "flags", "extra_checks"
+            ):
+                oval = feature.get(option)
+                if isinstance(oval, str):
+                    feature[option] = oval.split()
+
+        self.feature_min = set()
+        min_f = self.conf_min_features.get(self.cc_march, "")
+        for F in min_f.upper().split():
+            if F in self.feature_supported:
+                self.feature_min.add(F)
+
+        self.feature_is_cached = True
+
+    def feature_names(self, names=None, force_flags=None, macros=[]):
+        """
+        Returns a set of CPU feature names supported by the platform and the **C** compiler.
+
+        Parameters
+        ----------
+        names : sequence or None, optional
+            Specify certain CPU features to test against the **C** compiler.
+            If None (default), all currently supported features are tested.
+            **Note**: feature names must be in upper-case.
+
+        force_flags : list or None, optional
+            If None (default), the default compiler flags for every CPU
+            feature will be used during the test.
+
+        macros : list of tuples, optional
+            A list of C macro definitions.
+        """
+        assert(
+            names is None or (
+                not isinstance(names, str) and
+                hasattr(names, "__iter__")
+            )
+        )
+        assert(force_flags is None or isinstance(force_flags, list))
+        if names is None:
+            names = self.feature_supported.keys()
+        supported_names = set()
+        for f in names:
+            if self.feature_is_supported(
+                f, force_flags=force_flags, macros=macros
+            ):
+                supported_names.add(f)
+        return supported_names
+
+    def feature_is_exist(self, name):
+        """
+        Returns True if a certain feature exists and is covered within
+        ``_Config.conf_features``.
+
+        Parameters
+        ----------
+        'name': str
+            feature name in uppercase.
+        """
+        assert(name.isupper())
+        return name in self.conf_features
+
+    def feature_sorted(self, names, reverse=False):
+        """
+        Sort a list of CPU features, ordered from the lowest to the highest interest.
+
+        Parameters
+        ----------
+        'names': sequence
+            sequence of supported feature names in uppercase.
+        'reverse': bool, optional
+            If True, the sort order is reversed (highest interest first).
+
+        Returns
+        -------
+        list, sorted CPU features
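+
+        Examples
+        --------
+        A hypothetical illustration, assuming an x86 configuration where
+        the interest of SSE2 < SSE41 < AVX2:
+
+        >>> self.feature_sorted(["AVX2", "SSE2", "SSE41"])
+        ['SSE2', 'SSE41', 'AVX2']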
+        """
+        def sort_cb(k):
+            if isinstance(k, str):
+                return self.feature_supported[k]["interest"]
+            # multiple features
+            rank = max([self.feature_supported[f]["interest"] for f in k])
+            # FIXME: that's not a safe way to increase the rank for
+            # multi targets
+            rank += len(k) - 1
+            return rank
+        return sorted(names, reverse=reverse, key=sort_cb)
+
+    def feature_implies(self, names, keep_origins=False):
+        """
+        Return a set of CPU features implied by 'names'.
+
+        Parameters
+        ----------
+        names : str or sequence of str
+            CPU feature name(s) in uppercase.
+
+        keep_origins : bool
+            If False (default), the returned set will not contain any
+            features from 'names'; origins can show up in the result only
+            when two features imply each other.
+
+        Examples
+        --------
+        >>> self.feature_implies("SSE3")
+        {'SSE', 'SSE2'}
+        >>> self.feature_implies("SSE2")
+        {'SSE'}
+        >>> self.feature_implies("SSE2", keep_origins=True)
+        # 'SSE2' found here since 'SSE' and 'SSE2' imply each other
+        {'SSE', 'SSE2'}
+        """
+        def get_implies(name, _caller=set()):
+            implies = set()
+            d = self.feature_supported[name]
+            for i in d.get("implies", []):
+                implies.add(i)
+                if i in _caller:
+                    # infinite-recursion guard, since
+                    # features can imply each other
+                    continue
+                _caller.add(name)
+                implies = implies.union(get_implies(i, _caller))
+            return implies
+
+        if isinstance(names, str):
+            implies = get_implies(names)
+            names = [names]
+        else:
+            assert(hasattr(names, "__iter__"))
+            implies = set()
+            for n in names:
+                implies = implies.union(get_implies(n))
+        if not keep_origins:
+            implies.difference_update(names)
+        return implies
+
+    def feature_implies_c(self, names):
+        """same as feature_implies() but combining 'names'"""
+        if isinstance(names, str):
+            names = set((names,))
+        else:
+            names = set(names)
+        return names.union(self.feature_implies(names))
+
+    def feature_ahead(self, names):
+        """
+        Return the list of features in 'names' after removing any
+        implied features, keeping the origins.
+
+        Parameters
+        ----------
+        'names': sequence
+            sequence of CPU feature names in uppercase.
+
+        Returns
+        -------
+        list of CPU features in the same order as 'names'
+
+        Examples
+        --------
+        >>> self.feature_ahead(["SSE2", "SSE3", "SSE41"])
+        ["SSE41"]
+        # assume AVX2 and FMA3 imply each other and AVX2
+        # is the highest interest
+        >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"])
+        ["AVX2"]
+        # assume AVX2 and FMA3 don't imply each other
+        >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"])
+        ["AVX2", "FMA3"]
+        """
+        assert(
+            not isinstance(names, str)
+            and hasattr(names, '__iter__')
+        )
+        implies = self.feature_implies(names, keep_origins=True)
+        ahead = [n for n in names if n not in implies]
+        if len(ahead) == 0:
+            # return the highest interested feature
+            # if all features imply each other
+            ahead = self.feature_sorted(names, reverse=True)[:1]
+        return ahead
+
+    def feature_untied(self, names):
+        """
+        Same as 'feature_ahead()' but when two features imply each other,
+        keep only the one with the highest interest.
+
+        Parameters
+        ----------
+        'names': sequence
+            sequence of CPU feature names in uppercase.
+
+        Returns
+        -------
+        list of CPU features in the same order as 'names'
+
+        Examples
+        --------
+        >>> self.feature_untied(["SSE2", "SSE3", "SSE41"])
+        ["SSE2", "SSE3", "SSE41"]
+        # assume AVX2 and FMA3 imply each other
+        >>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"])
+        ["SSE2", "SSE3", "SSE41", "AVX2"]
+        """
+        assert(
+            not isinstance(names, str)
+            and hasattr(names, '__iter__')
+        )
+        final = []
+        for n in names:
+            implies = self.feature_implies(n)
+            tied = [
+                nn for nn in final
+                if nn in implies and n in self.feature_implies(nn)
+            ]
+            if tied:
+                tied = self.feature_sorted(tied + [n])
+                if n not in tied[1:]:
+                    continue
+                final.remove(tied[:1][0])
+            final.append(n)
+        return final
+
+    def feature_get_til(self, names, keyisfalse):
+        """
+        Same as `feature_implies_c()` but stops collecting implied
+        features when the feature option named by parameter
+        'keyisfalse' is False; the returned features are also sorted.
+        """
+        def til(tnames):
+            # sort from highest to lowest interest then cut if "key" is False
+            tnames = self.feature_implies_c(tnames)
+            tnames = self.feature_sorted(tnames, reverse=True)
+            for i, n in enumerate(tnames):
+                if not self.feature_supported[n].get(keyisfalse, True):
+                    tnames = tnames[:i+1]
+                    break
+            return tnames
+
+        if isinstance(names, str) or len(names) <= 1:
+            names = til(names)
+            # normalize the sort
+            names.reverse()
+            return names
+
+        names = self.feature_ahead(names)
+        names = {t for n in names for t in til(n)}
+        return self.feature_sorted(names)
+
+    def feature_detect(self, names):
+        """
+        Return a list of CPU features that are required to be detected,
+        sorted from the lowest to the highest interest.
+        """
+        names = self.feature_get_til(names, "implies_detect")
+        detect = []
+        for n in names:
+            d = self.feature_supported[n]
+            detect += d.get("detect", d.get("group", [n]))
+        return detect
+
+    @_Cache.me
+    def feature_flags(self, names):
+        """
+        Return a list of CPU features flags sorted from the lowest
+        to highest interest.
+        """
+        names = self.feature_sorted(self.feature_implies_c(names))
+        flags = []
+        for n in names:
+            d = self.feature_supported[n]
+            f = d.get("flags", [])
+            if not f or not self.cc_test_flags(f):
+                continue
+            flags += f
+        return self.cc_normalize_flags(flags)
+
+    @_Cache.me
+    def feature_test(self, name, force_flags=None, macros=[]):
+        """
+        Test a certain CPU feature against the compiler through its own
+        check file.
+
+        Parameters
+        ----------
+        name : str
+            Supported CPU feature name.
+
+        force_flags : list or None, optional
+            If None (default), the returned flags from `feature_flags()`
+            will be used.
+
+        macros : list of tuples, optional
+            A list of C macro definitions.
+        """
+        if force_flags is None:
+            force_flags = self.feature_flags(name)
+
+        self.dist_log(
+            "testing feature '%s' with flags (%s)" % (
+            name, ' '.join(force_flags)
+        ))
+        # Each CPU feature must have C source code that contains at
+        # least one intrinsic or instruction related to this feature.
+        test_path = os.path.join(
+            self.conf_check_path, "cpu_%s.c" % name.lower()
+        )
+        if not os.path.exists(test_path):
+            self.dist_fatal("feature test file is not exist", test_path)
+
+        test = self.dist_test(
+            test_path, force_flags + self.cc_flags["werror"], macros=macros
+        )
+        if not test:
+            self.dist_log("testing failed", stderr=True)
+        return test
+
+    @_Cache.me
+    def feature_is_supported(self, name, force_flags=None, macros=[]):
+        """
+        Check if a certain CPU feature is supported by the platform and compiler.
+
+        Parameters
+        ----------
+        name : str
+            CPU feature name in uppercase.
+
+        force_flags : list or None, optional
+            If None (default), the default compiler flags for every CPU
+            feature will be used during the test.
+
+        macros : list of tuples, optional
+            A list of C macro definitions.
+        """
+        assert(name.isupper())
+        assert(force_flags is None or isinstance(force_flags, list))
+
+        supported = name in self.feature_supported
+        if supported:
+            for impl in self.feature_implies(name):
+                if not self.feature_test(impl, force_flags, macros=macros):
+                    return False
+            if not self.feature_test(name, force_flags, macros=macros):
+                return False
+        return supported
+
+    @_Cache.me
+    def feature_can_autovec(self, name):
+        """
+        Check whether the feature can be auto-vectorized by the compiler.
+        """
+        assert(isinstance(name, str))
+        d = self.feature_supported[name]
+        can = d.get("autovec", None)
+        if can is None:
+            valid_flags = [
+                self.cc_test_flags([f]) for f in d.get("flags", [])
+            ]
+            can = valid_flags and any(valid_flags)
+        return can
+
+    @_Cache.me
+    def feature_extra_checks(self, name):
+        """
+        Return a list of supported extra checks after testing them against
+        the compiler.
+
+        Parameters
+        ----------
+        name : str
+            CPU feature name in uppercase.
+        """
+        assert isinstance(name, str)
+        d = self.feature_supported[name]
+        extra_checks = d.get("extra_checks", [])
+        if not extra_checks:
+            return []
+
+        self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks)
+        flags = self.feature_flags(name)
+        available = []
+        not_available = []
+        for chk in extra_checks:
+            test_path = os.path.join(
+                self.conf_check_path, "extra_%s.c" % chk.lower()
+            )
+            if not os.path.exists(test_path):
+                self.dist_fatal("extra check file does not exist", test_path)
+
+            is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"])
+            if is_supported:
+                available.append(chk)
+            else:
+                not_available.append(chk)
+
+        if not_available:
+            self.dist_log("testing failed for checks", not_available, stderr=True)
+        return available
+
+
+    def feature_c_preprocessor(self, feature_name, tabs=0):
+        """
+        Generate C preprocessor definitions and include headers of a CPU feature.
+
+        Parameters
+        ----------
+        'feature_name': str
+            CPU feature name in uppercase.
+        'tabs': int
+            if > 0, indent the generated lines by the given number of tabs.
+
+        Returns
+        -------
+        str, generated C preprocessor
+
+        Examples
+        --------
+        >>> self.feature_c_preprocessor("SSE3")
+        /** SSE3 **/
+        #define NPY_HAVE_SSE3 1
+        #include <pmmintrin.h>
+        """
+        assert(feature_name.isupper())
+        feature = self.feature_supported.get(feature_name)
+        assert(feature is not None)
+
+        prepr = [
+            "/** %s **/" % feature_name,
+            "#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name)
+        ]
+        prepr += [
+            "#include <%s>" % h for h in feature.get("headers", [])
+        ]
+
+        extra_defs = feature.get("group", [])
+        extra_defs += self.feature_extra_checks(feature_name)
+        for edef in extra_defs:
+            # Guard extra definitions in case of duplicate with
+            # another feature
+            prepr += [
+                "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef),
+                "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef),
+                "#endif",
+            ]
+
+        if tabs > 0:
+            prepr = [('\t'*tabs) + l for l in prepr]
+        return '\n'.join(prepr)
+
+class _Parse:
+    """A helper class that parsing main arguments of `CCompilerOpt`,
+    also parsing configuration statements in dispatch-able sources.
+
+    Parameters
+    ----------
+    cpu_baseline : str or None
+        minimal set of required CPU features or special options.
+
+    cpu_dispatch : str or None
+        dispatched set of additional CPU features or special options.
+
+    Special options can be:
+        - **MIN**: Enables the minimum CPU features utilized via `_Config.conf_min_features`
+        - **MAX**: Enables all CPU features supported by the compiler and platform.
+        - **NATIVE**: Enables all CPU features supported by the current machine.
+        - **NONE**: Enables nothing.
+        - **Operand +/-**: remove or add features, useful with options **MAX**, **MIN** and **NATIVE**.
+            NOTE: operand + is only added for nominal reasons.
+
+    NOTES:
+        - Case-insensitive among all CPU features and special options.
+        - Comma or space can be used as a separator.
+        - If the CPU feature is not supported by the user platform or compiler,
+          it will be skipped rather than raising a fatal error.
+        - Any CPU features specified in 'cpu_dispatch' will be skipped if they are part of the CPU baseline features.
+        - 'cpu_baseline' force enables implied features.
+
+    Attributes
+    ----------
+    parse_baseline_names : list
+        Final CPU baseline feature names (sorted from low to high)
+    parse_baseline_flags : list
+        Compiler flags of baseline features
+    parse_dispatch_names : list
+        Final CPU dispatch-able feature names (sorted from low to high)
+    parse_target_groups : dict
+        Dictionary containing initialized target groups that configured
+        through class attribute `conf_target_groups`.
+
+        The key represents the group name and the value is a tuple
+        containing three items:
+            - bool, True if group has the 'baseline' option.
+            - list, list of CPU features.
+            - list, list of extra compiler flags.
+
+    """
+    def __init__(self, cpu_baseline, cpu_dispatch):
+        self._parse_policies = dict(
+            # POLICY NAME, (HAVE, NOT HAVE, [DEB])
+            KEEP_BASELINE = (
+                None, self._parse_policy_not_keepbase,
+                []
+            ),
+            KEEP_SORT = (
+                self._parse_policy_keepsort,
+                self._parse_policy_not_keepsort,
+                []
+            ),
+            MAXOPT = (
+                self._parse_policy_maxopt, None,
+                []
+            ),
+            WERROR = (
+                self._parse_policy_werror, None,
+                []
+            ),
+            AUTOVEC = (
+                self._parse_policy_autovec, None,
+                ["MAXOPT"]
+            )
+        )
+        if hasattr(self, "parse_is_cached"):
+            return
+
+        self.parse_baseline_names = []
+        self.parse_baseline_flags = []
+        self.parse_dispatch_names = []
+        self.parse_target_groups = {}
+
+        if self.cc_noopt:
+            # skip parsing baseline and dispatch args and keep parsing target groups
+            cpu_baseline = cpu_dispatch = None
+
+        self.dist_log("check requested baseline")
+        if cpu_baseline is not None:
+            cpu_baseline = self._parse_arg_features("cpu_baseline", cpu_baseline)
+            baseline_names = self.feature_names(cpu_baseline)
+            self.parse_baseline_flags = self.feature_flags(baseline_names)
+            self.parse_baseline_names = self.feature_sorted(
+                self.feature_implies_c(baseline_names)
+            )
+
+        self.dist_log("check requested dispatch-able features")
+        if cpu_dispatch is not None:
+            cpu_dispatch_ = self._parse_arg_features("cpu_dispatch", cpu_dispatch)
+            cpu_dispatch = {
+                f for f in cpu_dispatch_
+                if f not in self.parse_baseline_names
+            }
+            conflict_baseline = cpu_dispatch_.difference(cpu_dispatch)
+            self.parse_dispatch_names = self.feature_sorted(
+                self.feature_names(cpu_dispatch)
+            )
+            if len(conflict_baseline) > 0:
+                self.dist_log(
+                    "skip features", conflict_baseline, "since its part of baseline"
+                )
+
+        self.dist_log("initialize targets groups")
+        for group_name, tokens in self.conf_target_groups.items():
+            self.dist_log("parse target group", group_name)
+            GROUP_NAME = group_name.upper()
+            if not tokens or not tokens.strip():
+                # allow empty groups, useful in case there's a need
+                # to disable certain group since '_parse_target_tokens()'
+                # requires at least one valid target
+                self.parse_target_groups[GROUP_NAME] = (
+                    False, [], []
+                )
+                continue
+            has_baseline, features, extra_flags = \
+                self._parse_target_tokens(tokens)
+            self.parse_target_groups[GROUP_NAME] = (
+                has_baseline, features, extra_flags
+            )
+
+        self.parse_is_cached = True
+
+    def parse_targets(self, source):
+        """
+        Fetch and parse the configuration statements required for
+        defining the targeted CPU features. Statements should be declared
+        at the top of the source, inside a **C** comment, and start
+        with the special mark **@targets**.
+
+        Configuration statements are keywords representing
+        CPU feature names, groups of statements, and policies, combined
+        together to determine the required optimization.
+
+        Parameters
+        ----------
+        source : str
+            the path of **C** source file.
+
+        Returns
+        -------
+        - bool, True if group has the 'baseline' option
+        - list, list of CPU features
+        - list, list of extra compiler flags
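+
+        Examples
+        --------
+        A hypothetical dispatch-able source may start with::
+
+            /*@targets $maxopt baseline avx2 (avx512f avx512_skx) */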
+        """
+        self.dist_log("looking for '@targets' inside -> ", source)
+        # get lines between /*@targets and */
+        with open(source) as fd:
+            tokens = ""
+            max_to_reach = 1000 # good enough, isn't it?
+            start_with = "@targets"
+            start_pos = -1
+            end_with = "*/"
+            end_pos = -1
+            for current_line, line in enumerate(fd):
+                if current_line == max_to_reach:
+                    self.dist_fatal("reached the max of lines")
+                    break
+                if start_pos == -1:
+                    start_pos = line.find(start_with)
+                    if start_pos == -1:
+                        continue
+                    start_pos += len(start_with)
+                tokens += line
+                end_pos = line.find(end_with)
+                if end_pos != -1:
+                    end_pos += len(tokens) - len(line)
+                    break
+
+        if start_pos == -1:
+            self.dist_fatal("expected to find '%s' within a C comment" % start_with)
+        if end_pos == -1:
+            self.dist_fatal("expected to end with '%s'" % end_with)
+
+        tokens = tokens[start_pos:end_pos]
+        return self._parse_target_tokens(tokens)
+
+    _parse_regex_arg = re.compile(r'\s|,|([+-])')
+    def _parse_arg_features(self, arg_name, req_features):
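+        """
+        Parse a features-request string into a set of valid upper-case
+        feature names. An illustrative sketch of the accepted syntax,
+        assuming an x86 target: ``"min avx2 -fma3"`` enables the minimum
+        features plus AVX2, then removes FMA3 from the collected set
+        (if present).
+        """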
+        if not isinstance(req_features, str):
+            self.dist_fatal("expected a string in '%s'" % arg_name)
+
+        final_features = set()
+        # space and comma can be used as a separator
+        tokens = list(filter(None, re.split(self._parse_regex_arg, req_features)))
+        append = True # append is the default
+        for tok in tokens:
+            if tok[0] in ("#", "$"):
+                self.dist_fatal(
+                    arg_name, "target groups and policies "
+                    "aren't allowed from arguments, "
+                    "only from dispatch-able sources"
+                )
+            if tok == '+':
+                append = True
+                continue
+            if tok == '-':
+                append = False
+                continue
+
+            TOK = tok.upper() # we use upper-case internally
+            features_to = set()
+            if TOK == "NONE":
+                pass
+            elif TOK == "NATIVE":
+                native = self.cc_flags["native"]
+                if not native:
+                    self.dist_fatal(arg_name,
+                        "native option isn't supported by the compiler"
+                    )
+                features_to = self.feature_names(
+                    force_flags=native, macros=[("DETECT_FEATURES", 1)]
+                )
+            elif TOK == "MAX":
+                features_to = self.feature_supported.keys()
+            elif TOK == "MIN":
+                features_to = self.feature_min
+            else:
+                if TOK in self.feature_supported:
+                    features_to.add(TOK)
+                else:
+                    if not self.feature_is_exist(TOK):
+                        self.dist_fatal(arg_name,
+                            ", '%s' isn't a known feature or option" % tok
+                        )
+            if append:
+                final_features = final_features.union(features_to)
+            else:
+                final_features = final_features.difference(features_to)
+
+            append = True # back to default
+
+        return final_features
+
+    _parse_regex_target = re.compile(r'\s|[*,/]|([()])')
+    def _parse_target_tokens(self, tokens):
+        assert(isinstance(tokens, str))
+        final_targets = [] # to keep it sorted as specified
+        extra_flags = []
+        has_baseline = False
+
+        skipped  = set()
+        policies = set()
+        multi_target = None
+
+        tokens = list(filter(None, re.split(self._parse_regex_target, tokens)))
+        if not tokens:
+            self.dist_fatal("expected one token at least")
+
+        for tok in tokens:
+            TOK = tok.upper()
+            ch = tok[0]
+            if ch in ('+', '-'):
+                self.dist_fatal(
+                    "+/- are 'not' allowed from target's groups or @targets, "
+                    "only from cpu_baseline and cpu_dispatch parms"
+                )
+            elif ch == '$':
+                if multi_target is not None:
+                    self.dist_fatal(
+                        "policies aren't allowed inside multi-target '()'"
+                        ", only CPU features"
+                    )
+                policies.add(self._parse_token_policy(TOK))
+            elif ch == '#':
+                if multi_target is not None:
+                    self.dist_fatal(
+                        "target groups aren't allowed inside multi-target '()'"
+                        ", only CPU features"
+                    )
+                has_baseline, final_targets, extra_flags = \
+                self._parse_token_group(TOK, has_baseline, final_targets, extra_flags)
+            elif ch == '(':
+                if multi_target is not None:
+                    self.dist_fatal("unclosed multi-target, missing ')'")
+                multi_target = set()
+            elif ch == ')':
+                if multi_target is None:
+                    self.dist_fatal("multi-target opener '(' wasn't found")
+                targets = self._parse_multi_target(multi_target)
+                if targets is None:
+                    skipped.add(tuple(multi_target))
+                else:
+                    if len(targets) == 1:
+                        targets = targets[0]
+                    if targets and targets not in final_targets:
+                        final_targets.append(targets)
+                multi_target = None # back to default
+            else:
+                if TOK == "BASELINE":
+                    if multi_target is not None:
+                        self.dist_fatal("baseline isn't allowed inside multi-target '()'")
+                    has_baseline = True
+                    continue
+
+                if multi_target is not None:
+                    multi_target.add(TOK)
+                    continue
+
+                if not self.feature_is_exist(TOK):
+                    self.dist_fatal("invalid target name '%s'" % TOK)
+
+                is_enabled = (
+                    TOK in self.parse_baseline_names or
+                    TOK in self.parse_dispatch_names
+                )
+                if is_enabled:
+                    if TOK not in final_targets:
+                        final_targets.append(TOK)
+                    continue
+
+                skipped.add(TOK)
+
+        if multi_target is not None:
+            self.dist_fatal("unclosed multi-target, missing ')'")
+        if skipped:
+            self.dist_log(
+                "skip targets", skipped,
+                "not part of baseline or dispatch-able features"
+            )
+
+        final_targets = self.feature_untied(final_targets)
+
+        # add policies' dependencies
+        for p in list(policies):
+            _, _, deps = self._parse_policies[p]
+            for d in deps:
+                if d in policies:
+                    continue
+                self.dist_log(
+                    "policy '%s' force enables '%s'" % (
+                    p, d
+                ))
+                policies.add(d)
+
+        # apply the policies' filters
+        for p, (have, nhave, _) in self._parse_policies.items():
+            func = None
+            if p in policies:
+                func = have
+                self.dist_log("policy '%s' is ON" % p)
+            else:
+                func = nhave
+            if not func:
+                continue
+            has_baseline, final_targets, extra_flags = func(
+                has_baseline, final_targets, extra_flags
+            )
+
+        return has_baseline, final_targets, extra_flags
+
+    def _parse_token_policy(self, token):
+        """validate policy token"""
+        if len(token) <= 1 or token[-1:] == token[0]:
+            self.dist_fatal("'$' must stuck in the begin of policy name")
+        token = token[1:]
+        if token not in self._parse_policies:
+            self.dist_fatal(
+                "'%s' is an invalid policy name, available policies are" % token,
+                self._parse_policies.keys()
+            )
+        return token
+
+    def _parse_token_group(self, token, has_baseline, final_targets, extra_flags):
+        """validate group token"""
+        if len(token) <= 1 or token[-1:] == token[0]:
+            self.dist_fatal("'#' must stuck in the begin of group name")
+
+        token = token[1:]
+        ghas_baseline, gtargets, gextra_flags = self.parse_target_groups.get(
+            token, (False, None, [])
+        )
+        if gtargets is None:
+            self.dist_fatal(
+                "'%s' is an invalid target group name, " % token + \
+                "available target groups are",
+                self.parse_target_groups.keys()
+            )
+        if ghas_baseline:
+            has_baseline = True
+        # always keep sorting as specified
+        final_targets += [f for f in gtargets if f not in final_targets]
+        extra_flags += [f for f in gextra_flags if f not in extra_flags]
+        return has_baseline, final_targets, extra_flags
+
+    def _parse_multi_target(self, targets):
+        """validate multi targets that defined between parentheses()"""
+        # remove any implied features and keep the origins
+        if not targets:
+            self.dist_fatal("empty multi-target '()'")
+        if not all([
+            self.feature_is_exist(tar) for tar in targets
+        ]):
+            self.dist_fatal("invalid target name in multi-target", targets)
+        if not all([
+            (
+                tar in self.parse_baseline_names or
+                tar in self.parse_dispatch_names
+            )
+            for tar in targets
+        ]):
+            return None
+        targets = self.feature_ahead(targets)
+        if not targets:
+            return None
+        # force sort multi targets, so it can be comparable
+        targets = self.feature_sorted(targets)
+        targets = tuple(targets) # hashable
+        return targets
+
+    def _parse_policy_not_keepbase(self, has_baseline, final_targets, extra_flags):
+        """skip all baseline features"""
+        skipped = []
+        for tar in final_targets[:]:
+            is_base = False
+            if isinstance(tar, str):
+                is_base = tar in self.parse_baseline_names
+            else:
+                # multi targets
+                is_base = all([
+                    f in self.parse_baseline_names
+                    for f in tar
+                ])
+            if is_base:
+                skipped.append(tar)
+                final_targets.remove(tar)
+
+        if skipped:
+            self.dist_log("skip baseline features", skipped)
+
+        return has_baseline, final_targets, extra_flags
+
+    def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags):
+        """leave a notice that $keep_sort is on"""
+        self.dist_log(
+            "policy 'keep_sort' is on, dispatch-able targets", final_targets, "\n"
+            "are 'not' sorted depend on the highest interest but"
+            "as specified in the dispatch-able source or the extra group"
+        )
+        return has_baseline, final_targets, extra_flags
+
+    def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags):
+        """sorted depend on the highest interest"""
+        final_targets = self.feature_sorted(final_targets, reverse=True)
+        return has_baseline, final_targets, extra_flags
+
+    def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags):
+        """append the compiler optimization flags"""
+        if self.cc_has_debug:
+            self.dist_log("debug mode is detected, policy 'maxopt' is skipped.")
+        elif self.cc_noopt:
+            self.dist_log("optimization is disabled, policy 'maxopt' is skipped.")
+        else:
+            flags = self.cc_flags["opt"]
+            if not flags:
+                self.dist_log(
+                    "current compiler doesn't support optimization flags, "
+                    "policy 'maxopt' is skipped", stderr=True
+                )
+            else:
+                extra_flags += flags
+        return has_baseline, final_targets, extra_flags
+
+    def _parse_policy_werror(self, has_baseline, final_targets, extra_flags):
+        """force warnings to treated as errors"""
+        flags = self.cc_flags["werror"]
+        if not flags:
+            self.dist_log(
+                "current compiler doesn't support werror flags, "
+                "warnings will 'not' treated as errors", stderr=True
+            )
+        else:
+            self.dist_log("compiler warnings are treated as errors")
+            extra_flags += flags
+        return has_baseline, final_targets, extra_flags
+
+    def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags):
+        """skip features that has no auto-vectorized support by compiler"""
+        skipped = []
+        for tar in final_targets[:]:
+            if isinstance(tar, str):
+                can = self.feature_can_autovec(tar)
+            else: # multiple target
+                can = all([
+                    self.feature_can_autovec(t)
+                    for t in tar
+                ])
+            if not can:
+                final_targets.remove(tar)
+                skipped.append(tar)
+
+        if skipped:
+            self.dist_log("skip non auto-vectorized features", skipped)
+
+        return has_baseline, final_targets, extra_flags
+
+class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
+    """
+    A helper class for `CCompiler` that aims to provide extra build options
+    to effectively control compiler optimizations that are directly
+    related to CPU features.
+    """
+    def __init__(self, ccompiler, cpu_baseline="min", cpu_dispatch="max", cache_path=None):
+        _Config.__init__(self)
+        _Distutils.__init__(self, ccompiler)
+        _Cache.__init__(self, cache_path, self.dist_info(), cpu_baseline, cpu_dispatch)
+        _CCompiler.__init__(self)
+        _Feature.__init__(self)
+        if not self.cc_noopt and self.cc_has_native:
+            self.dist_log(
+                "native flag is specified through environment variables. "
+                "force cpu-baseline='native'"
+            )
+            cpu_baseline = "native"
+        _Parse.__init__(self, cpu_baseline, cpu_dispatch)
+        # keep the requested features untouched, need it later for report
+        # and trace purposes
+        self._requested_baseline = cpu_baseline
+        self._requested_dispatch = cpu_dispatch
+        # key is the dispatch-able source and value is a tuple
+        # contains two items (has_baseline[boolean], dispatched-features[list])
+        self.sources_status = getattr(self, "sources_status", {})
+        # every instance should have a separate one
+        self.cache_private.add("sources_status")
+        # set it at the end to make sure the cache writing was done after init
+        # this class
+        self.hit_cache = hasattr(self, "hit_cache")
+
+    def is_cached(self):
+        """
+        Returns True if the class was loaded from the cache file.
+        """
+        return self.cache_infile and self.hit_cache
+
+    def cpu_baseline_flags(self):
+        """
+        Returns a list of final CPU baseline compiler flags
+        """
+        return self.parse_baseline_flags
+
+    def cpu_baseline_names(self):
+        """
+        Returns a list of final CPU baseline feature names
+        """
+        return self.parse_baseline_names
+
+    def cpu_dispatch_names(self):
+        """
+        Returns a list of final CPU dispatch feature names
+        """
+        return self.parse_dispatch_names
+
+    def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs):
+        """
+        Compile one or more dispatch-able sources and generate object files,
+        and also generate the abstract C config headers and macros that are
+        used later for the final runtime dispatching process.
+
+        The mechanism behind it is to take each source file specified in
+        'sources' and branch it into several files, depending on the special
+        configuration statements that must be declared at the top of each
+        source and that contain the targeted CPU features, then compile
+        every branched source with the proper compiler flags.
+
+        Parameters
+        ----------
+        sources : list
+            Must be a list of dispatch-able source file paths,
+            and configuration statements must be declared inside
+            each file.
+
+        src_dir : str
+            Path of parent directory for the generated headers and wrapped sources.
+            If None (default), the files will be generated in place.
+
+        ccompiler : CCompiler
+            Distutils `CCompiler` instance to be used for compilation.
+            If None (default), the provided instance during the initialization
+            will be used instead.
+
+        **kwargs : any
+            Arguments to pass on to `CCompiler.compile()`.
+
+        Returns
+        -------
+        list : generated object files
+
+        Raises
+        ------
+        CompileError
+            Raised by `CCompiler.compile()` on compile failure.
+        DistutilsError
+            Raised when sanity checks of the configuration statements fail.
+
+        See Also
+        --------
+        parse_targets :
+            Parsing the configuration statements of dispatch-able sources.
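+
+        Examples
+        --------
+        A minimal sketch; the source path below is hypothetical::
+
+            >>> opt.try_dispatch(["_simd.dispatch.c"])  # doctest: +SKIP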
+        """
+        to_compile = {}
+        baseline_flags = self.cpu_baseline_flags()
+        include_dirs = kwargs.setdefault("include_dirs", [])
+
+        for src in sources:
+            output_dir = os.path.dirname(src)
+            if src_dir:
+                if not output_dir.startswith(src_dir):
+                    output_dir = os.path.join(src_dir, output_dir)
+                if output_dir not in include_dirs:
+                    # so that the dispatch-able sources can include the
+                    # generated config header (*.dispatch.h)
+                    include_dirs.append(output_dir)
+
+            has_baseline, targets, extra_flags = self.parse_targets(src)
+            nochange = self._generate_config(output_dir, src, targets, has_baseline)
+            for tar in targets:
+                tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange)
+                flags = tuple(extra_flags + self.feature_flags(tar))
+                to_compile.setdefault(flags, []).append(tar_src)
+
+            if has_baseline:
+                flags = tuple(extra_flags + baseline_flags)
+                to_compile.setdefault(flags, []).append(src)
+
+            self.sources_status[src] = (has_baseline, targets)
+
+        # The sources are compiled in a separate loop for these reasons:
+        # - Gathering all sources with the same flags to benefit from
+        #   parallel compilation as much as possible.
+        # - Generating all config headers of the dispatch-able sources
+        #   before compilation, in case there are dependency relationships
+        #   among them.
+        objects = []
+        for flags, srcs in to_compile.items():
+            objects += self.dist_compile(
+                srcs, list(flags), ccompiler=ccompiler, **kwargs
+            )
+        return objects
+
+    def generate_dispatch_header(self, header_path):
+        """
+        Generate the dispatch header, which contains the #definitions and
+        headers of the platform-specific instruction sets for the enabled
+        CPU baseline and dispatch-able features.
+
+        It's highly recommended to take a look at the generated header,
+        along with the source files generated via `try_dispatch()`,
+        in order to get the full picture.
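+
+        A typical call (the header path is hypothetical)::
+
+            >>> opt.generate_dispatch_header("build/_cpu_dispatch.h")  # doctest: +SKIP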
+        """
+        self.dist_log("generate CPU dispatch header: (%s)" % header_path)
+
+        baseline_names = self.cpu_baseline_names()
+        dispatch_names = self.cpu_dispatch_names()
+        baseline_len = len(baseline_names)
+        dispatch_len = len(dispatch_names)
+
+        header_dir = os.path.dirname(header_path)
+        if not os.path.exists(header_dir):
+            self.dist_log(
+                f"dispatch header dir {header_dir} does not exist, creating it",
+                stderr=True
+            )
+            os.makedirs(header_dir)
+
+        with open(header_path, 'w') as f:
+            baseline_calls = ' \\\n'.join([
+                (
+                    "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))"
+                ) % (self.conf_c_prefix, f)
+                for f in baseline_names
+            ])
+            dispatch_calls = ' \\\n'.join([
+                (
+                    "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))"
+                ) % (self.conf_c_prefix, f)
+                for f in dispatch_names
+            ])
+            f.write(textwrap.dedent("""\
+                /*
+                 * AUTOGENERATED DON'T EDIT
+                 * Please make changes to the code generator (distutils/ccompiler_opt.py)
+                */
+                #define {pfx}WITH_CPU_BASELINE  "{baseline_str}"
+                #define {pfx}WITH_CPU_DISPATCH  "{dispatch_str}"
+                #define {pfx}WITH_CPU_BASELINE_N {baseline_len}
+                #define {pfx}WITH_CPU_DISPATCH_N {dispatch_len}
+                #define {pfx}WITH_CPU_EXPAND_(X) X
+                #define {pfx}WITH_CPU_BASELINE_CALL(MACRO_TO_CALL, ...) \\
+                {baseline_calls}
+                #define {pfx}WITH_CPU_DISPATCH_CALL(MACRO_TO_CALL, ...) \\
+                {dispatch_calls}
+            """).format(
+                pfx=self.conf_c_prefix, baseline_str=" ".join(baseline_names),
+                dispatch_str=" ".join(dispatch_names), baseline_len=baseline_len,
+                dispatch_len=dispatch_len, baseline_calls=baseline_calls,
+                dispatch_calls=dispatch_calls
+            ))
+            baseline_pre = ''
+            for name in baseline_names:
+                baseline_pre += self.feature_c_preprocessor(name, tabs=1) + '\n'
+
+            dispatch_pre = ''
+            for name in dispatch_names:
+                dispatch_pre += textwrap.dedent("""\
+                #ifdef {pfx}CPU_TARGET_{name}
+                {pre}
+                #endif /*{pfx}CPU_TARGET_{name}*/
+                """).format(
+                    pfx=self.conf_c_prefix_, name=name, pre=self.feature_c_preprocessor(
+                    name, tabs=1
+                ))
+
+            f.write(textwrap.dedent("""\
+            /******* baseline features *******/
+            {baseline_pre}
+            /******* dispatch features *******/
+            {dispatch_pre}
+            """).format(
+                pfx=self.conf_c_prefix_, baseline_pre=baseline_pre,
+                dispatch_pre=dispatch_pre
+            ))
+
+    def report(self, full=False):
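+        """
+        Return a textual report of the final CPU baseline and dispatch
+        configuration, suitable for build logs. When `full` is True, also
+        list per-target flags, implied features, detect macros, and sources.
+        """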
+        report = []
+        platform_rows = []
+        baseline_rows = []
+        dispatch_rows = []
+        report.append(("Platform", platform_rows))
+        report.append(("", ""))
+        report.append(("CPU baseline", baseline_rows))
+        report.append(("", ""))
+        report.append(("CPU dispatch", dispatch_rows))
+
+        ########## platform ##########
+        platform_rows.append(("Architecture", (
+            "unsupported" if self.cc_on_noarch else self.cc_march)
+        ))
+        platform_rows.append(("Compiler", (
+            "unix-like"   if self.cc_is_nocc   else self.cc_name)
+        ))
+        ########## baseline ##########
+        if self.cc_noopt:
+            baseline_rows.append(("Requested", "optimization disabled"))
+        else:
+            baseline_rows.append(("Requested", repr(self._requested_baseline)))
+
+        baseline_names = self.cpu_baseline_names()
+        baseline_rows.append((
+            "Enabled", (' '.join(baseline_names) if baseline_names else "none")
+        ))
+        baseline_flags = self.cpu_baseline_flags()
+        baseline_rows.append((
+            "Flags", (' '.join(baseline_flags) if baseline_flags else "none")
+        ))
+        extra_checks = []
+        for name in baseline_names:
+            extra_checks += self.feature_extra_checks(name)
+        baseline_rows.append((
+            "Extra checks", (' '.join(extra_checks) if extra_checks else "none")
+        ))
+
+        ########## dispatch ##########
+        if self.cc_noopt:
+            dispatch_rows.append(("Requested", "optimization disabled"))
+        else:
+            dispatch_rows.append(("Requested", repr(self._requested_dispatch)))
+
+        dispatch_names = self.cpu_dispatch_names()
+        dispatch_rows.append((
+            "Enabled", (' '.join(dispatch_names) if dispatch_names else "none")
+        ))
+        ########## Generated ##########
+        # TODO:
+        # - collect object names from 'try_dispatch()',
+        #   then get the size of each object and print it
+        # - give more details about the features that were not
+        #   generated due to lack of compiler support
+        # - find a better output design.
+        #
+        target_sources = {}
+        for source, (_, targets) in self.sources_status.items():
+            for tar in targets:
+                target_sources.setdefault(tar, []).append(source)
+
+        if not full or not target_sources:
+            generated = ""
+            for tar in self.feature_sorted(target_sources):
+                sources = target_sources[tar]
+                name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
+                generated += name + "[%d] " % len(sources)
+            dispatch_rows.append(("Generated", generated[:-1] if generated else "none"))
+        else:
+            dispatch_rows.append(("Generated", ''))
+            for tar in self.feature_sorted(target_sources):
+                sources = target_sources[tar]
+                pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
+                flags = ' '.join(self.feature_flags(tar))
+                implies = ' '.join(self.feature_sorted(self.feature_implies(tar)))
+                detect = ' '.join(self.feature_detect(tar))
+                extra_checks = []
+                for name in ((tar,) if isinstance(tar, str) else tar):
+                    extra_checks += self.feature_extra_checks(name)
+                extra_checks = (' '.join(extra_checks) if extra_checks else "none")
+
+                dispatch_rows.append(('', ''))
+                dispatch_rows.append((pretty_name, implies))
+                dispatch_rows.append(("Flags", flags))
+                dispatch_rows.append(("Extra checks", extra_checks))
+                dispatch_rows.append(("Detect", detect))
+                for src in sources:
+                    dispatch_rows.append(("", src))
+
+        ###############################
+        # TODO: add support for 'markdown' format
+        text = []
+        secs_len = [len(secs) for secs, _ in report]
+        cols_len = [len(col) for _, rows in report for col, _ in rows]
+        tab = ' ' * 2
+        pad = max(max(secs_len), max(cols_len))
+        for sec, rows in report:
+            if not sec:
+                text.append("") # empty line
+                continue
+            sec += ' ' * (pad - len(sec))
+            text.append(sec + tab + ': ')
+            for col, val in rows:
+                col += ' ' * (pad - len(col))
+                text.append(tab + col + ': ' + val)
+
+        return '\n'.join(text)
+
+    def _wrap_target(self, output_dir, dispatch_src, target, nochange=False):
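+        """
+        Generate a wrapper source for a single dispatch target that defines
+        the target macros and then #includes the original dispatch-able
+        source, and return the wrapper's path.
+        """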
+        assert(isinstance(target, (str, tuple)))
+        if isinstance(target, str):
+            ext_name = target_name = target
+        else:
+            # multi-target
+            ext_name = '.'.join(target)
+            target_name = '__'.join(target)
+
+        wrap_path = os.path.join(output_dir, os.path.basename(dispatch_src))
+        wrap_path = "{0}.{2}{1}".format(*os.path.splitext(wrap_path), ext_name.lower())
+        if nochange and os.path.exists(wrap_path):
+            return wrap_path
+
+        self.dist_log("wrap dispatch-able target -> ", wrap_path)
+        # sorting for readability
+        features = self.feature_sorted(self.feature_implies_c(target))
+        target_join = "#define %sCPU_TARGET_" % self.conf_c_prefix_
+        target_defs = [target_join + f for f in features]
+        target_defs = '\n'.join(target_defs)
+
+        with open(wrap_path, "w") as fd:
+            fd.write(textwrap.dedent("""\
+            /**
+             * AUTOGENERATED DON'T EDIT
+             * Please make changes to the code generator \
+             (distutils/ccompiler_opt.py)
+             */
+            #define {pfx}CPU_TARGET_MODE
+            #define {pfx}CPU_TARGET_CURRENT {target_name}
+            {target_defs}
+            #include "{path}"
+            """).format(
+                pfx=self.conf_c_prefix_, target_name=target_name,
+                path=os.path.abspath(dispatch_src), target_defs=target_defs
+            ))
+        return wrap_path
+
+    def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False):
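+        """
+        Generate the per-source dispatch config header. Return True when an
+        up-to-date header already exists (so recompilation can be skipped),
+        otherwise write a fresh header and return False.
+        """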
+        config_path = os.path.basename(dispatch_src)
+        config_path = os.path.splitext(config_path)[0] + '.h'
+        config_path = os.path.join(output_dir, config_path)
+        # check if targets didn't change to avoid recompiling
+        cache_hash = self.cache_hash(targets, has_baseline)
+        try:
+            with open(config_path) as f:
+                last_hash = f.readline().split("cache_hash:")
+                if len(last_hash) == 2 and int(last_hash[1]) == cache_hash:
+                    return True
+        except OSError:
+            pass
+
+        os.makedirs(os.path.dirname(config_path), exist_ok=True)
+
+        self.dist_log("generate dispatched config -> ", config_path)
+        dispatch_calls = []
+        for tar in targets:
+            if isinstance(tar, str):
+                target_name = tar
+            else: # multi target
+                target_name = '__'.join(tar)
+            req_detect = self.feature_detect(tar)
+            req_detect = '&&'.join([
+                "CHK(%s)" % f for f in req_detect
+            ])
+            dispatch_calls.append(
+                "\t%sCPU_DISPATCH_EXPAND_(CB((%s), %s, __VA_ARGS__))" % (
+                self.conf_c_prefix_, req_detect, target_name
+            ))
+        dispatch_calls = ' \\\n'.join(dispatch_calls)
+
+        if has_baseline:
+            baseline_calls = (
+                "\t%sCPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))"
+            ) % self.conf_c_prefix_
+        else:
+            baseline_calls = ''
+
+        with open(config_path, "w") as fd:
+            fd.write(textwrap.dedent("""\
+            // cache_hash:{cache_hash}
+            /**
+             * AUTOGENERATED DON'T EDIT
+             * Please make changes to the code generator (distutils/ccompiler_opt.py)
+             */
+            #ifndef {pfx}CPU_DISPATCH_EXPAND_
+                #define {pfx}CPU_DISPATCH_EXPAND_(X) X
+            #endif
+            #undef {pfx}CPU_DISPATCH_BASELINE_CALL
+            #undef {pfx}CPU_DISPATCH_CALL
+            #define {pfx}CPU_DISPATCH_BASELINE_CALL(CB, ...) \\
+            {baseline_calls}
+            #define {pfx}CPU_DISPATCH_CALL(CHK, CB, ...) \\
+            {dispatch_calls}
+            """).format(
+                pfx=self.conf_c_prefix_, baseline_calls=baseline_calls,
+                dispatch_calls=dispatch_calls, cache_hash=cache_hash
+            ))
+        return False
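+
+# A sketch of how the generated config header is typically consumed from C
+# (illustrative only; the prefix, function, and feature-test names below are
+# hypothetical):
+#
+#   #include "hypot.dispatch.h"
+#   #define DISPATCH_CB(TESTED, TARGET, ...) \
+#       if (TESTED) return hypot_##TARGET(__VA_ARGS__);
+#   // expands into one guarded call per compiled target, e.g.
+#   //   if (cpu_have(AVX512F)) return hypot_AVX512F(a, b);
+#   MYPFX_CPU_DISPATCH_CALL(cpu_have, DISPATCH_CB, a, b)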
+
+def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs):
+    """
+    Create a new instance of 'CCompilerOpt' and generate the dispatch header,
+    which contains the #definitions and headers of the platform-specific
+    instruction sets for the enabled CPU baseline and dispatch-able features.
+
+    Parameters
+    ----------
+    compiler : CCompiler instance
+    dispatch_hpath : str
+        path of the dispatch header
+
+    **kwargs: passed as-is to `CCompilerOpt(...)`
+
+    Returns
+    -------
+    new instance of CCompilerOpt
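+
+    A minimal sketch (the header path is hypothetical)::
+
+        >>> opt = new_ccompiler_opt(compiler, "build/_cpu_dispatch.h",
+        ...                         cpu_baseline="min")  # doctest: +SKIP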
+    """
+    opt = CCompilerOpt(compiler, **kwargs)
+    if not os.path.exists(dispatch_hpath) or not opt.is_cached():
+        opt.generate_dispatch_header(dispatch_hpath)
+    return opt
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimd.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimd.c
new file mode 100644
index 00000000..6bc9022a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimd.c
@@ -0,0 +1,27 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    float *src = (float*)argv[argc-1];
+    float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
+    /* MAXMIN */
+    int ret  = (int)vgetq_lane_f32(vmaxnmq_f32(v1, v2), 0);
+        ret += (int)vgetq_lane_f32(vminnmq_f32(v1, v2), 0);
+    /* ROUNDING */
+    ret += (int)vgetq_lane_f32(vrndq_f32(v1), 0);
+#ifdef __aarch64__
+    {
+        double *src2 = (double*)argv[argc-1];
+        float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
+        /* MAXMIN */
+        ret += (int)vgetq_lane_f64(vmaxnmq_f64(vd1, vd2), 0);
+        ret += (int)vgetq_lane_f64(vminnmq_f64(vd1, vd2), 0);
+        /* ROUNDING */
+        ret += (int)vgetq_lane_f64(vrndq_f64(vd1), 0);
+    }
+#endif
+    return ret;
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c
new file mode 100644
index 00000000..e7068ce0
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c
@@ -0,0 +1,16 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    unsigned char *src = (unsigned char*)argv[argc-1];
+    uint8x16_t v1 = vdupq_n_u8(src[0]), v2 = vdupq_n_u8(src[1]);
+    uint32x4_t va = vdupq_n_u32(3);
+    int ret = (int)vgetq_lane_u32(vdotq_u32(va, v1, v2), 0);
+#ifdef __aarch64__
+    ret += (int)vgetq_lane_u32(vdotq_laneq_u32(va, v1, v2, 0), 0);
+#endif
+    return ret;
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdfhm.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdfhm.c
new file mode 100644
index 00000000..54e32809
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdfhm.c
@@ -0,0 +1,19 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    float16_t *src = (float16_t*)argv[argc-1];
+    float *src2 = (float*)argv[argc-2];
+    float16x8_t vhp  = vdupq_n_f16(src[0]);
+    float16x4_t vlhp = vdup_n_f16(src[1]);
+    float32x4_t vf   = vdupq_n_f32(src2[0]);
+    float32x2_t vlf  = vdup_n_f32(src2[1]);
+
+    int ret  = (int)vget_lane_f32(vfmlal_low_f16(vlf, vlhp, vlhp), 0);
+        ret += (int)vgetq_lane_f32(vfmlslq_high_f16(vf, vhp, vhp), 0);
+
+    return ret;
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdhp.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdhp.c
new file mode 100644
index 00000000..e2de0306
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdhp.c
@@ -0,0 +1,15 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    float16_t *src = (float16_t*)argv[argc-1];
+    float16x8_t vhp  = vdupq_n_f16(src[0]);
+    float16x4_t vlhp = vdup_n_f16(src[1]);
+
+    int ret  =  (int)vgetq_lane_f16(vabdq_f16(vhp, vhp), 0);
+        ret  += (int)vget_lane_f16(vabd_f16(vlhp, vlhp), 0);
+    return ret;
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx.c
new file mode 100644
index 00000000..26ae1846
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __AVX__
+        #error "HOST/ARCH doesn't support AVX"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m256 a = _mm256_add_ps(_mm256_loadu_ps((const float*)argv[argc-1]), _mm256_loadu_ps((const float*)argv[1]));
+    return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx2.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx2.c
new file mode 100644
index 00000000..ddde868f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx2.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __AVX2__
+        #error "HOST/ARCH doesn't support AVX2"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1]));
+    return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_clx.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_clx.c
new file mode 100644
index 00000000..81edcd06
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_clx.c
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __AVX512VNNI__
+        #error "HOST/ARCH doesn't support CascadeLake AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    /* VNNI */
+    __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+            a = _mm512_dpbusd_epi32(a, _mm512_setzero_si512(), a);
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c
new file mode 100644
index 00000000..5799f122
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c
@@ -0,0 +1,24 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__)
+        #error "HOST/ARCH doesn't support CannonLake AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+    /* IFMA */
+    a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512());
+    /* VBMI */
+    a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a);
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_icl.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_icl.c
new file mode 100644
index 00000000..3cf44d73
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_icl.c
@@ -0,0 +1,26 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__AVX512VBMI2__) || !defined(__AVX512BITALG__) || !defined(__AVX512VPOPCNTDQ__)
+        #error "HOST/ARCH doesn't support IceLake AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+    /* VBMI2 */
+    a = _mm512_shrdv_epi64(a, a, _mm512_setzero_si512());
+    /* BITALG */
+    a = _mm512_popcnt_epi8(a);
+    /* VPOPCNTDQ */
+    a = _mm512_popcnt_epi64(a);
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knl.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knl.c
new file mode 100644
index 00000000..cb55e57a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knl.c
@@ -0,0 +1,25 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__AVX512ER__) || !defined(__AVX512PF__)
+        #error "HOST/ARCH doesn't support Knights Landing AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    int base[128]={};
+    __m512d ad = _mm512_loadu_pd((const __m512d*)argv[argc-1]);
+    /* ER */
+    __m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(ad));
+    /* PF */
+    _mm512_mask_prefetch_i64scatter_pd(base, _mm512_cmpeq_epi64_mask(a, a), a, 1, _MM_HINT_T1);
+    return base[0];
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knm.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knm.c
new file mode 100644
index 00000000..2c426462
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knm.c
@@ -0,0 +1,30 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__AVX5124FMAPS__) || !defined(__AVX5124VNNIW__) || !defined(__AVX512VPOPCNTDQ__)
+        #error "HOST/ARCH doesn't support Knights Mill AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+    __m512 b = _mm512_loadu_ps((const __m512*)argv[argc-2]);
+
+    /* 4FMAPS */
+    b = _mm512_4fmadd_ps(b, b, b, b, b, NULL);
+    /* 4VNNIW */
+    a = _mm512_4dpwssd_epi32(a, a, a, a, a, NULL);
+    /* VPOPCNTDQ */
+    a = _mm512_popcnt_epi64(a);
+
+    a = _mm512_add_epi32(a, _mm512_castps_si512(b));
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_skx.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_skx.c
new file mode 100644
index 00000000..8840efb7
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_skx.c
@@ -0,0 +1,26 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__AVX512VL__) || !defined(__AVX512BW__) || !defined(__AVX512DQ__)
+        #error "HOST/ARCH doesn't support SkyLake AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i aa = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+    /* VL */
+    __m256i a = _mm256_abs_epi64(_mm512_extracti64x4_epi64(aa, 1));
+    /* DQ */
+    __m512i b = _mm512_broadcast_i32x8(a);
+    /* BW */
+    b = _mm512_abs_epi16(b);
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(b));
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_spr.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_spr.c
new file mode 100644
index 00000000..9710d0b2
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_spr.c
@@ -0,0 +1,26 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__AVX512FP16__)
+        #error "HOST/ARCH doesn't support Sapphire Rapids AVX512FP16 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+/* clang has a bug regarding our spr code, see gh-23730. */
+#if __clang__
+#error
+#endif
+    __m512h a = _mm512_loadu_ph((void*)argv[argc-1]);
+    __m512h temp = _mm512_fmadd_ph(a, a, a);
+    _mm512_storeu_ph((void*)(argv[argc-1]), temp);
+    return 0;
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512cd.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512cd.c
new file mode 100644
index 00000000..5e29c79e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512cd.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __AVX512CD__
+        #error "HOST/ARCH doesn't support AVX512CD"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i a = _mm512_lzcnt_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512f.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512f.c
new file mode 100644
index 00000000..d0eb7b1a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512f.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __AVX512F__
+        #error "HOST/ARCH doesn't support AVX512F"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i a = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_f16c.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_f16c.c
new file mode 100644
index 00000000..fdf36cec
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_f16c.c
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __F16C__
+        #error "HOST/ARCH doesn't support F16C"
+    #endif
+#endif
+
+#include <emmintrin.h>
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m128 a  = _mm_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-1]));
+    __m256 a8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-2]));
+    return (int)(_mm_cvtss_f32(a) + _mm_cvtss_f32(_mm256_castps256_ps128(a8)));
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma3.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma3.c
new file mode 100644
index 00000000..bfeef22b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma3.c
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__FMA__) && !defined(__AVX2__)
+        #error "HOST/ARCH doesn't support FMA3"
+    #endif
+#endif
+
+#include <xmmintrin.h>
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
+           a = _mm256_fmadd_ps(a, a, a);
+    return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma4.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma4.c
new file mode 100644
index 00000000..0ff17a48
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma4.c
@@ -0,0 +1,13 @@
+#include <immintrin.h>
+#ifdef _MSC_VER
+    #include <ammintrin.h>
+#else
+    #include <x86intrin.h>
+#endif
+
+int main(int argc, char **argv)
+{
+    __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
+           a = _mm256_macc_ps(a, a, a);
+    return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon.c
new file mode 100644
index 00000000..8c64f864
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon.c
@@ -0,0 +1,19 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    // pass data via untraced pointers to keep the compiler from folding
+    // the constants away, so the check also exercises the linker.
+    float *src = (float*)argv[argc-1];
+    float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
+    int ret = (int)vgetq_lane_f32(vmulq_f32(v1, v2), 0);
+#ifdef __aarch64__
+    double *src2 = (double*)argv[argc-2];
+    float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
+    ret += (int)vgetq_lane_f64(vmulq_f64(vd1, vd2), 0);
+#endif
+    return ret;
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_fp16.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_fp16.c
new file mode 100644
index 00000000..f3b94977
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_fp16.c
@@ -0,0 +1,11 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    short *src = (short*)argv[argc-1];
+    float32x4_t v_z4 = vcvt_f32_f16((float16x4_t)vld1_s16(src));
+    return (int)vgetq_lane_f32(v_z4, 0);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c
new file mode 100644
index 00000000..a039159d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c
@@ -0,0 +1,21 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    float *src = (float*)argv[argc-1];
+    float32x4_t v1 = vdupq_n_f32(src[0]);
+    float32x4_t v2 = vdupq_n_f32(src[1]);
+    float32x4_t v3 = vdupq_n_f32(src[2]);
+    int ret = (int)vgetq_lane_f32(vfmaq_f32(v1, v2, v3), 0);
+#ifdef __aarch64__
+    double *src2 = (double*)argv[argc-2];
+    float64x2_t vd1 = vdupq_n_f64(src2[0]);
+    float64x2_t vd2 = vdupq_n_f64(src2[1]);
+    float64x2_t vd3 = vdupq_n_f64(src2[2]);
+    ret += (int)vgetq_lane_f64(vfmaq_f64(vd1, vd2, vd3), 0);
+#endif
+    return ret;
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_popcnt.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_popcnt.c
new file mode 100644
index 00000000..813c461f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_popcnt.c
@@ -0,0 +1,32 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__SSE4_2__) && !defined(__POPCNT__)
+        #error "HOST/ARCH doesn't support POPCNT"
+    #endif
+#endif
+
+#ifdef _MSC_VER
+    #include <nmmintrin.h>
+#else
+    #include <popcntintrin.h>
+#endif
+
+int main(int argc, char **argv)
+{
+    // To make sure the popcnt instructions are generated
+    // and tested against the assembler
+    unsigned long long a = *((unsigned long long*)argv[argc-1]);
+    unsigned int b = *((unsigned int*)argv[argc-2]);
+
+#if defined(_M_X64) || defined(__x86_64__)
+    a = _mm_popcnt_u64(a);
+#endif
+    b = _mm_popcnt_u32(b);
+    return (int)a + b;
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse.c
new file mode 100644
index 00000000..602b74e7
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE__
+        #error "HOST/ARCH doesn't support SSE"
+    #endif
+#endif
+
+#include <xmmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_add_ps(_mm_setzero_ps(), _mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse2.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse2.c
new file mode 100644
index 00000000..33826a9e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse2.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE2__
+        #error "HOST/ARCH doesn't support SSE2"
+    #endif
+#endif
+
+#include <emmintrin.h>
+
+int main(void)
+{
+    __m128i a = _mm_add_epi16(_mm_setzero_si128(), _mm_setzero_si128());
+    return _mm_cvtsi128_si32(a);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse3.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse3.c
new file mode 100644
index 00000000..d47c20f7
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse3.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE3__
+        #error "HOST/ARCH doesn't support SSE3"
+    #endif
+#endif
+
+#include <pmmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse41.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse41.c
new file mode 100644
index 00000000..7c80238a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse41.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE4_1__
+        #error "HOST/ARCH doesn't support SSE41"
+    #endif
+#endif
+
+#include <smmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_floor_ps(_mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse42.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse42.c
new file mode 100644
index 00000000..f60e18f3
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse42.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE4_2__
+        #error "HOST/ARCH doesn't support SSE42"
+    #endif
+#endif
+
+#include <smmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_ssse3.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_ssse3.c
new file mode 100644
index 00000000..fde390d6
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_ssse3.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSSE3__
+        #error "HOST/ARCH doesn't support SSSE3"
+    #endif
+#endif
+
+#include <tmmintrin.h>
+
+int main(void)
+{
+    __m128i a = _mm_hadd_epi16(_mm_setzero_si128(), _mm_setzero_si128());
+    return (int)_mm_cvtsi128_si32(a);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx.c
new file mode 100644
index 00000000..0b3f30d6
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx.c
@@ -0,0 +1,21 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
+    #define vsx_ld  vec_vsx_ld
+    #define vsx_st  vec_vsx_st
+#else
+    #define vsx_ld  vec_xl
+    #define vsx_st  vec_xst
+#endif
+
+int main(void)
+{
+    unsigned int zout[4];
+    unsigned int z4[] = {0, 0, 0, 0};
+    __vector unsigned int v_z4 = vsx_ld(0, z4);
+    vsx_st(v_z4, 0, zout);
+    return zout[0];
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx2.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx2.c
new file mode 100644
index 00000000..410fb29d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx2.c
@@ -0,0 +1,13 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned long long v_uint64x2;
+
+int main(void)
+{
+    v_uint64x2 z2 = (v_uint64x2){0, 0};
+    z2 = (v_uint64x2)vec_cmpeq(z2, z2);
+    return (int)vec_extract(z2, 0);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx3.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx3.c
new file mode 100644
index 00000000..85752653
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx3.c
@@ -0,0 +1,13 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned int v_uint32x4;
+
+int main(void)
+{
+    v_uint32x4 z4 = (v_uint32x4){0, 0, 0, 0};
+    z4 = vec_absd(z4, z4);
+    return (int)vec_extract(z4, 0);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx4.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx4.c
new file mode 100644
index 00000000..a6acc738
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx4.c
@@ -0,0 +1,14 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned int v_uint32x4;
+
+int main(void)
+{
+    v_uint32x4 v1 = (v_uint32x4){2, 4, 8, 16};
+    v_uint32x4 v2 = (v_uint32x4){2, 2, 2, 2};
+    v_uint32x4 v3 = vec_mod(v1, v2);
+    return (int)vec_extractm(v3);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vx.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vx.c
new file mode 100644
index 00000000..18fb7ef9
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vx.c
@@ -0,0 +1,16 @@
+#if (__VEC__ < 10301) || (__ARCH__ < 11)
+    #error VX not supported
+#endif
+
+#include <vecintrin.h>
+int main(int argc, char **argv)
+{
+    __vector double x = vec_abs(vec_xl(argc, (double*)argv));
+    __vector double y = vec_load_len((double*)argv, (unsigned int)argc);
+
+    x = vec_round(vec_ceil(x) + vec_floor(y));
+    __vector bool long long m = vec_cmpge(x, y);
+    __vector long long i = vec_signed(vec_sel(x, y, m));
+
+    return (int)vec_extract(i, 0);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe.c
new file mode 100644
index 00000000..e6933adc
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe.c
@@ -0,0 +1,25 @@
+#if (__VEC__ < 10302) || (__ARCH__ < 12)
+    #error VXE not supported
+#endif
+
+#include <vecintrin.h>
+int main(int argc, char **argv)
+{
+    __vector float x = vec_nabs(vec_xl(argc, (float*)argv));
+    __vector float y = vec_load_len((float*)argv, (unsigned int)argc);
+    
+    x = vec_round(vec_ceil(x) + vec_floor(y));
+    __vector bool int m = vec_cmpge(x, y);
+    x = vec_sel(x, y, m);
+
+    // need to test for the existence of the intrinsic "vflls", since
+    // vec_doublee maps to the wrong intrinsic "vfll".
+    // see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100871
+#if defined(__GNUC__) && !defined(__clang__)
+    __vector long long i = vec_signed(__builtin_s390_vflls(x));
+#else
+    __vector long long i = vec_signed(vec_doublee(x));
+#endif
+
+    return (int)vec_extract(i, 0);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe2.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe2.c
new file mode 100644
index 00000000..f36d5712
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe2.c
@@ -0,0 +1,21 @@
+#if (__VEC__ < 10303) || (__ARCH__ < 13)
+    #error VXE2 not supported
+#endif
+
+#include <vecintrin.h>
+
+int main(int argc, char **argv)
+{
+    int val;
+    __vector signed short large = { 'a', 'b', 'c', 'a', 'g', 'h', 'g', 'o' };
+    __vector signed short search = { 'g', 'h', 'g', 'o' };
+    __vector unsigned char len = { 0 };
+    __vector unsigned char res = vec_search_string_cc(large, search, len, &val);
+    __vector float x = vec_xl(argc, (float*)argv);
+    __vector int i = vec_signed(x);
+
+    i = vec_srdb(vec_sldb(i, i, 2), i, 3);
+    val += (int)vec_extract(res, 1);
+    val += vec_extract(i, 0);
+    return val;
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_xop.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_xop.c
new file mode 100644
index 00000000..51d70cf2
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/cpu_xop.c
@@ -0,0 +1,12 @@
+#include <immintrin.h>
+#ifdef _MSC_VER
+    #include <ammintrin.h>
+#else
+    #include <x86intrin.h>
+#endif
+
+int main(void)
+{
+    __m128i a = _mm_comge_epu32(_mm_setzero_si128(), _mm_setzero_si128());
+    return _mm_cvtsi128_si32(a);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c
new file mode 100644
index 00000000..9cfd0c2a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c
@@ -0,0 +1,18 @@
+#include <immintrin.h>
+/**
+ * Test BW mask operations; they require compiler support:
+ *  - MSVC since vs2019, see
+ *    https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
+ *  - Clang >= v8.0
+ *  - GCC >= v7.1
+ */
+int main(void)
+{
+    __mmask64 m64 = _mm512_cmpeq_epi8_mask(_mm512_set1_epi8((char)1), _mm512_set1_epi8((char)1));
+    m64 = _kor_mask64(m64, m64);
+    m64 = _kxor_mask64(m64, m64);
+    m64 = _cvtu64_mask64(_cvtmask64_u64(m64));
+    m64 = _mm512_kunpackd(m64, m64);
+    m64 = (__mmask64)_mm512_kunpackw((__mmask32)m64, (__mmask32)m64);
+    return (int)_cvtmask64_u64(m64);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c
new file mode 100644
index 00000000..f0dc88bd
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c
@@ -0,0 +1,16 @@
+#include <immintrin.h>
+/**
+ * Test DQ mask operations; they require compiler support:
+ *  - MSVC since vs2019, see
+ *    https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
+ *  - Clang >= v8.0
+ *  - GCC >= v7.1
+ */
+int main(void)
+{
+    __mmask8 m8 = _mm512_cmpeq_epi64_mask(_mm512_set1_epi64(1), _mm512_set1_epi64(1));
+    m8 = _kor_mask8(m8, m8);
+    m8 = _kxor_mask8(m8, m8);
+    m8 = _cvtu32_mask8(_cvtmask8_u32(m8));
+    return (int)_cvtmask8_u32(m8);
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c
new file mode 100644
index 00000000..db01aaee
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c
@@ -0,0 +1,41 @@
+#include <immintrin.h>
+/**
+ * The following intrinsics don't have direct native support but compilers
+ * tend to emulate them.
+ * They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19
+ */
+int main(void)
+{
+    __m512  one_ps = _mm512_set1_ps(1.0f);
+    __m512d one_pd = _mm512_set1_pd(1.0);
+    __m512i one_i64 = _mm512_set1_epi64(1);
+    // add
+    float sum_ps  = _mm512_reduce_add_ps(one_ps);
+    double sum_pd = _mm512_reduce_add_pd(one_pd);
+    int sum_int   = (int)_mm512_reduce_add_epi64(one_i64);
+        sum_int  += (int)_mm512_reduce_add_epi32(one_i64);
+    // mul
+    sum_ps  += _mm512_reduce_mul_ps(one_ps);
+    sum_pd  += _mm512_reduce_mul_pd(one_pd);
+    sum_int += (int)_mm512_reduce_mul_epi64(one_i64);
+    sum_int += (int)_mm512_reduce_mul_epi32(one_i64);
+    // min
+    sum_ps  += _mm512_reduce_min_ps(one_ps);
+    sum_pd  += _mm512_reduce_min_pd(one_pd);
+    sum_int += (int)_mm512_reduce_min_epi32(one_i64);
+    sum_int += (int)_mm512_reduce_min_epu32(one_i64);
+    sum_int += (int)_mm512_reduce_min_epi64(one_i64);
+    // max
+    sum_ps  += _mm512_reduce_max_ps(one_ps);
+    sum_pd  += _mm512_reduce_max_pd(one_pd);
+    sum_int += (int)_mm512_reduce_max_epi32(one_i64);
+    sum_int += (int)_mm512_reduce_max_epu32(one_i64);
+    sum_int += (int)_mm512_reduce_max_epi64(one_i64);
+    // and
+    sum_int += (int)_mm512_reduce_and_epi32(one_i64);
+    sum_int += (int)_mm512_reduce_and_epi64(one_i64);
+    // or
+    sum_int += (int)_mm512_reduce_or_epi32(one_i64);
+    sum_int += (int)_mm512_reduce_or_epi64(one_i64);
+    return (int)sum_ps + (int)sum_pd + sum_int;
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx3_half_double.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx3_half_double.c
new file mode 100644
index 00000000..514a2b18
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx3_half_double.c
@@ -0,0 +1,12 @@
+/**
+ * Assembler may not fully support the following VSX3 scalar
+ * instructions, even though compilers report VSX3 support.
+ */
+int main(void)
+{
+    unsigned short bits = 0xFF;
+    double f;
+    __asm__ __volatile__("xscvhpdp %x0,%x1" : "=wa"(f) : "wa"(bits));
+    __asm__ __volatile__ ("xscvdphp %x0,%x1" : "=wa" (bits) : "wa" (f));
+    return bits;
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx4_mma.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx4_mma.c
new file mode 100644
index 00000000..a70b2a9f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx4_mma.c
@@ -0,0 +1,21 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector float fv4sf_t;
+typedef __vector unsigned char vec_t;
+
+int main(void)
+{
+    __vector_quad acc0;
+    float a[4] = {0,1,2,3};
+    float b[4] = {0,1,2,3};
+    vec_t *va = (vec_t *) a;
+    vec_t *vb = (vec_t *) b;
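+    /* xvf32ger: rank-1 (outer product) update of the 4x4 float accumulator
+       from the two 4-float input vectors */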
+    __builtin_mma_xvf32ger(&acc0, va[0], vb[0]);
+    fv4sf_t result[4];
+    __builtin_mma_disassemble_acc((void *)result, &acc0);
+    fv4sf_t c0 = result[0];
+    return (int)((float*)&c0)[0];
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx_asm.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx_asm.c
new file mode 100644
index 00000000..b73a6f43
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx_asm.c
@@ -0,0 +1,36 @@
+/**
+ * Testing ASM VSX register number fixer '%x<n>'
+ *
+ * Old versions of Clang don't support %x<n> in the inline asm template,
+ * which fixes the register number when using any of the register constraints wa, wd, wf.
+ *
+ * xref:
+ * - https://bugs.llvm.org/show_bug.cgi?id=31837
+ * - https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
+ */
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
+    #define vsx_ld  vec_vsx_ld
+    #define vsx_st  vec_vsx_st
+#else
+    #define vsx_ld  vec_xl
+    #define vsx_st  vec_xst
+#endif
+
+int main(void)
+{
+    float z4[] = {0, 0, 0, 0};
+    signed int zout[] = {0, 0, 0, 0};
+
+    __vector float vz4 = vsx_ld(0, z4);
+    __vector signed int asm_ret = vsx_ld(0, zout);
+
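+    /* xvcvspsxws: convert the single-precision vector to signed words */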
+    __asm__ ("xvcvspsxws %x0,%x1" : "=wa" (vz4) : "wa" (asm_ret));
+
+    vsx_st(asm_ret, 0, zout);
+    return zout[0];
+}
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/test_flags.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/test_flags.c
new file mode 100644
index 00000000..4cd09d42
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/checks/test_flags.c
@@ -0,0 +1 @@
+int test_flags;
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/__init__.py
new file mode 100644
index 00000000..3ba501de
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/__init__.py
@@ -0,0 +1,41 @@
+"""distutils.command
+
+Package containing implementation of all the standard Distutils
+commands.
+
+"""
+
+__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
+
+distutils_all = [  #'build_py',
+                   'clean',
+                   'install_clib',
+                   'install_scripts',
+                   'bdist',
+                   'bdist_dumb',
+                   'bdist_wininst',
+                ]
+
+__import__('distutils.command', globals(), locals(), distutils_all)
+
+__all__ = ['build',
+           'config_compiler',
+           'config',
+           'build_src',
+           'build_py',
+           'build_ext',
+           'build_clib',
+           'build_scripts',
+           'install',
+           'install_data',
+           'install_headers',
+           'install_lib',
+           'bdist_rpm',
+           'sdist',
+          ] + distutils_all
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/autodist.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/autodist.py
new file mode 100644
index 00000000..b72d0cab
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/autodist.py
@@ -0,0 +1,148 @@
+"""This module implements additional tests ala autoconf which can be useful.
+
+"""
+import textwrap
+
+# We put them here since they could be easily reused outside numpy.distutils
+
+def check_inline(cmd):
+    """Return the inline identifier (may be empty)."""
+    cmd._check_compiler()
+    body = textwrap.dedent("""
+        #ifndef __cplusplus
+        static %(inline)s int static_func (void)
+        {
+            return 0;
+        }
+        %(inline)s int nostatic_func (void)
+        {
+            return 0;
+        }
+        #endif""")
+
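+    # Probe each candidate keyword by compiling the snippet above and
+    # return the first one the compiler accepts; '' means no inline support.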
+    for kw in ['inline', '__inline__', '__inline']:
+        st = cmd.try_compile(body % {'inline': kw}, None, None)
+        if st:
+            return kw
+
+    return ''
+
+
+def check_restrict(cmd):
+    """Return the restrict identifier (may be empty)."""
+    cmd._check_compiler()
+    body = textwrap.dedent("""
+        static int static_func (char * %(restrict)s a)
+        {
+            return 0;
+        }
+        """)
+
+    for kw in ['restrict', '__restrict__', '__restrict']:
+        st = cmd.try_compile(body % {'restrict': kw}, None, None)
+        if st:
+            return kw
+
+    return ''
+
+
+def check_compiler_gcc(cmd):
+    """Check if the compiler is GCC."""
+
+    cmd._check_compiler()
+    body = textwrap.dedent("""
+        int
+        main()
+        {
+        #if (! defined __GNUC__)
+        #error gcc required
+        #endif
+            return 0;
+        }
+        """)
+    return cmd.try_compile(body, None, None)
+
+
+def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0):
+    """
+    Check that the gcc version is at least the specified version."""
+
+    cmd._check_compiler()
+    version = '.'.join([str(major), str(minor), str(patchlevel)])
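+    # NOTE: the preprocessor test below compares each version component
+    # independently, so it can reject versions such as 10.0 when 8.4 is
+    # required; that is acceptable for the simple checks it is used for.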
+    body = textwrap.dedent("""
+        int
+        main()
+        {
+        #if (! defined __GNUC__) || (__GNUC__ < %(major)d) || \\
+                (__GNUC_MINOR__ < %(minor)d) || \\
+                (__GNUC_PATCHLEVEL__ < %(patchlevel)d)
+        #error gcc >= %(version)s required
+        #endif
+            return 0;
+        }
+        """)
+    kw = {'version': version, 'major': major, 'minor': minor,
+          'patchlevel': patchlevel}
+
+    return cmd.try_compile(body % kw, None, None)
+
+
+def check_gcc_function_attribute(cmd, attribute, name):
+    """Return True if the given function attribute is supported."""
+    cmd._check_compiler()
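+    # The pragmas below promote -Wattributes to an error, so an unsupported
+    # attribute makes try_compile() fail instead of merely warning.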
+    body = textwrap.dedent("""
+        #pragma GCC diagnostic error "-Wattributes"
+        #pragma clang diagnostic error "-Wattributes"
+
+        int %s %s(void* unused)
+        {
+            return 0;
+        }
+
+        int
+        main()
+        {
+            return 0;
+        }
+        """) % (attribute, name)
+    return cmd.try_compile(body, None, None) != 0
+
+
+def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code,
+                                                 include):
+    """Return True if the given function attribute is supported with
+    intrinsics."""
+    cmd._check_compiler()
+    body = textwrap.dedent("""
+        #include<%s>
+        int %s %s(void)
+        {
+            %s;
+            return 0;
+        }
+
+        int
+        main()
+        {
+            return 0;
+        }
+        """) % (include, attribute, name, code)
+    return cmd.try_compile(body, None, None) != 0
+
+
+def check_gcc_variable_attribute(cmd, attribute):
+    """Return True if the given variable attribute is supported."""
+    cmd._check_compiler()
+    body = textwrap.dedent("""
+        #pragma GCC diagnostic error "-Wattributes"
+        #pragma clang diagnostic error "-Wattributes"
+
+        int %s foo;
+
+        int
+        main()
+        {
+            return 0;
+        }
+        """) % (attribute, )
+    return cmd.try_compile(body, None, None) != 0
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/bdist_rpm.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/bdist_rpm.py
new file mode 100644
index 00000000..682e7a8e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/bdist_rpm.py
@@ -0,0 +1,22 @@
+import os
+import sys
+if 'setuptools' in sys.modules:
+    from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm
+else:
+    from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm
+
+class bdist_rpm(old_bdist_rpm):
+
+    def _make_spec_file(self):
+        spec_file = old_bdist_rpm._make_spec_file(self)
+
+        # Replace hardcoded setup.py script name
+        # with the real setup script name.
+        setup_py = os.path.basename(sys.argv[0])
+        if setup_py == 'setup.py':
+            return spec_file
+        new_spec_file = []
+        for line in spec_file:
+            line = line.replace('setup.py', setup_py)
+            new_spec_file.append(line)
+        return new_spec_file
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build.py
new file mode 100644
index 00000000..80830d55
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build.py
@@ -0,0 +1,62 @@
+import os
+import sys
+from distutils.command.build import build as old_build
+from distutils.util import get_platform
+from numpy.distutils.command.config_compiler import show_fortran_compilers
+
+class build(old_build):
+
+    sub_commands = [('config_cc',     lambda *args: True),
+                    ('config_fc',     lambda *args: True),
+                    ('build_src',     old_build.has_ext_modules),
+                    ] + old_build.sub_commands
+
+    user_options = old_build.user_options + [
+        ('fcompiler=', None,
+         "specify the Fortran compiler type"),
+        ('warn-error', None,
+         "turn all warnings into errors (-Werror)"),
+        ('cpu-baseline=', None,
+         "specify a list of enabled baseline CPU optimizations"),
+        ('cpu-dispatch=', None,
+         "specify a list of dispatched CPU optimizations"),
+        ('disable-optimization', None,
+         "disable CPU optimized code(dispatch,simd,fast...)"),
+        ('simd-test=', None,
+         "specify a list of CPU optimizations to be tested against NumPy SIMD interface"),
+        ]
+
+    help_options = old_build.help_options + [
+        ('help-fcompiler', None, "list available Fortran compilers",
+         show_fortran_compilers),
+        ]
+
+    def initialize_options(self):
+        old_build.initialize_options(self)
+        self.fcompiler = None
+        self.warn_error = False
+        self.cpu_baseline = "min"
+        self.cpu_dispatch = "max -xop -fma4" # drop AMD legacy features by default
+        self.disable_optimization = False
+        """
+        the '_simd' module is a very large. Adding more dispatched features
+        will increase binary size and compile time. By default we minimize
+        the targeted features to those most commonly used by the NumPy SIMD interface(NPYV),
+        NOTE: any specified features will be ignored if they're:
+            - part of the baseline(--cpu-baseline)
+            - not part of dispatch-able features(--cpu-dispatch)
+            - not supported by compiler or platform
+        """
+        self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F " \
+                         "AVX512_SKX VSX VSX2 VSX3 VSX4 NEON ASIMD VX VXE VXE2"
+
+    def finalize_options(self):
+        build_scripts = self.build_scripts
+        old_build.finalize_options(self)
+        plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
+        if build_scripts is None:
+            self.build_scripts = os.path.join(self.build_base,
+                                              'scripts' + plat_specifier)
+
+    def run(self):
+        old_build.run(self)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_clib.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_clib.py
new file mode 100644
index 00000000..6cd2f3e7
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_clib.py
@@ -0,0 +1,469 @@
+""" Modified version of build_clib that handles fortran source files.
+"""
+import os
+from glob import glob
+import shutil
+from distutils.command.build_clib import build_clib as old_build_clib
+from distutils.errors import DistutilsSetupError, DistutilsError, \
+    DistutilsFileError
+
+from numpy.distutils import log
+from distutils.dep_util import newer_group
+from numpy.distutils.misc_util import (
+    filter_sources, get_lib_source_files, get_numpy_include_dirs,
+    has_cxx_sources, has_f_sources, is_sequence
+)
+from numpy.distutils.ccompiler_opt import new_ccompiler_opt
+
+# Fix Python distutils bug sf #1718574:
+_l = old_build_clib.user_options
+for _i in range(len(_l)):
+    if _l[_i][0] in ['build-clib', 'build-temp']:
+        _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:]
+#
+
+
+class build_clib(old_build_clib):
+
+    description = "build C/C++/F libraries used by Python extensions"
+
+    user_options = old_build_clib.user_options + [
+        ('fcompiler=', None,
+         "specify the Fortran compiler type"),
+        ('inplace', 'i', 'Build in-place'),
+        ('parallel=', 'j',
+         "number of parallel jobs"),
+        ('warn-error', None,
+         "turn all warnings into errors (-Werror)"),
+        ('cpu-baseline=', None,
+         "specify a list of enabled baseline CPU optimizations"),
+        ('cpu-dispatch=', None,
+         "specify a list of dispatched CPU optimizations"),
+        ('disable-optimization', None,
+         "disable CPU optimized code(dispatch,simd,fast...)"),
+    ]
+
+    boolean_options = old_build_clib.boolean_options + \
+                      ['inplace', 'warn-error', 'disable-optimization']
+
+    def initialize_options(self):
+        old_build_clib.initialize_options(self)
+        self.fcompiler = None
+        self.inplace = 0
+        self.parallel = None
+        self.warn_error = None
+        self.cpu_baseline = None
+        self.cpu_dispatch = None
+        self.disable_optimization = None
+
+
+    def finalize_options(self):
+        if self.parallel:
+            try:
+                self.parallel = int(self.parallel)
+            except ValueError as e:
+                raise ValueError("--parallel/-j argument must be an integer") from e
+        old_build_clib.finalize_options(self)
+        self.set_undefined_options('build',
+                                        ('parallel', 'parallel'),
+                                        ('warn_error', 'warn_error'),
+                                        ('cpu_baseline', 'cpu_baseline'),
+                                        ('cpu_dispatch', 'cpu_dispatch'),
+                                        ('disable_optimization', 'disable_optimization')
+                                  )
+
+    def have_f_sources(self):
+        for (lib_name, build_info) in self.libraries:
+            if has_f_sources(build_info.get('sources', [])):
+                return True
+        return False
+
+    def have_cxx_sources(self):
+        for (lib_name, build_info) in self.libraries:
+            if has_cxx_sources(build_info.get('sources', [])):
+                return True
+        return False
+
+    def run(self):
+        if not self.libraries:
+            return
+
+        # Make sure that library sources are complete.
+        languages = []
+
+        # Make sure that extension sources are complete.
+        self.run_command('build_src')
+
+        for (lib_name, build_info) in self.libraries:
+            l = build_info.get('language', None)
+            if l and l not in languages:
+                languages.append(l)
+
+        from distutils.ccompiler import new_compiler
+        self.compiler = new_compiler(compiler=self.compiler,
+                                     dry_run=self.dry_run,
+                                     force=self.force)
+        self.compiler.customize(self.distribution,
+                                need_cxx=self.have_cxx_sources())
+
+        if self.warn_error:
+            self.compiler.compiler.append('-Werror')
+            self.compiler.compiler_so.append('-Werror')
+
+        libraries = self.libraries
+        self.libraries = None
+        self.compiler.customize_cmd(self)
+        self.libraries = libraries
+
+        self.compiler.show_customization()
+
+        if not self.disable_optimization:
+            dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h")
+            dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath)
+            opt_cache_path = os.path.abspath(
+                os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py')
+            )
+            if hasattr(self, "compiler_opt"):
+                # By default `CCompilerOpt` updates the cache at process
+                # exit, which may lead to duplicate building (see
+                # build_extension()/force_rebuild) if run() is called
+                # multiple times within the same os process/thread without
+                # giving previous `CCompilerOpt` instances a chance to
+                # update the cache.
+                self.compiler_opt.cache_flush()
+
+            self.compiler_opt = new_ccompiler_opt(
+                compiler=self.compiler, dispatch_hpath=dispatch_hpath,
+                cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch,
+                cache_path=opt_cache_path
+            )
+            def report(copt):
+                log.info("\n########### CLIB COMPILER OPTIMIZATION ###########")
+                log.info(copt.report(full=True))
+
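+            # emit the final optimization report once, at interpreter exit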
+            import atexit
+            atexit.register(report, self.compiler_opt)
+
+        if self.have_f_sources():
+            from numpy.distutils.fcompiler import new_fcompiler
+            self._f_compiler = new_fcompiler(compiler=self.fcompiler,
+                                             verbose=self.verbose,
+                                             dry_run=self.dry_run,
+                                             force=self.force,
+                                             requiref90='f90' in languages,
+                                             c_compiler=self.compiler)
+            if self._f_compiler is not None:
+                self._f_compiler.customize(self.distribution)
+
+                libraries = self.libraries
+                self.libraries = None
+                self._f_compiler.customize_cmd(self)
+                self.libraries = libraries
+
+                self._f_compiler.show_customization()
+        else:
+            self._f_compiler = None
+
+        self.build_libraries(self.libraries)
+
+        if self.inplace:
+            for l in self.distribution.installed_libraries:
+                libname = self.compiler.library_filename(l.name)
+                source = os.path.join(self.build_clib, libname)
+                target = os.path.join(l.target_dir, libname)
+                self.mkpath(l.target_dir)
+                shutil.copy(source, target)
+
+    def get_source_files(self):
+        self.check_library_list(self.libraries)
+        filenames = []
+        for lib in self.libraries:
+            filenames.extend(get_lib_source_files(lib))
+        return filenames
+
+    def build_libraries(self, libraries):
+        for (lib_name, build_info) in libraries:
+            self.build_a_library(build_info, lib_name, libraries)
+
+    def assemble_flags(self, in_flags):
+        """ Assemble flags from flag list
+
+        Parameters
+        ----------
+        in_flags : None or sequence
+            None corresponds to empty list.  Sequence elements can be strings
+            or callables that return lists of strings. Callable takes `self` as
+            single parameter.
+
+        Returns
+        -------
+        out_flags : list
+        """
+        if in_flags is None:
+            return []
+        out_flags = []
+        for in_flag in in_flags:
+            if callable(in_flag):
+                out_flags += in_flag(self)
+            else:
+                out_flags.append(in_flag)
+        return out_flags
+
+    def build_a_library(self, build_info, lib_name, libraries):
+        # default compilers
+        compiler = self.compiler
+        fcompiler = self._f_compiler
+
+        sources = build_info.get('sources')
+        if sources is None or not is_sequence(sources):
+            raise DistutilsSetupError(("in 'libraries' option (library '%s'), " +
+                                       "'sources' must be present and must be " +
+                                       "a list of source filenames") % lib_name)
+        sources = list(sources)
+
+        c_sources, cxx_sources, f_sources, fmodule_sources \
+            = filter_sources(sources)
+        requiref90 = not not fmodule_sources or \
+            build_info.get('language', 'c') == 'f90'
+
+        # save source type information so that build_ext can use it.
+        source_languages = []
+        if c_sources:
+            source_languages.append('c')
+        if cxx_sources:
+            source_languages.append('c++')
+        if requiref90:
+            source_languages.append('f90')
+        elif f_sources:
+            source_languages.append('f77')
+        build_info['source_languages'] = source_languages
+
+        lib_file = compiler.library_filename(lib_name,
+                                             output_dir=self.build_clib)
+        depends = sources + build_info.get('depends', [])
+
+        force_rebuild = self.force
+        if not self.disable_optimization and not self.compiler_opt.is_cached():
+            log.debug("Detected changes on compiler optimizations")
+            force_rebuild = True
+        if not (force_rebuild or newer_group(depends, lib_file, 'newer')):
+            log.debug("skipping '%s' library (up-to-date)", lib_name)
+            return
+        else:
+            log.info("building '%s' library", lib_name)
+
+        config_fc = build_info.get('config_fc', {})
+        if fcompiler is not None and config_fc:
+            log.info('using additional config_fc from setup script '
+                     'for fortran compiler: %s'
+                     % (config_fc,))
+            from numpy.distutils.fcompiler import new_fcompiler
+            fcompiler = new_fcompiler(compiler=fcompiler.compiler_type,
+                                      verbose=self.verbose,
+                                      dry_run=self.dry_run,
+                                      force=self.force,
+                                      requiref90=requiref90,
+                                      c_compiler=self.compiler)
+            if fcompiler is not None:
+                dist = self.distribution
+                base_config_fc = dist.get_option_dict('config_fc').copy()
+                base_config_fc.update(config_fc)
+                fcompiler.customize(base_config_fc)
+
+        # check availability of Fortran compilers
+        if (f_sources or fmodule_sources) and fcompiler is None:
+            raise DistutilsError("library %s has Fortran sources"
+                                 " but no Fortran compiler found" % (lib_name))
+
+        if fcompiler is not None:
+            fcompiler.extra_f77_compile_args = build_info.get(
+                'extra_f77_compile_args') or []
+            fcompiler.extra_f90_compile_args = build_info.get(
+                'extra_f90_compile_args') or []
+
+        macros = build_info.get('macros')
+        if macros is None:
+            macros = []
+        include_dirs = build_info.get('include_dirs')
+        if include_dirs is None:
+            include_dirs = []
+        # Flags can be strings, or callables that return a list of strings.
+        extra_postargs = self.assemble_flags(
+            build_info.get('extra_compiler_args'))
+        extra_cflags = self.assemble_flags(
+            build_info.get('extra_cflags'))
+        extra_cxxflags = self.assemble_flags(
+            build_info.get('extra_cxxflags'))
+
+        include_dirs.extend(get_numpy_include_dirs())
+        # where compiled F90 module files are:
+        module_dirs = build_info.get('module_dirs') or []
+        module_build_dir = os.path.dirname(lib_file)
+        if requiref90:
+            self.mkpath(module_build_dir)
+
+        if compiler.compiler_type == 'msvc':
+            # this hack works around the msvc compiler attributes
+            # problem, msvc uses its own convention :(
+            c_sources += cxx_sources
+            cxx_sources = []
+            extra_cflags += extra_cxxflags
+
+        # filtering C dispatch-table sources when optimization is not disabled,
+        # otherwise treated as normal sources.
+        copt_c_sources = []
+        copt_cxx_sources = []
+        copt_baseline_flags = []
+        copt_macros = []
+        if not self.disable_optimization:
+            bsrc_dir = self.get_finalized_command("build_src").build_src
+            dispatch_hpath = os.path.join("numpy", "distutils", "include")
+            dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
+            include_dirs.append(dispatch_hpath)
+            # copt_build_src = None if self.inplace else bsrc_dir
+            copt_build_src = bsrc_dir
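+            # Move dispatch-able sources (*.dispatch.c, *.dispatch.cpp/.cxx)
+            # out of the regular source lists; they are compiled separately
+            # via compiler_opt.try_dispatch() below.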
+            for _srcs, _dst, _ext in (
+                ((c_sources,), copt_c_sources, ('.dispatch.c',)),
+                ((c_sources, cxx_sources), copt_cxx_sources,
+                    ('.dispatch.cpp', '.dispatch.cxx'))
+            ):
+                for _src in _srcs:
+                    _dst += [
+                        _src.pop(_src.index(s))
+                        for s in _src[:] if s.endswith(_ext)
+                    ]
+            copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
+        else:
+            copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
+
+        objects = []
+        if copt_cxx_sources:
+            log.info("compiling C++ dispatch-able sources")
+            objects += self.compiler_opt.try_dispatch(
+                copt_cxx_sources,
+                output_dir=self.build_temp,
+                src_dir=copt_build_src,
+                macros=macros + copt_macros,
+                include_dirs=include_dirs,
+                debug=self.debug,
+                extra_postargs=extra_postargs + extra_cxxflags,
+                ccompiler=compiler.cxx_compiler()
+            )
+
+        if copt_c_sources:
+            log.info("compiling C dispatch-able sources")
+            objects += self.compiler_opt.try_dispatch(
+                copt_c_sources,
+                output_dir=self.build_temp,
+                src_dir=copt_build_src,
+                macros=macros + copt_macros,
+                include_dirs=include_dirs,
+                debug=self.debug,
+                extra_postargs=extra_postargs + extra_cflags)
+
+        if c_sources:
+            log.info("compiling C sources")
+            objects += compiler.compile(
+                c_sources,
+                output_dir=self.build_temp,
+                macros=macros + copt_macros,
+                include_dirs=include_dirs,
+                debug=self.debug,
+                extra_postargs=(extra_postargs +
+                                copt_baseline_flags +
+                                extra_cflags))
+
+        if cxx_sources:
+            log.info("compiling C++ sources")
+            cxx_compiler = compiler.cxx_compiler()
+            cxx_objects = cxx_compiler.compile(
+                cxx_sources,
+                output_dir=self.build_temp,
+                macros=macros + copt_macros,
+                include_dirs=include_dirs,
+                debug=self.debug,
+                extra_postargs=(extra_postargs +
+                                copt_baseline_flags +
+                                extra_cxxflags))
+            objects.extend(cxx_objects)
+
+        if f_sources or fmodule_sources:
+            extra_postargs = []
+            f_objects = []
+
+            if requiref90:
+                if fcompiler.module_dir_switch is None:
+                    existing_modules = glob('*.mod')
+                extra_postargs += fcompiler.module_options(
+                    module_dirs, module_build_dir)
+
+            if fmodule_sources:
+                log.info("compiling Fortran 90 module sources")
+                f_objects += fcompiler.compile(fmodule_sources,
+                                               output_dir=self.build_temp,
+                                               macros=macros,
+                                               include_dirs=include_dirs,
+                                               debug=self.debug,
+                                               extra_postargs=extra_postargs)
+
+            if requiref90 and self._f_compiler.module_dir_switch is None:
+                # move new compiled F90 module files to module_build_dir
+                for f in glob('*.mod'):
+                    if f in existing_modules:
+                        continue
+                    t = os.path.join(module_build_dir, f)
+                    if os.path.abspath(f) == os.path.abspath(t):
+                        continue
+                    if os.path.isfile(t):
+                        os.remove(t)
+                    try:
+                        self.move_file(f, module_build_dir)
+                    except DistutilsFileError:
+                        log.warn('failed to move %r to %r'
+                                 % (f, module_build_dir))
+
+            if f_sources:
+                log.info("compiling Fortran sources")
+                f_objects += fcompiler.compile(f_sources,
+                                               output_dir=self.build_temp,
+                                               macros=macros,
+                                               include_dirs=include_dirs,
+                                               debug=self.debug,
+                                               extra_postargs=extra_postargs)
+        else:
+            f_objects = []
+
+        if f_objects and not fcompiler.can_ccompiler_link(compiler):
+            # Default linker cannot link Fortran object files, and results
+            # need to be wrapped later. Instead of creating a real static
+            # library, just keep track of the object files.
+            listfn = os.path.join(self.build_clib,
+                                  lib_name + '.fobjects')
+            with open(listfn, 'w') as f:
+                f.write("\n".join(os.path.abspath(obj) for obj in f_objects))
+
+            listfn = os.path.join(self.build_clib,
+                                  lib_name + '.cobjects')
+            with open(listfn, 'w') as f:
+                f.write("\n".join(os.path.abspath(obj) for obj in objects))
+
+            # create empty "library" file for dependency tracking
+            lib_fname = os.path.join(self.build_clib,
+                                     lib_name + compiler.static_lib_extension)
+            with open(lib_fname, 'wb') as f:
+                pass
+        else:
+            # assume that default linker is suitable for
+            # linking Fortran object files
+            objects.extend(f_objects)
+            compiler.create_static_lib(objects, lib_name,
+                                       output_dir=self.build_clib,
+                                       debug=self.debug)
+
+        # fix library dependencies
+        clib_libraries = build_info.get('libraries', [])
+        for lname, binfo in libraries:
+            if lname in clib_libraries:
+                clib_libraries.extend(binfo.get('libraries', []))
+        if clib_libraries:
+            build_info['libraries'] = clib_libraries
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_ext.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_ext.py
new file mode 100644
index 00000000..5c62d90c
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_ext.py
@@ -0,0 +1,752 @@
+""" Modified version of build_ext that handles fortran source files.
+
+"""
+import os
+import subprocess
+from glob import glob
+
+from distutils.dep_util import newer_group
+from distutils.command.build_ext import build_ext as old_build_ext
+from distutils.errors import DistutilsFileError, DistutilsSetupError,\
+    DistutilsError
+from distutils.file_util import copy_file
+
+from numpy.distutils import log
+from numpy.distutils.exec_command import filepath_from_subprocess_output
+from numpy.distutils.system_info import combine_paths
+from numpy.distutils.misc_util import (
+    filter_sources, get_ext_source_files, get_numpy_include_dirs,
+    has_cxx_sources, has_f_sources, is_sequence
+)
+from numpy.distutils.command.config_compiler import show_fortran_compilers
+from numpy.distutils.ccompiler_opt import new_ccompiler_opt, CCompilerOpt
+
+class build_ext (old_build_ext):
+
+    description = "build C/C++/F extensions (compile/link to build directory)"
+
+    user_options = old_build_ext.user_options + [
+        ('fcompiler=', None,
+         "specify the Fortran compiler type"),
+        ('parallel=', 'j',
+         "number of parallel jobs"),
+        ('warn-error', None,
+         "turn all warnings into errors (-Werror)"),
+        ('cpu-baseline=', None,
+         "specify a list of enabled baseline CPU optimizations"),
+        ('cpu-dispatch=', None,
+         "specify a list of dispatched CPU optimizations"),
+        ('disable-optimization', None,
+         "disable CPU optimized code(dispatch,simd,fast...)"),
+        ('simd-test=', None,
+         "specify a list of CPU optimizations to be tested against NumPy SIMD interface"),
+    ]
+
+    help_options = old_build_ext.help_options + [
+        ('help-fcompiler', None, "list available Fortran compilers",
+         show_fortran_compilers),
+    ]
+
+    boolean_options = old_build_ext.boolean_options + ['warn-error', 'disable-optimization']
+
+    def initialize_options(self):
+        old_build_ext.initialize_options(self)
+        self.fcompiler = None
+        self.parallel = None
+        self.warn_error = None
+        self.cpu_baseline = None
+        self.cpu_dispatch = None
+        self.disable_optimization = None
+        self.simd_test = None
+
+    def finalize_options(self):
+        if self.parallel:
+            try:
+                self.parallel = int(self.parallel)
+            except ValueError as e:
+                raise ValueError("--parallel/-j argument must be an integer") from e
+
+        # Ensure that self.include_dirs and self.distribution.include_dirs
+        # refer to the same list object. finalize_options will modify
+        # self.include_dirs, but self.distribution.include_dirs is used
+        # during the actual build.
+        # self.include_dirs is None unless paths are specified with
+        # --include-dirs.
+        # The include paths will be passed to the compiler in the order:
+        # numpy paths, --include-dirs paths, Python include path.
+        if isinstance(self.include_dirs, str):
+            self.include_dirs = self.include_dirs.split(os.pathsep)
+        incl_dirs = self.include_dirs or []
+        if self.distribution.include_dirs is None:
+            self.distribution.include_dirs = []
+        self.include_dirs = self.distribution.include_dirs
+        self.include_dirs.extend(incl_dirs)
+
+        old_build_ext.finalize_options(self)
+        self.set_undefined_options('build',
+                                        ('parallel', 'parallel'),
+                                        ('warn_error', 'warn_error'),
+                                        ('cpu_baseline', 'cpu_baseline'),
+                                        ('cpu_dispatch', 'cpu_dispatch'),
+                                        ('disable_optimization', 'disable_optimization'),
+                                        ('simd_test', 'simd_test')
+                                  )
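+        # Expose the requested targets as the named group "simd_test" so
+        # dispatch-able sources can reference it from their @targets
+        # configuration.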
+        CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test
+
+    def run(self):
+        if not self.extensions:
+            return
+
+        # Make sure that extension sources are complete.
+        self.run_command('build_src')
+
+        if self.distribution.has_c_libraries():
+            if self.inplace:
+                if self.distribution.have_run.get('build_clib'):
+                    log.warn('build_clib already run, it is too late to '
+                             'ensure in-place build of build_clib')
+                    build_clib = self.distribution.get_command_obj(
+                        'build_clib')
+                else:
+                    build_clib = self.distribution.get_command_obj(
+                        'build_clib')
+                    build_clib.inplace = 1
+                    build_clib.ensure_finalized()
+                    build_clib.run()
+                    self.distribution.have_run['build_clib'] = 1
+
+            else:
+                self.run_command('build_clib')
+                build_clib = self.get_finalized_command('build_clib')
+            self.library_dirs.append(build_clib.build_clib)
+        else:
+            build_clib = None
+
+        # We do not automatically add C libraries to the list of
+        # extension libraries, to prevent bogus linking commands.
+        # Extensions must explicitly specify the C libraries they use.
+
+        from distutils.ccompiler import new_compiler
+        from numpy.distutils.fcompiler import new_fcompiler
+
+        compiler_type = self.compiler
+        # Initialize C compiler:
+        self.compiler = new_compiler(compiler=compiler_type,
+                                     verbose=self.verbose,
+                                     dry_run=self.dry_run,
+                                     force=self.force)
+        self.compiler.customize(self.distribution)
+        self.compiler.customize_cmd(self)
+
+        if self.warn_error:
+            self.compiler.compiler.append('-Werror')
+            self.compiler.compiler_so.append('-Werror')
+
+        self.compiler.show_customization()
+
+        if not self.disable_optimization:
+            dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h")
+            dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath)
+            opt_cache_path = os.path.abspath(
+                os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py')
+            )
+            if hasattr(self, "compiler_opt"):
+                # By default `CCompilerOpt` updates the cache at process
+                # exit, which may lead to duplicate building (see
+                # build_extension()/force_rebuild) if run() is called
+                # multiple times within the same os process/thread without
+                # giving previous `CCompilerOpt` instances a chance to
+                # update the cache.
+                self.compiler_opt.cache_flush()
+
+            self.compiler_opt = new_ccompiler_opt(
+                compiler=self.compiler, dispatch_hpath=dispatch_hpath,
+                cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch,
+                cache_path=opt_cache_path
+            )
+            def report(copt):
+                log.info("\n########### EXT COMPILER OPTIMIZATION ###########")
+                log.info(copt.report(full=True))
+
+            import atexit
+            atexit.register(report, self.compiler_opt)
+
+        # Setup directory for storing generated extra DLL files on Windows
+        self.extra_dll_dir = os.path.join(self.build_temp, '.libs')
+        if not os.path.isdir(self.extra_dll_dir):
+            os.makedirs(self.extra_dll_dir)
+
+        # Create mapping of libraries built by build_clib:
+        clibs = {}
+        if build_clib is not None:
+            for libname, build_info in build_clib.libraries or []:
+                if libname in clibs and clibs[libname] != build_info:
+                    log.warn('library %r defined more than once,'
+                             ' overwriting build_info\n%s... \nwith\n%s...'
+                             % (libname, repr(clibs[libname])[:300], repr(build_info)[:300]))
+                clibs[libname] = build_info
+        # .. and distribution libraries:
+        for libname, build_info in self.distribution.libraries or []:
+            if libname in clibs:
+                # build_clib libraries take precedence over distribution ones
+                continue
+            clibs[libname] = build_info
+
+        # Determine if C++/Fortran 77/Fortran 90 compilers are needed.
+        # Update extension libraries, library_dirs, and macros.
+        all_languages = set()
+        for ext in self.extensions:
+            ext_languages = set()
+            c_libs = []
+            c_lib_dirs = []
+            macros = []
+            for libname in ext.libraries:
+                if libname in clibs:
+                    binfo = clibs[libname]
+                    c_libs += binfo.get('libraries', [])
+                    c_lib_dirs += binfo.get('library_dirs', [])
+                    for m in binfo.get('macros', []):
+                        if m not in macros:
+                            macros.append(m)
+
+                for l in clibs.get(libname, {}).get('source_languages', []):
+                    ext_languages.add(l)
+            if c_libs:
+                new_c_libs = ext.libraries + c_libs
+                log.info('updating extension %r libraries from %r to %r'
+                         % (ext.name, ext.libraries, new_c_libs))
+                ext.libraries = new_c_libs
+                ext.library_dirs = ext.library_dirs + c_lib_dirs
+            if macros:
+                log.info('extending extension %r defined_macros with %r'
+                         % (ext.name, macros))
+                ext.define_macros = ext.define_macros + macros
+
+            # determine extension languages
+            if has_f_sources(ext.sources):
+                ext_languages.add('f77')
+            if has_cxx_sources(ext.sources):
+                ext_languages.add('c++')
+            l = ext.language or self.compiler.detect_language(ext.sources)
+            if l:
+                ext_languages.add(l)
+
+            # reset language attribute for choosing proper linker
+            #
+            # When we build extensions with multiple languages, we have to
+            # choose a linker. The rules here are:
+            #   1. if there is Fortran code, always prefer the Fortran linker,
+            #   2. otherwise prefer C++ over C,
+            #   3. Users can force a particular linker by using
+            #          `language='c'`  # or 'c++', 'f90', 'f77'
+            #      in their config.add_extension() calls.
+            if 'c++' in ext_languages:
+                ext_language = 'c++'
+            else:
+                ext_language = 'c'  # default
+
+            has_fortran = False
+            if 'f90' in ext_languages:
+                ext_language = 'f90'
+                has_fortran = True
+            elif 'f77' in ext_languages:
+                ext_language = 'f77'
+                has_fortran = True
+
+            if not ext.language or has_fortran:
+                if l and l != ext_language and ext.language:
+                    log.warn('resetting extension %r language from %r to %r.' %
+                             (ext.name, l, ext_language))
+
+                ext.language = ext_language
+
+            # global language
+            all_languages.update(ext_languages)
+
+        need_f90_compiler = 'f90' in all_languages
+        need_f77_compiler = 'f77' in all_languages
+        need_cxx_compiler = 'c++' in all_languages
+
+        # Initialize C++ compiler:
+        if need_cxx_compiler:
+            self._cxx_compiler = new_compiler(compiler=compiler_type,
+                                              verbose=self.verbose,
+                                              dry_run=self.dry_run,
+                                              force=self.force)
+            compiler = self._cxx_compiler
+            compiler.customize(self.distribution, need_cxx=need_cxx_compiler)
+            compiler.customize_cmd(self)
+            compiler.show_customization()
+            self._cxx_compiler = compiler.cxx_compiler()
+        else:
+            self._cxx_compiler = None
+
+        # Initialize Fortran 77 compiler:
+        if need_f77_compiler:
+            ctype = self.fcompiler
+            self._f77_compiler = new_fcompiler(compiler=self.fcompiler,
+                                               verbose=self.verbose,
+                                               dry_run=self.dry_run,
+                                               force=self.force,
+                                               requiref90=False,
+                                               c_compiler=self.compiler)
+            fcompiler = self._f77_compiler
+            if fcompiler:
+                ctype = fcompiler.compiler_type
+                fcompiler.customize(self.distribution)
+            if fcompiler and fcompiler.get_version():
+                fcompiler.customize_cmd(self)
+                fcompiler.show_customization()
+            else:
+                self.warn('f77_compiler=%s is not available.' %
+                          (ctype))
+                self._f77_compiler = None
+        else:
+            self._f77_compiler = None
+
+        # Initialize Fortran 90 compiler:
+        if need_f90_compiler:
+            ctype = self.fcompiler
+            self._f90_compiler = new_fcompiler(compiler=self.fcompiler,
+                                               verbose=self.verbose,
+                                               dry_run=self.dry_run,
+                                               force=self.force,
+                                               requiref90=True,
+                                               c_compiler=self.compiler)
+            fcompiler = self._f90_compiler
+            if fcompiler:
+                ctype = fcompiler.compiler_type
+                fcompiler.customize(self.distribution)
+            if fcompiler and fcompiler.get_version():
+                fcompiler.customize_cmd(self)
+                fcompiler.show_customization()
+            else:
+                self.warn('f90_compiler=%s is not available.' %
+                          (ctype))
+                self._f90_compiler = None
+        else:
+            self._f90_compiler = None
+
+        # Build extensions
+        self.build_extensions()
+
+        # Copy over any extra DLL files
+        # FIXME: When there is more than one package, we blindly assume
+        # that every package needs all of the libraries, resulting in a
+        # larger wheel than is required. This should be fixed, but it's
+        # so rare that I won't bother to handle it.
+        pkg_roots = {
+            self.get_ext_fullname(ext.name).split('.')[0]
+            for ext in self.extensions
+        }
+        for pkg_root in pkg_roots:
+            shared_lib_dir = os.path.join(pkg_root, '.libs')
+            if not self.inplace:
+                shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir)
+            for fn in os.listdir(self.extra_dll_dir):
+                if not os.path.isdir(shared_lib_dir):
+                    os.makedirs(shared_lib_dir)
+                if not fn.lower().endswith('.dll'):
+                    continue
+                runtime_lib = os.path.join(self.extra_dll_dir, fn)
+                copy_file(runtime_lib, shared_lib_dir)
+
+    def swig_sources(self, sources, extensions=None):
+        # Do nothing. Swig sources have been handled in build_src command.
+        return sources
+
+    def build_extension(self, ext):
+        sources = ext.sources
+        if sources is None or not is_sequence(sources):
+            raise DistutilsSetupError(
+                ("in 'ext_modules' option (extension '%s'), " +
+                 "'sources' must be present and must be " +
+                 "a list of source filenames") % ext.name)
+        sources = list(sources)
+
+        if not sources:
+            return
+
+        fullname = self.get_ext_fullname(ext.name)
+        if self.inplace:
+            modpath = fullname.split('.')
+            package = '.'.join(modpath[0:-1])
+            base = modpath[-1]
+            build_py = self.get_finalized_command('build_py')
+            package_dir = build_py.get_package_dir(package)
+            ext_filename = os.path.join(package_dir,
+                                        self.get_ext_filename(base))
+        else:
+            ext_filename = os.path.join(self.build_lib,
+                                        self.get_ext_filename(fullname))
+        depends = sources + ext.depends
+
+        force_rebuild = self.force
+        if not self.disable_optimization and not self.compiler_opt.is_cached():
+            log.debug("Detected changes on compiler optimizations")
+            force_rebuild = True
+        if not (force_rebuild or newer_group(depends, ext_filename, 'newer')):
+            log.debug("skipping '%s' extension (up-to-date)", ext.name)
+            return
+        else:
+            log.info("building '%s' extension", ext.name)
+
+        extra_args = ext.extra_compile_args or []
+        extra_cflags = getattr(ext, 'extra_c_compile_args', None) or []
+        extra_cxxflags = getattr(ext, 'extra_cxx_compile_args', None) or []
+
+        macros = ext.define_macros[:]
+        for undef in ext.undef_macros:
+            macros.append((undef,))
+
+        c_sources, cxx_sources, f_sources, fmodule_sources = \
+            filter_sources(ext.sources)
+
+        if self.compiler.compiler_type == 'msvc':
+            if cxx_sources:
+                # Needed to compile kiva.agg._agg extension.
+                extra_args.append('/Zm1000')
+                extra_cflags += extra_cxxflags
+            # this hack works around the msvc compiler attributes
+            # problem, msvc uses its own convention :(
+            c_sources += cxx_sources
+            cxx_sources = []
+
+        # Set Fortran/C++ compilers for compilation and linking.
+        if ext.language == 'f90':
+            fcompiler = self._f90_compiler
+        elif ext.language == 'f77':
+            fcompiler = self._f77_compiler
+        else:  # in case ext.language is c++, for instance
+            fcompiler = self._f90_compiler or self._f77_compiler
+        if fcompiler is not None:
+            fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(
+                ext, 'extra_f77_compile_args') else []
+            fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(
+                ext, 'extra_f90_compile_args') else []
+        cxx_compiler = self._cxx_compiler
+
+        # check for the availability of required compilers
+        if cxx_sources and cxx_compiler is None:
+            raise DistutilsError("extension %r has C++ sources"
+                                 "but no C++ compiler found" % (ext.name))
+        if (f_sources or fmodule_sources) and fcompiler is None:
+            raise DistutilsError("extension %r has Fortran sources "
+                                 "but no Fortran compiler found" % (ext.name))
+        if ext.language in ['f77', 'f90'] and fcompiler is None:
+            self.warn("extension %r has Fortran libraries "
+                      "but no Fortran linker found, using default linker" % (ext.name))
+        if ext.language == 'c++' and cxx_compiler is None:
+            self.warn("extension %r has C++ libraries "
+                      "but no C++ linker found, using default linker" % (ext.name))
+
+        kws = {'depends': ext.depends}
+        output_dir = self.build_temp
+
+        include_dirs = ext.include_dirs + get_numpy_include_dirs()
+
+        # filtering C dispatch-table sources when optimization is not disabled,
+        # otherwise treated as normal sources.
+        copt_c_sources = []
+        copt_cxx_sources = []
+        copt_baseline_flags = []
+        copt_macros = []
+        if not self.disable_optimization:
+            bsrc_dir = self.get_finalized_command("build_src").build_src
+            dispatch_hpath = os.path.join("numpy", "distutils", "include")
+            dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
+            include_dirs.append(dispatch_hpath)
+
+            # copt_build_src = None if self.inplace else bsrc_dir
+            # Always generate the generated config files and
+            # dispatch-able sources inside the build directory,
+            # even if the build option `inplace` is enabled.
+            # This approach prevents conflicts with Meson-generated
+            # config headers. Since `spin build --clean` will not remove
+            # these headers, they might overwrite the generated Meson headers,
+            # causing compatibility issues. Maintaining separate directories
+            # ensures compatibility between distutils dispatch config headers
+            # and Meson headers, avoiding build disruptions.
+            # See gh-24450 for more details.
+            copt_build_src = bsrc_dir
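+            # Move dispatch-able sources (*.dispatch.c, *.dispatch.cpp/.cxx)
+            # out of the regular source lists; they are compiled separately
+            # via compiler_opt.try_dispatch() below.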
+            for _srcs, _dst, _ext in (
+                ((c_sources,), copt_c_sources, ('.dispatch.c',)),
+                ((c_sources, cxx_sources), copt_cxx_sources,
+                    ('.dispatch.cpp', '.dispatch.cxx'))
+            ):
+                for _src in _srcs:
+                    _dst += [
+                        _src.pop(_src.index(s))
+                        for s in _src[:] if s.endswith(_ext)
+                    ]
+            copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
+        else:
+            copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
+
+        c_objects = []
+        if copt_cxx_sources:
+            log.info("compiling C++ dispatch-able sources")
+            c_objects += self.compiler_opt.try_dispatch(
+                copt_cxx_sources,
+                output_dir=output_dir,
+                src_dir=copt_build_src,
+                macros=macros + copt_macros,
+                include_dirs=include_dirs,
+                debug=self.debug,
+                extra_postargs=extra_args + extra_cxxflags,
+                ccompiler=cxx_compiler,
+                **kws
+            )
+        if copt_c_sources:
+            log.info("compiling C dispatch-able sources")
+            c_objects += self.compiler_opt.try_dispatch(
+                copt_c_sources,
+                output_dir=output_dir,
+                src_dir=copt_build_src,
+                macros=macros + copt_macros,
+                include_dirs=include_dirs,
+                debug=self.debug,
+                extra_postargs=extra_args + extra_cflags,
+                **kws)
+        if c_sources:
+            log.info("compiling C sources")
+            c_objects += self.compiler.compile(
+                c_sources,
+                output_dir=output_dir,
+                macros=macros + copt_macros,
+                include_dirs=include_dirs,
+                debug=self.debug,
+                extra_postargs=(extra_args + copt_baseline_flags +
+                                extra_cflags),
+                **kws)
+        if cxx_sources:
+            log.info("compiling C++ sources")
+            c_objects += cxx_compiler.compile(
+                cxx_sources,
+                output_dir=output_dir,
+                macros=macros + copt_macros,
+                include_dirs=include_dirs,
+                debug=self.debug,
+                extra_postargs=(extra_args + copt_baseline_flags +
+                                extra_cxxflags),
+                **kws)
+
+        extra_postargs = []
+        f_objects = []
+        if fmodule_sources:
+            log.info("compiling Fortran 90 module sources")
+            module_dirs = ext.module_dirs[:]
+            module_build_dir = os.path.join(
+                self.build_temp, os.path.dirname(
+                    self.get_ext_filename(fullname)))
+
+            self.mkpath(module_build_dir)
+            if fcompiler.module_dir_switch is None:
+                existing_modules = glob('*.mod')
+            extra_postargs += fcompiler.module_options(
+                module_dirs, module_build_dir)
+            f_objects += fcompiler.compile(fmodule_sources,
+                                           output_dir=self.build_temp,
+                                           macros=macros,
+                                           include_dirs=include_dirs,
+                                           debug=self.debug,
+                                           extra_postargs=extra_postargs,
+                                           depends=ext.depends)
+
+            if fcompiler.module_dir_switch is None:
+                for f in glob('*.mod'):
+                    if f in existing_modules:
+                        continue
+                    t = os.path.join(module_build_dir, f)
+                    if os.path.abspath(f) == os.path.abspath(t):
+                        continue
+                    if os.path.isfile(t):
+                        os.remove(t)
+                    try:
+                        self.move_file(f, module_build_dir)
+                    except DistutilsFileError:
+                        log.warn('failed to move %r to %r' %
+                                 (f, module_build_dir))
+        if f_sources:
+            log.info("compiling Fortran sources")
+            f_objects += fcompiler.compile(f_sources,
+                                           output_dir=self.build_temp,
+                                           macros=macros,
+                                           include_dirs=include_dirs,
+                                           debug=self.debug,
+                                           extra_postargs=extra_postargs,
+                                           depends=ext.depends)
+
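+        # Some Fortran compilers produce objects that the C/C++ linker
+        # cannot handle directly; set those aside so they can be wrapped
+        # into a linkable form by _process_unlinkable_fobjects() below.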
+        if f_objects and not fcompiler.can_ccompiler_link(self.compiler):
+            unlinkable_fobjects = f_objects
+            objects = c_objects
+        else:
+            unlinkable_fobjects = []
+            objects = c_objects + f_objects
+
+        if ext.extra_objects:
+            objects.extend(ext.extra_objects)
+        extra_args = ext.extra_link_args or []
+        libraries = self.get_libraries(ext)[:]
+        library_dirs = ext.library_dirs[:]
+
+        linker = self.compiler.link_shared_object
+        # Always use system linker when using MSVC compiler.
+        if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'):
+            # expand libraries with fcompiler libraries as we are
+            # not using fcompiler linker
+            self._libs_with_msvc_and_fortran(
+                fcompiler, libraries, library_dirs)
+            if ext.runtime_library_dirs:
+                # gcc adds RPATH to the link. On windows, copy the dll into
+                # self.extra_dll_dir instead.
+                for d in ext.runtime_library_dirs:
+                    for f in glob(d + '/*.dll'):
+                        copy_file(f, self.extra_dll_dir)
+                ext.runtime_library_dirs = []
+
+        elif ext.language in ['f77', 'f90'] and fcompiler is not None:
+            linker = fcompiler.link_shared_object
+        if ext.language == 'c++' and cxx_compiler is not None:
+            linker = cxx_compiler.link_shared_object
+
+        if fcompiler is not None:
+            objects, libraries = self._process_unlinkable_fobjects(
+                    objects, libraries,
+                    fcompiler, library_dirs,
+                    unlinkable_fobjects)
+
+        linker(objects, ext_filename,
+               libraries=libraries,
+               library_dirs=library_dirs,
+               runtime_library_dirs=ext.runtime_library_dirs,
+               extra_postargs=extra_args,
+               export_symbols=self.get_export_symbols(ext),
+               debug=self.debug,
+               build_temp=self.build_temp,
+               target_lang=ext.language)
+
+    def _add_dummy_mingwex_sym(self, c_sources):
+        build_src = self.get_finalized_command("build_src").build_src
+        build_clib = self.get_finalized_command("build_clib").build_clib
+        objects = self.compiler.compile([os.path.join(build_src,
+                                                      "gfortran_vs2003_hack.c")],
+                                        output_dir=self.build_temp)
+        self.compiler.create_static_lib(
+            objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug)
+
+    def _process_unlinkable_fobjects(self, objects, libraries,
+                                     fcompiler, library_dirs,
+                                     unlinkable_fobjects):
+        libraries = list(libraries)
+        objects = list(objects)
+        unlinkable_fobjects = list(unlinkable_fobjects)
+
+        # Expand possible fake static libraries to objects;
+        # make sure to iterate over a copy of the list as
+        # "fake" libraries will be removed as they are
+        # encountered
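+        # (a "fake" static library <name> is represented on disk by two
+        # text files, <name>.fobjects and <name>.cobjects, each listing
+        # one object file path per line)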
+        for lib in libraries[:]:
+            for libdir in library_dirs:
+                fake_lib = os.path.join(libdir, lib + '.fobjects')
+                if os.path.isfile(fake_lib):
+                    # Replace fake static library
+                    libraries.remove(lib)
+                    with open(fake_lib) as f:
+                        unlinkable_fobjects.extend(f.read().splitlines())
+
+                    # Expand C objects
+                    c_lib = os.path.join(libdir, lib + '.cobjects')
+                    with open(c_lib) as f:
+                        objects.extend(f.read().splitlines())
+
+        # Wrap unlinkable objects to a linkable one
+        if unlinkable_fobjects:
+            fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects]
+            wrapped = fcompiler.wrap_unlinkable_objects(
+                    fobjects, output_dir=self.build_temp,
+                    extra_dll_dir=self.extra_dll_dir)
+            objects.extend(wrapped)
+
+        return objects, libraries
+
+    def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries,
+                                    c_library_dirs):
+        if fcompiler is None:
+            return
+
+        for libname in c_libraries:
+            if libname.startswith('msvc'):
+                continue
+            fileexists = False
+            for libdir in c_library_dirs or []:
+                libfile = os.path.join(libdir, '%s.lib' % (libname))
+                if os.path.isfile(libfile):
+                    fileexists = True
+                    break
+            if fileexists:
+                continue
+            # make g77-compiled static libs available to MSVC
+            fileexists = False
+            for libdir in c_library_dirs:
+                libfile = os.path.join(libdir, 'lib%s.a' % (libname))
+                if os.path.isfile(libfile):
+                    # copy libname.a file to name.lib so that MSVC linker
+                    # can find it
+                    libfile2 = os.path.join(self.build_temp, libname + '.lib')
+                    copy_file(libfile, libfile2)
+                    if self.build_temp not in c_library_dirs:
+                        c_library_dirs.append(self.build_temp)
+                    fileexists = True
+                    break
+            if fileexists:
+                continue
+            log.warn('could not find library %r in directories %s'
+                     % (libname, c_library_dirs))
+
+        # Always use system linker when using MSVC compiler.
+        f_lib_dirs = []
+        for dir in fcompiler.library_dirs:
+            # correct path when compiling in Cygwin but with normal Win
+            # Python
+            if dir.startswith('/usr/lib'):
+                try:
+                    dir = subprocess.check_output(['cygpath', '-w', dir])
+                except (OSError, subprocess.CalledProcessError):
+                    pass
+                else:
+                    dir = filepath_from_subprocess_output(dir)
+            f_lib_dirs.append(dir)
+        c_library_dirs.extend(f_lib_dirs)
+
+        # make g77-compiled static libs available to MSVC
+        for lib in fcompiler.libraries:
+            if not lib.startswith('msvc'):
+                c_libraries.append(lib)
+                p = combine_paths(f_lib_dirs, 'lib' + lib + '.a')
+                if p:
+                    dst_name = os.path.join(self.build_temp, lib + '.lib')
+                    if not os.path.isfile(dst_name):
+                        copy_file(p[0], dst_name)
+                    if self.build_temp not in c_library_dirs:
+                        c_library_dirs.append(self.build_temp)
+
+    def get_source_files(self):
+        self.check_extensions_list(self.extensions)
+        filenames = []
+        for ext in self.extensions:
+            filenames.extend(get_ext_source_files(ext))
+        return filenames
+
+    def get_outputs(self):
+        self.check_extensions_list(self.extensions)
+
+        outputs = []
+        for ext in self.extensions:
+            if not ext.sources:
+                continue
+            fullname = self.get_ext_fullname(ext.name)
+            outputs.append(os.path.join(self.build_lib,
+                                        self.get_ext_filename(fullname)))
+        return outputs
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_py.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_py.py
new file mode 100644
index 00000000..d30dc5bf
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_py.py
@@ -0,0 +1,31 @@
+from distutils.command.build_py import build_py as old_build_py
+from numpy.distutils.misc_util import is_string
+
+class build_py(old_build_py):
+
+    def run(self):
+        build_src = self.get_finalized_command('build_src')
+        if build_src.py_modules_dict and self.packages is None:
+            self.packages = list(build_src.py_modules_dict.keys())
+        old_build_py.run(self)
+
+    def find_package_modules(self, package, package_dir):
+        modules = old_build_py.find_package_modules(self, package, package_dir)
+
+        # Find build_src generated *.py files.
+        build_src = self.get_finalized_command('build_src')
+        modules += build_src.py_modules_dict.get(package, [])
+
+        return modules
+
+    def find_modules(self):
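+        # Temporarily hide the (package, module_base, source) 3-tuples
+        # that build_src adds to py_modules; the distutils implementation
+        # of find_modules() only understands plain string module names.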
+        old_py_modules = self.py_modules[:]
+        new_py_modules = [_m for _m in self.py_modules if is_string(_m)]
+        self.py_modules[:] = new_py_modules
+        modules = old_build_py.find_modules(self)
+        self.py_modules[:] = old_py_modules
+
+        return modules
+
+    # XXX: Fix find_source_files for items in py_modules such that each item
+    # is a 3-tuple and item[2] is the source file.
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_scripts.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_scripts.py
new file mode 100644
index 00000000..d5cadb27
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_scripts.py
@@ -0,0 +1,49 @@
+""" Modified version of build_scripts that handles building scripts from functions.
+
+"""
+from distutils.command.build_scripts import build_scripts as old_build_scripts
+from numpy.distutils import log
+from numpy.distutils.misc_util import is_string
+
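+# A script entry may be a plain filename or a callable that takes the
+# build directory and returns a script path (or a sequence of paths).
+# An illustrative sketch (the name generate_foo_script is hypothetical):
+#
+#     def generate_foo_script(build_dir):
+#         target = os.path.join(build_dir, 'foo')
+#         with open(target, 'w') as f:
+#             f.write('#!/usr/bin/env python3\n# ...body...\n')
+#         return target
+#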
+class build_scripts(old_build_scripts):
+
+    def generate_scripts(self, scripts):
+        new_scripts = []
+        func_scripts = []
+        for script in scripts:
+            if is_string(script):
+                new_scripts.append(script)
+            else:
+                func_scripts.append(script)
+        if not func_scripts:
+            return new_scripts
+
+        build_dir = self.build_dir
+        self.mkpath(build_dir)
+        for func in func_scripts:
+            script = func(build_dir)
+            if not script:
+                continue
+            if is_string(script):
+                log.info("  adding '%s' to scripts" % (script,))
+                new_scripts.append(script)
+            else:
+                for s in script:
+                    log.info("  adding '%s' to scripts" % (s,))
+                new_scripts.extend(list(script))
+        return new_scripts
+
+    def run(self):
+        if not self.scripts:
+            return
+
+        self.scripts = self.generate_scripts(self.scripts)
+        # Now make sure that the distribution object has this list of scripts.
+        # setuptools' develop command requires that this be a list of filenames,
+        # not functions.
+        self.distribution.scripts = self.scripts
+
+        return old_build_scripts.run(self)
+
+    def get_source_files(self):
+        from numpy.distutils.misc_util import get_script_files
+        return get_script_files(self.scripts)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_src.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_src.py
new file mode 100644
index 00000000..7303db12
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/build_src.py
@@ -0,0 +1,773 @@
+""" Build swig and f2py sources.
+"""
+import os
+import re
+import sys
+import shlex
+import copy
+
+from distutils.command import build_ext
+from distutils.dep_util import newer_group, newer
+from distutils.util import get_platform
+from distutils.errors import DistutilsError, DistutilsSetupError
+
+
+# this import can't be done here, as it uses numpy stuff only available
+# after it's installed
+#import numpy.f2py
+from numpy.distutils import log
+from numpy.distutils.misc_util import (
+    fortran_ext_match, appendpath, is_string, is_sequence, get_cmd
+    )
+from numpy.distutils.from_template import process_file as process_f_file
+from numpy.distutils.conv_template import process_file as process_c_file
+
+def subst_vars(target, source, d):
+    """Substitute any occurrence of @foo@ by d['foo'] from source file into
+    target."""
+    var = re.compile('@([a-zA-Z_]+)@')
+    with open(source, 'r') as fs:
+        with open(target, 'w') as ft:
+            for l in fs:
+                m = var.search(l)
+                if m:
+                    ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)]))
+                else:
+                    ft.write(l)
+
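+# Illustrative example (the file names are hypothetical): with a template
+# 'conf.py.in' containing the line
+#     version = "@VERSION@"
+# calling subst_vars('conf.py', 'conf.py.in', {'VERSION': '1.26.3'}) writes
+#     version = "1.26.3"
+# Note that only the first @var@ name found on a line is substituted.
+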
+class build_src(build_ext.build_ext):
+
+    description = "build sources from SWIG, F2PY files or a function"
+
+    user_options = [
+        ('build-src=', 'd', "directory to \"build\" sources to"),
+        ('f2py-opts=', None, "list of f2py command line options"),
+        ('swig=', None, "path to the SWIG executable"),
+        ('swig-opts=', None, "list of SWIG command line options"),
+        ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"),
+        ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete
+        ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete
+        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
+        ('inplace', 'i',
+         "ignore build-lib and put compiled extensions into the source " +
+         "directory alongside your pure Python modules"),
+        ('verbose-cfg', None,
+         "change logging level from WARN to INFO which will show all " +
+         "compiler output")
+        ]
+
+    boolean_options = ['force', 'inplace', 'verbose-cfg']
+
+    help_options = []
+
+    def initialize_options(self):
+        self.extensions = None
+        self.package = None
+        self.py_modules = None
+        self.py_modules_dict = None
+        self.build_src = None
+        self.build_lib = None
+        self.build_base = None
+        self.force = None
+        self.inplace = None
+        self.package_dir = None
+        self.f2pyflags = None # obsolete
+        self.f2py_opts = None
+        self.swigflags = None # obsolete
+        self.swig_opts = None
+        self.swig_cpp = None
+        self.swig = None
+        self.verbose_cfg = None
+
+    def finalize_options(self):
+        self.set_undefined_options('build',
+                                   ('build_base', 'build_base'),
+                                   ('build_lib', 'build_lib'),
+                                   ('force', 'force'))
+        if self.package is None:
+            self.package = self.distribution.ext_package
+        self.extensions = self.distribution.ext_modules
+        self.libraries = self.distribution.libraries or []
+        self.py_modules = self.distribution.py_modules or []
+        self.data_files = self.distribution.data_files or []
+
+        if self.build_src is None:
+            plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
+            self.build_src = os.path.join(self.build_base, 'src'+plat_specifier)
+
+        # py_modules_dict is used in build_py.find_package_modules
+        self.py_modules_dict = {}
+
+        if self.f2pyflags:
+            if self.f2py_opts:
+                log.warn('ignoring --f2pyflags as --f2py-opts already used')
+            else:
+                self.f2py_opts = self.f2pyflags
+            self.f2pyflags = None
+        if self.f2py_opts is None:
+            self.f2py_opts = []
+        else:
+            self.f2py_opts = shlex.split(self.f2py_opts)
+
+        if self.swigflags:
+            if self.swig_opts:
+                log.warn('ignoring --swigflags as --swig-opts already used')
+            else:
+                self.swig_opts = self.swigflags
+            self.swigflags = None
+
+        if self.swig_opts is None:
+            self.swig_opts = []
+        else:
+            self.swig_opts = shlex.split(self.swig_opts)
+
+        # use options from build_ext command
+        build_ext = self.get_finalized_command('build_ext')
+        if self.inplace is None:
+            self.inplace = build_ext.inplace
+        if self.swig_cpp is None:
+            self.swig_cpp = build_ext.swig_cpp
+        for c in ['swig', 'swig_opt']:
+            o = '--'+c.replace('_', '-')
+            v = getattr(build_ext, c, None)
+            if v:
+                if getattr(self, c):
+                    log.warn('both build_src and build_ext define %s option' % (o))
+                else:
+                    log.info('using "%s=%s" option from build_ext command' % (o, v))
+                    setattr(self, c, v)
+
+    def run(self):
+        log.info("build_src")
+        if not (self.extensions or self.libraries):
+            return
+        self.build_sources()
+
+    def build_sources(self):
+
+        if self.inplace:
+            self.get_package_dir = \
+                     self.get_finalized_command('build_py').get_package_dir
+
+        self.build_py_modules_sources()
+
+        for libname_info in self.libraries:
+            self.build_library_sources(*libname_info)
+
+        if self.extensions:
+            self.check_extensions_list(self.extensions)
+
+            for ext in self.extensions:
+                self.build_extension_sources(ext)
+
+        self.build_data_files_sources()
+        self.build_npy_pkg_config()
+
+    def build_data_files_sources(self):
+        if not self.data_files:
+            return
+        log.info('building data_files sources')
+        from numpy.distutils.misc_util import get_data_files
+        new_data_files = []
+        for data in self.data_files:
+            if isinstance(data, str):
+                new_data_files.append(data)
+            elif isinstance(data, tuple):
+                d, files = data
+                if self.inplace:
+                    build_dir = self.get_package_dir('.'.join(d.split(os.sep)))
+                else:
+                    build_dir = os.path.join(self.build_src, d)
+                funcs = [f for f in files if hasattr(f, '__call__')]
+                files = [f for f in files if not hasattr(f, '__call__')]
+                for f in funcs:
+                    if f.__code__.co_argcount==1:
+                        s = f(build_dir)
+                    else:
+                        s = f()
+                    if s is not None:
+                        if isinstance(s, list):
+                            files.extend(s)
+                        elif isinstance(s, str):
+                            files.append(s)
+                        else:
+                            raise TypeError(repr(s))
+                filenames = get_data_files((d, files))
+                new_data_files.append((d, filenames))
+            else:
+                raise TypeError(repr(data))
+        self.data_files[:] = new_data_files
+
+
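+    # npy-pkg config: each registered template (e.g. numpy's npymath.ini.in)
+    # is expanded by substituting its @key@ variables via subst_vars(), and
+    # the generated .ini file is added to data_files so that it is installed
+    # alongside the package.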
+    def _build_npy_pkg_config(self, info, gd):
+        template, install_dir, subst_dict = info
+        template_dir = os.path.dirname(template)
+        for k, v in gd.items():
+            subst_dict[k] = v
+
+        if self.inplace == 1:
+            generated_dir = os.path.join(template_dir, install_dir)
+        else:
+            generated_dir = os.path.join(self.build_src, template_dir,
+                    install_dir)
+        generated = os.path.basename(os.path.splitext(template)[0])
+        generated_path = os.path.join(generated_dir, generated)
+        if not os.path.exists(generated_dir):
+            os.makedirs(generated_dir)
+
+        subst_vars(generated_path, template, subst_dict)
+
+        # Where to install relatively to install prefix
+        full_install_dir = os.path.join(template_dir, install_dir)
+        return full_install_dir, generated_path
+
+    def build_npy_pkg_config(self):
+        log.info('build_src: building npy-pkg config files')
+
+        # XXX: another ugly workaround to circumvent distutils brain damage.
+        # We need the install prefix here, but finalizing the options of the
+        # install command when only building sources causes an error. Instead,
+        # we copy the install command instance and finalize the copy so that
+        # it does not disrupt how distutils wants to do things with the
+        # original install command instance.
+        install_cmd = copy.copy(get_cmd('install'))
+        if install_cmd.finalized != 1:
+            install_cmd.finalize_options()
+        build_npkg = False
+        if self.inplace == 1:
+            top_prefix = '.'
+            build_npkg = True
+        elif hasattr(install_cmd, 'install_libbase'):
+            top_prefix = install_cmd.install_libbase
+            build_npkg = True
+
+        if build_npkg:
+            for pkg, infos in self.distribution.installed_pkg_config.items():
+                pkg_path = self.distribution.package_dir[pkg]
+                prefix = os.path.join(os.path.abspath(top_prefix), pkg_path)
+                d = {'prefix': prefix}
+                for info in infos:
+                    install_dir, generated = self._build_npy_pkg_config(info, d)
+                    self.distribution.data_files.append((install_dir,
+                        [generated]))
+
+    def build_py_modules_sources(self):
+        if not self.py_modules:
+            return
+        log.info('building py_modules sources')
+        new_py_modules = []
+        for source in self.py_modules:
+            if is_sequence(source) and len(source)==3:
+                package, module_base, source = source
+                if self.inplace:
+                    build_dir = self.get_package_dir(package)
+                else:
+                    build_dir = os.path.join(self.build_src,
+                                             os.path.join(*package.split('.')))
+                if hasattr(source, '__call__'):
+                    target = os.path.join(build_dir, module_base + '.py')
+                    source = source(target)
+                if source is None:
+                    continue
+                modules = [(package, module_base, source)]
+                if package not in self.py_modules_dict:
+                    self.py_modules_dict[package] = []
+                self.py_modules_dict[package] += modules
+            else:
+                new_py_modules.append(source)
+        self.py_modules[:] = new_py_modules
+
+    def build_library_sources(self, lib_name, build_info):
+        sources = list(build_info.get('sources', []))
+
+        if not sources:
+            return
+
+        log.info('building library "%s" sources' % (lib_name))
+
+        sources = self.generate_sources(sources, (lib_name, build_info))
+
+        sources = self.template_sources(sources, (lib_name, build_info))
+
+        sources, h_files = self.filter_h_files(sources)
+
+        if h_files:
+            log.info('%s - nothing done with h_files = %s',
+                     self.package, h_files)
+
+        #for f in h_files:
+        #    self.distribution.headers.append((lib_name,f))
+
+        build_info['sources'] = sources
+        return
+
+    def build_extension_sources(self, ext):
+
+        sources = list(ext.sources)
+
+        log.info('building extension "%s" sources' % (ext.name))
+
+        fullname = self.get_ext_fullname(ext.name)
+
+        modpath = fullname.split('.')
+        package = '.'.join(modpath[0:-1])
+
+        if self.inplace:
+            self.ext_target_dir = self.get_package_dir(package)
+
+        sources = self.generate_sources(sources, ext)
+        sources = self.template_sources(sources, ext)
+        sources = self.swig_sources(sources, ext)
+        sources = self.f2py_sources(sources, ext)
+        sources = self.pyrex_sources(sources, ext)
+
+        sources, py_files = self.filter_py_files(sources)
+
+        if package not in self.py_modules_dict:
+            self.py_modules_dict[package] = []
+        modules = []
+        for f in py_files:
+            module = os.path.splitext(os.path.basename(f))[0]
+            modules.append((package, module, f))
+        self.py_modules_dict[package] += modules
+
+        sources, h_files = self.filter_h_files(sources)
+
+        if h_files:
+            log.info('%s - nothing done with h_files = %s',
+                     package, h_files)
+        #for f in h_files:
+        #    self.distribution.headers.append((package,f))
+
+        ext.sources = sources
+
+    def generate_sources(self, sources, extension):
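+        # A source entry may be a plain filename or a callable
+        # func(extension, build_dir) that returns a filename, a sequence
+        # of filenames, or None (meaning nothing was generated).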
+        new_sources = []
+        func_sources = []
+        for source in sources:
+            if is_string(source):
+                new_sources.append(source)
+            else:
+                func_sources.append(source)
+        if not func_sources:
+            return new_sources
+        if self.inplace and not is_sequence(extension):
+            build_dir = self.ext_target_dir
+        else:
+            if is_sequence(extension):
+                name = extension[0]
+            #    if 'include_dirs' not in extension[1]:
+            #        extension[1]['include_dirs'] = []
+            #    incl_dirs = extension[1]['include_dirs']
+            else:
+                name = extension.name
+            #    incl_dirs = extension.include_dirs
+            #if self.build_src not in incl_dirs:
+            #    incl_dirs.append(self.build_src)
+            build_dir = os.path.join(*([self.build_src]
+                                       +name.split('.')[:-1]))
+        self.mkpath(build_dir)
+
+        if self.verbose_cfg:
+            new_level = log.INFO
+        else:
+            new_level = log.WARN
+        old_level = log.set_threshold(new_level)
+
+        for func in func_sources:
+            source = func(extension, build_dir)
+            if not source:
+                continue
+            if is_sequence(source):
+                for s in source:
+                    log.info("  adding '%s' to sources." % (s,))
+                new_sources.extend(source)
+            else:
+                log.info("  adding '%s' to sources." % (source,))
+                new_sources.append(source)
+        log.set_threshold(old_level)
+        return new_sources
+
+    def filter_py_files(self, sources):
+        return self.filter_files(sources, ['.py'])
+
+    def filter_h_files(self, sources):
+        return self.filter_files(sources, ['.h', '.hpp', '.inc'])
+
+    def filter_files(self, sources, exts=[]):
+        new_sources = []
+        files = []
+        for source in sources:
+            (base, ext) = os.path.splitext(source)
+            if ext in exts:
+                files.append(source)
+            else:
+                new_sources.append(source)
+        return new_sources, files
+
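+    # ``*.src`` templates are expanded at build time: Fortran-ish sources
+    # (.f.src, .pyf.src, ...) go through from_template, everything else
+    # through conv_template (C-style repeat blocks).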
+    def template_sources(self, sources, extension):
+        new_sources = []
+        if is_sequence(extension):
+            depends = extension[1].get('depends')
+            include_dirs = extension[1].get('include_dirs')
+        else:
+            depends = extension.depends
+            include_dirs = extension.include_dirs
+        for source in sources:
+            (base, ext) = os.path.splitext(source)
+            if ext == '.src':  # Template file
+                if self.inplace:
+                    target_dir = os.path.dirname(base)
+                else:
+                    target_dir = appendpath(self.build_src, os.path.dirname(base))
+                self.mkpath(target_dir)
+                target_file = os.path.join(target_dir, os.path.basename(base))
+                if (self.force or newer_group([source] + depends, target_file)):
+                    if _f_pyf_ext_match(base):
+                        log.info("from_template:> %s" % (target_file))
+                        outstr = process_f_file(source)
+                    else:
+                        log.info("conv_template:> %s" % (target_file))
+                        outstr = process_c_file(source)
+                    with open(target_file, 'w') as fid:
+                        fid.write(outstr)
+                if _header_ext_match(target_file):
+                    d = os.path.dirname(target_file)
+                    if d not in include_dirs:
+                        log.info("  adding '%s' to include_dirs." % (d))
+                        include_dirs.append(d)
+                new_sources.append(target_file)
+            else:
+                new_sources.append(source)
+        return new_sources
+
+    def pyrex_sources(self, sources, extension):
+        """Pyrex not supported; this remains for Cython support (see below)"""
+        new_sources = []
+        ext_name = extension.name.split('.')[-1]
+        for source in sources:
+            (base, ext) = os.path.splitext(source)
+            if ext == '.pyx':
+                target_file = self.generate_a_pyrex_source(base, ext_name,
+                                                           source,
+                                                           extension)
+                new_sources.append(target_file)
+            else:
+                new_sources.append(source)
+        return new_sources
+
+    def generate_a_pyrex_source(self, base, ext_name, source, extension):
+        """Pyrex is not supported, but some projects monkeypatch this method.
+
+        That allows compiling Cython code, see gh-6955.
+        This method will remain here for compatibility reasons.
+        """
+        return []
+
+    def f2py_sources(self, sources, extension):
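+        # Two modes: a single .pyf interface file is run through f2py to
+        # generate <name>module.c, or, when no .pyf file is present, the
+        # plain Fortran sources are scanned (f2py -m <ext_name>) to
+        # generate the wrapper.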
+        new_sources = []
+        f2py_sources = []
+        f_sources = []
+        f2py_targets = {}
+        target_dirs = []
+        ext_name = extension.name.split('.')[-1]
+        skip_f2py = 0
+
+        for source in sources:
+            (base, ext) = os.path.splitext(source)
+            if ext == '.pyf': # F2PY interface file
+                if self.inplace:
+                    target_dir = os.path.dirname(base)
+                else:
+                    target_dir = appendpath(self.build_src, os.path.dirname(base))
+                if os.path.isfile(source):
+                    name = get_f2py_modulename(source)
+                    if name != ext_name:
+                        raise DistutilsSetupError('mismatch of extension names: %s '
+                                                  'provides %r but expected %r' % (
+                            source, name, ext_name))
+                    target_file = os.path.join(target_dir, name+'module.c')
+                else:
+                    log.debug('  source %s does not exist: skipping f2py\'ing.' \
+                              % (source))
+                    name = ext_name
+                    skip_f2py = 1
+                    target_file = os.path.join(target_dir, name+'module.c')
+                    if not os.path.isfile(target_file):
+                        log.warn('  target %s does not exist:\n   '\
+                                 'Assuming %smodule.c was generated with '\
+                                 '"build_src --inplace" command.' \
+                                 % (target_file, name))
+                        target_dir = os.path.dirname(base)
+                        target_file = os.path.join(target_dir, name+'module.c')
+                        if not os.path.isfile(target_file):
+                            raise DistutilsSetupError("%r missing" % (target_file,))
+                        log.info('   Yes! Using %r as up-to-date target.' \
+                                 % (target_file))
+                target_dirs.append(target_dir)
+                f2py_sources.append(source)
+                f2py_targets[source] = target_file
+                new_sources.append(target_file)
+            elif fortran_ext_match(ext):
+                f_sources.append(source)
+            else:
+                new_sources.append(source)
+
+        if not (f2py_sources or f_sources):
+            return new_sources
+
+        for d in target_dirs:
+            self.mkpath(d)
+
+        f2py_options = extension.f2py_options + self.f2py_opts
+
+        if self.distribution.libraries:
+            for name, build_info in self.distribution.libraries:
+                if name in extension.libraries:
+                    f2py_options.extend(build_info.get('f2py_options', []))
+
+        log.info("f2py options: %s" % (f2py_options))
+
+        if f2py_sources:
+            if len(f2py_sources) != 1:
+                raise DistutilsSetupError(
+                    'only one .pyf file is allowed per extension module but got'\
+                    ' more: %r' % (f2py_sources,))
+            source = f2py_sources[0]
+            target_file = f2py_targets[source]
+            target_dir = os.path.dirname(target_file) or '.'
+            depends = [source] + extension.depends
+            if (self.force or newer_group(depends, target_file, 'newer')) \
+                   and not skip_f2py:
+                log.info("f2py: %s" % (source))
+                from numpy.f2py import f2py2e
+                f2py2e.run_main(f2py_options
+                                    + ['--build-dir', target_dir, source])
+            else:
+                log.debug("  skipping '%s' f2py interface (up-to-date)" % (source))
+        else:
+            #XXX TODO: --inplace support for sdist command
+            if is_sequence(extension):
+                name = extension[0]
+            else:
+                name = extension.name
+            target_dir = os.path.join(*([self.build_src]
+                                        +name.split('.')[:-1]))
+            target_file = os.path.join(target_dir, ext_name + 'module.c')
+            new_sources.append(target_file)
+            depends = f_sources + extension.depends
+            if (self.force or newer_group(depends, target_file, 'newer')) \
+                   and not skip_f2py:
+                log.info("f2py:> %s" % (target_file))
+                self.mkpath(target_dir)
+                from numpy.f2py import f2py2e
+                f2py2e.run_main(f2py_options + ['--lower',
+                                                '--build-dir', target_dir]+\
+                                ['-m', ext_name]+f_sources)
+            else:
+                log.debug("  skipping f2py fortran files for '%s' (up-to-date)"\
+                          % (target_file))
+
+        if not os.path.isfile(target_file):
+            raise DistutilsError("f2py target file %r not generated" % (target_file,))
+
+        build_dir = os.path.join(self.build_src, target_dir)
+        target_c = os.path.join(build_dir, 'fortranobject.c')
+        target_h = os.path.join(build_dir, 'fortranobject.h')
+        log.info("  adding '%s' to sources." % (target_c))
+        new_sources.append(target_c)
+        if build_dir not in extension.include_dirs:
+            log.info("  adding '%s' to include_dirs." % (build_dir))
+            extension.include_dirs.append(build_dir)
+
+        if not skip_f2py:
+            import numpy.f2py
+            d = os.path.dirname(numpy.f2py.__file__)
+            source_c = os.path.join(d, 'src', 'fortranobject.c')
+            source_h = os.path.join(d, 'src', 'fortranobject.h')
+            if newer(source_c, target_c) or newer(source_h, target_h):
+                self.mkpath(os.path.dirname(target_c))
+                self.copy_file(source_c, target_c)
+                self.copy_file(source_h, target_h)
+        else:
+            if not os.path.isfile(target_c):
+                raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,))
+            if not os.path.isfile(target_h):
+                raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,))
+
+        for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']:
+            filename = os.path.join(target_dir, ext_name + name_ext)
+            if os.path.isfile(filename):
+                log.info("  adding '%s' to sources." % (filename))
+                f_sources.append(filename)
+
+        return new_sources + f_sources
+
+    def swig_sources(self, sources, extension):
+        # Assuming SWIG 1.3.14 or later. See compatibility note in
+        #   http://www.swig.org/Doc1.3/Python.html#Python_nn6
+
+        new_sources = []
+        swig_sources = []
+        swig_targets = {}
+        target_dirs = []
+        py_files = []     # swig generated .py files
+        target_ext = '.c'
+        if '-c++' in extension.swig_opts:
+            typ = 'c++'
+            is_cpp = True
+            extension.swig_opts.remove('-c++')
+        elif self.swig_cpp:
+            typ = 'c++'
+            is_cpp = True
+        else:
+            typ = None
+            is_cpp = False
+        skip_swig = 0
+        ext_name = extension.name.split('.')[-1]
+
+        for source in sources:
+            (base, ext) = os.path.splitext(source)
+            if ext == '.i': # SWIG interface file
+                # the code below assumes that the sources list
+                # contains not more than one .i SWIG interface file
+                if self.inplace:
+                    target_dir = os.path.dirname(base)
+                    py_target_dir = self.ext_target_dir
+                else:
+                    target_dir = appendpath(self.build_src, os.path.dirname(base))
+                    py_target_dir = target_dir
+                if os.path.isfile(source):
+                    name = get_swig_modulename(source)
+                    if name != ext_name[1:]:
+                        raise DistutilsSetupError(
+                            'mismatch of extension names: %s provides %r'
+                            ' but expected %r' % (source, name, ext_name[1:]))
+                    if typ is None:
+                        typ = get_swig_target(source)
+                        is_cpp = typ=='c++'
+                    else:
+                        typ2 = get_swig_target(source)
+                        if typ2 is None:
+                            log.warn('source %r does not define swig target, assuming %s swig target' \
+                                     % (source, typ))
+                        elif typ!=typ2:
+                            log.warn('expected %r but source %r defines %r swig target' \
+                                     % (typ, source, typ2))
+                            if typ2=='c++':
+                                log.warn('resetting swig target to c++ (some targets may have .c extension)')
+                                is_cpp = True
+                            else:
+                                log.warn('assuming that %r has c++ swig target' % (source))
+                    if is_cpp:
+                        target_ext = '.cpp'
+                    target_file = os.path.join(target_dir, '%s_wrap%s' \
+                                               % (name, target_ext))
+                else:
+                    log.warn('  source %s does not exist: skipping swig\'ing.' \
+                             % (source))
+                    name = ext_name[1:]
+                    skip_swig = 1
+                    target_file = _find_swig_target(target_dir, name)
+                    if not os.path.isfile(target_file):
+                        log.warn('  target %s does not exist:\n   '\
+                                 'Assuming %s_wrap.{c,cpp} was generated with '\
+                                 '"build_src --inplace" command.' \
+                                 % (target_file, name))
+                        target_dir = os.path.dirname(base)
+                        target_file = _find_swig_target(target_dir, name)
+                        if not os.path.isfile(target_file):
+                            raise DistutilsSetupError("%r missing" % (target_file,))
+                        log.warn('   Yes! Using %r as up-to-date target.' \
+                                 % (target_file))
+                target_dirs.append(target_dir)
+                new_sources.append(target_file)
+                py_files.append(os.path.join(py_target_dir, name+'.py'))
+                swig_sources.append(source)
+                swig_targets[source] = new_sources[-1]
+            else:
+                new_sources.append(source)
+
+        if not swig_sources:
+            return new_sources
+
+        if skip_swig:
+            return new_sources + py_files
+
+        for d in target_dirs:
+            self.mkpath(d)
+
+        swig = self.swig or self.find_swig()
+        swig_cmd = [swig, "-python"] + extension.swig_opts
+        if is_cpp:
+            swig_cmd.append('-c++')
+        for d in extension.include_dirs:
+            swig_cmd.append('-I'+d)
+        for source in swig_sources:
+            target = swig_targets[source]
+            depends = [source] + extension.depends
+            if self.force or newer_group(depends, target, 'newer'):
+                log.info("%s: %s" % (os.path.basename(swig) \
+                                     + (is_cpp and '++' or ''), source))
+                self.spawn(swig_cmd + self.swig_opts \
+                           + ["-o", target, '-outdir', py_target_dir, source])
+            else:
+                log.debug("  skipping '%s' swig interface (up-to-date)" \
+                         % (source))
+
+        return new_sources + py_files
+
+_f_pyf_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
+_header_ext_match = re.compile(r'.*\.(inc|h|hpp)\Z', re.I).match
+
+#### SWIG related auxiliary functions ####
+_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P<package>[\w_]+)".*\)|)\s*(?P<name>[\w_]+)',
+                                     re.I).match
+_has_c_header = re.compile(r'-\*-\s*c\s*-\*-', re.I).search
+_has_cpp_header = re.compile(r'-\*-\s*c\+\+\s*-\*-', re.I).search
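+# Illustrative examples: the line ``%module(package="mypkg") example``
+# yields package "mypkg" and name "example", and a ``-*- c++ -*-`` marker
+# in the first line of a .i file selects the C++ target.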
+
+def get_swig_target(source):
+    with open(source) as f:
+        result = None
+        line = f.readline()
+        if _has_cpp_header(line):
+            result = 'c++'
+        if _has_c_header(line):
+            result = 'c'
+    return result
+
+def get_swig_modulename(source):
+    with open(source) as f:
+        name = None
+        for line in f:
+            m = _swig_module_name_match(line)
+            if m:
+                name = m.group('name')
+                break
+    return name
+
+def _find_swig_target(target_dir, name):
+    for ext in ['.cpp', '.c']:
+        target = os.path.join(target_dir, '%s_wrap%s' % (name, ext))
+        if os.path.isfile(target):
+            break
+    return target
+
+#### F2PY related auxiliary functions ####
+
+_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
+                                     re.I).match
+_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'
+                                          r'__user__[\w_]*)', re.I).match
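+# Signature files may also define auxiliary ``<name>__user__...`` modules
+# (used by f2py for call-back arguments); get_f2py_modulename() skips
+# those when looking up the real extension module name.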
+
+def get_f2py_modulename(source):
+    name = None
+    with open(source) as f:
+        for line in f:
+            m = _f2py_module_name_match(line)
+            if m:
+                if _f2py_user_module_name_match(line): # skip *__user__* names
+                    continue
+                name = m.group('name')
+                break
+    return name
+
+##########################################
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/config.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/config.py
new file mode 100644
index 00000000..fdb650d3
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/config.py
@@ -0,0 +1,516 @@
+# Added Fortran compiler support to config. Currently useful only for
+# try_compile calls. try_run works but is untested for most Fortran
+# compilers (they must define linker_exe first).
+# Pearu Peterson
+import os
+import signal
+import subprocess
+import sys
+import textwrap
+import warnings
+
+from distutils.command.config import config as old_config
+from distutils.command.config import LANG_EXT
+from distutils import log
+from distutils.file_util import copy_file
+from distutils.ccompiler import CompileError, LinkError
+import distutils
+from numpy.distutils.exec_command import filepath_from_subprocess_output
+from numpy.distutils.mingw32ccompiler import generate_manifest
+from numpy.distutils.command.autodist import (check_gcc_function_attribute,
+                                              check_gcc_function_attribute_with_intrinsics,
+                                              check_gcc_variable_attribute,
+                                              check_gcc_version_at_least,
+                                              check_inline,
+                                              check_restrict,
+                                              check_compiler_gcc)
+
+LANG_EXT['f77'] = '.f'
+LANG_EXT['f90'] = '.f90'
+
+class config(old_config):
+    old_config.user_options += [
+        ('fcompiler=', None, "specify the Fortran compiler type"),
+        ]
+
+    def initialize_options(self):
+        self.fcompiler = None
+        old_config.initialize_options(self)
+
+    def _check_compiler(self):
+        old_config._check_compiler(self)
+        from numpy.distutils.fcompiler import FCompiler, new_fcompiler
+
+        if sys.platform == 'win32' and (self.compiler.compiler_type in
+                                        ('msvc', 'intelw', 'intelemw')):
+            # XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
+            # initialize calls query_vcvarsall, which throws an IOError and
+            # causes an error along the way without much information. We try to
+            # catch it here, hoping it is early enough, and print a helpful
+            # message instead of Error: None.
+            if not self.compiler.initialized:
+                try:
+                    self.compiler.initialize()
+                except IOError as e:
+                    msg = textwrap.dedent("""\
+                        Could not initialize compiler instance: do you have Visual Studio
+                        installed?  If you are trying to build with MinGW, please use "python setup.py
+                        build -c mingw32" instead.  If you have Visual Studio installed, check it is
+                        correctly installed, and the right version (VS 2015 as of this writing).
+
+                        Original exception was: %s, and the Compiler class was %s
+                        ============================================================================""") \
+                        % (e, self.compiler.__class__.__name__)
+                    print(textwrap.dedent("""\
+                        ============================================================================"""))
+                    raise distutils.errors.DistutilsPlatformError(msg) from e
+
+            # After MSVC is initialized, add an explicit /MANIFEST to linker
+            # flags.  See issues gh-4245 and gh-4101 for details.  Also
+            # relevant are issues 4431 and 16296 on the Python bug tracker.
+            from distutils import msvc9compiler
+            if msvc9compiler.get_build_version() >= 10:
+                for ldflags in [self.compiler.ldflags_shared,
+                                self.compiler.ldflags_shared_debug]:
+                    if '/MANIFEST' not in ldflags:
+                        ldflags.append('/MANIFEST')
+
+        if not isinstance(self.fcompiler, FCompiler):
+            self.fcompiler = new_fcompiler(compiler=self.fcompiler,
+                                           dry_run=self.dry_run, force=1,
+                                           c_compiler=self.compiler)
+            if self.fcompiler is not None:
+                self.fcompiler.customize(self.distribution)
+                if self.fcompiler.get_version():
+                    self.fcompiler.customize_cmd(self)
+                    self.fcompiler.show_customization()
+
+    def _wrap_method(self, mth, lang, args):
+        from distutils.ccompiler import CompileError
+        from distutils.errors import DistutilsExecError
+        save_compiler = self.compiler
+        if lang in ['f77', 'f90']:
+            self.compiler = self.fcompiler
+        if self.compiler is None:
+            raise CompileError('%s compiler is not set' % (lang,))
+        try:
+            ret = mth(*((self,)+args))
+        except (DistutilsExecError, CompileError) as e:
+            self.compiler = save_compiler
+            raise CompileError from e
+        self.compiler = save_compiler
+        return ret
+
+    def _compile(self, body, headers, include_dirs, lang):
+        src, obj = self._wrap_method(old_config._compile, lang,
+                                     (body, headers, include_dirs, lang))
+        # _compile in unixcompiler.py sometimes creates .d dependency files.
+        # Clean them up.
+        self.temp_files.append(obj + '.d')
+        return src, obj
+
+    def _link(self, body,
+               headers, include_dirs,
+               libraries, library_dirs, lang):
+        if self.compiler.compiler_type=='msvc':
+            libraries = (libraries or [])[:]
+            library_dirs = (library_dirs or [])[:]
+            if lang in ['f77', 'f90']:
+                lang = 'c' # always use system linker when using MSVC compiler
+                if self.fcompiler:
+                    for d in self.fcompiler.library_dirs or []:
+                        # correct path when compiling in Cygwin but with
+                        # normal Win Python
+                        if d.startswith('/usr/lib'):
+                            try:
+                                d = subprocess.check_output(['cygpath',
+                                                             '-w', d])
+                            except (OSError, subprocess.CalledProcessError):
+                                pass
+                            else:
+                                d = filepath_from_subprocess_output(d)
+                        library_dirs.append(d)
+                    for libname in self.fcompiler.libraries or []:
+                        if libname not in libraries:
+                            libraries.append(libname)
+            for libname in libraries:
+                if libname.startswith('msvc'): continue
+                fileexists = False
+                for libdir in library_dirs or []:
+                    libfile = os.path.join(libdir, '%s.lib' % (libname))
+                    if os.path.isfile(libfile):
+                        fileexists = True
+                        break
+                if fileexists: continue
+                # make g77-compiled static libs available to MSVC
+                fileexists = False
+                for libdir in library_dirs:
+                    libfile = os.path.join(libdir, 'lib%s.a' % (libname))
+                    if os.path.isfile(libfile):
+                        # copy libname.a file to name.lib so that MSVC linker
+                        # can find it
+                        libfile2 = os.path.join(libdir, '%s.lib' % (libname))
+                        copy_file(libfile, libfile2)
+                        self.temp_files.append(libfile2)
+                        fileexists = True
+                        break
+                if fileexists: continue
+                log.warn('could not find library %r in directories %s' \
+                         % (libname, library_dirs))
+        elif self.compiler.compiler_type == 'mingw32':
+            generate_manifest(self)
+        return self._wrap_method(old_config._link, lang,
+                                 (body, headers, include_dirs,
+                                  libraries, library_dirs, lang))
+
+    def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
+        self._check_compiler()
+        return self.try_compile(
+                "/* we need a dummy line to make distutils happy */",
+                [header], include_dirs)
+
+    def check_decl(self, symbol,
+                   headers=None, include_dirs=None):
+        self._check_compiler()
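+        # If ``symbol`` is not defined as a macro, the ``(void) %s;``
+        # statement references it as an identifier, so the compile fails
+        # unless the headers declare it.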
+        body = textwrap.dedent("""
+            int main(void)
+            {
+            #ifndef %s
+                (void) %s;
+            #endif
+                ;
+                return 0;
+            }""") % (symbol, symbol)
+
+        return self.try_compile(body, headers, include_dirs)
+
+    def check_macro_true(self, symbol,
+                         headers=None, include_dirs=None):
+        self._check_compiler()
+        body = textwrap.dedent("""
+            int main(void)
+            {
+            #if %s
+            #else
+            #error false or undefined macro
+            #endif
+                ;
+                return 0;
+            }""") % (symbol,)
+
+        return self.try_compile(body, headers, include_dirs)
+
+    def check_type(self, type_name, headers=None, include_dirs=None,
+            library_dirs=None):
+        """Check type availability. Return True if the type can be compiled,
+        False otherwise"""
+        self._check_compiler()
+
+        # First check the type can be compiled
+        body = textwrap.dedent(r"""
+            int main(void) {
+              if ((%(name)s *) 0)
+                return 0;
+              if (sizeof (%(name)s))
+                return 0;
+            }
+            """) % {'name': type_name}
+
+        st = False
+        try:
+            try:
+                self._compile(body, headers, include_dirs, 'c')
+                st = True
+            except distutils.errors.CompileError:
+                st = False
+        finally:
+            self._clean()
+
+        return st
+
+    def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
+        """Check size of a given type."""
+        self._check_compiler()
+
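+        # The probes below use the negative-array-size trick:
+        # ``int test_array[1 - 2 * !(cond)]`` only compiles when ``cond``
+        # holds, so sizeof comparisons are evaluated at compile time.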
+        # First check the type can be compiled
+        body = textwrap.dedent(r"""
+            typedef %(type)s npy_check_sizeof_type;
+            int main (void)
+            {
+                static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
+                test_array [0] = 0
+
+                ;
+                return 0;
+            }
+            """)
+        self._compile(body % {'type': type_name},
+                headers, include_dirs, 'c')
+        self._clean()
+
+        if expected:
+            body = textwrap.dedent(r"""
+                typedef %(type)s npy_check_sizeof_type;
+                int main (void)
+                {
+                    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
+                    test_array [0] = 0
+
+                    ;
+                    return 0;
+                }
+                """)
+            for size in expected:
+                try:
+                    self._compile(body % {'type': type_name, 'size': size},
+                            headers, include_dirs, 'c')
+                    self._clean()
+                    return size
+                except CompileError:
+                    pass
+
+        # this fails to *compile* if size > sizeof(type)
+        body = textwrap.dedent(r"""
+            typedef %(type)s npy_check_sizeof_type;
+            int main (void)
+            {
+                static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
+                test_array [0] = 0
+
+                ;
+                return 0;
+            }
+            """)
+
+        # The principle is simple: we first find low and high bounds of size
+        # for the type, where low/high are looked up on a log scale. Then, we
+        # do a binary search to find the exact size between low and high
+        low = 0
+        mid = 0
+        while True:
+            try:
+                self._compile(body % {'type': type_name, 'size': mid},
+                        headers, include_dirs, 'c')
+                self._clean()
+                break
+            except CompileError:
+                #log.info("failure to test for bound %d" % mid)
+                low = mid + 1
+                mid = 2 * mid + 1
+
+        high = mid
+        # Binary search:
+        while low != high:
+            mid = (high - low) // 2 + low
+            try:
+                self._compile(body % {'type': type_name, 'size': mid},
+                        headers, include_dirs, 'c')
+                self._clean()
+                high = mid
+            except CompileError:
+                low = mid + 1
+        return low
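+
+    # Worked example (illustrative): probing a 16-byte type, the loop above
+    # tries mid = 0, 1, 3, 7, 15 (each fails the compile-time assertion
+    # sizeof(t) <= mid) and succeeds at mid = 31; the binary search then
+    # narrows low=16..high=31 down to low == high == 16, which is returned.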
+
+    def check_func(self, func,
+                   headers=None, include_dirs=None,
+                   libraries=None, library_dirs=None,
+                   decl=False, call=False, call_args=None):
+        # clean up distutils's config a bit: add void to main(), and
+        # return a value.
+        self._check_compiler()
+        body = []
+        if decl:
+            if type(decl) == str:
+                body.append(decl)
+            else:
+                body.append("int %s (void);" % func)
+        # Handle MSVC intrinsics: force MS compiler to make a function call.
+        # Useful to test for some functions when built with optimization on, to
+        # avoid build error because the intrinsic and our 'fake' test
+        # declaration do not match.
+        body.append("#ifdef _MSC_VER")
+        body.append("#pragma function(%s)" % func)
+        body.append("#endif")
+        body.append("int main (void) {")
+        if call:
+            if call_args is None:
+                call_args = ''
+            body.append("  %s(%s);" % (func, call_args))
+        else:
+            body.append("  %s;" % func)
+        body.append("  return 0;")
+        body.append("}")
+        body = '\n'.join(body) + "\n"
+
+        return self.try_link(body, headers, include_dirs,
+                             libraries, library_dirs)
+
+    def check_funcs_once(self, funcs,
+                   headers=None, include_dirs=None,
+                   libraries=None, library_dirs=None,
+                   decl=False, call=False, call_args=None):
+        """Check a list of functions at once.
+
+        This is useful to speed things up, since all the functions in the funcs
+        list will be put in one compilation unit.
+
+        Arguments
+        ---------
+        funcs : seq
+            list of functions to test
+        include_dirs : seq
+            list of header paths
+        libraries : seq
+            list of libraries to link the code snippet to
+        library_dirs : seq
+            list of library paths
+        decl : dict
+            for every (key, value), the declaration in the value will be
+            used for function in key. If a function is not in the
+            dictionary, no declaration will be used.
+        call : dict
+            for every item (f, value), if the value is True, a call will be
+            done to the function f.
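+
+        Examples
+        --------
+        A minimal sketch (assumes ``self`` is a finalized config command
+        and that libm is available)::
+
+            self.check_funcs_once(['sin', 'cos'], libraries=['m'],
+                                  call={'sin': True, 'cos': True},
+                                  call_args={'sin': '0.', 'cos': '0.'})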
+        """
+        self._check_compiler()
+        body = []
+        if decl:
+            for f, v in decl.items():
+                if v:
+                    body.append("int %s (void);" % f)
+
+        # Handle MS intrinsics. See check_func for more info.
+        body.append("#ifdef _MSC_VER")
+        for func in funcs:
+            body.append("#pragma function(%s)" % func)
+        body.append("#endif")
+
+        body.append("int main (void) {")
+        if call:
+            for f in funcs:
+                if f in call and call[f]:
+                    if not (call_args and f in call_args and call_args[f]):
+                        args = ''
+                    else:
+                        args = call_args[f]
+                    body.append("  %s(%s);" % (f, args))
+                else:
+                    body.append("  %s;" % f)
+        else:
+            for f in funcs:
+                body.append("  %s;" % f)
+        body.append("  return 0;")
+        body.append("}")
+        body = '\n'.join(body) + "\n"
+
+        return self.try_link(body, headers, include_dirs,
+                             libraries, library_dirs)
+
+    def check_inline(self):
+        """Return the inline keyword recognized by the compiler, empty string
+        otherwise."""
+        return check_inline(self)
+
+    def check_restrict(self):
+        """Return the restrict keyword recognized by the compiler, empty string
+        otherwise."""
+        return check_restrict(self)
+
+    def check_compiler_gcc(self):
+        """Return True if the C compiler is gcc"""
+        return check_compiler_gcc(self)
+
+    def check_gcc_function_attribute(self, attribute, name):
+        return check_gcc_function_attribute(self, attribute, name)
+
+    def check_gcc_function_attribute_with_intrinsics(self, attribute, name,
+                                                     code, include):
+        return check_gcc_function_attribute_with_intrinsics(self, attribute,
+                                                            name, code, include)
+
+    def check_gcc_variable_attribute(self, attribute):
+        return check_gcc_variable_attribute(self, attribute)
+
+    def check_gcc_version_at_least(self, major, minor=0, patchlevel=0):
+        """Return True if the GCC version is greater than or equal to the
+        specified version."""
+        return check_gcc_version_at_least(self, major, minor, patchlevel)
+
+    def get_output(self, body, headers=None, include_dirs=None,
+                   libraries=None, library_dirs=None,
+                   lang="c", use_tee=None):
+        """Try to compile, link to an executable, and run a program
+        built from 'body' and 'headers'. Returns the exit status code
+        of the program and its output.
+        """
+        # 2008-11-16, RemoveMe
+        warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n"
+                      "Usage of get_output is deprecated: please do not \n"
+                      "use it anymore, and avoid configuration checks \n"
+                      "involving running executable on the target machine.\n"
+                      "+++++++++++++++++++++++++++++++++++++++++++++++++\n",
+                      DeprecationWarning, stacklevel=2)
+        self._check_compiler()
+        exitcode, output = 255, ''
+        try:
+            grabber = GrabStdout()
+            try:
+                src, obj, exe = self._link(body, headers, include_dirs,
+                                           libraries, library_dirs, lang)
+                grabber.restore()
+            except Exception:
+                output = grabber.data
+                grabber.restore()
+                raise
+            exe = os.path.join('.', exe)
+            try:
+                # specify cwd arg for consistency with
+                # historic usage pattern of exec_command()
+                # also, note that exe appears to be a string,
+                # which exec_command() handled, but we now
+                # use a list for check_output() -- this assumes
+                # that exe is always a single command
+                output = subprocess.check_output([exe], cwd='.')
+            except subprocess.CalledProcessError as exc:
+                exitstatus = exc.returncode
+                output = ''
+            except OSError:
+                # preserve the EnvironmentError exit status
+                # used historically in exec_command()
+                exitstatus = 127
+                output = ''
+            else:
+                # success: record a zero exit status so the decoding
+                # below does not hit an unbound variable
+                exitstatus = 0
+                output = filepath_from_subprocess_output(output)
+            if hasattr(os, 'WEXITSTATUS'):
+                exitcode = os.WEXITSTATUS(exitstatus)
+                if os.WIFSIGNALED(exitstatus):
+                    sig = os.WTERMSIG(exitstatus)
+                    log.error('subprocess exited with signal %d' % (sig,))
+                    if sig == signal.SIGINT:
+                        # control-C
+                        raise KeyboardInterrupt
+            else:
+                exitcode = exitstatus
+            log.info("success!")
+        except (CompileError, LinkError):
+            log.info("failure.")
+        self._clean()
+        return exitcode, output
+
+class GrabStdout:
+
+    def __init__(self):
+        self.sys_stdout = sys.stdout
+        self.data = ''
+        sys.stdout = self
+
+    def write (self, data):
+        self.sys_stdout.write(data)
+        self.data += data
+
+    def flush (self):
+        self.sys_stdout.flush()
+
+    def restore(self):
+        sys.stdout = self.sys_stdout
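+
+# Usage sketch for GrabStdout (illustrative): it tees sys.stdout while
+# active, echoing writes and accumulating them in .data, which is how
+# get_output() above recovers compiler chatter when _link() raises:
+#
+#     grabber = GrabStdout()
+#     try:
+#         print("captured and echoed")
+#     finally:
+#         grabber.restore()
+#     captured = grabber.data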
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/config_compiler.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/config_compiler.py
new file mode 100644
index 00000000..44265bfc
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/config_compiler.py
@@ -0,0 +1,126 @@
+from distutils.core import Command
+from numpy.distutils import log
+
+#XXX: Linker flags
+
+def show_fortran_compilers(_cache=None):
+    # Using cache to prevent infinite recursion.
+    if _cache:
+        return
+    elif _cache is None:
+        _cache = []
+    _cache.append(1)
+    from numpy.distutils.fcompiler import show_fcompilers
+    import distutils.core
+    dist = distutils.core._setup_distribution
+    show_fcompilers(dist)
+
+class config_fc(Command):
+    """ Distutils command to hold user specified options
+    to Fortran compilers.
+
+    config_fc command is used by the FCompiler.customize() method.
+    """
+
+    description = "specify Fortran 77/Fortran 90 compiler information"
+
+    user_options = [
+        ('fcompiler=', None, "specify Fortran compiler type"),
+        ('f77exec=', None, "specify F77 compiler command"),
+        ('f90exec=', None, "specify F90 compiler command"),
+        ('f77flags=', None, "specify F77 compiler flags"),
+        ('f90flags=', None, "specify F90 compiler flags"),
+        ('opt=', None, "specify optimization flags"),
+        ('arch=', None, "specify architecture specific optimization flags"),
+        ('debug', 'g', "compile with debugging information"),
+        ('noopt', None, "compile without optimization"),
+        ('noarch', None, "compile without arch-dependent optimization"),
+        ]
+
+    help_options = [
+        ('help-fcompiler', None, "list available Fortran compilers",
+         show_fortran_compilers),
+        ]
+
+    boolean_options = ['debug', 'noopt', 'noarch']
+
+    def initialize_options(self):
+        self.fcompiler = None
+        self.f77exec = None
+        self.f90exec = None
+        self.f77flags = None
+        self.f90flags = None
+        self.opt = None
+        self.arch = None
+        self.debug = None
+        self.noopt = None
+        self.noarch = None
+
+    def finalize_options(self):
+        log.info('unifying config_fc, config, build_clib, build_ext, build commands --fcompiler options')
+        build_clib = self.get_finalized_command('build_clib')
+        build_ext = self.get_finalized_command('build_ext')
+        config = self.get_finalized_command('config')
+        build = self.get_finalized_command('build')
+        cmd_list = [self, config, build_clib, build_ext, build]
+        for a in ['fcompiler']:
+            l = []
+            for c in cmd_list:
+                v = getattr(c, a)
+                if v is not None:
+                    if not isinstance(v, str): v = v.compiler_type
+                    if v not in l: l.append(v)
+            if not l: v1 = None
+            else: v1 = l[0]
+            if len(l)>1:
+                log.warn('  commands have different --%s options: %s'\
+                         ', using first in list as default' % (a, l))
+            if v1:
+                for c in cmd_list:
+                    if getattr(c, a) is None: setattr(c, a, v1)
+
+    def run(self):
+        # Do nothing.
+        return
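+
+    # Typical command-line use (illustrative):
+    #   python setup.py config_fc --fcompiler=gnu95 --opt='-O2' build_ext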
+
+class config_cc(Command):
+    """ Distutils command to hold user specified options
+    to C/C++ compilers.
+    """
+
+    description = "specify C/C++ compiler information"
+
+    user_options = [
+        ('compiler=', None, "specify C/C++ compiler type"),
+        ]
+
+    def initialize_options(self):
+        self.compiler = None
+
+    def finalize_options(self):
+        log.info('unifying config_cc, config, build_clib, build_ext, build commands --compiler options')
+        build_clib = self.get_finalized_command('build_clib')
+        build_ext = self.get_finalized_command('build_ext')
+        config = self.get_finalized_command('config')
+        build = self.get_finalized_command('build')
+        cmd_list = [self, config, build_clib, build_ext, build]
+        for a in ['compiler']:
+            l = []
+            for c in cmd_list:
+                v = getattr(c, a)
+                if v is not None:
+                    if not isinstance(v, str): v = v.compiler_type
+                    if v not in l: l.append(v)
+            if not l: v1 = None
+            else: v1 = l[0]
+            if len(l)>1:
+                log.warn('  commands have different --%s options: %s'\
+                         ', using first in list as default' % (a, l))
+            if v1:
+                for c in cmd_list:
+                    if getattr(c, a) is None: setattr(c, a, v1)
+        return
+
+    def run(self):
+        # Do nothing.
+        return
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/develop.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/develop.py
new file mode 100644
index 00000000..af24baf2
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/develop.py
@@ -0,0 +1,15 @@
+""" Override the develop command from setuptools so we can ensure that our
+generated files (from build_src or build_scripts) are properly converted to real
+files with filenames.
+
+"""
+from setuptools.command.develop import develop as old_develop
+
+class develop(old_develop):
+    __doc__ = old_develop.__doc__
+    def install_for_development(self):
+        # Build sources in-place, too.
+        self.reinitialize_command('build_src', inplace=1)
+        # Make sure scripts are built.
+        self.run_command('build_scripts')
+        old_develop.install_for_development(self)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/egg_info.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/egg_info.py
new file mode 100644
index 00000000..14c62b4d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/egg_info.py
@@ -0,0 +1,25 @@
+import sys
+
+from setuptools.command.egg_info import egg_info as _egg_info
+
+class egg_info(_egg_info):
+    def run(self):
+        if 'sdist' in sys.argv:
+            import warnings
+            import textwrap
+            msg = textwrap.dedent("""
+                `build_src` is being run, this may lead to missing
+                files in your sdist!  You want to use distutils.sdist
+                instead of the setuptools version:
+
+                    from distutils.command.sdist import sdist
+                    cmdclass={'sdist': sdist}"
+
+                See numpy's setup.py or gh-7131 for details.""")
+            warnings.warn(msg, UserWarning, stacklevel=2)
+
+        # We need to ensure that build_src has been executed in order to give
+        # setuptools' egg_info command real filenames instead of functions which
+        # generate files.
+        self.run_command("build_src")
+        _egg_info.run(self)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install.py
new file mode 100644
index 00000000..efa9b474
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install.py
@@ -0,0 +1,79 @@
+import sys
+if 'setuptools' in sys.modules:
+    import setuptools.command.install as old_install_mod
+    have_setuptools = True
+else:
+    import distutils.command.install as old_install_mod
+    have_setuptools = False
+from distutils.file_util import write_file
+
+old_install = old_install_mod.install
+
+class install(old_install):
+
+    # Always run install_clib - the command is cheap, so no need to bypass it;
+    # but it's not run by setuptools -- so it's run again in install_data
+    sub_commands = old_install.sub_commands + [
+        ('install_clib', lambda x: True)
+    ]
+
+    def finalize_options (self):
+        old_install.finalize_options(self)
+        self.install_lib = self.install_libbase
+
+    def setuptools_run(self):
+        """ The setuptools version of the .run() method.
+
+        We must pull in the entire code so we can override the level used in the
+        _getframe() call since we wrap this call by one more level.
+        """
+        from distutils.command.install import install as distutils_install
+
+        # Explicit request for old-style install?  Just do it
+        if self.old_and_unmanageable or self.single_version_externally_managed:
+            return distutils_install.run(self)
+
+        # Attempt to detect whether we were called from setup() or by another
+        # command.  If we were called by setup(), our caller will be the
+        # 'run_command' method in 'distutils.dist', and *its* caller will be
+        # the 'run_commands' method.  If we were called any other way, our
+        # immediate caller *might* be 'run_command', but it won't have been
+        # called by 'run_commands'.  This is slightly kludgy, but seems to
+        # work.
+        #
+        caller = sys._getframe(3)
+        caller_module = caller.f_globals.get('__name__', '')
+        caller_name = caller.f_code.co_name
+
+        if caller_module != 'distutils.dist' or caller_name!='run_commands':
+            # We weren't called from the command line or setup(), so we
+            # should run in backward-compatibility mode to support bdist_*
+            # commands.
+            distutils_install.run(self)
+        else:
+            self.do_egg_install()
+
+    def run(self):
+        if not have_setuptools:
+            r = old_install.run(self)
+        else:
+            r = self.setuptools_run()
+        if self.record:
+            # bdist_rpm fails when INSTALLED_FILES contains
+            # paths with spaces. Such paths must be enclosed
+            # with double-quotes.
+            with open(self.record) as f:
+                lines = []
+                need_rewrite = False
+                for l in f:
+                    l = l.rstrip()
+                    if ' ' in l:
+                        need_rewrite = True
+                        l = '"%s"' % (l)
+                    lines.append(l)
+            if need_rewrite:
+                self.execute(write_file,
+                             (self.record, lines),
+                             "re-writing list of installed files to '%s'" %
+                             self.record)
+        return r
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install_clib.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install_clib.py
new file mode 100644
index 00000000..aa2e5594
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install_clib.py
@@ -0,0 +1,40 @@
+import os
+from distutils.core import Command
+from distutils.ccompiler import new_compiler
+from numpy.distutils.misc_util import get_cmd
+
+class install_clib(Command):
+    description = "Command to install installable C libraries"
+
+    user_options = []
+
+    def initialize_options(self):
+        self.install_dir = None
+        self.outfiles = []
+
+    def finalize_options(self):
+        self.set_undefined_options('install', ('install_lib', 'install_dir'))
+
+    def run (self):
+        build_clib_cmd = get_cmd("build_clib")
+        if not build_clib_cmd.build_clib:
+            # can happen if the user specified `--skip-build`
+            build_clib_cmd.finalize_options()
+        build_dir = build_clib_cmd.build_clib
+
+        # We need the compiler to get the library name -> filename association
+        if not build_clib_cmd.compiler:
+            compiler = new_compiler(compiler=None)
+            compiler.customize(self.distribution)
+        else:
+            compiler = build_clib_cmd.compiler
+
+        for l in self.distribution.installed_libraries:
+            target_dir = os.path.join(self.install_dir, l.target_dir)
+            name = compiler.library_filename(l.name)
+            source = os.path.join(build_dir, name)
+            self.mkpath(target_dir)
+            self.outfiles.append(self.copy_file(source, target_dir)[0])
+
+    def get_outputs(self):
+        return self.outfiles
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install_data.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install_data.py
new file mode 100644
index 00000000..0a2e68ae
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install_data.py
@@ -0,0 +1,24 @@
+import sys
+have_setuptools = ('setuptools' in sys.modules)
+
+from distutils.command.install_data import install_data as old_install_data
+
+# Data installer with improved intelligence over distutils:
+# data files are copied into the project directory instead
+# of being scattered willy-nilly.
+class install_data (old_install_data):
+
+    def run(self):
+        old_install_data.run(self)
+
+        if have_setuptools:
+            # Run install_clib again, since setuptools does not run sub-commands
+            # of install automatically
+            self.run_command('install_clib')
+
+    def finalize_options (self):
+        self.set_undefined_options('install',
+                                   ('install_lib', 'install_dir'),
+                                   ('root', 'root'),
+                                   ('force', 'force'),
+                                  )
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py
new file mode 100644
index 00000000..bb4ad563
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py
@@ -0,0 +1,25 @@
+import os
+from distutils.command.install_headers import install_headers as old_install_headers
+
+class install_headers (old_install_headers):
+
+    def run (self):
+        headers = self.distribution.headers
+        if not headers:
+            return
+
+        prefix = os.path.dirname(self.install_dir)
+        for header in headers:
+            if isinstance(header, tuple):
+                # Kind of a hack, but I don't know where else to change this...
+                if header[0] == 'numpy.core':
+                    header = ('numpy', header[1])
+                    if os.path.splitext(header[1])[1] == '.inc':
+                        continue
+                d = os.path.join(*([prefix]+header[0].split('.')))
+                header = header[1]
+            else:
+                d = self.install_dir
+            self.mkpath(d)
+            (out, _) = self.copy_file(header, d)
+            self.outfiles.append(out)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/sdist.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/sdist.py
new file mode 100644
index 00000000..e3419388
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/command/sdist.py
@@ -0,0 +1,27 @@
+import sys
+if 'setuptools' in sys.modules:
+    from setuptools.command.sdist import sdist as old_sdist
+else:
+    from distutils.command.sdist import sdist as old_sdist
+
+from numpy.distutils.misc_util import get_data_files
+
+class sdist(old_sdist):
+
+    def add_defaults (self):
+        old_sdist.add_defaults(self)
+
+        dist = self.distribution
+
+        if dist.has_data_files():
+            for data in dist.data_files:
+                self.filelist.extend(get_data_files(data))
+
+        if dist.has_headers():
+            headers = []
+            for h in dist.headers:
+                if isinstance(h, str): headers.append(h)
+                else: headers.append(h[1])
+            self.filelist.extend(headers)
+
+        return
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/conv_template.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/conv_template.py
new file mode 100644
index 00000000..c8933d1d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/conv_template.py
@@ -0,0 +1,329 @@
+#!/usr/bin/env python3
+"""
+Takes a templated file .xxx.src and produces the .xxx file, where .xxx is
+.i, .c or .h, using the following template rules:
+
+/**begin repeat  -- on a line by itself marks the start of a repeated code
+                    segment
+/**end repeat**/ -- on a line by itself marks its end
+
+After the /**begin repeat and before the */, all the named templates are
+placed; these should all have the same number of replacements.
+
+Repeat blocks can be nested, with each nested block labeled with its depth,
+i.e.
+/**begin repeat1
+ *....
+ */
+/**end repeat1**/
+
+When using nested loops, you can optionally exclude particular
+combinations of the variables using (inside the comment portion of the inner loop):
+
+ :exclude: var1=value1, var2=value2, ...
+
+This will exclude the pattern where var1 is value1 and var2 is value2 when
+the result is being generated.
+
+
+In the main body, each replacement uses one entry from the list of named replacements.
+
+ Note that all #..# forms in a block must have the same number of
+   comma-separated entries.
+
+Example:
+
+    An input file containing
+
+        /**begin repeat
+         * #a = 1,2,3#
+         * #b = 1,2,3#
+         */
+
+        /**begin repeat1
+         * #c = ted, jim#
+         */
+        @a@, @b@, @c@
+        /**end repeat1**/
+
+        /**end repeat**/
+
+    produces
+
+        line 1 "template.c.src"
+
+        /*
+         *********************************************************************
+         **       This file was autogenerated from a template  DO NOT EDIT!!**
+         **       Changes should be made to the original source (.src) file **
+         *********************************************************************
+         */
+
+        #line 9
+        1, 1, ted
+
+        #line 9
+        1, 1, jim
+
+        #line 9
+        2, 2, ted
+
+        #line 9
+        2, 2, jim
+
+        #line 9
+        3, 3, ted
+
+        #line 9
+        3, 3, jim
+
+"""
+
+__all__ = ['process_str', 'process_file']
+
+import os
+import sys
+import re
+
+# names for replacement that are already global.
+global_names = {}
+
+# header placed at the front of each processed file
+header =\
+"""
+/*
+ *****************************************************************************
+ **       This file was autogenerated from a template  DO NOT EDIT!!!!      **
+ **       Changes should be made to the original source (.src) file         **
+ *****************************************************************************
+ */
+
+"""
+# Parse string for repeat loops
+def parse_structure(astr, level):
+    """
+    The returned line number is from the beginning of the string, starting
+    at zero. Returns an empty list if no loops are found.
+
+    """
+    if level == 0 :
+        loopbeg = "/**begin repeat"
+        loopend = "/**end repeat**/"
+    else :
+        loopbeg = "/**begin repeat%d" % level
+        loopend = "/**end repeat%d**/" % level
+
+    ind = 0
+    line = 0
+    spanlist = []
+    while True:
+        start = astr.find(loopbeg, ind)
+        if start == -1:
+            break
+        start2 = astr.find("*/", start)
+        start2 = astr.find("\n", start2)
+        fini1 = astr.find(loopend, start2)
+        fini2 = astr.find("\n", fini1)
+        line += astr.count("\n", ind, start2+1)
+        spanlist.append((start, start2+1, fini1, fini2+1, line))
+        line += astr.count("\n", start2+1, fini2)
+        ind = fini2
+    spanlist.sort()
+    return spanlist
+
+
+def paren_repl(obj):
+    torep = obj.group(1)
+    numrep = obj.group(2)
+    return ','.join([torep]*int(numrep))
+
+parenrep = re.compile(r"\(([^)]*)\)\*(\d+)")
+plainrep = re.compile(r"([^*]+)\*(\d+)")
+def parse_values(astr):
+    # replaces all occurrences of '(a,b,c)*4' in astr
+    # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty parentheses generate
+    # empty values, i.e., ()*4 yields ',,,'. The result is
+    # split at ',' and a list of values returned.
+    astr = parenrep.sub(paren_repl, astr)
+    # replaces occurrences of xxx*3 with xxx, xxx, xxx
+    astr = ','.join([plainrep.sub(paren_repl, x.strip())
+                     for x in astr.split(',')])
+    return astr.split(',')
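+
+# For illustration:
+#   parse_values('(1,2)*2, 3')  ->  ['1', '2', '1', '2', '3']
+#   parse_values('()*3,a')      ->  ['', '', '', 'a']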
+
+
+stripast = re.compile(r"\n\s*\*?")
+named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#")
+exclude_vars_re = re.compile(r"(\w*)=(\w*)")
+exclude_re = re.compile(":exclude:")
+def parse_loop_header(loophead) :
+    """Find all named replacements in the header
+
+    Returns a list of dictionaries, one for each loop iteration,
+    where each key is a name to be substituted and the corresponding
+    value is the replacement string.
+
+    Also return a list of exclusions. The exclusions are dictionaries
+    of key/value pairs. There can be more than one exclusion.
+    [{'var1': 'value1', 'var2': 'value2', ...}, ...]
+
+    """
+    # Strip out '\n' and leading '*', if any, in continuation lines.
+    # This should not affect code previous to this change, as
+    # continuation lines were not allowed.
+    loophead = stripast.sub("", loophead)
+    # parse out the names and lists of values
+    names = []
+    reps = named_re.findall(loophead)
+    nsub = None
+    for rep in reps:
+        name = rep[0]
+        vals = parse_values(rep[1])
+        size = len(vals)
+        if nsub is None :
+            nsub = size
+        elif nsub != size :
+            msg = "Mismatch in number of values, %d != %d\n%s = %s"
+            raise ValueError(msg % (nsub, size, name, vals))
+        names.append((name, vals))
+
+
+    # Find any exclude variables
+    excludes = []
+
+    for obj in exclude_re.finditer(loophead):
+        span = obj.span()
+        # find next newline
+        endline = loophead.find('\n', span[1])
+        substr = loophead[span[1]:endline]
+        ex_names = exclude_vars_re.findall(substr)
+        excludes.append(dict(ex_names))
+
+    # generate list of dictionaries, one for each template iteration
+    dlist = []
+    if nsub is None :
+        raise ValueError("No substitution variables found")
+    for i in range(nsub):
+        tmp = {name: vals[i] for name, vals in names}
+        dlist.append(tmp)
+    return dlist
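+
+# For illustration, a loop header containing '#a = 1,2#' and '#b = x,y#'
+# yields [{'a': '1', 'b': 'x'}, {'a': '2', 'b': 'y'}].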
+
+replace_re = re.compile(r"@(\w+)@")
+def parse_string(astr, env, level, line) :
+    lineno = "#line %d\n" % line
+
+    # local function for string replacement, uses env
+    def replace(match):
+        name = match.group(1)
+        try :
+            val = env[name]
+        except KeyError:
+            msg = 'line %d: no definition of key "%s"'%(line, name)
+            raise ValueError(msg) from None
+        return val
+
+    code = [lineno]
+    struct = parse_structure(astr, level)
+    if struct :
+        # recurse over inner loops
+        oldend = 0
+        newlevel = level + 1
+        for sub in struct:
+            pref = astr[oldend:sub[0]]
+            head = astr[sub[0]:sub[1]]
+            text = astr[sub[1]:sub[2]]
+            oldend = sub[3]
+            newline = line + sub[4]
+            code.append(replace_re.sub(replace, pref))
+            try :
+                envlist = parse_loop_header(head)
+            except ValueError as e:
+                msg = "line %d: %s" % (newline, e)
+                raise ValueError(msg)
+            for newenv in envlist :
+                newenv.update(env)
+                newcode = parse_string(text, newenv, newlevel, newline)
+                code.extend(newcode)
+        suff = astr[oldend:]
+        code.append(replace_re.sub(replace, suff))
+    else :
+        # replace keys
+        code.append(replace_re.sub(replace, astr))
+    code.append('\n')
+    return ''.join(code)
+
+def process_str(astr):
+    code = [header]
+    code.append(parse_string(astr, global_names, 0, 1))
+    return ''.join(code)
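+
+# A minimal round trip (illustrative template text):
+#
+#     src = ("/**begin repeat\n"
+#            " * #t = float, double#\n"
+#            " */\n"
+#            "@t@ add_@t@(@t@ a, @t@ b);\n"
+#            "/**end repeat**/\n")
+#     print(process_str(src))   # emits one prototype per value of @t@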
+
+
+include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
+                            r"(?P[\w\d./\\]+[.]src)['\"]", re.I)
+
+def resolve_includes(source):
+    d = os.path.dirname(source)
+    with open(source) as fid:
+        lines = []
+        for line in fid:
+            m = include_src_re.match(line)
+            if m:
+                fn = m.group('name')
+                if not os.path.isabs(fn):
+                    fn = os.path.join(d, fn)
+                if os.path.isfile(fn):
+                    lines.extend(resolve_includes(fn))
+                else:
+                    lines.append(line)
+            else:
+                lines.append(line)
+    return lines
+
+def process_file(source):
+    lines = resolve_includes(source)
+    sourcefile = os.path.normcase(source).replace("\\", "\\\\")
+    try:
+        code = process_str(''.join(lines))
+    except ValueError as e:
+        raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None
+    return '#line 1 "%s"\n%s' % (sourcefile, code)
+
+
+def unique_key(adict):
+    # this obtains a unique key given a dictionary
+    # currently it works by appending together n of the letters of the
+    #   current keys and increasing n until a unique key is found
+    # -- not particularly quick
+    allkeys = list(adict.keys())
+    done = False
+    n = 1
+    while not done:
+        newkey = "".join([x[:n] for x in allkeys])
+        if newkey in allkeys:
+            n += 1
+        else:
+            done = True
+    return newkey
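+
+# For illustration: unique_key({'ab': 1, 'cd': 2}) returns 'ac', since the
+# one-letter prefixes already form a key not present in the dict.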
+
+
+def main():
+    try:
+        file = sys.argv[1]
+    except IndexError:
+        file = '<stdin>'  # so the error message below has a name to report
+        fid = sys.stdin
+        outfile = sys.stdout
+    else:
+        fid = open(file, 'r')
+        (base, ext) = os.path.splitext(file)
+        newname = base
+        outfile = open(newname, 'w')
+
+    allstr = fid.read()
+    try:
+        writestr = process_str(allstr)
+    except ValueError as e:
+        raise ValueError("In %s loop at %s" % (file, e)) from None
+
+    outfile.write(writestr)
+
+if __name__ == "__main__":
+    main()
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/core.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/core.py
new file mode 100644
index 00000000..1cdc7397
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/core.py
@@ -0,0 +1,216 @@
+import sys
+from distutils.core import Distribution
+
+if 'setuptools' in sys.modules:
+    have_setuptools = True
+    from setuptools import setup as old_setup
+    # easy_install imports math, it may be picked up from cwd
+    from setuptools.command import easy_install
+    try:
+        # very old versions of setuptools don't have this
+        from setuptools.command import bdist_egg
+    except ImportError:
+        have_setuptools = False
+else:
+    from distutils.core import setup as old_setup
+    have_setuptools = False
+
+import warnings
+import distutils.core
+import distutils.dist
+
+from numpy.distutils.extension import Extension  # noqa: F401
+from numpy.distutils.numpy_distribution import NumpyDistribution
+from numpy.distutils.command import config, config_compiler, \
+     build, build_py, build_ext, build_clib, build_src, build_scripts, \
+     sdist, install_data, install_headers, install, bdist_rpm, \
+     install_clib
+from numpy.distutils.misc_util import is_sequence, is_string
+
+numpy_cmdclass = {'build':            build.build,
+                  'build_src':        build_src.build_src,
+                  'build_scripts':    build_scripts.build_scripts,
+                  'config_cc':        config_compiler.config_cc,
+                  'config_fc':        config_compiler.config_fc,
+                  'config':           config.config,
+                  'build_ext':        build_ext.build_ext,
+                  'build_py':         build_py.build_py,
+                  'build_clib':       build_clib.build_clib,
+                  'sdist':            sdist.sdist,
+                  'install_data':     install_data.install_data,
+                  'install_headers':  install_headers.install_headers,
+                  'install_clib':     install_clib.install_clib,
+                  'install':          install.install,
+                  'bdist_rpm':        bdist_rpm.bdist_rpm,
+                  }
+if have_setuptools:
+    # Use our own versions of develop and egg_info to ensure that build_src is
+    # handled appropriately.
+    from numpy.distutils.command import develop, egg_info
+    numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg
+    numpy_cmdclass['develop'] = develop.develop
+    numpy_cmdclass['easy_install'] = easy_install.easy_install
+    numpy_cmdclass['egg_info'] = egg_info.egg_info
+
+def _dict_append(d, **kws):
+    for k, v in kws.items():
+        if k not in d:
+            d[k] = v
+            continue
+        dv = d[k]
+        if isinstance(dv, tuple):
+            d[k] = dv + tuple(v)
+        elif isinstance(dv, list):
+            d[k] = dv + list(v)
+        elif isinstance(dv, dict):
+            _dict_append(dv, **v)
+        elif is_string(dv):
+            assert is_string(v)
+            d[k] = v
+        else:
+            raise TypeError(repr(type(dv)))
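+
+# For illustration:
+#   d = {'libraries': ['m'], 'macros': {'A': 1}}
+#   _dict_append(d, libraries=['z'], macros={'B': 2})
+#   # d == {'libraries': ['m', 'z'], 'macros': {'A': 1, 'B': 2}}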
+
+def _command_line_ok(_cache=None):
+    """ Return True if command line does not contain any
+    help or display requests.
+    """
+    if _cache:
+        return _cache[0]
+    elif _cache is None:
+        _cache = []
+    ok = True
+    display_opts = ['--'+n for n in Distribution.display_option_names]
+    for o in Distribution.display_options:
+        if o[1]:
+            display_opts.append('-'+o[1])
+    for arg in sys.argv:
+        if arg.startswith('--help') or arg=='-h' or arg in display_opts:
+            ok = False
+            break
+    _cache.append(ok)
+    return ok
+
+def get_distribution(always=False):
+    dist = distutils.core._setup_distribution
+    # XXX Hack to get numpy installable with easy_install.
+    # The problem is easy_install runs it's own setup(), which
+    # sets up distutils.core._setup_distribution. However,
+    # when our setup() runs, that gets overwritten and lost.
+    # We can't use isinstance, as the DistributionWithoutHelpCommands
+    # class is local to a function in setuptools.command.easy_install
+    if dist is not None and \
+            'DistributionWithoutHelpCommands' in repr(dist):
+        dist = None
+    if always and dist is None:
+        dist = NumpyDistribution()
+    return dist
+
+def setup(**attr):
+
+    cmdclass = numpy_cmdclass.copy()
+
+    new_attr = attr.copy()
+    if 'cmdclass' in new_attr:
+        cmdclass.update(new_attr['cmdclass'])
+    new_attr['cmdclass'] = cmdclass
+
+    if 'configuration' in new_attr:
+        # Avoid calling configuration() if there are any errors or
+        # help requests on the command line.
+        configuration = new_attr.pop('configuration')
+
+        old_dist = distutils.core._setup_distribution
+        old_stop = distutils.core._setup_stop_after
+        distutils.core._setup_distribution = None
+        distutils.core._setup_stop_after = "commandline"
+        try:
+            dist = setup(**new_attr)
+        finally:
+            distutils.core._setup_distribution = old_dist
+            distutils.core._setup_stop_after = old_stop
+        if dist.help or not _command_line_ok():
+            # probably displayed help, skip running any commands
+            return dist
+
+        # create setup dictionary and append to new_attr
+        config = configuration()
+        if hasattr(config, 'todict'):
+            config = config.todict()
+        _dict_append(new_attr, **config)
+
+    # Move extension source libraries to libraries
+    libraries = []
+    for ext in new_attr.get('ext_modules', []):
+        new_libraries = []
+        for item in ext.libraries:
+            if is_sequence(item):
+                lib_name, build_info = item
+                _check_append_ext_library(libraries, lib_name, build_info)
+                new_libraries.append(lib_name)
+            elif is_string(item):
+                new_libraries.append(item)
+            else:
+                raise TypeError("invalid description of extension module "
+                                "library %r" % (item,))
+        ext.libraries = new_libraries
+    if libraries:
+        if 'libraries' not in new_attr:
+            new_attr['libraries'] = []
+        for item in libraries:
+            _check_append_library(new_attr['libraries'], item)
+
+    # sources in ext_modules or libraries may contain header files
+    if ('ext_modules' in new_attr or 'libraries' in new_attr) \
+       and 'headers' not in new_attr:
+        new_attr['headers'] = []
+
+    # Use our custom NumpyDistribution class instead of distutils' one
+    new_attr['distclass'] = NumpyDistribution
+
+    return old_setup(**new_attr)
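+
+# Typical use in a numpy.distutils-based setup.py (illustrative sketch;
+# 'mypkg' and '_ext.c' are placeholders):
+#
+#     def configuration(parent_package='', top_path=None):
+#         from numpy.distutils.misc_util import Configuration
+#         config = Configuration('mypkg', parent_package, top_path)
+#         config.add_extension('_ext', sources=['_ext.c'])
+#         return config
+#
+#     if __name__ == "__main__":
+#         setup(configuration=configuration)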
+
+def _check_append_library(libraries, item):
+    for libitem in libraries:
+        if is_sequence(libitem):
+            if is_sequence(item):
+                if item[0]==libitem[0]:
+                    if item[1] is libitem[1]:
+                        return
+                    warnings.warn("[0] libraries list contains %r with"
+                                  " different build_info" % (item[0],),
+                                  stacklevel=2)
+                    break
+            else:
+                if item==libitem[0]:
+                    warnings.warn("[1] libraries list contains %r with"
+                                  " no build_info" % (item[0],),
+                                  stacklevel=2)
+                    break
+        else:
+            if is_sequence(item):
+                if item[0]==libitem:
+                    warnings.warn("[2] libraries list contains %r with"
+                                  " no build_info" % (item[0],),
+                                  stacklevel=2)
+                    break
+            else:
+                if item==libitem:
+                    return
+    libraries.append(item)
+
+def _check_append_ext_library(libraries, lib_name, build_info):
+    for item in libraries:
+        if is_sequence(item):
+            if item[0]==lib_name:
+                if item[1] is build_info:
+                    return
+                warnings.warn("[3] libraries list contains %r with"
+                              " different build_info" % (lib_name,),
+                              stacklevel=2)
+                break
+        elif item==lib_name:
+            warnings.warn("[4] libraries list contains %r with"
+                          " no build_info" % (lib_name,),
+                          stacklevel=2)
+            break
+    libraries.append((lib_name, build_info))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/cpuinfo.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/cpuinfo.py
new file mode 100644
index 00000000..77620210
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/cpuinfo.py
@@ -0,0 +1,683 @@
+#!/usr/bin/env python3
+"""
+cpuinfo
+
+Copyright 2002 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@cens.ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy (BSD style) license.  See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED.  USE AT YOUR OWN RISK.
+Pearu Peterson
+
+"""
+__all__ = ['cpu']
+
+import os
+import platform
+import re
+import sys
+import types
+import warnings
+
+from subprocess import getstatusoutput
+
+
+def getoutput(cmd, successful_status=(0,), stacklevel=1):
+    try:
+        status, output = getstatusoutput(cmd)
+    except OSError as e:
+        warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
+        return False, ""
+    if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
+        return True, output
+    return False, output
+
+def command_info(successful_status=(0,), stacklevel=1, **kw):
+    info = {}
+    for key in kw:
+        ok, output = getoutput(kw[key], successful_status=successful_status,
+                               stacklevel=stacklevel+1)
+        if ok:
+            info[key] = output.strip()
+    return info
+
+def command_by_line(cmd, successful_status=(0,), stacklevel=1):
+    ok, output = getoutput(cmd, successful_status=successful_status,
+                           stacklevel=stacklevel+1)
+    if not ok:
+        return
+    for line in output.splitlines():
+        yield line.strip()
+
+def key_value_from_command(cmd, sep, successful_status=(0,),
+                           stacklevel=1):
+    d = {}
+    for line in command_by_line(cmd, successful_status=successful_status,
+                                stacklevel=stacklevel+1):
+        l = [s.strip() for s in line.split(sep, 1)]
+        if len(l) == 2:
+            d[l[0]] = l[1]
+    return d
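+
+# For illustration, key_value_from_command('sysctl hw', sep='=') returns a
+# dict such as {'hw.ncpu': '8', ...} (the exact keys vary by platform).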
+
+class CPUInfoBase:
+    """Holds CPU information and provides methods for requiring
+    the availability of various CPU features.
+    """
+
+    def _try_call(self, func):
+        try:
+            return func()
+        except Exception:
+            pass
+
+    def __getattr__(self, name):
+        if not name.startswith('_'):
+            if hasattr(self, '_'+name):
+                attr = getattr(self, '_'+name)
+                if isinstance(attr, types.MethodType):
+                    return lambda func=self._try_call,attr=attr : func(attr)
+            else:
+                return lambda : None
+        raise AttributeError(name)
+
+    def _getNCPUs(self):
+        return 1
+
+    def __get_nbits(self):
+        abits = platform.architecture()[0]
+        nbits = re.compile(r'(\d+)bit').search(abits).group(1)
+        return nbits
+
+    def _is_32bit(self):
+        return self.__get_nbits() == '32'
+
+    def _is_64bit(self):
+        return self.__get_nbits() == '64'
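+
+# Note: public queries such as cpu.is_64bit() or cpu.has_sse2() resolve via
+# CPUInfoBase.__getattr__: the matching private probe (_is_64bit, _has_sse2)
+# is wrapped in _try_call, so a probe that raises (e.g. a missing
+# /proc/cpuinfo key) yields None instead of an exception, and names with no
+# matching probe return a callable that yields None.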
+
+class LinuxCPUInfo(CPUInfoBase):
+
+    info = None
+
+    def __init__(self):
+        if self.info is not None:
+            return
+        info = [ {} ]
+        ok, output = getoutput('uname -m')
+        if ok:
+            info[0]['uname_m'] = output.strip()
+        try:
+            fo = open('/proc/cpuinfo')
+        except OSError as e:
+            warnings.warn(str(e), UserWarning, stacklevel=2)
+        else:
+            for line in fo:
+                name_value = [s.strip() for s in line.split(':', 1)]
+                if len(name_value) != 2:
+                    continue
+                name, value = name_value
+                if not info or name in info[-1]: # next processor
+                    info.append({})
+                info[-1][name] = value
+            fo.close()
+        self.__class__.info = info
+
+    def _not_impl(self): pass
+
+    # Athlon
+
+    def _is_AMD(self):
+        return self.info[0]['vendor_id']=='AuthenticAMD'
+
+    def _is_AthlonK6_2(self):
+        return self._is_AMD() and self.info[0]['model'] == '2'
+
+    def _is_AthlonK6_3(self):
+        return self._is_AMD() and self.info[0]['model'] == '3'
+
+    def _is_AthlonK6(self):
+        return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None
+
+    def _is_AthlonK7(self):
+        return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None
+
+    def _is_AthlonMP(self):
+        return re.match(r'.*?Athlon\(tm\) MP\b',
+                        self.info[0]['model name']) is not None
+
+    def _is_AMD64(self):
+        return self.is_AMD() and self.info[0]['family'] == '15'
+
+    def _is_Athlon64(self):
+        return re.match(r'.*?Athlon\(tm\) 64\b',
+                        self.info[0]['model name']) is not None
+
+    def _is_AthlonHX(self):
+        return re.match(r'.*?Athlon HX\b',
+                        self.info[0]['model name']) is not None
+
+    def _is_Opteron(self):
+        return re.match(r'.*?Opteron\b',
+                        self.info[0]['model name']) is not None
+
+    def _is_Hammer(self):
+        return re.match(r'.*?Hammer\b',
+                        self.info[0]['model name']) is not None
+
+    # Alpha
+
+    def _is_Alpha(self):
+        return self.info[0]['cpu']=='Alpha'
+
+    def _is_EV4(self):
+        return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4'
+
+    def _is_EV5(self):
+        return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5'
+
+    def _is_EV56(self):
+        return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56'
+
+    def _is_PCA56(self):
+        return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56'
+
+    # Intel
+
+    #XXX
+    _is_i386 = _not_impl
+
+    def _is_Intel(self):
+        return self.info[0]['vendor_id']=='GenuineIntel'
+
+    def _is_i486(self):
+        return self.info[0]['cpu']=='i486'
+
+    def _is_i586(self):
+        return self.is_Intel() and self.info[0]['cpu family'] == '5'
+
+    def _is_i686(self):
+        return self.is_Intel() and self.info[0]['cpu family'] == '6'
+
+    def _is_Celeron(self):
+        return re.match(r'.*?Celeron',
+                        self.info[0]['model name']) is not None
+
+    def _is_Pentium(self):
+        return re.match(r'.*?Pentium',
+                        self.info[0]['model name']) is not None
+
+    def _is_PentiumII(self):
+        return re.match(r'.*?Pentium.*?II\b',
+                        self.info[0]['model name']) is not None
+
+    def _is_PentiumPro(self):
+        return re.match(r'.*?PentiumPro\b',
+                        self.info[0]['model name']) is not None
+
+    def _is_PentiumMMX(self):
+        return re.match(r'.*?Pentium.*?MMX\b',
+                        self.info[0]['model name']) is not None
+
+    def _is_PentiumIII(self):
+        return re.match(r'.*?Pentium.*?III\b',
+                        self.info[0]['model name']) is not None
+
+    def _is_PentiumIV(self):
+        return re.match(r'.*?Pentium.*?(IV|4)\b',
+                        self.info[0]['model name']) is not None
+
+    def _is_PentiumM(self):
+        return re.match(r'.*?Pentium.*?M\b',
+                        self.info[0]['model name']) is not None
+
+    def _is_Prescott(self):
+        return self.is_PentiumIV() and self.has_sse3()
+
+    def _is_Nocona(self):
+        return (self.is_Intel()
+                and (self.info[0]['cpu family'] == '6'
+                     or self.info[0]['cpu family'] == '15')
+                and (self.has_sse3() and not self.has_ssse3())
+                and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None)
+
+    def _is_Core2(self):
+        return (self.is_64bit() and self.is_Intel() and
+                re.match(r'.*?Core\(TM\)2\b',
+                         self.info[0]['model name']) is not None)
+
+    def _is_Itanium(self):
+        return re.match(r'.*?Itanium\b',
+                        self.info[0]['family']) is not None
+
+    def _is_XEON(self):
+        return re.match(r'.*?XEON\b',
+                        self.info[0]['model name'], re.IGNORECASE) is not None
+
+    _is_Xeon = _is_XEON
+
+    # Varia
+
+    def _is_singleCPU(self):
+        return len(self.info) == 1
+
+    def _getNCPUs(self):
+        return len(self.info)
+
+    def _has_fdiv_bug(self):
+        return self.info[0]['fdiv_bug']=='yes'
+
+    def _has_f00f_bug(self):
+        return self.info[0]['f00f_bug']=='yes'
+
+    def _has_mmx(self):
+        return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None
+
+    def _has_sse(self):
+        return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None
+
+    def _has_sse2(self):
+        return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None
+
+    def _has_sse3(self):
+        return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None
+
+    def _has_ssse3(self):
+        return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None
+
+    def _has_3dnow(self):
+        return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None
+
+    def _has_3dnowext(self):
+        return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None
+
+class IRIXCPUInfo(CPUInfoBase):
+    info = None
+
+    def __init__(self):
+        if self.info is not None:
+            return
+        info = key_value_from_command('sysconf', sep=' ',
+                                      successful_status=(0, 1))
+        self.__class__.info = info
+
+    def _not_impl(self): pass
+
+    def _is_singleCPU(self):
+        return self.info.get('NUM_PROCESSORS') == '1'
+
+    def _getNCPUs(self):
+        return int(self.info.get('NUM_PROCESSORS', 1))
+
+    def __cputype(self, n):
+        return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n)
+    def _is_r2000(self): return self.__cputype(2000)
+    def _is_r3000(self): return self.__cputype(3000)
+    def _is_r3900(self): return self.__cputype(3900)
+    def _is_r4000(self): return self.__cputype(4000)
+    def _is_r4100(self): return self.__cputype(4100)
+    def _is_r4300(self): return self.__cputype(4300)
+    def _is_r4400(self): return self.__cputype(4400)
+    def _is_r4600(self): return self.__cputype(4600)
+    def _is_r4650(self): return self.__cputype(4650)
+    def _is_r5000(self): return self.__cputype(5000)
+    def _is_r6000(self): return self.__cputype(6000)
+    def _is_r8000(self): return self.__cputype(8000)
+    def _is_r10000(self): return self.__cputype(10000)
+    def _is_r12000(self): return self.__cputype(12000)
+    def _is_rorion(self): return self.__cputype('orion')
+
+    def get_ip(self):
+        try: return self.info.get('MACHINE')
+        except Exception: pass
+    def __machine(self, n):
+        return self.info.get('MACHINE').lower() == 'ip%s' % (n)
+    def _is_IP19(self): return self.__machine(19)
+    def _is_IP20(self): return self.__machine(20)
+    def _is_IP21(self): return self.__machine(21)
+    def _is_IP22(self): return self.__machine(22)
+    def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000()
+    def _is_IP22_5k(self): return self.__machine(22)  and self._is_r5000()
+    def _is_IP24(self): return self.__machine(24)
+    def _is_IP25(self): return self.__machine(25)
+    def _is_IP26(self): return self.__machine(26)
+    def _is_IP27(self): return self.__machine(27)
+    def _is_IP28(self): return self.__machine(28)
+    def _is_IP30(self): return self.__machine(30)
+    def _is_IP32(self): return self.__machine(32)
+    def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000()
+    def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000()
+
+
+class DarwinCPUInfo(CPUInfoBase):
+    info = None
+
+    def __init__(self):
+        if self.info is not None:
+            return
+        info = command_info(arch='arch',
+                            machine='machine')
+        info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=')
+        self.__class__.info = info
+
+    def _not_impl(self): pass
+
+    def _getNCPUs(self):
+        return int(self.info['sysctl_hw'].get('hw.ncpu', 1))
+
+    def _is_Power_Macintosh(self):
+        return self.info['sysctl_hw']['hw.machine']=='Power Macintosh'
+
+    def _is_i386(self):
+        return self.info['arch']=='i386'
+    def _is_ppc(self):
+        return self.info['arch']=='ppc'
+
+    def __machine(self, n):
+        return self.info['machine'] == 'ppc%s'%n
+    def _is_ppc601(self): return self.__machine(601)
+    def _is_ppc602(self): return self.__machine(602)
+    def _is_ppc603(self): return self.__machine(603)
+    def _is_ppc603e(self): return self.__machine('603e')
+    def _is_ppc604(self): return self.__machine(604)
+    def _is_ppc604e(self): return self.__machine('604e')
+    def _is_ppc620(self): return self.__machine(620)
+    def _is_ppc630(self): return self.__machine(630)
+    def _is_ppc740(self): return self.__machine(740)
+    def _is_ppc7400(self): return self.__machine(7400)
+    def _is_ppc7450(self): return self.__machine(7450)
+    def _is_ppc750(self): return self.__machine(750)
+    def _is_ppc403(self): return self.__machine(403)
+    def _is_ppc505(self): return self.__machine(505)
+    def _is_ppc801(self): return self.__machine(801)
+    def _is_ppc821(self): return self.__machine(821)
+    def _is_ppc823(self): return self.__machine(823)
+    def _is_ppc860(self): return self.__machine(860)
+
+
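DarwinCPUInfo above leans on key_value_from_command('sysctl hw', sep='='), a helper defined earlier in this module. Roughly, it runs the command and folds key = value output lines into a dict; a simplified stand-in (the sample lines are invented, and the real helper also handles command status):

    def parse_key_value(lines, sep='='):
        # simplified sketch of what key_value_from_command does with its output
        d = {}
        for line in lines:
            parts = [s.strip() for s in line.split(sep, 1)]
            if len(parts) == 2:
                d[parts[0]] = parts[1]
        return d

    sample = ["hw.ncpu = 8", "hw.machine = Power Macintosh"]  # invented output
    info = parse_key_value(sample)
    print(int(info.get('hw.ncpu', 1)))  # 8, mirroring _getNCPUs above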
+class SunOSCPUInfo(CPUInfoBase):
+
+    info = None
+
+    def __init__(self):
+        if self.info is not None:
+            return
+        info = command_info(arch='arch',
+                            mach='mach',
+                            uname_i='uname_i',
+                            isainfo_b='isainfo -b',
+                            isainfo_n='isainfo -n',
+                            )
+        info['uname_X'] = key_value_from_command('uname -X', sep='=')
+        for line in command_by_line('psrinfo -v 0'):
+            m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at', line)
+            if m:
+                info['processor'] = m.group('p')
+                break
+        self.__class__.info = info
+
+    def _not_impl(self): pass
+
+    def _is_i386(self):
+        return self.info['isainfo_n']=='i386'
+    def _is_sparc(self):
+        return self.info['isainfo_n']=='sparc'
+    def _is_sparcv9(self):
+        return self.info['isainfo_n']=='sparcv9'
+
+    def _getNCPUs(self):
+        return int(self.info['uname_X'].get('NumCPU', 1))
+
+    def _is_sun4(self):
+        return self.info['arch']=='sun4'
+
+    def _is_SUNW(self):
+        return re.match(r'SUNW', self.info['uname_i']) is not None
+    def _is_sparcstation5(self):
+        return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None
+    def _is_ultra1(self):
+        return re.match(r'.*Ultra-1', self.info['uname_i']) is not None
+    def _is_ultra250(self):
+        return re.match(r'.*Ultra-250', self.info['uname_i']) is not None
+    def _is_ultra2(self):
+        return re.match(r'.*Ultra-2', self.info['uname_i']) is not None
+    def _is_ultra30(self):
+        return re.match(r'.*Ultra-30', self.info['uname_i']) is not None
+    def _is_ultra4(self):
+        return re.match(r'.*Ultra-4', self.info['uname_i']) is not None
+    def _is_ultra5_10(self):
+        return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None
+    def _is_ultra5(self):
+        return re.match(r'.*Ultra-5', self.info['uname_i']) is not None
+    def _is_ultra60(self):
+        return re.match(r'.*Ultra-60', self.info['uname_i']) is not None
+    def _is_ultra80(self):
+        return re.match(r'.*Ultra-80', self.info['uname_i']) is not None
+    def _is_ultraenterprice(self):
+        return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None
+    def _is_ultraenterprice10k(self):
+        return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None
+    def _is_sunfire(self):
+        return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None
+    def _is_ultra(self):
+        return re.match(r'.*Ultra', self.info['uname_i']) is not None
+
+    def _is_cpusparcv7(self):
+        return self.info['processor']=='sparcv7'
+    def _is_cpusparcv8(self):
+        return self.info['processor']=='sparcv8'
+    def _is_cpusparcv9(self):
+        return self.info['processor']=='sparcv9'
+
+class Win32CPUInfo(CPUInfoBase):
+
+    info = None
+    pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor"
+    # XXX: what does the value of
+    #   HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0
+    # mean?
+
+    def __init__(self):
+        if self.info is not None:
+            return
+        info = []
+        try:
+            #XXX: Bad style to use so long `try:...except:...`. Fix it!
+            import winreg
+
+            prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
+                              r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
+            chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey)
+            pnum=0
+            while True:
+                try:
+                    proc=winreg.EnumKey(chnd, pnum)
+                except winreg.error:
+                    break
+                else:
+                    pnum+=1
+                    info.append({"Processor":proc})
+                    phnd=winreg.OpenKey(chnd, proc)
+                    pidx=0
+                    while True:
+                        try:
+                            name, value, vtpe=winreg.EnumValue(phnd, pidx)
+                        except winreg.error:
+                            break
+                        else:
+                            pidx=pidx+1
+                            info[-1][name]=value
+                            if name=="Identifier":
+                                srch=prgx.search(value)
+                                if srch:
+                                    info[-1]["Family"]=int(srch.group("FML"))
+                                    info[-1]["Model"]=int(srch.group("MDL"))
+                                    info[-1]["Stepping"]=int(srch.group("STP"))
+        except Exception as e:
+            print(e, '(ignoring)')
+        self.__class__.info = info
+
+    def _not_impl(self): pass
+
+    # Athlon
+
+    def _is_AMD(self):
+        return self.info[0]['VendorIdentifier']=='AuthenticAMD'
+
+    def _is_Am486(self):
+        return self.is_AMD() and self.info[0]['Family']==4
+
+    def _is_Am5x86(self):
+        return self.is_AMD() and self.info[0]['Family']==4
+
+    def _is_AMDK5(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model'] in [0, 1, 2, 3]
+
+    def _is_AMDK6(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model'] in [6, 7]
+
+    def _is_AMDK6_2(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model']==8
+
+    def _is_AMDK6_3(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model']==9
+
+    def _is_AMDK7(self):
+        return self.is_AMD() and self.info[0]['Family'] == 6
+
+    # To reliably distinguish between the different types of AMD64 chips
+    # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would
+    # require looking at the 'brand' from cpuid
+
+    def _is_AMD64(self):
+        return self.is_AMD() and self.info[0]['Family'] == 15
+
+    # Intel
+
+    def _is_Intel(self):
+        return self.info[0]['VendorIdentifier']=='GenuineIntel'
+
+    def _is_i386(self):
+        return self.info[0]['Family']==3
+
+    def _is_i486(self):
+        return self.info[0]['Family']==4
+
+    def _is_i586(self):
+        return self.is_Intel() and self.info[0]['Family']==5
+
+    def _is_i686(self):
+        return self.is_Intel() and self.info[0]['Family']==6
+
+    def _is_Pentium(self):
+        return self.is_Intel() and self.info[0]['Family']==5
+
+    def _is_PentiumMMX(self):
+        return self.is_Intel() and self.info[0]['Family']==5 \
+               and self.info[0]['Model']==4
+
+    def _is_PentiumPro(self):
+        return self.is_Intel() and self.info[0]['Family']==6 \
+               and self.info[0]['Model']==1
+
+    def _is_PentiumII(self):
+        return self.is_Intel() and self.info[0]['Family']==6 \
+               and self.info[0]['Model'] in [3, 5, 6]
+
+    def _is_PentiumIII(self):
+        return self.is_Intel() and self.info[0]['Family']==6 \
+               and self.info[0]['Model'] in [7, 8, 9, 10, 11]
+
+    def _is_PentiumIV(self):
+        return self.is_Intel() and self.info[0]['Family']==15
+
+    def _is_PentiumM(self):
+        return self.is_Intel() and self.info[0]['Family'] == 6 \
+               and self.info[0]['Model'] in [9, 13, 14]
+
+    def _is_Core2(self):
+        return self.is_Intel() and self.info[0]['Family'] == 6 \
+               and self.info[0]['Model'] in [15, 16, 17]
+
+    # Varia
+
+    def _is_singleCPU(self):
+        return len(self.info) == 1
+
+    def _getNCPUs(self):
+        return len(self.info)
+
+    def _has_mmx(self):
+        if self.is_Intel():
+            return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
+                   or (self.info[0]['Family'] in [6, 15])
+        elif self.is_AMD():
+            return self.info[0]['Family'] in [5, 6, 15]
+        else:
+            return False
+
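The prgx pattern in __init__ above pulls Family/Model/Stepping out of the registry's Identifier string. A standalone check of that regex against a representative Identifier value (the sample string is invented, but follows the typical 'x86 Family N Model N Stepping N' shape):

    import re

    prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
                      r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)

    identifier = "x86 Family 6 Model 15 Stepping 11"  # invented sample
    m = prgx.search(identifier)
    if m:
        # prints: 6 15 11
        print(int(m.group("FML")), int(m.group("MDL")), int(m.group("STP")))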
+    def _has_sse(self):
+        if self.is_Intel():
+            return ((self.info[0]['Family']==6 and
+                     self.info[0]['Model'] in [7, 8, 9, 10, 11])
+                    or self.info[0]['Family']==15)
+        elif self.is_AMD():
+            return ((self.info[0]['Family']==6 and
+                     self.info[0]['Model'] in [6, 7, 8, 10])
+                    or self.info[0]['Family']==15)
+        else:
+            return False
+
+    def _has_sse2(self):
+        if self.is_Intel():
+            return self.is_Pentium4() or self.is_PentiumM() \
+                   or self.is_Core2()
+        elif self.is_AMD():
+            return self.is_AMD64()
+        else:
+            return False
+
+    def _has_3dnow(self):
+        return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15]
+
+    def _has_3dnowext(self):
+        return self.is_AMD() and self.info[0]['Family'] in [6, 15]
+
+if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?)
+    cpuinfo = LinuxCPUInfo
+elif sys.platform.startswith('irix'):
+    cpuinfo = IRIXCPUInfo
+elif sys.platform == 'darwin':
+    cpuinfo = DarwinCPUInfo
+elif sys.platform.startswith('sunos'):
+    cpuinfo = SunOSCPUInfo
+elif sys.platform.startswith('win32'):
+    cpuinfo = Win32CPUInfo
+elif sys.platform.startswith('cygwin'):
+    cpuinfo = LinuxCPUInfo
+#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices.
+else:
+    cpuinfo = CPUInfoBase
+
+cpu = cpuinfo()
+
+#if __name__ == "__main__":
+#
+#    cpu.is_blaa()
+#    cpu.is_Intel()
+#    cpu.is_Alpha()
+#
+#    print('CPU information:'),
+#    for name in dir(cpuinfo):
+#        if name[0]=='_' and name[1]!='_':
+#            r = getattr(cpu,name[1:])()
+#            if r:
+#                if r!=1:
+#                    print('%s=%s' %(name[1:],r))
+#                else:
+#                    print(name[1:]),
+#    print()
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/exec_command.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/exec_command.py
new file mode 100644
index 00000000..a67453ab
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/exec_command.py
@@ -0,0 +1,315 @@
+"""
+exec_command
+
+Implements exec_command function that is (almost) equivalent to
+commands.getstatusoutput function but on NT, DOS systems the
+returned status is actually correct (though, the returned status
+values may be different by a factor). In addition, exec_command
+takes keyword arguments for (re-)defining environment variables.
+
+Provides functions:
+
+  exec_command  --- execute command in a specified directory and
+                    in the modified environment.
+  find_executable --- locate a command using info from environment
+                    variable PATH. Equivalent to posix `which`
+                    command.
+
+Author: Pearu Peterson <pearu@cens.ioc.ee>
+Created: 11 January 2003
+
+Requires: Python 2.x
+
+Successfully tested on:
+
+========  ============  =================================================
+os.name   sys.platform  comments
+========  ============  =================================================
+posix     linux2        Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3
+                        PyCrust 0.9.3, Idle 1.0.2
+posix     linux2        Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2
+posix     sunos5        SunOS 5.9, Python 2.2, 2.3.2
+posix     darwin        Darwin 7.2.0, Python 2.3
+nt        win32         Windows Me
+                        Python 2.3(EE), Idle 1.0, PyCrust 0.7.2
+                        Python 2.1.1 Idle 0.8
+nt        win32         Windows 98, Python 2.1.1. Idle 0.8
+nt        win32         Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests
+                        fail i.e. redefining environment variables may
+                        not work. FIXED: don't use cygwin echo!
+                        Comment: also `cmd /c echo` will not work
+                        but redefining environment variables do work.
+posix     cygwin        Cygwin 98-4.10, Python 2.3.3(cygming special)
+nt        win32         Windows XP, Python 2.3.3
+========  ============  =================================================
+
+Known bugs:
+
+* Tests, that send messages to stderr, fail when executed from MSYS prompt
+  because the messages are lost at some point.
+
+"""
+__all__ = ['exec_command', 'find_executable']
+
+import os
+import sys
+import subprocess
+import locale
+import warnings
+
+from numpy.distutils.misc_util import is_sequence, make_temp_file
+from numpy.distutils import log
+
+def filepath_from_subprocess_output(output):
+    """
+    Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`.
+
+    Inherited from `exec_command`, and possibly incorrect.
+    """
+    mylocale = locale.getpreferredencoding(False)
+    if mylocale is None:
+        mylocale = 'ascii'
+    output = output.decode(mylocale, errors='replace')
+    output = output.replace('\r\n', '\n')
+    # Another historical oddity
+    if output[-1:] == '\n':
+        output = output[:-1]
+    return output
+
+
+def forward_bytes_to_stdout(val):
+    """
+    Forward bytes from a subprocess call to the console, without attempting to
+    decode them.
+
+    The assumption is that the subprocess call already returned bytes in
+    a suitable encoding.
+    """
+    if hasattr(sys.stdout, 'buffer'):
+        # use the underlying binary output if there is one
+        sys.stdout.buffer.write(val)
+    elif hasattr(sys.stdout, 'encoding'):
+        # round-trip the encoding if necessary
+        sys.stdout.write(val.decode(sys.stdout.encoding))
+    else:
+        # make a best-guess at the encoding
+        sys.stdout.write(val.decode('utf8', errors='replace'))
+
+
+def temp_file_name():
+    # 2019-01-30, 1.17
+    warnings.warn('temp_file_name is deprecated since NumPy v1.17, use '
+                  'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1)
+    fo, name = make_temp_file()
+    fo.close()
+    return name
+
+def get_pythonexe():
+    pythonexe = sys.executable
+    if os.name in ['nt', 'dos']:
+        fdir, fn = os.path.split(pythonexe)
+        fn = fn.upper().replace('PYTHONW', 'PYTHON')
+        pythonexe = os.path.join(fdir, fn)
+        assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,)
+    return pythonexe
+
+def find_executable(exe, path=None, _cache={}):
+    """Return full path of a executable or None.
+
+    Symbolic links are not followed.
+ """ + key = exe, path + try: + return _cache[key] + except KeyError: + pass + log.debug('find_executable(%r)' % exe) + orig_exe = exe + + if path is None: + path = os.environ.get('PATH', os.defpath) + if os.name=='posix': + realpath = os.path.realpath + else: + realpath = lambda a:a + + if exe.startswith('"'): + exe = exe[1:-1] + + suffixes = [''] + if os.name in ['nt', 'dos', 'os2']: + fn, ext = os.path.splitext(exe) + extra_suffixes = ['.exe', '.com', '.bat'] + if ext.lower() not in extra_suffixes: + suffixes = extra_suffixes + + if os.path.isabs(exe): + paths = [''] + else: + paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] + + for path in paths: + fn = os.path.join(path, exe) + for s in suffixes: + f_ext = fn+s + if not os.path.islink(f_ext): + f_ext = realpath(f_ext) + if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): + log.info('Found executable %s' % f_ext) + _cache[key] = f_ext + return f_ext + + log.warn('Could not locate executable %s' % orig_exe) + return None + +############################################################ + +def _preserve_environment( names ): + log.debug('_preserve_environment(%r)' % (names)) + env = {name: os.environ.get(name) for name in names} + return env + +def _update_environment( **env ): + log.debug('_update_environment(...)') + for name, value in env.items(): + os.environ[name] = value or '' + +def exec_command(command, execute_in='', use_shell=None, use_tee=None, + _with_python = 1, **env ): + """ + Return (status,output) of executed command. + + .. deprecated:: 1.17 + Use subprocess.Popen instead + + Parameters + ---------- + command : str + A concatenated string of executable and arguments. + execute_in : str + Before running command ``cd execute_in`` and after ``cd -``. + use_shell : {bool, None}, optional + If True, execute ``sh -c command``. Default None (True) + use_tee : {bool, None}, optional + If True use tee. Default None (True) + + + Returns + ------- + res : str + Both stdout and stderr messages. + + Notes + ----- + On NT, DOS systems the returned status is correct for external commands. + Wild cards will not work for non-posix systems or when use_shell=0. + + """ + # 2019-01-30, 1.17 + warnings.warn('exec_command is deprecated since NumPy v1.17, use ' + 'subprocess.Popen instead', DeprecationWarning, stacklevel=1) + log.debug('exec_command(%r,%s)' % (command, + ','.join(['%s=%r'%kv for kv in env.items()]))) + + if use_tee is None: + use_tee = os.name=='posix' + if use_shell is None: + use_shell = os.name=='posix' + execute_in = os.path.abspath(execute_in) + oldcwd = os.path.abspath(os.getcwd()) + + if __name__[-12:] == 'exec_command': + exec_dir = os.path.dirname(os.path.abspath(__file__)) + elif os.path.isfile('exec_command.py'): + exec_dir = os.path.abspath('.') + else: + exec_dir = os.path.abspath(sys.argv[0]) + if os.path.isfile(exec_dir): + exec_dir = os.path.dirname(exec_dir) + + if oldcwd!=execute_in: + os.chdir(execute_in) + log.debug('New cwd: %s' % execute_in) + else: + log.debug('Retaining cwd: %s' % oldcwd) + + oldenv = _preserve_environment( list(env.keys()) ) + _update_environment( **env ) + + try: + st = _exec_command(command, + use_shell=use_shell, + use_tee=use_tee, + **env) + finally: + if oldcwd!=execute_in: + os.chdir(oldcwd) + log.debug('Restored cwd to %s' % oldcwd) + _update_environment(**oldenv) + + return st + + +def _exec_command(command, use_shell=None, use_tee = None, **env): + """ + Internal workhorse for exec_command(). 
+ """ + if use_shell is None: + use_shell = os.name=='posix' + if use_tee is None: + use_tee = os.name=='posix' + + if os.name == 'posix' and use_shell: + # On POSIX, subprocess always uses /bin/sh, override + sh = os.environ.get('SHELL', '/bin/sh') + if is_sequence(command): + command = [sh, '-c', ' '.join(command)] + else: + command = [sh, '-c', command] + use_shell = False + + elif os.name == 'nt' and is_sequence(command): + # On Windows, join the string for CreateProcess() ourselves as + # subprocess does it a bit differently + command = ' '.join(_quote_arg(arg) for arg in command) + + # Inherit environment by default + env = env or None + try: + # text is set to False so that communicate() + # will return bytes. We need to decode the output ourselves + # so that Python will not raise a UnicodeDecodeError when + # it encounters an invalid character; rather, we simply replace it + proc = subprocess.Popen(command, shell=use_shell, env=env, text=False, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + except OSError: + # Return 127, as os.spawn*() and /bin/sh do + return 127, '' + + text, err = proc.communicate() + mylocale = locale.getpreferredencoding(False) + if mylocale is None: + mylocale = 'ascii' + text = text.decode(mylocale, errors='replace') + text = text.replace('\r\n', '\n') + # Another historical oddity + if text[-1:] == '\n': + text = text[:-1] + + if use_tee and text: + print(text) + return proc.returncode, text + + +def _quote_arg(arg): + """ + Quote the argument for safe use in a shell command line. + """ + # If there is a quote in the string, assume relevants parts of the + # string are already quoted (e.g. '-I"C:\\Program Files\\..."') + if '"' not in arg and ' ' in arg: + return '"%s"' % arg + return arg + +############################################################ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/extension.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/extension.py new file mode 100644 index 00000000..3ede013e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/extension.py @@ -0,0 +1,107 @@ +"""distutils.extension + +Provides the Extension class, used to describe C/C++ extension +modules in setup scripts. + +Overridden to support f2py. + +""" +import re +from distutils.extension import Extension as old_Extension + + +cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match +fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match + + +class Extension(old_Extension): + """ + Parameters + ---------- + name : str + Extension name. + sources : list of str + List of source file locations relative to the top directory of + the package. + extra_compile_args : list of str + Extra command line arguments to pass to the compiler. + extra_f77_compile_args : list of str + Extra command line arguments to pass to the fortran77 compiler. + extra_f90_compile_args : list of str + Extra command line arguments to pass to the fortran90 compiler. 
+ """ + def __init__( + self, name, sources, + include_dirs=None, + define_macros=None, + undef_macros=None, + library_dirs=None, + libraries=None, + runtime_library_dirs=None, + extra_objects=None, + extra_compile_args=None, + extra_link_args=None, + export_symbols=None, + swig_opts=None, + depends=None, + language=None, + f2py_options=None, + module_dirs=None, + extra_c_compile_args=None, + extra_cxx_compile_args=None, + extra_f77_compile_args=None, + extra_f90_compile_args=None,): + + old_Extension.__init__( + self, name, [], + include_dirs=include_dirs, + define_macros=define_macros, + undef_macros=undef_macros, + library_dirs=library_dirs, + libraries=libraries, + runtime_library_dirs=runtime_library_dirs, + extra_objects=extra_objects, + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args, + export_symbols=export_symbols) + + # Avoid assert statements checking that sources contains strings: + self.sources = sources + + # Python 2.4 distutils new features + self.swig_opts = swig_opts or [] + # swig_opts is assumed to be a list. Here we handle the case where it + # is specified as a string instead. + if isinstance(self.swig_opts, str): + import warnings + msg = "swig_opts is specified as a string instead of a list" + warnings.warn(msg, SyntaxWarning, stacklevel=2) + self.swig_opts = self.swig_opts.split() + + # Python 2.3 distutils new features + self.depends = depends or [] + self.language = language + + # numpy_distutils features + self.f2py_options = f2py_options or [] + self.module_dirs = module_dirs or [] + self.extra_c_compile_args = extra_c_compile_args or [] + self.extra_cxx_compile_args = extra_cxx_compile_args or [] + self.extra_f77_compile_args = extra_f77_compile_args or [] + self.extra_f90_compile_args = extra_f90_compile_args or [] + + return + + def has_cxx_sources(self): + for source in self.sources: + if cxx_ext_re(str(source)): + return True + return False + + def has_f2py_sources(self): + for source in self.sources: + if fortran_pyf_ext_re(source): + return True + return False + +# class Extension diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/__init__.py new file mode 100644 index 00000000..5160e2ab --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/__init__.py @@ -0,0 +1,1035 @@ +"""numpy.distutils.fcompiler + +Contains FCompiler, an abstract base class that defines the interface +for the numpy.distutils Fortran compiler abstraction model. + +Terminology: + +To be consistent, where the term 'executable' is used, it means the single +file, like 'gcc', that is executed, and should be a string. In contrast, +'command' means the entire command line, like ['gcc', '-c', 'file.c'], and +should be a list. + +But note that FCompiler.executables is actually a dictionary of commands. 
+ +""" +__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers', + 'dummy_fortran_file'] + +import os +import sys +import re +from pathlib import Path + +from distutils.sysconfig import get_python_lib +from distutils.fancy_getopt import FancyGetopt +from distutils.errors import DistutilsModuleError, \ + DistutilsExecError, CompileError, LinkError, DistutilsPlatformError +from distutils.util import split_quoted, strtobool + +from numpy.distutils.ccompiler import CCompiler, gen_lib_options +from numpy.distutils import log +from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \ + make_temp_file, get_shared_lib_extension +from numpy.distutils.exec_command import find_executable +from numpy.distutils import _shell_utils + +from .environment import EnvironmentConfig + +__metaclass__ = type + + +FORTRAN_COMMON_FIXED_EXTENSIONS = ['.for', '.ftn', '.f77', '.f'] + + +class CompilerNotFound(Exception): + pass + +def flaglist(s): + if is_string(s): + return split_quoted(s) + else: + return s + +def str2bool(s): + if is_string(s): + return strtobool(s) + return bool(s) + +def is_sequence_of_strings(seq): + return is_sequence(seq) and all_strings(seq) + +class FCompiler(CCompiler): + """Abstract base class to define the interface that must be implemented + by real Fortran compiler classes. + + Methods that subclasses may redefine: + + update_executables(), find_executables(), get_version() + get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug() + get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(), + get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(), + get_flags_arch_f90(), get_flags_debug_f90(), + get_flags_fix(), get_flags_linker_so() + + DON'T call these methods (except get_version) after + constructing a compiler instance or inside any other method. + All methods, except update_executables() and find_executables(), + may call the get_version() method. + + After constructing a compiler instance, always call customize(dist=None) + method that finalizes compiler construction and makes the following + attributes available: + compiler_f77 + compiler_f90 + compiler_fix + linker_so + archiver + ranlib + libraries + library_dirs + """ + + # These are the environment variables and distutils keys used. + # Each configuration description is + # (, , , , ) + # The hook names are handled by the self._environment_hook method. + # - names starting with 'self.' call methods in this class + # - names starting with 'exe.' return the key in the executables dict + # - names like 'flags.YYY' return self.get_flag_YYY() + # convert is either None or a function to convert a string to the + # appropriate type used. 
+
+    distutils_vars = EnvironmentConfig(
+        distutils_section='config_fc',
+        noopt = (None, None, 'noopt', str2bool, False),
+        noarch = (None, None, 'noarch', str2bool, False),
+        debug = (None, None, 'debug', str2bool, False),
+        verbose = (None, None, 'verbose', str2bool, False),
+    )
+
+    command_vars = EnvironmentConfig(
+        distutils_section='config_fc',
+        compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False),
+        compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False),
+        compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False),
+        version_cmd = ('exe.version_cmd', None, None, None, False),
+        linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False),
+        linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False),
+        archiver = (None, 'AR', 'ar', None, False),
+        ranlib = (None, 'RANLIB', 'ranlib', None, False),
+    )
+
+    flag_vars = EnvironmentConfig(
+        distutils_section='config_fc',
+        f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True),
+        f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True),
+        free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True),
+        fix = ('flags.fix', None, None, flaglist, False),
+        opt = ('flags.opt', 'FOPT', 'opt', flaglist, True),
+        opt_f77 = ('flags.opt_f77', None, None, flaglist, False),
+        opt_f90 = ('flags.opt_f90', None, None, flaglist, False),
+        arch = ('flags.arch', 'FARCH', 'arch', flaglist, False),
+        arch_f77 = ('flags.arch_f77', None, None, flaglist, False),
+        arch_f90 = ('flags.arch_f90', None, None, flaglist, False),
+        debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True),
+        debug_f77 = ('flags.debug_f77', None, None, flaglist, False),
+        debug_f90 = ('flags.debug_f90', None, None, flaglist, False),
+        flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True),
+        linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True),
+        linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True),
+        ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True),
+    )
+
+    language_map = {'.f': 'f77',
+                    '.for': 'f77',
+                    '.F': 'f77',    # XXX: needs preprocessor
+                    '.ftn': 'f77',
+                    '.f77': 'f77',
+                    '.f90': 'f90',
+                    '.F90': 'f90',  # XXX: needs preprocessor
+                    '.f95': 'f90',
+                    }
+    language_order = ['f90', 'f77']
+
+
+    # These will be set by the subclass
+
+    compiler_type = None
+    compiler_aliases = ()
+    version_pattern = None
+
+    possible_executables = []
+    executables = {
+        'version_cmd': ["f77", "-v"],
+        'compiler_f77': ["f77"],
+        'compiler_f90': ["f90"],
+        'compiler_fix': ["f90", "-fixed"],
+        'linker_so': ["f90", "-shared"],
+        'linker_exe': ["f90"],
+        'archiver': ["ar", "-cr"],
+        'ranlib': None,
+        }
+
+    # If compiler does not support compiling Fortran 90 then it can
+    # suggest using another compiler. For example, gnu would suggest
+    # gnu95 compiler type when there are F90 sources.
+    suggested_f90_compiler = None
+
+    compile_switch = "-c"
+    object_switch = "-o "   # Ending space matters! It will be stripped
+                            # but if it is missing then object_switch
+                            # will be prefixed to object file name by
+                            # string concatenation.
+    library_switch = "-o "  # Ditto!
+
+    # Switch to specify where module files are created and searched
+    # for USE statement.  Normally it is a string and also here ending
+    # space matters. See above.
+    module_dir_switch = None
+
+    # Switch to specify where module files are searched for USE statement.
+    module_include_switch = '-I'
+
+    pic_flags = []           # Flags to create position-independent code
+
+    src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR']
+    obj_extension = ".o"
+
+    shared_lib_extension = get_shared_lib_extension()
+    static_lib_extension = ".a"  # or .lib
+    static_lib_format = "lib%s%s" # or %s%s
+    shared_lib_format = "%s%s"
+    exe_extension = ""
+
+    _exe_cache = {}
+
+    _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90',
+                        'compiler_fix', 'linker_so', 'linker_exe', 'archiver',
+                        'ranlib']
+
+    # This will be set by new_fcompiler when called in
+    # command/{build_ext.py, build_clib.py, config.py} files.
+    c_compiler = None
+
+    # extra_{f77,f90}_compile_args are set by build_ext.build_extension method
+    extra_f77_compile_args = []
+    extra_f90_compile_args = []
+
+    def __init__(self, *args, **kw):
+        CCompiler.__init__(self, *args, **kw)
+        self.distutils_vars = self.distutils_vars.clone(self._environment_hook)
+        self.command_vars = self.command_vars.clone(self._environment_hook)
+        self.flag_vars = self.flag_vars.clone(self._environment_hook)
+        self.executables = self.executables.copy()
+        for e in self._executable_keys:
+            if e not in self.executables:
+                self.executables[e] = None
+
+        # Some methods depend on .customize() being called first, so
+        # this keeps track of whether that's happened yet.
+        self._is_customised = False
+
+    def __copy__(self):
+        obj = self.__new__(self.__class__)
+        obj.__dict__.update(self.__dict__)
+        obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook)
+        obj.command_vars = obj.command_vars.clone(obj._environment_hook)
+        obj.flag_vars = obj.flag_vars.clone(obj._environment_hook)
+        obj.executables = obj.executables.copy()
+        return obj
+
+    def copy(self):
+        return self.__copy__()
+
+    # Use properties for the attributes used by CCompiler. Setting them
+    # as attributes from the self.executables dictionary is error-prone,
+    # so we get them from there each time.
+    def _command_property(key):
+        def fget(self):
+            assert self._is_customised
+            return self.executables[key]
+        return property(fget=fget)
+    version_cmd = _command_property('version_cmd')
+    compiler_f77 = _command_property('compiler_f77')
+    compiler_f90 = _command_property('compiler_f90')
+    compiler_fix = _command_property('compiler_fix')
+    linker_so = _command_property('linker_so')
+    linker_exe = _command_property('linker_exe')
+    archiver = _command_property('archiver')
+    ranlib = _command_property('ranlib')
+
+    # Make our terminology consistent.
+    def set_executable(self, key, value):
+        self.set_command(key, value)
+
+    def set_commands(self, **kw):
+        for k, v in kw.items():
+            self.set_command(k, v)
+
+    def set_command(self, key, value):
+        if not key in self._executable_keys:
+            raise ValueError(
+                "unknown executable '%s' for class %s" %
+                (key, self.__class__.__name__))
+        if is_string(value):
+            value = split_quoted(value)
+        assert value is None or is_sequence_of_strings(value[1:]), (key, value)
+        self.executables[key] = value
+
+    ######################################################################
+    ## Methods that subclasses may redefine. But don't call these methods!
+    ## They are private to FCompiler class and may return unexpected
+    ## results if used elsewhere. So, you have been warned..
+
+    def find_executables(self):
+        """Go through the self.executables dictionary, and attempt to
+        find and assign appropriate executables.
+
+        Executable names are looked for in the environment (environment
+        variables, the distutils.cfg, and command line), the 0th-element of
+        the command list, and the self.possible_executables list.
+
+        Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77
+        or the Fortran 90 compiler executable is used, unless overridden
+        by an environment setting.
+
+        Subclasses should call this if overridden.
+        """
+        assert self._is_customised
+        exe_cache = self._exe_cache
+        def cached_find_executable(exe):
+            if exe in exe_cache:
+                return exe_cache[exe]
+            fc_exe = find_executable(exe)
+            exe_cache[exe] = exe_cache[fc_exe] = fc_exe
+            return fc_exe
+        def verify_command_form(name, value):
+            if value is not None and not is_sequence_of_strings(value):
+                raise ValueError(
+                    "%s value %r is invalid in class %s" %
+                    (name, value, self.__class__.__name__))
+        def set_exe(exe_key, f77=None, f90=None):
+            cmd = self.executables.get(exe_key, None)
+            if not cmd:
+                return None
+            # Note that we get cmd[0] here if the environment doesn't
+            # have anything set
+            exe_from_environ = getattr(self.command_vars, exe_key)
+            if not exe_from_environ:
+                possibles = [f90, f77] + self.possible_executables
+            else:
+                possibles = [exe_from_environ] + self.possible_executables
+
+            seen = set()
+            unique_possibles = []
+            for e in possibles:
+                if e == '<F77>':
+                    e = f77
+                elif e == '<F90>':
+                    e = f90
+                if not e or e in seen:
+                    continue
+                seen.add(e)
+                unique_possibles.append(e)
+
+            for exe in unique_possibles:
+                fc_exe = cached_find_executable(exe)
+                if fc_exe:
+                    cmd[0] = fc_exe
+                    return fc_exe
+            self.set_command(exe_key, None)
+            return None
+
+        ctype = self.compiler_type
+        f90 = set_exe('compiler_f90')
+        if not f90:
+            f77 = set_exe('compiler_f77')
+            if f77:
+                log.warn('%s: no Fortran 90 compiler found' % ctype)
+            else:
+                raise CompilerNotFound('%s: f90 nor f77' % ctype)
+        else:
+            f77 = set_exe('compiler_f77', f90=f90)
+            if not f77:
+                log.warn('%s: no Fortran 77 compiler found' % ctype)
+            set_exe('compiler_fix', f90=f90)
+
+        set_exe('linker_so', f77=f77, f90=f90)
+        set_exe('linker_exe', f77=f77, f90=f90)
+        set_exe('version_cmd', f77=f77, f90=f90)
+        set_exe('archiver')
+        set_exe('ranlib')
+
+    def update_executables(self):
+        """Called at the beginning of customisation. Subclasses should
+        override this if they need to set up the executables dictionary.
+
+        Note that self.find_executables() is run afterwards, so the
+        self.executables dictionary values can contain <F77> or <F90> as
+        the command, which will be replaced by the found F77 or F90
+        compiler.
+ """ + pass + + def get_flags(self): + """List of flags common to all compiler types.""" + return [] + self.pic_flags + + def _get_command_flags(self, key): + cmd = self.executables.get(key, None) + if cmd is None: + return [] + return cmd[1:] + + def get_flags_f77(self): + """List of Fortran 77 specific flags.""" + return self._get_command_flags('compiler_f77') + def get_flags_f90(self): + """List of Fortran 90 specific flags.""" + return self._get_command_flags('compiler_f90') + def get_flags_free(self): + """List of Fortran 90 free format specific flags.""" + return [] + def get_flags_fix(self): + """List of Fortran 90 fixed format specific flags.""" + return self._get_command_flags('compiler_fix') + def get_flags_linker_so(self): + """List of linker flags to build a shared library.""" + return self._get_command_flags('linker_so') + def get_flags_linker_exe(self): + """List of linker flags to build an executable.""" + return self._get_command_flags('linker_exe') + def get_flags_ar(self): + """List of archiver flags. """ + return self._get_command_flags('archiver') + def get_flags_opt(self): + """List of architecture independent compiler flags.""" + return [] + def get_flags_arch(self): + """List of architecture dependent compiler flags.""" + return [] + def get_flags_debug(self): + """List of compiler flags to compile with debugging information.""" + return [] + + get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt + get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch + get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug + + def get_libraries(self): + """List of compiler libraries.""" + return self.libraries[:] + def get_library_dirs(self): + """List of compiler library directories.""" + return self.library_dirs[:] + + def get_version(self, force=False, ok_status=[0]): + assert self._is_customised + version = CCompiler.get_version(self, force=force, ok_status=ok_status) + if version is None: + raise CompilerNotFound() + return version + + + ############################################################ + + ## Public methods: + + def customize(self, dist = None): + """Customize Fortran compiler. + + This method gets Fortran compiler specific information from + (i) class definition, (ii) environment, (iii) distutils config + files, and (iv) command line (later overrides earlier). + + This method should be always called after constructing a + compiler instance. But not in __init__ because Distribution + instance is needed for (iii) and (iv). + """ + log.info('customize %s' % (self.__class__.__name__)) + + self._is_customised = True + + self.distutils_vars.use_distribution(dist) + self.command_vars.use_distribution(dist) + self.flag_vars.use_distribution(dist) + + self.update_executables() + + # find_executables takes care of setting the compiler commands, + # version_cmd, linker_so, linker_exe, ar, and ranlib + self.find_executables() + + noopt = self.distutils_vars.get('noopt', False) + noarch = self.distutils_vars.get('noarch', noopt) + debug = self.distutils_vars.get('debug', False) + + f77 = self.command_vars.compiler_f77 + f90 = self.command_vars.compiler_f90 + + f77flags = [] + f90flags = [] + freeflags = [] + fixflags = [] + + if f77: + f77 = _shell_utils.NativeParser.split(f77) + f77flags = self.flag_vars.f77 + if f90: + f90 = _shell_utils.NativeParser.split(f90) + f90flags = self.flag_vars.f90 + freeflags = self.flag_vars.free + # XXX Assuming that free format is default for f90 compiler. 
+        fix = self.command_vars.compiler_fix
+        # NOTE: this and similar examples are probably just
+        # excluding --coverage flag when F90 = gfortran --coverage
+        # instead of putting that flag somewhere more appropriate
+        # this and similar examples where a Fortran compiler
+        # environment variable has been customized by CI or a user
+        # should perhaps eventually be more thoroughly tested and more
+        # robustly handled
+        if fix:
+            fix = _shell_utils.NativeParser.split(fix)
+            fixflags = self.flag_vars.fix + f90flags
+
+        oflags, aflags, dflags = [], [], []
+        # examine get_flags_<tag>_<compiler> for extra flags
+        # only add them if the method is different from get_flags_<tag>
+        def get_flags(tag, flags):
+            # note that self.flag_vars.<tag> calls self.get_flags_<tag>()
+            flags.extend(getattr(self.flag_vars, tag))
+            this_get = getattr(self, 'get_flags_' + tag)
+            for name, c, flagvar in [('f77', f77, f77flags),
+                                     ('f90', f90, f90flags),
+                                     ('f90', fix, fixflags)]:
+                t = '%s_%s' % (tag, name)
+                if c and this_get is not getattr(self, 'get_flags_' + t):
+                    flagvar.extend(getattr(self.flag_vars, t))
+        if not noopt:
+            get_flags('opt', oflags)
+            if not noarch:
+                get_flags('arch', aflags)
+        if debug:
+            get_flags('debug', dflags)
+
+        fflags = self.flag_vars.flags + dflags + oflags + aflags
+
+        if f77:
+            self.set_commands(compiler_f77=f77+f77flags+fflags)
+        if f90:
+            self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags)
+        if fix:
+            self.set_commands(compiler_fix=fix+fixflags+fflags)
+
+
+        #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
+        linker_so = self.linker_so
+        if linker_so:
+            linker_so_flags = self.flag_vars.linker_so
+            if sys.platform.startswith('aix'):
+                python_lib = get_python_lib(standard_lib=1)
+                ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
+                python_exp = os.path.join(python_lib, 'config', 'python.exp')
+                linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
+            if sys.platform.startswith('os400'):
+                from distutils.sysconfig import get_config_var
+                python_config = get_config_var('LIBPL')
+                ld_so_aix = os.path.join(python_config, 'ld_so_aix')
+                python_exp = os.path.join(python_config, 'python.exp')
+                linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
+            self.set_commands(linker_so=linker_so+linker_so_flags)
+
+        linker_exe = self.linker_exe
+        if linker_exe:
+            linker_exe_flags = self.flag_vars.linker_exe
+            self.set_commands(linker_exe=linker_exe+linker_exe_flags)
+
+        ar = self.command_vars.archiver
+        if ar:
+            arflags = self.flag_vars.ar
+            self.set_commands(archiver=[ar]+arflags)
+
+        self.set_library_dirs(self.get_library_dirs())
+        self.set_libraries(self.get_libraries())
+
+    def dump_properties(self):
+        """Print out the attributes of a compiler instance."""
+        props = []
+        for key in list(self.executables.keys()) + \
+                ['version', 'libraries', 'library_dirs',
+                 'object_switch', 'compile_switch']:
+            if hasattr(self, key):
+                v = getattr(self, key)
+                props.append((key, None, '= '+repr(v)))
+        props.sort()
+
+        pretty_printer = FancyGetopt(props)
+        for l in pretty_printer.generate_help("%s instance properties:" \
+                                              % (self.__class__.__name__)):
+            if l[:4]=='  --':
+                l = '  ' + l[4:]
+            print(l)
+
+    ###################
+
+    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
+        """Compile 'src' to product 'obj'."""
+        src_flags = {}
+        if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \
+           and not has_f90_header(src):
+            flavor = ':f77'
+            compiler = self.compiler_f77
+            src_flags = get_f77flags(src)
+            extra_compile_args = self.extra_f77_compile_args or []
+        elif is_free_format(src):
+            flavor = ':f90'
+            compiler = self.compiler_f90
+            if compiler is None:
+                raise DistutilsExecError('f90 not supported by %s needed for %s'\
+                      % (self.__class__.__name__, src))
+            extra_compile_args = self.extra_f90_compile_args or []
+        else:
+            flavor = ':fix'
+            compiler = self.compiler_fix
+            if compiler is None:
+                raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\
+                      % (self.__class__.__name__, src))
+            extra_compile_args = self.extra_f90_compile_args or []
+        if self.object_switch[-1]==' ':
+            o_args = [self.object_switch.strip(), obj]
+        else:
+            o_args = [self.object_switch.strip()+obj]
+
+        assert self.compile_switch.strip()
+        s_args = [self.compile_switch, src]
+
+        if extra_compile_args:
+            log.info('extra %s options: %r' \
+                     % (flavor[1:], ' '.join(extra_compile_args)))
+
+        extra_flags = src_flags.get(self.compiler_type, [])
+        if extra_flags:
+            log.info('using compile options from source: %r' \
+                     % ' '.join(extra_flags))
+
+        command = compiler + cc_args + extra_flags + s_args + o_args \
+                  + extra_postargs + extra_compile_args
+
+        display = '%s: %s' % (os.path.basename(compiler[0]) + flavor,
+                              src)
+        try:
+            self.spawn(command, display=display)
+        except DistutilsExecError as e:
+            msg = str(e)
+            raise CompileError(msg) from None
+
+    def module_options(self, module_dirs, module_build_dir):
+        options = []
+        if self.module_dir_switch is not None:
+            if self.module_dir_switch[-1]==' ':
+                options.extend([self.module_dir_switch.strip(), module_build_dir])
+            else:
+                options.append(self.module_dir_switch.strip()+module_build_dir)
+        else:
+            print('XXX: module_build_dir=%r option ignored' % (module_build_dir))
+            print('XXX: Fix module_dir_switch for ', self.__class__.__name__)
+        if self.module_include_switch is not None:
+            for d in [module_build_dir]+module_dirs:
+                options.append('%s%s' % (self.module_include_switch, d))
+        else:
+            print('XXX: module_dirs=%r option ignored' % (module_dirs))
+            print('XXX: Fix module_include_switch for ', self.__class__.__name__)
+        return options
+
+    def library_option(self, lib):
+        return "-l" + lib
+    def library_dir_option(self, dir):
+        return "-L" + dir
+
+    def link(self, target_desc, objects,
+             output_filename, output_dir=None, libraries=None,
+             library_dirs=None, runtime_library_dirs=None,
+             export_symbols=None, debug=0, extra_preargs=None,
+             extra_postargs=None, build_temp=None, target_lang=None):
+        objects, output_dir = self._fix_object_args(objects, output_dir)
+        libraries, library_dirs, runtime_library_dirs = \
+            self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
+
+        lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
+                                   libraries)
+        if is_string(output_dir):
+            output_filename = os.path.join(output_dir, output_filename)
+        elif output_dir is not None:
+            raise TypeError("'output_dir' must be a string or None")
+
+        if self._need_link(objects, output_filename):
+            if self.library_switch[-1]==' ':
+                o_args = [self.library_switch.strip(), output_filename]
+            else:
+                o_args = [self.library_switch.strip()+output_filename]
+
+            if is_string(self.objects):
+                ld_args = objects + [self.objects]
+            else:
+                ld_args = objects + self.objects
+            ld_args = ld_args + lib_opts + o_args
+            if debug:
+                ld_args[:0] = ['-g']
+            if extra_preargs:
+                ld_args[:0] = extra_preargs
+            if extra_postargs:
+                ld_args.extend(extra_postargs)
+            self.mkpath(os.path.dirname(output_filename))
+            if target_desc == CCompiler.EXECUTABLE:
+                linker = self.linker_exe[:]
+            else:
+                linker = self.linker_so[:]
+            command = linker + ld_args
+            try:
+                self.spawn(command)
+            except DistutilsExecError as e:
+                msg = str(e)
+                raise LinkError(msg) from None
+        else:
+            log.debug("skipping %s (up-to-date)", output_filename)
+
+    def _environment_hook(self, name, hook_name):
+        if hook_name is None:
+            return None
+        if is_string(hook_name):
+            if hook_name.startswith('self.'):
+                hook_name = hook_name[5:]
+                hook = getattr(self, hook_name)
+                return hook()
+            elif hook_name.startswith('exe.'):
+                hook_name = hook_name[4:]
+                var = self.executables[hook_name]
+                if var:
+                    return var[0]
+                else:
+                    return None
+            elif hook_name.startswith('flags.'):
+                hook_name = hook_name[6:]
+                hook = getattr(self, 'get_flags_' + hook_name)
+                return hook()
+        else:
+            return hook_name()
+
+    def can_ccompiler_link(self, ccompiler):
+        """
+        Check if the given C compiler can link objects produced by
+        this compiler.
+        """
+        return True
+
+    def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
+        """
+        Convert a set of object files that are not compatible with the default
+        linker, to a file that is compatible.
+
+        Parameters
+        ----------
+        objects : list
+            List of object files to include.
+        output_dir : str
+            Output directory to place generated object files.
+        extra_dll_dir : str
+            Output directory to place extra DLL files that need to be
+            included on Windows.
+
+        Returns
+        -------
+        converted_objects : list of str
+             List of converted object files.
+             Note that the number of output files is not necessarily
+             the same as inputs.
+
+        """
+        raise NotImplementedError()
+
+    ## class FCompiler
+
+_default_compilers = (
+    # sys.platform mappings
+    ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95',
+               'intelvem', 'intelem', 'flang')),
+    ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')),
+    ('linux.*', ('arm', 'gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag',
+                 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95',
+                 'pathf95', 'nagfor', 'fujitsu')),
+    ('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu',
+                  'g95', 'pg')),
+    ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')),
+    ('irix.*', ('mips', 'gnu', 'gnu95',)),
+    ('aix.*', ('ibm', 'gnu', 'gnu95',)),
+    # os.name mappings
+    ('posix', ('gnu', 'gnu95',)),
+    ('nt', ('gnu', 'gnu95',)),
+    ('mac', ('gnu95', 'gnu', 'pg')),
+    )
+
+fcompiler_class = None
+fcompiler_aliases = None
+
+def load_all_fcompiler_classes():
+    """Cache all the FCompiler classes found in modules in the
+    numpy.distutils.fcompiler package.
+    """
+    from glob import glob
+    global fcompiler_class, fcompiler_aliases
+    if fcompiler_class is not None:
+        return
+    pys = os.path.join(os.path.dirname(__file__), '*.py')
+    fcompiler_class = {}
+    fcompiler_aliases = {}
+    for fname in glob(pys):
+        module_name, ext = os.path.splitext(os.path.basename(fname))
+        module_name = 'numpy.distutils.fcompiler.' + module_name
+        __import__ (module_name)
+        module = sys.modules[module_name]
+        if hasattr(module, 'compilers'):
+            for cname in module.compilers:
+                klass = getattr(module, cname)
+                desc = (klass.compiler_type, klass, klass.description)
+                fcompiler_class[klass.compiler_type] = desc
+                for alias in klass.compiler_aliases:
+                    if alias in fcompiler_aliases:
+                        raise ValueError("alias %r defined for both %s and %s"
+                                         % (alias, klass.__name__,
+                                            fcompiler_aliases[alias][1].__name__))
+                    fcompiler_aliases[alias] = desc
+
+def _find_existing_fcompiler(compiler_types,
+                             osname=None, platform=None,
+                             requiref90=False,
+                             c_compiler=None):
+    from numpy.distutils.core import get_distribution
+    dist = get_distribution(always=True)
+    for compiler_type in compiler_types:
+        v = None
+        try:
+            c = new_fcompiler(plat=platform, compiler=compiler_type,
+                              c_compiler=c_compiler)
+            c.customize(dist)
+            v = c.get_version()
+            if requiref90 and c.compiler_f90 is None:
+                v = None
+                new_compiler = c.suggested_f90_compiler
+                if new_compiler:
+                    log.warn('Trying %r compiler as suggested by %r '
+                             'compiler for f90 support.' % (compiler_type,
+                                                            new_compiler))
+                    c = new_fcompiler(plat=platform, compiler=new_compiler,
+                                      c_compiler=c_compiler)
+                    c.customize(dist)
+                    v = c.get_version()
+                    if v is not None:
+                        compiler_type = new_compiler
+            if requiref90 and c.compiler_f90 is None:
+                raise ValueError('%s does not support compiling f90 codes, '
+                                 'skipping.' % (c.__class__.__name__))
+        except DistutilsModuleError:
+            log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type)
+        except CompilerNotFound:
+            log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type)
+        if v is not None:
+            return compiler_type
+    return None
+
+def available_fcompilers_for_platform(osname=None, platform=None):
+    if osname is None:
+        osname = os.name
+    if platform is None:
+        platform = sys.platform
+    matching_compiler_types = []
+    for pattern, compiler_type in _default_compilers:
+        if re.match(pattern, platform) or re.match(pattern, osname):
+            for ct in compiler_type:
+                if ct not in matching_compiler_types:
+                    matching_compiler_types.append(ct)
+    if not matching_compiler_types:
+        matching_compiler_types.append('gnu')
+    return matching_compiler_types
+
+def get_default_fcompiler(osname=None, platform=None, requiref90=False,
+                          c_compiler=None):
+    """Determine the default Fortran compiler to use for the given
+    platform."""
+    matching_compiler_types = available_fcompilers_for_platform(osname,
+                                                                platform)
+    log.info("get_default_fcompiler: matching types: '%s'",
+             matching_compiler_types)
+    compiler_type = _find_existing_fcompiler(matching_compiler_types,
+                                             osname=osname,
+                                             platform=platform,
+                                             requiref90=requiref90,
+                                             c_compiler=c_compiler)
+    return compiler_type
+
+# Flag to avoid rechecking for Fortran compiler every time
+failed_fcompilers = set()
+
+def new_fcompiler(plat=None,
+                  compiler=None,
+                  verbose=0,
+                  dry_run=0,
+                  force=0,
+                  requiref90=False,
+                  c_compiler = None):
+    """Generate an instance of some FCompiler subclass for the supplied
+    platform/compiler combination.
+ """ + global failed_fcompilers + fcompiler_key = (plat, compiler) + if fcompiler_key in failed_fcompilers: + return None + + load_all_fcompiler_classes() + if plat is None: + plat = os.name + if compiler is None: + compiler = get_default_fcompiler(plat, requiref90=requiref90, + c_compiler=c_compiler) + if compiler in fcompiler_class: + module_name, klass, long_description = fcompiler_class[compiler] + elif compiler in fcompiler_aliases: + module_name, klass, long_description = fcompiler_aliases[compiler] + else: + msg = "don't know how to compile Fortran code on platform '%s'" % plat + if compiler is not None: + msg = msg + " with '%s' compiler." % compiler + msg = msg + " Supported compilers are: %s)" \ + % (','.join(fcompiler_class.keys())) + log.warn(msg) + failed_fcompilers.add(fcompiler_key) + return None + + compiler = klass(verbose=verbose, dry_run=dry_run, force=force) + compiler.c_compiler = c_compiler + return compiler + +def show_fcompilers(dist=None): + """Print list of available compilers (used by the "--help-fcompiler" + option to "config_fc"). + """ + if dist is None: + from distutils.dist import Distribution + from numpy.distutils.command.config_compiler import config_fc + dist = Distribution() + dist.script_name = os.path.basename(sys.argv[0]) + dist.script_args = ['config_fc'] + sys.argv[1:] + try: + dist.script_args.remove('--help-fcompiler') + except ValueError: + pass + dist.cmdclass['config_fc'] = config_fc + dist.parse_config_files() + dist.parse_command_line() + compilers = [] + compilers_na = [] + compilers_ni = [] + if not fcompiler_class: + load_all_fcompiler_classes() + platform_compilers = available_fcompilers_for_platform() + for compiler in platform_compilers: + v = None + log.set_verbosity(-2) + try: + c = new_fcompiler(compiler=compiler, verbose=dist.verbose) + c.customize(dist) + v = c.get_version() + except (DistutilsModuleError, CompilerNotFound) as e: + log.debug("show_fcompilers: %s not found" % (compiler,)) + log.debug(repr(e)) + + if v is None: + compilers_na.append(("fcompiler="+compiler, None, + fcompiler_class[compiler][2])) + else: + c.dump_properties() + compilers.append(("fcompiler="+compiler, None, + fcompiler_class[compiler][2] + ' (%s)' % v)) + + compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) + compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) + for fc in compilers_ni] + + compilers.sort() + compilers_na.sort() + compilers_ni.sort() + pretty_printer = FancyGetopt(compilers) + pretty_printer.print_help("Fortran compilers found:") + pretty_printer = FancyGetopt(compilers_na) + pretty_printer.print_help("Compilers available for this " + "platform, but not found:") + if compilers_ni: + pretty_printer = FancyGetopt(compilers_ni) + pretty_printer.print_help("Compilers not available on this platform:") + print("For compiler details, run 'config_fc --verbose' setup command.") + + +def dummy_fortran_file(): + fo, name = make_temp_file(suffix='.f') + fo.write(" subroutine dummy()\n end\n") + fo.close() + return name[:-2] + + +_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search +_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search +_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search +_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match + +def is_free_format(file): + """Check if file is in free format Fortran.""" + # f90 allows both fixed and free format, assuming fixed unless + # signs of free format are detected. 
+    result = 0
+    with open(file, encoding='latin1') as f:
+        line = f.readline()
+        n = 10000 # the number of non-comment lines to scan for hints
+        if _has_f_header(line) or _has_fix_header(line):
+            n = 0
+        elif _has_f90_header(line):
+            n = 0
+            result = 1
+        while n>0 and line:
+            line = line.rstrip()
+            if line and line[0]!='!':
+                n -= 1
+                if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
+                    result = 1
+                    break
+            line = f.readline()
+    return result
+
+def has_f90_header(src):
+    with open(src, encoding='latin1') as f:
+        line = f.readline()
+    return _has_f90_header(line) or _has_fix_header(line)
+
+_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I)
+def get_f77flags(src):
+    """
+    Search the first 20 lines of fortran 77 code for line pattern
+    `CF77FLAGS(<fcompiler type>)=<f77 flags>`
+    Return a dictionary {<fcompiler type>:<f77 flags>}.
+    """
+    flags = {}
+    with open(src, encoding='latin1') as f:
+        i = 0
+        for line in f:
+            i += 1
+            if i>20: break
+            m = _f77flags_re.match(line)
+            if not m: continue
+            fcname = m.group('fcname').strip()
+            fflags = m.group('fflags').strip()
+            flags[fcname] = split_quoted(fflags)
+    return flags
+
+# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags
+
+if __name__ == '__main__':
+    show_fcompilers()
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/absoft.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/absoft.py
new file mode 100644
index 00000000..68f516b9
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/absoft.py
@@ -0,0 +1,156 @@
+
+# Absoft Corporation ceased operations on 12/31/2022.
+# Thus, all links to <http://www.absoft.com> are invalid.
+
+# Notes:
+# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
+#   generated extension modules (works for f2py v2.45.241_1936 and up)
+import os
+
+from numpy.distutils.cpuinfo import cpu
+from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
+from numpy.distutils.misc_util import cyg2win32
+
+compilers = ['AbsoftFCompiler']
+
+class AbsoftFCompiler(FCompiler):
+
+    compiler_type = 'absoft'
+    description = 'Absoft Corp Fortran Compiler'
+    #version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp'
+    version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\
+                      r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)'
+
+    # on windows: f90 -V -c dummy.f
+    # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x  f36t87) Version 2.3 Wed Apr 19, 2006  13:05:16
+
+    # samt5735(8)$ f90 -V -c dummy.f
+    # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0
+    # Note that fink installs g77 as f77, so need to use f90 for detection.
+
+    executables = {
+        'version_cmd'  : None,          # set by update_executables
+        'compiler_f77' : ["f77"],
+        'compiler_fix' : ["f90"],
+        'compiler_f90' : ["f90"],
+        'linker_so'    : ["<F90>"],
+        'archiver'     : ["ar", "-cr"],
+        'ranlib'       : ["ranlib"]
+        }
+
+    if os.name=='nt':
+        library_switch = '/out:'      #No space after /out:!
+
+    module_dir_switch = None
+    module_include_switch = '-p'
+
+    def update_executables(self):
+        f = cyg2win32(dummy_fortran_file())
+        self.executables['version_cmd'] = ['<F90>', '-V', '-c',
+                                           f+'.f', '-o', f+'.o']
+
+    def get_flags_linker_so(self):
+        if os.name=='nt':
+            opt = ['/dll']
+        # The "-K shared" switches are being left in for pre-9.0 versions
+        # of Absoft though I don't think versions earlier than 9 can
+        # actually be used to build shared libraries.  In fact, version
In fact, version + # 8 of Absoft doesn't recognize "-K shared" and will fail. + elif self.get_version() >= '9.0': + opt = ['-shared'] + else: + opt = ["-K", "shared"] + return opt + + def library_dir_option(self, dir): + if os.name=='nt': + return ['-link', '/PATH:%s' % (dir)] + return "-L" + dir + + def library_option(self, lib): + if os.name=='nt': + return '%s.lib' % (lib) + return "-l" + lib + + def get_library_dirs(self): + opt = FCompiler.get_library_dirs(self) + d = os.environ.get('ABSOFT') + if d: + if self.get_version() >= '10.0': + # use shared libraries, the static libraries were not compiled -fPIC + prefix = 'sh' + else: + prefix = '' + if cpu.is_64bit(): + suffix = '64' + else: + suffix = '' + opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) + return opt + + def get_libraries(self): + opt = FCompiler.get_libraries(self) + if self.get_version() >= '11.0': + opt.extend(['af90math', 'afio', 'af77math', 'amisc']) + elif self.get_version() >= '10.0': + opt.extend(['af90math', 'afio', 'af77math', 'U77']) + elif self.get_version() >= '8.0': + opt.extend(['f90math', 'fio', 'f77math', 'U77']) + else: + opt.extend(['fio', 'f90math', 'fmath', 'U77']) + if os.name =='nt': + opt.append('COMDLG32') + return opt + + def get_flags(self): + opt = FCompiler.get_flags(self) + if os.name != 'nt': + opt.extend(['-s']) + if self.get_version(): + if self.get_version()>='8.2': + opt.append('-fpic') + return opt + + def get_flags_f77(self): + opt = FCompiler.get_flags_f77(self) + opt.extend(['-N22', '-N90', '-N110']) + v = self.get_version() + if os.name == 'nt': + if v and v>='8.0': + opt.extend(['-f', '-N15']) + else: + opt.append('-f') + if v: + if v<='4.6': + opt.append('-B108') + else: + # Though -N15 is undocumented, it works with + # Absoft 8.0 on Linux + opt.append('-N15') + return opt + + def get_flags_f90(self): + opt = FCompiler.get_flags_f90(self) + opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", + "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) + if self.get_version(): + if self.get_version()>'4.6': + opt.extend(["-YDEALLOC=ALL"]) + return opt + + def get_flags_fix(self): + opt = FCompiler.get_flags_fix(self) + opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", + "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) + opt.extend(["-f", "fixed"]) + return opt + + def get_flags_opt(self): + opt = ['-O'] + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='absoft').get_version()) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/arm.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/arm.py new file mode 100644 index 00000000..3eb7e9af --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/arm.py @@ -0,0 +1,71 @@ +import sys + +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file +from sys import platform +from os.path import join, dirname, normpath + +compilers = ['ArmFlangCompiler'] + +import functools + +class ArmFlangCompiler(FCompiler): + compiler_type = 'arm' + description = 'Arm Compiler' + version_pattern = r'\s*Arm.*version (?P[\d.-]+).*' + + ar_exe = 'lib.exe' + possible_executables = ['armflang'] + + executables = { + 'version_cmd': ["", "--version"], + 'compiler_f77': ["armflang", "-fPIC"], + 'compiler_fix': ["armflang", "-fPIC", "-ffixed-form"], + 'compiler_f90': ["armflang", "-fPIC"], + 'linker_so': ["armflang", 
"-fPIC", "-shared"], + 'archiver': ["ar", "-cr"], + 'ranlib': None + } + + pic_flags = ["-fPIC", "-DPIC"] + c_compiler = 'arm' + module_dir_switch = '-module ' # Don't remove ending space! + + def get_libraries(self): + opt = FCompiler.get_libraries(self) + opt.extend(['flang', 'flangrti', 'ompstub']) + return opt + + @functools.lru_cache(maxsize=128) + def get_library_dirs(self): + """List of compiler library directories.""" + opt = FCompiler.get_library_dirs(self) + flang_dir = dirname(self.executables['compiler_f77'][0]) + opt.append(normpath(join(flang_dir, '..', 'lib'))) + + return opt + + def get_flags(self): + return [] + + def get_flags_free(self): + return [] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + return ['-O3'] + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + return '-Wl,-rpath=%s' % dir + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='armflang').get_version()) + diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/compaq.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/compaq.py new file mode 100644 index 00000000..01314c13 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/compaq.py @@ -0,0 +1,120 @@ + +#http://www.compaq.com/fortran/docs/ +import os +import sys + +from numpy.distutils.fcompiler import FCompiler +from distutils.errors import DistutilsPlatformError + +compilers = ['CompaqFCompiler'] +if os.name != 'posix' or sys.platform[:6] == 'cygwin' : + # Otherwise we'd get a false positive on posix systems with + # case-insensitive filesystems (like darwin), because we'll pick + # up /bin/df + compilers.append('CompaqVisualFCompiler') + +class CompaqFCompiler(FCompiler): + + compiler_type = 'compaq' + description = 'Compaq Fortran Compiler' + version_pattern = r'Compaq Fortran (?P[^\s]*).*' + + if sys.platform[:5]=='linux': + fc_exe = 'fort' + else: + fc_exe = 'f90' + + executables = { + 'version_cmd' : ['', "-version"], + 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"], + 'compiler_fix' : [fc_exe, "-fixed"], + 'compiler_f90' : [fc_exe], + 'linker_so' : [''], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + module_dir_switch = '-module ' # not tested + module_include_switch = '-I' + + def get_flags(self): + return ['-assume no2underscore', '-nomixed_str_len_arg'] + def get_flags_debug(self): + return ['-g', '-check bounds'] + def get_flags_opt(self): + return ['-O4', '-align dcommons', '-assume bigarrays', + '-assume nozsize', '-math_library fast'] + def get_flags_arch(self): + return ['-arch host', '-tune host'] + def get_flags_linker_so(self): + if sys.platform[:5]=='linux': + return ['-shared'] + return ['-shared', '-Wl,-expect_unresolved,*'] + +class CompaqVisualFCompiler(FCompiler): + + compiler_type = 'compaqv' + description = 'DIGITAL or Compaq Visual Fortran Compiler' + version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler' + r' Version (?P[^\s]*).*') + + compile_switch = '/compile_only' + object_switch = '/object:' + library_switch = '/OUT:' #No space after /OUT:! 
+ + static_lib_extension = ".lib" + static_lib_format = "%s%s" + module_dir_switch = '/module:' + module_include_switch = '/I' + + ar_exe = 'lib.exe' + fc_exe = 'DF' + + if sys.platform=='win32': + from numpy.distutils.msvccompiler import MSVCCompiler + + try: + m = MSVCCompiler() + m.initialize() + ar_exe = m.lib + except DistutilsPlatformError: + pass + except AttributeError as e: + if '_MSVCCompiler__root' in str(e): + print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e)) + else: + raise + except OSError as e: + if not "vcvarsall.bat" in str(e): + print("Unexpected OSError in", __file__) + raise + except ValueError as e: + if not "'path'" in str(e): + print("Unexpected ValueError in", __file__) + raise + + executables = { + 'version_cmd' : ['', "/what"], + 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"], + 'compiler_fix' : [fc_exe, "/fixed"], + 'compiler_f90' : [fc_exe], + 'linker_so' : [''], + 'archiver' : [ar_exe, "/OUT:"], + 'ranlib' : None + } + + def get_flags(self): + return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)', + '/names:lowercase', '/assume:underscore'] + def get_flags_opt(self): + return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast'] + def get_flags_arch(self): + return ['/threads'] + def get_flags_debug(self): + return ['/debug'] + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='compaq').get_version()) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/environment.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/environment.py new file mode 100644 index 00000000..ecd4d998 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/environment.py @@ -0,0 +1,88 @@ +import os +from distutils.dist import Distribution + +__metaclass__ = type + +class EnvironmentConfig: + def __init__(self, distutils_section='ALL', **kw): + self._distutils_section = distutils_section + self._conf_keys = kw + self._conf = None + self._hook_handler = None + + def dump_variable(self, name): + conf_desc = self._conf_keys[name] + hook, envvar, confvar, convert, append = conf_desc + if not convert: + convert = lambda x : x + print('%s.%s:' % (self._distutils_section, name)) + v = self._hook_handler(name, hook) + print(' hook : %s' % (convert(v),)) + if envvar: + v = os.environ.get(envvar, None) + print(' environ: %s' % (convert(v),)) + if confvar and self._conf: + v = self._conf.get(confvar, (None, None))[1] + print(' config : %s' % (convert(v),)) + + def dump_variables(self): + for name in self._conf_keys: + self.dump_variable(name) + + def __getattr__(self, name): + try: + conf_desc = self._conf_keys[name] + except KeyError: + raise AttributeError( + f"'EnvironmentConfig' object has no attribute '{name}'" + ) from None + + return self._get_var(name, conf_desc) + + def get(self, name, default=None): + try: + conf_desc = self._conf_keys[name] + except KeyError: + return default + var = self._get_var(name, conf_desc) + if var is None: + var = default + return var + + def _get_var(self, name, conf_desc): + hook, envvar, confvar, convert, append = conf_desc + if convert is None: + convert = lambda x: x + var = self._hook_handler(name, hook) + if envvar is not None: + envvar_contents = os.environ.get(envvar) + if envvar_contents is not None: + envvar_contents = convert(envvar_contents) + if var and append: + if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1': 
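+                        # append mode: flags from the environment extend the
+                        # hook-provided defaults instead of replacing them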
+ var.extend(envvar_contents) + else: + # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0 + # to keep old (overwrite flags rather than append to + # them) behavior + var = envvar_contents + else: + var = envvar_contents + if confvar is not None and self._conf: + if confvar in self._conf: + source, confvar_contents = self._conf[confvar] + var = convert(confvar_contents) + return var + + + def clone(self, hook_handler): + ec = self.__class__(distutils_section=self._distutils_section, + **self._conf_keys) + ec._hook_handler = hook_handler + return ec + + def use_distribution(self, dist): + if isinstance(dist, Distribution): + self._conf = dist.get_option_dict(self._distutils_section) + else: + self._conf = dist diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/fujitsu.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/fujitsu.py new file mode 100644 index 00000000..ddce6745 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/fujitsu.py @@ -0,0 +1,46 @@ +""" +fujitsu + +Supports Fujitsu compiler function. +This compiler is developed by Fujitsu and is used in A64FX on Fugaku. +""" +from numpy.distutils.fcompiler import FCompiler + +compilers = ['FujitsuFCompiler'] + +class FujitsuFCompiler(FCompiler): + compiler_type = 'fujitsu' + description = 'Fujitsu Fortran Compiler' + + possible_executables = ['frt'] + version_pattern = r'frt \(FRT\) (?P[a-z\d.]+)' + # $ frt --version + # frt (FRT) x.x.x yyyymmdd + + executables = { + 'version_cmd' : ["", "--version"], + 'compiler_f77' : ["frt", "-Fixed"], + 'compiler_fix' : ["frt", "-Fixed"], + 'compiler_f90' : ["frt"], + 'linker_so' : ["frt", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + pic_flags = ['-KPIC'] + module_dir_switch = '-M' + module_include_switch = '-I' + + def get_flags_opt(self): + return ['-O3'] + def get_flags_debug(self): + return ['-g'] + def runtime_library_dir_option(self, dir): + return f'-Wl,-rpath={dir}' + def get_libraries(self): + return ['fj90f', 'fj90i', 'fjsrcinfo'] + +if __name__ == '__main__': + from distutils import log + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + print(customized_fcompiler('fujitsu').get_version()) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/g95.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/g95.py new file mode 100644 index 00000000..e109a972 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/g95.py @@ -0,0 +1,42 @@ +# http://g95.sourceforge.net/ +from numpy.distutils.fcompiler import FCompiler + +compilers = ['G95FCompiler'] + +class G95FCompiler(FCompiler): + compiler_type = 'g95' + description = 'G95 Fortran Compiler' + +# version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95!\) (?P.*)\).*' + # $ g95 --version + # G95 (GCC 4.0.3 (g95!) May 22 2006) + + version_pattern = r'G95 \((GCC (?P[\d.]+)|.*?) \(g95 (?P.*)!\) (?P.*)\).*' + # $ g95 --version + # G95 (GCC 4.0.3 (g95 0.90!) 
Aug 22 2006) + + executables = { + 'version_cmd' : ["", "--version"], + 'compiler_f77' : ["g95", "-ffixed-form"], + 'compiler_fix' : ["g95", "-ffixed-form"], + 'compiler_f90' : ["g95"], + 'linker_so' : ["", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + pic_flags = ['-fpic'] + module_dir_switch = '-fmod=' + module_include_switch = '-I' + + def get_flags(self): + return ['-fno-second-underscore'] + def get_flags_opt(self): + return ['-O'] + def get_flags_debug(self): + return ['-g'] + +if __name__ == '__main__': + from distutils import log + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + print(customized_fcompiler('g95').get_version()) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/gnu.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/gnu.py new file mode 100644 index 00000000..3472b5d4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/gnu.py @@ -0,0 +1,555 @@ +import re +import os +import sys +import warnings +import platform +import tempfile +import hashlib +import base64 +import subprocess +from subprocess import Popen, PIPE, STDOUT +from numpy.distutils.exec_command import filepath_from_subprocess_output +from numpy.distutils.fcompiler import FCompiler +from distutils.version import LooseVersion + +compilers = ['GnuFCompiler', 'Gnu95FCompiler'] + +TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)") + +# XXX: handle cross compilation + + +def is_win64(): + return sys.platform == "win32" and platform.architecture()[0] == "64bit" + + +class GnuFCompiler(FCompiler): + compiler_type = 'gnu' + compiler_aliases = ('g77', ) + description = 'GNU Fortran 77 compiler' + + def gnu_version_match(self, version_string): + """Handle the different versions of GNU fortran compilers""" + # Strip warning(s) that may be emitted by gfortran + while version_string.startswith('gfortran: warning'): + version_string =\ + version_string[version_string.find('\n') + 1:].strip() + + # Gfortran versions from after 2010 will output a simple string + # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older + # gfortrans may still return long version strings (``-dumpversion`` was + # an alias for ``--version``) + if len(version_string) <= 20: + # Try to find a valid version string + m = re.search(r'([0-9.]+)', version_string) + if m: + # g77 provides a longer version string that starts with GNU + # Fortran + if version_string.startswith('GNU Fortran'): + return ('g77', m.group(1)) + + # gfortran only outputs a version string such as #.#.#, so check + # if the match is at the start of the string + elif m.start() == 0: + return ('gfortran', m.group(1)) + else: + # Output probably from --version, try harder: + m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) + if m: + return ('gfortran', m.group(1)) + m = re.search( + r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string) + if m: + v = m.group(1) + if v.startswith('0') or v.startswith('2') or v.startswith('3'): + # the '0' is for early g77's + return ('g77', v) + else: + # at some point in the 4.x series, the ' 95' was dropped + # from the version string + return ('gfortran', v) + + # If still nothing, raise an error to make the problem easy to find. 
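+        # Typical inputs and results for this matcher (illustrative, drawn
+        # from the version formats handled above):
+        #
+        #   '4.8.5'                                    -> ('gfortran', '4.8.5')
+        #   'GNU Fortran 0.5.25 20010319 (prerelease)' -> ('g77', '0.5.25')
+        #   'GNU Fortran (GCC) 3.3.3 (Debian)'         -> ('g77', '3.3.3')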
+ err = 'A valid Fortran version was not found in this string:\n' + raise ValueError(err + version_string) + + def version_match(self, version_string): + v = self.gnu_version_match(version_string) + if not v or v[0] != 'g77': + return None + return v[1] + + possible_executables = ['g77', 'f77'] + executables = { + 'version_cmd' : [None, "-dumpversion"], + 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], + 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes + 'compiler_fix' : None, + 'linker_so' : [None, "-g", "-Wall"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"], + 'linker_exe' : [None, "-g", "-Wall"] + } + module_dir_switch = None + module_include_switch = None + + # Cygwin: f771: warning: -fPIC ignored for target (all code is + # position independent) + if os.name != 'nt' and sys.platform != 'cygwin': + pic_flags = ['-fPIC'] + + # use -mno-cygwin for g77 when Python is not Cygwin-Python + if sys.platform == 'win32': + for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: + executables[key].append('-mno-cygwin') + + g2c = 'g2c' + suggested_f90_compiler = 'gnu95' + + def get_flags_linker_so(self): + opt = self.linker_so[1:] + if sys.platform == 'darwin': + target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) + # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value + # and leave it alone. But, distutils will complain if the + # environment's value is different from the one in the Python + # Makefile used to build Python. We let distutils handle this + # error checking. + if not target: + # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, + # we try to get it first from sysconfig and then + # fall back to setting it to 10.9 This is a reasonable default + # even when using the official Python dist and those derived + # from it. + import sysconfig + target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') + if not target: + target = '10.9' + s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}' + warnings.warn(s, stacklevel=2) + os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target) + opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) + else: + opt.append("-shared") + if sys.platform.startswith('sunos'): + # SunOS often has dynamically loaded symbols defined in the + # static library libg2c.a The linker doesn't like this. To + # ignore the problem, use the -mimpure-text flag. It isn't + # the safest thing, but seems to work. 'man gcc' says: + # ".. Instead of using -mimpure-text, you should compile all + # source code with -fpic or -fPIC." 
+ opt.append('-mimpure-text') + return opt + + def get_libgcc_dir(self): + try: + output = subprocess.check_output(self.compiler_f77 + + ['-print-libgcc-file-name']) + except (OSError, subprocess.CalledProcessError): + pass + else: + output = filepath_from_subprocess_output(output) + return os.path.dirname(output) + return None + + def get_libgfortran_dir(self): + if sys.platform[:5] == 'linux': + libgfortran_name = 'libgfortran.so' + elif sys.platform == 'darwin': + libgfortran_name = 'libgfortran.dylib' + else: + libgfortran_name = None + + libgfortran_dir = None + if libgfortran_name: + find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)] + try: + output = subprocess.check_output( + self.compiler_f77 + find_lib_arg) + except (OSError, subprocess.CalledProcessError): + pass + else: + output = filepath_from_subprocess_output(output) + libgfortran_dir = os.path.dirname(output) + return libgfortran_dir + + def get_library_dirs(self): + opt = [] + if sys.platform[:5] != 'linux': + d = self.get_libgcc_dir() + if d: + # if windows and not cygwin, libg2c lies in a different folder + if sys.platform == 'win32' and not d.startswith('/usr/lib'): + d = os.path.normpath(d) + path = os.path.join(d, "lib%s.a" % self.g2c) + if not os.path.exists(path): + root = os.path.join(d, *((os.pardir, ) * 4)) + d2 = os.path.abspath(os.path.join(root, 'lib')) + path = os.path.join(d2, "lib%s.a" % self.g2c) + if os.path.exists(path): + opt.append(d2) + opt.append(d) + # For Macports / Linux, libgfortran and libgcc are not co-located + lib_gfortran_dir = self.get_libgfortran_dir() + if lib_gfortran_dir: + opt.append(lib_gfortran_dir) + return opt + + def get_libraries(self): + opt = [] + d = self.get_libgcc_dir() + if d is not None: + g2c = self.g2c + '-pic' + f = self.static_lib_format % (g2c, self.static_lib_extension) + if not os.path.isfile(os.path.join(d, f)): + g2c = self.g2c + else: + g2c = self.g2c + + if g2c is not None: + opt.append(g2c) + c_compiler = self.c_compiler + if sys.platform == 'win32' and c_compiler and \ + c_compiler.compiler_type == 'msvc': + opt.append('gcc') + if sys.platform == 'darwin': + opt.append('cc_dynamic') + return opt + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + v = self.get_version() + if v and v <= '3.3.3': + # With this compiler version building Fortran BLAS/LAPACK + # with -O3 caused failures in lib.lapack heevr,syevr tests. 
+ opt = ['-O2'] + else: + opt = ['-O3'] + opt.append('-funroll-loops') + return opt + + def _c_arch_flags(self): + """ Return detected arch flags from CFLAGS """ + import sysconfig + try: + cflags = sysconfig.get_config_vars()['CFLAGS'] + except KeyError: + return [] + arch_re = re.compile(r"-arch\s+(\w+)") + arch_flags = [] + for arch in arch_re.findall(cflags): + arch_flags += ['-arch', arch] + return arch_flags + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + if sys.platform == 'win32' or sys.platform == 'cygwin': + # Linux/Solaris/Unix support RPATH, Windows does not + raise NotImplementedError + + # TODO: could use -Xlinker here, if it's supported + assert "," not in dir + + if sys.platform == 'darwin': + return f'-Wl,-rpath,{dir}' + elif sys.platform.startswith(('aix', 'os400')): + # AIX RPATH is called LIBPATH + return f'-Wl,-blibpath:{dir}' + else: + return f'-Wl,-rpath={dir}' + + +class Gnu95FCompiler(GnuFCompiler): + compiler_type = 'gnu95' + compiler_aliases = ('gfortran', ) + description = 'GNU Fortran 95 compiler' + + def version_match(self, version_string): + v = self.gnu_version_match(version_string) + if not v or v[0] != 'gfortran': + return None + v = v[1] + if LooseVersion(v) >= "4": + # gcc-4 series releases do not support -mno-cygwin option + pass + else: + # use -mno-cygwin flag for gfortran when Python is not + # Cygwin-Python + if sys.platform == 'win32': + for key in [ + 'version_cmd', 'compiler_f77', 'compiler_f90', + 'compiler_fix', 'linker_so', 'linker_exe' + ]: + self.executables[key].append('-mno-cygwin') + return v + + possible_executables = ['gfortran', 'f95'] + executables = { + 'version_cmd' : ["", "-dumpversion"], + 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", + "-fno-second-underscore"], + 'compiler_f90' : [None, "-Wall", "-g", + "-fno-second-underscore"], + 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", + "-fno-second-underscore"], + 'linker_so' : ["", "-Wall", "-g"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"], + 'linker_exe' : [None, "-Wall"] + } + + module_dir_switch = '-J' + module_include_switch = '-I' + + if sys.platform.startswith(('aix', 'os400')): + executables['linker_so'].append('-lpthread') + if platform.architecture()[0][:2] == '64': + for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']: + executables[key].append('-maix64') + + g2c = 'gfortran' + + def _universal_flags(self, cmd): + """Return a list of -arch flags for every supported architecture.""" + if not sys.platform == 'darwin': + return [] + arch_flags = [] + # get arches the C compiler gets. 
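+        # Illustrative end-to-end example: with CFLAGS='-arch i386 -arch
+        # x86_64' and a gfortran able to target both, _universal_flags()
+        # yields ['-arch', 'i686', '-arch', 'x86_64'] (the i386 spelling
+        # from CFLAGS is respelled i686 for the Fortran side).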
+ c_archs = self._c_arch_flags() + if "i386" in c_archs: + c_archs[c_archs.index("i386")] = "i686" + # check the arches the Fortran compiler supports, and compare with + # arch flags from C compiler + for arch in ["ppc", "i686", "x86_64", "ppc64", "s390x"]: + if _can_target(cmd, arch) and arch in c_archs: + arch_flags.extend(["-arch", arch]) + return arch_flags + + def get_flags(self): + flags = GnuFCompiler.get_flags(self) + arch_flags = self._universal_flags(self.compiler_f90) + if arch_flags: + flags[:0] = arch_flags + return flags + + def get_flags_linker_so(self): + flags = GnuFCompiler.get_flags_linker_so(self) + arch_flags = self._universal_flags(self.linker_so) + if arch_flags: + flags[:0] = arch_flags + return flags + + def get_library_dirs(self): + opt = GnuFCompiler.get_library_dirs(self) + if sys.platform == 'win32': + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + target = self.get_target() + if target: + d = os.path.normpath(self.get_libgcc_dir()) + root = os.path.join(d, *((os.pardir, ) * 4)) + path = os.path.join(root, "lib") + mingwdir = os.path.normpath(path) + if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): + opt.append(mingwdir) + # For Macports / Linux, libgfortran and libgcc are not co-located + lib_gfortran_dir = self.get_libgfortran_dir() + if lib_gfortran_dir: + opt.append(lib_gfortran_dir) + return opt + + def get_libraries(self): + opt = GnuFCompiler.get_libraries(self) + if sys.platform == 'darwin': + opt.remove('cc_dynamic') + if sys.platform == 'win32': + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + if "gcc" in opt: + i = opt.index("gcc") + opt.insert(i + 1, "mingwex") + opt.insert(i + 1, "mingw32") + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + return [] + else: + pass + return opt + + def get_target(self): + try: + p = subprocess.Popen( + self.compiler_f77 + ['-v'], + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + stdout, stderr = p.communicate() + output = (stdout or b"") + (stderr or b"") + except (OSError, subprocess.CalledProcessError): + pass + else: + output = filepath_from_subprocess_output(output) + m = TARGET_R.search(output) + if m: + return m.group(1) + return "" + + def _hash_files(self, filenames): + h = hashlib.sha1() + for fn in filenames: + with open(fn, 'rb') as f: + while True: + block = f.read(131072) + if not block: + break + h.update(block) + text = base64.b32encode(h.digest()) + text = text.decode('ascii') + return text.rstrip('=') + + def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, + chained_dlls, is_archive): + """Create a wrapper shared library for the given objects + + Return an MSVC-compatible lib + """ + + c_compiler = self.c_compiler + if c_compiler.compiler_type != "msvc": + raise ValueError("This method only supports MSVC") + + object_hash = self._hash_files(list(objects) + list(chained_dlls)) + + if is_win64(): + tag = 'win_amd64' + else: + tag = 'win32' + + basename = 'lib' + os.path.splitext( + os.path.basename(objects[0]))[0][:8] + root_name = basename + '.' 
+ object_hash + '.gfortran-' + tag + dll_name = root_name + '.dll' + def_name = root_name + '.def' + lib_name = root_name + '.lib' + dll_path = os.path.join(extra_dll_dir, dll_name) + def_path = os.path.join(output_dir, def_name) + lib_path = os.path.join(output_dir, lib_name) + + if os.path.isfile(lib_path): + # Nothing to do + return lib_path, dll_path + + if is_archive: + objects = (["-Wl,--whole-archive"] + list(objects) + + ["-Wl,--no-whole-archive"]) + self.link_shared_object( + objects, + dll_name, + output_dir=extra_dll_dir, + extra_postargs=list(chained_dlls) + [ + '-Wl,--allow-multiple-definition', + '-Wl,--output-def,' + def_path, + '-Wl,--export-all-symbols', + '-Wl,--enable-auto-import', + '-static', + '-mlong-double-64', + ]) + + # No PowerPC! + if is_win64(): + specifier = '/MACHINE:X64' + else: + specifier = '/MACHINE:X86' + + # MSVC specific code + lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier] + if not c_compiler.initialized: + c_compiler.initialize() + c_compiler.spawn([c_compiler.lib] + lib_args) + + return lib_path, dll_path + + def can_ccompiler_link(self, compiler): + # MSVC cannot link objects compiled by GNU fortran + return compiler.compiler_type not in ("msvc", ) + + def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): + """ + Convert a set of object files that are not compatible with the default + linker, to a file that is compatible. + """ + if self.c_compiler.compiler_type == "msvc": + # Compile a DLL and return the lib for the DLL as + # the object. Also keep track of previous DLLs that + # we have compiled so that we can link against them. + + # If there are .a archives, assume they are self-contained + # static libraries, and build separate DLLs for each + archives = [] + plain_objects = [] + for obj in objects: + if obj.lower().endswith('.a'): + archives.append(obj) + else: + plain_objects.append(obj) + + chained_libs = [] + chained_dlls = [] + for archive in archives[::-1]: + lib, dll = self._link_wrapper_lib( + [archive], + output_dir, + extra_dll_dir, + chained_dlls=chained_dlls, + is_archive=True) + chained_libs.insert(0, lib) + chained_dlls.insert(0, dll) + + if not plain_objects: + return chained_libs + + lib, dll = self._link_wrapper_lib( + plain_objects, + output_dir, + extra_dll_dir, + chained_dlls=chained_dlls, + is_archive=False) + return [lib] + chained_libs + else: + raise ValueError("Unsupported C compiler") + + +def _can_target(cmd, arch): + """Return true if the architecture supports the -arch flag""" + newcmd = cmd[:] + fid, filename = tempfile.mkstemp(suffix=".f") + os.close(fid) + try: + d = os.path.dirname(filename) + output = os.path.splitext(filename)[0] + ".o" + try: + newcmd.extend(["-arch", arch, "-c", filename]) + p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) + p.communicate() + return p.returncode == 0 + finally: + if os.path.exists(output): + os.remove(output) + finally: + os.remove(filename) + + +if __name__ == '__main__': + from distutils import log + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + + print(customized_fcompiler('gnu').get_version()) + try: + print(customized_fcompiler('g95').get_version()) + except Exception as e: + print(e) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py new file mode 100644 index 00000000..09e6483b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py @@ -0,0 +1,41 @@ +from 
numpy.distutils.fcompiler import FCompiler
+
+compilers = ['HPUXFCompiler']
+
+class HPUXFCompiler(FCompiler):
+
+    compiler_type = 'hpux'
+    description = 'HP Fortran 90 Compiler'
+    version_pattern = r'HP F90 (?P<version>[^\s*,]*)'
+
+    executables = {
+        'version_cmd'  : ["f90", "+version"],
+        'compiler_f77' : ["f90"],
+        'compiler_fix' : ["f90"],
+        'compiler_f90' : ["f90"],
+        'linker_so'    : ["ld", "-b"],
+        'archiver'     : ["ar", "-cr"],
+        'ranlib'       : ["ranlib"]
+        }
+    module_dir_switch = None #XXX: fix me
+    module_include_switch = None #XXX: fix me
+    pic_flags = ['+Z']
+    def get_flags(self):
+        return self.pic_flags + ['+ppu', '+DD64']
+    def get_flags_opt(self):
+        return ['-O3']
+    def get_libraries(self):
+        return ['m']
+    def get_library_dirs(self):
+        opt = ['/usr/lib/hpux64']
+        return opt
+    def get_version(self, force=0, ok_status=[256, 0, 1]):
+        # XXX status==256 may indicate 'unrecognized option' or
+        # 'no input file'. So, version_cmd needs more work.
+        return FCompiler.get_version(self, force, ok_status)
+
+if __name__ == '__main__':
+    from distutils import log
+    log.set_verbosity(10)
+    from numpy.distutils import customized_fcompiler
+    print(customized_fcompiler(compiler='hpux').get_version())
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/ibm.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/ibm.py
new file mode 100644
index 00000000..29927518
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/ibm.py
@@ -0,0 +1,97 @@
+import os
+import re
+import sys
+import subprocess
+
+from numpy.distutils.fcompiler import FCompiler
+from numpy.distutils.exec_command import find_executable
+from numpy.distutils.misc_util import make_temp_file
+from distutils import log
+
+compilers = ['IBMFCompiler']
+
+class IBMFCompiler(FCompiler):
+    compiler_type = 'ibm'
+    description = 'IBM XL Fortran Compiler'
+    version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)'
+    #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004
+
+    executables = {
+        'version_cmd'  : ["<F90>", "-qversion"],
+        'compiler_f77' : ["xlf"],
+        'compiler_fix' : ["xlf90", "-qfixed"],
+        'compiler_f90' : ["xlf90"],
+        'linker_so'    : ["xlf95"],
+        'archiver'     : ["ar", "-cr"],
+        'ranlib'       : ["ranlib"]
+        }
+
+    def get_version(self,*args,**kwds):
+        version = FCompiler.get_version(self,*args,**kwds)
+
+        if version is None and sys.platform.startswith('aix'):
+            # use lslpp to find out xlf version
+            lslpp = find_executable('lslpp')
+            xlf = find_executable('xlf')
+            if os.path.exists(xlf) and os.path.exists(lslpp):
+                try:
+                    o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp'])
+                except (OSError, subprocess.CalledProcessError):
+                    pass
+                else:
+                    m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o)
+                    if m: version = m.group('version')
+
+        xlf_dir = '/etc/opt/ibmcmp/xlf'
+        if version is None and os.path.isdir(xlf_dir):
+            # linux:
+            # If the output of xlf does not contain version info
+            # (that's the case with xlf 8.1, for instance) then
+            # let's try another method:
+            l = sorted(os.listdir(xlf_dir))
+            l.reverse()
+            l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))]
+            if l:
+                from distutils.version import LooseVersion
+                self.version = version = LooseVersion(l[0])
+        return version
+
+    def get_flags(self):
+        return ['-qextname']
+
+    def get_flags_debug(self):
+        return ['-g']
+
+    def get_flags_linker_so(self):
+        opt = []
+        if sys.platform=='darwin':
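+            # macOS: produce a loadable bundle and defer undefined-symbol
+            # resolution to load time ('-undefined suppress' is only honored
+            # together with '-flat_namespace')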
opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') + else: + opt.append('-bshared') + version = self.get_version(ok_status=[0, 40]) + if version is not None: + if sys.platform.startswith('aix'): + xlf_cfg = '/etc/xlf.cfg' + else: + xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version + fo, new_cfg = make_temp_file(suffix='_xlf.cfg') + log.info('Creating '+new_cfg) + with open(xlf_cfg) as fi: + crt1_match = re.compile(r'\s*crt\s*=\s*(?P.*)/crt1.o').match + for line in fi: + m = crt1_match(line) + if m: + fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) + else: + fo.write(line) + fo.close() + opt.append('-F'+new_cfg) + return opt + + def get_flags_opt(self): + return ['-O3'] + +if __name__ == '__main__': + from numpy.distutils import customized_fcompiler + log.set_verbosity(2) + print(customized_fcompiler(compiler='ibm').get_version()) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/intel.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/intel.py new file mode 100644 index 00000000..1d606590 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/intel.py @@ -0,0 +1,211 @@ +# http://developer.intel.com/software/products/compilers/flin/ +import sys + +from numpy.distutils.ccompiler import simple_version_match +from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file + +compilers = ['IntelFCompiler', 'IntelVisualFCompiler', + 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', + 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] + + +def intel_version_match(type): + # Match against the important stuff in the version string + return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) + + +class BaseIntelFCompiler(FCompiler): + def update_executables(self): + f = dummy_fortran_file() + self.executables['version_cmd'] = ['', '-FI', '-V', '-c', + f + '.f', '-o', f + '.o'] + + def runtime_library_dir_option(self, dir): + # TODO: could use -Xlinker here, if it's supported + assert "," not in dir + + return '-Wl,-rpath=%s' % dir + + +class IntelFCompiler(BaseIntelFCompiler): + + compiler_type = 'intel' + compiler_aliases = ('ifort',) + description = 'Intel Fortran Compiler for 32-bit apps' + version_match = intel_version_match('32-bit|IA-32') + + possible_executables = ['ifort', 'ifc'] + + executables = { + 'version_cmd' : None, # set by update_executables + 'compiler_f77' : [None, "-72", "-w90", "-w95"], + 'compiler_f90' : [None], + 'compiler_fix' : [None, "-FI"], + 'linker_so' : ["", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + pic_flags = ['-fPIC'] + module_dir_switch = '-module ' # Don't remove ending space! 
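+    # On the assert in runtime_library_dir_option above: the compiler driver
+    # splits '-Wl,...' at commas before handing arguments to the linker, so a
+    # comma inside dir would truncate the rpath (e.g. '-Wl,-rpath=/opt/a,b'
+    # reaches the linker as '-rpath=/opt/a' plus a stray 'b').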
+ module_include_switch = '-I' + + def get_flags_free(self): + return ['-FR'] + + def get_flags(self): + return ['-fPIC'] + + def get_flags_opt(self): # Scipy test failures with -O2 + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + return ['-fp-model', 'strict', '-O1', + '-assume', 'minus0', '-{}'.format(mpopt)] + + def get_flags_arch(self): + return [] + + def get_flags_linker_so(self): + opt = FCompiler.get_flags_linker_so(self) + v = self.get_version() + if v and v >= '8.0': + opt.append('-nofor_main') + if sys.platform == 'darwin': + # Here, it's -dynamiclib + try: + idx = opt.index('-shared') + opt.remove('-shared') + except ValueError: + idx = 0 + opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] + return opt + + +class IntelItaniumFCompiler(IntelFCompiler): + compiler_type = 'intele' + compiler_aliases = () + description = 'Intel Fortran Compiler for Itanium apps' + + version_match = intel_version_match('Itanium|IA-64') + + possible_executables = ['ifort', 'efort', 'efc'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI"], + 'compiler_f90' : [None], + 'linker_so' : ['', "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + +class IntelEM64TFCompiler(IntelFCompiler): + compiler_type = 'intelem' + compiler_aliases = () + description = 'Intel Fortran Compiler for 64-bit apps' + + version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') + + possible_executables = ['ifort', 'efort', 'efc'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI"], + 'compiler_fix' : [None, "-FI"], + 'compiler_f90' : [None], + 'linker_so' : ['', "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + +# Is there no difference in the version string between the above compilers +# and the Visual compilers? + + +class IntelVisualFCompiler(BaseIntelFCompiler): + compiler_type = 'intelv' + description = 'Intel Visual Fortran Compiler for 32-bit apps' + version_match = intel_version_match('32-bit|IA-32') + + def update_executables(self): + f = dummy_fortran_file() + self.executables['version_cmd'] = ['', '/FI', '/c', + f + '.f', '/o', f + '.o'] + + ar_exe = 'lib.exe' + possible_executables = ['ifort', 'ifl'] + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None], + 'compiler_fix' : [None], + 'compiler_f90' : [None], + 'linker_so' : [None], + 'archiver' : [ar_exe, "/verbose", "/OUT:"], + 'ranlib' : None + } + + compile_switch = '/c ' + object_switch = '/Fo' # No space after /Fo! + library_switch = '/OUT:' # No space after /OUT:! 
+ module_dir_switch = '/module:' # No space after /module: + module_include_switch = '/I' + + def get_flags(self): + opt = ['/nologo', '/MD', '/nbs', '/names:lowercase', + '/assume:underscore', '/fpp'] + return opt + + def get_flags_free(self): + return [] + + def get_flags_debug(self): + return ['/4Yb', '/d2'] + + def get_flags_opt(self): + return ['/O1', '/assume:minus0'] # Scipy test failures with /O2 + + def get_flags_arch(self): + return ["/arch:IA32", "/QaxSSE3"] + + def runtime_library_dir_option(self, dir): + raise NotImplementedError + + +class IntelItaniumVisualFCompiler(IntelVisualFCompiler): + compiler_type = 'intelev' + description = 'Intel Visual Fortran Compiler for Itanium apps' + + version_match = intel_version_match('Itanium') + + possible_executables = ['efl'] # XXX this is a wild guess + ar_exe = IntelVisualFCompiler.ar_exe + + executables = { + 'version_cmd' : None, + 'compiler_f77' : [None, "-FI", "-w90", "-w95"], + 'compiler_fix' : [None, "-FI", "-4L72", "-w"], + 'compiler_f90' : [None], + 'linker_so' : ['', "-shared"], + 'archiver' : [ar_exe, "/verbose", "/OUT:"], + 'ranlib' : None + } + + +class IntelEM64VisualFCompiler(IntelVisualFCompiler): + compiler_type = 'intelvem' + description = 'Intel Visual Fortran Compiler for 64-bit apps' + + version_match = simple_version_match(start=r'Intel\(R\).*?64,') + + def get_flags_arch(self): + return [] + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='intel').get_version()) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/lahey.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/lahey.py new file mode 100644 index 00000000..e9258382 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/lahey.py @@ -0,0 +1,45 @@ +import os + +from numpy.distutils.fcompiler import FCompiler + +compilers = ['LaheyFCompiler'] + +class LaheyFCompiler(FCompiler): + + compiler_type = 'lahey' + description = 'Lahey/Fujitsu Fortran 95 Compiler' + version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P[^\s*]*)' + + executables = { + 'version_cmd' : ["", "--version"], + 'compiler_f77' : ["lf95", "--fix"], + 'compiler_fix' : ["lf95", "--fix"], + 'compiler_f90' : ["lf95"], + 'linker_so' : ["lf95", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + module_dir_switch = None #XXX Fix me + module_include_switch = None #XXX Fix me + + def get_flags_opt(self): + return ['-O'] + def get_flags_debug(self): + return ['-g', '--chk', '--chkglobal'] + def get_library_dirs(self): + opt = [] + d = os.environ.get('LAHEY') + if d: + opt.append(os.path.join(d, 'lib')) + return opt + def get_libraries(self): + opt = [] + opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']) + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='lahey').get_version()) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/mips.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/mips.py new file mode 100644 index 00000000..a0973804 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/mips.py @@ -0,0 +1,54 @@ +from numpy.distutils.cpuinfo import cpu +from numpy.distutils.fcompiler import FCompiler + +compilers = ['MIPSFCompiler'] + +class MIPSFCompiler(FCompiler): + + 
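+    # get_flags_arch* below derive the TARG/r-series flags from
+    # numpy.distutils.cpuinfo probes (cpu.is_IP19() and friends identify
+    # the SGI IRIX host model).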
compiler_type = 'mips' + description = 'MIPSpro Fortran Compiler' + version_pattern = r'MIPSpro Compilers: Version (?P[^\s*,]*)' + + executables = { + 'version_cmd' : ["", "-version"], + 'compiler_f77' : ["f77", "-f77"], + 'compiler_fix' : ["f90", "-fixedform"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["f90", "-shared"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : None + } + module_dir_switch = None #XXX: fix me + module_include_switch = None #XXX: fix me + pic_flags = ['-KPIC'] + + def get_flags(self): + return self.pic_flags + ['-n32'] + def get_flags_opt(self): + return ['-O3'] + def get_flags_arch(self): + opt = [] + for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): + if getattr(cpu, 'is_IP%s'%a)(): + opt.append('-TARG:platform=IP%s' % a) + break + return opt + def get_flags_arch_f77(self): + r = None + if cpu.is_r10000(): r = 10000 + elif cpu.is_r12000(): r = 12000 + elif cpu.is_r8000(): r = 8000 + elif cpu.is_r5000(): r = 5000 + elif cpu.is_r4000(): r = 4000 + if r is not None: + return ['r%s' % (r)] + return [] + def get_flags_arch_f90(self): + r = self.get_flags_arch_f77() + if r: + r[0] = '-' + r[0] + return r + +if __name__ == '__main__': + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='mips').get_version()) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/nag.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/nag.py new file mode 100644 index 00000000..939201f4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/nag.py @@ -0,0 +1,87 @@ +import sys +import re +from numpy.distutils.fcompiler import FCompiler + +compilers = ['NAGFCompiler', 'NAGFORCompiler'] + +class BaseNAGFCompiler(FCompiler): + version_pattern = r'NAG.* Release (?P[^(\s]*)' + + def version_match(self, version_string): + m = re.search(self.version_pattern, version_string) + if m: + return m.group('version') + else: + return None + + def get_flags_linker_so(self): + return ["-Wl,-shared"] + def get_flags_opt(self): + return ['-O4'] + def get_flags_arch(self): + return [] + +class NAGFCompiler(BaseNAGFCompiler): + + compiler_type = 'nag' + description = 'NAGWare Fortran 95 Compiler' + + executables = { + 'version_cmd' : ["", "-V"], + 'compiler_f77' : ["f95", "-fixed"], + 'compiler_fix' : ["f95", "-fixed"], + 'compiler_f90' : ["f95"], + 'linker_so' : [""], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_flags_linker_so(self): + if sys.platform == 'darwin': + return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] + return BaseNAGFCompiler.get_flags_linker_so(self) + def get_flags_arch(self): + version = self.get_version() + if version and version < '5.1': + return ['-target=native'] + else: + return BaseNAGFCompiler.get_flags_arch(self) + def get_flags_debug(self): + return ['-g', '-gline', '-g90', '-nan', '-C'] + +class NAGFORCompiler(BaseNAGFCompiler): + + compiler_type = 'nagfor' + description = 'NAG Fortran Compiler' + + executables = { + 'version_cmd' : ["nagfor", "-V"], + 'compiler_f77' : ["nagfor", "-fixed"], + 'compiler_fix' : ["nagfor", "-fixed"], + 'compiler_f90' : ["nagfor"], + 'linker_so' : ["nagfor"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + + def get_flags_linker_so(self): + if sys.platform == 'darwin': + return ['-unsharedrts', + '-Wl,-bundle,-flat_namespace,-undefined,suppress'] + return BaseNAGFCompiler.get_flags_linker_so(self) + def get_flags_debug(self): + version = self.get_version() + if 
version and version > '6.1': + return ['-g', '-u', '-nan', '-C=all', '-thread_safe', + '-kind=unique', '-Warn=allocation', '-Warn=subnormal'] + else: + return ['-g', '-nan', '-C=all', '-u', '-thread_safe'] + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + compiler = customized_fcompiler(compiler='nagfor') + print(compiler.get_version()) + print(compiler.get_flags_debug()) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/none.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/none.py new file mode 100644 index 00000000..ef411fff --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/none.py @@ -0,0 +1,28 @@ +from numpy.distutils.fcompiler import FCompiler +from numpy.distutils import customized_fcompiler + +compilers = ['NoneFCompiler'] + +class NoneFCompiler(FCompiler): + + compiler_type = 'none' + description = 'Fake Fortran compiler' + + executables = {'compiler_f77': None, + 'compiler_f90': None, + 'compiler_fix': None, + 'linker_so': None, + 'linker_exe': None, + 'archiver': None, + 'ranlib': None, + 'version_cmd': None, + } + + def find_executables(self): + pass + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + print(customized_fcompiler(compiler='none').get_version()) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/nv.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/nv.py new file mode 100644 index 00000000..212f3480 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/nv.py @@ -0,0 +1,53 @@ +from numpy.distutils.fcompiler import FCompiler + +compilers = ['NVHPCFCompiler'] + +class NVHPCFCompiler(FCompiler): + """ NVIDIA High Performance Computing (HPC) SDK Fortran Compiler + + https://developer.nvidia.com/hpc-sdk + + Since august 2020 the NVIDIA HPC SDK includes the compilers formerly known as The Portland Group compilers, + https://www.pgroup.com/index.htm. + See also `numpy.distutils.fcompiler.pg`. 
+    """
+
+    compiler_type = 'nv'
+    description = 'NVIDIA HPC SDK'
+    version_pattern = r'\s*(nvfortran|(pg(f77|f90|fortran)) \(aka nvfortran\)) (?P<version>[\d.-]+).*'
+
+    executables = {
+        'version_cmd': ["<F90>", "-V"],
+        'compiler_f77': ["nvfortran"],
+        'compiler_fix': ["nvfortran", "-Mfixed"],
+        'compiler_f90': ["nvfortran"],
+        'linker_so': ["<F90>"],
+        'archiver': ["ar", "-cr"],
+        'ranlib': ["ranlib"]
+    }
+    pic_flags = ['-fpic']
+
+    module_dir_switch = '-module '
+    module_include_switch = '-I'
+
+    def get_flags(self):
+        opt = ['-Minform=inform', '-Mnosecond_underscore']
+        return self.pic_flags + opt
+
+    def get_flags_opt(self):
+        return ['-fast']
+
+    def get_flags_debug(self):
+        return ['-g']
+
+    def get_flags_linker_so(self):
+        return ["-shared", '-fpic']
+
+    def runtime_library_dir_option(self, dir):
+        return '-R%s' % dir
+
+if __name__ == '__main__':
+    from distutils import log
+    log.set_verbosity(2)
+    from numpy.distutils import customized_fcompiler
+    print(customized_fcompiler(compiler='nv').get_version())
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/pathf95.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/pathf95.py
new file mode 100644
index 00000000..0768cb12
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/pathf95.py
@@ -0,0 +1,33 @@
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['PathScaleFCompiler']
+
+class PathScaleFCompiler(FCompiler):
+
+    compiler_type = 'pathf95'
+    description = 'PathScale Fortran Compiler'
+    version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P<version>[\d.]+)'
+
+    executables = {
+        'version_cmd'  : ["pathf95", "-version"],
+        'compiler_f77' : ["pathf95", "-fixedform"],
+        'compiler_fix' : ["pathf95", "-fixedform"],
+        'compiler_f90' : ["pathf95"],
+        'linker_so'    : ["pathf95", "-shared"],
+        'archiver'     : ["ar", "-cr"],
+        'ranlib'       : ["ranlib"]
+    }
+    pic_flags = ['-fPIC']
+    module_dir_switch = '-module ' # Don't remove ending space!
+ module_include_switch = '-I' + + def get_flags_opt(self): + return ['-O3'] + def get_flags_debug(self): + return ['-g'] + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='pathf95').get_version()) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/pg.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/pg.py new file mode 100644 index 00000000..72442c4f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/pg.py @@ -0,0 +1,128 @@ +# http://www.pgroup.com +import sys + +from numpy.distutils.fcompiler import FCompiler +from sys import platform +from os.path import join, dirname, normpath + +compilers = ['PGroupFCompiler', 'PGroupFlangCompiler'] + + +class PGroupFCompiler(FCompiler): + + compiler_type = 'pg' + description = 'Portland Group Fortran Compiler' + version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P[\d.-]+).*' + + if platform == 'darwin': + executables = { + 'version_cmd': ["", "-V"], + 'compiler_f77': ["pgfortran", "-dynamiclib"], + 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"], + 'compiler_f90': ["pgfortran", "-dynamiclib"], + 'linker_so': ["libtool"], + 'archiver': ["ar", "-cr"], + 'ranlib': ["ranlib"] + } + pic_flags = [''] + else: + executables = { + 'version_cmd': ["", "-V"], + 'compiler_f77': ["pgfortran"], + 'compiler_fix': ["pgfortran", "-Mfixed"], + 'compiler_f90': ["pgfortran"], + 'linker_so': [""], + 'archiver': ["ar", "-cr"], + 'ranlib': ["ranlib"] + } + pic_flags = ['-fpic'] + + module_dir_switch = '-module ' + module_include_switch = '-I' + + def get_flags(self): + opt = ['-Minform=inform', '-Mnosecond_underscore'] + return self.pic_flags + opt + + def get_flags_opt(self): + return ['-fast'] + + def get_flags_debug(self): + return ['-g'] + + if platform == 'darwin': + def get_flags_linker_so(self): + return ["-dynamic", '-undefined', 'dynamic_lookup'] + + else: + def get_flags_linker_so(self): + return ["-shared", '-fpic'] + + def runtime_library_dir_option(self, dir): + return '-R%s' % dir + + +import functools + +class PGroupFlangCompiler(FCompiler): + compiler_type = 'flang' + description = 'Portland Group Fortran LLVM Compiler' + version_pattern = r'\s*(flang|clang) version (?P[\d.-]+).*' + + ar_exe = 'lib.exe' + possible_executables = ['flang'] + + executables = { + 'version_cmd': ["", "--version"], + 'compiler_f77': ["flang"], + 'compiler_fix': ["flang"], + 'compiler_f90': ["flang"], + 'linker_so': [None], + 'archiver': [ar_exe, "/verbose", "/OUT:"], + 'ranlib': None + } + + library_switch = '/OUT:' # No space after /OUT:! + module_dir_switch = '-module ' # Don't remove ending space! 
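+    # The lib.exe archiver and fused '/OUT:' switch above mark this flang
+    # class as Windows-oriented; runtime_library_dir_option below is
+    # accordingly left unimplemented.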
+ + def get_libraries(self): + opt = FCompiler.get_libraries(self) + opt.extend(['flang', 'flangrti', 'ompstub']) + return opt + + @functools.lru_cache(maxsize=128) + def get_library_dirs(self): + """List of compiler library directories.""" + opt = FCompiler.get_library_dirs(self) + flang_dir = dirname(self.executables['compiler_f77'][0]) + opt.append(normpath(join(flang_dir, '..', 'lib'))) + + return opt + + def get_flags(self): + return [] + + def get_flags_free(self): + return [] + + def get_flags_debug(self): + return ['-g'] + + def get_flags_opt(self): + return ['-O3'] + + def get_flags_arch(self): + return [] + + def runtime_library_dir_option(self, dir): + raise NotImplementedError + + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + if 'flang' in sys.argv: + print(customized_fcompiler(compiler='flang').get_version()) + else: + print(customized_fcompiler(compiler='pg').get_version()) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/sun.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/sun.py new file mode 100644 index 00000000..d039f0b2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/sun.py @@ -0,0 +1,51 @@ +from numpy.distutils.ccompiler import simple_version_match +from numpy.distutils.fcompiler import FCompiler + +compilers = ['SunFCompiler'] + +class SunFCompiler(FCompiler): + + compiler_type = 'sun' + description = 'Sun or Forte Fortran 95 Compiler' + # ex: + # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 + version_match = simple_version_match( + start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') + + executables = { + 'version_cmd' : ["", "-V"], + 'compiler_f77' : ["f90"], + 'compiler_fix' : ["f90", "-fixed"], + 'compiler_f90' : ["f90"], + 'linker_so' : ["", "-Bdynamic", "-G"], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + module_dir_switch = '-moddir=' + module_include_switch = '-M' + pic_flags = ['-xcode=pic32'] + + def get_flags_f77(self): + ret = ["-ftrap=%none"] + if (self.get_version() or '') >= '7': + ret.append("-f77") + else: + ret.append("-fixed") + return ret + def get_opt(self): + return ['-fast', '-dalign'] + def get_arch(self): + return ['-xtarget=generic'] + def get_libraries(self): + opt = [] + opt.extend(['fsu', 'sunmath', 'mvec']) + return opt + + def runtime_library_dir_option(self, dir): + return '-R%s' % dir + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='sun').get_version()) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/vast.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/vast.py new file mode 100644 index 00000000..92a1647b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fcompiler/vast.py @@ -0,0 +1,52 @@ +import os + +from numpy.distutils.fcompiler.gnu import GnuFCompiler + +compilers = ['VastFCompiler'] + +class VastFCompiler(GnuFCompiler): + compiler_type = 'vast' + compiler_aliases = () + description = 'Pacific-Sierra Research Fortran 90 Compiler' + version_pattern = (r'\s*Pacific-Sierra Research vf90 ' + r'(Personal|Professional)\s+(?P[^\s]*)') + + # VAST f90 does not support -o with -c. 
So, object files are created + # to the current directory and then moved to build directory + object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' + + executables = { + 'version_cmd' : ["vf90", "-v"], + 'compiler_f77' : ["g77"], + 'compiler_fix' : ["f90", "-Wv,-ya"], + 'compiler_f90' : ["f90"], + 'linker_so' : [""], + 'archiver' : ["ar", "-cr"], + 'ranlib' : ["ranlib"] + } + module_dir_switch = None #XXX Fix me + module_include_switch = None #XXX Fix me + + def find_executables(self): + pass + + def get_version_cmd(self): + f90 = self.compiler_f90[0] + d, b = os.path.split(f90) + vf90 = os.path.join(d, 'v'+b) + return vf90 + + def get_flags_arch(self): + vast_version = self.get_version() + gnu = GnuFCompiler() + gnu.customize(None) + self.version = gnu.get_version() + opt = GnuFCompiler.get_flags_arch(self) + self.version = vast_version + return opt + +if __name__ == '__main__': + from distutils import log + log.set_verbosity(2) + from numpy.distutils import customized_fcompiler + print(customized_fcompiler(compiler='vast').get_version()) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/from_template.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/from_template.py new file mode 100644 index 00000000..90d1f4c3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/from_template.py @@ -0,0 +1,261 @@ +#!/usr/bin/env python3 +""" + +process_file(filename) + + takes templated file .xxx.src and produces .xxx file where .xxx + is .pyf .f90 or .f using the following template rules: + + '<..>' denotes a template. + + All function and subroutine blocks in a source file with names that + contain '<..>' will be replicated according to the rules in '<..>'. + + The number of comma-separated words in '<..>' will determine the number of + replicates. + + '<..>' may have two different forms, named and short. For example, + + named: + where anywhere inside a block '
+   'd', 's', 'z', and 'c' for each replicate of the block.
+
+   <_c>  is already defined: <_c=s,d,c,z>
+   <_t>  is already defined: <_t=real,double precision,complex,double complex>
+
+  short:
+   <s,d,c,z>, a short form of the named, useful when no '<p>' appears inside
+   a block.
+
+  In general, '<..>' contains a comma separated list of arbitrary
+  expressions. If these expressions must contain a comma|leftarrow|rightarrow,
+  then prepend the comma|leftarrow|rightarrow with a backslash.
+
+  If an expression matches '\\<index>' then it will be replaced
+  by <index>-th expression.
+
+  Note that all '<..>' forms in a block must have the same number of
+  comma-separated entries.
+
+  Predefined named template rules:
+   <prefix=s,d,c,z>
+   <ftype=real,double precision,complex,double complex>
+   <ftypereal=real,double precision,\\0,\\1>
+   <ctype=float,double,complex_float,complex_double>
+   <ctypereal=float,double,\\0,\\1>
+
+"""
+__all__ = ['process_str', 'process_file']
+
+import os
+import sys
+import re
+
+routine_start_re = re.compile(r'(\n|\A)((     (\$|\*))|)\s*(subroutine|function)\b', re.I)
+routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
+function_start_re = re.compile(r'\n     (\$|\*)\s*function\b', re.I)
+
+def parse_structure(astr):
+    """ Return a list of tuples for each function or subroutine each
+    tuple is the start and end of a subroutine or function to be
+    expanded.
+    """
+
+    spanlist = []
+    ind = 0
+    while True:
+        m = routine_start_re.search(astr, ind)
+        if m is None:
+            break
+        start = m.start()
+        if function_start_re.match(astr, start, m.end()):
+            while True:
+                i = astr.rfind('\n', ind, start)
+                if i==-1:
+                    break
+                start = i
+                if astr[i:i+7] != '\n     $':
+                    break
+        start += 1
+        m = routine_end_re.search(astr, m.end())
+        ind = end = m and m.end() - 1 or len(astr)
+        spanlist.append((start, end))
+    return spanlist
+
+template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
+named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
+list_re = re.compile(r"<\s*((.*?))\s*>")
+
+def find_repl_patterns(astr):
+    reps = named_re.findall(astr)
+    names = {}
+    for rep in reps:
+        name = rep[0].strip() or unique_key(names)
+        repl = rep[1].replace(r'\,', '@comma@')
+        thelist = conv(repl)
+        names[name] = thelist
+    return names
+
+def find_and_remove_repl_patterns(astr):
+    names = find_repl_patterns(astr)
+    astr = re.subn(named_re, '', astr)[0]
+    return astr, names
+
+item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
+def conv(astr):
+    b = astr.split(',')
+    l = [x.strip() for x in b]
+    for i in range(len(l)):
+        m = item_re.match(l[i])
+        if m:
+            j = int(m.group('index'))
+            l[i] = l[j]
+    return ','.join(l)
+
+def unique_key(adict):
+    """ Obtain a unique key given a dictionary."""
+    allkeys = list(adict.keys())
+    done = False
+    n = 1
+    while not done:
+        newkey = '__l%s' % (n)
+        if newkey in allkeys:
+            n += 1
+        else:
+            done = True
+    return newkey
+
+
+template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
+def expand_sub(substr, names):
+    substr = substr.replace(r'\>', '@rightarrow@')
+    substr = substr.replace(r'\<', '@leftarrow@')
+    lnames = find_repl_patterns(substr)
+    substr = named_re.sub(r"<\1>", substr)  # get rid of definition templates
+
+    def listrepl(mobj):
+        thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
+        if template_name_re.match(thelist):
+            return "<%s>" % (thelist)
+        name = None
+        for key in lnames.keys():    # see if list is already in dictionary
+            if lnames[key] == thelist:
+                name = key
+        if name is None:      # this list is not in the dictionary yet
+            name = unique_key(lnames)
+            lnames[name] = thelist
+        return "<%s>" % name
+
+    substr = list_re.sub(listrepl, substr)  # convert all lists to named templates
+                                            # newnames are constructed as needed
+
+    numsubs = None
+    base_rule = None
+    rules = {}
+    for r in template_re.findall(substr):
+        if r not in rules:
+            thelist = lnames.get(r, names.get(r, None))
+            if thelist is None:
+                raise ValueError('No replicates found for <%s>' % (r))
+            if r not in names and not thelist.startswith('_'):
+                names[r] = thelist
+            rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
+            num = len(rule)
+
+            if numsubs is None:
+                numsubs = num
+                rules[r] = rule
+                base_rule = r
+            elif num == numsubs:
+                rules[r] = rule
+            else:
+                print("Mismatch in number of replacements (base <%s=%s>)"
+                      " for <%s=%s>. Ignoring." %
+                      (base_rule, ','.join(rules[base_rule]), r, thelist))
+    if not rules:
+        return substr
+
+    def namerepl(mobj):
+        name = mobj.group(1)
+        return rules.get(name, (k+1)*[name])[k]
+
+    newstr = ''
+    for k in range(numsubs):
+        newstr += template_re.sub(namerepl, substr) + '\n\n'
+
+    newstr = newstr.replace('@rightarrow@', '>')
+    newstr = newstr.replace('@leftarrow@', '<')
+    return newstr
+
+def process_str(allstr):
+    newstr = allstr
+    writestr = ''
+
+    struct = parse_structure(newstr)
+
+    oldend = 0
+    names = {}
+    names.update(_special_names)
+    for sub in struct:
+        cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]])
+        writestr += cleanedstr
+        names.update(defs)
+        writestr += expand_sub(newstr[sub[0]:sub[1]], names)
+        oldend = sub[1]
+    writestr += newstr[oldend:]
+
+    return writestr
+
+include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+\.src)['\"]", re.I)
+
+def resolve_includes(source):
+    d = os.path.dirname(source)
+    with open(source) as fid:
+        lines = []
+        for line in fid:
+            m = include_src_re.match(line)
+            if m:
+                fn = m.group('name')
+                if not os.path.isabs(fn):
+                    fn = os.path.join(d, fn)
+                if os.path.isfile(fn):
+                    lines.extend(resolve_includes(fn))
+                else:
+                    lines.append(line)
+            else:
+                lines.append(line)
+    return lines
+
+def process_file(source):
+    lines = resolve_includes(source)
+    return process_str(''.join(lines))
+
+_special_names = find_repl_patterns('''
+<_c=s,d,c,z>
+<_t=real,double precision,complex,double complex>
+<prefix=s,d,c,z>
+<ftype=real,double precision,complex,double complex>
+<ftypereal=real,double precision,\\0,\\1>
+<ctype=float,double,complex_float,complex_double>
+<ctypereal=float,double,\\0,\\1>
+''')
+
+def main():
+    try:
+        file = sys.argv[1]
+    except IndexError:
+        fid = sys.stdin
+        outfile = sys.stdout
+    else:
+        fid = open(file, 'r')
+        (base, ext) = os.path.splitext(file)
+        newname = base
+        outfile = open(newname, 'w')
+
+    allstr = fid.read()
+    writestr = process_str(allstr)
+    outfile.write(writestr)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fujitsuccompiler.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fujitsuccompiler.py
new file mode 100644
index 00000000..c25900b3
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/fujitsuccompiler.py
@@ -0,0 +1,28 @@
+from distutils.unixccompiler import UnixCCompiler
+
+class FujitsuCCompiler(UnixCCompiler):
+
+    """
+    Fujitsu compiler.
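+
+    A hypothetical selection sketch (assuming numpy.distutils registers this
+    class under the name 'fujitsu' and the fcc/FCC executables are on PATH;
+    neither is verified here):
+
+        python setup.py build_ext --compiler=fujitsu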
+ """ + + compiler_type = 'fujitsu' + cc_exe = 'fcc' + cxx_exe = 'FCC' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + cc_compiler = self.cc_exe + cxx_compiler = self.cxx_exe + self.set_executables( + compiler=cc_compiler + + ' -O3 -Nclang -fPIC', + compiler_so=cc_compiler + + ' -O3 -Nclang -fPIC', + compiler_cxx=cxx_compiler + + ' -O3 -Nclang -fPIC', + linker_exe=cc_compiler + + ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared', + linker_so=cc_compiler + + ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared' + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/intelccompiler.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/intelccompiler.py new file mode 100644 index 00000000..0fa1c11d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/intelccompiler.py @@ -0,0 +1,111 @@ +import platform + +from distutils.unixccompiler import UnixCCompiler +from numpy.distutils.exec_command import find_executable +from numpy.distutils.ccompiler import simple_version_match +if platform.system() == 'Windows': + from numpy.distutils.msvc9compiler import MSVCCompiler + + +class IntelCCompiler(UnixCCompiler): + """A modified Intel compiler compatible with a GCC-built Python.""" + compiler_type = 'intel' + cc_exe = 'icc' + cc_args = 'fPIC' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' + '-fomit-frame-pointer -{}').format(mpopt) + compiler = self.cc_exe + + if platform.system() == 'Darwin': + shared_flag = '-Wl,-undefined,dynamic_lookup' + else: + shared_flag = '-shared' + self.set_executables(compiler=compiler, + compiler_so=compiler, + compiler_cxx=compiler, + archiver='xiar' + ' cru', + linker_exe=compiler + ' -shared-intel', + linker_so=compiler + ' ' + shared_flag + + ' -shared-intel') + + +class IntelItaniumCCompiler(IntelCCompiler): + compiler_type = 'intele' + + # On Itanium, the Intel Compiler used to be called ecc, let's search for + # it (now it's also icc, so ecc is last in the search). + for cc_exe in map(find_executable, ['icc', 'ecc']): + if cc_exe: + break + + +class IntelEM64TCCompiler(UnixCCompiler): + """ + A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python. + """ + compiler_type = 'intelem' + cc_exe = 'icc -m64' + cc_args = '-fPIC' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 ' + '-fomit-frame-pointer -{}').format(mpopt) + compiler = self.cc_exe + + if platform.system() == 'Darwin': + shared_flag = '-Wl,-undefined,dynamic_lookup' + else: + shared_flag = '-shared' + self.set_executables(compiler=compiler, + compiler_so=compiler, + compiler_cxx=compiler, + archiver='xiar' + ' cru', + linker_exe=compiler + ' -shared-intel', + linker_so=compiler + ' ' + shared_flag + + ' -shared-intel') + + +if platform.system() == 'Windows': + class IntelCCompilerW(MSVCCompiler): + """ + A modified Intel compiler compatible with an MSVC-built Python. 
+ """ + compiler_type = 'intelw' + compiler_cxx = 'icl' + + def __init__(self, verbose=0, dry_run=0, force=0): + MSVCCompiler.__init__(self, verbose, dry_run, force) + version_match = simple_version_match(start=r'Intel\(R\).*?32,') + self.__version = version_match + + def initialize(self, plat_name=None): + MSVCCompiler.initialize(self, plat_name) + self.cc = self.find_exe('icl.exe') + self.lib = self.find_exe('xilib') + self.linker = self.find_exe('xilink') + self.compile_options = ['/nologo', '/O3', '/MD', '/W3', + '/Qstd=c99'] + self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', + '/Qstd=c99', '/Z7', '/D_DEBUG'] + + class IntelEM64TCCompilerW(IntelCCompilerW): + """ + A modified Intel x86_64 compiler compatible with + a 64bit MSVC-built Python. + """ + compiler_type = 'intelemw' + + def __init__(self, verbose=0, dry_run=0, force=0): + MSVCCompiler.__init__(self, verbose, dry_run, force) + version_match = simple_version_match(start=r'Intel\(R\).*?64,') + self.__version = version_match diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/lib2def.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/lib2def.py new file mode 100644 index 00000000..851682c6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/lib2def.py @@ -0,0 +1,116 @@ +import re +import sys +import subprocess + +__doc__ = """This module generates a DEF file from the symbols in +an MSVC-compiled DLL import library. It correctly discriminates between +data and functions. The data is collected from the output of the program +nm(1). + +Usage: + python lib2def.py [libname.lib] [output.def] +or + python lib2def.py [libname.lib] > output.def + +libname.lib defaults to python.lib and output.def defaults to stdout + +Author: Robert Kern +Last Update: April 30, 1999 +""" + +__version__ = '0.1a' + +py_ver = "%d%d" % tuple(sys.version_info[:2]) + +DEFAULT_NM = ['nm', '-Cs'] + +DEF_HEADER = """LIBRARY python%s.dll +;CODE PRELOAD MOVEABLE DISCARDABLE +;DATA PRELOAD SINGLE + +EXPORTS +""" % py_ver +# the header of the DEF file + +FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) +DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) + +def parse_cmd(): + """Parses the command-line arguments. + +libfile, deffile = parse_cmd()""" + if len(sys.argv) == 3: + if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': + libfile, deffile = sys.argv[1:] + elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': + deffile, libfile = sys.argv[1:] + else: + print("I'm assuming that your first argument is the library") + print("and the second is the DEF file.") + elif len(sys.argv) == 2: + if sys.argv[1][-4:] == '.def': + deffile = sys.argv[1] + libfile = 'python%s.lib' % py_ver + elif sys.argv[1][-4:] == '.lib': + deffile = None + libfile = sys.argv[1] + else: + libfile = 'python%s.lib' % py_ver + deffile = None + return libfile, deffile + +def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True): + """Returns the output of nm_cmd via a pipe. + +nm_output = getnm(nm_cmd = 'nm -Cs py_lib')""" + p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, text=True) + nm_output, nm_err = p.communicate() + if p.returncode != 0: + raise RuntimeError('failed to run "%s": "%s"' % ( + ' '.join(nm_cmd), nm_err)) + return nm_output + +def parse_nm(nm_output): + """Returns a tuple of lists: dlist for the list of data +symbols and flist for the list of function symbols. 
+
+dlist, flist = parse_nm(nm_output)"""
+    data = DATA_RE.findall(nm_output)
+    func = FUNC_RE.findall(nm_output)
+
+    flist = []
+    for sym in data:
+        if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'):
+            flist.append(sym)
+
+    dlist = []
+    for sym in data:
+        if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'):
+            dlist.append(sym)
+
+    dlist.sort()
+    flist.sort()
+    return dlist, flist
+
+def output_def(dlist, flist, header, file = sys.stdout):
+    """Outputs the final DEF file to a file defaulting to stdout.
+
+output_def(dlist, flist, header, file = sys.stdout)"""
+    for data_sym in dlist:
+        header = header + '\t%s DATA\n' % data_sym
+    header = header + '\n'  # blank line
+    for func_sym in flist:
+        header = header + '\t%s\n' % func_sym
+    file.write(header)
+
+if __name__ == '__main__':
+    libfile, deffile = parse_cmd()
+    if deffile is None:
+        deffile = sys.stdout
+    else:
+        deffile = open(deffile, 'w')
+    nm_cmd = DEFAULT_NM + [str(libfile)]
+    nm_output = getnm(nm_cmd, shell=False)
+    dlist, flist = parse_nm(nm_output)
+    output_def(dlist, flist, DEF_HEADER, deffile)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/line_endings.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/line_endings.py
new file mode 100644
index 00000000..686e5ebd
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/line_endings.py
@@ -0,0 +1,77 @@
+""" Functions for converting from DOS to UNIX line endings
+
+"""
+import os
+import re
+import sys
+
+
+def dos2unix(file):
+    "Replace CRLF with LF in argument files. Print names of changed files."
+    if os.path.isdir(file):
+        print(file, "Directory!")
+        return
+
+    with open(file, "rb") as fp:
+        data = fp.read()
+    # the file was read in binary mode, so test and substitute with bytes
+    if b'\0' in data:
+        print(file, "Binary!")
+        return
+
+    newdata = re.sub(b"\r\n", b"\n", data)
+    if newdata != data:
+        print('dos2unix:', file)
+        with open(file, "wb") as f:
+            f.write(newdata)
+        return file
+    else:
+        print(file, 'ok')
+
+def dos2unix_one_dir(modified_files, dir_name, file_names):
+    for file in file_names:
+        full_path = os.path.join(dir_name, file)
+        file = dos2unix(full_path)
+        if file is not None:
+            modified_files.append(file)
+
+def dos2unix_dir(dir_name):
+    modified_files = []
+    # os.path.walk was removed in Python 3; os.walk visits the same tree
+    for dirpath, dirnames, filenames in os.walk(dir_name):
+        dos2unix_one_dir(modified_files, dirpath, filenames)
+    return modified_files
+#----------------------------------
+
+def unix2dos(file):
+    "Replace LF with CRLF in argument files. Print names of changed files."
+    if os.path.isdir(file):
+        print(file, "Directory!")
+        return
+
+    with open(file, "rb") as fp:
+        data = fp.read()
+    if b'\0' in data:
+        print(file, "Binary!")
+        return
+    newdata = re.sub(b"\r\n", b"\n", data)
+    newdata = re.sub(b"\n", b"\r\n", newdata)
+    if newdata != data:
+        print('unix2dos:', file)
+        with open(file, "wb") as f:
+            f.write(newdata)
+        return file
+    else:
+        print(file, 'ok')
+
+def unix2dos_one_dir(modified_files, dir_name, file_names):
+    for file in file_names:
+        full_path = os.path.join(dir_name, file)
+        # keep the return value so changed files are actually recorded
+        file = unix2dos(full_path)
+        if file is not None:
+            modified_files.append(file)
+
+def unix2dos_dir(dir_name):
+    modified_files = []
+    for dirpath, dirnames, filenames in os.walk(dir_name):
+        unix2dos_one_dir(modified_files, dirpath, filenames)
+    return modified_files
+
+if __name__ == "__main__":
+    dos2unix_dir(sys.argv[1])
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/log.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/log.py
new file mode 100644
index 00000000..3347f56d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/log.py
@@ -0,0 +1,111 @@
+# Colored log
+import sys
+from distutils.log import *  # noqa: F403
+from distutils.log import Log as old_Log
+from distutils.log import _global_log
+
+from numpy.distutils.misc_util import (red_text, default_text, cyan_text,
+        green_text, is_sequence, is_string)
+
+
+def _fix_args(args, flag=1):
+    if is_string(args):
+        return args.replace('%', '%%')
+    if flag and is_sequence(args):
+        return tuple([_fix_args(a, flag=0) for a in args])
+    return args
+
+
+class Log(old_Log):
+    def _log(self, level, msg, args):
+        if level >= self.threshold:
+            if args:
+                msg = msg % _fix_args(args)
+            if 0:
+                if msg.startswith('copying ') and msg.find(' -> ') != -1:
+                    return
+                if msg.startswith('byte-compiling '):
+                    return
+            print(_global_color_map[level](msg))
+            sys.stdout.flush()
+
+    def good(self, msg, *args):
+        """
+        If we log WARN messages, log this message as a 'nice' anti-warn
+        message.
+
+        """
+        if WARN >= self.threshold:
+            if args:
+                print(green_text(msg % _fix_args(args)))
+            else:
+                print(green_text(msg))
+            sys.stdout.flush()
+
+
+_global_log.__class__ = Log
+
+good = _global_log.good
+
+def set_threshold(level, force=False):
+    prev_level = _global_log.threshold
+    if prev_level > DEBUG or force:
+        # If we're running at DEBUG, don't change the threshold, as there's
+        # likely a good reason why we're running at this level.
+        _global_log.threshold = level
+        if level <= DEBUG:
+            info('set_threshold: setting threshold to DEBUG level,'
+                 ' it can be changed only with force argument')
+    else:
+        info('set_threshold: not changing threshold from DEBUG level'
+             ' %s to %s' % (prev_level, level))
+    return prev_level
+
+def get_threshold():
+    return _global_log.threshold
+
+def set_verbosity(v, force=False):
+    prev_level = _global_log.threshold
+    if v < 0:
+        set_threshold(ERROR, force)
+    elif v == 0:
+        set_threshold(WARN, force)
+    elif v == 1:
+        set_threshold(INFO, force)
+    elif v >= 2:
+        set_threshold(DEBUG, force)
+    return {FATAL: -2, ERROR: -1, WARN: 0, INFO: 1, DEBUG: 2}.get(prev_level, 1)
+
+
+_global_color_map = {
+    DEBUG: cyan_text,
+    INFO: default_text,
+    WARN: red_text,
+    ERROR: red_text,
+    FATAL: red_text
+}
+
+# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold.
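+#
+# Verbosity maps onto thresholds as: v<0 -> ERROR, 0 -> WARN, 1 -> INFO,
+# v>=2 -> DEBUG (see set_verbosity above). A minimal usage sketch:
+#
+#     from numpy.distutils import log
+#     log.set_verbosity(2)                  # threshold becomes DEBUG
+#     log.debug("probing %s", "openblas")   # prints "DEBUG: probing openblas"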
+set_verbosity(0, force=True) + + +_error = error +_warn = warn +_info = info +_debug = debug + + +def error(msg, *a, **kw): + _error(f"ERROR: {msg}", *a, **kw) + + +def warn(msg, *a, **kw): + _warn(f"WARN: {msg}", *a, **kw) + + +def info(msg, *a, **kw): + _info(f"INFO: {msg}", *a, **kw) + + +def debug(msg, *a, **kw): + _debug(f"DEBUG: {msg}", *a, **kw) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c new file mode 100644 index 00000000..485a675d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c @@ -0,0 +1,6 @@ +int _get_output_format(void) +{ + return 0; +} + +int _imp____lc_codepage = 0; diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/mingw32ccompiler.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/mingw32ccompiler.py new file mode 100644 index 00000000..4763f41a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/mingw32ccompiler.py @@ -0,0 +1,591 @@ +""" +Support code for building Python extensions on Windows. + + # NT stuff + # 1. Make sure libpython.a exists for gcc. If not, build it. + # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) + # 3. Force windows to use g77 + +""" +import os +import sys +import subprocess +import re +import textwrap + +# Overwrite certain distutils.ccompiler functions: +import numpy.distutils.ccompiler # noqa: F401 +from numpy.distutils import log +# NT stuff +# 1. Make sure libpython.a exists for gcc. If not, build it. +# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) +# --> this is done in numpy/distutils/ccompiler.py +# 3. Force windows to use g77 + +import distutils.cygwinccompiler +from distutils.unixccompiler import UnixCCompiler +from distutils.msvccompiler import get_build_version as get_build_msvc_version +from distutils.errors import UnknownFileError +from numpy.distutils.misc_util import (msvc_runtime_library, + msvc_runtime_version, + msvc_runtime_major, + get_build_architecture) + +def get_msvcr_replacement(): + """Replacement for outdated version of get_msvcr from cygwinccompiler""" + msvcr = msvc_runtime_library() + return [] if msvcr is None else [msvcr] + + +# Useful to generate table of symbols from a dll +_START = re.compile(r'\[Ordinal/Name Pointer\] Table') +_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') + +# the same as cygwin plus some additional parameters +class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): + """ A modified MingW32 compiler compatible with an MSVC built Python. + + """ + + compiler_type = 'mingw32' + + def __init__ (self, + verbose=0, + dry_run=0, + force=0): + + distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, + dry_run, force) + + # **changes: eric jones 4/11/01 + # 1. Check for import library on Windows. Build if it doesn't exist. + + build_import_library() + + # Check for custom msvc runtime library on Windows. Build if it doesn't exist. 
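+        # (build_msvcr_library() below dumps the export table of the
+        # installed msvcr DLL into a .def file and has dlltool turn it into
+        # libmsvcrXX.a, so gcc links against the interpreter's runtime.)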
+ msvcr_success = build_msvcr_library() + msvcr_dbg_success = build_msvcr_library(debug=True) + if msvcr_success or msvcr_dbg_success: + # add preprocessor statement for using customized msvcr lib + self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') + + # Define the MSVC version as hint for MinGW + msvcr_version = msvc_runtime_version() + if msvcr_version: + self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) + + # MS_WIN64 should be defined when building for amd64 on windows, + # but python headers define it only for MS compilers, which has all + # kind of bad consequences, like using Py_ModuleInit4 instead of + # Py_ModuleInit4_64, etc... So we add it here + if get_build_architecture() == 'AMD64': + self.set_executables( + compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', + compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall ' + '-Wstrict-prototypes', + linker_exe='gcc -g', + linker_so='gcc -g -shared') + else: + self.set_executables( + compiler='gcc -O2 -Wall', + compiler_so='gcc -O2 -Wall -Wstrict-prototypes', + linker_exe='g++ ', + linker_so='g++ -shared') + # added for python2.3 support + # we can't pass it through set_executables because pre 2.2 would fail + self.compiler_cxx = ['g++'] + + # Maybe we should also append -mthreads, but then the finished dlls + # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support + # thread-safe exception handling on `Mingw32') + + # no additional libraries needed + #self.dll_libraries=[] + return + + # __init__ () + + def link(self, + target_desc, + objects, + output_filename, + output_dir, + libraries, + library_dirs, + runtime_library_dirs, + export_symbols = None, + debug=0, + extra_preargs=None, + extra_postargs=None, + build_temp=None, + target_lang=None): + # Include the appropriate MSVC runtime library if Python was built + # with MSVC >= 7.0 (MinGW standard is msvcrt) + runtime_library = msvc_runtime_library() + if runtime_library: + if not libraries: + libraries = [] + libraries.append(runtime_library) + args = (self, + target_desc, + objects, + output_filename, + output_dir, + libraries, + library_dirs, + runtime_library_dirs, + None, #export_symbols, we do this in our def-file + debug, + extra_preargs, + extra_postargs, + build_temp, + target_lang) + func = UnixCCompiler.link + func(*args[:func.__code__.co_argcount]) + return + + def object_filenames (self, + source_filenames, + strip_dir=0, + output_dir=''): + if output_dir is None: output_dir = '' + obj_names = [] + for src_name in source_filenames: + # use normcase to make sure '.rc' is really '.rc' and not '.RC' + (base, ext) = os.path.splitext (os.path.normcase(src_name)) + + # added these lines to strip off windows drive letters + # without it, .o files are placed next to .c files + # instead of the build directory + drv, base = os.path.splitdrive(base) + if drv: + base = base[1:] + + if ext not in (self.src_extensions + ['.rc', '.res']): + raise UnknownFileError( + "unknown file type '%s' (from '%s')" % \ + (ext, src_name)) + if strip_dir: + base = os.path.basename (base) + if ext == '.res' or ext == '.rc': + # these need to be compiled to object files + obj_names.append (os.path.join (output_dir, + base + ext + self.obj_extension)) + else: + obj_names.append (os.path.join (output_dir, + base + self.obj_extension)) + return obj_names + + # object_filenames () + + +def find_python_dll(): + # We can't do much here: + # - find it in the virtualenv (sys.prefix) + # - find it in python main dir (sys.base_prefix, if in a virtualenv) + # - in system32, + # - 
ortherwise (Sxs), I don't know how to get it. + stems = [sys.prefix] + if sys.base_prefix != sys.prefix: + stems.append(sys.base_prefix) + + sub_dirs = ['', 'lib', 'bin'] + # generate possible combinations of directory trees and sub-directories + lib_dirs = [] + for stem in stems: + for folder in sub_dirs: + lib_dirs.append(os.path.join(stem, folder)) + + # add system directory as well + if 'SYSTEMROOT' in os.environ: + lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32')) + + # search in the file system for possible candidates + major_version, minor_version = tuple(sys.version_info[:2]) + implementation = sys.implementation.name + if implementation == 'cpython': + dllname = f'python{major_version}{minor_version}.dll' + elif implementation == 'pypy': + dllname = f'libpypy{major_version}.{minor_version}-c.dll' + else: + dllname = f'Unknown platform {implementation}' + print("Looking for %s" % dllname) + for folder in lib_dirs: + dll = os.path.join(folder, dllname) + if os.path.exists(dll): + return dll + + raise ValueError("%s not found in %s" % (dllname, lib_dirs)) + +def dump_table(dll): + st = subprocess.check_output(["objdump.exe", "-p", dll]) + return st.split(b'\n') + +def generate_def(dll, dfile): + """Given a dll file location, get all its exported symbols and dump them + into the given def file. + + The .def file will be overwritten""" + dump = dump_table(dll) + for i in range(len(dump)): + if _START.match(dump[i].decode()): + break + else: + raise ValueError("Symbol table not found") + + syms = [] + for j in range(i+1, len(dump)): + m = _TABLE.match(dump[j].decode()) + if m: + syms.append((int(m.group(1).strip()), m.group(2))) + else: + break + + if len(syms) == 0: + log.warn('No symbols found in %s' % dll) + + with open(dfile, 'w') as d: + d.write('LIBRARY %s\n' % os.path.basename(dll)) + d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') + d.write(';DATA PRELOAD SINGLE\n') + d.write('\nEXPORTS\n') + for s in syms: + #d.write('@%d %s\n' % (s[0], s[1])) + d.write('%s\n' % s[1]) + +def find_dll(dll_name): + + arch = {'AMD64' : 'amd64', + 'Intel' : 'x86'}[get_build_architecture()] + + def _find_dll_in_winsxs(dll_name): + # Walk through the WinSxS directory to find the dll. + winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'), + 'winsxs') + if not os.path.exists(winsxs_path): + return None + for root, dirs, files in os.walk(winsxs_path): + if dll_name in files and arch in root: + return os.path.join(root, dll_name) + return None + + def _find_dll_in_path(dll_name): + # First, look in the Python directory, then scan PATH for + # the given dll name. + for path in [sys.prefix] + os.environ['PATH'].split(';'): + filepath = os.path.join(path, dll_name) + if os.path.exists(filepath): + return os.path.abspath(filepath) + + return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) + +def build_msvcr_library(debug=False): + if os.name != 'nt': + return False + + # If the version number is None, then we couldn't find the MSVC runtime at + # all, because we are running on a Python distribution which is customed + # compiled; trust that the compiler is the same as the one available to us + # now, and that it is capable of linking with the correct runtime without + # any extra options. 
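+    # For reference, msvc_runtime_major() maps _MSC_VER onto a runtime major
+    # version, e.g. 1500 -> 90 (msvcr90, VS 2008) and 1900 -> 140
+    # (vcruntime140, VS 2015 and later); unknown builds come back as None.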
+ msvcr_ver = msvc_runtime_major() + if msvcr_ver is None: + log.debug('Skip building import library: ' + 'Runtime is not compiled with MSVC') + return False + + # Skip using a custom library for versions < MSVC 8.0 + if msvcr_ver < 80: + log.debug('Skip building msvcr library:' + ' custom functionality not present') + return False + + msvcr_name = msvc_runtime_library() + if debug: + msvcr_name += 'd' + + # Skip if custom library already exists + out_name = "lib%s.a" % msvcr_name + out_file = os.path.join(sys.prefix, 'libs', out_name) + if os.path.isfile(out_file): + log.debug('Skip building msvcr library: "%s" exists' % + (out_file,)) + return True + + # Find the msvcr dll + msvcr_dll_name = msvcr_name + '.dll' + dll_file = find_dll(msvcr_dll_name) + if not dll_file: + log.warn('Cannot build msvcr library: "%s" not found' % + msvcr_dll_name) + return False + + def_name = "lib%s.def" % msvcr_name + def_file = os.path.join(sys.prefix, 'libs', def_name) + + log.info('Building msvcr library: "%s" (from %s)' \ + % (out_file, dll_file)) + + # Generate a symbol definition file from the msvcr dll + generate_def(dll_file, def_file) + + # Create a custom mingw library for the given symbol definitions + cmd = ['dlltool', '-d', def_file, '-l', out_file] + retcode = subprocess.call(cmd) + + # Clean up symbol definitions + os.remove(def_file) + + return (not retcode) + +def build_import_library(): + if os.name != 'nt': + return + + arch = get_build_architecture() + if arch == 'AMD64': + return _build_import_library_amd64() + elif arch == 'Intel': + return _build_import_library_x86() + else: + raise ValueError("Unhandled arch %s" % arch) + +def _check_for_import_lib(): + """Check if an import library for the Python runtime already exists.""" + major_version, minor_version = tuple(sys.version_info[:2]) + + # patterns for the file name of the library itself + patterns = ['libpython%d%d.a', + 'libpython%d%d.dll.a', + 'libpython%d.%d.dll.a'] + + # directory trees that may contain the library + stems = [sys.prefix] + if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: + stems.append(sys.base_prefix) + elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: + stems.append(sys.real_prefix) + + # possible subdirectories within those trees where it is placed + sub_dirs = ['libs', 'lib'] + + # generate a list of candidate locations + candidates = [] + for pat in patterns: + filename = pat % (major_version, minor_version) + for stem_dir in stems: + for folder in sub_dirs: + candidates.append(os.path.join(stem_dir, folder, filename)) + + # test the filesystem to see if we can find any of these + for fullname in candidates: + if os.path.isfile(fullname): + # already exists, in location given + return (True, fullname) + + # needs to be built, preferred location given first + return (False, candidates[0]) + +def _build_import_library_amd64(): + out_exists, out_file = _check_for_import_lib() + if out_exists: + log.debug('Skip building import library: "%s" exists', out_file) + return + + # get the runtime dll for which we are building import library + dll_file = find_python_dll() + log.info('Building import library (arch=AMD64): "%s" (from %s)' % + (out_file, dll_file)) + + # generate symbol list from this library + def_name = "python%d%d.def" % tuple(sys.version_info[:2]) + def_file = os.path.join(sys.prefix, 'libs', def_name) + generate_def(dll_file, def_file) + + # generate import library from this symbol list + cmd = ['dlltool', '-d', def_file, '-l', out_file] + subprocess.check_call(cmd) 
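+
+# Both import-library builders funnel into the same dlltool step; they differ
+# only in how the symbol list is obtained (objdump on the python DLL for
+# AMD64 above, nm on pythonXY.lib via lib2def for x86 below). By hand the
+# pipeline is roughly (illustrative file names):
+#
+#   objdump.exe -p python39.dll          # AMD64: parsed by generate_def()
+#   nm -Cs python39.lib                  # x86:   parsed by lib2def.parse_nm()
+#   dlltool -d python39.def -l libpython39.dll.a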
+ +def _build_import_library_x86(): + """ Build the import libraries for Mingw32-gcc on Windows + """ + out_exists, out_file = _check_for_import_lib() + if out_exists: + log.debug('Skip building import library: "%s" exists', out_file) + return + + lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) + lib_file = os.path.join(sys.prefix, 'libs', lib_name) + if not os.path.isfile(lib_file): + # didn't find library file in virtualenv, try base distribution, too, + # and use that instead if found there. for Python 2.7 venvs, the base + # directory is in attribute real_prefix instead of base_prefix. + if hasattr(sys, 'base_prefix'): + base_lib = os.path.join(sys.base_prefix, 'libs', lib_name) + elif hasattr(sys, 'real_prefix'): + base_lib = os.path.join(sys.real_prefix, 'libs', lib_name) + else: + base_lib = '' # os.path.isfile('') == False + + if os.path.isfile(base_lib): + lib_file = base_lib + else: + log.warn('Cannot build import library: "%s" not found', lib_file) + return + log.info('Building import library (ARCH=x86): "%s"', out_file) + + from numpy.distutils import lib2def + + def_name = "python%d%d.def" % tuple(sys.version_info[:2]) + def_file = os.path.join(sys.prefix, 'libs', def_name) + nm_output = lib2def.getnm( + lib2def.DEFAULT_NM + [lib_file], shell=False) + dlist, flist = lib2def.parse_nm(nm_output) + with open(def_file, 'w') as fid: + lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid) + + dll_name = find_python_dll () + + cmd = ["dlltool", + "--dllname", dll_name, + "--def", def_file, + "--output-lib", out_file] + status = subprocess.check_output(cmd) + if status: + log.warn('Failed to build import library for gcc. Linking will fail.') + return + +#===================================== +# Dealing with Visual Studio MANIFESTS +#===================================== + +# Functions to deal with visual studio manifests. Manifest are a mechanism to +# enforce strong DLL versioning on windows, and has nothing to do with +# distutils MANIFEST. manifests are XML files with version info, and used by +# the OS loader; they are necessary when linking against a DLL not in the +# system path; in particular, official python 2.6 binary is built against the +# MS runtime 9 (the one from VS 2008), which is not available on most windows +# systems; python 2.6 installer does install it in the Win SxS (Side by side) +# directory, but this requires the manifest for this to work. This is a big +# mess, thanks MS for a wonderful system. + +# XXX: ideally, we should use exactly the same version as used by python. I +# submitted a patch to get this version, but it was only included for python +# 2.6.1 and above. So for versions below, we use a "best guess". +_MSVCRVER_TO_FULLVER = {} +if sys.platform == 'win32': + try: + import msvcrt + # I took one version in my SxS directory: no idea if it is the good + # one, and we can't retrieve it from python + _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" + _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" + # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 + # on Windows XP: + _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460" + crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None) + if crt_ver is not None: # Available at least back to Python 3.3 + maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups() + _MSVCRVER_TO_FULLVER[maj + min] = crt_ver + del maj, min + del crt_ver + except ImportError: + # If we are here, means python was not built with MSVC. 
Not sure what
+        # to do in that case: manifest building will fail, but it should not be
+        # used in that case anyway
+        log.warn('Cannot import msvcrt: using manifest will not be possible')
+
+def msvc_manifest_xml(maj, min):
+    """Given a major and minor version of the MSVCR, returns the
+    corresponding XML file."""
+    try:
+        fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
+    except KeyError:
+        raise ValueError("Version %d,%d of MSVCRT not supported yet" %
+                         (maj, min)) from None
+    # Don't be fooled, it looks like an XML, but it is not. In particular, it
+    # should not have any space before starting, and its size should be
+    # divisible by 4, most likely for alignment constraints when the xml is
+    # embedded in the binary...
+    # This template was copied directly from the python 2.6 binary (using
+    # strings.exe from mingw on python.exe).
+    template = textwrap.dedent("""\
+        <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+        <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+          <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+            <security>
+              <requestedPrivileges>
+                <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
+              </requestedPrivileges>
+            </security>
+          </trustInfo>
+          <dependency>
+            <dependentAssembly>
+              <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
+            </dependentAssembly>
+          </dependency>
+        </assembly>
+        """)
+
+    return template % {'fullver': fullver, 'maj': maj, 'min': min}
+
+def manifest_rc(name, type='dll'):
+    """Return the rc file used to generate the res file which will be embedded
+    as manifest for given manifest file name, of given type ('dll' or
+    'exe').
+
+    Parameters
+    ----------
+    name : str
+        name of the manifest file to embed
+    type : str {'dll', 'exe'}
+        type of the binary which will embed the manifest
+
+    """
+    if type == 'dll':
+        rctype = 2
+    elif type == 'exe':
+        rctype = 1
+    else:
+        raise ValueError("Type %s not supported" % type)
+
+    return """\
+#include "winuser.h"
+%d RT_MANIFEST %s""" % (rctype, name)
+
+def check_embedded_msvcr_match_linked(msver):
+    """msver is the ms runtime version used for the MANIFEST."""
+    # check msvcr major version are the same for linking and
+    # embedding
+    maj = msvc_runtime_major()
+    if maj:
+        if not maj == int(msver):
+            raise ValueError(
+                  "Discrepancy between linked msvcr " \
+                  "(%d) and the one about to be embedded " \
+                  "(%d)" % (int(msver), maj))
+
+def configtest_name(config):
+    base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c"))
+    return os.path.splitext(base)[0]
+
+def manifest_name(config):
+    # Get configtest name (including suffix)
+    root = configtest_name(config)
+    exext = config.compiler.exe_extension
+    return root + exext + ".manifest"
+
+def rc_name(config):
+    # Get configtest name (including suffix)
+    root = configtest_name(config)
+    return root + ".rc"
+
+def generate_manifest(config):
+    msver = get_build_msvc_version()
+    if msver is not None:
+        if msver >= 8:
+            check_embedded_msvcr_match_linked(msver)
+            ma_str, mi_str = str(msver).split('.')
+            # Write the manifest file
+            manxml = msvc_manifest_xml(int(ma_str), int(mi_str))
+            with open(manifest_name(config), "w") as man:
+                config.temp_files.append(manifest_name(config))
+                man.write(manxml)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/misc_util.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/misc_util.py
new file mode 100644
index 00000000..e226b474
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/misc_util.py
@@ -0,0 +1,2493 @@
+import os
+import re
+import sys
+import copy
+import glob
+import atexit
+import tempfile
+import subprocess
+import shutil
+import multiprocessing
+import textwrap
+import importlib.util
+from threading import local as tlocal
+from functools import reduce
+
+import distutils
+from distutils.errors import DistutilsError
+
+# stores temporary directory of each thread to only create one per thread
+_tdata = tlocal()
+
+# store all created temporary directories so they can be
deleted on exit +_tmpdirs = [] +def clean_up_temporary_directory(): + if _tmpdirs is not None: + for d in _tmpdirs: + try: + shutil.rmtree(d) + except OSError: + pass + +atexit.register(clean_up_temporary_directory) + +__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', + 'dict_append', 'appendpath', 'generate_config_py', + 'get_cmd', 'allpath', 'get_mathlibs', + 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', + 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', + 'has_f_sources', 'has_cxx_sources', 'filter_sources', + 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', + 'get_script_files', 'get_lib_source_files', 'get_data_files', + 'dot_join', 'get_frame', 'minrelpath', 'njoin', + 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', + 'get_build_architecture', 'get_info', 'get_pkg_info', + 'get_num_build_jobs', 'sanitize_cxx_flags', + 'exec_mod_from_location'] + +class InstallableLib: + """ + Container to hold information on an installable library. + + Parameters + ---------- + name : str + Name of the installed library. + build_info : dict + Dictionary holding build information. + target_dir : str + Absolute path specifying where to install the library. + + See Also + -------- + Configuration.add_installed_library + + Notes + ----- + The three parameters are stored as attributes with the same names. + + """ + def __init__(self, name, build_info, target_dir): + self.name = name + self.build_info = build_info + self.target_dir = target_dir + + +def get_num_build_jobs(): + """ + Get number of parallel build jobs set by the --parallel command line + argument of setup.py + If the command did not receive a setting the environment variable + NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of + processors on the system, with a maximum of 8 (to prevent + overloading the system if there a lot of CPUs). + + Returns + ------- + out : int + number of parallel jobs that can be run + + """ + from numpy.distutils.core import get_distribution + try: + cpu_count = len(os.sched_getaffinity(0)) + except AttributeError: + cpu_count = multiprocessing.cpu_count() + cpu_count = min(cpu_count, 8) + envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count)) + dist = get_distribution() + # may be None during configuration + if dist is None: + return envjobs + + # any of these three may have the job set, take the largest + cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None), + getattr(dist.get_command_obj('build_ext'), 'parallel', None), + getattr(dist.get_command_obj('build_clib'), 'parallel', None)) + if all(x is None for x in cmdattr): + return envjobs + else: + return max(x for x in cmdattr if x is not None) + +def quote_args(args): + """Quote list of arguments. + + .. deprecated:: 1.22. + """ + import warnings + warnings.warn('"quote_args" is deprecated.', + DeprecationWarning, stacklevel=2) + # don't used _nt_quote_args as it does not check if + # args items already have quotes or not. + args = list(args) + for i in range(len(args)): + a = args[i] + if ' ' in a and a[0] not in '"\'': + args[i] = '"%s"' % (a) + return args + +def allpath(name): + "Convert a /-separated pathname to one using the OS's path separator." 
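+    # e.g. allpath('a/b/c') == os.path.join('a', 'b', 'c')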
+ split = name.split('/') + return os.path.join(*split) + +def rel_path(path, parent_path): + """Return path relative to parent_path.""" + # Use realpath to avoid issues with symlinked dirs (see gh-7707) + pd = os.path.realpath(os.path.abspath(parent_path)) + apath = os.path.realpath(os.path.abspath(path)) + if len(apath) < len(pd): + return path + if apath == pd: + return '' + if pd == apath[:len(pd)]: + assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)])) + path = apath[len(pd)+1:] + return path + +def get_path_from_frame(frame, parent_path=None): + """Return path of the module given a frame object from the call stack. + + Returned path is relative to parent_path when given, + otherwise it is absolute path. + """ + + # First, try to find if the file name is in the frame. + try: + caller_file = eval('__file__', frame.f_globals, frame.f_locals) + d = os.path.dirname(os.path.abspath(caller_file)) + except NameError: + # __file__ is not defined, so let's try __name__. We try this second + # because setuptools spoofs __name__ to be '__main__' even though + # sys.modules['__main__'] might be something else, like easy_install(1). + caller_name = eval('__name__', frame.f_globals, frame.f_locals) + __import__(caller_name) + mod = sys.modules[caller_name] + if hasattr(mod, '__file__'): + d = os.path.dirname(os.path.abspath(mod.__file__)) + else: + # we're probably running setup.py as execfile("setup.py") + # (likely we're building an egg) + d = os.path.abspath('.') + + if parent_path is not None: + d = rel_path(d, parent_path) + + return d or '.' + +def njoin(*path): + """Join two or more pathname components + + - convert a /-separated pathname to one using the OS's path separator. + - resolve `..` and `.` from path. + + Either passing n arguments as in njoin('a','b'), or a sequence + of n names as in njoin(['a','b']) is handled, or a mixture of such arguments. + """ + paths = [] + for p in path: + if is_sequence(p): + # njoin(['a', 'b'], 'c') + paths.append(njoin(*p)) + else: + assert is_string(p) + paths.append(p) + path = paths + if not path: + # njoin() + joined = '' + else: + # njoin('a', 'b') + joined = os.path.join(*path) + if os.path.sep != '/': + joined = joined.replace('/', os.path.sep) + return minrelpath(joined) + +def get_mathlibs(path=None): + """Return the MATHLIB line from numpyconfig.h + """ + if path is not None: + config_file = os.path.join(path, '_numpyconfig.h') + else: + # Look for the file in each of the numpy include directories. + dirs = get_numpy_include_dirs() + for path in dirs: + fn = os.path.join(path, '_numpyconfig.h') + if os.path.exists(fn): + config_file = fn + break + else: + raise DistutilsError('_numpyconfig.h not found in numpy include ' + 'dirs %r' % (dirs,)) + + with open(config_file) as fid: + mathlibs = [] + s = '#define MATHLIB' + for line in fid: + if line.startswith(s): + value = line[len(s):].strip() + if value: + mathlibs.extend(value.split(',')) + return mathlibs + +def minrelpath(path): + """Resolve `..` and '.' from path. + """ + if not is_string(path): + return path + if '.' 
not in path: + return path + l = path.split(os.sep) + while l: + try: + i = l.index('.', 1) + except ValueError: + break + del l[i] + j = 1 + while l: + try: + i = l.index('..', j) + except ValueError: + break + if l[i-1]=='..': + j += 1 + else: + del l[i], l[i-1] + j = 1 + if not l: + return '' + return os.sep.join(l) + +def sorted_glob(fileglob): + """sorts output of python glob for https://bugs.python.org/issue30461 + to allow extensions to have reproducible build results""" + return sorted(glob.glob(fileglob)) + +def _fix_paths(paths, local_path, include_non_existing): + assert is_sequence(paths), repr(type(paths)) + new_paths = [] + assert not is_string(paths), repr(paths) + for n in paths: + if is_string(n): + if '*' in n or '?' in n: + p = sorted_glob(n) + p2 = sorted_glob(njoin(local_path, n)) + if p2: + new_paths.extend(p2) + elif p: + new_paths.extend(p) + else: + if include_non_existing: + new_paths.append(n) + print('could not resolve pattern in %r: %r' % + (local_path, n)) + else: + n2 = njoin(local_path, n) + if os.path.exists(n2): + new_paths.append(n2) + else: + if os.path.exists(n): + new_paths.append(n) + elif include_non_existing: + new_paths.append(n) + if not os.path.exists(n): + print('non-existing path in %r: %r' % + (local_path, n)) + + elif is_sequence(n): + new_paths.extend(_fix_paths(n, local_path, include_non_existing)) + else: + new_paths.append(n) + return [minrelpath(p) for p in new_paths] + +def gpaths(paths, local_path='', include_non_existing=True): + """Apply glob to paths and prepend local_path if needed. + """ + if is_string(paths): + paths = (paths,) + return _fix_paths(paths, local_path, include_non_existing) + +def make_temp_file(suffix='', prefix='', text=True): + if not hasattr(_tdata, 'tempdir'): + _tdata.tempdir = tempfile.mkdtemp() + _tmpdirs.append(_tdata.tempdir) + fid, name = tempfile.mkstemp(suffix=suffix, + prefix=prefix, + dir=_tdata.tempdir, + text=text) + fo = os.fdopen(fid, 'w') + return fo, name + +# Hooks for colored terminal output. 
+# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle +def terminal_has_colors(): + if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ: + # Avoid importing curses that causes illegal operation + # with a message: + # PYTHON2 caused an invalid page fault in + # module CYGNURSES7.DLL as 015f:18bbfc28 + # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)] + # ssh to Win32 machine from debian + # curses.version is 2.2 + # CYGWIN_98-4.10, release 1.5.7(0.109/3/2)) + return 0 + if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(): + try: + import curses + curses.setupterm() + if (curses.tigetnum("colors") >= 0 + and curses.tigetnum("pairs") >= 0 + and ((curses.tigetstr("setf") is not None + and curses.tigetstr("setb") is not None) + or (curses.tigetstr("setaf") is not None + and curses.tigetstr("setab") is not None) + or curses.tigetstr("scp") is not None)): + return 1 + except Exception: + pass + return 0 + +if terminal_has_colors(): + _colour_codes = dict(black=0, red=1, green=2, yellow=3, + blue=4, magenta=5, cyan=6, white=7, default=9) + def colour_text(s, fg=None, bg=None, bold=False): + seq = [] + if bold: + seq.append('1') + if fg: + fgcode = 30 + _colour_codes.get(fg.lower(), 0) + seq.append(str(fgcode)) + if bg: + bgcode = 40 + _colour_codes.get(bg.lower(), 7) + seq.append(str(bgcode)) + if seq: + return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s) + else: + return s +else: + def colour_text(s, fg=None, bg=None): + return s + +def default_text(s): + return colour_text(s, 'default') +def red_text(s): + return colour_text(s, 'red') +def green_text(s): + return colour_text(s, 'green') +def yellow_text(s): + return colour_text(s, 'yellow') +def cyan_text(s): + return colour_text(s, 'cyan') +def blue_text(s): + return colour_text(s, 'blue') + +######################### + +def cyg2win32(path: str) -> str: + """Convert a path from Cygwin-native to Windows-native. + + Uses the cygpath utility (part of the Base install) to do the + actual conversion. Falls back to returning the original path if + this fails. + + Handles the default ``/cygdrive`` mount prefix as well as the + ``/proc/cygdrive`` portable prefix, custom cygdrive prefixes such + as ``/`` or ``/mnt``, and absolute paths such as ``/usr/src/`` or + ``/home/username`` + + Parameters + ---------- + path : str + The path to convert + + Returns + ------- + converted_path : str + The converted path + + Notes + ----- + Documentation for cygpath utility: + https://cygwin.com/cygwin-ug-net/cygpath.html + Documentation for the C function it wraps: + https://cygwin.com/cygwin-api/func-cygwin-conv-path.html + + """ + if sys.platform != "cygwin": + return path + return subprocess.check_output( + ["/usr/bin/cygpath", "--windows", path], text=True + ) + + +def mingw32(): + """Return true when using mingw32 environment. 
+ """ + if sys.platform=='win32': + if os.environ.get('OSTYPE', '')=='msys': + return True + if os.environ.get('MSYSTEM', '')=='MINGW32': + return True + return False + +def msvc_runtime_version(): + "Return version of MSVC runtime library, as defined by __MSC_VER__ macro" + msc_pos = sys.version.find('MSC v.') + if msc_pos != -1: + msc_ver = int(sys.version[msc_pos+6:msc_pos+10]) + else: + msc_ver = None + return msc_ver + +def msvc_runtime_library(): + "Return name of MSVC runtime library if Python was built with MSVC >= 7" + ver = msvc_runtime_major () + if ver: + if ver < 140: + return "msvcr%i" % ver + else: + return "vcruntime%i" % ver + else: + return None + +def msvc_runtime_major(): + "Return major version of MSVC runtime coded like get_build_msvc_version" + major = {1300: 70, # MSVC 7.0 + 1310: 71, # MSVC 7.1 + 1400: 80, # MSVC 8 + 1500: 90, # MSVC 9 (aka 2008) + 1600: 100, # MSVC 10 (aka 2010) + 1900: 140, # MSVC 14 (aka 2015) + }.get(msvc_runtime_version(), None) + return major + +######################### + +#XXX need support for .C that is also C++ +cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match +fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match +f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match +f90_module_name_match = re.compile(r'\s*module\s*(?P[\w_]+)', re.I).match +def _get_f90_modules(source): + """Return a list of Fortran f90 module names that + given source file defines. + """ + if not f90_ext_match(source): + return [] + modules = [] + with open(source) as f: + for line in f: + m = f90_module_name_match(line) + if m: + name = m.group('name') + modules.append(name) + # break # XXX can we assume that there is one module per file? + return modules + +def is_string(s): + return isinstance(s, str) + +def all_strings(lst): + """Return True if all items in lst are string objects. """ + for item in lst: + if not is_string(item): + return False + return True + +def is_sequence(seq): + if is_string(seq): + return False + try: + len(seq) + except Exception: + return False + return True + +def is_glob_pattern(s): + return is_string(s) and ('*' in s or '?' in s) + +def as_list(seq): + if is_sequence(seq): + return list(seq) + else: + return [seq] + +def get_language(sources): + # not used in numpy/scipy packages, use build_ext.detect_language instead + """Determine language value (c,f77,f90) from sources """ + language = None + for source in sources: + if isinstance(source, str): + if f90_ext_match(source): + language = 'f90' + break + elif fortran_ext_match(source): + language = 'f77' + return language + +def has_f_sources(sources): + """Return True if sources contains Fortran files """ + for source in sources: + if fortran_ext_match(source): + return True + return False + +def has_cxx_sources(sources): + """Return True if sources contains C++ files """ + for source in sources: + if cxx_ext_match(source): + return True + return False + +def filter_sources(sources): + """Return four lists of filenames containing + C, C++, Fortran, and Fortran 90 module sources, + respectively. 
+ """ + c_sources = [] + cxx_sources = [] + f_sources = [] + fmodule_sources = [] + for source in sources: + if fortran_ext_match(source): + modules = _get_f90_modules(source) + if modules: + fmodule_sources.append(source) + else: + f_sources.append(source) + elif cxx_ext_match(source): + cxx_sources.append(source) + else: + c_sources.append(source) + return c_sources, cxx_sources, f_sources, fmodule_sources + + +def _get_headers(directory_list): + # get *.h files from list of directories + headers = [] + for d in directory_list: + head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? + headers.extend(head) + return headers + +def _get_directories(list_of_sources): + # get unique directories from list of sources. + direcs = [] + for f in list_of_sources: + d = os.path.split(f) + if d[0] != '' and not d[0] in direcs: + direcs.append(d[0]) + return direcs + +def _commandline_dep_string(cc_args, extra_postargs, pp_opts): + """ + Return commandline representation used to determine if a file needs + to be recompiled + """ + cmdline = 'commandline: ' + cmdline += ' '.join(cc_args) + cmdline += ' '.join(extra_postargs) + cmdline += ' '.join(pp_opts) + '\n' + return cmdline + + +def get_dependencies(sources): + #XXX scan sources for include statements + return _get_headers(_get_directories(sources)) + +def is_local_src_dir(directory): + """Return true if directory is local directory. + """ + if not is_string(directory): + return False + abs_dir = os.path.abspath(directory) + c = os.path.commonprefix([os.getcwd(), abs_dir]) + new_dir = abs_dir[len(c):].split(os.sep) + if new_dir and not new_dir[0]: + new_dir = new_dir[1:] + if new_dir and new_dir[0]=='build': + return False + new_dir = os.sep.join(new_dir) + return os.path.isdir(new_dir) + +def general_source_files(top_path): + pruned_directories = {'CVS':1, '.svn':1, 'build':1} + prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') + for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): + pruned = [ d for d in dirnames if d not in pruned_directories ] + dirnames[:] = pruned + for f in filenames: + if not prune_file_pat.search(f): + yield os.path.join(dirpath, f) + +def general_source_directories_files(top_path): + """Return a directory name relative to top_path and + files contained. + """ + pruned_directories = ['CVS', '.svn', 'build'] + prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') + for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): + pruned = [ d for d in dirnames if d not in pruned_directories ] + dirnames[:] = pruned + for d in dirnames: + dpath = os.path.join(dirpath, d) + rpath = rel_path(dpath, top_path) + files = [] + for f in os.listdir(dpath): + fn = os.path.join(dpath, f) + if os.path.isfile(fn) and not prune_file_pat.search(fn): + files.append(fn) + yield rpath, files + dpath = top_path + rpath = rel_path(dpath, top_path) + filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ + if not prune_file_pat.search(f)] + files = [f for f in filenames if os.path.isfile(f)] + yield rpath, files + + +def get_ext_source_files(ext): + # Get sources and any include files in the same directory. 
+ filenames = [] + sources = [_m for _m in ext.sources if is_string(_m)] + filenames.extend(sources) + filenames.extend(get_dependencies(sources)) + for d in ext.depends: + if is_local_src_dir(d): + filenames.extend(list(general_source_files(d))) + elif os.path.isfile(d): + filenames.append(d) + return filenames + +def get_script_files(scripts): + scripts = [_m for _m in scripts if is_string(_m)] + return scripts + +def get_lib_source_files(lib): + filenames = [] + sources = lib[1].get('sources', []) + sources = [_m for _m in sources if is_string(_m)] + filenames.extend(sources) + filenames.extend(get_dependencies(sources)) + depends = lib[1].get('depends', []) + for d in depends: + if is_local_src_dir(d): + filenames.extend(list(general_source_files(d))) + elif os.path.isfile(d): + filenames.append(d) + return filenames + +def get_shared_lib_extension(is_python_ext=False): + """Return the correct file extension for shared libraries. + + Parameters + ---------- + is_python_ext : bool, optional + Whether the shared library is a Python extension. Default is False. + + Returns + ------- + so_ext : str + The shared library extension. + + Notes + ----- + For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, + and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on + POSIX systems according to PEP 3149. + + """ + confvars = distutils.sysconfig.get_config_vars() + so_ext = confvars.get('EXT_SUFFIX', '') + + if not is_python_ext: + # hardcode known values, config vars (including SHLIB_SUFFIX) are + # unreliable (see #3182) + # darwin, windows and debug linux are wrong in 3.3.1 and older + if (sys.platform.startswith('linux') or + sys.platform.startswith('gnukfreebsd')): + so_ext = '.so' + elif sys.platform.startswith('darwin'): + so_ext = '.dylib' + elif sys.platform.startswith('win'): + so_ext = '.dll' + else: + # fall back to config vars for unknown platforms + # fix long extension for Python >=3.2, see PEP 3149. + if 'SOABI' in confvars: + # Does nothing unless SOABI config var exists + so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1) + + return so_ext + +def get_data_files(data): + if is_string(data): + return [data] + sources = data[1] + filenames = [] + for s in sources: + if hasattr(s, '__call__'): + continue + if is_local_src_dir(s): + filenames.extend(list(general_source_files(s))) + elif is_string(s): + if os.path.isfile(s): + filenames.append(s) + else: + print('Not existing data file:', s) + else: + raise TypeError(repr(s)) + return filenames + +def dot_join(*args): + return '.'.join([a for a in args if a]) + +def get_frame(level=0): + """Return frame object from call stack with given level. + """ + try: + return sys._getframe(level+1) + except AttributeError: + frame = sys.exc_info()[2].tb_frame + for _ in range(level+1): + frame = frame.f_back + return frame + + +###################### + +class Configuration: + + _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', + 'libraries', 'headers', 'scripts', 'py_modules', + 'installed_libraries', 'define_macros'] + _dict_keys = ['package_dir', 'installed_pkg_config'] + _extra_keys = ['name', 'version'] + + numpy_include_dirs = [] + + def __init__(self, + package_name=None, + parent_name=None, + top_path=None, + package_path=None, + caller_level=1, + setup_name='setup.py', + **attrs): + """Construct configuration instance of a package. 
+
+        package_name -- name of the package
+                        Ex.: 'distutils'
+        parent_name  -- name of the parent package
+                        Ex.: 'numpy'
+        top_path     -- directory of the toplevel package
+                        Ex.: the directory where the numpy package source sits
+        package_path -- directory of package. Will be computed by magic from the
+                        directory of the caller module if not specified
+                        Ex.: the directory where numpy.distutils is
+        caller_level -- frame level to caller namespace, internal parameter.
+        """
+        self.name = dot_join(parent_name, package_name)
+        self.version = None
+
+        caller_frame = get_frame(caller_level)
+        self.local_path = get_path_from_frame(caller_frame, top_path)
+        # local_path -- directory of a file (usually setup.py) that
+        #               defines a configuration() function.
+        if top_path is None:
+            top_path = self.local_path
+            self.local_path = ''
+        if package_path is None:
+            package_path = self.local_path
+        elif os.path.isdir(njoin(self.local_path, package_path)):
+            package_path = njoin(self.local_path, package_path)
+        if not os.path.isdir(package_path or '.'):
+            raise ValueError("%r is not a directory" % (package_path,))
+        self.top_path = top_path
+        self.package_path = package_path
+        # this is the relative path in the installed package
+        self.path_in_package = os.path.join(*self.name.split('.'))
+
+        self.list_keys = self._list_keys[:]
+        self.dict_keys = self._dict_keys[:]
+
+        for n in self.list_keys:
+            v = copy.copy(attrs.get(n, []))
+            setattr(self, n, as_list(v))
+
+        for n in self.dict_keys:
+            v = copy.copy(attrs.get(n, {}))
+            setattr(self, n, v)
+
+        known_keys = self.list_keys + self.dict_keys
+        self.extra_keys = self._extra_keys[:]
+        for n in attrs.keys():
+            if n in known_keys:
+                continue
+            a = attrs[n]
+            setattr(self, n, a)
+            if isinstance(a, list):
+                self.list_keys.append(n)
+            elif isinstance(a, dict):
+                self.dict_keys.append(n)
+            else:
+                self.extra_keys.append(n)
+
+        if os.path.exists(njoin(package_path, '__init__.py')):
+            self.packages.append(self.name)
+            self.package_dir[self.name] = package_path
+
+        self.options = dict(
+            ignore_setup_xxx_py = False,
+            assume_default_configuration = False,
+            delegate_options_to_subpackages = False,
+            quiet = False,
+            )
+
+        caller_instance = None
+        for i in range(1, 3):
+            try:
+                f = get_frame(i)
+            except ValueError:
+                break
+            try:
+                caller_instance = eval('self', f.f_globals, f.f_locals)
+                break
+            except NameError:
+                pass
+        if isinstance(caller_instance, self.__class__):
+            if caller_instance.options['delegate_options_to_subpackages']:
+                self.set_options(**caller_instance.options)
+
+        self.setup_name = setup_name
+
+    def todict(self):
+        """
+        Return a dictionary compatible with the keyword arguments of distutils
+        setup function.
+
+        Examples
+        --------
+        >>> setup(**config.todict())                       #doctest: +SKIP
+        """
+
+        self._optimize_data_files()
+        d = {}
+        known_keys = self.list_keys + self.dict_keys + self.extra_keys
+        for n in known_keys:
+            a = getattr(self, n)
+            if a:
+                d[n] = a
+        return d
+
+    def info(self, message):
+        if not self.options['quiet']:
+            print(message)
+
+    def warn(self, message):
+        sys.stderr.write('Warning: %s\n' % (message,))
+
+    def set_options(self, **options):
+        """
+        Configure Configuration instance.
+ + The following options are available: + - ignore_setup_xxx_py + - assume_default_configuration + - delegate_options_to_subpackages + - quiet + + """ + for key, value in options.items(): + if key in self.options: + self.options[key] = value + else: + raise ValueError('Unknown option: '+key) + + def get_distribution(self): + """Return the distutils distribution object for self.""" + from numpy.distutils.core import get_distribution + return get_distribution() + + def _wildcard_get_subpackage(self, subpackage_name, + parent_name, + caller_level = 1): + l = subpackage_name.split('.') + subpackage_path = njoin([self.local_path]+l) + dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)] + config_list = [] + for d in dirs: + if not os.path.isfile(njoin(d, '__init__.py')): + continue + if 'build' in d.split(os.sep): + continue + n = '.'.join(d.split(os.sep)[-len(l):]) + c = self.get_subpackage(n, + parent_name = parent_name, + caller_level = caller_level+1) + config_list.extend(c) + return config_list + + def _get_configuration_from_setup_py(self, setup_py, + subpackage_name, + subpackage_path, + parent_name, + caller_level = 1): + # In case setup_py imports local modules: + sys.path.insert(0, os.path.dirname(setup_py)) + try: + setup_name = os.path.splitext(os.path.basename(setup_py))[0] + n = dot_join(self.name, subpackage_name, setup_name) + setup_module = exec_mod_from_location( + '_'.join(n.split('.')), setup_py) + if not hasattr(setup_module, 'configuration'): + if not self.options['assume_default_configuration']: + self.warn('Assuming default configuration '\ + '(%s does not define configuration())'\ + % (setup_module)) + config = Configuration(subpackage_name, parent_name, + self.top_path, subpackage_path, + caller_level = caller_level + 1) + else: + pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) + args = (pn,) + if setup_module.configuration.__code__.co_argcount > 1: + args = args + (self.top_path,) + config = setup_module.configuration(*args) + if config.name!=dot_join(parent_name, subpackage_name): + self.warn('Subpackage %r configuration returned as %r' % \ + (dot_join(parent_name, subpackage_name), config.name)) + finally: + del sys.path[0] + return config + + def get_subpackage(self,subpackage_name, + subpackage_path=None, + parent_name=None, + caller_level = 1): + """Return list of subpackage configurations. + + Parameters + ---------- + subpackage_name : str or None + Name of the subpackage to get the configuration. '*' in + subpackage_name is handled as a wildcard. + subpackage_path : str + If None, then the path is assumed to be the local path plus the + subpackage_name. If a setup.py file is not found in the + subpackage_path, then a default configuration is used. + parent_name : str + Parent name. 
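+        caller_level : int, optional
+            Frame level up the call stack from which the caller's path is
+            resolved; internal parameter (see ``__init__``).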
+ """ + if subpackage_name is None: + if subpackage_path is None: + raise ValueError( + "either subpackage_name or subpackage_path must be specified") + subpackage_name = os.path.basename(subpackage_path) + + # handle wildcards + l = subpackage_name.split('.') + if subpackage_path is None and '*' in subpackage_name: + return self._wildcard_get_subpackage(subpackage_name, + parent_name, + caller_level = caller_level+1) + assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name)) + if subpackage_path is None: + subpackage_path = njoin([self.local_path] + l) + else: + subpackage_path = njoin([subpackage_path] + l[:-1]) + subpackage_path = self.paths([subpackage_path])[0] + setup_py = njoin(subpackage_path, self.setup_name) + if not self.options['ignore_setup_xxx_py']: + if not os.path.isfile(setup_py): + setup_py = njoin(subpackage_path, + 'setup_%s.py' % (subpackage_name)) + if not os.path.isfile(setup_py): + if not self.options['assume_default_configuration']: + self.warn('Assuming default configuration '\ + '(%s/{setup_%s,setup}.py was not found)' \ + % (os.path.dirname(setup_py), subpackage_name)) + config = Configuration(subpackage_name, parent_name, + self.top_path, subpackage_path, + caller_level = caller_level+1) + else: + config = self._get_configuration_from_setup_py( + setup_py, + subpackage_name, + subpackage_path, + parent_name, + caller_level = caller_level + 1) + if config: + return [config] + else: + return [] + + def add_subpackage(self,subpackage_name, + subpackage_path=None, + standalone = False): + """Add a sub-package to the current Configuration instance. + + This is useful in a setup.py script for adding sub-packages to a + package. + + Parameters + ---------- + subpackage_name : str + name of the subpackage + subpackage_path : str + if given, the subpackage path such as the subpackage is in + subpackage_path / subpackage_name. If None,the subpackage is + assumed to be located in the local path / subpackage_name. + standalone : bool + """ + + if standalone: + parent_name = None + else: + parent_name = self.name + config_list = self.get_subpackage(subpackage_name, subpackage_path, + parent_name = parent_name, + caller_level = 2) + if not config_list: + self.warn('No configuration returned, assuming unavailable.') + for config in config_list: + d = config + if isinstance(config, Configuration): + d = config.todict() + assert isinstance(d, dict), repr(type(d)) + + self.info('Appending %s configuration to %s' \ + % (d.get('name'), self.name)) + self.dict_append(**d) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add a subpackage '+ subpackage_name) + + def add_data_dir(self, data_path): + """Recursively add files under data_path to data_files list. + + Recursively add files under data_path to the list of data_files to be + installed (and distributed). The data_path can be either a relative + path-name, or an absolute path-name, or a 2-tuple where the first + argument shows where in the install directory the data directory + should be installed to. + + Parameters + ---------- + data_path : seq or str + Argument can be either + + * 2-sequence (, ) + * path to data directory where python datadir suffix defaults + to package dir. 
+
+        Notes
+        -----
+        Rules for installation paths::
+
+            foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
+            (gun, foo/bar) -> parent/gun
+            foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
+            (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> parent/gun
+            (gun/*, foo/*) -> parent/gun/a, parent/gun/b
+            /foo/bar -> (bar, /foo/bar) -> parent/bar
+            (gun, /foo/bar) -> parent/gun
+            (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
+
+        Examples
+        --------
+        For example suppose the source directory contains fun/foo.dat and
+        fun/bar/car.dat:
+
+        >>> self.add_data_dir('fun')                       #doctest: +SKIP
+        >>> self.add_data_dir(('sun', 'fun'))              #doctest: +SKIP
+        >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
+
+        Will install data-files to the locations::
+
+            <package install directory>/
+              fun/
+                foo.dat
+                bar/
+                  car.dat
+              sun/
+                foo.dat
+                bar/
+                  car.dat
+              gun/
+                foo.dat
+                car.dat
+
+        """
+        if is_sequence(data_path):
+            d, data_path = data_path
+        else:
+            d = None
+        if is_sequence(data_path):
+            [self.add_data_dir((d, p)) for p in data_path]
+            return
+        if not is_string(data_path):
+            raise TypeError("not a string: %r" % (data_path,))
+        if d is None:
+            if os.path.isabs(data_path):
+                return self.add_data_dir((os.path.basename(data_path), data_path))
+            return self.add_data_dir((data_path, data_path))
+        paths = self.paths(data_path, include_non_existing=False)
+        if is_glob_pattern(data_path):
+            if is_glob_pattern(d):
+                pattern_list = allpath(d).split(os.sep)
+                pattern_list.reverse()
+                # /a/*//b/ -> /a/*/b
+                rl = list(range(len(pattern_list)-1)); rl.reverse()
+                for i in rl:
+                    if not pattern_list[i]:
+                        del pattern_list[i]
+                #
+                for path in paths:
+                    if not os.path.isdir(path):
+                        print('Not a directory, skipping', path)
+                        continue
+                    rpath = rel_path(path, self.local_path)
+                    path_list = rpath.split(os.sep)
+                    path_list.reverse()
+                    target_list = []
+                    i = 0
+                    for s in pattern_list:
+                        if is_glob_pattern(s):
+                            if i>=len(path_list):
+                                raise ValueError('cannot fill pattern %r with %r' \
+                                      % (d, path))
+                            target_list.append(path_list[i])
+                        else:
+                            assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
+                            target_list.append(s)
+                        i += 1
+                    if path_list[i:]:
+                        self.warn('mismatch of pattern_list=%s and path_list=%s'\
+                                  % (pattern_list, path_list))
+                    target_list.reverse()
+                    self.add_data_dir((os.sep.join(target_list), path))
+            else:
+                for path in paths:
+                    self.add_data_dir((d, path))
+            return
+        assert not is_glob_pattern(d), repr(d)
+
+        dist = self.get_distribution()
+        if dist is not None and dist.data_files is not None:
+            data_files = dist.data_files
+        else:
+            data_files = self.data_files
+
+        for path in paths:
+            for d1, f in list(general_source_directories_files(path)):
+                target_path = os.path.join(self.path_in_package, d, d1)
+                data_files.append((target_path, f))
+
+    def _optimize_data_files(self):
+        data_dict = {}
+        for p, files in self.data_files:
+            if p not in data_dict:
+                data_dict[p] = set()
+            for f in files:
+                data_dict[p].add(f)
+        self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
+
+    def add_data_files(self,*files):
+        """Add data files to configuration data_files.
+
+        Parameters
+        ----------
+        files : sequence
+            Argument(s) can be either
+
+                * 2-sequence (<datadir prefix>, <path to data file(s)>)
+                * paths to data files where python datadir prefix defaults
+                  to package dir.
+
+        Notes
+        -----
+        The form of each element of the files sequence is very flexible
+        allowing many combinations of where to get the files from the package
+        and where they should ultimately be installed on the system.
+        The most basic usage is for an element of the files argument sequence
+        to be a simple filename. This will cause that file from the local path
+        to be installed to the installation path of the self.name package
+        (package path). The file argument can also be a relative path in which
+        case the entire relative path will be installed into the package
+        directory. Finally, the file can be an absolute path name in which
+        case the file will be found at the absolute path name but installed
+        to the package path.
+
+        This basic behavior can be augmented by passing a 2-tuple in as the
+        file argument. The first element of the tuple should specify the
+        relative path (under the package install directory) where the
+        remaining sequence of files should be installed to (it has nothing to
+        do with the file-names in the source distribution). The second element
+        of the tuple is the sequence of files that should be installed. The
+        files in this sequence can be filenames, relative paths, or absolute
+        paths. For absolute paths the file will be installed in the top-level
+        package installation directory (regardless of the first argument).
+        Filenames and relative path names will be installed in the package
+        install directory under the path name given as the first element of
+        the tuple.
+
+        Rules for installation paths:
+
+          #. file.txt -> (., file.txt)-> parent/file.txt
+          #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
+          #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
+          #. ``*``.txt -> parent/a.txt, parent/b.txt
+          #. foo/``*``.txt -> parent/foo/a.txt, parent/foo/b.txt
+          #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
+          #. (sun, file.txt) -> parent/sun/file.txt
+          #. (sun, bar/file.txt) -> parent/sun/file.txt
+          #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
+          #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+          #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+          #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
+
+        An additional feature is that the path to a data-file can actually be
+        a function that takes no arguments and returns the actual path(s) to
+        the data-files. This is useful when the data files are generated while
+        building the package.
+
+        Examples
+        --------
+        Add files to the list of data_files to be included with the package.
+
+        >>> self.add_data_files('foo.dat',
+        ...     ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
+        ...     'bar/cat.dat',
+        ...     '/full/path/to/can.dat')                   #doctest: +SKIP
+
+        will install these data files to::
+
+            <package install directory>/
+             foo.dat
+             fun/
+               gun.dat
+               nun/
+                 pun.dat
+             sun.dat
+             bar/
+               cat.dat
+             can.dat
+
+        where <package install directory> is the package (or sub-package)
+        directory such as '/usr/lib/python2.4/site-packages/mypackage'
+        ('C:\\Python2.4\\Lib\\site-packages\\mypackage') or
+        '/usr/lib/python2.4/site-packages/mypackage/mysubpackage'
+        ('C:\\Python2.4\\Lib\\site-packages\\mypackage\\mysubpackage').
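+
+        As noted above, a data file may also be produced at build time by
+        passing a zero-argument callable (illustrative sketch; ``gen`` is a
+        hypothetical generator):
+
+        >>> def gen(): return 'generated.dat'              #doctest: +SKIP
+        >>> self.add_data_files(gen)                       #doctest: +SKIP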
+ """ + + if len(files)>1: + for f in files: + self.add_data_files(f) + return + assert len(files)==1 + if is_sequence(files[0]): + d, files = files[0] + else: + d = None + if is_string(files): + filepat = files + elif is_sequence(files): + if len(files)==1: + filepat = files[0] + else: + for f in files: + self.add_data_files((d, f)) + return + else: + raise TypeError(repr(type(files))) + + if d is None: + if hasattr(filepat, '__call__'): + d = '' + elif os.path.isabs(filepat): + d = '' + else: + d = os.path.dirname(filepat) + self.add_data_files((d, files)) + return + + paths = self.paths(filepat, include_non_existing=False) + if is_glob_pattern(filepat): + if is_glob_pattern(d): + pattern_list = d.split(os.sep) + pattern_list.reverse() + for path in paths: + path_list = path.split(os.sep) + path_list.reverse() + path_list.pop() # filename + target_list = [] + i = 0 + for s in pattern_list: + if is_glob_pattern(s): + target_list.append(path_list[i]) + i += 1 + else: + target_list.append(s) + target_list.reverse() + self.add_data_files((os.sep.join(target_list), path)) + else: + self.add_data_files((d, paths)) + return + assert not is_glob_pattern(d), repr((d, filepat)) + + dist = self.get_distribution() + if dist is not None and dist.data_files is not None: + data_files = dist.data_files + else: + data_files = self.data_files + + data_files.append((os.path.join(self.path_in_package, d), paths)) + + ### XXX Implement add_py_modules + + def add_define_macros(self, macros): + """Add define macros to configuration + + Add the given sequence of macro name and value duples to the beginning + of the define_macros list This list will be visible to all extension + modules of the current package. + """ + dist = self.get_distribution() + if dist is not None: + if not hasattr(dist, 'define_macros'): + dist.define_macros = [] + dist.define_macros.extend(macros) + else: + self.define_macros.extend(macros) + + + def add_include_dirs(self,*paths): + """Add paths to configuration include directories. + + Add the given sequence of paths to the beginning of the include_dirs + list. This list will be visible to all extension modules of the + current package. + """ + include_dirs = self.paths(paths) + dist = self.get_distribution() + if dist is not None: + if dist.include_dirs is None: + dist.include_dirs = [] + dist.include_dirs.extend(include_dirs) + else: + self.include_dirs.extend(include_dirs) + + def add_headers(self,*files): + """Add installable headers to configuration. + + Add the given sequence of files to the beginning of the headers list. + By default, headers will be installed under // directory. If an item of files + is a tuple, then its first argument specifies the actual installation + location relative to the path. + + Parameters + ---------- + files : str or seq + Argument(s) can be either: + + * 2-sequence (,) + * path(s) to header file(s) where python includedir suffix will + default to package name. + """ + headers = [] + for path in files: + if is_string(path): + [headers.append((self.name, p)) for p in self.paths(path)] + else: + if not isinstance(path, (tuple, list)) or len(path) != 2: + raise TypeError(repr(path)) + [headers.append((path[0], p)) for p in self.paths(path[1])] + dist = self.get_distribution() + if dist is not None: + if dist.headers is None: + dist.headers = [] + dist.headers.extend(headers) + else: + self.headers.extend(headers) + + def paths(self,*paths,**kws): + """Apply glob to paths and prepend local_path if needed. + + Applies glob.glob(...) 
to each path in the sequence (if needed) and + pre-pends the local_path if needed. Because this is called on all + source lists, this allows wildcard characters to be specified in lists + of sources for extension modules and libraries and scripts and allows + path-names be relative to the source directory. + + """ + include_non_existing = kws.get('include_non_existing', True) + return gpaths(paths, + local_path = self.local_path, + include_non_existing=include_non_existing) + + def _fix_paths_dict(self, kw): + for k in kw.keys(): + v = kw[k] + if k in ['sources', 'depends', 'include_dirs', 'library_dirs', + 'module_dirs', 'extra_objects']: + new_v = self.paths(v) + kw[k] = new_v + + def add_extension(self,name,sources,**kw): + """Add extension to configuration. + + Create and add an Extension instance to the ext_modules list. This + method also takes the following optional keyword arguments that are + passed on to the Extension constructor. + + Parameters + ---------- + name : str + name of the extension + sources : seq + list of the sources. The list of sources may contain functions + (called source generators) which must take an extension instance + and a build directory as inputs and return a source file or list of + source files or None. If None is returned then no sources are + generated. If the Extension instance has no sources after + processing all source generators, then no extension module is + built. + include_dirs : + define_macros : + undef_macros : + library_dirs : + libraries : + runtime_library_dirs : + extra_objects : + extra_compile_args : + extra_link_args : + extra_f77_compile_args : + extra_f90_compile_args : + export_symbols : + swig_opts : + depends : + The depends list contains paths to files or directories that the + sources of the extension module depend on. If any path in the + depends list is newer than the extension module, then the module + will be rebuilt. + language : + f2py_options : + module_dirs : + extra_info : dict or list + dict or list of dict of keywords to be appended to keywords. + + Notes + ----- + The self.paths(...) method is applied to all lists that may contain + paths. 
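+
+        Examples
+        --------
+        A minimal sketch (hypothetical extension and source names):
+
+        >>> config.add_extension('_foo', sources=['_foomodule.c'])  #doctest: +SKIP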
+ """ + ext_args = copy.copy(kw) + ext_args['name'] = dot_join(self.name, name) + ext_args['sources'] = sources + + if 'extra_info' in ext_args: + extra_info = ext_args['extra_info'] + del ext_args['extra_info'] + if isinstance(extra_info, dict): + extra_info = [extra_info] + for info in extra_info: + assert isinstance(info, dict), repr(info) + dict_append(ext_args,**info) + + self._fix_paths_dict(ext_args) + + # Resolve out-of-tree dependencies + libraries = ext_args.get('libraries', []) + libnames = [] + ext_args['libraries'] = [] + for libname in libraries: + if isinstance(libname, tuple): + self._fix_paths_dict(libname[1]) + + # Handle library names of the form libname@relative/path/to/library + if '@' in libname: + lname, lpath = libname.split('@', 1) + lpath = os.path.abspath(njoin(self.local_path, lpath)) + if os.path.isdir(lpath): + c = self.get_subpackage(None, lpath, + caller_level = 2) + if isinstance(c, Configuration): + c = c.todict() + for l in [l[0] for l in c.get('libraries', [])]: + llname = l.split('__OF__', 1)[0] + if llname == lname: + c.pop('name', None) + dict_append(ext_args,**c) + break + continue + libnames.append(libname) + + ext_args['libraries'] = libnames + ext_args['libraries'] + ext_args['define_macros'] = \ + self.define_macros + ext_args.get('define_macros', []) + + from numpy.distutils.core import Extension + ext = Extension(**ext_args) + self.ext_modules.append(ext) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add an extension '+name) + return ext + + def add_library(self,name,sources,**build_info): + """ + Add library to configuration. + + Parameters + ---------- + name : str + Name of the extension. + sources : sequence + List of the sources. The list of sources may contain functions + (called source generators) which must take an extension instance + and a build directory as inputs and return a source file or list of + source files or None. If None is returned then no sources are + generated. If the Extension instance has no sources after + processing all source generators, then no extension module is + built. + build_info : dict, optional + The following keys are allowed: + + * depends + * macros + * include_dirs + * extra_compiler_args + * extra_f77_compile_args + * extra_f90_compile_args + * f2py_options + * language + + """ + self._add_library(name, sources, None, build_info) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add a library '+ name) + + def _add_library(self, name, sources, install_dir, build_info): + """Common implementation for add_library and add_installed_library. Do + not use directly""" + build_info = copy.copy(build_info) + build_info['sources'] = sources + + # Sometimes, depends is not set up to an empty list by default, and if + # depends is not given to add_library, distutils barfs (#1134) + if not 'depends' in build_info: + build_info['depends'] = [] + + self._fix_paths_dict(build_info) + + # Add to libraries list so that it is build with build_clib + self.libraries.append((name, build_info)) + + def add_installed_library(self, name, sources, install_dir, build_info=None): + """ + Similar to add_library, but the specified library is installed. 
+
+        Most C libraries used with `distutils` are only used to build python
+        extensions, but libraries built through this method will be installed
+        so that they can be reused by third-party packages.
+
+        Parameters
+        ----------
+        name : str
+            Name of the installed library.
+        sources : sequence
+            List of the library's source files. See `add_library` for details.
+        install_dir : str
+            Path to install the library, relative to the current sub-package.
+        build_info : dict, optional
+            The following keys are allowed:
+
+                * depends
+                * macros
+                * include_dirs
+                * extra_compiler_args
+                * extra_f77_compile_args
+                * extra_f90_compile_args
+                * f2py_options
+                * language
+
+        Returns
+        -------
+        None
+
+        See Also
+        --------
+        add_library, add_npy_pkg_config, get_info
+
+        Notes
+        -----
+        The best way to encode the options required to link against the
+        specified C libraries is to use a "libname.ini" file, and use
+        `get_info` to retrieve the required options (see `add_npy_pkg_config`
+        for more information).
+
+        """
+        if not build_info:
+            build_info = {}
+
+        install_dir = os.path.join(self.package_path, install_dir)
+        self._add_library(name, sources, install_dir, build_info)
+        self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
+
+    def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
+        """
+        Generate and install a npy-pkg config file from a template.
+
+        The config file generated from `template` is installed in the
+        given install directory, using `subst_dict` for variable substitution.
+
+        Parameters
+        ----------
+        template : str
+            The path of the template, relative to the current package path.
+        install_dir : str
+            Where to install the npy-pkg config file, relative to the current
+            package path.
+        subst_dict : dict, optional
+            If given, any string of the form ``@key@`` will be replaced by
+            ``subst_dict[key]`` in the template file when installed. The
+            install prefix is always available through the variable
+            ``@prefix@``, since the install prefix is not easy to get reliably
+            from setup.py.
+
+        See also
+        --------
+        add_installed_library, get_info
+
+        Notes
+        -----
+        This works for both standard installs and in-place builds, i.e. the
+        ``@prefix@`` refers to the source directory for in-place builds.
+
+        Examples
+        --------
+        ::
+
+            config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': 'bar'})
+
+        Assuming the foo.ini.in file has the following content::
+
+            [meta]
+            Name=@foo@
+            Version=1.0
+            Description=dummy description
+
+            [default]
+            Cflags=-I@prefix@/include
+            Libs=
+
+        The generated file will have the following content::
+
+            [meta]
+            Name=bar
+            Version=1.0
+            Description=dummy description
+
+            [default]
+            Cflags=-Iprefix_dir/include
+            Libs=
+
+        and will be installed as foo.ini in the 'lib' subpath.
+
+        When cross-compiling with numpy distutils, it might be necessary to
+        use modified npy-pkg-config files. Using the default/generated files
+        will link with the host libraries (i.e. libnpymath.a). For
+        cross-compilation you of course need to link with target libraries,
+        while using the host Python installation.
+
+        You can copy out the numpy/core/lib/npy-pkg-config directory, add a
+        pkgdir value to the .ini files and set the NPY_PKG_CONFIG_PATH
+        environment variable to point to the directory with the modified
+        npy-pkg-config files.
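+
+        A sketch of pointing the build at such modified files (the path is
+        hypothetical)::
+
+            os.environ['NPY_PKG_CONFIG_PATH'] = '/path/to/modified/npy-pkg-config'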
+ + Example npymath.ini modified for cross-compilation:: + + [meta] + Name=npymath + Description=Portable, core math library implementing C99 standard + Version=0.1 + + [variables] + pkgname=numpy.core + pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core + prefix=${pkgdir} + libdir=${prefix}/lib + includedir=${prefix}/include + + [default] + Libs=-L${libdir} -lnpymath + Cflags=-I${includedir} + Requires=mlib + + [msvc] + Libs=/LIBPATH:${libdir} npymath.lib + Cflags=/INCLUDE:${includedir} + Requires=mlib + + """ + if subst_dict is None: + subst_dict = {} + template = os.path.join(self.package_path, template) + + if self.name in self.installed_pkg_config: + self.installed_pkg_config[self.name].append((template, install_dir, + subst_dict)) + else: + self.installed_pkg_config[self.name] = [(template, install_dir, + subst_dict)] + + + def add_scripts(self,*files): + """Add scripts to configuration. + + Add the sequence of files to the beginning of the scripts list. + Scripts will be installed under the /bin/ directory. + + """ + scripts = self.paths(files) + dist = self.get_distribution() + if dist is not None: + if dist.scripts is None: + dist.scripts = [] + dist.scripts.extend(scripts) + else: + self.scripts.extend(scripts) + + def dict_append(self,**dict): + for key in self.list_keys: + a = getattr(self, key) + a.extend(dict.get(key, [])) + for key in self.dict_keys: + a = getattr(self, key) + a.update(dict.get(key, {})) + known_keys = self.list_keys + self.dict_keys + self.extra_keys + for key in dict.keys(): + if key not in known_keys: + a = getattr(self, key, None) + if a and a==dict[key]: continue + self.warn('Inheriting attribute %r=%r from %r' \ + % (key, dict[key], dict.get('name', '?'))) + setattr(self, key, dict[key]) + self.extra_keys.append(key) + elif key in self.extra_keys: + self.info('Ignoring attempt to set %r (from %r to %r)' \ + % (key, getattr(self, key), dict[key])) + elif key in known_keys: + # key is already processed above + pass + else: + raise ValueError("Don't know about key=%r" % (key)) + + def __str__(self): + from pprint import pformat + known_keys = self.list_keys + self.dict_keys + self.extra_keys + s = '<'+5*'-' + '\n' + s += 'Configuration of '+self.name+':\n' + known_keys.sort() + for k in known_keys: + a = getattr(self, k, None) + if a: + s += '%s = %s\n' % (k, pformat(a)) + s += 5*'-' + '>' + return s + + def get_config_cmd(self): + """ + Returns the numpy.distutils config command instance. + """ + cmd = get_cmd('config') + cmd.ensure_finalized() + cmd.dump_source = 0 + cmd.noisy = 0 + old_path = os.environ.get('PATH') + if old_path: + path = os.pathsep.join(['.', old_path]) + os.environ['PATH'] = path + return cmd + + def get_build_temp_dir(self): + """ + Return a path to a temporary directory where temporary files should be + placed. + """ + cmd = get_cmd('build') + cmd.ensure_finalized() + return cmd.build_temp + + def have_f77c(self): + """Check for availability of Fortran 77 compiler. + + Use it inside source generating function to ensure that + setup distribution instance has been initialized. + + Notes + ----- + True if a Fortran 77 compiler is available (because a simple Fortran 77 + code was able to be compiled successfully). + """ + simple_fortran_subroutine = ''' + subroutine simple + end + ''' + config_cmd = self.get_config_cmd() + flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') + return flag + + def have_f90c(self): + """Check for availability of Fortran 90 compiler. 
+
+        Use it inside source generating function to ensure that
+        setup distribution instance has been initialized.
+
+        Notes
+        -----
+        True if a Fortran 90 compiler is available (because a simple Fortran
+        90 code was able to be compiled successfully)
+        """
+        simple_fortran_subroutine = '''
+        subroutine simple
+        end
+        '''
+        config_cmd = self.get_config_cmd()
+        flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
+        return flag
+
+    def append_to(self, extlib):
+        """Append libraries, include_dirs to extension or library item.
+        """
+        if is_sequence(extlib):
+            lib_name, build_info = extlib
+            dict_append(build_info,
+                        libraries=self.libraries,
+                        include_dirs=self.include_dirs)
+        else:
+            from numpy.distutils.core import Extension
+            assert isinstance(extlib, Extension), repr(extlib)
+            extlib.libraries.extend(self.libraries)
+            extlib.include_dirs.extend(self.include_dirs)
+
+    def _get_svn_revision(self, path):
+        """Return path's SVN revision number.
+        """
+        try:
+            output = subprocess.check_output(['svnversion'], cwd=path)
+        except (subprocess.CalledProcessError, OSError):
+            pass
+        else:
+            m = re.match(rb'(?P<revision>\d+)', output)
+            if m:
+                return int(m.group('revision'))
+
+        if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
+            entries = njoin(path, '_svn', 'entries')
+        else:
+            entries = njoin(path, '.svn', 'entries')
+        if os.path.isfile(entries):
+            with open(entries) as f:
+                fstr = f.read()
+            if fstr[:5] == '<?xml':  # pre 1.4
+                m = re.search(r'revision="(?P<revision>\d+)"', fstr)
+                if m:
+                    return int(m.group('revision'))
+            else:  # non-xml entries file --- check to be sure that
+                m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
+                if m:
+                    return int(m.group('revision'))
+        return None
+
+    def _get_hg_revision(self, path):
+        """Return path's Mercurial revision number.
+        """
+        try:
+            output = subprocess.check_output(
+                ['hg', 'identify', '--num'], cwd=path)
+        except (subprocess.CalledProcessError, OSError):
+            pass
+        else:
+            m = re.match(rb'(?P<revision>\d+)', output)
+            if m:
+                return int(m.group('revision'))
+
+        branch_fn = njoin(path, '.hg', 'branch')
+        branch_cache_fn = njoin(path, '.hg', 'branch.cache')
+
+        if os.path.isfile(branch_fn):
+            branch0 = None
+            with open(branch_fn) as f:
+                revision0 = f.read().strip()
+
+            branch_map = {}
+            with open(branch_cache_fn) as f:
+                for line in f:
+                    branch1, revision1 = line.split()[:2]
+                    if revision1==revision0:
+                        branch0 = branch1
+                    try:
+                        revision1 = int(revision1)
+                    except ValueError:
+                        continue
+                    branch_map[branch1] = revision1
+
+            return branch_map.get(branch0)
+
+        return None
+
+
+    def get_version(self, version_file=None, version_variable=None):
+        """Try to get version string of a package.
+
+        Return a version string of the current package or None if the version
+        information could not be detected.
+
+        Notes
+        -----
+        This method scans files named
+        __version__.py, <packagename>_version.py, version.py, and
+        __svn_version__.py for string variables version, __version__, and
+        <packagename>_version, until a version number is found.
+        """
+        version = getattr(self, 'version', None)
+        if version is not None:
+            return version
+
+        # Get version from version file.
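+        # The candidate file names and variable names below are tried in
+        # order; the first variable that resolves wins.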
+ if version_file is None: + files = ['__version__.py', + self.name.split('.')[-1]+'_version.py', + 'version.py', + '__svn_version__.py', + '__hg_version__.py'] + else: + files = [version_file] + if version_variable is None: + version_vars = ['version', + '__version__', + self.name.split('.')[-1]+'_version'] + else: + version_vars = [version_variable] + for f in files: + fn = njoin(self.local_path, f) + if os.path.isfile(fn): + info = ('.py', 'U', 1) + name = os.path.splitext(os.path.basename(fn))[0] + n = dot_join(self.name, name) + try: + version_module = exec_mod_from_location( + '_'.join(n.split('.')), fn) + except ImportError as e: + self.warn(str(e)) + version_module = None + if version_module is None: + continue + + for a in version_vars: + version = getattr(version_module, a, None) + if version is not None: + break + + # Try if versioneer module + try: + version = version_module.get_versions()['version'] + except AttributeError: + pass + + if version is not None: + break + + if version is not None: + self.version = version + return version + + # Get version as SVN or Mercurial revision number + revision = self._get_svn_revision(self.local_path) + if revision is None: + revision = self._get_hg_revision(self.local_path) + + if revision is not None: + version = str(revision) + self.version = version + + return version + + def make_svn_version_py(self, delete=True): + """Appends a data function to the data_files list that will generate + __svn_version__.py file to the current package directory. + + Generate package __svn_version__.py file from SVN revision number, + it will be removed after python exits but will be available + when sdist, etc commands are executed. + + Notes + ----- + If __svn_version__.py existed before, nothing is done. + + This is + intended for working with source directories that are in an SVN + repository. + """ + target = njoin(self.local_path, '__svn_version__.py') + revision = self._get_svn_revision(self.local_path) + if os.path.isfile(target) or revision is None: + return + else: + def generate_svn_version_py(): + if not os.path.isfile(target): + version = str(revision) + self.info('Creating %s (version=%r)' % (target, version)) + with open(target, 'w') as f: + f.write('version = %r\n' % (version)) + + def rm_file(f=target,p=self.info): + if delete: + try: os.remove(f); p('removed '+f) + except OSError: pass + try: os.remove(f+'c'); p('removed '+f+'c') + except OSError: pass + + atexit.register(rm_file) + + return target + + self.add_data_files(('', generate_svn_version_py())) + + def make_hg_version_py(self, delete=True): + """Appends a data function to the data_files list that will generate + __hg_version__.py file to the current package directory. + + Generate package __hg_version__.py file from Mercurial revision, + it will be removed after python exits but will be available + when sdist, etc commands are executed. + + Notes + ----- + If __hg_version__.py existed before, nothing is done. + + This is intended for working with source directories that are + in an Mercurial repository. 
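+
+        A typical call from a ``configuration()`` function (illustrative):
+
+        >>> config.make_hg_version_py()                    #doctest: +SKIP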
+ """ + target = njoin(self.local_path, '__hg_version__.py') + revision = self._get_hg_revision(self.local_path) + if os.path.isfile(target) or revision is None: + return + else: + def generate_hg_version_py(): + if not os.path.isfile(target): + version = str(revision) + self.info('Creating %s (version=%r)' % (target, version)) + with open(target, 'w') as f: + f.write('version = %r\n' % (version)) + + def rm_file(f=target,p=self.info): + if delete: + try: os.remove(f); p('removed '+f) + except OSError: pass + try: os.remove(f+'c'); p('removed '+f+'c') + except OSError: pass + + atexit.register(rm_file) + + return target + + self.add_data_files(('', generate_hg_version_py())) + + def make_config_py(self,name='__config__'): + """Generate package __config__.py file containing system_info + information used during building the package. + + This file is installed to the + package installation directory. + + """ + self.py_modules.append((self.name, name, generate_config_py)) + + def get_info(self,*names): + """Get resources information. + + Return information (from system_info.get_info) for all of the names in + the argument list in a single dictionary. + """ + from .system_info import get_info, dict_append + info_dict = {} + for a in names: + dict_append(info_dict,**get_info(a)) + return info_dict + + +def get_cmd(cmdname, _cache={}): + if cmdname not in _cache: + import distutils.core + dist = distutils.core._setup_distribution + if dist is None: + from distutils.errors import DistutilsInternalError + raise DistutilsInternalError( + 'setup distribution instance not initialized') + cmd = dist.get_command_obj(cmdname) + _cache[cmdname] = cmd + return _cache[cmdname] + +def get_numpy_include_dirs(): + # numpy_include_dirs are set by numpy/core/setup.py, otherwise [] + include_dirs = Configuration.numpy_include_dirs[:] + if not include_dirs: + import numpy + include_dirs = [ numpy.get_include() ] + # else running numpy/core/setup.py + return include_dirs + +def get_npy_pkg_dir(): + """Return the path where to find the npy-pkg-config directory. + + If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that + is returned. Otherwise, a path inside the location of the numpy module is + returned. + + The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining + customized npy-pkg-config .ini files for the cross-compilation + environment, and using them when cross-compiling. + + """ + d = os.environ.get('NPY_PKG_CONFIG_PATH') + if d is not None: + return d + spec = importlib.util.find_spec('numpy') + d = os.path.join(os.path.dirname(spec.origin), + 'core', 'lib', 'npy-pkg-config') + return d + +def get_pkg_info(pkgname, dirs=None): + """ + Return library info for the given package. + + Parameters + ---------- + pkgname : str + Name of the package (should match the name of the .ini file, without + the extension, e.g. foo for the file foo.ini). + dirs : sequence, optional + If given, should be a sequence of additional directories where to look + for npy-pkg-config files. Those directories are searched prior to the + NumPy directory. + + Returns + ------- + pkginfo : class instance + The `LibraryInfo` instance containing the build information. + + Raises + ------ + PkgNotFound + If the package is not found. 
+
+    See Also
+    --------
+    Configuration.add_npy_pkg_config, Configuration.add_installed_library,
+    get_info
+
+    """
+    from numpy.distutils.npy_pkg_config import read_config
+
+    if dirs:
+        dirs.append(get_npy_pkg_dir())
+    else:
+        dirs = [get_npy_pkg_dir()]
+    return read_config(pkgname, dirs)
+
+def get_info(pkgname, dirs=None):
+    """
+    Return an info dict for a given C library.
+
+    The info dict contains the necessary options to use the C library.
+
+    Parameters
+    ----------
+    pkgname : str
+        Name of the package (should match the name of the .ini file, without
+        the extension, e.g. foo for the file foo.ini).
+    dirs : sequence, optional
+        If given, should be a sequence of additional directories where to look
+        for npy-pkg-config files. Those directories are searched prior to the
+        NumPy directory.
+
+    Returns
+    -------
+    info : dict
+        The dictionary with build information.
+
+    Raises
+    ------
+    PkgNotFound
+        If the package is not found.
+
+    See Also
+    --------
+    Configuration.add_npy_pkg_config, Configuration.add_installed_library,
+    get_pkg_info
+
+    Examples
+    --------
+    To get the necessary information for the npymath library from NumPy:
+
+    >>> npymath_info = np.distutils.misc_util.get_info('npymath')
+    >>> npymath_info                                    #doctest: +SKIP
+    {'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
+    ['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
+
+    This info dict can then be used as input to a `Configuration` instance::
+
+      config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
+
+    """
+    from numpy.distutils.npy_pkg_config import parse_flags
+    pkg_info = get_pkg_info(pkgname, dirs)
+
+    # Translate LibraryInfo instance into a build_info dict
+    info = parse_flags(pkg_info.cflags())
+    for k, v in parse_flags(pkg_info.libs()).items():
+        info[k].extend(v)
+
+    # add_extension expects distutils-style keys, so rename 'macros' to
+    # 'define_macros' and drop the 'ignored' bucket.
+    info['define_macros'] = info['macros']
+    del info['macros']
+    del info['ignored']
+
+    return info
+
+def is_bootstrapping():
+    import builtins
+
+    try:
+        builtins.__NUMPY_SETUP__
+        return True
+    except AttributeError:
+        return False
+
+
+#########################
+
+def default_config_dict(name = None, parent_name = None, local_path=None):
+    """Return a configuration dictionary for usage in
+    configuration() function defined in file setup_<name>.py.
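+
+    Deprecated: prefer constructing a ``Configuration`` instance and calling
+    its ``todict()`` method; this helper merely wraps that and emits a
+    warning (see below).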
+ """ + import warnings + warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ + 'deprecated default_config_dict(%r,%r,%r)' + % (name, parent_name, local_path, + name, parent_name, local_path, + ), stacklevel=2) + c = Configuration(name, parent_name, local_path) + return c.todict() + + +def dict_append(d, **kws): + for k, v in kws.items(): + if k in d: + ov = d[k] + if isinstance(ov, str): + d[k] = v + else: + d[k].extend(v) + else: + d[k] = v + +def appendpath(prefix, path): + if os.path.sep != '/': + prefix = prefix.replace('/', os.path.sep) + path = path.replace('/', os.path.sep) + drive = '' + if os.path.isabs(path): + drive = os.path.splitdrive(prefix)[0] + absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] + pathdrive, path = os.path.splitdrive(path) + d = os.path.commonprefix([absprefix, path]) + if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ + or os.path.join(path[:len(d)], path[len(d):]) != path: + # Handle invalid paths + d = os.path.dirname(d) + subpath = path[len(d):] + if os.path.isabs(subpath): + subpath = subpath[1:] + else: + subpath = path + return os.path.normpath(njoin(drive + prefix, subpath)) + +def generate_config_py(target): + """Generate config.py file containing system_info information + used during building the package. + + Usage: + config['py_modules'].append((packagename, '__config__',generate_config_py)) + """ + from numpy.distutils.system_info import system_info + from distutils.dir_util import mkpath + mkpath(os.path.dirname(target)) + with open(target, 'w') as f: + f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) + f.write('# It contains system_info results at the time of building this package.\n') + f.write('__all__ = ["get_info","show"]\n\n') + + # For gfortran+msvc combination, extra shared libraries may exist + f.write(textwrap.dedent(""" + import os + import sys + + extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') + + if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): + os.add_dll_directory(extra_dll_dir) + + """)) + + for k, i in system_info.saved_results.items(): + f.write('%s=%r\n' % (k, i)) + f.write(textwrap.dedent(r''' + def get_info(name): + g = globals() + return g.get(name, g.get(name + "_info", {})) + + def show(): + """ + Show libraries in the system on which NumPy was built. + + Print information about various resources (libraries, library + directories, include directories, etc.) in the system on which + NumPy was built. + + See Also + -------- + get_include : Returns the directory containing NumPy C + header files. + + Notes + ----- + 1. Classes specifying the information to be printed are defined + in the `numpy.distutils.system_info` module. + + Information may include: + + * ``language``: language used to write the libraries (mostly + C or f77) + * ``libraries``: names of libraries found in the system + * ``library_dirs``: directories containing the libraries + * ``include_dirs``: directories containing library header files + * ``src_dirs``: directories containing library source files + * ``define_macros``: preprocessor macros used by + ``distutils.setup`` + * ``baseline``: minimum CPU features required + * ``found``: dispatched features supported in the system + * ``not found``: dispatched features that are not supported + in the system + + 2. 
NumPy BLAS/LAPACK Installation Notes + + Installing a numpy wheel (``pip install numpy`` or force it + via ``pip install numpy --only-binary :numpy: numpy``) includes + an OpenBLAS implementation of the BLAS and LAPACK linear algebra + APIs. In this case, ``library_dirs`` reports the original build + time configuration as compiled with gcc/gfortran; at run time + the OpenBLAS library is in + ``site-packages/numpy.libs/`` (linux), or + ``site-packages/numpy/.dylibs/`` (macOS), or + ``site-packages/numpy/.libs/`` (windows). + + Installing numpy from source + (``pip install numpy --no-binary numpy``) searches for BLAS and + LAPACK dynamic link libraries at build time as influenced by + environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and + NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER; + or the optional file ``~/.numpy-site.cfg``. + NumPy remembers those locations and expects to load the same + libraries at run-time. + In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS + library) is in the default build-time search order after + 'openblas'. + + Examples + -------- + >>> import numpy as np + >>> np.show_config() + blas_opt_info: + language = c + define_macros = [('HAVE_CBLAS', None)] + libraries = ['openblas', 'openblas'] + library_dirs = ['/usr/local/lib'] + """ + from numpy.core._multiarray_umath import ( + __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + ) + for name,info_dict in globals().items(): + if name[0] == "_" or type(info_dict) is not type({}): continue + print(name + ":") + if not info_dict: + print(" NOT AVAILABLE") + for k,v in info_dict.items(): + v = str(v) + if k == "sources" and len(v) > 200: + v = v[:60] + " ...\n... " + v[-60:] + print(" %s = %s" % (k,v)) + + features_found, features_not_found = [], [] + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + features_found.append(feature) + else: + features_not_found.append(feature) + + print("Supported SIMD extensions in this NumPy install:") + print(" baseline = %s" % (','.join(__cpu_baseline__))) + print(" found = %s" % (','.join(features_found))) + print(" not found = %s" % (','.join(features_not_found))) + + ''')) + + return target + +def msvc_version(compiler): + """Return version major and minor of compiler instance if it is + MSVC, raise an exception otherwise.""" + if not compiler.compiler_type == "msvc": + raise ValueError("Compiler instance is not msvc (%s)"\ + % compiler.compiler_type) + return compiler._MSVCCompiler__version + +def get_build_architecture(): + # Importing distutils.msvccompiler triggers a warning on non-Windows + # systems, so delay the import to here. + from distutils.msvccompiler import get_build_architecture + return get_build_architecture() + + +_cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'} + + +def sanitize_cxx_flags(cxxflags): + ''' + Some flags are valid for C but not C++. Prune them. + ''' + return [flag for flag in cxxflags if flag not in _cxx_ignore_flags] + + +def exec_mod_from_location(modname, modfile): + ''' + Use importlib machinery to import a module `modname` from the file + `modfile`. Depending on the `spec.loader`, the module may not be + registered in sys.modules. 
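+
+    Illustrative use (hypothetical module name and path):
+
+    >>> mod = exec_mod_from_location('_ver', '/tmp/version.py')  #doctest: +SKIP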
+ ''' + spec = importlib.util.spec_from_file_location(modname, modfile) + foo = importlib.util.module_from_spec(spec) + spec.loader.exec_module(foo) + return foo diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/msvc9compiler.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/msvc9compiler.py new file mode 100644 index 00000000..68239495 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/msvc9compiler.py @@ -0,0 +1,63 @@ +import os +from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler + +from .system_info import platform_bits + + +def _merge(old, new): + """Concatenate two environment paths avoiding repeats. + + Here `old` is the environment string before the base class initialize + function is called and `new` is the string after the call. The new string + will be a fixed string if it is not obtained from the current environment, + or the same as the old string if obtained from the same environment. The aim + here is not to append the new string if it is already contained in the old + string so as to limit the growth of the environment string. + + Parameters + ---------- + old : string + Previous environment string. + new : string + New environment string. + + Returns + ------- + ret : string + Updated environment string. + + """ + if not old: + return new + if new in old: + return old + + # Neither new nor old is empty. Give old priority. + return ';'.join([old, new]) + + +class MSVCCompiler(_MSVCCompiler): + def __init__(self, verbose=0, dry_run=0, force=0): + _MSVCCompiler.__init__(self, verbose, dry_run, force) + + def initialize(self, plat_name=None): + # The 'lib' and 'include' variables may be overwritten + # by MSVCCompiler.initialize, so save them for later merge. + environ_lib = os.getenv('lib') + environ_include = os.getenv('include') + _MSVCCompiler.initialize(self, plat_name) + + # Merge current and previous values of 'lib' and 'include' + os.environ['lib'] = _merge(environ_lib, os.environ['lib']) + os.environ['include'] = _merge(environ_include, os.environ['include']) + + # msvc9 building for 32 bits requires SSE2 to work around a + # compiler bug. + if platform_bits == 32: + self.compile_options += ['/arch:SSE2'] + self.compile_options_debug += ['/arch:SSE2'] + + def manifest_setup_ldargs(self, output_filename, build_temp, ld_args): + ld_args.append('/MANIFEST') + _MSVCCompiler.manifest_setup_ldargs(self, output_filename, + build_temp, ld_args) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/msvccompiler.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/msvccompiler.py new file mode 100644 index 00000000..2b93221b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/msvccompiler.py @@ -0,0 +1,76 @@ +import os +from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler + +from .system_info import platform_bits + + +def _merge(old, new): + """Concatenate two environment paths avoiding repeats. + + Here `old` is the environment string before the base class initialize + function is called and `new` is the string after the call. The new string + will be a fixed string if it is not obtained from the current environment, + or the same as the old string if obtained from the same environment. The aim + here is not to append the new string if it is already contained in the old + string so as to limit the growth of the environment string. + + Parameters + ---------- + old : string + Previous environment string. + new : string + New environment string. 
+ + Returns + ------- + ret : string + Updated environment string. + + """ + if new in old: + return old + if not old: + return new + + # Neither new nor old is empty. Give old priority. + return ';'.join([old, new]) + + +class MSVCCompiler(_MSVCCompiler): + def __init__(self, verbose=0, dry_run=0, force=0): + _MSVCCompiler.__init__(self, verbose, dry_run, force) + + def initialize(self): + # The 'lib' and 'include' variables may be overwritten + # by MSVCCompiler.initialize, so save them for later merge. + environ_lib = os.getenv('lib', '') + environ_include = os.getenv('include', '') + _MSVCCompiler.initialize(self) + + # Merge current and previous values of 'lib' and 'include' + os.environ['lib'] = _merge(environ_lib, os.environ['lib']) + os.environ['include'] = _merge(environ_include, os.environ['include']) + + # msvc9 building for 32 bits requires SSE2 to work around a + # compiler bug. + if platform_bits == 32: + self.compile_options += ['/arch:SSE2'] + self.compile_options_debug += ['/arch:SSE2'] + + +def lib_opts_if_msvc(build_cmd): + """ Add flags if we are using MSVC compiler + + We can't see `build_cmd` in our scope, because we have not initialized + the distutils build command, so use this deferred calculation to run + when we are building the library. + """ + if build_cmd.compiler.compiler_type != 'msvc': + return [] + # Explicitly disable whole-program optimization. + flags = ['/GL-'] + # Disable voltbl section for vc142 to allow link using mingw-w64; see: + # https://github.com/matthew-brett/dll_investigation/issues/1#issuecomment-1100468171 + if build_cmd.compiler_opt.cc_test_flags(['-d2VolatileMetadata-']): + flags.append('-d2VolatileMetadata-') + return flags diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/npy_pkg_config.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/npy_pkg_config.py new file mode 100644 index 00000000..f6e3ad39 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/npy_pkg_config.py @@ -0,0 +1,437 @@ +import sys +import re +import os + +from configparser import RawConfigParser + +__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', + 'read_config', 'parse_flags'] + +_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}') + +class FormatError(OSError): + """ + Exception thrown when there is a problem parsing a configuration file. + + """ + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + +class PkgNotFound(OSError): + """Exception raised when a package can not be located.""" + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + +def parse_flags(line): + """ + Parse a line from a config file containing compile flags. + + Parameters + ---------- + line : str + A single line containing one or more compile flags. + + Returns + ------- + d : dict + Dictionary of parsed flags, split into relevant categories. 
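`lib_opts_if_msvc` above is written as a deferred callable because the compiler type is only known once the build command is live. A sketch of the pattern (the `build_cmd` object stands for whatever build command distutils passes in; nothing here runs at definition time):

    def make_lib_opts(build_cmd):
        # No MSVC-specific work happens until the build actually runs.
        if build_cmd.compiler.compiler_type != 'msvc':
            return []
        return ['/GL-']   # e.g. disable whole-program optimization

A setup script would hand over `make_lib_opts` itself, not its result, so the check executes only after the compiler has been initialized.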
These categories are the keys of `d`:

+        * 'include_dirs'
+        * 'library_dirs'
+        * 'libraries'
+        * 'macros'
+        * 'ignored'
+
+    """
+    d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
+         'macros': [], 'ignored': []}
+
+    flags = (' ' + line).split(' -')
+    for flag in flags:
+        flag = '-' + flag
+        if len(flag) > 0:
+            if flag.startswith('-I'):
+                d['include_dirs'].append(flag[2:].strip())
+            elif flag.startswith('-L'):
+                d['library_dirs'].append(flag[2:].strip())
+            elif flag.startswith('-l'):
+                d['libraries'].append(flag[2:].strip())
+            elif flag.startswith('-D'):
+                d['macros'].append(flag[2:].strip())
+            else:
+                d['ignored'].append(flag)
+
+    return d
+
+def _escape_backslash(val):
+    return val.replace('\\', '\\\\')
+
+class LibraryInfo:
+    """
+    Object containing build information about a library.
+
+    Parameters
+    ----------
+    name : str
+        The library name.
+    description : str
+        Description of the library.
+    version : str
+        Version string.
+    sections : dict
+        The sections of the configuration file for the library. The keys are
+        the section headers, the values the text under each header.
+    vars : class instance
+        A `VariableSet` instance, which contains ``(name, value)`` pairs for
+        variables defined in the configuration file for the library.
+    requires : sequence, optional
+        The required libraries for the library to be installed.
+
+    Notes
+    -----
+    All input parameters (except "sections" which is a method) are available as
+    attributes of the same name.
+
+    """
+    def __init__(self, name, description, version, sections, vars, requires=None):
+        self.name = name
+        self.description = description
+        if requires:
+            self.requires = requires
+        else:
+            self.requires = []
+        self.version = version
+        self._sections = sections
+        self.vars = vars
+
+    def sections(self):
+        """
+        Return the section headers of the config file.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        keys : list of str
+            The list of section headers.
+
+        """
+        return list(self._sections.keys())
+
+    def cflags(self, section="default"):
+        val = self.vars.interpolate(self._sections[section]['cflags'])
+        return _escape_backslash(val)
+
+    def libs(self, section="default"):
+        val = self.vars.interpolate(self._sections[section]['libs'])
+        return _escape_backslash(val)
+
+    def __str__(self):
+        m = ['Name: %s' % self.name, 'Description: %s' % self.description]
+        # List the required packages when present, otherwise the bare header.
+        if self.requires:
+            m.append('Requires: %s' % ",".join(self.requires))
+        else:
+            m.append('Requires:')
+        m.append('Version: %s' % self.version)
+
+        return "\n".join(m)
+
+class VariableSet:
+    """
+    Container object for the variables defined in a config file.
+
+    `VariableSet` can be used as a plain dictionary, with the variable names
+    as keys.
+
+    Parameters
+    ----------
+    d : dict
+        Dict of items in the "variables" section of the configuration file.
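A hypothetical `LibraryInfo` built by hand, to show how `cflags`/`libs` pull a section's raw flags through variable interpolation (`VariableSet` is defined next; all names and paths here are illustrative):

    info = LibraryInfo(
        name='npymath', description='portable math library', version='0.1',
        sections={'default': {'cflags': '-I${prefix}/include',
                              'libs': '-L${prefix}/lib -lnpymath'}},
        vars=VariableSet({'prefix': '/usr/local'}))
    info.cflags()   # '-I/usr/local/include'
    info.libs()     # '-L/usr/local/lib -lnpymath'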
+ + """ + def __init__(self, d): + self._raw_data = dict([(k, v) for k, v in d.items()]) + + self._re = {} + self._re_sub = {} + + self._init_parse() + + def _init_parse(self): + for k, v in self._raw_data.items(): + self._init_parse_var(k, v) + + def _init_parse_var(self, name, value): + self._re[name] = re.compile(r'\$\{%s\}' % name) + self._re_sub[name] = value + + def interpolate(self, value): + # Brute force: we keep interpolating until there is no '${var}' anymore + # or until interpolated string is equal to input string + def _interpolate(value): + for k in self._re.keys(): + value = self._re[k].sub(self._re_sub[k], value) + return value + while _VAR.search(value): + nvalue = _interpolate(value) + if nvalue == value: + break + value = nvalue + + return value + + def variables(self): + """ + Return the list of variable names. + + Parameters + ---------- + None + + Returns + ------- + names : list of str + The names of all variables in the `VariableSet` instance. + + """ + return list(self._raw_data.keys()) + + # Emulate a dict to set/get variables values + def __getitem__(self, name): + return self._raw_data[name] + + def __setitem__(self, name, value): + self._raw_data[name] = value + self._init_parse_var(name, value) + +def parse_meta(config): + if not config.has_section('meta'): + raise FormatError("No meta section found !") + + d = dict(config.items('meta')) + + for k in ['name', 'description', 'version']: + if not k in d: + raise FormatError("Option %s (section [meta]) is mandatory, " + "but not found" % k) + + if not 'requires' in d: + d['requires'] = [] + + return d + +def parse_variables(config): + if not config.has_section('variables'): + raise FormatError("No variables section found !") + + d = {} + + for name, value in config.items("variables"): + d[name] = value + + return VariableSet(d) + +def parse_sections(config): + return meta_d, r + +def pkg_to_filename(pkg_name): + return "%s.ini" % pkg_name + +def parse_config(filename, dirs=None): + if dirs: + filenames = [os.path.join(d, filename) for d in dirs] + else: + filenames = [filename] + + config = RawConfigParser() + + n = config.read(filenames) + if not len(n) >= 1: + raise PkgNotFound("Could not find file(s) %s" % str(filenames)) + + # Parse meta and variables sections + meta = parse_meta(config) + + vars = {} + if config.has_section('variables'): + for name, value in config.items("variables"): + vars[name] = _escape_backslash(value) + + # Parse "normal" sections + secs = [s for s in config.sections() if not s in ['meta', 'variables']] + sections = {} + + requires = {} + for s in secs: + d = {} + if config.has_option(s, "requires"): + requires[s] = config.get(s, 'requires') + + for name, value in config.items(s): + d[name] = value + sections[s] = d + + return meta, vars, sections, requires + +def _read_config_imp(filenames, dirs=None): + def _read_config(f): + meta, vars, sections, reqs = parse_config(f, dirs) + # recursively add sections and variables of required libraries + for rname, rvalue in reqs.items(): + nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) + + # Update var dict for variables not in 'top' config file + for k, v in nvars.items(): + if not k in vars: + vars[k] = v + + # Update sec dict + for oname, ovalue in nsections[rname].items(): + if ovalue: + sections[rname][oname] += ' %s' % ovalue + + return meta, vars, sections, reqs + + meta, vars, sections, reqs = _read_config(filenames) + + # FIXME: document this. 
If pkgname is defined in the variables section, and
+    # there is no pkgdir variable defined, pkgdir is automatically defined to
+    # the path of pkgname. This requires the package to be imported to work.
+    if not 'pkgdir' in vars and "pkgname" in vars:
+        pkgname = vars["pkgname"]
+        if not pkgname in sys.modules:
+            raise ValueError("You should import %s to get information on %s" %
+                             (pkgname, meta["name"]))
+
+        mod = sys.modules[pkgname]
+        vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))
+
+    return LibraryInfo(name=meta["name"], description=meta["description"],
+            version=meta["version"], sections=sections, vars=VariableSet(vars))
+
+# Trivial cache for LibraryInfo instances. To be really efficient, the cache
+# should be handled in read_config, since the same file can be parsed many
+# times outside LibraryInfo creation, but I doubt this will be a problem in
+# practice
+_CACHE = {}
+def read_config(pkgname, dirs=None):
+    """
+    Return library info for a package from its configuration file.
+
+    Parameters
+    ----------
+    pkgname : str
+        Name of the package (should match the name of the .ini file, without
+        the extension, e.g. foo for the file foo.ini).
+    dirs : sequence, optional
+        If given, should be a sequence of directories - usually including
+        the NumPy base directory - where to look for npy-pkg-config files.
+
+    Returns
+    -------
+    pkginfo : class instance
+        The `LibraryInfo` instance containing the build information.
+
+    Raises
+    ------
+    PkgNotFound
+        If the package is not found.
+
+    See Also
+    --------
+    misc_util.get_info, misc_util.get_pkg_info
+
+    Examples
+    --------
+    >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
+    >>> type(npymath_info)
+    <class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
+    >>> print(npymath_info)
+    Name: npymath
+    Description: Portable, core math library implementing C99 standard
+    Requires:
+    Version: 0.1  #random
+
+    """
+    try:
+        return _CACHE[pkgname]
+    except KeyError:
+        v = _read_config_imp(pkg_to_filename(pkgname), dirs)
+        _CACHE[pkgname] = v
+        return v
+
+# TODO:
+#   - implements version comparison (modversion + atleast)
+
+# pkg-config simple emulator - useful for debugging, and maybe later to query
+# the system
+if __name__ == '__main__':
+    from optparse import OptionParser
+    import glob
+
+    parser = OptionParser()
+    parser.add_option("--cflags", dest="cflags", action="store_true",
+                      help="output all preprocessor and compiler flags")
+    parser.add_option("--libs", dest="libs", action="store_true",
+                      help="output all linker flags")
+    parser.add_option("--use-section", dest="section",
+                      help="use this section instead of default for options")
+    parser.add_option("--version", dest="version", action="store_true",
+                      help="output version")
+    parser.add_option("--atleast-version", dest="min_version",
+                      help="Minimal version")
+    parser.add_option("--list-all", dest="list_all", action="store_true",
+                      help="list all available packages")
+    parser.add_option("--define-variable", dest="define_variable",
+                      help="Replace variable with the given value")
+
+    (options, args) = parser.parse_args(sys.argv)
+
+    if len(args) < 2:
+        raise ValueError("Expect package name on the command line:")
+
+    if options.list_all:
+        files = glob.glob("*.ini")
+        for f in files:
+            info = read_config(f)
+            print("%s\t%s - %s" % (info.name, info.name, info.description))
+
+    pkg_name = args[1]
+    d = os.environ.get('NPY_PKG_CONFIG_PATH')
+    if d:
+        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
+    else:
+        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])
+
+    if options.section:
+        section = options.section
+    else:
+        section = "default"
+
+    if options.define_variable:
+        m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
+        if not m:
+            raise ValueError("--define-variable option should be of "
+                             "the form --define-variable=foo=bar")
+        else:
+            name = m.group(1)
+            value = m.group(2)
+        info.vars[name] = value
+
+    if options.cflags:
+        print(info.cflags(section))
+    if options.libs:
+        print(info.libs(section))
+    if options.version:
+        print(info.version)
+    if options.min_version:
+        print(info.version >= options.min_version)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/numpy_distribution.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/numpy_distribution.py
new file mode 100644
index 00000000..ea818265
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/numpy_distribution.py
@@ -0,0 +1,17 @@
+# XXX: Handle setuptools ?
+from distutils.core import Distribution
+
+# This class is used because we add new files (sconscripts, and so on) with the
+# scons command
+class NumpyDistribution(Distribution):
+    def __init__(self, attrs = None):
+        # A list of (sconscripts, pre_hook, post_hook, src, parent_names)
+        self.scons_data = []
+        # A list of installable libraries
+        self.installed_libraries = []
+        # A dict of pkg_config files to generate/install
+        self.installed_pkg_config = {}
+        Distribution.__init__(self, attrs)
+
+    def has_scons_scripts(self):
+        return bool(self.scons_data)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/pathccompiler.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/pathccompiler.py
new file mode 100644
index 00000000..48051810
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/pathccompiler.py
@@ -0,0 +1,21 @@
+from distutils.unixccompiler import UnixCCompiler
+
+class PathScaleCCompiler(UnixCCompiler):
+
+    """
+    PathScale compiler compatible with a gcc-built Python.
+    """
+
+    compiler_type = 'pathcc'
+    cc_exe = 'pathcc'
+    cxx_exe = 'pathCC'
+
+    def __init__ (self, verbose=0, dry_run=0, force=0):
+        UnixCCompiler.__init__ (self, verbose, dry_run, force)
+        cc_compiler = self.cc_exe
+        cxx_compiler = self.cxx_exe
+        self.set_executables(compiler=cc_compiler,
+                             compiler_so=cc_compiler,
+                             compiler_cxx=cxx_compiler,
+                             linker_exe=cc_compiler,
+                             linker_so=cc_compiler + ' -shared')
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/setup.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/setup.py
new file mode 100644
index 00000000..522756fc
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/setup.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python3
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('distutils', parent_package, top_path)
+    config.add_subpackage('command')
+    config.add_subpackage('fcompiler')
+    config.add_subpackage('tests')
+    config.add_data_files('site.cfg')
+    config.add_data_files('mingw/gfortran_vs2003_hack.c')
+    config.add_data_dir('checks')
+    config.add_data_files('*.pyi')
+    config.make_config_py()
+    return config
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(configuration=configuration)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/system_info.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/system_info.py
new file mode 100644
index 00000000..feb28f61
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/system_info.py
@@ -0,0 +1,3271 @@
+#!/usr/bin/env python3
+"""
+This file defines a set of system_info classes for getting
+information about various resources (libraries, library directories,
+include directories, etc.) in the system. Usage:
+    info_dict = get_info(<name>)
+  where <name> is a string 'atlas','x11','fftw','lapack','blas',
+  'lapack_src', 'blas_src', etc. For a complete list of allowed names,
+  see the definition of get_info() function below.
+
+  Returned info_dict is a dictionary which is compatible with
+  distutils.setup keyword arguments. If info_dict == {}, then the
+  asked resource is not available (system_info could not find it).
+
+  Several *_info classes specify an environment variable to specify
+  the locations of software. When setting the corresponding environment
+  variable to 'None' then the software will be ignored, even when it
+  is available in the system.
+
+Global parameters:
+  system_info.search_static_first - search static libraries (.a)
+        in precedence to shared ones (.so, .sl) if enabled.
+  system_info.verbosity - output the results to stdout if enabled.
+
+The file 'site.cfg' is looked for in
+
+1) Directory of main setup.py file being run.
+2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
+3) System wide directory (location of this file...)
+
+The first one found is used to get system configuration options. The
+format is that used by ConfigParser (i.e., Windows .INI style). The
+section ALL is not intended for general use.
+
+Appropriate defaults are used if nothing is specified.
+
+The order of finding the locations of resources is the following:
+ 1. environment variable
+ 2. section in site.cfg
+ 3. DEFAULT section in site.cfg
+ 4. System default search paths (see ``default_*`` variables below).
+Only the first complete match is returned.
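For reference, the usage described above looks like this in a setup script ('blas_opt' is one of the recommended names listed below; the resulting dict is illustrative):

    from numpy.distutils.system_info import get_info

    info = get_info('blas_opt', notfound_action=1)   # 1 = warn if missing
    # `info` holds distutils.setup keyword arguments, e.g. something like
    # {'libraries': ['openblas'], 'library_dirs': ['/usr/local/lib'],
    #  'define_macros': [('HAVE_CBLAS', None)], 'language': 'c'}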
+ +Currently, the following classes are available, along with their section names: + + Numeric_info:Numeric + _numpy_info:Numeric + _pkg_config_info:None + accelerate_info:accelerate + accelerate_lapack_info:accelerate + agg2_info:agg2 + amd_info:amd + atlas_3_10_blas_info:atlas + atlas_3_10_blas_threads_info:atlas + atlas_3_10_info:atlas + atlas_3_10_threads_info:atlas + atlas_blas_info:atlas + atlas_blas_threads_info:atlas + atlas_info:atlas + atlas_threads_info:atlas + blas64__opt_info:ALL # usage recommended (general ILP64 BLAS, 64_ symbol suffix) + blas_ilp64_opt_info:ALL # usage recommended (general ILP64 BLAS) + blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix) + blas_info:blas + blas_mkl_info:mkl + blas_ssl2_info:ssl2 + blas_opt_info:ALL # usage recommended + blas_src_info:blas_src + blis_info:blis + boost_python_info:boost_python + dfftw_info:fftw + dfftw_threads_info:fftw + djbfft_info:djbfft + f2py_info:ALL + fft_opt_info:ALL + fftw2_info:fftw + fftw3_info:fftw3 + fftw_info:fftw + fftw_threads_info:fftw + flame_info:flame + freetype2_info:freetype2 + gdk_2_info:gdk_2 + gdk_info:gdk + gdk_pixbuf_2_info:gdk_pixbuf_2 + gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2 + gdk_x11_2_info:gdk_x11_2 + gtkp_2_info:gtkp_2 + gtkp_x11_2_info:gtkp_x11_2 + lapack64__opt_info:ALL # usage recommended (general ILP64 LAPACK, 64_ symbol suffix) + lapack_atlas_3_10_info:atlas + lapack_atlas_3_10_threads_info:atlas + lapack_atlas_info:atlas + lapack_atlas_threads_info:atlas + lapack_ilp64_opt_info:ALL # usage recommended (general ILP64 LAPACK) + lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix) + lapack_info:lapack + lapack_mkl_info:mkl + lapack_ssl2_info:ssl2 + lapack_opt_info:ALL # usage recommended + lapack_src_info:lapack_src + mkl_info:mkl + ssl2_info:ssl2 + numarray_info:numarray + numerix_info:numerix + numpy_info:numpy + openblas64__info:openblas64_ + openblas64__lapack_info:openblas64_ + openblas_clapack_info:openblas + openblas_ilp64_info:openblas_ilp64 + openblas_ilp64_lapack_info:openblas_ilp64 + openblas_info:openblas + openblas_lapack_info:openblas + sfftw_info:fftw + sfftw_threads_info:fftw + system_info:ALL + umfpack_info:umfpack + wx_info:wx + x11_info:x11 + xft_info:xft + +Note that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER +and NPY_LAPACK_ORDER environment variables to determine the order in which +specific BLAS and LAPACK libraries are searched for. + +This search (or autodetection) can be bypassed by defining the environment +variables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the +exact linker flags to use (language will be set to F77). Building against +Netlib BLAS/LAPACK or stub files, in order to be able to switch BLAS and LAPACK +implementations at runtime. If using this to build NumPy itself, it is +recommended to also define NPY_CBLAS_LIBS (assuming your BLAS library has a +CBLAS interface) to enable CBLAS usage for matrix multiplication (unoptimized +otherwise). 
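A sketch of bypassing the search with explicit linker flags before a from-source build, per the paragraph above (the paths are hypothetical):

    import os

    os.environ['NPY_BLAS_LIBS'] = '-L/opt/netlib/lib -lblas'
    os.environ['NPY_CBLAS_LIBS'] = '-lcblas'     # optional CBLAS interface
    os.environ['NPY_LAPACK_LIBS'] = '-L/opt/netlib/lib -llapack'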
+ +Example: +---------- +[DEFAULT] +# default section +library_dirs = /usr/lib:/usr/local/lib:/opt/lib +include_dirs = /usr/include:/usr/local/include:/opt/include +src_dirs = /usr/local/src:/opt/src +# search static libraries (.a) in preference to shared ones (.so) +search_static_first = 0 + +[fftw] +libraries = rfftw, fftw + +[atlas] +library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas +# for overriding the names of the atlas libraries +libraries = lapack, f77blas, cblas, atlas + +[x11] +library_dirs = /usr/X11R6/lib +include_dirs = /usr/X11R6/include +---------- + +Note that the ``libraries`` key is the default setting for libraries. + +Authors: + Pearu Peterson , February 2002 + David M. Cooke , April 2002 + +Copyright 2002 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) license. See LICENSE.txt that came with +this distribution for specifics. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + +""" +import sys +import os +import re +import copy +import warnings +import subprocess +import textwrap + +from glob import glob +from functools import reduce +from configparser import NoOptionError +from configparser import RawConfigParser as ConfigParser +# It seems that some people are importing ConfigParser from here so is +# good to keep its class name. Use of RawConfigParser is needed in +# order to be able to load path names with percent in them, like +# `feature%2Fcool` which is common on git flow branch names. + +from distutils.errors import DistutilsError +from distutils.dist import Distribution +import sysconfig +from numpy.distutils import log +from distutils.util import get_platform + +from numpy.distutils.exec_command import ( + find_executable, filepath_from_subprocess_output, + ) +from numpy.distutils.misc_util import (is_sequence, is_string, + get_shared_lib_extension) +from numpy.distutils.command.config import config as cmd_config +from numpy.distutils import customized_ccompiler as _customized_ccompiler +from numpy.distutils import _shell_utils +import distutils.ccompiler +import tempfile +import shutil + +__all__ = ['system_info'] + +# Determine number of bits +import platform +_bits = {'32bit': 32, '64bit': 64} +platform_bits = _bits[platform.architecture()[0]] + + +global_compiler = None + +def customized_ccompiler(): + global global_compiler + if not global_compiler: + global_compiler = _customized_ccompiler() + return global_compiler + + +def _c_string_literal(s): + """ + Convert a python string into a literal suitable for inclusion into C code + """ + # only these three characters are forbidden in C strings + s = s.replace('\\', r'\\') + s = s.replace('"', r'\"') + s = s.replace('\n', r'\n') + return '"{}"'.format(s) + + +def libpaths(paths, bits): + """Return a list of library paths valid on 32 or 64 bit systems. + + Inputs: + paths : sequence + A sequence of strings (typically paths) + bits : int + An integer, the only valid values are 32 or 64. A ValueError exception + is raised otherwise. 
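`_c_string_literal` above escapes exactly the three characters that can break a C string literal; for example:

    print(_c_string_literal('path with "quotes" and \\'))
    # prints: "path with \"quotes\" and \\"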
+ + Examples: + + Consider a list of directories + >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] + + For a 32-bit platform, this is already valid: + >>> np.distutils.system_info.libpaths(paths,32) + ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] + + On 64 bits, we prepend the '64' postfix + >>> np.distutils.system_info.libpaths(paths,64) + ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', + '/usr/lib64', '/usr/lib'] + """ + if bits not in (32, 64): + raise ValueError("Invalid bit size in libpaths: 32 or 64 only") + + # Handle 32bit case + if bits == 32: + return paths + + # Handle 64bit case + out = [] + for p in paths: + out.extend([p + '64', p]) + + return out + + +if sys.platform == 'win32': + default_lib_dirs = ['C:\\', + os.path.join(sysconfig.get_config_var('exec_prefix'), + 'libs')] + default_runtime_dirs = [] + default_include_dirs = [] + default_src_dirs = ['.'] + default_x11_lib_dirs = [] + default_x11_include_dirs = [] + _include_dirs = [ + 'include', + 'include/suitesparse', + ] + _lib_dirs = [ + 'lib', + ] + + _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] + _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] + def add_system_root(library_root): + """Add a package manager root to the include directories""" + global default_lib_dirs + global default_include_dirs + + library_root = os.path.normpath(library_root) + + default_lib_dirs.extend( + os.path.join(library_root, d) for d in _lib_dirs) + default_include_dirs.extend( + os.path.join(library_root, d) for d in _include_dirs) + + # VCpkg is the de-facto package manager on windows for C/C++ + # libraries. If it is on the PATH, then we append its paths here. + vcpkg = shutil.which('vcpkg') + if vcpkg: + vcpkg_dir = os.path.dirname(vcpkg) + if platform.architecture()[0] == '32bit': + specifier = 'x86' + else: + specifier = 'x64' + + vcpkg_installed = os.path.join(vcpkg_dir, 'installed') + for vcpkg_root in [ + os.path.join(vcpkg_installed, specifier + '-windows'), + os.path.join(vcpkg_installed, specifier + '-windows-static'), + ]: + add_system_root(vcpkg_root) + + # Conda is another popular package manager that provides libraries + conda = shutil.which('conda') + if conda: + conda_dir = os.path.dirname(conda) + add_system_root(os.path.join(conda_dir, '..', 'Library')) + add_system_root(os.path.join(conda_dir, 'Library')) + +else: + default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', + '/opt/local/lib', '/sw/lib'], platform_bits) + default_runtime_dirs = [] + default_include_dirs = ['/usr/local/include', + '/opt/include', + # path of umfpack under macports + '/opt/local/include/ufsparse', + '/opt/local/include', '/sw/include', + '/usr/include/suitesparse'] + default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] + + default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', + '/usr/lib'], platform_bits) + default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include'] + + if os.path.exists('/usr/lib/X11'): + globbed_x11_dir = glob('/usr/lib/*/libX11.so') + if globbed_x11_dir: + x11_so_dir = os.path.split(globbed_x11_dir[0])[0] + default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) + default_x11_include_dirs.extend(['/usr/lib/X11/include', + '/usr/include/X11']) + + with open(os.devnull, 'w') as tmp: + try: + p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE, + stderr=tmp) + except (OSError, DistutilsError): + # OSError if gcc is not installed, or SandboxViolation (DistutilsError + # subclass) if an old 
setuptools bug is triggered (see gh-3160). + pass + else: + triplet = str(p.communicate()[0].decode().strip()) + if p.returncode == 0: + # gcc supports the "-print-multiarch" option + default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)] + default_lib_dirs += [os.path.join("/usr/lib/", triplet)] + + +if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: + default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) + default_include_dirs.append(os.path.join(sys.prefix, 'include')) + default_src_dirs.append(os.path.join(sys.prefix, 'src')) + +default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] +default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)] +default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] +default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] + +so_ext = get_shared_lib_extension() + + +def get_standard_file(fname): + """Returns a list of files named 'fname' from + 1) System-wide directory (directory-location of this module) + 2) Users HOME directory (os.environ['HOME']) + 3) Local directory + """ + # System-wide file + filenames = [] + try: + f = __file__ + except NameError: + f = sys.argv[0] + sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], + fname) + if os.path.isfile(sysfile): + filenames.append(sysfile) + + # Home directory + # And look for the user config file + try: + f = os.path.expanduser('~') + except KeyError: + pass + else: + user_file = os.path.join(f, fname) + if os.path.isfile(user_file): + filenames.append(user_file) + + # Local file + if os.path.isfile(fname): + filenames.append(os.path.abspath(fname)) + + return filenames + + +def _parse_env_order(base_order, env): + """ Parse an environment variable `env` by splitting with "," and only returning elements from `base_order` + + This method will sequence the environment variable and check for their + individual elements in `base_order`. + + The items in the environment variable may be negated via '^item' or '!itema,itemb'. + It must start with ^/! to negate all options. 
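A small sketch of the negation semantics described above (the base order here is hypothetical):

    import os

    base = ['mkl', 'openblas', 'atlas', 'blas']
    os.environ['NPY_BLAS_ORDER'] = '^mkl,atlas'
    _parse_env_order(base, 'NPY_BLAS_ORDER')
    # -> (['openblas', 'blas'], [])  i.e. everything except the negated items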
+
+    Raises
+    ------
+    ValueError: for mixed negated and non-negated orders or multiple negated orders
+
+    Parameters
+    ----------
+    base_order : list of str
+       the base list of orders
+    env : str
+       the environment variable to be parsed, if none is found, `base_order` is returned
+
+    Returns
+    -------
+    allow_order : list of str
+        allowed orders in lower-case
+    unknown_order : list of str
+        for values not overlapping with `base_order`
+    """
+    order_str = os.environ.get(env, None)
+
+    # ensure all base-orders are lower-case (for easier comparison)
+    base_order = [order.lower() for order in base_order]
+    if order_str is None:
+        return base_order, []
+
+    neg = order_str.startswith('^') or order_str.startswith('!')
+    # Check format
+    order_str_l = list(order_str)
+    sum_neg = order_str_l.count('^') + order_str_l.count('!')
+    if neg:
+        if sum_neg > 1:
+            raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}")
+        # remove prefix
+        order_str = order_str[1:]
+    elif sum_neg > 0:
+        raise ValueError(f"Environment variable '{env}' may not mix negated and non-negated items: {order_str}")
+
+    # Split and lower case
+    orders = order_str.lower().split(',')
+
+    # to inform the caller about non-overlapping elements
+    unknown_order = []
+
+    # if negated, we have to remove from the order
+    if neg:
+        allow_order = base_order.copy()
+
+        for order in orders:
+            if not order:
+                continue
+
+            if order not in base_order:
+                unknown_order.append(order)
+                continue
+
+            if order in allow_order:
+                allow_order.remove(order)
+
+    else:
+        allow_order = []
+
+        for order in orders:
+            if not order:
+                continue
+
+            if order not in base_order:
+                unknown_order.append(order)
+                continue
+
+            if order not in allow_order:
+                allow_order.append(order)
+
+    return allow_order, unknown_order
+
+
+def get_info(name, notfound_action=0):
+    """
+    notfound_action:
+      0 - do nothing
+      1 - display warning message
+      2 - raise error
+    """
+    cl = {'armpl': armpl_info,
+          'blas_armpl': blas_armpl_info,
+          'lapack_armpl': lapack_armpl_info,
+          'fftw3_armpl': fftw3_armpl_info,
+          'atlas': atlas_info,  # use lapack_opt or blas_opt instead
+          'atlas_threads': atlas_threads_info,  # ditto
+          'atlas_blas': atlas_blas_info,
+          'atlas_blas_threads': atlas_blas_threads_info,
+          'lapack_atlas': lapack_atlas_info,  # use lapack_opt instead
+          'lapack_atlas_threads': lapack_atlas_threads_info,  # ditto
+          'atlas_3_10': atlas_3_10_info,  # use lapack_opt or blas_opt instead
+          'atlas_3_10_threads': atlas_3_10_threads_info,  # ditto
+          'atlas_3_10_blas': atlas_3_10_blas_info,
+          'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
+          'lapack_atlas_3_10': lapack_atlas_3_10_info,  # use lapack_opt instead
+          'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info,  # ditto
+          'flame': flame_info,  # use lapack_opt instead
+          'mkl': mkl_info,
+          'ssl2': ssl2_info,
+          # openblas which may or may not have embedded lapack
+          'openblas': openblas_info,  # use blas_opt instead
+          # openblas with embedded lapack
+          'openblas_lapack': openblas_lapack_info,  # use blas_opt instead
+          'openblas_clapack': openblas_clapack_info,  # use blas_opt instead
+          'blis': blis_info,  # use blas_opt instead
+          'lapack_mkl': lapack_mkl_info,  # use lapack_opt instead
+          'blas_mkl': blas_mkl_info,  # use blas_opt instead
+          'lapack_ssl2': lapack_ssl2_info,
+          'blas_ssl2': blas_ssl2_info,
+          'accelerate': accelerate_info,  # use blas_opt instead
+          'accelerate_lapack': accelerate_lapack_info,
+          'openblas64_': openblas64__info,
+          'openblas64__lapack': openblas64__lapack_info,
+
'openblas_ilp64': openblas_ilp64_info, + 'openblas_ilp64_lapack': openblas_ilp64_lapack_info, + 'x11': x11_info, + 'fft_opt': fft_opt_info, + 'fftw': fftw_info, + 'fftw2': fftw2_info, + 'fftw3': fftw3_info, + 'dfftw': dfftw_info, + 'sfftw': sfftw_info, + 'fftw_threads': fftw_threads_info, + 'dfftw_threads': dfftw_threads_info, + 'sfftw_threads': sfftw_threads_info, + 'djbfft': djbfft_info, + 'blas': blas_info, # use blas_opt instead + 'lapack': lapack_info, # use lapack_opt instead + 'lapack_src': lapack_src_info, + 'blas_src': blas_src_info, + 'numpy': numpy_info, + 'f2py': f2py_info, + 'Numeric': Numeric_info, + 'numeric': Numeric_info, + 'numarray': numarray_info, + 'numerix': numerix_info, + 'lapack_opt': lapack_opt_info, + 'lapack_ilp64_opt': lapack_ilp64_opt_info, + 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info, + 'lapack64__opt': lapack64__opt_info, + 'blas_opt': blas_opt_info, + 'blas_ilp64_opt': blas_ilp64_opt_info, + 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info, + 'blas64__opt': blas64__opt_info, + 'boost_python': boost_python_info, + 'agg2': agg2_info, + 'wx': wx_info, + 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info, + 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info, + 'gdk_pixbuf_2': gdk_pixbuf_2_info, + 'gdk-pixbuf-2.0': gdk_pixbuf_2_info, + 'gdk': gdk_info, + 'gdk_2': gdk_2_info, + 'gdk-2.0': gdk_2_info, + 'gdk_x11_2': gdk_x11_2_info, + 'gdk-x11-2.0': gdk_x11_2_info, + 'gtkp_x11_2': gtkp_x11_2_info, + 'gtk+-x11-2.0': gtkp_x11_2_info, + 'gtkp_2': gtkp_2_info, + 'gtk+-2.0': gtkp_2_info, + 'xft': xft_info, + 'freetype2': freetype2_info, + 'umfpack': umfpack_info, + 'amd': amd_info, + }.get(name.lower(), system_info) + return cl().get_info(notfound_action) + + +class NotFoundError(DistutilsError): + """Some third-party program or library is not found.""" + + +class AliasedOptionError(DistutilsError): + """ + Aliases entries in config files should not be existing. + In section '{section}' we found multiple appearances of options {options}.""" + + +class AtlasNotFoundError(NotFoundError): + """ + Atlas (http://github.com/math-atlas/math-atlas) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [atlas]) or by setting + the ATLAS environment variable.""" + + +class FlameNotFoundError(NotFoundError): + """ + FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [flame]).""" + + +class LapackNotFoundError(NotFoundError): + """ + Lapack (http://www.netlib.org/lapack/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [lapack]) or by setting + the LAPACK environment variable.""" + + +class LapackSrcNotFoundError(LapackNotFoundError): + """ + Lapack (http://www.netlib.org/lapack/) sources not found. + Directories to search for the sources can be specified in the + numpy/distutils/site.cfg file (section [lapack_src]) or by setting + the LAPACK_SRC environment variable.""" + + +class LapackILP64NotFoundError(NotFoundError): + """ + 64-bit Lapack libraries not found. + Known libraries in numpy/distutils/site.cfg file are: + openblas64_, openblas_ilp64 + """ + +class BlasOptNotFoundError(NotFoundError): + """ + Optimized (vendor) Blas libraries are not found. + Falls back to netlib Blas library which has worse performance. 
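The `notfound_action` argument of `get_info` above selects between the exception classes that follow and their docstrings; roughly:

    get_info('lapack_opt')                     # 0: quietly returns {} on failure
    get_info('lapack_opt', notfound_action=1)  # warns with the class docstring
    get_info('lapack_opt', notfound_action=2)  # raises, e.g. LapackNotFoundError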
+ A better performance should be easily gained by switching + Blas library.""" + +class BlasNotFoundError(NotFoundError): + """ + Blas (http://www.netlib.org/blas/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [blas]) or by setting + the BLAS environment variable.""" + +class BlasILP64NotFoundError(NotFoundError): + """ + 64-bit Blas libraries not found. + Known libraries in numpy/distutils/site.cfg file are: + openblas64_, openblas_ilp64 + """ + +class BlasSrcNotFoundError(BlasNotFoundError): + """ + Blas (http://www.netlib.org/blas/) sources not found. + Directories to search for the sources can be specified in the + numpy/distutils/site.cfg file (section [blas_src]) or by setting + the BLAS_SRC environment variable.""" + + +class FFTWNotFoundError(NotFoundError): + """ + FFTW (http://www.fftw.org/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [fftw]) or by setting + the FFTW environment variable.""" + + +class DJBFFTNotFoundError(NotFoundError): + """ + DJBFFT (https://cr.yp.to/djbfft.html) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [djbfft]) or by setting + the DJBFFT environment variable.""" + + +class NumericNotFoundError(NotFoundError): + """ + Numeric (https://www.numpy.org/) module not found. + Get it from above location, install it, and retry setup.py.""" + + +class X11NotFoundError(NotFoundError): + """X11 libraries not found.""" + + +class UmfpackNotFoundError(NotFoundError): + """ + UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/) + not found. Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [umfpack]) or by setting + the UMFPACK environment variable.""" + + +class system_info: + + """ get_info() is the only public method. Don't use others. + """ + dir_env_var = None + # XXX: search_static_first is disabled by default, may disappear in + # future unless it is proved to be useful. + search_static_first = 0 + # The base-class section name is a random word "ALL" and is not really + # intended for general use. It cannot be None nor can it be DEFAULT as + # these break the ConfigParser. 
See gh-15338 + section = 'ALL' + saved_results = {} + + notfounderror = NotFoundError + + def __init__(self, + default_lib_dirs=default_lib_dirs, + default_include_dirs=default_include_dirs, + ): + self.__class__.info = {} + self.local_prefixes = [] + defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), + 'include_dirs': os.pathsep.join(default_include_dirs), + 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), + 'rpath': '', + 'src_dirs': os.pathsep.join(default_src_dirs), + 'search_static_first': str(self.search_static_first), + 'extra_compile_args': '', 'extra_link_args': ''} + self.cp = ConfigParser(defaults) + self.files = [] + self.files.extend(get_standard_file('.numpy-site.cfg')) + self.files.extend(get_standard_file('site.cfg')) + self.parse_config_files() + + if self.section is not None: + self.search_static_first = self.cp.getboolean( + self.section, 'search_static_first') + assert isinstance(self.search_static_first, int) + + def parse_config_files(self): + self.cp.read(self.files) + if not self.cp.has_section(self.section): + if self.section is not None: + self.cp.add_section(self.section) + + def calc_libraries_info(self): + libs = self.get_libraries() + dirs = self.get_lib_dirs() + # The extensions use runtime_library_dirs + r_dirs = self.get_runtime_lib_dirs() + # Intrinsic distutils use rpath, we simply append both entries + # as though they were one entry + r_dirs.extend(self.get_runtime_lib_dirs(key='rpath')) + info = {} + for lib in libs: + i = self.check_libs(dirs, [lib]) + if i is not None: + dict_append(info, **i) + else: + log.info('Library %s was not found. Ignoring' % (lib)) + + if r_dirs: + i = self.check_libs(r_dirs, [lib]) + if i is not None: + # Swap library keywords found to runtime_library_dirs + # the libraries are insisting on the user having defined + # them using the library_dirs, and not necessarily by + # runtime_library_dirs + del i['libraries'] + i['runtime_library_dirs'] = i.pop('library_dirs') + dict_append(info, **i) + else: + log.info('Runtime library %s was not found. 
Ignoring' % (lib)) + + return info + + def set_info(self, **info): + if info: + lib_info = self.calc_libraries_info() + dict_append(info, **lib_info) + # Update extra information + extra_info = self.calc_extra_info() + dict_append(info, **extra_info) + self.saved_results[self.__class__.__name__] = info + + def get_option_single(self, *options): + """ Ensure that only one of `options` are found in the section + + Parameters + ---------- + *options : list of str + a list of options to be found in the section (``self.section``) + + Returns + ------- + str : + the option that is uniquely found in the section + + Raises + ------ + AliasedOptionError : + in case more than one of the options are found + """ + found = [self.cp.has_option(self.section, opt) for opt in options] + if sum(found) == 1: + return options[found.index(True)] + elif sum(found) == 0: + # nothing is found anyways + return options[0] + + # Else we have more than 1 key found + if AliasedOptionError.__doc__ is None: + raise AliasedOptionError() + raise AliasedOptionError(AliasedOptionError.__doc__.format( + section=self.section, options='[{}]'.format(', '.join(options)))) + + + def has_info(self): + return self.__class__.__name__ in self.saved_results + + def calc_extra_info(self): + """ Updates the information in the current information with + respect to these flags: + extra_compile_args + extra_link_args + """ + info = {} + for key in ['extra_compile_args', 'extra_link_args']: + # Get values + opt = self.cp.get(self.section, key) + opt = _shell_utils.NativeParser.split(opt) + if opt: + tmp = {key: opt} + dict_append(info, **tmp) + return info + + def get_info(self, notfound_action=0): + """ Return a dictionary with items that are compatible + with numpy.distutils.setup keyword arguments. + """ + flag = 0 + if not self.has_info(): + flag = 1 + log.info(self.__class__.__name__ + ':') + if hasattr(self, 'calc_info'): + self.calc_info() + if notfound_action: + if not self.has_info(): + if notfound_action == 1: + warnings.warn(self.notfounderror.__doc__, stacklevel=2) + elif notfound_action == 2: + raise self.notfounderror(self.notfounderror.__doc__) + else: + raise ValueError(repr(notfound_action)) + + if not self.has_info(): + log.info(' NOT AVAILABLE') + self.set_info() + else: + log.info(' FOUND:') + + res = self.saved_results.get(self.__class__.__name__) + if log.get_threshold() <= log.INFO and flag: + for k, v in res.items(): + v = str(v) + if k in ['sources', 'libraries'] and len(v) > 270: + v = v[:120] + '...\n...\n...' 
+ v[-120:] + log.info(' %s = %s', k, v) + log.info('') + + return copy.deepcopy(res) + + def get_paths(self, section, key): + dirs = self.cp.get(section, key).split(os.pathsep) + env_var = self.dir_env_var + if env_var: + if is_sequence(env_var): + e0 = env_var[-1] + for e in env_var: + if e in os.environ: + e0 = e + break + if not env_var[0] == e0: + log.info('Setting %s=%s' % (env_var[0], e0)) + env_var = e0 + if env_var and env_var in os.environ: + d = os.environ[env_var] + if d == 'None': + log.info('Disabled %s: %s', + self.__class__.__name__, '(%s is None)' + % (env_var,)) + return [] + if os.path.isfile(d): + dirs = [os.path.dirname(d)] + dirs + l = getattr(self, '_lib_names', []) + if len(l) == 1: + b = os.path.basename(d) + b = os.path.splitext(b)[0] + if b[:3] == 'lib': + log.info('Replacing _lib_names[0]==%r with %r' \ + % (self._lib_names[0], b[3:])) + self._lib_names[0] = b[3:] + else: + ds = d.split(os.pathsep) + ds2 = [] + for d in ds: + if os.path.isdir(d): + ds2.append(d) + for dd in ['include', 'lib']: + d1 = os.path.join(d, dd) + if os.path.isdir(d1): + ds2.append(d1) + dirs = ds2 + dirs + default_dirs = self.cp.get(self.section, key).split(os.pathsep) + dirs.extend(default_dirs) + ret = [] + for d in dirs: + if len(d) > 0 and not os.path.isdir(d): + warnings.warn('Specified path %s is invalid.' % d, stacklevel=2) + continue + + if d not in ret: + ret.append(d) + + log.debug('( %s = %s )', key, ':'.join(ret)) + return ret + + def get_lib_dirs(self, key='library_dirs'): + return self.get_paths(self.section, key) + + def get_runtime_lib_dirs(self, key='runtime_library_dirs'): + path = self.get_paths(self.section, key) + if path == ['']: + path = [] + return path + + def get_include_dirs(self, key='include_dirs'): + return self.get_paths(self.section, key) + + def get_src_dirs(self, key='src_dirs'): + return self.get_paths(self.section, key) + + def get_libs(self, key, default): + try: + libs = self.cp.get(self.section, key) + except NoOptionError: + if not default: + return [] + if is_string(default): + return [default] + return default + return [b for b in [a.strip() for a in libs.split(',')] if b] + + def get_libraries(self, key='libraries'): + if hasattr(self, '_lib_names'): + return self.get_libs(key, default=self._lib_names) + else: + return self.get_libs(key, '') + + def library_extensions(self): + c = customized_ccompiler() + static_exts = [] + if c.compiler_type != 'msvc': + # MSVC doesn't understand binutils + static_exts.append('.a') + if sys.platform == 'win32': + static_exts.append('.lib') # .lib is used by MSVC and others + if self.search_static_first: + exts = static_exts + [so_ext] + else: + exts = [so_ext] + static_exts + if sys.platform == 'cygwin': + exts.append('.dll.a') + if sys.platform == 'darwin': + exts.append('.dylib') + return exts + + def check_libs(self, lib_dirs, libs, opt_libs=[]): + """If static or shared libraries are available then return + their info dictionary. + + Checks for all libraries as shared libraries first, then + static (or vice versa if self.search_static_first is True). + """ + exts = self.library_extensions() + info = None + for ext in exts: + info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) + if info is not None: + break + if not info: + log.info(' libraries %s not found in %s', ','.join(libs), + lib_dirs) + return info + + def check_libs2(self, lib_dirs, libs, opt_libs=[]): + """If static or shared libraries are available then return + their info dictionary. + + Checks each library for shared or static. 
+ """ + exts = self.library_extensions() + info = self._check_libs(lib_dirs, libs, opt_libs, exts) + if not info: + log.info(' libraries %s not found in %s', ','.join(libs), + lib_dirs) + + return info + + def _find_lib(self, lib_dir, lib, exts): + assert is_string(lib_dir) + # under windows first try without 'lib' prefix + if sys.platform == 'win32': + lib_prefixes = ['', 'lib'] + else: + lib_prefixes = ['lib'] + # for each library name, see if we can find a file for it. + for ext in exts: + for prefix in lib_prefixes: + p = self.combine_paths(lib_dir, prefix + lib + ext) + if p: + break + if p: + assert len(p) == 1 + # ??? splitext on p[0] would do this for cygwin + # doesn't seem correct + if ext == '.dll.a': + lib += '.dll' + if ext == '.lib': + lib = prefix + lib + return lib + + return False + + def _find_libs(self, lib_dirs, libs, exts): + # make sure we preserve the order of libs, as it can be important + found_dirs, found_libs = [], [] + for lib in libs: + for lib_dir in lib_dirs: + found_lib = self._find_lib(lib_dir, lib, exts) + if found_lib: + found_libs.append(found_lib) + if lib_dir not in found_dirs: + found_dirs.append(lib_dir) + break + return found_dirs, found_libs + + def _check_libs(self, lib_dirs, libs, opt_libs, exts): + """Find mandatory and optional libs in expected paths. + + Missing optional libraries are silently forgotten. + """ + if not is_sequence(lib_dirs): + lib_dirs = [lib_dirs] + # First, try to find the mandatory libraries + found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts) + if len(found_libs) > 0 and len(found_libs) == len(libs): + # Now, check for optional libraries + opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts) + found_libs.extend(opt_found_libs) + for lib_dir in opt_found_dirs: + if lib_dir not in found_dirs: + found_dirs.append(lib_dir) + info = {'libraries': found_libs, 'library_dirs': found_dirs} + return info + else: + return None + + def combine_paths(self, *args): + """Return a list of existing paths composed by all combinations + of items from the arguments. 
+ """ + return combine_paths(*args) + + +class fft_opt_info(system_info): + + def calc_info(self): + info = {} + fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') + djbfft_info = get_info('djbfft') + if fftw_info: + dict_append(info, **fftw_info) + if djbfft_info: + dict_append(info, **djbfft_info) + self.set_info(**info) + return + + +class fftw_info(system_info): + #variables to override + section = 'fftw' + dir_env_var = 'FFTW' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw3', + 'libs':['fftw3'], + 'includes':['fftw3.h'], + 'macros':[('SCIPY_FFTW3_H', None)]}, + {'name':'fftw2', + 'libs':['rfftw', 'fftw'], + 'includes':['fftw.h', 'rfftw.h'], + 'macros':[('SCIPY_FFTW_H', None)]}] + + def calc_ver_info(self, ver_param): + """Returns True on successful version detection, else False""" + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + + opt = self.get_option_single(self.section + '_libs', 'libraries') + libs = self.get_libs(opt, ver_param['libs']) + info = self.check_libs(lib_dirs, libs) + if info is not None: + flag = 0 + for d in incl_dirs: + if len(self.combine_paths(d, ver_param['includes'])) \ + == len(ver_param['includes']): + dict_append(info, include_dirs=[d]) + flag = 1 + break + if flag: + dict_append(info, define_macros=ver_param['macros']) + else: + info = None + if info is not None: + self.set_info(**info) + return True + else: + log.info(' %s not found' % (ver_param['name'])) + return False + + def calc_info(self): + for i in self.ver_info: + if self.calc_ver_info(i): + break + + +class fftw2_info(fftw_info): + #variables to override + section = 'fftw' + dir_env_var = 'FFTW' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw2', + 'libs':['rfftw', 'fftw'], + 'includes':['fftw.h', 'rfftw.h'], + 'macros':[('SCIPY_FFTW_H', None)]} + ] + + +class fftw3_info(fftw_info): + #variables to override + section = 'fftw3' + dir_env_var = 'FFTW3' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw3', + 'libs':['fftw3'], + 'includes':['fftw3.h'], + 'macros':[('SCIPY_FFTW3_H', None)]}, + ] + + +class fftw3_armpl_info(fftw_info): + section = 'fftw3' + dir_env_var = 'ARMPL_DIR' + notfounderror = FFTWNotFoundError + ver_info = [{'name': 'fftw3', + 'libs': ['armpl_lp64_mp'], + 'includes': ['fftw3.h'], + 'macros': [('SCIPY_FFTW3_H', None)]}] + + +class dfftw_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'dfftw', + 'libs':['drfftw', 'dfftw'], + 'includes':['dfftw.h', 'drfftw.h'], + 'macros':[('SCIPY_DFFTW_H', None)]}] + + +class sfftw_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'sfftw', + 'libs':['srfftw', 'sfftw'], + 'includes':['sfftw.h', 'srfftw.h'], + 'macros':[('SCIPY_SFFTW_H', None)]}] + + +class fftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'fftw threads', + 'libs':['rfftw_threads', 'fftw_threads'], + 'includes':['fftw_threads.h', 'rfftw_threads.h'], + 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] + + +class dfftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'dfftw threads', + 'libs':['drfftw_threads', 'dfftw_threads'], + 'includes':['dfftw_threads.h', 'drfftw_threads.h'], + 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] + + +class sfftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'sfftw threads', + 'libs':['srfftw_threads', 'sfftw_threads'], + 'includes':['sfftw_threads.h', 'srfftw_threads.h'], + 
'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] + + +class djbfft_info(system_info): + section = 'djbfft' + dir_env_var = 'DJBFFT' + notfounderror = DJBFFTNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + info = None + for d in lib_dirs: + p = self.combine_paths(d, ['djbfft.a']) + if p: + info = {'extra_objects': p} + break + p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) + if p: + info = {'libraries': ['djbfft'], 'library_dirs': [d]} + break + if info is None: + return + for d in incl_dirs: + if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: + dict_append(info, include_dirs=[d], + define_macros=[('SCIPY_DJBFFT_H', None)]) + self.set_info(**info) + return + return + + +class mkl_info(system_info): + section = 'mkl' + dir_env_var = 'MKLROOT' + _lib_mkl = ['mkl_rt'] + + def get_mkl_rootdir(self): + mklroot = os.environ.get('MKLROOT', None) + if mklroot is not None: + return mklroot + paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) + ld_so_conf = '/etc/ld.so.conf' + if os.path.isfile(ld_so_conf): + with open(ld_so_conf) as f: + for d in f: + d = d.strip() + if d: + paths.append(d) + intel_mkl_dirs = [] + for path in paths: + path_atoms = path.split(os.sep) + for m in path_atoms: + if m.startswith('mkl'): + d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) + intel_mkl_dirs.append(d) + break + for d in paths: + dirs = glob(os.path.join(d, 'mkl', '*')) + dirs += glob(os.path.join(d, 'mkl*')) + for sub_dir in dirs: + if os.path.isdir(os.path.join(sub_dir, 'lib')): + return sub_dir + return None + + def __init__(self): + mklroot = self.get_mkl_rootdir() + if mklroot is None: + system_info.__init__(self) + else: + from .cpuinfo import cpu + if cpu.is_Itanium(): + plt = '64' + elif cpu.is_Intel() and cpu.is_64bit(): + plt = 'intel64' + else: + plt = '32' + system_info.__init__( + self, + default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], + default_include_dirs=[os.path.join(mklroot, 'include')]) + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + opt = self.get_option_single('mkl_libs', 'libraries') + mkl_libs = self.get_libs(opt, self._lib_mkl) + info = self.check_libs2(lib_dirs, mkl_libs) + if info is None: + return + dict_append(info, + define_macros=[('SCIPY_MKL_H', None), + ('HAVE_CBLAS', None)], + include_dirs=incl_dirs) + if sys.platform == 'win32': + pass # win32 has no pthread library + else: + dict_append(info, libraries=['pthread']) + self.set_info(**info) + + +class lapack_mkl_info(mkl_info): + pass + + +class blas_mkl_info(mkl_info): + pass + + +class ssl2_info(system_info): + section = 'ssl2' + dir_env_var = 'SSL2_DIR' + # Multi-threaded version. Python itself must be built by Fujitsu compiler. 
+ _lib_ssl2 = ['fjlapackexsve'] + # Single-threaded version + #_lib_ssl2 = ['fjlapacksve'] + + def get_tcsds_rootdir(self): + tcsdsroot = os.environ.get('TCSDS_PATH', None) + if tcsdsroot is not None: + return tcsdsroot + return None + + def __init__(self): + tcsdsroot = self.get_tcsds_rootdir() + if tcsdsroot is None: + system_info.__init__(self) + else: + system_info.__init__( + self, + default_lib_dirs=[os.path.join(tcsdsroot, 'lib64')], + default_include_dirs=[os.path.join(tcsdsroot, + 'clang-comp/include')]) + + def calc_info(self): + tcsdsroot = self.get_tcsds_rootdir() + + lib_dirs = self.get_lib_dirs() + if lib_dirs is None: + lib_dirs = os.path.join(tcsdsroot, 'lib64') + + incl_dirs = self.get_include_dirs() + if incl_dirs is None: + incl_dirs = os.path.join(tcsdsroot, 'clang-comp/include') + + ssl2_libs = self.get_libs('ssl2_libs', self._lib_ssl2) + + info = self.check_libs2(lib_dirs, ssl2_libs) + if info is None: + return + dict_append(info, + define_macros=[('HAVE_CBLAS', None), + ('HAVE_SSL2', 1)], + include_dirs=incl_dirs,) + self.set_info(**info) + + +class lapack_ssl2_info(ssl2_info): + pass + + +class blas_ssl2_info(ssl2_info): + pass + + + +class armpl_info(system_info): + section = 'armpl' + dir_env_var = 'ARMPL_DIR' + _lib_armpl = ['armpl_lp64_mp'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + armpl_libs = self.get_libs('armpl_libs', self._lib_armpl) + info = self.check_libs2(lib_dirs, armpl_libs) + if info is None: + return + dict_append(info, + define_macros=[('SCIPY_MKL_H', None), + ('HAVE_CBLAS', None)], + include_dirs=incl_dirs) + self.set_info(**info) + +class lapack_armpl_info(armpl_info): + pass + +class blas_armpl_info(armpl_info): + pass + + +class atlas_info(system_info): + section = 'atlas' + dir_env_var = 'ATLAS' + _lib_names = ['f77blas', 'cblas'] + if sys.platform[:7] == 'freebsd': + _lib_atlas = ['atlas_r'] + _lib_lapack = ['alapack_r'] + else: + _lib_atlas = ['atlas'] + _lib_lapack = ['lapack'] + + notfounderror = AtlasNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', + 'sse', '3dnow', 'sse2']) + [d]) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + opt = self.get_option_single('atlas_libs', 'libraries') + atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) + lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) + atlas = None + lapack = None + atlas_1 = None + for d in lib_dirs: + atlas = self.check_libs2(d, atlas_libs, []) + if atlas is not None: + lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) + lapack = self.check_libs2(lib_dirs2, lapack_libs, []) + if lapack is not None: + break + if atlas: + atlas_1 = atlas + log.info(self.__class__) + if atlas is None: + atlas = atlas_1 + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + if lapack is not None: + dict_append(info, **lapack) + dict_append(info, **atlas) + elif 'lapack_atlas' in atlas['libraries']: + dict_append(info, **atlas) + dict_append(info, + define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) + self.set_info(**info) + return + else: + dict_append(info, **atlas) + dict_append(info, 
define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) + message = textwrap.dedent(""" + ********************************************************************* + Could not find lapack library within the ATLAS installation. + ********************************************************************* + """) + warnings.warn(message, stacklevel=2) + self.set_info(**info) + return + + # Check if lapack library is complete, only warn if it is not. + lapack_dir = lapack['library_dirs'][0] + lapack_name = lapack['libraries'][0] + lapack_lib = None + lib_prefixes = ['lib'] + if sys.platform == 'win32': + lib_prefixes.append('') + for e in self.library_extensions(): + for prefix in lib_prefixes: + fn = os.path.join(lapack_dir, prefix + lapack_name + e) + if os.path.exists(fn): + lapack_lib = fn + break + if lapack_lib: + break + if lapack_lib is not None: + sz = os.stat(lapack_lib)[6] + if sz <= 4000 * 1024: + message = textwrap.dedent(""" + ********************************************************************* + Lapack library (from ATLAS) is probably incomplete: + size of %s is %sk (expected >4000k) + + Follow the instructions in the KNOWN PROBLEMS section of the file + numpy/INSTALL.txt. + ********************************************************************* + """) % (lapack_lib, sz / 1024) + warnings.warn(message, stacklevel=2) + else: + info['language'] = 'f77' + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + dict_append(info, **atlas_extra_info) + + self.set_info(**info) + + +class atlas_blas_info(atlas_info): + _lib_names = ['f77blas', 'cblas'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + opt = self.get_option_single('atlas_libs', 'libraries') + atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) + atlas = self.check_libs2(lib_dirs, atlas_libs, []) + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + info['define_macros'] = [('HAVE_CBLAS', None)] + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + dict_append(atlas, **atlas_extra_info) + + dict_append(info, **atlas) + + self.set_info(**info) + return + + +class atlas_threads_info(atlas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['ptf77blas', 'ptcblas'] + + +class atlas_blas_threads_info(atlas_blas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['ptf77blas', 'ptcblas'] + + +class lapack_atlas_info(atlas_info): + _lib_names = ['lapack_atlas'] + atlas_info._lib_names + + +class lapack_atlas_threads_info(atlas_threads_info): + _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names + + +class atlas_3_10_info(atlas_info): + _lib_names = ['satlas'] + _lib_atlas = _lib_names + _lib_lapack = _lib_names + + +class atlas_3_10_blas_info(atlas_3_10_info): + _lib_names = ['satlas'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + opt = self.get_option_single('atlas_lib', 'libraries') + atlas_libs = self.get_libs(opt, self._lib_names) + atlas = self.check_libs2(lib_dirs, atlas_libs, []) + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + info['define_macros'] = [('HAVE_CBLAS', None)] + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + 
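+ # get_atlas_version (defined further below) compiles, links and runs a + # small probe program that calls ATL_buildinfo() to detect the installed + # ATLAS version.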
dict_append(atlas, **atlas_extra_info) + + dict_append(info, **atlas) + + self.set_info(**info) + return + + + class atlas_3_10_threads_info(atlas_3_10_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['tatlas'] + _lib_atlas = _lib_names + _lib_lapack = _lib_names + + + class atlas_3_10_blas_threads_info(atlas_3_10_blas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['tatlas'] + + + class lapack_atlas_3_10_info(atlas_3_10_info): + pass + + + class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info): + pass + + + class lapack_info(system_info): + section = 'lapack' + dir_env_var = 'LAPACK' + _lib_names = ['lapack'] + notfounderror = LapackNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + opt = self.get_option_single('lapack_libs', 'libraries') + lapack_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, lapack_libs, []) + if info is None: + return + info['language'] = 'f77' + self.set_info(**info) + + + class lapack_src_info(system_info): + # LAPACK_SRC is deprecated, please do not use this! + # Build or install a LAPACK library via your package manager or from + # source separately. + section = 'lapack_src' + dir_env_var = 'LAPACK_SRC' + notfounderror = LapackSrcNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'dgesv.f')): + src_dir = d + break + if not src_dir: + #XXX: Get sources from netlib. Maybe ask first. + return + # The following is extracted from LAPACK-3.0/SRC/Makefile. + # Added missing names from lapack-lite-3.1.1/SRC/Makefile + # while keeping removed names for Lapack-3.0 compatibility.
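+ # Each stem listed below is expanded into precision-prefixed file names + # at the end of this method, e.g. the stem 'gesv' yields sgesv.f, + # dgesv.f, cgesv.f and zgesv.f.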
+ allaux = ''' + ilaenv ieeeck lsame lsamen xerbla + iparmq + ''' # *.f + laux = ''' + bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 + laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 + lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre + larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 + lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 + lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf + stebz stedc steqr sterf + + larra larrc larrd larr larrk larrj larrr laneg laisnan isnan + lazq3 lazq4 + ''' # [s|d]*.f + lasrc = ''' + gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak + gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv + gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 + geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd + gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal + gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd + ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein + hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 + lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb + lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp + laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv + lartv larz larzb larzt laswp lasyf latbs latdf latps latrd + latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv + pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 + potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri + pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs + spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv + sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 + tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs + trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs + tzrqf tzrzf + + lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 + ''' # [s|c|d|z]*.f + sd_lasrc = ''' + laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l + org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr + orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 + ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx + sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd + stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd + sygvx sytd2 sytrd + ''' # [s|d]*.f + cz_lasrc = ''' + bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev + heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv + hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd + hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf + hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 + laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe + laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv + spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq + ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 + unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr + ''' # [c|z]*.f + ####### + sclaux = laux + ' econd ' # s*.f + dzlaux = laux + ' secnd ' # d*.f + slasrc = lasrc + sd_lasrc # s*.f + dlasrc = lasrc + sd_lasrc # d*.f + clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f + zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f + oclasrc = ' icmax1 scsum1 ' # *.f + ozlasrc = ' izmax1 dzsum1 ' # *.f + sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ + + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ + + ['c%s.f' % f for f in (clasrc).split()] \ + + ['z%s.f' % f for f in (zlasrc).split()] \ + + ['%s.f' % f for f 
in (allaux + oclasrc + ozlasrc).split()] + sources = [os.path.join(src_dir, f) for f in sources] + # Lapack 3.1: + src_dir2 = os.path.join(src_dir, '..', 'INSTALL') + sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] + # Lapack 3.2.1: + sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] + sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] + sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz'] + # Should we check here actual existence of source files? + # Yes, the file listing is different between 3.0 and 3.1 + # versions. + sources = [f for f in sources if os.path.isfile(f)] + info = {'sources': sources, 'language': 'f77'} + self.set_info(**info) + +atlas_version_c_text = r''' +/* This file is generated from numpy/distutils/system_info.py */ +void ATL_buildinfo(void); +int main(void) { + ATL_buildinfo(); + return 0; +} +''' + +_cached_atlas_version = {} + + +def get_atlas_version(**config): + libraries = config.get('libraries', []) + library_dirs = config.get('library_dirs', []) + key = (tuple(libraries), tuple(library_dirs)) + if key in _cached_atlas_version: + return _cached_atlas_version[key] + c = cmd_config(Distribution()) + atlas_version = None + info = {} + try: + s, o = c.get_output(atlas_version_c_text, + libraries=libraries, library_dirs=library_dirs, + ) + if s and re.search(r'undefined reference to `_gfortran', o, re.M): + s, o = c.get_output(atlas_version_c_text, + libraries=libraries + ['gfortran'], + library_dirs=library_dirs, + ) + if not s: + warnings.warn(textwrap.dedent(""" + ***************************************************** + Linkage with ATLAS requires gfortran. Use + + python setup.py config_fc --fcompiler=gnu95 ... + + when building extension libraries that use ATLAS. + Make sure that -lgfortran is used for C++ extensions. 
+ ***************************************************** + """), stacklevel=2) + dict_append(info, language='f90', + define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)]) + except Exception: # failed to get version from file -- maybe on Windows + # look at directory name + for o in library_dirs: + m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o) + if m: + atlas_version = m.group('version') + if atlas_version is not None: + break + + # final choice --- look at ATLAS_VERSION environment + # variable + if atlas_version is None: + atlas_version = os.environ.get('ATLAS_VERSION', None) + if atlas_version: + dict_append(info, define_macros=[( + 'ATLAS_INFO', _c_string_literal(atlas_version)) + ]) + else: + dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)]) + return atlas_version or '?.?.?', info + + if not s: + m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o) + if m: + atlas_version = m.group('version') + if atlas_version is None: + if re.search(r'undefined symbol: ATL_buildinfo', o, re.M): + atlas_version = '3.2.1_pre3.3.6' + else: + log.info('Status: %d', s) + log.info('Output: %s', o) + + elif atlas_version == '3.2.1_pre3.3.6': + dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)]) + else: + dict_append(info, define_macros=[( + 'ATLAS_INFO', _c_string_literal(atlas_version)) + ]) + result = _cached_atlas_version[key] = atlas_version, info + return result + + + class lapack_opt_info(system_info): + notfounderror = LapackNotFoundError + + # List of all known LAPACK libraries, in the default order + lapack_order = ['armpl', 'mkl', 'ssl2', 'openblas', 'flame', + 'accelerate', 'atlas', 'lapack'] + order_env_var_name = 'NPY_LAPACK_ORDER' + + def _calc_info_armpl(self): + info = get_info('lapack_armpl') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_mkl(self): + info = get_info('lapack_mkl') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_ssl2(self): + info = get_info('lapack_ssl2') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_openblas(self): + info = get_info('openblas_lapack') + if info: + self.set_info(**info) + return True + info = get_info('openblas_clapack') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_flame(self): + info = get_info('flame') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_atlas(self): + info = get_info('atlas_3_10_threads') + if not info: + info = get_info('atlas_3_10') + if not info: + info = get_info('atlas_threads') + if not info: + info = get_info('atlas') + if info: + # Figure out if ATLAS has lapack... + # If not we need the lapack library, but not BLAS! + l = info.get('define_macros', []) + if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \ + or ('ATLAS_WITHOUT_LAPACK', None) in l: + # Get LAPACK (with possible warnings) + # If not found we don't accept anything + # since we can't use ATLAS with LAPACK!
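+ # (ATLAS_WITH_LAPACK_ATLAS and ATLAS_WITHOUT_LAPACK are the macros set + # by atlas_info.calc_info above when ATLAS ships only a partial LAPACK + # or none at all.)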
+ lapack_info = self._get_info_lapack() + if not lapack_info: + return False + dict_append(info, **lapack_info) + self.set_info(**info) + return True + return False + + def _calc_info_accelerate(self): + info = get_info('accelerate') + if info: + self.set_info(**info) + return True + return False + + def _get_info_blas(self): + # Default to get the optimized BLAS implementation + info = get_info('blas_opt') + if not info: + warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) + info_src = get_info('blas_src') + if not info_src: + warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) + return {} + dict_append(info, libraries=[('fblas_src', info_src)]) + return info + + def _get_info_lapack(self): + info = get_info('lapack') + if not info: + warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3) + info_src = get_info('lapack_src') + if not info_src: + warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3) + return {} + dict_append(info, libraries=[('flapack_src', info_src)]) + return info + + def _calc_info_lapack(self): + info = self._get_info_lapack() + if info: + info_blas = self._get_info_blas() + dict_append(info, **info_blas) + dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) + self.set_info(**info) + return True + return False + + def _calc_info_from_envvar(self): + info = {} + info['language'] = 'f77' + info['libraries'] = [] + info['include_dirs'] = [] + info['define_macros'] = [] + info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split() + self.set_info(**info) + return True + + def _calc_info(self, name): + return getattr(self, '_calc_info_{}'.format(name))() + + def calc_info(self): + lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name) + if len(unknown_order) > 0: + raise ValueError("lapack_opt_info user defined " + "LAPACK order has unacceptable " + "values: {}".format(unknown_order)) + + if 'NPY_LAPACK_LIBS' in os.environ: + # Bypass autodetection, set language to F77 and use env var linker + # flags directly + self._calc_info_from_envvar() + return + + for lapack in lapack_order: + if self._calc_info(lapack): + return + + if 'lapack' not in lapack_order: + # Since the user may request *not* to use any library, we still need + # to raise warnings to signal missing packages! 
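+ # (A library is excluded by prefixing the order list with '^' or '!', + # e.g. NPY_LAPACK_ORDER='^lapack' -- see _parse_env_order.)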
+ warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2) + warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2) + + +class _ilp64_opt_info_mixin: + symbol_suffix = None + symbol_prefix = None + + def _check_info(self, info): + macros = dict(info.get('define_macros', [])) + prefix = macros.get('BLAS_SYMBOL_PREFIX', '') + suffix = macros.get('BLAS_SYMBOL_SUFFIX', '') + + if self.symbol_prefix not in (None, prefix): + return False + + if self.symbol_suffix not in (None, suffix): + return False + + return bool(info) + + +class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin): + notfounderror = LapackILP64NotFoundError + lapack_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] + order_env_var_name = 'NPY_LAPACK_ILP64_ORDER' + + def _calc_info(self, name): + print('lapack_ilp64_opt_info._calc_info(name=%s)' % (name)) + info = get_info(name + '_lapack') + if self._check_info(info): + self.set_info(**info) + return True + else: + print('%s_lapack does not exist' % (name)) + return False + + +class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info): + # Same as lapack_ilp64_opt_info, but fix symbol names + symbol_prefix = '' + symbol_suffix = '' + + +class lapack64__opt_info(lapack_ilp64_opt_info): + symbol_prefix = '' + symbol_suffix = '64_' + + +class blas_opt_info(system_info): + notfounderror = BlasNotFoundError + # List of all known BLAS libraries, in the default order + + blas_order = ['armpl', 'mkl', 'ssl2', 'blis', 'openblas', + 'accelerate', 'atlas', 'blas'] + order_env_var_name = 'NPY_BLAS_ORDER' + + def _calc_info_armpl(self): + info = get_info('blas_armpl') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_mkl(self): + info = get_info('blas_mkl') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_ssl2(self): + info = get_info('blas_ssl2') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_blis(self): + info = get_info('blis') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_openblas(self): + info = get_info('openblas') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_atlas(self): + info = get_info('atlas_3_10_blas_threads') + if not info: + info = get_info('atlas_3_10_blas') + if not info: + info = get_info('atlas_blas_threads') + if not info: + info = get_info('atlas_blas') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_accelerate(self): + info = get_info('accelerate') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_blas(self): + # Warn about a non-optimized BLAS library + warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3) + info = {} + dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) + + blas = get_info('blas') + if blas: + dict_append(info, **blas) + else: + # Not even BLAS was found! 
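+ # (The blas_src fallback below builds the reference BLAS from its + # Fortran sources; see blas_src_info further down in this module.)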
+ warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) + + blas_src = get_info('blas_src') + if not blas_src: + warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) + return False + dict_append(info, libraries=[('fblas_src', blas_src)]) + + self.set_info(**info) + return True + + def _calc_info_from_envvar(self): + info = {} + info['language'] = 'f77' + info['libraries'] = [] + info['include_dirs'] = [] + info['define_macros'] = [] + info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split() + if 'NPY_CBLAS_LIBS' in os.environ: + info['define_macros'].append(('HAVE_CBLAS', None)) + info['extra_link_args'].extend( + os.environ['NPY_CBLAS_LIBS'].split()) + self.set_info(**info) + return True + + def _calc_info(self, name): + return getattr(self, '_calc_info_{}'.format(name))() + + def calc_info(self): + blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name) + if len(unknown_order) > 0: + raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order)) + + if 'NPY_BLAS_LIBS' in os.environ: + # Bypass autodetection, set language to F77 and use env var linker + # flags directly + self._calc_info_from_envvar() + return + + for blas in blas_order: + if self._calc_info(blas): + return + + if 'blas' not in blas_order: + # Since the user may request *not* to use any library, we still need + # to raise warnings to signal missing packages! + warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2) + warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2) + + +class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin): + notfounderror = BlasILP64NotFoundError + blas_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] + order_env_var_name = 'NPY_BLAS_ILP64_ORDER' + + def _calc_info(self, name): + info = get_info(name) + if self._check_info(info): + self.set_info(**info) + return True + return False + + +class blas_ilp64_plain_opt_info(blas_ilp64_opt_info): + symbol_prefix = '' + symbol_suffix = '' + + +class blas64__opt_info(blas_ilp64_opt_info): + symbol_prefix = '' + symbol_suffix = '64_' + + +class cblas_info(system_info): + section = 'cblas' + dir_env_var = 'CBLAS' + # No default as it's used only in blas_info + _lib_names = [] + notfounderror = BlasNotFoundError + + +class blas_info(system_info): + section = 'blas' + dir_env_var = 'BLAS' + _lib_names = ['blas'] + notfounderror = BlasNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + opt = self.get_option_single('blas_libs', 'libraries') + blas_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, blas_libs, []) + if info is None: + return + else: + info['include_dirs'] = self.get_include_dirs() + if platform.system() == 'Windows': + # The check for windows is needed because get_cblas_libs uses the + # same compiler that was used to compile Python and msvc is + # often not installed when mingw is being used. This rough + # treatment is not desirable, but windows is tricky. + info['language'] = 'f77' # XXX: is it generally true? 
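+ # An illustrative site.cfg entry consumed by this class (library name + # and directories are examples, not defaults): + # [blas] + # blas_libs = openblas + # library_dirs = /opt/OpenBLAS/lib + # include_dirs = /opt/OpenBLAS/include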
+ # If cblas is given as an option, use those + cblas_info_obj = cblas_info() + cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries') + cblas_libs = cblas_info_obj.get_libs(cblas_opt, None) + if cblas_libs: + info['libraries'] = cblas_libs + blas_libs + info['define_macros'] = [('HAVE_CBLAS', None)] + else: + lib = self.get_cblas_libs(info) + if lib is not None: + info['language'] = 'c' + info['libraries'] = lib + info['define_macros'] = [('HAVE_CBLAS', None)] + self.set_info(**info) + + def get_cblas_libs(self, info): + """ Check whether we can link with the CBLAS interface + + This method will search through several combinations of libraries + to check whether CBLAS is present: + + 1. Libraries in ``info['libraries']``, as is + 2. As 1. but also explicitly adding ``'cblas'`` as a library + 3. As 1. but also explicitly adding ``'blas'`` as a library + 4. Check only library ``'cblas'`` + 5. Check only library ``'blas'`` + + Parameters + ---------- + info : dict + system information dictionary for compilation and linking + + Returns + ------- + libraries : list of str or None + a list of libraries that enables the use of the CBLAS interface. + Returns None if not found or a compilation error occurs. + + Since 1.17 returns a list. + """ + # primitive cblas check by looking for the header and trying to link + # cblas or blas + c = customized_ccompiler() + tmpdir = tempfile.mkdtemp() + s = textwrap.dedent("""\ + #include <cblas.h> + int main(int argc, const char *argv[]) + { + double a[4] = {1,2,3,4}; + double b[4] = {5,6,7,8}; + return cblas_ddot(4, a, 1, b, 1) > 10; + }""") + src = os.path.join(tmpdir, 'source.c') + try: + with open(src, 'w') as f: + f.write(s) + + try: + # check we can compile (find headers) + obj = c.compile([src], output_dir=tmpdir, + include_dirs=self.get_include_dirs()) + except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError): + return None + + # check we can link (find library) + # some systems have separate cblas and blas libs.
+ for libs in [info['libraries'], ['cblas'] + info['libraries'], + ['blas'] + info['libraries'], ['cblas'], ['blas']]: + try: + c.link_executable(obj, os.path.join(tmpdir, "a.out"), + libraries=libs, + library_dirs=info['library_dirs'], + extra_postargs=info.get('extra_link_args', [])) + return libs + except distutils.ccompiler.LinkError: + pass + finally: + shutil.rmtree(tmpdir) + return None + + +class openblas_info(blas_info): + section = 'openblas' + dir_env_var = 'OPENBLAS' + _lib_names = ['openblas'] + _require_symbols = [] + notfounderror = BlasNotFoundError + + @property + def symbol_prefix(self): + try: + return self.cp.get(self.section, 'symbol_prefix') + except NoOptionError: + return '' + + @property + def symbol_suffix(self): + try: + return self.cp.get(self.section, 'symbol_suffix') + except NoOptionError: + return '' + + def _calc_info(self): + c = customized_ccompiler() + + lib_dirs = self.get_lib_dirs() + + # Prefer to use libraries over openblas_libs + opt = self.get_option_single('openblas_libs', 'libraries') + openblas_libs = self.get_libs(opt, self._lib_names) + + info = self.check_libs(lib_dirs, openblas_libs, []) + + if c.compiler_type == "msvc" and info is None: + from numpy.distutils.fcompiler import new_fcompiler + f = new_fcompiler(c_compiler=c) + if f and f.compiler_type == 'gnu95': + # Try gfortran-compatible library files + info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) + # Skip lapack check, we'd need build_ext to do it + skip_symbol_check = True + elif info: + skip_symbol_check = False + info['language'] = 'c' + + if info is None: + return None + + # Add extra info for OpenBLAS + extra_info = self.calc_extra_info() + dict_append(info, **extra_info) + + if not (skip_symbol_check or self.check_symbols(info)): + return None + + info['define_macros'] = [('HAVE_CBLAS', None)] + if self.symbol_prefix: + info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)] + if self.symbol_suffix: + info['define_macros'] += [ + ('BLAS_SYMBOL_SUFFIX', self.symbol_suffix), + ('OPENBLAS_ILP64_NAMING_SCHEME', None), + ] + + return info + + def calc_info(self): + info = self._calc_info() + if info is not None: + self.set_info(**info) + + def check_msvc_gfortran_libs(self, library_dirs, libraries): + # First, find the full path to each library directory + library_paths = [] + for library in libraries: + for library_dir in library_dirs: + # MinGW static ext will be .a + fullpath = os.path.join(library_dir, library + '.a') + if os.path.isfile(fullpath): + library_paths.append(fullpath) + break + else: + return None + + # Generate numpy.distutils virtual static library file + basename = self.__class__.__name__ + tmpdir = os.path.join(os.getcwd(), 'build', basename) + if not os.path.isdir(tmpdir): + os.makedirs(tmpdir) + + info = {'library_dirs': [tmpdir], + 'libraries': [basename], + 'language': 'f77'} + + fake_lib_file = os.path.join(tmpdir, basename + '.fobjects') + fake_clib_file = os.path.join(tmpdir, basename + '.cobjects') + with open(fake_lib_file, 'w') as f: + f.write("\n".join(library_paths)) + with open(fake_clib_file, 'w') as f: + pass + + return info + + def check_symbols(self, info): + res = False + c = customized_ccompiler() + + tmpdir = tempfile.mkdtemp() + + prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix, + symbol_name, + self.symbol_suffix) + for symbol_name in self._require_symbols) + calls = "\n".join("%s%s%s();" % (self.symbol_prefix, + symbol_name, + self.symbol_suffix) + for symbol_name in self._require_symbols) + s = 
textwrap.dedent("""\ + %(prototypes)s + int main(int argc, const char *argv[]) + { + %(calls)s + return 0; + }""") % dict(prototypes=prototypes, calls=calls) + src = os.path.join(tmpdir, 'source.c') + out = os.path.join(tmpdir, 'a.out') + # Add the additional "extra" arguments + try: + extra_args = info['extra_link_args'] + except Exception: + extra_args = [] + try: + with open(src, 'w') as f: + f.write(s) + obj = c.compile([src], output_dir=tmpdir) + try: + c.link_executable(obj, out, libraries=info['libraries'], + library_dirs=info['library_dirs'], + extra_postargs=extra_args) + res = True + except distutils.ccompiler.LinkError: + res = False + finally: + shutil.rmtree(tmpdir) + return res + +class openblas_lapack_info(openblas_info): + section = 'openblas' + dir_env_var = 'OPENBLAS' + _lib_names = ['openblas'] + _require_symbols = ['zungqr_'] + notfounderror = BlasNotFoundError + +class openblas_clapack_info(openblas_lapack_info): + _lib_names = ['openblas', 'lapack'] + +class openblas_ilp64_info(openblas_info): + section = 'openblas_ilp64' + dir_env_var = 'OPENBLAS_ILP64' + _lib_names = ['openblas64'] + _require_symbols = ['dgemm_', 'cblas_dgemm'] + notfounderror = BlasILP64NotFoundError + + def _calc_info(self): + info = super()._calc_info() + if info is not None: + info['define_macros'] += [('HAVE_BLAS_ILP64', None)] + return info + +class openblas_ilp64_lapack_info(openblas_ilp64_info): + _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr'] + + def _calc_info(self): + info = super()._calc_info() + if info: + info['define_macros'] += [('HAVE_LAPACKE', None)] + return info + +class openblas64__info(openblas_ilp64_info): + # ILP64 Openblas, with default symbol suffix + section = 'openblas64_' + dir_env_var = 'OPENBLAS64_' + _lib_names = ['openblas64_'] + symbol_suffix = '64_' + symbol_prefix = '' + +class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info): + pass + +class blis_info(blas_info): + section = 'blis' + dir_env_var = 'BLIS' + _lib_names = ['blis'] + notfounderror = BlasNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + opt = self.get_option_single('blis_libs', 'libraries') + blis_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs2(lib_dirs, blis_libs, []) + if info is None: + return + + # Add include dirs + incl_dirs = self.get_include_dirs() + dict_append(info, + language='c', + define_macros=[('HAVE_CBLAS', None)], + include_dirs=incl_dirs) + self.set_info(**info) + + +class flame_info(system_info): + """ Usage of libflame for LAPACK operations + + This requires libflame to be compiled with lapack wrappers: + + ./configure --enable-lapack2flame ... + + Be aware that libflame 5.1.0 has some missing names in the shared library, so + if you have problems, try the static flame library. 
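+ + An illustrative site.cfg entry (the section and 'libraries' key are + what this class reads; the directory is an example): + + [flame] + libraries = flame + library_dirs = /usr/local/lib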
+ """ + section = 'flame' + _lib_names = ['flame'] + notfounderror = FlameNotFoundError + + def check_embedded_lapack(self, info): + """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """ + c = customized_ccompiler() + + tmpdir = tempfile.mkdtemp() + s = textwrap.dedent("""\ + void zungqr_(); + int main(int argc, const char *argv[]) + { + zungqr_(); + return 0; + }""") + src = os.path.join(tmpdir, 'source.c') + out = os.path.join(tmpdir, 'a.out') + # Add the additional "extra" arguments + extra_args = info.get('extra_link_args', []) + try: + with open(src, 'w') as f: + f.write(s) + obj = c.compile([src], output_dir=tmpdir) + try: + c.link_executable(obj, out, libraries=info['libraries'], + library_dirs=info['library_dirs'], + extra_postargs=extra_args) + return True + except distutils.ccompiler.LinkError: + return False + finally: + shutil.rmtree(tmpdir) + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + flame_libs = self.get_libs('libraries', self._lib_names) + + info = self.check_libs2(lib_dirs, flame_libs, []) + if info is None: + return + + # Add the extra flag args to info + extra_info = self.calc_extra_info() + dict_append(info, **extra_info) + + if self.check_embedded_lapack(info): + # check if the user has supplied all information required + self.set_info(**info) + else: + # Try and get the BLAS lib to see if we can get it to work + blas_info = get_info('blas_opt') + if not blas_info: + # since we already failed once, this ain't going to work either + return + + # Now we need to merge the two dictionaries + for key in blas_info: + if isinstance(blas_info[key], list): + info[key] = info.get(key, []) + blas_info[key] + elif isinstance(blas_info[key], tuple): + info[key] = info.get(key, ()) + blas_info[key] + else: + info[key] = info.get(key, '') + blas_info[key] + + # Now check again + if self.check_embedded_lapack(info): + self.set_info(**info) + + +class accelerate_info(system_info): + section = 'accelerate' + _lib_names = ['accelerate', 'veclib'] + notfounderror = BlasNotFoundError + + def calc_info(self): + # Make possible to enable/disable from config file/env var + libraries = os.environ.get('ACCELERATE') + if libraries: + libraries = [libraries] + else: + libraries = self.get_libs('libraries', self._lib_names) + libraries = [lib.strip().lower() for lib in libraries] + + if (sys.platform == 'darwin' and + not os.getenv('_PYTHON_HOST_PLATFORM', None)): + # Use the system BLAS from Accelerate or vecLib under OSX + args = [] + link_args = [] + if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ + 'x86_64' in get_platform() or \ + 'i386' in platform.platform(): + intel = 1 + else: + intel = 0 + if (os.path.exists('/System/Library/Frameworks' + '/Accelerate.framework/') and + 'accelerate' in libraries): + if intel: + args.extend(['-msse3']) + args.extend([ + '-I/System/Library/Frameworks/vecLib.framework/Headers']) + link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) + elif (os.path.exists('/System/Library/Frameworks' + '/vecLib.framework/') and + 'veclib' in libraries): + if intel: + args.extend(['-msse3']) + args.extend([ + '-I/System/Library/Frameworks/vecLib.framework/Headers']) + link_args.extend(['-Wl,-framework', '-Wl,vecLib']) + + if args: + macros = [ + ('NO_ATLAS_INFO', 3), + ('HAVE_CBLAS', None), + ('ACCELERATE_NEW_LAPACK', None), + ] + if(os.getenv('NPY_USE_BLAS_ILP64', None)): + print('Setting HAVE_BLAS_ILP64') + macros += [ + ('HAVE_BLAS_ILP64', None), + ('ACCELERATE_LAPACK_ILP64', None), + ] + 
self.set_info(extra_compile_args=args, + extra_link_args=link_args, + define_macros=macros) + + return + +class accelerate_lapack_info(accelerate_info): + def _calc_info(self): + return super()._calc_info() + +class blas_src_info(system_info): + # BLAS_SRC is deprecated, please do not use this! + # Build or install a BLAS library via your package manager or from + # source separately. + section = 'blas_src' + dir_env_var = 'BLAS_SRC' + notfounderror = BlasSrcNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['blas'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'daxpy.f')): + src_dir = d + break + if not src_dir: + #XXX: Get sources from netlib. May be ask first. + return + blas1 = ''' + caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot + dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 + srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg + dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax + snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap + scabs1 + ''' + blas2 = ''' + cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv + chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv + dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv + sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger + stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc + zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 + ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv + ''' + blas3 = ''' + cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k + dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm + ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm + ''' + sources = [os.path.join(src_dir, f + '.f') \ + for f in (blas1 + blas2 + blas3).split()] + #XXX: should we check here actual existence of source files? 
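+ # (blas1/blas2/blas3 above list the Level 1, 2 and 3 reference BLAS + # routines; files that do not exist are filtered out right below.)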
+ sources = [f for f in sources if os.path.isfile(f)] + info = {'sources': sources, 'language': 'f77'} + self.set_info(**info) + + +class x11_info(system_info): + section = 'x11' + notfounderror = X11NotFoundError + _lib_names = ['X11'] + + def __init__(self): + system_info.__init__(self, + default_lib_dirs=default_x11_lib_dirs, + default_include_dirs=default_x11_include_dirs) + + def calc_info(self): + if sys.platform in ['win32']: + return + lib_dirs = self.get_lib_dirs() + include_dirs = self.get_include_dirs() + opt = self.get_option_single('x11_libs', 'libraries') + x11_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, x11_libs, []) + if info is None: + return + inc_dir = None + for d in include_dirs: + if self.combine_paths(d, 'X11/X.h'): + inc_dir = d + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir]) + self.set_info(**info) + + +class _numpy_info(system_info): + section = 'Numeric' + modulename = 'Numeric' + notfounderror = NumericNotFoundError + + def __init__(self): + include_dirs = [] + try: + module = __import__(self.modulename) + prefix = [] + for name in module.__file__.split(os.sep): + if name == 'lib': + break + prefix.append(name) + + # Ask numpy for its own include path before attempting + # anything else + try: + include_dirs.append(getattr(module, 'get_include')()) + except AttributeError: + pass + + include_dirs.append(sysconfig.get_path('include')) + except ImportError: + pass + py_incl_dir = sysconfig.get_path('include') + include_dirs.append(py_incl_dir) + py_pincl_dir = sysconfig.get_path('platinclude') + if py_pincl_dir not in include_dirs: + include_dirs.append(py_pincl_dir) + for d in default_include_dirs: + d = os.path.join(d, os.path.basename(py_incl_dir)) + if d not in include_dirs: + include_dirs.append(d) + system_info.__init__(self, + default_lib_dirs=[], + default_include_dirs=include_dirs) + + def calc_info(self): + try: + module = __import__(self.modulename) + except ImportError: + return + info = {} + macros = [] + for v in ['__version__', 'version']: + vrs = getattr(module, v, None) + if vrs is None: + continue + macros = [(self.modulename.upper() + '_VERSION', + _c_string_literal(vrs)), + (self.modulename.upper(), None)] + break + dict_append(info, define_macros=macros) + include_dirs = self.get_include_dirs() + inc_dir = None + for d in include_dirs: + if self.combine_paths(d, + os.path.join(self.modulename, + 'arrayobject.h')): + inc_dir = d + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir]) + if info: + self.set_info(**info) + return + + +class numarray_info(_numpy_info): + section = 'numarray' + modulename = 'numarray' + + +class Numeric_info(_numpy_info): + section = 'Numeric' + modulename = 'Numeric' + + +class numpy_info(_numpy_info): + section = 'numpy' + modulename = 'numpy' + + +class numerix_info(system_info): + section = 'numerix' + + def calc_info(self): + which = None, None + if os.getenv("NUMERIX"): + which = os.getenv("NUMERIX"), "environment var" + # If all the above fail, default to numpy. 
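+ # (NUMERIX may name 'Numeric', 'numarray' or 'numpy'; any other value + # is rejected by the validation below.)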
+ if which[0] is None: + which = "numpy", "defaulted" + try: + import numpy # noqa: F401 + which = "numpy", "defaulted" + except ImportError as e: + msg1 = str(e) + try: + import Numeric # noqa: F401 + which = "numeric", "defaulted" + except ImportError as e: + msg2 = str(e) + try: + import numarray # noqa: F401 + which = "numarray", "defaulted" + except ImportError as e: + msg3 = str(e) + log.info(msg1) + log.info(msg2) + log.info(msg3) + which = which[0].strip().lower(), which[1] + if which[0] not in ["numeric", "numarray", "numpy"]: + raise ValueError("numerix selector must be either 'Numeric' " + "or 'numarray' or 'numpy' but the value obtained" + " from the %s was '%s'." % (which[1], which[0])) + os.environ['NUMERIX'] = which[0] + self.set_info(**get_info(which[0])) + + +class f2py_info(system_info): + def calc_info(self): + try: + import numpy.f2py as f2py + except ImportError: + return + f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') + self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], + include_dirs=[f2py_dir]) + return + + +class boost_python_info(system_info): + section = 'boost_python' + dir_env_var = 'BOOST' + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['boost*'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', + 'module.cpp')): + src_dir = d + break + if not src_dir: + return + py_incl_dirs = [sysconfig.get_path('include')] + py_pincl_dir = sysconfig.get_path('platinclude') + if py_pincl_dir not in py_incl_dirs: + py_incl_dirs.append(py_pincl_dir) + srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') + bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) + bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) + info = {'libraries': [('boost_python_src', + {'include_dirs': [src_dir] + py_incl_dirs, + 'sources':bpl_srcs} + )], + 'include_dirs': [src_dir], + } + if info: + self.set_info(**info) + return + + +class agg2_info(system_info): + section = 'agg2' + dir_env_var = 'AGG2' + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['agg2*'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): + src_dir = d + break + if not src_dir: + return + if sys.platform == 'win32': + agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', + 'win32', 'agg_win32_bmp.cpp')) + else: + agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp')) + agg2_srcs += [os.path.join(src_dir, 'src', 'platform', + 'X11', + 'agg_platform_support.cpp')] + + info = {'libraries': + [('agg2_src', + {'sources': agg2_srcs, + 'include_dirs': [os.path.join(src_dir, 'include')], + } + )], + 'include_dirs': [os.path.join(src_dir, 'include')], + } + if info: + self.set_info(**info) + return + + +class _pkg_config_info(system_info): + section = None + config_env_var = 'PKG_CONFIG' + default_config_exe = 'pkg-config' + append_config_exe = '' + version_macro_name = None + release_macro_name = None + version_flag = '--modversion' + cflags_flag = '--cflags' + + def get_config_exe(self): + if self.config_env_var in os.environ: + return 
os.environ[self.config_env_var] + return self.default_config_exe + + def get_config_output(self, config_exe, option): + cmd = config_exe + ' ' + self.append_config_exe + ' ' + option + try: + o = subprocess.check_output(cmd) + except (OSError, subprocess.CalledProcessError): + pass + else: + o = filepath_from_subprocess_output(o) + return o + + def calc_info(self): + config_exe = find_executable(self.get_config_exe()) + if not config_exe: + log.warn('File not found: %s. Cannot determine %s info.' \ + % (config_exe, self.section)) + return + info = {} + macros = [] + libraries = [] + library_dirs = [] + include_dirs = [] + extra_link_args = [] + extra_compile_args = [] + version = self.get_config_output(config_exe, self.version_flag) + if version: + macros.append((self.__class__.__name__.split('.')[-1].upper(), + _c_string_literal(version))) + if self.version_macro_name: + macros.append((self.version_macro_name + '_%s' + % (version.replace('.', '_')), None)) + if self.release_macro_name: + release = self.get_config_output(config_exe, '--release') + if release: + macros.append((self.release_macro_name + '_%s' + % (release.replace('.', '_')), None)) + opts = self.get_config_output(config_exe, '--libs') + if opts: + for opt in opts.split(): + if opt[:2] == '-l': + libraries.append(opt[2:]) + elif opt[:2] == '-L': + library_dirs.append(opt[2:]) + else: + extra_link_args.append(opt) + opts = self.get_config_output(config_exe, self.cflags_flag) + if opts: + for opt in opts.split(): + if opt[:2] == '-I': + include_dirs.append(opt[2:]) + elif opt[:2] == '-D': + if '=' in opt: + n, v = opt[2:].split('=') + macros.append((n, v)) + else: + macros.append((opt[2:], None)) + else: + extra_compile_args.append(opt) + if macros: + dict_append(info, define_macros=macros) + if libraries: + dict_append(info, libraries=libraries) + if library_dirs: + dict_append(info, library_dirs=library_dirs) + if include_dirs: + dict_append(info, include_dirs=include_dirs) + if extra_link_args: + dict_append(info, extra_link_args=extra_link_args) + if extra_compile_args: + dict_append(info, extra_compile_args=extra_compile_args) + if info: + self.set_info(**info) + return + + +class wx_info(_pkg_config_info): + section = 'wx' + config_env_var = 'WX_CONFIG' + default_config_exe = 'wx-config' + append_config_exe = '' + version_macro_name = 'WX_VERSION' + release_macro_name = 'WX_RELEASE' + version_flag = '--version' + cflags_flag = '--cxxflags' + + +class gdk_pixbuf_xlib_2_info(_pkg_config_info): + section = 'gdk_pixbuf_xlib_2' + append_config_exe = 'gdk-pixbuf-xlib-2.0' + version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' + + +class gdk_pixbuf_2_info(_pkg_config_info): + section = 'gdk_pixbuf_2' + append_config_exe = 'gdk-pixbuf-2.0' + version_macro_name = 'GDK_PIXBUF_VERSION' + + +class gdk_x11_2_info(_pkg_config_info): + section = 'gdk_x11_2' + append_config_exe = 'gdk-x11-2.0' + version_macro_name = 'GDK_X11_VERSION' + + +class gdk_2_info(_pkg_config_info): + section = 'gdk_2' + append_config_exe = 'gdk-2.0' + version_macro_name = 'GDK_VERSION' + + +class gdk_info(_pkg_config_info): + section = 'gdk' + append_config_exe = 'gdk' + version_macro_name = 'GDK_VERSION' + + +class gtkp_x11_2_info(_pkg_config_info): + section = 'gtkp_x11_2' + append_config_exe = 'gtk+-x11-2.0' + version_macro_name = 'GTK_X11_VERSION' + + +class gtkp_2_info(_pkg_config_info): + section = 'gtkp_2' + append_config_exe = 'gtk+-2.0' + version_macro_name = 'GTK_VERSION' + + +class xft_info(_pkg_config_info): + section = 'xft' + append_config_exe = 'xft' + 
version_macro_name = 'XFT_VERSION' + + +class freetype2_info(_pkg_config_info): + section = 'freetype2' + append_config_exe = 'freetype2' + version_macro_name = 'FREETYPE2_VERSION' + + +class amd_info(system_info): + section = 'amd' + dir_env_var = 'AMD' + _lib_names = ['amd'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + opt = self.get_option_single('amd_libs', 'libraries') + amd_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, amd_libs, []) + if info is None: + return + + include_dirs = self.get_include_dirs() + + inc_dir = None + for d in include_dirs: + p = self.combine_paths(d, 'amd.h') + if p: + inc_dir = os.path.dirname(p[0]) + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir], + define_macros=[('SCIPY_AMD_H', None)], + swig_opts=['-I' + inc_dir]) + + self.set_info(**info) + return + + +class umfpack_info(system_info): + section = 'umfpack' + dir_env_var = 'UMFPACK' + notfounderror = UmfpackNotFoundError + _lib_names = ['umfpack'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + opt = self.get_option_single('umfpack_libs', 'libraries') + umfpack_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, umfpack_libs, []) + if info is None: + return + + include_dirs = self.get_include_dirs() + + inc_dir = None + for d in include_dirs: + p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') + if p: + inc_dir = os.path.dirname(p[0]) + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir], + define_macros=[('SCIPY_UMFPACK_H', None)], + swig_opts=['-I' + inc_dir]) + + dict_append(info, **get_info('amd')) + + self.set_info(**info) + return + + +def combine_paths(*args, **kws): + """ Return a list of existing paths composed by all combinations of + items from arguments. 
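+ + For example (illustrative): combine_paths('/usr', ['lib', 'lib64'], + 'libblas*') globs /usr/lib/libblas* and /usr/lib64/libblas* and keeps + only the paths that actually exist.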
+ """ + r = [] + for a in args: + if not a: + continue + if is_string(a): + a = [a] + r.append(a) + args = r + if not args: + return [] + if len(args) == 1: + result = reduce(lambda a, b: a + b, map(glob, args[0]), []) + elif len(args) == 2: + result = [] + for a0 in args[0]: + for a1 in args[1]: + result.extend(glob(os.path.join(a0, a1))) + else: + result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) + log.debug('(paths: %s)', ','.join(result)) + return result + +language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} +inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} + + +def dict_append(d, **kws): + languages = [] + for k, v in kws.items(): + if k == 'language': + languages.append(v) + continue + if k in d: + if k in ['library_dirs', 'include_dirs', + 'extra_compile_args', 'extra_link_args', + 'runtime_library_dirs', 'define_macros']: + [d[k].append(vv) for vv in v if vv not in d[k]] + else: + d[k].extend(v) + else: + d[k] = v + if languages: + l = inv_language_map[max([language_map.get(l, 0) for l in languages])] + d['language'] = l + return + + +def parseCmdLine(argv=(None,)): + import optparse + parser = optparse.OptionParser("usage: %prog [-v] [info objs]") + parser.add_option('-v', '--verbose', action='store_true', dest='verbose', + default=False, + help='be verbose and print more messages') + + opts, args = parser.parse_args(args=argv[1:]) + return opts, args + + +def show_all(argv=None): + import inspect + if argv is None: + argv = sys.argv + opts, args = parseCmdLine(argv) + if opts.verbose: + log.set_threshold(log.DEBUG) + else: + log.set_threshold(log.INFO) + show_only = [] + for n in args: + if n[-5:] != '_info': + n = n + '_info' + show_only.append(n) + show_all = not show_only + _gdict_ = globals().copy() + for name, c in _gdict_.items(): + if not inspect.isclass(c): + continue + if not issubclass(c, system_info) or c is system_info: + continue + if not show_all: + if name not in show_only: + continue + del show_only[show_only.index(name)] + conf = c() + conf.verbosity = 2 + # we don't need the result, but we want + # the side effect of printing diagnostics + conf.get_info() + if show_only: + log.info('Info classes not defined: %s', ','.join(show_only)) + +if __name__ == "__main__": + show_all() diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_build_ext.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_build_ext.py new file mode 100644 index 00000000..372100fc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_build_ext.py @@ -0,0 +1,74 @@ +'''Tests for numpy.distutils.build_ext.''' + +import os +import subprocess +import sys +from textwrap import indent, dedent +import pytest +from numpy.testing import IS_WASM + +@pytest.mark.skipif(IS_WASM, reason="cannot start subprocess in wasm") +@pytest.mark.slow +def test_multi_fortran_libs_link(tmp_path): + ''' + Ensures multiple "fake" static libraries are correctly linked. + see gh-18295 + ''' + + # We need to make sure we actually have an f77 compiler. 
+ # This is nontrivial, so we'll borrow the utilities + # from f2py tests: + from numpy.f2py.tests.util import has_f77_compiler + if not has_f77_compiler(): + pytest.skip('No F77 compiler found') + + # make some dummy sources + with open(tmp_path / '_dummy1.f', 'w') as fid: + fid.write(indent(dedent('''\ + FUNCTION dummy_one() + RETURN + END FUNCTION'''), prefix=' '*6)) + with open(tmp_path / '_dummy2.f', 'w') as fid: + fid.write(indent(dedent('''\ + FUNCTION dummy_two() + RETURN + END FUNCTION'''), prefix=' '*6)) + with open(tmp_path / '_dummy.c', 'w') as fid: + # doesn't need to load - just needs to exist + fid.write('int PyInit_dummyext;') + + # make a setup file + with open(tmp_path / 'setup.py', 'w') as fid: + srctree = os.path.join(os.path.dirname(__file__), '..', '..', '..') + fid.write(dedent(f'''\ + def configuration(parent_package="", top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration("", parent_package, top_path) + config.add_library("dummy1", sources=["_dummy1.f"]) + config.add_library("dummy2", sources=["_dummy2.f"]) + config.add_extension("dummyext", sources=["_dummy.c"], libraries=["dummy1", "dummy2"]) + return config + + + if __name__ == "__main__": + import sys + sys.path.insert(0, r"{srctree}") + from numpy.distutils.core import setup + setup(**configuration(top_path="").todict())''')) + + # build the test extensino and "install" into a temporary directory + build_dir = tmp_path + subprocess.check_call([sys.executable, 'setup.py', 'build', 'install', + '--prefix', str(tmp_path / 'installdir'), + '--record', str(tmp_path / 'tmp_install_log.txt'), + ], + cwd=str(build_dir), + ) + # get the path to the so + so = None + with open(tmp_path /'tmp_install_log.txt') as fid: + for line in fid: + if 'dummyext' in line: + so = line.strip() + break + assert so is not None diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt.py new file mode 100644 index 00000000..3714aea0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt.py @@ -0,0 +1,808 @@ +import re, textwrap, os +from os import sys, path +from distutils.errors import DistutilsError + +is_standalone = __name__ == '__main__' and __package__ is None +if is_standalone: + import unittest, contextlib, tempfile, shutil + sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) + from ccompiler_opt import CCompilerOpt + + # from numpy/testing/_private/utils.py + @contextlib.contextmanager + def tempdir(*args, **kwargs): + tmpdir = tempfile.mkdtemp(*args, **kwargs) + try: + yield tmpdir + finally: + shutil.rmtree(tmpdir) + + def assert_(expr, msg=''): + if not expr: + raise AssertionError(msg) +else: + from numpy.distutils.ccompiler_opt import CCompilerOpt + from numpy.testing import assert_, tempdir + +# architectures and compilers to test +arch_compilers = dict( + x86 = ("gcc", "clang", "icc", "iccw", "msvc"), + x64 = ("gcc", "clang", "icc", "iccw", "msvc"), + ppc64 = ("gcc", "clang"), + ppc64le = ("gcc", "clang"), + armhf = ("gcc", "clang"), + aarch64 = ("gcc", "clang", "fcc"), + s390x = ("gcc", "clang"), + noarch = ("gcc",) +) + +class FakeCCompilerOpt(CCompilerOpt): + fake_info = "" + def __init__(self, trap_files="", trap_flags="", *args, **kwargs): + self.fake_trap_files = trap_files + self.fake_trap_flags = trap_flags + CCompilerOpt.__init__(self, None, **kwargs) + + def __repr__(self): + return 
textwrap.dedent("""\ + <<<< + march : {} + compiler : {} + ---------------- + {} + >>>> + """).format(self.cc_march, self.cc_name, self.report()) + + def dist_compile(self, sources, flags, **kwargs): + assert(isinstance(sources, list)) + assert(isinstance(flags, list)) + if self.fake_trap_files: + for src in sources: + if re.match(self.fake_trap_files, src): + self.dist_error("source is trapped by a fake interface") + if self.fake_trap_flags: + for f in flags: + if re.match(self.fake_trap_flags, f): + self.dist_error("flag is trapped by a fake interface") + # fake objects + return zip(sources, [' '.join(flags)] * len(sources)) + + def dist_info(self): + return FakeCCompilerOpt.fake_info + + @staticmethod + def dist_log(*args, stderr=False): + pass + +class _Test_CCompilerOpt: + arch = None # x86_64 + cc = None # gcc + + def setup_class(self): + FakeCCompilerOpt.conf_nocache = True + self._opt = None + + def nopt(self, *args, **kwargs): + FakeCCompilerOpt.fake_info = (self.arch, self.cc, "") + return FakeCCompilerOpt(*args, **kwargs) + + def opt(self): + if not self._opt: + self._opt = self.nopt() + return self._opt + + def march(self): + return self.opt().cc_march + + def cc_name(self): + return self.opt().cc_name + + def get_targets(self, targets, groups, **kwargs): + FakeCCompilerOpt.conf_target_groups = groups + opt = self.nopt( + cpu_baseline=kwargs.get("baseline", "min"), + cpu_dispatch=kwargs.get("dispatch", "max"), + trap_files=kwargs.get("trap_files", ""), + trap_flags=kwargs.get("trap_flags", "") + ) + with tempdir() as tmpdir: + file = os.path.join(tmpdir, "test_targets.c") + with open(file, 'w') as f: + f.write(targets) + gtargets = [] + gflags = {} + fake_objects = opt.try_dispatch([file]) + for source, flags in fake_objects: + gtar = path.basename(source).split('.')[1:-1] + glen = len(gtar) + if glen == 0: + gtar = "baseline" + elif glen == 1: + gtar = gtar[0].upper() + else: + # converting multi-target into parentheses str format to be equivalent + # to the configuration statements syntax. 
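+ # (e.g. a fake object for a source assumed to be named + # test_targets.avx2.fma3.c would yield the target string "(AVX2 FMA3)")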
+                    gtar = ('('+' '.join(gtar)+')').upper()
+                gtargets.append(gtar)
+                gflags[gtar] = flags
+
+        has_baseline, targets = opt.sources_status[file]
+        targets = targets + ["baseline"] if has_baseline else targets
+        # convert tuples that represent multi-targets into parentheses str format
+        targets = [
+            '('+' '.join(tar)+')' if isinstance(tar, tuple) else tar
+            for tar in targets
+        ]
+        if len(targets) != len(gtargets) or not all(t in gtargets for t in targets):
+            raise AssertionError(
+                "'sources_status' returns different targets than the compiled targets\n"
+                "%s != %s" % (targets, gtargets)
+            )
+        # return targets from 'sources_status' since the order matters
+        return targets, gflags
+
+    def arg_regex(self, **kwargs):
+        map2origin = dict(
+            x64 = "x86",
+            ppc64le = "ppc64",
+            aarch64 = "armhf",
+            clang = "gcc",
+        )
+        march = self.march(); cc_name = self.cc_name()
+        map_march = map2origin.get(march, march)
+        map_cc = map2origin.get(cc_name, cc_name)
+        for key in (
+            march, cc_name, map_march, map_cc,
+            march + '_' + cc_name,
+            map_march + '_' + cc_name,
+            march + '_' + map_cc,
+            map_march + '_' + map_cc,
+        ) :
+            regex = kwargs.pop(key, None)
+            if regex is not None:
+                break
+        if regex:
+            if isinstance(regex, dict):
+                for k, v in regex.items():
+                    if v[-1:] not in ')}$?\\.+*':
+                        regex[k] = v + '$'
+            else:
+                assert(isinstance(regex, str))
+                if regex[-1:] not in ')}$?\\.+*':
+                    regex += '$'
+        return regex
+
+    def expect(self, dispatch, baseline="", **kwargs):
+        match = self.arg_regex(**kwargs)
+        if match is None:
+            return
+        opt = self.nopt(
+            cpu_baseline=baseline, cpu_dispatch=dispatch,
+            trap_files=kwargs.get("trap_files", ""),
+            trap_flags=kwargs.get("trap_flags", "")
+        )
+        features = ' '.join(opt.cpu_dispatch_names())
+        if not match:
+            if len(features) != 0:
+                raise AssertionError(
+                    'expected empty features, not "%s"' % features
+                )
+            return
+        if not re.match(match, features, re.IGNORECASE):
+            raise AssertionError(
+                'dispatch features "%s" do not match "%s"' % (features, match)
+            )
+
+    def expect_baseline(self, baseline, dispatch="", **kwargs):
+        match = self.arg_regex(**kwargs)
+        if match is None:
+            return
+        opt = self.nopt(
+            cpu_baseline=baseline, cpu_dispatch=dispatch,
+            trap_files=kwargs.get("trap_files", ""),
+            trap_flags=kwargs.get("trap_flags", "")
+        )
+        features = ' '.join(opt.cpu_baseline_names())
+        if not match:
+            if len(features) != 0:
+                raise AssertionError(
+                    'expected empty features, not "%s"' % features
+                )
+            return
+        if not re.match(match, features, re.IGNORECASE):
+            raise AssertionError(
+                'baseline features "%s" do not match "%s"' % (features, match)
+            )
+
+    def expect_flags(self, baseline, dispatch="", **kwargs):
+        match = self.arg_regex(**kwargs)
+        if match is None:
+            return
+        opt = self.nopt(
+            cpu_baseline=baseline, cpu_dispatch=dispatch,
+            trap_files=kwargs.get("trap_files", ""),
+            trap_flags=kwargs.get("trap_flags", "")
+        )
+        flags = ' '.join(opt.cpu_baseline_flags())
+        if not match:
+            if len(flags) != 0:
+                raise AssertionError(
+                    'expected empty flags, not "%s"' % flags
+                )
+            return
+        if not re.match(match, flags):
+            raise AssertionError(
+                'flags "%s" do not match "%s"' % (flags, match)
+            )
+
+    def expect_targets(self, targets, groups={}, **kwargs):
+        match = self.arg_regex(**kwargs)
+        if match is None:
+            return
+        targets, _ = self.get_targets(targets=targets, groups=groups, **kwargs)
+        targets = ' '.join(targets)
+        if not match:
+            if len(targets) != 0:
+                raise AssertionError(
+                    'expected empty targets, not "%s"' % targets
+                )
+            return
+        if not re.match(match, targets, re.IGNORECASE):
+            raise AssertionError(
+                'targets "%s" do not match "%s"' % (targets, match)
+            )
+
+    def expect_target_flags(self, targets, groups={}, **kwargs):
+        match_dict = self.arg_regex(**kwargs)
+        if match_dict is None:
+            return
+        assert(isinstance(match_dict, dict))
+        _, tar_flags = self.get_targets(targets=targets, groups=groups)
+
+        for match_tar, match_flags in match_dict.items():
+            if match_tar not in tar_flags:
+                raise AssertionError(
+                    'expected to find target "%s"' % match_tar
+                )
+            flags = tar_flags[match_tar]
+            if not match_flags:
+                if len(flags) != 0:
+                    raise AssertionError(
+                        'expected to find empty flags in target "%s"' % match_tar
+                    )
+            if not re.match(match_flags, flags):
+                raise AssertionError(
+                    '"%s" flags "%s" do not match "%s"' % (match_tar, flags, match_flags)
+                )
+
+    def test_interface(self):
+        wrong_arch = "ppc64" if self.arch != "ppc64" else "x86"
+        wrong_cc = "clang" if self.cc != "clang" else "icc"
+        opt = self.opt()
+        assert_(getattr(opt, "cc_on_" + self.arch))
+        assert_(not getattr(opt, "cc_on_" + wrong_arch))
+        assert_(getattr(opt, "cc_is_" + self.cc))
+        assert_(not getattr(opt, "cc_is_" + wrong_cc))
+
+    def test_args_empty(self):
+        for baseline, dispatch in (
+            ("", "none"),
+            (None, ""),
+            ("none +none", "none - none"),
+            ("none -max", "min - max"),
+            ("+vsx2 -VSX2", "vsx avx2 avx512f -max"),
+            ("max -vsx - avx + avx512f neon -MAX ",
+             "min -min + max -max -vsx + avx2 -avx2 +NONE")
+        ) :
+            opt = self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch)
+            assert(len(opt.cpu_baseline_names()) == 0)
+            assert(len(opt.cpu_dispatch_names()) == 0)
+
+    def test_args_validation(self):
+        if self.march() == "unknown":
+            return
+        # check the sanity of argument validation
+        for baseline, dispatch in (
+            ("unkown_feature - max +min", "unknown max min"),  # unknown features
+            ("#avx2", "$vsx")  # groups and policies aren't acceptable
+        ) :
+            try:
+                self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch)
+                raise AssertionError("expected an exception for invalid arguments")
+            except DistutilsError:
+                pass
+
+    def test_skip(self):
+        # only take what the platform supports and skip the others
+        # without raising exceptions
+        self.expect(
+            "sse vsx neon",
+            x86="sse", ppc64="vsx", armhf="neon", unknown=""
+        )
+        self.expect(
+            "sse41 avx avx2 vsx2 vsx3 neon_vfpv4 asimd",
+            x86 = "sse41 avx avx2",
+            ppc64 = "vsx2 vsx3",
+            armhf = "neon_vfpv4 asimd",
+            unknown = ""
+        )
+        # any features in cpu_dispatch must be ignored if they're part of the baseline
+        self.expect(
+            "sse neon vsx", baseline="sse neon vsx",
+            x86="", ppc64="", armhf=""
+        )
+        self.expect(
+            "avx2 vsx3 asimdhp", baseline="avx2 vsx3 asimdhp",
+            x86="", ppc64="", armhf=""
+        )
+
+    def test_implies(self):
+        # the baseline combines implied features, so we count on it
+        # instead of testing 'feature_implies()' directly
+        self.expect_baseline(
+            "fma3 avx2 asimd vsx3",
+            # .* between two spaces can validate features in between
+            x86 = "sse .* sse41 .* fma3.*avx2",
+            ppc64 = "vsx vsx2 vsx3",
+            armhf = "neon neon_fp16 neon_vfpv4 asimd"
+        )
+        """
+        special cases
+        """
+        # in icc and msvc, FMA3 and AVX2 can't be separated:
+        # each needs to imply the other, same for avx512f & cd
+        for f0, f1 in (
+            ("fma3", "avx2"),
+            ("avx512f", "avx512cd"),
+        ):
+            diff = ".* sse42 .* %s .*%s$" % (f0, f1)
+            self.expect_baseline(f0,
+                x86_gcc=".* sse42 .* %s$" % f0,
+                x86_icc=diff, x86_iccw=diff
+            )
+            self.expect_baseline(f1,
+                x86_gcc=".* avx .* %s$" % f1,
+                x86_icc=diff, x86_iccw=diff
+            )
+        # in msvc, the following features can't be separated either
+        for f in (("fma3", "avx2"), ("avx512f", "avx512cd", "avx512_skx")):
+            for ff in f:
+                self.expect_baseline(ff,
+                    x86_msvc=".*%s" % ' '.join(f)
+                )
+
+        # in ppc64le, VSX and VSX2 can't be separated
+        self.expect_baseline("vsx", ppc64le="vsx vsx2")
+        # in aarch64, the following features can't be separated
+        for f in ("neon", "neon_fp16", "neon_vfpv4", "asimd"):
+            self.expect_baseline(f, aarch64="neon neon_fp16 neon_vfpv4 asimd")
+
+    def test_args_options(self):
+        # max & native
+        for o in ("max", "native"):
+            if o == "native" and self.cc_name() == "msvc":
+                continue
+            self.expect(o,
+                trap_files=".*cpu_(sse|vsx|neon|vx).c",
+                x86="", ppc64="", armhf="", s390x=""
+            )
+            self.expect(o,
+                trap_files=".*cpu_(sse3|vsx2|neon_vfpv4|vxe).c",
+                x86="sse sse2", ppc64="vsx", armhf="neon neon_fp16",
+                aarch64="", ppc64le="", s390x="vx"
+            )
+            self.expect(o,
+                trap_files=".*cpu_(popcnt|vsx3).c",
+                x86="sse .* sse41", ppc64="vsx vsx2",
+                armhf="neon neon_fp16 .* asimd .*",
+                s390x="vx vxe vxe2"
+            )
+            self.expect(o,
+                x86_gcc=".* xop fma4 .* avx512f .* avx512_knl avx512_knm avx512_skx .*",
+                # in icc, xop and fma4 aren't supported
+                x86_icc=".* avx512f .* avx512_knl avx512_knm avx512_skx .*",
+                x86_iccw=".* avx512f .* avx512_knl avx512_knm avx512_skx .*",
+                # in msvc, avx512_knl and avx512_knm aren't supported
+                x86_msvc=".* xop fma4 .* avx512f .* avx512_skx .*",
+                armhf=".* asimd asimdhp asimddp .*",
+                ppc64="vsx vsx2 vsx3 vsx4.*",
+                s390x="vx vxe vxe2.*"
+            )
+        # min
+        self.expect("min",
+            x86="sse sse2", x64="sse sse2 sse3",
+            armhf="", aarch64="neon neon_fp16 .* asimd",
+            ppc64="", ppc64le="vsx vsx2", s390x=""
+        )
+        self.expect(
+            "min", trap_files=".*cpu_(sse2|vsx2).c",
+            x86="", ppc64le=""
+        )
+        # an exception must be triggered if the native flag isn't supported
+        # when option "native" is activated through the args
+        try:
+            self.expect("native",
+                trap_flags=".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*",
+                x86=".*", ppc64=".*", armhf=".*", s390x=".*", aarch64=".*",
+            )
+            if self.march() != "unknown":
+                raise AssertionError(
+                    "expected an exception for %s" % self.march()
+                )
+        except DistutilsError:
+            if self.march() == "unknown":
+                raise AssertionError("expected no exceptions")
+
+    def test_flags(self):
+        self.expect_flags(
+            "sse sse2 vsx vsx2 neon neon_fp16 vx vxe",
+            x86_gcc="-msse -msse2", x86_icc="-msse -msse2",
+            x86_iccw="/arch:SSE2",
+            x86_msvc="/arch:SSE2" if self.march() == "x86" else "",
+            ppc64_gcc="-mcpu=power8",
+            ppc64_clang="-mcpu=power8",
+            armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee",
+            aarch64="",
+            s390x="-mzvector -march=arch12"
+        )
+        # test normalizing -march
+        self.expect_flags(
+            "asimd",
+            aarch64="",
+            armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8-a\+simd"
+        )
+        self.expect_flags(
+            "asimdhp",
+            aarch64_gcc=r"-march=armv8.2-a\+fp16",
+            armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8.2-a\+fp16"
+        )
+        self.expect_flags(
+            "asimddp", aarch64_gcc=r"-march=armv8.2-a\+dotprod"
+        )
+        self.expect_flags(
+            # asimdfhm implies asimdhp
+            "asimdfhm", aarch64_gcc=r"-march=armv8.2-a\+fp16\+fp16fml"
+        )
+        self.expect_flags(
+            "asimddp asimdhp asimdfhm",
+            aarch64_gcc=r"-march=armv8.2-a\+dotprod\+fp16\+fp16fml"
+        )
+        self.expect_flags(
+            "vx vxe vxe2",
+            s390x=r"-mzvector -march=arch13"
+        )
+
+    def test_targets_exceptions(self):
+        for targets in (
+            "bla bla", "/*@targets",
+            "/*@targets */",
+            "/*@targets unknown */",
+            "/*@targets $unknown_policy avx2 */",
+            "/*@targets #unknown_group avx2 */",
+            "/*@targets $ */",
+            "/*@targets # vsx */",
+            "/*@targets #$ vsx */",
+            # the remaining cases exercise unbalanced, empty, or otherwise
+            # invalid multi-target parentheses groups:
+            "/*@targets vsx avx2 ) */",
+            "/*@targets vsx avx2 (avx2 */",
+            "/*@targets vsx avx2 () */",
+            "/*@targets vsx avx2 ($autovec) */", # no features
+            "/*@targets vsx avx2 (xxx) */",
+            "/*@targets vsx avx2 (baseline) */",
+        ) :
+            try:
+                self.expect_targets(
+                    targets,
+                    x86="", armhf="", ppc64="", s390x=""
+                )
+                if self.march() != "unknown":
+                    raise AssertionError(
+                        "expected an exception for %s" % self.march()
+                    )
+            except DistutilsError:
+                if self.march() == "unknown":
+                    raise AssertionError("expected no exceptions")
+
+    def test_targets_syntax(self):
+        for targets in (
+            "/*@targets $keep_baseline sse vsx neon vx*/",
+            "/*@targets,$keep_baseline,sse,vsx,neon vx*/",
+            "/*@targets*$keep_baseline*sse*vsx*neon*vx*/",
+            """
+            /*
+            ** @targets
+            ** $keep_baseline, sse vsx,neon, vx
+            */
+            """,
+            """
+            /*
+            ************@targets****************
+            ** $keep_baseline, sse vsx, neon, vx
+            ************************************
+            */
+            """,
+            """
+            /*
+            /////////////@targets/////////////////
+            //$keep_baseline//sse//vsx//neon//vx
+            /////////////////////////////////////
+            */
+            """,
+            """
+            /*
+            @targets
+            $keep_baseline
+            SSE VSX NEON VX*/
+            """
+        ) :
+            self.expect_targets(targets,
+                x86="sse", ppc64="vsx", armhf="neon", s390x="vx", unknown=""
+            )
+
+    def test_targets(self):
+        # test skipping baseline features
+        self.expect_targets(
+            """
+            /*@targets
+                sse sse2 sse41 avx avx2 avx512f
+                vsx vsx2 vsx3 vsx4
+                neon neon_fp16 asimdhp asimddp
+                vx vxe vxe2
+            */
+            """,
+            baseline="avx vsx2 asimd vx vxe",
+            x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx4 vsx3",
+            s390x="vxe2"
+        )
+        # test skipping non-dispatch features
+        self.expect_targets(
+            """
+            /*@targets
+                sse41 avx avx2 avx512f
+                vsx2 vsx3 vsx4
+                asimd asimdhp asimddp
+                vx vxe vxe2
+            */
+            """,
+            baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp vxe2",
+            x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2", s390x="vxe2"
+        )
+        # test skipping features that aren't supported
+        self.expect_targets(
+            """
+            /*@targets
+                sse2 sse41 avx2 avx512f
+                vsx2 vsx3 vsx4
+                neon asimdhp asimddp
+                vx vxe vxe2
+            */
+            """,
+            baseline="",
+            trap_files=".*(avx2|avx512f|vsx3|vsx4|asimddp|vxe2).c",
+            x86="sse41 sse2", ppc64="vsx2", armhf="asimdhp neon",
+            s390x="vxe vx"
+        )
+        # test skipping features that imply each other
+        self.expect_targets(
+            """
+            /*@targets
+                sse sse2 avx fma3 avx2 avx512f avx512cd
+                vsx vsx2 vsx3
+                neon neon_vfpv4 neon_fp16 neon_fp16 asimd asimdhp
+                asimddp asimdfhm
+            */
+            """,
+            baseline="",
+            x86_gcc="avx512cd avx512f avx2 fma3 avx sse2",
+            x86_msvc="avx512cd avx2 avx sse2",
+            x86_icc="avx512cd avx2 avx sse2",
+            x86_iccw="avx512cd avx2 avx sse2",
+            ppc64="vsx3 vsx2 vsx",
+            ppc64le="vsx3 vsx2",
+            armhf="asimdfhm asimddp asimdhp asimd neon_vfpv4 neon_fp16 neon",
+            aarch64="asimdfhm asimddp asimdhp asimd"
+        )
+
+    def test_targets_policies(self):
+        # 'keep_baseline', generate objects for baseline features
+        self.expect_targets(
+            """
+            /*@targets
+                $keep_baseline
+                sse2 sse42 avx2 avx512f
+                vsx2 vsx3
+                neon neon_vfpv4 asimd asimddp
+                vx vxe vxe2
+            */
+            """,
+            baseline="sse41 avx2 vsx2 asimd vsx3 vxe",
+            x86="avx512f avx2 sse42 sse2",
+            ppc64="vsx3 vsx2",
+            armhf="asimddp asimd neon_vfpv4 neon",
+            # neon, neon_vfpv4, asimd imply each other
+            aarch64="asimddp asimd",
+            s390x="vxe2 vxe vx"
+        )
+        # 'keep_sort', leave the sort as-is
+        self.expect_targets(
+            """
+            /*@targets
+                $keep_baseline $keep_sort
+                avx512f sse42 avx2 sse2
+                vsx2 vsx3
+                asimd neon neon_vfpv4 asimddp
+                vxe vxe2
+            */
+            """,
+            x86="avx512f sse42 avx2 sse2",
+            ppc64="vsx2 vsx3",
+            
armhf="asimd neon neon_vfpv4 asimddp", + # neon, neon_vfpv4, asimd implies each other + aarch64="asimd asimddp", + s390x="vxe vxe2" + ) + # 'autovec', skipping features that can't be + # vectorized by the compiler + self.expect_targets( + """ + /*@targets + $keep_baseline $keep_sort $autovec + avx512f avx2 sse42 sse41 sse2 + vsx3 vsx2 + asimddp asimd neon_vfpv4 neon + */ + """, + x86_gcc="avx512f avx2 sse42 sse41 sse2", + x86_icc="avx512f avx2 sse42 sse41 sse2", + x86_iccw="avx512f avx2 sse42 sse41 sse2", + x86_msvc="avx512f avx2 sse2" + if self.march() == 'x86' else "avx512f avx2", + ppc64="vsx3 vsx2", + armhf="asimddp asimd neon_vfpv4 neon", + # neon, neon_vfpv4, asimd implies each other + aarch64="asimddp asimd" + ) + for policy in ("$maxopt", "$autovec"): + # 'maxopt' and autovec set the max acceptable optimization flags + self.expect_target_flags( + "/*@targets baseline %s */" % policy, + gcc={"baseline":".*-O3.*"}, icc={"baseline":".*-O3.*"}, + iccw={"baseline":".*/O3.*"}, msvc={"baseline":".*/O2.*"}, + unknown={"baseline":".*"} + ) + + # 'werror', force compilers to treat warnings as errors + self.expect_target_flags( + "/*@targets baseline $werror */", + gcc={"baseline":".*-Werror.*"}, icc={"baseline":".*-Werror.*"}, + iccw={"baseline":".*/Werror.*"}, msvc={"baseline":".*/WX.*"}, + unknown={"baseline":".*"} + ) + + def test_targets_groups(self): + self.expect_targets( + """ + /*@targets $keep_baseline baseline #test_group */ + """, + groups=dict( + test_group=(""" + $keep_baseline + asimddp sse2 vsx2 avx2 vsx3 + avx512f asimdhp + """) + ), + x86="avx512f avx2 sse2 baseline", + ppc64="vsx3 vsx2 baseline", + armhf="asimddp asimdhp baseline" + ) + # test skip duplicating and sorting + self.expect_targets( + """ + /*@targets + * sse42 avx avx512f + * #test_group_1 + * vsx2 + * #test_group_2 + * asimddp asimdfhm + */ + """, + groups=dict( + test_group_1=(""" + VSX2 vsx3 asimd avx2 SSE41 + """), + test_group_2=(""" + vsx2 vsx3 asImd aVx2 sse41 + """) + ), + x86="avx512f avx2 avx sse42 sse41", + ppc64="vsx3 vsx2", + # vsx2 part of the default baseline of ppc64le, option ("min") + ppc64le="vsx3", + armhf="asimdfhm asimddp asimd", + # asimd part of the default baseline of aarch64, option ("min") + aarch64="asimdfhm asimddp" + ) + + def test_targets_multi(self): + self.expect_targets( + """ + /*@targets + (avx512_clx avx512_cnl) (asimdhp asimddp) + */ + """, + x86=r"\(avx512_clx avx512_cnl\)", + armhf=r"\(asimdhp asimddp\)", + ) + # test skipping implied features and auto-sort + self.expect_targets( + """ + /*@targets + f16c (sse41 avx sse42) (sse3 avx2 avx512f) + vsx2 (vsx vsx3 vsx2) + (neon neon_vfpv4 asimd asimdhp asimddp) + */ + """, + x86="avx512f f16c avx", + ppc64="vsx3 vsx2", + ppc64le="vsx3", # vsx2 part of baseline + armhf=r"\(asimdhp asimddp\)", + ) + # test skipping implied features and keep sort + self.expect_targets( + """ + /*@targets $keep_sort + (sse41 avx sse42) (sse3 avx2 avx512f) + (vsx vsx3 vsx2) + (asimddp neon neon_vfpv4 asimd asimdhp) + (vx vxe vxe2) + */ + """, + x86="avx avx512f", + ppc64="vsx3", + armhf=r"\(asimdhp asimddp\)", + s390x="vxe2" + ) + # test compiler variety and avoiding duplicating + self.expect_targets( + """ + /*@targets $keep_sort + fma3 avx2 (fma3 avx2) (avx2 fma3) avx2 fma3 + */ + """, + x86_gcc=r"fma3 avx2 \(fma3 avx2\)", + x86_icc="avx2", x86_iccw="avx2", + x86_msvc="avx2" + ) + +def new_test(arch, cc): + if is_standalone: return textwrap.dedent("""\ + class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt, unittest.TestCase): + arch = '{arch}' 
+ cc = '{cc}' + def __init__(self, methodName="runTest"): + unittest.TestCase.__init__(self, methodName) + self.setup_class() + """).format( + class_name=arch + '_' + cc, arch=arch, cc=cc + ) + return textwrap.dedent("""\ + class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt): + arch = '{arch}' + cc = '{cc}' + """).format( + class_name=arch + '_' + cc, arch=arch, cc=cc + ) +""" +if 1 and is_standalone: + FakeCCompilerOpt.fake_info = "x86_icc" + cco = FakeCCompilerOpt(None, cpu_baseline="avx2") + print(' '.join(cco.cpu_baseline_names())) + print(cco.cpu_baseline_flags()) + unittest.main() + sys.exit() +""" +for arch, compilers in arch_compilers.items(): + for cc in compilers: + exec(new_test(arch, cc)) + +if is_standalone: + unittest.main() diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py new file mode 100644 index 00000000..d9e8b2b0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py @@ -0,0 +1,176 @@ +import unittest +from os import sys, path + +is_standalone = __name__ == '__main__' and __package__ is None +if is_standalone: + sys.path.append(path.abspath(path.join(path.dirname(__file__), ".."))) + from ccompiler_opt import CCompilerOpt +else: + from numpy.distutils.ccompiler_opt import CCompilerOpt + +arch_compilers = dict( + x86 = ("gcc", "clang", "icc", "iccw", "msvc"), + x64 = ("gcc", "clang", "icc", "iccw", "msvc"), + ppc64 = ("gcc", "clang"), + ppc64le = ("gcc", "clang"), + armhf = ("gcc", "clang"), + aarch64 = ("gcc", "clang"), + narch = ("gcc",) +) + +class FakeCCompilerOpt(CCompilerOpt): + fake_info = ("arch", "compiler", "extra_args") + def __init__(self, *args, **kwargs): + CCompilerOpt.__init__(self, None, **kwargs) + def dist_compile(self, sources, flags, **kwargs): + return sources + def dist_info(self): + return FakeCCompilerOpt.fake_info + @staticmethod + def dist_log(*args, stderr=False): + pass + +class _TestConfFeatures(FakeCCompilerOpt): + """A hook to check the sanity of configured features +- before it called by the abstract class '_Feature' + """ + + def conf_features_partial(self): + conf_all = self.conf_features + for feature_name, feature in conf_all.items(): + self.test_feature( + "attribute conf_features", + conf_all, feature_name, feature + ) + + conf_partial = FakeCCompilerOpt.conf_features_partial(self) + for feature_name, feature in conf_partial.items(): + self.test_feature( + "conf_features_partial()", + conf_partial, feature_name, feature + ) + return conf_partial + + def test_feature(self, log, search_in, feature_name, feature_dict): + error_msg = ( + "during validate '{}' within feature '{}', " + "march '{}' and compiler '{}'\n>> " + ).format(log, feature_name, self.cc_march, self.cc_name) + + if not feature_name.isupper(): + raise AssertionError(error_msg + "feature name must be in uppercase") + + for option, val in feature_dict.items(): + self.test_option_types(error_msg, option, val) + self.test_duplicates(error_msg, option, val) + + self.test_implies(error_msg, search_in, feature_name, feature_dict) + self.test_group(error_msg, search_in, feature_name, feature_dict) + self.test_extra_checks(error_msg, search_in, feature_name, feature_dict) + + def test_option_types(self, error_msg, option, val): + for tp, available in ( + ((str, list), ( + "implies", "headers", "flags", "group", "detect", "extra_checks" + )), + ((str,), ("disable",)), + ((int,), 
("interest",)), + ((bool,), ("implies_detect",)), + ((bool, type(None)), ("autovec",)), + ) : + found_it = option in available + if not found_it: + continue + if not isinstance(val, tp): + error_tp = [t.__name__ for t in (*tp,)] + error_tp = ' or '.join(error_tp) + raise AssertionError(error_msg + + "expected '%s' type for option '%s' not '%s'" % ( + error_tp, option, type(val).__name__ + )) + break + + if not found_it: + raise AssertionError(error_msg + "invalid option name '%s'" % option) + + def test_duplicates(self, error_msg, option, val): + if option not in ( + "implies", "headers", "flags", "group", "detect", "extra_checks" + ) : return + + if isinstance(val, str): + val = val.split() + + if len(val) != len(set(val)): + raise AssertionError(error_msg + "duplicated values in option '%s'" % option) + + def test_implies(self, error_msg, search_in, feature_name, feature_dict): + if feature_dict.get("disabled") is not None: + return + implies = feature_dict.get("implies", "") + if not implies: + return + if isinstance(implies, str): + implies = implies.split() + + if feature_name in implies: + raise AssertionError(error_msg + "feature implies itself") + + for impl in implies: + impl_dict = search_in.get(impl) + if impl_dict is not None: + if "disable" in impl_dict: + raise AssertionError(error_msg + "implies disabled feature '%s'" % impl) + continue + raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl) + + def test_group(self, error_msg, search_in, feature_name, feature_dict): + if feature_dict.get("disabled") is not None: + return + group = feature_dict.get("group", "") + if not group: + return + if isinstance(group, str): + group = group.split() + + for f in group: + impl_dict = search_in.get(f) + if not impl_dict or "disable" in impl_dict: + continue + raise AssertionError(error_msg + + "in option 'group', '%s' already exists as a feature name" % f + ) + + def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict): + if feature_dict.get("disabled") is not None: + return + extra_checks = feature_dict.get("extra_checks", "") + if not extra_checks: + return + if isinstance(extra_checks, str): + extra_checks = extra_checks.split() + + for f in extra_checks: + impl_dict = search_in.get(f) + if not impl_dict or "disable" in impl_dict: + continue + raise AssertionError(error_msg + + "in option 'extra_checks', extra test case '%s' already exists as a feature name" % f + ) + +class TestConfFeatures(unittest.TestCase): + def __init__(self, methodName="runTest"): + unittest.TestCase.__init__(self, methodName) + self._setup() + + def _setup(self): + FakeCCompilerOpt.conf_nocache = True + + def test_features(self): + for arch, compilers in arch_compilers.items(): + for cc in compilers: + FakeCCompilerOpt.fake_info = (arch, cc, "") + _TestConfFeatures() + +if is_standalone: + unittest.main() diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_exec_command.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_exec_command.py new file mode 100644 index 00000000..d1a20056 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_exec_command.py @@ -0,0 +1,217 @@ +import os +import pytest +import sys +from tempfile import TemporaryFile + +from numpy.distutils import exec_command +from numpy.distutils.exec_command import get_pythonexe +from numpy.testing import tempdir, assert_, assert_warns, IS_WASM + + +# In python 3 stdout, stderr are text (unicode compliant) devices, so to +# emulate them 
import StringIO from the io module. +from io import StringIO + +class redirect_stdout: + """Context manager to redirect stdout for exec_command test.""" + def __init__(self, stdout=None): + self._stdout = stdout or sys.stdout + + def __enter__(self): + self.old_stdout = sys.stdout + sys.stdout = self._stdout + + def __exit__(self, exc_type, exc_value, traceback): + self._stdout.flush() + sys.stdout = self.old_stdout + # note: closing sys.stdout won't close it. + self._stdout.close() + +class redirect_stderr: + """Context manager to redirect stderr for exec_command test.""" + def __init__(self, stderr=None): + self._stderr = stderr or sys.stderr + + def __enter__(self): + self.old_stderr = sys.stderr + sys.stderr = self._stderr + + def __exit__(self, exc_type, exc_value, traceback): + self._stderr.flush() + sys.stderr = self.old_stderr + # note: closing sys.stderr won't close it. + self._stderr.close() + +class emulate_nonposix: + """Context manager to emulate os.name != 'posix' """ + def __init__(self, osname='non-posix'): + self._new_name = osname + + def __enter__(self): + self._old_name = os.name + os.name = self._new_name + + def __exit__(self, exc_type, exc_value, traceback): + os.name = self._old_name + + +def test_exec_command_stdout(): + # Regression test for gh-2999 and gh-2915. + # There are several packages (nose, scipy.weave.inline, Sage inline + # Fortran) that replace stdout, in which case it doesn't have a fileno + # method. This is tested here, with a do-nothing command that fails if the + # presence of fileno() is assumed in exec_command. + + # The code has a special case for posix systems, so if we are on posix test + # both that the special case works and that the generic code works. + + # Test posix version: + with redirect_stdout(StringIO()): + with redirect_stderr(TemporaryFile()): + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") + + if os.name == 'posix': + # Test general (non-posix) version: + with emulate_nonposix(): + with redirect_stdout(StringIO()): + with redirect_stderr(TemporaryFile()): + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") + +def test_exec_command_stderr(): + # Test posix version: + with redirect_stdout(TemporaryFile(mode='w+')): + with redirect_stderr(StringIO()): + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") + + if os.name == 'posix': + # Test general (non-posix) version: + with emulate_nonposix(): + with redirect_stdout(TemporaryFile()): + with redirect_stderr(StringIO()): + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +class TestExecCommand: + def setup_method(self): + self.pyexe = get_pythonexe() + + def check_nt(self, **kws): + s, o = exec_command.exec_command('cmd /C echo path=%path%') + assert_(s == 0) + assert_(o != '') + + s, o = exec_command.exec_command( + '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe) + assert_(s == 0) + assert_(o == 'win32') + + def check_posix(self, **kws): + s, o = exec_command.exec_command("echo Hello", **kws) + assert_(s == 0) + assert_(o == 'Hello') + + s, o = exec_command.exec_command('echo $AAA', **kws) + assert_(s == 0) + assert_(o == '') + + s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws) + assert_(s == 0) + assert_(o == 'Tere') + + s, o = exec_command.exec_command('echo "$AAA"', **kws) + assert_(s == 0) + assert_(o == '') + + if 'BBB' not in os.environ: + os.environ['BBB'] = 
'Hi' + s, o = exec_command.exec_command('echo "$BBB"', **kws) + assert_(s == 0) + assert_(o == 'Hi') + + s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws) + assert_(s == 0) + assert_(o == 'Hey') + + s, o = exec_command.exec_command('echo "$BBB"', **kws) + assert_(s == 0) + assert_(o == 'Hi') + + del os.environ['BBB'] + + s, o = exec_command.exec_command('echo "$BBB"', **kws) + assert_(s == 0) + assert_(o == '') + + + s, o = exec_command.exec_command('this_is_not_a_command', **kws) + assert_(s != 0) + assert_(o != '') + + s, o = exec_command.exec_command('echo path=$PATH', **kws) + assert_(s == 0) + assert_(o != '') + + s, o = exec_command.exec_command( + '"%s" -c "import sys,os;sys.stderr.write(os.name)"' % + self.pyexe, **kws) + assert_(s == 0) + assert_(o == 'posix') + + def check_basic(self, *kws): + s, o = exec_command.exec_command( + '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws) + assert_(s != 0) + assert_(o != '') + + s, o = exec_command.exec_command( + '"%s" -c "import sys;sys.stderr.write(\'0\');' + 'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' % + self.pyexe, **kws) + assert_(s == 0) + assert_(o == '012') + + s, o = exec_command.exec_command( + '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws) + assert_(s == 15) + assert_(o == '') + + s, o = exec_command.exec_command( + '"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws) + assert_(s == 0) + assert_(o == 'Heipa') + + def check_execute_in(self, **kws): + with tempdir() as tmpdir: + fn = "file" + tmpfile = os.path.join(tmpdir, fn) + with open(tmpfile, 'w') as f: + f.write('Hello') + + s, o = exec_command.exec_command( + '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' % + (self.pyexe, fn), **kws) + assert_(s != 0) + assert_(o != '') + s, o = exec_command.exec_command( + '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); ' + 'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws) + assert_(s == 0) + assert_(o == 'Hello') + + def test_basic(self): + with redirect_stdout(StringIO()): + with redirect_stderr(StringIO()): + with assert_warns(DeprecationWarning): + if os.name == "posix": + self.check_posix(use_tee=0) + self.check_posix(use_tee=1) + elif os.name == "nt": + self.check_nt(use_tee=0) + self.check_nt(use_tee=1) + self.check_execute_in(use_tee=0) + self.check_execute_in(use_tee=1) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler.py new file mode 100644 index 00000000..dd97f1e7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler.py @@ -0,0 +1,43 @@ +from numpy.testing import assert_ +import numpy.distutils.fcompiler + +customizable_flags = [ + ('f77', 'F77FLAGS'), + ('f90', 'F90FLAGS'), + ('free', 'FREEFLAGS'), + ('arch', 'FARCH'), + ('debug', 'FDEBUG'), + ('flags', 'FFLAGS'), + ('linker_so', 'LDFLAGS'), +] + + +def test_fcompiler_flags(monkeypatch): + monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0') + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none') + flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) + + for opt, envvar in customizable_flags: + new_flag = '-dummy-{}-flag'.format(opt) + prev_flags = getattr(flag_vars, opt) + + monkeypatch.setenv(envvar, new_flag) + new_flags = getattr(flag_vars, opt) + + monkeypatch.delenv(envvar) + assert_(new_flags == [new_flag]) + + monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1') + + for opt, envvar in customizable_flags: + new_flag = '-dummy-{}-flag'.format(opt) + 
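+        # with NPY_DISTUTILS_APPEND_FLAGS=1 the flag taken from the
+        # environment variable is expected to be appended to any pre-existing
+        # default flags rather than replacing them; the assertions below
+        # check both cases
+        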
prev_flags = getattr(flag_vars, opt) + monkeypatch.setenv(envvar, new_flag) + new_flags = getattr(flag_vars, opt) + + monkeypatch.delenv(envvar) + if prev_flags is None: + assert_(new_flags == [new_flag]) + else: + assert_(new_flags == prev_flags + [new_flag]) + diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py new file mode 100644 index 00000000..0817ae58 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py @@ -0,0 +1,55 @@ +from numpy.testing import assert_ + +import numpy.distutils.fcompiler + +g77_version_strings = [ + ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'), + ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'), + ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'), + ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'), + ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2' + ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'), +] + +gfortran_version_strings = [ + ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))', + '4.0.3'), + ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'), + ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'), + ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'), + ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'), + ('4.8.0', '4.8.0'), + ('4.0.3-7', '4.0.3'), + ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1", + '4.9.1'), + ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n" + "gfortran: warning: yet another warning\n4.9.1", + '4.9.1'), + ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0') +] + +class TestG77Versions: + def test_g77_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') + for vs, version in g77_version_strings: + v = fc.version_match(vs) + assert_(v == version, (vs, v)) + + def test_not_g77(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu') + for vs, _ in gfortran_version_strings: + v = fc.version_match(vs) + assert_(v is None, (vs, v)) + +class TestGFortranVersions: + def test_gfortran_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') + for vs, version in gfortran_version_strings: + v = fc.version_match(vs) + assert_(v == version, (vs, v)) + + def test_not_gfortran(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') + for vs, _ in g77_version_strings: + v = fc.version_match(vs) + assert_(v is None, (vs, v)) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_intel.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_intel.py new file mode 100644 index 00000000..45c9cdac --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_intel.py @@ -0,0 +1,30 @@ +import numpy.distutils.fcompiler +from numpy.testing import assert_ + + +intel_32bit_version_strings = [ + ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications" + "running on Intel(R) 32, Version 11.1", '11.1'), +] + +intel_64bit_version_strings = [ + ("Intel(R) Fortran IA-64 Compiler Professional for applications" + "running on IA-64, Version 11.0", '11.0'), + ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications" + "running on Intel(R) 64, Version 11.1", '11.1') +] + +class TestIntelFCompilerVersions: + def test_32bit_version(self): + fc = 
numpy.distutils.fcompiler.new_fcompiler(compiler='intel') + for vs, version in intel_32bit_version_strings: + v = fc.version_match(vs) + assert_(v == version) + + +class TestIntelEM64TFCompilerVersions: + def test_64bit_version(self): + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem') + for vs, version in intel_64bit_version_strings: + v = fc.version_match(vs) + assert_(v == version) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py new file mode 100644 index 00000000..2e04f526 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py @@ -0,0 +1,22 @@ +from numpy.testing import assert_ +import numpy.distutils.fcompiler + +nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release ' + '6.2(Chiyoda) Build 6200', '6.2'), + ('nagfor', 'NAG Fortran Compiler Release ' + '6.1(Tozai) Build 6136', '6.1'), + ('nagfor', 'NAG Fortran Compiler Release ' + '6.0(Hibiya) Build 1021', '6.0'), + ('nagfor', 'NAG Fortran Compiler Release ' + '5.3.2(971)', '5.3.2'), + ('nag', 'NAGWare Fortran 95 compiler Release 5.1' + '(347,355-367,375,380-383,389,394,399,401-402,407,' + '431,435,437,446,459-460,463,472,494,496,503,508,' + '511,517,529,555,557,565)', '5.1')] + +class TestNagFCompilerVersions: + def test_version_match(self): + for comp, vs, version in nag_version_strings: + fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp) + v = fc.version_match(vs) + assert_(v == version) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_from_template.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_from_template.py new file mode 100644 index 00000000..58817549 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_from_template.py @@ -0,0 +1,44 @@ + +from numpy.distutils.from_template import process_str +from numpy.testing import assert_equal + + +pyf_src = """ +python module foo + <_rd=real,double precision> + interface + subroutine foosub(tol) + <_rd>, intent(in,out) :: tol + end subroutine foosub + end interface +end python module foo +""" + +expected_pyf = """ +python module foo + interface + subroutine sfoosub(tol) + real, intent(in,out) :: tol + end subroutine sfoosub + subroutine dfoosub(tol) + double precision, intent(in,out) :: tol + end subroutine dfoosub + end interface +end python module foo +""" + + +def normalize_whitespace(s): + """ + Remove leading and trailing whitespace, and convert internal + stretches of whitespace to a single space. + """ + return ' '.join(s.split()) + + +def test_from_template(): + """Regression test for gh-10712.""" + pyf = process_str(pyf_src) + normalized_pyf = normalize_whitespace(pyf) + normalized_expected_pyf = normalize_whitespace(expected_pyf) + assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_log.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_log.py new file mode 100644 index 00000000..72fddf37 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_log.py @@ -0,0 +1,34 @@ +import io +import re +from contextlib import redirect_stdout + +import pytest + +from numpy.distutils import log + + +def setup_module(): + f = io.StringIO() # changing verbosity also logs here, capture that + with redirect_stdout(f): + log.set_verbosity(2, force=True) # i.e. 
DEBUG + + +def teardown_module(): + log.set_verbosity(0, force=True) # the default + + +r_ansi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") + + +@pytest.mark.parametrize("func_name", ["error", "warn", "info", "debug"]) +def test_log_prefix(func_name): + func = getattr(log, func_name) + msg = f"{func_name} message" + f = io.StringIO() + with redirect_stdout(f): + func(msg) + out = f.getvalue() + assert out # sanity check + clean_out = r_ansi.sub("", out) + line = next(line for line in clean_out.splitlines()) + assert line == f"{func_name.upper()}: {msg}" diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py new file mode 100644 index 00000000..ebedacb3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py @@ -0,0 +1,42 @@ +import shutil +import subprocess +import sys +import pytest + +from numpy.distutils import mingw32ccompiler + + +@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test') +def test_build_import(): + '''Test the mingw32ccompiler.build_import_library, which builds a + `python.a` from the MSVC `python.lib` + ''' + + # make sure `nm.exe` exists and supports the current python version. This + # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit + try: + out = subprocess.check_output(['nm.exe', '--help']) + except FileNotFoundError: + pytest.skip("'nm.exe' not on path, is mingw installed?") + supported = out[out.find(b'supported targets:'):] + if sys.maxsize < 2**32: + if b'pe-i386' not in supported: + raise ValueError("'nm.exe' found but it does not support 32-bit " + "dlls when using 32-bit python. Supported " + "formats: '%s'" % supported) + elif b'pe-x86-64' not in supported: + raise ValueError("'nm.exe' found but it does not support 64-bit " + "dlls when using 64-bit python. 
Supported " + "formats: '%s'" % supported) + # Hide the import library to force a build + has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib() + if has_import_lib: + shutil.move(fullpath, fullpath + '.bak') + + try: + # Whew, now we can actually test the function + mingw32ccompiler.build_import_library() + + finally: + if has_import_lib: + shutil.move(fullpath + '.bak', fullpath) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_misc_util.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_misc_util.py new file mode 100644 index 00000000..605c8048 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_misc_util.py @@ -0,0 +1,82 @@ +from os.path import join, sep, dirname + +from numpy.distutils.misc_util import ( + appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info + ) +from numpy.testing import ( + assert_, assert_equal + ) + +ajoin = lambda *paths: join(*((sep,)+paths)) + +class TestAppendpath: + + def test_1(self): + assert_equal(appendpath('prefix', 'name'), join('prefix', 'name')) + assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name')) + assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name')) + assert_equal(appendpath('prefix', '/name'), join('prefix', 'name')) + + def test_2(self): + assert_equal(appendpath('prefix/sub', 'name'), + join('prefix', 'sub', 'name')) + assert_equal(appendpath('prefix/sub', 'sup/name'), + join('prefix', 'sub', 'sup', 'name')) + assert_equal(appendpath('/prefix/sub', '/prefix/name'), + ajoin('prefix', 'sub', 'name')) + + def test_3(self): + assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'), + ajoin('prefix', 'sub', 'sup', 'name')) + assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'), + ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name')) + assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'), + ajoin('prefix', 'sub', 'sub2', 'sup', 'name')) + +class TestMinrelpath: + + def test_1(self): + n = lambda path: path.replace('/', sep) + assert_equal(minrelpath(n('aa/bb')), n('aa/bb')) + assert_equal(minrelpath('..'), '..') + assert_equal(minrelpath(n('aa/..')), '') + assert_equal(minrelpath(n('aa/../bb')), 'bb') + assert_equal(minrelpath(n('aa/bb/..')), 'aa') + assert_equal(minrelpath(n('aa/bb/../..')), '') + assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd')) + assert_equal(minrelpath(n('.././..')), n('../..')) + assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd')) + +class TestGpaths: + + def test_gpaths(self): + local_path = minrelpath(join(dirname(__file__), '..')) + ls = gpaths('command/*.py', local_path) + assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls)) + f = gpaths('system_info.py', local_path) + assert_(join(local_path, 'system_info.py') == f[0], repr(f)) + +class TestSharedExtension: + + def test_get_shared_lib_extension(self): + import sys + ext = get_shared_lib_extension(is_python_ext=False) + if sys.platform.startswith('linux'): + assert_equal(ext, '.so') + elif sys.platform.startswith('gnukfreebsd'): + assert_equal(ext, '.so') + elif sys.platform.startswith('darwin'): + assert_equal(ext, '.dylib') + elif sys.platform.startswith('win'): + assert_equal(ext, '.dll') + # just check for no crash + assert_(get_shared_lib_extension(is_python_ext=True)) + + +def test_installed_npymath_ini(): + # Regression test for gh-7707. If npymath.ini wasn't installed, then this + # will give an error. 
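+    # get_info('npymath') returns a build-info dict; besides 'define_macros'
+    # (asserted below) it typically also carries entries such as
+    # 'include_dirs' and 'library_dirs' (typical contents, not checked here)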
+ info = get_info('npymath') + + assert isinstance(info, dict) + assert "define_macros" in info diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_npy_pkg_config.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_npy_pkg_config.py new file mode 100644 index 00000000..b287ebe2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_npy_pkg_config.py @@ -0,0 +1,84 @@ +import os + +from numpy.distutils.npy_pkg_config import read_config, parse_flags +from numpy.testing import temppath, assert_ + +simple = """\ +[meta] +Name = foo +Description = foo lib +Version = 0.1 + +[default] +cflags = -I/usr/include +libs = -L/usr/lib +""" +simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib', + 'version': '0.1', 'name': 'foo'} + +simple_variable = """\ +[meta] +Name = foo +Description = foo lib +Version = 0.1 + +[variables] +prefix = /foo/bar +libdir = ${prefix}/lib +includedir = ${prefix}/include + +[default] +cflags = -I${includedir} +libs = -L${libdir} +""" +simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib', + 'version': '0.1', 'name': 'foo'} + +class TestLibraryInfo: + def test_simple(self): + with temppath('foo.ini') as path: + with open(path, 'w') as f: + f.write(simple) + pkg = os.path.splitext(path)[0] + out = read_config(pkg) + + assert_(out.cflags() == simple_d['cflags']) + assert_(out.libs() == simple_d['libflags']) + assert_(out.name == simple_d['name']) + assert_(out.version == simple_d['version']) + + def test_simple_variable(self): + with temppath('foo.ini') as path: + with open(path, 'w') as f: + f.write(simple_variable) + pkg = os.path.splitext(path)[0] + out = read_config(pkg) + + assert_(out.cflags() == simple_variable_d['cflags']) + assert_(out.libs() == simple_variable_d['libflags']) + assert_(out.name == simple_variable_d['name']) + assert_(out.version == simple_variable_d['version']) + out.vars['prefix'] = '/Users/david' + assert_(out.cflags() == '-I/Users/david/include') + +class TestParseFlags: + def test_simple_cflags(self): + d = parse_flags("-I/usr/include") + assert_(d['include_dirs'] == ['/usr/include']) + + d = parse_flags("-I/usr/include -DFOO") + assert_(d['include_dirs'] == ['/usr/include']) + assert_(d['macros'] == ['FOO']) + + d = parse_flags("-I /usr/include -DFOO") + assert_(d['include_dirs'] == ['/usr/include']) + assert_(d['macros'] == ['FOO']) + + def test_simple_lflags(self): + d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar") + assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + assert_(d['libraries'] == ['foo', 'bar']) + + d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar") + assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib']) + assert_(d['libraries'] == ['foo', 'bar']) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_shell_utils.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_shell_utils.py new file mode 100644 index 00000000..696d38dd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_shell_utils.py @@ -0,0 +1,79 @@ +import pytest +import subprocess +import json +import sys + +from numpy.distutils import _shell_utils +from numpy.testing import IS_WASM + +argv_cases = [ + [r'exe'], + [r'path/exe'], + [r'path\exe'], + [r'\\server\path\exe'], + [r'path to/exe'], + [r'path to\exe'], + + [r'exe', '--flag'], + [r'path/exe', '--flag'], + [r'path\exe', '--flag'], + [r'path to/exe', '--flag'], + [r'path to\exe', '--flag'], + + # flags containing literal 
quotes in their name + [r'path to/exe', '--flag-"quoted"'], + [r'path to\exe', '--flag-"quoted"'], + [r'path to/exe', '"--flag-quoted"'], + [r'path to\exe', '"--flag-quoted"'], +] + + +@pytest.fixture(params=[ + _shell_utils.WindowsParser, + _shell_utils.PosixParser +]) +def Parser(request): + return request.param + + +@pytest.fixture +def runner(Parser): + if Parser != _shell_utils.NativeParser: + pytest.skip('Unable to run with non-native parser') + + if Parser == _shell_utils.WindowsParser: + return lambda cmd: subprocess.check_output(cmd) + elif Parser == _shell_utils.PosixParser: + # posix has no non-shell string parsing + return lambda cmd: subprocess.check_output(cmd, shell=True) + else: + raise NotImplementedError + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.parametrize('argv', argv_cases) +def test_join_matches_subprocess(Parser, runner, argv): + """ + Test that join produces strings understood by subprocess + """ + # invoke python to return its arguments as json + cmd = [ + sys.executable, '-c', + 'import json, sys; print(json.dumps(sys.argv[1:]))' + ] + joined = Parser.join(cmd + argv) + json_out = runner(joined).decode() + assert json.loads(json_out) == argv + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.parametrize('argv', argv_cases) +def test_roundtrip(Parser, argv): + """ + Test that split is the inverse operation of join + """ + try: + joined = Parser.join(argv) + assert argv == Parser.split(joined) + except NotImplementedError: + pytest.skip("Not implemented") diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_system_info.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_system_info.py new file mode 100644 index 00000000..9bcc0905 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/tests/test_system_info.py @@ -0,0 +1,334 @@ +import os +import shutil +import pytest +from tempfile import mkstemp, mkdtemp +from subprocess import Popen, PIPE +import importlib.metadata +from distutils.errors import DistutilsError + +from numpy.testing import assert_, assert_equal, assert_raises +from numpy.distutils import ccompiler, customized_ccompiler +from numpy.distutils.system_info import system_info, ConfigParser, mkl_info +from numpy.distutils.system_info import AliasedOptionError +from numpy.distutils.system_info import default_lib_dirs, default_include_dirs +from numpy.distutils import _shell_utils + + +try: + if importlib.metadata.version('setuptools') >= '60': + # pkg-resources gives deprecation warnings, and there may be more + # issues. 
We only support setuptools <60
+        pytest.skip("setuptools is too new", allow_module_level=True)
+except importlib.metadata.PackageNotFoundError:
+    # we don't require `setuptools`; if it is not found, continue
+    pass
+
+
+def get_class(name, notfound_action=1):
+    """
+    notfound_action:
+      0 - do nothing
+      1 - display warning message
+      2 - raise error
+    """
+    cl = {'temp1': Temp1Info,
+          'temp2': Temp2Info,
+          'duplicate_options': DuplicateOptionInfo,
+          }.get(name.lower(), _system_info)
+    return cl()
+
+simple_site = """
+[ALL]
+library_dirs = {dir1:s}{pathsep:s}{dir2:s}
+libraries = {lib1:s},{lib2:s}
+extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os
+runtime_library_dirs = {dir1:s}
+
+[temp1]
+library_dirs = {dir1:s}
+libraries = {lib1:s}
+runtime_library_dirs = {dir1:s}
+
+[temp2]
+library_dirs = {dir2:s}
+libraries = {lib2:s}
+extra_link_args = -Wl,-rpath={lib2_escaped:s}
+rpath = {dir2:s}
+
+[duplicate_options]
+mylib_libs = {lib1:s}
+libraries = {lib2:s}
+"""
+site_cfg = simple_site
+
+fakelib_c_text = """
+/* This file is generated from numpy/distutils/testing/test_system_info.py */
+#include <stdio.h>
+void foo(void) {
+    printf("Hello foo");
+}
+void bar(void) {
+    printf("Hello bar");
+}
+"""
+
+def have_compiler():
+    """ Return True if there appears to be an executable compiler
+    """
+    compiler = customized_ccompiler()
+    try:
+        cmd = compiler.compiler  # Unix compilers
+    except AttributeError:
+        try:
+            if not compiler.initialized:
+                compiler.initialize()  # MSVC is different
+        except (DistutilsError, ValueError):
+            return False
+        cmd = [compiler.cc]
+    try:
+        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+        p.stdout.close()
+        p.stderr.close()
+        p.wait()
+    except OSError:
+        return False
+    return True
+
+
+HAVE_COMPILER = have_compiler()
+
+
+class _system_info(system_info):
+
+    def __init__(self,
+                 default_lib_dirs=default_lib_dirs,
+                 default_include_dirs=default_include_dirs,
+                 verbosity=1,
+                 ):
+        self.__class__.info = {}
+        self.local_prefixes = []
+        defaults = {'library_dirs': '',
+                    'include_dirs': '',
+                    'runtime_library_dirs': '',
+                    'rpath': '',
+                    'src_dirs': '',
+                    'search_static_first': "0",
+                    'extra_compile_args': '',
+                    'extra_link_args': ''}
+        self.cp = ConfigParser(defaults)
+        # We have to parse the config files afterwards
+        # to have a consistent temporary filepath
+
+    def _check_libs(self, lib_dirs, libs, opt_libs, exts):
+        """Override _check_libs to return with all dirs """
+        info = {'libraries': libs, 'library_dirs': lib_dirs}
+        return info
+
+
+class Temp1Info(_system_info):
+    """For testing purposes"""
+    section = 'temp1'
+
+
+class Temp2Info(_system_info):
+    """For testing purposes"""
+    section = 'temp2'
+
+class DuplicateOptionInfo(_system_info):
+    """For testing purposes"""
+    section = 'duplicate_options'
+
+
+class TestSystemInfoReading:
+
+    def setup_method(self):
+        """ Create the libraries """
+        # Create 2 sources and 2 libraries
+        self._dir1 = mkdtemp()
+        self._src1 = os.path.join(self._dir1, 'foo.c')
+        self._lib1 = os.path.join(self._dir1, 'libfoo.so')
+        self._dir2 = mkdtemp()
+        self._src2 = os.path.join(self._dir2, 'bar.c')
+        self._lib2 = os.path.join(self._dir2, 'libbar.so')
+        # Update local site.cfg
+        global simple_site, site_cfg
+        site_cfg = simple_site.format(**{
+            'dir1': self._dir1,
+            'lib1': self._lib1,
+            'dir2': self._dir2,
+            'lib2': self._lib2,
+            'pathsep': os.pathsep,
+            'lib2_escaped': _shell_utils.NativeParser.join([self._lib2])
+        })
+        # Write site.cfg
+        fd, self._sitecfg = mkstemp()
+        os.close(fd)
+        with open(self._sitecfg, 'w') as fd:
+            fd.write(site_cfg)
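+        # at this point site_cfg is fully rendered; its [temp1] section, for
+        # example, reads schematically:
+        #   [temp1]
+        #   library_dirs = <dir1>
+        #   libraries = <dir1>/libfoo.so
+        # (placeholders for illustration; the real paths come from mkdtemp())
+        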
# Write the sources + with open(self._src1, 'w') as fd: + fd.write(fakelib_c_text) + with open(self._src2, 'w') as fd: + fd.write(fakelib_c_text) + # We create all class-instances + + def site_and_parse(c, site_cfg): + c.files = [site_cfg] + c.parse_config_files() + return c + self.c_default = site_and_parse(get_class('default'), self._sitecfg) + self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg) + self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg) + self.c_dup_options = site_and_parse(get_class('duplicate_options'), + self._sitecfg) + + def teardown_method(self): + # Do each removal separately + try: + shutil.rmtree(self._dir1) + except Exception: + pass + try: + shutil.rmtree(self._dir2) + except Exception: + pass + try: + os.remove(self._sitecfg) + except Exception: + pass + + def test_all(self): + # Read in all information in the ALL block + tsi = self.c_default + assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2]) + assert_equal(tsi.get_libraries(), [self._lib1, self._lib2]) + assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) + extra = tsi.calc_extra_info() + assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os']) + + def test_temp1(self): + # Read in all information in the temp1 block + tsi = self.c_temp1 + assert_equal(tsi.get_lib_dirs(), [self._dir1]) + assert_equal(tsi.get_libraries(), [self._lib1]) + assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1]) + + def test_temp2(self): + # Read in all information in the temp2 block + tsi = self.c_temp2 + assert_equal(tsi.get_lib_dirs(), [self._dir2]) + assert_equal(tsi.get_libraries(), [self._lib2]) + # Now from rpath and not runtime_library_dirs + assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2]) + extra = tsi.calc_extra_info() + assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2]) + + def test_duplicate_options(self): + # Ensure that duplicates are raising an AliasedOptionError + tsi = self.c_dup_options + assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries") + assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1]) + assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2]) + + @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") + def test_compile1(self): + # Compile source and link the first source + c = customized_ccompiler() + previousDir = os.getcwd() + try: + # Change directory to not screw up directories + os.chdir(self._dir1) + c.compile([os.path.basename(self._src1)], output_dir=self._dir1) + # Ensure that the object exists + assert_(os.path.isfile(self._src1.replace('.c', '.o')) or + os.path.isfile(self._src1.replace('.c', '.obj'))) + finally: + os.chdir(previousDir) + + @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler") + @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()), + reason="Fails with MSVC compiler ") + def test_compile2(self): + # Compile source and link the second source + tsi = self.c_temp2 + c = customized_ccompiler() + extra_link_args = tsi.calc_extra_info()['extra_link_args'] + previousDir = os.getcwd() + try: + # Change directory to not screw up directories + os.chdir(self._dir2) + c.compile([os.path.basename(self._src2)], output_dir=self._dir2, + extra_postargs=extra_link_args) + # Ensure that the object exists + assert_(os.path.isfile(self._src2.replace('.c', '.o'))) + finally: + os.chdir(previousDir) + + HAS_MKL = "mkl_rt" in mkl_info().calc_libraries_info().get("libraries", []) + + 
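+    # HAS_MKL is evaluated once, at class-definition time; when numpy itself
+    # links against MKL the '[DEFAULT]' override exercised below is known not
+    # to take effect, hence the xfail marker on test_overrides
+    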
@pytest.mark.xfail(HAS_MKL, reason=("`[DEFAULT]` override doesn't work if " + "numpy is built with MKL support")) + def test_overrides(self): + previousDir = os.getcwd() + cfg = os.path.join(self._dir1, 'site.cfg') + shutil.copy(self._sitecfg, cfg) + try: + os.chdir(self._dir1) + # Check that the '[ALL]' section does not override + # missing values from other sections + info = mkl_info() + lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep) + assert info.get_lib_dirs() != lib_dirs + + # But if we copy the values to a '[mkl]' section the value + # is correct + with open(cfg) as fid: + mkl = fid.read().replace('[ALL]', '[mkl]', 1) + with open(cfg, 'w') as fid: + fid.write(mkl) + info = mkl_info() + assert info.get_lib_dirs() == lib_dirs + + # Also, the values will be taken from a section named '[DEFAULT]' + with open(cfg) as fid: + dflt = fid.read().replace('[mkl]', '[DEFAULT]', 1) + with open(cfg, 'w') as fid: + fid.write(dflt) + info = mkl_info() + assert info.get_lib_dirs() == lib_dirs + finally: + os.chdir(previousDir) + + +def test_distutils_parse_env_order(monkeypatch): + from numpy.distutils.system_info import _parse_env_order + env = 'NPY_TESTS_DISTUTILS_PARSE_ENV_ORDER' + + base_order = list('abcdef') + + monkeypatch.setenv(env, 'b,i,e,f') + order, unknown = _parse_env_order(base_order, env) + assert len(order) == 3 + assert order == list('bef') + assert len(unknown) == 1 + + # For when LAPACK/BLAS optimization is disabled + monkeypatch.setenv(env, '') + order, unknown = _parse_env_order(base_order, env) + assert len(order) == 0 + assert len(unknown) == 0 + + for prefix in '^!': + monkeypatch.setenv(env, f'{prefix}b,i,e') + order, unknown = _parse_env_order(base_order, env) + assert len(order) == 4 + assert order == list('acdf') + assert len(unknown) == 1 + + with pytest.raises(ValueError): + monkeypatch.setenv(env, 'b,^e,i') + _parse_env_order(base_order, env) + + with pytest.raises(ValueError): + monkeypatch.setenv(env, '!b,^e,i') + _parse_env_order(base_order, env) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/unixccompiler.py b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/unixccompiler.py new file mode 100644 index 00000000..4884960f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/distutils/unixccompiler.py @@ -0,0 +1,141 @@ +""" +unixccompiler - can handle very long argument lists for ar. + +""" +import os +import sys +import subprocess +import shlex + +from distutils.errors import CompileError, DistutilsExecError, LibError +from distutils.unixccompiler import UnixCCompiler +from numpy.distutils.ccompiler import replace_method +from numpy.distutils.misc_util import _commandline_dep_string +from numpy.distutils import log + +# Note that UnixCCompiler._compile appeared in Python 2.3 +def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): + """Compile a single source files with a Unix-style compiler.""" + # HP ad-hoc fix, see ticket 1383 + ccomp = self.compiler_so + if ccomp[0] == 'aCC': + # remove flags that will trigger ANSI-C mode for aCC + if '-Ae' in ccomp: + ccomp.remove('-Ae') + if '-Aa' in ccomp: + ccomp.remove('-Aa') + # add flags for (almost) sane C++ handling + ccomp += ['-AA'] + self.compiler_so = ccomp + # ensure OPT environment variable is read + if 'OPT' in os.environ: + # XXX who uses this? 
+ from sysconfig import get_config_vars + opt = shlex.join(shlex.split(os.environ['OPT'])) + gcv_opt = shlex.join(shlex.split(get_config_vars('OPT')[0])) + ccomp_s = shlex.join(self.compiler_so) + if opt not in ccomp_s: + ccomp_s = ccomp_s.replace(gcv_opt, opt) + self.compiler_so = shlex.split(ccomp_s) + llink_s = shlex.join(self.linker_so) + if opt not in llink_s: + self.linker_so = self.linker_so + shlex.split(opt) + + display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) + + # gcc style automatic dependencies, outputs a makefile (-MF) that lists + # all headers needed by a c file as a side effect of compilation (-MMD) + if getattr(self, '_auto_depends', False): + deps = ['-MMD', '-MF', obj + '.d'] + else: + deps = [] + + try: + self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps + + extra_postargs, display = display) + except DistutilsExecError as e: + msg = str(e) + raise CompileError(msg) from None + + # add commandline flags to dependency file + if deps: + # After running the compiler, the file created will be in EBCDIC + # but will not be tagged as such. This tags it so the file does not + # have multiple different encodings being written to it + if sys.platform == 'zos': + subprocess.check_output(['chtag', '-tc', 'IBM1047', obj + '.d']) + with open(obj + '.d', 'a') as f: + f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts)) + +replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) + + +def UnixCCompiler_create_static_lib(self, objects, output_libname, + output_dir=None, debug=0, target_lang=None): + """ + Build a static library in a separate sub-process. + + Parameters + ---------- + objects : list or tuple of str + List of paths to object files used to build the static library. + output_libname : str + The library name as an absolute or relative (if `output_dir` is used) + path. + output_dir : str, optional + The path to the output directory. Default is None, in which case + the ``output_dir`` attribute of the UnixCCompiler instance. + debug : bool, optional + This parameter is not used. + target_lang : str, optional + This parameter is not used. + + Returns + ------- + None + + """ + objects, output_dir = self._fix_object_args(objects, output_dir) + + output_filename = \ + self.library_filename(output_libname, output_dir=output_dir) + + if self._need_link(objects, output_filename): + try: + # previous .a may be screwed up; best to remove it first + # and recreate. + # Also, ar on OS X doesn't handle updating universal archives + os.unlink(output_filename) + except OSError: + pass + self.mkpath(os.path.dirname(output_filename)) + tmp_objects = objects + self.objects + while tmp_objects: + objects = tmp_objects[:50] + tmp_objects = tmp_objects[50:] + display = '%s: adding %d object files to %s' % ( + os.path.basename(self.archiver[0]), + len(objects), output_filename) + self.spawn(self.archiver + [output_filename] + objects, + display = display) + + # Not many Unices required ranlib anymore -- SunOS 4.x is, I + # think the only major Unix that does. Maybe we need some + # platform intelligence here to skip ranlib if it's not + # needed -- or maybe Python's configure script took care of + # it for us, hence the check for leading colon. 
+ if self.ranlib: + display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), + output_filename) + try: + self.spawn(self.ranlib + [output_filename], + display = display) + except DistutilsExecError as e: + msg = str(e) + raise LibError(msg) from None + else: + log.debug("skipping %s (up-to-date)", output_filename) + return + +replace_method(UnixCCompiler, 'create_static_lib', + UnixCCompiler_create_static_lib) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/doc/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/doc/__init__.py new file mode 100644 index 00000000..8a944fec --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/doc/__init__.py @@ -0,0 +1,26 @@ +import os + +ref_dir = os.path.join(os.path.dirname(__file__)) + +__all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and + not f.startswith('__')) + +for f in __all__: + __import__(__name__ + '.' + f) + +del f, ref_dir + +__doc__ = """\ +Topical documentation +===================== + +The following topics are available: +%s + +You can view them by + +>>> help(np.doc.TOPIC) #doctest: +SKIP + +""" % '\n- '.join([''] + __all__) + +__all__.extend(['__doc__']) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/doc/constants.py b/dbdpy-env/lib/python3.9/site-packages/numpy/doc/constants.py new file mode 100644 index 00000000..4db5c639 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/doc/constants.py @@ -0,0 +1,412 @@ +""" +========= +Constants +========= + +.. currentmodule:: numpy + +NumPy includes several constants: + +%(constant_list)s +""" +# +# Note: the docstring is autogenerated. +# +import re +import textwrap + +# Maintain same format as in numpy.add_newdocs +constants = [] +def add_newdoc(module, name, doc): + constants.append((name, doc)) + +add_newdoc('numpy', 'pi', + """ + ``pi = 3.1415926535897932384626433...`` + + References + ---------- + https://en.wikipedia.org/wiki/Pi + + """) + +add_newdoc('numpy', 'e', + """ + Euler's constant, base of natural logarithms, Napier's constant. + + ``e = 2.71828182845904523536028747135266249775724709369995...`` + + See Also + -------- + exp : Exponential function + log : Natural logarithm + + References + ---------- + https://en.wikipedia.org/wiki/E_%28mathematical_constant%29 + + """) + +add_newdoc('numpy', 'euler_gamma', + """ + ``γ = 0.5772156649015328606065120900824024310421...`` + + References + ---------- + https://en.wikipedia.org/wiki/Euler-Mascheroni_constant + + """) + +add_newdoc('numpy', 'inf', + """ + IEEE 754 floating point representation of (positive) infinity. + + Returns + ------- + y : float + A floating point representation of positive infinity. + + See Also + -------- + isinf : Shows which elements are positive or negative infinity + + isposinf : Shows which elements are positive infinity + + isneginf : Shows which elements are negative infinity + + isnan : Shows which elements are Not a Number + + isfinite : Shows which elements are finite (not one of Not a Number, + positive infinity and negative infinity) + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Also that positive infinity is not equivalent to negative infinity. But + infinity is equivalent to positive infinity. + + `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`. + + Examples + -------- + >>> np.inf + inf + >>> np.array([1]) / 0. 
+ array([ Inf]) + + """) + +add_newdoc('numpy', 'nan', + """ + IEEE 754 floating point representation of Not a Number (NaN). + + Returns + ------- + y : A floating point representation of Not a Number. + + See Also + -------- + isnan : Shows which elements are Not a Number. + + isfinite : Shows which elements are finite (not one of + Not a Number, positive infinity and negative infinity) + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + + `NaN` and `NAN` are aliases of `nan`. + + Examples + -------- + >>> np.nan + nan + >>> np.log(-1) + nan + >>> np.log([-1, 1, 2]) + array([ NaN, 0. , 0.69314718]) + + """) + +add_newdoc('numpy', 'newaxis', + """ + A convenient alias for None, useful for indexing arrays. + + Examples + -------- + >>> newaxis is None + True + >>> x = np.arange(3) + >>> x + array([0, 1, 2]) + >>> x[:, newaxis] + array([[0], + [1], + [2]]) + >>> x[:, newaxis, newaxis] + array([[[0]], + [[1]], + [[2]]]) + >>> x[:, newaxis] * x + array([[0, 0, 0], + [0, 1, 2], + [0, 2, 4]]) + + Outer product, same as ``outer(x, y)``: + + >>> y = np.arange(3, 6) + >>> x[:, newaxis] * y + array([[ 0, 0, 0], + [ 3, 4, 5], + [ 6, 8, 10]]) + + ``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``: + + >>> x[newaxis, :].shape + (1, 3) + >>> x[newaxis].shape + (1, 3) + >>> x[None].shape + (1, 3) + >>> x[:, newaxis].shape + (3, 1) + + """) + +add_newdoc('numpy', 'NZERO', + """ + IEEE 754 floating point representation of negative zero. + + Returns + ------- + y : float + A floating point representation of negative zero. + + See Also + -------- + PZERO : Defines positive zero. + + isinf : Shows which elements are positive or negative infinity. + + isposinf : Shows which elements are positive infinity. + + isneginf : Shows which elements are negative infinity. + + isnan : Shows which elements are Not a Number. + + isfinite : Shows which elements are finite - not one of + Not a Number, positive infinity and negative infinity. + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). Negative zero is considered to be a finite number. + + Examples + -------- + >>> np.NZERO + -0.0 + >>> np.PZERO + 0.0 + + >>> np.isfinite([np.NZERO]) + array([ True]) + >>> np.isnan([np.NZERO]) + array([False]) + >>> np.isinf([np.NZERO]) + array([False]) + + """) + +add_newdoc('numpy', 'PZERO', + """ + IEEE 754 floating point representation of positive zero. + + Returns + ------- + y : float + A floating point representation of positive zero. + + See Also + -------- + NZERO : Defines negative zero. + + isinf : Shows which elements are positive or negative infinity. + + isposinf : Shows which elements are positive infinity. + + isneginf : Shows which elements are negative infinity. + + isnan : Shows which elements are Not a Number. + + isfinite : Shows which elements are finite - not one of + Not a Number, positive infinity and negative infinity. + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). Positive zero is considered to be a finite number. + + Examples + -------- + >>> np.PZERO + 0.0 + >>> np.NZERO + -0.0 + + >>> np.isfinite([np.PZERO]) + array([ True]) + >>> np.isnan([np.PZERO]) + array([False]) + >>> np.isinf([np.PZERO]) + array([False]) + + """) + +add_newdoc('numpy', 'NAN', + """ + IEEE 754 floating point representation of Not a Number (NaN). 
+ + `NaN` and `NAN` are equivalent definitions of `nan`. Please use + `nan` instead of `NAN`. + + See Also + -------- + nan + + """) + +add_newdoc('numpy', 'NaN', + """ + IEEE 754 floating point representation of Not a Number (NaN). + + `NaN` and `NAN` are equivalent definitions of `nan`. Please use + `nan` instead of `NaN`. + + See Also + -------- + nan + + """) + +add_newdoc('numpy', 'NINF', + """ + IEEE 754 floating point representation of negative infinity. + + Returns + ------- + y : float + A floating point representation of negative infinity. + + See Also + -------- + isinf : Shows which elements are positive or negative infinity + + isposinf : Shows which elements are positive infinity + + isneginf : Shows which elements are negative infinity + + isnan : Shows which elements are Not a Number + + isfinite : Shows which elements are finite (not one of Not a Number, + positive infinity and negative infinity) + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Also that positive infinity is not equivalent to negative infinity. But + infinity is equivalent to positive infinity. + + Examples + -------- + >>> np.NINF + -inf + >>> np.log(0) + -inf + + """) + +add_newdoc('numpy', 'PINF', + """ + IEEE 754 floating point representation of (positive) infinity. + + Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for + `inf`. For more details, see `inf`. + + See Also + -------- + inf + + """) + +add_newdoc('numpy', 'infty', + """ + IEEE 754 floating point representation of (positive) infinity. + + Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for + `inf`. For more details, see `inf`. + + See Also + -------- + inf + + """) + +add_newdoc('numpy', 'Inf', + """ + IEEE 754 floating point representation of (positive) infinity. + + Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for + `inf`. For more details, see `inf`. + + See Also + -------- + inf + + """) + +add_newdoc('numpy', 'Infinity', + """ + IEEE 754 floating point representation of (positive) infinity. + + Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for + `inf`. For more details, see `inf`. + + See Also + -------- + inf + + """) + + +if __doc__: + constants_str = [] + constants.sort() + for name, doc in constants: + s = textwrap.dedent(doc).replace("\n", "\n ") + + # Replace sections by rubrics + lines = s.split("\n") + new_lines = [] + for line in lines: + m = re.match(r'^(\s+)[-=]+\s*$', line) + if m and new_lines: + prev = textwrap.dedent(new_lines.pop()) + new_lines.append('%s.. rubric:: %s' % (m.group(1), prev)) + new_lines.append('') + else: + new_lines.append(line) + s = "\n".join(new_lines) + + # Done. + constants_str.append(""".. data:: %s\n %s""" % (name, s)) + constants_str = "\n".join(constants_str) + + __doc__ = __doc__ % dict(constant_list=constants_str) + del constants_str, name, doc + del line, lines, new_lines, m, s, prev + +del constants, add_newdoc diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/doc/ufuncs.py b/dbdpy-env/lib/python3.9/site-packages/numpy/doc/ufuncs.py new file mode 100644 index 00000000..c99e9abc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/doc/ufuncs.py @@ -0,0 +1,137 @@ +""" +=================== +Universal Functions +=================== + +Ufuncs are, generally speaking, mathematical functions or operations that are +applied element-by-element to the contents of an array. 
That is, the result +in each output array element only depends on the value in the corresponding +input array (or arrays) and on no other array elements. NumPy comes with a +large suite of ufuncs, and scipy extends that suite substantially. The simplest +example is the addition operator: :: + + >>> np.array([0,2,3,4]) + np.array([1,1,-1,2]) + array([1, 3, 2, 6]) + +The ufunc module lists all the available ufuncs in numpy. Documentation on +the specific ufuncs may be found in those modules. This documentation is +intended to address the more general aspects of ufuncs common to most of +them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.) +have equivalent functions defined (e.g. add() for +) + +Type coercion +============= + +What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of +two different types? What is the type of the result? Typically, the result is +the higher of the two types. For example: :: + + float32 + float64 -> float64 + int8 + int32 -> int32 + int16 + float32 -> float32 + float32 + complex64 -> complex64 + +There are some less obvious cases generally involving mixes of types +(e.g. uints, ints and floats) where equal bit sizes for each are not +capable of saving all the information in a different type of equivalent +bit size. Some examples are int32 vs float32 or uint32 vs int32. +Generally, the result is the higher type of larger size than both +(if available). So: :: + + int32 + float32 -> float64 + uint32 + int32 -> int64 + +Finally, the type coercion behavior when expressions involve Python +scalars is different than that seen for arrays. Since Python has a +limited number of types, combining a Python int with a dtype=np.int8 +array does not coerce to the higher type but instead, the type of the +array prevails. So the rules for Python scalars combined with arrays is +that the result will be that of the array equivalent the Python scalar +if the Python scalar is of a higher 'kind' than the array (e.g., float +vs. int), otherwise the resultant type will be that of the array. +For example: :: + + Python int + int8 -> int8 + Python float + int8 -> float64 + +ufunc methods +============= + +Binary ufuncs support 4 methods. + +**.reduce(arr)** applies the binary operator to elements of the array in + sequence. For example: :: + + >>> np.add.reduce(np.arange(10)) # adds all elements of array + 45 + +For multidimensional arrays, the first dimension is reduced by default: :: + + >>> np.add.reduce(np.arange(10).reshape(2,5)) + array([ 5, 7, 9, 11, 13]) + +The axis keyword can be used to specify different axes to reduce: :: + + >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1) + array([10, 35]) + +**.accumulate(arr)** applies the binary operator and generates an +equivalently shaped array that includes the accumulated amount for each +element of the array. A couple examples: :: + + >>> np.add.accumulate(np.arange(10)) + array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45]) + >>> np.multiply.accumulate(np.arange(1,9)) + array([ 1, 2, 6, 24, 120, 720, 5040, 40320]) + +The behavior for multidimensional arrays is the same as for .reduce(), +as is the use of the axis keyword). + +**.reduceat(arr,indices)** allows one to apply reduce to selected parts + of an array. It is a difficult method to understand. See the documentation + at: + +**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and + arr2. 
It will work on multidimensional arrays (the shape of the result is + the concatenation of the two input shapes.: :: + + >>> np.multiply.outer(np.arange(3),np.arange(4)) + array([[0, 0, 0, 0], + [0, 1, 2, 3], + [0, 2, 4, 6]]) + +Output arguments +================ + +All ufuncs accept an optional output array. The array must be of the expected +output shape. Beware that if the type of the output array is of a different +(and lower) type than the output result, the results may be silently truncated +or otherwise corrupted in the downcast to the lower type. This usage is useful +when one wants to avoid creating large temporary arrays and instead allows one +to reuse the same array memory repeatedly (at the expense of not being able to +use more convenient operator notation in expressions). Note that when the +output argument is used, the ufunc still returns a reference to the result. + + >>> x = np.arange(2) + >>> np.add(np.arange(2),np.arange(2.),x) + array([0, 2]) + >>> x + array([0, 2]) + +and & or as ufuncs +================== + +Invariably people try to use the python 'and' and 'or' as logical operators +(and quite understandably). But these operators do not behave as normal +operators since Python treats these quite differently. They cannot be +overloaded with array equivalents. Thus using 'and' or 'or' with an array +results in an error. There are two alternatives: + + 1) use the ufunc functions logical_and() and logical_or(). + 2) use the bitwise operators & and \\|. The drawback of these is that if + the arguments to these operators are not boolean arrays, the result is + likely incorrect. On the other hand, most usages of logical_and and + logical_or are with boolean arrays. As long as one is careful, this is + a convenient way to apply these operators. + +""" diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/dtypes.py b/dbdpy-env/lib/python3.9/site-packages/numpy/dtypes.py new file mode 100644 index 00000000..068a6a1a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/dtypes.py @@ -0,0 +1,77 @@ +""" +DType classes and utility (:mod:`numpy.dtypes`) +=============================================== + +This module is home to specific dtypes related functionality and their classes. +For more general information about dtypes, also see `numpy.dtype` and +:ref:`arrays.dtypes`. + +Similar to the builtin ``types`` module, this submodule defines types (classes) +that are not widely used directly. + +.. versionadded:: NumPy 1.25 + + The dtypes module is new in NumPy 1.25. Previously DType classes were + only accessible indirectly. + + +DType classes +------------- + +The following are the classes of the corresponding NumPy dtype instances and +NumPy scalar types. The classes can be used in ``isinstance`` checks and can +also be instantiated or used directly. Direct use of these classes is not +typical, since their scalar counterparts (e.g. ``np.float64``) or strings +like ``"float64"`` can be used. + +.. 
list-table::
+   :header-rows: 1
+
+   * - Group
+     - DType class
+
+   * - Boolean
+     - ``BoolDType``
+
+   * - Bit-sized integers
+     - ``Int8DType``, ``UInt8DType``, ``Int16DType``, ``UInt16DType``,
+       ``Int32DType``, ``UInt32DType``, ``Int64DType``, ``UInt64DType``
+
+   * - C-named integers (may be aliases)
+     - ``ByteDType``, ``UByteDType``, ``ShortDType``, ``UShortDType``,
+       ``IntDType``, ``UIntDType``, ``LongDType``, ``ULongDType``,
+       ``LongLongDType``, ``ULongLongDType``
+
+   * - Floating point
+     - ``Float16DType``, ``Float32DType``, ``Float64DType``,
+       ``LongDoubleDType``
+
+   * - Complex
+     - ``Complex64DType``, ``Complex128DType``, ``CLongDoubleDType``
+
+   * - Strings
+     - ``BytesDType``, ``StrDType``
+
+   * - Times
+     - ``DateTime64DType``, ``TimeDelta64DType``
+
+   * - Others
+     - ``ObjectDType``, ``VoidDType``
+
+"""
+
+__all__ = []
+
+
+def _add_dtype_helper(DType, alias):
+    # Function to add DTypes a bit more conveniently without channeling them
+    # through `numpy.core._multiarray_umath` namespace or similar.
+    from numpy import dtypes
+
+    setattr(dtypes, DType.__name__, DType)
+    __all__.append(DType.__name__)
+
+    if alias:
+        alias = alias.removeprefix("numpy.dtypes.")
+        setattr(dtypes, alias, DType)
+        __all__.append(alias)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/dtypes.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/dtypes.pyi
new file mode 100644
index 00000000..2f7e846f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/dtypes.pyi
@@ -0,0 +1,43 @@
+import numpy as np
+
+
+__all__: list[str]
+
+# Boolean:
+BoolDType = np.dtype[np.bool_]
+# Sized integers:
+Int8DType = np.dtype[np.int8]
+UInt8DType = np.dtype[np.uint8]
+Int16DType = np.dtype[np.int16]
+UInt16DType = np.dtype[np.uint16]
+Int32DType = np.dtype[np.int32]
+UInt32DType = np.dtype[np.uint32]
+Int64DType = np.dtype[np.int64]
+UInt64DType = np.dtype[np.uint64]
+# Standard C-named version/alias:
+ByteDType = np.dtype[np.byte]
+UByteDType = np.dtype[np.ubyte]
+ShortDType = np.dtype[np.short]
+UShortDType = np.dtype[np.ushort]
+IntDType = np.dtype[np.intc]
+UIntDType = np.dtype[np.uintc]
+LongDType = np.dtype[np.int_]  # Unfortunately, the correct scalar
+ULongDType = np.dtype[np.uint]  # Unfortunately, the correct scalar
+LongLongDType = np.dtype[np.longlong]
+ULongLongDType = np.dtype[np.ulonglong]
+# Floats
+Float16DType = np.dtype[np.float16]
+Float32DType = np.dtype[np.float32]
+Float64DType = np.dtype[np.float64]
+LongDoubleDType = np.dtype[np.longdouble]
+# Complex:
+Complex64DType = np.dtype[np.complex64]
+Complex128DType = np.dtype[np.complex128]
+CLongDoubleDType = np.dtype[np.clongdouble]
+# Others:
+ObjectDType = np.dtype[np.object_]
+BytesDType = np.dtype[np.bytes_]
+StrDType = np.dtype[np.str_]
+VoidDType = np.dtype[np.void]
+DateTime64DType = np.dtype[np.datetime64]
+TimeDelta64DType = np.dtype[np.timedelta64]
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/exceptions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/exceptions.py
new file mode 100644
index 00000000..2f843810
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/exceptions.py
@@ -0,0 +1,231 @@
+"""
+Exceptions and Warnings (:mod:`numpy.exceptions`)
+=================================================
+
+General exceptions used by NumPy. Note that some exceptions may be module
+specific, such as linear algebra errors.
+
+.. versionadded:: NumPy 1.25
+
+    The exceptions module is new in NumPy 1.25. Older exceptions remain
+    available through the main NumPy namespace for compatibility.
+
+.. 
currentmodule:: numpy.exceptions + +Warnings +-------- +.. autosummary:: + :toctree: generated/ + + ComplexWarning Given when converting complex to real. + VisibleDeprecationWarning Same as a DeprecationWarning, but more visible. + +Exceptions +---------- +.. autosummary:: + :toctree: generated/ + + AxisError Given when an axis was invalid. + DTypePromotionError Given when no common dtype could be found. + TooHardError Error specific to `numpy.shares_memory`. + +""" + + +__all__ = [ + "ComplexWarning", "VisibleDeprecationWarning", "ModuleDeprecationWarning", + "TooHardError", "AxisError", "DTypePromotionError"] + + +# Disallow reloading this module so as to preserve the identities of the +# classes defined here. +if '_is_loaded' in globals(): + raise RuntimeError('Reloading numpy._globals is not allowed') +_is_loaded = True + + +class ComplexWarning(RuntimeWarning): + """ + The warning raised when casting a complex dtype to a real dtype. + + As implemented, casting a complex number to a real discards its imaginary + part, but this behavior may not be what the user actually wants. + + """ + pass + + +class ModuleDeprecationWarning(DeprecationWarning): + """Module deprecation warning. + + .. warning:: + + This warning should not be used, since nose testing is not relevant + anymore. + + The nose tester turns ordinary Deprecation warnings into test failures. + That makes it hard to deprecate whole modules, because they get + imported by default. So this is a special Deprecation warning that the + nose tester will let pass without making tests fail. + + """ + + +class VisibleDeprecationWarning(UserWarning): + """Visible deprecation warning. + + By default, python will not show deprecation warnings, so this class + can be used when a very visible warning is helpful, for example because + the usage is most likely a user bug. + + """ + + +# Exception used in shares_memory() +class TooHardError(RuntimeError): + """max_work was exceeded. + + This is raised whenever the maximum number of candidate solutions + to consider specified by the ``max_work`` parameter is exceeded. + Assigning a finite number to max_work may have caused the operation + to fail. + + """ + + pass + + +class AxisError(ValueError, IndexError): + """Axis supplied was invalid. + + This is raised whenever an ``axis`` parameter is specified that is larger + than the number of array dimensions. + For compatibility with code written against older numpy versions, which + raised a mixture of `ValueError` and `IndexError` for this situation, this + exception subclasses both to ensure that ``except ValueError`` and + ``except IndexError`` statements continue to catch `AxisError`. + + .. versionadded:: 1.13 + + Parameters + ---------- + axis : int or str + The out of bounds axis or a custom exception message. + If an axis is provided, then `ndim` should be specified as well. + ndim : int, optional + The number of array dimensions. + msg_prefix : str, optional + A prefix for the exception message. + + Attributes + ---------- + axis : int, optional + The out of bounds axis or ``None`` if a custom exception + message was provided. This should be the axis as passed by + the user, before any normalization to resolve negative indices. + + .. versionadded:: 1.22 + ndim : int, optional + The number of array dimensions or ``None`` if a custom exception + message was provided. + + .. versionadded:: 1.22 + + + Examples + -------- + >>> array_1d = np.arange(10) + >>> np.cumsum(array_1d, axis=1) + Traceback (most recent call last): + ... 
+    numpy.exceptions.AxisError: axis 1 is out of bounds for array of dimension 1
+
+    Negative axes are preserved:
+
+    >>> np.cumsum(array_1d, axis=-2)
+    Traceback (most recent call last):
+    ...
+    numpy.exceptions.AxisError: axis -2 is out of bounds for array of dimension 1
+
+    The class constructor generally takes the axis and arrays'
+    dimensionality as arguments:
+
+    >>> print(np.AxisError(2, 1, msg_prefix='error'))
+    error: axis 2 is out of bounds for array of dimension 1
+
+    Alternatively, a custom exception message can be passed:
+
+    >>> print(np.AxisError('Custom error message'))
+    Custom error message
+
+    """
+
+    __slots__ = ("axis", "ndim", "_msg")
+
+    def __init__(self, axis, ndim=None, msg_prefix=None):
+        if ndim is msg_prefix is None:
+            # single-argument form: directly set the error message
+            self._msg = axis
+            self.axis = None
+            self.ndim = None
+        else:
+            self._msg = msg_prefix
+            self.axis = axis
+            self.ndim = ndim
+
+    def __str__(self):
+        axis = self.axis
+        ndim = self.ndim
+
+        if axis is ndim is None:
+            return self._msg
+        else:
+            msg = f"axis {axis} is out of bounds for array of dimension {ndim}"
+            if self._msg is not None:
+                msg = f"{self._msg}: {msg}"
+            return msg
+
+
+class DTypePromotionError(TypeError):
+    """Multiple DTypes could not be converted to a common one.
+
+    This exception derives from ``TypeError`` and is raised whenever dtypes
+    cannot be converted to a single common one. This can be because they
+    are of a different category/class or incompatible instances of the same
+    one (see Examples).
+
+    Notes
+    -----
+    Many functions will use promotion to find the correct result and
+    implementation. For these functions the error will typically be chained
+    with a more specific error indicating that no implementation was found
+    for the input dtypes.
+
+    Typically promotion should be considered "invalid" between the dtypes of
+    two arrays when `arr1 == arr2` can safely return all ``False`` because the
+    dtypes are fundamentally different.
+
+    Examples
+    --------
+    Datetimes and complex numbers are incompatible classes and cannot be
+    promoted:
+
+    >>> np.result_type(np.dtype("M8[s]"), np.complex128)
+    DTypePromotionError: The DType <class 'numpy.dtype[datetime64]'> could not
+    be promoted by <class 'numpy.dtype[complex128]'>. This means that no common
+    DType exists for the given inputs. For example they cannot be stored in a
+    single array unless the dtype is `object`. The full list of DTypes is:
+    (<class 'numpy.dtype[datetime64]'>, <class 'numpy.dtype[complex128]'>)
+
+    For example for structured dtypes, the structure can mismatch and the
+    same ``DTypePromotionError`` is given when two structured dtypes with
+    a mismatch in their number of fields is given:
+
+    >>> dtype1 = np.dtype([("field1", np.float64), ("field2", np.int64)])
+    >>> dtype2 = np.dtype([("field1", np.float64)])
+    >>> np.promote_types(dtype1, dtype2)
+    DTypePromotionError: field names `('field1', 'field2')` and `('field1',)`
+    mismatch.
+
+    """
+    pass
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/exceptions.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/exceptions.pyi
new file mode 100644
index 00000000..c76a0946
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/exceptions.pyi
@@ -0,0 +1,18 @@
+from typing import overload
+
+__all__: list[str]
+
+class ComplexWarning(RuntimeWarning): ...
+class ModuleDeprecationWarning(DeprecationWarning): ...
+class VisibleDeprecationWarning(UserWarning): ...
+class TooHardError(RuntimeError): ...
+class DTypePromotionError(TypeError): ...
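A minimal runnable sketch of the dual inheritance documented above: because AxisError subclasses both ValueError and IndexError, either except clause catches it, and both constructor forms from the docstring print as shown (the array below is illustrative only).

import numpy as np
from numpy.exceptions import AxisError

try:
    np.cumsum(np.arange(10), axis=1)   # a 1-D array has no axis 1
except ValueError as exc:              # `except IndexError` would match too
    print(exc)                         # axis 1 is out of bounds for array of dimension 1

# Two-argument form versus single custom-message form:
print(AxisError(2, 1, msg_prefix='error'))  # error: axis 2 is out of bounds for array of dimension 1
print(AxisError('Custom error message'))    # Custom error message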
+ +class AxisError(ValueError, IndexError): + axis: None | int + ndim: None | int + @overload + def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None: ... + @overload + def __init__(self, axis: int, ndim: int, msg_prefix: None | str = ...) -> None: ... + def __str__(self) -> str: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__init__.py new file mode 100644 index 00000000..e583250f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__init__.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python3 +"""Fortran to Python Interface Generator. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the terms +of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +__all__ = ['run_main', 'compile', 'get_include'] + +import sys +import subprocess +import os +import warnings + +from numpy.exceptions import VisibleDeprecationWarning +from . import f2py2e +from . import diagnose + +run_main = f2py2e.run_main +main = f2py2e.main + + +def compile(source, + modulename='untitled', + extra_args='', + verbose=True, + source_fn=None, + extension='.f', + full_output=False + ): + """ + Build extension module from a Fortran 77 source string with f2py. + + Parameters + ---------- + source : str or bytes + Fortran source of module / subroutine to compile + + .. versionchanged:: 1.16.0 + Accept str as well as bytes + + modulename : str, optional + The name of the compiled python module + extra_args : str or list, optional + Additional parameters passed to f2py + + .. versionchanged:: 1.16.0 + A list of args may also be provided. + + verbose : bool, optional + Print f2py output to screen + source_fn : str, optional + Name of the file where the fortran source is written. + The default is to use a temporary file with the extension + provided by the ``extension`` parameter + extension : ``{'.f', '.f90'}``, optional + Filename extension if `source_fn` is not provided. + The extension tells which fortran standard is used. + The default is ``.f``, which implies F77 standard. + + .. versionadded:: 1.11.0 + + full_output : bool, optional + If True, return a `subprocess.CompletedProcess` containing + the stdout and stderr of the compile process, instead of just + the status code. + + .. versionadded:: 1.20.0 + + + Returns + ------- + result : int or `subprocess.CompletedProcess` + 0 on success, or a `subprocess.CompletedProcess` if + ``full_output=True`` + + Examples + -------- + .. 
literalinclude:: ../../source/f2py/code/results/compile_session.dat + :language: python + + """ + import tempfile + import shlex + + if source_fn is None: + f, fname = tempfile.mkstemp(suffix=extension) + # f is a file descriptor so need to close it + # carefully -- not with .close() directly + os.close(f) + else: + fname = source_fn + + if not isinstance(source, str): + source = str(source, 'utf-8') + try: + with open(fname, 'w') as f: + f.write(source) + + args = ['-c', '-m', modulename, f.name] + + if isinstance(extra_args, str): + is_posix = (os.name == 'posix') + extra_args = shlex.split(extra_args, posix=is_posix) + + args.extend(extra_args) + + c = [sys.executable, + '-c', + 'import numpy.f2py as f2py2e;f2py2e.main()'] + args + try: + cp = subprocess.run(c, capture_output=True) + except OSError: + # preserve historic status code used by exec_command() + cp = subprocess.CompletedProcess(c, 127, stdout=b'', stderr=b'') + else: + if verbose: + print(cp.stdout.decode()) + finally: + if source_fn is None: + os.remove(fname) + + if full_output: + return cp + else: + return cp.returncode + + +def get_include(): + """ + Return the directory that contains the ``fortranobject.c`` and ``.h`` files. + + .. note:: + + This function is not needed when building an extension with + `numpy.distutils` directly from ``.f`` and/or ``.pyf`` files + in one go. + + Python extension modules built with f2py-generated code need to use + ``fortranobject.c`` as a source file, and include the ``fortranobject.h`` + header. This function can be used to obtain the directory containing + both of these files. + + Returns + ------- + include_path : str + Absolute path to the directory containing ``fortranobject.c`` and + ``fortranobject.h``. + + Notes + ----- + .. versionadded:: 1.21.1 + + Unless the build system you are using has specific support for f2py, + building a Python extension using a ``.pyf`` signature file is a two-step + process. For a module ``mymod``: + + * Step 1: run ``python -m numpy.f2py mymod.pyf --quiet``. This + generates ``_mymodmodule.c`` and (if needed) + ``_fblas-f2pywrappers.f`` files next to ``mymod.pyf``. + * Step 2: build your Python extension module. 
This requires the + following source files: + + * ``_mymodmodule.c`` + * ``_mymod-f2pywrappers.f`` (if it was generated in Step 1) + * ``fortranobject.c`` + + See Also + -------- + numpy.get_include : function that returns the numpy include directory + + """ + return os.path.join(os.path.dirname(__file__), 'src') + + +def __getattr__(attr): + + # Avoid importing things that aren't needed for building + # which might import the main numpy module + if attr == "test": + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + return test + + else: + raise AttributeError("module {!r} has no attribute " + "{!r}".format(__name__, attr)) + + +def __dir__(): + return list(globals().keys() | {"test"}) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__init__.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__init__.pyi new file mode 100644 index 00000000..81b6a24f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__init__.pyi @@ -0,0 +1,42 @@ +import os +import subprocess +from collections.abc import Iterable +from typing import Literal as L, Any, overload, TypedDict + +from numpy._pytesttester import PytestTester + +class _F2PyDictBase(TypedDict): + csrc: list[str] + h: list[str] + +class _F2PyDict(_F2PyDictBase, total=False): + fsrc: list[str] + ltx: list[str] + +__all__: list[str] +test: PytestTester + +def run_main(comline_list: Iterable[str]) -> dict[str, _F2PyDict]: ... + +@overload +def compile( # type: ignore[misc] + source: str | bytes, + modulename: str = ..., + extra_args: str | list[str] = ..., + verbose: bool = ..., + source_fn: None | str | bytes | os.PathLike[Any] = ..., + extension: L[".f", ".f90"] = ..., + full_output: L[False] = ..., +) -> int: ... +@overload +def compile( + source: str | bytes, + modulename: str = ..., + extra_args: str | list[str] = ..., + verbose: bool = ..., + source_fn: None | str | bytes | os.PathLike[Any] = ..., + extension: L[".f", ".f90"] = ..., + full_output: L[True] = ..., +) -> subprocess.CompletedProcess[bytes]: ... + +def get_include() -> str: ... 
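A minimal sketch of the compile() API documented above, assuming a Fortran compiler is available on PATH; the subroutine addone and modulename='demo' are illustrative names only.

import numpy.f2py

fsource = '''
      subroutine addone(x, y)
      integer x, y
cf2py intent(out) y
      y = x + 1
      end
'''
# compile() returns the exit status (0 on success) unless full_output=True.
status = numpy.f2py.compile(fsource, modulename='demo', verbose=False)
if status == 0:
    import demo                # the freshly built extension module
    print(demo.addone(41))     # prints 42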
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__main__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__main__.py new file mode 100644 index 00000000..936a753a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__main__.py @@ -0,0 +1,5 @@ +# See: +# https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e +from numpy.f2py.f2py2e import main + +main() diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__version__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__version__.py new file mode 100644 index 00000000..e20d7c1d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/__version__.py @@ -0,0 +1 @@ +from numpy.version import version diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/__init__.py new file mode 100644 index 00000000..e91393c1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/__init__.py @@ -0,0 +1,9 @@ +def f2py_build_generator(name): + if name == "meson": + from ._meson import MesonBackend + return MesonBackend + elif name == "distutils": + from ._distutils import DistutilsBackend + return DistutilsBackend + else: + raise ValueError(f"Unknown backend: {name}") diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/_backend.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/_backend.py new file mode 100644 index 00000000..a7d43d25 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/_backend.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod + + +class Backend(ABC): + def __init__( + self, + modulename, + sources, + extra_objects, + build_dir, + include_dirs, + library_dirs, + libraries, + define_macros, + undef_macros, + f2py_flags, + sysinfo_flags, + fc_flags, + flib_flags, + setup_flags, + remove_build_dir, + extra_dat, + ): + self.modulename = modulename + self.sources = sources + self.extra_objects = extra_objects + self.build_dir = build_dir + self.include_dirs = include_dirs + self.library_dirs = library_dirs + self.libraries = libraries + self.define_macros = define_macros + self.undef_macros = undef_macros + self.f2py_flags = f2py_flags + self.sysinfo_flags = sysinfo_flags + self.fc_flags = fc_flags + self.flib_flags = flib_flags + self.setup_flags = setup_flags + self.remove_build_dir = remove_build_dir + self.extra_dat = extra_dat + + @abstractmethod + def compile(self) -> None: + """Compile the wrapper.""" + pass diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/_distutils.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/_distutils.py new file mode 100644 index 00000000..e9b22a39 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/_distutils.py @@ -0,0 +1,75 @@ +from ._backend import Backend + +from numpy.distutils.core import setup, Extension +from numpy.distutils.system_info import get_info +from numpy.distutils.misc_util import dict_append +from numpy.exceptions import VisibleDeprecationWarning +import os +import sys +import shutil +import warnings + + +class DistutilsBackend(Backend): + def __init__(sef, *args, **kwargs): + warnings.warn( + "distutils has been deprecated since NumPy 1.26.x" + "Use the Meson backend instead, or generate wrappers" + "without -c and use a custom build script", + VisibleDeprecationWarning, + stacklevel=2, + ) + super().__init__(*args, **kwargs) + + def 
compile(self): + num_info = {} + if num_info: + self.include_dirs.extend(num_info.get("include_dirs", [])) + ext_args = { + "name": self.modulename, + "sources": self.sources, + "include_dirs": self.include_dirs, + "library_dirs": self.library_dirs, + "libraries": self.libraries, + "define_macros": self.define_macros, + "undef_macros": self.undef_macros, + "extra_objects": self.extra_objects, + "f2py_options": self.f2py_flags, + } + + if self.sysinfo_flags: + for n in self.sysinfo_flags: + i = get_info(n) + if not i: + print( + f"No {repr(n)} resources found" + "in system (try `f2py --help-link`)" + ) + dict_append(ext_args, **i) + + ext = Extension(**ext_args) + + sys.argv = [sys.argv[0]] + self.setup_flags + sys.argv.extend( + [ + "build", + "--build-temp", + self.build_dir, + "--build-base", + self.build_dir, + "--build-platlib", + ".", + "--disable-optimization", + ] + ) + + if self.fc_flags: + sys.argv.extend(["config_fc"] + self.fc_flags) + if self.flib_flags: + sys.argv.extend(["build_ext"] + self.flib_flags) + + setup(ext_modules=[ext]) + + if self.remove_build_dir and os.path.exists(self.build_dir): + print(f"Removing build directory {self.build_dir}") + shutil.rmtree(self.build_dir) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/_meson.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/_meson.py new file mode 100644 index 00000000..f324e0f5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/_meson.py @@ -0,0 +1,205 @@ +from __future__ import annotations + +import os +import errno +import shutil +import subprocess +import sys +from pathlib import Path + +from ._backend import Backend +from string import Template +from itertools import chain + +import warnings + + +class MesonTemplate: + """Template meson build file generation class.""" + + def __init__( + self, + modulename: str, + sources: list[Path], + deps: list[str], + libraries: list[str], + library_dirs: list[Path], + include_dirs: list[Path], + object_files: list[Path], + linker_args: list[str], + c_args: list[str], + build_type: str, + python_exe: str, + ): + self.modulename = modulename + self.build_template_path = ( + Path(__file__).parent.absolute() / "meson.build.template" + ) + self.sources = sources + self.deps = deps + self.libraries = libraries + self.library_dirs = library_dirs + if include_dirs is not None: + self.include_dirs = include_dirs + else: + self.include_dirs = [] + self.substitutions = {} + self.objects = object_files + self.pipeline = [ + self.initialize_template, + self.sources_substitution, + self.deps_substitution, + self.include_substitution, + self.libraries_substitution, + ] + self.build_type = build_type + self.python_exe = python_exe + + def meson_build_template(self) -> str: + if not self.build_template_path.is_file(): + raise FileNotFoundError( + errno.ENOENT, + "Meson build template" + f" {self.build_template_path.absolute()}" + " does not exist.", + ) + return self.build_template_path.read_text() + + def initialize_template(self) -> None: + self.substitutions["modulename"] = self.modulename + self.substitutions["buildtype"] = self.build_type + self.substitutions["python"] = self.python_exe + + def sources_substitution(self) -> None: + indent = " " * 21 + self.substitutions["source_list"] = f",\n{indent}".join( + [f"{indent}'{source}'" for source in self.sources] + ) + + def deps_substitution(self) -> None: + indent = " " * 21 + self.substitutions["dep_list"] = f",\n{indent}".join( + [f"{indent}dependency('{dep}')" for dep in 
self.deps] + ) + + def libraries_substitution(self) -> None: + self.substitutions["lib_dir_declarations"] = "\n".join( + [ + f"lib_dir_{i} = declare_dependency(link_args : ['-L{lib_dir}'])" + for i, lib_dir in enumerate(self.library_dirs) + ] + ) + + self.substitutions["lib_declarations"] = "\n".join( + [ + f"{lib} = declare_dependency(link_args : ['-l{lib}'])" + for lib in self.libraries + ] + ) + + indent = " " * 21 + self.substitutions["lib_list"] = f"\n{indent}".join( + [f"{indent}{lib}," for lib in self.libraries] + ) + self.substitutions["lib_dir_list"] = f"\n{indent}".join( + [f"{indent}lib_dir_{i}," for i in range(len(self.library_dirs))] + ) + + def include_substitution(self) -> None: + indent = " " * 21 + self.substitutions["inc_list"] = f",\n{indent}".join( + [f"{indent}'{inc}'" for inc in self.include_dirs] + ) + + def generate_meson_build(self): + for node in self.pipeline: + node() + template = Template(self.meson_build_template()) + return template.substitute(self.substitutions) + + +class MesonBackend(Backend): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.dependencies = self.extra_dat.get("dependencies", []) + self.meson_build_dir = "bbdir" + self.build_type = ( + "debug" if any("debug" in flag for flag in self.fc_flags) else "release" + ) + + def _move_exec_to_root(self, build_dir: Path): + walk_dir = Path(build_dir) / self.meson_build_dir + path_objects = chain( + walk_dir.glob(f"{self.modulename}*.so"), + walk_dir.glob(f"{self.modulename}*.pyd"), + ) + # Same behavior as distutils + # https://github.com/numpy/numpy/issues/24874#issuecomment-1835632293 + for path_object in path_objects: + dest_path = Path.cwd() / path_object.name + if dest_path.exists(): + dest_path.unlink() + shutil.copy2(path_object, dest_path) + os.remove(path_object) + + def write_meson_build(self, build_dir: Path) -> None: + """Writes the meson build file at specified location""" + meson_template = MesonTemplate( + self.modulename, + self.sources, + self.dependencies, + self.libraries, + self.library_dirs, + self.include_dirs, + self.extra_objects, + self.flib_flags, + self.fc_flags, + self.build_type, + sys.executable, + ) + src = meson_template.generate_meson_build() + Path(build_dir).mkdir(parents=True, exist_ok=True) + meson_build_file = Path(build_dir) / "meson.build" + meson_build_file.write_text(src) + return meson_build_file + + def _run_subprocess_command(self, command, cwd): + subprocess.run(command, cwd=cwd, check=True) + + def run_meson(self, build_dir: Path): + setup_command = ["meson", "setup", self.meson_build_dir] + self._run_subprocess_command(setup_command, build_dir) + compile_command = ["meson", "compile", "-C", self.meson_build_dir] + self._run_subprocess_command(compile_command, build_dir) + + def compile(self) -> None: + self.sources = _prepare_sources(self.modulename, self.sources, self.build_dir) + self.write_meson_build(self.build_dir) + self.run_meson(self.build_dir) + self._move_exec_to_root(self.build_dir) + + +def _prepare_sources(mname, sources, bdir): + extended_sources = sources.copy() + Path(bdir).mkdir(parents=True, exist_ok=True) + # Copy sources + for source in sources: + if Path(source).exists() and Path(source).is_file(): + shutil.copy(source, bdir) + generated_sources = [ + Path(f"{mname}module.c"), + Path(f"{mname}-f2pywrappers2.f90"), + Path(f"{mname}-f2pywrappers.f"), + ] + bdir = Path(bdir) + for generated_source in generated_sources: + if generated_source.exists(): + shutil.copy(generated_source, bdir / 
generated_source.name) + extended_sources.append(generated_source.name) + generated_source.unlink() + extended_sources = [ + Path(source).name + for source in extended_sources + if not Path(source).suffix == ".pyf" + ] + return extended_sources diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/meson.build.template b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/meson.build.template new file mode 100644 index 00000000..8e34fdc8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_backends/meson.build.template @@ -0,0 +1,54 @@ +project('${modulename}', + ['c', 'fortran'], + version : '0.1', + meson_version: '>= 1.1.0', + default_options : [ + 'warning_level=1', + 'buildtype=${buildtype}' + ]) +fc = meson.get_compiler('fortran') + +py = import('python').find_installation('${python}', pure: false) +py_dep = py.dependency() + +incdir_numpy = run_command(py, + ['-c', 'import os; os.chdir(".."); import numpy; print(numpy.get_include())'], + check : true +).stdout().strip() + +incdir_f2py = run_command(py, + ['-c', 'import os; os.chdir(".."); import numpy.f2py; print(numpy.f2py.get_include())'], + check : true +).stdout().strip() + +inc_np = include_directories(incdir_numpy) +np_dep = declare_dependency(include_directories: inc_np) + +incdir_f2py = incdir_numpy / '..' / '..' / 'f2py' / 'src' +inc_f2py = include_directories(incdir_f2py) +fortranobject_c = incdir_f2py / 'fortranobject.c' + +inc_np = include_directories(incdir_numpy, incdir_f2py) +# gh-25000 +quadmath_dep = fc.find_library('quadmath', required: false) + +${lib_declarations} +${lib_dir_declarations} + +py.extension_module('${modulename}', + [ +${source_list}, + fortranobject_c + ], + include_directories: [ + inc_np, +${inc_list} + ], + dependencies : [ + py_dep, + quadmath_dep, +${dep_list} +${lib_list} +${lib_dir_list} + ], + install : true) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_isocbind.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_isocbind.py new file mode 100644 index 00000000..3043c5d9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_isocbind.py @@ -0,0 +1,62 @@ +""" +ISO_C_BINDING maps for f2py2e. +Only required declarations/macros/functions will be used. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
+""" +# These map to keys in c2py_map, via forced casting for now, see gh-25229 +iso_c_binding_map = { + 'integer': { + 'c_int': 'int', + 'c_short': 'short', # 'short' <=> 'int' for now + 'c_long': 'long', # 'long' <=> 'int' for now + 'c_long_long': 'long_long', + 'c_signed_char': 'signed_char', + 'c_size_t': 'unsigned', # size_t <=> 'unsigned' for now + 'c_int8_t': 'signed_char', # int8_t <=> 'signed_char' for now + 'c_int16_t': 'short', # int16_t <=> 'short' for now + 'c_int32_t': 'int', # int32_t <=> 'int' for now + 'c_int64_t': 'long_long', + 'c_int_least8_t': 'signed_char', # int_least8_t <=> 'signed_char' for now + 'c_int_least16_t': 'short', # int_least16_t <=> 'short' for now + 'c_int_least32_t': 'int', # int_least32_t <=> 'int' for now + 'c_int_least64_t': 'long_long', + 'c_int_fast8_t': 'signed_char', # int_fast8_t <=> 'signed_char' for now + 'c_int_fast16_t': 'short', # int_fast16_t <=> 'short' for now + 'c_int_fast32_t': 'int', # int_fast32_t <=> 'int' for now + 'c_int_fast64_t': 'long_long', + 'c_intmax_t': 'long_long', # intmax_t <=> 'long_long' for now + 'c_intptr_t': 'long', # intptr_t <=> 'long' for now + 'c_ptrdiff_t': 'long', # ptrdiff_t <=> 'long' for now + }, + 'real': { + 'c_float': 'float', + 'c_double': 'double', + 'c_long_double': 'long_double' + }, + 'complex': { + 'c_float_complex': 'complex_float', + 'c_double_complex': 'complex_double', + 'c_long_double_complex': 'complex_long_double' + }, + 'logical': { + 'c_bool': 'unsigned_char' # _Bool <=> 'unsigned_char' for now + }, + 'character': { + 'c_char': 'char' + } +} + +# TODO: See gh-25229 +isoc_c2pycode_map = {} +iso_c2py_map = {} + +isoc_kindmap = {} +for fortran_type, c_type_dict in iso_c_binding_map.items(): + for c_type in c_type_dict.keys(): + isoc_kindmap[c_type] = fortran_type diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_src_pyf.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_src_pyf.py new file mode 100644 index 00000000..6247b95b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/_src_pyf.py @@ -0,0 +1,239 @@ +import re + +# START OF CODE VENDORED FROM `numpy.distutils.from_template` +############################################################# +""" +process_file(filename) + + takes templated file .xxx.src and produces .xxx file where .xxx + is .pyf .f90 or .f using the following template rules: + + '<..>' denotes a template. + + All function and subroutine blocks in a source file with names that + contain '<..>' will be replicated according to the rules in '<..>'. + + The number of comma-separated words in '<..>' will determine the number of + replicates. + + '<..>' may have two different forms, named and short. For example, + + named: + where anywhere inside a block '
<p>' will be replaced with
+   'd', 's', 'z', and 'c' for each replicate of the block (the named
+   template here being <p=d,s,z,c>).
+
+   <_c>  is already defined: <_c=s,d,c,z>
+   <_t>  is already defined: <_t=real,double precision,complex,double complex>
+
+  short:
+   <s,d,c,z>, a short form of the named, useful when no <p> appears inside
+   a block.
+
+  In general, '<..>' contains a comma separated list of arbitrary
+  expressions. If these expressions must contain a comma|leftarrow|rightarrow,
+  then prepend the comma|leftarrow|rightarrow with a backslash.
+
+  If an expression matches '\\<index>' then it will be replaced
+  by <index>-th expression.
+
+  Note that all '<..>' forms in a block must have the same number of
+  comma-separated entries.
+
+  Predefined named template rules:
+   <prefix=s,d,c,z>
+   <ftype=real,double precision,complex,double complex>
+   <ftypereal=real,double precision>
+   <ctype=float,double,complex_float,complex_double>
+   <ctypereal=float,double>
+"""
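# ---------------------------------------------------------------------------
# [Editorial sketch, not part of the vendored NumPy file.] A worked example of
# the template rules documented above. A .f.src block such as
#
#     subroutine <_c>foo(x)
#     <_t> x
#     end subroutine <_c>foo
#
# is replicated four times by process_file()/process_str() below, pairing the
# predefined rules <_c=s,d,c,z> and <_t=real,double precision,complex,double
# complex>: sfoo declares `x` as real, dfoo as double precision, cfoo as
# complex, and zfoo as double complex.
# ---------------------------------------------------------------------------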
+routine_start_re = re.compile(r'(\n|\A)((     (\$|\*))|)\s*(subroutine|function)\b', re.I)
+routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
+function_start_re = re.compile(r'\n     (\$|\*)\s*function\b', re.I)
+
+def parse_structure(astr):
+    """ Return a list of tuples for each function or subroutine each
+    tuple is the start and end of a subroutine or function to be
+    expanded.
+    """
+
+    spanlist = []
+    ind = 0
+    while True:
+        m = routine_start_re.search(astr, ind)
+        if m is None:
+            break
+        start = m.start()
+        if function_start_re.match(astr, start, m.end()):
+            while True:
+                i = astr.rfind('\n', ind, start)
+                if i==-1:
+                    break
+                start = i
+                if astr[i:i+7]!='\n     $':
+                    break
+            start += 1
+        m = routine_end_re.search(astr, m.end())
+        ind = end = m and m.end()-1 or len(astr)
+        spanlist.append((start, end))
+    return spanlist
+
+template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
+named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
+list_re = re.compile(r"<\s*((.*?))\s*>")
+
+def find_repl_patterns(astr):
+    reps = named_re.findall(astr)
+    names = {}
+    for rep in reps:
+        name = rep[0].strip() or unique_key(names)
+        repl = rep[1].replace(r'\,', '@comma@')
+        thelist = conv(repl)
+        names[name] = thelist
+    return names
+
+def find_and_remove_repl_patterns(astr):
+    names = find_repl_patterns(astr)
+    astr = re.subn(named_re, '', astr)[0]
+    return astr, names
+
+item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
+def conv(astr):
+    b = astr.split(',')
+    l = [x.strip() for x in b]
+    for i in range(len(l)):
+        m = item_re.match(l[i])
+        if m:
+            j = int(m.group('index'))
+            l[i] = l[j]
+    return ','.join(l)
+
+def unique_key(adict):
+    """ Obtain a unique key given a dictionary."""
+    allkeys = list(adict.keys())
+    done = False
+    n = 1
+    while not done:
+        newkey = '__l%s' % (n)
+        if newkey in allkeys:
+            n += 1
+        else:
+            done = True
+    return newkey
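# ---------------------------------------------------------------------------
# [Editorial sketch, not part of the vendored NumPy file.] How the helpers
# above behave, assuming only the definitions in this module:
#
#     find_repl_patterns('<n=2,3>')   # -> {'n': '2,3'}
#     conv(r'a, \0')                  # -> 'a,a'  ('\\0' back-references item 0)
#     parse_structure('subroutine <_c>f(x)\n<_t> x\nend subroutine <_c>f\n')
#     # -> a single (start, end) span covering the subroutine block
# ---------------------------------------------------------------------------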
+template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
+def expand_sub(substr, names):
+    substr = substr.replace(r'\>', '@rightarrow@')
+    substr = substr.replace(r'\<', '@leftarrow@')
+    lnames = find_repl_patterns(substr)
+    substr = named_re.sub(r"<\1>", substr)  # get rid of definition templates
+
+    def listrepl(mobj):
+        thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
+        if template_name_re.match(thelist):
+            return "<%s>" % (thelist)
+        name = None
+        for key in lnames.keys():    # see if list is already in dictionary
+            if lnames[key] == thelist:
+                name = key
+        if name is None:      # this list is not in the dictionary yet
+            name = unique_key(lnames)
+            lnames[name] = thelist
+        return "<%s>" % name
+
+    substr = list_re.sub(listrepl, substr)  # convert all lists to named templates
+    # newnames are constructed as needed
+
+    numsubs = None
+    base_rule = None
+    rules = {}
+    for r in template_re.findall(substr):
+        if r not in rules:
+            thelist = lnames.get(r, names.get(r, None))
+            if thelist is None:
+                raise ValueError('No replicates found for <%s>' % (r))
+            if r not in names and not thelist.startswith('_'):
+                names[r] = thelist
+            rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
+            num = len(rule)
+
+            if numsubs is None:
+                numsubs = num
+                rules[r] = rule
+                base_rule = r
+            elif num == numsubs:
+                rules[r] = rule
+            else:
+                print("Mismatch in number of replacements (base <{}={}>) "
+                      "for <{}={}>. Ignoring.".format(base_rule, ','.join(rules[base_rule]), r, thelist))
+    if not rules:
+        return substr
+
+    def namerepl(mobj):
+        name = mobj.group(1)
+        return rules.get(name, (k+1)*[name])[k]
+
+    newstr = ''
+    for k in range(numsubs):
+        newstr += template_re.sub(namerepl, substr) + '\n\n'
+
+    newstr = newstr.replace('@rightarrow@', '>')
+    newstr = newstr.replace('@leftarrow@', '<')
+    return newstr
+
+def process_str(allstr):
+    newstr = allstr
+    writestr = ''
+
+    struct = parse_structure(newstr)
+
+    oldend = 0
+    names = {}
+    names.update(_special_names)
+    for sub in struct:
+        cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]])
+        writestr += cleanedstr
+        names.update(defs)
+        writestr += expand_sub(newstr[sub[0]:sub[1]], names)
+        oldend = sub[1]
+    writestr += newstr[oldend:]
+
+    return writestr
+
+include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+\.src)['\"]", re.I)
+
+# NOTE: `os` is used below; upstream f2py imports it at the top of this file,
+# and the import appears to have been dropped in this capture, so it is
+# restored here.
+import os
+
+def resolve_includes(source):
+    d = os.path.dirname(source)
+    with open(source) as fid:
+        lines = []
+        for line in fid:
+            m = include_src_re.match(line)
+            if m:
+                fn = m.group('name')
+                if not os.path.isabs(fn):
+                    fn = os.path.join(d, fn)
+                if os.path.isfile(fn):
+                    lines.extend(resolve_includes(fn))
+                else:
+                    lines.append(line)
+            else:
+                lines.append(line)
+    return lines
+
+def process_file(source):
+    lines = resolve_includes(source)
+    return process_str(''.join(lines))
+
+_special_names = find_repl_patterns('''
+<_c=s,d,c,z>
+<_t=real,double precision,complex,double complex>
+<prefix=s,d,c,z>
+<ftype=real,double precision,complex,double complex>
+<ftypereal=real,double precision>
+<ctype=float,double,complex_float,complex_double>
+<ctypereal=float,double>
+''')
+
+# END OF CODE VENDORED FROM `numpy.distutils.from_template`
+###########################################################
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/auxfuncs.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/auxfuncs.py
new file mode 100644
index 00000000..13a1074b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/auxfuncs.py
@@ -0,0 +1,988 @@
+"""
+Auxiliary functions for f2py2e.
+
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy (BSD style) LICENSE.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+"""
+import pprint
+import sys
+import re
+import types
+from functools import reduce
+from copy import deepcopy
+
+from . import __version__
+from .
import cfuncs + +__all__ = [ + 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle', + 'getargs2', 'getcallprotoargument', 'getcallstatement', + 'getfortranname', 'getpymethoddef', 'getrestdoc', 'getusercode', + 'getusercode1', 'getdimension', 'hasbody', 'hascallstatement', 'hascommon', + 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote', + 'isallocatable', 'isarray', 'isarrayofstrings', + 'ischaracter', 'ischaracterarray', 'ischaracter_or_characterarray', + 'iscomplex', + 'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn', + 'isdouble', 'isdummyroutine', 'isexternal', 'isfunction', + 'isfunction_wrap', 'isint1', 'isint1array', 'isinteger', 'isintent_aux', + 'isintent_c', 'isintent_callback', 'isintent_copy', 'isintent_dict', + 'isintent_hide', 'isintent_in', 'isintent_inout', 'isintent_inplace', + 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical', + 'islogicalfunction', 'islong_complex', 'islong_double', + 'islong_doublefunction', 'islong_long', 'islong_longfunction', + 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired', + 'isroutine', 'isscalar', 'issigned_long_longarray', 'isstring', + 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', + 'issubroutine', 'get_f2py_modulename', + 'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char', + 'isunsigned_chararray', 'isunsigned_long_long', + 'isunsigned_long_longarray', 'isunsigned_short', + 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', + 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', + 'getuseblocks', 'process_f2cmap_dict' +] + + +f2py_version = __version__.version + + +errmess = sys.stderr.write +show = pprint.pprint + +options = {} +debugoptions = [] +wrapfuncs = 1 + + +def outmess(t): + if options.get('verbose', 1): + sys.stdout.write(t) + + +def debugcapi(var): + return 'capi' in debugoptions + + +def _ischaracter(var): + return 'typespec' in var and var['typespec'] == 'character' and \ + not isexternal(var) + + +def _isstring(var): + return 'typespec' in var and var['typespec'] == 'character' and \ + not isexternal(var) + + +def ischaracter_or_characterarray(var): + return _ischaracter(var) and 'charselector' not in var + + +def ischaracter(var): + return ischaracter_or_characterarray(var) and not isarray(var) + + +def ischaracterarray(var): + return ischaracter_or_characterarray(var) and isarray(var) + + +def isstring_or_stringarray(var): + return _ischaracter(var) and 'charselector' in var + + +def isstring(var): + return isstring_or_stringarray(var) and not isarray(var) + + +def isstringarray(var): + return isstring_or_stringarray(var) and isarray(var) + + +def isarrayofstrings(var): # obsolete? + # leaving out '*' for now so that `character*(*) a(m)` and `character + # a(m,*)` are treated differently. Luckily `character**` is illegal. 
+ return isstringarray(var) and var['dimension'][-1] == '(*)' + + +def isarray(var): + return 'dimension' in var and not isexternal(var) + + +def isscalar(var): + return not (isarray(var) or isstring(var) or isexternal(var)) + + +def iscomplex(var): + return isscalar(var) and \ + var.get('typespec') in ['complex', 'double complex'] + + +def islogical(var): + return isscalar(var) and var.get('typespec') == 'logical' + + +def isinteger(var): + return isscalar(var) and var.get('typespec') == 'integer' + + +def isreal(var): + return isscalar(var) and var.get('typespec') == 'real' + + +def get_kind(var): + try: + return var['kindselector']['*'] + except KeyError: + try: + return var['kindselector']['kind'] + except KeyError: + pass + + +def isint1(var): + return var.get('typespec') == 'integer' \ + and get_kind(var) == '1' and not isarray(var) + + +def islong_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') not in ['integer', 'logical']: + return 0 + return get_kind(var) == '8' + + +def isunsigned_char(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-1' + + +def isunsigned_short(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-2' + + +def isunsigned(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-4' + + +def isunsigned_long_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-8' + + +def isdouble(var): + if not isscalar(var): + return 0 + if not var.get('typespec') == 'real': + return 0 + return get_kind(var) == '8' + + +def islong_double(var): + if not isscalar(var): + return 0 + if not var.get('typespec') == 'real': + return 0 + return get_kind(var) == '16' + + +def islong_complex(var): + if not iscomplex(var): + return 0 + return get_kind(var) == '32' + + +def iscomplexarray(var): + return isarray(var) and \ + var.get('typespec') in ['complex', 'double complex'] + + +def isint1array(var): + return isarray(var) and var.get('typespec') == 'integer' \ + and get_kind(var) == '1' + + +def isunsigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-1' + + +def isunsigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-2' + + +def isunsignedarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-4' + + +def isunsigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-8' + + +def issigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '1' + + +def issigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '2' + + +def issigned_array(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '4' + + +def issigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '8' + + +def isallocatable(var): + return 'attrspec' in var and 'allocatable' in var['attrspec'] + + +def ismutable(var): + return not ('dimension' not in var or isstring(var)) + + +def ismoduleroutine(rout): + return 'modulename' in rout + + +def 
ismodule(rout): + return 'block' in rout and 'module' == rout['block'] + + +def isfunction(rout): + return 'block' in rout and 'function' == rout['block'] + + +def isfunction_wrap(rout): + if isintent_c(rout): + return 0 + return wrapfuncs and isfunction(rout) and (not isexternal(rout)) + + +def issubroutine(rout): + return 'block' in rout and 'subroutine' == rout['block'] + + +def issubroutine_wrap(rout): + if isintent_c(rout): + return 0 + return issubroutine(rout) and hasassumedshape(rout) + +def isattr_value(var): + return 'value' in var.get('attrspec', []) + + +def hasassumedshape(rout): + if rout.get('hasassumedshape'): + return True + for a in rout['args']: + for d in rout['vars'].get(a, {}).get('dimension', []): + if d == ':': + rout['hasassumedshape'] = True + return True + return False + + +def requiresf90wrapper(rout): + return ismoduleroutine(rout) or hasassumedshape(rout) + + +def isroutine(rout): + return isfunction(rout) or issubroutine(rout) + + +def islogicalfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islogical(rout['vars'][a]) + return 0 + + +def islong_longfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islong_long(rout['vars'][a]) + return 0 + + +def islong_doublefunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islong_double(rout['vars'][a]) + return 0 + + +def iscomplexfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return iscomplex(rout['vars'][a]) + return 0 + + +def iscomplexfunction_warn(rout): + if iscomplexfunction(rout): + outmess("""\ + ************************************************************** + Warning: code with a function returning complex value + may not work correctly with your Fortran compiler. 
+ When using GNU gcc/g77 compilers, codes should work + correctly for callbacks with: + f2py -c -DF2PY_CB_RETURNCOMPLEX + **************************************************************\n""") + return 1 + return 0 + + +def isstringfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return isstring(rout['vars'][a]) + return 0 + + +def hasexternals(rout): + return 'externals' in rout and rout['externals'] + + +def isthreadsafe(rout): + return 'f2pyenhancements' in rout and \ + 'threadsafe' in rout['f2pyenhancements'] + + +def hasvariables(rout): + return 'vars' in rout and rout['vars'] + + +def isoptional(var): + return ('attrspec' in var and 'optional' in var['attrspec'] and + 'required' not in var['attrspec']) and isintent_nothide(var) + + +def isexternal(var): + return 'attrspec' in var and 'external' in var['attrspec'] + + +def getdimension(var): + dimpattern = r"\((.*?)\)" + if 'attrspec' in var.keys(): + if any('dimension' in s for s in var['attrspec']): + return [re.findall(dimpattern, v) for v in var['attrspec']][0] + + +def isrequired(var): + return not isoptional(var) and isintent_nothide(var) + + +def isintent_in(var): + if 'intent' not in var: + return 1 + if 'hide' in var['intent']: + return 0 + if 'inplace' in var['intent']: + return 0 + if 'in' in var['intent']: + return 1 + if 'out' in var['intent']: + return 0 + if 'inout' in var['intent']: + return 0 + if 'outin' in var['intent']: + return 0 + return 1 + + +def isintent_inout(var): + return ('intent' in var and ('inout' in var['intent'] or + 'outin' in var['intent']) and 'in' not in var['intent'] and + 'hide' not in var['intent'] and 'inplace' not in var['intent']) + + +def isintent_out(var): + return 'out' in var.get('intent', []) + + +def isintent_hide(var): + return ('intent' in var and ('hide' in var['intent'] or + ('out' in var['intent'] and 'in' not in var['intent'] and + (not l_or(isintent_inout, isintent_inplace)(var))))) + + +def isintent_nothide(var): + return not isintent_hide(var) + + +def isintent_c(var): + return 'c' in var.get('intent', []) + + +def isintent_cache(var): + return 'cache' in var.get('intent', []) + + +def isintent_copy(var): + return 'copy' in var.get('intent', []) + + +def isintent_overwrite(var): + return 'overwrite' in var.get('intent', []) + + +def isintent_callback(var): + return 'callback' in var.get('intent', []) + + +def isintent_inplace(var): + return 'inplace' in var.get('intent', []) + + +def isintent_aux(var): + return 'aux' in var.get('intent', []) + + +def isintent_aligned4(var): + return 'aligned4' in var.get('intent', []) + + +def isintent_aligned8(var): + return 'aligned8' in var.get('intent', []) + + +def isintent_aligned16(var): + return 'aligned16' in var.get('intent', []) + + +isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT', + isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE', + isintent_cache: 'INTENT_CACHE', + isintent_c: 'INTENT_C', isoptional: 'OPTIONAL', + isintent_inplace: 'INTENT_INPLACE', + isintent_aligned4: 'INTENT_ALIGNED4', + isintent_aligned8: 'INTENT_ALIGNED8', + isintent_aligned16: 'INTENT_ALIGNED16', + } + + +def isprivate(var): + return 'attrspec' in var and 'private' in var['attrspec'] + + +def hasinitvalue(var): + return '=' in var + + +def hasinitvalueasstring(var): + if not hasinitvalue(var): + return 0 + return var['='][0] in ['"', "'"] + + +def hasnote(var): + return 'note' in var + + +def hasresultnote(rout): + if not 
isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return hasnote(rout['vars'][a]) + return 0 + + +def hascommon(rout): + return 'common' in rout + + +def containscommon(rout): + if hascommon(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if containscommon(b): + return 1 + return 0 + + +def containsmodule(block): + if ismodule(block): + return 1 + if not hasbody(block): + return 0 + for b in block['body']: + if containsmodule(b): + return 1 + return 0 + + +def hasbody(rout): + return 'body' in rout + + +def hascallstatement(rout): + return getcallstatement(rout) is not None + + +def istrue(var): + return 1 + + +def isfalse(var): + return 0 + + +class F2PYError(Exception): + pass + + +class throw_error: + + def __init__(self, mess): + self.mess = mess + + def __call__(self, var): + mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess) + raise F2PYError(mess) + + +def l_and(*f): + l1, l2 = 'lambda v', [] + for i in range(len(f)): + l1 = '%s,f%d=f[%d]' % (l1, i, i) + l2.append('f%d(v)' % (i)) + return eval('%s:%s' % (l1, ' and '.join(l2))) + + +def l_or(*f): + l1, l2 = 'lambda v', [] + for i in range(len(f)): + l1 = '%s,f%d=f[%d]' % (l1, i, i) + l2.append('f%d(v)' % (i)) + return eval('%s:%s' % (l1, ' or '.join(l2))) + + +def l_not(f): + return eval('lambda v,f=f:not f(v)') + + +def isdummyroutine(rout): + try: + return rout['f2pyenhancements']['fortranname'] == '' + except KeyError: + return 0 + + +def getfortranname(rout): + try: + name = rout['f2pyenhancements']['fortranname'] + if name == '': + raise KeyError + if not name: + errmess('Failed to use fortranname from %s\n' % + (rout['f2pyenhancements'])) + raise KeyError + except KeyError: + name = rout['name'] + return name + + +def getmultilineblock(rout, blockname, comment=1, counter=0): + try: + r = rout['f2pyenhancements'].get(blockname) + except KeyError: + return + if not r: + return + if counter > 0 and isinstance(r, str): + return + if isinstance(r, list): + if counter >= len(r): + return + r = r[counter] + if r[:3] == "'''": + if comment: + r = '\t/* start ' + blockname + \ + ' multiline (' + repr(counter) + ') */\n' + r[3:] + else: + r = r[3:] + if r[-3:] == "'''": + if comment: + r = r[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/' + else: + r = r[:-3] + else: + errmess("%s multiline block should end with `'''`: %s\n" + % (blockname, repr(r))) + return r + + +def getcallstatement(rout): + return getmultilineblock(rout, 'callstatement') + + +def getcallprotoargument(rout, cb_map={}): + r = getmultilineblock(rout, 'callprotoargument', comment=0) + if r: + return r + if hascallstatement(rout): + outmess( + 'warning: callstatement is defined without callprotoargument\n') + return + from .capi_maps import getctype + arg_types, arg_types2 = [], [] + if l_and(isstringfunction, l_not(isfunction_wrap))(rout): + arg_types.extend(['char*', 'size_t']) + for n in rout['args']: + var = rout['vars'][n] + if isintent_callback(var): + continue + if n in cb_map: + ctype = cb_map[n] + '_typedef' + else: + ctype = getctype(var) + if l_and(isintent_c, l_or(isscalar, iscomplex))(var): + pass + elif isstring(var): + pass + else: + if not isattr_value(var): + ctype = ctype + '*' + if ((isstring(var) + or isarrayofstrings(var) # obsolete? 
+ or isstringarray(var))): + arg_types2.append('size_t') + arg_types.append(ctype) + + proto_args = ','.join(arg_types + arg_types2) + if not proto_args: + proto_args = 'void' + return proto_args + + +def getusercode(rout): + return getmultilineblock(rout, 'usercode') + + +def getusercode1(rout): + return getmultilineblock(rout, 'usercode', counter=1) + + +def getpymethoddef(rout): + return getmultilineblock(rout, 'pymethoddef') + + +def getargs(rout): + sortargs, args = [], [] + if 'args' in rout: + args = rout['args'] + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: + sortargs.append(a) + for a in args: + if a not in sortargs: + sortargs.append(a) + else: + sortargs = rout['args'] + return args, sortargs + + +def getargs2(rout): + sortargs, args = [], rout.get('args', []) + auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a]) + and a not in args] + args = auxvars + args + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: + sortargs.append(a) + for a in args: + if a not in sortargs: + sortargs.append(a) + else: + sortargs = auxvars + rout['args'] + return args, sortargs + + +def getrestdoc(rout): + if 'f2pymultilines' not in rout: + return None + k = None + if rout['block'] == 'python module': + k = rout['block'], rout['name'] + return rout['f2pymultilines'].get(k, None) + + +def gentitle(name): + ln = (80 - len(name) - 6) // 2 + return '/*%s %s %s*/' % (ln * '*', name, ln * '*') + + +def flatlist(lst): + if isinstance(lst, list): + return reduce(lambda x, y, f=flatlist: x + f(y), lst, []) + return [lst] + + +def stripcomma(s): + if s and s[-1] == ',': + return s[:-1] + return s + + +def replace(str, d, defaultsep=''): + if isinstance(d, list): + return [replace(str, _m, defaultsep) for _m in d] + if isinstance(str, list): + return [replace(_m, d, defaultsep) for _m in str] + for k in 2 * list(d.keys()): + if k == 'separatorsfor': + continue + if 'separatorsfor' in d and k in d['separatorsfor']: + sep = d['separatorsfor'][k] + else: + sep = defaultsep + if isinstance(d[k], list): + str = str.replace('#%s#' % (k), sep.join(flatlist(d[k]))) + else: + str = str.replace('#%s#' % (k), d[k]) + return str + + +def dictappend(rd, ar): + if isinstance(ar, list): + for a in ar: + rd = dictappend(rd, a) + return rd + for k in ar.keys(): + if k[0] == '_': + continue + if k in rd: + if isinstance(rd[k], str): + rd[k] = [rd[k]] + if isinstance(rd[k], list): + if isinstance(ar[k], list): + rd[k] = rd[k] + ar[k] + else: + rd[k].append(ar[k]) + elif isinstance(rd[k], dict): + if isinstance(ar[k], dict): + if k == 'separatorsfor': + for k1 in ar[k].keys(): + if k1 not in rd[k]: + rd[k][k1] = ar[k][k1] + else: + rd[k] = dictappend(rd[k], ar[k]) + else: + rd[k] = ar[k] + return rd + + +def applyrules(rules, d, var={}): + ret = {} + if isinstance(rules, list): + for r in rules: + rr = applyrules(r, d, var) + ret = dictappend(ret, rr) + if '_break' in rr: + break + return ret + if '_check' in rules and (not rules['_check'](var)): + return ret + if 'need' in rules: + res = applyrules({'needs': rules['need']}, d, var) + if 'needs' in res: + cfuncs.append_needs(res['needs']) + + for k in rules.keys(): + if k == 'separatorsfor': + ret[k] = rules[k] + continue + if isinstance(rules[k], str): + ret[k] = replace(rules[k], d) + elif isinstance(rules[k], list): + ret[k] = [] + for i in rules[k]: + ar = applyrules({k: i}, d, var) + if k in ar: + ret[k].append(ar[k]) + elif k[0] == '_': + continue + elif isinstance(rules[k], dict): + ret[k] = [] + for k1 in 
rules[k].keys():
+                if isinstance(k1, types.FunctionType) and k1(var):
+                    if isinstance(rules[k][k1], list):
+                        for i in rules[k][k1]:
+                            if isinstance(i, dict):
+                                res = applyrules({'supertext': i}, d, var)
+                                if 'supertext' in res:
+                                    i = res['supertext']
+                                else:
+                                    i = ''
+                            ret[k].append(replace(i, d))
+                    else:
+                        i = rules[k][k1]
+                        if isinstance(i, dict):
+                            res = applyrules({'supertext': i}, d)
+                            if 'supertext' in res:
+                                i = res['supertext']
+                            else:
+                                i = ''
+                        ret[k].append(replace(i, d))
+        else:
+            errmess('applyrules: ignoring rule %s.\n' % repr(rules[k]))
+        if isinstance(ret[k], list):
+            if len(ret[k]) == 1:
+                ret[k] = ret[k][0]
+            if ret[k] == []:
+                del ret[k]
+    return ret
+
+_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
+                                     re.I).match
+_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'
+                                          r'__user__[\w_]*)', re.I).match
+
+def get_f2py_modulename(source):
+    name = None
+    with open(source) as f:
+        for line in f:
+            m = _f2py_module_name_match(line)
+            if m:
+                if _f2py_user_module_name_match(line):  # skip *__user__* names
+                    continue
+                name = m.group('name')
+                break
+    return name
+
+def getuseblocks(pymod):
+    all_uses = []
+    for inner in pymod['body']:
+        for modblock in inner['body']:
+            if modblock.get('use'):
+                all_uses.extend([x for x in modblock.get("use").keys() if "__" not in x])
+    return all_uses
+
+def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose = False):
+    """
+    Update the Fortran-to-C type mapping dictionary with new mappings and
+    return a list of successfully mapped C types.
+
+    This function integrates a new mapping dictionary into an existing
+    Fortran-to-C type mapping dictionary. It ensures that all keys are in
+    lowercase and validates new entries against a given C-to-Python mapping
+    dictionary. Redefinitions and invalid entries are reported with a warning.
+
+    Parameters
+    ----------
+    f2cmap_all : dict
+        The existing Fortran-to-C type mapping dictionary that will be updated.
+        It should be a dictionary of dictionaries where the main keys represent
+        Fortran types and the nested dictionaries map Fortran type specifiers
+        to corresponding C types.
+
+    new_map : dict
+        A dictionary containing new type mappings to be added to `f2cmap_all`.
+        The structure should be similar to `f2cmap_all`, with keys representing
+        Fortran types and values being dictionaries of type specifiers and their
+        C type equivalents.
+
+    c2py_map : dict
+        A dictionary used for validating the C types in `new_map`. It maps C
+        types to corresponding Python types and is used to ensure that the C
+        types specified in `new_map` are valid.
+
+    verbose : boolean
+        A flag used to provide information about the types mapped
+
+    Returns
+    -------
+    tuple of (dict, list)
+        The updated Fortran-to-C type mapping dictionary and a list of
+        successfully mapped C types.
+ """ + f2cmap_mapped = [] + + new_map_lower = {} + for k, d1 in new_map.items(): + d1_lower = {k1.lower(): v1 for k1, v1 in d1.items()} + new_map_lower[k.lower()] = d1_lower + + for k, d1 in new_map_lower.items(): + if k not in f2cmap_all: + f2cmap_all[k] = {} + + for k1, v1 in d1.items(): + if v1 in c2py_map: + if k1 in f2cmap_all[k]: + outmess( + "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" + % (k, k1, f2cmap_all[k][k1], v1) + ) + f2cmap_all[k][k1] = v1 + if verbose: + outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k, k1, v1)) + f2cmap_mapped.append(v1) + else: + if verbose: + errmess( + "\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" + % (k, k1, v1, v1, list(c2py_map.keys())) + ) + + return f2cmap_all, f2cmap_mapped diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/capi_maps.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/capi_maps.py new file mode 100644 index 00000000..fa477a5b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/capi_maps.py @@ -0,0 +1,819 @@ +""" +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . import __version__ +f2py_version = __version__.version + +import copy +import re +import os +from .crackfortran import markoutercomma +from . import cb_rules +from ._isocbind import iso_c_binding_map, isoc_c2pycode_map, iso_c2py_map + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * + +__all__ = [ + 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', + 'getarrdocsign', 'getinit', 'sign2map', 'routsign2map', 'modsign2map', + 'cb_sign2map', 'cb_routsign2map', 'common_sign2map', 'process_f2cmap_dict' +] + + +depargs = [] +lcb_map = {} +lcb2_map = {} +# forced casting: mainly caused by the fact that Python or Numeric +# C/APIs do not support the corresponding C types. 
+c2py_map = {'double': 'float', + 'float': 'float', # forced casting + 'long_double': 'float', # forced casting + 'char': 'int', # forced casting + 'signed_char': 'int', # forced casting + 'unsigned_char': 'int', # forced casting + 'short': 'int', # forced casting + 'unsigned_short': 'int', # forced casting + 'int': 'int', # forced casting + 'long': 'int', + 'long_long': 'long', + 'unsigned': 'int', # forced casting + 'complex_float': 'complex', # forced casting + 'complex_double': 'complex', + 'complex_long_double': 'complex', # forced casting + 'string': 'string', + 'character': 'bytes', + } + +c2capi_map = {'double': 'NPY_DOUBLE', + 'float': 'NPY_FLOAT', + 'long_double': 'NPY_LONGDOUBLE', + 'char': 'NPY_BYTE', + 'unsigned_char': 'NPY_UBYTE', + 'signed_char': 'NPY_BYTE', + 'short': 'NPY_SHORT', + 'unsigned_short': 'NPY_USHORT', + 'int': 'NPY_INT', + 'unsigned': 'NPY_UINT', + 'long': 'NPY_LONG', + 'unsigned_long': 'NPY_ULONG', + 'long_long': 'NPY_LONGLONG', + 'unsigned_long_long': 'NPY_ULONGLONG', + 'complex_float': 'NPY_CFLOAT', + 'complex_double': 'NPY_CDOUBLE', + 'complex_long_double': 'NPY_CDOUBLE', + 'string': 'NPY_STRING', + 'character': 'NPY_STRING'} + +c2pycode_map = {'double': 'd', + 'float': 'f', + 'long_double': 'g', + 'char': 'b', + 'unsigned_char': 'B', + 'signed_char': 'b', + 'short': 'h', + 'unsigned_short': 'H', + 'int': 'i', + 'unsigned': 'I', + 'long': 'l', + 'unsigned_long': 'L', + 'long_long': 'q', + 'unsigned_long_long': 'Q', + 'complex_float': 'F', + 'complex_double': 'D', + 'complex_long_double': 'G', + 'string': 'S', + 'character': 'c'} + +# https://docs.python.org/3/c-api/arg.html#building-values +c2buildvalue_map = {'double': 'd', + 'float': 'f', + 'char': 'b', + 'signed_char': 'b', + 'short': 'h', + 'int': 'i', + 'long': 'l', + 'long_long': 'L', + 'complex_float': 'N', + 'complex_double': 'N', + 'complex_long_double': 'N', + 'string': 'y', + 'character': 'c'} + +f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double', + '12': 'long_double', '16': 'long_double'}, + 'integer': {'': 'int', '1': 'signed_char', '2': 'short', + '4': 'int', '8': 'long_long', + '-1': 'unsigned_char', '-2': 'unsigned_short', + '-4': 'unsigned', '-8': 'unsigned_long_long'}, + 'complex': {'': 'complex_float', '8': 'complex_float', + '16': 'complex_double', '24': 'complex_long_double', + '32': 'complex_long_double'}, + 'complexkind': {'': 'complex_float', '4': 'complex_float', + '8': 'complex_double', '12': 'complex_long_double', + '16': 'complex_long_double'}, + 'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int', + '8': 'long_long'}, + 'double complex': {'': 'complex_double'}, + 'double precision': {'': 'double'}, + 'byte': {'': 'char'}, + } + +# Add ISO_C handling +c2pycode_map.update(isoc_c2pycode_map) +c2py_map.update(iso_c2py_map) +f2cmap_all, _ = process_f2cmap_dict(f2cmap_all, iso_c_binding_map, c2py_map) +# End ISO_C handling +f2cmap_default = copy.deepcopy(f2cmap_all) + +f2cmap_mapped = [] + +def load_f2cmap_file(f2cmap_file): + global f2cmap_all, f2cmap_mapped + + f2cmap_all = copy.deepcopy(f2cmap_default) + + if f2cmap_file is None: + # Default value + f2cmap_file = '.f2py_f2cmap' + if not os.path.isfile(f2cmap_file): + return + + # User defined additions to f2cmap_all. + # f2cmap_file must contain a dictionary of dictionaries, only. For + # example, {'real':{'low':'float'}} means that Fortran 'real(low)' is + # interpreted as C 'float'. This feature is useful for F90/95 users if + # they use PARAMETERS in type specifications. 
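# [Editorial sketch, hypothetical contents, not part of the vendored NumPy
# file.] A user-supplied .f2py_f2cmap might contain, for example:
#
#     {'real': {'dp': 'double', 'sp': 'float'}}
#
# so that Fortran `real(dp)` is wrapped as C `double` and `real(sp)` as
# C `float`, in the same spirit as the {'real':{'low':'float'}} example in
# the comment above.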
+ try: + outmess('Reading f2cmap from {!r} ...\n'.format(f2cmap_file)) + with open(f2cmap_file) as f: + d = eval(f.read().lower(), {}, {}) + f2cmap_all, f2cmap_mapped = process_f2cmap_dict(f2cmap_all, d, c2py_map, True) + outmess('Successfully applied user defined f2cmap changes\n') + except Exception as msg: + errmess('Failed to apply user defined f2cmap changes: %s. Skipping.\n' % (msg)) + + +cformat_map = {'double': '%g', + 'float': '%g', + 'long_double': '%Lg', + 'char': '%d', + 'signed_char': '%d', + 'unsigned_char': '%hhu', + 'short': '%hd', + 'unsigned_short': '%hu', + 'int': '%d', + 'unsigned': '%u', + 'long': '%ld', + 'unsigned_long': '%lu', + 'long_long': '%ld', + 'complex_float': '(%g,%g)', + 'complex_double': '(%g,%g)', + 'complex_long_double': '(%Lg,%Lg)', + 'string': '\\"%s\\"', + 'character': "'%c'", + } + +# Auxiliary functions + + +def getctype(var): + """ + Determines C type + """ + ctype = 'void' + if isfunction(var): + if 'result' in var: + a = var['result'] + else: + a = var['name'] + if a in var['vars']: + return getctype(var['vars'][a]) + else: + errmess('getctype: function %s has no return value?!\n' % a) + elif issubroutine(var): + return ctype + elif ischaracter_or_characterarray(var): + return 'character' + elif isstring_or_stringarray(var): + return 'string' + elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: + typespec = var['typespec'].lower() + f2cmap = f2cmap_all[typespec] + ctype = f2cmap[''] # default type + if 'kindselector' in var: + if '*' in var['kindselector']: + try: + ctype = f2cmap[var['kindselector']['*']] + except KeyError: + errmess('getctype: "%s %s %s" not supported.\n' % + (var['typespec'], '*', var['kindselector']['*'])) + elif 'kind' in var['kindselector']: + if typespec + 'kind' in f2cmap_all: + f2cmap = f2cmap_all[typespec + 'kind'] + try: + ctype = f2cmap[var['kindselector']['kind']] + except KeyError: + if typespec in f2cmap_all: + f2cmap = f2cmap_all[typespec] + try: + ctype = f2cmap[str(var['kindselector']['kind'])] + except KeyError: + errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n' + % (typespec, var['kindselector']['kind'], ctype, + typespec, var['kindselector']['kind'], os.getcwd())) + else: + if not isexternal(var): + errmess('getctype: No C-type found in "%s", assuming void.\n' % var) + return ctype + + +def f2cexpr(expr): + """Rewrite Fortran expression as f2py supported C expression. + + Due to the lack of a proper expression parser in f2py, this + function uses a heuristic approach that assumes that Fortran + arithmetic expressions are valid C arithmetic expressions when + mapping Fortran function calls to the corresponding C function/CPP + macros calls. 
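    [Editorial example, not in the vendored NumPy file.] Under this
    heuristic, a Fortran length expression such as `3 * len(x)` is
    rewritten to `3 * f2py_slen(x)` before being emitted into the
    generated C wrapper.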
+ + """ + # TODO: support Fortran `len` function with optional kind parameter + expr = re.sub(r'\blen\b', 'f2py_slen', expr) + return expr + + +def getstrlength(var): + if isstringfunction(var): + if 'result' in var: + a = var['result'] + else: + a = var['name'] + if a in var['vars']: + return getstrlength(var['vars'][a]) + else: + errmess('getstrlength: function %s has no return value?!\n' % a) + if not isstring(var): + errmess( + 'getstrlength: expected a signature of a string but got: %s\n' % (repr(var))) + len = '1' + if 'charselector' in var: + a = var['charselector'] + if '*' in a: + len = a['*'] + elif 'len' in a: + len = f2cexpr(a['len']) + if re.match(r'\(\s*(\*|:)\s*\)', len) or re.match(r'(\*|:)', len): + if isintent_hide(var): + errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( + repr(var))) + len = '-1' + return len + + +def getarrdims(a, var, verbose=0): + ret = {} + if isstring(var) and not isarray(var): + ret['size'] = getstrlength(var) + ret['rank'] = '0' + ret['dims'] = '' + elif isscalar(var): + ret['size'] = '1' + ret['rank'] = '0' + ret['dims'] = '' + elif isarray(var): + dim = copy.copy(var['dimension']) + ret['size'] = '*'.join(dim) + try: + ret['size'] = repr(eval(ret['size'])) + except Exception: + pass + ret['dims'] = ','.join(dim) + ret['rank'] = repr(len(dim)) + ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1] + for i in range(len(dim)): # solve dim for dependencies + v = [] + if dim[i] in depargs: + v = [dim[i]] + else: + for va in depargs: + if re.match(r'.*?\b%s\b.*' % va, dim[i]): + v.append(va) + for va in v: + if depargs.index(va) > depargs.index(a): + dim[i] = '*' + break + ret['setdims'], i = '', -1 + for d in dim: + i = i + 1 + if d not in ['*', ':', '(*)', '(:)']: + ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['setdims'], i, d) + if ret['setdims']: + ret['setdims'] = ret['setdims'][:-1] + ret['cbsetdims'], i = '', -1 + for d in var['dimension']: + i = i + 1 + if d not in ['*', ':', '(*)', '(:)']: + ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['cbsetdims'], i, d) + elif isintent_in(var): + outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' + % (d)) + ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['cbsetdims'], i, 0) + elif verbose: + errmess( + 'getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n' % (repr(a), repr(d))) + if ret['cbsetdims']: + ret['cbsetdims'] = ret['cbsetdims'][:-1] +# if not isintent_c(var): +# var['dimension'].reverse() + return ret + + +def getpydocsign(a, var): + global lcb_map + if isfunction(var): + if 'result' in var: + af = var['result'] + else: + af = var['name'] + if af in var['vars']: + return getpydocsign(af, var['vars'][af]) + else: + errmess('getctype: function %s has no return value?!\n' % af) + return '', '' + sig, sigout = a, a + opt = '' + if isintent_in(var): + opt = 'input' + elif isintent_inout(var): + opt = 'in/output' + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4] == 'out=': + out_a = k[4:] + break + init = '' + ctype = getctype(var) + + if hasinitvalue(var): + init, showinit = getinit(a, var) + init = ', optional\\n Default: %s' % showinit + if isscalar(var): + if isintent_inout(var): + sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], + c2pycode_map[ctype], init) + else: + sig = '%s : %s %s%s' % (a, opt, c2py_map[ctype], init) + sigout = '%s : %s' % (out_a, c2py_map[ctype]) + elif isstring(var): + if isintent_inout(var): + sig = 
'%s : %s rank-0 array(string(len=%s),\'c\')%s' % ( + a, opt, getstrlength(var), init) + else: + sig = '%s : %s string(len=%s)%s' % ( + a, opt, getstrlength(var), init) + sigout = '%s : string(len=%s)' % (out_a, getstrlength(var)) + elif isarray(var): + dim = var['dimension'] + rank = repr(len(dim)) + sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank, + c2pycode_map[ + ctype], + ','.join(dim), init) + if a == out_a: + sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\ + % (a, rank, c2pycode_map[ctype], ','.join(dim)) + else: + sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ + % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a) + elif isexternal(var): + ua = '' + if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: + ua = lcb2_map[lcb_map[a]]['argname'] + if not ua == a: + ua = ' => %s' % ua + else: + ua = '' + sig = '%s : call-back function%s' % (a, ua) + sigout = sig + else: + errmess( + 'getpydocsign: Could not resolve docsignature for "%s".\n' % a) + return sig, sigout + + +def getarrdocsign(a, var): + ctype = getctype(var) + if isstring(var) and (not isarray(var)): + sig = '%s : rank-0 array(string(len=%s),\'c\')' % (a, + getstrlength(var)) + elif isscalar(var): + sig = '%s : rank-0 array(%s,\'%s\')' % (a, c2py_map[ctype], + c2pycode_map[ctype],) + elif isarray(var): + dim = var['dimension'] + rank = repr(len(dim)) + sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank, + c2pycode_map[ + ctype], + ','.join(dim)) + return sig + + +def getinit(a, var): + if isstring(var): + init, showinit = '""', "''" + else: + init, showinit = '', '' + if hasinitvalue(var): + init = var['='] + showinit = init + if iscomplex(var) or iscomplexarray(var): + ret = {} + + try: + v = var["="] + if ',' in v: + ret['init.r'], ret['init.i'] = markoutercomma( + v[1:-1]).split('@,@') + else: + v = eval(v, {}, {}) + ret['init.r'], ret['init.i'] = str(v.real), str(v.imag) + except Exception: + raise ValueError( + 'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) + if isarray(var): + init = '(capi_c.r=%s,capi_c.i=%s,capi_c)' % ( + ret['init.r'], ret['init.i']) + elif isstring(var): + if not init: + init, showinit = '""', "''" + if init[0] == "'": + init = '"%s"' % (init[1:-1].replace('"', '\\"')) + if init[0] == '"': + showinit = "'%s'" % (init[1:-1]) + return init, showinit + + +def get_elsize(var): + if isstring(var) or isstringarray(var): + elsize = getstrlength(var) + # override with user-specified length when available: + elsize = var['charselector'].get('f2py_len', elsize) + return elsize + if ischaracter(var) or ischaracterarray(var): + return '1' + # for numerical types, PyArray_New* functions ignore specified + # elsize, so we just return 1 and let elsize be determined at + # runtime, see fortranobject.c + return '1' + + +def sign2map(a, var): + """ + varname,ctype,atype + init,init.r,init.i,pytype + vardebuginfo,vardebugshowvalue,varshowvalue + varrformat + + intent + """ + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4] == 'out=': + out_a = k[4:] + break + ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)} + intent_flags = [] + for f, s in isintent_dict.items(): + if f(var): + intent_flags.append('F2PY_%s' % s) + if intent_flags: + # TODO: Evaluate intent_flags here. 
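# [Editorial note, not part of the vendored NumPy file.] intent_flags gathers
# one F2PY_* name per matching predicate in isintent_dict, so for example a
# plain intent(inout) argument yields 'F2PY_INTENT_INOUT', and intent(c,inout)
# yields 'F2PY_INTENT_INOUT|F2PY_INTENT_C' after the join below.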
+ ret['intent'] = '|'.join(intent_flags) + else: + ret['intent'] = 'F2PY_INTENT_IN' + if isarray(var): + ret['varrformat'] = 'N' + elif ret['ctype'] in c2buildvalue_map: + ret['varrformat'] = c2buildvalue_map[ret['ctype']] + else: + ret['varrformat'] = 'O' + ret['init'], ret['showinit'] = getinit(a, var) + if hasinitvalue(var) and iscomplex(var) and not isarray(var): + ret['init.r'], ret['init.i'] = markoutercomma( + ret['init'][1:-1]).split('@,@') + if isexternal(var): + ret['cbnamekey'] = a + if a in lcb_map: + ret['cbname'] = lcb_map[a] + ret['maxnofargs'] = lcb2_map[lcb_map[a]]['maxnofargs'] + ret['nofoptargs'] = lcb2_map[lcb_map[a]]['nofoptargs'] + ret['cbdocstr'] = lcb2_map[lcb_map[a]]['docstr'] + ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr'] + else: + ret['cbname'] = a + errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % ( + a, list(lcb_map.keys()))) + if isstring(var): + ret['length'] = getstrlength(var) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + dim = copy.copy(var['dimension']) + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + # Debug info + if debugcapi(var): + il = [isintent_in, 'input', isintent_out, 'output', + isintent_inout, 'inoutput', isrequired, 'required', + isoptional, 'optional', isintent_hide, 'hidden', + iscomplex, 'complex scalar', + l_and(isscalar, l_not(iscomplex)), 'scalar', + isstring, 'string', isarray, 'array', + iscomplexarray, 'complex array', isstringarray, 'string array', + iscomplexfunction, 'complex function', + l_and(isfunction, l_not(iscomplexfunction)), 'function', + isexternal, 'callback', + isintent_callback, 'callback', + isintent_aux, 'auxiliary', + ] + rl = [] + for i in range(0, len(il), 2): + if il[i](var): + rl.append(il[i + 1]) + if isstring(var): + rl.append('slen(%s)=%s' % (a, ret['length'])) + if isarray(var): + ddim = ','.join( + map(lambda x, y: '%s|%s' % (x, y), var['dimension'], dim)) + rl.append('dims(%s)' % ddim) + if isexternal(var): + ret['vardebuginfo'] = 'debug-capi:%s=>%s:%s' % ( + a, ret['cbname'], ','.join(rl)) + else: + ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( + ret['ctype'], a, ret['showinit'], ','.join(rl)) + if isscalar(var): + if ret['ctype'] in cformat_map: + ret['vardebugshowvalue'] = 'debug-capi:%s=%s' % ( + a, cformat_map[ret['ctype']]) + if isstring(var): + ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( + a, a) + if isexternal(var): + ret['vardebugshowvalue'] = 'debug-capi:%s=%%p' % (a) + if ret['ctype'] in cformat_map: + ret['varshowvalue'] = '#name#:%s=%s' % (a, cformat_map[ret['ctype']]) + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isstring(var): + ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + return ret + + +def routsign2map(rout): + """ + name,NAME,begintitle,endtitle + rname,ctype,rformat + routdebugshowvalue + """ + global lcb_map + name = rout['name'] + fname = getfortranname(rout) + ret = {'name': name, + 'texname': name.replace('_', '\\_'), + 'name_lower': name.lower(), + 'NAME': name.upper(), + 'begintitle': gentitle(name), + 'endtitle': gentitle('end of %s' % name), + 'fortranname': fname, + 'FORTRANNAME': fname.upper(), + 'callstatement': getcallstatement(rout) or '', + 'usercode': getusercode(rout) or '', + 'usercode1': getusercode1(rout) or '', + } + if '_' in fname: + ret['F_FUNC'] = 'F_FUNC_US' + else: + 
ret['F_FUNC'] = 'F_FUNC' + if '_' in name: + ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' + else: + ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' + lcb_map = {} + if 'use' in rout: + for u in rout['use'].keys(): + if u in cb_rules.cb_map: + for un in cb_rules.cb_map[u]: + ln = un[0] + if 'map' in rout['use'][u]: + for k in rout['use'][u]['map'].keys(): + if rout['use'][u]['map'][k] == un[0]: + ln = k + break + lcb_map[ln] = un[1] + elif 'externals' in rout and rout['externals']: + errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( + ret['name'], repr(rout['externals']))) + ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' + if isfunction(rout): + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + ret['rname'] = a + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) + ret['ctype'] = getctype(rout['vars'][a]) + if hasresultnote(rout): + ret['resultnote'] = rout['vars'][a]['note'] + rout['vars'][a]['note'] = ['See elsewhere.'] + if ret['ctype'] in c2buildvalue_map: + ret['rformat'] = c2buildvalue_map[ret['ctype']] + else: + ret['rformat'] = 'O' + errmess('routsign2map: no c2buildvalue key for type %s\n' % + (repr(ret['ctype']))) + if debugcapi(rout): + if ret['ctype'] in cformat_map: + ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % ( + a, cformat_map[ret['ctype']]) + if isstringfunction(rout): + ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( + a, a) + if isstringfunction(rout): + ret['rlength'] = getstrlength(rout['vars'][a]) + if ret['rlength'] == '-1': + errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % ( + repr(rout['name']))) + ret['rlength'] = '10' + if hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] + return ret + + +def modsign2map(m): + """ + modulename + """ + if ismodule(m): + ret = {'f90modulename': m['name'], + 'F90MODULENAME': m['name'].upper(), + 'texf90modulename': m['name'].replace('_', '\\_')} + else: + ret = {'modulename': m['name'], + 'MODULENAME': m['name'].upper(), + 'texmodulename': m['name'].replace('_', '\\_')} + ret['restdoc'] = getrestdoc(m) or [] + if hasnote(m): + ret['note'] = m['note'] + ret['usercode'] = getusercode(m) or '' + ret['usercode1'] = getusercode1(m) or '' + if m['body']: + ret['interface_usercode'] = getusercode(m['body'][0]) or '' + else: + ret['interface_usercode'] = '' + ret['pymethoddef'] = getpymethoddef(m) or '' + if 'coutput' in m: + ret['coutput'] = m['coutput'] + if 'f2py_wrapper_output' in m: + ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] + return ret + + +def cb_sign2map(a, var, index=None): + ret = {'varname': a} + ret['varname_i'] = ret['varname'] + ret['ctype'] = getctype(var) + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + var['note'] = ['See elsewhere.'] + return ret + + +def cb_routsign2map(rout, um): + """ + name,begintitle,endtitle,argname + ctype,rctype,maxnofargs,nofoptargs,returncptr + """ + ret = {'name': 'cb_%s_in_%s' % (rout['name'], um), + 'returncptr': ''} + if isintent_callback(rout): + if '_' in rout['name']: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + 
ret['callbackname'] = '%s(%s,%s)' \ + % (F_FUNC, + rout['name'].lower(), + rout['name'].upper(), + ) + ret['static'] = 'extern' + else: + ret['callbackname'] = ret['name'] + ret['static'] = 'static' + ret['argname'] = rout['name'] + ret['begintitle'] = gentitle(ret['name']) + ret['endtitle'] = gentitle('end of %s' % ret['name']) + ret['ctype'] = getctype(rout) + ret['rctype'] = 'void' + if ret['ctype'] == 'string': + ret['rctype'] = 'void' + else: + ret['rctype'] = ret['ctype'] + if ret['rctype'] != 'void': + if iscomplexfunction(rout): + ret['returncptr'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +return_value= +#endif +""" + else: + ret['returncptr'] = 'return_value=' + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isstringfunction(rout): + ret['strlength'] = getstrlength(rout) + if isfunction(rout): + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if hasnote(rout['vars'][a]): + ret['note'] = rout['vars'][a]['note'] + rout['vars'][a]['note'] = ['See elsewhere.'] + ret['rname'] = a + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) + if iscomplexfunction(rout): + ret['rctype'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +#ctype# +#else +void +#endif +""" + else: + if hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] + nofargs = 0 + nofoptargs = 0 + if 'args' in rout and 'vars' in rout: + for a in rout['args']: + var = rout['vars'][a] + if l_or(isintent_in, isintent_inout)(var): + nofargs = nofargs + 1 + if isoptional(var): + nofoptargs = nofoptargs + 1 + ret['maxnofargs'] = repr(nofargs) + ret['nofoptargs'] = repr(nofoptargs) + if hasnote(rout) and isfunction(rout) and 'result' in rout: + ret['routnote'] = rout['note'] + rout['note'] = ['See elsewhere.'] + return ret + + +def common_sign2map(a, var): # obsolute + ret = {'varname': a, 'ctype': getctype(var)} + if isstringarray(var): + ret['ctype'] = 'char' + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + elif isstring(var): + ret['size'] = getstrlength(var) + ret['rank'] = '1' + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + var['note'] = ['See elsewhere.'] + # for strings this returns 0-rank but actually is 1-rank + ret['arrdocstr'] = getarrdocsign(a, var) + return ret diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/cb_rules.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/cb_rules.py new file mode 100644 index 00000000..721e075b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/cb_rules.py @@ -0,0 +1,644 @@ +""" +Build call-back mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . 
import __version__ +from .auxfuncs import ( + applyrules, debugcapi, dictappend, errmess, getargs, hasnote, isarray, + iscomplex, iscomplexarray, iscomplexfunction, isfunction, isintent_c, + isintent_hide, isintent_in, isintent_inout, isintent_nothide, + isintent_out, isoptional, isrequired, isscalar, isstring, + isstringfunction, issubroutine, l_and, l_not, l_or, outmess, replace, + stripcomma, throw_error +) +from . import cfuncs + +f2py_version = __version__.version + + +################## Rules for callback function ############## + +cb_routine_rules = { + 'cbtypedefs': 'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);', + 'body': """ +#begintitle# +typedef struct { + PyObject *capi; + PyTupleObject *args_capi; + int nofargs; + jmp_buf jmpbuf; +} #name#_t; + +#if defined(F2PY_THREAD_LOCAL_DECL) && !defined(F2PY_USE_PYTHON_TLS) + +static F2PY_THREAD_LOCAL_DECL #name#_t *_active_#name# = NULL; + +static #name#_t *swap_active_#name#(#name#_t *ptr) { + #name#_t *prev = _active_#name#; + _active_#name# = ptr; + return prev; +} + +static #name#_t *get_active_#name#(void) { + return _active_#name#; +} + +#else + +static #name#_t *swap_active_#name#(#name#_t *ptr) { + char *key = "__f2py_cb_#name#"; + return (#name#_t *)F2PySwapThreadLocalCallbackPtr(key, ptr); +} + +static #name#_t *get_active_#name#(void) { + char *key = "__f2py_cb_#name#"; + return (#name#_t *)F2PyGetThreadLocalCallbackPtr(key); +} + +#endif + +/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/ +#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) { + #name#_t cb_local = { NULL, NULL, 0 }; + #name#_t *cb = NULL; + PyTupleObject *capi_arglist = NULL; + PyObject *capi_return = NULL; + PyObject *capi_tmp = NULL; + PyObject *capi_arglist_list = NULL; + int capi_j,capi_i = 0; + int capi_longjmp_ok = 1; +#decl# +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_start_clock(); +#endif + cb = get_active_#name#(); + if (cb == NULL) { + capi_longjmp_ok = 0; + cb = &cb_local; + } + capi_arglist = cb->args_capi; + CFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\"); + CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi); + if (cb->capi==NULL) { + capi_longjmp_ok = 0; + cb->capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\"); + CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi); + } + if (cb->capi==NULL) { + PyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\"); + goto capi_fail; + } + if (F2PyCapsule_Check(cb->capi)) { + #name#_typedef #name#_cptr; + #name#_cptr = F2PyCapsule_AsVoidPtr(cb->capi); + #returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#); + #return# + } + if (capi_arglist==NULL) { + capi_longjmp_ok = 0; + capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\"); + if (capi_tmp) { + capi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp); + Py_DECREF(capi_tmp); + if (capi_arglist==NULL) { + PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\"); + goto capi_fail; + } + } else { + PyErr_Clear(); + capi_arglist = (PyTupleObject *)Py_BuildValue(\"()\"); + } + } + if (capi_arglist == NULL) { + PyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\"); + goto capi_fail; + } +#setdims# +#ifdef PYPY_VERSION +#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) + capi_arglist_list = 
PySequence_List(capi_arglist); + if (capi_arglist_list == NULL) goto capi_fail; +#else +#define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) +#endif +#pyobjfrom# +#undef CAPI_ARGLIST_SETITEM +#ifdef PYPY_VERSION + CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list); +#else + CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); +#endif + CFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_start_call_clock(); +#endif +#ifdef PYPY_VERSION + capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist_list); + Py_DECREF(capi_arglist_list); + capi_arglist_list = NULL; +#else + capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist); +#endif +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_call_clock(); +#endif + CFUNCSMESSPY(\"cb:capi_return=\",capi_return); + if (capi_return == NULL) { + fprintf(stderr,\"capi_return is NULL\\n\"); + goto capi_fail; + } + if (capi_return == Py_None) { + Py_DECREF(capi_return); + capi_return = Py_BuildValue(\"()\"); + } + else if (!PyTuple_Check(capi_return)) { + capi_return = Py_BuildValue(\"(N)\",capi_return); + } + capi_j = PyTuple_Size(capi_return); + capi_i = 0; +#frompyobj# + CFUNCSMESS(\"cb:#name#:successful\\n\"); + Py_DECREF(capi_return); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_clock(); +#endif + goto capi_return_pt; +capi_fail: + fprintf(stderr,\"Call-back #name# failed.\\n\"); + Py_XDECREF(capi_return); + Py_XDECREF(capi_arglist_list); + if (capi_longjmp_ok) { + longjmp(cb->jmpbuf,-1); + } +capi_return_pt: + ; +#return# +} +#endtitle# +""", + 'need': ['setjmp.h', 'CFUNCSMESS', 'F2PY_THREAD_LOCAL_DECL'], + 'maxnofargs': '#maxnofargs#', + 'nofoptargs': '#nofoptargs#', + 'docstr': """\ + def #argname#(#docsignature#): return #docreturn#\\n\\ +#docstrsigns#""", + 'latexdocstr': """ +{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}} +#routnote# + +#latexdocstrsigns#""", + 'docstrshort': 'def #argname#(#docsignature#): return #docreturn#' +} +cb_rout_rules = [ + { # Init + 'separatorsfor': {'decl': '\n', + 'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n', + 'args_td': ',', 'optargs_td': '', + 'args_nm': ',', 'optargs_nm': '', + 'frompyobj': '\n', 'setdims': '\n', + 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', + }, + 'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/', + 'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/', + 'args_td': [], 'optargs_td': '', 'strarglens_td': '', + 'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '', + 'noargs': '', + 'setdims': '/*setdims*/', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': ' Required arguments:', + 'docstropt': ' Optional arguments:', + 'docstrout': ' Return objects:', + 'docstrcbs': ' Call-back functions:', + 'docreturn': '', 'docsign': '', 'docsignopt': '', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, + }, { # Function + 'decl': ' #ctype# return_value = 0;', + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'}, + '''\ + if (capi_j>capi_i) { + GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#, + "#ctype#_from_pyobj failed in converting 
return_value of" + " call-back function #name# to C #ctype#\\n"); + } else { + fprintf(stderr,"Warning: call-back function #name# did not provide" + " return value (index=%d, type=#ctype#)\\n",capi_i); + }''', + {debugcapi: + ' fprintf(stderr,"#showvalueformat#.\\n",return_value);'} + ], + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'], + 'return': ' return return_value;', + '_check': l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction)) + }, + { # String function + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, + 'args': '#ctype# return_value,int return_value_len', + 'args_nm': 'return_value,&return_value_len', + 'args_td': '#ctype# ,int', + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->\\"");'}, + """\ + if (capi_j>capi_i) { + GETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len); + } else { + fprintf(stderr,"Warning: call-back function #name# did not provide" + " return value (index=%d, type=#ctype#)\\n",capi_i); + }""", + {debugcapi: + ' fprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} + ], + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, + 'string.h', 'GETSTRFROMPYTUPLE'], + 'return': 'return;', + '_check': isstringfunction + }, + { # Complex function + 'optargs': """ +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# *return_value +#endif +""", + 'optargs_nm': """ +#ifndef F2PY_CB_RETURNCOMPLEX +return_value +#endif +""", + 'optargs_td': """ +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# * +#endif +""", + 'decl': """ +#ifdef F2PY_CB_RETURNCOMPLEX + #ctype# return_value = {0, 0}; +#endif +""", + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'}, + """\ + if (capi_j>capi_i) { +#ifdef F2PY_CB_RETURNCOMPLEX + GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#, + \"#ctype#_from_pyobj failed in converting return_value of call-back\" + \" function #name# to C #ctype#\\n\"); +#else + GETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#, + \"#ctype#_from_pyobj failed in converting return_value of call-back\" + \" function #name# to C #ctype#\\n\"); +#endif + } else { + fprintf(stderr, + \"Warning: call-back function #name# did not provide\" + \" return value (index=%d, type=#ctype#)\\n\",capi_i); + }""", + {debugcapi: """\ +#ifdef F2PY_CB_RETURNCOMPLEX + fprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i); +#else + fprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i); +#endif +"""} + ], + 'return': """ +#ifdef F2PY_CB_RETURNCOMPLEX + return return_value; +#else + return; +#endif +""", + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, + 'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'], + '_check': iscomplexfunction + }, + {'docstrout': ' #pydocsignout#', + 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasnote: '--- #note#'}], + 'docreturn': '#rname#,', + '_check': isfunction}, + {'_check': issubroutine, 'return': 'return;'} +] + +cb_arg_rules = [ + { # Doc + 'docstropt': {l_and(isoptional, isintent_nothide): ' #pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): ' #pydocsign#'}, + 'docstrout': {isintent_out: ' #pydocsignout#'}, + 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrout': {isintent_out: 
['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide): '--- #note#', + l_and(hasnote, isintent_nothide): '--- See above.'}]}, + 'docsign': {l_and(isrequired, isintent_nothide): '#varname#,'}, + 'docsignopt': {l_and(isoptional, isintent_nothide): '#varname#,'}, + 'depend': '' + }, + { + 'args': { + l_and(isscalar, isintent_c): '#ctype# #varname_i#', + l_and(isscalar, l_not(isintent_c)): '#ctype# *#varname_i#_cb_capi', + isarray: '#ctype# *#varname_i#', + isstring: '#ctype# #varname_i#' + }, + 'args_nm': { + l_and(isscalar, isintent_c): '#varname_i#', + l_and(isscalar, l_not(isintent_c)): '#varname_i#_cb_capi', + isarray: '#varname_i#', + isstring: '#varname_i#' + }, + 'args_td': { + l_and(isscalar, isintent_c): '#ctype#', + l_and(isscalar, l_not(isintent_c)): '#ctype# *', + isarray: '#ctype# *', + isstring: '#ctype#' + }, + 'need': {l_or(isscalar, isarray, isstring): '#ctype#'}, + # untested with multiple args + 'strarglens': {isstring: ',int #varname_i#_cb_len'}, + 'strarglens_td': {isstring: ',int'}, # untested with multiple args + # untested with multiple args + 'strarglens_nm': {isstring: ',#varname_i#_cb_len'}, + }, + { # Scalars + 'decl': {l_not(isintent_c): ' #ctype# #varname_i#=(*#varname_i#_cb_capi);'}, + 'error': {l_and(isintent_c, isintent_out, + throw_error('intent(c,out) is forbidden for callback scalar arguments')): + ''}, + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'}, + {isintent_out: + ' if (capi_j>capi_i)\n GETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, + {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)): + ' fprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, + {l_and(debugcapi, l_and(l_not(iscomplex), l_not( isintent_c))): + ' fprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, + {l_and(debugcapi, l_and(iscomplex, isintent_c)): + ' fprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, + {l_and(debugcapi, l_and(iscomplex, l_not( isintent_c))): + ' fprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, + ], + 'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, + {debugcapi: 'CFUNCSMESS'}], + '_check': isscalar + }, { + 'pyobjfrom': [{isintent_in: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1(#varname_i#))) + goto capi_fail;"""}, + {isintent_inout: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) + goto capi_fail;"""}], + 'need': [{isintent_in: 'pyobj_from_#ctype#1'}, + {isintent_inout: 'pyarr_from_p_#ctype#1'}, + {iscomplex: '#ctype#'}], + '_check': l_and(isscalar, isintent_nothide), + '_optional': '' + }, { # String + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->\\"");'}, + """ if (capi_j>capi_i) + GETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""", + {debugcapi: + ' fprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'}, + ], + 'need': ['#ctype#', 'GETSTRFROMPYTUPLE', + {debugcapi: 'CFUNCSMESS'}, 'string.h'], + '_check': l_and(isstring, isintent_out) + }, { + 'pyobjfrom': [ + {debugcapi: + (' fprintf(stderr,"debug-capi:cb:#varname#=#showvalueformat#:' + '%d:\\n",#varname_i#,#varname_i#_cb_len);')}, + {isintent_in: """\ + if (cb->nofargs>capi_i) + if 
(CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) + goto capi_fail;"""}, + {isintent_inout: """\ + if (cb->nofargs>capi_i) { + int #varname_i#_cb_dims[] = {#varname_i#_cb_len}; + if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) + goto capi_fail; + }"""}], + 'need': [{isintent_in: 'pyobj_from_#ctype#1size'}, + {isintent_inout: 'pyarr_from_p_#ctype#1'}], + '_check': l_and(isstring, isintent_nothide), + '_optional': '' + }, + # Array ... + { + 'decl': ' npy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};', + 'setdims': ' #cbsetdims#;', + '_check': isarray, + '_depend': '' + }, + { + 'pyobjfrom': [{debugcapi: ' fprintf(stderr,"debug-capi:cb:#varname#\\n");'}, + {isintent_c: """\ + if (cb->nofargs>capi_i) { + /* tmp_arr will be inserted to capi_arglist_list that will be + destroyed when leaving callback function wrapper together + with tmp_arr. */ + PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type, + #rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,#elsize#, + NPY_ARRAY_CARRAY,NULL); +""", + l_not(isintent_c): """\ + if (cb->nofargs>capi_i) { + /* tmp_arr will be inserted to capi_arglist_list that will be + destroyed when leaving callback function wrapper together + with tmp_arr. */ + PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type, + #rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,#elsize#, + NPY_ARRAY_FARRAY,NULL); +""", + }, + """ + if (tmp_arr==NULL) + goto capi_fail; + if (CAPI_ARGLIST_SETITEM(capi_i++,(PyObject *)tmp_arr)) + goto capi_fail; +}"""], + '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)), + '_optional': '', + }, { + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'}, + """ if (capi_j>capi_i) { + PyArrayObject *rv_cb_arr = NULL; + if ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail; + rv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""", + {isintent_c: '|F2PY_INTENT_C'}, + """,capi_tmp); + if (rv_cb_arr == NULL) { + fprintf(stderr,\"rv_cb_arr is NULL\\n\"); + goto capi_fail; + } + MEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr)); + if (capi_tmp != (PyObject *)rv_cb_arr) { + Py_DECREF(rv_cb_arr); + } + }""", + {debugcapi: ' fprintf(stderr,"<-.\\n");'}, + ], + 'need': ['MEMCOPY', {iscomplexarray: '#ctype#'}], + '_check': l_and(isarray, isintent_out) + }, { + 'docreturn': '#varname#,', + '_check': isintent_out + } +] + +################## Build call-back module ############# +cb_map = {} + + +def buildcallbacks(m): + cb_map[m['name']] = [] + for bi in m['body']: + if bi['block'] == 'interface': + for b in bi['body']: + if b: + buildcallback(b, m['name']) + else: + errmess('warning: empty body for %s\n' % (m['name'])) + + +def buildcallback(rout, um): + from . 
import capi_maps + + outmess(' Constructing call-back function "cb_%s_in_%s"\n' % + (rout['name'], um)) + args, depargs = getargs(rout) + capi_maps.depargs = depargs + var = rout['vars'] + vrd = capi_maps.cb_routsign2map(rout, um) + rd = dictappend({}, vrd) + cb_map[um].append([rout['name'], rd['name']]) + for r in cb_rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar = applyrules(r, vrd, rout) + rd = dictappend(rd, ar) + savevrd = {} + for i, a in enumerate(args): + vrd = capi_maps.cb_sign2map(a, var[a], index=i) + savevrd[a] = vrd + for r in cb_arg_rules: + if '_depend' in r: + continue + if '_optional' in r and isoptional(var[a]): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in args: + vrd = savevrd[a] + for r in cb_arg_rules: + if '_depend' in r: + continue + if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + vrd = savevrd[a] + for r in cb_arg_rules: + if '_depend' not in r: + continue + if '_optional' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + if 'args' in rd and 'optargs' in rd: + if isinstance(rd['optargs'], list): + rd['optargs'] = rd['optargs'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_nm'] = rd['optargs_nm'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_td'] = rd['optargs_td'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + if isinstance(rd['docreturn'], list): + rd['docreturn'] = stripcomma( + replace('#docreturn#', {'docreturn': rd['docreturn']})) + optargs = stripcomma(replace('#docsignopt#', + {'docsignopt': rd['docsignopt']} + )) + if optargs == '': + rd['docsignature'] = stripcomma( + replace('#docsign#', {'docsign': rd['docsign']})) + else: + rd['docsignature'] = replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignature'] = rd['docsignature'].replace('_', '\\_') + rd['latexdocsignature'] = rd['latexdocsignature'].replace(',', ', ') + rd['docstrsigns'] = [] + rd['latexdocstrsigns'] = [] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns'] = rd['docstrsigns'] + rd[k] + k = 'latex' + k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ + ['\\begin{description}'] + rd[k][1:] +\ + ['\\end{description}'] + if 'args' not in rd: + rd['args'] = '' + rd['args_td'] = '' + rd['args_nm'] = '' + if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): + rd['noargs'] = 'void' + + ar = applyrules(cb_routine_rules, rd) + cfuncs.callbacks[rd['name']] = ar['body'] + if isinstance(ar['need'], str): + ar['need'] = [ar['need']] + + if 'need' in rd: + for t in cfuncs.typedefs.keys(): + if t in rd['need']: + ar['need'].append(t) + + cfuncs.typedefs_generated[rd['name'] + '_typedef'] = ar['cbtypedefs'] + ar['need'].append(rd['name'] + '_typedef') + cfuncs.needs[rd['name']] = ar['need'] + + capi_maps.lcb2_map[rd['name']] = {'maxnofargs': ar['maxnofargs'], + 'nofoptargs': ar['nofoptargs'], + 'docstr': ar['docstr'], + 'latexdocstr': 
ar['latexdocstr'], + 'argname': rd['argname'] + } + outmess(' %s\n' % (ar['docstrshort'])) + return +################## Build call-back function ############# diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/cfuncs.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/cfuncs.py new file mode 100644 index 00000000..4328a6e5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/cfuncs.py @@ -0,0 +1,1536 @@ +#!/usr/bin/env python3 +""" +C declarations, CPP macros, and C functions for f2py2e. +Only required declarations/macros/functions will be used. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import sys +import copy + +from . import __version__ + +f2py_version = __version__.version +errmess = sys.stderr.write + +##################### Definitions ################## + +outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [], + 'userincludes': [], + 'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [], + 'commonhooks': []} +needs = {} +includes0 = {'includes0': '/*need_includes0*/'} +includes = {'includes': '/*need_includes*/'} +userincludes = {'userincludes': '/*need_userincludes*/'} +typedefs = {'typedefs': '/*need_typedefs*/'} +typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'} +cppmacros = {'cppmacros': '/*need_cppmacros*/'} +cfuncs = {'cfuncs': '/*need_cfuncs*/'} +callbacks = {'callbacks': '/*need_callbacks*/'} +f90modhooks = {'f90modhooks': '/*need_f90modhooks*/', + 'initf90modhooksstatic': '/*initf90modhooksstatic*/', + 'initf90modhooksdynamic': '/*initf90modhooksdynamic*/', + } +commonhooks = {'commonhooks': '/*need_commonhooks*/', + 'initcommonhooks': '/*need_initcommonhooks*/', + } + +############ Includes ################### + +includes0['math.h'] = '#include <math.h>' +includes0['string.h'] = '#include <string.h>' +includes0['setjmp.h'] = '#include <setjmp.h>' + +includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API +#include "arrayobject.h"''' +includes['npy_math.h'] = '#include "numpy/npy_math.h"' + +includes['arrayobject.h'] = '#include "fortranobject.h"' +includes['stdarg.h'] = '#include <stdarg.h>' + +############# Type definitions ############### + +typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;' +typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;' +typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;' +typedefs['signed_char'] = 'typedef signed char signed_char;' +typedefs['long_long'] = """ +#if defined(NPY_OS_WIN32) +typedef __int64 long_long; +#else +typedef long long long_long; +typedef unsigned long long unsigned_long_long; +#endif +""" +typedefs['unsigned_long_long'] = """ +#if defined(NPY_OS_WIN32) +typedef unsigned __int64 unsigned_long_long; +#else +typedef unsigned long long unsigned_long_long; +#endif +""" +typedefs['long_double'] = """ +#ifndef _LONG_DOUBLE +typedef long double long_double; +#endif +""" +typedefs[ + 'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;' +typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;' +typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;' +typedefs['string'] = """typedef char * string;""" +typedefs['character'] = """typedef char character;""" + + +############### CPP macros #################### +cppmacros['CFUNCSMESS'] = 
""" +#ifdef DEBUGCFUNCS +#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess); +#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\ + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); +#else +#define CFUNCSMESS(mess) +#define CFUNCSMESSPY(mess,obj) +#endif +""" +cppmacros['F_FUNC'] = """ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) _##F +#else +#define F_FUNC(f,F) _##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) _##F##_ +#else +#define F_FUNC(f,F) _##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) F +#else +#define F_FUNC(f,F) f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) F##_ +#else +#define F_FUNC(f,F) f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_FUNC_US(f,F) F_FUNC(f##_,F##_) +#else +#define F_FUNC_US(f,F) F_FUNC(f,F) +#endif +""" +cppmacros['F_WRAPPEDFUNC'] = """ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) +#else +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) +#endif +""" +cppmacros['F_MODFUNC'] = """ +#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f +#else +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f +#else +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) f ## .in. ## m +#else +#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _ +#endif +#endif +/* +#if defined(UPPERCASE_FORTRAN) +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) +#else +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) +#endif +*/ + +#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) +""" +cppmacros['SWAPUNSAFE'] = """ +#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) +""" +cppmacros['SWAP'] = """ +#define SWAP(a,b,t) {\\ + t *c;\\ + c = a;\\ + a = b;\\ + b = c;} +""" +# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & +# NPY_ARRAY_C_CONTIGUOUS)' +cppmacros['PRINTPYOBJERR'] = """ +#define PRINTPYOBJERR(obj)\\ + fprintf(stderr,\"#modulename#.error is related to \");\\ + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); +""" +cppmacros['MINMAX'] = """ +#ifndef max +#define max(a,b) ((a > b) ? (a) : (b)) +#endif +#ifndef min +#define min(a,b) ((a < b) ? (a) : (b)) +#endif +#ifndef MAX +#define MAX(a,b) ((a > b) ? 
(a) : (b)) +#endif +#ifndef MIN +#define MIN(a,b) ((a < b) ? (a) : (b)) +#endif +""" +cppmacros['len..'] = """ +/* See fortranobject.h for definitions. The macros here are provided for BC. */ +#define rank f2py_rank +#define shape f2py_shape +#define fshape f2py_shape +#define len f2py_len +#define flen f2py_flen +#define slen f2py_slen +#define size f2py_size +""" +cppmacros['pyobj_from_char1'] = r""" +#define pyobj_from_char1(v) (PyLong_FromLong(v)) +""" +cppmacros['pyobj_from_short1'] = r""" +#define pyobj_from_short1(v) (PyLong_FromLong(v)) +""" +needs['pyobj_from_int1'] = ['signed_char'] +cppmacros['pyobj_from_int1'] = r""" +#define pyobj_from_int1(v) (PyLong_FromLong(v)) +""" +cppmacros['pyobj_from_long1'] = r""" +#define pyobj_from_long1(v) (PyLong_FromLong(v)) +""" +needs['pyobj_from_long_long1'] = ['long_long'] +cppmacros['pyobj_from_long_long1'] = """ +#ifdef HAVE_LONG_LONG +#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) +#else +#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. +#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) +#endif +""" +needs['pyobj_from_long_double1'] = ['long_double'] +cppmacros['pyobj_from_long_double1'] = """ +#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))""" +cppmacros['pyobj_from_double1'] = """ +#define pyobj_from_double1(v) (PyFloat_FromDouble(v))""" +cppmacros['pyobj_from_float1'] = """ +#define pyobj_from_float1(v) (PyFloat_FromDouble(v))""" +needs['pyobj_from_complex_long_double1'] = ['complex_long_double'] +cppmacros['pyobj_from_complex_long_double1'] = """ +#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_complex_double1'] = ['complex_double'] +cppmacros['pyobj_from_complex_double1'] = """ +#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_complex_float1'] = ['complex_float'] +cppmacros['pyobj_from_complex_float1'] = """ +#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_string1'] = ['string'] +cppmacros['pyobj_from_string1'] = """ +#define pyobj_from_string1(v) (PyUnicode_FromString((char *)v))""" +needs['pyobj_from_string1size'] = ['string'] +cppmacros['pyobj_from_string1size'] = """ +#define pyobj_from_string1size(v,len) (PyUnicode_FromStringAndSize((char *)v, len))""" +needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +cppmacros['TRYPYARRAYTEMPLATE'] = """ +/* New SciPy */ +#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break; +#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break; +#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break; + +#define TRYPYARRAYTEMPLATE(ctype,typecode) \\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\ + switch (PyArray_TYPE(arr)) {\\ + case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BOOL: *(npy_bool 
*)(PyArray_DATA(arr))=(*v!=0); break;\\ + case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\ + default: return -2;\\ + };\\ + return 1 +""" + +needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """ +#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break; +#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (PyArray_DESCR(arr)->type==typecode) {\\ + *(ctype *)(PyArray_DATA(arr))=(*v).r;\\ + *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\ + return 1;\\ + }\\ + switch (PyArray_TYPE(arr)) {\\ + case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_double *)(PyArray_DATA(arr)+sizeof(npy_double))=(*v).i;\\ + break;\\ + case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_float *)(PyArray_DATA(arr)+sizeof(npy_float))=(*v).i;\\ + break;\\ + case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;\\ + break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ + default: return -2;\\ + };\\ + return -1; +""" +# cppmacros['NUMFROMARROBJ']=""" +# define NUMFROMARROBJ(typenum,ctype) \\ +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto 
capi_fail;\\ +# } else {\\ +# (PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } +# """ +# XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ +# cppmacros['CNUMFROMARROBJ']=""" +# define CNUMFROMARROBJ(typenum,ctype) \\ +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto capi_fail;\\ +# } else {\\ +# (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } +# """ + + +needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR'] +cppmacros['GETSTRFROMPYTUPLE'] = """ +#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\ + PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\ + if (rv_cb_str == NULL)\\ + goto capi_fail;\\ + if (PyBytes_Check(rv_cb_str)) {\\ + str[len-1]='\\0';\\ + STRINGCOPYN((str),PyBytes_AS_STRING((PyBytesObject*)rv_cb_str),(len));\\ + } else {\\ + PRINTPYOBJERR(rv_cb_str);\\ + PyErr_SetString(#modulename#_error,\"string object expected\");\\ + goto capi_fail;\\ + }\\ + } +""" +cppmacros['GETSCALARFROMPYTUPLE'] = """ +#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\ + if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\ + if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\ + goto capi_fail;\\ + } +""" + +cppmacros['FAILNULL'] = """\ +#define FAILNULL(p) do { \\ + if ((p) == NULL) { \\ + PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\ + goto capi_fail; \\ + } \\ +} while (0) +""" +needs['MEMCOPY'] = ['string.h', 'FAILNULL'] +cppmacros['MEMCOPY'] = """ +#define MEMCOPY(to,from,n)\\ + do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0) +""" +cppmacros['STRINGMALLOC'] = """ +#define STRINGMALLOC(str,len)\\ + if ((str = (string)malloc(len+1)) == NULL) {\\ + PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\ + goto capi_fail;\\ + } else {\\ + (str)[len] = '\\0';\\ + } +""" +cppmacros['STRINGFREE'] = """ +#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0) +""" +needs['STRINGPADN'] = ['string.h'] +cppmacros['STRINGPADN'] = """ +/* +STRINGPADN replaces null values with padding values from the right. + +`to` must have size of at least N bytes. + +If the `to[N-1]` has null value, then replace it and all the +preceding nulls with the given padding. + +STRINGPADN(to, N, PADDING, NULLVALUE) is an inverse operation. +*/ +#define STRINGPADN(to, N, NULLVALUE, PADDING) \\ + do { \\ + int _m = (N); \\ + char *_to = (to); \\ + for (_m -= 1; _m >= 0 && _to[_m] == NULLVALUE; _m--) { \\ + _to[_m] = PADDING; \\ + } \\ + } while (0) +""" +needs['STRINGCOPYN'] = ['string.h', 'FAILNULL'] +cppmacros['STRINGCOPYN'] = """ +/* +STRINGCOPYN copies N bytes. + +`to` and `from` buffers must have sizes of at least N bytes. 
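+If `from` holds fewer than N bytes before its first null, strncpy itself null-pads the rest of `to`, so at most N bytes are ever written.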
+*/ +#define STRINGCOPYN(to,from,N) \\ + do { \\ + int _m = (N); \\ + char *_to = (to); \\ + char *_from = (from); \\ + FAILNULL(_to); FAILNULL(_from); \\ + (void)strncpy(_to, _from, _m); \\ + } while (0) +""" +needs['STRINGCOPY'] = ['string.h', 'FAILNULL'] +cppmacros['STRINGCOPY'] = """ +#define STRINGCOPY(to,from)\\ + do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0) +""" +cppmacros['CHECKGENERIC'] = """ +#define CHECKGENERIC(check,tcheck,name) \\ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKARRAY'] = """ +#define CHECKARRAY(check,tcheck,name) \\ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKSTRING'] = """ +#define CHECKSTRING(check,tcheck,name,show,var)\\ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ + PyErr_SetString(#modulename#_error, errstring);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKSCALAR'] = """ +#define CHECKSCALAR(check,tcheck,name,show,var)\\ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ + PyErr_SetString(#modulename#_error,errstring);\\ + /*goto capi_fail;*/\\ + } else """ +# cppmacros['CHECKDIMS']=""" +# define CHECKDIMS(dims,rank) \\ +# for (int i=0;i<(rank);i++)\\ +# if (dims[i]<0) {\\ +# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ +# goto capi_fail;\\ +# } +# """ +cppmacros[ + 'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' +cppmacros['OLDPYNUM'] = """ +#ifdef OLDPYNUM +#error You need to install NumPy version 0.13 or higher. 
See https://scipy.org/install.html +#endif +""" +cppmacros["F2PY_THREAD_LOCAL_DECL"] = """ +#ifndef F2PY_THREAD_LOCAL_DECL +#if defined(_MSC_VER) +#define F2PY_THREAD_LOCAL_DECL __declspec(thread) +#elif defined(NPY_OS_MINGW) +#define F2PY_THREAD_LOCAL_DECL __thread +#elif defined(__STDC_VERSION__) \\ + && (__STDC_VERSION__ >= 201112L) \\ + && !defined(__STDC_NO_THREADS__) \\ + && (!defined(__GLIBC__) || __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 12)) \\ + && !defined(NPY_OS_OPENBSD) && !defined(NPY_OS_HAIKU) +/* __STDC_NO_THREADS__ was first defined in a maintenance release of glibc 2.12, + see https://lists.gnu.org/archive/html/commit-hurd/2012-07/msg00180.html, + so `!defined(__STDC_NO_THREADS__)` may give false positive for the existence + of `threads.h` when using an older release of glibc 2.12 + See gh-19437 for details on OpenBSD */ +#include <threads.h> +#define F2PY_THREAD_LOCAL_DECL thread_local +#elif defined(__GNUC__) \\ + && (__GNUC__ > 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 4))) +#define F2PY_THREAD_LOCAL_DECL __thread +#endif +#endif +""" +################# C functions ############### + +cfuncs['calcarrindex'] = """ +static int calcarrindex(int *i,PyArrayObject *arr) { + int k,ii = i[0]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */ + return ii; +}""" +cfuncs['calcarrindextr'] = """ +static int calcarrindextr(int *i,PyArrayObject *arr) { + int k,ii = i[PyArray_NDIM(arr)-1]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */ + return ii; +}""" +cfuncs['forcomb'] = """ +static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; +static int initforcomb(npy_intp *dims,int nd,int tr) { + int k; + if (dims==NULL) return 0; + if (nd<0) return 0; + forcombcache.nd = nd; + forcombcache.d = dims; + forcombcache.tr = tr; + if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; + if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; + for (k=1;k<nd;k++) { + forcombcache.i[k] = forcombcache.i_tr[nd-k-1] = 0; + } + forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1; + return 1; +} +static int *nextforcomb(void) { + int j,*i,*i_tr,k; + int nd=forcombcache.nd; + if ((i=forcombcache.i) == NULL) return NULL; + if ((i_tr=forcombcache.i_tr) == NULL) return NULL; + if (forcombcache.d == NULL) return NULL; + i[0]++; + if (i[0]==forcombcache.d[0]) { + j=1; + while ((j<nd) && (i[j]==forcombcache.d[j]-1)) j++; + if (j==nd) { + free(i); + free(i_tr); + return NULL; + } + for (k=0;k<j;k++) i[k] = i_tr[nd-k-1] = 0; + i[j]++; + i_tr[nd-j-1]++; + } else + i_tr[nd-1]++; + if (forcombcache.tr) return i_tr; + return i; +} +""" +needs['try_pyarr_from_string'] = ['STRINGCOPYN'] +cfuncs['try_pyarr_from_string'] = """ +/* + try_pyarr_from_string copies str[:len(obj)] to the data of an `ndarray`. + + If obj is an `ndarray`, it is assumed to be contiguous. + + If the specified len==-1, str must be null-terminated. +*/ +static int try_pyarr_from_string(PyObject *obj, + const string str, const int len) { +#ifdef DEBUGCFUNCS +fprintf(stderr, "try_pyarr_from_string(str='%s', len=%d, obj=%p)\\n", + (char*)str,len, obj); +#endif + if (!obj) return -2; /* Object missing */ + if (obj == Py_None) return -1; /* None */ + if (!PyArray_Check(obj)) goto capi_fail; /* not an ndarray */ + if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + assert(ISCONTIGUOUS(arr)); + string buf = PyArray_DATA(arr); + npy_intp n = len; + if (n == -1) { + /* Assuming null-terminated str. */ + n = strlen(str); + } + if (n > PyArray_NBYTES(arr)) { + n = PyArray_NBYTES(arr); + } + STRINGCOPYN(buf, str, n); + return 1; + } +capi_fail: + PRINTPYOBJERR(obj); + PyErr_SetString(#modulename#_error, \"try_pyarr_from_string failed\"); + return 0; +} +""" +needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN'] +cfuncs['string_from_pyobj'] = """ +/* + Create a new string buffer `str` of at most length `len` from a + Python string-like object `obj`. + + The string buffer has given size (len) or the size of inistr when len==-1. + + The string buffer is padded with blanks: in Fortran, trailing blanks + are insignificant contrary to C nulls. 
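+  If len != -1 and the input is longer than len, the input is truncated; if it is shorter, the tail of the buffer is null-filled and the caller later replaces those nulls with blanks for non-intent(c) arguments.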
+ */ +static int +string_from_pyobj(string *str, int *len, const string inistr, PyObject *obj, + const char *errmess) +{ + PyObject *tmp = NULL; + string buf = NULL; + npy_intp n = -1; +#ifdef DEBUGCFUNCS +fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\", + (char*)str, *len, (char *)inistr, obj); +#endif + if (obj == Py_None) { + n = strlen(inistr); + buf = inistr; + } + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (!ISCONTIGUOUS(arr)) { + PyErr_SetString(PyExc_ValueError, + \"array object is non-contiguous.\"); + goto capi_fail; + } + n = PyArray_NBYTES(arr); + buf = PyArray_DATA(arr); + n = strnlen(buf, n); + } + else { + if (PyBytes_Check(obj)) { + tmp = obj; + Py_INCREF(tmp); + } + else if (PyUnicode_Check(obj)) { + tmp = PyUnicode_AsASCIIString(obj); + } + else { + PyObject *tmp2; + tmp2 = PyObject_Str(obj); + if (tmp2) { + tmp = PyUnicode_AsASCIIString(tmp2); + Py_DECREF(tmp2); + } + else { + tmp = NULL; + } + } + if (tmp == NULL) goto capi_fail; + n = PyBytes_GET_SIZE(tmp); + buf = PyBytes_AS_STRING(tmp); + } + if (*len == -1) { + /* TODO: change the type of `len` so that we can remove this */ + if (n > NPY_MAX_INT) { + PyErr_SetString(PyExc_OverflowError, + "object too large for a 32-bit int"); + goto capi_fail; + } + *len = n; + } + else if (*len < n) { + /* discard the last (len-n) bytes of input buf */ + n = *len; + } + if (n < 0 || *len < 0 || buf == NULL) { + goto capi_fail; + } + STRINGMALLOC(*str, *len); // *str is allocated with size (*len + 1) + if (n < *len) { + /* + Pad fixed-width string with nulls. The caller will replace + nulls with blanks when the corresponding argument is not + intent(c). + */ + memset(*str + n, '\\0', *len - n); + } + STRINGCOPYN(*str, buf, n); + Py_XDECREF(tmp); + return 1; +capi_fail: + Py_XDECREF(tmp); + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + +cfuncs['character_from_pyobj'] = """ +static int +character_from_pyobj(character* v, PyObject *obj, const char *errmess) { + if (PyBytes_Check(obj)) { + /* empty bytes has trailing null, so dereferencing is always safe */ + *v = PyBytes_AS_STRING(obj)[0]; + return 1; + } else if (PyUnicode_Check(obj)) { + PyObject* tmp = PyUnicode_AsASCIIString(obj); + if (tmp != NULL) { + *v = PyBytes_AS_STRING(tmp)[0]; + Py_DECREF(tmp); + return 1; + } + } else if (PyArray_Check(obj)) { + PyArrayObject* arr = (PyArrayObject*)obj; + if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) { + *v = PyArray_BYTES(arr)[0]; + return 1; + } else if (F2PY_IS_UNICODE_ARRAY(arr)) { + // TODO: update when numpy will support 1-byte and + // 2-byte unicode dtypes + PyObject* tmp = PyUnicode_FromKindAndData( + PyUnicode_4BYTE_KIND, + PyArray_BYTES(arr), + (PyArray_NBYTES(arr)>0?1:0)); + if (tmp != NULL) { + if (character_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + } else if (PySequence_Check(obj)) { + PyObject* tmp = PySequence_GetItem(obj,0); + if (tmp != NULL) { + if (character_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + { + /* TODO: This error (and most other) error handling needs cleaning. 
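+     If a conversion above already set an exception, its type is reused with the message built below; otherwise a TypeError is raised.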
*/ + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + strcpy(mess, errmess); + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = PyExc_TypeError; + Py_INCREF(err); + } + else { + Py_INCREF(err); + PyErr_Clear(); + } + sprintf(mess + strlen(mess), + " -- expected str|bytes|sequence-of-str-or-bytes, got "); + f2py_describe(obj, mess + strlen(mess)); + PyErr_SetString(err, mess); + Py_DECREF(err); + } + return 0; +} +""" + +# TODO: These should be dynamically generated, too many mapped to int things, +# see note in _isocbind.py +needs['char_from_pyobj'] = ['int_from_pyobj'] +cfuncs['char_from_pyobj'] = """ +static int +char_from_pyobj(char* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (char)i; + return 1; + } + return 0; +} +""" + + +needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char'] +cfuncs['signed_char_from_pyobj'] = """ +static int +signed_char_from_pyobj(signed_char* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (signed_char)i; + return 1; + } + return 0; +} +""" + + +needs['short_from_pyobj'] = ['int_from_pyobj'] +cfuncs['short_from_pyobj'] = """ +static int +short_from_pyobj(short* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (short)i; + return 1; + } + return 0; +} +""" + + +cfuncs['int_from_pyobj'] = """ +static int +int_from_pyobj(int* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = Npy__PyLong_AsInt(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = Npy__PyLong_AsInt(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (int_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + + +cfuncs['long_from_pyobj'] = """ +static int +long_from_pyobj(long* v, PyObject *obj, const char *errmess) { + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = PyLong_AsLong(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = PyLong_AsLong(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (long_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + + +needs['long_long_from_pyobj'] = ['long_long'] +cfuncs['long_long_from_pyobj'] = """ +static int +long_long_from_pyobj(long_long* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = PyLong_AsLongLong(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = 
PyLong_AsLongLong(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (long_long_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double'] +cfuncs['long_double_from_pyobj'] = """ +static int +long_double_from_pyobj(long_double* v, PyObject *obj, const char *errmess) +{ + double d=0; + if (PyArray_CheckScalar(obj)){ + if PyArray_IsScalar(obj, LongDouble) { + PyArray_ScalarAsCtype(obj, v); + return 1; + } + else if (PyArray_Check(obj) && PyArray_TYPE(obj) == NPY_LONGDOUBLE) { + (*v) = *((npy_longdouble *)PyArray_DATA(obj)); + return 1; + } + } + if (double_from_pyobj(&d, obj, errmess)) { + *v = (long_double)d; + return 1; + } + return 0; +} +""" + + +cfuncs['double_from_pyobj'] = """ +static int +double_from_pyobj(double* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + if (PyFloat_Check(obj)) { + *v = PyFloat_AsDouble(obj); + return !(*v == -1.0 && PyErr_Occurred()); + } + + tmp = PyNumber_Float(obj); + if (tmp) { + *v = PyFloat_AsDouble(tmp); + Py_DECREF(tmp); + return !(*v == -1.0 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['float_from_pyobj'] = ['double_from_pyobj'] +cfuncs['float_from_pyobj'] = """ +static int +float_from_pyobj(float* v, PyObject *obj, const char *errmess) +{ + double d=0.0; + if (double_from_pyobj(&d,obj,errmess)) { + *v = (float)d; + return 1; + } + return 0; +} +""" + + +needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double', + 'complex_double_from_pyobj', 'npy_math.h'] +cfuncs['complex_long_double_from_pyobj'] = """ +static int +complex_long_double_from_pyobj(complex_long_double* v, PyObject *obj, const char *errmess) +{ + complex_double cd = {0.0,0.0}; + if (PyArray_CheckScalar(obj)){ + if PyArray_IsScalar(obj, CLongDouble) { + PyArray_ScalarAsCtype(obj, v); + return 1; + } + else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { + (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(obj)))); + (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(obj)))); + return 1; + } + } + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (long_double)cd.r; + (*v).i = (long_double)cd.i; + return 1; + } + return 0; +} +""" + + +needs['complex_double_from_pyobj'] = ['complex_double', 'npy_math.h'] +cfuncs['complex_double_from_pyobj'] = """ +static int +complex_double_from_pyobj(complex_double* v, PyObject *obj, const char *errmess) { + Py_complex c; + if (PyComplex_Check(obj)) { + c = PyComplex_AsCComplex(obj); + (*v).r = c.real; + (*v).i = c.imag; + return 1; + } + if 
(PyArray_IsScalar(obj, ComplexFloating)) { + if (PyArray_IsScalar(obj, CFloat)) { + npy_cfloat new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)npy_crealf(new); + (*v).i = (double)npy_cimagf(new); + } + else if (PyArray_IsScalar(obj, CLongDouble)) { + npy_clongdouble new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)npy_creall(new); + (*v).i = (double)npy_cimagl(new); + } + else { /* if (PyArray_IsScalar(obj, CDouble)) */ + PyArray_ScalarAsCtype(obj, v); + } + return 1; + } + if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ + PyArrayObject *arr; + if (PyArray_Check(obj)) { + arr = (PyArrayObject *)PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); + } + else { + arr = (PyArrayObject *)PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); + } + if (arr == NULL) { + return 0; + } + (*v).r = npy_creal(*(((npy_cdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimag(*(((npy_cdouble *)PyArray_DATA(arr)))); + Py_DECREF(arr); + return 1; + } + /* Python does not provide PyNumber_Complex function :-( */ + (*v).i = 0.0; + if (PyFloat_Check(obj)) { + (*v).r = PyFloat_AsDouble(obj); + return !((*v).r == -1.0 && PyErr_Occurred()); + } + if (PyLong_Check(obj)) { + (*v).r = PyLong_AsDouble(obj); + return !((*v).r == -1.0 && PyErr_Occurred()); + } + if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) { + PyObject *tmp = PySequence_GetItem(obj,0); + if (tmp) { + if (complex_double_from_pyobj(v,tmp,errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) + err = PyExc_TypeError; + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['complex_float_from_pyobj'] = [ + 'complex_float', 'complex_double_from_pyobj'] +cfuncs['complex_float_from_pyobj'] = """ +static int +complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) +{ + complex_double cd={0.0,0.0}; + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (float)cd.r; + (*v).i = (float)cd.i; + return 1; + } + return 0; +} +""" + + +cfuncs['try_pyarr_from_character'] = """ +static int try_pyarr_from_character(PyObject* obj, character* v) { + PyArrayObject *arr = (PyArrayObject*)obj; + if (!obj) return -2; + if (PyArray_Check(obj)) { + if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) { + *(character *)(PyArray_DATA(arr)) = *v; + return 1; + } + } + { + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = PyExc_ValueError; + strcpy(mess, "try_pyarr_from_character failed" + " -- expected bytes array-scalar|array, got "); + f2py_describe(obj, mess + strlen(mess)); + PyErr_SetString(err, mess); + } + } + return 0; +} +""" + +needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n' +needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char'] +cfuncs[ + 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' +needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char'] +cfuncs[ + 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' +needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_short'] = 'static int 
try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n' +needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n' +needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n' +needs['try_pyarr_from_long_long'] = [ + 'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] +cfuncs[ + 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' +needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n' +needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n' +needs['try_pyarr_from_complex_float'] = [ + 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] +cfuncs[ + 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' +needs['try_pyarr_from_complex_double'] = [ + 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] +cfuncs[ + 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' + + +needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] +# create the list of arguments to be used when calling back to python +cfuncs['create_cb_arglist'] = """ +static int +create_cb_arglist(PyObject* fun, PyTupleObject* xa , const int maxnofargs, + const int nofoptargs, int *nofargs, PyTupleObject **args, + const char *errmess) +{ + PyObject *tmp = NULL; + PyObject *tmp_fun = NULL; + Py_ssize_t tot, opt, ext, siz, i, di = 0; + CFUNCSMESS(\"create_cb_arglist\\n\"); + tot=opt=ext=siz=0; + /* Get the total number of arguments */ + if (PyFunction_Check(fun)) { + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + else { + di = 1; + if (PyObject_HasAttrString(fun,\"im_func\")) { + tmp_fun = PyObject_GetAttrString(fun,\"im_func\"); + } + else if (PyObject_HasAttrString(fun,\"__call__\")) { + tmp = PyObject_GetAttrString(fun,\"__call__\"); + if (PyObject_HasAttrString(tmp,\"im_func\")) + tmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); + else { + tmp_fun = fun; /* built-in function */ + Py_INCREF(tmp_fun); + tot = maxnofargs; + if (PyCFunction_Check(fun)) { + /* In case the function has a co_argcount (like on PyPy) */ + di = 0; + } + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + } + Py_XDECREF(tmp); + } + else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { + tot = maxnofargs; + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + else if (F2PyCapsule_Check(fun)) { + tot = maxnofargs; + if (xa != NULL) + ext = PyTuple_Size((PyObject *)xa); + if(ext>0) { + fprintf(stderr,\"extra arguments tuple cannot be used with PyCapsule call-back\\n\"); + goto capi_fail; + } + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + } + + if (tmp_fun == NULL) { + fprintf(stderr, + \"Call-back argument must be 
function|instance|instance.__call__|f2py-function \" + \"but got %s.\\n\", + ((fun == NULL) ? \"NULL\" : Py_TYPE(fun)->tp_name)); + goto capi_fail; + } + + if (PyObject_HasAttrString(tmp_fun,\"__code__\")) { + if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) { + PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\"); + Py_DECREF(tmp); + if (tmp_argcount == NULL) { + goto capi_fail; + } + tot = PyLong_AsSsize_t(tmp_argcount) - di; + Py_DECREF(tmp_argcount); + } + } + /* Get the number of optional arguments */ + if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) { + if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\"))) + opt = PyTuple_Size(tmp); + Py_XDECREF(tmp); + } + /* Get the number of extra arguments */ + if (xa != NULL) + ext = PyTuple_Size((PyObject *)xa); + /* Calculate the size of call-backs argument list */ + siz = MIN(maxnofargs+ext,tot); + *nofargs = MAX(0,siz-ext); + +#ifdef DEBUGCFUNCS + fprintf(stderr, + \"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),\" + \"tot,opt,ext,siz,nofargs = %d(-%d), %zd, %zd, %zd, %zd, %d\\n\", + maxnofargs, nofoptargs, tot, opt, ext, siz, *nofargs); +#endif + + if (siz < tot-opt) { + fprintf(stderr, + \"create_cb_arglist: Failed to build argument list \" + \"(siz) with enough arguments (tot-opt) required by \" + \"user-supplied function (siz,tot,opt=%zd, %zd, %zd).\\n\", + siz, tot, opt); + goto capi_fail; + } + + /* Initialize argument list */ + *args = (PyTupleObject *)PyTuple_New(siz); + for (i=0;i<*nofargs;i++) { + Py_INCREF(Py_None); + PyTuple_SET_ITEM((PyObject *)(*args),i,Py_None); + } + if (xa != NULL) + for (i=(*nofargs);i 0: + if outneeds[n][0] not in needs: + out.append(outneeds[n][0]) + del outneeds[n][0] + else: + flag = 0 + for k in outneeds[n][1:]: + if k in needs[outneeds[n][0]]: + flag = 1 + break + if flag: + outneeds[n] = outneeds[n][1:] + [outneeds[n][0]] + else: + out.append(outneeds[n][0]) + del outneeds[n][0] + if saveout and (0 not in map(lambda x, y: x == y, saveout, outneeds[n])) \ + and outneeds[n] != []: + print(n, saveout) + errmess( + 'get_needs: no progress in sorting needs, probably circular dependence, skipping.\n') + out = out + saveout + break + saveout = copy.copy(outneeds[n]) + if out == []: + out = [n] + res[n] = out + return res diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/common_rules.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/common_rules.py new file mode 100644 index 00000000..64347b73 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/common_rules.py @@ -0,0 +1,146 @@ +""" +Build common block mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . import __version__ +f2py_version = __version__.version + +from .auxfuncs import ( + hasbody, hascommon, hasnote, isintent_hide, outmess, getuseblocks +) +from . import capi_maps +from . 
import func2subr +from .crackfortran import rmbadname + + +def findcommonblocks(block, top=1): + ret = [] + if hascommon(block): + for key, value in block['common'].items(): + vars_ = {v: block['vars'][v] for v in value} + ret.append((key, value, vars_)) + elif hasbody(block): + for b in block['body']: + ret = ret + findcommonblocks(b, 0) + if top: + tret = [] + names = [] + for t in ret: + if t[0] not in names: + names.append(t[0]) + tret.append(t) + return tret + return ret + + +def buildhooks(m): + ret = {'commonhooks': [], 'initcommonhooks': [], + 'docs': ['"COMMON blocks:\\n"']} + fwrap = [''] + + def fadd(line, s=fwrap): + s[0] = '%s\n %s' % (s[0], line) + chooks = [''] + + def cadd(line, s=chooks): + s[0] = '%s\n%s' % (s[0], line) + ihooks = [''] + + def iadd(line, s=ihooks): + s[0] = '%s\n%s' % (s[0], line) + doc = [''] + + def dadd(line, s=doc): + s[0] = '%s\n%s' % (s[0], line) + for (name, vnames, vars) in findcommonblocks(m): + lower_name = name.lower() + hnames, inames = [], [] + for n in vnames: + if isintent_hide(vars[n]): + hnames.append(n) + else: + inames.append(n) + if hnames: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n' % ( + name, ','.join(inames), ','.join(hnames))) + else: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % ( + name, ','.join(inames))) + fadd('subroutine f2pyinit%s(setupfunc)' % name) + for usename in getuseblocks(m): + fadd(f'use {usename}') + fadd('external setupfunc') + for n in vnames: + fadd(func2subr.var2fixfortran(vars, n)) + if name == '_BLNK_': + fadd('common %s' % (','.join(vnames))) + else: + fadd('common /%s/ %s' % (name, ','.join(vnames))) + fadd('call setupfunc(%s)' % (','.join(inames))) + fadd('end\n') + cadd('static FortranDataDef f2py_%s_def[] = {' % (name)) + idims = [] + for n in inames: + ct = capi_maps.getctype(vars[n]) + elsize = capi_maps.get_elsize(vars[n]) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, vars[n]) + if dm['dims']: + idims.append('(%s)' % (dm['dims'])) + else: + idims.append('') + dms = dm['dims'].strip() + if not dms: + dms = '-1' + cadd('\t{\"%s\",%s,{{%s}},%s, %s},' + % (n, dm['rank'], dms, at, elsize)) + cadd('\t{NULL}\n};') + inames1 = rmbadname(inames) + inames1_tps = ','.join(['char *' + s for s in inames1]) + cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps)) + cadd('\tint i_f2py=0;') + for n in inames1: + cadd('\tf2py_%s_def[i_f2py++].data = %s;' % (name, n)) + cadd('}') + if '_' in lower_name: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));' + % (F_FUNC, lower_name, name.upper(), + ','.join(['char*'] * len(inames1)))) + cadd('static void f2py_init_%s(void) {' % name) + cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' + % (F_FUNC, lower_name, name.upper(), name)) + cadd('}\n') + iadd('\ttmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s);' % (name, name)) + iadd('\tif (tmp == NULL) return NULL;') + iadd('\tif (F2PyDict_SetItemString(d, \"%s\", tmp) == -1) return NULL;' + % name) + iadd('\tPy_DECREF(tmp);') + tname = name.replace('_', '\\_') + dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname)) + dadd('\\begin{description}') + for n in inames: + dadd('\\item[]{{}\\verb@%s@{}}' % + (capi_maps.getarrdocsign(n, vars[n]))) + if hasnote(vars[n]): + note = vars[n]['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd('--- %s' % (note)) + dadd('\\end{description}') + ret['docs'].append( + '"\t/%s/ %s\\n"' % (name, ','.join(map(lambda 
v, d: v + d, inames, idims)))) + ret['commonhooks'] = chooks + ret['initcommonhooks'] = ihooks + ret['latexdoc'] = doc[0] + if len(ret['docs']) <= 1: + ret['docs'] = '' + return ret, fwrap[0] diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/crackfortran.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/crackfortran.py new file mode 100755 index 00000000..8d3fc276 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/crackfortran.py @@ -0,0 +1,3767 @@ +#!/usr/bin/env python3 +""" +crackfortran --- read fortran (77,90) code and extract declaration information. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + + +Usage of crackfortran: +====================== +Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h + -m ,--ignore-contains +Functions: crackfortran, crack2fortran +The following Fortran statements/constructions are supported +(or will be if needed): + block data,byte,call,character,common,complex,contains,data, + dimension,double complex,double precision,end,external,function, + implicit,integer,intent,interface,intrinsic, + logical,module,optional,parameter,private,public, + program,real,(sequence?),subroutine,type,use,virtual, + include,pythonmodule +Note: 'virtual' is mapped to 'dimension'. +Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug). +Note: code after 'contains' will be ignored until its scope ends. +Note: 'common' statement is extended: dimensions are moved to variable definitions +Note: f2py directive: f2py is read as +Note: pythonmodule is introduced to represent Python module + +Usage: + `postlist=crackfortran(files)` + `postlist` contains declaration information read from the list of files `files`. 
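+  For example (an illustrative sketch, not upstream text): for a source file
+  containing ``subroutine foo(a)`` with ``integer a``, `postlist` is roughly
+  ``[{'block': 'subroutine', 'name': 'foo', 'args': ['a'],
+  'vars': {'a': {'typespec': 'integer'}}, 'body': [], ...}]``.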
+ `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file + + `postlist` has the following structure: + *** it is a list of dictionaries containing `blocks': + B = {'block','body','vars','parent_block'[,'name','prefix','args','result', + 'implicit','externals','interfaced','common','sortvars', + 'commonvars','note']} + B['block'] = 'interface' | 'function' | 'subroutine' | 'module' | + 'program' | 'block data' | 'type' | 'pythonmodule' | + 'abstract interface' + B['body'] --- list containing `subblocks' with the same structure as `blocks' + B['parent_block'] --- dictionary of a parent block: + C['body'][]['parent_block'] is C + B['vars'] --- dictionary of variable definitions + B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first) + B['name'] --- name of the block (not if B['block']=='interface') + B['prefix'] --- prefix string (only if B['block']=='function') + B['args'] --- list of argument names if B['block']== 'function' | 'subroutine' + B['result'] --- name of the return value (only if B['block']=='function') + B['implicit'] --- dictionary {'a':,'b':...} | None + B['externals'] --- list of variables being external + B['interfaced'] --- list of variables being external and defined + B['common'] --- dictionary of common blocks (list of objects) + B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions) + B['from'] --- string showing the 'parents' of the current block + B['use'] --- dictionary of modules used in current block: + {:{['only':<0|1>],['map':{:,...}]}} + B['note'] --- list of LaTeX comments on the block + B['f2pyenhancements'] --- optional dictionary + {'threadsafe':'','fortranname':, + 'callstatement':|, + 'callprotoargument':, + 'usercode':|, + 'pymethoddef:' + } + B['entry'] --- dictionary {entryname:argslist,..} + B['varnames'] --- list of variable names given in the order of reading the + Fortran code, useful for derived types. + B['saved_interface'] --- a string of scanned routine signature, defines explicit interface + *** Variable definition is a dictionary + D = B['vars'][] = + {'typespec'[,'attrspec','kindselector','charselector','=','typename']} + D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' | + 'double precision' | 'integer' | 'logical' | 'real' | 'type' + D['attrspec'] --- list of attributes (e.g. 
'dimension()', + 'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)', + 'optional','required', etc) + K = D['kindselector'] = {['*','kind']} (only if D['typespec'] = + 'complex' | 'integer' | 'logical' | 'real' ) + C = D['charselector'] = {['*','len','kind','f2py_len']} + (only if D['typespec']=='character') + D['='] --- initialization expression string + D['typename'] --- name of the type if D['typespec']=='type' + D['dimension'] --- list of dimension bounds + D['intent'] --- list of intent specifications + D['depend'] --- list of variable names on which current variable depends on + D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised + D['note'] --- list of LaTeX comments on the variable + *** Meaning of kind/char selectors (few examples): + D['typespec>']*K['*'] + D['typespec'](kind=K['kind']) + character*C['*'] + character(len=C['len'],kind=C['kind'], f2py_len=C['f2py_len']) + (see also fortran type declaration statement formats below) + +Fortran 90 type declaration statement format (F77 is subset of F90) +==================================================================== +(Main source: IBM XL Fortran 5.1 Language Reference Manual) +type declaration = [[]::] + = byte | + character[] | + complex[] | + double complex | + double precision | + integer[] | + logical[] | + real[] | + type() + = * | + ([len=][,[kind=]]) | + (kind=[,len=]) + = * | + ([kind=]) + = comma separated list of attributes. + Only the following attributes are used in + building up the interface: + external + (parameter --- affects '=' key) + optional + intent + Other attributes are ignored. + = in | out | inout + = comma separated list of dimension bounds. + = [[*][()] | [()]*] + [// | =] [,] + +In addition, the following attributes are used: check,depend,note + +TODO: + * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)' + -> 'real x(2)') + The above may be solved by creating appropriate preprocessor program, for example. + +""" +import sys +import string +import fileinput +import re +import os +import copy +import platform +import codecs +from pathlib import Path +try: + import charset_normalizer +except ImportError: + charset_normalizer = None + +from . import __version__ + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * +from . import symbolic + +f2py_version = __version__.version + +# Global flags: +strictf77 = 1 # Ignore `!' comments unless line[0]=='!' +sourcecodeform = 'fix' # 'fix','free' +quiet = 0 # Be verbose if 0 (Obsolete: not used any more) +verbose = 1 # Be quiet if 0, extra verbose if > 1. 
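+# --- Annotation, not part of the upstream file: a minimal usage sketch. ---
+# The settings in this section are plain module globals that crackfortran()
+# consults while reading sources, so a driver may adjust them before the
+# call.  Assuming a Fortran file 'example.f' exists:
+#
+#     from numpy.f2py import crackfortran
+#     crackfortran.verbose = 0               # silence outmess() progress text
+#     postlist = crackfortran.crackfortran(['example.f'])
+#     for block in postlist:
+#         print(block['block'], block.get('name'))    # e.g. "subroutine foo"
+#     pyf_text = crackfortran.crack2fortran(postlist)  # render as .pyf text
+#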
+tabchar = 4 * ' ' +pyffilename = '' +f77modulename = '' +skipemptyends = 0 # for old F77 programs without 'program' statement +ignorecontains = 1 +dolowercase = 1 +debug = [] + +# Global variables +beginpattern = '' +currentfilename = '' +expectbegin = 1 +f90modulevars = {} +filepositiontext = '' +gotnextfile = 1 +groupcache = None +groupcounter = 0 +grouplist = {groupcounter: []} +groupname = '' +include_paths = [] +neededmodule = -1 +onlyfuncs = [] +previous_context = None +skipblocksuntil = -1 +skipfuncs = [] +skipfunctions = [] +usermodules = [] + + +def reset_global_f2py_vars(): + global groupcounter, grouplist, neededmodule, expectbegin + global skipblocksuntil, usermodules, f90modulevars, gotnextfile + global filepositiontext, currentfilename, skipfunctions, skipfuncs + global onlyfuncs, include_paths, previous_context + global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename + global f77modulename, skipemptyends, ignorecontains, dolowercase, debug + + # flags + strictf77 = 1 + sourcecodeform = 'fix' + quiet = 0 + verbose = 1 + tabchar = 4 * ' ' + pyffilename = '' + f77modulename = '' + skipemptyends = 0 + ignorecontains = 1 + dolowercase = 1 + debug = [] + # variables + groupcounter = 0 + grouplist = {groupcounter: []} + neededmodule = -1 + expectbegin = 1 + skipblocksuntil = -1 + usermodules = [] + f90modulevars = {} + gotnextfile = 1 + filepositiontext = '' + currentfilename = '' + skipfunctions = [] + skipfuncs = [] + onlyfuncs = [] + include_paths = [] + previous_context = None + + +def outmess(line, flag=1): + global filepositiontext + + if not verbose: + return + if not quiet: + if flag: + sys.stdout.write(filepositiontext) + sys.stdout.write(line) + +re._MAXCACHE = 50 +defaultimplicitrules = {} +for c in "abcdefghopqrstuvwxyz$_": + defaultimplicitrules[c] = {'typespec': 'real'} +for c in "ijklmn": + defaultimplicitrules[c] = {'typespec': 'integer'} +badnames = {} +invbadnames = {} +for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while', + 'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union', + 'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch', + 'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto', + 'len', 'rank', 'shape', 'index', 'slen', 'size', '_i', + 'max', 'min', + 'flen', 'fshape', + 'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout', + 'type', 'default']: + badnames[n] = n + '_bn' + invbadnames[n + '_bn'] = n + + +def rmbadname1(name): + if name in badnames: + errmess('rmbadname1: Replacing "%s" with "%s".\n' % + (name, badnames[name])) + return badnames[name] + return name + + +def rmbadname(names): + return [rmbadname1(_m) for _m in names] + + +def undo_rmbadname1(name): + if name in invbadnames: + errmess('undo_rmbadname1: Replacing "%s" with "%s".\n' + % (name, invbadnames[name])) + return invbadnames[name] + return name + + +def undo_rmbadname(names): + return [undo_rmbadname1(_m) for _m in names] + + +_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search +_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search +_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search +_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match + +# Extensions +COMMON_FREE_EXTENSIONS = ['.f90', '.f95', '.f03', '.f08'] +COMMON_FIXED_EXTENSIONS = ['.for', '.ftn', '.f77', '.f'] + + +def openhook(filename, mode): + """Ensures that filename is opened with correct encoding parameter. 
+ + This function uses charset_normalizer package, when available, for + determining the encoding of the file to be opened. When charset_normalizer + is not available, the function detects only UTF encodings, otherwise, ASCII + encoding is used as fallback. + """ + # Reads in the entire file. Robust detection of encoding. + # Correctly handles comments or late stage unicode characters + # gh-22871 + if charset_normalizer is not None: + encoding = charset_normalizer.from_path(filename).best().encoding + else: + # hint: install charset_normalizer for correct encoding handling + # No need to read the whole file for trying with startswith + nbytes = min(32, os.path.getsize(filename)) + with open(filename, 'rb') as fhandle: + raw = fhandle.read(nbytes) + if raw.startswith(codecs.BOM_UTF8): + encoding = 'UTF-8-SIG' + elif raw.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)): + encoding = 'UTF-32' + elif raw.startswith((codecs.BOM_LE, codecs.BOM_BE)): + encoding = 'UTF-16' + else: + # Fallback, without charset_normalizer + encoding = 'ascii' + return open(filename, mode, encoding=encoding) + + +def is_free_format(fname): + """Check if file is in free format Fortran.""" + # f90 allows both fixed and free format, assuming fixed unless + # signs of free format are detected. + result = False + if Path(fname).suffix.lower() in COMMON_FREE_EXTENSIONS: + result = True + with openhook(fname, 'r') as fhandle: + line = fhandle.readline() + n = 15 # the number of non-comment lines to scan for hints + if _has_f_header(line): + n = 0 + elif _has_f90_header(line): + n = 0 + result = True + while n > 0 and line: + if line[0] != '!' and line.strip(): + n -= 1 + if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&': + result = True + break + line = fhandle.readline() + return result + + +# Read fortran (77,90) code +def readfortrancode(ffile, dowithline=show, istop=1): + """ + Read fortran codes from files and + 1) Get rid of comments, line continuations, and empty lines; lower cases. + 2) Call dowithline(line) on every line. + 3) Recursively call itself when statement \"include ''\" is met. 
+ """ + global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77 + global beginpattern, quiet, verbose, dolowercase, include_paths + + if not istop: + saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ + beginpattern, quiet, verbose, dolowercase + if ffile == []: + return + localdolowercase = dolowercase + # cont: set to True when the content of the last line read + # indicates statement continuation + cont = False + finalline = '' + ll = '' + includeline = re.compile( + r'\s*include\s*(\'|")(?P[^\'"]*)(\'|")', re.I) + cont1 = re.compile(r'(?P.*)&\s*\Z') + cont2 = re.compile(r'(\s*&|)(?P.*)') + mline_mark = re.compile(r".*?'''") + if istop: + dowithline('', -1) + ll, l1 = '', '' + spacedigits = [' '] + [str(_m) for _m in range(10)] + filepositiontext = '' + fin = fileinput.FileInput(ffile, openhook=openhook) + while True: + try: + l = fin.readline() + except UnicodeDecodeError as msg: + raise Exception( + f'readfortrancode: reading {fin.filename()}#{fin.lineno()}' + f' failed with\n{msg}.\nIt is likely that installing charset_normalizer' + ' package will help f2py determine the input file encoding' + ' correctly.') + if not l: + break + if fin.isfirstline(): + filepositiontext = '' + currentfilename = fin.filename() + gotnextfile = 1 + l1 = l + strictf77 = 0 + sourcecodeform = 'fix' + ext = os.path.splitext(currentfilename)[1] + if Path(currentfilename).suffix.lower() in COMMON_FIXED_EXTENSIONS and \ + not (_has_f90_header(l) or _has_fix_header(l)): + strictf77 = 1 + elif is_free_format(currentfilename) and not _has_fix_header(l): + sourcecodeform = 'free' + if strictf77: + beginpattern = beginpattern77 + else: + beginpattern = beginpattern90 + outmess('\tReading file %s (format:%s%s)\n' + % (repr(currentfilename), sourcecodeform, + strictf77 and ',strict' or '')) + + l = l.expandtabs().replace('\xa0', ' ') + # Get rid of newline characters + while not l == '': + if l[-1] not in "\n\r\f": + break + l = l[:-1] + if not strictf77: + (l, rl) = split_by_unquoted(l, '!') + l += ' ' + if rl[:5].lower() == '!f2py': # f2py directive + l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') + if l.strip() == '': # Skip empty line + if sourcecodeform == 'free': + # In free form, a statement continues in the next line + # that is not a comment line [3.3.2.4^1], lines with + # blanks are comment lines [3.3.2.3^1]. Hence, the + # line continuation flag must retain its state. + pass + else: + # In fixed form, statement continuation is determined + # by a non-blank character at the 6-th position. Empty + # line indicates a start of a new statement + # [3.3.3.3^1]. Hence, the line continuation flag must + # be reset. + cont = False + continue + if sourcecodeform == 'fix': + if l[0] in ['*', 'c', '!', 'C', '#']: + if l[1:5].lower() == 'f2py': # f2py directive + l = ' ' + l[5:] + else: # Skip comment line + cont = False + continue + elif strictf77: + if len(l) > 72: + l = l[:72] + if not (l[0] in spacedigits): + raise Exception('readfortrancode: Found non-(space,digit) char ' + 'in the first column.\n\tAre you sure that ' + 'this code is in fix form?\n\tline=%s' % repr(l)) + + if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '): + # Continuation of a previous line + ll = ll + l[6:] + finalline = '' + origfinalline = '' + else: + if not strictf77: + # F90 continuation + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. 
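+                    # Annotation, not in the upstream file: ``cont`` records
+                    # whether the *previous* line ended with '&'.  If it did,
+                    # the tail of the current line is appended to the pending
+                    # statement ``ll`` below; otherwise ``ll`` is flushed to
+                    # ``finalline`` and the current line starts a new statement.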
+ if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' + else: + # clean up line beginning from possible digits. + l = ' ' + l[5:] + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + ll = l + cont = (r is not None) + else: + # clean up line beginning from possible digits. + l = ' ' + l[5:] + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + ll = l + + elif sourcecodeform == 'free': + if not cont and ext == '.pyf' and mline_mark.match(l): + l = l + '\n' + while True: + lc = fin.readline() + if not lc: + errmess( + 'Unexpected end of file when reading multiline\n') + break + l = l + lc + if mline_mark.match(lc): + break + l = l.rstrip() + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' + else: + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + ll = l + cont = (r is not None) + else: + raise ValueError( + "Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform)) + filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( + fin.filelineno() - 1, currentfilename, l1) + m = includeline.match(origfinalline) + if m: + fn = m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [ + os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( + repr(fn), os.pathsep.join(include_dirs))) + else: + dowithline(finalline) + l1 = ll + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( + fin.filelineno() - 1, currentfilename, l1) + m = includeline.match(origfinalline) + if m: + fn = m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\n' % ( + repr(fn), os.pathsep.join(include_dirs))) + else: + dowithline(finalline) + filepositiontext = '' + fin.close() + if istop: + dowithline('', 1) + else: + gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ + beginpattern, quiet, verbose, dolowercase = saveglobals + +# Crack line +beforethisafter = r'\s*(?P%s(?=\s*(\b(%s)\b)))' + \ + r'\s*(?P(\b(%s)\b))' + \ + r'\s*(?P%s)\s*\Z' +## +fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte' +typespattern = re.compile( + beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type' +typespattern4implicit = re.compile(beforethisafter % ( + '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I) +# +functionpattern = re.compile(beforethisafter % ( + r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin' +subroutinepattern = re.compile(beforethisafter % ( + r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin' +# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin' +# +groupbegins77 = r'program|block\s*data' +beginpattern77 = re.compile( + beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin' +groupbegins90 = groupbegins77 + \ + r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|' + \ + r'type(?!\s*\()' +beginpattern90 = re.compile( + beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin' +groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|' + r'endinterface|endsubroutine|endfunction') +endpattern = re.compile( + beforethisafter % ('', groupends, groupends, '.*'), re.I), 'end' +# block, the Fortran 2008 construct needs special handling in the rest of the file +endifs = r'end\s*(if|do|where|select|while|forall|associate|' + \ + r'critical|enum|team)' +endifpattern = re.compile( + beforethisafter % (r'[\w]*?', endifs, endifs, '.*'), re.I), 'endif' +# +moduleprocedures = r'module\s*procedure' +moduleprocedurepattern = re.compile( + beforethisafter % ('', moduleprocedures, moduleprocedures, '.*'), re.I), \ + 'moduleprocedure' +implicitpattern = re.compile( + beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit' +dimensionpattern = re.compile(beforethisafter % ( + '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension' +externalpattern = re.compile( + beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external' +optionalpattern = re.compile( + beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional' +requiredpattern = re.compile( + beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required' +publicpattern = re.compile( + beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public' +privatepattern = re.compile( + beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private' +intrinsicpattern = re.compile( + beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic' +intentpattern = re.compile(beforethisafter % ( + '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent' +parameterpattern = re.compile( + beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter' +datapattern = re.compile( + beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data' +callpattern = re.compile( + beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call' +entrypattern = re.compile( + 
beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry' +callfunpattern = re.compile( + beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun' +commonpattern = re.compile( + beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common' +usepattern = re.compile( + beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use' +containspattern = re.compile( + beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains' +formatpattern = re.compile( + beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format' +# Non-fortran and f2py-specific statements +f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', + 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements' +multilinepattern = re.compile( + r"\s*(?P''')(?P.*?)(?P''')\s*\Z", re.S), 'multiline' +## + +def split_by_unquoted(line, characters): + """ + Splits the line into (line[:i], line[i:]), + where i is the index of first occurrence of one of the characters + not within quotes, or len(line) if no such index exists + """ + assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes" + r = re.compile( + r"\A(?P({single_quoted}|{double_quoted}|{not_quoted})*)" + r"(?P{char}.*)\Z".format( + not_quoted="[^\"'{}]".format(re.escape(characters)), + char="[{}]".format(re.escape(characters)), + single_quoted=r"('([^'\\]|(\\.))*')", + double_quoted=r'("([^"\\]|(\\.))*")')) + m = r.match(line) + if m: + d = m.groupdict() + return (d["before"], d["after"]) + return (line, "") + +def _simplifyargs(argsline): + a = [] + for n in markoutercomma(argsline).split('@,@'): + for r in '(),': + n = n.replace(r, '_') + a.append(n) + return ','.join(a) + +crackline_re_1 = re.compile(r'\s*(?P\b[a-z]+\w*\b)\s*=.*', re.I) +crackline_bind_1 = re.compile(r'\s*(?P\b[a-z]+\w*\b)\s*=.*', re.I) +crackline_bindlang = re.compile(r'\s*bind\(\s*(?P[^,]+)\s*,\s*name\s*=\s*"(?P[^"]+)"\s*\)', re.I) + +def crackline(line, reset=0): + """ + reset=-1 --- initialize + reset=0 --- crack the line + reset=1 --- final check if mismatch of blocks occurred + + Cracked data is saved in grouplist[0]. + """ + global beginpattern, groupcounter, groupname, groupcache, grouplist + global filepositiontext, currentfilename, neededmodule, expectbegin + global skipblocksuntil, skipemptyends, previous_context, gotnextfile + + _, has_semicolon = split_by_unquoted(line, ";") + if has_semicolon and not (f2pyenhancementspattern[0].match(line) or + multilinepattern[0].match(line)): + # XXX: non-zero reset values need testing + assert reset == 0, repr(reset) + # split line on unquoted semicolons + line, semicolon_line = split_by_unquoted(line, ";") + while semicolon_line: + crackline(line, reset) + line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";") + crackline(line, reset) + return + if reset < 0: + groupcounter = 0 + groupname = {groupcounter: ''} + groupcache = {groupcounter: {}} + grouplist = {groupcounter: []} + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['block'] = '' + groupcache[groupcounter]['name'] = '' + neededmodule = -1 + skipblocksuntil = -1 + return + if reset > 0: + fl = 0 + if f77modulename and neededmodule == groupcounter: + fl = 2 + while groupcounter > fl: + outmess('crackline: groupcounter=%s groupname=%s\n' % + (repr(groupcounter), repr(groupname))) + outmess( + 'crackline: Mismatch of blocks encountered. 
Trying to fix it by assuming "end" statement.\n') + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 + if f77modulename and neededmodule == groupcounter: + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end interface + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end module + neededmodule = -1 + return + if line == '': + return + flag = 0 + for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern, + requiredpattern, + parameterpattern, datapattern, publicpattern, privatepattern, + intrinsicpattern, + endifpattern, endpattern, + formatpattern, + beginpattern, functionpattern, subroutinepattern, + implicitpattern, typespattern, commonpattern, + callpattern, usepattern, containspattern, + entrypattern, + f2pyenhancementspattern, + multilinepattern, + moduleprocedurepattern + ]: + m = pat[0].match(line) + if m: + break + flag = flag + 1 + if not m: + re_1 = crackline_re_1 + if 0 <= skipblocksuntil <= groupcounter: + return + if 'externals' in groupcache[groupcounter]: + for name in groupcache[groupcounter]['externals']: + if name in invbadnames: + name = invbadnames[name] + if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']: + continue + m1 = re.match( + r'(?P[^"]*)\b%s\b\s*@\(@(?P[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I) + if m1: + m2 = re_1.match(m1.group('before')) + a = _simplifyargs(m1.group('args')) + if m2: + line = 'callfun %s(%s) result (%s)' % ( + name, a, m2.group('result')) + else: + line = 'callfun %s(%s)' % (name, a) + m = callfunpattern[0].match(line) + if not m: + outmess( + 'crackline: could not resolve function call for line=%s.\n' % repr(line)) + return + analyzeline(m, 'callfun', line) + return + if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')): + previous_context = None + outmess('crackline:%d: No pattern for line\n' % (groupcounter)) + return + elif pat[1] == 'end': + if 0 <= skipblocksuntil < groupcounter: + groupcounter = groupcounter - 1 + if skipblocksuntil <= groupcounter: + return + if groupcounter <= 0: + raise Exception('crackline: groupcounter(=%s) is nonpositive. ' + 'Check the blocks.' 
+ % (groupcounter)) + m1 = beginpattern[0].match((line)) + if (m1) and (not m1.group('this') == groupname[groupcounter]): + raise Exception('crackline: End group %s does not match with ' + 'previous Begin group %s\n\t%s' % + (repr(m1.group('this')), repr(groupname[groupcounter]), + filepositiontext) + ) + if skipblocksuntil == groupcounter: + skipblocksuntil = -1 + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 + if not skipemptyends: + expectbegin = 1 + elif pat[1] == 'begin': + if 0 <= skipblocksuntil <= groupcounter: + groupcounter = groupcounter + 1 + return + gotnextfile = 0 + analyzeline(m, pat[1], line) + expectbegin = 0 + elif pat[1] == 'endif': + pass + elif pat[1] == 'moduleprocedure': + analyzeline(m, pat[1], line) + elif pat[1] == 'contains': + if ignorecontains: + return + if 0 <= skipblocksuntil <= groupcounter: + return + skipblocksuntil = groupcounter + else: + if 0 <= skipblocksuntil <= groupcounter: + return + analyzeline(m, pat[1], line) + + +def markouterparen(line): + l = '' + f = 0 + for c in line: + if c == '(': + f = f + 1 + if f == 1: + l = l + '@(@' + continue + elif c == ')': + f = f - 1 + if f == 0: + l = l + '@)@' + continue + l = l + c + return l + + +def markoutercomma(line, comma=','): + l = '' + f = 0 + before, after = split_by_unquoted(line, comma + '()') + l += before + while after: + if (after[0] == comma) and (f == 0): + l += '@' + comma + '@' + else: + l += after[0] + if after[0] == '(': + f += 1 + elif after[0] == ')': + f -= 1 + before, after = split_by_unquoted(after[1:], comma + '()') + l += before + assert not f, repr((f, line, l)) + return l + +def unmarkouterparen(line): + r = line.replace('@(@', '(').replace('@)@', ')') + return r + + +def appenddecl(decl, decl2, force=1): + if not decl: + decl = {} + if not decl2: + return decl + if decl is decl2: + return decl + for k in list(decl2.keys()): + if k == 'typespec': + if force or k not in decl: + decl[k] = decl2[k] + elif k == 'attrspec': + for l in decl2[k]: + decl = setattrspec(decl, l, force) + elif k == 'kindselector': + decl = setkindselector(decl, decl2[k], force) + elif k == 'charselector': + decl = setcharselector(decl, decl2[k], force) + elif k in ['=', 'typename']: + if force or k not in decl: + decl[k] = decl2[k] + elif k == 'note': + pass + elif k in ['intent', 'check', 'dimension', 'optional', + 'required', 'depend']: + errmess('appenddecl: "%s" not implemented.\n' % k) + else: + raise Exception('appenddecl: Unknown variable definition key: ' + + str(k)) + return decl + +selectpattern = re.compile( + r'\s*(?P(@\(@.*?@\)@|\*[\d*]+|\*\s*@\(@.*?@\)@|))(?P.*)\Z', re.I) +typedefpattern = re.compile( + r'(?:,(?P[\w(),]+))?(::)?(?P\b[a-z$_][\w$]*\b)' + r'(?:\((?P[\w,]*)\))?\Z', re.I) +nameargspattern = re.compile( + r'\s*(?P\b[\w$]+\b)\s*(@\(@\s*(?P[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P(?:(?!@\)@).)*)\s*@\)@))*\s*\Z', re.I) +operatorpattern = re.compile( + r'\s*(?P(operator|assignment))' + r'@\(@\s*(?P[^)]+)\s*@\)@\s*\Z', re.I) +callnameargspattern = re.compile( + r'\s*(?P\b[\w$]+\b)\s*@\(@\s*(?P.*)\s*@\)@\s*\Z', re.I) +real16pattern = re.compile( + r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)') +real8pattern = re.compile( + r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))') + +_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I) + + +def 
_is_intent_callback(vdecl): + for a in vdecl.get('attrspec', []): + if _intentcallbackpattern.match(a): + return 1 + return 0 + + +def _resolvetypedefpattern(line): + line = ''.join(line.split()) # removes whitespace + m1 = typedefpattern.match(line) + print(line, m1) + if m1: + attrs = m1.group('attributes') + attrs = [a.lower() for a in attrs.split(',')] if attrs else [] + return m1.group('name'), attrs, m1.group('params') + return None, [], None + +def parse_name_for_bind(line): + pattern = re.compile(r'bind\(\s*(?P[^,]+)(?:\s*,\s*name\s*=\s*["\'](?P[^"\']+)["\']\s*)?\)', re.I) + match = pattern.search(line) + bind_statement = None + if match: + bind_statement = match.group(0) + # Remove the 'bind' construct from the line. + line = line[:match.start()] + line[match.end():] + return line, bind_statement + +def _resolvenameargspattern(line): + line, bind_cname = parse_name_for_bind(line) + line = markouterparen(line) + m1 = nameargspattern.match(line) + if m1: + return m1.group('name'), m1.group('args'), m1.group('result'), bind_cname + m1 = operatorpattern.match(line) + if m1: + name = m1.group('scheme') + '(' + m1.group('name') + ')' + return name, [], None, None + m1 = callnameargspattern.match(line) + if m1: + return m1.group('name'), m1.group('args'), None, None + return None, [], None, None + + +def analyzeline(m, case, line): + """ + Reads each line in the input file in sequence and updates global vars. + + Effectively reads and collects information from the input file to the + global variable groupcache, a dictionary containing info about each part + of the fortran module. + + At the end of analyzeline, information is filtered into the correct dict + keys, but parameter values and dimensions are not yet interpreted. + """ + global groupcounter, groupname, groupcache, grouplist, filepositiontext + global currentfilename, f77modulename, neededinterface, neededmodule + global expectbegin, gotnextfile, previous_context + + block = m.group('this') + if case != 'multiline': + previous_context = None + if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \ + and not skipemptyends and groupcounter < 1: + newname = os.path.basename(currentfilename).split('.')[0] + outmess( + 'analyzeline: no group yet. 
Creating program group with name "%s".\n' % newname) + gotnextfile = 0 + groupcounter = groupcounter + 1 + groupname[groupcounter] = 'program' + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['block'] = 'program' + groupcache[groupcounter]['name'] = newname + groupcache[groupcounter]['from'] = 'fromsky' + expectbegin = 0 + if case in ['begin', 'call', 'callfun']: + # Crack line => block,name,args,result + block = block.lower() + if re.match(r'block\s*data', block, re.I): + block = 'block data' + elif re.match(r'python\s*module', block, re.I): + block = 'python module' + elif re.match(r'abstract\s*interface', block, re.I): + block = 'abstract interface' + if block == 'type': + name, attrs, _ = _resolvetypedefpattern(m.group('after')) + groupcache[groupcounter]['vars'][name] = dict(attrspec = attrs) + args = [] + result = None + else: + name, args, result, bindcline = _resolvenameargspattern(m.group('after')) + if name is None: + if block == 'block data': + name = '_BLOCK_DATA_' + else: + name = '' + if block not in ['interface', 'block data', 'abstract interface']: + outmess('analyzeline: No name/args pattern found for line.\n') + + previous_context = (block, name, groupcounter) + if args: + args = rmbadname([x.strip() + for x in markoutercomma(args).split('@,@')]) + else: + args = [] + if '' in args: + while '' in args: + args.remove('') + outmess( + 'analyzeline: argument list is malformed (missing argument).\n') + + # end of crack line => block,name,args,result + needmodule = 0 + needinterface = 0 + + if case in ['call', 'callfun']: + needinterface = 1 + if 'args' not in groupcache[groupcounter]: + return + if name not in groupcache[groupcounter]['args']: + return + for it in grouplist[groupcounter]: + if it['name'] == name: + return + if name in groupcache[groupcounter]['interfaced']: + return + block = {'call': 'subroutine', 'callfun': 'function'}[case] + if f77modulename and neededmodule == -1 and groupcounter <= 1: + neededmodule = groupcounter + 2 + needmodule = 1 + if block not in ['interface', 'abstract interface']: + needinterface = 1 + # Create new block(s) + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + if needmodule: + if verbose > 1: + outmess('analyzeline: Creating module block %s\n' % + repr(f77modulename), 0) + groupname[groupcounter] = 'module' + groupcache[groupcounter]['block'] = 'python module' + groupcache[groupcounter]['name'] = f77modulename + groupcache[groupcounter]['from'] = '' + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + if needinterface: + if verbose > 1: + outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % ( + groupcounter), 0) + groupname[groupcounter] = 'interface' + groupcache[groupcounter]['block'] = 'interface' + groupcache[groupcounter]['name'] = 'unknown_interface' + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + 
grouplist[groupcounter] = [] + groupname[groupcounter] = block + groupcache[groupcounter]['block'] = block + if not name: + name = 'unknown_' + block.replace(' ', '_') + groupcache[groupcounter]['prefix'] = m.group('before') + groupcache[groupcounter]['name'] = rmbadname1(name) + groupcache[groupcounter]['result'] = result + if groupcounter == 1: + groupcache[groupcounter]['from'] = currentfilename + else: + if f77modulename and groupcounter == 3: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], currentfilename) + else: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + for k in list(groupcache[groupcounter].keys()): + if not groupcache[groupcounter][k]: + del groupcache[groupcounter][k] + + groupcache[groupcounter]['args'] = args + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['entry'] = {} + # end of creation + if block == 'type': + groupcache[groupcounter]['varnames'] = [] + + if case in ['call', 'callfun']: # set parents variables + if name not in groupcache[groupcounter - 2]['externals']: + groupcache[groupcounter - 2]['externals'].append(name) + groupcache[groupcounter]['vars'] = copy.deepcopy( + groupcache[groupcounter - 2]['vars']) + try: + del groupcache[groupcounter]['vars'][name][ + groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] + except Exception: + pass + if block in ['function', 'subroutine']: # set global attributes + # name is fortran name + if bindcline: + bindcdat = re.search(crackline_bindlang, bindcline) + if bindcdat: + groupcache[groupcounter]['bindlang'] = {name : {}} + groupcache[groupcounter]['bindlang'][name]["lang"] = bindcdat.group('lang') + if bindcdat.group('lang_name'): + groupcache[groupcounter]['bindlang'][name]["name"] = bindcdat.group('lang_name') + try: + groupcache[groupcounter]['vars'][name] = appenddecl( + groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars']['']) + except Exception: + pass + if case == 'callfun': # return type + if result and result in groupcache[groupcounter]['vars']: + if not name == result: + groupcache[groupcounter]['vars'][name] = appenddecl( + groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result]) + # if groupcounter>1: # name is interfaced + try: + groupcache[groupcounter - 2]['interfaced'].append(name) + except Exception: + pass + if block == 'function': + t = typespattern[0].match(m.group('before') + ' ' + name) + if t: + typespec, selector, attr, edecl = cracktypespec0( + t.group('this'), t.group('after')) + updatevars(typespec, selector, attr, edecl) + + if case in ['call', 'callfun']: + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end routine + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end interface + + elif case == 'entry': + name, args, result, _= _resolvenameargspattern(m.group('after')) + if name is not None: + if args: + args = rmbadname([x.strip() + for x in markoutercomma(args).split('@,@')]) + else: + args = [] + assert result is None, repr(result) + 
groupcache[groupcounter]['entry'][name] = args + previous_context = ('entry', name, groupcounter) + elif case == 'type': + typespec, selector, attr, edecl = cracktypespec0( + block, m.group('after')) + last_name = updatevars(typespec, selector, attr, edecl) + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']: + edecl = groupcache[groupcounter]['vars'] + ll = m.group('after').strip() + i = ll.find('::') + if i < 0 and case == 'intent': + i = markouterparen(ll).find('@)@') - 2 + ll = ll[:i + 1] + '::' + ll[i + 1:] + i = ll.find('::') + if ll[i:] == '::' and 'args' in groupcache[groupcounter]: + outmess('All arguments will have attribute %s%s\n' % + (m.group('this'), ll[:i])) + ll = ll + ','.join(groupcache[groupcounter]['args']) + if i < 0: + i = 0 + pl = '' + else: + pl = ll[:i].strip() + ll = ll[i + 2:] + ch = markoutercomma(pl).split('@,@') + if len(ch) > 1: + pl = ch[0] + outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % ( + ','.join(ch[1:]))) + last_name = None + + for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: + m1 = namepattern.match(e) + if not m1: + if case in ['public', 'private']: + k = '' + else: + print(m.groupdict()) + outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % ( + case, repr(e))) + continue + else: + k = rmbadname1(m1.group('name')) + if case in ['public', 'private'] and \ + (k == 'operator' or k == 'assignment'): + k += m1.group('after') + if k not in edecl: + edecl[k] = {} + if case == 'dimension': + ap = case + m1.group('after') + if case == 'intent': + ap = m.group('this') + pl + if _intentcallbackpattern.match(ap): + if k not in groupcache[groupcounter]['args']: + if groupcounter > 1: + if '__user__' not in groupcache[groupcounter - 2]['name']: + outmess( + 'analyzeline: missing __user__ module (could be nothing)\n') + # fixes ticket 1693 + if k != groupcache[groupcounter]['name']: + outmess('analyzeline: appending intent(callback) %s' + ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) + groupcache[groupcounter]['args'].append(k) + else: + errmess( + 'analyzeline: intent(callback) %s is ignored\n' % (k)) + else: + errmess('analyzeline: intent(callback) %s is already' + ' in argument list\n' % (k)) + if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']: + ap = case + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append(ap) + else: + edecl[k]['attrspec'] = [ap] + if case == 'external': + if groupcache[groupcounter]['block'] == 'program': + outmess('analyzeline: ignoring program arguments\n') + continue + if k not in groupcache[groupcounter]['args']: + continue + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(k) + last_name = k + groupcache[groupcounter]['vars'] = edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'moduleprocedure': + groupcache[groupcounter]['implementedby'] = \ + [x.strip() for x in m.group('after').split(',')] + elif case == 'parameter': + edecl = groupcache[groupcounter]['vars'] + ll = m.group('after').strip()[1:-1] + last_name = None + for e in markoutercomma(ll).split('@,@'): + try: + k, initexpr = [x.strip() for x in e.split('=')] + except Exception: + outmess( + 'analyzeline: could not extract name,expr in 
parameter statement "%s" of "%s"\n' % (e, ll)) + continue + params = get_parameters(edecl) + k = rmbadname1(k) + if k not in edecl: + edecl[k] = {} + if '=' in edecl[k] and (not edecl[k]['='] == initexpr): + outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % ( + k, edecl[k]['='], initexpr)) + t = determineexprtype(initexpr, params) + if t: + if t.get('typespec') == 'real': + tt = list(initexpr) + for m in real16pattern.finditer(initexpr): + tt[m.start():m.end()] = list( + initexpr[m.start():m.end()].lower().replace('d', 'e')) + initexpr = ''.join(tt) + elif t.get('typespec') == 'complex': + initexpr = initexpr[1:].lower().replace('d', 'e').\ + replace(',', '+1j*(') + try: + v = eval(initexpr, {}, params) + except (SyntaxError, NameError, TypeError) as msg: + errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n' + % (initexpr, msg)) + continue + edecl[k]['='] = repr(v) + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append('parameter') + else: + edecl[k]['attrspec'] = ['parameter'] + last_name = k + groupcache[groupcounter]['vars'] = edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'implicit': + if m.group('after').strip().lower() == 'none': + groupcache[groupcounter]['implicit'] = None + elif m.group('after'): + if 'implicit' in groupcache[groupcounter]: + impl = groupcache[groupcounter]['implicit'] + else: + impl = {} + if impl is None: + outmess( + 'analyzeline: Overwriting earlier "implicit none" statement.\n') + impl = {} + for e in markoutercomma(m.group('after')).split('@,@'): + decl = {} + m1 = re.match( + r'\s*(?P.*?)\s*(\(\s*(?P[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) + if not m1: + outmess( + 'analyzeline: could not extract info of implicit statement part "%s"\n' % (e)) + continue + m2 = typespattern4implicit.match(m1.group('this')) + if not m2: + outmess( + 'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e)) + continue + typespec, selector, attr, edecl = cracktypespec0( + m2.group('this'), m2.group('after')) + kindselect, charselect, typename = cracktypespec( + typespec, selector) + decl['typespec'] = typespec + decl['kindselector'] = kindselect + decl['charselector'] = charselect + decl['typename'] = typename + for k in list(decl.keys()): + if not decl[k]: + del decl[k] + for r in markoutercomma(m1.group('after')).split('@,@'): + if '-' in r: + try: + begc, endc = [x.strip() for x in r.split('-')] + except Exception: + outmess( + 'analyzeline: expected "-" instead of "%s" in range list of implicit statement\n' % r) + continue + else: + begc = endc = r.strip() + if not len(begc) == len(endc) == 1: + outmess( + 'analyzeline: expected "-" instead of "%s" in range list of implicit statement (2)\n' % r) + continue + for o in range(ord(begc), ord(endc) + 1): + impl[chr(o)] = decl + groupcache[groupcounter]['implicit'] = impl + elif case == 'data': + ll = [] + dl = '' + il = '' + f = 0 + fc = 1 + inp = 0 + for c in m.group('after'): + if not inp: + if c == "'": + fc = not fc + if c == '/' and fc: + f = f + 1 + continue + if c == '(': + inp = inp + 1 + elif c == ')': + inp = inp - 1 + if f == 0: + dl = dl + c + elif f == 1: + il = il + c + elif f == 2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + dl = c + il = '' + f = 0 + if f == 2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + vars = groupcache[groupcounter].get('vars', {}) + last_name = None + for l in ll: + l[0], 
l[1] = l[0].strip(), l[1].strip() + if l[0].startswith(','): + l[0] = l[0][1:] + if l[0].startswith('('): + outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0]) + continue + for idx, v in enumerate(rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')])): + if v.startswith('('): + outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v) + # XXX: subsequent init expressions may get wrong values. + # Ignoring since data statements are irrelevant for + # wrapping. + continue + if '!' in l[1]: + # Fixes gh-24746 pyf generation + # XXX: This essentially ignores the value for generating the pyf which is fine: + # integer dimension(3) :: mytab + # common /mycom/ mytab + # Since in any case it is initialized in the Fortran code + outmess('Comment line in declaration "%s" is not supported. Skipping.\n' % l[1]) + continue + vars.setdefault(v, {}) + vtype = vars[v].get('typespec') + vdim = getdimension(vars[v]) + matches = re.findall(r"\(.*?\)", l[1]) if vtype == 'complex' else l[1].split(',') + try: + new_val = "(/{}/)".format(", ".join(matches)) if vdim else matches[idx] + except IndexError: + # gh-24746 + # Runs only if above code fails. Fixes the line + # DATA IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 /4*0,0.0D0/ + # by expanding to ['0', '0', '0', '0', '0.0d0'] + if any("*" in m for m in matches): + expanded_list = [] + for match in matches: + if "*" in match: + try: + multiplier, value = match.split("*") + expanded_list.extend([value.strip()] * int(multiplier)) + except ValueError: # if int(multiplier) fails + expanded_list.append(match.strip()) + else: + expanded_list.append(match.strip()) + matches = expanded_list + new_val = "(/{}/)".format(", ".join(matches)) if vdim else matches[idx] + current_val = vars[v].get('=') + if current_val and (current_val != new_val): + outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % (v, current_val, new_val)) + vars[v]['='] = new_val + last_name = v + groupcache[groupcounter]['vars'] = vars + if last_name: + previous_context = ('variable', last_name, groupcounter) + elif case == 'common': + line = m.group('after').strip() + if not line[0] == '/': + line = '//' + line + cl = [] + f = 0 + bn = '' + ol = '' + for c in line: + if c == '/': + f = f + 1 + continue + if f >= 3: + bn = bn.strip() + if not bn: + bn = '_BLNK_' + cl.append([bn, ol]) + f = f - 2 + bn = '' + ol = '' + if f % 2: + bn = bn + c + else: + ol = ol + c + bn = bn.strip() + if not bn: + bn = '_BLNK_' + cl.append([bn, ol]) + commonkey = {} + if 'common' in groupcache[groupcounter]: + commonkey = groupcache[groupcounter]['common'] + for c in cl: + if c[0] not in commonkey: + commonkey[c[0]] = [] + for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]: + if i: + commonkey[c[0]].append(i) + groupcache[groupcounter]['common'] = commonkey + previous_context = ('common', bn, groupcounter) + elif case == 'use': + m1 = re.match( + r'\A\s*(?P\b\w+\b)\s*((,(\s*\bonly\b\s*:|(?P))\s*(?P.*))|)\s*\Z', m.group('after'), re.I) + if m1: + mm = m1.groupdict() + if 'use' not in groupcache[groupcounter]: + groupcache[groupcounter]['use'] = {} + name = m1.group('name') + groupcache[groupcounter]['use'][name] = {} + isonly = 0 + if 'list' in mm and mm['list'] is not None: + if 'notonly' in mm and mm['notonly'] is None: + isonly = 1 + groupcache[groupcounter]['use'][name]['only'] = isonly + ll = [x.strip() for x in mm['list'].split(',')] + rl = {} + for l in ll: + if '=' in l: + m2 = re.match( + 
r'\A\s*(?P\b\w+\b)\s*=\s*>\s*(?P\b\w+\b)\s*\Z', l, re.I) + if m2: + rl[m2.group('local').strip()] = m2.group( + 'use').strip() + else: + outmess( + 'analyzeline: Not local=>use pattern found in %s\n' % repr(l)) + else: + rl[l] = l + groupcache[groupcounter]['use'][name]['map'] = rl + else: + pass + else: + print(m.groupdict()) + outmess('analyzeline: Could not crack the use statement.\n') + elif case in ['f2pyenhancements']: + if 'f2pyenhancements' not in groupcache[groupcounter]: + groupcache[groupcounter]['f2pyenhancements'] = {} + d = groupcache[groupcounter]['f2pyenhancements'] + if m.group('this') == 'usercode' and 'usercode' in d: + if isinstance(d['usercode'], str): + d['usercode'] = [d['usercode']] + d['usercode'].append(m.group('after')) + else: + d[m.group('this')] = m.group('after') + elif case == 'multiline': + if previous_context is None: + if verbose: + outmess('analyzeline: No context for multiline block.\n') + return + gc = groupcounter + appendmultiline(groupcache[gc], + previous_context[:2], + m.group('this')) + else: + if verbose > 1: + print(m.groupdict()) + outmess('analyzeline: No code implemented for line.\n') + + +def appendmultiline(group, context_name, ml): + if 'f2pymultilines' not in group: + group['f2pymultilines'] = {} + d = group['f2pymultilines'] + if context_name not in d: + d[context_name] = [] + d[context_name].append(ml) + return + + +def cracktypespec0(typespec, ll): + selector = None + attr = None + if re.match(r'double\s*complex', typespec, re.I): + typespec = 'double complex' + elif re.match(r'double\s*precision', typespec, re.I): + typespec = 'double precision' + else: + typespec = typespec.strip().lower() + m1 = selectpattern.match(markouterparen(ll)) + if not m1: + outmess( + 'cracktypespec0: no kind/char_selector pattern found for line.\n') + return + d = m1.groupdict() + for k in list(d.keys()): + d[k] = unmarkouterparen(d[k]) + if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']: + selector = d['this'] + ll = d['after'] + i = ll.find('::') + if i >= 0: + attr = ll[:i].strip() + ll = ll[i + 2:] + return typespec, selector, attr, ll +##### +namepattern = re.compile(r'\s*(?P\b\w+\b)\s*(?P.*)\s*\Z', re.I) +kindselector = re.compile( + r'\s*(\(\s*(kind\s*=)?\s*(?P.*)\s*\)|\*\s*(?P.*?))\s*\Z', re.I) +charselector = re.compile( + r'\s*(\((?P.*)\)|\*\s*(?P.*))\s*\Z', re.I) +lenkindpattern = re.compile( + r'\s*(kind\s*=\s*(?P.*?)\s*(@,@\s*len\s*=\s*(?P.*)|)' + r'|(len\s*=\s*|)(?P.*?)\s*(@,@\s*(kind\s*=\s*|)(?P.*)' + r'|(f2py_len\s*=\s*(?P.*))|))\s*\Z', re.I) +lenarraypattern = re.compile( + r'\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@\s*\*\s*(?P.*?)|(\*\s*(?P.*?)|)\s*(@\(@\s*(?!/)\s*(?P.*?)\s*@\)@|))\s*(=\s*(?P.*?)|(@\(@|)/\s*(?P.*?)\s*/(@\)@|)|)\s*\Z', re.I) + + +def removespaces(expr): + expr = expr.strip() + if len(expr) <= 1: + return expr + expr2 = expr[0] + for i in range(1, len(expr) - 1): + if (expr[i] == ' ' and + ((expr[i + 1] in "()[]{}=+-/* ") or + (expr[i - 1] in "()[]{}=+-/* "))): + continue + expr2 = expr2 + expr[i] + expr2 = expr2 + expr[-1] + return expr2 + + +def markinnerspaces(line): + """ + The function replace all spaces in the input variable line which are + surrounded with quotation marks, with the triplet "@_@". 
+ + For instance, for the input "a 'b c'" the function returns "a 'b@_@c'" + + Parameters + ---------- + line : str + + Returns + ------- + str + + """ + fragment = '' + inside = False + current_quote = None + escaped = '' + for c in line: + if escaped == '\\' and c in ['\\', '\'', '"']: + fragment += c + escaped = c + continue + if not inside and c in ['\'', '"']: + current_quote = c + if c == current_quote: + inside = not inside + elif c == ' ' and inside: + fragment += '@_@' + continue + fragment += c + escaped = c # reset to non-backslash + return fragment + + +def updatevars(typespec, selector, attrspec, entitydecl): + """ + Returns last_name, the variable name without special chars, parenthesis + or dimension specifiers. + + Alters groupcache to add the name, typespec, attrspec (and possibly value) + of current variable. + """ + global groupcache, groupcounter + + last_name = None + kindselect, charselect, typename = cracktypespec(typespec, selector) + # Clean up outer commas, whitespace and undesired chars from attrspec + if attrspec: + attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')] + l = [] + c = re.compile(r'(?P[a-zA-Z]+)') + for a in attrspec: + if not a: + continue + m = c.match(a) + if m: + s = m.group('start').lower() + a = s + a[len(s):] + l.append(a) + attrspec = l + el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')] + el1 = [] + for e in el: + for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]: + if e1: + el1.append(e1.replace('@_@', ' ')) + for e in el1: + m = namepattern.match(e) + if not m: + outmess( + 'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e))) + continue + ename = rmbadname1(m.group('name')) + edecl = {} + if ename in groupcache[groupcounter]['vars']: + edecl = groupcache[groupcounter]['vars'][ename].copy() + not_has_typespec = 'typespec' not in edecl + if not_has_typespec: + edecl['typespec'] = typespec + elif typespec and (not typespec == edecl['typespec']): + outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % ( + ename, edecl['typespec'], typespec)) + if 'kindselector' not in edecl: + edecl['kindselector'] = copy.copy(kindselect) + elif kindselect: + for k in list(kindselect.keys()): + if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]): + outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( + k, ename, edecl['kindselector'][k], kindselect[k])) + else: + edecl['kindselector'][k] = copy.copy(kindselect[k]) + if 'charselector' not in edecl and charselect: + if not_has_typespec: + edecl['charselector'] = charselect + else: + errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' + % (ename, charselect)) + elif charselect: + for k in list(charselect.keys()): + if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]): + outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( + k, ename, edecl['charselector'][k], charselect[k])) + else: + edecl['charselector'][k] = copy.copy(charselect[k]) + if 'typename' not in edecl: + edecl['typename'] = typename + elif typename and (not edecl['typename'] == typename): + outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". 
Ignoring.\n' % ( + ename, edecl['typename'], typename)) + if 'attrspec' not in edecl: + edecl['attrspec'] = copy.copy(attrspec) + elif attrspec: + for a in attrspec: + if a not in edecl['attrspec']: + edecl['attrspec'].append(a) + else: + edecl['typespec'] = copy.copy(typespec) + edecl['kindselector'] = copy.copy(kindselect) + edecl['charselector'] = copy.copy(charselect) + edecl['typename'] = typename + edecl['attrspec'] = copy.copy(attrspec) + if 'external' in (edecl.get('attrspec') or []) and e in groupcache[groupcounter]['args']: + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(e) + if m.group('after'): + m1 = lenarraypattern.match(markouterparen(m.group('after'))) + if m1: + d1 = m1.groupdict() + for lk in ['len', 'array', 'init']: + if d1[lk + '2'] is not None: + d1[lk] = d1[lk + '2'] + del d1[lk + '2'] + for k in list(d1.keys()): + if d1[k] is not None: + d1[k] = unmarkouterparen(d1[k]) + else: + del d1[k] + + if 'len' in d1 and 'array' in d1: + if d1['len'] == '': + d1['len'] = d1['array'] + del d1['array'] + elif typespec == 'character': + if ('charselector' not in edecl) or (not edecl['charselector']): + edecl['charselector'] = {} + if 'len' in edecl['charselector']: + del edecl['charselector']['len'] + edecl['charselector']['*'] = d1['len'] + del d1['len'] + else: + d1['array'] = d1['array'] + ',' + d1['len'] + del d1['len'] + errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( + typespec, e, typespec, ename, d1['array'])) + + if 'len' in d1: + if typespec in ['complex', 'integer', 'logical', 'real']: + if ('kindselector' not in edecl) or (not edecl['kindselector']): + edecl['kindselector'] = {} + edecl['kindselector']['*'] = d1['len'] + del d1['len'] + elif typespec == 'character': + if ('charselector' not in edecl) or (not edecl['charselector']): + edecl['charselector'] = {} + if 'len' in edecl['charselector']: + del edecl['charselector']['len'] + edecl['charselector']['*'] = d1['len'] + del d1['len'] + + if 'init' in d1: + if '=' in edecl and (not edecl['='] == d1['init']): + outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % ( + ename, edecl['='], d1['init'])) + else: + edecl['='] = d1['init'] + + if 'array' in d1: + dm = 'dimension(%s)' % d1['array'] + if 'attrspec' not in edecl or (not edecl['attrspec']): + edecl['attrspec'] = [dm] + else: + edecl['attrspec'].append(dm) + for dm1 in edecl['attrspec']: + if dm1[:9] == 'dimension' and dm1 != dm: + del edecl['attrspec'][-1] + errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' + % (ename, dm1, dm)) + break + + else: + outmess('updatevars: could not crack entity declaration "%s". 
Ignoring.\n' % ( + ename + m.group('after'))) + for k in list(edecl.keys()): + if not edecl[k]: + del edecl[k] + groupcache[groupcounter]['vars'][ename] = edecl + if 'varnames' in groupcache[groupcounter]: + groupcache[groupcounter]['varnames'].append(ename) + last_name = ename + return last_name + + +def cracktypespec(typespec, selector): + kindselect = None + charselect = None + typename = None + if selector: + if typespec in ['complex', 'integer', 'logical', 'real']: + kindselect = kindselector.match(selector) + if not kindselect: + outmess( + 'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector))) + return + kindselect = kindselect.groupdict() + kindselect['*'] = kindselect['kind2'] + del kindselect['kind2'] + for k in list(kindselect.keys()): + if not kindselect[k]: + del kindselect[k] + for k, i in list(kindselect.items()): + kindselect[k] = rmbadname1(i) + elif typespec == 'character': + charselect = charselector.match(selector) + if not charselect: + outmess( + 'cracktypespec: no charselector pattern found for %s\n' % (repr(selector))) + return + charselect = charselect.groupdict() + charselect['*'] = charselect['charlen'] + del charselect['charlen'] + if charselect['lenkind']: + lenkind = lenkindpattern.match( + markoutercomma(charselect['lenkind'])) + lenkind = lenkind.groupdict() + for lk in ['len', 'kind']: + if lenkind[lk + '2']: + lenkind[lk] = lenkind[lk + '2'] + charselect[lk] = lenkind[lk] + del lenkind[lk + '2'] + if lenkind['f2py_len'] is not None: + # used to specify the length of assumed length strings + charselect['f2py_len'] = lenkind['f2py_len'] + del charselect['lenkind'] + for k in list(charselect.keys()): + if not charselect[k]: + del charselect[k] + for k, i in list(charselect.items()): + charselect[k] = rmbadname1(i) + elif typespec == 'type': + typename = re.match(r'\s*\(\s*(?P\w+)\s*\)', selector, re.I) + if typename: + typename = typename.group('name') + else: + outmess('cracktypespec: no typename found in %s\n' % + (repr(typespec + selector))) + else: + outmess('cracktypespec: no selector used for %s\n' % + (repr(selector))) + return kindselect, charselect, typename +###### + + +def setattrspec(decl, attr, force=0): + if not decl: + decl = {} + if not attr: + return decl + if 'attrspec' not in decl: + decl['attrspec'] = [attr] + return decl + if force: + decl['attrspec'].append(attr) + if attr in decl['attrspec']: + return decl + if attr == 'static' and 'automatic' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'automatic' and 'static' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'public': + if 'private' not in decl['attrspec']: + decl['attrspec'].append(attr) + elif attr == 'private': + if 'public' not in decl['attrspec']: + decl['attrspec'].append(attr) + else: + decl['attrspec'].append(attr) + return decl + + +def setkindselector(decl, sel, force=0): + if not decl: + decl = {} + if not sel: + return decl + if 'kindselector' not in decl: + decl['kindselector'] = sel + return decl + for k in list(sel.keys()): + if force or k not in decl['kindselector']: + decl['kindselector'][k] = sel[k] + return decl + + +def setcharselector(decl, sel, force=0): + if not decl: + decl = {} + if not sel: + return decl + if 'charselector' not in decl: + decl['charselector'] = sel + return decl + + for k in list(sel.keys()): + if force or k not in decl['charselector']: + decl['charselector'][k] = sel[k] + return decl + + +def getblockname(block, unknown='unknown'): + if 'name' in block: + return 
block['name'] + return unknown + +# post processing + + +def setmesstext(block): + global filepositiontext + + try: + filepositiontext = 'In: %s:%s\n' % (block['from'], block['name']) + except Exception: + pass + + +def get_usedict(block): + usedict = {} + if 'parent_block' in block: + usedict = get_usedict(block['parent_block']) + if 'use' in block: + usedict.update(block['use']) + return usedict + + +def get_useparameters(block, param_map=None): + global f90modulevars + + if param_map is None: + param_map = {} + usedict = get_usedict(block) + if not usedict: + return param_map + for usename, mapping in list(usedict.items()): + usename = usename.lower() + if usename not in f90modulevars: + outmess('get_useparameters: no module %s info used by %s\n' % + (usename, block.get('name'))) + continue + mvars = f90modulevars[usename] + params = get_parameters(mvars) + if not params: + continue + # XXX: apply mapping + if mapping: + errmess('get_useparameters: mapping for %s not impl.\n' % (mapping)) + for k, v in list(params.items()): + if k in param_map: + outmess('get_useparameters: overriding parameter %s with' + ' value from module %s\n' % (repr(k), repr(usename))) + param_map[k] = v + + return param_map + + +def postcrack2(block, tab='', param_map=None): + global f90modulevars + + if not f90modulevars: + return block + if isinstance(block, list): + ret = [postcrack2(g, tab=tab + '\t', param_map=param_map) + for g in block] + return ret + setmesstext(block) + outmess('%sBlock: %s\n' % (tab, block['name']), 0) + + if param_map is None: + param_map = get_useparameters(block) + + if param_map is not None and 'vars' in block: + vars = block['vars'] + for n in list(vars.keys()): + var = vars[n] + if 'kindselector' in var: + kind = var['kindselector'] + if 'kind' in kind: + val = kind['kind'] + if val in param_map: + kind['kind'] = param_map[val] + new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map) + for b in block['body']] + block['body'] = new_body + + return block + + +def postcrack(block, args=None, tab=''): + """ + TODO: + function return values + determine expression types if in argument list + """ + global usermodules, onlyfunctions + + if isinstance(block, list): + gret = [] + uret = [] + for g in block: + setmesstext(g) + g = postcrack(g, tab=tab + '\t') + # sort user routines to appear first + if 'name' in g and '__user__' in g['name']: + uret.append(g) + else: + gret.append(g) + return uret + gret + setmesstext(block) + if not isinstance(block, dict) and 'block' not in block: + raise Exception('postcrack: Expected block dictionary instead of ' + + str(block)) + if 'name' in block and not block['name'] == 'unknown_interface': + outmess('%sBlock: %s\n' % (tab, block['name']), 0) + block = analyzeargs(block) + block = analyzecommon(block) + block['vars'] = analyzevars(block) + block['sortvars'] = sortvarnames(block['vars']) + if 'args' in block and block['args']: + args = block['args'] + block['body'] = analyzebody(block, args, tab=tab) + + userisdefined = [] + if 'use' in block: + useblock = block['use'] + for k in list(useblock.keys()): + if '__user__' in k: + userisdefined.append(k) + else: + useblock = {} + name = '' + if 'name' in block: + name = block['name'] + # and not userisdefined: # Build a __user__ module + if 'externals' in block and block['externals']: + interfaced = [] + if 'interfaced' in block: + interfaced = block['interfaced'] + mvars = copy.copy(block['vars']) + if name: + mname = name + '__user__routines' + else: + mname = 'unknown__user__routines' + if mname 
in userisdefined: + i = 1 + while '%s_%i' % (mname, i) in userisdefined: + i = i + 1 + mname = '%s_%i' % (mname, i) + interface = {'block': 'interface', 'body': [], + 'vars': {}, 'name': name + '_user_interface'} + for e in block['externals']: + if e in interfaced: + edef = [] + j = -1 + for b in block['body']: + j = j + 1 + if b['block'] == 'interface': + i = -1 + for bb in b['body']: + i = i + 1 + if 'name' in bb and bb['name'] == e: + edef = copy.copy(bb) + del b['body'][i] + break + if edef: + if not b['body']: + del block['body'][j] + del interfaced[interfaced.index(e)] + break + interface['body'].append(edef) + else: + if e in mvars and not isexternal(mvars[e]): + interface['vars'][e] = mvars[e] + if interface['vars'] or interface['body']: + block['interfaced'] = interfaced + mblock = {'block': 'python module', 'body': [ + interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']} + useblock[mname] = {} + usermodules.append(mblock) + if useblock: + block['use'] = useblock + return block + + +def sortvarnames(vars): + indep = [] + dep = [] + for v in list(vars.keys()): + if 'depend' in vars[v] and vars[v]['depend']: + dep.append(v) + else: + indep.append(v) + n = len(dep) + i = 0 + while dep: # XXX: How to catch dependence cycles correctly? + v = dep[0] + fl = 0 + for w in dep[1:]: + if w in vars[v]['depend']: + fl = 1 + break + if fl: + dep = dep[1:] + [v] + i = i + 1 + if i > n: + errmess('sortvarnames: failed to compute dependencies because' + ' of cyclic dependencies between ' + + ', '.join(dep) + '\n') + indep = indep + dep + break + else: + indep.append(v) + dep = dep[1:] + n = len(dep) + i = 0 + return indep + + +def analyzecommon(block): + if not hascommon(block): + return block + commonvars = [] + for k in list(block['common'].keys()): + comvars = [] + for e in block['common'][k]: + m = re.match( + r'\A\s*\b(?P.*?)\b\s*(\((?P.*?)\)|)\s*\Z', e, re.I) + if m: + dims = [] + if m.group('dims'): + dims = [x.strip() + for x in markoutercomma(m.group('dims')).split('@,@')] + n = rmbadname1(m.group('name').strip()) + if n in block['vars']: + if 'attrspec' in block['vars'][n]: + block['vars'][n]['attrspec'].append( + 'dimension(%s)' % (','.join(dims))) + else: + block['vars'][n]['attrspec'] = [ + 'dimension(%s)' % (','.join(dims))] + else: + if dims: + block['vars'][n] = { + 'attrspec': ['dimension(%s)' % (','.join(dims))]} + else: + block['vars'][n] = {} + if n not in commonvars: + commonvars.append(n) + else: + n = e + errmess( + 'analyzecommon: failed to extract "[()]" from "%s" in common /%s/.\n' % (e, k)) + comvars.append(n) + block['common'][k] = comvars + if 'commonvars' not in block: + block['commonvars'] = commonvars + else: + block['commonvars'] = block['commonvars'] + commonvars + return block + + +def analyzebody(block, args, tab=''): + global usermodules, skipfuncs, onlyfuncs, f90modulevars + + setmesstext(block) + + maybe_private = { + key: value + for key, value in block['vars'].items() + if 'attrspec' not in value or 'public' not in value['attrspec'] + } + + body = [] + for b in block['body']: + b['parent_block'] = block + if b['block'] in ['function', 'subroutine']: + if args is not None and b['name'] not in args: + continue + else: + as_ = b['args'] + # Add private members to skipfuncs for gh-23879 + if b['name'] in maybe_private.keys(): + skipfuncs.append(b['name']) + if b['name'] in skipfuncs: + continue + if onlyfuncs and b['name'] not in onlyfuncs: + continue + b['saved_interface'] = crack2fortrangen( + b, '\n' + ' ' * 6, as_interface=True) + + 
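# any other block type (module, interface, BLOCK DATA, ...) is
+        # post-processed below with the caller's argument list
+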
else: + as_ = args + b = postcrack(b, as_, tab=tab + '\t') + if b['block'] in ['interface', 'abstract interface'] and \ + not b['body'] and not b.get('implementedby'): + if 'f2pyenhancements' not in b: + continue + if b['block'].replace(' ', '') == 'pythonmodule': + usermodules.append(b) + else: + if b['block'] == 'module': + f90modulevars[b['name']] = b['vars'] + body.append(b) + return body + + +def buildimplicitrules(block): + setmesstext(block) + implicitrules = defaultimplicitrules + attrrules = {} + if 'implicit' in block: + if block['implicit'] is None: + implicitrules = None + if verbose > 1: + outmess( + 'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name'])) + else: + for k in list(block['implicit'].keys()): + if block['implicit'][k].get('typespec') not in ['static', 'automatic']: + implicitrules[k] = block['implicit'][k] + else: + attrrules[k] = block['implicit'][k]['typespec'] + return implicitrules, attrrules + + +def myeval(e, g=None, l=None): + """ Like `eval` but returns only integers and floats """ + r = eval(e, g, l) + if type(r) in [int, float]: + return r + raise ValueError('r=%r' % (r)) + +getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I) + + +def getlincoef(e, xset): # e = a*x+b ; x in xset + """ + Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` is a symbol in + xset. + + >>> getlincoef('2*x + 1', {'x'}) + (2, 1, 'x') + >>> getlincoef('3*x + x*2 + 2 + 1', {'x'}) + (5, 3, 'x') + >>> getlincoef('0', {'x'}) + (0, 0, None) + >>> getlincoef('0*x', {'x'}) + (0, 0, 'x') + >>> getlincoef('x*x', {'x'}) + (None, None, None) + + This can be tricked by sufficiently complex expressions + + >>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'}) + (2.0, 3.0, 'x') + """ + try: + c = int(myeval(e, {}, {})) + return 0, c, None + except Exception: + pass + if getlincoef_re_1.match(e): + return 1, 0, e + len_e = len(e) + for x in xset: + if len(x) > len_e: + continue + if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e): + # skip function calls having x as an argument, e.g max(1, x) + continue + re_1 = re.compile(r'(?P.*?)\b' + x + r'\b(?P.*)', re.I) + m = re_1.match(e) + if m: + try: + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 0, m1.group('after')) + m1 = re_1.match(ee) + b = myeval(ee, {}, {}) + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 1, m1.group('after')) + m1 = re_1.match(ee) + a = myeval(ee, {}, {}) - b + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 0.5, m1.group('after')) + m1 = re_1.match(ee) + c = myeval(ee, {}, {}) + # computing another point to be sure that expression is linear + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s' % ( + m1.group('before'), 1.5, m1.group('after')) + m1 = re_1.match(ee) + c2 = myeval(ee, {}, {}) + if (a * 0.5 + b == c and a * 1.5 + b == c2): + return a, b, x + except Exception: + pass + break + return None, None, None + + +word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I) + + +def _get_depend_dict(name, vars, deps): + if name in vars: + words = vars[name].get('depend', []) + + if '=' in vars[name] and not isstring(vars[name]): + for word in word_pattern.findall(vars[name]['=']): + # The word_pattern may return values that are not + # only variables, they can be string content for instance + if word not in words and word in vars and word != name: + words.append(word) + for word in words[:]: + for w in deps.get(word, []) \ + or _get_depend_dict(word, vars, deps): + if w not in words: + words.append(w) + else: + 
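# the name is not a known variable here; report it and fall
+        # back to an empty dependency list
+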
outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name))) + words = [] + deps[name] = words + return words + + +def _calc_depend_dict(vars): + names = list(vars.keys()) + depend_dict = {} + for n in names: + _get_depend_dict(n, vars, depend_dict) + return depend_dict + + +def get_sorted_names(vars): + depend_dict = _calc_depend_dict(vars) + names = [] + for name in list(depend_dict.keys()): + if not depend_dict[name]: + names.append(name) + del depend_dict[name] + while depend_dict: + for name, lst in list(depend_dict.items()): + new_lst = [n for n in lst if n in depend_dict] + if not new_lst: + names.append(name) + del depend_dict[name] + else: + depend_dict[name] = new_lst + return [name for name in names if name in vars] + + +def _kind_func(string): + # XXX: return something sensible. + if string[0] in "'\"": + string = string[1:-1] + if real16pattern.match(string): + return 8 + elif real8pattern.match(string): + return 4 + return 'kind(' + string + ')' + + +def _selected_int_kind_func(r): + # XXX: This should be processor dependent + m = 10 ** r + if m <= 2 ** 8: + return 1 + if m <= 2 ** 16: + return 2 + if m <= 2 ** 32: + return 4 + if m <= 2 ** 63: + return 8 + if m <= 2 ** 128: + return 16 + return -1 + + +def _selected_real_kind_func(p, r=0, radix=0): + # XXX: This should be processor dependent + # This is only verified for 0 <= p <= 20, possibly good for p <= 33 and above + if p < 7: + return 4 + if p < 16: + return 8 + machine = platform.machine().lower() + if machine.startswith(('aarch64', 'alpha', 'arm64', 'loongarch', 'mips', 'power', 'ppc', 'riscv', 's390x', 'sparc')): + if p <= 33: + return 16 + else: + if p < 19: + return 10 + elif p <= 33: + return 16 + return -1 + + +def get_parameters(vars, global_params={}): + params = copy.copy(global_params) + g_params = copy.copy(global_params) + for name, func in [('kind', _kind_func), + ('selected_int_kind', _selected_int_kind_func), + ('selected_real_kind', _selected_real_kind_func), ]: + if name not in g_params: + g_params[name] = func + param_names = [] + for n in get_sorted_names(vars): + if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']: + param_names.append(n) + kind_re = re.compile(r'\bkind\s*\(\s*(?P.*)\s*\)', re.I) + selected_int_kind_re = re.compile( + r'\bselected_int_kind\s*\(\s*(?P.*)\s*\)', re.I) + selected_kind_re = re.compile( + r'\bselected_(int|real)_kind\s*\(\s*(?P.*)\s*\)', re.I) + for n in param_names: + if '=' in vars[n]: + v = vars[n]['='] + if islogical(vars[n]): + v = v.lower() + for repl in [ + ('.false.', 'False'), + ('.true.', 'True'), + # TODO: test .eq., .neq., etc replacements. + ]: + v = v.replace(*repl) + + v = kind_re.sub(r'kind("\1")', v) + v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v) + + # We need to act according to the data. + # The easy case is if the data has a kind-specifier, + # then we may easily remove those specifiers. + # However, it may be that the user uses other specifiers...(!) + is_replaced = False + + if 'kindselector' in vars[n]: + # Remove kind specifier (including those defined + # by parameters) + if 'kind' in vars[n]['kindselector']: + orig_v_len = len(v) + v = v.replace('_' + vars[n]['kindselector']['kind'], '') + # Again, this will be true if even a single specifier + # has been replaced, see comment above. 
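+                    # e.g. for "real(dp), parameter :: x = 1.0_dp" the value
+                    # arrives here as "1.0_dp"; stripping the "_dp" kind
+                    # suffix leaves the plain literal "1.0"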
+ is_replaced = len(v) < orig_v_len + + if not is_replaced: + if not selected_kind_re.match(v): + v_ = v.split('_') + # In case there are additive parameters + if len(v_) > 1: + v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '') + + # Currently this will not work for complex numbers. + # There is missing code for extracting a complex number, + # which may be defined in either of these: + # a) (Re, Im) + # b) cmplx(Re, Im) + # c) dcmplx(Re, Im) + # d) cmplx(Re, Im, ) + + if isdouble(vars[n]): + tt = list(v) + for m in real16pattern.finditer(v): + tt[m.start():m.end()] = list( + v[m.start():m.end()].lower().replace('d', 'e')) + v = ''.join(tt) + + elif iscomplex(vars[n]): + outmess(f'get_parameters[TODO]: ' + f'implement evaluation of complex expression {v}\n') + + dimspec = ([s.lstrip('dimension').strip() + for s in vars[n]['attrspec'] + if s.startswith('dimension')] or [None])[0] + + # Handle _dp for gh-6624 + # Also fixes gh-20460 + if real16pattern.search(v): + v = 8 + elif real8pattern.search(v): + v = 4 + try: + params[n] = param_eval(v, g_params, params, dimspec=dimspec) + except Exception as msg: + params[n] = v + outmess(f'get_parameters: got "{msg}" on {n!r}\n') + + if isstring(vars[n]) and isinstance(params[n], int): + params[n] = chr(params[n]) + nl = n.lower() + if nl != n: + params[nl] = params[n] + else: + print(vars[n]) + outmess(f'get_parameters:parameter {n!r} does not have value?!\n') + return params + + +def _eval_length(length, params): + if length in ['(:)', '(*)', '*']: + return '(*)' + return _eval_scalar(length, params) + + +_is_kind_number = re.compile(r'\d+_').match + + +def _eval_scalar(value, params): + if _is_kind_number(value): + value = value.split('_')[0] + try: + # TODO: use symbolic from PR #19805 + value = eval(value, {}, params) + value = (repr if isinstance(value, str) else str)(value) + except (NameError, SyntaxError, TypeError): + return value + except Exception as msg: + errmess('"%s" in evaluating %r ' + '(available names: %s)\n' + % (msg, value, list(params.keys()))) + return value + + +def analyzevars(block): + """ + Sets correct dimension information for each variable/parameter + """ + + global f90modulevars + + setmesstext(block) + implicitrules, attrrules = buildimplicitrules(block) + vars = copy.copy(block['vars']) + if block['block'] == 'function' and block['name'] not in vars: + vars[block['name']] = {} + if '' in block['vars']: + del vars[''] + if 'attrspec' in block['vars']['']: + gen = block['vars']['']['attrspec'] + for n in set(vars) | set(b['name'] for b in block['body']): + for k in ['public', 'private']: + if k in gen: + vars[n] = setattrspec(vars.get(n, {}), k) + svars = [] + args = block['args'] + for a in args: + try: + vars[a] + svars.append(a) + except KeyError: + pass + for n in list(vars.keys()): + if n not in args: + svars.append(n) + + params = get_parameters(vars, get_useparameters(block)) + # At this point, params are read and interpreted, but + # the params used to define vars are not yet parsed + dep_matches = {} + name_match = re.compile(r'[A-Za-z][\w$]*').match + for v in list(vars.keys()): + m = name_match(v) + if m: + n = v[m.start():m.end()] + try: + dep_matches[n] + except KeyError: + dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match + for n in svars: + if n[0] in list(attrrules.keys()): + vars[n] = setattrspec(vars[n], attrrules[n[0]]) + if 'typespec' not in vars[n]: + if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): + if implicitrules: + ln0 = n[0].lower() + for k in 
list(implicitrules[ln0].keys()): + if k == 'typespec' and implicitrules[ln0][k] == 'undefined': + continue + if k not in vars[n]: + vars[n][k] = implicitrules[ln0][k] + elif k == 'attrspec': + for l in implicitrules[ln0][k]: + vars[n] = setattrspec(vars[n], l) + elif n in block['args']: + outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % ( + repr(n), block['name'])) + if 'charselector' in vars[n]: + if 'len' in vars[n]['charselector']: + l = vars[n]['charselector']['len'] + try: + l = str(eval(l, {}, params)) + except Exception: + pass + vars[n]['charselector']['len'] = l + + if 'kindselector' in vars[n]: + if 'kind' in vars[n]['kindselector']: + l = vars[n]['kindselector']['kind'] + try: + l = str(eval(l, {}, params)) + except Exception: + pass + vars[n]['kindselector']['kind'] = l + + dimension_exprs = {} + if 'attrspec' in vars[n]: + attr = vars[n]['attrspec'] + attr.reverse() + vars[n]['attrspec'] = [] + dim, intent, depend, check, note = None, None, None, None, None + for a in attr: + if a[:9] == 'dimension': + dim = (a[9:].strip())[1:-1] + elif a[:6] == 'intent': + intent = (a[6:].strip())[1:-1] + elif a[:6] == 'depend': + depend = (a[6:].strip())[1:-1] + elif a[:5] == 'check': + check = (a[5:].strip())[1:-1] + elif a[:4] == 'note': + note = (a[4:].strip())[1:-1] + else: + vars[n] = setattrspec(vars[n], a) + if intent: + if 'intent' not in vars[n]: + vars[n]['intent'] = [] + for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: + # Remove spaces so that 'in out' becomes 'inout' + tmp = c.replace(' ', '') + if tmp not in vars[n]['intent']: + vars[n]['intent'].append(tmp) + intent = None + if note: + note = note.replace('\\n\\n', '\n\n') + note = note.replace('\\n ', '\n') + if 'note' not in vars[n]: + vars[n]['note'] = [note] + else: + vars[n]['note'].append(note) + note = None + if depend is not None: + if 'depend' not in vars[n]: + vars[n]['depend'] = [] + for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): + if c not in vars[n]['depend']: + vars[n]['depend'].append(c) + depend = None + if check is not None: + if 'check' not in vars[n]: + vars[n]['check'] = [] + for c in [x.strip() for x in markoutercomma(check).split('@,@')]: + if c not in vars[n]['check']: + vars[n]['check'].append(c) + check = None + if dim and 'dimension' not in vars[n]: + vars[n]['dimension'] = [] + for d in rmbadname( + [x.strip() for x in markoutercomma(dim).split('@,@')] + ): + # d is the expression inside the dimension declaration + # Evaluate `d` with respect to params + try: + # the dimension for this variable depends on a + # previously defined parameter + d = param_parse(d, params) + except (ValueError, IndexError, KeyError): + outmess( + ('analyzevars: could not parse dimension for ' + f'variable {d!r}\n') + ) + + dim_char = ':' if d == ':' else '*' + if d == dim_char: + dl = [dim_char] + else: + dl = markoutercomma(d, ':').split('@:@') + if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*) + dl = ['*'] + d = '*' + if len(dl) == 1 and dl[0] != dim_char: + dl = ['1', dl[0]] + if len(dl) == 2: + d1, d2 = map(symbolic.Expr.parse, dl) + dsize = d2 - d1 + 1 + d = dsize.tostring(language=symbolic.Language.C) + # find variables v that define d as a linear + # function, `d == a * v + b`, and store + # coefficients a and b for further analysis. 
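+                        # e.g. dimension(2*n + 1) yields dsize == 2*n + 1,
+                        # so a == 2 and b == 1, and n can later be recovered
+                        # from a runtime extent as n = (extent - b) / a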
+ solver_and_deps = {} + for v in block['vars']: + s = symbolic.as_symbol(v) + if dsize.contains(s): + try: + a, b = dsize.linear_solve(s) + + def solve_v(s, a=a, b=b): + return (s - b) / a + + all_symbols = set(a.symbols()) + all_symbols.update(b.symbols()) + except RuntimeError as msg: + # d is not a linear function of v, + # however, if v can be determined + # from d using other means, + # implement the corresponding + # solve_v function here. + solve_v = None + all_symbols = set(dsize.symbols()) + v_deps = set( + s.data for s in all_symbols + if s.data in vars) + solver_and_deps[v] = solve_v, list(v_deps) + # Note that dsize may contain symbols that are + # not defined in block['vars']. Here we assume + # these correspond to Fortran/C intrinsic + # functions or that are defined by other + # means. We'll let the compiler validate the + # definiteness of such symbols. + dimension_exprs[d] = solver_and_deps + vars[n]['dimension'].append(d) + + if 'check' not in vars[n] and 'args' in block and n in block['args']: + # n is an argument that has no checks defined. Here we + # generate some consistency checks for n, and when n is an + # array, generate checks for its dimensions and construct + # initialization expressions. + n_deps = vars[n].get('depend', []) + n_checks = [] + n_is_input = l_or(isintent_in, isintent_inout, + isintent_inplace)(vars[n]) + if isarray(vars[n]): # n is array + for i, d in enumerate(vars[n]['dimension']): + coeffs_and_deps = dimension_exprs.get(d) + if coeffs_and_deps is None: + # d is `:` or `*` or a constant expression + pass + elif n_is_input: + # n is an input array argument and its shape + # may define variables used in dimension + # specifications. + for v, (solver, deps) in coeffs_and_deps.items(): + def compute_deps(v, deps): + for v1 in coeffs_and_deps.get(v, [None, []])[1]: + if v1 not in deps: + deps.add(v1) + compute_deps(v1, deps) + all_deps = set() + compute_deps(v, all_deps) + if ((v in n_deps + or '=' in vars[v] + or 'depend' in vars[v])): + # Skip a variable that + # - n depends on + # - has user-defined initialization expression + # - has user-defined dependencies + continue + if solver is not None and v not in all_deps: + # v can be solved from d, hence, we + # make it an optional argument with + # initialization expression: + is_required = False + init = solver(symbolic.as_symbol( + f'shape({n}, {i})')) + init = init.tostring( + language=symbolic.Language.C) + vars[v]['='] = init + # n needs to be initialized before v. So, + # making v dependent on n and on any + # variables in solver or d. 
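+                                    # e.g. for an intent(in) array x with
+                                    # dimension(m), m becomes optional, gets
+                                    # the default shape(x, 0) and depends on x
+                                    # (illustrative names only)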
+ vars[v]['depend'] = [n] + deps + if 'check' not in vars[v]: + # add check only when no + # user-specified checks exist + vars[v]['check'] = [ + f'shape({n}, {i}) == {d}'] + else: + # d is a non-linear function on v, + # hence, v must be a required input + # argument that n will depend on + is_required = True + if 'intent' not in vars[v]: + vars[v]['intent'] = [] + if 'in' not in vars[v]['intent']: + vars[v]['intent'].append('in') + # v needs to be initialized before n + n_deps.append(v) + n_checks.append( + f'shape({n}, {i}) == {d}') + v_attr = vars[v].get('attrspec', []) + if not ('optional' in v_attr + or 'required' in v_attr): + v_attr.append( + 'required' if is_required else 'optional') + if v_attr: + vars[v]['attrspec'] = v_attr + if coeffs_and_deps is not None: + # extend v dependencies with ones specified in attrspec + for v, (solver, deps) in coeffs_and_deps.items(): + v_deps = vars[v].get('depend', []) + for aa in vars[v].get('attrspec', []): + if aa.startswith('depend'): + aa = ''.join(aa.split()) + v_deps.extend(aa[7:-1].split(',')) + if v_deps: + vars[v]['depend'] = list(set(v_deps)) + if n not in v_deps: + n_deps.append(v) + elif isstring(vars[n]): + if 'charselector' in vars[n]: + if '*' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['*'], + params) + vars[n]['charselector']['*'] = length + elif 'len' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['len'], + params) + del vars[n]['charselector']['len'] + vars[n]['charselector']['*'] = length + if n_checks: + vars[n]['check'] = n_checks + if n_deps: + vars[n]['depend'] = list(set(n_deps)) + + if '=' in vars[n]: + if 'attrspec' not in vars[n]: + vars[n]['attrspec'] = [] + if ('optional' not in vars[n]['attrspec']) and \ + ('required' not in vars[n]['attrspec']): + vars[n]['attrspec'].append('optional') + if 'depend' not in vars[n]: + vars[n]['depend'] = [] + for v, m in list(dep_matches.items()): + if m(vars[n]['=']): + vars[n]['depend'].append(v) + if not vars[n]['depend']: + del vars[n]['depend'] + if isscalar(vars[n]): + vars[n]['='] = _eval_scalar(vars[n]['='], params) + + for n in list(vars.keys()): + if n == block['name']: # n is block name + if 'note' in vars[n]: + block['note'] = vars[n]['note'] + if block['block'] == 'function': + if 'result' in block and block['result'] in vars: + vars[n] = appenddecl(vars[n], vars[block['result']]) + if 'prefix' in block: + pr = block['prefix'] + pr1 = pr.replace('pure', '') + ispure = (not pr == pr1) + pr = pr1.replace('recursive', '') + isrec = (not pr == pr1) + m = typespattern[0].match(pr) + if m: + typespec, selector, attr, edecl = cracktypespec0( + m.group('this'), m.group('after')) + kindselect, charselect, typename = cracktypespec( + typespec, selector) + vars[n]['typespec'] = typespec + try: + if block['result']: + vars[block['result']]['typespec'] = typespec + except Exception: + pass + if kindselect: + if 'kind' in kindselect: + try: + kindselect['kind'] = eval( + kindselect['kind'], {}, params) + except Exception: + pass + vars[n]['kindselector'] = kindselect + if charselect: + vars[n]['charselector'] = charselect + if typename: + vars[n]['typename'] = typename + if ispure: + vars[n] = setattrspec(vars[n], 'pure') + if isrec: + vars[n] = setattrspec(vars[n], 'recursive') + else: + outmess( + 'analyzevars: prefix (%s) were not used\n' % repr(block['prefix'])) + if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']: + if 'commonvars' in block: + neededvars = copy.copy(block['args'] + 
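+                                  # members of COMMON blocks are kept
+                                  # alongside the formal arguments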
block['commonvars']) + else: + neededvars = copy.copy(block['args']) + for n in list(vars.keys()): + if l_or(isintent_callback, isintent_aux)(vars[n]): + neededvars.append(n) + if 'entry' in block: + neededvars.extend(list(block['entry'].keys())) + for k in list(block['entry'].keys()): + for n in block['entry'][k]: + if n not in neededvars: + neededvars.append(n) + if block['block'] == 'function': + if 'result' in block: + neededvars.append(block['result']) + else: + neededvars.append(block['name']) + if block['block'] in ['subroutine', 'function']: + name = block['name'] + if name in vars and 'intent' in vars[name]: + block['intent'] = vars[name]['intent'] + if block['block'] == 'type': + neededvars.extend(list(vars.keys())) + for n in list(vars.keys()): + if n not in neededvars: + del vars[n] + return vars + + +analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I) + + +def param_eval(v, g_params, params, dimspec=None): + """ + Creates a dictionary of indices and values for each parameter in a + parameter array to be evaluated later. + + WARNING: It is not possible to initialize multidimensional array + parameters e.g. dimension(-3:1, 4, 3:5) at this point. This is because in + Fortran initialization through array constructor requires the RESHAPE + intrinsic function. Since the right-hand side of the parameter declaration + is not executed in f2py, but rather at the compiled c/fortran extension, + later, it is not possible to execute a reshape of a parameter array. + One issue remains: if the user wants to access the array parameter from + python, we should either + 1) allow them to access the parameter array using python standard indexing + (which is often incompatible with the original fortran indexing) + 2) allow the parameter array to be accessed in python as a dictionary with + fortran indices as keys + We are choosing 2 for now. + """ + if dimspec is None: + try: + p = eval(v, g_params, params) + except Exception as msg: + p = v + outmess(f'param_eval: got "{msg}" on {v!r}\n') + return p + + # This is an array parameter. + # First, we parse the dimension information + if len(dimspec) < 2 or dimspec[::len(dimspec)-1] != "()": + raise ValueError(f'param_eval: dimension {dimspec} can\'t be parsed') + dimrange = dimspec[1:-1].split(',') + if len(dimrange) == 1: + # e.g. dimension(2) or dimension(-1:1) + dimrange = dimrange[0].split(':') + # now, dimrange is a list of 1 or 2 elements + if len(dimrange) == 1: + bound = param_parse(dimrange[0], params) + dimrange = range(1, int(bound)+1) + else: + lbound = param_parse(dimrange[0], params) + ubound = param_parse(dimrange[1], params) + dimrange = range(int(lbound), int(ubound)+1) + else: + raise ValueError(f'param_eval: multidimensional array parameters ' + '{dimspec} not supported') + + # Parse parameter value + v = (v[2:-2] if v.startswith('(/') else v).split(',') + v_eval = [] + for item in v: + try: + item = eval(item, g_params, params) + except Exception as msg: + outmess(f'param_eval: got "{msg}" on {item!r}\n') + v_eval.append(item) + + p = dict(zip(dimrange, v_eval)) + + return p + + +def param_parse(d, params): + """Recursively parse array dimensions. + + Parses the declaration of an array variable or parameter + `dimension` keyword, and is called recursively if the + dimension for this array is a previously defined parameter + (found in `params`). + + Parameters + ---------- + d : str + Fortran expression describing the dimension of an array. + params : dict + Previously parsed parameters declared in the Fortran source file. 
+ + Returns + ------- + out : str + Parsed dimension expression. + + Examples + -------- + + * If the line being analyzed is + + `integer, parameter, dimension(2) :: pa = (/ 3, 5 /)` + + then `d = 2` and we return immediately, with + + >>> d = '2' + >>> param_parse(d, params) + 2 + + * If the line being analyzed is + + `integer, parameter, dimension(pa) :: pb = (/1, 2, 3/)` + + then `d = 'pa'`; since `pa` is a previously parsed parameter, + and `pa = 3`, we call `param_parse` recursively, to obtain + + >>> d = 'pa' + >>> params = {'pa': 3} + >>> param_parse(d, params) + 3 + + * If the line being analyzed is + + `integer, parameter, dimension(pa(1)) :: pb = (/1, 2, 3/)` + + then `d = 'pa(1)'`; since `pa` is a previously parsed parameter, + and `pa(1) = 3`, we call `param_parse` recursively, to obtain + + >>> d = 'pa(1)' + >>> params = dict(pa={1: 3, 2: 5}) + >>> param_parse(d, params) + 3 + """ + if "(" in d: + # this dimension expression is an array + dname = d[:d.find("(")] + ddims = d[d.find("(")+1:d.rfind(")")] + # this dimension expression is also a parameter; + # parse it recursively + index = int(param_parse(ddims, params)) + return str(params[dname][index]) + elif d in params: + return str(params[d]) + else: + for p in params: + re_1 = re.compile( + r'(?P.*?)\b' + p + r'\b(?P.*)', re.I + ) + m = re_1.match(d) + while m: + d = m.group('before') + \ + str(params[p]) + m.group('after') + m = re_1.match(d) + return d + + +def expr2name(a, block, args=[]): + orig_a = a + a_is_expr = not analyzeargs_re_1.match(a) + if a_is_expr: # `a` is an expression + implicitrules, attrrules = buildimplicitrules(block) + at = determineexprtype(a, block['vars'], implicitrules) + na = 'e_' + for c in a: + c = c.lower() + if c not in string.ascii_lowercase + string.digits: + c = '_' + na = na + c + if na[-1] == '_': + na = na + 'e' + else: + na = na + '_e' + a = na + while a in block['vars'] or a in block['args']: + a = a + 'r' + if a in args: + k = 1 + while a + str(k) in args: + k = k + 1 + a = a + str(k) + if a_is_expr: + block['vars'][a] = at + else: + if a not in block['vars']: + if orig_a in block['vars']: + block['vars'][a] = block['vars'][orig_a] + else: + block['vars'][a] = {} + if 'externals' in block and orig_a in block['externals'] + block['interfaced']: + block['vars'][a] = setattrspec(block['vars'][a], 'external') + return a + + +def analyzeargs(block): + setmesstext(block) + implicitrules, _ = buildimplicitrules(block) + if 'args' not in block: + block['args'] = [] + args = [] + for a in block['args']: + a = expr2name(a, block, args) + args.append(a) + block['args'] = args + if 'entry' in block: + for k, args1 in list(block['entry'].items()): + for a in args1: + if a not in block['vars']: + block['vars'][a] = {} + + for b in block['body']: + if b['name'] in args: + if 'externals' not in block: + block['externals'] = [] + if b['name'] not in block['externals']: + block['externals'].append(b['name']) + if 'result' in block and block['result'] not in block['vars']: + block['vars'][block['result']] = {} + return block + +determineexprtype_re_1 = re.compile(r'\A\(.+?,.+?\)\Z', re.I) +determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P\w+)|)\Z', re.I) +determineexprtype_re_3 = re.compile( + r'\A[+-]?[\d.]+[-\d+de.]*(_(?P\w+)|)\Z', re.I) +determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I) +determineexprtype_re_5 = re.compile(r'\A(?P\w+)\s*\(.*?\)\s*\Z', re.I) + + +def _ensure_exprdict(r): + if isinstance(r, int): + return {'typespec': 'integer'} + if isinstance(r, float): + return 
{'typespec': 'real'} + if isinstance(r, complex): + return {'typespec': 'complex'} + if isinstance(r, dict): + return r + raise AssertionError(repr(r)) + + +def determineexprtype(expr, vars, rules={}): + if expr in vars: + return _ensure_exprdict(vars[expr]) + expr = expr.strip() + if determineexprtype_re_1.match(expr): + return {'typespec': 'complex'} + m = determineexprtype_re_2.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess( + 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) + return {'typespec': 'integer'} + m = determineexprtype_re_3.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess( + 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) + return {'typespec': 'real'} + for op in ['+', '-', '*', '/']: + for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]: + if e in vars: + return _ensure_exprdict(vars[e]) + t = {} + if determineexprtype_re_4.match(expr): # in parenthesis + t = determineexprtype(expr[1:-1], vars, rules) + else: + m = determineexprtype_re_5.match(expr) + if m: + rn = m.group('name') + t = determineexprtype(m.group('name'), vars, rules) + if t and 'attrspec' in t: + del t['attrspec'] + if not t: + if rn[0] in rules: + return _ensure_exprdict(rules[rn[0]]) + if expr[0] in '\'"': + return {'typespec': 'character', 'charselector': {'*': '*'}} + if not t: + outmess( + 'determineexprtype: could not determine expressions (%s) type.\n' % (repr(expr))) + return t + +###### + + +def crack2fortrangen(block, tab='\n', as_interface=False): + global skipfuncs, onlyfuncs + + setmesstext(block) + ret = '' + if isinstance(block, list): + for g in block: + if g and g['block'] in ['function', 'subroutine']: + if g['name'] in skipfuncs: + continue + if onlyfuncs and g['name'] not in onlyfuncs: + continue + ret = ret + crack2fortrangen(g, tab, as_interface=as_interface) + return ret + prefix = '' + name = '' + args = '' + blocktype = block['block'] + if blocktype == 'program': + return '' + argsl = [] + if 'name' in block: + name = block['name'] + if 'args' in block: + vars = block['vars'] + for a in block['args']: + a = expr2name(a, block, argsl) + if not isintent_callback(vars[a]): + argsl.append(a) + if block['block'] == 'function' or argsl: + args = '(%s)' % ','.join(argsl) + f2pyenhancements = '' + if 'f2pyenhancements' in block: + for k in list(block['f2pyenhancements'].keys()): + f2pyenhancements = '%s%s%s %s' % ( + f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k]) + intent_lst = block.get('intent', [])[:] + if blocktype == 'function' and 'callback' in intent_lst: + intent_lst.remove('callback') + if intent_lst: + f2pyenhancements = '%s%sintent(%s) %s' %\ + (f2pyenhancements, tab + tabchar, + ','.join(intent_lst), name) + use = '' + if 'use' in block: + use = use2fortran(block['use'], tab + tabchar) + common = '' + if 'common' in block: + common = common2fortran(block['common'], tab + tabchar) + if name == 'unknown_interface': + name = '' + result = '' + if 'result' in block: + result = ' result (%s)' % block['result'] + if block['result'] not in argsl: + argsl.append(block['result']) + body = crack2fortrangen(block['body'], tab + tabchar, as_interface=as_interface) + vars = vars2fortran( + block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) + mess = '' + if 'from' in block and not as_interface: + mess = '! 
in %s' % block['from'] + if 'entry' in block: + entry_stmts = '' + for k, i in list(block['entry'].items()): + entry_stmts = '%s%sentry %s(%s)' \ + % (entry_stmts, tab + tabchar, k, ','.join(i)) + body = body + entry_stmts + if blocktype == 'block data' and name == '_BLOCK_DATA_': + name = '' + ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % ( + tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) + return ret + + +def common2fortran(common, tab=''): + ret = '' + for k in list(common.keys()): + if k == '_BLNK_': + ret = '%s%scommon %s' % (ret, tab, ','.join(common[k])) + else: + ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k])) + return ret + + +def use2fortran(use, tab=''): + ret = '' + for m in list(use.keys()): + ret = '%s%suse %s,' % (ret, tab, m) + if use[m] == {}: + if ret and ret[-1] == ',': + ret = ret[:-1] + continue + if 'only' in use[m] and use[m]['only']: + ret = '%s only:' % (ret) + if 'map' in use[m] and use[m]['map']: + c = ' ' + for k in list(use[m]['map'].keys()): + if k == use[m]['map'][k]: + ret = '%s%s%s' % (ret, c, k) + c = ',' + else: + ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k]) + c = ',' + if ret and ret[-1] == ',': + ret = ret[:-1] + return ret + + +def true_intent_list(var): + lst = var['intent'] + ret = [] + for intent in lst: + try: + f = globals()['isintent_%s' % intent] + except KeyError: + pass + else: + if f(var): + ret.append(intent) + return ret + + +def vars2fortran(block, vars, args, tab='', as_interface=False): + setmesstext(block) + ret = '' + nout = [] + for a in args: + if a in block['vars']: + nout.append(a) + if 'commonvars' in block: + for a in block['commonvars']: + if a in vars: + if a not in nout: + nout.append(a) + else: + errmess( + 'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a) + if 'varnames' in block: + nout.extend(block['varnames']) + if not as_interface: + for a in list(vars.keys()): + if a not in nout: + nout.append(a) + for a in nout: + if 'depend' in vars[a]: + for d in vars[a]['depend']: + if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: + errmess( + 'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d)) + if 'externals' in block and a in block['externals']: + if isintent_callback(vars[a]): + ret = '%s%sintent(callback) %s' % (ret, tab, a) + ret = '%s%sexternal %s' % (ret, tab, a) + if isoptional(vars[a]): + ret = '%s%soptional %s' % (ret, tab, a) + if a in vars and 'typespec' not in vars[a]: + continue + cont = 1 + for b in block['body']: + if a == b['name'] and b['block'] == 'function': + cont = 0 + break + if cont: + continue + if a not in vars: + show(vars) + outmess('vars2fortran: No definition for argument "%s".\n' % a) + continue + if a == block['name']: + if block['block'] != 'function' or block.get('result'): + # 1) skip declaring a variable that name matches with + # subroutine name + # 2) skip declaring function when its type is + # declared via `result` construction + continue + if 'typespec' not in vars[a]: + if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: + if a in args: + ret = '%s%sexternal %s' % (ret, tab, a) + continue + show(vars[a]) + outmess('vars2fortran: No typespec for argument "%s".\n' % a) + continue + vardef = vars[a]['typespec'] + if vardef == 'type' and 'typename' in vars[a]: + vardef = '%s(%s)' % (vardef, vars[a]['typename']) + selector = {} + if 'kindselector' in vars[a]: + selector = vars[a]['kindselector'] + elif 'charselector' in vars[a]: + 
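# character declarations keep their length/kind here; e.g.
+            # {'*': '10'} renders as character*10 and {'len': 'n'} as
+            # character(len=n)
+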
selector = vars[a]['charselector'] + if '*' in selector: + if selector['*'] in ['*', ':']: + vardef = '%s*(%s)' % (vardef, selector['*']) + else: + vardef = '%s*%s' % (vardef, selector['*']) + else: + if 'len' in selector: + vardef = '%s(len=%s' % (vardef, selector['len']) + if 'kind' in selector: + vardef = '%s,kind=%s)' % (vardef, selector['kind']) + else: + vardef = '%s)' % (vardef) + elif 'kind' in selector: + vardef = '%s(kind=%s)' % (vardef, selector['kind']) + c = ' ' + if 'attrspec' in vars[a]: + attr = [l for l in vars[a]['attrspec'] + if l not in ['external']] + if as_interface and 'intent(in)' in attr and 'intent(out)' in attr: + # In Fortran, intent(in, out) are conflicting while + # intent(in, out) can be specified only via + # `!f2py intent(out) ..`. + # So, for the Fortran interface, we'll drop + # intent(out) to resolve the conflict. + attr.remove('intent(out)') + if attr: + vardef = '%s, %s' % (vardef, ','.join(attr)) + c = ',' + if 'dimension' in vars[a]: + vardef = '%s%sdimension(%s)' % ( + vardef, c, ','.join(vars[a]['dimension'])) + c = ',' + if 'intent' in vars[a]: + lst = true_intent_list(vars[a]) + if lst: + vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst)) + c = ',' + if 'check' in vars[a]: + vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check'])) + c = ',' + if 'depend' in vars[a]: + vardef = '%s%sdepend(%s)' % ( + vardef, c, ','.join(vars[a]['depend'])) + c = ',' + if '=' in vars[a]: + v = vars[a]['='] + if vars[a]['typespec'] in ['complex', 'double complex']: + try: + v = eval(v) + v = '(%s,%s)' % (v.real, v.imag) + except Exception: + pass + vardef = '%s :: %s=%s' % (vardef, a, v) + else: + vardef = '%s :: %s' % (vardef, a) + ret = '%s%s%s' % (ret, tab, vardef) + return ret +###### + + +# We expose post_processing_hooks as global variable so that +# user-libraries could register their own hooks to f2py. +post_processing_hooks = [] + + +def crackfortran(files): + global usermodules, post_processing_hooks + + outmess('Reading fortran codes...\n', 0) + readfortrancode(files, crackline) + outmess('Post-processing...\n', 0) + usermodules = [] + postlist = postcrack(grouplist[0]) + outmess('Applying post-processing hooks...\n', 0) + for hook in post_processing_hooks: + outmess(f' {hook.__name__}\n', 0) + postlist = traverse(postlist, hook) + outmess('Post-processing (stage 2)...\n', 0) + postlist = postcrack2(postlist) + return usermodules + postlist + + +def crack2fortran(block): + global f2py_version + + pyf = crack2fortrangen(block) + '\n' + header = """! -*- f90 -*- +! Note: the context of this file is case sensitive. +""" + footer = """ +! This file was auto-generated with f2py (version:%s). +! See: +! https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e +""" % (f2py_version) + return header + pyf + footer + + +def _is_visit_pair(obj): + return (isinstance(obj, tuple) + and len(obj) == 2 + and isinstance(obj[0], (int, str))) + + +def traverse(obj, visit, parents=[], result=None, *args, **kwargs): + '''Traverse f2py data structure with the following visit function: + + def visit(item, parents, result, *args, **kwargs): + """ + + parents is a list of key-"f2py data structure" pairs from which + items are taken from. + + result is a f2py data structure that is filled with the + return value of the visit function. 
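+
+        For instance, a visit function that lower-cases string keys and
+        stops descending into their values (an illustrative sketch):
+
+            def visit(item, parents, result):
+                key, value = item
+                if isinstance(key, str):
+                    return (key.lower(), value)
+                return None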
+ + item is 2-tuple (index, value) if parents[-1][1] is a list + item is 2-tuple (key, value) if parents[-1][1] is a dict + + The return value of visit must be None, or of the same kind as + item, that is, if parents[-1] is a list, the return value must + be 2-tuple (new_index, new_value), or if parents[-1] is a + dict, the return value must be 2-tuple (new_key, new_value). + + If new_index or new_value is None, the return value of visit + is ignored, that is, it will not be added to the result. + + If the return value is None, the content of obj will be + traversed, otherwise not. + """ + ''' + + if _is_visit_pair(obj): + if obj[0] == 'parent_block': + # avoid infinite recursion + return obj + new_result = visit(obj, parents, result, *args, **kwargs) + if new_result is not None: + assert _is_visit_pair(new_result) + return new_result + parent = obj + result_key, obj = obj + else: + parent = (None, obj) + result_key = None + + if isinstance(obj, list): + new_result = [] + for index, value in enumerate(obj): + new_index, new_item = traverse((index, value), visit, + parents=parents + [parent], + result=result, *args, **kwargs) + if new_index is not None: + new_result.append(new_item) + elif isinstance(obj, dict): + new_result = dict() + for key, value in obj.items(): + new_key, new_value = traverse((key, value), visit, + parents=parents + [parent], + result=result, *args, **kwargs) + if new_key is not None: + new_result[new_key] = new_value + else: + new_result = obj + + if result_key is None: + return new_result + return result_key, new_result + + +def character_backward_compatibility_hook(item, parents, result, + *args, **kwargs): + """Previously, Fortran character was incorrectly treated as + character*1. This hook fixes the usage of the corresponding + variables in `check`, `dimension`, `=`, and `callstatement` + expressions. + + The usage of `char*` in `callprotoargument` expression can be left + unchanged because C `character` is C typedef of `char`, although, + new implementations should use `character*` in the corresponding + expressions. + + See https://github.com/numpy/numpy/pull/19388 for more information. + + """ + parent_key, parent_value = parents[-1] + key, value = item + + def fix_usage(varname, value): + value = re.sub(r'[*]\s*\b' + varname + r'\b', varname, value) + value = re.sub(r'\b' + varname + r'\b\s*[\[]\s*0\s*[\]]', + varname, value) + return value + + if parent_key in ['dimension', 'check']: + assert parents[-3][0] == 'vars' + vars_dict = parents[-3][1] + elif key == '=': + assert parents[-2][0] == 'vars' + vars_dict = parents[-2][1] + else: + vars_dict = None + + new_value = None + if vars_dict is not None: + new_value = value + for varname, vd in vars_dict.items(): + if ischaracter(vd): + new_value = fix_usage(varname, new_value) + elif key == 'callstatement': + vars_dict = parents[-2][1]['vars'] + new_value = value + for varname, vd in vars_dict.items(): + if ischaracter(vd): + # replace all occurrences of `` with + # `&` in argument passing + new_value = re.sub( + r'(? 
<![&])\b' + varname + r'\b', '&' + varname, new_value)
+
+    if new_value is not None:
+        if new_value != value:
+            # report the replacement so that downstream software
+            # can update its sources accordingly
+            outmess(f'character_bc_hook[{parent_key}.{key}]:'
+                    f' replaced `{value}` -> 
`{new_value}`\n', 1) + return (key, new_value) + + +post_processing_hooks.append(character_backward_compatibility_hook) + + +if __name__ == "__main__": + files = [] + funcs = [] + f = 1 + f2 = 0 + f3 = 0 + showblocklist = 0 + for l in sys.argv[1:]: + if l == '': + pass + elif l[0] == ':': + f = 0 + elif l == '-quiet': + quiet = 1 + verbose = 0 + elif l == '-verbose': + verbose = 2 + quiet = 0 + elif l == '-fix': + if strictf77: + outmess( + 'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0) + skipemptyends = 1 + sourcecodeform = 'fix' + elif l == '-skipemptyends': + skipemptyends = 1 + elif l == '--ignore-contains': + ignorecontains = 1 + elif l == '-f77': + strictf77 = 1 + sourcecodeform = 'fix' + elif l == '-f90': + strictf77 = 0 + sourcecodeform = 'free' + skipemptyends = 1 + elif l == '-h': + f2 = 1 + elif l == '-show': + showblocklist = 1 + elif l == '-m': + f3 = 1 + elif l[0] == '-': + errmess('Unknown option %s\n' % repr(l)) + elif f2: + f2 = 0 + pyffilename = l + elif f3: + f3 = 0 + f77modulename = l + elif f: + try: + open(l).close() + files.append(l) + except OSError as detail: + errmess(f'OSError: {detail!s}\n') + else: + funcs.append(l) + if not strictf77 and f77modulename and not skipemptyends: + outmess("""\ + Warning: You have specified module name for non Fortran 77 code that + should not need one (expect if you are scanning F90 code for non + module blocks but then you should use flag -skipemptyends and also + be sure that the files do not contain programs without program + statement). +""", 0) + + postlist = crackfortran(files) + if pyffilename: + outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0) + pyf = crack2fortran(postlist) + with open(pyffilename, 'w') as f: + f.write(pyf) + if showblocklist: + show(postlist) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/diagnose.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/diagnose.py new file mode 100644 index 00000000..86d7004a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/diagnose.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python3 +import os +import sys +import tempfile + + +def run_command(cmd): + print('Running %r:' % (cmd)) + os.system(cmd) + print('------') + + +def run(): + _path = os.getcwd() + os.chdir(tempfile.gettempdir()) + print('------') + print('os.name=%r' % (os.name)) + print('------') + print('sys.platform=%r' % (sys.platform)) + print('------') + print('sys.version:') + print(sys.version) + print('------') + print('sys.prefix:') + print(sys.prefix) + print('------') + print('sys.path=%r' % (':'.join(sys.path))) + print('------') + + try: + import numpy + has_newnumpy = 1 + except ImportError as e: + print('Failed to import new numpy:', e) + has_newnumpy = 0 + + try: + from numpy.f2py import f2py2e + has_f2py2e = 1 + except ImportError as e: + print('Failed to import f2py2e:', e) + has_f2py2e = 0 + + try: + import numpy.distutils + has_numpy_distutils = 2 + except ImportError: + try: + import numpy_distutils + has_numpy_distutils = 1 + except ImportError as e: + print('Failed to import numpy_distutils:', e) + has_numpy_distutils = 0 + + if has_newnumpy: + try: + print('Found new numpy version %r in %s' % + (numpy.__version__, numpy.__file__)) + except Exception as msg: + print('error:', msg) + print('------') + + if has_f2py2e: + try: + print('Found f2py2e version %r in %s' % + (f2py2e.__version__.version, f2py2e.__file__)) + except Exception as msg: + print('error:', msg) + print('------') + + if has_numpy_distutils: + try: + if 
has_numpy_distutils == 2: + print('Found numpy.distutils version %r in %r' % ( + numpy.distutils.__version__, + numpy.distutils.__file__)) + else: + print('Found numpy_distutils version %r in %r' % ( + numpy_distutils.numpy_distutils_version.numpy_distutils_version, + numpy_distutils.__file__)) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 1: + print( + 'Importing numpy_distutils.command.build_flib ...', end=' ') + import numpy_distutils.command.build_flib as build_flib + print('ok') + print('------') + try: + print( + 'Checking availability of supported Fortran compilers:') + for compiler_class in build_flib.all_compilers: + compiler_class(verbose=1).is_available() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print( + 'error:', msg, '(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)') + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.fcompiler ...', end=' ') + import numpy.distutils.fcompiler as fcompiler + else: + print('Importing numpy_distutils.fcompiler ...', end=' ') + import numpy_distutils.fcompiler as fcompiler + print('ok') + print('------') + try: + print('Checking availability of supported Fortran compilers:') + fcompiler.show_fcompilers() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.cpuinfo ...', end=' ') + from numpy.distutils.cpuinfo import cpuinfo + print('ok') + print('------') + else: + try: + print( + 'Importing numpy_distutils.command.cpuinfo ...', end=' ') + from numpy_distutils.command.cpuinfo import cpuinfo + print('ok') + print('------') + except Exception as msg: + print('error:', msg, '(ignore it)') + print('Importing numpy_distutils.cpuinfo ...', end=' ') + from numpy_distutils.cpuinfo import cpuinfo + print('ok') + print('------') + cpu = cpuinfo() + print('CPU information:', end=' ') + for name in dir(cpuinfo): + if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])(): + print(name[1:], end=' ') + print('------') + except Exception as msg: + print('error:', msg) + print('------') + os.chdir(_path) +if __name__ == "__main__": + run() diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/f2py2e.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/f2py2e.py new file mode 100755 index 00000000..ce22b2d8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/f2py2e.py @@ -0,0 +1,768 @@ +#!/usr/bin/env python3 +""" + +f2py2e - Fortran to Python C/API generator. 2nd Edition. + See __usage__ below. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import sys +import os +import pprint +import re +from pathlib import Path +from itertools import dropwhile +import argparse +import copy + +from . import crackfortran +from . import rules +from . import cb_rules +from . import auxfuncs +from . import cfuncs +from . import f90mod_rules +from . import __version__ +from . 
import capi_maps
+from numpy.f2py._backends import f2py_build_generator
+
+f2py_version = __version__.version
+numpy_version = __version__.version
+errmess = sys.stderr.write
+# outmess=sys.stdout.write
+show = pprint.pprint
+outmess = auxfuncs.outmess
+MESON_ONLY_VER = (sys.version_info >= (3, 12))
+
+__usage__ =\
+f"""Usage:
+
+1) To construct extension module sources:
+
+      f2py [<options>] <fortran files> [[[only:]||[skip:]] \\
+                                        <fortran functions> ] \\
+                                       [: <fortran files> ...]
+
+2) To compile fortran files and build extension modules:
+
+      f2py -c [<options>, <build_flib options>, <extra options>] <fortran files>
+
+3) To generate signature files:
+
+      f2py -h <filename.pyf> ...< same options as in (1) >
+
+Description: This program generates a Python C/API file (<modulename>module.c)
+             that contains wrappers for given fortran functions so that they
+             can be called from Python. With the -c option the corresponding
+             extension modules are built.
+
+Options:
+
+  -h <filename>    Write signatures of the fortran routines to file <filename>
+                   and exit. You can then edit <filename> and use it instead
+                   of <fortran files>. If <filename>==stdout then the
+                   signatures are printed to stdout.
+  <fortran functions>  Names of fortran routines for which Python C/API
+                   functions will be generated. Default is all that are found
+                   in <fortran files>.
+  <fortran files>  Paths to fortran/signature files that will be scanned for
+                   <fortran functions> in order to determine their signatures.
+  skip:            Ignore fortran functions that follow until `:'.
+  only:            Use only fortran functions that follow until `:'.
+  :                Get back to <fortran files> mode.
+
+  -m <modulename>  Name of the module; f2py generates a Python/C API
+                   file <modulename>module.c or extension module <modulename>.
+                   Default is 'untitled'.
+
+  '-include<header>'  Writes additional headers in the C wrapper, can be passed
+                      multiple times, generates #include <header> each time.
+
+  --[no-]lower     Do [not] lower the cases in <fortran files>. By default,
+                   --lower is assumed with -h key, and --no-lower without -h key.
+
+  --build-dir <dirname>  All f2py generated files are created in <dirname>.
+                   Default is tempfile.mkdtemp().
+
+  --overwrite-signature  Overwrite existing signature file.
+
+  --[no-]latex-doc Create (or not) <modulename>module.tex.
+                   Default is --no-latex-doc.
+  --short-latex    Create 'incomplete' LaTeX document (without commands
+                   \\documentclass, \\tableofcontents, and \\begin{{document}},
+                   \\end{{document}}).
+
+  --[no-]rest-doc  Create (or not) <modulename>module.rst.
+                   Default is --no-rest-doc.
+
+  --debug-capi     Create C/API code that reports the state of the wrappers
+                   during runtime. Useful for debugging.
+
+  --[no-]wrap-functions  Create Fortran subroutine wrappers to Fortran 77
+                   functions. --wrap-functions is default because it ensures
+                   maximum portability/compiler independence.
+
+  --include-paths <path1>:<path2>:...  Search include files from the given
+                   directories.
+
+  --help-link [..] List system resources found by system_info.py. See also
+                   --link-<resource> switch below. [..] is optional list
+                   of resources names. E.g. try 'f2py --help-link lapack_opt'.
+
+  --f2cmap <filename>  Load Fortran-to-Python KIND specification from the given
+                   file. Default: .f2py_f2cmap in current directory.
+
+  --quiet          Run quietly.
+  --verbose        Run with extra verbosity.
+  --skip-empty-wrappers  Only generate wrapper files when needed.
+  -v               Print f2py version ID and exit.
+
+
+build backend options (only effective with -c)
+[NO_MESON] is used to indicate an option not meant to be used
+with the meson backend or above Python 3.12:
+
+  --fcompiler=<Vendor>      Specify Fortran compiler type by vendor [NO_MESON]
+  --compiler=<compiler type>  Specify distutils C compiler type [NO_MESON]
+
+  --help-fcompiler          List available Fortran compilers and exit [NO_MESON]
+  --f77exec=<path>          Specify the path to F77 compiler [NO_MESON]
+  --f90exec=<path>          Specify the path to F90 compiler [NO_MESON]
+  --f77flags=<string>       Specify F77 compiler flags
+  --f90flags=<string>       Specify F90 compiler flags
+  --opt=<string>            Specify optimization flags [NO_MESON]
+  --arch=<string>           Specify architecture specific optimization flags [NO_MESON]
+  --noopt                   Compile without optimization [NO_MESON]
+  --noarch                  Compile without arch-dependent optimization [NO_MESON]
+  --debug                   Compile with debugging information
+
+  --dep <dependency>
+                   Specify a meson dependency for the module. This may
+                   be passed multiple times for multiple dependencies.
+                   Dependencies are stored in a list for further processing.
+
+                   Example: --dep lapack --dep scalapack
+                   This will identify "lapack" and "scalapack" as dependencies
+                   and remove them from argv, leaving a dependencies list
+                   containing ["lapack", "scalapack"].
+
+  --backend <backend_type>
+                   Specify the build backend for the compilation process.
+                   The supported backends are 'meson' and 'distutils'.
+                   If not specified, defaults to 'distutils'. On
+                   Python 3.12 or higher, the default is 'meson'.
+
+Extra options (only effective with -c):
+
+  --link-<resource>    Link extension module with <resource> as defined
+                       by numpy.distutils/system_info.py. E.g. to link
+                       with optimized LAPACK libraries (vecLib on MacOSX,
+                       ATLAS elsewhere), use --link-lapack_opt.
+                       See also --help-link switch. [NO_MESON]
+
+  -L/path/to/lib/ -l<libname>
+  -D<define> -U<name>
+  -I/path/to/include/
+  <filename>.o <filename>.so <filename>.a
+
+  Using the following macros may be required with non-gcc Fortran
+  compilers:
+  -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN
+  -DUNDERSCORE_G77
+
+  When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY
+  interface is printed out at exit (platforms: Linux).
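+
+  For example, a build with the exit report enabled might be invoked as
+  (hypothetical file and module names):
+
+    f2py -c -m mymod mysub.f90 -DF2PY_REPORT_ATEXIT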
+ + When using -DF2PY_REPORT_ON_ARRAY_COPY=, a message is + sent to stderr whenever F2PY interface makes a copy of an + array. Integer sets the threshold for array sizes when + a message should be shown. + +Version: {f2py_version} +numpy Version: {numpy_version} +License: NumPy license (see LICENSE.txt in the NumPy source code) +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +https://numpy.org/doc/stable/f2py/index.html\n""" + + +def scaninputline(inputline): + files, skipfuncs, onlyfuncs, debug = [], [], [], [] + f, f2, f3, f5, f6, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0 + verbose = 1 + emptygen = True + dolc = -1 + dolatexdoc = 0 + dorestdoc = 0 + wrapfuncs = 1 + buildpath = '.' + include_paths, inputline = get_includes(inputline) + signsfile, modulename = None, None + options = {'buildpath': buildpath, + 'coutput': None, + 'f2py_wrapper_output': None} + for l in inputline: + if l == '': + pass + elif l == 'only:': + f = 0 + elif l == 'skip:': + f = -1 + elif l == ':': + f = 1 + elif l[:8] == '--debug-': + debug.append(l[8:]) + elif l == '--lower': + dolc = 1 + elif l == '--build-dir': + f6 = 1 + elif l == '--no-lower': + dolc = 0 + elif l == '--quiet': + verbose = 0 + elif l == '--verbose': + verbose += 1 + elif l == '--latex-doc': + dolatexdoc = 1 + elif l == '--no-latex-doc': + dolatexdoc = 0 + elif l == '--rest-doc': + dorestdoc = 1 + elif l == '--no-rest-doc': + dorestdoc = 0 + elif l == '--wrap-functions': + wrapfuncs = 1 + elif l == '--no-wrap-functions': + wrapfuncs = 0 + elif l == '--short-latex': + options['shortlatex'] = 1 + elif l == '--coutput': + f8 = 1 + elif l == '--f2py-wrapper-output': + f9 = 1 + elif l == '--f2cmap': + f10 = 1 + elif l == '--overwrite-signature': + options['h-overwrite'] = 1 + elif l == '-h': + f2 = 1 + elif l == '-m': + f3 = 1 + elif l[:2] == '-v': + print(f2py_version) + sys.exit() + elif l == '--show-compilers': + f5 = 1 + elif l[:8] == '-include': + cfuncs.outneeds['userincludes'].append(l[9:-1]) + cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:] + elif l == '--skip-empty-wrappers': + emptygen = False + elif l[0] == '-': + errmess('Unknown option %s\n' % repr(l)) + sys.exit() + elif f2: + f2 = 0 + signsfile = l + elif f3: + f3 = 0 + modulename = l + elif f6: + f6 = 0 + buildpath = l + elif f8: + f8 = 0 + options["coutput"] = l + elif f9: + f9 = 0 + options["f2py_wrapper_output"] = l + elif f10: + f10 = 0 + options["f2cmap_file"] = l + elif f == 1: + try: + with open(l): + pass + files.append(l) + except OSError as detail: + errmess(f'OSError: {detail!s}. Skipping file "{l!s}".\n') + elif f == -1: + skipfuncs.append(l) + elif f == 0: + onlyfuncs.append(l) + if not f5 and not files and not modulename: + print(__usage__) + sys.exit() + if not os.path.isdir(buildpath): + if not verbose: + outmess('Creating build directory %s\n' % (buildpath)) + os.mkdir(buildpath) + if signsfile: + signsfile = os.path.join(buildpath, signsfile) + if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options: + errmess( + 'Signature file "%s" exists!!! 
Use --overwrite-signature to overwrite.\n' % (signsfile)) + sys.exit() + + options['emptygen'] = emptygen + options['debug'] = debug + options['verbose'] = verbose + if dolc == -1 and not signsfile: + options['do-lower'] = 0 + else: + options['do-lower'] = dolc + if modulename: + options['module'] = modulename + if signsfile: + options['signsfile'] = signsfile + if onlyfuncs: + options['onlyfuncs'] = onlyfuncs + if skipfuncs: + options['skipfuncs'] = skipfuncs + options['dolatexdoc'] = dolatexdoc + options['dorestdoc'] = dorestdoc + options['wrapfuncs'] = wrapfuncs + options['buildpath'] = buildpath + options['include_paths'] = include_paths + options.setdefault('f2cmap_file', None) + return files, options + + +def callcrackfortran(files, options): + rules.options = options + crackfortran.debug = options['debug'] + crackfortran.verbose = options['verbose'] + if 'module' in options: + crackfortran.f77modulename = options['module'] + if 'skipfuncs' in options: + crackfortran.skipfuncs = options['skipfuncs'] + if 'onlyfuncs' in options: + crackfortran.onlyfuncs = options['onlyfuncs'] + crackfortran.include_paths[:] = options['include_paths'] + crackfortran.dolowercase = options['do-lower'] + postlist = crackfortran.crackfortran(files) + if 'signsfile' in options: + outmess('Saving signatures to file "%s"\n' % (options['signsfile'])) + pyf = crackfortran.crack2fortran(postlist) + if options['signsfile'][-6:] == 'stdout': + sys.stdout.write(pyf) + else: + with open(options['signsfile'], 'w') as f: + f.write(pyf) + if options["coutput"] is None: + for mod in postlist: + mod["coutput"] = "%smodule.c" % mod["name"] + else: + for mod in postlist: + mod["coutput"] = options["coutput"] + if options["f2py_wrapper_output"] is None: + for mod in postlist: + mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"] + else: + for mod in postlist: + mod["f2py_wrapper_output"] = options["f2py_wrapper_output"] + return postlist + + +def buildmodules(lst): + cfuncs.buildcfuncs() + outmess('Building modules...\n') + modules, mnames, isusedby = [], [], {} + for item in lst: + if '__user__' in item['name']: + cb_rules.buildcallbacks(item) + else: + if 'use' in item: + for u in item['use'].keys(): + if u not in isusedby: + isusedby[u] = [] + isusedby[u].append(item['name']) + modules.append(item) + mnames.append(item['name']) + ret = {} + for module, name in zip(modules, mnames): + if name in isusedby: + outmess('\tSkipping module "%s" which is used by %s.\n' % ( + name, ','.join('"%s"' % s for s in isusedby[name]))) + else: + um = [] + if 'use' in module: + for u in module['use'].keys(): + if u in isusedby and u in mnames: + um.append(modules[mnames.index(u)]) + else: + outmess( + f'\tModule "{name}" uses nonexisting "{u}" ' + 'which will be ignored.\n') + ret[name] = {} + dict_append(ret[name], rules.buildmodule(module, um)) + return ret + + +def dict_append(d_out, d_in): + for (k, v) in d_in.items(): + if k not in d_out: + d_out[k] = [] + if isinstance(v, list): + d_out[k] = d_out[k] + v + else: + d_out[k].append(v) + + +def run_main(comline_list): + """ + Equivalent to running:: + + f2py + + where ``=string.join(,' ')``, but in Python. Unless + ``-h`` is used, this function returns a dictionary containing + information on generated modules and their dependencies on source + files. + + You cannot build extension modules with this function, that is, + using ``-c`` is not allowed. Use the ``compile`` command instead. 
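+
+    A minimal sketch of such a call (assuming a file ``scalar.f`` exists
+    in the current directory)::
+
+        from numpy.f2py import run_main
+        mod_info = run_main(['-m', 'scalar', 'scalar.f'])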
+ + Examples + -------- + The command ``f2py -m scalar scalar.f`` can be executed from Python as + follows. + + .. literalinclude:: ../../source/f2py/code/results/run_main_session.dat + :language: python + + """ + crackfortran.reset_global_f2py_vars() + f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__)) + fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h') + fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c') + # gh-22819 -- begin + parser = make_f2py_compile_parser() + args, comline_list = parser.parse_known_args(comline_list) + pyf_files, _ = filter_files("", "[.]pyf([.]src|)", comline_list) + # Checks that no existing modulename is defined in a pyf file + # TODO: Remove all this when scaninputline is replaced + if args.module_name: + if "-h" in comline_list: + modname = ( + args.module_name + ) # Directly use from args when -h is present + else: + modname = validate_modulename( + pyf_files, args.module_name + ) # Validate modname when -h is not present + comline_list += ['-m', modname] # needed for the rest of scaninputline + # gh-22819 -- end + files, options = scaninputline(comline_list) + auxfuncs.options = options + capi_maps.load_f2cmap_file(options['f2cmap_file']) + postlist = callcrackfortran(files, options) + isusedby = {} + for plist in postlist: + if 'use' in plist: + for u in plist['use'].keys(): + if u not in isusedby: + isusedby[u] = [] + isusedby[u].append(plist['name']) + for plist in postlist: + if plist['block'] == 'python module' and '__user__' in plist['name']: + if plist['name'] in isusedby: + # if not quiet: + outmess( + f'Skipping Makefile build for module "{plist["name"]}" ' + 'which is used by {}\n'.format( + ','.join(f'"{s}"' for s in isusedby[plist['name']]))) + if 'signsfile' in options: + if options['verbose'] > 1: + outmess( + 'Stopping. Edit the signature file and then run f2py on the signature file: ') + outmess('%s %s\n' % + (os.path.basename(sys.argv[0]), options['signsfile'])) + return + for plist in postlist: + if plist['block'] != 'python module': + if 'python module' not in options: + errmess( + 'Tip: If your original code is Fortran source then you must use -m option.\n') + raise TypeError('All blocks must be python module blocks but got %s' % ( + repr(plist['block']))) + auxfuncs.debugoptions = options['debug'] + f90mod_rules.options = options + auxfuncs.wrapfuncs = options['wrapfuncs'] + + ret = buildmodules(postlist) + + for mn in ret.keys(): + dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc}) + return ret + + +def filter_files(prefix, suffix, files, remove_prefix=None): + """ + Filter files by prefix and suffix. 
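+
+    A sketch of the behavior (hypothetical file names):
+
+    >>> filter_files('', '[.]pyf([.]src|)', ['m.pyf', 'a.f90'])
+    (['m.pyf'], ['a.f90'])
+    >>> filter_files('-L', '', ['-L/usr/lib', 'x.f'], remove_prefix=1)
+    (['/usr/lib'], ['x.f'])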
+ """ + filtered, rest = [], [] + match = re.compile(prefix + r'.*' + suffix + r'\Z').match + if remove_prefix: + ind = len(prefix) + else: + ind = 0 + for file in [x.strip() for x in files]: + if match(file): + filtered.append(file[ind:]) + else: + rest.append(file) + return filtered, rest + + +def get_prefix(module): + p = os.path.dirname(os.path.dirname(module.__file__)) + return p + + +class CombineIncludePaths(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + include_paths_set = set(getattr(namespace, 'include_paths', []) or []) + if option_string == "--include_paths": + outmess("Use --include-paths or -I instead of --include_paths which will be removed") + if option_string == "--include-paths" or option_string == "--include_paths": + include_paths_set.update(values.split(':')) + else: + include_paths_set.add(values) + setattr(namespace, 'include_paths', list(include_paths_set)) + +def include_parser(): + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument("-I", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--include-paths", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--include_paths", dest="include_paths", action=CombineIncludePaths) + return parser + +def get_includes(iline): + iline = (' '.join(iline)).split() + parser = include_parser() + args, remain = parser.parse_known_args(iline) + ipaths = args.include_paths + if args.include_paths is None: + ipaths = [] + return ipaths, remain + +def make_f2py_compile_parser(): + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument("--dep", action="append", dest="dependencies") + parser.add_argument("--backend", choices=['meson', 'distutils'], default='distutils') + parser.add_argument("-m", dest="module_name") + return parser + +def preparse_sysargv(): + # To keep backwards bug compatibility, newer flags are handled by argparse, + # and `sys.argv` is passed to the rest of `f2py` as is. + parser = make_f2py_compile_parser() + + args, remaining_argv = parser.parse_known_args() + sys.argv = [sys.argv[0]] + remaining_argv + + backend_key = args.backend + if MESON_ONLY_VER and backend_key == 'distutils': + outmess("Cannot use distutils backend with Python>=3.12," + " using meson backend instead.\n") + backend_key = "meson" + + return { + "dependencies": args.dependencies or [], + "backend": backend_key, + "modulename": args.module_name, + } + +def run_compile(): + """ + Do it all in one call! 
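+
+    A sketch of an equivalent command line (hypothetical names)::
+
+        f2py -c -m mymod mysub.f90 -L/opt/lib -lblas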
+ """ + import tempfile + + # Collect dependency flags, preprocess sys.argv + argy = preparse_sysargv() + modulename = argy["modulename"] + if modulename is None: + modulename = 'untitled' + dependencies = argy["dependencies"] + backend_key = argy["backend"] + build_backend = f2py_build_generator(backend_key) + + i = sys.argv.index('-c') + del sys.argv[i] + + remove_build_dir = 0 + try: + i = sys.argv.index('--build-dir') + except ValueError: + i = None + if i is not None: + build_dir = sys.argv[i + 1] + del sys.argv[i + 1] + del sys.argv[i] + else: + remove_build_dir = 1 + build_dir = tempfile.mkdtemp() + + _reg1 = re.compile(r'--link-') + sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags] + if sysinfo_flags: + sysinfo_flags = [f[7:] for f in sysinfo_flags] + + _reg2 = re.compile( + r'--((no-|)(wrap-functions|lower)|debug-capi|quiet|skip-empty-wrappers)|-include') + f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] + f2py_flags2 = [] + fl = 0 + for a in sys.argv[1:]: + if a in ['only:', 'skip:']: + fl = 1 + elif a == ':': + fl = 0 + if fl or a == ':': + f2py_flags2.append(a) + if f2py_flags2 and f2py_flags2[-1] != ':': + f2py_flags2.append(':') + f2py_flags.extend(f2py_flags2) + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2] + _reg3 = re.compile( + r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)') + flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in flib_flags] + _reg4 = re.compile( + r'--((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') + fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in fc_flags] + + del_list = [] + for s in flib_flags: + v = '--fcompiler=' + if s[:len(v)] == v: + if MESON_ONLY_VER or backend_key == 'meson': + outmess( + "--fcompiler cannot be used with meson," + "set compiler with the FC environment variable\n" + ) + else: + from numpy.distutils import fcompiler + fcompiler.load_all_fcompiler_classes() + allowed_keys = list(fcompiler.fcompiler_class.keys()) + nv = ov = s[len(v):].lower() + if ov not in allowed_keys: + vmap = {} # XXX + try: + nv = vmap[ov] + except KeyError: + if ov not in vmap.values(): + print('Unknown vendor: "%s"' % (s[len(v):])) + nv = ov + i = flib_flags.index(s) + flib_flags[i] = '--fcompiler=' + nv + continue + for s in del_list: + i = flib_flags.index(s) + del flib_flags[i] + assert len(flib_flags) <= 2, repr(flib_flags) + + _reg5 = re.compile(r'--(verbose)') + setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in setup_flags] + + if '--quiet' in f2py_flags: + setup_flags.append('--quiet') + + # Ugly filter to remove everything but sources + sources = sys.argv[1:] + f2cmapopt = '--f2cmap' + if f2cmapopt in sys.argv: + i = sys.argv.index(f2cmapopt) + f2py_flags.extend(sys.argv[i:i + 2]) + del sys.argv[i + 1], sys.argv[i] + sources = sys.argv[1:] + + pyf_files, _sources = filter_files("", "[.]pyf([.]src|)", sources) + sources = pyf_files + _sources + modulename = validate_modulename(pyf_files, modulename) + extra_objects, sources = filter_files('', '[.](o|a|so|dylib)', sources) + library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1) + libraries, sources = filter_files('-l', '', sources, remove_prefix=1) + undef_macros, sources = filter_files('-U', '', sources, 
remove_prefix=1) + define_macros, sources = filter_files('-D', '', sources, remove_prefix=1) + for i in range(len(define_macros)): + name_value = define_macros[i].split('=', 1) + if len(name_value) == 1: + name_value.append(None) + if len(name_value) == 2: + define_macros[i] = tuple(name_value) + else: + print('Invalid use of -D:', name_value) + + # Construct wrappers / signatures / things + if backend_key == 'meson': + if not pyf_files: + outmess('Using meson backend\nWill pass --lower to f2py\nSee https://numpy.org/doc/stable/f2py/buildtools/meson.html\n') + f2py_flags.append('--lower') + run_main(f" {' '.join(f2py_flags)} -m {modulename} {' '.join(sources)}".split()) + else: + run_main(f" {' '.join(f2py_flags)} {' '.join(pyf_files)}".split()) + + # Order matters here, includes are needed for run_main above + include_dirs, sources = get_includes(sources) + # Now use the builder + builder = build_backend( + modulename, + sources, + extra_objects, + build_dir, + include_dirs, + library_dirs, + libraries, + define_macros, + undef_macros, + f2py_flags, + sysinfo_flags, + fc_flags, + flib_flags, + setup_flags, + remove_build_dir, + {"dependencies": dependencies}, + ) + + builder.compile() + + +def validate_modulename(pyf_files, modulename='untitled'): + if len(pyf_files) > 1: + raise ValueError("Only one .pyf file per call") + if pyf_files: + pyff = pyf_files[0] + pyf_modname = auxfuncs.get_f2py_modulename(pyff) + if modulename != pyf_modname: + outmess( + f"Ignoring -m {modulename}.\n" + f"{pyff} defines {pyf_modname} to be the modulename.\n" + ) + modulename = pyf_modname + return modulename + +def main(): + if '--help-link' in sys.argv[1:]: + sys.argv.remove('--help-link') + if MESON_ONLY_VER: + outmess("Use --dep for meson builds\n") + else: + from numpy.distutils.system_info import show_all + show_all() + return + + if '-c' in sys.argv[1:]: + run_compile() + else: + run_main(sys.argv[1:]) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/f90mod_rules.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/f90mod_rules.py new file mode 100644 index 00000000..2f8a8dc1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/f90mod_rules.py @@ -0,0 +1,264 @@ +""" +Build F90 module support for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +__version__ = "$Revision: 1.27 $"[10:-1] + +f2py_version = 'See `f2py -v`' + +import numpy as np + +from . import capi_maps +from . import func2subr +from .crackfortran import undo_rmbadname, undo_rmbadname1 + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * + +options = {} + + +def findf90modules(m): + if ismodule(m): + return [m] + if not hasbody(m): + return [] + ret = [] + for b in m['body']: + if ismodule(b): + ret.append(b) + else: + ret = ret + findf90modules(b) + return ret + +fgetdims1 = """\ + external f2pysetdata + logical ns + integer r,i + integer(%d) s(*) + ns = .FALSE. + if (allocated(d)) then + do i=1,r + if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then + ns = .TRUE. 
+ end if + end do + if (ns) then + deallocate(d) + end if + end if + if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize + +fgetdims2 = """\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + end if + flag = 1 + call f2pysetdata(d,allocated(d))""" + +fgetdims2_sa = """\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + !s(r) must be equal to len(d(1)) + end if + flag = 2 + call f2pysetdata(d,allocated(d))""" + + +def buildhooks(pymod): + from . import rules + ret = {'f90modhooks': [], 'initf90modhooks': [], 'body': [], + 'need': ['F_FUNC', 'arrayobject.h'], + 'separatorsfor': {'includes0': '\n', 'includes': '\n'}, + 'docs': ['"Fortran 90/95 modules:\\n"'], + 'latexdoc': []} + fhooks = [''] + + def fadd(line, s=fhooks): + s[0] = '%s\n %s' % (s[0], line) + doc = [''] + + def dadd(line, s=doc): + s[0] = '%s\n%s' % (s[0], line) + + usenames = getuseblocks(pymod) + for m in findf90modules(pymod): + sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [ + m['name']], [] + sargsp = [] + ifargs = [] + mfargs = [] + if hasbody(m): + for b in m['body']: + notvars.append(b['name']) + for n in m['vars'].keys(): + var = m['vars'][n] + if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)): + onlyvars.append(n) + mfargs.append(n) + outmess('\t\tConstructing F90 module support for "%s"...\n' % + (m['name'])) + if m['name'] in usenames and not onlyvars: + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use'...\n") + continue + if onlyvars: + outmess('\t\t Variables: %s\n' % (' '.join(onlyvars))) + chooks = [''] + + def cadd(line, s=chooks): + s[0] = '%s\n%s' % (s[0], line) + ihooks = [''] + + def iadd(line, s=ihooks): + s[0] = '%s\n%s' % (s[0], line) + + vrd = capi_maps.modsign2map(m) + cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name'])) + dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n' % (m['name'])) + if hasnote(m): + note = m['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd(note) + if onlyvars: + dadd('\\begin{description}') + for n in onlyvars: + var = m['vars'][n] + modobjs.append(n) + ct = capi_maps.getctype(var) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, var) + dms = dm['dims'].replace('*', '-1').strip() + dms = dms.replace(':', '-1').strip() + if not dms: + dms = '-1' + use_fgetdims2 = fgetdims2 + cadd('\t{"%s",%s,{{%s}},%s, %s},' % + (undo_rmbadname1(n), dm['rank'], dms, at, + capi_maps.get_elsize(var))) + dadd('\\item[]{{}\\verb@%s@{}}' % + (capi_maps.getarrdocsign(n, var))) + if hasnote(var): + note = var['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd('--- %s' % (note)) + if isallocatable(var): + fargs.append('f2py_%s_getdims_%s' % (m['name'], n)) + efargs.append(fargs[-1]) + sargs.append( + 'void (*%s)(int*,npy_intp*,void(*)(char*,npy_intp*),int*)' % (n)) + sargsp.append('void (*)(int*,npy_intp*,void(*)(char*,npy_intp*),int*)') + iadd('\tf2py_%s_def[i_f2py++].func = %s;' % (m['name'], n)) + fadd('subroutine %s(r,s,f2pysetdata,flag)' % (fargs[-1])) + fadd('use %s, only: d => %s\n' % + (m['name'], undo_rmbadname1(n))) + fadd('integer flag\n') + fhooks[0] = fhooks[0] + fgetdims1 + dms = range(1, int(dm['rank']) + 1) + fadd(' allocate(d(%s))\n' % + (','.join(['s(%s)' % i for i in dms]))) + fhooks[0] = fhooks[0] + use_fgetdims2 + fadd('end subroutine %s' % (fargs[-1])) + else: + fargs.append(n) + sargs.append('char *%s' % (n)) + sargsp.append('char*') + iadd('\tf2py_%s_def[i_f2py++].data = %s;' % (m['name'], n)) + if 
onlyvars: + dadd('\\end{description}') + if hasbody(m): + for b in m['body']: + if not isroutine(b): + outmess("f90mod_rules.buildhooks:" + f" skipping {b['block']} {b['name']}\n") + continue + modobjs.append('%s()' % (b['name'])) + b['modulename'] = m['name'] + api, wrap = rules.buildapi(b) + if isfunction(b): + fhooks[0] = fhooks[0] + wrap + fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) + ifargs.append(func2subr.createfuncwrapper(b, signature=1)) + else: + if wrap: + fhooks[0] = fhooks[0] + wrap + fargs.append('f2pywrap_%s_%s' % (m['name'], b['name'])) + ifargs.append( + func2subr.createsubrwrapper(b, signature=1)) + else: + fargs.append(b['name']) + mfargs.append(fargs[-1]) + api['externroutines'] = [] + ar = applyrules(api, vrd) + ar['docs'] = [] + ar['docshort'] = [] + ret = dictappend(ret, ar) + cadd(('\t{"%s",-1,{{-1}},0,0,NULL,(void *)' + 'f2py_rout_#modulename#_%s_%s,' + 'doc_f2py_rout_#modulename#_%s_%s},') + % (b['name'], m['name'], b['name'], m['name'], b['name'])) + sargs.append('char *%s' % (b['name'])) + sargsp.append('char *') + iadd('\tf2py_%s_def[i_f2py++].data = %s;' % + (m['name'], b['name'])) + cadd('\t{NULL}\n};\n') + iadd('}') + ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % ( + m['name'], ','.join(sargs), ihooks[0]) + if '_' in m['name']: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));' + % (F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp))) + iadd('static void f2py_init_%s(void) {' % (m['name'])) + iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' + % (F_FUNC, m['name'], m['name'].upper(), m['name'])) + iadd('}\n') + ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks + ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( + m['name'], m['name'], m['name'])] + ret['initf90modhooks'] + fadd('') + fadd('subroutine f2pyinit%s(f2pysetupfunc)' % (m['name'])) + if mfargs: + for a in undo_rmbadname(mfargs): + fadd('use %s, only : %s' % (m['name'], a)) + if ifargs: + fadd(' '.join(['interface'] + ifargs)) + fadd('end interface') + fadd('external f2pysetupfunc') + if efargs: + for a in undo_rmbadname(efargs): + fadd('external %s' % (a)) + fadd('call f2pysetupfunc(%s)' % (','.join(undo_rmbadname(fargs)))) + fadd('end subroutine f2pyinit%s\n' % (m['name'])) + + dadd('\n'.join(ret['latexdoc']).replace( + r'\subsection{', r'\subsubsection{')) + + ret['latexdoc'] = [] + ret['docs'].append('"\t%s --- %s"' % (m['name'], + ','.join(undo_rmbadname(modobjs)))) + + ret['routine_defs'] = '' + ret['doc'] = [] + ret['docshort'] = [] + ret['latexdoc'] = doc[0] + if len(ret['docs']) <= 1: + ret['docs'] = '' + return ret, fhooks[0] diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/func2subr.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/func2subr.py new file mode 100644 index 00000000..b9aa9fc0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/func2subr.py @@ -0,0 +1,323 @@ +""" + +Rules for building C/API module with f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
+""" +import copy + +from .auxfuncs import ( + getfortranname, isexternal, isfunction, isfunction_wrap, isintent_in, + isintent_out, islogicalfunction, ismoduleroutine, isscalar, + issubroutine, issubroutine_wrap, outmess, show +) + +from ._isocbind import isoc_kindmap + +def var2fixfortran(vars, a, fa=None, f90mode=None): + if fa is None: + fa = a + if a not in vars: + show(vars) + outmess('var2fixfortran: No definition for argument "%s".\n' % a) + return '' + if 'typespec' not in vars[a]: + show(vars[a]) + outmess('var2fixfortran: No typespec for argument "%s".\n' % a) + return '' + vardef = vars[a]['typespec'] + if vardef == 'type' and 'typename' in vars[a]: + vardef = '%s(%s)' % (vardef, vars[a]['typename']) + selector = {} + lk = '' + if 'kindselector' in vars[a]: + selector = vars[a]['kindselector'] + lk = 'kind' + elif 'charselector' in vars[a]: + selector = vars[a]['charselector'] + lk = 'len' + if '*' in selector: + if f90mode: + if selector['*'] in ['*', ':', '(*)']: + vardef = '%s(len=*)' % (vardef) + else: + vardef = '%s(%s=%s)' % (vardef, lk, selector['*']) + else: + if selector['*'] in ['*', ':']: + vardef = '%s*(%s)' % (vardef, selector['*']) + else: + vardef = '%s*%s' % (vardef, selector['*']) + else: + if 'len' in selector: + vardef = '%s(len=%s' % (vardef, selector['len']) + if 'kind' in selector: + vardef = '%s,kind=%s)' % (vardef, selector['kind']) + else: + vardef = '%s)' % (vardef) + elif 'kind' in selector: + vardef = '%s(kind=%s)' % (vardef, selector['kind']) + + vardef = '%s %s' % (vardef, fa) + if 'dimension' in vars[a]: + vardef = '%s(%s)' % (vardef, ','.join(vars[a]['dimension'])) + return vardef + +def useiso_c_binding(rout): + useisoc = False + for key, value in rout['vars'].items(): + kind_value = value.get('kindselector', {}).get('kind') + if kind_value in isoc_kindmap: + return True + return useisoc + +def createfuncwrapper(rout, signature=0): + assert isfunction(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d == ':': + dn = 'f2py_%s_d%s' % (a, i) + dv = dict(typespec='integer', intent=['hide']) + dv['='] = 'shape(%s, %s)' % (a, i) + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + + def add(line, ret=ret): + ret[0] = '%s\n %s' % (ret[0], line) + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + newname = '%sf2pywrap' % (name) + + if newname not in vars: + vars[newname] = vars[name] + args = [newname] + rout['args'][1:] + else: + args = [newname] + rout['args'] + + l_tmpl = var2fixfortran(vars, name, '@@@NAME@@@', f90mode) + if l_tmpl[:13] == 'character*(*)': + if f90mode: + l_tmpl = 'character(len=10)' + l_tmpl[13:] + else: + l_tmpl = 'character*10' + l_tmpl[13:] + charselect = vars[name]['charselector'] + if charselect.get('*', '') == '(*)': + charselect['*'] = '10' + + l1 = l_tmpl.replace('@@@NAME@@@', newname) + rl = None + + useisoc = useiso_c_binding(rout) + sargs = ', '.join(args) + if f90mode: + # gh-23598 fix warning + # Essentially, this gets called again with modules where the name of the + # function is added to the arguments, which is not required, and removed + sargs = sargs.replace(f"{name}, ", '') + args = [arg for arg in args if arg != name] + rout['args'] = args + add('subroutine f2pywrap_%s_%s (%s)' % + (rout['modulename'], name, sargs)) + if not signature: + add('use %s, only : %s' % 
(rout['modulename'], fortranname)) + if useisoc: + add('use iso_c_binding') + else: + add('subroutine f2pywrap%s (%s)' % (name, sargs)) + if useisoc: + add('use iso_c_binding') + if not need_interface: + add('external %s' % (fortranname)) + rl = l_tmpl.replace('@@@NAME@@@', '') + ' ' + fortranname + + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' not in line: + add(line) + + args = args[1:] + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add('external %s' % (a)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isintent_in(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + add(l1) + if rl is not None: + add(rl) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + add(rout['saved_interface'].lstrip()) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not signature: + if islogicalfunction(rout): + add('%s = .not.(.not.%s(%s))' % (newname, fortranname, sargs)) + else: + add('%s = %s(%s)' % (newname, fortranname, sargs)) + if f90mode: + add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) + else: + add('end') + return ret[0] + + +def createsubrwrapper(rout, signature=0): + assert issubroutine(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d == ':': + dn = 'f2py_%s_d%s' % (a, i) + dv = dict(typespec='integer', intent=['hide']) + dv['='] = 'shape(%s, %s)' % (a, i) + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + + def add(line, ret=ret): + ret[0] = '%s\n %s' % (ret[0], line) + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + + args = rout['args'] + + useisoc = useiso_c_binding(rout) + sargs = ', '.join(args) + if f90mode: + add('subroutine f2pywrap_%s_%s (%s)' % + (rout['modulename'], name, sargs)) + if useisoc: + add('use iso_c_binding') + if not signature: + add('use %s, only : %s' % (rout['modulename'], fortranname)) + else: + add('subroutine f2pywrap%s (%s)' % (name, sargs)) + if useisoc: + add('use iso_c_binding') + if not need_interface: + add('external %s' % (fortranname)) + + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' not in line: + add(line) + + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add('external %s' % (a)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' in line: + continue + add(line) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not 
signature: + add('call %s(%s)' % (fortranname, sargs)) + if f90mode: + add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name)) + else: + add('end') + return ret[0] + + +def assubr(rout): + if isfunction_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n' % ( + name, fortranname)) + rout = copy.copy(rout) + fname = name + rname = fname + if 'result' in rout: + rname = rout['result'] + rout['vars'][fname] = rout['vars'][rname] + fvar = rout['vars'][fname] + if not isintent_out(fvar): + if 'intent' not in fvar: + fvar['intent'] = [] + fvar['intent'].append('out') + flag = 1 + for i in fvar['intent']: + if i.startswith('out='): + flag = 0 + break + if flag: + fvar['intent'].append('out=%s' % (rname)) + rout['args'][:] = [fname] + rout['args'] + return rout, createfuncwrapper(rout) + if issubroutine_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n' + % (name, fortranname)) + rout = copy.copy(rout) + return rout, createsubrwrapper(rout) + return rout, '' diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/rules.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/rules.py new file mode 100755 index 00000000..009365e0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/rules.py @@ -0,0 +1,1568 @@ +#!/usr/bin/env python3 +""" + +Rules for building C/API module with f2py2e. + +Here is a skeleton of a new wrapper function (13Dec2001): + +wrapper_function(args) + declarations + get_python_arguments, say, `a' and `b' + + get_a_from_python + if (successful) { + + get_b_from_python + if (successful) { + + callfortran + if (successful) { + + put_a_to_python + if (successful) { + + put_b_to_python + if (successful) { + + buildvalue = ... + + } + + } + + } + + } + cleanup_b + + } + cleanup_a + + return buildvalue + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import os, sys +import time +import copy +from pathlib import Path + +# __version__.version is now the same as the NumPy version +from . import __version__ + +from .auxfuncs import ( + applyrules, debugcapi, dictappend, errmess, gentitle, getargs2, + hascallstatement, hasexternals, hasinitvalue, hasnote, + hasresultnote, isarray, isarrayofstrings, ischaracter, + ischaracterarray, ischaracter_or_characterarray, iscomplex, + iscomplexarray, iscomplexfunction, iscomplexfunction_warn, + isdummyroutine, isexternal, isfunction, isfunction_wrap, isint1, + isint1array, isintent_aux, isintent_c, isintent_callback, + isintent_copy, isintent_hide, isintent_inout, isintent_nothide, + isintent_out, isintent_overwrite, islogical, islong_complex, + islong_double, islong_doublefunction, islong_long, + islong_longfunction, ismoduleroutine, isoptional, isrequired, + isscalar, issigned_long_longarray, isstring, isstringarray, + isstringfunction, issubroutine, isattr_value, + issubroutine_wrap, isthreadsafe, isunsigned, isunsigned_char, + isunsigned_chararray, isunsigned_long_long, + isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray, + l_and, l_not, l_or, outmess, replace, stripcomma, requiresf90wrapper +) + +from . import capi_maps +from . import cfuncs +from . import common_rules +from . 
import use_rules
+from . import f90mod_rules
+from . import func2subr
+
+f2py_version = __version__.version
+numpy_version = __version__.version
+
+options = {}
+sepdict = {}
+# for k in ['need_cfuncs']: sepdict[k]=','
+for k in ['decl',
+          'frompyobj',
+          'cleanupfrompyobj',
+          'topyarr', 'method',
+          'pyobjfrom', 'closepyobjfrom',
+          'freemem',
+          'userincludes',
+          'includes0', 'includes', 'typedefs', 'typedefs_generated',
+          'cppmacros', 'cfuncs', 'callbacks',
+          'latexdoc',
+          'restdoc',
+          'routine_defs', 'externroutines',
+          'initf2pywraphooks',
+          'commonhooks', 'initcommonhooks',
+          'f90modhooks', 'initf90modhooks']:
+    sepdict[k] = '\n'
+
+#################### Rules for C/API module #################
+
+generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
+module_rules = {
+    'modulebody': """\
+/* File: #modulename#module.c
+ * This file is auto-generated with f2py (version:#f2py_version#).
+ * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
+ * written by Pearu Peterson <pearu@cens.ioc.ee>.
+ * Generation date: """ + time.asctime(time.gmtime(generationtime)) + """
+ * Do not edit this file directly unless you know what you are doing!!!
+ */
+
+#ifdef __cplusplus
+extern \"C\" {
+#endif
+
+#ifndef PY_SSIZE_T_CLEAN
+#define PY_SSIZE_T_CLEAN
+#endif /* PY_SSIZE_T_CLEAN */
+
+/* Unconditionally included */
+#include <Python.h>
+#include <numpy/arrayobject.h>
+
+""" + gentitle("See f2py2e/cfuncs.py: includes") + """
+#includes#
+#includes0#
+
+""" + gentitle("See f2py2e/rules.py: mod_rules['modulebody']") + """
+static PyObject *#modulename#_error;
+static PyObject *#modulename#_module;
+
+""" + gentitle("See f2py2e/cfuncs.py: typedefs") + """
+#typedefs#
+
+""" + gentitle("See f2py2e/cfuncs.py: typedefs_generated") + """
+#typedefs_generated#
+
+""" + gentitle("See f2py2e/cfuncs.py: cppmacros") + """
+#cppmacros#
+
+""" + gentitle("See f2py2e/cfuncs.py: cfuncs") + """
+#cfuncs#
+
+""" + gentitle("See f2py2e/cfuncs.py: userincludes") + """
+#userincludes#
+
+""" + gentitle("See f2py2e/capi_rules.py: usercode") + """
+#usercode#
+
+/* See f2py2e/rules.py */
+#externroutines#
+
+""" + gentitle("See f2py2e/capi_rules.py: usercode1") + """
+#usercode1#
+
+""" + gentitle("See f2py2e/cb_rules.py: buildcallback") + """
+#callbacks#
+
+""" + gentitle("See f2py2e/rules.py: buildapi") + """
+#body#
+
+""" + gentitle("See f2py2e/f90mod_rules.py: buildhooks") + """
+#f90modhooks#
+
+""" + gentitle("See f2py2e/rules.py: module_rules['modulebody']") + """
+
+""" + gentitle("See f2py2e/common_rules.py: buildhooks") + """
+#commonhooks#
+
+""" + gentitle("See f2py2e/rules.py") + """
+
+static FortranDataDef f2py_routine_defs[] = {
+#routine_defs#
+    {NULL}
+};
+
+static PyMethodDef f2py_module_methods[] = {
+#pymethoddef#
+    {NULL,NULL}
+};
+
+static struct PyModuleDef moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "#modulename#",
+    NULL,
+    -1,
+    f2py_module_methods,
+    NULL,
+    NULL,
+    NULL,
+    NULL
+};
+
+PyMODINIT_FUNC PyInit_#modulename#(void) {
+    int i;
+    PyObject *m,*d, *s, *tmp;
+    m = #modulename#_module = PyModule_Create(&moduledef);
+    Py_SET_TYPE(&PyFortran_Type, &PyType_Type);
+    import_array();
+    if (PyErr_Occurred())
+        {PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;}
+    d = PyModule_GetDict(m);
+    s = PyUnicode_FromString(\"#f2py_version#\");
+    PyDict_SetItemString(d, \"__version__\", s);
+    Py_DECREF(s);
+    s = PyUnicode_FromString(
+        \"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
+
PyDict_SetItemString(d, \"__doc__\", s); + Py_DECREF(s); + s = PyUnicode_FromString(\"""" + numpy_version + """\"); + PyDict_SetItemString(d, \"__f2py_numpy_version__\", s); + Py_DECREF(s); + #modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL); + /* + * Store the error object inside the dict, so that it could get deallocated. + * (in practice, this is a module, so it likely will not and cannot.) + */ + PyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error); + Py_DECREF(#modulename#_error); + for(i=0;f2py_routine_defs[i].name!=NULL;i++) { + tmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]); + PyDict_SetItemString(d, f2py_routine_defs[i].name, tmp); + Py_DECREF(tmp); + } +#initf2pywraphooks# +#initf90modhooks# +#initcommonhooks# +#interface_usercode# + +#ifdef F2PY_REPORT_ATEXIT + if (! PyErr_Occurred()) + on_exit(f2py_report_on_exit,(void*)\"#modulename#\"); +#endif + return m; +} +#ifdef __cplusplus +} +#endif +""", + 'separatorsfor': {'latexdoc': '\n\n', + 'restdoc': '\n\n'}, + 'latexdoc': ['\\section{Module \\texttt{#texmodulename#}}\n', + '#modnote#\n', + '#latexdoc#'], + 'restdoc': ['Module #modulename#\n' + '=' * 80, + '\n#restdoc#'] +} + +defmod_rules = [ + {'body': '/*eof body*/', + 'method': '/*eof method*/', + 'externroutines': '/*eof externroutines*/', + 'routine_defs': '/*eof routine_defs*/', + 'initf90modhooks': '/*eof initf90modhooks*/', + 'initf2pywraphooks': '/*eof initf2pywraphooks*/', + 'initcommonhooks': '/*eof initcommonhooks*/', + 'latexdoc': '', + 'restdoc': '', + 'modnote': {hasnote: '#note#', l_not(hasnote): ''}, + } +] + +routine_rules = { + 'separatorsfor': sepdict, + 'body': """ +#begintitle# +static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\"; +/* #declfortranroutine# */ +static PyObject *#apiname#(const PyObject *capi_self, + PyObject *capi_args, + PyObject *capi_keywds, + #functype# (*f2py_func)(#callprotoargument#)) { + PyObject * volatile capi_buildvalue = NULL; + volatile int f2py_success = 1; +#decl# + static char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL}; +#usercode# +#routdebugenter# +#ifdef F2PY_REPORT_ATEXIT +f2py_start_clock(); +#endif + if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ + \"#argformat#|#keyformat##xaformat#:#pyname#\",\\ + capi_kwlist#args_capi##keys_capi##keys_xa#))\n return NULL; +#frompyobj# +/*end of frompyobj*/ +#ifdef F2PY_REPORT_ATEXIT +f2py_start_call_clock(); +#endif +#callfortranroutine# +if (PyErr_Occurred()) + f2py_success = 0; +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_call_clock(); +#endif +/*end of callfortranroutine*/ + if (f2py_success) { +#pyobjfrom# +/*end of pyobjfrom*/ + CFUNCSMESS(\"Building return value.\\n\"); + capi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#); +/*closepyobjfrom*/ +#closepyobjfrom# + } /*if (f2py_success) after callfortranroutine*/ +/*cleanupfrompyobj*/ +#cleanupfrompyobj# + if (capi_buildvalue == NULL) { +#routdebugfailure# + } else { +#routdebugleave# + } + CFUNCSMESS(\"Freeing memory.\\n\"); +#freemem# +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_clock(); +#endif + return capi_buildvalue; +} +#endtitle# +""", + 'routine_defs': '#routine_def#', + 'initf2pywraphooks': '#initf2pywraphook#', + 'externroutines': '#declfortranroutine#', + 'doc': '#docreturn##name#(#docsignature#)', + 'docshort': '#docreturn##name#(#docsignatureshort#)', + 'docs': '" #docreturn##name#(#docsignature#)\\n"\n', + 'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'], + 'cppmacros': 
{debugcapi: '#define DEBUGCFUNCS'}, + 'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n', + """ +\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)} +#routnote# + +#latexdocstrsigns# +"""], + 'restdoc': ['Wrapped function ``#name#``\n' + '-' * 80, + + ] +} + +################## Rules for C/API function ############## + +rout_rules = [ + { # Init + 'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n', + 'routdebugleave': '\n', 'routdebugfailure': '\n', + 'setjmpbuf': ' || ', + 'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n', + 'docstrcbs': '\n', 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', + }, + 'kwlist': '', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '', + 'docsign': '', 'docsignopt': '', 'decl': '/*decl*/', + 'freemem': '/*freemem*/', + 'docsignshort': '', 'docsignoptshort': '', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': '\\nParameters\\n----------', + 'docstropt': '\\nOther Parameters\\n----------------', + 'docstrout': '\\nReturns\\n-------', + 'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'args_capi': '', 'keys_capi': '', 'functype': '', + 'frompyobj': '/*frompyobj*/', + # this list will be reversed + 'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'], + 'pyobjfrom': '/*pyobjfrom*/', + # this list will be reversed + 'closepyobjfrom': ['/*end of closepyobjfrom*/'], + 'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/', + 'routdebugenter': '/*routdebugenter*/', + 'routdebugfailure': '/*routdebugfailure*/', + 'callfortranroutine': '/*callfortranroutine*/', + 'argformat': '', 'keyformat': '', 'need_cfuncs': '', + 'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '', + 'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '', + 'initf2pywraphook': '', + 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, + }, { + 'apiname': 'f2py_rout_#modulename#_#name#', + 'pyname': '#modulename#.#name#', + 'decl': '', + '_check': l_not(ismoduleroutine) + }, { + 'apiname': 'f2py_rout_#modulename#_#f90modulename#_#name#', + 'pyname': '#modulename#.#f90modulename#.#name#', + 'decl': '', + '_check': ismoduleroutine + }, { # Subroutine + 'functype': 'void', + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern void #fortranname#(#callprotoargument#);', + ismoduleroutine: '', + isdummyroutine: '' + }, + 'routine_def': { + l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_FUNC#(#fortranname#,#FORTRANNAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isdummyroutine): + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'}, + 
'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {hascallstatement: ''' #callstatement#; + /*(*f2py_func)(#callfortran#);*/'''}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: """ }"""} + ], + '_check': l_and(issubroutine, l_not(issubroutine_wrap)), + }, { # Wrapped function + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 'routine_def': { + l_not(l_or(ismoduleroutine, isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_WRAPPEDFUNC#(#name_lower#,#NAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' + { + extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); + PyObject_SetAttrString(o,"_cpointer", tmp); + Py_DECREF(tmp); + s = PyUnicode_FromString("#name#"); + PyObject_SetAttrString(o,"__name__", s); + Py_DECREF(s); + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + {hascallstatement: + ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'} + ], + '_check': isfunction_wrap, + }, { # Wrapped subroutine + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 'routine_def': { + l_not(l_or(ismoduleroutine, isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_WRAPPEDFUNC#(#name_lower#,#NAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' + { + extern void #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); + PyObject_SetAttrString(o,"_cpointer", tmp); + Py_DECREF(tmp); + s = PyUnicode_FromString("#name#"); + PyObject_SetAttrString(o,"__name__", s); + Py_DECREF(s); + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + 
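+        # When the signature file supplies a `callstatement`, the fragment
+        # below replaces the default (*f2py_func)(...) invocation above, and
+        # the default call survives only as a comment in the generated C.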
{hascallstatement: + ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'} + ], + '_check': issubroutine_wrap, + }, { # Function + 'functype': '#ctype#', + 'docreturn': {l_not(isintent_hide): '#rname#,'}, + 'docstrout': '#pydocsignout#', + 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasresultnote: '--- #resultnote#'}], + 'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\ +#ifdef USESCOMPAQFORTRAN + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); +#else + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +#endif +"""}, + {l_and(debugcapi, l_not(isstringfunction)): """\ + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +"""} + ], + '_check': l_and(isfunction, l_not(isfunction_wrap)) + }, { # Scalar function + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);', + isdummyroutine: '' + }, + 'routine_def': { + l_and(l_not(l_or(ismoduleroutine, isintent_c)), + l_not(isdummyroutine)): + (' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_FUNC#(#fortranname#,#FORTRANNAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},'), + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): + (' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,' + ' (f2py_init_func)#apiname#,doc_#apiname#},'), + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + '(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'decl': [{iscomplexfunction_warn: ' #ctype# #name#_return_value={0,0};', + l_not(iscomplexfunction): ' #ctype# #name#_return_value=0;'}, + {iscomplexfunction: + ' PyObject *#name#_return_value_capi = Py_None;'} + ], + 'callfortranroutine': [ + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {hascallstatement: ''' #callstatement#; +/* #name#_return_value = (*f2py_func)(#callfortran#);*/ +'''}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' #name#_return_value = (*f2py_func)(#callfortran#);'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'}, + {l_and(debugcapi, iscomplexfunction) + : ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, + {l_and(debugcapi, l_not(iscomplexfunction)): ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], + 'pyobjfrom': {iscomplexfunction: ' #name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, + 'need': [{l_not(isdummyroutine): 'F_FUNC'}, + {iscomplexfunction: 'pyobj_from_#ctype#1'}, + {islong_longfunction: 'long_long'}, + {islong_doublefunction: 'long_double'}], + 'returnformat': {l_not(isintent_hide): '#rformat#'}, + 'return': {iscomplexfunction: ',#name#_return_value_capi', + l_not(l_or(iscomplexfunction, isintent_hide)): ',#name#_return_value'}, + '_check': l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap)) + }, { # String function # in use for --no-wrap + 'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + 
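+    # Each routine_def fragment initializes one FortranDataDef row (see
+    # fortranobject.h); the fields are roughly {name, rank (-1 marks a
+    # routine), dims, type, elsize, data = the Fortran symbol, func = the
+    # C wrapper, doc}, and fortran_call in fortranobject.c dispatches
+    # through func, handing it data.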
l_and(l_not(ismoduleroutine), isintent_c): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' + }, + 'decl': [' #ctype# #name#_return_value = NULL;', + ' int #name#_return_value_len = 0;'], + 'callfortran':'#name#_return_value,#name#_return_value_len,', + 'callfortranroutine':[' #name#_return_value_len = #rlength#;', + ' if ((#name#_return_value = (string)malloc(' + + '#name#_return_value_len+1) == NULL) {', + ' PyErr_SetString(PyExc_MemoryError, \"out of memory\");', + ' f2py_success = 0;', + ' } else {', + " (#name#_return_value)[#name#_return_value_len] = '\\0';", + ' }', + ' if (f2py_success) {', + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + """\ +#ifdef USESCOMPAQFORTRAN + (*f2py_func)(#callcompaqfortran#); +#else + (*f2py_func)(#callfortran#); +#endif +""", + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'}, + {debugcapi: + ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, + ' } /* if (f2py_success) after (string)malloc */', + ], + 'returnformat': '#rformat#', + 'return': ',#name#_return_value', + 'freemem': ' STRINGFREE(#name#_return_value);', + 'need': ['F_FUNC', '#ctype#', 'STRINGFREE'], + '_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete + }, + { # Debugging + 'routdebugenter': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', + 'routdebugleave': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', + 'routdebugfailure': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', + '_check': debugcapi + } +] + +################ Rules for arguments ################## + +typedef_need_dict = {islong_long: 'long_long', + islong_double: 'long_double', + islong_complex: 'complex_long_double', + isunsigned_char: 'unsigned_char', + isunsigned_short: 'unsigned_short', + isunsigned: 'unsigned', + isunsigned_long_long: 'unsigned_long_long', + isunsigned_chararray: 'unsigned_char', + isunsigned_shortarray: 'unsigned_short', + isunsigned_long_longarray: 'unsigned_long_long', + issigned_long_longarray: 'long_long', + isint1: 'signed_char', + ischaracter_or_characterarray: 'character', + } + +aux_rules = [ + { + 'separatorsfor': sepdict + }, + { # Common + 'frompyobj': [' /* Processing auxiliary variable #varname# */', + {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */', + 'need': typedef_need_dict, + }, + # Scalars (not complex) + { # Common + 'decl': ' #ctype# #varname# = 0;', + 'need': {hasinitvalue: 'math.h'}, + 'frompyobj': {hasinitvalue: ' #varname# = #init#;'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, + { + 'return': ',#varname#', + 'docstrout': '#pydocsignout#', + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': l_and(isscalar, l_not(iscomplex), isintent_out), + }, + # Complex scalars + { # Common + 'decl': ' #ctype# #varname#;', + 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check': iscomplex + }, + # String + { # Common + 'decl': [' #ctype# #varname# = NULL;', + ' int slen(#varname#);', + ], + 'need':['len..'], + '_check':isstring + }, + # Array + { # Common + 'decl': [' #ctype# *#varname# = NULL;', + ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + ' const int #varname#_Rank = #rank#;', + ], + 'need':['len..', 
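+                # Names under 'need' refer to helper snippets (macros,
+                # typedefs, C functions) that buildmodule later copies into
+                # the generated source from the lookup tables in cfuncs.py;
+                # 'len..' provides the slen()/flen()-style length and shape
+                # macros used throughout.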
{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + '_check': isarray + }, + # Scalararray + { # Common + '_check': l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, + # Integer*1 array + {'need': '#ctype#', + '_check': isint1array, + '_depend': '' + }, + # Integer*-1 array + {'need': '#ctype#', + '_check': l_or(isunsigned_chararray, isunsigned_char), + '_depend': '' + }, + # Integer*-2 array + {'need': '#ctype#', + '_check': isunsigned_shortarray, + '_depend': '' + }, + # Integer*-8 array + {'need': '#ctype#', + '_check': isunsigned_long_longarray, + '_depend': '' + }, + # Complexarray + {'need': '#ctype#', + '_check': iscomplexarray, + '_depend': '' + }, + # Stringarray + { + 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, + 'need': 'string', + '_check': isstringarray + } +] + +arg_rules = [ + { + 'separatorsfor': sepdict + }, + { # Common + 'frompyobj': [' /* Processing variable #varname# */', + {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */', + '_depend': '', + 'need': typedef_need_dict, + }, + # Doc signatures + { + 'docstropt': {l_and(isoptional, isintent_nothide): '#pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): '#pydocsign#'}, + 'docstrout': {isintent_out: '#pydocsignout#'}, + 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide): '--- #note#', + l_and(hasnote, isintent_nothide): '--- See above.'}]}, + 'depend': '' + }, + # Required/Optional arguments + { + 'kwlist': '"#varname#",', + 'docsign': '#varname#,', + '_check': l_and(isintent_nothide, l_not(isoptional)) + }, + { + 'kwlistopt': '"#varname#",', + 'docsignopt': '#varname#=#showinit#,', + 'docsignoptshort': '#varname#,', + '_check': l_and(isintent_nothide, isoptional) + }, + # Docstring/BuildValue + { + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': isintent_out + }, + # Externals (call-back functions) + { # Common + 'docsignxa': {isintent_nothide: '#varname#_extra_args=(),'}, + 'docsignxashort': {isintent_nothide: '#varname#_extra_args,'}, + 'docstropt': {isintent_nothide: '#varname#_extra_args : input tuple, optional\\n Default: ()'}, + 'docstrcbs': '#cbdocstr#', + 'latexdocstrcbs': '\\item[] #cblatexdocstr#', + 'latexdocstropt': {isintent_nothide: '\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'}, + 'decl': [' #cbname#_t #varname#_cb = { Py_None, NULL, 0 };', + ' #cbname#_t *#varname#_cb_ptr = &#varname#_cb;', + ' PyTupleObject *#varname#_xa_capi = NULL;', + {l_not(isintent_callback): + ' #cbname#_typedef #varname#_cptr;'} + ], + 'kwlistxa': {isintent_nothide: '"#varname#_extra_args",'}, + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'xaformat': {isintent_nothide: 'O!'}, + 'args_capi': {isrequired: ',&#varname#_cb.capi'}, + 'keys_capi': {isoptional: ',&#varname#_cb.capi'}, + 'keys_xa': ',&PyTuple_Type,&#varname#_xa_capi', + 'setjmpbuf': '(setjmp(#varname#_cb.jmpbuf))', + 'callfortran': {l_not(isintent_callback): '#varname#_cptr,'}, + 'need': ['#cbname#', 'setjmp.h'], + '_check':isexternal + }, + { + 
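+        # Call-back plumbing: the frompyobj code below accepts either an
+        # F2PyCapsule holding a raw C function pointer, which is used
+        # directly, or a Python callable routed through the generated
+        # #cbname# trampoline; the active callback record is swapped in
+        # around the Fortran call and restored in cleanupfrompyobj.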
'frompyobj': [{l_not(isintent_callback): """\ +if(F2PyCapsule_Check(#varname#_cb.capi)) { + #varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_cb.capi); +} else { + #varname#_cptr = #cbname#; +} +"""}, {isintent_callback: """\ +if (#varname#_cb.capi==Py_None) { + #varname#_cb.capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\"); + if (#varname#_cb.capi) { + if (#varname#_xa_capi==NULL) { + if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) { + PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\"); + if (capi_tmp) { + #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp); + Py_DECREF(capi_tmp); + } + else { + #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\"); + } + if (#varname#_xa_capi==NULL) { + PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\"); + return NULL; + } + } + } + } + if (#varname#_cb.capi==NULL) { + PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\"); + return NULL; + } +} +"""}, + """\ + if (create_cb_arglist(#varname#_cb.capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#varname#_cb.nofargs,&#varname#_cb.args_capi,\"failed in processing argument list for call-back #varname#.\")) { +""", + {debugcapi: ["""\ + fprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#varname#_cb.nofargs); + CFUNCSMESSPY(\"for #varname#=\",#varname#_cb.capi);""", + {l_not(isintent_callback): """ fprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, + """\ + CFUNCSMESS(\"Saving callback variables for `#varname#`.\\n\"); + #varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr);""", + ], + 'cleanupfrompyobj': + """\ + CFUNCSMESS(\"Restoring callback variables for `#varname#`.\\n\"); + #varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr); + Py_DECREF(#varname#_cb.args_capi); + }""", + 'need': ['SWAP', 'create_cb_arglist'], + '_check':isexternal, + '_depend':'' + }, + # Scalars (not complex) + { # Common + 'decl': ' #ctype# #varname# = 0;', + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + 'callfortran': {l_or(isintent_c, isattr_value): '#varname#,', l_not(l_or(isintent_c, isattr_value)): '&#varname#,'}, + 'return': {isintent_out: ',#varname#'}, + '_check': l_and(isscalar, l_not(iscomplex)) + }, { + 'need': {hasinitvalue: 'math.h'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'pyobjfrom': {isintent_inout: """\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); + if (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + '_check': l_and(isscalar, l_not(iscomplex), l_not(isstring), + isintent_nothide) + }, { + 'frompyobj': [ + # hasinitvalue... + # if pyobj is None: + # varname = init + # else + # from_pyobj(varname) + # + # isoptional and noinitvalue... + # if pyobj is not None: + # from_pyobj(varname) + # else: + # varname is uninitialized + # + # ... 
+ # from_pyobj(varname) + # + {hasinitvalue: ' if (#varname#_capi == Py_None) #varname# = #init#; else', + '_depend': ''}, + {l_and(isoptional, l_not(hasinitvalue)): ' if (#varname#_capi != Py_None)', + '_depend': ''}, + {l_not(islogical): '''\ + f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); + if (f2py_success) {'''}, + {islogical: '''\ + #varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); + f2py_success = 1; + if (f2py_success) {'''}, + ], + 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname#*/', + 'need': {l_not(islogical): '#ctype#_from_pyobj'}, + '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide), + '_depend': '' + }, { # Hidden + 'frompyobj': {hasinitvalue: ' #varname# = #init#;'}, + 'need': typedef_need_dict, + '_check': l_and(isscalar, l_not(iscomplex), isintent_hide), + '_depend': '' + }, { # Common + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + '_check': l_and(isscalar, l_not(iscomplex)), + '_depend': '' + }, + # Complex scalars + { # Common + 'decl': ' #ctype# #varname#;', + 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + 'return': {isintent_out: ',#varname#_capi'}, + '_check': iscomplex + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + 'pyobjfrom': {isintent_inout: """\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); + if (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"}, + '_check': l_and(iscomplex, isintent_nothide) + }, { + 'frompyobj': [{hasinitvalue: ' if (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, + {l_and(isoptional, l_not(hasinitvalue)) + : ' if (#varname#_capi != Py_None)'}, + ' f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' + '\n if (f2py_success) {'], + 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname# frompyobj*/', + 'need': ['#ctype#_from_pyobj'], + '_check': l_and(iscomplex, isintent_nothide), + '_depend': '' + }, { # Hidden + 'decl': {isintent_out: ' PyObject *#varname#_capi = Py_None;'}, + '_check': l_and(iscomplex, isintent_hide) + }, { + 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check': l_and(iscomplex, isintent_hide), + '_depend': '' + }, { # Common + 'pyobjfrom': {isintent_out: ' #varname#_capi = pyobj_from_#ctype#1(#varname#);'}, + 'need': ['pyobj_from_#ctype#1'], + '_check': iscomplex + }, { + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + '_check': iscomplex, + '_depend': '' + }, + # String + { # Common + 'decl': [' #ctype# #varname# = NULL;', + ' int slen(#varname#);', + ' PyObject *#varname#_capi = Py_None;'], + 'callfortran':'#varname#,', + 'callfortranappend':'slen(#varname#),', + 'pyobjfrom':[ + {debugcapi: + ' fprintf(stderr,' + '"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + # The trailing null value for Fortran is blank. 
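+            # (STRINGPADN(buf, n, old, new) rewrites the trailing run of
+            # `old` characters in buf as `new`: trailing blanks become NULs
+            # on the way back to Python here, while the frompyobj rule in
+            # the next dict maps trailing NULs to blanks before the call.)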
+ {l_and(isintent_out, l_not(isintent_c)): + " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"}, + ], + 'return': {isintent_out: ',#varname#'}, + 'need': ['len..', + {l_and(isintent_out, l_not(isintent_c)): 'STRINGPADN'}], + '_check': isstring + }, { # Common + 'frompyobj': [ + """\ + slen(#varname#) = #elsize#; + f2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,""" +"""#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth#""" +"""`#varname#\' of #pyname# to C #ctype#\"); + if (f2py_success) {""", + # The trailing null value for Fortran is blank. + {l_not(isintent_c): + " STRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"}, + ], + 'cleanupfrompyobj': """\ + STRINGFREE(#varname#); + } /*if (f2py_success) of #varname#*/""", + 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE', + {l_not(isintent_c): 'STRINGPADN'}], + '_check':isstring, + '_depend':'' + }, { # Not hidden + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'pyobjfrom': [ + {l_and(isintent_inout, l_not(isintent_c)): + " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"}, + {isintent_inout: '''\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi, #varname#, + slen(#varname#)); + if (f2py_success) {'''}], + 'closepyobjfrom': {isintent_inout: ' } /*if (f2py_success) of #varname# pyobjfrom*/'}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#', + l_and(isintent_inout, l_not(isintent_c)): 'STRINGPADN'}, + '_check': l_and(isstring, isintent_nothide) + }, { # Hidden + '_check': l_and(isstring, isintent_hide) + }, { + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + '_check': isstring, + '_depend': '' + }, + # Array + { # Common + 'decl': [' #ctype# *#varname# = NULL;', + ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + ' const int #varname#_Rank = #rank#;', + ' PyArrayObject *capi_#varname#_as_array = NULL;', + ' int capi_#varname#_intent = 0;', + {isstringarray: ' int slen(#varname#) = 0;'}, + ], + 'callfortran':'#varname#,', + 'callfortranappend': {isstringarray: 'slen(#varname#),'}, + 'return': {isintent_out: ',capi_#varname#_as_array'}, + 'need': 'len..', + '_check': isarray + }, { # intent(overwrite) array + 'decl': ' int capi_overwrite_#varname# = 1;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=1,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1', + '_check': l_and(isarray, isintent_overwrite), + }, { + 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_overwrite), + '_depend': '', + }, + { # intent(copy) array + 'decl': ' int capi_overwrite_#varname# = 0;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=0,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0', + '_check': l_and(isarray, isintent_copy), + }, { + 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_copy), + '_depend': '', + }, { + 'need': [{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + '_check': isarray, + '_depend': '' + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi = 
Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + '_check': l_and(isarray, isintent_nothide) + }, { + 'frompyobj': [ + ' #setdims#;', + ' capi_#varname#_intent |= #intent#;', + (' const char * capi_errmess = "#modulename#.#pyname#:' + ' failed to create array from the #nth# `#varname#`";'), + {isintent_hide: + ' capi_#varname#_as_array = ndarray_from_pyobj(' + ' #atype#,#elsize#,#varname#_Dims,#varname#_Rank,' + ' capi_#varname#_intent,Py_None,capi_errmess);'}, + {isintent_nothide: + ' capi_#varname#_as_array = ndarray_from_pyobj(' + ' #atype#,#elsize#,#varname#_Dims,#varname#_Rank,' + ' capi_#varname#_intent,#varname#_capi,capi_errmess);'}, + """\ + if (capi_#varname#_as_array == NULL) { + PyObject* capi_err = PyErr_Occurred(); + if (capi_err == NULL) { + capi_err = #modulename#_error; + PyErr_SetString(capi_err, capi_errmess); + } + } else { + #varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_as_array)); +""", + {isstringarray: + ' slen(#varname#) = f2py_itemsize(#varname#);'}, + {hasinitvalue: [ + {isintent_nothide: + ' if (#varname#_capi == Py_None) {'}, + {isintent_hide: ' {'}, + {iscomplexarray: ' #ctype# capi_c;'}, + """\ + int *_i,capi_i=0; + CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); + if (initforcomb(PyArray_DIMS(capi_#varname#_as_array), + PyArray_NDIM(capi_#varname#_as_array),1)) { + while ((_i = nextforcomb())) + #varname#[capi_i++] = #init#; /* fortran way */ + } else { + PyObject *exc, *val, *tb; + PyErr_Fetch(&exc, &val, &tb); + PyErr_SetString(exc ? exc : #modulename#_error, + \"Initialization of #nth# #varname# failed (initforcomb).\"); + npy_PyErr_ChainExceptionsCause(exc, val, tb); + f2py_success = 0; + } + } + if (f2py_success) {"""]}, + ], + 'cleanupfrompyobj': [ # note that this list will be reversed + ' } ' + '/* if (capi_#varname#_as_array == NULL) ... else of #varname# */', + {l_not(l_or(isintent_out, isintent_hide)): """\ + if((PyObject *)capi_#varname#_as_array!=#varname#_capi) { + Py_XDECREF(capi_#varname#_as_array); }"""}, + {l_and(isintent_hide, l_not(isintent_out)) + : """ Py_XDECREF(capi_#varname#_as_array);"""}, + {hasinitvalue: ' } /*if (f2py_success) of #varname# init*/'}, + ], + '_check': isarray, + '_depend': '' + }, + # Scalararray + { # Common + '_check': l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, + # Integer*1 array + {'need': '#ctype#', + '_check': isint1array, + '_depend': '' + }, + # Integer*-1 array + {'need': '#ctype#', + '_check': isunsigned_chararray, + '_depend': '' + }, + # Integer*-2 array + {'need': '#ctype#', + '_check': isunsigned_shortarray, + '_depend': '' + }, + # Integer*-8 array + {'need': '#ctype#', + '_check': isunsigned_long_longarray, + '_depend': '' + }, + # Complexarray + {'need': '#ctype#', + '_check': iscomplexarray, + '_depend': '' + }, + # Character + { + 'need': 'string', + '_check': ischaracter, + }, + # Character array + { + 'need': 'string', + '_check': ischaracterarray, + }, + # Stringarray + { + 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, + 'need': 'string', + '_check': isstringarray + } +] + +################# Rules for checking ############### + +check_rules = [ + { + 'frompyobj': {debugcapi: ' fprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, + 'need': 'len..' 
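+    # Each check(...) clause from the signature is expanded by exactly one
+    # of the rules below ('_break' stops rule application after the first
+    # match): CHECKSCALAR for scalars, CHECKSTRING for strings, CHECKARRAY
+    # for arrays, and CHECKGENERIC as the fallback; on failure the macro
+    # sets a Python error and the guarded Fortran call is skipped.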
+ }, { + 'frompyobj': ' CHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj': ' } /*CHECKSCALAR(#check#)*/', + 'need': 'CHECKSCALAR', + '_check': l_and(isscalar, l_not(iscomplex)), + '_break': '' + }, { + 'frompyobj': ' CHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {', + 'cleanupfrompyobj': ' } /*CHECKSTRING(#check#)*/', + 'need': 'CHECKSTRING', + '_check': isstring, + '_break': '' + }, { + 'need': 'CHECKARRAY', + 'frompyobj': ' CHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': ' } /*CHECKARRAY(#check#)*/', + '_check': isarray, + '_break': '' + }, { + 'need': 'CHECKGENERIC', + 'frompyobj': ' CHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {', + 'cleanupfrompyobj': ' } /*CHECKGENERIC(#check#)*/', + } +] + +########## Applying the rules. No need to modify what follows ############# + +#################### Build C/API module ####################### + + +def buildmodule(m, um): + """ + Return + """ + outmess(' Building module "%s"...\n' % (m['name'])) + ret = {} + mod_rules = defmod_rules[:] + vrd = capi_maps.modsign2map(m) + rd = dictappend({'f2py_version': f2py_version}, vrd) + funcwrappers = [] + funcwrappers2 = [] # F90 codes + for n in m['interfaced']: + nb = None + for bi in m['body']: + if bi['block'] not in ['interface', 'abstract interface']: + errmess('buildmodule: Expected interface block. Skipping.\n') + continue + for b in bi['body']: + if b['name'] == n: + nb = b + break + + if not nb: + print( + 'buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n' % (n), file=sys.stderr) + continue + nb_list = [nb] + if 'entry' in nb: + for k, a in nb['entry'].items(): + nb1 = copy.deepcopy(nb) + del nb1['entry'] + nb1['name'] = k + nb1['args'] = a + nb_list.append(nb1) + for nb in nb_list: + # requiresf90wrapper must be called before buildapi as it + # rewrites assumed shape arrays as automatic arrays. 
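+            # For instance, a dummy argument declared `real :: a(:)` cannot
+            # cross the F77-style C interface, so the generated F90 wrapper
+            # receives its extents explicitly and redeclares it roughly as
+            # `real :: a(n)` with an extra integer argument n.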
+ isf90 = requiresf90wrapper(nb) + # options is in scope here + if options['emptygen']: + b_path = options['buildpath'] + m_name = vrd['modulename'] + outmess(' Generating possibly empty wrappers"\n') + Path(f"{b_path}/{vrd['coutput']}").touch() + if isf90: + # f77 + f90 wrappers + outmess(f' Maybe empty "{m_name}-f2pywrappers2.f90"\n') + Path(f'{b_path}/{m_name}-f2pywrappers2.f90').touch() + outmess(f' Maybe empty "{m_name}-f2pywrappers.f"\n') + Path(f'{b_path}/{m_name}-f2pywrappers.f').touch() + else: + # only f77 wrappers + outmess(f' Maybe empty "{m_name}-f2pywrappers.f"\n') + Path(f'{b_path}/{m_name}-f2pywrappers.f').touch() + api, wrap = buildapi(nb) + if wrap: + if isf90: + funcwrappers2.append(wrap) + else: + funcwrappers.append(wrap) + ar = applyrules(api, vrd) + rd = dictappend(rd, ar) + + # Construct COMMON block support + cr, wrap = common_rules.buildhooks(m) + if wrap: + funcwrappers.append(wrap) + ar = applyrules(cr, vrd) + rd = dictappend(rd, ar) + + # Construct F90 module support + mr, wrap = f90mod_rules.buildhooks(m) + if wrap: + funcwrappers2.append(wrap) + ar = applyrules(mr, vrd) + rd = dictappend(rd, ar) + + for u in um: + ar = use_rules.buildusevars(u, m['use'][u['name']]) + rd = dictappend(rd, ar) + + needs = cfuncs.get_needs() + # Add mapped definitions + needs['typedefs'] += [cvar for cvar in capi_maps.f2cmap_mapped # + if cvar in typedef_need_dict.values()] + code = {} + for n in needs.keys(): + code[n] = [] + for k in needs[n]: + c = '' + if k in cfuncs.includes0: + c = cfuncs.includes0[k] + elif k in cfuncs.includes: + c = cfuncs.includes[k] + elif k in cfuncs.userincludes: + c = cfuncs.userincludes[k] + elif k in cfuncs.typedefs: + c = cfuncs.typedefs[k] + elif k in cfuncs.typedefs_generated: + c = cfuncs.typedefs_generated[k] + elif k in cfuncs.cppmacros: + c = cfuncs.cppmacros[k] + elif k in cfuncs.cfuncs: + c = cfuncs.cfuncs[k] + elif k in cfuncs.callbacks: + c = cfuncs.callbacks[k] + elif k in cfuncs.f90modhooks: + c = cfuncs.f90modhooks[k] + elif k in cfuncs.commonhooks: + c = cfuncs.commonhooks[k] + else: + errmess('buildmodule: unknown need %s.\n' % (repr(k))) + continue + code[n].append(c) + mod_rules.append(code) + for r in mod_rules: + if ('_check' in r and r['_check'](m)) or ('_check' not in r): + ar = applyrules(r, vrd, m) + rd = dictappend(rd, ar) + ar = applyrules(module_rules, rd) + + fn = os.path.join(options['buildpath'], vrd['coutput']) + ret['csrc'] = fn + with open(fn, 'w') as f: + f.write(ar['modulebody'].replace('\t', 2 * ' ')) + outmess(' Wrote C/API module "%s" to file "%s"\n' % (m['name'], fn)) + + if options['dorestdoc']: + fn = os.path.join( + options['buildpath'], vrd['modulename'] + 'module.rest') + with open(fn, 'w') as f: + f.write('.. 
-*- rest -*-\n') + f.write('\n'.join(ar['restdoc'])) + outmess(' ReST Documentation is saved to file "%s/%smodule.rest"\n' % + (options['buildpath'], vrd['modulename'])) + if options['dolatexdoc']: + fn = os.path.join( + options['buildpath'], vrd['modulename'] + 'module.tex') + ret['ltx'] = fn + with open(fn, 'w') as f: + f.write( + '%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version)) + if 'shortlatex' not in options: + f.write( + '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n') + f.write('\n'.join(ar['latexdoc'])) + if 'shortlatex' not in options: + f.write('\\end{document}') + outmess(' Documentation is saved to file "%s/%smodule.tex"\n' % + (options['buildpath'], vrd['modulename'])) + if funcwrappers: + wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output']) + ret['fsrc'] = wn + with open(wn, 'w') as f: + f.write('C -*- fortran -*-\n') + f.write( + 'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) + f.write( + 'C It contains Fortran 77 wrappers to fortran functions.\n') + lines = [] + for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'): + if 0 <= l.find('!') < 66: + # don't split comment lines + lines.append(l + '\n') + elif l and l[0] == ' ': + while len(l) >= 66: + lines.append(l[:66] + '\n &') + l = l[66:] + lines.append(l + '\n') + else: + lines.append(l + '\n') + lines = ''.join(lines).replace('\n &\n', '\n') + f.write(lines) + outmess(' Fortran 77 wrappers are saved to "%s"\n' % (wn)) + if funcwrappers2: + wn = os.path.join( + options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename'])) + ret['fsrc'] = wn + with open(wn, 'w') as f: + f.write('! -*- f90 -*-\n') + f.write( + '! This file is autogenerated with f2py (version:%s)\n' % (f2py_version)) + f.write( + '! 
It contains Fortran 90 wrappers to fortran functions.\n') + lines = [] + for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'): + if 0 <= l.find('!') < 72: + # don't split comment lines + lines.append(l + '\n') + elif len(l) > 72 and l[0] == ' ': + lines.append(l[:72] + '&\n &') + l = l[72:] + while len(l) > 66: + lines.append(l[:66] + '&\n &') + l = l[66:] + lines.append(l + '\n') + else: + lines.append(l + '\n') + lines = ''.join(lines).replace('\n &\n', '\n') + f.write(lines) + outmess(' Fortran 90 wrappers are saved to "%s"\n' % (wn)) + return ret + +################## Build C/API function ############# + +stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th', + 6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'} + + +def buildapi(rout): + rout, wrap = func2subr.assubr(rout) + args, depargs = getargs2(rout) + capi_maps.depargs = depargs + var = rout['vars'] + + if ismoduleroutine(rout): + outmess(' Constructing wrapper function "%s.%s"...\n' % + (rout['modulename'], rout['name'])) + else: + outmess(' Constructing wrapper function "%s"...\n' % (rout['name'])) + # Routine + vrd = capi_maps.routsign2map(rout) + rd = dictappend({}, vrd) + for r in rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar = applyrules(r, vrd, rout) + rd = dictappend(rd, ar) + + # Args + nth, nthk = 0, 0 + savevrd = {} + for a in args: + vrd = capi_maps.sign2map(a, var[a]) + if isintent_aux(var[a]): + _rules = aux_rules + else: + _rules = arg_rules + if not isintent_hide(var[a]): + if not isoptional(var[a]): + nth = nth + 1 + vrd['nth'] = repr(nth) + stnd[nth % 10] + ' argument' + else: + nthk = nthk + 1 + vrd['nth'] = repr(nthk) + stnd[nthk % 10] + ' keyword' + else: + vrd['nth'] = 'hidden' + savevrd[a] = vrd + for r in _rules: + if '_depend' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + if isintent_aux(var[a]): + _rules = aux_rules + else: + _rules = arg_rules + vrd = savevrd[a] + for r in _rules: + if '_depend' not in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + if 'check' in var[a]: + for c in var[a]['check']: + vrd['check'] = c + ar = applyrules(check_rules, vrd, var[a]) + rd = dictappend(rd, ar) + if isinstance(rd['cleanupfrompyobj'], list): + rd['cleanupfrompyobj'].reverse() + if isinstance(rd['closepyobjfrom'], list): + rd['closepyobjfrom'].reverse() + rd['docsignature'] = stripcomma(replace('#docsign##docsignopt##docsignxa#', + {'docsign': rd['docsign'], + 'docsignopt': rd['docsignopt'], + 'docsignxa': rd['docsignxa']})) + optargs = stripcomma(replace('#docsignopt##docsignxa#', + {'docsignxa': rd['docsignxashort'], + 'docsignopt': rd['docsignoptshort']} + )) + if optargs == '': + rd['docsignatureshort'] = stripcomma( + replace('#docsign#', {'docsign': rd['docsign']})) + else: + rd['docsignatureshort'] = replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignatureshort'] = rd['docsignatureshort'].replace('_', '\\_') + rd['latexdocsignatureshort'] = rd[ + 'latexdocsignatureshort'].replace(',', ', ') + cfs = stripcomma(replace('#callfortran##callfortranappend#', { + 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) + if len(rd['callfortranappend']) > 1: + rd['callcompaqfortran'] = stripcomma(replace('#callfortran# 
0,#callfortranappend#', { + 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) + else: + rd['callcompaqfortran'] = cfs + rd['callfortran'] = cfs + if isinstance(rd['docreturn'], list): + rd['docreturn'] = stripcomma( + replace('#docreturn#', {'docreturn': rd['docreturn']})) + ' = ' + rd['docstrsigns'] = [] + rd['latexdocstrsigns'] = [] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns'] = rd['docstrsigns'] + rd[k] + k = 'latex' + k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ + ['\\begin{description}'] + rd[k][1:] +\ + ['\\end{description}'] + + ar = applyrules(routine_rules, rd) + if ismoduleroutine(rout): + outmess(' %s\n' % (ar['docshort'])) + else: + outmess(' %s\n' % (ar['docshort'])) + return ar, wrap + + +#################### EOF rules.py ####################### diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/setup.cfg b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/setup.cfg new file mode 100644 index 00000000..14669544 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/setup.cfg @@ -0,0 +1,3 @@ +[bdist_rpm] +doc_files = docs/ + tests/ \ No newline at end of file diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/setup.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/setup.py new file mode 100644 index 00000000..05bef300 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/setup.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 +""" +setup.py for installing F2PY + +Usage: + pip install . + +Copyright 2001-2005 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
+$Revision: 1.32 $
+$Date: 2005/01/30 17:22:14 $
+Pearu Peterson
+
+"""
+from numpy.distutils.core import setup
+from numpy.distutils.misc_util import Configuration
+
+
+from __version__ import version
+
+
+def configuration(parent_package='', top_path=None):
+    config = Configuration('f2py', parent_package, top_path)
+    config.add_subpackage('tests')
+    config.add_subpackage('_backends')
+    config.add_data_dir('tests/src')
+    config.add_data_files(
+        'src/fortranobject.c',
+        'src/fortranobject.h',
+        '_backends/meson.build.template',
+    )
+    config.add_data_files('*.pyi')
+    return config
+
+
+if __name__ == "__main__":
+
+    config = configuration(top_path='')
+    config = config.todict()
+
+    config['classifiers'] = [
+        'Development Status :: 5 - Production/Stable',
+        'Intended Audience :: Developers',
+        'Intended Audience :: Science/Research',
+        'License :: OSI Approved :: NumPy License',
+        'Natural Language :: English',
+        'Operating System :: OS Independent',
+        'Programming Language :: C',
+        'Programming Language :: Fortran',
+        'Programming Language :: Python',
+        'Topic :: Scientific/Engineering',
+        'Topic :: Software Development :: Code Generators',
+    ]
+    setup(version=version,
+          description="F2PY - Fortran to Python Interface Generator",
+          author="Pearu Peterson",
+          author_email="pearu@cens.ioc.ee",
+          maintainer="Pearu Peterson",
+          maintainer_email="pearu@cens.ioc.ee",
+          license="BSD",
+          platforms="Unix, Windows (mingw|cygwin), Mac OSX",
+          long_description="""\
+The Fortran to Python Interface Generator, or F2PY for short, is a
+command line tool (f2py) for generating Python C/API modules for
+wrapping Fortran 77/90/95 subroutines, accessing common blocks from
+Python, and calling Python functions from Fortran (call-backs).
+Interfacing subroutines/data from Fortran 90/95 modules is supported.""",
+          url="https://numpy.org/doc/stable/f2py/",
+          keywords=['Fortran', 'f2py'],
+          **config)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.c b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.c
new file mode 100644
index 00000000..072392bb
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.c
@@ -0,0 +1,1423 @@
+#define FORTRANOBJECT_C
+#include "fortranobject.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+
+/*
+  This file implements: FortranObject, array_from_pyobj, copy_ND_array
+
+  Author: Pearu Peterson <pearu@cens.ioc.ee>
+  $Revision: 1.52 $
+  $Date: 2005/07/11 07:44:20 $
+*/
+
+int
+F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj)
+{
+    if (obj == NULL) {
+        fprintf(stderr, "Error loading %s\n", name);
+        if (PyErr_Occurred()) {
+            PyErr_Print();
+            PyErr_Clear();
+        }
+        return -1;
+    }
+    return PyDict_SetItemString(dict, name, obj);
+}
+
+/*
+ * Python-only fallback for thread-local callback pointers
+ */
+void *
+F2PySwapThreadLocalCallbackPtr(char *key, void *ptr)
+{
+    PyObject *local_dict, *value;
+    void *prev;
+
+    local_dict = PyThreadState_GetDict();
+    if (local_dict == NULL) {
+        Py_FatalError(
+                "F2PySwapThreadLocalCallbackPtr: PyThreadState_GetDict "
+                "failed");
+    }
+
+    value = PyDict_GetItemString(local_dict, key);
+    if (value != NULL) {
+        prev = PyLong_AsVoidPtr(value);
+        if (PyErr_Occurred()) {
+            Py_FatalError(
+                    "F2PySwapThreadLocalCallbackPtr: PyLong_AsVoidPtr failed");
+        }
+    }
+    else {
+        prev = NULL;
+    }
+
+    value = PyLong_FromVoidPtr((void *)ptr);
+    if (value == NULL) {
+        Py_FatalError(
+                "F2PySwapThreadLocalCallbackPtr: PyLong_FromVoidPtr failed");
+    }
+
+    if
(PyDict_SetItemString(local_dict, key, value) != 0) { + Py_FatalError( + "F2PySwapThreadLocalCallbackPtr: PyDict_SetItemString failed"); + } + + Py_DECREF(value); + + return prev; +} + +void * +F2PyGetThreadLocalCallbackPtr(char *key) +{ + PyObject *local_dict, *value; + void *prev; + + local_dict = PyThreadState_GetDict(); + if (local_dict == NULL) { + Py_FatalError( + "F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed"); + } + + value = PyDict_GetItemString(local_dict, key); + if (value != NULL) { + prev = PyLong_AsVoidPtr(value); + if (PyErr_Occurred()) { + Py_FatalError( + "F2PyGetThreadLocalCallbackPtr: PyLong_AsVoidPtr failed"); + } + } + else { + prev = NULL; + } + + return prev; +} + +static PyArray_Descr * +get_descr_from_type_and_elsize(const int type_num, const int elsize) { + PyArray_Descr * descr = PyArray_DescrFromType(type_num); + if (type_num == NPY_STRING) { + // PyArray_DescrFromType returns descr with elsize = 0. + PyArray_DESCR_REPLACE(descr); + if (descr == NULL) { + return NULL; + } + descr->elsize = elsize; + } + return descr; +} + +/************************* FortranObject *******************************/ + +typedef PyObject *(*fortranfunc)(PyObject *, PyObject *, PyObject *, void *); + +PyObject * +PyFortranObject_New(FortranDataDef *defs, f2py_void_func init) +{ + int i; + PyFortranObject *fp = NULL; + PyObject *v = NULL; + if (init != NULL) { /* Initialize F90 module objects */ + (*(init))(); + } + fp = PyObject_New(PyFortranObject, &PyFortran_Type); + if (fp == NULL) { + return NULL; + } + if ((fp->dict = PyDict_New()) == NULL) { + Py_DECREF(fp); + return NULL; + } + fp->len = 0; + while (defs[fp->len].name != NULL) { + fp->len++; + } + if (fp->len == 0) { + goto fail; + } + fp->defs = defs; + for (i = 0; i < fp->len; i++) { + if (fp->defs[i].rank == -1) { /* Is Fortran routine */ + v = PyFortranObject_NewAsAttr(&(fp->defs[i])); + if (v == NULL) { + goto fail; + } + PyDict_SetItemString(fp->dict, fp->defs[i].name, v); + Py_XDECREF(v); + } + else if ((fp->defs[i].data) != + NULL) { /* Is Fortran variable or array (not allocatable) */ + PyArray_Descr * + descr = get_descr_from_type_and_elsize(fp->defs[i].type, + fp->defs[i].elsize); + if (descr == NULL) { + goto fail; + } + v = PyArray_NewFromDescr(&PyArray_Type, descr, fp->defs[i].rank, + fp->defs[i].dims.d, NULL, fp->defs[i].data, + NPY_ARRAY_FARRAY, NULL); + if (v == NULL) { + Py_DECREF(descr); + goto fail; + } + PyDict_SetItemString(fp->dict, fp->defs[i].name, v); + Py_XDECREF(v); + } + } + return (PyObject *)fp; +fail: + Py_XDECREF(fp); + return NULL; +} + +PyObject * +PyFortranObject_NewAsAttr(FortranDataDef *defs) +{ /* used for calling F90 module routines */ + PyFortranObject *fp = NULL; + fp = PyObject_New(PyFortranObject, &PyFortran_Type); + if (fp == NULL) + return NULL; + if ((fp->dict = PyDict_New()) == NULL) { + PyObject_Del(fp); + return NULL; + } + fp->len = 1; + fp->defs = defs; + if (defs->rank == -1) { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("function %s", defs->name)); + } else if (defs->rank == 0) { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("scalar %s", defs->name)); + } else { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("array %s", defs->name)); + } + return (PyObject *)fp; +} + +/* Fortran methods */ + +static void +fortran_dealloc(PyFortranObject *fp) +{ + Py_XDECREF(fp->dict); + PyObject_Del(fp); +} + +/* Returns number of bytes consumed from buf, or -1 on error. 
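+   (A buffer that is too small is likewise reported as -1; the caller,
+   fortran_doc, then gives up with its "too long docstring" diagnostic.)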
*/ +static Py_ssize_t +format_def(char *buf, Py_ssize_t size, FortranDataDef def) +{ + char *p = buf; + int i; + npy_intp n; + + n = PyOS_snprintf(p, size, "array(%" NPY_INTP_FMT, def.dims.d[0]); + if (n < 0 || n >= size) { + return -1; + } + p += n; + size -= n; + + for (i = 1; i < def.rank; i++) { + n = PyOS_snprintf(p, size, ",%" NPY_INTP_FMT, def.dims.d[i]); + if (n < 0 || n >= size) { + return -1; + } + p += n; + size -= n; + } + + if (size <= 0) { + return -1; + } + + *p++ = ')'; + size--; + + if (def.data == NULL) { + static const char notalloc[] = ", not allocated"; + if ((size_t)size < sizeof(notalloc)) { + return -1; + } + memcpy(p, notalloc, sizeof(notalloc)); + p += sizeof(notalloc); + size -= sizeof(notalloc); + } + + return p - buf; +} + +static PyObject * +fortran_doc(FortranDataDef def) +{ + char *buf, *p; + PyObject *s = NULL; + Py_ssize_t n, origsize, size = 100; + + if (def.doc != NULL) { + size += strlen(def.doc); + } + origsize = size; + buf = p = (char *)PyMem_Malloc(size); + if (buf == NULL) { + return PyErr_NoMemory(); + } + + if (def.rank == -1) { + if (def.doc) { + n = strlen(def.doc); + if (n > size) { + goto fail; + } + memcpy(p, def.doc, n); + p += n; + size -= n; + } + else { + n = PyOS_snprintf(p, size, "%s - no docs available", def.name); + if (n < 0 || n >= size) { + goto fail; + } + p += n; + size -= n; + } + } + else { + PyArray_Descr *d = PyArray_DescrFromType(def.type); + n = PyOS_snprintf(p, size, "%s : '%c'-", def.name, d->type); + Py_DECREF(d); + if (n < 0 || n >= size) { + goto fail; + } + p += n; + size -= n; + + if (def.data == NULL) { + n = format_def(p, size, def); + if (n < 0) { + goto fail; + } + p += n; + size -= n; + } + else if (def.rank > 0) { + n = format_def(p, size, def); + if (n < 0) { + goto fail; + } + p += n; + size -= n; + } + else { + n = strlen("scalar"); + if (size < n) { + goto fail; + } + memcpy(p, "scalar", n); + p += n; + size -= n; + } + } + if (size <= 1) { + goto fail; + } + *p++ = '\n'; + size--; + + /* p now points one beyond the last character of the string in buf */ + s = PyUnicode_FromStringAndSize(buf, p - buf); + + PyMem_Free(buf); + return s; + +fail: + fprintf(stderr, + "fortranobject.c: fortran_doc: len(p)=%zd>%zd=size:" + " too long docstring required, increase size\n", + p - buf, origsize); + PyMem_Free(buf); + return NULL; +} + +static FortranDataDef *save_def; /* save pointer of an allocatable array */ +static void +set_data(char *d, npy_intp *f) +{ /* callback from Fortran */ + if (*f) /* In fortran f=allocated(d) */ + save_def->data = d; + else + save_def->data = NULL; + /* printf("set_data: d=%p,f=%d\n",d,*f); */ +} + +static PyObject * +fortran_getattr(PyFortranObject *fp, char *name) +{ + int i, j, k, flag; + if (fp->dict != NULL) { + PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); + if (v == NULL && PyErr_Occurred()) { + return NULL; + } + else if (v != NULL) { + Py_INCREF(v); + return v; + } + } + for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); + i++) + ; + if (j == 0) + if (fp->defs[i].rank != -1) { /* F90 allocatable array */ + if (fp->defs[i].func == NULL) + return NULL; + for (k = 0; k < fp->defs[i].rank; ++k) fp->defs[i].dims.d[k] = -1; + save_def = &fp->defs[i]; + (*(fp->defs[i].func))(&fp->defs[i].rank, fp->defs[i].dims.d, + set_data, &flag); + if (flag == 2) + k = fp->defs[i].rank + 1; + else + k = fp->defs[i].rank; + if (fp->defs[i].data != NULL) { /* array is allocated */ + PyObject *v = PyArray_New( + &PyArray_Type, k, fp->defs[i].dims.d, 
fp->defs[i].type, + NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, NULL); + if (v == NULL) + return NULL; + /* Py_INCREF(v); */ + return v; + } + else { /* array is not allocated */ + Py_RETURN_NONE; + } + } + if (strcmp(name, "__dict__") == 0) { + Py_INCREF(fp->dict); + return fp->dict; + } + if (strcmp(name, "__doc__") == 0) { + PyObject *s = PyUnicode_FromString(""), *s2, *s3; + for (i = 0; i < fp->len; i++) { + s2 = fortran_doc(fp->defs[i]); + s3 = PyUnicode_Concat(s, s2); + Py_DECREF(s2); + Py_DECREF(s); + s = s3; + } + if (PyDict_SetItemString(fp->dict, name, s)) + return NULL; + return s; + } + if ((strcmp(name, "_cpointer") == 0) && (fp->len == 1)) { + PyObject *cobj = + F2PyCapsule_FromVoidPtr((void *)(fp->defs[0].data), NULL); + if (PyDict_SetItemString(fp->dict, name, cobj)) + return NULL; + return cobj; + } + PyObject *str, *ret; + str = PyUnicode_FromString(name); + ret = PyObject_GenericGetAttr((PyObject *)fp, str); + Py_DECREF(str); + return ret; +} + +static int +fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) +{ + int i, j, flag; + PyArrayObject *arr = NULL; + for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); + i++) + ; + if (j == 0) { + if (fp->defs[i].rank == -1) { + PyErr_SetString(PyExc_AttributeError, + "over-writing fortran routine"); + return -1; + } + if (fp->defs[i].func != NULL) { /* is allocatable array */ + npy_intp dims[F2PY_MAX_DIMS]; + int k; + save_def = &fp->defs[i]; + if (v != Py_None) { /* set new value (reallocate if needed -- + see f2py generated code for more + details ) */ + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = -1; + if ((arr = array_from_pyobj(fp->defs[i].type, dims, + fp->defs[i].rank, F2PY_INTENT_IN, + v)) == NULL) + return -1; + (*(fp->defs[i].func))(&fp->defs[i].rank, PyArray_DIMS(arr), + set_data, &flag); + } + else { /* deallocate */ + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = 0; + (*(fp->defs[i].func))(&fp->defs[i].rank, dims, set_data, + &flag); + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = -1; + } + memcpy(fp->defs[i].dims.d, dims, + fp->defs[i].rank * sizeof(npy_intp)); + } + else { /* not allocatable array */ + if ((arr = array_from_pyobj(fp->defs[i].type, fp->defs[i].dims.d, + fp->defs[i].rank, F2PY_INTENT_IN, + v)) == NULL) + return -1; + } + if (fp->defs[i].data != + NULL) { /* copy Python object to Fortran array */ + npy_intp s = PyArray_MultiplyList(fp->defs[i].dims.d, + PyArray_NDIM(arr)); + if (s == -1) + s = PyArray_MultiplyList(PyArray_DIMS(arr), PyArray_NDIM(arr)); + if (s < 0 || (memcpy(fp->defs[i].data, PyArray_DATA(arr), + s * PyArray_ITEMSIZE(arr))) == NULL) { + if ((PyObject *)arr != v) { + Py_DECREF(arr); + } + return -1; + } + if ((PyObject *)arr != v) { + Py_DECREF(arr); + } + } + else + return (fp->defs[i].func == NULL ? 
-1 : 0);
+        return 0; /* successful */
+    }
+    if (fp->dict == NULL) {
+        fp->dict = PyDict_New();
+        if (fp->dict == NULL)
+            return -1;
+    }
+    if (v == NULL) {
+        int rv = PyDict_DelItemString(fp->dict, name);
+        if (rv < 0)
+            PyErr_SetString(PyExc_AttributeError,
+                            "delete non-existing fortran attribute");
+        return rv;
+    }
+    else
+        return PyDict_SetItemString(fp->dict, name, v);
+}
+
+static PyObject *
+fortran_call(PyFortranObject *fp, PyObject *arg, PyObject *kw)
+{
+    int i = 0;
+    /*  printf("fortran call
+        name=%s,func=%p,data=%p,%p\n",fp->defs[i].name,
+        fp->defs[i].func,fp->defs[i].data,&fp->defs[i].data); */
+    if (fp->defs[i].rank == -1) { /* is Fortran routine */
+        if (fp->defs[i].func == NULL) {
+            PyErr_Format(PyExc_RuntimeError, "no function to call");
+            return NULL;
+        }
+        else if (fp->defs[i].data == NULL)
+            /* dummy routine */
+            return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp, arg,
+                                                        kw, NULL);
+        else
+            return (*((fortranfunc)(fp->defs[i].func)))(
+                    (PyObject *)fp, arg, kw, (void *)fp->defs[i].data);
+    }
+    PyErr_Format(PyExc_TypeError, "this fortran object is not callable");
+    return NULL;
+}
+
+static PyObject *
+fortran_repr(PyFortranObject *fp)
+{
+    PyObject *name = NULL, *repr = NULL;
+    name = PyObject_GetAttrString((PyObject *)fp, "__name__");
+    PyErr_Clear();
+    if (name != NULL && PyUnicode_Check(name)) {
+        repr = PyUnicode_FromFormat("<fortran %U>", name);
+    }
+    else {
+        repr = PyUnicode_FromString("<fortran object>");
+    }
+    Py_XDECREF(name);
+    return repr;
+}
+
+PyTypeObject PyFortran_Type = {
+    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "fortran",
+    .tp_basicsize = sizeof(PyFortranObject),
+    .tp_dealloc = (destructor)fortran_dealloc,
+    .tp_getattr = (getattrfunc)fortran_getattr,
+    .tp_setattr = (setattrfunc)fortran_setattr,
+    .tp_repr = (reprfunc)fortran_repr,
+    .tp_call = (ternaryfunc)fortran_call,
+};
+
+/************************* f2py_report_atexit *******************************/
+
+#ifdef F2PY_REPORT_ATEXIT
+static int passed_time = 0;
+static int passed_counter = 0;
+static int passed_call_time = 0;
+static struct timeb start_time;
+static struct timeb stop_time;
+static struct timeb start_call_time;
+static struct timeb stop_call_time;
+static int cb_passed_time = 0;
+static int cb_passed_counter = 0;
+static int cb_passed_call_time = 0;
+static struct timeb cb_start_time;
+static struct timeb cb_stop_time;
+static struct timeb cb_start_call_time;
+static struct timeb cb_stop_call_time;
+
+extern void
+f2py_start_clock(void)
+{
+    ftime(&start_time);
+}
+extern void
+f2py_start_call_clock(void)
+{
+    f2py_stop_clock();
+    ftime(&start_call_time);
+}
+extern void
+f2py_stop_clock(void)
+{
+    ftime(&stop_time);
+    passed_time += 1000 * (stop_time.time - start_time.time);
+    passed_time += stop_time.millitm - start_time.millitm;
+}
+extern void
+f2py_stop_call_clock(void)
+{
+    ftime(&stop_call_time);
+    passed_call_time += 1000 * (stop_call_time.time - start_call_time.time);
+    passed_call_time += stop_call_time.millitm - start_call_time.millitm;
+    passed_counter += 1;
+    f2py_start_clock();
+}
+
+extern void
+f2py_cb_start_clock(void)
+{
+    ftime(&cb_start_time);
+}
+extern void
+f2py_cb_start_call_clock(void)
+{
+    f2py_cb_stop_clock();
+    ftime(&cb_start_call_time);
+}
+extern void
+f2py_cb_stop_clock(void)
+{
+    ftime(&cb_stop_time);
+    cb_passed_time += 1000 * (cb_stop_time.time - cb_start_time.time);
+    cb_passed_time += cb_stop_time.millitm - cb_start_time.millitm;
+}
+extern void
+f2py_cb_stop_call_clock(void)
+{
+    ftime(&cb_stop_call_time);
+    cb_passed_call_time +=
+            1000 * (cb_stop_call_time.time
- cb_start_call_time.time); + cb_passed_call_time += + cb_stop_call_time.millitm - cb_start_call_time.millitm; + cb_passed_counter += 1; + f2py_cb_start_clock(); +} + +static int f2py_report_on_exit_been_here = 0; +extern void +f2py_report_on_exit(int exit_flag, void *name) +{ + if (f2py_report_on_exit_been_here) { + fprintf(stderr, " %s\n", (char *)name); + return; + } + f2py_report_on_exit_been_here = 1; + fprintf(stderr, " /-----------------------\\\n"); + fprintf(stderr, " < F2PY performance report >\n"); + fprintf(stderr, " \\-----------------------/\n"); + fprintf(stderr, "Overall time spent in ...\n"); + fprintf(stderr, "(a) wrapped (Fortran/C) functions : %8d msec\n", + passed_call_time); + fprintf(stderr, "(b) f2py interface, %6d calls : %8d msec\n", + passed_counter, passed_time); + fprintf(stderr, "(c) call-back (Python) functions : %8d msec\n", + cb_passed_call_time); + fprintf(stderr, "(d) f2py call-back interface, %6d calls : %8d msec\n", + cb_passed_counter, cb_passed_time); + + fprintf(stderr, + "(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n", + passed_call_time - cb_passed_call_time - cb_passed_time); + fprintf(stderr, + "Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n"); + fprintf(stderr, "Exit status: %d\n", exit_flag); + fprintf(stderr, "Modules : %s\n", (char *)name); +} +#endif + +/********************** report on array copy ****************************/ + +#ifdef F2PY_REPORT_ON_ARRAY_COPY +static void +f2py_report_on_array_copy(PyArrayObject *arr) +{ + const npy_intp arr_size = PyArray_Size((PyObject *)arr); + if (arr_size > F2PY_REPORT_ON_ARRAY_COPY) { + fprintf(stderr, + "copied an array: size=%ld, elsize=%" NPY_INTP_FMT "\n", + arr_size, (npy_intp)PyArray_ITEMSIZE(arr)); + } +} +static void +f2py_report_on_array_copy_fromany(void) +{ + fprintf(stderr, "created an array from object\n"); +} + +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR \ + f2py_report_on_array_copy((PyArrayObject *)arr) +#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY f2py_report_on_array_copy_fromany() +#else +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR +#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY +#endif + +/************************* array_from_obj *******************************/ + +/* + * File: array_from_pyobj.c + * + * Description: + * ------------ + * Provides array_from_pyobj function that returns a contiguous array + * object with the given dimensions and required storage order, either + * in row-major (C) or column-major (Fortran) order. The function + * array_from_pyobj is very flexible about its Python object argument + * that can be any number, list, tuple, or array. + * + * array_from_pyobj is used in f2py generated Python extension + * modules. 
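+ *
+ * For illustration from the Python side (a sketch; `mod.foo` stands
+ * for any hypothetical f2py-wrapped routine that takes a rank-1
+ * intent(in) array of float64):
+ *
+ *   >>> import numpy as np
+ *   >>> mod.foo([1.0, 2.0, 3.0])      # list: a new array is created
+ *   >>> mod.foo(np.arange(6.0)[::2])  # non-contiguous input: copied first
+ *   >>> mod.foo(np.zeros(3))          # contiguous float64: passed as-is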
+ *
+ * Author: Pearu Peterson
+ * Created: 13-16 January 2002
+ * $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $
+ */
+
+static int check_and_fix_dimensions(const PyArrayObject* arr,
+ const int rank,
+ npy_intp *dims,
+ const char *errmess);
+
+static int
+find_first_negative_dimension(const int rank, const npy_intp *dims)
+{
+ int i;
+ for (i = 0; i < rank; ++i) {
+ if (dims[i] < 0) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+#ifdef DEBUG_COPY_ND_ARRAY
+void
+dump_dims(int rank, npy_intp const *dims)
+{
+ int i;
+ printf("[");
+ for (i = 0; i < rank; ++i) {
+ printf("%3" NPY_INTP_FMT, dims[i]);
+ }
+ printf("]\n");
+}
+void
+dump_attrs(const PyArrayObject *obj)
+{
+ const PyArrayObject_fields *arr = (const PyArrayObject_fields *)obj;
+ int rank = PyArray_NDIM(arr);
+ npy_intp size = PyArray_Size((PyObject *)arr);
+ printf("\trank = %d, flags = %d, size = %" NPY_INTP_FMT "\n", rank,
+ arr->flags, size);
+ printf("\tstrides = ");
+ dump_dims(rank, arr->strides);
+ printf("\tdimensions = ");
+ dump_dims(rank, arr->dimensions);
+}
+#endif
+
+#define SWAPTYPE(a, b, t) \
+ { \
+ t c; \
+ c = (a); \
+ (a) = (b); \
+ (b) = c; \
+ }
+
+static int
+swap_arrays(PyArrayObject *obj1, PyArrayObject *obj2)
+{
+ PyArrayObject_fields *arr1 = (PyArrayObject_fields *)obj1,
+ *arr2 = (PyArrayObject_fields *)obj2;
+ SWAPTYPE(arr1->data, arr2->data, char *);
+ SWAPTYPE(arr1->nd, arr2->nd, int);
+ SWAPTYPE(arr1->dimensions, arr2->dimensions, npy_intp *);
+ SWAPTYPE(arr1->strides, arr2->strides, npy_intp *);
+ SWAPTYPE(arr1->base, arr2->base, PyObject *);
+ SWAPTYPE(arr1->descr, arr2->descr, PyArray_Descr *);
+ SWAPTYPE(arr1->flags, arr2->flags, int);
+ /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */
+ return 0;
+}
+
+#define ARRAY_ISCOMPATIBLE(arr,type_num) \
+ ((PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) || \
+ (PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) || \
+ (PyArray_ISCOMPLEX(arr) && PyTypeNum_ISCOMPLEX(type_num)) || \
+ (PyArray_ISBOOL(arr) && PyTypeNum_ISBOOL(type_num)) || \
+ (PyArray_ISSTRING(arr) && PyTypeNum_ISSTRING(type_num)))
+
+static int
+get_elsize(PyObject *obj) {
+ /*
+ get_elsize determines array itemsize from a Python object. Returns
+ elsize if successful, -1 otherwise.
+
+ Supported types of the input are: numpy.ndarray, bytes, str, tuple,
+ list.
+ */
+
+ if (PyArray_Check(obj)) {
+ return PyArray_DESCR((PyArrayObject *)obj)->elsize;
+ } else if (PyBytes_Check(obj)) {
+ return PyBytes_GET_SIZE(obj);
+ } else if (PyUnicode_Check(obj)) {
+ return PyUnicode_GET_LENGTH(obj);
+ } else if (PySequence_Check(obj)) {
+ PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize");
+ if (fast != NULL) {
+ Py_ssize_t i, n = PySequence_Fast_GET_SIZE(fast);
+ int sz, elsize = 0;
+ for (i=0; i<n; i++) {
+ sz = get_elsize(PySequence_Fast_GET_ITEM(fast, i));
+ if (sz > elsize) {
+ elsize = sz;
+ }
+ }
+ Py_DECREF(fast);
+ return elsize;
+ }
+ }
+ return -1;
+}
+
+extern PyArrayObject *
+ndarray_from_pyobj(const int type_num,
+ const int elsize_,
+ npy_intp *dims,
+ const int rank,
+ const int intent,
+ PyObject *obj,
+ const char *errmess) {
+ /*
+ * Return an array with given element type and shape from a Python
+ * object while taking into account the usage intent of the array.
+ *
+ * - element type is defined by type_num and elsize
+ * - shape is defined by dims and rank
+ *
+ * ndarray_from_pyobj is used to convert Python object arguments
+ * to numpy ndarrays with given type and shape whose data is passed
+ * to the interfaced Fortran or C functions.
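+ *
+ * The intent argument is a bitmask of the F2PY_INTENT_* (and
+ * F2PY_OPTIONAL) flags declared in fortranobject.h. For example (a
+ * sketch), a wrapper that wants a C-contiguous input and allows a
+ * copy on mismatch would pass
+ *
+ *   F2PY_INTENT_IN | F2PY_INTENT_COPY | F2PY_INTENT_C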
+ *
+ * errmess (if not NULL) contains a prefix of an error message
+ * for an exception to be triggered within this function.
+ *
+ * Negative elsize value means that elsize is to be determined
+ * from the Python object in runtime.
+ *
+ * Note on strings
+ * ---------------
+ *
+ * String type (type_num == NPY_STRING) does not have fixed
+ * element size and, by default, the type object sets it to
+ * 0. Therefore, for string types, one has to use elsize
+ * argument. For other types, elsize value is ignored.
+ *
+ * NumPy defines the type of a fixed-width string as
+ * dtype('S'). In addition, there is also dtype('c'), that
+ * appears as dtype('S1') (these have the same type_num value),
+ * but is actually different (.char attribute is either 'S' or
+ * 'c', respectively).
+ *
+ * In Fortran, character arrays and strings are different
+ * concepts. The relation between Fortran types, NumPy dtypes,
+ * and type_num-elsize pairs, is defined as follows:
+ *
+ * character*5 foo | dtype('S5') | elsize=5, shape=()
+ * character(5) foo | dtype('S1') | elsize=1, shape=(5)
+ * character*5 foo(n) | dtype('S5') | elsize=5, shape=(n,)
+ * character(5) foo(n) | dtype('S1') | elsize=1, shape=(5, n)
+ * character*(*) foo | dtype('S') | elsize=-1, shape=()
+ *
+ * Note about reference counting
+ * -----------------------------
+ *
+ * If the caller returns the array to Python, it must be done with
+ * Py_BuildValue("N",arr). Otherwise, if obj!=arr then the caller
+ * must call Py_DECREF(arr).
+ *
+ * Note on intent(cache,out,..)
+ * ----------------------------
+ * Don't expect correct data when returning intent(cache) array.
+ *
+ */
+ char mess[F2PY_MESSAGE_BUFFER_SIZE];
+ PyArrayObject *arr = NULL;
+ int elsize = (elsize_ < 0 ? get_elsize(obj) : elsize_);
+ if (elsize < 0) {
+ if (errmess != NULL) {
+ strcpy(mess, errmess);
+ }
+ sprintf(mess + strlen(mess),
+ " -- failed to determine element size from %s",
+ Py_TYPE(obj)->tp_name);
+ PyErr_SetString(PyExc_SystemError, mess);
+ return NULL;
+ }
+ PyArray_Descr * descr = get_descr_from_type_and_elsize(type_num, elsize); // new reference
+ if (descr == NULL) {
+ return NULL;
+ }
+ elsize = descr->elsize;
+ if ((intent & F2PY_INTENT_HIDE)
+ || ((intent & F2PY_INTENT_CACHE) && (obj == Py_None))
+ || ((intent & F2PY_OPTIONAL) && (obj == Py_None))
+ ) {
+ /* intent(cache), optional, intent(hide) */
+ int ineg = find_first_negative_dimension(rank, dims);
+ if (ineg >= 0) {
+ int i;
+ strcpy(mess, "failed to create intent(cache|hide)|optional array"
+ "-- must have defined dimensions but got (");
+ for(i = 0; i < rank; ++i)
+ sprintf(mess + strlen(mess), "%" NPY_INTP_FMT ",", dims[i]);
+ strcat(mess, ")");
+ PyErr_SetString(PyExc_ValueError, mess);
+ Py_DECREF(descr);
+ return NULL;
+ }
+ arr = (PyArrayObject *) \
+ PyArray_NewFromDescr(&PyArray_Type, descr, rank, dims,
+ NULL, NULL, !(intent & F2PY_INTENT_C), NULL);
+ if (arr == NULL) {
+ Py_DECREF(descr);
+ return NULL;
+ }
+ if (PyArray_ITEMSIZE(arr) != elsize) {
+ strcpy(mess, "failed to create intent(cache|hide)|optional array");
+ sprintf(mess+strlen(mess)," -- expected elsize=%d got %" NPY_INTP_FMT, elsize, (npy_intp)PyArray_ITEMSIZE(arr));
+ PyErr_SetString(PyExc_ValueError,mess);
+ Py_DECREF(arr);
+ return NULL;
+ }
+ if (!(intent & F2PY_INTENT_CACHE)) {
+ PyArray_FILLWBYTE(arr, 0);
+ }
+ return arr;
+ }
+
+ if (PyArray_Check(obj)) {
+ arr = (PyArrayObject *)obj;
+ if (intent & F2PY_INTENT_CACHE) {
+ /* intent(cache) */
+ if (PyArray_ISONESEGMENT(arr)
+ && PyArray_ITEMSIZE(arr) >= elsize) {
+ if
(check_and_fix_dimensions(arr, rank, dims, errmess)) { + Py_DECREF(descr); + return NULL; + } + if (intent & F2PY_INTENT_OUT) + Py_INCREF(arr); + Py_DECREF(descr); + return arr; + } + strcpy(mess, "failed to initialize intent(cache) array"); + if (!PyArray_ISONESEGMENT(arr)) + strcat(mess, " -- input must be in one segment"); + if (PyArray_ITEMSIZE(arr) < elsize) + sprintf(mess + strlen(mess), + " -- expected at least elsize=%d but got " + "%" NPY_INTP_FMT, + elsize, (npy_intp)PyArray_ITEMSIZE(arr)); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + + /* here we have always intent(in) or intent(inout) or intent(inplace) + */ + + if (check_and_fix_dimensions(arr, rank, dims, errmess)) { + Py_DECREF(descr); + return NULL; + } + /* + printf("intent alignment=%d\n", F2PY_GET_ALIGNMENT(intent)); + printf("alignment check=%d\n", F2PY_CHECK_ALIGNMENT(arr, intent)); + int i; + for (i=1;i<=16;i++) + printf("i=%d isaligned=%d\n", i, ARRAY_ISALIGNED(arr, i)); + */ + if ((! (intent & F2PY_INTENT_COPY)) && + PyArray_ITEMSIZE(arr) == elsize && + ARRAY_ISCOMPATIBLE(arr,type_num) && + F2PY_CHECK_ALIGNMENT(arr, intent)) { + if ((intent & F2PY_INTENT_INOUT || intent & F2PY_INTENT_INPLACE) + ? ((intent & F2PY_INTENT_C) ? PyArray_ISCARRAY(arr) : PyArray_ISFARRAY(arr)) + : ((intent & F2PY_INTENT_C) ? PyArray_ISCARRAY_RO(arr) : PyArray_ISFARRAY_RO(arr))) { + if ((intent & F2PY_INTENT_OUT)) { + Py_INCREF(arr); + } + /* Returning input array */ + Py_DECREF(descr); + return arr; + } + } + if (intent & F2PY_INTENT_INOUT) { + strcpy(mess, "failed to initialize intent(inout) array"); + /* Must use PyArray_IS*ARRAY because intent(inout) requires + * writable input */ + if ((intent & F2PY_INTENT_C) && !PyArray_ISCARRAY(arr)) + strcat(mess, " -- input not contiguous"); + if (!(intent & F2PY_INTENT_C) && !PyArray_ISFARRAY(arr)) + strcat(mess, " -- input not fortran contiguous"); + if (PyArray_ITEMSIZE(arr) != elsize) + sprintf(mess + strlen(mess), + " -- expected elsize=%d but got %" NPY_INTP_FMT, + elsize, + (npy_intp)PyArray_ITEMSIZE(arr) + ); + if (!(ARRAY_ISCOMPATIBLE(arr, type_num))) { + sprintf(mess + strlen(mess), + " -- input '%c' not compatible to '%c'", + PyArray_DESCR(arr)->type, descr->type); + } + if (!(F2PY_CHECK_ALIGNMENT(arr, intent))) + sprintf(mess + strlen(mess), " -- input not %d-aligned", + F2PY_GET_ALIGNMENT(intent)); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + + /* here we have always intent(in) or intent(inplace) */ + + { + PyArrayObject * retarr = (PyArrayObject *) \ + PyArray_NewFromDescr(&PyArray_Type, descr, PyArray_NDIM(arr), PyArray_DIMS(arr), + NULL, NULL, !(intent & F2PY_INTENT_C), NULL); + if (retarr==NULL) { + Py_DECREF(descr); + return NULL; + } + F2PY_REPORT_ON_ARRAY_COPY_FROMARR; + if (PyArray_CopyInto(retarr, arr)) { + Py_DECREF(retarr); + return NULL; + } + if (intent & F2PY_INTENT_INPLACE) { + if (swap_arrays(arr,retarr)) { + Py_DECREF(retarr); + return NULL; /* XXX: set exception */ + } + Py_XDECREF(retarr); + if (intent & F2PY_INTENT_OUT) + Py_INCREF(arr); + } else { + arr = retarr; + } + } + return arr; + } + + if ((intent & F2PY_INTENT_INOUT) || (intent & F2PY_INTENT_INPLACE) || + (intent & F2PY_INTENT_CACHE)) { + PyErr_Format(PyExc_TypeError, + "failed to initialize intent(inout|inplace|cache) " + "array, input '%s' object is not an array", + Py_TYPE(obj)->tp_name); + Py_DECREF(descr); + return NULL; + } + + { + F2PY_REPORT_ON_ARRAY_COPY_FROMANY; + arr = (PyArrayObject *)PyArray_FromAny( + obj, descr, 0, 
0,
+ ((intent & F2PY_INTENT_C) ? NPY_ARRAY_CARRAY
+ : NPY_ARRAY_FARRAY) |
+ NPY_ARRAY_FORCECAST,
+ NULL);
+ // Warning: in the case of NPY_STRING, PyArray_FromAny may
+ // reset descr->elsize, e.g. dtype('S0') becomes dtype('S1').
+ if (arr == NULL) {
+ Py_DECREF(descr);
+ return NULL;
+ }
+ if (type_num != NPY_STRING && PyArray_ITEMSIZE(arr) != elsize) {
+ // This is an internal sanity check: elsize was set to
+ // descr->elsize at the beginning of this function.
+ strcpy(mess, "failed to initialize intent(in) array");
+ sprintf(mess + strlen(mess),
+ " -- expected elsize=%d got %" NPY_INTP_FMT, elsize,
+ (npy_intp)PyArray_ITEMSIZE(arr));
+ PyErr_SetString(PyExc_ValueError, mess);
+ Py_DECREF(arr);
+ return NULL;
+ }
+ if (check_and_fix_dimensions(arr, rank, dims, errmess)) {
+ Py_DECREF(arr);
+ return NULL;
+ }
+ return arr;
+ }
+}
+
+extern PyArrayObject *
+array_from_pyobj(const int type_num,
+ npy_intp *dims,
+ const int rank,
+ const int intent,
+ PyObject *obj) {
+ /*
+ Same as ndarray_from_pyobj but with elsize determined from type,
+ if possible. Provided for backward compatibility.
+ */
+ PyArray_Descr* descr = PyArray_DescrFromType(type_num);
+ int elsize = descr->elsize;
+ Py_DECREF(descr);
+ return ndarray_from_pyobj(type_num, elsize, dims, rank, intent, obj, NULL);
+}
+
+/*****************************************/
+/* Helper functions for array_from_pyobj */
+/*****************************************/
+
+static int
+check_and_fix_dimensions(const PyArrayObject* arr, const int rank,
+ npy_intp *dims, const char *errmess)
+{
+ /*
+ * This function fills in blanks (that are -1's) in dims list using
+ * the dimensions from arr. It also checks that non-blank dims will
+ * match with the corresponding values in arr dimensions.
+ *
+ * Returns 0 if the function is successful.
+ *
+ * If an error condition is detected, an exception is set and 1 is
+ * returned.
+ */
+ char mess[F2PY_MESSAGE_BUFFER_SIZE];
+ const npy_intp arr_size =
+ (PyArray_NDIM(arr)) ? PyArray_Size((PyObject *)arr) : 1;
+#ifdef DEBUG_COPY_ND_ARRAY
+ dump_attrs(arr);
+ printf("check_and_fix_dimensions:init: dims=");
+ dump_dims(rank, dims);
+#endif
+ if (rank > PyArray_NDIM(arr)) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */
+ npy_intp new_size = 1;
+ int free_axe = -1;
+ int i;
+ npy_intp d;
+ /* Fill dims where -1 or 0; check dimensions; calc new_size; */
+ for (i = 0; i < PyArray_NDIM(arr); ++i) {
+ d = PyArray_DIM(arr, i);
+ if (dims[i] >= 0) {
+ if (d > 1 && dims[i] != d) {
+ PyErr_Format(
+ PyExc_ValueError,
+ "%d-th dimension must be fixed to %" NPY_INTP_FMT
+ " but got %" NPY_INTP_FMT "\n",
+ i, dims[i], d);
+ return 1;
+ }
+ if (!dims[i])
+ dims[i] = 1;
+ }
+ else {
+ dims[i] = d ?
d : 1; + } + new_size *= dims[i]; + } + for (i = PyArray_NDIM(arr); i < rank; ++i) + if (dims[i] > 1) { + PyErr_Format(PyExc_ValueError, + "%d-th dimension must be %" NPY_INTP_FMT + " but got 0 (not defined).\n", + i, dims[i]); + return 1; + } + else if (free_axe < 0) + free_axe = i; + else + dims[i] = 1; + if (free_axe >= 0) { + dims[free_axe] = arr_size / new_size; + new_size *= dims[free_axe]; + } + if (new_size != arr_size) { + PyErr_Format(PyExc_ValueError, + "unexpected array size: new_size=%" NPY_INTP_FMT + ", got array with arr_size=%" NPY_INTP_FMT + " (maybe too many free indices)\n", + new_size, arr_size); + return 1; + } + } + else if (rank == PyArray_NDIM(arr)) { + npy_intp new_size = 1; + int i; + npy_intp d; + for (i = 0; i < rank; ++i) { + d = PyArray_DIM(arr, i); + if (dims[i] >= 0) { + if (d > 1 && d != dims[i]) { + if (errmess != NULL) { + strcpy(mess, errmess); + } + sprintf(mess + strlen(mess), + " -- %d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT, + i, dims[i], d); + PyErr_SetString(PyExc_ValueError, mess); + return 1; + } + if (!dims[i]) + dims[i] = 1; + } + else + dims[i] = d; + new_size *= dims[i]; + } + if (new_size != arr_size) { + PyErr_Format(PyExc_ValueError, + "unexpected array size: new_size=%" NPY_INTP_FMT + ", got array with arr_size=%" NPY_INTP_FMT "\n", + new_size, arr_size); + return 1; + } + } + else { /* [[1,2]] -> [[1],[2]] */ + int i, j; + npy_intp d; + int effrank; + npy_intp size; + for (i = 0, effrank = 0; i < PyArray_NDIM(arr); ++i) + if (PyArray_DIM(arr, i) > 1) + ++effrank; + if (dims[rank - 1] >= 0) + if (effrank > rank) { + PyErr_Format(PyExc_ValueError, + "too many axes: %d (effrank=%d), " + "expected rank=%d\n", + PyArray_NDIM(arr), effrank, rank); + return 1; + } + + for (i = 0, j = 0; i < rank; ++i) { + while (j < PyArray_NDIM(arr) && PyArray_DIM(arr, j) < 2) ++j; + if (j >= PyArray_NDIM(arr)) + d = 1; + else + d = PyArray_DIM(arr, j++); + if (dims[i] >= 0) { + if (d > 1 && d != dims[i]) { + if (errmess != NULL) { + strcpy(mess, errmess); + } + sprintf(mess + strlen(mess), + " -- %d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT + " (real index=%d)\n", + i, dims[i], d, j-1); + PyErr_SetString(PyExc_ValueError, mess); + return 1; + } + if (!dims[i]) + dims[i] = 1; + } + else + dims[i] = d; + } + + for (i = rank; i < PyArray_NDIM(arr); + ++i) { /* [[1,2],[3,4]] -> [1,2,3,4] */ + while (j < PyArray_NDIM(arr) && PyArray_DIM(arr, j) < 2) ++j; + if (j >= PyArray_NDIM(arr)) + d = 1; + else + d = PyArray_DIM(arr, j++); + dims[rank - 1] *= d; + } + for (i = 0, size = 1; i < rank; ++i) size *= dims[i]; + if (size != arr_size) { + char msg[200]; + int len; + snprintf(msg, sizeof(msg), + "unexpected array size: size=%" NPY_INTP_FMT + ", arr_size=%" NPY_INTP_FMT + ", rank=%d, effrank=%d, arr.nd=%d, dims=[", + size, arr_size, rank, effrank, PyArray_NDIM(arr)); + for (i = 0; i < rank; ++i) { + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " %" NPY_INTP_FMT, + dims[i]); + } + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " ], arr.dims=["); + for (i = 0; i < PyArray_NDIM(arr); ++i) { + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " %" NPY_INTP_FMT, + PyArray_DIM(arr, i)); + } + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " ]\n"); + PyErr_SetString(PyExc_ValueError, msg); + return 1; + } + } +#ifdef DEBUG_COPY_ND_ARRAY + printf("check_and_fix_dimensions:end: dims="); + dump_dims(rank, dims); +#endif + return 0; +} + +/* End of file: 
array_from_pyobj.c */
+
+/************************* copy_ND_array *******************************/
+
+extern int
+copy_ND_array(const PyArrayObject *arr, PyArrayObject *out)
+{
+ F2PY_REPORT_ON_ARRAY_COPY_FROMARR;
+ return PyArray_CopyInto(out, (PyArrayObject *)arr);
+}
+
+/********************* Various utility functions ***********************/
+
+extern int
+f2py_describe(PyObject *obj, char *buf) {
+ /*
+ Write the description of a Python object to buf. The caller must
+ provide a buffer large enough to hold the description.
+
+ Return 1 on success.
+ */
+ char localbuf[F2PY_MESSAGE_BUFFER_SIZE];
+ if (PyBytes_Check(obj)) {
+ sprintf(localbuf, "%d-%s", (npy_int)PyBytes_GET_SIZE(obj), Py_TYPE(obj)->tp_name);
+ } else if (PyUnicode_Check(obj)) {
+ sprintf(localbuf, "%d-%s", (npy_int)PyUnicode_GET_LENGTH(obj), Py_TYPE(obj)->tp_name);
+ } else if (PyArray_CheckScalar(obj)) {
+ PyArrayObject* arr = (PyArrayObject*)obj;
+ sprintf(localbuf, "%c%" NPY_INTP_FMT "-%s-scalar", PyArray_DESCR(arr)->kind, PyArray_ITEMSIZE(arr), Py_TYPE(obj)->tp_name);
+ } else if (PyArray_Check(obj)) {
+ int i;
+ PyArrayObject* arr = (PyArrayObject*)obj;
+ strcpy(localbuf, "(");
+ for (i=0; i<PyArray_NDIM(arr); i++) {
+ sprintf(localbuf + strlen(localbuf), "%" NPY_INTP_FMT ",", PyArray_DIM(arr, i));
+ }
+ strcat(localbuf, ")-");
+ sprintf(localbuf + strlen(localbuf), "%c%" NPY_INTP_FMT "-%s", PyArray_DESCR(arr)->kind, PyArray_ITEMSIZE(arr), Py_TYPE(obj)->tp_name);
+ } else if (PySequence_Check(obj)) {
+ sprintf(localbuf, "%d-%s", (npy_int)PySequence_Length(obj), Py_TYPE(obj)->tp_name);
+ } else {
+ sprintf(localbuf, "%s instance", Py_TYPE(obj)->tp_name);
+ }
+ // TODO: detect the size of buf and make sure that size(buf) >= size(localbuf).
+ strcpy(buf, localbuf);
+ return 1;
+}
+
+extern npy_intp
+f2py_size_impl(PyArrayObject* var, ...)
+{
+ npy_intp sz = 0;
+ npy_intp dim;
+ npy_intp rank;
+ va_list argp;
+ va_start(argp, var);
+ dim = va_arg(argp, npy_int);
+ if (dim==-1)
+ {
+ sz = PyArray_SIZE(var);
+ }
+ else
+ {
+ rank = PyArray_NDIM(var);
+ if (dim>=1 && dim<=rank)
+ sz = PyArray_DIM(var, dim-1);
+ else
+ fprintf(stderr, "f2py_size: 2nd argument value=%" NPY_INTP_FMT
+ " fails to satisfy 1<=value<=%" NPY_INTP_FMT
+ ".
Result will be 0.\n", dim, rank);
+ }
+ va_end(argp);
+ return sz;
+}
+
+/*********************************************/
+/* Compatibility functions for Python >= 3.0 */
+/*********************************************/
+
+PyObject *
+F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
+{
+ PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
+ if (ret == NULL) {
+ PyErr_Clear();
+ }
+ return ret;
+}
+
+void *
+F2PyCapsule_AsVoidPtr(PyObject *obj)
+{
+ void *ret = PyCapsule_GetPointer(obj, NULL);
+ if (ret == NULL) {
+ PyErr_Clear();
+ }
+ return ret;
+}
+
+int
+F2PyCapsule_Check(PyObject *ptr)
+{
+ return PyCapsule_CheckExact(ptr);
+}
+
+#ifdef __cplusplus
+}
+#endif
+/************************* EOF fortranobject.c *******************************/
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.h b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.h
new file mode 100644
index 00000000..abd699c2
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.h
@@ -0,0 +1,173 @@
+#ifndef Py_FORTRANOBJECT_H
+#define Py_FORTRANOBJECT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+
+#ifndef NPY_NO_DEPRECATED_API
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#endif
+#ifdef FORTRANOBJECT_C
+#define NO_IMPORT_ARRAY
+#endif
+#define PY_ARRAY_UNIQUE_SYMBOL _npy_f2py_ARRAY_API
+#include "numpy/arrayobject.h"
+#include "numpy/npy_3kcompat.h"
+
+#ifdef F2PY_REPORT_ATEXIT
+#include <sys/timeb.h>
+// clang-format off
+extern void f2py_start_clock(void);
+extern void f2py_stop_clock(void);
+extern void f2py_start_call_clock(void);
+extern void f2py_stop_call_clock(void);
+extern void f2py_cb_start_clock(void);
+extern void f2py_cb_stop_clock(void);
+extern void f2py_cb_start_call_clock(void);
+extern void f2py_cb_stop_call_clock(void);
+extern void f2py_report_on_exit(int, void *);
+// clang-format on
+#endif
+
+#ifdef DMALLOC
+#include "dmalloc.h"
+#endif
+
+/* Fortran object interface */
+
+/*
+123456789-123456789-123456789-123456789-123456789-123456789-123456789-12
+
+PyFortranObject represents various Fortran objects:
+Fortran (module) routines, COMMON blocks, module data.
+
+Author: Pearu Peterson
+*/
+
+#define F2PY_MAX_DIMS 40
+#define F2PY_MESSAGE_BUFFER_SIZE 300 // Increase on "stack smashing detected"
+
+typedef void (*f2py_set_data_func)(char *, npy_intp *);
+typedef void (*f2py_void_func)(void);
+typedef void (*f2py_init_func)(int *, npy_intp *, f2py_set_data_func, int *);
+
+/*typedef void* (*f2py_c_func)(void*,...);*/
+
+typedef void *(*f2pycfunc)(void);
+
+typedef struct {
+ char *name; /* attribute (array||routine) name */
+ int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS,
+ || rank=-1 for Fortran routine */
+ struct {
+ npy_intp d[F2PY_MAX_DIMS];
+ } dims; /* dimensions of the array, || not used */
+ int type; /* PyArray_ || not used */
+ int elsize; /* Element size || not used */
+ char *data; /* pointer to array || Fortran routine */
+ f2py_init_func func; /* initialization function for
+ allocatable arrays:
+ func(&rank,dims,set_ptr_func,name,len(name))
+ || C/API wrapper for Fortran routine */
+ char *doc; /* documentation string; only recommended
+ for routines.
*/ +} FortranDataDef; + +typedef struct { + PyObject_HEAD + int len; /* Number of attributes */ + FortranDataDef *defs; /* An array of FortranDataDef's */ + PyObject *dict; /* Fortran object attribute dictionary */ +} PyFortranObject; + +#define PyFortran_Check(op) (Py_TYPE(op) == &PyFortran_Type) +#define PyFortran_Check1(op) (0 == strcmp(Py_TYPE(op)->tp_name, "fortran")) + +extern PyTypeObject PyFortran_Type; +extern int +F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj); +extern PyObject * +PyFortranObject_New(FortranDataDef *defs, f2py_void_func init); +extern PyObject * +PyFortranObject_NewAsAttr(FortranDataDef *defs); + +PyObject * +F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); +void * +F2PyCapsule_AsVoidPtr(PyObject *obj); +int +F2PyCapsule_Check(PyObject *ptr); + +extern void * +F2PySwapThreadLocalCallbackPtr(char *key, void *ptr); +extern void * +F2PyGetThreadLocalCallbackPtr(char *key); + +#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & NPY_ARRAY_C_CONTIGUOUS) +#define F2PY_INTENT_IN 1 +#define F2PY_INTENT_INOUT 2 +#define F2PY_INTENT_OUT 4 +#define F2PY_INTENT_HIDE 8 +#define F2PY_INTENT_CACHE 16 +#define F2PY_INTENT_COPY 32 +#define F2PY_INTENT_C 64 +#define F2PY_OPTIONAL 128 +#define F2PY_INTENT_INPLACE 256 +#define F2PY_INTENT_ALIGNED4 512 +#define F2PY_INTENT_ALIGNED8 1024 +#define F2PY_INTENT_ALIGNED16 2048 + +#define ARRAY_ISALIGNED(ARR, SIZE) ((size_t)(PyArray_DATA(ARR)) % (SIZE) == 0) +#define F2PY_ALIGN4(intent) (intent & F2PY_INTENT_ALIGNED4) +#define F2PY_ALIGN8(intent) (intent & F2PY_INTENT_ALIGNED8) +#define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16) + +#define F2PY_GET_ALIGNMENT(intent) \ + (F2PY_ALIGN4(intent) \ + ? 4 \ + : (F2PY_ALIGN8(intent) ? 8 : (F2PY_ALIGN16(intent) ? 16 : 1))) +#define F2PY_CHECK_ALIGNMENT(arr, intent) \ + ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) +#define F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr) ((PyArray_DESCR(arr)->type_num == NPY_STRING && PyArray_DESCR(arr)->elsize >= 1) \ + || PyArray_DESCR(arr)->type_num == NPY_UINT8) +#define F2PY_IS_UNICODE_ARRAY(arr) (PyArray_DESCR(arr)->type_num == NPY_UNICODE) + +extern PyArrayObject * +ndarray_from_pyobj(const int type_num, const int elsize_, npy_intp *dims, + const int rank, const int intent, PyObject *obj, + const char *errmess); + +extern PyArrayObject * +array_from_pyobj(const int type_num, npy_intp *dims, const int rank, + const int intent, PyObject *obj); +extern int +copy_ND_array(const PyArrayObject *in, PyArrayObject *out); + +#ifdef DEBUG_COPY_ND_ARRAY +extern void +dump_attrs(const PyArrayObject *arr); +#endif + + extern int f2py_describe(PyObject *obj, char *buf); + + /* Utility CPP macros and functions that can be used in signature file + expressions. See signature-file.rst for documentation. + */ + +#define f2py_itemsize(var) (PyArray_DESCR((capi_ ## var ## _as_array))->elsize) +#define f2py_size(var, ...) 
f2py_size_impl((PyArrayObject *)(capi_ ## var ## _as_array), ## __VA_ARGS__, -1)
+#define f2py_rank(var) var ## _Rank
+#define f2py_shape(var,dim) var ## _Dims[dim]
+#define f2py_len(var) f2py_shape(var,0)
+#define f2py_fshape(var,dim) f2py_shape(var,rank(var)-dim-1)
+#define f2py_flen(var) f2py_fshape(var,0)
+#define f2py_slen(var) capi_ ## var ## _len
+
+ extern npy_intp f2py_size_impl(PyArrayObject* var, ...);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_FORTRANOBJECT_H */
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/symbolic.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/symbolic.py
new file mode 100644
index 00000000..67120d79
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/symbolic.py
@@ -0,0 +1,1517 @@
+"""Fortran/C symbolic expressions
+
+References:
+- J3/21-007: Draft Fortran 202x. https://j3-fortran.org/doc/year/21/21-007.pdf
+
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+"""
+
+# To analyze Fortran expressions to solve dimension specifications,
+# for instance, we implement a minimal symbolic engine for parsing
+# expressions into a tree of expression instances. As a first
+# pass, we care only about arithmetic expressions involving
+# integers and operations like addition (+), subtraction (-),
+# multiplication (*), division (Fortran / is Python //, Fortran // is
+# concatenate), and exponentiation (**). In addition, .pyf files may
+# contain C expressions; support for these is implemented here as well.
+#
+# TODO: support logical constants (Op.BOOLEAN)
+# TODO: support logical operators (.AND., ...)
+# TODO: support defined operators (.MYOP., ...)
+#
+__all__ = ['Expr']
+
+
+import re
+import warnings
+from enum import Enum
+from math import gcd
+
+
+class Language(Enum):
+ """
+ Used as Expr.tostring language argument.
+ """
+ Python = 0
+ Fortran = 1
+ C = 2
+
+
+class Op(Enum):
+ """
+ Used as Expr op attribute.
+ """
+ INTEGER = 10
+ REAL = 12
+ COMPLEX = 15
+ STRING = 20
+ ARRAY = 30
+ SYMBOL = 40
+ TERNARY = 100
+ APPLY = 200
+ INDEXING = 210
+ CONCAT = 220
+ RELATIONAL = 300
+ TERMS = 1000
+ FACTORS = 2000
+ REF = 3000
+ DEREF = 3001
+
+
+class RelOp(Enum):
+ """
+ Used in Op.RELATIONAL expression to specify the function part.
+ """
+ EQ = 1
+ NE = 2
+ LT = 3
+ LE = 4
+ GT = 5
+ GE = 6
+
+ @classmethod
+ def fromstring(cls, s, language=Language.C):
+ if language is Language.Fortran:
+ return {'.eq.': RelOp.EQ, '.ne.': RelOp.NE,
+ '.lt.': RelOp.LT, '.le.': RelOp.LE,
+ '.gt.': RelOp.GT, '.ge.': RelOp.GE}[s.lower()]
+ return {'==': RelOp.EQ, '!=': RelOp.NE, '<': RelOp.LT,
+ '<=': RelOp.LE, '>': RelOp.GT, '>=': RelOp.GE}[s]
+
+ def tostring(self, language=Language.C):
+ if language is Language.Fortran:
+ return {RelOp.EQ: '.eq.', RelOp.NE: '.ne.',
+ RelOp.LT: '.lt.', RelOp.LE: '.le.',
+ RelOp.GT: '.gt.', RelOp.GE: '.ge.'}[self]
+ return {RelOp.EQ: '==', RelOp.NE: '!=',
+ RelOp.LT: '<', RelOp.LE: '<=',
+ RelOp.GT: '>', RelOp.GE: '>='}[self]
+
+
+class ArithOp(Enum):
+ """
+ Used in Op.APPLY expression to specify the function part.
+ """
+ POS = 1
+ NEG = 2
+ ADD = 3
+ SUB = 4
+ MUL = 5
+ DIV = 6
+ POW = 7
+
+
+class OpError(Exception):
+ pass
+
+
+class Precedence(Enum):
+ """
+ Used as Expr.tostring precedence argument.
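+
+ tostring parenthesizes an operand whose own precedence value
+ exceeds the parent precedence passed in. For example (a sketch,
+ using the as_symbol helper defined below):
+
+ >>> (as_symbol('a') + as_symbol('b')).tostring(Precedence.PRODUCT)
+ '(a + b)'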
+ """ + ATOM = 0 + POWER = 1 + UNARY = 2 + PRODUCT = 3 + SUM = 4 + LT = 6 + EQ = 7 + LAND = 11 + LOR = 12 + TERNARY = 13 + ASSIGN = 14 + TUPLE = 15 + NONE = 100 + + +integer_types = (int,) +number_types = (int, float) + + +def _pairs_add(d, k, v): + # Internal utility method for updating terms and factors data. + c = d.get(k) + if c is None: + d[k] = v + else: + c = c + v + if c: + d[k] = c + else: + del d[k] + + +class ExprWarning(UserWarning): + pass + + +def ewarn(message): + warnings.warn(message, ExprWarning, stacklevel=2) + + +class Expr: + """Represents a Fortran expression as a op-data pair. + + Expr instances are hashable and sortable. + """ + + @staticmethod + def parse(s, language=Language.C): + """Parse a Fortran expression to a Expr. + """ + return fromstring(s, language=language) + + def __init__(self, op, data): + assert isinstance(op, Op) + + # sanity checks + if op is Op.INTEGER: + # data is a 2-tuple of numeric object and a kind value + # (default is 4) + assert isinstance(data, tuple) and len(data) == 2 + assert isinstance(data[0], int) + assert isinstance(data[1], (int, str)), data + elif op is Op.REAL: + # data is a 2-tuple of numeric object and a kind value + # (default is 4) + assert isinstance(data, tuple) and len(data) == 2 + assert isinstance(data[0], float) + assert isinstance(data[1], (int, str)), data + elif op is Op.COMPLEX: + # data is a 2-tuple of constant expressions + assert isinstance(data, tuple) and len(data) == 2 + elif op is Op.STRING: + # data is a 2-tuple of quoted string and a kind value + # (default is 1) + assert isinstance(data, tuple) and len(data) == 2 + assert (isinstance(data[0], str) + and data[0][::len(data[0])-1] in ('""', "''", '@@')) + assert isinstance(data[1], (int, str)), data + elif op is Op.SYMBOL: + # data is any hashable object + assert hash(data) is not None + elif op in (Op.ARRAY, Op.CONCAT): + # data is a tuple of expressions + assert isinstance(data, tuple) + assert all(isinstance(item, Expr) for item in data), data + elif op in (Op.TERMS, Op.FACTORS): + # data is {:} where dict values + # are nonzero Python integers + assert isinstance(data, dict) + elif op is Op.APPLY: + # data is (, , ) where + # operands are Expr instances + assert isinstance(data, tuple) and len(data) == 3 + # function is any hashable object + assert hash(data[0]) is not None + assert isinstance(data[1], tuple) + assert isinstance(data[2], dict) + elif op is Op.INDEXING: + # data is (, ) + assert isinstance(data, tuple) and len(data) == 2 + # function is any hashable object + assert hash(data[0]) is not None + elif op is Op.TERNARY: + # data is (, , ) + assert isinstance(data, tuple) and len(data) == 3 + elif op in (Op.REF, Op.DEREF): + # data is Expr instance + assert isinstance(data, Expr) + elif op is Op.RELATIONAL: + # data is (, , ) + assert isinstance(data, tuple) and len(data) == 3 + else: + raise NotImplementedError( + f'unknown op or missing sanity check: {op}') + + self.op = op + self.data = data + + def __eq__(self, other): + return (isinstance(other, Expr) + and self.op is other.op + and self.data == other.data) + + def __hash__(self): + if self.op in (Op.TERMS, Op.FACTORS): + data = tuple(sorted(self.data.items())) + elif self.op is Op.APPLY: + data = self.data[:2] + tuple(sorted(self.data[2].items())) + else: + data = self.data + return hash((self.op, data)) + + def __lt__(self, other): + if isinstance(other, Expr): + if self.op is not other.op: + return self.op.value < other.op.value + if self.op in (Op.TERMS, Op.FACTORS): + return 
(tuple(sorted(self.data.items())) + < tuple(sorted(other.data.items()))) + if self.op is Op.APPLY: + if self.data[:2] != other.data[:2]: + return self.data[:2] < other.data[:2] + return tuple(sorted(self.data[2].items())) < tuple( + sorted(other.data[2].items())) + return self.data < other.data + return NotImplemented + + def __le__(self, other): return self == other or self < other + + def __gt__(self, other): return not (self <= other) + + def __ge__(self, other): return not (self < other) + + def __repr__(self): + return f'{type(self).__name__}({self.op}, {self.data!r})' + + def __str__(self): + return self.tostring() + + def tostring(self, parent_precedence=Precedence.NONE, + language=Language.Fortran): + """Return a string representation of Expr. + """ + if self.op in (Op.INTEGER, Op.REAL): + precedence = (Precedence.SUM if self.data[0] < 0 + else Precedence.ATOM) + r = str(self.data[0]) + (f'_{self.data[1]}' + if self.data[1] != 4 else '') + elif self.op is Op.COMPLEX: + r = ', '.join(item.tostring(Precedence.TUPLE, language=language) + for item in self.data) + r = '(' + r + ')' + precedence = Precedence.ATOM + elif self.op is Op.SYMBOL: + precedence = Precedence.ATOM + r = str(self.data) + elif self.op is Op.STRING: + r = self.data[0] + if self.data[1] != 1: + r = self.data[1] + '_' + r + precedence = Precedence.ATOM + elif self.op is Op.ARRAY: + r = ', '.join(item.tostring(Precedence.TUPLE, language=language) + for item in self.data) + r = '[' + r + ']' + precedence = Precedence.ATOM + elif self.op is Op.TERMS: + terms = [] + for term, coeff in sorted(self.data.items()): + if coeff < 0: + op = ' - ' + coeff = -coeff + else: + op = ' + ' + if coeff == 1: + term = term.tostring(Precedence.SUM, language=language) + else: + if term == as_number(1): + term = str(coeff) + else: + term = f'{coeff} * ' + term.tostring( + Precedence.PRODUCT, language=language) + if terms: + terms.append(op) + elif op == ' - ': + terms.append('-') + terms.append(term) + r = ''.join(terms) or '0' + precedence = Precedence.SUM if terms else Precedence.ATOM + elif self.op is Op.FACTORS: + factors = [] + tail = [] + for base, exp in sorted(self.data.items()): + op = ' * ' + if exp == 1: + factor = base.tostring(Precedence.PRODUCT, + language=language) + elif language is Language.C: + if exp in range(2, 10): + factor = base.tostring(Precedence.PRODUCT, + language=language) + factor = ' * '.join([factor] * exp) + elif exp in range(-10, 0): + factor = base.tostring(Precedence.PRODUCT, + language=language) + tail += [factor] * -exp + continue + else: + factor = base.tostring(Precedence.TUPLE, + language=language) + factor = f'pow({factor}, {exp})' + else: + factor = base.tostring(Precedence.POWER, + language=language) + f' ** {exp}' + if factors: + factors.append(op) + factors.append(factor) + if tail: + if not factors: + factors += ['1'] + factors += ['/', '(', ' * '.join(tail), ')'] + r = ''.join(factors) or '1' + precedence = Precedence.PRODUCT if factors else Precedence.ATOM + elif self.op is Op.APPLY: + name, args, kwargs = self.data + if name is ArithOp.DIV and language is Language.C: + numer, denom = [arg.tostring(Precedence.PRODUCT, + language=language) + for arg in args] + r = f'{numer} / {denom}' + precedence = Precedence.PRODUCT + else: + args = [arg.tostring(Precedence.TUPLE, language=language) + for arg in args] + args += [k + '=' + v.tostring(Precedence.NONE) + for k, v in kwargs.items()] + r = f'{name}({", ".join(args)})' + precedence = Precedence.ATOM + elif self.op is Op.INDEXING: + name = 
self.data[0] + args = [arg.tostring(Precedence.TUPLE, language=language) + for arg in self.data[1:]] + r = f'{name}[{", ".join(args)}]' + precedence = Precedence.ATOM + elif self.op is Op.CONCAT: + args = [arg.tostring(Precedence.PRODUCT, language=language) + for arg in self.data] + r = " // ".join(args) + precedence = Precedence.PRODUCT + elif self.op is Op.TERNARY: + cond, expr1, expr2 = [a.tostring(Precedence.TUPLE, + language=language) + for a in self.data] + if language is Language.C: + r = f'({cond}?{expr1}:{expr2})' + elif language is Language.Python: + r = f'({expr1} if {cond} else {expr2})' + elif language is Language.Fortran: + r = f'merge({expr1}, {expr2}, {cond})' + else: + raise NotImplementedError( + f'tostring for {self.op} and {language}') + precedence = Precedence.ATOM + elif self.op is Op.REF: + r = '&' + self.data.tostring(Precedence.UNARY, language=language) + precedence = Precedence.UNARY + elif self.op is Op.DEREF: + r = '*' + self.data.tostring(Precedence.UNARY, language=language) + precedence = Precedence.UNARY + elif self.op is Op.RELATIONAL: + rop, left, right = self.data + precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE) + else Precedence.LT) + left = left.tostring(precedence, language=language) + right = right.tostring(precedence, language=language) + rop = rop.tostring(language=language) + r = f'{left} {rop} {right}' + else: + raise NotImplementedError(f'tostring for op {self.op}') + if parent_precedence.value < precedence.value: + # If parent precedence is higher than operand precedence, + # operand will be enclosed in parenthesis. + return '(' + r + ')' + return r + + def __pos__(self): + return self + + def __neg__(self): + return self * -1 + + def __add__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + if self.op is other.op: + if self.op in (Op.INTEGER, Op.REAL): + return as_number( + self.data[0] + other.data[0], + max(self.data[1], other.data[1])) + if self.op is Op.COMPLEX: + r1, i1 = self.data + r2, i2 = other.data + return as_complex(r1 + r2, i1 + i2) + if self.op is Op.TERMS: + r = Expr(self.op, dict(self.data)) + for k, v in other.data.items(): + _pairs_add(r.data, k, v) + return normalize(r) + if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL): + return self + as_complex(other) + elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX: + return as_complex(self) + other + elif self.op is Op.REAL and other.op is Op.INTEGER: + return self + as_real(other, kind=self.data[1]) + elif self.op is Op.INTEGER and other.op is Op.REAL: + return as_real(self, kind=other.data[1]) + other + return as_terms(self) + as_terms(other) + return NotImplemented + + def __radd__(self, other): + if isinstance(other, number_types): + return as_number(other) + self + return NotImplemented + + def __sub__(self, other): + return self + (-other) + + def __rsub__(self, other): + if isinstance(other, number_types): + return as_number(other) - self + return NotImplemented + + def __mul__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + if self.op is other.op: + if self.op in (Op.INTEGER, Op.REAL): + return as_number(self.data[0] * other.data[0], + max(self.data[1], other.data[1])) + elif self.op is Op.COMPLEX: + r1, i1 = self.data + r2, i2 = other.data + return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1) + + if self.op is Op.FACTORS: + r = Expr(self.op, dict(self.data)) + for k, v in other.data.items(): + _pairs_add(r.data, k, v) + return normalize(r) + elif self.op is Op.TERMS: + r = Expr(self.op, 
{})
+ for t1, c1 in self.data.items():
+ for t2, c2 in other.data.items():
+ _pairs_add(r.data, t1 * t2, c1 * c2)
+ return normalize(r)
+
+ if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
+ return self * as_complex(other)
+ elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
+ return as_complex(self) * other
+ elif self.op is Op.REAL and other.op is Op.INTEGER:
+ return self * as_real(other, kind=self.data[1])
+ elif self.op is Op.INTEGER and other.op is Op.REAL:
+ return as_real(self, kind=other.data[1]) * other
+
+ if self.op is Op.TERMS:
+ return self * as_terms(other)
+ elif other.op is Op.TERMS:
+ return as_terms(self) * other
+
+ return as_factors(self) * as_factors(other)
+ return NotImplemented
+
+ def __rmul__(self, other):
+ if isinstance(other, number_types):
+ return as_number(other) * self
+ return NotImplemented
+
+ def __pow__(self, other):
+ other = as_expr(other)
+ if isinstance(other, Expr):
+ if other.op is Op.INTEGER:
+ exponent = other.data[0]
+ # TODO: other kind not used
+ if exponent == 0:
+ return as_number(1)
+ if exponent == 1:
+ return self
+ if exponent > 0:
+ if self.op is Op.FACTORS:
+ r = Expr(self.op, {})
+ for k, v in self.data.items():
+ r.data[k] = v * exponent
+ return normalize(r)
+ return self * (self ** (exponent - 1))
+ elif exponent != -1:
+ return (self ** (-exponent)) ** -1
+ return Expr(Op.FACTORS, {self: exponent})
+ return as_apply(ArithOp.POW, self, other)
+ return NotImplemented
+
+ def __truediv__(self, other):
+ other = as_expr(other)
+ if isinstance(other, Expr):
+ # Fortran / is different from Python /:
+ # - `/` is a truncate operation for integer operands
+ return normalize(as_apply(ArithOp.DIV, self, other))
+ return NotImplemented
+
+ def __rtruediv__(self, other):
+ other = as_expr(other)
+ if isinstance(other, Expr):
+ return other / self
+ return NotImplemented
+
+ def __floordiv__(self, other):
+ other = as_expr(other)
+ if isinstance(other, Expr):
+ # Fortran // is different from Python //:
+ # - `//` is a concatenate operation for string operands
+ return normalize(Expr(Op.CONCAT, (self, other)))
+ return NotImplemented
+
+ def __rfloordiv__(self, other):
+ other = as_expr(other)
+ if isinstance(other, Expr):
+ return other // self
+ return NotImplemented
+
+ def __call__(self, *args, **kwargs):
+ # In Fortran, parentheses () are used both for function calls and
+ # for indexing operations.
+ #
+ # TODO: implement a method for deciding when __call__ should
+ # return an INDEXING expression.
+ return as_apply(self, *map(as_expr, args),
+ **dict((k, as_expr(v)) for k, v in kwargs.items()))
+
+ def __getitem__(self, index):
+ # Provided to support C indexing operations that .pyf files
+ # may contain.
+ index = as_expr(index)
+ if not isinstance(index, tuple):
+ index = index,
+ if len(index) > 1:
+ ewarn(f'C-index should be a single expression but got `{index}`')
+ return Expr(Op.INDEXING, (self,) + index)
+
+ def substitute(self, symbols_map):
+ """Recursively substitute symbols with values in symbols map.
+
+ Symbols map is a dictionary of symbol-expression pairs.
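+
+ For example (a sketch, using the as_symbol/as_number helpers
+ defined below):
+
+ >>> x = as_symbol('x')
+ >>> (x + 1).substitute({x: as_number(2)}).tostring()
+ '3'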
+ """ + if self.op is Op.SYMBOL: + value = symbols_map.get(self) + if value is None: + return self + m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data) + if m: + # complement to fromstring method + items, paren = m.groups() + if paren in ['ROUNDDIV', 'SQUARE']: + return as_array(value) + assert paren == 'ROUND', (paren, value) + return value + if self.op in (Op.INTEGER, Op.REAL, Op.STRING): + return self + if self.op in (Op.ARRAY, Op.COMPLEX): + return Expr(self.op, tuple(item.substitute(symbols_map) + for item in self.data)) + if self.op is Op.CONCAT: + return normalize(Expr(self.op, tuple(item.substitute(symbols_map) + for item in self.data))) + if self.op is Op.TERMS: + r = None + for term, coeff in self.data.items(): + if r is None: + r = term.substitute(symbols_map) * coeff + else: + r += term.substitute(symbols_map) * coeff + if r is None: + ewarn('substitute: empty TERMS expression interpreted as' + ' int-literal 0') + return as_number(0) + return r + if self.op is Op.FACTORS: + r = None + for base, exponent in self.data.items(): + if r is None: + r = base.substitute(symbols_map) ** exponent + else: + r *= base.substitute(symbols_map) ** exponent + if r is None: + ewarn('substitute: empty FACTORS expression interpreted' + ' as int-literal 1') + return as_number(1) + return r + if self.op is Op.APPLY: + target, args, kwargs = self.data + if isinstance(target, Expr): + target = target.substitute(symbols_map) + args = tuple(a.substitute(symbols_map) for a in args) + kwargs = dict((k, v.substitute(symbols_map)) + for k, v in kwargs.items()) + return normalize(Expr(self.op, (target, args, kwargs))) + if self.op is Op.INDEXING: + func = self.data[0] + if isinstance(func, Expr): + func = func.substitute(symbols_map) + args = tuple(a.substitute(symbols_map) for a in self.data[1:]) + return normalize(Expr(self.op, (func,) + args)) + if self.op is Op.TERNARY: + operands = tuple(a.substitute(symbols_map) for a in self.data) + return normalize(Expr(self.op, operands)) + if self.op in (Op.REF, Op.DEREF): + return normalize(Expr(self.op, self.data.substitute(symbols_map))) + if self.op is Op.RELATIONAL: + rop, left, right = self.data + left = left.substitute(symbols_map) + right = right.substitute(symbols_map) + return normalize(Expr(self.op, (rop, left, right))) + raise NotImplementedError(f'substitute method for {self.op}: {self!r}') + + def traverse(self, visit, *args, **kwargs): + """Traverse expression tree with visit function. + + The visit function is applied to an expression with given args + and kwargs. + + Traverse call returns an expression returned by visit when not + None, otherwise return a new normalized expression with + traverse-visit sub-expressions. 
+ """ + result = visit(self, *args, **kwargs) + if result is not None: + return result + + if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL): + return self + elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY): + return normalize(Expr(self.op, tuple( + item.traverse(visit, *args, **kwargs) + for item in self.data))) + elif self.op in (Op.TERMS, Op.FACTORS): + data = {} + for k, v in self.data.items(): + k = k.traverse(visit, *args, **kwargs) + v = (v.traverse(visit, *args, **kwargs) + if isinstance(v, Expr) else v) + if k in data: + v = data[k] + v + data[k] = v + return normalize(Expr(self.op, data)) + elif self.op is Op.APPLY: + obj = self.data[0] + func = (obj.traverse(visit, *args, **kwargs) + if isinstance(obj, Expr) else obj) + operands = tuple(operand.traverse(visit, *args, **kwargs) + for operand in self.data[1]) + kwoperands = dict((k, v.traverse(visit, *args, **kwargs)) + for k, v in self.data[2].items()) + return normalize(Expr(self.op, (func, operands, kwoperands))) + elif self.op is Op.INDEXING: + obj = self.data[0] + obj = (obj.traverse(visit, *args, **kwargs) + if isinstance(obj, Expr) else obj) + indices = tuple(index.traverse(visit, *args, **kwargs) + for index in self.data[1:]) + return normalize(Expr(self.op, (obj,) + indices)) + elif self.op in (Op.REF, Op.DEREF): + return normalize(Expr(self.op, + self.data.traverse(visit, *args, **kwargs))) + elif self.op is Op.RELATIONAL: + rop, left, right = self.data + left = left.traverse(visit, *args, **kwargs) + right = right.traverse(visit, *args, **kwargs) + return normalize(Expr(self.op, (rop, left, right))) + raise NotImplementedError(f'traverse method for {self.op}') + + def contains(self, other): + """Check if self contains other. + """ + found = [] + + def visit(expr, found=found): + if found: + return expr + elif expr == other: + found.append(1) + return expr + + self.traverse(visit) + + return len(found) != 0 + + def symbols(self): + """Return a set of symbols contained in self. + """ + found = set() + + def visit(expr, found=found): + if expr.op is Op.SYMBOL: + found.add(expr) + + self.traverse(visit) + + return found + + def polynomial_atoms(self): + """Return a set of expressions used as atoms in polynomial self. + """ + found = set() + + def visit(expr, found=found): + if expr.op is Op.FACTORS: + for b in expr.data: + b.traverse(visit) + return expr + if expr.op in (Op.TERMS, Op.COMPLEX): + return + if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp): + if expr.data[0] is ArithOp.POW: + expr.data[1][0].traverse(visit) + return expr + return + if expr.op in (Op.INTEGER, Op.REAL): + return expr + + found.add(expr) + + if expr.op in (Op.INDEXING, Op.APPLY): + return expr + + self.traverse(visit) + + return found + + def linear_solve(self, symbol): + """Return a, b such that a * symbol + b == self. + + If self is not linear with respect to symbol, raise RuntimeError. + """ + b = self.substitute({symbol: as_number(0)}) + ax = self - b + a = ax.substitute({symbol: as_number(1)}) + + zero, _ = as_numer_denom(a * symbol - ax) + + if zero != as_number(0): + raise RuntimeError(f'not a {symbol}-linear equation:' + f' {a} * {symbol} + {b} == {self}') + return a, b + + +def normalize(obj): + """Normalize Expr and apply basic evaluation methods. 
+ """ + if not isinstance(obj, Expr): + return obj + + if obj.op is Op.TERMS: + d = {} + for t, c in obj.data.items(): + if c == 0: + continue + if t.op is Op.COMPLEX and c != 1: + t = t * c + c = 1 + if t.op is Op.TERMS: + for t1, c1 in t.data.items(): + _pairs_add(d, t1, c1 * c) + else: + _pairs_add(d, t, c) + if len(d) == 0: + # TODO: determine correct kind + return as_number(0) + elif len(d) == 1: + (t, c), = d.items() + if c == 1: + return t + return Expr(Op.TERMS, d) + + if obj.op is Op.FACTORS: + coeff = 1 + d = {} + for b, e in obj.data.items(): + if e == 0: + continue + if b.op is Op.TERMS and isinstance(e, integer_types) and e > 1: + # expand integer powers of sums + b = b * (b ** (e - 1)) + e = 1 + + if b.op in (Op.INTEGER, Op.REAL): + if e == 1: + coeff *= b.data[0] + elif e > 0: + coeff *= b.data[0] ** e + else: + _pairs_add(d, b, e) + elif b.op is Op.FACTORS: + if e > 0 and isinstance(e, integer_types): + for b1, e1 in b.data.items(): + _pairs_add(d, b1, e1 * e) + else: + _pairs_add(d, b, e) + else: + _pairs_add(d, b, e) + if len(d) == 0 or coeff == 0: + # TODO: determine correct kind + assert isinstance(coeff, number_types) + return as_number(coeff) + elif len(d) == 1: + (b, e), = d.items() + if e == 1: + t = b + else: + t = Expr(Op.FACTORS, d) + if coeff == 1: + return t + return Expr(Op.TERMS, {t: coeff}) + elif coeff == 1: + return Expr(Op.FACTORS, d) + else: + return Expr(Op.TERMS, {Expr(Op.FACTORS, d): coeff}) + + if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV: + dividend, divisor = obj.data[1] + t1, c1 = as_term_coeff(dividend) + t2, c2 = as_term_coeff(divisor) + if isinstance(c1, integer_types) and isinstance(c2, integer_types): + g = gcd(c1, c2) + c1, c2 = c1//g, c2//g + else: + c1, c2 = c1/c2, 1 + + if t1.op is Op.APPLY and t1.data[0] is ArithOp.DIV: + numer = t1.data[1][0] * c1 + denom = t1.data[1][1] * t2 * c2 + return as_apply(ArithOp.DIV, numer, denom) + + if t2.op is Op.APPLY and t2.data[0] is ArithOp.DIV: + numer = t2.data[1][1] * t1 * c1 + denom = t2.data[1][0] * c2 + return as_apply(ArithOp.DIV, numer, denom) + + d = dict(as_factors(t1).data) + for b, e in as_factors(t2).data.items(): + _pairs_add(d, b, -e) + numer, denom = {}, {} + for b, e in d.items(): + if e > 0: + numer[b] = e + else: + denom[b] = -e + numer = normalize(Expr(Op.FACTORS, numer)) * c1 + denom = normalize(Expr(Op.FACTORS, denom)) * c2 + + if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] == 1: + # TODO: denom kind not used + return numer + return as_apply(ArithOp.DIV, numer, denom) + + if obj.op is Op.CONCAT: + lst = [obj.data[0]] + for s in obj.data[1:]: + last = lst[-1] + if ( + last.op is Op.STRING + and s.op is Op.STRING + and last.data[0][0] in '"\'' + and s.data[0][0] == last.data[0][-1] + ): + new_last = as_string(last.data[0][:-1] + s.data[0][1:], + max(last.data[1], s.data[1])) + lst[-1] = new_last + else: + lst.append(s) + if len(lst) == 1: + return lst[0] + return Expr(Op.CONCAT, tuple(lst)) + + if obj.op is Op.TERNARY: + cond, expr1, expr2 = map(normalize, obj.data) + if cond.op is Op.INTEGER: + return expr1 if cond.data[0] else expr2 + return Expr(Op.TERNARY, (cond, expr1, expr2)) + + return obj + + +def as_expr(obj): + """Convert non-Expr objects to Expr objects. 
+ """ + if isinstance(obj, complex): + return as_complex(obj.real, obj.imag) + if isinstance(obj, number_types): + return as_number(obj) + if isinstance(obj, str): + # STRING expression holds string with boundary quotes, hence + # applying repr: + return as_string(repr(obj)) + if isinstance(obj, tuple): + return tuple(map(as_expr, obj)) + return obj + + +def as_symbol(obj): + """Return object as SYMBOL expression (variable or unparsed expression). + """ + return Expr(Op.SYMBOL, obj) + + +def as_number(obj, kind=4): + """Return object as INTEGER or REAL constant. + """ + if isinstance(obj, int): + return Expr(Op.INTEGER, (obj, kind)) + if isinstance(obj, float): + return Expr(Op.REAL, (obj, kind)) + if isinstance(obj, Expr): + if obj.op in (Op.INTEGER, Op.REAL): + return obj + raise OpError(f'cannot convert {obj} to INTEGER or REAL constant') + + +def as_integer(obj, kind=4): + """Return object as INTEGER constant. + """ + if isinstance(obj, int): + return Expr(Op.INTEGER, (obj, kind)) + if isinstance(obj, Expr): + if obj.op is Op.INTEGER: + return obj + raise OpError(f'cannot convert {obj} to INTEGER constant') + + +def as_real(obj, kind=4): + """Return object as REAL constant. + """ + if isinstance(obj, int): + return Expr(Op.REAL, (float(obj), kind)) + if isinstance(obj, float): + return Expr(Op.REAL, (obj, kind)) + if isinstance(obj, Expr): + if obj.op is Op.REAL: + return obj + elif obj.op is Op.INTEGER: + return Expr(Op.REAL, (float(obj.data[0]), kind)) + raise OpError(f'cannot convert {obj} to REAL constant') + + +def as_string(obj, kind=1): + """Return object as STRING expression (string literal constant). + """ + return Expr(Op.STRING, (obj, kind)) + + +def as_array(obj): + """Return object as ARRAY expression (array constant). + """ + if isinstance(obj, Expr): + obj = obj, + return Expr(Op.ARRAY, obj) + + +def as_complex(real, imag=0): + """Return object as COMPLEX expression (complex literal constant). + """ + return Expr(Op.COMPLEX, (as_expr(real), as_expr(imag))) + + +def as_apply(func, *args, **kwargs): + """Return object as APPLY expression (function call, constructor, etc.) + """ + return Expr(Op.APPLY, + (func, tuple(map(as_expr, args)), + dict((k, as_expr(v)) for k, v in kwargs.items()))) + + +def as_ternary(cond, expr1, expr2): + """Return object as TERNARY expression (cond?expr1:expr2). + """ + return Expr(Op.TERNARY, (cond, expr1, expr2)) + + +def as_ref(expr): + """Return object as referencing expression. + """ + return Expr(Op.REF, expr) + + +def as_deref(expr): + """Return object as dereferencing expression. + """ + return Expr(Op.DEREF, expr) + + +def as_eq(left, right): + return Expr(Op.RELATIONAL, (RelOp.EQ, left, right)) + + +def as_ne(left, right): + return Expr(Op.RELATIONAL, (RelOp.NE, left, right)) + + +def as_lt(left, right): + return Expr(Op.RELATIONAL, (RelOp.LT, left, right)) + + +def as_le(left, right): + return Expr(Op.RELATIONAL, (RelOp.LE, left, right)) + + +def as_gt(left, right): + return Expr(Op.RELATIONAL, (RelOp.GT, left, right)) + + +def as_ge(left, right): + return Expr(Op.RELATIONAL, (RelOp.GE, left, right)) + + +def as_terms(obj): + """Return expression as TERMS expression. 
+ """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.TERMS: + return obj + if obj.op is Op.INTEGER: + return Expr(Op.TERMS, {as_integer(1, obj.data[1]): obj.data[0]}) + if obj.op is Op.REAL: + return Expr(Op.TERMS, {as_real(1, obj.data[1]): obj.data[0]}) + return Expr(Op.TERMS, {obj: 1}) + raise OpError(f'cannot convert {type(obj)} to terms Expr') + + +def as_factors(obj): + """Return expression as FACTORS expression. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.FACTORS: + return obj + if obj.op is Op.TERMS: + if len(obj.data) == 1: + (term, coeff), = obj.data.items() + if coeff == 1: + return Expr(Op.FACTORS, {term: 1}) + return Expr(Op.FACTORS, {term: 1, Expr.number(coeff): 1}) + if ((obj.op is Op.APPLY + and obj.data[0] is ArithOp.DIV + and not obj.data[2])): + return Expr(Op.FACTORS, {obj.data[1][0]: 1, obj.data[1][1]: -1}) + return Expr(Op.FACTORS, {obj: 1}) + raise OpError(f'cannot convert {type(obj)} to terms Expr') + + +def as_term_coeff(obj): + """Return expression as term-coefficient pair. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.INTEGER: + return as_integer(1, obj.data[1]), obj.data[0] + if obj.op is Op.REAL: + return as_real(1, obj.data[1]), obj.data[0] + if obj.op is Op.TERMS: + if len(obj.data) == 1: + (term, coeff), = obj.data.items() + return term, coeff + # TODO: find common divisor of coefficients + if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV: + t, c = as_term_coeff(obj.data[1][0]) + return as_apply(ArithOp.DIV, t, obj.data[1][1]), c + return obj, 1 + raise OpError(f'cannot convert {type(obj)} to term and coeff') + + +def as_numer_denom(obj): + """Return expression as numer-denom pair. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op in (Op.INTEGER, Op.REAL, Op.COMPLEX, Op.SYMBOL, + Op.INDEXING, Op.TERNARY): + return obj, as_number(1) + elif obj.op is Op.APPLY: + if obj.data[0] is ArithOp.DIV and not obj.data[2]: + numers, denoms = map(as_numer_denom, obj.data[1]) + return numers[0] * denoms[1], numers[1] * denoms[0] + return obj, as_number(1) + elif obj.op is Op.TERMS: + numers, denoms = [], [] + for term, coeff in obj.data.items(): + n, d = as_numer_denom(term) + n = n * coeff + numers.append(n) + denoms.append(d) + numer, denom = as_number(0), as_number(1) + for i in range(len(numers)): + n = numers[i] + for j in range(len(numers)): + if i != j: + n *= denoms[j] + numer += n + denom *= denoms[i] + if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] < 0: + numer, denom = -numer, -denom + return numer, denom + elif obj.op is Op.FACTORS: + numer, denom = as_number(1), as_number(1) + for b, e in obj.data.items(): + bnumer, bdenom = as_numer_denom(b) + if e > 0: + numer *= bnumer ** e + denom *= bdenom ** e + elif e < 0: + numer *= bdenom ** (-e) + denom *= bnumer ** (-e) + return numer, denom + raise OpError(f'cannot convert {type(obj)} to numer and denom') + + +def _counter(): + # Used internally to generate unique dummy symbols + counter = 0 + while True: + counter += 1 + yield counter + + +COUNTER = _counter() + + +def eliminate_quotes(s): + """Replace quoted substrings of input string. + + Return a new string and a mapping of replacements. 
+ """ + d = {} + + def repl(m): + kind, value = m.groups()[:2] + if kind: + # remove trailing underscore + kind = kind[:-1] + p = {"'": "SINGLE", '"': "DOUBLE"}[value[0]] + k = f'{kind}@__f2py_QUOTES_{p}_{COUNTER.__next__()}@' + d[k] = value + return k + + new_s = re.sub(r'({kind}_|)({single_quoted}|{double_quoted})'.format( + kind=r'\w[\w\d_]*', + single_quoted=r"('([^'\\]|(\\.))*')", + double_quoted=r'("([^"\\]|(\\.))*")'), + repl, s) + + assert '"' not in new_s + assert "'" not in new_s + + return new_s, d + + +def insert_quotes(s, d): + """Inverse of eliminate_quotes. + """ + for k, v in d.items(): + kind = k[:k.find('@')] + if kind: + kind += '_' + s = s.replace(k, kind + v) + return s + + +def replace_parenthesis(s): + """Replace substrings of input that are enclosed in parenthesis. + + Return a new string and a mapping of replacements. + """ + # Find a parenthesis pair that appears first. + + # Fortran deliminator are `(`, `)`, `[`, `]`, `(/', '/)`, `/`. + # We don't handle `/` deliminator because it is not a part of an + # expression. + left, right = None, None + mn_i = len(s) + for left_, right_ in (('(/', '/)'), + '()', + '{}', # to support C literal structs + '[]'): + i = s.find(left_) + if i == -1: + continue + if i < mn_i: + mn_i = i + left, right = left_, right_ + + if left is None: + return s, {} + + i = mn_i + j = s.find(right, i) + + while s.count(left, i + 1, j) != s.count(right, i + 1, j): + j = s.find(right, j + 1) + if j == -1: + raise ValueError(f'Mismatch of {left+right} parenthesis in {s!r}') + + p = {'(': 'ROUND', '[': 'SQUARE', '{': 'CURLY', '(/': 'ROUNDDIV'}[left] + + k = f'@__f2py_PARENTHESIS_{p}_{COUNTER.__next__()}@' + v = s[i+len(left):j] + r, d = replace_parenthesis(s[j+len(right):]) + d[k] = v + return s[:i] + k + r, d + + +def _get_parenthesis_kind(s): + assert s.startswith('@__f2py_PARENTHESIS_'), s + return s.split('_')[4] + + +def unreplace_parenthesis(s, d): + """Inverse of replace_parenthesis. + """ + for k, v in d.items(): + p = _get_parenthesis_kind(k) + left = dict(ROUND='(', SQUARE='[', CURLY='{', ROUNDDIV='(/')[p] + right = dict(ROUND=')', SQUARE=']', CURLY='}', ROUNDDIV='/)')[p] + s = s.replace(k, left + v + right) + return s + + +def fromstring(s, language=Language.C): + """Create an expression from a string. + + This is a "lazy" parser, that is, only arithmetic operations are + resolved, non-arithmetic operations are treated as symbols. + """ + r = _FromStringWorker(language=language).parse(s) + if isinstance(r, Expr): + return r + raise ValueError(f'failed to parse `{s}` to Expr instance: got `{r}`') + + +class _Pair: + # Internal class to represent a pair of expressions + + def __init__(self, left, right): + self.left = left + self.right = right + + def substitute(self, symbols_map): + left, right = self.left, self.right + if isinstance(left, Expr): + left = left.substitute(symbols_map) + if isinstance(right, Expr): + right = right.substitute(symbols_map) + return _Pair(left, right) + + def __repr__(self): + return f'{type(self).__name__}({self.left}, {self.right})' + + +class _FromStringWorker: + + def __init__(self, language=Language.C): + self.original = None + self.quotes_map = None + self.language = language + + def finalize_string(self, s): + return insert_quotes(s, self.quotes_map) + + def parse(self, inp): + self.original = inp + unquoted, self.quotes_map = eliminate_quotes(inp) + return self.process(unquoted) + + def process(self, s, context='expr'): + """Parse string within the given context. 
+
+        The context may define the result in case of ambiguous
+        expressions. For instance, consider expressions `f(x, y)` and
+        `(x, y) + (a, b)` where `f` is a function and pair `(x, y)`
+        denotes a complex number. Specifying context as "args" or
+        "expr", the subexpression `(x, y)` will be parsed to an
+        argument list or to a complex number, respectively.
+        """
+        if isinstance(s, (list, tuple)):
+            return type(s)(self.process(s_, context) for s_ in s)
+
+        assert isinstance(s, str), (type(s), s)
+
+        # replace subexpressions in parenthesis with f2py @-names
+        r, raw_symbols_map = replace_parenthesis(s)
+        r = r.strip()
+
+        def restore(r):
+            # restores subexpressions marked with f2py @-names
+            if isinstance(r, (list, tuple)):
+                return type(r)(map(restore, r))
+            return unreplace_parenthesis(r, raw_symbols_map)
+
+        # comma-separated tuple
+        if ',' in r:
+            operands = restore(r.split(','))
+            if context == 'args':
+                return tuple(self.process(operands))
+            if context == 'expr':
+                if len(operands) == 2:
+                    # complex number literal
+                    return as_complex(*self.process(operands))
+            raise NotImplementedError(
+                f'parsing comma-separated list (context={context}): {r}')
+
+        # ternary operation
+        m = re.match(r'\A([^?]+)[?]([^:]+)[:](.+)\Z', r)
+        if m:
+            assert context == 'expr', context
+            oper, expr1, expr2 = restore(m.groups())
+            oper = self.process(oper)
+            expr1 = self.process(expr1)
+            expr2 = self.process(expr2)
+            return as_ternary(oper, expr1, expr2)
+
+        # relational expression
+        if self.language is Language.Fortran:
+            m = re.match(
+                r'\A(.+)\s*[.](eq|ne|lt|le|gt|ge)[.]\s*(.+)\Z', r, re.I)
+        else:
+            m = re.match(
+                r'\A(.+)\s*([=][=]|[!][=]|[<][=]|[<]|[>][=]|[>])\s*(.+)\Z', r)
+        if m:
+            left, rop, right = m.groups()
+            if self.language is Language.Fortran:
+                rop = '.' + rop + '.'
+            left, right = self.process(restore((left, right)))
+            rop = RelOp.fromstring(rop, language=self.language)
+            return Expr(Op.RELATIONAL, (rop, left, right))
+
+        # keyword argument
+        m = re.match(r'\A(\w[\w\d_]*)\s*[=](.*)\Z', r)
+        if m:
+            keyname, value = m.groups()
+            value = restore(value)
+            return _Pair(keyname, self.process(value))
+
+        # addition/subtraction operations
+        operands = re.split(r'((?<!\d[edED])[+-])', r)
+        if len(operands) > 1:
+            result = self.process(restore(operands[0] or '0'))
+            for op, operand in zip(operands[1::2], operands[2::2]):
+                operand = self.process(restore(operand))
+                op = op.strip()
+                if op == '+':
+                    result += operand
+                else:
+                    assert op == '-'
+                    result -= operand
+            return result
+
+        # string concatenate operation
+        if self.language is Language.Fortran and '//' in r:
+            operands = restore(r.split('//'))
+            return Expr(Op.CONCAT,
+                        tuple(self.process(operands)))
+
+        # multiplication/division operations
+        operands = re.split(r'(?<=[@\w\d_])\s*([*]|/)',
+                            (r if self.language is Language.C
+                             else r.replace('**', '@__f2py_DOUBLE_STAR@')))
+        if len(operands) > 1:
+            operands = restore(operands)
+            if self.language is not Language.C:
+                operands = [operand.replace('@__f2py_DOUBLE_STAR@', '**')
+                            for operand in operands]
+            # Expression is an arithmetic product
+            result = self.process(operands[0])
+            for op, operand in zip(operands[1::2], operands[2::2]):
+                operand = self.process(operand)
+                op = op.strip()
+                if op == '*':
+                    result *= operand
+                else:
+                    assert op == '/'
+                    result /= operand
+            return result
+
+        # referencing/dereferencing
+        if r.startswith('*') or r.startswith('&'):
+            op = {'*': Op.DEREF, '&': Op.REF}[r[0]]
+            operand = self.process(restore(r[1:]))
+            return Expr(op, operand)
+
+        # exponentiation operations
+        if self.language is not Language.C and '**' in r:
+            operands = list(reversed(restore(r.split('**'))))
+            result = self.process(operands[0])
+            for operand in operands[1:]:
+                operand = self.process(operand)
+                result = operand ** result
+            return result
+
+        # int-literal-constant
+        m = re.match(r'\A({digit_string})({kind}|)\Z'.format(
+            digit_string=r'\d+',
+            kind=r'_(\d+|\w[\w\d_]*)'), r)
+        if m:
+            value, _, kind = m.groups()
+            if kind and kind.isdigit():
+                kind = int(kind)
+            return as_integer(int(value), kind or 4)
+
+        # real-literal-constant
+        m = re.match(r'\A({significant}({exponent}|)|\d+{exponent})({kind}|)\Z'
+                     .format(
+                         significant=r'[.]\d+|\d+[.]\d*',
+                         exponent=r'[edED][+-]?\d+',
+                         kind=r'_(\d+|\w[\w\d_]*)'), r)
+        if m:
+            value, _, _, kind = m.groups()
+            if kind and kind.isdigit():
+                kind = int(kind)
+            value = value.lower()
+            if 'd' in value:
+                return as_real(float(value.replace('d', 'e')), kind or 8)
+            return as_real(float(value), kind or 4)
+
+        # string-literal-constant with kind parameter specification
+        if r in self.quotes_map:
+            kind = r[:r.find('@')]
+            return as_string(self.quotes_map[r], kind or 1)
+
+        # array constructor or literal complex constant or
+        # parenthesized expression
+        if r in raw_symbols_map:
+            paren = _get_parenthesis_kind(r)
+            items = self.process(restore(raw_symbols_map[r]),
+                                 'expr' if paren == 'ROUND' else 'args')
+            if paren == 'ROUND':
+                if isinstance(items, Expr):
+                    return items
+            if paren in ['ROUNDDIV', 'SQUARE']:
+                # Expression is an array constructor
+                if isinstance(items, Expr):
+                    items = (items,)
+                return as_array(items)
+
+        # function call/indexing
+        m = re.match(r'\A(.+)\s*(@__f2py_PARENTHESIS_(ROUND|SQUARE)_\d+@)\Z',
+                     r)
+        if m:
+            target, args, paren = m.groups()
+            target = self.process(restore(target))
+            args = self.process(restore(args)[1:-1], 'args')
+            if not isinstance(args, tuple):
+                args = args,
+            if paren == 'ROUND':
+                kwargs = dict((a.left, a.right) for a in args
+                              if isinstance(a, _Pair))
+                args = tuple(a for a in args if not isinstance(a, _Pair))
+                # Warning: this could also be a Fortran indexing operation..
+                return as_apply(target, *args, **kwargs)
+            else:
+                # Expression is a C/Python indexing operation
+                # (e.g.
used in .pyf files) + assert paren == 'SQUARE' + return target[args] + + # Fortran standard conforming identifier + m = re.match(r'\A\w[\w\d_]*\Z', r) + if m: + return as_symbol(r) + + # fall-back to symbol + r = self.finalize_string(restore(r)) + ewarn( + f'fromstring: treating {r!r} as symbol (original={self.original})') + return as_symbol(r) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/abstract_interface/foo.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/abstract_interface/foo.f90 new file mode 100644 index 00000000..76d16aae --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/abstract_interface/foo.f90 @@ -0,0 +1,34 @@ +module ops_module + + abstract interface + subroutine op(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + end subroutine + end interface + +contains + + subroutine foo(x, y, r1, r2) + integer, intent(in) :: x, y + integer, intent(out) :: r1, r2 + procedure (op) add1, add2 + procedure (op), pointer::p + p=>add1 + call p(x, y, r1) + p=>add2 + call p(x, y, r2) + end subroutine +end module + +subroutine add1(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + y +end subroutine + +subroutine add2(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + 2 * y +end subroutine diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 new file mode 100644 index 00000000..36791e46 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 @@ -0,0 +1,6 @@ +module test + abstract interface + subroutine foo() + end subroutine + end interface +end module test diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c new file mode 100644 index 00000000..9a8b4a75 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -0,0 +1,230 @@ +/* + * This file was auto-generated with f2py (version:2_1330) and hand edited by + * Pearu for testing purposes. Do not edit this file unless you know what you + * are doing!!! 
+ */ + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** See f2py2e/cfuncs.py: includes ***********************/ + +#define PY_SSIZE_T_CLEAN +#include +#include "fortranobject.h" +#include + +static PyObject *wrap_error; +static PyObject *wrap_module; + +/************************************ call ************************************/ +static char doc_f2py_rout_wrap_call[] = "\ +Function signature:\n\ + arr = call(type_num,dims,intent,obj)\n\ +Required arguments:\n" +" type_num : input int\n" +" dims : input int-sequence\n" +" intent : input int\n" +" obj : input python object\n" +"Return objects:\n" +" arr : array"; +static PyObject *f2py_rout_wrap_call(PyObject *capi_self, + PyObject *capi_args) { + PyObject * volatile capi_buildvalue = NULL; + int type_num = 0; + int elsize = 0; + npy_intp *dims = NULL; + PyObject *dims_capi = Py_None; + int rank = 0; + int intent = 0; + PyArrayObject *capi_arr_tmp = NULL; + PyObject *arr_capi = Py_None; + int i; + + if (!PyArg_ParseTuple(capi_args,"iiOiO|:wrap.call",\ + &type_num,&elsize,&dims_capi,&intent,&arr_capi)) + return NULL; + rank = PySequence_Length(dims_capi); + dims = malloc(rank*sizeof(npy_intp)); + for (i=0;ikind, + PyArray_DESCR(arr)->type, + PyArray_TYPE(arr), + PyArray_ITEMSIZE(arr), + PyArray_DESCR(arr)->alignment, + PyArray_FLAGS(arr), + PyArray_ITEMSIZE(arr)); +} + +static PyMethodDef f2py_module_methods[] = { + + {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call}, + {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs}, + {NULL,NULL} +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "test_array_from_pyobj_ext", + NULL, + -1, + f2py_module_methods, + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { + PyObject *m,*d, *s; + m = wrap_module = PyModule_Create(&moduledef); + Py_SET_TYPE(&PyFortran_Type, &PyType_Type); + import_array(); + if (PyErr_Occurred()) + Py_FatalError("can't initialize module wrap (failed to import numpy)"); + d = PyModule_GetDict(m); + s = PyUnicode_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n" + " arr = call(type_num,dims,intent,obj)\n" + "."); + PyDict_SetItemString(d, "__doc__", s); + wrap_error = PyErr_NewException ("wrap.error", NULL, NULL); + Py_DECREF(s); + +#define ADDCONST(NAME, CONST) \ + s = PyLong_FromLong(CONST); \ + PyDict_SetItemString(d, NAME, s); \ + Py_DECREF(s) + + ADDCONST("F2PY_INTENT_IN", F2PY_INTENT_IN); + ADDCONST("F2PY_INTENT_INOUT", F2PY_INTENT_INOUT); + ADDCONST("F2PY_INTENT_OUT", F2PY_INTENT_OUT); + ADDCONST("F2PY_INTENT_HIDE", F2PY_INTENT_HIDE); + ADDCONST("F2PY_INTENT_CACHE", F2PY_INTENT_CACHE); + ADDCONST("F2PY_INTENT_COPY", F2PY_INTENT_COPY); + ADDCONST("F2PY_INTENT_C", F2PY_INTENT_C); + ADDCONST("F2PY_OPTIONAL", F2PY_OPTIONAL); + ADDCONST("F2PY_INTENT_INPLACE", F2PY_INTENT_INPLACE); + ADDCONST("NPY_BOOL", NPY_BOOL); + ADDCONST("NPY_BYTE", NPY_BYTE); + ADDCONST("NPY_UBYTE", NPY_UBYTE); + ADDCONST("NPY_SHORT", NPY_SHORT); + ADDCONST("NPY_USHORT", NPY_USHORT); + ADDCONST("NPY_INT", NPY_INT); + ADDCONST("NPY_UINT", NPY_UINT); + ADDCONST("NPY_INTP", NPY_INTP); + ADDCONST("NPY_UINTP", NPY_UINTP); + ADDCONST("NPY_LONG", NPY_LONG); + ADDCONST("NPY_ULONG", NPY_ULONG); + ADDCONST("NPY_LONGLONG", NPY_LONGLONG); + ADDCONST("NPY_ULONGLONG", NPY_ULONGLONG); + ADDCONST("NPY_FLOAT", NPY_FLOAT); + ADDCONST("NPY_DOUBLE", NPY_DOUBLE); + ADDCONST("NPY_LONGDOUBLE", NPY_LONGDOUBLE); + ADDCONST("NPY_CFLOAT", NPY_CFLOAT); + 
ADDCONST("NPY_CDOUBLE", NPY_CDOUBLE); + ADDCONST("NPY_CLONGDOUBLE", NPY_CLONGDOUBLE); + ADDCONST("NPY_OBJECT", NPY_OBJECT); + ADDCONST("NPY_STRING", NPY_STRING); + ADDCONST("NPY_UNICODE", NPY_UNICODE); + ADDCONST("NPY_VOID", NPY_VOID); + ADDCONST("NPY_NTYPES", NPY_NTYPES); + ADDCONST("NPY_NOTYPE", NPY_NOTYPE); + ADDCONST("NPY_USERDEF", NPY_USERDEF); + + ADDCONST("CONTIGUOUS", NPY_ARRAY_C_CONTIGUOUS); + ADDCONST("FORTRAN", NPY_ARRAY_F_CONTIGUOUS); + ADDCONST("OWNDATA", NPY_ARRAY_OWNDATA); + ADDCONST("FORCECAST", NPY_ARRAY_FORCECAST); + ADDCONST("ENSURECOPY", NPY_ARRAY_ENSURECOPY); + ADDCONST("ENSUREARRAY", NPY_ARRAY_ENSUREARRAY); + ADDCONST("ALIGNED", NPY_ARRAY_ALIGNED); + ADDCONST("WRITEABLE", NPY_ARRAY_WRITEABLE); + ADDCONST("WRITEBACKIFCOPY", NPY_ARRAY_WRITEBACKIFCOPY); + + ADDCONST("BEHAVED", NPY_ARRAY_BEHAVED); + ADDCONST("BEHAVED_NS", NPY_ARRAY_BEHAVED_NS); + ADDCONST("CARRAY", NPY_ARRAY_CARRAY); + ADDCONST("FARRAY", NPY_ARRAY_FARRAY); + ADDCONST("CARRAY_RO", NPY_ARRAY_CARRAY_RO); + ADDCONST("FARRAY_RO", NPY_ARRAY_FARRAY_RO); + ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT); + ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL); + +#undef ADDCONST( + + if (PyErr_Occurred()) + Py_FatalError("can't initialize module wrap"); + +#ifdef F2PY_REPORT_ATEXIT + on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); +#endif + + return m; +} +#ifdef __cplusplus +} +#endif diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap new file mode 100644 index 00000000..2665f89b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap @@ -0,0 +1 @@ +dict(real=dict(rk="double")) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 new file mode 100644 index 00000000..b301710f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 @@ -0,0 +1,34 @@ + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 new file mode 100644 index 00000000..cbe6317e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 @@ -0,0 +1,41 @@ + +module mod + +contains + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum + + +end module mod diff --git 
a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 new file mode 100644 index 00000000..337465ac --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 @@ -0,0 +1,19 @@ +subroutine sum_with_use(x, res) + use precision + + implicit none + + real(kind=rk), intent(in) :: x(:) + real(kind=rk), intent(out) :: res + + integer :: i + + !print *, "size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + + end subroutine diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 new file mode 100644 index 00000000..ed6c70cb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 @@ -0,0 +1,4 @@ +module precision + integer, parameter :: rk = selected_real_kind(8) + integer, parameter :: ik = selected_real_kind(4) +end module diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/block_docstring/foo.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/block_docstring/foo.f new file mode 100644 index 00000000..c8315f12 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/block_docstring/foo.f @@ -0,0 +1,6 @@ + SUBROUTINE FOO() + INTEGER BAR(2, 3) + + COMMON /BLOCK/ BAR + RETURN + END diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/foo.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/foo.f new file mode 100644 index 00000000..ba397bb3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/foo.f @@ -0,0 +1,62 @@ + subroutine t(fun,a) + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine func(a) +cf2py intent(in,out) a + integer a + a = a + 11 + end + + subroutine func0(a) +cf2py intent(out) a + integer a + a = 11 + end + + subroutine t2(a) +cf2py intent(callback) fun + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine string_callback(callback, a) + external callback + double precision callback + double precision a + character*1 r +cf2py intent(out) a + r = 'r' + a = callback(r) + end + + subroutine string_callback_array(callback, cu, lencu, a) + external callback + integer callback + integer lencu + character*8 cu(lencu) + integer a +cf2py intent(out) a + + a = callback(cu, lencu) + end + + subroutine hidden_callback(a, r) + external global_f +cf2py intent(callback, hide) global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end + + subroutine hidden_callback2(a, r) + external global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh17797.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh17797.f90 new file mode 100644 index 00000000..49853afd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh17797.f90 @@ -0,0 +1,7 @@ +function gh17797(f, y) result(r) + external f + integer(8) :: r, f + integer(8), dimension(:) :: y + r = f(0) + r = r + sum(y) +end function gh17797 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh18335.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh18335.f90 new 
file mode 100644 index 00000000..92b6d754 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh18335.f90 @@ -0,0 +1,17 @@ + ! When gh18335_workaround is defined as an extension, + ! the issue cannot be reproduced. + !subroutine gh18335_workaround(f, y) + ! implicit none + ! external f + ! integer(kind=1) :: y(1) + ! call f(y) + !end subroutine gh18335_workaround + + function gh18335(f) result (r) + implicit none + external f + integer(kind=1) :: y(1), r + y(1) = 123 + call f(y) + r = y(1) + end function gh18335 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh25211.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh25211.f new file mode 100644 index 00000000..ba727a10 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh25211.f @@ -0,0 +1,10 @@ + SUBROUTINE FOO(FUN,R) + EXTERNAL FUN + INTEGER I + REAL*8 R, FUN +Cf2py intent(out) r + R = 0D0 + DO I=-5,5 + R = R + FUN(I) + ENDDO + END diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh25211.pyf b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh25211.pyf new file mode 100644 index 00000000..f1201115 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh25211.pyf @@ -0,0 +1,18 @@ +python module __user__routines + interface + function fun(i) result (r) + integer :: i + real*8 :: r + end function fun + end interface +end python module __user__routines + +python module callback2 + interface + subroutine foo(f,r) + use __user__routines, f=>fun + external f + real*8 intent(out) :: r + end subroutine foo + end interface +end python module callback2 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/gh_22819.pyf b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/gh_22819.pyf new file mode 100644 index 00000000..8eb5bb10 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/gh_22819.pyf @@ -0,0 +1,6 @@ +python module test_22819 + interface + subroutine hello() + end subroutine hello + end interface +end python module test_22819 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/hi77.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/hi77.f new file mode 100644 index 00000000..8b916ebe --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/hi77.f @@ -0,0 +1,3 @@ + SUBROUTINE HI + PRINT*, "HELLO WORLD" + END SUBROUTINE diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/hiworld.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/hiworld.f90 new file mode 100644 index 00000000..981f8775 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/hiworld.f90 @@ -0,0 +1,3 @@ +function hi() + print*, "Hello World" +end function diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/common/block.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/common/block.f new file mode 100644 index 00000000..7ea7968f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/common/block.f @@ -0,0 +1,11 @@ + SUBROUTINE INITCB + DOUBLE PRECISION LONG + CHARACTER STRING + INTEGER OK + + COMMON /BLOCK/ LONG, STRING, OK + LONG = 1.0 + STRING = '2' + OK = 3 + RETURN + END diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/common/gh19161.f90 
b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/common/gh19161.f90 new file mode 100644 index 00000000..a2f40735 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/common/gh19161.f90 @@ -0,0 +1,10 @@ +module typedefmod + use iso_fortran_env, only: real32 +end module typedefmod + +module data + use typedefmod, only: real32 + implicit none + real(kind=real32) :: x + common/test/x +end module data diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/accesstype.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/accesstype.f90 new file mode 100644 index 00000000..e2cbd445 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/accesstype.f90 @@ -0,0 +1,13 @@ +module foo + public + type, private, bind(c) :: a + integer :: i + end type a + type, bind(c) :: b_ + integer :: j + end type b_ + public :: b_ + type :: c + integer :: k + end type c +end module foo diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_common.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_common.f new file mode 100644 index 00000000..5ffd865c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_common.f @@ -0,0 +1,8 @@ + BLOCK DATA PARAM_INI + COMMON /MYCOM/ MYDATA + DATA MYDATA /0/ + END + SUBROUTINE SUB1 + COMMON /MYCOM/ MYDATA + MYDATA = MYDATA + 1 + END diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_multiplier.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_multiplier.f new file mode 100644 index 00000000..19ff8a83 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_multiplier.f @@ -0,0 +1,5 @@ + BLOCK DATA MYBLK + IMPLICIT DOUBLE PRECISION (A-H,O-Z) + COMMON /MYCOM/ IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 + DATA IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 /2*3,2*2,0.0D0/ + END diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_stmts.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_stmts.f90 new file mode 100644 index 00000000..576c5e48 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_stmts.f90 @@ -0,0 +1,20 @@ +! gh-23276 +module cmplxdat + implicit none + integer :: i, j + real :: x, y + real, dimension(2) :: z + real(kind=8) :: pi + complex(kind=8), target :: medium_ref_index + complex(kind=8), target :: ref_index_one, ref_index_two + complex(kind=8), dimension(2) :: my_array + real(kind=8), dimension(3) :: my_real_array = (/1.0d0, 2.0d0, 3.0d0/) + + data i, j / 2, 3 / + data x, y / 1.5, 2.0 / + data z / 3.5, 7.0 / + data medium_ref_index / (1.d0, 0.d0) / + data ref_index_one, ref_index_two / (13.0d0, 21.0d0), (-30.0d0, 43.0d0) / + data my_array / (1.0d0, 2.0d0), (-3.0d0, 4.0d0) / + data pi / 3.1415926535897932384626433832795028841971693993751058209749445923078164062d0 / +end module cmplxdat diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f new file mode 100644 index 00000000..4128f004 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/data_with_comments.f @@ -0,0 +1,8 @@ + BLOCK DATA PARAM_INI + COMMON /MYCOM/ MYTAB + INTEGER MYTAB(3) + DATA MYTAB/ + * 0, ! 
1 and more commenty stuff + * 4, ! 2 + * 0 / + END diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90 new file mode 100644 index 00000000..e327b25c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90 @@ -0,0 +1,6 @@ +module foo + type bar + character(len = 4) :: text + end type bar + type(bar), parameter :: abar = bar('abar') +end module foo diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh15035.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh15035.f new file mode 100644 index 00000000..1bb2e674 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh15035.f @@ -0,0 +1,16 @@ + subroutine subb(k) + real(8), intent(inout) :: k(:) + k=k+1 + endsubroutine + + subroutine subc(w,k) + real(8), intent(in) :: w(:) + real(8), intent(out) :: k(size(w)) + k=w+1 + endsubroutine + + function t0(value) + character value + character t0 + t0 = value + endfunction diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh17859.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh17859.f new file mode 100644 index 00000000..99595384 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh17859.f @@ -0,0 +1,12 @@ + integer(8) function external_as_statement(fcn) + implicit none + external fcn + integer(8) :: fcn + external_as_statement = fcn(0) + end + + integer(8) function external_as_attribute(fcn) + implicit none + integer(8), external :: fcn + external_as_attribute = fcn(0) + end diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh22648.pyf b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh22648.pyf new file mode 100644 index 00000000..b3454f18 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh22648.pyf @@ -0,0 +1,7 @@ +python module iri16py ! in + interface ! in :iri16py + block data ! in :iri16py:iridreg_modified.for + COMMON /fircom/ eden,tabhe,tabla,tabmo,tabza,tabfl + end block data + end interface +end python module iri16py diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh23533.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh23533.f new file mode 100644 index 00000000..db522afa --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh23533.f @@ -0,0 +1,5 @@ + SUBROUTINE EXAMPLE( ) + IF( .TRUE. ) THEN + CALL DO_SOMETHING() + END IF ! ** .TRUE. 
** + END diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh23598.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh23598.f90 new file mode 100644 index 00000000..e0dffb5e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh23598.f90 @@ -0,0 +1,4 @@ +integer function intproduct(a, b) result(res) + integer, intent(in) :: a, b + res = a*b +end function diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 new file mode 100644 index 00000000..3b44efc5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 @@ -0,0 +1,11 @@ +module test_bug + implicit none + private + public :: intproduct + +contains + integer function intproduct(a, b) result(res) + integer, intent(in) :: a, b + res = a*b + end function +end module diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh23879.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh23879.f90 new file mode 100644 index 00000000..fac262d5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh23879.f90 @@ -0,0 +1,20 @@ +module gh23879 + implicit none + private + public :: foo + + contains + + subroutine foo(a, b) + integer, intent(in) :: a + integer, intent(out) :: b + b = a + call bar(b) + end subroutine + + subroutine bar(x) + integer, intent(inout) :: x + x = 2*x + end subroutine + + end module gh23879 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh2848.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh2848.f90 new file mode 100644 index 00000000..31ea9327 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh2848.f90 @@ -0,0 +1,13 @@ + subroutine gh2848( & + ! first 2 parameters + par1, par2,& + ! last 2 parameters + par3, par4) + + integer, intent(in) :: par1, par2 + integer, intent(out) :: par3, par4 + + par3 = par1 + par4 = par2 + + end subroutine gh2848 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/operators.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/operators.f90 new file mode 100644 index 00000000..1d060a3d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/operators.f90 @@ -0,0 +1,49 @@ +module foo + type bar + character(len = 32) :: item + end type bar + interface operator(.item.) + module procedure item_int, item_real + end interface operator(.item.) 
+ interface operator(==) + module procedure items_are_equal + end interface operator(==) + interface assignment(=) + module procedure get_int, get_real + end interface assignment(=) +contains + function item_int(val) result(elem) + integer, intent(in) :: val + type(bar) :: elem + + write(elem%item, "(I32)") val + end function item_int + + function item_real(val) result(elem) + real, intent(in) :: val + type(bar) :: elem + + write(elem%item, "(1PE32.12)") val + end function item_real + + function items_are_equal(val1, val2) result(equal) + type(bar), intent(in) :: val1, val2 + logical :: equal + + equal = (val1%item == val2%item) + end function items_are_equal + + subroutine get_real(rval, item) + real, intent(out) :: rval + type(bar), intent(in) :: item + + read(item%item, *) rval + end subroutine get_real + + subroutine get_int(rval, item) + integer, intent(out) :: rval + type(bar), intent(in) :: item + + read(item%item, *) rval + end subroutine get_int +end module foo diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/privatemod.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/privatemod.f90 new file mode 100644 index 00000000..2674c214 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/privatemod.f90 @@ -0,0 +1,11 @@ +module foo + private + integer :: a + public :: setA + integer :: b +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/publicmod.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/publicmod.f90 new file mode 100644 index 00000000..1db76e3f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/publicmod.f90 @@ -0,0 +1,10 @@ +module foo + public + integer, private :: a + public :: setA +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 new file mode 100644 index 00000000..46bef7cb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 @@ -0,0 +1,10 @@ +module foo + public + integer, private :: a + integer :: b +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 new file mode 100644 index 00000000..13515ce9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 @@ -0,0 +1,4 @@ +subroutine foo(x) + real(8), intent(in) :: x + ! 
Écrit à l'écran la valeur de x +end subroutine diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap new file mode 100644 index 00000000..a4425f88 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap @@ -0,0 +1 @@ +dict(real=dict(real32='float', real64='double'), integer=dict(int64='long_long')) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 new file mode 100644 index 00000000..1e1dc1d4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 @@ -0,0 +1,9 @@ + subroutine func1(n, x, res) + use, intrinsic :: iso_fortran_env, only: int64, real64 + implicit none + integer(int64), intent(in) :: n + real(real64), intent(in) :: x(n) + real(real64), intent(out) :: res +!f2py intent(hide) :: n + res = sum(x) + end diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/isocintrin/isoCtests.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/isocintrin/isoCtests.f90 new file mode 100644 index 00000000..765f7c1c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/isocintrin/isoCtests.f90 @@ -0,0 +1,34 @@ + module coddity + use iso_c_binding, only: c_double, c_int, c_int64_t + implicit none + contains + subroutine c_add(a, b, c) bind(c, name="c_add") + real(c_double), intent(in) :: a, b + real(c_double), intent(out) :: c + c = a + b + end subroutine c_add + ! gh-9693 + function wat(x, y) result(z) bind(c) + integer(c_int), intent(in) :: x, y + integer(c_int) :: z + + z = x + 7 + end function wat + ! gh-25207 + subroutine c_add_int64(a, b, c) bind(c) + integer(c_int64_t), intent(in) :: a, b + integer(c_int64_t), intent(out) :: c + c = a + b + end subroutine c_add_int64 + ! 
gh-25207 + subroutine add_arr(A, B, C) + integer(c_int64_t), intent(in) :: A(3) + integer(c_int64_t), intent(in) :: B(3) + integer(c_int64_t), intent(out) :: C(3) + integer :: j + + do j = 1, 3 + C(j) = A(j)+B(j) + end do + end subroutine + end module coddity diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/kind/foo.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/kind/foo.f90 new file mode 100644 index 00000000..d3d15cfb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/kind/foo.f90 @@ -0,0 +1,20 @@ + + +subroutine selectedrealkind(p, r, res) + implicit none + + integer, intent(in) :: p, r + !f2py integer :: r=0 + integer, intent(out) :: res + res = selected_real_kind(p, r) + +end subroutine + +subroutine selectedintkind(p, res) + implicit none + + integer, intent(in) :: p + integer, intent(out) :: res + res = selected_int_kind(p) + +end subroutine diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo.f new file mode 100644 index 00000000..c3474257 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo.f @@ -0,0 +1,5 @@ + subroutine bar11(a) +cf2py intent(out) a + integer a + a = 11 + end diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 new file mode 100644 index 00000000..7543a6ac --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 @@ -0,0 +1,8 @@ + module foo_fixed + contains + subroutine bar12(a) +!f2py intent(out) a + integer a + a = 12 + end subroutine bar12 + end module foo_fixed diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 new file mode 100644 index 00000000..c1b641f1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 @@ -0,0 +1,8 @@ +module foo_free +contains + subroutine bar13(a) + !f2py intent(out) a + integer a + a = 13 + end subroutine bar13 +end module foo_free diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/module_data/mod.mod b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/module_data/mod.mod new file mode 100644 index 00000000..8670a97e Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/module_data/mod.mod differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/module_data/module_data_docstring.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/module_data/module_data_docstring.f90 new file mode 100644 index 00000000..4505e0cb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/module_data/module_data_docstring.f90 @@ -0,0 +1,12 @@ +module mod + integer :: i + integer :: x(4) + real, dimension(2,3) :: a + real, allocatable, dimension(:,:) :: b +contains + subroutine foo + integer :: k + k = 1 + a(1,2) = a(1,2)+3 + end subroutine foo +end module mod diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 new file mode 100644 index 00000000..bf1fa928 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 @@ 
-0,0 +1,7 @@ +subroutine foo(is_, ie_, arr, tout) + implicit none + integer :: is_,ie_ + real, intent(in) :: arr(is_:ie_) + real, intent(out) :: tout(is_:ie_) + tout = arr +end diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 new file mode 100644 index 00000000..ac90cedc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 @@ -0,0 +1,57 @@ +! Check that parameters are correct intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 3._sp + real(dp), parameter :: three_d = 3._dp + integer(ii), parameter :: three_i = 3_ii + integer(il), parameter :: three_l = 3_il + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine + + +subroutine foo_no(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 3. + real(dp), parameter :: three_d = 3. + integer(ii), parameter :: three_i = 3 + integer(il), parameter :: three_l = 3 + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine + +subroutine foo_sum(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 2._sp + 1._sp + real(dp), parameter :: three_d = 1._dp + 2._dp + integer(ii), parameter :: three_i = 2_ii + 1_ii + integer(il), parameter :: three_l = 1_il + 2_il + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 new file mode 100644 index 00000000..e51f5e9b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 @@ -0,0 +1,15 @@ +! Check that parameters are correct intercepted. +! Constants with comma separations are commonly +! 
used, for instance Pi = 3._dp +subroutine foo_compound_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + integer(ii), parameter :: two = 2_ii + integer(ii), parameter :: six = three * 1_ii * two + + x(1) = x(1) + x(2) + x(3) * six + return +end subroutine diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 new file mode 100644 index 00000000..aaa83d2e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 @@ -0,0 +1,22 @@ +! Check that parameters are correct intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + +subroutine foo_long(x) + implicit none + integer, parameter :: ii = selected_int_kind(18) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 new file mode 100644 index 00000000..62c9a5b9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 @@ -0,0 +1,23 @@ +! Check that parameters are correct intercepted. +! Specifically that types of constants without +! compound kind specs are correctly inferred +! adapted Gibbs iteration code from pymc +! for this test case +subroutine foo_non_compound_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + + integer(ii) maxiterates + parameter (maxiterates=2) + + integer(ii) maxseries + parameter (maxseries=2) + + integer(ii) wasize + parameter (wasize=maxiterates*maxseries) + integer(ii), intent(inout) :: x + dimension x(wasize) + + x(1) = x(1) + x(2) + x(3) + x(4) * wasize + return +end subroutine diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 new file mode 100644 index 00000000..02ac9dd9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 @@ -0,0 +1,23 @@ +! Check that parameters are correct intercepted. +! Constants with comma separations are commonly +! 
used, for instance Pi = 3._dp +subroutine foo_single(x) + implicit none + integer, parameter :: rp = selected_real_kind(6) + real(rp), intent(inout) :: x + dimension x(3) + real(rp), parameter :: three = 3._rp + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + +subroutine foo_double(x) + implicit none + integer, parameter :: rp = selected_real_kind(15) + real(rp), intent(inout) :: x + dimension x(3) + real(rp), parameter :: three = 3._rp + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/quoted_character/foo.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/quoted_character/foo.f new file mode 100644 index 00000000..9dc1cfa4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/quoted_character/foo.f @@ -0,0 +1,14 @@ + SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6) + CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR + PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!", + 1 OPENPAR="(", CLOSEPAR=")") + CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 +Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 + OUT1 = SINGLE + OUT2 = DOUBLE + OUT3 = SEMICOL + OUT4 = EXCLA + OUT5 = OPENPAR + OUT6 = CLOSEPAR + RETURN + END diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/regression/gh25337/data.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/regression/gh25337/data.f90 new file mode 100644 index 00000000..483d13ce --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/regression/gh25337/data.f90 @@ -0,0 +1,8 @@ +module data + real(8) :: shift +contains + subroutine set_shift(in_shift) + real(8), intent(in) :: in_shift + shift = in_shift + end subroutine set_shift +end module data diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/regression/gh25337/use_data.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/regression/gh25337/use_data.f90 new file mode 100644 index 00000000..b3fae8b8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/regression/gh25337/use_data.f90 @@ -0,0 +1,6 @@ +subroutine shift_a(dim_a, a) + use data, only: shift + integer, intent(in) :: dim_a + real(8), intent(inout), dimension(dim_a) :: a + a = a + shift +end subroutine shift_a diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/regression/inout.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/regression/inout.f90 new file mode 100644 index 00000000..80cdad90 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/regression/inout.f90 @@ -0,0 +1,9 @@ +! Check that intent(in out) translates as intent(inout). +! The separation seems to be a common usage. 
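The `inout.f90` regression fixture here checks that `intent(in out)` translates as `intent(inout)`; the wrapper generated from the `foo` subroutine that follows mutates its argument in place. A hypothetical build-and-use sketch (the module name `inout_demo` is ours; a Fortran compiler is assumed to be available):

    # Build step (shell): python -m numpy.f2py -c inout.f90 -m inout_demo
    import numpy as np

    import inout_demo  # hypothetical module name from the -m flag above

    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)  # real(4), contiguous
    inout_demo.foo(x)  # in place: x(1) = x(1) + x(2) + x(3)
    print(x[0])        # expected 6.0
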
+ subroutine foo(x) + implicit none + real(4), intent(in out) :: x + dimension x(3) + x(1) = x(1) + x(2) + x(3) + return + end diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_character/foo77.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_character/foo77.f new file mode 100644 index 00000000..facae101 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_character/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + character value + character t0 + t0 = value + end + function t1(value) + character*1 value + character*1 t1 + t1 = value + end + function t5(value) + character*5 value + character*5 t5 + t5 = value + end + function ts(value) + character*(*) value + character*(*) ts + ts = value + end + + subroutine s0(t0,value) + character value + character t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + character*1 value + character*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s5(t5,value) + character*5 value + character*5 t5 +cf2py intent(out) t5 + t5 = value + end + subroutine ss(ts,value) + character*(*) value + character*10 ts +cf2py intent(out) ts + ts = value + end diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_character/foo90.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_character/foo90.f90 new file mode 100644 index 00000000..36182bcf --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_character/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_char + contains + function t0(value) + character :: value + character :: t0 + t0 = value + end function t0 + function t1(value) + character(len=1) :: value + character(len=1) :: t1 + t1 = value + end function t1 + function t5(value) + character(len=5) :: value + character(len=5) :: t5 + t5 = value + end function t5 + function ts(value) + character(len=*) :: value + character(len=10) :: ts + ts = value + end function ts + + subroutine s0(t0,value) + character :: value + character :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + character(len=1) :: value + character(len=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s5(t5,value) + character(len=5) :: value + character(len=5) :: t5 +!f2py intent(out) t5 + t5 = value + end subroutine s5 + subroutine ss(ts,value) + character(len=*) :: value + character(len=10) :: ts +!f2py intent(out) ts + ts = value + end subroutine ss +end module f90_return_char diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_complex/foo77.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_complex/foo77.f new file mode 100644 index 00000000..37a1ec84 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_complex/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + complex value + complex t0 + t0 = value + end + function t8(value) + complex*8 value + complex*8 t8 + t8 = value + end + function t16(value) + complex*16 value + complex*16 t16 + t16 = value + end + function td(value) + double complex value + double complex td + td = value + end + + subroutine s0(t0,value) + complex value + complex t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s8(t8,value) + complex*8 value + complex*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine s16(t16,value) + complex*16 value + complex*16 t16 +cf2py intent(out) t16 + t16 = value + end + subroutine sd(td,value) + double complex value 
+ double complex td +cf2py intent(out) td + td = value + end diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_complex/foo90.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_complex/foo90.f90 new file mode 100644 index 00000000..adc27b47 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_complex/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_complex + contains + function t0(value) + complex :: value + complex :: t0 + t0 = value + end function t0 + function t8(value) + complex(kind=4) :: value + complex(kind=4) :: t8 + t8 = value + end function t8 + function t16(value) + complex(kind=8) :: value + complex(kind=8) :: t16 + t16 = value + end function t16 + function td(value) + double complex :: value + double complex :: td + td = value + end function td + + subroutine s0(t0,value) + complex :: value + complex :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s8(t8,value) + complex(kind=4) :: value + complex(kind=4) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine s16(t16,value) + complex(kind=8) :: value + complex(kind=8) :: t16 +!f2py intent(out) t16 + t16 = value + end subroutine s16 + subroutine sd(td,value) + double complex :: value + double complex :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_complex diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_integer/foo77.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_integer/foo77.f new file mode 100644 index 00000000..1ab895b9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_integer/foo77.f @@ -0,0 +1,56 @@ + function t0(value) + integer value + integer t0 + t0 = value + end + function t1(value) + integer*1 value + integer*1 t1 + t1 = value + end + function t2(value) + integer*2 value + integer*2 t2 + t2 = value + end + function t4(value) + integer*4 value + integer*4 t4 + t4 = value + end + function t8(value) + integer*8 value + integer*8 t8 + t8 = value + end + + subroutine s0(t0,value) + integer value + integer t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + integer*1 value + integer*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + integer*2 value + integer*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + integer*4 value + integer*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + integer*8 value + integer*8 t8 +cf2py intent(out) t8 + t8 = value + end diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_integer/foo90.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_integer/foo90.f90 new file mode 100644 index 00000000..ba9249aa --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_integer/foo90.f90 @@ -0,0 +1,59 @@ +module f90_return_integer + contains + function t0(value) + integer :: value + integer :: t0 + t0 = value + end function t0 + function t1(value) + integer(kind=1) :: value + integer(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + integer(kind=2) :: value + integer(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + integer(kind=4) :: value + integer(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + integer(kind=8) :: value + integer(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + integer :: value + integer :: t0 
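! (editorial note, not part of the vendored file) The !f2py directive on
! the next line is the point of all these fixtures: intent(out) drops t0
! from the Python argument list and turns it into the return value, so
! the generated wrapper is called as t0 = f90_return_integer.s0(value).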
+!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + integer(kind=1) :: value + integer(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + integer(kind=2) :: value + integer(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + integer(kind=4) :: value + integer(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + integer(kind=8) :: value + integer(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_integer diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_logical/foo77.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_logical/foo77.f new file mode 100644 index 00000000..ef530145 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_logical/foo77.f @@ -0,0 +1,56 @@ + function t0(value) + logical value + logical t0 + t0 = value + end + function t1(value) + logical*1 value + logical*1 t1 + t1 = value + end + function t2(value) + logical*2 value + logical*2 t2 + t2 = value + end + function t4(value) + logical*4 value + logical*4 t4 + t4 = value + end +c function t8(value) +c logical*8 value +c logical*8 t8 +c t8 = value +c end + + subroutine s0(t0,value) + logical value + logical t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + logical*1 value + logical*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + logical*2 value + logical*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + logical*4 value + logical*4 t4 +cf2py intent(out) t4 + t4 = value + end +c subroutine s8(t8,value) +c logical*8 value +c logical*8 t8 +cf2py intent(out) t8 +c t8 = value +c end diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_logical/foo90.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_logical/foo90.f90 new file mode 100644 index 00000000..a4526468 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_logical/foo90.f90 @@ -0,0 +1,59 @@ +module f90_return_logical + contains + function t0(value) + logical :: value + logical :: t0 + t0 = value + end function t0 + function t1(value) + logical(kind=1) :: value + logical(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + logical(kind=2) :: value + logical(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + logical(kind=4) :: value + logical(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + logical(kind=8) :: value + logical(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + logical :: value + logical :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + logical(kind=1) :: value + logical(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + logical(kind=2) :: value + logical(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + logical(kind=4) :: value + logical(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + logical(kind=8) :: value + logical(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_logical diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo77.f 
b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo77.f new file mode 100644 index 00000000..bf43dbf1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + real value + real t0 + t0 = value + end + function t4(value) + real*4 value + real*4 t4 + t4 = value + end + function t8(value) + real*8 value + real*8 t8 + t8 = value + end + function td(value) + double precision value + double precision td + td = value + end + + subroutine s0(t0,value) + real value + real t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s4(t4,value) + real*4 value + real*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + real*8 value + real*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine sd(td,value) + double precision value + double precision td +cf2py intent(out) td + td = value + end diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo90.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo90.f90 new file mode 100644 index 00000000..df971998 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_real + contains + function t0(value) + real :: value + real :: t0 + t0 = value + end function t0 + function t4(value) + real(kind=4) :: value + real(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + real(kind=8) :: value + real(kind=8) :: t8 + t8 = value + end function t8 + function td(value) + double precision :: value + double precision :: td + td = value + end function td + + subroutine s0(t0,value) + real :: value + real :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s4(t4,value) + real(kind=4) :: value + real(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + real(kind=8) :: value + real(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine sd(td,value) + double precision :: value + double precision :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_real diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/size/foo.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/size/foo.f90 new file mode 100644 index 00000000..5b66f8c4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/size/foo.f90 @@ -0,0 +1,44 @@ + +subroutine foo(a, n, m, b) + implicit none + + real, intent(in) :: a(n, m) + integer, intent(in) :: n, m + real, intent(out) :: b(size(a, 1)) + + integer :: i + + do i = 1, size(b) + b(i) = sum(a(i,:)) + enddo +end subroutine + +subroutine trans(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x,2), size(x,1) ) :: y + integer :: N, M, i, j + N = size(x,1) + M = size(x,2) + DO i=1,N + do j=1,M + y(j,i) = x(i,j) + END DO + END DO +end subroutine trans + +subroutine flatten(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x) ) :: y + integer :: N, M, i, j, k + N = size(x,1) + M = size(x,2) + k = 1 + DO i=1,N + do j=1,M + y(k) = x(i,j) + k = k + 1 + END DO + END DO +end subroutine flatten diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/char.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/char.f90 new file mode 100644 index 00000000..bb7985ce --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/char.f90 @@ -0,0 +1,29 @@ +MODULE char_test + +CONTAINS + +SUBROUTINE change_strings(strings, n_strs, out_strings) + IMPLICIT NONE + + ! Inputs + INTEGER, INTENT(IN) :: n_strs + CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings + CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: out_strings + +!f2py INTEGER, INTENT(IN) :: n_strs +!f2py CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings +!f2py CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: strings + + ! Misc. + INTEGER*4 :: j + + + DO j=1, n_strs + out_strings(1,j) = strings(1,j) + out_strings(2,j) = 'A' + END DO + +END SUBROUTINE change_strings + +END MODULE char_test + diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/fixed_string.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/fixed_string.f90 new file mode 100644 index 00000000..7fd15854 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/fixed_string.f90 @@ -0,0 +1,34 @@ +function sint(s) result(i) + implicit none + character(len=*) :: s + integer :: j, i + i = 0 + do j=len(s), 1, -1 + if (.not.((i.eq.0).and.(s(j:j).eq.' '))) then + i = i + ichar(s(j:j)) * 10 ** (j - 1) + endif + end do + return + end function sint + + function test_in_bytes4(a) result (i) + implicit none + integer :: sint + character(len=4) :: a + integer :: i + i = sint(a) + a(1:1) = 'A' + return + end function test_in_bytes4 + + function test_inout_bytes4(a) result (i) + implicit none + integer :: sint + character(len=4), intent(inout) :: a + integer :: i + if (a(1:1).ne.' ') then + a(1:1) = 'E' + endif + i = sint(a) + return + end function test_inout_bytes4 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh24008.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh24008.f new file mode 100644 index 00000000..ab64cf77 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh24008.f @@ -0,0 +1,8 @@ + SUBROUTINE GREET(NAME, GREETING) + CHARACTER NAME*(*), GREETING*(*) + CHARACTER*(50) MESSAGE + + MESSAGE = 'Hello, ' // NAME // ', ' // GREETING +c$$$ PRINT *, MESSAGE + + END SUBROUTINE GREET diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh24662.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh24662.f90 new file mode 100644 index 00000000..ca53413c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh24662.f90 @@ -0,0 +1,7 @@ +subroutine string_inout_optional(output) + implicit none + character*(32), optional, intent(inout) :: output + if (present(output)) then + output="output string" + endif +end subroutine diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh25286.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh25286.f90 new file mode 100644 index 00000000..db1c7100 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh25286.f90 @@ -0,0 +1,14 @@ +subroutine charint(trans, info) + character, intent(in) :: trans + integer, intent(out) :: info + if (trans == 'N') then + info = 1 + else if (trans == 'T') then + info = 2 + else if (trans == 'C') then + info = 3 + else + info = -1 + end if + +end subroutine charint diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh25286.pyf b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh25286.pyf new file mode 100644 index 
00000000..7b960907 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh25286.pyf @@ -0,0 +1,12 @@ +python module _char_handling_test + interface + subroutine charint(trans, info) + callstatement (*f2py_func)(&trans, &info) + callprotoargument char*, int* + + character, intent(in), check(trans=='N'||trans=='T'||trans=='C') :: trans = 'N' + integer intent(out) :: info + + end subroutine charint + end interface +end python module _char_handling_test diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh25286_bc.pyf b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh25286_bc.pyf new file mode 100644 index 00000000..e7b10fa9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/gh25286_bc.pyf @@ -0,0 +1,12 @@ +python module _char_handling_test + interface + subroutine charint(trans, info) + callstatement (*f2py_func)(&trans, &info) + callprotoargument char*, int* + + character, intent(in), check(*trans=='N'||*trans=='T'||*trans=='C') :: trans = 'N' + integer intent(out) :: info + + end subroutine charint + end interface +end python module _char_handling_test diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/scalar_string.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/scalar_string.f90 new file mode 100644 index 00000000..f8f07617 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/scalar_string.f90 @@ -0,0 +1,9 @@ +MODULE string_test + + character(len=8) :: string + character string77 * 8 + + character(len=12), dimension(5,7) :: strarr + character strarr77(5,7) * 12 + +END MODULE string_test diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/string.f b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/string.f new file mode 100644 index 00000000..5210ca4d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/string/string.f @@ -0,0 +1,12 @@ +C FILE: STRING.F + SUBROUTINE FOO(A,B,C,D) + CHARACTER*5 A, B + CHARACTER*(*) C,D +Cf2py intent(in) a,c +Cf2py intent(inout) b,d + A(1:1) = 'A' + B(1:1) = 'B' + C(1:1) = 'C' + D(1:1) = 'D' + END +C END OF FILE STRING.F diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90 b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90 new file mode 100644 index 00000000..7d9dc0fd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90 @@ -0,0 +1,9 @@ +module fortfuncs + implicit none +contains + subroutine square(x,y) + integer, intent(in), value :: x + integer, intent(out) :: y + y = x*x + end subroutine square +end module fortfuncs diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_abstract_interface.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_abstract_interface.py new file mode 100644 index 00000000..42902913 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_abstract_interface.py @@ -0,0 +1,25 @@ +from pathlib import Path +import pytest +import textwrap +from . 
import util +from numpy.f2py import crackfortran +from numpy.testing import IS_WASM + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +class TestAbstractInterface(util.F2PyTest): + sources = [util.getpath("tests", "src", "abstract_interface", "foo.f90")] + + skip = ["add1", "add2"] + + def test_abstract_interface(self): + assert self.module.ops_module.foo(3, 5) == (8, 13) + + def test_parse_abstract_interface(self): + # Test gh18403 + fpath = util.getpath("tests", "src", "abstract_interface", + "gh18403_mod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + assert len(mod[0]["body"]) == 1 + assert mod[0]["body"][0]["block"] == "abstract interface" diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_array_from_pyobj.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_array_from_pyobj.py new file mode 100644 index 00000000..2b8c8def --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_array_from_pyobj.py @@ -0,0 +1,686 @@ +import os +import sys +import copy +import platform +import pytest + +import numpy as np + +from numpy.testing import assert_, assert_equal +from numpy.core.multiarray import typeinfo as _typeinfo +from . import util + +wrap = None + +# Extend core typeinfo with CHARACTER to test dtype('c') +_ti = _typeinfo['STRING'] +typeinfo = dict( + CHARACTER=type(_ti)(('c', _ti.num, 8, _ti.alignment, _ti.type)), + **_typeinfo) + + +def setup_module(): + """ + Build the required testing extension module + + """ + global wrap + + # Check compiler availability first + if not util.has_c_compiler(): + pytest.skip("No C compiler available") + + if wrap is None: + config_code = """ + config.add_extension('test_array_from_pyobj_ext', + sources=['wrapmodule.c', 'fortranobject.c'], + define_macros=[]) + """ + d = os.path.dirname(__file__) + src = [ + util.getpath("tests", "src", "array_from_pyobj", "wrapmodule.c"), + util.getpath("src", "fortranobject.c"), + util.getpath("src", "fortranobject.h"), + ] + wrap = util.build_module_distutils(src, config_code, + "test_array_from_pyobj_ext") + + +def flags_info(arr): + flags = wrap.array_attrs(arr)[6] + return flags2names(flags) + + +def flags2names(flags): + info = [] + for flagname in [ + "CONTIGUOUS", + "FORTRAN", + "OWNDATA", + "ENSURECOPY", + "ENSUREARRAY", + "ALIGNED", + "NOTSWAPPED", + "WRITEABLE", + "WRITEBACKIFCOPY", + "UPDATEIFCOPY", + "BEHAVED", + "BEHAVED_RO", + "CARRAY", + "FARRAY", + ]: + if abs(flags) & getattr(wrap, flagname, 0): + info.append(flagname) + return info + + +class Intent: + def __init__(self, intent_list=[]): + self.intent_list = intent_list[:] + flags = 0 + for i in intent_list: + if i == "optional": + flags |= wrap.F2PY_OPTIONAL + else: + flags |= getattr(wrap, "F2PY_INTENT_" + i.upper()) + self.flags = flags + + def __getattr__(self, name): + name = name.lower() + if name == "in_": + name = "in" + return self.__class__(self.intent_list + [name]) + + def __str__(self): + return "intent(%s)" % (",".join(self.intent_list)) + + def __repr__(self): + return "Intent(%r)" % (self.intent_list) + + def is_intent(self, *names): + for name in names: + if name not in self.intent_list: + return False + return True + + def is_intent_exact(self, *names): + return len(self.intent_list) == len(names) and self.is_intent(*names) + + +intent = Intent() + +_type_names = [ + "BOOL", + "BYTE", + "UBYTE", + "SHORT", + "USHORT", + "INT", + "UINT", + "LONG", + "ULONG", + "LONGLONG", + "ULONGLONG", + "FLOAT", + "DOUBLE", + "CFLOAT", + 
"STRING1", + "STRING5", + "CHARACTER", +] + +_cast_dict = {"BOOL": ["BOOL"]} +_cast_dict["BYTE"] = _cast_dict["BOOL"] + ["BYTE"] +_cast_dict["UBYTE"] = _cast_dict["BOOL"] + ["UBYTE"] +_cast_dict["BYTE"] = ["BYTE"] +_cast_dict["UBYTE"] = ["UBYTE"] +_cast_dict["SHORT"] = _cast_dict["BYTE"] + ["UBYTE", "SHORT"] +_cast_dict["USHORT"] = _cast_dict["UBYTE"] + ["BYTE", "USHORT"] +_cast_dict["INT"] = _cast_dict["SHORT"] + ["USHORT", "INT"] +_cast_dict["UINT"] = _cast_dict["USHORT"] + ["SHORT", "UINT"] + +_cast_dict["LONG"] = _cast_dict["INT"] + ["LONG"] +_cast_dict["ULONG"] = _cast_dict["UINT"] + ["ULONG"] + +_cast_dict["LONGLONG"] = _cast_dict["LONG"] + ["LONGLONG"] +_cast_dict["ULONGLONG"] = _cast_dict["ULONG"] + ["ULONGLONG"] + +_cast_dict["FLOAT"] = _cast_dict["SHORT"] + ["USHORT", "FLOAT"] +_cast_dict["DOUBLE"] = _cast_dict["INT"] + ["UINT", "FLOAT", "DOUBLE"] + +_cast_dict["CFLOAT"] = _cast_dict["FLOAT"] + ["CFLOAT"] + +_cast_dict['STRING1'] = ['STRING1'] +_cast_dict['STRING5'] = ['STRING5'] +_cast_dict['CHARACTER'] = ['CHARACTER'] + +# 32 bit system malloc typically does not provide the alignment required by +# 16 byte long double types this means the inout intent cannot be satisfied +# and several tests fail as the alignment flag can be randomly true or fals +# when numpy gains an aligned allocator the tests could be enabled again +# +# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE. +if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) + and sys.platform != "win32" + and (platform.system(), platform.processor()) != ("Darwin", "arm")): + _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"]) + _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [ + "ULONG", + "FLOAT", + "DOUBLE", + "LONGDOUBLE", + ] + _cast_dict["CLONGDOUBLE"] = _cast_dict["LONGDOUBLE"] + [ + "CFLOAT", + "CDOUBLE", + "CLONGDOUBLE", + ] + _cast_dict["CDOUBLE"] = _cast_dict["DOUBLE"] + ["CFLOAT", "CDOUBLE"] + + +class Type: + _type_cache = {} + + def __new__(cls, name): + if isinstance(name, np.dtype): + dtype0 = name + name = None + for n, i in typeinfo.items(): + if not isinstance(i, type) and dtype0.type is i.type: + name = n + break + obj = cls._type_cache.get(name.upper(), None) + if obj is not None: + return obj + obj = object.__new__(cls) + obj._init(name) + cls._type_cache[name.upper()] = obj + return obj + + def _init(self, name): + self.NAME = name.upper() + + if self.NAME == 'CHARACTER': + info = typeinfo[self.NAME] + self.type_num = getattr(wrap, 'NPY_STRING') + self.elsize = 1 + self.dtype = np.dtype('c') + elif self.NAME.startswith('STRING'): + info = typeinfo[self.NAME[:6]] + self.type_num = getattr(wrap, 'NPY_STRING') + self.elsize = int(self.NAME[6:] or 0) + self.dtype = np.dtype(f'S{self.elsize}') + else: + info = typeinfo[self.NAME] + self.type_num = getattr(wrap, 'NPY_' + self.NAME) + self.elsize = info.bits // 8 + self.dtype = np.dtype(info.type) + + assert self.type_num == info.num + self.type = info.type + self.dtypechar = info.char + + def __repr__(self): + return (f"Type({self.NAME})|type_num={self.type_num}," + f" dtype={self.dtype}," + f" type={self.type}, elsize={self.elsize}," + f" dtypechar={self.dtypechar}") + + def cast_types(self): + return [self.__class__(_m) for _m in _cast_dict[self.NAME]] + + def all_types(self): + return [self.__class__(_m) for _m in _type_names] + + def smaller_types(self): + bits = typeinfo[self.NAME].alignment + types = [] + for name in _type_names: + if typeinfo[name].alignment < bits: + types.append(Type(name)) + return 
types + + def equal_types(self): + bits = typeinfo[self.NAME].alignment + types = [] + for name in _type_names: + if name == self.NAME: + continue + if typeinfo[name].alignment == bits: + types.append(Type(name)) + return types + + def larger_types(self): + bits = typeinfo[self.NAME].alignment + types = [] + for name in _type_names: + if typeinfo[name].alignment > bits: + types.append(Type(name)) + return types + + +class Array: + + def __repr__(self): + return (f'Array({self.type}, {self.dims}, {self.intent},' + f' {self.obj})|arr={self.arr}') + + def __init__(self, typ, dims, intent, obj): + self.type = typ + self.dims = dims + self.intent = intent + self.obj_copy = copy.deepcopy(obj) + self.obj = obj + + # arr.dtypechar may be different from typ.dtypechar + self.arr = wrap.call(typ.type_num, + typ.elsize, + dims, intent.flags, obj) + + assert isinstance(self.arr, np.ndarray) + + self.arr_attr = wrap.array_attrs(self.arr) + + if len(dims) > 1: + if self.intent.is_intent("c"): + assert (intent.flags & wrap.F2PY_INTENT_C) + assert not self.arr.flags["FORTRAN"] + assert self.arr.flags["CONTIGUOUS"] + assert (not self.arr_attr[6] & wrap.FORTRAN) + else: + assert (not intent.flags & wrap.F2PY_INTENT_C) + assert self.arr.flags["FORTRAN"] + assert not self.arr.flags["CONTIGUOUS"] + assert (self.arr_attr[6] & wrap.FORTRAN) + + if obj is None: + self.pyarr = None + self.pyarr_attr = None + return + + if intent.is_intent("cache"): + assert isinstance(obj, np.ndarray), repr(type(obj)) + self.pyarr = np.array(obj).reshape(*dims).copy() + else: + self.pyarr = np.array( + np.array(obj, dtype=typ.dtypechar).reshape(*dims), + order=self.intent.is_intent("c") and "C" or "F", + ) + assert self.pyarr.dtype == typ + self.pyarr.setflags(write=self.arr.flags["WRITEABLE"]) + assert self.pyarr.flags["OWNDATA"], (obj, intent) + self.pyarr_attr = wrap.array_attrs(self.pyarr) + + if len(dims) > 1: + if self.intent.is_intent("c"): + assert not self.pyarr.flags["FORTRAN"] + assert self.pyarr.flags["CONTIGUOUS"] + assert (not self.pyarr_attr[6] & wrap.FORTRAN) + else: + assert self.pyarr.flags["FORTRAN"] + assert not self.pyarr.flags["CONTIGUOUS"] + assert (self.pyarr_attr[6] & wrap.FORTRAN) + + assert self.arr_attr[1] == self.pyarr_attr[1] # nd + assert self.arr_attr[2] == self.pyarr_attr[2] # dimensions + if self.arr_attr[1] <= 1: + assert self.arr_attr[3] == self.pyarr_attr[3], repr(( + self.arr_attr[3], + self.pyarr_attr[3], + self.arr.tobytes(), + self.pyarr.tobytes(), + )) # strides + assert self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], repr(( + self.arr_attr[5], self.pyarr_attr[5] + )) # descr + assert self.arr_attr[6] == self.pyarr_attr[6], repr(( + self.arr_attr[6], + self.pyarr_attr[6], + flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), + flags2names(self.arr_attr[6]), + intent, + )) # flags + + if intent.is_intent("cache"): + assert self.arr_attr[5][3] >= self.type.elsize + else: + assert self.arr_attr[5][3] == self.type.elsize + assert (self.arr_equal(self.pyarr, self.arr)) + + if isinstance(self.obj, np.ndarray): + if typ.elsize == Type(obj.dtype).elsize: + if not intent.is_intent("copy") and self.arr_attr[1] <= 1: + assert self.has_shared_memory() + + def arr_equal(self, arr1, arr2): + if arr1.shape != arr2.shape: + return False + return (arr1 == arr2).all() + + def __str__(self): + return str(self.arr) + + def has_shared_memory(self): + """Check that created array shares data with input array.""" + if self.obj is self.arr: + return True + if not isinstance(self.obj, np.ndarray): + return 
False + obj_attr = wrap.array_attrs(self.obj) + return obj_attr[0] == self.arr_attr[0] + + +class TestIntent: + def test_in_out(self): + assert str(intent.in_.out) == "intent(in,out)" + assert intent.in_.c.is_intent("c") + assert not intent.in_.c.is_intent_exact("c") + assert intent.in_.c.is_intent_exact("c", "in") + assert intent.in_.c.is_intent_exact("in", "c") + assert not intent.in_.is_intent("c") + + +class TestSharedMemory: + + @pytest.fixture(autouse=True, scope="class", params=_type_names) + def setup_type(self, request): + request.cls.type = Type(request.param) + request.cls.array = lambda self, dims, intent, obj: Array( + Type(request.param), dims, intent, obj) + + @property + def num2seq(self): + if self.type.NAME.startswith('STRING'): + elsize = self.type.elsize + return ['1' * elsize, '2' * elsize] + return [1, 2] + + @property + def num23seq(self): + if self.type.NAME.startswith('STRING'): + elsize = self.type.elsize + return [['1' * elsize, '2' * elsize, '3' * elsize], + ['4' * elsize, '5' * elsize, '6' * elsize]] + return [[1, 2, 3], [4, 5, 6]] + + def test_in_from_2seq(self): + a = self.array([2], intent.in_, self.num2seq) + assert not a.has_shared_memory() + + def test_in_from_2casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_, obj) + if t.elsize == self.type.elsize: + assert a.has_shared_memory(), repr((self.type.dtype, t.dtype)) + else: + assert not a.has_shared_memory() + + @pytest.mark.parametrize("write", ["w", "ro"]) + @pytest.mark.parametrize("order", ["C", "F"]) + @pytest.mark.parametrize("inp", ["2seq", "23seq"]) + def test_in_nocopy(self, write, order, inp): + """Test if intent(in) array can be passed without copies""" + seq = getattr(self, "num" + inp) + obj = np.array(seq, dtype=self.type.dtype, order=order) + obj.setflags(write=(write == 'w')) + a = self.array(obj.shape, + ((order == 'C' and intent.in_.c) or intent.in_), obj) + assert a.has_shared_memory() + + def test_inout_2seq(self): + obj = np.array(self.num2seq, dtype=self.type.dtype) + a = self.array([len(self.num2seq)], intent.inout, obj) + assert a.has_shared_memory() + + try: + a = self.array([2], intent.in_.inout, self.num2seq) + except TypeError as msg: + if not str(msg).startswith( + "failed to initialize intent(inout|inplace|cache) array"): + raise + else: + raise SystemError("intent(inout) should have failed on sequence") + + def test_f_inout_23seq(self): + obj = np.array(self.num23seq, dtype=self.type.dtype, order="F") + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.inout, obj) + assert a.has_shared_memory() + + obj = np.array(self.num23seq, dtype=self.type.dtype, order="C") + shape = (len(self.num23seq), len(self.num23seq[0])) + try: + a = self.array(shape, intent.in_.inout, obj) + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(inout) array"): + raise + else: + raise SystemError( + "intent(inout) should have failed on improper array") + + def test_c_inout_23seq(self): + obj = np.array(self.num23seq, dtype=self.type.dtype) + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.c.inout, obj) + assert a.has_shared_memory() + + def test_in_copy_from_2casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_.copy, obj) + assert not a.has_shared_memory() + + def test_c_in_from_23seq(self): + a = 
self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, + self.num23seq) + assert not a.has_shared_memory() + + def test_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) + assert not a.has_shared_memory() + + def test_f_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype, order="F") + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) + if t.elsize == self.type.elsize: + assert a.has_shared_memory() + else: + assert not a.has_shared_memory() + + def test_c_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj) + if t.elsize == self.type.elsize: + assert a.has_shared_memory() + else: + assert not a.has_shared_memory() + + def test_f_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype, order="F") + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.copy, + obj) + assert not a.has_shared_memory() + + def test_c_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy, + obj) + assert not a.has_shared_memory() + + def test_in_cache_from_2casttype(self): + for t in self.type.all_types(): + if t.elsize != self.type.elsize: + continue + obj = np.array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq), ) + a = self.array(shape, intent.in_.c.cache, obj) + assert a.has_shared_memory() + + a = self.array(shape, intent.in_.cache, obj) + assert a.has_shared_memory() + + obj = np.array(self.num2seq, dtype=t.dtype, order="F") + a = self.array(shape, intent.in_.c.cache, obj) + assert a.has_shared_memory() + + a = self.array(shape, intent.in_.cache, obj) + assert a.has_shared_memory(), repr(t.dtype) + + try: + a = self.array(shape, intent.in_.cache, obj[::-1]) + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(cache) array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on multisegmented array") + + def test_in_cache_from_2casttype_failure(self): + for t in self.type.all_types(): + if t.NAME == 'STRING': + # string elsize is 0, so skipping the test + continue + if t.elsize >= self.type.elsize: + continue + obj = np.array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq), ) + try: + self.array(shape, intent.in_.cache, obj) # Should succeed + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(cache) array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on smaller array") + + def test_cache_hidden(self): + shape = (2, ) + a = self.array(shape, intent.cache.hide, None) + assert a.arr.shape == shape + + shape = (2, 3) + a = self.array(shape, intent.cache.hide, None) + assert a.arr.shape == shape + + shape = (-1, 3) + try: + a = self.array(shape, intent.cache.hide, None) + except ValueError as msg: + if not str(msg).startswith( + "failed to create intent(cache|hide)|optional array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on undefined dimensions") + + def test_hidden(self): + shape = (2, ) + a = self.array(shape, intent.hide, None) + assert 
a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + + shape = (2, 3) + a = self.array(shape, intent.hide, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] + + shape = (2, 3) + a = self.array(shape, intent.c.hide, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] + + shape = (-1, 3) + try: + a = self.array(shape, intent.hide, None) + except ValueError as msg: + if not str(msg).startswith( + "failed to create intent(cache|hide)|optional array"): + raise + else: + raise SystemError( + "intent(hide) should have failed on undefined dimensions") + + def test_optional_none(self): + shape = (2, ) + a = self.array(shape, intent.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + + shape = (2, 3) + a = self.array(shape, intent.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] + + shape = (2, 3) + a = self.array(shape, intent.c.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] + + def test_optional_from_2seq(self): + obj = self.num2seq + shape = (len(obj), ) + a = self.array(shape, intent.optional, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + def test_optional_from_23seq(self): + obj = self.num23seq + shape = (len(obj), len(obj[0])) + a = self.array(shape, intent.optional, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + a = self.array(shape, intent.optional.c, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + def test_inplace(self): + obj = np.array(self.num23seq, dtype=self.type.dtype) + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) + a.arr[1][2] = 54 + assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype) + assert a.arr is obj + assert obj.flags["FORTRAN"] # obj attributes are changed inplace! + assert not obj.flags["CONTIGUOUS"] + + def test_inplace_from_casttype(self): + for t in self.type.cast_types(): + if t is self.type: + continue + obj = np.array(self.num23seq, dtype=t.dtype) + assert obj.dtype.type == t.type + assert obj.dtype.type is not self.type.type + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) + a.arr[1][2] = 54 + assert obj[1][2] == a.arr[1][2] == np.array(54, + dtype=self.type.dtype) + assert a.arr is obj + assert obj.flags["FORTRAN"] # obj attributes changed inplace! + assert not obj.flags["CONTIGUOUS"] + assert obj.dtype.type is self.type.type # obj changed inplace! 
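An editorial aside before the next file: the shared-memory assertions above reduce to one rule. The wrapper may reuse the caller's buffer only when the element size matches and no copy was requested, and intent(inout) additionally demands the right memory order. A minimal numpy-only sketch of that rule (np.shares_memory stands in here for the wrap module's has_shared_memory helper; this snippet is not part of the vendored tests):

    import numpy as np

    obj = np.array([1, 2], dtype=np.int32)

    # Same element size: an intent(in)-style conversion can share the buffer.
    arr = np.asarray(obj, dtype=np.int32)
    assert np.shares_memory(obj, arr)

    # A size-changing cast must copy, as test_in_from_2casttype expects.
    cast = np.asarray(obj, dtype=np.float64)
    assert not np.shares_memory(obj, cast)

    # intent(inout) also needs Fortran order for 2-D data; converting a
    # C-ordered array copies, which is why test_f_inout_23seq raises for
    # C-ordered input instead of copying silently.
    c_arr = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    f_arr = np.asfortranarray(c_arr)
    assert not np.shares_memory(c_arr, f_arr)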
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_assumed_shape.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_assumed_shape.py new file mode 100644 index 00000000..d4664cf8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_assumed_shape.py @@ -0,0 +1,49 @@ +import os +import pytest +import tempfile + +from . import util + + +class TestAssumedShapeSumExample(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "assumed_shape", "foo_free.f90"), + util.getpath("tests", "src", "assumed_shape", "foo_use.f90"), + util.getpath("tests", "src", "assumed_shape", "precision.f90"), + util.getpath("tests", "src", "assumed_shape", "foo_mod.f90"), + util.getpath("tests", "src", "assumed_shape", ".f2py_f2cmap"), + ] + + @pytest.mark.slow + def test_all(self): + r = self.module.fsum([1, 2]) + assert r == 3 + r = self.module.sum([1, 2]) + assert r == 3 + r = self.module.sum_with_use([1, 2]) + assert r == 3 + + r = self.module.mod.sum([1, 2]) + assert r == 3 + r = self.module.mod.fsum([1, 2]) + assert r == 3 + + +class TestF2cmapOption(TestAssumedShapeSumExample): + def setup_method(self): + # Use a custom file name for .f2py_f2cmap + self.sources = list(self.sources) + f2cmap_src = self.sources.pop(-1) + + self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False) + with open(f2cmap_src, "rb") as f: + self.f2cmap_file.write(f.read()) + self.f2cmap_file.close() + + self.sources.append(self.f2cmap_file.name) + self.options = ["--f2cmap", self.f2cmap_file.name] + + super().setup_method() + + def teardown_method(self): + os.unlink(self.f2cmap_file.name) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_block_docstring.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_block_docstring.py new file mode 100644 index 00000000..e0eacc03 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_block_docstring.py @@ -0,0 +1,17 @@ +import sys +import pytest +from . import util + +from numpy.testing import IS_PYPY + + +class TestBlockDocString(util.F2PyTest): + sources = [util.getpath("tests", "src", "block_docstring", "foo.f")] + + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_block_docstring(self): + expected = "bar : 'i'-array(2,3)\n" + assert self.module.block.__doc__ == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_callback.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_callback.py new file mode 100644 index 00000000..5b6c294d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_callback.py @@ -0,0 +1,243 @@ +import math +import textwrap +import sys +import pytest +import threading +import traceback +import time + +import numpy as np +from numpy.testing import IS_PYPY +from . import util + + +class TestF77Callback(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "foo.f")] + + @pytest.mark.parametrize("name", "t,t2".split(",")) + def test_all(self, name): + self.check_function(name) + + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_docstring(self): + expected = textwrap.dedent("""\ + a = t(fun,[fun_extra_args]) + + Wrapper for ``t``. 
+ + Parameters + ---------- + fun : call-back function + + Other Parameters + ---------------- + fun_extra_args : input tuple, optional + Default: () + + Returns + ------- + a : int + + Notes + ----- + Call-back functions:: + + def fun(): return a + Return objects: + a : int + """) + assert self.module.t.__doc__ == expected + + def check_function(self, name): + t = getattr(self.module, name) + r = t(lambda: 4) + assert r == 4 + r = t(lambda a: 5, fun_extra_args=(6, )) + assert r == 5 + r = t(lambda a: a, fun_extra_args=(6, )) + assert r == 6 + r = t(lambda a: 5 + a, fun_extra_args=(7, )) + assert r == 12 + r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi, )) + assert r == 180 + r = t(math.degrees, fun_extra_args=(math.pi, )) + assert r == 180 + + r = t(self.module.func, fun_extra_args=(6, )) + assert r == 17 + r = t(self.module.func0) + assert r == 11 + r = t(self.module.func0._cpointer) + assert r == 11 + + class A: + def __call__(self): + return 7 + + def mth(self): + return 9 + + a = A() + r = t(a) + assert r == 7 + r = t(a.mth) + assert r == 9 + + @pytest.mark.skipif(sys.platform == 'win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_string_callback(self): + def callback(code): + if code == "r": + return 0 + else: + return 1 + + f = getattr(self.module, "string_callback") + r = f(callback) + assert r == 0 + + @pytest.mark.skipif(sys.platform == 'win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_string_callback_array(self): + # See gh-10027 + cu1 = np.zeros((1, ), "S8") + cu2 = np.zeros((1, 8), "c") + cu3 = np.array([""], "S8") + + def callback(cu, lencu): + if cu.shape != (lencu,): + return 1 + if cu.dtype != "S8": + return 2 + if not np.all(cu == b""): + return 3 + return 0 + + f = getattr(self.module, "string_callback_array") + for cu in [cu1, cu2, cu3]: + res = f(callback, cu, cu.size) + assert res == 0 + + def test_threadsafety(self): + # Segfaults if the callback handling is not threadsafe + + errors = [] + + def cb(): + # Sleep here to make it more likely for another thread + # to call their callback at the same time. 
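# (editorial note, not part of the vendored file) The short sleep below
# widens the window in which several threads sit inside the C callback
# trampoline at once; builds without thread-safe callback state (see the
# -DF2PY_USE_PYTHON_TLS variant further down) would be expected to crash here.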
+ time.sleep(1e-3) + + # Check reentrancy + r = self.module.t(lambda: 123) + assert r == 123 + + return 42 + + def runner(name): + try: + for j in range(50): + r = self.module.t(cb) + assert r == 42 + self.check_function(name) + except Exception: + errors.append(traceback.format_exc()) + + threads = [ + threading.Thread(target=runner, args=(arg, )) + for arg in ("t", "t2") for n in range(20) + ] + + for t in threads: + t.start() + + for t in threads: + t.join() + + errors = "\n\n".join(errors) + if errors: + raise AssertionError(errors) + + def test_hidden_callback(self): + try: + self.module.hidden_callback(2) + except Exception as msg: + assert str(msg).startswith("Callback global_f not defined") + + try: + self.module.hidden_callback2(2) + except Exception as msg: + assert str(msg).startswith("cb: Callback global_f not defined") + + self.module.global_f = lambda x: x + 1 + r = self.module.hidden_callback(2) + assert r == 3 + + self.module.global_f = lambda x: x + 2 + r = self.module.hidden_callback(2) + assert r == 4 + + del self.module.global_f + try: + self.module.hidden_callback(2) + except Exception as msg: + assert str(msg).startswith("Callback global_f not defined") + + self.module.global_f = lambda x=0: x + 3 + r = self.module.hidden_callback(2) + assert r == 5 + + # reproducer of gh18341 + r = self.module.hidden_callback2(2) + assert r == 3 + + +class TestF77CallbackPythonTLS(TestF77Callback): + """ + Callback tests using Python thread-local storage instead of + compiler-provided + """ + + options = ["-DF2PY_USE_PYTHON_TLS"] + + +class TestF90Callback(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "gh17797.f90")] + + def test_gh17797(self): + def incr(x): + return x + 123 + + y = np.array([1, 2, 3], dtype=np.int64) + r = self.module.gh17797(incr, y) + assert r == 123 + 1 + 2 + 3 + + +class TestGH18335(util.F2PyTest): + """The reproduction of the reported issue requires specific input that + extensions may break the issue conditions, so the reproducer is + implemented as a separate test class. Do not extend this test with + other tests! 
+ """ + sources = [util.getpath("tests", "src", "callback", "gh18335.f90")] + + def test_gh18335(self): + def foo(x): + x[0] += 1 + + r = self.module.gh18335(foo) + assert r == 123 + 1 + + +class TestGH25211(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "gh25211.f"), + util.getpath("tests", "src", "callback", "gh25211.pyf")] + module_name = "callback2" + + def test_gh18335(self): + def bar(x): + return x*x + + res = self.module.foo(bar) + assert res == 110 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_character.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_character.py new file mode 100644 index 00000000..e55b1b6b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_character.py @@ -0,0 +1,636 @@ +import pytest +import textwrap +from numpy.testing import assert_array_equal, assert_equal, assert_raises +import numpy as np +from numpy.f2py.tests import util + + +class TestCharacterString(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_character_string' + length_list = ['1', '3', 'star'] + + code = '' + for length in length_list: + fsuffix = length + clength = dict(star='(*)').get(length, length) + + code += textwrap.dedent(f""" + + subroutine {fprefix}_input_{fsuffix}(c, o, n) + character*{clength}, intent(in) :: c + integer n + !f2py integer, depend(c), intent(hide) :: n = slen(c) + integer*1, dimension(n) :: o + !f2py intent(out) o + o = transfer(c, o) + end subroutine {fprefix}_input_{fsuffix} + + subroutine {fprefix}_output_{fsuffix}(c, o, n) + character*{clength}, intent(out) :: c + integer n + integer*1, dimension(n), intent(in) :: o + !f2py integer, depend(o), intent(hide) :: n = len(o) + c = transfer(o, c) + end subroutine {fprefix}_output_{fsuffix} + + subroutine {fprefix}_array_input_{fsuffix}(c, o, m, n) + integer m, i, n + character*{clength}, intent(in), dimension(m) :: c + !f2py integer, depend(c), intent(hide) :: m = len(c) + !f2py integer, depend(c), intent(hide) :: n = f2py_itemsize(c) + integer*1, dimension(m, n), intent(out) :: o + do i=1,m + o(i, :) = transfer(c(i), o(i, :)) + end do + end subroutine {fprefix}_array_input_{fsuffix} + + subroutine {fprefix}_array_output_{fsuffix}(c, o, m, n) + character*{clength}, intent(out), dimension(m) :: c + integer n + integer*1, dimension(m, n), intent(in) :: o + !f2py character(f2py_len=n) :: c + !f2py integer, depend(o), intent(hide) :: m = len(o) + !f2py integer, depend(o), intent(hide) :: n = shape(o, 1) + do i=1,m + c(i) = transfer(o(i, :), c(i)) + end do + end subroutine {fprefix}_array_output_{fsuffix} + + subroutine {fprefix}_2d_array_input_{fsuffix}(c, o, m1, m2, n) + integer m1, m2, i, j, n + character*{clength}, intent(in), dimension(m1, m2) :: c + !f2py integer, depend(c), intent(hide) :: m1 = len(c) + !f2py integer, depend(c), intent(hide) :: m2 = shape(c, 1) + !f2py integer, depend(c), intent(hide) :: n = f2py_itemsize(c) + integer*1, dimension(m1, m2, n), intent(out) :: o + do i=1,m1 + do j=1,m2 + o(i, j, :) = transfer(c(i, j), o(i, j, :)) + end do + end do + end subroutine {fprefix}_2d_array_input_{fsuffix} + """) + + @pytest.mark.parametrize("length", length_list) + def test_input(self, length): + fsuffix = {'(*)': 'star'}.get(length, length) + f = getattr(self.module, self.fprefix + '_input_' + fsuffix) + + a = {'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length] + + assert_array_equal(f(a), np.array(list(map(ord, a)), dtype='u1')) + + 
@pytest.mark.parametrize("length", length_list[:-1]) + def test_output(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_output_' + fsuffix) + + a = {'1': 'a', '3': 'abc'}[length] + + assert_array_equal(f(np.array(list(map(ord, a)), dtype='u1')), + a.encode()) + + @pytest.mark.parametrize("length", length_list) + def test_array_input(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_array_input_' + fsuffix) + + a = np.array([{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length], + ], dtype='S') + + expected = np.array([[c for c in s] for s in a], dtype='u1') + assert_array_equal(f(a), expected) + + @pytest.mark.parametrize("length", length_list) + def test_array_output(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_array_output_' + fsuffix) + + expected = np.array( + [{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length]], dtype='S') + + a = np.array([[c for c in s] for s in expected], dtype='u1') + assert_array_equal(f(a), expected) + + @pytest.mark.parametrize("length", length_list) + def test_2d_array_input(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_2d_array_input_' + fsuffix) + + a = np.array([[{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length]], + [{'1': 'f', '3': 'fgh', 'star': 'fghij' * 3}[length], + {'1': 'F', '3': 'FGH', 'star': 'FGHIJ' * 3}[length]]], + dtype='S') + expected = np.array([[[c for c in item] for item in row] for row in a], + dtype='u1', order='F') + assert_array_equal(f(a), expected) + + +class TestCharacter(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_character' + + code = textwrap.dedent(f""" + subroutine {fprefix}_input(c, o) + character, intent(in) :: c + integer*1 o + !f2py intent(out) o + o = transfer(c, o) + end subroutine {fprefix}_input + + subroutine {fprefix}_output(c, o) + character :: c + integer*1, intent(in) :: o + !f2py intent(out) c + c = transfer(o, c) + end subroutine {fprefix}_output + + subroutine {fprefix}_input_output(c, o) + character, intent(in) :: c + character o + !f2py intent(out) o + o = c + end subroutine {fprefix}_input_output + + subroutine {fprefix}_inout(c, n) + character :: c, n + !f2py intent(in) n + !f2py intent(inout) c + c = n + end subroutine {fprefix}_inout + + function {fprefix}_return(o) result (c) + character :: c + character, intent(in) :: o + c = transfer(o, c) + end function {fprefix}_return + + subroutine {fprefix}_array_input(c, o) + character, intent(in) :: c(3) + integer*1 o(3) + !f2py intent(out) o + integer i + do i=1,3 + o(i) = transfer(c(i), o(i)) + end do + end subroutine {fprefix}_array_input + + subroutine {fprefix}_2d_array_input(c, o) + character, intent(in) :: c(2, 3) + integer*1 o(2, 3) + !f2py intent(out) o + integer i, j + do i=1,2 + do j=1,3 + o(i, j) = transfer(c(i, j), o(i, j)) + end do + end do + end subroutine {fprefix}_2d_array_input + + subroutine {fprefix}_array_output(c, o) + character :: c(3) + integer*1, intent(in) :: o(3) + !f2py intent(out) c + do i=1,3 + c(i) = transfer(o(i), c(i)) + end do + end subroutine {fprefix}_array_output + + subroutine {fprefix}_array_inout(c, n) + character :: c(3), n(3) + !f2py intent(in) n(3) + !f2py intent(inout) c(3) + do i=1,3 + c(i) = n(i) + end do + end subroutine {fprefix}_array_inout + + subroutine 
{fprefix}_2d_array_inout(c, n) + character :: c(2, 3), n(2, 3) + !f2py intent(in) n(2, 3) + !f2py intent(inout) c(2, 3) + integer i, j + do i=1,2 + do j=1,3 + c(i, j) = n(i, j) + end do + end do + end subroutine {fprefix}_2d_array_inout + + function {fprefix}_array_return(o) result (c) + character, dimension(3) :: c + character, intent(in) :: o(3) + do i=1,3 + c(i) = o(i) + end do + end function {fprefix}_array_return + + function {fprefix}_optional(o) result (c) + character, intent(in) :: o + !f2py character o = "a" + character :: c + c = o + end function {fprefix}_optional + """) + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_input(self, dtype): + f = getattr(self.module, self.fprefix + '_input') + + assert_equal(f(np.array('a', dtype=dtype)), ord('a')) + assert_equal(f(np.array(b'a', dtype=dtype)), ord('a')) + assert_equal(f(np.array(['a'], dtype=dtype)), ord('a')) + assert_equal(f(np.array('abc', dtype=dtype)), ord('a')) + assert_equal(f(np.array([['a']], dtype=dtype)), ord('a')) + + def test_input_varia(self): + f = getattr(self.module, self.fprefix + '_input') + + assert_equal(f('a'), ord('a')) + assert_equal(f(b'a'), ord(b'a')) + assert_equal(f(''), 0) + assert_equal(f(b''), 0) + assert_equal(f(b'\0'), 0) + assert_equal(f('ab'), ord('a')) + assert_equal(f(b'ab'), ord('a')) + assert_equal(f(['a']), ord('a')) + + assert_equal(f(np.array(b'a')), ord('a')) + assert_equal(f(np.array([b'a'])), ord('a')) + a = np.array('a') + assert_equal(f(a), ord('a')) + a = np.array(['a']) + assert_equal(f(a), ord('a')) + + try: + f([]) + except IndexError as msg: + if not str(msg).endswith(' got 0-list'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on empty list') + + try: + f(97) + except TypeError as msg: + if not str(msg).endswith(' got int instance'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on int value') + + @pytest.mark.parametrize("dtype", ['c', 'S1', 'U1']) + def test_array_input(self, dtype): + f = getattr(self.module, self.fprefix + '_array_input') + + assert_array_equal(f(np.array(['a', 'b', 'c'], dtype=dtype)), + np.array(list(map(ord, 'abc')), dtype='i1')) + assert_array_equal(f(np.array([b'a', b'b', b'c'], dtype=dtype)), + np.array(list(map(ord, 'abc')), dtype='i1')) + + def test_array_input_varia(self): + f = getattr(self.module, self.fprefix + '_array_input') + assert_array_equal(f(['a', 'b', 'c']), + np.array(list(map(ord, 'abc')), dtype='i1')) + assert_array_equal(f([b'a', b'b', b'c']), + np.array(list(map(ord, 'abc')), dtype='i1')) + + try: + f(['a', 'b', 'c', 'd']) + except ValueError as msg: + if not str(msg).endswith( + 'th dimension must be fixed to 3 but got 4'): + raise + else: + raise SystemError( + f'{f.__name__} should have failed on wrong input') + + @pytest.mark.parametrize("dtype", ['c', 'S1', 'U1']) + def test_2d_array_input(self, dtype): + f = getattr(self.module, self.fprefix + '_2d_array_input') + + a = np.array([['a', 'b', 'c'], + ['d', 'e', 'f']], dtype=dtype, order='F') + expected = a.view(np.uint32 if dtype == 'U1' else np.uint8) + assert_array_equal(f(a), expected) + + def test_output(self): + f = getattr(self.module, self.fprefix + '_output') + + assert_equal(f(ord(b'a')), b'a') + assert_equal(f(0), b'\0') + + def test_array_output(self): + f = getattr(self.module, self.fprefix + '_array_output') + + assert_array_equal(f(list(map(ord, 'abc'))), + np.array(list('abc'), dtype='S1')) + + def test_input_output(self): + f = getattr(self.module, self.fprefix + '_input_output') +
assert_equal(f(b'a'), b'a') + assert_equal(f('a'), b'a') + assert_equal(f(''), b'\0') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_inout') + + a = np.array(list('abc'), dtype=dtype) + f(a, 'A') + assert_array_equal(a, np.array(list('Abc'), dtype=a.dtype)) + f(a[1:], 'B') + assert_array_equal(a, np.array(list('ABc'), dtype=a.dtype)) + + a = np.array(['abc'], dtype=dtype) + f(a, 'A') + assert_array_equal(a, np.array(['Abc'], dtype=a.dtype)) + + def test_inout_varia(self): + f = getattr(self.module, self.fprefix + '_inout') + a = np.array('abc', dtype='S3') + f(a, 'A') + assert_array_equal(a, np.array('Abc', dtype=a.dtype)) + + a = np.array(['abc'], dtype='S3') + f(a, 'A') + assert_array_equal(a, np.array(['Abc'], dtype=a.dtype)) + + try: + f('abc', 'A') + except ValueError as msg: + if not str(msg).endswith(' got 3-str'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on str value') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_array_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_array_inout') + n = np.array(['A', 'B', 'C'], dtype=dtype, order='F') + + a = np.array(['a', 'b', 'c'], dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, n) + + a = np.array(['a', 'b', 'c', 'd'], dtype=dtype) + f(a[1:], n) + assert_array_equal(a, np.array(['a', 'A', 'B', 'C'], dtype=dtype)) + + a = np.array([['a', 'b', 'c']], dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, np.array([['A', 'B', 'C']], dtype=dtype)) + + a = np.array(['a', 'b', 'c', 'd'], dtype=dtype, order='F') + try: + f(a, n) + except ValueError as msg: + if not str(msg).endswith( + 'th dimension must be fixed to 3 but got 4'): + raise + else: + raise SystemError( + f'{f.__name__} should have failed on wrong input') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_2d_array_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_2d_array_inout') + n = np.array([['A', 'B', 'C'], + ['D', 'E', 'F']], + dtype=dtype, order='F') + a = np.array([['a', 'b', 'c'], + ['d', 'e', 'f']], + dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, n) + + def test_return(self): + f = getattr(self.module, self.fprefix + '_return') + + assert_equal(f('a'), b'a') + + @pytest.mark.skip('fortran function returning array segfaults') + def test_array_return(self): + f = getattr(self.module, self.fprefix + '_array_return') + + a = np.array(list('abc'), dtype='S1') + assert_array_equal(f(a), a) + + def test_optional(self): + f = getattr(self.module, self.fprefix + '_optional') + + assert_equal(f(), b"a") + assert_equal(f(b'B'), b"B") + + +class TestMiscCharacter(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_misc_character' + + code = textwrap.dedent(f""" + subroutine {fprefix}_gh18684(x, y, m) + character(len=5), dimension(m), intent(in) :: x + character*5, dimension(m), intent(out) :: y + integer i, m + !f2py integer, intent(hide), depend(x) :: m = f2py_len(x) + do i=1,m + y(i) = x(i) + end do + end subroutine {fprefix}_gh18684 + + subroutine {fprefix}_gh6308(x, i) + integer i + !f2py check(i>=0 && i<12) i + character*5 name, x + common name(12) + name(i + 1) = x + end subroutine {fprefix}_gh6308 + + subroutine {fprefix}_gh4519(x) + character(len=*), intent(in) :: x(:) + !f2py intent(out) x + integer :: i + ! Uncomment for debug printing: + !do i=1, size(x) + ! 
print*, "x(",i,")=", x(i) + !end do + end subroutine {fprefix}_gh4519 + + pure function {fprefix}_gh3425(x) result (y) + character(len=*), intent(in) :: x + character(len=len(x)) :: y + integer :: i + do i = 1, len(x) + j = iachar(x(i:i)) + if (j>=iachar("a") .and. j<=iachar("z") ) then + y(i:i) = achar(j-32) + else + y(i:i) = x(i:i) + endif + end do + end function {fprefix}_gh3425 + + subroutine {fprefix}_character_bc_new(x, y, z) + character, intent(in) :: x + character, intent(out) :: y + !f2py character, depend(x) :: y = x + !f2py character, dimension((x=='a'?1:2)), depend(x), intent(out) :: z + character, dimension(*) :: z + !f2py character, optional, check(x == 'a' || x == 'b') :: x = 'a' + !f2py callstatement (*f2py_func)(&x, &y, z) + !f2py callprotoargument character*, character*, character* + if (y.eq.x) then + y = x + else + y = 'e' + endif + z(1) = 'c' + end subroutine {fprefix}_character_bc_new + + subroutine {fprefix}_character_bc_old(x, y, z) + character, intent(in) :: x + character, intent(out) :: y + !f2py character, depend(x) :: y = x[0] + !f2py character, dimension((*x=='a'?1:2)), depend(x), intent(out) :: z + character, dimension(*) :: z + !f2py character, optional, check(*x == 'a' || x[0] == 'b') :: x = 'a' + !f2py callstatement (*f2py_func)(x, y, z) + !f2py callprotoargument char*, char*, char* + if (y.eq.x) then + y = x + else + y = 'e' + endif + z(1) = 'c' + end subroutine {fprefix}_character_bc_old + """) + + def test_gh18684(self): + # Test character(len=5) and character*5 usages + f = getattr(self.module, self.fprefix + '_gh18684') + x = np.array(["abcde", "fghij"], dtype='S5') + y = f(x) + + assert_array_equal(x, y) + + def test_gh6308(self): + # Test character string array in a common block + f = getattr(self.module, self.fprefix + '_gh6308') + + assert_equal(self.module._BLNK_.name.dtype, np.dtype('S5')) + assert_equal(len(self.module._BLNK_.name), 12) + f("abcde", 0) + assert_equal(self.module._BLNK_.name[0], b"abcde") + f("12345", 5) + assert_equal(self.module._BLNK_.name[5], b"12345") + + def test_gh4519(self): + # Test array of assumed length strings + f = getattr(self.module, self.fprefix + '_gh4519') + + for x, expected in [ + ('a', dict(shape=(), dtype=np.dtype('S1'))), + ('text', dict(shape=(), dtype=np.dtype('S4'))), + (np.array(['1', '2', '3'], dtype='S1'), + dict(shape=(3,), dtype=np.dtype('S1'))), + (['1', '2', '34'], + dict(shape=(3,), dtype=np.dtype('S2'))), + (['', ''], dict(shape=(2,), dtype=np.dtype('S1')))]: + r = f(x) + for k, v in expected.items(): + assert_equal(getattr(r, k), v) + + def test_gh3425(self): + # Test returning a copy of assumed length string + f = getattr(self.module, self.fprefix + '_gh3425') + # f is equivalent to bytes.upper + + assert_equal(f('abC'), b'ABC') + assert_equal(f(''), b'') + assert_equal(f('abC12d'), b'ABC12D') + + @pytest.mark.parametrize("state", ['new', 'old']) + def test_character_bc(self, state): + f = getattr(self.module, self.fprefix + '_character_bc_' + state) + + c, a = f() + assert_equal(c, b'a') + assert_equal(len(a), 1) + + c, a = f(b'b') + assert_equal(c, b'b') + assert_equal(len(a), 2) + + assert_raises(Exception, lambda: f(b'c')) + + +class TestStringScalarArr(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "scalar_string.f90")] + + def test_char(self): + for out in (self.module.string_test.string, + self.module.string_test.string77): + expected = () + assert out.shape == expected + expected = '|S8' + assert out.dtype == expected + + def test_char_arr(self): + for out in 
(self.module.string_test.strarr, + self.module.string_test.strarr77): + expected = (5,7) + assert out.shape == expected + expected = '|S12' + assert out.dtype == expected + +class TestStringAssumedLength(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "gh24008.f")] + + def test_gh24008(self): + self.module.greet("joe", "bob") + +class TestStringOptionalInOut(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "gh24662.f90")] + + def test_gh24662(self): + self.module.string_inout_optional() + a = np.array('hi', dtype='S32') + self.module.string_inout_optional(a) + assert "output string" in a.tobytes().decode() + with pytest.raises(Exception): + aa = "Hi" + self.module.string_inout_optional(aa) + + +@pytest.mark.slow +class TestNewCharHandling(util.F2PyTest): + # from v1.24 onwards, gh-19388 + sources = [ + util.getpath("tests", "src", "string", "gh25286.pyf"), + util.getpath("tests", "src", "string", "gh25286.f90") + ] + module_name = "_char_handling_test" + + def test_gh25286(self): + info = self.module.charint('T') + assert info == 2 + +@pytest.mark.slow +class TestBCCharHandling(util.F2PyTest): + # SciPy style, "incorrect" bindings with a hook + sources = [ + util.getpath("tests", "src", "string", "gh25286_bc.pyf"), + util.getpath("tests", "src", "string", "gh25286.f90") + ] + module_name = "_char_handling_test" + + def test_gh25286(self): + info = self.module.charint('T') + assert info == 2 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_common.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_common.py new file mode 100644 index 00000000..68c1b3b3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_common.py @@ -0,0 +1,27 @@ +import os +import sys +import pytest + +import numpy as np +from . import util + + +class TestCommonBlock(util.F2PyTest): + sources = [util.getpath("tests", "src", "common", "block.f")] + + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") + def test_common_block(self): + self.module.initcb() + assert self.module.block.long_bn == np.array(1.0, dtype=np.float64) + assert self.module.block.string_bn == np.array("2", dtype="|S1") + assert self.module.block.ok == np.array(3, dtype=np.int32) + + +class TestCommonWithUse(util.F2PyTest): + sources = [util.getpath("tests", "src", "common", "gh19161.f90")] + + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") + def test_common_gh19161(self): + assert self.module.data.x == 0 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_compile_function.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_compile_function.py new file mode 100644 index 00000000..3c16f319 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_compile_function.py @@ -0,0 +1,117 @@ +"""See https://github.com/numpy/numpy/pull/11937. + +""" +import sys +import os +import uuid +from importlib import import_module +import pytest + +import numpy.f2py + +from . import util + + +def setup_module(): + if not util.has_c_compiler(): + pytest.skip("Needs C compiler") + if not util.has_f77_compiler(): + pytest.skip("Needs FORTRAN 77 compiler") + + +# extra_args can be a list (since gh-11937) or string. 
+# also test absence of extra_args
+@pytest.mark.parametrize("extra_args",
+                         [["--noopt", "--debug"], "--noopt --debug", ""])
+@pytest.mark.leaks_references(reason="Imported module seems never deleted.")
+def test_f2py_init_compile(extra_args):
+    # flush through the f2py __init__ compile() function code path as a
+    # crude test for input handling following migration from
+    # exec_command() to subprocess.check_output() in gh-11937
+
+    # the Fortran 77 syntax requires 6 spaces before any commands, but
+    # more space may be added.
+    fsource = """
+        integer function foo()
+        foo = 10 + 5
+        return
+        end
+    """
+    # use various helper functions in util.py to enable robust build /
+    # compile and reimport cycle in test suite
+    moddir = util.get_module_dir()
+    modname = util.get_temp_module_name()
+
+    cwd = os.getcwd()
+    target = os.path.join(moddir, str(uuid.uuid4()) + ".f")
+    # try running compile() with and without a source_fn provided so
+    # that the code path where a temporary file for writing Fortran
+    # source is created is also explored
+    for source_fn in [target, None]:
+        # mimic the path changing behavior used by build_module() in
+        # util.py, but don't actually use build_module() because it has
+        # its own invocation of subprocess that circumvents the
+        # f2py.compile code block under test
+        with util.switchdir(moddir):
+            ret_val = numpy.f2py.compile(fsource,
+                                         modulename=modname,
+                                         extra_args=extra_args,
+                                         source_fn=source_fn)
+
+        # check for compile success return value
+        assert ret_val == 0
+
+        # we are not currently able to import the Python-Fortran
+        # interface module on Windows / Appveyor, even though we do get
+        # successful compilation on that platform with Python 3.x
+        if sys.platform != "win32":
+            # check for sensible result of Fortran function; that means
+            # we can import the module name in Python and retrieve the
+            # result of the sum operation
+            return_check = import_module(modname)
+            calc_result = return_check.foo()
+            assert calc_result == 15
+            # Removal from sys.modules is not strictly necessary; even with
+            # removal, the module (dict) stays alive.
+            del sys.modules[modname]
+
+
+def test_f2py_init_compile_failure():
+    # verify an appropriate integer status value returned by
+    # f2py.compile() when invalid Fortran is provided
+    ret_val = numpy.f2py.compile(b"invalid")
+    assert ret_val == 1
+
+
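+# Return codes exercised by the compile() tests in this file, summarized
+# for quick reference (derived from the assertions, not from new behavior):
+#   0   - compilation succeeded
+#   1   - f2py ran but the Fortran source was invalid
+#   127 - the build command itself could not be executed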
+def test_f2py_init_compile_bad_cmd():
+    # verify that usage of invalid command in f2py.compile() returns
+    # status value of 127 for historic consistency with exec_command()
+    # error handling
+
+    # patch the sys Python exe path temporarily to induce an OSError
+    # downstream NOTE: how bad of an idea is this patching?
+    try:
+        temp = sys.executable
+        sys.executable = "does not exist"
+
+        # the OSError should take precedence over invalid Fortran
+        ret_val = numpy.f2py.compile(b"invalid")
+        assert ret_val == 127
+    finally:
+        sys.executable = temp
+
+
+@pytest.mark.parametrize(
+    "fsource",
+    [
+        "program test_f2py\nend program test_f2py",
+        b"program test_f2py\nend program test_f2py",
+    ],
+)
+def test_compile_from_strings(tmpdir, fsource):
+    # Make sure we can compile str and bytes gh-12796
+    with util.switchdir(tmpdir):
+        ret_val = numpy.f2py.compile(fsource,
+                                     modulename="test_compile_from_strings",
+                                     extension=".f90")
+        assert ret_val == 0
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_crackfortran.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_crackfortran.py
new file mode 100644
index 00000000..c8d9ddb8
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_crackfortran.py
@@ -0,0 +1,350 @@
+import importlib
+import codecs
+import time
+import unicodedata
+import pytest
+import numpy as np
+from numpy.f2py.crackfortran import markinnerspaces, nameargspattern
+from . import util
+from numpy.f2py import crackfortran
+import textwrap
+import contextlib
+import io
+
+
+class TestNoSpace(util.F2PyTest):
+    # issue gh-15035: add handling for endsubroutine, endfunction with no space
+    # between "end" and the block name
+    sources = [util.getpath("tests", "src", "crackfortran", "gh15035.f")]
+
+    def test_module(self):
+        k = np.array([1, 2, 3], dtype=np.float64)
+        w = np.array([1, 2, 3], dtype=np.float64)
+        self.module.subb(k)
+        assert np.allclose(k, w + 1)
+        self.module.subc([w, k])
+        assert np.allclose(k, w + 1)
+        assert self.module.t0("23") == b"2"
+
+
+class TestPublicPrivate:
+    def test_defaultPrivate(self):
+        fpath = util.getpath("tests", "src", "crackfortran", "privatemod.f90")
+        mod = crackfortran.crackfortran([str(fpath)])
+        assert len(mod) == 1
+        mod = mod[0]
+        assert "private" in mod["vars"]["a"]["attrspec"]
+        assert "public" not in mod["vars"]["a"]["attrspec"]
+        assert "private" in mod["vars"]["b"]["attrspec"]
+        assert "public" not in mod["vars"]["b"]["attrspec"]
+        assert "private" not in mod["vars"]["seta"]["attrspec"]
+        assert "public" in mod["vars"]["seta"]["attrspec"]
+
+    def test_defaultPublic(self, tmp_path):
+        fpath = util.getpath("tests", "src", "crackfortran", "publicmod.f90")
+        mod = crackfortran.crackfortran([str(fpath)])
+        assert len(mod) == 1
+        mod = mod[0]
+        assert "private" in mod["vars"]["a"]["attrspec"]
+        assert "public" not in mod["vars"]["a"]["attrspec"]
+        assert "private" not in mod["vars"]["seta"]["attrspec"]
+        assert "public" in mod["vars"]["seta"]["attrspec"]
+
+    def test_access_type(self, tmp_path):
+        fpath = util.getpath("tests", "src", "crackfortran", "accesstype.f90")
+        mod = crackfortran.crackfortran([str(fpath)])
+        assert len(mod) == 1
+        tt = mod[0]['vars']
+        assert set(tt['a']['attrspec']) == {'private', 'bind(c)'}
+        assert set(tt['b_']['attrspec']) == {'public', 'bind(c)'}
+        assert set(tt['c']['attrspec']) == {'public'}
+
+    def test_nowrap_private_procedures(self, tmp_path):
+        fpath = util.getpath("tests", "src", "crackfortran", "gh23879.f90")
+        mod = crackfortran.crackfortran([str(fpath)])
+        assert len(mod) == 1
+        pyf = crackfortran.crack2fortran(mod)
+        assert 'bar' not in pyf
+
+class TestModuleProcedure():
+    def test_moduleOperators(self, tmp_path):
+        fpath = util.getpath("tests", "src", "crackfortran", "operators.f90")
+        mod = crackfortran.crackfortran([str(fpath)])
+        assert len(mod) == 1
+        mod = mod[0]
+        assert "body" in mod and len(mod["body"]) == 9
+        assert mod["body"][1]["name"] == "operator(.item.)"
+        assert "implementedby" in mod["body"][1]
+        assert mod["body"][1]["implementedby"] == \
+            ["item_int", "item_real"]
+        assert mod["body"][2]["name"] == "operator(==)"
+        assert "implementedby" in mod["body"][2]
+        assert mod["body"][2]["implementedby"] == ["items_are_equal"]
+        assert mod["body"][3]["name"] == "assignment(=)"
+        assert "implementedby" in mod["body"][3]
+        assert mod["body"][3]["implementedby"] == \
+            ["get_int", "get_real"]
+
+    def test_notPublicPrivate(self, tmp_path):
+        fpath = util.getpath("tests", "src", "crackfortran", "pubprivmod.f90")
+        mod = crackfortran.crackfortran([str(fpath)])
+        assert len(mod) == 1
+        mod = mod[0]
+        assert mod['vars']['a']['attrspec'] == ['private', ]
+        assert mod['vars']['b']['attrspec'] == ['public', ]
+        assert mod['vars']['seta']['attrspec'] == ['public', ]
+
+
+class TestExternal(util.F2PyTest):
+    # issue gh-17859: add external attribute support
+    sources = [util.getpath("tests", "src", "crackfortran", "gh17859.f")]
+
+    def test_external_as_statement(self):
+        def incr(x):
+            return x + 123
+
+        r = self.module.external_as_statement(incr)
+        assert r == 123
+
+    def test_external_as_attribute(self):
+        def incr(x):
+            return x + 123
+
+        r = self.module.external_as_attribute(incr)
+        assert r == 123
+
+
+class TestCrackFortran(util.F2PyTest):
+    # gh-2848: commented lines between parameters in subroutine parameter lists
+    sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90")]
+
+    def test_gh2848(self):
+        r = self.module.gh2848(1, 2)
+        assert r == (1, 2)
+
+
+class TestMarkinnerspaces:
+    # gh-14118: markinnerspaces does not handle multiple quotations
+
+    def test_do_not_touch_normal_spaces(self):
+        test_list = ["a ", " a", "a b c", "'abcdefghij'"]
+        for i in test_list:
+            assert markinnerspaces(i) == i
+
+    def test_one_relevant_space(self):
+        assert markinnerspaces("a 'b c' \\' \\'") == "a 'b@_@c' \\' \\'"
+        assert markinnerspaces(r'a "b c" \" \"') == r'a "b@_@c" \" \"'
+
+    def test_ignore_inner_quotes(self):
+        assert markinnerspaces("a 'b c\" \" d' e") == "a 'b@_@c\"@_@\"@_@d' e"
+        assert markinnerspaces("a \"b c' ' d\" e") == "a \"b@_@c'@_@'@_@d\" e"
+
+    def test_multiple_relevant_spaces(self):
+        assert markinnerspaces("a 'b c' 'd e'") == "a 'b@_@c' 'd@_@e'"
+        assert markinnerspaces(r'a "b c" "d e"') == r'a "b@_@c" "d@_@e"'
+
+
+class TestDimSpec(util.F2PyTest):
+    """This test suite tests various expressions that are used as dimension
+    specifications.
+
+    There exist two use cases where analyzing dimension specifications
+    is important.
+
+    In the first case, the size of output arrays must be defined based
+    on the inputs to a Fortran function. Because Fortran supports
+    arbitrary bases for indexing, for instance, `arr(lower:upper)`,
+    f2py has to evaluate an expression `upper - lower + 1` where
+    `lower` and `upper` are arbitrary expressions of input parameters.
+    The evaluation is performed in C, so f2py has to translate Fortran
+    expressions to valid C expressions (an alternative approach is
+    that a developer specifies the corresponding C expressions in a
+    .pyf file).
+
+    In the second case, the user provides an input array with a given
+    size, but some hidden parameters used in the dimension
+    specifications need to be determined from that size. This is a
+    harder problem because f2py has to solve the inverse problem: find
+    a parameter `p` such that `upper(p) - lower(p) + 1` equals the
+    size of the input array. When this equation cannot be solved (e.g.
+    because the input array size is wrong), an error must be raised
+    before calling the Fortran function (which would otherwise likely
+    crash the Python process). f2py currently supports this case only
+    when the equation is linear with respect to the unknown parameter.
+    """
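+
+    # A minimal sketch of the linear inverse problem described above; a
+    # hypothetical helper, not used by the generated test code below. For a
+    # dimension spec linear in `n`, size(a) == coeff*n + offset, so `n` can
+    # be recovered from the input array size.
+    @staticmethod
+    def _solve_linear_dimspec_sketch(size, coeff, offset):
+        # size == coeff*n + offset  =>  n == (size - offset) / coeff
+        n, rem = divmod(size - offset, coeff)
+        if rem:
+            # mirrors the failure mode for an incompatible input array size
+            raise ValueError("array size incompatible with dimension spec")
+        return n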
+
+    suffix = ".f90"
+
+    code_template = textwrap.dedent("""
+      function get_arr_size_{count}(a, n) result (length)
+        integer, intent(in) :: n
+        integer, dimension({dimspec}), intent(out) :: a
+        integer length
+        length = size(a)
+      end function
+
+      subroutine get_inv_arr_size_{count}(a, n)
+        integer :: n
+        ! the value of n is computed in f2py wrapper
+        !f2py intent(out) n
+        integer, dimension({dimspec}), intent(in) :: a
+      end subroutine
+    """)
+
+    linear_dimspecs = [
+        "n", "2*n", "2:n", "n/2", "5 - n/2", "3*n:20", "n*(n+1):n*(n+5)",
+        "2*n, n"
+    ]
+    nonlinear_dimspecs = ["2*n:3*n*n+2*n"]
+    all_dimspecs = linear_dimspecs + nonlinear_dimspecs
+
+    code = ""
+    for count, dimspec in enumerate(all_dimspecs):
+        lst = [(d.split(":")[0] if ":" in d else "1")
+               for d in dimspec.split(',')]
+        code += code_template.format(
+            count=count,
+            dimspec=dimspec,
+            first=", ".join(lst),
+        )
+
+    @pytest.mark.parametrize("dimspec", all_dimspecs)
+    def test_array_size(self, dimspec):
+
+        count = self.all_dimspecs.index(dimspec)
+        get_arr_size = getattr(self.module, f"get_arr_size_{count}")
+
+        for n in [1, 2, 3, 4, 5]:
+            sz, a = get_arr_size(n)
+            assert a.size == sz
+
+    @pytest.mark.parametrize("dimspec", all_dimspecs)
+    def test_inv_array_size(self, dimspec):
+
+        count = self.all_dimspecs.index(dimspec)
+        get_arr_size = getattr(self.module, f"get_arr_size_{count}")
+        get_inv_arr_size = getattr(self.module, f"get_inv_arr_size_{count}")
+
+        for n in [1, 2, 3, 4, 5]:
+            sz, a = get_arr_size(n)
+            if dimspec in self.nonlinear_dimspecs:
+                # one must specify n as input; the call will ensure
+                # that a and n are compatible:
+                n1 = get_inv_arr_size(a, n)
+            else:
+                # in case of linear dependence, n can be determined
+                # from the shape of a:
+                n1 = get_inv_arr_size(a)
+            # n1 may be different from n (for instance, when `a` size
+            # is a function of some `n` fraction) but it must produce
+            # the same sized array
+            sz1, _ = get_arr_size(n1)
+            assert sz == sz1, (n, n1, sz, sz1)
+
+
+class TestModuleDeclaration:
+    def test_dependencies(self, tmp_path):
+        fpath = util.getpath("tests", "src", "crackfortran", "foo_deps.f90")
+        mod = crackfortran.crackfortran([str(fpath)])
+        assert len(mod) == 1
+        assert mod[0]["vars"]["abar"]["="] == "bar('abar')"
+
+
+class TestEval(util.F2PyTest):
+    def test_eval_scalar(self):
+        eval_scalar = crackfortran._eval_scalar
+
+        assert eval_scalar('123', {}) == '123'
+        assert eval_scalar('12 + 3', {}) == '15'
+        assert eval_scalar('a + b', dict(a=1, b=2)) == '3'
+        assert eval_scalar('"123"', {}) == "'123'"
+
+
+class TestFortranReader(util.F2PyTest):
+    @pytest.mark.parametrize("encoding",
+                             ['ascii', 'utf-8', 'utf-16', 'utf-32'])
+    def test_input_encoding(self, tmp_path, encoding):
+        # gh-635
+        f_path = tmp_path / f"input_with_{encoding}_encoding.f90"
+        with f_path.open('w', encoding=encoding) as ff:
+            ff.write("""
+                     subroutine foo()
+                     end subroutine foo
+                     """)
+        mod = crackfortran.crackfortran([str(f_path)])
+        assert mod[0]['name'] == 'foo'
+
+
+class TestUnicodeComment(util.F2PyTest):
+    
sources = [util.getpath("tests", "src", "crackfortran", "unicode_comment.f90")] + + @pytest.mark.skipif( + (importlib.util.find_spec("charset_normalizer") is None), + reason="test requires charset_normalizer which is not installed", + ) + def test_encoding_comment(self): + self.module.foo(3) + + +class TestNameArgsPatternBacktracking: + @pytest.mark.parametrize( + ['adversary'], + [ + ('@)@bind@(@',), + ('@)@bind @(@',), + ('@)@bind foo bar baz@(@',) + ] + ) + def test_nameargspattern_backtracking(self, adversary): + '''address ReDOS vulnerability: + https://github.com/numpy/numpy/issues/23338''' + trials_per_batch = 12 + batches_per_regex = 4 + start_reps, end_reps = 15, 25 + for ii in range(start_reps, end_reps): + repeated_adversary = adversary * ii + # test times in small batches. + # this gives us more chances to catch a bad regex + # while still catching it before too long if it is bad + for _ in range(batches_per_regex): + times = [] + for _ in range(trials_per_batch): + t0 = time.perf_counter() + mtch = nameargspattern.search(repeated_adversary) + times.append(time.perf_counter() - t0) + # our pattern should be much faster than 0.2s per search + # it's unlikely that a bad regex will pass even on fast CPUs + assert np.median(times) < 0.2 + assert not mtch + # if the adversary is capped with @)@, it becomes acceptable + # according to the old version of the regex. + # that should still be true. + good_version_of_adversary = repeated_adversary + '@)@' + assert nameargspattern.search(good_version_of_adversary) + + +class TestFunctionReturn(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "gh23598.f90")] + + def test_function_rettype(self): + # gh-23598 + assert self.module.intproduct(3, 4) == 12 + + +class TestFortranGroupCounters(util.F2PyTest): + def test_end_if_comment(self): + # gh-23533 + fpath = util.getpath("tests", "src", "crackfortran", "gh23533.f") + try: + crackfortran.crackfortran([str(fpath)]) + except Exception as exc: + assert False, f"'crackfortran.crackfortran' raised an exception {exc}" + + +class TestF77CommonBlockReader(): + def test_gh22648(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "gh22648.pyf") + with contextlib.redirect_stdout(io.StringIO()) as stdout_f2py: + mod = crackfortran.crackfortran([str(fpath)]) + assert "Mismatch" not in stdout_f2py.getvalue() diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_data.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_data.py new file mode 100644 index 00000000..4e5604c0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_data.py @@ -0,0 +1,70 @@ +import os +import pytest +import numpy as np + +from . 
import util +from numpy.f2py.crackfortran import crackfortran + + +class TestData(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_stmts.f90")] + + # For gh-23276 + def test_data_stmts(self): + assert self.module.cmplxdat.i == 2 + assert self.module.cmplxdat.j == 3 + assert self.module.cmplxdat.x == 1.5 + assert self.module.cmplxdat.y == 2.0 + assert self.module.cmplxdat.pi == 3.1415926535897932384626433832795028841971693993751058209749445923078164062 + assert self.module.cmplxdat.medium_ref_index == np.array(1.+0.j) + assert np.all(self.module.cmplxdat.z == np.array([3.5, 7.0])) + assert np.all(self.module.cmplxdat.my_array == np.array([ 1.+2.j, -3.+4.j])) + assert np.all(self.module.cmplxdat.my_real_array == np.array([ 1., 2., 3.])) + assert np.all(self.module.cmplxdat.ref_index_one == np.array([13.0 + 21.0j])) + assert np.all(self.module.cmplxdat.ref_index_two == np.array([-30.0 + 43.0j])) + + def test_crackedlines(self): + mod = crackfortran(self.sources) + assert mod[0]['vars']['x']['='] == '1.5' + assert mod[0]['vars']['y']['='] == '2.0' + assert mod[0]['vars']['pi']['='] == '3.1415926535897932384626433832795028841971693993751058209749445923078164062d0' + assert mod[0]['vars']['my_real_array']['='] == '(/1.0d0, 2.0d0, 3.0d0/)' + assert mod[0]['vars']['ref_index_one']['='] == '(13.0d0, 21.0d0)' + assert mod[0]['vars']['ref_index_two']['='] == '(-30.0d0, 43.0d0)' + assert mod[0]['vars']['my_array']['='] == '(/(1.0d0, 2.0d0), (-3.0d0, 4.0d0)/)' + assert mod[0]['vars']['z']['='] == '(/3.5, 7.0/)' + +class TestDataF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_common.f")] + + # For gh-23276 + def test_data_stmts(self): + assert self.module.mycom.mydata == 0 + + def test_crackedlines(self): + mod = crackfortran(str(self.sources[0])) + print(mod[0]['vars']) + assert mod[0]['vars']['mydata']['='] == '0' + + +class TestDataMultiplierF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_multiplier.f")] + + # For gh-23276 + def test_data_stmts(self): + assert self.module.mycom.ivar1 == 3 + assert self.module.mycom.ivar2 == 3 + assert self.module.mycom.ivar3 == 2 + assert self.module.mycom.ivar4 == 2 + assert self.module.mycom.evar5 == 0 + + +class TestDataWithCommentsF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_with_comments.f")] + + # For gh-23276 + def test_data_stmts(self): + assert len(self.module.mycom.mytab) == 3 + assert self.module.mycom.mytab[0] == 0 + assert self.module.mycom.mytab[1] == 4 + assert self.module.mycom.mytab[2] == 0 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_docs.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_docs.py new file mode 100644 index 00000000..6631dd82 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_docs.py @@ -0,0 +1,55 @@ +import os +import pytest +import numpy as np +from numpy.testing import assert_array_equal, assert_equal +from . 
import util
+
+
+def get_docdir():
+    # assuming that documentation tests are run from a source
+    # directory
+    return os.path.abspath(os.path.join(
+        os.path.dirname(__file__),
+        '..', '..', '..',
+        'doc', 'source', 'f2py', 'code'))
+
+
+pytestmark = pytest.mark.skipif(
+    not os.path.isdir(get_docdir()),
+    reason=('Could not find f2py documentation sources'
+            f' ({get_docdir()} does not exist)'))
+
+
+def _path(*a):
+    return os.path.join(*((get_docdir(),) + a))
+
+
+class TestDocAdvanced(util.F2PyTest):
+    # options = ['--debug-capi', '--build-dir', '/tmp/build-f2py']
+    sources = [_path('asterisk1.f90'), _path('asterisk2.f90'),
+               _path('ftype.f')]
+
+    def test_asterisk1(self):
+        foo = getattr(self.module, 'foo1')
+        assert_equal(foo(), b'123456789A12')
+
+    def test_asterisk2(self):
+        foo = getattr(self.module, 'foo2')
+        assert_equal(foo(2), b'12')
+        assert_equal(foo(12), b'123456789A12')
+        assert_equal(foo(24), b'123456789A123456789B')
+
+    def test_ftype(self):
+        ftype = self.module
+        ftype.foo()
+        assert_equal(ftype.data.a, 0)
+        ftype.data.a = 3
+        ftype.data.x = [1, 2, 3]
+        assert_equal(ftype.data.a, 3)
+        assert_array_equal(ftype.data.x,
+                           np.array([1, 2, 3], dtype=np.float32))
+        ftype.data.x[1] = 45
+        assert_array_equal(ftype.data.x,
+                           np.array([1, 45, 3], dtype=np.float32))
+
+    # TODO: implement test methods for other example Fortran codes
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_f2cmap.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_f2cmap.py
new file mode 100644
index 00000000..d2967e4f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_f2cmap.py
@@ -0,0 +1,15 @@
+from . import util
+import numpy as np
+
+class TestF2Cmap(util.F2PyTest):
+    sources = [
+        util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90"),
+        util.getpath("tests", "src", "f2cmap", ".f2py_f2cmap")
+    ]
+
+    # gh-15095
+    def test_long_long_map(self):
+        inp = np.ones(3)
+        out = self.module.func1(inp)
+        exp_out = 3
+        assert out == exp_out
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_f2py2e.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_f2py2e.py
new file mode 100644
index 00000000..659e0e96
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_f2py2e.py
@@ -0,0 +1,896 @@
+import textwrap, re, sys, subprocess, shlex
+from pathlib import Path
+from collections import namedtuple
+import platform
+
+import pytest
+
+from . import util
+from numpy.f2py.f2py2e import main as f2pycli
+
+#########################
+# CLI utils and classes #
+#########################
+
+PPaths = namedtuple("PPaths", "finp, f90inp, pyf, wrap77, wrap90, cmodf")
+
+
+def get_io_paths(fname_inp, mname="untitled"):
+    """Takes in a temporary file for testing and returns the expected output and input paths
+
+    Here expected output is essentially one of any of the possible generated
+    files.
+
+    .. note::
+
+        Since this does not actually run f2py, none of these are guaranteed to
+        exist, and module names are typically incorrect
+
+    Parameters
+    ----------
+    fname_inp : str
+        The input filename
+    mname : str, optional
+        The name of the module, untitled by default
+
+    Returns
+    -------
+    genp : NamedTuple PPaths
+        The possible paths which are generated, not all of which exist
+    """
+    bpath = Path(fname_inp)
+    return PPaths(
+        finp=bpath.with_suffix(".f"),
+        f90inp=bpath.with_suffix(".f90"),
+        pyf=bpath.with_suffix(".pyf"),
+        wrap77=bpath.with_name(f"{mname}-f2pywrappers.f"),
+        wrap90=bpath.with_name(f"{mname}-f2pywrappers2.f90"),
+        cmodf=bpath.with_name(f"{mname}module.c"),
+    )
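+
+
+# For illustration (paths here are only examples, nothing is executed):
+# get_io_paths("/tmp/hello.f90", mname="m") returns /tmp/hello.f,
+# /tmp/hello.f90, /tmp/hello.pyf, /tmp/m-f2pywrappers.f,
+# /tmp/m-f2pywrappers2.f90 and /tmp/mmodule.c, whether or not f2py
+# would actually write each of them.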
+
+
+##########################
+# CLI Fixtures and Tests #
+##########################
+
+
+@pytest.fixture(scope="session")
+def hello_world_f90(tmpdir_factory):
+    """Generates a single f90 file for testing"""
+    fdat = util.getpath("tests", "src", "cli", "hiworld.f90").read_text()
+    fn = tmpdir_factory.getbasetemp() / "hello.f90"
+    fn.write_text(fdat, encoding="ascii")
+    return fn
+
+
+@pytest.fixture(scope="session")
+def gh23598_warn(tmpdir_factory):
+    """F90 file for testing warnings in gh23598"""
+    fdat = util.getpath("tests", "src", "crackfortran", "gh23598Warn.f90").read_text()
+    fn = tmpdir_factory.getbasetemp() / "gh23598Warn.f90"
+    fn.write_text(fdat, encoding="ascii")
+    return fn
+
+
+@pytest.fixture(scope="session")
+def gh22819_cli(tmpdir_factory):
+    """Signature (.pyf) file for testing disallowed CLI arguments in gh-22819"""
+    fdat = util.getpath("tests", "src", "cli", "gh_22819.pyf").read_text()
+    fn = tmpdir_factory.getbasetemp() / "gh_22819.pyf"
+    fn.write_text(fdat, encoding="ascii")
+    return fn
+
+
+@pytest.fixture(scope="session")
+def hello_world_f77(tmpdir_factory):
+    """Generates a single f77 file for testing"""
+    fdat = util.getpath("tests", "src", "cli", "hi77.f").read_text()
+    fn = tmpdir_factory.getbasetemp() / "hello.f"
+    fn.write_text(fdat, encoding="ascii")
+    return fn
+
+
+@pytest.fixture(scope="session")
+def retreal_f77(tmpdir_factory):
+    """Generates a single f77 file for testing"""
+    fdat = util.getpath("tests", "src", "return_real", "foo77.f").read_text()
+    fn = tmpdir_factory.getbasetemp() / "foo.f"
+    fn.write_text(fdat, encoding="ascii")
+    return fn
+
+
+@pytest.fixture(scope="session")
+def f2cmap_f90(tmpdir_factory):
+    """Generates a single f90 file for testing"""
+    fdat = util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90").read_text()
+    f2cmap = util.getpath("tests", "src", "f2cmap", ".f2py_f2cmap").read_text()
+    fn = tmpdir_factory.getbasetemp() / "f2cmap.f90"
+    fmap = tmpdir_factory.getbasetemp() / "mapfile"
+    fn.write_text(fdat, encoding="ascii")
+    fmap.write_text(f2cmap, encoding="ascii")
+    return fn
+
+
+def test_gh22819_cli(capfd, gh22819_cli, monkeypatch):
+    """Check that module names are handled correctly
+    gh-22819
+    Essentially, the -m name cannot be used to import the module, so the module
+    named in the .pyf needs to be used instead
+
+    CLI :: -m and a .pyf file
+    """
+    ipath = Path(gh22819_cli)
+    monkeypatch.setattr(sys, "argv", f"f2py -m blah {ipath}".split())
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        gen_paths = [item.name for item in ipath.parent.rglob("*") if item.is_file()]
+        assert "blahmodule.c" not in gen_paths  # shouldn't be generated
+        assert "blah-f2pywrappers.f" not in gen_paths
+        assert "test_22819-f2pywrappers.f" in gen_paths
+        assert "test_22819module.c" in gen_paths
+        assert "Ignoring blah"
+
+
+def test_gh22819_many_pyf(capfd, gh22819_cli, monkeypatch):
+    
"""Only one .pyf file allowed + gh-22819 + CLI :: .pyf files + """ + ipath = Path(gh22819_cli) + monkeypatch.setattr(sys, "argv", f"f2py -m blah {ipath} hello.pyf".split()) + with util.switchdir(ipath.parent): + with pytest.raises(ValueError, match="Only one .pyf file per call"): + f2pycli() + + +def test_gh23598_warn(capfd, gh23598_warn, monkeypatch): + foutl = get_io_paths(gh23598_warn, mname="test") + ipath = foutl.f90inp + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test'.split()) + + with util.switchdir(ipath.parent): + f2pycli() # Generate files + wrapper = foutl.wrap90.read_text() + assert "intproductf2pywrap, intpr" not in wrapper + + +def test_gen_pyf(capfd, hello_world_f90, monkeypatch): + """Ensures that a signature file is generated via the CLI + CLI :: -h + """ + ipath = Path(hello_world_f90) + opath = Path(hello_world_f90).stem + ".pyf" + monkeypatch.setattr(sys, "argv", f'f2py -h {opath} {ipath}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() # Generate wrappers + out, _ = capfd.readouterr() + assert "Saving signatures to file" in out + assert Path(f'{opath}').exists() + + +def test_gen_pyf_stdout(capfd, hello_world_f90, monkeypatch): + """Ensures that a signature file can be dumped to stdout + CLI :: -h + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -h stdout {ipath}'.split()) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Saving signatures to file" in out + assert "function hi() ! in " in out + + +def test_gen_pyf_no_overwrite(capfd, hello_world_f90, monkeypatch): + """Ensures that the CLI refuses to overwrite signature files + CLI :: -h without --overwrite-signature + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -h faker.pyf {ipath}'.split()) + + with util.switchdir(ipath.parent): + Path("faker.pyf").write_text("Fake news", encoding="ascii") + with pytest.raises(SystemExit): + f2pycli() # Refuse to overwrite + _, err = capfd.readouterr() + assert "Use --overwrite-signature to overwrite" in err + + +@pytest.mark.skipif((platform.system() != 'Linux') or (sys.version_info <= (3, 12)), + reason='Compiler and 3.12 required') +def test_untitled_cli(capfd, hello_world_f90, monkeypatch): + """Check that modules are named correctly + + CLI :: defaults + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f"f2py --backend meson -c {ipath}".split()) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "untitledmodule.c" in out + + +@pytest.mark.skipif((platform.system() != 'Linux') or (sys.version_info <= (3, 12)), reason='Compiler and 3.12 required') +def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): + """Check that no distutils imports are performed on 3.12 + CLI :: --fcompiler --help-link --backend distutils + """ + MNAME = "hi" + foutl = get_io_paths(hello_world_f90, mname=MNAME) + ipath = foutl.f90inp + monkeypatch.setattr( + sys, "argv", f"f2py {ipath} -c --fcompiler=gfortran -m {MNAME}".split() + ) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "--fcompiler cannot be used with meson" in out + monkeypatch.setattr( + sys, "argv", f"f2py --help-link".split() + ) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Use --dep for meson builds" in out + MNAME = "hi2" # Needs to be different for a new -c + monkeypatch.setattr( + sys, "argv", f"f2py {ipath} -c -m {MNAME} --backend distutils".split() 
+ ) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Cannot use distutils backend with Python>=3.12" in out + + +@pytest.mark.xfail +def test_f2py_skip(capfd, retreal_f77, monkeypatch): + """Tests that functions can be skipped + CLI :: skip: + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + remaining = "td s0" + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test skip: {toskip}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not found the body of interfaced routine "{skey}". Skipping.' + in err) + for rkey in remaining.split(): + assert f'Constructing wrapper function "{rkey}"' in out + + +def test_f2py_only(capfd, retreal_f77, monkeypatch): + """Test that functions can be kept by only: + CLI :: only: + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + tokeep = "td s0" + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test only: {tokeep}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.' + in err) + for rkey in tokeep.split(): + assert f'Constructing wrapper function "{rkey}"' in out + + +def test_file_processing_switch(capfd, hello_world_f90, retreal_f77, + monkeypatch): + """Tests that it is possible to return to file processing mode + CLI :: : + BUG: numpy-gh #20520 + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + ipath2 = Path(hello_world_f90) + tokeep = "td s0 hi" # hi is in ipath2 + mname = "blah" + monkeypatch.setattr( + sys, + "argv", + f'f2py {ipath} -m {mname} only: {tokeep} : {ipath2}'.split( + ), + ) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.' 
+                in err)
+        for rkey in tokeep.split():
+            assert f'Constructing wrapper function "{rkey}"' in out
+
+
+def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch):
+    """Checks the generation of files based on a module name
+    CLI :: -m
+    """
+    MNAME = "hi"
+    foutl = get_io_paths(hello_world_f90, mname=MNAME)
+    ipath = foutl.f90inp
+    monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split())
+    with util.switchdir(ipath.parent):
+        f2pycli()
+
+    # Always generate C module
+    assert Path.exists(foutl.cmodf)
+    # File contains a function, check for F77 wrappers
+    assert Path.exists(foutl.wrap77)
+
+
+def test_mod_gen_gh25263(capfd, hello_world_f77, monkeypatch):
+    """Check that pyf files are correctly generated with module structure
+    CLI :: -m -h pyf_file
+    BUG: numpy-gh #20520
+    """
+    MNAME = "hi"
+    foutl = get_io_paths(hello_world_f77, mname=MNAME)
+    ipath = foutl.finp
+    monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME} -h hi.pyf'.split())
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        with Path('hi.pyf').open() as hipyf:
+            pyfdat = hipyf.read()
+            assert "python module hi" in pyfdat
+
+
+def test_lower_cmod(capfd, hello_world_f77, monkeypatch):
+    """Lowers cases by flag or when -h is present
+
+    CLI :: --[no-]lower
+    """
+    foutl = get_io_paths(hello_world_f77, mname="test")
+    ipath = foutl.finp
+    capshi = re.compile(r"HI\(\)")
+    capslo = re.compile(r"hi\(\)")
+    # Case I: --lower is passed
+    monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m test --lower'.split())
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert capslo.search(out) is not None
+        assert capshi.search(out) is None
+    # Case II: --no-lower is passed
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py {ipath} -m test --no-lower'.split())
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert capslo.search(out) is None
+        assert capshi.search(out) is not None
+
+
+def test_lower_sig(capfd, hello_world_f77, monkeypatch):
+    """Lowers cases in signature files by flag or when -h is present
+
+    CLI :: --[no-]lower -h
+    """
+    foutl = get_io_paths(hello_world_f77, mname="test")
+    ipath = foutl.finp
+    # Signature files
+    capshi = re.compile(r"Block: HI")
+    capslo = re.compile(r"Block: hi")
+    # Case I: --lower is implied by -h
+    # TODO: Clean up to prevent passing --overwrite-signature
+    monkeypatch.setattr(
+        sys,
+        "argv",
+        f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature'.split(),
+    )
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert capslo.search(out) is not None
+        assert capshi.search(out) is None
+
+    # Case II: --no-lower overrides -h
+    monkeypatch.setattr(
+        sys,
+        "argv",
+        f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature --no-lower'
+        .split(),
+    )
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert capslo.search(out) is None
+        assert capshi.search(out) is not None
+
+
+def test_build_dir(capfd, hello_world_f90, monkeypatch):
+    """Ensures that the build directory can be specified
+
+    CLI :: --build-dir
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    odir = "tttmp"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --build-dir {odir}'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert f"Wrote C/API module \"{mname}\"" in out
+
+
+def test_overwrite(capfd, hello_world_f90, monkeypatch):
+    """Ensures that existing signature files can be overwritten
+
+    CLI :: --overwrite-signature
+    """
+    ipath = Path(hello_world_f90)
+    monkeypatch.setattr(
+        sys, "argv",
+        f'f2py -h faker.pyf {ipath} --overwrite-signature'.split())
+
+    with util.switchdir(ipath.parent):
+        Path("faker.pyf").write_text("Fake news", encoding="ascii")
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "Saving signatures to file" in out
+
+
+def test_latexdoc(capfd, hello_world_f90, monkeypatch):
+    """Ensures that TeX documentation is written out
+
+    CLI :: --latex-doc
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --latex-doc'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "Documentation is saved to file" in out
+        with Path(f"{mname}module.tex").open() as otex:
+            assert "\\documentclass" in otex.read()
+
+
+def test_nolatexdoc(capfd, hello_world_f90, monkeypatch):
+    """Ensures that TeX documentation is not written out
+
+    CLI :: --no-latex-doc
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --no-latex-doc'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "Documentation is saved to file" not in out
+
+
+def test_shortlatex(capfd, hello_world_f90, monkeypatch):
+    """Ensures that truncated documentation is written out
+
+    TODO: Test to ensure this has no effect without --latex-doc
+    CLI :: --latex-doc --short-latex
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(
+        sys,
+        "argv",
+        f'f2py -m {mname} {ipath} --latex-doc --short-latex'.split(),
+    )
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "Documentation is saved to file" in out
+        with Path(f"./{mname}module.tex").open() as otex:
+            assert "\\documentclass" not in otex.read()
+
+
+def test_restdoc(capfd, hello_world_f90, monkeypatch):
+    """Ensures that ReST documentation is written out
+
+    CLI :: --rest-doc
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --rest-doc'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "ReST Documentation is saved to file" in out
+        with Path(f"./{mname}module.rest").open() as orst:
+            assert r".. -*- rest -*-" in orst.read()
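+
+
+# For orientation (a hypothetical session, not run by these tests):
+# `f2py -m blah blah.f90 --latex-doc` writes blahmodule.tex next to the
+# generated C module, while `--rest-doc` writes blahmodule.rest instead.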
+
+
+def test_norestexdoc(capfd, hello_world_f90, monkeypatch):
+    """Ensures that ReST documentation is not written out
+
+    CLI :: --no-rest-doc
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --no-rest-doc'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "ReST Documentation is saved to file" not in out
+
+
+def test_debugcapi(capfd, hello_world_f90, monkeypatch):
+    """Ensures that debugging wrappers are written
+
+    CLI :: --debug-capi
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --debug-capi'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        with Path(f"./{mname}module.c").open() as ocmod:
+            assert r"#define DEBUGCFUNCS" in ocmod.read()
+
+
+@pytest.mark.xfail(reason="Consistently fails on CI.")
+def test_debugcapi_bld(hello_world_f90, monkeypatch):
+    """Ensures that debugging wrappers work
+
+    CLI :: --debug-capi -c
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} -c --debug-capi'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        cmd_run = shlex.split("python3 -c \"import blah; blah.hi()\"")
+        rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8')
+        eout = ' Hello World\n'
+        eerr = textwrap.dedent("""\
+debug-capi:Python C/API function blah.hi()
+debug-capi:float hi=:output,hidden,scalar
+debug-capi:hi=0
+debug-capi:Fortran subroutine `f2pywraphi(&hi)'
+debug-capi:hi=0
+debug-capi:Building return value.
+debug-capi:Python C/API function blah.hi: successful.
+debug-capi:Freeing memory.
+        """)
+        assert rout.stdout == eout
+        assert rout.stderr == eerr
+
+
+def test_wrapfunc_def(capfd, hello_world_f90, monkeypatch):
+    """Ensures that fortran subroutine wrappers for F77 are included by default
+
+    CLI :: --[no]-wrap-functions
+    """
+    # Implied
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath}'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert r"Fortran 77 wrappers are saved to" in out
+
+    # Explicit
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --wrap-functions'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert r"Fortran 77 wrappers are saved to" in out
+
+
+def test_nowrapfunc(capfd, hello_world_f90, monkeypatch):
+    """Ensures that fortran subroutine wrappers for F77 can be disabled
+
+    CLI :: --no-wrap-functions
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --no-wrap-functions'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert r"Fortran 77 wrappers are saved to" not in out
+
+
+def test_inclheader(capfd, hello_world_f90, monkeypatch):
+    """Add to the include directories
+
+    CLI :: -include
+    TODO: Document this in the help string
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    monkeypatch.setattr(
+        sys,
+        "argv",
+        f'f2py -m {mname} {ipath} -include<stdbool.h> -include<stdio.h> '.
+        split(),
+    )
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        with Path(f"./{mname}module.c").open() as ocmod:
+            ocmr = ocmod.read()
+            assert "#include <stdbool.h>" in ocmr
+            assert "#include <stdio.h>" in ocmr
+
+
+def test_inclpath():
+    """Add to the include directories
+
+    CLI :: --include-paths
+    """
+    # TODO: populate
+    pass
+
+
+def test_hlink():
+    """Add to the include directories
+
+    CLI :: --help-link
+    """
+    # TODO: populate
+    pass
+
+
+def test_f2cmap(capfd, f2cmap_f90, monkeypatch):
+    """Check that Fortran-to-Python KIND specs can be passed
+
+    CLI :: --f2cmap
+    """
+    ipath = Path(f2cmap_f90)
+    monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --f2cmap mapfile'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "Reading f2cmap from 'mapfile' ..." in out
+        assert "Mapping \"real(kind=real32)\" to \"float\"" in out
+        assert "Mapping \"real(kind=real64)\" to \"double\"" in out
+        assert "Mapping \"integer(kind=int64)\" to \"long_long\"" in out
+        assert "Successfully applied user defined f2cmap changes" in out
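+
+
+# For reference, a .f2py_f2cmap file (the `mapfile` above) is a Python
+# dict-of-dicts literal mapping Fortran type/kind pairs to C type names;
+# a minimal sketch consistent with the assertions above:
+#
+#     {'real': {'real32': 'float', 'real64': 'double'},
+#      'integer': {'int64': 'long_long'}}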
+
+
+def test_quiet(capfd, hello_world_f90, monkeypatch):
+    """Reduce verbosity
+
+    CLI :: --quiet
+    """
+    ipath = Path(hello_world_f90)
+    monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --quiet'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert len(out) == 0
+
+
+def test_verbose(capfd, hello_world_f90, monkeypatch):
+    """Increase verbosity
+
+    CLI :: --verbose
+    """
+    ipath = Path(hello_world_f90)
+    monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --verbose'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert "analyzeline" in out
+
+
+def test_version(capfd, monkeypatch):
+    """Ensure version
+
+    CLI :: -v
+    """
+    monkeypatch.setattr(sys, "argv", 'f2py -v'.split())
+    # TODO: f2py2e should not call sys.exit() after printing the version
+    with pytest.raises(SystemExit):
+        f2pycli()
+    # read the captured output after the SystemExit so the checks run
+    out, _ = capfd.readouterr()
+    import numpy as np
+    assert np.__version__ == out.strip()
+
+
+@pytest.mark.xfail(reason="Consistently fails on CI.")
+def test_npdistop(hello_world_f90, monkeypatch):
+    """
+    CLI :: -c
+    """
+    ipath = Path(hello_world_f90)
+    monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        cmd_run = shlex.split("python -c \"import blah; blah.hi()\"")
+        rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8')
+        eout = ' Hello World\n'
+        assert rout.stdout == eout
+
+
+# Numpy distutils flags
+# TODO: These should be tested separately
+
+
+def test_npd_fcompiler():
+    """
+    CLI :: -c --fcompiler
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_compiler():
+    """
+    CLI :: -c --compiler
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_help_fcompiler():
+    """
+    CLI :: -c --help-fcompiler
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_f77exec():
+    """
+    CLI :: -c --f77exec
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_f90exec():
+    """
+    CLI :: -c --f90exec
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_f77flags():
+    """
+    CLI :: -c --f77flags
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_f90flags():
+    """
+    CLI :: -c --f90flags
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_opt():
+    """
+    CLI :: -c --opt
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_arch():
+    """
+    CLI :: -c --arch
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_noopt():
+    """
+    CLI :: -c --noopt
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_noarch():
+    """
+    CLI :: -c --noarch
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_debug():
+    """
+    CLI :: -c --debug
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_link_auto():
+    """
+    CLI :: -c --link-<resource>
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_lib():
+    """
+    CLI :: -c -L/path/to/lib/ -l<libname>
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_define():
+    """
+    CLI :: -D<macro>
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_undefine():
+    """
+    CLI :: -U<macro>
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_incl():
+    """
+    CLI :: -I/path/to/include/
+    """
+    # TODO: populate
+    pass
+
+
+def test_npd_linker():
+    """
+    CLI :: <filename>.o <filename>.so <filename>.a
+    """
+    # TODO: populate
+    pass
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_isoc.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_isoc.py
new file mode 100644
index 00000000..594bd7ca
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_isoc.py
@@ -0,0 +1,52 @@
+from . import util
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+
+class TestISOC(util.F2PyTest):
+    sources = [
+        util.getpath("tests", "src", "isocintrin", "isoCtests.f90"),
+    ]
+
+    # gh-24553
+    def test_c_double(self):
+        out = self.module.coddity.c_add(1, 2)
+        exp_out = 3
+        assert out == exp_out
+
+    # gh-9693
+    def test_bindc_function(self):
+        out = self.module.coddity.wat(1, 20)
+        exp_out = 8
+        assert out == exp_out
+
+    # gh-25207
+    def test_bindc_kinds(self):
+        out = self.module.coddity.c_add_int64(1, 20)
+        exp_out = 21
+        assert out == exp_out
+
+    # gh-25207
+    def test_bindc_add_arr(self):
+        a = np.array([1, 2, 3])
+        b = np.array([1, 2, 3])
+        out = self.module.coddity.add_arr(a, b)
+        exp_out = a * 2
+        assert_allclose(out, exp_out)
+
+
+def test_process_f2cmap_dict():
+    from numpy.f2py.auxfuncs import process_f2cmap_dict
+
+    f2cmap_all = {"integer": {"8": "rubbish_type"}}
+    new_map = {"INTEGER": {"4": "int"}}
+    c2py_map = {"int": "int", "rubbish_type": "long"}
+
+    exp_map, exp_maptyp = ({"integer": {"8": "rubbish_type", "4": "int"}}, ["int"])
+
+    # Call the function
+    res_map, res_maptyp = process_f2cmap_dict(f2cmap_all, new_map, c2py_map)
+
+    # Assert the result is as expected
+    assert res_map == exp_map
+    assert res_maptyp == exp_maptyp
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_kind.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_kind.py
new file mode 100644
index 00000000..69b85aaa
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_kind.py
@@ -0,0 +1,47 @@
+import os
+import pytest
+import platform
+
+from numpy.f2py.crackfortran import (
+    _selected_int_kind_func as selected_int_kind,
+    _selected_real_kind_func as selected_real_kind,
+)
+from . import util
+
+
+class TestKind(util.F2PyTest):
+    sources = [util.getpath("tests", "src", "kind", "foo.f90")]
+
+    def test_int(self):
+        """Test `int` kind_func for integers up to 10**40."""
+        selectedintkind = self.module.selectedintkind
+
+        for i in range(40):
+            assert selectedintkind(i) == selected_int_kind(
+                i
+            ), f"selectedintkind({i}): expected {selected_int_kind(i)!r} but got {selectedintkind(i)!r}"
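+
+    # For orientation: in f2py's emulation (matching typical gfortran
+    # targets), selected_int_kind(9) == 4 since a 32-bit integer holds
+    # +/-10**9, and selected_int_kind(18) == 8; e.g. (not run here):
+    #
+    #     >>> selected_int_kind(9), selected_int_kind(18)
+    #     (4, 8)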
+ """ + selectedrealkind = self.module.selectedrealkind + + for i in range(32): + assert selectedrealkind(i) == selected_real_kind( + i + ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" + + @pytest.mark.xfail(platform.machine().lower().startswith("ppc"), + reason="Some PowerPC may not support full IEEE 754 precision") + def test_quad_precision(self): + """ + Test kind_func for quadruple precision [`real(16)`] of 32+ digits . + """ + selectedrealkind = self.module.selectedrealkind + + for i in range(32, 40): + assert selectedrealkind(i) == selected_real_kind( + i + ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_mixed.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_mixed.py new file mode 100644 index 00000000..80653b7d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_mixed.py @@ -0,0 +1,33 @@ +import os +import textwrap +import pytest + +from numpy.testing import IS_PYPY +from . import util + + +class TestMixed(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "mixed", "foo.f"), + util.getpath("tests", "src", "mixed", "foo_fixed.f90"), + util.getpath("tests", "src", "mixed", "foo_free.f90"), + ] + + def test_all(self): + assert self.module.bar11() == 11 + assert self.module.foo_fixed.bar12() == 12 + assert self.module.foo_free.bar13() == 13 + + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_docstring(self): + expected = textwrap.dedent("""\ + a = bar11() + + Wrapper for ``bar11``. + + Returns + ------- + a : int + """) + assert self.module.bar11.__doc__ == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_module_doc.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_module_doc.py new file mode 100644 index 00000000..28822d40 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_module_doc.py @@ -0,0 +1,27 @@ +import os +import sys +import pytest +import textwrap + +from . import util +from numpy.testing import IS_PYPY + + +class TestModuleDocString(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "module_data", + "module_data_docstring.f90") + ] + + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_module_docstring(self): + assert self.module.mod.__doc__ == textwrap.dedent("""\ + i : 'i'-scalar + x : 'i'-array(4) + a : 'f'-array(2,3) + b : 'f'-array(-1,-1), not allocated\x00 + foo()\n + Wrapper for ``foo``.\n\n""") diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_parameter.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_parameter.py new file mode 100644 index 00000000..2f620eaa --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_parameter.py @@ -0,0 +1,112 @@ +import os +import pytest + +import numpy as np + +from . 
import util + + +class TestParameters(util.F2PyTest): + # Check that intent(in out) translates as intent(inout) + sources = [ + util.getpath("tests", "src", "parameter", "constant_real.f90"), + util.getpath("tests", "src", "parameter", "constant_integer.f90"), + util.getpath("tests", "src", "parameter", "constant_both.f90"), + util.getpath("tests", "src", "parameter", "constant_compound.f90"), + util.getpath("tests", "src", "parameter", "constant_non_compound.f90"), + ] + + @pytest.mark.slow + def test_constant_real_single(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float32)[::2] + pytest.raises(ValueError, self.module.foo_single, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float32) + self.module.foo_single(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_real_double(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_double, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_double(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_compound_int(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int32)[::2] + pytest.raises(ValueError, self.module.foo_compound_int, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int32) + self.module.foo_compound_int(x) + assert np.allclose(x, [0 + 1 + 2 * 6, 1, 2]) + + @pytest.mark.slow + def test_constant_non_compound_int(self): + # check values + x = np.arange(4, dtype=np.int32) + self.module.foo_non_compound_int(x) + assert np.allclose(x, [0 + 1 + 2 + 3 * 4, 1, 2, 3]) + + @pytest.mark.slow + def test_constant_integer_int(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int32)[::2] + pytest.raises(ValueError, self.module.foo_int, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int32) + self.module.foo_int(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_integer_long(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int64)[::2] + pytest.raises(ValueError, self.module.foo_long, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int64) + self.module.foo_long(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_both(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) + + @pytest.mark.slow + def test_constant_no(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_no, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_no(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) + + @pytest.mark.slow + def test_constant_sum(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_sum, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_sum(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) diff --git 
a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_pyf_src.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_pyf_src.py new file mode 100644 index 00000000..f77ded2f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_pyf_src.py @@ -0,0 +1,44 @@ +# This test is ported from numpy.distutils +from numpy.f2py._src_pyf import process_str +from numpy.testing import assert_equal + + +pyf_src = """ +python module foo + <_rd=real,double precision> + interface + subroutine foosub(tol) + <_rd>, intent(in,out) :: tol + end subroutine foosub + end interface +end python module foo +""" + +expected_pyf = """ +python module foo + interface + subroutine sfoosub(tol) + real, intent(in,out) :: tol + end subroutine sfoosub + subroutine dfoosub(tol) + double precision, intent(in,out) :: tol + end subroutine dfoosub + end interface +end python module foo +""" + + +def normalize_whitespace(s): + """ + Remove leading and trailing whitespace, and convert internal + stretches of whitespace to a single space. + """ + return ' '.join(s.split()) + + +def test_from_template(): + """Regression test for gh-10712.""" + pyf = process_str(pyf_src) + normalized_pyf = normalize_whitespace(pyf) + normalized_expected_pyf = normalize_whitespace(expected_pyf) + assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_quoted_character.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_quoted_character.py new file mode 100644 index 00000000..82671cd8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_quoted_character.py @@ -0,0 +1,16 @@ +"""See https://github.com/numpy/numpy/pull/10676. + +""" +import sys +import pytest + +from . import util + + +class TestQuotedCharacter(util.F2PyTest): + sources = [util.getpath("tests", "src", "quoted_character", "foo.f")] + + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") + def test_quoted_character(self): + assert self.module.foo() == (b"'", b'"', b";", b"!", b"(", b")") diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_regression.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_regression.py new file mode 100644 index 00000000..1c109783 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_regression.py @@ -0,0 +1,77 @@ +import os +import pytest + +import numpy as np + +from . 
import util
+
+
+class TestIntentInOut(util.F2PyTest):
+    # Check that intent(in out) translates as intent(inout)
+    sources = [util.getpath("tests", "src", "regression", "inout.f90")]
+
+    @pytest.mark.slow
+    def test_inout(self):
+        # non-contiguous should raise error
+        x = np.arange(6, dtype=np.float32)[::2]
+        pytest.raises(ValueError, self.module.foo, x)
+
+        # check values with contiguous array
+        x = np.arange(3, dtype=np.float32)
+        self.module.foo(x)
+        assert np.allclose(x, [3, 1, 2])
+
+
+class TestNegativeBounds(util.F2PyTest):
+    # Check that negative bounds work correctly
+    sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")]
+
+    @pytest.mark.slow
+    def test_negbound(self):
+        xvec = np.arange(12)
+        xlow = -6
+        xhigh = 4
+        # Calculate the upper bound,
+        # keeping the 1-based indexing in mind
+        def ubound(xl, xh):
+            return xh - xl + 1
+        rval = self.module.foo(is_=xlow, ie_=xhigh,
+                               arr=xvec[:ubound(xlow, xhigh)])
+        expval = np.arange(11, dtype=np.float32)
+        assert np.allclose(rval, expval)
+
+
+class TestNumpyVersionAttribute(util.F2PyTest):
+    # Check that the attribute __f2py_numpy_version__ is present
+    # in the compiled module and that it has the value np.__version__.
+    sources = [util.getpath("tests", "src", "regression", "inout.f90")]
+
+    @pytest.mark.slow
+    def test_numpy_version_attribute(self):
+
+        # Check that self.module has an attribute named "__f2py_numpy_version__"
+        assert hasattr(self.module, "__f2py_numpy_version__")
+
+        # Check that the attribute __f2py_numpy_version__ is a string
+        assert isinstance(self.module.__f2py_numpy_version__, str)
+
+        # Check that __f2py_numpy_version__ has the value numpy.__version__
+        assert np.__version__ == self.module.__f2py_numpy_version__
+
+
+def test_include_path():
+    incdir = np.f2py.get_include()
+    fnames_in_dir = os.listdir(incdir)
+    for fname in ("fortranobject.c", "fortranobject.h"):
+        assert fname in fnames_in_dir
+
+
+class TestModuleAndSubroutine(util.F2PyTest):
+    module_name = "example"
+    sources = [util.getpath("tests", "src", "regression", "gh25337", "data.f90"),
+               util.getpath("tests", "src", "regression", "gh25337", "use_data.f90")]
+
+    @pytest.mark.slow
+    def test_gh25337(self):
+        self.module.data.set_shift(3)
+        assert "data" in dir(self.module)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_character.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_character.py
new file mode 100644
index 00000000..36c1f10f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_character.py
@@ -0,0 +1,45 @@
+import pytest
+
+from numpy import array
+from . 
import util +import platform + +IS_S390X = platform.machine() == "s390x" + + +class TestReturnCharacter(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t1", "s0", "s1"]: + assert t("23") == b"2" + r = t("ab") + assert r == b"a" + r = t(array("ab")) + assert r == b"a" + r = t(array(77, "u1")) + assert r == b"M" + elif tname in ["ts", "ss"]: + assert t(23) == b"23" + assert t("123456789abcdef") == b"123456789a" + elif tname in ["t5", "s5"]: + assert t(23) == b"23" + assert t("ab") == b"ab" + assert t("123456789abcdef") == b"12345" + else: + raise NotImplementedError + + +class TestFReturnCharacter(TestReturnCharacter): + sources = [ + util.getpath("tests", "src", "return_character", "foo77.f"), + util.getpath("tests", "src", "return_character", "foo90.f90"), + ] + + @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") + @pytest.mark.parametrize("name", "t0,t1,t5,s0,s1,s5,ss".split(",")) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") + @pytest.mark.parametrize("name", "t0,t1,t5,ts,s0,s1,s5,ss".split(",")) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_char, name), name) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_complex.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_complex.py new file mode 100644 index 00000000..9df79632 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_complex.py @@ -0,0 +1,65 @@ +import pytest + +from numpy import array +from . import util + + +class TestReturnComplex(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t8", "s0", "s8"]: + err = 1e-5 + else: + err = 0.0 + assert abs(t(234j) - 234.0j) <= err + assert abs(t(234.6) - 234.6) <= err + assert abs(t(234) - 234.0) <= err + assert abs(t(234.6 + 3j) - (234.6 + 3j)) <= err + # assert abs(t('234')-234.)<=err + # assert abs(t('234.6')-234.6)<=err + assert abs(t(-234) + 234.0) <= err + assert abs(t([234]) - 234.0) <= err + assert abs(t((234, )) - 234.0) <= err + assert abs(t(array(234)) - 234.0) <= err + assert abs(t(array(23 + 4j, "F")) - (23 + 4j)) <= err + assert abs(t(array([234])) - 234.0) <= err + assert abs(t(array([[234]])) - 234.0) <= err + assert abs(t(array([234]).astype("b")) + 22.0) <= err + assert abs(t(array([234], "h")) - 234.0) <= err + assert abs(t(array([234], "i")) - 234.0) <= err + assert abs(t(array([234], "l")) - 234.0) <= err + assert abs(t(array([234], "q")) - 234.0) <= err + assert abs(t(array([234], "f")) - 234.0) <= err + assert abs(t(array([234], "d")) - 234.0) <= err + assert abs(t(array([234 + 3j], "F")) - (234 + 3j)) <= err + assert abs(t(array([234], "D")) - 234.0) <= err + + # pytest.raises(TypeError, t, array([234], 'a1')) + pytest.raises(TypeError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) + + try: + r = t(10**400) + assert repr(r) in ["(inf+0j)", "(Infinity+0j)"] + except OverflowError: + pass + + +class TestFReturnComplex(TestReturnComplex): + sources = [ + util.getpath("tests", "src", "return_complex", "foo77.f"), + util.getpath("tests", "src", "return_complex", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(",")) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", 
"t0,t8,t16,td,s0,s8,s16,sd".split(",")) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_complex, name), + name) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_integer.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_integer.py new file mode 100644 index 00000000..3b2f42e2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_integer.py @@ -0,0 +1,53 @@ +import pytest + +from numpy import array +from . import util + + +class TestReturnInteger(util.F2PyTest): + def check_function(self, t, tname): + assert t(123) == 123 + assert t(123.6) == 123 + assert t("123") == 123 + assert t(-123) == -123 + assert t([123]) == 123 + assert t((123, )) == 123 + assert t(array(123)) == 123 + assert t(array(123, "b")) == 123 + assert t(array(123, "h")) == 123 + assert t(array(123, "i")) == 123 + assert t(array(123, "l")) == 123 + assert t(array(123, "B")) == 123 + assert t(array(123, "f")) == 123 + assert t(array(123, "d")) == 123 + + # pytest.raises(ValueError, t, array([123],'S3')) + pytest.raises(ValueError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(Exception, t, t) + pytest.raises(Exception, t, {}) + + if tname in ["t8", "s8"]: + pytest.raises(OverflowError, t, 100000000000000000000000) + pytest.raises(OverflowError, t, 10000000011111111111111.23) + + +class TestFReturnInteger(TestReturnInteger): + sources = [ + util.getpath("tests", "src", "return_integer", "foo77.f"), + util.getpath("tests", "src", "return_integer", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", + "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", + "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_integer, name), + name) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_logical.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_logical.py new file mode 100644 index 00000000..92fb902a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_logical.py @@ -0,0 +1,64 @@ +import pytest + +from numpy import array +from . 
import util + + +class TestReturnLogical(util.F2PyTest): + def check_function(self, t): + assert t(True) == 1 + assert t(False) == 0 + assert t(0) == 0 + assert t(None) == 0 + assert t(0.0) == 0 + assert t(0j) == 0 + assert t(1j) == 1 + assert t(234) == 1 + assert t(234.6) == 1 + assert t(234.6 + 3j) == 1 + assert t("234") == 1 + assert t("aaa") == 1 + assert t("") == 0 + assert t([]) == 0 + assert t(()) == 0 + assert t({}) == 0 + assert t(t) == 1 + assert t(-234) == 1 + assert t(10**100) == 1 + assert t([234]) == 1 + assert t((234, )) == 1 + assert t(array(234)) == 1 + assert t(array([234])) == 1 + assert t(array([[234]])) == 1 + assert t(array([127], "b")) == 1 + assert t(array([234], "h")) == 1 + assert t(array([234], "i")) == 1 + assert t(array([234], "l")) == 1 + assert t(array([234], "f")) == 1 + assert t(array([234], "d")) == 1 + assert t(array([234 + 3j], "F")) == 1 + assert t(array([234], "D")) == 1 + assert t(array(0)) == 0 + assert t(array([0])) == 0 + assert t(array([[0]])) == 0 + assert t(array([0j])) == 0 + assert t(array([1])) == 1 + pytest.raises(ValueError, t, array([0, 0])) + + +class TestFReturnLogical(TestReturnLogical): + sources = [ + util.getpath("tests", "src", "return_logical", "foo77.f"), + util.getpath("tests", "src", "return_logical", "foo90.f90"), + ] + + @pytest.mark.slow + @pytest.mark.parametrize("name", "t0,t1,t2,t4,s0,s1,s2,s4".split(",")) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name)) + + @pytest.mark.slow + @pytest.mark.parametrize("name", + "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(",")) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_logical, name)) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_real.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_real.py new file mode 100644 index 00000000..a15d6475 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_return_real.py @@ -0,0 +1,107 @@ +import platform +import pytest +import numpy as np + +from numpy import array +from . 
import util + + +class TestReturnReal(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t4", "s0", "s4"]: + err = 1e-5 + else: + err = 0.0 + assert abs(t(234) - 234.0) <= err + assert abs(t(234.6) - 234.6) <= err + assert abs(t("234") - 234) <= err + assert abs(t("234.6") - 234.6) <= err + assert abs(t(-234) + 234) <= err + assert abs(t([234]) - 234) <= err + assert abs(t((234, )) - 234.0) <= err + assert abs(t(array(234)) - 234.0) <= err + assert abs(t(array(234).astype("b")) + 22) <= err + assert abs(t(array(234, "h")) - 234.0) <= err + assert abs(t(array(234, "i")) - 234.0) <= err + assert abs(t(array(234, "l")) - 234.0) <= err + assert abs(t(array(234, "B")) - 234.0) <= err + assert abs(t(array(234, "f")) - 234.0) <= err + assert abs(t(array(234, "d")) - 234.0) <= err + if tname in ["t0", "t4", "s0", "s4"]: + assert t(1e200) == t(1e300) # inf + + # pytest.raises(ValueError, t, array([234], 'S1')) + pytest.raises(ValueError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(Exception, t, t) + pytest.raises(Exception, t, {}) + + try: + r = t(10**400) + assert repr(r) in ["inf", "Infinity"] + except OverflowError: + pass + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + np.dtype(np.intp).itemsize < 8, + reason="32-bit builds are buggy" +) +class TestCReturnReal(TestReturnReal): + suffix = ".pyf" + module_name = "c_ext_return_real" + code = """ +python module c_ext_return_real +usercode \'\'\' +float t4(float value) { return value; } +void s4(float *t4, float value) { *t4 = value; } +double t8(double value) { return value; } +void s8(double *t8, double value) { *t8 = value; } +\'\'\' +interface + function t4(value) + real*4 intent(c) :: t4,value + end + function t8(value) + real*8 intent(c) :: t8,value + end + subroutine s4(t4,value) + intent(c) s4 + real*4 intent(out) :: t4 + real*4 intent(c) :: value + end + subroutine s8(t8,value) + intent(c) s8 + real*8 intent(out) :: t8 + real*8 intent(c) :: value + end +end interface +end python module c_ext_return_real + """ + + @pytest.mark.parametrize("name", "t4,t8,s4,s8".split(",")) + def test_all(self, name): + self.check_function(getattr(self.module, name), name) + + +class TestFReturnReal(TestReturnReal): + sources = [ + util.getpath("tests", "src", "return_real", "foo77.f"), + util.getpath("tests", "src", "return_real", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(",")) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(",")) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_real, name), name) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_semicolon_split.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_semicolon_split.py new file mode 100644 index 00000000..6d499046 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_semicolon_split.py @@ -0,0 +1,74 @@ +import platform +import pytest +import numpy as np + +from . 
import util + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + np.dtype(np.intp).itemsize < 8, + reason="32-bit builds are buggy" +) +class TestMultiline(util.F2PyTest): + suffix = ".pyf" + module_name = "multiline" + code = f""" +python module {module_name} + usercode ''' +void foo(int* x) {{ + char dummy = ';'; + *x = 42; +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + end subroutine foo + end interface +end python module {module_name} + """ + + def test_multiline(self): + assert self.module.foo() == 42 + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + np.dtype(np.intp).itemsize < 8, + reason="32-bit builds are buggy" +) +class TestCallstatement(util.F2PyTest): + suffix = ".pyf" + module_name = "callstatement" + code = f""" +python module {module_name} + usercode ''' +void foo(int* x) {{ +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + callprotoargument int* + callstatement {{ & + ; & + x = 42; & + }} + end subroutine foo + end interface +end python module {module_name} + """ + + def test_callstatement(self): + assert self.module.foo() == 42 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_size.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_size.py new file mode 100644 index 00000000..bd2c349d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_size.py @@ -0,0 +1,45 @@ +import os +import pytest +import numpy as np + +from . import util + + +class TestSizeSumExample(util.F2PyTest): + sources = [util.getpath("tests", "src", "size", "foo.f90")] + + @pytest.mark.slow + def test_all(self): + r = self.module.foo([[]]) + assert r == [0] + + r = self.module.foo([[1, 2]]) + assert r == [3] + + r = self.module.foo([[1, 2], [3, 4]]) + assert np.allclose(r, [3, 7]) + + r = self.module.foo([[1, 2], [3, 4], [5, 6]]) + assert np.allclose(r, [3, 7, 11]) + + @pytest.mark.slow + def test_transpose(self): + r = self.module.trans([[]]) + assert np.allclose(r.T, np.array([[]])) + + r = self.module.trans([[1, 2]]) + assert np.allclose(r, [[1.], [2.]]) + + r = self.module.trans([[1, 2, 3], [4, 5, 6]]) + assert np.allclose(r, [[1, 4], [2, 5], [3, 6]]) + + @pytest.mark.slow + def test_flatten(self): + r = self.module.flatten([[]]) + assert np.allclose(r, []) + + r = self.module.flatten([[1, 2]]) + assert np.allclose(r, [1, 2]) + + r = self.module.flatten([[1, 2, 3], [4, 5, 6]]) + assert np.allclose(r, [1, 2, 3, 4, 5, 6]) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_string.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_string.py new file mode 100644 index 00000000..9e937188 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_string.py @@ -0,0 +1,100 @@ +import os +import pytest +import textwrap +import numpy as np +from . 
import util + + +class TestString(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "char.f90")] + + @pytest.mark.slow + def test_char(self): + strings = np.array(["ab", "cd", "ef"], dtype="c").T + inp, out = self.module.char_test.change_strings( + strings, strings.shape[1]) + assert inp == pytest.approx(strings) + expected = strings.copy() + expected[1, :] = "AAA" + assert out == pytest.approx(expected) + + +class TestDocStringArguments(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "string.f")] + + def test_example(self): + a = np.array(b"123\0\0") + b = np.array(b"123\0\0") + c = np.array(b"123") + d = np.array(b"123") + + self.module.foo(a, b, c, d) + + assert a.tobytes() == b"123\0\0" + assert b.tobytes() == b"B23\0\0" + assert c.tobytes() == b"123" + assert d.tobytes() == b"D23" + + +class TestFixedString(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "fixed_string.f90")] + + @staticmethod + def _sint(s, start=0, end=None): + """Return the content of a string buffer as integer value. + + For example: + _sint('1234') -> 4321 + _sint('123A') -> 17321 + """ + if isinstance(s, np.ndarray): + s = s.tobytes() + elif isinstance(s, str): + s = s.encode() + assert isinstance(s, bytes) + if end is None: + end = len(s) + i = 0 + for j in range(start, min(end, len(s))): + i += s[j] * 10**j + return i + + def _get_input(self, intent="in"): + if intent in ["in"]: + yield "" + yield "1" + yield "1234" + yield "12345" + yield b"" + yield b"\0" + yield b"1" + yield b"\01" + yield b"1\0" + yield b"1234" + yield b"12345" + yield np.ndarray((), np.bytes_, buffer=b"") # array(b'', dtype='|S0') + yield np.array(b"") # array(b'', dtype='|S1') + yield np.array(b"\0") + yield np.array(b"1") + yield np.array(b"1\0") + yield np.array(b"\01") + yield np.array(b"1234") + yield np.array(b"123\0") + yield np.array(b"12345") + + def test_intent_in(self): + for s in self._get_input(): + r = self.module.test_in_bytes4(s) + # also checks that s is not changed inplace + expected = self._sint(s, end=4) + assert r == expected, s + + def test_intent_inout(self): + for s in self._get_input(intent="inout"): + rest = self._sint(s, start=4) + r = self.module.test_inout_bytes4(s) + expected = self._sint(s, end=4) + assert r == expected + + # check that the rest of input string is preserved + assert rest == self._sint(s, start=4) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_symbolic.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_symbolic.py new file mode 100644 index 00000000..84527831 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_symbolic.py @@ -0,0 +1,494 @@ +import pytest + +from numpy.f2py.symbolic import ( + Expr, + Op, + ArithOp, + Language, + as_symbol, + as_number, + as_string, + as_array, + as_complex, + as_terms, + as_factors, + eliminate_quotes, + insert_quotes, + fromstring, + as_expr, + as_apply, + as_numer_denom, + as_ternary, + as_ref, + as_deref, + normalize, + as_eq, + as_ne, + as_lt, + as_gt, + as_le, + as_ge, +) +from . 
import util + + +class TestSymbolic(util.F2PyTest): + def test_eliminate_quotes(self): + def worker(s): + r, d = eliminate_quotes(s) + s1 = insert_quotes(r, d) + assert s1 == s + + for kind in ["", "mykind_"]: + worker(kind + '"1234" // "ABCD"') + worker(kind + '"1234" // ' + kind + '"ABCD"') + worker(kind + "\"1234\" // 'ABCD'") + worker(kind + '"1234" // ' + kind + "'ABCD'") + worker(kind + '"1\\"2\'AB\'34"') + worker("a = " + kind + "'1\\'2\"AB\"34'") + + def test_sanity(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x.op == Op.SYMBOL + assert repr(x) == "Expr(Op.SYMBOL, 'x')" + assert x == x + assert x != y + assert hash(x) is not None + + n = as_number(123) + m = as_number(456) + assert n.op == Op.INTEGER + assert repr(n) == "Expr(Op.INTEGER, (123, 4))" + assert n == n + assert n != m + assert hash(n) is not None + + fn = as_number(12.3) + fm = as_number(45.6) + assert fn.op == Op.REAL + assert repr(fn) == "Expr(Op.REAL, (12.3, 4))" + assert fn == fn + assert fn != fm + assert hash(fn) is not None + + c = as_complex(1, 2) + c2 = as_complex(3, 4) + assert c.op == Op.COMPLEX + assert repr(c) == ("Expr(Op.COMPLEX, (Expr(Op.INTEGER, (1, 4))," + " Expr(Op.INTEGER, (2, 4))))") + assert c == c + assert c != c2 + assert hash(c) is not None + + s = as_string("'123'") + s2 = as_string('"ABC"') + assert s.op == Op.STRING + assert repr(s) == "Expr(Op.STRING, (\"'123'\", 1))", repr(s) + assert s == s + assert s != s2 + + a = as_array((n, m)) + b = as_array((n, )) + assert a.op == Op.ARRAY + assert repr(a) == ("Expr(Op.ARRAY, (Expr(Op.INTEGER, (123, 4))," + " Expr(Op.INTEGER, (456, 4))))") + assert a == a + assert a != b + + t = as_terms(x) + u = as_terms(y) + assert t.op == Op.TERMS + assert repr(t) == "Expr(Op.TERMS, {Expr(Op.SYMBOL, 'x'): 1})" + assert t == t + assert t != u + assert hash(t) is not None + + v = as_factors(x) + w = as_factors(y) + assert v.op == Op.FACTORS + assert repr(v) == "Expr(Op.FACTORS, {Expr(Op.SYMBOL, 'x'): 1})" + assert v == v + assert w != v + assert hash(v) is not None + + t = as_ternary(x, y, z) + u = as_ternary(x, z, y) + assert t.op == Op.TERNARY + assert t == t + assert t != u + assert hash(t) is not None + + e = as_eq(x, y) + f = as_lt(x, y) + assert e.op == Op.RELATIONAL + assert e == e + assert e != f + assert hash(e) is not None + + def test_tostring_fortran(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + n = as_number(123) + m = as_number(456) + a = as_array((n, m)) + c = as_complex(n, m) + + assert str(x) == "x" + assert str(n) == "123" + assert str(a) == "[123, 456]" + assert str(c) == "(123, 456)" + + assert str(Expr(Op.TERMS, {x: 1})) == "x" + assert str(Expr(Op.TERMS, {x: 2})) == "2 * x" + assert str(Expr(Op.TERMS, {x: -1})) == "-x" + assert str(Expr(Op.TERMS, {x: -2})) == "-2 * x" + assert str(Expr(Op.TERMS, {x: 1, y: 1})) == "x + y" + assert str(Expr(Op.TERMS, {x: -1, y: -1})) == "-x - y" + assert str(Expr(Op.TERMS, {x: 2, y: 3})) == "2 * x + 3 * y" + assert str(Expr(Op.TERMS, {x: -2, y: 3})) == "-2 * x + 3 * y" + assert str(Expr(Op.TERMS, {x: 2, y: -3})) == "2 * x - 3 * y" + + assert str(Expr(Op.FACTORS, {x: 1})) == "x" + assert str(Expr(Op.FACTORS, {x: 2})) == "x ** 2" + assert str(Expr(Op.FACTORS, {x: -1})) == "x ** -1" + assert str(Expr(Op.FACTORS, {x: -2})) == "x ** -2" + assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == "x * y" + assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == "x ** 2 * y ** 3" + + v = Expr(Op.FACTORS, {x: 2, Expr(Op.TERMS, {x: 1, y: 1}): 3}) + assert str(v) == "x ** 2 * (x + y) 
** 3", str(v) + v = Expr(Op.FACTORS, {x: 2, Expr(Op.FACTORS, {x: 1, y: 1}): 3}) + assert str(v) == "x ** 2 * (x * y) ** 3", str(v) + + assert str(Expr(Op.APPLY, ("f", (), {}))) == "f()" + assert str(Expr(Op.APPLY, ("f", (x, ), {}))) == "f(x)" + assert str(Expr(Op.APPLY, ("f", (x, y), {}))) == "f(x, y)" + assert str(Expr(Op.INDEXING, ("f", x))) == "f[x]" + + assert str(as_ternary(x, y, z)) == "merge(y, z, x)" + assert str(as_eq(x, y)) == "x .eq. y" + assert str(as_ne(x, y)) == "x .ne. y" + assert str(as_lt(x, y)) == "x .lt. y" + assert str(as_le(x, y)) == "x .le. y" + assert str(as_gt(x, y)) == "x .gt. y" + assert str(as_ge(x, y)) == "x .ge. y" + + def test_tostring_c(self): + language = Language.C + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + n = as_number(123) + + assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == "x * x" + assert (Expr(Op.FACTORS, { + x + y: 2 + }).tostring(language=language) == "(x + y) * (x + y)") + assert Expr(Op.FACTORS, { + x: 12 + }).tostring(language=language) == "pow(x, 12)" + + assert as_apply(ArithOp.DIV, x, + y).tostring(language=language) == "x / y" + assert (as_apply(ArithOp.DIV, x, + x + y).tostring(language=language) == "x / (x + y)") + assert (as_apply(ArithOp.DIV, x - y, x + + y).tostring(language=language) == "(x - y) / (x + y)") + assert (x + (x - y) / (x + y) + + n).tostring(language=language) == "123 + x + (x - y) / (x + y)" + + assert as_ternary(x, y, z).tostring(language=language) == "(x?y:z)" + assert as_eq(x, y).tostring(language=language) == "x == y" + assert as_ne(x, y).tostring(language=language) == "x != y" + assert as_lt(x, y).tostring(language=language) == "x < y" + assert as_le(x, y).tostring(language=language) == "x <= y" + assert as_gt(x, y).tostring(language=language) == "x > y" + assert as_ge(x, y).tostring(language=language) == "x >= y" + + def test_operations(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x + x == Expr(Op.TERMS, {x: 2}) + assert x - x == Expr(Op.INTEGER, (0, 4)) + assert x + y == Expr(Op.TERMS, {x: 1, y: 1}) + assert x - y == Expr(Op.TERMS, {x: 1, y: -1}) + assert x * x == Expr(Op.FACTORS, {x: 2}) + assert x * y == Expr(Op.FACTORS, {x: 1, y: 1}) + + assert +x == x + assert -x == Expr(Op.TERMS, {x: -1}), repr(-x) + assert 2 * x == Expr(Op.TERMS, {x: 2}) + assert 2 + x == Expr(Op.TERMS, {x: 1, as_number(1): 2}) + assert 2 * x + 3 * y == Expr(Op.TERMS, {x: 2, y: 3}) + assert (x + y) * 2 == Expr(Op.TERMS, {x: 2, y: 2}) + + assert x**2 == Expr(Op.FACTORS, {x: 2}) + assert (x + y)**2 == Expr( + Op.TERMS, + { + Expr(Op.FACTORS, {x: 2}): 1, + Expr(Op.FACTORS, {y: 2}): 1, + Expr(Op.FACTORS, { + x: 1, + y: 1 + }): 2, + }, + ) + assert (x + y) * x == x**2 + x * y + assert (x + y)**2 == x**2 + 2 * x * y + y**2 + assert (x + y)**2 + (x - y)**2 == 2 * x**2 + 2 * y**2 + assert (x + y) * z == x * z + y * z + assert z * (x + y) == x * z + y * z + + assert (x / 2) == as_apply(ArithOp.DIV, x, as_number(2)) + assert (2 * x / 2) == x + assert (3 * x / 2) == as_apply(ArithOp.DIV, 3 * x, as_number(2)) + assert (4 * x / 2) == 2 * x + assert (5 * x / 2) == as_apply(ArithOp.DIV, 5 * x, as_number(2)) + assert (6 * x / 2) == 3 * x + assert ((3 * 5) * x / 6) == as_apply(ArithOp.DIV, 5 * x, as_number(2)) + assert (30 * x**2 * y**4 / (24 * x**3 * y**3)) == as_apply( + ArithOp.DIV, 5 * y, 4 * x) + assert ((15 * x / 6) / 5) == as_apply(ArithOp.DIV, x, + as_number(2)), (15 * x / 6) / 5 + assert (x / (5 / x)) == as_apply(ArithOp.DIV, x**2, as_number(5)) + + assert (x / 2.0) == 
Expr(Op.TERMS, {x: 0.5}) + + s = as_string('"ABC"') + t = as_string('"123"') + + assert s // t == Expr(Op.STRING, ('"ABC123"', 1)) + assert s // x == Expr(Op.CONCAT, (s, x)) + assert x // s == Expr(Op.CONCAT, (x, s)) + + c = as_complex(1.0, 2.0) + assert -c == as_complex(-1.0, -2.0) + assert c + c == as_expr((1 + 2j) * 2) + assert c * c == as_expr((1 + 2j)**2) + + def test_substitute(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + a = as_array((x, y)) + + assert x.substitute({x: y}) == y + assert (x + y).substitute({x: z}) == y + z + assert (x * y).substitute({x: z}) == y * z + assert (x**4).substitute({x: z}) == z**4 + assert (x / y).substitute({x: z}) == z / y + assert x.substitute({x: y + z}) == y + z + assert a.substitute({x: y + z}) == as_array((y + z, y)) + + assert as_ternary(x, y, + z).substitute({x: y + z}) == as_ternary(y + z, y, z) + assert as_eq(x, y).substitute({x: y + z}) == as_eq(y + z, y) + + def test_fromstring(self): + + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + f = as_symbol("f") + s = as_string('"ABC"') + t = as_string('"123"') + a = as_array((x, y)) + + assert fromstring("x") == x + assert fromstring("+ x") == x + assert fromstring("- x") == -x + assert fromstring("x + y") == x + y + assert fromstring("x + 1") == x + 1 + assert fromstring("x * y") == x * y + assert fromstring("x * 2") == x * 2 + assert fromstring("x / y") == x / y + assert fromstring("x ** 2", language=Language.Python) == x**2 + assert fromstring("x ** 2 ** 3", language=Language.Python) == x**2**3 + assert fromstring("(x + y) * z") == (x + y) * z + + assert fromstring("f(x)") == f(x) + assert fromstring("f(x,y)") == f(x, y) + assert fromstring("f[x]") == f[x] + assert fromstring("f[x][y]") == f[x][y] + + assert fromstring('"ABC"') == s + assert (normalize( + fromstring('"ABC" // "123" ', + language=Language.Fortran)) == s // t) + assert fromstring('f("ABC")') == f(s) + assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', "MYSTRKIND") + + assert fromstring("(/x, y/)") == a, fromstring("(/x, y/)") + assert fromstring("f((/x, y/))") == f(a) + assert fromstring("(/(x+y)*z/)") == as_array(((x + y) * z, )) + + assert fromstring("123") == as_number(123) + assert fromstring("123_2") == as_number(123, 2) + assert fromstring("123_myintkind") == as_number(123, "myintkind") + + assert fromstring("123.0") == as_number(123.0, 4) + assert fromstring("123.0_4") == as_number(123.0, 4) + assert fromstring("123.0_8") == as_number(123.0, 8) + assert fromstring("123.0e0") == as_number(123.0, 4) + assert fromstring("123.0d0") == as_number(123.0, 8) + assert fromstring("123d0") == as_number(123.0, 8) + assert fromstring("123e-0") == as_number(123.0, 4) + assert fromstring("123d+0") == as_number(123.0, 8) + assert fromstring("123.0_myrealkind") == as_number(123.0, "myrealkind") + assert fromstring("3E4") == as_number(30000.0, 4) + + assert fromstring("(1, 2)") == as_complex(1, 2) + assert fromstring("(1e2, PI)") == as_complex(as_number(100.0), + as_symbol("PI")) + + assert fromstring("[1, 2]") == as_array((as_number(1), as_number(2))) + + assert fromstring("POINT(x, y=1)") == as_apply(as_symbol("POINT"), + x, + y=as_number(1)) + assert fromstring( + 'PERSON(name="John", age=50, shape=(/34, 23/))') == as_apply( + as_symbol("PERSON"), + name=as_string('"John"'), + age=as_number(50), + shape=as_array((as_number(34), as_number(23))), + ) + + assert fromstring("x?y:z") == as_ternary(x, y, z) + + assert fromstring("*x") == as_deref(x) + assert fromstring("**x") == 
as_deref(as_deref(x)) + assert fromstring("&x") == as_ref(x) + assert fromstring("(*x) * (*y)") == as_deref(x) * as_deref(y) + assert fromstring("(*x) * *y") == as_deref(x) * as_deref(y) + assert fromstring("*x * *y") == as_deref(x) * as_deref(y) + assert fromstring("*x**y") == as_deref(x) * as_deref(y) + + assert fromstring("x == y") == as_eq(x, y) + assert fromstring("x != y") == as_ne(x, y) + assert fromstring("x < y") == as_lt(x, y) + assert fromstring("x > y") == as_gt(x, y) + assert fromstring("x <= y") == as_le(x, y) + assert fromstring("x >= y") == as_ge(x, y) + + assert fromstring("x .eq. y", language=Language.Fortran) == as_eq(x, y) + assert fromstring("x .ne. y", language=Language.Fortran) == as_ne(x, y) + assert fromstring("x .lt. y", language=Language.Fortran) == as_lt(x, y) + assert fromstring("x .gt. y", language=Language.Fortran) == as_gt(x, y) + assert fromstring("x .le. y", language=Language.Fortran) == as_le(x, y) + assert fromstring("x .ge. y", language=Language.Fortran) == as_ge(x, y) + + def test_traverse(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + f = as_symbol("f") + + # Use traverse to substitute a symbol + def replace_visit(s, r=z): + if s == x: + return r + + assert x.traverse(replace_visit) == z + assert y.traverse(replace_visit) == y + assert z.traverse(replace_visit) == z + assert (f(y)).traverse(replace_visit) == f(y) + assert (f(x)).traverse(replace_visit) == f(z) + assert (f[y]).traverse(replace_visit) == f[y] + assert (f[z]).traverse(replace_visit) == f[z] + assert (x + y + z).traverse(replace_visit) == (2 * z + y) + assert (x + + f(y, x - z)).traverse(replace_visit) == (z + + f(y, as_number(0))) + assert as_eq(x, y).traverse(replace_visit) == as_eq(z, y) + + # Use traverse to collect symbols, method 1 + function_symbols = set() + symbols = set() + + def collect_symbols(s): + if s.op is Op.APPLY: + oper = s.data[0] + function_symbols.add(oper) + if oper in symbols: + symbols.remove(oper) + elif s.op is Op.SYMBOL and s not in function_symbols: + symbols.add(s) + + (x + f(y, x - z)).traverse(collect_symbols) + assert function_symbols == {f} + assert symbols == {x, y, z} + + # Use traverse to collect symbols, method 2 + def collect_symbols2(expr, symbols): + if expr.op is Op.SYMBOL: + symbols.add(expr) + + symbols = set() + (x + f(y, x - z)).traverse(collect_symbols2, symbols) + assert symbols == {x, y, z, f} + + # Use traverse to partially collect symbols + def collect_symbols3(expr, symbols): + if expr.op is Op.APPLY: + # skip traversing function calls + return expr + if expr.op is Op.SYMBOL: + symbols.add(expr) + + symbols = set() + (x + f(y, x - z)).traverse(collect_symbols3, symbols) + assert symbols == {x} + + def test_linear_solve(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x.linear_solve(x) == (as_number(1), as_number(0)) + assert (x + 1).linear_solve(x) == (as_number(1), as_number(1)) + assert (2 * x).linear_solve(x) == (as_number(2), as_number(0)) + assert (2 * x + 3).linear_solve(x) == (as_number(2), as_number(3)) + assert as_number(3).linear_solve(x) == (as_number(0), as_number(3)) + assert y.linear_solve(x) == (as_number(0), y) + assert (y * z).linear_solve(x) == (as_number(0), y * z) + + assert (x + y).linear_solve(x) == (as_number(1), y) + assert (z * x + y).linear_solve(x) == (z, y) + assert ((z + y) * x + y).linear_solve(x) == (z + y, y) + assert (z * y * x + y).linear_solve(x) == (z * y, y) + + pytest.raises(RuntimeError, lambda: (x * x).linear_solve(x)) + + def 
test_as_numer_denom(self): + x = as_symbol("x") + y = as_symbol("y") + n = as_number(123) + + assert as_numer_denom(x) == (x, as_number(1)) + assert as_numer_denom(x / n) == (x, n) + assert as_numer_denom(n / x) == (n, x) + assert as_numer_denom(x / y) == (x, y) + assert as_numer_denom(x * y) == (x * y, as_number(1)) + assert as_numer_denom(n + x / y) == (x + n * y, y) + assert as_numer_denom(n + x / (y - x / n)) == (y * n**2, y * n - x) + + def test_polynomial_atoms(self): + x = as_symbol("x") + y = as_symbol("y") + n = as_number(123) + + assert x.polynomial_atoms() == {x} + assert n.polynomial_atoms() == set() + assert (y[x]).polynomial_atoms() == {y[x]} + assert (y(x)).polynomial_atoms() == {y(x)} + assert (y(x) + x).polynomial_atoms() == {y(x), x} + assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]} + assert (y(x)**x).polynomial_atoms() == {y(x)} diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_value_attrspec.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_value_attrspec.py new file mode 100644 index 00000000..83aaf6c9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/test_value_attrspec.py @@ -0,0 +1,14 @@ +import os +import pytest + +from . import util + +class TestValueAttr(util.F2PyTest): + sources = [util.getpath("tests", "src", "value_attrspec", "gh21665.f90")] + + # gh-21665 + def test_long_long_map(self): + inp = 2 + out = self.module.fortfuncs.square(inp) + exp_out = 4 + assert out == exp_out diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/util.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/util.py new file mode 100644 index 00000000..75b257cd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/tests/util.py @@ -0,0 +1,439 @@ +""" +Utility functions for + +- building and importing modules on test time, using a temporary location +- detecting if compilers are present +- determining paths to tests + +""" +import glob +import os +import sys +import subprocess +import tempfile +import shutil +import atexit +import textwrap +import re +import pytest +import contextlib +import numpy + +from pathlib import Path +from numpy._utils import asunicode +from numpy.testing import temppath, IS_WASM +from importlib import import_module + +# +# Maintaining a temporary module directory +# + +_module_dir = None +_module_num = 5403 + +if sys.platform == "cygwin": + NUMPY_INSTALL_ROOT = Path(__file__).parent.parent.parent + _module_list = list(NUMPY_INSTALL_ROOT.glob("**/*.dll")) + + +def _cleanup(): + global _module_dir + if _module_dir is not None: + try: + sys.path.remove(_module_dir) + except ValueError: + pass + try: + shutil.rmtree(_module_dir) + except OSError: + pass + _module_dir = None + + +def get_module_dir(): + global _module_dir + if _module_dir is None: + _module_dir = tempfile.mkdtemp() + atexit.register(_cleanup) + if _module_dir not in sys.path: + sys.path.insert(0, _module_dir) + return _module_dir + + +def get_temp_module_name(): + # Assume single-threaded, and the module dir usable only by this thread + global _module_num + get_module_dir() + name = "_test_ext_module_%d" % _module_num + _module_num += 1 + if name in sys.modules: + # this should not be possible, but check anyway + raise RuntimeError("Temporary module name already in use.") + return name + + +def _memoize(func): + memo = {} + + def wrapper(*a, **kw): + key = repr((a, kw)) + if key not in memo: + try: + memo[key] = func(*a, **kw) + except Exception as e: + memo[key] = e + raise + ret = 
memo[key] + if isinstance(ret, Exception): + raise ret + return ret + + wrapper.__name__ = func.__name__ + return wrapper + + +# +# Building modules +# + + +@_memoize +def build_module(source_files, options=[], skip=[], only=[], module_name=None): + """ + Compile and import a f2py module, built from the given files. + + """ + + code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()" + + d = get_module_dir() + + # Copy files + dst_sources = [] + f2py_sources = [] + for fn in source_files: + if not os.path.isfile(fn): + raise RuntimeError("%s is not a file" % fn) + dst = os.path.join(d, os.path.basename(fn)) + shutil.copyfile(fn, dst) + dst_sources.append(dst) + + base, ext = os.path.splitext(dst) + if ext in (".f90", ".f", ".c", ".pyf"): + f2py_sources.append(dst) + + assert f2py_sources + + # Prepare options + if module_name is None: + module_name = get_temp_module_name() + f2py_opts = ["-c", "-m", module_name] + options + f2py_sources + if skip: + f2py_opts += ["skip:"] + skip + if only: + f2py_opts += ["only:"] + only + + # Build + cwd = os.getcwd() + try: + os.chdir(d) + cmd = [sys.executable, "-c", code] + f2py_opts + p = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = p.communicate() + if p.returncode != 0: + raise RuntimeError("Running f2py failed: %s\n%s" % + (cmd[4:], asunicode(out))) + finally: + os.chdir(cwd) + + # Partial cleanup + for fn in dst_sources: + os.unlink(fn) + + # Rebase (Cygwin-only) + if sys.platform == "cygwin": + # If someone starts deleting modules after import, this will + # need to change to record how big each module is, rather than + # relying on rebase being able to find that from the files. + _module_list.extend( + glob.glob(os.path.join(d, "{:s}*".format(module_name))) + ) + subprocess.check_call( + ["/usr/bin/rebase", "--database", "--oblivious", "--verbose"] + + _module_list + ) + + + + # Import + return import_module(module_name) + + +@_memoize +def build_code(source_code, + options=[], + skip=[], + only=[], + suffix=None, + module_name=None): + """ + Compile and import Fortran code using f2py. + + """ + if suffix is None: + suffix = ".f" + with temppath(suffix=suffix) as path: + with open(path, "w") as f: + f.write(source_code) + return build_module([path], + options=options, + skip=skip, + only=only, + module_name=module_name) + + +# +# Check if compilers are available at all... +# + +_compiler_status = None + + +def _get_compiler_status(): + global _compiler_status + if _compiler_status is not None: + return _compiler_status + + _compiler_status = (False, False, False) + if IS_WASM: + # Can't run compiler from inside WASM. + return _compiler_status + + # XXX: this is really ugly. But I don't know how to invoke Distutils + # in a safer way... 
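+    # The probe writes a throwaway setup.py that asks numpy.distutils
+    # which compilers can actually build something: try_compile()
+    # attempts a no-op C translation unit, and have_f77c()/have_f90c()
+    # do the Fortran equivalents.  The results come back on stdout as a
+    # single "COMPILERS:<c>,<f77>,<f90>" line of 0/1 flags, which the
+    # regex below turns into three booleans.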
+    code = textwrap.dedent(f"""\
+        import os
+        import sys
+        sys.path = {repr(sys.path)}
+
+        def configuration(parent_name='',top_path=None):
+            global config
+            from numpy.distutils.misc_util import Configuration
+            config = Configuration('', parent_name, top_path)
+            return config
+
+        from numpy.distutils.core import setup
+        setup(configuration=configuration)
+
+        config_cmd = config.get_config_cmd()
+        have_c = config_cmd.try_compile('void foo() {{}}')
+        print('COMPILERS:%%d,%%d,%%d' %% (have_c,
+                                          config.have_f77c(),
+                                          config.have_f90c()))
+        sys.exit(99)
+        """)
+    code = code % dict(syspath=repr(sys.path))
+
+    tmpdir = tempfile.mkdtemp()
+    try:
+        script = os.path.join(tmpdir, "setup.py")
+
+        with open(script, "w") as f:
+            f.write(code)
+
+        cmd = [sys.executable, "setup.py", "config"]
+        p = subprocess.Popen(cmd,
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.STDOUT,
+                             cwd=tmpdir)
+        out, err = p.communicate()
+    finally:
+        shutil.rmtree(tmpdir)
+
+    m = re.search(br"COMPILERS:(\d+),(\d+),(\d+)", out)
+    if m:
+        _compiler_status = (
+            bool(int(m.group(1))),
+            bool(int(m.group(2))),
+            bool(int(m.group(3))),
+        )
+    # Finished
+    return _compiler_status
+
+
+def has_c_compiler():
+    return _get_compiler_status()[0]
+
+
+def has_f77_compiler():
+    return _get_compiler_status()[1]
+
+
+def has_f90_compiler():
+    return _get_compiler_status()[2]
+
+
+#
+# Building with distutils
+#
+
+
+@_memoize
+def build_module_distutils(source_files, config_code, module_name, **kw):
+    """
+    Build a module via distutils and import it.
+
+    """
+    d = get_module_dir()
+
+    # Copy files
+    dst_sources = []
+    for fn in source_files:
+        if not os.path.isfile(fn):
+            raise RuntimeError("%s is not a file" % fn)
+        dst = os.path.join(d, os.path.basename(fn))
+        shutil.copyfile(fn, dst)
+        dst_sources.append(dst)
+
+    # Build script
+    config_code = textwrap.dedent(config_code).replace("\n", "\n    ")
+
+    code = fr"""
+import os
+import sys
+sys.path = {repr(sys.path)}
+
+def configuration(parent_name='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('', parent_name, top_path)
+    {config_code}
+    return config
+
+if __name__ == "__main__":
+    from numpy.distutils.core import setup
+    setup(configuration=configuration)
+    """
+    script = os.path.join(d, get_temp_module_name() + ".py")
+    dst_sources.append(script)
+    with open(script, "wb") as f:
+        f.write(code.encode('latin1'))
+
+    # Build
+    cwd = os.getcwd()
+    try:
+        os.chdir(d)
+        cmd = [sys.executable, script, "build_ext", "-i"]
+        p = subprocess.Popen(cmd,
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.STDOUT)
+        out, err = p.communicate()
+        if p.returncode != 0:
+            raise RuntimeError("Running distutils build failed: %s\n%s" %
+                               (cmd[4:], asunicode(out)))
+    finally:
+        os.chdir(cwd)
+
+    # Partial cleanup
+    for fn in dst_sources:
+        os.unlink(fn)
+
+    # Import
+    __import__(module_name)
+    return sys.modules[module_name]
+
+
+#
+# Unittest convenience
+#
+
+
+class F2PyTest:
+    code = None
+    sources = None
+    options = []
+    skip = []
+    only = []
+    suffix = ".f"
+    module = None
+
+    @property
+    def module_name(self):
+        cls = type(self)
+        return f'_{cls.__module__.rsplit(".",1)[-1]}_{cls.__name__}_ext_module'
+
+    def setup_method(self):
+        if sys.platform == "win32":
+            pytest.skip("Fails with MinGW64 Gfortran (Issue #9673)")
+
+        if self.module is not None:
+            return
+
+        # Check compiler availability first
+        if not has_c_compiler():
+            pytest.skip("No C compiler available")
+
+        codes = []
+        if self.sources:
+            codes.extend(self.sources)
+        if self.code is not None:
+
codes.append(self.suffix) + + needs_f77 = False + needs_f90 = False + needs_pyf = False + for fn in codes: + if str(fn).endswith(".f"): + needs_f77 = True + elif str(fn).endswith(".f90"): + needs_f90 = True + elif str(fn).endswith(".pyf"): + needs_pyf = True + if needs_f77 and not has_f77_compiler(): + pytest.skip("No Fortran 77 compiler available") + if needs_f90 and not has_f90_compiler(): + pytest.skip("No Fortran 90 compiler available") + if needs_pyf and not (has_f90_compiler() or has_f77_compiler()): + pytest.skip("No Fortran compiler available") + + # Build the module + if self.code is not None: + self.module = build_code( + self.code, + options=self.options, + skip=self.skip, + only=self.only, + suffix=self.suffix, + module_name=self.module_name, + ) + + if self.sources is not None: + self.module = build_module( + self.sources, + options=self.options, + skip=self.skip, + only=self.only, + module_name=self.module_name, + ) + + +# +# Helper functions +# + + +def getpath(*a): + # Package root + d = Path(numpy.f2py.__file__).parent.resolve() + return d.joinpath(*a) + + +@contextlib.contextmanager +def switchdir(path): + curpath = Path.cwd() + os.chdir(path) + try: + yield + finally: + os.chdir(curpath) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/use_rules.py b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/use_rules.py new file mode 100644 index 00000000..808b3dd9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/f2py/use_rules.py @@ -0,0 +1,106 @@ +""" +Build 'use others module data' mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +__version__ = "$Revision: 1.3 $"[10:-1] + +f2py_version = 'See `f2py -v`' + + +from .auxfuncs import ( + applyrules, dictappend, gentitle, hasnote, outmess +) + + +usemodule_rules = { + 'body': """ +#begintitle# +static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\ +\t #name# = get_#name#()\\n\\ +Arguments:\\n\\ +#docstr#\"; +extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#); +static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) { +/*#decl#*/ +\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail; +printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#)); +\treturn Py_BuildValue(\"\"); +capi_fail: +\treturn NULL; +} +""", + 'method': '\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},', + 'need': ['F_MODFUNC'] +} + +################ + + +def buildusevars(m, r): + ret = {} + outmess( + '\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n' % (m['name'])) + varsmap = {} + revmap = {} + if 'map' in r: + for k in r['map'].keys(): + if r['map'][k] in revmap: + outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % ( + r['map'][k], k, revmap[r['map'][k]])) + else: + revmap[r['map'][k]] = k + if 'only' in r and r['only']: + for v in r['map'].keys(): + if r['map'][v] in m['vars']: + + if revmap[r['map'][v]] == v: + varsmap[v] = r['map'][v] + else: + outmess('\t\t\tIgnoring map "%s=>%s". See above.\n' % + (v, r['map'][v])) + else: + outmess( + '\t\t\tNo definition for variable "%s=>%s". 
Skipping.\n' % (v, r['map'][v])) + else: + for v in m['vars'].keys(): + if v in revmap: + varsmap[v] = revmap[v] + else: + varsmap[v] = v + for v in varsmap.keys(): + ret = dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name'])) + return ret + + +def buildusevar(name, realname, vars, usemodulename): + outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % ( + name, realname)) + ret = {} + vrd = {'name': name, + 'realname': realname, + 'REALNAME': realname.upper(), + 'usemodulename': usemodulename, + 'USEMODULENAME': usemodulename.upper(), + 'texname': name.replace('_', '\\_'), + 'begintitle': gentitle('%s=>%s' % (name, realname)), + 'endtitle': gentitle('end of %s=>%s' % (name, realname)), + 'apiname': '#modulename#_use_%s_from_%s' % (realname, usemodulename) + } + nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv', + 5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'} + vrd['texnamename'] = name + for i in nummap.keys(): + vrd['texnamename'] = vrd['texnamename'].replace(repr(i), nummap[i]) + if hasnote(vars[realname]): + vrd['note'] = vars[realname]['note'] + rd = dictappend({}, vrd) + + print(name, realname, vars[realname]) + ret = applyrules(usemodule_rules, rd) + return ret diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/fft/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/__init__.py new file mode 100644 index 00000000..fd5e4758 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/__init__.py @@ -0,0 +1,212 @@ +""" +Discrete Fourier Transform (:mod:`numpy.fft`) +============================================= + +.. currentmodule:: numpy.fft + +The SciPy module `scipy.fft` is a more comprehensive superset +of ``numpy.fft``, which includes only a basic set of routines. + +Standard FFTs +------------- + +.. autosummary:: + :toctree: generated/ + + fft Discrete Fourier transform. + ifft Inverse discrete Fourier transform. + fft2 Discrete Fourier transform in two dimensions. + ifft2 Inverse discrete Fourier transform in two dimensions. + fftn Discrete Fourier transform in N-dimensions. + ifftn Inverse discrete Fourier transform in N dimensions. + +Real FFTs +--------- + +.. autosummary:: + :toctree: generated/ + + rfft Real discrete Fourier transform. + irfft Inverse real discrete Fourier transform. + rfft2 Real discrete Fourier transform in two dimensions. + irfft2 Inverse real discrete Fourier transform in two dimensions. + rfftn Real discrete Fourier transform in N dimensions. + irfftn Inverse real discrete Fourier transform in N dimensions. + +Hermitian FFTs +-------------- + +.. autosummary:: + :toctree: generated/ + + hfft Hermitian discrete Fourier transform. + ihfft Inverse Hermitian discrete Fourier transform. + +Helper routines +--------------- + +.. autosummary:: + :toctree: generated/ + + fftfreq Discrete Fourier Transform sample frequencies. + rfftfreq DFT sample frequencies (for usage with rfft, irfft). + fftshift Shift zero-frequency component to center of spectrum. + ifftshift Inverse of fftshift. + + +Background information +---------------------- + +Fourier analysis is fundamentally a method for expressing a function as a +sum of periodic components, and for recovering the function from those +components. When both the function and its Fourier transform are +replaced with discretized counterparts, it is called the discrete Fourier +transform (DFT). 
The DFT has become a mainstay of numerical computing in +part because of a very fast algorithm for computing it, called the Fast +Fourier Transform (FFT), which was known to Gauss (1805) and was brought +to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ +provide an accessible introduction to Fourier analysis and its +applications. + +Because the discrete Fourier transform separates its input into +components that contribute at discrete frequencies, it has a great number +of applications in digital signal processing, e.g., for filtering, and in +this context the discretized input to the transform is customarily +referred to as a *signal*, which exists in the *time domain*. The output +is called a *spectrum* or *transform* and exists in the *frequency +domain*. + +Implementation details +---------------------- + +There are many ways to define the DFT, varying in the sign of the +exponent, normalization, etc. In this implementation, the DFT is defined +as + +.. math:: + A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} + \\qquad k = 0,\\ldots,n-1. + +The DFT is in general defined for complex inputs and outputs, and a +single-frequency component at linear frequency :math:`f` is +represented by a complex exponential +:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` +is the sampling interval. + +The values in the result follow so-called "standard" order: If ``A = +fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of +the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` +contains the positive-frequency terms, and ``A[n/2+1:]`` contains the +negative-frequency terms, in order of decreasingly negative frequency. +For an even number of input points, ``A[n/2]`` represents both positive and +negative Nyquist frequency, and is also purely real for real input. For +an odd number of input points, ``A[(n-1)/2]`` contains the largest positive +frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. +The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies +of corresponding elements in the output. The routine +``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the +zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes +that shift. + +When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` +is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. +The phase spectrum is obtained by ``np.angle(A)``. + +The inverse DFT is defined as + +.. math:: + a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} + \\qquad m = 0,\\ldots,n-1. + +It differs from the forward transform by the sign of the exponential +argument and the default normalization by :math:`1/n`. + +Type Promotion +-------------- + +`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and +``complex128`` arrays respectively. For an FFT implementation that does not +promote input arrays, see `scipy.fftpack`. + +Normalization +------------- + +The argument ``norm`` indicates which direction of the pair of direct/inverse +transforms is scaled and with what normalization factor. +The default normalization (``"backward"``) has the direct (forward) transforms +unscaled and the inverse (backward) transforms scaled by :math:`1/n`. 
It is +possible to obtain unitary transforms by setting the keyword argument ``norm`` +to ``"ortho"`` so that both direct and inverse transforms are scaled by +:math:`1/\\sqrt{n}`. Finally, setting the keyword argument ``norm`` to +``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse +transforms unscaled (i.e. exactly opposite to the default ``"backward"``). +`None` is an alias of the default option ``"backward"`` for backward +compatibility. + +Real and Hermitian transforms +----------------------------- + +When the input is purely real, its transform is Hermitian, i.e., the +component at frequency :math:`f_k` is the complex conjugate of the +component at frequency :math:`-f_k`, which means that for real +inputs there is no information in the negative frequency components that +is not already available from the positive frequency components. +The family of `rfft` functions is +designed to operate on real inputs, and exploits this symmetry by +computing only the positive frequency components, up to and including the +Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex +output points. The inverses of this family assumes the same symmetry of +its input, and for an output of ``n`` points uses ``n/2+1`` input points. + +Correspondingly, when the spectrum is purely real, the signal is +Hermitian. The `hfft` family of functions exploits this symmetry by +using ``n/2+1`` complex points in the input (time) domain for ``n`` real +points in the frequency domain. + +In higher dimensions, FFTs are used, e.g., for image analysis and +filtering. The computational efficiency of the FFT means that it can +also be a faster way to compute large convolutions, using the property +that a convolution in the time domain is equivalent to a point-by-point +multiplication in the frequency domain. + +Higher dimensions +----------------- + +In two dimensions, the DFT is defined as + +.. math:: + A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1} + a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\} + \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1, + +which extends in the obvious way to higher dimensions, and the inverses +in higher dimensions also extend in the same way. + +References +---------- + +.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. + +.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P., + 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. + 12-13. Cambridge Univ. Press, Cambridge, UK. + +Examples +-------- + +For examples, see the various functions. + +""" + +from . 
import _pocketfft, helper +from ._pocketfft import * +from .helper import * + +__all__ = _pocketfft.__all__.copy() +__all__ += helper.__all__ + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/fft/__init__.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/__init__.pyi new file mode 100644 index 00000000..5518aac1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/__init__.pyi @@ -0,0 +1,29 @@ +from numpy._pytesttester import PytestTester + +from numpy.fft._pocketfft import ( + fft as fft, + ifft as ifft, + rfft as rfft, + irfft as irfft, + hfft as hfft, + ihfft as ihfft, + rfftn as rfftn, + irfftn as irfftn, + rfft2 as rfft2, + irfft2 as irfft2, + fft2 as fft2, + ifft2 as ifft2, + fftn as fftn, + ifftn as ifftn, +) + +from numpy.fft.helper import ( + fftshift as fftshift, + ifftshift as ifftshift, + fftfreq as fftfreq, + rfftfreq as rfftfreq, +) + +__all__: list[str] +__path__: list[str] +test: PytestTester diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/fft/_pocketfft.py b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/_pocketfft.py new file mode 100644 index 00000000..ad69f7c8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/_pocketfft.py @@ -0,0 +1,1424 @@ +""" +Discrete Fourier Transforms + +Routines in this module: + +fft(a, n=None, axis=-1, norm="backward") +ifft(a, n=None, axis=-1, norm="backward") +rfft(a, n=None, axis=-1, norm="backward") +irfft(a, n=None, axis=-1, norm="backward") +hfft(a, n=None, axis=-1, norm="backward") +ihfft(a, n=None, axis=-1, norm="backward") +fftn(a, s=None, axes=None, norm="backward") +ifftn(a, s=None, axes=None, norm="backward") +rfftn(a, s=None, axes=None, norm="backward") +irfftn(a, s=None, axes=None, norm="backward") +fft2(a, s=None, axes=(-2,-1), norm="backward") +ifft2(a, s=None, axes=(-2, -1), norm="backward") +rfft2(a, s=None, axes=(-2,-1), norm="backward") +irfft2(a, s=None, axes=(-2, -1), norm="backward") + +i = inverse transform +r = transform of purely real data +h = Hermite transform +n = n-dimensional transform +2 = 2-dimensional transform +(Note: 2D routines are just nD routines with different default +behavior.) + +""" +__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', + 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn'] + +import functools + +from numpy.core import asarray, zeros, swapaxes, conjugate, take, sqrt +from . import _pocketfft_internal as pfi +from numpy.core.multiarray import normalize_axis_index +from numpy.core import overrides + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy.fft') + + +# `inv_norm` is a float by which the result of the transform needs to be +# divided. This replaces the original, more intuitive 'fct` parameter to avoid +# divisions by zero (or alternatively additional checks) in the case of +# zero-length axes during its computation. 
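+# An added illustration of the convention above: for a length-8 forward
+# transform with norm="ortho", the caller passes inv_norm = sqrt(8) and
+# _raw_fft scales the result by fct = 1/inv_norm, so no 1/n quotient is
+# formed up front for a potentially zero-length axis.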
+def _raw_fft(a, n, axis, is_real, is_forward, inv_norm): + axis = normalize_axis_index(axis, a.ndim) + if n is None: + n = a.shape[axis] + + fct = 1/inv_norm + + if a.shape[axis] != n: + s = list(a.shape) + index = [slice(None)]*len(s) + if s[axis] > n: + index[axis] = slice(0, n) + a = a[tuple(index)] + else: + index[axis] = slice(0, s[axis]) + s[axis] = n + z = zeros(s, a.dtype.char) + z[tuple(index)] = a + a = z + + if axis == a.ndim-1: + r = pfi.execute(a, is_real, is_forward, fct) + else: + a = swapaxes(a, axis, -1) + r = pfi.execute(a, is_real, is_forward, fct) + r = swapaxes(r, axis, -1) + return r + + +def _get_forward_norm(n, norm): + if n < 1: + raise ValueError(f"Invalid number of FFT data points ({n}) specified.") + + if norm is None or norm == "backward": + return 1 + elif norm == "ortho": + return sqrt(n) + elif norm == "forward": + return n + raise ValueError(f'Invalid norm value {norm}; should be "backward",' + '"ortho" or "forward".') + + +def _get_backward_norm(n, norm): + if n < 1: + raise ValueError(f"Invalid number of FFT data points ({n}) specified.") + + if norm is None or norm == "backward": + return n + elif norm == "ortho": + return sqrt(n) + elif norm == "forward": + return 1 + raise ValueError(f'Invalid norm value {norm}; should be "backward", ' + '"ortho" or "forward".') + + +_SWAP_DIRECTION_MAP = {"backward": "forward", None: "forward", + "ortho": "ortho", "forward": "backward"} + + +def _swap_direction(norm): + try: + return _SWAP_DIRECTION_MAP[norm] + except KeyError: + raise ValueError(f'Invalid norm value {norm}; should be "backward", ' + '"ortho" or "forward".') from None + + +def _fft_dispatcher(a, n=None, axis=None, norm=None): + return (a,) + + +@array_function_dispatch(_fft_dispatcher) +def fft(a, n=None, axis=-1, norm=None): + """ + Compute the one-dimensional discrete Fourier Transform. + + This function computes the one-dimensional *n*-point discrete Fourier + Transform (DFT) with the efficient Fast Fourier Transform (FFT) + algorithm [CT]. + + Parameters + ---------- + a : array_like + Input array, can be complex. + n : int, optional + Length of the transformed axis of the output. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is + used. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See Also + -------- + numpy.fft : for definition of the DFT and conventions used. + ifft : The inverse of `fft`. + fft2 : The two-dimensional FFT. + fftn : The *n*-dimensional FFT. + rfftn : The *n*-dimensional FFT of real input. + fftfreq : Frequency bins for given FFT parameters. + + Notes + ----- + FFT (Fast Fourier Transform) refers to a way the discrete Fourier + Transform (DFT) can be calculated efficiently, by using symmetries in the + calculated terms. 
The symmetry is highest when `n` is a power of 2, and
+    the transform is therefore most efficient for these sizes.
+
+    The DFT is defined, with the conventions used in this implementation, in
+    the documentation for the `numpy.fft` module.
+
+    References
+    ----------
+    .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
+            machine calculation of complex Fourier series," *Math. Comput.*
+            19: 297-301.
+
+    Examples
+    --------
+    >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
+    array([-2.33486982e-16+1.14423775e-17j,  8.00000000e+00-1.25557246e-15j,
+            2.33486982e-16+2.33486982e-16j,  0.00000000e+00+1.22464680e-16j,
+           -1.14423775e-17+2.33486982e-16j,  0.00000000e+00+5.20784380e-16j,
+            1.14423775e-17+1.14423775e-17j,  0.00000000e+00+1.22464680e-16j])
+
+    In this example, real input has an FFT which is Hermitian, i.e., symmetric
+    in the real part and anti-symmetric in the imaginary part, as described in
+    the `numpy.fft` documentation:
+
+    >>> import matplotlib.pyplot as plt
+    >>> t = np.arange(256)
+    >>> sp = np.fft.fft(np.sin(t))
+    >>> freq = np.fft.fftfreq(t.shape[-1])
+    >>> plt.plot(freq, sp.real, freq, sp.imag)
+    [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.show()
+
+    """
+    a = asarray(a)
+    if n is None:
+        n = a.shape[axis]
+    inv_norm = _get_forward_norm(n, norm)
+    output = _raw_fft(a, n, axis, False, True, inv_norm)
+    return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def ifft(a, n=None, axis=-1, norm=None):
+    """
+    Compute the one-dimensional inverse discrete Fourier Transform.
+
+    This function computes the inverse of the one-dimensional *n*-point
+    discrete Fourier transform computed by `fft`. In other words,
+    ``ifft(fft(a)) == a`` to within numerical accuracy.
+    For a general description of the algorithm and definitions,
+    see `numpy.fft`.
+
+    The input should be ordered in the same way as is returned by `fft`,
+    i.e.,
+
+    * ``a[0]`` should contain the zero frequency term,
+    * ``a[1:n//2]`` should contain the positive-frequency terms,
+    * ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
+      increasing order starting from the most negative frequency.
+
+    For an even number of input points, ``A[n//2]`` represents the sum of
+    the values at the positive and negative Nyquist frequencies, as the two
+    are aliased together. See `numpy.fft` for details.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    n : int, optional
+        Length of the transformed axis of the output.
+        If `n` is smaller than the length of the input, the input is cropped.
+        If it is larger, the input is padded with zeros. If `n` is not given,
+        the length of the input along the axis specified by `axis` is used.
+        See notes about padding issues.
+    axis : int, optional
+        Axis over which to compute the inverse DFT. If not given, the last
+        axis is used.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+        The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is not a valid axis of `a`.
+
+    See Also
+    --------
+    numpy.fft : An introduction, with definitions and general explanations.
+    fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse.
+    ifft2 : The two-dimensional inverse FFT.
+    ifftn : The n-dimensional inverse FFT.
+
+    Notes
+    -----
+    If the input parameter `n` is larger than the size of the input, the input
+    is padded by appending zeros at the end. Even though this is the common
+    approach, it might lead to surprising results. If a different padding is
+    desired, it must be performed before calling `ifft`.
+
+    Examples
+    --------
+    >>> np.fft.ifft([0, 4, 0, 0])
+    array([ 1.+0.j,  0.+1.j, -1.+0.j,  0.-1.j]) # may vary
+
+    Create and plot a band-limited signal with random phases:
+
+    >>> import matplotlib.pyplot as plt
+    >>> t = np.arange(400)
+    >>> n = np.zeros((400,), dtype=complex)
+    >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
+    >>> s = np.fft.ifft(n)
+    >>> plt.plot(t, s.real, label='real')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.plot(t, s.imag, '--', label='imaginary')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.legend()
+    <matplotlib.legend.Legend object at 0x...>
+    >>> plt.show()
+
+    """
+    a = asarray(a)
+    if n is None:
+        n = a.shape[axis]
+    inv_norm = _get_backward_norm(n, norm)
+    output = _raw_fft(a, n, axis, False, False, inv_norm)
+    return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def rfft(a, n=None, axis=-1, norm=None):
+    """
+    Compute the one-dimensional discrete Fourier Transform for real input.
+
+    This function computes the one-dimensional *n*-point discrete Fourier
+    Transform (DFT) of a real-valued array by means of an efficient algorithm
+    called the Fast Fourier Transform (FFT).
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    n : int, optional
+        Number of points along transformation axis in the input to use.
+        If `n` is smaller than the length of the input, the input is cropped.
+        If it is larger, the input is padded with zeros. If `n` is not given,
+        the length of the input along the axis specified by `axis` is used.
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last axis is
+        used.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+        The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+        If `n` is even, the length of the transformed axis is ``(n/2)+1``.
+        If `n` is odd, the length is ``(n+1)/2``.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is not a valid axis of `a`.
+
+    See Also
+    --------
+    numpy.fft : For definition of the DFT and conventions used.
+    irfft : The inverse of `rfft`.
+    fft : The one-dimensional FFT of general (complex) input.
+    fftn : The *n*-dimensional FFT.
+    rfftn : The *n*-dimensional FFT of real input.
+
+    Notes
+    -----
+    When the DFT is computed for purely real input, the output is
+    Hermitian-symmetric, i.e. the negative frequency terms are just the
+    complex conjugates of the corresponding positive-frequency terms, and the
+    negative-frequency terms are therefore redundant. This function does not
+    compute the negative frequency terms, and the length of the transformed
+    axis of the output is therefore ``n//2 + 1``.
+
+    When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
+    the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
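+
+    For example (an illustrative addition; scalar display may vary across
+    numpy versions):
+
+    >>> np.fft.rfft(np.ones(4))[0]  # the zero-frequency term is the sum
+    (4+0j)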
+ + If `n` is even, ``A[-1]`` contains the term representing both positive + and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely + real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains + the largest positive frequency (fs/2*(n-1)/n), and is complex in the + general case. + + If the input `a` contains an imaginary part, it is silently discarded. + + Examples + -------- + >>> np.fft.fft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary + >>> np.fft.rfft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary + + Notice how the final element of the `fft` output is the complex conjugate + of the second element, for real input. For `rfft`, this symmetry is + exploited to compute only the non-negative frequency terms. + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + inv_norm = _get_forward_norm(n, norm) + output = _raw_fft(a, n, axis, True, True, inv_norm) + return output + + +@array_function_dispatch(_fft_dispatcher) +def irfft(a, n=None, axis=-1, norm=None): + """ + Computes the inverse of `rfft`. + + This function computes the inverse of the one-dimensional *n*-point + discrete Fourier Transform of real input computed by `rfft`. + In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical + accuracy. (See Notes below for why ``len(a)`` is necessary here.) + + The input is expected to be in the form returned by `rfft`, i.e. the + real zero-frequency term followed by the complex positive frequency terms + in order of increasing frequency. Since the discrete Fourier Transform of + real input is Hermitian-symmetric, the negative frequency terms are taken + to be the complex conjugates of the corresponding positive frequency terms. + + Parameters + ---------- + a : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. + For `n` output points, ``n//2+1`` input points are necessary. If the + input is longer than this, it is cropped. If it is shorter than this, + it is padded with zeros. If `n` is not given, it is taken to be + ``2*(m-1)`` where ``m`` is the length of the input along the axis + specified by `axis`. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*(m-1)`` where ``m`` is the length of the transformed axis of the + input. To get an odd number of output points, `n` must be specified. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See Also + -------- + numpy.fft : For definition of the DFT and conventions used. + rfft : The one-dimensional FFT of real input, of which `irfft` is inverse. + fft : The one-dimensional FFT. + irfft2 : The inverse of the two-dimensional FFT of real input. + irfftn : The inverse of the *n*-dimensional FFT of real input. 
+ + Notes + ----- + Returns the real valued `n`-point inverse discrete Fourier transform + of `a`, where `a` contains the non-negative frequency terms of a + Hermitian-symmetric sequence. `n` is the length of the result, not the + input. + + If you specify an `n` such that `a` must be zero-padded or truncated, the + extra/removed values will be added/removed at high frequencies. One can + thus resample a series to `m` points via Fourier interpolation by: + ``a_resamp = irfft(rfft(a), m)``. + + The correct interpretation of the hermitian input depends on the length of + the original data, as given by `n`. This is because each input shape could + correspond to either an odd or even length signal. By default, `irfft` + assumes an even output length which puts the last entry at the Nyquist + frequency; aliasing with its symmetric counterpart. By Hermitian symmetry, + the value is thus treated as purely real. To avoid losing information, the + correct length of the real input **must** be given. + + Examples + -------- + >>> np.fft.ifft([1, -1j, -1, 1j]) + array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary + >>> np.fft.irfft([1, -1j, -1]) + array([0., 1., 0., 0.]) + + Notice how the last term in the input to the ordinary `ifft` is the + complex conjugate of the second term, and the output has zero imaginary + part everywhere. When calling `irfft`, the negative frequencies are not + specified, and the output array is purely real. + + """ + a = asarray(a) + if n is None: + n = (a.shape[axis] - 1) * 2 + inv_norm = _get_backward_norm(n, norm) + output = _raw_fft(a, n, axis, True, False, inv_norm) + return output + + +@array_function_dispatch(_fft_dispatcher) +def hfft(a, n=None, axis=-1, norm=None): + """ + Compute the FFT of a signal that has Hermitian symmetry, i.e., a real + spectrum. + + Parameters + ---------- + a : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. For `n` output + points, ``n//2 + 1`` input points are necessary. If the input is + longer than this, it is cropped. If it is shorter than this, it is + padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)`` + where ``m`` is the length of the input along the axis specified by + `axis`. + axis : int, optional + Axis over which to compute the FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*m - 2`` where ``m`` is the length of the transformed axis of + the input. To get an odd number of output points, `n` must be + specified, for instance as ``2*m - 1`` in the typical case, + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. + + See also + -------- + rfft : Compute the one-dimensional FFT for real input. + ihfft : The inverse of `hfft`. + + Notes + ----- + `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the + opposite case: here the signal has Hermitian symmetry in the time + domain and is real in the frequency domain. 
So here it's `hfft` for + which you must supply the length of the result if it is to be odd. + + * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error, + * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error. + + The correct interpretation of the hermitian input depends on the length of + the original data, as given by `n`. This is because each input shape could + correspond to either an odd or even length signal. By default, `hfft` + assumes an even output length which puts the last entry at the Nyquist + frequency; aliasing with its symmetric counterpart. By Hermitian symmetry, + the value is thus treated as purely real. To avoid losing information, the + shape of the full signal **must** be given. + + Examples + -------- + >>> signal = np.array([1, 2, 3, 4, 3, 2]) + >>> np.fft.fft(signal) + array([15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) # may vary + >>> np.fft.hfft(signal[:4]) # Input first half of signal + array([15., -4., 0., -1., 0., -4.]) + >>> np.fft.hfft(signal, 6) # Input entire signal and truncate + array([15., -4., 0., -1., 0., -4.]) + + + >>> signal = np.array([[1, 1.j], [-1.j, 2]]) + >>> np.conj(signal.T) - signal # check Hermitian symmetry + array([[ 0.-0.j, -0.+0.j], # may vary + [ 0.+0.j, 0.-0.j]]) + >>> freq_spectrum = np.fft.hfft(signal) + >>> freq_spectrum + array([[ 1., 1.], + [ 2., -2.]]) + + """ + a = asarray(a) + if n is None: + n = (a.shape[axis] - 1) * 2 + new_norm = _swap_direction(norm) + output = irfft(conjugate(a), n, axis, norm=new_norm) + return output + + +@array_function_dispatch(_fft_dispatcher) +def ihfft(a, n=None, axis=-1, norm=None): + """ + Compute the inverse FFT of a signal that has Hermitian symmetry. + + Parameters + ---------- + a : array_like + Input array. + n : int, optional + Length of the inverse FFT, the number of points along + transformation axis in the input to use. If `n` is smaller than + the length of the input, the input is cropped. If it is larger, + the input is padded with zeros. If `n` is not given, the length of + the input along the axis specified by `axis` is used. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is ``n//2 + 1``. + + See also + -------- + hfft, irfft + + Notes + ----- + `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the + opposite case: here the signal has Hermitian symmetry in the time + domain and is real in the frequency domain. So here it's `hfft` for + which you must supply the length of the result if it is to be odd: + + * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error, + * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error. 
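+
+    A quick numerical check of the even-length identity above (an
+    illustrative addition):
+
+    >>> a = np.array([1., 2., 3., 4.])
+    >>> np.allclose(np.fft.ihfft(np.fft.hfft(a, 2*len(a) - 2)), a)
+    True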
+ + Examples + -------- + >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) + >>> np.fft.ifft(spectrum) + array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary + >>> np.fft.ihfft(spectrum) + array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + new_norm = _swap_direction(norm) + output = conjugate(rfft(a, n, axis, norm=new_norm)) + return output + + +def _cook_nd_args(a, s=None, axes=None, invreal=0): + if s is None: + shapeless = 1 + if axes is None: + s = list(a.shape) + else: + s = take(a.shape, axes) + else: + shapeless = 0 + s = list(s) + if axes is None: + axes = list(range(-len(s), 0)) + if len(s) != len(axes): + raise ValueError("Shape and axes have different lengths.") + if invreal and shapeless: + s[-1] = (a.shape[axes[-1]] - 1) * 2 + return s, axes + + +def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None): + a = asarray(a) + s, axes = _cook_nd_args(a, s, axes) + itl = list(range(len(axes))) + itl.reverse() + for ii in itl: + a = function(a, n=s[ii], axis=axes[ii], norm=norm) + return a + + +def _fftn_dispatcher(a, s=None, axes=None, norm=None): + return (a,) + + +@array_function_dispatch(_fftn_dispatcher) +def fftn(a, s=None, axes=None, norm=None): + """ + Compute the N-dimensional discrete Fourier Transform. + + This function computes the *N*-dimensional discrete Fourier Transform over + any number of axes in an *M*-dimensional array by means of the Fast Fourier + Transform (FFT). + + Parameters + ---------- + a : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``fft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + if `s` is not given, the shape of the input along the axes specified + by `axes` is used. + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + Repeated indices in `axes` means that the transform over that axis is + performed multiple times. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` and `a`, + as explained in the parameters section above. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + numpy.fft : Overall view of discrete Fourier transforms, with definitions + and conventions used. + ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT. + fft : The one-dimensional FFT, with definitions and conventions used. + rfftn : The *n*-dimensional FFT of real input. + fft2 : The two-dimensional FFT. 
+    fftshift : Shifts zero-frequency terms to the centre of the array.
+
+    Notes
+    -----
+    The output, analogously to `fft`, contains the term for zero frequency in
+    the low-order corner of all axes, the positive frequency terms in the
+    first half of all axes, the term for the Nyquist frequency in the middle
+    of all axes and the negative frequency terms in the second half of all
+    axes, in order of decreasingly negative frequency.
+
+    See `numpy.fft` for details, definitions and conventions used.
+
+    Examples
+    --------
+    >>> a = np.mgrid[:3, :3, :3][0]
+    >>> np.fft.fftn(a, axes=(1, 2))
+    array([[[ 0.+0.j,  0.+0.j,  0.+0.j], # may vary
+            [ 0.+0.j,  0.+0.j,  0.+0.j],
+            [ 0.+0.j,  0.+0.j,  0.+0.j]],
+           [[ 9.+0.j,  0.+0.j,  0.+0.j],
+            [ 0.+0.j,  0.+0.j,  0.+0.j],
+            [ 0.+0.j,  0.+0.j,  0.+0.j]],
+           [[18.+0.j,  0.+0.j,  0.+0.j],
+            [ 0.+0.j,  0.+0.j,  0.+0.j],
+            [ 0.+0.j,  0.+0.j,  0.+0.j]]])
+    >>> np.fft.fftn(a, (2, 2), axes=(0, 1))
+    array([[[ 2.+0.j,  2.+0.j,  2.+0.j], # may vary
+            [ 0.+0.j,  0.+0.j,  0.+0.j]],
+           [[-2.+0.j, -2.+0.j, -2.+0.j],
+            [ 0.+0.j,  0.+0.j,  0.+0.j]]])
+
+    >>> import matplotlib.pyplot as plt
+    >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
+    ...                      2 * np.pi * np.arange(200) / 34)
+    >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
+    >>> FS = np.fft.fftn(S)
+    >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
+    <matplotlib.image.AxesImage object at 0x...>
+    >>> plt.show()
+
+    """
+    return _raw_fftnd(a, s, axes, fft, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def ifftn(a, s=None, axes=None, norm=None):
+    """
+    Compute the N-dimensional inverse discrete Fourier Transform.
+
+    This function computes the inverse of the N-dimensional discrete
+    Fourier Transform over any number of axes in an M-dimensional array by
+    means of the Fast Fourier Transform (FFT). In other words,
+    ``ifftn(fftn(a)) == a`` to within numerical accuracy.
+    For a description of the definitions and conventions used, see
+    `numpy.fft`.
+
+    The input, analogously to `ifft`, should be ordered in the same way as is
+    returned by `fftn`, i.e. it should have the term for zero frequency
+    in all axes in the low-order corner, the positive frequency terms in the
+    first half of all axes, the term for the Nyquist frequency in the middle
+    of all axes and the negative frequency terms in the second half of all
+    axes, in order of decreasingly negative frequency.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``ifft(x, n)``.
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used. See notes for issue on `ifft` zero padding.
+    axes : sequence of ints, optional
+        Axes over which to compute the IFFT. If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+        Repeated indices in `axes` mean that the inverse transform over that
+        axis is performed multiple times.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+        The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different lengths.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
+    ifft : The one-dimensional inverse FFT.
+    ifft2 : The two-dimensional inverse FFT.
+    ifftshift : Undoes `fftshift`, shifts zero-frequency terms to the
+        beginning of the array.
+
+    Notes
+    -----
+    See `numpy.fft` for definitions and conventions used.
+
+    Zero-padding, analogously with `ifft`, is performed by appending zeros to
+    the input along the specified dimension. Although this is the common
+    approach, it might lead to surprising results. If another form of zero
+    padding is desired, it must be performed before `ifftn` is called.
+
+    Examples
+    --------
+    >>> a = np.eye(4)
+    >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
+    array([[1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j], # may vary
+           [0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j],
+           [0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
+           [0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j]])
+
+    Create and plot an image with band-limited frequency content:
+
+    >>> import matplotlib.pyplot as plt
+    >>> n = np.zeros((200,200), dtype=complex)
+    >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
+    >>> im = np.fft.ifftn(n).real
+    >>> plt.imshow(im)
+    <matplotlib.image.AxesImage object at 0x...>
+    >>> plt.show()
+
+    """
+    return _raw_fftnd(a, s, axes, ifft, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def fft2(a, s=None, axes=(-2, -1), norm=None):
+    """
+    Compute the 2-dimensional discrete Fourier Transform.
+
+    This function computes the *n*-dimensional discrete Fourier Transform
+    over any axes in an *M*-dimensional array by means of the
+    Fast Fourier Transform (FFT). By default, the transform is computed over
+    the last two axes of the input array, i.e., a 2-dimensional FFT.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``fft(x, n)``.
+        Along each axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last two
+        axes are used. A repeated index in `axes` means the transform over
+        that axis is performed multiple times. A one-element sequence means
+        that a one-dimensional FFT is performed.
+    norm : {"backward", "ortho", "forward"}, optional
+        .. versionadded:: 1.10.0
+
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+        The "backward", "forward" values were added.
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or the last two axes if `axes` is not given.
+ + Raises + ------ + ValueError + If `s` and `axes` have different length, or `axes` not given and + ``len(s) != 2``. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + numpy.fft : Overall view of discrete Fourier transforms, with definitions + and conventions used. + ifft2 : The inverse two-dimensional FFT. + fft : The one-dimensional FFT. + fftn : The *n*-dimensional FFT. + fftshift : Shifts zero-frequency terms to the center of the array. + For two-dimensional input, swaps first and third quadrants, and second + and fourth quadrants. + + Notes + ----- + `fft2` is just `fftn` with a different default for `axes`. + + The output, analogously to `fft`, contains the term for zero frequency in + the low-order corner of the transformed axes, the positive frequency terms + in the first half of these axes, the term for the Nyquist frequency in the + middle of the axes and the negative frequency terms in the second half of + the axes, in order of decreasingly negative frequency. + + See `fftn` for details and a plotting example, and `numpy.fft` for + definitions and conventions used. + + + Examples + -------- + >>> a = np.mgrid[:5, :5][0] + >>> np.fft.fft2(a) + array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary + 0. +0.j , 0. +0.j ], + [-12.5+17.20477401j, 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5 +4.0614962j , 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5 -4.0614962j , 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5-17.20477401j, 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ]]) + + """ + return _raw_fftnd(a, s, axes, fft, norm) + + +@array_function_dispatch(_fftn_dispatcher) +def ifft2(a, s=None, axes=(-2, -1), norm=None): + """ + Compute the 2-dimensional inverse discrete Fourier Transform. + + This function computes the inverse of the 2-dimensional discrete Fourier + Transform over any number of axes in an M-dimensional array by means of + the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a`` + to within numerical accuracy. By default, the inverse transform is + computed over the last two axes of the input array. + + The input, analogously to `ifft`, should be ordered in the same way as is + returned by `fft2`, i.e. it should have the term for zero frequency + in the low-order corner of the two axes, the positive frequency terms in + the first half of these axes, the term for the Nyquist frequency in the + middle of the axes and the negative frequency terms in the second half of + both axes, in order of decreasingly negative frequency. + + Parameters + ---------- + a : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each axis) of the output (``s[0]`` refers to axis 0, + ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``. + Along each axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + if `s` is not given, the shape of the input along the axes specified + by `axes` is used. See notes for issue on `ifft` zero padding. + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last two + axes are used. A repeated index in `axes` means the transform over + that axis is performed multiple times. A one-element sequence means + that a one-dimensional FFT is performed. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". 
+ Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or the last two axes if `axes` is not given. + + Raises + ------ + ValueError + If `s` and `axes` have different length, or `axes` not given and + ``len(s) != 2``. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + numpy.fft : Overall view of discrete Fourier transforms, with definitions + and conventions used. + fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse. + ifftn : The inverse of the *n*-dimensional FFT. + fft : The one-dimensional FFT. + ifft : The one-dimensional inverse FFT. + + Notes + ----- + `ifft2` is just `ifftn` with a different default for `axes`. + + See `ifftn` for details and a plotting example, and `numpy.fft` for + definition and conventions used. + + Zero-padding, analogously with `ifft`, is performed by appending zeros to + the input along the specified dimension. Although this is the common + approach, it might lead to surprising results. If another form of zero + padding is desired, it must be performed before `ifft2` is called. + + Examples + -------- + >>> a = 4 * np.eye(4) + >>> np.fft.ifft2(a) + array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary + [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j], + [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], + [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]]) + + """ + return _raw_fftnd(a, s, axes, ifft, norm) + + +@array_function_dispatch(_fftn_dispatcher) +def rfftn(a, s=None, axes=None, norm=None): + """ + Compute the N-dimensional discrete Fourier Transform for real input. + + This function computes the N-dimensional discrete Fourier Transform over + any number of axes in an M-dimensional real array by means of the Fast + Fourier Transform (FFT). By default, all axes are transformed, with the + real transform performed over the last axis, while the remaining + transforms are complex. + + Parameters + ---------- + a : array_like + Input array, taken to be real. + s : sequence of ints, optional + Shape (length along each transformed axis) to use from the input. + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + The final element of `s` corresponds to `n` for ``rfft(x, n)``, while + for the remaining axes, it corresponds to `n` for ``fft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + if `s` is not given, the shape of the input along the axes specified + by `axes` is used. + axes : sequence of ints, optional + Axes over which to compute the FFT. If not given, the last ``len(s)`` + axes are used, or all axes if `s` is also not specified. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` and `a`, + as explained in the parameters section above. 
+ The length of the last axis transformed will be ``s[-1]//2+1``, + while the remaining transformed axes will have lengths according to + `s`, or unchanged from the input. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT + of real input. + fft : The one-dimensional FFT, with definitions and conventions used. + rfft : The one-dimensional FFT of real input. + fftn : The n-dimensional FFT. + rfft2 : The two-dimensional FFT of real input. + + Notes + ----- + The transform for real input is performed over the last transformation + axis, as by `rfft`, then the transform over the remaining axes is + performed as by `fftn`. The order of the output is as for `rfft` for the + final transformation axis, and as for `fftn` for the remaining + transformation axes. + + See `fft` for details, definitions and conventions used. + + Examples + -------- + >>> a = np.ones((2, 2, 2)) + >>> np.fft.rfftn(a) + array([[[8.+0.j, 0.+0.j], # may vary + [0.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + + >>> np.fft.rfftn(a, axes=(2, 0)) + array([[[4.+0.j, 0.+0.j], # may vary + [4.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + + """ + a = asarray(a) + s, axes = _cook_nd_args(a, s, axes) + a = rfft(a, s[-1], axes[-1], norm) + for ii in range(len(axes)-1): + a = fft(a, s[ii], axes[ii], norm) + return a + + +@array_function_dispatch(_fftn_dispatcher) +def rfft2(a, s=None, axes=(-2, -1), norm=None): + """ + Compute the 2-dimensional FFT of a real array. + + Parameters + ---------- + a : array + Input array, taken to be real. + s : sequence of ints, optional + Shape of the FFT. + axes : sequence of ints, optional + Axes over which to compute the FFT. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + Returns + ------- + out : ndarray + The result of the real 2-D FFT. + + See Also + -------- + rfftn : Compute the N-dimensional discrete Fourier Transform for real + input. + + Notes + ----- + This is really just `rfftn` with different default behavior. + For more details see `rfftn`. + + Examples + -------- + >>> a = np.mgrid[:5, :5][0] + >>> np.fft.rfft2(a) + array([[ 50. +0.j , 0. +0.j , 0. +0.j ], + [-12.5+17.20477401j, 0. +0.j , 0. +0.j ], + [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ], + [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ], + [-12.5-17.20477401j, 0. +0.j , 0. +0.j ]]) + """ + return rfftn(a, s, axes, norm) + + +@array_function_dispatch(_fftn_dispatcher) +def irfftn(a, s=None, axes=None, norm=None): + """ + Computes the inverse of `rfftn`. + + This function computes the inverse of the N-dimensional discrete + Fourier Transform for real input over any number of axes in an + M-dimensional array by means of the Fast Fourier Transform (FFT). In + other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical + accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`, + and for the same reason.) + + The input should be ordered in the same way as is returned by `rfftn`, + i.e. as for `irfft` for the final transformation axis, and as for `ifftn` + along all the other axes. 
+ + Parameters + ---------- + a : array_like + Input array. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the + number of input points used along this axis, except for the last axis, + where ``s[-1]//2+1`` points of the input are used. + Along any axis, if the shape indicated by `s` is smaller than that of + the input, the input is cropped. If it is larger, the input is padded + with zeros. If `s` is not given, the shape of the input along the axes + specified by axes is used. Except for the last axis which is taken to + be ``2*(m-1)`` where ``m`` is the length of the input along that axis. + axes : sequence of ints, optional + Axes over which to compute the inverse FFT. If not given, the last + `len(s)` axes are used, or all axes if `s` is also not specified. + Repeated indices in `axes` means that the inverse transform over that + axis is performed multiple times. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axes + indicated by `axes`, or by a combination of `s` or `a`, + as explained in the parameters section above. + The length of each transformed axis is as given by the corresponding + element of `s`, or the length of the input in every axis except for the + last one if `s` is not given. In the final transformed axis the length + of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the + length of the final transformed axis of the input. To get an odd + number of output points in the final axis, `s` must be specified. + + Raises + ------ + ValueError + If `s` and `axes` have different length. + IndexError + If an element of `axes` is larger than than the number of axes of `a`. + + See Also + -------- + rfftn : The forward n-dimensional FFT of real input, + of which `ifftn` is the inverse. + fft : The one-dimensional FFT, with definitions and conventions used. + irfft : The inverse of the one-dimensional FFT of real input. + irfft2 : The inverse of the two-dimensional FFT of real input. + + Notes + ----- + See `fft` for definitions and conventions used. + + See `rfft` for definitions and conventions used for real input. + + The correct interpretation of the hermitian input depends on the shape of + the original data, as given by `s`. This is because each input shape could + correspond to either an odd or even length signal. By default, `irfftn` + assumes an even output length which puts the last entry at the Nyquist + frequency; aliasing with its symmetric counterpart. When performing the + final complex to real transform, the last value is thus treated as purely + real. To avoid losing information, the correct shape of the real input + **must** be given. 
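+
+    For instance (an illustrative addition), supplying the original shape
+    recovers it exactly:
+
+    >>> x = np.ones((3, 5))
+    >>> np.fft.irfftn(np.fft.rfftn(x), x.shape).shape
+    (3, 5)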
+ + Examples + -------- + >>> a = np.zeros((3, 2, 2)) + >>> a[0, 0, 0] = 3 * 2 * 2 + >>> np.fft.irfftn(a) + array([[[1., 1.], + [1., 1.]], + [[1., 1.], + [1., 1.]], + [[1., 1.], + [1., 1.]]]) + + """ + a = asarray(a) + s, axes = _cook_nd_args(a, s, axes, invreal=1) + for ii in range(len(axes)-1): + a = ifft(a, s[ii], axes[ii], norm) + a = irfft(a, s[-1], axes[-1], norm) + return a + + +@array_function_dispatch(_fftn_dispatcher) +def irfft2(a, s=None, axes=(-2, -1), norm=None): + """ + Computes the inverse of `rfft2`. + + Parameters + ---------- + a : array_like + The input array + s : sequence of ints, optional + Shape of the real output to the inverse FFT. + axes : sequence of ints, optional + The axes over which to compute the inverse fft. + Default is the last two axes. + norm : {"backward", "ortho", "forward"}, optional + .. versionadded:: 1.10.0 + + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + Returns + ------- + out : ndarray + The result of the inverse real 2-D FFT. + + See Also + -------- + rfft2 : The forward two-dimensional FFT of real input, + of which `irfft2` is the inverse. + rfft : The one-dimensional FFT for real input. + irfft : The inverse of the one-dimensional FFT of real input. + irfftn : Compute the inverse of the N-dimensional FFT of real input. + + Notes + ----- + This is really `irfftn` with different defaults. + For more details see `irfftn`. + + Examples + -------- + >>> a = np.mgrid[:5, :5][0] + >>> A = np.fft.rfft2(a) + >>> np.fft.irfft2(A, s=a.shape) + array([[0., 0., 0., 0., 0.], + [1., 1., 1., 1., 1.], + [2., 2., 2., 2., 2.], + [3., 3., 3., 3., 3.], + [4., 4., 4., 4., 4.]]) + """ + return irfftn(a, s, axes, norm) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/fft/_pocketfft.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/_pocketfft.pyi new file mode 100644 index 00000000..2bd8b0ba --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/_pocketfft.pyi @@ -0,0 +1,108 @@ +from collections.abc import Sequence +from typing import Literal as L + +from numpy import complex128, float64 +from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co + +_NormKind = L[None, "backward", "ortho", "forward"] + +__all__: list[str] + +def fft( + a: ArrayLike, + n: None | int = ..., + axis: int = ..., + norm: _NormKind = ..., +) -> NDArray[complex128]: ... + +def ifft( + a: ArrayLike, + n: None | int = ..., + axis: int = ..., + norm: _NormKind = ..., +) -> NDArray[complex128]: ... + +def rfft( + a: ArrayLike, + n: None | int = ..., + axis: int = ..., + norm: _NormKind = ..., +) -> NDArray[complex128]: ... + +def irfft( + a: ArrayLike, + n: None | int = ..., + axis: int = ..., + norm: _NormKind = ..., +) -> NDArray[float64]: ... + +# Input array must be compatible with `np.conjugate` +def hfft( + a: _ArrayLikeNumber_co, + n: None | int = ..., + axis: int = ..., + norm: _NormKind = ..., +) -> NDArray[float64]: ... + +def ihfft( + a: ArrayLike, + n: None | int = ..., + axis: int = ..., + norm: _NormKind = ..., +) -> NDArray[complex128]: ... + +def fftn( + a: ArrayLike, + s: None | Sequence[int] = ..., + axes: None | Sequence[int] = ..., + norm: _NormKind = ..., +) -> NDArray[complex128]: ... 
+ +def ifftn( + a: ArrayLike, + s: None | Sequence[int] = ..., + axes: None | Sequence[int] = ..., + norm: _NormKind = ..., +) -> NDArray[complex128]: ... + +def rfftn( + a: ArrayLike, + s: None | Sequence[int] = ..., + axes: None | Sequence[int] = ..., + norm: _NormKind = ..., +) -> NDArray[complex128]: ... + +def irfftn( + a: ArrayLike, + s: None | Sequence[int] = ..., + axes: None | Sequence[int] = ..., + norm: _NormKind = ..., +) -> NDArray[float64]: ... + +def fft2( + a: ArrayLike, + s: None | Sequence[int] = ..., + axes: None | Sequence[int] = ..., + norm: _NormKind = ..., +) -> NDArray[complex128]: ... + +def ifft2( + a: ArrayLike, + s: None | Sequence[int] = ..., + axes: None | Sequence[int] = ..., + norm: _NormKind = ..., +) -> NDArray[complex128]: ... + +def rfft2( + a: ArrayLike, + s: None | Sequence[int] = ..., + axes: None | Sequence[int] = ..., + norm: _NormKind = ..., +) -> NDArray[complex128]: ... + +def irfft2( + a: ArrayLike, + s: None | Sequence[int] = ..., + axes: None | Sequence[int] = ..., + norm: _NormKind = ..., +) -> NDArray[float64]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/fft/_pocketfft_internal.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/_pocketfft_internal.cpython-39-darwin.so new file mode 100755 index 00000000..9451c620 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/_pocketfft_internal.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/fft/helper.py b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/helper.py new file mode 100644 index 00000000..927ee1af --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/helper.py @@ -0,0 +1,221 @@ +""" +Discrete Fourier Transforms - helper.py + +""" +from numpy.core import integer, empty, arange, asarray, roll +from numpy.core.overrides import array_function_dispatch, set_module + +# Created by Pearu Peterson, September 2002 + +__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq'] + +integer_types = (int, integer) + + +def _fftshift_dispatcher(x, axes=None): + return (x,) + + +@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') +def fftshift(x, axes=None): + """ + Shift the zero-frequency component to the center of the spectrum. + + This function swaps half-spaces for all axes listed (defaults to all). + Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to shift. Default is None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + ifftshift : The inverse of `fftshift`. 
+ + Examples + -------- + >>> freqs = np.fft.fftfreq(10, 0.1) + >>> freqs + array([ 0., 1., 2., ..., -3., -2., -1.]) + >>> np.fft.fftshift(freqs) + array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) + + Shift the zero-frequency component only along the second axis: + + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.fftshift(freqs, axes=(1,)) + array([[ 2., 0., 1.], + [-4., 3., 4.], + [-1., -3., -2.]]) + + """ + x = asarray(x) + if axes is None: + axes = tuple(range(x.ndim)) + shift = [dim // 2 for dim in x.shape] + elif isinstance(axes, integer_types): + shift = x.shape[axes] // 2 + else: + shift = [x.shape[ax] // 2 for ax in axes] + + return roll(x, shift, axes) + + +@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') +def ifftshift(x, axes=None): + """ + The inverse of `fftshift`. Although identical for even-length `x`, the + functions differ by one sample for odd-length `x`. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to calculate. Defaults to None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + fftshift : Shift zero-frequency component to the center of the spectrum. + + Examples + -------- + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.ifftshift(np.fft.fftshift(freqs)) + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + + """ + x = asarray(x) + if axes is None: + axes = tuple(range(x.ndim)) + shift = [-(dim // 2) for dim in x.shape] + elif isinstance(axes, integer_types): + shift = -(x.shape[axes] // 2) + else: + shift = [-(x.shape[ax] // 2) for ax in axes] + + return roll(x, shift, axes) + + +@set_module('numpy.fft') +def fftfreq(n, d=1.0): + """ + Return the Discrete Fourier Transform sample frequencies. + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + + Returns + ------- + f : ndarray + Array of length `n` containing the sample frequencies. + + Examples + -------- + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> fourier = np.fft.fft(signal) + >>> n = signal.size + >>> timestep = 0.1 + >>> freq = np.fft.fftfreq(n, d=timestep) + >>> freq + array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25]) + + """ + if not isinstance(n, integer_types): + raise ValueError("n should be an integer") + val = 1.0 / (n * d) + results = empty(n, int) + N = (n-1)//2 + 1 + p1 = arange(0, N, dtype=int) + results[:N] = p1 + p2 = arange(-(n//2), 0, dtype=int) + results[N:] = p2 + return results * val + + +@set_module('numpy.fft') +def rfftfreq(n, d=1.0): + """ + Return the Discrete Fourier Transform sample frequencies + (for usage with rfft, irfft). + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). 
For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd + + Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`) + the Nyquist frequency component is considered to be positive. + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + + Returns + ------- + f : ndarray + Array of length ``n//2 + 1`` containing the sample frequencies. + + Examples + -------- + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) + >>> fourier = np.fft.rfft(signal) + >>> n = signal.size + >>> sample_rate = 100 + >>> freq = np.fft.fftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., ..., -30., -20., -10.]) + >>> freq = np.fft.rfftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., 30., 40., 50.]) + + """ + if not isinstance(n, integer_types): + raise ValueError("n should be an integer") + val = 1.0/(n*d) + N = n//2 + 1 + results = arange(0, N, dtype=int) + return results * val diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/fft/helper.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/helper.pyi new file mode 100644 index 00000000..9b652519 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/helper.pyi @@ -0,0 +1,47 @@ +from typing import Any, TypeVar, overload + +from numpy import generic, integer, floating, complexfloating +from numpy._typing import ( + NDArray, + ArrayLike, + _ShapeLike, + _ArrayLike, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, +) + +_SCT = TypeVar("_SCT", bound=generic) + +__all__: list[str] + +@overload +def fftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +@overload +def fftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ... + +@overload +def ifftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +@overload +def ifftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ... + +@overload +def fftfreq( + n: int | integer[Any], + d: _ArrayLikeFloat_co = ..., +) -> NDArray[floating[Any]]: ... +@overload +def fftfreq( + n: int | integer[Any], + d: _ArrayLikeComplex_co = ..., +) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def rfftfreq( + n: int | integer[Any], + d: _ArrayLikeFloat_co = ..., +) -> NDArray[floating[Any]]: ... +@overload +def rfftfreq( + n: int | integer[Any], + d: _ArrayLikeComplex_co = ..., +) -> NDArray[complexfloating[Any, Any]]: ... 
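+
+# Illustrative sketch of how the two frequency helpers stubbed above relate
+# (kept as comments, since a .pyi stub carries no runtime code): for an even
+# window length n, rfftfreq(n) is the non-negative half of fftfreq(n) with
+# the Nyquist term taken as positive.
+#
+#   >>> import numpy as np
+#   >>> np.fft.fftfreq(8)
+#   array([ 0.   ,  0.125,  0.25 ,  0.375, -0.5  , -0.375, -0.25 , -0.125])
+#   >>> np.fft.rfftfreq(8)
+#   array([0.   , 0.125, 0.25 , 0.375, 0.5  ])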
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/fft/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/fft/tests/test_helper.py b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/tests/test_helper.py new file mode 100644 index 00000000..3fb700bb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/tests/test_helper.py @@ -0,0 +1,167 @@ +"""Test functions for fftpack.helper module + +Copied from fftpack.helper by Pearu Peterson, October 2005 + +""" +import numpy as np +from numpy.testing import assert_array_almost_equal +from numpy import fft, pi + + +class TestFFTShift: + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + y = [-4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + + def test_inverse(self): + for n in [1, 4, 9, 100, 211]: + x = np.random.random((n,)) + assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x) + + def test_axes_keyword(self): + freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]] + shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted) + assert_array_almost_equal(fft.fftshift(freqs, axes=0), + fft.fftshift(freqs, axes=(0,))) + assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs) + assert_array_almost_equal(fft.ifftshift(shifted, axes=0), + fft.ifftshift(shifted, axes=(0,))) + + assert_array_almost_equal(fft.fftshift(freqs), shifted) + assert_array_almost_equal(fft.ifftshift(shifted), freqs) + + def test_uneven_dims(self): + """ Test 2D input, which has uneven dimension sizes """ + freqs = [ + [0, 1], + [2, 3], + [4, 5] + ] + + # shift in dimension 0 + shift_dim0 = [ + [4, 5], + [0, 1], + [2, 3] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0) + assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs) + assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0) + assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs) + + # shift in dimension 1 + shift_dim1 = [ + [1, 0], + [3, 2], + [5, 4] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1) + assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs) + + # shift in both dimensions + shift_dim_both = [ + [5, 4], + [1, 0], + [3, 2] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs) + assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs) + + # axes=None (default) shift in all dimensions + assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs) + assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs) + + def test_equal_to_original(self): + """ Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """ + from numpy.core import asarray, concatenate, arange, take + + def original_fftshift(x, axes=None): + """ How 
fftshift was implemented in v1.14""" + tmp = asarray(x) + ndim = tmp.ndim + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, int): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = (n + 1) // 2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + def original_ifftshift(x, axes=None): + """ How ifftshift was implemented in v1.14 """ + tmp = asarray(x) + ndim = tmp.ndim + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, int): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = n - (n + 1) // 2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + # create possible 2d array combinations and try all possible keywords + # compare output to original functions + for i in range(16): + for j in range(16): + for axes_keyword in [0, 1, None, (0,), (0, 1)]: + inp = np.random.rand(i, j) + + assert_array_almost_equal(fft.fftshift(inp, axes_keyword), + original_fftshift(inp, axes_keyword)) + + assert_array_almost_equal(fft.ifftshift(inp, axes_keyword), + original_ifftshift(inp, axes_keyword)) + + +class TestFFTFreq: + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + assert_array_almost_equal(9*fft.fftfreq(9), x) + assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + assert_array_almost_equal(10*fft.fftfreq(10), x) + assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x) + + +class TestRFFTFreq: + + def test_definition(self): + x = [0, 1, 2, 3, 4] + assert_array_almost_equal(9*fft.rfftfreq(9), x) + assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, 5] + assert_array_almost_equal(10*fft.rfftfreq(10), x) + assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x) + + +class TestIRFFTN: + + def test_not_last_axis_success(self): + ar, ai = np.random.random((2, 16, 8, 32)) + a = ar + 1j*ai + + axes = (-2,) + + # Should not raise error + fft.irfftn(a, axes=axes) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/fft/tests/test_pocketfft.py b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/tests/test_pocketfft.py new file mode 100644 index 00000000..122a9fac --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/fft/tests/test_pocketfft.py @@ -0,0 +1,308 @@ +import numpy as np +import pytest +from numpy.random import random +from numpy.testing import ( + assert_array_equal, assert_raises, assert_allclose, IS_WASM + ) +import threading +import queue + + +def fft1(x): + L = len(x) + phase = -2j * np.pi * (np.arange(L) / L) + phase = np.arange(L).reshape(-1, 1) * phase + return np.sum(x*np.exp(phase), axis=1) + + +class TestFFTShift: + + def test_fft_n(self): + assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0) + + +class TestFFT1D: + + def test_identity(self): + maxlen = 512 + x = random(maxlen) + 1j*random(maxlen) + xr = random(maxlen) + for i in range(1, maxlen): + assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i], + atol=1e-12) + assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]), i), + xr[0:i], atol=1e-12) + + def test_fft(self): + x = random(30) + 1j*random(30) + assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6) + assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6) + assert_allclose(fft1(x) / np.sqrt(30), + np.fft.fft(x, norm="ortho"), atol=1e-6) + assert_allclose(fft1(x) / 30., + np.fft.fft(x, norm="forward"), atol=1e-6) + + @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward')) + def test_ifft(self, norm): + x = 
random(30) + 1j*random(30) + assert_allclose( + x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm), + atol=1e-6) + # Ensure we get the correct error message + with pytest.raises(ValueError, + match='Invalid number of FFT data points'): + np.fft.ifft([], norm=norm) + + def test_fft2(self): + x = random((30, 20)) + 1j*random((30, 20)) + assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0), + np.fft.fft2(x), atol=1e-6) + assert_allclose(np.fft.fft2(x), + np.fft.fft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20), + np.fft.fft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.fft2(x) / (30. * 20.), + np.fft.fft2(x, norm="forward"), atol=1e-6) + + def test_ifft2(self): + x = random((30, 20)) + 1j*random((30, 20)) + assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0), + np.fft.ifft2(x), atol=1e-6) + assert_allclose(np.fft.ifft2(x), + np.fft.ifft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20), + np.fft.ifft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.ifft2(x) * (30. * 20.), + np.fft.ifft2(x, norm="forward"), atol=1e-6) + + def test_fftn(self): + x = random((30, 20, 10)) + 1j*random((30, 20, 10)) + assert_allclose( + np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0), + np.fft.fftn(x), atol=1e-6) + assert_allclose(np.fft.fftn(x), + np.fft.fftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10), + np.fft.fftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.fftn(x) / (30. * 20. * 10.), + np.fft.fftn(x, norm="forward"), atol=1e-6) + + def test_ifftn(self): + x = random((30, 20, 10)) + 1j*random((30, 20, 10)) + assert_allclose( + np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0), + np.fft.ifftn(x), atol=1e-6) + assert_allclose(np.fft.ifftn(x), + np.fft.ifftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10), + np.fft.ifftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.ifftn(x) * (30. * 20. * 10.), + np.fft.ifftn(x, norm="forward"), atol=1e-6) + + def test_rfft(self): + x = random(30) + for n in [x.size, 2*x.size]: + for norm in [None, 'backward', 'ortho', 'forward']: + assert_allclose( + np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)], + np.fft.rfft(x, n=n, norm=norm), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n), + np.fft.rfft(x, n=n, norm="backward"), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n) / np.sqrt(n), + np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n) / n, + np.fft.rfft(x, n=n, norm="forward"), atol=1e-6) + + def test_irfft(self): + x = random(30) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_rfft2(self): + x = random((30, 20)) + assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6) + assert_allclose(np.fft.rfft2(x), + np.fft.rfft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20), + np.fft.rfft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.rfft2(x) / (30. 
* 20.), + np.fft.rfft2(x, norm="forward"), atol=1e-6) + + def test_irfft2(self): + x = random((30, 20)) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_rfftn(self): + x = random((30, 20, 10)) + assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6) + assert_allclose(np.fft.rfftn(x), + np.fft.rfftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10), + np.fft.rfftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.rfftn(x) / (30. * 20. * 10.), + np.fft.rfftn(x, norm="forward"), atol=1e-6) + + def test_irfftn(self): + x = random((30, 20, 10)) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_hfft(self): + x = random(14) + 1j*random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm), + np.fft.hfft(x_herm, norm="backward"), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30), + np.fft.hfft(x_herm, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm) / 30., + np.fft.hfft(x_herm, norm="forward"), atol=1e-6) + + def test_ihfft(self): + x = random(14) + 1j*random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="backward"), norm="backward"), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="ortho"), norm="ortho"), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="forward"), norm="forward"), atol=1e-6) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.rfftn, np.fft.irfftn]) + def test_axes(self, op): + x = random((30, 20, 10)) + axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)] + for a in axes: + op_tr = op(np.transpose(x, a)) + tr_op = np.transpose(op(x, axes=a), a) + assert_allclose(op_tr, tr_op, atol=1e-6) + + def test_all_1d_norm_preserving(self): + # verify that round-trip transforms are norm-preserving + x = random(30) + x_norm = np.linalg.norm(x) + n = x.size * 2 + func_pairs = [(np.fft.fft, np.fft.ifft), + (np.fft.rfft, np.fft.irfft), + # hfft: order so the first function takes x.size samples + # (necessary for comparison to x_norm above) + (np.fft.ihfft, np.fft.hfft), + ] + for forw, back in func_pairs: + for n in [x.size, 2*x.size]: + for norm in [None, 'backward', 'ortho', 'forward']: + tmp = forw(x, n=n, norm=norm) + tmp = back(tmp, n=n, norm=norm) + assert_allclose(x_norm, + np.linalg.norm(tmp), atol=1e-6) + + @pytest.mark.parametrize("dtype", [np.half, np.single, np.double, + np.longdouble]) + def test_dtypes(self, dtype): + # make sure that all input precisions are accepted and internally + # converted to 64bit + x = 
random(30).astype(dtype) + assert_allclose(np.fft.ifft(np.fft.fft(x)), x, atol=1e-6) + assert_allclose(np.fft.irfft(np.fft.rfft(x)), x, atol=1e-6) + + +@pytest.mark.parametrize( + "dtype", + [np.float32, np.float64, np.complex64, np.complex128]) +@pytest.mark.parametrize("order", ["F", 'non-contiguous']) +@pytest.mark.parametrize( + "fft", + [np.fft.fft, np.fft.fft2, np.fft.fftn, + np.fft.ifft, np.fft.ifft2, np.fft.ifftn]) +def test_fft_with_order(dtype, order, fft): + # Check that FFT/IFFT produces identical results for C, Fortran and + # non contiguous arrays + rng = np.random.RandomState(42) + X = rng.rand(8, 7, 13).astype(dtype, copy=False) + # See discussion in pull/14178 + _tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps + if order == 'F': + Y = np.asfortranarray(X) + else: + # Make a non contiguous array + Y = X[::-1] + X = np.ascontiguousarray(X[::-1]) + + if fft.__name__.endswith('fft'): + for axis in range(3): + X_res = fft(X, axis=axis) + Y_res = fft(Y, axis=axis) + assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) + elif fft.__name__.endswith(('fft2', 'fftn')): + axes = [(0, 1), (1, 2), (0, 2)] + if fft.__name__.endswith('fftn'): + axes.extend([(0,), (1,), (2,), None]) + for ax in axes: + X_res = fft(X, axes=ax) + Y_res = fft(Y, axes=ax) + assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) + else: + raise ValueError() + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start thread") +class TestFFTThreadSafe: + threads = 16 + input_shape = (800, 200) + + def _test_mtsame(self, func, *args): + def worker(args, q): + q.put(func(*args)) + + q = queue.Queue() + expected = func(*args) + + # Spin off a bunch of threads to call the same function simultaneously + t = [threading.Thread(target=worker, args=(args, q)) + for i in range(self.threads)] + [x.start() for x in t] + + [x.join() for x in t] + # Make sure all threads returned the correct value + for i in range(self.threads): + assert_array_equal(q.get(timeout=5), expected, + 'Function returned wrong value in multithreaded context') + + def test_fft(self): + a = np.ones(self.input_shape) * 1+0j + self._test_mtsame(np.fft.fft, a) + + def test_ifft(self): + a = np.ones(self.input_shape) * 1+0j + self._test_mtsame(np.fft.ifft, a) + + def test_rfft(self): + a = np.ones(self.input_shape) + self._test_mtsame(np.fft.rfft, a) + + def test_irfft(self): + a = np.ones(self.input_shape) * 1+0j + self._test_mtsame(np.fft.irfft, a) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/__init__.py new file mode 100644 index 00000000..cbab200e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/__init__.py @@ -0,0 +1,92 @@ +""" +**Note:** almost all functions in the ``numpy.lib`` namespace +are also present in the main ``numpy`` namespace. Please use the +functions as ``np.`` where possible. + +``numpy.lib`` is mostly a space for implementing functions that don't +belong in core or in another NumPy submodule with a clear purpose +(e.g. ``random``, ``fft``, ``linalg``, ``ma``). + +Most contains basic functions that are used by several submodules and are +useful to have in the main name-space. + +""" + +# Public submodules +# Note: recfunctions and (maybe) format are public too, but not imported +from . import mixins +from . import scimath as emath + +# Private submodules +# load module names. See https://github.com/networkx/networkx/issues/5838 +from . import type_check +from . import index_tricks +from . import function_base +from . 
import nanfunctions
+from . import shape_base
+from . import stride_tricks
+from . import twodim_base
+from . import ufunclike
+from . import histograms
+from . import polynomial
+from . import utils
+from . import arraysetops
+from . import npyio
+from . import arrayterator
+from . import arraypad
+from . import _version
+
+from .type_check import *
+from .index_tricks import *
+from .function_base import *
+from .nanfunctions import *
+from .shape_base import *
+from .stride_tricks import *
+from .twodim_base import *
+from .ufunclike import *
+from .histograms import *
+
+from .polynomial import *
+from .utils import *
+from .arraysetops import *
+from .npyio import *
+from .arrayterator import Arrayterator
+from .arraypad import *
+from ._version import *
+from numpy.core._multiarray_umath import tracemalloc_domain
+
+__all__ = ['emath', 'tracemalloc_domain', 'Arrayterator']
+__all__ += type_check.__all__
+__all__ += index_tricks.__all__
+__all__ += function_base.__all__
+__all__ += shape_base.__all__
+__all__ += stride_tricks.__all__
+__all__ += twodim_base.__all__
+__all__ += ufunclike.__all__
+__all__ += arraypad.__all__
+__all__ += polynomial.__all__
+__all__ += utils.__all__
+__all__ += arraysetops.__all__
+__all__ += npyio.__all__
+__all__ += nanfunctions.__all__
+__all__ += histograms.__all__
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
+
+def __getattr__(attr):
+    # Warn for deprecated attributes
+    import math
+    import warnings
+
+    if attr == 'math':
+        warnings.warn(
+            "`np.lib.math` is a deprecated alias for the standard library "
+            "`math` module (Deprecated NumPy 1.25). Replace usages of "
+            "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2)
+        return math
+    else:
+        raise AttributeError("module {!r} has no attribute "
+                             "{!r}".format(__name__, attr))
+
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/__init__.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/__init__.pyi
new file mode 100644
index 00000000..d3553bbc
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/__init__.pyi
@@ -0,0 +1,245 @@
+import math as math
+from typing import Any
+
+from numpy._pytesttester import PytestTester
+
+from numpy import (
+    ndenumerate as ndenumerate,
+    ndindex as ndindex,
+)
+
+from numpy.version import version
+
+from numpy.lib import (
+    format as format,
+    mixins as mixins,
+    scimath as scimath,
+    stride_tricks as stride_tricks,
+)
+
+from numpy.lib._version import (
+    NumpyVersion as NumpyVersion,
+)
+
+from numpy.lib.arraypad import (
+    pad as pad,
+)
+
+from numpy.lib.arraysetops import (
+    ediff1d as ediff1d,
+    intersect1d as intersect1d,
+    setxor1d as setxor1d,
+    union1d as union1d,
+    setdiff1d as setdiff1d,
+    unique as unique,
+    in1d as in1d,
+    isin as isin,
+)
+
+from numpy.lib.arrayterator import (
+    Arrayterator as Arrayterator,
+)
+
+from numpy.lib.function_base import (
+    select as select,
+    piecewise as piecewise,
+    trim_zeros as trim_zeros,
+    copy as copy,
+    iterable as iterable,
+    percentile as percentile,
+    diff as diff,
+    gradient as gradient,
+    angle as angle,
+    unwrap as unwrap,
+    sort_complex as sort_complex,
+    disp as disp,
+    flip as flip,
+    rot90 as rot90,
+    extract as extract,
+    place as place,
+    vectorize as vectorize,
+    asarray_chkfinite as asarray_chkfinite,
+    average as average,
+    bincount as bincount,
+    digitize as digitize,
+    cov as cov,
+    corrcoef as corrcoef,
+    median as median,
+    sinc as sinc,
+    hamming as hamming,
+    hanning as hanning,
+    bartlett as
bartlett, + blackman as blackman, + kaiser as kaiser, + trapz as trapz, + i0 as i0, + add_newdoc as add_newdoc, + add_docstring as add_docstring, + meshgrid as meshgrid, + delete as delete, + insert as insert, + append as append, + interp as interp, + add_newdoc_ufunc as add_newdoc_ufunc, + quantile as quantile, +) + +from numpy.lib.histograms import ( + histogram_bin_edges as histogram_bin_edges, + histogram as histogram, + histogramdd as histogramdd, +) + +from numpy.lib.index_tricks import ( + ravel_multi_index as ravel_multi_index, + unravel_index as unravel_index, + mgrid as mgrid, + ogrid as ogrid, + r_ as r_, + c_ as c_, + s_ as s_, + index_exp as index_exp, + ix_ as ix_, + fill_diagonal as fill_diagonal, + diag_indices as diag_indices, + diag_indices_from as diag_indices_from, +) + +from numpy.lib.nanfunctions import ( + nansum as nansum, + nanmax as nanmax, + nanmin as nanmin, + nanargmax as nanargmax, + nanargmin as nanargmin, + nanmean as nanmean, + nanmedian as nanmedian, + nanpercentile as nanpercentile, + nanvar as nanvar, + nanstd as nanstd, + nanprod as nanprod, + nancumsum as nancumsum, + nancumprod as nancumprod, + nanquantile as nanquantile, +) + +from numpy.lib.npyio import ( + savetxt as savetxt, + loadtxt as loadtxt, + genfromtxt as genfromtxt, + recfromtxt as recfromtxt, + recfromcsv as recfromcsv, + load as load, + save as save, + savez as savez, + savez_compressed as savez_compressed, + packbits as packbits, + unpackbits as unpackbits, + fromregex as fromregex, + DataSource as DataSource, +) + +from numpy.lib.polynomial import ( + poly as poly, + roots as roots, + polyint as polyint, + polyder as polyder, + polyadd as polyadd, + polysub as polysub, + polymul as polymul, + polydiv as polydiv, + polyval as polyval, + polyfit as polyfit, + RankWarning as RankWarning, + poly1d as poly1d, +) + +from numpy.lib.shape_base import ( + column_stack as column_stack, + row_stack as row_stack, + dstack as dstack, + array_split as array_split, + split as split, + hsplit as hsplit, + vsplit as vsplit, + dsplit as dsplit, + apply_over_axes as apply_over_axes, + expand_dims as expand_dims, + apply_along_axis as apply_along_axis, + kron as kron, + tile as tile, + get_array_wrap as get_array_wrap, + take_along_axis as take_along_axis, + put_along_axis as put_along_axis, +) + +from numpy.lib.stride_tricks import ( + broadcast_to as broadcast_to, + broadcast_arrays as broadcast_arrays, + broadcast_shapes as broadcast_shapes, +) + +from numpy.lib.twodim_base import ( + diag as diag, + diagflat as diagflat, + eye as eye, + fliplr as fliplr, + flipud as flipud, + tri as tri, + triu as triu, + tril as tril, + vander as vander, + histogram2d as histogram2d, + mask_indices as mask_indices, + tril_indices as tril_indices, + tril_indices_from as tril_indices_from, + triu_indices as triu_indices, + triu_indices_from as triu_indices_from, +) + +from numpy.lib.type_check import ( + mintypecode as mintypecode, + asfarray as asfarray, + real as real, + imag as imag, + iscomplex as iscomplex, + isreal as isreal, + iscomplexobj as iscomplexobj, + isrealobj as isrealobj, + nan_to_num as nan_to_num, + real_if_close as real_if_close, + typename as typename, + common_type as common_type, +) + +from numpy.lib.ufunclike import ( + fix as fix, + isposinf as isposinf, + isneginf as isneginf, +) + +from numpy.lib.utils import ( + issubclass_ as issubclass_, + issubsctype as issubsctype, + issubdtype as issubdtype, + deprecate as deprecate, + deprecate_with_doc as deprecate_with_doc, + get_include as 
get_include, + info as info, + source as source, + who as who, + lookfor as lookfor, + byte_bounds as byte_bounds, + safe_eval as safe_eval, + show_runtime as show_runtime, +) + +from numpy.core.multiarray import ( + tracemalloc_domain as tracemalloc_domain, +) + +__all__: list[str] +__path__: list[str] +test: PytestTester + +__version__ = version +emath = scimath diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/_datasource.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/_datasource.py new file mode 100644 index 00000000..613733fa --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/_datasource.py @@ -0,0 +1,704 @@ +"""A file interface for handling local and remote data files. + +The goal of datasource is to abstract some of the file system operations +when dealing with data files so the researcher doesn't have to know all the +low-level details. Through datasource, a researcher can obtain and use a +file with one function call, regardless of location of the file. + +DataSource is meant to augment standard python libraries, not replace them. +It should work seamlessly with standard file IO operations and the os +module. + +DataSource files can originate locally or remotely: + +- local files : '/home/guido/src/local/data.txt' +- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt' + +DataSource files can also be compressed or uncompressed. Currently only +gzip, bz2 and xz are supported. + +Example:: + + >>> # Create a DataSource, use os.curdir (default) for local storage. + >>> from numpy import DataSource + >>> ds = DataSource() + >>> + >>> # Open a remote file. + >>> # DataSource downloads the file, stores it locally in: + >>> # './www.google.com/index.html' + >>> # opens the file and returns a file object. + >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP + >>> + >>> # Use the file as you normally would + >>> fp.read() # doctest: +SKIP + >>> fp.close() # doctest: +SKIP + +""" +import os +import io + +from .._utils import set_module + + +_open = open + + +def _check_mode(mode, encoding, newline): + """Check mode and that encoding and newline are compatible. + + Parameters + ---------- + mode : str + File open mode. + encoding : str + File encoding. + newline : str + Newline for text files. + + """ + if "t" in mode: + if "b" in mode: + raise ValueError("Invalid mode: %r" % (mode,)) + else: + if encoding is not None: + raise ValueError("Argument 'encoding' not supported in binary mode") + if newline is not None: + raise ValueError("Argument 'newline' not supported in binary mode") + + +# Using a class instead of a module-level dictionary +# to reduce the initial 'import numpy' overhead by +# deferring the import of lzma, bz2 and gzip until needed + +# TODO: .zip support, .tar support? +class _FileOpeners: + """ + Container for different methods to open (un-)compressed files. + + `_FileOpeners` contains a dictionary that holds one method for each + supported file format. Attribute lookup is implemented in such a way + that an instance of `_FileOpeners` itself can be indexed with the keys + of that dictionary. Currently uncompressed files as well as files + compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported. + + Notes + ----- + `_file_openers`, an instance of `_FileOpeners`, is made available for + use in the `_datasource` module. 
+ + Examples + -------- + >>> import gzip + >>> np.lib._datasource._file_openers.keys() + [None, '.bz2', '.gz', '.xz', '.lzma'] + >>> np.lib._datasource._file_openers['.gz'] is gzip.open + True + + """ + + def __init__(self): + self._loaded = False + self._file_openers = {None: io.open} + + def _load(self): + if self._loaded: + return + + try: + import bz2 + self._file_openers[".bz2"] = bz2.open + except ImportError: + pass + + try: + import gzip + self._file_openers[".gz"] = gzip.open + except ImportError: + pass + + try: + import lzma + self._file_openers[".xz"] = lzma.open + self._file_openers[".lzma"] = lzma.open + except (ImportError, AttributeError): + # There are incompatible backports of lzma that do not have the + # lzma.open attribute, so catch that as well as ImportError. + pass + + self._loaded = True + + def keys(self): + """ + Return the keys of currently supported file openers. + + Parameters + ---------- + None + + Returns + ------- + keys : list + The keys are None for uncompressed files and the file extension + strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression + methods. + + """ + self._load() + return list(self._file_openers.keys()) + + def __getitem__(self, key): + self._load() + return self._file_openers[key] + +_file_openers = _FileOpeners() + +def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None): + """ + Open `path` with `mode` and return the file object. + + If ``path`` is an URL, it will be downloaded, stored in the + `DataSource` `destpath` directory and opened from there. + + Parameters + ---------- + path : str + Local file path or URL to open. + mode : str, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to + append. Available modes depend on the type of object specified by + path. Default is 'r'. + destpath : str, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `io.open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + The opened file. + + Notes + ----- + This is a convenience function that instantiates a `DataSource` and + returns the file object from ``DataSource.open(path)``. + + """ + + ds = DataSource(destpath) + return ds.open(path, mode, encoding=encoding, newline=newline) + + +@set_module('numpy') +class DataSource: + """ + DataSource(destpath='.') + + A generic data source file (file, http, ftp, ...). + + DataSources can be local files or remote files/URLs. The files may + also be compressed or uncompressed. DataSource hides some of the + low-level details of downloading the file, allowing you to simply pass + in a valid file path (or URL) and obtain a file object. + + Parameters + ---------- + destpath : str or None, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + + Notes + ----- + URLs require a scheme string (``http://``) to be used, without it they + will fail:: + + >>> repos = np.DataSource() + >>> repos.exists('www.google.com/index.html') + False + >>> repos.exists('http://www.google.com/index.html') + True + + Temporary directories are deleted when the DataSource is deleted. 
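+
+    A minimal sketch of that temporary-directory behaviour (the actual
+    directory name is chosen by `tempfile.mkdtemp` and will vary):
+
+    >>> ds = np.DataSource(None)    # no destpath -> temporary directory
+    >>> ds._istmpdest
+    True
+    >>> del ds                      # the temporary directory is removed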
+ + Examples + -------- + :: + + >>> ds = np.DataSource('/home/guido') + >>> urlname = 'http://www.google.com/' + >>> gfile = ds.open('http://www.google.com/') + >>> ds.abspath(urlname) + '/home/guido/www.google.com/index.html' + + >>> ds = np.DataSource(None) # use with temporary file + >>> ds.open('/home/guido/foobar.txt') + + >>> ds.abspath('/home/guido/foobar.txt') + '/tmp/.../home/guido/foobar.txt' + + """ + + def __init__(self, destpath=os.curdir): + """Create a DataSource with a local path at destpath.""" + if destpath: + self._destpath = os.path.abspath(destpath) + self._istmpdest = False + else: + import tempfile # deferring import to improve startup time + self._destpath = tempfile.mkdtemp() + self._istmpdest = True + + def __del__(self): + # Remove temp directories + if hasattr(self, '_istmpdest') and self._istmpdest: + import shutil + + shutil.rmtree(self._destpath) + + def _iszip(self, filename): + """Test if the filename is a zip file by looking at the file extension. + + """ + fname, ext = os.path.splitext(filename) + return ext in _file_openers.keys() + + def _iswritemode(self, mode): + """Test if the given mode will open a file for writing.""" + + # Currently only used to test the bz2 files. + _writemodes = ("w", "+") + for c in mode: + if c in _writemodes: + return True + return False + + def _splitzipext(self, filename): + """Split zip extension from filename and return filename. + + Returns + ------- + base, zip_ext : {tuple} + + """ + + if self._iszip(filename): + return os.path.splitext(filename) + else: + return filename, None + + def _possible_names(self, filename): + """Return a tuple containing compressed filename variations.""" + names = [filename] + if not self._iszip(filename): + for zipext in _file_openers.keys(): + if zipext: + names.append(filename+zipext) + return names + + def _isurl(self, path): + """Test if path is a net location. Tests the scheme and netloc.""" + + # We do this here to reduce the 'import numpy' initial import time. + from urllib.parse import urlparse + + # BUG : URLs require a scheme string ('http://') to be used. + # www.google.com will fail. + # Should we prepend the scheme for those that don't have it and + # test that also? Similar to the way we append .gz and test for + # for compressed versions of files. + + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + return bool(scheme and netloc) + + def _cache(self, path): + """Cache the file specified by path. + + Creates a copy of the file in the datasource cache. + + """ + # We import these here because importing them is slow and + # a significant fraction of numpy's total import time. + import shutil + from urllib.request import urlopen + + upath = self.abspath(path) + + # ensure directory exists + if not os.path.exists(os.path.dirname(upath)): + os.makedirs(os.path.dirname(upath)) + + # TODO: Doesn't handle compressed files! + if self._isurl(path): + with urlopen(path) as openedurl: + with _open(upath, 'wb') as f: + shutil.copyfileobj(openedurl, f) + else: + shutil.copyfile(path, upath) + return upath + + def _findfile(self, path): + """Searches for ``path`` and returns full path if found. + + If path is an URL, _findfile will cache a local copy and return the + path to the cached file. If path is a local file, _findfile will + return a path to that local file. + + The search will include possible compressed versions of the file + and return the first occurrence found. 
+ + """ + + # Build list of possible local file paths + if not self._isurl(path): + # Valid local paths + filelist = self._possible_names(path) + # Paths in self._destpath + filelist += self._possible_names(self.abspath(path)) + else: + # Cached URLs in self._destpath + filelist = self._possible_names(self.abspath(path)) + # Remote URLs + filelist = filelist + self._possible_names(path) + + for name in filelist: + if self.exists(name): + if self._isurl(name): + name = self._cache(name) + return name + return None + + def abspath(self, path): + """ + Return absolute path of file in the DataSource directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + Notes + ----- + The functionality is based on `os.path.abspath`. + + """ + # We do this here to reduce the 'import numpy' initial import time. + from urllib.parse import urlparse + + # TODO: This should be more robust. Handles case where path includes + # the destpath, but not other sub-paths. Failing case: + # path = /home/guido/datafile.txt + # destpath = /home/alex/ + # upath = self.abspath(path) + # upath == '/home/alex/home/guido/datafile.txt' + + # handle case where path includes self._destpath + splitpath = path.split(self._destpath, 2) + if len(splitpath) > 1: + path = splitpath[1] + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + netloc = self._sanitize_relative_path(netloc) + upath = self._sanitize_relative_path(upath) + return os.path.join(self._destpath, netloc, upath) + + def _sanitize_relative_path(self, path): + """Return a sanitised relative path for which + os.path.abspath(os.path.join(base, path)).startswith(base) + """ + last = None + path = os.path.normpath(path) + while path != last: + last = path + # Note: os.path.join treats '/' as os.sep on Windows + path = path.lstrip(os.sep).lstrip('/') + path = path.lstrip(os.pardir).lstrip('..') + drive, path = os.path.splitdrive(path) # for Windows + return path + + def exists(self, path): + """ + Test if path exists. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. + + Returns + ------- + out : bool + True if `path` exists. + + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + + # First test for local path + if os.path.exists(path): + return True + + # We import this here because importing urllib is slow and + # a significant fraction of numpy's total import time. 
+        from urllib.request import urlopen
+        from urllib.error import URLError
+
+        # Test cached url
+        upath = self.abspath(path)
+        if os.path.exists(upath):
+            return True
+
+        # Test remote url
+        if self._isurl(path):
+            try:
+                netfile = urlopen(path)
+                netfile.close()
+                del netfile
+                return True
+            except URLError:
+                return False
+        return False
+
+    def open(self, path, mode='r', encoding=None, newline=None):
+        """
+        Open and return file-like object.
+
+        If `path` is an URL, it will be downloaded, stored in the
+        `DataSource` directory and opened from there.
+
+        Parameters
+        ----------
+        path : str
+            Local file path or URL to open.
+        mode : {'r', 'w', 'a'}, optional
+            Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+            'a' to append. Available modes depend on the type of object
+            specified by `path`. Default is 'r'.
+        encoding : {None, str}, optional
+            Open text file with given encoding. The default encoding will be
+            what `io.open` uses.
+        newline : {None, str}, optional
+            Newline to use when reading text file.
+
+        Returns
+        -------
+        out : file object
+            File object.
+
+        """
+
+        # TODO: There is no support for opening a file for writing which
+        # doesn't exist yet (creating a file). Should there be?
+
+        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
+        # used to store URLs in self._destpath.
+
+        if self._isurl(path) and self._iswritemode(mode):
+            raise ValueError("URLs are not writeable")
+
+        # NOTE: _findfile will fail on a new file opened for writing.
+        found = self._findfile(path)
+        if found:
+            _fname, ext = self._splitzipext(found)
+            # bz2 files cannot be opened in update mode, so strip any '+'
+            # from the mode. Note that `_splitzipext` returns the extension
+            # with its leading dot, and `str.replace` returns a new string,
+            # so the result must be assigned back.
+            if ext == '.bz2':
+                mode = mode.replace("+", "")
+            return _file_openers[ext](found, mode=mode,
+                                      encoding=encoding, newline=newline)
+        else:
+            raise FileNotFoundError(f"{path} not found.")
+
+
+class Repository(DataSource):
+    """
+    Repository(baseurl, destpath='.')
+
+    A data repository where multiple DataSources share a base
+    URL/directory.
+
+    `Repository` extends `DataSource` by prepending a base URL (or
+    directory) to all the files it handles. Use `Repository` when you will
+    be working with multiple files from one base URL. Initialize
+    `Repository` with the base URL, then refer to each file by its filename
+    only.
+
+    Parameters
+    ----------
+    baseurl : str
+        Path to the local directory or remote location that contains the
+        data files.
+    destpath : str or None, optional
+        Path to the directory where the source file gets downloaded to for
+        use. If `destpath` is None, a temporary directory will be created.
+        The default path is the current directory.
+
+    Examples
+    --------
+    To analyze all files in the repository, do something like this
+    (note: this is not self-contained code)::
+
+        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
+        >>> for filename in filelist:
+        ...     fp = repos.open(filename)
+        ...     fp.analyze()
+        ...     fp.close()
+
+    Similarly you could use a URL for a repository::
+
+        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')
+
+    """
+
+    def __init__(self, baseurl, destpath=os.curdir):
+        """Create a Repository with a shared url or directory of baseurl."""
+        DataSource.__init__(self, destpath=destpath)
+        self._baseurl = baseurl
+
+    def __del__(self):
+        DataSource.__del__(self)
+
+    def _fullpath(self, path):
+        """Return complete path for path.
Prepends baseurl if necessary.""" + splitpath = path.split(self._baseurl, 2) + if len(splitpath) == 1: + result = os.path.join(self._baseurl, path) + else: + result = path # path contains baseurl already + return result + + def _findfile(self, path): + """Extend DataSource method to prepend baseurl to ``path``.""" + return DataSource._findfile(self, self._fullpath(path)) + + def abspath(self, path): + """ + Return absolute path of file in the Repository directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + """ + return DataSource.abspath(self, self._fullpath(path)) + + def exists(self, path): + """ + Test if path exists prepending Repository base URL to path. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : bool + True if `path` exists. + + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + return DataSource.exists(self, self._fullpath(path)) + + def open(self, path, mode='r', encoding=None, newline=None): + """ + Open and return file-like object prepending Repository base URL. + + If `path` is an URL, it will be downloaded, stored in the + DataSource directory and opened from there. + + Parameters + ---------- + path : str + Local file path or URL to open. This may, but does not have to, + include the `baseurl` with which the `Repository` was + initialized. + mode : {'r', 'w', 'a'}, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, + 'a' to append. Available modes depend on the type of object + specified by `path`. Default is 'r'. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `io.open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + File object. + + """ + return DataSource.open(self, self._fullpath(path), mode, + encoding=encoding, newline=newline) + + def listdir(self): + """ + List files in the source Repository. + + Returns + ------- + files : list of str + List of file names (not containing a directory part). + + Notes + ----- + Does not currently work for remote repositories. 
+ + """ + if self._isurl(self._baseurl): + raise NotImplementedError( + "Directory listing of URLs, not supported yet.") + else: + return os.listdir(self._baseurl) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/_iotools.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/_iotools.py new file mode 100644 index 00000000..534d1b3e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/_iotools.py @@ -0,0 +1,897 @@ +"""A collection of functions designed to help I/O with ascii files. + +""" +__docformat__ = "restructuredtext en" + +import numpy as np +import numpy.core.numeric as nx +from numpy.compat import asbytes, asunicode + + +def _decode_line(line, encoding=None): + """Decode bytes from binary input streams. + + Defaults to decoding from 'latin1'. That differs from the behavior of + np.compat.asunicode that decodes from 'ascii'. + + Parameters + ---------- + line : str or bytes + Line to be decoded. + encoding : str + Encoding used to decode `line`. + + Returns + ------- + decoded_line : str + + """ + if type(line) is bytes: + if encoding is None: + encoding = "latin1" + line = line.decode(encoding) + + return line + + +def _is_string_like(obj): + """ + Check whether obj behaves like a string. + """ + try: + obj + '' + except (TypeError, ValueError): + return False + return True + + +def _is_bytes_like(obj): + """ + Check whether obj behaves like a bytes object. + """ + try: + obj + b'' + except (TypeError, ValueError): + return False + return True + + +def has_nested_fields(ndtype): + """ + Returns whether one or several fields of a dtype are nested. + + Parameters + ---------- + ndtype : dtype + Data-type of a structured array. + + Raises + ------ + AttributeError + If `ndtype` does not have a `names` attribute. + + Examples + -------- + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) + >>> np.lib._iotools.has_nested_fields(dt) + False + + """ + for name in ndtype.names or (): + if ndtype[name].names is not None: + return True + return False + + +def flatten_dtype(ndtype, flatten_base=False): + """ + Unpack a structured data-type by collapsing nested fields and/or fields + with a shape. + + Note that the field names are lost. + + Parameters + ---------- + ndtype : dtype + The datatype to collapse + flatten_base : bool, optional + If True, transform a field with a shape into several fields. Default is + False. + + Examples + -------- + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ... ('block', int, (2, 3))]) + >>> np.lib._iotools.flatten_dtype(dt) + [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')] + >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) + [dtype('S4'), + dtype('float64'), + dtype('float64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64')] + + """ + names = ndtype.names + if names is None: + if flatten_base: + return [ndtype.base] * int(np.prod(ndtype.shape)) + return [ndtype.base] + else: + types = [] + for field in names: + info = ndtype.fields[field] + flat_dt = flatten_dtype(info[0], flatten_base) + types.extend(flat_dt) + return types + + +class LineSplitter: + """ + Object to split a string at a given delimiter or at given places. + + Parameters + ---------- + delimiter : str, int, or sequence of ints, optional + If a string, character used to delimit consecutive fields. + If an integer or a sequence of integers, width(s) of each field. + comments : str, optional + Character used to mark the beginning of a comment. 
Default is '#'. + autostrip : bool, optional + Whether to strip each individual field. Default is True. + + """ + + def autostrip(self, method): + """ + Wrapper to strip each member of the output of `method`. + + Parameters + ---------- + method : function + Function that takes a single argument and returns a sequence of + strings. + + Returns + ------- + wrapped : function + The result of wrapping `method`. `wrapped` takes a single input + argument and returns a list of strings that are stripped of + white-space. + + """ + return lambda input: [_.strip() for _ in method(input)] + + def __init__(self, delimiter=None, comments='#', autostrip=True, + encoding=None): + delimiter = _decode_line(delimiter) + comments = _decode_line(comments) + + self.comments = comments + + # Delimiter is a character + if (delimiter is None) or isinstance(delimiter, str): + delimiter = delimiter or None + _handyman = self._delimited_splitter + # Delimiter is a list of field widths + elif hasattr(delimiter, '__iter__'): + _handyman = self._variablewidth_splitter + idx = np.cumsum([0] + list(delimiter)) + delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])] + # Delimiter is a single integer + elif int(delimiter): + (_handyman, delimiter) = ( + self._fixedwidth_splitter, int(delimiter)) + else: + (_handyman, delimiter) = (self._delimited_splitter, None) + self.delimiter = delimiter + if autostrip: + self._handyman = self.autostrip(_handyman) + else: + self._handyman = _handyman + self.encoding = encoding + + def _delimited_splitter(self, line): + """Chop off comments, strip, and split at delimiter. """ + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip(" \r\n") + if not line: + return [] + return line.split(self.delimiter) + + def _fixedwidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip("\r\n") + if not line: + return [] + fixed = self.delimiter + slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)] + return [line[s] for s in slices] + + def _variablewidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + if not line: + return [] + slices = self.delimiter + return [line[s] for s in slices] + + def __call__(self, line): + return self._handyman(_decode_line(line, self.encoding)) + + +class NameValidator: + """ + Object to validate a list of strings to use as field names. + + The strings are stripped of any non alphanumeric character, and spaces + are replaced by '_'. During instantiation, the user can define a list + of names to exclude, as well as a list of invalid characters. Names in + the exclusion list are appended a '_' character. + + Once an instance has been created, it can be called with a list of + names, and a list of valid names will be created. The `__call__` + method accepts an optional keyword "default" that sets the default name + in case of ambiguity. By default this is 'f', so that names will + default to `f0`, `f1`, etc. + + Parameters + ---------- + excludelist : sequence, optional + A list of names to exclude. This list is appended to the default + list ['return', 'file', 'print']. Excluded names are appended an + underscore: for example, `file` becomes `file_` if supplied. + deletechars : str, optional + A string combining invalid characters that must be deleted from the + names. + case_sensitive : {True, False, 'upper', 'lower'}, optional + * If True, field names are case-sensitive. 
+ * If False or 'upper', field names are converted to upper case. + * If 'lower', field names are converted to lower case. + + The default value is True. + replace_space : '_', optional + Character(s) used in replacement of white spaces. + + Notes + ----- + Calling an instance of `NameValidator` is the same as calling its + method `validate`. + + Examples + -------- + >>> validator = np.lib._iotools.NameValidator() + >>> validator(['file', 'field2', 'with space', 'CaSe']) + ('file_', 'field2', 'with_space', 'CaSe') + + >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'], + ... deletechars='q', + ... case_sensitive=False) + >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe']) + ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE') + + """ + + defaultexcludelist = ['return', 'file', 'print'] + defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") + + def __init__(self, excludelist=None, deletechars=None, + case_sensitive=None, replace_space='_'): + # Process the exclusion list .. + if excludelist is None: + excludelist = [] + excludelist.extend(self.defaultexcludelist) + self.excludelist = excludelist + # Process the list of characters to delete + if deletechars is None: + delete = self.defaultdeletechars + else: + delete = set(deletechars) + delete.add('"') + self.deletechars = delete + # Process the case option ..... + if (case_sensitive is None) or (case_sensitive is True): + self.case_converter = lambda x: x + elif (case_sensitive is False) or case_sensitive.startswith('u'): + self.case_converter = lambda x: x.upper() + elif case_sensitive.startswith('l'): + self.case_converter = lambda x: x.lower() + else: + msg = 'unrecognized case_sensitive value %s.' % case_sensitive + raise ValueError(msg) + + self.replace_space = replace_space + + def validate(self, names, defaultfmt="f%i", nbfields=None): + """ + Validate a list of strings as field names for a structured array. + + Parameters + ---------- + names : sequence of str + Strings to be validated. + defaultfmt : str, optional + Default format string, used if validating a given string + reduces its length to zero. + nbfields : integer, optional + Final number of validated names, used to expand or shrink the + initial list of names. + + Returns + ------- + validatednames : list of str + The list of validated field names. + + Notes + ----- + A `NameValidator` instance can be called directly, which is the + same as calling `validate`. For examples, see `NameValidator`. + + """ + # Initial checks .............. + if (names is None): + if (nbfields is None): + return None + names = [] + if isinstance(names, str): + names = [names, ] + if nbfields is not None: + nbnames = len(names) + if (nbnames < nbfields): + names = list(names) + [''] * (nbfields - nbnames) + elif (nbnames > nbfields): + names = names[:nbfields] + # Set some shortcuts ........... + deletechars = self.deletechars + excludelist = self.excludelist + case_converter = self.case_converter + replace_space = self.replace_space + # Initializes some variables ... 
+ validatednames = [] + seen = dict() + nbempty = 0 + + for item in names: + item = case_converter(item).strip() + if replace_space: + item = item.replace(' ', replace_space) + item = ''.join([c for c in item if c not in deletechars]) + if item == '': + item = defaultfmt % nbempty + while item in names: + nbempty += 1 + item = defaultfmt % nbempty + nbempty += 1 + elif item in excludelist: + item += '_' + cnt = seen.get(item, 0) + if cnt > 0: + validatednames.append(item + '_%d' % cnt) + else: + validatednames.append(item) + seen[item] = cnt + 1 + return tuple(validatednames) + + def __call__(self, names, defaultfmt="f%i", nbfields=None): + return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields) + + +def str2bool(value): + """ + Tries to transform a string supposed to represent a boolean to a boolean. + + Parameters + ---------- + value : str + The string that is transformed to a boolean. + + Returns + ------- + boolval : bool + The boolean representation of `value`. + + Raises + ------ + ValueError + If the string is not 'True' or 'False' (case independent) + + Examples + -------- + >>> np.lib._iotools.str2bool('TRUE') + True + >>> np.lib._iotools.str2bool('false') + False + + """ + value = value.upper() + if value == 'TRUE': + return True + elif value == 'FALSE': + return False + else: + raise ValueError("Invalid boolean") + + +class ConverterError(Exception): + """ + Exception raised when an error occurs in a converter for string values. + + """ + pass + + +class ConverterLockError(ConverterError): + """ + Exception raised when an attempt is made to upgrade a locked converter. + + """ + pass + + +class ConversionWarning(UserWarning): + """ + Warning issued when a string converter has a problem. + + Notes + ----- + In `genfromtxt` a `ConversionWarning` is issued if raising exceptions + is explicitly suppressed with the "invalid_raise" keyword. + + """ + pass + + +class StringConverter: + """ + Factory class for function transforming a string into another object + (int, float). + + After initialization, an instance can be called to transform a string + into another object. If the string is recognized as representing a + missing value, a default value is returned. + + Attributes + ---------- + func : function + Function used for the conversion. + default : any + Default value to return when the input corresponds to a missing + value. + type : type + Type of the output. + _status : int + Integer representing the order of the conversion. + _mapper : sequence of tuples + Sequence of tuples (dtype, function, default value) to evaluate in + order. + _locked : bool + Holds `locked` parameter. + + Parameters + ---------- + dtype_or_func : {None, dtype, function}, optional + If a `dtype`, specifies the input data type, used to define a basic + function and a default value for missing data. For example, when + `dtype` is float, the `func` attribute is set to `float` and the + default value to `np.nan`. If a function, this function is used to + convert a string to another object. In this case, it is recommended + to give an associated default value as input. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, `StringConverter` + tries to supply a reasonable default value. + missing_values : {None, sequence of str}, optional + ``None`` or sequence of strings indicating a missing value. If ``None`` + then missing values are indicated by empty entries. The default is + ``None``. 
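A small sketch of the two parsing helpers defined above, `LineSplitter` and `NameValidator`, as they would be driven by a `genfromtxt`-style reader (the input strings are made up):

```python
from numpy.lib._iotools import LineSplitter, NameValidator

split = LineSplitter(delimiter=',')
print(split("1, 2.5, hello  # trailing comment"))
# ['1', '2.5', 'hello'] -- comment chopped off, fields stripped

validate = NameValidator()
print(validate(['with space', 'return', 'x']))
# ('with_space', 'return_', 'x') -- space replaced, excluded name suffixed
```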
+ locked : bool, optional + Whether the StringConverter should be locked to prevent automatic + upgrade or not. Default is False. + + """ + _mapper = [(nx.bool_, str2bool, False), + (nx.int_, int, -1),] + + # On 32-bit systems, we need to make sure that we explicitly include + # nx.int64 since ns.int_ is nx.int32. + if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize: + _mapper.append((nx.int64, int, -1)) + + _mapper.extend([(nx.float64, float, nx.nan), + (nx.complex128, complex, nx.nan + 0j), + (nx.longdouble, nx.longdouble, nx.nan), + # If a non-default dtype is passed, fall back to generic + # ones (should only be used for the converter) + (nx.integer, int, -1), + (nx.floating, float, nx.nan), + (nx.complexfloating, complex, nx.nan + 0j), + # Last, try with the string types (must be last, because + # `_mapper[-1]` is used as default in some cases) + (nx.str_, asunicode, '???'), + (nx.bytes_, asbytes, '???'), + ]) + + @classmethod + def _getdtype(cls, val): + """Returns the dtype of the input variable.""" + return np.array(val).dtype + + @classmethod + def _getsubdtype(cls, val): + """Returns the type of the dtype of the input variable.""" + return np.array(val).dtype.type + + @classmethod + def _dtypeortype(cls, dtype): + """Returns dtype for datetime64 and type of dtype otherwise.""" + + # This is a bit annoying. We want to return the "general" type in most + # cases (ie. "string" rather than "S10"), but we want to return the + # specific type for datetime64 (ie. "datetime64[us]" rather than + # "datetime64"). + if dtype.type == np.datetime64: + return dtype + return dtype.type + + @classmethod + def upgrade_mapper(cls, func, default=None): + """ + Upgrade the mapper of a StringConverter by adding a new function and + its corresponding default. + + The input function (or sequence of functions) and its associated + default value (if any) is inserted in penultimate position of the + mapper. The corresponding type is estimated from the dtype of the + default value. 
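The `_mapper` table above defines the conversion ladder that `StringConverter` walks (bool, then int, then float, and so on); the `upgrade` method defined further down tries each entry in turn until one accepts the input. A sketch of the ladder in action (the step sequence assumes a 64-bit build, where the extra `int64` entry is not appended):

```python
from numpy.lib._iotools import StringConverter

conv = StringConverter()     # starts at the bool entry of _mapper
print(conv.upgrade('1'))     # bool fails, upgrades to int -> 1
print(conv.upgrade('3.25'))  # int fails, upgrades to float -> 3.25
print(conv.type)             # <class 'numpy.float64'>
```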
+ + Parameters + ---------- + func : var + Function, or sequence of functions + + Examples + -------- + >>> import dateutil.parser + >>> import datetime + >>> dateparser = dateutil.parser.parse + >>> defaultdate = datetime.date(2000, 1, 1) + >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate) + """ + # Func is a single functions + if hasattr(func, '__call__'): + cls._mapper.insert(-1, (cls._getsubdtype(default), func, default)) + return + elif hasattr(func, '__iter__'): + if isinstance(func[0], (tuple, list)): + for _ in func: + cls._mapper.insert(-1, _) + return + if default is None: + default = [None] * len(func) + else: + default = list(default) + default.append([None] * (len(func) - len(default))) + for fct, dft in zip(func, default): + cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft)) + + @classmethod + def _find_map_entry(cls, dtype): + # if a converter for the specific dtype is available use that + for i, (deftype, func, default_def) in enumerate(cls._mapper): + if dtype.type == deftype: + return i, (deftype, func, default_def) + + # otherwise find an inexact match + for i, (deftype, func, default_def) in enumerate(cls._mapper): + if np.issubdtype(dtype.type, deftype): + return i, (deftype, func, default_def) + + raise LookupError + + def __init__(self, dtype_or_func=None, default=None, missing_values=None, + locked=False): + # Defines a lock for upgrade + self._locked = bool(locked) + # No input dtype: minimal initialization + if dtype_or_func is None: + self.func = str2bool + self._status = 0 + self.default = default or False + dtype = np.dtype('bool') + else: + # Is the input a np.dtype ? + try: + self.func = None + dtype = np.dtype(dtype_or_func) + except TypeError: + # dtype_or_func must be a function, then + if not hasattr(dtype_or_func, '__call__'): + errmsg = ("The input argument `dtype` is neither a" + " function nor a dtype (got '%s' instead)") + raise TypeError(errmsg % type(dtype_or_func)) + # Set the function + self.func = dtype_or_func + # If we don't have a default, try to guess it or set it to + # None + if default is None: + try: + default = self.func('0') + except ValueError: + default = None + dtype = self._getdtype(default) + + # find the best match in our mapper + try: + self._status, (_, func, default_def) = self._find_map_entry(dtype) + except LookupError: + # no match + self.default = default + _, func, _ = self._mapper[-1] + self._status = 0 + else: + # use the found default only if we did not already have one + if default is None: + self.default = default_def + else: + self.default = default + + # If the input was a dtype, set the function to the last we saw + if self.func is None: + self.func = func + + # If the status is 1 (int), change the function to + # something more robust. + if self.func == self._mapper[1][1]: + if issubclass(dtype.type, np.uint64): + self.func = np.uint64 + elif issubclass(dtype.type, np.int64): + self.func = np.int64 + else: + self.func = lambda x: int(float(x)) + # Store the list of strings corresponding to missing values. 
+ if missing_values is None: + self.missing_values = {''} + else: + if isinstance(missing_values, str): + missing_values = missing_values.split(",") + self.missing_values = set(list(missing_values) + ['']) + + self._callingfunction = self._strict_call + self.type = self._dtypeortype(dtype) + self._checked = False + self._initial_default = default + + def _loose_call(self, value): + try: + return self.func(value) + except ValueError: + return self.default + + def _strict_call(self, value): + try: + + # We check if we can convert the value using the current function + new_value = self.func(value) + + # In addition to having to check whether func can convert the + # value, we also have to make sure that we don't get overflow + # errors for integers. + if self.func is int: + try: + np.array(value, dtype=self.type) + except OverflowError: + raise ValueError + + # We're still here so we can now return the new value + return new_value + + except ValueError: + if value.strip() in self.missing_values: + if not self._status: + self._checked = False + return self.default + raise ValueError("Cannot convert string '%s'" % value) + + def __call__(self, value): + return self._callingfunction(value) + + def _do_upgrade(self): + # Raise an exception if we locked the converter... + if self._locked: + errmsg = "Converter is locked and cannot be upgraded" + raise ConverterLockError(errmsg) + _statusmax = len(self._mapper) + # Complains if we try to upgrade by the maximum + _status = self._status + if _status == _statusmax: + errmsg = "Could not find a valid conversion function" + raise ConverterError(errmsg) + elif _status < _statusmax - 1: + _status += 1 + self.type, self.func, default = self._mapper[_status] + self._status = _status + if self._initial_default is not None: + self.default = self._initial_default + else: + self.default = default + + def upgrade(self, value): + """ + Find the best converter for a given string, and return the result. + + The supplied string `value` is converted by testing different + converters in order. First the `func` method of the + `StringConverter` instance is tried, if this fails other available + converters are tried. The order in which these other converters + are tried is determined by the `_status` attribute of the instance. + + Parameters + ---------- + value : str + The string to convert. + + Returns + ------- + out : any + The result of converting `value` with the appropriate converter. + + """ + self._checked = True + try: + return self._strict_call(value) + except ValueError: + self._do_upgrade() + return self.upgrade(value) + + def iterupgrade(self, value): + self._checked = True + if not hasattr(value, '__iter__'): + value = (value,) + _strict_call = self._strict_call + try: + for _m in value: + _strict_call(_m) + except ValueError: + self._do_upgrade() + self.iterupgrade(value) + + def update(self, func, default=None, testing_value=None, + missing_values='', locked=False): + """ + Set StringConverter attributes directly. + + Parameters + ---------- + func : function + Conversion function. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, + `StringConverter` tries to supply a reasonable default value. + testing_value : str, optional + A string representing a standard input value of the converter. + This string is used to help defining a reasonable default + value. + missing_values : {sequence of str, None}, optional + Sequence of strings indicating a missing value. 
If ``None``, then + the existing `missing_values` are cleared. The default is `''`. + locked : bool, optional + Whether the StringConverter should be locked to prevent + automatic upgrade or not. Default is False. + + Notes + ----- + `update` takes the same parameters as the constructor of + `StringConverter`, except that `func` does not accept a `dtype` + whereas `dtype_or_func` in the constructor does. + + """ + self.func = func + self._locked = locked + + # Don't reset the default to None if we can avoid it + if default is not None: + self.default = default + self.type = self._dtypeortype(self._getdtype(default)) + else: + try: + tester = func(testing_value or '1') + except (TypeError, ValueError): + tester = None + self.type = self._dtypeortype(self._getdtype(tester)) + + # Add the missing values to the existing set or clear it. + if missing_values is None: + # Clear all missing values even though the ctor initializes it to + # set(['']) when the argument is None. + self.missing_values = set() + else: + if not np.iterable(missing_values): + missing_values = [missing_values] + if not all(isinstance(v, str) for v in missing_values): + raise TypeError("missing_values must be strings or unicode") + self.missing_values.update(missing_values) + + +def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): + """ + Convenience function to create a `np.dtype` object. + + The function processes the input `dtype` and matches it with the given + names. + + Parameters + ---------- + ndtype : var + Definition of the dtype. Can be any string or dictionary recognized + by the `np.dtype` function, or a sequence of types. + names : str or sequence, optional + Sequence of strings to use as field names for a structured dtype. + For convenience, `names` can be a string of a comma-separated list + of names. + defaultfmt : str, optional + Format string used to define missing names, such as ``"f%i"`` + (default) or ``"fields_%02i"``. + validationargs : optional + A series of optional arguments used to initialize a + `NameValidator`. + + Examples + -------- + >>> np.lib._iotools.easy_dtype(float) + dtype('float64') + >>> np.lib._iotools.easy_dtype("i4, f8") + dtype([('f0', '<i4'), ('f1', '<f8')]) + >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i") + dtype([('field_000', '<i4'), ('field_001', '<f8')]) + >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c") + dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')]) + >>> np.lib._iotools.easy_dtype(float, names="a,b,c") + dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')]) + + """ ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/_version.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/_version.py ... +class NumpyVersion(): + """Parse and compare numpy version strings. + + NumPy has the following versioning scheme (numbers given are examples; they + can be > 9 in principle): + + - Released version: '1.8.0', '1.8.1', etc. + - Alpha: '1.8.0a1', '1.8.0a2', etc. + - Beta: '1.8.0b1', '1.8.0b2', etc. + - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. + - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) + - Development versions after a1: '1.8.0a1.dev-f1234afa', + '1.8.0b2.dev-f1234afa', + '1.8.1rc1.dev-f1234afa', etc. + - Development versions (no git hash available): '1.8.0.dev-Unknown' + + Comparing needs to be done against a valid version string or other + `NumpyVersion` instance. Note that all development versions of the same + (pre-)release compare equal. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + vstring : str + NumPy version string (``np.__version__``). + + Examples + -------- + >>> from numpy.lib import NumpyVersion + >>> if NumpyVersion(np.__version__) < '1.7.0': + ... print('skip') + >>> # skip + + >>> NumpyVersion('1.7') # raises ValueError, add ".0" + Traceback (most recent call last): + ...
+ ValueError: Not a valid numpy version string + + """ + + def __init__(self, vstring): + self.vstring = vstring + ver_main = re.match(r'\d+\.\d+\.\d+', vstring) + if not ver_main: + raise ValueError("Not a valid numpy version string") + + self.version = ver_main.group() + self.major, self.minor, self.bugfix = [int(x) for x in + self.version.split('.')] + if len(vstring) == ver_main.end(): + self.pre_release = 'final' + else: + alpha = re.match(r'a\d', vstring[ver_main.end():]) + beta = re.match(r'b\d', vstring[ver_main.end():]) + rc = re.match(r'rc\d', vstring[ver_main.end():]) + pre_rel = [m for m in [alpha, beta, rc] if m is not None] + if pre_rel: + self.pre_release = pre_rel[0].group() + else: + self.pre_release = '' + + self.is_devversion = bool(re.search(r'.dev', vstring)) + + def _compare_version(self, other): + """Compare major.minor.bugfix""" + if self.major == other.major: + if self.minor == other.minor: + if self.bugfix == other.bugfix: + vercmp = 0 + elif self.bugfix > other.bugfix: + vercmp = 1 + else: + vercmp = -1 + elif self.minor > other.minor: + vercmp = 1 + else: + vercmp = -1 + elif self.major > other.major: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare_pre_release(self, other): + """Compare alpha/beta/rc/final.""" + if self.pre_release == other.pre_release: + vercmp = 0 + elif self.pre_release == 'final': + vercmp = 1 + elif other.pre_release == 'final': + vercmp = -1 + elif self.pre_release > other.pre_release: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare(self, other): + if not isinstance(other, (str, NumpyVersion)): + raise ValueError("Invalid object to compare with NumpyVersion.") + + if isinstance(other, str): + other = NumpyVersion(other) + + vercmp = self._compare_version(other) + if vercmp == 0: + # Same x.y.z version, check for alpha/beta/rc + vercmp = self._compare_pre_release(other) + if vercmp == 0: + # Same version and same pre-release, check if dev version + if self.is_devversion is other.is_devversion: + vercmp = 0 + elif self.is_devversion: + vercmp = -1 + else: + vercmp = 1 + + return vercmp + + def __lt__(self, other): + return self._compare(other) < 0 + + def __le__(self, other): + return self._compare(other) <= 0 + + def __eq__(self, other): + return self._compare(other) == 0 + + def __ne__(self, other): + return self._compare(other) != 0 + + def __gt__(self, other): + return self._compare(other) > 0 + + def __ge__(self, other): + return self._compare(other) >= 0 + + def __repr__(self): + return "NumpyVersion(%s)" % self.vstring diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/_version.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/_version.pyi new file mode 100644 index 00000000..1c82c99b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/_version.pyi @@ -0,0 +1,17 @@ +__all__: list[str] + +class NumpyVersion: + vstring: str + version: str + major: int + minor: int + bugfix: int + pre_release: str + is_devversion: bool + def __init__(self, vstring: str) -> None: ... + def __lt__(self, other: str | NumpyVersion) -> bool: ... + def __le__(self, other: str | NumpyVersion) -> bool: ... + def __eq__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] + def __ne__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] + def __gt__(self, other: str | NumpyVersion) -> bool: ... + def __ge__(self, other: str | NumpyVersion) -> bool: ... 
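With `_version.py` and its stub in place, version gating behaves as the docstring above describes; a small sketch of the comparison semantics:

```python
from numpy.lib import NumpyVersion

nv = NumpyVersion('1.8.0rc1')
print(nv < '1.8.0')                 # True: an rc precedes the final release
print(nv > '1.8.0b2.dev-f1234afa')  # True: any rc sorts after any beta
print(NumpyVersion('1.8.0.dev-a') == NumpyVersion('1.8.0.dev-b'))
# True: all dev versions of the same (pre-)release compare equal
```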
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arraypad.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arraypad.py new file mode 100644 index 00000000..b06a645d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arraypad.py @@ -0,0 +1,882 @@ +""" +The arraypad module contains a group of functions to pad values onto the edges +of an n-dimensional array. + +""" +import numpy as np +from numpy.core.overrides import array_function_dispatch +from numpy.lib.index_tricks import ndindex + + +__all__ = ['pad'] + + +############################################################################### +# Private utility functions. + + +def _round_if_needed(arr, dtype): + """ + Rounds arr inplace if destination dtype is integer. + + Parameters + ---------- + arr : ndarray + Input array. + dtype : dtype + The dtype of the destination array. + """ + if np.issubdtype(dtype, np.integer): + arr.round(out=arr) + + +def _slice_at_axis(sl, axis): + """ + Construct tuple of slices to slice an array in the given dimension. + + Parameters + ---------- + sl : slice + The slice for the given dimension. + axis : int + The axis to which `sl` is applied. All other dimensions are left + "unsliced". + + Returns + ------- + sl : tuple of slices + A tuple with slices matching `shape` in length. + + Examples + -------- + >>> _slice_at_axis(slice(None, 3, -1), 1) + (slice(None, None, None), slice(None, 3, -1), (...,)) + """ + return (slice(None),) * axis + (sl,) + (...,) + + +def _view_roi(array, original_area_slice, axis): + """ + Get a view of the current region of interest during iterative padding. + + When padding multiple dimensions iteratively corner values are + unnecessarily overwritten multiple times. This function reduces the + working area for the first dimensions so that corners are excluded. + + Parameters + ---------- + array : ndarray + The array with the region of interest. + original_area_slice : tuple of slices + Denotes the area with original values of the unpadded array. + axis : int + The currently padded dimension assuming that `axis` is padded before + `axis` + 1. + + Returns + ------- + roi : ndarray + The region of interest of the original `array`. + """ + axis += 1 + sl = (slice(None),) * axis + original_area_slice[axis:] + return array[sl] + + +def _pad_simple(array, pad_width, fill_value=None): + """ + Pad array on all sides with either a single value or undefined values. + + Parameters + ---------- + array : ndarray + Array to grow. + pad_width : sequence of tuple[int, int] + Pad width on both sides for each dimension in `arr`. + fill_value : scalar, optional + If provided the padded area is filled with this value, otherwise + the pad area left undefined. + + Returns + ------- + padded : ndarray + The padded array with the same dtype as`array`. Its order will default + to C-style if `array` is not F-contiguous. + original_area_slice : tuple + A tuple of slices pointing to the area of the original array. 
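`_slice_at_axis` above is the workhorse of this module: it builds an index tuple that slices a single axis and leaves all others untouched. A quick illustration (importing the private helper; the path is internal and may change between releases):

```python
import numpy as np
from numpy.lib.arraypad import _slice_at_axis

a = np.arange(12).reshape(3, 4)
idx = _slice_at_axis(slice(None, 2), axis=1)
print(idx)     # (slice(None, None, None), slice(None, 2, None), Ellipsis)
print(a[idx])  # first two columns of every row
```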
+ """ + # Allocate grown array + new_shape = tuple( + left + size + right + for size, (left, right) in zip(array.shape, pad_width) + ) + order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order + padded = np.empty(new_shape, dtype=array.dtype, order=order) + + if fill_value is not None: + padded.fill(fill_value) + + # Copy old array into correct space + original_area_slice = tuple( + slice(left, left + size) + for size, (left, right) in zip(array.shape, pad_width) + ) + padded[original_area_slice] = array + + return padded, original_area_slice + + +def _set_pad_area(padded, axis, width_pair, value_pair): + """ + Set empty-padded area in given dimension. + + Parameters + ---------- + padded : ndarray + Array with the pad area which is modified inplace. + axis : int + Dimension with the pad area to set. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + value_pair : tuple of scalars or ndarrays + Values inserted into the pad area on each side. It must match or be + broadcastable to the shape of `arr`. + """ + left_slice = _slice_at_axis(slice(None, width_pair[0]), axis) + padded[left_slice] = value_pair[0] + + right_slice = _slice_at_axis( + slice(padded.shape[axis] - width_pair[1], None), axis) + padded[right_slice] = value_pair[1] + + +def _get_edges(padded, axis, width_pair): + """ + Retrieve edge values from empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the edges are considered. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + + Returns + ------- + left_edge, right_edge : ndarray + Edge values of the valid area in `padded` in the given dimension. Its + shape will always match `padded` except for the dimension given by + `axis` which will have a length of 1. + """ + left_index = width_pair[0] + left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis) + left_edge = padded[left_slice] + + right_index = padded.shape[axis] - width_pair[1] + right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis) + right_edge = padded[right_slice] + + return left_edge, right_edge + + +def _get_linear_ramps(padded, axis, width_pair, end_value_pair): + """ + Construct linear ramps for empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the ramps are constructed. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + end_value_pair : (scalar, scalar) + End values for the linear ramps which form the edge of the fully padded + array. These values are included in the linear ramps. + + Returns + ------- + left_ramp, right_ramp : ndarray + Linear ramps to set on both sides of `padded`. + """ + edge_pair = _get_edges(padded, axis, width_pair) + + left_ramp, right_ramp = ( + np.linspace( + start=end_value, + stop=edge.squeeze(axis), # Dimension is replaced by linspace + num=width, + endpoint=False, + dtype=padded.dtype, + axis=axis + ) + for end_value, edge, width in zip( + end_value_pair, edge_pair, width_pair + ) + ) + + # Reverse linear space in appropriate dimension + right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)] + + return left_ramp, right_ramp + + +def _get_stats(padded, axis, width_pair, length_pair, stat_func): + """ + Calculate statistic for the empty-padded array in given dimension. 
+ + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the statistic is calculated. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + length_pair : 2-element sequence of None or int + Gives the number of values in valid area from each side that is + taken into account when calculating the statistic. If None the entire + valid area in `padded` is considered. + stat_func : function + Function to compute statistic. The expected signature is + ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``. + + Returns + ------- + left_stat, right_stat : ndarray + Calculated statistic for both sides of `padded`. + """ + # Calculate indices of the edges of the area with original values + left_index = width_pair[0] + right_index = padded.shape[axis] - width_pair[1] + # as well as its length + max_length = right_index - left_index + + # Limit stat_lengths to max_length + left_length, right_length = length_pair + if left_length is None or max_length < left_length: + left_length = max_length + if right_length is None or max_length < right_length: + right_length = max_length + + if (left_length == 0 or right_length == 0) \ + and stat_func in {np.amax, np.amin}: + # amax and amin can't operate on an empty array, + # raise a more descriptive warning here instead of the default one + raise ValueError("stat_length of 0 yields no value for padding") + + # Calculate statistic for the left side + left_slice = _slice_at_axis( + slice(left_index, left_index + left_length), axis) + left_chunk = padded[left_slice] + left_stat = stat_func(left_chunk, axis=axis, keepdims=True) + _round_if_needed(left_stat, padded.dtype) + + if left_length == right_length == max_length: + # return early as right_stat must be identical to left_stat + return left_stat, left_stat + + # Calculate statistic for the right side + right_slice = _slice_at_axis( + slice(right_index - right_length, right_index), axis) + right_chunk = padded[right_slice] + right_stat = stat_func(right_chunk, axis=axis, keepdims=True) + _round_if_needed(right_stat, padded.dtype) + + return left_stat, right_stat + + +def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): + """ + Pad `axis` of `arr` with reflection. + + Parameters + ---------- + padded : ndarray + Input array of arbitrary shape. + axis : int + Axis along which to pad `arr`. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + method : str + Controls method of reflection; options are 'even' or 'odd'. + include_edge : bool + If true, edge value is included in reflection, otherwise the edge + value forms the symmetric axis to the reflection. + + Returns + ------- + pad_amt : tuple of ints, length 2 + New index positions of padding to do along the `axis`. If these are + both 0, padding is done in this dimension. 
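`_get_stats` above backs the 'maximum', 'minimum', 'mean' and 'median' modes, with `stat_length` limiting how much of the valid area enters the statistic. Seen through the public API:

```python
import numpy as np

a = np.array([1, 2, 3, 4, 5])
print(np.pad(a, (2, 2), mode='mean'))
# [3 3 1 2 3 4 5 3 3] -- mean of the whole vector is 3
print(np.pad(a, (2, 2), mode='maximum', stat_length=2))
# [2 2 1 2 3 4 5 5 5] -- max over just 2 edge values per side
```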
+ """ + left_pad, right_pad = width_pair + old_length = padded.shape[axis] - right_pad - left_pad + + if include_edge: + # Edge is included, we need to offset the pad amount by 1 + edge_offset = 1 + else: + edge_offset = 0 # Edge is not included, no need to offset pad amount + old_length -= 1 # but must be omitted from the chunk + + if left_pad > 0: + # Pad with reflected values on left side: + # First limit chunk size which can't be larger than pad area + chunk_length = min(old_length, left_pad) + # Slice right to left, stop on or next to edge, start relative to stop + stop = left_pad - edge_offset + start = stop + chunk_length + left_slice = _slice_at_axis(slice(start, stop, -1), axis) + left_chunk = padded[left_slice] + + if method == "odd": + # Negate chunk and align with edge + edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis) + left_chunk = 2 * padded[edge_slice] - left_chunk + + # Insert chunk into padded area + start = left_pad - chunk_length + stop = left_pad + pad_area = _slice_at_axis(slice(start, stop), axis) + padded[pad_area] = left_chunk + # Adjust pointer to left edge for next iteration + left_pad -= chunk_length + + if right_pad > 0: + # Pad with reflected values on right side: + # First limit chunk size which can't be larger than pad area + chunk_length = min(old_length, right_pad) + # Slice right to left, start on or next to edge, stop relative to start + start = -right_pad + edge_offset - 2 + stop = start - chunk_length + right_slice = _slice_at_axis(slice(start, stop, -1), axis) + right_chunk = padded[right_slice] + + if method == "odd": + # Negate chunk and align with edge + edge_slice = _slice_at_axis( + slice(-right_pad - 1, -right_pad), axis) + right_chunk = 2 * padded[edge_slice] - right_chunk + + # Insert chunk into padded area + start = padded.shape[axis] - right_pad + stop = start + chunk_length + pad_area = _slice_at_axis(slice(start, stop), axis) + padded[pad_area] = right_chunk + # Adjust pointer to right edge for next iteration + right_pad -= chunk_length + + return left_pad, right_pad + + +def _set_wrap_both(padded, axis, width_pair, original_period): + """ + Pad `axis` of `arr` with wrapped values. + + Parameters + ---------- + padded : ndarray + Input array of arbitrary shape. + axis : int + Axis along which to pad `arr`. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + original_period : int + Original length of data on `axis` of `arr`. + + Returns + ------- + pad_amt : tuple of ints, length 2 + New index positions of padding to do along the `axis`. If these are + both 0, padding is done in this dimension. + """ + left_pad, right_pad = width_pair + period = padded.shape[axis] - right_pad - left_pad + # Avoid wrapping with only a subset of the original area by ensuring period + # can only be a multiple of the original area's length. + period = period // original_period * original_period + + # If the current dimension of `arr` doesn't contain enough valid values + # (not part of the undefined pad area) we need to pad multiple times. + # Each time the pad area shrinks on both sides which is communicated with + # these variables. + new_left_pad = 0 + new_right_pad = 0 + + if left_pad > 0: + # Pad with wrapped values on left side + # First slice chunk from left side of the non-pad area. + # Use min(period, left_pad) to ensure that chunk is not larger than + # pad area. 
+ slice_end = left_pad + period + slice_start = slice_end - min(period, left_pad) + right_slice = _slice_at_axis(slice(slice_start, slice_end), axis) + right_chunk = padded[right_slice] + + if left_pad > period: + # Chunk is smaller than pad area + pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis) + new_left_pad = left_pad - period + else: + # Chunk matches pad area + pad_area = _slice_at_axis(slice(None, left_pad), axis) + padded[pad_area] = right_chunk + + if right_pad > 0: + # Pad with wrapped values on right side + # First slice chunk from right side of the non-pad area. + # Use min(period, right_pad) to ensure that chunk is not larger than + # pad area. + slice_start = -right_pad - period + slice_end = slice_start + min(period, right_pad) + left_slice = _slice_at_axis(slice(slice_start, slice_end), axis) + left_chunk = padded[left_slice] + + if right_pad > period: + # Chunk is smaller than pad area + pad_area = _slice_at_axis( + slice(-right_pad, -right_pad + period), axis) + new_right_pad = right_pad - period + else: + # Chunk matches pad area + pad_area = _slice_at_axis(slice(-right_pad, None), axis) + padded[pad_area] = left_chunk + + return new_left_pad, new_right_pad + + +def _as_pairs(x, ndim, as_index=False): + """ + Broadcast `x` to an array with the shape (`ndim`, 2). + + A helper function for `pad` that prepares and validates arguments like + `pad_width` for iteration in pairs. + + Parameters + ---------- + x : {None, scalar, array-like} + The object to broadcast to the shape (`ndim`, 2). + ndim : int + Number of pairs the broadcasted `x` will have. + as_index : bool, optional + If `x` is not None, try to round each element of `x` to an integer + (dtype `np.intp`) and ensure every element is positive. + + Returns + ------- + pairs : nested iterables, shape (`ndim`, 2) + The broadcasted version of `x`. + + Raises + ------ + ValueError + If `as_index` is True and `x` contains negative elements. + Or if `x` is not broadcastable to the shape (`ndim`, 2). + """ + if x is None: + # Pass through None as a special case, otherwise np.round(x) fails + # with an AttributeError + return ((None, None),) * ndim + + x = np.array(x) + if as_index: + x = np.round(x).astype(np.intp, copy=False) + + if x.ndim < 3: + # Optimization: Possibly use faster paths for cases where `x` has + # only 1 or 2 elements. `np.broadcast_to` could handle these as well + # but is currently slower + + if x.size == 1: + # x was supplied as a single value + x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2 + if as_index and x < 0: + raise ValueError("index can't contain negative values") + return ((x[0], x[0]),) * ndim + + if x.size == 2 and x.shape != (2, 1): + # x was supplied with a single value for each side + # but except case when each dimension has a single value + # which should be broadcasted to a pair, + # e.g. 
[[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]] + x = x.ravel() # Ensure x[0], x[1] works + if as_index and (x[0] < 0 or x[1] < 0): + raise ValueError("index can't contain negative values") + return ((x[0], x[1]),) * ndim + + if as_index and x.min() < 0: + raise ValueError("index can't contain negative values") + + # Converting the array with `tolist` seems to improve performance + # when iterating and indexing the result (see usage in `pad`) + return np.broadcast_to(x, (ndim, 2)).tolist() + + +def _pad_dispatcher(array, pad_width, mode=None, **kwargs): + return (array,) + + +############################################################################### +# Public functions + + +@array_function_dispatch(_pad_dispatcher, module='numpy') +def pad(array, pad_width, mode='constant', **kwargs): + """ + Pad an array. + + Parameters + ---------- + array : array_like of rank N + The array to pad. + pad_width : {sequence, array_like, int} + Number of values padded to the edges of each axis. + ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths + for each axis. + ``(before, after)`` or ``((before, after),)`` yields same before + and after pad for each axis. + ``(pad,)`` or ``int`` is a shortcut for before = after = pad width + for all axes. + mode : str or function, optional + One of the following string values or a user supplied function. + + 'constant' (default) + Pads with a constant value. + 'edge' + Pads with the edge values of array. + 'linear_ramp' + Pads with the linear ramp between end_value and the + array edge value. + 'maximum' + Pads with the maximum value of all or part of the + vector along each axis. + 'mean' + Pads with the mean value of all or part of the + vector along each axis. + 'median' + Pads with the median value of all or part of the + vector along each axis. + 'minimum' + Pads with the minimum value of all or part of the + vector along each axis. + 'reflect' + Pads with the reflection of the vector mirrored on + the first and last values of the vector along each + axis. + 'symmetric' + Pads with the reflection of the vector mirrored + along the edge of the array. + 'wrap' + Pads with the wrap of the vector along the axis. + The first values are used to pad the end and the + end values are used to pad the beginning. + 'empty' + Pads with undefined values. + + .. versionadded:: 1.17 + + + Padding function, see Notes. + stat_length : sequence or int, optional + Used in 'maximum', 'mean', 'median', and 'minimum'. Number of + values at edge of each axis used to calculate the statistic value. + + ``((before_1, after_1), ... (before_N, after_N))`` unique statistic + lengths for each axis. + + ``(before, after)`` or ``((before, after),)`` yields same before + and after statistic lengths for each axis. + + ``(stat_length,)`` or ``int`` is a shortcut for + ``before = after = statistic`` length for all axes. + + Default is ``None``, to use the entire axis. + constant_values : sequence or scalar, optional + Used in 'constant'. The values to set the padded values for each + axis. + + ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants + for each axis. + + ``(before, after)`` or ``((before, after),)`` yields same before + and after constants for each axis. + + ``(constant,)`` or ``constant`` is a shortcut for + ``before = after = constant`` for all axes. + + Default is 0. + end_values : sequence or scalar, optional + Used in 'linear_ramp'. The values used for the ending value of the + linear_ramp and that will form the edge of the padded array. 
+ + ``((before_1, after_1), ... (before_N, after_N))`` unique end values + for each axis. + + ``(before, after)`` or ``((before, after),)`` yields same before + and after end values for each axis. + + ``(constant,)`` or ``constant`` is a shortcut for + ``before = after = constant`` for all axes. + + Default is 0. + reflect_type : {'even', 'odd'}, optional + Used in 'reflect', and 'symmetric'. The 'even' style is the + default with an unaltered reflection around the edge value. For + the 'odd' style, the extended part of the array is created by + subtracting the reflected values from two times the edge value. + + Returns + ------- + pad : ndarray + Padded array of rank equal to `array` with shape increased + according to `pad_width`. + + Notes + ----- + .. versionadded:: 1.7.0 + + For an array with rank greater than 1, some of the padding of later + axes is calculated from padding of previous axes. This is easiest to + think about with a rank 2 array where the corners of the padded array + are calculated by using padded values from the first axis. + + The padding function, if used, should modify a rank 1 array in-place. It + has the following signature:: + + padding_func(vector, iaxis_pad_width, iaxis, kwargs) + + where + + vector : ndarray + A rank 1 array already padded with zeros. Padded values are + vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:]. + iaxis_pad_width : tuple + A 2-tuple of ints, iaxis_pad_width[0] represents the number of + values padded at the beginning of vector where + iaxis_pad_width[1] represents the number of values padded at + the end of vector. + iaxis : int + The axis currently being calculated. + kwargs : dict + Any keyword arguments the function requires. + + Examples + -------- + >>> a = [1, 2, 3, 4, 5] + >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6)) + array([4, 4, 1, ..., 6, 6, 6]) + + >>> np.pad(a, (2, 3), 'edge') + array([1, 1, 1, ..., 5, 5, 5]) + + >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) + array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) + + >>> np.pad(a, (2,), 'maximum') + array([5, 5, 1, 2, 3, 4, 5, 5, 5]) + + >>> np.pad(a, (2,), 'mean') + array([3, 3, 1, 2, 3, 4, 5, 3, 3]) + + >>> np.pad(a, (2,), 'median') + array([3, 3, 1, 2, 3, 4, 5, 3, 3]) + + >>> a = [[1, 2], [3, 4]] + >>> np.pad(a, ((3, 2), (2, 3)), 'minimum') + array([[1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [3, 3, 3, 4, 3, 3, 3], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1]]) + + >>> a = [1, 2, 3, 4, 5] + >>> np.pad(a, (2, 3), 'reflect') + array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2]) + + >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd') + array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + + >>> np.pad(a, (2, 3), 'symmetric') + array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3]) + + >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd') + array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7]) + + >>> np.pad(a, (2, 3), 'wrap') + array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3]) + + >>> def pad_with(vector, pad_width, iaxis, kwargs): + ... pad_value = kwargs.get('padder', 10) + ... vector[:pad_width[0]] = pad_value + ... 
vector[-pad_width[1]:] = pad_value + >>> a = np.arange(6) + >>> a = a.reshape((2, 3)) + >>> np.pad(a, 2, pad_with) + array([[10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 0, 1, 2, 10, 10], + [10, 10, 3, 4, 5, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10]]) + >>> np.pad(a, 2, pad_with, padder=100) + array([[100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 0, 1, 2, 100, 100], + [100, 100, 3, 4, 5, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100]]) + """ + array = np.asarray(array) + pad_width = np.asarray(pad_width) + + if not pad_width.dtype.kind == 'i': + raise TypeError('`pad_width` must be of integral type.') + + # Broadcast to shape (array.ndim, 2) + pad_width = _as_pairs(pad_width, array.ndim, as_index=True) + + if callable(mode): + # Old behavior: Use user-supplied function with np.apply_along_axis + function = mode + # Create a new zero padded array + padded, _ = _pad_simple(array, pad_width, fill_value=0) + # And apply along each axis + + for axis in range(padded.ndim): + # Iterate using ndindex as in apply_along_axis, but assuming that + # function operates inplace on the padded array. + + # view with the iteration axis at the end + view = np.moveaxis(padded, axis, -1) + + # compute indices for the iteration axes, and append a trailing + # ellipsis to prevent 0d arrays decaying to scalars (gh-8642) + inds = ndindex(view.shape[:-1]) + inds = (ind + (Ellipsis,) for ind in inds) + for ind in inds: + function(view[ind], pad_width[axis], axis, kwargs) + + return padded + + # Make sure that no unsupported keywords were passed for the current mode + allowed_kwargs = { + 'empty': [], 'edge': [], 'wrap': [], + 'constant': ['constant_values'], + 'linear_ramp': ['end_values'], + 'maximum': ['stat_length'], + 'mean': ['stat_length'], + 'median': ['stat_length'], + 'minimum': ['stat_length'], + 'reflect': ['reflect_type'], + 'symmetric': ['reflect_type'], + } + try: + unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode]) + except KeyError: + raise ValueError("mode '{}' is not supported".format(mode)) from None + if unsupported_kwargs: + raise ValueError("unsupported keyword arguments for mode '{}': {}" + .format(mode, unsupported_kwargs)) + + stat_functions = {"maximum": np.amax, "minimum": np.amin, + "mean": np.mean, "median": np.median} + + # Create array with final shape and original values + # (padded area is undefined) + padded, original_area_slice = _pad_simple(array, pad_width) + # And prepare iteration over all dimensions + # (zipping may be more readable than using enumerate) + axes = range(padded.ndim) + + if mode == "constant": + values = kwargs.get("constant_values", 0) + values = _as_pairs(values, padded.ndim) + for axis, width_pair, value_pair in zip(axes, pad_width, values): + roi = _view_roi(padded, original_area_slice, axis) + _set_pad_area(roi, axis, width_pair, value_pair) + + elif mode == "empty": + pass # Do nothing as _pad_simple already returned the correct result + + elif array.size == 0: + # Only modes "constant" and "empty" can extend empty axes, all other + # modes depend on `array` not being empty + # -> ensure every empty axis is only "padded with 0" + for axis, width_pair in zip(axes, pad_width): + if array.shape[axis] == 0 and any(width_pair): + raise ValueError( + "can't extend empty axis {} using modes other than " + "'constant' or 'empty'".format(axis) + ) + # passed, don't need to do anything more as _pad_simple already + # 
returned the correct result + + elif mode == "edge": + for axis, width_pair in zip(axes, pad_width): + roi = _view_roi(padded, original_area_slice, axis) + edge_pair = _get_edges(roi, axis, width_pair) + _set_pad_area(roi, axis, width_pair, edge_pair) + + elif mode == "linear_ramp": + end_values = kwargs.get("end_values", 0) + end_values = _as_pairs(end_values, padded.ndim) + for axis, width_pair, value_pair in zip(axes, pad_width, end_values): + roi = _view_roi(padded, original_area_slice, axis) + ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair) + _set_pad_area(roi, axis, width_pair, ramp_pair) + + elif mode in stat_functions: + func = stat_functions[mode] + length = kwargs.get("stat_length", None) + length = _as_pairs(length, padded.ndim, as_index=True) + for axis, width_pair, length_pair in zip(axes, pad_width, length): + roi = _view_roi(padded, original_area_slice, axis) + stat_pair = _get_stats(roi, axis, width_pair, length_pair, func) + _set_pad_area(roi, axis, width_pair, stat_pair) + + elif mode in {"reflect", "symmetric"}: + method = kwargs.get("reflect_type", "even") + include_edge = True if mode == "symmetric" else False + for axis, (left_index, right_index) in zip(axes, pad_width): + if array.shape[axis] == 1 and (left_index > 0 or right_index > 0): + # Extending singleton dimension for 'reflect' is legacy + # behavior; it really should raise an error. + edge_pair = _get_edges(padded, axis, (left_index, right_index)) + _set_pad_area( + padded, axis, (left_index, right_index), edge_pair) + continue + + roi = _view_roi(padded, original_area_slice, axis) + while left_index > 0 or right_index > 0: + # Iteratively pad until dimension is filled with reflected + # values. This is necessary if the pad area is larger than + # the length of the original values in the current dimension. + left_index, right_index = _set_reflect_both( + roi, axis, (left_index, right_index), + method, include_edge + ) + + elif mode == "wrap": + for axis, (left_index, right_index) in zip(axes, pad_width): + roi = _view_roi(padded, original_area_slice, axis) + original_period = padded.shape[axis] - right_index - left_index + while left_index > 0 or right_index > 0: + # Iteratively pad until dimension is filled with wrapped + # values. This is necessary if the pad area is larger than + # the length of the original values in the current dimension. + left_index, right_index = _set_wrap_both( + roi, axis, (left_index, right_index), original_period) + + return padded diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arraypad.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arraypad.pyi new file mode 100644 index 00000000..1ac6fc7d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arraypad.pyi @@ -0,0 +1,85 @@ +from typing import ( + Literal as L, + Any, + overload, + TypeVar, + Protocol, +) + +from numpy import generic + +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLikeInt, + _ArrayLike, +) + +_SCT = TypeVar("_SCT", bound=generic) + +class _ModeFunc(Protocol): + def __call__( + self, + vector: NDArray[Any], + iaxis_pad_width: tuple[int, int], + iaxis: int, + kwargs: dict[str, Any], + /, + ) -> None: ... + +_ModeKind = L[ + "constant", + "edge", + "linear_ramp", + "maximum", + "mean", + "median", + "minimum", + "reflect", + "symmetric", + "wrap", + "empty", +] + +__all__: list[str] + +# TODO: In practice each keyword argument is exclusive to one or more +# specific modes. Consider adding more overloads to express this in the future. 
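The dispatch at the end of `pad` above routes each mode to the helpers defined earlier in the file; the difference between the period-based and reflection-based modes is easiest to see side by side:

```python
import numpy as np

a = np.array([1, 2, 3])
print(np.pad(a, (0, 4), mode='wrap'))       # [1 2 3 1 2 3 1]
print(np.pad(a, (0, 4), mode='reflect'))    # [1 2 3 2 1 2 3] -- edge not repeated
print(np.pad(a, (0, 4), mode='symmetric'))  # [1 2 3 3 2 1 1] -- edge included
```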
+ +# Expand `**kwargs` into explicit keyword-only arguments +@overload +def pad( + array: _ArrayLike[_SCT], + pad_width: _ArrayLikeInt, + mode: _ModeKind = ..., + *, + stat_length: None | _ArrayLikeInt = ..., + constant_values: ArrayLike = ..., + end_values: ArrayLike = ..., + reflect_type: L["odd", "even"] = ..., +) -> NDArray[_SCT]: ... +@overload +def pad( + array: ArrayLike, + pad_width: _ArrayLikeInt, + mode: _ModeKind = ..., + *, + stat_length: None | _ArrayLikeInt = ..., + constant_values: ArrayLike = ..., + end_values: ArrayLike = ..., + reflect_type: L["odd", "even"] = ..., +) -> NDArray[Any]: ... +@overload +def pad( + array: _ArrayLike[_SCT], + pad_width: _ArrayLikeInt, + mode: _ModeFunc, + **kwargs: Any, +) -> NDArray[_SCT]: ... +@overload +def pad( + array: ArrayLike, + pad_width: _ArrayLikeInt, + mode: _ModeFunc, + **kwargs: Any, +) -> NDArray[Any]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arraysetops.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arraysetops.py new file mode 100644 index 00000000..300bbda2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arraysetops.py @@ -0,0 +1,981 @@ +""" +Set operations for arrays based on sorting. + +Notes +----- + +For floating point arrays, inaccurate results may appear due to usual round-off +and floating point comparison issues. + +Speed could be gained in some operations by an implementation of +`numpy.sort`, that can provide directly the permutation vectors, thus avoiding +calls to `numpy.argsort`. + +Original author: Robert Cimrman + +""" +import functools + +import numpy as np +from numpy.core import overrides + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique', + 'in1d', 'isin' + ] + + +def _ediff1d_dispatcher(ary, to_end=None, to_begin=None): + return (ary, to_end, to_begin) + + +@array_function_dispatch(_ediff1d_dispatcher) +def ediff1d(ary, to_end=None, to_begin=None): + """ + The differences between consecutive elements of an array. + + Parameters + ---------- + ary : array_like + If necessary, will be flattened before the differences are taken. + to_end : array_like, optional + Number(s) to append at the end of the returned differences. + to_begin : array_like, optional + Number(s) to prepend at the beginning of the returned differences. + + Returns + ------- + ediff1d : ndarray + The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``. + + See Also + -------- + diff, gradient + + Notes + ----- + When applied to masked arrays, this function drops the mask information + if the `to_begin` and/or `to_end` parameters are used. + + Examples + -------- + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.ediff1d(x) + array([ 1, 2, 3, -7]) + + >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) + array([-99, 1, 2, ..., -7, 88, 99]) + + The returned array is always 1D. 
+ + >>> y = [[1, 2, 4], [1, 6, 24]] + >>> np.ediff1d(y) + array([ 1, 2, -3, 5, 18]) + + """ + # force a 1d array + ary = np.asanyarray(ary).ravel() + + # enforce that the dtype of `ary` is used for the output + dtype_req = ary.dtype + + # fast track default case + if to_begin is None and to_end is None: + return ary[1:] - ary[:-1] + + if to_begin is None: + l_begin = 0 + else: + to_begin = np.asanyarray(to_begin) + if not np.can_cast(to_begin, dtype_req, casting="same_kind"): + raise TypeError("dtype of `to_begin` must be compatible " + "with input `ary` under the `same_kind` rule.") + + to_begin = to_begin.ravel() + l_begin = len(to_begin) + + if to_end is None: + l_end = 0 + else: + to_end = np.asanyarray(to_end) + if not np.can_cast(to_end, dtype_req, casting="same_kind"): + raise TypeError("dtype of `to_end` must be compatible " + "with input `ary` under the `same_kind` rule.") + + to_end = to_end.ravel() + l_end = len(to_end) + + # do the calculation in place and copy to_begin and to_end + l_diff = max(len(ary) - 1, 0) + result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype) + result = ary.__array_wrap__(result) + if l_begin > 0: + result[:l_begin] = to_begin + if l_end > 0: + result[l_begin + l_diff:] = to_end + np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff]) + return result + + +def _unpack_tuple(x): + """ Unpacks one-element tuples for use as return values """ + if len(x) == 1: + return x[0] + else: + return x + + +def _unique_dispatcher(ar, return_index=None, return_inverse=None, + return_counts=None, axis=None, *, equal_nan=None): + return (ar,) + + +@array_function_dispatch(_unique_dispatcher) +def unique(ar, return_index=False, return_inverse=False, + return_counts=False, axis=None, *, equal_nan=True): + """ + Find the unique elements of an array. + + Returns the sorted unique elements of an array. There are three optional + outputs in addition to the unique elements: + + * the indices of the input array that give the unique values + * the indices of the unique array that reconstruct the input array + * the number of times each unique value comes up in the input array + + Parameters + ---------- + ar : array_like + Input array. Unless `axis` is specified, this will be flattened if it + is not already 1-D. + return_index : bool, optional + If True, also return the indices of `ar` (along the specified axis, + if provided, or in the flattened array) that result in the unique array. + return_inverse : bool, optional + If True, also return the indices of the unique array (for the specified + axis, if provided) that can be used to reconstruct `ar`. + return_counts : bool, optional + If True, also return the number of times each unique item appears + in `ar`. + axis : int or None, optional + The axis to operate on. If None, `ar` will be flattened. If an integer, + the subarrays indexed by the given axis will be flattened and treated + as the elements of a 1-D array with the dimension of the given axis, + see the notes for more details. Object arrays or structured arrays + that contain objects are not supported if the `axis` kwarg is used. The + default is None. + + .. versionadded:: 1.13.0 + + equal_nan : bool, optional + If True, collapses multiple NaN values in the return array into one. + + .. versionadded:: 1.24 + + Returns + ------- + unique : ndarray + The sorted unique values. + unique_indices : ndarray, optional + The indices of the first occurrences of the unique values in the + original array. Only provided if `return_index` is True. 
+    unique_inverse : ndarray, optional
+        The indices to reconstruct the original array from the
+        unique array. Only provided if `return_inverse` is True.
+    unique_counts : ndarray, optional
+        The number of times each of the unique values comes up in the
+        original array. Only provided if `return_counts` is True.
+
+        .. versionadded:: 1.9.0
+
+    See Also
+    --------
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+    repeat : Repeat elements of an array.
+
+    Notes
+    -----
+    When an axis is specified the subarrays indexed by the axis are sorted.
+    This is done by making the specified axis the first dimension of the array
+    (move the axis to the first dimension to keep the order of the other axes)
+    and then flattening the subarrays in C order. The flattened subarrays are
+    then viewed as a structured type with each element given a label, with the
+    effect that we end up with a 1-D array of structured types that can be
+    treated in the same way as any other 1-D array. The result is that the
+    flattened subarrays are sorted in lexicographic order starting with the
+    first element.
+
+    .. versionchanged:: NumPy 1.21
+        If nan values are in the input array, a single nan is put
+        to the end of the sorted unique values.
+
+        Also for complex arrays all NaN values are considered equivalent
+        (no matter whether the NaN is in the real or imaginary part).
+        As the representative for the returned array the smallest one in the
+        lexicographical order is chosen - see np.sort for how the
+        lexicographical order is defined for complex arrays.
+
+    Examples
+    --------
+    >>> np.unique([1, 1, 2, 2, 3, 3])
+    array([1, 2, 3])
+    >>> a = np.array([[1, 1], [2, 3]])
+    >>> np.unique(a)
+    array([1, 2, 3])
+
+    Return the unique rows of a 2D array
+
+    >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
+    >>> np.unique(a, axis=0)
+    array([[1, 0, 0], [2, 3, 4]])
+
+    Return the indices of the original array that give the unique values:
+
+    >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
+    >>> u, indices = np.unique(a, return_index=True)
+    >>> u
+    array(['a', 'b', 'c'], dtype='<U1')
+    >>> indices
+    array([0, 1, 3])
+    >>> a[indices]
+    array(['a', 'b', 'c'], dtype='<U1')
+
+    Reconstruct the input array from the unique values and inverse:
+
+    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+    >>> u, indices = np.unique(a, return_inverse=True)
+    >>> u
+    array([1, 2, 3, 4, 6])
+    >>> indices
+    array([0, 1, 4, 3, 1, 2, 1])
+    >>> u[indices]
+    array([1, 2, 6, 4, 2, 3, 2])
+
+    Reconstruct the input values from the unique values and counts:
+
+    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+    >>> values, counts = np.unique(a, return_counts=True)
+    >>> values
+    array([1, 2, 3, 4, 6])
+    >>> counts
+    array([1, 3, 1, 1, 1])
+    >>> np.repeat(values, counts)
+    array([1, 2, 2, 2, 3, 4, 6])    # original order not preserved
+
+    """
+    ar = np.asanyarray(ar)
+    if axis is None:
+        ret = _unique1d(ar, return_index, return_inverse, return_counts,
+                        equal_nan=equal_nan)
+        return _unpack_tuple(ret)
+
+    # axis was specified and not None
+    try:
+        ar = np.moveaxis(ar, axis, 0)
+    except np.AxisError:
+        # this removes the "axis1" or "axis2" prefix from the error message
+        raise np.AxisError(axis, ar.ndim) from None
+
+    # Must reshape to a contiguous 2D array for this to work...
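+    # (Concrete illustration: a (3, 2, 2) input with axis=0 becomes a
+    # (3, 4) array here, and each length-4 row is then compared as a
+    # single structured scalar by _unique1d below.)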
+ orig_shape, orig_dtype = ar.shape, ar.dtype + ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp)) + ar = np.ascontiguousarray(ar) + dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])] + + # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured + # data type with `m` fields where each field has the data type of `ar`. + # In the following, we create the array `consolidated`, which has + # shape `(n,)` with data type `dtype`. + try: + if ar.shape[1] > 0: + consolidated = ar.view(dtype) + else: + # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is + # a data type with itemsize 0, and the call `ar.view(dtype)` will + # fail. Instead, we'll use `np.empty` to explicitly create the + # array with shape `(len(ar),)`. Because `dtype` in this case has + # itemsize 0, the total size of the result is still 0 bytes. + consolidated = np.empty(len(ar), dtype=dtype) + except TypeError as e: + # There's no good way to do this for object arrays, etc... + msg = 'The axis argument to unique is not supported for dtype {dt}' + raise TypeError(msg.format(dt=ar.dtype)) from e + + def reshape_uniq(uniq): + n = len(uniq) + uniq = uniq.view(orig_dtype) + uniq = uniq.reshape(n, *orig_shape[1:]) + uniq = np.moveaxis(uniq, 0, axis) + return uniq + + output = _unique1d(consolidated, return_index, + return_inverse, return_counts, equal_nan=equal_nan) + output = (reshape_uniq(output[0]),) + output[1:] + return _unpack_tuple(output) + + +def _unique1d(ar, return_index=False, return_inverse=False, + return_counts=False, *, equal_nan=True): + """ + Find the unique elements of an array, ignoring shape. + """ + ar = np.asanyarray(ar).flatten() + + optional_indices = return_index or return_inverse + + if optional_indices: + perm = ar.argsort(kind='mergesort' if return_index else 'quicksort') + aux = ar[perm] + else: + ar.sort() + aux = ar + mask = np.empty(aux.shape, dtype=np.bool_) + mask[:1] = True + if (equal_nan and aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and + np.isnan(aux[-1])): + if aux.dtype.kind == "c": # for complex all NaNs are considered equivalent + aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left') + else: + aux_firstnan = np.searchsorted(aux, aux[-1], side='left') + if aux_firstnan > 0: + mask[1:aux_firstnan] = ( + aux[1:aux_firstnan] != aux[:aux_firstnan - 1]) + mask[aux_firstnan] = True + mask[aux_firstnan + 1:] = False + else: + mask[1:] = aux[1:] != aux[:-1] + + ret = (aux[mask],) + if return_index: + ret += (perm[mask],) + if return_inverse: + imask = np.cumsum(mask) - 1 + inv_idx = np.empty(mask.shape, dtype=np.intp) + inv_idx[perm] = imask + ret += (inv_idx,) + if return_counts: + idx = np.concatenate(np.nonzero(mask) + ([mask.size],)) + ret += (np.diff(idx),) + return ret + + +def _intersect1d_dispatcher( + ar1, ar2, assume_unique=None, return_indices=None): + return (ar1, ar2) + + +@array_function_dispatch(_intersect1d_dispatcher) +def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): + """ + Find the intersection of two arrays. + + Return the sorted, unique values that are in both of the input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. Will be flattened if not already 1D. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. If True but ``ar1`` or ``ar2`` are not + unique, incorrect results and out-of-bounds indices could result. + Default is False. 
+ return_indices : bool + If True, the indices which correspond to the intersection of the two + arrays are returned. The first instance of a value is used if there are + multiple. Default is False. + + .. versionadded:: 1.15.0 + + Returns + ------- + intersect1d : ndarray + Sorted 1D array of common and unique elements. + comm1 : ndarray + The indices of the first occurrences of the common values in `ar1`. + Only provided if `return_indices` is True. + comm2 : ndarray + The indices of the first occurrences of the common values in `ar2`. + Only provided if `return_indices` is True. + + + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + + Examples + -------- + >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) + array([1, 3]) + + To intersect more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([3]) + + To return the indices of the values common to the input arrays + along with the intersected values: + + >>> x = np.array([1, 1, 2, 3, 4]) + >>> y = np.array([2, 1, 4, 6]) + >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True) + >>> x_ind, y_ind + (array([0, 2, 4]), array([1, 0, 2])) + >>> xy, x[x_ind], y[y_ind] + (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4])) + + """ + ar1 = np.asanyarray(ar1) + ar2 = np.asanyarray(ar2) + + if not assume_unique: + if return_indices: + ar1, ind1 = unique(ar1, return_index=True) + ar2, ind2 = unique(ar2, return_index=True) + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + else: + ar1 = ar1.ravel() + ar2 = ar2.ravel() + + aux = np.concatenate((ar1, ar2)) + if return_indices: + aux_sort_indices = np.argsort(aux, kind='mergesort') + aux = aux[aux_sort_indices] + else: + aux.sort() + + mask = aux[1:] == aux[:-1] + int1d = aux[:-1][mask] + + if return_indices: + ar1_indices = aux_sort_indices[:-1][mask] + ar2_indices = aux_sort_indices[1:][mask] - ar1.size + if not assume_unique: + ar1_indices = ind1[ar1_indices] + ar2_indices = ind2[ar2_indices] + + return int1d, ar1_indices, ar2_indices + else: + return int1d + + +def _setxor1d_dispatcher(ar1, ar2, assume_unique=None): + return (ar1, ar2) + + +@array_function_dispatch(_setxor1d_dispatcher) +def setxor1d(ar1, ar2, assume_unique=False): + """ + Find the set exclusive-or of two arrays. + + Return the sorted, unique values that are in only one (not both) of the + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + + Returns + ------- + setxor1d : ndarray + Sorted 1D array of unique values that are in only one of the input + arrays. + + Examples + -------- + >>> a = np.array([1, 2, 3, 2, 4]) + >>> b = np.array([2, 3, 5, 7, 5]) + >>> np.setxor1d(a,b) + array([1, 4, 5, 7]) + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = np.concatenate((ar1, ar2)) + if aux.size == 0: + return aux + + aux.sort() + flag = np.concatenate(([True], aux[1:] != aux[:-1], [True])) + return aux[flag[1:] & flag[:-1]] + + +def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *, + kind=None): + return (ar1, ar2) + + +@array_function_dispatch(_in1d_dispatcher) +def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): + """ + Test whether each element of a 1-D array is also present in a second array. 
+ + Returns a boolean array the same length as `ar1` that is True + where an element of `ar1` is in `ar2` and False otherwise. + + We recommend using :func:`isin` instead of `in1d` for new code. + + Parameters + ---------- + ar1 : (M,) array_like + Input array. + ar2 : array_like + The values against which to test each value of `ar1`. + assume_unique : bool, optional + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + invert : bool, optional + If True, the values in the returned array are inverted (that is, + False where an element of `ar1` is in `ar2` and True otherwise). + Default is False. ``np.in1d(a, b, invert=True)`` is equivalent + to (but is faster than) ``np.invert(in1d(a, b))``. + kind : {None, 'sort', 'table'}, optional + The algorithm to use. This will not affect the final result, + but will affect the speed and memory use. The default, None, + will select automatically based on memory considerations. + + * If 'sort', will use a mergesort-based approach. This will have + a memory usage of roughly 6 times the sum of the sizes of + `ar1` and `ar2`, not accounting for size of dtypes. + * If 'table', will use a lookup table approach similar + to a counting sort. This is only available for boolean and + integer arrays. This will have a memory usage of the + size of `ar1` plus the max-min value of `ar2`. `assume_unique` + has no effect when the 'table' option is used. + * If None, will automatically choose 'table' if + the required memory allocation is less than or equal to + 6 times the sum of the sizes of `ar1` and `ar2`, + otherwise will use 'sort'. This is done to not use + a large amount of memory by default, even though + 'table' may be faster in most cases. If 'table' is chosen, + `assume_unique` will have no effect. + + .. versionadded:: 1.8.0 + + Returns + ------- + in1d : (M,) ndarray, bool + The values `ar1[in1d]` are in `ar2`. + + See Also + -------- + isin : Version of this function that preserves the + shape of ar1. + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + + Notes + ----- + `in1d` can be considered as an element-wise function version of the + python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly + equivalent to ``np.array([item in b for item in a])``. + However, this idea fails if `ar2` is a set, or similar (non-sequence) + container: As ``ar2`` is converted to an array, in those cases + ``asarray(ar2)`` is an object array rather than the expected array of + contained values. + + Using ``kind='table'`` tends to be faster than `kind='sort'` if the + following relationship is true: + ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``, + but may use greater memory. The default value for `kind` will + be automatically selected based only on memory usage, so one may + manually set ``kind='table'`` if memory constraints can be relaxed. + + .. 
versionadded:: 1.4.0 + + Examples + -------- + >>> test = np.array([0, 1, 2, 5, 0]) + >>> states = [0, 2] + >>> mask = np.in1d(test, states) + >>> mask + array([ True, False, True, False, True]) + >>> test[mask] + array([0, 2, 0]) + >>> mask = np.in1d(test, states, invert=True) + >>> mask + array([False, True, False, True, False]) + >>> test[mask] + array([1, 5]) + """ + # Ravel both arrays, behavior for the first array could be different + ar1 = np.asarray(ar1).ravel() + ar2 = np.asarray(ar2).ravel() + + # Ensure that iteration through object arrays yields size-1 arrays + if ar2.dtype == object: + ar2 = ar2.reshape(-1, 1) + + if kind not in {None, 'sort', 'table'}: + raise ValueError( + f"Invalid kind: '{kind}'. Please use None, 'sort' or 'table'.") + + # Can use the table method if all arrays are integers or boolean: + is_int_arrays = all(ar.dtype.kind in ("u", "i", "b") for ar in (ar1, ar2)) + use_table_method = is_int_arrays and kind in {None, 'table'} + + if use_table_method: + if ar2.size == 0: + if invert: + return np.ones_like(ar1, dtype=bool) + else: + return np.zeros_like(ar1, dtype=bool) + + # Convert booleans to uint8 so we can use the fast integer algorithm + if ar1.dtype == bool: + ar1 = ar1.astype(np.uint8) + if ar2.dtype == bool: + ar2 = ar2.astype(np.uint8) + + ar2_min = np.min(ar2) + ar2_max = np.max(ar2) + + ar2_range = int(ar2_max) - int(ar2_min) + + # Constraints on whether we can actually use the table method: + # 1. Assert memory usage is not too large + below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size) + # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype + range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max + # 3. Check overflows for (ar1 - ar2_min); dtype=ar1.dtype + if ar1.size > 0: + ar1_min = np.min(ar1) + ar1_max = np.max(ar1) + + # After masking, the range of ar1 is guaranteed to be + # within the range of ar2: + ar1_upper = min(int(ar1_max), int(ar2_max)) + ar1_lower = max(int(ar1_min), int(ar2_min)) + + range_safe_from_overflow &= all(( + ar1_upper - int(ar2_min) <= np.iinfo(ar1.dtype).max, + ar1_lower - int(ar2_min) >= np.iinfo(ar1.dtype).min + )) + + # Optimal performance is for approximately + # log10(size) > (log10(range) - 2.27) / 0.927. + # However, here we set the requirement that by default + # the intermediate array can only be 6x + # the combined memory allocation of the original + # arrays. See discussion on + # https://github.com/numpy/numpy/pull/12065. + + if ( + range_safe_from_overflow and + (below_memory_constraint or kind == 'table') + ): + + if invert: + outgoing_array = np.ones_like(ar1, dtype=bool) + else: + outgoing_array = np.zeros_like(ar1, dtype=bool) + + # Make elements 1 where the integer exists in ar2 + if invert: + isin_helper_ar = np.ones(ar2_range + 1, dtype=bool) + isin_helper_ar[ar2 - ar2_min] = 0 + else: + isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool) + isin_helper_ar[ar2 - ar2_min] = 1 + + # Mask out elements we know won't work + basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) + outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] - + ar2_min] + + return outgoing_array + elif kind == 'table': # not range_safe_from_overflow + raise RuntimeError( + "You have specified kind='table', " + "but the range of values in `ar2` or `ar1` exceed the " + "maximum integer of the datatype. " + "Please set `kind` to None or 'sort'." + ) + elif kind == 'table': + raise ValueError( + "The 'table' method is only " + "supported for boolean or integer arrays. 
" + "Please select 'sort' or None for kind." + ) + + + # Check if one of the arrays may contain arbitrary objects + contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject + + # This code is run when + # a) the first condition is true, making the code significantly faster + # b) the second condition is true (i.e. `ar1` or `ar2` may contain + # arbitrary objects), since then sorting is not guaranteed to work + if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: + if invert: + mask = np.ones(len(ar1), dtype=bool) + for a in ar2: + mask &= (ar1 != a) + else: + mask = np.zeros(len(ar1), dtype=bool) + for a in ar2: + mask |= (ar1 == a) + return mask + + # Otherwise use sorting + if not assume_unique: + ar1, rev_idx = np.unique(ar1, return_inverse=True) + ar2 = np.unique(ar2) + + ar = np.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = np.concatenate((bool_ar, [invert])) + ret = np.empty(ar.shape, dtype=bool) + ret[order] = flag + + if assume_unique: + return ret[:len(ar1)] + else: + return ret[rev_idx] + + +def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None, + *, kind=None): + return (element, test_elements) + + +@array_function_dispatch(_isin_dispatcher) +def isin(element, test_elements, assume_unique=False, invert=False, *, + kind=None): + """ + Calculates ``element in test_elements``, broadcasting over `element` only. + Returns a boolean array of the same shape as `element` that is True + where an element of `element` is in `test_elements` and False otherwise. + + Parameters + ---------- + element : array_like + Input array. + test_elements : array_like + The values against which to test each value of `element`. + This argument is flattened if it is an array or array_like. + See notes for behavior with non-array-like parameters. + assume_unique : bool, optional + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + invert : bool, optional + If True, the values in the returned array are inverted, as if + calculating `element not in test_elements`. Default is False. + ``np.isin(a, b, invert=True)`` is equivalent to (but faster + than) ``np.invert(np.isin(a, b))``. + kind : {None, 'sort', 'table'}, optional + The algorithm to use. This will not affect the final result, + but will affect the speed and memory use. The default, None, + will select automatically based on memory considerations. + + * If 'sort', will use a mergesort-based approach. This will have + a memory usage of roughly 6 times the sum of the sizes of + `ar1` and `ar2`, not accounting for size of dtypes. + * If 'table', will use a lookup table approach similar + to a counting sort. This is only available for boolean and + integer arrays. This will have a memory usage of the + size of `ar1` plus the max-min value of `ar2`. `assume_unique` + has no effect when the 'table' option is used. + * If None, will automatically choose 'table' if + the required memory allocation is less than or equal to + 6 times the sum of the sizes of `ar1` and `ar2`, + otherwise will use 'sort'. This is done to not use + a large amount of memory by default, even though + 'table' may be faster in most cases. 
If 'table' is chosen, + `assume_unique` will have no effect. + + + Returns + ------- + isin : ndarray, bool + Has the same shape as `element`. The values `element[isin]` + are in `test_elements`. + + See Also + -------- + in1d : Flattened version of this function. + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + + Notes + ----- + + `isin` is an element-wise function version of the python keyword `in`. + ``isin(a, b)`` is roughly equivalent to + ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences. + + `element` and `test_elements` are converted to arrays if they are not + already. If `test_elements` is a set (or other non-sequence collection) + it will be converted to an object array with one element, rather than an + array of the values contained in `test_elements`. This is a consequence + of the `array` constructor's way of handling non-sequence collections. + Converting the set to a list usually gives the desired behavior. + + Using ``kind='table'`` tends to be faster than `kind='sort'` if the + following relationship is true: + ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``, + but may use greater memory. The default value for `kind` will + be automatically selected based only on memory usage, so one may + manually set ``kind='table'`` if memory constraints can be relaxed. + + .. versionadded:: 1.13.0 + + Examples + -------- + >>> element = 2*np.arange(4).reshape((2, 2)) + >>> element + array([[0, 2], + [4, 6]]) + >>> test_elements = [1, 2, 4, 8] + >>> mask = np.isin(element, test_elements) + >>> mask + array([[False, True], + [ True, False]]) + >>> element[mask] + array([2, 4]) + + The indices of the matched values can be obtained with `nonzero`: + + >>> np.nonzero(mask) + (array([0, 1]), array([1, 0])) + + The test can also be inverted: + + >>> mask = np.isin(element, test_elements, invert=True) + >>> mask + array([[ True, False], + [False, True]]) + >>> element[mask] + array([0, 6]) + + Because of how `array` handles sets, the following does not + work as expected: + + >>> test_set = {1, 2, 4, 8} + >>> np.isin(element, test_set) + array([[False, False], + [False, False]]) + + Casting the set to a list gives the expected result: + + >>> np.isin(element, list(test_set)) + array([[False, True], + [ True, False]]) + """ + element = np.asarray(element) + return in1d(element, test_elements, assume_unique=assume_unique, + invert=invert, kind=kind).reshape(element.shape) + + +def _union1d_dispatcher(ar1, ar2): + return (ar1, ar2) + + +@array_function_dispatch(_union1d_dispatcher) +def union1d(ar1, ar2): + """ + Find the union of two arrays. + + Return the unique, sorted array of values that are in either of the two + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. They are flattened if they are not already 1D. + + Returns + ------- + union1d : ndarray + Unique, sorted union of the input arrays. + + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. 
+ + Examples + -------- + >>> np.union1d([-1, 0, 1], [-2, 0, 2]) + array([-2, -1, 0, 1, 2]) + + To find the union of more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([1, 2, 3, 4, 6]) + """ + return unique(np.concatenate((ar1, ar2), axis=None)) + + +def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None): + return (ar1, ar2) + + +@array_function_dispatch(_setdiff1d_dispatcher) +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Find the set difference of two arrays. + + Return the unique values in `ar1` that are not in `ar2`. + + Parameters + ---------- + ar1 : array_like + Input array. + ar2 : array_like + Input comparison array. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + + Returns + ------- + setdiff1d : ndarray + 1D array of values in `ar1` that are not in `ar2`. The result + is sorted when `assume_unique=False`, but otherwise only sorted + if the input is sorted. + + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + + Examples + -------- + >>> a = np.array([1, 2, 3, 2, 4, 1]) + >>> b = np.array([3, 4, 5, 6]) + >>> np.setdiff1d(a, b) + array([1, 2]) + + """ + if assume_unique: + ar1 = np.asarray(ar1).ravel() + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)] diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arraysetops.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arraysetops.pyi new file mode 100644 index 00000000..7075c334 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arraysetops.pyi @@ -0,0 +1,362 @@ +from typing import ( + Literal as L, + Any, + TypeVar, + overload, + SupportsIndex, +) + +from numpy import ( + generic, + number, + bool_, + ushort, + ubyte, + uintc, + uint, + ulonglong, + short, + int8, + byte, + intc, + int_, + intp, + longlong, + half, + single, + double, + longdouble, + csingle, + cdouble, + clongdouble, + timedelta64, + datetime64, + object_, + str_, + bytes_, + void, +) + +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeDT64_co, + _ArrayLikeTD64_co, + _ArrayLikeObject_co, + _ArrayLikeNumber_co, +) + +_SCT = TypeVar("_SCT", bound=generic) +_NumberType = TypeVar("_NumberType", bound=number[Any]) + +# Explicitly set all allowed values to prevent accidental castings to +# abstract dtypes (their common super-type). +# +# Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`) +# which could result in, for example, `int64` and `float64`producing a +# `number[_64Bit]` array +_SCTNoCast = TypeVar( + "_SCTNoCast", + bool_, + ushort, + ubyte, + uintc, + uint, + ulonglong, + short, + byte, + intc, + int_, + longlong, + half, + single, + double, + longdouble, + csingle, + cdouble, + clongdouble, + timedelta64, + datetime64, + object_, + str_, + bytes_, + void, +) + +__all__: list[str] + +@overload +def ediff1d( + ary: _ArrayLikeBool_co, + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[int8]: ... +@overload +def ediff1d( + ary: _ArrayLike[_NumberType], + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[_NumberType]: ... 
+@overload +def ediff1d( + ary: _ArrayLikeNumber_co, + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[Any]: ... +@overload +def ediff1d( + ary: _ArrayLikeDT64_co | _ArrayLikeTD64_co, + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[timedelta64]: ... +@overload +def ediff1d( + ary: _ArrayLikeObject_co, + to_end: None | ArrayLike = ..., + to_begin: None | ArrayLike = ..., +) -> NDArray[object_]: ... + +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[False] = ..., + return_inverse: L[False] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> NDArray[_SCT]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[False] = ..., + return_inverse: L[False] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> NDArray[Any]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[True] = ..., + return_inverse: L[False] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[_SCT], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[True] = ..., + return_inverse: L[False] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[Any], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[False] = ..., + return_inverse: L[True] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[_SCT], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[False] = ..., + return_inverse: L[True] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[Any], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[False] = ..., + return_inverse: L[False] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[_SCT], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[False] = ..., + return_inverse: L[False] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[Any], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[True] = ..., + return_inverse: L[True] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[True] = ..., + return_inverse: L[True] = ..., + return_counts: L[False] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[True] = ..., + return_inverse: L[False] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[True] = ..., + return_inverse: L[False] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... 
+@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[False] = ..., + return_inverse: L[True] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[False] = ..., + return_inverse: L[True] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: _ArrayLike[_SCT], + return_index: L[True] = ..., + return_inverse: L[True] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp], NDArray[intp]]: ... +@overload +def unique( + ar: ArrayLike, + return_index: L[True] = ..., + return_inverse: L[True] = ..., + return_counts: L[True] = ..., + axis: None | SupportsIndex = ..., + *, + equal_nan: bool = ..., +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp], NDArray[intp]]: ... + +@overload +def intersect1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], + assume_unique: bool = ..., + return_indices: L[False] = ..., +) -> NDArray[_SCTNoCast]: ... +@overload +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = ..., + return_indices: L[False] = ..., +) -> NDArray[Any]: ... +@overload +def intersect1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], + assume_unique: bool = ..., + return_indices: L[True] = ..., +) -> tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ... +@overload +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = ..., + return_indices: L[True] = ..., +) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ... + +@overload +def setxor1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], + assume_unique: bool = ..., +) -> NDArray[_SCTNoCast]: ... +@overload +def setxor1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = ..., +) -> NDArray[Any]: ... + +def in1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = ..., + invert: bool = ..., +) -> NDArray[bool_]: ... + +def isin( + element: ArrayLike, + test_elements: ArrayLike, + assume_unique: bool = ..., + invert: bool = ..., + *, + kind: None | str = ..., +) -> NDArray[bool_]: ... + +@overload +def union1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], +) -> NDArray[_SCTNoCast]: ... +@overload +def union1d( + ar1: ArrayLike, + ar2: ArrayLike, +) -> NDArray[Any]: ... + +@overload +def setdiff1d( + ar1: _ArrayLike[_SCTNoCast], + ar2: _ArrayLike[_SCTNoCast], + assume_unique: bool = ..., +) -> NDArray[_SCTNoCast]: ... +@overload +def setdiff1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = ..., +) -> NDArray[Any]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arrayterator.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arrayterator.py new file mode 100644 index 00000000..b9ea21f8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arrayterator.py @@ -0,0 +1,219 @@ +""" +A buffered iterator for big arrays. + +This module solves the problem of iterating over a big file-based array +without having to read it into memory. The `Arrayterator` class wraps +an array object, and when iterated it will return sub-arrays with at most +a user-specified number of elements. 
+ +""" +from operator import mul +from functools import reduce + +__all__ = ['Arrayterator'] + + +class Arrayterator: + """ + Buffered iterator for big arrays. + + `Arrayterator` creates a buffered iterator for reading big arrays in small + contiguous blocks. The class is useful for objects stored in the + file system. It allows iteration over the object *without* reading + everything in memory; instead, small blocks are read and iterated over. + + `Arrayterator` can be used with any object that supports multidimensional + slices. This includes NumPy arrays, but also variables from + Scientific.IO.NetCDF or pynetcdf for example. + + Parameters + ---------- + var : array_like + The object to iterate over. + buf_size : int, optional + The buffer size. If `buf_size` is supplied, the maximum amount of + data that will be read into memory is `buf_size` elements. + Default is None, which will read as many element as possible + into memory. + + Attributes + ---------- + var + buf_size + start + stop + step + shape + flat + + See Also + -------- + ndenumerate : Multidimensional array iterator. + flatiter : Flat array iterator. + memmap : Create a memory-map to an array stored in a binary file on disk. + + Notes + ----- + The algorithm works by first finding a "running dimension", along which + the blocks will be extracted. Given an array of dimensions + ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the + first dimension will be used. If, on the other hand, + ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on. + Blocks are extracted along this dimension, and when the last block is + returned the process continues from the next dimension, until all + elements have been read. + + Examples + -------- + >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + >>> a_itor = np.lib.Arrayterator(a, 2) + >>> a_itor.shape + (3, 4, 5, 6) + + Now we can iterate over ``a_itor``, and it will return arrays of size + two. Since `buf_size` was smaller than any dimension, the first + dimension will be iterated over first: + + >>> for subarr in a_itor: + ... if not subarr.all(): + ... print(subarr, subarr.shape) # doctest: +SKIP + >>> # [[[[0 1]]]] (1, 1, 1, 2) + + """ + + def __init__(self, var, buf_size=None): + self.var = var + self.buf_size = buf_size + + self.start = [0 for dim in var.shape] + self.stop = [dim for dim in var.shape] + self.step = [1 for dim in var.shape] + + def __getattr__(self, attr): + return getattr(self.var, attr) + + def __getitem__(self, index): + """ + Return a new arrayterator. + + """ + # Fix index, handling ellipsis and incomplete slices. + if not isinstance(index, tuple): + index = (index,) + fixed = [] + length, dims = len(index), self.ndim + for slice_ in index: + if slice_ is Ellipsis: + fixed.extend([slice(None)] * (dims-length+1)) + length = len(fixed) + elif isinstance(slice_, int): + fixed.append(slice(slice_, slice_+1, 1)) + else: + fixed.append(slice_) + index = tuple(fixed) + if len(index) < dims: + index += (slice(None),) * (dims-len(index)) + + # Return a new arrayterator object. + out = self.__class__(self.var, self.buf_size) + for i, (start, stop, step, slice_) in enumerate( + zip(self.start, self.stop, self.step, index)): + out.start[i] = start + (slice_.start or 0) + out.step[i] = step * (slice_.step or 1) + out.stop[i] = start + (slice_.stop or stop-start) + out.stop[i] = min(stop, out.stop[i]) + return out + + def __array__(self): + """ + Return corresponding data. 
+
+        """
+        slice_ = tuple(slice(*t) for t in zip(
+                self.start, self.stop, self.step))
+        return self.var[slice_]
+
+    @property
+    def flat(self):
+        """
+        A 1-D flat iterator for Arrayterator objects.
+
+        This iterator returns elements of the array to be iterated over in
+        `Arrayterator` one by one. It is similar to `flatiter`.
+
+        See Also
+        --------
+        Arrayterator
+        flatiter
+
+        Examples
+        --------
+        >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+        >>> a_itor = np.lib.Arrayterator(a, 2)
+
+        >>> for subarr in a_itor.flat:
+        ...     if not subarr:
+        ...         print(subarr, type(subarr))
+        ...
+        0 <class 'numpy.int64'>
+
+        """
+        for block in self:
+            yield from block.flat
+
+    @property
+    def shape(self):
+        """
+        The shape of the array to be iterated over.
+
+        For an example, see `Arrayterator`.
+
+        """
+        return tuple(((stop-start-1)//step+1) for start, stop, step in
+                     zip(self.start, self.stop, self.step))
+
+    def __iter__(self):
+        # Skip arrays with degenerate dimensions
+        if [dim for dim in self.shape if dim <= 0]:
+            return
+
+        start = self.start[:]
+        stop = self.stop[:]
+        step = self.step[:]
+        ndims = self.var.ndim
+
+        while True:
+            count = self.buf_size or reduce(mul, self.shape)
+
+            # iterate over each dimension, looking for the
+            # running dimension (ie, the dimension along which
+            # the blocks will be built from)
+            rundim = 0
+            for i in range(ndims-1, -1, -1):
+                # if count is zero we ran out of elements to read
+                # along higher dimensions, so we read only a single position
+                if count == 0:
+                    stop[i] = start[i]+1
+                elif count <= self.shape[i]:
+                    # limit along this dimension
+                    stop[i] = start[i] + count*step[i]
+                    rundim = i
+                else:
+                    # read everything along this dimension
+                    stop[i] = self.stop[i]
+                stop[i] = min(self.stop[i], stop[i])
+                count = count//self.shape[i]
+
+            # yield a block
+            slice_ = tuple(slice(*t) for t in zip(start, stop, step))
+            yield self.var[slice_]
+
+            # Update start position, taking care of overflow to
+            # other dimensions
+            start[rundim] = stop[rundim]  # start where we stopped
+            for i in range(ndims-1, 0, -1):
+                if start[i] >= self.stop[i]:
+                    start[i] = self.start[i]
+                    start[i-1] += self.step[i-1]
+            if start[0] >= self.stop[0]:
+                return
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arrayterator.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arrayterator.pyi
new file mode 100644
index 00000000..aa192fb7
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/arrayterator.pyi
@@ -0,0 +1,49 @@
+from collections.abc import Generator
+from typing import (
+    Any,
+    TypeVar,
+    Union,
+    overload,
+)
+
+from numpy import ndarray, dtype, generic
+from numpy._typing import DTypeLike
+
+# TODO: Set a shape bound once we've got proper shape support
+_Shape = TypeVar("_Shape", bound=Any)
+_DType = TypeVar("_DType", bound=dtype[Any])
+_ScalarType = TypeVar("_ScalarType", bound=generic)
+
+_Index = Union[
+    Union[ellipsis, int, slice],
+    tuple[Union[ellipsis, int, slice], ...],
+]
+
+__all__: list[str]
+
+# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`,
+# but its ``__getattr__` method does wrap around the former and thus has
+# access to all its methods
+
+class Arrayterator(ndarray[_Shape, _DType]):
+    var: ndarray[_Shape, _DType]  # type: ignore[assignment]
+    buf_size: None | int
+    start: list[int]
+    stop: list[int]
+    step: list[int]
+
+    @property  # type: ignore[misc]
+    def shape(self) -> tuple[int, ...]: ...
+    @property
+    def flat(  # type: ignore[override]
+        self: ndarray[Any, dtype[_ScalarType]]
+    ) -> Generator[_ScalarType, None, None]: ...
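+
+    # A rough usage sketch (an in-memory ndarray for brevity; the class is
+    # aimed at file-backed variables):
+    #
+    #     >>> import numpy as np
+    #     >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+    #     >>> it = np.lib.Arrayterator(a, buf_size=100)
+    #     >>> [b.shape for b in it][:2]    # every block has <= 100 elements
+    #     [(1, 3, 5, 6), (1, 1, 5, 6)]
+    #     >>> flat = np.concatenate([b.ravel() for b in it])
+    #     >>> bool((flat == a.ravel()).all())    # blocks tile the array
+    #     True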
+ def __init__( + self, var: ndarray[_Shape, _DType], buf_size: None | int = ... + ) -> None: ... + @overload + def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ... + @overload + def __array__(self, dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ... + def __getitem__(self, index: _Index) -> Arrayterator[Any, _DType]: ... + def __iter__(self) -> Generator[ndarray[Any, _DType], None, None]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/format.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/format.py new file mode 100644 index 00000000..d5b3fbac --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/format.py @@ -0,0 +1,976 @@ +""" +Binary serialization + +NPY format +========== + +A simple format for saving numpy arrays to disk with the full +information about them. + +The ``.npy`` format is the standard binary file format in NumPy for +persisting a *single* arbitrary NumPy array on disk. The format stores all +of the shape and dtype information necessary to reconstruct the array +correctly even on another machine with a different architecture. +The format is designed to be as simple as possible while achieving +its limited goals. + +The ``.npz`` format is the standard format for persisting *multiple* NumPy +arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` +files, one for each array. + +Capabilities +------------ + +- Can represent all NumPy arrays including nested record arrays and + object arrays. + +- Represents the data in its native binary form. + +- Supports Fortran-contiguous arrays directly. + +- Stores all of the necessary information to reconstruct the array + including shape and dtype on a machine of a different + architecture. Both little-endian and big-endian arrays are + supported, and a file with little-endian numbers will yield + a little-endian array on any machine reading the file. The + types are described in terms of their actual sizes. For example, + if a machine with a 64-bit C "long int" writes out an array with + "long ints", a reading machine with 32-bit C "long ints" will yield + an array with 64-bit integers. + +- Is straightforward to reverse engineer. Datasets often live longer than + the programs that created them. A competent developer should be + able to create a solution in their preferred programming language to + read most ``.npy`` files that they have been given without much + documentation. + +- Allows memory-mapping of the data. See `open_memmap`. + +- Can be read from a filelike stream object instead of an actual file. + +- Stores object arrays, i.e. arrays containing elements that are arbitrary + Python objects. Files with object arrays are not to be mmapable, but + can be read and written to disk. + +Limitations +----------- + +- Arbitrary subclasses of numpy.ndarray are not completely preserved. + Subclasses will be accepted for writing, but only the array data will + be written out. A regular numpy.ndarray object will be created + upon reading the file. + +.. warning:: + + Due to limitations in the interpretation of structured dtypes, dtypes + with fields with empty names will have the names replaced by 'f0', 'f1', + etc. Such arrays will not round-trip through the format entirely + accurately. The data is intact; only the field names will differ. We are + working on a fix for this. This fix will not require a change in the + file format. 
The arrays with such structures can still be saved and + restored, and the correct dtype may be restored by using the + ``loadedarray.view(correct_dtype)`` method. + +File extensions +--------------- + +We recommend using the ``.npy`` and ``.npz`` extensions for files saved +in this format. This is by no means a requirement; applications may wish +to use these file formats but use an extension specific to the +application. In the absence of an obvious alternative, however, +we suggest using ``.npy`` and ``.npz``. + +Version numbering +----------------- + +The version numbering of these formats is independent of NumPy version +numbering. If the format is upgraded, the code in `numpy.io` will still +be able to read and write Version 1.0 files. + +Format Version 1.0 +------------------ + +The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. + +The next 1 byte is an unsigned byte: the major version number of the file +format, e.g. ``\\x01``. + +The next 1 byte is an unsigned byte: the minor version number of the file +format, e.g. ``\\x00``. Note: the version of the file format is not tied +to the version of the numpy package. + +The next 2 bytes form a little-endian unsigned short int: the length of +the header data HEADER_LEN. + +The next HEADER_LEN bytes form the header data describing the array's +format. It is an ASCII string which contains a Python literal expression +of a dictionary. It is terminated by a newline (``\\n``) and padded with +spaces (``\\x20``) to make the total of +``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible +by 64 for alignment purposes. + +The dictionary contains three keys: + + "descr" : dtype.descr + An object that can be passed as an argument to the `numpy.dtype` + constructor to create the array's dtype. + "fortran_order" : bool + Whether the array data is Fortran-contiguous or not. Since + Fortran-contiguous arrays are a common form of non-C-contiguity, + we allow them to be written directly to disk for efficiency. + "shape" : tuple of int + The shape of the array. + +For repeatability and readability, the dictionary keys are sorted in +alphabetic order. This is for convenience only. A writer SHOULD implement +this if possible. A reader MUST NOT depend on this. + +Following the header comes the array data. If the dtype contains Python +objects (i.e. ``dtype.hasobject is True``), then the data is a Python +pickle of the array. Otherwise the data is the contiguous (either C- +or Fortran-, depending on ``fortran_order``) bytes of the array. +Consumers can figure out the number of bytes by multiplying the number +of elements given by the shape (noting that ``shape=()`` means there is +1 element) by ``dtype.itemsize``. + +Format Version 2.0 +------------------ + +The version 1.0 format only allowed the array header to have a total size of +65535 bytes. This can be exceeded by structured arrays with a large number of +columns. The version 2.0 format extends the header size to 4 GiB. +`numpy.save` will automatically save in 2.0 format if the data requires it, +else it will always use the more compatible 1.0 format. + +The description of the fourth element of the header therefore has become: +"The next 4 bytes form a little-endian unsigned int: the length of the header +data HEADER_LEN." + +Format Version 3.0 +------------------ + +This version replaces the ASCII string (which in practice was latin1) with +a utf8-encoded string, so supports structured types with any unicode field +names. 
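+
+As an illustration (a sketch, not part of the specification; ``example.npy``
+is a placeholder file name), a Version 1.0 header can be parsed with nothing
+but the standard library::
+
+    import ast
+    import struct
+
+    with open("example.npy", "rb") as f:       # placeholder file name
+        assert f.read(6) == b"\\x93NUMPY"             # magic string
+        major, minor = f.read(2)                      # e.g. (1, 0)
+        hlen, = struct.unpack("<H", f.read(2))        # little-endian ushort
+        header = ast.literal_eval(f.read(hlen).decode("latin1"))
+        # header now holds 'descr', 'fortran_order' and 'shape'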
+
+Notes
+-----
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the :doc:`"npy-format" NEP
+<neps:nep-0001-npy-format>`, however details have evolved with time and this
+document is more current.
+
+"""
+import numpy
+import warnings
+from numpy.lib.utils import safe_eval, drop_metadata
+from numpy.compat import (
+    isfileobj, os_fspath, pickle
+    )
+
+
+__all__ = []
+
+
+EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'}
+MAGIC_PREFIX = b'\x93NUMPY'
+MAGIC_LEN = len(MAGIC_PREFIX) + 2
+ARRAY_ALIGN = 64  # plausible values are powers of 2 between 16 and 4096
+BUFFER_SIZE = 2**18  # size of buffer for reading npz files in bytes
+# allow growth within the address space of a 64 bit machine along one axis
+GROWTH_AXIS_MAX_DIGITS = 21  # = len(str(8*2**64-1)) hypothetical int1 dtype
+
+# difference between version 1.0 and 2.0 is a 4 byte (I) header length
+# instead of 2 bytes (H) allowing storage of large structured arrays
+_header_size_info = {
+    (1, 0): ('<H', 'latin1'),
+    (2, 0): ('<I', 'latin1'),
+    (3, 0): ('<I', 'utf8'),
+}
+
+# Python's literal_eval is not actually safe for large inputs, since parsing
+# may become slow or even cause interpreter crashes.
+# This is an arbitrary, low limit which should make it safe in practice.
+_MAX_HEADER_SIZE = 10000
+
+
+def _check_version(version):
+    if version not in [(1, 0), (2, 0), (3, 0), None]:
+        msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
+        raise ValueError(msg % (version,))
+
+
+def magic(major, minor):
+    """ Return the magic string for the given file format version.
+
+    Parameters
+    ----------
+    major : int in [0, 255]
+    minor : int in [0, 255]
+
+    Returns
+    -------
+    magic : str
+
+    Raises
+    ------
+    ValueError if the version cannot be formatted.
+    """
+    if major < 0 or major > 255:
+        raise ValueError("major version must be 0 <= major < 256")
+    if minor < 0 or minor > 255:
+        raise ValueError("minor version must be 0 <= minor < 256")
+    return MAGIC_PREFIX + bytes([major, minor])
+
+
+def read_magic(fp):
+    """ Read the magic string to get the version of the file format.
+
+    Parameters
+    ----------
+    fp : filelike object
+
+    Returns
+    -------
+    major : int
+    minor : int
+    """
+    magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
+    if magic_str[:-2] != MAGIC_PREFIX:
+        msg = "the magic string is not correct; expected %r, got %r"
+        raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
+    major, minor = magic_str[-2:]
+    return major, minor
+
+
+def dtype_to_descr(dtype):
+    """
+    Get a serializable descriptor from the dtype.
+
+    The .descr attribute of a dtype object cannot be round-tripped through
+    the dtype() constructor. Simple types, like dtype('float32'), have
+    a descr which looks like a record array with one field with '' as
+    a name. The dtype() constructor interprets this as a request to give
+    a default name. Instead, we construct descriptor that can be passed to
+    dtype().
+
+    Parameters
+    ----------
+    dtype : dtype
+        The dtype of the array that will be written to disk.
+
+    Returns
+    -------
+    descr : object
+        An object that can be passed to `numpy.dtype()` in order to
+        replicate the input dtype.
+
+    """
+    # NOTE: that drop_metadata may not return the right dtype e.g. for user
+    # dtypes. In that case our code below would fail the same, though.
+    new_dtype = drop_metadata(dtype)
+    if new_dtype is not dtype:
+        warnings.warn("metadata on a dtype is not saved to an npy/npz. "
+                      "Use another format (such as pickle) to store it.",
+                      UserWarning, stacklevel=2)
+    if dtype.names is not None:
+        # This is a record array. The .descr is fine. XXX: parts of the
+        # record array with an empty name, like padding bytes, still get
+        # fiddled with. This needs to be fixed in the C implementation of
+        # dtype().
+        return dtype.descr
+    else:
+        return dtype.str
+
+
+def descr_to_dtype(descr):
+    """
+    Returns a dtype based off the given description.
+
+    This is essentially the reverse of `dtype_to_descr()`. It will remove
+    the valueless padding fields created by, i.e. simple fields like
+    dtype('float32'), and then convert the description to its corresponding
+    dtype.
+
+    Parameters
+    ----------
+    descr : object
+        The object retrieved by dtype.descr. Can be passed to
+        `numpy.dtype()` in order to replicate the input dtype.
+ + Returns + ------- + dtype : dtype + The dtype constructed by the description. + + """ + if isinstance(descr, str): + # No padding removal needed + return numpy.dtype(descr) + elif isinstance(descr, tuple): + # subtype, will always have a shape descr[1] + dt = descr_to_dtype(descr[0]) + return numpy.dtype((dt, descr[1])) + + titles = [] + names = [] + formats = [] + offsets = [] + offset = 0 + for field in descr: + if len(field) == 2: + name, descr_str = field + dt = descr_to_dtype(descr_str) + else: + name, descr_str, shape = field + dt = numpy.dtype((descr_to_dtype(descr_str), shape)) + + # Ignore padding bytes, which will be void bytes with '' as name + # Once support for blank names is removed, only "if name == ''" needed) + is_pad = (name == '' and dt.type is numpy.void and dt.names is None) + if not is_pad: + title, name = name if isinstance(name, tuple) else (None, name) + titles.append(title) + names.append(name) + formats.append(dt) + offsets.append(offset) + offset += dt.itemsize + + return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, + 'offsets': offsets, 'itemsize': offset}) + +def header_data_from_array_1_0(array): + """ Get the dictionary of header metadata from a numpy.ndarray. + + Parameters + ---------- + array : numpy.ndarray + + Returns + ------- + d : dict + This has the appropriate entries for writing its string representation + to the header of the file. + """ + d = {'shape': array.shape} + if array.flags.c_contiguous: + d['fortran_order'] = False + elif array.flags.f_contiguous: + d['fortran_order'] = True + else: + # Totally non-contiguous data. We will have to make it C-contiguous + # before writing. Note that we need to test for C_CONTIGUOUS first + # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. + d['fortran_order'] = False + + d['descr'] = dtype_to_descr(array.dtype) + return d + + +def _wrap_header(header, version): + """ + Takes a stringified header, and attaches the prefix and padding to it + """ + import struct + assert version is not None + fmt, encoding = _header_size_info[version] + header = header.encode(encoding) + hlen = len(header) + 1 + padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN) + try: + header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen) + except struct.error: + msg = "Header length {} too big for version={}".format(hlen, version) + raise ValueError(msg) from None + + # Pad the header with spaces and a final newline such that the magic + # string, the header-length short and the header are aligned on a + # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes + # aligned up to ARRAY_ALIGN on systems like Linux where mmap() + # offset must be page-aligned (i.e. the beginning of the file). + return header_prefix + header + b' '*padlen + b'\n' + + +def _wrap_header_guess_version(header): + """ + Like `_wrap_header`, but chooses an appropriate version given the contents + """ + try: + return _wrap_header(header, (1, 0)) + except ValueError: + pass + + try: + ret = _wrap_header(header, (2, 0)) + except UnicodeEncodeError: + pass + else: + warnings.warn("Stored array in format 2.0. It can only be" + "read by NumPy >= 1.9", UserWarning, stacklevel=2) + return ret + + header = _wrap_header(header, (3, 0)) + warnings.warn("Stored array in format 3.0. 
It can only be " + "read by NumPy >= 1.17", UserWarning, stacklevel=2) + return header + + +def _write_array_header(fp, d, version=None): + """ Write the header for an array and returns the version used + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string representation + to the header of the file. + version : tuple or None + None means use oldest that works. Providing an explicit version will + raise a ValueError if the format does not allow saving this data. + Default: None + """ + header = ["{"] + for key, value in sorted(d.items()): + # Need to use repr here, since we eval these when reading + header.append("'%s': %s, " % (key, repr(value))) + header.append("}") + header = "".join(header) + + # Add some spare space so that the array header can be modified in-place + # when changing the array size, e.g. when growing it by appending data at + # the end. + shape = d['shape'] + header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr( + shape[-1 if d['fortran_order'] else 0] + ))) if len(shape) > 0 else 0) + + if version is None: + header = _wrap_header_guess_version(header) + else: + header = _wrap_header(header, version) + fp.write(header) + +def write_array_header_1_0(fp, d): + """ Write the header for an array using the 1.0 format. + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string + representation to the header of the file. + """ + _write_array_header(fp, d, (1, 0)) + + +def write_array_header_2_0(fp, d): + """ Write the header for an array using the 2.0 format. + The 2.0 format allows storing very large structured arrays. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + fp : filelike object + d : dict + This has the appropriate entries for writing its string + representation to the header of the file. + """ + _write_array_header(fp, d, (2, 0)) + +def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE): + """ + Read an array header from a filelike object using the 1.0 file format + version. + + This will leave the file object located just after the header. + + Parameters + ---------- + fp : filelike object + A file object or something with a `.read()` method like a file. + + Returns + ------- + shape : tuple of int + The shape of the array. + fortran_order : bool + The array data will be written out directly if it is either + C-contiguous or Fortran-contiguous. Otherwise, it will be made + contiguous before writing it out. + dtype : dtype + The dtype of the file's data. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + + Raises + ------ + ValueError + If the data is invalid. + + """ + return _read_array_header( + fp, version=(1, 0), max_header_size=max_header_size) + +def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE): + """ + Read an array header from a filelike object using the 2.0 file format + version. + + This will leave the file object located just after the header. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + fp : filelike object + A file object or something with a `.read()` method like a file. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. 
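
As an illustrative aside, the header helpers compose like this: build the header dict from an array, write it (magic string included) to a buffer, then read the magic and header back. A minimal sketch; the `npy_format` alias is arbitrary and `dtype('int64')` assumes a platform whose default integer is 64-bit:

>>> import io
>>> import numpy as np
>>> from numpy.lib import format as npy_format
>>> arr = np.arange(6).reshape(2, 3)
>>> buf = io.BytesIO()
>>> d = npy_format.header_data_from_array_1_0(arr)
>>> npy_format.write_array_header_1_0(buf, d)
>>> _ = buf.seek(0)
>>> npy_format.read_magic(buf)
(1, 0)
>>> npy_format.read_array_header_1_0(buf)
((2, 3), False, dtype('int64'))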
+ + Returns + ------- + shape : tuple of int + The shape of the array. + fortran_order : bool + The array data will be written out directly if it is either + C-contiguous or Fortran-contiguous. Otherwise, it will be made + contiguous before writing it out. + dtype : dtype + The dtype of the file's data. + + Raises + ------ + ValueError + If the data is invalid. + + """ + return _read_array_header( + fp, version=(2, 0), max_header_size=max_header_size) + + +def _filter_header(s): + """Clean up 'L' in npz header ints. + + Cleans up the 'L' in strings representing integers. Needed to allow npz + headers produced in Python2 to be read in Python3. + + Parameters + ---------- + s : string + Npy file header. + + Returns + ------- + header : str + Cleaned up header. + + """ + import tokenize + from io import StringIO + + tokens = [] + last_token_was_number = False + for token in tokenize.generate_tokens(StringIO(s).readline): + token_type = token[0] + token_string = token[1] + if (last_token_was_number and + token_type == tokenize.NAME and + token_string == "L"): + continue + else: + tokens.append(token) + last_token_was_number = (token_type == tokenize.NUMBER) + return tokenize.untokenize(tokens) + + +def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE): + """ + see read_array_header_1_0 + """ + # Read an unsigned, little-endian short int which has the length of the + # header. + import struct + hinfo = _header_size_info.get(version) + if hinfo is None: + raise ValueError("Invalid version {!r}".format(version)) + hlength_type, encoding = hinfo + + hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") + header_length = struct.unpack(hlength_type, hlength_str)[0] + header = _read_bytes(fp, header_length, "array header") + header = header.decode(encoding) + if len(header) > max_header_size: + raise ValueError( + f"Header info length ({len(header)}) is large and may not be safe " + "to load securely.\n" + "To allow loading, adjust `max_header_size` or fully trust " + "the `.npy` file using `allow_pickle=True`.\n" + "For safety against large resource use or crashes, sandboxing " + "may be necessary.") + + # The header is a pretty-printed string representation of a literal + # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte + # boundary. The keys are strings. + # "shape" : tuple of int + # "fortran_order" : bool + # "descr" : dtype.descr + # Versions (2, 0) and (1, 0) could have been created by a Python 2 + # implementation before header filtering was implemented. + # + # For performance reasons, we try without _filter_header first though + try: + d = safe_eval(header) + except SyntaxError as e: + if version <= (2, 0): + header = _filter_header(header) + try: + d = safe_eval(header) + except SyntaxError as e2: + msg = "Cannot parse header: {!r}" + raise ValueError(msg.format(header)) from e2 + else: + warnings.warn( + "Reading `.npy` or `.npz` file required additional " + "header parsing as it was created on Python 2. Save the " + "file again to speed up loading and avoid this warning.", + UserWarning, stacklevel=4) + else: + msg = "Cannot parse header: {!r}" + raise ValueError(msg.format(header)) from e + if not isinstance(d, dict): + msg = "Header is not a dictionary: {!r}" + raise ValueError(msg.format(d)) + + if EXPECTED_KEYS != d.keys(): + keys = sorted(d.keys()) + msg = "Header does not contain the correct keys: {!r}" + raise ValueError(msg.format(keys)) + + # Sanity-check the values. 
+ if (not isinstance(d['shape'], tuple) or + not all(isinstance(x, int) for x in d['shape'])): + msg = "shape is not valid: {!r}" + raise ValueError(msg.format(d['shape'])) + if not isinstance(d['fortran_order'], bool): + msg = "fortran_order is not a valid bool: {!r}" + raise ValueError(msg.format(d['fortran_order'])) + try: + dtype = descr_to_dtype(d['descr']) + except TypeError as e: + msg = "descr is not a valid dtype descriptor: {!r}" + raise ValueError(msg.format(d['descr'])) from e + + return d['shape'], d['fortran_order'], dtype + +def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): + """ + Write an array to an NPY file, including a header. + + If the array is neither C-contiguous nor Fortran-contiguous AND the + file_like object is not a real file object, this function will have to + copy data in memory. + + Parameters + ---------- + fp : file_like object + An open, writable file object, or similar object with a + ``.write()`` method. + array : ndarray + The array to write to disk. + version : (int, int) or None, optional + The version number of the format. None means use the oldest + supported version that is able to store the data. Default: None + allow_pickle : bool, optional + Whether to allow writing pickled data. Default: True + pickle_kwargs : dict, optional + Additional keyword arguments to pass to pickle.dump, excluding + 'protocol'. These are only useful when pickling objects in object + arrays on Python 3 to Python 2 compatible format. + + Raises + ------ + ValueError + If the array cannot be persisted. This includes the case of + allow_pickle=False and array being an object array. + Various other errors + If the array contains Python objects as part of its dtype, the + process of pickling them may raise various errors if the objects + are not picklable. + + """ + _check_version(version) + _write_array_header(fp, header_data_from_array_1_0(array), version) + + if array.itemsize == 0: + buffersize = 0 + else: + # Set buffer size to 16 MiB to hide the Python loop overhead. + buffersize = max(16 * 1024 ** 2 // array.itemsize, 1) + + if array.dtype.hasobject: + # We contain Python objects so we cannot write out the data + # directly. Instead, we will pickle it out + if not allow_pickle: + raise ValueError("Object arrays cannot be saved when " + "allow_pickle=False") + if pickle_kwargs is None: + pickle_kwargs = {} + pickle.dump(array, fp, protocol=3, **pickle_kwargs) + elif array.flags.f_contiguous and not array.flags.c_contiguous: + if isfileobj(fp): + array.T.tofile(fp) + else: + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='F'): + fp.write(chunk.tobytes('C')) + else: + if isfileobj(fp): + array.tofile(fp) + else: + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='C'): + fp.write(chunk.tobytes('C')) + + +def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, + max_header_size=_MAX_HEADER_SIZE): + """ + Read an array from an NPY file. + + Parameters + ---------- + fp : file_like object + If this is not a real file object, then this may take extra memory + and time. + allow_pickle : bool, optional + Whether to allow writing pickled data. Default: False + + .. versionchanged:: 1.16.3 + Made default False in response to CVE-2019-6446. + + pickle_kwargs : dict + Additional keyword arguments to pass to pickle.load. 
These are only + useful when loading object arrays saved on Python 2 when using + Python 3. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Returns + ------- + array : ndarray + The array from the data on disk. + + Raises + ------ + ValueError + If the data is invalid, or allow_pickle=False and the file contains + an object array. + + """ + if allow_pickle: + # Effectively ignore max_header_size, since `allow_pickle` indicates + # that the input is fully trusted. + max_header_size = 2**64 + + version = read_magic(fp) + _check_version(version) + shape, fortran_order, dtype = _read_array_header( + fp, version, max_header_size=max_header_size) + if len(shape) == 0: + count = 1 + else: + count = numpy.multiply.reduce(shape, dtype=numpy.int64) + + # Now read the actual data. + if dtype.hasobject: + # The array contained Python objects. We need to unpickle the data. + if not allow_pickle: + raise ValueError("Object arrays cannot be loaded when " + "allow_pickle=False") + if pickle_kwargs is None: + pickle_kwargs = {} + try: + array = pickle.load(fp, **pickle_kwargs) + except UnicodeError as err: + # Friendlier error message + raise UnicodeError("Unpickling a python object failed: %r\n" + "You may need to pass the encoding= option " + "to numpy.load" % (err,)) from err + else: + if isfileobj(fp): + # We can use the fast fromfile() function. + array = numpy.fromfile(fp, dtype=dtype, count=count) + else: + # This is not a real file. We have to read it the + # memory-intensive way. + # crc32 module fails on reads greater than 2 ** 32 bytes, + # breaking large reads from gzip streams. Chunk reads to + # BUFFER_SIZE bytes to avoid issue and reduce memory overhead + # of the read. In non-chunked case count < max_read_count, so + # only one read is performed. + + # Use np.ndarray instead of np.empty since the latter does + # not correctly instantiate zero-width string dtypes; see + # https://github.com/numpy/numpy/pull/6430 + array = numpy.ndarray(count, dtype=dtype) + + if dtype.itemsize > 0: + # If dtype.itemsize == 0 then there's nothing more to read + max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) + + for i in range(0, count, max_read_count): + read_count = min(max_read_count, count - i) + read_size = int(read_count * dtype.itemsize) + data = _read_bytes(fp, read_size, "array data") + array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype, + count=read_count) + + if fortran_order: + array.shape = shape[::-1] + array = array.transpose() + else: + array.shape = shape + + return array + + +def open_memmap(filename, mode='r+', dtype=None, shape=None, + fortran_order=False, version=None, *, + max_header_size=_MAX_HEADER_SIZE): + """ + Open a .npy file as a memory-mapped array. + + This may be used to read an existing file or create a new one. + + Parameters + ---------- + filename : str or path-like + The name of the file on disk. This may *not* be a file-like + object. + mode : str, optional + The mode in which to open the file; the default is 'r+'. In + addition to the standard file modes, 'c' is also accepted to mean + "copy on write." See `memmap` for the available mode strings. 
+ dtype : data-type, optional + The data type of the array if we are creating a new file in "write" + mode, if not, `dtype` is ignored. The default value is None, which + results in a data-type of `float64`. + shape : tuple of int + The shape of the array if we are creating a new file in "write" + mode, in which case this parameter is required. Otherwise, this + parameter is ignored and is thus optional. + fortran_order : bool, optional + Whether the array should be Fortran-contiguous (True) or + C-contiguous (False, the default) if we are creating a new file in + "write" mode. + version : tuple of int (major, minor) or None + If the mode is a "write" mode, then this is the version of the file + format used to create the file. None means use the oldest + supported version that is able to store the data. Default: None + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + + Returns + ------- + marray : memmap + The memory-mapped array. + + Raises + ------ + ValueError + If the data or the mode is invalid. + OSError + If the file is not found or cannot be opened correctly. + + See Also + -------- + numpy.memmap + + """ + if isfileobj(filename): + raise ValueError("Filename must be a string or a path-like object." + " Memmap cannot use existing file handles.") + + if 'w' in mode: + # We are creating the file, not reading it. + # Check if we ought to create the file. + _check_version(version) + # Ensure that the given dtype is an authentic dtype object rather + # than just something that can be interpreted as a dtype object. + dtype = numpy.dtype(dtype) + if dtype.hasobject: + msg = "Array can't be memory-mapped: Python objects in dtype." + raise ValueError(msg) + d = dict( + descr=dtype_to_descr(dtype), + fortran_order=fortran_order, + shape=shape, + ) + # If we got here, then it should be safe to create the file. + with open(os_fspath(filename), mode+'b') as fp: + _write_array_header(fp, d, version) + offset = fp.tell() + else: + # Read the header of the file first. + with open(os_fspath(filename), 'rb') as fp: + version = read_magic(fp) + _check_version(version) + + shape, fortran_order, dtype = _read_array_header( + fp, version, max_header_size=max_header_size) + if dtype.hasobject: + msg = "Array can't be memory-mapped: Python objects in dtype." + raise ValueError(msg) + offset = fp.tell() + + if fortran_order: + order = 'F' + else: + order = 'C' + + # We need to change a write-only mode to a read-write mode since we've + # already written data to the file. + if mode == 'w+': + mode = 'r+' + + marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order, + mode=mode, offset=offset) + + return marray + + +def _read_bytes(fp, size, error_template="ran out of data"): + """ + Read from file-like object until size bytes are read. + Raises ValueError if not EOF is encountered before size bytes are read. + Non-blocking objects only supported if they derive from io objects. + + Required as e.g. ZipExtFile in python 2.6 can return less data than + requested. + """ + data = bytes() + while True: + # io files (default in python3) return None or raise on + # would-block, python2 file will truncate, probably nothing can be + # done about that. 
note that regular files can't be non-blocking + try: + r = fp.read(size - len(data)) + data += r + if len(r) == 0 or len(data) == size: + break + except BlockingIOError: + pass + if len(data) != size: + msg = "EOF: reading %s, expected %d bytes got %d" + raise ValueError(msg % (error_template, size, len(data))) + else: + return data diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/format.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/format.pyi new file mode 100644 index 00000000..a4468f52 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/format.pyi @@ -0,0 +1,22 @@ +from typing import Any, Literal, Final + +__all__: list[str] + +EXPECTED_KEYS: Final[set[str]] +MAGIC_PREFIX: Final[bytes] +MAGIC_LEN: Literal[8] +ARRAY_ALIGN: Literal[64] +BUFFER_SIZE: Literal[262144] # 2**18 + +def magic(major, minor): ... +def read_magic(fp): ... +def dtype_to_descr(dtype): ... +def descr_to_dtype(descr): ... +def header_data_from_array_1_0(array): ... +def write_array_header_1_0(fp, d): ... +def write_array_header_2_0(fp, d): ... +def read_array_header_1_0(fp): ... +def read_array_header_2_0(fp): ... +def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ... +def read_array(fp, allow_pickle=..., pickle_kwargs=...): ... +def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/function_base.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/function_base.py new file mode 100644 index 00000000..e75aca1e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/function_base.py @@ -0,0 +1,5732 @@ +import collections.abc +import functools +import re +import sys +import warnings + +from .._utils import set_module +import numpy as np +import numpy.core.numeric as _nx +from numpy.core import transpose +from numpy.core.numeric import ( + ones, zeros_like, arange, concatenate, array, asarray, asanyarray, empty, + ndarray, take, dot, where, intp, integer, isscalar, absolute + ) +from numpy.core.umath import ( + pi, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, + mod, exp, not_equal, subtract + ) +from numpy.core.fromnumeric import ( + ravel, nonzero, partition, mean, any, sum + ) +from numpy.core.numerictypes import typecodes +from numpy.core import overrides +from numpy.core.function_base import add_newdoc +from numpy.lib.twodim_base import diag +from numpy.core.multiarray import ( + _place, add_docstring, bincount, normalize_axis_index, _monotonicity, + interp as compiled_interp, interp_complex as compiled_interp_complex + ) +from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc + +import builtins + +# needed in this module for compatibility +from numpy.lib.histograms import histogram, histogramdd # noqa: F401 + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', + 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip', + 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', + 'bincount', 'digitize', 'cov', 'corrcoef', + 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', + 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', + 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc', + 'quantile' + ] + +# _QuantileMethods is a dictionary listing all the supported methods to +# compute quantile/percentile. 
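
As an illustrative aside, these method names are what `np.quantile` and `np.percentile` accept through their `method` keyword (available since NumPy 1.22); a small sanity check with n = 4 samples, where the virtual index for q = 0.4 under the default 'linear' method is (n - 1) * q = 1.2:

>>> import numpy as np
>>> a = [1, 2, 3, 4]
>>> np.quantile(a, 0.4, method='linear')
2.2
>>> np.quantile(a, 0.4, method='lower'), np.quantile(a, 0.4, method='higher')
(2, 3)
>>> np.quantile(a, 0.4, method='nearest')
2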
+
+#
+# Below virtual_index refers to the index of the element where the percentile
+# would be found in the sorted sample.
+# When the sample contains exactly the percentile wanted, the virtual_index is
+# an integer equal to the index of this element.
+# When the percentile wanted is in between two elements, the virtual_index
+# is made of an integer part (a.k.a 'i' or 'left') and a fractional part
+# (a.k.a 'g' or 'gamma')
+#
+# Each method in _QuantileMethods has two properties
+# get_virtual_index : Callable
+#   The function used to compute the virtual_index.
+# fix_gamma : Callable
+#   A function used for discrete methods to force the index to a specific value.
+_QuantileMethods = dict(
+    # --- HYNDMAN and FAN METHODS
+    # Discrete methods
+    inverted_cdf=dict(
+        get_virtual_index=lambda n, quantiles: _inverted_cdf(n, quantiles),
+        fix_gamma=lambda gamma, _: gamma,  # should never be called
+    ),
+    averaged_inverted_cdf=dict(
+        get_virtual_index=lambda n, quantiles: (n * quantiles) - 1,
+        fix_gamma=lambda gamma, _: _get_gamma_mask(
+            shape=gamma.shape,
+            default_value=1.,
+            conditioned_value=0.5,
+            where=gamma == 0),
+    ),
+    closest_observation=dict(
+        get_virtual_index=lambda n, quantiles: _closest_observation(n,
+                                                                    quantiles),
+        fix_gamma=lambda gamma, _: gamma,  # should never be called
+    ),
+    # Continuous methods
+    interpolated_inverted_cdf=dict(
+        get_virtual_index=lambda n, quantiles:
+        _compute_virtual_index(n, quantiles, 0, 1),
+        fix_gamma=lambda gamma, _: gamma,
+    ),
+    hazen=dict(
+        get_virtual_index=lambda n, quantiles:
+        _compute_virtual_index(n, quantiles, 0.5, 0.5),
+        fix_gamma=lambda gamma, _: gamma,
+    ),
+    weibull=dict(
+        get_virtual_index=lambda n, quantiles:
+        _compute_virtual_index(n, quantiles, 0, 0),
+        fix_gamma=lambda gamma, _: gamma,
+    ),
+    # Default method.
+    # To avoid some rounding issues, `(n-1) * quantiles` is preferred to
+    # `_compute_virtual_index(n, quantiles, 1, 1)`.
+    # They are mathematically equivalent.
+    linear=dict(
+        get_virtual_index=lambda n, quantiles: (n - 1) * quantiles,
+        fix_gamma=lambda gamma, _: gamma,
+    ),
+    median_unbiased=dict(
+        get_virtual_index=lambda n, quantiles:
+        _compute_virtual_index(n, quantiles, 1 / 3.0, 1 / 3.0),
+        fix_gamma=lambda gamma, _: gamma,
+    ),
+    normal_unbiased=dict(
+        get_virtual_index=lambda n, quantiles:
+        _compute_virtual_index(n, quantiles, 3 / 8.0, 3 / 8.0),
+        fix_gamma=lambda gamma, _: gamma,
+    ),
+    # --- OTHER METHODS
+    lower=dict(
+        get_virtual_index=lambda n, quantiles: np.floor(
+            (n - 1) * quantiles).astype(np.intp),
+        fix_gamma=lambda gamma, _: gamma,
+        # should never be called, index dtype is int
+    ),
+    higher=dict(
+        get_virtual_index=lambda n, quantiles: np.ceil(
+            (n - 1) * quantiles).astype(np.intp),
+        fix_gamma=lambda gamma, _: gamma,
+        # should never be called, index dtype is int
+    ),
+    midpoint=dict(
+        get_virtual_index=lambda n, quantiles: 0.5 * (
+            np.floor((n - 1) * quantiles)
+            + np.ceil((n - 1) * quantiles)),
+        fix_gamma=lambda gamma, index: _get_gamma_mask(
+            shape=gamma.shape,
+            default_value=0.5,
+            conditioned_value=0.,
+            where=index % 1 == 0),
+    ),
+    nearest=dict(
+        get_virtual_index=lambda n, quantiles: np.around(
+            (n - 1) * quantiles).astype(np.intp),
+        fix_gamma=lambda gamma, _: gamma,
+        # should never be called, index dtype is int
+    ))
+
+
+def _rot90_dispatcher(m, k=None, axes=None):
+    return (m,)
+
+
+@array_function_dispatch(_rot90_dispatcher)
+def rot90(m, k=1, axes=(0, 1)):
+    """
+    Rotate an array by 90 degrees in the plane specified by axes.
+ + Rotation direction is from the first towards the second axis. + This means for a 2D array with the default `k` and `axes`, the + rotation will be counterclockwise. + + Parameters + ---------- + m : array_like + Array of two or more dimensions. + k : integer + Number of times the array is rotated by 90 degrees. + axes : (2,) array_like + The array is rotated in the plane defined by the axes. + Axes must be different. + + .. versionadded:: 1.12.0 + + Returns + ------- + y : ndarray + A rotated view of `m`. + + See Also + -------- + flip : Reverse the order of elements in an array along the given axis. + fliplr : Flip an array horizontally. + flipud : Flip an array vertically. + + Notes + ----- + ``rot90(m, k=1, axes=(1,0))`` is the reverse of + ``rot90(m, k=1, axes=(0,1))`` + + ``rot90(m, k=1, axes=(1,0))`` is equivalent to + ``rot90(m, k=-1, axes=(0,1))`` + + Examples + -------- + >>> m = np.array([[1,2],[3,4]], int) + >>> m + array([[1, 2], + [3, 4]]) + >>> np.rot90(m) + array([[2, 4], + [1, 3]]) + >>> np.rot90(m, 2) + array([[4, 3], + [2, 1]]) + >>> m = np.arange(8).reshape((2,2,2)) + >>> np.rot90(m, 1, (1,2)) + array([[[1, 3], + [0, 2]], + [[5, 7], + [4, 6]]]) + + """ + axes = tuple(axes) + if len(axes) != 2: + raise ValueError("len(axes) must be 2.") + + m = asanyarray(m) + + if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim: + raise ValueError("Axes must be different.") + + if (axes[0] >= m.ndim or axes[0] < -m.ndim + or axes[1] >= m.ndim or axes[1] < -m.ndim): + raise ValueError("Axes={} out of range for array of ndim={}." + .format(axes, m.ndim)) + + k %= 4 + + if k == 0: + return m[:] + if k == 2: + return flip(flip(m, axes[0]), axes[1]) + + axes_list = arange(0, m.ndim) + (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], + axes_list[axes[0]]) + + if k == 1: + return transpose(flip(m, axes[1]), axes_list) + else: + # k == 3 + return flip(transpose(m, axes_list), axes[1]) + + +def _flip_dispatcher(m, axis=None): + return (m,) + + +@array_function_dispatch(_flip_dispatcher) +def flip(m, axis=None): + """ + Reverse the order of elements in an array along the given axis. + + The shape of the array is preserved, but the elements are reordered. + + .. versionadded:: 1.12.0 + + Parameters + ---------- + m : array_like + Input array. + axis : None or int or tuple of ints, optional + Axis or axes along which to flip over. The default, + axis=None, will flip over all of the axes of the input array. + If axis is negative it counts from the last to the first axis. + + If axis is a tuple of ints, flipping is performed on all of the axes + specified in the tuple. + + .. versionchanged:: 1.15.0 + None and tuples of axes are supported + + Returns + ------- + out : array_like + A view of `m` with the entries of axis reversed. Since a view is + returned, this operation is done in constant time. + + See Also + -------- + flipud : Flip an array vertically (axis=0). + fliplr : Flip an array horizontally (axis=1). + + Notes + ----- + flip(m, 0) is equivalent to flipud(m). + + flip(m, 1) is equivalent to fliplr(m). + + flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. + + flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all + positions. + + flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at + position 0 and position 1. 
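
As an illustrative aside, the equivalences stated in the Notes above can be checked directly (a sketch, separate from the examples that follow):

>>> import numpy as np
>>> m = np.arange(6).reshape(2, 3)
>>> np.array_equal(np.flip(m, 0), np.flipud(m))
True
>>> np.array_equal(np.flip(m, 1), m[:, ::-1])
True
>>> np.array_equal(np.flip(m), m[::-1, ::-1])
True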
+ + Examples + -------- + >>> A = np.arange(8).reshape((2,2,2)) + >>> A + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.flip(A, 0) + array([[[4, 5], + [6, 7]], + [[0, 1], + [2, 3]]]) + >>> np.flip(A, 1) + array([[[2, 3], + [0, 1]], + [[6, 7], + [4, 5]]]) + >>> np.flip(A) + array([[[7, 6], + [5, 4]], + [[3, 2], + [1, 0]]]) + >>> np.flip(A, (0, 2)) + array([[[5, 4], + [7, 6]], + [[1, 0], + [3, 2]]]) + >>> A = np.random.randn(3,4,5) + >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) + True + """ + if not hasattr(m, 'ndim'): + m = asarray(m) + if axis is None: + indexer = (np.s_[::-1],) * m.ndim + else: + axis = _nx.normalize_axis_tuple(axis, m.ndim) + indexer = [np.s_[:]] * m.ndim + for ax in axis: + indexer[ax] = np.s_[::-1] + indexer = tuple(indexer) + return m[indexer] + + +@set_module('numpy') +def iterable(y): + """ + Check whether or not an object can be iterated over. + + Parameters + ---------- + y : object + Input object. + + Returns + ------- + b : bool + Return ``True`` if the object has an iterator method or is a + sequence and ``False`` otherwise. + + + Examples + -------- + >>> np.iterable([1, 2, 3]) + True + >>> np.iterable(2) + False + + Notes + ----- + In most cases, the results of ``np.iterable(obj)`` are consistent with + ``isinstance(obj, collections.abc.Iterable)``. One notable exception is + the treatment of 0-dimensional arrays:: + + >>> from collections.abc import Iterable + >>> a = np.array(1.0) # 0-dimensional numpy array + >>> isinstance(a, Iterable) + True + >>> np.iterable(a) + False + + """ + try: + iter(y) + except TypeError: + return False + return True + + +def _average_dispatcher(a, axis=None, weights=None, returned=None, *, + keepdims=None): + return (a, weights) + + +@array_function_dispatch(_average_dispatcher) +def average(a, axis=None, weights=None, returned=False, *, + keepdims=np._NoValue): + """ + Compute the weighted average along the specified axis. + + Parameters + ---------- + a : array_like + Array containing data to be averaged. If `a` is not an array, a + conversion is attempted. + axis : None or int or tuple of ints, optional + Axis or axes along which to average `a`. The default, + axis=None, will average over all of the elements of the input array. + If axis is negative it counts from the last to the first axis. + + .. versionadded:: 1.7.0 + + If axis is a tuple of ints, averaging is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the average according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. The 1-D calculation is:: + + avg = sum(a * weights) / sum(weights) + + The only constraint on `weights` is that `sum(weights)` must not be 0. + returned : bool, optional + Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) + is returned, otherwise only the average is returned. + If `weights=None`, `sum_of_weights` is equivalent to the number of + elements over which the average is taken. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. 
+        *Note:* `keepdims` will not work with instances of `numpy.matrix`
+        or other classes whose methods do not support `keepdims`.
+
+        .. versionadded:: 1.23.0
+
+    Returns
+    -------
+    retval, [sum_of_weights] : array_type or double
+        Return the average along the specified axis. When `returned` is `True`,
+        return a tuple with the average as the first element and the sum
+        of the weights as the second element. `sum_of_weights` is of the
+        same type as `retval`. The result dtype follows a general pattern.
+        If `weights` is None, the result dtype will be that of `a`, or ``float64``
+        if `a` is integral. Otherwise, if `weights` is not None and `a` is non-
+        integral, the result type will be the type of lowest precision capable of
+        representing values of both `a` and `weights`. If `a` happens to be
+        integral, the previous rules still apply but the result dtype will
+        at least be ``float64``.
+
+    Raises
+    ------
+    ZeroDivisionError
+        When all weights along axis are zero. See `numpy.ma.average` for a
+        version robust to this type of error.
+    TypeError
+        When the length of 1D `weights` is not the same as the shape of `a`
+        along axis.
+
+    See Also
+    --------
+    mean
+
+    ma.average : average for masked arrays -- useful if your data contains
+                 "missing" values
+    numpy.result_type : Returns the type that results from applying the
+                        numpy type promotion rules to the arguments.
+
+    Examples
+    --------
+    >>> data = np.arange(1, 5)
+    >>> data
+    array([1, 2, 3, 4])
+    >>> np.average(data)
+    2.5
+    >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
+    4.0
+
+    >>> data = np.arange(6).reshape((3, 2))
+    >>> data
+    array([[0, 1],
+           [2, 3],
+           [4, 5]])
+    >>> np.average(data, axis=1, weights=[1./4, 3./4])
+    array([0.75, 2.75, 4.75])
+    >>> np.average(data, weights=[1./4, 3./4])
+    Traceback (most recent call last):
+        ...
+    TypeError: Axis must be specified when shapes of a and weights differ.
+
+    >>> a = np.ones(5, dtype=np.float128)
+    >>> w = np.ones(5, dtype=np.complex64)
+    >>> avg = np.average(a, weights=w)
+    >>> print(avg.dtype)
+    complex256
+
+    With ``keepdims=True``, the following result has shape (3, 1).
+
+    >>> np.average(data, axis=1, keepdims=True)
+    array([[0.5],
+           [2.5],
+           [4.5]])
+    """
+    a = np.asanyarray(a)
+
+    if keepdims is np._NoValue:
+        # Don't pass on the keepdims argument if one wasn't given.
+        keepdims_kw = {}
+    else:
+        keepdims_kw = {'keepdims': keepdims}
+
+    if weights is None:
+        avg = a.mean(axis, **keepdims_kw)
+        avg_as_array = np.asanyarray(avg)
+        scl = avg_as_array.dtype.type(a.size/avg_as_array.size)
+    else:
+        wgt = np.asanyarray(weights)
+
+        if issubclass(a.dtype.type, (np.integer, np.bool_)):
+            result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
+        else:
+            result_dtype = np.result_type(a.dtype, wgt.dtype)
+
+        # Sanity checks
+        if a.shape != wgt.shape:
+            if axis is None:
+                raise TypeError(
+                    "Axis must be specified when shapes of a and weights "
+                    "differ.")
+            if wgt.ndim != 1:
+                raise TypeError(
+                    "1D weights expected when shapes of a and weights differ.")
+            if wgt.shape[0] != a.shape[axis]:
+                raise ValueError(
+                    "Length of weights not compatible with specified axis.")
+
+            # setup wgt to broadcast along axis
+            wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)
+            wgt = wgt.swapaxes(-1, axis)
+
+        scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw)
+        if np.any(scl == 0.0):
+            raise ZeroDivisionError(
+                "Weights sum to zero, can't be normalized")
+
+        avg = avg_as_array = np.multiply(a, wgt,
+                          dtype=result_dtype).sum(axis, **keepdims_kw) / scl
+
+    if returned:
+        if scl.shape != avg_as_array.shape:
+            scl = np.broadcast_to(scl, avg_as_array.shape).copy()
+        return avg, scl
+    else:
+        return avg
+
+
+@set_module('numpy')
+def asarray_chkfinite(a, dtype=None, order=None):
+    """Convert the input to an array, checking for NaNs or Infs.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array. This
+        includes lists, lists of tuples, tuples, tuples of tuples, tuples
+        of lists and ndarrays. Success requires no NaNs or Infs.
+    dtype : data-type, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Memory layout. 'A' and 'K' depend on the order of input array a.
+        'C' row-major (C-style),
+        'F' column-major (Fortran-style) memory representation.
+        'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
+        'K' (keep) preserve input order
+        Defaults to 'C'.
+
+    Returns
+    -------
+    out : ndarray
+        Array interpretation of `a`. No copy is performed if the input
+        is already an ndarray. If `a` is a subclass of ndarray, a base
+        class ndarray is returned.
+
+    Raises
+    ------
+    ValueError
+        Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
+
+    See Also
+    --------
+    asarray : Create an array.
+    asanyarray : Similar function which passes through subclasses.
+    ascontiguousarray : Convert input to a contiguous array.
+    asfarray : Convert input to a floating point ndarray.
+    asfortranarray : Convert input to an ndarray with column-major
+                     memory order.
+    fromiter : Create an array from an iterator.
+    fromfunction : Construct an array by executing a function on grid
+                   positions.
+
+    Examples
+    --------
+    Convert a list into an array. If all elements are finite
+    ``asarray_chkfinite`` is identical to ``asarray``.
+
+    >>> a = [1, 2]
+    >>> np.asarray_chkfinite(a, dtype=float)
+    array([1., 2.])
+
+    Raises ValueError if array_like contains NaNs or Infs.
+
+    >>> a = [1, 2, np.inf]
+    >>> try:
+    ...     np.asarray_chkfinite(a)
+    ... except ValueError:
+    ...     print('ValueError')
+    ...
+ ValueError + + """ + a = asarray(a, dtype=dtype, order=order) + if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): + raise ValueError( + "array must not contain infs or NaNs") + return a + + +def _piecewise_dispatcher(x, condlist, funclist, *args, **kw): + yield x + # support the undocumented behavior of allowing scalars + if np.iterable(condlist): + yield from condlist + + +@array_function_dispatch(_piecewise_dispatcher) +def piecewise(x, condlist, funclist, *args, **kw): + """ + Evaluate a piecewise-defined function. + + Given a set of conditions and corresponding functions, evaluate each + function on the input data wherever its condition is true. + + Parameters + ---------- + x : ndarray or scalar + The input domain. + condlist : list of bool arrays or bool scalars + Each boolean array corresponds to a function in `funclist`. Wherever + `condlist[i]` is True, `funclist[i](x)` is used as the output value. + + Each boolean array in `condlist` selects a piece of `x`, + and should therefore be of the same shape as `x`. + + The length of `condlist` must correspond to that of `funclist`. + If one extra function is given, i.e. if + ``len(funclist) == len(condlist) + 1``, then that extra function + is the default value, used wherever all conditions are false. + funclist : list of callables, f(x,*args,**kw), or scalars + Each function is evaluated over `x` wherever its corresponding + condition is True. It should take a 1d array as input and give an 1d + array or a scalar value as output. If, instead of a callable, + a scalar is provided then a constant function (``lambda x: scalar``) is + assumed. + args : tuple, optional + Any further arguments given to `piecewise` are passed to the functions + upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then + each function is called as ``f(x, 1, 'a')``. + kw : dict, optional + Keyword arguments used in calling `piecewise` are passed to the + functions upon execution, i.e., if called + ``piecewise(..., ..., alpha=1)``, then each function is called as + ``f(x, alpha=1)``. + + Returns + ------- + out : ndarray + The output is the same shape and type as x and is found by + calling the functions in `funclist` on the appropriate portions of `x`, + as defined by the boolean arrays in `condlist`. Portions not covered + by any condition have a default value of 0. + + + See Also + -------- + choose, select, where + + Notes + ----- + This is similar to choose or select, except that functions are + evaluated on elements of `x` that satisfy the corresponding condition from + `condlist`. + + The result is:: + + |-- + |funclist[0](x[condlist[0]]) + out = |funclist[1](x[condlist[1]]) + |... + |funclist[n2](x[condlist[n2]]) + |-- + + Examples + -------- + Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. + + >>> x = np.linspace(-2.5, 2.5, 6) + >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) + array([-1., -1., -1., 1., 1., 1.]) + + Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for + ``x >= 0``. + + >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) + array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) + + Apply the same function to a scalar value. 
+ + >>> y = -2 + >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x]) + array(2) + + """ + x = asanyarray(x) + n2 = len(funclist) + + # undocumented: single condition is promoted to a list of one condition + if isscalar(condlist) or ( + not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0): + condlist = [condlist] + + condlist = asarray(condlist, dtype=bool) + n = len(condlist) + + if n == n2 - 1: # compute the "otherwise" condition. + condelse = ~np.any(condlist, axis=0, keepdims=True) + condlist = np.concatenate([condlist, condelse], axis=0) + n += 1 + elif n != n2: + raise ValueError( + "with {} condition(s), either {} or {} functions are expected" + .format(n, n, n+1) + ) + + y = zeros_like(x) + for cond, func in zip(condlist, funclist): + if not isinstance(func, collections.abc.Callable): + y[cond] = func + else: + vals = x[cond] + if vals.size > 0: + y[cond] = func(vals, *args, **kw) + + return y + + +def _select_dispatcher(condlist, choicelist, default=None): + yield from condlist + yield from choicelist + + +@array_function_dispatch(_select_dispatcher) +def select(condlist, choicelist, default=0): + """ + Return an array drawn from elements in choicelist, depending on conditions. + + Parameters + ---------- + condlist : list of bool ndarrays + The list of conditions which determine from which array in `choicelist` + the output elements are taken. When multiple conditions are satisfied, + the first one encountered in `condlist` is used. + choicelist : list of ndarrays + The list of arrays from which the output elements are taken. It has + to be of the same length as `condlist`. + default : scalar, optional + The element inserted in `output` when all conditions evaluate to False. + + Returns + ------- + output : ndarray + The output at position m is the m-th element of the array in + `choicelist` where the m-th element of the corresponding array in + `condlist` is True. + + See Also + -------- + where : Return elements from one of two arrays depending on condition. + take, choose, compress, diag, diagonal + + Examples + -------- + >>> x = np.arange(6) + >>> condlist = [x<3, x>3] + >>> choicelist = [x, x**2] + >>> np.select(condlist, choicelist, 42) + array([ 0, 1, 2, 42, 16, 25]) + + >>> condlist = [x<=4, x>3] + >>> choicelist = [x, x**2] + >>> np.select(condlist, choicelist, 55) + array([ 0, 1, 2, 3, 4, 25]) + + """ + # Check the size of condlist and choicelist are the same, or abort. + if len(condlist) != len(choicelist): + raise ValueError( + 'list of cases must be same length as list of conditions') + + # Now that the dtype is known, handle the deprecated select([], []) case + if len(condlist) == 0: + raise ValueError("select with an empty condition list is not possible") + + choicelist = [np.asarray(choice) for choice in choicelist] + + try: + intermediate_dtype = np.result_type(*choicelist) + except TypeError as e: + msg = f'Choicelist elements do not have a common dtype: {e}' + raise TypeError(msg) from None + default_array = np.asarray(default) + choicelist.append(default_array) + + # need to get the result type before broadcasting for correct scalar + # behaviour + try: + dtype = np.result_type(intermediate_dtype, default_array) + except TypeError as e: + msg = f'Choicelists and default value do not have a common dtype: {e}' + raise TypeError(msg) from None + + # Convert conditions to arrays and broadcast conditions and choices + # as the shape is needed for the result. Doing it separately optimizes + # for example when all choices are scalars. 
+ condlist = np.broadcast_arrays(*condlist) + choicelist = np.broadcast_arrays(*choicelist) + + # If cond array is not an ndarray in boolean format or scalar bool, abort. + for i, cond in enumerate(condlist): + if cond.dtype.type is not np.bool_: + raise TypeError( + 'invalid entry {} in condlist: should be boolean ndarray'.format(i)) + + if choicelist[0].ndim == 0: + # This may be common, so avoid the call. + result_shape = condlist[0].shape + else: + result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape + + result = np.full(result_shape, choicelist[-1], dtype) + + # Use np.copyto to burn each choicelist array onto result, using the + # corresponding condlist as a boolean mask. This is done in reverse + # order since the first choice should take precedence. + choicelist = choicelist[-2::-1] + condlist = condlist[::-1] + for choice, cond in zip(choicelist, condlist): + np.copyto(result, choice, where=cond) + + return result + + +def _copy_dispatcher(a, order=None, subok=None): + return (a,) + + +@array_function_dispatch(_copy_dispatcher) +def copy(a, order='K', subok=False): + """ + Return an array copy of the given object. + + Parameters + ---------- + a : array_like + Input data. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the copy. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. (Note that this function and :meth:`ndarray.copy` are very + similar, but have different default values for their order= + arguments.) + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise the + returned array will be forced to be a base-class array (defaults to False). + + .. versionadded:: 1.19.0 + + Returns + ------- + arr : ndarray + Array interpretation of `a`. + + See Also + -------- + ndarray.copy : Preferred method for creating an array copy + + Notes + ----- + This is equivalent to: + + >>> np.array(a, copy=True) #doctest: +SKIP + + Examples + -------- + Create an array x, with a reference y and a copy z: + + >>> x = np.array([1, 2, 3]) + >>> y = x + >>> z = np.copy(x) + + Note that, when we modify x, y changes, but not z: + + >>> x[0] = 10 + >>> x[0] == y[0] + True + >>> x[0] == z[0] + False + + Note that, np.copy clears previously set WRITEABLE=False flag. + + >>> a = np.array([1, 2, 3]) + >>> a.flags["WRITEABLE"] = False + >>> b = np.copy(a) + >>> b.flags["WRITEABLE"] + True + >>> b[0] = 3 + >>> b + array([3, 2, 3]) + + Note that np.copy is a shallow copy and will not copy object + elements within arrays. This is mainly important for arrays + containing Python objects. 
The new array will contain the + same object which may lead to surprises if that object can + be modified (is mutable): + + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> b = np.copy(a) + >>> b[2][0] = 10 + >>> a + array([1, 'm', list([10, 3, 4])], dtype=object) + + To ensure all elements within an ``object`` array are copied, + use `copy.deepcopy`: + + >>> import copy + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> c = copy.deepcopy(a) + >>> c[2][0] = 10 + >>> c + array([1, 'm', list([10, 3, 4])], dtype=object) + >>> a + array([1, 'm', list([2, 3, 4])], dtype=object) + + """ + return array(a, order=order, subok=subok, copy=True) + +# Basic operations + + +def _gradient_dispatcher(f, *varargs, axis=None, edge_order=None): + yield f + yield from varargs + + +@array_function_dispatch(_gradient_dispatcher) +def gradient(f, *varargs, axis=None, edge_order=1): + """ + Return the gradient of an N-dimensional array. + + The gradient is computed using second order accurate central differences + in the interior points and either first or second order accurate one-sides + (forward or backwards) differences at the boundaries. + The returned gradient hence has the same shape as the input array. + + Parameters + ---------- + f : array_like + An N-dimensional array containing samples of a scalar function. + varargs : list of scalar or array, optional + Spacing between f values. Default unitary spacing for all dimensions. + Spacing can be specified using: + + 1. single scalar to specify a sample distance for all dimensions. + 2. N scalars to specify a constant sample distance for each dimension. + i.e. `dx`, `dy`, `dz`, ... + 3. N arrays to specify the coordinates of the values along each + dimension of F. The length of the array must match the size of + the corresponding dimension + 4. Any combination of N scalars/arrays with the meaning of 2. and 3. + + If `axis` is given, the number of varargs must equal the number of axes. + Default: 1. + + edge_order : {1, 2}, optional + Gradient is calculated using N-th order accurate differences + at the boundaries. Default: 1. + + .. versionadded:: 1.9.1 + + axis : None or int or tuple of ints, optional + Gradient is calculated only along the given axis or axes + The default (axis = None) is to calculate the gradient for all the axes + of the input array. axis may be negative, in which case it counts from + the last to the first axis. + + .. versionadded:: 1.11.0 + + Returns + ------- + gradient : ndarray or list of ndarray + A list of ndarrays (or a single ndarray if there is only one dimension) + corresponding to the derivatives of f with respect to each dimension. + Each derivative has the same shape as f. + + Examples + -------- + >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) + >>> np.gradient(f) + array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + >>> np.gradient(f, 2) + array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) + + Spacing can be also specified with an array that represents the coordinates + of the values F along the dimensions. + For instance a uniform spacing: + + >>> x = np.arange(f.size) + >>> np.gradient(f, x) + array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + + Or a non uniform one: + + >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) + >>> np.gradient(f, x) + array([1. , 3. , 3.5, 6.7, 6.9, 2.5]) + + For two dimensional arrays, the return will be two arrays ordered by + axis. 
In this example the first array stands for the gradient in + rows and the second one in columns direction: + + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) + [array([[ 2., 2., -1.], + [ 2., 2., -1.]]), array([[1. , 2.5, 4. ], + [1. , 1. , 1. ]])] + + In this example the spacing is also specified: + uniform for axis=0 and non uniform for axis=1 + + >>> dx = 2. + >>> y = [1., 1.5, 3.5] + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) + [array([[ 1. , 1. , -0.5], + [ 1. , 1. , -0.5]]), array([[2. , 2. , 2. ], + [2. , 1.7, 0.5]])] + + It is possible to specify how boundaries are treated using `edge_order` + + >>> x = np.array([0, 1, 2, 3, 4]) + >>> f = x**2 + >>> np.gradient(f, edge_order=1) + array([1., 2., 4., 6., 7.]) + >>> np.gradient(f, edge_order=2) + array([0., 2., 4., 6., 8.]) + + The `axis` keyword can be used to specify a subset of axes of which the + gradient is calculated + + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0) + array([[ 2., 2., -1.], + [ 2., 2., -1.]]) + + Notes + ----- + Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous + derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we + minimize the "consistency error" :math:`\\eta_{i}` between the true gradient + and its estimate from a linear combination of the neighboring grid-points: + + .. math:: + + \\eta_{i} = f_{i}^{\\left(1\\right)} - + \\left[ \\alpha f\\left(x_{i}\\right) + + \\beta f\\left(x_{i} + h_{d}\\right) + + \\gamma f\\left(x_{i}-h_{s}\\right) + \\right] + + By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` + with their Taylor series expansion, this translates into solving + the following the linear system: + + .. math:: + + \\left\\{ + \\begin{array}{r} + \\alpha+\\beta+\\gamma=0 \\\\ + \\beta h_{d}-\\gamma h_{s}=1 \\\\ + \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0 + \\end{array} + \\right. + + The resulting approximation of :math:`f_{i}^{(1)}` is the following: + + .. math:: + + \\hat f_{i}^{(1)} = + \\frac{ + h_{s}^{2}f\\left(x_{i} + h_{d}\\right) + + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) + - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} + { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} + + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} + + h_{s}h_{d}^{2}}{h_{d} + + h_{s}}\\right) + + It is worth noting that if :math:`h_{s}=h_{d}` + (i.e., data are evenly spaced) + we find the standard second order approximation: + + .. math:: + + \\hat f_{i}^{(1)}= + \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} + + \\mathcal{O}\\left(h^{2}\\right) + + With a similar procedure the forward/backward approximations used for + boundaries can be derived. + + References + ---------- + .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics + (Texts in Applied Mathematics). New York: Springer. + .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations + in Geophysical Fluid Dynamics. New York: Springer. + .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on + Arbitrarily Spaced Grids, + Mathematics of Computation 51, no. 184 : 699-706. + `PDF `_. 
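
As an illustrative aside, the non-uniform interior formula above is exact for quadratics (its truncation term involves the third derivative). Sampling f(x) = x**2 at x = 0, 1, 3 gives the exact derivative 2.0 at the interior point, while the endpoints use the default first-order one-sided differences:

>>> import numpy as np
>>> x = np.array([0., 1., 3.])
>>> np.gradient(x**2, x)
array([1., 2., 4.])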
+ """ + f = np.asanyarray(f) + N = f.ndim # number of dimensions + + if axis is None: + axes = tuple(range(N)) + else: + axes = _nx.normalize_axis_tuple(axis, N) + + len_axes = len(axes) + n = len(varargs) + if n == 0: + # no spacing argument - use 1 in all axes + dx = [1.0] * len_axes + elif n == 1 and np.ndim(varargs[0]) == 0: + # single scalar for all axes + dx = varargs * len_axes + elif n == len_axes: + # scalar or 1d array for each axis + dx = list(varargs) + for i, distances in enumerate(dx): + distances = np.asanyarray(distances) + if distances.ndim == 0: + continue + elif distances.ndim != 1: + raise ValueError("distances must be either scalars or 1d") + if len(distances) != f.shape[axes[i]]: + raise ValueError("when 1d, distances must match " + "the length of the corresponding dimension") + if np.issubdtype(distances.dtype, np.integer): + # Convert numpy integer types to float64 to avoid modular + # arithmetic in np.diff(distances). + distances = distances.astype(np.float64) + diffx = np.diff(distances) + # if distances are constant reduce to the scalar case + # since it brings a consistent speedup + if (diffx == diffx[0]).all(): + diffx = diffx[0] + dx[i] = diffx + else: + raise TypeError("invalid number of arguments") + + if edge_order > 2: + raise ValueError("'edge_order' greater than 2 not supported") + + # use central differences on interior and one-sided differences on the + # endpoints. This preserves second order-accuracy over the full domain. + + outvals = [] + + # create slice objects --- initially all are [:, :, ..., :] + slice1 = [slice(None)]*N + slice2 = [slice(None)]*N + slice3 = [slice(None)]*N + slice4 = [slice(None)]*N + + otype = f.dtype + if otype.type is np.datetime64: + # the timedelta dtype with the same unit information + otype = np.dtype(otype.name.replace('datetime', 'timedelta')) + # view as timedelta to allow addition + f = f.view(otype) + elif otype.type is np.timedelta64: + pass + elif np.issubdtype(otype, np.inexact): + pass + else: + # All other types convert to floating point. + # First check if f is a numpy integer type; if so, convert f to float64 + # to avoid modular arithmetic when computing the changes in f. + if np.issubdtype(otype, np.integer): + f = f.astype(np.float64) + otype = np.float64 + + for axis, ax_dx in zip(axes, dx): + if f.shape[axis] < edge_order + 1: + raise ValueError( + "Shape of array too small to calculate a numerical gradient, " + "at least (edge_order + 1) elements are required.") + # result allocation + out = np.empty_like(f, dtype=otype) + + # spacing for the current axis + uniform_spacing = np.ndim(ax_dx) == 0 + + # Numerical differentiation: 2nd order interior + slice1[axis] = slice(1, -1) + slice2[axis] = slice(None, -2) + slice3[axis] = slice(1, -1) + slice4[axis] = slice(2, None) + + if uniform_spacing: + out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. 
* ax_dx) + else: + dx1 = ax_dx[0:-1] + dx2 = ax_dx[1:] + a = -(dx2)/(dx1 * (dx1 + dx2)) + b = (dx2 - dx1) / (dx1 * dx2) + c = dx1 / (dx2 * (dx1 + dx2)) + # fix the shape for broadcasting + shape = np.ones(N, dtype=int) + shape[axis] = -1 + a.shape = b.shape = c.shape = shape + # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + + # Numerical differentiation: 1st order edges + if edge_order == 1: + slice1[axis] = 0 + slice2[axis] = 1 + slice3[axis] = 0 + dx_0 = ax_dx if uniform_spacing else ax_dx[0] + # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0 + + slice1[axis] = -1 + slice2[axis] = -1 + slice3[axis] = -2 + dx_n = ax_dx if uniform_spacing else ax_dx[-1] + # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n + + # Numerical differentiation: 2nd order edges + else: + slice1[axis] = 0 + slice2[axis] = 0 + slice3[axis] = 1 + slice4[axis] = 2 + if uniform_spacing: + a = -1.5 / ax_dx + b = 2. / ax_dx + c = -0.5 / ax_dx + else: + dx1 = ax_dx[0] + dx2 = ax_dx[1] + a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2)) + b = (dx1 + dx2) / (dx1 * dx2) + c = - dx1 / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + + slice1[axis] = -1 + slice2[axis] = -3 + slice3[axis] = -2 + slice4[axis] = -1 + if uniform_spacing: + a = 0.5 / ax_dx + b = -2. / ax_dx + c = 1.5 / ax_dx + else: + dx1 = ax_dx[-2] + dx2 = ax_dx[-1] + a = (dx2) / (dx1 * (dx1 + dx2)) + b = - (dx2 + dx1) / (dx1 * dx2) + c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + + outvals.append(out) + + # reset the slice object in this dimension to ":" + slice1[axis] = slice(None) + slice2[axis] = slice(None) + slice3[axis] = slice(None) + slice4[axis] = slice(None) + + if len_axes == 1: + return outvals[0] + elif np._using_numpy2_behavior(): + return tuple(outvals) + else: + return outvals + + +def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None): + return (a, prepend, append) + + +@array_function_dispatch(_diff_dispatcher) +def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): + """ + Calculate the n-th discrete difference along the given axis. + + The first difference is given by ``out[i] = a[i+1] - a[i]`` along + the given axis, higher differences are calculated by using `diff` + recursively. + + Parameters + ---------- + a : array_like + Input array + n : int, optional + The number of times values are differenced. If zero, the input + is returned as-is. + axis : int, optional + The axis along which the difference is taken, default is the + last axis. + prepend, append : array_like, optional + Values to prepend or append to `a` along axis prior to + performing the difference. Scalar values are expanded to + arrays with length 1 in the direction of axis and the shape + of the input array in along all other axes. Otherwise the + dimension and shape must match `a` except along axis. + + .. versionadded:: 1.16.0 + + Returns + ------- + diff : ndarray + The n-th differences. The shape of the output is the same as `a` + except along `axis` where the dimension is smaller by `n`. 
The + type of the output is the same as the type of the difference + between any two elements of `a`. This is the same as the type of + `a` in most cases. A notable exception is `datetime64`, which + results in a `timedelta64` output array. + + See Also + -------- + gradient, ediff1d, cumsum + + Notes + ----- + Type is preserved for boolean arrays, so the result will contain + `False` when consecutive elements are the same and `True` when they + differ. + + For unsigned integer arrays, the results will also be unsigned. This + should not be surprising, as the result is consistent with + calculating the difference directly: + + >>> u8_arr = np.array([1, 0], dtype=np.uint8) + >>> np.diff(u8_arr) + array([255], dtype=uint8) + >>> u8_arr[1,...] - u8_arr[0,...] + 255 + + If this is not desirable, then the array should be cast to a larger + integer type first: + + >>> i16_arr = u8_arr.astype(np.int16) + >>> np.diff(i16_arr) + array([-1], dtype=int16) + + Examples + -------- + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.diff(x) + array([ 1, 2, 3, -7]) + >>> np.diff(x, n=2) + array([ 1, 1, -10]) + + >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) + >>> np.diff(x) + array([[2, 3, 4], + [5, 1, 2]]) + >>> np.diff(x, axis=0) + array([[-1, 2, 0, -2]]) + + >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) + >>> np.diff(x) + array([1, 1], dtype='timedelta64[D]') + + """ + if n == 0: + return a + if n < 0: + raise ValueError( + "order must be non-negative but got " + repr(n)) + + a = asanyarray(a) + nd = a.ndim + if nd == 0: + raise ValueError("diff requires input that is at least one dimensional") + axis = normalize_axis_index(axis, nd) + + combined = [] + if prepend is not np._NoValue: + prepend = np.asanyarray(prepend) + if prepend.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + prepend = np.broadcast_to(prepend, tuple(shape)) + combined.append(prepend) + + combined.append(a) + + if append is not np._NoValue: + append = np.asanyarray(append) + if append.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + append = np.broadcast_to(append, tuple(shape)) + combined.append(append) + + if len(combined) > 1: + a = np.concatenate(combined, axis) + + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + slice1 = tuple(slice1) + slice2 = tuple(slice2) + + op = not_equal if a.dtype == np.bool_ else subtract + for _ in range(n): + a = op(a[slice1], a[slice2]) + + return a + + +def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None): + return (x, xp, fp) + + +@array_function_dispatch(_interp_dispatcher) +def interp(x, xp, fp, left=None, right=None, period=None): + """ + One-dimensional linear interpolation for monotonically increasing sample points. + + Returns the one-dimensional piecewise linear interpolant to a function + with given discrete data points (`xp`, `fp`), evaluated at `x`. + + Parameters + ---------- + x : array_like + The x-coordinates at which to evaluate the interpolated values. + + xp : 1-D sequence of floats + The x-coordinates of the data points, must be increasing if argument + `period` is not specified. Otherwise, `xp` is internally sorted after + normalizing the periodic boundaries with ``xp = xp % period``. + + fp : 1-D sequence of float or complex + The y-coordinates of the data points, same length as `xp`. + + left : optional float or complex corresponding to fp + Value to return for `x < xp[0]`, default is `fp[0]`. 
+ + right : optional float or complex corresponding to fp + Value to return for `x > xp[-1]`, default is `fp[-1]`. + + period : None or float, optional + A period for the x-coordinates. This parameter allows the proper + interpolation of angular x-coordinates. Parameters `left` and `right` + are ignored if `period` is specified. + + .. versionadded:: 1.10.0 + + Returns + ------- + y : float or complex (corresponding to fp) or ndarray + The interpolated values, same shape as `x`. + + Raises + ------ + ValueError + If `xp` and `fp` have different length + If `xp` or `fp` are not 1-D sequences + If `period == 0` + + See Also + -------- + scipy.interpolate + + Warnings + -------- + The x-coordinate sequence is expected to be increasing, but this is not + explicitly enforced. However, if the sequence `xp` is non-increasing, + interpolation results are meaningless. + + Note that, since NaN is unsortable, `xp` also cannot contain NaNs. + + A simple check for `xp` being strictly increasing is:: + + np.all(np.diff(xp) > 0) + + Examples + -------- + >>> xp = [1, 2, 3] + >>> fp = [3, 2, 0] + >>> np.interp(2.5, xp, fp) + 1.0 + >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) + array([3. , 3. , 2.5 , 0.56, 0. ]) + >>> UNDEF = -99.0 + >>> np.interp(3.14, xp, fp, right=UNDEF) + -99.0 + + Plot an interpolant to the sine function: + + >>> x = np.linspace(0, 2*np.pi, 10) + >>> y = np.sin(x) + >>> xvals = np.linspace(0, 2*np.pi, 50) + >>> yinterp = np.interp(xvals, x, y) + >>> import matplotlib.pyplot as plt + >>> plt.plot(x, y, 'o') + [] + >>> plt.plot(xvals, yinterp, '-x') + [] + >>> plt.show() + + Interpolation with periodic x-coordinates: + + >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] + >>> xp = [190, -190, 350, -350] + >>> fp = [5, 10, 3, 4] + >>> np.interp(x, xp, fp, period=360) + array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75]) + + Complex interpolation: + + >>> x = [1.5, 4.0] + >>> xp = [2,3,5] + >>> fp = [1.0j, 0, 2+3j] + >>> np.interp(x, xp, fp) + array([0.+1.j , 1.+1.5j]) + + """ + + fp = np.asarray(fp) + + if np.iscomplexobj(fp): + interp_func = compiled_interp_complex + input_dtype = np.complex128 + else: + interp_func = compiled_interp + input_dtype = np.float64 + + if period is not None: + if period == 0: + raise ValueError("period must be a non-zero value") + period = abs(period) + left = None + right = None + + x = np.asarray(x, dtype=np.float64) + xp = np.asarray(xp, dtype=np.float64) + fp = np.asarray(fp, dtype=input_dtype) + + if xp.ndim != 1 or fp.ndim != 1: + raise ValueError("Data points must be 1-D sequences") + if xp.shape[0] != fp.shape[0]: + raise ValueError("fp and xp are not of the same length") + # normalizing periodic boundaries + x = x % period + xp = xp % period + asort_xp = np.argsort(xp) + xp = xp[asort_xp] + fp = fp[asort_xp] + xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) + fp = np.concatenate((fp[-1:], fp, fp[0:1])) + + return interp_func(x, xp, fp, left, right) + + +def _angle_dispatcher(z, deg=None): + return (z,) + + +@array_function_dispatch(_angle_dispatcher) +def angle(z, deg=False): + """ + Return the angle of the complex argument. + + Parameters + ---------- + z : array_like + A complex number or sequence of complex numbers. + deg : bool, optional + Return angle in degrees if True, radians if False (default). + + Returns + ------- + angle : ndarray or scalar + The counterclockwise angle from the positive real axis on the complex + plane in the range ``(-pi, pi]``, with dtype as numpy.float64. + + .. 
versionchanged:: 1.16.0 + This function works on subclasses of ndarray like `ma.array`. + + See Also + -------- + arctan2 + absolute + + Notes + ----- + Although the angle of the complex number 0 is undefined, ``numpy.angle(0)`` + returns the value 0. + + Examples + -------- + >>> np.angle([1.0, 1.0j, 1+1j]) # in radians + array([ 0. , 1.57079633, 0.78539816]) # may vary + >>> np.angle(1+1j, deg=True) # in degrees + 45.0 + + """ + z = asanyarray(z) + if issubclass(z.dtype.type, _nx.complexfloating): + zimag = z.imag + zreal = z.real + else: + zimag = 0 + zreal = z + + a = arctan2(zimag, zreal) + if deg: + a *= 180/pi + return a + + +def _unwrap_dispatcher(p, discont=None, axis=None, *, period=None): + return (p,) + + +@array_function_dispatch(_unwrap_dispatcher) +def unwrap(p, discont=None, axis=-1, *, period=2*pi): + r""" + Unwrap by taking the complement of large deltas with respect to the period. + + This unwraps a signal `p` by changing elements which have an absolute + difference from their predecessor of more than ``max(discont, period/2)`` + to their `period`-complementary values. + + For the default case where `period` is :math:`2\pi` and `discont` is + :math:`\pi`, this unwraps a radian phase `p` such that adjacent differences + are never greater than :math:`\pi` by adding :math:`2k\pi` for some + integer :math:`k`. + + Parameters + ---------- + p : array_like + Input array. + discont : float, optional + Maximum discontinuity between values, default is ``period/2``. + Values below ``period/2`` are treated as if they were ``period/2``. + To have an effect different from the default, `discont` should be + larger than ``period/2``. + axis : int, optional + Axis along which unwrap will operate, default is the last axis. + period : float, optional + Size of the range over which the input wraps. By default, it is + ``2 pi``. + + .. versionadded:: 1.21.0 + + Returns + ------- + out : ndarray + Output array. + + See Also + -------- + rad2deg, deg2rad + + Notes + ----- + If the discontinuity in `p` is smaller than ``period/2``, + but larger than `discont`, no unwrapping is done because taking + the complement would only make the discontinuity larger. + + Examples + -------- + >>> phase = np.linspace(0, np.pi, num=5) + >>> phase[3:] += np.pi + >>> phase + array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary + >>> np.unwrap(phase) + array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. 
]) # may vary + >>> np.unwrap([0, 1, 2, -1, 0], period=4) + array([0, 1, 2, 3, 4]) + >>> np.unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6) + array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4) + array([2, 3, 4, 5, 6, 7, 8, 9]) + >>> phase_deg = np.mod(np.linspace(0 ,720, 19), 360) - 180 + >>> np.unwrap(phase_deg, period=360) + array([-180., -140., -100., -60., -20., 20., 60., 100., 140., + 180., 220., 260., 300., 340., 380., 420., 460., 500., + 540.]) + """ + p = asarray(p) + nd = p.ndim + dd = diff(p, axis=axis) + if discont is None: + discont = period/2 + slice1 = [slice(None, None)]*nd # full slices + slice1[axis] = slice(1, None) + slice1 = tuple(slice1) + dtype = np.result_type(dd, period) + if _nx.issubdtype(dtype, _nx.integer): + interval_high, rem = divmod(period, 2) + boundary_ambiguous = rem == 0 + else: + interval_high = period / 2 + boundary_ambiguous = True + interval_low = -interval_high + ddmod = mod(dd - interval_low, period) + interval_low + if boundary_ambiguous: + # for `mask = (abs(dd) == period/2)`, the above line made + # `ddmod[mask] == -period/2`. correct these such that + # `ddmod[mask] == sign(dd[mask])*period/2`. + _nx.copyto(ddmod, interval_high, + where=(ddmod == interval_low) & (dd > 0)) + ph_correct = ddmod - dd + _nx.copyto(ph_correct, 0, where=abs(dd) < discont) + up = array(p, copy=True, dtype=dtype) + up[slice1] = p[slice1] + ph_correct.cumsum(axis) + return up + + +def _sort_complex(a): + return (a,) + + +@array_function_dispatch(_sort_complex) +def sort_complex(a): + """ + Sort a complex array using the real part first, then the imaginary part. + + Parameters + ---------- + a : array_like + Input array + + Returns + ------- + out : complex ndarray + Always returns a sorted complex array. + + Examples + -------- + >>> np.sort_complex([5, 3, 6, 2, 1]) + array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) + + >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) + array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) + + """ + b = array(a, copy=True) + b.sort() + if not issubclass(b.dtype.type, _nx.complexfloating): + if b.dtype.char in 'bhBH': + return b.astype('F') + elif b.dtype.char == 'g': + return b.astype('G') + else: + return b.astype('D') + else: + return b + + +def _trim_zeros(filt, trim=None): + return (filt,) + + +@array_function_dispatch(_trim_zeros) +def trim_zeros(filt, trim='fb'): + """ + Trim the leading and/or trailing zeros from a 1-D array or sequence. + + Parameters + ---------- + filt : 1-D array or sequence + Input array. + trim : str, optional + A string with 'f' representing trim from front and 'b' to trim from + back. Default is 'fb', trim zeros from both front and back of the + array. + + Returns + ------- + trimmed : 1-D array or sequence + The result of trimming the input. The input data type is preserved. + + Examples + -------- + >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) + >>> np.trim_zeros(a) + array([1, 2, 3, 0, 2, 1]) + + >>> np.trim_zeros(a, 'b') + array([0, 0, 0, ..., 0, 2, 1]) + + The input data type is preserved, list/tuple in means list/tuple out. 
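+
+ A tuple behaves the same way:
+
+ >>> np.trim_zeros((0, 1, 2, 0))
+ (1, 2)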
+ + >>> np.trim_zeros([0, 1, 2, 0]) + [1, 2] + + """ + + first = 0 + trim = trim.upper() + if 'F' in trim: + for i in filt: + if i != 0.: + break + else: + first = first + 1 + last = len(filt) + if 'B' in trim: + for i in filt[::-1]: + if i != 0.: + break + else: + last = last - 1 + return filt[first:last] + + +def _extract_dispatcher(condition, arr): + return (condition, arr) + + +@array_function_dispatch(_extract_dispatcher) +def extract(condition, arr): + """ + Return the elements of an array that satisfy some condition. + + This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If + `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. + + Note that `place` does the exact opposite of `extract`. + + Parameters + ---------- + condition : array_like + An array whose nonzero or True entries indicate the elements of `arr` + to extract. + arr : array_like + Input array of the same size as `condition`. + + Returns + ------- + extract : ndarray + Rank 1 array of values from `arr` where `condition` is True. + + See Also + -------- + take, put, copyto, compress, place + + Examples + -------- + >>> arr = np.arange(12).reshape((3, 4)) + >>> arr + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> condition = np.mod(arr, 3)==0 + >>> condition + array([[ True, False, False, True], + [False, False, True, False], + [False, True, False, False]]) + >>> np.extract(condition, arr) + array([0, 3, 6, 9]) + + + If `condition` is boolean: + + >>> arr[condition] + array([0, 3, 6, 9]) + + """ + return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) + + +def _place_dispatcher(arr, mask, vals): + return (arr, mask, vals) + + +@array_function_dispatch(_place_dispatcher) +def place(arr, mask, vals): + """ + Change elements of an array based on conditional and input values. + + Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that + `place` uses the first N elements of `vals`, where N is the number of + True values in `mask`, while `copyto` uses the elements where `mask` + is True. + + Note that `extract` does the exact opposite of `place`. + + Parameters + ---------- + arr : ndarray + Array to put data into. + mask : array_like + Boolean mask array. Must have the same size as `a`. + vals : 1-D sequence + Values to put into `a`. Only the first N elements are used, where + N is the number of True values in `mask`. If `vals` is smaller + than N, it will be repeated, and if elements of `a` are to be masked, + this sequence must be non-empty. + + See Also + -------- + copyto, put, take, extract + + Examples + -------- + >>> arr = np.arange(6).reshape(2, 3) + >>> np.place(arr, arr>2, [44, 55]) + >>> arr + array([[ 0, 1, 2], + [44, 55, 44]]) + + """ + return _place(arr, mask, vals) + + +def disp(mesg, device=None, linefeed=True): + """ + Display a message on a device. + + Parameters + ---------- + mesg : str + Message to display. + device : object + Device to write message. If None, defaults to ``sys.stdout`` which is + very similar to ``print``. `device` needs to have ``write()`` and + ``flush()`` methods. + linefeed : bool, optional + Option whether to print a line feed or not. Defaults to True. + + Raises + ------ + AttributeError + If `device` does not have a ``write()`` or ``flush()`` method. 
+ + Examples + -------- + Besides ``sys.stdout``, a file-like object can also be used as it has + both required methods: + + >>> from io import StringIO + >>> buf = StringIO() + >>> np.disp(u'"Display" in a file', device=buf) + >>> buf.getvalue() + '"Display" in a file\\n' + + """ + if device is None: + device = sys.stdout + if linefeed: + device.write('%s\n' % mesg) + else: + device.write('%s' % mesg) + device.flush() + return + + +# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html +_DIMENSION_NAME = r'\w+' +_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME) +_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST) +_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT) +_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST) + + +def _parse_gufunc_signature(signature): + """ + Parse string signatures for a generalized universal function. + + Arguments + --------- + signature : string + Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` + for ``np.matmul``. + + Returns + ------- + Tuple of input and output core dimensions parsed from the signature, each + of the form List[Tuple[str, ...]]. + """ + signature = re.sub(r'\s+', '', signature) + + if not re.match(_SIGNATURE, signature): + raise ValueError( + 'not a valid gufunc signature: {}'.format(signature)) + return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) + for arg in re.findall(_ARGUMENT, arg_list)] + for arg_list in signature.split('->')) + + +def _update_dim_sizes(dim_sizes, arg, core_dims): + """ + Incrementally check and update core dimension sizes for a single argument. + + Arguments + --------- + dim_sizes : Dict[str, int] + Sizes of existing core dimensions. Will be updated in-place. + arg : ndarray + Argument to examine. + core_dims : Tuple[str, ...] + Core dimensions for this argument. + """ + if not core_dims: + return + + num_core_dims = len(core_dims) + if arg.ndim < num_core_dims: + raise ValueError( + '%d-dimensional argument does not have enough ' + 'dimensions for all core dimensions %r' + % (arg.ndim, core_dims)) + + core_shape = arg.shape[-num_core_dims:] + for dim, size in zip(core_dims, core_shape): + if dim in dim_sizes: + if size != dim_sizes[dim]: + raise ValueError( + 'inconsistent size for core dimension %r: %r vs %r' + % (dim, size, dim_sizes[dim])) + else: + dim_sizes[dim] = size + + +def _parse_input_dimensions(args, input_core_dims): + """ + Parse broadcast and core dimensions for vectorize with a signature. + + Arguments + --------- + args : Tuple[ndarray, ...] + Tuple of input arguments to examine. + input_core_dims : List[Tuple[str, ...]] + List of core dimensions corresponding to each input. + + Returns + ------- + broadcast_shape : Tuple[int, ...] + Common shape to broadcast all non-core dimensions to. + dim_sizes : Dict[str, int] + Common sizes for named core dimensions. 
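+
+ Examples
+ --------
+ An illustrative sketch with hypothetical shapes for a signature like
+ ``(n),(n,k)->(k)``: the trailing core dimensions are recorded in
+ ``dim_sizes`` and the leading dimensions are broadcast together.
+
+ >>> x = np.zeros((2, 3, 4)); y = np.zeros((4, 5))
+ >>> _parse_input_dimensions((x, y), [('n',), ('n', 'k')])
+ ((2, 3), {'n': 4, 'k': 5})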
+ """ + broadcast_args = [] + dim_sizes = {} + for arg, core_dims in zip(args, input_core_dims): + _update_dim_sizes(dim_sizes, arg, core_dims) + ndim = arg.ndim - len(core_dims) + dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) + broadcast_args.append(dummy_array) + broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args) + return broadcast_shape, dim_sizes + + +def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): + """Helper for calculating broadcast shapes with core dimensions.""" + return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) + for core_dims in list_of_core_dims] + + +def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes, + results=None): + """Helper for creating output arrays in vectorize.""" + shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) + if dtypes is None: + dtypes = [None] * len(shapes) + if results is None: + arrays = tuple(np.empty(shape=shape, dtype=dtype) + for shape, dtype in zip(shapes, dtypes)) + else: + arrays = tuple(np.empty_like(result, shape=shape, dtype=dtype) + for result, shape, dtype + in zip(results, shapes, dtypes)) + return arrays + + +@set_module('numpy') +class vectorize: + """ + vectorize(pyfunc=np._NoValue, otypes=None, doc=None, excluded=None, + cache=False, signature=None) + + Returns an object that acts like pyfunc, but takes arrays as input. + + Define a vectorized function which takes a nested sequence of objects or + numpy arrays as inputs and returns a single numpy array or a tuple of numpy + arrays. The vectorized function evaluates `pyfunc` over successive tuples + of the input arrays like the python map function, except it uses the + broadcasting rules of numpy. + + The data type of the output of `vectorized` is determined by calling + the function with the first element of the input. This can be avoided + by specifying the `otypes` argument. + + Parameters + ---------- + pyfunc : callable, optional + A python function or method. + Can be omitted to produce a decorator with keyword arguments. + otypes : str or list of dtypes, optional + The output data type. It must be specified as either a string of + typecode characters or a list of data type specifiers. There should + be one data type specifier for each output. + doc : str, optional + The docstring for the function. If None, the docstring will be the + ``pyfunc.__doc__``. + excluded : set, optional + Set of strings or integers representing the positional or keyword + arguments for which the function will not be vectorized. These will be + passed directly to `pyfunc` unmodified. + + .. versionadded:: 1.7.0 + + cache : bool, optional + If `True`, then cache the first function call that determines the number + of outputs if `otypes` is not provided. + + .. versionadded:: 1.7.0 + + signature : string, optional + Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for + vectorized matrix-vector multiplication. If provided, ``pyfunc`` will + be called with (and expected to return) arrays with shapes given by the + size of corresponding core dimensions. By default, ``pyfunc`` is + assumed to take scalars as input and output. + + .. versionadded:: 1.12.0 + + Returns + ------- + out : callable + A vectorized function if ``pyfunc`` was provided, + a decorator otherwise. + + See Also + -------- + frompyfunc : Takes an arbitrary Python function and returns a ufunc + + Notes + ----- + The `vectorize` function is provided primarily for convenience, not for + performance. 
The implementation is essentially a for loop. + + If `otypes` is not specified, then a call to the function with the + first argument will be used to determine the number of outputs. The + results of this call will be cached if `cache` is `True` to prevent + calling the function twice. However, to implement the cache, the + original function must be wrapped which will slow down subsequent + calls, so only do this if your function is expensive. + + The new keyword argument interface and `excluded` argument support + further degrades performance. + + References + ---------- + .. [1] :doc:`/reference/c-api/generalized-ufuncs` + + Examples + -------- + >>> def myfunc(a, b): + ... "Return a-b if a>b, otherwise return a+b" + ... if a > b: + ... return a - b + ... else: + ... return a + b + + >>> vfunc = np.vectorize(myfunc) + >>> vfunc([1, 2, 3, 4], 2) + array([3, 4, 1, 2]) + + The docstring is taken from the input function to `vectorize` unless it + is specified: + + >>> vfunc.__doc__ + 'Return a-b if a>b, otherwise return a+b' + >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') + >>> vfunc.__doc__ + 'Vectorized `myfunc`' + + The output type is determined by evaluating the first element of the input, + unless it is specified: + + >>> out = vfunc([1, 2, 3, 4], 2) + >>> type(out[0]) + + >>> vfunc = np.vectorize(myfunc, otypes=[float]) + >>> out = vfunc([1, 2, 3, 4], 2) + >>> type(out[0]) + + + The `excluded` argument can be used to prevent vectorizing over certain + arguments. This can be useful for array-like arguments of a fixed length + such as the coefficients for a polynomial as in `polyval`: + + >>> def mypolyval(p, x): + ... _p = list(p) + ... res = _p.pop(0) + ... while _p: + ... res = res*x + _p.pop(0) + ... return res + >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) + >>> vpolyval(p=[1, 2, 3], x=[0, 1]) + array([3, 6]) + + Positional arguments may also be excluded by specifying their position: + + >>> vpolyval.excluded.add(0) + >>> vpolyval([1, 2, 3], x=[0, 1]) + array([3, 6]) + + The `signature` argument allows for vectorizing functions that act on + non-scalar arrays of fixed length. For example, you can use it for a + vectorized calculation of Pearson correlation coefficient and its p-value: + + >>> import scipy.stats + >>> pearsonr = np.vectorize(scipy.stats.pearsonr, + ... signature='(n),(n)->(),()') + >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) + (array([ 1., -1.]), array([ 0., 0.])) + + Or for a vectorized convolution: + + >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') + >>> convolve(np.eye(4), [1, 2, 1]) + array([[1., 2., 1., 0., 0., 0.], + [0., 1., 2., 1., 0., 0.], + [0., 0., 1., 2., 1., 0.], + [0., 0., 0., 1., 2., 1.]]) + + Decorator syntax is supported. The decorator can be called as + a function to provide keyword arguments. + >>>@np.vectorize + ...def identity(x): + ... return x + ... + >>>identity([0, 1, 2]) + array([0, 1, 2]) + >>>@np.vectorize(otypes=[float]) + ...def as_float(x): + ... return x + ... + >>>as_float([0, 1, 2]) + array([0., 1., 2.]) + """ + def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None, + excluded=None, cache=False, signature=None): + + if (pyfunc != np._NoValue) and (not callable(pyfunc)): + #Splitting the error message to keep + #the length below 79 characters. + part1 = "When used as a decorator, " + part2 = "only accepts keyword arguments." 
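+ # A positional, non-callable pyfunc means the decorator-factory form
+ # (e.g. ``np.vectorize(otypes=[float])``) received a positional
+ # argument; that form accepts keyword arguments only.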
+ raise TypeError(part1 + part2) + + self.pyfunc = pyfunc + self.cache = cache + self.signature = signature + if pyfunc != np._NoValue and hasattr(pyfunc, '__name__'): + self.__name__ = pyfunc.__name__ + + self._ufunc = {} # Caching to improve default performance + self._doc = None + self.__doc__ = doc + if doc is None and hasattr(pyfunc, '__doc__'): + self.__doc__ = pyfunc.__doc__ + else: + self._doc = doc + + if isinstance(otypes, str): + for char in otypes: + if char not in typecodes['All']: + raise ValueError("Invalid otype specified: %s" % (char,)) + elif iterable(otypes): + otypes = ''.join([_nx.dtype(x).char for x in otypes]) + elif otypes is not None: + raise ValueError("Invalid otype specification") + self.otypes = otypes + + # Excluded variable support + if excluded is None: + excluded = set() + self.excluded = set(excluded) + + if signature is not None: + self._in_and_out_core_dims = _parse_gufunc_signature(signature) + else: + self._in_and_out_core_dims = None + + def _init_stage_2(self, pyfunc, *args, **kwargs): + self.__name__ = pyfunc.__name__ + self.pyfunc = pyfunc + if self._doc is None: + self.__doc__ = pyfunc.__doc__ + else: + self.__doc__ = self._doc + + def _call_as_normal(self, *args, **kwargs): + """ + Return arrays with the results of `pyfunc` broadcast (vectorized) over + `args` and `kwargs` not in `excluded`. + """ + excluded = self.excluded + if not kwargs and not excluded: + func = self.pyfunc + vargs = args + else: + # The wrapper accepts only positional arguments: we use `names` and + # `inds` to mutate `the_args` and `kwargs` to pass to the original + # function. + nargs = len(args) + + names = [_n for _n in kwargs if _n not in excluded] + inds = [_i for _i in range(nargs) if _i not in excluded] + the_args = list(args) + + def func(*vargs): + for _n, _i in enumerate(inds): + the_args[_i] = vargs[_n] + kwargs.update(zip(names, vargs[len(inds):])) + return self.pyfunc(*the_args, **kwargs) + + vargs = [args[_i] for _i in inds] + vargs.extend([kwargs[_n] for _n in names]) + + return self._vectorize_call(func=func, args=vargs) + + def __call__(self, *args, **kwargs): + if self.pyfunc is np._NoValue: + self._init_stage_2(*args, **kwargs) + return self + + return self._call_as_normal(*args, **kwargs) + + def _get_ufunc_and_otypes(self, func, args): + """Return (ufunc, otypes).""" + # frompyfunc will fail if args is empty + if not args: + raise ValueError('args can not be empty') + + if self.otypes is not None: + otypes = self.otypes + + # self._ufunc is a dictionary whose keys are the number of + # arguments (i.e. len(args)) and whose values are ufuncs created + # by frompyfunc. len(args) can be different for different calls if + # self.pyfunc has parameters with default values. We only use the + # cache when func is self.pyfunc, which occurs when the call uses + # only positional arguments and no arguments are excluded. + + nin = len(args) + nout = len(self.otypes) + if func is not self.pyfunc or nin not in self._ufunc: + ufunc = frompyfunc(func, nin, nout) + else: + ufunc = None # We'll get it from self._ufunc + if func is self.pyfunc: + ufunc = self._ufunc.setdefault(nin, ufunc) + else: + # Get number of outputs and output types by calling the function on + # the first entries of args. We also cache the result to prevent + # the subsequent call when the ufunc is evaluated. 
+ # Assumes that ufunc first evaluates the 0th elements in the input + # arrays (the input values are not checked to ensure this) + args = [asarray(arg) for arg in args] + if builtins.any(arg.size == 0 for arg in args): + raise ValueError('cannot call `vectorize` on size 0 inputs ' + 'unless `otypes` is set') + + inputs = [arg.flat[0] for arg in args] + outputs = func(*inputs) + + # Performance note: profiling indicates that -- for simple + # functions at least -- this wrapping can almost double the + # execution time. + # Hence we make it optional. + if self.cache: + _cache = [outputs] + + def _func(*vargs): + if _cache: + return _cache.pop() + else: + return func(*vargs) + else: + _func = func + + if isinstance(outputs, tuple): + nout = len(outputs) + else: + nout = 1 + outputs = (outputs,) + + otypes = ''.join([asarray(outputs[_k]).dtype.char + for _k in range(nout)]) + + # Performance note: profiling indicates that creating the ufunc is + # not a significant cost compared with wrapping so it seems not + # worth trying to cache this. + ufunc = frompyfunc(_func, len(args), nout) + + return ufunc, otypes + + def _vectorize_call(self, func, args): + """Vectorized call to `func` over positional `args`.""" + if self.signature is not None: + res = self._vectorize_call_with_signature(func, args) + elif not args: + res = func() + else: + ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) + + # Convert args to object arrays first + inputs = [asanyarray(a, dtype=object) for a in args] + + outputs = ufunc(*inputs) + + if ufunc.nout == 1: + res = asanyarray(outputs, dtype=otypes[0]) + else: + res = tuple([asanyarray(x, dtype=t) + for x, t in zip(outputs, otypes)]) + return res + + def _vectorize_call_with_signature(self, func, args): + """Vectorized call over positional arguments with a signature.""" + input_core_dims, output_core_dims = self._in_and_out_core_dims + + if len(args) != len(input_core_dims): + raise TypeError('wrong number of positional arguments: ' + 'expected %r, got %r' + % (len(input_core_dims), len(args))) + args = tuple(asanyarray(arg) for arg in args) + + broadcast_shape, dim_sizes = _parse_input_dimensions( + args, input_core_dims) + input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, + input_core_dims) + args = [np.broadcast_to(arg, shape, subok=True) + for arg, shape in zip(args, input_shapes)] + + outputs = None + otypes = self.otypes + nout = len(output_core_dims) + + for index in np.ndindex(*broadcast_shape): + results = func(*(arg[index] for arg in args)) + + n_results = len(results) if isinstance(results, tuple) else 1 + + if nout != n_results: + raise ValueError( + 'wrong number of outputs from pyfunc: expected %r, got %r' + % (nout, n_results)) + + if nout == 1: + results = (results,) + + if outputs is None: + for result, core_dims in zip(results, output_core_dims): + _update_dim_sizes(dim_sizes, result, core_dims) + + outputs = _create_arrays(broadcast_shape, dim_sizes, + output_core_dims, otypes, results) + + for output, result in zip(outputs, results): + output[index] = result + + if outputs is None: + # did not call the function even once + if otypes is None: + raise ValueError('cannot call `vectorize` on size 0 inputs ' + 'unless `otypes` is set') + if builtins.any(dim not in dim_sizes + for dims in output_core_dims + for dim in dims): + raise ValueError('cannot call `vectorize` with a signature ' + 'including new output dimensions on size 0 ' + 'inputs') + outputs = _create_arrays(broadcast_shape, dim_sizes, + output_core_dims, otypes) + + 
return outputs[0] if nout == 1 else outputs + + +def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None, + fweights=None, aweights=None, *, dtype=None): + return (m, y, fweights, aweights) + + +@array_function_dispatch(_cov_dispatcher) +def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, + aweights=None, *, dtype=None): + """ + Estimate a covariance matrix, given data and weights. + + Covariance indicates the level to which two variables vary together. + If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, + then the covariance matrix element :math:`C_{ij}` is the covariance of + :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance + of :math:`x_i`. + + See the notes for an outline of the algorithm. + + Parameters + ---------- + m : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `m` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same form + as that of `m`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N - 1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. These values can be overridden by using + the keyword ``ddof`` in numpy versions >= 1.5. + ddof : int, optional + If not ``None`` the default value implied by `bias` is overridden. + Note that ``ddof=1`` will return the unbiased estimate, even if both + `fweights` and `aweights` are specified, and ``ddof=0`` will return + the simple average. See the notes for the details. The default value + is ``None``. + + .. versionadded:: 1.5 + fweights : array_like, int, optional + 1-D array of integer frequency weights; the number of times each + observation vector should be repeated. + + .. versionadded:: 1.10 + aweights : array_like, optional + 1-D array of observation vector weights. These relative weights are + typically large for observations considered "important" and smaller for + observations considered less "important". If ``ddof=0`` the array of + weights can be used to assign probabilities to observation vectors. + + .. versionadded:: 1.10 + dtype : data-type, optional + Data-type of the result. By default, the return data-type will have + at least `numpy.float64` precision. + + .. versionadded:: 1.20 + + Returns + ------- + out : ndarray + The covariance matrix of the variables. + + See Also + -------- + corrcoef : Normalized covariance matrix + + Notes + ----- + Assume that the observations are in the columns of the observation + array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The + steps to compute the weighted covariance are as follows:: + + >>> m = np.arange(10, dtype=np.float64) + >>> f = np.arange(10) * 2 + >>> a = np.arange(10) ** 2. + >>> ddof = 1 + >>> w = f * a + >>> v1 = np.sum(w) + >>> v2 = np.sum(w * a) + >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1 + >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) + + Note that when ``a == 1``, the normalization factor + ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` + as it should. 
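+
+ A quick numerical check of that limit, taking ``ddof = 1`` and ``a == 1``
+ (so ``w = f`` and ``v2 == v1``):
+
+ >>> f = np.array([1., 2., 3.])
+ >>> v1 = f.sum()
+ >>> v1 / (v1**2 - 1 * v1) == 1 / (f.sum() - 1)
+ True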
+ + Examples + -------- + Consider two variables, :math:`x_0` and :math:`x_1`, which + correlate perfectly, but in opposite directions: + + >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T + >>> x + array([[0, 1, 2], + [2, 1, 0]]) + + Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance + matrix shows this clearly: + + >>> np.cov(x) + array([[ 1., -1.], + [-1., 1.]]) + + Note that element :math:`C_{0,1}`, which shows the correlation between + :math:`x_0` and :math:`x_1`, is negative. + + Further, note how `x` and `y` are combined: + + >>> x = [-2.1, -1, 4.3] + >>> y = [3, 1.1, 0.12] + >>> X = np.stack((x, y), axis=0) + >>> np.cov(X) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x, y) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x) + array(11.71) + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError( + "ddof must be integer") + + # Handles complex arrays too + m = np.asarray(m) + if m.ndim > 2: + raise ValueError("m has more than 2 dimensions") + + if y is not None: + y = np.asarray(y) + if y.ndim > 2: + raise ValueError("y has more than 2 dimensions") + + if dtype is None: + if y is None: + dtype = np.result_type(m, np.float64) + else: + dtype = np.result_type(m, y, np.float64) + + X = array(m, ndmin=2, dtype=dtype) + if not rowvar and X.shape[0] != 1: + X = X.T + if X.shape[0] == 0: + return np.array([]).reshape(0, 0) + if y is not None: + y = array(y, copy=False, ndmin=2, dtype=dtype) + if not rowvar and y.shape[0] != 1: + y = y.T + X = np.concatenate((X, y), axis=0) + + if ddof is None: + if bias == 0: + ddof = 1 + else: + ddof = 0 + + # Get the product of frequencies and weights + w = None + if fweights is not None: + fweights = np.asarray(fweights, dtype=float) + if not np.all(fweights == np.around(fweights)): + raise TypeError( + "fweights must be integer") + if fweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional fweights") + if fweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and fweights") + if any(fweights < 0): + raise ValueError( + "fweights cannot be negative") + w = fweights + if aweights is not None: + aweights = np.asarray(aweights, dtype=float) + if aweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional aweights") + if aweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and aweights") + if any(aweights < 0): + raise ValueError( + "aweights cannot be negative") + if w is None: + w = aweights + else: + w *= aweights + + avg, w_sum = average(X, axis=1, weights=w, returned=True) + w_sum = w_sum[0] + + # Determine the normalization + if w is None: + fact = X.shape[1] - ddof + elif ddof == 0: + fact = w_sum + elif aweights is None: + fact = w_sum - ddof + else: + fact = w_sum - ddof*sum(w*aweights)/w_sum + + if fact <= 0: + warnings.warn("Degrees of freedom <= 0 for slice", + RuntimeWarning, stacklevel=2) + fact = 0.0 + + X -= avg[:, None] + if w is None: + X_T = X.T + else: + X_T = (X*w).T + c = dot(X, X_T.conj()) + c *= np.true_divide(1, fact) + return c.squeeze() + + +def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None, *, + dtype=None): + return (x, y) + + +@array_function_dispatch(_corrcoef_dispatcher) +def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, + dtype=None): + """ + Return Pearson product-moment correlation coefficients. + + Please refer to the documentation for `cov` for more detail. 
The + relationship between the correlation coefficient matrix, `R`, and the + covariance matrix, `C`, is + + .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} C_{jj} } } + + The values of `R` are between -1 and 1, inclusive. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + ddof : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + dtype : data-type, optional + Data-type of the result. By default, the return data-type will have + at least `numpy.float64` precision. + + .. versionadded:: 1.20 + + Returns + ------- + R : ndarray + The correlation coefficient matrix of the variables. + + See Also + -------- + cov : Covariance matrix + + Notes + ----- + Due to floating point rounding the resulting array may not be Hermitian, + the diagonal elements may not be 1, and the elements may not satisfy the + inequality abs(a) <= 1. The real and imaginary parts are clipped to the + interval [-1, 1] in an attempt to improve on that situation but is not + much help in the complex case. + + This function accepts but discards arguments `bias` and `ddof`. This is + for backwards compatibility with previous versions of this function. These + arguments had no effect on the return values of the function and can be + safely ignored in this and previous versions of numpy. + + Examples + -------- + In this example we generate two random arrays, ``xarr`` and ``yarr``, and + compute the row-wise and column-wise Pearson correlation coefficients, + ``R``. Since ``rowvar`` is true by default, we first find the row-wise + Pearson correlation coefficients between the variables of ``xarr``. + + >>> import numpy as np + >>> rng = np.random.default_rng(seed=42) + >>> xarr = rng.random((3, 3)) + >>> xarr + array([[0.77395605, 0.43887844, 0.85859792], + [0.69736803, 0.09417735, 0.97562235], + [0.7611397 , 0.78606431, 0.12811363]]) + >>> R1 = np.corrcoef(xarr) + >>> R1 + array([[ 1. , 0.99256089, -0.68080986], + [ 0.99256089, 1. , -0.76492172], + [-0.68080986, -0.76492172, 1. ]]) + + If we add another set of variables and observations ``yarr``, we can + compute the row-wise Pearson correlation coefficients between the + variables in ``xarr`` and ``yarr``. + + >>> yarr = rng.random((3, 3)) + >>> yarr + array([[0.45038594, 0.37079802, 0.92676499], + [0.64386512, 0.82276161, 0.4434142 ], + [0.22723872, 0.55458479, 0.06381726]]) + >>> R2 = np.corrcoef(xarr, yarr) + >>> R2 + array([[ 1. , 0.99256089, -0.68080986, 0.75008178, -0.934284 , + -0.99004057], + [ 0.99256089, 1. , -0.76492172, 0.82502011, -0.97074098, + -0.99981569], + [-0.68080986, -0.76492172, 1. , -0.99507202, 0.89721355, + 0.77714685], + [ 0.75008178, 0.82502011, -0.99507202, 1. , -0.93657855, + -0.83571711], + [-0.934284 , -0.97074098, 0.89721355, -0.93657855, 1. , + 0.97517215], + [-0.99004057, -0.99981569, 0.77714685, -0.83571711, 0.97517215, + 1. 
]]) + + Finally if we use the option ``rowvar=False``, the columns are now + being treated as the variables and we will find the column-wise Pearson + correlation coefficients between variables in ``xarr`` and ``yarr``. + + >>> R3 = np.corrcoef(xarr, yarr, rowvar=False) + >>> R3 + array([[ 1. , 0.77598074, -0.47458546, -0.75078643, -0.9665554 , + 0.22423734], + [ 0.77598074, 1. , -0.92346708, -0.99923895, -0.58826587, + -0.44069024], + [-0.47458546, -0.92346708, 1. , 0.93773029, 0.23297648, + 0.75137473], + [-0.75078643, -0.99923895, 0.93773029, 1. , 0.55627469, + 0.47536961], + [-0.9665554 , -0.58826587, 0.23297648, 0.55627469, 1. , + -0.46666491], + [ 0.22423734, -0.44069024, 0.75137473, 0.47536961, -0.46666491, + 1. ]]) + + """ + if bias is not np._NoValue or ddof is not np._NoValue: + # 2015-03-15, 1.10 + warnings.warn('bias and ddof have no effect and are deprecated', + DeprecationWarning, stacklevel=2) + c = cov(x, y, rowvar, dtype=dtype) + try: + d = diag(c) + except ValueError: + # scalar covariance + # nan if incorrect value (nan, inf, 0), 1 otherwise + return c / c + stddev = sqrt(d.real) + c /= stddev[:, None] + c /= stddev[None, :] + + # Clip real and imaginary parts to [-1, 1]. This does not guarantee + # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without + # excessive work. + np.clip(c.real, -1, 1, out=c.real) + if np.iscomplexobj(c): + np.clip(c.imag, -1, 1, out=c.imag) + + return c + + +@set_module('numpy') +def blackman(M): + """ + Return the Blackman window. + + The Blackman window is a taper formed by using the first three + terms of a summation of cosines. It was designed to have close to the + minimal leakage possible. It is close to optimal, only slightly worse + than a Kaiser window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + + Returns + ------- + out : ndarray + The window, with the maximum value normalized to one (the value one + appears only if the number of samples is odd). + + See Also + -------- + bartlett, hamming, hanning, kaiser + + Notes + ----- + The Blackman window is defined as + + .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) + + Most references to the Blackman window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. It is known as a + "near optimal" tapering function, almost as good (by some measures) + as the kaiser window. + + References + ---------- + Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, + Dover Publications, New York. + + Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. + Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. 
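+
+ As a cross-check of the formula above (note that the implementation uses
+ ``M - 1`` rather than ``M`` in the cosine denominators):
+
+ >>> M = 12
+ >>> k = np.arange(M)
+ >>> w = 0.42 - 0.5*np.cos(2*np.pi*k/(M-1)) + 0.08*np.cos(4*np.pi*k/(M-1))
+ >>> np.allclose(w, np.blackman(M))
+ True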
+
+ Examples
+ --------
+ >>> import matplotlib.pyplot as plt
+ >>> np.blackman(12)
+ array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary
+ 4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
+ 9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
+ 1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
+
+ Plot the window and the frequency response:
+
+ >>> from numpy.fft import fft, fftshift
+ >>> window = np.blackman(51)
+ >>> plt.plot(window)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Blackman window")
+ Text(0.5, 1.0, 'Blackman window')
+ >>> plt.ylabel("Amplitude")
+ Text(0, 0.5, 'Amplitude')
+ >>> plt.xlabel("Sample")
+ Text(0.5, 0, 'Sample')
+ >>> plt.show()
+
+ >>> plt.figure()
+ <Figure size 640x480 with 0 Axes>
+ >>> A = fft(window, 2048) / 25.5 + >>> mag = np.abs(fftshift(A)) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> with np.errstate(divide='ignore', invalid='ignore'): + ... response = 20 * np.log10(mag) + ... + >>> response = np.clip(response, -100, 100) + >>> plt.plot(freq, response) + [] + >>> plt.title("Frequency response of Blackman window") + Text(0.5, 1.0, 'Frequency response of Blackman window') + >>> plt.ylabel("Magnitude [dB]") + Text(0, 0.5, 'Magnitude [dB]') + >>> plt.xlabel("Normalized frequency [cycles per sample]") + Text(0.5, 0, 'Normalized frequency [cycles per sample]') + >>> _ = plt.axis('tight') + >>> plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1-M, M, 2) + return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1)) + + +@set_module('numpy') +def bartlett(M): + """ + Return the Bartlett window. + + The Bartlett window is very similar to a triangular window, except + that the end points are at zero. It is often used in signal + processing for tapering a signal, without generating too much + ripple in the frequency domain. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : array + The triangular window, with the maximum value normalized to one + (the value one appears only if the number of samples is odd), with + the first and last samples equal to zero. + + See Also + -------- + blackman, hamming, hanning, kaiser + + Notes + ----- + The Bartlett window is defined as + + .. math:: w(n) = \\frac{2}{M-1} \\left( + \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| + \\right) + + Most references to the Bartlett window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. Note that convolution with this window produces linear + interpolation. It is also known as an apodization (which means "removing + the foot", i.e. smoothing discontinuities at the beginning and end of the + sampled signal) or tapering function. The Fourier transform of the + Bartlett window is the product of two sinc functions. Note the excellent + discussion in Kanasewich [2]_. + + References + ---------- + .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", + Biometrika 37, 1-16, 1950. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 109-110. + .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal + Processing", Prentice-Hall, 1999, pp. 468-471. + .. [4] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 429. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> np.bartlett(12) + array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary + 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, + 0.18181818, 0. 
]) + + Plot the window and its frequency response (requires SciPy and matplotlib): + + >>> from numpy.fft import fft, fftshift + >>> window = np.bartlett(51) + >>> plt.plot(window) + [] + >>> plt.title("Bartlett window") + Text(0.5, 1.0, 'Bartlett window') + >>> plt.ylabel("Amplitude") + Text(0, 0.5, 'Amplitude') + >>> plt.xlabel("Sample") + Text(0.5, 0, 'Sample') + >>> plt.show() + + >>> plt.figure() +
+ >>> A = fft(window, 2048) / 25.5 + >>> mag = np.abs(fftshift(A)) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> with np.errstate(divide='ignore', invalid='ignore'): + ... response = 20 * np.log10(mag) + ... + >>> response = np.clip(response, -100, 100) + >>> plt.plot(freq, response) + [] + >>> plt.title("Frequency response of Bartlett window") + Text(0.5, 1.0, 'Frequency response of Bartlett window') + >>> plt.ylabel("Magnitude [dB]") + Text(0, 0.5, 'Magnitude [dB]') + >>> plt.xlabel("Normalized frequency [cycles per sample]") + Text(0.5, 0, 'Normalized frequency [cycles per sample]') + >>> _ = plt.axis('tight') + >>> plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1-M, M, 2) + return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1)) + + +@set_module('numpy') +def hanning(M): + """ + Return the Hanning window. + + The Hanning window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray, shape(M,) + The window, with the maximum value normalized to one (the value + one appears only if `M` is odd). + + See Also + -------- + bartlett, blackman, hamming, kaiser + + Notes + ----- + The Hanning window is defined as + + .. math:: w(n) = 0.5 - 0.5\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hanning was named for Julius von Hann, an Austrian meteorologist. + It is also known as the Cosine Bell. Some authors prefer that it be + called a Hann window, to help avoid confusion with the very similar + Hamming window. + + Most references to the Hanning window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 106-108. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. + + Examples + -------- + >>> np.hanning(12) + array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, + 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, + 0.07937323, 0. ]) + + Plot the window and its frequency response: + + >>> import matplotlib.pyplot as plt + >>> from numpy.fft import fft, fftshift + >>> window = np.hanning(51) + >>> plt.plot(window) + [] + >>> plt.title("Hann window") + Text(0.5, 1.0, 'Hann window') + >>> plt.ylabel("Amplitude") + Text(0, 0.5, 'Amplitude') + >>> plt.xlabel("Sample") + Text(0.5, 0, 'Sample') + >>> plt.show() + + >>> plt.figure() +
+ >>> A = fft(window, 2048) / 25.5 + >>> mag = np.abs(fftshift(A)) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> with np.errstate(divide='ignore', invalid='ignore'): + ... response = 20 * np.log10(mag) + ... + >>> response = np.clip(response, -100, 100) + >>> plt.plot(freq, response) + [] + >>> plt.title("Frequency response of the Hann window") + Text(0.5, 1.0, 'Frequency response of the Hann window') + >>> plt.ylabel("Magnitude [dB]") + Text(0, 0.5, 'Magnitude [dB]') + >>> plt.xlabel("Normalized frequency [cycles per sample]") + Text(0.5, 0, 'Normalized frequency [cycles per sample]') + >>> plt.axis('tight') + ... + >>> plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1-M, M, 2) + return 0.5 + 0.5*cos(pi*n/(M-1)) + + +@set_module('numpy') +def hamming(M): + """ + Return the Hamming window. + + The Hamming window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hanning, kaiser + + Notes + ----- + The Hamming window is defined as + + .. math:: w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hamming was named for R. W. Hamming, an associate of J. W. Tukey + and is described in Blackman and Tukey. It was recommended for + smoothing the truncated autocovariance function in the time domain. + Most references to the Hamming window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 109-110. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. + + Examples + -------- + >>> np.hamming(12) + array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary + 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, + 0.15302337, 0.08 ]) + + Plot the window and the frequency response: + + >>> import matplotlib.pyplot as plt + >>> from numpy.fft import fft, fftshift + >>> window = np.hamming(51) + >>> plt.plot(window) + [] + >>> plt.title("Hamming window") + Text(0.5, 1.0, 'Hamming window') + >>> plt.ylabel("Amplitude") + Text(0, 0.5, 'Amplitude') + >>> plt.xlabel("Sample") + Text(0.5, 0, 'Sample') + >>> plt.show() + + >>> plt.figure() +
+ >>> A = fft(window, 2048) / 25.5 + >>> mag = np.abs(fftshift(A)) + >>> freq = np.linspace(-0.5, 0.5, len(A)) + >>> response = 20 * np.log10(mag) + >>> response = np.clip(response, -100, 100) + >>> plt.plot(freq, response) + [] + >>> plt.title("Frequency response of Hamming window") + Text(0.5, 1.0, 'Frequency response of Hamming window') + >>> plt.ylabel("Magnitude [dB]") + Text(0, 0.5, 'Magnitude [dB]') + >>> plt.xlabel("Normalized frequency [cycles per sample]") + Text(0.5, 0, 'Normalized frequency [cycles per sample]') + >>> plt.axis('tight') + ... + >>> plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1-M, M, 2) + return 0.54 + 0.46*cos(pi*n/(M-1)) + + +## Code from cephes for i0 + +_i0A = [ + -4.41534164647933937950E-18, + 3.33079451882223809783E-17, + -2.43127984654795469359E-16, + 1.71539128555513303061E-15, + -1.16853328779934516808E-14, + 7.67618549860493561688E-14, + -4.85644678311192946090E-13, + 2.95505266312963983461E-12, + -1.72682629144155570723E-11, + 9.67580903537323691224E-11, + -5.18979560163526290666E-10, + 2.65982372468238665035E-9, + -1.30002500998624804212E-8, + 6.04699502254191894932E-8, + -2.67079385394061173391E-7, + 1.11738753912010371815E-6, + -4.41673835845875056359E-6, + 1.64484480707288970893E-5, + -5.75419501008210370398E-5, + 1.88502885095841655729E-4, + -5.76375574538582365885E-4, + 1.63947561694133579842E-3, + -4.32430999505057594430E-3, + 1.05464603945949983183E-2, + -2.37374148058994688156E-2, + 4.93052842396707084878E-2, + -9.49010970480476444210E-2, + 1.71620901522208775349E-1, + -3.04682672343198398683E-1, + 6.76795274409476084995E-1 + ] + +_i0B = [ + -7.23318048787475395456E-18, + -4.83050448594418207126E-18, + 4.46562142029675999901E-17, + 3.46122286769746109310E-17, + -2.82762398051658348494E-16, + -3.42548561967721913462E-16, + 1.77256013305652638360E-15, + 3.81168066935262242075E-15, + -9.55484669882830764870E-15, + -4.15056934728722208663E-14, + 1.54008621752140982691E-14, + 3.85277838274214270114E-13, + 7.18012445138366623367E-13, + -1.79417853150680611778E-12, + -1.32158118404477131188E-11, + -3.14991652796324136454E-11, + 1.18891471078464383424E-11, + 4.94060238822496958910E-10, + 3.39623202570838634515E-9, + 2.26666899049817806459E-8, + 2.04891858946906374183E-7, + 2.89137052083475648297E-6, + 6.88975834691682398426E-5, + 3.36911647825569408990E-3, + 8.04490411014108831608E-1 + ] + + +def _chbevl(x, vals): + b0 = vals[0] + b1 = 0.0 + + for i in range(1, len(vals)): + b2 = b1 + b1 = b0 + b0 = x*b1 - b2 + vals[i] + + return 0.5*(b0 - b2) + + +def _i0_1(x): + return exp(x) * _chbevl(x/2.0-2, _i0A) + + +def _i0_2(x): + return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) + + +def _i0_dispatcher(x): + return (x,) + + +@array_function_dispatch(_i0_dispatcher) +def i0(x): + """ + Modified Bessel function of the first kind, order 0. + + Usually denoted :math:`I_0`. + + Parameters + ---------- + x : array_like of float + Argument of the Bessel function. + + Returns + ------- + out : ndarray, shape = x.shape, dtype = float + The modified Bessel function evaluated at each of the elements of `x`. 
+ + See Also + -------- + scipy.special.i0, scipy.special.iv, scipy.special.ive + + Notes + ----- + The scipy implementation is recommended over this function: it is a + proper ufunc written in C, and more than an order of magnitude faster. + + We use the algorithm published by Clenshaw [1]_ and referenced by + Abramowitz and Stegun [2]_, for which the function domain is + partitioned into the two intervals [0,8] and (8,inf), and Chebyshev + polynomial expansions are employed in each interval. Relative error on + the domain [0,30] using IEEE arithmetic is documented [3]_ as having a + peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). + + References + ---------- + .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in + *National Physical Laboratory Mathematical Tables*, vol. 5, London: + Her Majesty's Stationery Office, 1962. + .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical + Functions*, 10th printing, New York: Dover, 1964, pp. 379. + https://personal.math.ubc.ca/~cbm/aands/page_379.htm + .. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero + + Examples + -------- + >>> np.i0(0.) + array(1.0) + >>> np.i0([0, 1, 2, 3]) + array([1. , 1.26606588, 2.2795853 , 4.88079259]) + + """ + x = np.asanyarray(x) + if x.dtype.kind == 'c': + raise TypeError("i0 not supported for complex values") + if x.dtype.kind != 'f': + x = x.astype(float) + x = np.abs(x) + return piecewise(x, [x <= 8.0], [_i0_1, _i0_2]) + +## End of cephes code for i0 + + +@set_module('numpy') +def kaiser(M, beta): + """ + Return the Kaiser window. + + The Kaiser window is a taper formed by using a Bessel function. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + beta : float + Shape parameter for window. + + Returns + ------- + out : array + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hamming, hanning + + Notes + ----- + The Kaiser window is defined as + + .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} + \\right)/I_0(\\beta) + + with + + .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, + + where :math:`I_0` is the modified zeroth-order Bessel function. + + The Kaiser was named for Jim Kaiser, who discovered a simple + approximation to the DPSS window based on Bessel functions. The Kaiser + window is a very good approximation to the Digital Prolate Spheroidal + Sequence, or Slepian window, which is the transform which maximizes the + energy in the main lobe of the window relative to total energy. + + The Kaiser can approximate many other windows by varying the beta + parameter. + + ==== ======================= + beta Window shape + ==== ======================= + 0 Rectangular + 5 Similar to a Hamming + 6 Similar to a Hanning + 8.6 Similar to a Blackman + ==== ======================= + + A beta value of 14 is probably a good starting point. Note that as beta + gets large, the window narrows, and so the number of samples needs to be + large enough to sample the increasingly narrow spike, otherwise NaNs will + get returned. + + Most references to the Kaiser window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. 
smoothing discontinuities at the beginning
+ and end of the sampled signal) or tapering function.
+
+ References
+ ----------
+ .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
+ digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
+ John Wiley and Sons, New York, (1966).
+ .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
+ University of Alberta Press, 1975, pp. 177-178.
+ .. [3] Wikipedia, "Window function",
+ https://en.wikipedia.org/wiki/Window_function
+
+ Examples
+ --------
+ >>> import matplotlib.pyplot as plt
+ >>> np.kaiser(12, 14)
+ array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary
+ 2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
+ 9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
+ 4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
+
+
+ Plot the window and the frequency response:
+
+ >>> from numpy.fft import fft, fftshift
+ >>> window = np.kaiser(51, 14)
+ >>> plt.plot(window)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Kaiser window")
+ Text(0.5, 1.0, 'Kaiser window')
+ >>> plt.ylabel("Amplitude")
+ Text(0, 0.5, 'Amplitude')
+ >>> plt.xlabel("Sample")
+ Text(0.5, 0, 'Sample')
+ >>> plt.show()
+
+ >>> plt.figure()
+
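+ The beta table in the Notes can be spot-checked (example added for
+ illustration): ``beta = 0`` reduces the Kaiser window to a rectangular
+ window, since :math:`I_0(0) = 1`:
+
+ >>> np.allclose(np.kaiser(12, 0), np.ones(12))
+ True
+
+ Plot the frequency response:
+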
+ >>> A = fft(window, 2048) / 25.5
+ >>> mag = np.abs(fftshift(A))
+ >>> freq = np.linspace(-0.5, 0.5, len(A))
+ >>> response = 20 * np.log10(mag)
+ >>> response = np.clip(response, -100, 100)
+ >>> plt.plot(freq, response)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Frequency response of Kaiser window")
+ Text(0.5, 1.0, 'Frequency response of Kaiser window')
+ >>> plt.ylabel("Magnitude [dB]")
+ Text(0, 0.5, 'Magnitude [dB]')
+ >>> plt.xlabel("Normalized frequency [cycles per sample]")
+ Text(0.5, 0, 'Normalized frequency [cycles per sample]')
+ >>> plt.axis('tight')
+ (-0.5, 0.5, -100.0, ...) # may vary
+ >>> plt.show()
+
+ """
+ # Ensures at least float64 via 0.0. M should be an integer, but conversion
+ # to double is safe for a range. (Simplified result_type with 0.0
+ # strongly typed. result-type is not/less order sensitive, but that mainly
+ # matters for integers anyway.)
+ values = np.array([0.0, M, beta])
+ M = values[1]
+ beta = values[2]
+
+ if M == 1:
+ return np.ones(1, dtype=values.dtype)
+ n = arange(0, M)
+ alpha = (M-1)/2.0
+ return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(beta)
+
+
+ def _sinc_dispatcher(x):
+ return (x,)
+
+
+ @array_function_dispatch(_sinc_dispatcher)
+ def sinc(x):
+ r"""
+ Return the normalized sinc function.
+
+ The sinc function is equal to :math:`\sin(\pi x)/(\pi x)` for any argument
+ :math:`x\ne 0`. ``sinc(0)`` takes the limit value 1, making ``sinc`` not
+ only everywhere continuous but also infinitely differentiable.
+
+ .. note::
+
+ Note the normalization factor of ``pi`` used in the definition.
+ This is the most commonly used definition in signal processing.
+ Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function
+ :math:`\sin(x)/x` that is more common in mathematics.
+
+ Parameters
+ ----------
+ x : ndarray
+ Array (possibly multi-dimensional) of values for which to calculate
+ ``sinc(x)``.
+
+ Returns
+ -------
+ out : ndarray
+ ``sinc(x)``, which has the same shape as the input.
+
+ Notes
+ -----
+ The name sinc is short for "sine cardinal" or "sinus cardinalis".
+
+ The sinc function is used in various signal processing applications,
+ including in anti-aliasing, in the construction of a Lanczos resampling
+ filter, and in interpolation.
+
+ For bandlimited interpolation of discrete-time signals, the ideal
+ interpolation kernel is proportional to the sinc function.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
+ Resource. http://mathworld.wolfram.com/SincFunction.html
+ ..
[2] Wikipedia, "Sinc function", + https://en.wikipedia.org/wiki/Sinc_function + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-4, 4, 41) + >>> np.sinc(x) + array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary + -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, + 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, + 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, + -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, + 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, + 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, + 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, + 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, + -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, + -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, + 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, + -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, + -4.92362781e-02, -3.89804309e-17]) + + >>> plt.plot(x, np.sinc(x)) + [] + >>> plt.title("Sinc Function") + Text(0.5, 1.0, 'Sinc Function') + >>> plt.ylabel("Amplitude") + Text(0, 0.5, 'Amplitude') + >>> plt.xlabel("X") + Text(0.5, 0, 'X') + >>> plt.show() + + """ + x = np.asanyarray(x) + y = pi * where(x == 0, 1.0e-20, x) + return sin(y)/y + + +def _msort_dispatcher(a): + return (a,) + + +@array_function_dispatch(_msort_dispatcher) +def msort(a): + """ + Return a copy of an array sorted along the first axis. + + .. deprecated:: 1.24 + + msort is deprecated, use ``np.sort(a, axis=0)`` instead. + + Parameters + ---------- + a : array_like + Array to be sorted. + + Returns + ------- + sorted_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + sort + + Notes + ----- + ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. + + Examples + -------- + >>> a = np.array([[1, 4], [3, 1]]) + >>> np.msort(a) # sort along the first axis + array([[1, 1], + [3, 4]]) + + """ + # 2022-10-20 1.24 + warnings.warn( + "msort is deprecated, use np.sort(a, axis=0) instead", + DeprecationWarning, + stacklevel=2, + ) + b = array(a, subok=True, copy=True) + b.sort(0) + return b + + +def _ureduce(a, func, keepdims=False, **kwargs): + """ + Internal Function. + Call `func` with `a` as first argument swapping the axes to use extended + axis on functions that don't support it natively. + + Returns result and a.shape with axis dims set to 1. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + func : callable + Reduction function capable of receiving a single axis argument. + It is called with `a` as first argument followed by `kwargs`. + kwargs : keyword arguments + additional keyword arguments to pass to `func`. + + Returns + ------- + result : tuple + Result of func(a, **kwargs) and a.shape with axis dims set to 1 + which can be used to reshape the result to the same shape a ufunc with + keepdims=True would produce. 
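+
+ Notes
+ -----
+ Illustration (added; the function itself uses ``swapaxes`` rather than
+ ``moveaxis``): reducing over ``axis=(0, 2)`` of a ``(2, 3, 4)`` array is
+ equivalent to moving the kept axis to the front and merging the reduced
+ axes::
+
+ a = np.arange(24).reshape(2, 3, 4)
+ b = np.moveaxis(a, 1, 0).reshape(3, -1)
+ assert np.array_equal(np.median(a, axis=(0, 2)), np.median(b, axis=-1))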
+ + """ + a = np.asanyarray(a) + axis = kwargs.get('axis', None) + out = kwargs.get('out', None) + + if keepdims is np._NoValue: + keepdims = False + + nd = a.ndim + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, nd) + + if keepdims: + if out is not None: + index_out = tuple( + 0 if i in axis else slice(None) for i in range(nd)) + kwargs['out'] = out[(Ellipsis, ) + index_out] + + if len(axis) == 1: + kwargs['axis'] = axis[0] + else: + keep = set(range(nd)) - set(axis) + nkeep = len(keep) + # swap axis that should not be reduced to front + for i, s in enumerate(sorted(keep)): + a = a.swapaxes(i, s) + # merge reduced axis + a = a.reshape(a.shape[:nkeep] + (-1,)) + kwargs['axis'] = -1 + else: + if keepdims: + if out is not None: + index_out = (0, ) * nd + kwargs['out'] = out[(Ellipsis, ) + index_out] + + r = func(a, **kwargs) + + if out is not None: + return out + + if keepdims: + if axis is None: + index_r = (np.newaxis, ) * nd + else: + index_r = tuple( + np.newaxis if i in axis else slice(None) + for i in range(nd)) + r = r[(Ellipsis, ) + index_r] + + return r + + +def _median_dispatcher( + a, axis=None, out=None, overwrite_input=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_median_dispatcher) +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): + """ + Compute the median along the specified axis. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : {int, sequence of int, None}, optional + Axis or axes along which the medians are computed. The default + is to compute the median along a flattened version of the array. + A sequence of axes is supported since version 1.9.0. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array `a` for + calculations. The input array will be modified by the call to + `median`. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. If `overwrite_input` is ``True`` and `a` is not already an + `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + .. versionadded:: 1.9.0 + + Returns + ------- + median : ndarray + A new array holding the result. If the input contains integers + or floats smaller than ``float64``, then the output data-type is + ``np.float64``. Otherwise, the data-type of the output is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean, percentile + + Notes + ----- + Given a vector ``V`` of length ``N``, the median of ``V`` is the + middle value of a sorted copy of ``V``, ``V_sorted`` - i + e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the + two middle values of ``V_sorted`` when ``N`` is even. 
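+
+ For instance (example added for concreteness), with ``V = [1, 2, 3, 4]``
+ (even ``N``) the median is the average of the two middle values,
+ ``(2 + 3) / 2 = 2.5``:
+
+ >>> np.median([1, 2, 3, 4])
+ 2.5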
+ + Examples + -------- + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.median(a) + 3.5 + >>> np.median(a, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.median(a, axis=1) + array([7., 2.]) + >>> m = np.median(a, axis=0) + >>> out = np.zeros_like(m) + >>> np.median(a, axis=0, out=m) + array([6.5, 4.5, 2.5]) + >>> m + array([6.5, 4.5, 2.5]) + >>> b = a.copy() + >>> np.median(b, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + >>> b = a.copy() + >>> np.median(b, axis=None, overwrite_input=True) + 3.5 + >>> assert not np.all(a==b) + + """ + return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out, + overwrite_input=overwrite_input) + + +def _median(a, axis=None, out=None, overwrite_input=False): + # can't be reasonably be implemented in terms of percentile as we have to + # call mean to not break astropy + a = np.asanyarray(a) + + # Set the partition indexes + if axis is None: + sz = a.size + else: + sz = a.shape[axis] + if sz % 2 == 0: + szh = sz // 2 + kth = [szh - 1, szh] + else: + kth = [(sz - 1) // 2] + + # We have to check for NaNs (as of writing 'M' doesn't actually work). + supports_nans = np.issubdtype(a.dtype, np.inexact) or a.dtype.kind in 'Mm' + if supports_nans: + kth.append(-1) + + if overwrite_input: + if axis is None: + part = a.ravel() + part.partition(kth) + else: + a.partition(kth, axis=axis) + part = a + else: + part = partition(a, kth, axis=axis) + + if part.shape == (): + # make 0-D arrays work + return part.item() + if axis is None: + axis = 0 + + indexer = [slice(None)] * part.ndim + index = part.shape[axis] // 2 + if part.shape[axis] % 2 == 1: + # index with slice to allow mean (below) to work + indexer[axis] = slice(index, index+1) + else: + indexer[axis] = slice(index-1, index+1) + indexer = tuple(indexer) + + # Use mean in both odd and even case to coerce data type, + # using out array if needed. + rout = mean(part[indexer], axis=axis, out=out) + if supports_nans and sz > 0: + # If nans are possible, warn and replace by nans like mean would. + rout = np.lib.utils._median_nancheck(part, rout, axis) + + return rout + + +def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, interpolation=None): + return (a, q, out) + + +@array_function_dispatch(_percentile_dispatcher) +def percentile(a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False, + *, + interpolation=None): + """ + Compute the q-th percentile of the data along the specified axis. + + Returns the q-th percentile(s) of the array elements. + + Parameters + ---------- + a : array_like of real numbers + Input array or object that can be converted to an array. + q : array_like of float + Percentage or sequence of percentages for the percentiles to compute. + Values must be between 0 and 100 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the percentiles are computed. The + default is to compute the percentile(s) along a flattened + version of the array. + + .. versionchanged:: 1.9.0 + A tuple of axes is supported + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. 
In this case, the contents of the input + `a` after this function completes is undefined. + method : str, optional + This parameter specifies the method to use for estimating the + percentile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + .. versionadded:: 1.9.0 + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean + median : equivalent to ``percentile(..., 50)`` + nanpercentile + quantile : equivalent to percentile, except q in the range [0, 1]. + + Notes + ----- + Given a vector ``V`` of length ``n``, the q-th percentile of ``V`` is + the value ``q/100`` of the way from the minimum to the maximum in a + sorted copy of ``V``. The values and distances of the two nearest + neighbors as well as the `method` parameter will determine the + percentile if the normalized ranking does not match the location of + ``q`` exactly. This function is the same as the median if ``q=50``, the + same as the minimum if ``q=0`` and the same as the maximum if + ``q=100``. + + The optional `method` parameter specifies the method to use when the + desired percentile lies between two indexes ``i`` and ``j = i + 1``. + In that case, we first determine ``i + g``, a virtual index that lies + between ``i`` and ``j``, where ``i`` is the floor and ``g`` is the + fractional part of the index. The final result is, then, an interpolation + of ``a[i]`` and ``a[j]`` based on ``g``. During the computation of ``g``, + ``i`` and ``j`` are modified using correction constants ``alpha`` and + ``beta`` whose choices depend on the ``method`` used. Finally, note that + since Python uses 0-based indexing, the code subtracts another 1 from the + index internally. + + The following formula determines the virtual index ``i + g``, the location + of the percentile in the sorted sample: + + .. math:: + i + g = (q / 100) * ( n - alpha - beta + 1 ) + alpha + + The different methods then work as follows + + inverted_cdf: + method 1 of H&F [1]_. 
+ This method gives discontinuous results:
+
+ * if g > 0 ; then take j
+ * if g = 0 ; then take i
+
+ averaged_inverted_cdf:
+ method 2 of H&F [1]_.
+ This method gives discontinuous results:
+
+ * if g > 0 ; then take j
+ * if g = 0 ; then average between bounds
+
+ closest_observation:
+ method 3 of H&F [1]_.
+ This method gives discontinuous results:
+
+ * if g > 0 ; then take j
+ * if g = 0 and index is odd ; then take j
+ * if g = 0 and index is even ; then take i
+
+ interpolated_inverted_cdf:
+ method 4 of H&F [1]_.
+ This method gives continuous results using:
+
+ * alpha = 0
+ * beta = 1
+
+ hazen:
+ method 5 of H&F [1]_.
+ This method gives continuous results using:
+
+ * alpha = 1/2
+ * beta = 1/2
+
+ weibull:
+ method 6 of H&F [1]_.
+ This method gives continuous results using:
+
+ * alpha = 0
+ * beta = 0
+
+ linear:
+ method 7 of H&F [1]_.
+ This method gives continuous results using:
+
+ * alpha = 1
+ * beta = 1
+
+ median_unbiased:
+ method 8 of H&F [1]_.
+ This method is probably the best method if the sample
+ distribution function is unknown (see reference).
+ This method gives continuous results using:
+
+ * alpha = 1/3
+ * beta = 1/3
+
+ normal_unbiased:
+ method 9 of H&F [1]_.
+ This method is probably the best method if the sample
+ distribution function is known to be normal.
+ This method gives continuous results using:
+
+ * alpha = 3/8
+ * beta = 3/8
+
+ lower:
+ NumPy method kept for backwards compatibility.
+ Takes ``i`` as the interpolation point.
+
+ higher:
+ NumPy method kept for backwards compatibility.
+ Takes ``j`` as the interpolation point.
+
+ nearest:
+ NumPy method kept for backwards compatibility.
+ Takes ``i`` or ``j``, whichever is nearest.
+
+ midpoint:
+ NumPy method kept for backwards compatibility.
+ Uses ``(i + j) / 2``.
+
+ Examples
+ --------
+ >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+ >>> a
+ array([[10, 7, 4],
+ [ 3, 2, 1]])
+ >>> np.percentile(a, 50)
+ 3.5
+ >>> np.percentile(a, 50, axis=0)
+ array([6.5, 4.5, 2.5])
+ >>> np.percentile(a, 50, axis=1)
+ array([7., 2.])
+ >>> np.percentile(a, 50, axis=1, keepdims=True)
+ array([[7.],
+ [2.]])
+
+ >>> m = np.percentile(a, 50, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.percentile(a, 50, axis=0, out=out)
+ array([6.5, 4.5, 2.5])
+ >>> m
+ array([6.5, 4.5, 2.5])
+
+ >>> b = a.copy()
+ >>> np.percentile(b, 50, axis=1, overwrite_input=True)
+ array([7., 2.])
+ >>> assert not np.all(a == b)
+
+ The different methods can be visualized graphically:
+
+ .. plot::
+
+ import matplotlib.pyplot as plt
+
+ a = np.arange(4)
+ p = np.linspace(0, 100, 6001)
+ ax = plt.gca()
+ lines = [
+ ('linear', '-', 'C0'),
+ ('inverted_cdf', ':', 'C1'),
+ # Almost the same as `inverted_cdf`:
+ ('averaged_inverted_cdf', '-.', 'C1'),
+ ('closest_observation', ':', 'C2'),
+ ('interpolated_inverted_cdf', '--', 'C1'),
+ ('hazen', '--', 'C3'),
+ ('weibull', '-.', 'C4'),
+ ('median_unbiased', '--', 'C5'),
+ ('normal_unbiased', '-.', 'C6'),
+ ]
+ for method, style, color in lines:
+ ax.plot(
+ p, np.percentile(a, p, method=method),
+ label=method, linestyle=style, color=color)
+ ax.set(
+ title='Percentiles for different methods and data: ' + str(a),
+ xlabel='Percentile',
+ ylabel='Estimated percentile value',
+ yticks=a)
+ ax.legend(bbox_to_anchor=(1.03, 1))
+ plt.tight_layout()
+ plt.show()
+
+ References
+ ----------
+ .. [1] R. J. Hyndman and Y. Fan,
+ "Sample quantiles in statistical packages,"
+ The American Statistician, 50(4), pp.
361-365, 1996
+
+ """
+ if interpolation is not None:
+ method = _check_interpolation_as_method(
+ method, interpolation, "percentile")
+
+ a = np.asanyarray(a)
+ if a.dtype.kind == "c":
+ raise TypeError("a must be an array of real numbers")
+
+ q = np.true_divide(q, 100)
+ q = asanyarray(q) # undo any decay that the ufunc performed (see gh-13105)
+ if not _quantile_is_valid(q):
+ raise ValueError("Percentiles must be in the range [0, 100]")
+ return _quantile_unchecked(
+ a, q, axis, out, overwrite_input, method, keepdims)
+
+
+ def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ method=None, keepdims=None, *, interpolation=None):
+ return (a, q, out)
+
+
+ @array_function_dispatch(_quantile_dispatcher)
+ def quantile(a,
+ q,
+ axis=None,
+ out=None,
+ overwrite_input=False,
+ method="linear",
+ keepdims=False,
+ *,
+ interpolation=None):
+ """
+ Compute the q-th quantile of the data along the specified axis.
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ a : array_like of real numbers
+ Input array or object that can be converted to an array.
+ q : array_like of float
+ Probability or sequence of probabilities for the quantiles to compute.
+ Values must be between 0 and 1 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the quantiles are computed. The default is
+ to compute the quantile(s) along a flattened version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape and buffer length as the expected output, but the
+ type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by
+ intermediate calculations, to save memory. In this case, the
+ contents of the input `a` after this function completes is
+ undefined.
+ method : str, optional
+ This parameter specifies the method to use for estimating the
+ quantile. There are many different methods, some unique to NumPy.
+ See the notes for explanation. The options sorted by their R type
+ as summarized in the H&F paper [1]_ are:
+
+ 1. 'inverted_cdf'
+ 2. 'averaged_inverted_cdf'
+ 3. 'closest_observation'
+ 4. 'interpolated_inverted_cdf'
+ 5. 'hazen'
+ 6. 'weibull'
+ 7. 'linear' (default)
+ 8. 'median_unbiased'
+ 9. 'normal_unbiased'
+
+ The first three methods are discontinuous. NumPy further defines the
+ following discontinuous variations of the default 'linear' (7.) option:
+
+ * 'lower'
+ * 'higher'
+ * 'midpoint'
+ * 'nearest'
+
+ .. versionchanged:: 1.22.0
+ This argument was previously called "interpolation" and only
+ offered the "linear" default and last four options.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ interpolation : str, optional
+ Deprecated name for the method keyword argument.
+
+ .. deprecated:: 1.22.0
+
+ Returns
+ -------
+ quantile : scalar or ndarray
+ If `q` is a single probability and `axis=None`, then the result
+ is a scalar. If multiple probability levels are given, first axis of
+ the result corresponds to the quantiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input.
If `out` is specified, that array is + returned instead. + + See Also + -------- + mean + percentile : equivalent to quantile, but with q in the range [0, 100]. + median : equivalent to ``quantile(..., 0.5)`` + nanquantile + + Notes + ----- + Given a vector ``V`` of length ``n``, the q-th quantile of ``V`` is + the value ``q`` of the way from the minimum to the maximum in a + sorted copy of ``V``. The values and distances of the two nearest + neighbors as well as the `method` parameter will determine the + quantile if the normalized ranking does not match the location of + ``q`` exactly. This function is the same as the median if ``q=0.5``, the + same as the minimum if ``q=0.0`` and the same as the maximum if + ``q=1.0``. + + The optional `method` parameter specifies the method to use when the + desired quantile lies between two indexes ``i`` and ``j = i + 1``. + In that case, we first determine ``i + g``, a virtual index that lies + between ``i`` and ``j``, where ``i`` is the floor and ``g`` is the + fractional part of the index. The final result is, then, an interpolation + of ``a[i]`` and ``a[j]`` based on ``g``. During the computation of ``g``, + ``i`` and ``j`` are modified using correction constants ``alpha`` and + ``beta`` whose choices depend on the ``method`` used. Finally, note that + since Python uses 0-based indexing, the code subtracts another 1 from the + index internally. + + The following formula determines the virtual index ``i + g``, the location + of the quantile in the sorted sample: + + .. math:: + i + g = q * ( n - alpha - beta + 1 ) + alpha + + The different methods then work as follows + + inverted_cdf: + method 1 of H&F [1]_. + This method gives discontinuous results: + + * if g > 0 ; then take j + * if g = 0 ; then take i + + averaged_inverted_cdf: + method 2 of H&F [1]_. + This method gives discontinuous results: + + * if g > 0 ; then take j + * if g = 0 ; then average between bounds + + closest_observation: + method 3 of H&F [1]_. + This method gives discontinuous results: + + * if g > 0 ; then take j + * if g = 0 and index is odd ; then take j + * if g = 0 and index is even ; then take i + + interpolated_inverted_cdf: + method 4 of H&F [1]_. + This method gives continuous results using: + + * alpha = 0 + * beta = 1 + + hazen: + method 5 of H&F [1]_. + This method gives continuous results using: + + * alpha = 1/2 + * beta = 1/2 + + weibull: + method 6 of H&F [1]_. + This method gives continuous results using: + + * alpha = 0 + * beta = 0 + + linear: + method 7 of H&F [1]_. + This method gives continuous results using: + + * alpha = 1 + * beta = 1 + + median_unbiased: + method 8 of H&F [1]_. + This method is probably the best method if the sample + distribution function is unknown (see reference). + This method gives continuous results using: + + * alpha = 1/3 + * beta = 1/3 + + normal_unbiased: + method 9 of H&F [1]_. + This method is probably the best method if the sample + distribution function is known to be normal. + This method gives continuous results using: + + * alpha = 3/8 + * beta = 3/8 + + lower: + NumPy method kept for backwards compatibility. + Takes ``i`` as the interpolation point. + + higher: + NumPy method kept for backwards compatibility. + Takes ``j`` as the interpolation point. + + nearest: + NumPy method kept for backwards compatibility. + Takes ``i`` or ``j``, whichever is nearest. + + midpoint: + NumPy method kept for backwards compatibility. + Uses ``(i + j) / 2``. 
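+
+ As a worked instance of the virtual-index formula above (added for
+ illustration): for ``a = [1, 2, 3, 4]`` (``n = 4``) and ``q = 0.4``, the
+ default 'linear' method (``alpha = beta = 1``) gives
+ ``i + g = 0.4*(4 - 1 - 1 + 1) + 1 = 2.2``, i.e. the 0-based virtual
+ index 1.2, so the result interpolates between ``a[1] = 2`` and
+ ``a[2] = 3`` with ``g = 0.2``:
+
+ >>> np.quantile([1, 2, 3, 4], 0.4)
+ 2.2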
+ + Examples + -------- + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.quantile(a, 0.5) + 3.5 + >>> np.quantile(a, 0.5, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.quantile(a, 0.5, axis=1) + array([7., 2.]) + >>> np.quantile(a, 0.5, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.quantile(a, 0.5, axis=0) + >>> out = np.zeros_like(m) + >>> np.quantile(a, 0.5, axis=0, out=out) + array([6.5, 4.5, 2.5]) + >>> m + array([6.5, 4.5, 2.5]) + >>> b = a.copy() + >>> np.quantile(b, 0.5, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a == b) + + See also `numpy.percentile` for a visualization of most methods. + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + if interpolation is not None: + method = _check_interpolation_as_method( + method, interpolation, "quantile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + q = np.asanyarray(q) + if not _quantile_is_valid(q): + raise ValueError("Quantiles must be in the range [0, 1]") + return _quantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims) + + +def _quantile_unchecked(a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False): + """Assumes that q is in [0, 1], and is an ndarray""" + return _ureduce(a, + func=_quantile_ureduce_func, + q=q, + keepdims=keepdims, + axis=axis, + out=out, + overwrite_input=overwrite_input, + method=method) + + +def _quantile_is_valid(q): + # avoid expensive reductions, relevant for arrays with < O(1000) elements + if q.ndim == 1 and q.size < 10: + for i in range(q.size): + if not (0.0 <= q[i] <= 1.0): + return False + else: + if not (np.all(0 <= q) and np.all(q <= 1)): + return False + return True + + +def _check_interpolation_as_method(method, interpolation, fname): + # Deprecated NumPy 1.22, 2021-11-08 + warnings.warn( + f"the `interpolation=` argument to {fname} was renamed to " + "`method=`, which has additional options.\n" + "Users of the modes 'nearest', 'lower', 'higher', or " + "'midpoint' are encouraged to review the method they used. " + "(Deprecated NumPy 1.22)", + DeprecationWarning, stacklevel=4) + if method != "linear": + # sanity check, we assume this basically never happens + raise TypeError( + "You shall not pass both `method` and `interpolation`!\n" + "(`interpolation` is Deprecated in favor of `method`)") + return interpolation + + +def _compute_virtual_index(n, quantiles, alpha: float, beta: float): + """ + Compute the floating point indexes of an array for the linear + interpolation of quantiles. + n : array_like + The sample sizes. + quantiles : array_like + The quantiles values. + alpha : float + A constant used to correct the index computed. + beta : float + A constant used to correct the index computed. + + alpha and beta values depend on the chosen method + (see quantile documentation) + + Reference: + Hyndman&Fan paper "Sample Quantiles in Statistical Packages", + DOI: 10.1080/00031305.1996.10473566 + """ + return n * quantiles + ( + alpha + quantiles * (1 - alpha - beta) + ) - 1 + + +def _get_gamma(virtual_indexes, previous_indexes, method): + """ + Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation + of quantiles. + + virtual_indexes : array_like + The indexes where the percentile is supposed to be found in the sorted + sample. 
+ previous_indexes : array_like + The floor values of virtual_indexes. + interpolation : dict + The interpolation method chosen, which may have a specific rule + modifying gamma. + + gamma is usually the fractional part of virtual_indexes but can be modified + by the interpolation method. + """ + gamma = np.asanyarray(virtual_indexes - previous_indexes) + gamma = method["fix_gamma"](gamma, virtual_indexes) + return np.asanyarray(gamma) + + +def _lerp(a, b, t, out=None): + """ + Compute the linear interpolation weighted by gamma on each point of + two same shape array. + + a : array_like + Left bound. + b : array_like + Right bound. + t : array_like + The interpolation weight. + out : array_like + Output array. + """ + diff_b_a = subtract(b, a) + # asanyarray is a stop-gap until gh-13105 + lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out)) + subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5) + if lerp_interpolation.ndim == 0 and out is None: + lerp_interpolation = lerp_interpolation[()] # unpack 0d arrays + return lerp_interpolation + + +def _get_gamma_mask(shape, default_value, conditioned_value, where): + out = np.full(shape, default_value) + np.copyto(out, conditioned_value, where=where, casting="unsafe") + return out + + +def _discret_interpolation_to_boundaries(index, gamma_condition_fun): + previous = np.floor(index) + next = previous + 1 + gamma = index - previous + res = _get_gamma_mask(shape=index.shape, + default_value=next, + conditioned_value=previous, + where=gamma_condition_fun(gamma, index) + ).astype(np.intp) + # Some methods can lead to out-of-bound integers, clip them: + res[res < 0] = 0 + return res + + +def _closest_observation(n, quantiles): + gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 0) + return _discret_interpolation_to_boundaries((n * quantiles) - 1 - 0.5, + gamma_fun) + + +def _inverted_cdf(n, quantiles): + gamma_fun = lambda gamma, _: (gamma == 0) + return _discret_interpolation_to_boundaries((n * quantiles) - 1, + gamma_fun) + + +def _quantile_ureduce_func( + a: np.array, + q: np.array, + axis: int = None, + out=None, + overwrite_input: bool = False, + method="linear", +) -> np.array: + if q.ndim > 2: + # The code below works fine for nd, but it might not have useful + # semantics. For now, keep the supported dimensions the same as it was + # before. + raise ValueError("q must be a scalar or 1d") + if overwrite_input: + if axis is None: + axis = 0 + arr = a.ravel() + else: + arr = a + else: + if axis is None: + axis = 0 + arr = a.flatten() + else: + arr = a.copy() + result = _quantile(arr, + quantiles=q, + axis=axis, + method=method, + out=out) + return result + + +def _get_indexes(arr, virtual_indexes, valid_values_count): + """ + Get the valid indexes of arr neighbouring virtual_indexes. 
+ Note + This is a companion function to linear interpolation of + Quantiles + + Returns + ------- + (previous_indexes, next_indexes): Tuple + A Tuple of virtual_indexes neighbouring indexes + """ + previous_indexes = np.asanyarray(np.floor(virtual_indexes)) + next_indexes = np.asanyarray(previous_indexes + 1) + indexes_above_bounds = virtual_indexes >= valid_values_count - 1 + # When indexes is above max index, take the max value of the array + if indexes_above_bounds.any(): + previous_indexes[indexes_above_bounds] = -1 + next_indexes[indexes_above_bounds] = -1 + # When indexes is below min index, take the min value of the array + indexes_below_bounds = virtual_indexes < 0 + if indexes_below_bounds.any(): + previous_indexes[indexes_below_bounds] = 0 + next_indexes[indexes_below_bounds] = 0 + if np.issubdtype(arr.dtype, np.inexact): + # After the sort, slices having NaNs will have for last element a NaN + virtual_indexes_nans = np.isnan(virtual_indexes) + if virtual_indexes_nans.any(): + previous_indexes[virtual_indexes_nans] = -1 + next_indexes[virtual_indexes_nans] = -1 + previous_indexes = previous_indexes.astype(np.intp) + next_indexes = next_indexes.astype(np.intp) + return previous_indexes, next_indexes + + +def _quantile( + arr: np.array, + quantiles: np.array, + axis: int = -1, + method="linear", + out=None, +): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanpercentile for parameter usage + It computes the quantiles of the array for the given axis. + A linear interpolation is performed based on the `interpolation`. + + By default, the method is "linear" where alpha == beta == 1 which + performs the 7th method of Hyndman&Fan. + With "median_unbiased" we get alpha == beta == 1/3 + thus the 8th method of Hyndman&Fan. + """ + # --- Setup + arr = np.asanyarray(arr) + values_count = arr.shape[axis] + # The dimensions of `q` are prepended to the output shape, so we need the + # axis being sampled from `arr` to be last. + + if axis != 0: # But moveaxis is slow, so only call it if necessary. + arr = np.moveaxis(arr, axis, destination=0) + # --- Computation of indexes + # Index where to find the value in the sorted array. + # Virtual because it is a floating point value, not an valid index. + # The nearest neighbours are used for interpolation + try: + method = _QuantileMethods[method] + except KeyError: + raise ValueError( + f"{method!r} is not a valid method. 
Use one of: " + f"{_QuantileMethods.keys()}") from None + virtual_indexes = method["get_virtual_index"](values_count, quantiles) + virtual_indexes = np.asanyarray(virtual_indexes) + + supports_nans = ( + np.issubdtype(arr.dtype, np.inexact) or arr.dtype.kind in 'Mm') + + if np.issubdtype(virtual_indexes.dtype, np.integer): + # No interpolation needed, take the points along axis + if supports_nans: + # may contain nan, which would sort to the end + arr.partition(concatenate((virtual_indexes.ravel(), [-1])), axis=0) + slices_having_nans = np.isnan(arr[-1, ...]) + else: + # cannot contain nan + arr.partition(virtual_indexes.ravel(), axis=0) + slices_having_nans = np.array(False, dtype=bool) + result = take(arr, virtual_indexes, axis=0, out=out) + else: + previous_indexes, next_indexes = _get_indexes(arr, + virtual_indexes, + values_count) + # --- Sorting + arr.partition( + np.unique(np.concatenate(([0, -1], + previous_indexes.ravel(), + next_indexes.ravel(), + ))), + axis=0) + if supports_nans: + slices_having_nans = np.isnan(arr[-1, ...]) + else: + slices_having_nans = None + # --- Get values from indexes + previous = arr[previous_indexes] + next = arr[next_indexes] + # --- Linear interpolation + gamma = _get_gamma(virtual_indexes, previous_indexes, method) + result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) + gamma = gamma.reshape(result_shape) + result = _lerp(previous, + next, + gamma, + out=out) + if np.any(slices_having_nans): + if result.ndim == 0 and out is None: + # can't write to a scalar, but indexing will be correct + result = arr[-1] + else: + np.copyto(result, arr[-1, ...], where=slices_having_nans) + return result + + +def _trapz_dispatcher(y, x=None, dx=None, axis=None): + return (y, x) + + +@array_function_dispatch(_trapz_dispatcher) +def trapz(y, x=None, dx=1.0, axis=-1): + r""" + Integrate along the given axis using the composite trapezoidal rule. + + If `x` is provided, the integration happens in sequence along its + elements - they are not sorted. + + Integrate `y` (`x`) along each 1d slice on the given axis, compute + :math:`\int y(x) dx`. + When `x` is specified, this integrates along the parametric curve, + computing :math:`\int_t y(t) dt = + \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`. + + Parameters + ---------- + y : array_like + Input array to integrate. + x : array_like, optional + The sample points corresponding to the `y` values. If `x` is None, + the sample points are assumed to be evenly spaced `dx` apart. The + default is None. + dx : scalar, optional + The spacing between sample points when `x` is None. The default is 1. + axis : int, optional + The axis along which to integrate. + + Returns + ------- + trapz : float or ndarray + Definite integral of `y` = n-dimensional array as approximated along + a single axis by the trapezoidal rule. If `y` is a 1-dimensional array, + then the result is a float. If `n` is greater than 1, then the result + is an `n`-1 dimensional array. + + See Also + -------- + sum, cumsum + + Notes + ----- + Image [2]_ illustrates trapezoidal rule -- y-axis locations of points + will be taken from `y` array, by default x-axis distances between + points will be 1.0, alternatively they can be provided with `x` array + or with `dx` scalar. Return value will be equal to combined area under + the red lines. + + + References + ---------- + .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule + + .. 
[2] Illustration image: + https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png + + Examples + -------- + Use the trapezoidal rule on evenly spaced points: + + >>> np.trapz([1, 2, 3]) + 4.0 + + The spacing between sample points can be selected by either the + ``x`` or ``dx`` arguments: + + >>> np.trapz([1, 2, 3], x=[4, 6, 8]) + 8.0 + >>> np.trapz([1, 2, 3], dx=2) + 8.0 + + Using a decreasing ``x`` corresponds to integrating in reverse: + + >>> np.trapz([1, 2, 3], x=[8, 6, 4]) + -8.0 + + More generally ``x`` is used to integrate along a parametric curve. We can + estimate the integral :math:`\int_0^1 x^2 = 1/3` using: + + >>> x = np.linspace(0, 1, num=50) + >>> y = x**2 + >>> np.trapz(y, x) + 0.33340274885464394 + + Or estimate the area of a circle, noting we repeat the sample which closes + the curve: + + >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True) + >>> np.trapz(np.cos(theta), x=np.sin(theta)) + 3.141571941375841 + + ``np.trapz`` can be applied along a specified axis to do multiple + computations in one call: + + >>> a = np.arange(6).reshape(2, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.trapz(a, axis=0) + array([1.5, 2.5, 3.5]) + >>> np.trapz(a, axis=1) + array([2., 8.]) + """ + y = asanyarray(y) + if x is None: + d = dx + else: + x = asanyarray(x) + if x.ndim == 1: + d = diff(x) + # reshape to correct shape + shape = [1]*y.ndim + shape[axis] = d.shape[0] + d = d.reshape(shape) + else: + d = diff(x, axis=axis) + nd = y.ndim + slice1 = [slice(None)]*nd + slice2 = [slice(None)]*nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + try: + ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis) + except ValueError: + # Operations didn't work, cast to ndarray + d = np.asarray(d) + y = np.asarray(y) + ret = add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis) + return ret + + +# __array_function__ has no __code__ or other attributes normal Python funcs we +# wrap everything into a C callable. SciPy however, tries to "clone" `trapz` +# into a new Python function which requires `__code__` and a few other +# attributes. So we create a dummy clone and copy over its attributes allowing +# SciPy <= 1.10 to work: https://github.com/scipy/scipy/issues/17811 +assert not hasattr(trapz, "__code__") + +def _fake_trapz(y, x=None, dx=1.0, axis=-1): + return trapz(y, x=x, dx=dx, axis=axis) + + +trapz.__code__ = _fake_trapz.__code__ +trapz.__globals__ = _fake_trapz.__globals__ +trapz.__defaults__ = _fake_trapz.__defaults__ +trapz.__closure__ = _fake_trapz.__closure__ +trapz.__kwdefaults__ = _fake_trapz.__kwdefaults__ + + +def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None): + return xi + + +# Based on scitools meshgrid +@array_function_dispatch(_meshgrid_dispatcher) +def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): + """ + Return a list of coordinate matrices from coordinate vectors. + + Make N-D coordinate arrays for vectorized evaluations of + N-D scalar/vector fields over N-D grids, given + one-dimensional coordinate arrays x1, x2,..., xn. + + .. versionchanged:: 1.9 + 1-D and 0-D cases are allowed. + + Parameters + ---------- + x1, x2,..., xn : array_like + 1-D arrays representing the coordinates of a grid. + indexing : {'xy', 'ij'}, optional + Cartesian ('xy', default) or matrix ('ij') indexing of output. + See Notes for more details. + + .. 
versionadded:: 1.7.0 + sparse : bool, optional + If True the shape of the returned coordinate array for dimension *i* + is reduced from ``(N1, ..., Ni, ... Nn)`` to + ``(1, ..., 1, Ni, 1, ..., 1)``. These sparse coordinate grids are + intended to be use with :ref:`basics.broadcasting`. When all + coordinates are used in an expression, broadcasting still leads to a + fully-dimensonal result array. + + Default is False. + + .. versionadded:: 1.7.0 + copy : bool, optional + If False, a view into the original arrays are returned in order to + conserve memory. Default is True. Please note that + ``sparse=False, copy=False`` will likely return non-contiguous + arrays. Furthermore, more than one element of a broadcast array + may refer to a single memory location. If you need to write to the + arrays, make copies first. + + .. versionadded:: 1.7.0 + + Returns + ------- + X1, X2,..., XN : list of ndarrays + For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``, + returns ``(N1, N2, N3,..., Nn)`` shaped arrays if indexing='ij' + or ``(N2, N1, N3,..., Nn)`` shaped arrays if indexing='xy' + with the elements of `xi` repeated to fill the matrix along + the first dimension for `x1`, the second for `x2` and so on. + + Notes + ----- + This function supports both indexing conventions through the indexing + keyword argument. Giving the string 'ij' returns a meshgrid with + matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. + In the 2-D case with inputs of length M and N, the outputs are of shape + (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case + with inputs of length M, N and P, outputs are of shape (N, M, P) for + 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is + illustrated by the following code snippet:: + + xv, yv = np.meshgrid(x, y, indexing='ij') + for i in range(nx): + for j in range(ny): + # treat xv[i,j], yv[i,j] + + xv, yv = np.meshgrid(x, y, indexing='xy') + for i in range(nx): + for j in range(ny): + # treat xv[j,i], yv[j,i] + + In the 1-D and 0-D case, the indexing and sparse keywords have no effect. + + See Also + -------- + mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. + ogrid : Construct an open multi-dimensional "meshgrid" using indexing + notation. + how-to-index + + Examples + -------- + >>> nx, ny = (3, 2) + >>> x = np.linspace(0, 1, nx) + >>> y = np.linspace(0, 1, ny) + >>> xv, yv = np.meshgrid(x, y) + >>> xv + array([[0. , 0.5, 1. ], + [0. , 0.5, 1. ]]) + >>> yv + array([[0., 0., 0.], + [1., 1., 1.]]) + + The result of `meshgrid` is a coordinate grid: + + >>> import matplotlib.pyplot as plt + >>> plt.plot(xv, yv, marker='o', color='k', linestyle='none') + >>> plt.show() + + You can create sparse output arrays to save memory and computation time. + + >>> xv, yv = np.meshgrid(x, y, sparse=True) + >>> xv + array([[0. , 0.5, 1. ]]) + >>> yv + array([[0.], + [1.]]) + + `meshgrid` is very useful to evaluate functions on a grid. If the + function depends on all coordinates, both dense and sparse outputs can be + used. 
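+
+ The two indexing conventions are related by a transpose (check added
+ for illustration; ``x`` and ``y`` are still the short arrays from
+ above):
+
+ >>> xd, yd = np.meshgrid(x, y)
+ >>> xi, yi = np.meshgrid(x, y, indexing='ij')
+ >>> np.array_equal(xi, xd.T) and np.array_equal(yi, yd.T)
+ True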
+ + >>> x = np.linspace(-5, 5, 101) + >>> y = np.linspace(-5, 5, 101) + >>> # full coordinate arrays + >>> xx, yy = np.meshgrid(x, y) + >>> zz = np.sqrt(xx**2 + yy**2) + >>> xx.shape, yy.shape, zz.shape + ((101, 101), (101, 101), (101, 101)) + >>> # sparse coordinate arrays + >>> xs, ys = np.meshgrid(x, y, sparse=True) + >>> zs = np.sqrt(xs**2 + ys**2) + >>> xs.shape, ys.shape, zs.shape + ((1, 101), (101, 1), (101, 101)) + >>> np.array_equal(zz, zs) + True + + >>> h = plt.contourf(x, y, zs) + >>> plt.axis('scaled') + >>> plt.colorbar() + >>> plt.show() + """ + ndim = len(xi) + + if indexing not in ['xy', 'ij']: + raise ValueError( + "Valid values for `indexing` are 'xy' and 'ij'.") + + s0 = (1,) * ndim + output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:]) + for i, x in enumerate(xi)] + + if indexing == 'xy' and ndim > 1: + # switch first and second axis + output[0].shape = (1, -1) + s0[2:] + output[1].shape = (-1, 1) + s0[2:] + + if not sparse: + # Return the full N-D matrix (not only the 1-D vector) + output = np.broadcast_arrays(*output, subok=True) + + if copy: + output = [x.copy() for x in output] + + return output + + +def _delete_dispatcher(arr, obj, axis=None): + return (arr, obj) + + +@array_function_dispatch(_delete_dispatcher) +def delete(arr, obj, axis=None): + """ + Return a new array with sub-arrays along an axis deleted. For a one + dimensional array, this returns those entries not returned by + `arr[obj]`. + + Parameters + ---------- + arr : array_like + Input array. + obj : slice, int or array of ints + Indicate indices of sub-arrays to remove along the specified axis. + + .. versionchanged:: 1.19.0 + Boolean indices are now treated as a mask of elements to remove, + rather than being cast to the integers 0 and 1. + + axis : int, optional + The axis along which to delete the subarray defined by `obj`. + If `axis` is None, `obj` is applied to the flattened array. + + Returns + ------- + out : ndarray + A copy of `arr` with the elements specified by `obj` removed. Note + that `delete` does not occur in-place. If `axis` is None, `out` is + a flattened array. + + See Also + -------- + insert : Insert elements into an array. + append : Append elements at the end of an array. + + Notes + ----- + Often it is preferable to use a boolean mask. For example: + + >>> arr = np.arange(12) + 1 + >>> mask = np.ones(len(arr), dtype=bool) + >>> mask[[0,2,4]] = False + >>> result = arr[mask,...] + + Is equivalent to ``np.delete(arr, [0,2,4], axis=0)``, but allows further + use of `mask`. 
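+
+ The equivalence can be verified directly (check added for illustration):
+
+ >>> np.array_equal(result, np.delete(arr, [0, 2, 4], axis=0))
+ True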
+ + Examples + -------- + >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) + >>> arr + array([[ 1, 2, 3, 4], + [ 5, 6, 7, 8], + [ 9, 10, 11, 12]]) + >>> np.delete(arr, 1, 0) + array([[ 1, 2, 3, 4], + [ 9, 10, 11, 12]]) + + >>> np.delete(arr, np.s_[::2], 1) + array([[ 2, 4], + [ 6, 8], + [10, 12]]) + >>> np.delete(arr, [1,3,5], None) + array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) + + """ + wrap = None + if type(arr) is not ndarray: + try: + wrap = arr.__array_wrap__ + except AttributeError: + pass + + arr = asarray(arr) + ndim = arr.ndim + arrorder = 'F' if arr.flags.fnc else 'C' + if axis is None: + if ndim != 1: + arr = arr.ravel() + # needed for np.matrix, which is still not 1d after being ravelled + ndim = arr.ndim + axis = ndim - 1 + else: + axis = normalize_axis_index(axis, ndim) + + slobj = [slice(None)]*ndim + N = arr.shape[axis] + newshape = list(arr.shape) + + if isinstance(obj, slice): + start, stop, step = obj.indices(N) + xr = range(start, stop, step) + numtodel = len(xr) + + if numtodel <= 0: + if wrap: + return wrap(arr.copy(order=arrorder)) + else: + return arr.copy(order=arrorder) + + # Invert if step is negative: + if step < 0: + step = -step + start = xr[-1] + stop = xr[0] + 1 + + newshape[axis] -= numtodel + new = empty(newshape, arr.dtype, arrorder) + # copy initial chunk + if start == 0: + pass + else: + slobj[axis] = slice(None, start) + new[tuple(slobj)] = arr[tuple(slobj)] + # copy end chunk + if stop == N: + pass + else: + slobj[axis] = slice(stop-numtodel, None) + slobj2 = [slice(None)]*ndim + slobj2[axis] = slice(stop, None) + new[tuple(slobj)] = arr[tuple(slobj2)] + # copy middle pieces + if step == 1: + pass + else: # use array indexing. + keep = ones(stop-start, dtype=bool) + keep[:stop-start:step] = False + slobj[axis] = slice(start, stop-numtodel) + slobj2 = [slice(None)]*ndim + slobj2[axis] = slice(start, stop) + arr = arr[tuple(slobj2)] + slobj2[axis] = keep + new[tuple(slobj)] = arr[tuple(slobj2)] + if wrap: + return wrap(new) + else: + return new + + if isinstance(obj, (int, integer)) and not isinstance(obj, bool): + single_value = True + else: + single_value = False + _obj = obj + obj = np.asarray(obj) + # `size == 0` to allow empty lists similar to indexing, but (as there) + # is really too generic: + if obj.size == 0 and not isinstance(_obj, np.ndarray): + obj = obj.astype(intp) + elif obj.size == 1 and obj.dtype.kind in "ui": + # For a size 1 integer array we can use the single-value path + # (most dtypes, except boolean, should just fail later). 
+ obj = obj.item() + single_value = True + + if single_value: + # optimization for a single value + if (obj < -N or obj >= N): + raise IndexError( + "index %i is out of bounds for axis %i with " + "size %i" % (obj, axis, N)) + if (obj < 0): + obj += N + newshape[axis] -= 1 + new = empty(newshape, arr.dtype, arrorder) + slobj[axis] = slice(None, obj) + new[tuple(slobj)] = arr[tuple(slobj)] + slobj[axis] = slice(obj, None) + slobj2 = [slice(None)]*ndim + slobj2[axis] = slice(obj+1, None) + new[tuple(slobj)] = arr[tuple(slobj2)] + else: + if obj.dtype == bool: + if obj.shape != (N,): + raise ValueError('boolean array argument obj to delete ' + 'must be one dimensional and match the axis ' + 'length of {}'.format(N)) + + # optimization, the other branch is slower + keep = ~obj + else: + keep = ones(N, dtype=bool) + keep[obj,] = False + + slobj[axis] = keep + new = arr[tuple(slobj)] + + if wrap: + return wrap(new) + else: + return new + + +def _insert_dispatcher(arr, obj, values, axis=None): + return (arr, obj, values) + + +@array_function_dispatch(_insert_dispatcher) +def insert(arr, obj, values, axis=None): + """ + Insert values along the given axis before the given indices. + + Parameters + ---------- + arr : array_like + Input array. + obj : int, slice or sequence of ints + Object that defines the index or indices before which `values` is + inserted. + + .. versionadded:: 1.8.0 + + Support for multiple insertions when `obj` is a single scalar or a + sequence with one element (similar to calling insert multiple + times). + values : array_like + Values to insert into `arr`. If the type of `values` is different + from that of `arr`, `values` is converted to the type of `arr`. + `values` should be shaped so that ``arr[...,obj,...] = values`` + is legal. + axis : int, optional + Axis along which to insert `values`. If `axis` is None then `arr` + is flattened first. + + Returns + ------- + out : ndarray + A copy of `arr` with `values` inserted. Note that `insert` + does not occur in-place: a new array is returned. If + `axis` is None, `out` is a flattened array. + + See Also + -------- + append : Append elements at the end of an array. + concatenate : Join a sequence of arrays along an existing axis. + delete : Delete elements from an array. + + Notes + ----- + Note that for higher dimensional inserts ``obj=0`` behaves very different + from ``obj=[0]`` just like ``arr[:,0,:] = values`` is different from + ``arr[:,[0],:] = values``. + + Examples + -------- + >>> a = np.array([[1, 1], [2, 2], [3, 3]]) + >>> a + array([[1, 1], + [2, 2], + [3, 3]]) + >>> np.insert(a, 1, 5) + array([1, 5, 1, ..., 2, 3, 3]) + >>> np.insert(a, 1, 5, axis=1) + array([[1, 5, 1], + [2, 5, 2], + [3, 5, 3]]) + + Difference between sequence and scalars: + + >>> np.insert(a, [1], [[1],[2],[3]], axis=1) + array([[1, 1, 1], + [2, 2, 2], + [3, 3, 3]]) + >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), + ... 
np.insert(a, [1], [[1],[2],[3]], axis=1)) + True + + >>> b = a.flatten() + >>> b + array([1, 1, 2, 2, 3, 3]) + >>> np.insert(b, [2, 2], [5, 6]) + array([1, 1, 5, ..., 2, 3, 3]) + + >>> np.insert(b, slice(2, 4), [5, 6]) + array([1, 1, 5, ..., 2, 3, 3]) + + >>> np.insert(b, [2, 2], [7.13, False]) # type casting + array([1, 1, 7, ..., 2, 3, 3]) + + >>> x = np.arange(8).reshape(2, 4) + >>> idx = (1, 3) + >>> np.insert(x, idx, 999, axis=1) + array([[ 0, 999, 1, 2, 999, 3], + [ 4, 999, 5, 6, 999, 7]]) + + """ + wrap = None + if type(arr) is not ndarray: + try: + wrap = arr.__array_wrap__ + except AttributeError: + pass + + arr = asarray(arr) + ndim = arr.ndim + arrorder = 'F' if arr.flags.fnc else 'C' + if axis is None: + if ndim != 1: + arr = arr.ravel() + # needed for np.matrix, which is still not 1d after being ravelled + ndim = arr.ndim + axis = ndim - 1 + else: + axis = normalize_axis_index(axis, ndim) + slobj = [slice(None)]*ndim + N = arr.shape[axis] + newshape = list(arr.shape) + + if isinstance(obj, slice): + # turn it into a range object + indices = arange(*obj.indices(N), dtype=intp) + else: + # need to copy obj, because indices will be changed in-place + indices = np.array(obj) + if indices.dtype == bool: + # See also delete + # 2012-10-11, NumPy 1.8 + warnings.warn( + "in the future insert will treat boolean arrays and " + "array-likes as a boolean index instead of casting it to " + "integer", FutureWarning, stacklevel=2) + indices = indices.astype(intp) + # Code after warning period: + #if obj.ndim != 1: + # raise ValueError('boolean array argument obj to insert ' + # 'must be one dimensional') + #indices = np.flatnonzero(obj) + elif indices.ndim > 1: + raise ValueError( + "index array argument obj to insert must be one dimensional " + "or scalar") + if indices.size == 1: + index = indices.item() + if index < -N or index > N: + raise IndexError(f"index {obj} is out of bounds for axis {axis} " + f"with size {N}") + if (index < 0): + index += N + + # There are some object array corner cases here, but we cannot avoid + # that: + values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) + if indices.ndim == 0: + # broadcasting is very different here, since a[:,0,:] = ... behaves + # very different from a[:,[0],:] = ...! This changes values so that + # it works likes the second case. 
(here a[:,0:1,:]) + values = np.moveaxis(values, 0, axis) + numnew = values.shape[axis] + newshape[axis] += numnew + new = empty(newshape, arr.dtype, arrorder) + slobj[axis] = slice(None, index) + new[tuple(slobj)] = arr[tuple(slobj)] + slobj[axis] = slice(index, index+numnew) + new[tuple(slobj)] = values + slobj[axis] = slice(index+numnew, None) + slobj2 = [slice(None)] * ndim + slobj2[axis] = slice(index, None) + new[tuple(slobj)] = arr[tuple(slobj2)] + if wrap: + return wrap(new) + return new + elif indices.size == 0 and not isinstance(obj, np.ndarray): + # Can safely cast the empty list to intp + indices = indices.astype(intp) + + indices[indices < 0] += N + + numnew = len(indices) + order = indices.argsort(kind='mergesort') # stable sort + indices[order] += np.arange(numnew) + + newshape[axis] += numnew + old_mask = ones(newshape[axis], dtype=bool) + old_mask[indices] = False + + new = empty(newshape, arr.dtype, arrorder) + slobj2 = [slice(None)]*ndim + slobj[axis] = indices + slobj2[axis] = old_mask + new[tuple(slobj)] = values + new[tuple(slobj2)] = arr + + if wrap: + return wrap(new) + return new + + +def _append_dispatcher(arr, values, axis=None): + return (arr, values) + + +@array_function_dispatch(_append_dispatcher) +def append(arr, values, axis=None): + """ + Append values to the end of an array. + + Parameters + ---------- + arr : array_like + Values are appended to a copy of this array. + values : array_like + These values are appended to a copy of `arr`. It must be of the + correct shape (the same shape as `arr`, excluding `axis`). If + `axis` is not specified, `values` can be any shape and will be + flattened before use. + axis : int, optional + The axis along which `values` are appended. If `axis` is not + given, both `arr` and `values` are flattened before use. + + Returns + ------- + append : ndarray + A copy of `arr` with `values` appended to `axis`. Note that + `append` does not occur in-place: a new array is allocated and + filled. If `axis` is None, `out` is a flattened array. + + See Also + -------- + insert : Insert elements into an array. + delete : Delete elements from an array. + + Examples + -------- + >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) + array([1, 2, 3, ..., 7, 8, 9]) + + When `axis` is specified, `values` must have the correct shape. + + >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) + Traceback (most recent call last): + ... + ValueError: all the input arrays must have same number of dimensions, but + the array at index 0 has 2 dimension(s) and the array at index 1 has 1 + dimension(s) + + """ + arr = asanyarray(arr) + if axis is None: + if arr.ndim != 1: + arr = arr.ravel() + values = ravel(values) + axis = arr.ndim-1 + return concatenate((arr, values), axis=axis) + + +def _digitize_dispatcher(x, bins, right=None): + return (x, bins) + + +@array_function_dispatch(_digitize_dispatcher) +def digitize(x, bins, right=False): + """ + Return the indices of the bins to which each value in input array belongs. 
+ + ========= ============= ============================ + `right` order of bins returned index `i` satisfies + ========= ============= ============================ + ``False`` increasing ``bins[i-1] <= x < bins[i]`` + ``True`` increasing ``bins[i-1] < x <= bins[i]`` + ``False`` decreasing ``bins[i-1] > x >= bins[i]`` + ``True`` decreasing ``bins[i-1] >= x > bins[i]`` + ========= ============= ============================ + + If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is + returned as appropriate. + + Parameters + ---------- + x : array_like + Input array to be binned. Prior to NumPy 1.10.0, this array had to + be 1-dimensional, but can now have any shape. + bins : array_like + Array of bins. It has to be 1-dimensional and monotonic. + right : bool, optional + Indicating whether the intervals include the right or the left bin + edge. Default behavior is (right==False) indicating that the interval + does not include the right edge. The left bin end is open in this + case, i.e., bins[i-1] <= x < bins[i] is the default behavior for + monotonically increasing bins. + + Returns + ------- + indices : ndarray of ints + Output array of indices, of same shape as `x`. + + Raises + ------ + ValueError + If `bins` is not monotonic. + TypeError + If the type of the input is complex. + + See Also + -------- + bincount, histogram, unique, searchsorted + + Notes + ----- + If values in `x` are such that they fall outside the bin range, + attempting to index `bins` with the indices that `digitize` returns + will result in an IndexError. + + .. versionadded:: 1.10.0 + + `np.digitize` is implemented in terms of `np.searchsorted`. This means + that a binary search is used to bin the values, which scales much better + for larger number of bins than the previous linear search. It also removes + the requirement for the input array to be 1-dimensional. + + For monotonically _increasing_ `bins`, the following are equivalent:: + + np.digitize(x, bins, right=True) + np.searchsorted(bins, x, side='left') + + Note that as the order of the arguments are reversed, the side must be too. + The `searchsorted` call is marginally faster, as it does not do any + monotonicity checks. Perhaps more importantly, it supports all dtypes. + + Examples + -------- + >>> x = np.array([0.2, 6.4, 3.0, 1.6]) + >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) + >>> inds = np.digitize(x, bins) + >>> inds + array([1, 4, 3, 2]) + >>> for n in range(x.size): + ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]) + ... 
+ 0.0 <= 0.2 < 1.0 + 4.0 <= 6.4 < 10.0 + 2.5 <= 3.0 < 4.0 + 1.0 <= 1.6 < 2.5 + + >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.]) + >>> bins = np.array([0, 5, 10, 15, 20]) + >>> np.digitize(x,bins,right=True) + array([1, 2, 3, 4, 4]) + >>> np.digitize(x,bins,right=False) + array([1, 3, 3, 4, 5]) + """ + x = _nx.asarray(x) + bins = _nx.asarray(bins) + + # here for compatibility, searchsorted below is happy to take this + if np.issubdtype(x.dtype, _nx.complexfloating): + raise TypeError("x may not be complex") + + mono = _monotonicity(bins) + if mono == 0: + raise ValueError("bins must be monotonically increasing or decreasing") + + # this is backwards because the arguments below are swapped + side = 'left' if right else 'right' + if mono == -1: + # reverse the bins, and invert the results + return len(bins) - _nx.searchsorted(bins[::-1], x, side=side) + else: + return _nx.searchsorted(bins, x, side=side) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/function_base.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/function_base.pyi new file mode 100644 index 00000000..687e4ab1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/function_base.pyi @@ -0,0 +1,697 @@ +import sys +from collections.abc import Sequence, Iterator, Callable, Iterable +from typing import ( + Literal as L, + Any, + TypeVar, + overload, + Protocol, + SupportsIndex, + SupportsInt, +) + +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard + +from numpy import ( + vectorize as vectorize, + ufunc, + generic, + floating, + complexfloating, + intp, + float64, + complex128, + timedelta64, + datetime64, + object_, + _OrderKACF, +) + +from numpy._typing import ( + NDArray, + ArrayLike, + DTypeLike, + _ShapeLike, + _ScalarLike_co, + _DTypeLike, + _ArrayLike, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, + _ArrayLikeObject_co, + _FloatLike_co, + _ComplexLike_co, +) + +from numpy.core.function_base import ( + add_newdoc as add_newdoc, +) + +from numpy.core.multiarray import ( + add_docstring as add_docstring, + bincount as bincount, +) + +from numpy.core.umath import _add_newdoc_ufunc + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +_SCT = TypeVar("_SCT", bound=generic) +_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) + +_2Tuple = tuple[_T, _T] + +class _TrimZerosSequence(Protocol[_T_co]): + def __len__(self) -> int: ... + def __getitem__(self, key: slice, /) -> _T_co: ... + def __iter__(self) -> Iterator[Any]: ... + +class _SupportsWriteFlush(Protocol): + def write(self, s: str, /) -> object: ... + def flush(self) -> object: ... + +__all__: list[str] + +# NOTE: This is in reality a re-export of `np.core.umath._add_newdoc_ufunc` +def add_newdoc_ufunc(ufunc: ufunc, new_docstring: str, /) -> None: ... + +@overload +def rot90( + m: _ArrayLike[_SCT], + k: int = ..., + axes: tuple[int, int] = ..., +) -> NDArray[_SCT]: ... +@overload +def rot90( + m: ArrayLike, + k: int = ..., + axes: tuple[int, int] = ..., +) -> NDArray[Any]: ... + +@overload +def flip(m: _SCT, axis: None = ...) -> _SCT: ... +@overload +def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ... +@overload +def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +@overload +def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ... + +def iterable(y: object) -> TypeGuard[Iterable[Any]]: ... 
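+
+# Note: `iterable` is typed as a TypeGuard, so in code like
+#
+#     if np.iterable(y):
+#         ...  # a checker now treats `y` as Iterable[Any]
+#
+# the positive branch narrows the argument's type.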
+ +@overload +def average( + a: _ArrayLikeFloat_co, + axis: None = ..., + weights: None | _ArrayLikeFloat_co= ..., + returned: L[False] = ..., + keepdims: L[False] = ..., +) -> floating[Any]: ... +@overload +def average( + a: _ArrayLikeComplex_co, + axis: None = ..., + weights: None | _ArrayLikeComplex_co = ..., + returned: L[False] = ..., + keepdims: L[False] = ..., +) -> complexfloating[Any, Any]: ... +@overload +def average( + a: _ArrayLikeObject_co, + axis: None = ..., + weights: None | Any = ..., + returned: L[False] = ..., + keepdims: L[False] = ..., +) -> Any: ... +@overload +def average( + a: _ArrayLikeFloat_co, + axis: None = ..., + weights: None | _ArrayLikeFloat_co= ..., + returned: L[True] = ..., + keepdims: L[False] = ..., +) -> _2Tuple[floating[Any]]: ... +@overload +def average( + a: _ArrayLikeComplex_co, + axis: None = ..., + weights: None | _ArrayLikeComplex_co = ..., + returned: L[True] = ..., + keepdims: L[False] = ..., +) -> _2Tuple[complexfloating[Any, Any]]: ... +@overload +def average( + a: _ArrayLikeObject_co, + axis: None = ..., + weights: None | Any = ..., + returned: L[True] = ..., + keepdims: L[False] = ..., +) -> _2Tuple[Any]: ... +@overload +def average( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + weights: None | Any = ..., + returned: L[False] = ..., + keepdims: bool = ..., +) -> Any: ... +@overload +def average( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + weights: None | Any = ..., + returned: L[True] = ..., + keepdims: bool = ..., +) -> _2Tuple[Any]: ... + +@overload +def asarray_chkfinite( + a: _ArrayLike[_SCT], + dtype: None = ..., + order: _OrderKACF = ..., +) -> NDArray[_SCT]: ... +@overload +def asarray_chkfinite( + a: object, + dtype: None = ..., + order: _OrderKACF = ..., +) -> NDArray[Any]: ... +@overload +def asarray_chkfinite( + a: Any, + dtype: _DTypeLike[_SCT], + order: _OrderKACF = ..., +) -> NDArray[_SCT]: ... +@overload +def asarray_chkfinite( + a: Any, + dtype: DTypeLike, + order: _OrderKACF = ..., +) -> NDArray[Any]: ... + +# TODO: Use PEP 612 `ParamSpec` once mypy supports `Concatenate` +# xref python/mypy#8645 +@overload +def piecewise( + x: _ArrayLike[_SCT], + condlist: ArrayLike, + funclist: Sequence[Any | Callable[..., Any]], + *args: Any, + **kw: Any, +) -> NDArray[_SCT]: ... +@overload +def piecewise( + x: ArrayLike, + condlist: ArrayLike, + funclist: Sequence[Any | Callable[..., Any]], + *args: Any, + **kw: Any, +) -> NDArray[Any]: ... + +def select( + condlist: Sequence[ArrayLike], + choicelist: Sequence[ArrayLike], + default: ArrayLike = ..., +) -> NDArray[Any]: ... + +@overload +def copy( + a: _ArrayType, + order: _OrderKACF, + subok: L[True], +) -> _ArrayType: ... +@overload +def copy( + a: _ArrayType, + order: _OrderKACF = ..., + *, + subok: L[True], +) -> _ArrayType: ... +@overload +def copy( + a: _ArrayLike[_SCT], + order: _OrderKACF = ..., + subok: L[False] = ..., +) -> NDArray[_SCT]: ... +@overload +def copy( + a: ArrayLike, + order: _OrderKACF = ..., + subok: L[False] = ..., +) -> NDArray[Any]: ... + +def gradient( + f: ArrayLike, + *varargs: ArrayLike, + axis: None | _ShapeLike = ..., + edge_order: L[1, 2] = ..., +) -> Any: ... + +@overload +def diff( + a: _T, + n: L[0], + axis: SupportsIndex = ..., + prepend: ArrayLike = ..., + append: ArrayLike = ..., +) -> _T: ... +@overload +def diff( + a: ArrayLike, + n: int = ..., + axis: SupportsIndex = ..., + prepend: ArrayLike = ..., + append: ArrayLike = ..., +) -> NDArray[Any]: ... 
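+
+# Note: the first `diff` overload encodes the `n=0` identity case; e.g.
+# `np.diff(x, 0)` is typed as returning `x` unchanged (`_T`), while any
+# other `n` falls through to the generic `NDArray[Any]` overload.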
+ +@overload +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: None | _FloatLike_co = ..., + right: None | _FloatLike_co = ..., + period: None | _FloatLike_co = ..., +) -> NDArray[float64]: ... +@overload +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeComplex_co, + left: None | _ComplexLike_co = ..., + right: None | _ComplexLike_co = ..., + period: None | _FloatLike_co = ..., +) -> NDArray[complex128]: ... + +@overload +def angle(z: _ComplexLike_co, deg: bool = ...) -> floating[Any]: ... +@overload +def angle(z: object_, deg: bool = ...) -> Any: ... +@overload +def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating[Any]]: ... +@overload +def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ... + +@overload +def unwrap( + p: _ArrayLikeFloat_co, + discont: None | float = ..., + axis: int = ..., + *, + period: float = ..., +) -> NDArray[floating[Any]]: ... +@overload +def unwrap( + p: _ArrayLikeObject_co, + discont: None | float = ..., + axis: int = ..., + *, + period: float = ..., +) -> NDArray[object_]: ... + +def sort_complex(a: ArrayLike) -> NDArray[complexfloating[Any, Any]]: ... + +def trim_zeros( + filt: _TrimZerosSequence[_T], + trim: L["f", "b", "fb", "bf"] = ..., +) -> _T: ... + +@overload +def extract(condition: ArrayLike, arr: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +@overload +def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ... + +def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ... + +def disp( + mesg: object, + device: None | _SupportsWriteFlush = ..., + linefeed: bool = ..., +) -> None: ... + +@overload +def cov( + m: _ArrayLikeFloat_co, + y: None | _ArrayLikeFloat_co = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: None | SupportsIndex | SupportsInt = ..., + fweights: None | ArrayLike = ..., + aweights: None | ArrayLike = ..., + *, + dtype: None = ..., +) -> NDArray[floating[Any]]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: None | SupportsIndex | SupportsInt = ..., + fweights: None | ArrayLike = ..., + aweights: None | ArrayLike = ..., + *, + dtype: None = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: None | SupportsIndex | SupportsInt = ..., + fweights: None | ArrayLike = ..., + aweights: None | ArrayLike = ..., + *, + dtype: _DTypeLike[_SCT], +) -> NDArray[_SCT]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: None | SupportsIndex | SupportsInt = ..., + fweights: None | ArrayLike = ..., + aweights: None | ArrayLike = ..., + *, + dtype: DTypeLike, +) -> NDArray[Any]: ... + +# NOTE `bias` and `ddof` have been deprecated +@overload +def corrcoef( + m: _ArrayLikeFloat_co, + y: None | _ArrayLikeFloat_co = ..., + rowvar: bool = ..., + *, + dtype: None = ..., +) -> NDArray[floating[Any]]: ... +@overload +def corrcoef( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + *, + dtype: None = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def corrcoef( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + *, + dtype: _DTypeLike[_SCT], +) -> NDArray[_SCT]: ... 
+@overload +def corrcoef( + m: _ArrayLikeComplex_co, + y: None | _ArrayLikeComplex_co = ..., + rowvar: bool = ..., + *, + dtype: DTypeLike, +) -> NDArray[Any]: ... + +def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]: ... + +def bartlett(M: _FloatLike_co) -> NDArray[floating[Any]]: ... + +def hanning(M: _FloatLike_co) -> NDArray[floating[Any]]: ... + +def hamming(M: _FloatLike_co) -> NDArray[floating[Any]]: ... + +def i0(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... + +def kaiser( + M: _FloatLike_co, + beta: _FloatLike_co, +) -> NDArray[floating[Any]]: ... + +@overload +def sinc(x: _FloatLike_co) -> floating[Any]: ... +@overload +def sinc(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +@overload +def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +# NOTE: Deprecated +# def msort(a: ArrayLike) -> NDArray[Any]: ... + +@overload +def median( + a: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> floating[Any]: ... +@overload +def median( + a: _ArrayLikeComplex_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> complexfloating[Any, Any]: ... +@overload +def median( + a: _ArrayLikeTD64_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> timedelta64: ... +@overload +def median( + a: _ArrayLikeObject_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> Any: ... +@overload +def median( + a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: bool = ..., +) -> Any: ... +@overload +def median( + a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + axis: None | _ShapeLike = ..., + out: _ArrayType = ..., + overwrite_input: bool = ..., + keepdims: bool = ..., +) -> _ArrayType: ... + +_MethodKind = L[ + "inverted_cdf", + "averaged_inverted_cdf", + "closest_observation", + "interpolated_inverted_cdf", + "hazen", + "weibull", + "linear", + "median_unbiased", + "normal_unbiased", + "lower", + "higher", + "midpoint", + "nearest", +] + +@overload +def percentile( + a: _ArrayLikeFloat_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., +) -> floating[Any]: ... +@overload +def percentile( + a: _ArrayLikeComplex_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., +) -> complexfloating[Any, Any]: ... +@overload +def percentile( + a: _ArrayLikeTD64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., +) -> timedelta64: ... +@overload +def percentile( + a: _ArrayLikeDT64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., +) -> datetime64: ... +@overload +def percentile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., +) -> Any: ... 
+@overload +def percentile( + a: _ArrayLikeFloat_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., +) -> NDArray[floating[Any]]: ... +@overload +def percentile( + a: _ArrayLikeComplex_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def percentile( + a: _ArrayLikeTD64_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., +) -> NDArray[timedelta64]: ... +@overload +def percentile( + a: _ArrayLikeDT64_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., +) -> NDArray[datetime64]: ... +@overload +def percentile( + a: _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., +) -> NDArray[object_]: ... +@overload +def percentile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: None | _ShapeLike = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., +) -> Any: ... +@overload +def percentile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: None | _ShapeLike = ..., + out: _ArrayType = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., +) -> _ArrayType: ... + +# NOTE: Not an alias, but they do have identical signatures +# (that we can reuse) +quantile = percentile + +# TODO: Returns a scalar for <= 1D array-likes; returns an ndarray otherwise +def trapz( + y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + x: None | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> Any: ... + +def meshgrid( + *xi: ArrayLike, + copy: bool = ..., + sparse: bool = ..., + indexing: L["xy", "ij"] = ..., +) -> list[NDArray[Any]]: ... + +@overload +def delete( + arr: _ArrayLike[_SCT], + obj: slice | _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., +) -> NDArray[_SCT]: ... +@overload +def delete( + arr: ArrayLike, + obj: slice | _ArrayLikeInt_co, + axis: None | SupportsIndex = ..., +) -> NDArray[Any]: ... + +@overload +def insert( + arr: _ArrayLike[_SCT], + obj: slice | _ArrayLikeInt_co, + values: ArrayLike, + axis: None | SupportsIndex = ..., +) -> NDArray[_SCT]: ... +@overload +def insert( + arr: ArrayLike, + obj: slice | _ArrayLikeInt_co, + values: ArrayLike, + axis: None | SupportsIndex = ..., +) -> NDArray[Any]: ... + +def append( + arr: ArrayLike, + values: ArrayLike, + axis: None | SupportsIndex = ..., +) -> NDArray[Any]: ... + +@overload +def digitize( + x: _FloatLike_co, + bins: _ArrayLikeFloat_co, + right: bool = ..., +) -> intp: ... +@overload +def digitize( + x: _ArrayLikeFloat_co, + bins: _ArrayLikeFloat_co, + right: bool = ..., +) -> NDArray[intp]: ... 
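+
+# Note: the two `digitize` overloads mirror the scalar/array split at
+# runtime; e.g. `np.digitize(0.5, bins)` is typed as `intp`, while
+# `np.digitize([0.5, 1.5], bins)` is typed as `NDArray[intp]`.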
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/histograms.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/histograms.py new file mode 100644 index 00000000..6ac65b72 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/histograms.py @@ -0,0 +1,1072 @@ +""" +Histogram-related functions +""" +import contextlib +import functools +import operator +import warnings + +import numpy as np +from numpy.core import overrides + +__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges'] + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + +# range is a keyword argument to many functions, so save the builtin so they can +# use it. +_range = range + + +def _ptp(x): + """Peak-to-peak value of x. + + This implementation avoids the problem of signed integer arrays having a + peak-to-peak value that cannot be represented with the array's data type. + This function returns an unsigned value for signed integer arrays. + """ + return _unsigned_subtract(x.max(), x.min()) + + +def _hist_bin_sqrt(x, range): + """ + Square root histogram bin estimator. + + Bin width is inversely proportional to the data size. Used by many + programs for its simplicity. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / np.sqrt(x.size) + + +def _hist_bin_sturges(x, range): + """ + Sturges histogram bin estimator. + + A very simplistic estimator based on the assumption of normality of + the data. This estimator has poor performance for non-normal data, + which becomes especially obvious for large data sets. The estimate + depends only on size of the data. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / (np.log2(x.size) + 1.0) + + +def _hist_bin_rice(x, range): + """ + Rice histogram bin estimator. + + Another simple estimator with no normality assumption. It has better + performance for large data than Sturges, but tends to overestimate + the number of bins. The number of bins is proportional to the cube + root of data size (asymptotically optimal). The estimate depends + only on size of the data. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / (2.0 * x.size ** (1.0 / 3)) + + +def _hist_bin_scott(x, range): + """ + Scott histogram bin estimator. + + The binwidth is proportional to the standard deviation of the data + and inversely proportional to the cube root of data size + (asymptotically optimal). + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x) + + +def _hist_bin_stone(x, range): + """ + Histogram bin estimator based on minimizing the estimated integrated squared error (ISE). + + The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution. 
+ The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule. + https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule + + This paper by Stone appears to be the origination of this rule. + http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + range : (float, float) + The lower and upper range of the bins. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + + n = x.size + ptp_x = _ptp(x) + if n <= 1 or ptp_x == 0: + return 0 + + def jhat(nbins): + hh = ptp_x / nbins + p_k = np.histogram(x, bins=nbins, range=range)[0] / n + return (2 - (n + 1) * p_k.dot(p_k)) / hh + + nbins_upper_bound = max(100, int(np.sqrt(n))) + nbins = min(_range(1, nbins_upper_bound + 1), key=jhat) + if nbins == nbins_upper_bound: + warnings.warn("The number of bins estimated may be suboptimal.", + RuntimeWarning, stacklevel=3) + return ptp_x / nbins + + +def _hist_bin_doane(x, range): + """ + Doane's histogram bin estimator. + + Improved version of Sturges' formula which works better for + non-normal data. See + stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + if x.size > 2: + sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3))) + sigma = np.std(x) + if sigma > 0.0: + # These three operations add up to + # g1 = np.mean(((x - np.mean(x)) / sigma)**3) + # but use only one temp array instead of three + temp = x - np.mean(x) + np.true_divide(temp, sigma, temp) + np.power(temp, 3, temp) + g1 = np.mean(temp) + return _ptp(x) / (1.0 + np.log2(x.size) + + np.log2(1.0 + np.absolute(g1) / sg1)) + return 0.0 + + +def _hist_bin_fd(x, range): + """ + The Freedman-Diaconis histogram bin estimator. + + The Freedman-Diaconis rule uses interquartile range (IQR) to + estimate binwidth. It is considered a variation of the Scott rule + with more robustness as the IQR is less affected by outliers than + the standard deviation. However, the IQR depends on fewer points + than the standard deviation, so it is less accurate, especially for + long tailed distributions. + + If the IQR is 0, this function returns 0 for the bin width. + Binwidth is inversely proportional to the cube root of data size + (asymptotically optimal). + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + iqr = np.subtract(*np.percentile(x, [75, 25])) + return 2.0 * iqr * x.size ** (-1.0 / 3.0) + + +def _hist_bin_auto(x, range): + """ + Histogram bin estimator that uses the minimum width of the + Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero. + If the bin width from the FD estimator is 0, the Sturges estimator is used. + + The FD estimator is usually the most robust method, but its width + estimate tends to be too large for small `x` and bad for data with limited + variance. The Sturges estimator is quite good for small (<1000) datasets + and is the default in the R language. This method gives good off-the-shelf + behaviour. + + .. 
versionchanged:: 1.15.0 + If there is limited variance the IQR can be 0, which results in the + FD bin width being 0 too. This is not a valid bin width, so + ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal. + If the IQR is 0, it's unlikely any variance-based estimators will be of + use, so we revert to the Sturges estimator, which only uses the size of the + dataset in its calculation. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + + See Also + -------- + _hist_bin_fd, _hist_bin_sturges + """ + fd_bw = _hist_bin_fd(x, range) + sturges_bw = _hist_bin_sturges(x, range) + del range # unused + if fd_bw: + return min(fd_bw, sturges_bw) + else: + # limited variance, so we return a len dependent bw estimator + return sturges_bw + +# Private dict initialized at module load time +_hist_bin_selectors = {'stone': _hist_bin_stone, + 'auto': _hist_bin_auto, + 'doane': _hist_bin_doane, + 'fd': _hist_bin_fd, + 'rice': _hist_bin_rice, + 'scott': _hist_bin_scott, + 'sqrt': _hist_bin_sqrt, + 'sturges': _hist_bin_sturges} + + +def _ravel_and_check_weights(a, weights): + """ Check a and weights have matching shapes, and ravel both """ + a = np.asarray(a) + + # Ensure that the array is a "subtractable" dtype + if a.dtype == np.bool_: + warnings.warn("Converting input from {} to {} for compatibility." + .format(a.dtype, np.uint8), + RuntimeWarning, stacklevel=3) + a = a.astype(np.uint8) + + if weights is not None: + weights = np.asarray(weights) + if weights.shape != a.shape: + raise ValueError( + 'weights should have the same shape as a.') + weights = weights.ravel() + a = a.ravel() + return a, weights + + +def _get_outer_edges(a, range): + """ + Determine the outer bin edges to use, from either the data or the range + argument + """ + if range is not None: + first_edge, last_edge = range + if first_edge > last_edge: + raise ValueError( + 'max must be larger than min in range parameter.') + if not (np.isfinite(first_edge) and np.isfinite(last_edge)): + raise ValueError( + "supplied range of [{}, {}] is not finite".format(first_edge, last_edge)) + elif a.size == 0: + # handle empty arrays. Can't determine range, so use 0-1. + first_edge, last_edge = 0, 1 + else: + first_edge, last_edge = a.min(), a.max() + if not (np.isfinite(first_edge) and np.isfinite(last_edge)): + raise ValueError( + "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge)) + + # expand empty range to avoid divide by zero + if first_edge == last_edge: + first_edge = first_edge - 0.5 + last_edge = last_edge + 0.5 + + return first_edge, last_edge + + +def _unsigned_subtract(a, b): + """ + Subtract two values where a >= b, and produce an unsigned result + + This is needed when finding the difference between the upper and lower + bound of an int16 histogram + """ + # coerce to a single type + signed_to_unsigned = { + np.byte: np.ubyte, + np.short: np.ushort, + np.intc: np.uintc, + np.int_: np.uint, + np.longlong: np.ulonglong + } + dt = np.result_type(a, b) + try: + dt = signed_to_unsigned[dt.type] + except KeyError: + return np.subtract(a, b, dtype=dt) + else: + # we know the inputs are integers, and we are deliberately casting + # signed to unsigned + return np.subtract(a, b, casting='unsafe', dtype=dt) + + +def _get_bin_edges(a, bins, range, weights): + """ + Computes the bins used internally by `histogram`. 
+ + Parameters + ========== + a : ndarray + Ravelled data array + bins, range + Forwarded arguments from `histogram`. + weights : ndarray, optional + Ravelled weights array, or None + + Returns + ======= + bin_edges : ndarray + Array of bin edges + uniform_bins : (Number, Number, int): + The upper bound, lowerbound, and number of bins, used in the optimized + implementation of `histogram` that works on uniform bins. + """ + # parse the overloaded bins argument + n_equal_bins = None + bin_edges = None + + if isinstance(bins, str): + bin_name = bins + # if `bins` is a string for an automatic method, + # this will replace it with the number of bins calculated + if bin_name not in _hist_bin_selectors: + raise ValueError( + "{!r} is not a valid estimator for `bins`".format(bin_name)) + if weights is not None: + raise TypeError("Automated estimation of the number of " + "bins is not supported for weighted data") + + first_edge, last_edge = _get_outer_edges(a, range) + + # truncate the range if needed + if range is not None: + keep = (a >= first_edge) + keep &= (a <= last_edge) + if not np.logical_and.reduce(keep): + a = a[keep] + + if a.size == 0: + n_equal_bins = 1 + else: + # Do not call selectors on empty arrays + width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge)) + if width: + n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width)) + else: + # Width can be zero for some estimators, e.g. FD when + # the IQR of the data is zero. + n_equal_bins = 1 + + elif np.ndim(bins) == 0: + try: + n_equal_bins = operator.index(bins) + except TypeError as e: + raise TypeError( + '`bins` must be an integer, a string, or an array') from e + if n_equal_bins < 1: + raise ValueError('`bins` must be positive, when an integer') + + first_edge, last_edge = _get_outer_edges(a, range) + + elif np.ndim(bins) == 1: + bin_edges = np.asarray(bins) + if np.any(bin_edges[:-1] > bin_edges[1:]): + raise ValueError( + '`bins` must increase monotonically, when an array') + + else: + raise ValueError('`bins` must be 1d, when an array') + + if n_equal_bins is not None: + # gh-10322 means that type resolution rules are dependent on array + # shapes. To avoid this causing problems, we pick a type now and stick + # with it throughout. + bin_type = np.result_type(first_edge, last_edge, a) + if np.issubdtype(bin_type, np.integer): + bin_type = np.result_type(bin_type, float) + + # bin edges must be computed + bin_edges = np.linspace( + first_edge, last_edge, n_equal_bins + 1, + endpoint=True, dtype=bin_type) + return bin_edges, (first_edge, last_edge, n_equal_bins) + else: + return bin_edges, None + + +def _search_sorted_inclusive(a, v): + """ + Like `searchsorted`, but where the last item in `v` is placed on the right. + + In the context of a histogram, this makes the last bin edge inclusive + """ + return np.concatenate(( + a.searchsorted(v[:-1], 'left'), + a.searchsorted(v[-1:], 'right') + )) + + +def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None): + return (a, bins, weights) + + +@array_function_dispatch(_histogram_bin_edges_dispatcher) +def histogram_bin_edges(a, bins=10, range=None, weights=None): + r""" + Function to calculate only the edges of the bins used by the `histogram` + function. + + Parameters + ---------- + a : array_like + Input data. The histogram is computed over the flattened array. + bins : int or sequence of scalars or str, optional + If `bins` is an int, it defines the number of equal-width + bins in the given range (10, by default). 
If `bins` is a + sequence, it defines the bin edges, including the rightmost + edge, allowing for non-uniform bin widths. + + If `bins` is a string from the list below, `histogram_bin_edges` will use + the method chosen to calculate the optimal bin width and + consequently the number of bins (see `Notes` for more detail on + the estimators) from the data that falls within the requested + range. While the bin width will be optimal for the actual data + in the range, the number of bins will be computed to fill the + entire range, including the empty portions. For visualisation, + using the 'auto' option is suggested. Weighted data is not + supported for automated bin size selection. + + 'auto' + Maximum of the 'sturges' and 'fd' estimators. Provides good + all around performance. + + 'fd' (Freedman Diaconis Estimator) + Robust (resilient to outliers) estimator that takes into + account data variability and data size. + + 'doane' + An improved version of Sturges' estimator that works better + with non-normal datasets. + + 'scott' + Less robust estimator that takes into account data variability + and data size. + + 'stone' + Estimator based on leave-one-out cross-validation estimate of + the integrated squared error. Can be regarded as a generalization + of Scott's rule. + + 'rice' + Estimator does not take variability into account, only data + size. Commonly overestimates number of bins required. + + 'sturges' + R's default method, only accounts for data size. Only + optimal for gaussian data and underestimates number of bins + for large non-gaussian datasets. + + 'sqrt' + Square root (of data size) estimator, used by Excel and + other programs for its speed and simplicity. + + range : (float, float), optional + The lower and upper range of the bins. If not provided, range + is simply ``(a.min(), a.max())``. Values outside the range are + ignored. The first element of the range must be less than or + equal to the second. `range` affects the automatic bin + computation as well. While bin width is computed to be optimal + based on the actual data within `range`, the bin count will fill + the entire range including portions containing no data. + + weights : array_like, optional + An array of weights, of the same shape as `a`. Each value in + `a` only contributes its associated weight towards the bin count + (instead of 1). This is currently not used by any of the bin estimators, + but may be in the future. + + Returns + ------- + bin_edges : array of dtype float + The edges to pass into `histogram` + + See Also + -------- + histogram + + Notes + ----- + The methods to estimate the optimal number of bins are well founded + in literature, and are inspired by the choices R provides for + histogram visualisation. Note that having the number of bins + proportional to :math:`n^{1/3}` is asymptotically optimal, which is + why it appears in most estimators. These are simply plug-in methods + that give good starting points for number of bins. In the equations + below, :math:`h` is the binwidth and :math:`n_h` is the number of + bins. All estimators that compute bin counts are recast to bin width + using the `ptp` of the data. The final bin count is obtained from + ``np.round(np.ceil(range / h))``. The final bin width is often less + than what is returned by the estimators below. + + 'auto' (maximum of the 'sturges' and 'fd' estimators) + A compromise to get a good value. For small datasets the Sturges + value will usually be chosen, while larger datasets will usually + default to FD. 
Avoids the overly conservative behaviour of FD + and Sturges for small and large datasets respectively. + Switchover point is usually :math:`a.size \approx 1000`. + + 'fd' (Freedman Diaconis Estimator) + .. math:: h = 2 \frac{IQR}{n^{1/3}} + + The binwidth is proportional to the interquartile range (IQR) + and inversely proportional to cube root of a.size. Can be too + conservative for small datasets, but is quite good for large + datasets. The IQR is very robust to outliers. + + 'scott' + .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}} + + The binwidth is proportional to the standard deviation of the + data and inversely proportional to cube root of ``x.size``. Can + be too conservative for small datasets, but is quite good for + large datasets. The standard deviation is not very robust to + outliers. Values are very similar to the Freedman-Diaconis + estimator in the absence of outliers. + + 'rice' + .. math:: n_h = 2n^{1/3} + + The number of bins is only proportional to cube root of + ``a.size``. It tends to overestimate the number of bins and it + does not take into account data variability. + + 'sturges' + .. math:: n_h = \log _{2}(n) + 1 + + The number of bins is the base 2 log of ``a.size``. This + estimator assumes normality of data and is too conservative for + larger, non-normal datasets. This is the default method in R's + ``hist`` method. + + 'doane' + .. math:: n_h = 1 + \log_{2}(n) + + \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right) + + g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right] + + \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}} + + An improved version of Sturges' formula that produces better + estimates for non-normal datasets. This estimator attempts to + account for the skew of the data. + + 'sqrt' + .. math:: n_h = \sqrt n + + The simplest and fastest estimator. Only takes into account the + data size. + + Examples + -------- + >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5]) + >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1)) + array([0. , 0.25, 0.5 , 0.75, 1. ]) + >>> np.histogram_bin_edges(arr, bins=2) + array([0. , 2.5, 5. ]) + + For consistency with histogram, an array of pre-computed bins is + passed through unmodified: + + >>> np.histogram_bin_edges(arr, [1, 2]) + array([1, 2]) + + This function allows one set of bins to be computed, and reused across + multiple histograms: + + >>> shared_bins = np.histogram_bin_edges(arr, bins='auto') + >>> shared_bins + array([0., 1., 2., 3., 4., 5.]) + + >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1]) + >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins) + >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins) + + >>> hist_0; hist_1 + array([1, 1, 0, 1, 0]) + array([2, 0, 1, 1, 2]) + + Which gives more easily comparable results than using separate bins for + each histogram: + + >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto') + >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto') + >>> hist_0; hist_1 + array([1, 1, 1]) + array([2, 1, 1, 2]) + >>> bins_0; bins_1 + array([0., 1., 2., 3.]) + array([0. , 1.25, 2.5 , 3.75, 5. 
]) + + """ + a, weights = _ravel_and_check_weights(a, weights) + bin_edges, _ = _get_bin_edges(a, bins, range, weights) + return bin_edges + + +def _histogram_dispatcher( + a, bins=None, range=None, density=None, weights=None): + return (a, bins, weights) + + +@array_function_dispatch(_histogram_dispatcher) +def histogram(a, bins=10, range=None, density=None, weights=None): + r""" + Compute the histogram of a dataset. + + Parameters + ---------- + a : array_like + Input data. The histogram is computed over the flattened array. + bins : int or sequence of scalars or str, optional + If `bins` is an int, it defines the number of equal-width + bins in the given range (10, by default). If `bins` is a + sequence, it defines a monotonically increasing array of bin edges, + including the rightmost edge, allowing for non-uniform bin widths. + + .. versionadded:: 1.11.0 + + If `bins` is a string, it defines the method used to calculate the + optimal bin width, as defined by `histogram_bin_edges`. + + range : (float, float), optional + The lower and upper range of the bins. If not provided, range + is simply ``(a.min(), a.max())``. Values outside the range are + ignored. The first element of the range must be less than or + equal to the second. `range` affects the automatic bin + computation as well. While bin width is computed to be optimal + based on the actual data within `range`, the bin count will fill + the entire range including portions containing no data. + weights : array_like, optional + An array of weights, of the same shape as `a`. Each value in + `a` only contributes its associated weight towards the bin count + (instead of 1). If `density` is True, the weights are + normalized, so that the integral of the density over the range + remains 1. + density : bool, optional + If ``False``, the result will contain the number of samples in + each bin. If ``True``, the result is the value of the + probability *density* function at the bin, normalized such that + the *integral* over the range is 1. Note that the sum of the + histogram values will not be equal to 1 unless bins of unity + width are chosen; it is not a probability *mass* function. + + Returns + ------- + hist : array + The values of the histogram. See `density` and `weights` for a + description of the possible semantics. + bin_edges : array of dtype float + Return the bin edges ``(length(hist)+1)``. + + + See Also + -------- + histogramdd, bincount, searchsorted, digitize, histogram_bin_edges + + Notes + ----- + All but the last (righthand-most) bin is half-open. In other words, + if `bins` is:: + + [1, 2, 3, 4] + + then the first bin is ``[1, 2)`` (including 1, but excluding 2) and + the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which + *includes* 4. + + + Examples + -------- + >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) + (array([0, 2, 1]), array([0, 1, 2, 3])) + >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) + (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) + >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) + (array([1, 4, 1]), array([0, 1, 2, 3])) + + >>> a = np.arange(5) + >>> hist, bin_edges = np.histogram(a, density=True) + >>> hist + array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) + >>> hist.sum() + 2.4999999999999996 + >>> np.sum(hist * np.diff(bin_edges)) + 1.0 + + .. 
versionadded:: 1.11.0 + + Automated Bin Selection Methods example, using 2 peak random data + with 2000 points: + + >>> import matplotlib.pyplot as plt + >>> rng = np.random.RandomState(10) # deterministic random data + >>> a = np.hstack((rng.normal(size=1000), + ... rng.normal(loc=5, scale=2, size=1000))) + >>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram + >>> plt.title("Histogram with 'auto' bins") + Text(0.5, 1.0, "Histogram with 'auto' bins") + >>> plt.show() + + """ + a, weights = _ravel_and_check_weights(a, weights) + + bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights) + + # Histogram is an integer or a float array depending on the weights. + if weights is None: + ntype = np.dtype(np.intp) + else: + ntype = weights.dtype + + # We set a block size, as this allows us to iterate over chunks when + # computing histograms, to minimize memory usage. + BLOCK = 65536 + + # The fast path uses bincount, but that only works for certain types + # of weight + simple_weights = ( + weights is None or + np.can_cast(weights.dtype, np.double) or + np.can_cast(weights.dtype, complex) + ) + + if uniform_bins is not None and simple_weights: + # Fast algorithm for equal bins + # We now convert values of a to bin indices, under the assumption of + # equal bin widths (which is valid here). + first_edge, last_edge, n_equal_bins = uniform_bins + + # Initialize empty histogram + n = np.zeros(n_equal_bins, ntype) + + # Pre-compute histogram scaling factor + norm_numerator = n_equal_bins + norm_denom = _unsigned_subtract(last_edge, first_edge) + + # We iterate over blocks here for two reasons: the first is that for + # large arrays, it is actually faster (for example for a 10^8 array it + # is 2x as fast) and it results in a memory footprint 3x lower in the + # limit of large arrays. + for i in _range(0, len(a), BLOCK): + tmp_a = a[i:i+BLOCK] + if weights is None: + tmp_w = None + else: + tmp_w = weights[i:i + BLOCK] + + # Only include values in the right range + keep = (tmp_a >= first_edge) + keep &= (tmp_a <= last_edge) + if not np.logical_and.reduce(keep): + tmp_a = tmp_a[keep] + if tmp_w is not None: + tmp_w = tmp_w[keep] + + # This cast ensures no type promotions occur below, which gh-10322 + # make unpredictable. Getting it wrong leads to precision errors + # like gh-8123. + tmp_a = tmp_a.astype(bin_edges.dtype, copy=False) + + # Compute the bin indices, and for values that lie exactly on + # last_edge we need to subtract one + f_indices = ((_unsigned_subtract(tmp_a, first_edge) / norm_denom) + * norm_numerator) + indices = f_indices.astype(np.intp) + indices[indices == n_equal_bins] -= 1 + + # The index computation is not guaranteed to give exactly + # consistent results within ~1 ULP of the bin edges. + decrement = tmp_a < bin_edges[indices] + indices[decrement] -= 1 + # The last bin includes the right edge. The other bins do not. 
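+            # Symmetrically to `decrement` above, bump up values that landed
+            # one bin too low (already at or past the next edge), except in
+            # the final bin, whose right edge is intentionally inclusive.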
+ increment = ((tmp_a >= bin_edges[indices + 1]) + & (indices != n_equal_bins - 1)) + indices[increment] += 1 + + # We now compute the histogram using bincount + if ntype.kind == 'c': + n.real += np.bincount(indices, weights=tmp_w.real, + minlength=n_equal_bins) + n.imag += np.bincount(indices, weights=tmp_w.imag, + minlength=n_equal_bins) + else: + n += np.bincount(indices, weights=tmp_w, + minlength=n_equal_bins).astype(ntype) + else: + # Compute via cumulative histogram + cum_n = np.zeros(bin_edges.shape, ntype) + if weights is None: + for i in _range(0, len(a), BLOCK): + sa = np.sort(a[i:i+BLOCK]) + cum_n += _search_sorted_inclusive(sa, bin_edges) + else: + zero = np.zeros(1, dtype=ntype) + for i in _range(0, len(a), BLOCK): + tmp_a = a[i:i+BLOCK] + tmp_w = weights[i:i+BLOCK] + sorting_index = np.argsort(tmp_a) + sa = tmp_a[sorting_index] + sw = tmp_w[sorting_index] + cw = np.concatenate((zero, sw.cumsum())) + bin_index = _search_sorted_inclusive(sa, bin_edges) + cum_n += cw[bin_index] + + n = np.diff(cum_n) + + if density: + db = np.array(np.diff(bin_edges), float) + return n/db/n.sum(), bin_edges + + return n, bin_edges + + +def _histogramdd_dispatcher(sample, bins=None, range=None, density=None, + weights=None): + if hasattr(sample, 'shape'): # same condition as used in histogramdd + yield sample + else: + yield from sample + with contextlib.suppress(TypeError): + yield from bins + yield weights + + +@array_function_dispatch(_histogramdd_dispatcher) +def histogramdd(sample, bins=10, range=None, density=None, weights=None): + """ + Compute the multidimensional histogram of some data. + + Parameters + ---------- + sample : (N, D) array, or (N, D) array_like + The data to be histogrammed. + + Note the unusual interpretation of sample when an array_like: + + * When an array, each row is a coordinate in a D-dimensional space - + such as ``histogramdd(np.array([p1, p2, p3]))``. + * When an array_like, each element is the list of values for single + coordinate - such as ``histogramdd((X, Y, Z))``. + + The first form should be preferred. + + bins : sequence or int, optional + The bin specification: + + * A sequence of arrays describing the monotonically increasing bin + edges along each dimension. + * The number of bins for each dimension (nx, ny, ... =bins) + * The number of bins for all dimensions (nx=ny=...=bins). + + range : sequence, optional + A sequence of length D, each an optional (lower, upper) tuple giving + the outer bin edges to be used if the edges are not given explicitly in + `bins`. + An entry of None in the sequence results in the minimum and maximum + values being used for the corresponding dimension. + The default, None, is equivalent to passing a tuple of D None values. + density : bool, optional + If False, the default, returns the number of samples in each bin. + If True, returns the probability *density* function at the bin, + ``bin_count / sample_count / bin_volume``. + weights : (N,) array_like, optional + An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. + Weights are normalized to 1 if density is True. If density is False, + the values of the returned histogram are equal to the sum of the + weights belonging to the samples falling into each bin. + + Returns + ------- + H : ndarray + The multidimensional histogram of sample x. See density and weights + for the different possible semantics. + edges : list + A list of D arrays describing the bin edges for each dimension. 
+ + See Also + -------- + histogram: 1-D histogram + histogram2d: 2-D histogram + + Examples + -------- + >>> r = np.random.randn(100,3) + >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) + >>> H.shape, edges[0].size, edges[1].size, edges[2].size + ((5, 8, 4), 6, 9, 5) + + """ + + try: + # Sample is an ND-array. + N, D = sample.shape + except (AttributeError, ValueError): + # Sample is a sequence of 1D arrays. + sample = np.atleast_2d(sample).T + N, D = sample.shape + + nbin = np.empty(D, np.intp) + edges = D*[None] + dedges = D*[None] + if weights is not None: + weights = np.asarray(weights) + + try: + M = len(bins) + if M != D: + raise ValueError( + 'The dimension of bins must be equal to the dimension of the ' + 'sample x.') + except TypeError: + # bins is an integer + bins = D*[bins] + + # normalize the range argument + if range is None: + range = (None,) * D + elif len(range) != D: + raise ValueError('range argument must have one entry per dimension') + + # Create edge arrays + for i in _range(D): + if np.ndim(bins[i]) == 0: + if bins[i] < 1: + raise ValueError( + '`bins[{}]` must be positive, when an integer'.format(i)) + smin, smax = _get_outer_edges(sample[:,i], range[i]) + try: + n = operator.index(bins[i]) + + except TypeError as e: + raise TypeError( + "`bins[{}]` must be an integer, when a scalar".format(i) + ) from e + + edges[i] = np.linspace(smin, smax, n + 1) + elif np.ndim(bins[i]) == 1: + edges[i] = np.asarray(bins[i]) + if np.any(edges[i][:-1] > edges[i][1:]): + raise ValueError( + '`bins[{}]` must be monotonically increasing, when an array' + .format(i)) + else: + raise ValueError( + '`bins[{}]` must be a scalar or 1d array'.format(i)) + + nbin[i] = len(edges[i]) + 1 # includes an outlier on each end + dedges[i] = np.diff(edges[i]) + + # Compute the bin number each sample falls into. + Ncount = tuple( + # avoid np.digitize to work around gh-11022 + np.searchsorted(edges[i], sample[:, i], side='right') + for i in _range(D) + ) + + # Using digitize, values that fall on an edge are put in the right bin. + # For the rightmost bin, we want values equal to the right edge to be + # counted in the last bin, and not as an outlier. + for i in _range(D): + # Find which points are on the rightmost edge. + on_edge = (sample[:, i] == edges[i][-1]) + # Shift these points one bin to the left. + Ncount[i][on_edge] -= 1 + + # Compute the sample indices in the flattened histogram matrix. + # This raises an error if the array is too large. + xy = np.ravel_multi_index(Ncount, nbin) + + # Compute the number of repetitions in xy and assign it to the + # flattened histmat. + hist = np.bincount(xy, weights, minlength=nbin.prod()) + + # Shape into a proper matrix + hist = hist.reshape(nbin) + + # This preserves the (bad) behavior observed in gh-7845, for now. + hist = hist.astype(float, casting='safe') + + # Remove outliers (indices 0 and -1 for each dimension). 
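+    # Note: nbin[i] was set to len(edges[i]) + 1 above, so every axis of
+    # `hist` carries one under-range and one over-range slot; the slicing
+    # below keeps only the interior bins (e.g. a 5-bin axis shrinks from 7
+    # slots back to 5).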
+ core = D*(slice(1, -1),) + hist = hist[core] + + if density: + # calculate the probability density function + s = hist.sum() + for i in _range(D): + shape = np.ones(D, int) + shape[i] = nbin[i] - 2 + hist = hist / dedges[i].reshape(shape) + hist /= s + + if (hist.shape != nbin - 2).any(): + raise RuntimeError( + "Internal Shape Error") + return hist, edges diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/histograms.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/histograms.pyi new file mode 100644 index 00000000..ce02718a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/histograms.pyi @@ -0,0 +1,47 @@ +from collections.abc import Sequence +from typing import ( + Literal as L, + Any, + SupportsIndex, +) + +from numpy._typing import ( + NDArray, + ArrayLike, +) + +_BinKind = L[ + "stone", + "auto", + "doane", + "fd", + "rice", + "scott", + "sqrt", + "sturges", +] + +__all__: list[str] + +def histogram_bin_edges( + a: ArrayLike, + bins: _BinKind | SupportsIndex | ArrayLike = ..., + range: None | tuple[float, float] = ..., + weights: None | ArrayLike = ..., +) -> NDArray[Any]: ... + +def histogram( + a: ArrayLike, + bins: _BinKind | SupportsIndex | ArrayLike = ..., + range: None | tuple[float, float] = ..., + density: bool = ..., + weights: None | ArrayLike = ..., +) -> tuple[NDArray[Any], NDArray[Any]]: ... + +def histogramdd( + sample: ArrayLike, + bins: SupportsIndex | ArrayLike = ..., + range: Sequence[tuple[float, float]] = ..., + density: None | bool = ..., + weights: None | ArrayLike = ..., +) -> tuple[NDArray[Any], list[NDArray[Any]]]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/index_tricks.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/index_tricks.py new file mode 100644 index 00000000..6913d2b9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/index_tricks.py @@ -0,0 +1,1046 @@ +import functools +import sys +import math +import warnings + +import numpy as np +from .._utils import set_module +import numpy.core.numeric as _nx +from numpy.core.numeric import ScalarType, array +from numpy.core.numerictypes import issubdtype + +import numpy.matrixlib as matrixlib +from .function_base import diff +from numpy.core.multiarray import ravel_multi_index, unravel_index +from numpy.core import overrides, linspace +from numpy.lib.stride_tricks import as_strided + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_', + 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal', + 'diag_indices', 'diag_indices_from' +] + + +def _ix__dispatcher(*args): + return args + + +@array_function_dispatch(_ix__dispatcher) +def ix_(*args): + """ + Construct an open mesh from multiple sequences. + + This function takes N 1-D sequences and returns N outputs with N + dimensions each, such that the shape is 1 in all but one dimension + and the dimension with the non-unit shape value cycles through all + N dimensions. + + Using `ix_` one can quickly construct index arrays that will index + the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array + ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. + + Parameters + ---------- + args : 1-D sequences + Each sequence should be of integer or boolean type. + Boolean sequences will be interpreted as boolean masks for the + corresponding dimension (equivalent to passing in + ``np.nonzero(boolean_sequence)``). 
+ + Returns + ------- + out : tuple of ndarrays + N arrays with N dimensions each, with N the number of input + sequences. Together these arrays form an open mesh. + + See Also + -------- + ogrid, mgrid, meshgrid + + Examples + -------- + >>> a = np.arange(10).reshape(2, 5) + >>> a + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> ixgrid = np.ix_([0, 1], [2, 4]) + >>> ixgrid + (array([[0], + [1]]), array([[2, 4]])) + >>> ixgrid[0].shape, ixgrid[1].shape + ((2, 1), (1, 2)) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + >>> ixgrid = np.ix_([True, True], [2, 4]) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + >>> ixgrid = np.ix_([True, True], [False, False, True, False, True]) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + """ + out = [] + nd = len(args) + for k, new in enumerate(args): + if not isinstance(new, _nx.ndarray): + new = np.asarray(new) + if new.size == 0: + # Explicitly type empty arrays to avoid float default + new = new.astype(_nx.intp) + if new.ndim != 1: + raise ValueError("Cross index must be 1 dimensional") + if issubdtype(new.dtype, _nx.bool_): + new, = new.nonzero() + new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1)) + out.append(new) + return tuple(out) + + +class nd_grid: + """ + Construct a multi-dimensional "meshgrid". + + ``grid = nd_grid()`` creates an instance which will return a mesh-grid + when indexed. The dimension and number of the output arrays are equal + to the number of indexing dimensions. If the step length is not a + complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then the + integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + If instantiated with an argument of ``sparse=True``, the mesh-grid is + open (or not fleshed out) so that only one-dimension of each returned + argument is greater than 1. + + Parameters + ---------- + sparse : bool, optional + Whether the grid is sparse or not. Default is False. + + Notes + ----- + Two instances of `nd_grid` are made available in the NumPy namespace, + `mgrid` and `ogrid`, approximately defined as:: + + mgrid = nd_grid(sparse=False) + ogrid = nd_grid(sparse=True) + + Users should use these pre-defined instances instead of using `nd_grid` + directly. 
+ """ + + def __init__(self, sparse=False): + self.sparse = sparse + + def __getitem__(self, key): + try: + size = [] + # Mimic the behavior of `np.arange` and use a data type + # which is at least as large as `np.int_` + num_list = [0] + for k in range(len(key)): + step = key[k].step + start = key[k].start + stop = key[k].stop + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + step = abs(step) + size.append(int(step)) + else: + size.append( + int(math.ceil((stop - start) / (step*1.0)))) + num_list += [start, stop, step] + typ = _nx.result_type(*num_list) + if self.sparse: + nn = [_nx.arange(_x, dtype=_t) + for _x, _t in zip(size, (typ,)*len(size))] + else: + nn = _nx.indices(size, typ) + for k, kk in enumerate(key): + step = kk.step + start = kk.start + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + step = int(abs(step)) + if step != 1: + step = (kk.stop - start) / float(step - 1) + nn[k] = (nn[k]*step+start) + if self.sparse: + slobj = [_nx.newaxis]*len(size) + for k in range(len(size)): + slobj[k] = slice(None, None) + nn[k] = nn[k][tuple(slobj)] + slobj[k] = _nx.newaxis + return nn + except (IndexError, TypeError): + step = key.step + stop = key.stop + start = key.start + if start is None: + start = 0 + if isinstance(step, (_nx.complexfloating, complex)): + # Prevent the (potential) creation of integer arrays + step_float = abs(step) + step = length = int(step_float) + if step != 1: + step = (key.stop-start)/float(step-1) + typ = _nx.result_type(start, stop, step_float) + return _nx.arange(0, length, 1, dtype=typ)*step + start + else: + return _nx.arange(start, stop, step) + + +class MGridClass(nd_grid): + """ + An instance which returns a dense multi-dimensional "meshgrid". + + An instance which returns a dense (or fleshed out) mesh-grid + when indexed, so that each returned argument has the same shape. + The dimensions and number of the output arrays are equal to the + number of indexing dimensions. If the step length is not a complex + number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then + the integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + Returns + ------- + mesh-grid `ndarrays` all of the same dimensions + + See Also + -------- + ogrid : like `mgrid` but returns open (not fleshed out) mesh grids + meshgrid: return coordinate matrices from coordinate vectors + r_ : array concatenator + :ref:`how-to-partition` + + Examples + -------- + >>> np.mgrid[0:5, 0:5] + array([[[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]], + [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4]]]) + >>> np.mgrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + + """ + + def __init__(self): + super().__init__(sparse=False) + + +mgrid = MGridClass() + + +class OGridClass(nd_grid): + """ + An instance which returns an open multi-dimensional "meshgrid". + + An instance which returns an open (i.e. not fleshed out) mesh-grid + when indexed, so that only one dimension of each returned array is + greater than 1. The dimension and number of the output arrays are + equal to the number of indexing dimensions. If the step length is + not a complex number, then the stop is not inclusive. 
+ + However, if the step length is a **complex number** (e.g. 5j), then + the integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + Returns + ------- + mesh-grid + `ndarrays` with only one dimension not equal to 1 + + See Also + -------- + mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids + meshgrid: return coordinate matrices from coordinate vectors + r_ : array concatenator + :ref:`how-to-partition` + + Examples + -------- + >>> from numpy import ogrid + >>> ogrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + >>> ogrid[0:5,0:5] + [array([[0], + [1], + [2], + [3], + [4]]), array([[0, 1, 2, 3, 4]])] + + """ + + def __init__(self): + super().__init__(sparse=True) + + +ogrid = OGridClass() + + +class AxisConcatenator: + """ + Translates slice objects to concatenation along an axis. + + For detailed documentation on usage, see `r_`. + """ + # allow ma.mr_ to override this + concatenate = staticmethod(_nx.concatenate) + makemat = staticmethod(matrixlib.matrix) + + def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): + self.axis = axis + self.matrix = matrix + self.trans1d = trans1d + self.ndmin = ndmin + + def __getitem__(self, key): + # handle matrix builder syntax + if isinstance(key, str): + frame = sys._getframe().f_back + mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals) + return mymat + + if not isinstance(key, tuple): + key = (key,) + + # copy attributes, since they can be overridden in the first argument + trans1d = self.trans1d + ndmin = self.ndmin + matrix = self.matrix + axis = self.axis + + objs = [] + # dtypes or scalars for weak scalar handling in result_type + result_type_objs = [] + + for k, item in enumerate(key): + scalar = False + if isinstance(item, slice): + step = item.step + start = item.start + stop = item.stop + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + size = int(abs(step)) + newobj = linspace(start, stop, num=size) + else: + newobj = _nx.arange(start, stop, step) + if ndmin > 1: + newobj = array(newobj, copy=False, ndmin=ndmin) + if trans1d != -1: + newobj = newobj.swapaxes(-1, trans1d) + elif isinstance(item, str): + if k != 0: + raise ValueError("special directives must be the " + "first entry.") + if item in ('r', 'c'): + matrix = True + col = (item == 'c') + continue + if ',' in item: + vec = item.split(',') + try: + axis, ndmin = [int(x) for x in vec[:2]] + if len(vec) == 3: + trans1d = int(vec[2]) + continue + except Exception as e: + raise ValueError( + "unknown special directive {!r}".format(item) + ) from e + try: + axis = int(item) + continue + except (ValueError, TypeError) as e: + raise ValueError("unknown special directive") from e + elif type(item) in ScalarType: + scalar = True + newobj = item + else: + item_ndim = np.ndim(item) + newobj = array(item, copy=False, subok=True, ndmin=ndmin) + if trans1d != -1 and item_ndim < ndmin: + k2 = ndmin - item_ndim + k1 = trans1d + if k1 < 0: + k1 += k2 + 1 + defaxes = list(range(ndmin)) + axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2] + newobj = newobj.transpose(axes) + + objs.append(newobj) + if scalar: + result_type_objs.append(item) + else: + result_type_objs.append(newobj.dtype) + + # Ensure that scalars won't up-cast unless warranted, for 0, drops + # through to error in concatenate. 
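+        # Illustrative note (exact behaviour depends on the active promotion
+        # rules): scalars contribute themselves to result_type while arrays
+        # contribute only their dtypes, so e.g. np.r_[1.0, np.float32([2, 3])]
+        # can stay float32 rather than being up-cast to float64.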
+ if len(result_type_objs) != 0: + final_dtype = _nx.result_type(*result_type_objs) + # concatenate could do cast, but that can be overriden: + objs = [array(obj, copy=False, subok=True, + ndmin=ndmin, dtype=final_dtype) for obj in objs] + + res = self.concatenate(tuple(objs), axis=axis) + + if matrix: + oldndim = res.ndim + res = self.makemat(res) + if oldndim == 1 and col: + res = res.T + return res + + def __len__(self): + return 0 + +# separate classes are used here instead of just making r_ = concatentor(0), +# etc. because otherwise we couldn't get the doc string to come out right +# in help(r_) + + +class RClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the first axis. + + This is a simple way to build up arrays quickly. There are two use cases. + + 1. If the index expression contains comma separated arrays, then stack + them along their first axis. + 2. If the index expression contains slice notation or scalars then create + a 1-D array with a range indicated by the slice notation. + + If slice notation is used, the syntax ``start:stop:step`` is equivalent + to ``np.arange(start, stop, step)`` inside of the brackets. However, if + ``step`` is an imaginary number (i.e. 100j) then its integer portion is + interpreted as a number-of-points desired and the start and stop are + inclusive. In other words ``start:stop:stepj`` is interpreted as + ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. + After expansion of slice notation, all comma separated sequences are + concatenated together. + + Optional character strings placed as the first element of the index + expression can be used to change the output. The strings 'r' or 'c' result + in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) + matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 + (column) matrix is produced. If the result is 2-D then both provide the + same matrix result. + + A string integer specifies which axis to stack multiple comma separated + arrays along. A string of two comma-separated integers allows indication + of the minimum number of dimensions to force each entry into as the + second integer (the axis to concatenate along is still the first integer). + + A string with three comma-separated integers allows specification of the + axis to concatenate along, the minimum number of dimensions to force the + entries to, and which axis should contain the start of the arrays which + are less than the specified number of dimensions. In other words the third + integer allows you to specify where the 1's should be placed in the shape + of the arrays that have their shapes upgraded. By default, they are placed + in the front of the shape tuple. The third argument allows you to specify + where the start of the array should be instead. Thus, a third argument of + '0' would place the 1's at the end of the array shape. Negative integers + specify where in the new shape tuple the last dimension of upgraded arrays + should be placed, so the default is '-1'. + + Parameters + ---------- + Not a function, so takes no parameters + + + Returns + ------- + A concatenated ndarray or matrix. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + c_ : Translates slice objects to concatenation along the second axis. + + Examples + -------- + >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] + array([1, 2, 3, ..., 4, 5, 6]) + >>> np.r_[-1:1:6j, [0]*3, 5, 6] + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. 
, 0. , 0. , 5. , 6. ]) + + String integers specify the axis to concatenate along or the minimum + number of dimensions to force entries into. + + >>> a = np.array([[0, 1, 2], [3, 4, 5]]) + >>> np.r_['-1', a, a] # concatenate along last axis + array([[0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5]]) + >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 + array([[1, 2, 3], + [4, 5, 6]]) + + >>> np.r_['0,2,0', [1,2,3], [4,5,6]] + array([[1], + [2], + [3], + [4], + [5], + [6]]) + >>> np.r_['1,2,0', [1,2,3], [4,5,6]] + array([[1, 4], + [2, 5], + [3, 6]]) + + Using 'r' or 'c' as a first string argument creates a matrix. + + >>> np.r_['r',[1,2,3], [4,5,6]] + matrix([[1, 2, 3, 4, 5, 6]]) + + """ + + def __init__(self): + AxisConcatenator.__init__(self, 0) + + +r_ = RClass() + + +class CClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the second axis. + + This is short-hand for ``np.r_['-1,2,0', index expression]``, which is + useful because of its common occurrence. In particular, arrays will be + stacked along their last axis after being upgraded to at least 2-D with + 1's post-pended to the shape (column vectors made out of 1-D arrays). + + See Also + -------- + column_stack : Stack 1-D arrays as columns into a 2-D array. + r_ : For more detailed documentation. + + Examples + -------- + >>> np.c_[np.array([1,2,3]), np.array([4,5,6])] + array([[1, 4], + [2, 5], + [3, 6]]) + >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] + array([[1, 2, 3, ..., 4, 5, 6]]) + + """ + + def __init__(self): + AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) + + +c_ = CClass() + + +@set_module('numpy') +class ndenumerate: + """ + Multidimensional index iterator. + + Return an iterator yielding pairs of array coordinates and values. + + Parameters + ---------- + arr : ndarray + Input array. + + See Also + -------- + ndindex, flatiter + + Examples + -------- + >>> a = np.array([[1, 2], [3, 4]]) + >>> for index, x in np.ndenumerate(a): + ... print(index, x) + (0, 0) 1 + (0, 1) 2 + (1, 0) 3 + (1, 1) 4 + + """ + + def __init__(self, arr): + self.iter = np.asarray(arr).flat + + def __next__(self): + """ + Standard iterator method, returns the index tuple and array value. + + Returns + ------- + coords : tuple of ints + The indices of the current iteration. + val : scalar + The array element of the current iteration. + + """ + return self.iter.coords, next(self.iter) + + def __iter__(self): + return self + + +@set_module('numpy') +class ndindex: + """ + An N-dimensional iterator object to index arrays. + + Given the shape of an array, an `ndindex` instance iterates over + the N-dimensional index of the array. At each iteration a tuple + of indices is returned, the last dimension is iterated over first. + + Parameters + ---------- + shape : ints, or a single tuple of ints + The size of each dimension of the array can be passed as + individual parameters or as the elements of a tuple. + + See Also + -------- + ndenumerate, flatiter + + Examples + -------- + Dimensions as individual arguments + + >>> for index in np.ndindex(3, 2, 1): + ... print(index) + (0, 0, 0) + (0, 1, 0) + (1, 0, 0) + (1, 1, 0) + (2, 0, 0) + (2, 1, 0) + + Same dimensions - but in a tuple ``(3, 2, 1)`` + + >>> for index in np.ndindex((3, 2, 1)): + ... 
print(index) + (0, 0, 0) + (0, 1, 0) + (1, 0, 0) + (1, 1, 0) + (2, 0, 0) + (2, 1, 0) + + """ + + def __init__(self, *shape): + if len(shape) == 1 and isinstance(shape[0], tuple): + shape = shape[0] + x = as_strided(_nx.zeros(1), shape=shape, + strides=_nx.zeros_like(shape)) + self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], + order='C') + + def __iter__(self): + return self + + def ndincr(self): + """ + Increment the multi-dimensional index by one. + + This method is for backward compatibility only: do not use. + + .. deprecated:: 1.20.0 + This method has been advised against since numpy 1.8.0, but only + started emitting DeprecationWarning as of this version. + """ + # NumPy 1.20.0, 2020-09-08 + warnings.warn( + "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead", + DeprecationWarning, stacklevel=2) + next(self) + + def __next__(self): + """ + Standard iterator method, updates the index and returns the index + tuple. + + Returns + ------- + val : tuple of ints + Returns a tuple containing the indices of the current + iteration. + + """ + next(self._it) + return self._it.multi_index + + +# You can do all this with slice() plus a few special objects, +# but there's a lot to remember. This version is simpler because +# it uses the standard array indexing syntax. +# +# Written by Konrad Hinsen +# last revision: 1999-7-23 +# +# Cosmetic changes by T. Oliphant 2001 +# +# + +class IndexExpression: + """ + A nicer way to build up index tuples for arrays. + + .. note:: + Use one of the two predefined instances `index_exp` or `s_` + rather than directly using `IndexExpression`. + + For any index combination, including slicing and axis insertion, + ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any + array `a`. However, ``np.index_exp[indices]`` can be used anywhere + in Python code and returns a tuple of slice objects that can be + used in the construction of complex index expressions. + + Parameters + ---------- + maketuple : bool + If True, always returns a tuple. + + See Also + -------- + index_exp : Predefined instance that always returns a tuple: + `index_exp = IndexExpression(maketuple=True)`. + s_ : Predefined instance without tuple conversion: + `s_ = IndexExpression(maketuple=False)`. + + Notes + ----- + You can do all this with `slice()` plus a few special objects, + but there's a lot to remember and this version is simpler because + it uses the standard array indexing syntax. + + Examples + -------- + >>> np.s_[2::2] + slice(2, None, 2) + >>> np.index_exp[2::2] + (slice(2, None, 2),) + + >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] + array([2, 4]) + + """ + + def __init__(self, maketuple): + self.maketuple = maketuple + + def __getitem__(self, item): + if self.maketuple and not isinstance(item, tuple): + return (item,) + else: + return item + + +index_exp = IndexExpression(maketuple=True) +s_ = IndexExpression(maketuple=False) + +# End contribution from Konrad. + + +# The following functions complement those in twodim_base, but are +# applicable to N-dimensions. + + +def _fill_diagonal_dispatcher(a, val, wrap=None): + return (a,) + + +@array_function_dispatch(_fill_diagonal_dispatcher) +def fill_diagonal(a, val, wrap=False): + """Fill the main diagonal of the given array of any dimensionality. + + For an array `a` with ``a.ndim >= 2``, the diagonal is the list of + locations with indices ``a[i, ..., i]`` all identical. This function + modifies the input array in-place, it does not return a value. 
+ + Parameters + ---------- + a : array, at least 2-D. + Array whose diagonal is to be filled, it gets modified in-place. + + val : scalar or array_like + Value(s) to write on the diagonal. If `val` is scalar, the value is + written along the diagonal. If array-like, the flattened `val` is + written along the diagonal, repeating if necessary to fill all + diagonal entries. + + wrap : bool + For tall matrices in NumPy version up to 1.6.2, the + diagonal "wrapped" after N columns. You can have this behavior + with this option. This affects only tall matrices. + + See also + -------- + diag_indices, diag_indices_from + + Notes + ----- + .. versionadded:: 1.4.0 + + This functionality can be obtained via `diag_indices`, but internally + this version uses a much faster implementation that never constructs the + indices and uses simple slicing. + + Examples + -------- + >>> a = np.zeros((3, 3), int) + >>> np.fill_diagonal(a, 5) + >>> a + array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]]) + + The same function can operate on a 4-D array: + + >>> a = np.zeros((3, 3, 3, 3), int) + >>> np.fill_diagonal(a, 4) + + We only show a few blocks for clarity: + + >>> a[0, 0] + array([[4, 0, 0], + [0, 0, 0], + [0, 0, 0]]) + >>> a[1, 1] + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 0]]) + >>> a[2, 2] + array([[0, 0, 0], + [0, 0, 0], + [0, 0, 4]]) + + The wrap option affects only tall matrices: + + >>> # tall matrices no wrap + >>> a = np.zeros((5, 3), int) + >>> np.fill_diagonal(a, 4) + >>> a + array([[4, 0, 0], + [0, 4, 0], + [0, 0, 4], + [0, 0, 0], + [0, 0, 0]]) + + >>> # tall matrices wrap + >>> a = np.zeros((5, 3), int) + >>> np.fill_diagonal(a, 4, wrap=True) + >>> a + array([[4, 0, 0], + [0, 4, 0], + [0, 0, 4], + [0, 0, 0], + [4, 0, 0]]) + + >>> # wide matrices + >>> a = np.zeros((3, 5), int) + >>> np.fill_diagonal(a, 4, wrap=True) + >>> a + array([[4, 0, 0, 0, 0], + [0, 4, 0, 0, 0], + [0, 0, 4, 0, 0]]) + + The anti-diagonal can be filled by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.zeros((3, 3), int); + >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip + >>> a + array([[0, 0, 1], + [0, 2, 0], + [3, 0, 0]]) + >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip + >>> a + array([[0, 0, 3], + [0, 2, 0], + [1, 0, 0]]) + + Note that the order in which the diagonal is filled varies depending + on the flip function. + """ + if a.ndim < 2: + raise ValueError("array must be at least 2-d") + end = None + if a.ndim == 2: + # Explicit, fast formula for the common case. For 2-d arrays, we + # accept rectangular ones. + step = a.shape[1] + 1 + # This is needed to don't have tall matrix have the diagonal wrap. + if not wrap: + end = a.shape[1] * a.shape[1] + else: + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + if not np.all(diff(a.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + step = 1 + (np.cumprod(a.shape[:-1])).sum() + + # Write the value out into the diagonal. + a.flat[:end:step] = val + + +@set_module('numpy') +def diag_indices(n, ndim=2): + """ + Return the indices to access the main diagonal of an array. + + This returns a tuple of indices that can be used to access the main + diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape + (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for + ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` + for ``i = [0..n-1]``. 
+ + Parameters + ---------- + n : int + The size, along each dimension, of the arrays for which the returned + indices can be used. + + ndim : int, optional + The number of dimensions. + + See Also + -------- + diag_indices_from + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Create a set of indices to access the diagonal of a (4, 4) array: + + >>> di = np.diag_indices(4) + >>> di + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + >>> a[di] = 100 + >>> a + array([[100, 1, 2, 3], + [ 4, 100, 6, 7], + [ 8, 9, 100, 11], + [ 12, 13, 14, 100]]) + + Now, we create indices to manipulate a 3-D array: + + >>> d3 = np.diag_indices(2, 3) + >>> d3 + (array([0, 1]), array([0, 1]), array([0, 1])) + + And use it to set the diagonal of an array of zeros to 1: + + >>> a = np.zeros((2, 2, 2), dtype=int) + >>> a[d3] = 1 + >>> a + array([[[1, 0], + [0, 0]], + [[0, 0], + [0, 1]]]) + + """ + idx = np.arange(n) + return (idx,) * ndim + + +def _diag_indices_from(arr): + return (arr,) + + +@array_function_dispatch(_diag_indices_from) +def diag_indices_from(arr): + """ + Return the indices to access the main diagonal of an n-dimensional array. + + See `diag_indices` for full details. + + Parameters + ---------- + arr : array, at least 2-D + + See Also + -------- + diag_indices + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + + Create a 4 by 4 array. + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Get the indices of the diagonal elements. + + >>> di = np.diag_indices_from(a) + >>> di + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + + >>> a[di] + array([ 0, 5, 10, 15]) + + This is simply syntactic sugar for diag_indices. + + >>> np.diag_indices(a.shape[0]) + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + + """ + + if not arr.ndim >= 2: + raise ValueError("input array must be at least 2-d") + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. 
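+    # (Same restriction as in fill_diagonal: the indices returned by
+    # diag_indices are only meaningful when every dimension has the same
+    # length, e.g. shapes like (4, 4) or (3, 3, 3).)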
+ if not np.all(diff(arr.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + + return diag_indices(arr.shape[0], arr.ndim) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/index_tricks.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/index_tricks.pyi new file mode 100644 index 00000000..29a6b9e2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/index_tricks.pyi @@ -0,0 +1,162 @@ +from collections.abc import Sequence +from typing import ( + Any, + TypeVar, + Generic, + overload, + Literal, + SupportsIndex, +) + +from numpy import ( + # Circumvent a naming conflict with `AxisConcatenator.matrix` + matrix as _Matrix, + ndenumerate as ndenumerate, + ndindex as ndindex, + ndarray, + dtype, + integer, + str_, + bytes_, + bool_, + int_, + float_, + complex_, + intp, + _OrderCF, + _ModeKind, +) +from numpy._typing import ( + # Arrays + ArrayLike, + _NestedSequence, + _FiniteNestedSequence, + NDArray, + _ArrayLikeInt, + + # DTypes + DTypeLike, + _SupportsDType, + + # Shapes + _ShapeLike, +) + +from numpy.core.multiarray import ( + unravel_index as unravel_index, + ravel_multi_index as ravel_multi_index, +) + +_T = TypeVar("_T") +_DType = TypeVar("_DType", bound=dtype[Any]) +_BoolType = TypeVar("_BoolType", Literal[True], Literal[False]) +_TupType = TypeVar("_TupType", bound=tuple[Any, ...]) +_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) + +__all__: list[str] + +@overload +def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[Any, _DType], ...]: ... +@overload +def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]: ... +@overload +def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[bytes_], ...]: ... +@overload +def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[bool_], ...]: ... +@overload +def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[int_], ...]: ... +@overload +def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[float_], ...]: ... +@overload +def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[complex_], ...]: ... + +class nd_grid(Generic[_BoolType]): + sparse: _BoolType + def __init__(self, sparse: _BoolType = ...) -> None: ... + @overload + def __getitem__( + self: nd_grid[Literal[False]], + key: slice | Sequence[slice], + ) -> NDArray[Any]: ... + @overload + def __getitem__( + self: nd_grid[Literal[True]], + key: slice | Sequence[slice], + ) -> list[NDArray[Any]]: ... + +class MGridClass(nd_grid[Literal[False]]): + def __init__(self) -> None: ... + +mgrid: MGridClass + +class OGridClass(nd_grid[Literal[True]]): + def __init__(self) -> None: ... + +ogrid: OGridClass + +class AxisConcatenator: + axis: int + matrix: bool + ndmin: int + trans1d: int + def __init__( + self, + axis: int = ..., + matrix: bool = ..., + ndmin: int = ..., + trans1d: int = ..., + ) -> None: ... + @staticmethod + @overload + def concatenate( # type: ignore[misc] + *a: ArrayLike, axis: SupportsIndex = ..., out: None = ... + ) -> NDArray[Any]: ... + @staticmethod + @overload + def concatenate( + *a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ... + ) -> _ArrayType: ... + @staticmethod + def makemat( + data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ... + ) -> _Matrix[Any, Any]: ... + + # TODO: Sort out this `__getitem__` method + def __getitem__(self, key: Any) -> Any: ... 
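+
+# Illustrative note: because __getitem__ is annotated as returning Any,
+# expressions such as np.r_[1:5] or np.c_[a, b] are inferred as Any by
+# static type checkers; callers that need a typed result must narrow it
+# themselves, for example typing.cast(NDArray[int_], np.r_[1, 2, 3]).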
+ +class RClass(AxisConcatenator): + axis: Literal[0] + matrix: Literal[False] + ndmin: Literal[1] + trans1d: Literal[-1] + def __init__(self) -> None: ... + +r_: RClass + +class CClass(AxisConcatenator): + axis: Literal[-1] + matrix: Literal[False] + ndmin: Literal[2] + trans1d: Literal[0] + def __init__(self) -> None: ... + +c_: CClass + +class IndexExpression(Generic[_BoolType]): + maketuple: _BoolType + def __init__(self, maketuple: _BoolType) -> None: ... + @overload + def __getitem__(self, item: _TupType) -> _TupType: ... # type: ignore[misc] + @overload + def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> tuple[_T]: ... + @overload + def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ... + +index_exp: IndexExpression[Literal[True]] +s_: IndexExpression[Literal[False]] + +def fill_diagonal(a: ndarray[Any, Any], val: Any, wrap: bool = ...) -> None: ... +def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[int_], ...]: ... +def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[int_], ...]: ... + +# NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex` diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/mixins.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/mixins.py new file mode 100644 index 00000000..117cc785 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/mixins.py @@ -0,0 +1,177 @@ +"""Mixin classes for custom array types that don't inherit from ndarray.""" +from numpy.core import umath as um + + +__all__ = ['NDArrayOperatorsMixin'] + + +def _disables_array_ufunc(obj): + """True when __array_ufunc__ is set to None.""" + try: + return obj.__array_ufunc__ is None + except AttributeError: + return False + + +def _binary_method(ufunc, name): + """Implement a forward binary method with a ufunc, e.g., __add__.""" + def func(self, other): + if _disables_array_ufunc(other): + return NotImplemented + return ufunc(self, other) + func.__name__ = '__{}__'.format(name) + return func + + +def _reflected_binary_method(ufunc, name): + """Implement a reflected binary method with a ufunc, e.g., __radd__.""" + def func(self, other): + if _disables_array_ufunc(other): + return NotImplemented + return ufunc(other, self) + func.__name__ = '__r{}__'.format(name) + return func + + +def _inplace_binary_method(ufunc, name): + """Implement an in-place binary method with a ufunc, e.g., __iadd__.""" + def func(self, other): + return ufunc(self, other, out=(self,)) + func.__name__ = '__i{}__'.format(name) + return func + + +def _numeric_methods(ufunc, name): + """Implement forward, reflected and inplace binary methods with a ufunc.""" + return (_binary_method(ufunc, name), + _reflected_binary_method(ufunc, name), + _inplace_binary_method(ufunc, name)) + + +def _unary_method(ufunc, name): + """Implement a unary special method with a ufunc.""" + def func(self): + return ufunc(self) + func.__name__ = '__{}__'.format(name) + return func + + +class NDArrayOperatorsMixin: + """Mixin defining all operator special methods using __array_ufunc__. + + This class implements the special methods for almost all of Python's + builtin operators defined in the `operator` module, including comparisons + (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by + deferring to the ``__array_ufunc__`` method, which subclasses must + implement. 
+
+    It is useful for writing classes that do not inherit from `numpy.ndarray`,
+    but that should support arithmetic and numpy universal functions like
+    arrays as described in `A Mechanism for Overriding Ufuncs
+    <https://numpy.org/neps/nep-0013-ufunc-overrides.html>`_.
+
+    As a trivial example, consider this implementation of an ``ArrayLike``
+    class that simply wraps a NumPy array and ensures that the result of any
+    arithmetic operation is also an ``ArrayLike`` object::
+
+        class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
+            def __init__(self, value):
+                self.value = np.asarray(value)
+
+            # One might also consider adding the built-in list type to this
+            # list, to support operations like np.add(array_like, list)
+            _HANDLED_TYPES = (np.ndarray, numbers.Number)
+
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                out = kwargs.get('out', ())
+                for x in inputs + out:
+                    # Only support operations with instances of _HANDLED_TYPES.
+                    # Use ArrayLike instead of type(self) for isinstance to
+                    # allow subclasses that don't override __array_ufunc__ to
+                    # handle ArrayLike objects.
+                    if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
+                        return NotImplemented
+
+                # Defer to the implementation of the ufunc on unwrapped values.
+                inputs = tuple(x.value if isinstance(x, ArrayLike) else x
+                               for x in inputs)
+                if out:
+                    kwargs['out'] = tuple(
+                        x.value if isinstance(x, ArrayLike) else x
+                        for x in out)
+                result = getattr(ufunc, method)(*inputs, **kwargs)
+
+                if type(result) is tuple:
+                    # multiple return values
+                    return tuple(type(self)(x) for x in result)
+                elif method == 'at':
+                    # no return value
+                    return None
+                else:
+                    # one return value
+                    return type(self)(result)
+
+            def __repr__(self):
+                return '%s(%r)' % (type(self).__name__, self.value)
+
+    In interactions between ``ArrayLike`` objects and numbers or numpy arrays,
+    the result is always another ``ArrayLike``:
+
+    >>> x = ArrayLike([1, 2, 3])
+    >>> x - 1
+    ArrayLike(array([0, 1, 2]))
+    >>> 1 - x
+    ArrayLike(array([ 0, -1, -2]))
+    >>> np.arange(3) - x
+    ArrayLike(array([-1, -1, -1]))
+    >>> x - np.arange(3)
+    ArrayLike(array([1, 1, 1]))
+
+    Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations
+    with arbitrary, unrecognized types. This ensures that interactions with
+    ArrayLike preserve a well-defined casting hierarchy.
+
+    .. versionadded:: 1.13
+    """
+    __slots__ = ()
+    # Like np.ndarray, this mixin class implements "Option 1" from the ufunc
+    # overrides NEP.
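+    # How the factories below work: _numeric_methods(ufunc, name) returns
+    # the (forward, reflected, in-place) method triple, so __add__(self,
+    # other) simply calls um.add(self, other) unless `other` opts out by
+    # setting __array_ufunc__ = None, in which case NotImplemented is
+    # returned and Python falls back to other.__radd__.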
+ + # comparisons don't have reflected and in-place versions + __lt__ = _binary_method(um.less, 'lt') + __le__ = _binary_method(um.less_equal, 'le') + __eq__ = _binary_method(um.equal, 'eq') + __ne__ = _binary_method(um.not_equal, 'ne') + __gt__ = _binary_method(um.greater, 'gt') + __ge__ = _binary_method(um.greater_equal, 'ge') + + # numeric methods + __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add') + __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub') + __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul') + __matmul__, __rmatmul__, __imatmul__ = _numeric_methods( + um.matmul, 'matmul') + # Python 3 does not use __div__, __rdiv__, or __idiv__ + __truediv__, __rtruediv__, __itruediv__ = _numeric_methods( + um.true_divide, 'truediv') + __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods( + um.floor_divide, 'floordiv') + __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod') + __divmod__ = _binary_method(um.divmod, 'divmod') + __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod') + # __idivmod__ does not exist + # TODO: handle the optional third argument for __pow__? + __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow') + __lshift__, __rlshift__, __ilshift__ = _numeric_methods( + um.left_shift, 'lshift') + __rshift__, __rrshift__, __irshift__ = _numeric_methods( + um.right_shift, 'rshift') + __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and') + __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor') + __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or') + + # unary methods + __neg__ = _unary_method(um.negative, 'neg') + __pos__ = _unary_method(um.positive, 'pos') + __abs__ = _unary_method(um.absolute, 'abs') + __invert__ = _unary_method(um.invert, 'invert') diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/mixins.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/mixins.pyi new file mode 100644 index 00000000..c5744213 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/mixins.pyi @@ -0,0 +1,74 @@ +from abc import ABCMeta, abstractmethod +from typing import Literal as L, Any + +from numpy import ufunc + +__all__: list[str] + +# NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass, +# even though it's reliant on subclasses implementing `__array_ufunc__` + +# NOTE: The accepted input- and output-types of the various dunders are +# completely dependent on how `__array_ufunc__` is implemented. +# As such, only little type safety can be provided here. + +class NDArrayOperatorsMixin(metaclass=ABCMeta): + @abstractmethod + def __array_ufunc__( + self, + ufunc: ufunc, + method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + def __lt__(self, other: Any) -> Any: ... + def __le__(self, other: Any) -> Any: ... + def __eq__(self, other: Any) -> Any: ... + def __ne__(self, other: Any) -> Any: ... + def __gt__(self, other: Any) -> Any: ... + def __ge__(self, other: Any) -> Any: ... + def __add__(self, other: Any) -> Any: ... + def __radd__(self, other: Any) -> Any: ... + def __iadd__(self, other: Any) -> Any: ... + def __sub__(self, other: Any) -> Any: ... + def __rsub__(self, other: Any) -> Any: ... + def __isub__(self, other: Any) -> Any: ... + def __mul__(self, other: Any) -> Any: ... + def __rmul__(self, other: Any) -> Any: ... + def __imul__(self, other: Any) -> Any: ... + def __matmul__(self, other: Any) -> Any: ... 
+ def __rmatmul__(self, other: Any) -> Any: ... + def __imatmul__(self, other: Any) -> Any: ... + def __truediv__(self, other: Any) -> Any: ... + def __rtruediv__(self, other: Any) -> Any: ... + def __itruediv__(self, other: Any) -> Any: ... + def __floordiv__(self, other: Any) -> Any: ... + def __rfloordiv__(self, other: Any) -> Any: ... + def __ifloordiv__(self, other: Any) -> Any: ... + def __mod__(self, other: Any) -> Any: ... + def __rmod__(self, other: Any) -> Any: ... + def __imod__(self, other: Any) -> Any: ... + def __divmod__(self, other: Any) -> Any: ... + def __rdivmod__(self, other: Any) -> Any: ... + def __pow__(self, other: Any) -> Any: ... + def __rpow__(self, other: Any) -> Any: ... + def __ipow__(self, other: Any) -> Any: ... + def __lshift__(self, other: Any) -> Any: ... + def __rlshift__(self, other: Any) -> Any: ... + def __ilshift__(self, other: Any) -> Any: ... + def __rshift__(self, other: Any) -> Any: ... + def __rrshift__(self, other: Any) -> Any: ... + def __irshift__(self, other: Any) -> Any: ... + def __and__(self, other: Any) -> Any: ... + def __rand__(self, other: Any) -> Any: ... + def __iand__(self, other: Any) -> Any: ... + def __xor__(self, other: Any) -> Any: ... + def __rxor__(self, other: Any) -> Any: ... + def __ixor__(self, other: Any) -> Any: ... + def __or__(self, other: Any) -> Any: ... + def __ror__(self, other: Any) -> Any: ... + def __ior__(self, other: Any) -> Any: ... + def __neg__(self) -> Any: ... + def __pos__(self) -> Any: ... + def __abs__(self) -> Any: ... + def __invert__(self) -> Any: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/nanfunctions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/nanfunctions.py new file mode 100644 index 00000000..b3b57086 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/nanfunctions.py @@ -0,0 +1,1887 @@ +""" +Functions that ignore NaN. + +Functions +--------- + +- `nanmin` -- minimum non-NaN value +- `nanmax` -- maximum non-NaN value +- `nanargmin` -- index of minimum non-NaN value +- `nanargmax` -- index of maximum non-NaN value +- `nansum` -- sum of non-NaN values +- `nanprod` -- product of non-NaN values +- `nancumsum` -- cumulative sum of non-NaN values +- `nancumprod` -- cumulative product of non-NaN values +- `nanmean` -- mean of non-NaN values +- `nanvar` -- variance of non-NaN values +- `nanstd` -- standard deviation of non-NaN values +- `nanmedian` -- median of non-NaN values +- `nanquantile` -- qth quantile of non-NaN values +- `nanpercentile` -- qth percentile of non-NaN values + +""" +import functools +import warnings +import numpy as np +from numpy.lib import function_base +from numpy.core import overrides + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean', + 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod', + 'nancumsum', 'nancumprod', 'nanquantile' + ] + + +def _nan_mask(a, out=None): + """ + Parameters + ---------- + a : array-like + Input array with at least 1 dimension. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output and will prevent the allocation of a new array. + + Returns + ------- + y : bool ndarray or True + A bool array where ``np.nan`` positions are marked with ``False`` + and other positions are marked with ``True``. 
If the type of ``a`` + is such that it can't possibly contain ``np.nan``, returns ``True``. + """ + # we assume that a is an array for this private function + + if a.dtype.kind not in 'fc': + return True + + y = np.isnan(a, out=out) + y = np.invert(y, out=y) + return y + +def _replace_nan(a, val): + """ + If `a` is of inexact type, make a copy of `a`, replace NaNs with + the `val` value, and return the copy together with a boolean mask + marking the locations where NaNs were present. If `a` is not of + inexact type, do nothing and return `a` together with a mask of None. + + Note that scalars will end up as array scalars, which is important + for using the result as the value of the out argument in some + operations. + + Parameters + ---------- + a : array-like + Input array. + val : float + NaN values are set to val before doing the operation. + + Returns + ------- + y : ndarray + If `a` is of inexact type, return a copy of `a` with the NaNs + replaced by the fill value, otherwise return `a`. + mask: {bool, None} + If `a` is of inexact type, return a boolean mask marking locations of + NaNs, otherwise return None. + + """ + a = np.asanyarray(a) + + if a.dtype == np.object_: + # object arrays do not support `isnan` (gh-9009), so make a guess + mask = np.not_equal(a, a, dtype=bool) + elif issubclass(a.dtype.type, np.inexact): + mask = np.isnan(a) + else: + mask = None + + if mask is not None: + a = np.array(a, subok=True, copy=True) + np.copyto(a, val, where=mask) + + return a, mask + + +def _copyto(a, val, mask): + """ + Replace values in `a` with NaN where `mask` is True. This differs from + copyto in that it will deal with the case where `a` is a numpy scalar. + + Parameters + ---------- + a : ndarray or numpy scalar + Array or numpy scalar some of whose values are to be replaced + by val. + val : numpy scalar + Value used a replacement. + mask : ndarray, scalar + Boolean array. Where True the corresponding element of `a` is + replaced by `val`. Broadcasts. + + Returns + ------- + res : ndarray, scalar + Array with elements replaced or scalar `val`. + + """ + if isinstance(a, np.ndarray): + np.copyto(a, val, where=mask, casting='unsafe') + else: + a = a.dtype.type(val) + return a + + +def _remove_nan_1d(arr1d, overwrite_input=False): + """ + Equivalent to arr1d[~arr1d.isnan()], but in a different order + + Presumably faster as it incurs fewer copies + + Parameters + ---------- + arr1d : ndarray + Array to remove nans from + overwrite_input : bool + True if `arr1d` can be modified in place + + Returns + ------- + res : ndarray + Array with nan elements removed + overwrite_input : bool + True if `res` can be modified in place, given the constraint on the + input + """ + if arr1d.dtype == object: + # object arrays do not support `isnan` (gh-9009), so make a guess + c = np.not_equal(arr1d, arr1d, dtype=bool) + else: + c = np.isnan(arr1d) + + s = np.nonzero(c)[0] + if s.size == arr1d.size: + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=6) + return arr1d[:0], True + elif s.size == 0: + return arr1d, overwrite_input + else: + if not overwrite_input: + arr1d = arr1d.copy() + # select non-nans at end of array + enonan = arr1d[-s.size:][~c[-s.size:]] + # fill nans in beginning of array with non-nans of end + arr1d[s[:enonan.size]] = enonan + + return arr1d[:-s.size], True + + +def _divide_by_count(a, b, out=None): + """ + Compute a/b ignoring invalid results. If `a` is an array the division + is done in place. 
If `a` is a scalar, then its type is preserved in the + output. If out is None, then a is used instead so that the division + is in place. Note that this is only called with `a` an inexact type. + + Parameters + ---------- + a : {ndarray, numpy scalar} + Numerator. Expected to be of inexact type but not checked. + b : {ndarray, numpy scalar} + Denominator. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. + + Returns + ------- + ret : {ndarray, numpy scalar} + The return value is a/b. If `a` was an ndarray the division is done + in place. If `a` is a numpy scalar, the division preserves its type. + + """ + with np.errstate(invalid='ignore', divide='ignore'): + if isinstance(a, np.ndarray): + if out is None: + return np.divide(a, b, out=a, casting='unsafe') + else: + return np.divide(a, b, out=out, casting='unsafe') + else: + if out is None: + # Precaution against reduced object arrays + try: + return a.dtype.type(a / b) + except AttributeError: + return a / b + else: + # This is questionable, but currently a numpy scalar can + # be output to a zero dimensional array. + return np.divide(a, b, out=out, casting='unsafe') + + +def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nanmin_dispatcher) +def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return minimum of an array or minimum along an axis, ignoring any NaNs. + When all-NaN slices are encountered a ``RuntimeWarning`` is raised and + Nan is returned for that slice. + + Parameters + ---------- + a : array_like + Array containing numbers whose minimum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the minimum is computed. The default is to compute + the minimum of the flattened array. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. + + .. versionadded:: 1.8.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `min` method + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + + .. versionadded:: 1.8.0 + initial : scalar, optional + The maximum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + where : array_like of bool, optional + Elements to compare for the minimum. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + nanmin : ndarray + An array with the same shape as `a`, with the specified axis + removed. If `a` is a 0-d array, or if axis is None, an ndarray + scalar is returned. The same dtype as `a` is returned. + + See Also + -------- + nanmax : + The maximum value of an array along a given axis, ignoring any NaNs. 
+ amin : + The minimum value of an array along a given axis, propagating any NaNs. + fmin : + Element-wise minimum of two arrays, ignoring any NaNs. + minimum : + Element-wise minimum of two arrays, propagating any NaNs. + isnan : + Shows which elements are Not a Number (NaN). + isfinite: + Shows which elements are neither NaN nor infinity. + + amax, fmax, maximum + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Positive infinity is treated as a very large number and negative + infinity is treated as a very small (i.e. negative) number. + + If the input has a integer type the function is equivalent to np.min. + + Examples + -------- + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmin(a) + 1.0 + >>> np.nanmin(a, axis=0) + array([1., 2.]) + >>> np.nanmin(a, axis=1) + array([1., 3.]) + + When positive infinity and negative infinity are present: + + >>> np.nanmin([1, 2, np.nan, np.inf]) + 1.0 + >>> np.nanmin([1, 2, np.nan, np.NINF]) + -inf + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if initial is not np._NoValue: + kwargs['initial'] = initial + if where is not np._NoValue: + kwargs['where'] = where + + if type(a) is np.ndarray and a.dtype != np.object_: + # Fast, but not safe for subclasses of ndarray, or object arrays, + # which do not implement isnan (gh-9009), or fmin correctly (gh-8975) + res = np.fmin.reduce(a, axis=axis, out=out, **kwargs) + if np.isnan(res).any(): + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=2) + else: + # Slow, but safe for subclasses of ndarray + a, mask = _replace_nan(a, +np.inf) + res = np.amin(a, axis=axis, out=out, **kwargs) + if mask is None: + return res + + # Check for all-NaN axis + kwargs.pop("initial", None) + mask = np.all(mask, axis=axis, **kwargs) + if np.any(mask): + res = _copyto(res, np.nan, mask) + warnings.warn("All-NaN axis encountered", RuntimeWarning, + stacklevel=2) + return res + + +def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nanmax_dispatcher) +def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the maximum of an array or maximum along an axis, ignoring any + NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is + raised and NaN is returned for that slice. + + Parameters + ---------- + a : array_like + Array containing numbers whose maximum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the maximum is computed. The default is to compute + the maximum of the flattened array. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. + + .. versionadded:: 1.8.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `max` method + of sub-classes of `ndarray`. 
If the sub-class's method
+        does not implement `keepdims`, an exception will be raised.
+
+        .. versionadded:: 1.8.0
+    initial : scalar, optional
+        The minimum value of an output element. Must be present to allow
+        computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+    where : array_like of bool, optional
+        Elements to compare for the maximum. See `~numpy.ufunc.reduce`
+        for details.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    nanmax : ndarray
+        An array with the same shape as `a`, with the specified axis removed.
+        If `a` is a 0-d array, or if axis is None, an ndarray scalar is
+        returned. The same dtype as `a` is returned.
+
+    See Also
+    --------
+    nanmin :
+        The minimum value of an array along a given axis, ignoring any NaNs.
+    amax :
+        The maximum value of an array along a given axis, propagating any NaNs.
+    fmax :
+        Element-wise maximum of two arrays, ignoring any NaNs.
+    maximum :
+        Element-wise maximum of two arrays, propagating any NaNs.
+    isnan :
+        Shows which elements are Not a Number (NaN).
+    isfinite:
+        Shows which elements are neither NaN nor infinity.
+
+    amin, fmin, minimum
+
+    Notes
+    -----
+    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+    Positive infinity is treated as a very large number and negative
+    infinity is treated as a very small (i.e. negative) number.
+
+    If the input has an integer type the function is equivalent to np.max.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, np.nan]])
+    >>> np.nanmax(a)
+    3.0
+    >>> np.nanmax(a, axis=0)
+    array([3., 2.])
+    >>> np.nanmax(a, axis=1)
+    array([2., 3.])
+
+    When positive infinity and negative infinity are present:
+
+    >>> np.nanmax([1, 2, np.nan, np.NINF])
+    2.0
+    >>> np.nanmax([1, 2, np.nan, np.inf])
+    inf
+
+    """
+    kwargs = {}
+    if keepdims is not np._NoValue:
+        kwargs['keepdims'] = keepdims
+    if initial is not np._NoValue:
+        kwargs['initial'] = initial
+    if where is not np._NoValue:
+        kwargs['where'] = where
+
+    if type(a) is np.ndarray and a.dtype != np.object_:
+        # Fast, but not safe for subclasses of ndarray, or object arrays,
+        # which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
+        res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
+        if np.isnan(res).any():
+            warnings.warn("All-NaN slice encountered", RuntimeWarning,
+                          stacklevel=2)
+    else:
+        # Slow, but safe for subclasses of ndarray
+        a, mask = _replace_nan(a, -np.inf)
+        res = np.amax(a, axis=axis, out=out, **kwargs)
+        if mask is None:
+            return res
+
+        # Check for all-NaN axis
+        kwargs.pop("initial", None)
+        mask = np.all(mask, axis=axis, **kwargs)
+        if np.any(mask):
+            res = _copyto(res, np.nan, mask)
+            warnings.warn("All-NaN axis encountered", RuntimeWarning,
+                          stacklevel=2)
+    return res
+
+
+def _nanargmin_dispatcher(a, axis=None, out=None, *, keepdims=None):
+    return (a,)
+
+
+@array_function_dispatch(_nanargmin_dispatcher)
+def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue):
+    """
+    Return the indices of the minimum values in the specified axis, ignoring
+    NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
+    cannot be trusted if a slice contains only NaNs and Infs.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : int, optional
+        Axis along which to operate. By default flattened input is used.
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+
+        .. 
versionadded:: 1.22.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray + An array of indices or a single index value. + + See Also + -------- + argmin, nanargmax + + Examples + -------- + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmin(a) + 0 + >>> np.nanargmin(a) + 2 + >>> np.nanargmin(a, axis=0) + array([1, 1]) + >>> np.nanargmin(a, axis=1) + array([1, 0]) + + """ + a, mask = _replace_nan(a, np.inf) + if mask is not None: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + res = np.argmin(a, axis=axis, out=out, keepdims=keepdims) + return res + + +def _nanargmax_dispatcher(a, axis=None, out=None, *, keepdims=None): + return (a,) + + +@array_function_dispatch(_nanargmax_dispatcher) +def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue): + """ + Return the indices of the maximum values in the specified axis ignoring + NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the + results cannot be trusted if a slice contains only NaNs and -Infs. + + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default flattened input is used. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + + .. versionadded:: 1.22.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray + An array of indices or a single index value. + + See Also + -------- + argmax, nanargmin + + Examples + -------- + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmax(a) + 0 + >>> np.nanargmax(a) + 1 + >>> np.nanargmax(a, axis=0) + array([1, 0]) + >>> np.nanargmax(a, axis=1) + array([1, 1]) + + """ + a, mask = _replace_nan(a, -np.inf) + if mask is not None: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + res = np.argmax(a, axis=axis, out=out, keepdims=keepdims) + return res + + +def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nansum_dispatcher) +def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Return the sum of array elements over a given axis treating Not a + Numbers (NaNs) as zero. + + In NumPy versions <= 1.9.0 Nan is returned for slices that are all-NaN or + empty. In later versions zero is returned. + + Parameters + ---------- + a : array_like + Array containing numbers whose sum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the sum is computed. The default is to compute the + sum of the flattened array. + dtype : data-type, optional + The type of the returned array and of the accumulator in which the + elements are summed. By default, the dtype of `a` is used. An + exception is when `a` has an integer type with less precision than + the platform (u)intp. 
In that case, the default will be either + (u)int32 or (u)int64 depending on whether the platform is 32 or 64 + bits. For inexact inputs, dtype must be inexact. + + .. versionadded:: 1.8.0 + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``. If provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. The casting of NaN to integer + can yield unexpected results. + + .. versionadded:: 1.8.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + + If the value is anything but the default, then + `keepdims` will be passed through to the `mean` or `sum` methods + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + + .. versionadded:: 1.8.0 + initial : scalar, optional + Starting value for the sum. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + where : array_like of bool, optional + Elements to include in the sum. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + nansum : ndarray. + A new array holding the result is returned unless `out` is + specified, in which it is returned. The result has the same + size as `a`, and the same shape as `a` if `axis` is not None + or `a` is a 1-d array. + + See Also + -------- + numpy.sum : Sum across array propagating NaNs. + isnan : Show which elements are NaN. + isfinite : Show which elements are not NaN or +/-inf. + + Notes + ----- + If both positive and negative infinity are present, the sum will be Not + A Number (NaN). + + Examples + -------- + >>> np.nansum(1) + 1 + >>> np.nansum([1]) + 1 + >>> np.nansum([1, np.nan]) + 1.0 + >>> a = np.array([[1, 1], [1, np.nan]]) + >>> np.nansum(a) + 3.0 + >>> np.nansum(a, axis=0) + array([2., 1.]) + >>> np.nansum([1, np.nan, np.inf]) + inf + >>> np.nansum([1, np.nan, np.NINF]) + -inf + >>> from numpy.testing import suppress_warnings + >>> with suppress_warnings() as sup: + ... sup.filter(RuntimeWarning) + ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present + nan + + """ + a, mask = _replace_nan(a, 0) + return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + initial=initial, where=where) + + +def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nanprod_dispatcher) +def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Return the product of array elements over a given axis treating Not a + Numbers (NaNs) as ones. + + One is returned for slices that are all-NaN or empty. + + .. versionadded:: 1.10.0 + + Parameters + ---------- + a : array_like + Array containing numbers whose product is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the product is computed. The default is to compute + the product of the flattened array. + dtype : data-type, optional + The type of the returned array and of the accumulator in which the + elements are summed. By default, the dtype of `a` is used. An + exception is when `a` has an integer type with less precision than + the platform (u)intp. 
In that case, the default will be either + (u)int32 or (u)int64 depending on whether the platform is 32 or 64 + bits. For inexact inputs, dtype must be inexact. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``. If provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. The casting of NaN to integer + can yield unexpected results. + keepdims : bool, optional + If True, the axes which are reduced are left in the result as + dimensions with size one. With this option, the result will + broadcast correctly against the original `arr`. + initial : scalar, optional + The starting value for this product. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.22.0 + where : array_like of bool, optional + Elements to include in the product. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + nanprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case it is returned. + + See Also + -------- + numpy.prod : Product across array propagating NaNs. + isnan : Show which elements are NaN. + + Examples + -------- + >>> np.nanprod(1) + 1 + >>> np.nanprod([1]) + 1 + >>> np.nanprod([1, np.nan]) + 1.0 + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanprod(a) + 6.0 + >>> np.nanprod(a, axis=0) + array([3., 2.]) + + """ + a, mask = _replace_nan(a, 1) + return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + initial=initial, where=where) + + +def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_nancumsum_dispatcher) +def nancumsum(a, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of array elements over a given axis treating Not a + Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are + encountered and leading NaNs are replaced by zeros. + + Zeros are returned for slices that are all-NaN or empty. + + .. versionadded:: 1.12.0 + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` for + more details. + + Returns + ------- + nancumsum : ndarray. + A new array holding the result is returned unless `out` is + specified, in which it is returned. The result has the same + size as `a`, and the same shape as `a` if `axis` is not None + or `a` is a 1-d array. + + See Also + -------- + numpy.cumsum : Cumulative sum across array propagating NaNs. + isnan : Show which elements are NaN. 
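+
+    Notes
+    -----
+    The NaNs are replaced by zeros before the cumulative sum is taken, so
+    a leading NaN contributes nothing to the running total, for example:
+
+    >>> np.nancumsum([np.nan, 2.0, np.nan])
+    array([0., 2., 2.])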
+ + Examples + -------- + >>> np.nancumsum(1) + array([1]) + >>> np.nancumsum([1]) + array([1]) + >>> np.nancumsum([1, np.nan]) + array([1., 1.]) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nancumsum(a) + array([1., 3., 6., 6.]) + >>> np.nancumsum(a, axis=0) + array([[1., 2.], + [4., 2.]]) + >>> np.nancumsum(a, axis=1) + array([[1., 3.], + [3., 3.]]) + + """ + a, mask = _replace_nan(a, 0) + return np.cumsum(a, axis=axis, dtype=dtype, out=out) + + +def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_nancumprod_dispatcher) +def nancumprod(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product of array elements over a given axis treating Not a + Numbers (NaNs) as one. The cumulative product does not change when NaNs are + encountered and leading NaNs are replaced by ones. + + Ones are returned for slices that are all-NaN or empty. + + .. versionadded:: 1.12.0 + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. By default + the input is flattened. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If *dtype* is not specified, it + defaults to the dtype of `a`, unless `a` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Returns + ------- + nancumprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case it is returned. + + See Also + -------- + numpy.cumprod : Cumulative product across array propagating NaNs. + isnan : Show which elements are NaN. + + Examples + -------- + >>> np.nancumprod(1) + array([1]) + >>> np.nancumprod([1]) + array([1]) + >>> np.nancumprod([1, np.nan]) + array([1., 1.]) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nancumprod(a) + array([1., 2., 6., 6.]) + >>> np.nancumprod(a, axis=0) + array([[1., 2.], + [3., 2.]]) + >>> np.nancumprod(a, axis=1) + array([[1., 2.], + [3., 3.]]) + + """ + a, mask = _replace_nan(a, 1) + return np.cumprod(a, axis=axis, dtype=dtype, out=out) + + +def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + *, where=None): + return (a, out) + + +@array_function_dispatch(_nanmean_dispatcher) +def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + *, where=np._NoValue): + """ + Compute the arithmetic mean along the specified axis, ignoring NaNs. + + Returns the average of the array elements. The average is taken over + the flattened array by default, otherwise over the specified axis. + `float64` intermediate and return values are used for integer inputs. + + For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the means are computed. The default is to compute + the mean of the flattened array. + dtype : data-type, optional + Type to use in computing the mean. 
For integer inputs, the default + is `float64`; for inexact inputs, it is the same as the input + dtype. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `mean` or `sum` methods + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + where : array_like of bool, optional + Elements to include in the mean. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + m : ndarray, see dtype parameter above + If `out=None`, returns a new array containing the mean values, + otherwise a reference to the output array is returned. Nan is + returned for slices that contain only NaNs. + + See Also + -------- + average : Weighted average + mean : Arithmetic mean taken while not ignoring NaNs + var, nanvar + + Notes + ----- + The arithmetic mean is the sum of the non-NaN elements along the axis + divided by the number of non-NaN elements. + + Note that for floating-point input, the mean is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32`. Specifying a + higher-precision accumulator using the `dtype` keyword can alleviate + this issue. + + Examples + -------- + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanmean(a) + 2.6666666666666665 + >>> np.nanmean(a, axis=0) + array([2., 4.]) + >>> np.nanmean(a, axis=1) + array([1., 3.5]) # may vary + + """ + arr, mask = _replace_nan(a, 0) + if mask is None: + return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + + if dtype is not None: + dtype = np.dtype(dtype) + if dtype is not None and not issubclass(dtype.type, np.inexact): + raise TypeError("If a is inexact, then dtype must be inexact") + if out is not None and not issubclass(out.dtype.type, np.inexact): + raise TypeError("If a is inexact, then out must be inexact") + + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims, + where=where) + tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + avg = _divide_by_count(tot, cnt, out=out) + + isbad = (cnt == 0) + if isbad.any(): + warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) + # NaN is the only possible bad value, so no further + # action is needed to handle bad results. + return avg + + +def _nanmedian1d(arr1d, overwrite_input=False): + """ + Private function for rank 1 arrays. Compute the median ignoring NaNs. + See nanmedian for parameter usage + """ + arr1d_parsed, overwrite_input = _remove_nan_1d( + arr1d, overwrite_input=overwrite_input, + ) + + if arr1d_parsed.size == 0: + # Ensure that a nan-esque scalar of the appropriate type (and unit) + # is returned for `timedelta64` and `complexfloating` + return arr1d[-1] + + return np.median(arr1d_parsed, overwrite_input=overwrite_input) + + +def _nanmedian(a, axis=None, out=None, overwrite_input=False): + """ + Private function that doesn't support extended axis or keepdims. 
+ These methods are extended to this function using _ureduce + See nanmedian for parameter usage + + """ + if axis is None or a.ndim == 1: + part = a.ravel() + if out is None: + return _nanmedian1d(part, overwrite_input) + else: + out[...] = _nanmedian1d(part, overwrite_input) + return out + else: + # for small medians use sort + indexing which is still faster than + # apply_along_axis + # benchmarked with shuffled (50, 50, x) containing a few NaN + if a.shape[axis] < 600: + return _nanmedian_small(a, axis, out, overwrite_input) + result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input) + if out is not None: + out[...] = result + return result + + +def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): + """ + sort + indexing median, faster for small medians along multiple + dimensions due to the high overhead of apply_along_axis + + see nanmedian for parameter usage + """ + a = np.ma.masked_array(a, np.isnan(a)) + m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input) + for i in range(np.count_nonzero(m.mask.ravel())): + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=5) + + fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan + if out is not None: + out[...] = m.filled(fill_value) + return out + return m.filled(fill_value) + + +def _nanmedian_dispatcher( + a, axis=None, out=None, overwrite_input=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nanmedian_dispatcher) +def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue): + """ + Compute the median along the specified axis, while ignoring NaNs. + + Returns the median of the array elements. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : {int, sequence of int, None}, optional + Axis or axes along which the medians are computed. The default + is to compute the median along a flattened version of the array. + A sequence of axes is supported since version 1.9.0. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array `a` for + calculations. The input array will be modified by the call to + `median`. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. If `overwrite_input` is ``True`` and `a` is not already an + `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + Returns + ------- + median : ndarray + A new array holding the result. If the input contains integers + or floats smaller than ``float64``, then the output data-type is + ``np.float64``. Otherwise, the data-type of the output is the + same as that of the input. If `out` is specified, that array is + returned instead. 
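+
+    Warns
+    -----
+    RuntimeWarning
+        If a slice contains only NaNs; NaN is returned for that slice.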
+ + See Also + -------- + mean, median, percentile + + Notes + ----- + Given a vector ``V`` of length ``N``, the median of ``V`` is the + middle value of a sorted copy of ``V``, ``V_sorted`` - i.e., + ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two + middle values of ``V_sorted`` when ``N`` is even. + + Examples + -------- + >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) + >>> a[0, 1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.median(a) + nan + >>> np.nanmedian(a) + 3.0 + >>> np.nanmedian(a, axis=0) + array([6.5, 2. , 2.5]) + >>> np.median(a, axis=1) + array([nan, 2.]) + >>> b = a.copy() + >>> np.nanmedian(b, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + >>> b = a.copy() + >>> np.nanmedian(b, axis=None, overwrite_input=True) + 3.0 + >>> assert not np.all(a==b) + + """ + a = np.asanyarray(a) + # apply_along_axis in _nanmedian doesn't handle empty arrays well, + # so deal them upfront + if a.size == 0: + return np.nanmean(a, axis, out=out, keepdims=keepdims) + + return function_base._ureduce(a, func=_nanmedian, keepdims=keepdims, + axis=axis, out=out, + overwrite_input=overwrite_input) + + +def _nanpercentile_dispatcher( + a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, interpolation=None): + return (a, q, out) + + +@array_function_dispatch(_nanpercentile_dispatcher) +def nanpercentile( + a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=np._NoValue, + *, + interpolation=None, +): + """ + Compute the qth percentile of the data along the specified axis, + while ignoring nan values. + + Returns the qth percentile(s) of the array elements. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array, containing + nan values to be ignored. + q : array_like of float + Percentile or sequence of percentiles to compute, which must be + between 0 and 100 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the percentiles are computed. The default + is to compute the percentile(s) along a flattened version of the + array. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape and buffer length as the expected output, but the + type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by + intermediate calculations, to save memory. In this case, the + contents of the input `a` after this function completes is + undefined. + method : str, optional + This parameter specifies the method to use for estimating the + percentile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. 
+ + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + nanmean + nanmedian : equivalent to ``nanpercentile(..., 50)`` + percentile, median, mean + nanquantile : equivalent to nanpercentile, except q in range [0, 1]. + + Notes + ----- + For more information please see `numpy.percentile` + + Examples + -------- + >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) + >>> a[0][1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.percentile(a, 50) + nan + >>> np.nanpercentile(a, 50) + 3.0 + >>> np.nanpercentile(a, 50, axis=0) + array([6.5, 2. , 2.5]) + >>> np.nanpercentile(a, 50, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.nanpercentile(a, 50, axis=0) + >>> out = np.zeros_like(m) + >>> np.nanpercentile(a, 50, axis=0, out=out) + array([6.5, 2. , 2.5]) + >>> m + array([6.5, 2. , 2.5]) + + >>> b = a.copy() + >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + if interpolation is not None: + method = function_base._check_interpolation_as_method( + method, interpolation, "nanpercentile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + q = np.true_divide(q, 100.0) + # undo any decay that the ufunc performed (see gh-13105) + q = np.asanyarray(q) + if not function_base._quantile_is_valid(q): + raise ValueError("Percentiles must be in the range [0, 100]") + return _nanquantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims) + + +def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, interpolation=None): + return (a, q, out) + + +@array_function_dispatch(_nanquantile_dispatcher) +def nanquantile( + a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=np._NoValue, + *, + interpolation=None, +): + """ + Compute the qth quantile of the data along the specified axis, + while ignoring nan values. + Returns the qth quantile(s) of the array elements. + + .. 
versionadded:: 1.15.0 + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array, containing + nan values to be ignored + q : array_like of float + Probability or sequence of probabilities for the quantiles to compute. + Values must be between 0 and 1 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the quantiles are computed. The + default is to compute the quantile(s) along a flattened + version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + method : str, optional + This parameter specifies the method to use for estimating the + quantile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + quantile : scalar or ndarray + If `q` is a single probability and `axis=None`, then the result + is a scalar. If multiple probability levels are given, first axis of + the result corresponds to the quantiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + quantile + nanmean, nanmedian + nanmedian : equivalent to ``nanquantile(..., 0.5)`` + nanpercentile : same as nanquantile, but with q in the range [0, 100]. + + Notes + ----- + For more information please see `numpy.quantile` + + Examples + -------- + >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) + >>> a[0][1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.quantile(a, 0.5) + nan + >>> np.nanquantile(a, 0.5) + 3.0 + >>> np.nanquantile(a, 0.5, axis=0) + array([6.5, 2. 
, 2.5]) + >>> np.nanquantile(a, 0.5, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.nanquantile(a, 0.5, axis=0) + >>> out = np.zeros_like(m) + >>> np.nanquantile(a, 0.5, axis=0, out=out) + array([6.5, 2. , 2.5]) + >>> m + array([6.5, 2. , 2.5]) + >>> b = a.copy() + >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + + if interpolation is not None: + method = function_base._check_interpolation_as_method( + method, interpolation, "nanquantile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + q = np.asanyarray(q) + if not function_base._quantile_is_valid(q): + raise ValueError("Quantiles must be in the range [0, 1]") + return _nanquantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims) + + +def _nanquantile_unchecked( + a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=np._NoValue, +): + """Assumes that q is in [0, 1], and is an ndarray""" + # apply_along_axis in _nanpercentile doesn't handle empty arrays well, + # so deal them upfront + if a.size == 0: + return np.nanmean(a, axis, out=out, keepdims=keepdims) + return function_base._ureduce(a, + func=_nanquantile_ureduce_func, + q=q, + keepdims=keepdims, + axis=axis, + out=out, + overwrite_input=overwrite_input, + method=method) + + +def _nanquantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False, + method="linear"): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanpercentile for parameter usage + """ + if axis is None or a.ndim == 1: + part = a.ravel() + result = _nanquantile_1d(part, q, overwrite_input, method) + else: + result = np.apply_along_axis(_nanquantile_1d, axis, a, q, + overwrite_input, method) + # apply_along_axis fills in collapsed axis with results. + # Move that axis to the beginning to match percentile's + # convention. + if q.ndim != 0: + result = np.moveaxis(result, axis, 0) + + if out is not None: + out[...] = result + return result + + +def _nanquantile_1d(arr1d, q, overwrite_input=False, method="linear"): + """ + Private function for rank 1 arrays. Compute quantile ignoring NaNs. + See nanpercentile for parameter usage + """ + arr1d, overwrite_input = _remove_nan_1d(arr1d, + overwrite_input=overwrite_input) + if arr1d.size == 0: + # convert to scalar + return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()] + + return function_base._quantile_unchecked( + arr1d, q, overwrite_input=overwrite_input, method=method) + + +def _nanvar_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None): + return (a, out) + + +@array_function_dispatch(_nanvar_dispatcher) +def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, + *, where=np._NoValue): + """ + Compute the variance along the specified axis, while ignoring NaNs. + + Returns the variance of the array elements, a measure of the spread of + a distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + For all-NaN slices or slices with zero degrees of freedom, NaN is + returned and a `RuntimeWarning` is raised. + + .. 
versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Array containing numbers whose variance is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the variance is computed. The default is to compute + the variance of the flattened array. + dtype : data-type, optional + Type to use in computing the variance. For arrays of integer type + the default is `float64`; for arrays of float types it is the same as + the array type. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output, but the type is cast if + necessary. + ddof : int, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + where : array_like of bool, optional + Elements to include in the variance. See `~numpy.ufunc.reduce` for + details. + + .. versionadded:: 1.22.0 + + Returns + ------- + variance : ndarray, see dtype parameter above + If `out` is None, return a new array containing the variance, + otherwise return a reference to the output array. If ddof is >= the + number of non-NaN elements in a slice or the slice contains only + NaNs, then the result for that slice is NaN. + + See Also + -------- + std : Standard deviation + mean : Average + var : Variance while not ignoring NaNs + nanstd, nanmean + :ref:`ufuncs-output-type` + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(abs(x - x.mean())**2)``. + + The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite + population. ``ddof=0`` provides a maximum likelihood estimate of the + variance for normally distributed variables. + + Note that for complex numbers, the absolute value is taken before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the variance is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32` (see example + below). Specifying a higher-accuracy accumulator using the ``dtype`` + keyword can alleviate this issue. 
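+
+    For example, passing ``dtype=np.float64`` makes the intermediate sums
+    accumulate in double precision even when the input array is ``float32``.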
+ + For this function to work on sub-classes of ndarray, they must define + `sum` with the kwarg `keepdims` + + Examples + -------- + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanvar(a) + 1.5555555555555554 + >>> np.nanvar(a, axis=0) + array([1., 0.]) + >>> np.nanvar(a, axis=1) + array([0., 0.25]) # may vary + + """ + arr, mask = _replace_nan(a, 0) + if mask is None: + return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims, where=where) + + if dtype is not None: + dtype = np.dtype(dtype) + if dtype is not None and not issubclass(dtype.type, np.inexact): + raise TypeError("If a is inexact, then dtype must be inexact") + if out is not None and not issubclass(out.dtype.type, np.inexact): + raise TypeError("If a is inexact, then out must be inexact") + + # Compute mean + if type(arr) is np.matrix: + _keepdims = np._NoValue + else: + _keepdims = True + # we need to special case matrix for reverse compatibility + # in order for this to work, these sums need to be called with + # keepdims=True, however matrix now raises an error in this case, but + # the reason that it drops the keepdims kwarg is to force keepdims=True + # so this used to work by serendipity. + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims, + where=where) + avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims, where=where) + avg = _divide_by_count(avg, cnt) + + # Compute squared deviation from mean. + np.subtract(arr, avg, out=arr, casting='unsafe', where=where) + arr = _copyto(arr, 0, mask) + if issubclass(arr.dtype.type, np.complexfloating): + sqr = np.multiply(arr, arr.conj(), out=arr, where=where).real + else: + sqr = np.multiply(arr, arr, out=arr, where=where) + + # Compute variance. + var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + + # Precaution against reduced object arrays + try: + var_ndim = var.ndim + except AttributeError: + var_ndim = np.ndim(var) + if var_ndim < cnt.ndim: + # Subclasses of ndarray may ignore keepdims, so check here. + cnt = cnt.squeeze(axis) + dof = cnt - ddof + var = _divide_by_count(var, dof) + + isbad = (dof <= 0) + if np.any(isbad): + warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, + stacklevel=2) + # NaN, inf, or negative numbers are all possible bad + # values, so explicitly replace them with NaN. + var = _copyto(var, np.nan, isbad) + return var + + +def _nanstd_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None): + return (a, out) + + +@array_function_dispatch(_nanstd_dispatcher) +def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, + *, where=np._NoValue): + """ + Compute the standard deviation along the specified axis, while + ignoring NaNs. + + Returns the standard deviation, a measure of the spread of a + distribution, of the non-NaN array elements. The standard deviation is + computed for the flattened array by default, otherwise over the + specified axis. + + For all-NaN slices or slices with zero degrees of freedom, NaN is + returned and a `RuntimeWarning` is raised. + + .. versionadded:: 1.8.0 + + Parameters + ---------- + a : array_like + Calculate the standard deviation of the non-NaN values. + axis : {int, tuple of int, None}, optional + Axis or axes along which the standard deviation is computed. The default is + to compute the standard deviation of the flattened array. + dtype : dtype, optional + Type to use in computing the standard deviation. 
For arrays of + integer type the default is float64, for arrays of float types it + is the same as the array type. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the + calculated values) will be cast if necessary. + ddof : int, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If this value is anything but the default it is passed through + as-is to the relevant functions of the sub-classes. If these + functions do not have a `keepdims` kwarg, a RuntimeError will + be raised. + where : array_like of bool, optional + Elements to include in the standard deviation. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard + deviation, otherwise return a reference to the output array. If + ddof is >= the number of non-NaN elements in a slice or the slice + contains only NaNs, then the result for that slice is NaN. + + See Also + -------- + var, mean, std + nanvar, nanmean + :ref:`ufuncs-output-type` + + Notes + ----- + The standard deviation is the square root of the average of the squared + deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``. + + The average squared deviation is normally calculated as + ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is + specified, the divisor ``N - ddof`` is used instead. In standard + statistical practice, ``ddof=1`` provides an unbiased estimator of the + variance of the infinite population. ``ddof=0`` provides a maximum + likelihood estimate of the variance for normally distributed variables. + The standard deviation computed in this function is the square root of + the estimated variance, so even with ``ddof=1``, it will not be an + unbiased estimate of the standard deviation per se. + + Note that, for complex numbers, `std` takes the absolute value before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the *std* is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for float32 (see example + below). Specifying a higher-accuracy accumulator using the `dtype` + keyword can alleviate this issue. 
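+
+    The result is the square root of `nanvar`, which can be checked
+    directly:
+
+    >>> x = np.array([1.0, np.nan, 3.0])
+    >>> np.nanstd(x) == np.sqrt(np.nanvar(x))
+    True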
+
+    Examples
+    --------
+    >>> a = np.array([[1, np.nan], [3, 4]])
+    >>> np.nanstd(a)
+    1.247219128924647
+    >>> np.nanstd(a, axis=0)
+    array([1., 0.])
+    >>> np.nanstd(a, axis=1)
+    array([0., 0.5]) # may vary
+
+    """
+    var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+                 keepdims=keepdims, where=where)
+    if isinstance(var, np.ndarray):
+        std = np.sqrt(var, out=var)
+    elif hasattr(var, 'dtype'):
+        std = var.dtype.type(np.sqrt(var))
+    else:
+        std = np.sqrt(var)
+    return std
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/nanfunctions.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/nanfunctions.pyi
new file mode 100644
index 00000000..8642055f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/nanfunctions.pyi
@@ -0,0 +1,38 @@
+from numpy.core.fromnumeric import (
+    amin,
+    amax,
+    argmin,
+    argmax,
+    sum,
+    prod,
+    cumsum,
+    cumprod,
+    mean,
+    var,
+    std
+)
+
+from numpy.lib.function_base import (
+    median,
+    percentile,
+    quantile,
+)
+
+__all__: list[str]
+
+# NOTE: In reality these functions are not aliases but distinct functions
+# with identical signatures.
+nanmin = amin
+nanmax = amax
+nanargmin = argmin
+nanargmax = argmax
+nansum = sum
+nanprod = prod
+nancumsum = cumsum
+nancumprod = cumprod
+nanmean = mean
+nanvar = var
+nanstd = std
+nanmedian = median
+nanpercentile = percentile
+nanquantile = quantile
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/npyio.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/npyio.py
new file mode 100644
index 00000000..339b1dc6
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/npyio.py
@@ -0,0 +1,2547 @@
+import os
+import re
+import functools
+import itertools
+import warnings
+import weakref
+import contextlib
+import operator
+from operator import itemgetter, index as opindex, methodcaller
+from collections.abc import Mapping
+
+import numpy as np
+from . import format
+from ._datasource import DataSource
+from numpy.core import overrides
+from numpy.core.multiarray import packbits, unpackbits
+from numpy.core._multiarray_umath import _load_from_filelike
+from numpy.core.overrides import set_array_function_like_doc, set_module
+from ._iotools import (
+    LineSplitter, NameValidator, StringConverter, ConverterError,
+    ConverterLockError, ConversionWarning, _is_string_like,
+    has_nested_fields, flatten_dtype, easy_dtype, _decode_line
+    )
+
+from numpy.compat import (
+    asbytes, asstr, asunicode, os_fspath, os_PathLike,
+    pickle
+    )
+
+
+__all__ = [
+    'savetxt', 'loadtxt', 'genfromtxt',
+    'recfromtxt', 'recfromcsv', 'load', 'save', 'savez',
+    'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
+    ]
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
+class BagObj:
+    """
+    BagObj(obj)
+
+    Convert attribute look-ups to getitems on the object passed in.
+
+    Parameters
+    ----------
+    obj : class instance
+        Object on which attribute look-up is performed.
+
+    Examples
+    --------
+    >>> from numpy.lib.npyio import BagObj as BO
+    >>> class BagDemo:
+    ...     def __getitem__(self, key): # An instance of BagObj(BagDemo)
+    ...                                 # will call this method when any
+    ...                                 # attribute look-up is required
+    ...         result = "Doesn't matter what you want, "
+    ...         return result + "you're gonna get this"
+    ... 
+ >>> demo_obj = BagDemo() + >>> bagobj = BO(demo_obj) + >>> bagobj.hello_there + "Doesn't matter what you want, you're gonna get this" + >>> bagobj.I_can_be_anything + "Doesn't matter what you want, you're gonna get this" + + """ + + def __init__(self, obj): + # Use weakref to make NpzFile objects collectable by refcount + self._obj = weakref.proxy(obj) + + def __getattribute__(self, key): + try: + return object.__getattribute__(self, '_obj')[key] + except KeyError: + raise AttributeError(key) from None + + def __dir__(self): + """ + Enables dir(bagobj) to list the files in an NpzFile. + + This also enables tab-completion in an interpreter or IPython. + """ + return list(object.__getattribute__(self, '_obj').keys()) + + +def zipfile_factory(file, *args, **kwargs): + """ + Create a ZipFile. + + Allows for Zip64, and the `file` argument can accept file, str, or + pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile + constructor. + """ + if not hasattr(file, 'read'): + file = os_fspath(file) + import zipfile + kwargs['allowZip64'] = True + return zipfile.ZipFile(file, *args, **kwargs) + + +class NpzFile(Mapping): + """ + NpzFile(fid) + + A dictionary-like object with lazy-loading of files in the zipped + archive provided on construction. + + `NpzFile` is used to load files in the NumPy ``.npz`` data archive + format. It assumes that files in the archive have a ``.npy`` extension, + other files are ignored. + + The arrays and file strings are lazily loaded on either + getitem access using ``obj['key']`` or attribute lookup using + ``obj.f.key``. A list of all files (without ``.npy`` extensions) can + be obtained with ``obj.files`` and the ZipFile object itself using + ``obj.zip``. + + Attributes + ---------- + files : list of str + List of all files in the archive with a ``.npy`` extension. + zip : ZipFile instance + The ZipFile object initialized with the zipped archive. + f : BagObj instance + An object on which attribute can be performed as an alternative + to getitem access on the `NpzFile` instance itself. + allow_pickle : bool, optional + Allow loading pickled data. Default: False + + .. versionchanged:: 1.16.3 + Made default False in response to CVE-2019-6446. + + pickle_kwargs : dict, optional + Additional keyword arguments to pass on to pickle.load. + These are only useful when loading object arrays saved on + Python 2 when using Python 3. + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Parameters + ---------- + fid : file or str + The zipped archive to open. This is either a file-like object + or a string containing the path to the archive. + own_fid : bool, optional + Whether NpzFile should close the file handle. + Requires that `fid` is a file-like object. 
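+
+    Notes
+    -----
+    Member arrays are decompressed and parsed only when they are first
+    accessed, so opening a large archive is cheap; ``obj['key']`` reads
+    just that one member from the zip file.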
+ + Examples + -------- + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + >>> np.savez(outfile, x=x, y=y) + >>> _ = outfile.seek(0) + + >>> npz = np.load(outfile) + >>> isinstance(npz, np.lib.npyio.NpzFile) + True + >>> npz + NpzFile 'object' with keys x, y + >>> sorted(npz.files) + ['x', 'y'] + >>> npz['x'] # getitem access + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> npz.f.x # attribute lookup + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + # Make __exit__ safe if zipfile_factory raises an exception + zip = None + fid = None + _MAX_REPR_ARRAY_COUNT = 5 + + def __init__(self, fid, own_fid=False, allow_pickle=False, + pickle_kwargs=None, *, + max_header_size=format._MAX_HEADER_SIZE): + # Import is postponed to here since zipfile depends on gzip, an + # optional component of the so-called standard library. + _zip = zipfile_factory(fid) + self._files = _zip.namelist() + self.files = [] + self.allow_pickle = allow_pickle + self.max_header_size = max_header_size + self.pickle_kwargs = pickle_kwargs + for x in self._files: + if x.endswith('.npy'): + self.files.append(x[:-4]) + else: + self.files.append(x) + self.zip = _zip + self.f = BagObj(self) + if own_fid: + self.fid = fid + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def close(self): + """ + Close the file. + + """ + if self.zip is not None: + self.zip.close() + self.zip = None + if self.fid is not None: + self.fid.close() + self.fid = None + self.f = None # break reference cycle + + def __del__(self): + self.close() + + # Implement the Mapping ABC + def __iter__(self): + return iter(self.files) + + def __len__(self): + return len(self.files) + + def __getitem__(self, key): + # FIXME: This seems like it will copy strings around + # more than is strictly necessary. The zipfile + # will read the string and then + # the format.read_array will copy the string + # to another place in memory. + # It would be better if the zipfile could read + # (or at least uncompress) the data + # directly into the array memory. + member = False + if key in self._files: + member = True + elif key in self.files: + member = True + key += '.npy' + if member: + bytes = self.zip.open(key) + magic = bytes.read(len(format.MAGIC_PREFIX)) + bytes.close() + if magic == format.MAGIC_PREFIX: + bytes = self.zip.open(key) + return format.read_array(bytes, + allow_pickle=self.allow_pickle, + pickle_kwargs=self.pickle_kwargs, + max_header_size=self.max_header_size) + else: + return self.zip.read(key) + else: + raise KeyError(f"{key} is not a file in the archive") + + def __contains__(self, key): + return (key in self._files or key in self.files) + + def __repr__(self): + # Get filename or default to `object` + if isinstance(self.fid, str): + filename = self.fid + else: + filename = getattr(self.fid, "name", "object") + + # Get the name of arrays + array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT]) + if len(self.files) > self._MAX_REPR_ARRAY_COUNT: + array_names += "..." + return f"NpzFile {filename!r} with keys: {array_names}" + + +@set_module('numpy') +def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, + encoding='ASCII', *, max_header_size=format._MAX_HEADER_SIZE): + """ + Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. + + .. warning:: Loading files that contain object arrays uses the ``pickle`` + module, which is not secure against erroneous or maliciously + constructed data. 
Consider passing ``allow_pickle=False`` to + load data that is known not to contain object arrays for the + safer handling of untrusted sources. + + Parameters + ---------- + file : file-like object, string, or pathlib.Path + The file to read. File-like objects must support the + ``seek()`` and ``read()`` methods and must always + be opened in binary mode. Pickled files require that the + file-like object support the ``readline()`` method as well. + mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional + If not None, then memory-map the file, using the given mode (see + `numpy.memmap` for a detailed description of the modes). A + memory-mapped array is kept on disk. However, it can be accessed + and sliced like any ndarray. Memory mapping is especially useful + for accessing small fragments of large files without reading the + entire file into memory. + allow_pickle : bool, optional + Allow loading pickled object arrays stored in npy files. Reasons for + disallowing pickles include security, as loading pickled data can + execute arbitrary code. If pickles are disallowed, loading object + arrays will fail. Default: False + + .. versionchanged:: 1.16.3 + Made default False in response to CVE-2019-6446. + + fix_imports : bool, optional + Only useful when loading Python 2 generated pickled files on Python 3, + which includes npy/npz files containing object arrays. If `fix_imports` + is True, pickle will try to map the old Python 2 names to the new names + used in Python 3. + encoding : str, optional + What encoding to use when reading Python 2 strings. Only useful when + loading Python 2 generated pickled files in Python 3, which includes + npy/npz files containing object arrays. Values other than 'latin1', + 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical + data. Default: 'ASCII' + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Returns + ------- + result : array, tuple, dict, etc. + Data stored in the file. For ``.npz`` files, the returned instance + of NpzFile class must be closed to avoid leaking file descriptors. + + Raises + ------ + OSError + If the input file does not exist or cannot be read. + UnpicklingError + If ``allow_pickle=True``, but the file cannot be loaded as a pickle. + ValueError + The file contains an object array, but ``allow_pickle=False`` given. + EOFError + When calling ``np.load`` multiple times on the same file handle, + if all data has already been read + + See Also + -------- + save, savez, savez_compressed, loadtxt + memmap : Create a memory-map to an array stored in a file on disk. + lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. + + Notes + ----- + - If the file contains pickle data, then whatever object is stored + in the pickle is returned. + - If the file is a ``.npy`` file, then a single array is returned. + - If the file is a ``.npz`` file, then a dictionary-like object is + returned, containing ``{filename: array}`` key-value pairs, one for + each file in the archive. 
+ - If the file is a ``.npz`` file, the returned value supports the + context manager protocol in a similar fashion to the open function:: + + with load('foo.npz') as data: + a = data['a'] + + The underlying file descriptor is closed when exiting the 'with' + block. + + Examples + -------- + Store data to disk, and load it again: + + >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) + >>> np.load('/tmp/123.npy') + array([[1, 2, 3], + [4, 5, 6]]) + + Store compressed data to disk, and load it again: + + >>> a=np.array([[1, 2, 3], [4, 5, 6]]) + >>> b=np.array([1, 2]) + >>> np.savez('/tmp/123.npz', a=a, b=b) + >>> data = np.load('/tmp/123.npz') + >>> data['a'] + array([[1, 2, 3], + [4, 5, 6]]) + >>> data['b'] + array([1, 2]) + >>> data.close() + + Mem-map the stored array, and then access the second row + directly from disk: + + >>> X = np.load('/tmp/123.npy', mmap_mode='r') + >>> X[1, :] + memmap([4, 5, 6]) + + """ + if encoding not in ('ASCII', 'latin1', 'bytes'): + # The 'encoding' value for pickle also affects what encoding + # the serialized binary data of NumPy arrays is loaded + # in. Pickle does not pass on the encoding information to + # NumPy. The unpickling code in numpy.core.multiarray is + # written to assume that unicode data appearing where binary + # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'. + # + # Other encoding values can corrupt binary data, and we + # purposefully disallow them. For the same reason, the errors= + # argument is not exposed, as values other than 'strict' + # result can similarly silently corrupt numerical data. + raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") + + pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports) + + with contextlib.ExitStack() as stack: + if hasattr(file, 'read'): + fid = file + own_fid = False + else: + fid = stack.enter_context(open(os_fspath(file), "rb")) + own_fid = True + + # Code to distinguish from NumPy binary files and pickles. + _ZIP_PREFIX = b'PK\x03\x04' + _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this + N = len(format.MAGIC_PREFIX) + magic = fid.read(N) + if not magic: + raise EOFError("No data left in file") + # If the file size is less than N, we need to make sure not + # to seek past the beginning of the file + fid.seek(-min(N, len(magic)), 1) # back-up + if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX): + # zip-file (assume .npz) + # Potentially transfer file ownership to NpzFile + stack.pop_all() + ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs, + max_header_size=max_header_size) + return ret + elif magic == format.MAGIC_PREFIX: + # .npy file + if mmap_mode: + if allow_pickle: + max_header_size = 2**64 + return format.open_memmap(file, mode=mmap_mode, + max_header_size=max_header_size) + else: + return format.read_array(fid, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs, + max_header_size=max_header_size) + else: + # Try a pickle + if not allow_pickle: + raise ValueError("Cannot load file containing pickled data " + "when allow_pickle=False") + try: + return pickle.load(fid, **pickle_kwargs) + except Exception as e: + raise pickle.UnpicklingError( + f"Failed to interpret file {file!r} as a pickle") from e + + +def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): + return (arr,) + + +@array_function_dispatch(_save_dispatcher) +def save(file, arr, allow_pickle=True, fix_imports=True): + """ + Save an array to a binary file in NumPy ``.npy`` format. 
+ + Parameters + ---------- + file : file, str, or pathlib.Path + File or filename to which the data is saved. If file is a file-object, + then the filename is unchanged. If file is a string or Path, a ``.npy`` + extension will be appended to the filename if it does not already + have one. + arr : array_like + Array data to be saved. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for disallowing + pickles include security (loading pickled data can execute arbitrary + code) and portability (pickled objects may not be loadable on different + Python installations, for example if the stored objects require libraries + that are not available, and not all pickled data is compatible between + Python 2 and Python 3). + Default: True + fix_imports : bool, optional + Only useful in forcing objects in object arrays on Python 3 to be + pickled in a Python 2 compatible way. If `fix_imports` is True, pickle + will try to map the new Python 3 names to the old module names used in + Python 2, so that the pickle data stream is readable with Python 2. + + See Also + -------- + savez : Save several arrays into a ``.npz`` archive + savetxt, load + + Notes + ----- + For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. + + Any data saved to the file is appended to the end of the file. + + Examples + -------- + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + + >>> x = np.arange(10) + >>> np.save(outfile, x) + + >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> np.load(outfile) + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + + >>> with open('test.npy', 'wb') as f: + ... np.save(f, np.array([1, 2])) + ... np.save(f, np.array([1, 3])) + >>> with open('test.npy', 'rb') as f: + ... a = np.load(f) + ... b = np.load(f) + >>> print(a, b) + # [1 2] [1 3] + """ + if hasattr(file, 'write'): + file_ctx = contextlib.nullcontext(file) + else: + file = os_fspath(file) + if not file.endswith('.npy'): + file = file + '.npy' + file_ctx = open(file, "wb") + + with file_ctx as fid: + arr = np.asanyarray(arr) + format.write_array(fid, arr, allow_pickle=allow_pickle, + pickle_kwargs=dict(fix_imports=fix_imports)) + + +def _savez_dispatcher(file, *args, **kwds): + yield from args + yield from kwds.values() + + +@array_function_dispatch(_savez_dispatcher) +def savez(file, *args, **kwds): + """Save several arrays into a single file in uncompressed ``.npz`` format. + + Provide arrays as keyword arguments to store them under the + corresponding name in the output file: ``savez(fn, x=x, y=y)``. + + If arrays are specified as positional arguments, i.e., ``savez(fn, + x, y)``, their names will be `arr_0`, `arr_1`, etc. + + Parameters + ---------- + file : str or file + Either the filename (string) or an open file (file-like object) + where the data will be saved. If file is a string or a Path, the + ``.npz`` extension will be appended to the filename if it is not + already there. + args : Arguments, optional + Arrays to save to the file. Please use keyword arguments (see + `kwds` below) to assign names to arrays. Arrays specified as + args will be named "arr_0", "arr_1", and so on. + kwds : Keyword arguments, optional + Arrays to save to the file. Each array will be saved to the + output file with its corresponding keyword name. + + Returns + ------- + None + + See Also + -------- + save : Save a single array to a binary file in NumPy format. + savetxt : Save an array to a file as plain text. 
+ savez_compressed : Save several arrays into a compressed ``.npz`` archive + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is not compressed and each file + in the archive contains one variable in ``.npy`` format. For a + description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. + + When opening the saved ``.npz`` file with `load` a `NpzFile` object is + returned. This is a dictionary-like object which can be queried for + its list of arrays (with the ``.files`` attribute), and for the arrays + themselves. + + Keys passed in `kwds` are used as filenames inside the ZIP archive. + Therefore, keys should be valid filenames; e.g., avoid keys that begin with + ``/`` or contain ``.``. + + When naming variables with keyword arguments, it is not possible to name a + variable ``file``, as this would cause the ``file`` argument to be defined + twice in the call to ``savez``. + + Examples + -------- + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + + Using `savez` with \\*args, the arrays are saved with default names. + + >>> np.savez(outfile, x, y) + >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> npzfile = np.load(outfile) + >>> npzfile.files + ['arr_0', 'arr_1'] + >>> npzfile['arr_0'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + Using `savez` with \\**kwds, the arrays are saved with the keyword names. + + >>> outfile = TemporaryFile() + >>> np.savez(outfile, x=x, y=y) + >>> _ = outfile.seek(0) + >>> npzfile = np.load(outfile) + >>> sorted(npzfile.files) + ['x', 'y'] + >>> npzfile['x'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + _savez(file, args, kwds, False) + + +def _savez_compressed_dispatcher(file, *args, **kwds): + yield from args + yield from kwds.values() + + +@array_function_dispatch(_savez_compressed_dispatcher) +def savez_compressed(file, *args, **kwds): + """ + Save several arrays into a single file in compressed ``.npz`` format. + + Provide arrays as keyword arguments to store them under the + corresponding name in the output file: ``savez(fn, x=x, y=y)``. + + If arrays are specified as positional arguments, i.e., ``savez(fn, + x, y)``, their names will be `arr_0`, `arr_1`, etc. + + Parameters + ---------- + file : str or file + Either the filename (string) or an open file (file-like object) + where the data will be saved. If file is a string or a Path, the + ``.npz`` extension will be appended to the filename if it is not + already there. + args : Arguments, optional + Arrays to save to the file. Please use keyword arguments (see + `kwds` below) to assign names to arrays. Arrays specified as + args will be named "arr_0", "arr_1", and so on. + kwds : Keyword arguments, optional + Arrays to save to the file. Each array will be saved to the + output file with its corresponding keyword name. + + Returns + ------- + None + + See Also + -------- + numpy.save : Save a single array to a binary file in NumPy format. + numpy.savetxt : Save an array to a file as plain text. + numpy.savez : Save several arrays into an uncompressed ``.npz`` file format + numpy.load : Load the files created by savez_compressed. + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is compressed with + ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable + in ``.npy`` format. 
For a description of the ``.npy`` format, see
+    :py:mod:`numpy.lib.format`.
+
+    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
+    returned. This is a dictionary-like object which can be queried for
+    its list of arrays (with the ``.files`` attribute), and for the arrays
+    themselves.
+
+    Examples
+    --------
+    >>> test_array = np.random.rand(3, 2)
+    >>> test_vector = np.random.rand(4)
+    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
+    >>> loaded = np.load('/tmp/123.npz')
+    >>> print(np.array_equal(test_array, loaded['a']))
+    True
+    >>> print(np.array_equal(test_vector, loaded['b']))
+    True
+
+    """
+    _savez(file, args, kwds, True)
+
+
+def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
+    # Import is postponed to here since zipfile depends on gzip, an optional
+    # component of the so-called standard library.
+    import zipfile
+
+    if not hasattr(file, 'write'):
+        file = os_fspath(file)
+        if not file.endswith('.npz'):
+            file = file + '.npz'
+
+    namedict = kwds
+    for i, val in enumerate(args):
+        key = 'arr_%d' % i
+        if key in namedict.keys():
+            raise ValueError(
+                "Cannot use un-named variables and keyword %s" % key)
+        namedict[key] = val
+
+    if compress:
+        compression = zipfile.ZIP_DEFLATED
+    else:
+        compression = zipfile.ZIP_STORED
+
+    zipf = zipfile_factory(file, mode="w", compression=compression)
+
+    for key, val in namedict.items():
+        fname = key + '.npy'
+        val = np.asanyarray(val)
+        # always force zip64, gh-10776
+        with zipf.open(fname, 'w', force_zip64=True) as fid:
+            format.write_array(fid, val,
+                               allow_pickle=allow_pickle,
+                               pickle_kwargs=pickle_kwargs)
+
+    zipf.close()
+
+
+def _ensure_ndmin_ndarray_check_param(ndmin):
+    """Check that the `ndmin` parameter is supported by
+    _ensure_ndmin_ndarray. It is intended to be used as
+    verification before running anything expensive,
+    e.g. loadtxt, genfromtxt.
+    """
+    # Check correctness of the values of `ndmin`
+    if ndmin not in [0, 1, 2]:
+        raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")
+
+def _ensure_ndmin_ndarray(a, *, ndmin: int):
+    """This is a helper function of loadtxt and genfromtxt to ensure
+        proper minimum dimension as requested
+
+        ndmin : int. Supported values 0, 1, 2
+                     ^^ whenever this changes, keep in sync with
+                        _ensure_ndmin_ndarray_check_param
+    """
+    # Verify that the array has at least dimensions `ndmin`.
+    # Tweak the size and shape of the arrays - remove extraneous dimensions
+    if a.ndim > ndmin:
+        a = np.squeeze(a)
+    # and ensure we have the minimum number of dimensions asked for
+    # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
+    if a.ndim < ndmin:
+        if ndmin == 1:
+            a = np.atleast_1d(a)
+        elif ndmin == 2:
+            a = np.atleast_2d(a).T
+
+    return a
+
+
+# amount of lines loadtxt reads in one chunk, can be overridden for testing
+_loadtxt_chunksize = 50000
+
+
+def _check_nonneg_int(value, name="argument"):
+    try:
+        operator.index(value)
+    except TypeError:
+        raise TypeError(f"{name} must be an integer") from None
+    if value < 0:
+        raise ValueError(f"{name} must be nonnegative")
+
+
+def _preprocess_comments(iterable, comments, encoding):
+    """
+    Generator that consumes an iterable of lines and strips out multiple
+    (or multi-character) comments from those lines.
+    This is a pre-processing step to achieve feature parity with loadtxt
+    (we assume that this feature is a niche feature).
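+
+    A minimal sketch of the stripping behaviour (illustrative; it assumes
+    already-decoded ``str`` lines, so the `encoding` argument is unused):
+
+    >>> list(_preprocess_comments(["x 1# a", "y 2// b"], ["#", "//"], None))
+    ['x 1', 'y 2']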
+
+    """
+    for line in iterable:
+        if isinstance(line, bytes):
+            # Need to handle conversion here, or the splitting would fail
+            line = line.decode(encoding)
+
+        for c in comments:
+            line = line.split(c, 1)[0]
+
+        yield line
+
+
+# The number of rows we read in one go if confronted with a parametric dtype
+_loadtxt_chunksize = 50000
+
+
+def _read(fname, *, delimiter=',', comment='#', quote='"',
+          imaginary_unit='j', usecols=None, skiplines=0,
+          max_rows=None, converters=None, ndmin=None, unpack=False,
+          dtype=np.float64, encoding="bytes"):
+    r"""
+    Read a NumPy array from a text file.
+
+    Parameters
+    ----------
+    fname : str or file object
+        The filename or the file to be read.
+    delimiter : str, optional
+        Field delimiter of the fields in a line of the file.
+        Default is a comma, ','. If None, any sequence of whitespace is
+        considered a delimiter.
+    comment : str or sequence of str or None, optional
+        Character that begins a comment. All text from the comment
+        character to the end of the line is ignored.
+        Multiple comments or multiple-character comment strings are supported,
+        but may be slower and `quote` must be empty if used.
+        Use None to disable all use of comments.
+    quote : str or None, optional
+        Character that is used to quote string fields. Default is '"'
+        (a double quote). Use None to disable quote support.
+    imaginary_unit : str, optional
+        Character that represents the imaginary unit `sqrt(-1)`.
+        Default is 'j'.
+    usecols : array_like, optional
+        A one-dimensional array of integer column numbers. These are the
+        columns from the file to be included in the array. If this value
+        is not given, all the columns are used.
+    skiplines : int, optional
+        Number of lines to skip before interpreting the data in the file.
+    max_rows : int, optional
+        Maximum number of rows of data to read. Default is to read the
+        entire file.
+    converters : dict or callable, optional
+        A function to parse all column strings into the desired value, or
+        a dictionary mapping column number to a parser function.
+        E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
+        Converters can also be used to provide a default value for missing
+        data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
+        convert empty fields to 0.
+        Default: None
+    ndmin : int, optional
+        Minimum dimension of the array returned.
+        Allowed values are 0, 1 or 2. Default is 0.
+    unpack : bool, optional
+        If True, the returned array is transposed, so that arguments may be
+        unpacked using ``x, y, z = read(...)``. When used with a structured
+        data-type, arrays are returned for each field. Default is False.
+    dtype : numpy data type
+        A NumPy dtype instance, can be a structured dtype to map to the
+        columns of the file.
+    encoding : str, optional
+        Encoding used to decode the input file. The special value 'bytes'
+        (the default) enables backwards-compatible behavior for `converters`,
+        ensuring that inputs to the converter functions are encoded
+        bytes objects. The special value 'bytes' has no additional effect if
+        ``converters=None``. If encoding is ``'bytes'`` or ``None``, the
+        default system encoding is used.
+
+    Returns
+    -------
+    ndarray
+        NumPy array.
+
+    Examples
+    --------
+    First we create a file for the example.
+
+    >>> s1 = '1.0,2.0,3.0\n4.0,5.0,6.0\n'
+    >>> with open('example1.csv', 'w') as f:
+    ...
f.write(s1) + >>> a1 = read_from_filename('example1.csv') + >>> a1 + array([[1., 2., 3.], + [4., 5., 6.]]) + + The second example has columns with different data types, so a + one-dimensional array with a structured data type is returned. + The tab character is used as the field delimiter. + + >>> s2 = '1.0\t10\talpha\n2.3\t25\tbeta\n4.5\t16\tgamma\n' + >>> with open('example2.tsv', 'w') as f: + ... f.write(s2) + >>> a2 = read_from_filename('example2.tsv', delimiter='\t') + >>> a2 + array([(1. , 10, b'alpha'), (2.3, 25, b'beta'), (4.5, 16, b'gamma')], + dtype=[('f0', '= 0: + max_rows -= chunk_size + if len(next_arr) < chunk_size: + # There was less data than requested, so we are done. + break + + # Need at least one chunk, but if empty, the last one may have + # the wrong shape. + if len(chunks) > 1 and len(chunks[-1]) == 0: + del chunks[-1] + if len(chunks) == 1: + arr = chunks[0] + else: + arr = np.concatenate(chunks, axis=0) + + # NOTE: ndmin works as advertised for structured dtypes, but normally + # these would return a 1D result plus the structured dimension, + # so ndmin=2 adds a third dimension even when no squeezing occurs. + # A `squeeze=False` could be a better solution (pandas uses squeeze). + arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin) + + if arr.shape: + if arr.shape[0] == 0: + warnings.warn( + f'loadtxt: input contained no data: "{fname}"', + category=UserWarning, + stacklevel=3 + ) + + if unpack: + # Unpack structured dtypes if requested: + dt = arr.dtype + if dt.names is not None: + # For structured arrays, return an array for each field. + return [arr[field] for field in dt.names] + else: + return arr.T + else: + return arr + + +@set_array_function_like_doc +@set_module('numpy') +def loadtxt(fname, dtype=float, comments='#', delimiter=None, + converters=None, skiprows=0, usecols=None, unpack=False, + ndmin=0, encoding='bytes', max_rows=None, *, quotechar=None, + like=None): + r""" + Load data from a text file. + + Parameters + ---------- + fname : file, str, pathlib.Path, list of str, generator + File, filename, list, or generator to read. If the filename + extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note + that generators must return bytes or strings. The strings + in a list or produced by a generator are treated as lines. + dtype : data-type, optional + Data-type of the resulting array; default: float. If this is a + structured data-type, the resulting array will be 1-dimensional, and + each row will be interpreted as an element of the array. In this + case, the number of columns used must match the number of fields in + the data-type. + comments : str or sequence of str or None, optional + The characters or list of characters used to indicate the start of a + comment. None implies no comments. For backwards compatibility, byte + strings will be decoded as 'latin1'. The default is '#'. + delimiter : str, optional + The character used to separate the values. For backwards compatibility, + byte strings will be decoded as 'latin1'. The default is whitespace. + + .. versionchanged:: 1.23.0 + Only single character delimiters are supported. Newline characters + cannot be used as the delimiter. + + converters : dict or callable, optional + Converter functions to customize value parsing. If `converters` is + callable, the function is applied to all columns, else it must be a + dict that maps column number to a parser function. + See examples for further details. + Default: None. + + .. 
versionchanged:: 1.23.0 + The ability to pass a single callable to be applied to all columns + was added. + + skiprows : int, optional + Skip the first `skiprows` lines, including comments; default: 0. + usecols : int or sequence, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. + The default, None, results in all columns being read. + + .. versionchanged:: 1.11.0 + When a single column has to be read it is possible to use + an integer instead of a tuple. E.g ``usecols = 3`` reads the + fourth column the same way as ``usecols = (3,)`` would. + unpack : bool, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = loadtxt(...)``. When used with a + structured data-type, arrays are returned for each field. + Default is False. + ndmin : int, optional + The returned array will have at least `ndmin` dimensions. + Otherwise mono-dimensional axes will be squeezed. + Legal values: 0 (default), 1 or 2. + + .. versionadded:: 1.6.0 + encoding : str, optional + Encoding used to decode the inputfile. Does not apply to input streams. + The special value 'bytes' enables backward compatibility workarounds + that ensures you receive byte arrays as results if possible and passes + 'latin1' encoded strings to converters. Override this value to receive + unicode arrays and pass strings as input to converters. If set to None + the system default is used. The default value is 'bytes'. + + .. versionadded:: 1.14.0 + max_rows : int, optional + Read `max_rows` rows of content after `skiprows` lines. The default is + to read all the rows. Note that empty rows containing no data such as + empty lines and comment lines are not counted towards `max_rows`, + while such lines are counted in `skiprows`. + + .. versionadded:: 1.16.0 + + .. versionchanged:: 1.23.0 + Lines containing no data, including comment lines (e.g., lines + starting with '#' or as specified via `comments`) are not counted + towards `max_rows`. + quotechar : unicode character or None, optional + The character used to denote the start and end of a quoted item. + Occurrences of the delimiter or comment characters are ignored within + a quoted item. The default value is ``quotechar=None``, which means + quoting support is disabled. + + If two consecutive instances of `quotechar` are found within a quoted + field, the first is treated as an escape character. See examples. + + .. versionadded:: 1.23.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Data read from the text file. + + See Also + -------- + load, fromstring, fromregex + genfromtxt : Load data with missing values handled as specified. + scipy.io.loadmat : reads MATLAB data files + + Notes + ----- + This function aims to be a fast reader for simply formatted files. The + `genfromtxt` function provides more sophisticated handling of, e.g., + lines with missing values. + + Each row in the input text file must have the same number of values to be + able to read all values. If all rows do not have same number of values, a + subset of up to n columns (where n is the least number of values present + in all rows) can be read by specifying the columns via `usecols`. + + .. versionadded:: 1.10.0 + + The strings produced by the Python float.hex method can be used as + input for floats. 
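+
+    As a minimal sketch of the `ndmin` behaviour described above
+    (illustrative only, using the public `loadtxt` API): a single data
+    column is squeezed to one dimension by default, while ``ndmin=2``
+    preserves it as a column vector.
+
+    >>> from io import StringIO
+    >>> np.loadtxt(StringIO("1\n2\n3"))
+    array([1., 2., 3.])
+    >>> np.loadtxt(StringIO("1\n2\n3"), ndmin=2)
+    array([[1.],
+           [2.],
+           [3.]])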
+
+    Examples
+    --------
+    >>> from io import StringIO   # StringIO behaves like a file object
+    >>> c = StringIO("0 1\n2 3")
+    >>> np.loadtxt(c)
+    array([[0., 1.],
+           [2., 3.]])
+
+    >>> d = StringIO("M 21 72\nF 35 58")
+    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
+    ...                      'formats': ('S1', 'i4', 'f4')})
+    array([(b'M', 21, 72.), (b'F', 35, 58.)],
+          dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
+
+    >>> c = StringIO("1,0,2\n3,0,4")
+    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
+    >>> x
+    array([1., 3.])
+    >>> y
+    array([2., 4.])
+
+    The `converters` argument is used to specify functions to preprocess the
+    text prior to parsing. `converters` can be a dictionary that maps
+    preprocessing functions to each column:
+
+    >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
+    >>> conv = {
+    ...     0: lambda x: np.floor(float(x)),  # conversion fn for column 0
+    ...     1: lambda x: np.ceil(float(x)),   # conversion fn for column 1
+    ... }
+    >>> np.loadtxt(s, delimiter=",", converters=conv)
+    array([[1., 3.],
+           [3., 5.]])
+
+    `converters` can be a callable instead of a dictionary, in which case it
+    is applied to all columns:
+
+    >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
+    >>> import functools
+    >>> conv = functools.partial(int, base=16)
+    >>> np.loadtxt(s, converters=conv)
+    array([[222., 173.],
+           [192., 222.]])
+
+    This example shows how `converters` can be used to convert a field
+    with a trailing minus sign into a negative number.
+
+    >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
+    >>> def conv(fld):
+    ...     return -float(fld[:-1]) if fld.endswith(b'-') else float(fld)
+    ...
+    >>> np.loadtxt(s, converters=conv)
+    array([[ 10.01, -31.25],
+           [ 19.22,  64.31],
+           [-17.57,  63.94]])
+
+    Using a callable as the converter can be particularly useful for handling
+    values with different formatting, e.g. floats with underscores:
+
+    >>> s = StringIO("1 2.7 100_000")
+    >>> np.loadtxt(s, converters=float)
+    array([1.e+00, 2.7e+00, 1.e+05])
+
+    This idea can be extended to automatically handle values specified in
+    many different formats:
+
+    >>> def conv(val):
+    ...     try:
+    ...         return float(val)
+    ...     except ValueError:
+    ...         return float.fromhex(val)
+    >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
+    >>> np.loadtxt(s, delimiter=",", converters=conv, encoding=None)
+    array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])
+
+    Note that with the default ``encoding="bytes"``, the inputs to the
+    converter function are latin-1 encoded byte strings. To deactivate the
+    implicit encoding prior to conversion, use ``encoding=None``
+
+    >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
+    >>> conv = lambda x: -float(x[:-1]) if x.endswith('-') else float(x)
+    >>> np.loadtxt(s, converters=conv, encoding=None)
+    array([[ 10.01, -31.25],
+           [ 19.22,  64.31],
+           [-17.57,  63.94]])
+
+    Support for quoted fields is enabled with the `quotechar` parameter.
+    Comment and delimiter characters are ignored when they appear within a
+    quoted item delineated by `quotechar`:
+
+    >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
+    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+    >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
+    array([('alpha, #42', 10.), ('beta, #64', 2.)],
+          dtype=[('label', '<U12'), ('value', '<f8')])
+
+    Quoted fields can be separated by multiple whitespace characters:
+
+    >>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n')
+    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+    >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
+    array([('alpha, #42', 10.), ('beta, #64', 2.)],
+          dtype=[('label', '<U12'), ('value', '<f8')])
+
+    Two consecutive quote characters within a quoted field are treated as a
+    single escaped character:
+
+    >>> s = StringIO('"Hello, my name is ""Monty""!"')
+    >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
+    array('Hello, my name is "Monty"!', dtype='<U26')
+
+    Read subset of columns when all rows do not contain equal number of values:
+
+    >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")
+    >>> np.loadtxt(d, usecols=(0, 1))
+    array([[ 1.,  2.],
+           [ 2.,  4.],
+           [ 3.,  9.],
+           [ 4., 16.]])
+
+    """
+
+    if like is not None:
+        return _loadtxt_with_like(
+            like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
+            converters=converters, skiprows=skiprows, usecols=usecols,
+            unpack=unpack, ndmin=ndmin, encoding=encoding,
+            max_rows=max_rows
+        )
+
+    if isinstance(delimiter, bytes):
+        delimiter.decode("latin1")
+
+    if dtype is None:
+        dtype = np.float64
+
+    comment = comments
+    # Control character type conversions for Py3 convenience
+    if comment is not None:
+        if isinstance(comment, (str, bytes)):
+            comment = [comment]
+        comment = [
+            x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
+    if isinstance(delimiter, bytes):
+        delimiter = delimiter.decode('latin1')
+
+    arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
+                converters=converters, skiplines=skiprows, usecols=usecols,
+                unpack=unpack, ndmin=ndmin, encoding=encoding,
+                max_rows=max_rows, quote=quotechar)
+
+    return arr
+
+
+_loadtxt_with_like = array_function_dispatch()(loadtxt)
+
+
+def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
+                        header=None, footer=None, comments=None,
+                        encoding=None):
+    return (X,)
+
+
+@array_function_dispatch(_savetxt_dispatcher)
+def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
+            footer='', comments='# ', encoding=None):
+    """
+    Save an array to a text file.
+
+    Parameters
+    ----------
+    fname : filename or file handle
+        If the filename ends in ``.gz``, the file is automatically saved in
+        compressed gzip format. `loadtxt` understands gzipped files
+        transparently.
+    X : 1D or 2D array_like
+        Data to be saved to a text file.
+    fmt : str or sequence of strs, optional
+        A single format (%10.5f), a sequence of formats, or a
+        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
+        case `delimiter` is ignored. For complex `X`, the legal options
+        for `fmt` are:
+
+        * a single specifier, `fmt='%.4e'`, resulting in numbers formatted
+          like `' (%s+%sj)' % (fmt, fmt)`
+        * a full string specifying every real and imaginary part, e.g.
+          `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
+        * a list of specifiers, one per column - in this case, the real
+          and imaginary part must have separate specifiers,
+          e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
+    delimiter : str, optional
+        String or character separating columns.
+    newline : str, optional
+        String or character separating lines.
+
+        .. versionadded:: 1.5.0
+    header : str, optional
+        String that will be written at the beginning of the file.
+
+        ..
versionadded:: 1.7.0 + footer : str, optional + String that will be written at the end of the file. + + .. versionadded:: 1.7.0 + comments : str, optional + String that will be prepended to the ``header`` and ``footer`` strings, + to mark them as comments. Default: '# ', as expected by e.g. + ``numpy.loadtxt``. + + .. versionadded:: 1.7.0 + encoding : {None, str}, optional + Encoding used to encode the outputfile. Does not apply to output + streams. If the encoding is something other than 'bytes' or 'latin1' + you will not be able to load the file in NumPy versions < 1.14. Default + is 'latin1'. + + .. versionadded:: 1.14.0 + + + See Also + -------- + save : Save an array to a binary file in NumPy ``.npy`` format + savez : Save several arrays into an uncompressed ``.npz`` archive + savez_compressed : Save several arrays into a compressed ``.npz`` archive + + Notes + ----- + Further explanation of the `fmt` parameter + (``%[flag]width[.precision]specifier``): + + flags: + ``-`` : left justify + + ``+`` : Forces to precede result with + or -. + + ``0`` : Left pad the number with zeros instead of space (see width). + + width: + Minimum number of characters to be printed. The value is not truncated + if it has more characters. + + precision: + - For integer specifiers (eg. ``d,i,o,x``), the minimum number of + digits. + - For ``e, E`` and ``f`` specifiers, the number of digits to print + after the decimal point. + - For ``g`` and ``G``, the maximum number of significant digits. + - For ``s``, the maximum number of characters. + + specifiers: + ``c`` : character + + ``d`` or ``i`` : signed decimal integer + + ``e`` or ``E`` : scientific notation with ``e`` or ``E``. + + ``f`` : decimal floating point + + ``g,G`` : use the shorter of ``e,E`` or ``f`` + + ``o`` : signed octal + + ``s`` : string of characters + + ``u`` : unsigned decimal integer + + ``x,X`` : unsigned hexadecimal integer + + This explanation of ``fmt`` is not complete, for an exhaustive + specification see [1]_. + + References + ---------- + .. [1] `Format Specification Mini-Language + `_, + Python Documentation. + + Examples + -------- + >>> x = y = z = np.arange(0.0,5.0,1.0) + >>> np.savetxt('test.out', x, delimiter=',') # X is an array + >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays + >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation + + """ + + # Py3 conversions first + if isinstance(fmt, bytes): + fmt = asstr(fmt) + delimiter = asstr(delimiter) + + class WriteWrap: + """Convert to bytes on bytestream inputs. + + """ + def __init__(self, fh, encoding): + self.fh = fh + self.encoding = encoding + self.do_write = self.first_write + + def close(self): + self.fh.close() + + def write(self, v): + self.do_write(v) + + def write_bytes(self, v): + if isinstance(v, bytes): + self.fh.write(v) + else: + self.fh.write(v.encode(self.encoding)) + + def write_normal(self, v): + self.fh.write(asunicode(v)) + + def first_write(self, v): + try: + self.write_normal(v) + self.write = self.write_normal + except TypeError: + # input is probably a bytestream + self.write_bytes(v) + self.write = self.write_bytes + + own_fh = False + if isinstance(fname, os_PathLike): + fname = os_fspath(fname) + if _is_string_like(fname): + # datasource doesn't support creating a new file ... 
+ open(fname, 'wt').close() + fh = np.lib._datasource.open(fname, 'wt', encoding=encoding) + own_fh = True + elif hasattr(fname, 'write'): + # wrap to handle byte output streams + fh = WriteWrap(fname, encoding or 'latin1') + else: + raise ValueError('fname must be a string or file handle') + + try: + X = np.asarray(X) + + # Handle 1-dimensional arrays + if X.ndim == 0 or X.ndim > 2: + raise ValueError( + "Expected 1D or 2D array, got %dD array instead" % X.ndim) + elif X.ndim == 1: + # Common case -- 1d array of numbers + if X.dtype.names is None: + X = np.atleast_2d(X).T + ncol = 1 + + # Complex dtype -- each field indicates a separate column + else: + ncol = len(X.dtype.names) + else: + ncol = X.shape[1] + + iscomplex_X = np.iscomplexobj(X) + # `fmt` can be a string with multiple insertion points or a + # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d') + if type(fmt) in (list, tuple): + if len(fmt) != ncol: + raise AttributeError('fmt has wrong shape. %s' % str(fmt)) + format = asstr(delimiter).join(map(asstr, fmt)) + elif isinstance(fmt, str): + n_fmt_chars = fmt.count('%') + error = ValueError('fmt has wrong number of %% formats: %s' % fmt) + if n_fmt_chars == 1: + if iscomplex_X: + fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol + else: + fmt = [fmt, ] * ncol + format = delimiter.join(fmt) + elif iscomplex_X and n_fmt_chars != (2 * ncol): + raise error + elif ((not iscomplex_X) and n_fmt_chars != ncol): + raise error + else: + format = fmt + else: + raise ValueError('invalid fmt: %r' % (fmt,)) + + if len(header) > 0: + header = header.replace('\n', '\n' + comments) + fh.write(comments + header + newline) + if iscomplex_X: + for row in X: + row2 = [] + for number in row: + row2.append(number.real) + row2.append(number.imag) + s = format % tuple(row2) + newline + fh.write(s.replace('+-', '-')) + else: + for row in X: + try: + v = format % tuple(row) + newline + except TypeError as e: + raise TypeError("Mismatch between array dtype ('%s') and " + "format specifier ('%s')" + % (str(X.dtype), format)) from e + fh.write(v) + + if len(footer) > 0: + footer = footer.replace('\n', '\n' + comments) + fh.write(comments + footer + newline) + finally: + if own_fh: + fh.close() + + +@set_module('numpy') +def fromregex(file, regexp, dtype, encoding=None): + r""" + Construct an array from a text file, using regular expression parsing. + + The returned array is always a structured array, and is constructed from + all matches of the regular expression in the file. Groups in the regular + expression are converted to fields of the structured array. + + Parameters + ---------- + file : path or file + Filename or file object to read. + + .. versionchanged:: 1.22.0 + Now accepts `os.PathLike` implementations. + regexp : str or regexp + Regular expression used to parse the file. + Groups in the regular expression correspond to fields in the dtype. + dtype : dtype or list of dtypes + Dtype for the structured array; must be a structured datatype. + encoding : str, optional + Encoding used to decode the inputfile. Does not apply to input streams. + + .. versionadded:: 1.14.0 + + Returns + ------- + output : ndarray + The output array, containing the part of the content of `file` that + was matched by `regexp`. `output` is always a structured array. + + Raises + ------ + TypeError + When `dtype` is not a valid dtype for a structured array. 
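+
+    A minimal illustrative failure case (a sketch added for clarity; the
+    message matches the ``TypeError`` raised in the implementation below
+    when the dtype has no fields):
+
+    >>> from io import StringIO
+    >>> np.fromregex(StringIO("1 2"), r"(\d+)", np.int64)
+    Traceback (most recent call last):
+        ...
+    TypeError: dtype must be a structured datatype.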
+
+    See Also
+    --------
+    fromstring, loadtxt
+
+    Notes
+    -----
+    Dtypes for structured arrays can be specified in several forms, but all
+    forms specify at least the data type and field name. For details see
+    `basics.rec`.
+
+    Examples
+    --------
+    >>> from io import StringIO
+    >>> text = StringIO("1312 foo\n1534  bar\n444   qux")
+
+    >>> regexp = r"(\d+)\s+(...)"  # match [digits, whitespace, anything]
+    >>> output = np.fromregex(text, regexp,
+    ...                       [('num', np.int64), ('key', 'S3')])
+    >>> output
+    array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
+          dtype=[('num', '<i8'), ('key', 'S3')])
+    >>> output['num']
+    array([1312, 1534,  444])
+
+    """
+    own_fh = False
+    if not hasattr(file, "read"):
+        file = os.fspath(file)
+        file = np.lib._datasource.open(file, 'rt', encoding=encoding)
+        own_fh = True
+
+    try:
+        if not isinstance(dtype, np.dtype):
+            dtype = np.dtype(dtype)
+        if dtype.names is None:
+            raise TypeError('dtype must be a structured datatype.')
+
+        content = file.read()
+        if isinstance(content, bytes) and isinstance(regexp, str):
+            regexp = asbytes(regexp)
+        elif isinstance(content, str) and isinstance(regexp, bytes):
+            regexp = asstr(regexp)
+
+        if not hasattr(regexp, 'match'):
+            regexp = re.compile(regexp)
+        seq = regexp.findall(content)
+        if seq and not isinstance(seq[0], tuple):
+            # Only one group is in the regexp.
+            # Create the new array as a single data-type and then
+            #   re-interpret as a single-field structured array.
+            newdtype = np.dtype(dtype[dtype.names[0]])
+            output = np.array(seq, dtype=newdtype)
+            output.dtype = dtype
+        else:
+            output = np.array(seq, dtype=dtype)
+
+        return output
+    finally:
+        if own_fh:
+            file.close()
+
+
+#####--------------------------------------------------------------------------
+#---- --- ASCII functions ---
+#####--------------------------------------------------------------------------
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
+               skip_header=0, skip_footer=0, converters=None,
+               missing_values=None, filling_values=None, usecols=None,
+               names=None, excludelist=None,
+               deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
+               replace_space='_', autostrip=False, case_sensitive=True,
+               defaultfmt="f%i", unpack=None, usemask=False, loose=True,
+               invalid_raise=True, max_rows=None, encoding='bytes',
+               *, ndmin=0, like=None):
+    """
+    Load data from a text file, with missing values handled as specified.
+
+    Each line past the first `skip_header` lines is split at the `delimiter`
+    character, and characters following the `comments` character are discarded.
+
+    Parameters
+    ----------
+    fname : file, str, pathlib.Path, list of str, generator
+        File, filename, list, or generator to read. If the filename
+        extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
+        that generators must return bytes or strings. The strings
+        in a list or produced by a generator are treated as lines.
+    dtype : dtype, optional
+        Data type of the resulting array.
+        If None, the dtypes will be determined by the contents of each
+        column, individually.
+    comments : str, optional
+        The character used to indicate the start of a comment.
+        All the characters occurring on a line after a comment are discarded.
+    delimiter : str, int, or sequence, optional
+        The string used to separate values. By default, any consecutive
+        whitespaces act as delimiter. An integer or sequence of integers
+        can also be provided as width(s) of each field.
+    skiprows : int, optional
+        `skiprows` was removed in numpy 1.10.
Please use `skip_header` instead. + skip_header : int, optional + The number of lines to skip at the beginning of the file. + skip_footer : int, optional + The number of lines to skip at the end of the file. + converters : variable, optional + The set of functions that convert the data of a column to a value. + The converters can also be used to provide a default value + for missing data: ``converters = {3: lambda s: float(s or 0)}``. + missing : variable, optional + `missing` was removed in numpy 1.10. Please use `missing_values` + instead. + missing_values : variable, optional + The set of strings corresponding to missing data. + filling_values : variable, optional + The set of values to be used as default when the data are missing. + usecols : sequence, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. + names : {None, True, str, sequence}, optional + If `names` is True, the field names are read from the first line after + the first `skip_header` lines. This line can optionally be preceded + by a comment delimiter. If `names` is a sequence or a single-string of + comma-separated names, the names will be used to define the field names + in a structured dtype. If `names` is None, the names of the dtype + fields will be used, if any. + excludelist : sequence, optional + A list of names to exclude. This list is appended to the default list + ['return','file','print']. Excluded names are appended with an + underscore: for example, `file` would become `file_`. + deletechars : str, optional + A string combining invalid characters that must be deleted from the + names. + defaultfmt : str, optional + A format used to define default field names, such as "f%i" or "f_%02i". + autostrip : bool, optional + Whether to automatically strip white spaces from the variables. + replace_space : char, optional + Character(s) used in replacement of white spaces in the variable + names. By default, use a '_'. + case_sensitive : {True, False, 'upper', 'lower'}, optional + If True, field names are case sensitive. + If False or 'upper', field names are converted to upper case. + If 'lower', field names are converted to lower case. + unpack : bool, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = genfromtxt(...)``. When used with a + structured data-type, arrays are returned for each field. + Default is False. + usemask : bool, optional + If True, return a masked array. + If False, return a regular array. + loose : bool, optional + If True, do not raise errors for invalid values. + invalid_raise : bool, optional + If True, an exception is raised if an inconsistency is detected in the + number of columns. + If False, a warning is emitted and the offending lines are skipped. + max_rows : int, optional + The maximum number of rows to read. Must not be used with skip_footer + at the same time. If given, the value must be at least 1. Default is + to read the entire file. + + .. versionadded:: 1.10.0 + encoding : str, optional + Encoding used to decode the inputfile. Does not apply when `fname` is + a file object. The special value 'bytes' enables backward compatibility + workarounds that ensure that you receive byte arrays when possible + and passes latin1 encoded strings to converters. Override this value to + receive unicode arrays and pass strings as input to converters. If set + to None the system default is used. The default value is 'bytes'. + + .. 
versionadded:: 1.14.0
+    ndmin : int, optional
+        Same parameter as `loadtxt`
+
+        .. versionadded:: 1.23.0
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Data read from the text file. If `usemask` is True, this is a
+        masked array.
+
+    See Also
+    --------
+    numpy.loadtxt : equivalent function when no data is missing.
+
+    Notes
+    -----
+    * When spaces are used as delimiters, or when no delimiter has been given
+      as input, there should not be any missing data between two fields.
+    * When the variables are named (either by a flexible dtype or with `names`),
+      there must not be any header in the file (else a ValueError
+      exception is raised).
+    * Individual values are not stripped of spaces by default.
+      When using a custom converter, make sure the function does remove spaces.
+
+    References
+    ----------
+    .. [1] NumPy User Guide, section `I/O with NumPy
+           <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
+
+    Examples
+    --------
+    >>> from io import StringIO
+    >>> import numpy as np
+
+    Comma delimited file with mixed dtype
+
+    >>> s = StringIO(u"1,1.3,abcde")
+    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
+    ... ('mystring','S5')], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    Using dtype = None
+
+    >>> _ = s.seek(0) # needed for StringIO example only
+    >>> data = np.genfromtxt(s, dtype=None,
+    ... names = ['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    Specifying dtype and names
+
+    >>> _ = s.seek(0)
+    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
+    ... names=['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    An example with fixed-width columns
+
+    >>> s = StringIO(u"11.3abcde")
+    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
+    ...     delimiter=[1,3,5])
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
+
+    An example to show comments
+
+    >>> f = StringIO('''
+    ... text,# of chars
+    ... hello world,11
+    ...
numpy,5''') + >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',') + array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')], + dtype=[('f0', 'S12'), ('f1', 'S12')]) + + """ + + if like is not None: + return _genfromtxt_with_like( + like, fname, dtype=dtype, comments=comments, delimiter=delimiter, + skip_header=skip_header, skip_footer=skip_footer, + converters=converters, missing_values=missing_values, + filling_values=filling_values, usecols=usecols, names=names, + excludelist=excludelist, deletechars=deletechars, + replace_space=replace_space, autostrip=autostrip, + case_sensitive=case_sensitive, defaultfmt=defaultfmt, + unpack=unpack, usemask=usemask, loose=loose, + invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding, + ndmin=ndmin, + ) + + _ensure_ndmin_ndarray_check_param(ndmin) + + if max_rows is not None: + if skip_footer: + raise ValueError( + "The keywords 'skip_footer' and 'max_rows' can not be " + "specified at the same time.") + if max_rows < 1: + raise ValueError("'max_rows' must be at least 1.") + + if usemask: + from numpy.ma import MaskedArray, make_mask_descr + # Check the input dictionary of converters + user_converters = converters or {} + if not isinstance(user_converters, dict): + raise TypeError( + "The input argument 'converter' should be a valid dictionary " + "(got '%s' instead)" % type(user_converters)) + + if encoding == 'bytes': + encoding = None + byte_converters = True + else: + byte_converters = False + + # Initialize the filehandle, the LineSplitter and the NameValidator + if isinstance(fname, os_PathLike): + fname = os_fspath(fname) + if isinstance(fname, str): + fid = np.lib._datasource.open(fname, 'rt', encoding=encoding) + fid_ctx = contextlib.closing(fid) + else: + fid = fname + fid_ctx = contextlib.nullcontext(fid) + try: + fhd = iter(fid) + except TypeError as e: + raise TypeError( + "fname must be a string, a filehandle, a sequence of strings,\n" + f"or an iterator of strings. Got {type(fname)} instead." + ) from e + with fid_ctx: + split_line = LineSplitter(delimiter=delimiter, comments=comments, + autostrip=autostrip, encoding=encoding) + validate_names = NameValidator(excludelist=excludelist, + deletechars=deletechars, + case_sensitive=case_sensitive, + replace_space=replace_space) + + # Skip the first `skip_header` rows + try: + for i in range(skip_header): + next(fhd) + + # Keep on until we find the first valid values + first_values = None + + while not first_values: + first_line = _decode_line(next(fhd), encoding) + if (names is True) and (comments is not None): + if comments in first_line: + first_line = ( + ''.join(first_line.split(comments)[1:])) + first_values = split_line(first_line) + except StopIteration: + # return an empty array if the datafile is empty + first_line = '' + first_values = [] + warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2) + + # Should we take the first values as names ? 
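+        # If ``names=True``, the header tokens were captured above as
+        # ``first_values``; when the very first token is itself just the
+        # comment marker, it is dropped below before the remaining tokens
+        # are validated as field names.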
+ if names is True: + fval = first_values[0].strip() + if comments is not None: + if fval in comments: + del first_values[0] + + # Check the columns to use: make sure `usecols` is a list + if usecols is not None: + try: + usecols = [_.strip() for _ in usecols.split(",")] + except AttributeError: + try: + usecols = list(usecols) + except TypeError: + usecols = [usecols, ] + nbcols = len(usecols or first_values) + + # Check the names and overwrite the dtype.names if needed + if names is True: + names = validate_names([str(_.strip()) for _ in first_values]) + first_line = '' + elif _is_string_like(names): + names = validate_names([_.strip() for _ in names.split(',')]) + elif names: + names = validate_names(names) + # Get the dtype + if dtype is not None: + dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names, + excludelist=excludelist, + deletechars=deletechars, + case_sensitive=case_sensitive, + replace_space=replace_space) + # Make sure the names is a list (for 2.5) + if names is not None: + names = list(names) + + if usecols: + for (i, current) in enumerate(usecols): + # if usecols is a list of names, convert to a list of indices + if _is_string_like(current): + usecols[i] = names.index(current) + elif current < 0: + usecols[i] = current + len(first_values) + # If the dtype is not None, make sure we update it + if (dtype is not None) and (len(dtype) > nbcols): + descr = dtype.descr + dtype = np.dtype([descr[_] for _ in usecols]) + names = list(dtype.names) + # If `names` is not None, update the names + elif (names is not None) and (len(names) > nbcols): + names = [names[_] for _ in usecols] + elif (names is not None) and (dtype is not None): + names = list(dtype.names) + + # Process the missing values ............................... + # Rename missing_values for convenience + user_missing_values = missing_values or () + if isinstance(user_missing_values, bytes): + user_missing_values = user_missing_values.decode('latin1') + + # Define the list of missing_values (one column: one list) + missing_values = [list(['']) for _ in range(nbcols)] + + # We have a dictionary: process it field by field + if isinstance(user_missing_values, dict): + # Loop on the items + for (key, val) in user_missing_values.items(): + # Is the key a string ? + if _is_string_like(key): + try: + # Transform it into an integer + key = names.index(key) + except ValueError: + # We couldn't find it: the name must have been dropped + continue + # Redefine the key as needed if it's a column number + if usecols: + try: + key = usecols.index(key) + except ValueError: + pass + # Transform the value as a list of string + if isinstance(val, (list, tuple)): + val = [str(_) for _ in val] + else: + val = [str(val), ] + # Add the value(s) to the current list of missing + if key is None: + # None acts as default + for miss in missing_values: + miss.extend(val) + else: + missing_values[key].extend(val) + # We have a sequence : each item matches a column + elif isinstance(user_missing_values, (list, tuple)): + for (value, entry) in zip(user_missing_values, missing_values): + value = str(value) + if value not in entry: + entry.append(value) + # We have a string : apply it to all entries + elif isinstance(user_missing_values, str): + user_value = user_missing_values.split(",") + for entry in missing_values: + entry.extend(user_value) + # We have something else: apply it to all entries + else: + for entry in missing_values: + entry.extend([str(user_missing_values)]) + + # Process the filling_values ............................... 
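+        # ``filling_values`` may be a dict keyed by column name or index, a
+        # sequence with one default per column, or a single scalar applied
+        # to every column (illustratively: ``filling_values={'a': 0}``,
+        # ``filling_values=[0, -1]``, or ``filling_values=0``); the branches
+        # below normalize each form into one default per column.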
+ # Rename the input for convenience + user_filling_values = filling_values + if user_filling_values is None: + user_filling_values = [] + # Define the default + filling_values = [None] * nbcols + # We have a dictionary : update each entry individually + if isinstance(user_filling_values, dict): + for (key, val) in user_filling_values.items(): + if _is_string_like(key): + try: + # Transform it into an integer + key = names.index(key) + except ValueError: + # We couldn't find it: the name must have been dropped, + continue + # Redefine the key if it's a column number and usecols is defined + if usecols: + try: + key = usecols.index(key) + except ValueError: + pass + # Add the value to the list + filling_values[key] = val + # We have a sequence : update on a one-to-one basis + elif isinstance(user_filling_values, (list, tuple)): + n = len(user_filling_values) + if (n <= nbcols): + filling_values[:n] = user_filling_values + else: + filling_values = user_filling_values[:nbcols] + # We have something else : use it for all entries + else: + filling_values = [user_filling_values] * nbcols + + # Initialize the converters ................................ + if dtype is None: + # Note: we can't use a [...]*nbcols, as we would have 3 times the same + # ... converter, instead of 3 different converters. + converters = [StringConverter(None, missing_values=miss, default=fill) + for (miss, fill) in zip(missing_values, filling_values)] + else: + dtype_flat = flatten_dtype(dtype, flatten_base=True) + # Initialize the converters + if len(dtype_flat) > 1: + # Flexible type : get a converter from each dtype + zipit = zip(dtype_flat, missing_values, filling_values) + converters = [StringConverter(dt, locked=True, + missing_values=miss, default=fill) + for (dt, miss, fill) in zipit] + else: + # Set to a default converter (but w/ different missing values) + zipit = zip(missing_values, filling_values) + converters = [StringConverter(dtype, locked=True, + missing_values=miss, default=fill) + for (miss, fill) in zipit] + # Update the converters to use the user-defined ones + uc_update = [] + for (j, conv) in user_converters.items(): + # If the converter is specified by column names, use the index instead + if _is_string_like(j): + try: + j = names.index(j) + i = j + except ValueError: + continue + elif usecols: + try: + i = usecols.index(j) + except ValueError: + # Unused converter specified + continue + else: + i = j + # Find the value to test - first_line is not filtered by usecols: + if len(first_line): + testing_value = first_values[j] + else: + testing_value = None + if conv is bytes: + user_conv = asbytes + elif byte_converters: + # converters may use decode to workaround numpy's old behaviour, + # so encode the string again before passing to the user converter + def tobytes_first(x, conv): + if type(x) is bytes: + return conv(x) + return conv(x.encode("latin1")) + user_conv = functools.partial(tobytes_first, conv=conv) + else: + user_conv = conv + converters[i].update(user_conv, locked=True, + testing_value=testing_value, + default=filling_values[i], + missing_values=missing_values[i],) + uc_update.append((i, user_conv)) + # Make sure we have the corrected keys in user_converters... + user_converters.update(uc_update) + + # Fixme: possible error as following variable never used. + # miss_chars = [_.missing_values for _ in converters] + + # Initialize the output lists ... + # ... rows + rows = [] + append_to_rows = rows.append + # ... masks + if usemask: + masks = [] + append_to_masks = masks.append + # ... 
invalid + invalid = [] + append_to_invalid = invalid.append + + # Parse each line + for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): + values = split_line(line) + nbvalues = len(values) + # Skip an empty line + if nbvalues == 0: + continue + if usecols: + # Select only the columns we need + try: + values = [values[_] for _ in usecols] + except IndexError: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + elif nbvalues != nbcols: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + # Store the values + append_to_rows(tuple(values)) + if usemask: + append_to_masks(tuple([v.strip() in m + for (v, m) in zip(values, + missing_values)])) + if len(rows) == max_rows: + break + + # Upgrade the converters (if needed) + if dtype is None: + for (i, converter) in enumerate(converters): + current_column = [itemgetter(i)(_m) for _m in rows] + try: + converter.iterupgrade(current_column) + except ConverterLockError: + errmsg = "Converter #%i is locked and cannot be upgraded: " % i + current_column = map(itemgetter(i), rows) + for (j, value) in enumerate(current_column): + try: + converter.upgrade(value) + except (ConverterError, ValueError): + errmsg += "(occurred line #%i for value '%s')" + errmsg %= (j + 1 + skip_header, value) + raise ConverterError(errmsg) + + # Check that we don't have invalid values + nbinvalid = len(invalid) + if nbinvalid > 0: + nbrows = len(rows) + nbinvalid - skip_footer + # Construct the error message + template = " Line #%%i (got %%i columns instead of %i)" % nbcols + if skip_footer > 0: + nbinvalid_skipped = len([_ for _ in invalid + if _[0] > nbrows + skip_header]) + invalid = invalid[:nbinvalid - nbinvalid_skipped] + skip_footer -= nbinvalid_skipped +# +# nbrows -= skip_footer +# errmsg = [template % (i, nb) +# for (i, nb) in invalid if i < nbrows] +# else: + errmsg = [template % (i, nb) + for (i, nb) in invalid] + if len(errmsg): + errmsg.insert(0, "Some errors were detected !") + errmsg = "\n".join(errmsg) + # Raise an exception ? + if invalid_raise: + raise ValueError(errmsg) + # Issue a warning ? + else: + warnings.warn(errmsg, ConversionWarning, stacklevel=2) + + # Strip the last skip_footer data + if skip_footer > 0: + rows = rows[:-skip_footer] + if usemask: + masks = masks[:-skip_footer] + + # Convert each value according to the converter: + # We want to modify the list in place to avoid creating a new one... + if loose: + rows = list( + zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + else: + rows = list( + zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + + # Reset the dtype + data = rows + if dtype is None: + # Get the dtypes from the types of the converters + column_types = [conv.type for conv in converters] + # Find the columns with strings... + strcolidx = [i for (i, v) in enumerate(column_types) + if v == np.str_] + + if byte_converters and strcolidx: + # convert strings back to bytes for backward compatibility + warnings.warn( + "Reading unicode strings without specifying the encoding " + "argument is deprecated. 
Set the encoding, use None for the " + "system default.", + np.VisibleDeprecationWarning, stacklevel=2) + def encode_unicode_cols(row_tup): + row = list(row_tup) + for i in strcolidx: + row[i] = row[i].encode('latin1') + return tuple(row) + + try: + data = [encode_unicode_cols(r) for r in data] + except UnicodeEncodeError: + pass + else: + for i in strcolidx: + column_types[i] = np.bytes_ + + # Update string types to be the right length + sized_column_types = column_types[:] + for i, col_type in enumerate(column_types): + if np.issubdtype(col_type, np.character): + n_chars = max(len(row[i]) for row in data) + sized_column_types[i] = (col_type, n_chars) + + if names is None: + # If the dtype is uniform (before sizing strings) + base = { + c_type + for c, c_type in zip(converters, column_types) + if c._checked} + if len(base) == 1: + uniform_type, = base + (ddtype, mdtype) = (uniform_type, bool) + else: + ddtype = [(defaultfmt % i, dt) + for (i, dt) in enumerate(sized_column_types)] + if usemask: + mdtype = [(defaultfmt % i, bool) + for (i, dt) in enumerate(sized_column_types)] + else: + ddtype = list(zip(names, sized_column_types)) + mdtype = list(zip(names, [bool] * len(sized_column_types))) + output = np.array(data, dtype=ddtype) + if usemask: + outputmask = np.array(masks, dtype=mdtype) + else: + # Overwrite the initial dtype names if needed + if names and dtype.names is not None: + dtype.names = names + # Case 1. We have a structured type + if len(dtype_flat) > 1: + # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] + # First, create the array using a flattened dtype: + # [('a', int), ('b1', int), ('b2', float)] + # Then, view the array using the specified dtype. + if 'O' in (_.char for _ in dtype_flat): + if has_nested_fields(dtype): + raise NotImplementedError( + "Nested fields involving objects are not supported...") + else: + output = np.array(data, dtype=dtype) + else: + rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) + output = rows.view(dtype) + # Now, process the rowmasks the same way + if usemask: + rowmasks = np.array( + masks, dtype=np.dtype([('', bool) for t in dtype_flat])) + # Construct the new dtype + mdtype = make_mask_descr(dtype) + outputmask = rowmasks.view(mdtype) + # Case #2. We have a basic dtype + else: + # We used some user-defined converters + if user_converters: + ishomogeneous = True + descr = [] + for i, ttype in enumerate([conv.type for conv in converters]): + # Keep the dtype of the current converter + if i in user_converters: + ishomogeneous &= (ttype == dtype.type) + if np.issubdtype(ttype, np.character): + ttype = (ttype, max(len(row[i]) for row in data)) + descr.append(('', ttype)) + else: + descr.append(('', dtype)) + # So we changed the dtype ? + if not ishomogeneous: + # We have more than one field + if len(descr) > 1: + dtype = np.dtype(descr) + # We have only one field: drop the name if not needed. 
+ else: + dtype = np.dtype(ttype) + # + output = np.array(data, dtype) + if usemask: + if dtype.names is not None: + mdtype = [(_, bool) for _ in dtype.names] + else: + mdtype = bool + outputmask = np.array(masks, dtype=mdtype) + # Try to take care of the missing data we missed + names = output.dtype.names + if usemask and names: + for (name, conv) in zip(names, converters): + missing_values = [conv(_) for _ in conv.missing_values + if _ != ''] + for mval in missing_values: + outputmask[name] |= (output[name] == mval) + # Construct the final array + if usemask: + output = output.view(MaskedArray) + output._mask = outputmask + + output = _ensure_ndmin_ndarray(output, ndmin=ndmin) + + if unpack: + if names is None: + return output.T + elif len(names) == 1: + # squeeze single-name dtypes too + return output[names[0]] + else: + # For structured arrays with multiple fields, + # return an array for each field. + return [output[field] for field in names] + return output + + +_genfromtxt_with_like = array_function_dispatch()(genfromtxt) + + +def recfromtxt(fname, **kwargs): + """ + Load ASCII data from a file and return it in a record array. + + If ``usemask=False`` a standard `recarray` is returned, + if ``usemask=True`` a MaskedRecords array is returned. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + kwargs.setdefault("dtype", None) + usemask = kwargs.get('usemask', False) + output = genfromtxt(fname, **kwargs) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output + + +def recfromcsv(fname, **kwargs): + """ + Load ASCII data stored in a comma-separated file. + + The returned array is a record array (if ``usemask=False``, see + `recarray`) or a masked record array (if ``usemask=True``, + see `ma.mrecords.MaskedRecords`). + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function to load ASCII data. + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + # Set default kwargs for genfromtxt as relevant to csv import. 
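+    # Illustrative usage sketch (not part of upstream NumPy): with the
+    # defaults set below, a CSV header row becomes lower-cased field names,
+    # e.g. for a hypothetical in-memory file:
+    #     r = recfromcsv(io.StringIO("Name,Value\nfoo,1"))
+    #     r["name"], r["value"]   # fields addressable by lower-cased name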
+ kwargs.setdefault("case_sensitive", "lower") + kwargs.setdefault("names", True) + kwargs.setdefault("delimiter", ",") + kwargs.setdefault("dtype", None) + output = genfromtxt(fname, **kwargs) + + usemask = kwargs.get("usemask", False) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/npyio.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/npyio.pyi new file mode 100644 index 00000000..ef0f2a5f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/npyio.pyi @@ -0,0 +1,330 @@ +import os +import sys +import zipfile +import types +from re import Pattern +from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable +from typing import ( + Literal as L, + Any, + TypeVar, + Generic, + IO, + overload, + Protocol, +) + +from numpy import ( + DataSource as DataSource, + ndarray, + recarray, + dtype, + generic, + float64, + void, + record, +) + +from numpy.ma.mrecords import MaskedRecords +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _DTypeLike, + _SupportsArrayFunc, +) + +from numpy.core.multiarray import ( + packbits as packbits, + unpackbits as unpackbits, +) + +_T = TypeVar("_T") +_T_contra = TypeVar("_T_contra", contravariant=True) +_T_co = TypeVar("_T_co", covariant=True) +_SCT = TypeVar("_SCT", bound=generic) +_CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True) +_CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True) + +class _SupportsGetItem(Protocol[_T_contra, _T_co]): + def __getitem__(self, key: _T_contra, /) -> _T_co: ... + +class _SupportsRead(Protocol[_CharType_co]): + def read(self) -> _CharType_co: ... + +class _SupportsReadSeek(Protocol[_CharType_co]): + def read(self, n: int, /) -> _CharType_co: ... + def seek(self, offset: int, whence: int, /) -> object: ... + +class _SupportsWrite(Protocol[_CharType_contra]): + def write(self, s: _CharType_contra, /) -> object: ... + +__all__: list[str] + +class BagObj(Generic[_T_co]): + def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ... + def __getattribute__(self, key: str) -> _T_co: ... + def __dir__(self) -> list[str]: ... + +class NpzFile(Mapping[str, NDArray[Any]]): + zip: zipfile.ZipFile + fid: None | IO[str] + files: list[str] + allow_pickle: bool + pickle_kwargs: None | Mapping[str, Any] + _MAX_REPR_ARRAY_COUNT: int + # Represent `f` as a mutable property so we can access the type of `self` + @property + def f(self: _T) -> BagObj[_T]: ... + @f.setter + def f(self: _T, value: BagObj[_T]) -> None: ... + def __init__( + self, + fid: IO[str], + own_fid: bool = ..., + allow_pickle: bool = ..., + pickle_kwargs: None | Mapping[str, Any] = ..., + ) -> None: ... + def __enter__(self: _T) -> _T: ... + def __exit__( + self, + exc_type: None | type[BaseException], + exc_value: None | BaseException, + traceback: None | types.TracebackType, + /, + ) -> None: ... + def close(self) -> None: ... + def __del__(self) -> None: ... + def __iter__(self) -> Iterator[str]: ... + def __len__(self) -> int: ... + def __getitem__(self, key: str) -> NDArray[Any]: ... + def __contains__(self, key: str) -> bool: ... + def __repr__(self) -> str: ... 
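+
+# Illustrative sketch (not part of the stub): NpzFile is typed as a Mapping
+# and a context manager, so a hypothetical archive can be read as
+#   with np.load("archive.npz") as npz:
+#       first = npz[npz.files[0]]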
+ +# NOTE: Returns a `NpzFile` if file is a zip file; +# returns an `ndarray`/`memmap` otherwise +def load( + file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes], + mmap_mode: L[None, "r+", "r", "w+", "c"] = ..., + allow_pickle: bool = ..., + fix_imports: bool = ..., + encoding: L["ASCII", "latin1", "bytes"] = ..., +) -> Any: ... + +def save( + file: str | os.PathLike[str] | _SupportsWrite[bytes], + arr: ArrayLike, + allow_pickle: bool = ..., + fix_imports: bool = ..., +) -> None: ... + +def savez( + file: str | os.PathLike[str] | _SupportsWrite[bytes], + *args: ArrayLike, + **kwds: ArrayLike, +) -> None: ... + +def savez_compressed( + file: str | os.PathLike[str] | _SupportsWrite[bytes], + *args: ArrayLike, + **kwds: ArrayLike, +) -> None: ... + +# File-like objects only have to implement `__iter__` and, +# optionally, `encoding` +@overload +def loadtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + dtype: None = ..., + comments: None | str | Sequence[str] = ..., + delimiter: None | str = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + skiprows: int = ..., + usecols: int | Sequence[int] = ..., + unpack: bool = ..., + ndmin: L[0, 1, 2] = ..., + encoding: None | str = ..., + max_rows: None | int = ..., + *, + quotechar: None | str = ..., + like: None | _SupportsArrayFunc = ... +) -> NDArray[float64]: ... +@overload +def loadtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + dtype: _DTypeLike[_SCT], + comments: None | str | Sequence[str] = ..., + delimiter: None | str = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + skiprows: int = ..., + usecols: int | Sequence[int] = ..., + unpack: bool = ..., + ndmin: L[0, 1, 2] = ..., + encoding: None | str = ..., + max_rows: None | int = ..., + *, + quotechar: None | str = ..., + like: None | _SupportsArrayFunc = ... +) -> NDArray[_SCT]: ... +@overload +def loadtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + dtype: DTypeLike, + comments: None | str | Sequence[str] = ..., + delimiter: None | str = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + skiprows: int = ..., + usecols: int | Sequence[int] = ..., + unpack: bool = ..., + ndmin: L[0, 1, 2] = ..., + encoding: None | str = ..., + max_rows: None | int = ..., + *, + quotechar: None | str = ..., + like: None | _SupportsArrayFunc = ... +) -> NDArray[Any]: ... + +def savetxt( + fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes], + X: ArrayLike, + fmt: str | Sequence[str] = ..., + delimiter: str = ..., + newline: str = ..., + header: str = ..., + footer: str = ..., + comments: str = ..., + encoding: None | str = ..., +) -> None: ... + +@overload +def fromregex( + file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes], + regexp: str | bytes | Pattern[Any], + dtype: _DTypeLike[_SCT], + encoding: None | str = ... +) -> NDArray[_SCT]: ... +@overload +def fromregex( + file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes], + regexp: str | bytes | Pattern[Any], + dtype: DTypeLike, + encoding: None | str = ... +) -> NDArray[Any]: ... 
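+
+# Illustrative sketch (not part of the stub): the ``dtype`` argument selects
+# the fromregex overload above, e.g. a hypothetical
+#   np.fromregex("log.txt", r"(\d+)", dtype=[("n", np.int64)])
+# only matches the generic DTypeLike overload and is typed NDArray[Any].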
+ +@overload +def genfromtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + dtype: None = ..., + comments: str = ..., + delimiter: None | str | int | Iterable[int] = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: None | Sequence[int] = ..., + names: L[None, True] | str | Collection[str] = ..., + excludelist: None | Sequence[str] = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L['upper', 'lower'] = ..., + defaultfmt: str = ..., + unpack: None | bool = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: None | int = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... +@overload +def genfromtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + dtype: _DTypeLike[_SCT], + comments: str = ..., + delimiter: None | str | int | Iterable[int] = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: None | Sequence[int] = ..., + names: L[None, True] | str | Collection[str] = ..., + excludelist: None | Sequence[str] = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L['upper', 'lower'] = ..., + defaultfmt: str = ..., + unpack: None | bool = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: None | int = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def genfromtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + dtype: DTypeLike, + comments: str = ..., + delimiter: None | str | int | Iterable[int] = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: None | Sequence[int] = ..., + names: L[None, True] | str | Collection[str] = ..., + excludelist: None | Sequence[str] = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L['upper', 'lower'] = ..., + defaultfmt: str = ..., + unpack: None | bool = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: None | int = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def recfromtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + *, + usemask: L[False] = ..., + **kwargs: Any, +) -> recarray[Any, dtype[record]]: ... +@overload +def recfromtxt( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + *, + usemask: L[True], + **kwargs: Any, +) -> MaskedRecords[Any, dtype[void]]: ... + +@overload +def recfromcsv( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + *, + usemask: L[False] = ..., + **kwargs: Any, +) -> recarray[Any, dtype[record]]: ... +@overload +def recfromcsv( + fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + *, + usemask: L[True], + **kwargs: Any, +) -> MaskedRecords[Any, dtype[void]]: ... 
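+
+# Illustrative sketch (not part of the stub): the recfrom* overloads above key
+# their return type on ``usemask``, e.g. for a hypothetical data.txt:
+#   recfromtxt("data.txt", names=True)      # -> recarray[Any, dtype[record]]
+#   recfromtxt("data.txt", usemask=True)    # -> MaskedRecords[Any, dtype[void]]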
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/polynomial.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/polynomial.py new file mode 100644 index 00000000..3b8db2a9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/polynomial.py @@ -0,0 +1,1453 @@ +""" +Functions to operate on polynomials. + +""" +__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', + 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', + 'polyfit', 'RankWarning'] + +import functools +import re +import warnings + +from .._utils import set_module +import numpy.core.numeric as NX + +from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array, + ones) +from numpy.core import overrides +from numpy.lib.twodim_base import diag, vander +from numpy.lib.function_base import trim_zeros +from numpy.lib.type_check import iscomplex, real, imag, mintypecode +from numpy.linalg import eigvals, lstsq, inv + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +@set_module('numpy') +class RankWarning(UserWarning): + """ + Issued by `polyfit` when the Vandermonde matrix is rank deficient. + + For more information, a way to suppress the warning, and an example of + `RankWarning` being issued, see `polyfit`. + + """ + pass + + +def _poly_dispatcher(seq_of_zeros): + return seq_of_zeros + + +@array_function_dispatch(_poly_dispatcher) +def poly(seq_of_zeros): + """ + Find the coefficients of a polynomial with the given sequence of roots. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Returns the coefficients of the polynomial whose leading coefficient + is one for the given sequence of zeros (multiple roots must be included + in the sequence as many times as their multiplicity; see Examples). + A square matrix (or array, which will be treated as a matrix) can also + be given, in which case the coefficients of the characteristic polynomial + of the matrix are returned. + + Parameters + ---------- + seq_of_zeros : array_like, shape (N,) or (N, N) + A sequence of polynomial roots, or a square array or matrix object. + + Returns + ------- + c : ndarray + 1D array of polynomial coefficients from highest to lowest degree: + + ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` + where c[0] always equals 1. + + Raises + ------ + ValueError + If input is the wrong shape (the input must be a 1-D or square + 2-D array). + + See Also + -------- + polyval : Compute polynomial values. + roots : Return the roots of a polynomial. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + Specifying the roots of a polynomial still leaves one degree of + freedom, typically represented by an undetermined leading + coefficient. [1]_ In the case of this function, that coefficient - + the first one in the returned array - is always taken as one. (If + for some reason you have one other point, the only automatic way + presently to leverage that information is to use ``polyfit``.) + + The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` + matrix **A** is given by + + :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`, + + where **I** is the `n`-by-`n` identity matrix. [2]_ + + References + ---------- + .. [1] M. Sullivan and M. 
Sullivan, III, "Algebra and Trigonometry, + Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. + + .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," + Academic Press, pg. 182, 1980. + + Examples + -------- + Given a sequence of a polynomial's zeros: + + >>> np.poly((0, 0, 0)) # Multiple root example + array([1., 0., 0., 0.]) + + The line above represents z**3 + 0*z**2 + 0*z + 0. + + >>> np.poly((-1./2, 0, 1./2)) + array([ 1. , 0. , -0.25, 0. ]) + + The line above represents z**3 - z/4 + + >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0])) + array([ 1. , -0.77086955, 0.08618131, 0. ]) # random + + Given a square array object: + + >>> P = np.array([[0, 1./3], [-1./2, 0]]) + >>> np.poly(P) + array([1. , 0. , 0.16666667]) + + Note how in all cases the leading coefficient is always 1. + + """ + seq_of_zeros = atleast_1d(seq_of_zeros) + sh = seq_of_zeros.shape + + if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: + seq_of_zeros = eigvals(seq_of_zeros) + elif len(sh) == 1: + dt = seq_of_zeros.dtype + # Let object arrays slip through, e.g. for arbitrary precision + if dt != object: + seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) + else: + raise ValueError("input must be 1d or non-empty square 2d array.") + + if len(seq_of_zeros) == 0: + return 1.0 + dt = seq_of_zeros.dtype + a = ones((1,), dtype=dt) + for zero in seq_of_zeros: + a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full') + + if issubclass(a.dtype.type, NX.complexfloating): + # if complex roots are all complex conjugates, the roots are real. + roots = NX.asarray(seq_of_zeros, complex) + if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())): + a = a.real.copy() + + return a + + +def _roots_dispatcher(p): + return p + + +@array_function_dispatch(_roots_dispatcher) +def roots(p): + """ + Return the roots of a polynomial with coefficients given in p. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The values in the rank-1 array `p` are coefficients of a polynomial. + If the length of `p` is n+1 then the polynomial is described by:: + + p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] + + Parameters + ---------- + p : array_like + Rank-1 array of polynomial coefficients. + + Returns + ------- + out : ndarray + An array containing the roots of the polynomial. + + Raises + ------ + ValueError + When `p` cannot be converted to a rank-1 array. + + See also + -------- + poly : Find the coefficients of a polynomial with a given sequence + of roots. + polyval : Compute polynomial values. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + The algorithm relies on computing the eigenvalues of the + companion matrix [1]_. + + References + ---------- + .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: + Cambridge University Press, 1999, pp. 146-7. 
+ + Examples + -------- + >>> coeff = [3.2, 2, 1] + >>> np.roots(coeff) + array([-0.3125+0.46351241j, -0.3125-0.46351241j]) + + """ + # If input is scalar, this makes it an array + p = atleast_1d(p) + if p.ndim != 1: + raise ValueError("Input must be a rank-1 array.") + + # find non-zero array entries + non_zero = NX.nonzero(NX.ravel(p))[0] + + # Return an empty array if polynomial is all zeros + if len(non_zero) == 0: + return NX.array([]) + + # find the number of trailing zeros -- this is the number of roots at 0. + trailing_zeros = len(p) - non_zero[-1] - 1 + + # strip leading and trailing zeros + p = p[int(non_zero[0]):int(non_zero[-1])+1] + + # casting: if incoming array isn't floating point, make it floating point. + if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): + p = p.astype(float) + + N = len(p) + if N > 1: + # build companion matrix and find its eigenvalues (the roots) + A = diag(NX.ones((N-2,), p.dtype), -1) + A[0,:] = -p[1:] / p[0] + roots = eigvals(A) + else: + roots = NX.array([]) + + # tack any zeros onto the back of the array + roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) + return roots + + +def _polyint_dispatcher(p, m=None, k=None): + return (p,) + + +@array_function_dispatch(_polyint_dispatcher) +def polyint(p, m=1, k=None): + """ + Return an antiderivative (indefinite integral) of a polynomial. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The returned order `m` antiderivative `P` of polynomial `p` satisfies + :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` + integration constants `k`. The constants determine the low-order + polynomial part + + .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} + + of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. + + Parameters + ---------- + p : array_like or poly1d + Polynomial to integrate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of the antiderivative. (Default: 1) + k : list of `m` scalars or scalar, optional + Integration constants. They are given in the order of integration: + those corresponding to highest-order terms come first. + + If ``None`` (default), all constants are assumed to be zero. + If `m = 1`, a single scalar can be given instead of a list. + + See Also + -------- + polyder : derivative of a polynomial + poly1d.integ : equivalent method + + Examples + -------- + The defining property of the antiderivative: + + >>> p = np.poly1d([1,1,1]) + >>> P = np.polyint(p) + >>> P + poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary + >>> np.polyder(P) == p + True + + The integration constants default to zero, but can be specified: + + >>> P = np.polyint(p, 3) + >>> P(0) + 0.0 + >>> np.polyder(P)(0) + 0.0 + >>> np.polyder(P, 2)(0) + 0.0 + >>> P = np.polyint(p, 3, k=[6,5,3]) + >>> P + poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary + + Note that 3 = 6 / 2!, and that the constants are given in the order of + integrations. 
Constant of the highest-order polynomial term comes first: + + >>> np.polyder(P, 2)(0) + 6.0 + >>> np.polyder(P, 1)(0) + 5.0 + >>> P(0) + 3.0 + + """ + m = int(m) + if m < 0: + raise ValueError("Order of integral must be positive (see polyder)") + if k is None: + k = NX.zeros(m, float) + k = atleast_1d(k) + if len(k) == 1 and m > 1: + k = k[0]*NX.ones(m, float) + if len(k) < m: + raise ValueError( + "k must be a scalar or a rank-1 array of length 1 or >m.") + + truepoly = isinstance(p, poly1d) + p = NX.asarray(p) + if m == 0: + if truepoly: + return poly1d(p) + return p + else: + # Note: this must work also with object and integer arrays + y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]])) + val = polyint(y, m - 1, k=k[1:]) + if truepoly: + return poly1d(val) + return val + + +def _polyder_dispatcher(p, m=None): + return (p,) + + +@array_function_dispatch(_polyder_dispatcher) +def polyder(p, m=1): + """ + Return the derivative of the specified order of a polynomial. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Parameters + ---------- + p : poly1d or sequence + Polynomial to differentiate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of differentiation (default: 1) + + Returns + ------- + der : poly1d + A new polynomial representing the derivative. + + See Also + -------- + polyint : Anti-derivative of a polynomial. + poly1d : Class for one-dimensional polynomials. + + Examples + -------- + The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: + + >>> p = np.poly1d([1,1,1,1]) + >>> p2 = np.polyder(p) + >>> p2 + poly1d([3, 2, 1]) + + which evaluates to: + + >>> p2(2.) + 17.0 + + We can verify this, approximating the derivative with + ``(f(x + h) - f(x))/h``: + + >>> (p(2. + 0.001) - p(2.)) / 0.001 + 17.007000999997857 + + The fourth-order derivative of a 3rd-order polynomial is zero: + + >>> np.polyder(p, 2) + poly1d([6, 2]) + >>> np.polyder(p, 3) + poly1d([6]) + >>> np.polyder(p, 4) + poly1d([0]) + + """ + m = int(m) + if m < 0: + raise ValueError("Order of derivative must be positive (see polyint)") + + truepoly = isinstance(p, poly1d) + p = NX.asarray(p) + n = len(p) - 1 + y = p[:-1] * NX.arange(n, 0, -1) + if m == 0: + val = p + else: + val = polyder(y, m - 1) + if truepoly: + val = poly1d(val) + return val + + +def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None): + return (x, y, w) + + +@array_function_dispatch(_polyfit_dispatcher) +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): + """ + Least squares polynomial fit. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` + to points `(x, y)`. Returns a vector of coefficients `p` that minimises + the squared error in the order `deg`, `deg-1`, ... `0`. + + The `Polynomial.fit ` class + method is recommended for new code as it is more stable numerically. See + the documentation of the method for more information. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. 
+ y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int + Degree of the fitting polynomial + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (M,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + cov : bool or str, optional + If given and not `False`, return not just the estimate but also its + covariance matrix. By default, the covariance are scaled by + chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed + to be unreliable except in a relative sense and everything is scaled + such that the reduced chi2 is unity. This scaling is omitted if + ``cov='unscaled'``, as is relevant for the case that the weights are + w = 1/sigma, with sigma known to be a reliable estimate of the + uncertainty. + + Returns + ------- + p : ndarray, shape (deg + 1,) or (deg + 1, K) + Polynomial coefficients, highest power first. If `y` was 2-D, the + coefficients for `k`-th data set are in ``p[:,k]``. + + residuals, rank, singular_values, rcond + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the effective rank of the scaled Vandermonde + coefficient matrix + - singular_values -- singular values of the scaled Vandermonde + coefficient matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + V : ndarray, shape (M,M) or (M,M,K) + Present only if ``full == False`` and ``cov == True``. The covariance + matrix of the polynomial coefficient estimates. The diagonal of + this matrix are the variance estimates for each coefficient. If y + is a 2-D array, then the covariance matrix for the `k`-th data set + are in ``V[:,:,k]`` + + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. + + The warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) + + See Also + -------- + polyval : Compute polynomial values. + linalg.lstsq : Computes a least-squares fit. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution minimizes the squared error + + .. math:: + E = \\sum_{j=0}^k |p(x_j) - y_j|^2 + + in the equations:: + + x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0] + x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1] + ... + x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k] + + The coefficient matrix of the coefficients `p` is a Vandermonde matrix. + + `polyfit` issues a `RankWarning` when the least-squares fit is badly + conditioned. 
This implies that the best fit is not well-defined due + to numerical error. The results may be improved by lowering the polynomial + degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter + can also be set to a value smaller than its default, but the resulting + fit may be spurious: including contributions from the small singular + values can add numerical noise to the result. + + Note that fitting polynomial coefficients is inherently badly conditioned + when the degree of the polynomial is large or the interval of sample points + is badly centered. The quality of the fit should always be checked in these + cases. When polynomial fits are not satisfactory, splines may be a good + alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + .. [2] Wikipedia, "Polynomial interpolation", + https://en.wikipedia.org/wiki/Polynomial_interpolation + + Examples + -------- + >>> import warnings + >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) + >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) + >>> z = np.polyfit(x, y, 3) + >>> z + array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary + + It is convenient to use `poly1d` objects for dealing with polynomials: + + >>> p = np.poly1d(z) + >>> p(0.5) + 0.6143849206349179 # may vary + >>> p(3.5) + -0.34732142857143039 # may vary + >>> p(10) + 22.579365079365115 # may vary + + High-order polynomials may oscillate wildly: + + >>> with warnings.catch_warnings(): + ... warnings.simplefilter('ignore', np.RankWarning) + ... p30 = np.poly1d(np.polyfit(x, y, 30)) + ... + >>> p30(4) + -0.80000000000000204 # may vary + >>> p30(5) + -0.99999999999999445 # may vary + >>> p30(4.5) + -0.10547061179440398 # may vary + + Illustration: + + >>> import matplotlib.pyplot as plt + >>> xp = np.linspace(-2, 6, 100) + >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') + >>> plt.ylim(-2,2) + (-2, 2) + >>> plt.show() + + """ + order = int(deg) + 1 + x = NX.asarray(x) + 0.0 + y = NX.asarray(y) + 0.0 + + # check arguments. 
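+    # Illustrative sketch (added for clarity, not upstream NumPy): ``order``,
+    # computed above as deg + 1, is the number of Vandermonde columns, e.g.
+    # deg=2 fits against the columns [x**2, x**1, x**0], so that
+    # ``vander(x, order) @ c`` approximates ``y``.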
+    if deg < 0:
+        raise ValueError("expected deg >= 0")
+    if x.ndim != 1:
+        raise TypeError("expected 1D vector for x")
+    if x.size == 0:
+        raise TypeError("expected non-empty vector for x")
+    if y.ndim < 1 or y.ndim > 2:
+        raise TypeError("expected 1D or 2D array for y")
+    if x.shape[0] != y.shape[0]:
+        raise TypeError("expected x and y to have same length")
+
+    # set rcond
+    if rcond is None:
+        rcond = len(x)*finfo(x.dtype).eps
+
+    # set up least squares equation for powers of x
+    lhs = vander(x, order)
+    rhs = y
+
+    # apply weighting
+    if w is not None:
+        w = NX.asarray(w) + 0.0
+        if w.ndim != 1:
+            raise TypeError("expected a 1-d array for weights")
+        if w.shape[0] != y.shape[0]:
+            raise TypeError("expected w and y to have the same length")
+        lhs *= w[:, NX.newaxis]
+        if rhs.ndim == 2:
+            rhs *= w[:, NX.newaxis]
+        else:
+            rhs *= w
+
+    # scale lhs to improve condition number and solve
+    scale = NX.sqrt((lhs*lhs).sum(axis=0))
+    lhs /= scale
+    c, resids, rank, s = lstsq(lhs, rhs, rcond)
+    c = (c.T/scale).T  # broadcast scale coefficients
+
+    # warn on rank reduction, which indicates an ill conditioned matrix
+    if rank != order and not full:
+        msg = "Polyfit may be poorly conditioned"
+        warnings.warn(msg, RankWarning, stacklevel=2)
+
+    if full:
+        return c, resids, rank, s, rcond
+    elif cov:
+        Vbase = inv(dot(lhs.T, lhs))
+        Vbase /= NX.outer(scale, scale)
+        if cov == "unscaled":
+            fac = 1
+        else:
+            if len(x) <= order:
+                raise ValueError("the number of data points must exceed order "
+                                 "to scale the covariance matrix")
+            # note, this used to be: fac = resids / (len(x) - order - 2.0)
+            # it was decided that the "- 2" (originally justified by "Bayesian
+            # uncertainty analysis") is not what the user expects
+            # (see gh-11196 and gh-11197)
+            fac = resids / (len(x) - order)
+        if y.ndim == 1:
+            return c, Vbase * fac
+        else:
+            return c, Vbase[:,:, NX.newaxis] * fac
+    else:
+        return c
+
+
+def _polyval_dispatcher(p, x):
+    return (p, x)
+
+
+@array_function_dispatch(_polyval_dispatcher)
+def polyval(p, x):
+    """
+    Evaluate a polynomial at specific values.
+
+    .. note::
+       This forms part of the old polynomial API. Since version 1.4, the
+       new polynomial API defined in `numpy.polynomial` is preferred.
+       A summary of the differences can be found in the
+       :doc:`transition guide </reference/routines.polynomials>`.
+
+    If `p` is of length N, this function returns the value:
+
+    ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
+
+    If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``.
+    If `x` is another polynomial then the composite polynomial ``p(x(t))``
+    is returned.
+
+    Parameters
+    ----------
+    p : array_like or poly1d object
+       1D array of polynomial coefficients (including coefficients equal
+       to zero) from highest degree to the constant term, or an
+       instance of poly1d.
+    x : array_like or poly1d object
+       A number, an array of numbers, or an instance of poly1d, at
+       which to evaluate `p`.
+
+    Returns
+    -------
+    values : ndarray or poly1d
+       If `x` is a poly1d instance, the result is the composition of the two
+       polynomials, i.e., `x` is "substituted" in `p` and the simplified
+       result is returned. In addition, the type of `x` - array_like or
+       poly1d - governs the type of the output: `x` array_like => `values`
+       array_like, `x` a poly1d object => `values` is also.
+
+    See Also
+    --------
+    poly1d: A polynomial class.
+
+    Notes
+    -----
+    Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
+    for polynomials of high degree the values may be inaccurate due to
+    rounding errors. Use carefully.
+ + If `x` is a subtype of `ndarray` the return value will be of the same type. + + References + ---------- + .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. + trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand + Reinhold Co., 1985, pg. 720. + + Examples + -------- + >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 + 76 + >>> np.polyval([3,0,1], np.poly1d(5)) + poly1d([76]) + >>> np.polyval(np.poly1d([3,0,1]), 5) + 76 + >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) + poly1d([76]) + + """ + p = NX.asarray(p) + if isinstance(x, poly1d): + y = 0 + else: + x = NX.asanyarray(x) + y = NX.zeros_like(x) + for pv in p: + y = y * x + pv + return y + + +def _binary_op_dispatcher(a1, a2): + return (a1, a2) + + +@array_function_dispatch(_binary_op_dispatcher) +def polyadd(a1, a2): + """ + Find the sum of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Returns the polynomial resulting from the sum of two input polynomials. + Each input must be either a poly1d object or a 1D sequence of polynomial + coefficients, from highest to lowest degree. + + Parameters + ---------- + a1, a2 : array_like or poly1d object + Input polynomials. + + Returns + ------- + out : ndarray or poly1d object + The sum of the inputs. If either input is a poly1d object, then the + output is also a poly1d object. Otherwise, it is a 1D array of + polynomial coefficients from highest to lowest degree. + + See Also + -------- + poly1d : A one-dimensional polynomial class. + poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval + + Examples + -------- + >>> np.polyadd([1, 2], [9, 5, 4]) + array([9, 6, 6]) + + Using poly1d objects: + + >>> p1 = np.poly1d([1, 2]) + >>> p2 = np.poly1d([9, 5, 4]) + >>> print(p1) + 1 x + 2 + >>> print(p2) + 2 + 9 x + 5 x + 4 + >>> print(np.polyadd(p1, p2)) + 2 + 9 x + 6 x + 6 + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1 = atleast_1d(a1) + a2 = atleast_1d(a2) + diff = len(a2) - len(a1) + if diff == 0: + val = a1 + a2 + elif diff > 0: + zr = NX.zeros(diff, a1.dtype) + val = NX.concatenate((zr, a1)) + a2 + else: + zr = NX.zeros(abs(diff), a2.dtype) + val = a1 + NX.concatenate((zr, a2)) + if truepoly: + val = poly1d(val) + return val + + +@array_function_dispatch(_binary_op_dispatcher) +def polysub(a1, a2): + """ + Difference (subtraction) of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Given two polynomials `a1` and `a2`, returns ``a1 - a2``. + `a1` and `a2` can be either array_like sequences of the polynomials' + coefficients (including coefficients equal to zero), or `poly1d` objects. + + Parameters + ---------- + a1, a2 : array_like or poly1d + Minuend and subtrahend polynomials, respectively. + + Returns + ------- + out : ndarray or poly1d + Array or `poly1d` object of the difference polynomial's coefficients. + + See Also + -------- + polyval, polydiv, polymul, polyadd + + Examples + -------- + .. 
math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) + + >>> np.polysub([2, 10, -2], [3, 10, -4]) + array([-1, 0, 2]) + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1 = atleast_1d(a1) + a2 = atleast_1d(a2) + diff = len(a2) - len(a1) + if diff == 0: + val = a1 - a2 + elif diff > 0: + zr = NX.zeros(diff, a1.dtype) + val = NX.concatenate((zr, a1)) - a2 + else: + zr = NX.zeros(abs(diff), a2.dtype) + val = a1 - NX.concatenate((zr, a2)) + if truepoly: + val = poly1d(val) + return val + + +@array_function_dispatch(_binary_op_dispatcher) +def polymul(a1, a2): + """ + Find the product of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Finds the polynomial resulting from the multiplication of the two input + polynomials. Each input must be either a poly1d object or a 1D sequence + of polynomial coefficients, from highest to lowest degree. + + Parameters + ---------- + a1, a2 : array_like or poly1d object + Input polynomials. + + Returns + ------- + out : ndarray or poly1d object + The polynomial resulting from the multiplication of the inputs. If + either inputs is a poly1d object, then the output is also a poly1d + object. Otherwise, it is a 1D array of polynomial coefficients from + highest to lowest degree. + + See Also + -------- + poly1d : A one-dimensional polynomial class. + poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval + convolve : Array convolution. Same output as polymul, but has parameter + for overlap mode. + + Examples + -------- + >>> np.polymul([1, 2, 3], [9, 5, 1]) + array([ 9, 23, 38, 17, 3]) + + Using poly1d objects: + + >>> p1 = np.poly1d([1, 2, 3]) + >>> p2 = np.poly1d([9, 5, 1]) + >>> print(p1) + 2 + 1 x + 2 x + 3 + >>> print(p2) + 2 + 9 x + 5 x + 1 + >>> print(np.polymul(p1, p2)) + 4 3 2 + 9 x + 23 x + 38 x + 17 x + 3 + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1, a2 = poly1d(a1), poly1d(a2) + val = NX.convolve(a1, a2) + if truepoly: + val = poly1d(val) + return val + + +def _polydiv_dispatcher(u, v): + return (u, v) + + +@array_function_dispatch(_polydiv_dispatcher) +def polydiv(u, v): + """ + Returns the quotient and remainder of polynomial division. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The input arrays are the coefficients (including any coefficients + equal to zero) of the "numerator" (dividend) and "denominator" + (divisor) polynomials, respectively. + + Parameters + ---------- + u : array_like or poly1d + Dividend polynomial's coefficients. + + v : array_like or poly1d + Divisor polynomial's coefficients. + + Returns + ------- + q : ndarray + Coefficients, including those equal to zero, of the quotient. + r : ndarray + Coefficients, including those equal to zero, of the remainder. + + See Also + -------- + poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub + polyval + + Notes + ----- + Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need + not equal `v.ndim`. In other words, all four possible combinations - + ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``, + ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work. + + Examples + -------- + .. 
math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 + + >>> x = np.array([3.0, 5.0, 2.0]) + >>> y = np.array([2.0, 1.0]) + >>> np.polydiv(x, y) + (array([1.5 , 1.75]), array([0.25])) + + """ + truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d)) + u = atleast_1d(u) + 0.0 + v = atleast_1d(v) + 0.0 + # w has the common type + w = u[0] + v[0] + m = len(u) - 1 + n = len(v) - 1 + scale = 1. / v[0] + q = NX.zeros((max(m - n + 1, 1),), w.dtype) + r = u.astype(w.dtype) + for k in range(0, m-n+1): + d = scale * r[k] + q[k] = d + r[k:k+n+1] -= d*v + while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1): + r = r[1:] + if truepoly: + return poly1d(q), poly1d(r) + return q, r + +_poly_mat = re.compile(r"\*\*([0-9]*)") +def _raise_power(astr, wrap=70): + n = 0 + line1 = '' + line2 = '' + output = ' ' + while True: + mat = _poly_mat.search(astr, n) + if mat is None: + break + span = mat.span() + power = mat.groups()[0] + partstr = astr[n:span[0]] + n = span[1] + toadd2 = partstr + ' '*(len(power)-1) + toadd1 = ' '*(len(partstr)-1) + power + if ((len(line2) + len(toadd2) > wrap) or + (len(line1) + len(toadd1) > wrap)): + output += line1 + "\n" + line2 + "\n " + line1 = toadd1 + line2 = toadd2 + else: + line2 += partstr + ' '*(len(power)-1) + line1 += ' '*(len(partstr)-1) + power + output += line1 + "\n" + line2 + return output + astr[n:] + + +@set_module('numpy') +class poly1d: + """ + A one-dimensional polynomial class. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + A convenience class, used to encapsulate "natural" operations on + polynomials so that said operations may take on their customary + form in code (see Examples). + + Parameters + ---------- + c_or_r : array_like + The polynomial's coefficients, in decreasing powers, or if + the value of the second parameter is True, the polynomial's + roots (values where the polynomial evaluates to 0). For example, + ``poly1d([1, 2, 3])`` returns an object that represents + :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns + one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`. + r : bool, optional + If True, `c_or_r` specifies the polynomial's roots; the default + is False. + variable : str, optional + Changes the variable used when printing `p` from `x` to `variable` + (see Examples). 
+ + Examples + -------- + Construct the polynomial :math:`x^2 + 2x + 3`: + + >>> p = np.poly1d([1, 2, 3]) + >>> print(np.poly1d(p)) + 2 + 1 x + 2 x + 3 + + Evaluate the polynomial at :math:`x = 0.5`: + + >>> p(0.5) + 4.25 + + Find the roots: + + >>> p.r + array([-1.+1.41421356j, -1.-1.41421356j]) + >>> p(p.r) + array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary + + These numbers in the previous line represent (0, 0) to machine precision + + Show the coefficients: + + >>> p.c + array([1, 2, 3]) + + Display the order (the leading zero-coefficients are removed): + + >>> p.order + 2 + + Show the coefficient of the k-th power in the polynomial + (which is equivalent to ``p.c[-(i+1)]``): + + >>> p[1] + 2 + + Polynomials can be added, subtracted, multiplied, and divided + (returns quotient and remainder): + + >>> p * p + poly1d([ 1, 4, 10, 12, 9]) + + >>> (p**3 + 4) / p + (poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.])) + + ``asarray(p)`` gives the coefficient array, so polynomials can be + used in all functions that accept arrays: + + >>> p**2 # square of polynomial + poly1d([ 1, 4, 10, 12, 9]) + + >>> np.square(p) # square of individual coefficients + array([1, 4, 9]) + + The variable used in the string representation of `p` can be modified, + using the `variable` parameter: + + >>> p = np.poly1d([1,2,3], variable='z') + >>> print(p) + 2 + 1 z + 2 z + 3 + + Construct a polynomial from its roots: + + >>> np.poly1d([1, 2], True) + poly1d([ 1., -3., 2.]) + + This is the same polynomial as obtained by: + + >>> np.poly1d([1, -1]) * np.poly1d([1, -2]) + poly1d([ 1, -3, 2]) + + """ + __hash__ = None + + @property + def coeffs(self): + """ The polynomial coefficients """ + return self._coeffs + + @coeffs.setter + def coeffs(self, value): + # allowing this makes p.coeffs *= 2 legal + if value is not self._coeffs: + raise AttributeError("Cannot set attribute") + + @property + def variable(self): + """ The name of the polynomial variable """ + return self._variable + + # calculated attributes + @property + def order(self): + """ The order or degree of the polynomial """ + return len(self._coeffs) - 1 + + @property + def roots(self): + """ The roots of the polynomial, where self(x) == 0 """ + return roots(self._coeffs) + + # our internal _coeffs property need to be backed by __dict__['coeffs'] for + # scipy to work correctly. 
+ @property + def _coeffs(self): + return self.__dict__['coeffs'] + @_coeffs.setter + def _coeffs(self, coeffs): + self.__dict__['coeffs'] = coeffs + + # alias attributes + r = roots + c = coef = coefficients = coeffs + o = order + + def __init__(self, c_or_r, r=False, variable=None): + if isinstance(c_or_r, poly1d): + self._variable = c_or_r._variable + self._coeffs = c_or_r._coeffs + + if set(c_or_r.__dict__) - set(self.__dict__): + msg = ("In the future extra properties will not be copied " + "across when constructing one poly1d from another") + warnings.warn(msg, FutureWarning, stacklevel=2) + self.__dict__.update(c_or_r.__dict__) + + if variable is not None: + self._variable = variable + return + if r: + c_or_r = poly(c_or_r) + c_or_r = atleast_1d(c_or_r) + if c_or_r.ndim > 1: + raise ValueError("Polynomial must be 1d only.") + c_or_r = trim_zeros(c_or_r, trim='f') + if len(c_or_r) == 0: + c_or_r = NX.array([0], dtype=c_or_r.dtype) + self._coeffs = c_or_r + if variable is None: + variable = 'x' + self._variable = variable + + def __array__(self, t=None): + if t: + return NX.asarray(self.coeffs, t) + else: + return NX.asarray(self.coeffs) + + def __repr__(self): + vals = repr(self.coeffs) + vals = vals[6:-1] + return "poly1d(%s)" % vals + + def __len__(self): + return self.order + + def __str__(self): + thestr = "0" + var = self.variable + + # Remove leading zeros + coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] + N = len(coeffs)-1 + + def fmt_float(q): + s = '%.4g' % q + if s.endswith('.0000'): + s = s[:-5] + return s + + for k, coeff in enumerate(coeffs): + if not iscomplex(coeff): + coefstr = fmt_float(real(coeff)) + elif real(coeff) == 0: + coefstr = '%sj' % fmt_float(imag(coeff)) + else: + coefstr = '(%s + %sj)' % (fmt_float(real(coeff)), + fmt_float(imag(coeff))) + + power = (N-k) + if power == 0: + if coefstr != '0': + newstr = '%s' % (coefstr,) + else: + if k == 0: + newstr = '0' + else: + newstr = '' + elif power == 1: + if coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = var + else: + newstr = '%s %s' % (coefstr, var) + else: + if coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = '%s**%d' % (var, power,) + else: + newstr = '%s %s**%d' % (coefstr, var, power) + + if k > 0: + if newstr != '': + if newstr.startswith('-'): + thestr = "%s - %s" % (thestr, newstr[1:]) + else: + thestr = "%s + %s" % (thestr, newstr) + else: + thestr = newstr + return _raise_power(thestr) + + def __call__(self, val): + return polyval(self.coeffs, val) + + def __neg__(self): + return poly1d(-self.coeffs) + + def __pos__(self): + return self + + def __mul__(self, other): + if isscalar(other): + return poly1d(self.coeffs * other) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __rmul__(self, other): + if isscalar(other): + return poly1d(other * self.coeffs) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __add__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __radd__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __pow__(self, val): + if not isscalar(val) or int(val) != val or val < 0: + raise ValueError("Power to non-negative integers only.") + res = [1] + for _ in range(val): + res = polymul(self.coeffs, res) + return poly1d(res) + + def __sub__(self, other): + other = poly1d(other) + return poly1d(polysub(self.coeffs, other.coeffs)) + + def __rsub__(self, other): + 
other = poly1d(other) + return poly1d(polysub(other.coeffs, self.coeffs)) + + def __div__(self, other): + if isscalar(other): + return poly1d(self.coeffs/other) + else: + other = poly1d(other) + return polydiv(self, other) + + __truediv__ = __div__ + + def __rdiv__(self, other): + if isscalar(other): + return poly1d(other/self.coeffs) + else: + other = poly1d(other) + return polydiv(other, self) + + __rtruediv__ = __rdiv__ + + def __eq__(self, other): + if not isinstance(other, poly1d): + return NotImplemented + if self.coeffs.shape != other.coeffs.shape: + return False + return (self.coeffs == other.coeffs).all() + + def __ne__(self, other): + if not isinstance(other, poly1d): + return NotImplemented + return not self.__eq__(other) + + + def __getitem__(self, val): + ind = self.order - val + if val > self.order: + return self.coeffs.dtype.type(0) + if val < 0: + return self.coeffs.dtype.type(0) + return self.coeffs[ind] + + def __setitem__(self, key, val): + ind = self.order - key + if key < 0: + raise ValueError("Does not support negative powers.") + if key > self.order: + zr = NX.zeros(key-self.order, self.coeffs.dtype) + self._coeffs = NX.concatenate((zr, self.coeffs)) + ind = 0 + self._coeffs[ind] = val + return + + def __iter__(self): + return iter(self.coeffs) + + def integ(self, m=1, k=0): + """ + Return an antiderivative (indefinite integral) of this polynomial. + + Refer to `polyint` for full documentation. + + See Also + -------- + polyint : equivalent function + + """ + return poly1d(polyint(self.coeffs, m=m, k=k)) + + def deriv(self, m=1): + """ + Return a derivative of this polynomial. + + Refer to `polyder` for full documentation. + + See Also + -------- + polyder : equivalent function + + """ + return poly1d(polyder(self.coeffs, m=m)) + +# Stuff to do on module import + +warnings.simplefilter('always', RankWarning) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/polynomial.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/polynomial.pyi new file mode 100644 index 00000000..14bbaf39 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/polynomial.pyi @@ -0,0 +1,303 @@ +from typing import ( + Literal as L, + overload, + Any, + SupportsInt, + SupportsIndex, + TypeVar, + NoReturn, +) + +from numpy import ( + RankWarning as RankWarning, + poly1d as poly1d, + unsignedinteger, + signedinteger, + floating, + complexfloating, + bool_, + int32, + int64, + float64, + complex128, + object_, +) + +from numpy._typing import ( + NDArray, + ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeObject_co, +) + +_T = TypeVar("_T") + +_2Tup = tuple[_T, _T] +_5Tup = tuple[ + _T, + NDArray[float64], + NDArray[int32], + NDArray[float64], + NDArray[float64], +] + +__all__: list[str] + +def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ... + +# Returns either a float or complex array depending on the input values. +# See `np.linalg.eigvals`. +def roots(p: ArrayLike) -> NDArray[complexfloating[Any, Any]] | NDArray[floating[Any]]: ... + +@overload +def polyint( + p: poly1d, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., +) -> poly1d: ... +@overload +def polyint( + p: _ArrayLikeFloat_co, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeFloat_co = ..., +) -> NDArray[floating[Any]]: ... 
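+# Illustrative sketch (not part of the stub): overload order matters here,
+# e.g. ``polyint([1.0, 2.0])`` matches the float overload above and is typed
+# ``NDArray[floating[Any]]`` before the complex overload below is tried.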
+@overload +def polyint( + p: _ArrayLikeComplex_co, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeComplex_co = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polyint( + p: _ArrayLikeObject_co, + m: SupportsInt | SupportsIndex = ..., + k: None | _ArrayLikeObject_co = ..., +) -> NDArray[object_]: ... + +@overload +def polyder( + p: poly1d, + m: SupportsInt | SupportsIndex = ..., +) -> poly1d: ... +@overload +def polyder( + p: _ArrayLikeFloat_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[floating[Any]]: ... +@overload +def polyder( + p: _ArrayLikeComplex_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polyder( + p: _ArrayLikeObject_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[object_]: ... + +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[False] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: L[False] = ..., +) -> NDArray[float64]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[False] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: L[False] = ..., +) -> NDArray[complex128]: ... +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[False] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: L[True, "unscaled"] = ..., +) -> _2Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[False] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: L[True, "unscaled"] = ..., +) -> _2Tup[NDArray[complex128]]: ... +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[True] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: None | float = ..., + full: L[True] = ..., + w: None | _ArrayLikeFloat_co = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[complex128]]: ... + +@overload +def polyval( + p: _ArrayLikeBool_co, + x: _ArrayLikeBool_co, +) -> NDArray[int64]: ... +@overload +def polyval( + p: _ArrayLikeUInt_co, + x: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def polyval( + p: _ArrayLikeInt_co, + x: _ArrayLikeInt_co, +) -> NDArray[signedinteger[Any]]: ... +@overload +def polyval( + p: _ArrayLikeFloat_co, + x: _ArrayLikeFloat_co, +) -> NDArray[floating[Any]]: ... +@overload +def polyval( + p: _ArrayLikeComplex_co, + x: _ArrayLikeComplex_co, +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polyval( + p: _ArrayLikeObject_co, + x: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +@overload +def polyadd( + a1: poly1d, + a2: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> poly1d: ... +@overload +def polyadd( + a1: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a2: poly1d, +) -> poly1d: ... +@overload +def polyadd( + a1: _ArrayLikeBool_co, + a2: _ArrayLikeBool_co, +) -> NDArray[bool_]: ... +@overload +def polyadd( + a1: _ArrayLikeUInt_co, + a2: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger[Any]]: ... 
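+# Note (a reading of intent, mirroring numpy's usual stub layout): overloads
+# are matched top to bottom, so mixed inputs resolve to the first array-like
+# protocol that both arguments satisfy, e.g.:
+#
+#     >>> np.polyadd([1, 2], [1.5, 0.5])   # int + float -> floating overload
+#     array([2.5, 2.5])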
+@overload +def polyadd( + a1: _ArrayLikeInt_co, + a2: _ArrayLikeInt_co, +) -> NDArray[signedinteger[Any]]: ... +@overload +def polyadd( + a1: _ArrayLikeFloat_co, + a2: _ArrayLikeFloat_co, +) -> NDArray[floating[Any]]: ... +@overload +def polyadd( + a1: _ArrayLikeComplex_co, + a2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polyadd( + a1: _ArrayLikeObject_co, + a2: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +@overload +def polysub( + a1: poly1d, + a2: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> poly1d: ... +@overload +def polysub( + a1: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a2: poly1d, +) -> poly1d: ... +@overload +def polysub( + a1: _ArrayLikeBool_co, + a2: _ArrayLikeBool_co, +) -> NoReturn: ... +@overload +def polysub( + a1: _ArrayLikeUInt_co, + a2: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger[Any]]: ... +@overload +def polysub( + a1: _ArrayLikeInt_co, + a2: _ArrayLikeInt_co, +) -> NDArray[signedinteger[Any]]: ... +@overload +def polysub( + a1: _ArrayLikeFloat_co, + a2: _ArrayLikeFloat_co, +) -> NDArray[floating[Any]]: ... +@overload +def polysub( + a1: _ArrayLikeComplex_co, + a2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def polysub( + a1: _ArrayLikeObject_co, + a2: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +# NOTE: Not an alias, but they do have the same signature (that we can reuse) +polymul = polyadd + +@overload +def polydiv( + u: poly1d, + v: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> _2Tup[poly1d]: ... +@overload +def polydiv( + u: _ArrayLikeComplex_co | _ArrayLikeObject_co, + v: poly1d, +) -> _2Tup[poly1d]: ... +@overload +def polydiv( + u: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, +) -> _2Tup[NDArray[floating[Any]]]: ... +@overload +def polydiv( + u: _ArrayLikeComplex_co, + v: _ArrayLikeComplex_co, +) -> _2Tup[NDArray[complexfloating[Any, Any]]]: ... +@overload +def polydiv( + u: _ArrayLikeObject_co, + v: _ArrayLikeObject_co, +) -> _2Tup[NDArray[Any]]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/recfunctions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/recfunctions.py new file mode 100644 index 00000000..83ae413c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/recfunctions.py @@ -0,0 +1,1673 @@ +""" +Collection of utilities to manipulate structured arrays. + +Most of these functions were initially implemented by John Hunter for +matplotlib. They have been rewritten and extended for convenience. 
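+
+A minimal sketch of typical usage (the array and field names here are
+arbitrary, chosen only for illustration)::
+
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> a = np.array([(1, 2.0)], dtype=[('x', 'i4'), ('y', 'f8')])
+    >>> rfn.get_names_flat(a.dtype)
+    ('x', 'y')
+    >>> rfn.structured_to_unstructured(a)
+    array([[1., 2.]])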
+
+"""
+import itertools
+import numpy as np
+import numpy.ma as ma
+from numpy import ndarray, recarray
+from numpy.ma import MaskedArray
+from numpy.ma.mrecords import MaskedRecords
+from numpy.core.overrides import array_function_dispatch
+from numpy.lib._iotools import _is_string_like
+
+_check_fill_value = np.ma.core._check_fill_value
+
+
+__all__ = [
+    'append_fields', 'apply_along_fields', 'assign_fields_by_name',
+    'drop_fields', 'find_duplicates', 'flatten_descr',
+    'get_fieldstructure', 'get_names', 'get_names_flat',
+    'join_by', 'merge_arrays', 'rec_append_fields',
+    'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
+    'rename_fields', 'repack_fields', 'require_fields',
+    'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
+    ]
+
+
+def _recursive_fill_fields_dispatcher(input, output):
+    return (input, output)
+
+
+@array_function_dispatch(_recursive_fill_fields_dispatcher)
+def recursive_fill_fields(input, output):
+    """
+    Fills fields from output with fields from input,
+    with support for nested structures.
+
+    Parameters
+    ----------
+    input : ndarray
+        Input array.
+    output : ndarray
+        Output array.
+
+    Notes
+    -----
+    * `output` should be at least the same size as `input`
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
+    >>> b = np.zeros((3,), dtype=a.dtype)
+    >>> rfn.recursive_fill_fields(a, b)
+    array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
+
+    """
+    newdtype = output.dtype
+    for field in newdtype.names:
+        try:
+            current = input[field]
+        except ValueError:
+            continue
+        if current.dtype.names is not None:
+            recursive_fill_fields(current, output[field])
+        else:
+            output[field][:len(current)] = current
+    return output
+
+
+def _get_fieldspec(dtype):
+    """
+    Produce a list of name/dtype pairs corresponding to the dtype fields
+
+    Similar to dtype.descr, but the second element of each tuple is a dtype,
+    not a string. As a result, this handles subarray dtypes
+
+    Can be passed to the dtype constructor to reconstruct the dtype, noting
+    that this (deliberately) ignores whether the field is "titled".
+
+    Examples
+    --------
+    >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
+    >>> dt.descr
+    [(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
+    >>> _get_fieldspec(dt)
+    [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
+
+    """
+    if dtype.names is None:
+        # .descr returns a nameless field, so we should too
+        return [('', dtype)]
+    else:
+        fields = ((name, dtype.fields[name]) for name in dtype.names)
+        # keep any titles, if present
+        return [
+            (name if len(f) == 2 else (f[2], name), f[0])
+            for name, f in fields
+        ]
+
+
+def get_names(adtype):
+    """
+    Returns the field names of the input datatype as a tuple. Input datatype
+    must have fields otherwise error is raised.
+
+    Parameters
+    ----------
+    adtype : dtype
+        Input datatype
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype)
+    ('A',)
+    >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype)
+    ('A', 'B')
+    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
+    >>> rfn.get_names(adtype)
+    ('a', ('b', ('ba', 'bb')))
+    """
+    listnames = []
+    names = adtype.names
+    for name in names:
+        current = adtype[name]
+        if current.names is not None:
+            listnames.append((name, tuple(get_names(current))))
+        else:
+            listnames.append(name)
+    return tuple(listnames)
+
+
+def get_names_flat(adtype):
+    """
+    Returns the field names of the input datatype as a tuple. Input datatype
+    must have fields otherwise error is raised.
+    Nested structures are flattened beforehand.
+
+    Parameters
+    ----------
+    adtype : dtype
+        Input datatype
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None
+    False
+    >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)
+    ('A', 'B')
+    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
+    >>> rfn.get_names_flat(adtype)
+    ('a', 'b', 'ba', 'bb')
+    """
+    listnames = []
+    names = adtype.names
+    for name in names:
+        listnames.append(name)
+        current = adtype[name]
+        if current.names is not None:
+            listnames.extend(get_names_flat(current))
+    return tuple(listnames)
+
+
+def flatten_descr(ndtype):
+    """
+    Flatten a structured data-type description.
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
+    >>> rfn.flatten_descr(ndtype)
+    (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
+
+    """
+    names = ndtype.names
+    if names is None:
+        return (('', ndtype),)
+    else:
+        descr = []
+        for field in names:
+            (typ, _) = ndtype.fields[field]
+            if typ.names is not None:
+                descr.extend(flatten_descr(typ))
+            else:
+                descr.append((field, typ))
+        return tuple(descr)
+
+
+def _zip_dtype(seqarrays, flatten=False):
+    newdtype = []
+    if flatten:
+        for a in seqarrays:
+            newdtype.extend(flatten_descr(a.dtype))
+    else:
+        for a in seqarrays:
+            current = a.dtype
+            if current.names is not None and len(current.names) == 1:
+                # special case - dtypes of 1 field are flattened
+                newdtype.extend(_get_fieldspec(current))
+            else:
+                newdtype.append(('', current))
+    return np.dtype(newdtype)
+
+
+def _zip_descr(seqarrays, flatten=False):
+    """
+    Combine the dtype description of a series of arrays.
+
+    Parameters
+    ----------
+    seqarrays : sequence of arrays
+        Sequence of arrays
+    flatten : {boolean}, optional
+        Whether to collapse nested descriptions.
+    """
+    return _zip_dtype(seqarrays, flatten=flatten).descr
+
+
+def get_fieldstructure(adtype, lastname=None, parents=None,):
+    """
+    Returns a dictionary with fields indexing lists of their parent fields.
+
+    This function is used to simplify access to fields nested in other fields.
+
+    Parameters
+    ----------
+    adtype : np.dtype
+        Input datatype
+    lastname : optional
+        Last processed field name (used internally during recursion).
+    parents : dictionary
+        Dictionary of parent fields (used internally during recursion).
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype = np.dtype([('A', int),
+    ...                    ('B', [('BA', int),
+    ...                           ('BB', [('BBA', int), ('BBB', int)])])])
+    >>> rfn.get_fieldstructure(ndtype)
+    ... # XXX: possible regression, order of BBA and BBB is swapped
+    {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
+
+    """
+    if parents is None:
+        parents = {}
+    names = adtype.names
+    for name in names:
+        current = adtype[name]
+        if current.names is not None:
+            if lastname:
+                parents[name] = [lastname, ]
+            else:
+                parents[name] = []
+            parents.update(get_fieldstructure(current, name, parents))
+        else:
+            lastparent = [_ for _ in (parents.get(lastname, []) or [])]
+            if lastparent:
+                lastparent.append(lastname)
+            elif lastname:
+                lastparent = [lastname, ]
+            parents[name] = lastparent or []
+    return parents
+
+
+def _izip_fields_flat(iterable):
+    """
+    Returns an iterator of concatenated fields from a sequence of arrays,
+    collapsing any nested structure.
+
+    """
+    for element in iterable:
+        if isinstance(element, np.void):
+            yield from _izip_fields_flat(tuple(element))
+        else:
+            yield element
+
+
+def _izip_fields(iterable):
+    """
+    Returns an iterator of concatenated fields from a sequence of arrays.
+
+    """
+    for element in iterable:
+        if (hasattr(element, '__iter__') and
+                not isinstance(element, str)):
+            yield from _izip_fields(element)
+        elif isinstance(element, np.void) and len(tuple(element)) == 1:
+            # this branch does the same as the one above
+            yield from _izip_fields(element)
+        else:
+            yield element
+
+
+def _izip_records(seqarrays, fill_value=None, flatten=True):
+    """
+    Returns an iterator of concatenated items from a sequence of arrays.
+
+    Parameters
+    ----------
+    seqarrays : sequence of arrays
+        Sequence of arrays.
+ fill_value : {None, integer} + Value used to pad shorter iterables. + flatten : {True, False}, + Whether to + """ + + # Should we flatten the items, or just use a nested approach + if flatten: + zipfunc = _izip_fields_flat + else: + zipfunc = _izip_fields + + for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value): + yield tuple(zipfunc(tup)) + + +def _fix_output(output, usemask=True, asrecarray=False): + """ + Private function: return a recarray, a ndarray, a MaskedArray + or a MaskedRecords depending on the input parameters + """ + if not isinstance(output, MaskedArray): + usemask = False + if usemask: + if asrecarray: + output = output.view(MaskedRecords) + else: + output = ma.filled(output) + if asrecarray: + output = output.view(recarray) + return output + + +def _fix_defaults(output, defaults=None): + """ + Update the fill_value and masked data of `output` + from the default given in a dictionary defaults. + """ + names = output.dtype.names + (data, mask, fill_value) = (output.data, output.mask, output.fill_value) + for (k, v) in (defaults or {}).items(): + if k in names: + fill_value[k] = v + data[k][mask[k]] = v + return output + + +def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None, + usemask=None, asrecarray=None): + return seqarrays + + +@array_function_dispatch(_merge_arrays_dispatcher) +def merge_arrays(seqarrays, fill_value=-1, flatten=False, + usemask=False, asrecarray=False): + """ + Merge arrays field by field. + + Parameters + ---------- + seqarrays : sequence of ndarrays + Sequence of arrays + fill_value : {float}, optional + Filling value used to pad missing data on the shorter arrays. + flatten : {False, True}, optional + Whether to collapse nested fields. + usemask : {False, True}, optional + Whether to return a masked array or not. + asrecarray : {False, True}, optional + Whether to return a recarray (MaskedRecords) or not. + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) + array([( 1, 10.), ( 2, 20.), (-1, 30.)], + dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64), + ... np.array([10., 20., 30.])), usemask=False) + array([(1, 10.0), (2, 20.0), (-1, 30.0)], + dtype=[('f0', '>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]), + ... np.array([10., 20., 30.])), + ... usemask=False, asrecarray=True) + rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)], + dtype=[('a', '>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])]) + >>> rfn.drop_fields(a, 'a') + array([((2., 3),), ((5., 6),)], + dtype=[('b', [('ba', '>> rfn.drop_fields(a, 'ba') + array([(1, (3,)), (4, (6,))], dtype=[('a', '>> rfn.drop_fields(a, ['ba', 'bb']) + array([(1,), (4,)], dtype=[('a', '>> from numpy.lib import recfunctions as rfn + >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + ... 
dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) + >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) + array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))], + dtype=[('A', ' 1: + data = merge_arrays(data, flatten=True, usemask=usemask, + fill_value=fill_value) + else: + data = data.pop() + # + output = ma.masked_all( + max(len(base), len(data)), + dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype)) + output = recursive_fill_fields(base, output) + output = recursive_fill_fields(data, output) + # + return _fix_output(output, usemask=usemask, asrecarray=asrecarray) + + +def _rec_append_fields_dispatcher(base, names, data, dtypes=None): + yield base + yield from data + + +@array_function_dispatch(_rec_append_fields_dispatcher) +def rec_append_fields(base, names, data, dtypes=None): + """ + Add new fields to an existing array. + + The names of the fields are given with the `names` arguments, + the corresponding values with the `data` arguments. + If a single field is appended, `names`, `data` and `dtypes` do not have + to be lists but just values. + + Parameters + ---------- + base : array + Input array to extend. + names : string, sequence + String or sequence of strings corresponding to the names + of the new fields. + data : array or sequence of arrays + Array or sequence of arrays storing the fields to add to the base. + dtypes : sequence of datatypes, optional + Datatype or sequence of datatypes. + If None, the datatypes are estimated from the `data`. + + See Also + -------- + append_fields + + Returns + ------- + appended_array : np.recarray + """ + return append_fields(base, names, data=data, dtypes=dtypes, + asrecarray=True, usemask=False) + + +def _repack_fields_dispatcher(a, align=None, recurse=None): + return (a,) + + +@array_function_dispatch(_repack_fields_dispatcher) +def repack_fields(a, align=False, recurse=False): + """ + Re-pack the fields of a structured array or dtype in memory. + + The memory layout of structured datatypes allows fields at arbitrary + byte offsets. This means the fields can be separated by padding bytes, + their offsets can be non-monotonically increasing, and they can overlap. + + This method removes any overlaps and reorders the fields in memory so they + have increasing byte offsets, and adds or removes padding bytes depending + on the `align` option, which behaves like the `align` option to + `numpy.dtype`. + + If `align=False`, this method produces a "packed" memory layout in which + each field starts at the byte the previous field ended, and any padding + bytes are removed. + + If `align=True`, this methods produces an "aligned" memory layout in which + each field's offset is a multiple of its alignment, and the total itemsize + is a multiple of the largest alignment, by adding padding bytes as needed. + + Parameters + ---------- + a : ndarray or dtype + array or dtype for which to repack the fields. + align : boolean + If true, use an "aligned" memory layout, otherwise use a "packed" layout. + recurse : boolean + If True, also repack nested structures. + + Returns + ------- + repacked : ndarray or dtype + Copy of `a` with fields repacked, or `a` itself if no repacking was + needed. + + Examples + -------- + + >>> from numpy.lib import recfunctions as rfn + >>> def print_offsets(d): + ... print("offsets:", [d.fields[name][1] for name in d.names]) + ... print("itemsize:", d.itemsize) + ... 
+ >>> dt = np.dtype('u1, >> dt + dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '>> print_offsets(dt) + offsets: [0, 8, 16] + itemsize: 24 + >>> packed_dt = rfn.repack_fields(dt) + >>> packed_dt + dtype([('f0', 'u1'), ('f1', '>> print_offsets(packed_dt) + offsets: [0, 1, 9] + itemsize: 17 + + """ + if not isinstance(a, np.dtype): + dt = repack_fields(a.dtype, align=align, recurse=recurse) + return a.astype(dt, copy=False) + + if a.names is None: + return a + + fieldinfo = [] + for name in a.names: + tup = a.fields[name] + if recurse: + fmt = repack_fields(tup[0], align=align, recurse=True) + else: + fmt = tup[0] + + if len(tup) == 3: + name = (tup[2], name) + + fieldinfo.append((name, fmt)) + + dt = np.dtype(fieldinfo, align=align) + return np.dtype((a.type, dt)) + +def _get_fields_and_offsets(dt, offset=0): + """ + Returns a flat list of (dtype, count, offset) tuples of all the + scalar fields in the dtype "dt", including nested fields, in left + to right order. + """ + + # counts up elements in subarrays, including nested subarrays, and returns + # base dtype and count + def count_elem(dt): + count = 1 + while dt.shape != (): + for size in dt.shape: + count *= size + dt = dt.base + return dt, count + + fields = [] + for name in dt.names: + field = dt.fields[name] + f_dt, f_offset = field[0], field[1] + f_dt, n = count_elem(f_dt) + + if f_dt.names is None: + fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset)) + else: + subfields = _get_fields_and_offsets(f_dt, f_offset + offset) + size = f_dt.itemsize + + for i in range(n): + if i == 0: + # optimization: avoid list comprehension if no subarray + fields.extend(subfields) + else: + fields.extend([(d, c, o + i*size) for d, c, o in subfields]) + return fields + +def _common_stride(offsets, counts, itemsize): + """ + Returns the stride between the fields, or None if the stride is not + constant. The values in "counts" designate the lengths of + subarrays. Subarrays are treated as many contiguous fields, with + always positive stride. + """ + if len(offsets) <= 1: + return itemsize + + negative = offsets[1] < offsets[0] # negative stride + if negative: + # reverse, so offsets will be ascending + it = zip(reversed(offsets), reversed(counts)) + else: + it = zip(offsets, counts) + + prev_offset = None + stride = None + for offset, count in it: + if count != 1: # subarray: always c-contiguous + if negative: + return None # subarrays can never have a negative stride + if stride is None: + stride = itemsize + if stride != itemsize: + return None + end_offset = offset + (count - 1) * itemsize + else: + end_offset = offset + + if prev_offset is not None: + new_stride = offset - prev_offset + if stride is None: + stride = new_stride + if stride != new_stride: + return None + + prev_offset = end_offset + + if negative: + return -stride + return stride + + +def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None, + casting=None): + return (arr,) + +@array_function_dispatch(_structured_to_unstructured_dispatcher) +def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'): + """ + Converts an n-D structured array into an (n+1)-D unstructured array. + + The new array will have a new last dimension equal in size to the + number of field-elements of the input array. If not supplied, the output + datatype is determined from the numpy type promotion rules applied to all + the field datatypes. + + Nested fields, as well as each element of any subarray fields, all count + as a single field-elements. 
+ + Parameters + ---------- + arr : ndarray + Structured array or dtype to convert. Cannot contain object datatype. + dtype : dtype, optional + The dtype of the output unstructured array. + copy : bool, optional + If true, always return a copy. If false, a view is returned if + possible, such as when the `dtype` and strides of the fields are + suitable and the array subtype is one of `np.ndarray`, `np.recarray` + or `np.memmap`. + + .. versionchanged:: 1.25.0 + A view can now be returned if the fields are separated by a + uniform stride. + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + See casting argument of `numpy.ndarray.astype`. Controls what kind of + data casting may occur. + + Returns + ------- + unstructured : ndarray + Unstructured array with one more dimension. + + Examples + -------- + + >>> from numpy.lib import recfunctions as rfn + >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + >>> a + array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]), + (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])], + dtype=[('a', '>> rfn.structured_to_unstructured(a) + array([[0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0.]]) + + >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1) + array([ 3. , 5.5, 9. , 11. ]) + + """ + if arr.dtype.names is None: + raise ValueError('arr must be a structured array') + + fields = _get_fields_and_offsets(arr.dtype) + n_fields = len(fields) + if n_fields == 0 and dtype is None: + raise ValueError("arr has no fields. Unable to guess dtype") + elif n_fields == 0: + # too many bugs elsewhere for this to work now + raise NotImplementedError("arr with no fields is not supported") + + dts, counts, offsets = zip(*fields) + names = ['f{}'.format(n) for n in range(n_fields)] + + if dtype is None: + out_dtype = np.result_type(*[dt.base for dt in dts]) + else: + out_dtype = np.dtype(dtype) + + # Use a series of views and casts to convert to an unstructured array: + + # first view using flattened fields (doesn't work for object arrays) + # Note: dts may include a shape for subarrays + flattened_fields = np.dtype({'names': names, + 'formats': dts, + 'offsets': offsets, + 'itemsize': arr.dtype.itemsize}) + arr = arr.view(flattened_fields) + + # we only allow a few types to be unstructured by manipulating the + # strides, because we know it won't work with, for example, np.matrix nor + # np.ma.MaskedArray. + can_view = type(arr) in (np.ndarray, np.recarray, np.memmap) + if (not copy) and can_view and all(dt.base == out_dtype for dt in dts): + # all elements have the right dtype already; if they have a common + # stride, we can just return a view + common_stride = _common_stride(offsets, counts, out_dtype.itemsize) + if common_stride is not None: + wrap = arr.__array_wrap__ + + new_shape = arr.shape + (sum(counts), out_dtype.itemsize) + new_strides = arr.strides + (abs(common_stride), 1) + + arr = arr[..., np.newaxis].view(np.uint8) # view as bytes + arr = arr[..., min(offsets):] # remove the leading unused data + arr = np.lib.stride_tricks.as_strided(arr, + new_shape, + new_strides, + subok=True) + + # cast and drop the last dimension again + arr = arr.view(out_dtype)[..., 0] + + if common_stride < 0: + arr = arr[..., ::-1] # reverse, if the stride was negative + if type(arr) is not type(wrap.__self__): + # Some types (e.g. 
recarray) turn into an ndarray along the + # way, so we have to wrap it again in order to match the + # behavior with copy=True. + arr = wrap(arr) + return arr + + # next cast to a packed format with all fields converted to new dtype + packed_fields = np.dtype({'names': names, + 'formats': [(out_dtype, dt.shape) for dt in dts]}) + arr = arr.astype(packed_fields, copy=copy, casting=casting) + + # finally is it safe to view the packed fields as the unstructured type + return arr.view((out_dtype, (sum(counts),))) + + +def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None, + align=None, copy=None, casting=None): + return (arr,) + +@array_function_dispatch(_unstructured_to_structured_dispatcher) +def unstructured_to_structured(arr, dtype=None, names=None, align=False, + copy=False, casting='unsafe'): + """ + Converts an n-D unstructured array into an (n-1)-D structured array. + + The last dimension of the input array is converted into a structure, with + number of field-elements equal to the size of the last dimension of the + input array. By default all output fields have the input array's dtype, but + an output structured dtype with an equal number of fields-elements can be + supplied instead. + + Nested fields, as well as each element of any subarray fields, all count + towards the number of field-elements. + + Parameters + ---------- + arr : ndarray + Unstructured array or dtype to convert. + dtype : dtype, optional + The structured dtype of the output array + names : list of strings, optional + If dtype is not supplied, this specifies the field names for the output + dtype, in order. The field dtypes will be the same as the input array. + align : boolean, optional + Whether to create an aligned memory layout. + copy : bool, optional + See copy argument to `numpy.ndarray.astype`. If true, always return a + copy. If false, and `dtype` requirements are satisfied, a view is + returned. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + See casting argument of `numpy.ndarray.astype`. Controls what kind of + data casting may occur. + + Returns + ------- + structured : ndarray + Structured array with fewer dimensions. + + Examples + -------- + + >>> from numpy.lib import recfunctions as rfn + >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + >>> a = np.arange(20).reshape((4,5)) + >>> a + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]]) + >>> rfn.unstructured_to_structured(a, dt) + array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]), + (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])], + dtype=[('a', '>> from numpy.lib import recfunctions as rfn + >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + >>> rfn.apply_along_fields(np.mean, b) + array([ 2.66666667, 5.33333333, 8.66666667, 11. ]) + >>> rfn.apply_along_fields(np.mean, b[['x', 'z']]) + array([ 3. , 5.5, 9. , 11. 
]) + + """ + if arr.dtype.names is None: + raise ValueError('arr must be a structured array') + + uarr = structured_to_unstructured(arr) + return func(uarr, axis=-1) + # works and avoids axis requirement, but very, very slow: + #return np.apply_along_axis(func, -1, uarr) + +def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None): + return dst, src + +@array_function_dispatch(_assign_fields_by_name_dispatcher) +def assign_fields_by_name(dst, src, zero_unassigned=True): + """ + Assigns values from one structured array to another by field name. + + Normally in numpy >= 1.14, assignment of one structured array to another + copies fields "by position", meaning that the first field from the src is + copied to the first field of the dst, and so on, regardless of field name. + + This function instead copies "by field name", such that fields in the dst + are assigned from the identically named field in the src. This applies + recursively for nested structures. This is how structure assignment worked + in numpy >= 1.6 to <= 1.13. + + Parameters + ---------- + dst : ndarray + src : ndarray + The source and destination arrays during assignment. + zero_unassigned : bool, optional + If True, fields in the dst for which there was no matching + field in the src are filled with the value 0 (zero). This + was the behavior of numpy <= 1.13. If False, those fields + are not modified. + """ + + if dst.dtype.names is None: + dst[...] = src + return + + for name in dst.dtype.names: + if name not in src.dtype.names: + if zero_unassigned: + dst[name] = 0 + else: + assign_fields_by_name(dst[name], src[name], + zero_unassigned) + +def _require_fields_dispatcher(array, required_dtype): + return (array,) + +@array_function_dispatch(_require_fields_dispatcher) +def require_fields(array, required_dtype): + """ + Casts a structured array to a new dtype using assignment by field-name. + + This function assigns from the old to the new array by name, so the + value of a field in the output array is the value of the field with the + same name in the source array. This has the effect of creating a new + ndarray containing only the fields "required" by the required_dtype. + + If a field name in the required_dtype does not exist in the + input array, that field is created and set to 0 in the output array. + + Parameters + ---------- + a : ndarray + array to cast + required_dtype : dtype + datatype for output array + + Returns + ------- + out : ndarray + array with the new dtype, with field values copied from the fields in + the input array with the same name + + Examples + -------- + + >>> from numpy.lib import recfunctions as rfn + >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) + >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')]) + array([(1., 1), (1., 1), (1., 1), (1., 1)], + dtype=[('b', '>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')]) + array([(1., 0), (1., 0), (1., 0), (1., 0)], + dtype=[('b', '>> from numpy.lib import recfunctions as rfn + >>> x = np.array([1, 2,]) + >>> rfn.stack_arrays(x) is x + True + >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)]) + >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + ... 
dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)]) + >>> test = rfn.stack_arrays((z,zz)) + >>> test + masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0), + (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)], + mask=[(False, False, True), (False, False, True), + (False, False, False), (False, False, False), + (False, False, False)], + fill_value=(b'N/A', 1.e+20, 1.e+20), + dtype=[('A', 'S3'), ('B', ' '%s'" % + (cdtype, fdtype)) + # Only one field: use concatenate + if len(newdescr) == 1: + output = ma.concatenate(seqarrays) + else: + # + output = ma.masked_all((np.sum(nrecords),), newdescr) + offset = np.cumsum(np.r_[0, nrecords]) + seen = [] + for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]): + names = a.dtype.names + if names is None: + output['f%i' % len(seen)][i:j] = a + else: + for name in n: + output[name][i:j] = a[name] + if name not in seen: + seen.append(name) + # + return _fix_output(_fix_defaults(output, defaults), + usemask=usemask, asrecarray=asrecarray) + + +def _find_duplicates_dispatcher( + a, key=None, ignoremask=None, return_index=None): + return (a,) + + +@array_function_dispatch(_find_duplicates_dispatcher) +def find_duplicates(a, key=None, ignoremask=True, return_index=False): + """ + Find the duplicates in a structured array along a given key + + Parameters + ---------- + a : array-like + Input array + key : {string, None}, optional + Name of the fields along which to check the duplicates. + If None, the search is performed by records + ignoremask : {True, False}, optional + Whether masked data should be discarded or considered as duplicates. + return_index : {False, True}, optional + Whether to return the indices of the duplicated values. + + Examples + -------- + >>> from numpy.lib import recfunctions as rfn + >>> ndtype = [('a', int)] + >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], + ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) + (masked_array(data=[(1,), (1,), (2,), (2,)], + mask=[(False,), (False,), (False,), (False,)], + fill_value=(999999,), + dtype=[('a', '= nb1)] - nb1 + (r1cmn, r2cmn) = (len(idx_1), len(idx_2)) + if jointype == 'inner': + (r1spc, r2spc) = (0, 0) + elif jointype == 'outer': + idx_out = idx_sort[~flag_in] + idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) + idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1)) + (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn) + elif jointype == 'leftouter': + idx_out = idx_sort[~flag_in] + idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)])) + (r1spc, r2spc) = (len(idx_1) - r1cmn, 0) + # Select the entries from each input + (s1, s2) = (r1[idx_1], r2[idx_2]) + # + # Build the new description of the output array ....... + # Start with the key fields + ndtype = _get_fieldspec(r1k.dtype) + + # Add the fields from r1 + for fname, fdtype in _get_fieldspec(r1.dtype): + if fname not in key: + ndtype.append((fname, fdtype)) + + # Add the fields from r2 + for fname, fdtype in _get_fieldspec(r2.dtype): + # Have we seen the current name already ? + # we need to rebuild this list every time + names = list(name for name, dtype in ndtype) + try: + nameidx = names.index(fname) + except ValueError: + #... 
we haven't: just add the description to the current list + ndtype.append((fname, fdtype)) + else: + # collision + _, cdtype = ndtype[nameidx] + if fname in key: + # The current field is part of the key: take the largest dtype + ndtype[nameidx] = (fname, max(fdtype, cdtype)) + else: + # The current field is not part of the key: add the suffixes, + # and place the new field adjacent to the old one + ndtype[nameidx:nameidx + 1] = [ + (fname + r1postfix, cdtype), + (fname + r2postfix, fdtype) + ] + # Rebuild a dtype from the new fields + ndtype = np.dtype(ndtype) + # Find the largest nb of common fields : + # r1cmn and r2cmn should be equal, but... + cmn = max(r1cmn, r2cmn) + # Construct an empty array + output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype) + names = output.dtype.names + for f in r1names: + selected = s1[f] + if f not in names or (f in r2names and not r2postfix and f not in key): + f += r1postfix + current = output[f] + current[:r1cmn] = selected[:r1cmn] + if jointype in ('outer', 'leftouter'): + current[cmn:cmn + r1spc] = selected[r1cmn:] + for f in r2names: + selected = s2[f] + if f not in names or (f in r1names and not r1postfix and f not in key): + f += r2postfix + current = output[f] + current[:r2cmn] = selected[:r2cmn] + if (jointype == 'outer') and r2spc: + current[-r2spc:] = selected[r2cmn:] + # Sort and finalize the output + output.sort(order=key) + kwargs = dict(usemask=usemask, asrecarray=asrecarray) + return _fix_output(_fix_defaults(output, defaults), **kwargs) + + +def _rec_join_dispatcher( + key, r1, r2, jointype=None, r1postfix=None, r2postfix=None, + defaults=None): + return (r1, r2) + + +@array_function_dispatch(_rec_join_dispatcher) +def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2', + defaults=None): + """ + Join arrays `r1` and `r2` on keys. + Alternative to join_by, that always returns a np.recarray. + + See Also + -------- + join_by : equivalent function + """ + kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix, + defaults=defaults, usemask=False, asrecarray=True) + return join_by(key, r1, r2, **kwargs) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/scimath.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/scimath.py new file mode 100644 index 00000000..b7ef0d71 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/scimath.py @@ -0,0 +1,625 @@ +""" +Wrapper functions to more user-friendly calling of certain math functions +whose output data-type is different than the input data-type in certain +domains of the input. + +For example, for functions like `log` with branch cuts, the versions in this +module provide the mathematically valid answers in the complex plane:: + + >>> import math + >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi) + True + +Similarly, `sqrt`, other base logarithms, `power` and trig functions are +correctly handled. See their respective docstrings for specific examples. + +Functions +--------- + +.. autosummary:: + :toctree: generated/ + + sqrt + log + log2 + logn + log10 + power + arccos + arcsin + arctanh + +""" +import numpy.core.numeric as nx +import numpy.core.numerictypes as nt +from numpy.core.numeric import asarray, any +from numpy.core.overrides import array_function_dispatch +from numpy.lib.type_check import isreal + + +__all__ = [ + 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin', + 'arctanh' + ] + + +_ln2 = nx.log(2.0) + + +def _tocomplex(arr): + """Convert its input `arr` to a complex array. 
+ + The input is returned as a complex array of the smallest type that will fit + the original data: types like single, byte, short, etc. become csingle, + while others become cdouble. + + A copy of the input is always made. + + Parameters + ---------- + arr : array + + Returns + ------- + array + An array with the same input data as the input but in complex form. + + Examples + -------- + + First, consider an input of type short: + + >>> a = np.array([1,2,3],np.short) + + >>> ac = np.lib.scimath._tocomplex(a); ac + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> ac.dtype + dtype('complex64') + + If the input is of type double, the output is correspondingly of the + complex double type as well: + + >>> b = np.array([1,2,3],np.double) + + >>> bc = np.lib.scimath._tocomplex(b); bc + array([1.+0.j, 2.+0.j, 3.+0.j]) + + >>> bc.dtype + dtype('complex128') + + Note that even if the input was complex to begin with, a copy is still + made, since the astype() method always copies: + + >>> c = np.array([1,2,3],np.csingle) + + >>> cc = np.lib.scimath._tocomplex(c); cc + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> c *= 2; c + array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) + + >>> cc + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + """ + if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, + nt.ushort, nt.csingle)): + return arr.astype(nt.csingle) + else: + return arr.astype(nt.cdouble) + + +def _fix_real_lt_zero(x): + """Convert `x` to complex if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_real_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_real_lt_zero([-1,2]) + array([-1.+0.j, 2.+0.j]) + + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = _tocomplex(x) + return x + + +def _fix_int_lt_zero(x): + """Convert `x` to double if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_int_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_int_lt_zero([-1,2]) + array([-1., 2.]) + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = x * 1.0 + return x + + +def _fix_real_abs_gt_1(x): + """Convert `x` to complex if it has real components x_i with abs(x_i)>1. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) + array([0, 1]) + + >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) + array([0.+0.j, 2.+0.j]) + """ + x = asarray(x) + if any(isreal(x) & (abs(x) > 1)): + x = _tocomplex(x) + return x + + +def _unary_dispatcher(x): + return (x,) + + +@array_function_dispatch(_unary_dispatcher) +def sqrt(x): + """ + Compute the square root of x. + + For negative input elements, a complex value is returned + (unlike `numpy.sqrt` which returns NaN). + + Parameters + ---------- + x : array_like + The input value(s). + + Returns + ------- + out : ndarray or scalar + The square root of `x`. If `x` was a scalar, so is `out`, + otherwise an array is returned. 
+ + See Also + -------- + numpy.sqrt + + Examples + -------- + For real, non-negative inputs this works just like `numpy.sqrt`: + + >>> np.emath.sqrt(1) + 1.0 + >>> np.emath.sqrt([1, 4]) + array([1., 2.]) + + But it automatically handles negative inputs: + + >>> np.emath.sqrt(-1) + 1j + >>> np.emath.sqrt([-1,4]) + array([0.+1.j, 2.+0.j]) + + Different results are expected because: + floating point 0.0 and -0.0 are distinct. + + For more control, explicitly use complex() as follows: + + >>> np.emath.sqrt(complex(-4.0, 0.0)) + 2j + >>> np.emath.sqrt(complex(-4.0, -0.0)) + -2j + """ + x = _fix_real_lt_zero(x) + return nx.sqrt(x) + + +@array_function_dispatch(_unary_dispatcher) +def log(x): + """ + Compute the natural logarithm of `x`. + + Return the "principal value" (for a description of this, see `numpy.log`) + of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)`` + returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the + complex principle value is returned. + + Parameters + ---------- + x : array_like + The value(s) whose log is (are) required. + + Returns + ------- + out : ndarray or scalar + The log of the `x` value(s). If `x` was a scalar, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.log + + Notes + ----- + For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log` + (note, however, that otherwise `numpy.log` and this `log` are identical, + i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and, + notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + >>> np.emath.log(np.exp(1)) + 1.0 + + Negative arguments are handled "correctly" (recall that + ``exp(log(x)) == x`` does *not* hold for real ``x < 0``): + + >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j) + True + + """ + x = _fix_real_lt_zero(x) + return nx.log(x) + + +@array_function_dispatch(_unary_dispatcher) +def log10(x): + """ + Compute the logarithm base 10 of `x`. + + Return the "principal value" (for a description of this, see + `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this + is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)`` + returns ``inf``). Otherwise, the complex principle value is returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose log base 10 is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`, + otherwise an array object is returned. + + See Also + -------- + numpy.log10 + + Notes + ----- + For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10` + (note, however, that otherwise `numpy.log10` and this `log10` are + identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, + and, notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + + (We set the printing precision so the example can be auto-tested) + + >>> np.set_printoptions(precision=4) + + >>> np.emath.log10(10**1) + 1.0 + + >>> np.emath.log10([-10**1, -10**2, 10**2]) + array([1.+1.3644j, 2.+1.3644j, 2.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + return nx.log10(x) + + +def _logn_dispatcher(n, x): + return (n, x,) + + +@array_function_dispatch(_logn_dispatcher) +def logn(n, x): + """ + Take log base n of x. + + If `x` contains negative inputs, the answer is computed and returned in the + complex domain. + + Parameters + ---------- + n : array_like + The integer base(s) in which the log is taken. 
+ x : array_like + The value(s) whose log base `n` is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base `n` of the `x` value(s). If `x` was a scalar, so is + `out`, otherwise an array is returned. + + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> np.emath.logn(2, [4, 8]) + array([2., 3.]) + >>> np.emath.logn(2, [-4, -8, 8]) + array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + n = _fix_real_lt_zero(n) + return nx.log(x)/nx.log(n) + + +@array_function_dispatch(_unary_dispatcher) +def log2(x): + """ + Compute the logarithm base 2 of `x`. + + Return the "principal value" (for a description of this, see + `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is + a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns + ``inf``). Otherwise, the complex principle value is returned. + + Parameters + ---------- + x : array_like + The value(s) whose log base 2 is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.log2 + + Notes + ----- + For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2` + (note, however, that otherwise `numpy.log2` and this `log2` are + identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, + and, notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + We set the printing precision so the example can be auto-tested: + + >>> np.set_printoptions(precision=4) + + >>> np.emath.log2(8) + 3.0 + >>> np.emath.log2([-4, -8, 8]) + array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + return nx.log2(x) + + +def _power_dispatcher(x, p): + return (x, p) + + +@array_function_dispatch(_power_dispatcher) +def power(x, p): + """ + Return x to the power p, (x**p). + + If `x` contains negative values, the output is converted to the + complex domain. + + Parameters + ---------- + x : array_like + The input value(s). + p : array_like of ints + The power(s) to which `x` is raised. If `x` contains multiple values, + `p` has to either be a scalar, or contain the same number of values + as `x`. In the latter case, the result is + ``x[0]**p[0], x[1]**p[1], ...``. + + Returns + ------- + out : ndarray or scalar + The result of ``x**p``. If `x` and `p` are scalars, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.power + + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> np.emath.power([2, 4], 2) + array([ 4, 16]) + >>> np.emath.power([2, 4], -2) + array([0.25 , 0.0625]) + >>> np.emath.power([-2, 4], 2) + array([ 4.-0.j, 16.+0.j]) + + """ + x = _fix_real_lt_zero(x) + p = _fix_int_lt_zero(p) + return nx.power(x, p) + + +@array_function_dispatch(_unary_dispatcher) +def arccos(x): + """ + Compute the inverse cosine of x. + + Return the "principal value" (for a description of this, see + `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that + `abs(x) <= 1`, this is a real number in the closed interval + :math:`[0, \\pi]`. Otherwise, the complex principle value is returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose arccos is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so + is `out`, otherwise an array object is returned. 
+ + See Also + -------- + numpy.arccos + + Notes + ----- + For an arccos() that returns ``NAN`` when real `x` is not in the + interval ``[-1,1]``, use `numpy.arccos`. + + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> np.emath.arccos(1) # a scalar is returned + 0.0 + + >>> np.emath.arccos([1,2]) + array([0.-0.j , 0.-1.317j]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arccos(x) + + +@array_function_dispatch(_unary_dispatcher) +def arcsin(x): + """ + Compute the inverse sine of x. + + Return the "principal value" (for a description of this, see + `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that + `abs(x) <= 1`, this is a real number in the closed interval + :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principle value is + returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose arcsin is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse sine(s) of the `x` value(s). If `x` was a scalar, so + is `out`, otherwise an array object is returned. + + See Also + -------- + numpy.arcsin + + Notes + ----- + For an arcsin() that returns ``NAN`` when real `x` is not in the + interval ``[-1,1]``, use `numpy.arcsin`. + + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> np.emath.arcsin(0) + 0.0 + + >>> np.emath.arcsin([0,1]) + array([0. , 1.5708]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arcsin(x) + + +@array_function_dispatch(_unary_dispatcher) +def arctanh(x): + """ + Compute the inverse hyperbolic tangent of `x`. + + Return the "principal value" (for a description of this, see + `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that + ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is + complex, the result is complex. Finally, `x = 1` returns``inf`` and + ``x=-1`` returns ``-inf``. + + Parameters + ---------- + x : array_like + The value(s) whose arctanh is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was + a scalar so is `out`, otherwise an array is returned. + + + See Also + -------- + numpy.arctanh + + Notes + ----- + For an arctanh() that returns ``NAN`` when real `x` is not in the + interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does + return +/-inf for ``x = +/-1``). + + Examples + -------- + >>> np.set_printoptions(precision=4) + + >>> from numpy.testing import suppress_warnings + >>> with suppress_warnings() as sup: + ... sup.filter(RuntimeWarning) + ... np.emath.arctanh(np.eye(2)) + array([[inf, 0.], + [ 0., inf]]) + >>> np.emath.arctanh([1j]) + array([0.+0.7854j]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arctanh(x) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/scimath.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/scimath.pyi new file mode 100644 index 00000000..589feb15 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/scimath.pyi @@ -0,0 +1,94 @@ +from typing import overload, Any + +from numpy import complexfloating + +from numpy._typing import ( + NDArray, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ComplexLike_co, + _FloatLike_co, +) + +__all__: list[str] + +@overload +def sqrt(x: _FloatLike_co) -> Any: ... +@overload +def sqrt(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def log(x: _FloatLike_co) -> Any: ... 
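+# Sketch of the pattern repeated throughout this stub (a reading of intent,
+# not normative): a plain float-like scalar is annotated `Any` because the
+# result may be real or complex depending on the input's sign, e.g.:
+#
+#     >>> np.emath.log(1.0)       # positive real -> real result
+#     0.0
+#     >>> np.emath.log(-1.0)      # negative real -> complex result
+#     3.141592653589793j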
+@overload +def log(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def log10(x: _FloatLike_co) -> Any: ... +@overload +def log10(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def log2(x: _FloatLike_co) -> Any: ... +@overload +def log2(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ... +@overload +def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ... +@overload +def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def arccos(x: _FloatLike_co) -> Any: ... +@overload +def arccos(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def arcsin(x: _FloatLike_co) -> Any: ... +@overload +def arcsin(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def arctanh(x: _FloatLike_co) -> Any: ... +@overload +def arctanh(x: _ComplexLike_co) -> complexfloating[Any, Any]: ... +@overload +def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... 
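+# Closing illustration (hedged, for orientation only): every function above
+# follows the same domain switch, e.g. for sqrt:
+#
+#     >>> np.emath.sqrt(4.0)      # non-negative real -> real result
+#     2.0
+#     >>> np.emath.sqrt(-4.0)     # negative real -> complex result
+#     2j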
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/setup.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/setup.py new file mode 100644 index 00000000..7520b72d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/setup.py @@ -0,0 +1,12 @@ +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('lib', parent_package, top_path) + config.add_subpackage('tests') + config.add_data_dir('tests/data') + config.add_data_files('*.pyi') + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/shape_base.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/shape_base.py new file mode 100644 index 00000000..5d8a41bf --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/shape_base.py @@ -0,0 +1,1274 @@ +import functools + +import numpy.core.numeric as _nx +from numpy.core.numeric import asarray, zeros, array, asanyarray +from numpy.core.fromnumeric import reshape, transpose +from numpy.core.multiarray import normalize_axis_index +from numpy.core import overrides +from numpy.core import vstack, atleast_3d +from numpy.core.numeric import normalize_axis_tuple +from numpy.core.shape_base import _arrays_for_stack_dispatcher +from numpy.lib.index_tricks import ndindex +from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells + + +__all__ = [ + 'column_stack', 'row_stack', 'dstack', 'array_split', 'split', + 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', + 'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis', + 'put_along_axis' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _make_along_axis_idx(arr_shape, indices, axis): + # compute dimensions to iterate over + if not _nx.issubdtype(indices.dtype, _nx.integer): + raise IndexError('`indices` must be an integer array') + if len(arr_shape) != indices.ndim: + raise ValueError( + "`indices` and `arr` must have the same number of dimensions") + shape_ones = (1,) * indices.ndim + dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim)) + + # build a fancy index, consisting of orthogonal aranges, with the + # requested index inserted at the right location + fancy_index = [] + for dim, n in zip(dest_dims, arr_shape): + if dim is None: + fancy_index.append(indices) + else: + ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:] + fancy_index.append(_nx.arange(n).reshape(ind_shape)) + + return tuple(fancy_index) + + +def _take_along_axis_dispatcher(arr, indices, axis): + return (arr, indices) + + +@array_function_dispatch(_take_along_axis_dispatcher) +def take_along_axis(arr, indices, axis): + """ + Take values from the input array by matching 1d index and data slices. + + This iterates over matching 1d slices oriented along the specified axis in + the index and data arrays, and uses the former to look up values in the + latter. These slices can be different lengths. + + Functions returning an index along an axis, like `argsort` and + `argpartition`, produce suitable indices for this function. + + .. versionadded:: 1.15.0 + + Parameters + ---------- + arr : ndarray (Ni..., M, Nk...) + Source array + indices : ndarray (Ni..., J, Nk...) + Indices to take along each 1d slice of `arr`. 
This must match the + dimension of arr, but dimensions Ni and Nj only need to broadcast + against `arr`. + axis : int + The axis to take 1d slices along. If axis is None, the input array is + treated as if it had first been flattened to 1d, for consistency with + `sort` and `argsort`. + + Returns + ------- + out: ndarray (Ni..., J, Nk...) + The indexed result. + + Notes + ----- + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: + + Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] + J = indices.shape[axis] # Need not equal M + out = np.empty(Ni + (J,) + Nk) + + for ii in ndindex(Ni): + for kk in ndindex(Nk): + a_1d = a [ii + s_[:,] + kk] + indices_1d = indices[ii + s_[:,] + kk] + out_1d = out [ii + s_[:,] + kk] + for j in range(J): + out_1d[j] = a_1d[indices_1d[j]] + + Equivalently, eliminating the inner loop, the last two lines would be:: + + out_1d[:] = a_1d[indices_1d] + + See Also + -------- + take : Take along an axis, using the same indices for every 1d slice + put_along_axis : + Put values into the destination array by matching 1d index and data slices + + Examples + -------- + + For this sample array + + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can sort either by using sort directly, or argsort and this function + + >>> np.sort(a, axis=1) + array([[10, 20, 30], + [40, 50, 60]]) + >>> ai = np.argsort(a, axis=1) + >>> ai + array([[0, 2, 1], + [1, 2, 0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[10, 20, 30], + [40, 50, 60]]) + + The same works for max and min, if you maintain the trivial dimension + with ``keepdims``: + + >>> np.max(a, axis=1, keepdims=True) + array([[30], + [60]]) + >>> ai = np.argmax(a, axis=1, keepdims=True) + >>> ai + array([[1], + [0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[30], + [60]]) + + If we want to get the max and min at the same time, we can stack the + indices first + + >>> ai_min = np.argmin(a, axis=1, keepdims=True) + >>> ai_max = np.argmax(a, axis=1, keepdims=True) + >>> ai = np.concatenate([ai_min, ai_max], axis=1) + >>> ai + array([[0, 1], + [1, 0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[10, 30], + [40, 60]]) + """ + # normalize inputs + if axis is None: + arr = arr.flat + arr_shape = (len(arr),) # flatiter has no .shape + axis = 0 + else: + axis = normalize_axis_index(axis, arr.ndim) + arr_shape = arr.shape + + # use the fancy index + return arr[_make_along_axis_idx(arr_shape, indices, axis)] + + +def _put_along_axis_dispatcher(arr, indices, values, axis): + return (arr, indices, values) + + +@array_function_dispatch(_put_along_axis_dispatcher) +def put_along_axis(arr, indices, values, axis): + """ + Put values into the destination array by matching 1d index and data slices. + + This iterates over matching 1d slices oriented along the specified axis in + the index and data arrays, and uses the former to place values into the + latter. These slices can be different lengths. + + Functions returning an index along an axis, like `argsort` and + `argpartition`, produce suitable indices for this function. + + .. versionadded:: 1.15.0 + + Parameters + ---------- + arr : ndarray (Ni..., M, Nk...) + Destination array. + indices : ndarray (Ni..., J, Nk...) + Indices to change along each 1d slice of `arr`. This must match the + dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast + against `arr`. + values : array_like (Ni..., J, Nk...) + values to insert at those indices. 
Its shape and dimension are + broadcast to match that of `indices`. + axis : int + The axis to take 1d slices along. If axis is None, the destination + array is treated as if a flattened 1d view had been created of it. + + Notes + ----- + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: + + Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] + J = indices.shape[axis] # Need not equal M + + for ii in ndindex(Ni): + for kk in ndindex(Nk): + a_1d = a [ii + s_[:,] + kk] + indices_1d = indices[ii + s_[:,] + kk] + values_1d = values [ii + s_[:,] + kk] + for j in range(J): + a_1d[indices_1d[j]] = values_1d[j] + + Equivalently, eliminating the inner loop, the last two lines would be:: + + a_1d[indices_1d] = values_1d + + See Also + -------- + take_along_axis : + Take values from the input array by matching 1d index and data slices + + Examples + -------- + + For this sample array + + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can replace the maximum values with: + + >>> ai = np.argmax(a, axis=1, keepdims=True) + >>> ai + array([[1], + [0]]) + >>> np.put_along_axis(a, ai, 99, axis=1) + >>> a + array([[10, 99, 20], + [99, 40, 50]]) + + """ + # normalize inputs + if axis is None: + arr = arr.flat + axis = 0 + arr_shape = (len(arr),) # flatiter has no .shape + else: + axis = normalize_axis_index(axis, arr.ndim) + arr_shape = arr.shape + + # use the fancy index + arr[_make_along_axis_idx(arr_shape, indices, axis)] = values + + +def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs): + return (arr,) + + +@array_function_dispatch(_apply_along_axis_dispatcher) +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + Apply a function to 1-D slices along the given axis. + + Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays + and `a` is a 1-D slice of `arr` along `axis`. + + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + f = func1d(arr[ii + s_[:,] + kk]) + Nj = f.shape + for jj in ndindex(Nj): + out[ii + jj + kk] = f[jj] + + Equivalently, eliminating the inner loop, this can be expressed as:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk]) + + Parameters + ---------- + func1d : function (M,) -> (Nj...) + This function should accept 1-D arrays. It is applied to 1-D + slices of `arr` along the specified axis. + axis : integer + Axis along which `arr` is sliced. + arr : ndarray (Ni..., M, Nk...) + Input array. + args : any + Additional arguments to `func1d`. + kwargs : any + Additional named arguments to `func1d`. + + .. versionadded:: 1.9.0 + + + Returns + ------- + out : ndarray (Ni..., Nj..., Nk...) + The output array. The shape of `out` is identical to the shape of + `arr`, except along the `axis` dimension. This axis is removed, and + replaced with new dimensions equal to the shape of the return value + of `func1d`. So if `func1d` returns a scalar `out` will have one + fewer dimensions than `arr`. + + See Also + -------- + apply_over_axes : Apply a function repeatedly over multiple axes. + + Examples + -------- + >>> def my_func(a): + ... \"\"\"Average first and last element of a 1-D array\"\"\" + ... 
return (a[0] + a[-1]) * 0.5 + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(my_func, 0, b) + array([4., 5., 6.]) + >>> np.apply_along_axis(my_func, 1, b) + array([2., 5., 8.]) + + For a function that returns a 1D array, the number of dimensions in + `outarr` is the same as `arr`. + + >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]]) + >>> np.apply_along_axis(sorted, 1, b) + array([[1, 7, 8], + [3, 4, 9], + [2, 5, 6]]) + + For a function that returns a higher dimensional array, those dimensions + are inserted in place of the `axis` dimension. + + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(np.diag, -1, b) + array([[[1, 0, 0], + [0, 2, 0], + [0, 0, 3]], + [[4, 0, 0], + [0, 5, 0], + [0, 0, 6]], + [[7, 0, 0], + [0, 8, 0], + [0, 0, 9]]]) + """ + # handle negative axes + arr = asanyarray(arr) + nd = arr.ndim + axis = normalize_axis_index(axis, nd) + + # arr, with the iteration axis at the end + in_dims = list(range(nd)) + inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis]) + + # compute indices for the iteration axes, and append a trailing ellipsis to + # prevent 0d arrays decaying to scalars, which fixes gh-8642 + inds = ndindex(inarr_view.shape[:-1]) + inds = (ind + (Ellipsis,) for ind in inds) + + # invoke the function on the first item + try: + ind0 = next(inds) + except StopIteration: + raise ValueError( + 'Cannot apply_along_axis when any iteration dimensions are 0' + ) from None + res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs)) + + # build a buffer for storing evaluations of func1d. + # remove the requested axis, and add the new ones on the end. + # laid out so that each write is contiguous. + # for a tuple index inds, buff[inds] = func1d(inarr_view[inds]) + buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype) + + # permutation of axes such that out = buff.transpose(buff_permute) + buff_dims = list(range(buff.ndim)) + buff_permute = ( + buff_dims[0 : axis] + + buff_dims[buff.ndim-res.ndim : buff.ndim] + + buff_dims[axis : buff.ndim-res.ndim] + ) + + # matrices have a nasty __array_prepare__ and __array_wrap__ + if not isinstance(res, matrix): + buff = res.__array_prepare__(buff) + + # save the first result, then compute and save all remaining results + buff[ind0] = res + for ind in inds: + buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs)) + + if not isinstance(res, matrix): + # wrap the array, to preserve subclasses + buff = res.__array_wrap__(buff) + + # finally, rotate the inserted axes back to where they belong + return transpose(buff, buff_permute) + + else: + # matrices have to be transposed first, because they collapse dimensions! + out_arr = transpose(buff, buff_permute) + return res.__array_wrap__(out_arr) + + +def _apply_over_axes_dispatcher(func, a, axes): + return (a,) + + +@array_function_dispatch(_apply_over_axes_dispatcher) +def apply_over_axes(func, a, axes): + """ + Apply a function repeatedly over multiple axes. + + `func` is called as `res = func(a, axis)`, where `axis` is the first + element of `axes`. The result `res` of the function call must have + either the same dimensions as `a` or one less dimension. If `res` + has one less dimension than `a`, a dimension is inserted before + `axis`. The call to `func` is then repeated for each axis in `axes`, + with `res` as the first argument. + + Parameters + ---------- + func : function + This function must take two arguments, `func(a, axis)`. + a : array_like + Input array. 
+ axes : array_like + Axes over which `func` is applied; the elements must be integers. + + Returns + ------- + apply_over_axis : ndarray + The output array. The number of dimensions is the same as `a`, + but the shape can be different. This depends on whether `func` + changes the shape of its output with respect to its input. + + See Also + -------- + apply_along_axis : + Apply a function to 1-D slices of an array along the given axis. + + Notes + ----- + This function is equivalent to tuple axis arguments to reorderable ufuncs + with keepdims=True. Tuple axis arguments to ufuncs have been available since + version 1.7.0. + + Examples + -------- + >>> a = np.arange(24).reshape(2,3,4) + >>> a + array([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]]) + + Sum over axes 0 and 2. The result has same number of dimensions + as the original array: + + >>> np.apply_over_axes(np.sum, a, [0,2]) + array([[[ 60], + [ 92], + [124]]]) + + Tuple axis arguments to ufuncs are equivalent: + + >>> np.sum(a, axis=(0,2), keepdims=True) + array([[[ 60], + [ 92], + [124]]]) + + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + + +def _expand_dims_dispatcher(a, axis): + return (a,) + + +@array_function_dispatch(_expand_dims_dispatcher) +def expand_dims(a, axis): + """ + Expand the shape of an array. + + Insert a new axis that will appear at the `axis` position in the expanded + array shape. + + Parameters + ---------- + a : array_like + Input array. + axis : int or tuple of ints + Position in the expanded axes where the new axis (or axes) is placed. + + .. deprecated:: 1.13.0 + Passing an axis where ``axis > a.ndim`` will be treated as + ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will + be treated as ``axis == 0``. This behavior is deprecated. + + .. versionchanged:: 1.18.0 + A tuple of axes is now supported. Out of range axes as + described above are now forbidden and raise an `AxisError`. + + Returns + ------- + result : ndarray + View of `a` with the number of dimensions increased. + + See Also + -------- + squeeze : The inverse operation, removing singleton dimensions + reshape : Insert, remove, and combine dimensions, and resize existing ones + doc.indexing, atleast_1d, atleast_2d, atleast_3d + + Examples + -------- + >>> x = np.array([1, 2]) + >>> x.shape + (2,) + + The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``: + + >>> y = np.expand_dims(x, axis=0) + >>> y + array([[1, 2]]) + >>> y.shape + (1, 2) + + The following is equivalent to ``x[:, np.newaxis]``: + + >>> y = np.expand_dims(x, axis=1) + >>> y + array([[1], + [2]]) + >>> y.shape + (2, 1) + + ``axis`` may also be a tuple: + + >>> y = np.expand_dims(x, axis=(0, 1)) + >>> y + array([[[1, 2]]]) + + >>> y = np.expand_dims(x, axis=(2, 0)) + >>> y + array([[[1], + [2]]]) + + Note that some examples may use ``None`` instead of ``np.newaxis``. 
These + are the same objects: + + >>> np.newaxis is None + True + + """ + if isinstance(a, matrix): + a = asarray(a) + else: + a = asanyarray(a) + + if type(axis) not in (tuple, list): + axis = (axis,) + + out_ndim = len(axis) + a.ndim + axis = normalize_axis_tuple(axis, out_ndim) + + shape_it = iter(a.shape) + shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)] + + return a.reshape(shape) + + +row_stack = vstack + + +def _column_stack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_column_stack_dispatcher) +def column_stack(tup): + """ + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. + + See Also + -------- + stack, hstack, vstack, concatenate + + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + + """ + arrays = [] + for v in tup: + arr = asanyarray(v) + if arr.ndim < 2: + arr = array(arr, copy=False, subok=True, ndmin=2).T + arrays.append(arr) + return _nx.concatenate(arrays, 1) + + +def _dstack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_dstack_dispatcher) +def dstack(tup): + """ + Stack arrays in sequence depth wise (along third axis). + + This is equivalent to concatenation along the third axis after 2-D arrays + of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape + `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by + `dsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of arrays + The arrays must have the same shape along all but the third axis. + 1-D or 2-D arrays must have the same shape. + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays, will be at least 3-D. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + vstack : Stack arrays in sequence vertically (row wise). + hstack : Stack arrays in sequence horizontally (column wise). + column_stack : Stack 1-D arrays as columns into a 2-D array. + dsplit : Split array along third axis. 
+ + Examples + -------- + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.dstack((a,b)) + array([[[1, 2], + [2, 3], + [3, 4]]]) + + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[2],[3],[4]]) + >>> np.dstack((a,b)) + array([[[1, 2]], + [[2, 3]], + [[3, 4]]]) + + """ + arrs = atleast_3d(*tup) + if not isinstance(arrs, list): + arrs = [arrs] + return _nx.concatenate(arrs, 2) + + +def _replace_zero_by_x_arrays(sub_arys): + for i in range(len(sub_arys)): + if _nx.ndim(sub_arys[i]) == 0: + sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) + elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): + sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) + return sub_arys + + +def _array_split_dispatcher(ary, indices_or_sections, axis=None): + return (ary, indices_or_sections) + + +@array_function_dispatch(_array_split_dispatcher) +def array_split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays. + + Please refer to the ``split`` documentation. The only difference + between these functions is that ``array_split`` allows + `indices_or_sections` to be an integer that does *not* equally + divide the axis. For an array of length l that should be split + into n sections, it returns l % n sub-arrays of size l//n + 1 + and the rest of size l//n. + + See Also + -------- + split : Split array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(8.0) + >>> np.array_split(x, 3) + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] + + >>> x = np.arange(9) + >>> np.array_split(x, 4) + [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])] + + """ + try: + Ntotal = ary.shape[axis] + except AttributeError: + Ntotal = len(ary) + try: + # handle array case. + Nsections = len(indices_or_sections) + 1 + div_points = [0] + list(indices_or_sections) + [Ntotal] + except TypeError: + # indices_or_sections is a scalar, not an array. + Nsections = int(indices_or_sections) + if Nsections <= 0: + raise ValueError('number sections must be larger than 0.') from None + Neach_section, extras = divmod(Ntotal, Nsections) + section_sizes = ([0] + + extras * [Neach_section+1] + + (Nsections-extras) * [Neach_section]) + div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum() + + sub_arys = [] + sary = _nx.swapaxes(ary, axis, 0) + for i in range(Nsections): + st = div_points[i] + end = div_points[i + 1] + sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0)) + + return sub_arys + + +def _split_dispatcher(ary, indices_or_sections, axis=None): + return (ary, indices_or_sections) + + +@array_function_dispatch(_split_dispatcher) +def split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays as views into `ary`. + + Parameters + ---------- + ary : ndarray + Array to be divided into sub-arrays. + indices_or_sections : int or 1-D array + If `indices_or_sections` is an integer, N, the array will be divided + into N equal arrays along `axis`. If such a split is not possible, + an error is raised. + + If `indices_or_sections` is a 1-D array of sorted integers, the entries + indicate where along `axis` the array is split. For example, + ``[2, 3]`` would, for ``axis=0``, result in + + - ary[:2] + - ary[2:3] + - ary[3:] + + If an index exceeds the dimension of the array along `axis`, + an empty sub-array is returned correspondingly. + axis : int, optional + The axis along which to split, default is 0. + + Returns + ------- + sub-arrays : list of ndarrays + A list of sub-arrays as views into `ary`. 
+ + Raises + ------ + ValueError + If `indices_or_sections` is given as an integer, but + a split does not result in equal division. + + See Also + -------- + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. Does not raise an exception if + an equal division cannot be made. + hsplit : Split array into multiple sub-arrays horizontally (column-wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + + Examples + -------- + >>> x = np.arange(9.0) + >>> np.split(x, 3) + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])] + + >>> x = np.arange(8.0) + >>> np.split(x, [3, 5, 6, 10]) + [array([0., 1., 2.]), + array([3., 4.]), + array([5.]), + array([6., 7.]), + array([], dtype=float64)] + + """ + try: + len(indices_or_sections) + except TypeError: + sections = indices_or_sections + N = ary.shape[axis] + if N % sections: + raise ValueError( + 'array split does not result in an equal division') from None + return array_split(ary, indices_or_sections, axis) + + +def _hvdsplit_dispatcher(ary, indices_or_sections): + return (ary, indices_or_sections) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def hsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays horizontally (column-wise). + + Please refer to the `split` documentation. `hsplit` is equivalent + to `split` with ``axis=1``, the array is always split along the second + axis except for 1-D arrays, where it is split at ``axis=0``. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> np.hsplit(x, 2) + [array([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [12., 13.]]), + array([[ 2., 3.], + [ 6., 7.], + [10., 11.], + [14., 15.]])] + >>> np.hsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [12., 13., 14.]]), + array([[ 3.], + [ 7.], + [11.], + [15.]]), + array([], shape=(4, 0), dtype=float64)] + + With a higher dimensional array the split is still along the second axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) + >>> np.hsplit(x, 2) + [array([[[0., 1.]], + [[4., 5.]]]), + array([[[2., 3.]], + [[6., 7.]]])] + + With a 1-D array, the split is along axis 0. + + >>> x = np.array([0, 1, 2, 3, 4, 5]) + >>> np.hsplit(x, 2) + [array([0, 1, 2]), array([3, 4, 5])] + + """ + if _nx.ndim(ary) == 0: + raise ValueError('hsplit only works on arrays of 1 or more dimensions') + if ary.ndim > 1: + return split(ary, indices_or_sections, 1) + else: + return split(ary, indices_or_sections, 0) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def vsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays vertically (row-wise). + + Please refer to the ``split`` documentation. ``vsplit`` is equivalent + to ``split`` with `axis=0` (default), the array is always split along the + first axis regardless of the array dimension. 
+ + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> np.vsplit(x, 2) + [array([[0., 1., 2., 3.], + [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.], + [12., 13., 14., 15.]])] + >>> np.vsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)] + + With a higher dimensional array the split is still along the first axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) + >>> np.vsplit(x, 2) + [array([[[0., 1.], + [2., 3.]]]), array([[[4., 5.], + [6., 7.]]])] + + """ + if _nx.ndim(ary) < 2: + raise ValueError('vsplit only works on arrays of 2 or more dimensions') + return split(ary, indices_or_sections, 0) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def dsplit(ary, indices_or_sections): + """ + Split array into multiple sub-arrays along the 3rd axis (depth). + + Please refer to the `split` documentation. `dsplit` is equivalent + to `split` with ``axis=2``, the array is always split along the third + axis provided the array dimension is greater than or equal to 3. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> x = np.arange(16.0).reshape(2, 2, 4) + >>> x + array([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [12., 13., 14., 15.]]]) + >>> np.dsplit(x, 2) + [array([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [12., 13.]]]), array([[[ 2., 3.], + [ 6., 7.]], + [[10., 11.], + [14., 15.]]])] + >>> np.dsplit(x, np.array([3, 6])) + [array([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [12., 13., 14.]]]), + array([[[ 3.], + [ 7.]], + [[11.], + [15.]]]), + array([], shape=(2, 2, 0), dtype=float64)] + """ + if _nx.ndim(ary) < 3: + raise ValueError('dsplit only works on arrays of 3 or more dimensions') + return split(ary, indices_or_sections, 2) + + +def get_array_prepare(*args): + """Find the wrapper for the array with the highest priority. + + In case of ties, leftmost wins. If no wrapper is found, return None + """ + wrappers = sorted((getattr(x, '__array_priority__', 0), -i, + x.__array_prepare__) for i, x in enumerate(args) + if hasattr(x, '__array_prepare__')) + if wrappers: + return wrappers[-1][-1] + return None + + +def get_array_wrap(*args): + """Find the wrapper for the array with the highest priority. + + In case of ties, leftmost wins. If no wrapper is found, return None + """ + wrappers = sorted((getattr(x, '__array_priority__', 0), -i, + x.__array_wrap__) for i, x in enumerate(args) + if hasattr(x, '__array_wrap__')) + if wrappers: + return wrappers[-1][-1] + return None + + +def _kron_dispatcher(a, b): + return (a, b) + + +@array_function_dispatch(_kron_dispatcher) +def kron(a, b): + """ + Kronecker product of two arrays. + + Computes the Kronecker product, a composite array made of blocks of the + second array scaled by the first. + + Parameters + ---------- + a, b : array_like + + Returns + ------- + out : ndarray + + See Also + -------- + outer : The outer product + + Notes + ----- + The function assumes that the number of dimensions of `a` and `b` + are the same, if necessary prepending the smallest with ones. 
+ If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``, + the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*SN)``. + The elements are products of elements from `a` and `b`, organized + explicitly by:: + + kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN] + + where:: + + kt = it * st + jt, t = 0,...,N + + In the common 2-D case (N=1), the block structure can be visualized:: + + [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ], + [ ... ... ], + [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]] + + + Examples + -------- + >>> np.kron([1,10,100], [5,6,7]) + array([ 5, 6, 7, ..., 500, 600, 700]) + >>> np.kron([5,6,7], [1,10,100]) + array([ 5, 50, 500, ..., 7, 70, 700]) + + >>> np.kron(np.eye(2), np.ones((2,2))) + array([[1., 1., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 1., 1.], + [0., 0., 1., 1.]]) + + >>> a = np.arange(100).reshape((2,5,2,5)) + >>> b = np.arange(24).reshape((2,3,4)) + >>> c = np.kron(a,b) + >>> c.shape + (2, 10, 6, 20) + >>> I = (1,3,0,2) + >>> J = (0,2,1) + >>> J1 = (0,) + J # extend to ndim=4 + >>> S1 = (1,) + b.shape + >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1)) + >>> c[K] == a[I]*b[J] + True + + """ + # Working: + # 1. Equalise the shapes by prepending smaller array with 1s + # 2. Expand shapes of both the arrays by adding new axes at + # odd positions for 1st array and even positions for 2nd + # 3. Compute the product of the modified array + # 4. The inner most array elements now contain the rows of + # the Kronecker product + # 5. Reshape the result to kron's shape, which is same as + # product of shapes of the two arrays. + b = asanyarray(b) + a = array(a, copy=False, subok=True, ndmin=b.ndim) + is_any_mat = isinstance(a, matrix) or isinstance(b, matrix) + ndb, nda = b.ndim, a.ndim + nd = max(ndb, nda) + + if (nda == 0 or ndb == 0): + return _nx.multiply(a, b) + + as_ = a.shape + bs = b.shape + if not a.flags.contiguous: + a = reshape(a, as_) + if not b.flags.contiguous: + b = reshape(b, bs) + + # Equalise the shapes by prepending smaller one with 1s + as_ = (1,)*max(0, ndb-nda) + as_ + bs = (1,)*max(0, nda-ndb) + bs + + # Insert empty dimensions + a_arr = expand_dims(a, axis=tuple(range(ndb-nda))) + b_arr = expand_dims(b, axis=tuple(range(nda-ndb))) + + # Compute the product + a_arr = expand_dims(a_arr, axis=tuple(range(1, nd*2, 2))) + b_arr = expand_dims(b_arr, axis=tuple(range(0, nd*2, 2))) + # In case of `mat`, convert result to `array` + result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat)) + + # Reshape back + result = result.reshape(_nx.multiply(as_, bs)) + + return result if not is_any_mat else matrix(result, copy=False) + + +def _tile_dispatcher(A, reps): + return (A, reps) + + +@array_function_dispatch(_tile_dispatcher) +def tile(A, reps): + """ + Construct an array by repeating A the number of times given by reps. + + If `reps` has length ``d``, the result will have dimension of + ``max(d, A.ndim)``. + + If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new + axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication, + or shape (1, 1, 3) for 3-D replication. If this is not the desired + behavior, promote `A` to d-dimensions manually before calling this + function. + + If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it. + Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as + (1, 1, 2, 2). + + Note : Although tile may be used for broadcasting, it is strongly + recommended to use numpy's broadcasting operations and functions. 
+ + Parameters + ---------- + A : array_like + The input array. + reps : array_like + The number of repetitions of `A` along each axis. + + Returns + ------- + c : ndarray + The tiled output array. + + See Also + -------- + repeat : Repeat elements of an array. + broadcast_to : Broadcast an array to a new shape + + Examples + -------- + >>> a = np.array([0, 1, 2]) + >>> np.tile(a, 2) + array([0, 1, 2, 0, 1, 2]) + >>> np.tile(a, (2, 2)) + array([[0, 1, 2, 0, 1, 2], + [0, 1, 2, 0, 1, 2]]) + >>> np.tile(a, (2, 1, 2)) + array([[[0, 1, 2, 0, 1, 2]], + [[0, 1, 2, 0, 1, 2]]]) + + >>> b = np.array([[1, 2], [3, 4]]) + >>> np.tile(b, 2) + array([[1, 2, 1, 2], + [3, 4, 3, 4]]) + >>> np.tile(b, (2, 1)) + array([[1, 2], + [3, 4], + [1, 2], + [3, 4]]) + + >>> c = np.array([1,2,3,4]) + >>> np.tile(c,(4,1)) + array([[1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4]]) + """ + try: + tup = tuple(reps) + except TypeError: + tup = (reps,) + d = len(tup) + if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray): + # Fixes the problem that the function does not make a copy if A is a + # numpy array and the repetitions are 1 in all dimensions + return _nx.array(A, copy=True, subok=True, ndmin=d) + else: + # Note that no copy of zero-sized arrays is made. However since they + # have no data there is no risk of an inadvertent overwrite. + c = _nx.array(A, copy=False, subok=True, ndmin=d) + if (d < c.ndim): + tup = (1,)*(c.ndim-d) + tup + shape_out = tuple(s*t for s, t in zip(c.shape, tup)) + n = c.size + if n > 0: + for dim_in, nrep in zip(c.shape, tup): + if nrep != 1: + c = c.reshape(-1, n).repeat(nrep, 0) + n //= dim_in + return c.reshape(shape_out) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/shape_base.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/shape_base.pyi new file mode 100644 index 00000000..7cd9608b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/shape_base.pyi @@ -0,0 +1,220 @@ +import sys +from collections.abc import Callable, Sequence +from typing import TypeVar, Any, overload, SupportsIndex, Protocol + +if sys.version_info >= (3, 10): + from typing import ParamSpec, Concatenate +else: + from typing_extensions import ParamSpec, Concatenate + +from numpy import ( + generic, + integer, + ufunc, + bool_, + unsignedinteger, + signedinteger, + floating, + complexfloating, + object_, +) + +from numpy._typing import ( + ArrayLike, + NDArray, + _ShapeLike, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeObject_co, +) + +from numpy.core.shape_base import vstack + +_P = ParamSpec("_P") +_SCT = TypeVar("_SCT", bound=generic) + +# The signatures of `__array_wrap__` and `__array_prepare__` are the same; +# give them unique names for the sake of clarity +class _ArrayWrap(Protocol): + def __call__( + self, + array: NDArray[Any], + context: None | tuple[ufunc, tuple[Any, ...], int] = ..., + /, + ) -> Any: ... + +class _ArrayPrepare(Protocol): + def __call__( + self, + array: NDArray[Any], + context: None | tuple[ufunc, tuple[Any, ...], int] = ..., + /, + ) -> Any: ... + +class _SupportsArrayWrap(Protocol): + @property + def __array_wrap__(self) -> _ArrayWrap: ... + +class _SupportsArrayPrepare(Protocol): + @property + def __array_prepare__(self) -> _ArrayPrepare: ... + +__all__: list[str] + +row_stack = vstack + +def take_along_axis( + arr: _SCT | NDArray[_SCT], + indices: NDArray[integer[Any]], + axis: None | int, +) -> NDArray[_SCT]: ... 
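The stub above ties the element type of the result to the element type of arr via _SCT, while indices may be any integer array. A small sketch of how a type checker reads it (illustrative; the inferred types in the comments are what mypy or pyright would report):

import numpy as np
import numpy.typing as npt

a: npt.NDArray[np.float64] = np.array([[10., 30., 20.], [60., 40., 50.]])
ai = np.argsort(a, axis=1)               # NDArray[np.intp], a valid integer index array
out = np.take_along_axis(a, ai, axis=1)  # inferred as NDArray[np.float64]; each row now sorted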
+ +def put_along_axis( + arr: NDArray[_SCT], + indices: NDArray[integer[Any]], + values: ArrayLike, + axis: None | int, +) -> None: ... + +@overload +def apply_along_axis( + func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_SCT]], + axis: SupportsIndex, + arr: ArrayLike, + *args: _P.args, + **kwargs: _P.kwargs, +) -> NDArray[_SCT]: ... +@overload +def apply_along_axis( + func1d: Callable[Concatenate[NDArray[Any], _P], ArrayLike], + axis: SupportsIndex, + arr: ArrayLike, + *args: _P.args, + **kwargs: _P.kwargs, +) -> NDArray[Any]: ... + +def apply_over_axes( + func: Callable[[NDArray[Any], int], NDArray[_SCT]], + a: ArrayLike, + axes: int | Sequence[int], +) -> NDArray[_SCT]: ... + +@overload +def expand_dims( + a: _ArrayLike[_SCT], + axis: _ShapeLike, +) -> NDArray[_SCT]: ... +@overload +def expand_dims( + a: ArrayLike, + axis: _ShapeLike, +) -> NDArray[Any]: ... + +@overload +def column_stack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ... +@overload +def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... + +@overload +def dstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ... +@overload +def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... + +@overload +def array_split( + ary: _ArrayLike[_SCT], + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[_SCT]]: ... +@overload +def array_split( + ary: ArrayLike, + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[Any]]: ... + +@overload +def split( + ary: _ArrayLike[_SCT], + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[_SCT]]: ... +@overload +def split( + ary: ArrayLike, + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[Any]]: ... + +@overload +def hsplit( + ary: _ArrayLike[_SCT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_SCT]]: ... +@overload +def hsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def vsplit( + ary: _ArrayLike[_SCT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_SCT]]: ... +@overload +def vsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def dsplit( + ary: _ArrayLike[_SCT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_SCT]]: ... +@overload +def dsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def get_array_prepare(*args: _SupportsArrayPrepare) -> _ArrayPrepare: ... +@overload +def get_array_prepare(*args: object) -> None | _ArrayPrepare: ... + +@overload +def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ... +@overload +def get_array_wrap(*args: object) -> None | _ArrayWrap: ... + +@overload +def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ... +@overload +def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ... 
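The kron overload ladder runs bool, unsigned, signed, float, complex, then object, mirroring NumPy's result-type promotion; the type: ignore[misc] markers presumably silence the overlapping-overload diagnostics this ordering triggers. A quick illustration of the promotions the ladder encodes (results assume default dtypes on a 64-bit platform):

import numpy as np

print(np.kron([True, False], [True, True]).dtype)  # bool -- both operands boolean
print(np.kron([1, 2], [0.5, 1.5]).dtype)           # float64 -- int x float promotes to float
print(np.kron([1 + 0j], [2]).dtype)                # complex128 -- any complex operand wins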
+ +@overload +def tile( + A: _ArrayLike[_SCT], + reps: int | Sequence[int], +) -> NDArray[_SCT]: ... +@overload +def tile( + A: ArrayLike, + reps: int | Sequence[int], +) -> NDArray[Any]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/stride_tricks.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/stride_tricks.py new file mode 100644 index 00000000..6794ad55 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/stride_tricks.py @@ -0,0 +1,547 @@ +""" +Utilities that manipulate strides to achieve desirable effects. + +An explanation of strides can be found in the "ndarray.rst" file in the +NumPy reference guide. + +""" +import numpy as np +from numpy.core.numeric import normalize_axis_tuple +from numpy.core.overrides import array_function_dispatch, set_module + +__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes'] + + +class DummyArray: + """Dummy object that just exists to hang __array_interface__ dictionaries + and possibly keep alive a reference to a base array. + """ + + def __init__(self, interface, base=None): + self.__array_interface__ = interface + self.base = base + + +def _maybe_view_as_subclass(original_array, new_array): + if type(original_array) is not type(new_array): + # if input was an ndarray subclass and subclasses were OK, + # then view the result as that subclass. + new_array = new_array.view(type=type(original_array)) + # Since we have done something akin to a view from original_array, we + # should let the subclass finalize (if it has it implemented, i.e., is + # not None). + if new_array.__array_finalize__: + new_array.__array_finalize__(original_array) + return new_array + + +def as_strided(x, shape=None, strides=None, subok=False, writeable=True): + """ + Create a view into the array with the given shape and strides. + + .. warning:: This function has to be used with extreme care, see notes. + + Parameters + ---------- + x : ndarray + Array to create a new. + shape : sequence of int, optional + The shape of the new array. Defaults to ``x.shape``. + strides : sequence of int, optional + The strides of the new array. Defaults to ``x.strides``. + subok : bool, optional + .. versionadded:: 1.10 + + If True, subclasses are preserved. + writeable : bool, optional + .. versionadded:: 1.12 + + If set to False, the returned array will always be readonly. + Otherwise it will be writable if the original array was. It + is advisable to set this to False if possible (see Notes). + + Returns + ------- + view : ndarray + + See also + -------- + broadcast_to : broadcast an array to a given shape. + reshape : reshape an array. + lib.stride_tricks.sliding_window_view : + userfriendly and safe function for the creation of sliding window views. + + Notes + ----- + ``as_strided`` creates a view into the array given the exact strides + and shape. This means it manipulates the internal data structure of + ndarray and, if done incorrectly, the array elements can point to + invalid memory and can corrupt results or crash your program. + It is advisable to always use the original ``x.strides`` when + calculating new strides to avoid reliance on a contiguous memory + layout. + + Furthermore, arrays created with this function often contain self + overlapping memory, so that two elements are identical. + Vectorized write operations on such arrays will typically be + unpredictable. They may even give different results for small, large, + or transposed arrays. 
+ + Since writing to these arrays has to be tested and done with great + care, you may want to use ``writeable=False`` to avoid accidental write + operations. + + For these reasons it is advisable to avoid ``as_strided`` when + possible. + """ + # first convert input to array, possibly keeping subclass + x = np.array(x, copy=False, subok=subok) + interface = dict(x.__array_interface__) + if shape is not None: + interface['shape'] = tuple(shape) + if strides is not None: + interface['strides'] = tuple(strides) + + array = np.asarray(DummyArray(interface, base=x)) + # The route via `__interface__` does not preserve structured + # dtypes. Since dtype should remain unchanged, we set it explicitly. + array.dtype = x.dtype + + view = _maybe_view_as_subclass(x, array) + + if view.flags.writeable and not writeable: + view.flags.writeable = False + + return view + + +def _sliding_window_view_dispatcher(x, window_shape, axis=None, *, + subok=None, writeable=None): + return (x,) + + +@array_function_dispatch(_sliding_window_view_dispatcher) +def sliding_window_view(x, window_shape, axis=None, *, + subok=False, writeable=False): + """ + Create a sliding window view into the array with the given window shape. + + Also known as rolling or moving window, the window slides across all + dimensions of the array and extracts subsets of the array at all window + positions. + + .. versionadded:: 1.20.0 + + Parameters + ---------- + x : array_like + Array to create the sliding window view from. + window_shape : int or tuple of int + Size of window over each axis that takes part in the sliding window. + If `axis` is not present, must have same length as the number of input + array dimensions. Single integers `i` are treated as if they were the + tuple `(i,)`. + axis : int or tuple of int, optional + Axis or axes along which the sliding window is applied. + By default, the sliding window is applied to all axes and + `window_shape[i]` will refer to axis `i` of `x`. + If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to + the axis `axis[i]` of `x`. + Single integers `i` are treated as if they were the tuple `(i,)`. + subok : bool, optional + If True, sub-classes will be passed-through, otherwise the returned + array will be forced to be a base-class array (default). + writeable : bool, optional + When true, allow writing to the returned view. The default is false, + as this should be used with caution: the returned view contains the + same memory location multiple times, so writing to one location will + cause others to change. + + Returns + ------- + view : ndarray + Sliding window view of the array. The sliding window dimensions are + inserted at the end, and the original dimensions are trimmed as + required by the size of the sliding window. + That is, ``view.shape = x_shape_trimmed + window_shape``, where + ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less + than the corresponding window size. + + See Also + -------- + lib.stride_tricks.as_strided: A lower-level and less safe routine for + creating arbitrary views from custom shape and strides. + broadcast_to: broadcast an array to a given shape. + + Notes + ----- + For many applications using a sliding window view can be convenient, but + potentially very slow. Often specialized solutions exist, for example: + + - `scipy.signal.fftconvolve` + + - filtering functions in `scipy.ndimage` + + - moving window functions provided by + `bottleneck `_. 
+ + As a rough estimate, a sliding window approach with an input size of `N` + and a window size of `W` will scale as `O(N*W)` where frequently a special + algorithm can achieve `O(N)`. That means that the sliding window variant + for a window size of 100 can be a 100 times slower than a more specialized + version. + + Nevertheless, for small window sizes, when no custom algorithm exists, or + as a prototyping and developing tool, this function can be a good solution. + + Examples + -------- + >>> x = np.arange(6) + >>> x.shape + (6,) + >>> v = sliding_window_view(x, 3) + >>> v.shape + (4, 3) + >>> v + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4], + [3, 4, 5]]) + + This also works in more dimensions, e.g. + + >>> i, j = np.ogrid[:3, :4] + >>> x = 10*i + j + >>> x.shape + (3, 4) + >>> x + array([[ 0, 1, 2, 3], + [10, 11, 12, 13], + [20, 21, 22, 23]]) + >>> shape = (2,2) + >>> v = sliding_window_view(x, shape) + >>> v.shape + (2, 3, 2, 2) + >>> v + array([[[[ 0, 1], + [10, 11]], + [[ 1, 2], + [11, 12]], + [[ 2, 3], + [12, 13]]], + [[[10, 11], + [20, 21]], + [[11, 12], + [21, 22]], + [[12, 13], + [22, 23]]]]) + + The axis can be specified explicitly: + + >>> v = sliding_window_view(x, 3, 0) + >>> v.shape + (1, 4, 3) + >>> v + array([[[ 0, 10, 20], + [ 1, 11, 21], + [ 2, 12, 22], + [ 3, 13, 23]]]) + + The same axis can be used several times. In that case, every use reduces + the corresponding original dimension: + + >>> v = sliding_window_view(x, (2, 3), (1, 1)) + >>> v.shape + (3, 1, 2, 3) + >>> v + array([[[[ 0, 1, 2], + [ 1, 2, 3]]], + [[[10, 11, 12], + [11, 12, 13]]], + [[[20, 21, 22], + [21, 22, 23]]]]) + + Combining with stepped slicing (`::step`), this can be used to take sliding + views which skip elements: + + >>> x = np.arange(7) + >>> sliding_window_view(x, 5)[:, ::2] + array([[0, 2, 4], + [1, 3, 5], + [2, 4, 6]]) + + or views which move by multiple elements + + >>> x = np.arange(7) + >>> sliding_window_view(x, 3)[::2, :] + array([[0, 1, 2], + [2, 3, 4], + [4, 5, 6]]) + + A common application of `sliding_window_view` is the calculation of running + statistics. The simplest example is the + `moving average `_: + + >>> x = np.arange(6) + >>> x.shape + (6,) + >>> v = sliding_window_view(x, 3) + >>> v.shape + (4, 3) + >>> v + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4], + [3, 4, 5]]) + >>> moving_average = v.mean(axis=-1) + >>> moving_average + array([1., 2., 3., 4.]) + + Note that a sliding window approach is often **not** optimal (see Notes). 
+ """ + window_shape = (tuple(window_shape) + if np.iterable(window_shape) + else (window_shape,)) + # first convert input to array, possibly keeping subclass + x = np.array(x, copy=False, subok=subok) + + window_shape_array = np.array(window_shape) + if np.any(window_shape_array < 0): + raise ValueError('`window_shape` cannot contain negative values') + + if axis is None: + axis = tuple(range(x.ndim)) + if len(window_shape) != len(axis): + raise ValueError(f'Since axis is `None`, must provide ' + f'window_shape for all dimensions of `x`; ' + f'got {len(window_shape)} window_shape elements ' + f'and `x.ndim` is {x.ndim}.') + else: + axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True) + if len(window_shape) != len(axis): + raise ValueError(f'Must provide matching length window_shape and ' + f'axis; got {len(window_shape)} window_shape ' + f'elements and {len(axis)} axes elements.') + + out_strides = x.strides + tuple(x.strides[ax] for ax in axis) + + # note: same axis can be windowed repeatedly + x_shape_trimmed = list(x.shape) + for ax, dim in zip(axis, window_shape): + if x_shape_trimmed[ax] < dim: + raise ValueError( + 'window shape cannot be larger than input array shape') + x_shape_trimmed[ax] -= dim - 1 + out_shape = tuple(x_shape_trimmed) + window_shape + return as_strided(x, strides=out_strides, shape=out_shape, + subok=subok, writeable=writeable) + + +def _broadcast_to(array, shape, subok, readonly): + shape = tuple(shape) if np.iterable(shape) else (shape,) + array = np.array(array, copy=False, subok=subok) + if not shape and array.shape: + raise ValueError('cannot broadcast a non-scalar to a scalar array') + if any(size < 0 for size in shape): + raise ValueError('all elements of broadcast shape must be non-' + 'negative') + extras = [] + it = np.nditer( + (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras, + op_flags=['readonly'], itershape=shape, order='C') + with it: + # never really has writebackifcopy semantics + broadcast = it.itviews[0] + result = _maybe_view_as_subclass(array, broadcast) + # In a future version this will go away + if not readonly and array.flags._writeable_no_warn: + result.flags.writeable = True + result.flags._warn_on_write = True + return result + + +def _broadcast_to_dispatcher(array, shape, subok=None): + return (array,) + + +@array_function_dispatch(_broadcast_to_dispatcher, module='numpy') +def broadcast_to(array, shape, subok=False): + """Broadcast an array to a new shape. + + Parameters + ---------- + array : array_like + The array to broadcast. + shape : tuple or int + The shape of the desired array. A single integer ``i`` is interpreted + as ``(i,)``. + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned array will be forced to be a base-class array (default). + + Returns + ------- + broadcast : array + A readonly view on the original array with the given shape. It is + typically not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. + + Raises + ------ + ValueError + If the array is not compatible with the new shape according to NumPy's + broadcasting rules. + + See Also + -------- + broadcast + broadcast_arrays + broadcast_shapes + + Notes + ----- + .. 
versionadded:: 1.10.0 + + Examples + -------- + >>> x = np.array([1, 2, 3]) + >>> np.broadcast_to(x, (3, 3)) + array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) + """ + return _broadcast_to(array, shape, subok=subok, readonly=True) + + +def _broadcast_shape(*args): + """Returns the shape of the arrays that would result from broadcasting the + supplied arrays against each other. + """ + # use the old-iterator because np.nditer does not handle size 0 arrays + # consistently + b = np.broadcast(*args[:32]) + # unfortunately, it cannot handle 32 or more arguments directly + for pos in range(32, len(args), 31): + # ironically, np.broadcast does not properly handle np.broadcast + # objects (it treats them as scalars) + # use broadcasting to avoid allocating the full array + b = broadcast_to(0, b.shape) + b = np.broadcast(b, *args[pos:(pos + 31)]) + return b.shape + + +@set_module('numpy') +def broadcast_shapes(*args): + """ + Broadcast the input shapes into a single shape. + + :ref:`Learn more about broadcasting here `. + + .. versionadded:: 1.20.0 + + Parameters + ---------- + `*args` : tuples of ints, or ints + The shapes to be broadcast against each other. + + Returns + ------- + tuple + Broadcasted shape. + + Raises + ------ + ValueError + If the shapes are not compatible and cannot be broadcast according + to NumPy's broadcasting rules. + + See Also + -------- + broadcast + broadcast_arrays + broadcast_to + + Examples + -------- + >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2)) + (3, 2) + + >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7)) + (5, 6, 7) + """ + arrays = [np.empty(x, dtype=[]) for x in args] + return _broadcast_shape(*arrays) + + +def _broadcast_arrays_dispatcher(*args, subok=None): + return args + + +@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy') +def broadcast_arrays(*args, subok=False): + """ + Broadcast any number of arrays against each other. + + Parameters + ---------- + `*args` : array_likes + The arrays to broadcast. + + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned arrays will be forced to be a base-class array (default). + + Returns + ------- + broadcasted : list of arrays + These arrays are views on the original arrays. They are typically + not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. If you need + to write to the arrays, make copies first. While you can set the + ``writable`` flag True, writing to a single output value may end up + changing more than one location in the output array. + + .. deprecated:: 1.17 + The output is currently marked so that if written to, a deprecation + warning will be emitted. A future version will set the + ``writable`` flag False so writing to it will raise an error. + + See Also + -------- + broadcast + broadcast_to + broadcast_shapes + + Examples + -------- + >>> x = np.array([[1,2,3]]) + >>> y = np.array([[4],[5]]) + >>> np.broadcast_arrays(x, y) + [array([[1, 2, 3], + [1, 2, 3]]), array([[4, 4, 4], + [5, 5, 5]])] + + Here is a useful idiom for getting contiguous copies instead of + non-contiguous views. + + >>> [np.array(a) for a in np.broadcast_arrays(x, y)] + [array([[1, 2, 3], + [1, 2, 3]]), array([[4, 4, 4], + [5, 5, 5]])] + + """ + # nditer is not used here to avoid the limit of 32 arrays. 
+ # Otherwise, something like the following one-liner would suffice: + # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], + # order='C').itviews + + args = [np.array(_m, copy=False, subok=subok) for _m in args] + + shape = _broadcast_shape(*args) + + if all(array.shape == shape for array in args): + # Common case where nothing needs to be broadcasted. + return args + + return [_broadcast_to(array, shape, subok=subok, readonly=False) + for array in args] diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/stride_tricks.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/stride_tricks.pyi new file mode 100644 index 00000000..4c9a98e8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/stride_tricks.pyi @@ -0,0 +1,80 @@ +from collections.abc import Iterable +from typing import Any, TypeVar, overload, SupportsIndex + +from numpy import generic +from numpy._typing import ( + NDArray, + ArrayLike, + _ShapeLike, + _Shape, + _ArrayLike +) + +_SCT = TypeVar("_SCT", bound=generic) + +__all__: list[str] + +class DummyArray: + __array_interface__: dict[str, Any] + base: None | NDArray[Any] + def __init__( + self, + interface: dict[str, Any], + base: None | NDArray[Any] = ..., + ) -> None: ... + +@overload +def as_strided( + x: _ArrayLike[_SCT], + shape: None | Iterable[int] = ..., + strides: None | Iterable[int] = ..., + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[_SCT]: ... +@overload +def as_strided( + x: ArrayLike, + shape: None | Iterable[int] = ..., + strides: None | Iterable[int] = ..., + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[Any]: ... + +@overload +def sliding_window_view( + x: _ArrayLike[_SCT], + window_shape: int | Iterable[int], + axis: None | SupportsIndex = ..., + *, + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[_SCT]: ... +@overload +def sliding_window_view( + x: ArrayLike, + window_shape: int | Iterable[int], + axis: None | SupportsIndex = ..., + *, + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[Any]: ... + +@overload +def broadcast_to( + array: _ArrayLike[_SCT], + shape: int | Iterable[int], + subok: bool = ..., +) -> NDArray[_SCT]: ... +@overload +def broadcast_to( + array: ArrayLike, + shape: int | Iterable[int], + subok: bool = ..., +) -> NDArray[Any]: ... + +def broadcast_shapes(*args: _ShapeLike) -> _Shape: ... + +def broadcast_arrays( + *args: ArrayLike, + subok: bool = ..., +) -> list[NDArray[Any]]: ... 
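With the stride_tricks stubs in place, a short sanity check of the API they type (illustrative; relies only on public NumPy behavior):

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

x = np.arange(6)
v = sliding_window_view(x, 3)  # shape (4, 3): four length-3 windows over x
assert v.shape == (4, 3)
assert not v.flags.writeable   # writeable=False by default, since windows alias memory
assert np.broadcast_shapes((3, 1), (1, 4)) == (3, 4)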
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test__datasource.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test__datasource.py new file mode 100644 index 00000000..c8149abc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test__datasource.py @@ -0,0 +1,350 @@ +import os +import pytest +from tempfile import mkdtemp, mkstemp, NamedTemporaryFile +from shutil import rmtree + +import numpy.lib._datasource as datasource +from numpy.testing import assert_, assert_equal, assert_raises + +import urllib.request as urllib_request +from urllib.parse import urlparse +from urllib.error import URLError + + +def urlopen_stub(url, data=None): + '''Stub to replace urlopen for testing.''' + if url == valid_httpurl(): + tmpfile = NamedTemporaryFile(prefix='urltmp_') + return tmpfile + else: + raise URLError('Name or service not known') + +# setup and teardown +old_urlopen = None + + +def setup_module(): + global old_urlopen + + old_urlopen = urllib_request.urlopen + urllib_request.urlopen = urlopen_stub + + +def teardown_module(): + urllib_request.urlopen = old_urlopen + +# A valid website for more robust testing +http_path = 'http://www.google.com/' +http_file = 'index.html' + +http_fakepath = 'http://fake.abc.web/site/' +http_fakefile = 'fake.txt' + +malicious_files = ['/etc/shadow', '../../shadow', + '..\\system.dat', 'c:\\windows\\system.dat'] + +magic_line = b'three is the magic number' + + +# Utility functions used by many tests +def valid_textfile(filedir): + # Generate and return a valid temporary file. + fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True) + os.close(fd) + return path + + +def invalid_textfile(filedir): + # Generate and return an invalid filename. + fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir) + os.close(fd) + os.remove(path) + return path + + +def valid_httpurl(): + return http_path+http_file + + +def invalid_httpurl(): + return http_fakepath+http_fakefile + + +def valid_baseurl(): + return http_path + + +def invalid_baseurl(): + return http_fakepath + + +def valid_httpfile(): + return http_file + + +def invalid_httpfile(): + return http_fakefile + + +class TestDataSourceOpen: + def setup_method(self): + self.tmpdir = mkdtemp() + self.ds = datasource.DataSource(self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + fh = self.ds.open(valid_httpurl()) + assert_(fh) + fh.close() + + def test_InvalidHTTP(self): + url = invalid_httpurl() + assert_raises(OSError, self.ds.open, url) + try: + self.ds.open(url) + except OSError as e: + # Regression test for bug fixed in r4342. + assert_(e.errno is None) + + def test_InvalidHTTPCacheURLError(self): + assert_raises(URLError, self.ds._cache, invalid_httpurl()) + + def test_ValidFile(self): + local_file = valid_textfile(self.tmpdir) + fh = self.ds.open(local_file) + assert_(fh) + fh.close() + + def test_InvalidFile(self): + invalid_file = invalid_textfile(self.tmpdir) + assert_raises(OSError, self.ds.open, invalid_file) + + def test_ValidGzipFile(self): + try: + import gzip + except ImportError: + # We don't have the gzip capabilities to test. + pytest.skip() + # Test datasource's internal file_opener for Gzip files. 
+ filepath = os.path.join(self.tmpdir, 'foobar.txt.gz') + fp = gzip.open(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = self.ds.open(filepath) + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + def test_ValidBz2File(self): + try: + import bz2 + except ImportError: + # We don't have the bz2 capabilities to test. + pytest.skip() + # Test datasource's internal file_opener for BZip2 files. + filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') + fp = bz2.BZ2File(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = self.ds.open(filepath) + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + +class TestDataSourceExists: + def setup_method(self): + self.tmpdir = mkdtemp() + self.ds = datasource.DataSource(self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + assert_(self.ds.exists(valid_httpurl())) + + def test_InvalidHTTP(self): + assert_equal(self.ds.exists(invalid_httpurl()), False) + + def test_ValidFile(self): + # Test valid file in destpath + tmpfile = valid_textfile(self.tmpdir) + assert_(self.ds.exists(tmpfile)) + # Test valid local file not in destpath + localdir = mkdtemp() + tmpfile = valid_textfile(localdir) + assert_(self.ds.exists(tmpfile)) + rmtree(localdir) + + def test_InvalidFile(self): + tmpfile = invalid_textfile(self.tmpdir) + assert_equal(self.ds.exists(tmpfile), False) + + +class TestDataSourceAbspath: + def setup_method(self): + self.tmpdir = os.path.abspath(mkdtemp()) + self.ds = datasource.DataSource(self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) + local_path = os.path.join(self.tmpdir, netloc, + upath.strip(os.sep).strip('/')) + assert_equal(local_path, self.ds.abspath(valid_httpurl())) + + def test_ValidFile(self): + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + assert_equal(tmpfile, self.ds.abspath(tmpfilename)) + # Test filename with complete path + assert_equal(tmpfile, self.ds.abspath(tmpfile)) + + def test_InvalidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl()) + invalidhttp = os.path.join(self.tmpdir, netloc, + upath.strip(os.sep).strip('/')) + assert_(invalidhttp != self.ds.abspath(valid_httpurl())) + + def test_InvalidFile(self): + invalidfile = valid_textfile(self.tmpdir) + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + assert_(invalidfile != self.ds.abspath(tmpfilename)) + # Test filename with complete path + assert_(invalidfile != self.ds.abspath(tmpfile)) + + def test_sandboxing(self): + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + + tmp_path = lambda x: os.path.abspath(self.ds.abspath(x)) + + assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir)) + assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir)) + assert_(tmp_path(tmpfile).startswith(self.tmpdir)) + assert_(tmp_path(tmpfilename).startswith(self.tmpdir)) + for fn in malicious_files: + assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) + assert_(tmp_path(fn).startswith(self.tmpdir)) + + def test_windows_os_sep(self): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP() + self.test_ValidFile() + self.test_InvalidHTTP() + self.test_InvalidFile() + self.test_sandboxing() + finally: + os.sep = orig_os_sep + + +class 
TestRepositoryAbspath: + def setup_method(self): + self.tmpdir = os.path.abspath(mkdtemp()) + self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.repos + + def test_ValidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) + local_path = os.path.join(self.repos._destpath, netloc, + upath.strip(os.sep).strip('/')) + filepath = self.repos.abspath(valid_httpfile()) + assert_equal(local_path, filepath) + + def test_sandboxing(self): + tmp_path = lambda x: os.path.abspath(self.repos.abspath(x)) + assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir)) + for fn in malicious_files: + assert_(tmp_path(http_path+fn).startswith(self.tmpdir)) + assert_(tmp_path(fn).startswith(self.tmpdir)) + + def test_windows_os_sep(self): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP() + self.test_sandboxing() + finally: + os.sep = orig_os_sep + + +class TestRepositoryExists: + def setup_method(self): + self.tmpdir = mkdtemp() + self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.repos + + def test_ValidFile(self): + # Create local temp file + tmpfile = valid_textfile(self.tmpdir) + assert_(self.repos.exists(tmpfile)) + + def test_InvalidFile(self): + tmpfile = invalid_textfile(self.tmpdir) + assert_equal(self.repos.exists(tmpfile), False) + + def test_RemoveHTTPFile(self): + assert_(self.repos.exists(valid_httpurl())) + + def test_CachedHTTPFile(self): + localfile = valid_httpurl() + # Create a locally cached temp file with an URL based + # directory structure. This is similar to what Repository.open + # would do. + scheme, netloc, upath, pms, qry, frg = urlparse(localfile) + local_path = os.path.join(self.repos._destpath, netloc) + os.mkdir(local_path, 0o0700) + tmpfile = valid_textfile(local_path) + assert_(self.repos.exists(tmpfile)) + + +class TestOpenFunc: + def setup_method(self): + self.tmpdir = mkdtemp() + + def teardown_method(self): + rmtree(self.tmpdir) + + def test_DataSourceOpen(self): + local_file = valid_textfile(self.tmpdir) + # Test case where destpath is passed in + fp = datasource.open(local_file, destpath=self.tmpdir) + assert_(fp) + fp.close() + # Test case where default destpath is used + fp = datasource.open(local_file) + assert_(fp) + fp.close() + +def test_del_attr_handling(): + # DataSource __del__ can be called + # even if __init__ fails when the + # Exception object is caught by the + # caller as happens in refguide_check + # is_deprecated() function + + ds = datasource.DataSource() + # simulate failed __init__ by removing key attribute + # produced within __init__ and expected by __del__ + del ds._istmpdest + # should not raise an AttributeError if __del__ + # gracefully handles failed __init__: + ds.__del__() diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test__iotools.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test__iotools.py new file mode 100644 index 00000000..a5b78702 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test__iotools.py @@ -0,0 +1,353 @@ +import time +from datetime import date + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_allclose, assert_raises, + ) +from numpy.lib._iotools import ( + LineSplitter, NameValidator, StringConverter, + has_nested_fields, easy_dtype, flatten_dtype + ) + + +class TestLineSplitter: + "Tests the LineSplitter class." 
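# LineSplitter strips the comment portion of a line and splits the rest on
# the chosen delimiter: None or '' means any whitespace, an int or a tuple
# of ints means fixed-width fields. A small sketch of the default behavior
# the tests below pin down:
#   split = LineSplitter(',')
#   split(" 1,2,,3 # trailing comment")   # -> ['1', '2', '', '3']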
+ + def test_no_delimiter(self): + "Test LineSplitter w/o delimiter" + strg = " 1 2 3 4 5 # test" + test = LineSplitter()(strg) + assert_equal(test, ['1', '2', '3', '4', '5']) + test = LineSplitter('')(strg) + assert_equal(test, ['1', '2', '3', '4', '5']) + + def test_space_delimiter(self): + "Test space delimiter" + strg = " 1 2 3 4 5 # test" + test = LineSplitter(' ')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + test = LineSplitter(' ')(strg) + assert_equal(test, ['1 2 3 4', '5']) + + def test_tab_delimiter(self): + "Test tab delimiter" + strg = " 1\t 2\t 3\t 4\t 5 6" + test = LineSplitter('\t')(strg) + assert_equal(test, ['1', '2', '3', '4', '5 6']) + strg = " 1 2\t 3 4\t 5 6" + test = LineSplitter('\t')(strg) + assert_equal(test, ['1 2', '3 4', '5 6']) + + def test_other_delimiter(self): + "Test LineSplitter on delimiter" + strg = "1,2,3,4,,5" + test = LineSplitter(',')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + # + strg = " 1,2,3,4,,5 # test" + test = LineSplitter(',')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + + # gh-11028 bytes comment/delimiters should get encoded + strg = b" 1,2,3,4,,5 % test" + test = LineSplitter(delimiter=b',', comments=b'%')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + + def test_constant_fixed_width(self): + "Test LineSplitter w/ fixed-width fields" + strg = " 1 2 3 4 5 # test" + test = LineSplitter(3)(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5', '']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter(20)(strg) + assert_equal(test, ['1 3 4 5 6']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter(30)(strg) + assert_equal(test, ['1 3 4 5 6']) + + def test_variable_fixed_width(self): + strg = " 1 3 4 5 6# test" + test = LineSplitter((3, 6, 6, 3))(strg) + assert_equal(test, ['1', '3', '4 5', '6']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter((6, 6, 9))(strg) + assert_equal(test, ['1', '3 4', '5 6']) + +# ----------------------------------------------------------------------------- + + +class TestNameValidator: + + def test_case_sensitivity(self): + "Test case sensitivity" + names = ['A', 'a', 'b', 'c'] + test = NameValidator().validate(names) + assert_equal(test, ['A', 'a', 'b', 'c']) + test = NameValidator(case_sensitive=False).validate(names) + assert_equal(test, ['A', 'A_1', 'B', 'C']) + test = NameValidator(case_sensitive='upper').validate(names) + assert_equal(test, ['A', 'A_1', 'B', 'C']) + test = NameValidator(case_sensitive='lower').validate(names) + assert_equal(test, ['a', 'a_1', 'b', 'c']) + + # check exceptions + assert_raises(ValueError, NameValidator, case_sensitive='foobar') + + def test_excludelist(self): + "Test excludelist" + names = ['dates', 'data', 'Other Data', 'mask'] + validator = NameValidator(excludelist=['dates', 'data', 'mask']) + test = validator.validate(names) + assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_']) + + def test_missing_names(self): + "Test validate missing names" + namelist = ('a', 'b', 'c') + validator = NameValidator() + assert_equal(validator(namelist), ['a', 'b', 'c']) + namelist = ('', 'b', 'c') + assert_equal(validator(namelist), ['f0', 'b', 'c']) + namelist = ('a', 'b', '') + assert_equal(validator(namelist), ['a', 'b', 'f0']) + namelist = ('', 'f0', '') + assert_equal(validator(namelist), ['f1', 'f0', 'f2']) + + def test_validate_nb_names(self): + "Test validate nb names" + namelist = ('a', 'b', 'c') + validator = NameValidator() + assert_equal(validator(namelist, nbfields=1), ('a',)) + 
assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"), + ['a', 'b', 'c', 'g0', 'g1']) + + def test_validate_wo_names(self): + "Test validate no names" + namelist = None + validator = NameValidator() + assert_(validator(namelist) is None) + assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2']) + +# ----------------------------------------------------------------------------- + + +def _bytes_to_date(s): + return date(*time.strptime(s, "%Y-%m-%d")[:3]) + + +class TestStringConverter: + "Test StringConverter" + + def test_creation(self): + "Test creation of a StringConverter" + converter = StringConverter(int, -99999) + assert_equal(converter._status, 1) + assert_equal(converter.default, -99999) + + def test_upgrade(self): + "Tests the upgrade method." + + converter = StringConverter() + assert_equal(converter._status, 0) + + # test int + assert_equal(converter.upgrade('0'), 0) + assert_equal(converter._status, 1) + + # On systems where long defaults to 32-bit, the statuses will be + # offset by one, so we check for this here. + import numpy.core.numeric as nx + status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize) + + # test int > 2**32 + assert_equal(converter.upgrade('17179869184'), 17179869184) + assert_equal(converter._status, 1 + status_offset) + + # test float + assert_allclose(converter.upgrade('0.'), 0.0) + assert_equal(converter._status, 2 + status_offset) + + # test complex + assert_equal(converter.upgrade('0j'), complex('0j')) + assert_equal(converter._status, 3 + status_offset) + + # test str + # note that the longdouble type has been skipped, so the + # _status increases by 2. Everything should succeed with + # unicode conversion (8). + for s in ['a', b'a']: + res = converter.upgrade(s) + assert_(type(res) is str) + assert_equal(res, 'a') + assert_equal(converter._status, 8 + status_offset) + + def test_missing(self): + "Tests the use of missing values." 
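# StringConverter maps anything listed in missing_values (plus the empty
# string) to its default instead of converting it; a string that is neither
# missing nor convertible raises ValueError, which is what the 'miss' case
# at the end checks. A rough sketch under those assumptions:
#   conv = StringConverter(int, default=-1, missing_values=('N/A',))
#   conv('7') -> 7;  conv('N/A') -> -1;  conv('oops') -> ValueError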
+ converter = StringConverter(missing_values=('missing', + 'missed')) + converter.upgrade('0') + assert_equal(converter('0'), 0) + assert_equal(converter(''), converter.default) + assert_equal(converter('missing'), converter.default) + assert_equal(converter('missed'), converter.default) + try: + converter('miss') + except ValueError: + pass + + def test_upgrademapper(self): + "Tests updatemapper" + dateparser = _bytes_to_date + _original_mapper = StringConverter._mapper[:] + try: + StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) + convert = StringConverter(dateparser, date(2000, 1, 1)) + test = convert('2001-01-01') + assert_equal(test, date(2001, 1, 1)) + test = convert('2009-01-01') + assert_equal(test, date(2009, 1, 1)) + test = convert('') + assert_equal(test, date(2000, 1, 1)) + finally: + StringConverter._mapper = _original_mapper + + def test_string_to_object(self): + "Make sure that string-to-object functions are properly recognized" + old_mapper = StringConverter._mapper[:] # copy of list + conv = StringConverter(_bytes_to_date) + assert_equal(conv._mapper, old_mapper) + assert_(hasattr(conv, 'default')) + + def test_keep_default(self): + "Make sure we don't lose an explicit default" + converter = StringConverter(None, missing_values='', + default=-999) + converter.upgrade('3.14159265') + assert_equal(converter.default, -999) + assert_equal(converter.type, np.dtype(float)) + # + converter = StringConverter( + None, missing_values='', default=0) + converter.upgrade('3.14159265') + assert_equal(converter.default, 0) + assert_equal(converter.type, np.dtype(float)) + + def test_keep_default_zero(self): + "Check that we don't lose a default of 0" + converter = StringConverter(int, default=0, + missing_values="N/A") + assert_equal(converter.default, 0) + + def test_keep_missing_values(self): + "Check that we're not losing missing values" + converter = StringConverter(int, default=0, + missing_values="N/A") + assert_equal( + converter.missing_values, {'', 'N/A'}) + + def test_int64_dtype(self): + "Check that int64 integer types can be specified" + converter = StringConverter(np.int64, default=0) + val = "-9223372036854775807" + assert_(converter(val) == -9223372036854775807) + val = "9223372036854775807" + assert_(converter(val) == 9223372036854775807) + + def test_uint64_dtype(self): + "Check that uint64 integer types can be specified" + converter = StringConverter(np.uint64, default=0) + val = "9223372043271415339" + assert_(converter(val) == 9223372043271415339) + + +class TestMiscFunctions: + + def test_has_nested_dtype(self): + "Test has_nested_dtype" + ndtype = np.dtype(float) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + assert_equal(has_nested_fields(ndtype), True) + + def test_easy_dtype(self): + "Test ndtype on dtypes" + # Simple case + ndtype = float + assert_equal(easy_dtype(ndtype), np.dtype(float)) + # As string w/o names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', "i4"), ('f1', "f8")])) + # As string w/o names but different default format + assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"), + np.dtype([('field_000', "i4"), ('field_001', "f8")])) + # As string w/ names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names="a, b"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (too many) + ndtype = "i4, f8" + 
assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (not enough) + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names=", b"), + np.dtype([('f0', "i4"), ('b', "f8")])) + # ... (with different default format) + assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"), + np.dtype([('a', "i4"), ('f00', "f8")])) + # As list of tuples w/o names + ndtype = [('A', int), ('B', float)] + assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)])) + # As list of tuples w/ names + assert_equal(easy_dtype(ndtype, names="a,b"), + np.dtype([('a', int), ('b', float)])) + # As list of tuples w/ not enough names + assert_equal(easy_dtype(ndtype, names="a"), + np.dtype([('a', int), ('f0', float)])) + # As list of tuples w/ too many names + assert_equal(easy_dtype(ndtype, names="a,b,c"), + np.dtype([('a', int), ('b', float)])) + # As list of types w/o names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', int), ('f1', float), ('f2', float)])) + # As list of types w names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', int), ('b', float), ('c', float)])) + # As simple dtype w/ names + ndtype = np.dtype(float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([(_, float) for _ in ('a', 'b', 'c')])) + # As simple dtype w/o names (but multiple fields) + ndtype = np.dtype(float) + assert_equal( + easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"), + np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')])) + + def test_flatten_dtype(self): + "Testing flatten_dtype" + # Standard dtype + dt = np.dtype([("a", "f8"), ("b", "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) + # Recursive dtype + dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int]) + # dtype with shaped fields + dt = np.dtype([("a", (float, 2)), ("b", (int, 3))]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, int]) + dt_flat = flatten_dtype(dt, True) + assert_equal(dt_flat, [float] * 2 + [int] * 3) + # dtype w/ titles + dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test__version.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test__version.py new file mode 100644 index 00000000..e6d41ad9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test__version.py @@ -0,0 +1,64 @@ +"""Tests for the NumpyVersion class. + +""" +from numpy.testing import assert_, assert_raises +from numpy.lib import NumpyVersion + + +def test_main_versions(): + assert_(NumpyVersion('1.8.0') == '1.8.0') + for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']: + assert_(NumpyVersion('1.8.0') < ver) + + for ver in ['1.7.0', '1.7.1', '0.9.9']: + assert_(NumpyVersion('1.8.0') > ver) + + +def test_version_1_point_10(): + # regression test for gh-2998. 
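# Plain string comparison gets this wrong: '1.9.0' > '1.10.0'
# lexicographically, because '9' sorts after '1'. NumpyVersion compares the
# dotted components numerically, which is what these assertions pin down.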
+ assert_(NumpyVersion('1.9.0') < '1.10.0') + assert_(NumpyVersion('1.11.0') < '1.11.1') + assert_(NumpyVersion('1.11.0') == '1.11.0') + assert_(NumpyVersion('1.99.11') < '1.99.12') + + +def test_alpha_beta_rc(): + assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1') + for ver in ['1.8.0', '1.8.0rc2']: + assert_(NumpyVersion('1.8.0rc1') < ver) + + for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']: + assert_(NumpyVersion('1.8.0rc1') > ver) + + assert_(NumpyVersion('1.8.0b1') > '1.8.0a2') + + +def test_dev_version(): + assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']: + assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111') + + +def test_dev_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111') + assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2') + + +def test_dev0_version(): + assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']: + assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111') + + +def test_dev0_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111') + assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2') + + +def test_raises(): + for ver in ['1.9', '1,9.0', '1.7.x']: + assert_raises(ValueError, NumpyVersion, ver) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_arraypad.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_arraypad.py new file mode 100644 index 00000000..0bebe369 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_arraypad.py @@ -0,0 +1,1380 @@ +"""Tests for the array padding functions. 
+ +""" +import pytest + +import numpy as np +from numpy.testing import assert_array_equal, assert_allclose, assert_equal +from numpy.lib.arraypad import _as_pairs + + +_numeric_dtypes = ( + np.sctypes["uint"] + + np.sctypes["int"] + + np.sctypes["float"] + + np.sctypes["complex"] +) +_all_modes = { + 'constant': {'constant_values': 0}, + 'edge': {}, + 'linear_ramp': {'end_values': 0}, + 'maximum': {'stat_length': None}, + 'mean': {'stat_length': None}, + 'median': {'stat_length': None}, + 'minimum': {'stat_length': None}, + 'reflect': {'reflect_type': 'even'}, + 'symmetric': {'reflect_type': 'even'}, + 'wrap': {}, + 'empty': {} +} + + +class TestAsPairs: + def test_single_value(self): + """Test casting for a single value.""" + expected = np.array([[3, 3]] * 10) + for x in (3, [3], [[3]]): + result = _as_pairs(x, 10) + assert_equal(result, expected) + # Test with dtype=object + obj = object() + assert_equal( + _as_pairs(obj, 10), + np.array([[obj, obj]] * 10) + ) + + def test_two_values(self): + """Test proper casting for two different values.""" + # Broadcasting in the first dimension with numbers + expected = np.array([[3, 4]] * 10) + for x in ([3, 4], [[3, 4]]): + result = _as_pairs(x, 10) + assert_equal(result, expected) + # and with dtype=object + obj = object() + assert_equal( + _as_pairs(["a", obj], 10), + np.array([["a", obj]] * 10) + ) + + # Broadcasting in the second / last dimension with numbers + assert_equal( + _as_pairs([[3], [4]], 2), + np.array([[3, 3], [4, 4]]) + ) + # and with dtype=object + assert_equal( + _as_pairs([["a"], [obj]], 2), + np.array([["a", "a"], [obj, obj]]) + ) + + def test_with_none(self): + expected = ((None, None), (None, None), (None, None)) + assert_equal( + _as_pairs(None, 3, as_index=False), + expected + ) + assert_equal( + _as_pairs(None, 3, as_index=True), + expected + ) + + def test_pass_through(self): + """Test if `x` already matching desired output are passed through.""" + expected = np.arange(12).reshape((6, 2)) + assert_equal( + _as_pairs(expected, 6), + expected + ) + + def test_as_index(self): + """Test results if `as_index=True`.""" + assert_equal( + _as_pairs([2.6, 3.3], 10, as_index=True), + np.array([[3, 3]] * 10, dtype=np.intp) + ) + assert_equal( + _as_pairs([2.6, 4.49], 10, as_index=True), + np.array([[3, 4]] * 10, dtype=np.intp) + ) + for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]], + [[1, 2]] * 9 + [[1, -2]]): + with pytest.raises(ValueError, match="negative values"): + _as_pairs(x, 10, as_index=True) + + def test_exceptions(self): + """Ensure faulty usage is discovered.""" + with pytest.raises(ValueError, match="more dimensions than allowed"): + _as_pairs([[[3]]], 10) + with pytest.raises(ValueError, match="could not be broadcast"): + _as_pairs([[1, 2], [3, 4]], 3) + with pytest.raises(ValueError, match="could not be broadcast"): + _as_pairs(np.ones((2, 3)), 3) + + +class TestConditionalShortcuts: + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_zero_padding_shortcuts(self, mode): + test = np.arange(120).reshape(4, 5, 6) + pad_amt = [(0, 0) for _ in test.shape] + assert_array_equal(test, np.pad(test, pad_amt, mode=mode)) + + @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',]) + def test_shallow_statistic_range(self, mode): + test = np.arange(120).reshape(4, 5, 6) + pad_amt = [(1, 1) for _ in test.shape] + assert_array_equal(np.pad(test, pad_amt, mode='edge'), + np.pad(test, pad_amt, mode=mode, stat_length=1)) + + @pytest.mark.parametrize("mode", ['maximum', 'mean', 
'median', 'minimum',]) + def test_clip_statistic_range(self, mode): + test = np.arange(30).reshape(5, 6) + pad_amt = [(3, 3) for _ in test.shape] + assert_array_equal(np.pad(test, pad_amt, mode=mode), + np.pad(test, pad_amt, mode=mode, stat_length=30)) + + +class TestStatistic: + def test_check_mean_stat_length(self): + a = np.arange(100).astype('f') + a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), )) + b = np.array( + [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98. + ]) + assert_array_equal(a, b) + + def test_check_maximum_1(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'maximum') + b = np.array( + [99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99] + ) + assert_array_equal(a, b) + + def test_check_maximum_2(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'maximum') + b = np.array( + [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] + ) + assert_array_equal(a, b) + + def test_check_maximum_stat_length(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'maximum', stat_length=10) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] + ) + assert_array_equal(a, b) + 
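# stat_length caps how many samples from each edge feed the statistic; with
# no stat_length, the whole axis is used, as in the two tests above. A quick
# sketch of the rule (values chosen purely for illustration):
#   np.pad([1, 2, 9], (2, 0), 'maximum', stat_length=2)
#   # -> [2, 2, 1, 2, 9]: the left pad is max(1, 2), ignoring the 9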
+ def test_check_minimum_1(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'minimum') + b = np.array( + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_minimum_2(self): + a = np.arange(100) + 2 + a = np.pad(a, (25, 20), 'minimum') + b = np.array( + [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, + + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] + ) + assert_array_equal(a, b) + + def test_check_minimum_stat_length(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'minimum', stat_length=10) + b = np.array( + [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91] + ) + assert_array_equal(a, b) + + def test_check_median(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'median') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + def test_check_median_01(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = np.pad(a, 1, 'median') + b = np.array( + [[4, 4, 5, 4, 4], + + [3, 3, 1, 4, 3], + [5, 4, 5, 9, 5], + [8, 9, 8, 2, 8], + + [4, 4, 5, 4, 4]] + ) + assert_array_equal(a, b) + + def test_check_median_02(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = np.pad(a.T, 1, 'median').T + b = np.array( + [[5, 4, 5, 4, 5], + + [3, 3, 1, 4, 3], + [5, 4, 
5, 9, 5], + [8, 9, 8, 2, 8], + + [5, 4, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_median_stat_length(self): + a = np.arange(100).astype('f') + a[1] = 2. + a[97] = 96. + a = np.pad(a, (25, 20), 'median', stat_length=(3, 5)) + b = np.array( + [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., + + 0., 2., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 96., 98., 99., + + 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., + 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.] + ) + assert_array_equal(a, b) + + def test_check_mean_shape_one(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'mean', stat_length=2) + b = np.array( + [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_mean_2(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'mean') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + @pytest.mark.parametrize("mode", [ + "mean", + "median", + "minimum", + "maximum" + ]) + def test_same_prepend_append(self, mode): + """ Test that appended and prepended values are equal """ + # This test is constructed to trigger floating point rounding errors in + # a way that caused gh-11216 for mode=='mean' + a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64) + a = np.pad(a, (1, 1), mode) + assert_equal(a[0], a[-1]) + + @pytest.mark.parametrize("mode", ["mean", "median", "minimum", "maximum"]) + @pytest.mark.parametrize( + "stat_length", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))] + ) + def test_check_negative_stat_length(self, mode, stat_length): + arr = np.arange(30).reshape((6, 5)) + match = "index can't contain negative values" + with 
pytest.raises(ValueError, match=match): + np.pad(arr, 2, mode, stat_length=stat_length) + + def test_simple_stat_length(self): + a = np.arange(30) + a = np.reshape(a, (6, 5)) + a = np.pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,)) + b = np.array( + [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + + [1, 1, 1, 0, 1, 2, 3, 4, 3, 3], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [11, 11, 11, 10, 11, 12, 13, 14, 13, 13], + [16, 16, 16, 15, 16, 17, 18, 19, 18, 18], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [26, 26, 26, 25, 26, 27, 28, 29, 28, 28], + + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]] + ) + assert_array_equal(a, b) + + @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning") + @pytest.mark.filterwarnings( + "ignore:invalid value encountered in( scalar)? divide:RuntimeWarning" + ) + @pytest.mark.parametrize("mode", ["mean", "median"]) + def test_zero_stat_length_valid(self, mode): + arr = np.pad([1., 2.], (1, 2), mode, stat_length=0) + expected = np.array([np.nan, 1., 2., np.nan, np.nan]) + assert_equal(arr, expected) + + @pytest.mark.parametrize("mode", ["minimum", "maximum"]) + def test_zero_stat_length_invalid(self, mode): + match = "stat_length of 0 yields no value for padding" + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 0, mode, stat_length=0) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 0, mode, stat_length=(1, 0)) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 1, mode, stat_length=0) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 1, mode, stat_length=(1, 0)) + + +class TestConstant: + def test_check_constant(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20)) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20] + ) + assert_array_equal(a, b) + + def test_check_constant_zeros(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'constant') + b = np.array( + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_constant_float(self): + # If input array is int, but constant_values are float, the dtype of + # the array to be padded is kept + arr = np.arange(30).reshape(5, 6) + test = np.pad(arr, (1, 2), mode='constant', + constant_values=1.1) + expected = np.array( + [[ 1, 1, 1, 1, 1, 1, 1, 1, 1], + + [ 1, 0, 1, 2, 3, 4, 5, 1, 1], + [ 1, 6, 
7, 8, 9, 10, 11, 1, 1], + [ 1, 12, 13, 14, 15, 16, 17, 1, 1], + [ 1, 18, 19, 20, 21, 22, 23, 1, 1], + [ 1, 24, 25, 26, 27, 28, 29, 1, 1], + + [ 1, 1, 1, 1, 1, 1, 1, 1, 1], + [ 1, 1, 1, 1, 1, 1, 1, 1, 1]] + ) + assert_allclose(test, expected) + + def test_check_constant_float2(self): + # If input array is float, and constant_values are float, the dtype of + # the array to be padded is kept - here retaining the float constants + arr = np.arange(30).reshape(5, 6) + arr_float = arr.astype(np.float64) + test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant', + constant_values=1.1) + expected = np.array( + [[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + + [ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1], + [ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1], + [ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1], + [ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1], + [ 1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1], + + [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]] + ) + assert_allclose(test, expected) + + def test_check_constant_float3(self): + a = np.arange(100, dtype=float) + a = np.pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2)) + b = np.array( + [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, + -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, + -1.1, -1.1, -1.1, -1.1, -1.1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, + -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2] + ) + assert_allclose(a, b) + + def test_check_constant_odd_pad_amount(self): + arr = np.arange(30).reshape(5, 6) + test = np.pad(arr, ((1,), (2,)), mode='constant', + constant_values=3) + expected = np.array( + [[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], + + [ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3], + [ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3], + [ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3], + [ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3], + [ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3], + + [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]] + ) + assert_allclose(test, expected) + + def test_check_constant_pad_2d(self): + arr = np.arange(4).reshape(2, 2) + test = np.lib.pad(arr, ((1, 2), (1, 3)), mode='constant', + constant_values=((1, 2), (3, 4))) + expected = np.array( + [[3, 1, 1, 4, 4, 4], + [3, 0, 1, 4, 4, 4], + [3, 2, 3, 4, 4, 4], + [3, 2, 2, 4, 4, 4], + [3, 2, 2, 4, 4, 4]] + ) + assert_allclose(test, expected) + + def test_check_large_integers(self): + uint64_max = 2 ** 64 - 1 + arr = np.full(5, uint64_max, dtype=np.uint64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, uint64_max, dtype=np.uint64) + assert_array_equal(test, expected) + + int64_max = 2 ** 63 - 1 + arr = np.full(5, int64_max, dtype=np.int64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, int64_max, dtype=np.int64) + assert_array_equal(test, expected) + + def test_check_object_array(self): + arr = np.empty(1, dtype=object) + obj_a = object() + arr[0] = obj_a + obj_b = object() + obj_c = object() + arr = np.pad(arr, pad_width=1, mode='constant', + constant_values=(obj_b, obj_c)) + + expected = 
np.empty((3,), dtype=object) + expected[0] = obj_b + expected[1] = obj_a + expected[2] = obj_c + + assert_array_equal(arr, expected) + + def test_pad_empty_dimension(self): + arr = np.zeros((3, 0, 2)) + result = np.pad(arr, [(0,), (2,), (1,)], mode="constant") + assert result.shape == (3, 4, 4) + + +class TestLinearRamp: + def test_check_simple(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5)) + b = np.array( + [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56, + 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96, + 0.80, 0.64, 0.48, 0.32, 0.16, + + 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00, + 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, + 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, + 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, + 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, + 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, + 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, + 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, + 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, + 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, + + 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0, + 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.] + ) + assert_allclose(a, b, rtol=1e-5, atol=1e-5) + + def test_check_2d(self): + arr = np.arange(20).reshape(4, 5).astype(np.float64) + test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0)) + expected = np.array( + [[0., 0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.], + [0., 0., 0., 1., 2., 3., 4., 2., 0.], + [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.], + [0., 5., 10., 11., 12., 13., 14., 7., 0.], + [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.], + [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0.]]) + assert_allclose(test, expected) + + @pytest.mark.xfail(exceptions=(AssertionError,)) + def test_object_array(self): + from fractions import Fraction + arr = np.array([Fraction(1, 2), Fraction(-1, 2)]) + actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0) + + # deliberately chosen to have a non-power-of-2 denominator such that + # rounding to floats causes a failure. + expected = np.array([ + Fraction( 0, 12), + Fraction( 3, 12), + Fraction( 6, 12), + Fraction(-6, 12), + Fraction(-4, 12), + Fraction(-2, 12), + Fraction(-0, 12), + ]) + assert_equal(actual, expected) + + def test_end_values(self): + """Ensure that end values are exact.""" + a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp") + assert_equal(a[:, 0], 0.) + assert_equal(a[:, -1], 0.) + assert_equal(a[0, :], 0.) + assert_equal(a[-1, :], 0.) + + @pytest.mark.parametrize("dtype", _numeric_dtypes) + def test_negative_difference(self, dtype): + """ + Check correct behavior of unsigned dtypes if there is a negative + difference between the edge to pad and `end_values`. Check both cases + to be independent of implementation. Test behavior for all other dtypes + in case dtype casting interferes with complex dtypes. See gh-14191. 
+ """ + x = np.array([3], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=0) + expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype) + assert_equal(result, expected) + + x = np.array([0], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=3) + expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype) + assert_equal(result, expected) + + +class TestReflect: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'reflect') + b = np.array( + [25, 24, 23, 22, 21, 20, 19, 18, 17, 16, + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, + 5, 4, 3, 2, 1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, + 88, 87, 86, 85, 84, 83, 82, 81, 80, 79] + ) + assert_array_equal(a, b) + + def test_check_odd_method(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'reflect', reflect_type='odd') + b = np.array( + [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16, + -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, + -5, -4, -3, -2, -1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'reflect') + b = np.array( + [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'reflect') + b = np.array( + [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def 
test_check_01(self): + a = np.pad([1, 2, 3], 2, 'reflect') + b = np.array([3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 3, 'reflect') + b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_03(self): + a = np.pad([1, 2, 3], 4, 'reflect') + b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) + assert_array_equal(a, b) + + +class TestEmptyArray: + """Check how padding behaves on arrays with an empty dimension.""" + + @pytest.mark.parametrize( + # Keep parametrization ordered, otherwise pytest-xdist might believe + # that different tests were collected during parallelization + "mode", sorted(_all_modes.keys() - {"constant", "empty"}) + ) + def test_pad_empty_dimension(self, mode): + match = ("can't extend empty axis 0 using modes other than 'constant' " + "or 'empty'") + with pytest.raises(ValueError, match=match): + np.pad([], 4, mode=mode) + with pytest.raises(ValueError, match=match): + np.pad(np.ndarray(0), 4, mode=mode) + with pytest.raises(ValueError, match=match): + np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode) + + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_pad_non_empty_dimension(self, mode): + result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode) + assert result.shape == (8, 0, 4) + + +class TestSymmetric: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'symmetric') + b = np.array( + [24, 23, 22, 21, 20, 19, 18, 17, 16, 15, + 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, + 4, 3, 2, 1, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, + 89, 88, 87, 86, 85, 84, 83, 82, 81, 80] + ) + assert_array_equal(a, b) + + def test_check_odd_method(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd') + b = np.array( + [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15, + -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, + -4, -3, -2, -1, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'symmetric') + b = np.array( + [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + 
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] + ) + + assert_array_equal(a, b) + + def test_check_large_pad_odd(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd') + b = np.array( + [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], + [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], + [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], + [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], + [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], + + [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], + [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], + + [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], + [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], + [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], + [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], + [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], + [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18], + [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'symmetric') + b = np.array( + [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = np.pad([1, 2, 3], 2, 'symmetric') + b = np.array([2, 1, 1, 2, 3, 3, 2]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 3, 'symmetric') + b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_03(self): + a = np.pad([1, 2, 3], 6, 'symmetric') + b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3]) + assert_array_equal(a, b) + + +class TestWrap: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'wrap') + b = np.array( + [75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = np.arange(12) + a = np.reshape(a, (3, 4)) + a = np.pad(a, (10, 12), 'wrap') + b = np.array( + [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 
8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = np.pad([1, 2, 3], 3, 'wrap') + b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 4, 'wrap') + b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) + assert_array_equal(a, b) + + def test_pad_with_zero(self): + a = np.ones((3, 5)) + b = np.pad(a, (0, 5), mode="wrap") + assert_array_equal(a, b[:-5, :-5]) + + def test_repeated_wrapping(self): + """ + Check wrapping on each side individually if the wrapped area is longer + than the original array. + """ + a = np.arange(5) + b = np.pad(a, (12, 0), mode="wrap") + assert_array_equal(np.r_[a, a, a, a][3:], b) + + a = np.arange(5) + b = np.pad(a, (0, 12), mode="wrap") + assert_array_equal(np.r_[a, a, a, a][:-3], b) + + def test_repeated_wrapping_multiple_origin(self): + """ + Assert that 'wrap' pads only with multiples of the original area if + the pad width is larger than the original array. 
+ """ + a = np.arange(4).reshape(2, 2) + a = np.pad(a, [(1, 3), (3, 1)], mode='wrap') + b = np.array( + [[3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0], + [3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0], + [3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0]] + ) + assert_array_equal(a, b) + + +class TestEdge: + def test_check_simple(self): + a = np.arange(12) + a = np.reshape(a, (4, 3)) + a = np.pad(a, ((2, 3), (3, 2)), 'edge') + b = np.array( + [[0, 0, 0, 0, 1, 2, 2, 2], + [0, 0, 0, 0, 1, 2, 2, 2], + + [0, 0, 0, 0, 1, 2, 2, 2], + [3, 3, 3, 3, 4, 5, 5, 5], + [6, 6, 6, 6, 7, 8, 8, 8], + [9, 9, 9, 9, 10, 11, 11, 11], + + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11]] + ) + assert_array_equal(a, b) + + def test_check_width_shape_1_2(self): + # Check a pad_width of the form ((1, 2),). + # Regression test for issue gh-7808. + a = np.array([1, 2, 3]) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.array([1, 1, 2, 3, 3, 3]) + assert_array_equal(padded, expected) + + a = np.array([[1, 2, 3], [4, 5, 6]]) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.pad(a, ((1, 2), (1, 2)), 'edge') + assert_array_equal(padded, expected) + + a = np.arange(24).reshape(2, 3, 4) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.pad(a, ((1, 2), (1, 2), (1, 2)), 'edge') + assert_array_equal(padded, expected) + + +class TestEmpty: + def test_simple(self): + arr = np.arange(24).reshape(4, 6) + result = np.pad(arr, [(2, 3), (3, 1)], mode="empty") + assert result.shape == (9, 10) + assert_equal(arr, result[2:-3, 3:-1]) + + def test_pad_empty_dimension(self): + arr = np.zeros((3, 0, 2)) + result = np.pad(arr, [(0,), (2,), (1,)], mode="empty") + assert result.shape == (3, 4, 4) + + +def test_legacy_vector_functionality(): + def _padwithtens(vector, pad_width, iaxis, kwargs): + vector[:pad_width[0]] = 10 + vector[-pad_width[1]:] = 10 + + a = np.arange(6).reshape(2, 3) + a = np.pad(a, 2, _padwithtens) + b = np.array( + [[10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + + [10, 10, 0, 1, 2, 10, 10], + [10, 10, 3, 4, 5, 10, 10], + + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10]] + ) + assert_array_equal(a, b) + + +def test_unicode_mode(): + a = np.pad([1], 2, mode='constant') + b = np.array([0, 0, 1, 0, 0]) + assert_array_equal(a, b) + + +@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"]) +def test_object_input(mode): + # Regression test for issue gh-11395. 
+ a = np.full((4, 3), fill_value=None) + pad_amt = ((2, 3), (3, 2)) + b = np.full((9, 8), fill_value=None) + assert_array_equal(np.pad(a, pad_amt, mode=mode), b) + + +class TestPadWidth: + @pytest.mark.parametrize("pad_width", [ + (4, 5, 6, 7), + ((1,), (2,), (3,)), + ((1, 2), (3, 4), (5, 6)), + ((3, 4, 5), (0, 1, 2)), + ]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_misshaped_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape((6, 5)) + match = "operands could not be broadcast together" + with pytest.raises(ValueError, match=match): + np.pad(arr, pad_width, mode) + + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_misshaped_pad_width_2(self, mode): + arr = np.arange(30).reshape((6, 5)) + match = ("input operand has more dimensions than allowed by the axis " + "remapping") + with pytest.raises(ValueError, match=match): + np.pad(arr, (((3,), (4,), (5,)), ((0,), (1,), (2,))), mode) + + @pytest.mark.parametrize( + "pad_width", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_negative_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape((6, 5)) + match = "index can't contain negative values" + with pytest.raises(ValueError, match=match): + np.pad(arr, pad_width, mode) + + @pytest.mark.parametrize("pad_width, dtype", [ + ("3", None), + ("word", None), + (None, None), + (object(), None), + (3.4, None), + (((2, 3, 4), (3, 2)), object), + (complex(1, -1), None), + (((-2.1, 3), (3, 2)), None), + ]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_bad_type(self, pad_width, dtype, mode): + arr = np.arange(30).reshape((6, 5)) + match = "`pad_width` must be of integral type." + if dtype is not None: + # avoid DeprecationWarning when not specifying dtype + with pytest.raises(TypeError, match=match): + np.pad(arr, np.array(pad_width, dtype=dtype), mode) + else: + with pytest.raises(TypeError, match=match): + np.pad(arr, pad_width, mode) + with pytest.raises(TypeError, match=match): + np.pad(arr, np.array(pad_width), mode) + + def test_pad_width_as_ndarray(self): + a = np.arange(12) + a = np.reshape(a, (4, 3)) + a = np.pad(a, np.array(((2, 3), (3, 2))), 'edge') + b = np.array( + [[0, 0, 0, 0, 1, 2, 2, 2], + [0, 0, 0, 0, 1, 2, 2, 2], + + [0, 0, 0, 0, 1, 2, 2, 2], + [3, 3, 3, 3, 4, 5, 5, 5], + [6, 6, 6, 6, 7, 8, 8, 8], + [9, 9, 9, 9, 10, 11, 11, 11], + + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11]] + ) + assert_array_equal(a, b) + + @pytest.mark.parametrize("pad_width", [0, (0, 0), ((0, 0), (0, 0))]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_zero_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape(6, 5) + assert_array_equal(arr, np.pad(arr, pad_width, mode=mode)) + + +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_kwargs(mode): + """Test behavior of pad's kwargs for the given mode.""" + allowed = _all_modes[mode] + not_allowed = {} + for kwargs in _all_modes.values(): + if kwargs != allowed: + not_allowed.update(kwargs) + # Test if allowed keyword arguments pass + np.pad([1, 2, 3], 1, mode, **allowed) + # Test if prohibited keyword arguments of other modes raise an error + for key, value in not_allowed.items(): + match = "unsupported keyword arguments for mode '{}'".format(mode) + with pytest.raises(ValueError, match=match): + np.pad([1, 2, 3], 1, mode, **{key: value}) + + +def test_constant_zero_default(): + arr = np.array([1, 1]) + assert_array_equal(np.pad(arr, 
2), [0, 0, 1, 1, 0, 0])
+
+
+@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False])
+def test_unsupported_mode(mode):
+    match = "mode '{}' is not supported".format(mode)
+    with pytest.raises(ValueError, match=match):
+        np.pad([1, 2, 3], 4, mode=mode)
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_non_contiguous_array(mode):
+    arr = np.arange(24).reshape(4, 6)[::2, ::2]
+    result = np.pad(arr, (2, 3), mode)
+    assert result.shape == (7, 8)
+    assert_equal(result[2:-3, 2:-3], arr)
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_memory_layout_persistence(mode):
+    """Test if C and F order is preserved for all pad modes."""
+    x = np.ones((5, 10), order='C')
+    assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"]
+    x = np.ones((5, 10), order='F')
+    assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"]
+
+
+@pytest.mark.parametrize("dtype", _numeric_dtypes)
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_dtype_persistence(dtype, mode):
+    arr = np.zeros((3, 2, 1), dtype=dtype)
+    result = np.pad(arr, 1, mode=mode)
+    assert result.dtype == dtype
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_arraysetops.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_arraysetops.py
new file mode 100644
index 00000000..a180accb
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_arraysetops.py
@@ -0,0 +1,944 @@
+"""Test functions for 1D array set operations.
+
+"""
+import numpy as np
+
+from numpy.testing import (assert_array_equal, assert_equal,
+                           assert_raises, assert_raises_regex)
+from numpy.lib.arraysetops import (
+    ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin
+    )
+import pytest
+
+
+class TestSetOps:
+
+    def test_intersect1d(self):
+        # unique inputs
+        a = np.array([5, 7, 1, 2])
+        b = np.array([2, 4, 3, 1, 5])
+
+        ec = np.array([1, 2, 5])
+        c = intersect1d(a, b, assume_unique=True)
+        assert_array_equal(c, ec)
+
+        # non-unique inputs
+        a = np.array([5, 5, 7, 1, 2])
+        b = np.array([2, 1, 4, 3, 3, 1, 5])
+
+        ed = np.array([1, 2, 5])
+        c = intersect1d(a, b)
+        assert_array_equal(c, ed)
+        assert_array_equal([], intersect1d([], []))
+
+    def test_intersect1d_array_like(self):
+        # See gh-11772
+        class Test:
+            def __array__(self):
+                return np.arange(3)
+
+        a = Test()
+        res = intersect1d(a, a)
+        assert_array_equal(res, a)
+        res = intersect1d([1, 2, 3], [1, 2, 3])
+        assert_array_equal(res, [1, 2, 3])
+
+    def test_intersect1d_indices(self):
+        # unique inputs
+        a = np.array([1, 2, 3, 4])
+        b = np.array([2, 1, 4, 6])
+        c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+        ee = np.array([1, 2, 4])
+        assert_array_equal(c, ee)
+        assert_array_equal(a[i1], ee)
+        assert_array_equal(b[i2], ee)
+
+        # non-unique inputs
+        a = np.array([1, 2, 2, 3, 4, 3, 2])
+        b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
+        c, i1, i2 = intersect1d(a, b, return_indices=True)
+        ef = np.array([1, 2, 3, 4])
+        assert_array_equal(c, ef)
+        assert_array_equal(a[i1], ef)
+        assert_array_equal(b[i2], ef)
+
+        # non-1d, unique inputs
+        a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
+        b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
+        c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+        ui1 = np.unravel_index(i1, a.shape)
+        ui2 = np.unravel_index(i2, b.shape)
+        ea = np.array([2, 6, 7, 8])
+        assert_array_equal(ea, a[ui1])
+        assert_array_equal(ea, b[ui2])
+
+        # non-1d, not assumed to be unique inputs
+        a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
+        b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
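+        # Editorial note, illustrative only: with return_indices=True the
+        # indices refer to the *flattened* inputs, which is why
+        # np.unravel_index is applied below; for example:
+        #   >>> np.intersect1d([3, 1, 2], [2, 3], return_indices=True)
+        #   (array([2, 3]), array([2, 0]), array([0, 1]))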
+ c, i1, i2 = intersect1d(a, b, return_indices=True) + ui1 = np.unravel_index(i1, a.shape) + ui2 = np.unravel_index(i2, b.shape) + ea = np.array([2, 7, 8]) + assert_array_equal(ea, a[ui1]) + assert_array_equal(ea, b[ui2]) + + def test_setxor1d(self): + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5]) + + ec = np.array([3, 4, 7]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + a = np.array([1, 2, 3]) + b = np.array([6, 5, 4]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + a = np.array([1, 8, 2, 3]) + b = np.array([6, 5, 4, 8]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal([], setxor1d([], [])) + + def test_ediff1d(self): + zero_elem = np.array([]) + one_elem = np.array([1]) + two_elem = np.array([1, 2]) + + assert_array_equal([], ediff1d(zero_elem)) + assert_array_equal([0], ediff1d(zero_elem, to_begin=0)) + assert_array_equal([0], ediff1d(zero_elem, to_end=0)) + assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0)) + assert_array_equal([], ediff1d(one_elem)) + assert_array_equal([1], ediff1d(two_elem)) + assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9)) + assert_array_equal([5, 6, 1, 7, 8], + ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8])) + assert_array_equal([1, 9], ediff1d(two_elem, to_end=9)) + assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8])) + assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7)) + assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6])) + + @pytest.mark.parametrize("ary, prepend, append, expected", [ + # should fail because trying to cast + # np.nan standard floating point value + # into an integer array: + (np.array([1, 2, 3], dtype=np.int64), + None, + np.nan, + 'to_end'), + # should fail because attempting + # to downcast to int type: + (np.array([1, 2, 3], dtype=np.int64), + np.array([5, 7, 2], dtype=np.float32), + None, + 'to_begin'), + # should fail because attempting to cast + # two special floating point values + # to integers (on both sides of ary), + # `to_begin` is in the error message as the impl checks this first: + (np.array([1., 3., 9.], dtype=np.int8), + np.nan, + np.nan, + 'to_begin'), + ]) + def test_ediff1d_forbidden_type_casts(self, ary, prepend, append, expected): + # verify resolution of gh-11490 + + # specifically, raise an appropriate + # Exception when attempting to append or + # prepend with an incompatible type + msg = 'dtype of `{}` must be compatible'.format(expected) + with assert_raises_regex(TypeError, msg): + ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + + @pytest.mark.parametrize( + "ary,prepend,append,expected", + [ + (np.array([1, 2, 3], dtype=np.int16), + 2**16, # will be cast to int16 under same kind rule. 
+ 2**16 + 4, + np.array([0, 1, 1, 4], dtype=np.int16)), + (np.array([1, 2, 3], dtype=np.float32), + np.array([5], dtype=np.float64), + None, + np.array([5, 1, 1], dtype=np.float32)), + (np.array([1, 2, 3], dtype=np.int32), + 0, + 0, + np.array([0, 1, 1, 0], dtype=np.int32)), + (np.array([1, 2, 3], dtype=np.int64), + 3, + -9, + np.array([3, 1, 1, -9], dtype=np.int64)), + ] + ) + def test_ediff1d_scalar_handling(self, + ary, + prepend, + append, + expected): + # maintain backwards-compatibility + # of scalar prepend / append behavior + # in ediff1d following fix for gh-11490 + actual = np.ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + assert_equal(actual, expected) + assert actual.dtype == expected.dtype + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin(self, kind): + # the tests for in1d cover most of isin's behavior + # if in1d is removed, would need to change those tests to test + # isin instead. + def _isin_slow(a, b): + b = np.asarray(b).flatten().tolist() + return a in b + isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1}) + + def assert_isin_equal(a, b): + x = isin(a, b, kind=kind) + y = isin_slow(a, b) + assert_array_equal(x, y) + + # multidimensional arrays in both arguments + a = np.arange(24).reshape([2, 3, 4]) + b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]]) + assert_isin_equal(a, b) + + # array-likes as both arguments + c = [(9, 8), (7, 6)] + d = (9, 7) + assert_isin_equal(c, d) + + # zero-d array: + f = np.array(3) + assert_isin_equal(f, b) + assert_isin_equal(a, f) + assert_isin_equal(f, f) + + # scalar: + assert_isin_equal(5, b) + assert_isin_equal(a, 6) + assert_isin_equal(5, 6) + + # empty array-like: + if kind != "table": + # An empty list will become float64, + # which is invalid for kind="table" + x = [] + assert_isin_equal(x, b) + assert_isin_equal(a, x) + assert_isin_equal(x, x) + + # empty array with various types: + for dtype in [bool, np.int64, np.float64]: + if kind == "table" and dtype == np.float64: + continue + + if dtype in {np.int64, np.float64}: + ar = np.array([10, 20, 30], dtype=dtype) + elif dtype in {bool}: + ar = np.array([True, False, False]) + + empty_array = np.array([], dtype=dtype) + + assert_isin_equal(empty_array, ar) + assert_isin_equal(ar, empty_array) + assert_isin_equal(empty_array, empty_array) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_in1d(self, kind): + # we use two different sizes for the b array here to test the + # two different paths in in1d(). 
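+        # Editorial note, a hedged sketch of the two paths: for a small `b`,
+        # in1d compares against each candidate by broadcasting; once `b`
+        # grows past an internal heuristic it switches to a sort-based
+        # merge. Either way the result is elementwise membership, e.g.:
+        #   >>> np.in1d([5, 7, 1, 2], [2, 4, 3, 1, 5])
+        #   array([ True, False,  True,  True])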
+        for mult in (1, 10):
+            # One check without np.array to make sure lists are handled correctly
+            a = [5, 7, 1, 2]
+            b = [2, 4, 3, 1, 5] * mult
+            ec = np.array([True, False, True, True])
+            c = in1d(a, b, assume_unique=True, kind=kind)
+            assert_array_equal(c, ec)
+
+            a[0] = 8
+            ec = np.array([False, False, True, True])
+            c = in1d(a, b, assume_unique=True, kind=kind)
+            assert_array_equal(c, ec)
+
+            a[0], a[3] = 4, 8
+            ec = np.array([True, False, True, False])
+            c = in1d(a, b, assume_unique=True, kind=kind)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
+            b = [2, 3, 4] * mult
+            ec = [False, True, False, True, True, True, True, True, True,
+                  False, True, False, False, False]
+            c = in1d(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+            b = b + [5, 5, 4] * mult
+            ec = [True, True, True, True, True, True, True, True, True, True,
+                  True, False, True, True]
+            c = in1d(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 7, 1, 2])
+            b = np.array([2, 4, 3, 1, 5] * mult)
+            ec = np.array([True, False, True, True])
+            c = in1d(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 7, 1, 1, 2])
+            b = np.array([2, 4, 3, 3, 1, 5] * mult)
+            ec = np.array([True, False, True, True, True])
+            c = in1d(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 5])
+            b = np.array([2, 2] * mult)
+            ec = np.array([False, False])
+            c = in1d(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+        a = np.array([5])
+        b = np.array([2])
+        ec = np.array([False])
+        c = in1d(a, b, kind=kind)
+        assert_array_equal(c, ec)
+
+        if kind in {None, "sort"}:
+            assert_array_equal(in1d([], [], kind=kind), [])
+
+    def test_in1d_char_array(self):
+        a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b'])
+        b = np.array(['a', 'c'])
+
+        ec = np.array([True, False, True, False, False, True, False, False])
+        c = in1d(a, b)
+
+        assert_array_equal(c, ec)
+
+    @pytest.mark.parametrize("kind", [None, "sort", "table"])
+    def test_in1d_invert(self, kind):
+        "Test in1d's invert parameter"
+        # We use two different sizes for the b array here to test the
+        # two different paths in in1d().
+        for mult in (1, 10):
+            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
+            b = [2, 3, 4] * mult
+            assert_array_equal(np.invert(in1d(a, b, kind=kind)),
+                               in1d(a, b, invert=True, kind=kind))
+
+        # float:
+        if kind in {None, "sort"}:
+            for mult in (1, 10):
+                a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5],
+                             dtype=np.float32)
+                b = [2, 3, 4] * mult
+                b = np.array(b, dtype=np.float32)
+                assert_array_equal(np.invert(in1d(a, b, kind=kind)),
+                                   in1d(a, b, invert=True, kind=kind))
+
+    @pytest.mark.parametrize("kind", [None, "sort", "table"])
+    def test_in1d_ravel(self, kind):
+        # Test that in1d ravels its input arrays. This is not documented
+        # behavior, however; the test is here to ensure consistency.
+ a = np.arange(6).reshape(2, 3) + b = np.arange(3, 9).reshape(3, 2) + long_b = np.arange(3, 63).reshape(30, 2) + ec = np.array([False, False, False, True, True, True]) + + assert_array_equal(in1d(a, b, assume_unique=True, kind=kind), + ec) + assert_array_equal(in1d(a, b, assume_unique=False, + kind=kind), + ec) + assert_array_equal(in1d(a, long_b, assume_unique=True, + kind=kind), + ec) + assert_array_equal(in1d(a, long_b, assume_unique=False, + kind=kind), + ec) + + def test_in1d_hit_alternate_algorithm(self): + """Hit the standard isin code with integers""" + # Need extreme range to hit standard code + # This hits it without the use of kind='table' + a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64) + b = np.array([2, 3, 4, 1e9], dtype=np.int64) + expected = np.array([0, 1, 0, 1, 1, 1, 1], dtype=bool) + assert_array_equal(expected, in1d(a, b)) + assert_array_equal(np.invert(expected), in1d(a, b, invert=True)) + + a = np.array([5, 7, 1, 2], dtype=np.int64) + b = np.array([2, 4, 3, 1, 5, 1e9], dtype=np.int64) + ec = np.array([True, False, True, True]) + c = in1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_in1d_boolean(self, kind): + """Test that in1d works for boolean input""" + a = np.array([True, False]) + b = np.array([False, False, False]) + expected = np.array([False, True]) + assert_array_equal(expected, + in1d(a, b, kind=kind)) + assert_array_equal(np.invert(expected), + in1d(a, b, invert=True, kind=kind)) + + @pytest.mark.parametrize("kind", [None, "sort"]) + def test_in1d_timedelta(self, kind): + """Test that in1d works for timedelta input""" + rstate = np.random.RandomState(0) + a = rstate.randint(0, 100, size=10) + b = rstate.randint(0, 100, size=10) + truth = in1d(a, b) + a_timedelta = a.astype("timedelta64[s]") + b_timedelta = b.astype("timedelta64[s]") + assert_array_equal(truth, in1d(a_timedelta, b_timedelta, kind=kind)) + + def test_in1d_table_timedelta_fails(self): + a = np.array([0, 1, 2], dtype="timedelta64[s]") + b = a + # Make sure it raises a value error: + with pytest.raises(ValueError): + in1d(a, b, kind="table") + + @pytest.mark.parametrize( + "dtype1,dtype2", + [ + (np.int8, np.int16), + (np.int16, np.int8), + (np.uint8, np.uint16), + (np.uint16, np.uint8), + (np.uint8, np.int16), + (np.int16, np.uint8), + ] + ) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_in1d_mixed_dtype(self, dtype1, dtype2, kind): + """Test that in1d works as expected for mixed dtype input.""" + is_dtype2_signed = np.issubdtype(dtype2, np.signedinteger) + ar1 = np.array([0, 0, 1, 1], dtype=dtype1) + + if is_dtype2_signed: + ar2 = np.array([-128, 0, 127], dtype=dtype2) + else: + ar2 = np.array([127, 0, 255], dtype=dtype2) + + expected = np.array([True, True, False, False]) + + expect_failure = kind == "table" and any(( + dtype1 == np.int8 and dtype2 == np.int16, + dtype1 == np.int16 and dtype2 == np.int8 + )) + + if expect_failure: + with pytest.raises(RuntimeError, match="exceed the maximum"): + in1d(ar1, ar2, kind=kind) + else: + assert_array_equal(in1d(ar1, ar2, kind=kind), expected) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_in1d_mixed_boolean(self, kind): + """Test that in1d works as expected for bool/int input.""" + for dtype in np.typecodes["AllInteger"]: + a = np.array([True, False, False], dtype=bool) + b = np.array([0, 0, 0, 0], dtype=dtype) + expected = np.array([False, True, True], dtype=bool) + assert_array_equal(in1d(a, b, kind=kind), 
expected)
+
+            a, b = b, a
+            expected = np.array([True, True, True, True], dtype=bool)
+            assert_array_equal(in1d(a, b, kind=kind), expected)
+
+    def test_in1d_first_array_is_object(self):
+        ar1 = [None]
+        ar2 = np.array([1]*10)
+        expected = np.array([False])
+        result = np.in1d(ar1, ar2)
+        assert_array_equal(result, expected)
+
+    def test_in1d_second_array_is_object(self):
+        ar1 = 1
+        ar2 = np.array([None]*10)
+        expected = np.array([False])
+        result = np.in1d(ar1, ar2)
+        assert_array_equal(result, expected)
+
+    def test_in1d_both_arrays_are_object(self):
+        ar1 = [None]
+        ar2 = np.array([None]*10)
+        expected = np.array([True])
+        result = np.in1d(ar1, ar2)
+        assert_array_equal(result, expected)
+
+    def test_in1d_both_arrays_have_structured_dtype(self):
+        # Test arrays of a structured data type containing an integer field
+        # and a field of dtype `object` allowing for arbitrary Python objects
+        dt = np.dtype([('field1', int), ('field2', object)])
+        ar1 = np.array([(1, None)], dtype=dt)
+        ar2 = np.array([(1, None)]*10, dtype=dt)
+        expected = np.array([True])
+        result = np.in1d(ar1, ar2)
+        assert_array_equal(result, expected)
+
+    def test_in1d_with_arrays_containing_tuples(self):
+        ar1 = np.array([(1,), 2], dtype=object)
+        ar2 = np.array([(1,), 2], dtype=object)
+        expected = np.array([True, True])
+        result = np.in1d(ar1, ar2)
+        assert_array_equal(result, expected)
+        result = np.in1d(ar1, ar2, invert=True)
+        assert_array_equal(result, np.invert(expected))
+
+        # An integer is added at the end of the array to make sure
+        # that the array builder will create the array with tuples
+        # and after it's created the integer is removed.
+        # There's a bug in the array constructor that doesn't handle
+        # tuples properly and adding the integer fixes that.
+        ar1 = np.array([(1,), (2, 1), 1], dtype=object)
+        ar1 = ar1[:-1]
+        ar2 = np.array([(1,), (2, 1), 1], dtype=object)
+        ar2 = ar2[:-1]
+        expected = np.array([True, True])
+        result = np.in1d(ar1, ar2)
+        assert_array_equal(result, expected)
+        result = np.in1d(ar1, ar2, invert=True)
+        assert_array_equal(result, np.invert(expected))
+
+        ar1 = np.array([(1,), (2, 3), 1], dtype=object)
+        ar1 = ar1[:-1]
+        ar2 = np.array([(1,), 2], dtype=object)
+        expected = np.array([True, False])
+        result = np.in1d(ar1, ar2)
+        assert_array_equal(result, expected)
+        result = np.in1d(ar1, ar2, invert=True)
+        assert_array_equal(result, np.invert(expected))
+
+    def test_in1d_errors(self):
+        """Test that in1d raises expected errors."""
+
+        # Error 1: `kind` is not one of 'sort', 'table', or None.
+        ar1 = np.array([1, 2, 3, 4, 5])
+        ar2 = np.array([2, 4, 6, 8, 10])
+        assert_raises(ValueError, in1d, ar1, ar2, kind='quicksort')
+
+        # Error 2: `kind="table"` does not work for non-integral arrays.
+        obj_ar1 = np.array([1, 'a', 3, 'b', 5], dtype=object)
+        obj_ar2 = np.array([1, 'a', 3, 'b', 5], dtype=object)
+        assert_raises(ValueError, in1d, obj_ar1, obj_ar2, kind='table')
+
+        for dtype in [np.int32, np.int64]:
+            ar1 = np.array([-1, 2, 3, 4, 5], dtype=dtype)
+            # The range of this array will overflow:
+            overflow_ar2 = np.array([-1, np.iinfo(dtype).max], dtype=dtype)
+
+            # Error 3: `kind="table"` will trigger a runtime error
+            # if an integer overflow is expected when computing the
+            # range of ar2.
+            assert_raises(
+                RuntimeError,
+                in1d, ar1, overflow_ar2, kind='table'
+            )
+
+            # Non-error: `kind=None` will *not* trigger a runtime error
+            # on integer overflow; it will switch to
+            # the `sort` algorithm.
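+            # Editorial note, illustrative only: kind='table' needs a lookup
+            # table of about max(ar2) - min(ar2) + 1 slots; with ar2 spanning
+            # -1 .. np.iinfo(dtype).max that range itself overflows the
+            # dtype, hence the RuntimeError above and the fallback below.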
+ result = np.in1d(ar1, overflow_ar2, kind=None) + assert_array_equal(result, [True] + [False] * 4) + result = np.in1d(ar1, overflow_ar2, kind='sort') + assert_array_equal(result, [True] + [False] * 4) + + def test_union1d(self): + a = np.array([5, 4, 7, 1, 2]) + b = np.array([2, 4, 3, 3, 2, 1, 5]) + + ec = np.array([1, 2, 3, 4, 5, 7]) + c = union1d(a, b) + assert_array_equal(c, ec) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = np.array([[0, 1, 2], [3, 4, 5]]) + y = np.array([0, 1, 2, 3, 4]) + ez = np.array([0, 1, 2, 3, 4, 5]) + z = union1d(x, y) + assert_array_equal(z, ez) + + assert_array_equal([], union1d([], [])) + + def test_setdiff1d(self): + a = np.array([6, 5, 4, 7, 1, 2, 7, 4]) + b = np.array([2, 4, 3, 3, 2, 1, 5]) + + ec = np.array([6, 7]) + c = setdiff1d(a, b) + assert_array_equal(c, ec) + + a = np.arange(21) + b = np.arange(19) + ec = np.array([19, 20]) + c = setdiff1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal([], setdiff1d([], [])) + a = np.array((), np.uint32) + assert_equal(setdiff1d(a, []).dtype, np.uint32) + + def test_setdiff1d_unique(self): + a = np.array([3, 2, 1]) + b = np.array([7, 5, 2]) + expected = np.array([3, 1]) + actual = setdiff1d(a, b, assume_unique=True) + assert_equal(actual, expected) + + def test_setdiff1d_char_array(self): + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) + + def test_manyways(self): + a = np.array([5, 7, 1, 2, 8]) + b = np.array([9, 8, 2, 4, 3, 1, 5]) + + c1 = setxor1d(a, b) + aux1 = intersect1d(a, b) + aux2 = union1d(a, b) + c2 = setdiff1d(aux2, aux1) + assert_array_equal(c1, c2) + + +class TestUnique: + + def test_unique_1d(self): + + def check_all(a, b, i1, i2, c, dt): + base_msg = 'check {0} failed for type {1}' + + msg = base_msg.format('values', dt) + v = unique(a) + assert_array_equal(v, b, msg) + + msg = base_msg.format('return_index', dt) + v, j = unique(a, True, False, False) + assert_array_equal(v, b, msg) + assert_array_equal(j, i1, msg) + + msg = base_msg.format('return_inverse', dt) + v, j = unique(a, False, True, False) + assert_array_equal(v, b, msg) + assert_array_equal(j, i2, msg) + + msg = base_msg.format('return_counts', dt) + v, j = unique(a, False, False, True) + assert_array_equal(v, b, msg) + assert_array_equal(j, c, msg) + + msg = base_msg.format('return_index and return_inverse', dt) + v, j1, j2 = unique(a, True, True, False) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + + msg = base_msg.format('return_index and return_counts', dt) + v, j1, j2 = unique(a, True, False, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, c, msg) + + msg = base_msg.format('return_inverse and return_counts', dt) + v, j1, j2 = unique(a, False, True, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i2, msg) + assert_array_equal(j2, c, msg) + + msg = base_msg.format(('return_index, return_inverse ' + 'and return_counts'), dt) + v, j1, j2, j3 = unique(a, True, True, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + assert_array_equal(j3, c, msg) + + a = [5, 7, 1, 2, 1, 5, 7]*10 + b = [1, 2, 5, 7] + i1 = [2, 3, 0, 1] + i2 = [2, 3, 0, 1, 0, 2, 3]*10 + c = np.multiply([2, 1, 2, 2], 10) + + # test for numeric arrays + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + 
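+        # Editorial note: np.typecodes maps category names to strings of
+        # one-character dtype codes (e.g. 'b' for int8, 'd' for float64),
+        # so this loop covers every integer and float dtype before the
+        # datetime, timedelta, object and structured cases appended next.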
types.append('datetime64[D]') + types.append('timedelta64[D]') + for dt in types: + aa = np.array(a, dt) + bb = np.array(b, dt) + check_all(aa, bb, i1, i2, c, dt) + + # test for object arrays + dt = 'O' + aa = np.empty(len(a), dt) + aa[:] = a + bb = np.empty(len(b), dt) + bb[:] = b + check_all(aa, bb, i1, i2, c, dt) + + # test for structured arrays + dt = [('', 'i'), ('', 'i')] + aa = np.array(list(zip(a, a)), dt) + bb = np.array(list(zip(b, b)), dt) + check_all(aa, bb, i1, i2, c, dt) + + # test for ticket #2799 + aa = [1. + 0.j, 1 - 1.j, 1] + assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j]) + + # test for ticket #4785 + a = [(1, 2), (1, 2), (2, 3)] + unq = [1, 2, 3] + inv = [0, 1, 0, 1, 1, 2] + a1 = unique(a) + assert_array_equal(a1, unq) + a2, a2_inv = unique(a, return_inverse=True) + assert_array_equal(a2, unq) + assert_array_equal(a2_inv, inv) + + # test for chararrays with return_inverse (gh-5099) + a = np.chararray(5) + a[...] = '' + a2, a2_inv = np.unique(a, return_inverse=True) + assert_array_equal(a2_inv, np.zeros(5)) + + # test for ticket #9137 + a = [] + a1_idx = np.unique(a, return_index=True)[1] + a2_inv = np.unique(a, return_inverse=True)[1] + a3_idx, a3_inv = np.unique(a, return_index=True, + return_inverse=True)[1:] + assert_equal(a1_idx.dtype, np.intp) + assert_equal(a2_inv.dtype, np.intp) + assert_equal(a3_idx.dtype, np.intp) + assert_equal(a3_inv.dtype, np.intp) + + # test for ticket 2111 - float + a = [2.0, np.nan, 1.0, np.nan] + ua = [1.0, 2.0, np.nan] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - complex + a = [2.0-1j, np.nan, 1.0+1j, complex(0.0, np.nan), complex(1.0, np.nan)] + ua = [1.0+1j, 2.0-1j, complex(0.0, np.nan)] + ua_idx = [2, 0, 3] + ua_inv = [1, 2, 0, 2, 2] + ua_cnt = [1, 1, 3] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - datetime64 + nat = np.datetime64('nat') + a = [np.datetime64('2020-12-26'), nat, np.datetime64('2020-12-24'), nat] + ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - timedelta + nat = np.timedelta64('nat') + a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat] + ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for gh-19300 + all_nans = [np.nan] * 4 + ua = [np.nan] + ua_idx = [0] + ua_inv = [0, 0, 0, 0] + ua_cnt = [4] + assert_equal(np.unique(all_nans), ua) + assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv)) + 
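+        # Editorial note (assumption, see gh-19300): np.unique treats NaNs
+        # as equal by default and collapses them into a single entry, so
+        # the counts check below expects cnt == [4].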
assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt)) + + def test_unique_axis_errors(self): + assert_raises(TypeError, self._run_axis_tests, object) + assert_raises(TypeError, self._run_axis_tests, + [('a', int), ('b', object)]) + + assert_raises(np.AxisError, unique, np.arange(10), axis=2) + assert_raises(np.AxisError, unique, np.arange(10), axis=-2) + + def test_unique_axis_list(self): + msg = "Unique failed on list of lists" + inp = [[0, 1, 0], [0, 1, 0]] + inp_arr = np.asarray(inp) + assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg) + assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg) + + def test_unique_axis(self): + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + types.append('datetime64[D]') + types.append('timedelta64[D]') + types.append([('a', int), ('b', int)]) + types.append([('a', int), ('b', float)]) + + for dtype in types: + self._run_axis_tests(dtype) + + msg = 'Non-bitwise-equal booleans test failed' + data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool) + result = np.array([[False, True], [True, True]], dtype=bool) + assert_array_equal(unique(data, axis=0), result, msg) + + msg = 'Negative zero equality test failed' + data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]]) + result = np.array([[-0.0, 0.0]]) + assert_array_equal(unique(data, axis=0), result, msg) + + @pytest.mark.parametrize("axis", [0, -1]) + def test_unique_1d_with_axis(self, axis): + x = np.array([4, 3, 2, 3, 2, 1, 2, 2]) + uniq = unique(x, axis=axis) + assert_array_equal(uniq, [1, 2, 3, 4]) + + def test_unique_axis_zeros(self): + # issue 15559 + single_zero = np.empty(shape=(2, 0), dtype=np.int8) + uniq, idx, inv, cnt = unique(single_zero, axis=0, return_index=True, + return_inverse=True, return_counts=True) + + # there's 1 element of shape (0,) along axis 0 + assert_equal(uniq.dtype, single_zero.dtype) + assert_array_equal(uniq, np.empty(shape=(1, 0))) + assert_array_equal(idx, np.array([0])) + assert_array_equal(inv, np.array([0, 0])) + assert_array_equal(cnt, np.array([2])) + + # there's 0 elements of shape (2,) along axis 1 + uniq, idx, inv, cnt = unique(single_zero, axis=1, return_index=True, + return_inverse=True, return_counts=True) + + assert_equal(uniq.dtype, single_zero.dtype) + assert_array_equal(uniq, np.empty(shape=(2, 0))) + assert_array_equal(idx, np.array([])) + assert_array_equal(inv, np.array([])) + assert_array_equal(cnt, np.array([])) + + # test a "complicated" shape + shape = (0, 2, 0, 3, 0, 4, 0) + multiple_zeros = np.empty(shape=shape) + for axis in range(len(shape)): + expected_shape = list(shape) + if shape[axis] == 0: + expected_shape[axis] = 0 + else: + expected_shape[axis] = 1 + + assert_array_equal(unique(multiple_zeros, axis=axis), + np.empty(shape=expected_shape)) + + def test_unique_masked(self): + # issue 8664 + x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0], + dtype='uint8') + y = np.ma.masked_equal(x, 0) + + v = np.unique(y) + v2, i, c = np.unique(y, return_index=True, return_counts=True) + + msg = 'Unique returned different results when asked for index' + assert_array_equal(v.data, v2.data, msg) + assert_array_equal(v.mask, v2.mask, msg) + + def test_unique_sort_order_with_axis(self): + # These tests fail if sorting along axis is done by treating subarrays + # as unsigned byte strings. See gh-10495. 
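+        # Editorial note, illustrative only: in two's complement -1 is 0xff,
+        # the largest unsigned byte, so a raw-byte comparison would sort it
+        # after 0; the dtype-aware sort must instead yield:
+        #   >>> np.unique(np.array([[-1], [0]], np.int8), axis=0)
+        #   array([[-1],
+        #          [ 0]], dtype=int8)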
+ fmt = "sort order incorrect for integer type '%s'" + for dt in 'bhilq': + a = np.array([[-1], [0]], dt) + b = np.unique(a, axis=0) + assert_array_equal(a, b, fmt % dt) + + def _run_axis_tests(self, dtype): + data = np.array([[0, 1, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [1, 0, 0, 0]]).astype(dtype) + + msg = 'Unique with 1d array and axis=0 failed' + result = np.array([0, 1]) + assert_array_equal(unique(data), result.astype(dtype), msg) + + msg = 'Unique with 2d array and axis=0 failed' + result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]]) + assert_array_equal(unique(data, axis=0), result.astype(dtype), msg) + + msg = 'Unique with 2d array and axis=1 failed' + result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]]) + assert_array_equal(unique(data, axis=1), result.astype(dtype), msg) + + msg = 'Unique with 3d array and axis=2 failed' + data3d = np.array([[[1, 1], + [1, 0]], + [[0, 1], + [0, 0]]]).astype(dtype) + result = np.take(data3d, [1, 0], axis=2) + assert_array_equal(unique(data3d, axis=2), result, msg) + + uniq, idx, inv, cnt = unique(data, axis=0, return_index=True, + return_inverse=True, return_counts=True) + msg = "Unique's return_index=True failed with axis=0" + assert_array_equal(data[idx], uniq, msg) + msg = "Unique's return_inverse=True failed with axis=0" + assert_array_equal(uniq[inv], data) + msg = "Unique's return_counts=True failed with axis=0" + assert_array_equal(cnt, np.array([2, 2]), msg) + + uniq, idx, inv, cnt = unique(data, axis=1, return_index=True, + return_inverse=True, return_counts=True) + msg = "Unique's return_index=True failed with axis=1" + assert_array_equal(data[:, idx], uniq) + msg = "Unique's return_inverse=True failed with axis=1" + assert_array_equal(uniq[:, inv], data) + msg = "Unique's return_counts=True failed with axis=1" + assert_array_equal(cnt, np.array([2, 1, 1]), msg) + + def test_unique_nanequals(self): + # issue 20326 + a = np.array([1, 1, np.nan, np.nan, np.nan]) + unq = np.unique(a) + not_unq = np.unique(a, equal_nan=False) + assert_array_equal(unq, np.array([1, np.nan])) + assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan])) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_arrayterator.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_arrayterator.py new file mode 100644 index 00000000..c00ed13d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_arrayterator.py @@ -0,0 +1,46 @@ +from operator import mul +from functools import reduce + +import numpy as np +from numpy.random import randint +from numpy.lib import Arrayterator +from numpy.testing import assert_ + + +def test(): + np.random.seed(np.arange(10)) + + # Create a random array + ndims = randint(5)+1 + shape = tuple(randint(10)+1 for dim in range(ndims)) + els = reduce(mul, shape) + a = np.arange(els) + a.shape = shape + + buf_size = randint(2*els) + b = Arrayterator(a, buf_size) + + # Check that each block has at most ``buf_size`` elements + for block in b: + assert_(len(block.flat) <= (buf_size or els)) + + # Check that all elements are iterated correctly + assert_(list(b.flat) == list(a.flat)) + + # Slice arrayterator + start = [randint(dim) for dim in shape] + stop = [randint(dim)+1 for dim in shape] + step = [randint(dim)+1 for dim in shape] + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + c = b[slice_] + d = a[slice_] + + # Check that each block has at most ``buf_size`` elements + for block in c: + assert_(len(block.flat) <= (buf_size or els)) + + # Check that the arrayterator 
is sliced correctly + assert_(np.all(c.__array__() == d)) + + # Check that all elements are iterated correctly + assert_(list(c.flat) == list(d.flat)) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_financial_expired.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_financial_expired.py new file mode 100644 index 00000000..838f999a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_financial_expired.py @@ -0,0 +1,11 @@ +import sys +import pytest +import numpy as np + + +def test_financial_expired(): + match = 'NEP 32' + with pytest.warns(DeprecationWarning, match=match): + func = np.fv + with pytest.raises(RuntimeError, match=match): + func(1, 2, 3) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_format.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_format.py new file mode 100644 index 00000000..3bbbb215 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_format.py @@ -0,0 +1,1028 @@ +# doctest +r''' Test the .npy file format. + +Set up: + + >>> import sys + >>> from io import BytesIO + >>> from numpy.lib import format + >>> + >>> scalars = [ + ... np.uint8, + ... np.int8, + ... np.uint16, + ... np.int16, + ... np.uint32, + ... np.int32, + ... np.uint64, + ... np.int64, + ... np.float32, + ... np.float64, + ... np.complex64, + ... np.complex128, + ... object, + ... ] + >>> + >>> basic_arrays = [] + >>> + >>> for scalar in scalars: + ... for endian in '<>': + ... dtype = np.dtype(scalar).newbyteorder(endian) + ... basic = np.arange(15).astype(dtype) + ... basic_arrays.extend([ + ... np.array([], dtype=dtype), + ... np.array(10, dtype=dtype), + ... basic, + ... basic.reshape((3,5)), + ... basic.reshape((3,5)).T, + ... basic.reshape((3,5))[::-1,::2], + ... ]) + ... + >>> + >>> Pdescr = [ + ... ('x', 'i4', (2,)), + ... ('y', 'f8', (2, 2)), + ... ('z', 'u1')] + >>> + >>> + >>> PbufferT = [ + ... ([3,2], [[6.,4.],[6.,4.]], 8), + ... ([4,3], [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> Ndescr = [ + ... ('x', 'i4', (2,)), + ... ('Info', [ + ... ('value', 'c16'), + ... ('y2', 'f8'), + ... ('Info2', [ + ... ('name', 'S2'), + ... ('value', 'c16', (2,)), + ... ('y3', 'f8', (2,)), + ... ('z3', 'u4', (2,))]), + ... ('name', 'S2'), + ... ('z2', 'b1')]), + ... ('color', 'S2'), + ... ('info', [ + ... ('Name', 'U8'), + ... ('Value', 'c16')]), + ... ('y', 'f8', (2, 2)), + ... ('z', 'u1')] + >>> + >>> + >>> NbufferT = [ + ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), + ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> record_arrays = [ + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), + ... ] + +Test the magic string writing. + + >>> format.magic(1, 0) + '\x93NUMPY\x01\x00' + >>> format.magic(0, 0) + '\x93NUMPY\x00\x00' + >>> format.magic(255, 255) + '\x93NUMPY\xff\xff' + >>> format.magic(2, 5) + '\x93NUMPY\x02\x05' + +Test the magic string reading. 
+ + >>> format.read_magic(BytesIO(format.magic(1, 0))) + (1, 0) + >>> format.read_magic(BytesIO(format.magic(0, 0))) + (0, 0) + >>> format.read_magic(BytesIO(format.magic(255, 255))) + (255, 255) + >>> format.read_magic(BytesIO(format.magic(2, 5))) + (2, 5) + +Test the header writing. + + >>> for arr in basic_arrays + record_arrays: + ... f = BytesIO() + ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it + ... print(repr(f.getvalue())) + ... + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 
'shape': (15,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c16', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "v\x00{'descr': [('x', 'i4', (2,)), ('y', '>f8', 
(2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" + "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" +''' +import sys +import os +import warnings +import pytest +from io import BytesIO + +import numpy as np +from numpy.testing import ( + assert_, assert_array_equal, assert_raises, assert_raises_regex, + assert_warns, IS_PYPY, IS_WASM + ) +from numpy.testing._private.utils import requires_memory +from numpy.lib import format + + +# Generate some basic arrays to test with. +scalars = [ + np.uint8, + np.int8, + np.uint16, + np.int16, + np.uint32, + np.int32, + np.uint64, + np.int64, + np.float32, + np.float64, + np.complex64, + np.complex128, + object, +] +basic_arrays = [] +for scalar in scalars: + for endian in '<>': + dtype = np.dtype(scalar).newbyteorder(endian) + basic = np.arange(1500).astype(dtype) + basic_arrays.extend([ + # Empty + np.array([], dtype=dtype), + # Rank-0 + np.array(10, dtype=dtype), + # 1-D + basic, + # 2-D C-contiguous + basic.reshape((30, 50)), + # 2-D F-contiguous + basic.reshape((30, 50)).T, + # 2-D non-contiguous + basic.reshape((30, 50))[::-1, ::2], + ]) + +# More complicated record arrays. +# This is the structure of the table used for plain objects: +# +# +-+-+-+ +# |x|y|z| +# +-+-+-+ + +# Structure of a plain array description: +Pdescr = [ + ('x', 'i4', (2,)), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +# A plain list of tuples with values for testing: +PbufferT = [ + # x y z + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), + ] + + +# This is the structure of the table used for nested objects (DON'T PANIC!): +# +# +-+---------------------------------+-----+----------+-+-+ +# |x|Info |color|info |y|z| +# | +-----+--+----------------+----+--+ +----+-----+ | | +# | |value|y2|Info2 |name|z2| |Name|Value| | | +# | | | +----+-----+--+--+ | | | | | | | +# | | | |name|value|y3|z3| | | | | | | | +# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ +# + +# The corresponding nested array description: +Ndescr = [ + ('x', 'i4', (2,)), + ('Info', [ + ('value', 'c16'), + ('y2', 'f8'), + ('Info2', [ + ('name', 'S2'), + ('value', 'c16', (2,)), + ('y3', 'f8', (2,)), + ('z3', 'u4', (2,))]), + ('name', 'S2'), + ('z2', 'b1')]), + ('color', 'S2'), + ('info', [ + ('Name', 'U8'), + ('Value', 'c16')]), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +NbufferT = [ + # x Info color info y z + # value y2 Info2 name z2 Name Value + # name value y3 z3 + ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), + 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), + 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), + ] + +record_arrays = [ + np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), + np.zeros(1, dtype=[('c', ('= (3, 12), reason="see gh-23988") +@pytest.mark.xfail(IS_WASM, reason="Emscripten NODEFS has a buggy dup") +def test_python2_python3_interoperability(): + fname = 'win64python2.npy' + path = os.path.join(os.path.dirname(__file__), 
'data', fname) + with pytest.warns(UserWarning, match="Reading.*this warning\\."): + data = np.load(path) + assert_array_equal(data, np.ones(2)) + +def test_pickle_python2_python3(): + # Test that loading object arrays saved on Python 2 works both on + # Python 2 and Python 3 and vice versa + data_dir = os.path.join(os.path.dirname(__file__), 'data') + + expected = np.array([None, range, '\u512a\u826f', + b'\xe4\xb8\x8d\xe8\x89\xaf'], + dtype=object) + + for fname in ['py2-objarr.npy', 'py2-objarr.npz', + 'py3-objarr.npy', 'py3-objarr.npz']: + path = os.path.join(data_dir, fname) + + for encoding in ['bytes', 'latin1']: + data_f = np.load(path, allow_pickle=True, encoding=encoding) + if fname.endswith('.npz'): + data = data_f['x'] + data_f.close() + else: + data = data_f + + if encoding == 'latin1' and fname.startswith('py2'): + assert_(isinstance(data[3], str)) + assert_array_equal(data[:-1], expected[:-1]) + # mojibake occurs + assert_array_equal(data[-1].encode(encoding), expected[-1]) + else: + assert_(isinstance(data[3], bytes)) + assert_array_equal(data, expected) + + if fname.startswith('py2'): + if fname.endswith('.npz'): + data = np.load(path, allow_pickle=True) + assert_raises(UnicodeError, data.__getitem__, 'x') + data.close() + data = np.load(path, allow_pickle=True, fix_imports=False, + encoding='latin1') + assert_raises(ImportError, data.__getitem__, 'x') + data.close() + else: + assert_raises(UnicodeError, np.load, path, + allow_pickle=True) + assert_raises(ImportError, np.load, path, + allow_pickle=True, fix_imports=False, + encoding='latin1') + + +def test_pickle_disallow(tmpdir): + data_dir = os.path.join(os.path.dirname(__file__), 'data') + + path = os.path.join(data_dir, 'py2-objarr.npy') + assert_raises(ValueError, np.load, path, + allow_pickle=False, encoding='latin1') + + path = os.path.join(data_dir, 'py2-objarr.npz') + with np.load(path, allow_pickle=False, encoding='latin1') as f: + assert_raises(ValueError, f.__getitem__, 'x') + + path = os.path.join(tmpdir, 'pickle-disabled.npy') + assert_raises(ValueError, np.save, path, np.array([None], dtype=object), + allow_pickle=False) + +@pytest.mark.parametrize('dt', [ + np.dtype(np.dtype([('a', np.int8), + ('b', np.int16), + ('c', np.int32), + ], align=True), + (3,)), + np.dtype([('x', np.dtype({'names':['a','b'], + 'formats':['i1','i1'], + 'offsets':[0,4], + 'itemsize':8, + }, + (3,)), + (4,), + )]), + np.dtype([('x', + (' 1, a) + assert_array_equal(b, [3, 2, 2, 3, 3]) + + def test_place(self): + # Make sure that non-np.ndarray objects + # raise an error instead of doing nothing + assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1]) + + a = np.array([1, 4, 3, 2, 5, 8, 7]) + place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6]) + assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7]) + + place(a, np.zeros(7), []) + assert_array_equal(a, np.arange(1, 8)) + + place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9]) + assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9]) + assert_raises_regex(ValueError, "Cannot insert from an empty array", + lambda: place(a, [0, 0, 0, 0, 0, 1, 0], [])) + + # See Issue #6974 + a = np.array(['12', '34']) + place(a, [0, 1], '9') + assert_array_equal(a, ['12', '9']) + + def test_both(self): + a = rand(10) + mask = a > 0.5 + ac = a.copy() + c = extract(mask, a) + place(a, mask, 0) + place(a, mask, c) + assert_array_equal(a, ac) + + +# _foo1 and _foo2 are used in some tests in TestVectorize. 
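+# Editorial note, illustrative only: keeping them at module level gives
+# vectorize stable functions (with keyword defaults) to wrap, e.g.:
+#   >>> np.vectorize(_foo1, otypes=[float])(np.array([1.5, 2.5]))
+#   array([1., 2.])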
+ +def _foo1(x, y=1.0): + return y*math.floor(x) + + +def _foo2(x, y=1.0, z=0.0): + return y*math.floor(x) + z + + +class TestVectorize: + + def test_simple(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract) + r = f([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_scalar(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract) + r = f([0, 3, 6, 9], 5) + assert_array_equal(r, [5, 8, 1, 4]) + + def test_large(self): + x = np.linspace(-3, 2, 10000) + f = vectorize(lambda x: x) + y = f(x) + assert_array_equal(y, x) + + def test_ufunc(self): + f = vectorize(math.cos) + args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi]) + r1 = f(args) + r2 = np.cos(args) + assert_array_almost_equal(r1, r2) + + def test_keywords(self): + + def foo(a, b=1): + return a + b + + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(args, 2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order1(self): + # gh-1620: The second call of f would crash with + # `ValueError: invalid number of arguments`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0), 1.0) + r2 = f(np.arange(3.0)) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order2(self): + # gh-1620: The second call of f would crash with + # `ValueError: non-broadcastable output operand with shape () + # doesn't match the broadcast shape (3,)`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0)) + r2 = f(np.arange(3.0), 1.0) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order3(self): + # gh-1620: The third call of f would crash with + # `ValueError: invalid number of arguments`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0)) + r2 = f(np.arange(3.0), y=1.0) + r3 = f(np.arange(3.0)) + assert_array_equal(r1, r2) + assert_array_equal(r1, r3) + + def test_keywords_with_otypes_several_kwd_args1(self): + # gh-1620 Make sure different uses of keyword arguments + # don't break the vectorized function. + f = vectorize(_foo2, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(10.4, z=100) + r2 = f(10.4, y=-1) + r3 = f(10.4) + assert_equal(r1, _foo2(10.4, z=100)) + assert_equal(r2, _foo2(10.4, y=-1)) + assert_equal(r3, _foo2(10.4)) + + def test_keywords_with_otypes_several_kwd_args2(self): + # gh-1620 Make sure different uses of keyword arguments + # don't break the vectorized function. + f = vectorize(_foo2, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. 
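+        # Editorial note: _foo2(x, y=1.0, z=0.0) computes y*floor(x) + z,
+        # so the first call below is -1 * floor(10.4) + 100 == 90.0; the
+        # point is that reordered keywords reuse the cached ufunc correctly.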
+ r1 = f(z=100, x=10.4, y=-1) + r2 = f(1, 2, 3) + assert_equal(r1, _foo2(z=100, x=10.4, y=-1)) + assert_equal(r2, _foo2(1, 2, 3)) + + def test_keywords_no_func_code(self): + # This needs to test a function that has keywords but + # no func_code attribute, since otherwise vectorize will + # inspect the func_code. + import random + try: + vectorize(random.randrange) # Should succeed + except Exception: + raise AssertionError() + + def test_keywords2_ticket_2100(self): + # Test kwarg support: enhancement ticket 2100 + + def foo(a, b=1): + return a + b + + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(a=args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(b=1, a=args) + assert_array_equal(r1, r2) + r1 = f(args, b=2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords3_ticket_2100(self): + # Test excluded with mixed positional and kwargs: ticket 2100 + def mypolyval(x, p): + _p = list(p) + res = _p.pop(0) + while _p: + res = res * x + _p.pop(0) + return res + + vpolyval = np.vectorize(mypolyval, excluded=['p', 1]) + ans = [3, 6] + assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3])) + + def test_keywords4_ticket_2100(self): + # Test vectorizing function with no positional args. + @vectorize + def f(**kw): + res = 1.0 + for _k in kw: + res *= kw[_k] + return res + + assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8]) + + def test_keywords5_ticket_2100(self): + # Test vectorizing function with no kwargs args. + @vectorize + def f(*v): + return np.prod(v) + + assert_array_equal(f([1, 2], [3, 4]), [3, 8]) + + def test_coverage1_ticket_2100(self): + def foo(): + return 1 + + f = vectorize(foo) + assert_array_equal(f(), 1) + + def test_assigning_docstring(self): + def foo(x): + """Original documentation""" + return x + + f = vectorize(foo) + assert_equal(f.__doc__, foo.__doc__) + + doc = "Provided documentation" + f = vectorize(foo, doc=doc) + assert_equal(f.__doc__, doc) + + def test_UnboundMethod_ticket_1156(self): + # Regression test for issue 1156 + class Foo: + b = 2 + + def bar(self, a): + return a ** self.b + + assert_array_equal(vectorize(Foo().bar)(np.arange(9)), + np.arange(9) ** 2) + assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)), + np.arange(9) ** 2) + + def test_execution_order_ticket_1487(self): + # Regression test for dependence on execution order: issue 1487 + f1 = vectorize(lambda x: x) + res1a = f1(np.arange(3)) + res1b = f1(np.arange(0.1, 3)) + f2 = vectorize(lambda x: x) + res2b = f2(np.arange(0.1, 3)) + res2a = f2(np.arange(3)) + assert_equal(res1a, res2a) + assert_equal(res1b, res2b) + + def test_string_ticket_1892(self): + # Test vectorization over strings: issue 1892. + f = np.vectorize(lambda x: x) + s = '0123456789' * 10 + assert_equal(s, f(s)) + + def test_cache(self): + # Ensure that vectorized func called exactly once per argument. 
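+ # With cache=True, vectorize reuses the result of the first call (the + # call made to determine the output dtype), so the Python function runs + # exactly once per element.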
+ _calls = [0] + + @vectorize + def f(x): + _calls[0] += 1 + return x ** 2 + + f.cache = True + x = np.arange(5) + assert_array_equal(f(x), x * x) + assert_equal(_calls[0], len(x)) + + def test_otypes(self): + f = np.vectorize(lambda x: x) + f.otypes = 'i' + x = np.arange(5) + assert_array_equal(f(x), x) + + def test_parse_gufunc_signature(self): + assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()])) + assert_equal(nfb._parse_gufunc_signature('(x,y)->()'), + ([('x', 'y')], [()])) + assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'), + ([('x',), ('y',)], [()])) + assert_equal(nfb._parse_gufunc_signature('(x)->(y)'), + ([('x',)], [('y',)])) + assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'), + ([('x',)], [('y',), ()])) + assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'), + ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) + + # Tests to check if whitespaces are ignored + assert_equal(nfb._parse_gufunc_signature('(x )->()'), ([('x',)], [()])) + assert_equal(nfb._parse_gufunc_signature('( x , y )->( )'), + ([('x', 'y')], [()])) + assert_equal(nfb._parse_gufunc_signature('(x),( y) ->()'), + ([('x',), ('y',)], [()])) + assert_equal(nfb._parse_gufunc_signature('( x)-> (y ) '), + ([('x',)], [('y',)])) + assert_equal(nfb._parse_gufunc_signature(' (x)->( y),( )'), + ([('x',)], [('y',), ()])) + assert_equal(nfb._parse_gufunc_signature( + '( ), ( a, b,c ) ,( d) -> (d , e)'), + ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) + + with assert_raises(ValueError): + nfb._parse_gufunc_signature('(x)(y)->()') + with assert_raises(ValueError): + nfb._parse_gufunc_signature('(x),(y)->') + with assert_raises(ValueError): + nfb._parse_gufunc_signature('((x))->(x)') + + def test_signature_simple(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract, signature='(),()->()') + r = f([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_signature_mean_last(self): + def mean(a): + return a.mean() + + f = vectorize(mean, signature='(n)->()') + r = f([[1, 3], [2, 4]]) + assert_array_equal(r, [2, 3]) + + def test_signature_center(self): + def center(a): + return a - a.mean() + + f = vectorize(center, signature='(n)->(n)') + r = f([[1, 3], [2, 4]]) + assert_array_equal(r, [[-1, 1], [-1, 1]]) + + def test_signature_two_outputs(self): + f = vectorize(lambda x: (x, x), signature='()->(),()') + r = f([1, 2, 3]) + assert_(isinstance(r, tuple) and len(r) == 2) + assert_array_equal(r[0], [1, 2, 3]) + assert_array_equal(r[1], [1, 2, 3]) + + def test_signature_outer(self): + f = vectorize(np.outer, signature='(a),(b)->(a,b)') + r = f([1, 2], [1, 2, 3]) + assert_array_equal(r, [[1, 2, 3], [2, 4, 6]]) + + r = f([[[1, 2]]], [1, 2, 3]) + assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]]) + + r = f([[1, 0], [2, 0]], [1, 2, 3]) + assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]], + [[2, 4, 6], [0, 0, 0]]]) + + r = f([1, 2], [[1, 2, 3], [0, 0, 0]]) + assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]], + [[0, 0, 0], [0, 0, 0]]]) + + def test_signature_computed_size(self): + f = vectorize(lambda x: x[:-1], signature='(n)->(m)') + r = f([1, 2, 3]) + assert_array_equal(r, [1, 2]) + + r = f([[1, 2, 3], [2, 3, 4]]) + assert_array_equal(r, [[1, 2], [2, 3]]) + + def test_signature_excluded(self): + + def foo(a, b=1): + return a + b + + f = vectorize(foo, signature='()->()', excluded={'b'}) + assert_array_equal(f([1, 2, 3]), [2, 3, 4]) + assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3]) + + def test_signature_otypes(self): + f = 
vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64']) + r = f([1, 2, 3]) + assert_equal(r.dtype, np.dtype('float64')) + assert_array_equal(r, [1, 2, 3]) + + def test_signature_invalid_inputs(self): + f = vectorize(operator.add, signature='(n),(n)->(n)') + with assert_raises_regex(TypeError, 'wrong number of positional'): + f([1, 2]) + with assert_raises_regex( + ValueError, 'does not have enough dimensions'): + f(1, 2) + with assert_raises_regex( + ValueError, 'inconsistent size for core dimension'): + f([1, 2], [1, 2, 3]) + + f = vectorize(operator.add, signature='()->()') + with assert_raises_regex(TypeError, 'wrong number of positional'): + f(1, 2) + + def test_signature_invalid_outputs(self): + + f = vectorize(lambda x: x[:-1], signature='(n)->(n)') + with assert_raises_regex( + ValueError, 'inconsistent size for core dimension'): + f([1, 2, 3]) + + f = vectorize(lambda x: x, signature='()->(),()') + with assert_raises_regex(ValueError, 'wrong number of outputs'): + f(1) + + f = vectorize(lambda x: (x, x), signature='()->()') + with assert_raises_regex(ValueError, 'wrong number of outputs'): + f([1, 2]) + + def test_size_zero_output(self): + # see issue 5868 + f = np.vectorize(lambda x: x) + x = np.zeros([0, 5], dtype=int) + with assert_raises_regex(ValueError, 'otypes'): + f(x) + + f.otypes = 'i' + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='()->()') + with assert_raises_regex(ValueError, 'otypes'): + f(x) + + f = np.vectorize(lambda x: x, signature='()->()', otypes='i') + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i') + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='(n)->(n)') + assert_array_equal(f(x.T), x.T) + + f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i') + with assert_raises_regex(ValueError, 'new output dimensions'): + f(x) + + def test_subclasses(self): + class subclass(np.ndarray): + pass + + m = np.array([[1., 0., 0.], + [0., 0., 1.], + [0., 1., 0.]]).view(subclass) + v = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]).view(subclass) + # generalized (gufunc) + matvec = np.vectorize(np.matmul, signature='(m,m),(m)->(m)') + r = matvec(m, v) + assert_equal(type(r), subclass) + assert_equal(r, [[1., 3., 2.], [4., 6., 5.], [7., 9., 8.]]) + + # element-wise (ufunc) + mult = np.vectorize(lambda x, y: x*y) + r = mult(m, v) + assert_equal(type(r), subclass) + assert_equal(r, m * v) + + def test_name(self): + #See gh-23021 + @np.vectorize + def f2(a, b): + return a + b + + assert f2.__name__ == 'f2' + + def test_decorator(self): + @vectorize + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + r = addsubtract([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_docstring(self): + @vectorize + def f(x): + """Docstring""" + return x + + if sys.flags.optimize < 2: + assert f.__doc__ == "Docstring" + + def test_partial(self): + def foo(x, y): + return x + y + + bar = partial(foo, 3) + vbar = np.vectorize(bar) + assert vbar(1) == 4 + + def test_signature_otypes_decorator(self): + @vectorize(signature='(n)->(n)', otypes=['float64']) + def f(x): + return x + + r = f([1, 2, 3]) + assert_equal(r.dtype, np.dtype('float64')) + assert_array_equal(r, [1, 2, 3]) + assert f.__name__ == 'f' + + def test_bad_input(self): + with assert_raises(TypeError): + A = np.vectorize(pyfunc = 3) + + def test_no_keywords(self): + with assert_raises(TypeError): + @np.vectorize("string") + def foo(): + return "bar" + + def 
test_positional_regression_9477(self): + # This supplies the first keyword argument as a positional, + # to ensure that they are still properly forwarded after the + # enhancement for #9477 + f = vectorize((lambda x: x), ['float64']) + r = f([2]) + assert_equal(r.dtype, np.dtype('float64')) + + +class TestLeaks: + class A: + iters = 20 + + def bound(self, *args): + return 0 + + @staticmethod + def unbound(*args): + return 0 + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.parametrize('name, incr', [ + ('bound', A.iters), + ('unbound', 0), + ]) + def test_frompyfunc_leaks(self, name, incr): + # exposed in gh-11867 as np.vectorize, but the problem stems from + # frompyfunc. + # class.attribute = np.frompyfunc(<method>) creates a + # reference cycle if <method> is a bound class method. It requires a + # gc collection cycle to break the cycle (on CPython 3) + import gc + A_func = getattr(self.A, name) + gc.disable() + try: + refcount = sys.getrefcount(A_func) + for i in range(self.A.iters): + a = self.A() + a.f = np.frompyfunc(getattr(a, name), 1, 1) + out = a.f(np.arange(10)) + a = None + # A.func is part of a reference cycle if incr is non-zero + assert_equal(sys.getrefcount(A_func), refcount + incr) + for i in range(5): + gc.collect() + assert_equal(sys.getrefcount(A_func), refcount) + finally: + gc.enable() + + +class TestDigitize: + + def test_forward(self): + x = np.arange(-6, 5) + bins = np.arange(-5, 5) + assert_array_equal(digitize(x, bins), np.arange(11)) + + def test_reverse(self): + x = np.arange(5, -6, -1) + bins = np.arange(5, -5, -1) + assert_array_equal(digitize(x, bins), np.arange(11)) + + def test_random(self): + x = rand(10) + bin = np.linspace(x.min(), x.max(), 10) + assert_(np.all(digitize(x, bin) != 0)) + + def test_right_basic(self): + x = [1, 5, 4, 10, 8, 11, 0] + bins = [1, 5, 10] + default_answer = [1, 2, 1, 3, 2, 3, 0] + assert_array_equal(digitize(x, bins), default_answer) + right_answer = [0, 1, 1, 2, 2, 3, 0] + assert_array_equal(digitize(x, bins, True), right_answer) + + def test_right_open(self): + x = np.arange(-6, 5) + bins = np.arange(-6, 4) + assert_array_equal(digitize(x, bins, True), np.arange(11)) + + def test_right_open_reverse(self): + x = np.arange(5, -6, -1) + bins = np.arange(4, -6, -1) + assert_array_equal(digitize(x, bins, True), np.arange(11)) + + def test_right_open_random(self): + x = rand(10) + bins = np.linspace(x.min(), x.max(), 10) + assert_(np.all(digitize(x, bins, True) != 10)) + + def test_monotonic(self): + x = [-1, 0, 1, 2] + bins = [0, 0, 1] + assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3]) + assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3]) + bins = [1, 1, 0] + assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0]) + assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0]) + bins = [1, 1, 1, 1] + assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4]) + assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4]) + bins = [0, 0, 1, 0] + assert_raises(ValueError, digitize, x, bins) + bins = [1, 1, 0, 1] + assert_raises(ValueError, digitize, x, bins) + + def test_casting_error(self): + x = [1, 2, 3 + 1.j] + bins = [1, 2, 3] + assert_raises(TypeError, digitize, x, bins) + x, bins = bins, x + assert_raises(TypeError, digitize, x, bins) + + def test_return_type(self): + # Functions returning indices should always return base ndarrays + class A(np.ndarray): + pass + a = np.arange(5).view(A) + b = np.arange(1, 3).view(A) + assert_(not isinstance(digitize(b, a, False), A)) + assert_(not
isinstance(digitize(b, a, True), A)) + + def test_large_integers_increasing(self): + # gh-11022 + x = 2**54 # loses precision in a float + assert_equal(np.digitize(x, [x - 1, x + 1]), 1) + + @pytest.mark.xfail( + reason="gh-11022: np.core.multiarray._monotonicity loses precision") + def test_large_integers_decreasing(self): + # gh-11022 + x = 2**54 # loses precision in a float + assert_equal(np.digitize(x, [x + 1, x - 1]), 1) + + +class TestUnwrap: + + def test_simple(self): + # check that unwrap removes jumps greater than 2*pi + assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1]) + # check that unwrap maintains continuity + assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) + + def test_period(self): + # check that unwrap removes jumps greater than 255 + assert_array_equal(unwrap([1, 1 + 256], period=255), [1, 2]) + # check that unwrap maintains continuity + assert_(np.all(diff(unwrap(rand(10) * 1000, period=255)) < 255)) + # check simple case + simple_seq = np.array([0, 75, 150, 225, 300]) + wrap_seq = np.mod(simple_seq, 255) + assert_array_equal(unwrap(wrap_seq, period=255), simple_seq) + # check custom discont value + uneven_seq = np.array([0, 75, 150, 225, 300, 430]) + wrap_uneven = np.mod(uneven_seq, 250) + no_discont = unwrap(wrap_uneven, period=250) + assert_array_equal(no_discont, [0, 75, 150, 225, 300, 180]) + sm_discont = unwrap(wrap_uneven, period=250, discont=140) + assert_array_equal(sm_discont, [0, 75, 150, 225, 300, 430]) + assert sm_discont.dtype == wrap_uneven.dtype + + +@pytest.mark.parametrize( + "dtype", "O" + np.typecodes["AllInteger"] + np.typecodes["Float"] +) +@pytest.mark.parametrize("M", [0, 1, 10]) +class TestFilterwindows: + + def test_hanning(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = hanning(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.500, 4) + + def test_hamming(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = hamming(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.9400, 4) + + def test_bartlett(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = bartlett(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.4444, 4) + + def test_blackman(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = blackman(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: +
assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) + + def test_kaiser(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = kaiser(scalar, 0) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 10, 15) + + +class TestTrapz: + + def test_simple(self): + x = np.arange(-10, 10, .1) + r = trapz(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1) + # check integral of normal equals 1 + assert_almost_equal(r, 1, 7) + + def test_ndim(self): + x = np.linspace(0, 1, 3) + y = np.linspace(0, 2, 8) + z = np.linspace(0, 3, 13) + + wx = np.ones_like(x) * (x[1] - x[0]) + wx[0] /= 2 + wx[-1] /= 2 + wy = np.ones_like(y) * (y[1] - y[0]) + wy[0] /= 2 + wy[-1] /= 2 + wz = np.ones_like(z) * (z[1] - z[0]) + wz[0] /= 2 + wz[-1] /= 2 + + q = x[:, None, None] + y[None,:, None] + z[None, None,:] + + qx = (q * wx[:, None, None]).sum(axis=0) + qy = (q * wy[None, :, None]).sum(axis=1) + qz = (q * wz[None, None, :]).sum(axis=2) + + # n-d `x` + r = trapz(q, x=x[:, None, None], axis=0) + assert_almost_equal(r, qx) + r = trapz(q, x=y[None,:, None], axis=1) + assert_almost_equal(r, qy) + r = trapz(q, x=z[None, None,:], axis=2) + assert_almost_equal(r, qz) + + # 1-d `x` + r = trapz(q, x=x, axis=0) + assert_almost_equal(r, qx) + r = trapz(q, x=y, axis=1) + assert_almost_equal(r, qy) + r = trapz(q, x=z, axis=2) + assert_almost_equal(r, qz) + + def test_masked(self): + # Testing that masked arrays behave as if the function is 0 where + # masked + x = np.arange(5) + y = x * x + mask = x == 2 + ym = np.ma.array(y, mask=mask) + r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16)) + assert_almost_equal(trapz(ym, x), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapz(ym, xm), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapz(y, xm), r) + + +class TestSinc: + + def test_simple(self): + assert_(sinc(0) == 1) + w = sinc(np.linspace(-1, 1, 100)) + # check symmetry + assert_array_almost_equal(w, flipud(w), 7) + + def test_array_like(self): + x = [0, 0.5] + y1 = sinc(np.array(x)) + y2 = sinc(list(x)) + y3 = sinc(tuple(x)) + assert_array_equal(y1, y2) + assert_array_equal(y1, y3) + + +class TestUnique: + + def test_simple(self): + x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0]) + assert_(np.all(unique(x) == [0, 1, 2, 3, 4])) + assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1])) + x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham'] + assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget'])) + x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j]) + assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) + + +class TestCheckFinite: + + def test_simple(self): + a = [1, 2, 3] + b = [1, 2, np.inf] + c = [1, 2, np.nan] + np.lib.asarray_chkfinite(a) + assert_raises(ValueError, np.lib.asarray_chkfinite, b) + assert_raises(ValueError, np.lib.asarray_chkfinite, c) + + def test_dtype_order(self): + # Regression test for missing dtype and order arguments + a = [1, 2, 3] + a = np.lib.asarray_chkfinite(a, order='F', dtype=np.float64) + assert_(a.dtype == np.float64) + + +class TestCorrCoef: + A = np.array( + [[0.15391142, 0.18045767, 0.14197213], + [0.70461506, 0.96474128, 
0.27906989], + [0.9297531, 0.32296769, 0.19267156]]) + B = np.array( + [[0.10377691, 0.5417086, 0.49807457], + [0.82872117, 0.77801674, 0.39226705], + [0.9314666, 0.66800209, 0.03538394]]) + res1 = np.array( + [[1., 0.9379533, -0.04931983], + [0.9379533, 1., 0.30007991], + [-0.04931983, 0.30007991, 1.]]) + res2 = np.array( + [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523], + [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386], + [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601], + [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113], + [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823], + [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]]) + + def test_non_array(self): + assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]), + [[1., -1.], [-1., 1.]]) + + def test_simple(self): + tgt1 = corrcoef(self.A) + assert_almost_equal(tgt1, self.res1) + assert_(np.all(np.abs(tgt1) <= 1.0)) + + tgt2 = corrcoef(self.A, self.B) + assert_almost_equal(tgt2, self.res2) + assert_(np.all(np.abs(tgt2) <= 1.0)) + + def test_ddof(self): + # ddof raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1) + sup.filter(DeprecationWarning) + # ddof has no or negligible effect on the function + assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) + assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) + assert_almost_equal(corrcoef(self.A, ddof=3), self.res1) + assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2) + + def test_bias(self): + # bias raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) + assert_warns(DeprecationWarning, corrcoef, self.A, bias=0) + sup.filter(DeprecationWarning) + # bias has no or negligible effect on the function + assert_almost_equal(corrcoef(self.A, bias=1), self.res1) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + res = corrcoef(x) + tgt = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(res, tgt) + assert_(np.all(np.abs(res) <= 1.0)) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(corrcoef(np.array([])), np.nan) + assert_array_equal(corrcoef(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(corrcoef(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_extreme(self): + x = [[1e-100, 1e100], [1e100, 1e-100]] + with np.errstate(all='raise'): + c = corrcoef(x) + assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]])) + assert_(np.all(np.abs(c) <= 1.0)) + + @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + def test_corrcoef_dtype(self, test_type): + cast_A = self.A.astype(test_type) + res = corrcoef(cast_A, dtype=test_type) + assert test_type == res.dtype + + +class TestCov: + x1 = np.array([[0, 2], [1, 1], [2, 0]]).T + res1 = np.array([[1., -1.], [-1., 1.]]) + x2 = np.array([0.0, 1.0, 2.0], ndmin=2) + frequencies = np.array([1, 4, 1]) + x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T + res2 = np.array([[0.4, -0.4], [-0.4, 0.4]]) + unit_frequencies = np.ones(3, 
dtype=np.int_) + weights = np.array([1.0, 4.0, 1.0]) + res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. / 3.]]) + unit_weights = np.ones(3) + x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964]) + + def test_basic(self): + assert_allclose(cov(self.x1), self.res1) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + res = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(cov(x), res) + assert_allclose(cov(x, aweights=np.ones(3)), res) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(np.array([])), np.nan) + assert_array_equal(cov(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(cov(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_wrong_ddof(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(self.x1, ddof=5), + np.array([[np.inf, -np.inf], + [-np.inf, np.inf]])) + + def test_1D_rowvar(self): + assert_allclose(cov(self.x3), cov(self.x3, rowvar=False)) + y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501]) + assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=False)) + + def test_1D_variance(self): + assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1)) + + def test_fweights(self): + assert_allclose(cov(self.x2, fweights=self.frequencies), + cov(self.x2_repeats)) + assert_allclose(cov(self.x1, fweights=self.frequencies), + self.res2) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies), + self.res1) + nonint = self.frequencies + 0.5 + assert_raises(TypeError, cov, self.x1, fweights=nonint) + f = np.ones((2, 3), dtype=np.int_) + assert_raises(RuntimeError, cov, self.x1, fweights=f) + f = np.ones(2, dtype=np.int_) + assert_raises(RuntimeError, cov, self.x1, fweights=f) + f = -1 * np.ones(3, dtype=np.int_) + assert_raises(ValueError, cov, self.x1, fweights=f) + + def test_aweights(self): + assert_allclose(cov(self.x1, aweights=self.weights), self.res3) + assert_allclose(cov(self.x1, aweights=3.0 * self.weights), + cov(self.x1, aweights=self.weights)) + assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1) + w = np.ones((2, 3)) + assert_raises(RuntimeError, cov, self.x1, aweights=w) + w = np.ones(2) + assert_raises(RuntimeError, cov, self.x1, aweights=w) + w = -1.0 * np.ones(3) + assert_raises(ValueError, cov, self.x1, aweights=w) + + def test_unit_fweights_and_aweights(self): + assert_allclose(cov(self.x2, fweights=self.frequencies, + aweights=self.unit_weights), + cov(self.x2_repeats)) + assert_allclose(cov(self.x1, fweights=self.frequencies, + aweights=self.unit_weights), + self.res2) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.unit_weights), + self.res1) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.weights), + self.res3) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=3.0 * self.weights), + cov(self.x1, aweights=self.weights)) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.unit_weights), + self.res1) + + @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + def test_cov_dtype(self, test_type): + cast_x1 = self.x1.astype(test_type) + res = cov(cast_x1, dtype=test_type) + assert 
test_type == res.dtype + + +class Test_I0: + + def test_simple(self): + assert_almost_equal( + i0(0.5), + np.array(1.0634833707413234)) + + # need at least one test above 8, as the implementation is piecewise + A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549, 10.0]) + expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049, 2815.71662847]) + assert_almost_equal(i0(A), expected) + assert_almost_equal(i0(-A), expected) + + B = np.array([[0.827002, 0.99959078], + [0.89694769, 0.39298162], + [0.37954418, 0.05206293], + [0.36465447, 0.72446427], + [0.48164949, 0.50324519]]) + assert_almost_equal( + i0(B), + np.array([[1.17843223, 1.26583466], + [1.21147086, 1.03898290], + [1.03633899, 1.00067775], + [1.03352052, 1.13557954], + [1.05884290, 1.06432317]])) + # Regression test for gh-11205 + i0_0 = np.i0([0.]) + assert_equal(i0_0.shape, (1,)) + assert_array_equal(np.i0([0.]), np.array([1.])) + + def test_non_array(self): + a = np.arange(4) + + class array_like: + __array_interface__ = a.__array_interface__ + + def __array_wrap__(self, arr): + return self + + # E.g. pandas series survive ufunc calls through array-wrap: + assert isinstance(np.abs(array_like()), array_like) + exp = np.i0(a) + res = np.i0(array_like()) + + assert_array_equal(exp, res) + + def test_complex(self): + a = np.array([0, 1 + 2j]) + with pytest.raises(TypeError, match="i0 not supported for complex values"): + res = i0(a) + + +class TestKaiser: + + def test_simple(self): + assert_(np.isfinite(kaiser(1, 1.0))) + assert_almost_equal(kaiser(0, 1.0), + np.array([])) + assert_almost_equal(kaiser(2, 1.0), + np.array([0.78984831, 0.78984831])) + assert_almost_equal(kaiser(5, 1.0), + np.array([0.78984831, 0.94503323, 1., + 0.94503323, 0.78984831])) + assert_almost_equal(kaiser(5, 1.56789), + np.array([0.58285404, 0.88409679, 1., + 0.88409679, 0.58285404])) + + def test_int_beta(self): + kaiser(3, 4) + + +class TestMsort: + + def test_simple(self): + A = np.array([[0.44567325, 0.79115165, 0.54900530], + [0.36844147, 0.37325583, 0.96098397], + [0.64864341, 0.52929049, 0.39172155]]) + with pytest.warns(DeprecationWarning, match="msort is deprecated"): + assert_almost_equal( + msort(A), + np.array([[0.36844147, 0.37325583, 0.39172155], + [0.44567325, 0.52929049, 0.54900530], + [0.64864341, 0.79115165, 0.96098397]])) + + +class TestMeshgrid: + + def test_simple(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) + assert_array_equal(X, np.array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3], + [1, 2, 3]])) + assert_array_equal(Y, np.array([[4, 4, 4], + [5, 5, 5], + [6, 6, 6], + [7, 7, 7]])) + + def test_single_input(self): + [X] = meshgrid([1, 2, 3, 4]) + assert_array_equal(X, np.array([1, 2, 3, 4])) + + def test_no_input(self): + args = [] + assert_array_equal([], meshgrid(*args)) + assert_array_equal([], meshgrid(*args, copy=False)) + + def test_indexing(self): + x = [1, 2, 3] + y = [4, 5, 6, 7] + [X, Y] = meshgrid(x, y, indexing='ij') + assert_array_equal(X, np.array([[1, 1, 1, 1], + [2, 2, 2, 2], + [3, 3, 3, 3]])) + assert_array_equal(Y, np.array([[4, 5, 6, 7], + [4, 5, 6, 7], + [4, 5, 6, 7]])) + + # Test expected shapes: + z = [8, 9] + assert_(meshgrid(x, y)[0].shape == (4, 3)) + assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4)) + assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2)) + assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2)) + + assert_raises(ValueError, meshgrid, x, y, indexing='notvalid') + + def test_sparse(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True) + 
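+ # sparse=True returns arrays of shape (1, 3) and (4, 1) that broadcast + # against each other instead of materializing the full (4, 3) grids: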
assert_array_equal(X, np.array([[1, 2, 3]])) + assert_array_equal(Y, np.array([[4], [5], [6], [7]])) + + def test_invalid_arguments(self): + # Test that meshgrid complains about invalid arguments + # Regression test for issue #4755: + # https://github.com/numpy/numpy/issues/4755 + assert_raises(TypeError, meshgrid, + [1, 2, 3], [4, 5, 6, 7], indices='ij') + + def test_return_type(self): + # Test for appropriate dtype in returned arrays. + # Regression test for issue #5297 + # https://github.com/numpy/numpy/issues/5297 + x = np.arange(0, 10, dtype=np.float32) + y = np.arange(10, 20, dtype=np.float64) + + X, Y = np.meshgrid(x,y) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + # copy + X, Y = np.meshgrid(x,y, copy=True) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + # sparse + X, Y = np.meshgrid(x,y, sparse=True) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + def test_writeback(self): + # Issue 8561 + X = np.array([1.1, 2.2]) + Y = np.array([3.3, 4.4]) + x, y = np.meshgrid(X, Y, sparse=False, copy=True) + + x[0, :] = 0 + assert_equal(x[0, :], 0) + assert_equal(x[1, :], X) + + def test_nd_shape(self): + a, b, c, d, e = np.meshgrid(*([0] * i for i in range(1, 6))) + expected_shape = (2, 1, 3, 4, 5) + assert_equal(a.shape, expected_shape) + assert_equal(b.shape, expected_shape) + assert_equal(c.shape, expected_shape) + assert_equal(d.shape, expected_shape) + assert_equal(e.shape, expected_shape) + + def test_nd_values(self): + a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5]) + assert_equal(a, [[[0, 0, 0]], [[0, 0, 0]]]) + assert_equal(b, [[[1, 1, 1]], [[2, 2, 2]]]) + assert_equal(c, [[[3, 4, 5]], [[3, 4, 5]]]) + + def test_nd_indexing(self): + a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5], indexing='ij') + assert_equal(a, [[[0, 0, 0], [0, 0, 0]]]) + assert_equal(b, [[[1, 1, 1], [2, 2, 2]]]) + assert_equal(c, [[[3, 4, 5], [3, 4, 5]]]) + + +class TestPiecewise: + + def test_simple(self): + # Condition is single bool list + x = piecewise([0, 0], [True, False], [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: single bool list + x = piecewise([0, 0], [[True, False]], [1]) + assert_array_equal(x, [1, 0]) + + # Conditions is single bool array + x = piecewise([0, 0], np.array([True, False]), [1]) + assert_array_equal(x, [1, 0]) + + # Condition is single int array + x = piecewise([0, 0], np.array([1, 0]), [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: int array + x = piecewise([0, 0], [np.array([1, 0])], [1]) + assert_array_equal(x, [1, 0]) + + x = piecewise([0, 0], [[False, True]], [lambda x:-1]) + assert_array_equal(x, [0, -1]) + + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], []) + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], [1, 2, 3]) + + def test_two_conditions(self): + x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) + assert_array_equal(x, [3, 4]) + + def test_scalar_domains_three_conditions(self): + x = piecewise(3, [True, False, False], [4, 2, 0]) + assert_equal(x, 4) + + def test_default(self): + # No value specified for x[1], should be 0 + x = piecewise([1, 2], [True, False], [2]) + assert_array_equal(x, [2, 0]) + + # Should set x[1] to 3 + x = piecewise([1, 2], [True, False], [2, 3]) + assert_array_equal(x, [2, 3]) + + def test_0d(self): + x = np.array(3) + y = piecewise(x, x > 3, [4, 0]) + assert_(y.ndim == 0) + assert_(y == 0) + + x = 5 + y = piecewise(x, [True, False], [1, 0]) + 
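+ # even for a plain Python scalar input, piecewise returns a 0-d ndarray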
assert_(y.ndim == 0) + assert_(y == 1) + + # With 3 ranges (It was failing, before) + y = piecewise(x, [False, False, True], [1, 2, 3]) + assert_array_equal(y, 3) + + def test_0d_comparison(self): + x = 3 + y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed. + assert_equal(y, 4) + + # With 3 ranges (It was failing, before) + x = 4 + y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3]) + assert_array_equal(y, 2) + + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1]) + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1]) + + def test_0d_0d_condition(self): + x = np.array(3) + c = np.array(x > 3) + y = piecewise(x, [c], [1, 2]) + assert_equal(y, 2) + + def test_multidimensional_extrafunc(self): + x = np.array([[-2.5, -1.5, -0.5], + [0.5, 1.5, 2.5]]) + y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3]) + assert_array_equal(y, np.array([[-1., -1., -1.], + [3., 3., 1.]])) + + def test_subclasses(self): + class subclass(np.ndarray): + pass + x = np.arange(5.).view(subclass) + r = piecewise(x, [x<2., x>=4], [-1., 1., 0.]) + assert_equal(type(r), subclass) + assert_equal(r, [-1., -1., 0., 0., 1.]) + + +class TestBincount: + + def test_simple(self): + y = np.bincount(np.arange(4)) + assert_array_equal(y, np.ones(4)) + + def test_simple2(self): + y = np.bincount(np.array([1, 5, 2, 4, 1])) + assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1])) + + def test_simple_weight(self): + x = np.arange(4) + w = np.array([0.2, 0.3, 0.5, 0.1]) + y = np.bincount(x, w) + assert_array_equal(y, w) + + def test_simple_weight2(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) + + def test_with_minlength(self): + x = np.array([0, 1, 0, 1, 1]) + y = np.bincount(x, minlength=3) + assert_array_equal(y, np.array([2, 3, 0])) + x = [] + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([])) + + def test_with_minlength_smaller_than_maxvalue(self): + x = np.array([0, 1, 1, 2, 2, 3, 3]) + y = np.bincount(x, minlength=2) + assert_array_equal(y, np.array([1, 2, 2, 2])) + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([1, 2, 2, 2])) + + def test_with_minlength_and_weights(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w, 8) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0])) + + def test_empty(self): + x = np.array([], dtype=int) + y = np.bincount(x) + assert_array_equal(x, y) + + def test_empty_with_minlength(self): + x = np.array([], dtype=int) + y = np.bincount(x, minlength=5) + assert_array_equal(y, np.zeros(5, dtype=int)) + + def test_with_incorrect_minlength(self): + x = np.array([], dtype=int) + assert_raises_regex(TypeError, + "'str' object cannot be interpreted", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, + "must not be negative", + lambda: np.bincount(x, minlength=-1)) + + x = np.arange(5) + assert_raises_regex(TypeError, + "'str' object cannot be interpreted", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, + "must not be negative", + lambda: np.bincount(x, minlength=-1)) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_dtype_reference_leaks(self): + # gh-6805 + intp_refcount = sys.getrefcount(np.dtype(np.intp)) + double_refcount = sys.getrefcount(np.dtype(np.double)) + + for j 
in range(10): + np.bincount([1, 2, 3]) + assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) + assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) + + for j in range(10): + np.bincount([1, 2, 3], [4, 5, 6]) + assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) + assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) + + @pytest.mark.parametrize("vals", [[[2, 2]], 2]) + def test_error_not_1d(self, vals): + # Test that values has to be 1-D (both as array and nested list) + vals_arr = np.asarray(vals) + with assert_raises(ValueError): + np.bincount(vals_arr) + with assert_raises(ValueError): + np.bincount(vals) + + +class TestInterp: + + def test_exceptions(self): + assert_raises(ValueError, interp, 0, [], []) + assert_raises(ValueError, interp, 0, [0], [1, 2]) + assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0) + assert_raises(ValueError, interp, 0, [], [], period=360) + assert_raises(ValueError, interp, 0, [0], [1, 2], period=360) + + def test_basic(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.linspace(0, 1, 50) + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_right_left_behavior(self): + # Needs range of sizes to test different code paths. + # size ==1 is special cased, 1 < size < 5 is linear search, and + # size >= 5 goes through local search and possibly binary search. + for size in range(1, 10): + xp = np.arange(size, dtype=np.double) + yp = np.ones(size, dtype=np.double) + incpts = np.array([-1, 0, size - 1, size], dtype=np.double) + decpts = incpts[::-1] + + incres = interp(incpts, xp, yp) + decres = interp(decpts, xp, yp) + inctgt = np.array([1, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, left=0) + decres = interp(decpts, xp, yp, left=0) + inctgt = np.array([0, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, right=2) + decres = interp(decpts, xp, yp, right=2) + inctgt = np.array([1, 1, 1, 2], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, left=0, right=2) + decres = interp(decpts, xp, yp, left=0, right=2) + inctgt = np.array([0, 1, 1, 2], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + def test_scalar_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = 0 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = .3 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float32(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float64(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.nan + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_non_finite_behavior_exact_x(self): + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4]) + fp = [1, 2, np.nan, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4]) + + @pytest.fixture(params=[ + lambda x: np.float_(x), + lambda x: _make_complex(x, 0), + lambda x: _make_complex(0, x), + lambda x: _make_complex(x, np.multiply(x, -2)) + ], ids=[ + 'real', + 'complex-real', + 'complex-imag', + 'complex-both' + ]) + def sc(self, request): + """ scale function used by the below tests """ + return request.param + + def test_non_finite_any_nan(self, sc): + 
""" test that nans are propagated """ + assert_equal(np.interp(0.5, [np.nan, 1], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, np.nan], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([np.nan, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([ 0, np.nan])), sc(np.nan)) + + def test_non_finite_inf(self, sc): + """ Test that interp between opposite infs gives nan """ + assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan)) + + # unless the y values are equal + assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10)) + + def test_non_finite_half_inf_xf(self, sc): + """ Test that interp where both axes have a bound at inf gives nan """ + assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, -np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan)) + + def test_non_finite_half_inf_x(self, sc): + """ Test interp where the x axis has a bound at inf """ + assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10)) + assert_equal(np.interp(0.5, [-np.inf, 1 ], sc([0, 10])), sc(10)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([0, 10])), sc(0)) + assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0)) + + def test_non_finite_half_inf_f(self, sc): + """ Test interp where the f axis has a bound at inf """ + assert_equal(np.interp(0.5, [0, 1], sc([ 0, -np.inf])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([ 0, +np.inf])), sc(+np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, 10])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, 10])), sc(+np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, -np.inf])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, +np.inf])), sc(+np.inf)) + + def test_complex_interp(self): + # test complex interpolation + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5))*1.0j + x0 = 0.3 + y0 = x0 + (1+x0)*1.0j + assert_almost_equal(np.interp(x0, x, y), y0) + # test complex left and right + x0 = -1 + left = 2 + 3.0j + assert_almost_equal(np.interp(x0, x, y, left=left), left) + x0 = 2.0 + right = 2 + 3.0j + assert_almost_equal(np.interp(x0, x, y, right=right), right) + # test complex non finite + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2+1j, np.inf, 4] + y = [1, 2+1j, np.inf+0.5j, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), y) + # test complex periodic + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5+1.0j, 10+2j, 3+3j, 4+4j] + y = [7.5+1.5j, 5.+1.0j, 8.75+1.75j, 6.25+1.25j, 3.+3j, 3.25+3.25j, + 3.5+3.5j, 3.75+3.75j] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + + def test_zero_dimensional_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.array(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + + xp = np.array([0, 2, 4]) + fp = 
np.array([1, -1, 1]) + + actual = np.interp(np.array(1), xp, fp) + assert_equal(actual, 0) + assert_(isinstance(actual, np.float64)) + + actual = np.interp(np.array(4.5), xp, fp, period=4) + assert_equal(actual, 0.5) + assert_(isinstance(actual, np.float64)) + + def test_if_len_x_is_small(self): + xp = np.arange(0, 10, 0.0001) + fp = np.sin(xp) + assert_almost_equal(np.interp(np.pi, xp, fp), 0.0) + + def test_period(self): + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5, 10, 3, 4] + y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + x = np.array(x, order='F').reshape(2, -1) + y = np.array(y, order='C').reshape(2, -1) + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + + +class TestPercentile: + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, 0), 0.) + assert_equal(np.percentile(x, 100), 3.5) + assert_equal(np.percentile(x, 50), 1.75) + x[1] = np.nan + assert_equal(np.percentile(x, 0), np.nan) + assert_equal(np.percentile(x, 0, method='nearest'), np.nan) + + def test_fraction(self): + x = [Fraction(i, 2) for i in range(8)] + + p = np.percentile(x, Fraction(0)) + assert_equal(p, Fraction(0)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, Fraction(100)) + assert_equal(p, Fraction(7, 2)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, Fraction(50)) + assert_equal(p, Fraction(7, 4)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, [Fraction(50)]) + assert_equal(p, np.array([Fraction(7, 4)])) + assert_equal(type(p), np.ndarray) + + def test_api(self): + d = np.ones(5) + np.percentile(d, 5, None, None, False) + np.percentile(d, 5, None, None, False, 'linear') + o = np.ones((1,)) + np.percentile(d, 5, None, o, False, 'linear') + + def test_complex(self): + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + + def test_2D(self): + x = np.array([[1, 1, 1], + [1, 1, 1], + [4, 4, 3], + [1, 1, 1], + [1, 1, 1]]) + assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1]) + + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + def test_linear_nan_1D(self, dtype): + # METHOD 1 of H&F + arr = np.asarray([15.0, np.NAN, 35.0, 40.0, 50.0], dtype=dtype) + res = np.percentile( + arr, + 40.0, + method="linear") + np.testing.assert_equal(res, np.NAN) + np.testing.assert_equal(res.dtype, arr.dtype) + + H_F_TYPE_CODES = [(int_type, np.float64) + for int_type in np.typecodes["AllInteger"] + ] + [(np.float16, np.float16), + (np.float32, np.float32), + (np.float64, np.float64), + (np.longdouble, np.longdouble), + (np.dtype("O"), np.float64)] + + @pytest.mark.parametrize(["input_dtype", "expected_dtype"], H_F_TYPE_CODES) + @pytest.mark.parametrize(["method", "expected"], + [("inverted_cdf", 20), + ("averaged_inverted_cdf", 27.5), + ("closest_observation", 20), + ("interpolated_inverted_cdf", 20), + ("hazen", 27.5), + ("weibull", 26), + ("linear", 29), + ("median_unbiased", 27), + ("normal_unbiased", 27.125), + ]) + def test_linear_interpolation(self, + method, + expected, + input_dtype, + expected_dtype): + expected_dtype = np.dtype(expected_dtype) + if np._get_promotion_state() == "legacy": + expected_dtype = np.promote_types(expected_dtype, 
np.float64) + + arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0], dtype=input_dtype) + actual = np.percentile(arr, 40.0, method=method) + + np.testing.assert_almost_equal( + actual, expected_dtype.type(expected), 14) + + if method in ["inverted_cdf", "closest_observation"]: + if input_dtype == "O": + np.testing.assert_equal(np.asarray(actual).dtype, np.float64) + else: + np.testing.assert_equal(np.asarray(actual).dtype, + np.dtype(input_dtype)) + else: + np.testing.assert_equal(np.asarray(actual).dtype, + np.dtype(expected_dtype)) + + TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["Float"] + "O" + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_lower_higher(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 50, + method='lower'), 4) + assert_equal(np.percentile(np.arange(10, dtype=dtype), 50, + method='higher'), 5) + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_midpoint(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 51, + method='midpoint'), 4.5) + assert_equal(np.percentile(np.arange(9, dtype=dtype) + 1, 50, + method='midpoint'), 5) + assert_equal(np.percentile(np.arange(11, dtype=dtype), 51, + method='midpoint'), 5.5) + assert_equal(np.percentile(np.arange(11, dtype=dtype), 50, + method='midpoint'), 5) + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_nearest(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 51, + method='nearest'), 5) + assert_equal(np.percentile(np.arange(10, dtype=dtype), 49, + method='nearest'), 4) + + def test_linear_interpolation_extrapolation(self): + arr = np.random.rand(5) + + actual = np.percentile(arr, 100) + np.testing.assert_equal(actual, arr.max()) + + actual = np.percentile(arr, 0) + np.testing.assert_equal(actual, arr.min()) + + def test_sequence(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75]) + + def test_axis(self): + x = np.arange(12).reshape(3, 4) + + assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0]) + + r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]] + assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0) + + r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]] + assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T) + + # ensure qth axis is always first as with np.array(old_percentile(..)) + x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + assert_equal(np.percentile(x, (25, 50)).shape, (2,)) + assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,)) + assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6)) + assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5)) + assert_equal( + np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), + method="higher").shape, (2,)) + assert_equal(np.percentile(x, (25, 50, 75), + method="higher").shape, (3,)) + assert_equal(np.percentile(x, (25, 50), axis=0, + method="higher").shape, (2, 4, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=1, + method="higher").shape, (2, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=2, + method="higher").shape, (2, 3, 4, 6)) + assert_equal(np.percentile(x, (25, 50), axis=3, + method="higher").shape, (2, 3, 4, 5)) + assert_equal(np.percentile(x, (25, 50, 75), axis=1, + method="higher").shape, (3, 3, 5, 6)) + + def test_scalar_q(self): + # test for no empty 
dimensions for compatibility with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50), 5.5) + assert_(np.isscalar(np.percentile(x, 50))) + r0 = np.array([4., 5., 6., 7.]) + assert_equal(np.percentile(x, 50, axis=0), r0) + assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) + r1 = np.array([1.5, 5.5, 9.5]) + assert_almost_equal(np.percentile(x, 50, axis=1), r1) + assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape) + + out = np.empty(1) + assert_equal(np.percentile(x, 50, out=out), 5.5) + assert_equal(out, 5.5) + out = np.empty(4) + assert_equal(np.percentile(x, 50, axis=0, out=out), r0) + assert_equal(out, r0) + out = np.empty(3) + assert_equal(np.percentile(x, 50, axis=1, out=out), r1) + assert_equal(out, r1) + + # test for no empty dimensions for compatibility with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50, method='lower'), 5.) + assert_(np.isscalar(np.percentile(x, 50))) + r0 = np.array([4., 5., 6., 7.]) + c0 = np.percentile(x, 50, method='lower', axis=0) + assert_equal(c0, r0) + assert_equal(c0.shape, r0.shape) + r1 = np.array([1., 5., 9.]) + c1 = np.percentile(x, 50, method='lower', axis=1) + assert_almost_equal(c1, r1) + assert_equal(c1.shape, r1.shape) + + out = np.empty((), dtype=x.dtype) + c = np.percentile(x, 50, method='lower', out=out) + assert_equal(c, 5) + assert_equal(out, 5) + out = np.empty(4, dtype=x.dtype) + c = np.percentile(x, 50, method='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + out = np.empty(3, dtype=x.dtype) + c = np.percentile(x, 50, method='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_exception(self): + assert_raises(ValueError, np.percentile, [1, 2], 56, + method='foobar') + assert_raises(ValueError, np.percentile, [1], 101) + assert_raises(ValueError, np.percentile, [1], -1) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101]) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1]) + + def test_percentile_list(self): + assert_equal(np.percentile([1, 2, 3], 0), 1) + + def test_percentile_out(self): + x = np.array([1, 2, 3]) + y = np.zeros((3,)) + p = (1, 2, 3) + np.percentile(x, p, out=y) + assert_equal(np.percentile(x, p), y) + + x = np.array([[1, 2, 3], + [4, 5, 6]]) + + y = np.zeros((3, 3)) + np.percentile(x, p, axis=0, out=y) + assert_equal(np.percentile(x, p, axis=0), y) + + y = np.zeros((3, 2)) + np.percentile(x, p, axis=1, out=y) + assert_equal(np.percentile(x, p, axis=1), y) + + x = np.arange(12).reshape(3, 4) + # q.dim > 1, float + r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]]) + out = np.empty((2, 4)) + assert_equal(np.percentile(x, (25, 50), axis=0, out=out), r0) + assert_equal(out, r0) + r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) + out = np.empty((2, 3)) + assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1) + assert_equal(out, r1) + + # q.dim > 1, int + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + out = np.empty((2, 4), dtype=x.dtype) + c = np.percentile(x, (25, 50), method='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + r1 = np.array([[0, 4, 8], [1, 5, 9]]) + out = np.empty((2, 3), dtype=x.dtype) + c = np.percentile(x, (25, 50), method='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_percentile_empty_dim(self): + # empty dims are preserved + d = np.arange(11 * 2).reshape(11, 1, 2, 1) + assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1)) + 
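+ # reducing over one axis removes only that axis; the remaining size-1 + # dimensions of the input are preserved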
assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1)) + + assert_array_equal(np.percentile(d, 50, axis=2, + method='midpoint').shape, + (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-2, + method='midpoint').shape, + (11, 1, 1)) + + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape, + (2, 1, 2, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape, + (2, 11, 2, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape, + (2, 11, 1, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape, + (2, 11, 1, 2)) + + def test_percentile_no_overwrite(self): + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50], overwrite_input=False) + assert_equal(a, np.array([2, 3, 4, 1])) + + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50]) + assert_equal(a, np.array([2, 3, 4, 1])) + + def test_no_p_overwrite(self): + p = np.linspace(0., 100., num=5) + np.percentile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5)) + p = np.linspace(0., 100., num=5).tolist() + np.percentile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5).tolist()) + + def test_percentile_overwrite(self): + a = np.array([2, 3, 4, 1]) + b = np.percentile(a, [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + def test_extended_axis(self): + o = np.random.normal(size=(71, 23)) + x = np.dstack([o] * 10) + assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30)) + x = np.moveaxis(x, -1, 0) + assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + + assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)), + np.percentile(x, [25, 60], axis=None)) + assert_equal(np.percentile(x, [25, 60], axis=(0,)), + np.percentile(x, [25, 60], axis=0)) + + d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) + np.random.shuffle(d.ravel()) + assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], + np.percentile(d[:,:,:, 0].flatten(), 25)) + assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1], + np.percentile(d[:,:, 1,:].flatten(), [10, 90])) + assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2], + np.percentile(d[:,:, 2,:].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2], + np.percentile(d[2,:,:,:].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1], + np.percentile(d[2, 1,:,:].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1], + np.percentile(d[2,:,:, 1].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2], + np.percentile(d[2,:, 2,:].flatten(), 25)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(np.AxisError, np.percentile, d, axis=-5, q=25) + assert_raises(np.AxisError, np.percentile, d, axis=(0, -5), q=25) + assert_raises(np.AxisError, np.percentile, d, axis=4, 
q=25) + assert_raises(np.AxisError, np.percentile, d, axis=(0, 4), q=25) + # each of these refers to the same axis twice + assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25) + + def test_keepdims(self): + d = np.ones((3, 5, 7, 11)) + assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape, + (1, 1, 7, 11)) + assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape, + (1, 5, 7, 1)) + assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape, + (3, 1, 7, 11)) + assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape, + (1, 1, 7, 1)) + + assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3), + keepdims=True).shape, (2, 1, 1, 7, 1)) + assert_equal(np.percentile(d, [1, 7], axis=(0, 3), + keepdims=True).shape, (2, 1, 5, 7, 1)) + + @pytest.mark.parametrize('q', [7, [1, 7]]) + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1,), + (0, 1), + (-3, -1), + ] + ) + def test_keepdims_out(self, q, axis): + d = np.ones((3, 5, 7, 11)) + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + shape_out = np.shape(q) + shape_out + + out = np.empty(shape_out) + result = np.percentile(d, q, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_out(self): + o = np.zeros((4,)) + d = np.ones((3, 4)) + assert_equal(np.percentile(d, 0, 0, out=o), o) + assert_equal(np.percentile(d, 0, 0, method='nearest', out=o), o) + o = np.zeros((3,)) + assert_equal(np.percentile(d, 1, 1, out=o), o) + assert_equal(np.percentile(d, 1, 1, method='nearest', out=o), o) + + o = np.zeros(()) + assert_equal(np.percentile(d, 2, out=o), o) + assert_equal(np.percentile(d, 2, method='nearest', out=o), o) + + def test_out_nan(self): + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.zeros((4,)) + d = np.ones((3, 4)) + d[2, 1] = np.nan + assert_equal(np.percentile(d, 0, 0, out=o), o) + assert_equal( + np.percentile(d, 0, 0, method='nearest', out=o), o) + o = np.zeros((3,)) + assert_equal(np.percentile(d, 1, 1, out=o), o) + assert_equal( + np.percentile(d, 1, 1, method='nearest', out=o), o) + o = np.zeros(()) + assert_equal(np.percentile(d, 1, out=o), o) + assert_equal( + np.percentile(d, 1, method='nearest', out=o), o) + + def test_nan_behavior(self): + a = np.arange(24, dtype=float) + a[2] = np.nan + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3, axis=0), np.nan) + assert_equal(np.percentile(a, [0.3, 0.6], axis=0), + np.array([np.nan] * 2)) + + a = np.arange(24, dtype=float).reshape(2, 3, 4) + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3).ndim, 0) + + # axis0 zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.percentile(a, 0.3, 0), b) + + # axis0 not zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], 0) + b[:, 2, 3] = np.nan + b[:, 1, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 
0.6], 0), b)
+
+        # axis1 zerod
+        b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1)
+        b[1, 3] = np.nan
+        b[1, 2] = np.nan
+        assert_equal(np.percentile(a, 0.3, 1), b)
+        # axis1 not zerod
+        b = np.percentile(
+            np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1)
+        b[:, 1, 3] = np.nan
+        b[:, 1, 2] = np.nan
+        assert_equal(np.percentile(a, [0.3, 0.6], 1), b)
+
+        # axis02 zerod
+        b = np.percentile(
+            np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2))
+        b[1] = np.nan
+        b[2] = np.nan
+        assert_equal(np.percentile(a, 0.3, (0, 2)), b)
+        # axis02 not zerod
+        b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
+                          [0.3, 0.6], (0, 2))
+        b[:, 1] = np.nan
+        b[:, 2] = np.nan
+        assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b)
+        # axis02 not zerod with method='nearest'
+        b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
+                          [0.3, 0.6], (0, 2), method='nearest')
+        b[:, 1] = np.nan
+        b[:, 2] = np.nan
+        assert_equal(np.percentile(
+            a, [0.3, 0.6], (0, 2), method='nearest'), b)
+
+    def test_nan_q(self):
+        # GH18830
+        with pytest.raises(ValueError, match="Percentiles must be in"):
+            np.percentile([1, 2, 3, 4.0], np.nan)
+        with pytest.raises(ValueError, match="Percentiles must be in"):
+            np.percentile([1, 2, 3, 4.0], [np.nan])
+        q = np.linspace(1.0, 99.0, 16)
+        q[0] = np.nan
+        with pytest.raises(ValueError, match="Percentiles must be in"):
+            np.percentile([1, 2, 3, 4.0], q)
+
+    @pytest.mark.parametrize("dtype", ["m8[D]", "M8[s]"])
+    @pytest.mark.parametrize("pos", [0, 23, 10])
+    def test_nat_basic(self, dtype, pos):
+        # TODO: Note that times have dubious rounding as of fixing NaTs!
+        # NaT and NaN should behave the same, do basic tests for NaT:
+        a = np.arange(0, 24, dtype=dtype)
+        a[pos] = "NaT"
+        res = np.percentile(a, 30)
+        assert res.dtype == dtype
+        assert np.isnat(res)
+        res = np.percentile(a, [30, 60])
+        assert res.dtype == dtype
+        assert np.isnat(res).all()
+
+        a = np.arange(0, 24*3, dtype=dtype).reshape(-1, 3)
+        a[pos, 1] = "NaT"
+        res = np.percentile(a, 30, axis=0)
+        assert_array_equal(np.isnat(res), [False, True, False])
+
+
+quantile_methods = [
+    'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation',
+    'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear',
+    'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher',
+    'midpoint']
+
+
+class TestQuantile:
+    # most of this is already tested by TestPercentile
+
+    def V(self, x, y, alpha):
+        # Identification function used in several tests.
+        return (x >= y) - alpha
+
+    def test_max_ulp(self):
+        x = [0.0, 0.2, 0.4]
+        a = np.quantile(x, 0.45)
+        # The default linear method would result in
+        # 0 + 0.2 * (0.45 * 2) = 0.18.
+        # 0.18 is not exactly representable and the formula leads to a 1 ULP
+        # different result. Ensure it is exact to within 1 ULP, see gh-20331.
+        np.testing.assert_array_max_ulp(a, 0.18, maxulp=1)
+
+    def test_basic(self):
+        x = np.arange(8) * 0.5
+        assert_equal(np.quantile(x, 0), 0.)
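# (Editorial sketch, not part of the vendored diff) How the default 'linear'
# method used in test_max_ulp arrives at 0.18: the virtual index is
# q * (n - 1) and the result interpolates between the two neighbouring
# order statistics. Names here are illustrative only:
import numpy as np
qx = np.array([0.0, 0.2, 0.4])
vi = 0.45 * (qx.size - 1)                             # 0.9
lo = int(vi)                                          # 0
manual = qx[lo] + (vi - lo) * (qx[lo + 1] - qx[lo])   # 0 + 0.9 * 0.2 = 0.18
np.testing.assert_array_max_ulp(np.quantile(qx, 0.45), manual, maxulp=1)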
+ assert_equal(np.quantile(x, 1), 3.5) + assert_equal(np.quantile(x, 0.5), 1.75) + + def test_correct_quantile_value(self): + a = np.array([True]) + tf_quant = np.quantile(True, False) + assert_equal(tf_quant, a[0]) + assert_equal(type(tf_quant), a.dtype) + a = np.array([False, True, True]) + quant_res = np.quantile(a, a) + assert_array_equal(quant_res, a) + assert_equal(quant_res.dtype, a.dtype) + + def test_fraction(self): + # fractional input, integral quantile + x = [Fraction(i, 2) for i in range(8)] + q = np.quantile(x, 0) + assert_equal(q, 0) + assert_equal(type(q), Fraction) + + q = np.quantile(x, 1) + assert_equal(q, Fraction(7, 2)) + assert_equal(type(q), Fraction) + + q = np.quantile(x, Fraction(1, 2)) + assert_equal(q, Fraction(7, 4)) + assert_equal(type(q), Fraction) + + q = np.quantile(x, [Fraction(1, 2)]) + assert_equal(q, np.array([Fraction(7, 4)])) + assert_equal(type(q), np.ndarray) + + q = np.quantile(x, [[Fraction(1, 2)]]) + assert_equal(q, np.array([[Fraction(7, 4)]])) + assert_equal(type(q), np.ndarray) + + # repeat with integral input but fractional quantile + x = np.arange(8) + assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2)) + + def test_complex(self): + #See gh-22652 + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + + def test_no_p_overwrite(self): + # this is worth retesting, because quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.quantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.quantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_quantile_preserve_int_type(self, dtype): + res = np.quantile(np.array([1, 2], dtype=dtype), [0.5], + method="nearest") + assert res.dtype == dtype + + @pytest.mark.parametrize("method", quantile_methods) + def test_quantile_monotonic(self, method): + # GH 14685 + # test that the return value of quantile is monotonic if p0 is ordered + # Also tests that the boundary values are not mishandled. 
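# (Editorial sketch, not part of the vendored diff) The monotonicity being
# asserted here: for ordered probabilities, the returned quantiles are
# nondecreasing. Names here are illustrative only:
import numpy as np
mq = np.quantile(np.random.default_rng(0).random(50), np.linspace(0, 1, 11))
assert np.all(np.diff(mq) >= 0)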
+        p0 = np.linspace(0, 1, 101)
+        quantile = np.quantile(np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9,
+                                         8, 8, 7]) * 0.1, p0, method=method)
+        assert_equal(np.sort(quantile), quantile)
+
+        # Also test one where the number of data points is clearly divisible:
+        quantile = np.quantile([0., 1., 2., 3.], p0, method=method)
+        assert_equal(np.sort(quantile), quantile)
+
+    @hypothesis.given(
+        arr=arrays(dtype=np.float64,
+                   shape=st.integers(min_value=3, max_value=1000),
+                   elements=st.floats(allow_infinity=False, allow_nan=False,
+                                      min_value=-1e300, max_value=1e300)))
+    def test_quantile_monotonic_hypo(self, arr):
+        p0 = np.arange(0, 1, 0.01)
+        quantile = np.quantile(arr, p0)
+        assert_equal(np.sort(quantile), quantile)
+
+    def test_quantile_scalar_nan(self):
+        a = np.array([[10., 7., 4.], [3., 2., 1.]])
+        a[0][1] = np.nan
+        actual = np.quantile(a, 0.5)
+        assert np.isscalar(actual)
+        assert_equal(np.quantile(a, 0.5), np.nan)
+
+    @pytest.mark.parametrize("method", quantile_methods)
+    @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9])
+    def test_quantile_identification_equation(self, method, alpha):
+        # Test that the identification equation holds for the empirical
+        # CDF:
+        # E[V(x, Y)] = 0 <=> x is quantile
+        # with Y the random variable for which we have observed values and
+        # V(x, y) the canonical identification function for the quantile (at
+        # level alpha), see
+        # https://doi.org/10.48550/arXiv.0912.0902
+        rng = np.random.default_rng(4321)
+        # We choose n and alpha such that we cover 3 cases:
+        # - n * alpha is an integer
+        # - n * alpha is a float that gets rounded down
+        # - n * alpha is a float that gets rounded up
+        n = 102  # n * alpha = 20.4, 51. , 91.8
+        y = rng.random(n)
+        x = np.quantile(y, alpha, method=method)
+        if method in ("higher",):
+            # These methods do not fulfill the identification equation.
+            assert np.abs(np.mean(self.V(x, y, alpha))) > 0.1 / n
+        elif int(n * alpha) == n * alpha:
+            # We can expect exact results, up to machine precision.
+            assert_allclose(np.mean(self.V(x, y, alpha)), 0, atol=1e-14)
+        else:
+            # V = (x >= y) - alpha cannot sum to zero exactly but within
+            # "sample precision".
+            assert_allclose(np.mean(self.V(x, y, alpha)), 0,
+                            atol=1 / n / np.amin([alpha, 1 - alpha]))
+
+    @pytest.mark.parametrize("method", quantile_methods)
+    @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9])
+    def test_quantile_add_and_multiply_constant(self, method, alpha):
+        # Test that
+        # 1. quantile(c + x) = c + quantile(x)
+        # 2. quantile(c * x) = c * quantile(x)
+        # 3. quantile(-x) = -quantile(x, 1 - alpha)
+        # On empirical quantiles, this equation does not hold exactly.
+        # Koenker (2005) "Quantile Regression" Chapter 2.2.3 calls these
+        # properties equivariance.
+        rng = np.random.default_rng(4321)
+        # We choose n and alpha such that we have cases for
+        # - n * alpha is an integer
+        # - n * alpha is a float that gets rounded down
+        # - n * alpha is a float that gets rounded up
+        n = 102  # n * alpha = 20.4, 51.
, 91.8 + y = rng.random(n) + q = np.quantile(y, alpha, method=method) + c = 13.5 + + # 1 + assert_allclose(np.quantile(c + y, alpha, method=method), c + q) + # 2 + assert_allclose(np.quantile(c * y, alpha, method=method), c * q) + # 3 + q = -np.quantile(-y, 1 - alpha, method=method) + if method == "inverted_cdf": + if ( + n * alpha == int(n * alpha) + or np.round(n * alpha) == int(n * alpha) + 1 + ): + assert_allclose(q, np.quantile(y, alpha, method="higher")) + else: + assert_allclose(q, np.quantile(y, alpha, method="lower")) + elif method == "closest_observation": + if n * alpha == int(n * alpha): + assert_allclose(q, np.quantile(y, alpha, method="higher")) + elif np.round(n * alpha) == int(n * alpha) + 1: + assert_allclose( + q, np.quantile(y, alpha + 1/n, method="higher")) + else: + assert_allclose(q, np.quantile(y, alpha, method="lower")) + elif method == "interpolated_inverted_cdf": + assert_allclose(q, np.quantile(y, alpha + 1/n, method=method)) + elif method == "nearest": + if n * alpha == int(n * alpha): + assert_allclose(q, np.quantile(y, alpha + 1/n, method=method)) + else: + assert_allclose(q, np.quantile(y, alpha, method=method)) + elif method == "lower": + assert_allclose(q, np.quantile(y, alpha, method="higher")) + elif method == "higher": + assert_allclose(q, np.quantile(y, alpha, method="lower")) + else: + # "averaged_inverted_cdf", "hazen", "weibull", "linear", + # "median_unbiased", "normal_unbiased", "midpoint" + assert_allclose(q, np.quantile(y, alpha, method=method)) + + +class TestLerp: + @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + t1=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a = st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b = st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_monotonic(self, t0, t1, a, b): + l0 = nfb._lerp(a, b, t0) + l1 = nfb._lerp(a, b, t1) + if t0 == t1 or a == b: + assert l0 == l1 # uninteresting + elif (t0 < t1) == (a < b): + assert l0 <= l1 + else: + assert l0 >= l1 + + @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_bounded(self, t, a, b): + if a <= b: + assert a <= nfb._lerp(a, b, t) <= b + else: + assert b <= nfb._lerp(a, b, t) <= a + + @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_symmetric(self, t, a, b): + # double subtraction is needed to remove the extra precision of t < 0.5 + left = nfb._lerp(a, b, 1 - (1 - t)) + right = nfb._lerp(b, a, 1 - t) + assert_allclose(left, right) + + def test_linear_interpolation_formula_0d_inputs(self): + a = np.array(2) + b = np.array(5) + t = np.array(0.2) + assert nfb._lerp(a, b, t) == 2.6 + + +class TestMedian: + + def test_basic(self): + a0 = np.array(1) + a1 = np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_equal(np.median(a0), 1) + assert_allclose(np.median(a1), 0.5) + assert_allclose(np.median(a2), 2.5) + assert_allclose(np.median(a2, axis=0), 
[1.5, 2.5, 3.5]) + assert_equal(np.median(a2, axis=1), [1, 4]) + assert_allclose(np.median(a2, axis=None), 2.5) + + a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775]) + assert_almost_equal((a[1] + a[3]) / 2., np.median(a)) + a = np.array([0.0463301, 0.0444502, 0.141249]) + assert_equal(a[0], np.median(a)) + a = np.array([0.0444502, 0.141249, 0.0463301]) + assert_equal(a[-1], np.median(a)) + # check array scalar result + assert_equal(np.median(a).ndim, 0) + a[1] = np.nan + assert_equal(np.median(a).ndim, 0) + + def test_axis_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]: + orig = a.copy() + np.median(a, axis=None) + for ax in range(a.ndim): + np.median(a, axis=ax) + assert_array_equal(a, orig) + + assert_allclose(np.median(a3, axis=0), [3, 4]) + assert_allclose(np.median(a3.T, axis=1), [3, 4]) + assert_allclose(np.median(a3), 3.5) + assert_allclose(np.median(a3, axis=None), 3.5) + assert_allclose(np.median(a3.T), 3.5) + + def test_overwrite_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + a0 = np.array(1) + a1 = np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) + assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) + assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) + assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0), + [1.5, 2.5, 3.5]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) + assert_allclose( + np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) + assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1), + [3, 4]) + + a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) + np.random.shuffle(a4.ravel()) + assert_allclose(np.median(a4, axis=None), + np.median(a4.copy(), axis=None, overwrite_input=True)) + assert_allclose(np.median(a4, axis=0), + np.median(a4.copy(), axis=0, overwrite_input=True)) + assert_allclose(np.median(a4, axis=1), + np.median(a4.copy(), axis=1, overwrite_input=True)) + assert_allclose(np.median(a4, axis=2), + np.median(a4.copy(), axis=2, overwrite_input=True)) + + def test_array_like(self): + x = [1, 2, 3] + assert_almost_equal(np.median(x), 2) + x2 = [x] + assert_almost_equal(np.median(x2), 2) + assert_allclose(np.median(x2, axis=0), x) + + def test_subclass(self): + # gh-3846 + class MySubClass(np.ndarray): + + def __new__(cls, input_array, info=None): + obj = np.asarray(input_array).view(cls) + obj.info = info + return obj + + def mean(self, axis=None, dtype=None, out=None): + return -7 + + a = MySubClass([1, 2, 3]) + assert_equal(np.median(a), -7) + + @pytest.mark.parametrize('arr', + ([1., 2., 3.], [1., np.nan, 3.], np.nan, 0.)) + def test_subclass2(self, arr): + """Check that we return subclasses, even if a NaN scalar.""" + class MySubclass(np.ndarray): + pass + + m = np.median(np.array(arr).view(MySubclass)) + assert isinstance(m, MySubclass) + + def test_out(self): + o = np.zeros((4,)) + d = np.ones((3, 4)) + assert_equal(np.median(d, 0, out=o), o) + o = np.zeros((3,)) + assert_equal(np.median(d, 1, out=o), o) + o = np.zeros(()) + assert_equal(np.median(d, out=o), o) + + def test_out_nan(self): + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.zeros((4,)) + d = np.ones((3, 4)) + d[2, 1] = np.nan + assert_equal(np.median(d, 0, out=o), o) 
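# (Editorial sketch, not part of the vendored diff) The NaN handling these
# out= tests rely on: a single NaN poisons the median along its reduction
# axis, and np.nanmedian is the NaN-ignoring variant. Names illustrative:
import numpy as np
nv = np.array([1.0, np.nan, 3.0])
assert np.isnan(np.median(nv))
assert np.nanmedian(nv) == 2.0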
+ o = np.zeros((3,)) + assert_equal(np.median(d, 1, out=o), o) + o = np.zeros(()) + assert_equal(np.median(d, out=o), o) + + def test_nan_behavior(self): + a = np.arange(24, dtype=float) + a[2] = np.nan + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a, axis=0), np.nan) + + a = np.arange(24, dtype=float).reshape(2, 3, 4) + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a).ndim, 0) + + # axis0 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.median(a, 0), b) + + # axis1 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1) + b[1, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.median(a, 1), b) + + # axis02 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.median(a, (0, 2)), b) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work correctly") + def test_empty(self): + # mean(empty array) emits two warnings: empty slice and divide by 0 + a = np.array([], dtype=float) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_(w[0].category is RuntimeWarning) + assert_equal(len(w), 2) + + # multiple dimensions + a = np.array([], dtype=float, ndmin=3) + # no axis + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_(w[0].category is RuntimeWarning) + + # axis 0 and 1 + b = np.array([], dtype=float, ndmin=2) + assert_equal(np.median(a, axis=0), b) + assert_equal(np.median(a, axis=1), b) + + # axis 2 + b = np.array(np.nan, dtype=float, ndmin=2) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a, axis=2), b) + assert_(w[0].category is RuntimeWarning) + + def test_object(self): + o = np.arange(7.) 
+        assert_(type(np.median(o.astype(object))) is float)
+        o[2] = np.nan
+        assert_(type(np.median(o.astype(object))) is float)
+
+    def test_extended_axis(self):
+        o = np.random.normal(size=(71, 23))
+        x = np.dstack([o] * 10)
+        assert_equal(np.median(x, axis=(0, 1)), np.median(o))
+        x = np.moveaxis(x, -1, 0)
+        assert_equal(np.median(x, axis=(-2, -1)), np.median(o))
+        x = x.swapaxes(0, 1).copy()
+        assert_equal(np.median(x, axis=(0, -1)), np.median(o))
+
+        assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None))
+        assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0))
+        assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1))
+
+        d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))
+        np.random.shuffle(d.ravel())
+        assert_equal(np.median(d, axis=(0, 1, 2))[0],
+                     np.median(d[:,:,:, 0].flatten()))
+        assert_equal(np.median(d, axis=(0, 1, 3))[1],
+                     np.median(d[:,:, 1,:].flatten()))
+        assert_equal(np.median(d, axis=(3, 1, -4))[2],
+                     np.median(d[:,:, 2,:].flatten()))
+        assert_equal(np.median(d, axis=(3, 1, 2))[2],
+                     np.median(d[2,:,:,:].flatten()))
+        assert_equal(np.median(d, axis=(3, 2))[2, 1],
+                     np.median(d[2, 1,:,:].flatten()))
+        assert_equal(np.median(d, axis=(1, -2))[2, 1],
+                     np.median(d[2,:,:, 1].flatten()))
+        assert_equal(np.median(d, axis=(1, 3))[2, 2],
+                     np.median(d[2,:, 2,:].flatten()))
+
+    def test_extended_axis_invalid(self):
+        d = np.ones((3, 5, 7, 11))
+        assert_raises(np.AxisError, np.median, d, axis=-5)
+        assert_raises(np.AxisError, np.median, d, axis=(0, -5))
+        assert_raises(np.AxisError, np.median, d, axis=4)
+        assert_raises(np.AxisError, np.median, d, axis=(0, 4))
+        assert_raises(ValueError, np.median, d, axis=(1, 1))
+
+    def test_keepdims(self):
+        d = np.ones((3, 5, 7, 11))
+        assert_equal(np.median(d, axis=None, keepdims=True).shape,
+                     (1, 1, 1, 1))
+        assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape,
+                     (1, 1, 7, 11))
+        assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape,
+                     (1, 5, 7, 1))
+        assert_equal(np.median(d, axis=(1,), keepdims=True).shape,
+                     (3, 1, 7, 11))
+        assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape,
+                     (1, 1, 1, 1))
+        assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape,
+                     (1, 1, 7, 1))
+
+    @pytest.mark.parametrize(
+        argnames='axis',
+        argvalues=[
+            None,
+            1,
+            (1, ),
+            (0, 1),
+            (-3, -1),
+        ]
+    )
+    def test_keepdims_out(self, axis):
+        d = np.ones((3, 5, 7, 11))
+        if axis is None:
+            shape_out = (1,) * d.ndim
+        else:
+            axis_norm = normalize_axis_tuple(axis, d.ndim)
+            shape_out = tuple(
+                1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+        out = np.empty(shape_out)
+        result = np.median(d, axis=axis, keepdims=True, out=out)
+        assert result is out
+        assert_equal(result.shape, shape_out)
+
+    @pytest.mark.parametrize("dtype", ["m8[s]"])
+    @pytest.mark.parametrize("pos", [0, 23, 10])
+    def test_nat_behavior(self, dtype, pos):
+        # TODO: Median does not support Datetime, due to `mean`.
+        # NaT and NaN should behave the same, do basic tests for NaT.
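# (Editorial sketch, not part of the vendored diff) The NaT analogue of the
# NaN behaviour above, independent of this test's fixtures:
import numpy as np
nat_t = np.arange(3, dtype="m8[s]")
nat_t[1] = "NaT"
assert np.isnat(np.median(nat_t))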
+ a = np.arange(0, 24, dtype=dtype) + a[pos] = "NaT" + res = np.median(a) + assert res.dtype == dtype + assert np.isnat(res) + res = np.percentile(a, [30, 60]) + assert res.dtype == dtype + assert np.isnat(res).all() + + a = np.arange(0, 24*3, dtype=dtype).reshape(-1, 3) + a[pos, 1] = "NaT" + res = np.median(a, axis=0) + assert_array_equal(np.isnat(res), [False, True, False]) + + +class TestAdd_newdoc_ufunc: + + def test_ufunc_arg(self): + assert_raises(TypeError, add_newdoc_ufunc, 2, "blah") + assert_raises(ValueError, add_newdoc_ufunc, np.add, "blah") + + def test_string_arg(self): + assert_raises(TypeError, add_newdoc_ufunc, np.add, 3) + + +class TestAdd_newdoc: + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc") + def test_add_doc(self): + # test that np.add_newdoc did attach a docstring successfully: + tgt = "Current flat index into the array." + assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt) + assert_(len(np.core.ufunc.identity.__doc__) > 300) + assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + def test_errors_are_ignored(self): + prev_doc = np.core.flatiter.index.__doc__ + # nothing changed, but error ignored, this should probably + # give a warning (or even error) in the future. + np.add_newdoc("numpy.core", "flatiter", ("index", "bad docstring")) + assert prev_doc == np.core.flatiter.index.__doc__ + + +class TestAddDocstring(): + # Test should possibly be moved, but it also fits to be close to + # the newdoc tests... + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") + def test_add_same_docstring(self): + # test for attributes (which are C-level defined) + np.add_docstring(np.ndarray.flat, np.ndarray.flat.__doc__) + # And typical functions: + def func(): + """docstring""" + return + + np.add_docstring(func, func.__doc__) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + def test_different_docstring_fails(self): + # test for attributes (which are C-level defined) + with assert_raises(RuntimeError): + np.add_docstring(np.ndarray.flat, "different docstring") + # And typical functions: + def func(): + """docstring""" + return + + with assert_raises(RuntimeError): + np.add_docstring(func, "different docstring") + + +class TestSortComplex: + + @pytest.mark.parametrize("type_in, type_out", [ + ('l', 'D'), + ('h', 'F'), + ('H', 'F'), + ('b', 'F'), + ('B', 'F'), + ('g', 'G'), + ]) + def test_sort_real(self, type_in, type_out): + # sort_complex() type casting for real input types + a = np.array([5, 3, 6, 2, 1], dtype=type_in) + actual = np.sort_complex(a) + expected = np.sort(a).astype(type_out) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + + def test_sort_complex(self): + # sort_complex() handling of complex input + a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D') + expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D') + actual = np.sort_complex(a) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_histograms.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_histograms.py new file mode 100644 index 00000000..8c55f16d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_histograms.py @@ -0,0 
+1,816 @@ +import numpy as np + +from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_raises, assert_allclose, + assert_array_max_ulp, assert_raises_regex, suppress_warnings, + ) +from numpy.testing._private.utils import requires_memory +import pytest + + +class TestHistogram: + + def setup_method(self): + pass + + def teardown_method(self): + pass + + def test_simple(self): + n = 100 + v = np.random.rand(n) + (a, b) = histogram(v) + # check if the sum of the bins equals the number of samples + assert_equal(np.sum(a, axis=0), n) + # check that the bin counts are evenly spaced when the data is from + # a linear function + (a, b) = histogram(np.linspace(0, 10, 100)) + assert_array_equal(a, 10) + + def test_one_bin(self): + # Ticket 632 + hist, edges = histogram([1, 2, 3, 4], [1, 2]) + assert_array_equal(hist, [2, ]) + assert_array_equal(edges, [1, 2]) + assert_raises(ValueError, histogram, [1, 2], bins=0) + h, e = histogram([1, 2], bins=1) + assert_equal(h, np.array([2])) + assert_allclose(e, np.array([1., 2.])) + + def test_density(self): + # Check that the integral of the density equals 1. + n = 100 + v = np.random.rand(n) + a, b = histogram(v, density=True) + area = np.sum(a * np.diff(b)) + assert_almost_equal(area, 1) + + # Check with non-constant bin widths + v = np.arange(10) + bins = [0, 1, 3, 6, 10] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, .1) + assert_equal(np.sum(a * np.diff(b)), 1) + + # Test that passing False works too + a, b = histogram(v, bins, density=False) + assert_array_equal(a, [1, 2, 3, 4]) + + # Variable bin widths are especially useful to deal with + # infinities. + v = np.arange(10) + bins = [0, 1, 3, 6, np.inf] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, [.1, .1, .1, 0.]) + + # Taken from a bug report from N. Becker on the numpy-discussion + # mailing list Aug. 6, 2010. 
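# (Editorial sketch, not part of the vendored diff) The invariant that
# density=True maintains throughout this test, including for infinite-width
# bins where the density is zero:
import numpy as np
dh, de = np.histogram(np.random.rand(100), bins=7, density=True)
assert np.isclose((dh * np.diff(de)).sum(), 1.0)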
+        counts, dmy = np.histogram(
+            [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
+        assert_equal(counts, [.25, 0])
+
+    def test_outliers(self):
+        # Check that outliers are not tallied
+        a = np.arange(10) + .5
+
+        # Lower outliers
+        h, b = histogram(a, range=[0, 9])
+        assert_equal(h.sum(), 9)
+
+        # Upper outliers
+        h, b = histogram(a, range=[1, 10])
+        assert_equal(h.sum(), 9)
+
+        # Normalization
+        h, b = histogram(a, range=[1, 9], density=True)
+        assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15)
+
+        # Weights
+        w = np.arange(10) + .5
+        h, b = histogram(a, range=[1, 9], weights=w, density=True)
+        assert_equal((h * np.diff(b)).sum(), 1)
+
+        h, b = histogram(a, bins=8, range=[1, 9], weights=w)
+        assert_equal(h, w[1:-1])
+
+    def test_arr_weights_mismatch(self):
+        a = np.arange(10) + .5
+        w = np.arange(11) + .5
+        with assert_raises_regex(ValueError, "same shape as"):
+            h, b = histogram(a, range=[1, 9], weights=w, density=True)
+
+
+    def test_type(self):
+        # Check the type of the returned histogram
+        a = np.arange(10) + .5
+        h, b = histogram(a)
+        assert_(np.issubdtype(h.dtype, np.integer))
+
+        h, b = histogram(a, density=True)
+        assert_(np.issubdtype(h.dtype, np.floating))
+
+        h, b = histogram(a, weights=np.ones(10, int))
+        assert_(np.issubdtype(h.dtype, np.integer))
+
+        h, b = histogram(a, weights=np.ones(10, float))
+        assert_(np.issubdtype(h.dtype, np.floating))
+
+    def test_f32_rounding(self):
+        # gh-4799, check that the rounding of the edges works with float32
+        x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)
+        y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32)
+        counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
+        assert_equal(counts_hist.sum(), 3.)
+
+    def test_bool_conversion(self):
+        # gh-12107
+        # Reference integer histogram
+        a = np.array([1, 1, 0], dtype=np.uint8)
+        int_hist, int_edges = np.histogram(a)
+
+        # Should raise a warning on booleans
+        # Ensure that the histograms are equivalent; we need to suppress
+        # the warnings to get the actual outputs
+        with suppress_warnings() as sup:
+            rec = sup.record(RuntimeWarning, 'Converting input from .*')
+            hist, edges = np.histogram([True, True, False])
+            # A warning should be issued
+            assert_equal(len(rec), 1)
+            assert_array_equal(hist, int_hist)
+            assert_array_equal(edges, int_edges)
+
+    def test_weights(self):
+        v = np.random.rand(100)
+        w = np.ones(100) * 5
+        a, b = histogram(v)
+        na, nb = histogram(v, density=True)
+        wa, wb = histogram(v, weights=w)
+        nwa, nwb = histogram(v, weights=w, density=True)
+        assert_array_almost_equal(a * 5, wa)
+        assert_array_almost_equal(na, nwa)
+
+        # Check weights are properly applied.
+        v = np.linspace(0, 10, 10)
+        w = np.concatenate((np.zeros(5), np.ones(5)))
+        wa, wb = histogram(v, bins=np.arange(11), weights=w)
+        assert_array_almost_equal(wa, w)
+
+        # Check with integer weights
+        wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
+        assert_array_equal(wa, [4, 5, 0, 1])
+        wa, wb = histogram(
+            [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True)
+        assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)
+
+        # Check weights with non-uniform bin widths
+        a, b = histogram(
+            np.arange(9), [0, 1, 3, 6, 10],
+            weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)
+        assert_almost_equal(a, [.2, .1, .1, .075])
+
+    def test_exotic_weights(self):
+
+        # Test the use of weights that are not integer or floats, but e.g.
+        # complex numbers or object types.
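# (Editorial sketch, not part of the vendored diff) Per-sample weights are
# summed into each bin, so non-float weight dtypes such as complex carry
# through unchanged:
import numpy as np
wh, _ = np.histogram([0.5, 1.5, 1.6], bins=[0, 1, 2],
                     weights=np.array([1 + 2j, 1j, 1 + 0j]))
assert wh[0] == 1 + 2j and wh[1] == 1 + 1j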
+
+        # Complex weights
+        values = np.array([1.3, 2.5, 2.3])
+        weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2])
+
+        # Check with custom bins
+        wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
+        assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
+
+        # Check with even bins
+        wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
+        assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
+
+        # Decimal weights
+        from decimal import Decimal
+        values = np.array([1.3, 2.5, 2.3])
+        weights = np.array([Decimal(1), Decimal(2), Decimal(3)])
+
+        # Check with custom bins
+        wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
+        assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
+
+        # Check with even bins
+        wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
+        assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
+
+    def test_no_side_effects(self):
+        # This is a regression test that ensures that values passed to
+        # ``histogram`` are unchanged.
+        values = np.array([1.3, 2.5, 2.3])
+        np.histogram(values, range=[-10, 10], bins=100)
+        assert_array_almost_equal(values, [1.3, 2.5, 2.3])
+
+    def test_empty(self):
+        a, b = histogram([], bins=([0, 1]))
+        assert_array_equal(a, np.array([0]))
+        assert_array_equal(b, np.array([0, 1]))
+
+    def test_error_binnum_type(self):
+        # Tests that the right error is raised if the bins argument is a float
+        vals = np.linspace(0.0, 1.0, num=100)
+        histogram(vals, 5)
+        assert_raises(TypeError, histogram, vals, 2.4)
+
+    def test_finite_range(self):
+        # Normal ranges should be fine
+        vals = np.linspace(0.0, 1.0, num=100)
+        histogram(vals, range=[0.25,0.75])
+        assert_raises(ValueError, histogram, vals, range=[np.nan,0.75])
+        assert_raises(ValueError, histogram, vals, range=[0.25,np.inf])
+
+    def test_invalid_range(self):
+        # start of range must be < end of range
+        vals = np.linspace(0.0, 1.0, num=100)
+        with assert_raises_regex(ValueError, "max must be larger than"):
+            np.histogram(vals, range=[0.1, 0.01])
+
+    def test_bin_edge_cases(self):
+        # Ensure that floating-point computations correctly place edge cases.
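# (Editorial illustration, not part of the vendored diff) For N uniform bins
# the edges are effectively np.linspace(lo, hi, N + 1), which is what makes
# samples sitting very close to an edge delicate:
import numpy as np
sketch_edges = np.histogram_bin_edges([], bins=4, range=(0.0, 1.0))
assert np.allclose(sketch_edges, np.linspace(0.0, 1.0, 5))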
+ arr = np.array([337, 404, 739, 806, 1007, 1811, 2012]) + hist, edges = np.histogram(arr, bins=8296, range=(2, 2280)) + mask = hist > 0 + left_edges = edges[:-1][mask] + right_edges = edges[1:][mask] + for x, left, right in zip(arr, left_edges, right_edges): + assert_(x >= left) + assert_(x < right) + + def test_last_bin_inclusive_range(self): + arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) + hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5)) + assert_equal(hist[-1], 1) + + def test_bin_array_dims(self): + # gracefully handle bins object > 1 dimension + vals = np.linspace(0.0, 1.0, num=100) + bins = np.array([[0, 0.5], [0.6, 1.0]]) + with assert_raises_regex(ValueError, "must be 1d"): + np.histogram(vals, bins=bins) + + def test_unsigned_monotonicity_check(self): + # Ensures ValueError is raised if bins not increasing monotonically + # when bins contain unsigned values (see #9222) + arr = np.array([2]) + bins = np.array([1, 3, 1], dtype='uint64') + with assert_raises(ValueError): + hist, edges = np.histogram(arr, bins=bins) + + def test_object_array_of_0d(self): + # gh-7864 + assert_raises(ValueError, + histogram, [np.array(0.4) for i in range(10)] + [-np.inf]) + assert_raises(ValueError, + histogram, [np.array(0.4) for i in range(10)] + [np.inf]) + + # these should not crash + np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001]) + np.histogram([np.array(0.5) for i in range(10)] + [.5]) + + def test_some_nan_values(self): + # gh-7503 + one_nan = np.array([0, 1, np.nan]) + all_nan = np.array([np.nan, np.nan]) + + # the internal comparisons with NaN give warnings + sup = suppress_warnings() + sup.filter(RuntimeWarning) + with sup: + # can't infer range with nan + assert_raises(ValueError, histogram, one_nan, bins='auto') + assert_raises(ValueError, histogram, all_nan, bins='auto') + + # explicit range solves the problem + h, b = histogram(one_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 0) # nan is not counted + + # as does an explicit set of bins + h, b = histogram(one_nan, bins=[0, 1]) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins=[0, 1]) + assert_equal(h.sum(), 0) # nan is not counted + + def test_datetime(self): + begin = np.datetime64('2000-01-01', 'D') + offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20]) + bins = np.array([0, 2, 7, 20]) + dates = begin + offsets + date_bins = begin + bins + + td = np.dtype('timedelta64[D]') + + # Results should be the same for integer offsets or datetime values. 
+ # For now, only explicit bins are supported, since linspace does not + # work on datetimes or timedeltas + d_count, d_edge = histogram(dates, bins=date_bins) + t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td)) + i_count, i_edge = histogram(offsets, bins=bins) + + assert_equal(d_count, i_count) + assert_equal(t_count, i_count) + + assert_equal((d_edge - begin).astype(int), i_edge) + assert_equal(t_edge.astype(int), i_edge) + + assert_equal(d_edge.dtype, dates.dtype) + assert_equal(t_edge.dtype, td) + + def do_signed_overflow_bounds(self, dtype): + exponent = 8 * np.dtype(dtype).itemsize - 1 + arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype) + hist, e = histogram(arr, bins=2) + assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4]) + assert_equal(hist, [1, 1]) + + def test_signed_overflow_bounds(self): + self.do_signed_overflow_bounds(np.byte) + self.do_signed_overflow_bounds(np.short) + self.do_signed_overflow_bounds(np.intc) + self.do_signed_overflow_bounds(np.int_) + self.do_signed_overflow_bounds(np.longlong) + + def do_precision_lower_bound(self, float_small, float_large): + eps = np.finfo(float_large).eps + + arr = np.array([1.0], float_small) + range = np.array([1.0 + eps, 2.0], float_large) + + # test is looking for behavior when the bounds change between dtypes + if range.astype(float_small)[0] != 1: + return + + # previously crashed + count, x_loc = np.histogram(arr, bins=1, range=range) + assert_equal(count, [1]) + + # gh-10322 means that the type comes from arr - this may change + assert_equal(x_loc.dtype, float_small) + + def do_precision_upper_bound(self, float_small, float_large): + eps = np.finfo(float_large).eps + + arr = np.array([1.0], float_small) + range = np.array([0.0, 1.0 - eps], float_large) + + # test is looking for behavior when the bounds change between dtypes + if range.astype(float_small)[-1] != 1: + return + + # previously crashed + count, x_loc = np.histogram(arr, bins=1, range=range) + assert_equal(count, [1]) + + # gh-10322 means that the type comes from arr - this may change + assert_equal(x_loc.dtype, float_small) + + def do_precision(self, float_small, float_large): + self.do_precision_lower_bound(float_small, float_large) + self.do_precision_upper_bound(float_small, float_large) + + def test_precision(self): + # not looping results in a useful stack trace upon failure + self.do_precision(np.half, np.single) + self.do_precision(np.half, np.double) + self.do_precision(np.half, np.longdouble) + self.do_precision(np.single, np.double) + self.do_precision(np.single, np.longdouble) + self.do_precision(np.double, np.longdouble) + + def test_histogram_bin_edges(self): + hist, e = histogram([1, 2, 3, 4], [1, 2]) + edges = histogram_bin_edges([1, 2, 3, 4], [1, 2]) + assert_array_equal(edges, e) + + arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) + hist, e = histogram(arr, bins=30, range=(-0.5, 5)) + edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5)) + assert_array_equal(edges, e) + + hist, e = histogram(arr, bins='auto', range=(0, 1)) + edges = histogram_bin_edges(arr, bins='auto', range=(0, 1)) + assert_array_equal(edges, e) + + # @requires_memory(free_bytes=1e10) + # @pytest.mark.slow + @pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing") + def test_big_arrays(self): + sample = np.zeros([100000000, 3]) + xbins = 400 + ybins = 400 + zbins = np.arange(16000) + hist = np.histogramdd(sample=sample, bins=(xbins, ybins, zbins)) + assert_equal(type(hist), type((1, 2))) + + def test_gh_23110(self): 
+        hist, e = np.histogram(np.array([-0.9e-308], dtype='>f8'),
+                               bins=2,
+                               range=(-1e-308, -2e-313))
+        expected_hist = np.array([1, 0])
+        assert_array_equal(hist, expected_hist)
+
+
+class TestHistogramOptimBinNums:
+    """
+    Provide test coverage when using provided estimators for optimal number of
+    bins
+    """
+
+    def test_empty(self):
+        estimator_list = ['fd', 'scott', 'rice', 'sturges',
+                          'doane', 'sqrt', 'auto', 'stone']
+        # check it can deal with empty data
+        for estimator in estimator_list:
+            a, b = histogram([], bins=estimator)
+            assert_array_equal(a, np.array([0]))
+            assert_array_equal(b, np.array([0, 1]))
+
+    def test_simple(self):
+        """
+        Straightforward testing with a mixture of linspace data (for
+        consistency). All test values have been precomputed and the values
+        shouldn't change.
+        """
+        # Some basic sanity checking, with some fixed data.
+        # Checking for the correct number of bins
+        basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
+                           'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2},
+                      500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
+                            'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9},
+                      5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
+                             'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}}
+
+        for testlen, expectedResults in basic_test.items():
+            # Create some sort of non uniform data to test with
+            # (2 peak uniform mixture)
+            x1 = np.linspace(-10, -1, testlen // 5 * 2)
+            x2 = np.linspace(1, 10, testlen // 5 * 3)
+            x = np.concatenate((x1, x2))
+            for estimator, numbins in expectedResults.items():
+                a, b = np.histogram(x, estimator)
+                assert_equal(len(a), numbins, err_msg="For the {0} estimator "
+                             "with datasize of {1}".format(estimator, testlen))
+
+    def test_small(self):
+        """
+        Smaller datasets have the potential to cause issues with the data
+        adaptive methods, especially the FD method. All bin numbers have been
+        precalculated.
+        """
+        small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
+                         'doane': 1, 'sqrt': 1, 'stone': 1},
+                     2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
+                         'doane': 1, 'sqrt': 2, 'stone': 1},
+                     3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
+                         'doane': 3, 'sqrt': 2, 'stone': 1}}
+
+        for testlen, expectedResults in small_dat.items():
+            testdat = np.arange(testlen)
+            for estimator, expbins in expectedResults.items():
+                a, b = np.histogram(testdat, estimator)
+                assert_equal(len(a), expbins, err_msg="For the {0} estimator "
+                             "with datasize of {1}".format(estimator, testlen))
+
+    def test_incorrect_methods(self):
+        """
+        Check that a ValueError is raised when an unknown string is passed in
+        """
+        check_list = ['mad', 'freeman', 'histograms', 'IQR']
+        for estimator in check_list:
+            assert_raises(ValueError, histogram, [1, 2, 3], estimator)
+
+    def test_novariance(self):
+        """
+        Check that methods handle no variance in data.
+        Primarily for Scott and FD, as the SD and IQR are both 0 in this case
+        """
+        novar_dataset = np.ones(100)
+        novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
+                            'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1}
+
+        for estimator, numbins in novar_resultdict.items():
+            a, b = np.histogram(novar_dataset, estimator)
+            assert_equal(len(a), numbins, err_msg="{0} estimator, "
+                         "No Variance test".format(estimator))
+
+    def test_limited_variance(self):
+        """
+        Check when IQR is 0, but variance exists, we return the sturges value
+        and not the fd value.
+ """ + lim_var_data = np.ones(1000) + lim_var_data[:3] = 0 + lim_var_data[-4:] = 100 + + edges_auto = histogram_bin_edges(lim_var_data, 'auto') + assert_equal(edges_auto, np.linspace(0, 100, 12)) + + edges_fd = histogram_bin_edges(lim_var_data, 'fd') + assert_equal(edges_fd, np.array([0, 100])) + + edges_sturges = histogram_bin_edges(lim_var_data, 'sturges') + assert_equal(edges_sturges, np.linspace(0, 100, 12)) + + def test_outlier(self): + """ + Check the FD, Scott and Doane with outliers. + + The FD estimates a smaller binwidth since it's less affected by + outliers. Since the range is so (artificially) large, this means more + bins, most of which will be empty, but the data of interest usually is + unaffected. The Scott estimator is more affected and returns fewer bins, + despite most of the variance being in one area of the data. The Doane + estimator lies somewhere between the other two. + """ + xcenter = np.linspace(-10, 10, 50) + outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter)) + + outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6} + + for estimator, numbins in outlier_resultdict.items(): + a, b = np.histogram(outlier_dataset, estimator) + assert_equal(len(a), numbins) + + def test_scott_vs_stone(self): + """Verify that Scott's rule and Stone's rule converges for normally distributed data""" + + def nbins_ratio(seed, size): + rng = np.random.RandomState(seed) + x = rng.normal(loc=0, scale=2, size=size) + a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0]) + return a / (a + b) + + ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)] + for seed in range(10)] + + # the average difference between the two methods decreases as the dataset size increases. + avg = abs(np.mean(ll, axis=0) - 0.5) + assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2) + + def test_simple_range(self): + """ + Straightforward testing with a mixture of linspace data (for + consistency). Adding in a 3rd mixture that will then be + completely ignored. All test values have been precomputed and + the shouldn't change. + """ + # some basic sanity checking, with some fixed data. + # Checking for the correct number of bins + basic_test = { + 50: {'fd': 8, 'scott': 8, 'rice': 15, + 'sturges': 14, 'auto': 14, 'stone': 8}, + 500: {'fd': 15, 'scott': 16, 'rice': 32, + 'sturges': 20, 'auto': 20, 'stone': 80}, + 5000: {'fd': 33, 'scott': 33, 'rice': 69, + 'sturges': 27, 'auto': 33, 'stone': 80} + } + + for testlen, expectedResults in basic_test.items(): + # create some sort of non uniform data to test with + # (3 peak uniform mixture) + x1 = np.linspace(-10, -1, testlen // 5 * 2) + x2 = np.linspace(1, 10, testlen // 5 * 3) + x3 = np.linspace(-100, -50, testlen) + x = np.hstack((x1, x2, x3)) + for estimator, numbins in expectedResults.items(): + a, b = np.histogram(x, estimator, range = (-20, 20)) + msg = "For the {0} estimator".format(estimator) + msg += " with datasize of {0}".format(testlen) + assert_equal(len(a), numbins, err_msg=msg) + + @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott', + 'stone', 'rice', 'sturges']) + def test_signed_integer_data(self, bins): + # Regression test for gh-14379. 
+ a = np.array([-2, 0, 127], dtype=np.int8) + hist, edges = np.histogram(a, bins=bins) + hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins) + assert_array_equal(hist, hist32) + assert_array_equal(edges, edges32) + + def test_simple_weighted(self): + """ + Check that weighted data raises a TypeError + """ + estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] + for estimator in estimator_list: + assert_raises(TypeError, histogram, [1, 2, 3], + estimator, weights=[1, 2, 3]) + + +class TestHistogramdd: + + def test_simple(self): + x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], + [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]]) + H, edges = histogramdd(x, (2, 3, 3), + range=[[-1, 1], [0, 3], [0, 3]]) + answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]], + [[0, 1, 0], [0, 0, 1], [0, 0, 1]]]) + assert_array_equal(H, answer) + + # Check normalization + ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]] + H, edges = histogramdd(x, bins=ed, density=True) + assert_(np.all(H == answer / 12.)) + + # Check that H has the correct shape. + H, edges = histogramdd(x, (2, 3, 4), + range=[[-1, 1], [0, 3], [0, 4]], + density=True) + answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]], + [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]]) + assert_array_almost_equal(H, answer / 6., 4) + # Check that a sequence of arrays is accepted and H has the correct + # shape. + z = [np.squeeze(y) for y in np.split(x, 3, axis=1)] + H, edges = histogramdd( + z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]]) + answer = np.array([[[0, 0], [0, 0], [0, 0]], + [[0, 1], [0, 0], [1, 0]], + [[0, 1], [0, 0], [0, 0]], + [[0, 0], [0, 0], [0, 0]]]) + assert_array_equal(H, answer) + + Z = np.zeros((5, 5, 5)) + Z[list(range(5)), list(range(5)), list(range(5))] = 1. + H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5) + assert_array_equal(H, Z) + + def test_shape_3d(self): + # All possible permutations for bins of different lengths in 3D. + bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4), + (4, 5, 6)) + r = np.random.rand(10, 3) + for b in bins: + H, edges = histogramdd(r, b) + assert_(H.shape == b) + + def test_shape_4d(self): + # All possible permutations for bins of different lengths in 4D. 
+ bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4), + (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6), + (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7), + (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5), + (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5), + (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4)) + + r = np.random.rand(10, 4) + for b in bins: + H, edges = histogramdd(r, b) + assert_(H.shape == b) + + def test_weights(self): + v = np.random.rand(100, 2) + hist, edges = histogramdd(v) + n_hist, edges = histogramdd(v, density=True) + w_hist, edges = histogramdd(v, weights=np.ones(100)) + assert_array_equal(w_hist, hist) + w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True) + assert_array_equal(w_hist, n_hist) + w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2) + assert_array_equal(w_hist, 2 * hist) + + def test_identical_samples(self): + x = np.zeros((10, 2), int) + hist, edges = histogramdd(x, bins=2) + assert_array_equal(edges[0], np.array([-0.5, 0., 0.5])) + + def test_empty(self): + a, b = histogramdd([[], []], bins=([0, 1], [0, 1])) + assert_array_max_ulp(a, np.array([[0.]])) + a, b = np.histogramdd([[], [], []], bins=2) + assert_array_max_ulp(a, np.zeros((2, 2, 2))) + + def test_bins_errors(self): + # There are two ways to specify bins. Check for the right errors + # when mixing those. + x = np.arange(8).reshape(2, 4) + assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5]) + assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1]) + assert_raises( + ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]) + assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]])) + + def test_inf_edges(self): + # Test using +/-inf bin edges works. See #1788. + with np.errstate(invalid='ignore'): + x = np.arange(6).reshape(3, 2) + expected = np.array([[1, 0], [0, 1], [0, 1]]) + h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]]) + assert_allclose(h, expected) + h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])]) + assert_allclose(h, expected) + h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]]) + assert_allclose(h, expected) + + def test_rightmost_binedge(self): + # Test event very close to rightmost binedge. See Github issue #4266 + x = [0.9999999995] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) + x = [1.0] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) 
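# (Editorial sketch, not part of the vendored diff) Why 1.0 lands in the
# final bin above: every bin is half-open [left, right) except the last,
# which is closed on both sides:
import numpy as np
closed_h, _ = np.histogram([1.0], bins=[0.0, 0.5, 1.0])
assert closed_h.tolist() == [0, 1]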
+ x = [1.0000000001] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 0.0) + x = [1.0001] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 0.0) + + def test_finite_range(self): + vals = np.random.random((100, 3)) + histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]]) + assert_raises(ValueError, histogramdd, vals, + range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]]) + assert_raises(ValueError, histogramdd, vals, + range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]]) + + def test_equal_edges(self): + """ Test that adjacent entries in an edge array can be equal """ + x = np.array([0, 1, 2]) + y = np.array([0, 1, 2]) + x_edges = np.array([0, 2, 2]) + y_edges = 1 + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + hist_expected = np.array([ + [2.], + [1.], # x == 2 falls in the final bin + ]) + assert_equal(hist, hist_expected) + + def test_edge_dtype(self): + """ Test that if an edge array is input, its type is preserved """ + x = np.array([0, 10, 20]) + y = x / 10 + x_edges = np.array([0, 5, 15, 20]) + y_edges = x_edges / 10 + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + assert_equal(edges[0].dtype, x_edges.dtype) + assert_equal(edges[1].dtype, y_edges.dtype) + + def test_large_integers(self): + big = 2**60 # Too large to represent with a full precision float + + x = np.array([0], np.int64) + x_edges = np.array([-1, +1], np.int64) + y = big + x + y_edges = big + x_edges + + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + assert_equal(hist[0, 0], 1) + + def test_density_non_uniform_2d(self): + # Defines the following grid: + # + # 0 2 8 + # 0+-+-----+ + # + | + + # + | + + # 6+-+-----+ + # 8+-+-----+ + x_edges = np.array([0, 2, 8]) + y_edges = np.array([0, 6, 8]) + relative_areas = np.array([ + [3, 9], + [1, 3]]) + + # ensure the number of points in each region is proportional to its area + x = np.array([1] + [1]*3 + [7]*3 + [7]*9) + y = np.array([7] + [1]*3 + [7]*3 + [1]*9) + + # sanity check that the above worked as intended + hist, edges = histogramdd((y, x), bins=(y_edges, x_edges)) + assert_equal(hist, relative_areas) + + # resulting histogram should be uniform, since counts and areas are proportional + hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True) + assert_equal(hist, 1 / (8*8)) + + def test_density_non_uniform_1d(self): + # compare to histogram to show the results are the same + v = np.arange(10) + bins = np.array([0, 1, 3, 6, 10]) + hist, edges = histogram(v, bins, density=True) + hist_dd, edges_dd = histogramdd((v,), (bins,), density=True) + assert_equal(hist, hist_dd) + assert_equal(edges, edges_dd[0]) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_index_tricks.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_index_tricks.py new file mode 100644 index 00000000..b599cb34 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_index_tricks.py @@ -0,0 +1,551 @@ +import pytest + +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_raises, assert_raises_regex, + ) +from numpy.lib.index_tricks import ( + mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from, + index_exp, ndindex, r_, s_, ix_ + ) + + +class TestRavelUnravelIndex: + def test_basic(self): + assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) + + # 
test that new shape argument works properly + assert_equal(np.unravel_index(indices=2, + shape=(2, 2)), + (1, 0)) + + # test that an invalid second keyword argument + # is properly handled, including the old name `dims`. + with assert_raises(TypeError): + np.unravel_index(indices=2, hape=(2, 2)) + + with assert_raises(TypeError): + np.unravel_index(2, hape=(2, 2)) + + with assert_raises(TypeError): + np.unravel_index(254, ims=(17, 94)) + + with assert_raises(TypeError): + np.unravel_index(254, dims=(17, 94)) + + assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) + assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) + assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) + assert_raises(ValueError, np.unravel_index, -1, (2, 2)) + assert_raises(TypeError, np.unravel_index, 0.5, (2, 2)) + assert_raises(ValueError, np.unravel_index, 4, (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) + assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) + + assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4]) + assert_equal( + np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4) + + arr = np.array([[3, 6, 6], [4, 5, 1]]) + assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) + assert_equal( + np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13]) + assert_equal( + np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19]) + assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')), + [12, 13, 13]) + assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621) + + assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)), + [[3, 6, 6], [4, 5, 1]]) + assert_equal( + np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'), + [[3, 6, 6], [4, 5, 1]]) + assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1]) + + def test_empty_indices(self): + msg1 = 'indices must be integral: the provided empty sequence was' + msg2 = 'only int indices permitted' + assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5)) + assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5)) + assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]), + (10, 3, 5)) + assert_equal(np.unravel_index(np.array([],dtype=int), (10, 3, 5)), + [[], [], []]) + assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []), + (10, 3)) + assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']), + (10, 3)) + assert_raises_regex(TypeError, msg2, np.ravel_multi_index, + (np.array([]), np.array([])), (5, 3)) + assert_equal(np.ravel_multi_index( + (np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), []) + assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int), + (5, 3)), []) + + def test_big_indices(self): + # ravel_multi_index for big indices (issue #7546) + if np.intp == np.int64: + arr = ([1, 29], [3, 5], [3, 117], [19, 2], + [2379, 1284], [2, 2], [0, 1]) + assert_equal( + np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)), + [5627771580, 117259570957]) + + # test unravel_index for big indices (issue #9538) + assert_raises(ValueError, np.unravel_index, 1, (2**32-1, 2**31+1)) + + # test overflow checking for too big array (issue #7546) + dummy_arr = ([0],[0]) + half_max = np.iinfo(np.intp).max // 2 + assert_equal( + np.ravel_multi_index(dummy_arr, (half_max, 
2)), [0]) + assert_raises(ValueError, + np.ravel_multi_index, dummy_arr, (half_max+1, 2)) + assert_equal( + np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0]) + assert_raises(ValueError, + np.ravel_multi_index, dummy_arr, (half_max+1, 2), order='F') + + def test_dtypes(self): + # Test with different data types + for dtype in [np.int16, np.uint16, np.int32, + np.uint32, np.int64, np.uint64]: + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) + shape = (5, 8) + uncoords = 8*coords[0]+coords[1] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0]+5*coords[1] + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], + dtype=dtype) + shape = (5, 8, 10) + uncoords = 10*(8*coords[0]+coords[1])+coords[2] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0]+5*(coords[1]+8*coords[2]) + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + def test_clipmodes(self): + # Test clipmodes + assert_equal( + np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'), + np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12))) + assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), + mode=( + 'wrap', 'raise', 'clip', 'raise')), + np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12))) + assert_raises( + ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) + + def test_writeability(self): + # See gh-7269 + x, y = np.unravel_index([1, 2, 3], (4, 5)) + assert_(x.flags.writeable) + assert_(y.flags.writeable) + + def test_0d(self): + # gh-580 + x = np.unravel_index(0, ()) + assert_equal(x, ()) + + assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ()) + assert_raises_regex( + ValueError, "out of bounds", np.unravel_index, [1], ()) + + @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"]) + def test_empty_array_ravel(self, mode): + res = np.ravel_multi_index( + np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode) + assert(res.shape == (0,)) + + with assert_raises(ValueError): + np.ravel_multi_index( + np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode) + + def test_empty_array_unravel(self): + res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0)) + # res is a tuple of three empty arrays + assert(len(res) == 3) + assert(all(a.shape == (0,) for a in res)) + + with assert_raises(ValueError): + np.unravel_index([1], (2, 1, 0)) + +class TestGrid: + def test_basic(self): + a = mgrid[-1:1:10j] + b = mgrid[-1:1:0.1] + assert_(a.shape == (10,)) + assert_(b.shape == (20,)) + assert_(a[0] == -1) + assert_almost_equal(a[-1], 1) + assert_(b[0] == -1) + assert_almost_equal(b[1]-b[0], 0.1, 11) + assert_almost_equal(b[-1], b[0]+19*0.1, 11) + assert_almost_equal(a[1]-a[0], 2.0/9.0, 11) + + def test_linspace_equivalence(self): + y, st = np.linspace(2, 10, retstep=True) + assert_almost_equal(st, 8/49.0) + assert_array_almost_equal(y, mgrid[2:10:50j], 13) + + def test_nd(self): + c = mgrid[-1:1:10j, -2:2:10j] + d = mgrid[-1:1:0.1, -2:2:0.2] + assert_(c.shape == (2, 10, 10)) + assert_(d.shape == (2, 20, 20)) + assert_array_equal(c[0][0, :], -np.ones(10, 'd')) + assert_array_equal(c[1][:, 0], -2*np.ones(10, 
'd')) + assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11) + assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11) + assert_array_almost_equal(d[0, 1, :] - d[0, 0, :], + 0.1*np.ones(20, 'd'), 11) + assert_array_almost_equal(d[1, :, 1] - d[1, :, 0], + 0.2*np.ones(20, 'd'), 11) + + def test_sparse(self): + grid_full = mgrid[-1:1:10j, -2:2:10j] + grid_sparse = ogrid[-1:1:10j, -2:2:10j] + + # sparse grids can be made dense by broadcasting + grid_broadcast = np.broadcast_arrays(*grid_sparse) + for f, b in zip(grid_full, grid_broadcast): + assert_equal(f, b) + + @pytest.mark.parametrize("start, stop, step, expected", [ + (None, 10, 10j, (200, 10)), + (-10, 20, None, (1800, 30)), + ]) + def test_mgrid_size_none_handling(self, start, stop, step, expected): + # regression test None value handling for + # start and step values used by mgrid; + # internally, this aims to cover previously + # unexplored code paths in nd_grid() + grid = mgrid[start:stop:step, start:stop:step] + # need a smaller grid to explore one of the + # untested code paths + grid_small = mgrid[start:stop:step] + assert_equal(grid.size, expected[0]) + assert_equal(grid_small.size, expected[1]) + + def test_accepts_npfloating(self): + # regression test for #16466 + grid64 = mgrid[0.1:0.33:0.1, ] + grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1), ] + assert_(grid32.dtype == np.float64) + assert_array_almost_equal(grid64, grid32) + + # different code path for single slice + grid64 = mgrid[0.1:0.33:0.1] + grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1)] + assert_(grid32.dtype == np.float64) + assert_array_almost_equal(grid64, grid32) + + def test_accepts_longdouble(self): + # regression tests for #16945 + grid64 = mgrid[0.1:0.33:0.1, ] + grid128 = mgrid[ + np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1), + ] + assert_(grid128.dtype == np.longdouble) + assert_array_almost_equal(grid64, grid128) + + grid128c_a = mgrid[0:np.longdouble(1):3.4j] + grid128c_b = mgrid[0:np.longdouble(1):3.4j, ] + assert_(grid128c_a.dtype == grid128c_b.dtype == np.longdouble) + assert_array_equal(grid128c_a, grid128c_b[0]) + + # different code path for single slice + grid64 = mgrid[0.1:0.33:0.1] + grid128 = mgrid[ + np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1) + ] + assert_(grid128.dtype == np.longdouble) + assert_array_almost_equal(grid64, grid128) + + def test_accepts_npcomplexfloating(self): + # Related to #16466 + assert_array_almost_equal( + mgrid[0.1:0.3:3j, ], mgrid[0.1:0.3:np.complex64(3j), ] + ) + + # different code path for single slice + assert_array_almost_equal( + mgrid[0.1:0.3:3j], mgrid[0.1:0.3:np.complex64(3j)] + ) + + # Related to #16945 + grid64_a = mgrid[0.1:0.3:3.3j] + grid64_b = mgrid[0.1:0.3:3.3j, ][0] + assert_(grid64_a.dtype == grid64_b.dtype == np.float64) + assert_array_equal(grid64_a, grid64_b) + + grid128_a = mgrid[0.1:0.3:np.clongdouble(3.3j)] + grid128_b = mgrid[0.1:0.3:np.clongdouble(3.3j), ][0] + assert_(grid128_a.dtype == grid128_b.dtype == np.longdouble) + assert_array_equal(grid64_a, grid64_b) + + +class TestConcatenator: + def test_1d(self): + assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6])) + b = np.ones(5) + c = r_[b, 0, 0, b] + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + + def test_mixed_type(self): + g = r_[10.1, 1:10] + assert_(g.dtype == 'f8') + + def test_more_mixed_type(self): + g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0] + assert_(g.dtype == 'f8') + + def test_complex_step(self): + # 
Regression test for #12262 + g = r_[0:36:100j] + assert_(g.shape == (100,)) + + # Related to #16466 + g = r_[0:36:np.complex64(100j)] + assert_(g.shape == (100,)) + + def test_2d(self): + b = np.random.rand(5, 5) + c = np.random.rand(5, 5) + d = r_['1', b, c] # append columns + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b) + assert_array_equal(d[:, 5:], c) + d = r_[b, c] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5, :], b) + assert_array_equal(d[5:, :], c) + + def test_0d(self): + assert_equal(r_[0, np.array(1), 2], [0, 1, 2]) + assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3]) + assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3]) + + +class TestNdenumerate: + def test_basic(self): + a = np.array([[1, 2], [3, 4]]) + assert_equal(list(ndenumerate(a)), + [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]) + + +class TestIndexExpression: + def test_regression_1(self): + # ticket #1196 + a = np.arange(2) + assert_equal(a[:-1], a[s_[:-1]]) + assert_equal(a[:-1], a[index_exp[:-1]]) + + def test_simple_1(self): + a = np.random.rand(4, 5, 6) + + assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]]) + assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) + + +class TestIx_: + def test_regression_1(self): + # Test empty untyped inputs create outputs of indexing type, gh-5804 + a, = np.ix_(range(0)) + assert_equal(a.dtype, np.intp) + + a, = np.ix_([]) + assert_equal(a.dtype, np.intp) + + # but if the type is specified, don't change it + a, = np.ix_(np.array([], dtype=np.float32)) + assert_equal(a.dtype, np.float32) + + def test_shape_and_dtype(self): + sizes = (4, 5, 3, 2) + # Test both lists and arrays + for func in (range, np.arange): + arrays = np.ix_(*[func(sz) for sz in sizes]) + for k, (a, sz) in enumerate(zip(arrays, sizes)): + assert_equal(a.shape[k], sz) + assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k)) + assert_(np.issubdtype(a.dtype, np.integer)) + + def test_bool(self): + bool_a = [True, False, True, True] + int_a, = np.nonzero(bool_a) + assert_equal(np.ix_(bool_a)[0], int_a) + + def test_1d_only(self): + idx2d = [[1, 2, 3], [4, 5, 6]] + assert_raises(ValueError, np.ix_, idx2d) + + def test_repeated_input(self): + length_of_vector = 5 + x = np.arange(length_of_vector) + out = ix_(x, x) + assert_equal(out[0].shape, (length_of_vector, 1)) + assert_equal(out[1].shape, (1, length_of_vector)) + # check that input shape is not modified + assert_equal(x.shape, (length_of_vector,)) + + +def test_c_(): + a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] + assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) + + +class TestFillDiagonal: + def test_basic(self): + a = np.zeros((3, 3), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]]) + ) + + def test_tall_matrix(self): + a = np.zeros((10, 3), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0]]) + ) + + def test_tall_matrix_wrap(self): + a = np.zeros((10, 3), int) + fill_diagonal(a, 5, True) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0]]) + ) + + def test_wide_matrix(self): + a = np.zeros((3, 10), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 5, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]) + ) + + def 
test_operate_4d_array(self): + a = np.zeros((3, 3, 3, 3), int) + fill_diagonal(a, 4) + i = np.array([0, 1, 2]) + assert_equal(np.where(a != 0), (i, i, i, i)) + + def test_low_dim_handling(self): + # raise error with low dimensionality + a = np.zeros(3, int) + with assert_raises_regex(ValueError, "at least 2-d"): + fill_diagonal(a, 5) + + def test_hetero_shape_handling(self): + # raise error with high dimensionality and + # shape mismatch + a = np.zeros((3,3,7,3), int) + with assert_raises_regex(ValueError, "equal length"): + fill_diagonal(a, 2) + + +def test_diag_indices(): + di = diag_indices(4) + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + a[di] = 100 + assert_array_equal( + a, np.array([[100, 2, 3, 4], + [5, 100, 7, 8], + [9, 10, 100, 12], + [13, 14, 15, 100]]) + ) + + # Now, we create indices to manipulate a 3-d array: + d3 = diag_indices(2, 3) + + # And use it to set the diagonal of a zeros array to 1: + a = np.zeros((2, 2, 2), int) + a[d3] = 1 + assert_array_equal( + a, np.array([[[1, 0], + [0, 0]], + [[0, 0], + [0, 1]]]) + ) + + +class TestDiagIndicesFrom: + + def test_diag_indices_from(self): + x = np.random.random((4, 4)) + r, c = diag_indices_from(x) + assert_array_equal(r, np.arange(4)) + assert_array_equal(c, np.arange(4)) + + def test_error_small_input(self): + x = np.ones(7) + with assert_raises_regex(ValueError, "at least 2-d"): + diag_indices_from(x) + + def test_error_shape_mismatch(self): + x = np.zeros((3, 3, 2, 3), int) + with assert_raises_regex(ValueError, "equal length"): + diag_indices_from(x) + + +def test_ndindex(): + x = list(ndindex(1, 2, 3)) + expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))] + assert_array_equal(x, expected) + + x = list(ndindex((1, 2, 3))) + assert_array_equal(x, expected) + + # Test use of scalars and tuples + x = list(ndindex((3,))) + assert_array_equal(x, list(ndindex(3))) + + # Make sure size argument is optional + x = list(ndindex()) + assert_equal(x, [()]) + + x = list(ndindex(())) + assert_equal(x, [()]) + + # Make sure 0-sized ndindex works correctly + x = list(ndindex(*[0])) + assert_equal(x, []) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_io.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_io.py new file mode 100644 index 00000000..c1032df8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_io.py @@ -0,0 +1,2775 @@ +import sys +import gc +import gzip +import os +import threading +import time +import warnings +import io +import re +import pytest +from pathlib import Path +from tempfile import NamedTemporaryFile +from io import BytesIO, StringIO +from datetime import datetime +import locale +from multiprocessing import Value, get_context +from ctypes import c_bool + +import numpy as np +import numpy.ma as ma +from numpy.lib._iotools import ConverterError, ConversionWarning +from numpy.compat import asbytes +from numpy.ma.testutils import assert_equal +from numpy.testing import ( + assert_warns, assert_, assert_raises_regex, assert_raises, + assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY, + HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings, + break_cycles, IS_WASM + ) +from numpy.testing._private.utils import requires_memory + + +class TextIO(BytesIO): + """Helper IO class. + + Writes encode strings to bytes if needed, reads return bytes. 
+ This makes it easier to emulate files opened in binary mode + without needing to explicitly convert strings to bytes in + setting up the test data. + + """ + def __init__(self, s=""): + BytesIO.__init__(self, asbytes(s)) + + def write(self, s): + BytesIO.write(self, asbytes(s)) + + def writelines(self, lines): + BytesIO.writelines(self, [asbytes(s) for s in lines]) + + +IS_64BIT = sys.maxsize > 2**32 +try: + import bz2 + HAS_BZ2 = True +except ImportError: + HAS_BZ2 = False +try: + import lzma + HAS_LZMA = True +except ImportError: + HAS_LZMA = False + + +def strptime(s, fmt=None): + """ + This function is available in the datetime module only from Python >= + 2.5. + + """ + if type(s) == bytes: + s = s.decode("latin1") + return datetime(*time.strptime(s, fmt)[:3]) + + +class RoundtripTest: + def roundtrip(self, save_func, *args, **kwargs): + """ + save_func : callable + Function used to save arrays to file. + file_on_disk : bool + If true, store the file on disk, instead of in a + string buffer. + save_kwds : dict + Parameters passed to `save_func`. + load_kwds : dict + Parameters passed to `numpy.load`. + args : tuple of arrays + Arrays stored to file. + + """ + save_kwds = kwargs.get('save_kwds', {}) + load_kwds = kwargs.get('load_kwds', {"allow_pickle": True}) + file_on_disk = kwargs.get('file_on_disk', False) + + if file_on_disk: + target_file = NamedTemporaryFile(delete=False) + load_file = target_file.name + else: + target_file = BytesIO() + load_file = target_file + + try: + arr = args + + save_func(target_file, *arr, **save_kwds) + target_file.flush() + target_file.seek(0) + + if sys.platform == 'win32' and not isinstance(target_file, BytesIO): + target_file.close() + + arr_reloaded = np.load(load_file, **load_kwds) + + self.arr = arr + self.arr_reloaded = arr_reloaded + finally: + if not isinstance(target_file, BytesIO): + target_file.close() + # holds an open file descriptor so it can't be deleted on win + if 'arr_reloaded' in locals(): + if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): + os.remove(target_file.name) + + def check_roundtrips(self, a): + self.roundtrip(a) + self.roundtrip(a, file_on_disk=True) + self.roundtrip(np.asfortranarray(a)) + self.roundtrip(np.asfortranarray(a), file_on_disk=True) + if a.shape[0] > 1: + # neither C nor Fortran contiguous for 2D arrays or more + self.roundtrip(np.asfortranarray(a)[1:]) + self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True) + + def test_array(self): + a = np.array([], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], int) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble) + self.check_roundtrips(a) + + def test_array_object(self): + a = np.array([], object) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], object) + self.check_roundtrips(a) + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + self.roundtrip(a) + + @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32") + def test_mmap(self): + a = np.array([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + + a = np.asfortranarray([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + + def test_record(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + self.check_roundtrips(a) + + 
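+    def test_save_load_roundtrip_sketch(self):
+        # A minimal, self-contained sketch of the save -> flush -> seek ->
+        # load cycle that the `roundtrip` helper above drives.  Illustrative
+        # only: this method is an editorial sketch, not part of the upstream
+        # NumPy test suite, and it uses only names already imported in this
+        # file.
+        buf = BytesIO()
+        np.save(buf, np.arange(3))
+        buf.flush()
+        buf.seek(0)
+        assert_equal(np.load(buf), np.arange(3))
+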
@pytest.mark.slow + def test_format_2_0(self): + dt = [(("%d" % i) * 100, float) for i in range(500)] + a = np.ones(1000, dtype=dt) + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', UserWarning) + self.check_roundtrips(a) + + +class TestSaveLoad(RoundtripTest): + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.save, *args, **kwargs) + assert_equal(self.arr[0], self.arr_reloaded) + assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype) + assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) + + +class TestSavezLoad(RoundtripTest): + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) + try: + for n, arr in enumerate(self.arr): + reloaded = self.arr_reloaded['arr_%d' % n] + assert_equal(arr, reloaded) + assert_equal(arr.dtype, reloaded.dtype) + assert_equal(arr.flags.fnc, reloaded.flags.fnc) + finally: + # delete tempfile, must be done here on windows + if self.arr_reloaded.fid: + self.arr_reloaded.fid.close() + os.remove(self.arr_reloaded.fid.name) + + @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy") + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + @pytest.mark.slow + def test_big_arrays(self): + L = (1 << 31) + 100000 + a = np.empty(L, dtype=np.uint8) + with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp: + np.savez(tmp, a=a) + del a + npfile = np.load(tmp) + a = npfile['a'] # Should succeed + npfile.close() + del a # Avoid pyflakes unused variable warning. + + def test_multiple_arrays(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + self.roundtrip(a, b) + + def test_named_arrays(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + c = BytesIO() + np.savez(c, file_a=a, file_b=b) + c.seek(0) + l = np.load(c) + assert_equal(a, l['file_a']) + assert_equal(b, l['file_b']) + + + def test_tuple_getitem_raises(self): + # gh-23748 + a = np.array([1, 2, 3]) + f = BytesIO() + np.savez(f, a=a) + f.seek(0) + l = np.load(f) + with pytest.raises(KeyError, match="(1, 2)"): + l[1, 2] + + def test_BagObj(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + c = BytesIO() + np.savez(c, file_a=a, file_b=b) + c.seek(0) + l = np.load(c) + assert_equal(sorted(dir(l.f)), ['file_a','file_b']) + assert_equal(a, l.f.file_a) + assert_equal(b, l.f.file_b) + + @pytest.mark.skipif(IS_WASM, reason="Cannot start thread") + def test_savez_filename_clashes(self): + # Test that issue #852 is fixed + # and savez functions in multithreaded environment + + def writer(error_list): + with temppath(suffix='.npz') as tmp: + arr = np.random.randn(500, 500) + try: + np.savez(tmp, arr=arr) + except OSError as err: + error_list.append(err) + + errors = [] + threads = [threading.Thread(target=writer, args=(errors,)) + for j in range(3)] + for t in threads: + t.start() + for t in threads: + t.join() + + if errors: + raise AssertionError(errors) + + def test_not_closing_opened_fid(self): + # Test that issue #2178 is fixed: + # verify could seek on 'loaded' file + with temppath(suffix='.npz') as tmp: + with open(tmp, 'wb') as fp: + np.savez(fp, data='LOVELY LOAD') + with open(tmp, 'rb', 10000) as fp: + fp.seek(0) + assert_(not fp.closed) + np.load(fp)['data'] + # fp must not get closed by .load + assert_(not fp.closed) + fp.seek(0) + assert_(not fp.closed) + + @pytest.mark.slow_pypy + def 
test_closing_fid(self):
+        # Test that issue #1517 (too many opened files) remains closed
+        # It might be a "weak" test since it failed to get triggered on
+        # e.g. Debian sid of 2012 Jul 05, but was reported to
+        # trigger the failure on Ubuntu 10.04:
+        # http://projects.scipy.org/numpy/ticket/1517#comment:2
+        with temppath(suffix='.npz') as tmp:
+            np.savez(tmp, data='LOVELY LOAD')
+            # We need to check if the garbage collector can properly close
+            # the numpy npz files returned by np.load when their reference
+            # count goes to zero.  Python 3 running in debug mode raises a
+            # ResourceWarning when file closing is left to the garbage
+            # collector, so we catch the warnings.
+            with suppress_warnings() as sup:
+                sup.filter(ResourceWarning)  # TODO: specify exact message
+                for i in range(1, 1025):
+                    try:
+                        np.load(tmp)["data"]
+                    except Exception as e:
+                        msg = "Failed to load data from a file: %s" % e
+                        raise AssertionError(msg)
+                    finally:
+                        if IS_PYPY:
+                            gc.collect()
+
+    def test_closing_zipfile_after_load(self):
+        # Check that zipfile owns file and can close it. This needs to
+        # pass a file name to load for the test. On Windows, a failure
+        # will cause a second error to be raised when the attempt to
+        # remove the open file is made.
+        prefix = 'numpy_test_closing_zipfile_after_load_'
+        with temppath(suffix='.npz', prefix=prefix) as tmp:
+            np.savez(tmp, lab='place holder')
+            data = np.load(tmp)
+            fp = data.zip.fp
+            data.close()
+            assert_(fp.closed)
+
+    @pytest.mark.parametrize("count, expected_repr", [
+        (1, "NpzFile {fname!r} with keys: arr_0"),
+        (5, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4"),
+        # _MAX_REPR_ARRAY_COUNT is 5, so files with more than 5 keys are
+        # expected to end in '...'
+        (6, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4..."),
+    ])
+    def test_repr_lists_keys(self, count, expected_repr):
+        a = np.array([[1, 2], [3, 4]], float)
+        with temppath(suffix='.npz') as tmp:
+            np.savez(tmp, *[a]*count)
+            l = np.load(tmp)
+            assert repr(l) == expected_repr.format(fname=tmp)
+            l.close()
+
+
+class TestSaveTxt:
+    def test_array(self):
+        a = np.array([[1, 2], [3, 4]], float)
+        fmt = "%.18e"
+        c = BytesIO()
+        np.savetxt(c, a, fmt=fmt)
+        c.seek(0)
+        assert_equal(c.readlines(),
+                     [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
+                      asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
+
+        a = np.array([[1, 2], [3, 4]], int)
+        c = BytesIO()
+        np.savetxt(c, a, fmt='%d')
+        c.seek(0)
+        assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
+
+    def test_1D(self):
+        a = np.array([1, 2, 3, 4], int)
+        c = BytesIO()
+        np.savetxt(c, a, fmt='%d')
+        c.seek(0)
+        lines = c.readlines()
+        assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
+
+    def test_0D_3D(self):
+        c = BytesIO()
+        assert_raises(ValueError, np.savetxt, c, np.array(1))
+        assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
+
+    def test_structured(self):
+        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+        c = BytesIO()
+        np.savetxt(c, a, fmt='%d')
+        c.seek(0)
+        assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
+
+    def test_structured_padded(self):
+        # gh-13297
+        a = np.array([(1, 2, 3),(4, 5, 6)], dtype=[
+            ('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4')
+        ])
+        c = BytesIO()
+        np.savetxt(c, a[['foo', 'baz']], fmt='%d')
+        c.seek(0)
+        assert_equal(c.readlines(), [b'1 3\n', b'4 6\n'])
+
+    def test_multifield_view(self):
+        a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
+        v = a[['x', 'z']]
+        with temppath(suffix='.npy') as path:
+            path = Path(path)
+            np.save(path, v)
+            data = np.load(path)
+
assert_array_equal(data, v) + + def test_delimiter(self): + a = np.array([[1., 2.], [3., 4.]]) + c = BytesIO() + np.savetxt(c, a, delimiter=',', fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1,2\n', b'3,4\n']) + + def test_format(self): + a = np.array([(1, 2), (3, 4)]) + c = BytesIO() + # Sequence of formats + np.savetxt(c, a, fmt=['%02d', '%3.1f']) + c.seek(0) + assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n']) + + # A single multiformat string + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Specify delimiter, should be overridden + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Bad fmt, should raise a ValueError + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, a, fmt=99) + + def test_header_footer(self): + # Test the functionality of the header and footer keyword argument. + + c = BytesIO() + a = np.array([(1, 2), (3, 4)], dtype=int) + test_header_footer = 'Test header / footer' + # Test the header keyword argument + np.savetxt(c, a, fmt='%1d', header=test_header_footer) + c.seek(0) + assert_equal(c.read(), + asbytes('# ' + test_header_footer + '\n1 2\n3 4\n')) + # Test the footer keyword argument + c = BytesIO() + np.savetxt(c, a, fmt='%1d', footer=test_header_footer) + c.seek(0) + assert_equal(c.read(), + asbytes('1 2\n3 4\n# ' + test_header_footer + '\n')) + # Test the commentstr keyword argument used on the header + c = BytesIO() + commentstr = '% ' + np.savetxt(c, a, fmt='%1d', + header=test_header_footer, comments=commentstr) + c.seek(0) + assert_equal(c.read(), + asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n')) + # Test the commentstr keyword argument used on the footer + c = BytesIO() + commentstr = '% ' + np.savetxt(c, a, fmt='%1d', + footer=test_header_footer, comments=commentstr) + c.seek(0) + assert_equal(c.read(), + asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n')) + + def test_file_roundtrip(self): + with temppath() as name: + a = np.array([(1, 2), (3, 4)]) + np.savetxt(name, a) + b = np.loadtxt(name) + assert_array_equal(a, b) + + def test_complex_arrays(self): + ncols = 2 + nrows = 2 + a = np.zeros((ncols, nrows), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re + 1.0j * im + + # One format only + c = BytesIO() + np.savetxt(c, a, fmt=' %+.3e') + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n', + b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n']) + + # One format for each real and imaginary part + c = BytesIO() + np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols) + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n', + b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n']) + + # One format for each complex number + c = BytesIO() + np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols) + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n', + b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n']) + + def test_complex_negative_exponent(self): + # Previous to 1.15, some formats generated x+-yj, gh 7895 + ncols = 2 + nrows = 2 + a = np.zeros((ncols, nrows), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re - 1.0j * im + c = BytesIO() + np.savetxt(c, a, fmt='%.3e') + c.seek(0) + lines = c.readlines() + 
assert_equal(
+            lines,
+            [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
+             b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
+
+
+    def test_custom_writer(self):
+
+        class CustomWriter(list):
+            def write(self, text):
+                self.extend(text.split(b'\n'))
+
+        w = CustomWriter()
+        a = np.array([(1, 2), (3, 4)])
+        np.savetxt(w, a)
+        b = np.loadtxt(w)
+        assert_array_equal(a, b)
+
+    def test_unicode(self):
+        utf8 = b'\xcf\x96'.decode('UTF-8')
+        a = np.array([utf8], dtype=np.str_)
+        with tempdir() as tmpdir:
+            # set the encoding, as on Windows it may not be unicode
+            # even on py3
+            np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
+                       encoding='UTF-8')
+
+    def test_unicode_roundtrip(self):
+        utf8 = b'\xcf\x96'.decode('UTF-8')
+        a = np.array([utf8], dtype=np.str_)
+        # our gz wrapper supports encoding
+        suffixes = ['', '.gz']
+        if HAS_BZ2:
+            suffixes.append('.bz2')
+        if HAS_LZMA:
+            suffixes.extend(['.xz', '.lzma'])
+        with tempdir() as tmpdir:
+            for suffix in suffixes:
+                np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
+                           fmt=['%s'], encoding='UTF-16-LE')
+                b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
+                               encoding='UTF-16-LE', dtype=np.str_)
+                assert_array_equal(a, b)
+
+    def test_unicode_bytestream(self):
+        utf8 = b'\xcf\x96'.decode('UTF-8')
+        a = np.array([utf8], dtype=np.str_)
+        s = BytesIO()
+        np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
+        s.seek(0)
+        assert_equal(s.read().decode('UTF-8'), utf8 + '\n')
+
+    def test_unicode_stringstream(self):
+        utf8 = b'\xcf\x96'.decode('UTF-8')
+        a = np.array([utf8], dtype=np.str_)
+        s = StringIO()
+        np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
+        s.seek(0)
+        assert_equal(s.read(), utf8 + '\n')
+
+    @pytest.mark.parametrize("fmt", ["%f", b"%f"])
+    @pytest.mark.parametrize("iotype", [StringIO, BytesIO])
+    def test_unicode_and_bytes_fmt(self, fmt, iotype):
+        # string type of fmt should not matter, see also gh-4053
+        a = np.array([1.])
+        s = iotype()
+        np.savetxt(s, a, fmt=fmt)
+        s.seek(0)
+        if iotype is StringIO:
+            assert_equal(s.read(), "%f\n" % 1.)
+        else:
+            assert_equal(s.read(), b"%f\n" % 1.)
+
+    @pytest.mark.skipif(sys.platform=='win32', reason="files>4GB may not work")
+    @pytest.mark.slow
+    @requires_memory(free_bytes=7e9)
+    def test_large_zip(self):
+        def check_large_zip(memoryerror_raised):
+            memoryerror_raised.value = False
+            try:
+                # The test takes at least 6GB of memory, writes a file larger
+                # than 4GB. This tests the ``allowZip64`` kwarg to ``zipfile``
+                test_data = np.asarray([np.random.rand(
+                                        np.random.randint(50,100),4)
+                                        for i in range(800000)], dtype=object)
+                with tempdir() as tmpdir:
+                    np.savez(os.path.join(tmpdir, 'test.npz'),
+                             test_data=test_data)
+            except MemoryError:
+                memoryerror_raised.value = True
+                raise
+        # run in a subprocess to ensure memory is released on PyPy, see gh-15775
+        # Use an object in shared memory to re-raise the MemoryError exception
+        # in our process if needed, see gh-16889
+        memoryerror_raised = Value(c_bool)
+
+        # Since Python 3.8, the default start method for multiprocessing on
+        # macOS has been changed from 'fork' to 'spawn', which changes the
+        # memory sharing model and leads to a failed test for check_large_zip
+        ctx = get_context('fork')
+        p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,))
+        p.start()
+        p.join()
+        if memoryerror_raised.value:
+            raise MemoryError("Child process raised a MemoryError exception")
+        # -9 indicates a SIGKILL, probably an OOM.
+ if p.exitcode == -9: + pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient") + assert p.exitcode == 0 + +class LoadTxtBase: + def check_compressed(self, fopen, suffixes): + # Test that we can load data from a compressed file + wanted = np.arange(6).reshape((2, 3)) + linesep = ('\n', '\r\n', '\r') + for sep in linesep: + data = '0 1 2' + sep + '3 4 5' + for suffix in suffixes: + with temppath(suffix=suffix) as name: + with fopen(name, mode='wt', encoding='UTF-32-LE') as f: + f.write(data) + res = self.loadfunc(name, encoding='UTF-32-LE') + assert_array_equal(res, wanted) + with fopen(name, "rt", encoding='UTF-32-LE') as f: + res = self.loadfunc(f) + assert_array_equal(res, wanted) + + def test_compressed_gzip(self): + self.check_compressed(gzip.open, ('.gz',)) + + @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2") + def test_compressed_bz2(self): + self.check_compressed(bz2.open, ('.bz2',)) + + @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma") + def test_compressed_lzma(self): + self.check_compressed(lzma.open, ('.xz', '.lzma')) + + def test_encoding(self): + with temppath() as path: + with open(path, "wb") as f: + f.write('0.\n1.\n2.'.encode("UTF-16")) + x = self.loadfunc(path, encoding="UTF-16") + assert_array_equal(x, [0., 1., 2.]) + + def test_stringload(self): + # umlaute + nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8") + with temppath() as path: + with open(path, "wb") as f: + f.write(nonascii.encode("UTF-16")) + x = self.loadfunc(path, encoding="UTF-16", dtype=np.str_) + assert_array_equal(x, nonascii) + + def test_binary_decode(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=np.str_, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_converters_decode(self): + # test converters that decode strings + c = TextIO() + c.write(b'\xcf\x96') + c.seek(0) + x = self.loadfunc(c, dtype=np.str_, + converters={0: lambda x: x.decode('UTF-8')}) + a = np.array([b'\xcf\x96'.decode('UTF-8')]) + assert_array_equal(x, a) + + def test_converters_nodecode(self): + # test native string converters enabled by setting an encoding + utf8 = b'\xcf\x96'.decode('UTF-8') + with temppath() as path: + with io.open(path, 'wt', encoding='UTF-8') as f: + f.write(utf8) + x = self.loadfunc(path, dtype=np.str_, + converters={0: lambda x: x + 't'}, + encoding='UTF-8') + a = np.array([utf8 + 't']) + assert_array_equal(x, a) + + +class TestLoadTxt(LoadTxtBase): + loadfunc = staticmethod(np.loadtxt) + + def setup_method(self): + # lower chunksize for testing + self.orig_chunk = np.lib.npyio._loadtxt_chunksize + np.lib.npyio._loadtxt_chunksize = 1 + + def teardown_method(self): + np.lib.npyio._loadtxt_chunksize = self.orig_chunk + + def test_record(self): + c = TextIO() + c.write('1 2\n3 4') + c.seek(0) + x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)]) + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_array_equal(x, a) + + d = TextIO() + d.write('M 64 75.0\nF 25 60.0') + d.seek(0) + mydescriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + b = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=mydescriptor) + y = np.loadtxt(d, dtype=mydescriptor) + assert_array_equal(y, b) + + def test_array(self): + c = TextIO() + c.write('1 2\n3 4') + + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([[1, 2], [3, 4]], int) + assert_array_equal(x, a) + + c.seek(0) + x = np.loadtxt(c, dtype=float) + a = np.array([[1, 2], 
[3, 4]], float) + assert_array_equal(x, a) + + def test_1D(self): + c = TextIO() + c.write('1\n2\n3\n4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('1,2,3,4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',') + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + def test_missing(self): + c = TextIO() + c.write('1,2,3,,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + a = np.array([1, 2, 3, -999, 5], int) + assert_array_equal(x, a) + + def test_converters_with_usecols(self): + c = TextIO() + c.write('1,2,3,,5\n6,7,8,9,10\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + a = np.array([[2, -999], [7, 9]], int) + assert_array_equal(x, a) + + def test_comments_unicode(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments='#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_comments_byte(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=b'#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_comments_multiple(self): + c = TextIO() + c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=['#', '@', '//']) + a = np.array([[1, 2, 3], [4, 5, 6]], int) + assert_array_equal(x, a) + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_comments_multi_chars(self): + c = TextIO() + c.write('/* comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments='/*') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + # Check that '/*' is not transformed to ['/', '*'] + c = TextIO() + c.write('*/ comment\n1,2,3,5\n') + c.seek(0) + assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',', + comments='/*') + + def test_skiprows(self): + c = TextIO() + c.write('comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_usecols(self): + a = np.array([[1, 2], [3, 4]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1,)) + assert_array_equal(x, a[:, 1]) + + a = np.array([[1, 2, 3], [3, 4, 5]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1, 2)) + assert_array_equal(x, a[:, 1:]) + + # Testing with arrays instead of tuples. 
+ c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2])) + assert_array_equal(x, a[:, 1:]) + + # Testing with an integer instead of a sequence + for int_type in [int, np.int8, np.int16, + np.int32, np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]: + to_read = int_type(1) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=to_read) + assert_array_equal(x, a[:, 1]) + + # Testing with some crazy custom integer type + class CrazyInt: + def __index__(self): + return 1 + + crazy_int = CrazyInt() + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=crazy_int) + assert_array_equal(x, a[:, 1]) + + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(crazy_int,)) + assert_array_equal(x, a[:, 1]) + + # Checking with dtypes defined converters. + data = '''JOE 70.1 25.3 + BOB 60.5 27.9 + ''' + c = TextIO(data) + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(arr['stid'], [b"JOE", b"BOB"]) + assert_equal(arr['temp'], [25.3, 27.9]) + + # Testing non-ints in usecols + c.seek(0) + bogus_idx = 1.5 + assert_raises_regex( + TypeError, + '^usecols must be.*%s' % type(bogus_idx).__name__, + np.loadtxt, c, usecols=bogus_idx + ) + + assert_raises_regex( + TypeError, + '^usecols must be.*%s' % type(bogus_idx).__name__, + np.loadtxt, c, usecols=[0, bogus_idx, 0] + ) + + def test_bad_usecols(self): + with pytest.raises(OverflowError): + np.loadtxt(["1\n"], usecols=[2**64], delimiter=",") + with pytest.raises((ValueError, OverflowError)): + # Overflow error on 32bit platforms + np.loadtxt(["1\n"], usecols=[2**62], delimiter=",") + with pytest.raises(TypeError, + match="If a structured dtype .*. But 1 usecols were given and " + "the number of fields is 3."): + np.loadtxt(["1,1\n"], dtype="i,(2)i", usecols=[0], delimiter=",") + + def test_fancy_dtype(self): + c = TextIO() + c.write('1,2,3.0\n4,5,6.0\n') + c.seek(0) + dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + x = np.loadtxt(c, dtype=dt, delimiter=',') + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt) + assert_array_equal(x, a) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_3d_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, + [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], + dtype=dt) + assert_array_equal(x, a) + + def test_str_dtype(self): + # see gh-8033 + c = ["str1", "str2"] + + for dt in (str, np.bytes_): + a = np.array(["str1", "str2"], dtype=dt) + x = np.loadtxt(c, dtype=dt) + assert_array_equal(x, a) + + def test_empty_file(self): + with pytest.warns(UserWarning, match="input contained no data"): + c = TextIO() + x = np.loadtxt(c) + assert_equal(x.shape, (0,)) + x = np.loadtxt(c, dtype=np.int64) + assert_equal(x.shape, (0,)) + assert_(x.dtype == np.int64) + + def test_unused_converter(self): + c = TextIO() + c.writelines(['1 21\n', '3 42\n']) + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_array_equal(data, [21, 42]) + + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_array_equal(data, [33, 66]) + 
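+    def test_converter_column_keying_sketch(self):
+        # Condensed editorial sketch, not part of the upstream suite: it
+        # restates the rule the two cases above exercise.  Converters are
+        # keyed by column index in the original file, not by position in
+        # usecols, so {0: ...} is ignored under usecols=(1,) while
+        # {1: ...} is applied ('21' and '42' then parse as hex).
+        c = TextIO()
+        c.writelines(['1 21\n', '3 42\n'])
+        c.seek(0)
+        ignored = np.loadtxt(c, usecols=(1,),
+                             converters={0: lambda s: int(s, 16)})
+        assert_array_equal(ignored, [21, 42])
+
+        c.seek(0)
+        applied = np.loadtxt(c, usecols=(1,),
+                             converters={1: lambda s: int(s, 16)})
+        assert_array_equal(applied, [33, 66])
+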
+ def test_dtype_with_object(self): + # Test using an explicit dtype with an object + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', object)] + func = lambda s: strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, control) + + def test_uint64_type(self): + tgt = (9223372043271415339, 9223372043271415853) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.uint64) + assert_equal(res, tgt) + + def test_int64_type(self): + tgt = (-9223372036854775807, 9223372036854775807) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.int64) + assert_equal(res, tgt) + + def test_from_float_hex(self): + # IEEE doubles and floats only, otherwise the float32 + # conversion may fail. + tgt = np.logspace(-10, 10, 5).astype(np.float32) + tgt = np.hstack((tgt, -tgt)).astype(float) + inp = '\n'.join(map(float.hex, tgt)) + c = TextIO() + c.write(inp) + for dt in [float, np.float32]: + c.seek(0) + res = np.loadtxt( + c, dtype=dt, converters=float.fromhex, encoding="latin1") + assert_equal(res, tgt, err_msg="%s" % dt) + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_default_float_converter_no_default_hex_conversion(self): + """ + Ensure that fromhex is only used for values with the correct prefix and + is not called by default. Regression test related to gh-19598. + """ + c = TextIO("a b c") + with pytest.raises(ValueError, + match=".*convert string 'a' to float64 at row 0, column 1"): + np.loadtxt(c) + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_default_float_converter_exception(self): + """ + Ensure that the exception message raised during failed floating point + conversion is correct. Regression test related to gh-19598. 
+        """
+        c = TextIO("qrs tuv")  # Invalid values for default float converter
+        with pytest.raises(ValueError,
+                match="could not convert string 'qrs' to float64"):
+            np.loadtxt(c)
+
+    def test_from_complex(self):
+        tgt = (complex(1, 1), complex(1, -1))
+        c = TextIO()
+        c.write("%s %s" % tgt)
+        c.seek(0)
+        res = np.loadtxt(c, dtype=complex)
+        assert_equal(res, tgt)
+
+    def test_complex_misformatted(self):
+        # test for backward compatibility
+        # some complex formats used to generate x+-yj
+        a = np.zeros((2, 2), dtype=np.complex128)
+        re = np.pi
+        im = np.e
+        a[:] = re - 1.0j * im
+        c = BytesIO()
+        np.savetxt(c, a, fmt='%.16e')
+        c.seek(0)
+        txt = c.read()
+        c.seek(0)
+        # misformat the sign on the imaginary part, gh 7895
+        txt_bad = txt.replace(b'e+00-', b'e00+-')
+        assert_(txt_bad != txt)
+        c.write(txt_bad)
+        c.seek(0)
+        res = np.loadtxt(c, dtype=complex)
+        assert_equal(res, a)
+
+    def test_universal_newline(self):
+        with temppath() as name:
+            with open(name, 'w') as f:
+                f.write('1 21\r3 42\r')
+            data = np.loadtxt(name)
+        assert_array_equal(data, [[1, 21], [3, 42]])
+
+    def test_empty_field_after_tab(self):
+        c = TextIO()
+        c.write('1 \t2 \t3\tstart \n4\t5\t6\t  \n7\t8\t9.5\t')
+        c.seek(0)
+        dt = {'names': ('x', 'y', 'z', 'comment'),
+              'formats': ('<i4', '<i4', '<f4', '|S8')}
+        x = np.loadtxt(c, dtype=dt, delimiter='\t')
+        a = np.array([b'start ', b'  ', b''])
+        assert_array_equal(x['comment'], a)
+
+    def test_max_rows(self):
+        # test max_rows > num rows
+        c = TextIO()
+        c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
+        c.seek(0)
+        x = np.loadtxt(c, dtype=int, delimiter=',',
+                       skiprows=1, max_rows=6)
+        a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int)
+        assert_array_equal(x, a)
+
+    @pytest.mark.parametrize(["skip", "data"], [
+            (1, ["ignored\n", "1,2\n", "\n", "3,4\n"]),
+            # "Bad" lines that do not end in newlines:
+            (1, ["ignored", "1,2", "", "3,4"]),
+            (1, StringIO("ignored\n1,2\n\n3,4")),
+            # Same as above, but do not skip any lines:
+            (0, ["-1,0\n", "1,2\n", "\n", "3,4\n"]),
+            (0, ["-1,0", "1,2", "", "3,4"]),
+            (0, StringIO("-1,0\n1,2\n\n3,4"))])
+    def test_max_rows_empty_lines(self, skip, data):
+        with pytest.warns(UserWarning,
+                          match=f"Input line 3.*max_rows={3-skip}"):
+            res = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",",
+                             max_rows=3-skip)
+        assert_array_equal(res, [[-1, 0], [1, 2], [3, 4]][skip:])
+
+        if isinstance(data, StringIO):
+            data.seek(0)
+
+            with warnings.catch_warnings():
+                warnings.simplefilter("error", UserWarning)
+                with pytest.raises(UserWarning):
+                    np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",",
+                               max_rows=3-skip)
+
+
+class Testfromregex:
+    def test_record(self):
+        c = TextIO()
+        c.write('1.312 foo\n1.534 bar\n4.444 qux')
+        c.seek(0)
+
+        dt = [('num', np.float64), ('val', 'S3')]
+        x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
+        a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
+                     dtype=dt)
+        assert_array_equal(x, a)
+
+    def test_record_2(self):
+        c = TextIO()
+        c.write('1312 foo\n1534 bar\n4444 qux')
+        c.seek(0)
+
+        dt = [('num', np.int32), ('val', 'S3')]
+        x = np.fromregex(c, r"(\d+)\s+(...)", dt)
+        a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
+                     dtype=dt)
+        assert_array_equal(x, a)
+
+    def test_record_3(self):
+        c = TextIO()
+        c.write('1312 foo\n1534 bar\n4444 qux')
+        c.seek(0)
+
+        dt = [('num', np.float64)]
+        x = np.fromregex(c, r"(\d+)\s+...", dt)
+        a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
+        assert_array_equal(x, a)
+
+    @pytest.mark.parametrize("path_type", [str, Path])
+    def test_record_unicode(self, path_type):
+        utf8 = b'\xcf\x96'
+        with temppath() as str_path:
+            path = path_type(str_path)
+            with open(path, 'wb') as f:
+                f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')
+
+            dt = 
[('num', np.float64), ('val', 'U4')] + x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8') + a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'), + (4.444, 'qux')], dtype=dt) + assert_array_equal(x, a) + + regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE) + x = np.fromregex(path, regexp, dt, encoding='UTF-8') + assert_array_equal(x, a) + + def test_compiled_bytes(self): + regexp = re.compile(b'(\\d)') + c = BytesIO(b'123') + dt = [('num', np.float64)] + a = np.array([1, 2, 3], dtype=dt) + x = np.fromregex(c, regexp, dt) + assert_array_equal(x, a) + + def test_bad_dtype_not_structured(self): + regexp = re.compile(b'(\\d)') + c = BytesIO(b'123') + with pytest.raises(TypeError, match='structured datatype'): + np.fromregex(c, regexp, dtype=np.float64) + + +#####-------------------------------------------------------------------------- + + +class TestFromTxt(LoadTxtBase): + loadfunc = staticmethod(np.genfromtxt) + + def test_record(self): + # Test w/ explicit dtype + data = TextIO('1 2\n3 4') + test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)]) + control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_equal(test, control) + # + data = TextIO('M 64.0 75.0\nF 25.0 60.0') + descriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)], + dtype=descriptor) + test = np.genfromtxt(data, dtype=descriptor) + assert_equal(test, control) + + def test_array(self): + # Test outputting a standard ndarray + data = TextIO('1 2\n3 4') + control = np.array([[1, 2], [3, 4]], dtype=int) + test = np.genfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data.seek(0) + control = np.array([[1, 2], [3, 4]], dtype=float) + test = np.loadtxt(data, dtype=float) + assert_array_equal(test, control) + + def test_1D(self): + # Test squeezing to 1D + control = np.array([1, 2, 3, 4], int) + # + data = TextIO('1\n2\n3\n4\n') + test = np.genfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data = TextIO('1,2,3,4\n') + test = np.genfromtxt(data, dtype=int, delimiter=',') + assert_array_equal(test, control) + + def test_comments(self): + # Test the stripping of comments + control = np.array([1, 2, 3, 5], int) + # Comment on its own line + data = TextIO('# comment\n1,2,3,5\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + # Comment at the end of a line + data = TextIO('1,2,3,5# comment\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + + def test_skiprows(self): + # Test row skipping + control = np.array([1, 2, 3, 5], int) + kwargs = dict(dtype=int, delimiter=',') + # + data = TextIO('comment\n1,2,3,5\n') + test = np.genfromtxt(data, skip_header=1, **kwargs) + assert_equal(test, control) + # + data = TextIO('# comment\n1,2,3,5\n') + test = np.loadtxt(data, skiprows=1, **kwargs) + assert_equal(test, control) + + def test_skip_footer(self): + data = ["# %i" % i for i in range(1, 6)] + data.append("A, B, C") + data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)]) + data[-1] = "99,99" + kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10) + test = np.genfromtxt(TextIO("\n".join(data)), **kwargs) + ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)], + dtype=[(_, float) for _ in "ABC"]) + assert_equal(test, ctrl) + + def test_skip_footer_with_invalid(self): + with suppress_warnings() as sup: + 
sup.filter(ConversionWarning) + basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' + # Footer too small to get rid of all invalid values + assert_raises(ValueError, np.genfromtxt, + TextIO(basestr), skip_footer=1) + # except ValueError: + # pass + a = np.genfromtxt( + TextIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + a = np.genfromtxt(TextIO(basestr), skip_footer=3) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n' + a = np.genfromtxt( + TextIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]])) + a = np.genfromtxt( + TextIO(basestr), skip_footer=3, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]])) + + def test_header(self): + # Test retrieving a header + data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, dtype=None, names=True) + assert_(w[0].category is np.VisibleDeprecationWarning) + control = {'gender': np.array([b'M', b'F']), + 'age': np.array([64.0, 25.0]), + 'weight': np.array([75.0, 60.0])} + assert_equal(test['gender'], control['gender']) + assert_equal(test['age'], control['age']) + assert_equal(test['weight'], control['weight']) + + def test_auto_dtype(self): + # Test the automatic definition of the output dtype + data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) + control = [np.array([b'A', b'BCD']), + np.array([64, 25]), + np.array([75.0, 60.0]), + np.array([3 + 4j, 5 + 6j]), + np.array([True, False]), ] + assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4']) + for (i, ctrl) in enumerate(control): + assert_equal(test['f%i' % i], ctrl) + + def test_auto_dtype_uniform(self): + # Tests whether the output dtype can be uniformized + data = TextIO('1 2 3 4\n5 6 7 8\n') + test = np.genfromtxt(data, dtype=None) + control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) + assert_equal(test, control) + + def test_fancy_dtype(self): + # Check that a nested dtype isn't MIA + data = TextIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.genfromtxt(data, dtype=fancydtype, delimiter=',') + control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) + assert_equal(test, control) + + def test_names_overwrite(self): + # Test overwriting the names of the dtype + descriptor = {'names': ('g', 'a', 'w'), + 'formats': ('S1', 'i4', 'f4')} + data = TextIO(b'M 64.0 75.0\nF 25.0 60.0') + names = ('gender', 'age', 'weight') + test = np.genfromtxt(data, dtype=descriptor, names=names) + descriptor['names'] = names + control = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=descriptor) + assert_equal(test, control) + + def test_bad_fname(self): + with pytest.raises(TypeError, match='fname must be a string,'): + np.genfromtxt(123) + + def test_commented_header(self): + # Check that names can be retrieved even if the line is commented out. + data = TextIO(""" +#gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + # The # is part of the first name and should be deleted automatically. 
+ with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) + ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)], + dtype=[('gender', '|S1'), ('age', int), ('weight', float)]) + assert_equal(test, ctrl) + # Ditto, but we should get rid of the first element + data = TextIO(b""" +# gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test, ctrl) + + def test_names_and_comments_none(self): + # Tests case when names is true but comments is None (gh-10780) + data = TextIO('col1 col2\n 1 2\n 3 4') + test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True) + control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)]) + assert_equal(test, control) + + def test_file_is_closed_on_error(self): + # gh-13200 + with tempdir() as tmpdir: + fpath = os.path.join(tmpdir, "test.csv") + with open(fpath, "wb") as f: + f.write('\N{GREEK PI SYMBOL}'.encode()) + + # ResourceWarnings are emitted from a destructor, so won't be + # detected by regular propagation to errors. + with assert_no_warnings(): + with pytest.raises(UnicodeDecodeError): + np.genfromtxt(fpath, encoding="ascii") + + def test_autonames_and_usecols(self): + # Tests names and usecols + data = TextIO('A B C D\n aaaa 121 45 9.1') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, usecols=('A', 'C', 'D'), + names=True, dtype=None) + assert_(w[0].category is np.VisibleDeprecationWarning) + control = np.array(('aaaa', 45, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_with_usecols(self): + # Test the combination user-defined converters and usecol + data = TextIO('1,2,3,,5\n6,7,8,9,10\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + control = np.array([[2, -999], [7, 9]], int) + assert_equal(test, control) + + def test_converters_with_usecols_and_names(self): + # Tests names and usecols + data = TextIO('A B C D\n aaaa 121 45 9.1') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True, + dtype=None, + converters={'C': lambda s: 2 * int(s)}) + assert_(w[0].category is np.VisibleDeprecationWarning) + control = np.array(('aaaa', 90, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_cornercases(self): + # Test the conversion to datetime. + converter = { + 'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')} + data = TextIO('2009-02-03 12:00:00Z, 72214.0') + test = np.genfromtxt(data, delimiter=',', dtype=None, + names=['date', 'stid'], converters=converter) + control = np.array((datetime(2009, 2, 3), 72214.), + dtype=[('date', np.object_), ('stid', float)]) + assert_equal(test, control) + + def test_converters_cornercases2(self): + # Test the conversion to datetime64. 
+ converter = { + 'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))} + data = TextIO('2009-02-03 12:00:00Z, 72214.0') + test = np.genfromtxt(data, delimiter=',', dtype=None, + names=['date', 'stid'], converters=converter) + control = np.array((datetime(2009, 2, 3), 72214.), + dtype=[('date', 'datetime64[us]'), ('stid', float)]) + assert_equal(test, control) + + def test_unused_converter(self): + # Test whether unused converters are forgotten + data = TextIO("1 21\n 3 42\n") + test = np.genfromtxt(data, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_equal(test, [21, 42]) + # + data.seek(0) + test = np.genfromtxt(data, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_equal(test, [33, 66]) + + def test_invalid_converter(self): + strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or + (b'r' not in x.lower() and x.strip() or 0.0)) + strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or + (b'%' not in x.lower() and x.strip() or 0.0)) + s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n" + "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n" + "D02N03,10/10/2004,R 1,,7,145.55") + kwargs = dict( + converters={2: strip_per, 3: strip_rand}, delimiter=",", + dtype=None) + assert_raises(ConverterError, np.genfromtxt, s, **kwargs) + + def test_tricky_converter_bug1666(self): + # Test some corner cases + s = TextIO('q1,2\nq3,4') + cnv = lambda s: float(s[1:]) + test = np.genfromtxt(s, delimiter=',', converters={0: cnv}) + control = np.array([[1., 2.], [3., 4.]]) + assert_equal(test, control) + + def test_dtype_with_converters(self): + dstr = "2009; 23; 46" + test = np.genfromtxt(TextIO(dstr,), + delimiter=";", dtype=float, converters={0: bytes}) + control = np.array([('2009', 23., 46)], + dtype=[('f0', '|S4'), ('f1', float), ('f2', float)]) + assert_equal(test, control) + test = np.genfromtxt(TextIO(dstr,), + delimiter=";", dtype=float, converters={0: float}) + control = np.array([2009., 23., 46],) + assert_equal(test, control) + + def test_dtype_with_converters_and_usecols(self): + dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n" + dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3} + dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')] + conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} + test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + names=None, converters=conv) + control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp) + assert_equal(test, control) + dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')] + test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + usecols=(0,1,3), names=None, converters=conv) + control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp) + assert_equal(test, control) + + def test_dtype_with_object(self): + # Test using an explicit dtype with an object + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', object)] + func = lambda s: strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, control) + + ndtype = [('nest', [('idx', int), ('code', object)])] + with assert_raises_regex(NotImplementedError, + 'Nested fields.* not supported.*'): + test = np.genfromtxt(TextIO(data), delimiter=";", + dtype=ndtype, converters=converters) + + # nested but empty fields also aren't supported + ndtype = [('idx', int), ('code', object), ('nest', 
[])] + with assert_raises_regex(NotImplementedError, + 'Nested fields.* not supported.*'): + test = np.genfromtxt(TextIO(data), delimiter=";", + dtype=ndtype, converters=converters) + + def test_dtype_with_object_no_converter(self): + # Object without a converter uses bytes: + parsed = np.genfromtxt(TextIO("1"), dtype=object) + assert parsed[()] == b"1" + parsed = np.genfromtxt(TextIO("string"), dtype=object) + assert parsed[()] == b"string" + + def test_userconverters_with_explicit_dtype(self): + # Test user_converters w/ explicit (standard) dtype + data = TextIO('skip,skip,2001-01-01,1.0,skip') + test = np.genfromtxt(data, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: bytes}) + control = np.array([('2001-01-01', 1.)], + dtype=[('', '|S10'), ('', float)]) + assert_equal(test, control) + + def test_utf8_userconverters_with_explicit_dtype(self): + utf8 = b'\xcf\x96' + with temppath() as path: + with open(path, 'wb') as f: + f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip') + test = np.genfromtxt(path, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: np.compat.unicode}, + encoding='UTF-8') + control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)], + dtype=[('', '|U11'), ('', float)]) + assert_equal(test, control) + + def test_spacedelimiter(self): + # Test space delimiter + data = TextIO("1 2 3 4 5\n6 7 8 9 10") + test = np.genfromtxt(data) + control = np.array([[1., 2., 3., 4., 5.], + [6., 7., 8., 9., 10.]]) + assert_equal(test, control) + + def test_integer_delimiter(self): + # Test using an integer for delimiter + data = " 1 2 3\n 4 5 67\n890123 4" + test = np.genfromtxt(TextIO(data), delimiter=3) + control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]]) + assert_equal(test, control) + + def test_missing(self): + data = TextIO('1,2,3,,5\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + control = np.array([1, 2, 3, -999, 5], int) + assert_equal(test, control) + + def test_missing_with_tabs(self): + # Test w/ a delimiter tab + txt = "1\t2\t3\n\t2\t\n1\t\t3" + test = np.genfromtxt(TextIO(txt), delimiter="\t", + usemask=True,) + ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],) + ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool) + assert_equal(test.data, ctrl_d) + assert_equal(test.mask, ctrl_m) + + def test_usecols(self): + # Test the selection of columns + # Select 1 column + control = np.array([[1, 2], [3, 4]], float) + data = TextIO() + np.savetxt(data, control) + data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=(1,)) + assert_equal(test, control[:, 1]) + # + control = np.array([[1, 2, 3], [3, 4, 5]], float) + data = TextIO() + np.savetxt(data, control) + data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=(1, 2)) + assert_equal(test, control[:, 1:]) + # Testing with arrays instead of tuples. 
+ data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2])) + assert_equal(test, control[:, 1:]) + + def test_usecols_as_css(self): + # Test giving usecols with a comma-separated string + data = "1 2 3\n4 5 6" + test = np.genfromtxt(TextIO(data), + names="a, b, c", usecols="a, c") + ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"]) + assert_equal(test, ctrl) + + def test_usecols_with_structured_dtype(self): + # Test usecols with an explicit structured dtype + data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9") + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + test = np.genfromtxt( + data, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(test['stid'], [b"JOE", b"BOB"]) + assert_equal(test['temp'], [25.3, 27.9]) + + def test_usecols_with_integer(self): + # Test usecols with an integer + test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0) + assert_equal(test, np.array([1., 4.])) + + def test_usecols_with_named_columns(self): + # Test usecols with named columns + ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)]) + data = "1 2 3\n4 5 6" + kwargs = dict(names="a, b, c") + test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data), + usecols=('a', 'c'), **kwargs) + assert_equal(test, ctrl) + + def test_empty_file(self): + # Test that an empty file raises the proper warning. + with suppress_warnings() as sup: + sup.filter(message="genfromtxt: Empty input file:") + data = TextIO() + test = np.genfromtxt(data) + assert_equal(test, np.array([])) + + # when skip_header > 0 + test = np.genfromtxt(data, skip_header=1) + assert_equal(test, np.array([])) + + def test_fancy_dtype_alt(self): + # Check that a nested dtype isn't MIA + data = TextIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True) + control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) + assert_equal(test, control) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.genfromtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_withmissing(self): + data = TextIO('A,B\n0,1\n2,N/A') + kwargs = dict(delimiter=",", missing_values="N/A", names=True) + test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + # + data.seek(0) + test = np.genfromtxt(data, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', float), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_user_missing_values(self): + data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" + basekwargs = dict(dtype=None, delimiter=",", names=True,) + mdtype = [('A', int), ('B', float), ('C', complex)] + # + test = np.genfromtxt(TextIO(data), missing_values="N/A", + **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)], + dtype=mdtype) + assert_equal(test, control) + # + basekwargs['dtype'] = mdtype + test = 
np.genfromtxt(TextIO(data), + missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + # + test = np.genfromtxt(TextIO(data), + missing_values={0: -9, 'B': -99, 'C': -999j}, + usemask=True, + **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + + def test_user_filling_values(self): + # Test with missing and filling values + ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)]) + data = "N/A, 2, 3\n4, ,???" + kwargs = dict(delimiter=",", + dtype=int, + names="a,b,c", + missing_values={0: "N/A", 'b': " ", 2: "???"}, + filling_values={0: 0, 'b': 0, 2: -999}) + test = np.genfromtxt(TextIO(data), **kwargs) + ctrl = np.array([(0, 2, 3), (4, 0, -999)], + dtype=[(_, int) for _ in "abc"]) + assert_equal(test, ctrl) + # + test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) + ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"]) + assert_equal(test, ctrl) + + data2 = "1,2,*,4\n5,*,7,8\n" + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=0) + ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]]) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=-1) + ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]]) + assert_equal(test, ctrl) + + def test_withmissing_float(self): + data = TextIO('A,B\n0,1.5\n2,-999.00') + test = np.genfromtxt(data, dtype=None, delimiter=',', + missing_values='-999.0', names=True, usemask=True) + control = ma.array([(0, 1.5), (2, -1.)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_with_masked_column_uniform(self): + # Test masked column + data = TextIO('1 2 3\n4 5 6\n') + test = np.genfromtxt(data, dtype=None, + missing_values='2,5', usemask=True) + control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]]) + assert_equal(test, control) + + def test_with_masked_column_various(self): + # Test masked column + data = TextIO('True 2 3\nFalse 5 6\n') + test = np.genfromtxt(data, dtype=None, + missing_values='2,5', usemask=True) + control = ma.array([(1, 2, 3), (0, 5, 6)], + mask=[(0, 1, 0), (0, 1, 0)], + dtype=[('f0', bool), ('f1', bool), ('f2', int)]) + assert_equal(test, control) + + def test_invalid_raise(self): + # Test invalid raise + data = ["1, 1, 1, 1, 1"] * 50 + for i in range(5): + data[10 * i] = "2, 2, 2, 2 2" + data.insert(0, "a, b, c, d, e") + mdata = TextIO("\n".join(data)) + + kwargs = dict(delimiter=",", dtype=None, names=True) + def f(): + return np.genfromtxt(mdata, invalid_raise=False, **kwargs) + mtest = assert_warns(ConversionWarning, f) + assert_equal(len(mtest), 45) + assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) + # + mdata.seek(0) + assert_raises(ValueError, np.genfromtxt, mdata, + delimiter=",", names=True) + + def test_invalid_raise_with_usecols(self): + # Test invalid_raise with usecols + data = ["1, 1, 1, 1, 1"] * 50 + for i in range(5): + data[10 * i] = "2, 2, 2, 2 2" + data.insert(0, "a, b, c, d, e") + mdata = TextIO("\n".join(data)) + + kwargs = dict(delimiter=",", dtype=None, names=True, + invalid_raise=False) + 
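# assert_warns expects a zero-argument callable, so the genfromtxt call + # is wrapped in a small function. +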
def f(): + return np.genfromtxt(mdata, usecols=(0, 4), **kwargs) + mtest = assert_warns(ConversionWarning, f) + assert_equal(len(mtest), 45) + assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) + # + mdata.seek(0) + mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs) + assert_equal(len(mtest), 50) + control = np.ones(50, dtype=[(_, int) for _ in 'ab']) + control[[10 * _ for _ in range(5)]] = (2, 2) + assert_equal(mtest, control) + + def test_inconsistent_dtype(self): + # Test inconsistent dtype + data = ["1, 1, 1, 1, -1.1"] * 50 + mdata = TextIO("\n".join(data)) + + converters = {4: lambda x: "(%s)" % x.decode()} + kwargs = dict(delimiter=",", converters=converters, + dtype=[(_, int) for _ in 'abcde'],) + assert_raises(ValueError, np.genfromtxt, mdata, **kwargs) + + def test_default_field_format(self): + # Test default format + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=None, defaultfmt="f%02i") + ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)], + dtype=[("f00", int), ("f01", int), ("f02", float)]) + assert_equal(mtest, ctrl) + + def test_single_dtype_wo_names(self): + # Test single dtype w/o names + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, defaultfmt="f%02i") + ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float) + assert_equal(mtest, ctrl) + + def test_single_dtype_w_explicit_names(self): + # Test single dtype w explicit names + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, names="a, b, c") + ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], + dtype=[(_, float) for _ in "abc"]) + assert_equal(mtest, ctrl) + + def test_single_dtype_w_implicit_names(self): + # Test single dtype w implicit names + data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, names=True) + ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], + dtype=[(_, float) for _ in "abc"]) + assert_equal(mtest, ctrl) + + def test_easy_structured_dtype(self): + # Test easy structured dtype + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), delimiter=",", + dtype=(int, float, float), defaultfmt="f_%02i") + ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)], + dtype=[("f_00", int), ("f_01", float), ("f_02", float)]) + assert_equal(mtest, ctrl) + + def test_autostrip(self): + # Test autostrip + data = "01/01/2003 , 1.3, abcde" + kwargs = dict(delimiter=",", dtype=None) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + mtest = np.genfromtxt(TextIO(data), **kwargs) + assert_(w[0].category is np.VisibleDeprecationWarning) + ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')], + dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')]) + assert_equal(mtest, ctrl) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs) + assert_(w[0].category is np.VisibleDeprecationWarning) + ctrl = np.array([('01/01/2003', 1.3, 'abcde')], + dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')]) + assert_equal(mtest, ctrl) + + def test_replace_space(self): + # Test the 'replace_space' option + txt = "A.A, B (B), C:C\n1, 2, 3.14" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None) + ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)] + 
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + + def test_replace_space_known_dtype(self): + # Test the 'replace_space' (and related) options when dtype != None + txt = "A.A, B (B), C:C\n1, 2, 3" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int) + ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + + def test_incomplete_names(self): + # Test w/ incomplete names + data = "A,,C\n0,1,2\n3,4,5" + kwargs = dict(delimiter=",", names=True) + # w/ dtype=None + ctrl = np.array([(0, 1, 2), (3, 4, 5)], + dtype=[(_, int) for _ in ('A', 'f0', 'C')]) + test = np.genfromtxt(TextIO(data), dtype=None, **kwargs) + assert_equal(test, ctrl) + # w/ default dtype + ctrl = np.array([(0, 1, 2), (3, 4, 5)], + dtype=[(_, float) for _ in ('A', 'f0', 'C')]) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_names_auto_completion(self): + # Make sure that names are properly completed + data = "1 2 3\n 4 5 6" + test = np.genfromtxt(TextIO(data), + dtype=(int, float, int), names="a") + ctrl = np.array([(1, 2, 3), (4, 5, 6)], + dtype=[('a', int), ('f0', float), ('f1', int)]) + assert_equal(test, ctrl) + + def test_names_with_usecols_bug1636(self): + # Make sure we pick up the right names w/ usecols + data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4" + ctrl_names = ("A", "C", "E") + test = np.genfromtxt(TextIO(data), + dtype=(int, int, int), delimiter=",", + usecols=(0, 2, 4), names=True) + assert_equal(test.dtype.names, ctrl_names) + # + test = np.genfromtxt(TextIO(data), + dtype=(int, int, int), delimiter=",", + usecols=("A", "C", "E"), names=True) + assert_equal(test.dtype.names, ctrl_names) + # + test = np.genfromtxt(TextIO(data), + dtype=int, delimiter=",", + usecols=("A", "C", "E"), names=True) + assert_equal(test.dtype.names, ctrl_names) + + def test_fixed_width_names(self): + # Test fixed-width w/ names + data = " A B C\n 0 1 2.3\n 45 67 9."
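+ # (With genfromtxt, a tuple delimiter gives per-column field widths and + # a single int one uniform width; both slice fixed-width rather than + # delimited text.)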
+ kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None) + ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], + dtype=[('A', int), ('B', int), ('C', float)]) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + # + kwargs = dict(delimiter=5, names=True, dtype=None) + ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], + dtype=[('A', int), ('B', int), ('C', float)]) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_filling_values(self): + # Test missing values + data = b"1, 2, 3\n1, , 5\n0, 6, \n" + kwargs = dict(delimiter=",", dtype=None, filling_values=-999) + ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_comments_is_none(self): + # Github issue 329 (None was previously being converted to 'None'). + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test[1], b'testNonetherestofthedata') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test[1], b' testNonetherestofthedata') + + def test_latin1(self): + latin1 = b'\xf6\xfc\xf6' + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + latin1 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test[1, 0], b"test1") + assert_equal(test[1, 1], b"testNonethe" + latin1) + assert_equal(test[1, 2], b"test3") + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding='latin1') + assert_equal(test[1, 0], "test1") + assert_equal(test[1, 1], "testNonethe" + latin1.decode('latin1')) + assert_equal(test[1, 2], "test3") + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + assert_equal(test['f0'], 0) + assert_equal(test['f1'], b"testNonethe" + latin1) + + def test_binary_decode_autodtype(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_utf8_byte_encoding(self): + utf8 = b"\xcf\x96" + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + utf8 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', np.VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',') + assert_(w[0].category is np.VisibleDeprecationWarning) + ctl = np.array([ + [b'norm1', b'norm2', b'norm3'], + [b'test1', b'testNonethe' + utf8, b'test3'], + [b'norm1', b'norm2', b'norm3']]) + assert_array_equal(test, ctl) + + def test_utf8_file(self): + utf8 = 
b"\xcf\x96" + with temppath() as path: + with open(path, "wb") as f: + f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + ctl = np.array([ + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"], + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]], + dtype=np.str_) + assert_array_equal(test, ctl) + + # test a mixed dtype + with open(path, "wb") as f: + f.write(b"0,testNonethe" + utf8) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + assert_equal(test['f0'], 0) + assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8")) + + def test_utf8_file_nodtype_unicode(self): + # bytes encoding with non-latin1 -> unicode upcast + utf8 = '\u03d6' + latin1 = '\xf6\xfc\xf6' + + # skip test if cannot encode utf8 test string with preferred + # encoding. The preferred encoding is assumed to be the default + # encoding of io.open. Will need to change this for PyTest, maybe + # using pytest.mark.xfail(raises=***). + try: + encoding = locale.getpreferredencoding() + utf8.encode(encoding) + except (UnicodeError, ImportError): + pytest.skip('Skipping test_utf8_file_nodtype_unicode, ' + 'unable to encode utf8 in preferred encoding') + + with temppath() as path: + with io.open(path, "wt") as f: + f.write("norm1,norm2,norm3\n") + f.write("norm1," + latin1 + ",norm3\n") + f.write("test1,testNonethe" + utf8 + ",test3\n") + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', + np.VisibleDeprecationWarning) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',') + # Check for warning when encoding not specified. + assert_(w[0].category is np.VisibleDeprecationWarning) + ctl = np.array([ + ["norm1", "norm2", "norm3"], + ["norm1", latin1, "norm3"], + ["test1", "testNonethe" + utf8, "test3"]], + dtype=np.str_) + assert_array_equal(test, ctl) + + def test_recfromtxt(self): + # + data = TextIO('A,B\n0,1\n2,3') + kwargs = dict(delimiter=",", missing_values="N/A", names=True) + test = np.recfromtxt(data, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,N/A') + test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + + def test_recfromcsv(self): + # + data = TextIO('A,B\n0,1\n2,3') + kwargs = dict(missing_values="N/A", names=True, case_sensitive=True) + test = np.recfromcsv(data, dtype=None, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,N/A') + test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + # + data = TextIO('A,B\n0,1\n2,3') + test = np.recfromcsv(data, missing_values='N/A',) + control = np.array([(0, 1), (2, 3)], + dtype=[('a', int), ('b', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,3') + dtype = [('a', int), ('b', float)] + test = 
np.recfromcsv(data, missing_values='N/A', dtype=dtype) + control = np.array([(0, 1), (2, 3)], + dtype=dtype) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + #gh-10394 + data = TextIO('color\n"red"\n"blue"') + test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')}) + control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))]) + assert_equal(test.dtype, control.dtype) + assert_equal(test, control) + + def test_max_rows(self): + # Test the `max_rows` keyword argument. + data = '1 2\n3 4\n5 6\n7 8\n9 10\n' + txt = TextIO(data) + a1 = np.genfromtxt(txt, max_rows=3) + a2 = np.genfromtxt(txt) + assert_equal(a1, [[1, 2], [3, 4], [5, 6]]) + assert_equal(a2, [[7, 8], [9, 10]]) + + # max_rows must be at least 1. + assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0) + + # An input with several invalid rows. + data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n' + + test = np.genfromtxt(TextIO(data), max_rows=2) + control = np.array([[1., 1.], [2., 2.]]) + assert_equal(test, control) + + # Test keywords conflict + assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1, + max_rows=4) + + # Test with invalid value + assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4) + + # Test with invalid not raise + with suppress_warnings() as sup: + sup.filter(ConversionWarning) + + test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False) + control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) + assert_equal(test, control) + + test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False) + control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]) + assert_equal(test, control) + + # Structured array with field names. + data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n' + + # Test with header, names and comments + txt = TextIO(data) + test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True) + control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)], + dtype=[('c', '<f8'), ('d', '<f8')]) + assert_equal(test, control) + # To continue reading the same "file", don't use skip_header or + # names, and use the previously determined dtype. + test = np.genfromtxt(txt, max_rows=None, dtype=control.dtype) + control = np.array([(4.0, 4.0), (5.0, 5.0)], + dtype=control.dtype) + assert_equal(test, control) + + def test_auto_dtype_largeint(self): + # Regression test for numpy/numpy#5635 whereby large integers + # could cause OverflowErrors. + + # Test the automatic definition of the output dtype + # + # 2**66 = 73786976294838206464 => should convert to float + # 2**34 = 17179869184 => should convert to int64 + # 2**10 = 1024 => should convert to int (int32 on 32-bit systems, + # int64 on 64-bit systems) + + data = TextIO('73786976294838206464 17179869184 1024') + + test = np.genfromtxt(data, dtype=None) + + assert_equal(test.dtype.names, ['f0', 'f1', 'f2']) + + assert_(test.dtype['f0'] == float) + assert_(test.dtype['f1'] == np.int64) + assert_(test.dtype['f2'] == np.int_) + + assert_allclose(test['f0'], 73786976294838206464.)
+ assert_equal(test['f1'], 17179869184) + assert_equal(test['f2'], 1024) + + def test_unpack_float_data(self): + txt = TextIO("1,2,3\n4,5,6\n7,8,9\n0.0,1.0,2.0") + a, b, c = np.loadtxt(txt, delimiter=",", unpack=True) + assert_array_equal(a, np.array([1.0, 4.0, 7.0, 0.0])) + assert_array_equal(b, np.array([2.0, 5.0, 8.0, 1.0])) + assert_array_equal(c, np.array([3.0, 6.0, 9.0, 2.0])) + + def test_unpack_structured(self): + # Regression test for gh-4341 + # Unpacking should work on structured arrays + txt = TextIO("M 21 72\nF 35 58") + dt = {'names': ('a', 'b', 'c'), 'formats': ('S1', 'i4', 'f4')} + a, b, c = np.genfromtxt(txt, dtype=dt, unpack=True) + assert_equal(a.dtype, np.dtype('S1')) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(c.dtype, np.dtype('f4')) + assert_array_equal(a, np.array([b'M', b'F'])) + assert_array_equal(b, np.array([21, 35])) + assert_array_equal(c, np.array([72., 58.])) + + def test_unpack_auto_dtype(self): + # Regression test for gh-4341 + # Unpacking should work when dtype=None + txt = TextIO("M 21 72.\nF 35 58.") + expected = (np.array(["M", "F"]), np.array([21, 35]), np.array([72., 58.])) + test = np.genfromtxt(txt, dtype=None, unpack=True, encoding="utf-8") + for arr, result in zip(expected, test): + assert_array_equal(arr, result) + assert_equal(arr.dtype, result.dtype) + + def test_unpack_single_name(self): + # Regression test for gh-4341 + # Unpacking should work when structured dtype has only one field + txt = TextIO("21\n35") + dt = {'names': ('a',), 'formats': ('i4',)} + expected = np.array([21, 35], dtype=np.int32) + test = np.genfromtxt(txt, dtype=dt, unpack=True) + assert_array_equal(expected, test) + assert_equal(expected.dtype, test.dtype) + + def test_squeeze_scalar(self): + # Regression test for gh-4341 + # Unpacking a scalar should give zero-dim output, + # even if dtype is structured + txt = TextIO("1") + dt = {'names': ('a',), 'formats': ('i4',)} + expected = np.array((1,), dtype=np.int32) + test = np.genfromtxt(txt, dtype=dt, unpack=True) + assert_array_equal(expected, test) + assert_equal((), test.shape) + assert_equal(expected.dtype, test.dtype) + + @pytest.mark.parametrize("ndim", [0, 1, 2]) + def test_ndmin_keyword(self, ndim: int): + # let's have the same behaviour of ndmin as loadtxt + # as they should be the same for non-missing values + txt = "42" + + a = np.loadtxt(StringIO(txt), ndmin=ndim) + b = np.genfromtxt(StringIO(txt), ndmin=ndim) + + assert_array_equal(a, b) + + +class TestPathUsage: + # Test that pathlib.Path can be used + def test_loadtxt(self): + with temppath(suffix='.txt') as path: + path = Path(path) + a = np.array([[1.1, 2], [3, 4]]) + np.savetxt(path, a) + x = np.loadtxt(path) + assert_array_equal(x, a) + + def test_save_load(self): + # Test that pathlib.Path instances can be used with save. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + data = np.load(path) + assert_array_equal(data, a) + + def test_save_load_memmap(self): + # Test that pathlib.Path instances can be loaded mem-mapped. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + data = np.load(path, mmap_mode='r') + assert_array_equal(data, a) + # close the mem-mapped file + del data + if IS_PYPY: + break_cycles() + break_cycles() + + @pytest.mark.xfail(IS_WASM, reason="memmap doesn't work correctly") + def test_save_load_memmap_readwrite(self): + # Test that pathlib.Path instances can be written mem-mapped.
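+ # (mmap_mode='r+' maps the saved .npy read-write: assignments through + # the returned memmap are flushed back to the file once it is closed.)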
+ with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + b = np.load(path, mmap_mode='r+') + a[0][0] = 5 + b[0][0] = 5 + del b # closes the file + if IS_PYPY: + break_cycles() + break_cycles() + data = np.load(path) + assert_array_equal(data, a) + + def test_savez_load(self): + # Test that pathlib.Path instances can be used with savez. + with temppath(suffix='.npz') as path: + path = Path(path) + np.savez(path, lab='place holder') + with np.load(path) as data: + assert_array_equal(data['lab'], 'place holder') + + def test_savez_compressed_load(self): + # Test that pathlib.Path instances can be used with savez. + with temppath(suffix='.npz') as path: + path = Path(path) + np.savez_compressed(path, lab='place holder') + data = np.load(path) + assert_array_equal(data['lab'], 'place holder') + data.close() + + def test_genfromtxt(self): + with temppath(suffix='.txt') as path: + path = Path(path) + a = np.array([(1, 2), (3, 4)]) + np.savetxt(path, a) + data = np.genfromtxt(path) + assert_array_equal(a, data) + + def test_recfromtxt(self): + with temppath(suffix='.txt') as path: + path = Path(path) + with path.open('w') as f: + f.write('A,B\n0,1\n2,3') + + kwargs = dict(delimiter=",", missing_values="N/A", names=True) + test = np.recfromtxt(path, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + def test_recfromcsv(self): + with temppath(suffix='.txt') as path: + path = Path(path) + with path.open('w') as f: + f.write('A,B\n0,1\n2,3') + + kwargs = dict(missing_values="N/A", names=True, case_sensitive=True) + test = np.recfromcsv(path, dtype=None, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + +def test_gzip_load(): + a = np.random.random((5, 5)) + + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + + np.save(f, a) + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.load(f), a) + + +# These next two classes encode the minimal API needed to save()/load() arrays. +# The `test_ducktyping` ensures they work correctly +class JustWriter: + def __init__(self, base): + self.base = base + + def write(self, s): + return self.base.write(s) + + def flush(self): + return self.base.flush() + +class JustReader: + def __init__(self, base): + self.base = base + + def read(self, n): + return self.base.read(n) + + def seek(self, off, whence=0): + return self.base.seek(off, whence) + + +def test_ducktyping(): + a = np.random.random((5, 5)) + + s = BytesIO() + f = JustWriter(s) + + np.save(f, a) + f.flush() + s.seek(0) + + f = JustReader(s) + assert_array_equal(np.load(f), a) + + + +def test_gzip_loadtxt(): + # Thanks to another windows brokenness, we can't use + # NamedTemporaryFile: a file created from this function cannot be + # reopened by another open call. 
So we first write the gzipped string + # of the test reference array to a securely opened file, + # which is then read back by the loadtxt function + s = BytesIO() + g = gzip.GzipFile(fileobj=s, mode='w') + g.write(b'1 2 3\n') + g.close() + + s.seek(0) + with temppath(suffix='.gz') as name: + with open(name, 'wb') as f: + f.write(s.read()) + res = np.loadtxt(name) + s.close() + + assert_array_equal(res, [1, 2, 3]) + + +def test_gzip_loadtxt_from_string(): + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + f.write(b'1 2 3\n') + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.loadtxt(f), [1, 2, 3]) + + +def test_npzfile_dict(): + s = BytesIO() + x = np.zeros((3, 3)) + y = np.zeros((3, 3)) + + np.savez(s, x=x, y=y) + s.seek(0) + + z = np.load(s) + + assert_('x' in z) + assert_('y' in z) + assert_('x' in z.keys()) + assert_('y' in z.keys()) + + for f, a in z.items(): + assert_(f in ['x', 'y']) + assert_equal(a.shape, (3, 3)) + + assert_(len(z.items()) == 2) + + for f in z: + assert_(f in ['x', 'y']) + + assert_('x' in z.keys()) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_load_refcount(): + # Check that objects returned by np.load are directly freed based on + # their refcount, rather than needing the gc to collect them. + + f = BytesIO() + np.savez(f, [1, 2, 3]) + f.seek(0) + + with assert_no_gc_cycles(): + np.load(f) + + f.seek(0) + dt = [("a", 'u1', 2), ("b", 'u1', 2)] + with assert_no_gc_cycles(): + x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt) + assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) + +def test_load_multiple_arrays_until_eof(): + f = BytesIO() + np.save(f, 1) + np.save(f, 2) + f.seek(0) + assert np.load(f) == 1 + assert np.load(f) == 2 + with pytest.raises(EOFError): + np.load(f) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_loadtxt.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_loadtxt.py new file mode 100644 index 00000000..2d805e43 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_loadtxt.py @@ -0,0 +1,1048 @@ +""" +Tests specific to `np.loadtxt` added during the move of loadtxt to be backed +by C code. +These tests complement those found in `test_io.py`. +""" + +import sys +import os +import pytest +from tempfile import NamedTemporaryFile, mkstemp +from io import StringIO + +import numpy as np +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_array_equal, HAS_REFCOUNT, IS_PYPY + + +def test_scientific_notation(): + """Test that both 'e' and 'E' are parsed correctly.""" + data = StringIO( + ( + "1.0e-1,2.0E1,3.0\n" + "4.0e-2,5.0E-1,6.0\n" + "7.0e-3,8.0E1,9.0\n" + "0.0e-4,1.0E-1,2.0" + ) + ) + expected = np.array( + [[0.1, 20., 3.0], [0.04, 0.5, 6], [0.007, 80., 9], [0, 0.1, 2]] + ) + assert_array_equal(np.loadtxt(data, delimiter=","), expected) + + +@pytest.mark.parametrize("comment", ["..", "//", "@-", "this is a comment:"]) +def test_comment_multiple_chars(comment): + content = "# IGNORE\n1.5, 2.5# ABC\n3.0,4.0# XXX\n5.5,6.0\n" + txt = StringIO(content.replace("#", comment)) + a = np.loadtxt(txt, delimiter=",", comments=comment) + assert_equal(a, [[1.5, 2.5], [3.0, 4.0], [5.5, 6.0]]) + + +@pytest.fixture +def mixed_types_structured(): + """ + Fixture providing heterogeneous input data with a structured dtype, along + with the associated structured array.
+ """ + data = StringIO( + ( + "1000;2.4;alpha;-34\n" + "2000;3.1;beta;29\n" + "3500;9.9;gamma;120\n" + "4090;8.1;delta;0\n" + "5001;4.4;epsilon;-99\n" + "6543;7.8;omega;-1\n" + ) + ) + dtype = np.dtype( + [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] + ) + expected = np.array( + [ + (1000, 2.4, "alpha", -34), + (2000, 3.1, "beta", 29), + (3500, 9.9, "gamma", 120), + (4090, 8.1, "delta", 0), + (5001, 4.4, "epsilon", -99), + (6543, 7.8, "omega", -1) + ], + dtype=dtype + ) + return data, dtype, expected + + +@pytest.mark.parametrize('skiprows', [0, 1, 2, 3]) +def test_structured_dtype_and_skiprows_no_empty_lines( + skiprows, mixed_types_structured): + data, dtype, expected = mixed_types_structured + a = np.loadtxt(data, dtype=dtype, delimiter=";", skiprows=skiprows) + assert_array_equal(a, expected[skiprows:]) + + +def test_unpack_structured(mixed_types_structured): + data, dtype, expected = mixed_types_structured + + a, b, c, d = np.loadtxt(data, dtype=dtype, delimiter=";", unpack=True) + assert_array_equal(a, expected["f0"]) + assert_array_equal(b, expected["f1"]) + assert_array_equal(c, expected["f2"]) + assert_array_equal(d, expected["f3"]) + + +def test_structured_dtype_with_shape(): + dtype = np.dtype([("a", "u1", 2), ("b", "u1", 2)]) + data = StringIO("0,1,2,3\n6,7,8,9\n") + expected = np.array([((0, 1), (2, 3)), ((6, 7), (8, 9))], dtype=dtype) + assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dtype), expected) + + +def test_structured_dtype_with_multi_shape(): + dtype = np.dtype([("a", "u1", (2, 2))]) + data = StringIO("0 1 2 3\n") + expected = np.array([(((0, 1), (2, 3)),)], dtype=dtype) + assert_array_equal(np.loadtxt(data, dtype=dtype), expected) + + +def test_nested_structured_subarray(): + # Test from gh-16678 + point = np.dtype([('x', float), ('y', float)]) + dt = np.dtype([('code', int), ('points', point, (2,))]) + data = StringIO("100,1,2,3,4\n200,5,6,7,8\n") + expected = np.array( + [ + (100, [(1., 2.), (3., 4.)]), + (200, [(5., 6.), (7., 8.)]), + ], + dtype=dt + ) + assert_array_equal(np.loadtxt(data, dtype=dt, delimiter=","), expected) + + +def test_structured_dtype_offsets(): + # An aligned structured dtype will have additional padding + dt = np.dtype("i1, i4, i1, i4, i1, i4", align=True) + data = StringIO("1,2,3,4,5,6\n7,8,9,10,11,12\n") + expected = np.array([(1, 2, 3, 4, 5, 6), (7, 8, 9, 10, 11, 12)], dtype=dt) + assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dt), expected) + + +@pytest.mark.parametrize("param", ("skiprows", "max_rows")) +def test_exception_negative_row_limits(param): + """skiprows and max_rows should raise for negative parameters.""" + with pytest.raises(ValueError, match="argument must be nonnegative"): + np.loadtxt("foo.bar", **{param: -3}) + + +@pytest.mark.parametrize("param", ("skiprows", "max_rows")) +def test_exception_noninteger_row_limits(param): + with pytest.raises(TypeError, match="argument must be an integer"): + np.loadtxt("foo.bar", **{param: 1.0}) + + +@pytest.mark.parametrize( + "data, shape", + [ + ("1 2 3 4 5\n", (1, 5)), # Single row + ("1\n2\n3\n4\n5\n", (5, 1)), # Single column + ] +) +def test_ndmin_single_row_or_col(data, shape): + arr = np.array([1, 2, 3, 4, 5]) + arr2d = arr.reshape(shape) + + assert_array_equal(np.loadtxt(StringIO(data), dtype=int), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=0), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=1), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=2), arr2d) + + 
+@pytest.mark.parametrize("badval", [-1, 3, None, "plate of shrimp"]) +def test_bad_ndmin(badval): + with pytest.raises(ValueError, match="Illegal value of ndmin keyword"): + np.loadtxt("foo.bar", ndmin=badval) + + +@pytest.mark.parametrize( + "ws", + ( + " ", # space + "\t", # tab + "\u2003", # em + "\u00A0", # non-break + "\u3000", # ideographic space + ) +) +def test_blank_lines_spaces_delimit(ws): + txt = StringIO( + f"1 2{ws}30\n\n{ws}\n" + f"4 5 60{ws}\n {ws} \n" + f"7 8 {ws} 90\n # comment\n" + f"3 2 1" + ) + # NOTE: It is unclear that the ` # comment` should succeed. Except + # for delimiter=None, which should use any whitespace (and maybe + # should just be implemented closer to Python + expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]]) + assert_equal( + np.loadtxt(txt, dtype=int, delimiter=None, comments="#"), expected + ) + + +def test_blank_lines_normal_delimiter(): + txt = StringIO('1,2,30\n\n4,5,60\n\n7,8,90\n# comment\n3,2,1') + expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]]) + assert_equal( + np.loadtxt(txt, dtype=int, delimiter=',', comments="#"), expected + ) + + +@pytest.mark.parametrize("dtype", (float, object)) +def test_maxrows_no_blank_lines(dtype): + txt = StringIO("1.5,2.5\n3.0,4.0\n5.5,6.0") + res = np.loadtxt(txt, dtype=dtype, delimiter=",", max_rows=2) + assert_equal(res.dtype, dtype) + assert_equal(res, np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype)) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2"))) +def test_exception_message_bad_values(dtype): + txt = StringIO("1,2\n3,XXX\n5,6") + msg = f"could not convert string 'XXX' to {dtype} at row 1, column 2" + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, dtype=dtype, delimiter=",") + + +def test_converters_negative_indices(): + txt = StringIO('1.5,2.5\n3.0,XXX\n5.5,6.0') + conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)} + expected = np.array([[1.5, 2.5], [3.0, np.nan], [5.5, 6.0]]) + res = np.loadtxt( + txt, dtype=np.float64, delimiter=",", converters=conv, encoding=None + ) + assert_equal(res, expected) + + +def test_converters_negative_indices_with_usecols(): + txt = StringIO('1.5,2.5,3.5\n3.0,4.0,XXX\n5.5,6.0,7.5\n') + conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)} + expected = np.array([[1.5, 3.5], [3.0, np.nan], [5.5, 7.5]]) + res = np.loadtxt( + txt, + dtype=np.float64, + delimiter=",", + converters=conv, + usecols=[0, -1], + encoding=None, + ) + assert_equal(res, expected) + + # Second test with variable number of rows: + res = np.loadtxt(StringIO('''0,1,2\n0,1,2,3,4'''), delimiter=",", + usecols=[0, -1], converters={-1: (lambda x: -1)}) + assert_array_equal(res, [[0, -1], [0, -1]]) + + +def test_ragged_error(): + rows = ["1,2,3", "1,2,3", "4,3,2,1"] + with pytest.raises(ValueError, + match="the number of columns changed from 3 to 4 at row 3"): + np.loadtxt(rows, delimiter=",") + + +def test_ragged_usecols(): + # usecols, and negative ones, work even with varying number of columns. 
+ txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n") + expected = np.array([[0, 0], [0, 0], [0, 0]]) + res = np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2]) + assert_equal(res, expected) + + txt = StringIO("0,0,XXX\n0\n0,XXX,XXX,0,XXX\n") + with pytest.raises(ValueError, + match="invalid column index -2 at row 2 with 1 columns"): + # There is no -2 column in the second row: + np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2]) + + +def test_empty_usecols(): + txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n") + res = np.loadtxt(txt, dtype=np.dtype([]), delimiter=",", usecols=[]) + assert res.shape == (3,) + assert res.dtype == np.dtype([]) + + +@pytest.mark.parametrize("c1", ["a", "の", "🫕"]) +@pytest.mark.parametrize("c2", ["a", "の", "🫕"]) +def test_large_unicode_characters(c1, c2): + # c1 and c2 span ascii, 16bit and 32bit range. + txt = StringIO(f"a,{c1},c,1.0\ne,{c2},2.0,g") + res = np.loadtxt(txt, dtype=np.dtype('U12'), delimiter=",") + expected = np.array( + [f"a,{c1},c,1.0".split(","), f"e,{c2},2.0,g".split(",")], + dtype=np.dtype('U12') + ) + assert_equal(res, expected) + + +def test_unicode_with_converter(): + txt = StringIO("cat,dog\nαβγ,δεζ\nabc,def\n") + conv = {0: lambda s: s.upper()} + res = np.loadtxt( + txt, + dtype=np.dtype("U12"), + converters=conv, + delimiter=",", + encoding=None + ) + expected = np.array([['CAT', 'dog'], ['ΑΒΓ', 'δεζ'], ['ABC', 'def']]) + assert_equal(res, expected) + + +def test_converter_with_structured_dtype(): + txt = StringIO('1.5,2.5,Abc\n3.0,4.0,dEf\n5.5,6.0,ghI\n') + dt = np.dtype([('m', np.int32), ('r', np.float32), ('code', 'U8')]) + conv = {0: lambda s: int(10*float(s)), -1: lambda s: s.upper()} + res = np.loadtxt(txt, dtype=dt, delimiter=",", converters=conv) + expected = np.array( + [(15, 2.5, 'ABC'), (30, 4.0, 'DEF'), (55, 6.0, 'GHI')], dtype=dt + ) + assert_equal(res, expected) + + +def test_converter_with_unicode_dtype(): + """ + With the default 'bytes' encoding, tokens are encoded prior to being + passed to the converter. This means that the output of the converter may + be bytes instead of unicode as expected by `read_rows`. + + This test checks that outputs from the above scenario are properly decoded + prior to parsing by `read_rows`. + """ + txt = StringIO('abc,def\nrst,xyz') + conv = bytes.upper + res = np.loadtxt( + txt, dtype=np.dtype("U3"), converters=conv, delimiter=",") + expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']]) + assert_equal(res, expected) + + +def test_read_huge_row(): + row = "1.5, 2.5," * 50000 + row = row[:-1] + "\n" + txt = StringIO(row * 2) + res = np.loadtxt(txt, delimiter=",", dtype=float) + assert_equal(res, np.tile([1.5, 2.5], (2, 50000))) + + +@pytest.mark.parametrize("dtype", "edfgFDG") +def test_huge_float(dtype): + # Covers a non-optimized path that is rarely taken: + field = "0" * 1000 + ".123456789" + dtype = np.dtype(dtype) + value = np.loadtxt([field], dtype=dtype)[()] + assert value == dtype.type("0.123456789") + + +@pytest.mark.parametrize( + ("given_dtype", "expected_dtype"), + [ + ("S", np.dtype("S5")), + ("U", np.dtype("U5")), + ], +) +def test_string_no_length_given(given_dtype, expected_dtype): + """ + The given dtype is just 'S' or 'U' with no length. In these cases, the + length of the resulting dtype is determined by the longest string found + in the file. 
+ """ + txt = StringIO("AAA,5-1\nBBBBB,0-3\nC,4-9\n") + res = np.loadtxt(txt, dtype=given_dtype, delimiter=",") + expected = np.array( + [['AAA', '5-1'], ['BBBBB', '0-3'], ['C', '4-9']], dtype=expected_dtype + ) + assert_equal(res, expected) + assert_equal(res.dtype, expected_dtype) + + +def test_float_conversion(): + """ + Some tests that the conversion to float64 works as accurately as the + Python built-in `float` function. In a naive version of the float parser, + these strings resulted in values that were off by an ULP or two. + """ + strings = [ + '0.9999999999999999', + '9876543210.123456', + '5.43215432154321e+300', + '0.901', + '0.333', + ] + txt = StringIO('\n'.join(strings)) + res = np.loadtxt(txt) + expected = np.array([float(s) for s in strings]) + assert_equal(res, expected) + + +def test_bool(): + # Simple test for bool via integer + txt = StringIO("1, 0\n10, -1") + res = np.loadtxt(txt, dtype=bool, delimiter=",") + assert res.dtype == bool + assert_array_equal(res, [[True, False], [True, True]]) + # Make sure we use only 1 and 0 on the byte level: + assert_array_equal(res.view(np.uint8), [[1, 0], [1, 1]]) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") +def test_integer_signs(dtype): + dtype = np.dtype(dtype) + assert np.loadtxt(["+2"], dtype=dtype) == 2 + if dtype.kind == "u": + with pytest.raises(ValueError): + np.loadtxt(["-1\n"], dtype=dtype) + else: + assert np.loadtxt(["-2\n"], dtype=dtype) == -2 + + for sign in ["++", "+-", "--", "-+"]: + with pytest.raises(ValueError): + np.loadtxt([f"{sign}2\n"], dtype=dtype) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") +def test_implicit_cast_float_to_int_fails(dtype): + txt = StringIO("1.0, 2.1, 3.7\n4, 5, 6") + with pytest.raises(ValueError): + np.loadtxt(txt, dtype=dtype, delimiter=",") + +@pytest.mark.parametrize("dtype", (np.complex64, np.complex128)) +@pytest.mark.parametrize("with_parens", (False, True)) +def test_complex_parsing(dtype, with_parens): + s = "(1.0-2.5j),3.75,(7+-5.0j)\n(4),(-19e2j),(0)" + if not with_parens: + s = s.replace("(", "").replace(")", "") + + res = np.loadtxt(StringIO(s), dtype=dtype, delimiter=",") + expected = np.array( + [[1.0-2.5j, 3.75, 7-5j], [4.0, -1900j, 0]], dtype=dtype + ) + assert_equal(res, expected) + + +def test_read_from_generator(): + def gen(): + for i in range(4): + yield f"{i},{2*i},{i**2}" + + res = np.loadtxt(gen(), dtype=int, delimiter=",") + expected = np.array([[0, 0, 0], [1, 2, 1], [2, 4, 4], [3, 6, 9]]) + assert_equal(res, expected) + + +def test_read_from_generator_multitype(): + def gen(): + for i in range(3): + yield f"{i} {i / 4}" + + res = np.loadtxt(gen(), dtype="i, d", delimiter=" ") + expected = np.array([(0, 0.0), (1, 0.25), (2, 0.5)], dtype="i, d") + assert_equal(res, expected) + + +def test_read_from_bad_generator(): + def gen(): + for entry in ["1,2", b"3, 5", 12738]: + yield entry + + with pytest.raises( + TypeError, match=r"non-string returned while reading data"): + np.loadtxt(gen(), dtype="i, i", delimiter=",") + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_object_cleanup_on_read_error(): + 
sentinel = object() + already_read = 0 + + def conv(x): + nonlocal already_read + if already_read > 4999: + raise ValueError("failed half-way through!") + already_read += 1 + return sentinel + + txt = StringIO("x\n" * 10000) + + with pytest.raises(ValueError, match="at row 5000, column 1"): + np.loadtxt(txt, dtype=object, converters={0: conv}) + + assert sys.getrefcount(sentinel) == 2 + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_character_not_bytes_compatible(): + """Test exception when a character cannot be encoded as 'S'.""" + data = StringIO("–") # == \u2013 + with pytest.raises(ValueError): + np.loadtxt(data, dtype="S5") + + +@pytest.mark.parametrize("conv", (0, [float], "")) +def test_invalid_converter(conv): + msg = ( + "converters must be a dictionary mapping columns to converter " + "functions or a single callable." + ) + with pytest.raises(TypeError, match=msg): + np.loadtxt(StringIO("1 2\n3 4"), converters=conv) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_converters_dict_raises_non_integer_key(): + with pytest.raises(TypeError, match="keys of the converters dict"): + np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}) + with pytest.raises(TypeError, match="keys of the converters dict"): + np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}, usecols=0) + + +@pytest.mark.parametrize("bad_col_ind", (3, -3)) +def test_converters_dict_raises_non_col_key(bad_col_ind): + data = StringIO("1 2\n3 4") + with pytest.raises(ValueError, match="converter specified for column"): + np.loadtxt(data, converters={bad_col_ind: int}) + + +def test_converters_dict_raises_val_not_callable(): + with pytest.raises(TypeError, + match="values of the converters dictionary must be callable"): + np.loadtxt(StringIO("1 2\n3 4"), converters={0: 1}) + + +@pytest.mark.parametrize("q", ('"', "'", "`")) +def test_quoted_field(q): + txt = StringIO( + f"{q}alpha, x{q}, 2.5\n{q}beta, y{q}, 4.5\n{q}gamma, z{q}, 5.0\n" + ) + dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)]) + expected = np.array( + [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype + ) + + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar=q) + assert_array_equal(res, expected) + + +@pytest.mark.parametrize("q", ('"', "'", "`")) +def test_quoted_field_with_whitepace_delimiter(q): + txt = StringIO( + f"{q}alpha, x{q} 2.5\n{q}beta, y{q} 4.5\n{q}gamma, z{q} 5.0\n" + ) + dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)]) + expected = np.array( + [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype + ) + + res = np.loadtxt(txt, dtype=dtype, delimiter=None, quotechar=q) + assert_array_equal(res, expected) + + +def test_quote_support_default(): + """Support for quoted fields is disabled by default.""" + txt = StringIO('"lat,long", 45, 30\n') + dtype = np.dtype([('f0', 'U24'), ('f1', np.float64), ('f2', np.float64)]) + + with pytest.raises(ValueError, + match="the dtype passed requires 3 columns but 4 were"): + np.loadtxt(txt, dtype=dtype, delimiter=",") + + # Enable quoting support with non-None value for quotechar param + txt.seek(0) + expected = np.array([("lat,long", 45., 30.)], dtype=dtype) + + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"') + assert_array_equal(res, expected) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def 
test_quotechar_multichar_error(): + txt = StringIO("1,2\n3,4") + msg = r".*must be a single unicode character or None" + with pytest.raises(TypeError, match=msg): + np.loadtxt(txt, delimiter=",", quotechar="''") + + +def test_comment_multichar_error_with_quote(): + txt = StringIO("1,2\n3,4") + msg = ( + "when multiple comments or a multi-character comment is given, " + "quotes are not supported." + ) + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, delimiter=",", comments="123", quotechar='"') + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, delimiter=",", comments=["#", "%"], quotechar='"') + + # A single character string in a tuple is unpacked though: + res = np.loadtxt(txt, delimiter=",", comments=("#",), quotechar="'") + assert_equal(res, [[1, 2], [3, 4]]) + + +def test_structured_dtype_with_quotes(): + data = StringIO( + ( + "1000;2.4;'alpha';-34\n" + "2000;3.1;'beta';29\n" + "3500;9.9;'gamma';120\n" + "4090;8.1;'delta';0\n" + "5001;4.4;'epsilon';-99\n" + "6543;7.8;'omega';-1\n" + ) + ) + dtype = np.dtype( + [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] + ) + expected = np.array( + [ + (1000, 2.4, "alpha", -34), + (2000, 3.1, "beta", 29), + (3500, 9.9, "gamma", 120), + (4090, 8.1, "delta", 0), + (5001, 4.4, "epsilon", -99), + (6543, 7.8, "omega", -1) + ], + dtype=dtype + ) + res = np.loadtxt(data, dtype=dtype, delimiter=";", quotechar="'") + assert_array_equal(res, expected) + + +def test_quoted_field_is_not_empty(): + txt = StringIO('1\n\n"4"\n""') + expected = np.array(["1", "4", ""], dtype="U1") + res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"') + assert_equal(res, expected) + +def test_quoted_field_is_not_empty_nonstrict(): + # Same as test_quoted_field_is_not_empty but check that we are not strict + # about missing closing quote (this is the `csv.reader` default also) + txt = StringIO('1\n\n"4"\n"') + expected = np.array(["1", "4", ""], dtype="U1") + res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"') + assert_equal(res, expected) + +def test_consecutive_quotechar_escaped(): + txt = StringIO('"Hello, my name is ""Monty""!"') + expected = np.array('Hello, my name is "Monty"!', dtype="U40") + res = np.loadtxt(txt, dtype="U40", delimiter=",", quotechar='"') + assert_equal(res, expected) + + +@pytest.mark.parametrize("data", ("", "\n\n\n", "# 1 2 3\n# 4 5 6\n")) +@pytest.mark.parametrize("ndmin", (0, 1, 2)) +@pytest.mark.parametrize("usecols", [None, (1, 2, 3)]) +def test_warn_on_no_data(data, ndmin, usecols): + """Check that a UserWarning is emitted when no data is read from input.""" + if usecols is not None: + expected_shape = (0, 3) + elif ndmin == 2: + expected_shape = (0, 1) # guess a single column?! 
+    else:
+        expected_shape = (0,)
+
+    txt = StringIO(data)
+    with pytest.warns(UserWarning, match="input contained no data"):
+        res = np.loadtxt(txt, ndmin=ndmin, usecols=usecols)
+    assert res.shape == expected_shape
+
+    with NamedTemporaryFile(mode="w+") as fh:
+        fh.write(data)
+        fh.seek(0)
+        with pytest.warns(UserWarning, match="input contained no data"):
+            res = np.loadtxt(fh, ndmin=ndmin, usecols=usecols)
+        assert res.shape == expected_shape
+
+@pytest.mark.parametrize("skiprows", (2, 3))
+def test_warn_on_skipped_data(skiprows):
+    data = "1 2 3\n4 5 6"
+    txt = StringIO(data)
+    with pytest.warns(UserWarning, match="input contained no data"):
+        np.loadtxt(txt, skiprows=skiprows)
+
+
+@pytest.mark.parametrize(["dtype", "value"], [
+    ("i2", 0x0001), ("u2", 0x0001),
+    ("i4", 0x00010203), ("u4", 0x00010203),
+    ("i8", 0x0001020304050607), ("u8", 0x0001020304050607),
+    # The following values are constructed to lead to unique bytes:
+    ("float16", 3.07e-05),
+    ("float32", 9.2557e-41), ("complex64", 9.2557e-41+2.8622554e-29j),
+    ("float64", -1.758571353180402e-24),
+    # Here and below, the repr side-steps a small loss of precision in
+    # complex `str` in PyPy (which is probably fine, as repr works):
+    ("complex128", repr(5.406409232372729e-29-1.758571353180402e-24j)),
+    # Use integer values that fit into double. Everything else leads to
+    # problems due to longdoubles going via double and decimal strings
+    # causing rounding errors.
+    ("longdouble", 0x01020304050607),
+    ("clongdouble", repr(0x01020304050607 + (0x00121314151617 * 1j))),
+    ("U2", "\U00010203\U000a0b0c")])
+@pytest.mark.parametrize("swap", [True, False])
+def test_byteswapping_and_unaligned(dtype, value, swap):
+    # Try to create "interesting" values within the valid unicode range:
+    dtype = np.dtype(dtype)
+    data = [f"x,{value}\n"]  # repr as PyPy `str` truncates some
+    if swap:
+        dtype = dtype.newbyteorder()
+    full_dt = np.dtype([("a", "S1"), ("b", dtype)], align=False)
+    # The above ensures that the interesting "b" field is unaligned:
+    assert full_dt.fields["b"][1] == 1
+    res = np.loadtxt(data, dtype=full_dt, delimiter=",", encoding=None,
+                     max_rows=1)  # max-rows prevents over-allocation
+    assert res["b"] == dtype.type(value)
+
+
+@pytest.mark.parametrize("dtype",
+                         np.typecodes["AllInteger"] + "efdFD" + "?")
+def test_unicode_whitespace_stripping(dtype):
+    # Test that all numeric types (and bool) strip whitespace correctly
+    # \u202F is a narrow no-break space, `\n` is just a whitespace if quoted.
+ # Currently, skip float128 as it did not always support this and has no + # "custom" parsing: + txt = StringIO(' 3 ,"\u202F2\n"') + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"') + assert_array_equal(res, np.array([3, 2]).astype(dtype)) + + +@pytest.mark.parametrize("dtype", "FD") +def test_unicode_whitespace_stripping_complex(dtype): + # Complex has a few extra cases since it has two components and + # parentheses + line = " 1 , 2+3j , ( 4+5j ), ( 6+-7j ) , 8j , ( 9j ) \n" + data = [line, line.replace(" ", "\u202F")] + res = np.loadtxt(data, dtype=dtype, delimiter=',') + assert_array_equal(res, np.array([[1, 2+3j, 4+5j, 6-7j, 8j, 9j]] * 2)) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", "FD") +@pytest.mark.parametrize("field", + ["1 +2j", "1+ 2j", "1+2 j", "1+-+3", "(1j", "(1", "(1+2j", "1+2j)"]) +def test_bad_complex(dtype, field): + with pytest.raises(ValueError): + np.loadtxt([field + "\n"], dtype=dtype, delimiter=",") + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + "efgdFDG" + "?") +def test_nul_character_error(dtype): + # Test that a \0 character is correctly recognized as an error even if + # what comes before is valid (not everything gets parsed internally). + if dtype.lower() == "g": + pytest.xfail("longdouble/clongdouble assignment may misbehave.") + with pytest.raises(ValueError): + np.loadtxt(["1\000"], dtype=dtype, delimiter=",", quotechar='"') + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + "efgdFDG" + "?") +def test_no_thousands_support(dtype): + # Mainly to document behaviour, Python supports thousands like 1_1. + # (e and G may end up using different conversion and support it, this is + # a bug but happens...) + if dtype == "e": + pytest.skip("half assignment currently uses Python float converter") + if dtype in "eG": + pytest.xfail("clongdouble assignment is buggy (uses `complex`?).") + + assert int("1_1") == float("1_1") == complex("1_1") == 11 + with pytest.raises(ValueError): + np.loadtxt(["1_1\n"], dtype=dtype) + + +@pytest.mark.parametrize("data", [ + ["1,2\n", "2\n,3\n"], + ["1,2\n", "2\r,3\n"]]) +def test_bad_newline_in_iterator(data): + # In NumPy <=1.22 this was accepted, because newlines were completely + # ignored when the input was an iterable. This could be changed, but right + # now, we raise an error. + msg = "Found an unquoted embedded newline within a single line" + with pytest.raises(ValueError, match=msg): + np.loadtxt(data, delimiter=",") + + +@pytest.mark.parametrize("data", [ + ["1,2\n", "2,3\r\n"], # a universal newline + ["1,2\n", "'2\n',3\n"], # a quoted newline + ["1,2\n", "'2\r',3\n"], + ["1,2\n", "'2\r\n',3\n"], +]) +def test_good_newline_in_iterator(data): + # The quoted newlines will be untransformed here, but are just whitespace. + res = np.loadtxt(data, delimiter=",", quotechar="'") + assert_array_equal(res, [[1., 2.], [2., 3.]]) + + +@pytest.mark.parametrize("newline", ["\n", "\r", "\r\n"]) +def test_universal_newlines_quoted(newline): + # Check that universal newline support within the tokenizer is not applied + # to quoted fields. 
(note that lines must end in newline or quoted
+    # fields will not include a newline at all)
+    data = ['1,"2\n"\n', '3,"4\n', '1"\n']
+    data = [row.replace("\n", newline) for row in data]
+    res = np.loadtxt(data, dtype=object, delimiter=",", quotechar='"')
+    assert_array_equal(res, [['1', f'2{newline}'], ['3', f'4{newline}1']])
+
+
+def test_null_character():
+    # Basic tests to check that the NUL character is not special:
+    res = np.loadtxt(["1\0002\0003\n", "4\0005\0006"], delimiter="\000")
+    assert_array_equal(res, [[1, 2, 3], [4, 5, 6]])
+
+    # Also not as part of a field (avoid unicode/arrays as unicode strips \0)
+    res = np.loadtxt(["1\000,2\000,3\n", "4\000,5\000,6"],
+                     delimiter=",", dtype=object)
+    assert res.tolist() == [["1\000", "2\000", "3"], ["4\000", "5\000", "6"]]
+
+
+def test_iterator_fails_getting_next_line():
+    class BadSequence:
+        def __len__(self):
+            return 100
+
+        def __getitem__(self, item):
+            if item == 50:
+                raise RuntimeError("Bad things happened!")
+            return f"{item}, {item+1}"
+
+    with pytest.raises(RuntimeError, match="Bad things happened!"):
+        np.loadtxt(BadSequence(), dtype=int, delimiter=",")
+
+
+class TestCReaderUnitTests:
+    # These are internal tests for paths that should not be possible to hit
+    # unless things go very, very wrong somewhere.
+    def test_not_a_filelike(self):
+        with pytest.raises(AttributeError, match=".*read"):
+            np.core._multiarray_umath._load_from_filelike(
+                object(), dtype=np.dtype("i"), filelike=True)
+
+    def test_filelike_read_fails(self):
+        # Can only be reached if loadtxt opens the file, so it is hard to do
+        # via the public interface (although maybe not impossible considering
+        # the current "DataClass" backing).
+        class BadFileLike:
+            counter = 0
+
+            def read(self, size):
+                self.counter += 1
+                if self.counter > 20:
+                    raise RuntimeError("Bad bad bad!")
+                return "1,2,3\n"
+
+        with pytest.raises(RuntimeError, match="Bad bad bad!"):
+            np.core._multiarray_umath._load_from_filelike(
+                BadFileLike(), dtype=np.dtype("i"), filelike=True)
+
+    def test_filelike_bad_read(self):
+        # Can only be reached if loadtxt opens the file, so it is hard to do
+        # via the public interface (although maybe not impossible considering
+        # the current "DataClass" backing).
+
+        class BadFileLike:
+            counter = 0
+
+            def read(self, size):
+                return 1234  # not a string!
+
+        with pytest.raises(TypeError,
+                match="non-string returned while reading data"):
+            np.core._multiarray_umath._load_from_filelike(
+                BadFileLike(), dtype=np.dtype("i"), filelike=True)
+
+    def test_not_an_iter(self):
+        with pytest.raises(TypeError,
+                match="error reading from object, expected an iterable"):
+            np.core._multiarray_umath._load_from_filelike(
+                object(), dtype=np.dtype("i"), filelike=False)
+
+    def test_bad_type(self):
+        with pytest.raises(TypeError, match="internal error: dtype must"):
+            np.core._multiarray_umath._load_from_filelike(
+                object(), dtype="i", filelike=False)
+
+    def test_bad_encoding(self):
+        with pytest.raises(TypeError, match="encoding must be a unicode"):
+            np.core._multiarray_umath._load_from_filelike(
+                object(), dtype=np.dtype("i"), filelike=False, encoding=123)
+
+    @pytest.mark.parametrize("newline", ["\r", "\n", "\r\n"])
+    def test_manual_universal_newlines(self, newline):
+        # This is currently not available to users, because we should always
+        # open files with universal newlines enabled `newline=None`.
+        # (And reading from an iterator uses slightly different code paths.)
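+        # (Editorial note, not part of the upstream comment:) `quote`,
+        # `comment` and `skiplines` in the call below are the private
+        # spellings of loadtxt's public quotechar/comments/skiprows
+        # parameters.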
+        # We have no real support for `newline="\r"` or `newline="\n"` as the
+        # user cannot specify those options.
+        data = StringIO('0\n1\n"2\n"\n3\n4 #\n'.replace("\n", newline),
+                        newline="")
+
+        res = np.core._multiarray_umath._load_from_filelike(
+            data, dtype=np.dtype("U10"), filelike=True,
+            quote='"', comment="#", skiplines=1)
+        assert_array_equal(res[:, 0], ["1", f"2{newline}", "3", "4 "])
+
+
+def test_delimiter_comment_collision_raises():
+    with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+        np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=",")
+
+
+def test_delimiter_quotechar_collision_raises():
+    with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+        np.loadtxt(StringIO("1, 2, 3"), delimiter=",", quotechar=",")
+
+
+def test_comment_quotechar_collision_raises():
+    with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+        np.loadtxt(StringIO("1 2 3"), comments="#", quotechar="#")
+
+
+def test_delimiter_and_multiple_comments_collision_raises():
+    with pytest.raises(
+        TypeError, match="Comment characters.*cannot include the delimiter"
+    ):
+        np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=["#", ","])
+
+
+@pytest.mark.parametrize(
+    "ws",
+    (
+        " ",  # space
+        "\t",  # tab
+        "\u2003",  # em space
+        "\u00A0",  # non-breaking space
+        "\u3000",  # ideographic space
+    )
+)
+def test_collision_with_default_delimiter_raises(ws):
+    with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+        np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), comments=ws)
+    with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+        np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), quotechar=ws)
+
+
+@pytest.mark.parametrize("nl", ("\n", "\r"))
+def test_control_character_newline_raises(nl):
+    txt = StringIO(f"1{nl}2{nl}3{nl}{nl}4{nl}5{nl}6{nl}{nl}")
+    msg = "control character.*cannot be a newline"
+    with pytest.raises(TypeError, match=msg):
+        np.loadtxt(txt, delimiter=nl)
+    with pytest.raises(TypeError, match=msg):
+        np.loadtxt(txt, comments=nl)
+    with pytest.raises(TypeError, match=msg):
+        np.loadtxt(txt, quotechar=nl)
+
+
+@pytest.mark.parametrize(
+    ("generic_data", "long_datum", "unitless_dtype", "expected_dtype"),
+    [
+        ("2012-03", "2013-01-15", "M8", "M8[D]"),  # Datetimes
+        ("spam-a-lot", "tis_but_a_scratch", "U", "U17"),  # str
+    ],
+)
+@pytest.mark.parametrize("nrows", (10, 50000, 60000))  # lt, eq, gt chunksize
+def test_parametric_unit_discovery(
+    generic_data, long_datum, unitless_dtype, expected_dtype, nrows
+):
+    """Check that the correct unit (e.g.
month, day, second) is discovered from
+    the data when a user specifies a unitless datetime."""
+    # Unit should be "D" (days) due to last entry
+    data = [generic_data] * nrows + [long_datum]
+    expected = np.array(data, dtype=expected_dtype)
+
+    # file-like path
+    txt = StringIO("\n".join(data))
+    a = np.loadtxt(txt, dtype=unitless_dtype)
+    assert a.dtype == expected.dtype
+    assert_equal(a, expected)
+
+    # file-name path
+    fd, fname = mkstemp()
+    os.close(fd)
+    with open(fname, "w") as fh:
+        fh.write("\n".join(data))
+    a = np.loadtxt(fname, dtype=unitless_dtype)
+    os.remove(fname)
+    assert a.dtype == expected.dtype
+    assert_equal(a, expected)
+
+
+def test_str_dtype_unit_discovery_with_converter():
+    data = ["spam-a-lot"] * 60000 + ["XXXtis_but_a_scratch"]
+    expected = np.array(
+        ["spam-a-lot"] * 60000 + ["tis_but_a_scratch"], dtype="U17"
+    )
+    conv = lambda s: s.strip("XXX")
+
+    # file-like path
+    txt = StringIO("\n".join(data))
+    a = np.loadtxt(txt, dtype="U", converters=conv, encoding=None)
+    assert a.dtype == expected.dtype
+    assert_equal(a, expected)
+
+    # file-name path
+    fd, fname = mkstemp()
+    os.close(fd)
+    with open(fname, "w") as fh:
+        fh.write("\n".join(data))
+    a = np.loadtxt(fname, dtype="U", converters=conv, encoding=None)
+    os.remove(fname)
+    assert a.dtype == expected.dtype
+    assert_equal(a, expected)
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+                    reason="PyPy bug in error formatting")
+def test_control_character_empty():
+    with pytest.raises(TypeError, match="Text reading control character must"):
+        np.loadtxt(StringIO("1 2 3"), delimiter="")
+    with pytest.raises(TypeError, match="Text reading control character must"):
+        np.loadtxt(StringIO("1 2 3"), quotechar="")
+    with pytest.raises(ValueError, match="comments cannot be an empty string"):
+        np.loadtxt(StringIO("1 2 3"), comments="")
+    with pytest.raises(ValueError, match="comments cannot be an empty string"):
+        np.loadtxt(StringIO("1 2 3"), comments=["#", ""])
+
+
+def test_control_characters_as_bytes():
+    """Byte control characters (comments, delimiter) are supported."""
+    a = np.loadtxt(StringIO("#header\n1,2,3"), comments=b"#", delimiter=b",")
+    assert_equal(a, [1, 2, 3])
+
+
+@pytest.mark.filterwarnings('ignore::UserWarning')
+def test_field_growing_cases():
+    # Test empty field appending/growing (each field still takes 1 character)
+    # to see if the final field appending does not create issues.
+    res = np.loadtxt([""], delimiter=",", dtype=bytes)
+    assert len(res) == 0
+
+    for i in range(1, 1024):
+        res = np.loadtxt(["," * i], delimiter=",", dtype=bytes)
+        assert len(res) == i+1
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_mixins.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_mixins.py
new file mode 100644
index 00000000..63205876
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_mixins.py
@@ -0,0 +1,216 @@
+import numbers
+import operator
+
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_raises
+
+
+# NOTE: This class should be kept as an exact copy of the example from the
+# docstring for NDArrayOperatorsMixin.
+ +class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): + def __init__(self, value): + self.value = np.asarray(value) + + # One might also consider adding the built-in list type to this + # list, to support operations like np.add(array_like, list) + _HANDLED_TYPES = (np.ndarray, numbers.Number) + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + out = kwargs.get('out', ()) + for x in inputs + out: + # Only support operations with instances of _HANDLED_TYPES. + # Use ArrayLike instead of type(self) for isinstance to + # allow subclasses that don't override __array_ufunc__ to + # handle ArrayLike objects. + if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)): + return NotImplemented + + # Defer to the implementation of the ufunc on unwrapped values. + inputs = tuple(x.value if isinstance(x, ArrayLike) else x + for x in inputs) + if out: + kwargs['out'] = tuple( + x.value if isinstance(x, ArrayLike) else x + for x in out) + result = getattr(ufunc, method)(*inputs, **kwargs) + + if type(result) is tuple: + # multiple return values + return tuple(type(self)(x) for x in result) + elif method == 'at': + # no return value + return None + else: + # one return value + return type(self)(result) + + def __repr__(self): + return '%s(%r)' % (type(self).__name__, self.value) + + +def wrap_array_like(result): + if type(result) is tuple: + return tuple(ArrayLike(r) for r in result) + else: + return ArrayLike(result) + + +def _assert_equal_type_and_value(result, expected, err_msg=None): + assert_equal(type(result), type(expected), err_msg=err_msg) + if isinstance(result, tuple): + assert_equal(len(result), len(expected), err_msg=err_msg) + for result_item, expected_item in zip(result, expected): + _assert_equal_type_and_value(result_item, expected_item, err_msg) + else: + assert_equal(result.value, expected.value, err_msg=err_msg) + assert_equal(getattr(result.value, 'dtype', None), + getattr(expected.value, 'dtype', None), err_msg=err_msg) + + +_ALL_BINARY_OPERATORS = [ + operator.lt, + operator.le, + operator.eq, + operator.ne, + operator.gt, + operator.ge, + operator.add, + operator.sub, + operator.mul, + operator.truediv, + operator.floordiv, + operator.mod, + divmod, + pow, + operator.lshift, + operator.rshift, + operator.and_, + operator.xor, + operator.or_, +] + + +class TestNDArrayOperatorsMixin: + + def test_array_like_add(self): + + def check(result): + _assert_equal_type_and_value(result, ArrayLike(0)) + + check(ArrayLike(0) + 0) + check(0 + ArrayLike(0)) + + check(ArrayLike(0) + np.array(0)) + check(np.array(0) + ArrayLike(0)) + + check(ArrayLike(np.array(0)) + 0) + check(0 + ArrayLike(np.array(0))) + + check(ArrayLike(np.array(0)) + np.array(0)) + check(np.array(0) + ArrayLike(np.array(0))) + + def test_inplace(self): + array_like = ArrayLike(np.array([0])) + array_like += 1 + _assert_equal_type_and_value(array_like, ArrayLike(np.array([1]))) + + array = np.array([0]) + array += ArrayLike(1) + _assert_equal_type_and_value(array, ArrayLike(np.array([1]))) + + def test_opt_out(self): + + class OptOut: + """Object that opts out of __array_ufunc__.""" + __array_ufunc__ = None + + def __add__(self, other): + return self + + def __radd__(self, other): + return self + + array_like = ArrayLike(1) + opt_out = OptOut() + + # supported operations + assert_(array_like + opt_out is opt_out) + assert_(opt_out + array_like is opt_out) + + # not supported + with assert_raises(TypeError): + # don't use the Python default, array_like = array_like + opt_out + array_like += opt_out + with 
assert_raises(TypeError): + array_like - opt_out + with assert_raises(TypeError): + opt_out - array_like + + def test_subclass(self): + + class SubArrayLike(ArrayLike): + """Should take precedence over ArrayLike.""" + + x = ArrayLike(0) + y = SubArrayLike(1) + _assert_equal_type_and_value(x + y, y) + _assert_equal_type_and_value(y + x, y) + + def test_object(self): + x = ArrayLike(0) + obj = object() + with assert_raises(TypeError): + x + obj + with assert_raises(TypeError): + obj + x + with assert_raises(TypeError): + x += obj + + def test_unary_methods(self): + array = np.array([-1, 0, 1, 2]) + array_like = ArrayLike(array) + for op in [operator.neg, + operator.pos, + abs, + operator.invert]: + _assert_equal_type_and_value(op(array_like), ArrayLike(op(array))) + + def test_forward_binary_methods(self): + array = np.array([-1, 0, 1, 2]) + array_like = ArrayLike(array) + for op in _ALL_BINARY_OPERATORS: + expected = wrap_array_like(op(array, 1)) + actual = op(array_like, 1) + err_msg = 'failed for operator {}'.format(op) + _assert_equal_type_and_value(expected, actual, err_msg=err_msg) + + def test_reflected_binary_methods(self): + for op in _ALL_BINARY_OPERATORS: + expected = wrap_array_like(op(2, 1)) + actual = op(2, ArrayLike(1)) + err_msg = 'failed for operator {}'.format(op) + _assert_equal_type_and_value(expected, actual, err_msg=err_msg) + + def test_matmul(self): + array = np.array([1, 2], dtype=np.float64) + array_like = ArrayLike(array) + expected = ArrayLike(np.float64(5)) + _assert_equal_type_and_value(expected, np.matmul(array_like, array)) + _assert_equal_type_and_value( + expected, operator.matmul(array_like, array)) + _assert_equal_type_and_value( + expected, operator.matmul(array, array_like)) + + def test_ufunc_at(self): + array = ArrayLike(np.array([1, 2, 3, 4])) + assert_(np.negative.at(array, np.array([0, 1])) is None) + _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4])) + + def test_ufunc_two_outputs(self): + mantissa, exponent = np.frexp(2 ** -3) + expected = (ArrayLike(mantissa), ArrayLike(exponent)) + _assert_equal_type_and_value( + np.frexp(ArrayLike(2 ** -3)), expected) + _assert_equal_type_and_value( + np.frexp(ArrayLike(np.array(2 ** -3))), expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_nanfunctions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_nanfunctions.py new file mode 100644 index 00000000..257de381 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_nanfunctions.py @@ -0,0 +1,1268 @@ +import warnings +import pytest +import inspect + +import numpy as np +from numpy.core.numeric import normalize_axis_tuple +from numpy.lib.nanfunctions import _nan_mask, _replace_nan +from numpy.testing import ( + assert_, assert_equal, assert_almost_equal, assert_raises, + assert_array_equal, suppress_warnings + ) + + +# Test data +_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], + [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833], + [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954], + [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]]) + + +# Rows of _ndat with nans removed +_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]), + np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]), + np.array([0.1042, -0.5954]), + np.array([0.1610, 0.1859, 0.3146])] + +# Rows of _ndat with nans converted to ones +_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170], + [0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833], + [1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954], + [0.1610, 
1.0, 1.0, 0.1859, 0.3146, 1.0]]) + +# Rows of _ndat with nans converted to zeros +_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170], + [0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833], + [0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954], + [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]]) + + +class TestSignatureMatch: + NANFUNCS = { + np.nanmin: np.amin, + np.nanmax: np.amax, + np.nanargmin: np.argmin, + np.nanargmax: np.argmax, + np.nansum: np.sum, + np.nanprod: np.prod, + np.nancumsum: np.cumsum, + np.nancumprod: np.cumprod, + np.nanmean: np.mean, + np.nanmedian: np.median, + np.nanpercentile: np.percentile, + np.nanquantile: np.quantile, + np.nanvar: np.var, + np.nanstd: np.std, + } + IDS = [k.__name__ for k in NANFUNCS] + + @staticmethod + def get_signature(func, default="..."): + """Construct a signature and replace all default parameter-values.""" + prm_list = [] + signature = inspect.signature(func) + for prm in signature.parameters.values(): + if prm.default is inspect.Parameter.empty: + prm_list.append(prm) + else: + prm_list.append(prm.replace(default=default)) + return inspect.Signature(prm_list) + + @pytest.mark.parametrize("nan_func,func", NANFUNCS.items(), ids=IDS) + def test_signature_match(self, nan_func, func): + # Ignore the default parameter-values as they can sometimes differ + # between the two functions (*e.g.* one has `False` while the other + # has `np._NoValue`) + signature = self.get_signature(func) + nan_signature = self.get_signature(nan_func) + np.testing.assert_equal(signature, nan_signature) + + def test_exhaustiveness(self): + """Validate that all nan functions are actually tested.""" + np.testing.assert_equal( + set(self.IDS), set(np.lib.nanfunctions.__all__) + ) + + +class TestNanFunctions_MinMax: + + nanfuncs = [np.nanmin, np.nanmax] + stdfuncs = [np.min, np.max] + + def test_mutation(self): + # Check that passed array is not modified. 
+ ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for axis in [None, 0, 1]: + tgt = rf(mat, axis=axis, keepdims=True) + res = nf(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.zeros(3) + tgt = rf(mat, axis=1) + res = nf(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + mat = np.eye(3, dtype=c) + tgt = rf(mat, axis=1).dtype.type + res = nf(mat, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, axis=None).dtype.type + res = nf(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + tgt = [rf(d) for d in _rdat] + res = nf(_ndat, axis=1) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + match = "All-NaN slice encountered" + for func in self.nanfuncs: + with pytest.warns(RuntimeWarning, match=match): + out = func(array, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + + def test_masked(self): + mat = np.ma.fix_invalid(_ndat) + msk = mat._mask.copy() + for f in [np.nanmin]: + res = f(mat, axis=1) + tgt = f(_ndat, axis=1) + assert_equal(res, tgt) + assert_equal(mat._mask, msk) + assert_(not np.isinf(mat).any()) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) 
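+
+    # Editorial sketch (not part of the upstream suite): for scalar or 0-d
+    # input, nanmin/nanmax return the value itself when it is not NaN, and
+    # otherwise warn "All-NaN slice encountered" and return NaN, e.g.:
+    #
+    #     >>> np.nanmin(0.)
+    #     0.0
+    #     >>> np.nanmax(np.array(np.nan))  # RuntimeWarning, result is nan
+    #     nan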
+
+    def test_subclass(self):
+        class MyNDArray(np.ndarray):
+            pass
+
+        # Check that it works and that type and
+        # shape are preserved
+        mine = np.eye(3).view(MyNDArray)
+        for f in self.nanfuncs:
+            res = f(mine, axis=0)
+            assert_(isinstance(res, MyNDArray))
+            assert_(res.shape == (3,))
+            res = f(mine, axis=1)
+            assert_(isinstance(res, MyNDArray))
+            assert_(res.shape == (3,))
+            res = f(mine)
+            assert_(res.shape == ())
+
+        # check that rows of nan are dealt with for subclasses (#4628)
+        mine[1] = np.nan
+        for f in self.nanfuncs:
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter('always')
+                res = f(mine, axis=0)
+                assert_(isinstance(res, MyNDArray))
+                assert_(not np.any(np.isnan(res)))
+                assert_(len(w) == 0)
+
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter('always')
+                res = f(mine, axis=1)
+                assert_(isinstance(res, MyNDArray))
+                assert_(np.isnan(res[1]) and not np.isnan(res[0])
+                        and not np.isnan(res[2]))
+                assert_(len(w) == 1, 'no warning raised')
+                assert_(issubclass(w[0].category, RuntimeWarning))
+
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter('always')
+                res = f(mine)
+                assert_(res.shape == ())
+                assert_(not np.isnan(res))
+                assert_(len(w) == 0)
+
+    def test_object_array(self):
+        arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object)
+        assert_equal(np.nanmin(arr), 1.0)
+        assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0])
+
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            # assert_equal does not work on object arrays of nan
+            assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan])
+            assert_(len(w) == 1, 'no warning raised')
+            assert_(issubclass(w[0].category, RuntimeWarning))
+
+    @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+    def test_initial(self, dtype):
+        class MyNDArray(np.ndarray):
+            pass
+
+        ar = np.arange(9).astype(dtype)
+        ar[:5] = np.nan
+
+        for f in self.nanfuncs:
+            initial = 100 if f is np.nanmax else 0
+
+            ret1 = f(ar, initial=initial)
+            assert ret1.dtype == dtype
+            assert ret1 == initial
+
+            ret2 = f(ar.view(MyNDArray), initial=initial)
+            assert ret2.dtype == dtype
+            assert ret2 == initial
+
+    @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+    def test_where(self, dtype):
+        class MyNDArray(np.ndarray):
+            pass
+
+        ar = np.arange(9).reshape(3, 3).astype(dtype)
+        ar[0, :] = np.nan
+        where = np.ones_like(ar, dtype=np.bool_)
+        where[:, 0] = False
+
+        for f in self.nanfuncs:
+            reference = 4 if f is np.nanmin else 8
+
+            ret1 = f(ar, where=where, initial=5)
+            assert ret1.dtype == dtype
+            assert ret1 == reference
+
+            ret2 = f(ar.view(MyNDArray), where=where, initial=5)
+            assert ret2.dtype == dtype
+            assert ret2 == reference
+
+
+class TestNanFunctions_ArgminArgmax:
+
+    nanfuncs = [np.nanargmin, np.nanargmax]
+
+    def test_mutation(self):
+        # Check that passed array is not modified.
+ ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_result_values(self): + for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): + for row in _ndat: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in") + ind = f(row) + val = row[ind] + # comparing with NaN is tricky as the result + # is always false except for NaN != NaN + assert_(not np.isnan(val)) + assert_(not fcmp(val, row).any()) + assert_(not np.equal(val, row[:ind]).any()) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func in self.nanfuncs: + with pytest.raises(ValueError, match="All-NaN slice encountered"): + func(array, axis=axis) + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + assert_raises(ValueError, f, mat, axis=axis) + for axis in [1]: + res = f(mat, axis=axis) + assert_equal(res, np.zeros(0)) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) + + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + mine = np.eye(3).view(MyNDArray) + for f in self.nanfuncs: + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_keepdims(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + reference = 5 if f is np.nanargmin else 8 + ret = f(ar, keepdims=True) + assert ret.ndim == ar.ndim + assert ret == reference + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_out(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + out = np.zeros((), dtype=np.intp) + reference = 5 if f is np.nanargmin else 8 + ret = f(ar, out=out) + assert ret is out + assert ret == reference + + + +_TEST_ARRAYS = { + "0d": np.array(5), + "1d": np.array([127, 39, 93, 87, 46]) +} +for _v in _TEST_ARRAYS.values(): + _v.setflags(write=False) + + +@pytest.mark.parametrize( + "dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O", +) +@pytest.mark.parametrize("mat", _TEST_ARRAYS.values(), ids=_TEST_ARRAYS.keys()) +class TestNanFunctions_NumberTypes: + nanfuncs = { + np.nanmin: np.min, + np.nanmax: np.max, + np.nanargmin: np.argmin, + np.nanargmax: np.argmax, + np.nansum: np.sum, + np.nanprod: np.prod, + np.nancumsum: np.cumsum, + np.nancumprod: np.cumprod, + np.nanmean: np.mean, + np.nanmedian: np.median, + np.nanvar: np.var, + np.nanstd: np.std, + } + nanfunc_ids = [i.__name__ for i in nanfuncs] + + @pytest.mark.parametrize("nanfunc,func", nanfuncs.items(), ids=nanfunc_ids) + @np.errstate(over="ignore") + def test_nanfunc(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + tgt = func(mat) + out = nanfunc(mat) + + assert_almost_equal(out, tgt) + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + @pytest.mark.parametrize( + "nanfunc,func", + 
[(np.nanquantile, np.quantile), (np.nanpercentile, np.percentile)], + ids=["nanquantile", "nanpercentile"], + ) + def test_nanfunc_q(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + if mat.dtype.kind == "c": + assert_raises(TypeError, func, mat, q=1) + assert_raises(TypeError, nanfunc, mat, q=1) + + else: + tgt = func(mat, q=1) + out = nanfunc(mat, q=1) + + assert_almost_equal(out, tgt) + + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + @pytest.mark.parametrize( + "nanfunc,func", + [(np.nanvar, np.var), (np.nanstd, np.std)], + ids=["nanvar", "nanstd"], + ) + def test_nanfunc_ddof(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + tgt = func(mat, ddof=0.5) + out = nanfunc(mat, ddof=0.5) + + assert_almost_equal(out, tgt) + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + +class SharedNanFunctionsTestsMixin: + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for axis in [None, 0, 1]: + tgt = rf(mat, axis=axis, keepdims=True) + res = nf(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.zeros(3) + tgt = rf(mat, axis=1) + res = nf(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_dtype(self): + mat = np.eye(3) + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + with suppress_warnings() as sup: + if nf in {np.nanstd, np.nanvar} and c in 'FDG': + # Giving the warning is a small bug, see gh-8000 + sup.filter(np.ComplexWarning) + tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type + res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type + res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_char(self): + mat = np.eye(3) + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + with suppress_warnings() as sup: + if nf in {np.nanstd, np.nanvar} and c in 'FDG': + # Giving the warning is a small bug, see gh-8000 + sup.filter(np.ComplexWarning) + tgt = rf(mat, dtype=c, axis=1).dtype.type + res = nf(mat, dtype=c, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, dtype=c, axis=None).dtype.type + res = nf(mat, dtype=c, axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + mat = np.eye(3, dtype=c) + tgt = rf(mat, axis=1).dtype.type + res = nf(mat, axis=1).dtype.type + assert_(res is tgt, "res %s, tgt %s" % (res, tgt)) + # scalar case + tgt = rf(mat, axis=None).dtype.type + res = nf(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + tgt = [rf(d) for d in _rdat] + res = nf(_ndat, axis=1) + assert_almost_equal(res, tgt) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) 
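+
+    # Editorial note (not upstream): this mixin is inherited by the concrete
+    # suites below (e.g. TestNanFunctions_SumProd), which only supply paired
+    # `nanfuncs`/`stdfuncs` lists; every shared check then runs once per
+    # nan-function/reference-function pair without duplicating test bodies.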
+ + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + array = np.eye(3) + mine = array.view(MyNDArray) + for f in self.nanfuncs: + expected_shape = f(array, axis=0).shape + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array, axis=1).shape + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array).shape + res = f(mine) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + + +class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nansum, np.nanprod] + stdfuncs = [np.sum, np.prod] + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func, identity in zip(self.nanfuncs, [0, 1]): + out = func(array, axis=axis) + assert np.all(out == identity) + assert out.dtype == array.dtype + + def test_empty(self): + for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]): + mat = np.zeros((0, 3)) + tgt = [tgt_value]*3 + res = f(mat, axis=0) + assert_equal(res, tgt) + tgt = [] + res = f(mat, axis=1) + assert_equal(res, tgt) + tgt = tgt_value + res = f(mat, axis=None) + assert_equal(res, tgt) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_initial(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + reference = 28 if f is np.nansum else 3360 + ret = f(ar, initial=2) + assert ret.dtype == dtype + assert ret == reference + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool_) + where[:, 0] = False + + for f in self.nanfuncs: + reference = 26 if f is np.nansum else 2240 + ret = f(ar, where=where, initial=2) + assert ret.dtype == dtype + assert ret == reference + + +class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nancumsum, np.nancumprod] + stdfuncs = [np.cumsum, np.cumprod] + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan) + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func, identity in zip(self.nanfuncs, [0, 1]): + out = func(array) + assert np.all(out == identity) + assert out.dtype == array.dtype + + def test_empty(self): + for f, tgt_value in zip(self.nanfuncs, [0, 1]): + mat = np.zeros((0, 3)) + tgt = tgt_value*np.ones((0, 3)) + res = f(mat, axis=0) + assert_equal(res, tgt) + tgt = mat + res = f(mat, axis=1) + assert_equal(res, tgt) + tgt = np.zeros((0)) + res = f(mat, axis=None) + assert_equal(res, tgt) + + def test_keepdims(self): + for f, g in zip(self.nanfuncs, self.stdfuncs): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = f(mat, axis=axis, out=None) + res = g(mat, axis=axis, out=None) + assert_(res.ndim == tgt.ndim) 
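+
+        # (Editorial) The cumulative nan-functions flatten the input when
+        # axis=None and otherwise preserve its shape, which the loop below
+        # verifies for every axis of a 4-D array.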
+ + for f in self.nanfuncs: + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + rs = np.random.RandomState(0) + d[rs.rand(*d.shape) < 0.5] = np.nan + res = f(d, axis=None) + assert_equal(res.shape, (1155,)) + for axis in np.arange(4): + res = f(d, axis=axis) + assert_equal(res.shape, (3, 5, 7, 11)) + + def test_result_values(self): + for axis in (-2, -1, 0, 1, None): + tgt = np.cumprod(_ndat_ones, axis=axis) + res = np.nancumprod(_ndat, axis=axis) + assert_almost_equal(res, tgt) + tgt = np.cumsum(_ndat_zeros,axis=axis) + res = np.nancumsum(_ndat, axis=axis) + assert_almost_equal(res, tgt) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.eye(3) + for axis in (-2, -1, 0, 1): + tgt = rf(mat, axis=axis) + res = nf(mat, axis=axis, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + +class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nanmean, np.nanvar, np.nanstd] + stdfuncs = [np.mean, np.var, np.std] + + def test_dtype_error(self): + for f in self.nanfuncs: + for dtype in [np.bool_, np.int_, np.object_]: + assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype) + + def test_out_dtype_error(self): + for f in self.nanfuncs: + for dtype in [np.bool_, np.int_, np.object_]: + out = np.empty(_ndat.shape[0], dtype=dtype) + assert_raises(TypeError, f, _ndat, axis=1, out=out) + + def test_ddof(self): + nanfuncs = [np.nanvar, np.nanstd] + stdfuncs = [np.var, np.std] + for nf, rf in zip(nanfuncs, stdfuncs): + for ddof in [0, 1]: + tgt = [rf(d, ddof=ddof) for d in _rdat] + res = nf(_ndat, axis=1, ddof=ddof) + assert_almost_equal(res, tgt) + + def test_ddof_too_big(self): + nanfuncs = [np.nanvar, np.nanstd] + stdfuncs = [np.var, np.std] + dsize = [len(d) for d in _rdat] + for nf, rf in zip(nanfuncs, stdfuncs): + for ddof in range(5): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + sup.filter(np.ComplexWarning) + tgt = [ddof >= d for d in dsize] + res = nf(_ndat, axis=1, ddof=ddof) + assert_equal(np.isnan(res), tgt) + if any(tgt): + assert_(len(sup.log) == 1) + else: + assert_(len(sup.log) == 0) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + match = "(Degrees of freedom <= 0 for slice.)|(Mean of empty slice)" + for func in self.nanfuncs: + with pytest.warns(RuntimeWarning, match=match): + out = func(array, axis=axis) + assert np.isnan(out).all() + + # `nanvar` and `nanstd` convert complex inputs to their + # corresponding floating dtype + if func is np.nanmean: + assert out.dtype == array.dtype + else: + assert out.dtype == np.abs(array).dtype + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(f(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, 
dtype):
+        ar = np.arange(9).reshape(3, 3).astype(dtype)
+        ar[0, :] = np.nan
+        where = np.ones_like(ar, dtype=np.bool_)
+        where[:, 0] = False
+
+        for f, f_std in zip(self.nanfuncs, self.stdfuncs):
+            reference = f_std(ar[where][2:])
+            dtype_reference = dtype if f is np.nanmean else ar.real.dtype
+
+            ret = f(ar, where=where)
+            assert ret.dtype == dtype_reference
+            np.testing.assert_allclose(ret, reference)
+
+
+_TIME_UNITS = (
+    "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"
+)
+
+# All `inexact` + `timedelta64` type codes
+_TYPE_CODES = list(np.typecodes["AllFloat"])
+_TYPE_CODES += [f"m8[{unit}]" for unit in _TIME_UNITS]
+
+
+class TestNanFunctions_Median:
+
+    def test_mutation(self):
+        # Check that passed array is not modified.
+        ndat = _ndat.copy()
+        np.nanmedian(ndat)
+        assert_equal(ndat, _ndat)
+
+    def test_keepdims(self):
+        mat = np.eye(3)
+        for axis in [None, 0, 1]:
+            tgt = np.median(mat, axis=axis, out=None, overwrite_input=False)
+            res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False)
+            assert_(res.ndim == tgt.ndim)
+
+        d = np.ones((3, 5, 7, 11))
+        # Randomly set some elements to NaN:
+        w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
+        w = w.astype(np.intp)
+        d[tuple(w)] = np.nan
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning)
+            res = np.nanmedian(d, axis=None, keepdims=True)
+            assert_equal(res.shape, (1, 1, 1, 1))
+            res = np.nanmedian(d, axis=(0, 1), keepdims=True)
+            assert_equal(res.shape, (1, 1, 7, 11))
+            res = np.nanmedian(d, axis=(0, 3), keepdims=True)
+            assert_equal(res.shape, (1, 5, 7, 1))
+            res = np.nanmedian(d, axis=(1,), keepdims=True)
+            assert_equal(res.shape, (3, 1, 7, 11))
+            res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True)
+            assert_equal(res.shape, (1, 1, 1, 1))
+            res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True)
+            assert_equal(res.shape, (1, 1, 7, 1))
+
+    @pytest.mark.parametrize(
+        argnames='axis',
+        argvalues=[
+            None,
+            1,
+            (1, ),
+            (0, 1),
+            (-3, -1),
+        ]
+    )
+    @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning")
+    def test_keepdims_out(self, axis):
+        d = np.ones((3, 5, 7, 11))
+        # Randomly set some elements to NaN:
+        w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
+        w = w.astype(np.intp)
+        d[tuple(w)] = np.nan
+        if axis is None:
+            shape_out = (1,) * d.ndim
+        else:
+            axis_norm = normalize_axis_tuple(axis, d.ndim)
+            shape_out = tuple(
+                1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+        out = np.empty(shape_out)
+        result = np.nanmedian(d, axis=axis, keepdims=True, out=out)
+        assert result is out
+        assert_equal(result.shape, shape_out)
+
+    def test_out(self):
+        mat = np.random.rand(3, 3)
+        nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
+        resout = np.zeros(3)
+        tgt = np.median(mat, axis=1)
+        res = np.nanmedian(nan_mat, axis=1, out=resout)
+        assert_almost_equal(res, resout)
+        assert_almost_equal(res, tgt)
+        # 0-d output:
+        resout = np.zeros(())
+        tgt = np.median(mat, axis=None)
+        res = np.nanmedian(nan_mat, axis=None, out=resout)
+        assert_almost_equal(res, resout)
+        assert_almost_equal(res, tgt)
+        res = np.nanmedian(nan_mat, axis=(0, 1), out=resout)
+        assert_almost_equal(res, resout)
+        assert_almost_equal(res, tgt)
+
+    def test_small_large(self):
+        # test the small and large code paths, current cutoff 400 elements
+        for s in [5, 20, 51, 200, 1000]:
+            d = np.random.randn(4, s)
+            # Randomly set some elements to NaN:
+            w = np.random.randint(0, d.size, size=d.size // 5)
+            d.ravel()[w] = np.nan
+            d[:, 0] = 1. 
# ensure at least one good value + # use normal median without nans to compare + tgt = [] + for x in d: + nonan = np.compress(~np.isnan(x), x) + tgt.append(np.median(nonan, overwrite_input=True)) + + assert_array_equal(np.nanmedian(d, axis=-1), tgt) + + def test_result_values(self): + tgt = [np.median(d) for d in _rdat] + res = np.nanmedian(_ndat, axis=1) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", _TYPE_CODES) + def test_allnans(self, dtype, axis): + mat = np.full((3, 3), np.nan).astype(dtype) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + + output = np.nanmedian(mat, axis=axis) + assert output.dtype == mat.dtype + assert np.isnan(output).all() + + if axis is None: + assert_(len(sup.log) == 1) + else: + assert_(len(sup.log) == 3) + + # Check scalar + scalar = np.array(np.nan).astype(dtype)[()] + output_scalar = np.nanmedian(scalar) + assert output_scalar.dtype == scalar.dtype + assert np.isnan(output_scalar) + + if axis is None: + assert_(len(sup.log) == 2) + else: + assert_(len(sup.log) == 4) + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanmedian(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_(np.nanmedian(0.) == 0.) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(np.AxisError, np.nanmedian, d, axis=-5) + assert_raises(np.AxisError, np.nanmedian, d, axis=(0, -5)) + assert_raises(np.AxisError, np.nanmedian, d, axis=4) + assert_raises(np.AxisError, np.nanmedian, d, axis=(0, 4)) + assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) + + def test_float_special(self): + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + for inf in [np.inf, -np.inf]: + a = np.array([[inf, np.nan], [np.nan, np.nan]]) + assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) + assert_equal(np.nanmedian(a, axis=1), [inf, np.nan]) + assert_equal(np.nanmedian(a), inf) + + # minimum fill value check + a = np.array([[np.nan, np.nan, inf], + [np.nan, np.nan, inf]]) + assert_equal(np.nanmedian(a), inf) + assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf]) + assert_equal(np.nanmedian(a, axis=1), inf) + + # no mask path + a = np.array([[inf, inf], [inf, inf]]) + assert_equal(np.nanmedian(a, axis=1), inf) + + a = np.array([[inf, 7, -inf, -9], + [-10, np.nan, np.nan, 5], + [4, np.nan, np.nan, inf]], + dtype=np.float32) + if inf > 0: + assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.]) + assert_equal(np.nanmedian(a), 4.5) + else: + assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.]) + assert_equal(np.nanmedian(a), -2.5) + assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf]) + + for i in range(0, 10): + for j in range(1, 10): + a = np.array([([np.nan] * i) + ([inf] * j)] * 2) + assert_equal(np.nanmedian(a), inf) + assert_equal(np.nanmedian(a, axis=1), inf) + assert_equal(np.nanmedian(a, axis=0), + ([np.nan] * i) + [inf] * j) + + a = np.array([([np.nan] * i) + ([-inf] * j)] * 2) + assert_equal(np.nanmedian(a), -inf) + assert_equal(np.nanmedian(a, axis=1), -inf) + assert_equal(np.nanmedian(a, axis=0), + ([np.nan] * i) + [-inf] * j) + + +class 
TestNanFunctions_Percentile: + + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + np.nanpercentile(ndat, 30) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.percentile(mat, 70, axis=axis, out=None, + overwrite_input=False) + res = np.nanpercentile(mat, 70, axis=axis, out=None, + overwrite_input=False) + assert_(res.ndim == tgt.ndim) + + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + res = np.nanpercentile(d, 90, axis=None, keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 11)) + res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True) + assert_equal(res.shape, (1, 5, 7, 1)) + res = np.nanpercentile(d, 90, axis=(1,), keepdims=True) + assert_equal(res.shape, (3, 1, 7, 11)) + res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 1)) + + @pytest.mark.parametrize('q', [7, [1, 7]]) + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1,), + (0, 1), + (-3, -1), + ] + ) + @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning") + def test_keepdims_out(self, q, axis): + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + shape_out = np.shape(q) + shape_out + + out = np.empty(shape_out) + result = np.nanpercentile(d, q, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_out(self): + mat = np.random.rand(3, 3) + nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) + resout = np.zeros(3) + tgt = np.percentile(mat, 42, axis=1) + res = np.nanpercentile(nan_mat, 42, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + # 0-d output: + resout = np.zeros(()) + tgt = np.percentile(mat, 42, axis=None) + res = np.nanpercentile(nan_mat, 42, axis=None, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_complex(self): + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + + def test_result_values(self): + tgt = [np.percentile(d, 28) for d in _rdat] + res = np.nanpercentile(_ndat, 28, axis=1) + assert_almost_equal(res, tgt) + # Transpose the array to fit the output convention of numpy.percentile + tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat]) + res = np.nanpercentile(_ndat, (28, 98), axis=1) + assert_almost_equal(res, tgt) + + 
@pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): + out = np.nanpercentile(array, 60, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_equal(np.nanpercentile(0., 100), 0.) + a = np.arange(6) + r = np.nanpercentile(a, 50, axis=0) + assert_equal(r, 2.5) + assert_(np.isscalar(r)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=-5) + assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, -5)) + assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=4) + assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, 4)) + assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1)) + + def test_multiple_percentiles(self): + perc = [50, 100] + mat = np.ones((4, 3)) + nan_mat = np.nan * mat + # For checking consistency in higher dimensional case + large_mat = np.ones((3, 4, 5)) + large_mat[:, 0:2:4, :] = 0 + large_mat[:, :, 3:] *= 2 + for axis in [None, 0, 1]: + for keepdim in [False, True]: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "All-NaN slice encountered") + val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) + nan_val = np.nanpercentile(nan_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val.shape, val.shape) + + val = np.percentile(large_mat, perc, axis=axis, + keepdims=keepdim) + nan_val = np.nanpercentile(large_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val, val) + + megamat = np.ones((3, 4, 5, 6)) + assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6)) + + +class TestNanFunctions_Quantile: + # most of this is already tested by TestPercentile + + def test_regression(self): + ar = np.arange(24).reshape(2, 3, 4).astype(float) + ar[0][1] = np.nan + + assert_equal(np.nanquantile(ar, q=0.5), np.nanpercentile(ar, q=50)) + assert_equal(np.nanquantile(ar, q=0.5, axis=0), + np.nanpercentile(ar, q=50, axis=0)) + assert_equal(np.nanquantile(ar, q=0.5, axis=1), + np.nanpercentile(ar, q=50, axis=1)) + assert_equal(np.nanquantile(ar, q=[0.5], axis=1), + np.nanpercentile(ar, q=[50], axis=1)) + assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1), + np.nanpercentile(ar, q=[25, 50, 75], axis=1)) + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.nanquantile(x, 0), 0.) 
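+        # (Editorial) q=0 and q=1 select the extremes; with the default
+        # "linear" method, q=0.5 interpolates the two middle order
+        # statistics, here (1.5 + 2.0) / 2 == 1.75.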
+ assert_equal(np.nanquantile(x, 1), 3.5) + assert_equal(np.nanquantile(x, 0.5), 1.75) + + def test_complex(self): + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + + def test_no_p_overwrite(self): + # this is worth retesting, because quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.nanquantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.nanquantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip(f"`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): + out = np.nanquantile(array, 1, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + +@pytest.mark.parametrize("arr, expected", [ + # array of floats with some nans + (np.array([np.nan, 5.0, np.nan, np.inf]), + np.array([False, True, False, True])), + # int64 array that can't possibly have nans + (np.array([1, 5, 7, 9], dtype=np.int64), + True), + # bool array that can't possibly have nans + (np.array([False, True, False, True]), + True), + # 2-D complex array with nans + (np.array([[np.nan, 5.0], + [np.nan, np.inf]], dtype=np.complex64), + np.array([[False, True], + [False, True]])), + ]) +def test__nan_mask(arr, expected): + for out in [None, np.empty(arr.shape, dtype=np.bool_)]: + actual = _nan_mask(arr, out=out) + assert_equal(actual, expected) + # the above won't distinguish between True proper + # and an array of True values; we want True proper + # for types that can't possibly contain NaN + if type(expected) is not np.ndarray: + assert actual is True + + +def test__replace_nan(): + """ Test that _replace_nan returns the original array if there are no + NaNs, not a copy. 
+ """ + for dtype in [np.bool_, np.int32, np.int64]: + arr = np.array([0, 1], dtype=dtype) + result, mask = _replace_nan(arr, 0) + assert mask is None + # do not make a copy if there are no nans + assert result is arr + + for dtype in [np.float32, np.float64]: + arr = np.array([0, 1], dtype=dtype) + result, mask = _replace_nan(arr, 2) + assert (mask == False).all() + # mask is not None, so we make a copy + assert result is not arr + assert_equal(result, arr) + + arr_nan = np.array([0, 1, np.nan], dtype=dtype) + result_nan, mask_nan = _replace_nan(arr_nan, 2) + assert_equal(mask_nan, np.array([False, False, True])) + assert result_nan is not arr_nan + assert_equal(result_nan, np.array([0, 1, 2])) + assert np.isnan(arr_nan[-1]) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_packbits.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_packbits.py new file mode 100644 index 00000000..5b07f41c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_packbits.py @@ -0,0 +1,376 @@ +import numpy as np +from numpy.testing import assert_array_equal, assert_equal, assert_raises +import pytest +from itertools import chain + +def test_packbits(): + # Copied from the docstring. + a = [[[1, 0, 1], [0, 1, 0]], + [[1, 1, 0], [0, 0, 1]]] + for dt in '?bBhHiIlLqQ': + arr = np.array(a, dtype=dt) + b = np.packbits(arr, axis=-1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]])) + + assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) + + +def test_packbits_empty(): + shapes = [ + (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0), + (0, 0, 20), (0, 0, 0), + ] + for dt in '?bBhHiIlLqQ': + for shape in shapes: + a = np.empty(shape, dtype=dt) + b = np.packbits(a) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, (0,)) + + +def test_packbits_empty_with_axis(): + # Original shapes and lists of packed shapes for different axes. 
+ shapes = [ + ((0,), [(0,)]), + ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]), + ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]), + ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]), + ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]), + ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]), + ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]), + ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]), + ] + for dt in '?bBhHiIlLqQ': + for in_shape, out_shapes in shapes: + for ax, out_shape in enumerate(out_shapes): + a = np.empty(in_shape, dtype=dt) + b = np.packbits(a, axis=ax) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, out_shape) + +@pytest.mark.parametrize('bitorder', ('little', 'big')) +def test_packbits_large(bitorder): + # test data large enough for 16 byte vectorization + a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, + 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, + 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, + 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, + 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, + 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, + 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, + 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, + 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, + 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, + 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, + 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, + 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0]) + a = a.repeat(3) + for dtype in '?bBhHiIlLqQ': + arr = np.array(a, dtype=dtype) + b = np.packbits(arr, axis=None, bitorder=bitorder) + assert_equal(b.dtype, np.uint8) + r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252, + 113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255, + 227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63, + 224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112, + 63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1, + 255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15, + 199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227, + 129, 248, 227, 129, 199, 31, 128] + if bitorder == 'big': + assert_array_equal(b, r) + # equal for size being multiple of 8 + assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a) + + # check last byte of different remainders (16 byte vectorization) + b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)] + assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199, + 198, 196, 192]) + + + arr = arr.reshape(36, 25) + b = np.packbits(arr, axis=0) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195, + 199, 206, 204, 204, 140, 140, 136, 136, 8, 40, 105, + 107, 75, 74, 88], + [72, 216, 248, 241, 227, 195, 202, 90, 90, 83, + 83, 119, 127, 109, 73, 64, 208, 244, 189, 45, + 41, 104, 122, 90, 18], + [113, 120, 248, 216, 152, 24, 60, 52, 182, 150, + 150, 150, 146, 210, 210, 246, 255, 255, 223, + 151, 21, 17, 17, 131, 163], + [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92, + 92, 78, 110, 39, 181, 149, 220, 222, 218, 218, + 202, 234, 170, 168], + [0, 128, 128, 192, 80, 112, 48, 160, 160, 224, + 240, 208, 144, 128, 160, 224, 240, 208, 144, + 144, 176, 240, 224, 192, 
128]]) + + b = np.packbits(arr, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[252, 127, 192, 0], + [ 7, 252, 15, 128], + [240, 0, 28, 0], + [255, 128, 0, 128], + [192, 31, 255, 128], + [142, 63, 0, 0], + [255, 240, 7, 0], + [ 7, 224, 14, 0], + [126, 0, 224, 0], + [255, 255, 199, 0], + [ 56, 28, 126, 0], + [113, 248, 227, 128], + [227, 142, 63, 0], + [ 0, 28, 112, 0], + [ 15, 248, 3, 128], + [ 28, 126, 56, 0], + [ 56, 255, 241, 128], + [240, 7, 224, 0], + [227, 129, 192, 128], + [255, 255, 254, 0], + [126, 0, 224, 0], + [ 3, 241, 248, 0], + [ 0, 255, 241, 128], + [128, 0, 255, 128], + [224, 1, 255, 128], + [248, 252, 126, 0], + [ 0, 7, 3, 128], + [224, 113, 248, 0], + [ 0, 252, 127, 128], + [142, 63, 224, 0], + [224, 14, 63, 0], + [ 7, 3, 128, 0], + [113, 255, 255, 128], + [ 28, 113, 199, 0], + [ 7, 227, 142, 0], + [ 14, 56, 252, 0]]) + + arr = arr.T.copy() + b = np.packbits(arr, axis=0) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255, + 56, 113, 227, 0, 15, 28, 56, 240, 227, 255, + 126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224, + 7, 113, 28, 7, 14], + [127, 252, 0, 128, 31, 63, 240, 224, 0, 255, + 28, 248, 142, 28, 248, 126, 255, 7, 129, 255, + 0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14, + 3, 255, 113, 227, 56], + [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126, + 227, 63, 112, 3, 56, 241, 224, 192, 254, 224, + 248, 241, 255, 255, 126, 3, 248, 127, 224, 63, + 128, 255, 199, 142, 252], + [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0, + 0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128, + 128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]]) + + b = np.packbits(arr, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[190, 72, 113, 214, 0], + [186, 216, 120, 210, 128], + [178, 248, 248, 210, 128], + [178, 241, 216, 64, 192], + [150, 227, 152, 68, 80], + [215, 195, 24, 5, 112], + [ 87, 202, 60, 5, 48], + [ 83, 90, 52, 1, 160], + [ 83, 90, 182, 72, 160], + [195, 83, 150, 88, 224], + [199, 83, 150, 92, 240], + [206, 119, 150, 92, 208], + [204, 127, 146, 78, 144], + [204, 109, 210, 110, 128], + [140, 73, 210, 39, 160], + [140, 64, 246, 181, 224], + [136, 208, 255, 149, 240], + [136, 244, 255, 220, 208], + [ 8, 189, 223, 222, 144], + [ 40, 45, 151, 218, 144], + [105, 41, 21, 218, 176], + [107, 104, 17, 202, 240], + [ 75, 122, 17, 234, 224], + [ 74, 90, 131, 170, 192], + [ 88, 18, 163, 168, 128]]) + + + # result is the same if input is multiplied with a nonzero value + for dtype in 'bBhHiIlLqQ': + arr = np.array(a, dtype=dtype) + rnd = np.random.randint(low=np.iinfo(dtype).min, + high=np.iinfo(dtype).max, size=arr.size, + dtype=dtype) + rnd[rnd == 0] = 1 + arr *= rnd.astype(dtype) + b = np.packbits(arr, axis=-1) + assert_array_equal(np.unpackbits(b)[:-4], a) + + assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) + + +def test_packbits_very_large(): + # test some with a larger arrays gh-8637 + # code is covered earlier but larger array makes crash on bug more likely + for s in range(950, 1050): + for dt in '?bBhHiIlLqQ': + x = np.ones((200, s), dtype=bool) + np.packbits(x, axis=1) + + +def test_unpackbits(): + # Copied from the docstring. 
+ a = np.array([[2], [7], [23]], dtype=np.uint8) + b = np.unpackbits(a, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1]])) + +def test_pack_unpack_order(): + a = np.array([[2], [7], [23]], dtype=np.uint8) + b = np.unpackbits(a, axis=1) + assert_equal(b.dtype, np.uint8) + b_little = np.unpackbits(a, axis=1, bitorder='little') + b_big = np.unpackbits(a, axis=1, bitorder='big') + assert_array_equal(b, b_big) + assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little')) + assert_array_equal(b[:,::-1], b_little) + assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big')) + assert_raises(ValueError, np.unpackbits, a, bitorder='r') + assert_raises(TypeError, np.unpackbits, a, bitorder=10) + + + +def test_unpackbits_empty(): + a = np.empty((0,), dtype=np.uint8) + b = np.unpackbits(a) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.empty((0,))) + + +def test_unpackbits_empty_with_axis(): + # Lists of packed shapes for different axes and unpacked shapes. + shapes = [ + ([(0,)], (0,)), + ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)), + ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)), + ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)), + ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)), + ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)), + ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)), + ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)), + ] + for in_shapes, out_shape in shapes: + for ax, in_shape in enumerate(in_shapes): + a = np.empty(in_shape, dtype=np.uint8) + b = np.unpackbits(a, axis=ax) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, out_shape) + + +def test_unpackbits_large(): + # test all possible numbers via comparison to already tested packbits + d = np.arange(277, dtype=np.uint8) + assert_array_equal(np.packbits(np.unpackbits(d)), d) + assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2]) + d = np.tile(d, (3, 1)) + assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d) + d = d.T.copy() + assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d) + + +class TestCount(): + x = np.array([ + [1, 0, 1, 0, 0, 1, 0], + [0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 1], + [1, 1, 0, 0, 0, 1, 1], + [1, 0, 1, 0, 1, 0, 1], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 0, 1, 0, 1, 0], + ], dtype=np.uint8) + padded1 = np.zeros(57, dtype=np.uint8) + padded1[:49] = x.ravel() + padded1b = np.zeros(57, dtype=np.uint8) + padded1b[:49] = x[::-1].copy().ravel() + padded2 = np.zeros((9, 9), dtype=np.uint8) + padded2[:7, :7] = x + + @pytest.mark.parametrize('bitorder', ('little', 'big')) + @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1))) + def test_roundtrip(self, bitorder, count): + if count < 0: + # one extra zero of padding + cutoff = count - 1 + else: + cutoff = count + # test complete invertibility of packbits and unpackbits with count + packed = np.packbits(self.x, bitorder=bitorder) + unpacked = np.unpackbits(packed, count=count, bitorder=bitorder) + assert_equal(unpacked.dtype, np.uint8) + assert_array_equal(unpacked, self.padded1[:cutoff]) + + @pytest.mark.parametrize('kwargs', [ + {}, {'count': None}, + ]) + def test_count(self, kwargs): + packed = np.packbits(self.x) + unpacked = np.unpackbits(packed, **kwargs) + assert_equal(unpacked.dtype, np.uint8) + assert_array_equal(unpacked, self.padded1[:-1]) + + @pytest.mark.parametrize('bitorder', ('little', 'big')) + # delta==-1 when count<0 because 
one extra zero of padding + @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1))) + def test_roundtrip_axis(self, bitorder, count): + if count < 0: + # one extra zero of padding + cutoff = count - 1 + else: + cutoff = count + packed0 = np.packbits(self.x, axis=0, bitorder=bitorder) + unpacked0 = np.unpackbits(packed0, axis=0, count=count, + bitorder=bitorder) + assert_equal(unpacked0.dtype, np.uint8) + assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]]) + + packed1 = np.packbits(self.x, axis=1, bitorder=bitorder) + unpacked1 = np.unpackbits(packed1, axis=1, count=count, + bitorder=bitorder) + assert_equal(unpacked1.dtype, np.uint8) + assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff]) + + @pytest.mark.parametrize('kwargs', [ + {}, {'count': None}, + {'bitorder' : 'little'}, + {'bitorder': 'little', 'count': None}, + {'bitorder' : 'big'}, + {'bitorder': 'big', 'count': None}, + ]) + def test_axis_count(self, kwargs): + packed0 = np.packbits(self.x, axis=0) + unpacked0 = np.unpackbits(packed0, axis=0, **kwargs) + assert_equal(unpacked0.dtype, np.uint8) + if kwargs.get('bitorder', 'big') == 'big': + assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]]) + else: + assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]]) + + packed1 = np.packbits(self.x, axis=1) + unpacked1 = np.unpackbits(packed1, axis=1, **kwargs) + assert_equal(unpacked1.dtype, np.uint8) + if kwargs.get('bitorder', 'big') == 'big': + assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1]) + else: + assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1]) + + def test_bad_count(self): + packed0 = np.packbits(self.x, axis=0) + assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9) + packed1 = np.packbits(self.x, axis=1) + assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9) + packed = np.packbits(self.x) + assert_raises(ValueError, np.unpackbits, packed, count=-57) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_polynomial.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_polynomial.py new file mode 100644 index 00000000..3734344d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_polynomial.py @@ -0,0 +1,303 @@ +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_almost_equal, + assert_array_almost_equal, assert_raises, assert_allclose + ) + +import pytest + +# `poly1d` has some support for `bool_` and `timedelta64`, +# but it is limited and they are therefore excluded here +TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O" + + +class TestPolynomial: + def test_poly1d_str_and_repr(self): + p = np.poly1d([1., 2, 3]) + assert_equal(repr(p), 'poly1d([1., 2., 3.])') + assert_equal(str(p), + ' 2\n' + '1 x + 2 x + 3') + + q = np.poly1d([3., 2, 1]) + assert_equal(repr(q), 'poly1d([3., 2., 1.])') + assert_equal(str(q), + ' 2\n' + '3 x + 2 x + 1') + + r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j]) + assert_equal(str(r), + ' 3 2\n' + '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)') + + assert_equal(str(np.poly1d([-3, -2, -1])), + ' 2\n' + '-3 x - 2 x - 1') + + def test_poly1d_resolution(self): + p = np.poly1d([1., 2, 3]) + q = np.poly1d([3., 2, 1]) + assert_equal(p(0), 3.0) + assert_equal(p(5), 38.0) + assert_equal(q(0), 1.0) + assert_equal(q(5), 86.0) + + def test_poly1d_math(self): + # here we use some simple coeffs to make calculations easier + p = 
np.poly1d([1., 2, 4]) + q = np.poly1d([4., 2, 1]) + assert_equal(p/q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75]))) + assert_equal(p.integ(), np.poly1d([1/3, 1., 4., 0.])) + assert_equal(p.integ(1), np.poly1d([1/3, 1., 4., 0.])) + + p = np.poly1d([1., 2, 3]) + q = np.poly1d([3., 2, 1]) + assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.])) + assert_equal(p + q, np.poly1d([4., 4., 4.])) + assert_equal(p - q, np.poly1d([-2., 0., 2.])) + assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.])) + assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.])) + assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.])) + assert_equal(p.deriv(), np.poly1d([2., 2.])) + assert_equal(p.deriv(2), np.poly1d([2.])) + assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])), + (np.poly1d([1., -1.]), np.poly1d([0.]))) + + @pytest.mark.parametrize("type_code", TYPE_CODES) + def test_poly1d_misc(self, type_code: str) -> None: + dtype = np.dtype(type_code) + ar = np.array([1, 2, 3], dtype=dtype) + p = np.poly1d(ar) + + # `__eq__` + assert_equal(np.asarray(p), ar) + assert_equal(np.asarray(p).dtype, dtype) + assert_equal(len(p), 2) + + # `__getitem__` + comparison_dct = {-1: 0, 0: 3, 1: 2, 2: 1, 3: 0} + for index, ref in comparison_dct.items(): + scalar = p[index] + assert_equal(scalar, ref) + if dtype == np.object_: + assert isinstance(scalar, int) + else: + assert_equal(scalar.dtype, dtype) + + def test_poly1d_variable_arg(self): + q = np.poly1d([1., 2, 3], variable='y') + assert_equal(str(q), + ' 2\n' + '1 y + 2 y + 3') + q = np.poly1d([1., 2, 3], variable='lambda') + assert_equal(str(q), + ' 2\n' + '1 lambda + 2 lambda + 3') + + def test_poly(self): + assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]), + [1, -3, -2, 6]) + + # From matlab docs + A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]] + assert_array_almost_equal(np.poly(A), [1, -6, -72, -27]) + + # Should produce real output for perfect conjugates + assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j]))) + assert_(np.isrealobj(np.poly([0+1j, -0+-1j, 1+2j, + 1-2j, 1.+3.5j, 1-3.5j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j, 1+3j, 1-3.j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j]))) + assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j]))) + assert_(np.isrealobj(np.poly([1j, -1j]))) + assert_(np.isrealobj(np.poly([1, -1]))) + + assert_(np.iscomplexobj(np.poly([1j, -1.0000001j]))) + + np.random.seed(42) + a = np.random.randn(100) + 1j*np.random.randn(100) + assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a)))))) + + def test_roots(self): + assert_array_equal(np.roots([1, 0, 0]), [0, 0]) + + def test_str_leading_zeros(self): + p = np.poly1d([4, 3, 2, 1]) + p[3] = 0 + assert_equal(str(p), + " 2\n" + "3 x + 2 x + 1") + + p = np.poly1d([1, 2]) + p[0] = 0 + p[1] = 0 + assert_equal(str(p), " \n0") + + def test_polyfit(self): + c = np.array([3., 2., 1.]) + x = np.linspace(0, 2, 7) + y = np.polyval(c, x) + err = [1, -1, 1, -1, 1, -1, 1] + weights = np.arange(8, 1, -1)**2/7.0 + + # Check exception when too few points for variance estimate. 
Note that + # the estimate requires the number of data points to exceed + # degree + 1 + assert_raises(ValueError, np.polyfit, + [1], [1], deg=0, cov=True) + + # check 1D case + m, cov = np.polyfit(x, y+err, 2, cov=True) + est = [3.8571, 0.2857, 1.619] + assert_almost_equal(est, m, decimal=4) + val0 = [[ 1.4694, -2.9388, 0.8163], + [-2.9388, 6.3673, -2.1224], + [ 0.8163, -2.1224, 1.161 ]] + assert_almost_equal(val0, cov, decimal=4) + + m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True) + assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4) + val = [[ 4.3964, -5.0052, 0.4878], + [-5.0052, 6.8067, -0.9089], + [ 0.4878, -0.9089, 0.3337]] + assert_almost_equal(val, cov2, decimal=4) + + m3, cov3 = np.polyfit(x, y+err, 2, w=weights, cov="unscaled") + assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4) + val = [[ 0.1473, -0.1677, 0.0163], + [-0.1677, 0.228 , -0.0304], + [ 0.0163, -0.0304, 0.0112]] + assert_almost_equal(val, cov3, decimal=4) + + # check 2D (n,1) case + y = y[:, np.newaxis] + c = c[:, np.newaxis] + assert_almost_equal(c, np.polyfit(x, y, 2)) + # check 2D (n,2) case + yy = np.concatenate((y, y), axis=1) + cc = np.concatenate((c, c), axis=1) + assert_almost_equal(cc, np.polyfit(x, yy, 2)) + + m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True) + assert_almost_equal(est, m[:, 0], decimal=4) + assert_almost_equal(est, m[:, 1], decimal=4) + assert_almost_equal(val0, cov[:, :, 0], decimal=4) + assert_almost_equal(val0, cov[:, :, 1], decimal=4) + + # check order 1 (deg=0) case, where the analytic results are simple + np.random.seed(123) + y = np.random.normal(size=(4, 10000)) + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True) + # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5. + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) + # Without scaling, since reduced chi2 is 1, the result should be the same. + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]), + deg=0, cov="unscaled") + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_almost_equal(np.sqrt(cov.mean()), 0.5) + # If we estimate our errors wrong, no change with scaling: + w = np.full(y.shape[0], 1./0.5) + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True) + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) + # But if we do not scale, our estimate for the error in the mean will + # differ. 
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled") + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_almost_equal(np.sqrt(cov.mean()), 0.25) + + def test_objects(self): + from decimal import Decimal + p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')]) + p2 = p * Decimal('1.333333333333333') + assert_(p2[1] == Decimal("3.9999999999999990")) + p2 = p.deriv() + assert_(p2[1] == Decimal('8.0')) + p2 = p.integ() + assert_(p2[3] == Decimal("1.333333333333333333333333333")) + assert_(p2[2] == Decimal('1.5')) + assert_(np.issubdtype(p2.coeffs.dtype, np.object_)) + p = np.poly([Decimal(1), Decimal(2)]) + assert_equal(np.poly([Decimal(1), Decimal(2)]), + [1, Decimal(-3), Decimal(2)]) + + def test_complex(self): + p = np.poly1d([3j, 2j, 1j]) + p2 = p.integ() + assert_((p2.coeffs == [1j, 1j, 1j, 0]).all()) + p2 = p.deriv() + assert_((p2.coeffs == [6j, 2j]).all()) + + def test_integ_coeffs(self): + p = np.poly1d([3, 2, 1]) + p2 = p.integ(3, k=[9, 7, 6]) + assert_( + (p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all()) + + def test_zero_dims(self): + try: + np.poly(np.zeros((0, 0))) + except ValueError: + pass + + def test_poly_int_overflow(self): + """ + Regression test for gh-5096. + """ + v = np.arange(1, 21) + assert_almost_equal(np.poly(v), np.poly(np.diag(v))) + + def test_zero_poly_dtype(self): + """ + Regression test for gh-16354. + """ + z = np.array([0, 0, 0]) + p = np.poly1d(z.astype(np.int64)) + assert_equal(p.coeffs.dtype, np.int64) + + p = np.poly1d(z.astype(np.float32)) + assert_equal(p.coeffs.dtype, np.float32) + + p = np.poly1d(z.astype(np.complex64)) + assert_equal(p.coeffs.dtype, np.complex64) + + def test_poly_eq(self): + p = np.poly1d([1, 2, 3]) + p2 = np.poly1d([1, 2, 4]) + assert_equal(p == None, False) + assert_equal(p != None, True) + assert_equal(p == p, True) + assert_equal(p == p2, False) + assert_equal(p != p2, True) + + def test_polydiv(self): + b = np.poly1d([2, 6, 6, 1]) + a = np.poly1d([-1j, (1+2j), -(2+1j), 1]) + q, r = np.polydiv(b, a) + assert_equal(q.coeffs.dtype, np.complex128) + assert_equal(r.coeffs.dtype, np.complex128) + assert_equal(q*a + r, b) + + c = [1, 2, 3] + d = np.poly1d([1, 2, 3]) + s, t = np.polydiv(c, d) + assert isinstance(s, np.poly1d) + assert isinstance(t, np.poly1d) + u, v = np.polydiv(d, c) + assert isinstance(u, np.poly1d) + assert isinstance(v, np.poly1d) + + def test_poly_coeffs_mutable(self): + """ Coefficients should be modifiable """ + p = np.poly1d([1, 2, 3]) + + p.coeffs += 1 + assert_equal(p.coeffs, [2, 3, 4]) + + p.coeffs[2] += 10 + assert_equal(p.coeffs, [2, 3, 14]) + + # this never used to be allowed - let's not add features to deprecated + # APIs + assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1)) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_recfunctions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_recfunctions.py new file mode 100644 index 00000000..98860dfd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_recfunctions.py @@ -0,0 +1,1043 @@ +import pytest + +import numpy as np +import numpy.ma as ma +from numpy.ma.mrecords import MaskedRecords +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_, assert_raises +from numpy.lib.recfunctions import ( + drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields, + find_duplicates, merge_arrays, append_fields, stack_arrays, join_by, + repack_fields, unstructured_to_structured, structured_to_unstructured, 
+ apply_along_fields, require_fields, assign_fields_by_name) +get_fieldspec = np.lib.recfunctions._get_fieldspec +get_names = np.lib.recfunctions.get_names +get_names_flat = np.lib.recfunctions.get_names_flat +zip_descr = np.lib.recfunctions._zip_descr +zip_dtype = np.lib.recfunctions._zip_dtype + + +class TestRecFunctions: + # Misc tests + + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array([('A', 1.), ('B', 2.)], + dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_zip_descr(self): + # Test zip_descr + (w, x, y, z) = self.data + + # Std array + test = zip_descr((x, x), flatten=True) + assert_equal(test, + np.dtype([('', int), ('', int)])) + test = zip_descr((x, x), flatten=False) + assert_equal(test, + np.dtype([('', int), ('', int)])) + + # Std & flexible-dtype + test = zip_descr((x, z), flatten=True) + assert_equal(test, + np.dtype([('', int), ('A', '|S3'), ('B', float)])) + test = zip_descr((x, z), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('A', '|S3'), ('B', float)])])) + + # Standard & nested dtype + test = zip_descr((x, w), flatten=True) + assert_equal(test, + np.dtype([('', int), + ('a', int), + ('ba', float), ('bb', int)])) + test = zip_descr((x, w), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('a', int), + ('b', [('ba', float), ('bb', int)])])])) + + def test_drop_fields(self): + # Test drop_fields + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + + # A basic field + test = drop_fields(a, 'a') + control = np.array([((2, 3.0),), ((5, 6.0),)], + dtype=[('b', [('ba', float), ('bb', int)])]) + assert_equal(test, control) + + # Another basic field (but nesting two fields) + test = drop_fields(a, 'b') + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + # A nested sub-field + test = drop_fields(a, ['ba', ]) + control = np.array([(1, (3.0,)), (4, (6.0,))], + dtype=[('a', int), ('b', [('bb', int)])]) + assert_equal(test, control) + + # All the nested sub-field from a field: zap that field + test = drop_fields(a, ['ba', 'bb']) + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + # dropping all fields results in an array with no fields + test = drop_fields(a, ['a', 'b']) + control = np.array([(), ()], dtype=[]) + assert_equal(test, control) + + def test_rename_fields(self): + # Test rename fields + a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + dtype=[('a', int), + ('b', [('ba', float), ('bb', (float, 2))])]) + test = rename_fields(a, {'a': 'A', 'bb': 'BB'}) + newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])] + control = a.view(newdtype) + assert_equal(test.dtype, newdtype) + assert_equal(test, control) + + def test_get_names(self): + # Test get_names + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_names(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ('ba', 'bb')))) + + ndtype = np.dtype([('a', int), ('b', [])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ()))) + + ndtype = np.dtype([]) + test = get_names(ndtype) + assert_equal(test, ()) + + def test_get_names_flat(self): + # Test get_names_flat + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = 
get_names_flat(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b', 'ba', 'bb')) + + ndtype = np.dtype([('a', int), ('b', [])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b')) + + ndtype = np.dtype([]) + test = get_names_flat(ndtype) + assert_equal(test, ()) + + def test_get_fieldstructure(self): + # Test get_fieldstructure + + # No nested fields + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': []}) + + # One 1-nested field + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']}) + + # One 2-nested fields + ndtype = np.dtype([('A', int), + ('B', [('BA', int), + ('BB', [('BBA', int), ('BBB', int)])])]) + test = get_fieldstructure(ndtype) + control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], + 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} + assert_equal(test, control) + + # 0 fields + ndtype = np.dtype([]) + test = get_fieldstructure(ndtype) + assert_equal(test, {}) + + def test_find_duplicates(self): + # Test find_duplicates + a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), + (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))], + mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), + (0, (0, 0)), (1, (0, 0)), (0, (1, 0))], + dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 2] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='A', return_index=True) + control = [0, 1, 2, 3, 5] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='B', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BA', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BB', return_index=True) + control = [0, 1, 2, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_find_duplicates_ignoremask(self): + # Test the ignoremask option of find_duplicates + ndtype = [('a', int)] + a = ma.array([1, 1, 1, 2, 2, 3, 3], + mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + test = find_duplicates(a, ignoremask=True, return_index=True) + control = [0, 1, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 1, 2, 3, 4, 6] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_repack_fields(self): + dt = np.dtype('u1,f4,i8', align=True) + a = np.zeros(2, dtype=dt) + + assert_equal(repack_fields(dt), np.dtype('u1,f4,i8')) + assert_equal(repack_fields(a).itemsize, 13) + assert_equal(repack_fields(repack_fields(dt), align=True), dt) + + # make sure type is preserved + dt = np.dtype((np.record, dt)) + assert_(repack_fields(dt).type is np.record) + + def test_structured_to_unstructured(self, tmp_path): + a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + out = structured_to_unstructured(a) + assert_equal(out, np.zeros((4,5), dtype='f8')) + + b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 
,11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1) + assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ])) + out = np.mean(structured_to_unstructured(b[['x']]), axis=-1) + assert_equal(out, np.array([ 1. , 4. , 7. , 10. ])) + + c = np.arange(20).reshape((4,5)) + out = unstructured_to_structured(c, a.dtype) + want = np.array([( 0, ( 1., 2), [ 3., 4.]), + ( 5, ( 6., 7), [ 8., 9.]), + (10, (11., 12), [13., 14.]), + (15, (16., 17), [18., 19.])], + dtype=[('a', 'i4'), + ('b', [('f0', 'f4'), ('f1', 'u2')]), + ('c', 'f4', (2,))]) + assert_equal(out, want) + + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + assert_equal(apply_along_fields(np.mean, d), + np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ])) + assert_equal(apply_along_fields(np.mean, d[['x', 'z']]), + np.array([ 3. , 5.5, 9. , 11. ])) + + # check that for uniform field dtypes we get a view, not a copy: + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')]) + dd = structured_to_unstructured(d) + ddd = unstructured_to_structured(dd, d.dtype) + assert_(np.shares_memory(dd, d)) + assert_(np.shares_memory(ddd, d)) + + # check that reversing the order of attributes works + dd_attrib_rev = structured_to_unstructured(d[['z', 'x']]) + assert_equal(dd_attrib_rev, [[5, 1], [7, 4], [11, 7], [12, 10]]) + assert_(np.shares_memory(dd_attrib_rev, d)) + + # including uniform fields with subarrays unpacked + d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]), + (8, [9, 10], [[11, 12], [13, 14]])], + dtype=[('x0', 'i4'), ('x1', ('i4', 2)), + ('x2', ('i4', (2, 2)))]) + dd = structured_to_unstructured(d) + ddd = unstructured_to_structured(dd, d.dtype) + assert_(np.shares_memory(dd, d)) + assert_(np.shares_memory(ddd, d)) + + # check that reversing with sub-arrays works as expected + d_rev = d[::-1] + dd_rev = structured_to_unstructured(d_rev) + assert_equal(dd_rev, [[8, 9, 10, 11, 12, 13, 14], + [1, 2, 3, 4, 5, 6, 7]]) + + # check that sub-arrays keep the order of their values + d_attrib_rev = d[['x2', 'x1', 'x0']] + dd_attrib_rev = structured_to_unstructured(d_attrib_rev) + assert_equal(dd_attrib_rev, [[4, 5, 6, 7, 2, 3, 1], + [11, 12, 13, 14, 9, 10, 8]]) + + # with ignored field at the end + d = np.array([(1, [2, 3], [[4, 5], [6, 7]], 32), + (8, [9, 10], [[11, 12], [13, 14]], 64)], + dtype=[('x0', 'i4'), ('x1', ('i4', 2)), + ('x2', ('i4', (2, 2))), ('ignored', 'u1')]) + dd = structured_to_unstructured(d[['x0', 'x1', 'x2']]) + assert_(np.shares_memory(dd, d)) + assert_equal(dd, [[1, 2, 3, 4, 5, 6, 7], + [8, 9, 10, 11, 12, 13, 14]]) + + # test that nested fields with identical names don't break anything + point = np.dtype([('x', int), ('y', int)]) + triangle = np.dtype([('a', point), ('b', point), ('c', point)]) + arr = np.zeros(10, triangle) + res = structured_to_unstructured(arr, dtype=int) + assert_equal(res, np.zeros((10, 6), dtype=int)) + + + # test nested combinations of subarrays and structured arrays, gh-13333 + def subarray(dt, shape): + return np.dtype((dt, shape)) + + def structured(*dts): + return np.dtype([('x{}'.format(i), dt) for i, dt in enumerate(dts)]) + + def inspect(dt, dtype=None): + arr = np.zeros((), dt) + ret = structured_to_unstructured(arr, dtype=dtype) + backarr = unstructured_to_structured(ret, dt) + return ret.shape, ret.dtype, backarr.dtype + + dt = structured(subarray(structured(np.int32, np.int32), 3)) + assert_equal(inspect(dt), 
((6,), np.int32, dt)) + + dt = structured(subarray(subarray(np.int32, 2), 2)) + assert_equal(inspect(dt), ((4,), np.int32, dt)) + + dt = structured(np.int32) + assert_equal(inspect(dt), ((1,), np.int32, dt)) + + dt = structured(np.int32, subarray(subarray(np.int32, 2), 2)) + assert_equal(inspect(dt), ((5,), np.int32, dt)) + + dt = structured() + assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt)) + + # these currently don't work, but we may make it work in the future + assert_raises(NotImplementedError, structured_to_unstructured, + np.zeros(3, dt), dtype=np.int32) + assert_raises(NotImplementedError, unstructured_to_structured, + np.zeros((3,0), dtype=np.int32)) + + # test supported ndarray subclasses + d_plain = np.array([(1, 2), (3, 4)], dtype=[('a', 'i4'), ('b', 'i4')]) + dd_expected = structured_to_unstructured(d_plain, copy=True) + + # recarray + d = d_plain.view(np.recarray) + + dd = structured_to_unstructured(d, copy=False) + ddd = structured_to_unstructured(d, copy=True) + assert_(np.shares_memory(d, dd)) + assert_(type(dd) is np.recarray) + assert_(type(ddd) is np.recarray) + assert_equal(dd, dd_expected) + assert_equal(ddd, dd_expected) + + # memmap + d = np.memmap(tmp_path / 'memmap', + mode='w+', + dtype=d_plain.dtype, + shape=d_plain.shape) + d[:] = d_plain + dd = structured_to_unstructured(d, copy=False) + ddd = structured_to_unstructured(d, copy=True) + assert_(np.shares_memory(d, dd)) + assert_(type(dd) is np.memmap) + assert_(type(ddd) is np.memmap) + assert_equal(dd, dd_expected) + assert_equal(ddd, dd_expected) + + def test_unstructured_to_structured(self): + # test if dtype is the args of np.dtype + a = np.zeros((20, 2)) + test_dtype_args = [('x', float), ('y', float)] + test_dtype = np.dtype(test_dtype_args) + field1 = unstructured_to_structured(a, dtype=test_dtype_args) # now + field2 = unstructured_to_structured(a, dtype=test_dtype) # before + assert_equal(field1, field2) + + def test_field_assignment_by_name(self): + a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) + newdt = [('b', 'f4'), ('c', 'u1')] + + assert_equal(require_fields(a, newdt), np.ones(2, newdt)) + + b = np.array([(1,2), (3,4)], dtype=newdt) + assign_fields_by_name(a, b, zero_unassigned=False) + assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype)) + assign_fields_by_name(a, b) + assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype)) + + # test nested fields + a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])]) + newdt = [('a', [('c', 'u1')])] + assert_equal(require_fields(a, newdt), np.ones(2, newdt)) + b = np.array([((2,),), ((3,),)], dtype=newdt) + assign_fields_by_name(a, b, zero_unassigned=False) + assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype)) + assign_fields_by_name(a, b) + assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype)) + + # test unstructured code path for 0d arrays + a, b = np.array(3), np.array(0) + assign_fields_by_name(b, a) + assert_equal(b[()], 3) + + +class TestRecursiveFillFields: + # Test recursive_fill_fields. 
+ def test_simple_flexible(self): + # Test recursive_fill_fields on flexible-array + a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) + b = np.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = np.array([(1, 10.), (2, 20.), (0, 0.)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + def test_masked_flexible(self): + # Test recursive_fill_fields on masked flexible-array + a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)], + dtype=[('A', int), ('B', float)]) + b = ma.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = ma.array([(1, 10.), (2, 20.), (0, 0.)], + mask=[(0, 1), (1, 0), (0, 0)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + +class TestMergeArrays: + # Test merge_arrays + + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array( + [(1, (2, 3.0, ())), (4, (5, 6.0, ()))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])]) + self.data = (w, x, y, z) + + def test_solo(self): + # Test merge_arrays on a single array. + (_, x, _, z) = self.data + + test = merge_arrays(x) + control = np.array([(1,), (2,)], dtype=[('f0', int)]) + assert_equal(test, control) + test = merge_arrays((x,)) + assert_equal(test, control) + + test = merge_arrays(z, flatten=False) + assert_equal(test, z) + test = merge_arrays(z, flatten=True) + assert_equal(test, z) + + def test_solo_w_flatten(self): + # Test merge_arrays on a single array w & w/o flattening + w = self.data[0] + test = merge_arrays(w, flatten=False) + assert_equal(test, w) + + test = merge_arrays(w, flatten=True) + control = np.array([(1, 2, 3.0), (4, 5, 6.0)], + dtype=[('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + def test_standard(self): + # Test standard & standard + # Test merge arrays + (_, x, y, _) = self.data + test = merge_arrays((x, y), usemask=False) + control = np.array([(1, 10), (2, 20), (-1, 30)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + + test = merge_arrays((x, y), usemask=True) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_flatten(self): + # Test standard & flexible + (_, x, _, z) = self.data + test = merge_arrays((x, z), flatten=True) + control = np.array([(1, 'A', 1.), (2, 'B', 2.)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + + test = merge_arrays((x, z), flatten=False) + control = np.array([(1, ('A', 1.)), (2, ('B', 2.))], + dtype=[('f0', int), + ('f1', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + def test_flatten_wflexible(self): + # Test flatten standard & nested + (w, x, _, _) = self.data + test = merge_arrays((x, w), flatten=True) + control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], + dtype=[('f0', int), + ('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + test = merge_arrays((x, w), flatten=False) + controldtype = [('f0', int), + ('f1', [('a', int), + ('b', [('ba', float), ('bb', int), ('bc', [])])])] + control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))], + dtype=controldtype) + assert_equal(test, control) + + def test_wmasked_arrays(self): + # Test merge_arrays masked arrays + (_, x, _, _) = self.data + mx = ma.array([1, 2, 3], mask=[1, 0, 0]) + test = 
merge_arrays((x, mx), usemask=True) + control = ma.array([(1, 1), (2, 2), (-1, 3)], + mask=[(0, 1), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + test = merge_arrays((x, mx), usemask=True, asrecarray=True) + assert_equal(test, control) + assert_(isinstance(test, MaskedRecords)) + + def test_w_singlefield(self): + # Test single field + test = merge_arrays((np.array([1, 2]).view([('a', int)]), + np.array([10., 20., 30.])),) + control = ma.array([(1, 10.), (2, 20.), (-1, 30.)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('a', int), ('f1', float)]) + assert_equal(test, control) + + def test_w_shorter_flex(self): + # Test merge_arrays w/ a shorter flexndarray. + z = self.data[-1] + + # Fixme, this test looks incomplete and broken + #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + # dtype=[('A', '|S3'), ('B', float), ('C', int)]) + #assert_equal(test, control) + + # Hack to avoid pyflakes warnings about unused variables + merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + dtype=[('A', '|S3'), ('B', float), ('C', int)]) + + def test_singlerecord(self): + (_, x, y, z) = self.data + test = merge_arrays((x[0], y[0], z[0]), usemask=False) + control = np.array([(1, 10, ('A', 1))], + dtype=[('f0', int), + ('f1', int), + ('f2', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + +class TestAppendFields: + # Test append_fields + + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_append_single(self): + # Test simple case + (_, x, _, _) = self.data + test = append_fields(x, 'A', data=[10, 20, 30]) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('A', int)],) + assert_equal(test, control) + + def test_append_double(self): + # Test simple case + (_, x, _, _) = self.data + test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) + control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], + mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], + dtype=[('f0', int), ('A', int), ('B', int)],) + assert_equal(test, control) + + def test_append_on_flex(self): + # Test append_fields on flexible type arrays + z = self.data[-1] + test = append_fields(z, 'C', data=[10, 20, 30]) + control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], + mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('C', int)],) + assert_equal(test, control) + + def test_append_on_nested(self): + # Test append_fields on nested fields + w = self.data[0] + test = append_fields(w, 'C', data=[10, 20, 30]) + control = ma.array([(1, (2, 3.0), 10), + (4, (5, 6.0), 20), + (-1, (-1, -1.), 30)], + mask=[( + 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)], + dtype=[('a', int), + ('b', [('ba', float), ('bb', int)]), + ('C', int)],) + assert_equal(test, control) + + +class TestStackArrays: + # Test stack_arrays + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def 
test_solo(self): + # Test stack_arrays on single arrays + (_, x, _, _) = self.data + test = stack_arrays((x,)) + assert_equal(test, x) + assert_(test is x) + + test = stack_arrays(x) + assert_equal(test, x) + assert_(test is x) + + def test_unnamed_fields(self): + # Tests combinations of arrays w/o named fields + (_, x, y, _) = self.data + + test = stack_arrays((x, x), usemask=False) + control = np.array([1, 2, 1, 2]) + assert_equal(test, control) + + test = stack_arrays((x, y), usemask=False) + control = np.array([1, 2, 10, 20, 30]) + assert_equal(test, control) + + test = stack_arrays((y, x), usemask=False) + control = np.array([10, 20, 30, 1, 2]) + assert_equal(test, control) + + def test_unnamed_and_named_fields(self): + # Test combination of arrays w/ & w/o named fields + (_, x, _, z) = self.data + + test = stack_arrays((x, z)) + control = ma.array([(1, -1, -1), (2, -1, -1), + (-1, 'A', 1), (-1, 'B', 2)], + mask=[(0, 1, 1), (0, 1, 1), + (1, 0, 0), (1, 0, 0)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + + def test_matching_named_fields(self): + # Test combination of arrays w/ matching field names + (_, x, _, z) = self.data + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + test = stack_arrays((z, zz)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, zz, x)) + ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)] + control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1), + ('a', 10., 100., -1), ('b', 20., 200., -1), + ('c', 30., 300., -1), + (-1, -1, -1, 1), (-1, -1, -1, 2)], + dtype=ndtype, + mask=[(0, 0, 1, 1), (0, 0, 1, 1), + (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), + (1, 1, 1, 0), (1, 1, 1, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_defaults(self): + # Test defaults: no exception raised if keys of defaults are not fields. 
+ (_, _, _, z) = self.data + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} + test = stack_arrays((z, zz), defaults=defaults) + control = ma.array([('A', 1, -9999.), ('B', 2, -9999.), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_autoconversion(self): + # Tests autoconversion + adtype = [('A', int), ('B', bool), ('C', float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [('A', int), ('B', float), ('C', float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + test = stack_arrays((a, b), autoconvert=True) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + with assert_raises(TypeError): + stack_arrays((a, b), autoconvert=False) + + def test_checktitles(self): + # Test using titles in the field names + adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + test = stack_arrays((a, b)) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_subdtype(self): + z = np.array([ + ('A', 1), ('B', 2) + ], dtype=[('A', '|S3'), ('B', float, (1,))]) + zz = np.array([ + ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.) 
+ ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)]) + + res = stack_arrays((z, zz)) + expected = ma.array( + data=[ + (b'A', [1.0], 0), + (b'B', [2.0], 0), + (b'a', [10.0], 100.0), + (b'b', [20.0], 200.0), + (b'c', [30.0], 300.0)], + mask=[ + (False, [False], True), + (False, [False], True), + (False, [False], False), + (False, [False], False), + (False, [False], False) + ], + dtype=zz.dtype + ) + assert_equal(res.dtype, expected.dtype) + assert_equal(res, expected) + assert_equal(res.mask, expected.mask) + + +class TestJoinBy: + def setup_method(self): + self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('c', int)]) + self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('d', int)]) + + def test_inner_join(self): + # Basic test of join_by + a, b = self.a, self.b + + test = join_by('a', a, b, jointype='inner') + control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), + (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), + (9, 59, 69, 109, 104)], + dtype=[('a', int), ('b1', int), ('b2', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_join(self): + a, b = self.a, self.b + + # Fixme, this test is broken + #test = join_by(('a', 'b'), a, b) + #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), + # (7, 57, 107, 102), (8, 58, 108, 103), + # (9, 59, 109, 104)], + # dtype=[('a', int), ('b', int), + # ('c', int), ('d', int)]) + #assert_equal(test, control) + + # Hack to avoid pyflakes unused variable warnings + join_by(('a', 'b'), a, b) + np.array([(5, 55, 105, 100), (6, 56, 106, 101), + (7, 57, 107, 102), (8, 58, 108, 103), + (9, 59, 109, 104)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + + def test_join_subdtype(self): + # tests the bug in https://stackoverflow.com/q/44769632/102441 + foo = np.array([(1,)], + dtype=[('key', int)]) + bar = np.array([(1, np.array([1,2,3]))], + dtype=[('key', int), ('value', 'uint16', 3)]) + res = join_by('key', foo, bar) + assert_equal(res, bar.view(ma.MaskedArray)) + + def test_outer_join(self): + a, b = self.a, self.b + + test = join_by(('a', 'b'), a, b, 'outer') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (5, 65, -1, 100), (6, 56, 106, -1), + (6, 66, -1, 101), (7, 57, 107, -1), + (7, 67, -1, 102), (8, 58, 108, -1), + (8, 68, -1, 103), (9, 59, 109, -1), + (9, 69, -1, 104), (10, 70, -1, 105), + (11, 71, -1, 106), (12, 72, -1, 107), + (13, 73, -1, 108), (14, 74, -1, 109)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_leftouter_join(self): + a, b = self.a, self.b + + test = join_by(('a', 'b'), a, b, 'leftouter') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (6, 56, 106, -1), (7, 57, 107, -1), + (8, 58, 108, -1), (9, 59, 109, -1)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1)], + dtype=[('a', int), ('b', int), ('c', int), 
('d', int)]) + assert_equal(test, control) + + def test_different_field_order(self): + # gh-8940 + a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) + b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) + # this should not give a FutureWarning: + j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False) + assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2']) + + def test_duplicate_keys(self): + a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')]) + b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')]) + assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b) + + def test_same_name_different_dtypes_key(self): + a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')]) + b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')]) + expected_dtype = np.dtype([ + ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')]) + + a = np.empty(3, dtype=a_dtype) + b = np.empty(3, dtype=b_dtype) + res = join_by('key', a, b) + + assert res.dtype == expected_dtype diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_shape_base.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_shape_base.py new file mode 100644 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_shape_base.py +import numpy as np +import functools +import sys +import pytest + +from numpy.lib.shape_base import ( + apply_along_axis, apply_over_axes, array_split, split, hsplit, + dsplit, vsplit, dstack, column_stack, kron, tile, expand_dims, + take_along_axis, put_along_axis + ) +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises, assert_warns + ) + + +IS_64BIT = sys.maxsize > 2**32 + + +def _add_keepdims(func): + """ hack in keepdims behavior into a function taking an axis """ + @functools.wraps(func) + def wrapped(a, axis, **kwargs): + res = func(a, axis=axis, **kwargs) + if axis is None: + axis = 0 # res is now a scalar, so we can insert this anywhere + return np.expand_dims(res, axis=axis) + return wrapped + + +class TestTakeAlongAxis: + def test_argequivalent(self): + """ Test it translates from arg<func> to <func> """ + from numpy.random import rand + a = rand(3, 4, 5) + + funcs = [ + (np.sort, np.argsort, dict()), + (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()), + (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()), + (np.partition, np.argpartition, dict(kth=2)), + ] + + for func, argfunc, kwargs in funcs: + for axis in list(range(a.ndim)) + [None]: + a_func = func(a, axis=axis, **kwargs) + ai_func = argfunc(a, axis=axis, **kwargs) + assert_equal(a_func, take_along_axis(a, ai_func, axis=axis)) + + def test_invalid(self): + """ Test it errors when indices has too few dimensions """ + a = np.ones((10, 10)) + ai = np.ones((10, 2), dtype=np.intp) + + # sanity check + take_along_axis(a, ai, axis=1) + + # not enough indices + assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1) + # bool arrays not allowed + assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1) + # float arrays not allowed + assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1) + # invalid axis + assert_raises(np.AxisError, take_along_axis, a, ai, axis=10) + + def test_empty(self): + """ Test everything is ok with empty results, even with inserted dims """ + a = np.ones((3, 4, 5)) + ai = np.ones((3, 0, 5), dtype=np.intp) + + actual = take_along_axis(a, ai, axis=1) + assert_equal(actual.shape, ai.shape) + + def test_broadcast(self): + """ Test that non-indexing dimensions are broadcast in both directions """ + a = np.ones((3, 4, 1)) + ai = np.ones((1, 2, 5), dtype=np.intp) + actual = take_along_axis(a, ai, axis=1) + assert_equal(actual.shape, (3, 2, 5)) + + +class TestPutAlongAxis: + def test_replace_max(self): + a_base = np.array([[10, 30, 20], [60, 40, 50]]) + + for axis in list(range(a_base.ndim)) + [None]: + # we mutate this in the loop + a = a_base.copy() + + # replace the max with a small value + i_max = _add_keepdims(np.argmax)(a, axis=axis) + put_along_axis(a, i_max, -99, axis=axis) + + # find the new minimum, which should be where the max used to be + i_min = _add_keepdims(np.argmin)(a, axis=axis) + + assert_equal(i_min, i_max) + + def test_broadcast(self): + """ Test that non-indexing dimensions are broadcast in both directions """ + a = np.ones((3, 4, 1)) + ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4 + put_along_axis(a, ai, 20, axis=1) + assert_equal(take_along_axis(a, ai, axis=1), 20) + + +class TestApplyAlongAxis: + def test_simple(self): + a = 
np.ones((20, 10), 'd') + assert_array_equal( + apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) + + def test_simple101(self): + a = np.ones((10, 101), 'd') + assert_array_equal( + apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) + + def test_3d(self): + a = np.arange(27).reshape((3, 3, 3)) + assert_array_equal(apply_along_axis(np.sum, 0, a), + [[27, 30, 33], [36, 39, 42], [45, 48, 51]]) + + def test_preserve_subclass(self): + def double(row): + return row * 2 + + class MyNDArray(np.ndarray): + pass + + m = np.array([[0, 1], [2, 3]]).view(MyNDArray) + expected = np.array([[0, 2], [4, 6]]).view(MyNDArray) + + result = apply_along_axis(double, 0, m) + assert_(isinstance(result, MyNDArray)) + assert_array_equal(result, expected) + + result = apply_along_axis(double, 1, m) + assert_(isinstance(result, MyNDArray)) + assert_array_equal(result, expected) + + def test_subclass(self): + class MinimalSubclass(np.ndarray): + data = 1 + + def minimal_function(array): + return array.data + + a = np.zeros((6, 3)).view(MinimalSubclass) + + assert_array_equal( + apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1]) + ) + + def test_scalar_array(self, cls=np.ndarray): + a = np.ones((6, 3)).view(cls) + res = apply_along_axis(np.sum, 0, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([6, 6, 6]).view(cls)) + + def test_0d_array(self, cls=np.ndarray): + def sum_to_0d(x): + """ Sum x, returning a 0d array of the same class """ + assert_equal(x.ndim, 1) + return np.squeeze(np.sum(x, keepdims=True)) + a = np.ones((6, 3)).view(cls) + res = apply_along_axis(sum_to_0d, 0, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([6, 6, 6]).view(cls)) + + res = apply_along_axis(sum_to_0d, 1, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls)) + + def test_axis_insertion(self, cls=np.ndarray): + def f1to2(x): + """produces an asymmetric non-square matrix from x""" + assert_equal(x.ndim, 1) + return (x[::-1] * x[1:,None]).view(cls) + + a2d = np.arange(6*3).reshape((6, 3)) + + # 2d insertion along first axis + actual = apply_along_axis(f1to2, 0, a2d) + expected = np.stack([ + f1to2(a2d[:,i]) for i in range(a2d.shape[1]) + ], axis=-1).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + # 2d insertion along last axis + actual = apply_along_axis(f1to2, 1, a2d) + expected = np.stack([ + f1to2(a2d[i,:]) for i in range(a2d.shape[0]) + ], axis=0).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + # 3d insertion along middle axis + a3d = np.arange(6*5*3).reshape((6, 5, 3)) + + actual = apply_along_axis(f1to2, 1, a3d) + expected = np.stack([ + np.stack([ + f1to2(a3d[i,:,j]) for i in range(a3d.shape[0]) + ], axis=0) + for j in range(a3d.shape[2]) + ], axis=-1).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + def test_subclass_preservation(self): + class MinimalSubclass(np.ndarray): + pass + self.test_scalar_array(MinimalSubclass) + self.test_0d_array(MinimalSubclass) + self.test_axis_insertion(MinimalSubclass) + + def test_axis_insertion_ma(self): + def f1to2(x): + """produces an asymmetric non-square matrix from x""" + assert_equal(x.ndim, 1) + res = x[::-1] * x[1:,None] + return np.ma.masked_where(res%5==0, res) + a = np.arange(6*3).reshape((6, 3)) + res = apply_along_axis(f1to2, 0, a) + assert_(isinstance(res, np.ma.masked_array)) + assert_equal(res.ndim, 3) + assert_array_equal(res[:,:,0].mask, 
f1to2(a[:,0]).mask) + assert_array_equal(res[:,:,1].mask, f1to2(a[:,1]).mask) + assert_array_equal(res[:,:,2].mask, f1to2(a[:,2]).mask) + + def test_tuple_func1d(self): + def sample_1d(x): + return x[1], x[0] + res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]])) + assert_array_equal(res, np.array([[2, 1], [4, 3]])) + + def test_empty(self): + # can't apply_along_axis when there's no chance to call the function + def never_call(x): + assert_(False) # should never be reached + + a = np.empty((0, 0)) + assert_raises(ValueError, np.apply_along_axis, never_call, 0, a) + assert_raises(ValueError, np.apply_along_axis, never_call, 1, a) + + # but it's sometimes ok with some non-zero dimensions + def empty_to_1(x): + assert_(len(x) == 0) + return 1 + + a = np.empty((10, 0)) + actual = np.apply_along_axis(empty_to_1, 1, a) + assert_equal(actual, np.ones(10)) + assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a) + + def test_with_iterable_object(self): + # from issue 5248 + d = np.array([ + [{1, 11}, {2, 22}, {3, 33}], + [{4, 44}, {5, 55}, {6, 66}] + ]) + actual = np.apply_along_axis(lambda a: set.union(*a), 0, d) + expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}]) + + assert_equal(actual, expected) + + # issue 8642 - assert_equal doesn't detect this! + for i in np.ndindex(actual.shape): + assert_equal(type(actual[i]), type(expected[i])) + + +class TestApplyOverAxes: + def test_simple(self): + a = np.arange(24).reshape(2, 3, 4) + aoa_a = apply_over_axes(np.sum, a, [0, 2]) + assert_array_equal(aoa_a, np.array([[[60], [92], [124]]])) + + +class TestExpandDims: + def test_functionality(self): + s = (2, 3, 4, 5) + a = np.empty(s) + for axis in range(-5, 4): + b = expand_dims(a, axis) + assert_(b.shape[axis] == 1) + assert_(np.squeeze(b).shape == s) + + def test_axis_tuple(self): + a = np.empty((3, 3, 3)) + assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3) + assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1) + assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1) + assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3) + + def test_axis_out_of_range(self): + s = (2, 3, 4, 5) + a = np.empty(s) + assert_raises(np.AxisError, expand_dims, a, -6) + assert_raises(np.AxisError, expand_dims, a, 5) + + a = np.empty((3, 3, 3)) + assert_raises(np.AxisError, expand_dims, a, (0, -6)) + assert_raises(np.AxisError, expand_dims, a, (0, 5)) + + def test_repeated_axis(self): + a = np.empty((3, 3, 3)) + assert_raises(ValueError, expand_dims, a, axis=(1, 1)) + + def test_subclasses(self): + a = np.arange(10).reshape((2, 5)) + a = np.ma.array(a, mask=a%3 == 0) + + expanded = np.expand_dims(a, axis=1) + assert_(isinstance(expanded, np.ma.MaskedArray)) + assert_equal(expanded.shape, (2, 1, 5)) + assert_equal(expanded.mask.shape, (2, 1, 5)) + + +class TestArraySplit: + def test_integer_0_split(self): + a = np.arange(10) + assert_raises(ValueError, array_split, a, 0) + + def test_integer_split(self): + a = np.arange(10) + res = array_split(a, 1) + desired = [np.arange(10)] + compare_results(res, desired) + + res = array_split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + res = array_split(a, 3) + desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)] + compare_results(res, desired) + + res = array_split(a, 4) + desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8), + np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 5) + desired = 
[np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 6) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 7) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 8) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5), + np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), + np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 9) + desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4), + np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), + np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 10) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 11) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10), np.array([])] + compare_results(res, desired) + + def test_integer_split_2D_rows(self): + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3, axis=0) + tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), + np.zeros((0, 10))] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + + # Same thing for manual splits: + res = array_split(a, [0, 1], axis=0) + tgt = [np.zeros((0, 10)), np.array([np.arange(10)]), + np.array([np.arange(10)])] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + + def test_integer_split_2D_cols(self): + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3, axis=-1) + desired = [np.array([np.arange(4), np.arange(4)]), + np.array([np.arange(4, 7), np.arange(4, 7)]), + np.array([np.arange(7, 10), np.arange(7, 10)])] + compare_results(res, desired) + + def test_integer_split_2D_default(self): + """ This will fail if we change default axis + """ + a = np.array([np.arange(10), np.arange(10)]) + res = array_split(a, 3) + tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), + np.zeros((0, 10))] + compare_results(res, tgt) + assert_(a.dtype.type is res[-1].dtype.type) + # perhaps should check higher dimensions + + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + def test_integer_split_2D_rows_greater_max_int32(self): + a = np.broadcast_to([0], (1 << 32, 2)) + res = array_split(a, 4) + chunk = np.broadcast_to([0], (1 << 30, 2)) + tgt = [chunk] * 4 + for i in range(len(tgt)): + assert_equal(res[i].shape, tgt[i].shape) + + def test_index_split_simple(self): + a = np.arange(10) + indices = [1, 5, 7] + res = array_split(a, indices, axis=-1) + desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7), + np.arange(7, 10)] + compare_results(res, desired) + + def test_index_split_low_bound(self): + a = np.arange(10) + indices = [0, 5, 7] + res = array_split(a, indices, axis=-1) + desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), + np.arange(7, 10)] + compare_results(res, desired) + + def test_index_split_high_bound(self): + a = np.arange(10) + indices = [0, 5, 7, 10, 12] + res = array_split(a, 
indices, axis=-1) + desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), + np.arange(7, 10), np.array([]), np.array([])] + compare_results(res, desired) + + +class TestSplit: + # The split function is essentially the same as array_split, + # except that it tests whether splitting will result in an + # equal split. Only that case is tested here. + + def test_equal_split(self): + a = np.arange(10) + res = split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + def test_unequal_split(self): + a = np.arange(10) + assert_raises(ValueError, split, a, 3) + + +class TestColumnStack: + def test_non_iterable(self): + assert_raises(TypeError, column_stack, 1) + + def test_1D_arrays(self): + # example from docstring + a = np.array((1, 2, 3)) + b = np.array((2, 3, 4)) + expected = np.array([[1, 2], + [2, 3], + [3, 4]]) + actual = np.column_stack((a, b)) + assert_equal(actual, expected) + + def test_2D_arrays(self): + # same as hstack 2D docstring example + a = np.array([[1], [2], [3]]) + b = np.array([[2], [3], [4]]) + expected = np.array([[1, 2], + [2, 3], + [3, 4]]) + actual = np.column_stack((a, b)) + assert_equal(actual, expected) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + column_stack((np.arange(3) for _ in range(2))) + + +class TestDstack: + def test_non_iterable(self): + assert_raises(TypeError, dstack, 1) + + def test_0D_array(self): + a = np.array(1) + b = np.array(2) + res = dstack([a, b]) + desired = np.array([[[1, 2]]]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = np.array([1]) + b = np.array([2]) + res = dstack([a, b]) + desired = np.array([[[1, 2]]]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = np.array([[1], [2]]) + b = np.array([[1], [2]]) + res = dstack([a, b]) + desired = np.array([[[1, 1]], [[2, 2]]]) + assert_array_equal(res, desired) + + def test_2D_array2(self): + a = np.array([1, 2]) + b = np.array([1, 2]) + res = dstack([a, b]) + desired = np.array([[[1, 1], [2, 2]]]) + assert_array_equal(res, desired) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + dstack((np.arange(3) for _ in range(2))) + + +# array_split has a more comprehensive test of splitting. +# only do simple tests on hsplit, vsplit, and dsplit +class TestHsplit: + """Only testing for integer splits. + + """ + def test_non_iterable(self): + assert_raises(ValueError, hsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + try: + hsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + res = hsplit(a, 2) + desired = [np.array([1, 2]), np.array([3, 4])] + compare_results(res, desired) + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = hsplit(a, 2) + desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])] + compare_results(res, desired) + + +class TestVsplit: + """Only testing for integer splits. + + """ + def test_non_iterable(self): + assert_raises(ValueError, vsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + assert_raises(ValueError, vsplit, a, 2) + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + try: + vsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = vsplit(a, 2) + desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])] + compare_results(res, desired) + + +class TestDsplit: + # Only testing for integer splits.
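+    # For orientation (a note on the API, not part of the original suite):
+    # hsplit splits along axis 1 (axis 0 for 1-D input), vsplit along axis 0
+    # (needs ndim >= 2), and dsplit along axis 2 (needs ndim >= 3); all three
+    # are thin wrappers around split.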
+ def test_non_iterable(self): + assert_raises(ValueError, dsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + assert_raises(ValueError, dsplit, a, 2) + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + assert_raises(ValueError, dsplit, a, 2) + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + try: + dsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_3D_array(self): + a = np.array([[[1, 2, 3, 4], + [1, 2, 3, 4]], + [[1, 2, 3, 4], + [1, 2, 3, 4]]]) + res = dsplit(a, 2) + desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]), + np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])] + compare_results(res, desired) + + +class TestSqueeze: + def test_basic(self): + from numpy.random import rand + + a = rand(20, 10, 10, 1, 1) + b = rand(20, 1, 10, 1, 20) + c = rand(1, 1, 20, 10) + assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10))) + assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20))) + assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10))) + + # Squeezing to 0-dim should still give an ndarray + a = [[[1.5]]] + res = np.squeeze(a) + assert_equal(res, 1.5) + assert_equal(res.ndim, 0) + assert_equal(type(res), np.ndarray) + + +class TestKron: + def test_basic(self): + # Using 0-dimensional ndarray + a = np.array(1) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[1, 2], [3, 4]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array(1) + assert_array_equal(np.kron(a, b), k) + + # Using 1-dimensional ndarray + a = np.array([3]) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[3, 6], [9, 12]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array([3]) + assert_array_equal(np.kron(a, b), k) + + # Using 3-dimensional ndarray + a = np.array([[[1]], [[2]]]) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array([[[1]], [[2]]]) + k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]]) + assert_array_equal(np.kron(a, b), k) + + def test_return_type(self): + class myarray(np.ndarray): + __array_priority__ = 1.0 + + a = np.ones([2, 2]) + ma = myarray(a.shape, a.dtype, a.data) + assert_equal(type(kron(a, a)), np.ndarray) + assert_equal(type(kron(ma, ma)), myarray) + assert_equal(type(kron(a, ma)), myarray) + assert_equal(type(kron(ma, a)), myarray) + + @pytest.mark.parametrize( + "array_class", [np.asarray, np.mat] + ) + def test_kron_smoke(self, array_class): + a = array_class(np.ones([3, 3])) + b = array_class(np.ones([3, 3])) + k = array_class(np.ones([9, 9])) + + assert_array_equal(np.kron(a, b), k) + + def test_kron_ma(self): + x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]]) + k = np.ma.array(np.diag([1, 4, 4, 16]), + mask=~np.array(np.identity(4), dtype=bool)) + + assert_array_equal(k, np.kron(x, x)) + + @pytest.mark.parametrize( + "shape_a,shape_b", [ + ((1, 1), (1, 1)), + ((1, 2, 3), (4, 5, 6)), + ((2, 2), (2, 2, 2)), + ((1, 0), (1, 1)), + ((2, 0, 2), (2, 2)), + ((2, 0, 0, 2), (2, 0, 2)), + ]) + def test_kron_shape(self, shape_a, shape_b): + a = np.ones(shape_a) + b = np.ones(shape_b) + normalised_shape_a = (1,) * max(0, len(shape_b)-len(shape_a)) + shape_a + normalised_shape_b = (1,) * max(0, len(shape_a)-len(shape_b)) + shape_b + expected_shape = np.multiply(normalised_shape_a, normalised_shape_b) + + k = np.kron(a, b) + assert np.array_equal( + k.shape, expected_shape), "Unexpected shape from kron" + + +class TestTile: + def 
test_basic(self): + a = np.array([0, 1, 2]) + b = [[1, 2], [3, 4]] + assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2]) + assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) + assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]]) + assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]]) + assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4], + [1, 2, 1, 2], [3, 4, 3, 4]]) + + def test_tile_one_repetition_on_array_gh4679(self): + a = np.arange(5) + b = tile(a, 1) + b += 2 + assert_equal(a, np.arange(5)) + + def test_empty(self): + a = np.array([[[]]]) + b = np.array([[], []]) + c = tile(b, 2).shape + d = tile(a, (3, 2, 5)).shape + assert_equal(c, (2, 0)) + assert_equal(d, (3, 2, 0)) + + def test_kroncompare(self): + from numpy.random import randint + + reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)] + shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)] + for s in shape: + b = randint(0, 10, size=s) + for r in reps: + a = np.ones(r, b.dtype) + large = tile(b, r) + klarge = kron(a, b) + assert_equal(large, klarge) + + +class TestMayShareMemory: + def test_basic(self): + d = np.ones((50, 60)) + d2 = np.ones((30, 60, 6)) + assert_(np.may_share_memory(d, d)) + assert_(np.may_share_memory(d, d[::-1])) + assert_(np.may_share_memory(d, d[::2])) + assert_(np.may_share_memory(d, d[1:, ::-1])) + + assert_(not np.may_share_memory(d[::-1], d2)) + assert_(not np.may_share_memory(d[::2], d2)) + assert_(not np.may_share_memory(d[1:, ::-1], d2)) + assert_(np.may_share_memory(d2[1:, ::-1], d2)) + + +# Utility +def compare_results(res, desired): + """Compare lists of arrays.""" + if len(res) != len(desired): + raise ValueError("Iterables have different lengths") + # See also PEP 618 for Python 3.10 + for x, y in zip(res, desired): + assert_array_equal(x, y) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_stride_tricks.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_stride_tricks.py new file mode 100644 index 00000000..efec5d24 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_stride_tricks.py @@ -0,0 +1,645 @@ +import numpy as np +from numpy.core._rational_tests import rational +from numpy.testing import ( + assert_equal, assert_array_equal, assert_raises, assert_, + assert_raises_regex, assert_warns, + ) +from numpy.lib.stride_tricks import ( + as_strided, broadcast_arrays, _broadcast_shape, broadcast_to, + broadcast_shapes, sliding_window_view, + ) +import pytest + + +def assert_shapes_correct(input_shapes, expected_shape): + # Broadcast a list of arrays with the given input shapes and check the + # common output shape. + + inarrays = [np.zeros(s) for s in input_shapes] + outarrays = broadcast_arrays(*inarrays) + outshapes = [a.shape for a in outarrays] + expected = [expected_shape] * len(inarrays) + assert_equal(outshapes, expected) + + +def assert_incompatible_shapes_raise(input_shapes): + # Broadcast a list of arrays with the given (incompatible) input shapes + # and check that they raise a ValueError. + + inarrays = [np.zeros(s) for s in input_shapes] + assert_raises(ValueError, broadcast_arrays, *inarrays) + + +def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False): + # Broadcast two shapes against each other and check that the data layout + # is the same as if a ufunc did the broadcasting. 
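+    # Worked example of the claim being checked (illustrative numbers, not
+    # taken from the suite): with shape0 == (3, 1) and shape1 == (1, 4),
+    # x0 + x1 broadcasts to shape (3, 4), and broadcast_arrays(x0, x1) must
+    # return views with that same (3, 4) layout.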
+ + x0 = np.zeros(shape0, dtype=int) + # Note that multiply.reduce's identity element is 1.0, so when shape1==(), + # this gives the desired n==1. + n = int(np.multiply.reduce(shape1)) + x1 = np.arange(n).reshape(shape1) + if transposed: + x0 = x0.T + x1 = x1.T + if flipped: + x0 = x0[::-1] + x1 = x1[::-1] + # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the + # result should be exactly the same as the broadcasted view of x1. + y = x0 + x1 + b0, b1 = broadcast_arrays(x0, x1) + assert_array_equal(y, b1) + + +def test_same(): + x = np.arange(10) + y = np.arange(10) + bx, by = broadcast_arrays(x, y) + assert_array_equal(x, bx) + assert_array_equal(y, by) + +def test_broadcast_kwargs(): + # ensure that a TypeError is appropriately raised when + # np.broadcast_arrays() is called with any keyword + # argument other than 'subok' + x = np.arange(10) + y = np.arange(10) + + with assert_raises_regex(TypeError, 'got an unexpected keyword'): + broadcast_arrays(x, y, dtype='float64') + + +def test_one_off(): + x = np.array([[1, 2, 3]]) + y = np.array([[1], [2], [3]]) + bx, by = broadcast_arrays(x, y) + bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) + by0 = bx0.T + assert_array_equal(bx0, bx) + assert_array_equal(by0, by) + + +def test_same_input_shapes(): + # Check that the final shape is just the input shape. + + data = [ + (), + (1,), + (3,), + (0, 1), + (0, 3), + (1, 0), + (3, 0), + (1, 3), + (3, 1), + (3, 3), + ] + for shape in data: + input_shapes = [shape] + # Single input. + assert_shapes_correct(input_shapes, shape) + # Double input. + input_shapes2 = [shape, shape] + assert_shapes_correct(input_shapes2, shape) + # Triple input. + input_shapes3 = [shape, shape, shape] + assert_shapes_correct(input_shapes3, shape) + + +def test_two_compatible_by_ones_input_shapes(): + # Check that two different input shapes of the same length, but some have + # ones, broadcast to the correct shape. + + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_two_compatible_by_prepending_ones_input_shapes(): + # Check that two different input shapes (of different lengths) broadcast + # to the correct shape. + + data = [ + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. 
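+        # e.g. broadcasting (3,) with (3, 1) gives (3, 3) whichever
+        # argument comes first.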
+ assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_incompatible_shapes_raise_valueerror(): + # Check that a ValueError is raised for incompatible shapes. + + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), (4,)], + [(1, 3, 4), (2, 3, 3)], + ] + for input_shapes in data: + assert_incompatible_shapes_raise(input_shapes) + # Reverse the input shapes since broadcasting should be symmetric. + assert_incompatible_shapes_raise(input_shapes[::-1]) + + +def test_same_as_ufunc(): + # Check that the data layout is the same as if a ufunc did the operation. + + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + # assert_same_as_ufunc takes no message argument; passing the old + # "Shapes: %s %s" string here bound it to `transposed`, so the + # untransposed layout was never actually exercised. + assert_same_as_ufunc(input_shapes[0], input_shapes[1]) + # Reverse the input shapes since broadcasting should be symmetric. + assert_same_as_ufunc(input_shapes[1], input_shapes[0]) + # Try them transposed, too. + assert_same_as_ufunc(input_shapes[0], input_shapes[1], True) + # ... and flipped for non-rank-0 inputs in order to test negative + # strides.
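+        # A rank-0 input cannot be flipped with [::-1] (there is no axis to
+        # index), hence the guard below.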
+ if () not in input_shapes: + assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True) + assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True) + + +def test_broadcast_to_succeeds(): + data = [ + [np.array(0), (0,), np.array(0)], + [np.array(0), (1,), np.zeros(1)], + [np.array(0), (3,), np.zeros(3)], + [np.ones(1), (1,), np.ones(1)], + [np.ones(1), (2,), np.ones(2)], + [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))], + [np.arange(3), (3,), np.arange(3)], + [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)], + [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])], + # test if shape is not a tuple + [np.ones(0), 0, np.ones(0)], + [np.ones(1), 1, np.ones(1)], + [np.ones(1), 2, np.ones(2)], + # these cases with size 0 are strange, but they reproduce the behavior + # of broadcasting with ufuncs (see test_same_as_ufunc above) + [np.ones(1), (0,), np.ones(0)], + [np.ones((1, 2)), (0, 2), np.ones((0, 2))], + [np.ones((2, 1)), (2, 0), np.ones((2, 0))], + ] + for input_array, shape, expected in data: + actual = broadcast_to(input_array, shape) + assert_array_equal(expected, actual) + + +def test_broadcast_to_raises(): + data = [ + [(0,), ()], + [(1,), ()], + [(3,), ()], + [(3,), (1,)], + [(3,), (2,)], + [(3,), (4,)], + [(1, 2), (2, 1)], + [(1, 1), (1,)], + [(1,), -1], + [(1,), (-1,)], + [(1, 2), (-1, 2)], + ] + for orig_shape, target_shape in data: + arr = np.zeros(orig_shape) + assert_raises(ValueError, lambda: broadcast_to(arr, target_shape)) + + +def test_broadcast_shape(): + # tests internal _broadcast_shape + # _broadcast_shape is already exercised indirectly by broadcast_arrays + # _broadcast_shape is also exercised by the public broadcast_shapes function + assert_equal(_broadcast_shape(), ()) + assert_equal(_broadcast_shape([1, 2]), (2,)) + assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1)) + assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4)) + assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2)) + assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2)) + + # regression tests for gh-5862 + assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,)) + bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32 + assert_raises(ValueError, lambda: _broadcast_shape(*bad_args)) + + +def test_broadcast_shapes_succeeds(): + # tests public broadcast_shapes + data = [ + [[], ()], + [[()], ()], + [[(7,)], (7,)], + [[(1, 2), (2,)], (1, 2)], + [[(1, 1)], (1, 1)], + [[(1, 1), (3, 4)], (3, 4)], + [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)], + [[(5, 6, 1)], (5, 6, 1)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + [[(1,), (3,)], (3,)], + [[2, (3, 2)], (3, 2)], + ] + for input_shapes, target_shape in data: + assert_equal(broadcast_shapes(*input_shapes), target_shape) + + assert_equal(broadcast_shapes(*([(1, 2)] * 32)), (1, 2)) + assert_equal(broadcast_shapes(*([(1, 2)] * 100)), (1, 2)) + + # regression tests for gh-5862 + assert_equal(broadcast_shapes(*([(2,)] * 32)), (2,)) + + +def test_broadcast_shapes_raises(): + # tests public broadcast_shapes + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), 
(4,)], + [(1, 3, 4), (2, 3, 3)], + [(1, 2), (3,1), (3,2), (10, 5)], + [2, (2, 3)], + ] + for input_shapes in data: + assert_raises(ValueError, lambda: broadcast_shapes(*input_shapes)) + + bad_args = [(2,)] * 32 + [(3,)] * 32 + assert_raises(ValueError, lambda: broadcast_shapes(*bad_args)) + + +def test_as_strided(): + a = np.array([None]) + a_view = as_strided(a) + expected = np.array([None]) + assert_array_equal(a_view, np.array([None])) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + expected = np.array([1, 3]) + assert_array_equal(a_view, expected) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize)) + expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + assert_array_equal(a_view, expected) + + # Regression test for gh-5081 + dt = np.dtype([('num', 'i4'), ('obj', 'O')]) + a = np.empty((4,), dtype=dt) + a['num'] = np.arange(1, 5) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + expected_num = [[1, 2, 3, 4]] * 3 + expected_obj = [[None]*4]*3 + assert_equal(a_view.dtype, dt) + assert_array_equal(expected_num, a_view['num']) + assert_array_equal(expected_obj, a_view['obj']) + + # Make sure that void types without fields are kept unchanged + a = np.empty((4,), dtype='V4') + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + + # Make sure that the only type that could fail is properly handled + dt = np.dtype({'names': [''], 'formats': ['V4']}) + a = np.empty((4,), dtype=dt) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + + # Custom dtypes should not be lost (gh-9161) + r = [rational(i) for i in range(4)] + a = np.array(r, dtype=rational) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + assert_array_equal([r] * 3, a_view) + + +class TestSlidingWindowView: + def test_1d(self): + arr = np.arange(5) + arr_view = sliding_window_view(arr, 2) + expected = np.array([[0, 1], + [1, 2], + [2, 3], + [3, 4]]) + assert_array_equal(arr_view, expected) + + def test_2d(self): + i, j = np.ogrid[:3, :4] + arr = 10*i + j + shape = (2, 2) + arr_view = sliding_window_view(arr, shape) + expected = np.array([[[[0, 1], [10, 11]], + [[1, 2], [11, 12]], + [[2, 3], [12, 13]]], + [[[10, 11], [20, 21]], + [[11, 12], [21, 22]], + [[12, 13], [22, 23]]]]) + assert_array_equal(arr_view, expected) + + def test_2d_with_axis(self): + i, j = np.ogrid[:3, :4] + arr = 10*i + j + arr_view = sliding_window_view(arr, 3, 0) + expected = np.array([[[0, 10, 20], + [1, 11, 21], + [2, 12, 22], + [3, 13, 23]]]) + assert_array_equal(arr_view, expected) + + def test_2d_repeated_axis(self): + i, j = np.ogrid[:3, :4] + arr = 10*i + j + arr_view = sliding_window_view(arr, (2, 3), (1, 1)) + expected = np.array([[[[0, 1, 2], + [1, 2, 3]]], + [[[10, 11, 12], + [11, 12, 13]]], + [[[20, 21, 22], + [21, 22, 23]]]]) + assert_array_equal(arr_view, expected) + + def test_2d_without_axis(self): + i, j = np.ogrid[:4, :4] + arr = 10*i + j + shape = (2, 3) + arr_view = sliding_window_view(arr, shape) + expected = np.array([[[[0, 1, 2], [10, 11, 12]], + [[1, 2, 3], [11, 12, 13]]], + [[[10, 11, 12], [20, 21, 22]], + [[11, 12, 13], [21, 22, 23]]], + [[[20, 21, 22], [30, 31, 32]], + [[21, 22, 23], [31, 32, 33]]]]) + assert_array_equal(arr_view, expected) + + def test_errors(self): + i, j = np.ogrid[:4, :4] + arr = 10*i + j + with pytest.raises(ValueError, match='cannot contain 
negative values'): + sliding_window_view(arr, (-1, 3)) + with pytest.raises( + ValueError, + match='must provide window_shape for all dimensions of `x`'): + sliding_window_view(arr, (1,)) + with pytest.raises( + ValueError, + match='Must provide matching length window_shape and axis'): + sliding_window_view(arr, (1, 3, 4), axis=(0, 1)) + with pytest.raises( + ValueError, + match='window shape cannot be larger than input array'): + sliding_window_view(arr, (5, 5)) + + def test_writeable(self): + arr = np.arange(5) + view = sliding_window_view(arr, 2, writeable=False) + assert_(not view.flags.writeable) + with pytest.raises( + ValueError, + match='assignment destination is read-only'): + view[0, 0] = 3 + view = sliding_window_view(arr, 2, writeable=True) + assert_(view.flags.writeable) + view[0, 1] = 3 + assert_array_equal(arr, np.array([0, 3, 2, 3, 4])) + + def test_subok(self): + class MyArray(np.ndarray): + pass + + arr = np.arange(5).view(MyArray) + assert_(not isinstance(sliding_window_view(arr, 2, + subok=False), + MyArray)) + assert_(isinstance(sliding_window_view(arr, 2, subok=True), MyArray)) + # Default behavior + assert_(not isinstance(sliding_window_view(arr, 2), MyArray)) + + +def test_as_strided_writeable(): + # note: without the test_ prefix this function was never collected + arr = np.ones(10) + view = as_strided(arr, writeable=False) + assert_(not view.flags.writeable) + + # Check that writeable=True also works: + view = as_strided(arr, writeable=True) + assert_(view.flags.writeable) + view[...] = 3 + assert_array_equal(arr, np.full_like(arr, 3)) + + # Test that things do not break down for readonly: + arr.flags.writeable = False + view = as_strided(arr, writeable=False) + view = as_strided(arr, writeable=True) + assert_(not view.flags.writeable) + + +class VerySimpleSubClass(np.ndarray): + def __new__(cls, *args, **kwargs): + return np.array(*args, subok=True, **kwargs).view(cls) + + +class SimpleSubClass(VerySimpleSubClass): + def __new__(cls, *args, **kwargs): + self = np.array(*args, subok=True, **kwargs).view(cls) + self.info = 'simple' + return self + + def __array_finalize__(self, obj): + self.info = getattr(obj, 'info', '') + ' finalized' + + +def test_subclasses(): + # test that subclass is preserved only if subok=True + a = VerySimpleSubClass([1, 2, 3, 4]) + assert_(type(a) is VerySimpleSubClass) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + assert_(type(a_view) is np.ndarray) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) + assert_(type(a_view) is VerySimpleSubClass) + # test that if a subclass has __array_finalize__, it is used + a = SimpleSubClass([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + + # similar tests for broadcast_arrays + b = np.arange(len(a)).reshape(-1, 1) + a_view, b_view = broadcast_arrays(a, b) + assert_(type(a_view) is np.ndarray) + assert_(type(b_view) is np.ndarray) + assert_(a_view.shape == b_view.shape) + a_view, b_view = broadcast_arrays(a, b, subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + assert_(type(b_view) is np.ndarray) + assert_(a_view.shape == b_view.shape) + + # and for broadcast_to + shape = (2, 4) + a_view = broadcast_to(a, shape) + assert_(type(a_view) is np.ndarray) + assert_(a_view.shape == shape) + a_view = broadcast_to(a, shape, subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + assert_(a_view.shape == shape) + + +def
test_writeable(): + # broadcast_to should return a readonly array + original = np.array([1, 2, 3]) + result = broadcast_to(original, (2, 3)) + assert_equal(result.flags.writeable, False) + assert_raises(ValueError, result.__setitem__, slice(None), 0) + + # but the result of broadcast_arrays needs to be writeable, to + # preserve backwards compatibility + for is_broadcast, results in [(False, broadcast_arrays(original,)), + (True, broadcast_arrays(0, original))]: + for result in results: + # This will change to False in a future version + if is_broadcast: + with assert_warns(FutureWarning): + assert_equal(result.flags.writeable, True) + with assert_warns(DeprecationWarning): + result[:] = 0 + # Warning not emitted, writing to the array resets it + assert_equal(result.flags.writeable, True) + else: + # No warning: + assert_equal(result.flags.writeable, True) + + for results in [broadcast_arrays(original), + broadcast_arrays(0, original)]: + for result in results: + # resets the warn_on_write DeprecationWarning + result.flags.writeable = True + # check: no warning emitted + assert_equal(result.flags.writeable, True) + result[:] = 0 + + # keep readonly input readonly + original.flags.writeable = False + _, result = broadcast_arrays(0, original) + assert_equal(result.flags.writeable, False) + + # regression test for GH6491 + shape = (2,) + strides = [0] + tricky_array = as_strided(np.array(0), shape, strides) + other = np.zeros((1,)) + first, second = broadcast_arrays(tricky_array, other) + assert_(first.shape == second.shape) + + +def test_writeable_memoryview(): + # The result of broadcast_arrays exports as a non-writeable memoryview + # because otherwise there is no good way to opt in to the new behaviour + # (i.e. you would need to set writeable to False explicitly). + # See gh-13929. + original = np.array([1, 2, 3]) + + for is_broadcast, results in [(False, broadcast_arrays(original,)), + (True, broadcast_arrays(0, original))]: + for result in results: + # This will change to False in a future version + if is_broadcast: + # memoryview(result, writable=True) will give warning but cannot + # be tested using the python API. 
+ assert memoryview(result).readonly + else: + assert not memoryview(result).readonly + + +def test_reference_types(): + input_array = np.array('a', dtype=object) + expected = np.array(['a'] * 3, dtype=object) + actual = broadcast_to(input_array, (3,)) + assert_array_equal(expected, actual) + + actual, _ = broadcast_arrays(input_array, np.ones(3)) + assert_array_equal(expected, actual) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_twodim_base.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_twodim_base.py new file mode 100644 index 00000000..eb008c60 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_twodim_base.py @@ -0,0 +1,541 @@ +"""Test functions for matrix module + +""" +from numpy.testing import ( + assert_equal, assert_array_equal, assert_array_max_ulp, + assert_array_almost_equal, assert_raises, assert_ +) +from numpy import ( + arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d, + tri, mask_indices, triu_indices, triu_indices_from, tril_indices, + tril_indices_from, vander, +) +import numpy as np + +import pytest + + +def get_mat(n): + data = arange(n) + data = add.outer(data, data) + return data + + +class TestEye: + def test_basic(self): + assert_equal(eye(4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]])) + + assert_equal(eye(4, dtype='f'), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]], 'f')) + + assert_equal(eye(3) == 1, + eye(3, dtype=bool)) + + def test_uint64(self): + # Regression test for gh-9982 + assert_equal(eye(np.uint64(2), dtype=int), array([[1, 0], [0, 1]])) + assert_equal(eye(np.uint64(2), M=np.uint64(4), k=np.uint64(1)), + array([[0, 1, 0, 0], [0, 0, 1, 0]])) + + def test_diag(self): + assert_equal(eye(4, k=1), + array([[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, k=-1), + array([[0, 0, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_2d(self): + assert_equal(eye(4, 3), + array([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, 0, 0]])) + + assert_equal(eye(3, 4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_diag2d(self): + assert_equal(eye(3, 4, k=2), + array([[0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, 3, k=-2), + array([[0, 0, 0], + [0, 0, 0], + [1, 0, 0], + [0, 1, 0]])) + + def test_eye_bounds(self): + assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]]) + assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]]) + assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]]) + assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]]) + assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]]) + assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]]) + assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]]) + + def test_strings(self): + assert_equal(eye(2, 2, dtype='S3'), + [[b'1', b''], [b'', b'1']]) + + def test_bool(self): + assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]]) + + def test_order(self): + mat_c = eye(4, 3, k=-1) + mat_f = eye(4, 3, k=-1, order='F') + assert_equal(mat_c, mat_f) + assert mat_c.flags.c_contiguous + assert not mat_c.flags.f_contiguous + assert not mat_f.flags.c_contiguous + assert mat_f.flags.f_contiguous + + +class TestDiag: + def test_vector(self): + vals = (100 * arange(5)).astype('l') + b = zeros((5, 5)) + for k in range(5): + b[k, k] = vals[k] + assert_equal(diag(vals), b) + b = zeros((7, 7)) + c = b.copy() + 
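+        # diag(v, k) places v on the k-th diagonal: k > 0 sits above the
+        # main diagonal and k < 0 below, which is what the two loops below
+        # build by hand.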
for k in range(5): + b[k, k + 2] = vals[k] + c[k + 2, k] = vals[k] + assert_equal(diag(vals, k=2), b) + assert_equal(diag(vals, k=-2), c) + + def test_matrix(self, vals=None): + if vals is None: + vals = (100 * get_mat(5) + 1).astype('l') + b = zeros((5,)) + for k in range(5): + b[k] = vals[k, k] + assert_equal(diag(vals), b) + b = b * 0 + for k in range(3): + b[k] = vals[k, k + 2] + assert_equal(diag(vals, 2), b[:3]) + for k in range(3): + b[k] = vals[k + 2, k] + assert_equal(diag(vals, -2), b[:3]) + + def test_fortran_order(self): + vals = array((100 * get_mat(5) + 1), order='F', dtype='l') + self.test_matrix(vals) + + def test_diag_bounds(self): + A = [[1, 2], [3, 4], [5, 6]] + assert_equal(diag(A, k=2), []) + assert_equal(diag(A, k=1), [2]) + assert_equal(diag(A, k=0), [1, 4]) + assert_equal(diag(A, k=-1), [3, 6]) + assert_equal(diag(A, k=-2), [5]) + assert_equal(diag(A, k=-3), []) + + def test_failure(self): + assert_raises(ValueError, diag, [[[1]]]) + + +class TestFliplr: + def test_basic(self): + assert_raises(ValueError, fliplr, ones(4)) + a = get_mat(4) + b = a[:, ::-1] + assert_equal(fliplr(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[2, 1, 0], + [5, 4, 3]] + assert_equal(fliplr(a), b) + + +class TestFlipud: + def test_basic(self): + a = get_mat(4) + b = a[::-1, :] + assert_equal(flipud(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[3, 4, 5], + [0, 1, 2]] + assert_equal(flipud(a), b) + + +class TestHistogram2d: + def test_simple(self): + x = array( + [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891]) + y = array( + [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673]) + xedges = np.linspace(0, 1, 10) + yedges = np.linspace(0, 1, 10) + H = histogram2d(x, y, (xedges, yedges))[0] + answer = array( + [[0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]]) + assert_array_equal(H.T, answer) + H = histogram2d(x, y, xedges)[0] + assert_array_equal(H.T, answer) + H, xedges, yedges = histogram2d(list(range(10)), list(range(10))) + assert_array_equal(H, eye(10, 10)) + assert_array_equal(xedges, np.linspace(0, 9, 11)) + assert_array_equal(yedges, np.linspace(0, 9, 11)) + + def test_asym(self): + x = array([1, 1, 2, 3, 4, 4, 4, 5]) + y = array([1, 3, 2, 0, 1, 2, 3, 4]) + H, xed, yed = histogram2d( + x, y, (6, 5), range=[[0, 6], [0, 5]], density=True) + answer = array( + [[0., 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [0, 0, 1, 0, 0], + [1, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 1]]) + assert_array_almost_equal(H, answer/8., 3) + assert_array_equal(xed, np.linspace(0, 6, 7)) + assert_array_equal(yed, np.linspace(0, 5, 6)) + + def test_density(self): + x = array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + y = array([1, 1, 1, 2, 2, 2, 3, 3, 3]) + H, xed, yed = histogram2d( + x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True) + answer = array([[1, 1, .5], + [1, 1, .5], + [.5, .5, .25]])/9. + assert_array_almost_equal(H, answer, 3) + + def test_all_outliers(self): + r = np.random.rand(100) + 1. 
+ 1e6 # histogramdd rounds by decimal=6 + H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1])) + assert_array_equal(H, 0) + + def test_empty(self): + a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1])) + assert_array_max_ulp(a, array([[0.]])) + + a, edge1, edge2 = histogram2d([], [], bins=4) + assert_array_max_ulp(a, np.zeros((4, 4))) + + def test_binparameter_combination(self): + x = array( + [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599, + 0.59944483, 1]) + y = array( + [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682, + 0.15886423, 1]) + edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1) + H, xe, ye = histogram2d(x, y, (edges, 4)) + answer = array( + [[2., 0., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 1., 0., 0.], + [1., 0., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 1.]]) + assert_array_equal(H, answer) + assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1])) + H, xe, ye = histogram2d(x, y, (4, edges)) + answer = array( + [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]) + assert_array_equal(H, answer) + assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1])) + + def test_dispatch(self): + class ShouldDispatch: + def __array_function__(self, function, types, args, kwargs): + return types, args, kwargs + + xy = [1, 2] + s_d = ShouldDispatch() + r = histogram2d(s_d, xy) + # Cannot use assert_equal since that dispatches... + assert_(r == ((ShouldDispatch,), (s_d, xy), {})) + r = histogram2d(xy, s_d) + assert_(r == ((ShouldDispatch,), (xy, s_d), {})) + r = histogram2d(xy, xy, bins=s_d) + assert_(r == ((ShouldDispatch,), (xy, xy), dict(bins=s_d))) + r = histogram2d(xy, xy, bins=[s_d, 5]) + assert_(r == ((ShouldDispatch,), (xy, xy), dict(bins=[s_d, 5]))) + assert_raises(Exception, histogram2d, xy, xy, bins=[s_d]) + r = histogram2d(xy, xy, weights=s_d) + assert_(r == ((ShouldDispatch,), (xy, xy), dict(weights=s_d))) + + @pytest.mark.parametrize(("x_len", "y_len"), [(10, 11), (20, 19)]) + def test_bad_length(self, x_len, y_len): + x, y = np.ones(x_len), np.ones(y_len) + with pytest.raises(ValueError, + match='x and y must have the same length.'): + histogram2d(x, y) + + +class TestTri: + def test_dtype(self): + out = array([[1, 0, 0], + [1, 1, 0], + [1, 1, 1]]) + assert_array_equal(tri(3), out) + assert_array_equal(tri(3, dtype=bool), out.astype(bool)) + + +def test_tril_triu_ndim2(): + for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: + a = np.ones((2, 2), dtype=dtype) + b = np.tril(a) + c = np.triu(a) + assert_array_equal(b, [[1, 0], [1, 1]]) + assert_array_equal(c, b.T) + # should return the same dtype as the original array + assert_equal(b.dtype, a.dtype) + assert_equal(c.dtype, a.dtype) + + +def test_tril_triu_ndim3(): + for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: + a = np.array([ + [[1, 1], [1, 1]], + [[1, 1], [1, 0]], + [[1, 1], [0, 0]], + ], dtype=dtype) + a_tril_desired = np.array([ + [[1, 0], [1, 1]], + [[1, 0], [1, 0]], + [[1, 0], [0, 0]], + ], dtype=dtype) + a_triu_desired = np.array([ + [[1, 1], [0, 1]], + [[1, 1], [0, 0]], + [[1, 1], [0, 0]], + ], dtype=dtype) + a_triu_observed = np.triu(a) + a_tril_observed = np.tril(a) + assert_array_equal(a_triu_observed, a_triu_desired) + assert_array_equal(a_tril_observed, a_tril_desired) + assert_equal(a_triu_observed.dtype, a.dtype) + assert_equal(a_tril_observed.dtype,
a.dtype) + + +def test_tril_triu_with_inf(): + # Issue 4859 + arr = np.array([[1, 1, np.inf], + [1, 1, 1], + [np.inf, 1, 1]]) + out_tril = np.array([[1, 0, 0], + [1, 1, 0], + [np.inf, 1, 1]]) + out_triu = out_tril.T + assert_array_equal(np.triu(arr), out_triu) + assert_array_equal(np.tril(arr), out_tril) + + +def test_tril_triu_dtype(): + # Issue 4916 + # tril and triu should return the same dtype as input + for c in np.typecodes['All']: + if c == 'V': + continue + arr = np.zeros((3, 3), dtype=c) + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + # check special cases + arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'], + ['2004-01-01T12:00', '2003-01-03T13:45']], + dtype='datetime64') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + arr = np.zeros((3, 3), dtype='f4,f4') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + +def test_mask_indices(): + # simple test without offset + iu = mask_indices(3, np.triu) + a = np.arange(9).reshape(3, 3) + assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8])) + # Now with an offset + iu1 = mask_indices(3, np.triu, 1) + assert_array_equal(a[iu1], array([1, 2, 5])) + + +def test_tril_indices(): + # indices without and with offset + il1 = tril_indices(4) + il2 = tril_indices(4, k=2) + il3 = tril_indices(4, m=5) + il4 = tril_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # indexing: + assert_array_equal(a[il1], + array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16])) + assert_array_equal(b[il3], + array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19])) + + # And for assigning values: + a[il1] = -1 + assert_array_equal(a, + array([[-1, 2, 3, 4], + [-1, -1, 7, 8], + [-1, -1, -1, 12], + [-1, -1, -1, -1]])) + b[il3] = -1 + assert_array_equal(b, + array([[-1, 2, 3, 4, 5], + [-1, -1, 8, 9, 10], + [-1, -1, -1, 14, 15], + [-1, -1, -1, -1, 20]])) + # These cover almost the whole array (two diagonals right of the main one): + a[il2] = -10 + assert_array_equal(a, + array([[-10, -10, -10, 4], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]])) + b[il4] = -10 + assert_array_equal(b, + array([[-10, -10, -10, 4, 5], + [-10, -10, -10, -10, 10], + [-10, -10, -10, -10, -10], + [-10, -10, -10, -10, -10]])) + + +class TestTriuIndices: + def test_triu_indices(self): + iu1 = triu_indices(4) + iu2 = triu_indices(4, k=2) + iu3 = triu_indices(4, m=5) + iu4 = triu_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # Both for indexing: + assert_array_equal(a[iu1], + array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16])) + assert_array_equal(b[iu3], + array([1, 2, 3, 4, 5, 7, 8, 9, + 10, 13, 14, 15, 19, 20])) + + # And for assigning values: + a[iu1] = -1 + assert_array_equal(a, + array([[-1, -1, -1, -1], + [5, -1, -1, -1], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu3] = -1 + assert_array_equal(b, + array([[-1, -1, -1, -1, -1], + [6, -1, -1, -1, -1], + [11, 12, -1, -1, -1], + [16, 17, 18, -1, -1]])) + + # These cover almost the whole array (two diagonals right of the + # main one): + a[iu2] = -10 + assert_array_equal(a, + array([[-1, -1, -10, -10], + [5, -1, -1, -10], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu4] = -10 + assert_array_equal(b, + array([[-1, -1, -10, -10, -10], + [6, -1, -1, -10, -10], + [11, 12, -1, -1, -10], + [16, 17, 18, -1, -1]])) 
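+
+# Illustrative sketch, not part of the original suite: triu_indices(k=1),
+# tril_indices(k=-1) and np.diag_indices partition a square matrix, which is
+# the invariant the indexing and assignment tests above rely on. The helper
+# name is hypothetical and deliberately not collected by pytest.
+def _index_partition_sketch(n=4):
+    a = np.arange(n * n).reshape(n, n)
+    upper = a[triu_indices(n, k=1)]   # strictly above the main diagonal
+    lower = a[tril_indices(n, k=-1)]  # strictly below the main diagonal
+    main = a[np.diag_indices(n)]      # the main diagonal itself
+    # every entry lands in exactly one of the three index sets
+    assert upper.size + lower.size + main.size == a.size
+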
+ + +class TestTrilIndicesFrom: + def test_exceptions(self): + assert_raises(ValueError, tril_indices_from, np.ones((2,))) + assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, tril_indices_from, np.ones((2, 3))) + + +class TestTriuIndicesFrom: + def test_exceptions(self): + assert_raises(ValueError, triu_indices_from, np.ones((2,))) + assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, triu_indices_from, np.ones((2, 3))) + + +class TestVander: + def test_basic(self): + c = np.array([0, 1, -2, 3]) + v = vander(c) + powers = np.array([[0, 0, 0, 0, 1], + [1, 1, 1, 1, 1], + [16, -8, 4, -2, 1], + [81, 27, 9, 3, 1]]) + # Check default value of N: + assert_array_equal(v, powers[:, 1:]) + # Check a range of N values, including 0 and 5 (greater than default) + m = powers.shape[1] + for n in range(6): + v = vander(c, N=n) + assert_array_equal(v, powers[:, m-n:m]) + + def test_dtypes(self): + c = array([11, -12, 13], dtype=np.int8) + v = vander(c) + expected = np.array([[121, 11, 1], + [144, -12, 1], + [169, 13, 1]]) + assert_array_equal(v, expected) + + c = array([1.0+1j, 1.0-1j]) + v = vander(c, N=3) + expected = np.array([[2j, 1+1j, 1], + [-2j, 1-1j, 1]]) + # The data is floating point, but the values are small integers, + # so assert_array_equal *should* be safe here (rather than, say, + # assert_array_almost_equal). + assert_array_equal(v, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_type_check.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_type_check.py new file mode 100644 index 00000000..ea032613 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_type_check.py @@ -0,0 +1,478 @@ +import numpy as np +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_raises + ) +from numpy.lib.type_check import ( + common_type, mintypecode, isreal, iscomplex, isposinf, isneginf, + nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close + ) + + +def assert_all(x): + assert_(np.all(x), x) + + +class TestCommonType: + def test_basic(self): + ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32) + af16 = np.array([[1, 2], [3, 4]], dtype=np.float16) + af32 = np.array([[1, 2], [3, 4]], dtype=np.float32) + af64 = np.array([[1, 2], [3, 4]], dtype=np.float64) + acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle) + acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble) + assert_(common_type(ai32) == np.float64) + assert_(common_type(af16) == np.float16) + assert_(common_type(af32) == np.float32) + assert_(common_type(af64) == np.float64) + assert_(common_type(acs) == np.csingle) + assert_(common_type(acd) == np.cdouble) + + +class TestMintypecode: + + def test_default_1(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype), 'd') + assert_equal(mintypecode('f'), 'f') + assert_equal(mintypecode('d'), 'd') + assert_equal(mintypecode('F'), 'F') + assert_equal(mintypecode('D'), 'D') + + def test_default_2(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype+'f'), 'f') + assert_equal(mintypecode(itype+'d'), 'd') + assert_equal(mintypecode(itype+'F'), 'F') + assert_equal(mintypecode(itype+'D'), 'D') + assert_equal(mintypecode('ff'), 'f') + assert_equal(mintypecode('fd'), 'd') + assert_equal(mintypecode('fF'), 'F') + assert_equal(mintypecode('fD'), 'D') + assert_equal(mintypecode('df'), 'd') + assert_equal(mintypecode('dd'), 'd') + #assert_equal(mintypecode('dF',savespace=1),'F') + 
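+        # 'd' (float64) mixed with 'F' (complex64) must widen to 'D'
+        # (complex128) so the result can hold both double precision and a
+        # complex part.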
assert_equal(mintypecode('dF'), 'D') + assert_equal(mintypecode('dD'), 'D') + assert_equal(mintypecode('Ff'), 'F') + #assert_equal(mintypecode('Fd',savespace=1),'F') + assert_equal(mintypecode('Fd'), 'D') + assert_equal(mintypecode('FF'), 'F') + assert_equal(mintypecode('FD'), 'D') + assert_equal(mintypecode('Df'), 'D') + assert_equal(mintypecode('Dd'), 'D') + assert_equal(mintypecode('DF'), 'D') + assert_equal(mintypecode('DD'), 'D') + + def test_default_3(self): + assert_equal(mintypecode('fdF'), 'D') + #assert_equal(mintypecode('fdF',savespace=1),'F') + assert_equal(mintypecode('fdD'), 'D') + assert_equal(mintypecode('fFD'), 'D') + assert_equal(mintypecode('dFD'), 'D') + + assert_equal(mintypecode('ifd'), 'd') + assert_equal(mintypecode('ifF'), 'F') + assert_equal(mintypecode('ifD'), 'D') + assert_equal(mintypecode('idF'), 'D') + #assert_equal(mintypecode('idF',savespace=1),'F') + assert_equal(mintypecode('idD'), 'D') + + +class TestIsscalar: + + def test_basic(self): + assert_(np.isscalar(3)) + assert_(not np.isscalar([3])) + assert_(not np.isscalar((3,))) + assert_(np.isscalar(3j)) + assert_(np.isscalar(4.0)) + + +class TestReal: + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(y, np.real(y)) + + y = np.array(1) + out = np.real(y) + assert_array_equal(y, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + out = np.real(y) + assert_equal(y, out) + assert_(not isinstance(out, np.ndarray)) + + def test_cmplx(self): + y = np.random.rand(10,)+1j*np.random.rand(10,) + assert_array_equal(y.real, np.real(y)) + + y = np.array(1 + 1j) + out = np.real(y) + assert_array_equal(y.real, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + 1j + out = np.real(y) + assert_equal(1.0, out) + assert_(not isinstance(out, np.ndarray)) + + +class TestImag: + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(0, np.imag(y)) + + y = np.array(1) + out = np.imag(y) + assert_array_equal(0, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + out = np.imag(y) + assert_equal(0, out) + assert_(not isinstance(out, np.ndarray)) + + def test_cmplx(self): + y = np.random.rand(10,)+1j*np.random.rand(10,) + assert_array_equal(y.imag, np.imag(y)) + + y = np.array(1 + 1j) + out = np.imag(y) + assert_array_equal(y.imag, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + 1j + out = np.imag(y) + assert_equal(1.0, out) + assert_(not isinstance(out, np.ndarray)) + + +class TestIscomplex: + + def test_fail(self): + z = np.array([-1, 0, 1]) + res = iscomplex(z) + assert_(not np.any(res, axis=0)) + + def test_pass(self): + z = np.array([-1j, 1, 0]) + res = iscomplex(z) + assert_array_equal(res, [1, 0, 0]) + + +class TestIsreal: + + def test_pass(self): + z = np.array([-1, 0, 1j]) + res = isreal(z) + assert_array_equal(res, [1, 1, 0]) + + def test_fail(self): + z = np.array([-1j, 1, 0]) + res = isreal(z) + assert_array_equal(res, [0, 1, 1]) + + +class TestIscomplexobj: + + def test_basic(self): + z = np.array([-1, 0, 1]) + assert_(not iscomplexobj(z)) + z = np.array([-1j, 0, -1]) + assert_(iscomplexobj(z)) + + def test_scalar(self): + assert_(not iscomplexobj(1.0)) + assert_(iscomplexobj(1+0j)) + + def test_list(self): + assert_(iscomplexobj([3, 1+0j, True])) + assert_(not iscomplexobj([3, 1, True])) + + def test_duck(self): + class DummyComplexArray: + @property + def dtype(self): + return np.dtype(complex) + dummy = DummyComplexArray() + assert_(iscomplexobj(dummy)) + + def test_pandas_duck(self): + # This tests a custom np.dtype duck-typed class, such as used by pandas + # 
(pandas.core.dtypes) + class PdComplex(np.complex128): + pass + class PdDtype: + name = 'category' + names = None + type = PdComplex + kind = 'c' + str = ' 1e10) and assert_all(np.isfinite(vals[2])) + assert_equal(type(vals), np.ndarray) + + # perform the same tests but with nan, posinf and neginf keywords + with np.errstate(divide='ignore', invalid='ignore'): + vals = nan_to_num(np.array((-1., 0, 1))/0., + nan=10, posinf=20, neginf=30) + assert_equal(vals, [30, 10, 20]) + assert_all(np.isfinite(vals[[0, 2]])) + assert_equal(type(vals), np.ndarray) + + # perform the same test but in-place + with np.errstate(divide='ignore', invalid='ignore'): + vals = np.array((-1., 0, 1))/0. + result = nan_to_num(vals, copy=False) + + assert_(result is vals) + assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0])) + assert_(vals[1] == 0) + assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) + assert_equal(type(vals), np.ndarray) + + # perform the same test but in-place + with np.errstate(divide='ignore', invalid='ignore'): + vals = np.array((-1., 0, 1))/0. + result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30) + + assert_(result is vals) + assert_equal(vals, [30, 10, 20]) + assert_all(np.isfinite(vals[[0, 2]])) + assert_equal(type(vals), np.ndarray) + + def test_array(self): + vals = nan_to_num([1]) + assert_array_equal(vals, np.array([1], int)) + assert_equal(type(vals), np.ndarray) + vals = nan_to_num([1], nan=10, posinf=20, neginf=30) + assert_array_equal(vals, np.array([1], int)) + assert_equal(type(vals), np.ndarray) + + def test_integer(self): + vals = nan_to_num(1) + assert_all(vals == 1) + assert_equal(type(vals), np.int_) + vals = nan_to_num(1, nan=10, posinf=20, neginf=30) + assert_all(vals == 1) + assert_equal(type(vals), np.int_) + + def test_float(self): + vals = nan_to_num(1.0) + assert_all(vals == 1.0) + assert_equal(type(vals), np.float_) + vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30) + assert_all(vals == 1.1) + assert_equal(type(vals), np.float_) + + def test_complex_good(self): + vals = nan_to_num(1+1j) + assert_all(vals == 1+1j) + assert_equal(type(vals), np.complex_) + vals = nan_to_num(1+1j, nan=10, posinf=20, neginf=30) + assert_all(vals == 1+1j) + assert_equal(type(vals), np.complex_) + + def test_complex_bad(self): + with np.errstate(divide='ignore', invalid='ignore'): + v = 1 + 1j + v += np.array(0+1.j)/0. + vals = nan_to_num(v) + # !! This is actually (unexpectedly) zero + assert_all(np.isfinite(vals)) + assert_equal(type(vals), np.complex_) + + def test_complex_bad2(self): + with np.errstate(divide='ignore', invalid='ignore'): + v = 1 + 1j + v += np.array(-1+1.j)/0. + vals = nan_to_num(v) + assert_all(np.isfinite(vals)) + assert_equal(type(vals), np.complex_) + # Fixme + #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals)) + # !! This is actually (unexpectedly) positive + # !! inf. Comment out for now, and see if it + # !! changes + #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals)) + + def test_do_not_rewrite_previous_keyword(self): + # This is done to test that when, for instance, nan=np.inf then these + # values are not rewritten by posinf keyword to the posinf value. 
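As context for the assertions that follow, here is a standalone sketch of the keyword-precedence rule under test (hedged; it assumes only that numpy is importable as np):

import numpy as np

with np.errstate(divide='ignore', invalid='ignore'):
    vals = np.array([-1.0, 0.0, 1.0]) / 0.0   # -> [-inf, nan, inf]
# The replacement masks are computed before any value is rewritten,
# so a NaN filled via nan=np.inf is NOT then caught by posinf=999.
out = np.nan_to_num(vals, nan=np.inf, posinf=999)
assert np.isinf(out[1]) and out[1] > 0   # nan -> +inf, untouched by posinf
assert out[2] == 999                     # a genuine +inf -> the posinf value
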
+ with np.errstate(divide='ignore', invalid='ignore'): + vals = nan_to_num(np.array((-1., 0, 1))/0., nan=np.inf, posinf=999) + assert_all(np.isfinite(vals[[0, 2]])) + assert_all(vals[0] < -1e10) + assert_equal(vals[[1, 2]], [np.inf, 999]) + assert_equal(type(vals), np.ndarray) + + +class TestRealIfClose: + + def test_basic(self): + a = np.random.rand(10) + b = real_if_close(a+1e-15j) + assert_all(isrealobj(b)) + assert_array_equal(a, b) + b = real_if_close(a+1e-7j) + assert_all(iscomplexobj(b)) + b = real_if_close(a+1e-7j, tol=1e-6) + assert_all(isrealobj(b)) + + +class TestArrayConversion: + + def test_asfarray(self): + a = asfarray(np.array([1, 2, 3])) + assert_equal(a.__class__, np.ndarray) + assert_(np.issubdtype(a.dtype, np.floating)) + + # previously this would infer dtypes from arrays, unlike every single + # other numpy function + assert_raises(TypeError, + asfarray, np.array([1, 2, 3]), dtype=np.array(1.0)) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_ufunclike.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_ufunclike.py new file mode 100644 index 00000000..fac4f41d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_ufunclike.py @@ -0,0 +1,98 @@ +import numpy as np +import numpy.core as nx +import numpy.lib.ufunclike as ufl +from numpy.testing import ( + assert_, assert_equal, assert_array_equal, assert_warns, assert_raises +) + + +class TestUfunclike: + + def test_isposinf(self): + a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) + out = nx.zeros(a.shape, bool) + tgt = nx.array([True, False, False, False, False, False]) + + res = ufl.isposinf(a) + assert_equal(res, tgt) + res = ufl.isposinf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + a = a.astype(np.complex_) + with assert_raises(TypeError): + ufl.isposinf(a) + + def test_isneginf(self): + a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0]) + out = nx.zeros(a.shape, bool) + tgt = nx.array([False, True, False, False, False, False]) + + res = ufl.isneginf(a) + assert_equal(res, tgt) + res = ufl.isneginf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + a = a.astype(np.complex_) + with assert_raises(TypeError): + ufl.isneginf(a) + + def test_fix(self): + a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]]) + out = nx.zeros(a.shape, float) + tgt = nx.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]]) + + res = ufl.fix(a) + assert_equal(res, tgt) + res = ufl.fix(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + assert_equal(ufl.fix(3.14), 3) + + def test_fix_with_subclass(self): + class MyArray(nx.ndarray): + def __new__(cls, data, metadata=None): + res = nx.array(data, copy=True).view(cls) + res.metadata = metadata + return res + + def __array_wrap__(self, obj, context=None): + if isinstance(obj, MyArray): + obj.metadata = self.metadata + return obj + + def __array_finalize__(self, obj): + self.metadata = getattr(obj, 'metadata', None) + return self + + a = nx.array([1.1, -1.1]) + m = MyArray(a, metadata='foo') + f = ufl.fix(m) + assert_array_equal(f, nx.array([1, -1])) + assert_(isinstance(f, MyArray)) + assert_equal(f.metadata, 'foo') + + # check 0d arrays don't decay to scalars + m0d = m[0,...] 
+ m0d.metadata = 'bar' + f0d = ufl.fix(m0d) + assert_(isinstance(f0d, MyArray)) + assert_equal(f0d.metadata, 'bar') + + def test_scalar(self): + x = np.inf + actual = np.isposinf(x) + expected = np.True_ + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + x = -3.4 + actual = np.fix(x) + expected = np.float64(-3.0) + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + out = np.array(0.0) + actual = np.fix(x, out=out) + assert_(actual is out) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_utils.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_utils.py new file mode 100644 index 00000000..45416b05 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/tests/test_utils.py @@ -0,0 +1,228 @@ +import inspect +import sys +import pytest + +import numpy as np +from numpy.core import arange +from numpy.testing import assert_, assert_equal, assert_raises_regex +from numpy.lib import deprecate, deprecate_with_doc +import numpy.lib.utils as utils + +from io import StringIO + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") +@pytest.mark.skipif( + sys.version_info == (3, 10, 0, "candidate", 1), + reason="Broken as of bpo-44524", +) +def test_lookfor(): + out = StringIO() + utils.lookfor('eigenvalue', module='numpy', output=out, + import_modules=False) + out = out.getvalue() + assert_('numpy.linalg.eig' in out) + + +@deprecate +def old_func(self, x): + return x + + +@deprecate(message="Rather use new_func2") +def old_func2(self, x): + return x + + +def old_func3(self, x): + return x +new_func3 = deprecate(old_func3, old_name="old_func3", new_name="new_func3") + + +def old_func4(self, x): + """Summary. + + Further info. + """ + return x +new_func4 = deprecate(old_func4) + + +def old_func5(self, x): + """Summary. + + Bizarre indentation. + """ + return x +new_func5 = deprecate(old_func5, message="This function is\ndeprecated.") + + +def old_func6(self, x): + """ + Also in PEP-257. 
+ """ + return x +new_func6 = deprecate(old_func6) + + +@deprecate_with_doc(msg="Rather use new_func7") +def old_func7(self,x): + return x + + +def test_deprecate_decorator(): + assert_('deprecated' in old_func.__doc__) + + +def test_deprecate_decorator_message(): + assert_('Rather use new_func2' in old_func2.__doc__) + + +def test_deprecate_fn(): + assert_('old_func3' in new_func3.__doc__) + assert_('new_func3' in new_func3.__doc__) + + +def test_deprecate_with_doc_decorator_message(): + assert_('Rather use new_func7' in old_func7.__doc__) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings") +@pytest.mark.parametrize('old_func, new_func', [ + (old_func4, new_func4), + (old_func5, new_func5), + (old_func6, new_func6), +]) +def test_deprecate_help_indentation(old_func, new_func): + _compare_docs(old_func, new_func) + # Ensure we don't mess up the indentation + for knd, func in (('old', old_func), ('new', new_func)): + for li, line in enumerate(func.__doc__.split('\n')): + if li == 0: + assert line.startswith(' ') or not line.startswith(' '), knd + elif line: + assert line.startswith(' '), knd + + +def _compare_docs(old_func, new_func): + old_doc = inspect.getdoc(old_func) + new_doc = inspect.getdoc(new_func) + index = new_doc.index('\n\n') + 2 + assert_equal(new_doc[index:], old_doc) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings") +def test_deprecate_preserve_whitespace(): + assert_('\n Bizarre' in new_func5.__doc__) + + +def test_deprecate_module(): + assert_(old_func.__module__ == __name__) + + +def test_safe_eval_nameconstant(): + # Test if safe_eval supports Python 3.4 _ast.NameConstant + utils.safe_eval('None') + + +class TestByteBounds: + + def test_byte_bounds(self): + # pointer difference matches size * itemsize + # due to contiguity + a = arange(12).reshape(3, 4) + low, high = utils.byte_bounds(a) + assert_equal(high - low, a.size * a.itemsize) + + def test_unusual_order_positive_stride(self): + a = arange(12).reshape(3, 4) + b = a.T + low, high = utils.byte_bounds(b) + assert_equal(high - low, b.size * b.itemsize) + + def test_unusual_order_negative_stride(self): + a = arange(12).reshape(3, 4) + b = a.T[::-1] + low, high = utils.byte_bounds(b) + assert_equal(high - low, b.size * b.itemsize) + + def test_strided(self): + a = arange(12) + b = a[::2] + low, high = utils.byte_bounds(b) + # the largest pointer address is lost (even numbers only in the + # stride), and compensate addresses for striding by 2 + assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize) + + +def test_assert_raises_regex_context_manager(): + with assert_raises_regex(ValueError, 'no deprecation warning'): + raise ValueError('no deprecation warning') + + +def test_info_method_heading(): + # info(class) should only print "Methods:" heading if methods exist + + class NoPublicMethods: + pass + + class WithPublicMethods: + def first_method(): + pass + + def _has_method_heading(cls): + out = StringIO() + utils.info(cls, output=out) + return 'Methods:' in out.getvalue() + + assert _has_method_heading(WithPublicMethods) + assert not _has_method_heading(NoPublicMethods) + + +def test_drop_metadata(): + def _compare_dtypes(dt1, dt2): + return np.can_cast(dt1, dt2, casting='no') + + # structured dtype + dt = np.dtype([('l1', [('l2', np.dtype('S8', metadata={'msg': 'toto'}))])], + metadata={'msg': 'titi'}) + dt_m = utils.drop_metadata(dt) + assert _compare_dtypes(dt, dt_m) is True + assert dt_m.metadata is None + assert dt_m['l1'].metadata is None + 
assert dt_m['l1']['l2'].metadata is None + + # alignement + dt = np.dtype([('x', '= i1.min: + return int8 + if high <= i2.max and low >= i2.min: + return int16 + if high <= i4.max and low >= i4.min: + return int32 + return int64 + + +def _flip_dispatcher(m): + return (m,) + + +@array_function_dispatch(_flip_dispatcher) +def fliplr(m): + """ + Reverse the order of elements along axis 1 (left/right). + + For a 2-D array, this flips the entries in each row in the left/right + direction. Columns are preserved, but appear in a different order than + before. + + Parameters + ---------- + m : array_like + Input array, must be at least 2-D. + + Returns + ------- + f : ndarray + A view of `m` with the columns reversed. Since a view + is returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + flipud : Flip array in the up/down direction. + flip : Flip array in one or more dimensions. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``. + Requires the array to be at least 2-D. + + Examples + -------- + >>> A = np.diag([1.,2.,3.]) + >>> A + array([[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]]) + >>> np.fliplr(A) + array([[0., 0., 1.], + [0., 2., 0.], + [3., 0., 0.]]) + + >>> A = np.random.randn(2,3,5) + >>> np.all(np.fliplr(A) == A[:,::-1,...]) + True + + """ + m = asanyarray(m) + if m.ndim < 2: + raise ValueError("Input must be >= 2-d.") + return m[:, ::-1] + + +@array_function_dispatch(_flip_dispatcher) +def flipud(m): + """ + Reverse the order of elements along axis 0 (up/down). + + For a 2-D array, this flips the entries in each column in the up/down + direction. Rows are preserved, but appear in a different order than before. + + Parameters + ---------- + m : array_like + Input array. + + Returns + ------- + out : array_like + A view of `m` with the rows reversed. Since a view is + returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + fliplr : Flip array in the left/right direction. + flip : Flip array in one or more dimensions. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``. + Requires the array to be at least 1-D. + + Examples + -------- + >>> A = np.diag([1.0, 2, 3]) + >>> A + array([[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]]) + >>> np.flipud(A) + array([[0., 0., 3.], + [0., 2., 0.], + [1., 0., 0.]]) + + >>> A = np.random.randn(2,3,5) + >>> np.all(np.flipud(A) == A[::-1,...]) + True + + >>> np.flipud([1,2]) + array([2, 1]) + + """ + m = asanyarray(m) + if m.ndim < 1: + raise ValueError("Input must be >= 1-d.") + return m[::-1, ...] + + +@set_array_function_like_doc +@set_module('numpy') +def eye(N, M=None, k=0, dtype=float, order='C', *, like=None): + """ + Return a 2-D array with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the output. + M : int, optional + Number of columns in the output. If None, defaults to `N`. + k : int, optional + Index of the diagonal: 0 (the default) refers to the main diagonal, + a positive value refers to an upper diagonal, and a negative value + to a lower diagonal. + dtype : data-type, optional + Data-type of the returned array. + order : {'C', 'F'}, optional + Whether the output should be stored in row-major (C-style) or + column-major (Fortran-style) order in memory. + + .. versionadded:: 1.14.0 + ${ARRAY_FUNCTION_LIKE} + + .. 
versionadded:: 1.20.0 + + Returns + ------- + I : ndarray of shape (N,M) + An array where all elements are equal to zero, except for the `k`-th + diagonal, whose values are equal to one. + + See Also + -------- + identity : (almost) equivalent function + diag : diagonal 2-D array from a 1-D array specified by the user. + + Examples + -------- + >>> np.eye(2, dtype=int) + array([[1, 0], + [0, 1]]) + >>> np.eye(3, k=1) + array([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) + + """ + if like is not None: + return _eye_with_like(like, N, M=M, k=k, dtype=dtype, order=order) + if M is None: + M = N + m = zeros((N, M), dtype=dtype, order=order) + if k >= M: + return m + # Ensure M and k are integers, so we don't get any surprise casting + # results in the expressions `M-k` and `M+1` used below. This avoids + # a problem with inputs with type (for example) np.uint64. + M = operator.index(M) + k = operator.index(k) + if k >= 0: + i = k + else: + i = (-k) * M + m[:M-k].flat[i::M+1] = 1 + return m + + +_eye_with_like = array_function_dispatch()(eye) + + +def _diag_dispatcher(v, k=None): + return (v,) + + +@array_function_dispatch(_diag_dispatcher) +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + See the more detailed documentation for ``numpy.diagonal`` if you use this + function to extract a diagonal and wish to write to the resulting array; + whether it returns a copy or a view depends on what version of numpy you + are using. + + Parameters + ---------- + v : array_like + If `v` is a 2-D array, return a copy of its `k`-th diagonal. + If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th + diagonal. + k : int, optional + Diagonal in question. The default is 0. Use `k>0` for diagonals + above the main diagonal, and `k<0` for diagonals below the main + diagonal. + + Returns + ------- + out : ndarray + The extracted diagonal or constructed diagonal array. + + See Also + -------- + diagonal : Return specified diagonals. + diagflat : Create a 2-D array with the flattened input as a diagonal. + trace : Sum along diagonals. + triu : Upper triangle of an array. + tril : Lower triangle of an array. + + Examples + -------- + >>> x = np.arange(9).reshape((3,3)) + >>> x + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + + >>> np.diag(x) + array([0, 4, 8]) + >>> np.diag(x, k=1) + array([1, 5]) + >>> np.diag(x, k=-1) + array([3, 7]) + + >>> np.diag(np.diag(x)) + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 8]]) + + """ + v = asanyarray(v) + s = v.shape + if len(s) == 1: + n = s[0]+abs(k) + res = zeros((n, n), v.dtype) + if k >= 0: + i = k + else: + i = (-k) * n + res[:n-k].flat[i::n+1] = v + return res + elif len(s) == 2: + return diagonal(v, k) + else: + raise ValueError("Input must be 1- or 2-d.") + + +@array_function_dispatch(_diag_dispatcher) +def diagflat(v, k=0): + """ + Create a two-dimensional array with the flattened input as a diagonal. + + Parameters + ---------- + v : array_like + Input data, which is flattened and set as the `k`-th + diagonal of the output. + k : int, optional + Diagonal to set; 0, the default, corresponds to the "main" diagonal, + a positive (negative) `k` giving the number of the diagonal above + (below) the main. + + Returns + ------- + out : ndarray + The 2-D output array. + + See Also + -------- + diag : MATLAB work-alike for 1-D and 2-D arrays. + diagonal : Return specified diagonals. + trace : Sum along diagonals. 
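Both ``eye`` and ``diag`` above rely on the same flat-stride trick: in a C-ordered (N, M) array, flat indices separated by M+1 step one row down and one column right, i.e. along a diagonal. A minimal re-derivation of that idea, checked against ``np.eye`` (illustrative, not the library internals verbatim):

import numpy as np

N, M, k = 4, 5, 1
m = np.zeros((N, M))
start = k if k >= 0 else (-k) * M    # flat offset of the diagonal's first cell
m[:M - k].flat[start::M + 1] = 1     # the row limit stops the stride wrapping around
assert np.array_equal(m, np.eye(N, M, k=k))
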
+ + Examples + -------- + >>> np.diagflat([[1,2], [3,4]]) + array([[1, 0, 0, 0], + [0, 2, 0, 0], + [0, 0, 3, 0], + [0, 0, 0, 4]]) + + >>> np.diagflat([1,2], 1) + array([[0, 1, 0], + [0, 0, 2], + [0, 0, 0]]) + + """ + try: + wrap = v.__array_wrap__ + except AttributeError: + wrap = None + v = asarray(v).ravel() + s = len(v) + n = s + abs(k) + res = zeros((n, n), v.dtype) + if (k >= 0): + i = arange(0, n-k, dtype=intp) + fi = i+k+i*n + else: + i = arange(0, n+k, dtype=intp) + fi = i+(i-k)*n + res.flat[fi] = v + if not wrap: + return res + return wrap(res) + + +@set_array_function_like_doc +@set_module('numpy') +def tri(N, M=None, k=0, dtype=float, *, like=None): + """ + An array with ones at and below the given diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the array. + M : int, optional + Number of columns in the array. + By default, `M` is taken equal to `N`. + k : int, optional + The sub-diagonal at and below which the array is filled. + `k` = 0 is the main diagonal, while `k` < 0 is below it, + and `k` > 0 is above. The default is 0. + dtype : dtype, optional + Data type of the returned array. The default is float. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + tri : ndarray of shape (N, M) + Array with its lower triangle filled with ones and zero elsewhere; + in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise. + + Examples + -------- + >>> np.tri(3, 5, 2, dtype=int) + array([[1, 1, 1, 0, 0], + [1, 1, 1, 1, 0], + [1, 1, 1, 1, 1]]) + + >>> np.tri(3, 5, -1) + array([[0., 0., 0., 0., 0.], + [1., 0., 0., 0., 0.], + [1., 1., 0., 0., 0.]]) + + """ + if like is not None: + return _tri_with_like(like, N, M=M, k=k, dtype=dtype) + + if M is None: + M = N + + m = greater_equal.outer(arange(N, dtype=_min_int(0, N)), + arange(-k, M-k, dtype=_min_int(-k, M - k))) + + # Avoid making a copy if the requested type is already bool + m = m.astype(dtype, copy=False) + + return m + + +_tri_with_like = array_function_dispatch()(tri) + + +def _trilu_dispatcher(m, k=None): + return (m,) + + +@array_function_dispatch(_trilu_dispatcher) +def tril(m, k=0): + """ + Lower triangle of an array. + + Return a copy of an array with elements above the `k`-th diagonal zeroed. + For arrays with ``ndim`` exceeding 2, `tril` will apply to the final two + axes. + + Parameters + ---------- + m : array_like, shape (..., M, N) + Input array. + k : int, optional + Diagonal above which to zero elements. `k = 0` (the default) is the + main diagonal, `k < 0` is below it and `k > 0` is above. + + Returns + ------- + tril : ndarray, shape (..., M, N) + Lower triangle of `m`, of same shape and data-type as `m`. + + See Also + -------- + triu : same thing, only for the upper triangle + + Examples + -------- + >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 0, 0, 0], + [ 4, 0, 0], + [ 7, 8, 0], + [10, 11, 12]]) + + >>> np.tril(np.arange(3*4*5).reshape(3, 4, 5)) + array([[[ 0, 0, 0, 0, 0], + [ 5, 6, 0, 0, 0], + [10, 11, 12, 0, 0], + [15, 16, 17, 18, 0]], + [[20, 0, 0, 0, 0], + [25, 26, 0, 0, 0], + [30, 31, 32, 0, 0], + [35, 36, 37, 38, 0]], + [[40, 0, 0, 0, 0], + [45, 46, 0, 0, 0], + [50, 51, 52, 0, 0], + [55, 56, 57, 58, 0]]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k, dtype=bool) + + return where(mask, m, zeros(1, m.dtype)) + + +@array_function_dispatch(_trilu_dispatcher) +def triu(m, k=0): + """ + Upper triangle of an array. + + Return a copy of an array with the elements below the `k`-th diagonal + zeroed. 
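Both ``tril`` above and ``triu`` below reduce to a boolean mask from ``tri`` plus a where-selection; a hedged sketch of that equivalence (assumes numpy only):

import numpy as np

m = np.arange(12).reshape(3, 4)
low = np.tri(*m.shape[-2:], k=0, dtype=bool)     # True at and below the diagonal
assert np.array_equal(np.where(low, m, 0), np.tril(m))
# triu takes the complementary selection, with the mask shifted to k-1:
below = np.tri(*m.shape[-2:], k=-1, dtype=bool)  # True strictly below the diagonal
assert np.array_equal(np.where(below, 0, m), np.triu(m))
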
For arrays with ``ndim`` exceeding 2, `triu` will apply to the + final two axes. + + Please refer to the documentation for `tril` for further details. + + See Also + -------- + tril : lower triangle of an array + + Examples + -------- + >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 1, 2, 3], + [ 4, 5, 6], + [ 0, 8, 9], + [ 0, 0, 12]]) + + >>> np.triu(np.arange(3*4*5).reshape(3, 4, 5)) + array([[[ 0, 1, 2, 3, 4], + [ 0, 6, 7, 8, 9], + [ 0, 0, 12, 13, 14], + [ 0, 0, 0, 18, 19]], + [[20, 21, 22, 23, 24], + [ 0, 26, 27, 28, 29], + [ 0, 0, 32, 33, 34], + [ 0, 0, 0, 38, 39]], + [[40, 41, 42, 43, 44], + [ 0, 46, 47, 48, 49], + [ 0, 0, 52, 53, 54], + [ 0, 0, 0, 58, 59]]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k-1, dtype=bool) + + return where(mask, zeros(1, m.dtype), m) + + +def _vander_dispatcher(x, N=None, increasing=None): + return (x,) + + +# Originally borrowed from John Hunter and matplotlib +@array_function_dispatch(_vander_dispatcher) +def vander(x, N=None, increasing=False): + """ + Generate a Vandermonde matrix. + + The columns of the output matrix are powers of the input vector. The + order of the powers is determined by the `increasing` boolean argument. + Specifically, when `increasing` is False, the `i`-th output column is + the input vector raised element-wise to the power of ``N - i - 1``. Such + a matrix with a geometric progression in each row is named for Alexandre- + Theophile Vandermonde. + + Parameters + ---------- + x : array_like + 1-D input array. + N : int, optional + Number of columns in the output. If `N` is not specified, a square + array is returned (``N = len(x)``). + increasing : bool, optional + Order of the powers of the columns. If True, the powers increase + from left to right, if False (the default) they are reversed. + + .. versionadded:: 1.9.0 + + Returns + ------- + out : ndarray + Vandermonde matrix. If `increasing` is False, the first column is + ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is + True, the columns are ``x^0, x^1, ..., x^(N-1)``. 
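The construction used in the implementation further below fills the non-constant columns with x and takes a running product across each row, so the j-th increasing-power column ends up holding x**j; a sketch of that idea, checked against np.vander:

import numpy as np

x = np.array([1, 2, 3, 5])
N = 4
v = np.empty((len(x), N), dtype=np.promote_types(x.dtype, int))
tmp = v[:, ::-1]                  # write increasing powers into a reversed view
tmp[:, 0] = 1
tmp[:, 1:] = x[:, None]
np.multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
assert np.array_equal(v, np.vander(x, N))   # decreasing powers, the default
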
+ + See Also + -------- + polynomial.polynomial.polyvander + + Examples + -------- + >>> x = np.array([1, 2, 3, 5]) + >>> N = 3 + >>> np.vander(x, N) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> np.column_stack([x**(N-1-i) for i in range(N)]) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> x = np.array([1, 2, 3, 5]) + >>> np.vander(x) + array([[ 1, 1, 1, 1], + [ 8, 4, 2, 1], + [ 27, 9, 3, 1], + [125, 25, 5, 1]]) + >>> np.vander(x, increasing=True) + array([[ 1, 1, 1, 1], + [ 1, 2, 4, 8], + [ 1, 3, 9, 27], + [ 1, 5, 25, 125]]) + + The determinant of a square Vandermonde matrix is the product + of the differences between the values of the input vector: + + >>> np.linalg.det(np.vander(x)) + 48.000000000000043 # may vary + >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) + 48 + + """ + x = asarray(x) + if x.ndim != 1: + raise ValueError("x must be a one-dimensional array or sequence.") + if N is None: + N = len(x) + + v = empty((len(x), N), dtype=promote_types(x.dtype, int)) + tmp = v[:, ::-1] if not increasing else v + + if N > 0: + tmp[:, 0] = 1 + if N > 1: + tmp[:, 1:] = x[:, None] + multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1) + + return v + + +def _histogram2d_dispatcher(x, y, bins=None, range=None, density=None, + weights=None): + yield x + yield y + + # This terrible logic is adapted from the checks in histogram2d + try: + N = len(bins) + except TypeError: + N = 1 + if N == 2: + yield from bins # bins=[x, y] + else: + yield bins + + yield weights + + +@array_function_dispatch(_histogram2d_dispatcher) +def histogram2d(x, y, bins=10, range=None, density=None, weights=None): + """ + Compute the bi-dimensional histogram of two data samples. + + Parameters + ---------- + x : array_like, shape (N,) + An array containing the x coordinates of the points to be + histogrammed. + y : array_like, shape (N,) + An array containing the y coordinates of the points to be + histogrammed. + bins : int or array_like or [int, int] or [array, array], optional + The bin specification: + + * If int, the number of bins for the two dimensions (nx=ny=bins). + * If array_like, the bin edges for the two dimensions + (x_edges=y_edges=bins). + * If [int, int], the number of bins in each dimension + (nx, ny = bins). + * If [array, array], the bin edges in each dimension + (x_edges, y_edges = bins). + * A combination [int, array] or [array, int], where int + is the number of bins and array is the bin edges. + + range : array_like, shape(2,2), optional + The leftmost and rightmost edges of the bins along each dimension + (if not specified explicitly in the `bins` parameters): + ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range + will be considered outliers and not tallied in the histogram. + density : bool, optional + If False, the default, returns the number of samples in each bin. + If True, returns the probability *density* function at the bin, + ``bin_count / sample_count / bin_area``. + weights : array_like, shape(N,), optional + An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. + Weights are normalized to 1 if `density` is True. If `density` is + False, the values of the returned histogram are equal to the sum of + the weights belonging to the samples falling into each bin. + + Returns + ------- + H : ndarray, shape(nx, ny) + The bi-dimensional histogram of samples `x` and `y`. Values in `x` + are histogrammed along the first dimension and values in `y` are + histogrammed along the second dimension. 
+ xedges : ndarray, shape(nx+1,) + The bin edges along the first dimension. + yedges : ndarray, shape(ny+1,) + The bin edges along the second dimension. + + See Also + -------- + histogram : 1D histogram + histogramdd : Multidimensional histogram + + Notes + ----- + When `density` is True, then the returned histogram is the sample + density, defined such that the sum over bins of the product + ``bin_value * bin_area`` is 1. + + Please note that the histogram does not follow the Cartesian convention + where `x` values are on the abscissa and `y` values on the ordinate + axis. Rather, `x` is histogrammed along the first dimension of the + array (vertical), and `y` along the second dimension of the array + (horizontal). This ensures compatibility with `histogramdd`. + + Examples + -------- + >>> from matplotlib.image import NonUniformImage + >>> import matplotlib.pyplot as plt + + Construct a 2-D histogram with variable bin width. First define the bin + edges: + + >>> xedges = [0, 1, 3, 5] + >>> yedges = [0, 2, 3, 4, 6] + + Next we create a histogram H with random bin content: + + >>> x = np.random.normal(2, 1, 100) + >>> y = np.random.normal(1, 1, 100) + >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges)) + >>> # Histogram does not follow Cartesian convention (see Notes), + >>> # therefore transpose H for visualization purposes. + >>> H = H.T + + :func:`imshow ` can only display square bins: + + >>> fig = plt.figure(figsize=(7, 3)) + >>> ax = fig.add_subplot(131, title='imshow: square bins') + >>> plt.imshow(H, interpolation='nearest', origin='lower', + ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) + + + :func:`pcolormesh ` can display actual edges: + + >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges', + ... aspect='equal') + >>> X, Y = np.meshgrid(xedges, yedges) + >>> ax.pcolormesh(X, Y, H) + + + :class:`NonUniformImage ` can be used to + display actual bin edges with interpolation: + + >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated', + ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]]) + >>> im = NonUniformImage(ax, interpolation='bilinear') + >>> xcenters = (xedges[:-1] + xedges[1:]) / 2 + >>> ycenters = (yedges[:-1] + yedges[1:]) / 2 + >>> im.set_data(xcenters, ycenters, H) + >>> ax.add_image(im) + >>> plt.show() + + It is also possible to construct a 2-D histogram without specifying bin + edges: + + >>> # Generate non-symmetric test data + >>> n = 10000 + >>> x = np.linspace(1, 100, n) + >>> y = 2*np.log(x) + np.random.rand(n) - 0.5 + >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges + >>> H, yedges, xedges = np.histogram2d(y, x, bins=20) + + Now we can plot the histogram using + :func:`pcolormesh `, and a + :func:`hexbin ` for comparison. 
+ + >>> # Plot histogram using pcolormesh + >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True) + >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow') + >>> ax1.plot(x, 2*np.log(x), 'k-') + >>> ax1.set_xlim(x.min(), x.max()) + >>> ax1.set_ylim(y.min(), y.max()) + >>> ax1.set_xlabel('x') + >>> ax1.set_ylabel('y') + >>> ax1.set_title('histogram2d') + >>> ax1.grid() + + >>> # Create hexbin plot for comparison + >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow') + >>> ax2.plot(x, 2*np.log(x), 'k-') + >>> ax2.set_title('hexbin') + >>> ax2.set_xlim(x.min(), x.max()) + >>> ax2.set_xlabel('x') + >>> ax2.grid() + + >>> plt.show() + """ + from numpy import histogramdd + + if len(x) != len(y): + raise ValueError('x and y must have the same length.') + + try: + N = len(bins) + except TypeError: + N = 1 + + if N != 1 and N != 2: + xedges = yedges = asarray(bins) + bins = [xedges, yedges] + hist, edges = histogramdd([x, y], bins, range, density, weights) + return hist, edges[0], edges[1] + + +@set_module('numpy') +def mask_indices(n, mask_func, k=0): + """ + Return the indices to access (n, n) arrays, given a masking function. + + Assume `mask_func` is a function that, for a square array a of size + ``(n, n)`` with a possible offset argument `k`, when called as + ``mask_func(a, k)`` returns a new array with zeros in certain locations + (functions like `triu` or `tril` do precisely this). Then this function + returns the indices where the non-zero values would be located. + + Parameters + ---------- + n : int + The returned indices will be valid to access arrays of shape (n, n). + mask_func : callable + A function whose call signature is similar to that of `triu`, `tril`. + That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`. + `k` is an optional argument to the function. + k : scalar + An optional argument which is passed through to `mask_func`. Functions + like `triu`, `tril` take a second argument that is interpreted as an + offset. + + Returns + ------- + indices : tuple of arrays. + The `n` arrays of indices corresponding to the locations where + ``mask_func(np.ones((n, n)), k)`` is True. + + See Also + -------- + triu, tril, triu_indices, tril_indices + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + These are the indices that would allow you to access the upper triangular + part of any 3x3 array: + + >>> iu = np.mask_indices(3, np.triu) + + For example, if `a` is a 3x3 array: + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> a[iu] + array([0, 1, 2, 4, 5, 8]) + + An offset can be passed also to the masking function. This gets us the + indices starting on the first diagonal right of the main one: + + >>> iu1 = np.mask_indices(3, np.triu, 1) + + with which we now extract only three elements: + + >>> a[iu1] + array([1, 2, 5]) + + """ + m = ones((n, n), int) + a = mask_func(m, k) + return nonzero(a != 0) + + +@set_module('numpy') +def tril_indices(n, k=0, m=None): + """ + Return the indices for the lower-triangle of an (n, m) array. + + Parameters + ---------- + n : int + The row dimension of the arrays for which the returned + indices will be valid. + k : int, optional + Diagonal offset (see `tril` for details). + m : int, optional + .. versionadded:: 1.9.0 + + The column dimension of the arrays for which the returned + arrays will be valid. + By default `m` is taken equal to `n`. + + + Returns + ------- + inds : tuple of arrays + The indices for the triangle. 
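As the implementation of ``histogram2d`` above shows, it normalizes the bin spec and hands everything to ``histogramdd``; in particular a single edge array is reused for both axes. A hedged sketch of that equivalence with synthetic data (the seed and sizes are arbitrary):

import numpy as np

rng = np.random.default_rng(0)
x, y = rng.normal(size=100), rng.normal(size=100)
edges = np.linspace(-3.0, 3.0, 7)
H2, xe, ye = np.histogram2d(x, y, bins=edges)          # one edge array -> both axes
Hd, (xed, yed) = np.histogramdd([x, y], bins=[edges, edges])
assert np.array_equal(H2, Hd)
assert np.array_equal(xe, xed) and np.array_equal(ye, yed)
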
The returned tuple contains two arrays, + each with the indices along one dimension of the array. + + See also + -------- + triu_indices : similar function, for upper-triangular. + mask_indices : generic function accepting an arbitrary mask function. + tril, triu + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Compute two different sets of indices to access 4x4 arrays, one for the + lower triangular part starting at the main diagonal, and one starting two + diagonals further right: + + >>> il1 = np.tril_indices(4) + >>> il2 = np.tril_indices(4, 2) + + Here is how they can be used with a sample array: + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Both for indexing: + + >>> a[il1] + array([ 0, 4, 5, ..., 13, 14, 15]) + + And for assigning values: + + >>> a[il1] = -1 + >>> a + array([[-1, 1, 2, 3], + [-1, -1, 6, 7], + [-1, -1, -1, 11], + [-1, -1, -1, -1]]) + + These cover almost the whole array (two diagonals right of the main one): + + >>> a[il2] = -10 + >>> a + array([[-10, -10, -10, 3], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]]) + + """ + tri_ = tri(n, m, k=k, dtype=bool) + + return tuple(broadcast_to(inds, tri_.shape)[tri_] + for inds in indices(tri_.shape, sparse=True)) + + +def _trilu_indices_form_dispatcher(arr, k=None): + return (arr,) + + +@array_function_dispatch(_trilu_indices_form_dispatcher) +def tril_indices_from(arr, k=0): + """ + Return the indices for the lower-triangle of arr. + + See `tril_indices` for full details. + + Parameters + ---------- + arr : array_like + The indices will be valid for square arrays whose dimensions are + the same as arr. + k : int, optional + Diagonal offset (see `tril` for details). + + Examples + -------- + + Create a 4 by 4 array. + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Pass the array to get the indices of the lower triangular elements. + + >>> trili = np.tril_indices_from(a) + >>> trili + (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3])) + + >>> a[trili] + array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15]) + + This is syntactic sugar for tril_indices(). + + >>> np.tril_indices(a.shape[0]) + (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3])) + + Use the `k` parameter to return the indices for the lower triangular array + up to the k-th diagonal. + + >>> trili1 = np.tril_indices_from(a, k=1) + >>> a[trili1] + array([ 0, 1, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15]) + + See Also + -------- + tril_indices, tril, triu_indices_from + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1]) + + +@set_module('numpy') +def triu_indices(n, k=0, m=None): + """ + Return the indices for the upper-triangle of an (n, m) array. + + Parameters + ---------- + n : int + The size of the arrays for which the returned indices will + be valid. + k : int, optional + Diagonal offset (see `triu` for details). + m : int, optional + .. versionadded:: 1.9.0 + + The column dimension of the arrays for which the returned + arrays will be valid. + By default `m` is taken equal to `n`. + + + Returns + ------- + inds : tuple, shape(2) of ndarrays, shape(`n`) + The indices for the triangle. The returned tuple contains two arrays, + each with the indices along one dimension of the array. 
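The index-building step shared by ``tril_indices`` above and ``triu_indices`` below compresses a sparse open grid of row/column indices through a boolean triangle mask; re-derived for a 4x4 case (a sketch, assuming numpy only):

import numpy as np

n = 4
mask = np.tri(n, n, k=0, dtype=bool)            # lower-triangle mask
rows, cols = (np.broadcast_to(ix, mask.shape)[mask]
              for ix in np.indices(mask.shape, sparse=True))
ref_rows, ref_cols = np.tril_indices(n)
assert np.array_equal(rows, ref_rows) and np.array_equal(cols, ref_cols)
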
Can be used + to slice a ndarray of shape(`n`, `n`). + + See also + -------- + tril_indices : similar function, for lower-triangular. + mask_indices : generic function accepting an arbitrary mask function. + triu, tril + + Notes + ----- + .. versionadded:: 1.4.0 + + Examples + -------- + Compute two different sets of indices to access 4x4 arrays, one for the + upper triangular part starting at the main diagonal, and one starting two + diagonals further right: + + >>> iu1 = np.triu_indices(4) + >>> iu2 = np.triu_indices(4, 2) + + Here is how they can be used with a sample array: + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Both for indexing: + + >>> a[iu1] + array([ 0, 1, 2, ..., 10, 11, 15]) + + And for assigning values: + + >>> a[iu1] = -1 + >>> a + array([[-1, -1, -1, -1], + [ 4, -1, -1, -1], + [ 8, 9, -1, -1], + [12, 13, 14, -1]]) + + These cover only a small part of the whole array (two diagonals right + of the main one): + + >>> a[iu2] = -10 + >>> a + array([[ -1, -1, -10, -10], + [ 4, -1, -1, -10], + [ 8, 9, -1, -1], + [ 12, 13, 14, -1]]) + + """ + tri_ = ~tri(n, m, k=k - 1, dtype=bool) + + return tuple(broadcast_to(inds, tri_.shape)[tri_] + for inds in indices(tri_.shape, sparse=True)) + + +@array_function_dispatch(_trilu_indices_form_dispatcher) +def triu_indices_from(arr, k=0): + """ + Return the indices for the upper-triangle of arr. + + See `triu_indices` for full details. + + Parameters + ---------- + arr : ndarray, shape(N, N) + The indices will be valid for square arrays. + k : int, optional + Diagonal offset (see `triu` for details). + + Returns + ------- + triu_indices_from : tuple, shape(2) of ndarray, shape(N) + Indices for the upper-triangle of `arr`. + + Examples + -------- + + Create a 4 by 4 array. + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Pass the array to get the indices of the upper triangular elements. + + >>> triui = np.triu_indices_from(a) + >>> triui + (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3])) + + >>> a[triui] + array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15]) + + This is syntactic sugar for triu_indices(). + + >>> np.triu_indices(a.shape[0]) + (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3])) + + Use the `k` parameter to return the indices for the upper triangular array + from the k-th diagonal. + + >>> triuim1 = np.triu_indices_from(a, k=1) + >>> a[triuim1] + array([ 1, 2, 3, 6, 7, 11]) + + + See Also + -------- + triu_indices, triu, tril_indices_from + + Notes + ----- + .. 
versionadded:: 1.4.0 + + """ + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1]) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/twodim_base.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/twodim_base.pyi new file mode 100644 index 00000000..1b3b94bd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/twodim_base.pyi @@ -0,0 +1,239 @@ +from collections.abc import Callable, Sequence +from typing import ( + Any, + overload, + TypeVar, + Union, +) + +from numpy import ( + generic, + number, + bool_, + timedelta64, + datetime64, + int_, + intp, + float64, + signedinteger, + floating, + complexfloating, + object_, + _OrderCF, +) + +from numpy._typing import ( + DTypeLike, + _DTypeLike, + ArrayLike, + _ArrayLike, + NDArray, + _SupportsArrayFunc, + _ArrayLikeInt_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeObject_co, +) + +_T = TypeVar("_T") +_SCT = TypeVar("_SCT", bound=generic) + +# The returned arrays dtype must be compatible with `np.equal` +_MaskFunc = Callable[ + [NDArray[int_], _T], + NDArray[Union[number[Any], bool_, timedelta64, datetime64, object_]], +] + +__all__: list[str] + +@overload +def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +@overload +def fliplr(m: ArrayLike) -> NDArray[Any]: ... + +@overload +def flipud(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +@overload +def flipud(m: ArrayLike) -> NDArray[Any]: ... + +@overload +def eye( + N: int, + M: None | int = ..., + k: int = ..., + dtype: None = ..., + order: _OrderCF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[float64]: ... +@overload +def eye( + N: int, + M: None | int = ..., + k: int = ..., + dtype: _DTypeLike[_SCT] = ..., + order: _OrderCF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload +def eye( + N: int, + M: None | int = ..., + k: int = ..., + dtype: DTypeLike = ..., + order: _OrderCF = ..., + *, + like: None | _SupportsArrayFunc = ..., +) -> NDArray[Any]: ... + +@overload +def diag(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +@overload +def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def diagflat(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +@overload +def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def tri( + N: int, + M: None | int = ..., + k: int = ..., + dtype: None = ..., + *, + like: None | _SupportsArrayFunc = ... +) -> NDArray[float64]: ... +@overload +def tri( + N: int, + M: None | int = ..., + k: int = ..., + dtype: _DTypeLike[_SCT] = ..., + *, + like: None | _SupportsArrayFunc = ... +) -> NDArray[_SCT]: ... +@overload +def tri( + N: int, + M: None | int = ..., + k: int = ..., + dtype: DTypeLike = ..., + *, + like: None | _SupportsArrayFunc = ... +) -> NDArray[Any]: ... + +@overload +def tril(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +@overload +def tril(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def triu(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +@overload +def triu(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def vander( # type: ignore[misc] + x: _ArrayLikeInt_co, + N: None | int = ..., + increasing: bool = ..., +) -> NDArray[signedinteger[Any]]: ... +@overload +def vander( # type: ignore[misc] + x: _ArrayLikeFloat_co, + N: None | int = ..., + increasing: bool = ..., +) -> NDArray[floating[Any]]: ... 
+@overload +def vander( + x: _ArrayLikeComplex_co, + N: None | int = ..., + increasing: bool = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def vander( + x: _ArrayLikeObject_co, + N: None | int = ..., + increasing: bool = ..., +) -> NDArray[object_]: ... + +@overload +def histogram2d( # type: ignore[misc] + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + bins: int | Sequence[int] = ..., + range: None | _ArrayLikeFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLikeFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[floating[Any]], + NDArray[floating[Any]], +]: ... +@overload +def histogram2d( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + bins: int | Sequence[int] = ..., + range: None | _ArrayLikeFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLikeFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[complexfloating[Any, Any]], + NDArray[complexfloating[Any, Any]], +]: ... +@overload # TODO: Sort out `bins` +def histogram2d( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + bins: Sequence[_ArrayLikeInt_co], + range: None | _ArrayLikeFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLikeFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[Any], + NDArray[Any], +]: ... + +# NOTE: we're assuming/demanding here the `mask_func` returns +# an ndarray of shape `(n, n)`; otherwise there is the possibility +# of the output tuple having more or less than 2 elements +@overload +def mask_indices( + n: int, + mask_func: _MaskFunc[int], + k: int = ..., +) -> tuple[NDArray[intp], NDArray[intp]]: ... +@overload +def mask_indices( + n: int, + mask_func: _MaskFunc[_T], + k: _T, +) -> tuple[NDArray[intp], NDArray[intp]]: ... + +def tril_indices( + n: int, + k: int = ..., + m: None | int = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... + +def tril_indices_from( + arr: NDArray[Any], + k: int = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... + +def triu_indices( + n: int, + k: int = ..., + m: None | int = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... + +def triu_indices_from( + arr: NDArray[Any], + k: int = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/type_check.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/type_check.py new file mode 100644 index 00000000..3f84b80e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/type_check.py @@ -0,0 +1,735 @@ +"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py + +""" +import functools + +__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', + 'isreal', 'nan_to_num', 'real', 'real_if_close', + 'typename', 'asfarray', 'mintypecode', + 'common_type'] + +from .._utils import set_module +import numpy.core.numeric as _nx +from numpy.core.numeric import asarray, asanyarray, isnan, zeros +from numpy.core import overrides, getlimits +from .ufunclike import isneginf, isposinf + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?' + + +@set_module('numpy') +def mintypecode(typechars, typeset='GDFgdf', default='d'): + """ + Return the character for the minimum-size type to which given types can + be safely cast. + + The returned type character must represent the smallest size dtype such + that an array of the returned type can handle the data from an array of + all types in `typechars` (or if `typechars` is an array, then its + dtype.char). 
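A few concrete resolutions of that rule, taken from the cases the implementation below handles (the function's documented behaviour restated as asserts; assumes numpy as np):

import numpy as np

assert np.mintypecode(['d', 'f', 'S']) == 'd'   # 'S' is outside the typeset and is dropped
assert np.mintypecode('Fd') == 'D'              # csingle + double together need cdouble
assert np.mintypecode(np.array([1.1, 2 - 3j])) == 'D'   # array input goes by dtype.char
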
+ + Parameters + ---------- + typechars : list of str or array_like + If a list of strings, each string should represent a dtype. + If array_like, the character representation of the array dtype is used. + typeset : str or list of str, optional + The set of characters that the returned character is chosen from. + The default set is 'GDFgdf'. + default : str, optional + The default character, this is returned if none of the characters in + `typechars` matches a character in `typeset`. + + Returns + ------- + typechar : str + The character representing the minimum-size type that was found. + + See Also + -------- + dtype, sctype2char, maximum_sctype + + Examples + -------- + >>> np.mintypecode(['d', 'f', 'S']) + 'd' + >>> x = np.array([1.1, 2-3.j]) + >>> np.mintypecode(x) + 'D' + + >>> np.mintypecode('abceh', default='G') + 'G' + + """ + typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char + for t in typechars) + intersection = set(t for t in typecodes if t in typeset) + if not intersection: + return default + if 'F' in intersection and 'd' in intersection: + return 'D' + return min(intersection, key=_typecodes_by_elsize.index) + + +def _asfarray_dispatcher(a, dtype=None): + return (a,) + + +@array_function_dispatch(_asfarray_dispatcher) +def asfarray(a, dtype=_nx.float_): + """ + Return an array converted to a float type. + + Parameters + ---------- + a : array_like + The input array. + dtype : str or dtype object, optional + Float type code to coerce input array `a`. If `dtype` is one of the + 'int' dtypes, it is replaced with float64. + + Returns + ------- + out : ndarray + The input `a` as a float ndarray. + + Examples + -------- + >>> np.asfarray([2, 3]) + array([2., 3.]) + >>> np.asfarray([2, 3], dtype='float') + array([2., 3.]) + >>> np.asfarray([2, 3], dtype='int8') + array([2., 3.]) + + """ + if not _nx.issubdtype(dtype, _nx.inexact): + dtype = _nx.float_ + return asarray(a, dtype=dtype) + + +def _real_dispatcher(val): + return (val,) + + +@array_function_dispatch(_real_dispatcher) +def real(val): + """ + Return the real part of the complex argument. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray or scalar + The real component of the complex argument. If `val` is real, the type + of `val` is used for the output. If `val` has complex elements, the + returned type is float. + + See Also + -------- + real_if_close, imag, angle + + Examples + -------- + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.real + array([1., 3., 5.]) + >>> a.real = 9 + >>> a + array([9.+2.j, 9.+4.j, 9.+6.j]) + >>> a.real = np.array([9, 8, 7]) + >>> a + array([9.+2.j, 8.+4.j, 7.+6.j]) + >>> np.real(1 + 1j) + 1.0 + + """ + try: + return val.real + except AttributeError: + return asanyarray(val).real + + +def _imag_dispatcher(val): + return (val,) + + +@array_function_dispatch(_imag_dispatcher) +def imag(val): + """ + Return the imaginary part of the complex argument. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray or scalar + The imaginary component of the complex argument. If `val` is real, + the type of `val` is used for the output. If `val` has complex + elements, the returned type is float. + + See Also + -------- + real, angle, real_if_close + + Examples + -------- + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.imag + array([2., 4., 6.]) + >>> a.imag = np.array([8, 10, 12]) + >>> a + array([1. 
+8.j, 3.+10.j, 5.+12.j]) + >>> np.imag(1 + 1j) + 1.0 + + """ + try: + return val.imag + except AttributeError: + return asanyarray(val).imag + + +def _is_type_dispatcher(x): + return (x,) + + +@array_function_dispatch(_is_type_dispatcher) +def iscomplex(x): + """ + Returns a bool array, where True if input element is complex. + + What is tested is whether the input has a non-zero imaginary part, not if + the input type is complex. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray of bools + Output array. + + See Also + -------- + isreal + iscomplexobj : Return True if x is a complex type or an array of complex + numbers. + + Examples + -------- + >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) + array([ True, False, False, False, False, True]) + + """ + ax = asanyarray(x) + if issubclass(ax.dtype.type, _nx.complexfloating): + return ax.imag != 0 + res = zeros(ax.shape, bool) + return res[()] # convert to scalar if needed + + +@array_function_dispatch(_is_type_dispatcher) +def isreal(x): + """ + Returns a bool array, where True if input element is real. + + If element has complex type with zero complex part, the return value + for that element is True. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray, bool + Boolean array of same shape as `x`. + + Notes + ----- + `isreal` may behave unexpectedly for string or object arrays (see examples) + + See Also + -------- + iscomplex + isrealobj : Return True if x is not a complex type. + + Examples + -------- + >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) + >>> np.isreal(a) + array([False, True, True, True, True, False]) + + The function does not work on string arrays. + + >>> a = np.array([2j, "a"], dtype="U") + >>> np.isreal(a) # Warns about non-elementwise comparison + False + + Returns True for all elements in input array of ``dtype=object`` even if + any of the elements is complex. + + >>> a = np.array([1, "2", 3+4j], dtype=object) + >>> np.isreal(a) + array([ True, True, True]) + + isreal should not be used with object arrays + + >>> a = np.array([1+2j, 2+1j], dtype=object) + >>> np.isreal(a) + array([ True, True]) + + """ + return imag(x) == 0 + + +@array_function_dispatch(_is_type_dispatcher) +def iscomplexobj(x): + """ + Check for a complex type or an array of complex numbers. + + The type of the input is checked, not the value. Even if the input + has an imaginary part equal to zero, `iscomplexobj` evaluates to True. + + Parameters + ---------- + x : any + The input can be of any type and shape. + + Returns + ------- + iscomplexobj : bool + The return value, True if `x` is of a complex type or has at least + one complex element. + + See Also + -------- + isrealobj, iscomplex + + Examples + -------- + >>> np.iscomplexobj(1) + False + >>> np.iscomplexobj(1+0j) + True + >>> np.iscomplexobj([3, 1+0j, True]) + True + + """ + try: + dtype = x.dtype + type_ = dtype.type + except AttributeError: + type_ = asarray(x).dtype.type + return issubclass(type_, _nx.complexfloating) + + +@array_function_dispatch(_is_type_dispatcher) +def isrealobj(x): + """ + Return True if x is a not complex type or an array of complex numbers. + + The type of the input is checked, not the value. So even if the input + has an imaginary part equal to zero, `isrealobj` evaluates to False + if the data type is complex. + + Parameters + ---------- + x : any + The input can be of any type and shape. 
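The value-versus-type distinction between ``iscomplex`` and ``iscomplexobj`` above is easy to trip over; a short sketch making it concrete:

import numpy as np

z = np.array([1 + 0j, 1 + 1j])
assert np.array_equal(np.iscomplex(z), [False, True])  # per element, by value
assert np.iscomplexobj(z)                              # whole array, by dtype
assert not np.iscomplexobj(np.array([1.0, 2.0]))
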
+ + Returns + ------- + y : bool + The return value, False if `x` is of a complex type. + + See Also + -------- + iscomplexobj, isreal + + Notes + ----- + The function is only meant for arrays with numerical values but it + accepts all other objects. Since it assumes array input, the return + value of other objects may be True. + + >>> np.isrealobj('A string') + True + >>> np.isrealobj(False) + True + >>> np.isrealobj(None) + True + + Examples + -------- + >>> np.isrealobj(1) + True + >>> np.isrealobj(1+0j) + False + >>> np.isrealobj([3, 1+0j, True]) + False + + """ + return not iscomplexobj(x) + +#----------------------------------------------------------------------------- + +def _getmaxmin(t): + from numpy.core import getlimits + f = getlimits.finfo(t) + return f.max, f.min + + +def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None): + return (x,) + + +@array_function_dispatch(_nan_to_num_dispatcher) +def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): + """ + Replace NaN with zero and infinity with large finite numbers (default + behaviour) or with the numbers defined by the user using the `nan`, + `posinf` and/or `neginf` keywords. + + If `x` is inexact, NaN is replaced by zero or by the user defined value in + `nan` keyword, infinity is replaced by the largest finite floating point + values representable by ``x.dtype`` or by the user defined value in + `posinf` keyword and -infinity is replaced by the most negative finite + floating point values representable by ``x.dtype`` or by the user defined + value in `neginf` keyword. + + For complex dtypes, the above is applied to each of the real and + imaginary components of `x` separately. + + If `x` is not inexact, then no replacements are made. + + Parameters + ---------- + x : scalar or array_like + Input data. + copy : bool, optional + Whether to create a copy of `x` (True) or to replace values + in-place (False). The in-place operation only occurs if + casting to an array does not require a copy. + Default is True. + + .. versionadded:: 1.13 + nan : int, float, optional + Value to be used to fill NaN values. If no value is passed + then NaN values will be replaced with 0.0. + + .. versionadded:: 1.17 + posinf : int, float, optional + Value to be used to fill positive infinity values. If no value is + passed then positive infinity values will be replaced with a very + large number. + + .. versionadded:: 1.17 + neginf : int, float, optional + Value to be used to fill negative infinity values. If no value is + passed then negative infinity values will be replaced with a very + small (or negative) number. + + .. versionadded:: 1.17 + + + + Returns + ------- + out : ndarray + `x`, with the non-finite values replaced. If `copy` is False, this may + be `x` itself. + + See Also + -------- + isinf : Shows which elements are positive or negative infinity. + isneginf : Shows which elements are negative infinity. + isposinf : Shows which elements are positive infinity. + isnan : Shows which elements are Not a Number (NaN). + isfinite : Shows which elements are finite (not NaN, not infinity) + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. 
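One consequence worth making explicit before the examples: for complex input, the real and imaginary components are repaired independently (a hedged sketch):

import numpy as np

z = np.array([complex(np.nan, np.inf)])
out = np.nan_to_num(z, nan=0.0, posinf=1.0)
assert out[0] == 0.0 + 1.0j   # real part: nan -> 0.0; imaginary part: +inf -> 1.0
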
+
+    Examples
+    --------
+    >>> np.nan_to_num(np.inf)
+    1.7976931348623157e+308
+    >>> np.nan_to_num(-np.inf)
+    -1.7976931348623157e+308
+    >>> np.nan_to_num(np.nan)
+    0.0
+    >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
+    >>> np.nan_to_num(x)
+    array([ 1.79769313e+308, -1.79769313e+308,  0.00000000e+000, # may vary
+           -1.28000000e+002,  1.28000000e+002])
+    >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
+    array([ 3.3333333e+07,  3.3333333e+07, -9.9990000e+03,
+           -1.2800000e+02,  1.2800000e+02])
+    >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
+    >>> np.nan_to_num(y)
+    array([  1.79769313e+308 +0.00000000e+000j, # may vary
+             0.00000000e+000 +0.00000000e+000j,
+             0.00000000e+000 +1.79769313e+308j])
+    >>> np.nan_to_num(y, nan=111111, posinf=222222)
+    array([222222.+111111.j, 111111.     +0.j, 111111.+222222.j])
+    """
+    x = _nx.array(x, subok=True, copy=copy)
+    xtype = x.dtype.type
+
+    isscalar = (x.ndim == 0)
+
+    if not issubclass(xtype, _nx.inexact):
+        return x[()] if isscalar else x
+
+    iscomplex = issubclass(xtype, _nx.complexfloating)
+
+    dest = (x.real, x.imag) if iscomplex else (x,)
+    maxf, minf = _getmaxmin(x.real.dtype)
+    if posinf is not None:
+        maxf = posinf
+    if neginf is not None:
+        minf = neginf
+    for d in dest:
+        idx_nan = isnan(d)
+        idx_posinf = isposinf(d)
+        idx_neginf = isneginf(d)
+        _nx.copyto(d, nan, where=idx_nan)
+        _nx.copyto(d, maxf, where=idx_posinf)
+        _nx.copyto(d, minf, where=idx_neginf)
+    return x[()] if isscalar else x
+
+#-----------------------------------------------------------------------------
+
+def _real_if_close_dispatcher(a, tol=None):
+    return (a,)
+
+
+@array_function_dispatch(_real_if_close_dispatcher)
+def real_if_close(a, tol=100):
+    """
+    If input is complex with all imaginary parts close to zero, return
+    real parts.
+
+    "Close to zero" is defined as `tol` * (machine epsilon of the type for
+    `a`).
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    tol : float
+        Tolerance in machine epsilons for the complex part of the elements
+        in the array. If the tolerance is <=1, then the absolute tolerance
+        is used.
+
+    Returns
+    -------
+    out : ndarray
+        If `a` is real, the type of `a` is used for the output. If `a`
+        has complex elements, the returned type is float.
+
+    See Also
+    --------
+    real, imag, angle
+
+    Notes
+    -----
+    Machine epsilon varies from machine to machine and between data types
+    but Python floats on most platforms have a machine epsilon equal to
+    2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print
+    out the machine epsilon for floats.
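+
+    With the default ``tol=100``, imaginary parts of a float64 input are
+    therefore discarded when they are all below about
+    ``100 * np.finfo(float).eps``:
+
+    >>> 100 * np.finfo(float).eps  # may vary
+    2.220446049250313e-14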
+ + Examples + -------- + >>> np.finfo(float).eps + 2.2204460492503131e-16 # may vary + + >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000) + array([2.1, 5.2]) + >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000) + array([2.1+4.e-13j, 5.2 + 3e-15j]) + + """ + a = asanyarray(a) + type_ = a.dtype.type + if not issubclass(type_, _nx.complexfloating): + return a + if tol > 1: + f = getlimits.finfo(type_) + tol = f.eps * tol + if _nx.all(_nx.absolute(a.imag) < tol): + a = a.real + return a + + +#----------------------------------------------------------------------------- + +_namefromtype = {'S1': 'character', + '?': 'bool', + 'b': 'signed char', + 'B': 'unsigned char', + 'h': 'short', + 'H': 'unsigned short', + 'i': 'integer', + 'I': 'unsigned integer', + 'l': 'long integer', + 'L': 'unsigned long integer', + 'q': 'long long integer', + 'Q': 'unsigned long long integer', + 'f': 'single precision', + 'd': 'double precision', + 'g': 'long precision', + 'F': 'complex single precision', + 'D': 'complex double precision', + 'G': 'complex long double precision', + 'S': 'string', + 'U': 'unicode', + 'V': 'void', + 'O': 'object' + } + +@set_module('numpy') +def typename(char): + """ + Return a description for the given data type code. + + Parameters + ---------- + char : str + Data type code. + + Returns + ------- + out : str + Description of the input data type code. + + See Also + -------- + dtype, typecodes + + Examples + -------- + >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', + ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] + >>> for typechar in typechars: + ... print(typechar, ' : ', np.typename(typechar)) + ... + S1 : character + ? : bool + B : unsigned char + D : complex double precision + G : complex long double precision + F : complex single precision + I : unsigned integer + H : unsigned short + L : unsigned long integer + O : object + Q : unsigned long long integer + S : string + U : unicode + V : void + b : signed char + d : double precision + g : long precision + f : single precision + i : integer + h : short + l : long integer + q : long long integer + + """ + return _namefromtype[char] + +#----------------------------------------------------------------------------- + +#determine the "minimum common type" for a group of arrays. +array_type = [[_nx.half, _nx.single, _nx.double, _nx.longdouble], + [None, _nx.csingle, _nx.cdouble, _nx.clongdouble]] +array_precision = {_nx.half: 0, + _nx.single: 1, + _nx.double: 2, + _nx.longdouble: 3, + _nx.csingle: 1, + _nx.cdouble: 2, + _nx.clongdouble: 3} + + +def _common_type_dispatcher(*arrays): + return arrays + + +@array_function_dispatch(_common_type_dispatcher) +def common_type(*arrays): + """ + Return a scalar type which is common to the input arrays. + + The return type will always be an inexact (i.e. floating point) scalar + type, even if all the arrays are integer arrays. If one of the inputs is + an integer array, the minimum precision type that is returned is a + 64-bit floating point dtype. + + All input arrays except int64 and uint64 can be safely cast to the + returned dtype without loss of information. + + Parameters + ---------- + array1, array2, ... : ndarrays + Input arrays. + + Returns + ------- + out : data type code + Data type code. 
+
+    See Also
+    --------
+    dtype, mintypecode
+
+    Examples
+    --------
+    >>> np.common_type(np.arange(2, dtype=np.float32))
+    <class 'numpy.float32'>
+    >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
+    <class 'numpy.float64'>
+    >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
+    <class 'numpy.complex128'>
+
+    """
+    is_complex = False
+    precision = 0
+    for a in arrays:
+        t = a.dtype.type
+        if iscomplexobj(a):
+            is_complex = True
+        if issubclass(t, _nx.integer):
+            p = 2  # array_precision[_nx.double]
+        else:
+            p = array_precision.get(t, None)
+            if p is None:
+                raise TypeError("can't get common type for non-numeric array")
+        precision = max(precision, p)
+    if is_complex:
+        return array_type[1][precision]
+    else:
+        return array_type[0][precision]
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/type_check.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/type_check.pyi
new file mode 100644
index 00000000..b04da21d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/type_check.pyi
@@ -0,0 +1,222 @@
+from collections.abc import Container, Iterable
+from typing import (
+    Literal as L,
+    Any,
+    overload,
+    TypeVar,
+    Protocol,
+)
+
+from numpy import (
+    dtype,
+    generic,
+    bool_,
+    floating,
+    float64,
+    complexfloating,
+    integer,
+)
+
+from numpy._typing import (
+    ArrayLike,
+    DTypeLike,
+    NBitBase,
+    NDArray,
+    _64Bit,
+    _SupportsDType,
+    _ScalarLike_co,
+    _ArrayLike,
+    _DTypeLikeComplex,
+)
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_NBit1 = TypeVar("_NBit1", bound=NBitBase)
+_NBit2 = TypeVar("_NBit2", bound=NBitBase)
+
+class _SupportsReal(Protocol[_T_co]):
+    @property
+    def real(self) -> _T_co: ...
+
+class _SupportsImag(Protocol[_T_co]):
+    @property
+    def imag(self) -> _T_co: ...
+
+__all__: list[str]
+
+def mintypecode(
+    typechars: Iterable[str | ArrayLike],
+    typeset: Container[str] = ...,
+    default: str = ...,
+) -> str: ...
+
+# `asfarray` ignores dtypes if they're not inexact
+
+@overload
+def asfarray(
+    a: object,
+    dtype: None | type[float] = ...,
+) -> NDArray[float64]: ...
+@overload
+def asfarray(  # type: ignore[misc]
+    a: Any,
+    dtype: _DTypeLikeComplex,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def asfarray(
+    a: Any,
+    dtype: DTypeLike,
+) -> NDArray[floating[Any]]: ...
+
+@overload
+def real(val: _SupportsReal[_T]) -> _T: ...
+@overload
+def real(val: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def imag(val: _SupportsImag[_T]) -> _T: ...
+@overload
+def imag(val: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def iscomplex(x: _ScalarLike_co) -> bool_: ...  # type: ignore[misc]
+@overload
+def iscomplex(x: ArrayLike) -> NDArray[bool_]: ...
+
+@overload
+def isreal(x: _ScalarLike_co) -> bool_: ...  # type: ignore[misc]
+@overload
+def isreal(x: ArrayLike) -> NDArray[bool_]: ...
+
+def iscomplexobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ...
+
+def isrealobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ...
+
+@overload
+def nan_to_num(  # type: ignore[misc]
+    x: _SCT,
+    copy: bool = ...,
+    nan: float = ...,
+    posinf: None | float = ...,
+    neginf: None | float = ...,
+) -> _SCT: ...
+@overload
+def nan_to_num(
+    x: _ScalarLike_co,
+    copy: bool = ...,
+    nan: float = ...,
+    posinf: None | float = ...,
+    neginf: None | float = ...,
+) -> Any: ...
+@overload
+def nan_to_num(
+    x: _ArrayLike[_SCT],
+    copy: bool = ...,
+    nan: float = ...,
+    posinf: None | float = ...,
+    neginf: None | float = ...,
+) -> NDArray[_SCT]: ...
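+# (Overloads above are tried top to bottom: scalar inputs keep a scalar
+# return type, ``_ArrayLike[_SCT]`` inputs keep their dtype, and the
+# ``ArrayLike`` catch-all that follows falls back to ``NDArray[Any]``.)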
+@overload +def nan_to_num( + x: ArrayLike, + copy: bool = ..., + nan: float = ..., + posinf: None | float = ..., + neginf: None | float = ..., +) -> NDArray[Any]: ... + +# If one passes a complex array to `real_if_close`, then one is reasonably +# expected to verify the output dtype (so we can return an unsafe union here) + +@overload +def real_if_close( # type: ignore[misc] + a: _ArrayLike[complexfloating[_NBit1, _NBit1]], + tol: float = ..., +) -> NDArray[floating[_NBit1]] | NDArray[complexfloating[_NBit1, _NBit1]]: ... +@overload +def real_if_close( + a: _ArrayLike[_SCT], + tol: float = ..., +) -> NDArray[_SCT]: ... +@overload +def real_if_close( + a: ArrayLike, + tol: float = ..., +) -> NDArray[Any]: ... + +@overload +def typename(char: L['S1']) -> L['character']: ... +@overload +def typename(char: L['?']) -> L['bool']: ... +@overload +def typename(char: L['b']) -> L['signed char']: ... +@overload +def typename(char: L['B']) -> L['unsigned char']: ... +@overload +def typename(char: L['h']) -> L['short']: ... +@overload +def typename(char: L['H']) -> L['unsigned short']: ... +@overload +def typename(char: L['i']) -> L['integer']: ... +@overload +def typename(char: L['I']) -> L['unsigned integer']: ... +@overload +def typename(char: L['l']) -> L['long integer']: ... +@overload +def typename(char: L['L']) -> L['unsigned long integer']: ... +@overload +def typename(char: L['q']) -> L['long long integer']: ... +@overload +def typename(char: L['Q']) -> L['unsigned long long integer']: ... +@overload +def typename(char: L['f']) -> L['single precision']: ... +@overload +def typename(char: L['d']) -> L['double precision']: ... +@overload +def typename(char: L['g']) -> L['long precision']: ... +@overload +def typename(char: L['F']) -> L['complex single precision']: ... +@overload +def typename(char: L['D']) -> L['complex double precision']: ... +@overload +def typename(char: L['G']) -> L['complex long double precision']: ... +@overload +def typename(char: L['S']) -> L['string']: ... +@overload +def typename(char: L['U']) -> L['unicode']: ... +@overload +def typename(char: L['V']) -> L['void']: ... +@overload +def typename(char: L['O']) -> L['object']: ... + +@overload +def common_type( # type: ignore[misc] + *arrays: _SupportsDType[dtype[ + integer[Any] + ]] +) -> type[floating[_64Bit]]: ... +@overload +def common_type( # type: ignore[misc] + *arrays: _SupportsDType[dtype[ + floating[_NBit1] + ]] +) -> type[floating[_NBit1]]: ... +@overload +def common_type( # type: ignore[misc] + *arrays: _SupportsDType[dtype[ + integer[Any] | floating[_NBit1] + ]] +) -> type[floating[_NBit1 | _64Bit]]: ... +@overload +def common_type( # type: ignore[misc] + *arrays: _SupportsDType[dtype[ + floating[_NBit1] | complexfloating[_NBit2, _NBit2] + ]] +) -> type[complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]]: ... +@overload +def common_type( + *arrays: _SupportsDType[dtype[ + integer[Any] | floating[_NBit1] | complexfloating[_NBit2, _NBit2] + ]] +) -> type[complexfloating[_64Bit | _NBit1 | _NBit2, _64Bit | _NBit1 | _NBit2]]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/ufunclike.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/ufunclike.py new file mode 100644 index 00000000..05fe60c5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/ufunclike.py @@ -0,0 +1,210 @@ +""" +Module of functions that are like ufuncs in acting on arrays and optionally +storing results in an output array. 
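+
+The three public functions, ``fix``, ``isposinf`` and ``isneginf``, each
+accept an optional ``out`` array, mirroring the ufunc calling convention.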
+ +""" +__all__ = ['fix', 'isneginf', 'isposinf'] + +import numpy.core.numeric as nx +from numpy.core.overrides import array_function_dispatch +import warnings +import functools + + +def _dispatcher(x, out=None): + return (x, out) + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def fix(x, out=None): + """ + Round to nearest integer towards zero. + + Round an array of floats element-wise to nearest integer towards zero. + The rounded values are returned as floats. + + Parameters + ---------- + x : array_like + An array of floats to be rounded + out : ndarray, optional + A location into which the result is stored. If provided, it must have + a shape that the input broadcasts to. If not provided or None, a + freshly-allocated array is returned. + + Returns + ------- + out : ndarray of floats + A float array with the same dimensions as the input. + If second argument is not supplied then a float array is returned + with the rounded values. + + If a second argument is supplied the result is stored there. + The return value `out` is then a reference to that array. + + See Also + -------- + rint, trunc, floor, ceil + around : Round to given number of decimals + + Examples + -------- + >>> np.fix(3.14) + 3.0 + >>> np.fix(3) + 3.0 + >>> np.fix([2.1, 2.9, -2.1, -2.9]) + array([ 2., 2., -2., -2.]) + + """ + # promote back to an array if flattened + res = nx.asanyarray(nx.ceil(x, out=out)) + res = nx.floor(x, out=res, where=nx.greater_equal(x, 0)) + + # when no out argument is passed and no subclasses are involved, flatten + # scalars + if out is None and type(res) is nx.ndarray: + res = res[()] + return res + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def isposinf(x, out=None): + """ + Test element-wise for positive infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + out : array_like, optional + A location into which the result is stored. If provided, it must have a + shape that the input broadcasts to. If not provided or None, a + freshly-allocated boolean array is returned. + + Returns + ------- + out : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a boolean array is returned + with values True where the corresponding element of the input is + positive infinity and values False where the element of the input is + not positive infinity. + + If a second argument is supplied the result is stored there. If the + type of that array is a numeric type the result is represented as zeros + and ones, if the type is boolean then as False and True. + The return value `out` is then a reference to that array. + + See Also + -------- + isinf, isneginf, isfinite, isnan + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). 
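+    In particular, NaN is neither positive nor negative infinity, so
+    ``np.isposinf(np.nan)`` is False.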
+ + Errors result if the second argument is also supplied when x is a scalar + input, if first and second arguments have different shapes, or if the + first argument has complex values + + Examples + -------- + >>> np.isposinf(np.PINF) + True + >>> np.isposinf(np.inf) + True + >>> np.isposinf(np.NINF) + False + >>> np.isposinf([-np.inf, 0., np.inf]) + array([False, False, True]) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isposinf(x, y) + array([0, 0, 1]) + >>> y + array([0, 0, 1]) + + """ + is_inf = nx.isinf(x) + try: + signbit = ~nx.signbit(x) + except TypeError as e: + dtype = nx.asanyarray(x).dtype + raise TypeError(f'This operation is not supported for {dtype} values ' + 'because it would be ambiguous.') from e + else: + return nx.logical_and(is_inf, signbit, out) + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def isneginf(x, out=None): + """ + Test element-wise for negative infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + out : array_like, optional + A location into which the result is stored. If provided, it must have a + shape that the input broadcasts to. If not provided or None, a + freshly-allocated boolean array is returned. + + Returns + ------- + out : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a numpy boolean array is + returned with values True where the corresponding element of the + input is negative infinity and values False where the element of + the input is not negative infinity. + + If a second argument is supplied the result is stored there. If the + type of that array is a numeric type the result is represented as + zeros and ones, if the type is boolean then as False and True. The + return value `out` is then a reference to that array. + + See Also + -------- + isinf, isposinf, isnan, isfinite + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). + + Errors result if the second argument is also supplied when x is a scalar + input, if first and second arguments have different shapes, or if the + first argument has complex values. + + Examples + -------- + >>> np.isneginf(np.NINF) + True + >>> np.isneginf(np.inf) + False + >>> np.isneginf(np.PINF) + False + >>> np.isneginf([-np.inf, 0., np.inf]) + array([ True, False, False]) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isneginf(x, y) + array([1, 0, 0]) + >>> y + array([1, 0, 0]) + + """ + is_inf = nx.isinf(x) + try: + signbit = nx.signbit(x) + except TypeError as e: + dtype = nx.asanyarray(x).dtype + raise TypeError(f'This operation is not supported for {dtype} values ' + 'because it would be ambiguous.') from e + else: + return nx.logical_and(is_inf, signbit, out) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/ufunclike.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/ufunclike.pyi new file mode 100644 index 00000000..82537e2a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/ufunclike.pyi @@ -0,0 +1,66 @@ +from typing import Any, overload, TypeVar + +from numpy import floating, bool_, object_, ndarray +from numpy._typing import ( + NDArray, + _FloatLike_co, + _ArrayLikeFloat_co, + _ArrayLikeObject_co, +) + +_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) + +__all__: list[str] + +@overload +def fix( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> floating[Any]: ... 
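+# (A bare float-like scalar keeps a scalar ``floating`` return type; the
+# array-like overloads below produce ndarrays, and passing ``out`` pins
+# the return type to that array.)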
+@overload +def fix( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[floating[Any]]: ... +@overload +def fix( + x: _ArrayLikeObject_co, + out: None = ..., +) -> NDArray[object_]: ... +@overload +def fix( + x: _ArrayLikeFloat_co | _ArrayLikeObject_co, + out: _ArrayType, +) -> _ArrayType: ... + +@overload +def isposinf( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> bool_: ... +@overload +def isposinf( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[bool_]: ... +@overload +def isposinf( + x: _ArrayLikeFloat_co, + out: _ArrayType, +) -> _ArrayType: ... + +@overload +def isneginf( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> bool_: ... +@overload +def isneginf( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[bool_]: ... +@overload +def isneginf( + x: _ArrayLikeFloat_co, + out: _ArrayType, +) -> _ArrayType: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/user_array.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/user_array.py new file mode 100644 index 00000000..0e96b477 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/user_array.py @@ -0,0 +1,286 @@ +""" +Standard container-class for easy multiple-inheritance. + +Try to inherit from the ndarray instead of using this class as this is not +complete. + +""" +from numpy.core import ( + array, asarray, absolute, add, subtract, multiply, divide, + remainder, power, left_shift, right_shift, bitwise_and, bitwise_or, + bitwise_xor, invert, less, less_equal, not_equal, equal, greater, + greater_equal, shape, reshape, arange, sin, sqrt, transpose +) + + +class container: + """ + container(data, dtype=None, copy=True) + + Standard container-class for easy multiple-inheritance. + + Methods + ------- + copy + tostring + byteswap + astype + + """ + def __init__(self, data, dtype=None, copy=True): + self.array = array(data, dtype, copy=copy) + + def __repr__(self): + if self.ndim > 0: + return self.__class__.__name__ + repr(self.array)[len("array"):] + else: + return self.__class__.__name__ + "(" + repr(self.array) + ")" + + def __array__(self, t=None): + if t: + return self.array.astype(t) + return self.array + + # Array as sequence + def __len__(self): + return len(self.array) + + def __getitem__(self, index): + return self._rc(self.array[index]) + + def __setitem__(self, index, value): + self.array[index] = asarray(value, self.dtype) + + def __abs__(self): + return self._rc(absolute(self.array)) + + def __neg__(self): + return self._rc(-self.array) + + def __add__(self, other): + return self._rc(self.array + asarray(other)) + + __radd__ = __add__ + + def __iadd__(self, other): + add(self.array, other, self.array) + return self + + def __sub__(self, other): + return self._rc(self.array - asarray(other)) + + def __rsub__(self, other): + return self._rc(asarray(other) - self.array) + + def __isub__(self, other): + subtract(self.array, other, self.array) + return self + + def __mul__(self, other): + return self._rc(multiply(self.array, asarray(other))) + + __rmul__ = __mul__ + + def __imul__(self, other): + multiply(self.array, other, self.array) + return self + + def __div__(self, other): + return self._rc(divide(self.array, asarray(other))) + + def __rdiv__(self, other): + return self._rc(divide(asarray(other), self.array)) + + def __idiv__(self, other): + divide(self.array, other, self.array) + return self + + def __mod__(self, other): + return self._rc(remainder(self.array, other)) + + def __rmod__(self, other): + return self._rc(remainder(other, 
self.array)) + + def __imod__(self, other): + remainder(self.array, other, self.array) + return self + + def __divmod__(self, other): + return (self._rc(divide(self.array, other)), + self._rc(remainder(self.array, other))) + + def __rdivmod__(self, other): + return (self._rc(divide(other, self.array)), + self._rc(remainder(other, self.array))) + + def __pow__(self, other): + return self._rc(power(self.array, asarray(other))) + + def __rpow__(self, other): + return self._rc(power(asarray(other), self.array)) + + def __ipow__(self, other): + power(self.array, other, self.array) + return self + + def __lshift__(self, other): + return self._rc(left_shift(self.array, other)) + + def __rshift__(self, other): + return self._rc(right_shift(self.array, other)) + + def __rlshift__(self, other): + return self._rc(left_shift(other, self.array)) + + def __rrshift__(self, other): + return self._rc(right_shift(other, self.array)) + + def __ilshift__(self, other): + left_shift(self.array, other, self.array) + return self + + def __irshift__(self, other): + right_shift(self.array, other, self.array) + return self + + def __and__(self, other): + return self._rc(bitwise_and(self.array, other)) + + def __rand__(self, other): + return self._rc(bitwise_and(other, self.array)) + + def __iand__(self, other): + bitwise_and(self.array, other, self.array) + return self + + def __xor__(self, other): + return self._rc(bitwise_xor(self.array, other)) + + def __rxor__(self, other): + return self._rc(bitwise_xor(other, self.array)) + + def __ixor__(self, other): + bitwise_xor(self.array, other, self.array) + return self + + def __or__(self, other): + return self._rc(bitwise_or(self.array, other)) + + def __ror__(self, other): + return self._rc(bitwise_or(other, self.array)) + + def __ior__(self, other): + bitwise_or(self.array, other, self.array) + return self + + def __pos__(self): + return self._rc(self.array) + + def __invert__(self): + return self._rc(invert(self.array)) + + def _scalarfunc(self, func): + if self.ndim == 0: + return func(self[0]) + else: + raise TypeError( + "only rank-0 arrays can be converted to Python scalars.") + + def __complex__(self): + return self._scalarfunc(complex) + + def __float__(self): + return self._scalarfunc(float) + + def __int__(self): + return self._scalarfunc(int) + + def __hex__(self): + return self._scalarfunc(hex) + + def __oct__(self): + return self._scalarfunc(oct) + + def __lt__(self, other): + return self._rc(less(self.array, other)) + + def __le__(self, other): + return self._rc(less_equal(self.array, other)) + + def __eq__(self, other): + return self._rc(equal(self.array, other)) + + def __ne__(self, other): + return self._rc(not_equal(self.array, other)) + + def __gt__(self, other): + return self._rc(greater(self.array, other)) + + def __ge__(self, other): + return self._rc(greater_equal(self.array, other)) + + def copy(self): + "" + return self._rc(self.array.copy()) + + def tostring(self): + "" + return self.array.tostring() + + def tobytes(self): + "" + return self.array.tobytes() + + def byteswap(self): + "" + return self._rc(self.array.byteswap()) + + def astype(self, typecode): + "" + return self._rc(self.array.astype(typecode)) + + def _rc(self, a): + if len(shape(a)) == 0: + return a + else: + return self.__class__(a) + + def __array_wrap__(self, *args): + return self.__class__(args[0]) + + def __setattr__(self, attr, value): + if attr == 'array': + object.__setattr__(self, attr, value) + return + try: + self.array.__setattr__(attr, value) + except 
AttributeError:
+            object.__setattr__(self, attr, value)
+
+    # Only called after other approaches fail.
+    def __getattr__(self, attr):
+        if (attr == 'array'):
+            return object.__getattribute__(self, attr)
+        return self.array.__getattribute__(attr)
+
+#############################################################
+# Test of class container
+#############################################################
+if __name__ == '__main__':
+    temp = reshape(arange(10000), (100, 100))
+
+    ua = container(temp)
+    # new object created begin test
+    print(dir(ua))
+    print(shape(ua), ua.shape)  # I have changed Numeric.py
+
+    ua_small = ua[:3, :5]
+    print(ua_small)
+    # this did not change ua[0,0], which is not normal behavior
+    ua_small[0, 0] = 10
+    print(ua_small[0, 0], ua[0, 0])
+    print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2))
+    print(less(ua_small, 103), type(less(ua_small, 103)))
+    print(type(ua_small * reshape(arange(15), shape(ua_small))))
+    print(reshape(ua_small, (5, 3)))
+    print(transpose(ua_small))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/utils.py b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/utils.py
new file mode 100644
index 00000000..6174c8d0
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/utils.py
@@ -0,0 +1,1211 @@
+import os
+import sys
+import textwrap
+import types
+import re
+import warnings
+import functools
+import platform
+
+from .._utils import set_module
+from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
+from numpy.core import ndarray, ufunc, asarray
+import numpy as np
+
+__all__ = [
+    'issubclass_', 'issubsctype', 'issubdtype', 'deprecate',
+    'deprecate_with_doc', 'get_include', 'info', 'source', 'who',
+    'lookfor', 'byte_bounds', 'safe_eval', 'show_runtime'
+    ]
+
+
+def show_runtime():
+    """
+    Print information about various resources in the system
+    including available intrinsic support and BLAS/LAPACK library
+    in use
+
+    .. versionadded:: 1.24.0
+
+    See Also
+    --------
+    show_config : Show libraries in the system on which NumPy was built.
+
+    Notes
+    -----
+    1. Information is derived with the help of `threadpoolctl <https://pypi.org/project/threadpoolctl/>`_
+       library if available.
+    2. SIMD related information is derived from ``__cpu_features__``,
+       ``__cpu_baseline__`` and ``__cpu_dispatch__``
+
+    """
+    from numpy.core._multiarray_umath import (
+        __cpu_features__, __cpu_baseline__, __cpu_dispatch__
+    )
+    from pprint import pprint
+    config_found = [{
+        "numpy_version": np.__version__,
+        "python": sys.version,
+        "uname": platform.uname(),
+    }]
+    features_found, features_not_found = [], []
+    for feature in __cpu_dispatch__:
+        if __cpu_features__[feature]:
+            features_found.append(feature)
+        else:
+            features_not_found.append(feature)
+    config_found.append({
+        "simd_extensions": {
+            "baseline": __cpu_baseline__,
+            "found": features_found,
+            "not_found": features_not_found
+        }
+    })
+    try:
+        from threadpoolctl import threadpool_info
+        config_found.extend(threadpool_info())
+    except ImportError:
+        print("WARNING: `threadpoolctl` not found in system!"
+              " Install it by `pip install threadpoolctl`."
+              " Once installed, try `np.show_runtime` again"
+              " for more detailed build information")
+    pprint(config_found)
+
+
+def get_include():
+    """
+    Return the directory that contains the NumPy \\*.h header files.
+
+    Extension modules that need to compile against NumPy should use this
+    function to locate the appropriate include directory.
+
+    Notes
+    -----
+    When using ``distutils``, for example in ``setup.py``::
+
+        import numpy as np
+        ...
+ Extension('extension_name', ... + include_dirs=[np.get_include()]) + ... + + """ + import numpy + if numpy.show_config is None: + # running from numpy source directory + d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include') + else: + # using installed numpy core headers + import numpy.core as core + d = os.path.join(os.path.dirname(core.__file__), 'include') + return d + + +class _Deprecate: + """ + Decorator class to deprecate old functions. + + Refer to `deprecate` for details. + + See Also + -------- + deprecate + + """ + + def __init__(self, old_name=None, new_name=None, message=None): + self.old_name = old_name + self.new_name = new_name + self.message = message + + def __call__(self, func, *args, **kwargs): + """ + Decorator call. Refer to ``decorate``. + + """ + old_name = self.old_name + new_name = self.new_name + message = self.message + + if old_name is None: + old_name = func.__name__ + if new_name is None: + depdoc = "`%s` is deprecated!" % old_name + else: + depdoc = "`%s` is deprecated, use `%s` instead!" % \ + (old_name, new_name) + + if message is not None: + depdoc += "\n" + message + + @functools.wraps(func) + def newfunc(*args, **kwds): + warnings.warn(depdoc, DeprecationWarning, stacklevel=2) + return func(*args, **kwds) + + newfunc.__name__ = old_name + doc = func.__doc__ + if doc is None: + doc = depdoc + else: + lines = doc.expandtabs().split('\n') + indent = _get_indent(lines[1:]) + if lines[0].lstrip(): + # Indent the original first line to let inspect.cleandoc() + # dedent the docstring despite the deprecation notice. + doc = indent * ' ' + doc + else: + # Remove the same leading blank lines as cleandoc() would. + skip = len(lines[0]) + 1 + for line in lines[1:]: + if len(line) > indent: + break + skip += len(line) + 1 + doc = doc[skip:] + depdoc = textwrap.indent(depdoc, ' ' * indent) + doc = '\n\n'.join([depdoc, doc]) + newfunc.__doc__ = doc + + return newfunc + + +def _get_indent(lines): + """ + Determines the leading whitespace that could be removed from all the lines. + """ + indent = sys.maxsize + for line in lines: + content = len(line.lstrip()) + if content: + indent = min(indent, len(line) - content) + if indent == sys.maxsize: + indent = 0 + return indent + + +def deprecate(*args, **kwargs): + """ + Issues a DeprecationWarning, adds warning to `old_name`'s + docstring, rebinds ``old_name.__name__`` and returns the new + function object. + + This function may also be used as a decorator. + + Parameters + ---------- + func : function + The function to be deprecated. + old_name : str, optional + The name of the function to be deprecated. Default is None, in + which case the name of `func` is used. + new_name : str, optional + The new name for the function. Default is None, in which case the + deprecation message is that `old_name` is deprecated. If given, the + deprecation message is that `old_name` is deprecated and `new_name` + should be used instead. + message : str, optional + Additional explanation of the deprecation. Displayed in the + docstring after the warning. + + Returns + ------- + old_func : function + The deprecated function. + + Examples + -------- + Note that ``olduint`` returns a value after printing Deprecation + Warning: + + >>> olduint = np.deprecate(np.uint) + DeprecationWarning: `uint64` is deprecated! # may vary + >>> olduint(6) + 6 + + """ + # Deprecate may be run as a function or as a decorator + # If run as a function, we initialise the decorator class + # and execute its __call__ method. 
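+    # For example, ``np.deprecate(old_func)`` passes ``old_func`` as the
+    # first positional argument and decorates it immediately, while
+    # ``np.deprecate(message="...")`` returns a ``_Deprecate`` instance
+    # to be applied as a decorator later.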
+ + if args: + fn = args[0] + args = args[1:] + + return _Deprecate(*args, **kwargs)(fn) + else: + return _Deprecate(*args, **kwargs) + + +def deprecate_with_doc(msg): + """ + Deprecates a function and includes the deprecation in its docstring. + + This function is used as a decorator. It returns an object that can be + used to issue a DeprecationWarning, by passing the to-be decorated + function as argument, this adds warning to the to-be decorated function's + docstring and returns the new function object. + + See Also + -------- + deprecate : Decorate a function such that it issues a `DeprecationWarning` + + Parameters + ---------- + msg : str + Additional explanation of the deprecation. Displayed in the + docstring after the warning. + + Returns + ------- + obj : object + + """ + return _Deprecate(message=msg) + + +#-------------------------------------------- +# Determine if two arrays can share memory +#-------------------------------------------- + +def byte_bounds(a): + """ + Returns pointers to the end-points of an array. + + Parameters + ---------- + a : ndarray + Input array. It must conform to the Python-side of the array + interface. + + Returns + ------- + (low, high) : tuple of 2 integers + The first integer is the first byte of the array, the second + integer is just past the last byte of the array. If `a` is not + contiguous it will not use every byte between the (`low`, `high`) + values. + + Examples + -------- + >>> I = np.eye(2, dtype='f'); I.dtype + dtype('float32') + >>> low, high = np.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + >>> I = np.eye(2); I.dtype + dtype('float64') + >>> low, high = np.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + + """ + ai = a.__array_interface__ + a_data = ai['data'][0] + astrides = ai['strides'] + ashape = ai['shape'] + bytes_a = asarray(a).dtype.itemsize + + a_low = a_high = a_data + if astrides is None: + # contiguous case + a_high += a.size * bytes_a + else: + for shape, stride in zip(ashape, astrides): + if stride < 0: + a_low += (shape-1)*stride + else: + a_high += (shape-1)*stride + a_high += bytes_a + return a_low, a_high + + +#----------------------------------------------------------------------------- +# Function for output and information on the variables used. +#----------------------------------------------------------------------------- + + +def who(vardict=None): + """ + Print the NumPy arrays in the given dictionary. + + If there is no dictionary passed in or `vardict` is None then returns + NumPy arrays in the globals() dictionary (all NumPy arrays in the + namespace). + + Parameters + ---------- + vardict : dict, optional + A dictionary possibly containing ndarrays. Default is globals(). + + Returns + ------- + out : None + Returns 'None'. + + Notes + ----- + Prints out the name, shape, bytes and type of all of the ndarrays + present in `vardict`. + + Examples + -------- + >>> a = np.arange(10) + >>> b = np.ones(20) + >>> np.who() + Name Shape Bytes Type + =========================================================== + a 10 80 int64 + b 20 160 float64 + Upper bound on total bytes = 240 + + >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str', + ... 
'idx':5} + >>> np.who(d) + Name Shape Bytes Type + =========================================================== + x 2 16 float64 + y 3 24 float64 + Upper bound on total bytes = 40 + + """ + if vardict is None: + frame = sys._getframe().f_back + vardict = frame.f_globals + sta = [] + cache = {} + for name in vardict.keys(): + if isinstance(vardict[name], ndarray): + var = vardict[name] + idv = id(var) + if idv in cache.keys(): + namestr = name + " (%s)" % cache[idv] + original = 0 + else: + cache[idv] = name + namestr = name + original = 1 + shapestr = " x ".join(map(str, var.shape)) + bytestr = str(var.nbytes) + sta.append([namestr, shapestr, bytestr, var.dtype.name, + original]) + + maxname = 0 + maxshape = 0 + maxbyte = 0 + totalbytes = 0 + for val in sta: + if maxname < len(val[0]): + maxname = len(val[0]) + if maxshape < len(val[1]): + maxshape = len(val[1]) + if maxbyte < len(val[2]): + maxbyte = len(val[2]) + if val[4]: + totalbytes += int(val[2]) + + if len(sta) > 0: + sp1 = max(10, maxname) + sp2 = max(10, maxshape) + sp3 = max(10, maxbyte) + prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ') + print(prval + "\n" + "="*(len(prval)+5) + "\n") + + for val in sta: + print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4), + val[1], ' '*(sp2-len(val[1])+5), + val[2], ' '*(sp3-len(val[2])+5), + val[3])) + print("\nUpper bound on total bytes = %d" % totalbytes) + return + +#----------------------------------------------------------------------------- + + +# NOTE: pydoc defines a help function which works similarly to this +# except it uses a pager to take over the screen. + +# combine name and arguments and split to multiple lines of width +# characters. End lines on a comma and begin argument list indented with +# the rest of the arguments. +def _split_line(name, arguments, width): + firstwidth = len(name) + k = firstwidth + newstr = name + sepstr = ", " + arglist = arguments.split(sepstr) + for argument in arglist: + if k == firstwidth: + addstr = "" + else: + addstr = sepstr + k = k + len(argument) + len(addstr) + if k > width: + k = firstwidth + 1 + len(argument) + newstr = newstr + ",\n" + " "*(firstwidth+2) + argument + else: + newstr = newstr + addstr + argument + return newstr + +_namedict = None +_dictlist = None + +# Traverse all module directories underneath globals +# to see if something is defined +def _makenamedict(module='numpy'): + module = __import__(module, globals(), locals(), []) + thedict = {module.__name__:module.__dict__} + dictlist = [module.__name__] + totraverse = [module.__dict__] + while True: + if len(totraverse) == 0: + break + thisdict = totraverse.pop(0) + for x in thisdict.keys(): + if isinstance(thisdict[x], types.ModuleType): + modname = thisdict[x].__name__ + if modname not in dictlist: + moddict = thisdict[x].__dict__ + dictlist.append(modname) + totraverse.append(moddict) + thedict[modname] = moddict + return thedict, dictlist + + +def _info(obj, output=None): + """Provide information about ndarray obj. + + Parameters + ---------- + obj : ndarray + Must be ndarray, not checked. + output + Where printed output goes. + + Notes + ----- + Copied over from the numarray module prior to its removal. + Adapted somewhat as only numpy is an option now. + + Called by info. 
+ + """ + extra = "" + tic = "" + bp = lambda x: x + cls = getattr(obj, '__class__', type(obj)) + nm = getattr(cls, '__name__', cls) + strides = obj.strides + endian = obj.dtype.byteorder + + if output is None: + output = sys.stdout + + print("class: ", nm, file=output) + print("shape: ", obj.shape, file=output) + print("strides: ", strides, file=output) + print("itemsize: ", obj.itemsize, file=output) + print("aligned: ", bp(obj.flags.aligned), file=output) + print("contiguous: ", bp(obj.flags.contiguous), file=output) + print("fortran: ", obj.flags.fortran, file=output) + print( + "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra), + file=output + ) + print("byteorder: ", end=' ', file=output) + if endian in ['|', '=']: + print("%s%s%s" % (tic, sys.byteorder, tic), file=output) + byteswap = False + elif endian == '>': + print("%sbig%s" % (tic, tic), file=output) + byteswap = sys.byteorder != "big" + else: + print("%slittle%s" % (tic, tic), file=output) + byteswap = sys.byteorder != "little" + print("byteswap: ", bp(byteswap), file=output) + print("type: %s" % obj.dtype, file=output) + + +@set_module('numpy') +def info(object=None, maxwidth=76, output=None, toplevel='numpy'): + """ + Get help information for an array, function, class, or module. + + Parameters + ---------- + object : object or str, optional + Input object or name to get information about. If `object` is + an `ndarray` instance, information about the array is printed. + If `object` is a numpy object, its docstring is given. If it is + a string, available modules are searched for matching objects. + If None, information about `info` itself is returned. + maxwidth : int, optional + Printing width. + output : file like object, optional + File like object that the output is written to, default is + ``None``, in which case ``sys.stdout`` will be used. + The object has to be opened in 'w' or 'a' mode. + toplevel : str, optional + Start search at this level. + + See Also + -------- + source, lookfor + + Notes + ----- + When used interactively with an object, ``np.info(obj)`` is equivalent + to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython + prompt. + + Examples + -------- + >>> np.info(np.polyval) # doctest: +SKIP + polyval(p, x) + Evaluate the polynomial p at x. + ... + + When using a string for `object` it is possible to get multiple results. + + >>> np.info('fft') # doctest: +SKIP + *** Found in numpy *** + Core FFT routines + ... + *** Found in numpy.fft *** + fft(a, n=None, axis=-1) + ... + *** Repeat reference found in numpy.fft.fftpack *** + *** Total of 3 references found. *** + + When the argument is an array, information about the array is printed. + + >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64) + >>> np.info(a) + class: ndarray + shape: (2, 3) + strides: (24, 8) + itemsize: 8 + aligned: True + contiguous: True + fortran: False + data pointer: 0x562b6e0d2860 # may vary + byteorder: little + byteswap: False + type: complex64 + + """ + global _namedict, _dictlist + # Local import to speed up numpy's import time. 
+ import pydoc + import inspect + + if (hasattr(object, '_ppimport_importer') or + hasattr(object, '_ppimport_module')): + object = object._ppimport_module + elif hasattr(object, '_ppimport_attr'): + object = object._ppimport_attr + + if output is None: + output = sys.stdout + + if object is None: + info(info) + elif isinstance(object, ndarray): + _info(object, output=output) + elif isinstance(object, str): + if _namedict is None: + _namedict, _dictlist = _makenamedict(toplevel) + numfound = 0 + objlist = [] + for namestr in _dictlist: + try: + obj = _namedict[namestr][object] + if id(obj) in objlist: + print("\n " + "*** Repeat reference found in %s *** " % namestr, + file=output + ) + else: + objlist.append(id(obj)) + print(" *** Found in %s ***" % namestr, file=output) + info(obj) + print("-"*maxwidth, file=output) + numfound += 1 + except KeyError: + pass + if numfound == 0: + print("Help for %s not found." % object, file=output) + else: + print("\n " + "*** Total of %d references found. ***" % numfound, + file=output + ) + + elif inspect.isfunction(object) or inspect.ismethod(object): + name = object.__name__ + try: + arguments = str(inspect.signature(object)) + except Exception: + arguments = "()" + + if len(name+arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + print(inspect.getdoc(object), file=output) + + elif inspect.isclass(object): + name = object.__name__ + try: + arguments = str(inspect.signature(object)) + except Exception: + arguments = "()" + + if len(name+arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + doc1 = inspect.getdoc(object) + if doc1 is None: + if hasattr(object, '__init__'): + print(inspect.getdoc(object.__init__), file=output) + else: + print(inspect.getdoc(object), file=output) + + methods = pydoc.allmethods(object) + + public_methods = [meth for meth in methods if meth[0] != '_'] + if public_methods: + print("\n\nMethods:\n", file=output) + for meth in public_methods: + thisobj = getattr(object, meth, None) + if thisobj is not None: + methstr, other = pydoc.splitdoc( + inspect.getdoc(thisobj) or "None" + ) + print(" %s -- %s" % (meth, methstr), file=output) + + elif hasattr(object, '__doc__'): + print(inspect.getdoc(object), file=output) + + +@set_module('numpy') +def source(object, output=sys.stdout): + """ + Print or write to a file the source code for a NumPy object. + + The source code is only returned for objects written in Python. Many + functions and classes are defined in C and will therefore not return + useful information. + + Parameters + ---------- + object : numpy object + Input object. This can be any object (function, class, module, + ...). + output : file object, optional + If `output` not supplied then source code is printed to screen + (sys.stdout). File object must be created with either write 'w' or + append 'a' modes. + + See Also + -------- + lookfor, info + + Examples + -------- + >>> np.source(np.interp) #doctest: +SKIP + In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py + def interp(x, xp, fp, left=None, right=None): + \"\"\".... (full docstring printed)\"\"\" + if isinstance(x, (float, int, number)): + return compiled_interp([x], xp, fp, left, right).item() + else: + return compiled_interp(x, xp, fp, left, right) + + The source code is only returned for objects written in Python. 
+ + >>> np.source(np.array) #doctest: +SKIP + Not available for this object. + + """ + # Local import to speed up numpy's import time. + import inspect + try: + print("In file: %s\n" % inspect.getsourcefile(object), file=output) + print(inspect.getsource(object), file=output) + except Exception: + print("Not available for this object.", file=output) + + +# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...} +# where kind: "func", "class", "module", "object" +# and index: index in breadth-first namespace traversal +_lookfor_caches = {} + +# regexp whose match indicates that the string may contain a function +# signature +_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I) + + +@set_module('numpy') +def lookfor(what, module=None, import_modules=True, regenerate=False, + output=None): + """ + Do a keyword search on docstrings. + + A list of objects that matched the search is displayed, + sorted by relevance. All given keywords need to be found in the + docstring for it to be returned as a result, but the order does + not matter. + + Parameters + ---------- + what : str + String containing words to look for. + module : str or list, optional + Name of module(s) whose docstrings to go through. + import_modules : bool, optional + Whether to import sub-modules in packages. Default is True. + regenerate : bool, optional + Whether to re-generate the docstring cache. Default is False. + output : file-like, optional + File-like object to write the output to. If omitted, use a pager. + + See Also + -------- + source, info + + Notes + ----- + Relevance is determined only roughly, by checking if the keywords occur + in the function name, at the start of a docstring, etc. + + Examples + -------- + >>> np.lookfor('binary representation') # doctest: +SKIP + Search results for 'binary representation' + ------------------------------------------ + numpy.binary_repr + Return the binary representation of the input number as a string. + numpy.core.setup_common.long_double_representation + Given a binary dump as given by GNU od -b, look for long double + numpy.base_repr + Return a string representation of a number in the given base system. + ... + + """ + import pydoc + + # Cache + cache = _lookfor_generate_cache(module, import_modules, regenerate) + + # Search + # XXX: maybe using a real stemming search engine would be better? + found = [] + whats = str(what).lower().split() + if not whats: + return + + for name, (docstring, kind, index) in cache.items(): + if kind in ('module', 'object'): + # don't show modules or objects + continue + doc = docstring.lower() + if all(w in doc for w in whats): + found.append(name) + + # Relevance sort + # XXX: this is full Harrison-Stetson heuristics now, + # XXX: it probably could be improved + + kind_relevance = {'func': 1000, 'class': 1000, + 'module': -1000, 'object': -1000} + + def relevance(name, docstr, kind, index): + r = 0 + # do the keywords occur within the start of the docstring? + first_doc = "\n".join(docstr.lower().strip().split("\n")[:3]) + r += sum([200 for w in whats if w in first_doc]) + # do the keywords occur in the function name? + r += sum([30 for w in whats if w in name]) + # is the full name long? + r += -len(name) * 5 + # is the object of bad type? + r += kind_relevance.get(kind, -1000) + # is the object deep in namespace hierarchy? 
+ r += -name.count('.') * 10 + r += max(-index / 100, -100) + return r + + def relevance_value(a): + return relevance(a, *cache[a]) + found.sort(key=relevance_value) + + # Pretty-print + s = "Search results for '%s'" % (' '.join(whats)) + help_text = [s, "-"*len(s)] + for name in found[::-1]: + doc, kind, ix = cache[name] + + doclines = [line.strip() for line in doc.strip().split("\n") + if line.strip()] + + # find a suitable short description + try: + first_doc = doclines[0].strip() + if _function_signature_re.search(first_doc): + first_doc = doclines[1].strip() + except IndexError: + first_doc = "" + help_text.append("%s\n %s" % (name, first_doc)) + + if not found: + help_text.append("Nothing found.") + + # Output + if output is not None: + output.write("\n".join(help_text)) + elif len(help_text) > 10: + pager = pydoc.getpager() + pager("\n".join(help_text)) + else: + print("\n".join(help_text)) + +def _lookfor_generate_cache(module, import_modules, regenerate): + """ + Generate docstring cache for given module. + + Parameters + ---------- + module : str, None, module + Module for which to generate docstring cache + import_modules : bool + Whether to import sub-modules in packages. + regenerate : bool + Re-generate the docstring cache + + Returns + ------- + cache : dict {obj_full_name: (docstring, kind, index), ...} + Docstring cache for the module, either cached one (regenerate=False) + or newly generated. + + """ + # Local import to speed up numpy's import time. + import inspect + + from io import StringIO + + if module is None: + module = "numpy" + + if isinstance(module, str): + try: + __import__(module) + except ImportError: + return {} + module = sys.modules[module] + elif isinstance(module, list) or isinstance(module, tuple): + cache = {} + for mod in module: + cache.update(_lookfor_generate_cache(mod, import_modules, + regenerate)) + return cache + + if id(module) in _lookfor_caches and not regenerate: + return _lookfor_caches[id(module)] + + # walk items and collect docstrings + cache = {} + _lookfor_caches[id(module)] = cache + seen = {} + index = 0 + stack = [(module.__name__, module)] + while stack: + name, item = stack.pop(0) + if id(item) in seen: + continue + seen[id(item)] = True + + index += 1 + kind = "object" + + if inspect.ismodule(item): + kind = "module" + try: + _all = item.__all__ + except AttributeError: + _all = None + + # import sub-packages + if import_modules and hasattr(item, '__path__'): + for pth in item.__path__: + for mod_path in os.listdir(pth): + this_py = os.path.join(pth, mod_path) + init_py = os.path.join(pth, mod_path, '__init__.py') + if (os.path.isfile(this_py) and + mod_path.endswith('.py')): + to_import = mod_path[:-3] + elif os.path.isfile(init_py): + to_import = mod_path + else: + continue + if to_import == '__init__': + continue + + try: + old_stdout = sys.stdout + old_stderr = sys.stderr + try: + sys.stdout = StringIO() + sys.stderr = StringIO() + __import__("%s.%s" % (name, to_import)) + finally: + sys.stdout = old_stdout + sys.stderr = old_stderr + except KeyboardInterrupt: + # Assume keyboard interrupt came from a user + raise + except BaseException: + # Ignore also SystemExit and pytests.importorskip + # `Skipped` (these are BaseExceptions; gh-22345) + continue + + for n, v in _getmembers(item): + try: + item_name = getattr(v, '__name__', "%s.%s" % (name, n)) + mod_name = getattr(v, '__module__', None) + except NameError: + # ref. 
SWIG's global cvars + # NameError: Unknown C global variable + item_name = "%s.%s" % (name, n) + mod_name = None + if '.' not in item_name and mod_name: + item_name = "%s.%s" % (mod_name, item_name) + + if not item_name.startswith(name + '.'): + # don't crawl "foreign" objects + if isinstance(v, ufunc): + # ... unless they are ufuncs + pass + else: + continue + elif not (inspect.ismodule(v) or _all is None or n in _all): + continue + stack.append(("%s.%s" % (name, n), v)) + elif inspect.isclass(item): + kind = "class" + for n, v in _getmembers(item): + stack.append(("%s.%s" % (name, n), v)) + elif hasattr(item, "__call__"): + kind = "func" + + try: + doc = inspect.getdoc(item) + except NameError: + # ref SWIG's NameError: Unknown C global variable + doc = None + if doc is not None: + cache[name] = (doc, kind, index) + + return cache + +def _getmembers(item): + import inspect + try: + members = inspect.getmembers(item) + except Exception: + members = [(x, getattr(item, x)) for x in dir(item) + if hasattr(item, x)] + return members + + +def safe_eval(source): + """ + Protected string evaluation. + + Evaluate a string containing a Python literal expression without + allowing the execution of arbitrary non-literal code. + + .. warning:: + + This function is identical to :py:meth:`ast.literal_eval` and + has the same security implications. It may not always be safe + to evaluate large input strings. + + Parameters + ---------- + source : str + The string to evaluate. + + Returns + ------- + obj : object + The result of evaluating `source`. + + Raises + ------ + SyntaxError + If the code has invalid Python syntax, or if it contains + non-literal code. + + Examples + -------- + >>> np.safe_eval('1') + 1 + >>> np.safe_eval('[1, 2, 3]') + [1, 2, 3] + >>> np.safe_eval('{"foo": ("bar", 10.0)}') + {'foo': ('bar', 10.0)} + + >>> np.safe_eval('import os') + Traceback (most recent call last): + ... + SyntaxError: invalid syntax + + >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') + Traceback (most recent call last): + ... + ValueError: malformed node or string: <_ast.Call object at 0x...> + + """ + # Local import to speed up numpy's import time. + import ast + return ast.literal_eval(source) + + +def _median_nancheck(data, result, axis): + """ + Utility function to check median result from data for NaN values at the end + and return NaN in that case. Input result can also be a MaskedArray. + + Parameters + ---------- + data : array + Sorted input data to median function + result : Array or MaskedArray + Result of median function. + axis : int + Axis along which the median was computed. + + Returns + ------- + result : scalar or ndarray + Median or NaN in axes which contained NaN in the input. If the input + was an array, NaN will be inserted in-place. If a scalar, either the + input itself or a scalar NaN. + """ + if data.size == 0: + return result + potential_nans = data.take(-1, axis=axis) + n = np.isnan(potential_nans) + # masked NaN values are ok, although for masked the copyto may fail for + # unmasked ones (this was always broken) when the result is a scalar. + if np.ma.isMaskedArray(n): + n = n.filled(False) + + if not n.any(): + return result + + # Without given output, it is possible that the current result is a + # numpy scalar, which is not writeable. If so, just return nan. 
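+    # (``n.any()`` was True above, so in the scalar case the trailing
+    # element taken along ``axis`` is itself NaN and can be returned.)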
+ if isinstance(result, np.generic): + return potential_nans + + # Otherwise copy NaNs (if there are any) + np.copyto(result, potential_nans, where=n) + return result + +def _opt_info(): + """ + Returns a string contains the supported CPU features by the current build. + + The string format can be explained as follows: + - dispatched features that are supported by the running machine + end with `*`. + - dispatched features that are "not" supported by the running machine + end with `?`. + - remained features are representing the baseline. + """ + from numpy.core._multiarray_umath import ( + __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + ) + + if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0: + return '' + + enabled_features = ' '.join(__cpu_baseline__) + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + enabled_features += f" {feature}*" + else: + enabled_features += f" {feature}?" + + return enabled_features + + +def drop_metadata(dtype, /): + """ + Returns the dtype unchanged if it contained no metadata or a copy of the + dtype if it (or any of its structure dtypes) contained metadata. + + This utility is used by `np.save` and `np.savez` to drop metadata before + saving. + + .. note:: + + Due to its limitation this function may move to a more appropriate + home or change in the future and is considered semi-public API only. + + .. warning:: + + This function does not preserve more strange things like record dtypes + and user dtypes may simply return the wrong thing. If you need to be + sure about the latter, check the result with: + ``np.can_cast(new_dtype, dtype, casting="no")``. + + """ + if dtype.fields is not None: + found_metadata = dtype.metadata is not None + + names = [] + formats = [] + offsets = [] + titles = [] + for name, field in dtype.fields.items(): + field_dt = drop_metadata(field[0]) + if field_dt is not field[0]: + found_metadata = True + + names.append(name) + formats.append(field_dt) + offsets.append(field[1]) + titles.append(None if len(field) < 3 else field[2]) + + if not found_metadata: + return dtype + + structure = dict( + names=names, formats=formats, offsets=offsets, titles=titles, + itemsize=dtype.itemsize) + + # NOTE: Could pass (dtype.type, structure) to preserve record dtypes... + return np.dtype(structure, align=dtype.isalignedstruct) + elif dtype.subdtype is not None: + # subarray dtype + subdtype, shape = dtype.subdtype + new_subdtype = drop_metadata(subdtype) + if dtype.metadata is None and new_subdtype is subdtype: + return dtype + + return np.dtype((new_subdtype, shape)) + else: + # Normal unstructured dtype + if dtype.metadata is None: + return dtype + # Note that `dt.str` doesn't round-trip e.g. for user-dtypes. 
+ return np.dtype(dtype.str) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/lib/utils.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/utils.pyi new file mode 100644 index 00000000..52ca9277 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/lib/utils.pyi @@ -0,0 +1,91 @@ +from ast import AST +from collections.abc import Callable, Mapping, Sequence +from typing import ( + Any, + overload, + TypeVar, + Protocol, +) + +from numpy import ndarray, generic + +from numpy.core.numerictypes import ( + issubclass_ as issubclass_, + issubdtype as issubdtype, + issubsctype as issubsctype, +) + +_T_contra = TypeVar("_T_contra", contravariant=True) +_FuncType = TypeVar("_FuncType", bound=Callable[..., Any]) + +# A file-like object opened in `w` mode +class _SupportsWrite(Protocol[_T_contra]): + def write(self, s: _T_contra, /) -> Any: ... + +__all__: list[str] + +class _Deprecate: + old_name: None | str + new_name: None | str + message: None | str + def __init__( + self, + old_name: None | str = ..., + new_name: None | str = ..., + message: None | str = ..., + ) -> None: ... + # NOTE: `__call__` can in principle take arbitrary `*args` and `**kwargs`, + # even though they aren't used for anything + def __call__(self, func: _FuncType) -> _FuncType: ... + +def get_include() -> str: ... + +@overload +def deprecate( + *, + old_name: None | str = ..., + new_name: None | str = ..., + message: None | str = ..., +) -> _Deprecate: ... +@overload +def deprecate( + func: _FuncType, + /, + old_name: None | str = ..., + new_name: None | str = ..., + message: None | str = ..., +) -> _FuncType: ... + +def deprecate_with_doc(msg: None | str) -> _Deprecate: ... + +# NOTE: In practice `byte_bounds` can (potentially) take any object +# implementing the `__array_interface__` protocol. The caveat is +# that certain keys, marked as optional in the spec, must be present for +# `byte_bounds`. This concerns `"strides"` and `"data"`. +def byte_bounds(a: generic | ndarray[Any, Any]) -> tuple[int, int]: ... + +def who(vardict: None | Mapping[str, ndarray[Any, Any]] = ...) -> None: ... + +def info( + object: object = ..., + maxwidth: int = ..., + output: None | _SupportsWrite[str] = ..., + toplevel: str = ..., +) -> None: ... + +def source( + object: object, + output: None | _SupportsWrite[str] = ..., +) -> None: ... + +def lookfor( + what: str, + module: None | str | Sequence[str] = ..., + import_modules: bool = ..., + regenerate: bool = ..., + output: None | _SupportsWrite[str] =..., +) -> None: ... + +def safe_eval(source: str | AST) -> Any: ... + +def show_runtime() -> None: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/__init__.py new file mode 100644 index 00000000..93943de3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/__init__.py @@ -0,0 +1,80 @@ +""" +``numpy.linalg`` +================ + +The NumPy linear algebra functions rely on BLAS and LAPACK to provide efficient +low level implementations of standard linear algebra algorithms. Those +libraries may be provided by NumPy itself using C versions of a subset of their +reference implementations but, when possible, highly optimized libraries that +take advantage of specialized processor functionality are preferred. Examples +of such libraries are OpenBLAS, MKL (TM), and ATLAS. 
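+(An editorial aside: to see which of these libraries a given NumPy build
+actually links against, ``numpy.show_config()`` prints the detected
+BLAS/LAPACK information:
+
+    >>> import numpy as np
+    >>> np.show_config()  # doctest: +SKIP
+
+The output names the BLAS implementation in use.)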
Because those libraries +are multithreaded and processor dependent, environmental variables and external +packages such as threadpoolctl may be needed to control the number of threads +or specify the processor architecture. + +- OpenBLAS: https://www.openblas.net/ +- threadpoolctl: https://github.com/joblib/threadpoolctl + +Please note that the most-used linear algebra functions in NumPy are present in +the main ``numpy`` namespace rather than in ``numpy.linalg``. There are: +``dot``, ``vdot``, ``inner``, ``outer``, ``matmul``, ``tensordot``, ``einsum``, +``einsum_path`` and ``kron``. + +Functions present in numpy.linalg are listed below. + + +Matrix and vector products +-------------------------- + + multi_dot + matrix_power + +Decompositions +-------------- + + cholesky + qr + svd + +Matrix eigenvalues +------------------ + + eig + eigh + eigvals + eigvalsh + +Norms and other numbers +----------------------- + + norm + cond + det + matrix_rank + slogdet + +Solving equations and inverting matrices +---------------------------------------- + + solve + tensorsolve + lstsq + inv + pinv + tensorinv + +Exceptions +---------- + + LinAlgError + +""" +# To get sub-modules +from . import linalg +from .linalg import * + +__all__ = linalg.__all__.copy() + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/__init__.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/__init__.pyi new file mode 100644 index 00000000..d9acd558 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/__init__.pyi @@ -0,0 +1,30 @@ +from numpy.linalg.linalg import ( + matrix_power as matrix_power, + solve as solve, + tensorsolve as tensorsolve, + tensorinv as tensorinv, + inv as inv, + cholesky as cholesky, + eigvals as eigvals, + eigvalsh as eigvalsh, + pinv as pinv, + slogdet as slogdet, + det as det, + svd as svd, + eig as eig, + eigh as eigh, + lstsq as lstsq, + norm as norm, + qr as qr, + cond as cond, + matrix_rank as matrix_rank, + multi_dot as multi_dot, +) + +from numpy._pytesttester import PytestTester + +__all__: list[str] +__path__: list[str] +test: PytestTester + +class LinAlgError(Exception): ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/_umath_linalg.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/_umath_linalg.cpython-39-darwin.so new file mode 100755 index 00000000..a79201d4 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/_umath_linalg.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/lapack_lite.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/lapack_lite.cpython-39-darwin.so new file mode 100755 index 00000000..cd96778d Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/lapack_lite.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/linalg.py b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/linalg.py new file mode 100644 index 00000000..b838b939 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/linalg.py @@ -0,0 +1,2836 @@ +"""Lite version of scipy.linalg. + +Notes +----- +This module is a lite version of the linalg.py module in SciPy which +contains high-level Python interface to the LAPACK library. 
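+(Editorial sketch of the thread-control point made in the ``numpy.linalg``
+overview above, assuming the third-party ``threadpoolctl`` package is
+installed:
+
+    >>> import numpy as np
+    >>> from threadpoolctl import threadpool_limits
+    >>> with threadpool_limits(limits=1, user_api="blas"):
+    ...     _ = np.linalg.svd(np.random.randn(100, 100))
+
+This caps the BLAS thread pool at a single thread for the call.)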
The lite +version only accesses the following LAPACK functions: dgesv, zgesv, +dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf, +zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr. +""" + +__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', + 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det', + 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank', + 'LinAlgError', 'multi_dot'] + +import functools +import operator +import warnings +from typing import NamedTuple, Any + +from .._utils import set_module +from numpy.core import ( + array, asarray, zeros, empty, empty_like, intc, single, double, + csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot, + add, multiply, sqrt, sum, isfinite, + finfo, errstate, geterrobj, moveaxis, amin, amax, prod, abs, + atleast_2d, intp, asanyarray, object_, matmul, + swapaxes, divide, count_nonzero, isnan, sign, argsort, sort, + reciprocal +) +from numpy.core.multiarray import normalize_axis_index +from numpy.core import overrides +from numpy.lib.twodim_base import triu, eye +from numpy.linalg import _umath_linalg + +from numpy._typing import NDArray + +class EigResult(NamedTuple): + eigenvalues: NDArray[Any] + eigenvectors: NDArray[Any] + +class EighResult(NamedTuple): + eigenvalues: NDArray[Any] + eigenvectors: NDArray[Any] + +class QRResult(NamedTuple): + Q: NDArray[Any] + R: NDArray[Any] + +class SlogdetResult(NamedTuple): + sign: NDArray[Any] + logabsdet: NDArray[Any] + +class SVDResult(NamedTuple): + U: NDArray[Any] + S: NDArray[Any] + Vh: NDArray[Any] + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy.linalg') + + +fortran_int = intc + + +@set_module('numpy.linalg') +class LinAlgError(ValueError): + """ + Generic Python-exception-derived object raised by linalg functions. + + General purpose exception class, derived from Python's ValueError + class, programmatically raised in linalg functions when a Linear + Algebra-related condition would prevent further correct execution of the + function. 
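+    Because it derives from ``ValueError``, either exception type can be
+    caught; a short editorial sketch:
+
+    >>> import numpy as np
+    >>> try:
+    ...     np.linalg.inv(np.zeros((2, 2)))
+    ... except np.linalg.LinAlgError as err:
+    ...     print(err)
+    Singular matrix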
+ + Parameters + ---------- + None + + Examples + -------- + >>> from numpy import linalg as LA + >>> LA.inv(np.zeros((2,2))) + Traceback (most recent call last): + File "", line 1, in + File "...linalg.py", line 350, + in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) + File "...linalg.py", line 249, + in solve + raise LinAlgError('Singular matrix') + numpy.linalg.LinAlgError: Singular matrix + + """ + + +def _determine_error_states(): + errobj = geterrobj() + bufsize = errobj[0] + + with errstate(invalid='call', over='ignore', + divide='ignore', under='ignore'): + invalid_call_errmask = geterrobj()[1] + + return [bufsize, invalid_call_errmask, None] + +# Dealing with errors in _umath_linalg +_linalg_error_extobj = _determine_error_states() +del _determine_error_states + +def _raise_linalgerror_singular(err, flag): + raise LinAlgError("Singular matrix") + +def _raise_linalgerror_nonposdef(err, flag): + raise LinAlgError("Matrix is not positive definite") + +def _raise_linalgerror_eigenvalues_nonconvergence(err, flag): + raise LinAlgError("Eigenvalues did not converge") + +def _raise_linalgerror_svd_nonconvergence(err, flag): + raise LinAlgError("SVD did not converge") + +def _raise_linalgerror_lstsq(err, flag): + raise LinAlgError("SVD did not converge in Linear Least Squares") + +def _raise_linalgerror_qr(err, flag): + raise LinAlgError("Incorrect argument found while performing " + "QR factorization") + +def get_linalg_error_extobj(callback): + extobj = list(_linalg_error_extobj) # make a copy + extobj[2] = callback + return extobj + +def _makearray(a): + new = asarray(a) + wrap = getattr(a, "__array_prepare__", new.__array_wrap__) + return new, wrap + +def isComplexType(t): + return issubclass(t, complexfloating) + +_real_types_map = {single : single, + double : double, + csingle : single, + cdouble : double} + +_complex_types_map = {single : csingle, + double : cdouble, + csingle : csingle, + cdouble : cdouble} + +def _realType(t, default=double): + return _real_types_map.get(t, default) + +def _complexType(t, default=cdouble): + return _complex_types_map.get(t, default) + +def _commonType(*arrays): + # in lite version, use higher precision (always double or cdouble) + result_type = single + is_complex = False + for a in arrays: + type_ = a.dtype.type + if issubclass(type_, inexact): + if isComplexType(type_): + is_complex = True + rt = _realType(type_, default=None) + if rt is double: + result_type = double + elif rt is None: + # unsupported inexact scalar + raise TypeError("array type %s is unsupported in linalg" % + (a.dtype.name,)) + else: + result_type = double + if is_complex: + result_type = _complex_types_map[result_type] + return cdouble, result_type + else: + return double, result_type + + +def _to_native_byte_order(*arrays): + ret = [] + for arr in arrays: + if arr.dtype.byteorder not in ('=', '|'): + ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) + else: + ret.append(arr) + if len(ret) == 1: + return ret[0] + else: + return ret + + +def _assert_2d(*arrays): + for a in arrays: + if a.ndim != 2: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'two-dimensional' % a.ndim) + +def _assert_stacked_2d(*arrays): + for a in arrays: + if a.ndim < 2: + raise LinAlgError('%d-dimensional array given. 
Array must be ' + 'at least two-dimensional' % a.ndim) + +def _assert_stacked_square(*arrays): + for a in arrays: + m, n = a.shape[-2:] + if m != n: + raise LinAlgError('Last 2 dimensions of the array must be square') + +def _assert_finite(*arrays): + for a in arrays: + if not isfinite(a).all(): + raise LinAlgError("Array must not contain infs or NaNs") + +def _is_empty_2d(arr): + # check size first for efficiency + return arr.size == 0 and prod(arr.shape[-2:]) == 0 + + +def transpose(a): + """ + Transpose each matrix in a stack of matrices. + + Unlike np.transpose, this only swaps the last two axes, rather than all of + them + + Parameters + ---------- + a : (...,M,N) array_like + + Returns + ------- + aT : (...,N,M) ndarray + """ + return swapaxes(a, -1, -2) + +# Linear equations + +def _tensorsolve_dispatcher(a, b, axes=None): + return (a, b) + + +@array_function_dispatch(_tensorsolve_dispatcher) +def tensorsolve(a, b, axes=None): + """ + Solve the tensor equation ``a x = b`` for x. + + It is assumed that all indices of `x` are summed over in the product, + together with the rightmost indices of `a`, as is done in, for example, + ``tensordot(a, x, axes=x.ndim)``. + + Parameters + ---------- + a : array_like + Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals + the shape of that sub-tensor of `a` consisting of the appropriate + number of its rightmost indices, and must be such that + ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be + 'square'). + b : array_like + Right-hand tensor, which can be of any shape. + axes : tuple of ints, optional + Axes in `a` to reorder to the right, before inversion. + If None (default), no reordering is done. + + Returns + ------- + x : ndarray, shape Q + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). + + See Also + -------- + numpy.tensordot, tensorinv, numpy.einsum + + Examples + -------- + >>> a = np.eye(2*3*4) + >>> a.shape = (2*3, 4, 2, 3, 4) + >>> b = np.random.randn(2*3, 4) + >>> x = np.linalg.tensorsolve(a, b) + >>> x.shape + (2, 3, 4) + >>> np.allclose(np.tensordot(a, x, axes=3), b) + True + + """ + a, wrap = _makearray(a) + b = asarray(b) + an = a.ndim + + if axes is not None: + allaxes = list(range(0, an)) + for k in axes: + allaxes.remove(k) + allaxes.insert(an, k) + a = a.transpose(allaxes) + + oldshape = a.shape[-(an-b.ndim):] + prod = 1 + for k in oldshape: + prod *= k + + if a.size != prod ** 2: + raise LinAlgError( + "Input arrays must satisfy the requirement \ + prod(a.shape[b.ndim:]) == prod(a.shape[:b.ndim])" + ) + + a = a.reshape(prod, prod) + b = b.ravel() + res = wrap(solve(a, b)) + res.shape = oldshape + return res + + +def _solve_dispatcher(a, b): + return (a, b) + + +@array_function_dispatch(_solve_dispatcher) +def solve(a, b): + """ + Solve a linear matrix equation, or system of linear scalar equations. + + Computes the "exact" solution, `x`, of the well-determined, i.e., full + rank, linear matrix equation `ax = b`. + + Parameters + ---------- + a : (..., M, M) array_like + Coefficient matrix. + b : {(..., M,), (..., M, K)}, array_like + Ordinate or "dependent variable" values. + + Returns + ------- + x : {(..., M,), (..., M, K)} ndarray + Solution to the system a x = b. Returned shape is identical to `b`. + + Raises + ------ + LinAlgError + If `a` is singular or not square. + + See Also + -------- + scipy.linalg.solve : Similar function in SciPy. + + Notes + ----- + + .. 
versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The solutions are computed using LAPACK routine ``_gesv``. + + `a` must be square and of full-rank, i.e., all rows (or, equivalently, + columns) must be linearly independent; if either is not true, use + `lstsq` for the least-squares best "solution" of the + system/equation. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 22. + + Examples + -------- + Solve the system of equations ``x0 + 2 * x1 = 1`` and ``3 * x0 + 5 * x1 = 2``: + + >>> a = np.array([[1, 2], [3, 5]]) + >>> b = np.array([1, 2]) + >>> x = np.linalg.solve(a, b) + >>> x + array([-1., 1.]) + + Check that the solution is correct: + + >>> np.allclose(np.dot(a, x), b) + True + + """ + a, _ = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + b, wrap = _makearray(b) + t, result_t = _commonType(a, b) + + # We use the b = (..., M,) logic, only if the number of extra dimensions + # match exactly + if b.ndim == a.ndim - 1: + gufunc = _umath_linalg.solve1 + else: + gufunc = _umath_linalg.solve + + signature = 'DD->D' if isComplexType(t) else 'dd->d' + extobj = get_linalg_error_extobj(_raise_linalgerror_singular) + r = gufunc(a, b, signature=signature, extobj=extobj) + + return wrap(r.astype(result_t, copy=False)) + + +def _tensorinv_dispatcher(a, ind=None): + return (a,) + + +@array_function_dispatch(_tensorinv_dispatcher) +def tensorinv(a, ind=2): + """ + Compute the 'inverse' of an N-dimensional array. + + The result is an inverse for `a` relative to the tensordot operation + ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy, + ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the + tensordot operation. + + Parameters + ---------- + a : array_like + Tensor to 'invert'. Its shape must be 'square', i. e., + ``prod(a.shape[:ind]) == prod(a.shape[ind:])``. + ind : int, optional + Number of first indices that are involved in the inverse sum. + Must be a positive integer, default is 2. + + Returns + ------- + b : ndarray + `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``. + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). + + See Also + -------- + numpy.tensordot, tensorsolve + + Examples + -------- + >>> a = np.eye(4*6) + >>> a.shape = (4, 6, 8, 3) + >>> ainv = np.linalg.tensorinv(a, ind=2) + >>> ainv.shape + (8, 3, 4, 6) + >>> b = np.random.randn(4, 6) + >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) + True + + >>> a = np.eye(4*6) + >>> a.shape = (24, 8, 3) + >>> ainv = np.linalg.tensorinv(a, ind=1) + >>> ainv.shape + (8, 3, 24) + >>> b = np.random.randn(24) + >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) + True + + """ + a = asarray(a) + oldshape = a.shape + prod = 1 + if ind > 0: + invshape = oldshape[ind:] + oldshape[:ind] + for k in oldshape[ind:]: + prod *= k + else: + raise ValueError("Invalid ind argument.") + a = a.reshape(prod, -1) + ia = inv(a) + return ia.reshape(*invshape) + + +# Matrix inversion + +def _unary_dispatcher(a): + return (a,) + + +@array_function_dispatch(_unary_dispatcher) +def inv(a): + """ + Compute the (multiplicative) inverse of a matrix. + + Given a square matrix `a`, return the matrix `ainv` satisfying + ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be inverted. 
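+
+    (Editorial aside on the ``(..., M, M)`` convention used by ``inv`` and
+    by ``solve`` above: leading dimensions broadcast, so a stack of
+    matrices is processed in one call. A sketch:
+
+    >>> a = np.random.randn(4, 3, 3)
+    >>> b = np.random.randn(4, 3)
+    >>> np.linalg.solve(a, b).shape   # b acts as a stack of 3-vectors
+    (4, 3)
+    >>> np.linalg.inv(a).shape
+    (4, 3, 3)
+
+    The random matrices are illustrative; a singular draw would raise
+    ``LinAlgError``.)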
+ + Returns + ------- + ainv : (..., M, M) ndarray or matrix + (Multiplicative) inverse of the matrix `a`. + + Raises + ------ + LinAlgError + If `a` is not square or inversion fails. + + See Also + -------- + scipy.linalg.inv : Similar function in SciPy. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + Examples + -------- + >>> from numpy.linalg import inv + >>> a = np.array([[1., 2.], [3., 4.]]) + >>> ainv = inv(a) + >>> np.allclose(np.dot(a, ainv), np.eye(2)) + True + >>> np.allclose(np.dot(ainv, a), np.eye(2)) + True + + If a is a matrix object, then the return value is a matrix as well: + + >>> ainv = inv(np.matrix(a)) + >>> ainv + matrix([[-2. , 1. ], + [ 1.5, -0.5]]) + + Inverses of several matrices can be computed at once: + + >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]]) + >>> inv(a) + array([[[-2. , 1. ], + [ 1.5 , -0.5 ]], + [[-1.25, 0.75], + [ 0.75, -0.25]]]) + + """ + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + + signature = 'D->D' if isComplexType(t) else 'd->d' + extobj = get_linalg_error_extobj(_raise_linalgerror_singular) + ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj) + return wrap(ainv.astype(result_t, copy=False)) + + +def _matrix_power_dispatcher(a, n): + return (a,) + + +@array_function_dispatch(_matrix_power_dispatcher) +def matrix_power(a, n): + """ + Raise a square matrix to the (integer) power `n`. + + For positive integers `n`, the power is computed by repeated matrix + squarings and matrix multiplications. If ``n == 0``, the identity matrix + of the same shape as M is returned. If ``n < 0``, the inverse + is computed and then raised to the ``abs(n)``. + + .. note:: Stacks of object matrices are not currently supported. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be "powered". + n : int + The exponent can be any integer or long integer, positive, + negative, or zero. + + Returns + ------- + a**n : (..., M, M) ndarray or matrix object + The return value is the same shape and type as `M`; + if the exponent is positive or zero then the type of the + elements is the same as those of `M`. If the exponent is + negative the elements are floating-point. + + Raises + ------ + LinAlgError + For matrices that are not square or that (for negative powers) cannot + be inverted numerically. + + Examples + -------- + >>> from numpy.linalg import matrix_power + >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit + >>> matrix_power(i, 3) # should = -i + array([[ 0, -1], + [ 1, 0]]) + >>> matrix_power(i, 0) + array([[1, 0], + [0, 1]]) + >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements + array([[ 0., 1.], + [-1., 0.]]) + + Somewhat more sophisticated example + + >>> q = np.zeros((4, 4)) + >>> q[0:2, 0:2] = -i + >>> q[2:4, 2:4] = i + >>> q # one of the three quaternion units not equal to 1 + array([[ 0., -1., 0., 0.], + [ 1., 0., 0., 0.], + [ 0., 0., 0., 1.], + [ 0., 0., -1., 0.]]) + >>> matrix_power(q, 2) # = -np.eye(4) + array([[-1., 0., 0., 0.], + [ 0., -1., 0., 0.], + [ 0., 0., -1., 0.], + [ 0., 0., 0., -1.]]) + + """ + a = asanyarray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + + try: + n = operator.index(n) + except TypeError as e: + raise TypeError("exponent must be an integer") from e + + # Fall back on dot for object arrays. 
Object arrays are not supported by + # the current implementation of matmul using einsum + if a.dtype != object: + fmatmul = matmul + elif a.ndim == 2: + fmatmul = dot + else: + raise NotImplementedError( + "matrix_power not supported for stacks of object arrays") + + if n == 0: + a = empty_like(a) + a[...] = eye(a.shape[-2], dtype=a.dtype) + return a + + elif n < 0: + a = inv(a) + n = abs(n) + + # short-cuts. + if n == 1: + return a + + elif n == 2: + return fmatmul(a, a) + + elif n == 3: + return fmatmul(fmatmul(a, a), a) + + # Use binary decomposition to reduce the number of matrix multiplications. + # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to + # increasing powers of 2, and multiply into the result as needed. + z = result = None + while n > 0: + z = a if z is None else fmatmul(z, z) + n, bit = divmod(n, 2) + if bit: + result = z if result is None else fmatmul(result, z) + + return result + + +# Cholesky decomposition + + +@array_function_dispatch(_unary_dispatcher) +def cholesky(a): + """ + Cholesky decomposition. + + Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`, + where `L` is lower-triangular and .H is the conjugate transpose operator + (which is the ordinary transpose if `a` is real-valued). `a` must be + Hermitian (symmetric if real-valued) and positive-definite. No + checking is performed to verify whether `a` is Hermitian or not. + In addition, only the lower-triangular and diagonal elements of `a` + are used. Only `L` is actually returned. + + Parameters + ---------- + a : (..., M, M) array_like + Hermitian (symmetric if all elements are real), positive-definite + input matrix. + + Returns + ------- + L : (..., M, M) array_like + Lower-triangular Cholesky factor of `a`. Returns a matrix object if + `a` is a matrix object. + + Raises + ------ + LinAlgError + If the decomposition fails, for example, if `a` is not + positive-definite. + + See Also + -------- + scipy.linalg.cholesky : Similar function in SciPy. + scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian + positive-definite matrix. + scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in + `scipy.linalg.cho_solve`. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The Cholesky decomposition is often used as a fast way of solving + + .. math:: A \\mathbf{x} = \\mathbf{b} + + (when `A` is both Hermitian/symmetric and positive-definite). + + First, we solve for :math:`\\mathbf{y}` in + + .. math:: L \\mathbf{y} = \\mathbf{b}, + + and then for :math:`\\mathbf{x}` in + + .. math:: L.H \\mathbf{x} = \\mathbf{y}. + + Examples + -------- + >>> A = np.array([[1,-2j],[2j,5]]) + >>> A + array([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> L = np.linalg.cholesky(A) + >>> L + array([[1.+0.j, 0.+0.j], + [0.+2.j, 1.+0.j]]) + >>> np.dot(L, L.T.conj()) # verify that L * L.H = A + array([[1.+0.j, 0.-2.j], + [0.+2.j, 5.+0.j]]) + >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like? 
+ >>> np.linalg.cholesky(A) # an ndarray object is returned + array([[1.+0.j, 0.+0.j], + [0.+2.j, 1.+0.j]]) + >>> # But a matrix object is returned if A is a matrix object + >>> np.linalg.cholesky(np.matrix(A)) + matrix([[ 1.+0.j, 0.+0.j], + [ 0.+2.j, 1.+0.j]]) + + """ + extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef) + gufunc = _umath_linalg.cholesky_lo + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->D' if isComplexType(t) else 'd->d' + r = gufunc(a, signature=signature, extobj=extobj) + return wrap(r.astype(result_t, copy=False)) + + +# QR decomposition + +def _qr_dispatcher(a, mode=None): + return (a,) + + +@array_function_dispatch(_qr_dispatcher) +def qr(a, mode='reduced'): + """ + Compute the qr factorization of a matrix. + + Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is + upper-triangular. + + Parameters + ---------- + a : array_like, shape (..., M, N) + An array-like object with the dimensionality of at least 2. + mode : {'reduced', 'complete', 'r', 'raw'}, optional + If K = min(M, N), then + + * 'reduced' : returns Q, R with dimensions (..., M, K), (..., K, N) (default) + * 'complete' : returns Q, R with dimensions (..., M, M), (..., M, N) + * 'r' : returns R only with dimensions (..., K, N) + * 'raw' : returns h, tau with dimensions (..., N, M), (..., K,) + + The options 'reduced', 'complete, and 'raw' are new in numpy 1.8, + see the notes for more information. The default is 'reduced', and to + maintain backward compatibility with earlier versions of numpy both + it and the old default 'full' can be omitted. Note that array h + returned in 'raw' mode is transposed for calling Fortran. The + 'economic' mode is deprecated. The modes 'full' and 'economic' may + be passed using only the first letter for backwards compatibility, + but all others must be spelled out. See the Notes for more + explanation. + + + Returns + ------- + When mode is 'reduced' or 'complete', the result will be a namedtuple with + the attributes `Q` and `R`. + + Q : ndarray of float or complex, optional + A matrix with orthonormal columns. When mode = 'complete' the + result is an orthogonal/unitary matrix depending on whether or not + a is real/complex. The determinant may be either +/- 1 in that + case. In case the number of dimensions in the input array is + greater than 2 then a stack of the matrices with above properties + is returned. + R : ndarray of float or complex, optional + The upper-triangular matrix or a stack of upper-triangular + matrices if the number of dimensions in the input array is greater + than 2. + (h, tau) : ndarrays of np.double or np.cdouble, optional + The array h contains the Householder reflectors that generate q + along with r. The tau array contains scaling factors for the + reflectors. In the deprecated 'economic' mode only h is returned. + + Raises + ------ + LinAlgError + If factoring fails. + + See Also + -------- + scipy.linalg.qr : Similar function in SciPy. + scipy.linalg.rq : Compute RQ decomposition of a matrix. + + Notes + ----- + This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``, + ``dorgqr``, and ``zungqr``. + + For more information on the qr factorization, see for example: + https://en.wikipedia.org/wiki/QR_factorization + + Subclasses of `ndarray` are preserved except for the 'raw' mode. So if + `a` is of type `matrix`, all the return values will be matrices too. 
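+    A quick editorial illustration of the mode-dependent shapes described
+    above, for a tall 5 x 3 input:
+
+    >>> a = np.random.randn(5, 3)
+    >>> Q, R = np.linalg.qr(a)                  # 'reduced' (default)
+    >>> Q.shape, R.shape
+    ((5, 3), (3, 3))
+    >>> Q, R = np.linalg.qr(a, mode='complete')
+    >>> Q.shape, R.shape
+    ((5, 5), (5, 3))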
+ + New 'reduced', 'complete', and 'raw' options for mode were added in + NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In + addition the options 'full' and 'economic' were deprecated. Because + 'full' was the previous default and 'reduced' is the new default, + backward compatibility can be maintained by letting `mode` default. + The 'raw' option was added so that LAPACK routines that can multiply + arrays by q using the Householder reflectors can be used. Note that in + this case the returned arrays are of type np.double or np.cdouble and + the h array is transposed to be FORTRAN compatible. No routines using + the 'raw' return are currently exposed by numpy, but some are available + in lapack_lite and just await the necessary work. + + Examples + -------- + >>> a = np.random.randn(9, 6) + >>> Q, R = np.linalg.qr(a) + >>> np.allclose(a, np.dot(Q, R)) # a does equal QR + True + >>> R2 = np.linalg.qr(a, mode='r') + >>> np.allclose(R, R2) # mode='r' returns the same R as mode='full' + True + >>> a = np.random.normal(size=(3, 2, 2)) # Stack of 2 x 2 matrices as input + >>> Q, R = np.linalg.qr(a) + >>> Q.shape + (3, 2, 2) + >>> R.shape + (3, 2, 2) + >>> np.allclose(a, np.matmul(Q, R)) + True + + Example illustrating a common use of `qr`: solving of least squares + problems + + What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for + the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points + and you'll see that it should be y0 = 0, m = 1.) The answer is provided + by solving the over-determined matrix equation ``Ax = b``, where:: + + A = array([[0, 1], [1, 1], [1, 1], [2, 1]]) + x = array([[y0], [m]]) + b = array([[1], [0], [2], [1]]) + + If A = QR such that Q is orthonormal (which is always possible via + Gram-Schmidt), then ``x = inv(R) * (Q.T) * b``. (In numpy practice, + however, we simply use `lstsq`.) + + >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]]) + >>> A + array([[0, 1], + [1, 1], + [1, 1], + [2, 1]]) + >>> b = np.array([1, 2, 2, 3]) + >>> Q, R = np.linalg.qr(A) + >>> p = np.dot(Q.T, b) + >>> np.dot(np.linalg.inv(R), p) + array([ 1., 1.]) + + """ + if mode not in ('reduced', 'complete', 'r', 'raw'): + if mode in ('f', 'full'): + # 2013-04-01, 1.8 + msg = "".join(( + "The 'full' option is deprecated in favor of 'reduced'.\n", + "For backward compatibility let mode default.")) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + mode = 'reduced' + elif mode in ('e', 'economic'): + # 2013-04-01, 1.8 + msg = "The 'economic' option is deprecated." + warnings.warn(msg, DeprecationWarning, stacklevel=2) + mode = 'economic' + else: + raise ValueError(f"Unrecognized mode '{mode}'") + + a, wrap = _makearray(a) + _assert_stacked_2d(a) + m, n = a.shape[-2:] + t, result_t = _commonType(a) + a = a.astype(t, copy=True) + a = _to_native_byte_order(a) + mn = min(m, n) + + if m <= n: + gufunc = _umath_linalg.qr_r_raw_m + else: + gufunc = _umath_linalg.qr_r_raw_n + + signature = 'D->D' if isComplexType(t) else 'd->d' + extobj = get_linalg_error_extobj(_raise_linalgerror_qr) + tau = gufunc(a, signature=signature, extobj=extobj) + + # handle modes that don't return q + if mode == 'r': + r = triu(a[..., :mn, :]) + r = r.astype(result_t, copy=False) + return wrap(r) + + if mode == 'raw': + q = transpose(a) + q = q.astype(result_t, copy=False) + tau = tau.astype(result_t, copy=False) + return wrap(q), tau + + if mode == 'economic': + a = a.astype(result_t, copy=False) + return wrap(a) + + # mc is the number of columns in the resulting q + # matrix. 
If the mode is complete then it is + # same as number of rows, and if the mode is reduced, + # then it is the minimum of number of rows and columns. + if mode == 'complete' and m > n: + mc = m + gufunc = _umath_linalg.qr_complete + else: + mc = mn + gufunc = _umath_linalg.qr_reduced + + signature = 'DD->D' if isComplexType(t) else 'dd->d' + extobj = get_linalg_error_extobj(_raise_linalgerror_qr) + q = gufunc(a, tau, signature=signature, extobj=extobj) + r = triu(a[..., :mc, :]) + + q = q.astype(result_t, copy=False) + r = r.astype(result_t, copy=False) + + return QRResult(wrap(q), wrap(r)) + +# Eigenvalues + + +@array_function_dispatch(_unary_dispatcher) +def eigvals(a): + """ + Compute the eigenvalues of a general matrix. + + Main difference between `eigvals` and `eig`: the eigenvectors aren't + returned. + + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues will be computed. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues, each repeated according to its multiplicity. + They are not necessarily ordered, nor are they necessarily + real for real matrices. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eig : eigenvalues and right eigenvectors of general arrays + eigvalsh : eigenvalues of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eigh : eigenvalues and eigenvectors of real symmetric or complex + Hermitian (conjugate symmetric) arrays. + scipy.linalg.eigvals : Similar function in SciPy. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the ``_geev`` LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + Examples + -------- + Illustration, using the fact that the eigenvalues of a diagonal matrix + are its diagonal elements, that multiplying a matrix on the left + by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose + of `Q`), preserves the eigenvalues of the "middle" matrix. In other words, + if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as + ``A``: + + >>> from numpy import linalg as LA + >>> x = np.random.random() + >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) + >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) + (1.0, 1.0, 0.0) + + Now multiply a diagonal matrix by ``Q`` on one side and by ``Q.T`` on the other: + + >>> D = np.diag((-1,1)) + >>> LA.eigvals(D) + array([-1., 1.]) + >>> A = np.dot(Q, D) + >>> A = np.dot(A, Q.T) + >>> LA.eigvals(A) + array([ 1., -1.]) # random + + """ + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + _assert_finite(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + signature = 'D->D' if isComplexType(t) else 'd->D' + w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj) + + if not isComplexType(t): + if all(w.imag == 0): + w = w.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + return w.astype(result_t, copy=False) + + +def _eigvalsh_dispatcher(a, UPLO=None): + return (a,) + + +@array_function_dispatch(_eigvalsh_dispatcher) +def eigvalsh(a, UPLO='L'): + """ + Compute the eigenvalues of a complex Hermitian or real symmetric matrix. + + Main difference from eigh: the eigenvectors are not computed. 
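+    A short editorial sketch of that difference:
+
+    >>> a = np.array([[2., 1.], [1., 2.]])
+    >>> np.linalg.eigvalsh(a)
+    array([1., 3.])
+    >>> w, v = np.linalg.eigh(a)   # same eigenvalues, eigenvectors as well
+    >>> w
+    array([1., 3.])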
+ + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues are to be + computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). + Irrespective of this value only the real parts of the diagonal will + be considered in the computation to preserve the notion of a Hermitian + matrix. It therefore follows that the imaginary part of the diagonal + will always be treated as zero. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues in ascending order, each repeated according to + its multiplicity. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eigvals : eigenvalues of general real or complex arrays. + eig : eigenvalues and right eigenvectors of general real or complex + arrays. + scipy.linalg.eigvalsh : Similar function in SciPy. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``. + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> LA.eigvalsh(a) + array([ 0.17157288, 5.82842712]) # may vary + + >>> # demonstrate the treatment of the imaginary part of the diagonal + >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) + >>> a + array([[5.+2.j, 9.-2.j], + [0.+2.j, 2.-1.j]]) + >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals() + >>> # with: + >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) + >>> b + array([[5.+0.j, 0.-2.j], + [0.+2.j, 2.+0.j]]) + >>> wa = LA.eigvalsh(a) + >>> wb = LA.eigvals(b) + >>> wa; wb + array([1., 6.]) + array([6.+0.j, 1.+0.j]) + + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + if UPLO == 'L': + gufunc = _umath_linalg.eigvalsh_lo + else: + gufunc = _umath_linalg.eigvalsh_up + + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->d' if isComplexType(t) else 'd->d' + w = gufunc(a, signature=signature, extobj=extobj) + return w.astype(_realType(result_t), copy=False) + +def _convertarray(a): + t, result_t = _commonType(a) + a = a.astype(t).T.copy() + return a, t, result_t + + +# Eigenvectors + + +@array_function_dispatch(_unary_dispatcher) +def eig(a): + """ + Compute the eigenvalues and right eigenvectors of a square array. + + Parameters + ---------- + a : (..., M, M) array + Matrices for which the eigenvalues and right eigenvectors will + be computed + + Returns + ------- + A namedtuple with the following attributes: + + eigenvalues : (..., M) array + The eigenvalues, each repeated according to its multiplicity. + The eigenvalues are not necessarily ordered. The resulting + array will be of complex type, unless the imaginary part is + zero in which case it will be cast to a real type. 
When `a` + is real the resulting eigenvalues will be real (0 imaginary + part) or occur in conjugate pairs + + eigenvectors : (..., M, M) array + The normalized (unit "length") eigenvectors, such that the + column ``eigenvectors[:,i]`` is the eigenvector corresponding to the + eigenvalue ``eigenvalues[i]``. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvals : eigenvalues of a non-symmetric array. + eigh : eigenvalues and eigenvectors of a real symmetric or complex + Hermitian (conjugate symmetric) array. + eigvalsh : eigenvalues of a real symmetric or complex Hermitian + (conjugate symmetric) array. + scipy.linalg.eig : Similar function in SciPy that also solves the + generalized eigenvalue problem. + scipy.linalg.schur : Best choice for unitary and other non-Hermitian + normal matrices. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the ``_geev`` LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + The number `w` is an eigenvalue of `a` if there exists a vector `v` such + that ``a @ v = w * v``. Thus, the arrays `a`, `eigenvalues`, and + `eigenvectors` satisfy the equations ``a @ eigenvectors[:,i] = + eigenvalues[i] * eigenvalues[:,i]`` for :math:`i \\in \\{0,...,M-1\\}`. + + The array `eigenvectors` may not be of maximum rank, that is, some of the + columns may be linearly dependent, although round-off error may obscure + that fact. If the eigenvalues are all different, then theoretically the + eigenvectors are linearly independent and `a` can be diagonalized by a + similarity transformation using `eigenvectors`, i.e, ``inv(eigenvectors) @ + a @ eigenvectors`` is diagonal. + + For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur` + is preferred because the matrix `eigenvectors` is guaranteed to be + unitary, which is not the case when using `eig`. The Schur factorization + produces an upper triangular matrix rather than a diagonal matrix, but for + normal matrices only the diagonal of the upper triangular matrix is + needed, the rest is roundoff error. + + Finally, it is emphasized that `eigenvectors` consists of the *right* (as + in right-hand side) eigenvectors of `a`. A vector `y` satisfying ``y.T @ a + = z * y.T`` for some number `z` is called a *left* eigenvector of `a`, + and, in general, the left and right eigenvectors of a matrix are not + necessarily the (perhaps conjugate) transposes of each other. + + References + ---------- + G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, + Academic Press, Inc., 1980, Various pp. + + Examples + -------- + >>> from numpy import linalg as LA + + (Almost) trivial example with real eigenvalues and eigenvectors. + + >>> eigenvalues, eigenvectors = LA.eig(np.diag((1, 2, 3))) + >>> eigenvalues + array([1., 2., 3.]) + >>> eigenvectors + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + Real matrix possessing complex eigenvalues and eigenvectors; note that the + eigenvalues are complex conjugates of each other. + + >>> eigenvalues, eigenvectors = LA.eig(np.array([[1, -1], [1, 1]])) + >>> eigenvalues + array([1.+1.j, 1.-1.j]) + >>> eigenvectors + array([[0.70710678+0.j , 0.70710678-0.j ], + [0. -0.70710678j, 0. +0.70710678j]]) + + Complex-valued matrix with real eigenvalues (but complex-valued eigenvectors); + note that ``a.conj().T == a``, i.e., `a` is Hermitian. 
+ + >>> a = np.array([[1, 1j], [-1j, 1]]) + >>> eigenvalues, eigenvectors = LA.eig(a) + >>> eigenvalues + array([2.+0.j, 0.+0.j]) + >>> eigenvectors + array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary + [ 0.70710678+0.j , -0. +0.70710678j]]) + + Be careful about round-off error! + + >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]]) + >>> # Theor. eigenvalues are 1 +/- 1e-9 + >>> eigenvalues, eigenvectors = LA.eig(a) + >>> eigenvalues + array([1., 1.]) + >>> eigenvectors + array([[1., 0.], + [0., 1.]]) + + """ + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + _assert_finite(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + signature = 'D->DD' if isComplexType(t) else 'd->DD' + w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj) + + if not isComplexType(t) and all(w.imag == 0.0): + w = w.real + vt = vt.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + vt = vt.astype(result_t, copy=False) + return EigResult(w.astype(result_t, copy=False), wrap(vt)) + + +@array_function_dispatch(_eigvalsh_dispatcher) +def eigh(a, UPLO='L'): + """ + Return the eigenvalues and eigenvectors of a complex Hermitian + (conjugate symmetric) or a real symmetric matrix. + + Returns two objects, a 1-D array containing the eigenvalues of `a`, and + a 2-D square array or matrix (depending on the input type) of the + corresponding eigenvectors (in columns). + + Parameters + ---------- + a : (..., M, M) array + Hermitian or real symmetric matrices whose eigenvalues and + eigenvectors are to be computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). + Irrespective of this value only the real parts of the diagonal will + be considered in the computation to preserve the notion of a Hermitian + matrix. It therefore follows that the imaginary part of the diagonal + will always be treated as zero. + + Returns + ------- + A namedtuple with the following attributes: + + eigenvalues : (..., M) ndarray + The eigenvalues in ascending order, each repeated according to + its multiplicity. + eigenvectors : {(..., M, M) ndarray, (..., M, M) matrix} + The column ``eigenvectors[:, i]`` is the normalized eigenvector + corresponding to the eigenvalue ``eigenvalues[i]``. Will return a + matrix object if `a` is a matrix object. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvalsh : eigenvalues of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eig : eigenvalues and right eigenvectors for non-symmetric arrays. + eigvals : eigenvalues of non-symmetric arrays. + scipy.linalg.eigh : Similar function in SciPy (but also solves the + generalized eigenvalue problem). + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``, + ``_heevd``. + + The eigenvalues of real symmetric or complex Hermitian matrices are always + real. [1]_ The array `eigenvalues` of (column) eigenvectors is unitary and + `a`, `eigenvalues`, and `eigenvectors` satisfy the equations ``dot(a, + eigenvectors[:, i]) = eigenvalues[i] * eigenvectors[:, i]``. + + References + ---------- + .. [1] G. 
Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 222. + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> a + array([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> eigenvalues, eigenvectors = LA.eigh(a) + >>> eigenvalues + array([0.17157288, 5.82842712]) + >>> eigenvectors + array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary + [ 0. +0.38268343j, 0. -0.92387953j]]) + + >>> np.dot(a, eigenvectors[:, 0]) - eigenvalues[0] * eigenvectors[:, 0] # verify 1st eigenval/vec pair + array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j]) + >>> np.dot(a, eigenvectors[:, 1]) - eigenvalues[1] * eigenvectors[:, 1] # verify 2nd eigenval/vec pair + array([0.+0.j, 0.+0.j]) + + >>> A = np.matrix(a) # what happens if input is a matrix object + >>> A + matrix([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> eigenvalues, eigenvectors = LA.eigh(A) + >>> eigenvalues + array([0.17157288, 5.82842712]) + >>> eigenvectors + matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary + [ 0. +0.38268343j, 0. -0.92387953j]]) + + >>> # demonstrate the treatment of the imaginary part of the diagonal + >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) + >>> a + array([[5.+2.j, 9.-2.j], + [0.+2.j, 2.-1.j]]) + >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with: + >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) + >>> b + array([[5.+0.j, 0.-2.j], + [0.+2.j, 2.+0.j]]) + >>> wa, va = LA.eigh(a) + >>> wb, vb = LA.eig(b) + >>> wa; wb + array([1., 6.]) + array([6.+0.j, 1.+0.j]) + >>> va; vb + array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary + [ 0. +0.89442719j, 0. -0.4472136j ]]) + array([[ 0.89442719+0.j , -0. +0.4472136j], + [-0. +0.4472136j, 0.89442719+0.j ]]) + + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + a, wrap = _makearray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj( + _raise_linalgerror_eigenvalues_nonconvergence) + if UPLO == 'L': + gufunc = _umath_linalg.eigh_lo + else: + gufunc = _umath_linalg.eigh_up + + signature = 'D->dD' if isComplexType(t) else 'd->dd' + w, vt = gufunc(a, signature=signature, extobj=extobj) + w = w.astype(_realType(result_t), copy=False) + vt = vt.astype(result_t, copy=False) + return EighResult(w, wrap(vt)) + + +# Singular value decomposition + +def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None): + return (a,) + + +@array_function_dispatch(_svd_dispatcher) +def svd(a, full_matrices=True, compute_uv=True, hermitian=False): + """ + Singular Value Decomposition. + + When `a` is a 2D array, and ``full_matrices=False``, then it is + factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``, where + `u` and the Hermitian transpose of `vh` are 2D arrays with + orthonormal columns and `s` is a 1D array of `a`'s singular + values. When `a` is higher-dimensional, SVD is applied in + stacked mode as explained below. + + Parameters + ---------- + a : (..., M, N) array_like + A real or complex array with ``a.ndim >= 2``. + full_matrices : bool, optional + If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and + ``(..., N, N)``, respectively. Otherwise, the shapes are + ``(..., M, K)`` and ``(..., K, N)``, respectively, where + ``K = min(M, N)``. + compute_uv : bool, optional + Whether or not to compute `u` and `vh` in addition to `s`. True + by default. 
+ hermitian : bool, optional + If True, `a` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + + .. versionadded:: 1.17.0 + + Returns + ------- + When `compute_uv` is True, the result is a namedtuple with the following + attribute names: + + U : { (..., M, M), (..., M, K) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. + S : (..., K) array + Vector(s) with the singular values, within each vector sorted in + descending order. The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. + Vh : { (..., N, N), (..., K, N) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. + + Raises + ------ + LinAlgError + If SVD computation does not converge. + + See Also + -------- + scipy.linalg.svd : Similar function in SciPy. + scipy.linalg.svdvals : Compute singular values of a matrix. + + Notes + ----- + + .. versionchanged:: 1.8.0 + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The decomposition is performed using LAPACK routine ``_gesdd``. + + SVD is usually described for the factorization of a 2D matrix :math:`A`. + The higher-dimensional case will be discussed below. In the 2D case, SVD is + written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`, + :math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s` + contains the singular values of `a` and `u` and `vh` are unitary. The rows + of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are + the eigenvectors of :math:`A A^H`. In both cases the corresponding + (possibly non-zero) eigenvalues are given by ``s**2``. + + If `a` has more than two dimensions, then broadcasting rules apply, as + explained in :ref:`routines.linalg-broadcasting`. This means that SVD is + working in "stacked" mode: it iterates over all indices of the first + ``a.ndim - 2`` dimensions and for each combination SVD is applied to the + last two indices. The matrix `a` can be reconstructed from the + decomposition with either ``(u * s[..., None, :]) @ vh`` or + ``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the + function ``np.matmul`` for python versions below 3.5.) + + If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are + all the return values. 
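+    An editorial sketch of the ``hermitian`` shortcut described above: for
+    a real symmetric matrix the singular values are the absolute
+    eigenvalues, sorted in descending order, so the eigendecomposition
+    path agrees with the general routine:
+
+    >>> a = np.array([[2., 1.], [1., 2.]])
+    >>> np.linalg.svd(a, compute_uv=False, hermitian=True)
+    array([3., 1.])
+    >>> np.linalg.svd(a, compute_uv=False)
+    array([3., 1.])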
+ + Examples + -------- + >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) + >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3) + + Reconstruction based on full SVD, 2D case: + + >>> U, S, Vh = np.linalg.svd(a, full_matrices=True) + >>> U.shape, S.shape, Vh.shape + ((9, 9), (6,), (6, 6)) + >>> np.allclose(a, np.dot(U[:, :6] * S, Vh)) + True + >>> smat = np.zeros((9, 6), dtype=complex) + >>> smat[:6, :6] = np.diag(S) + >>> np.allclose(a, np.dot(U, np.dot(smat, Vh))) + True + + Reconstruction based on reduced SVD, 2D case: + + >>> U, S, Vh = np.linalg.svd(a, full_matrices=False) + >>> U.shape, S.shape, Vh.shape + ((9, 6), (6,), (6, 6)) + >>> np.allclose(a, np.dot(U * S, Vh)) + True + >>> smat = np.diag(S) + >>> np.allclose(a, np.dot(U, np.dot(smat, Vh))) + True + + Reconstruction based on full SVD, 4D case: + + >>> U, S, Vh = np.linalg.svd(b, full_matrices=True) + >>> U.shape, S.shape, Vh.shape + ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3)) + >>> np.allclose(b, np.matmul(U[..., :3] * S[..., None, :], Vh)) + True + >>> np.allclose(b, np.matmul(U[..., :3], S[..., None] * Vh)) + True + + Reconstruction based on reduced SVD, 4D case: + + >>> U, S, Vh = np.linalg.svd(b, full_matrices=False) + >>> U.shape, S.shape, Vh.shape + ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3)) + >>> np.allclose(b, np.matmul(U * S[..., None, :], Vh)) + True + >>> np.allclose(b, np.matmul(U, S[..., None] * Vh)) + True + + """ + import numpy as _nx + a, wrap = _makearray(a) + + if hermitian: + # note: lapack svd returns eigenvalues with s ** 2 sorted descending, + # but eig returns s sorted ascending, so we re-order the eigenvalues + # and related arrays to have the correct order + if compute_uv: + s, u = eigh(a) + sgn = sign(s) + s = abs(s) + sidx = argsort(s)[..., ::-1] + sgn = _nx.take_along_axis(sgn, sidx, axis=-1) + s = _nx.take_along_axis(s, sidx, axis=-1) + u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1) + # singular values are unsigned, move the sign into v + vt = transpose(u * sgn[..., None, :]).conjugate() + return SVDResult(wrap(u), s, wrap(vt)) + else: + s = eigvalsh(a) + s = abs(s) + return sort(s)[..., ::-1] + + _assert_stacked_2d(a) + t, result_t = _commonType(a) + + extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence) + + m, n = a.shape[-2:] + if compute_uv: + if full_matrices: + if m < n: + gufunc = _umath_linalg.svd_m_f + else: + gufunc = _umath_linalg.svd_n_f + else: + if m < n: + gufunc = _umath_linalg.svd_m_s + else: + gufunc = _umath_linalg.svd_n_s + + signature = 'D->DdD' if isComplexType(t) else 'd->ddd' + u, s, vh = gufunc(a, signature=signature, extobj=extobj) + u = u.astype(result_t, copy=False) + s = s.astype(_realType(result_t), copy=False) + vh = vh.astype(result_t, copy=False) + return SVDResult(wrap(u), s, wrap(vh)) + else: + if m < n: + gufunc = _umath_linalg.svd_m + else: + gufunc = _umath_linalg.svd_n + + signature = 'D->d' if isComplexType(t) else 'd->d' + s = gufunc(a, signature=signature, extobj=extobj) + s = s.astype(_realType(result_t), copy=False) + return s + + +def _cond_dispatcher(x, p=None): + return (x,) + + +@array_function_dispatch(_cond_dispatcher) +def cond(x, p=None): + """ + Compute the condition number of a matrix. + + This function is capable of returning the condition number using + one of seven different norms, depending on the value of `p` (see + Parameters below). + + Parameters + ---------- + x : (..., M, N) array_like + The matrix whose condition number is sought. 
+ p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional + Order of the norm used in the condition number computation: + + ===== ============================ + p norm for matrices + ===== ============================ + None 2-norm, computed directly using the ``SVD`` + 'fro' Frobenius norm + inf max(sum(abs(x), axis=1)) + -inf min(sum(abs(x), axis=1)) + 1 max(sum(abs(x), axis=0)) + -1 min(sum(abs(x), axis=0)) + 2 2-norm (largest sing. value) + -2 smallest singular value + ===== ============================ + + inf means the `numpy.inf` object, and the Frobenius norm is + the root-of-sum-of-squares norm. + + Returns + ------- + c : {float, inf} + The condition number of the matrix. May be infinite. + + See Also + -------- + numpy.linalg.norm + + Notes + ----- + The condition number of `x` is defined as the norm of `x` times the + norm of the inverse of `x` [1]_; the norm can be the usual L2-norm + (root-of-sum-of-squares) or one of a number of other matrix norms. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL, + Academic Press, Inc., 1980, pg. 285. + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) + >>> a + array([[ 1, 0, -1], + [ 0, 1, 0], + [ 1, 0, 1]]) + >>> LA.cond(a) + 1.4142135623730951 + >>> LA.cond(a, 'fro') + 3.1622776601683795 + >>> LA.cond(a, np.inf) + 2.0 + >>> LA.cond(a, -np.inf) + 1.0 + >>> LA.cond(a, 1) + 2.0 + >>> LA.cond(a, -1) + 1.0 + >>> LA.cond(a, 2) + 1.4142135623730951 + >>> LA.cond(a, -2) + 0.70710678118654746 # may vary + >>> min(LA.svd(a, compute_uv=False))*min(LA.svd(LA.inv(a), compute_uv=False)) + 0.70710678118654746 # may vary + + """ + x = asarray(x) # in case we have a matrix + if _is_empty_2d(x): + raise LinAlgError("cond is not defined on empty arrays") + if p is None or p == 2 or p == -2: + s = svd(x, compute_uv=False) + with errstate(all='ignore'): + if p == -2: + r = s[..., -1] / s[..., 0] + else: + r = s[..., 0] / s[..., -1] + else: + # Call inv(x) ignoring errors. The result array will + # contain nans in the entries where inversion failed. + _assert_stacked_2d(x) + _assert_stacked_square(x) + t, result_t = _commonType(x) + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(all='ignore'): + invx = _umath_linalg.inv(x, signature=signature) + r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1)) + r = r.astype(result_t, copy=False) + + # Convert nans to infs unless the original array had nan entries + r = asarray(r) + nan_mask = isnan(r) + if nan_mask.any(): + nan_mask &= ~isnan(x).any(axis=(-2, -1)) + if r.ndim > 0: + r[nan_mask] = Inf + elif nan_mask: + r[()] = Inf + + # Convention is to return scalars instead of 0d arrays + if r.ndim == 0: + r = r[()] + + return r + + +def _matrix_rank_dispatcher(A, tol=None, hermitian=None): + return (A,) + + +@array_function_dispatch(_matrix_rank_dispatcher) +def matrix_rank(A, tol=None, hermitian=False): + """ + Return matrix rank of array using SVD method + + Rank of the array is the number of singular values of the array that are + greater than `tol`. + + .. versionchanged:: 1.14 + Can now operate on stacks of matrices + + Parameters + ---------- + A : {(M,), (..., M, N)} array_like + Input vector or stack of matrices. + tol : (...) array_like, float, optional + Threshold below which SVD values are considered zero. 
If `tol` is + None, and ``S`` is an array with singular values for `M`, and + ``eps`` is the epsilon value for datatype of ``S``, then `tol` is + set to ``S.max() * max(M, N) * eps``. + + .. versionchanged:: 1.14 + Broadcasted against the stack of matrices + hermitian : bool, optional + If True, `A` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + + .. versionadded:: 1.14 + + Returns + ------- + rank : (...) array_like + Rank of A. + + Notes + ----- + The default threshold to detect rank deficiency is a test on the magnitude + of the singular values of `A`. By default, we identify singular values less + than ``S.max() * max(M, N) * eps`` as indicating rank deficiency (with + the symbols defined above). This is the algorithm MATLAB uses [1]. It also + appears in *Numerical recipes* in the discussion of SVD solutions for linear + least squares [2]. + + This default threshold is designed to detect rank deficiency accounting for + the numerical errors of the SVD computation. Imagine that there is a column + in `A` that is an exact (in floating point) linear combination of other + columns in `A`. Computing the SVD on `A` will not produce a singular value + exactly equal to 0 in general: any difference of the smallest SVD value from + 0 will be caused by numerical imprecision in the calculation of the SVD. + Our threshold for small SVD values takes this numerical imprecision into + account, and the default threshold will detect such numerical rank + deficiency. The threshold may declare a matrix `A` rank deficient even if + the linear combination of some columns of `A` is not exactly equal to + another column of `A` but only numerically very close to another column of + `A`. + + We chose our default threshold because it is in wide use. Other thresholds + are possible. For example, elsewhere in the 2007 edition of *Numerical + recipes* there is an alternative threshold of ``S.max() * + np.finfo(A.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe + this threshold as being based on "expected roundoff error" (p 71). + + The thresholds above deal with floating point roundoff error in the + calculation of the SVD. However, you may have more information about the + sources of error in `A` that would make you consider other tolerance values + to detect *effective* rank deficiency. The most useful measure of the + tolerance depends on the operations you intend to use on your matrix. For + example, if your data come from uncertain measurements with uncertainties + greater than floating point epsilon, choosing a tolerance near that + uncertainty may be preferable. The tolerance may be absolute if the + uncertainties are absolute rather than relative. + + References + ---------- + .. [1] MATLAB reference documentation, "Rank" + https://www.mathworks.com/help/techdoc/ref/rank.html + .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery, + "Numerical Recipes (3rd edition)", Cambridge University Press, 2007, + page 795. + + Examples + -------- + >>> from numpy.linalg import matrix_rank + >>> matrix_rank(np.eye(4)) # Full rank matrix + 4 + >>> I=np.eye(4); I[-1,-1] = 0. 
# rank deficient matrix + >>> matrix_rank(I) + 3 + >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0 + 1 + >>> matrix_rank(np.zeros((4,))) + 0 + """ + A = asarray(A) + if A.ndim < 2: + return int(not all(A==0)) + S = svd(A, compute_uv=False, hermitian=hermitian) + if tol is None: + tol = S.max(axis=-1, keepdims=True) * max(A.shape[-2:]) * finfo(S.dtype).eps + else: + tol = asarray(tol)[..., newaxis] + return count_nonzero(S > tol, axis=-1) + + +# Generalized inverse + +def _pinv_dispatcher(a, rcond=None, hermitian=None): + return (a,) + + +@array_function_dispatch(_pinv_dispatcher) +def pinv(a, rcond=1e-15, hermitian=False): + """ + Compute the (Moore-Penrose) pseudo-inverse of a matrix. + + Calculate the generalized inverse of a matrix using its + singular-value decomposition (SVD) and including all + *large* singular values. + + .. versionchanged:: 1.14 + Can now operate on stacks of matrices + + Parameters + ---------- + a : (..., M, N) array_like + Matrix or stack of matrices to be pseudo-inverted. + rcond : (...) array_like of float + Cutoff for small singular values. + Singular values less than or equal to + ``rcond * largest_singular_value`` are set to zero. + Broadcasts against the stack of matrices. + hermitian : bool, optional + If True, `a` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + + .. versionadded:: 1.17.0 + + Returns + ------- + B : (..., N, M) ndarray + The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so + is `B`. + + Raises + ------ + LinAlgError + If the SVD computation does not converge. + + See Also + -------- + scipy.linalg.pinv : Similar function in SciPy. + scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a + Hermitian matrix. + + Notes + ----- + The pseudo-inverse of a matrix A, denoted :math:`A^+`, is + defined as: "the matrix that 'solves' [the least-squares problem] + :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then + :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`. + + It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular + value decomposition of A, then + :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are + orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting + of A's so-called singular values, (followed, typically, by + zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix + consisting of the reciprocals of A's singular values + (again, followed by zeros). [1]_ + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pp. 139-142. 
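+
+    As a quick sketch of the SVD construction described above (the 3x2
+    matrix below is an arbitrary full-rank example):
+
+    >>> a = np.array([[1., 0.], [0., 0.], [0., 1.]])
+    >>> U, S, Vh = np.linalg.svd(a, full_matrices=False)
+    >>> B = (Vh.T * (1. / S)) @ U.T   # V @ diag(1/S) @ U^T
+    >>> np.allclose(B, np.linalg.pinv(a))
+    True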
+ + Examples + -------- + The following example checks that ``a * a+ * a == a`` and + ``a+ * a * a+ == a+``: + + >>> a = np.random.randn(9, 6) + >>> B = np.linalg.pinv(a) + >>> np.allclose(a, np.dot(a, np.dot(B, a))) + True + >>> np.allclose(B, np.dot(B, np.dot(a, B))) + True + + """ + a, wrap = _makearray(a) + rcond = asarray(rcond) + if _is_empty_2d(a): + m, n = a.shape[-2:] + res = empty(a.shape[:-2] + (n, m), dtype=a.dtype) + return wrap(res) + a = a.conjugate() + u, s, vt = svd(a, full_matrices=False, hermitian=hermitian) + + # discard small singular values + cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True) + large = s > cutoff + s = divide(1, s, where=large, out=s) + s[~large] = 0 + + res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u))) + return wrap(res) + + +# Determinant + + +@array_function_dispatch(_unary_dispatcher) +def slogdet(a): + """ + Compute the sign and (natural) logarithm of the determinant of an array. + + If an array has a very small or very large determinant, then a call to + `det` may overflow or underflow. This routine is more robust against such + issues, because it computes the logarithm of the determinant rather than + the determinant itself. + + Parameters + ---------- + a : (..., M, M) array_like + Input array, has to be a square 2-D array. + + Returns + ------- + A namedtuple with the following attributes: + + sign : (...) array_like + A number representing the sign of the determinant. For a real matrix, + this is 1, 0, or -1. For a complex matrix, this is a complex number + with absolute value 1 (i.e., it is on the unit circle), or else 0. + logabsdet : (...) array_like + The natural log of the absolute value of the determinant. + + If the determinant is zero, then `sign` will be 0 and `logabsdet` will be + -Inf. In all cases, the determinant is equal to ``sign * np.exp(logabsdet)``. + + See Also + -------- + det + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + .. versionadded:: 1.6.0 + + The determinant is computed via LU factorization using the LAPACK + routine ``z/dgetrf``. + + + Examples + -------- + The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``: + + >>> a = np.array([[1, 2], [3, 4]]) + >>> (sign, logabsdet) = np.linalg.slogdet(a) + >>> (sign, logabsdet) + (-1, 0.69314718055994529) # may vary + >>> sign * np.exp(logabsdet) + -2.0 + + Computing log-determinants for a stack of matrices: + + >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) + >>> a.shape + (3, 2, 2) + >>> sign, logabsdet = np.linalg.slogdet(a) + >>> (sign, logabsdet) + (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154])) + >>> sign * np.exp(logabsdet) + array([-2., -3., -8.]) + + This routine succeeds where ordinary `det` does not: + + >>> np.linalg.det(np.eye(500) * 0.1) + 0.0 + >>> np.linalg.slogdet(np.eye(500) * 0.1) + (1, -1151.2925464970228) + + """ + a = asarray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + real_t = _realType(result_t) + signature = 'D->Dd' if isComplexType(t) else 'd->dd' + sign, logdet = _umath_linalg.slogdet(a, signature=signature) + sign = sign.astype(result_t, copy=False) + logdet = logdet.astype(real_t, copy=False) + return SlogdetResult(sign, logdet) + + +@array_function_dispatch(_unary_dispatcher) +def det(a): + """ + Compute the determinant of an array. 
+ + Parameters + ---------- + a : (..., M, M) array_like + Input array to compute determinants for. + + Returns + ------- + det : (...) array_like + Determinant of `a`. + + See Also + -------- + slogdet : Another way to represent the determinant, more suitable + for large matrices where underflow/overflow may occur. + scipy.linalg.det : Similar function in SciPy. + + Notes + ----- + + .. versionadded:: 1.8.0 + + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The determinant is computed via LU factorization using the LAPACK + routine ``z/dgetrf``. + + Examples + -------- + The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: + + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.linalg.det(a) + -2.0 # may vary + + Computing determinants for a stack of matrices: + + >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) + >>> a.shape + (3, 2, 2) + >>> np.linalg.det(a) + array([-2., -3., -8.]) + + """ + a = asarray(a) + _assert_stacked_2d(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->D' if isComplexType(t) else 'd->d' + r = _umath_linalg.det(a, signature=signature) + r = r.astype(result_t, copy=False) + return r + + +# Linear Least Squares + +def _lstsq_dispatcher(a, b, rcond=None): + return (a, b) + + +@array_function_dispatch(_lstsq_dispatcher) +def lstsq(a, b, rcond="warn"): + r""" + Return the least-squares solution to a linear matrix equation. + + Computes the vector `x` that approximately solves the equation + ``a @ x = b``. The equation may be under-, well-, or over-determined + (i.e., the number of linearly independent rows of `a` can be less than, + equal to, or greater than its number of linearly independent columns). + If `a` is square and of full rank, then `x` (but for round-off error) + is the "exact" solution of the equation. Else, `x` minimizes the + Euclidean 2-norm :math:`||b - ax||`. If there are multiple minimizing + solutions, the one with the smallest 2-norm :math:`||x||` is returned. + + Parameters + ---------- + a : (M, N) array_like + "Coefficient" matrix. + b : {(M,), (M, K)} array_like + Ordinate or "dependent variable" values. If `b` is two-dimensional, + the least-squares solution is calculated for each of the `K` columns + of `b`. + rcond : float, optional + Cut-off ratio for small singular values of `a`. + For the purposes of rank determination, singular values are treated + as zero if they are smaller than `rcond` times the largest singular + value of `a`. + + .. versionchanged:: 1.14.0 + If not set, a FutureWarning is given. The previous default + of ``-1`` will use the machine precision as `rcond` parameter, + the new default will use the machine precision times `max(M, N)`. + To silence the warning and use the new default, use ``rcond=None``, + to keep using the old behavior, use ``rcond=-1``. + + Returns + ------- + x : {(N,), (N, K)} ndarray + Least-squares solution. If `b` is two-dimensional, + the solutions are in the `K` columns of `x`. + residuals : {(1,), (K,), (0,)} ndarray + Sums of squared residuals: Squared Euclidean 2-norm for each column in + ``b - a @ x``. + If the rank of `a` is < N or M <= N, this is an empty array. + If `b` is 1-dimensional, this is a (1,) shape array. + Otherwise the shape is (K,). + rank : int + Rank of matrix `a`. + s : (min(M, N),) ndarray + Singular values of `a`. + + Raises + ------ + LinAlgError + If computation does not converge. + + See Also + -------- + scipy.linalg.lstsq : Similar function in SciPy. 
+
+    Notes
+    -----
+    If `b` is a matrix, then all array results are returned as matrices.
+
+    Examples
+    --------
+    Fit a line, ``y = mx + c``, through some noisy data-points:
+
+    >>> x = np.array([0, 1, 2, 3])
+    >>> y = np.array([-1, 0.2, 0.9, 2.1])
+
+    By examining the coefficients, we see that the line should have a
+    gradient of roughly 1 and cut the y-axis at, more or less, -1.
+
+    We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
+    and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
+
+    >>> A = np.vstack([x, np.ones(len(x))]).T
+    >>> A
+    array([[ 0.,  1.],
+           [ 1.,  1.],
+           [ 2.,  1.],
+           [ 3.,  1.]])
+
+    >>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
+    >>> m, c
+    (1.0 -0.95) # may vary
+
+    Plot the data along with the fitted line:
+
+    >>> import matplotlib.pyplot as plt
+    >>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)
+    >>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')
+    >>> _ = plt.legend()
+    >>> plt.show()
+
+    """
+    a, _ = _makearray(a)
+    b, wrap = _makearray(b)
+    is_1d = b.ndim == 1
+    if is_1d:
+        b = b[:, newaxis]
+    _assert_2d(a, b)
+    m, n = a.shape[-2:]
+    m2, n_rhs = b.shape[-2:]
+    if m != m2:
+        raise LinAlgError('Incompatible dimensions')
+
+    t, result_t = _commonType(a, b)
+    result_real_t = _realType(result_t)
+
+    # Determine default rcond value
+    if rcond == "warn":
+        # 2017-08-19, 1.14.0
+        warnings.warn("`rcond` parameter will change to the default of "
+                      "machine precision times ``max(M, N)`` where M and N "
+                      "are the input matrix dimensions.\n"
+                      "To use the future default and silence this warning "
+                      "we advise to pass `rcond=None`, to keep using the old, "
+                      "explicitly pass `rcond=-1`.",
+                      FutureWarning, stacklevel=2)
+        rcond = -1
+    if rcond is None:
+        rcond = finfo(t).eps * max(n, m)
+
+    if m <= n:
+        gufunc = _umath_linalg.lstsq_m
+    else:
+        gufunc = _umath_linalg.lstsq_n
+
+    signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
+    extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
+    if n_rhs == 0:
+        # lapack can't handle n_rhs = 0 - so allocate the array one larger in that axis
+        b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
+    x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
+    if m == 0:
+        x[...] = 0
+    if n_rhs == 0:
+        # remove the item we added
+        x = x[..., :n_rhs]
+        resids = resids[..., :n_rhs]
+
+    # remove the axis we added
+    if is_1d:
+        x = x.squeeze(axis=-1)
+        # we probably should squeeze resids too, but we can't
+        # without breaking compatibility.
+
+    # as documented
+    if rank != n or m <= n:
+        resids = array([], result_real_t)
+
+    # coerce output arrays
+    s = s.astype(result_real_t, copy=False)
+    resids = resids.astype(result_real_t, copy=False)
+    x = x.astype(result_t, copy=True)  # Copying lets the memory in r_parts be freed
+    return wrap(x), wrap(resids), rank, s
+
+
+def _multi_svd_norm(x, row_axis, col_axis, op):
+    """Compute a function of the singular values of the 2-D matrices in `x`.
+
+    This is a private utility function used by `numpy.linalg.norm()`.
+
+    Parameters
+    ----------
+    x : ndarray
+    row_axis, col_axis : int
+        The axes of `x` that hold the 2-D matrices.
+    op : callable
+        This should be either `numpy.amin`, `numpy.amax` or `numpy.sum`.
+
+    Returns
+    -------
+    result : float or ndarray
+        If `x` is 2-D, the return value is a float.
+        Otherwise, it is an array with ``x.ndim - 2`` dimensions.
+ The return values are either the minimum or maximum or sum of the + singular values of the matrices, depending on whether `op` + is `numpy.amin` or `numpy.amax` or `numpy.sum`. + + """ + y = moveaxis(x, (row_axis, col_axis), (-2, -1)) + result = op(svd(y, compute_uv=False), axis=-1) + return result + + +def _norm_dispatcher(x, ord=None, axis=None, keepdims=None): + return (x,) + + +@array_function_dispatch(_norm_dispatcher) +def norm(x, ord=None, axis=None, keepdims=False): + """ + Matrix or vector norm. + + This function is able to return one of eight different matrix norms, + or one of an infinite number of vector norms (described below), depending + on the value of the ``ord`` parameter. + + Parameters + ---------- + x : array_like + Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord` + is None. If both `axis` and `ord` are None, the 2-norm of + ``x.ravel`` will be returned. + ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional + Order of the norm (see table under ``Notes``). inf means numpy's + `inf` object. The default is None. + axis : {None, int, 2-tuple of ints}, optional. + If `axis` is an integer, it specifies the axis of `x` along which to + compute the vector norms. If `axis` is a 2-tuple, it specifies the + axes that hold 2-D matrices, and the matrix norms of these matrices + are computed. If `axis` is None then either a vector norm (when `x` + is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default + is None. + + .. versionadded:: 1.8.0 + + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in the + result as dimensions with size one. With this option the result will + broadcast correctly against the original `x`. + + .. versionadded:: 1.10.0 + + Returns + ------- + n : float or ndarray + Norm of the matrix or vector(s). + + See Also + -------- + scipy.linalg.norm : Similar function in SciPy. + + Notes + ----- + For values of ``ord < 1``, the result is, strictly speaking, not a + mathematical 'norm', but it may still be useful for various numerical + purposes. + + The following norms can be calculated: + + ===== ============================ ========================== + ord norm for matrices norm for vectors + ===== ============================ ========================== + None Frobenius norm 2-norm + 'fro' Frobenius norm -- + 'nuc' nuclear norm -- + inf max(sum(abs(x), axis=1)) max(abs(x)) + -inf min(sum(abs(x), axis=1)) min(abs(x)) + 0 -- sum(x != 0) + 1 max(sum(abs(x), axis=0)) as below + -1 min(sum(abs(x), axis=0)) as below + 2 2-norm (largest sing. value) as below + -2 smallest singular value as below + other -- sum(abs(x)**ord)**(1./ord) + ===== ============================ ========================== + + The Frobenius norm is given by [1]_: + + :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` + + The nuclear norm is the sum of the singular values. + + Both the Frobenius and nuclear norm orders are only defined for + matrices and raise a ValueError when ``x.ndim != 2``. + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, + Baltimore, MD, Johns Hopkins University Press, 1985, pg. 
15 + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, ..., 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> LA.norm(a) + 7.745966692414834 + >>> LA.norm(b) + 7.745966692414834 + >>> LA.norm(b, 'fro') + 7.745966692414834 + >>> LA.norm(a, np.inf) + 4.0 + >>> LA.norm(b, np.inf) + 9.0 + >>> LA.norm(a, -np.inf) + 0.0 + >>> LA.norm(b, -np.inf) + 2.0 + + >>> LA.norm(a, 1) + 20.0 + >>> LA.norm(b, 1) + 7.0 + >>> LA.norm(a, -1) + -4.6566128774142013e-010 + >>> LA.norm(b, -1) + 6.0 + >>> LA.norm(a, 2) + 7.745966692414834 + >>> LA.norm(b, 2) + 7.3484692283495345 + + >>> LA.norm(a, -2) + 0.0 + >>> LA.norm(b, -2) + 1.8570331885190563e-016 # may vary + >>> LA.norm(a, 3) + 5.8480354764257312 # may vary + >>> LA.norm(a, -3) + 0.0 + + Using the `axis` argument to compute vector norms: + + >>> c = np.array([[ 1, 2, 3], + ... [-1, 1, 4]]) + >>> LA.norm(c, axis=0) + array([ 1.41421356, 2.23606798, 5. ]) + >>> LA.norm(c, axis=1) + array([ 3.74165739, 4.24264069]) + >>> LA.norm(c, ord=1, axis=1) + array([ 6., 6.]) + + Using the `axis` argument to compute matrix norms: + + >>> m = np.arange(8).reshape(2,2,2) + >>> LA.norm(m, axis=(1,2)) + array([ 3.74165739, 11.22497216]) + >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) + (3.7416573867739413, 11.224972160321824) + + """ + x = asarray(x) + + if not issubclass(x.dtype.type, (inexact, object_)): + x = x.astype(float) + + # Immediately handle some default, simple, fast, and common cases. + if axis is None: + ndim = x.ndim + if ((ord is None) or + (ord in ('f', 'fro') and ndim == 2) or + (ord == 2 and ndim == 1)): + + x = x.ravel(order='K') + if isComplexType(x.dtype.type): + x_real = x.real + x_imag = x.imag + sqnorm = x_real.dot(x_real) + x_imag.dot(x_imag) + else: + sqnorm = x.dot(x) + ret = sqrt(sqnorm) + if keepdims: + ret = ret.reshape(ndim*[1]) + return ret + + # Normalize the `axis` argument to a tuple. 
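+    # (e.g. an integer axis=0 becomes (0,) below, while a 2-tuple such as
+    # axis=(-2, -1) selects the two axes that hold the 2-D matrices)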
+ nd = x.ndim + if axis is None: + axis = tuple(range(nd)) + elif not isinstance(axis, tuple): + try: + axis = int(axis) + except Exception as e: + raise TypeError("'axis' must be None, an integer or a tuple of integers") from e + axis = (axis,) + + if len(axis) == 1: + if ord == Inf: + return abs(x).max(axis=axis, keepdims=keepdims) + elif ord == -Inf: + return abs(x).min(axis=axis, keepdims=keepdims) + elif ord == 0: + # Zero norm + return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims) + elif ord == 1: + # special case for speedup + return add.reduce(abs(x), axis=axis, keepdims=keepdims) + elif ord is None or ord == 2: + # special case for speedup + s = (x.conj() * x).real + return sqrt(add.reduce(s, axis=axis, keepdims=keepdims)) + # None of the str-type keywords for ord ('fro', 'nuc') + # are valid for vectors + elif isinstance(ord, str): + raise ValueError(f"Invalid norm order '{ord}' for vectors") + else: + absx = abs(x) + absx **= ord + ret = add.reduce(absx, axis=axis, keepdims=keepdims) + ret **= reciprocal(ord, dtype=ret.dtype) + return ret + elif len(axis) == 2: + row_axis, col_axis = axis + row_axis = normalize_axis_index(row_axis, nd) + col_axis = normalize_axis_index(col_axis, nd) + if row_axis == col_axis: + raise ValueError('Duplicate axes given.') + if ord == 2: + ret = _multi_svd_norm(x, row_axis, col_axis, amax) + elif ord == -2: + ret = _multi_svd_norm(x, row_axis, col_axis, amin) + elif ord == 1: + if col_axis > row_axis: + col_axis -= 1 + ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis) + elif ord == Inf: + if row_axis > col_axis: + row_axis -= 1 + ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis) + elif ord == -1: + if col_axis > row_axis: + col_axis -= 1 + ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis) + elif ord == -Inf: + if row_axis > col_axis: + row_axis -= 1 + ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis) + elif ord in [None, 'fro', 'f']: + ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) + elif ord == 'nuc': + ret = _multi_svd_norm(x, row_axis, col_axis, sum) + else: + raise ValueError("Invalid norm order for matrices.") + if keepdims: + ret_shape = list(x.shape) + ret_shape[axis[0]] = 1 + ret_shape[axis[1]] = 1 + ret = ret.reshape(ret_shape) + return ret + else: + raise ValueError("Improper number of dimensions to norm.") + + +# multi_dot + +def _multidot_dispatcher(arrays, *, out=None): + yield from arrays + yield out + + +@array_function_dispatch(_multidot_dispatcher) +def multi_dot(arrays, *, out=None): + """ + Compute the dot product of two or more arrays in a single function call, + while automatically selecting the fastest evaluation order. + + `multi_dot` chains `numpy.dot` and uses optimal parenthesization + of the matrices [1]_ [2]_. Depending on the shapes of the matrices, + this can speed up the multiplication a lot. + + If the first argument is 1-D it is treated as a row vector. + If the last argument is 1-D it is treated as a column vector. + The other arguments must be 2-D. + + Think of `multi_dot` as:: + + def multi_dot(arrays): return functools.reduce(np.dot, arrays) + + + Parameters + ---------- + arrays : sequence of array_like + If the first argument is 1-D it is treated as row vector. + If the last argument is 1-D it is treated as column vector. + The other arguments must be 2-D. + out : ndarray, optional + Output argument. This must have the exact kind that would be returned + if it was not used. 
In particular, it must have the right type, must be
+        C-contiguous, and its dtype must be the dtype that would be returned
+        for `dot(a, b)`. This is a performance feature. Therefore, if these
+        conditions are not met, an exception is raised, instead of attempting
+        to be flexible.
+
+        .. versionadded:: 1.19.0
+
+    Returns
+    -------
+    output : ndarray
+        Returns the dot product of the supplied arrays.
+
+    See Also
+    --------
+    numpy.dot : dot multiplication with two arguments.
+
+    References
+    ----------
+
+    .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
+    .. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication
+
+    Examples
+    --------
+    `multi_dot` allows you to write::
+
+    >>> from numpy.linalg import multi_dot
+    >>> # Prepare some data
+    >>> A = np.random.random((10000, 100))
+    >>> B = np.random.random((100, 1000))
+    >>> C = np.random.random((1000, 5))
+    >>> D = np.random.random((5, 333))
+    >>> # the actual dot multiplication
+    >>> _ = multi_dot([A, B, C, D])
+
+    instead of::
+
+    >>> _ = np.dot(np.dot(np.dot(A, B), C), D)
+    >>> # or
+    >>> _ = A.dot(B).dot(C).dot(D)
+
+    Notes
+    -----
+    The cost for a matrix multiplication can be calculated with the
+    following function::
+
+        def cost(A, B):
+            return A.shape[0] * A.shape[1] * B.shape[1]
+
+    Assume we have three matrices
+    :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
+
+    The costs for the two different parenthesizations are as follows::
+
+        cost((AB)C) = 10*100*5 + 10*5*50   = 5000 + 2500   = 7500
+        cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
+
+    """
+    n = len(arrays)
+    # optimization only makes sense for len(arrays) > 2
+    if n < 2:
+        raise ValueError("Expecting at least two arrays.")
+    elif n == 2:
+        return dot(arrays[0], arrays[1], out=out)
+
+    arrays = [asanyarray(a) for a in arrays]
+
+    # save original ndim to reshape the result array into the proper form later
+    ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
+    # Explicitly convert vectors to 2D arrays to keep the logic of the internal
+    # _multi_dot_* functions as simple as possible.
+    if arrays[0].ndim == 1:
+        arrays[0] = atleast_2d(arrays[0])
+    if arrays[-1].ndim == 1:
+        arrays[-1] = atleast_2d(arrays[-1]).T
+    _assert_2d(*arrays)
+
+    # _multi_dot_three is much faster than _multi_dot_matrix_chain_order
+    if n == 3:
+        result = _multi_dot_three(arrays[0], arrays[1], arrays[2], out=out)
+    else:
+        order = _multi_dot_matrix_chain_order(arrays)
+        result = _multi_dot(arrays, order, 0, n - 1, out=out)
+
+    # return proper shape
+    if ndim_first == 1 and ndim_last == 1:
+        return result[0, 0]  # scalar
+    elif ndim_first == 1 or ndim_last == 1:
+        return result.ravel()  # 1-D
+    else:
+        return result
+
+
+def _multi_dot_three(A, B, C, out=None):
+    """
+    Find the best order for three arrays and do the multiplication.
+
+    For three arguments `_multi_dot_three` is approximately 15 times faster
+    than `_multi_dot_matrix_chain_order`.
+
+    """
+    a0, a1b0 = A.shape
+    b1c0, c1 = C.shape
+    # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
+    cost1 = a0 * b1c0 * (a1b0 + c1)
+    # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
+    cost2 = a1b0 * c1 * (a0 + b1c0)
+
+    if cost1 < cost2:
+        return dot(dot(A, B), C, out=out)
+    else:
+        return dot(A, dot(B, C), out=out)
+
+
+def _multi_dot_matrix_chain_order(arrays, return_costs=False):
+    """
+    Return a np.array that encodes the optimal order of multiplications.
+
+    The optimal order array is then used by `_multi_dot()` to do the
+    multiplication.
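+
+    For example, for the shapes used in the `multi_dot` Notes above
+    (``A_{10x100}, B_{100x5}, C_{5x50}``), the returned order encodes the
+    cheaper ``(AB)C`` parenthesization (7500 versus 75000 scalar
+    multiplications).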
+
+    Also return the cost matrix if `return_costs` is `True`.
+
+    The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
+    Chapter 15.2, p. 370-378.  Note that Cormen uses 1-based indices.
+
+        cost[i, j] = min([
+            cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
+            for k in range(i, j)])
+
+    """
+    n = len(arrays)
+    # p stores the dimensions of the matrices
+    # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
+    p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
+    # m is a matrix of costs of the subproblems
+    # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
+    m = zeros((n, n), dtype=double)
+    # s is the actual ordering
+    # s[i, j] is the value of k at which we split the product A_i..A_j
+    s = empty((n, n), dtype=intp)
+
+    for l in range(1, n):
+        for i in range(n - l):
+            j = i + l
+            m[i, j] = Inf
+            for k in range(i, j):
+                q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
+                if q < m[i, j]:
+                    m[i, j] = q
+                    s[i, j] = k  # Note that Cormen uses 1-based index
+
+    return (s, m) if return_costs else s
+
+
+def _multi_dot(arrays, order, i, j, out=None):
+    """Actually do the multiplication with the given order."""
+    if i == j:
+        # the initial call with non-None out should never get here
+        assert out is None
+
+        return arrays[i]
+    else:
+        return dot(_multi_dot(arrays, order, i, order[i, j]),
+                   _multi_dot(arrays, order, order[i, j] + 1, j),
+                   out=out)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/linalg.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/linalg.pyi
new file mode 100644
index 00000000..c0b2f29b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/linalg.pyi
@@ -0,0 +1,297 @@
+from collections.abc import Iterable
+from typing import (
+    Literal as L,
+    overload,
+    TypeVar,
+    Any,
+    SupportsIndex,
+    SupportsInt,
+    NamedTuple,
+    Generic,
+)
+
+from numpy import (
+    generic,
+    floating,
+    complexfloating,
+    int32,
+    float64,
+    complex128,
+)
+
+from numpy.linalg import LinAlgError as LinAlgError
+
+from numpy._typing import (
+    NDArray,
+    ArrayLike,
+    _ArrayLikeInt_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeComplex_co,
+    _ArrayLikeTD64_co,
+    _ArrayLikeObject_co,
+)
+
+_T = TypeVar("_T")
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+_SCT = TypeVar("_SCT", bound=generic, covariant=True)
+_SCT2 = TypeVar("_SCT2", bound=generic, covariant=True)
+
+_2Tuple = tuple[_T, _T]
+_ModeKind = L["reduced", "complete", "r", "raw"]
+
+__all__: list[str]
+
+class EigResult(NamedTuple):
+    eigenvalues: NDArray[Any]
+    eigenvectors: NDArray[Any]
+
+class EighResult(NamedTuple):
+    eigenvalues: NDArray[Any]
+    eigenvectors: NDArray[Any]
+
+class QRResult(NamedTuple):
+    Q: NDArray[Any]
+    R: NDArray[Any]
+
+class SlogdetResult(NamedTuple):
+    # TODO: `sign` and `logabsdet` are scalars for input 2D arrays and
+    # `(x.ndim - 2)`-dimensional arrays otherwise
+    sign: Any
+    logabsdet: Any
+
+class SVDResult(NamedTuple):
+    U: NDArray[Any]
+    S: NDArray[Any]
+    Vh: NDArray[Any]
+
+@overload
+def tensorsolve(
+    a: _ArrayLikeInt_co,
+    b: _ArrayLikeInt_co,
+    axes: None | Iterable[int] =...,
+) -> NDArray[float64]: ...
+@overload
+def tensorsolve(
+    a: _ArrayLikeFloat_co,
+    b: _ArrayLikeFloat_co,
+    axes: None | Iterable[int] =...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def tensorsolve(
+    a: _ArrayLikeComplex_co,
+    b: _ArrayLikeComplex_co,
+    axes: None | Iterable[int] =...,
+) -> NDArray[complexfloating[Any, Any]]: ...
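+
+# Note: the overload ladder above (integer input -> float64, floating input
+# -> floating, complex input -> complexfloating) mirrors the implementation,
+# which promotes integer arrays to double precision before calling LAPACK;
+# most of the functions below repeat the same pattern.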
+ +@overload +def solve( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, +) -> NDArray[float64]: ... +@overload +def solve( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, +) -> NDArray[floating[Any]]: ... +@overload +def solve( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, +) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def tensorinv( + a: _ArrayLikeInt_co, + ind: int = ..., +) -> NDArray[float64]: ... +@overload +def tensorinv( + a: _ArrayLikeFloat_co, + ind: int = ..., +) -> NDArray[floating[Any]]: ... +@overload +def tensorinv( + a: _ArrayLikeComplex_co, + ind: int = ..., +) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ... +@overload +def inv(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +@overload +def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +# TODO: The supported input and output dtypes are dependent on the value of `n`. +# For example: `n < 0` always casts integer types to float64 +def matrix_power( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + n: SupportsIndex, +) -> NDArray[Any]: ... + +@overload +def cholesky(a: _ArrayLikeInt_co) -> NDArray[float64]: ... +@overload +def cholesky(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +@overload +def cholesky(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def qr(a: _ArrayLikeInt_co, mode: _ModeKind = ...) -> QRResult: ... +@overload +def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = ...) -> QRResult: ... +@overload +def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = ...) -> QRResult: ... + +@overload +def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ... +@overload +def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]] | NDArray[complexfloating[Any, Any]]: ... +@overload +def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + +@overload +def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[float64]: ... +@overload +def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[floating[Any]]: ... + +@overload +def eig(a: _ArrayLikeInt_co) -> EigResult: ... +@overload +def eig(a: _ArrayLikeFloat_co) -> EigResult: ... +@overload +def eig(a: _ArrayLikeComplex_co) -> EigResult: ... + +@overload +def eigh( + a: _ArrayLikeInt_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> EighResult: ... +@overload +def eigh( + a: _ArrayLikeFloat_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> EighResult: ... +@overload +def eigh( + a: _ArrayLikeComplex_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> EighResult: ... + +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> SVDResult: ... +@overload +def svd( + a: _ArrayLikeFloat_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> SVDResult: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> SVDResult: ... +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool = ..., + compute_uv: L[False] = ..., + hermitian: bool = ..., +) -> NDArray[float64]: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool = ..., + compute_uv: L[False] = ..., + hermitian: bool = ..., +) -> NDArray[floating[Any]]: ... 
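+
+# For example, the `svd` overloads above type the default call as an
+# `SVDResult` namedtuple, while `compute_uv=False` (the `L[False]` overloads)
+# narrows the return to a bare array of singular values.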
+
+# TODO: Returns a scalar for 2D arrays and
+# a `(x.ndim - 2)`-dimensional array otherwise
+def cond(x: _ArrayLikeComplex_co, p: None | float | L["fro", "nuc"] = ...) -> Any: ...

+# TODO: Returns `int` for <2D arrays and `intp` otherwise
+def matrix_rank(
+    A: _ArrayLikeComplex_co,
+    tol: None | _ArrayLikeFloat_co = ...,
+    hermitian: bool = ...,
+) -> Any: ...
+
+@overload
+def pinv(
+    a: _ArrayLikeInt_co,
+    rcond: _ArrayLikeFloat_co = ...,
+    hermitian: bool = ...,
+) -> NDArray[float64]: ...
+@overload
+def pinv(
+    a: _ArrayLikeFloat_co,
+    rcond: _ArrayLikeFloat_co = ...,
+    hermitian: bool = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def pinv(
+    a: _ArrayLikeComplex_co,
+    rcond: _ArrayLikeFloat_co = ...,
+    hermitian: bool = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+
+# TODO: Returns a 2-tuple of scalars for 2D arrays and
+# a 2-tuple of `(a.ndim - 2)`-dimensional arrays otherwise
+def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ...
+
+# TODO: Returns a 2-tuple of scalars for 2D arrays and
+# a 2-tuple of `(a.ndim - 2)`-dimensional arrays otherwise
+def det(a: _ArrayLikeComplex_co) -> Any: ...
+
+@overload
+def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: None | float = ...) -> tuple[
+    NDArray[float64],
+    NDArray[float64],
+    int32,
+    NDArray[float64],
+]: ...
+@overload
+def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: None | float = ...) -> tuple[
+    NDArray[floating[Any]],
+    NDArray[floating[Any]],
+    int32,
+    NDArray[floating[Any]],
+]: ...
+@overload
+def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: None | float = ...) -> tuple[
+    NDArray[complexfloating[Any, Any]],
+    NDArray[floating[Any]],
+    int32,
+    NDArray[floating[Any]],
+]: ...
+
+@overload
+def norm(
+    x: ArrayLike,
+    ord: None | float | L["fro", "nuc"] = ...,
+    axis: None = ...,
+    keepdims: bool = ...,
+) -> floating[Any]: ...
+@overload
+def norm(
+    x: ArrayLike,
+    ord: None | float | L["fro", "nuc"] = ...,
+    axis: SupportsInt | SupportsIndex | tuple[int, ...] = ...,
+    keepdims: bool = ...,
+) -> Any: ...
+
+# TODO: Returns a scalar or array
+def multi_dot(
+    arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co],
+    *,
+    out: None | NDArray[Any] = ...,
+) -> Any: ...
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/test_deprecations.py b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/test_deprecations.py
new file mode 100644
index 00000000..cd4c1083
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/test_deprecations.py
@@ -0,0 +1,20 @@
+"""Test deprecation and future warnings.
+
+"""
+import numpy as np
+from numpy.testing import assert_warns
+
+
+def test_qr_mode_full_future_warning():
+    """Check mode='full' FutureWarning.
+
+    In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were
+    deprecated. The release date will probably be sometime in the summer
+    of 2013.
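+
+    (These modes now raise a DeprecationWarning, which is what the checks
+    below assert.)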
+
+    """
+    a = np.eye(2)
+    assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full')
+    assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f')
+    assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic')
+    assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e')
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/test_linalg.py b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/test_linalg.py
new file mode 100644
index 00000000..5dabdfdf
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/test_linalg.py
@@ -0,0 +1,2198 @@
+""" Test functions for linalg module
+
+"""
+import os
+import sys
+import itertools
+import traceback
+import textwrap
+import subprocess
+import pytest
+
+import numpy as np
+from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
+from numpy.core import swapaxes
+from numpy import multiply, atleast_2d, inf, asarray
+from numpy import linalg
+from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
+from numpy.linalg.linalg import _multi_dot_matrix_chain_order
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_array_equal,
+    assert_almost_equal, assert_allclose, suppress_warnings,
+    assert_raises_regex, HAS_LAPACK64, IS_WASM
+    )
+try:
+    import numpy.linalg.lapack_lite
+except ImportError:
+    # May be broken when numpy was built without BLAS/LAPACK present
+    # If so, ensure we don't break the whole test suite - the `lapack_lite`
+    # submodule should be removed, it's only used in two tests in this file.
+    pass
+
+
+def consistent_subclass(out, in_):
+    # For ndarray subclass input, our output should have the same subclass
+    # (non-ndarray input gets converted to ndarray).
+    return type(out) is (type(in_) if isinstance(in_, np.ndarray)
+                         else np.ndarray)
+
+
+old_assert_almost_equal = assert_almost_equal
+
+
+def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
+    if asarray(a).dtype.type in (single, csingle):
+        decimal = single_decimal
+    else:
+        decimal = double_decimal
+    old_assert_almost_equal(a, b, decimal=decimal, **kw)
+
+
+def get_real_dtype(dtype):
+    return {single: single, double: double,
+            csingle: single, cdouble: double}[dtype]
+
+
+def get_complex_dtype(dtype):
+    return {single: csingle, double: cdouble,
+            csingle: csingle, cdouble: cdouble}[dtype]
+
+
+def get_rtol(dtype):
+    # Choose a safe rtol
+    if dtype in (single, csingle):
+        return 1e-5
+    else:
+        return 1e-11
+
+
+# used to categorize tests
+all_tags = {
+    'square', 'nonsquare', 'hermitian',  # mutually exclusive
+    'generalized', 'size-0', 'strided'  # optional additions
+}
+
+
+class LinalgCase:
+    def __init__(self, name, a, b, tags=set()):
+        """
+        A bundle of arguments to be passed to a test case, with an identifying
+        name, the operands a and b, and a set of tags to filter the tests
+        """
+        assert_(isinstance(name, str))
+        self.name = name
+        self.a = a
+        self.b = b
+        self.tags = frozenset(tags)  # prevent shared tags
+
+    def check(self, do):
+        """
+        Run the function `do` on this test case, expanding arguments
+        """
+        do(self.a, self.b, tags=self.tags)
+
+    def __repr__(self):
+        return f'<LinalgCase: {self.name}>'
+
+
+def apply_tag(tag, cases):
+    """
+    Add the given tag (a string) to each of the cases (a list of LinalgCase
+    objects)
+    """
+    assert tag in all_tags, "Invalid tag"
+    for case in cases:
+        case.tags = case.tags | {tag}
+    return cases
+
+
+#
+# Base test cases
+#
+
+np.random.seed(1234)
+
+CASES = []
+
+# square test cases
+CASES += apply_tag('square', [
+
LinalgCase("single", + array([[1., 2.], [3., 4.]], dtype=single), + array([2., 1.], dtype=single)), + LinalgCase("double", + array([[1., 2.], [3., 4.]], dtype=double), + array([2., 1.], dtype=double)), + LinalgCase("double_2", + array([[1., 2.], [3., 4.]], dtype=double), + array([[2., 1., 4.], [3., 4., 6.]], dtype=double)), + LinalgCase("csingle", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle), + array([2. + 1j, 1. + 2j], dtype=csingle)), + LinalgCase("cdouble", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), + array([2. + 1j, 1. + 2j], dtype=cdouble)), + LinalgCase("cdouble_2", + array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)), + LinalgCase("0x0", + np.empty((0, 0), dtype=double), + np.empty((0,), dtype=double), + tags={'size-0'}), + LinalgCase("8x8", + np.random.rand(8, 8), + np.random.rand(8)), + LinalgCase("1x1", + np.random.rand(1, 1), + np.random.rand(1)), + LinalgCase("nonarray", + [[1, 2], [3, 4]], + [2, 1]), +]) + +# non-square test-cases +CASES += apply_tag('nonsquare', [ + LinalgCase("single_nsq_1", + array([[1., 2., 3.], [3., 4., 6.]], dtype=single), + array([2., 1.], dtype=single)), + LinalgCase("single_nsq_2", + array([[1., 2.], [3., 4.], [5., 6.]], dtype=single), + array([2., 1., 3.], dtype=single)), + LinalgCase("double_nsq_1", + array([[1., 2., 3.], [3., 4., 6.]], dtype=double), + array([2., 1.], dtype=double)), + LinalgCase("double_nsq_2", + array([[1., 2.], [3., 4.], [5., 6.]], dtype=double), + array([2., 1., 3.], dtype=double)), + LinalgCase("csingle_nsq_1", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle), + array([2. + 1j, 1. + 2j], dtype=csingle)), + LinalgCase("csingle_nsq_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle), + array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)), + LinalgCase("cdouble_nsq_1", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + array([2. + 1j, 1. + 2j], dtype=cdouble)), + LinalgCase("cdouble_nsq_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)), + LinalgCase("cdouble_nsq_1_2", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + LinalgCase("cdouble_nsq_2_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([[2. + 1j, 1. 
+ 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + LinalgCase("8x11", + np.random.rand(8, 11), + np.random.rand(8)), + LinalgCase("1x5", + np.random.rand(1, 5), + np.random.rand(1)), + LinalgCase("5x1", + np.random.rand(5, 1), + np.random.rand(5)), + LinalgCase("0x4", + np.random.rand(0, 4), + np.random.rand(0), + tags={'size-0'}), + LinalgCase("4x0", + np.random.rand(4, 0), + np.random.rand(4), + tags={'size-0'}), +]) + +# hermitian test-cases +CASES += apply_tag('hermitian', [ + LinalgCase("hsingle", + array([[1., 2.], [2., 1.]], dtype=single), + None), + LinalgCase("hdouble", + array([[1., 2.], [2., 1.]], dtype=double), + None), + LinalgCase("hcsingle", + array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle), + None), + LinalgCase("hcdouble", + array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble), + None), + LinalgCase("hempty", + np.empty((0, 0), dtype=double), + None, + tags={'size-0'}), + LinalgCase("hnonarray", + [[1, 2], [2, 1]], + None), + LinalgCase("matrix_b_only", + array([[1., 2.], [2., 1.]]), + None), + LinalgCase("hmatrix_1x1", + np.random.rand(1, 1), + None), +]) + + +# +# Gufunc test cases +# +def _make_generalized_cases(): + new_cases = [] + + for case in CASES: + if not isinstance(case.a, np.ndarray): + continue + + a = np.array([case.a, 2 * case.a, 3 * case.a]) + if case.b is None: + b = None + else: + b = np.array([case.b, 7 * case.b, 6 * case.b]) + new_case = LinalgCase(case.name + "_tile3", a, b, + tags=case.tags | {'generalized'}) + new_cases.append(new_case) + + a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape) + if case.b is None: + b = None + else: + b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape) + new_case = LinalgCase(case.name + "_tile213", a, b, + tags=case.tags | {'generalized'}) + new_cases.append(new_case) + + return new_cases + + +CASES += _make_generalized_cases() + + +# +# Generate stride combination variations of the above +# +def _stride_comb_iter(x): + """ + Generate cartesian product of strides for all axes + """ + + if not isinstance(x, np.ndarray): + yield x, "nop" + return + + stride_set = [(1,)] * x.ndim + stride_set[-1] = (1, 3, -4) + if x.ndim > 1: + stride_set[-2] = (1, 3, -4) + if x.ndim > 2: + stride_set[-3] = (1, -4) + + for repeats in itertools.product(*tuple(stride_set)): + new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)] + slices = tuple([slice(None, None, repeat) for repeat in repeats]) + + # new array with different strides, but same data + xi = np.empty(new_shape, dtype=x.dtype) + xi.view(np.uint32).fill(0xdeadbeef) + xi = xi[slices] + xi[...] 
= x + xi = xi.view(x.__class__) + assert_(np.all(xi == x)) + yield xi, "stride_" + "_".join(["%+d" % j for j in repeats]) + + # generate also zero strides if possible + if x.ndim >= 1 and x.shape[-1] == 1: + s = list(x.strides) + s[-1] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0" + if x.ndim >= 2 and x.shape[-2] == 1: + s = list(x.strides) + s[-2] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0_x" + if x.ndim >= 2 and x.shape[:-2] == (1, 1): + s = list(x.strides) + s[-1] = 0 + s[-2] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0_0" + + +def _make_strided_cases(): + new_cases = [] + for case in CASES: + for a, a_label in _stride_comb_iter(case.a): + for b, b_label in _stride_comb_iter(case.b): + new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b, + tags=case.tags | {'strided'}) + new_cases.append(new_case) + return new_cases + + +CASES += _make_strided_cases() + + +# +# Test different routines against the above cases +# +class LinalgTestCase: + TEST_CASES = CASES + + def check_cases(self, require=set(), exclude=set()): + """ + Run func on each of the cases with all of the tags in require, and none + of the tags in exclude + """ + for case in self.TEST_CASES: + # filter by require and exclude + if case.tags & require != require: + continue + if case.tags & exclude: + continue + + try: + case.check(self.do) + except Exception as e: + msg = f'In test case: {case!r}\n\n' + msg += traceback.format_exc() + raise AssertionError(msg) from e + + +class LinalgSquareTestCase(LinalgTestCase): + + def test_sq_cases(self): + self.check_cases(require={'square'}, + exclude={'generalized', 'size-0'}) + + def test_empty_sq_cases(self): + self.check_cases(require={'square', 'size-0'}, + exclude={'generalized'}) + + +class LinalgNonsquareTestCase(LinalgTestCase): + + def test_nonsq_cases(self): + self.check_cases(require={'nonsquare'}, + exclude={'generalized', 'size-0'}) + + def test_empty_nonsq_cases(self): + self.check_cases(require={'nonsquare', 'size-0'}, + exclude={'generalized'}) + + +class HermitianTestCase(LinalgTestCase): + + def test_herm_cases(self): + self.check_cases(require={'hermitian'}, + exclude={'generalized', 'size-0'}) + + def test_empty_herm_cases(self): + self.check_cases(require={'hermitian', 'size-0'}, + exclude={'generalized'}) + + +class LinalgGeneralizedSquareTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_sq_cases(self): + self.check_cases(require={'generalized', 'square'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_sq_cases(self): + self.check_cases(require={'generalized', 'square', 'size-0'}) + + +class LinalgGeneralizedNonsquareTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_nonsq_cases(self): + self.check_cases(require={'generalized', 'nonsquare'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_nonsq_cases(self): + self.check_cases(require={'generalized', 'nonsquare', 'size-0'}) + + +class HermitianGeneralizedTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_herm_cases(self): + self.check_cases(require={'generalized', 'hermitian'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_herm_cases(self): + self.check_cases(require={'generalized', 'hermitian', 'size-0'}, + exclude={'none'}) + + +def dot_generalized(a, b): + a = asarray(a) + if a.ndim >= 3: + if a.ndim == b.ndim: + # matrix x matrix + new_shape = 
a.shape[:-1] + b.shape[-1:] + elif a.ndim == b.ndim + 1: + # matrix x vector + new_shape = a.shape[:-1] + else: + raise ValueError("Not implemented...") + r = np.empty(new_shape, dtype=np.common_type(a, b)) + for c in itertools.product(*map(range, a.shape[:-2])): + r[c] = dot(a[c], b[c]) + return r + else: + return dot(a, b) + + +def identity_like_generalized(a): + a = asarray(a) + if a.ndim >= 3: + r = np.empty(a.shape, dtype=a.dtype) + r[...] = identity(a.shape[-2]) + return r + else: + return identity(a.shape[0]) + + +class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # kept apart from TestSolve for use for testing with matrices. + def do(self, a, b, tags): + x = linalg.solve(a, b) + assert_almost_equal(b, dot_generalized(a, x)) + assert_(consistent_subclass(x, b)) + + +class TestSolve(SolveCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.solve(x, x).dtype, dtype) + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + # Test system of 0x0 matrices + a = np.arange(8).reshape(2, 2, 2) + b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, 0:0, :] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # Test errors for non-square and only b's dimension being 0 + assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b) + assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :]) + + # Test broadcasting error + b = np.arange(6).reshape(1, 3, 2) # broadcasting error + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + + # Test zero "single equations" with 0x0 matrices. + b = np.arange(2).reshape(1, 2).view(ArraySubclass) + expected = linalg.solve(a, b)[:, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + b = np.arange(3).reshape(1, 3) + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b) + + def test_0_size_k(self): + # test zero multiple equation (K=0) case. + class ArraySubclass(np.ndarray): + pass + a = np.arange(4).reshape(1, 2, 2) + b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, :, 0:0] + result = linalg.solve(a, b[:, :, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # test both zero. 
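+        # (both the matrix block and the right-hand side are sliced to size 0)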
+ expected = linalg.solve(a, b)[:, 0:0, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + +class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + a_inv = linalg.inv(a) + assert_almost_equal(dot_generalized(a, a_inv), + identity_like_generalized(a)) + assert_(consistent_subclass(a_inv, a)) + + +class TestInv(InvCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.inv(x).dtype, dtype) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res.shape) + assert_(isinstance(res, ArraySubclass)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res.shape) + assert_(isinstance(res, ArraySubclass)) + + +class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + ev = linalg.eigvals(a) + evalues, evectors = linalg.eig(a) + assert_almost_equal(ev, evalues) + + +class TestEigvals(EigvalsCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, dtype) + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.eigvals(a) + assert_(res.dtype.type is np.float64) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.eigvals(a) + assert_(res.dtype.type is np.complex64) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + +class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + res = linalg.eig(a) + eigenvalues, eigenvectors = res.eigenvalues, res.eigenvectors + assert_allclose(dot_generalized(a, eigenvectors), + np.asarray(eigenvectors) * np.asarray(eigenvalues)[..., None, :], + rtol=get_rtol(eigenvalues.dtype)) + assert_(consistent_subclass(eigenvectors, a)) + + +class TestEig(EigCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, dtype) + assert_equal(v.dtype, dtype) + + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, get_complex_dtype(dtype)) + assert_equal(v.dtype, get_complex_dtype(dtype)) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res, res_v = linalg.eig(a) + assert_(res_v.dtype.type is np.float64) + assert_(res.dtype.type is np.float64) + 
assert_equal(a.shape, res_v.shape) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res, res_v = linalg.eig(a) + assert_(res_v.dtype.type is np.complex64) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res_v.shape) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + +class SVDBaseTests: + hermitian = False + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + res = linalg.svd(x) + U, S, Vh = res.U, res.S, res.Vh + assert_equal(U.dtype, dtype) + assert_equal(S.dtype, get_real_dtype(dtype)) + assert_equal(Vh.dtype, dtype) + s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian) + assert_equal(s.dtype, get_real_dtype(dtype)) + + +class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + u, s, vt = linalg.svd(a, False) + assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :], + np.asarray(vt)), + rtol=get_rtol(u.dtype)) + assert_(consistent_subclass(u, a)) + assert_(consistent_subclass(vt, a)) + + +class TestSVD(SVDCases, SVDBaseTests): + def test_empty_identity(self): + """ Empty input should put an identity matrix in u or vh """ + x = np.empty((4, 0)) + u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian) + assert_equal(u.shape, (4, 4)) + assert_equal(vh.shape, (0, 0)) + assert_equal(u, np.eye(4)) + + x = np.empty((0, 4)) + u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian) + assert_equal(u.shape, (0, 0)) + assert_equal(vh.shape, (4, 4)) + assert_equal(vh, np.eye(4)) + + +class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + u, s, vt = linalg.svd(a, False, hermitian=True) + assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :], + np.asarray(vt)), + rtol=get_rtol(u.dtype)) + def hermitian(mat): + axes = list(range(mat.ndim)) + axes[-1], axes[-2] = axes[-2], axes[-1] + return np.conj(np.transpose(mat, axes=axes)) + + assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape)) + assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape)) + assert_equal(np.sort(s)[..., ::-1], s) + assert_(consistent_subclass(u, a)) + assert_(consistent_subclass(vt, a)) + + +class TestSVDHermitian(SVDHermitianCases, SVDBaseTests): + hermitian = True + + +class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # cond(x, p) for p in (None, 2, -2) + + def do(self, a, b, tags): + c = asarray(a) # a might be a matrix + if 'size-0' in tags: + assert_raises(LinAlgError, linalg.cond, c) + return + + # +-2 norms + s = linalg.svd(c, compute_uv=False) + assert_almost_equal( + linalg.cond(a), s[..., 0] / s[..., -1], + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, 2), s[..., 0] / s[..., -1], + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -2), s[..., -1] / s[..., 0], + single_decimal=5, double_decimal=11) + + # Other norms + cinv = np.linalg.inv(c) + assert_almost_equal( + linalg.cond(a, 1), + abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -1), + 
abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, np.inf), + abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -np.inf), + abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, 'fro'), + np.sqrt((abs(c)**2).sum(-1).sum(-1) + * (abs(cinv)**2).sum(-1).sum(-1)), + single_decimal=5, double_decimal=11) + + +class TestCond(CondCases): + def test_basic_nonsvd(self): + # Smoketest the non-svd norms + A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]]) + assert_almost_equal(linalg.cond(A, inf), 4) + assert_almost_equal(linalg.cond(A, -inf), 2/3) + assert_almost_equal(linalg.cond(A, 1), 4) + assert_almost_equal(linalg.cond(A, -1), 0.5) + assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12)) + + def test_singular(self): + # Singular matrices have infinite condition number for + # positive norms, and negative norms shouldn't raise + # exceptions + As = [np.zeros((2, 2)), np.ones((2, 2))] + p_pos = [None, 1, 2, 'fro'] + p_neg = [-1, -2] + for A, p in itertools.product(As, p_pos): + # Inversion may not hit exact infinity, so just check the + # number is large + assert_(linalg.cond(A, p) > 1e15) + for A, p in itertools.product(As, p_neg): + linalg.cond(A, p) + + @pytest.mark.xfail(True, run=False, + reason="Platform/LAPACK-dependent failure, " + "see gh-18914") + def test_nan(self): + # nans should be passed through, not converted to infs + ps = [None, 1, -1, 2, -2, 'fro'] + p_pos = [None, 1, 2, 'fro'] + + A = np.ones((2, 2)) + A[0,1] = np.nan + for p in ps: + c = linalg.cond(A, p) + assert_(isinstance(c, np.float_)) + assert_(np.isnan(c)) + + A = np.ones((3, 2, 2)) + A[1,0,1] = np.nan + for p in ps: + c = linalg.cond(A, p) + assert_(np.isnan(c[1])) + if p in p_pos: + assert_(c[0] > 1e15) + assert_(c[2] > 1e15) + else: + assert_(not np.isnan(c[0])) + assert_(not np.isnan(c[2])) + + def test_stacked_singular(self): + # Check behavior when only some of the stacked matrices are + # singular + np.random.seed(1234) + A = np.random.rand(2, 2, 2, 2) + A[0,0] = 0 + A[1,1] = 0 + + for p in (None, 1, 2, 'fro', -1, -2): + c = linalg.cond(A, p) + assert_equal(c[0,0], np.inf) + assert_equal(c[1,1], np.inf) + assert_(np.isfinite(c[0,1])) + assert_(np.isfinite(c[1,0])) + + +class PinvCases(LinalgSquareTestCase, + LinalgNonsquareTestCase, + LinalgGeneralizedSquareTestCase, + LinalgGeneralizedNonsquareTestCase): + + def do(self, a, b, tags): + a_ginv = linalg.pinv(a) + # `a @ a_ginv == I` does not hold if a is singular + dot = dot_generalized + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + assert_(consistent_subclass(a_ginv, a)) + + +class TestPinv(PinvCases): + pass + + +class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + a_ginv = linalg.pinv(a, hermitian=True) + # `a @ a_ginv == I` does not hold if a is singular + dot = dot_generalized + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + assert_(consistent_subclass(a_ginv, a)) + + +class TestPinvHermitian(PinvHermitianCases): + pass + + +class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + d = linalg.det(a) + res = linalg.slogdet(a) + s, ld = res.sign, res.logabsdet + if asarray(a).dtype.type in (single, double): + ad = asarray(a).astype(double) + else: 
+ ad = asarray(a).astype(cdouble) + ev = linalg.eigvals(ad) + assert_almost_equal(d, multiply.reduce(ev, axis=-1)) + assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1)) + + s = np.atleast_1d(s) + ld = np.atleast_1d(ld) + m = (s != 0) + assert_almost_equal(np.abs(s[m]), 1) + assert_equal(ld[~m], -inf) + + +class TestDet(DetCases): + def test_zero(self): + assert_equal(linalg.det([[0.0]]), 0.0) + assert_equal(type(linalg.det([[0.0]])), double) + assert_equal(linalg.det([[0.0j]]), 0.0) + assert_equal(type(linalg.det([[0.0j]])), cdouble) + + assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf)) + assert_equal(type(linalg.slogdet([[0.0]])[0]), double) + assert_equal(type(linalg.slogdet([[0.0]])[1]), double) + assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf)) + assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble) + assert_equal(type(linalg.slogdet([[0.0j]])[1]), double) + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(np.linalg.det(x).dtype, dtype) + ph, s = np.linalg.slogdet(x) + assert_equal(s.dtype, get_real_dtype(dtype)) + assert_equal(ph.dtype, dtype) + + def test_0_size(self): + a = np.zeros((0, 0), dtype=np.complex64) + res = linalg.det(a) + assert_equal(res, 1.) + assert_(res.dtype.type is np.complex64) + res = linalg.slogdet(a) + assert_equal(res, (1, 0)) + assert_(res[0].dtype.type is np.complex64) + assert_(res[1].dtype.type is np.float32) + + a = np.zeros((0, 0), dtype=np.float64) + res = linalg.det(a) + assert_equal(res, 1.) + assert_(res.dtype.type is np.float64) + res = linalg.slogdet(a) + assert_equal(res, (1, 0)) + assert_(res[0].dtype.type is np.float64) + assert_(res[1].dtype.type is np.float64) + + +class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase): + + def do(self, a, b, tags): + arr = np.asarray(a) + m, n = arr.shape + u, s, vt = linalg.svd(a, False) + x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1) + if m == 0: + assert_((x == 0).all()) + if m <= n: + assert_almost_equal(b, dot(a, x)) + assert_equal(rank, m) + else: + assert_equal(rank, n) + assert_almost_equal(sv, sv.__array_wrap__(s)) + if rank == n and m > n: + expect_resids = ( + np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0) + expect_resids = np.asarray(expect_resids) + if np.asarray(b).ndim == 1: + expect_resids.shape = (1,) + assert_equal(residuals.shape, expect_resids.shape) + else: + expect_resids = np.array([]).view(type(x)) + assert_almost_equal(residuals, expect_resids) + assert_(np.issubdtype(residuals.dtype, np.floating)) + assert_(consistent_subclass(x, b)) + assert_(consistent_subclass(residuals, b)) + + +class TestLstsq(LstsqCases): + def test_future_rcond(self): + a = np.array([[0., 1., 0., 1., 2., 0.], + [0., 2., 0., 0., 1., 0.], + [1., 0., 1., 0., 0., 4.], + [0., 0., 0., 2., 3., 0.]]).T + + b = np.array([1, 0, 0, 0, 0, 0]) + with suppress_warnings() as sup: + w = sup.record(FutureWarning, "`rcond` parameter will change") + x, residuals, rank, s = linalg.lstsq(a, b) + assert_(rank == 4) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1) + assert_(rank == 4) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + assert_(rank == 3) + # Warning should be raised exactly once (first command) + assert_(len(w) == 1) + + @pytest.mark.parametrize(["m", "n", "n_rhs"], [ + (4, 2, 2), + (0, 4, 1), + (0, 4, 2), + (4, 0, 1), + (4, 0, 2), + (4, 2, 0), + (0, 0, 0) + ]) + def test_empty_a_b(self, m, n, n_rhs): + a = np.arange(m * n).reshape(m, n) + b = 
np.ones((m, n_rhs)) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + if m == 0: + assert_((x == 0).all()) + assert_equal(x.shape, (n, n_rhs)) + assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,))) + if m > n and n_rhs > 0: + # residuals are exactly the squared norms of b's columns + r = b - np.dot(a, x) + assert_almost_equal(residuals, (r * r).sum(axis=-2)) + assert_equal(rank, min(m, n)) + assert_equal(s.shape, (min(m, n),)) + + def test_incompatible_dims(self): + # use modified version of docstring example + x = np.array([0, 1, 2, 3]) + y = np.array([-1, 0.2, 0.9, 2.1, 3.3]) + A = np.vstack([x, np.ones(len(x))]).T + with assert_raises_regex(LinAlgError, "Incompatible dimensions"): + linalg.lstsq(A, y, rcond=None) + + +@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO']) +class TestMatrixPower: + + rshft_0 = np.eye(4) + rshft_1 = rshft_0[[3, 0, 1, 2]] + rshft_2 = rshft_0[[2, 3, 0, 1]] + rshft_3 = rshft_0[[1, 2, 3, 0]] + rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3] + noninv = array([[1, 0], [0, 0]]) + stacked = np.block([[[rshft_0]]]*2) + #FIXME the 'e' dtype might work in future + dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')] + + def test_large_power(self, dt): + rshft = self.rshft_1.astype(dt) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3) + + def test_power_is_zero(self, dt): + def tz(M): + mz = matrix_power(M, 0) + assert_equal(mz, identity_like_generalized(M)) + assert_equal(mz.dtype, M.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_one(self, dt): + def tz(mat): + mz = matrix_power(mat, 1) + assert_equal(mz, mat) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_two(self, dt): + def tz(mat): + mz = matrix_power(mat, 2) + mmul = matmul if mat.dtype != object else dot + assert_equal(mz, mmul(mat, mat)) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_minus_one(self, dt): + def tz(mat): + invmat = matrix_power(mat, -1) + mmul = matmul if mat.dtype != object else dot + assert_almost_equal( + mmul(invmat, mat), identity_like_generalized(mat)) + + for mat in self.rshft_all: + if dt not in self.dtnoinv: + tz(mat.astype(dt)) + + def test_exceptions_bad_power(self, dt): + mat = self.rshft_0.astype(dt) + assert_raises(TypeError, matrix_power, mat, 1.5) + assert_raises(TypeError, matrix_power, mat, [1]) + + def test_exceptions_non_square(self, dt): + assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1) + assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1) + assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_exceptions_not_invertible(self, dt): + if dt in self.dtnoinv: + return + mat = self.noninv.astype(dt) + assert_raises(LinAlgError, matrix_power, mat, -1) + + +class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + # note that eigenvalue arrays returned by eig must be sorted since + # their 
order isn't guaranteed. + ev = linalg.eigvalsh(a, 'L') + evalues, evectors = linalg.eig(a) + evalues.sort(axis=-1) + assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype)) + + ev2 = linalg.eigvalsh(a, 'U') + assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype)) + + +class TestEigvalsh: + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w = np.linalg.eigvalsh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigvalsh, x, "lower") + assert_raises(ValueError, np.linalg.eigvalsh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0], [1, 0]], dtype=np.double) + Kup = np.array([[0, 1], [0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w = np.linalg.eigvalsh(Klo) + assert_allclose(w, tgt, rtol=rtol) + # Check 'L' + w = np.linalg.eigvalsh(Klo, UPLO='L') + assert_allclose(w, tgt, rtol=rtol) + # Check 'l' + w = np.linalg.eigvalsh(Klo, UPLO='l') + assert_allclose(w, tgt, rtol=rtol) + # Check 'U' + w = np.linalg.eigvalsh(Kup, UPLO='U') + assert_allclose(w, tgt, rtol=rtol) + # Check 'u' + w = np.linalg.eigvalsh(Kup, UPLO='u') + assert_allclose(w, tgt, rtol=rtol) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.eigvalsh(a) + assert_(res.dtype.type is np.float64) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.eigvalsh(a) + assert_(res.dtype.type is np.float32) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + +class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + # note that eigenvalue arrays returned by eig must be sorted since + # their order isn't guaranteed. 
+ res = linalg.eigh(a) + ev, evc = res.eigenvalues, res.eigenvectors + evalues, evectors = linalg.eig(a) + evalues.sort(axis=-1) + assert_almost_equal(ev, evalues) + + assert_allclose(dot_generalized(a, evc), + np.asarray(ev)[..., None, :] * np.asarray(evc), + rtol=get_rtol(ev.dtype)) + + ev2, evc2 = linalg.eigh(a, 'U') + assert_almost_equal(ev2, evalues) + + assert_allclose(dot_generalized(a, evc2), + np.asarray(ev2)[..., None, :] * np.asarray(evc2), + rtol=get_rtol(ev.dtype), err_msg=repr(a)) + + +class TestEigh: + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eigh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + assert_equal(v.dtype, dtype) + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigh, x, "lower") + assert_raises(ValueError, np.linalg.eigh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0], [1, 0]], dtype=np.double) + Kup = np.array([[0, 1], [0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w, v = np.linalg.eigh(Klo) + assert_allclose(w, tgt, rtol=rtol) + # Check 'L' + w, v = np.linalg.eigh(Klo, UPLO='L') + assert_allclose(w, tgt, rtol=rtol) + # Check 'l' + w, v = np.linalg.eigh(Klo, UPLO='l') + assert_allclose(w, tgt, rtol=rtol) + # Check 'U' + w, v = np.linalg.eigh(Kup, UPLO='U') + assert_allclose(w, tgt, rtol=rtol) + # Check 'u' + w, v = np.linalg.eigh(Kup, UPLO='u') + assert_allclose(w, tgt, rtol=rtol) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res, res_v = linalg.eigh(a) + assert_(res_v.dtype.type is np.float64) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res_v.shape) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res, res_v = linalg.eigh(a) + assert_(res_v.dtype.type is np.complex64) + assert_(res.dtype.type is np.float32) + assert_equal(a.shape, res_v.shape) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + +class _TestNormBase: + dt = None + dec = None + + @staticmethod + def check_dtype(x, res): + if issubclass(x.dtype.type, np.inexact): + assert_equal(res.dtype, x.real.dtype) + else: + # For integer input, don't have to test float precision of output. 
+ assert_(issubclass(res.dtype.type, np.floating)) + + +class _TestNormGeneral(_TestNormBase): + + def test_empty(self): + assert_equal(norm([]), 0.0) + assert_equal(norm(array([], dtype=self.dt)), 0.0) + assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0) + + def test_vector_return_type(self): + a = np.array([1, 0, 1]) + + exact_types = np.typecodes['AllInteger'] + inexact_types = np.typecodes['AllFloat'] + + all_types = exact_types + inexact_types + + for each_type in all_types: + at = a.astype(each_type) + + an = norm(at, -np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 0.0) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered") + an = norm(at, -1) + self.check_dtype(at, an) + assert_almost_equal(an, 0.0) + + an = norm(at, 0) + self.check_dtype(at, an) + assert_almost_equal(an, 2) + + an = norm(at, 1) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + self.check_dtype(at, an) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0)) + + an = norm(at, 4) + self.check_dtype(at, an) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0)) + + an = norm(at, np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + def test_vector(self): + a = [1, 2, 3, 4] + b = [-1, -2, -3, -4] + c = [-1, 2, -3, 4] + + def _test(v): + np.testing.assert_almost_equal(norm(v), 30 ** 0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, inf), 4.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -inf), 1.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 1), 10.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5), + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 0), 4, + decimal=self.dec) + + for v in (a, b, c,): + _test(v) + + for v in (array(a, dtype=self.dt), array(b, dtype=self.dt), + array(c, dtype=self.dt)): + _test(v) + + def test_axis(self): + # Vector norms. + # Compare the use of `axis` with computing the norm of each row + # or column separately. + A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]: + expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])] + assert_almost_equal(norm(A, ord=order, axis=0), expected0) + expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])] + assert_almost_equal(norm(A, ord=order, axis=1), expected1) + + # Matrix norms. + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + nd = B.ndim + for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']: + for axis in itertools.combinations(range(-nd, nd), 2): + row_axis, col_axis = axis + if row_axis < 0: + row_axis += nd + if col_axis < 0: + col_axis += nd + if row_axis == col_axis: + assert_raises(ValueError, norm, B, ord=order, axis=axis) + else: + n = norm(B, ord=order, axis=axis) + + # The logic using k_index only works for nd = 3. + # This has to be changed if nd is increased. 
+ k_index = nd - (row_axis + col_axis) + if row_axis < col_axis: + expected = [norm(B[:].take(k, axis=k_index), ord=order) + for k in range(B.shape[k_index])] + else: + expected = [norm(B[:].take(k, axis=k_index).T, ord=order) + for k in range(B.shape[k_index])] + assert_almost_equal(n, expected) + + def test_keepdims(self): + A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + allclose_err = 'order {0}, axis = {1}' + shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}' + + # check the order=None, axis=None case + expected = norm(A, ord=None, axis=None) + found = norm(A, ord=None, axis=None, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(None, None)) + expected_shape = (1, 1, 1) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, None, None)) + + # Vector norms. + for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]: + for k in range(A.ndim): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order, k)) + expected_shape = list(A.shape) + expected_shape[k] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + # Matrix norms. + for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']: + for k in itertools.permutations(range(A.ndim), 2): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order, k)) + expected_shape = list(A.shape) + expected_shape[k[0]] = 1 + expected_shape[k[1]] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + +class _TestNorm2D(_TestNormBase): + # Define the part for 2d arrays separately, so we can subclass this + # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg. + array = np.array + + def test_matrix_empty(self): + assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0) + + def test_matrix_return_type(self): + a = self.array([[1, 0, 1], [0, 1, 1]]) + + exact_types = np.typecodes['AllInteger'] + + # float32, complex64, float64, complex128 types are the only types + # allowed by `linalg`, which performs the matrix operations used + # within `norm`. + inexact_types = 'fdFD' + + all_types = exact_types + inexact_types + + for each_type in all_types: + at = a.astype(each_type) + + an = norm(at, -np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered") + an = norm(at, -1) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + an = norm(at, 1) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + self.check_dtype(at, an) + assert_almost_equal(an, 3.0**(1.0/2.0)) + + an = norm(at, -2) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + an = norm(at, np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 'fro') + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 'nuc') + self.check_dtype(at, an) + # Lower bar needed to support low precision floats. + # They end up being off by 1 in the 7th place. 
+ np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6) + + def test_matrix_2x2(self): + A = self.array([[1, 3], [5, 7]], dtype=self.dt) + assert_almost_equal(norm(A), 84 ** 0.5) + assert_almost_equal(norm(A, 'fro'), 84 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 10.0) + assert_almost_equal(norm(A, inf), 12.0) + assert_almost_equal(norm(A, -inf), 4.0) + assert_almost_equal(norm(A, 1), 10.0) + assert_almost_equal(norm(A, -1), 6.0) + assert_almost_equal(norm(A, 2), 9.1231056256176615) + assert_almost_equal(norm(A, -2), 0.87689437438234041) + + assert_raises(ValueError, norm, A, 'nofro') + assert_raises(ValueError, norm, A, -3) + assert_raises(ValueError, norm, A, 0) + + def test_matrix_3x3(self): + # This test has been added because the 2x2 example + # happened to have equal nuclear norm and induced 1-norm. + # The 1/10 scaling factor accommodates the absolute tolerance + # used in assert_almost_equal. + A = (1 / 10) * \ + self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt) + assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836) + assert_almost_equal(norm(A, inf), 1.1) + assert_almost_equal(norm(A, -inf), 0.6) + assert_almost_equal(norm(A, 1), 1.0) + assert_almost_equal(norm(A, -1), 0.4) + assert_almost_equal(norm(A, 2), 0.88722940323461277) + assert_almost_equal(norm(A, -2), 0.19456584790481812) + + def test_bad_args(self): + # Check that bad arguments raise the appropriate exceptions. + + A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + # Using `axis=` or passing in a 1-D array implies vector + # norms are being computed, so also using `ord='fro'` + # or `ord='nuc'` or any other string raises a ValueError. + assert_raises(ValueError, norm, A, 'fro', 0) + assert_raises(ValueError, norm, A, 'nuc', 0) + assert_raises(ValueError, norm, [3, 4], 'fro', None) + assert_raises(ValueError, norm, [3, 4], 'nuc', None) + assert_raises(ValueError, norm, [3, 4], 'test', None) + + # Similarly, norm should raise an exception when ord is any finite + # number other than 1, 2, -1 or -2 when computing matrix norms. + for order in [0, 3]: + assert_raises(ValueError, norm, A, order, None) + assert_raises(ValueError, norm, A, order, (0, 1)) + assert_raises(ValueError, norm, B, order, (1, 2)) + + # Invalid axis + assert_raises(np.AxisError, norm, B, None, 3) + assert_raises(np.AxisError, norm, B, None, (2, 3)) + assert_raises(ValueError, norm, B, None, (0, 1, 2)) + + +class _TestNorm(_TestNorm2D, _TestNormGeneral): + pass + + +class TestNorm_NonSystematic: + + def test_longdouble_norm(self): + # Non-regression test: p-norm of longdouble would previously raise + # UnboundLocalError. + x = np.arange(10, dtype=np.longdouble) + old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2) + + def test_intmin(self): + # Non-regression test: p-norm of signed integer would previously do + # float cast and abs in the wrong order. 
+ x = np.array([-2 ** 31], dtype=np.int32) + old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5) + + def test_complex_high_ord(self): + # gh-4156 + d = np.empty((2,), dtype=np.clongdouble) + d[0] = 6 + 7j + d[1] = -6 + 7j + res = 11.615898132184 + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10) + d = d.astype(np.complex128) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9) + d = d.astype(np.complex64) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5) + + +# Separate definitions so we can use them for matrix tests. +class _TestNormDoubleBase(_TestNormBase): + dt = np.double + dec = 12 + + +class _TestNormSingleBase(_TestNormBase): + dt = np.float32 + dec = 6 + + +class _TestNormInt64Base(_TestNormBase): + dt = np.int64 + dec = 12 + + +class TestNormDouble(_TestNorm, _TestNormDoubleBase): + pass + + +class TestNormSingle(_TestNorm, _TestNormSingleBase): + pass + + +class TestNormInt64(_TestNorm, _TestNormInt64Base): + pass + + +class TestMatrixRank: + + def test_matrix_rank(self): + # Full rank matrix + assert_equal(4, matrix_rank(np.eye(4))) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + assert_equal(matrix_rank(I), 3) + # All zeros - zero rank + assert_equal(matrix_rank(np.zeros((4, 4))), 0) + # 1 dimension - rank 1 unless all 0 + assert_equal(matrix_rank([1, 0, 0, 0]), 1) + assert_equal(matrix_rank(np.zeros((4,))), 0) + # accepts array-like + assert_equal(matrix_rank([1]), 1) + # greater than 2 dimensions treated as stacked matrices + ms = np.array([I, np.eye(4), np.zeros((4,4))]) + assert_equal(matrix_rank(ms), np.array([3, 4, 0])) + # works on scalar + assert_equal(matrix_rank(1), 1) + + def test_symmetric_rank(self): + assert_equal(4, matrix_rank(np.eye(4), hermitian=True)) + assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True)) + assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True)) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + assert_equal(3, matrix_rank(I, hermitian=True)) + # manually supplied tolerance + I[-1, -1] = 1e-8 + assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8)) + assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8)) + + +def test_reduced_rank(): + # Test matrices with reduced rank + rng = np.random.RandomState(20120714) + for i in range(100): + # Make a rank deficient matrix + X = rng.normal(size=(40, 10)) + X[:, 0] = X[:, 1] + X[:, 2] + # Assert that matrix_rank detected deficiency + assert_equal(matrix_rank(X), 9) + X[:, 3] = X[:, 4] + X[:, 5] + assert_equal(matrix_rank(X), 8) + + +class TestQR: + # Define the array class here, so run this on matrices elsewhere. + array = np.array + + def check_qr(self, a): + # This test expects the argument `a` to be an ndarray or + # a subclass of an ndarray of inexact type. 
+ a_type = type(a) + a_dtype = a.dtype + m, n = a.shape + k = min(m, n) + + # mode == 'complete' + res = linalg.qr(a, mode='complete') + Q, R = res.Q, res.R + assert_(Q.dtype == a_dtype) + assert_(R.dtype == a_dtype) + assert_(isinstance(Q, a_type)) + assert_(isinstance(R, a_type)) + assert_(Q.shape == (m, m)) + assert_(R.shape == (m, n)) + assert_almost_equal(dot(Q, R), a) + assert_almost_equal(dot(Q.T.conj(), Q), np.eye(m)) + assert_almost_equal(np.triu(R), R) + + # mode == 'reduced' + q1, r1 = linalg.qr(a, mode='reduced') + assert_(q1.dtype == a_dtype) + assert_(r1.dtype == a_dtype) + assert_(isinstance(q1, a_type)) + assert_(isinstance(r1, a_type)) + assert_(q1.shape == (m, k)) + assert_(r1.shape == (k, n)) + assert_almost_equal(dot(q1, r1), a) + assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k)) + assert_almost_equal(np.triu(r1), r1) + + # mode == 'r' + r2 = linalg.qr(a, mode='r') + assert_(r2.dtype == a_dtype) + assert_(isinstance(r2, a_type)) + assert_almost_equal(r2, r1) + + + @pytest.mark.parametrize(["m", "n"], [ + (3, 0), + (0, 3), + (0, 0) + ]) + def test_qr_empty(self, m, n): + k = min(m, n) + a = np.empty((m, n)) + + self.check_qr(a) + + h, tau = np.linalg.qr(a, mode='raw') + assert_equal(h.dtype, np.double) + assert_equal(tau.dtype, np.double) + assert_equal(h.shape, (n, m)) + assert_equal(tau.shape, (k,)) + + def test_mode_raw(self): + # The factorization is not unique and varies between libraries, + # so it is not possible to check against known values. Functional + # testing is a possibility, but awaits the exposure of more + # of the functions in lapack_lite. Consequently, this test is + # very limited in scope. Note that the results are in FORTRAN + # order, hence the h arrays are transposed. + a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double) + + # Test double + h, tau = linalg.qr(a, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (2, 3)) + assert_(tau.shape == (2,)) + + h, tau = linalg.qr(a.T, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (3, 2)) + assert_(tau.shape == (2,)) + + def test_mode_all_but_economic(self): + a = self.array([[1, 2], [3, 4]]) + b = self.array([[1, 2], [3, 4], [5, 6]]) + for dt in "fd": + m1 = a.astype(dt) + m2 = b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + + for dt in "fd": + m1 = 1 + 1j * a.astype(dt) + m2 = 1 + 1j * b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + + def check_qr_stacked(self, a): + # This test expects the argument `a` to be an ndarray or + # a subclass of an ndarray of inexact type. 
+ a_type = type(a) + a_dtype = a.dtype + m, n = a.shape[-2:] + k = min(m, n) + + # mode == 'complete' + q, r = linalg.qr(a, mode='complete') + assert_(q.dtype == a_dtype) + assert_(r.dtype == a_dtype) + assert_(isinstance(q, a_type)) + assert_(isinstance(r, a_type)) + assert_(q.shape[-2:] == (m, m)) + assert_(r.shape[-2:] == (m, n)) + assert_almost_equal(matmul(q, r), a) + I_mat = np.identity(q.shape[-1]) + stack_I_mat = np.broadcast_to(I_mat, + q.shape[:-2] + (q.shape[-1],)*2) + assert_almost_equal(matmul(swapaxes(q, -1, -2).conj(), q), stack_I_mat) + assert_almost_equal(np.triu(r[..., :, :]), r) + + # mode == 'reduced' + q1, r1 = linalg.qr(a, mode='reduced') + assert_(q1.dtype == a_dtype) + assert_(r1.dtype == a_dtype) + assert_(isinstance(q1, a_type)) + assert_(isinstance(r1, a_type)) + assert_(q1.shape[-2:] == (m, k)) + assert_(r1.shape[-2:] == (k, n)) + assert_almost_equal(matmul(q1, r1), a) + I_mat = np.identity(q1.shape[-1]) + stack_I_mat = np.broadcast_to(I_mat, + q1.shape[:-2] + (q1.shape[-1],)*2) + assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1), + stack_I_mat) + assert_almost_equal(np.triu(r1[..., :, :]), r1) + + # mode == 'r' + r2 = linalg.qr(a, mode='r') + assert_(r2.dtype == a_dtype) + assert_(isinstance(r2, a_type)) + assert_almost_equal(r2, r1) + + @pytest.mark.parametrize("size", [ + (3, 4), (4, 3), (4, 4), + (3, 0), (0, 3)]) + @pytest.mark.parametrize("outer_size", [ + (2, 2), (2,), (2, 3, 4)]) + @pytest.mark.parametrize("dt", [ + np.single, np.double, + np.csingle, np.cdouble]) + def test_stacked_inputs(self, outer_size, size, dt): + + A = np.random.normal(size=outer_size + size).astype(dt) + B = np.random.normal(size=outer_size + size).astype(dt) + self.check_qr_stacked(A) + self.check_qr_stacked(A + 1.j*B) + + +class TestCholesky: + # TODO: are there no other tests for cholesky? 
+ + @pytest.mark.parametrize( + 'shape', [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)] + ) + @pytest.mark.parametrize( + 'dtype', (np.float32, np.float64, np.complex64, np.complex128) + ) + def test_basic_property(self, shape, dtype): + # Check A = L L^H + np.random.seed(1) + a = np.random.randn(*shape) + if np.issubdtype(dtype, np.complexfloating): + a = a + 1j*np.random.randn(*shape) + + t = list(range(len(shape))) + t[-2:] = -1, -2 + + a = np.matmul(a.transpose(t).conj(), a) + a = np.asarray(a, dtype=dtype) + + c = np.linalg.cholesky(a) + + b = np.matmul(c, c.transpose(t).conj()) + with np._no_nep50_warning(): + atol = 500 * a.shape[0] * np.finfo(dtype).eps + assert_allclose(b, a, atol=atol, err_msg=f'{shape} {dtype}\n{a}\n{c}') + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.cholesky(a) + assert_equal(a.shape, res.shape) + assert_(res.dtype.type is np.float64) + # for documentation purpose: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.cholesky(a) + assert_equal(a.shape, res.shape) + assert_(res.dtype.type is np.complex64) + assert_(isinstance(res, np.ndarray)) + + +def test_byteorder_check(): + # Byte order check should pass for native order + if sys.byteorder == 'little': + native = '<' + else: + native = '>' + + for dtt in (np.float32, np.float64): + arr = np.eye(4, dtype=dtt) + n_arr = arr.newbyteorder(native) + sw_arr = arr.newbyteorder('S').byteswap() + assert_equal(arr.dtype.byteorder, '=') + for routine in (linalg.inv, linalg.det, linalg.pinv): + # Normal call + res = routine(arr) + # Native but not '=' + assert_array_equal(res, routine(n_arr)) + # Swapped + assert_array_equal(res, routine(sw_arr)) + + +@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") +def test_generalized_raise_multiloop(): + # It should raise an error even if the error doesn't occur in the + # last iteration of the ufunc inner loop + + invertible = np.array([[1, 2], [3, 4]]) + non_invertible = np.array([[1, 1], [1, 1]]) + + x = np.zeros([4, 4, 2, 2])[1::2] + x[...] = invertible + x[0, 0] = non_invertible + + assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) + + +def test_xerbla_override(): + # Check that our xerbla has been successfully linked in. If it is not, + # the default xerbla routine is called, which prints a message to stdout + # and may, or may not, abort the process depending on the LAPACK package. + + XERBLA_OK = 255 + + try: + pid = os.fork() + except (OSError, AttributeError): + # fork failed, or not running on POSIX + pytest.skip("Not POSIX or fork failed.") + + if pid == 0: + # child; close i/o file handles + os.close(1) + os.close(0) + # Avoid producing core files. + import resource + resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) + # These calls may abort. + try: + np.linalg.lapack_lite.xerbla() + except ValueError: + pass + except Exception: + os._exit(os.EX_CONFIG) + + try: + a = np.array([[1.]]) + np.linalg.lapack_lite.dorgqr( + 1, 1, 1, a, + 0, # <- invalid value + a, a, 0, 0) + except ValueError as e: + if "DORGQR parameter number 5" in str(e): + # success, reuse error code to mark success as + # FORTRAN STOP returns as success. + os._exit(XERBLA_OK) + + # Did not abort, but our xerbla was not linked in. 
+ os._exit(os.EX_CONFIG) + else: + # parent + pid, status = os.wait() + if os.WEXITSTATUS(status) != XERBLA_OK: + pytest.skip('Numpy xerbla not linked in.') + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.slow +def test_sdot_bug_8577(): + # Regression test that loading certain other libraries does not + # result to wrong results in float32 linear algebra. + # + # There's a bug gh-8577 on OSX that can trigger this, and perhaps + # there are also other situations in which it occurs. + # + # Do the check in a separate process. + + bad_libs = ['PyQt5.QtWidgets', 'IPython'] + + template = textwrap.dedent(""" + import sys + {before} + try: + import {bad_lib} + except ImportError: + sys.exit(0) + {after} + x = np.ones(2, dtype=np.float32) + sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1) + """) + + for bad_lib in bad_libs: + code = template.format(before="import numpy as np", after="", + bad_lib=bad_lib) + subprocess.check_call([sys.executable, "-c", code]) + + # Swapped import order + code = template.format(after="import numpy as np", before="", + bad_lib=bad_lib) + subprocess.check_call([sys.executable, "-c", code]) + + +class TestMultiDot: + + def test_basic_function_with_three_arguments(self): + # multi_dot with three arguments uses a fast hand coded algorithm to + # determine the optimal order. Therefore test it separately. + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + + assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C)) + assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C))) + + def test_basic_function_with_two_arguments(self): + # separate code path with two arguments + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + + assert_almost_equal(multi_dot([A, B]), A.dot(B)) + assert_almost_equal(multi_dot([A, B]), np.dot(A, B)) + + def test_basic_function_with_dynamic_programming_optimization(self): + # multi_dot with four or more arguments uses the dynamic programming + # optimization and therefore deserve a separate + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 1)) + assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D)) + + def test_vector_as_first_argument(self): + # The first argument can be 1-D + A1d = np.random.random(2) # 1-D + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 2)) + + # the result should be 1-D + assert_equal(multi_dot([A1d, B, C, D]).shape, (2,)) + + def test_vector_as_last_argument(self): + # The last argument can be 1-D + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D1d = np.random.random(2) # 1-D + + # the result should be 1-D + assert_equal(multi_dot([A, B, C, D1d]).shape, (6,)) + + def test_vector_as_first_and_last_argument(self): + # The first and last arguments can be 1-D + A1d = np.random.random(2) # 1-D + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D1d = np.random.random(2) # 1-D + + # the result should be a scalar + assert_equal(multi_dot([A1d, B, C, D1d]).shape, ()) + + def test_three_arguments_and_out(self): + # multi_dot with three arguments uses a fast hand coded algorithm to + # determine the optimal order. Therefore test it separately. 
+ A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + + out = np.zeros((6, 2)) + ret = multi_dot([A, B, C], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B).dot(C)) + assert_almost_equal(out, np.dot(A, np.dot(B, C))) + + def test_two_arguments_and_out(self): + # separate code path with two arguments + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + out = np.zeros((6, 6)) + ret = multi_dot([A, B], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B)) + assert_almost_equal(out, np.dot(A, B)) + + def test_dynamic_programming_optimization_and_out(self): + # multi_dot with four or more arguments uses the dynamic programming + # optimization and therefore deserve a separate test + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 1)) + out = np.zeros((6, 1)) + ret = multi_dot([A, B, C, D], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B).dot(C).dot(D)) + + def test_dynamic_programming_logic(self): + # Test for the dynamic programming part + # This test is directly taken from Cormen page 376. + arrays = [np.random.random((30, 35)), + np.random.random((35, 15)), + np.random.random((15, 5)), + np.random.random((5, 10)), + np.random.random((10, 20)), + np.random.random((20, 25))] + m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.], + [0., 0., 2625., 4375., 7125., 10500.], + [0., 0., 0., 750., 2500., 5375.], + [0., 0., 0., 0., 1000., 3500.], + [0., 0., 0., 0., 0., 5000.], + [0., 0., 0., 0., 0., 0.]]) + s_expected = np.array([[0, 1, 1, 3, 3, 3], + [0, 0, 2, 3, 3, 3], + [0, 0, 0, 3, 3, 3], + [0, 0, 0, 0, 4, 5], + [0, 0, 0, 0, 0, 5], + [0, 0, 0, 0, 0, 0]], dtype=int) + s_expected -= 1 # Cormen uses 1-based index, python does not. + + s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True) + + # Only the upper triangular part (without the diagonal) is interesting. 
+ assert_almost_equal(np.triu(s[:-1, 1:]), + np.triu(s_expected[:-1, 1:])) + assert_almost_equal(np.triu(m), np.triu(m_expected)) + + def test_too_few_input_arrays(self): + assert_raises(ValueError, multi_dot, []) + assert_raises(ValueError, multi_dot, [np.random.random((3, 3))]) + + +class TestTensorinv: + + @pytest.mark.parametrize("arr, ind", [ + (np.ones((4, 6, 8, 2)), 2), + (np.ones((3, 3, 2)), 1), + ]) + def test_non_square_handling(self, arr, ind): + with assert_raises(LinAlgError): + linalg.tensorinv(arr, ind=ind) + + @pytest.mark.parametrize("shape, ind", [ + # examples from docstring + ((4, 6, 8, 3), 2), + ((24, 8, 3), 1), + ]) + def test_tensorinv_shape(self, shape, ind): + a = np.eye(24) + a.shape = shape + ainv = linalg.tensorinv(a=a, ind=ind) + expected = a.shape[ind:] + a.shape[:ind] + actual = ainv.shape + assert_equal(actual, expected) + + @pytest.mark.parametrize("ind", [ + 0, -2, + ]) + def test_tensorinv_ind_limit(self, ind): + a = np.eye(24) + a.shape = (4, 6, 8, 3) + with assert_raises(ValueError): + linalg.tensorinv(a=a, ind=ind) + + def test_tensorinv_result(self): + # mimic a docstring example + a = np.eye(24) + a.shape = (24, 8, 3) + ainv = linalg.tensorinv(a, ind=1) + b = np.ones(24) + assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) + + +class TestTensorsolve: + + @pytest.mark.parametrize("a, axes", [ + (np.ones((4, 6, 8, 2)), None), + (np.ones((3, 3, 2)), (0, 2)), + ]) + def test_non_square_handling(self, a, axes): + with assert_raises(LinAlgError): + b = np.ones(a.shape[:2]) + linalg.tensorsolve(a, b, axes=axes) + + @pytest.mark.parametrize("shape", + [(2, 3, 6), (3, 4, 4, 3), (0, 3, 3, 0)], + ) + def test_tensorsolve_result(self, shape): + a = np.random.randn(*shape) + b = np.ones(a.shape[:2]) + x = np.linalg.tensorsolve(a, b) + assert_allclose(np.tensordot(a, x, axes=len(x.shape)), b) + + +def test_unsupported_commontype(): + # linalg gracefully handles unsupported type + arr = np.array([[1, -2], [2, 5]], dtype='float16') + with assert_raises_regex(TypeError, "unsupported in linalg"): + linalg.cholesky(arr) + + +#@pytest.mark.slow +#@pytest.mark.xfail(not HAS_LAPACK64, run=False, +# reason="Numpy not compiled with 64-bit BLAS/LAPACK") +#@requires_memory(free_bytes=16e9) +@pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing") +def test_blas64_dot(): + n = 2**32 + a = np.zeros([1, n], dtype=np.float32) + b = np.ones([1, 1], dtype=np.float32) + a[0,-1] = 1 + c = np.dot(b, a) + assert_equal(c[0,-1], 1) + + +@pytest.mark.xfail(not HAS_LAPACK64, + reason="Numpy not compiled with 64-bit BLAS/LAPACK") +def test_blas64_geqrf_lwork_smoketest(): + # Smoke test LAPACK geqrf lwork call with 64-bit integers + dtype = np.float64 + lapack_routine = np.linalg.lapack_lite.dgeqrf + + m = 2**32 + 1 + n = 2**32 + 1 + lda = m + + # Dummy arrays, not referenced by the lapack routine, so don't + # need to be of the right size + a = np.zeros([1, 1], dtype=dtype) + work = np.zeros([1], dtype=dtype) + tau = np.zeros([1], dtype=dtype) + + # Size query + results = lapack_routine(m, n, a, lda, tau, work, -1, 0) + assert_equal(results['info'], 0) + assert_equal(results['m'], m) + assert_equal(results['n'], m) + + # Should result to an integer of a reasonable size + lwork = int(work.item()) + assert_(2**32 < lwork < 2**42) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/test_regression.py b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/test_regression.py new file mode 100644 index 00000000..af38443a --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/numpy/linalg/tests/test_regression.py
@@ -0,0 +1,145 @@
+""" Test functions for linalg module
+"""
+import warnings
+
+import numpy as np
+from numpy import linalg, arange, float64, array, dot, transpose
+from numpy.testing import (
+    assert_, assert_raises, assert_equal, assert_array_equal,
+    assert_array_almost_equal, assert_array_less
+)
+
+
+class TestRegression:
+
+    def test_eig_build(self):
+        # Ticket #652
+        rva = array([1.03221168e+02 + 0.j,
+                     -1.91843603e+01 + 0.j,
+                     -6.04004526e-01 + 15.84422474j,
+                     -6.04004526e-01 - 15.84422474j,
+                     -1.13692929e+01 + 0.j,
+                     -6.57612485e-01 + 10.41755503j,
+                     -6.57612485e-01 - 10.41755503j,
+                     1.82126812e+01 + 0.j,
+                     1.06011014e+01 + 0.j,
+                     7.80732773e+00 + 0.j,
+                     -7.65390898e-01 + 0.j,
+                     1.51971555e-15 + 0.j,
+                     -1.51308713e-15 + 0.j])
+        a = arange(13 * 13, dtype=float64)
+        a.shape = (13, 13)
+        a = a % 17
+        va, ve = linalg.eig(a)
+        va.sort()
+        rva.sort()
+        assert_array_almost_equal(va, rva)
+
+    def test_eigh_build(self):
+        # Ticket 662.
+        rvals = [68.60568999, 89.57756725, 106.67185574]
+
+        cov = array([[77.70273908, 3.51489954, 15.64602427],
+                     [3.51489954, 88.97013878, -1.07431931],
+                     [15.64602427, -1.07431931, 98.18223512]])
+
+        vals, vecs = linalg.eigh(cov)
+        assert_array_almost_equal(vals, rvals)
+
+    def test_svd_build(self):
+        # Ticket 627.
+        a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
+        m, n = a.shape
+        u, s, vh = linalg.svd(a)
+
+        b = dot(transpose(u[:, n:]), a)
+
+        assert_array_almost_equal(b, np.zeros((2, 2)))
+
+    def test_norm_vector_badarg(self):
+        # Regression for #786: Frobenius norm for vectors raises
+        # ValueError.
+        assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
+
+    def test_lapack_endian(self):
+        # For bug #1482
+        a = array([[5.7998084, -2.1825367],
+                   [-2.1825367, 9.85910595]], dtype='>f8')
+        b = array(a, dtype='<f8')
+
+        ap = linalg.cholesky(a)
+        bp = linalg.cholesky(b)
+        assert_array_equal(ap, bp)
+
+    def test_large_svd_32bit(self):
+        # See gh-4442, 64bit would require very large/slow matrices.
+        x = np.eye(1000, 66)
+        np.linalg.svd(x)
+
+    def test_svd_no_uv(self):
+        # gh-4733
+        for shape in (3, 4), (4, 4), (4, 3):
+            for t in float, complex:
+                a = np.ones(shape, dtype=t)
+                w = linalg.svd(a, compute_uv=False)
+                c = np.count_nonzero(np.absolute(w) > 0.5)
+                assert_equal(c, 1)
+                assert_equal(np.linalg.matrix_rank(a), 1)
+                assert_array_less(1, np.linalg.norm(a, ord=2))
+
+    def test_norm_object_array(self):
+        # gh-7575
+        testvector = np.array([np.array([0, 1]), 0, 0], dtype=object)
+
+        norm = linalg.norm(testvector)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        norm = linalg.norm(testvector, ord=1)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype != np.dtype('float64'))
+
+        norm = linalg.norm(testvector, ord=2)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        assert_raises(ValueError, linalg.norm, testvector, ord='fro')
+        assert_raises(ValueError, linalg.norm, testvector, ord='nuc')
+        assert_raises(ValueError, linalg.norm, testvector, ord=np.inf)
+        assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf)
+        assert_raises(ValueError, linalg.norm, testvector, ord=0)
+        assert_raises(ValueError, linalg.norm, testvector, ord=-1)
+        assert_raises(ValueError, linalg.norm, testvector, ord=-2)
+
+        testmatrix = np.array([[np.array([0, 1]), 0, 0],
+                               [0, 0, 0]], dtype=object)
+
+        norm = linalg.norm(testmatrix)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        norm = linalg.norm(testmatrix, ord='fro')
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc')
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=0)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=1)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=-1)
+        assert_raises(TypeError, linalg.norm, testmatrix, ord=2)
+        assert_raises(TypeError, linalg.norm, testmatrix, ord=-2)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=3)
+
+    def test_lstsq_complex_larger_rhs(self):
+        # gh-9891
+        size = 20
+        n_rhs = 70
+        G = np.random.randn(size, size) + 1j * np.random.randn(size, size)
+        u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs)
+        b = G.dot(u)
+        # This should work without segmentation fault.
+        u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None)
+        # check results just in case
+        assert_array_almost_equal(u_lstsq, u)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/API_CHANGES.txt b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/API_CHANGES.txt
new file mode 100644
index 00000000..a3d792a1
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/API_CHANGES.txt
@@ -0,0 +1,135 @@
+.. -*- rest -*-
+
+==================================================
+API changes in the new masked array implementation
+==================================================
+
+Masked arrays are subclasses of ndarray
+---------------------------------------
+
+Contrary to the original implementation, masked arrays are now regular
+ndarrays::
+
+  >>> x = masked_array([1,2,3], mask=[0,0,1])
+  >>> print isinstance(x, numpy.ndarray)
+  True
+
+
+``_data`` returns a view of the masked array
+--------------------------------------------
+
+Masked arrays are composed of a ``_data`` part and a ``_mask``. Accessing the
+``_data`` part returns a regular ndarray, or any of its subclasses, depending
+on the initial data::
+
+  >>> x = masked_array(numpy.matrix([[1,2],[3,4]]), mask=[[0,0],[0,1]])
+  >>> print x._data
+  [[1 2]
+   [3 4]]
+  >>> print type(x._data)
+  <class 'numpy.core.defmatrix.matrix'>
+
+
+In practice, ``_data`` is implemented as a property, not as an attribute.
+Therefore, you cannot access it directly, and some simple tests such as the
+following one will fail::
+
+  >>> x._data is x._data
+  False
+
+
+``filled(x)`` can return a subclass of ndarray
+----------------------------------------------
+
+The function ``filled(a)`` returns an array of the same type as ``a._data``::
+
+  >>> x = masked_array(numpy.matrix([[1,2],[3,4]]), mask=[[0,0],[0,1]])
+  >>> y = filled(x)
+  >>> print type(y)
+  <class 'numpy.core.defmatrix.matrix'>
+  >>> print y
+  matrix([[     1,      2],
+          [     3, 999999]])
+
+
+``put``, ``putmask`` behave like their ndarray counterparts
+-----------------------------------------------------------
+
+Previously, ``putmask`` was used like this::
+
+  mask = [False,True,True]
+  x = array([1,4,7], mask=mask)
+  putmask(x, mask, [3])
+
+which translated to::
+
+  x[~mask] = [3]
+
+(Note that a ``True`` value in a mask suppresses a value.)
+
+In other words, the mask had the same length as ``x``, whereas
+``values`` had ``sum(~mask)`` elements.
+
+Now, the behaviour is similar to that of ``ndarray.putmask``, where
+the mask and the values are both the same length as ``x``, i.e.
+
+::
+
+   putmask(x, mask, [3,0,0])
+
+
+``fill_value`` is a property
+----------------------------
+
+``fill_value`` is no longer a method, but a property::
+
+  >>> print x.fill_value
+  999999
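+
+As a quick, doctest-style sketch of the two changes above (an editorial
+illustration rather than part of the original notes; it assumes the same
+bare ``array``/``putmask`` names as the earlier examples, and modern
+``numpy.ma`` behaves the same way)::
+
+  >>> x = array([1,4,7], mask=[False,True,True])
+  >>> putmask(x, [False,True,True], [3,0,0])   # values as long as x
+  >>> print x
+  [1 0 0]
+  >>> x.fill_value        # a property: read it without calling it
+  999999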
+
+``cumsum`` and ``cumprod`` ignore missing values
+------------------------------------------------
+
+Missing values are assumed to be the identity element, i.e. 0 for
+``cumsum`` and 1 for ``cumprod``::
+
+  >>> x = N.ma.array([1,2,3,4], mask=[False,True,False,False])
+  >>> print x
+  [1 -- 3 4]
+  >>> print x.cumsum()
+  [1 -- 4 8]
+  >>> print x.cumprod()
+  [1 -- 3 12]
+
+``bool(x)`` raises a ValueError
+-------------------------------
+
+Masked arrays now behave like regular ``ndarrays``, in that they cannot be
+converted to booleans:
+
+::
+
+  >>> x = N.ma.array([1,2,3])
+  >>> bool(x)
+  Traceback (most recent call last):
+    File "<stdin>", line 1, in <module>
+  ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
+
+
+==================================
+New features (non exhaustive list)
+==================================
+
+``mr_``
+-------
+
+``mr_`` mimics the behavior of ``r_`` for masked arrays::
+
+  >>> np.ma.mr_[3,4,5]
+  masked_array(data = [3 4 5],
+               mask = False,
+         fill_value=999999)
+
+
+``anom``
+--------
+
+The ``anom`` method returns the deviations from the average (anomalies).
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/LICENSE b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/LICENSE
new file mode 100644
index 00000000..b41aae0c
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/LICENSE
@@ -0,0 +1,24 @@
+* Copyright (c) 2006, University of Georgia and Pierre G.F. Gerard-Marchant
+* All rights reserved.
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above copyright
+*       notice, this list of conditions and the following disclaimer in the
+*       documentation and/or other materials provided with the distribution.
+*     * Neither the name of the University of Georgia nor the
+*       names of its contributors may be used to endorse or promote products
+*       derived from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/README.rst b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/README.rst
new file mode 100644
index 00000000..47f20d64
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/README.rst
@@ -0,0 +1,236 @@
+==================================
+A Guide to Masked Arrays in NumPy
+==================================
+
+.. Contents::
+
+See http://www.scipy.org/scipy/numpy/wiki/MaskedArray (dead link)
+for updates of this document.
+
+
+History
+-------
+
+As a regular user of MaskedArray, I (Pierre G.F. Gerard-Marchant) became
+increasingly frustrated with the subclassing of masked arrays (even if
+I can only blame my inexperience). I needed to develop a class of arrays
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/LICENSE b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/LICENSE
new file mode 100644
index 00000000..b41aae0c
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/LICENSE
@@ -0,0 +1,24 @@
+* Copyright (c) 2006, University of Georgia and Pierre G.F. Gerard-Marchant
+* All rights reserved.
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above copyright
+*       notice, this list of conditions and the following disclaimer in the
+*       documentation and/or other materials provided with the distribution.
+*     * Neither the name of the University of Georgia nor the
+*       names of its contributors may be used to endorse or promote products
+*       derived from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/README.rst b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/README.rst
new file mode 100644
index 00000000..47f20d64
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/README.rst
@@ -0,0 +1,236 @@
+==================================
+A Guide to Masked Arrays in NumPy
+==================================
+
+.. Contents::
+
+See http://www.scipy.org/scipy/numpy/wiki/MaskedArray (dead link)
+for updates of this document.
+
+
+History
+-------
+
+As a regular user of MaskedArray, I (Pierre G.F. Gerard-Marchant) became
+increasingly frustrated with the subclassing of masked arrays (even if
+I can only blame my inexperience). I needed to develop a class of arrays
+that could store some additional information along with numerical values,
+while keeping the possibility for missing data (picture storing a series
+of dates along with measurements); this would later become the `TimeSeries
+Scikit `__ (dead link).
+
+I started to implement such a class, but then quickly realized that
+any additional information disappeared when processing these subarrays
+(for example, adding a constant value to a subarray would erase its
+dates). I ended up writing the equivalent of *numpy.core.ma* for my
+particular class, ufuncs included. Everything went fine until I needed to
+subclass my new class, when more problems showed up: some attributes of
+the new subclass were lost during processing. I identified the culprit as
+MaskedArray, which returns masked ndarrays when I expected masked
+arrays of my class. I was preparing myself to rewrite *numpy.core.ma*
+when I forced myself to learn how to subclass ndarrays. As I became more
+familiar with the *__new__* and *__array_finalize__* methods,
+I started to wonder why masked arrays were objects, and not ndarrays,
+and whether it wouldn't be more convenient for subclassing if they did
+behave like regular ndarrays.
+
+The new *maskedarray* is what I eventually came up with. The
+main differences from the initial *numpy.core.ma* package are
+that MaskedArray is now a subclass of *ndarray* and that the
+*_data* section can now be any subclass of *ndarray*. Apart from a
+couple of issues listed below, the behavior of the new MaskedArray
+class reproduces the old one. Initially the *maskedarray*
+implementation was marginally slower than *numpy.ma* in some areas,
+but work is underway to speed it up; the expectation is that it can be
+made substantially faster than the present *numpy.ma*.
+
+
+Note that if the subclass has some special methods and
+attributes, they are not propagated to the masked version:
+this would require a modification of the *__getattribute__*
+method (first trying *ndarray.__getattribute__*, then trying
+*self._data.__getattribute__* if an exception is raised in the first
+place), which really slows things down.
+
+Main differences
+----------------
+
+ * The *_data* part of the masked array can be any subclass of ndarray (but not recarray, cf. below).
+ * *fill_value* is now a property, not a function.
+ * In the majority of cases, the mask is forced to *nomask* when no value is actually masked. A notable exception is when a masked array (with no masked values) has just been unpickled.
+ * I got rid of the *share_mask* flag; I never understood its purpose.
+ * *put*, *putmask* and *take* now mimic the ndarray methods, to avoid unpleasant surprises. Moreover, *put* and *putmask* both update the mask when needed.
+ * If *a* is a masked array, *bool(a)* raises a *ValueError*, as it does with ndarrays.
+ * In the same way, the comparison of two masked arrays is a masked array, not a plain boolean array (see the sketch after this list).
+ * *filled(a)* returns an array of the same subclass as *a._data*, and no test is performed on whether it is contiguous or not.
+ * The mask is always printed, even if it's *nomask*, which makes it easy (for me at least) to remember that a masked array is used.
+ * *cumsum* works as if the *_data* array was filled with 0. The mask is preserved, but not updated.
+ * *cumprod* works as if the *_data* array was filled with 1. The mask is preserved, but not updated.
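A short sketch of the comparison behaviour mentioned above (modern
``numpy.ma`` spelling; repr formatting is illustrative)::

    >>> import numpy as np
    >>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
    >>> b = np.ma.array([1, 0, 0], mask=[0, 0, 1])
    >>> a == b    # a masked array, masked wherever either input is masked
    masked_array(data=[True, --, --],
                 mask=[False, True, True],
           fill_value=True)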
+
+New features
+------------
+
+This list is non-exhaustive...
+
+ * The *mr_* function mimics *r_* for masked arrays.
+ * The *anom* method returns the anomalies (deviations from the average).
+
+Using the new package with numpy.core.ma
+----------------------------------------
+
+I tried to make sure that the new package can understand old masked
+arrays. Unfortunately, there's no upward compatibility.
+
+For example:
+
+>>> import numpy.core.ma as old_ma
+>>> import maskedarray as new_ma
+>>> x = old_ma.array([1,2,3,4,5], mask=[0,0,1,0,0])
+>>> x
+array(data =
+ [     1      2 999999      4      5],
+      mask =
+ [False False True False False],
+      fill_value=999999)
+>>> y = new_ma.array([1,2,3,4,5], mask=[0,0,1,0,0])
+>>> y
+array(data = [1 2 -- 4 5],
+      mask = [False False True False False],
+      fill_value=999999)
+>>> x==y
+array(data =
+ [True True True True True],
+      mask =
+ [False False True False False],
+      fill_value=?)
+>>> old_ma.getmask(x) == new_ma.getmask(x)
+array([True, True, True, True, True])
+>>> old_ma.getmask(y) == new_ma.getmask(y)
+array([True, True, False, True, True])
+>>> old_ma.getmask(y)
+False
+
+
+Using maskedarray with matplotlib
+---------------------------------
+
+Starting with matplotlib 0.91.2, the masked array importing will work with
+the maskedarray branch as well as with earlier versions.
+
+By default matplotlib still uses numpy.ma, but there is an rcParams setting
+that you can use to select maskedarray instead. In the matplotlibrc file
+you will find::
+
+    #maskedarray : False       # True to use external maskedarray module
+                               # instead of numpy.ma; this is a temporary
+                               # setting for testing maskedarray.
+
+Uncomment and set to True to select maskedarray everywhere.
+Alternatively, you can test a script with maskedarray by using a
+command-line option, e.g.::
+
+    python simple_plot.py --maskedarray
+
+
+Masked records
+--------------
+
+Like *numpy.core.ma*, the *ndarray*-based implementation
+of MaskedArray is limited when working with records: you can
+mask any record of the array, but not a field in a record. If you
+need this feature, you may want to give the *mrecords* package
+a try (available in the *maskedarray* directory in the scipy
+sandbox). This module defines a new class, *MaskedRecord*. An
+instance of this class accepts a *recarray* as data, and uses two
+masks: the *fieldmask* has as many entries as records in the array,
+each entry with the same fields as a record, but of boolean types:
+they indicate whether the field is masked or not; a record entry
+is flagged as masked in the *mask* array if all the fields are
+masked. A few examples in the file should give you an idea of what
+can be done. Note that *mrecords* is still experimental...
+
+Optimizing maskedarray
+----------------------
+
+Should masked arrays be filled before processing or not?
+--------------------------------------------------------
+
+In the current implementation, most operations on masked arrays involve
+the following steps:
+
+ * the input arrays are filled
+ * the operation is performed on the filled arrays
+ * the mask is set for the results, from the combination of the input
+   masks and the mask corresponding to the domain of the operation.
+
+For example, consider the division of two masked arrays::
+
+    import numpy
+    import maskedarray as ma
+    x = ma.array([1,2,3,4], mask=[1,0,0,0], dtype=numpy.float_)
+    y = ma.array([-1,0,1,2], mask=[0,0,0,1], dtype=numpy.float_)
+
+The division of x by y is then computed as::
+
+    d1 = x.filled(0)    # d1 = array([0., 2., 3., 4.])
+    d2 = y.filled(1)    # d2 = array([-1., 0., 1., 1.])
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
+                        # m = array([True,False,False,True])
+    dm = ma.divide.domain(d1, d2)
+                        # dm = array([False, True, False, False])
+    result = (d1/d2).view(MaskedArray)
+                        # masked_array([-0., inf, 3., 4.])
+    result._mask = logical_or(m, dm)
+
+Note that a division by zero takes place. To avoid it, we can consider
+filling the input arrays, taking the domain mask into account, so that::
+
+    d1 = x._data.copy()    # d1 = array([1., 2., 3., 4.])
+    d2 = y._data.copy()    # d2 = array([-1., 0., 1., 2.])
+    dm = ma.divide.domain(d1, d2)
+                           # dm = array([False, True, False, False])
+    numpy.putmask(d2, dm, 1)    # d2 = array([-1., 1., 1., 2.])
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
+                           # m = array([True,False,False,True])
+    result = (d1/d2).view(MaskedArray)
+                           # masked_array([-1., 2., 3., 2.])
+    result._mask = logical_or(m, dm)
+
+Note that the *.copy()* is required to avoid updating the inputs with
+*putmask*. The *.filled()* method also involves a *.copy()*.
+
+A third possibility is to avoid filling the arrays::
+
+    d1 = x._data    # d1 = array([1., 2., 3., 4.])
+    d2 = y._data    # d2 = array([-1., 0., 1., 2.])
+    dm = ma.divide.domain(d1, d2)
+                    # dm = array([False, True, False, False])
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
+                    # m = array([True,False,False,True])
+    result = (d1/d2).view(MaskedArray)
+                    # masked_array([-1., inf, 3., 2.])
+    result._mask = logical_or(m, dm)
+
+Note that here again the division by zero takes place.
+
+A quick benchmark gives the following results:
+
+ * *numpy.ma.divide*      : 2.69 ms per loop
+ * classical division     : 2.21 ms per loop
+ * division w/ prefilling : 2.34 ms per loop
+ * division w/o filling   : 1.55 ms per loop
+
+So, is it worth filling the arrays beforehand? Yes, if we are interested
+in avoiding floating-point exceptions that may fill the result with infs
+and nans. No, if we are only interested in speed... A minimal sketch of
+how such a timing could be run with ``timeit`` follows.
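For reference, a minimal ``timeit`` sketch under stated assumptions: the
array size, mask density, and the modern ``numpy.ma`` spelling are choices
of this sketch, not the original benchmark setup, so the figures will not
match the table above::

    import timeit
    import numpy as np

    n = 10_000
    # Two float arrays with roughly 10% of the entries masked.
    x = np.ma.array(np.random.rand(n), mask=np.random.rand(n) < 0.1)
    y = np.ma.array(np.random.rand(n), mask=np.random.rand(n) < 0.1)

    # Total seconds over 1000 runs equals milliseconds per run.
    t = timeit.timeit(lambda: np.ma.divide(x, y), number=1000)
    print(f"np.ma.divide: {t:.2f} ms per loop")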
+ +For example, examine the following array: + +>>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan]) + +When we try to calculate the mean of the data, the result is undetermined: + +>>> np.mean(x) +nan + +The mean is calculated using roughly ``np.sum(x)/len(x)``, but since +any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter +masked arrays: + +>>> m = np.ma.masked_array(x, np.isnan(x)) +>>> m +masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --], + mask = [False False False True False False False True], + fill_value=1e+20) + +Here, we construct a masked array that suppress all ``NaN`` values. We +may now proceed to calculate the mean of the other values: + +>>> np.mean(m) +2.6666666666666665 + +.. [1] Not-a-Number, a floating point value that is the result of an + invalid operation. + +.. moduleauthor:: Pierre Gerard-Marchant +.. moduleauthor:: Jarrod Millman + +""" +from . import core +from .core import * + +from . import extras +from .extras import * + +__all__ = ['core', 'extras'] +__all__ += core.__all__ +__all__ += extras.__all__ + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/__init__.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/__init__.pyi new file mode 100644 index 00000000..ce72383e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/__init__.pyi @@ -0,0 +1,234 @@ +from numpy._pytesttester import PytestTester + +from numpy.ma import extras as extras + +from numpy.ma.core import ( + MAError as MAError, + MaskError as MaskError, + MaskType as MaskType, + MaskedArray as MaskedArray, + abs as abs, + absolute as absolute, + add as add, + all as all, + allclose as allclose, + allequal as allequal, + alltrue as alltrue, + amax as amax, + amin as amin, + angle as angle, + anom as anom, + anomalies as anomalies, + any as any, + append as append, + arange as arange, + arccos as arccos, + arccosh as arccosh, + arcsin as arcsin, + arcsinh as arcsinh, + arctan as arctan, + arctan2 as arctan2, + arctanh as arctanh, + argmax as argmax, + argmin as argmin, + argsort as argsort, + around as around, + array as array, + asanyarray as asanyarray, + asarray as asarray, + bitwise_and as bitwise_and, + bitwise_or as bitwise_or, + bitwise_xor as bitwise_xor, + bool_ as bool_, + ceil as ceil, + choose as choose, + clip as clip, + common_fill_value as common_fill_value, + compress as compress, + compressed as compressed, + concatenate as concatenate, + conjugate as conjugate, + convolve as convolve, + copy as copy, + correlate as correlate, + cos as cos, + cosh as cosh, + count as count, + cumprod as cumprod, + cumsum as cumsum, + default_fill_value as default_fill_value, + diag as diag, + diagonal as diagonal, + diff as diff, + divide as divide, + empty as empty, + empty_like as empty_like, + equal as equal, + exp as exp, + expand_dims as expand_dims, + fabs as fabs, + filled as filled, + fix_invalid as fix_invalid, + flatten_mask as flatten_mask, + flatten_structured_array as flatten_structured_array, + floor as floor, + floor_divide as floor_divide, + fmod as fmod, + frombuffer as frombuffer, + fromflex as fromflex, + fromfunction as fromfunction, + getdata as getdata, + getmask as getmask, + getmaskarray as getmaskarray, + greater as greater, + greater_equal as greater_equal, + harden_mask as harden_mask, + hypot as hypot, + identity as identity, + ids as ids, + indices as indices, + inner as inner, + innerproduct as innerproduct, + isMA as isMA, + 
isMaskedArray as isMaskedArray, + is_mask as is_mask, + is_masked as is_masked, + isarray as isarray, + left_shift as left_shift, + less as less, + less_equal as less_equal, + log as log, + log10 as log10, + log2 as log2, + logical_and as logical_and, + logical_not as logical_not, + logical_or as logical_or, + logical_xor as logical_xor, + make_mask as make_mask, + make_mask_descr as make_mask_descr, + make_mask_none as make_mask_none, + mask_or as mask_or, + masked as masked, + masked_array as masked_array, + masked_equal as masked_equal, + masked_greater as masked_greater, + masked_greater_equal as masked_greater_equal, + masked_inside as masked_inside, + masked_invalid as masked_invalid, + masked_less as masked_less, + masked_less_equal as masked_less_equal, + masked_not_equal as masked_not_equal, + masked_object as masked_object, + masked_outside as masked_outside, + masked_print_option as masked_print_option, + masked_singleton as masked_singleton, + masked_values as masked_values, + masked_where as masked_where, + max as max, + maximum as maximum, + maximum_fill_value as maximum_fill_value, + mean as mean, + min as min, + minimum as minimum, + minimum_fill_value as minimum_fill_value, + mod as mod, + multiply as multiply, + mvoid as mvoid, + ndim as ndim, + negative as negative, + nomask as nomask, + nonzero as nonzero, + not_equal as not_equal, + ones as ones, + outer as outer, + outerproduct as outerproduct, + power as power, + prod as prod, + product as product, + ptp as ptp, + put as put, + putmask as putmask, + ravel as ravel, + remainder as remainder, + repeat as repeat, + reshape as reshape, + resize as resize, + right_shift as right_shift, + round as round, + set_fill_value as set_fill_value, + shape as shape, + sin as sin, + sinh as sinh, + size as size, + soften_mask as soften_mask, + sometrue as sometrue, + sort as sort, + sqrt as sqrt, + squeeze as squeeze, + std as std, + subtract as subtract, + sum as sum, + swapaxes as swapaxes, + take as take, + tan as tan, + tanh as tanh, + trace as trace, + transpose as transpose, + true_divide as true_divide, + var as var, + where as where, + zeros as zeros, +) + +from numpy.ma.extras import ( + apply_along_axis as apply_along_axis, + apply_over_axes as apply_over_axes, + atleast_1d as atleast_1d, + atleast_2d as atleast_2d, + atleast_3d as atleast_3d, + average as average, + clump_masked as clump_masked, + clump_unmasked as clump_unmasked, + column_stack as column_stack, + compress_cols as compress_cols, + compress_nd as compress_nd, + compress_rowcols as compress_rowcols, + compress_rows as compress_rows, + count_masked as count_masked, + corrcoef as corrcoef, + cov as cov, + diagflat as diagflat, + dot as dot, + dstack as dstack, + ediff1d as ediff1d, + flatnotmasked_contiguous as flatnotmasked_contiguous, + flatnotmasked_edges as flatnotmasked_edges, + hsplit as hsplit, + hstack as hstack, + isin as isin, + in1d as in1d, + intersect1d as intersect1d, + mask_cols as mask_cols, + mask_rowcols as mask_rowcols, + mask_rows as mask_rows, + masked_all as masked_all, + masked_all_like as masked_all_like, + median as median, + mr_ as mr_, + ndenumerate as ndenumerate, + notmasked_contiguous as notmasked_contiguous, + notmasked_edges as notmasked_edges, + polyfit as polyfit, + row_stack as row_stack, + setdiff1d as setdiff1d, + setxor1d as setxor1d, + stack as stack, + unique as unique, + union1d as union1d, + vander as vander, + vstack as vstack, +) + +__all__: list[str] +__path__: list[str] +test: PytestTester diff --git 
a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/core.py b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/core.py new file mode 100644 index 00000000..16f74e89 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/core.py @@ -0,0 +1,8565 @@ +""" +numpy.ma : a package to handle missing or invalid values. + +This package was initially written for numarray by Paul F. Dubois +at Lawrence Livermore National Laboratory. +In 2006, the package was completely rewritten by Pierre Gerard-Marchant +(University of Georgia) to make the MaskedArray class a subclass of ndarray, +and to improve support of structured arrays. + + +Copyright 1999, 2000, 2001 Regents of the University of California. +Released for unlimited redistribution. + +* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. +* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant + (pgmdevlist_AT_gmail_DOT_com) +* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) + +.. moduleauthor:: Pierre Gerard-Marchant + +""" +# pylint: disable-msg=E1002 +import builtins +import inspect +import operator +import warnings +import textwrap +import re +from functools import reduce + +import numpy as np +import numpy.core.umath as umath +import numpy.core.numerictypes as ntypes +from numpy.core import multiarray as mu +from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue +from numpy import array as narray +from numpy.lib.function_base import angle +from numpy.compat import ( + getargspec, formatargspec, long, unicode, bytes + ) +from numpy import expand_dims +from numpy.core.numeric import normalize_axis_tuple + + +__all__ = [ + 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', + 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin', + 'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos', + 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', + 'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray', + 'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil', + 'choose', 'clip', 'common_fill_value', 'compress', 'compressed', + 'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh', + 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal', + 'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp', + 'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask', + 'flatten_structured_array', 'floor', 'floor_divide', 'fmod', + 'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask', + 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot', + 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA', + 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', + 'less', 'less_equal', 'log', 'log10', 'log2', + 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask', + 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked', + 'masked_array', 'masked_equal', 'masked_greater', + 'masked_greater_equal', 'masked_inside', 'masked_invalid', + 'masked_less', 'masked_less_equal', 'masked_not_equal', + 'masked_object', 'masked_outside', 'masked_print_option', + 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum', + 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', + 'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero', + 'not_equal', 'ones', 'ones_like', 'outer', 'outerproduct', 'power', 'prod', + 'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder', + 'repeat', 'reshape', 
'resize', 'right_shift', 'round', 'round_', + 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask', + 'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', + 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', + 'var', 'where', 'zeros', 'zeros_like', + ] + +MaskType = np.bool_ +nomask = MaskType(0) + +class MaskedArrayFutureWarning(FutureWarning): + pass + +def _deprecate_argsort_axis(arr): + """ + Adjust the axis passed to argsort, warning if necessary + + Parameters + ---------- + arr + The array which argsort was called on + + np.ma.argsort has a long-term bug where the default of the axis argument + is wrong (gh-8701), which now must be kept for backwards compatibility. + Thankfully, this only makes a difference when arrays are 2- or more- + dimensional, so we only need a warning then. + """ + if arr.ndim <= 1: + # no warning needed - but switch to -1 anyway, to avoid surprising + # subclasses, which are more likely to implement scalar axes. + return -1 + else: + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + warnings.warn( + "In the future the default for argsort will be axis=-1, not the " + "current None, to match its documentation and np.argsort. " + "Explicitly pass -1 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=3) + return None + + +def doc_note(initialdoc, note): + """ + Adds a Notes section to an existing docstring. + + """ + if initialdoc is None: + return + if note is None: + return initialdoc + + notesplit = re.split(r'\n\s*?Notes\n\s*?-----', inspect.cleandoc(initialdoc)) + notedoc = "\n\nNotes\n-----\n%s\n" % inspect.cleandoc(note) + + return ''.join(notesplit[:1] + [notedoc] + notesplit[1:]) + + +def get_object_signature(obj): + """ + Get the signature from obj + + """ + try: + sig = formatargspec(*getargspec(obj)) + except TypeError: + sig = '' + return sig + + +############################################################################### +# Exceptions # +############################################################################### + + +class MAError(Exception): + """ + Class for masked array related errors. + + """ + pass + + +class MaskError(MAError): + """ + Class for mask related errors. 
+ + """ + pass + + +############################################################################### +# Filling options # +############################################################################### + + +# b: boolean - c: complex - f: floats - i: integer - O: object - S: string +default_filler = {'b': True, + 'c': 1.e20 + 0.0j, + 'f': 1.e20, + 'i': 999999, + 'O': '?', + 'S': b'N/A', + 'u': 999999, + 'V': b'???', + 'U': 'N/A' + } + +# Add datetime64 and timedelta64 types +for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", + "fs", "as"]: + default_filler["M8[" + v + "]"] = np.datetime64("NaT", v) + default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v) + +float_types_list = [np.half, np.single, np.double, np.longdouble, + np.csingle, np.cdouble, np.clongdouble] +max_filler = ntypes._minvals +max_filler.update([(k, -np.inf) for k in float_types_list[:4]]) +max_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]]) + +min_filler = ntypes._maxvals +min_filler.update([(k, +np.inf) for k in float_types_list[:4]]) +min_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]]) + +del float_types_list + +def _recursive_fill_value(dtype, f): + """ + Recursively produce a fill value for `dtype`, calling f on scalar dtypes + """ + if dtype.names is not None: + # We wrap into `array` here, which ensures we use NumPy cast rules + # for integer casts, this allows the use of 99999 as a fill value + # for int8. + # TODO: This is probably a mess, but should best preserve behavior? + vals = tuple( + np.array(_recursive_fill_value(dtype[name], f)) + for name in dtype.names) + return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d + elif dtype.subdtype: + subtype, shape = dtype.subdtype + subval = _recursive_fill_value(subtype, f) + return np.full(shape, subval) + else: + return f(dtype) + + +def _get_dtype_of(obj): + """ Convert the argument for *_fill_value into a dtype """ + if isinstance(obj, np.dtype): + return obj + elif hasattr(obj, 'dtype'): + return obj.dtype + else: + return np.asanyarray(obj).dtype + + +def default_fill_value(obj): + """ + Return the default fill value for the argument object. + + The default filling value depends on the datatype of the input + array or the type of the input scalar: + + ======== ======== + datatype default + ======== ======== + bool True + int 999999 + float 1.e20 + complex 1.e20+0j + object '?' + string 'N/A' + ======== ======== + + For structured types, a structured scalar is returned, with each field the + default fill value for its type. + + For subarray types, the fill value is an array of the same size containing + the default scalar fill value. + + Parameters + ---------- + obj : ndarray, dtype or scalar + The array data-type or scalar for which the default fill value + is returned. + + Returns + ------- + fill_value : scalar + The default fill value. 
+ + Examples + -------- + >>> np.ma.default_fill_value(1) + 999999 + >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) + 1e+20 + >>> np.ma.default_fill_value(np.dtype(complex)) + (1e+20+0j) + + """ + def _scalar_fill_value(dtype): + if dtype.kind in 'Mm': + return default_filler.get(dtype.str[1:], '?') + else: + return default_filler.get(dtype.kind, '?') + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) + + +def _extremum_fill_value(obj, extremum, extremum_name): + + def _scalar_fill_value(dtype): + try: + return extremum[dtype] + except KeyError as e: + raise TypeError( + f"Unsuitable type {dtype} for calculating {extremum_name}." + ) from None + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) + + +def minimum_fill_value(obj): + """ + Return the maximum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the minimum of an array with a given dtype. + + Parameters + ---------- + obj : ndarray, dtype or scalar + An object that can be queried for it's numeric type. + + Returns + ------- + val : scalar + The maximum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + maximum_fill_value : The inverse function. + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.minimum_fill_value(a) + 127 + >>> a = np.int32() + >>> ma.minimum_fill_value(a) + 2147483647 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.minimum_fill_value(a) + 127 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.minimum_fill_value(a) + inf + + """ + return _extremum_fill_value(obj, min_filler, "minimum") + + +def maximum_fill_value(obj): + """ + Return the minimum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the maximum of an array with a given dtype. + + Parameters + ---------- + obj : ndarray, dtype or scalar + An object that can be queried for it's numeric type. + + Returns + ------- + val : scalar + The minimum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + minimum_fill_value : The inverse function. + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.int32() + >>> ma.maximum_fill_value(a) + -2147483648 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.maximum_fill_value(a) + -inf + + """ + return _extremum_fill_value(obj, max_filler, "maximum") + + +def _recursive_set_fill_value(fillvalue, dt): + """ + Create a fill value for a structured dtype. + + Parameters + ---------- + fillvalue : scalar or array_like + Scalar or array representing the fill value. If it is of shorter + length than the number of fields in dt, it will be resized. + dt : dtype + The structured dtype for which to create the fill value. + + Returns + ------- + val : tuple + A tuple of values corresponding to the structured fill value. 
+ + """ + fillvalue = np.resize(fillvalue, len(dt.names)) + output_value = [] + for (fval, name) in zip(fillvalue, dt.names): + cdtype = dt[name] + if cdtype.subdtype: + cdtype = cdtype.subdtype[0] + + if cdtype.names is not None: + output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) + else: + output_value.append(np.array(fval, dtype=cdtype).item()) + return tuple(output_value) + + +def _check_fill_value(fill_value, ndtype): + """ + Private function validating the given `fill_value` for the given dtype. + + If fill_value is None, it is set to the default corresponding to the dtype. + + If fill_value is not None, its value is forced to the given dtype. + + The result is always a 0d array. + + """ + ndtype = np.dtype(ndtype) + if fill_value is None: + fill_value = default_fill_value(ndtype) + elif ndtype.names is not None: + if isinstance(fill_value, (ndarray, np.void)): + try: + fill_value = np.array(fill_value, copy=False, dtype=ndtype) + except ValueError as e: + err_msg = "Unable to transform %s to dtype %s" + raise ValueError(err_msg % (fill_value, ndtype)) from e + else: + fill_value = np.asarray(fill_value, dtype=object) + fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), + dtype=ndtype) + else: + if isinstance(fill_value, str) and (ndtype.char not in 'OSVU'): + # Note this check doesn't work if fill_value is not a scalar + err_msg = "Cannot set fill value of string with array of dtype %s" + raise TypeError(err_msg % ndtype) + else: + # In case we want to convert 1e20 to int. + # Also in case of converting string arrays. + try: + fill_value = np.array(fill_value, copy=False, dtype=ndtype) + except (OverflowError, ValueError) as e: + # Raise TypeError instead of OverflowError or ValueError. + # OverflowError is seldom used, and the real problem here is + # that the passed fill_value is not compatible with the ndtype. + err_msg = "Cannot convert fill_value %s to dtype %s" + raise TypeError(err_msg % (fill_value, ndtype)) from e + return np.array(fill_value) + + +def set_fill_value(a, fill_value): + """ + Set the filling value of a, if a is a masked array. + + This function changes the fill value of the masked array `a` in place. + If `a` is not a masked array, the function returns silently, without + doing anything. + + Parameters + ---------- + a : array_like + Input array. + fill_value : dtype + Filling value. A consistency test is performed to make sure + the value is compatible with the dtype of `a`. + + Returns + ------- + None + Nothing returned by this function. + + See Also + -------- + maximum_fill_value : Return the default fill value for a dtype. + MaskedArray.fill_value : Return current fill value. + MaskedArray.set_fill_value : Equivalent method. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> a = ma.masked_where(a < 3, a) + >>> a + masked_array(data=[--, --, --, 3, 4], + mask=[ True, True, True, False, False], + fill_value=999999) + >>> ma.set_fill_value(a, -999) + >>> a + masked_array(data=[--, --, --, 3, 4], + mask=[ True, True, True, False, False], + fill_value=-999) + + Nothing happens if `a` is not a masked array. 
+ + >>> a = list(range(5)) + >>> a + [0, 1, 2, 3, 4] + >>> ma.set_fill_value(a, 100) + >>> a + [0, 1, 2, 3, 4] + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> ma.set_fill_value(a, 100) + >>> a + array([0, 1, 2, 3, 4]) + + """ + if isinstance(a, MaskedArray): + a.set_fill_value(fill_value) + return + + +def get_fill_value(a): + """ + Return the filling value of a, if any. Otherwise, returns the + default filling value for that type. + + """ + if isinstance(a, MaskedArray): + result = a.fill_value + else: + result = default_fill_value(a) + return result + + +def common_fill_value(a, b): + """ + Return the common filling value of two masked arrays, if any. + + If ``a.fill_value == b.fill_value``, return the fill value, + otherwise return None. + + Parameters + ---------- + a, b : MaskedArray + The masked arrays for which to compare fill values. + + Returns + ------- + fill_value : scalar or None + The common fill value, or None. + + Examples + -------- + >>> x = np.ma.array([0, 1.], fill_value=3) + >>> y = np.ma.array([0, 1.], fill_value=3) + >>> np.ma.common_fill_value(x, y) + 3.0 + + """ + t1 = get_fill_value(a) + t2 = get_fill_value(b) + if t1 == t2: + return t1 + return None + + +def filled(a, fill_value=None): + """ + Return input as an array with masked data replaced by a fill value. + + If `a` is not a `MaskedArray`, `a` itself is returned. + If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to + ``a.fill_value``. + + Parameters + ---------- + a : MaskedArray or array_like + An input object. + fill_value : array_like, optional. + Can be scalar or non-scalar. If non-scalar, the + resulting filled array should be broadcastable + over input array. Default is None. + + Returns + ------- + a : ndarray + The filled array. + + See Also + -------- + compressed + + Examples + -------- + >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> x.filled() + array([[999999, 1, 2], + [999999, 4, 5], + [ 6, 7, 8]]) + >>> x.filled(fill_value=333) + array([[333, 1, 2], + [333, 4, 5], + [ 6, 7, 8]]) + >>> x.filled(fill_value=np.arange(3)) + array([[0, 1, 2], + [0, 4, 5], + [6, 7, 8]]) + + """ + if hasattr(a, 'filled'): + return a.filled(fill_value) + + elif isinstance(a, ndarray): + # Should we check for contiguity ? and a.flags['CONTIGUOUS']: + return a + elif isinstance(a, dict): + return np.array(a, 'O') + else: + return np.array(a) + + +def get_masked_subclass(*arrays): + """ + Return the youngest subclass of MaskedArray from a list of (masked) arrays. + + In case of siblings, the first listed takes over. + + """ + if len(arrays) == 1: + arr = arrays[0] + if isinstance(arr, MaskedArray): + rcls = type(arr) + else: + rcls = MaskedArray + else: + arrcls = [type(a) for a in arrays] + rcls = arrcls[0] + if not issubclass(rcls, MaskedArray): + rcls = MaskedArray + for cls in arrcls[1:]: + if issubclass(cls, rcls): + rcls = cls + # Don't return MaskedConstant as result: revert to MaskedArray + if rcls.__name__ == 'MaskedConstant': + return MaskedArray + return rcls + + +def getdata(a, subok=True): + """ + Return the data of a masked array as an ndarray. + + Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, + else return `a` as a ndarray or subclass (depending on `subok`) if not. + + Parameters + ---------- + a : array_like + Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. 
+ subok : bool + Whether to force the output to be a `pure` ndarray (False) or to + return a subclass of ndarray if appropriate (True, default). + + See Also + -------- + getmask : Return the mask of a masked array, or nomask. + getmaskarray : Return the mask of a masked array, or full array of False. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getdata(a) + array([[1, 2], + [3, 4]]) + + Equivalently use the ``MaskedArray`` `data` attribute. + + >>> a.data + array([[1, 2], + [3, 4]]) + + """ + try: + data = a._data + except AttributeError: + data = np.array(a, copy=False, subok=subok) + if not subok: + return data.view(ndarray) + return data + + +get_data = getdata + + +def fix_invalid(a, mask=nomask, copy=True, fill_value=None): + """ + Return input with invalid data masked and replaced by a fill value. + + Invalid data means values of `nan`, `inf`, etc. + + Parameters + ---------- + a : array_like + Input array, a (subclass of) ndarray. + mask : sequence, optional + Mask. Must be convertible to an array of booleans with the same + shape as `data`. True indicates a masked (i.e. invalid) data. + copy : bool, optional + Whether to use a copy of `a` (True) or to fix `a` in place (False). + Default is True. + fill_value : scalar, optional + Value used for fixing invalid data. Default is None, in which case + the ``a.fill_value`` is used. + + Returns + ------- + b : MaskedArray + The input array with invalid entries fixed. + + Notes + ----- + A copy is performed by default. + + Examples + -------- + >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) + >>> x + masked_array(data=[--, -1.0, nan, inf], + mask=[ True, False, False, False], + fill_value=1e+20) + >>> np.ma.fix_invalid(x) + masked_array(data=[--, -1.0, --, --], + mask=[ True, False, True, True], + fill_value=1e+20) + + >>> fixed = np.ma.fix_invalid(x) + >>> fixed.data + array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20]) + >>> x.data + array([ 1., -1., nan, inf]) + + """ + a = masked_array(a, copy=copy, mask=mask, subok=True) + invalid = np.logical_not(np.isfinite(a._data)) + if not invalid.any(): + return a + a._mask |= invalid + if fill_value is None: + fill_value = a.fill_value + a._data[invalid] = fill_value + return a + +def is_string_or_list_of_strings(val): + return (isinstance(val, str) or + (isinstance(val, list) and val and + builtins.all(isinstance(s, str) for s in val))) + +############################################################################### +# Ufuncs # +############################################################################### + + +ufunc_domain = {} +ufunc_fills = {} + + +class _DomainCheckInterval: + """ + Define a valid interval, so that : + + ``domain_check_interval(a,b)(x) == True`` where + ``x < a`` or ``x > b``. + + """ + + def __init__(self, a, b): + "domain_check_interval(a,b)(x) = true where x < a or y > b" + if a > b: + (a, b) = (b, a) + self.a = a + self.b = b + + def __call__(self, x): + "Execute the call behavior." + # nans at masked positions cause RuntimeWarnings, even though + # they are masked. To avoid this we suppress warnings. 
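+        # (For example, if x holds nan at an already-masked position, the
+        # greater/less comparisons below would emit "invalid value
+        # encountered" RuntimeWarnings without the errstate guard.)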
+ with np.errstate(invalid='ignore'): + return umath.logical_or(umath.greater(x, self.b), + umath.less(x, self.a)) + + +class _DomainTan: + """ + Define a valid interval for the `tan` function, so that: + + ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps`` + + """ + + def __init__(self, eps): + "domain_tan(eps) = true where abs(cos(x)) < eps)" + self.eps = eps + + def __call__(self, x): + "Executes the call behavior." + with np.errstate(invalid='ignore'): + return umath.less(umath.absolute(umath.cos(x)), self.eps) + + +class _DomainSafeDivide: + """ + Define a domain for safe division. + + """ + + def __init__(self, tolerance=None): + self.tolerance = tolerance + + def __call__(self, a, b): + # Delay the selection of the tolerance to here in order to reduce numpy + # import times. The calculation of these parameters is a substantial + # component of numpy's import time. + if self.tolerance is None: + self.tolerance = np.finfo(float).tiny + # don't call ma ufuncs from __array_wrap__ which would fail for scalars + a, b = np.asarray(a), np.asarray(b) + with np.errstate(invalid='ignore'): + return umath.absolute(a) * self.tolerance >= umath.absolute(b) + + +class _DomainGreater: + """ + DomainGreater(v)(x) is True where x <= v. + + """ + + def __init__(self, critical_value): + "DomainGreater(v)(x) = true where x <= v" + self.critical_value = critical_value + + def __call__(self, x): + "Executes the call behavior." + with np.errstate(invalid='ignore'): + return umath.less_equal(x, self.critical_value) + + +class _DomainGreaterEqual: + """ + DomainGreaterEqual(v)(x) is True where x < v. + + """ + + def __init__(self, critical_value): + "DomainGreaterEqual(v)(x) = true where x < v" + self.critical_value = critical_value + + def __call__(self, x): + "Executes the call behavior." + with np.errstate(invalid='ignore'): + return umath.less(x, self.critical_value) + + +class _MaskedUFunc: + def __init__(self, ufunc): + self.f = ufunc + self.__doc__ = ufunc.__doc__ + self.__name__ = ufunc.__name__ + + def __str__(self): + return f"Masked version of {self.f}" + + +class _MaskedUnaryOperation(_MaskedUFunc): + """ + Defines masked version of unary operations, where invalid values are + pre-masked. + + Parameters + ---------- + mufunc : callable + The function for which to define a masked version. Made available + as ``_MaskedUnaryOperation.f``. + fill : scalar, optional + Filling value, default is 0. + domain : class instance + Domain for the function. Should be one of the ``_Domain*`` + classes. Default is None. + + """ + + def __init__(self, mufunc, fill=0, domain=None): + super().__init__(mufunc) + self.fill = fill + self.domain = domain + ufunc_domain[mufunc] = domain + ufunc_fills[mufunc] = fill + + def __call__(self, a, *args, **kwargs): + """ + Execute the call behavior. + + """ + d = getdata(a) + # Deal with domain + if self.domain is not None: + # Case 1.1. : Domained function + # nans at masked positions cause RuntimeWarnings, even though + # they are masked. To avoid this we suppress warnings. + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(d, *args, **kwargs) + # Make a mask + m = ~umath.isfinite(result) + m |= self.domain(d) + m |= getmask(a) + else: + # Case 1.2. : Function without a domain + # Get the result and the mask + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(d, *args, **kwargs) + m = getmask(a) + + if not result.ndim: + # Case 2.1. 
: The result is scalarscalar + if m: + return masked + return result + + if m is not nomask: + # Case 2.2. The result is an array + # We need to fill the invalid data back w/ the input Now, + # that's plain silly: in C, we would just skip the element and + # keep the original, but we do have to do it that way in Python + + # In case result has a lower dtype than the inputs (as in + # equal) + try: + np.copyto(result, d, where=m) + except TypeError: + pass + # Transform to + masked_result = result.view(get_masked_subclass(a)) + masked_result._mask = m + masked_result._update_from(a) + return masked_result + + +class _MaskedBinaryOperation(_MaskedUFunc): + """ + Define masked version of binary operations, where invalid + values are pre-masked. + + Parameters + ---------- + mbfunc : function + The function for which to define a masked version. Made available + as ``_MaskedBinaryOperation.f``. + domain : class instance + Default domain for the function. Should be one of the ``_Domain*`` + classes. Default is None. + fillx : scalar, optional + Filling value for the first argument, default is 0. + filly : scalar, optional + Filling value for the second argument, default is 0. + + """ + + def __init__(self, mbfunc, fillx=0, filly=0): + """ + abfunc(fillx, filly) must be defined. + + abfunc(x, filly) = x for all x to enable reduce. + + """ + super().__init__(mbfunc) + self.fillx = fillx + self.filly = filly + ufunc_domain[mbfunc] = None + ufunc_fills[mbfunc] = (fillx, filly) + + def __call__(self, a, b, *args, **kwargs): + """ + Execute the call behavior. + + """ + # Get the data, as ndarray + (da, db) = (getdata(a), getdata(b)) + # Get the result + with np.errstate(): + np.seterr(divide='ignore', invalid='ignore') + result = self.f(da, db, *args, **kwargs) + # Get the mask for the result + (ma, mb) = (getmask(a), getmask(b)) + if ma is nomask: + if mb is nomask: + m = nomask + else: + m = umath.logical_or(getmaskarray(a), mb) + elif mb is nomask: + m = umath.logical_or(ma, getmaskarray(b)) + else: + m = umath.logical_or(ma, mb) + + # Case 1. : scalar + if not result.ndim: + if m: + return masked + return result + + # Case 2. : array + # Revert result to da where masked + if m is not nomask and m.any(): + # any errors, just abort; impossible to guarantee masked values + try: + np.copyto(result, da, casting='unsafe', where=m) + except Exception: + pass + + # Transforms to a (subclass of) MaskedArray + masked_result = result.view(get_masked_subclass(a, b)) + masked_result._mask = m + if isinstance(a, MaskedArray): + masked_result._update_from(a) + elif isinstance(b, MaskedArray): + masked_result._update_from(b) + return masked_result + + def reduce(self, target, axis=0, dtype=None): + """ + Reduce `target` along the given `axis`. + + """ + tclass = get_masked_subclass(target) + m = getmask(target) + t = filled(target, self.filly) + if t.shape == (): + t = t.reshape(1) + if m is not nomask: + m = make_mask(m, copy=True) + m.shape = (1,) + + if m is nomask: + tr = self.f.reduce(t, axis) + mr = nomask + else: + tr = self.f.reduce(t, axis, dtype=dtype) + mr = umath.logical_and.reduce(m, axis) + + if not tr.shape: + if mr: + return masked + else: + return tr + masked_tr = tr.view(tclass) + masked_tr._mask = mr + return masked_tr + + def outer(self, a, b): + """ + Return the function applied to the outer product of a and b. 
+ + """ + (da, db) = (getdata(a), getdata(b)) + d = self.f.outer(da, db) + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = umath.logical_or.outer(ma, mb) + if (not m.ndim) and m: + return masked + if m is not nomask: + np.copyto(d, da, where=m) + if not d.shape: + return d + masked_d = d.view(get_masked_subclass(a, b)) + masked_d._mask = m + return masked_d + + def accumulate(self, target, axis=0): + """Accumulate `target` along `axis` after filling with y fill + value. + + """ + tclass = get_masked_subclass(target) + t = filled(target, self.filly) + result = self.f.accumulate(t, axis) + masked_result = result.view(tclass) + return masked_result + + + +class _DomainedBinaryOperation(_MaskedUFunc): + """ + Define binary operations that have a domain, like divide. + + They have no reduce, outer or accumulate. + + Parameters + ---------- + mbfunc : function + The function for which to define a masked version. Made available + as ``_DomainedBinaryOperation.f``. + domain : class instance + Default domain for the function. Should be one of the ``_Domain*`` + classes. + fillx : scalar, optional + Filling value for the first argument, default is 0. + filly : scalar, optional + Filling value for the second argument, default is 0. + + """ + + def __init__(self, dbfunc, domain, fillx=0, filly=0): + """abfunc(fillx, filly) must be defined. + abfunc(x, filly) = x for all x to enable reduce. + """ + super().__init__(dbfunc) + self.domain = domain + self.fillx = fillx + self.filly = filly + ufunc_domain[dbfunc] = domain + ufunc_fills[dbfunc] = (fillx, filly) + + def __call__(self, a, b, *args, **kwargs): + "Execute the call behavior." + # Get the data + (da, db) = (getdata(a), getdata(b)) + # Get the result + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(da, db, *args, **kwargs) + # Get the mask as a combination of the source masks and invalid + m = ~umath.isfinite(result) + m |= getmask(a) + m |= getmask(b) + # Apply the domain + domain = ufunc_domain.get(self.f, None) + if domain is not None: + m |= domain(da, db) + # Take care of the scalar case first + if not m.ndim: + if m: + return masked + else: + return result + # When the mask is True, put back da if possible + # any errors, just abort; impossible to guarantee masked values + try: + np.copyto(result, 0, casting='unsafe', where=m) + # avoid using "*" since this may be overlaid + masked_da = umath.multiply(m, da) + # only add back if it can be cast safely + if np.can_cast(masked_da.dtype, result.dtype, casting='safe'): + result += masked_da + except Exception: + pass + + # Transforms to a (subclass of) MaskedArray + masked_result = result.view(get_masked_subclass(a, b)) + masked_result._mask = m + if isinstance(a, MaskedArray): + masked_result._update_from(a) + elif isinstance(b, MaskedArray): + masked_result._update_from(b) + return masked_result + + +# Unary ufuncs +exp = _MaskedUnaryOperation(umath.exp) +conjugate = _MaskedUnaryOperation(umath.conjugate) +sin = _MaskedUnaryOperation(umath.sin) +cos = _MaskedUnaryOperation(umath.cos) +arctan = _MaskedUnaryOperation(umath.arctan) +arcsinh = _MaskedUnaryOperation(umath.arcsinh) +sinh = _MaskedUnaryOperation(umath.sinh) +cosh = _MaskedUnaryOperation(umath.cosh) +tanh = _MaskedUnaryOperation(umath.tanh) +abs = absolute = _MaskedUnaryOperation(umath.absolute) +angle = _MaskedUnaryOperation(angle) # from numpy.lib.function_base +fabs = _MaskedUnaryOperation(umath.fabs) +negative = 
_MaskedUnaryOperation(umath.negative) +floor = _MaskedUnaryOperation(umath.floor) +ceil = _MaskedUnaryOperation(umath.ceil) +around = _MaskedUnaryOperation(np.round_) +logical_not = _MaskedUnaryOperation(umath.logical_not) + +# Domained unary ufuncs +sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0, + _DomainGreaterEqual(0.0)) +log = _MaskedUnaryOperation(umath.log, 1.0, + _DomainGreater(0.0)) +log2 = _MaskedUnaryOperation(umath.log2, 1.0, + _DomainGreater(0.0)) +log10 = _MaskedUnaryOperation(umath.log10, 1.0, + _DomainGreater(0.0)) +tan = _MaskedUnaryOperation(umath.tan, 0.0, + _DomainTan(1e-35)) +arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0, + _DomainCheckInterval(-1.0, 1.0)) +arccos = _MaskedUnaryOperation(umath.arccos, 0.0, + _DomainCheckInterval(-1.0, 1.0)) +arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0, + _DomainGreaterEqual(1.0)) +arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0, + _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15)) + +# Binary ufuncs +add = _MaskedBinaryOperation(umath.add) +subtract = _MaskedBinaryOperation(umath.subtract) +multiply = _MaskedBinaryOperation(umath.multiply, 1, 1) +arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0) +equal = _MaskedBinaryOperation(umath.equal) +equal.reduce = None +not_equal = _MaskedBinaryOperation(umath.not_equal) +not_equal.reduce = None +less_equal = _MaskedBinaryOperation(umath.less_equal) +less_equal.reduce = None +greater_equal = _MaskedBinaryOperation(umath.greater_equal) +greater_equal.reduce = None +less = _MaskedBinaryOperation(umath.less) +less.reduce = None +greater = _MaskedBinaryOperation(umath.greater) +greater.reduce = None +logical_and = _MaskedBinaryOperation(umath.logical_and) +alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce +logical_or = _MaskedBinaryOperation(umath.logical_or) +sometrue = logical_or.reduce +logical_xor = _MaskedBinaryOperation(umath.logical_xor) +bitwise_and = _MaskedBinaryOperation(umath.bitwise_and) +bitwise_or = _MaskedBinaryOperation(umath.bitwise_or) +bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor) +hypot = _MaskedBinaryOperation(umath.hypot) + +# Domained binary ufuncs +divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1) +true_divide = _DomainedBinaryOperation(umath.true_divide, + _DomainSafeDivide(), 0, 1) +floor_divide = _DomainedBinaryOperation(umath.floor_divide, + _DomainSafeDivide(), 0, 1) +remainder = _DomainedBinaryOperation(umath.remainder, + _DomainSafeDivide(), 0, 1) +fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1) +mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1) + + +############################################################################### +# Mask creation functions # +############################################################################### + + +def _replace_dtype_fields_recursive(dtype, primitive_dtype): + "Private function allowing recursion in _replace_dtype_fields." + _recurse = _replace_dtype_fields_recursive + + # Do we have some name fields ? 
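+    # (A structured dtype: recurse into each field below, keeping any
+    # (title, name) pairs intact.)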
+ if dtype.names is not None: + descr = [] + for name in dtype.names: + field = dtype.fields[name] + if len(field) == 3: + # Prepend the title to the name + name = (field[-1], name) + descr.append((name, _recurse(field[0], primitive_dtype))) + new_dtype = np.dtype(descr) + + # Is this some kind of composite a la (float,2) + elif dtype.subdtype: + descr = list(dtype.subdtype) + descr[0] = _recurse(dtype.subdtype[0], primitive_dtype) + new_dtype = np.dtype(tuple(descr)) + + # this is a primitive type, so do a direct replacement + else: + new_dtype = primitive_dtype + + # preserve identity of dtypes + if new_dtype == dtype: + new_dtype = dtype + + return new_dtype + + +def _replace_dtype_fields(dtype, primitive_dtype): + """ + Construct a dtype description list from a given dtype. + + Returns a new dtype object, with all fields and subtypes in the given type + recursively replaced with `primitive_dtype`. + + Arguments are coerced to dtypes first. + """ + dtype = np.dtype(dtype) + primitive_dtype = np.dtype(primitive_dtype) + return _replace_dtype_fields_recursive(dtype, primitive_dtype) + + +def make_mask_descr(ndtype): + """ + Construct a dtype description list from a given dtype. + + Returns a new dtype object, with the type of all fields in `ndtype` to a + boolean type. Field names are not altered. + + Parameters + ---------- + ndtype : dtype + The dtype to convert. + + Returns + ------- + result : dtype + A dtype that looks like `ndtype`, the type of all fields is boolean. + + Examples + -------- + >>> import numpy.ma as ma + >>> dtype = np.dtype({'names':['foo', 'bar'], + ... 'formats':[np.float32, np.int64]}) + >>> dtype + dtype([('foo', '>> ma.make_mask_descr(dtype) + dtype([('foo', '|b1'), ('bar', '|b1')]) + >>> ma.make_mask_descr(np.float32) + dtype('bool') + + """ + return _replace_dtype_fields(ndtype, MaskType) + + +def getmask(a): + """ + Return the mask of a masked array, or nomask. + + Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the + mask is not `nomask`, else return `nomask`. To guarantee a full array + of booleans of the same shape as a, use `getmaskarray`. + + Parameters + ---------- + a : array_like + Input `MaskedArray` for which the mask is required. + + See Also + -------- + getdata : Return the data of a masked array as an ndarray. + getmaskarray : Return the mask of a masked array, or full array of False. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getmask(a) + array([[False, True], + [False, False]]) + + Equivalently use the `MaskedArray` `mask` attribute. + + >>> a.mask + array([[False, True], + [False, False]]) + + Result when mask == `nomask` + + >>> b = ma.masked_array([[1,2],[3,4]]) + >>> b + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> ma.nomask + False + >>> ma.getmask(b) == ma.nomask + True + >>> b.mask == ma.nomask + True + + """ + return getattr(a, '_mask', nomask) + + +get_mask = getmask + + +def getmaskarray(arr): + """ + Return the mask of a masked array, or full boolean array of False. + + Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and + the mask is not `nomask`, else return a full boolean array of False of + the same shape as `arr`. + + Parameters + ---------- + arr : array_like + Input `MaskedArray` for which the mask is required. 
+ + See Also + -------- + getmask : Return the mask of a masked array, or nomask. + getdata : Return the data of a masked array as an ndarray. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getmaskarray(a) + array([[False, True], + [False, False]]) + + Result when mask == ``nomask`` + + >>> b = ma.masked_array([[1,2],[3,4]]) + >>> b + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> ma.getmaskarray(b) + array([[False, False], + [False, False]]) + + """ + mask = getmask(arr) + if mask is nomask: + mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None)) + return mask + + +def is_mask(m): + """ + Return True if m is a valid, standard mask. + + This function does not check the contents of the input, only that the + type is MaskType. In particular, this function returns False if the + mask has a flexible dtype. + + Parameters + ---------- + m : array_like + Array to test. + + Returns + ------- + result : bool + True if `m.dtype.type` is MaskType, False otherwise. + + See Also + -------- + ma.isMaskedArray : Test whether input is an instance of MaskedArray. + + Examples + -------- + >>> import numpy.ma as ma + >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0) + >>> m + masked_array(data=[--, 1, --, 2, 3], + mask=[ True, False, True, False, False], + fill_value=0) + >>> ma.is_mask(m) + False + >>> ma.is_mask(m.mask) + True + + Input must be an ndarray (or have similar attributes) + for it to be considered a valid mask. + + >>> m = [False, True, False] + >>> ma.is_mask(m) + False + >>> m = np.array([False, True, False]) + >>> m + array([False, True, False]) + >>> ma.is_mask(m) + True + + Arrays with complex dtypes don't return True. + + >>> dtype = np.dtype({'names':['monty', 'pithon'], + ... 'formats':[bool, bool]}) + >>> dtype + dtype([('monty', '|b1'), ('pithon', '|b1')]) + >>> m = np.array([(True, False), (False, True), (True, False)], + ... dtype=dtype) + >>> m + array([( True, False), (False, True), ( True, False)], + dtype=[('monty', '?'), ('pithon', '?')]) + >>> ma.is_mask(m) + False + + """ + try: + return m.dtype.type is MaskType + except AttributeError: + return False + + +def _shrink_mask(m): + """ + Shrink a mask to nomask if possible + """ + if m.dtype.names is None and not m.any(): + return nomask + else: + return m + + +def make_mask(m, copy=False, shrink=True, dtype=MaskType): + """ + Create a boolean mask from an array. + + Return `m` as a boolean mask, creating a copy if necessary or requested. + The function can accept any sequence that is convertible to integers, + or ``nomask``. Does not require that contents must be 0s and 1s, values + of 0 are interpreted as False, everything else as True. + + Parameters + ---------- + m : array_like + Potential mask. + copy : bool, optional + Whether to return a copy of `m` (True) or `m` itself (False). + shrink : bool, optional + Whether to shrink `m` to ``nomask`` if all its values are False. + dtype : dtype, optional + Data-type of the output mask. By default, the output mask has a + dtype of MaskType (bool). If the dtype is flexible, each field has + a boolean dtype. This is ignored when `m` is ``nomask``, in which + case ``nomask`` is always returned. + + Returns + ------- + result : ndarray + A boolean mask derived from `m`. 
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> m = [True, False, True, True]
+ >>> ma.make_mask(m)
+ array([ True, False, True, True])
+ >>> m = [1, 0, 1, 1]
+ >>> ma.make_mask(m)
+ array([ True, False, True, True])
+ >>> m = [1, 0, 2, -3]
+ >>> ma.make_mask(m)
+ array([ True, False, True, True])
+
+ Effect of the `shrink` parameter.
+
+ >>> m = np.zeros(4)
+ >>> m
+ array([0., 0., 0., 0.])
+ >>> ma.make_mask(m)
+ False
+ >>> ma.make_mask(m, shrink=False)
+ array([False, False, False, False])
+
+ Using a flexible `dtype`.
+
+ >>> m = [1, 0, 1, 1]
+ >>> n = [0, 1, 0, 0]
+ >>> arr = []
+ >>> for man, mouse in zip(m, n):
+ ... arr.append((man, mouse))
+ >>> arr
+ [(1, 0), (0, 1), (1, 0), (1, 0)]
+ >>> dtype = np.dtype({'names':['man', 'mouse'],
+ ... 'formats':[np.int64, np.int64]})
+ >>> arr = np.array(arr, dtype=dtype)
+ >>> arr
+ array([(1, 0), (0, 1), (1, 0), (1, 0)],
+ dtype=[('man', '<i8'), ('mouse', '<i8')])
+ >>> ma.make_mask(arr, dtype=dtype)
+ array([(True, False), (False, True), (True, False), (True, False)],
+ dtype=[('man', '|b1'), ('mouse', '|b1')])
+
+ """
+ if m is nomask:
+ return nomask
+
+ # Make sure the input dtype is valid.
+ dtype = make_mask_descr(dtype)
+
+ # legacy boolean special case: "existence of fields implies true"
+ if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_:
+ return np.ones(m.shape, dtype=dtype)
+
+ # Fill the mask in case there are missing data; turn it into an ndarray.
+ result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
+ # Drop the masks!
+ if shrink:
+ result = _shrink_mask(result)
+ return result
+
+
+def make_mask_none(newshape, dtype=None):
+ """
+ Return a boolean mask of the given shape, filled with False.
+
+ This function returns a boolean ndarray with all entries False, that can
+ be used in common mask manipulations. If a complex dtype is specified, the
+ type of each field is converted to a boolean type.
+
+ Parameters
+ ----------
+ newshape : tuple
+ A tuple indicating the shape of the mask.
+ dtype : {None, dtype}, optional
+ If None, use a MaskType instance. Otherwise, use a new datatype with
+ the same fields as `dtype`, converted to boolean types.
+
+ Returns
+ -------
+ result : ndarray
+ An ndarray of appropriate shape and dtype, filled with False.
+
+ See Also
+ --------
+ make_mask : Create a boolean mask from an array.
+ make_mask_descr : Construct a dtype description list from a given dtype.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> ma.make_mask_none((3,))
+ array([False, False, False])
+
+ Defining a more complex dtype.
+
+ >>> dtype = np.dtype({'names':['foo', 'bar'],
+ ... 'formats':[np.float32, np.int64]})
+ >>> dtype
+ dtype([('foo', '<f4'), ('bar', '<i8')])
+ >>> ma.make_mask_none((3,), dtype=dtype)
+ array([(False, False), (False, False), (False, False)],
+ dtype=[('foo', '|b1'), ('bar', '|b1')])
+
+ """
+ if dtype is None:
+ result = np.zeros(newshape, dtype=MaskType)
+ else:
+ result = np.zeros(newshape, dtype=make_mask_descr(dtype))
+ return result
+
+
+def _recursive_mask_or(m1, m2, newmask):
+ names = m1.dtype.names
+ for name in names:
+ current1 = m1[name]
+ if current1.dtype.names is not None:
+ _recursive_mask_or(current1, m2[name], newmask[name])
+ else:
+ umath.logical_or(current1, m2[name], newmask[name])
+
+
+def mask_or(m1, m2, copy=False, shrink=True):
+ """
+ Combine two masks with the ``logical_or`` operator.
+
+ The result may be a view on `m1` or `m2` if the other is `nomask`
+ (i.e. False).
+
+ Parameters
+ ----------
+ m1, m2 : array_like
+ Input masks.
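
The `shrink` behavior of `make_mask` and the structured-dtype handling of `make_mask_none` can be exercised like this (a sketch, assuming a recent NumPy):

    import numpy as np
    import numpy.ma as ma

    # shrink=True (the default) collapses an all-False mask to nomask.
    assert ma.make_mask(np.zeros(4)) is ma.nomask
    assert ma.make_mask(np.zeros(4), shrink=False).shape == (4,)

    # make_mask_none builds an all-False mask, per field for structured dtypes.
    dt = np.dtype([('a', float), ('b', int)])
    m = ma.make_mask_none((2,), dtype=dt)
    assert m.dtype.names == ('a', 'b') and not m['a'].any()
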
+ copy : bool, optional + If copy is False and one of the inputs is `nomask`, return a view + of the other input mask. Defaults to False. + shrink : bool, optional + Whether to shrink the output to `nomask` if all its values are + False. Defaults to True. + + Returns + ------- + mask : output mask + The result masks values that are masked in either `m1` or `m2`. + + Raises + ------ + ValueError + If `m1` and `m2` have different flexible dtypes. + + Examples + -------- + >>> m1 = np.ma.make_mask([0, 1, 1, 0]) + >>> m2 = np.ma.make_mask([1, 0, 0, 0]) + >>> np.ma.mask_or(m1, m2) + array([ True, True, True, False]) + + """ + + if (m1 is nomask) or (m1 is False): + dtype = getattr(m2, 'dtype', MaskType) + return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype) + if (m2 is nomask) or (m2 is False): + dtype = getattr(m1, 'dtype', MaskType) + return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype) + if m1 is m2 and is_mask(m1): + return m1 + (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) + if dtype1 != dtype2: + raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) + if dtype1.names is not None: + # Allocate an output mask array with the properly broadcast shape. + newmask = np.empty(np.broadcast(m1, m2).shape, dtype1) + _recursive_mask_or(m1, m2, newmask) + return newmask + return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) + + +def flatten_mask(mask): + """ + Returns a completely flattened version of the mask, where nested fields + are collapsed. + + Parameters + ---------- + mask : array_like + Input array, which will be interpreted as booleans. + + Returns + ------- + flattened_mask : ndarray of bools + The flattened input. + + Examples + -------- + >>> mask = np.array([0, 0, 1]) + >>> np.ma.flatten_mask(mask) + array([False, False, True]) + + >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + >>> np.ma.flatten_mask(mask) + array([False, False, False, True]) + + >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) + >>> np.ma.flatten_mask(mask) + array([False, False, False, False, False, True]) + + """ + + def _flatmask(mask): + "Flatten the mask and returns a (maybe nested) sequence of booleans." + mnames = mask.dtype.names + if mnames is not None: + return [flatten_mask(mask[name]) for name in mnames] + else: + return mask + + def _flatsequence(sequence): + "Generates a flattened version of the sequence." + try: + for element in sequence: + if hasattr(element, '__iter__'): + yield from _flatsequence(element) + else: + yield element + except TypeError: + yield sequence + + mask = np.asarray(mask) + flattened = _flatsequence(_flatmask(mask)) + return np.array([_ for _ in flattened], dtype=bool) + + +def _check_mask_axis(mask, axis, keepdims=np._NoValue): + "Check whether there are masked values along the given axis" + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + if mask is not nomask: + return mask.all(axis=axis, **kwargs) + return nomask + + +############################################################################### +# Masking functions # +############################################################################### + +def masked_where(condition, a, copy=True): + """ + Mask an array where a condition is met. + + Return `a` as an array masked where `condition` is True. + Any masked values of `a` or `condition` are also masked in the output. 
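
A short sketch of `mask_or` and `flatten_mask`, the two combinators defined above (assumes a recent NumPy):

    import numpy as np
    import numpy.ma as ma

    m1 = np.array([True, False, False])
    m2 = np.array([False, False, True])
    assert (ma.mask_or(m1, m2) == [True, False, True]).all()

    # flatten_mask collapses nested structured masks to a flat boolean array.
    mdt = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
    nested = np.array([(False, (False, True))], dtype=mdt)
    assert (ma.flatten_mask(nested) == [False, False, True]).all()
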
+
+ Parameters
+ ----------
+ condition : array_like
+ Masking condition. When `condition` tests floating point values for
+ equality, consider using ``masked_values`` instead.
+ a : array_like
+ Array to mask.
+ copy : bool
+ If True (default) make a copy of `a` in the result. If False modify
+ `a` in place and return a view.
+
+ Returns
+ -------
+ result : MaskedArray
+ The result of masking `a` where `condition` is True.
+
+ See Also
+ --------
+ masked_values : Mask using floating point equality.
+ masked_equal : Mask where equal to a given value.
+ masked_not_equal : Mask where `not` equal to a given value.
+ masked_less_equal : Mask where less than or equal to a given value.
+ masked_greater_equal : Mask where greater than or equal to a given value.
+ masked_less : Mask where less than a given value.
+ masked_greater : Mask where greater than a given value.
+ masked_inside : Mask inside a given interval.
+ masked_outside : Mask outside a given interval.
+ masked_invalid : Mask invalid values (NaNs or infs).
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.arange(4)
+ >>> a
+ array([0, 1, 2, 3])
+ >>> ma.masked_where(a <= 2, a)
+ masked_array(data=[--, --, --, 3],
+ mask=[ True, True, True, False],
+ fill_value=999999)
+
+ Mask array `b` conditional on `a`.
+
+ >>> b = ['a', 'b', 'c', 'd']
+ >>> ma.masked_where(a == 2, b)
+ masked_array(data=['a', 'b', --, 'd'],
+ mask=[False, False, True, False],
+ fill_value='N/A',
+ dtype='<U1')
+
+ Effect of the `copy` argument.
+
+ >>> c = ma.masked_where(a <= 2, a)
+ >>> c
+ masked_array(data=[--, --, --, 3],
+ mask=[ True, True, True, False],
+ fill_value=999999)
+ >>> c[0] = 99
+ >>> c
+ masked_array(data=[99, --, --, 3],
+ mask=[False, True, True, False],
+ fill_value=999999)
+ >>> a
+ array([0, 1, 2, 3])
+ >>> c = ma.masked_where(a <= 2, a, copy=False)
+ >>> c[0] = 99
+ >>> c
+ masked_array(data=[99, --, --, 3],
+ mask=[False, True, True, False],
+ fill_value=999999)
+ >>> a
+ array([99, 1, 2, 3])
+
+ When `condition` or `a` contain masked values.
+
+ >>> a = np.arange(4)
+ >>> a = ma.masked_where(a == 2, a)
+ >>> a
+ masked_array(data=[0, 1, --, 3],
+ mask=[False, False, True, False],
+ fill_value=999999)
+ >>> b = np.arange(4)
+ >>> b = ma.masked_where(b == 0, b)
+ >>> b
+ masked_array(data=[--, 1, 2, 3],
+ mask=[ True, False, False, False],
+ fill_value=999999)
+ >>> ma.masked_where(a == 3, b)
+ masked_array(data=[--, 1, --, --],
+ mask=[ True, False, True, True],
+ fill_value=999999)
+
+ """
+ # Make sure that condition is a valid standard-type mask.
+ cond = make_mask(condition, shrink=False)
+ a = np.array(a, copy=copy, subok=True)
+
+ (cshape, ashape) = (cond.shape, a.shape)
+ if cshape and cshape != ashape:
+ raise IndexError("Inconsistent shape between the condition and the input"
+ " (got %s and %s)" % (cshape, ashape))
+ if hasattr(a, '_mask'):
+ cond = mask_or(cond, a._mask)
+ cls = type(a)
+ else:
+ cls = MaskedArray
+ result = a.view(cls)
+ # Assign to *.mask so that structured masks are handled correctly.
+ result.mask = _shrink_mask(cond)
+ # There is no view of a boolean so when 'a' is a MaskedArray with nomask
+ # the update to the result's mask has no effect.
+ if not copy and hasattr(a, '_mask') and getmask(a) is nomask:
+ a._mask = result._mask.view()
+ return result
+
+
+def masked_greater(x, value, copy=True):
+ """
+ Mask an array where greater than a given value.
+
+ This function is a shortcut to ``masked_where``, with
+ `condition` = (x > value).
+
+ See Also
+ --------
+ masked_where : Mask where a condition is met.
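
The `copy=False` behavior described in the Examples above is observable directly; a write through the result also unmasks the entry and reaches the source array (a sketch, assuming a recent NumPy):

    import numpy as np
    import numpy.ma as ma

    a = np.arange(4)
    c = ma.masked_where(a > 1, a, copy=False)  # c shares memory with a
    c[3] = 7                                   # unmasks entry 3, writes through
    assert a[3] == 7 and not c.mask[3]
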
+ + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_greater(a, 2) + masked_array(data=[0, 1, 2, --], + mask=[False, False, False, True], + fill_value=999999) + + """ + return masked_where(greater(x, value), x, copy=copy) + + +def masked_greater_equal(x, value, copy=True): + """ + Mask an array where greater than or equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x >= value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_greater_equal(a, 2) + masked_array(data=[0, 1, --, --], + mask=[False, False, True, True], + fill_value=999999) + + """ + return masked_where(greater_equal(x, value), x, copy=copy) + + +def masked_less(x, value, copy=True): + """ + Mask an array where less than a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x < value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less(a, 2) + masked_array(data=[--, --, 2, 3], + mask=[ True, True, False, False], + fill_value=999999) + + """ + return masked_where(less(x, value), x, copy=copy) + + +def masked_less_equal(x, value, copy=True): + """ + Mask an array where less than or equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x <= value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less_equal(a, 2) + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) + + """ + return masked_where(less_equal(x, value), x, copy=copy) + + +def masked_not_equal(x, value, copy=True): + """ + Mask an array where `not` equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x != value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_not_equal(a, 2) + masked_array(data=[--, --, 2, --], + mask=[ True, True, False, True], + fill_value=999999) + + """ + return masked_where(not_equal(x, value), x, copy=copy) + + +def masked_equal(x, value, copy=True): + """ + Mask an array where equal to a given value. + + Return a MaskedArray, masked where the data in array `x` are + equal to `value`. The fill_value of the returned MaskedArray + is set to `value`. + + For floating point arrays, consider using ``masked_values(x, value)``. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_equal(a, 2) + masked_array(data=[0, 1, --, 3], + mask=[False, False, True, False], + fill_value=2) + + """ + output = masked_where(equal(x, value), x, copy=copy) + output.fill_value = value + return output + + +def masked_inside(x, v1, v2, copy=True): + """ + Mask an array inside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` inside + the interval [v1,v2] (v1 <= x <= v2). 
The boundaries `v1` and `v2` + can be given in either order. + + See Also + -------- + masked_where : Mask where a condition is met. + + Notes + ----- + The array `x` is prefilled with its filling value. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] + >>> ma.masked_inside(x, -0.3, 0.3) + masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], + mask=[False, False, True, True, False, False], + fill_value=1e+20) + + The order of `v1` and `v2` doesn't matter. + + >>> ma.masked_inside(x, 0.3, -0.3) + masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], + mask=[False, False, True, True, False, False], + fill_value=1e+20) + + """ + if v2 < v1: + (v1, v2) = (v2, v1) + xf = filled(x) + condition = (xf >= v1) & (xf <= v2) + return masked_where(condition, x, copy=copy) + + +def masked_outside(x, v1, v2, copy=True): + """ + Mask an array outside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` outside + the interval [v1,v2] (x < v1)|(x > v2). + The boundaries `v1` and `v2` can be given in either order. + + See Also + -------- + masked_where : Mask where a condition is met. + + Notes + ----- + The array `x` is prefilled with its filling value. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] + >>> ma.masked_outside(x, -0.3, 0.3) + masked_array(data=[--, --, 0.01, 0.2, --, --], + mask=[ True, True, False, False, True, True], + fill_value=1e+20) + + The order of `v1` and `v2` doesn't matter. + + >>> ma.masked_outside(x, 0.3, -0.3) + masked_array(data=[--, --, 0.01, 0.2, --, --], + mask=[ True, True, False, False, True, True], + fill_value=1e+20) + + """ + if v2 < v1: + (v1, v2) = (v2, v1) + xf = filled(x) + condition = (xf < v1) | (xf > v2) + return masked_where(condition, x, copy=copy) + + +def masked_object(x, value, copy=True, shrink=True): + """ + Mask the array `x` where the data are exactly equal to value. + + This function is similar to `masked_values`, but only suitable + for object arrays: for floating point, use `masked_values` instead. + + Parameters + ---------- + x : array_like + Array to mask + value : object + Comparison value + copy : {True, False}, optional + Whether to return a copy of `x`. + shrink : {True, False}, optional + Whether to collapse a mask full of False to nomask + + Returns + ------- + result : MaskedArray + The result of masking `x` where equal to `value`. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_equal : Mask where equal to a given value (integers). + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy.ma as ma + >>> food = np.array(['green_eggs', 'ham'], dtype=object) + >>> # don't eat spoiled food + >>> eat = ma.masked_object(food, 'green_eggs') + >>> eat + masked_array(data=[--, 'ham'], + mask=[ True, False], + fill_value='green_eggs', + dtype=object) + >>> # plain ol` ham is boring + >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) + >>> eat = ma.masked_object(fresh_food, 'green_eggs') + >>> eat + masked_array(data=['cheese', 'ham', 'pineapple'], + mask=False, + fill_value='green_eggs', + dtype=object) + + Note that `mask` is set to ``nomask`` if possible. 
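
An interval-masking sketch mirroring the two docstrings above (assumes a recent NumPy):

    import numpy as np
    import numpy.ma as ma

    x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
    inside = ma.masked_inside(x, -1, 1)    # masks -0.5, 0.0, 0.5
    outside = ma.masked_outside(x, -1, 1)  # masks -2.0 and 2.0
    assert inside.mask.sum() == 3 and outside.mask.sum() == 2
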
+ + >>> eat + masked_array(data=['cheese', 'ham', 'pineapple'], + mask=False, + fill_value='green_eggs', + dtype=object) + + """ + if isMaskedArray(x): + condition = umath.equal(x._data, value) + mask = x._mask + else: + condition = umath.equal(np.asarray(x), value) + mask = nomask + mask = mask_or(mask, make_mask(condition, shrink=shrink)) + return masked_array(x, mask=mask, copy=copy, fill_value=value) + + +def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): + """ + Mask using floating point equality. + + Return a MaskedArray, masked where the data in array `x` are approximately + equal to `value`, determined using `isclose`. The default tolerances for + `masked_values` are the same as those for `isclose`. + + For integer types, exact equality is used, in the same way as + `masked_equal`. + + The fill_value is set to `value` and the mask is set to ``nomask`` if + possible. + + Parameters + ---------- + x : array_like + Array to mask. + value : float + Masking value. + rtol, atol : float, optional + Tolerance parameters passed on to `isclose` + copy : bool, optional + Whether to return a copy of `x`. + shrink : bool, optional + Whether to collapse a mask full of False to ``nomask``. + + Returns + ------- + result : MaskedArray + The result of masking `x` where approximately equal to `value`. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_equal : Mask where equal to a given value (integers). + + Examples + -------- + >>> import numpy.ma as ma + >>> x = np.array([1, 1.1, 2, 1.1, 3]) + >>> ma.masked_values(x, 1.1) + masked_array(data=[1.0, --, 2.0, --, 3.0], + mask=[False, True, False, True, False], + fill_value=1.1) + + Note that `mask` is set to ``nomask`` if possible. + + >>> ma.masked_values(x, 2.1) + masked_array(data=[1. , 1.1, 2. , 1.1, 3. ], + mask=False, + fill_value=2.1) + + Unlike `masked_equal`, `masked_values` can perform approximate equalities. + + >>> ma.masked_values(x, 2.1, atol=1e-1) + masked_array(data=[1.0, 1.1, --, 1.1, 3.0], + mask=[False, False, True, False, False], + fill_value=2.1) + + """ + xnew = filled(x, value) + if np.issubdtype(xnew.dtype, np.floating): + mask = np.isclose(xnew, value, atol=atol, rtol=rtol) + else: + mask = umath.equal(xnew, value) + ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value) + if shrink: + ret.shrink_mask() + return ret + + +def masked_invalid(a, copy=True): + """ + Mask an array where invalid values occur (NaNs or infs). + + This function is a shortcut to ``masked_where``, with + `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. + Only applies to arrays with a dtype where NaNs or infs make sense + (i.e. floating point types), but accepts any array_like object. + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(5, dtype=float) + >>> a[2] = np.NaN + >>> a[3] = np.PINF + >>> a + array([ 0., 1., nan, inf, 4.]) + >>> ma.masked_invalid(a) + masked_array(data=[0.0, 1.0, --, --, 4.0], + mask=[False, False, True, True, False], + fill_value=1e+20) + + """ + a = np.array(a, copy=False, subok=True) + res = masked_where(~(np.isfinite(a)), a, copy=copy) + # masked_invalid previously never returned nomask as a mask and doing so + # threw off matplotlib (gh-22842). 
So use shrink=False: + if res._mask is nomask: + res._mask = make_mask_none(res.shape, res.dtype) + return res + +############################################################################### +# Printing options # +############################################################################### + + +class _MaskedPrintOption: + """ + Handle the string used to represent missing data in a masked array. + + """ + + def __init__(self, display): + """ + Create the masked_print_option object. + + """ + self._display = display + self._enabled = True + + def display(self): + """ + Display the string to print for masked values. + + """ + return self._display + + def set_display(self, s): + """ + Set the string to print for masked values. + + """ + self._display = s + + def enabled(self): + """ + Is the use of the display value enabled? + + """ + return self._enabled + + def enable(self, shrink=1): + """ + Set the enabling shrink to `shrink`. + + """ + self._enabled = shrink + + def __str__(self): + return str(self._display) + + __repr__ = __str__ + +# if you single index into a masked location you get this object. +masked_print_option = _MaskedPrintOption('--') + + +def _recursive_printoption(result, mask, printopt): + """ + Puts printoptions in result where mask is True. + + Private function allowing for recursion + + """ + names = result.dtype.names + if names is not None: + for name in names: + curdata = result[name] + curmask = mask[name] + _recursive_printoption(curdata, curmask, printopt) + else: + np.copyto(result, printopt, where=mask) + return + +# For better or worse, these end in a newline +_legacy_print_templates = dict( + long_std=textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + long_flx=textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """), + short_std=textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + short_flx=textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """) +) + +############################################################################### +# MaskedArray class # +############################################################################### + + +def _recursive_filled(a, mask, fill_value): + """ + Recursively fill `a` with `fill_value`. + + """ + names = a.dtype.names + for name in names: + current = a[name] + if current.dtype.names is not None: + _recursive_filled(current, mask[name], fill_value[name]) + else: + np.copyto(current, fill_value[name], where=mask[name]) + + +def flatten_structured_array(a): + """ + Flatten a structured array. + + The data type of the output is chosen such that it can represent all of the + (nested) fields. + + Parameters + ---------- + a : structured array + + Returns + ------- + output : masked array or ndarray + A flattened masked array if the input is a masked array, otherwise a + standard ndarray. + + Examples + -------- + >>> ndtype = [('a', int), ('b', float)] + >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) + >>> np.ma.flatten_structured_array(a) + array([[1., 1.], + [2., 2.]]) + + """ + + def flatten_sequence(iterable): + """ + Flattens a compound of nested iterables. 
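
The module-level `masked_print_option` defined above controls how masked entries are rendered; a quick sketch (assumes a recent NumPy):

    import numpy.ma as ma

    x = ma.masked_array([1, 2, 3], mask=[0, 1, 0])
    assert str(x) == '[1 -- 3]'
    ma.masked_print_option.set_display('_')   # swap the default '--' marker
    assert str(x) == '[1 _ 3]'
    ma.masked_print_option.set_display('--')  # restore the default
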
+
+ """
+ for elm in iter(iterable):
+ if hasattr(elm, '__iter__'):
+ yield from flatten_sequence(elm)
+ else:
+ yield elm
+
+ a = np.asanyarray(a)
+ inishape = a.shape
+ a = a.ravel()
+ if isinstance(a, MaskedArray):
+ out = np.array([tuple(flatten_sequence(d.item())) for d in a._data])
+ out = out.view(MaskedArray)
+ out._mask = np.array([tuple(flatten_sequence(d.item()))
+ for d in getmaskarray(a)])
+ else:
+ out = np.array([tuple(flatten_sequence(d.item())) for d in a])
+ if len(inishape) > 1:
+ newshape = list(out.shape)
+ newshape[0] = inishape
+ out.shape = tuple(flatten_sequence(newshape))
+ return out
+
+
+def _arraymethod(funcname, onmask=True):
+ """
+ Return a class method wrapper around a basic array method.
+
+ Creates a class method which returns a masked array, where the new
+ ``_data`` array is the output of the corresponding basic method called
+ on the original ``_data``.
+
+ If `onmask` is True, the new mask is the output of the method called
+ on the initial mask. Otherwise, the new mask is just a reference
+ to the initial mask.
+
+ Parameters
+ ----------
+ funcname : str
+ Name of the function to apply on data.
+ onmask : bool
+ Whether the mask must be processed also (True) or left
+ alone (False). Default is True. Make available as `_onmask`
+ attribute.
+
+ Returns
+ -------
+ method : instancemethod
+ Class method wrapper of the specified basic array method.
+
+ """
+ def wrapped_method(self, *args, **params):
+ result = getattr(self._data, funcname)(*args, **params)
+ result = result.view(type(self))
+ result._update_from(self)
+ mask = self._mask
+ if not onmask:
+ result.__setmask__(mask)
+ elif mask is not nomask:
+ # __setmask__ makes a copy, which we don't want
+ result._mask = getattr(mask, funcname)(*args, **params)
+ return result
+ methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)
+ if methdoc is not None:
+ wrapped_method.__doc__ = methdoc.__doc__
+ wrapped_method.__name__ = funcname
+ return wrapped_method
+
+
+class MaskedIterator:
+ """
+ Flat iterator object to iterate over masked arrays.
+
+ A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
+ `x`. It allows iterating over the array as if it were a 1-D array,
+ either in a for-loop or by calling its `next` method.
+
+ Iteration is done in C-contiguous style, with the last index varying the
+ fastest. The iterator can also be indexed using basic slicing or
+ advanced indexing.
+
+ See Also
+ --------
+ MaskedArray.flat : Return a flat iterator over an array.
+ MaskedArray.flatten : Returns a flattened copy of an array.
+
+ Notes
+ -----
+ `MaskedIterator` is not exported by the `ma` module. Instead of
+ instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
+
+ Examples
+ --------
+ >>> x = np.ma.array(np.arange(6).reshape(2, 3))
+ >>> fl = x.flat
+ >>> type(fl)
+ <class 'numpy.ma.core.MaskedIterator'>
+ >>> for item in fl:
+ ... print(item)
+ ...
+
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+
+ Extracting more than a single element by indexing the `MaskedIterator`
+ returns a masked array:
+
+ >>> fl[2:4]
+ masked_array(data = [2 3],
+ mask = False,
+ fill_value = 999999)
+
+ """
+
+ def __init__(self, ma):
+ self.ma = ma
+ self.dataiter = ma._data.flat
+
+ if ma._mask is nomask:
+ self.maskiter = None
+ else:
+ self.maskiter = ma._mask.flat
+
+ def __iter__(self):
+ return self
+
+ def __getitem__(self, indx):
+ result = self.dataiter.__getitem__(indx).view(type(self.ma))
+ if self.maskiter is not None:
+ _mask = self.maskiter.__getitem__(indx)
+ if isinstance(_mask, ndarray):
+ # set shape to match that of data; this is needed for matrices
+ _mask.shape = result.shape
+ result._mask = _mask
+ elif isinstance(_mask, np.void):
+ return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
+ elif _mask: # Just a scalar, masked
+ return masked
+ return result
+
+ # This won't work if ravel makes a copy
+ def __setitem__(self, index, value):
+ self.dataiter[index] = getdata(value)
+ if self.maskiter is not None:
+ self.maskiter[index] = getmaskarray(value)
+
+ def __next__(self):
+ """
+ Return the next value, or raise StopIteration.
+
+ Examples
+ --------
+ >>> x = np.ma.array([3, 2], mask=[0, 1])
+ >>> fl = x.flat
+ >>> next(fl)
+ 3
+ >>> next(fl)
+ masked
+ >>> next(fl)
+ Traceback (most recent call last):
+ ...
+ StopIteration
+
+ """
+ d = next(self.dataiter)
+ if self.maskiter is not None:
+ m = next(self.maskiter)
+ if isinstance(m, np.void):
+ return mvoid(d, mask=m, hardmask=self.ma._hardmask)
+ elif m: # Just a scalar, masked
+ return masked
+ return d
+
+
+class MaskedArray(ndarray):
+ """
+ An array class with possibly masked values.
+
+ Masked values of True exclude the corresponding element from any
+ computation.
+
+ Construction::
+
+ x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True,
+ ndmin=0, fill_value=None, keep_mask=True, hard_mask=None,
+ shrink=True, order=None)
+
+ Parameters
+ ----------
+ data : array_like
+ Input data.
+ mask : sequence, optional
+ Mask. Must be convertible to an array of booleans with the same
+ shape as `data`. True indicates masked (i.e. invalid) data.
+ dtype : dtype, optional
+ Data type of the output.
+ If `dtype` is None, the type of the data argument (``data.dtype``)
+ is used. If `dtype` is not None and different from ``data.dtype``,
+ a copy is performed.
+ copy : bool, optional
+ Whether to copy the input data (True), or to use a reference instead.
+ Default is False.
+ subok : bool, optional
+ Whether to return a subclass of `MaskedArray` if possible (True) or a
+ plain `MaskedArray`. Default is True.
+ ndmin : int, optional
+ Minimum number of dimensions. Default is 0.
+ fill_value : scalar, optional
+ Value used to fill in the masked values when necessary.
+ If None, a default based on the data-type is used.
+ keep_mask : bool, optional
+ Whether to combine `mask` with the mask of the input data, if any
+ (True), or to use only `mask` for the output (False). Default is True.
+ hard_mask : bool, optional
+ Whether to use a hard mask or not. With a hard mask, masked values
+ cannot be unmasked. Default is False.
+ shrink : bool, optional
+ Whether to force compression of an empty mask. Default is True.
+ order : {'C', 'F', 'A'}, optional
+ Specify the order of the array. If order is 'C', then the array
+ will be in C-contiguous order (last-index varies the fastest).
+ If order is 'F', then the returned array will be in + Fortran-contiguous order (first-index varies the fastest). + If order is 'A' (default), then the returned array may be + in any order (either C-, Fortran-contiguous, or even discontiguous), + unless a copy is required, in which case it will be C-contiguous. + + Examples + -------- + + The ``mask`` can be initialized with an array of boolean values + with the same shape as ``data``. + + >>> data = np.arange(6).reshape((2, 3)) + >>> np.ma.MaskedArray(data, mask=[[False, True, False], + ... [False, False, True]]) + masked_array( + data=[[0, --, 2], + [3, 4, --]], + mask=[[False, True, False], + [False, False, True]], + fill_value=999999) + + Alternatively, the ``mask`` can be initialized to homogeneous boolean + array with the same shape as ``data`` by passing in a scalar + boolean value: + + >>> np.ma.MaskedArray(data, mask=False) + masked_array( + data=[[0, 1, 2], + [3, 4, 5]], + mask=[[False, False, False], + [False, False, False]], + fill_value=999999) + + >>> np.ma.MaskedArray(data, mask=True) + masked_array( + data=[[--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True]], + fill_value=999999, + dtype=int64) + + .. note:: + The recommended practice for initializing ``mask`` with a scalar + boolean value is to use ``True``/``False`` rather than + ``np.True_``/``np.False_``. The reason is :attr:`nomask` + is represented internally as ``np.False_``. + + >>> np.False_ is np.ma.nomask + True + + """ + + __array_priority__ = 15 + _defaultmask = nomask + _defaulthardmask = False + _baseclass = ndarray + + # Maximum number of elements per axis used when printing an array. The + # 1d case is handled separately because we need more values in this case. + _print_width = 100 + _print_width_1d = 1500 + + def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, + subok=True, ndmin=0, fill_value=None, keep_mask=True, + hard_mask=None, shrink=True, order=None): + """ + Create a new masked array from scratch. + + Notes + ----- + A masked array can also be created by taking a .view(MaskedArray). + + """ + # Process data. + _data = np.array(data, dtype=dtype, copy=copy, + order=order, subok=True, ndmin=ndmin) + _baseclass = getattr(data, '_baseclass', type(_data)) + # Check that we're not erasing the mask. + if isinstance(data, MaskedArray) and (data.shape != _data.shape): + copy = True + + # Here, we copy the _view_, so that we can attach new properties to it + # we must never do .view(MaskedConstant), as that would create a new + # instance of np.ma.masked, which make identity comparison fail + if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant): + _data = ndarray.view(_data, type(data)) + else: + _data = ndarray.view(_data, cls) + + # Handle the case where data is not a subclass of ndarray, but + # still has the _mask attribute like MaskedArrays + if hasattr(data, '_mask') and not isinstance(data, ndarray): + _data._mask = data._mask + # FIXME: should we set `_data._sharedmask = True`? + # Process mask. + # Type of the mask + mdtype = make_mask_descr(_data.dtype) + if mask is nomask: + # Case 1. : no mask in input. + # Erase the current mask ? 
+ if not keep_mask: + # With a reduced version + if shrink: + _data._mask = nomask + # With full version + else: + _data._mask = np.zeros(_data.shape, dtype=mdtype) + # Check whether we missed something + elif isinstance(data, (tuple, list)): + try: + # If data is a sequence of masked array + mask = np.array( + [getmaskarray(np.asanyarray(m, dtype=_data.dtype)) + for m in data], dtype=mdtype) + except (ValueError, TypeError): + # If data is nested + mask = nomask + # Force shrinking of the mask if needed (and possible) + if (mdtype == MaskType) and mask.any(): + _data._mask = mask + _data._sharedmask = False + else: + _data._sharedmask = not copy + if copy: + _data._mask = _data._mask.copy() + # Reset the shape of the original mask + if getmask(data) is not nomask: + # gh-21022 encounters an issue here + # because data._mask.shape is not writeable, but + # the op was also pointless in that case, because + # the shapes were the same, so we can at least + # avoid that path + if data._mask.shape != data.shape: + data._mask.shape = data.shape + else: + # Case 2. : With a mask in input. + # If mask is boolean, create an array of True or False + + # if users pass `mask=None` be forgiving here and cast it False + # for speed; although the default is `mask=nomask` and can differ. + if mask is None: + mask = False + + if mask is True and mdtype == MaskType: + mask = np.ones(_data.shape, dtype=mdtype) + elif mask is False and mdtype == MaskType: + mask = np.zeros(_data.shape, dtype=mdtype) + else: + # Read the mask with the current mdtype + try: + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Make sure the mask and the data have the same shape + if mask.shape != _data.shape: + (nd, nm) = (_data.size, mask.size) + if nm == 1: + mask = np.resize(mask, _data.shape) + elif nm == nd: + mask = np.reshape(mask, _data.shape) + else: + msg = "Mask and data not compatible: data size is %i, " + \ + "mask size is %i." + raise MaskError(msg % (nd, nm)) + copy = True + # Set the mask to the new value + if _data._mask is nomask: + _data._mask = mask + _data._sharedmask = not copy + else: + if not keep_mask: + _data._mask = mask + _data._sharedmask = not copy + else: + if _data.dtype.names is not None: + def _recursive_or(a, b): + "do a|=b on each field of a, recursively" + for name in a.dtype.names: + (af, bf) = (a[name], b[name]) + if af.dtype.names is not None: + _recursive_or(af, bf) + else: + af |= bf + + _recursive_or(_data._mask, mask) + else: + _data._mask = np.logical_or(mask, _data._mask) + _data._sharedmask = False + + # Update fill_value. + if fill_value is None: + fill_value = getattr(data, '_fill_value', None) + # But don't run the check unless we have something to check. + if fill_value is not None: + _data._fill_value = _check_fill_value(fill_value, _data.dtype) + # Process extra options .. + if hard_mask is None: + _data._hardmask = getattr(data, '_hardmask', False) + else: + _data._hardmask = hard_mask + _data._baseclass = _baseclass + return _data + + + def _update_from(self, obj): + """ + Copies some attributes of obj to self. 
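
The mask-conforming rules in `__new__` above (resize a size-1 mask, honor `keep_mask`) can be exercised directly (a sketch, assuming a recent NumPy):

    import numpy as np
    import numpy.ma as ma

    data = np.arange(6).reshape(2, 3)
    # A size-1 mask is resized to the data shape (the nm == 1 branch).
    x = ma.MaskedArray(data, mask=[True])
    assert x.mask.shape == (2, 3) and x.mask.all()

    # keep_mask=False discards the mask carried by the input array.
    y = ma.MaskedArray(x, keep_mask=False)
    assert ma.getmask(y) is ma.nomask
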
+
+ """
+ if isinstance(obj, ndarray):
+ _baseclass = type(obj)
+ else:
+ _baseclass = ndarray
+ # We need to copy the _basedict to avoid backward propagation
+ _optinfo = {}
+ _optinfo.update(getattr(obj, '_optinfo', {}))
+ _optinfo.update(getattr(obj, '_basedict', {}))
+ if not isinstance(obj, MaskedArray):
+ _optinfo.update(getattr(obj, '__dict__', {}))
+ _dict = dict(_fill_value=getattr(obj, '_fill_value', None),
+ _hardmask=getattr(obj, '_hardmask', False),
+ _sharedmask=getattr(obj, '_sharedmask', False),
+ _isfield=getattr(obj, '_isfield', False),
+ _baseclass=getattr(obj, '_baseclass', _baseclass),
+ _optinfo=_optinfo,
+ _basedict=_optinfo)
+ self.__dict__.update(_dict)
+ self.__dict__.update(_optinfo)
+ return
+
+ def __array_finalize__(self, obj):
+ """
+ Finalizes the masked array.
+
+ """
+ # Get main attributes.
+ self._update_from(obj)
+
+ # We have to decide how to initialize self.mask, based on
+ # obj.mask. This is very difficult. There might be some
+ # correspondence between the elements in the array we are being
+ # created from (= obj) and us. Or there might not. This method can
+ # be called in all kinds of places for all kinds of reasons -- could
+ # be empty_like, could be slicing, could be a ufunc, could be a view.
+ # The numpy subclassing interface simply doesn't give us any way
+ # to know, which means that at best this method will be based on
+ # guesswork and heuristics. To make things worse, there isn't even any
+ # clear consensus about what the desired behavior is. For instance,
+ # most users think that np.empty_like(marr) -- which goes via this
+ # method -- should return a masked array with an empty mask (see
+ # gh-3404 and linked discussions), but others disagree, and they have
+ # existing code which depends on empty_like returning an array that
+ # matches the input mask.
+ #
+ # Historically our algorithm was: if the template object mask had the
+ # same *number of elements* as us, then we used *its mask object
+ # itself* as our mask, so that writes to us would also write to the
+ # original array. This is horribly broken in multiple ways.
+ #
+ # Now what we do instead is, if the template object mask has the same
+ # number of elements as us, and we do not have the same base pointer
+ # as the template object (b/c views like arr[...] should keep the same
+ # mask), then we make a copy of the template object mask and use
+ # that. This is also horribly broken but somewhat less so. Maybe.
+ if isinstance(obj, ndarray):
+ # XX: This looks like a bug -- shouldn't it check self.dtype
+ # instead?
+ if obj.dtype.names is not None:
+ _mask = getmaskarray(obj)
+ else:
+ _mask = getmask(obj)
+
+ # If self and obj point to exactly the same data, then probably
+ # self is a simple view of obj (e.g., self = obj[...]), so they
+ # should share the same mask. (This isn't 100% reliable, e.g. self
+ # could be the first row of obj, or have strange strides, but as a
+ # heuristic it's not bad.) In all other cases, we make a copy of
+ # the mask, so that future modifications to 'self' do not end up
+ # side-effecting 'obj' as well.
+ if (_mask is not nomask and obj.__array_interface__["data"][0]
+ != self.__array_interface__["data"][0]):
+ # We should make a copy. But we could get here via astype,
+ # in which case the mask might need a new dtype as well
+ # (e.g., changing to or from a structured dtype), and the
+ # order could have changed. So, change the mask type if
+ # needed and use astype instead of copy.
+ if self.dtype == obj.dtype: + _mask_dtype = _mask.dtype + else: + _mask_dtype = make_mask_descr(self.dtype) + + if self.flags.c_contiguous: + order = "C" + elif self.flags.f_contiguous: + order = "F" + else: + order = "K" + + _mask = _mask.astype(_mask_dtype, order) + else: + # Take a view so shape changes, etc., do not propagate back. + _mask = _mask.view() + else: + _mask = nomask + + self._mask = _mask + # Finalize the mask + if self._mask is not nomask: + try: + self._mask.shape = self.shape + except ValueError: + self._mask = nomask + except (TypeError, AttributeError): + # When _mask.shape is not writable (because it's a void) + pass + + # Finalize the fill_value + if self._fill_value is not None: + self._fill_value = _check_fill_value(self._fill_value, self.dtype) + elif self.dtype.names is not None: + # Finalize the default fill_value for structured arrays + self._fill_value = _check_fill_value(None, self.dtype) + + def __array_wrap__(self, obj, context=None): + """ + Special hook for ufuncs. + + Wraps the numpy array and sets the mask according to context. + + """ + if obj is self: # for in-place operations + result = obj + else: + result = obj.view(type(self)) + result._update_from(self) + + if context is not None: + result._mask = result._mask.copy() + func, args, out_i = context + # args sometimes contains outputs (gh-10459), which we don't want + input_args = args[:func.nin] + m = reduce(mask_or, [getmaskarray(arg) for arg in input_args]) + # Get the domain mask + domain = ufunc_domain.get(func, None) + if domain is not None: + # Take the domain, and make sure it's a ndarray + with np.errstate(divide='ignore', invalid='ignore'): + d = filled(domain(*input_args), True) + + if d.any(): + # Fill the result where the domain is wrong + try: + # Binary domain: take the last value + fill_value = ufunc_fills[func][-1] + except TypeError: + # Unary domain: just use this one + fill_value = ufunc_fills[func] + except KeyError: + # Domain not recognized, use fill_value instead + fill_value = self.fill_value + + np.copyto(result, fill_value, where=d) + + # Update the mask + if m is nomask: + m = d + else: + # Don't modify inplace, we risk back-propagation + m = (m | d) + + # Make sure the mask has the proper size + if result is not self and result.shape == () and m: + return masked + else: + result._mask = m + result._sharedmask = False + + return result + + def view(self, dtype=None, type=None, fill_value=None): + """ + Return a view of the MaskedArray data. + + Parameters + ---------- + dtype : data-type or ndarray sub-class, optional + Data-type descriptor of the returned view, e.g., float32 or int16. + The default, None, results in the view having the same data-type + as `a`. As with ``ndarray.view``, dtype can also be specified as + an ndarray sub-class, which then specifies the type of the + returned object (this is equivalent to setting the ``type`` + parameter). + type : Python type, optional + Type of the returned view, either ndarray or a subclass. The + default None results in type preservation. + fill_value : scalar, optional + The value to use for invalid entries (None by default). + If None, then this argument is inferred from the passed `dtype`, or + in its absence the original array, as discussed in the notes below. + + See Also + -------- + numpy.ndarray.view : Equivalent method on ndarray object. 
+
+ Notes
+ -----
+
+ ``a.view()`` is used two different ways:
+
+ ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
+ of the array's memory with a different data-type. This can cause a
+ reinterpretation of the bytes of memory.
+
+ ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
+ returns an instance of `ndarray_subclass` that looks at the same array
+ (same shape, dtype, etc.) This does not cause a reinterpretation of the
+ memory.
+
+ If `fill_value` is not specified, but `dtype` is specified (and is not
+ an ndarray sub-class), the `fill_value` of the MaskedArray will be
+ reset. If neither `fill_value` nor `dtype` are specified (or if
+ `dtype` is an ndarray sub-class), then the fill value is preserved.
+ Finally, if `fill_value` is specified, but `dtype` is not, the fill
+ value is set to the specified value.
+
+ For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
+ bytes per entry than the previous dtype (for example, converting a
+ regular array to a structured array), then the behavior of the view
+ cannot be predicted just from the superficial appearance of ``a`` (shown
+ by ``print(a)``). It also depends on exactly how ``a`` is stored in
+ memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
+ defined as a slice or transpose, etc., the view may give different
+ results.
+ """
+
+ if dtype is None:
+ if type is None:
+ output = ndarray.view(self)
+ else:
+ output = ndarray.view(self, type)
+ elif type is None:
+ try:
+ if issubclass(dtype, ndarray):
+ output = ndarray.view(self, dtype)
+ dtype = None
+ else:
+ output = ndarray.view(self, dtype)
+ except TypeError:
+ output = ndarray.view(self, dtype)
+ else:
+ output = ndarray.view(self, dtype, type)
+
+ # also make the mask be a view (so attr changes to the view's
+ # mask do not affect the original object's mask)
+ # (especially important to avoid affecting np.masked singleton)
+ if getmask(output) is not nomask:
+ output._mask = output._mask.view()
+
+ # Make sure to reset the _fill_value if needed
+ if getattr(output, '_fill_value', None) is not None:
+ if fill_value is None:
+ if dtype is None:
+ pass # leave _fill_value as is
+ else:
+ output._fill_value = None
+ else:
+ output.fill_value = fill_value
+ return output
+
+ def __getitem__(self, indx):
+ """
+ x.__getitem__(y) <==> x[y]
+
+ Return the item described by i, as a masked array.
+
+ """
+ # We could directly use ndarray.__getitem__ on self.
+ # But then we would have to modify __array_finalize__ to prevent the
+ # mask from being reshaped if it hasn't been set up properly yet
+ # So it's easier to stick to the current version
+ dout = self.data[indx]
+ _mask = self._mask
+
+ def _is_scalar(m):
+ return not isinstance(m, np.ndarray)
+
+ def _scalar_heuristic(arr, elem):
+ """
+ Return whether `elem` is a scalar result of indexing `arr`, or None
+ if undecidable without promoting nomask to a full mask
+ """
+ # obviously a scalar
+ if not isinstance(elem, np.ndarray):
+ return True
+
+ # object array scalar indexing can return anything
+ elif arr.dtype.type is np.object_:
+ if arr.dtype is not elem.dtype:
+ # elem is an array, but dtypes do not match, so must be
+ # an element
+ return True
+
+ # well-behaved subclass that only returns 0d arrays when
+ # expected - this is not a scalar
+ elif type(arr).__getitem__ == ndarray.__getitem__:
+ return False
+
+ return None
+
+ if _mask is not nomask:
+ # _mask cannot be a subclass, so it tells us whether we should
+ # expect a scalar.
It also cannot be of dtype object. + mout = _mask[indx] + scalar_expected = _is_scalar(mout) + + else: + # attempt to apply the heuristic to avoid constructing a full mask + mout = nomask + scalar_expected = _scalar_heuristic(self.data, dout) + if scalar_expected is None: + # heuristics have failed + # construct a full array, so we can be certain. This is costly. + # we could also fall back on ndarray.__getitem__(self.data, indx) + scalar_expected = _is_scalar(getmaskarray(self)[indx]) + + # Did we extract a single item? + if scalar_expected: + # A record + if isinstance(dout, np.void): + # We should always re-cast to mvoid, otherwise users can + # change masks on rows that already have masked values, but not + # on rows that have no masked values, which is inconsistent. + return mvoid(dout, mask=mout, hardmask=self._hardmask) + + # special case introduced in gh-5962 + elif (self.dtype.type is np.object_ and + isinstance(dout, np.ndarray) and + dout is not masked): + # If masked, turn into a MaskedArray, with everything masked. + if mout: + return MaskedArray(dout, mask=True) + else: + return dout + + # Just a scalar + else: + if mout: + return masked + else: + return dout + else: + # Force dout to MA + dout = dout.view(type(self)) + # Inherit attributes from self + dout._update_from(self) + # Check the fill_value + if is_string_or_list_of_strings(indx): + if self._fill_value is not None: + dout._fill_value = self._fill_value[indx] + + # Something like gh-15895 has happened if this check fails. + # _fill_value should always be an ndarray. + if not isinstance(dout._fill_value, np.ndarray): + raise RuntimeError('Internal NumPy error.') + # If we're indexing a multidimensional field in a + # structured array (such as dtype("(2,)i2,(2,)i1")), + # dimensionality goes up (M[field].ndim == M.ndim + + # M.dtype[field].ndim). That's fine for + # M[field] but problematic for M[field].fill_value + # which should have shape () to avoid breaking several + # methods. There is no great way out, so set to + # first element. See issue #6723. + if dout._fill_value.ndim > 0: + if not (dout._fill_value == + dout._fill_value.flat[0]).all(): + warnings.warn( + "Upon accessing multidimensional field " + f"{indx!s}, need to keep dimensionality " + "of fill_value at 0. Discarding " + "heterogeneous fill_value and setting " + f"all to {dout._fill_value[0]!s}.", + stacklevel=2) + # Need to use `.flat[0:1].squeeze(...)` instead of just + # `.flat[0]` to ensure the result is a 0d array and not + # a scalar. + dout._fill_value = dout._fill_value.flat[0:1].squeeze(axis=0) + dout._isfield = True + # Update the mask if needed + if mout is not nomask: + # set shape to match that of data; this is needed for matrices + dout._mask = reshape(mout, dout.shape) + dout._sharedmask = True + # Note: Don't try to check for m.any(), that'll take too long + return dout + + # setitem may put NaNs into integer arrays or occasionally overflow a + # float. But this may happen in masked values, so avoid otherwise + # correct warnings (as is typical also in masked calculations). + @np.errstate(over='ignore', invalid='ignore') + def __setitem__(self, indx, value): + """ + x.__setitem__(i, y) <==> x[i]=y + + Set item described by index. If value is masked, masks those + locations. 
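
The scalar-versus-array logic in `__getitem__` above boils down to this observable behavior (a sketch, assuming a recent NumPy):

    import numpy as np
    import numpy.ma as ma

    x = ma.masked_array([10, 20, 30], mask=[False, True, False])
    assert x[1] is ma.masked      # masked scalar: the masked singleton
    assert x[0] == 10             # unmasked scalar comes back plain
    sub = x[1:]                   # slices stay masked arrays
    assert isinstance(sub, ma.MaskedArray) and bool(sub.mask[0])
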
+ + """ + if self is masked: + raise MaskError('Cannot alter the masked element.') + _data = self._data + _mask = self._mask + if isinstance(indx, str): + _data[indx] = value + if _mask is nomask: + self._mask = _mask = make_mask_none(self.shape, self.dtype) + _mask[indx] = getmask(value) + return + + _dtype = _data.dtype + + if value is masked: + # The mask wasn't set: create a full version. + if _mask is nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + # Now, set the mask to its value. + if _dtype.names is not None: + _mask[indx] = tuple([True] * len(_dtype.names)) + else: + _mask[indx] = True + return + + # Get the _data part of the new value + dval = getattr(value, '_data', value) + # Get the _mask part of the new value + mval = getmask(value) + if _dtype.names is not None and mval is nomask: + mval = tuple([False] * len(_dtype.names)) + if _mask is nomask: + # Set the data, then the mask + _data[indx] = dval + if mval is not nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + _mask[indx] = mval + elif not self._hardmask: + # Set the data, then the mask + if (isinstance(indx, masked_array) and + not isinstance(value, masked_array)): + _data[indx.data] = dval + else: + _data[indx] = dval + _mask[indx] = mval + elif hasattr(indx, 'dtype') and (indx.dtype == MaskType): + indx = indx * umath.logical_not(_mask) + _data[indx] = dval + else: + if _dtype.names is not None: + err_msg = "Flexible 'hard' masks are not yet supported." + raise NotImplementedError(err_msg) + mindx = mask_or(_mask[indx], mval, copy=True) + dindx = self._data[indx] + if dindx.size > 1: + np.copyto(dindx, dval, where=~mindx) + elif mindx is nomask: + dindx = dval + _data[indx] = dindx + _mask[indx] = mindx + return + + # Define so that we can overwrite the setter. + @property + def dtype(self): + return super().dtype + + @dtype.setter + def dtype(self, dtype): + super(MaskedArray, type(self)).dtype.__set__(self, dtype) + if self._mask is not nomask: + self._mask = self._mask.view(make_mask_descr(dtype), ndarray) + # Try to reset the shape of the mask (if we don't have a void). + # This raises a ValueError if the dtype change won't work. + try: + self._mask.shape = self.shape + except (AttributeError, TypeError): + pass + + @property + def shape(self): + return super().shape + + @shape.setter + def shape(self, shape): + super(MaskedArray, type(self)).shape.__set__(self, shape) + # Cannot use self._mask, since it may not (yet) exist when a + # masked matrix sets the shape. + if getmask(self) is not nomask: + self._mask.shape = self.shape + + def __setmask__(self, mask, copy=False): + """ + Set the mask. + + """ + idtype = self.dtype + current_mask = self._mask + if mask is masked: + mask = True + + if current_mask is nomask: + # Make sure the mask is set + # Just don't do anything if there's nothing to do. + if mask is nomask: + return + current_mask = self._mask = make_mask_none(self.shape, idtype) + + if idtype.names is None: + # No named fields. + # Hardmask: don't unmask the data + if self._hardmask: + current_mask |= mask + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method. + elif isinstance(mask, (int, float, np.bool_, np.number)): + current_mask[...] = mask + # Otherwise fall back to the slower, general purpose way. 
+ else: + current_mask.flat = mask + else: + # Named fields w/ + mdtype = current_mask.dtype + mask = np.array(mask, copy=False) + # Mask is a singleton + if not mask.ndim: + # It's a boolean : make a record + if mask.dtype.kind == 'b': + mask = np.array(tuple([mask.item()] * len(mdtype)), + dtype=mdtype) + # It's a record: make sure the dtype is correct + else: + mask = mask.astype(mdtype) + # Mask is a sequence + else: + # Make sure the new mask is a ndarray with the proper dtype + try: + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Hardmask: don't unmask the data + if self._hardmask: + for n in idtype.names: + current_mask[n] |= mask[n] + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method. + elif isinstance(mask, (int, float, np.bool_, np.number)): + current_mask[...] = mask + # Otherwise fall back to the slower, general purpose way. + else: + current_mask.flat = mask + # Reshape if needed + if current_mask.shape: + current_mask.shape = self.shape + return + + _set_mask = __setmask__ + + @property + def mask(self): + """ Current mask. """ + + # We could try to force a reshape, but that wouldn't work in some + # cases. + # Return a view so that the dtype and shape cannot be changed in place + # This still preserves nomask by identity + return self._mask.view() + + @mask.setter + def mask(self, value): + self.__setmask__(value) + + @property + def recordmask(self): + """ + Get or set the mask of the array if it has no named fields. For + structured arrays, returns a ndarray of booleans where entries are + ``True`` if **all** the fields are masked, ``False`` otherwise: + + >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], + ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], + ... dtype=[('a', int), ('b', int)]) + >>> x.recordmask + array([False, False, True, False, False]) + """ + + _mask = self._mask.view(ndarray) + if _mask.dtype.names is None: + return _mask + return np.all(flatten_structured_array(_mask), axis=-1) + + @recordmask.setter + def recordmask(self, mask): + raise NotImplementedError("Coming soon: setting the mask per records!") + + def harden_mask(self): + """ + Force the mask to hard, preventing unmasking by assignment. + + Whether the mask of a masked array is hard or soft is determined by + its `~ma.MaskedArray.hardmask` property. `harden_mask` sets + `~ma.MaskedArray.hardmask` to ``True`` (and returns the modified + self). + + See Also + -------- + ma.MaskedArray.hardmask + ma.MaskedArray.soften_mask + + """ + self._hardmask = True + return self + + def soften_mask(self): + """ + Force the mask to soft (default), allowing unmasking by assignment. + + Whether the mask of a masked array is hard or soft is determined by + its `~ma.MaskedArray.hardmask` property. `soften_mask` sets + `~ma.MaskedArray.hardmask` to ``False`` (and returns the modified + self). + + See Also + -------- + ma.MaskedArray.hardmask + ma.MaskedArray.harden_mask + + """ + self._hardmask = False + return self + + @property + def hardmask(self): + """ + Specifies whether values can be unmasked through assignments. + + By default, assigning definite values to masked array entries will + unmask them. When `hardmask` is ``True``, the mask will not change + through assignments. 
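
A compact round-trip of the hard/soft mask switch described above (a sketch, assuming a recent NumPy):

    import numpy.ma as ma

    x = ma.masked_array([1, 2, 3], mask=[0, 1, 0]).harden_mask()
    x[1] = 99                 # ignored: a hard mask never unmasks
    assert bool(x.mask[1]) and x.hardmask
    x.soften_mask()
    x[1] = 99                 # soft mask: assignment unmasks the entry
    assert not x.mask[1] and x[1] == 99
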
+ + See Also + -------- + ma.MaskedArray.harden_mask + ma.MaskedArray.soften_mask + + Examples + -------- + >>> x = np.arange(10) + >>> m = np.ma.masked_array(x, x>5) + >>> assert not m.hardmask + + Since `m` has a soft mask, assigning an element value unmasks that + element: + + >>> m[8] = 42 + >>> m + masked_array(data=[0, 1, 2, 3, 4, 5, --, --, 42, --], + mask=[False, False, False, False, False, False, + True, True, False, True], + fill_value=999999) + + After hardening, the mask is not affected by assignments: + + >>> hardened = np.ma.harden_mask(m) + >>> assert m.hardmask and hardened is m + >>> m[:] = 23 + >>> m + masked_array(data=[23, 23, 23, 23, 23, 23, --, --, 23, --], + mask=[False, False, False, False, False, False, + True, True, False, True], + fill_value=999999) + + """ + return self._hardmask + + def unshare_mask(self): + """ + Copy the mask and set the `sharedmask` flag to ``False``. + + Whether the mask is shared between masked arrays can be seen from + the `sharedmask` property. `unshare_mask` ensures the mask is not + shared. A copy of the mask is only made if it was shared. + + See Also + -------- + sharedmask + + """ + if self._sharedmask: + self._mask = self._mask.copy() + self._sharedmask = False + return self + + @property + def sharedmask(self): + """ Share status of the mask (read-only). """ + return self._sharedmask + + def shrink_mask(self): + """ + Reduce a mask to nomask when possible. + + Parameters + ---------- + None + + Returns + ------- + None + + Examples + -------- + >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) + >>> x.mask + array([[False, False], + [False, False]]) + >>> x.shrink_mask() + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> x.mask + False + + """ + self._mask = _shrink_mask(self._mask) + return self + + @property + def baseclass(self): + """ Class of the underlying data (read-only). """ + return self._baseclass + + def _get_data(self): + """ + Returns the underlying data, as a view of the masked array. + + If the underlying data is a subclass of :class:`numpy.ndarray`, it is + returned as such. + + >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) + >>> x.data + matrix([[1, 2], + [3, 4]]) + + The type of the data can be accessed through the :attr:`baseclass` + attribute. + """ + return ndarray.view(self, self._baseclass) + + _data = property(fget=_get_data) + data = property(fget=_get_data) + + @property + def flat(self): + """ Return a flat iterator, or set a flattened version of self to value. """ + return MaskedIterator(self) + + @flat.setter + def flat(self, value): + y = self.ravel() + y[:] = value + + @property + def fill_value(self): + """ + The filling value of the masked array is a scalar. When setting, None + will set to a default based on the data type. + + Examples + -------- + >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: + ... np.ma.array([0, 1], dtype=dt).get_fill_value() + ... + 999999 + 999999 + 1e+20 + (1e+20+0j) + + >>> x = np.ma.array([0, 1.], fill_value=-np.inf) + >>> x.fill_value + -inf + >>> x.fill_value = np.pi + >>> x.fill_value + 3.1415926535897931 # may vary + + Reset to default: + + >>> x.fill_value = None + >>> x.fill_value + 1e+20 + + """ + if self._fill_value is None: + self._fill_value = _check_fill_value(None, self.dtype) + + # Temporary workaround to account for the fact that str and bytes + # scalars cannot be indexed with (), whereas all other numpy + # scalars can. See issues #7259 and #7267. 
+        # The if-block can be removed after #7267 has been fixed.
+        if isinstance(self._fill_value, ndarray):
+            return self._fill_value[()]
+        return self._fill_value
+
+    @fill_value.setter
+    def fill_value(self, value=None):
+        target = _check_fill_value(value, self.dtype)
+        if not target.ndim == 0:
+            # 2019-11-12, 1.18.0
+            warnings.warn(
+                "Non-scalar arrays for the fill value are deprecated. Use "
+                "arrays with scalar values instead. The filled function "
+                "still supports any array as `fill_value`.",
+                DeprecationWarning, stacklevel=2)
+
+        _fill_value = self._fill_value
+        if _fill_value is None:
+            # Create the attribute if it was undefined
+            self._fill_value = target
+        else:
+            # Don't overwrite the attribute, just fill it (for propagation)
+            _fill_value[()] = target
+
+    # kept for compatibility
+    get_fill_value = fill_value.fget
+    set_fill_value = fill_value.fset
+
+    def filled(self, fill_value=None):
+        """
+        Return a copy of self, with masked values filled with a given value.
+        **However**, if there are no masked values to fill, self will be
+        returned instead as an ndarray.
+
+        Parameters
+        ----------
+        fill_value : array_like, optional
+            The value to use for invalid entries. Can be scalar or non-scalar.
+            If non-scalar, the resulting ndarray must be broadcastable over
+            input array. Default is None, in which case, the `fill_value`
+            attribute of the array is used instead.
+
+        Returns
+        -------
+        filled_array : ndarray
+            A copy of ``self`` with invalid entries replaced by *fill_value*
+            (be it the function argument or the attribute of ``self``), or
+            ``self`` itself as an ndarray if there are no invalid entries to
+            be replaced.
+
+        Notes
+        -----
+        The result is **not** a MaskedArray!
+
+        Examples
+        --------
+        >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
+        >>> x.filled()
+        array([   1,    2, -999,    4, -999])
+        >>> x.filled(fill_value=1000)
+        array([   1,    2, 1000,    4, 1000])
+        >>> type(x.filled())
+        <class 'numpy.ndarray'>
+
+        Subclassing is preserved. This means that if, e.g., the data part of
+        the masked array is a recarray, `filled` returns a recarray:
+
+        >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray)
+        >>> m = np.ma.array(x, mask=[(True, False), (False, True)])
+        >>> m.filled()
+        rec.array([(999999,      2), (     -3, 999999)],
+                  dtype=[('f0', '<i8'), ('f1', '<i8')])
+        """
+        m = self._mask
+        if m is nomask:
+            return self._data
+
+        if fill_value is None:
+            fill_value = self.fill_value
+        else:
+            fill_value = _check_fill_value(fill_value, self.dtype)
+
+        if self is masked_singleton:
+            return np.asanyarray(fill_value)
+
+        if m.dtype.names is not None:
+            result = self._data.copy('K')
+            _recursive_filled(result, self._mask, fill_value)
+        elif self is masked:
+            result = np.asanyarray(fill_value)
+        else:
+            result = self._data.copy('K')
+            try:
+                np.copyto(result, fill_value, where=m)
+            except (TypeError, AttributeError):
+                fill_value = narray(fill_value, dtype=object)
+                d = result.astype(object)
+                result = np.choose(m, (d, fill_value))
+            except IndexError:
+                # ok, if scalar
+                if self._data.shape:
+                    raise
+                elif m:
+                    result = np.array(fill_value, dtype=self.dtype)
+                else:
+                    result = self._data
+        return result
+
+    def compressed(self):
+        """
+        Return all the non-masked data as a 1-D array.
+
+        Returns
+        -------
+        data : ndarray
+            A new `ndarray` holding the non-masked data is returned.
+
+        Notes
+        -----
+        The result is **not** a MaskedArray!
+
+        Examples
+        --------
+        >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
+        >>> x.compressed()
+        array([0, 1])
+        >>> type(x.compressed())
+        <class 'numpy.ndarray'>
+
+        """
+        data = ndarray.ravel(self._data)
+        if self._mask is not nomask:
+            data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
+        return data
+
+    def compress(self, condition, axis=None, out=None):
+        """
+        Return `a` where condition is ``True``.
+
+        If condition is a `~ma.MaskedArray`, missing values are considered
+        as ``False``.
+
+        Parameters
+        ----------
+        condition : var
+            Boolean 1-d array selecting which entries to return. If len(condition)
+            is less than the size of a along the axis, then output is truncated
+            to length of condition array.
+        axis : {None, int}, optional
+            Axis along which the operation must be performed.
+        out : {None, ndarray}, optional
+            Alternative output array in which to place the result. It must have
+            the same shape as the expected output but the type will be cast if
+            necessary.
+
+        Returns
+        -------
+        result : MaskedArray
+            A :class:`~ma.MaskedArray` object.
+
+        Notes
+        -----
+        Please note the difference with :meth:`compressed` !
+        The output of :meth:`compress` has a mask, the output of
+        :meth:`compressed` does not.
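+
+        A quick sketch of that difference (illustrative):
+
+        >>> y = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+        >>> y.compressed()         # plain ndarray, masked entries dropped
+        array([1, 3])
+        >>> y.compress([1, 1, 0])  # MaskedArray, mask preserved
+        masked_array(data=[1, --],
+                     mask=[False,  True],
+               fill_value=999999)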
+ + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.compress([1, 0, 1]) + masked_array(data=[1, 3], + mask=[False, False], + fill_value=999999) + + >>> x.compress([1, 0, 1], axis=1) + masked_array( + data=[[1, 3], + [--, --], + [7, 9]], + mask=[[False, False], + [ True, True], + [False, False]], + fill_value=999999) + + """ + # Get the basic components + (_data, _mask) = (self._data, self._mask) + + # Force the condition to a regular ndarray and forget the missing + # values. + condition = np.asarray(condition) + + _new = _data.compress(condition, axis=axis, out=out).view(type(self)) + _new._update_from(self) + if _mask is not nomask: + _new._mask = _mask.compress(condition, axis=axis) + return _new + + def _insert_masked_print(self): + """ + Replace masked values with masked_print_option, casting all innermost + dtypes to object. + """ + if masked_print_option.enabled(): + mask = self._mask + if mask is nomask: + res = self._data + else: + # convert to object array to make filled work + data = self._data + # For big arrays, to avoid a costly conversion to the + # object dtype, extract the corners before the conversion. + print_width = (self._print_width if self.ndim > 1 + else self._print_width_1d) + for axis in range(self.ndim): + if data.shape[axis] > print_width: + ind = print_width // 2 + arr = np.split(data, (ind, -ind), axis=axis) + data = np.concatenate((arr[0], arr[2]), axis=axis) + arr = np.split(mask, (ind, -ind), axis=axis) + mask = np.concatenate((arr[0], arr[2]), axis=axis) + + rdtype = _replace_dtype_fields(self.dtype, "O") + res = data.astype(rdtype) + _recursive_printoption(res, mask, masked_print_option) + else: + res = self.filled(self.fill_value) + return res + + def __str__(self): + return str(self._insert_masked_print()) + + def __repr__(self): + """ + Literal string representation. 
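+
+        For instance (an illustrative sketch; exact spacing, and the legacy
+        print modes handled below, can change the output):
+
+        >>> np.ma.array([1, 2], mask=[0, 1])
+        masked_array(data=[1, --],
+                     mask=[False,  True],
+               fill_value=999999)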
+ + """ + if self._baseclass is np.ndarray: + name = 'array' + else: + name = self._baseclass.__name__ + + + # 2016-11-19: Demoted to legacy format + if np.core.arrayprint._get_legacy_print_mode() <= 113: + is_long = self.ndim > 1 + parameters = dict( + name=name, + nlen=" " * len(name), + data=str(self), + mask=str(self._mask), + fill=str(self.fill_value), + dtype=str(self.dtype) + ) + is_structured = bool(self.dtype.names) + key = '{}_{}'.format( + 'long' if is_long else 'short', + 'flx' if is_structured else 'std' + ) + return _legacy_print_templates[key] % parameters + + prefix = f"masked_{name}(" + + dtype_needed = ( + not np.core.arrayprint.dtype_is_implied(self.dtype) or + np.all(self.mask) or + self.size == 0 + ) + + # determine which keyword args need to be shown + keys = ['data', 'mask', 'fill_value'] + if dtype_needed: + keys.append('dtype') + + # array has only one row (non-column) + is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1]) + + # choose what to indent each keyword with + min_indent = 2 + if is_one_row: + # first key on the same line as the type, remaining keys + # aligned by equals + indents = {} + indents[keys[0]] = prefix + for k in keys[1:]: + n = builtins.max(min_indent, len(prefix + keys[0]) - len(k)) + indents[k] = ' ' * n + prefix = '' # absorbed into the first indent + else: + # each key on its own line, indented by two spaces + indents = {k: ' ' * min_indent for k in keys} + prefix = prefix + '\n' # first key on the next line + + # format the field values + reprs = {} + reprs['data'] = np.array2string( + self._insert_masked_print(), + separator=", ", + prefix=indents['data'] + 'data=', + suffix=',') + reprs['mask'] = np.array2string( + self._mask, + separator=", ", + prefix=indents['mask'] + 'mask=', + suffix=',') + reprs['fill_value'] = repr(self.fill_value) + if dtype_needed: + reprs['dtype'] = np.core.arrayprint.dtype_short_repr(self.dtype) + + # join keys with values and indentations + result = ',\n'.join( + '{}{}={}'.format(indents[k], k, reprs[k]) + for k in keys + ) + return prefix + result + ')' + + def _delegate_binop(self, other): + # This emulates the logic in + # private/binop_override.h:forward_binop_should_defer + if isinstance(other, type(self)): + return False + array_ufunc = getattr(other, "__array_ufunc__", False) + if array_ufunc is False: + other_priority = getattr(other, "__array_priority__", -1000000) + return self.__array_priority__ < other_priority + else: + # If array_ufunc is not None, it will be called inside the ufunc; + # None explicitly tells us to not call the ufunc, i.e., defer. + return array_ufunc is None + + def _comparison(self, other, compare): + """Compare self with other using operator.eq or operator.ne. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. + + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. 
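+
+        For example (an illustrative sketch):
+
+        >>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+        >>> b = np.ma.array([1, 9, 4], mask=[0, 1, 0])
+        >>> a == b    # masked pairs count as equal, but stay masked
+        masked_array(data=[True, --, False],
+                     mask=[False,  True, False],
+               fill_value=True)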
+ """ + omask = getmask(other) + smask = self.mask + mask = mask_or(smask, omask, copy=True) + + odata = getdata(other) + if mask.dtype.names is not None: + # only == and != are reasonably defined for structured dtypes, + # so give up early for all other comparisons: + if compare not in (operator.eq, operator.ne): + return NotImplemented + # For possibly masked structured arrays we need to be careful, + # since the standard structured array comparison will use all + # fields, masked or not. To avoid masked fields influencing the + # outcome, we set all masked fields in self to other, so they'll + # count as equal. To prepare, we ensure we have the right shape. + broadcast_shape = np.broadcast(self, odata).shape + sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True) + sbroadcast._mask = mask + sdata = sbroadcast.filled(odata) + # Now take care of the mask; the merged mask should have an item + # masked if all fields were masked (in one and/or other). + mask = (mask == np.ones((), mask.dtype)) + # Ensure we can compare masks below if other was not masked. + if omask is np.False_: + omask = np.zeros((), smask.dtype) + + else: + # For regular arrays, just use the data as they come. + sdata = self.data + + check = compare(sdata, odata) + + if isinstance(check, (np.bool_, bool)): + return masked if mask else check + + if mask is not nomask: + if compare in (operator.eq, operator.ne): + # Adjust elements that were masked, which should be treated + # as equal if masked in both, unequal if masked in one. + # Note that this works automatically for structured arrays too. + # Ignore this for operations other than `==` and `!=` + check = np.where(mask, compare(smask, omask), check) + + if mask.shape != check.shape: + # Guarantee consistency of the shape, making a copy since the + # the mask may need to get written to later. + mask = np.broadcast_to(mask, check.shape).copy() + + check = check.view(type(self)) + check._update_from(self) + check._mask = mask + + # Cast fill value to bool_ if needed. If it cannot be cast, the + # default boolean fill value is used. + if check._fill_value is not None: + try: + fill = _check_fill_value(check._fill_value, np.bool_) + except (TypeError, ValueError): + fill = _check_fill_value(None, np.bool_) + check._fill_value = fill + + return check + + def __eq__(self, other): + """Check whether other equals self elementwise. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. + + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. + """ + return self._comparison(other, operator.eq) + + def __ne__(self, other): + """Check whether other does not equal self elementwise. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. + + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. 
+ """ + return self._comparison(other, operator.ne) + + # All other comparisons: + def __le__(self, other): + return self._comparison(other, operator.le) + + def __lt__(self, other): + return self._comparison(other, operator.lt) + + def __ge__(self, other): + return self._comparison(other, operator.ge) + + def __gt__(self, other): + return self._comparison(other, operator.gt) + + def __add__(self, other): + """ + Add self to other, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return add(self, other) + + def __radd__(self, other): + """ + Add other to self, and return a new masked array. + + """ + # In analogy with __rsub__ and __rdiv__, use original order: + # we get here from `other + self`. + return add(other, self) + + def __sub__(self, other): + """ + Subtract other from self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return subtract(self, other) + + def __rsub__(self, other): + """ + Subtract self from other, and return a new masked array. + + """ + return subtract(other, self) + + def __mul__(self, other): + "Multiply self by other, and return a new masked array." + if self._delegate_binop(other): + return NotImplemented + return multiply(self, other) + + def __rmul__(self, other): + """ + Multiply other by self, and return a new masked array. + + """ + # In analogy with __rsub__ and __rdiv__, use original order: + # we get here from `other * self`. + return multiply(other, self) + + def __div__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return divide(self, other) + + def __truediv__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return true_divide(self, other) + + def __rtruediv__(self, other): + """ + Divide self into other, and return a new masked array. + + """ + return true_divide(other, self) + + def __floordiv__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return floor_divide(self, other) + + def __rfloordiv__(self, other): + """ + Divide self into other, and return a new masked array. + + """ + return floor_divide(other, self) + + def __pow__(self, other): + """ + Raise self to the power other, masking the potential NaNs/Infs + + """ + if self._delegate_binop(other): + return NotImplemented + return power(self, other) + + def __rpow__(self, other): + """ + Raise other to the power self, masking the potential NaNs/Infs + + """ + return power(other, self) + + def __iadd__(self, other): + """ + Add other to self in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + else: + if m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(0), other_data) + self._data.__iadd__(other_data) + return self + + def __isub__(self, other): + """ + Subtract other from self in-place. 
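+
+        Like the other augmented assignments, this updates the data in
+        place and ORs the mask of `other` into the mask of self. A small
+        sketch (illustrative; default fill value assumed):
+
+        >>> a = np.ma.array([10, 20, 30], mask=[0, 0, 1])
+        >>> a -= np.ma.array([1, 1, 1], mask=[0, 1, 0])
+        >>> a
+        masked_array(data=[9, --, --],
+                     mask=[False,  True,  True],
+               fill_value=999999)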
+ + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(0), other_data) + self._data.__isub__(other_data) + return self + + def __imul__(self, other): + """ + Multiply self by other in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__imul__(other_data) + return self + + def __idiv__(self, other): + """ + Divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 4 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.divide] + other_data = np.where( + dom_mask, other_data.dtype.type(fval), other_data) + self._mask |= new_mask + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__idiv__(other_data) + return self + + def __ifloordiv__(self, other): + """ + Floor divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.floor_divide] + other_data = np.where( + dom_mask, other_data.dtype.type(fval), other_data) + self._mask |= new_mask + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__ifloordiv__(other_data) + return self + + def __itruediv__(self, other): + """ + True divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.true_divide] + other_data = np.where( + dom_mask, other_data.dtype.type(fval), other_data) + self._mask |= new_mask + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__itruediv__(other_data) + return self + + def __ipow__(self, other): + """ + Raise self to the power other, in place. + + """ + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + other_mask = getmask(other) + with np.errstate(divide='ignore', invalid='ignore'): + self._data.__ipow__(other_data) + invalid = np.logical_not(np.isfinite(self._data)) + if invalid.any(): + if self._mask is not nomask: + self._mask |= invalid + else: + self._mask = invalid + np.copyto(self._data, self.fill_value, where=invalid) + new_mask = mask_or(other_mask, invalid) + self._mask = mask_or(self._mask, new_mask) + return self + + def __float__(self): + """ + Convert to float. + + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + warnings.warn("Warning: converting a masked element to nan.", stacklevel=2) + return np.nan + return float(self.item()) + + def __int__(self): + """ + Convert to int. 
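+
+        Only size-1, unmasked arrays can be converted; a sketch of the
+        expected behaviour (illustrative):
+
+        >>> int(np.ma.array([3]))
+        3
+        >>> int(np.ma.masked)
+        Traceback (most recent call last):
+        ...
+        numpy.ma.core.MaskError: Cannot convert masked element to a Python int.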
+ + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + raise MaskError('Cannot convert masked element to a Python int.') + return int(self.item()) + + @property + def imag(self): + """ + The imaginary part of the masked array. + + This property is a view on the imaginary part of this `MaskedArray`. + + See Also + -------- + real + + Examples + -------- + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.imag + masked_array(data=[1.0, --, 1.6], + mask=[False, True, False], + fill_value=1e+20) + + """ + result = self._data.imag.view(type(self)) + result.__setmask__(self._mask) + return result + + # kept for compatibility + get_imag = imag.fget + + @property + def real(self): + """ + The real part of the masked array. + + This property is a view on the real part of this `MaskedArray`. + + See Also + -------- + imag + + Examples + -------- + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.real + masked_array(data=[1.0, --, 3.45], + mask=[False, True, False], + fill_value=1e+20) + + """ + result = self._data.real.view(type(self)) + result.__setmask__(self._mask) + return result + + # kept for compatibility + get_real = real.fget + + def count(self, axis=None, keepdims=np._NoValue): + """ + Count the non-masked elements of the array along the given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis or axes along which the count is performed. + The default, None, performs the count over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + + .. versionadded:: 1.10.0 + + If this is a tuple of ints, the count is performed on multiple + axes, instead of a single axis or all the axes as before. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + result : ndarray or scalar + An array with the same shape as the input array, with the specified + axis removed. If the array is a 0-d array, or if `axis` is None, a + scalar is returned. + + See Also + -------- + ma.count_masked : Count masked elements in array or along a given axis. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.arange(6).reshape((2, 3)) + >>> a[1, :] = ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, --, --]], + mask=[[False, False, False], + [ True, True, True]], + fill_value=999999) + >>> a.count() + 3 + + When the `axis` keyword is specified an array of appropriate size is + returned. 
+ + >>> a.count(axis=0) + array([1, 1, 1]) + >>> a.count(axis=1) + array([3, 0]) + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + m = self._mask + # special case for matrices (we assume no other subclasses modify + # their dimensions) + if isinstance(self.data, np.matrix): + if m is nomask: + m = np.zeros(self.shape, dtype=np.bool_) + m = m.view(type(self.data)) + + if m is nomask: + # compare to _count_reduce_items in _methods.py + + if self.shape == (): + if axis not in (None, 0): + raise np.AxisError(axis=axis, ndim=self.ndim) + return 1 + elif axis is None: + if kwargs.get('keepdims', False): + return np.array(self.size, dtype=np.intp, ndmin=self.ndim) + return self.size + + axes = normalize_axis_tuple(axis, self.ndim) + items = 1 + for ax in axes: + items *= self.shape[ax] + + if kwargs.get('keepdims', False): + out_dims = list(self.shape) + for a in axes: + out_dims[a] = 1 + else: + out_dims = [d for n, d in enumerate(self.shape) + if n not in axes] + # make sure to return a 0-d array if axis is supplied + return np.full(out_dims, items, dtype=np.intp) + + # take care of the masked singleton + if self is masked: + return 0 + + return (~m).sum(axis=axis, dtype=np.intp, **kwargs) + + def ravel(self, order='C'): + """ + Returns a 1D version of self, as a view. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `a` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. + 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + (Masked arrays currently use 'A' on the data when 'K' is passed.) + + Returns + ------- + MaskedArray + Output view is of shape ``(self.size,)`` (or + ``(np.ma.product(self.shape),)``). + + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.ravel() + masked_array(data=[1, --, 3, --, 5, --, 7, --, 9], + mask=[False, True, False, True, False, True, False, True, + False], + fill_value=999999) + + """ + # The order of _data and _mask could be different (it shouldn't be + # normally). Passing order `K` or `A` would be incorrect. + # So we ignore the mask memory order. + # TODO: We don't actually support K, so use A instead. We could + # try to guess this correct by sorting strides or deprecate. + if order in "kKaA": + order = "F" if self._data.flags.fnc else "C" + r = ndarray.ravel(self._data, order=order).view(type(self)) + r._update_from(self) + if self._mask is not nomask: + r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape) + else: + r._mask = nomask + return r + + + def reshape(self, *s, **kwargs): + """ + Give a new shape to the array without changing its data. 
+ + Returns a masked array containing the same data, but with a new shape. + The result is a view on the original array; if this is not possible, a + ValueError is raised. + + Parameters + ---------- + shape : int or tuple of ints + The new shape should be compatible with the original shape. If an + integer is supplied, then the result will be a 1-D array of that + length. + order : {'C', 'F'}, optional + Determines whether the array data should be viewed as in C + (row-major) or FORTRAN (column-major) order. + + Returns + ------- + reshaped_array : array + A new view on the array. + + See Also + -------- + reshape : Equivalent function in the masked array module. + numpy.ndarray.reshape : Equivalent method on ndarray object. + numpy.reshape : Equivalent function in the NumPy module. + + Notes + ----- + The reshaping operation cannot guarantee that a copy will not be made, + to modify the shape in place, use ``a.shape = s`` + + Examples + -------- + >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) + >>> x + masked_array( + data=[[--, 2], + [3, --]], + mask=[[ True, False], + [False, True]], + fill_value=999999) + >>> x = x.reshape((4,1)) + >>> x + masked_array( + data=[[--], + [2], + [3], + [--]], + mask=[[ True], + [False], + [False], + [ True]], + fill_value=999999) + + """ + kwargs.update(order=kwargs.get('order', 'C')) + result = self._data.reshape(*s, **kwargs).view(type(self)) + result._update_from(self) + mask = self._mask + if mask is not nomask: + result._mask = mask.reshape(*s, **kwargs) + return result + + def resize(self, newshape, refcheck=True, order=False): + """ + .. warning:: + + This method does nothing, except raise a ValueError exception. A + masked array does not own its data and therefore cannot safely be + resized in place. Use the `numpy.ma.resize` function instead. + + This method is difficult to implement safely and may be deprecated in + future releases of NumPy. + + """ + # Note : the 'order' keyword looks broken, let's just drop it + errmsg = "A masked array does not own its data "\ + "and therefore cannot be resized.\n" \ + "Use the numpy.ma.resize function instead." + raise ValueError(errmsg) + + def put(self, indices, values, mode='raise'): + """ + Set storage-indexed locations to corresponding values. + + Sets self._data.flat[n] = values[n] for each n in indices. + If `values` is shorter than `indices` then it will repeat. + If `values` has some masked values, the initial mask is updated + in consequence, else the corresponding values are unmasked. + + Parameters + ---------- + indices : 1-D array_like + Target indices, interpreted as integers. + values : array_like + Values to place in self._data copy at target indices. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + 'raise' : raise an error. + 'wrap' : wrap around. + 'clip' : clip to the range. + + Notes + ----- + `values` can be a scalar or length 1 array. 
+ + Examples + -------- + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.put([0,4,8],[10,20,30]) + >>> x + masked_array( + data=[[10, --, 3], + [--, 20, --], + [7, --, 30]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + + >>> x.put(4,999) + >>> x + masked_array( + data=[[10, --, 3], + [--, 999, --], + [7, --, 30]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + + """ + # Hard mask: Get rid of the values/indices that fall on masked data + if self._hardmask and self._mask is not nomask: + mask = self._mask[indices] + indices = narray(indices, copy=False) + values = narray(values, copy=False, subok=True) + values.resize(indices.shape) + indices = indices[~mask] + values = values[~mask] + + self._data.put(indices, values, mode=mode) + + # short circuit if neither self nor values are masked + if self._mask is nomask and getmask(values) is nomask: + return + + m = getmaskarray(self) + + if getmask(values) is nomask: + m.put(indices, False, mode=mode) + else: + m.put(indices, values._mask, mode=mode) + m = make_mask(m, copy=False, shrink=True) + self._mask = m + return + + def ids(self): + """ + Return the addresses of the data and mask areas. + + Parameters + ---------- + None + + Examples + -------- + >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) + >>> x.ids() + (166670640, 166659832) # may vary + + If the array has no mask, the address of `nomask` is returned. This address + is typically not close to the data in memory: + + >>> x = np.ma.array([1, 2, 3]) + >>> x.ids() + (166691080, 3083169284) # may vary + + """ + if self._mask is nomask: + return (self.ctypes.data, id(nomask)) + return (self.ctypes.data, self._mask.ctypes.data) + + def iscontiguous(self): + """ + Return a boolean indicating whether the data is contiguous. + + Parameters + ---------- + None + + Examples + -------- + >>> x = np.ma.array([1, 2, 3]) + >>> x.iscontiguous() + True + + `iscontiguous` returns one of the flags of the masked array: + + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : True + OWNDATA : False + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + + """ + return self.flags['CONTIGUOUS'] + + def all(self, axis=None, out=None, keepdims=np._NoValue): + """ + Returns True if all elements evaluate to True. + + The output array is masked where all the values along the given axis + are masked: if the output would have been a scalar and that all the + values are masked, then the output is `masked`. + + Refer to `numpy.all` for full documentation. 
+ + See Also + -------- + numpy.ndarray.all : corresponding function for ndarrays + numpy.all : equivalent function + + Examples + -------- + >>> np.ma.array([1,2,3]).all() + True + >>> a = np.ma.array([1,2,3], mask=True) + >>> (a.all() is np.ma.masked) + True + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + mask = _check_mask_axis(self._mask, axis, **kwargs) + if out is None: + d = self.filled(True).all(axis=axis, **kwargs).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + return masked + return d + self.filled(True).all(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + def any(self, axis=None, out=None, keepdims=np._NoValue): + """ + Returns True if any of the elements of `a` evaluate to True. + + Masked values are considered as False during computation. + + Refer to `numpy.any` for full documentation. + + See Also + -------- + numpy.ndarray.any : corresponding function for ndarrays + numpy.any : equivalent function + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + mask = _check_mask_axis(self._mask, axis, **kwargs) + if out is None: + d = self.filled(False).any(axis=axis, **kwargs).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + d = masked + return d + self.filled(False).any(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + def nonzero(self): + """ + Return the indices of unmasked elements that are not zero. + + Returns a tuple of arrays, one for each dimension, containing the + indices of the non-zero elements in that dimension. The corresponding + non-zero values can be obtained with:: + + a[a.nonzero()] + + To group the indices by element, rather than dimension, use + instead:: + + np.transpose(a.nonzero()) + + The result of this is always a 2d array, with a row for each non-zero + element. + + Parameters + ---------- + None + + Returns + ------- + tuple_of_arrays : tuple + Indices of elements that are non-zero. + + See Also + -------- + numpy.nonzero : + Function operating on ndarrays. + flatnonzero : + Return indices that are non-zero in the flattened version of the input + array. + numpy.ndarray.nonzero : + Equivalent ndarray method. + count_nonzero : + Counts the number of non-zero elements in the input array. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = ma.array(np.eye(3)) + >>> x + masked_array( + data=[[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]], + mask=False, + fill_value=1e+20) + >>> x.nonzero() + (array([0, 1, 2]), array([0, 1, 2])) + + Masked elements are ignored. + + >>> x[1, 1] = ma.masked + >>> x + masked_array( + data=[[1.0, 0.0, 0.0], + [0.0, --, 0.0], + [0.0, 0.0, 1.0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1e+20) + >>> x.nonzero() + (array([0, 2]), array([0, 2])) + + Indices can also be grouped by element. + + >>> np.transpose(x.nonzero()) + array([[0, 0], + [2, 2]]) + + A common use for ``nonzero`` is to find the indices of an array, where + a condition is True. Given an array `a`, the condition `a` > 3 is a + boolean array and since False is interpreted as 0, ma.nonzero(a > 3) + yields the indices of the `a` where the condition is true. 
+ + >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) + >>> a > 3 + masked_array( + data=[[False, False, False], + [ True, True, True], + [ True, True, True]], + mask=False, + fill_value=True) + >>> ma.nonzero(a > 3) + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + The ``nonzero`` method of the condition array can also be called. + + >>> (a > 3).nonzero() + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + """ + return narray(self.filled(0), copy=False).nonzero() + + def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + (this docstring should be overwritten) + """ + #!!!: implement out + test! + m = self._mask + if m is nomask: + result = super().trace(offset=offset, axis1=axis1, axis2=axis2, + out=out) + return result.astype(dtype) + else: + D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) + return D.astype(dtype).filled(0).sum(axis=-1, out=out) + trace.__doc__ = ndarray.trace.__doc__ + + def dot(self, b, out=None, strict=False): + """ + a.dot(b, out=None) + + Masked dot product of two arrays. Note that `out` and `strict` are + located in different positions than in `ma.dot`. In order to + maintain compatibility with the functional version, it is + recommended that the optional arguments be treated as keyword only. + At some point that may be mandatory. + + .. versionadded:: 1.10.0 + + Parameters + ---------- + b : masked_array_like + Inputs array. + out : masked_array, optional + Output argument. This must have the exact kind that would be + returned if it was not used. In particular, it must have the + right type, must be C-contiguous, and its dtype must be the + dtype that would be returned for `ma.dot(a,b)`. This is a + performance feature. Therefore, if these conditions are not + met, an exception is raised, instead of attempting to be + flexible. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) + for the computation. Default is False. Propagating the mask + means that if a masked value appears in a row or column, the + whole row or column is considered masked. + + .. versionadded:: 1.10.2 + + See Also + -------- + numpy.ma.dot : equivalent function + + """ + return dot(self, b, out=out, strict=strict) + + def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Return the sum of the array elements over the given axis. + + Masked elements are set to 0 internally. + + Refer to `numpy.sum` for full documentation. 
+
+        See Also
+        --------
+        numpy.ndarray.sum : corresponding function for ndarrays
+        numpy.sum : equivalent function
+
+        Examples
+        --------
+        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+        >>> x
+        masked_array(
+          data=[[1, --, 3],
+                [--, 5, --],
+                [7, --, 9]],
+          mask=[[False,  True, False],
+                [ True, False,  True],
+                [False,  True, False]],
+          fill_value=999999)
+        >>> x.sum()
+        25
+        >>> x.sum(axis=1)
+        masked_array(data=[4, 5, 16],
+                     mask=[False, False, False],
+               fill_value=999999)
+        >>> x.sum(axis=0)
+        masked_array(data=[8, 5, 12],
+                     mask=[False, False, False],
+               fill_value=999999)
+        >>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
+        <class 'numpy.int64'>
+
+        """
+        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+        _mask = self._mask
+        newmask = _check_mask_axis(_mask, axis, **kwargs)
+        # No explicit output
+        if out is None:
+            result = self.filled(0).sum(axis, dtype=dtype, **kwargs)
+            rndim = getattr(result, 'ndim', 0)
+            if rndim:
+                result = result.view(type(self))
+                result.__setmask__(newmask)
+            elif newmask:
+                result = masked
+            return result
+        # Explicit output
+        result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
+        if isinstance(out, MaskedArray):
+            outmask = getmask(out)
+            if outmask is nomask:
+                outmask = out._mask = make_mask_none(out.shape)
+            outmask.flat = newmask
+        return out
+
+    def cumsum(self, axis=None, dtype=None, out=None):
+        """
+        Return the cumulative sum of the array elements over the given axis.
+
+        Masked values are set to 0 internally during the computation.
+        However, their position is saved, and the result will be masked at
+        the same locations.
+
+        Refer to `numpy.cumsum` for full documentation.
+
+        Notes
+        -----
+        The mask is lost if `out` is not a valid :class:`ma.MaskedArray` !
+
+        Arithmetic is modular when using integer types, and no error is
+        raised on overflow.
+
+        See Also
+        --------
+        numpy.ndarray.cumsum : corresponding function for ndarrays
+        numpy.cumsum : equivalent function
+
+        Examples
+        --------
+        >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
+        >>> marr.cumsum()
+        masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33],
+                     mask=[False, False, False,  True,  True,  True, False, False,
+                           False, False],
+               fill_value=999999)
+
+        """
+        result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
+        if out is not None:
+            if isinstance(out, MaskedArray):
+                out.__setmask__(self.mask)
+            return out
+        result = result.view(type(self))
+        result.__setmask__(self._mask)
+        return result
+
+    def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+        """
+        Return the product of the array elements over the given axis.
+
+        Masked elements are set to 1 internally for computation.
+
+        Refer to `numpy.prod` for full documentation.
+
+        Notes
+        -----
+        Arithmetic is modular when using integer types, and no error is raised
+        on overflow.
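+
+        For instance (an illustrative sketch), masked entries contribute a
+        neutral factor of 1:
+
+        >>> np.ma.array([2, 3, 4], mask=[0, 1, 0]).prod()
+        8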
+ + See Also + -------- + numpy.ndarray.prod : corresponding function for ndarrays + numpy.prod : equivalent function + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + # No explicit output + if out is None: + result = self.filled(1).prod(axis, dtype=dtype, **kwargs) + rndim = getattr(result, 'ndim', 0) + if rndim: + result = result.view(type(self)) + result.__setmask__(newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + return out + product = prod + + def cumprod(self, axis=None, dtype=None, out=None): + """ + Return the cumulative product of the array elements over the given axis. + + Masked values are set to 1 internally during the computation. + However, their position is saved, and the result will be masked at + the same locations. + + Refer to `numpy.cumprod` for full documentation. + + Notes + ----- + The mask is lost if `out` is not a valid MaskedArray ! + + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + See Also + -------- + numpy.ndarray.cumprod : corresponding function for ndarrays + numpy.cumprod : equivalent function + """ + result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(self._mask) + return out + result = result.view(type(self)) + result.__setmask__(self._mask) + return result + + def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Returns the average of the array elements along given axis. + + Masked entries are ignored, and result elements which are not + finite will be masked. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.ndarray.mean : corresponding function for ndarrays + numpy.mean : Equivalent function + numpy.ma.average : Weighted average. + + Examples + -------- + >>> a = np.ma.array([1,2,3], mask=[False, False, True]) + >>> a + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> a.mean() + 1.5 + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + if self._mask is nomask: + result = super().mean(axis=axis, dtype=dtype, **kwargs)[()] + else: + is_float16_result = False + if dtype is None: + if issubclass(self.dtype.type, (ntypes.integer, ntypes.bool_)): + dtype = mu.dtype('f8') + elif issubclass(self.dtype.type, ntypes.float16): + dtype = mu.dtype('f4') + is_float16_result = True + dsum = self.sum(axis=axis, dtype=dtype, **kwargs) + cnt = self.count(axis=axis, **kwargs) + if cnt.shape == () and (cnt == 0): + result = masked + elif is_float16_result: + result = self.dtype.type(dsum * 1. / cnt) + else: + result = dsum * 1. / cnt + if out is not None: + out.flat = result + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = getmask(result) + return out + return result + + def anom(self, axis=None, dtype=None): + """ + Compute the anomalies (deviations from the arithmetic mean) + along the given axis. + + Returns an array of anomalies, with the same shape as the input and + where the arithmetic mean is computed along the given axis. 
+ + Parameters + ---------- + axis : int, optional + Axis over which the anomalies are taken. + The default is to use the mean of the flattened array as reference. + dtype : dtype, optional + Type to use in computing the variance. For arrays of integer type + the default is float32; for arrays of float types it is the same as + the array type. + + See Also + -------- + mean : Compute the mean of the array. + + Examples + -------- + >>> a = np.ma.array([1,2,3]) + >>> a.anom() + masked_array(data=[-1., 0., 1.], + mask=False, + fill_value=1e+20) + + """ + m = self.mean(axis, dtype) + if not axis: + return self - m + else: + return self - expand_dims(m, axis) + + def var(self, axis=None, dtype=None, out=None, ddof=0, + keepdims=np._NoValue): + """ + Returns the variance of the array elements along given axis. + + Masked entries are ignored, and result elements which are not + finite will be masked. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.ndarray.var : corresponding function for ndarrays + numpy.var : Equivalent function + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + # Easy case: nomask, business as usual + if self._mask is nomask: + ret = super().var(axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs)[()] + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(nomask) + return out + return ret + + # Some data are masked, yay! + cnt = self.count(axis=axis, **kwargs) - ddof + danom = self - self.mean(axis, dtype, keepdims=True) + if iscomplexobj(self): + danom = umath.absolute(danom) ** 2 + else: + danom *= danom + dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self)) + # Apply the mask if it's not a scalar + if dvar.ndim: + dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0)) + dvar._update_from(self) + elif getmask(dvar): + # Make sure that masked is returned when the scalar is masked. + dvar = masked + if out is not None: + if isinstance(out, MaskedArray): + out.flat = 0 + out.__setmask__(True) + elif out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or "\ + "more location." + raise MaskError(errmsg) + else: + out.flat = np.nan + return out + # In case with have an explicit output + if out is not None: + # Set the data + out.flat = dvar + # Set the mask if needed + if isinstance(out, MaskedArray): + out.__setmask__(dvar.mask) + return out + return dvar + var.__doc__ = np.var.__doc__ + + def std(self, axis=None, dtype=None, out=None, ddof=0, + keepdims=np._NoValue): + """ + Returns the standard deviation of the array elements along given axis. + + Masked entries are ignored. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.ndarray.std : corresponding function for ndarrays + numpy.std : Equivalent function + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + dvar = self.var(axis, dtype, out, ddof, **kwargs) + if dvar is not masked: + if out is not None: + np.power(out, 0.5, out=out, casting='unsafe') + return out + dvar = sqrt(dvar) + return dvar + + def round(self, decimals=0, out=None): + """ + Return each element rounded to the given number of decimals. + + Refer to `numpy.around` for full documentation. 
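+
+        For example (an illustrative sketch; the default fill value for
+        floats is assumed):
+
+        >>> np.ma.array([1.234, 5.678], mask=[0, 1]).round(2)
+        masked_array(data=[1.23, --],
+                     mask=[False,  True],
+               fill_value=1e+20)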
+ + See Also + -------- + numpy.ndarray.round : corresponding function for ndarrays + numpy.around : equivalent function + """ + result = self._data.round(decimals=decimals, out=out).view(type(self)) + if result.ndim > 0: + result._mask = self._mask + result._update_from(self) + elif self._mask: + # Return masked when the scalar is masked + result = masked + # No explicit output: we're done + if out is None: + return result + if isinstance(out, MaskedArray): + out.__setmask__(self._mask) + return out + + def argsort(self, axis=np._NoValue, kind=None, order=None, + endwith=True, fill_value=None): + """ + Return an ndarray of indices that sort the array along the + specified axis. Masked values are filled beforehand to + `fill_value`. + + Parameters + ---------- + axis : int, optional + Axis along which to sort. If None, the default, the flattened array + is used. + + .. versionchanged:: 1.13.0 + Previously, the default was documented to be -1, but that was + in error. At some future date, the default will change to -1, as + originally intended. + Until then, the axis should be given explicitly when + ``arr.ndim > 1``, to avoid a FutureWarning. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + The sorting algorithm used. + order : list, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. Not all fields need be + specified. + endwith : {True, False}, optional + Whether missing values (if any) should be treated as the largest values + (True) or the smallest values (False) + When the array contains unmasked values at the same extremes of the + datatype, the ordering of these values and the masked values is + undefined. + fill_value : scalar or None, optional + Value used internally for the masked values. + If ``fill_value`` is not None, it supersedes ``endwith``. + + Returns + ------- + index_array : ndarray, int + Array of indices that sort `a` along the specified axis. + In other words, ``a[index_array]`` yields a sorted `a`. + + See Also + -------- + ma.MaskedArray.sort : Describes sorting algorithms used. + lexsort : Indirect stable sort with multiple keys. + numpy.ndarray.sort : Inplace sort. + + Notes + ----- + See `sort` for notes on the different sorting algorithms. + + Examples + -------- + >>> a = np.ma.array([3,2,1], mask=[False, False, True]) + >>> a + masked_array(data=[3, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> a.argsort() + array([1, 0, 2]) + + """ + + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + if axis is np._NoValue: + axis = _deprecate_argsort_axis(self) + + if fill_value is None: + if endwith: + # nan > inf + if np.issubdtype(self.dtype, np.floating): + fill_value = np.nan + else: + fill_value = minimum_fill_value(self) + else: + fill_value = maximum_fill_value(self) + + filled = self.filled(fill_value) + return filled.argsort(axis=axis, kind=kind, order=order) + + def argmin(self, axis=None, fill_value=None, out=None, *, + keepdims=np._NoValue): + """ + Return array of indices to the minimum values along the given axis. + + Parameters + ---------- + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis + fill_value : scalar or None, optional + Value used to fill in the masked values. If None, the output of + minimum_fill_value(self._data) is used instead. + out : {None, array}, optional + Array into which the result can be placed. 
Its type is preserved + and it must be of the right shape to hold the output. + + Returns + ------- + ndarray or scalar + If multi-dimension input, returns a new ndarray of indices to the + minimum values along the given axis. Otherwise, returns a scalar + of index to the minimum values along the given axis. + + Examples + -------- + >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) + >>> x.shape = (2,2) + >>> x + masked_array( + data=[[--, --], + [2, 3]], + mask=[[ True, True], + [False, False]], + fill_value=999999) + >>> x.argmin(axis=0, fill_value=-1) + array([0, 0]) + >>> x.argmin(axis=0, fill_value=9) + array([1, 1]) + + """ + if fill_value is None: + fill_value = minimum_fill_value(self) + d = self.filled(fill_value).view(ndarray) + keepdims = False if keepdims is np._NoValue else bool(keepdims) + return d.argmin(axis, out=out, keepdims=keepdims) + + def argmax(self, axis=None, fill_value=None, out=None, *, + keepdims=np._NoValue): + """ + Returns array of indices of the maximum values along the given axis. + Masked values are treated as if they had the value fill_value. + + Parameters + ---------- + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis + fill_value : scalar or None, optional + Value used to fill in the masked values. If None, the output of + maximum_fill_value(self._data) is used instead. + out : {None, array}, optional + Array into which the result can be placed. Its type is preserved + and it must be of the right shape to hold the output. + + Returns + ------- + index_array : {integer_array} + + Examples + -------- + >>> a = np.arange(6).reshape(2,3) + >>> a.argmax() + 5 + >>> a.argmax(0) + array([1, 1, 1]) + >>> a.argmax(1) + array([2, 2]) + + """ + if fill_value is None: + fill_value = maximum_fill_value(self._data) + d = self.filled(fill_value).view(ndarray) + keepdims = False if keepdims is np._NoValue else bool(keepdims) + return d.argmax(axis, out=out, keepdims=keepdims) + + def sort(self, axis=-1, kind=None, order=None, + endwith=True, fill_value=None): + """ + Sort the array, in-place + + Parameters + ---------- + a : array_like + Array to be sorted. + axis : int, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + The sorting algorithm used. + order : list, optional + When `a` is a structured array, this argument specifies which fields + to compare first, second, and so on. This list does not need to + include all of the fields. + endwith : {True, False}, optional + Whether missing values (if any) should be treated as the largest values + (True) or the smallest values (False) + When the array contains unmasked values sorting at the same extremes of the + datatype, the ordering of these values and the masked values is + undefined. + fill_value : scalar or None, optional + Value used internally for the masked values. + If ``fill_value`` is not None, it supersedes ``endwith``. + + Returns + ------- + sorted_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + numpy.ndarray.sort : Method to sort an array in-place. + argsort : Indirect sort. + lexsort : Indirect stable sort on multiple keys. + searchsorted : Find elements in a sorted array. + + Notes + ----- + See ``sort`` for notes on the different sorting algorithms. 
+ + Examples + -------- + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # Default + >>> a.sort() + >>> a + masked_array(data=[1, 3, 5, --, --], + mask=[False, False, False, True, True], + fill_value=999999) + + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # Put missing values in the front + >>> a.sort(endwith=False) + >>> a + masked_array(data=[--, --, 1, 3, 5], + mask=[ True, True, False, False, False], + fill_value=999999) + + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # fill_value takes over endwith + >>> a.sort(endwith=False, fill_value=3) + >>> a + masked_array(data=[1, --, --, 3, 5], + mask=[False, True, True, False, False], + fill_value=999999) + + """ + if self._mask is nomask: + ndarray.sort(self, axis=axis, kind=kind, order=order) + return + + if self is masked: + return + + sidx = self.argsort(axis=axis, kind=kind, order=order, + fill_value=fill_value, endwith=endwith) + + self[...] = np.take_along_axis(self, sidx, axis=axis) + + def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + """ + Return the minimum along a given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis along which to operate. By default, ``axis`` is None and the + flattened input is used. + .. versionadded:: 1.7.0 + If this is a tuple of ints, the minimum is selected over multiple + axes, instead of a single axis or all the axes as before. + out : array_like, optional + Alternative output array in which to place the result. Must be of + the same shape and buffer length as the expected output. + fill_value : scalar or None, optional + Value used to fill in the masked values. + If None, use the output of `minimum_fill_value`. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + amin : array_like + New array holding the result. + If ``out`` was specified, ``out`` is returned. + + See Also + -------- + ma.minimum_fill_value + Returns the minimum filling value for a given datatype. 
+ + Examples + -------- + >>> import numpy.ma as ma + >>> x = [[1., -2., 3.], [0.2, -0.7, 0.1]] + >>> mask = [[1, 1, 0], [0, 0, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array( + data=[[--, --, 3.0], + [0.2, -0.7, --]], + mask=[[ True, True, False], + [False, False, True]], + fill_value=1e+20) + >>> ma.min(masked_x) + -0.7 + >>> ma.min(masked_x, axis=-1) + masked_array(data=[3.0, -0.7], + mask=[False, False], + fill_value=1e+20) + >>> ma.min(masked_x, axis=0, keepdims=True) + masked_array(data=[[0.2, -0.7, 3.0]], + mask=[[False, False, False]], + fill_value=1e+20) + >>> mask = [[1, 1, 1,], [1, 1, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> ma.min(masked_x, axis=0) + masked_array(data=[--, --, --], + mask=[ True, True, True], + fill_value=1e+20, + dtype=float64) + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + if fill_value is None: + fill_value = minimum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).min( + axis=axis, out=out, **kwargs).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(fill_value).min(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + """ + Return the maximum along a given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis along which to operate. By default, ``axis`` is None and the + flattened input is used. + .. versionadded:: 1.7.0 + If this is a tuple of ints, the maximum is selected over multiple + axes, instead of a single axis or all the axes as before. + out : array_like, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + fill_value : scalar or None, optional + Value used to fill in the masked values. + If None, use the output of maximum_fill_value(). + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + amax : array_like + New array holding the result. + If ``out`` was specified, ``out`` is returned. + + See Also + -------- + ma.maximum_fill_value + Returns the maximum filling value for a given datatype. 
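+
+        Notes
+        -----
+        When an explicit ``out`` is supplied that is not a masked array,
+        a fully masked slot cannot be represented: ``np.nan`` is written
+        into floating-point outputs, while integer or boolean outputs
+        raise `MaskError`. A minimal illustration:
+
+        >>> out = np.zeros(3)
+        >>> np.ma.array([[1., 2., 3.]], mask=True).max(axis=0, out=out)
+        array([nan, nan, nan])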
+ + Examples + -------- + >>> import numpy.ma as ma + >>> x = [[-1., 2.5], [4., -2.], [3., 0.]] + >>> mask = [[0, 0], [1, 0], [1, 0]] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array( + data=[[-1.0, 2.5], + [--, -2.0], + [--, 0.0]], + mask=[[False, False], + [ True, False], + [ True, False]], + fill_value=1e+20) + >>> ma.max(masked_x) + 2.5 + >>> ma.max(masked_x, axis=0) + masked_array(data=[-1.0, 2.5], + mask=[False, False], + fill_value=1e+20) + >>> ma.max(masked_x, axis=1, keepdims=True) + masked_array( + data=[[2.5], + [-2.0], + [0.0]], + mask=[[False], + [False], + [False]], + fill_value=1e+20) + >>> mask = [[1, 1], [1, 1], [1, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> ma.max(masked_x, axis=1) + masked_array(data=[--, --, --], + mask=[ True, True, True], + fill_value=1e+20, + dtype=float64) + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + if fill_value is None: + fill_value = maximum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).max( + axis=axis, out=out, **kwargs).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(fill_value).max(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): + """ + Return (maximum - minimum) along the given dimension + (i.e. peak-to-peak value). + + .. warning:: + `ptp` preserves the data type of the array. This means the + return value for an input of signed integers with n bits + (e.g. `np.int8`, `np.int16`, etc) is also a signed integer + with n bits. In that case, peak-to-peak values greater than + ``2**(n-1)-1`` will be returned as negative values. An example + with a work-around is shown below. + + Parameters + ---------- + axis : {None, int}, optional + Axis along which to find the peaks. If None (default) the + flattened array is used. + out : {None, array_like}, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + fill_value : scalar or None, optional + Value used to fill in the masked values. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + ptp : ndarray. + A new array holding the result, unless ``out`` was + specified, in which case a reference to ``out`` is returned. + + Examples + -------- + >>> x = np.ma.MaskedArray([[4, 9, 2, 10], + ... 
[6, 9, 7, 12]]) + + >>> x.ptp(axis=1) + masked_array(data=[8, 6], + mask=False, + fill_value=999999) + + >>> x.ptp(axis=0) + masked_array(data=[2, 0, 5, 2], + mask=False, + fill_value=999999) + + >>> x.ptp() + 10 + + This example shows that a negative value can be returned when + the input is an array of signed integers. + + >>> y = np.ma.MaskedArray([[1, 127], + ... [0, 127], + ... [-1, 127], + ... [-2, 127]], dtype=np.int8) + >>> y.ptp(axis=1) + masked_array(data=[ 126, 127, -128, -127], + mask=False, + fill_value=999999, + dtype=int8) + + A work-around is to use the `view()` method to view the result as + unsigned integers with the same bit width: + + >>> y.ptp(axis=1).view(np.uint8) + masked_array(data=[126, 127, 128, 129], + mask=False, + fill_value=999999, + dtype=uint8) + """ + if out is None: + result = self.max(axis=axis, fill_value=fill_value, + keepdims=keepdims) + result -= self.min(axis=axis, fill_value=fill_value, + keepdims=keepdims) + return result + out.flat = self.max(axis=axis, out=out, fill_value=fill_value, + keepdims=keepdims) + min_value = self.min(axis=axis, fill_value=fill_value, + keepdims=keepdims) + np.subtract(out, min_value, out=out, casting='unsafe') + return out + + def partition(self, *args, **kwargs): + warnings.warn("Warning: 'partition' will ignore the 'mask' " + f"of the {self.__class__.__name__}.", + stacklevel=2) + return super().partition(*args, **kwargs) + + def argpartition(self, *args, **kwargs): + warnings.warn("Warning: 'argpartition' will ignore the 'mask' " + f"of the {self.__class__.__name__}.", + stacklevel=2) + return super().argpartition(*args, **kwargs) + + def take(self, indices, axis=None, out=None, mode='raise'): + """ + """ + (_data, _mask) = (self._data, self._mask) + cls = type(self) + # Make sure the indices are not masked + maskindices = getmask(indices) + if maskindices is not nomask: + indices = indices.filled(0) + # Get the data, promoting scalars to 0d arrays with [...] so that + # .view works correctly + if out is None: + out = _data.take(indices, axis=axis, mode=mode)[...].view(cls) + else: + np.take(_data, indices, axis=axis, mode=mode, out=out) + # Get the mask + if isinstance(out, MaskedArray): + if _mask is nomask: + outmask = maskindices + else: + outmask = _mask.take(indices, axis=axis, mode=mode) + outmask |= maskindices + out.__setmask__(outmask) + # demote 0d arrays back to scalars, for consistency with ndarray.take + return out[()] + + # Array methods + copy = _arraymethod('copy') + diagonal = _arraymethod('diagonal') + flatten = _arraymethod('flatten') + repeat = _arraymethod('repeat') + squeeze = _arraymethod('squeeze') + swapaxes = _arraymethod('swapaxes') + T = property(fget=lambda self: self.transpose()) + transpose = _arraymethod('transpose') + + def tolist(self, fill_value=None): + """ + Return the data portion of the masked array as a hierarchical Python list. + + Data items are converted to the nearest compatible Python type. + Masked values are converted to `fill_value`. If `fill_value` is None, + the corresponding entries in the output list will be ``None``. + + Parameters + ---------- + fill_value : scalar, optional + The value to use for invalid entries. Default is None. + + Returns + ------- + result : list + The Python list representation of the masked array. 
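+
+        Notes
+        -----
+        For structured dtypes, masked fields are replaced individually,
+        so a partially masked record becomes a tuple with ``None`` only
+        in the masked slots. A small illustration (hypothetical field
+        names ``a`` and ``b``):
+
+        >>> y = np.ma.array([(1, 2.0)], mask=[(False, True)],
+        ...                 dtype=[('a', int), ('b', float)])
+        >>> y.tolist()
+        [(1, None)]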
+ + Examples + -------- + >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) + >>> x.tolist() + [[1, None, 3], [None, 5, None], [7, None, 9]] + >>> x.tolist(-999) + [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] + + """ + _mask = self._mask + # No mask ? Just return .data.tolist ? + if _mask is nomask: + return self._data.tolist() + # Explicit fill_value: fill the array and get the list + if fill_value is not None: + return self.filled(fill_value).tolist() + # Structured array. + names = self.dtype.names + if names: + result = self._data.astype([(_, object) for _ in names]) + for n in names: + result[n][_mask[n]] = None + return result.tolist() + # Standard arrays. + if _mask is nomask: + return [None] + # Set temps to save time when dealing w/ marrays. + inishape = self.shape + result = np.array(self._data.ravel(), dtype=object) + result[_mask.ravel()] = None + result.shape = inishape + return result.tolist() + + def tostring(self, fill_value=None, order='C'): + r""" + A compatibility alias for `tobytes`, with exactly the same behavior. + + Despite its name, it returns `bytes` not `str`\ s. + + .. deprecated:: 1.19.0 + """ + # 2020-03-30, Numpy 1.19.0 + warnings.warn( + "tostring() is deprecated. Use tobytes() instead.", + DeprecationWarning, stacklevel=2) + + return self.tobytes(fill_value, order=order) + + def tobytes(self, fill_value=None, order='C'): + """ + Return the array data as a string containing the raw bytes in the array. + + The array is filled with a fill value before the string conversion. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + fill_value : scalar, optional + Value used to fill in the masked values. Default is None, in which + case `MaskedArray.fill_value` is used. + order : {'C','F','A'}, optional + Order of the data item in the copy. Default is 'C'. + + - 'C' -- C order (row major). + - 'F' -- Fortran order (column major). + - 'A' -- Any, current order of array. + - None -- Same as 'A'. + + See Also + -------- + numpy.ndarray.tobytes + tolist, tofile + + Notes + ----- + As for `ndarray.tobytes`, information about the shape, dtype, etc., + but also about `fill_value`, will be lost. + + Examples + -------- + >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) + >>> x.tobytes() + b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00' + + """ + return self.filled(fill_value).tobytes(order=order) + + def tofile(self, fid, sep="", format="%s"): + """ + Save a masked array to a file in binary format. + + .. warning:: + This function is not implemented yet. + + Raises + ------ + NotImplementedError + When `tofile` is called. + + """ + raise NotImplementedError("MaskedArray.tofile() not implemented yet.") + + def toflex(self): + """ + Transforms a masked array into a flexible-type array. + + The flexible type array that is returned will have two fields: + + * the ``_data`` field stores the ``_data`` part of the array. + * the ``_mask`` field stores the ``_mask`` part of the array. + + Parameters + ---------- + None + + Returns + ------- + record : ndarray + A new flexible-type `ndarray` with two fields: the first element + containing a value, the second element containing the corresponding + mask boolean. The returned record shape matches self.shape. + + Notes + ----- + A side-effect of transforming a masked array into a flexible `ndarray` is + that meta information (``fill_value``, ...) will be lost. 
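+
+        `fromflex` performs the inverse transformation; the round trip
+        preserves the data and the mask, but (as noted above) not
+        ``fill_value``, which is reset to the dtype default. A minimal
+        sketch:
+
+        >>> m = np.ma.array([1, 2], mask=[0, 1], fill_value=-1)
+        >>> int(np.ma.fromflex(m.toflex()).fill_value)
+        999999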
+
+        Examples
+        --------
+        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+        >>> x
+        masked_array(
+          data=[[1, --, 3],
+                [--, 5, --],
+                [7, --, 9]],
+          mask=[[False,  True, False],
+                [ True, False,  True],
+                [False,  True, False]],
+          fill_value=999999)
+        >>> x.toflex()
+        array([[(1, False), (2,  True), (3, False)],
+               [(4,  True), (5, False), (6,  True)],
+               [(7, False), (8,  True), (9, False)]],
+              dtype=[('_data', '<i8'), ('_mask', '?')])
+
+        """
+        # Get the basic dtype.
+        ddtype = self.dtype
+        # Make sure we have a mask
+        _mask = self._mask
+        if _mask is None:
+            _mask = make_mask_none(self.shape, ddtype)
+        # And get its dtype
+        mdtype = self._mask.dtype
+
+        record = np.ndarray(shape=self.shape,
+                            dtype=[('_data', ddtype), ('_mask', mdtype)])
+        record['_data'] = self._data
+        record['_mask'] = self._mask
+        return record
+    torecords = toflex
+
+    # Pickling
+    def __getstate__(self):
+        """Return the internal state of the masked array, for pickling
+        purposes.
+
+        """
+        cf = 'CF'[self.flags.fnc]
+        data_state = super().__reduce__()[2]
+        return data_state + (getmaskarray(self).tobytes(cf), self._fill_value)
+
+    def __setstate__(self, state):
+        """Restore the internal state of the masked array, for
+        pickling purposes.  ``state`` is typically the output of the
+        ``__getstate__`` output, and is a 5-tuple:
+
+        - class name
+        - a tuple giving the shape of the data
+        - a typecode for the data
+        - a binary string for the data
+        - a binary string for the mask.
+
+        """
+        (_, shp, typ, isf, raw, msk, flv) = state
+        super().__setstate__((shp, typ, isf, raw))
+        self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
+        self.fill_value = flv
+
+    def __reduce__(self):
+        """Return a 3-tuple for pickling a MaskedArray.
+
+        """
+        return (_mareconstruct,
+                (self.__class__, self._baseclass, (0,), 'b',),
+                self.__getstate__())
+
+    def __deepcopy__(self, memo=None):
+        from copy import deepcopy
+        copied = MaskedArray.__new__(type(self), self, copy=True)
+        if memo is None:
+            memo = {}
+        memo[id(self)] = copied
+        for (k, v) in self.__dict__.items():
+            copied.__dict__[k] = deepcopy(v, memo)
+        return copied
+
+
+def _mareconstruct(subtype, baseclass, baseshape, basetype,):
+    """Internal function that builds a new MaskedArray from the
+    information stored in a pickle.
+
+    """
+    _data = ndarray.__new__(baseclass, baseshape, basetype)
+    _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
+    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
+
+
+class mvoid(MaskedArray):
+    """
+    Fake a 'void' object to use for masked array with structured dtypes.
+    """
+
+    def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
+                hardmask=False, copy=False, subok=True):
+        _data = np.array(data, copy=copy, subok=subok, dtype=dtype)
+        _data = _data.view(self)
+        _data._hardmask = hardmask
+        if mask is not nomask:
+            if isinstance(mask, np.void):
+                _data._mask = mask
+            else:
+                try:
+                    # Mask is already a 0D array
+                    _data._mask = np.void(mask)
+                except TypeError:
+                    # Transform the mask to a void
+                    mdtype = make_mask_descr(dtype)
+                    _data._mask = np.array(mask, dtype=mdtype)[()]
+        if fill_value is not None:
+            _data.fill_value = fill_value
+        return _data
+
+    @property
+    def _data(self):
+        # Make sure that the _data part is a np.void
+        return super()._data[()]
+
+    def __getitem__(self, indx):
+        """
+        Get the index.
+
+        """
+        m = self._mask
+        if isinstance(m[indx], ndarray):
+            # Can happen when indx is a multi-dimensional field:
+            # A = ma.masked_array(data=[([0,1],)], mask=[([True,
+            #     False],)], dtype=[("A", ">i2", (2,))])
+            # x = A[0]; y = x["A"]; then y.mask["A"].size==2
+            # and we can not say masked/unmasked.
+            # The result is no longer mvoid!
+            # See also issue #6724.
+            return masked_array(
+                data=self._data[indx], mask=m[indx],
+                fill_value=self._fill_value[indx],
+                hard_mask=self._hardmask)
+        if m is not nomask and m[indx]:
+            return masked
+        return self._data[indx]
+
+    def __setitem__(self, indx, value):
+        self._data[indx] = value
+        if self._hardmask:
+            self._mask[indx] |= getattr(value, "_mask", False)
+        else:
+            self._mask[indx] = getattr(value, "_mask", False)
+
+    def __str__(self):
+        m = self._mask
+        if m is nomask:
+            return str(self._data)
+
+        rdtype = _replace_dtype_fields(self._data.dtype, "O")
+        data_arr = super()._data
+        res = data_arr.astype(rdtype)
+        _recursive_printoption(res, self._mask, masked_print_option)
+        return str(res)
+
+    __repr__ = __str__
+
+    def __iter__(self):
+        "Defines an iterator for mvoid"
+        (_data, _mask) = (self._data, self._mask)
+        if _mask is nomask:
+            yield from _data
+        else:
+            for (d, m) in zip(_data, _mask):
+                if m:
+                    yield masked
+                else:
+                    yield d
+
+    def __len__(self):
+        return self._data.__len__()
+
+    def filled(self, fill_value=None):
+        """
+        Return a copy with masked fields filled with a given value.
+
+        Parameters
+        ----------
+        fill_value : array_like, optional
+            The value to use for invalid entries. Can be scalar or
+            non-scalar. If non-scalar, the resulting filled array should
+            be broadcastable over the input array. Default is None, in
+            which case the `fill_value` attribute is used instead.
+
+        Returns
+        -------
+        filled_void
+            A `np.void` object
+
+        See Also
+        --------
+        MaskedArray.filled
+
+        """
+        return asarray(self).filled(fill_value)[()]
+
+    def tolist(self):
+        """
+        Transforms the mvoid object into a tuple.
+
+        Masked fields are replaced by None.
+
+        Returns
+        -------
+        returned_tuple
+            Tuple of fields
+        """
+        _mask = self._mask
+        if _mask is nomask:
+            return self._data.tolist()
+        result = []
+        for (d, m) in zip(self._data, self._mask):
+            if m:
+                result.append(None)
+            else:
+                # .item() makes sure we return a standard Python object
+                result.append(d.item())
+        return tuple(result)
+
+
+##############################################################################
+#                                Shortcuts                                   #
+##############################################################################
+
+
+def isMaskedArray(x):
+    """
+    Test whether input is an instance of MaskedArray.
+
+    This function returns True if `x` is an instance of MaskedArray
+    and returns False otherwise. Any object is accepted as input.
+
+    Parameters
+    ----------
+    x : object
+        Object to test.
+
+    Returns
+    -------
+    result : bool
+        True if `x` is a MaskedArray.
+
+    See Also
+    --------
+    isMA : Alias to isMaskedArray.
+    isarray : Alias to isMaskedArray.
+ + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.eye(3, 3) + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> m = ma.masked_values(a, 0) + >>> m + masked_array( + data=[[1.0, --, --], + [--, 1.0, --], + [--, --, 1.0]], + mask=[[False, True, True], + [ True, False, True], + [ True, True, False]], + fill_value=0.0) + >>> ma.isMaskedArray(a) + False + >>> ma.isMaskedArray(m) + True + >>> ma.isMaskedArray([0, 1, 2]) + False + + """ + return isinstance(x, MaskedArray) + + +isarray = isMaskedArray +isMA = isMaskedArray # backward compatibility + + +class MaskedConstant(MaskedArray): + # the lone np.ma.masked instance + __singleton = None + + @classmethod + def __has_singleton(cls): + # second case ensures `cls.__singleton` is not just a view on the + # superclass singleton + return cls.__singleton is not None and type(cls.__singleton) is cls + + def __new__(cls): + if not cls.__has_singleton(): + # We define the masked singleton as a float for higher precedence. + # Note that it can be tricky sometimes w/ type comparison + data = np.array(0.) + mask = np.array(True) + + # prevent any modifications + data.flags.writeable = False + mask.flags.writeable = False + + # don't fall back on MaskedArray.__new__(MaskedConstant), since + # that might confuse it - this way, the construction is entirely + # within our control + cls.__singleton = MaskedArray(data, mask=mask).view(cls) + + return cls.__singleton + + def __array_finalize__(self, obj): + if not self.__has_singleton(): + # this handles the `.view` in __new__, which we want to copy across + # properties normally + return super().__array_finalize__(obj) + elif self is self.__singleton: + # not clear how this can happen, play it safe + pass + else: + # everywhere else, we want to downcast to MaskedArray, to prevent a + # duplicate maskedconstant. + self.__class__ = MaskedArray + MaskedArray.__array_finalize__(self, obj) + + def __array_prepare__(self, obj, context=None): + return self.view(MaskedArray).__array_prepare__(obj, context) + + def __array_wrap__(self, obj, context=None): + return self.view(MaskedArray).__array_wrap__(obj, context) + + def __str__(self): + return str(masked_print_option._display) + + def __repr__(self): + if self is MaskedConstant.__singleton: + return 'masked' + else: + # it's a subclass, or something is wrong, make it obvious + return object.__repr__(self) + + def __format__(self, format_spec): + # Replace ndarray.__format__ with the default, which supports no format characters. + # Supporting format characters is unwise here, because we do not know what type + # the user was expecting - better to not guess. + try: + return object.__format__(self, format_spec) + except TypeError: + # 2020-03-23, NumPy 1.19.0 + warnings.warn( + "Format strings passed to MaskedConstant are ignored, but in future may " + "error or produce different behavior", + FutureWarning, stacklevel=2 + ) + return object.__format__(self, "") + + def __reduce__(self): + """Override of MaskedArray's __reduce__. + """ + return (self.__class__, ()) + + # inplace operations have no effect. 
We have to override them to avoid + # trying to modify the readonly data and mask arrays + def __iop__(self, other): + return self + __iadd__ = \ + __isub__ = \ + __imul__ = \ + __ifloordiv__ = \ + __itruediv__ = \ + __ipow__ = \ + __iop__ + del __iop__ # don't leave this around + + def copy(self, *args, **kwargs): + """ Copy is a no-op on the maskedconstant, as it is a scalar """ + # maskedconstant is a scalar, so copy doesn't need to copy. There's + # precedent for this with `np.bool_` scalars. + return self + + def __copy__(self): + return self + + def __deepcopy__(self, memo): + return self + + def __setattr__(self, attr, value): + if not self.__has_singleton(): + # allow the singleton to be initialized + return super().__setattr__(attr, value) + elif self is self.__singleton: + raise AttributeError( + f"attributes of {self!r} are not writeable") + else: + # duplicate instance - we can end up here from __array_finalize__, + # where we set the __class__ attribute + return super().__setattr__(attr, value) + + +masked = masked_singleton = MaskedConstant() +masked_array = MaskedArray + + +def array(data, dtype=None, copy=False, order=None, + mask=nomask, fill_value=None, keep_mask=True, + hard_mask=False, shrink=True, subok=True, ndmin=0): + """ + Shortcut to MaskedArray. + + The options are in a different order for convenience and backwards + compatibility. + + """ + return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, + subok=subok, keep_mask=keep_mask, + hard_mask=hard_mask, fill_value=fill_value, + ndmin=ndmin, shrink=shrink, order=order) +array.__doc__ = masked_array.__doc__ + + +def is_masked(x): + """ + Determine whether input has masked values. + + Accepts any object as input, but always returns False unless the + input is a MaskedArray containing masked values. + + Parameters + ---------- + x : array_like + Array to check for masked values. + + Returns + ------- + result : bool + True if `x` is a MaskedArray with masked values, False otherwise. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) + >>> x + masked_array(data=[--, 1, --, 2, 3], + mask=[ True, False, True, False, False], + fill_value=0) + >>> ma.is_masked(x) + True + >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) + >>> x + masked_array(data=[0, 1, 0, 2, 3], + mask=False, + fill_value=42) + >>> ma.is_masked(x) + False + + Always returns False if `x` isn't a MaskedArray. + + >>> x = [False, True, False] + >>> ma.is_masked(x) + False + >>> x = 'a string' + >>> ma.is_masked(x) + False + + """ + m = getmask(x) + if m is nomask: + return False + elif m.any(): + return True + return False + + +############################################################################## +# Extrema functions # +############################################################################## + + +class _extrema_operation(_MaskedUFunc): + """ + Generic class for maximum/minimum functions. + + .. note:: + This is the base class for `_maximum_operation` and + `_minimum_operation`. + + """ + def __init__(self, ufunc, compare, fill_value): + super().__init__(ufunc) + self.compare = compare + self.fill_value_func = fill_value + + def __call__(self, a, b): + "Executes the call behavior." + + return where(self.compare(a, b), a, b) + + def reduce(self, target, axis=np._NoValue): + "Reduce target along the given axis." 
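+        # Strategy (descriptive note): masked entries are replaced by
+        # the extremum-appropriate fill value, the wrapped ufunc's
+        # reduce runs on the filled data, and the mask is reduced with
+        # logical_and, so a result slot stays masked only when every
+        # contributing entry was masked.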
+ target = narray(target, copy=False, subok=True) + m = getmask(target) + + if axis is np._NoValue and target.ndim > 1: + # 2017-05-06, Numpy 1.13.0: warn on axis default + warnings.warn( + f"In the future the default for ma.{self.__name__}.reduce will be axis=0, " + f"not the current None, to match np.{self.__name__}.reduce. " + "Explicitly pass 0 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=2) + axis = None + + if axis is not np._NoValue: + kwargs = dict(axis=axis) + else: + kwargs = dict() + + if m is nomask: + t = self.f.reduce(target, **kwargs) + else: + target = target.filled( + self.fill_value_func(target)).view(type(target)) + t = self.f.reduce(target, **kwargs) + m = umath.logical_and.reduce(m, **kwargs) + if hasattr(t, '_mask'): + t._mask = m + elif m: + t = masked + return t + + def outer(self, a, b): + "Return the function applied to the outer product of a and b." + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = logical_or.outer(ma, mb) + result = self.f.outer(filled(a), filled(b)) + if not isinstance(result, MaskedArray): + result = result.view(MaskedArray) + result._mask = m + return result + +def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + try: + return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a min method, or if the method doesn't accept a + # fill_value argument + return asanyarray(obj).min(axis=axis, fill_value=fill_value, + out=out, **kwargs) +min.__doc__ = MaskedArray.min.__doc__ + +def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + try: + return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a max method, or if the method doesn't accept a + # fill_value argument + return asanyarray(obj).max(axis=axis, fill_value=fill_value, + out=out, **kwargs) +max.__doc__ = MaskedArray.max.__doc__ + + +def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + try: + return obj.ptp(axis, out=out, fill_value=fill_value, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a ptp method or if the method doesn't accept + # a fill_value argument + return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, + out=out, **kwargs) +ptp.__doc__ = MaskedArray.ptp.__doc__ + + +############################################################################## +# Definition of functions from the corresponding methods # +############################################################################## + + +class _frommethod: + """ + Define functions from existing MaskedArray methods. + + Parameters + ---------- + methodname : str + Name of the method to transform. + + """ + + def __init__(self, methodname, reversed=False): + self.__name__ = methodname + self.__doc__ = self.getdoc() + self.reversed = reversed + + def getdoc(self): + "Return the doc of the function (from the doc of the method)." 
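+        # (descriptive note) Prefer the MaskedArray method of the same
+        # name, fall back to the top-level numpy function, and build the
+        # docstring as the signature line plus the wrapped __doc__.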
+ meth = getattr(MaskedArray, self.__name__, None) or\ + getattr(np, self.__name__, None) + signature = self.__name__ + get_object_signature(meth) + if meth is not None: + doc = """ %s\n%s""" % ( + signature, getattr(meth, '__doc__', None)) + return doc + + def __call__(self, a, *args, **params): + if self.reversed: + args = list(args) + a, args[0] = args[0], a + + marr = asanyarray(a) + method_name = self.__name__ + method = getattr(type(marr), method_name, None) + if method is None: + # use the corresponding np function + method = getattr(np, method_name) + + return method(marr, *args, **params) + + +all = _frommethod('all') +anomalies = anom = _frommethod('anom') +any = _frommethod('any') +compress = _frommethod('compress', reversed=True) +cumprod = _frommethod('cumprod') +cumsum = _frommethod('cumsum') +copy = _frommethod('copy') +diagonal = _frommethod('diagonal') +harden_mask = _frommethod('harden_mask') +ids = _frommethod('ids') +maximum = _extrema_operation(umath.maximum, greater, maximum_fill_value) +mean = _frommethod('mean') +minimum = _extrema_operation(umath.minimum, less, minimum_fill_value) +nonzero = _frommethod('nonzero') +prod = _frommethod('prod') +product = _frommethod('prod') +ravel = _frommethod('ravel') +repeat = _frommethod('repeat') +shrink_mask = _frommethod('shrink_mask') +soften_mask = _frommethod('soften_mask') +std = _frommethod('std') +sum = _frommethod('sum') +swapaxes = _frommethod('swapaxes') +#take = _frommethod('take') +trace = _frommethod('trace') +var = _frommethod('var') + +count = _frommethod('count') + +def take(a, indices, axis=None, out=None, mode='raise'): + """ + """ + a = masked_array(a) + return a.take(indices, axis=axis, out=out, mode=mode) + + +def power(a, b, third=None): + """ + Returns element-wise base array raised to power from second array. + + This is the masked array version of `numpy.power`. For details see + `numpy.power`. + + See Also + -------- + numpy.power + + Notes + ----- + The *out* argument to `numpy.power` is not supported, `third` has to be + None. 
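+
+    The result is additionally masked wherever the raw power is invalid
+    (NaN or infinity), e.g. a negative base with a fractional exponent.
+    A small illustration:
+
+    >>> np.ma.power(np.ma.array([-4.0]), 0.5)
+    masked_array(data=[--],
+                 mask=[ True],
+           fill_value=1e+20)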
+ + Examples + -------- + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.power(masked_x, 2) + masked_array(data=[125.43999999999998, 15.784728999999999, + 0.6416010000000001, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> y = [-0.5, 2, 0, 17] + >>> masked_y = ma.masked_array(y, mask) + >>> masked_y + masked_array(data=[-0.5, 2.0, 0.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.power(masked_x, masked_y) + masked_array(data=[0.29880715233359845, 15.784728999999999, 1.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + + """ + if third is not None: + raise MaskError("3-argument power not supported.") + # Get the masks + ma = getmask(a) + mb = getmask(b) + m = mask_or(ma, mb) + # Get the rawdata + fa = getdata(a) + fb = getdata(b) + # Get the type of the result (so that we preserve subclasses) + if isinstance(a, MaskedArray): + basetype = type(a) + else: + basetype = MaskedArray + # Get the result and view it as a (subclass of) MaskedArray + with np.errstate(divide='ignore', invalid='ignore'): + result = np.where(m, fa, umath.power(fa, fb)).view(basetype) + result._update_from(a) + # Find where we're in trouble w/ NaNs and Infs + invalid = np.logical_not(np.isfinite(result.view(ndarray))) + # Add the initial mask + if m is not nomask: + if not result.ndim: + return masked + result._mask = np.logical_or(m, invalid) + # Fix the invalid parts + if invalid.any(): + if not result.ndim: + return masked + elif result._mask is nomask: + result._mask = invalid + result._data[invalid] = result.fill_value + return result + +argmin = _frommethod('argmin') +argmax = _frommethod('argmax') + +def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None): + "Function version of the eponymous method." + a = np.asanyarray(a) + + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + if axis is np._NoValue: + axis = _deprecate_argsort_axis(a) + + if isinstance(a, MaskedArray): + return a.argsort(axis=axis, kind=kind, order=order, + endwith=endwith, fill_value=fill_value) + else: + return a.argsort(axis=axis, kind=kind, order=order) +argsort.__doc__ = MaskedArray.argsort.__doc__ + +def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None): + """ + Return a sorted copy of the masked array. + + Equivalent to creating a copy of the array + and applying the MaskedArray ``sort()`` method. + + Refer to ``MaskedArray.sort`` for the full documentation + + See Also + -------- + MaskedArray.sort : equivalent method + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.sort(masked_x) + masked_array(data=[-3.973, 0.801, 11.2, --], + mask=[False, False, False, True], + fill_value=1e+20) + """ + a = np.array(a, copy=True, subok=True) + if axis is None: + a = a.flatten() + axis = 0 + + if isinstance(a, MaskedArray): + a.sort(axis=axis, kind=kind, order=order, + endwith=endwith, fill_value=fill_value) + else: + a.sort(axis=axis, kind=kind, order=order) + return a + + +def compressed(x): + """ + Return all the non-masked data as a 1-D array. 
+ + This function is equivalent to calling the "compressed" method of a + `ma.MaskedArray`, see `ma.MaskedArray.compressed` for details. + + See Also + -------- + ma.MaskedArray.compressed : Equivalent method. + + Examples + -------- + + Create an array with negative values masked: + + >>> import numpy as np + >>> x = np.array([[1, -1, 0], [2, -1, 3], [7, 4, -1]]) + >>> masked_x = np.ma.masked_array(x, mask=x < 0) + >>> masked_x + masked_array( + data=[[1, --, 0], + [2, --, 3], + [7, 4, --]], + mask=[[False, True, False], + [False, True, False], + [False, False, True]], + fill_value=999999) + + Compress the masked array into a 1-D array of non-masked values: + + >>> np.ma.compressed(masked_x) + array([1, 0, 2, 3, 7, 4]) + + """ + return asanyarray(x).compressed() + + +def concatenate(arrays, axis=0): + """ + Concatenate a sequence of arrays along the given axis. + + Parameters + ---------- + arrays : sequence of array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int, optional + The axis along which the arrays will be joined. Default is 0. + + Returns + ------- + result : MaskedArray + The concatenated array with any masked entries preserved. + + See Also + -------- + numpy.concatenate : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.arange(3) + >>> a[1] = ma.masked + >>> b = ma.arange(2, 5) + >>> a + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999) + >>> b + masked_array(data=[2, 3, 4], + mask=False, + fill_value=999999) + >>> ma.concatenate([a, b]) + masked_array(data=[0, --, 2, 2, 3, 4], + mask=[False, True, False, False, False, False], + fill_value=999999) + + """ + d = np.concatenate([getdata(a) for a in arrays], axis) + rcls = get_masked_subclass(*arrays) + data = d.view(rcls) + # Check whether one of the arrays has a non-empty mask. + for x in arrays: + if getmask(x) is not nomask: + break + else: + return data + # OK, so we have to concatenate the masks + dm = np.concatenate([getmaskarray(a) for a in arrays], axis) + dm = dm.reshape(d.shape) + + # If we decide to keep a '_shrinkmask' option, we want to check that + # all of them are True, and then check for dm.any() + data._mask = _shrink_mask(dm) + return data + + +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + This function is the equivalent of `numpy.diag` that takes masked + values into account, see `numpy.diag` for details. + + See Also + -------- + numpy.diag : Equivalent function for ndarrays. 
+ + Examples + -------- + + Create an array with negative values masked: + + >>> import numpy as np + >>> x = np.array([[11.2, -3.973, 18], [0.801, -1.41, 12], [7, 33, -12]]) + >>> masked_x = np.ma.masked_array(x, mask=x < 0) + >>> masked_x + masked_array( + data=[[11.2, --, 18.0], + [0.801, --, 12.0], + [7.0, 33.0, --]], + mask=[[False, True, False], + [False, True, False], + [False, False, True]], + fill_value=1e+20) + + Isolate the main diagonal from the masked array: + + >>> np.ma.diag(masked_x) + masked_array(data=[11.2, --, --], + mask=[False, True, True], + fill_value=1e+20) + + Isolate the first diagonal below the main diagonal: + + >>> np.ma.diag(masked_x, -1) + masked_array(data=[0.801, 33.0], + mask=[False, False], + fill_value=1e+20) + + """ + output = np.diag(v, k).view(MaskedArray) + if getmask(v) is not nomask: + output._mask = np.diag(v._mask, k) + return output + + +def left_shift(a, n): + """ + Shift the bits of an integer to the left. + + This is the masked array version of `numpy.left_shift`, for details + see that function. + + See Also + -------- + numpy.left_shift + + """ + m = getmask(a) + if m is nomask: + d = umath.left_shift(filled(a), n) + return masked_array(d) + else: + d = umath.left_shift(filled(a, 0), n) + return masked_array(d, mask=m) + + +def right_shift(a, n): + """ + Shift the bits of an integer to the right. + + This is the masked array version of `numpy.right_shift`, for details + see that function. + + See Also + -------- + numpy.right_shift + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [11, 3, 8, 1] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11, 3, 8, --], + mask=[False, False, False, True], + fill_value=999999) + >>> ma.right_shift(masked_x,1) + masked_array(data=[5, 1, 4, --], + mask=[False, False, False, True], + fill_value=999999) + + """ + m = getmask(a) + if m is nomask: + d = umath.right_shift(filled(a), n) + return masked_array(d) + else: + d = umath.right_shift(filled(a, 0), n) + return masked_array(d, mask=m) + + +def put(a, indices, values, mode='raise'): + """ + Set storage-indexed locations to corresponding values. + + This function is equivalent to `MaskedArray.put`, see that method + for details. + + See Also + -------- + MaskedArray.put + + """ + # We can't use 'frommethod', the order of arguments is different + try: + return a.put(indices, values, mode=mode) + except AttributeError: + return narray(a, copy=False).put(indices, values, mode=mode) + + +def putmask(a, mask, values): # , mode='raise'): + """ + Changes elements of an array based on conditional and input values. + + This is the masked array version of `numpy.putmask`, for details see + `numpy.putmask`. + + See Also + -------- + numpy.putmask + + Notes + ----- + Using a masked array as `values` will **not** transform a `ndarray` into + a `MaskedArray`. 
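+
+    Examples
+    --------
+    A short illustrative example (the mask argument here is a plain
+    boolean list):
+
+    >>> arr = np.ma.array([1, 2, 3, 4])
+    >>> np.ma.putmask(arr, [True, False, True, False], [10, 20, 30, 40])
+    >>> arr
+    masked_array(data=[10, 2, 30, 4],
+                 mask=False,
+           fill_value=999999)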
+ + """ + # We can't use 'frommethod', the order of arguments is different + if not isinstance(a, MaskedArray): + a = a.view(MaskedArray) + (valdata, valmask) = (getdata(values), getmask(values)) + if getmask(a) is nomask: + if valmask is not nomask: + a._sharedmask = True + a._mask = make_mask_none(a.shape, a.dtype) + np.copyto(a._mask, valmask, where=mask) + elif a._hardmask: + if valmask is not nomask: + m = a._mask.copy() + np.copyto(m, valmask, where=mask) + a.mask |= m + else: + if valmask is nomask: + valmask = getmaskarray(values) + np.copyto(a._mask, valmask, where=mask) + np.copyto(a._data, valdata, where=mask) + return + + +def transpose(a, axes=None): + """ + Permute the dimensions of an array. + + This function is exactly equivalent to `numpy.transpose`. + + See Also + -------- + numpy.transpose : Equivalent function in top-level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> x = ma.arange(4).reshape((2,2)) + >>> x[1, 1] = ma.masked + >>> x + masked_array( + data=[[0, 1], + [2, --]], + mask=[[False, False], + [False, True]], + fill_value=999999) + + >>> ma.transpose(x) + masked_array( + data=[[0, 2], + [1, --]], + mask=[[False, False], + [False, True]], + fill_value=999999) + """ + # We can't use 'frommethod', as 'transpose' doesn't take keywords + try: + return a.transpose(axes) + except AttributeError: + return narray(a, copy=False).transpose(axes).view(MaskedArray) + + +def reshape(a, new_shape, order='C'): + """ + Returns an array containing the same data with a new shape. + + Refer to `MaskedArray.reshape` for full documentation. + + See Also + -------- + MaskedArray.reshape : equivalent function + + """ + # We can't use 'frommethod', it whine about some parameters. Dmmit. + try: + return a.reshape(new_shape, order=order) + except AttributeError: + _tmp = narray(a, copy=False).reshape(new_shape, order=order) + return _tmp.view(MaskedArray) + + +def resize(x, new_shape): + """ + Return a new masked array with the specified size and shape. + + This is the masked equivalent of the `numpy.resize` function. The new + array is filled with repeated copies of `x` (in the order that the + data are stored in memory). If `x` is masked, the new array will be + masked, and the new mask will be a repetition of the old one. + + See Also + -------- + numpy.resize : Equivalent function in the top level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.array([[1, 2] ,[3, 4]]) + >>> a[0, 1] = ma.masked + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=999999) + >>> np.resize(a, (3, 3)) + masked_array( + data=[[1, 2, 3], + [4, 1, 2], + [3, 4, 1]], + mask=False, + fill_value=999999) + >>> ma.resize(a, (3, 3)) + masked_array( + data=[[1, --, 3], + [4, 1, --], + [3, 4, 1]], + mask=[[False, True, False], + [False, False, True], + [False, False, False]], + fill_value=999999) + + A MaskedArray is always returned, regardless of the input type. + + >>> a = np.array([[1, 2] ,[3, 4]]) + >>> ma.resize(a, (3, 3)) + masked_array( + data=[[1, 2, 3], + [4, 1, 2], + [3, 4, 1]], + mask=False, + fill_value=999999) + + """ + # We can't use _frommethods here, as N.resize is notoriously whiny. + m = getmask(x) + if m is not nomask: + m = np.resize(m, new_shape) + result = np.resize(x, new_shape).view(get_masked_subclass(x)) + if result.ndim: + result._mask = m + return result + + +def ndim(obj): + """ + maskedarray version of the numpy function. 
+ + """ + return np.ndim(getdata(obj)) + +ndim.__doc__ = np.ndim.__doc__ + + +def shape(obj): + "maskedarray version of the numpy function." + return np.shape(getdata(obj)) +shape.__doc__ = np.shape.__doc__ + + +def size(obj, axis=None): + "maskedarray version of the numpy function." + return np.size(getdata(obj), axis) +size.__doc__ = np.size.__doc__ + + +def diff(a, /, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): + """ + Calculate the n-th discrete difference along the given axis. + The first difference is given by ``out[i] = a[i+1] - a[i]`` along + the given axis, higher differences are calculated by using `diff` + recursively. + Preserves the input mask. + + Parameters + ---------- + a : array_like + Input array + n : int, optional + The number of times values are differenced. If zero, the input + is returned as-is. + axis : int, optional + The axis along which the difference is taken, default is the + last axis. + prepend, append : array_like, optional + Values to prepend or append to `a` along axis prior to + performing the difference. Scalar values are expanded to + arrays with length 1 in the direction of axis and the shape + of the input array in along all other axes. Otherwise the + dimension and shape must match `a` except along axis. + + Returns + ------- + diff : MaskedArray + The n-th differences. The shape of the output is the same as `a` + except along `axis` where the dimension is smaller by `n`. The + type of the output is the same as the type of the difference + between any two elements of `a`. This is the same as the type of + `a` in most cases. A notable exception is `datetime64`, which + results in a `timedelta64` output array. + + See Also + -------- + numpy.diff : Equivalent function in the top-level NumPy module. + + Notes + ----- + Type is preserved for boolean arrays, so the result will contain + `False` when consecutive elements are the same and `True` when they + differ. + + For unsigned integer arrays, the results will also be unsigned. This + should not be surprising, as the result is consistent with + calculating the difference directly: + + >>> u8_arr = np.array([1, 0], dtype=np.uint8) + >>> np.ma.diff(u8_arr) + masked_array(data=[255], + mask=False, + fill_value=999999, + dtype=uint8) + >>> u8_arr[1,...] - u8_arr[0,...] 
+ 255 + + If this is not desirable, then the array should be cast to a larger + integer type first: + + >>> i16_arr = u8_arr.astype(np.int16) + >>> np.ma.diff(i16_arr) + masked_array(data=[-1], + mask=False, + fill_value=999999, + dtype=int16) + + Examples + -------- + >>> a = np.array([1, 2, 3, 4, 7, 0, 2, 3]) + >>> x = np.ma.masked_where(a < 2, a) + >>> np.ma.diff(x) + masked_array(data=[--, 1, 1, 3, --, --, 1], + mask=[ True, False, False, False, True, True, False], + fill_value=999999) + + >>> np.ma.diff(x, n=2) + masked_array(data=[--, 0, 2, --, --, --], + mask=[ True, False, False, True, True, True], + fill_value=999999) + + >>> a = np.array([[1, 3, 1, 5, 10], [0, 1, 5, 6, 8]]) + >>> x = np.ma.masked_equal(a, value=1) + >>> np.ma.diff(x) + masked_array( + data=[[--, --, --, 5], + [--, --, 1, 2]], + mask=[[ True, True, True, False], + [ True, True, False, False]], + fill_value=1) + + >>> np.ma.diff(x, axis=0) + masked_array(data=[[--, --, --, 1, -2]], + mask=[[ True, True, True, False, False]], + fill_value=1) + + """ + if n == 0: + return a + if n < 0: + raise ValueError("order must be non-negative but got " + repr(n)) + + a = np.ma.asanyarray(a) + if a.ndim == 0: + raise ValueError( + "diff requires input that is at least one dimensional" + ) + + combined = [] + if prepend is not np._NoValue: + prepend = np.ma.asanyarray(prepend) + if prepend.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + prepend = np.broadcast_to(prepend, tuple(shape)) + combined.append(prepend) + + combined.append(a) + + if append is not np._NoValue: + append = np.ma.asanyarray(append) + if append.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + append = np.broadcast_to(append, tuple(shape)) + combined.append(append) + + if len(combined) > 1: + a = np.ma.concatenate(combined, axis) + + # GH 22465 np.diff without prepend/append preserves the mask + return np.diff(a, n, axis) + + +############################################################################## +# Extra functions # +############################################################################## + + +def where(condition, x=_NoValue, y=_NoValue): + """ + Return a masked array with elements from `x` or `y`, depending on condition. + + .. note:: + When only `condition` is provided, this function is identical to + `nonzero`. The rest of this documentation covers only the case where + all three arguments are provided. + + Parameters + ---------- + condition : array_like, bool + Where True, yield `x`, otherwise yield `y`. + x, y : array_like, optional + Values from which to choose. `x`, `y` and `condition` need to be + broadcastable to some shape. + + Returns + ------- + out : MaskedArray + An masked array with `masked` elements where the condition is masked, + elements from `x` where `condition` is True, and elements from `y` + elsewhere. + + See Also + -------- + numpy.where : Equivalent function in the top-level NumPy module. + nonzero : The function that is called when x and y are omitted + + Examples + -------- + >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], + ... [1, 0, 1], + ... 
[0, 1, 0]]) + >>> x + masked_array( + data=[[0.0, --, 2.0], + [--, 4.0, --], + [6.0, --, 8.0]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=1e+20) + >>> np.ma.where(x > 5, x, -3.1416) + masked_array( + data=[[-3.1416, --, -3.1416], + [--, -3.1416, --], + [6.0, --, 8.0]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=1e+20) + + """ + + # handle the single-argument case + missing = (x is _NoValue, y is _NoValue).count(True) + if missing == 1: + raise ValueError("Must provide both 'x' and 'y' or neither.") + if missing == 2: + return nonzero(condition) + + # we only care if the condition is true - false or masked pick y + cf = filled(condition, False) + xd = getdata(x) + yd = getdata(y) + + # we need the full arrays here for correct final dimensions + cm = getmaskarray(condition) + xm = getmaskarray(x) + ym = getmaskarray(y) + + # deal with the fact that masked.dtype == float64, but we don't actually + # want to treat it as that. + if x is masked and y is not masked: + xd = np.zeros((), dtype=yd.dtype) + xm = np.ones((), dtype=ym.dtype) + elif y is masked and x is not masked: + yd = np.zeros((), dtype=xd.dtype) + ym = np.ones((), dtype=xm.dtype) + + data = np.where(cf, xd, yd) + mask = np.where(cf, xm, ym) + mask = np.where(cm, np.ones((), dtype=mask.dtype), mask) + + # collapse the mask, for backwards compatibility + mask = _shrink_mask(mask) + + return masked_array(data, mask=mask) + + +def choose(indices, choices, out=None, mode='raise'): + """ + Use an index array to construct a new array from a list of choices. + + Given an array of integers and a list of n choice arrays, this method + will create a new array that merges each of the choice arrays. Where a + value in `index` is i, the new array will have the value that choices[i] + contains in the same place. + + Parameters + ---------- + indices : ndarray of ints + This array must contain integers in ``[0, n-1]``, where n is the + number of choices. + choices : sequence of arrays + Choice arrays. The index array and all of the choices should be + broadcastable to the same shape. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and `dtype`. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' : raise an error + * 'wrap' : wrap around + * 'clip' : clip to the range + + Returns + ------- + merged_array : array + + See Also + -------- + choose : equivalent function + + Examples + -------- + >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) + >>> a = np.array([2, 1, 0]) + >>> np.ma.choose(a, choice) + masked_array(data=[3, 2, 1], + mask=False, + fill_value=999999) + + """ + def fmask(x): + "Returns the filled array, or True if masked." + if x is masked: + return True + return filled(x) + + def nmask(x): + "Returns the mask, True if ``masked``, False if ``nomask``." + if x is masked: + return True + return getmask(x) + # Get the indices. + c = filled(indices, 0) + # Get the masks. + masks = [nmask(x) for x in choices] + data = [fmask(x) for x in choices] + # Construct the mask + outputmask = np.choose(c, masks, mode=mode) + outputmask = make_mask(mask_or(outputmask, getmask(indices)), + copy=False, shrink=True) + # Get the choices. 
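+    # (descriptive note) np.choose runs on the filled indices and data;
+    # the output mask was assembled above from the choices' masks and
+    # the indices' mask, so a masked choice surfaces as a masked result
+    # rather than as stale data.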
+ d = np.choose(c, data, mode=mode, out=out).view(MaskedArray) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(outputmask) + return out + d.__setmask__(outputmask) + return d + + +def round_(a, decimals=0, out=None): + """ + Return a copy of a, rounded to 'decimals' places. + + When 'decimals' is negative, it specifies the number of positions + to the left of the decimal point. The real and imaginary parts of + complex numbers are rounded separately. Nothing is done if the + array is not of float type and 'decimals' is greater than or equal + to 0. + + Parameters + ---------- + decimals : int + Number of decimals to round to. May be negative. + out : array_like + Existing array to use for output. + If not given, returns a default copy of a. + + Notes + ----- + If out is given and does not have a mask attribute, the mask of a + is lost! + + Examples + -------- + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x) + masked_array(data=[11.0, -4.0, 1.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round(masked_x, decimals=1) + masked_array(data=[11.2, -4.0, 0.8, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x, decimals=-1) + masked_array(data=[10.0, -0.0, 0.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + """ + if out is None: + return np.round_(a, decimals, out) + else: + np.round_(getdata(a), decimals, out) + if hasattr(out, '_mask'): + out._mask = getmask(a) + return out +round = round_ + + +def _mask_propagate(a, axis): + """ + Mask whole 1-d vectors of an array that contain masked values. + """ + a = array(a, subok=False) + m = getmask(a) + if m is nomask or not m.any() or axis is None: + return a + a._mask = a._mask.copy() + axes = normalize_axis_tuple(axis, a.ndim) + for ax in axes: + a._mask |= m.any(axis=ax, keepdims=True) + return a + + +# Include masked dot here to avoid import problems in getting it from +# extras.py. Note that it is not included in __all__, but rather exported +# from extras in order to avoid backward compatibility problems. +def dot(a, b, strict=False, out=None): + """ + Return the dot product of two arrays. + + This function is the equivalent of `numpy.dot` that takes masked values + into account. Note that `strict` and `out` are in different position + than in the method version. In order to maintain compatibility with the + corresponding method, it is recommended that the optional arguments be + treated as keyword only. At some point that may be mandatory. + + Parameters + ---------- + a, b : masked_array_like + Inputs arrays. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) for + the computation. Default is False. Propagating the mask means that + if a masked value appears in a row or column, the whole row or + column is considered masked. + out : masked_array, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a,b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + .. 
versionadded:: 1.10.2 + + See Also + -------- + numpy.dot : Equivalent function for ndarrays. + + Examples + -------- + >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) + >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) + >>> np.ma.dot(a, b) + masked_array( + data=[[21, 26], + [45, 64]], + mask=[[False, False], + [False, False]], + fill_value=999999) + >>> np.ma.dot(a, b, strict=True) + masked_array( + data=[[--, --], + [--, 64]], + mask=[[ True, True], + [ True, False]], + fill_value=999999) + + """ + if strict is True: + if np.ndim(a) == 0 or np.ndim(b) == 0: + pass + elif b.ndim == 1: + a = _mask_propagate(a, a.ndim - 1) + b = _mask_propagate(b, b.ndim - 1) + else: + a = _mask_propagate(a, a.ndim - 1) + b = _mask_propagate(b, b.ndim - 2) + am = ~getmaskarray(a) + bm = ~getmaskarray(b) + + if out is None: + d = np.dot(filled(a, 0), filled(b, 0)) + m = ~np.dot(am, bm) + if np.ndim(d) == 0: + d = np.asarray(d) + r = d.view(get_masked_subclass(a, b)) + r.__setmask__(m) + return r + else: + d = np.dot(filled(a, 0), filled(b, 0), out._data) + if out.mask.shape != d.shape: + out._mask = np.empty(d.shape, MaskType) + np.dot(am, bm, out._mask) + np.logical_not(out._mask, out._mask) + return out + + +def inner(a, b): + """ + Returns the inner product of a and b for arrays of floating point types. + + Like the generic NumPy equivalent the product sum is over the last dimension + of a and b. The first argument is not conjugated. + + """ + fa = filled(a, 0) + fb = filled(b, 0) + if fa.ndim == 0: + fa.shape = (1,) + if fb.ndim == 0: + fb.shape = (1,) + return np.inner(fa, fb).view(MaskedArray) +inner.__doc__ = doc_note(np.inner.__doc__, + "Masked values are replaced by 0.") +innerproduct = inner + + +def outer(a, b): + "maskedarray version of the numpy function." + fa = filled(a, 0).ravel() + fb = filled(b, 0).ravel() + d = np.outer(fa, fb) + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + return masked_array(d) + ma = getmaskarray(a) + mb = getmaskarray(b) + m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False) + return masked_array(d, mask=m) +outer.__doc__ = doc_note(np.outer.__doc__, + "Masked values are replaced by 0.") +outerproduct = outer + + +def _convolve_or_correlate(f, a, v, mode, propagate_mask): + """ + Helper function for ma.correlate and ma.convolve + """ + if propagate_mask: + # results which are contributed to by either item in any pair being invalid + mask = ( + f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode) + | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode) + ) + data = f(getdata(a), getdata(v), mode=mode) + else: + # results which are not contributed to by any pair of valid elements + mask = ~f(~getmaskarray(a), ~getmaskarray(v)) + data = f(filled(a, 0), filled(v, 0), mode=mode) + + return masked_array(data, mask=mask) + + +def correlate(a, v, mode='valid', propagate_mask=True): + """ + Cross-correlation of two 1-dimensional sequences. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `np.convolve` docstring. Note that the default + is 'valid', unlike `convolve`, which uses 'full'. + propagate_mask : bool + If True, then a result element is masked if any masked element contributes towards it. + If False, then a result element is only masked if no non-masked element + contribute towards it + + Returns + ------- + out : MaskedArray + Discrete cross-correlation of `a` and `v`. 
+ + See Also + -------- + numpy.correlate : Equivalent function in the top-level NumPy module. + """ + return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask) + + +def convolve(a, v, mode='full', propagate_mask=True): + """ + Returns the discrete, linear convolution of two one-dimensional sequences. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `np.convolve` docstring. + propagate_mask : bool + If True, then if any masked element is included in the sum for a result + element, then the result is masked. + If False, then the result element is only masked if no non-masked cells + contribute towards it + + Returns + ------- + out : MaskedArray + Discrete, linear convolution of `a` and `v`. + + See Also + -------- + numpy.convolve : Equivalent function in the top-level NumPy module. + """ + return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask) + + +def allequal(a, b, fill_value=True): + """ + Return True if all entries of a and b are equal, using + fill_value as a truth value where either or both are masked. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + fill_value : bool, optional + Whether masked values in a or b are considered equal (True) or not + (False). + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance, False otherwise. If either array contains NaN, + then False is returned. + + See Also + -------- + all, any + numpy.ma.allclose + + Examples + -------- + >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data=[10000000000.0, 1e-07, --], + mask=[False, False, True], + fill_value=1e+20) + + >>> b = np.array([1e10, 1e-7, -42.0]) + >>> b + array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) + >>> np.ma.allequal(a, b, fill_value=False) + False + >>> np.ma.allequal(a, b) + True + + """ + m = mask_or(getmask(a), getmask(b)) + if m is nomask: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + return d.all() + elif fill_value: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + dm = array(d, mask=m, copy=False) + return dm.filled(True).all(None) + else: + return False + + +def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + This function is equivalent to `allclose` except that masked values + are treated as equal (default) or unequal, depending on the `masked_equal` + argument. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + masked_equal : bool, optional + Whether masked values in `a` and `b` are considered equal (True) or not + (False). They are considered equal by default. + rtol : float, optional + Relative tolerance. The relative difference is equal to ``rtol * b``. + Default is 1e-5. + atol : float, optional + Absolute tolerance. The absolute difference is equal to `atol`. + Default is 1e-8. + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance, False otherwise. If either array contains NaN, then + False is returned. + + See Also + -------- + all, any + numpy.allclose : the non-masked `allclose`. + + Notes + ----- + If the following equation is element-wise True, then `allclose` returns + True:: + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + Return True if all elements of `a` and `b` are equal subject to + given tolerances. 
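+
+    A quick numeric check of that criterion for the second example
+    below (illustrative arithmetic only):
+
+    >>> bool(abs(1e10 - 1.00001e10) <= 1e-8 + 1e-5 * abs(1.00001e10))
+    True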
+
+    Examples
+    --------
+    >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
+    >>> a
+    masked_array(data=[10000000000.0, 1e-07, --],
+                 mask=[False, False,  True],
+           fill_value=1e+20)
+    >>> b = np.ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1])
+    >>> np.ma.allclose(a, b)
+    False
+
+    >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
+    >>> b = np.ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
+    >>> np.ma.allclose(a, b)
+    True
+    >>> np.ma.allclose(a, b, masked_equal=False)
+    False
+
+    Masked values are not compared directly.
+
+    >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
+    >>> b = np.ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
+    >>> np.ma.allclose(a, b)
+    True
+    >>> np.ma.allclose(a, b, masked_equal=False)
+    False
+
+    """
+    x = masked_array(a, copy=False)
+    y = masked_array(b, copy=False)
+
+    # make sure y is an inexact type to avoid abs(MIN_INT); will cause
+    # casting of x later.
+    # NOTE: We explicitly allow timedelta, which used to work. This could
+    # possibly be deprecated. See also gh-18286.
+    # timedelta works if `atol` is an integer or also a timedelta.
+    # Although, the default tolerances are unlikely to be useful
+    if y.dtype.kind != "m":
+        dtype = np.result_type(y, 1.)
+        if y.dtype != dtype:
+            y = masked_array(y, dtype=dtype, copy=False)
+
+    m = mask_or(getmask(x), getmask(y))
+    xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)
+    # If we have some infs, they should fall at the same place.
+    if not np.all(xinf == filled(np.isinf(y), False)):
+        return False
+    # No infs at all
+    if not np.any(xinf):
+        d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
+                   masked_equal)
+        return np.all(d)
+
+    if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
+        return False
+    x = x[~xinf]
+    y = y[~xinf]
+
+    d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
+               masked_equal)
+
+    return np.all(d)
+
+
+def asarray(a, dtype=None, order=None):
+    """
+    Convert the input to a masked array of the given data-type.
+
+    No copy is performed if the input is already an `ndarray`. If `a` is
+    a subclass of `MaskedArray`, a base class `MaskedArray` is returned.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to a masked array. This
+        includes lists, lists of tuples, tuples, tuples of tuples, tuples
+        of lists, ndarrays and masked arrays.
+    dtype : dtype, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F'}, optional
+        Whether to use row-major ('C') or column-major ('FORTRAN') memory
+        representation. Default is 'C'.
+
+    Returns
+    -------
+    out : MaskedArray
+        Masked array interpretation of `a`.
+
+    See Also
+    --------
+    asanyarray : Similar to `asarray`, but conserves subclasses.
+
+    Examples
+    --------
+    >>> x = np.arange(10.).reshape(2, 5)
+    >>> x
+    array([[0., 1., 2., 3., 4.],
+           [5., 6., 7., 8., 9.]])
+    >>> np.ma.asarray(x)
+    masked_array(
+      data=[[0., 1., 2., 3., 4.],
+            [5., 6., 7., 8., 9.]],
+      mask=False,
+      fill_value=1e+20)
+    >>> type(np.ma.asarray(x))
+    <class 'numpy.ma.core.MaskedArray'>
+
+    """
+    order = order or 'C'
+    return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
+                        subok=False, order=order)
+
+
+def asanyarray(a, dtype=None):
+    """
+    Convert the input to a masked array, conserving subclasses.
+
+    If `a` is a subclass of `MaskedArray`, its class is conserved.
+    No copy is performed if the input is already an `ndarray`.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array.
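+        This includes lists, tuples, ndarrays and existing masked arrays.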
+    dtype : dtype, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F'}, optional
+        Whether to use row-major ('C') or column-major ('FORTRAN') memory
+        representation. Default is 'C'.
+
+    Returns
+    -------
+    out : MaskedArray
+        MaskedArray interpretation of `a`.
+
+    See Also
+    --------
+    asarray : Similar to `asanyarray`, but does not conserve subclass.
+
+    Examples
+    --------
+    >>> x = np.arange(10.).reshape(2, 5)
+    >>> x
+    array([[0., 1., 2., 3., 4.],
+           [5., 6., 7., 8., 9.]])
+    >>> np.ma.asanyarray(x)
+    masked_array(
+      data=[[0., 1., 2., 3., 4.],
+            [5., 6., 7., 8., 9.]],
+      mask=False,
+      fill_value=1e+20)
+    >>> type(np.ma.asanyarray(x))
+    <class 'numpy.ma.core.MaskedArray'>
+
+    """
+    # workaround for #8666, to preserve identity. Ideally the bottom line
+    # would handle this for us.
+    if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype):
+        return a
+    return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
+
+
+##############################################################################
+#                                 Pickling                                   #
+##############################################################################
+
+
+def fromfile(file, dtype=float, count=-1, sep=''):
+    raise NotImplementedError(
+        "fromfile() not yet implemented for a MaskedArray.")
+
+
+def fromflex(fxarray):
+    """
+    Build a masked array from a suitable flexible-type array.
+
+    The input array has to have a data-type with ``_data`` and ``_mask``
+    fields. This type of array is output by `MaskedArray.toflex`.
+
+    Parameters
+    ----------
+    fxarray : ndarray
+        The structured input array, containing ``_data`` and ``_mask``
+        fields. If present, other fields are discarded.
+
+    Returns
+    -------
+    result : MaskedArray
+        The constructed masked array.
+
+    See Also
+    --------
+    MaskedArray.toflex : Build a flexible-type array from a masked array.
+
+    Examples
+    --------
+    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
+    >>> rec = x.toflex()
+    >>> rec
+    array([[(0, False), (1,  True), (2, False)],
+           [(3,  True), (4, False), (5,  True)],
+           [(6, False), (7,  True), (8, False)]],
+          dtype=[('_data', '<i8'), ('_mask', '?')])
+    >>> x2 = np.ma.fromflex(rec)
+    >>> x2
+    masked_array(
+      data=[[0, --, 2],
+            [--, 4, --],
+            [6, --, 8]],
+      mask=[[False,  True, False],
+            [ True, False,  True],
+            [False,  True, False]],
+      fill_value=999999)
+
+    Extra fields can be present in the structured array but are discarded:
+
+    >>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
+    >>> rec2 = np.zeros((2, 2), dtype=dt)
+    >>> rec2
+    array([[(0, False, 0.), (0, False, 0.)],
+           [(0, False, 0.), (0, False, 0.)]],
+          dtype=[('_data', '<i4'), ('_mask', '?'), ('field3', '<f4')])
+    >>> y = np.ma.fromflex(rec2)
+    >>> y
+    masked_array(
+      data=[[0, 0],
+            [0, 0]],
+      mask=[[False, False],
+            [False, False]],
+      fill_value=999999,
+      dtype=int32)
+
+    """
+    return masked_array(fxarray['_data'], mask=fxarray['_mask'])
+
+
+class _convert2ma:
+
+    """
+    Convert functions from numpy to numpy.ma.
+
+    Parameters
+    ----------
+    _methodname : string
+        Name of the method to transform.
+
+    """
+    __doc__ = None
+
+    def __init__(self, funcname, np_ret, np_ma_ret, params=None):
+        self._func = getattr(np, funcname)
+        self.__doc__ = self.getdoc(np_ret, np_ma_ret)
+        self._extras = params or {}
+
+    def getdoc(self, np_ret, np_ma_ret):
+        "Return the doc of the function (from the doc of the method)."
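+        # Pull the docstring off the wrapped NumPy callable, swap the
+        # documented ndarray return type for its MaskedArray counterpart,
+        # and prepend the signature so the np.ma version documents itself.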
+ doc = getattr(self._func, '__doc__', None) + sig = get_object_signature(self._func) + if doc: + doc = self._replace_return_type(doc, np_ret, np_ma_ret) + # Add the signature of the function at the beginning of the doc + if sig: + sig = "%s%s\n" % (self._func.__name__, sig) + doc = sig + doc + return doc + + def _replace_return_type(self, doc, np_ret, np_ma_ret): + """ + Replace documentation of ``np`` function's return type. + + Replaces it with the proper type for the ``np.ma`` function. + + Parameters + ---------- + doc : str + The documentation of the ``np`` method. + np_ret : str + The return type string of the ``np`` method that we want to + replace. (e.g. "out : ndarray") + np_ma_ret : str + The return type string of the ``np.ma`` method. + (e.g. "out : MaskedArray") + """ + if np_ret not in doc: + raise RuntimeError( + f"Failed to replace `{np_ret}` with `{np_ma_ret}`. " + f"The documentation string for return type, {np_ret}, is not " + f"found in the docstring for `np.{self._func.__name__}`. " + f"Fix the docstring for `np.{self._func.__name__}` or " + "update the expected string for return type." + ) + + return doc.replace(np_ret, np_ma_ret) + + def __call__(self, *args, **params): + # Find the common parameters to the call and the definition + _extras = self._extras + common_params = set(params).intersection(_extras) + # Drop the common parameters from the call + for p in common_params: + _extras[p] = params.pop(p) + # Get the result + result = self._func.__call__(*args, **params).view(MaskedArray) + if "fill_value" in common_params: + result.fill_value = _extras.get("fill_value", None) + if "hardmask" in common_params: + result._hardmask = bool(_extras.get("hard_mask", False)) + return result + + +arange = _convert2ma( + 'arange', + params=dict(fill_value=None, hardmask=False), + np_ret='arange : ndarray', + np_ma_ret='arange : MaskedArray', +) +clip = _convert2ma( + 'clip', + params=dict(fill_value=None, hardmask=False), + np_ret='clipped_array : ndarray', + np_ma_ret='clipped_array : MaskedArray', +) +empty = _convert2ma( + 'empty', + params=dict(fill_value=None, hardmask=False), + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +empty_like = _convert2ma( + 'empty_like', + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +frombuffer = _convert2ma( + 'frombuffer', + np_ret='out : ndarray', + np_ma_ret='out: MaskedArray', +) +fromfunction = _convert2ma( + 'fromfunction', + np_ret='fromfunction : any', + np_ma_ret='fromfunction: MaskedArray', +) +identity = _convert2ma( + 'identity', + params=dict(fill_value=None, hardmask=False), + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +indices = _convert2ma( + 'indices', + params=dict(fill_value=None, hardmask=False), + np_ret='grid : one ndarray or tuple of ndarrays', + np_ma_ret='grid : one MaskedArray or tuple of MaskedArrays', +) +ones = _convert2ma( + 'ones', + params=dict(fill_value=None, hardmask=False), + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +ones_like = _convert2ma( + 'ones_like', + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +squeeze = _convert2ma( + 'squeeze', + params=dict(fill_value=None, hardmask=False), + np_ret='squeezed : ndarray', + np_ma_ret='squeezed : MaskedArray', +) +zeros = _convert2ma( + 'zeros', + params=dict(fill_value=None, hardmask=False), + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +zeros_like = _convert2ma( + 'zeros_like', + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) + + +def append(a, b, 
axis=None): + """Append values to the end of an array. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + a : array_like + Values are appended to a copy of this array. + b : array_like + These values are appended to a copy of `a`. It must be of the + correct shape (the same shape as `a`, excluding `axis`). If `axis` + is not specified, `b` can be any shape and will be flattened + before use. + axis : int, optional + The axis along which `v` are appended. If `axis` is not given, + both `a` and `b` are flattened before use. + + Returns + ------- + append : MaskedArray + A copy of `a` with `b` appended to `axis`. Note that `append` + does not occur in-place: a new array is allocated and filled. If + `axis` is None, the result is a flattened array. + + See Also + -------- + numpy.append : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.masked_values([1, 2, 3], 2) + >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) + >>> ma.append(a, b) + masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9], + mask=[False, True, False, False, False, False, True, False, + False], + fill_value=999999) + """ + return concatenate([a, b], axis) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/core.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/core.pyi new file mode 100644 index 00000000..e94ebce3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/core.pyi @@ -0,0 +1,471 @@ +from collections.abc import Callable +from typing import Any, TypeVar +from numpy import ndarray, dtype, float64 + +from numpy import ( + amax as amax, + amin as amin, + bool_ as bool_, + expand_dims as expand_dims, + clip as clip, + indices as indices, + ones_like as ones_like, + squeeze as squeeze, + zeros_like as zeros_like, +) + +from numpy.lib.function_base import ( + angle as angle, +) + +# TODO: Set the `bound` to something more suitable once we +# have proper shape support +_ShapeType = TypeVar("_ShapeType", bound=Any) +_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) + +__all__: list[str] + +MaskType = bool_ +nomask: bool_ + +class MaskedArrayFutureWarning(FutureWarning): ... +class MAError(Exception): ... +class MaskError(MAError): ... + +def default_fill_value(obj): ... +def minimum_fill_value(obj): ... +def maximum_fill_value(obj): ... +def set_fill_value(a, fill_value): ... +def common_fill_value(a, b): ... +def filled(a, fill_value=...): ... +def getdata(a, subok=...): ... +get_data = getdata + +def fix_invalid(a, mask=..., copy=..., fill_value=...): ... + +class _MaskedUFunc: + f: Any + __doc__: Any + __name__: Any + def __init__(self, ufunc): ... + +class _MaskedUnaryOperation(_MaskedUFunc): + fill: Any + domain: Any + def __init__(self, mufunc, fill=..., domain=...): ... + def __call__(self, a, *args, **kwargs): ... + +class _MaskedBinaryOperation(_MaskedUFunc): + fillx: Any + filly: Any + def __init__(self, mbfunc, fillx=..., filly=...): ... + def __call__(self, a, b, *args, **kwargs): ... + def reduce(self, target, axis=..., dtype=...): ... + def outer(self, a, b): ... + def accumulate(self, target, axis=...): ... + +class _DomainedBinaryOperation(_MaskedUFunc): + domain: Any + fillx: Any + filly: Any + def __init__(self, dbfunc, domain, fillx=..., filly=...): ... + def __call__(self, a, b, *args, **kwargs): ... 
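+
+# Module-level ufunc instances built from the wrapper classes above: unary
+# wrappers pre-fill invalid entries and mask domain violations, binary
+# wrappers combine the operand masks.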
+ +exp: _MaskedUnaryOperation +conjugate: _MaskedUnaryOperation +sin: _MaskedUnaryOperation +cos: _MaskedUnaryOperation +arctan: _MaskedUnaryOperation +arcsinh: _MaskedUnaryOperation +sinh: _MaskedUnaryOperation +cosh: _MaskedUnaryOperation +tanh: _MaskedUnaryOperation +abs: _MaskedUnaryOperation +absolute: _MaskedUnaryOperation +fabs: _MaskedUnaryOperation +negative: _MaskedUnaryOperation +floor: _MaskedUnaryOperation +ceil: _MaskedUnaryOperation +around: _MaskedUnaryOperation +logical_not: _MaskedUnaryOperation +sqrt: _MaskedUnaryOperation +log: _MaskedUnaryOperation +log2: _MaskedUnaryOperation +log10: _MaskedUnaryOperation +tan: _MaskedUnaryOperation +arcsin: _MaskedUnaryOperation +arccos: _MaskedUnaryOperation +arccosh: _MaskedUnaryOperation +arctanh: _MaskedUnaryOperation + +add: _MaskedBinaryOperation +subtract: _MaskedBinaryOperation +multiply: _MaskedBinaryOperation +arctan2: _MaskedBinaryOperation +equal: _MaskedBinaryOperation +not_equal: _MaskedBinaryOperation +less_equal: _MaskedBinaryOperation +greater_equal: _MaskedBinaryOperation +less: _MaskedBinaryOperation +greater: _MaskedBinaryOperation +logical_and: _MaskedBinaryOperation +alltrue: _MaskedBinaryOperation +logical_or: _MaskedBinaryOperation +sometrue: Callable[..., Any] +logical_xor: _MaskedBinaryOperation +bitwise_and: _MaskedBinaryOperation +bitwise_or: _MaskedBinaryOperation +bitwise_xor: _MaskedBinaryOperation +hypot: _MaskedBinaryOperation +divide: _MaskedBinaryOperation +true_divide: _MaskedBinaryOperation +floor_divide: _MaskedBinaryOperation +remainder: _MaskedBinaryOperation +fmod: _MaskedBinaryOperation +mod: _MaskedBinaryOperation + +def make_mask_descr(ndtype): ... +def getmask(a): ... +get_mask = getmask + +def getmaskarray(arr): ... +def is_mask(m): ... +def make_mask(m, copy=..., shrink=..., dtype=...): ... +def make_mask_none(newshape, dtype=...): ... +def mask_or(m1, m2, copy=..., shrink=...): ... +def flatten_mask(mask): ... +def masked_where(condition, a, copy=...): ... +def masked_greater(x, value, copy=...): ... +def masked_greater_equal(x, value, copy=...): ... +def masked_less(x, value, copy=...): ... +def masked_less_equal(x, value, copy=...): ... +def masked_not_equal(x, value, copy=...): ... +def masked_equal(x, value, copy=...): ... +def masked_inside(x, v1, v2, copy=...): ... +def masked_outside(x, v1, v2, copy=...): ... +def masked_object(x, value, copy=..., shrink=...): ... +def masked_values(x, value, rtol=..., atol=..., copy=..., shrink=...): ... +def masked_invalid(a, copy=...): ... + +class _MaskedPrintOption: + def __init__(self, display): ... + def display(self): ... + def set_display(self, s): ... + def enabled(self): ... + def enable(self, shrink=...): ... + +masked_print_option: _MaskedPrintOption + +def flatten_structured_array(a): ... + +class MaskedIterator: + ma: Any + dataiter: Any + maskiter: Any + def __init__(self, ma): ... + def __iter__(self): ... + def __getitem__(self, indx): ... + def __setitem__(self, index, value): ... + def __next__(self): ... + +class MaskedArray(ndarray[_ShapeType, _DType_co]): + __array_priority__: Any + def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... + def __array_finalize__(self, obj): ... + def __array_wrap__(self, obj, context=...): ... + def view(self, dtype=..., type=..., fill_value=...): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + @property + def dtype(self): ... 
+ @dtype.setter + def dtype(self, dtype): ... + @property + def shape(self): ... + @shape.setter + def shape(self, shape): ... + def __setmask__(self, mask, copy=...): ... + @property + def mask(self): ... + @mask.setter + def mask(self, value): ... + @property + def recordmask(self): ... + @recordmask.setter + def recordmask(self, mask): ... + def harden_mask(self): ... + def soften_mask(self): ... + @property + def hardmask(self): ... + def unshare_mask(self): ... + @property + def sharedmask(self): ... + def shrink_mask(self): ... + @property + def baseclass(self): ... + data: Any + @property + def flat(self): ... + @flat.setter + def flat(self, value): ... + @property + def fill_value(self): ... + @fill_value.setter + def fill_value(self, value=...): ... + get_fill_value: Any + set_fill_value: Any + def filled(self, fill_value=...): ... + def compressed(self): ... + def compress(self, condition, axis=..., out=...): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def __ge__(self, other): ... + def __gt__(self, other): ... + def __le__(self, other): ... + def __lt__(self, other): ... + def __add__(self, other): ... + def __radd__(self, other): ... + def __sub__(self, other): ... + def __rsub__(self, other): ... + def __mul__(self, other): ... + def __rmul__(self, other): ... + def __div__(self, other): ... + def __truediv__(self, other): ... + def __rtruediv__(self, other): ... + def __floordiv__(self, other): ... + def __rfloordiv__(self, other): ... + def __pow__(self, other): ... + def __rpow__(self, other): ... + def __iadd__(self, other): ... + def __isub__(self, other): ... + def __imul__(self, other): ... + def __idiv__(self, other): ... + def __ifloordiv__(self, other): ... + def __itruediv__(self, other): ... + def __ipow__(self, other): ... + def __float__(self): ... + def __int__(self): ... + @property # type: ignore[misc] + def imag(self): ... + get_imag: Any + @property # type: ignore[misc] + def real(self): ... + get_real: Any + def count(self, axis=..., keepdims=...): ... + def ravel(self, order=...): ... + def reshape(self, *s, **kwargs): ... + def resize(self, newshape, refcheck=..., order=...): ... + def put(self, indices, values, mode=...): ... + def ids(self): ... + def iscontiguous(self): ... + def all(self, axis=..., out=..., keepdims=...): ... + def any(self, axis=..., out=..., keepdims=...): ... + def nonzero(self): ... + def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... + def dot(self, b, out=..., strict=...): ... + def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... + def cumsum(self, axis=..., dtype=..., out=...): ... + def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... + product: Any + def cumprod(self, axis=..., dtype=..., out=...): ... + def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... + def anom(self, axis=..., dtype=...): ... + def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... + def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... + def round(self, decimals=..., out=...): ... + def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=...): ... + def argmin(self, axis=..., fill_value=..., out=..., *, keepdims=...): ... + def argmax(self, axis=..., fill_value=..., out=..., *, keepdims=...): ... + def sort(self, axis=..., kind=..., order=..., endwith=..., fill_value=...): ... + def min(self, axis=..., out=..., fill_value=..., keepdims=...): ... + # NOTE: deprecated + # def tostring(self, fill_value=..., order=...): ... 
+ def max(self, axis=..., out=..., fill_value=..., keepdims=...): ... + def ptp(self, axis=..., out=..., fill_value=..., keepdims=...): ... + def partition(self, *args, **kwargs): ... + def argpartition(self, *args, **kwargs): ... + def take(self, indices, axis=..., out=..., mode=...): ... + copy: Any + diagonal: Any + flatten: Any + repeat: Any + squeeze: Any + swapaxes: Any + T: Any + transpose: Any + def tolist(self, fill_value=...): ... + def tobytes(self, fill_value=..., order=...): ... + def tofile(self, fid, sep=..., format=...): ... + def toflex(self): ... + torecords: Any + def __reduce__(self): ... + def __deepcopy__(self, memo=...): ... + +class mvoid(MaskedArray[_ShapeType, _DType_co]): + def __new__( + self, + data, + mask=..., + dtype=..., + fill_value=..., + hardmask=..., + copy=..., + subok=..., + ): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + def __iter__(self): ... + def __len__(self): ... + def filled(self, fill_value=...): ... + def tolist(self): ... + +def isMaskedArray(x): ... +isarray = isMaskedArray +isMA = isMaskedArray + +# 0D float64 array +class MaskedConstant(MaskedArray[Any, dtype[float64]]): + def __new__(cls): ... + __class__: Any + def __array_finalize__(self, obj): ... + def __array_prepare__(self, obj, context=...): ... + def __array_wrap__(self, obj, context=...): ... + def __format__(self, format_spec): ... + def __reduce__(self): ... + def __iop__(self, other): ... + __iadd__: Any + __isub__: Any + __imul__: Any + __ifloordiv__: Any + __itruediv__: Any + __ipow__: Any + def copy(self, *args, **kwargs): ... + def __copy__(self): ... + def __deepcopy__(self, memo): ... + def __setattr__(self, attr, value): ... + +masked: MaskedConstant +masked_singleton: MaskedConstant +masked_array = MaskedArray + +def array( + data, + dtype=..., + copy=..., + order=..., + mask=..., + fill_value=..., + keep_mask=..., + hard_mask=..., + shrink=..., + subok=..., + ndmin=..., +): ... +def is_masked(x): ... + +class _extrema_operation(_MaskedUFunc): + compare: Any + fill_value_func: Any + def __init__(self, ufunc, compare, fill_value): ... + # NOTE: in practice `b` has a default value, but users should + # explicitly provide a value here as the default is deprecated + def __call__(self, a, b): ... + def reduce(self, target, axis=...): ... + def outer(self, a, b): ... + +def min(obj, axis=..., out=..., fill_value=..., keepdims=...): ... +def max(obj, axis=..., out=..., fill_value=..., keepdims=...): ... +def ptp(obj, axis=..., out=..., fill_value=..., keepdims=...): ... + +class _frommethod: + __name__: Any + __doc__: Any + reversed: Any + def __init__(self, methodname, reversed=...): ... + def getdoc(self): ... + def __call__(self, a, *args, **params): ... + +all: _frommethod +anomalies: _frommethod +anom: _frommethod +any: _frommethod +compress: _frommethod +cumprod: _frommethod +cumsum: _frommethod +copy: _frommethod +diagonal: _frommethod +harden_mask: _frommethod +ids: _frommethod +mean: _frommethod +nonzero: _frommethod +prod: _frommethod +product: _frommethod +ravel: _frommethod +repeat: _frommethod +soften_mask: _frommethod +std: _frommethod +sum: _frommethod +swapaxes: _frommethod +trace: _frommethod +var: _frommethod +count: _frommethod +argmin: _frommethod +argmax: _frommethod + +minimum: _extrema_operation +maximum: _extrema_operation + +def take(a, indices, axis=..., out=..., mode=...): ... +def power(a, b, third=...): ... +def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=...): ... 
+def sort(a, axis=..., kind=..., order=..., endwith=..., fill_value=...): ... +def compressed(x): ... +def concatenate(arrays, axis=...): ... +def diag(v, k=...): ... +def left_shift(a, n): ... +def right_shift(a, n): ... +def put(a, indices, values, mode=...): ... +def putmask(a, mask, values): ... +def transpose(a, axes=...): ... +def reshape(a, new_shape, order=...): ... +def resize(x, new_shape): ... +def ndim(obj): ... +def shape(obj): ... +def size(obj, axis=...): ... +def diff(a, /, n=..., axis=..., prepend=..., append=...): ... +def where(condition, x=..., y=...): ... +def choose(indices, choices, out=..., mode=...): ... +def round(a, decimals=..., out=...): ... + +def inner(a, b): ... +innerproduct = inner + +def outer(a, b): ... +outerproduct = outer + +def correlate(a, v, mode=..., propagate_mask=...): ... +def convolve(a, v, mode=..., propagate_mask=...): ... +def allequal(a, b, fill_value=...): ... +def allclose(a, b, masked_equal=..., rtol=..., atol=...): ... +def asarray(a, dtype=..., order=...): ... +def asanyarray(a, dtype=...): ... +def fromflex(fxarray): ... + +class _convert2ma: + __doc__: Any + def __init__(self, funcname, params=...): ... + def getdoc(self): ... + def __call__(self, *args, **params): ... + +arange: _convert2ma +empty: _convert2ma +empty_like: _convert2ma +frombuffer: _convert2ma +fromfunction: _convert2ma +identity: _convert2ma +ones: _convert2ma +zeros: _convert2ma + +def append(a, b, axis=...): ... +def dot(a, b, strict=..., out=...): ... +def mask_rowcols(a, axis=...): ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/extras.py b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/extras.py new file mode 100644 index 00000000..8a6246c3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/extras.py @@ -0,0 +1,2133 @@ +""" +Masked arrays add-ons. + +A collection of utilities for `numpy.ma`. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +__all__ = [ + 'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d', + 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', 'column_stack', + 'compress_cols', 'compress_nd', 'compress_rowcols', 'compress_rows', + 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot', 'dstack', 'ediff1d', + 'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack', + 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols', 'mask_rows', + 'masked_all', 'masked_all_like', 'median', 'mr_', 'ndenumerate', + 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack', + 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack', + ] + +import itertools +import warnings + +from . import core as ma +from .core import ( + MaskedArray, MAError, add, array, asarray, concatenate, filled, count, + getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or, + nomask, ones, sort, zeros, getdata, get_masked_subclass, dot + ) + +import numpy as np +from numpy import ndarray, array as nxarray +from numpy.core.multiarray import normalize_axis_index +from numpy.core.numeric import normalize_axis_tuple +from numpy.lib.function_base import _ureduce +from numpy.lib.index_tricks import AxisConcatenator + + +def issequence(seq): + """ + Is seq a sequence (ndarray, list or tuple)? + + """ + return isinstance(seq, (ndarray, tuple, list)) + + +def count_masked(arr, axis=None): + """ + Count the number of masked elements along the given axis. 
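+
+    Counting is based on `getmaskarray`, so an array without a mask
+    (``nomask``) reports zero masked elements.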
+ + Parameters + ---------- + arr : array_like + An array with (possibly) masked elements. + axis : int, optional + Axis along which to count. If None (default), a flattened + version of the array is used. + + Returns + ------- + count : int, ndarray + The total number of masked elements (axis=None) or the number + of masked elements along each slice of the given axis. + + See Also + -------- + MaskedArray.count : Count non-masked elements. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.arange(9).reshape((3,3)) + >>> a = ma.array(a) + >>> a[1, 0] = ma.masked + >>> a[1, 2] = ma.masked + >>> a[2, 1] = ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, False, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> ma.count_masked(a) + 3 + + When the `axis` keyword is used an array is returned. + + >>> ma.count_masked(a, axis=0) + array([1, 1, 1]) + >>> ma.count_masked(a, axis=1) + array([0, 2, 1]) + + """ + m = getmaskarray(arr) + return m.sum(axis) + + +def masked_all(shape, dtype=float): + """ + Empty masked array with all elements masked. + + Return an empty masked array of the given shape and dtype, where all the + data are masked. + + Parameters + ---------- + shape : int or tuple of ints + Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``. + dtype : dtype, optional + Data type of the output. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + See Also + -------- + masked_all_like : Empty masked array modelled on an existing array. + + Examples + -------- + >>> import numpy.ma as ma + >>> ma.masked_all((3, 3)) + masked_array( + data=[[--, --, --], + [--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True], + [ True, True, True]], + fill_value=1e+20, + dtype=float64) + + The `dtype` parameter defines the underlying data type. + + >>> a = ma.masked_all((3, 3)) + >>> a.dtype + dtype('float64') + >>> a = ma.masked_all((3, 3), dtype=np.int32) + >>> a.dtype + dtype('int32') + + """ + a = masked_array(np.empty(shape, dtype), + mask=np.ones(shape, make_mask_descr(dtype))) + return a + + +def masked_all_like(arr): + """ + Empty masked array with the properties of an existing array. + + Return an empty masked array of the same shape and dtype as + the array `arr`, where all the data are masked. + + Parameters + ---------- + arr : ndarray + An array describing the shape and dtype of the required MaskedArray. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + Raises + ------ + AttributeError + If `arr` doesn't have a shape attribute (i.e. not an ndarray) + + See Also + -------- + masked_all : Empty masked array with all elements masked. + + Examples + -------- + >>> import numpy.ma as ma + >>> arr = np.zeros((2, 3), dtype=np.float32) + >>> arr + array([[0., 0., 0.], + [0., 0., 0.]], dtype=float32) + >>> ma.masked_all_like(arr) + masked_array( + data=[[--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True]], + fill_value=1e+20, + dtype=float32) + + The dtype of the masked array matches the dtype of `arr`. 
+ + >>> arr.dtype + dtype('float32') + >>> ma.masked_all_like(arr).dtype + dtype('float32') + + """ + a = np.empty_like(arr).view(MaskedArray) + a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype)) + return a + + +#####-------------------------------------------------------------------------- +#---- --- Standard functions --- +#####-------------------------------------------------------------------------- +class _fromnxfunction: + """ + Defines a wrapper to adapt NumPy functions to masked arrays. + + + An instance of `_fromnxfunction` can be called with the same parameters + as the wrapped NumPy function. The docstring of `newfunc` is adapted from + the wrapped function as well, see `getdoc`. + + This class should not be used directly. Instead, one of its extensions that + provides support for a specific type of input should be used. + + Parameters + ---------- + funcname : str + The name of the function to be adapted. The function should be + in the NumPy namespace (i.e. ``np.funcname``). + + """ + + def __init__(self, funcname): + self.__name__ = funcname + self.__doc__ = self.getdoc() + + def getdoc(self): + """ + Retrieve the docstring and signature from the function. + + The ``__doc__`` attribute of the function is used as the docstring for + the new masked array version of the function. A note on application + of the function to the mask is appended. + + Parameters + ---------- + None + + """ + npfunc = getattr(np, self.__name__, None) + doc = getattr(npfunc, '__doc__', None) + if doc: + sig = self.__name__ + ma.get_object_signature(npfunc) + doc = ma.doc_note(doc, "The function is applied to both the _data " + "and the _mask, if any.") + return '\n\n'.join((sig, doc)) + return + + def __call__(self, *args, **params): + pass + + +class _fromnxfunction_single(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with a single array + argument followed by auxiliary args that are passed verbatim for + both the data and mask calls. + """ + def __call__(self, x, *args, **params): + func = getattr(np, self.__name__) + if isinstance(x, ndarray): + _d = func(x.__array__(), *args, **params) + _m = func(getmaskarray(x), *args, **params) + return masked_array(_d, mask=_m) + else: + _d = func(np.asarray(x), *args, **params) + _m = func(getmaskarray(x), *args, **params) + return masked_array(_d, mask=_m) + + +class _fromnxfunction_seq(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with a single sequence + of arrays followed by auxiliary args that are passed verbatim for + both the data and mask calls. + """ + def __call__(self, x, *args, **params): + func = getattr(np, self.__name__) + _d = func(tuple([np.asarray(a) for a in x]), *args, **params) + _m = func(tuple([getmaskarray(a) for a in x]), *args, **params) + return masked_array(_d, mask=_m) + + +class _fromnxfunction_args(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with multiple array + arguments. The first non-array-like input marks the beginning of the + arguments that are passed verbatim for both the data and mask calls. + Array arguments are processed independently and the results are + returned in a list. If only one array is found, the return value is + just the processed array instead of a list. 
+ """ + def __call__(self, *args, **params): + func = getattr(np, self.__name__) + arrays = [] + args = list(args) + while len(args) > 0 and issequence(args[0]): + arrays.append(args.pop(0)) + res = [] + for x in arrays: + _d = func(np.asarray(x), *args, **params) + _m = func(getmaskarray(x), *args, **params) + res.append(masked_array(_d, mask=_m)) + if len(arrays) == 1: + return res[0] + return res + + +class _fromnxfunction_allargs(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with multiple array + arguments. Similar to `_fromnxfunction_args` except that all args + are converted to arrays even if they are not so already. This makes + it possible to process scalars as 1-D arrays. Only keyword arguments + are passed through verbatim for the data and mask calls. Arrays + arguments are processed independently and the results are returned + in a list. If only one arg is present, the return value is just the + processed array instead of a list. + """ + def __call__(self, *args, **params): + func = getattr(np, self.__name__) + res = [] + for x in args: + _d = func(np.asarray(x), **params) + _m = func(getmaskarray(x), **params) + res.append(masked_array(_d, mask=_m)) + if len(args) == 1: + return res[0] + return res + + +atleast_1d = _fromnxfunction_allargs('atleast_1d') +atleast_2d = _fromnxfunction_allargs('atleast_2d') +atleast_3d = _fromnxfunction_allargs('atleast_3d') + +vstack = row_stack = _fromnxfunction_seq('vstack') +hstack = _fromnxfunction_seq('hstack') +column_stack = _fromnxfunction_seq('column_stack') +dstack = _fromnxfunction_seq('dstack') +stack = _fromnxfunction_seq('stack') + +hsplit = _fromnxfunction_single('hsplit') + +diagflat = _fromnxfunction_single('diagflat') + + +#####-------------------------------------------------------------------------- +#---- +#####-------------------------------------------------------------------------- +def flatten_inplace(seq): + """Flatten a sequence in place.""" + k = 0 + while (k != len(seq)): + while hasattr(seq[k], '__iter__'): + seq[k:(k + 1)] = seq[k] + k += 1 + return seq + + +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + (This docstring should be overwritten) + """ + arr = array(arr, copy=False, subok=True) + nd = arr.ndim + axis = normalize_axis_index(axis, nd) + ind = [0] * (nd - 1) + i = np.zeros(nd, 'O') + indlist = list(range(nd)) + indlist.remove(axis) + i[axis] = slice(None, None) + outshape = np.asarray(arr.shape).take(indlist) + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + # if res is a number, then we have a smaller output array + asscalar = np.isscalar(res) + if not asscalar: + try: + len(res) + except TypeError: + asscalar = True + # Note: we shouldn't set the dtype of the output from the first result + # so we force the type to object, and build a list of dtypes. 
We'll + # just take the largest, to avoid some downcasting + dtypes = [] + if asscalar: + dtypes.append(np.asarray(res).dtype) + outarr = zeros(outshape, object) + outarr[tuple(ind)] = res + Ntot = np.prod(outshape) + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= outshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(ind)] = res + dtypes.append(asarray(res).dtype) + k += 1 + else: + res = array(res, copy=False, subok=True) + j = i.copy() + j[axis] = ([slice(None, None)] * res.ndim) + j.put(indlist, ind) + Ntot = np.prod(outshape) + holdshape = outshape + outshape = list(arr.shape) + outshape[axis] = res.shape + dtypes.append(asarray(res).dtype) + outshape = flatten_inplace(outshape) + outarr = zeros(outshape, object) + outarr[tuple(flatten_inplace(j.tolist()))] = res + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= holdshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + j.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(flatten_inplace(j.tolist()))] = res + dtypes.append(asarray(res).dtype) + k += 1 + max_dtypes = np.dtype(np.asarray(dtypes).max()) + if not hasattr(arr, '_mask'): + result = np.asarray(outarr, dtype=max_dtypes) + else: + result = asarray(outarr, dtype=max_dtypes) + result.fill_value = ma.default_fill_value(result) + return result +apply_along_axis.__doc__ = np.apply_along_axis.__doc__ + + +def apply_over_axes(func, a, axes): + """ + (This docstring will be overwritten) + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = ma.expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + + +if apply_over_axes.__doc__ is not None: + apply_over_axes.__doc__ = np.apply_over_axes.__doc__[ + :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \ + """ + + Examples + -------- + >>> a = np.ma.arange(24).reshape(2,3,4) + >>> a[:,0,1] = np.ma.masked + >>> a[:,1,:] = np.ma.masked + >>> a + masked_array( + data=[[[0, --, 2, 3], + [--, --, --, --], + [8, 9, 10, 11]], + [[12, --, 14, 15], + [--, --, --, --], + [20, 21, 22, 23]]], + mask=[[[False, True, False, False], + [ True, True, True, True], + [False, False, False, False]], + [[False, True, False, False], + [ True, True, True, True], + [False, False, False, False]]], + fill_value=999999) + >>> np.ma.apply_over_axes(np.ma.sum, a, [0,2]) + masked_array( + data=[[[46], + [--], + [124]]], + mask=[[[False], + [ True], + [False]]], + fill_value=999999) + + Tuple axis arguments to ufuncs are equivalent: + + >>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1)) + masked_array( + data=[[[46], + [--], + [124]]], + mask=[[[False], + [ True], + [False]]], + fill_value=999999) + """ + + +def average(a, axis=None, weights=None, returned=False, *, + keepdims=np._NoValue): + """ + Return the weighted average of array over the given axis. + + Parameters + ---------- + a : array_like + Data to be averaged. + Masked entries are not taken into account in the computation. + axis : int, optional + Axis along which to average `a`. If None, averaging is done over + the flattened array. 
+ weights : array_like, optional + The importance that each element has in the computation of the average. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If ``weights=None``, then all data in `a` are assumed to have a + weight equal to one. The 1-D calculation is:: + + avg = sum(a * weights) / sum(weights) + + The only constraint on `weights` is that `sum(weights)` must not be 0. + returned : bool, optional + Flag indicating whether a tuple ``(result, sum of weights)`` + should be returned as output (True), or just the result (False). + Default is False. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + *Note:* `keepdims` will not work with instances of `numpy.matrix` + or other classes whose methods do not support `keepdims`. + + .. versionadded:: 1.23.0 + + Returns + ------- + average, [sum_of_weights] : (tuple of) scalar or MaskedArray + The average along the specified axis. When returned is `True`, + return a tuple with the average as the first element and the sum + of the weights as the second element. The return type is `np.float64` + if `a` is of integer type and floats smaller than `float64`, or the + input data-type, otherwise. If returned, `sum_of_weights` is always + `float64`. + + Examples + -------- + >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) + >>> np.ma.average(a, weights=[3, 1, 0, 0]) + 1.25 + + >>> x = np.ma.arange(6.).reshape(3, 2) + >>> x + masked_array( + data=[[0., 1.], + [2., 3.], + [4., 5.]], + mask=False, + fill_value=1e+20) + >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3], + ... returned=True) + >>> avg + masked_array(data=[2.6666666666666665, 3.6666666666666665], + mask=[False, False], + fill_value=1e+20) + + With ``keepdims=True``, the following result has shape (3, 1). + + >>> np.ma.average(x, axis=1, keepdims=True) + masked_array( + data=[[0.5], + [2.5], + [4.5]], + mask=False, + fill_value=1e+20) + """ + a = asarray(a) + m = getmask(a) + + # inspired by 'average' in numpy/lib/function_base.py + + if keepdims is np._NoValue: + # Don't pass on the keepdims argument if one wasn't given. 
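+        # (np._NoValue is the "not supplied" sentinel; omitting the keyword
+        # keeps array-likes whose methods lack keepdims support working.)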
+ keepdims_kw = {} + else: + keepdims_kw = {'keepdims': keepdims} + + if weights is None: + avg = a.mean(axis, **keepdims_kw) + scl = avg.dtype.type(a.count(axis)) + else: + wgt = asarray(weights) + + if issubclass(a.dtype.type, (np.integer, np.bool_)): + result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') + else: + result_dtype = np.result_type(a.dtype, wgt.dtype) + + # Sanity checks + if a.shape != wgt.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of a and weights " + "differ.") + if wgt.ndim != 1: + raise TypeError( + "1D weights expected when shapes of a and weights differ.") + if wgt.shape[0] != a.shape[axis]: + raise ValueError( + "Length of weights not compatible with specified axis.") + + # setup wgt to broadcast along axis + wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape, subok=True) + wgt = wgt.swapaxes(-1, axis) + + if m is not nomask: + wgt = wgt*(~a.mask) + wgt.mask |= a.mask + + scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw) + avg = np.multiply(a, wgt, + dtype=result_dtype).sum(axis, **keepdims_kw) / scl + + if returned: + if scl.shape != avg.shape: + scl = np.broadcast_to(scl, avg.shape).copy() + return avg, scl + else: + return avg + + +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): + """ + Compute the median along the specified axis. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : int, optional + Axis along which the medians are computed. The default (None) is + to compute the median along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array (a) for + calculations. The input array will be modified by the call to + median. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. Note that, if `overwrite_input` is True, and the input + is not already an `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + .. versionadded:: 1.10.0 + + Returns + ------- + median : ndarray + A new array holding the result is returned unless out is + specified, in which case a reference to out is returned. + Return data-type is `float64` for integers and floats smaller than + `float64`, or the input data-type, otherwise. + + See Also + -------- + mean + + Notes + ----- + Given a vector ``V`` with ``N`` non masked values, the median of ``V`` + is the middle value of a sorted copy of ``V`` (``Vs``) - i.e. + ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2`` + when ``N`` is even. 
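+    Masked entries are ignored: ``N`` counts only the non-masked values
+    along the reduction axis.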
+ + Examples + -------- + >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) + >>> np.ma.median(x) + 1.5 + + >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4) + >>> np.ma.median(x) + 2.5 + >>> np.ma.median(x, axis=-1, overwrite_input=True) + masked_array(data=[2.0, 5.0], + mask=[False, False], + fill_value=1e+20) + + """ + if not hasattr(a, 'mask'): + m = np.median(getdata(a, subok=True), axis=axis, + out=out, overwrite_input=overwrite_input, + keepdims=keepdims) + if isinstance(m, np.ndarray) and 1 <= m.ndim: + return masked_array(m, copy=False) + else: + return m + + return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out, + overwrite_input=overwrite_input) + + +def _median(a, axis=None, out=None, overwrite_input=False): + # when an unmasked NaN is present return it, so we need to sort the NaN + # values behind the mask + if np.issubdtype(a.dtype, np.inexact): + fill_value = np.inf + else: + fill_value = None + if overwrite_input: + if axis is None: + asorted = a.ravel() + asorted.sort(fill_value=fill_value) + else: + a.sort(axis=axis, fill_value=fill_value) + asorted = a + else: + asorted = sort(a, axis=axis, fill_value=fill_value) + + if axis is None: + axis = 0 + else: + axis = normalize_axis_index(axis, asorted.ndim) + + if asorted.shape[axis] == 0: + # for empty axis integer indices fail so use slicing to get same result + # as median (which is mean of empty slice = nan) + indexer = [slice(None)] * asorted.ndim + indexer[axis] = slice(0, 0) + indexer = tuple(indexer) + return np.ma.mean(asorted[indexer], axis=axis, out=out) + + if asorted.ndim == 1: + idx, odd = divmod(count(asorted), 2) + mid = asorted[idx + odd - 1:idx + 1] + if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0: + # avoid inf / x = masked + s = mid.sum(out=out) + if not odd: + s = np.true_divide(s, 2., casting='safe', out=out) + s = np.lib.utils._median_nancheck(asorted, s, axis) + else: + s = mid.mean(out=out) + + # if result is masked either the input contained enough + # minimum_fill_value so that it would be the median or all values + # masked + if np.ma.is_masked(s) and not np.all(asorted.mask): + return np.ma.minimum_fill_value(asorted) + return s + + counts = count(asorted, axis=axis, keepdims=True) + h = counts // 2 + + # duplicate high if odd number of elements so mean does nothing + odd = counts % 2 == 1 + l = np.where(odd, h, h-1) + + lh = np.concatenate([l,h], axis=axis) + + # get low and high median + low_high = np.take_along_axis(asorted, lh, axis=axis) + + def replace_masked(s): + # Replace masked entries with minimum_full_value unless it all values + # are masked. This is required as the sort order of values equal or + # larger than the fill value is undefined and a valid value placed + # elsewhere, e.g. [4, --, inf]. + if np.ma.is_masked(s): + rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask + s.data[rep] = np.ma.minimum_fill_value(asorted) + s.mask[rep] = False + + replace_masked(low_high) + + if np.issubdtype(asorted.dtype, np.inexact): + # avoid inf / x = masked + s = np.ma.sum(low_high, axis=axis, out=out) + np.true_divide(s.data, 2., casting='unsafe', out=s.data) + + s = np.lib.utils._median_nancheck(asorted, s, axis) + else: + s = np.ma.mean(low_high, axis=axis, out=out) + + return s + + +def compress_nd(x, axis=None): + """Suppress slices from multiple dimensions which contain masked values. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. 
If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with `mask` + set to `nomask`. + axis : tuple of ints or int, optional + Which dimensions to suppress slices from can be configured with this + parameter. + - If axis is a tuple of ints, those are the axes to suppress slices from. + - If axis is an int, then that is the only axis to suppress slices from. + - If axis is None, all axis are selected. + + Returns + ------- + compress_array : ndarray + The compressed array. + """ + x = asarray(x) + m = getmask(x) + # Set axis to tuple of ints + if axis is None: + axis = tuple(range(x.ndim)) + else: + axis = normalize_axis_tuple(axis, x.ndim) + + # Nothing is masked: return x + if m is nomask or not m.any(): + return x._data + # All is masked: return empty + if m.all(): + return nxarray([]) + # Filter elements through boolean indexing + data = x._data + for ax in axis: + axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim))) + data = data[(slice(None),)*ax + (~m.any(axis=axes),)] + return data + + +def compress_rowcols(x, axis=None): + """ + Suppress the rows and/or columns of a 2-D array that contain + masked values. + + The suppression behavior is selected with the `axis` parameter. + + - If axis is None, both rows and columns are suppressed. + - If axis is 0, only rows are suppressed. + - If axis is 1 or -1, only columns are suppressed. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with + `mask` set to `nomask`. Must be a 2D array. + axis : int, optional + Axis along which to perform the operation. Default is None. + + Returns + ------- + compressed_array : ndarray + The compressed array. + + Examples + -------- + >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> x + masked_array( + data=[[--, 1, 2], + [--, 4, 5], + [6, 7, 8]], + mask=[[ True, False, False], + [ True, False, False], + [False, False, False]], + fill_value=999999) + + >>> np.ma.compress_rowcols(x) + array([[7, 8]]) + >>> np.ma.compress_rowcols(x, 0) + array([[6, 7, 8]]) + >>> np.ma.compress_rowcols(x, 1) + array([[1, 2], + [4, 5], + [7, 8]]) + + """ + if asarray(x).ndim != 2: + raise NotImplementedError("compress_rowcols works for 2D arrays only.") + return compress_nd(x, axis=axis) + + +def compress_rows(a): + """ + Suppress whole rows of a 2-D array that contain masked values. + + This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see + `compress_rowcols` for details. + + See Also + -------- + compress_rowcols + + """ + a = asarray(a) + if a.ndim != 2: + raise NotImplementedError("compress_rows works for 2D arrays only.") + return compress_rowcols(a, 0) + + +def compress_cols(a): + """ + Suppress whole columns of a 2-D array that contain masked values. + + This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see + `compress_rowcols` for details. + + See Also + -------- + compress_rowcols + + """ + a = asarray(a) + if a.ndim != 2: + raise NotImplementedError("compress_cols works for 2D arrays only.") + return compress_rowcols(a, 1) + + +def mask_rowcols(a, axis=None): + """ + Mask rows and/or columns of a 2D array that contain masked values. + + Mask whole rows and/or columns of a 2D array that contain + masked values. The masking behavior is selected using the + `axis` parameter. + + - If `axis` is None, rows *and* columns are masked. 
+ - If `axis` is 0, only rows are masked. + - If `axis` is 1 or -1, only columns are masked. + + Parameters + ---------- + a : array_like, MaskedArray + The array to mask. If not a MaskedArray instance (or if no array + elements are masked), the result is a MaskedArray with `mask` set + to `nomask` (False). Must be a 2D array. + axis : int, optional + Axis along which to perform the operation. If None, applies to a + flattened version of the array. + + Returns + ------- + a : MaskedArray + A modified version of the input array, masked depending on the value + of the `axis` parameter. + + Raises + ------ + NotImplementedError + If input array `a` is not 2D. + + See Also + -------- + mask_rows : Mask rows of a 2D array that contain masked values. + mask_cols : Mask cols of a 2D array that contain masked values. + masked_where : Mask where a condition is met. + + Notes + ----- + The input array's mask is modified by this function. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + >>> ma.mask_rowcols(a) + masked_array( + data=[[0, --, 0], + [--, --, --], + [0, --, 0]], + mask=[[False, True, False], + [ True, True, True], + [False, True, False]], + fill_value=1) + + """ + a = array(a, subok=False) + if a.ndim != 2: + raise NotImplementedError("mask_rowcols works for 2D arrays only.") + m = getmask(a) + # Nothing is masked: return a + if m is nomask or not m.any(): + return a + maskedval = m.nonzero() + a._mask = a._mask.copy() + if not axis: + a[np.unique(maskedval[0])] = masked + if axis in [None, 1, -1]: + a[:, np.unique(maskedval[1])] = masked + return a + + +def mask_rows(a, axis=np._NoValue): + """ + Mask rows of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0. + + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + + >>> ma.mask_rows(a) + masked_array( + data=[[0, 0, 0], + [--, --, --], + [0, 0, 0]], + mask=[[False, False, False], + [ True, True, True], + [False, False, False]], + fill_value=1) + + """ + if axis is not np._NoValue: + # remove the axis argument when this deprecation expires + # NumPy 1.18.0, 2019-11-28 + warnings.warn( + "The axis argument has always been ignored, in future passing it " + "will raise TypeError", DeprecationWarning, stacklevel=2) + return mask_rowcols(a, 0) + + +def mask_cols(a, axis=np._NoValue): + """ + Mask columns of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1. + + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. 
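+
+    Notes
+    -----
+    As with `mask_rowcols`, the input array's mask is modified by this
+    function.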
+ + Examples + -------- + >>> import numpy.ma as ma + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + >>> ma.mask_cols(a) + masked_array( + data=[[0, --, 0], + [0, --, 0], + [0, --, 0]], + mask=[[False, True, False], + [False, True, False], + [False, True, False]], + fill_value=1) + + """ + if axis is not np._NoValue: + # remove the axis argument when this deprecation expires + # NumPy 1.18.0, 2019-11-28 + warnings.warn( + "The axis argument has always been ignored, in future passing it " + "will raise TypeError", DeprecationWarning, stacklevel=2) + return mask_rowcols(a, 1) + + +#####-------------------------------------------------------------------------- +#---- --- arraysetops --- +#####-------------------------------------------------------------------------- + +def ediff1d(arr, to_end=None, to_begin=None): + """ + Compute the differences between consecutive elements of an array. + + This function is the equivalent of `numpy.ediff1d` that takes masked + values into account, see `numpy.ediff1d` for details. + + See Also + -------- + numpy.ediff1d : Equivalent function for ndarrays. + + """ + arr = ma.asanyarray(arr).flat + ed = arr[1:] - arr[:-1] + arrays = [ed] + # + if to_begin is not None: + arrays.insert(0, to_begin) + if to_end is not None: + arrays.append(to_end) + # + if len(arrays) != 1: + # We'll save ourselves a copy of a potentially large array in the common + # case where neither to_begin or to_end was given. + ed = hstack(arrays) + # + return ed + + +def unique(ar1, return_index=False, return_inverse=False): + """ + Finds the unique elements of an array. + + Masked values are considered the same element (masked). The output array + is always a masked array. See `numpy.unique` for more details. + + See Also + -------- + numpy.unique : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = [1, 2, 1000, 2, 3] + >>> mask = [0, 0, 1, 0, 0] + >>> masked_a = ma.masked_array(a, mask) + >>> masked_a + masked_array(data=[1, 2, --, 2, 3], + mask=[False, False, True, False, False], + fill_value=999999) + >>> ma.unique(masked_a) + masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999) + >>> ma.unique(masked_a, return_index=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 4, 2])) + >>> ma.unique(masked_a, return_inverse=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 3, 1, 2])) + >>> ma.unique(masked_a, return_index=True, return_inverse=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 4, 2]), array([0, 1, 3, 1, 2])) + """ + output = np.unique(ar1, + return_index=return_index, + return_inverse=return_inverse) + if isinstance(output, tuple): + output = list(output) + output[0] = output[0].view(MaskedArray) + output = tuple(output) + else: + output = output.view(MaskedArray) + return output + + +def intersect1d(ar1, ar2, assume_unique=False): + """ + Returns the unique elements common to both arrays. + + Masked values are considered equal one to the other. + The output is always a masked array. + + See `numpy.intersect1d` for more details. 
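+    Because all masked values compare equal to one another, at most one
+    masked entry appears in the output.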
+ + See Also + -------- + numpy.intersect1d : Equivalent function for ndarrays. + + Examples + -------- + >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + >>> np.ma.intersect1d(x, y) + masked_array(data=[1, 3, --], + mask=[False, False, True], + fill_value=999999) + + """ + if assume_unique: + aux = ma.concatenate((ar1, ar2)) + else: + # Might be faster than unique( intersect1d( ar1, ar2 ) )? + aux = ma.concatenate((unique(ar1), unique(ar2))) + aux.sort() + return aux[:-1][aux[1:] == aux[:-1]] + + +def setxor1d(ar1, ar2, assume_unique=False): + """ + Set exclusive-or of 1-D arrays with unique elements. + + The output is always a masked array. See `numpy.setxor1d` for more details. + + See Also + -------- + numpy.setxor1d : Equivalent function for ndarrays. + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = ma.concatenate((ar1, ar2)) + if aux.size == 0: + return aux + aux.sort() + auxf = aux.filled() +# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 + flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True])) +# flag2 = ediff1d( flag ) == 0 + flag2 = (flag[1:] == flag[:-1]) + return aux[flag2] + + +def in1d(ar1, ar2, assume_unique=False, invert=False): + """ + Test whether each element of an array is also present in a second + array. + + The output is always a masked array. See `numpy.in1d` for more details. + + We recommend using :func:`isin` instead of `in1d` for new code. + + See Also + -------- + isin : Version of this function that preserves the shape of ar1. + numpy.in1d : Equivalent function for ndarrays. + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + if not assume_unique: + ar1, rev_idx = unique(ar1, return_inverse=True) + ar2 = unique(ar2) + + ar = ma.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = ma.concatenate((bool_ar, [invert])) + indx = order.argsort(kind='mergesort')[:len(ar1)] + + if assume_unique: + return flag[indx] + else: + return flag[indx][rev_idx] + + +def isin(element, test_elements, assume_unique=False, invert=False): + """ + Calculates `element in test_elements`, broadcasting over + `element` only. + + The output is always a masked array of the same shape as `element`. + See `numpy.isin` for more details. + + See Also + -------- + in1d : Flattened version of this function. + numpy.isin : Equivalent function for ndarrays. + + Notes + ----- + .. versionadded:: 1.13.0 + + """ + element = ma.asarray(element) + return in1d(element, test_elements, assume_unique=assume_unique, + invert=invert).reshape(element.shape) + + +def union1d(ar1, ar2): + """ + Union of two arrays. + + The output is always a masked array. See `numpy.union1d` for more details. + + See Also + -------- + numpy.union1d : Equivalent function for ndarrays. + + """ + return unique(ma.concatenate((ar1, ar2), axis=None)) + + +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Set difference of 1D arrays with unique elements. + + The output is always a masked array. See `numpy.setdiff1d` for more + details. + + See Also + -------- + numpy.setdiff1d : Equivalent function for ndarrays. 
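+
+    Notes
+    -----
+    Masked entries of `ar1` are treated as a value of their own: they are
+    dropped only if `ar2` also contains masked entries. A sketch (repr
+    abbreviated):
+
+    >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+    >>> np.ma.setdiff1d(x, np.ma.array([0], mask=[1]))
+    masked_array(data=[1, 3], mask=[False, False], fill_value=999999)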
+ + Examples + -------- + >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) + >>> np.ma.setdiff1d(x, [1, 2]) + masked_array(data=[3, --], + mask=[False, True], + fill_value=999999) + + """ + if assume_unique: + ar1 = ma.asarray(ar1).ravel() + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)] + + +############################################################################### +# Covariance # +############################################################################### + + +def _covhelper(x, y=None, rowvar=True, allow_masked=True): + """ + Private function for the computation of covariance and correlation + coefficients. + + """ + x = ma.array(x, ndmin=2, copy=True, dtype=float) + xmask = ma.getmaskarray(x) + # Quick exit if we can't process masked data + if not allow_masked and xmask.any(): + raise ValueError("Cannot process masked data.") + # + if x.shape[0] == 1: + rowvar = True + # Make sure that rowvar is either 0 or 1 + rowvar = int(bool(rowvar)) + axis = 1 - rowvar + if rowvar: + tup = (slice(None), None) + else: + tup = (None, slice(None)) + # + if y is None: + xnotmask = np.logical_not(xmask).astype(int) + else: + y = array(y, copy=False, ndmin=2, dtype=float) + ymask = ma.getmaskarray(y) + if not allow_masked and ymask.any(): + raise ValueError("Cannot process masked data.") + if xmask.any() or ymask.any(): + if y.shape == x.shape: + # Define some common mask + common_mask = np.logical_or(xmask, ymask) + if common_mask is not nomask: + xmask = x._mask = y._mask = ymask = common_mask + x._sharedmask = False + y._sharedmask = False + x = ma.concatenate((x, y), axis) + xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int) + x -= x.mean(axis=rowvar)[tup] + return (x, xnotmask, rowvar) + + +def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): + """ + Estimate the covariance matrix. + + Except for the handling of missing data this function does the same as + `numpy.cov`. For more details and examples, see `numpy.cov`. + + By default, masked values are recognized as such. If `x` and `y` have the + same shape, a common mask is allocated: if ``x[i,j]`` is masked, then + ``y[i,j]`` will also be masked. + Setting `allow_masked` to False will raise an exception if values are + missing in either of the input arrays. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N-1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. This keyword can be overridden by + the keyword ``ddof`` in numpy versions >= 1.5. + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises a `ValueError` exception when some values are missing. 
+ ddof : {None, int}, optional + If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is + the number of observations; this overrides the value implied by + ``bias``. The default value is ``None``. + + .. versionadded:: 1.5 + + Raises + ------ + ValueError + Raised if some values are missing and `allow_masked` is False. + + See Also + -------- + numpy.cov + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError("ddof must be an integer") + # Set up ddof + if ddof is None: + if bias: + ddof = 0 + else: + ddof = 1 + + (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) + if not rowvar: + fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof + result = (dot(x.T, x.conj(), strict=False) / fact).squeeze() + else: + fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof + result = (dot(x, x.T.conj(), strict=False) / fact).squeeze() + return result + + +def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, + ddof=np._NoValue): + """ + Return Pearson product-moment correlation coefficients. + + Except for the handling of missing data this function does the same as + `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises an exception. Because `bias` is deprecated, this + argument needs to be treated as keyword only to avoid a warning. + ddof : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + + See Also + -------- + numpy.corrcoef : Equivalent function in top-level NumPy module. + cov : Estimate the covariance matrix. + + Notes + ----- + This function accepts but discards arguments `bias` and `ddof`. This is + for backwards compatibility with previous versions of this function. These + arguments had no effect on the return values of the function and can be + safely ignored in this and previous versions of numpy. + """ + msg = 'bias and ddof have no effect and are deprecated' + if bias is not np._NoValue or ddof is not np._NoValue: + # 2015-03-15, 1.10 + warnings.warn(msg, DeprecationWarning, stacklevel=2) + # Get the data + (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) + # Compute the covariance matrix + if not rowvar: + fact = np.dot(xnotmask.T, xnotmask) * 1. + c = (dot(x.T, x.conj(), strict=False) / fact).squeeze() + else: + fact = np.dot(xnotmask, xnotmask.T) * 1. 
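+        # `xnotmask` is 1 where an observation is valid, so this product
+        # counts, for each pair of variables, the observations that are
+        # unmasked in both; it is the effective N that normalizes the dot
+        # product on the next line.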
+ c = (dot(x, x.T.conj(), strict=False) / fact).squeeze() + # Check whether we have a scalar + try: + diag = ma.diagonal(c) + except ValueError: + return 1 + # + if xnotmask.all(): + _denom = ma.sqrt(ma.multiply.outer(diag, diag)) + else: + _denom = diagflat(diag) + _denom._sharedmask = False # We know return is always a copy + n = x.shape[1 - rowvar] + if rowvar: + for i in range(n - 1): + for j in range(i + 1, n): + _x = mask_cols(vstack((x[i], x[j]))).var(axis=1) + _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) + else: + for i in range(n - 1): + for j in range(i + 1, n): + _x = mask_cols( + vstack((x[:, i], x[:, j]))).var(axis=1) + _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) + return c / _denom + +#####-------------------------------------------------------------------------- +#---- --- Concatenation helpers --- +#####-------------------------------------------------------------------------- + +class MAxisConcatenator(AxisConcatenator): + """ + Translate slice objects to concatenation along an axis. + + For documentation on usage, see `mr_class`. + + See Also + -------- + mr_class + + """ + concatenate = staticmethod(concatenate) + + @classmethod + def makemat(cls, arr): + # There used to be a view as np.matrix here, but we may eventually + # deprecate that class. In preparation, we use the unmasked version + # to construct the matrix (with copy=False for backwards compatibility + # with the .view) + data = super().makemat(arr.data, copy=False) + return array(data, mask=arr.mask) + + def __getitem__(self, key): + # matrix builder syntax, like 'a, b; c, d' + if isinstance(key, str): + raise MAError("Unavailable for masked array.") + + return super().__getitem__(key) + + +class mr_class(MAxisConcatenator): + """ + Translate slice objects to concatenation along the first axis. + + This is the masked array version of `lib.index_tricks.RClass`. + + See Also + -------- + lib.index_tricks.RClass + + Examples + -------- + >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] + masked_array(data=[1, 2, 3, ..., 4, 5, 6], + mask=False, + fill_value=999999) + + """ + def __init__(self): + MAxisConcatenator.__init__(self, 0) + +mr_ = mr_class() + + +#####-------------------------------------------------------------------------- +#---- Find unmasked data --- +#####-------------------------------------------------------------------------- + +def ndenumerate(a, compressed=True): + """ + Multidimensional index iterator. + + Return an iterator yielding pairs of array coordinates and values, + skipping elements that are masked. With `compressed=False`, + `ma.masked` is yielded as the value of masked elements. This + behavior differs from that of `numpy.ndenumerate`, which yields the + value of the underlying data array. + + Notes + ----- + .. versionadded:: 1.23.0 + + Parameters + ---------- + a : array_like + An array with (possibly) masked elements. + compressed : bool, optional + If True (default), masked elements are skipped. + + See Also + -------- + numpy.ndenumerate : Equivalent function ignoring any mask. + + Examples + -------- + >>> a = np.ma.arange(9).reshape((3, 3)) + >>> a[1, 0] = np.ma.masked + >>> a[1, 2] = np.ma.masked + >>> a[2, 1] = np.ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, False, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> for index, x in np.ma.ndenumerate(a): + ... 
print(index, x) + (0, 0) 0 + (0, 1) 1 + (0, 2) 2 + (1, 1) 4 + (2, 0) 6 + (2, 2) 8 + + >>> for index, x in np.ma.ndenumerate(a, compressed=False): + ... print(index, x) + (0, 0) 0 + (0, 1) 1 + (0, 2) 2 + (1, 0) -- + (1, 1) 4 + (1, 2) -- + (2, 0) 6 + (2, 1) -- + (2, 2) 8 + """ + for it, mask in zip(np.ndenumerate(a), getmaskarray(a).flat): + if not mask: + yield it + elif not compressed: + yield it[0], masked + + +def flatnotmasked_edges(a): + """ + Find the indices of the first and last unmasked values. + + Expects a 1-D `MaskedArray`, returns None if all values are masked. + + Parameters + ---------- + a : array_like + Input 1-D `MaskedArray` + + Returns + ------- + edges : ndarray or None + The indices of first and last non-masked value in the array. + Returns None if all values are masked. + + See Also + -------- + flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 1-D arrays. + + Examples + -------- + >>> a = np.ma.arange(10) + >>> np.ma.flatnotmasked_edges(a) + array([0, 9]) + + >>> mask = (a < 3) | (a > 8) | (a == 5) + >>> a[mask] = np.ma.masked + >>> np.array(a[~a.mask]) + array([3, 4, 6, 7, 8]) + + >>> np.ma.flatnotmasked_edges(a) + array([3, 8]) + + >>> a[:] = np.ma.masked + >>> print(np.ma.flatnotmasked_edges(a)) + None + + """ + m = getmask(a) + if m is nomask or not np.any(m): + return np.array([0, a.size - 1]) + unmasked = np.flatnonzero(~m) + if len(unmasked) > 0: + return unmasked[[0, -1]] + else: + return None + + +def notmasked_edges(a, axis=None): + """ + Find the indices of the first and last unmasked values along an axis. + + If all values are masked, return None. Otherwise, return a list + of two tuples, corresponding to the indices of the first and last + unmasked values respectively. + + Parameters + ---------- + a : array_like + The input array. + axis : int, optional + Axis along which to perform the operation. + If None (default), applies to a flattened version of the array. + + Returns + ------- + edges : ndarray or list + An array of start and end indexes if there are any masked data in + the array. If there are no masked data in the array, `edges` is a + list of the first and last index. + + See Also + -------- + flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous + clump_masked, clump_unmasked + + Examples + -------- + >>> a = np.arange(9).reshape((3, 3)) + >>> m = np.zeros_like(a) + >>> m[1:, 1:] = 1 + + >>> am = np.ma.array(a, mask=m) + >>> np.array(am[~am.mask]) + array([0, 1, 2, 3, 6]) + + >>> np.ma.notmasked_edges(am) + array([0, 6]) + + """ + a = asarray(a) + if axis is None or a.ndim == 1: + return flatnotmasked_edges(a) + m = getmaskarray(a) + idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim)) + return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]), + tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ] + + +def flatnotmasked_contiguous(a): + """ + Find contiguous unmasked data in a masked array. + + Parameters + ---------- + a : array_like + The input array. + + Returns + ------- + slice_list : list + A sorted sequence of `slice` objects (start index, end index). + + .. versionchanged:: 1.15.0 + Now returns an empty list instead of None for a fully masked array + + See Also + -------- + flatnotmasked_edges, notmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 2-D arrays at most. 
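+
+    For 1-D input the result matches `clump_unmasked` (an equivalence read
+    off the two implementations, not documented API):
+
+    >>> b = np.ma.array(np.arange(4), mask=[1, 0, 0, 1])
+    >>> np.ma.flatnotmasked_contiguous(b) == np.ma.clump_unmasked(b)
+    True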
+ + Examples + -------- + >>> a = np.ma.arange(10) + >>> np.ma.flatnotmasked_contiguous(a) + [slice(0, 10, None)] + + >>> mask = (a < 3) | (a > 8) | (a == 5) + >>> a[mask] = np.ma.masked + >>> np.array(a[~a.mask]) + array([3, 4, 6, 7, 8]) + + >>> np.ma.flatnotmasked_contiguous(a) + [slice(3, 5, None), slice(6, 9, None)] + >>> a[:] = np.ma.masked + >>> np.ma.flatnotmasked_contiguous(a) + [] + + """ + m = getmask(a) + if m is nomask: + return [slice(0, a.size)] + i = 0 + result = [] + for (k, g) in itertools.groupby(m.ravel()): + n = len(list(g)) + if not k: + result.append(slice(i, i + n)) + i += n + return result + + +def notmasked_contiguous(a, axis=None): + """ + Find contiguous unmasked data in a masked array along the given axis. + + Parameters + ---------- + a : array_like + The input array. + axis : int, optional + Axis along which to perform the operation. + If None (default), applies to a flattened version of the array, and this + is the same as `flatnotmasked_contiguous`. + + Returns + ------- + endpoints : list + A list of slices (start and end indexes) of unmasked indexes + in the array. + + If the input is 2d and axis is specified, the result is a list of lists. + + See Also + -------- + flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 2-D arrays at most. + + Examples + -------- + >>> a = np.arange(12).reshape((3, 4)) + >>> mask = np.zeros_like(a) + >>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0 + >>> ma = np.ma.array(a, mask=mask) + >>> ma + masked_array( + data=[[0, --, 2, 3], + [--, --, --, 7], + [8, --, --, 11]], + mask=[[False, True, False, False], + [ True, True, True, False], + [False, True, True, False]], + fill_value=999999) + >>> np.array(ma[~ma.mask]) + array([ 0, 2, 3, 7, 8, 11]) + + >>> np.ma.notmasked_contiguous(ma) + [slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)] + + >>> np.ma.notmasked_contiguous(ma, axis=0) + [[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]] + + >>> np.ma.notmasked_contiguous(ma, axis=1) + [[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]] + + """ + a = asarray(a) + nd = a.ndim + if nd > 2: + raise NotImplementedError("Currently limited to at most 2D array.") + if axis is None or nd == 1: + return flatnotmasked_contiguous(a) + # + result = [] + # + other = (axis + 1) % 2 + idx = [0, 0] + idx[axis] = slice(None, None) + # + for i in range(a.shape[other]): + idx[other] = i + result.append(flatnotmasked_contiguous(a[tuple(idx)])) + return result + + +def _ezclump(mask): + """ + Finds the clumps (groups of data with the same values) for a 1D bool array. + + Returns a series of slices. + """ + if mask.ndim > 1: + mask = mask.ravel() + idx = (mask[1:] ^ mask[:-1]).nonzero() + idx = idx[0] + 1 + + if mask[0]: + if len(idx) == 0: + return [slice(0, mask.size)] + + r = [slice(0, idx[0])] + r.extend((slice(left, right) + for left, right in zip(idx[1:-1:2], idx[2::2]))) + else: + if len(idx) == 0: + return [] + + r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])] + + if mask[-1]: + r.append(slice(idx[-1], mask.size)) + return r + + +def clump_unmasked(a): + """ + Return list of slices corresponding to the unmasked clumps of a 1-D array. + (A "clump" is defined as a contiguous region of the array). + + Parameters + ---------- + a : ndarray + A one-dimensional masked array. 
+
+    Returns
+    -------
+    slices : list of slice
+        The list of slices, one for each continuous region of unmasked
+        elements in `a`.
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    See Also
+    --------
+    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
+    notmasked_contiguous, clump_masked
+
+    Examples
+    --------
+    >>> a = np.ma.masked_array(np.arange(10))
+    >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
+    >>> np.ma.clump_unmasked(a)
+    [slice(3, 6, None), slice(7, 8, None)]
+
+    """
+    mask = getattr(a, '_mask', nomask)
+    if mask is nomask:
+        return [slice(0, a.size)]
+    return _ezclump(~mask)
+
+
+def clump_masked(a):
+    """
+    Returns a list of slices corresponding to the masked clumps of a 1-D array.
+    (A "clump" is defined as a contiguous region of the array).
+
+    Parameters
+    ----------
+    a : ndarray
+        A one-dimensional masked array.
+
+    Returns
+    -------
+    slices : list of slice
+        The list of slices, one for each continuous region of masked elements
+        in `a`.
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    See Also
+    --------
+    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
+    notmasked_contiguous, clump_unmasked
+
+    Examples
+    --------
+    >>> a = np.ma.masked_array(np.arange(10))
+    >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
+    >>> np.ma.clump_masked(a)
+    [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]
+
+    """
+    mask = ma.getmask(a)
+    if mask is nomask:
+        return []
+    return _ezclump(mask)
+
+
+###############################################################################
+#                              Polynomial fit                                 #
+###############################################################################
+
+
+def vander(x, n=None):
+    """
+    Masked values in the input array result in rows of zeros.
+
+    """
+    _vander = np.vander(x, n)
+    m = getmask(x)
+    if m is not nomask:
+        _vander[m] = 0
+    return _vander
+
+vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
+
+
+def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
+    """
+    Any masked values in `x` are propagated to `y`, and vice-versa.
+
+    """
+    x = asarray(x)
+    y = asarray(y)
+
+    m = getmask(x)
+    if y.ndim == 1:
+        m = mask_or(m, getmask(y))
+    elif y.ndim == 2:
+        my = getmask(mask_rows(y))
+        if my is not nomask:
+            m = mask_or(m, my[:, 0])
+    else:
+        raise TypeError("Expected a 1D or 2D array for y!")
+
+    if w is not None:
+        w = asarray(w)
+        if w.ndim != 1:
+            raise TypeError("expected a 1-d array for weights")
+        if w.shape[0] != y.shape[0]:
+            raise TypeError("expected w and y to have the same length")
+        m = mask_or(m, getmask(w))
+
+    if m is not nomask:
+        not_m = ~m
+        if w is not None:
+            w = w[not_m]
+        return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov)
+    else:
+        return np.polyfit(x, y, deg, rcond, full, w, cov)
+
+polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/extras.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/extras.pyi
new file mode 100644
index 00000000..56228b92
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/extras.pyi
@@ -0,0 +1,85 @@
+from typing import Any
+from numpy.lib.index_tricks import AxisConcatenator
+
+from numpy.ma.core import (
+    dot as dot,
+    mask_rowcols as mask_rowcols,
+)
+
+__all__: list[str]
+
+def count_masked(arr, axis=...): ...
+def masked_all(shape, dtype=...): ...
+def masked_all_like(arr): ...
+
+class _fromnxfunction:
+    __name__: Any
+    __doc__: Any
+    def __init__(self, funcname): ...
+    def getdoc(self): ...
+    def __call__(self, *args, **params): ...
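+
+# Usage sketch for the masked `polyfit` stubbed below (illustrative only):
+# masked points are simply excluded from the least-squares fit.
+#
+#     >>> x = np.ma.array([0., 1., 2., 3., 4.], mask=[0, 0, 1, 0, 0])
+#     >>> y = np.array([1., 3., -99., 7., 9.])
+#     >>> np.ma.polyfit(x, y, 1)   # unmasked points lie on y = 2*x + 1
+#     array([2., 1.])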
+
+class _fromnxfunction_single(_fromnxfunction):
+    def __call__(self, x, *args, **params): ...
+
+class _fromnxfunction_seq(_fromnxfunction):
+    def __call__(self, x, *args, **params): ...
+
+class _fromnxfunction_allargs(_fromnxfunction):
+    def __call__(self, *args, **params): ...
+
+atleast_1d: _fromnxfunction_allargs
+atleast_2d: _fromnxfunction_allargs
+atleast_3d: _fromnxfunction_allargs
+
+vstack: _fromnxfunction_seq
+row_stack: _fromnxfunction_seq
+hstack: _fromnxfunction_seq
+column_stack: _fromnxfunction_seq
+dstack: _fromnxfunction_seq
+stack: _fromnxfunction_seq
+
+hsplit: _fromnxfunction_single
+diagflat: _fromnxfunction_single
+
+def apply_along_axis(func1d, axis, arr, *args, **kwargs): ...
+def apply_over_axes(func, a, axes): ...
+def average(a, axis=..., weights=..., returned=..., keepdims=...): ...
+def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ...
+def compress_nd(x, axis=...): ...
+def compress_rowcols(x, axis=...): ...
+def compress_rows(a): ...
+def compress_cols(a): ...
+def mask_rows(a, axis=...): ...
+def mask_cols(a, axis=...): ...
+def ediff1d(arr, to_end=..., to_begin=...): ...
+def unique(ar1, return_index=..., return_inverse=...): ...
+def intersect1d(ar1, ar2, assume_unique=...): ...
+def setxor1d(ar1, ar2, assume_unique=...): ...
+def in1d(ar1, ar2, assume_unique=..., invert=...): ...
+def isin(element, test_elements, assume_unique=..., invert=...): ...
+def union1d(ar1, ar2): ...
+def setdiff1d(ar1, ar2, assume_unique=...): ...
+def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ...
+def corrcoef(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ...
+
+class MAxisConcatenator(AxisConcatenator):
+    concatenate: Any
+    @classmethod
+    def makemat(cls, arr): ...
+    def __getitem__(self, key): ...
+
+class mr_class(MAxisConcatenator):
+    def __init__(self): ...
+
+mr_: mr_class
+
+def ndenumerate(a, compressed=...): ...
+def flatnotmasked_edges(a): ...
+def notmasked_edges(a, axis=...): ...
+def flatnotmasked_contiguous(a): ...
+def notmasked_contiguous(a, axis=...): ...
+def clump_unmasked(a): ...
+def clump_masked(a): ...
+def vander(x, n=...): ...
+def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ...
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/mrecords.py b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/mrecords.py
new file mode 100644
index 00000000..1e8103bc
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/mrecords.py
@@ -0,0 +1,783 @@
+""":mod:`numpy.ma.mrecords`
+
+Defines the equivalent of :class:`numpy.recarray` for masked arrays,
+where fields can be accessed as attributes.
+Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
+and the masking of individual fields.
+
+.. moduleauthor:: Pierre Gerard-Marchant
+
+"""
+# We should make sure that no field is called '_mask', 'mask', '_fieldmask',
+# or any other restricted keyword. An idea would be not to bother in the
+# first place, and then rename the invalid fields with a trailing
+# underscore. Maybe we could just overload the parser function?
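+
+# A quick usage sketch (illustrative only): fields of a MaskedRecords can be
+# read and written as attributes, each carrying its own mask.
+#
+#     >>> from numpy.ma.mrecords import fromarrays
+#     >>> rec = fromarrays([[1, 2], [3.0, 4.0]], names='a,b')
+#     >>> rec.a[1] = np.ma.masked
+#     >>> rec.a        # -> masked_array(data=[1, --], mask=[False, True])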
+ +from numpy.ma import ( + MAError, MaskedArray, masked, nomask, masked_array, getdata, + getmaskarray, filled +) +import numpy.ma as ma +import warnings + +import numpy as np +from numpy import ( + bool_, dtype, ndarray, recarray, array as narray +) +from numpy.core.records import ( + fromarrays as recfromarrays, fromrecords as recfromrecords +) + +_byteorderconv = np.core.records._byteorderconv + + +_check_fill_value = ma.core._check_fill_value + + +__all__ = [ + 'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords', + 'fromtextfile', 'addfield', +] + +reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype'] + + +def _checknames(descr, names=None): + """ + Checks that field names ``descr`` are not reserved keywords. + + If this is the case, a default 'f%i' is substituted. If the argument + `names` is not None, updates the field names to valid names. + + """ + ndescr = len(descr) + default_names = ['f%i' % i for i in range(ndescr)] + if names is None: + new_names = default_names + else: + if isinstance(names, (tuple, list)): + new_names = names + elif isinstance(names, str): + new_names = names.split(',') + else: + raise NameError(f'illegal input names {names!r}') + nnames = len(new_names) + if nnames < ndescr: + new_names += default_names[nnames:] + ndescr = [] + for (n, d, t) in zip(new_names, default_names, descr.descr): + if n in reserved_fields: + if t[0] in reserved_fields: + ndescr.append((d, t[1])) + else: + ndescr.append(t) + else: + ndescr.append((n, t[1])) + return np.dtype(ndescr) + + +def _get_fieldmask(self): + mdescr = [(n, '|b1') for n in self.dtype.names] + fdmask = np.empty(self.shape, dtype=mdescr) + fdmask.flat = tuple([False] * len(mdescr)) + return fdmask + + +class MaskedRecords(MaskedArray): + """ + + Attributes + ---------- + _data : recarray + Underlying data, as a record array. + _mask : boolean array + Mask of the records. A record is masked when all its fields are + masked. + _fieldmask : boolean recarray + Record array of booleans, setting the mask of each individual field + of each record. + _fill_value : record + Filling values for each field. + + """ + + def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, + formats=None, names=None, titles=None, + byteorder=None, aligned=False, + mask=nomask, hard_mask=False, fill_value=None, keep_mask=True, + copy=False, + **options): + + self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, + strides=strides, formats=formats, names=names, + titles=titles, byteorder=byteorder, + aligned=aligned,) + + mdtype = ma.make_mask_descr(self.dtype) + if mask is nomask or not np.size(mask): + if not keep_mask: + self._mask = tuple([False] * len(mdtype)) + else: + mask = np.array(mask, copy=copy) + if mask.shape != self.shape: + (nd, nm) = (self.size, mask.size) + if nm == 1: + mask = np.resize(mask, self.shape) + elif nm == nd: + mask = np.reshape(mask, self.shape) + else: + msg = "Mask and data not compatible: data size is %i, " + \ + "mask size is %i." 
+ raise MAError(msg % (nd, nm)) + if not keep_mask: + self.__setmask__(mask) + self._sharedmask = True + else: + if mask.dtype == mdtype: + _mask = mask + else: + _mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + self._mask = _mask + return self + + def __array_finalize__(self, obj): + # Make sure we have a _fieldmask by default + _mask = getattr(obj, '_mask', None) + if _mask is None: + objmask = getattr(obj, '_mask', nomask) + _dtype = ndarray.__getattribute__(self, 'dtype') + if objmask is nomask: + _mask = ma.make_mask_none(self.shape, dtype=_dtype) + else: + mdescr = ma.make_mask_descr(_dtype) + _mask = narray([tuple([m] * len(mdescr)) for m in objmask], + dtype=mdescr).view(recarray) + # Update some of the attributes + _dict = self.__dict__ + _dict.update(_mask=_mask) + self._update_from(obj) + if _dict['_baseclass'] == ndarray: + _dict['_baseclass'] = recarray + return + + @property + def _data(self): + """ + Returns the data as a recarray. + + """ + return ndarray.view(self, recarray) + + @property + def _fieldmask(self): + """ + Alias to mask. + + """ + return self._mask + + def __len__(self): + """ + Returns the length + + """ + # We have more than one record + if self.ndim: + return len(self._data) + # We have only one record: return the nb of fields + return len(self.dtype) + + def __getattribute__(self, attr): + try: + return object.__getattribute__(self, attr) + except AttributeError: + # attr must be a fieldname + pass + fielddict = ndarray.__getattribute__(self, 'dtype').fields + try: + res = fielddict[attr][:2] + except (TypeError, KeyError) as e: + raise AttributeError( + f'record array has no attribute {attr}') from e + # So far, so good + _localdict = ndarray.__getattribute__(self, '__dict__') + _data = ndarray.view(self, _localdict['_baseclass']) + obj = _data.getfield(*res) + if obj.dtype.names is not None: + raise NotImplementedError("MaskedRecords is currently limited to" + "simple records.") + # Get some special attributes + # Reset the object's mask + hasmasked = False + _mask = _localdict.get('_mask', None) + if _mask is not None: + try: + _mask = _mask[attr] + except IndexError: + # Couldn't find a mask: use the default (nomask) + pass + tp_len = len(_mask.dtype) + hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any() + if (obj.shape or hasmasked): + obj = obj.view(MaskedArray) + obj._baseclass = ndarray + obj._isfield = True + obj._mask = _mask + # Reset the field values + _fill_value = _localdict.get('_fill_value', None) + if _fill_value is not None: + try: + obj._fill_value = _fill_value[attr] + except ValueError: + obj._fill_value = None + else: + obj = obj.item() + return obj + + def __setattr__(self, attr, val): + """ + Sets the attribute attr to the value val. + + """ + # Should we call __setmask__ first ? + if attr in ['mask', 'fieldmask']: + self.__setmask__(val) + return + # Create a shortcut (so that we don't have to call getattr all the time) + _localdict = object.__getattribute__(self, '__dict__') + # Check whether we're creating a new field + newattr = attr not in _localdict + try: + # Is attr a generic attribute ? 
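+            # object.__setattr__ succeeds for regular attributes; if it
+            # raises, the except branch below re-raises unless `attr` names
+            # a field or an _optinfo key.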
+ ret = object.__setattr__(self, attr, val) + except Exception: + # Not a generic attribute: exit if it's not a valid field + fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} + optinfo = ndarray.__getattribute__(self, '_optinfo') or {} + if not (attr in fielddict or attr in optinfo): + raise + else: + # Get the list of names + fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} + # Check the attribute + if attr not in fielddict: + return ret + if newattr: + # We just added this one or this setattr worked on an + # internal attribute. + try: + object.__delattr__(self, attr) + except Exception: + return ret + # Let's try to set the field + try: + res = fielddict[attr][:2] + except (TypeError, KeyError) as e: + raise AttributeError( + f'record array has no attribute {attr}') from e + + if val is masked: + _fill_value = _localdict['_fill_value'] + if _fill_value is not None: + dval = _localdict['_fill_value'][attr] + else: + dval = val + mval = True + else: + dval = filled(val) + mval = getmaskarray(val) + obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res) + _localdict['_mask'].__setitem__(attr, mval) + return obj + + def __getitem__(self, indx): + """ + Returns all the fields sharing the same fieldname base. + + The fieldname base is either `_data` or `_mask`. + + """ + _localdict = self.__dict__ + _mask = ndarray.__getattribute__(self, '_mask') + _data = ndarray.view(self, _localdict['_baseclass']) + # We want a field + if isinstance(indx, str): + # Make sure _sharedmask is True to propagate back to _fieldmask + # Don't use _set_mask, there are some copies being made that + # break propagation Don't force the mask to nomask, that wreaks + # easy masking + obj = _data[indx].view(MaskedArray) + obj._mask = _mask[indx] + obj._sharedmask = True + fval = _localdict['_fill_value'] + if fval is not None: + obj._fill_value = fval[indx] + # Force to masked if the mask is True + if not obj.ndim and obj._mask: + return masked + return obj + # We want some elements. + # First, the data. + obj = np.array(_data[indx], copy=False).view(mrecarray) + obj._mask = np.array(_mask[indx], copy=False).view(recarray) + return obj + + def __setitem__(self, indx, value): + """ + Sets the given record to value. + + """ + MaskedArray.__setitem__(self, indx, value) + if isinstance(indx, str): + self._mask[indx] = ma.getmaskarray(value) + + def __str__(self): + """ + Calculates the string representation. + + """ + if self.size > 1: + mstr = [f"({','.join([str(i) for i in s])})" + for s in zip(*[getattr(self, f) for f in self.dtype.names])] + return f"[{', '.join(mstr)}]" + else: + mstr = [f"{','.join([str(i) for i in s])}" + for s in zip([getattr(self, f) for f in self.dtype.names])] + return f"({', '.join(mstr)})" + + def __repr__(self): + """ + Calculates the repr representation. + + """ + _names = self.dtype.names + fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,) + reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names] + reprstr.insert(0, 'masked_records(') + reprstr.extend([fmt % (' fill_value', self.fill_value), + ' )']) + return str("\n".join(reprstr)) + + def view(self, dtype=None, type=None): + """ + Returns a view of the mrecarray. + + """ + # OK, basic copy-paste from MaskedArray.view. + if dtype is None: + if type is None: + output = ndarray.view(self) + else: + output = ndarray.view(self, type) + # Here again. 
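+        # Only `dtype` was given; it may really be an ndarray subclass
+        # (a "type"), which issubclass() accepts, while a genuine data-type
+        # description lands in the TypeError branch below.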
+        elif type is None:
+            try:
+                if issubclass(dtype, ndarray):
+                    output = ndarray.view(self, dtype)
+                else:
+                    output = ndarray.view(self, dtype)
+            # OK, there's the change
+            except TypeError:
+                dtype = np.dtype(dtype)
+                # we need to revert to MaskedArray, but keeping the possibility
+                # of subclasses (eg, TimeSeriesRecords), so we'll force a type
+                # set to the first parent
+                if dtype.fields is None:
+                    basetype = self.__class__.__bases__[0]
+                    output = self.__array__().view(dtype, basetype)
+                    output._update_from(self)
+                else:
+                    output = ndarray.view(self, dtype)
+                output._fill_value = None
+        else:
+            output = ndarray.view(self, dtype, type)
+        # Update the mask, just like in MaskedArray.view
+        if (getattr(output, '_mask', nomask) is not nomask):
+            mdtype = ma.make_mask_descr(output.dtype)
+            output._mask = self._mask.view(mdtype, ndarray)
+            output._mask.shape = output.shape
+        return output
+
+    def harden_mask(self):
+        """
+        Forces the mask to hard.
+
+        """
+        self._hardmask = True
+
+    def soften_mask(self):
+        """
+        Forces the mask to soft.
+
+        """
+        self._hardmask = False
+
+    def copy(self):
+        """
+        Returns a copy of the masked record.
+
+        """
+        copied = self._data.copy().view(type(self))
+        copied._mask = self._mask.copy()
+        return copied
+
+    def tolist(self, fill_value=None):
+        """
+        Return the data portion of the array as a list.
+
+        Data items are converted to the nearest compatible Python type.
+        Masked values are converted to fill_value. If fill_value is None,
+        the corresponding entries in the output list will be ``None``.
+
+        """
+        if fill_value is not None:
+            return self.filled(fill_value).tolist()
+        result = narray(self.filled().tolist(), dtype=object)
+        mask = narray(self._mask.tolist())
+        result[mask] = None
+        return result.tolist()
+
+    def __getstate__(self):
+        """Return the internal state of the masked array.
+
+        This is for pickling.
+
+        """
+        state = (1,
+                 self.shape,
+                 self.dtype,
+                 self.flags.fnc,
+                 self._data.tobytes(),
+                 self._mask.tobytes(),
+                 self._fill_value,
+                 )
+        return state
+
+    def __setstate__(self, state):
+        """
+        Restore the internal state of the masked array.
+
+        This is for pickling. ``state`` is typically the output of
+        ``__getstate__``, and is a 7-tuple:
+
+        - format version number (currently 1)
+        - a tuple giving the shape of the data
+        - the dtype of the data
+        - the Fortran-contiguity flag (``flags.fnc``)
+        - a binary string for the data
+        - a binary string for the mask
+        - the fill value
+
+        """
+        (ver, shp, typ, isf, raw, msk, flv) = state
+        ndarray.__setstate__(self, (shp, typ, isf, raw))
+        mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr])
+        self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
+        self.fill_value = flv
+
+    def __reduce__(self):
+        """
+        Return a 3-tuple for pickling a MaskedArray.
+
+        """
+        return (_mrreconstruct,
+                (self.__class__, self._baseclass, (0,), 'b',),
+                self.__getstate__())
+
+
+def _mrreconstruct(subtype, baseclass, baseshape, basetype):
+    """
+    Build a new MaskedArray from the information stored in a pickle.
+ + """ + _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype) + _mask = ndarray.__new__(ndarray, baseshape, 'b1') + return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) + +mrecarray = MaskedRecords + + +############################################################################### +# Constructors # +############################################################################### + + +def fromarrays(arraylist, dtype=None, shape=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None, + fill_value=None): + """ + Creates a mrecarray from a (flat) list of masked arrays. + + Parameters + ---------- + arraylist : sequence + A list of (masked) arrays. Each element of the sequence is first converted + to a masked array if needed. If a 2D array is passed as argument, it is + processed line by line + dtype : {None, dtype}, optional + Data type descriptor. + shape : {None, integer}, optional + Number of records. If None, shape is defined from the shape of the + first array in the list. + formats : {None, sequence}, optional + Sequence of formats for each individual field. If None, the formats will + be autodetected by inspecting the fields and selecting the highest dtype + possible. + names : {None, sequence}, optional + Sequence of the names of each field. + fill_value : {None, sequence}, optional + Sequence of data to be used as filling values. + + Notes + ----- + Lists of tuples should be preferred over lists of lists for faster processing. + + """ + datalist = [getdata(x) for x in arraylist] + masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist] + _array = recfromarrays(datalist, + dtype=dtype, shape=shape, formats=formats, + names=names, titles=titles, aligned=aligned, + byteorder=byteorder).view(mrecarray) + _array._mask.flat = list(zip(*masklist)) + if fill_value is not None: + _array.fill_value = fill_value + return _array + + +def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, + titles=None, aligned=False, byteorder=None, + fill_value=None, mask=nomask): + """ + Creates a MaskedRecords from a list of records. + + Parameters + ---------- + reclist : sequence + A list of records. Each element of the sequence is first converted + to a masked array if needed. If a 2D array is passed as argument, it is + processed line by line + dtype : {None, dtype}, optional + Data type descriptor. + shape : {None,int}, optional + Number of records. If None, ``shape`` is defined from the shape of the + first array in the list. + formats : {None, sequence}, optional + Sequence of formats for each individual field. If None, the formats will + be autodetected by inspecting the fields and selecting the highest dtype + possible. + names : {None, sequence}, optional + Sequence of the names of each field. + fill_value : {None, sequence}, optional + Sequence of data to be used as filling values. + mask : {nomask, sequence}, optional. + External mask to apply on the data. + + Notes + ----- + Lists of tuples should be preferred over lists of lists for faster processing. + + """ + # Grab the initial _fieldmask, if needed: + _mask = getattr(reclist, '_mask', None) + # Get the list of records. 
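+    # The records may arrive as an ndarray/MaskedArray rather than a plain
+    # sequence of tuples; such input is filled and converted via tolist()
+    # before being passed to numpy.core.records.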
+    if isinstance(reclist, ndarray):
+        # Make sure we don't have some hidden mask
+        if isinstance(reclist, MaskedArray):
+            reclist = reclist.filled().view(ndarray)
+        # Grab the initial dtype, just in case
+        if dtype is None:
+            dtype = reclist.dtype
+        reclist = reclist.tolist()
+    mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
+                          names=names, titles=titles,
+                          aligned=aligned, byteorder=byteorder).view(mrecarray)
+    # Set the fill_value if needed
+    if fill_value is not None:
+        mrec.fill_value = fill_value
+    # Now, let's deal w/ the mask
+    if mask is not nomask:
+        mask = np.array(mask, copy=False)
+        maskrecordlength = len(mask.dtype)
+        if maskrecordlength:
+            mrec._mask.flat = mask
+        elif mask.ndim == 2:
+            mrec._mask.flat = [tuple(m) for m in mask]
+        else:
+            mrec.__setmask__(mask)
+    if _mask is not None:
+        mrec._mask[:] = _mask
+    return mrec
+
+
+def _guessvartypes(arr):
+    """
+    Tries to guess the dtypes of the str_ ndarray `arr`.
+
+    Guesses by testing element-wise conversion. Returns a list of dtypes.
+    The array is first converted to ndarray. If the array is 2D, the test
+    is performed on the first line. An exception is raised if the array is
+    3D or more.
+
+    """
+    vartypes = []
+    arr = np.asarray(arr)
+    if arr.ndim == 2:
+        arr = arr[0]
+    elif arr.ndim > 2:
+        raise ValueError("The array should be 2D at most!")
+    # Start the conversion loop.
+    for f in arr:
+        try:
+            int(f)
+        except (ValueError, TypeError):
+            try:
+                float(f)
+            except (ValueError, TypeError):
+                try:
+                    complex(f)
+                except (ValueError, TypeError):
+                    vartypes.append(arr.dtype)
+                else:
+                    vartypes.append(np.dtype(complex))
+            else:
+                vartypes.append(np.dtype(float))
+        else:
+            vartypes.append(np.dtype(int))
+    return vartypes
+
+
+def openfile(fname):
+    """
+    Opens the file handle of file `fname`.
+
+    """
+    # A file handle
+    if hasattr(fname, 'readline'):
+        return fname
+    # Try to open the file and guess its type
+    try:
+        f = open(fname)
+    except FileNotFoundError as e:
+        raise FileNotFoundError(f"No such file: '{fname}'") from e
+    if f.readline()[:2] != "\\x":
+        f.seek(0, 0)
+        return f
+    f.close()
+    raise NotImplementedError("Binary files are not supported.")
+
+
+def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='',
+                 varnames=None, vartypes=None,
+                 *, delimitor=np._NoValue):  # backwards compatibility
+    """
+    Creates a mrecarray from data stored in the file `fname`.
+
+    Parameters
+    ----------
+    fname : {file name/handle}
+        Handle of an opened file.
+    delimiter : {None, string}, optional
+        Alphanumeric character used to separate columns in the file.
+        If None, any (group of) whitespace string(s) will be used.
+    commentchar : {'#', string}, optional
+        Alphanumeric character used to mark the start of a comment.
+    missingchar : {'', string}, optional
+        String indicating missing data, and used to create the masks.
+    varnames : {None, sequence}, optional
+        Sequence of the variable names. If None, a list will be created from
+        the first non-empty line of the file.
+    vartypes : {None, sequence}, optional
+        Sequence of the variables' dtypes. If None, it will be estimated from
+        the first non-commented line.
+
+    Notes
+    -----
+    The format is deliberately simple: the variable names are expected in
+    the header, on a single line.
+
+    """
+    if delimitor is not np._NoValue:
+        if delimiter is not None:
+            raise TypeError("fromtextfile() got multiple values for argument "
+                            "'delimiter'")
+        # NumPy 1.22.0, 2021-09-23
+        warnings.warn("The 'delimitor' keyword argument of "
+                      "numpy.ma.mrecords.fromtextfile() is deprecated "
+                      "since NumPy 1.22.0, use 'delimiter' instead.",
+                      DeprecationWarning, stacklevel=2)
+        delimiter = delimitor
+
+    # Try to open the file.
+    ftext = openfile(fname)
+
+    # Get the first non-empty line as the varnames
+    while True:
+        line = ftext.readline()
+        firstline = line[:line.find(commentchar)].strip()
+        _varnames = firstline.split(delimiter)
+        if len(_varnames) > 1:
+            break
+    if varnames is None:
+        varnames = _varnames
+
+    # Get the data.
+    _variables = masked_array([line.strip().split(delimiter) for line in ftext
+                               if line[0] != commentchar and len(line) > 1])
+    (_, nfields) = _variables.shape
+    ftext.close()
+
+    # Try to guess the dtype.
+    if vartypes is None:
+        vartypes = _guessvartypes(_variables[0])
+    else:
+        vartypes = [np.dtype(v) for v in vartypes]
+        if len(vartypes) != nfields:
+            msg = "Attempting to use %i dtypes for %i fields!"
+            msg += " Reverting to default."
+            warnings.warn(msg % (len(vartypes), nfields), stacklevel=2)
+            vartypes = _guessvartypes(_variables[0])
+
+    # Construct the descriptor.
+    mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
+    mfillv = [ma.default_fill_value(f) for f in vartypes]
+
+    # Get the data and the mask.
+    # We just need a list of masked_arrays. It's easier to create it like that:
+    _mask = (_variables.T == missingchar)
+    _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
+                 for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
+
+    return fromarrays(_datalist, dtype=mdescr)
+
+
+def addfield(mrecord, newfield, newfieldname=None):
+    """Adds a new field to the masked record array.
+
+    Uses `newfield` as data and `newfieldname` as name. If `newfieldname`
+    is None, the new field name is set to ``'f<i>'``, where ``<i>`` is the
+    number of existing fields.
+
+    """
+    _data = mrecord._data
+    _mask = mrecord._mask
+    if newfieldname is None or newfieldname in reserved_fields:
+        newfieldname = 'f%i' % len(_data.dtype)
+    newfield = ma.array(newfield)
+    # Get the new data.
+ # Create a new empty recarray + newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)]) + newdata = recarray(_data.shape, newdtype) + # Add the existing field + [newdata.setfield(_data.getfield(*f), *f) + for f in _data.dtype.fields.values()] + # Add the new field + newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname]) + newdata = newdata.view(MaskedRecords) + # Get the new mask + # Create a new empty recarray + newmdtype = np.dtype([(n, bool_) for n in newdtype.names]) + newmask = recarray(_data.shape, newmdtype) + # Add the old masks + [newmask.setfield(_mask.getfield(*f), *f) + for f in _mask.dtype.fields.values()] + # Add the mask of the new field + newmask.setfield(getmaskarray(newfield), + *newmask.dtype.fields[newfieldname]) + newdata._mask = newmask + return newdata diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/mrecords.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/mrecords.pyi new file mode 100644 index 00000000..264807e0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/mrecords.pyi @@ -0,0 +1,90 @@ +from typing import Any, TypeVar + +from numpy import dtype +from numpy.ma import MaskedArray + +__all__: list[str] + +# TODO: Set the `bound` to something more suitable once we +# have proper shape support +_ShapeType = TypeVar("_ShapeType", bound=Any) +_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) + +class MaskedRecords(MaskedArray[_ShapeType, _DType_co]): + def __new__( + cls, + shape, + dtype=..., + buf=..., + offset=..., + strides=..., + formats=..., + names=..., + titles=..., + byteorder=..., + aligned=..., + mask=..., + hard_mask=..., + fill_value=..., + keep_mask=..., + copy=..., + **options, + ): ... + _mask: Any + _fill_value: Any + @property + def _data(self): ... + @property + def _fieldmask(self): ... + def __array_finalize__(self, obj): ... + def __len__(self): ... + def __getattribute__(self, attr): ... + def __setattr__(self, attr, val): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + def view(self, dtype=..., type=...): ... + def harden_mask(self): ... + def soften_mask(self): ... + def copy(self): ... + def tolist(self, fill_value=...): ... + def __reduce__(self): ... + +mrecarray = MaskedRecords + +def fromarrays( + arraylist, + dtype=..., + shape=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., + fill_value=..., +): ... + +def fromrecords( + reclist, + dtype=..., + shape=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., + fill_value=..., + mask=..., +): ... + +def fromtextfile( + fname, + delimiter=..., + commentchar=..., + missingchar=..., + varnames=..., + vartypes=..., + # NOTE: deprecated: NumPy 1.22.0, 2021-09-23 + # delimitor=..., +): ... + +def addfield(mrecord, newfield, newfieldname=...): ... 
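+
+# Usage sketch for `addfield` (illustrative only, mirrors mrecords.py):
+#
+#     >>> from numpy.ma import mrecords
+#     >>> base = mrecords.fromarrays([[1, 2], [3.0, 4.0]], names='a,b')
+#     >>> wider = mrecords.addfield(base, np.ma.array([5, 6], mask=[0, 1]))
+#     >>> wider.dtype.names   # unnamed fields default to 'f<i>'
+#     ('a', 'b', 'f2')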
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/setup.py b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/setup.py new file mode 100644 index 00000000..018d38cd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/setup.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('ma', parent_package, top_path) + config.add_subpackage('tests') + config.add_data_files('*.pyi') + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + config = configuration(top_path='').todict() + setup(**config) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_core.py b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_core.py new file mode 100644 index 00000000..08ddc46c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_core.py @@ -0,0 +1,5687 @@ +# pylint: disable-msg=W0400,W0511,W0611,W0612,W0614,R0201,E1102 +"""Tests suite for MaskedArray & subclassing. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +""" +__author__ = "Pierre GF Gerard-Marchant" + +import sys +import warnings +import copy +import operator +import itertools +import textwrap +import pytest + +from functools import reduce + + +import numpy as np +import numpy.ma.core +import numpy.core.fromnumeric as fromnumeric +import numpy.core.umath as umath +from numpy.testing import ( + assert_raises, assert_warns, suppress_warnings, IS_WASM + ) +from numpy.testing._private.utils import requires_memory +from numpy import ndarray +from numpy.compat import asbytes +from numpy.ma.testutils import ( + assert_, assert_array_equal, assert_equal, assert_almost_equal, + assert_equal_records, fail_if_equal, assert_not_equal, + assert_mask_equal + ) +from numpy.ma.core import ( + MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all, + allclose, allequal, alltrue, angle, anom, arange, arccos, arccosh, arctan2, + arcsin, arctan, argsort, array, asarray, choose, concatenate, + conjugate, cos, cosh, count, default_fill_value, diag, divide, doc_note, + empty, empty_like, equal, exp, flatten_mask, filled, fix_invalid, + flatten_structured_array, fromflex, getmask, getmaskarray, greater, + greater_equal, identity, inner, isMaskedArray, less, less_equal, log, + log10, make_mask, make_mask_descr, mask_or, masked, masked_array, + masked_equal, masked_greater, masked_greater_equal, masked_inside, + masked_less, masked_less_equal, masked_not_equal, masked_outside, + masked_print_option, masked_values, masked_where, max, maximum, + maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply, + mvoid, nomask, not_equal, ones, ones_like, outer, power, product, put, + putmask, ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, + sqrt, subtract, sum, take, tan, tanh, transpose, where, zeros, zeros_like, + ) +from numpy.compat import pickle + +pi = np.pi + + +suppress_copy_mask_on_assignment = suppress_warnings() +suppress_copy_mask_on_assignment.filter( + numpy.ma.core.MaskedArrayFutureWarning, + "setting an item on a masked array which has a shared mask will not copy") + + +# For parametrized numeric testing +num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD'] +num_ids = [dt_.char for dt_ in 
num_dts] + + +class TestMaskedArray: + # Base test class for MaskedArrays. + + def setup_method(self): + # Base data definition. + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + a10 = 10. + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + z = np.array([-.5, 0., .5, .8]) + zm = masked_array(z, mask=[0, 1, 0, 0]) + xf = np.where(m1, 1e+20, x) + xm.set_fill_value(1e+20) + self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) + + def test_basicattributes(self): + # Tests some basic array attributes. + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a.ndim, 1) + assert_equal(b.ndim, 1) + assert_equal(a.size, 3) + assert_equal(b.size, 3) + assert_equal(a.shape, (3,)) + assert_equal(b.shape, (3,)) + + def test_basic0d(self): + # Checks masking a scalar + x = masked_array(0) + assert_equal(str(x), '0') + x = masked_array(0, mask=True) + assert_equal(str(x), str(masked_print_option)) + x = masked_array(0, mask=False) + assert_equal(str(x), '0') + x = array(0, mask=1) + assert_(x.filled().dtype is x._data.dtype) + + def test_basic1d(self): + # Test of basic array creation and properties in 1 dimension. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_((xm - ym).filled(0).any()) + fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) + s = x.shape + assert_equal(np.shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.dtype, x.dtype) + assert_equal(zm.dtype, z.dtype) + assert_equal(xm.size, reduce(lambda x, y:x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_array_equal(xm, xf) + assert_array_equal(filled(xm, 1.e20), xf) + assert_array_equal(x, xm) + + def test_basic2d(self): + # Test of basic array creation and properties in 2 dimensions. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + for s in [(4, 3), (6, 2)]: + x.shape = s + y.shape = s + xm.shape = s + ym.shape = s + xf.shape = s + + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_equal(shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.size, reduce(lambda x, y:x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1)) + assert_equal(xm, xf) + assert_equal(filled(xm, 1.e20), xf) + assert_equal(x, xm) + + def test_concatenate_basic(self): + # Tests concatenations. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # basic concatenation + assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) + assert_equal(np.concatenate((x, y)), concatenate((x, y))) + assert_equal(np.concatenate((x, y)), concatenate((xm, y))) + assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x))) + + def test_concatenate_alongaxis(self): + # Tests concatenations. 
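+        # self.d is the fixture from setup_method: raw data x/y, scalar a10,
+        # masks m1/m2, masked variants xm/ym/zm, and xf (x with the m1 slots
+        # replaced by 1e20).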
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # Concatenation along an axis + s = (3, 4) + x.shape = y.shape = xm.shape = ym.shape = s + assert_equal(xm.mask, np.reshape(m1, s)) + assert_equal(ym.mask, np.reshape(m2, s)) + xmym = concatenate((xm, ym), 1) + assert_equal(np.concatenate((x, y), 1), xmym) + assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask) + + x = zeros(2) + y = array(ones(2), mask=[False, True]) + z = concatenate((x, y)) + assert_array_equal(z, [0, 0, 1, 1]) + assert_array_equal(z.mask, [False, False, False, True]) + z = concatenate((y, x)) + assert_array_equal(z, [1, 1, 0, 0]) + assert_array_equal(z.mask, [False, True, False, False]) + + def test_concatenate_flexible(self): + # Tests the concatenation on flexible arrays. + data = masked_array(list(zip(np.random.rand(10), + np.arange(10))), + dtype=[('a', float), ('b', int)]) + + test = concatenate([data[:5], data[5:]]) + assert_equal_records(test, data) + + def test_creation_ndmin(self): + # Check the use of ndmin + x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2) + assert_equal(x.shape, (1, 3)) + assert_equal(x._data, [[1, 2, 3]]) + assert_equal(x._mask, [[1, 0, 0]]) + + def test_creation_ndmin_from_maskedarray(self): + # Make sure we're not losing the original mask w/ ndmin + x = array([1, 2, 3]) + x[-1] = masked + xx = array(x, ndmin=2, dtype=float) + assert_equal(x.shape, x._mask.shape) + assert_equal(xx.shape, xx._mask.shape) + + def test_creation_maskcreation(self): + # Tests how masks are initialized at the creation of Maskedarrays. + data = arange(24, dtype=float) + data[[3, 6, 15]] = masked + dma_1 = MaskedArray(data) + assert_equal(dma_1.mask, data.mask) + dma_2 = MaskedArray(dma_1) + assert_equal(dma_2.mask, dma_1.mask) + dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6) + fail_if_equal(dma_3.mask, dma_1.mask) + + x = array([1, 2, 3], mask=True) + assert_equal(x._mask, [True, True, True]) + x = array([1, 2, 3], mask=False) + assert_equal(x._mask, [False, False, False]) + y = array([1, 2, 3], mask=x._mask, copy=False) + assert_(np.may_share_memory(x.mask, y.mask)) + y = array([1, 2, 3], mask=x._mask, copy=True) + assert_(not np.may_share_memory(x.mask, y.mask)) + x = array([1, 2, 3], mask=None) + assert_equal(x._mask, [False, False, False]) + + def test_masked_singleton_array_creation_warns(self): + # The first works, but should not (ideally), there may be no way + # to solve this, however, as long as `np.ma.masked` is an ndarray. + np.array(np.ma.masked) + with pytest.warns(UserWarning): + # Tries to create a float array, using `float(np.ma.masked)`. + # We may want to define this is invalid behaviour in the future! + # (requiring np.ma.masked to be a known NumPy scalar probably + # with a DType.) + np.array([3., np.ma.masked]) + + def test_creation_with_list_of_maskedarrays(self): + # Tests creating a masked array from a list of masked arrays. 
+ x = array(np.arange(5), mask=[1, 0, 0, 0, 0]) + data = array((x, x[::-1])) + assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) + assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]]) + + x.mask = nomask + data = array((x, x[::-1])) + assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]]) + assert_(data.mask is nomask) + + def test_creation_with_list_of_maskedarrays_no_bool_cast(self): + # Tests the regression in gh-18551 + masked_str = np.ma.masked_array(['a', 'b'], mask=[True, False]) + normal_int = np.arange(2) + res = np.ma.asarray([masked_str, normal_int], dtype="U21") + assert_array_equal(res.mask, [[True, False], [False, False]]) + + # The above only failed due a long chain of oddity, try also with + # an object array that cannot be converted to bool always: + class NotBool(): + def __bool__(self): + raise ValueError("not a bool!") + masked_obj = np.ma.masked_array([NotBool(), 'b'], mask=[True, False]) + # Check that the NotBool actually fails like we would expect: + with pytest.raises(ValueError, match="not a bool!"): + np.asarray([masked_obj], dtype=bool) + + res = np.ma.asarray([masked_obj, normal_int]) + assert_array_equal(res.mask, [[True, False], [False, False]]) + + def test_creation_from_ndarray_with_padding(self): + x = np.array([('A', 0)], dtype={'names':['f0','f1'], + 'formats':['S4','i8'], + 'offsets':[0,8]}) + array(x) # used to fail due to 'V' padding field in x.dtype.descr + + def test_unknown_keyword_parameter(self): + with pytest.raises(TypeError, match="unexpected keyword argument"): + MaskedArray([1, 2, 3], maks=[0, 1, 0]) # `mask` is misspelled. + + def test_asarray(self): + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + xm.fill_value = -9999 + xm._hardmask = True + xmm = asarray(xm) + assert_equal(xmm._data, xm._data) + assert_equal(xmm._mask, xm._mask) + assert_equal(xmm.fill_value, xm.fill_value) + assert_equal(xmm._hardmask, xm._hardmask) + + def test_asarray_default_order(self): + # See Issue #6646 + m = np.eye(3).T + assert_(not m.flags.c_contiguous) + + new_m = asarray(m) + assert_(new_m.flags.c_contiguous) + + def test_asarray_enforce_order(self): + # See Issue #6646 + m = np.eye(3).T + assert_(not m.flags.c_contiguous) + + new_m = asarray(m, order='C') + assert_(new_m.flags.c_contiguous) + + def test_fix_invalid(self): + # Checks fix_invalid. + with np.errstate(invalid='ignore'): + data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1]) + data_fixed = fix_invalid(data) + assert_equal(data_fixed._data, [data.fill_value, 0., 1.]) + assert_equal(data_fixed._mask, [1., 0., 1.]) + + def test_maskedelement(self): + # Test of masked element + x = arange(6) + x[1] = masked + assert_(str(masked) == '--') + assert_(x[1] is masked) + assert_equal(filled(x[1], 0), 0) + + def test_set_element_as_object(self): + # Tests setting elements with object + a = empty(1, dtype=object) + x = (1, 2, 3, 4, 5) + a[0] = x + assert_equal(a[0], x) + assert_(a[0] is x) + + import datetime + dt = datetime.datetime.now() + a[0] = dt + assert_(a[0] is dt) + + def test_indexing(self): + # Tests conversions and indexing + x1 = np.array([1, 2, 4, 3]) + x2 = array(x1, mask=[1, 0, 0, 0]) + x3 = array(x1, mask=[0, 1, 0, 1]) + x4 = array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? 
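
The indexing tests here rely on a useful invariant of `numpy.ma`: reading a masked slot returns the `np.ma.masked` singleton, and assigning that singleton masks a slot in place. A minimal standalone sketch of that rule (illustrative example data, not part of the patched file):

    import numpy as np

    x = np.ma.array([1, 2, 4, 3], mask=[1, 0, 0, 0])
    assert x[0] is np.ma.masked        # masked reads yield the singleton
    x[2] = np.ma.masked                # assigning the singleton masks the slot
    assert x.mask.tolist() == [True, False, True, False]
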
+ assert_equal(np.sort(x1), sort(x2, endwith=False)) + # tests of indexing + assert_(type(x2[1]) is type(x1[1])) + assert_(x1[1] == x2[1]) + assert_(x2[0] is masked) + assert_equal(x1[2], x2[2]) + assert_equal(x1[2:5], x2[2:5]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[2] = 9 + x2[2] = 9 + assert_equal(x1, x2) + x1[1:3] = 99 + x2[1:3] = 99 + assert_equal(x1, x2) + x2[1] = masked + assert_equal(x1, x2) + x2[1:3] = masked + assert_equal(x1, x2) + x2[:] = x1 + x2[1] = masked + assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) + x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) + x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) + assert_(allequal(x4, array([1, 2, 3, 4]))) + x1 = np.arange(5) * 1.0 + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) + assert_equal(3.0, x2.fill_value) + x1 = array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + s1 = x1[1] + s2 = x2[1] + assert_equal(type(s2), str) + assert_equal(type(s1), str) + assert_equal(s1, s2) + assert_(x1[1:1].shape == (0,)) + + def test_setitem_no_warning(self): + # Setitem shouldn't warn, because the assignment might be masked + # and warning for a masked assignment is weird (see gh-23000) + # (When the value is masked, otherwise a warning would be acceptable + # but is not given currently.) + x = np.ma.arange(60).reshape((6, 10)) + index = (slice(1, 5, 2), [7, 5]) + value = np.ma.masked_all((2, 2)) + value._data[...] = np.inf # not a valid integer... + x[index] = value + # The masked scalar is special cased, but test anyway (it's NaN): + x[...] = np.ma.masked + # Finally, a large value that cannot be cast to the float32 `x` + x = np.ma.arange(3., dtype=np.float32) + value = np.ma.array([2e234, 1, 1], mask=[True, False, False]) + x[...] = value + x[[0, 1, 2]] = value + + @suppress_copy_mask_on_assignment + def test_copy(self): + # Tests of some subtle points of copying and sizing. + n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + assert_(m is m2) + m3 = make_mask(m, copy=True) + assert_(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + assert_equal(y1._data.__array_interface__, x1.__array_interface__) + assert_(allequal(x1, y1.data)) + assert_equal(y1._mask.__array_interface__, m.__array_interface__) + + y1a = array(y1) + # Default for masked array is not to copy; see gh-10318. 
+ assert_(y1a._data.__array_interface__ == + y1._data.__array_interface__) + assert_(y1a._mask.__array_interface__ == + y1._mask.__array_interface__) + + y2 = array(x1, mask=m3) + assert_(y2._data.__array_interface__ == x1.__array_interface__) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(y2[2] is masked) + y2[2] = 9 + assert_(y2[2] is not masked) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a._data.__array_interface__ != x1.__array_interface__) + #assert_( y2a._mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + #assert_( y2a._mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(allequal(y2a.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + assert_equal(concatenate([x4, x4]), y4) + assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) + y6 = repeat(x4, 2, axis=0) + assert_equal(y5, y6) + y7 = x4.repeat((2, 2, 2, 2), axis=0) + assert_equal(y5, y7) + y8 = x4.repeat(2, 0) + assert_equal(y5, y8) + + y9 = x4.copy() + assert_equal(y9._data, x4._data) + assert_equal(y9._mask, x4._mask) + + x = masked_array([1, 2, 3], mask=[0, 1, 0]) + # Copy is False by default + y = masked_array(x) + assert_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_equal(y._mask.ctypes.data, x._mask.ctypes.data) + y = masked_array(x, copy=True) + assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) + + def test_copy_0d(self): + # gh-9430 + x = np.ma.array(43, mask=True) + xc = x.copy() + assert_equal(xc.mask, True) + + def test_copy_on_python_builtins(self): + # Tests copy works on python builtins (issue#8019) + assert_(isMaskedArray(np.ma.copy([1,2,3]))) + assert_(isMaskedArray(np.ma.copy((1,2,3)))) + + def test_copy_immutable(self): + # Tests that the copy method is immutable, GitHub issue #5247 + a = np.ma.array([1, 2, 3]) + b = np.ma.array([4, 5, 6]) + a_copy_method = a.copy + b.copy + assert_equal(a_copy_method(), [1, 2, 3]) + + def test_deepcopy(self): + from copy import deepcopy + a = array([0, 1, 2], mask=[False, True, False]) + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + assert_not_equal(id(a._mask), id(copied._mask)) + + copied[1] = 1 + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + copied.mask[1] = False + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + def test_format(self): + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(format(a), "[0 -- 2]") + assert_equal(format(masked), "--") + assert_equal(format(masked, ""), "--") + + # Postponed from PR #15410, perhaps address in the future. 
+ # assert_equal(format(masked, " >5"), " --") + # assert_equal(format(masked, " <5"), "-- ") + + # Expect a FutureWarning for using format_spec with MaskedElement + with assert_warns(FutureWarning): + with_format_string = format(masked, " >5") + assert_equal(with_format_string, "--") + + def test_str_repr(self): + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(str(a), '[0 -- 2]') + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999)''') + ) + + # arrays with a continuation + a = np.ma.arange(2000) + a[1:50] = np.ma.masked + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[0, --, --, ..., 1997, 1998, 1999], + mask=[False, True, True, ..., False, False, False], + fill_value=999999)''') + ) + + # line-wrapped 1d arrays are correctly aligned + a = np.ma.arange(20) + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array(data=[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19], + mask=False, + fill_value=999999)''') + ) + + # 2d arrays cause wrapping + a = array([[1, 2, 3], [4, 5, 6]], dtype=np.int8) + a[1,1] = np.ma.masked + assert_equal( + repr(a), + textwrap.dedent('''\ + masked_array( + data=[[1, 2, 3], + [4, --, 6]], + mask=[[False, False, False], + [False, True, False]], + fill_value=999999, + dtype=int8)''') + ) + + # but not it they're a row vector + assert_equal( + repr(a[:1]), + textwrap.dedent('''\ + masked_array(data=[[1, 2, 3]], + mask=[[False, False, False]], + fill_value=999999, + dtype=int8)''') + ) + + # dtype=int is implied, so not shown + assert_equal( + repr(a.astype(int)), + textwrap.dedent('''\ + masked_array( + data=[[1, 2, 3], + [4, --, 6]], + mask=[[False, False, False], + [False, True, False]], + fill_value=999999)''') + ) + + def test_str_repr_legacy(self): + oldopts = np.get_printoptions() + np.set_printoptions(legacy='1.13') + try: + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(str(a), '[0 -- 2]') + assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n' + ' mask = [False True False],\n' + ' fill_value = 999999)\n') + + a = np.ma.arange(2000) + a[1:50] = np.ma.masked + assert_equal( + repr(a), + 'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n' + ' mask = [False True True ..., False False False],\n' + ' fill_value = 999999)\n' + ) + finally: + np.set_printoptions(**oldopts) + + def test_0d_unicode(self): + u = 'caf\xe9' + utype = type(u) + + arr_nomask = np.ma.array(u) + arr_masked = np.ma.array(u, mask=True) + + assert_equal(utype(arr_nomask), u) + assert_equal(utype(arr_masked), '--') + + def test_pickling(self): + # Tests pickling + for dtype in (int, float, str, object): + a = arange(10).astype(dtype) + a.fill_value = 999 + + masks = ([0, 0, 0, 1, 0, 1, 0, 1, 0, 1], # partially masked + True, # Fully masked + False) # Fully unmasked + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + for mask in masks: + a.mask = mask + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled._data, a._data) + if dtype in (object, int): + assert_equal(a_pickled.fill_value, 999) + else: + assert_equal(a_pickled.fill_value, dtype(999)) + assert_array_equal(a_pickled.mask, mask) + + def test_pickling_subbaseclass(self): + # Test pickling w/ a subclass of ndarray + x = np.array([(1.0, 2), (3.0, 4)], + dtype=[('x', float), ('y', int)]).view(np.recarray) + a = masked_array(x, mask=[(True, False), (False, True)]) + for proto in range(2, 
pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + assert_(isinstance(a_pickled._data, np.recarray)) + + def test_pickling_maskedconstant(self): + # Test pickling MaskedConstant + mc = np.ma.masked + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + mc_pickled = pickle.loads(pickle.dumps(mc, protocol=proto)) + assert_equal(mc_pickled._baseclass, mc._baseclass) + assert_equal(mc_pickled._mask, mc._mask) + assert_equal(mc_pickled._data, mc._data) + + def test_pickling_wstructured(self): + # Tests pickling w/ structured array + a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], + dtype=[('a', int), ('b', float)]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + + def test_pickling_keepalignment(self): + # Tests pickling w/ F_CONTIGUOUS arrays + a = arange(10) + a.shape = (-1, 2) + b = a.T + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + test = pickle.loads(pickle.dumps(b, protocol=proto)) + assert_equal(test, b) + + def test_single_element_subscript(self): + # Tests single element subscripts of Maskedarrays. + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a[0].shape, ()) + assert_equal(b[0].shape, ()) + assert_equal(b[1].shape, ()) + + def test_topython(self): + # Tests some communication issues with Python. + assert_equal(1, int(array(1))) + assert_equal(1.0, float(array(1))) + assert_equal(1, int(array([[[1]]]))) + assert_equal(1.0, float(array([[1]]))) + assert_raises(TypeError, float, array([1, 1])) + + with suppress_warnings() as sup: + sup.filter(UserWarning, 'Warning: converting a masked element') + assert_(np.isnan(float(array([1], mask=[1])))) + + a = array([1, 2, 3], mask=[1, 0, 0]) + assert_raises(TypeError, lambda: float(a)) + assert_equal(float(a[-1]), 3.) + assert_(np.isnan(float(a[0]))) + assert_raises(TypeError, int, a) + assert_equal(int(a[-1]), 3) + assert_raises(MAError, lambda:int(a[0])) + + def test_oddfeatures_1(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_equal(z.real, x) + assert_equal(z.imag, 10 * x) + assert_equal((z * conjugate(z)).real, 101 * x * x) + z.imag[...] = 0.0 + + x = arange(10) + x[3] = masked + assert_(str(x[3]) == str(masked)) + c = x >= 8 + assert_(count(where(c, masked, masked)) == 0) + assert_(shape(where(c, masked, masked)) == c.shape) + + z = masked_where(c, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + assert_equal(x, z) + + def test_oddfeatures_2(self): + # Tests some more features. 
+ x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + @suppress_copy_mask_on_assignment + def test_oddfeatures_3(self): + # Tests some generic features + atest = array([10], mask=True) + btest = array([20]) + idx = atest.mask + atest[idx] = btest[idx] + assert_equal(atest, [20]) + + def test_filled_with_object_dtype(self): + a = np.ma.masked_all(1, dtype='O') + assert_equal(a.filled('x')[0], 'x') + + def test_filled_with_flexible_dtype(self): + # Test filled w/ flexible dtype + flexi = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + flexi[0] = masked + assert_equal(flexi.filled(), + np.array([(default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.),)], dtype=flexi.dtype)) + flexi[0] = masked + assert_equal(flexi.filled(1), + np.array([(1, '1', 1.)], dtype=flexi.dtype)) + + def test_filled_with_mvoid(self): + # Test filled w/ mvoid + ndtype = [('a', int), ('b', float)] + a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype) + # Filled using default + test = a.filled() + assert_equal(tuple(test), (1, default_fill_value(1.))) + # Explicit fill_value + test = a.filled((-1, -1)) + assert_equal(tuple(test), (1, -1)) + # Using predefined filling values + a.fill_value = (-999, -999) + assert_equal(tuple(a.filled()), (1, -999)) + + def test_filled_with_nested_dtype(self): + # Test filled w/ nested dtype + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([(1, (1, 1)), (2, (2, 2))], + mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) + test = a.filled(0) + control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) + assert_equal(test, control) + + test = a['B'].filled(0) + control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) + assert_equal(test, control) + + # test if mask gets set correctly (see #6760) + Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))])) + assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)), + ('f1', 'i1', (2, 2))], (2, 2))])) + assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)), + ('f1', '?', (2, 2))], (2, 2))])) + + def test_filled_with_f_order(self): + # Test filled w/ F-contiguous array + a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'), + mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'), + order='F') # this is currently ignored + assert_(a.flags['F_CONTIGUOUS']) + assert_(a.filled(0).flags['F_CONTIGUOUS']) + + def test_optinfo_propagation(self): + # Checks that _optinfo dictionary isn't back-propagated + x = array([1, 2, 3, ], dtype=float) + x._optinfo['info'] = '???' + y = x.copy() + assert_equal(y._optinfo['info'], '???') + y._optinfo['info'] = '!!!' 
+ assert_equal(x._optinfo['info'], '???') + + def test_optinfo_forward_propagation(self): + a = array([1,2,2,4]) + a._optinfo["key"] = "value" + assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], a[:2]._optinfo["key"]) + assert_equal(a._optinfo["key"], a[[0,0,2]]._optinfo["key"]) + assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"]) + + def test_fancy_printoptions(self): + # Test printing a masked array w/ fancy dtype. + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = array([(1, (2, 3.0)), (4, (5, 6.0))], + mask=[(1, (0, 1)), (0, (1, 0))], + dtype=fancydtype) + control = "[(--, (2, --)) (4, (--, 6.0))]" + assert_equal(str(test), control) + + # Test 0-d array with multi-dimensional dtype + t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0]], + 0.0), + mask = (False, [[True, False, True], + [False, False, True]], + False), + dtype = "int, (2,3)float, float") + control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)" + assert_equal(str(t_2d0), control) + + def test_flatten_structured_array(self): + # Test flatten_structured_array on arrays + # On ndarray + ndtype = [('a', int), ('b', float)] + a = np.array([(1, 1), (2, 2)], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[1., 1.], [2., 2.]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + # On masked_array + a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1.], [2., 2.]], + mask=[[0, 1], [1, 0]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # On masked array with nested structure + ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] + a = array([(1, (1, 1.1)), (2, (2, 2.2))], + mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1., 1.1], [2., 2., 2.2]], + mask=[[0, 1, 0], [1, 0, 1]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # Keeping the initial shape + ndtype = [('a', int), ('b', float)] + a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + + def test_void0d(self): + # Test creating a mvoid object + ndtype = [('a', int), ('b', int)] + a = np.array([(1, 2,)], dtype=ndtype)[0] + f = mvoid(a) + assert_(isinstance(f, mvoid)) + + a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0] + assert_(isinstance(a, mvoid)) + + a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + f = mvoid(a._data[0], a._mask[0]) + assert_(isinstance(f, 
mvoid)) + + def test_mvoid_getitem(self): + # Test mvoid.__getitem__ + ndtype = [('a', int), ('b', int)] + a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], + dtype=ndtype) + # w/o mask + f = a[0] + assert_(isinstance(f, mvoid)) + assert_equal((f[0], f['a']), (1, 1)) + assert_equal(f['b'], 2) + # w/ mask + f = a[1] + assert_(isinstance(f, mvoid)) + assert_(f[0] is masked) + assert_(f['a'] is masked) + assert_equal(f[1], 4) + + # exotic dtype + A = masked_array(data=[([0,1],)], + mask=[([True, False],)], + dtype=[("A", ">i2", (2,))]) + assert_equal(A[0]["A"], A["A"][0]) + assert_equal(A[0]["A"], masked_array(data=[0, 1], + mask=[True, False], dtype=">i2")) + + def test_mvoid_iter(self): + # Test iteration on __getitem__ + ndtype = [('a', int), ('b', int)] + a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], + dtype=ndtype) + # w/o mask + assert_equal(list(a[0]), [1, 2]) + # w/ mask + assert_equal(list(a[1]), [masked, 4]) + + def test_mvoid_print(self): + # Test printing a mvoid + mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)]) + assert_equal(str(mx[0]), "(1, 1)") + mx['b'][0] = masked + ini_display = masked_print_option._display + masked_print_option.set_display("-X-") + try: + assert_equal(str(mx[0]), "(1, -X-)") + assert_equal(repr(mx[0]), "(1, -X-)") + finally: + masked_print_option.set_display(ini_display) + + # also check if there are object datatypes (see gh-7493) + mx = array([(1,), (2,)], dtype=[('a', 'O')]) + assert_equal(str(mx[0]), "(1,)") + + def test_mvoid_multidim_print(self): + + # regression test for gh-6019 + t_ma = masked_array(data = [([1, 2, 3],)], + mask = [([False, True, False],)], + fill_value = ([999999, 999999, 999999],), + dtype = [('a', ' 1: + assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1)) + assert_equal(np.add.reduce(x, 1), add.reduce(x, 1)) + assert_equal(np.sum(x, 1), sum(x, 1)) + assert_equal(np.prod(x, 1), product(x, 1)) + + def test_binops_d2D(self): + # Test binary operations on 2D data + a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) + b = array([[2., 3.], [4., 5.], [6., 7.]]) + + test = a * b + control = array([[2., 3.], [2., 2.], [3., 3.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b * a + control = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a * b + control = array([[2, 3], [8, 10], [18, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b * a + control = array([[2, 3], [8, 10], [18, 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_domained_binops_d2D(self): + # Test domained binary operations on 2D data + a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) + b = array([[2., 3.], [4., 5.], [6., 7.]]) + + test = a / b + control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b / a + control = array([[2. / 1., 3. 
/ 1.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a / b + control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b / a + control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_noshrinking(self): + # Check that we don't shrink a mask when not wanted + # Binary operations + a = masked_array([1., 2., 3.], mask=[False, False, False], + shrink=False) + b = a + 1 + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a += 1 + assert_equal(a.mask, [0, 0, 0]) + # Domained binary operation + b = a / 1. + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a /= 1. + assert_equal(a.mask, [0, 0, 0]) + + def test_ufunc_nomask(self): + # check the case ufuncs should set the mask to false + m = np.ma.array([1]) + # check we don't get array([False], dtype=bool) + assert_equal(np.true_divide(m, 5).mask.shape, ()) + + def test_noshink_on_creation(self): + # Check that the mask is not shrunk on array creation when not wanted + a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False) + assert_equal(a.mask, [0, 0, 0]) + + def test_mod(self): + # Tests mod + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + assert_equal(mod(x, y), mod(xm, ym)) + test = mod(ym, xm) + assert_equal(test, np.mod(ym, xm)) + assert_equal(test.mask, mask_or(xm.mask, ym.mask)) + test = mod(xm, ym) + assert_equal(test, np.mod(xm, ym)) + assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0))) + + def test_TakeTransposeInnerOuter(self): + # Test of take, transpose, inner, outer products + x = arange(24) + y = np.arange(24) + x[5:6] = masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))) + assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)) + assert_equal(np.inner(filled(x, 0), filled(y, 0)), + inner(x, y)) + assert_equal(np.outer(filled(x, 0), filled(y, 0)), + outer(x, y)) + y = array(['abc', 1, 'def', 2, 3], object) + y[2] = masked + t = take(y, [0, 3, 4]) + assert_(t[0] == 'abc') + assert_(t[1] == 2) + assert_(t[2] == 3) + + def test_imag_real(self): + # Check complex + xx = array([1 + 10j, 20 + 2j], mask=[1, 0]) + assert_equal(xx.imag, [10, 2]) + assert_equal(xx.imag.filled(), [1e+20, 2]) + assert_equal(xx.imag.dtype, xx._data.imag.dtype) + assert_equal(xx.real, [1, 20]) + assert_equal(xx.real.filled(), [1e+20, 20]) + assert_equal(xx.real.dtype, xx._data.real.dtype) + + def test_methods_with_output(self): + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',) + + for funcname in funclist: + npfunc = getattr(np, funcname) + xmmeth = getattr(xm, funcname) + # A ndarray as explicit input + output = np.empty(4, dtype=float) + output.fill(-9999) + result = npfunc(xm, axis=0, out=output) + # ... 
the result should be the given output + assert_(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + + output = empty(4, dtype=int) + result = xmmeth(axis=0, out=output) + assert_(result is output) + assert_(output[0] is masked) + + def test_eq_on_structured(self): + # Test the equality of structured arrays + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + # complicated dtype, 2-dimensional array. + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([[(1, (1, 1)), (2, (2, 2))], + [(3, (3, 3)), (4, (4, 4))]], + mask=[[(0, (1, 0)), (0, (0, 1))], + [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) + test = (a[0, 0] == a) + assert_equal(test.data, [[True, False], [False, False]]) + assert_equal(test.mask, [[False, False], [False, True]]) + assert_(test.fill_value == True) + + def test_ne_on_structured(self): + # Test the equality of structured arrays + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + # complicated dtype, 2-dimensional array. + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([[(1, (1, 1)), (2, (2, 2))], + [(3, (3, 3)), (4, (4, 4))]], + mask=[[(0, (1, 0)), (0, (0, 1))], + [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) + test = (a[0, 0] != a) + assert_equal(test.data, [[False, True], [True, True]]) + assert_equal(test.mask, [[False, False], [False, True]]) + assert_(test.fill_value == True) + + def test_eq_ne_structured_with_non_masked(self): + a = array([(1, 1), (2, 2), (3, 4)], + mask=[(0, 1), (0, 0), (1, 1)], dtype='i4,i4') + eq = a == a.data + ne = a.data != a + # Test the obvious. + assert_(np.all(eq)) + assert_(not np.any(ne)) + # Expect the mask set only for items with all fields masked. 
+ expected_mask = a.mask == np.ones((), a.mask.dtype) + assert_array_equal(eq.mask, expected_mask) + assert_array_equal(ne.mask, expected_mask) + # The masked element will indicated not equal, because the + # masks did not match. + assert_equal(eq.data, [True, True, False]) + assert_array_equal(eq.data, ~ne.data) + + def test_eq_ne_structured_extra(self): + # ensure simple examples are symmetric and make sense. + # from https://github.com/numpy/numpy/pull/8590#discussion_r101126465 + dt = np.dtype('i4,i4') + for m1 in (mvoid((1, 2), mask=(0, 0), dtype=dt), + mvoid((1, 2), mask=(0, 1), dtype=dt), + mvoid((1, 2), mask=(1, 0), dtype=dt), + mvoid((1, 2), mask=(1, 1), dtype=dt)): + ma1 = m1.view(MaskedArray) + r1 = ma1.view('2i4') + for m2 in (np.array((1, 1), dtype=dt), + mvoid((1, 1), dtype=dt), + mvoid((1, 0), mask=(0, 1), dtype=dt), + mvoid((3, 2), mask=(0, 1), dtype=dt)): + ma2 = m2.view(MaskedArray) + r2 = ma2.view('2i4') + eq_expected = (r1 == r2).all() + assert_equal(m1 == m2, eq_expected) + assert_equal(m2 == m1, eq_expected) + assert_equal(ma1 == m2, eq_expected) + assert_equal(m1 == ma2, eq_expected) + assert_equal(ma1 == ma2, eq_expected) + # Also check it is the same if we do it element by element. + el_by_el = [m1[name] == m2[name] for name in dt.names] + assert_equal(array(el_by_el, dtype=bool).all(), eq_expected) + ne_expected = (r1 != r2).any() + assert_equal(m1 != m2, ne_expected) + assert_equal(m2 != m1, ne_expected) + assert_equal(ma1 != m2, ne_expected) + assert_equal(m1 != ma2, ne_expected) + assert_equal(ma1 != ma2, ne_expected) + el_by_el = [m1[name] != m2[name] for name in dt.names] + assert_equal(array(el_by_el, dtype=bool).any(), ne_expected) + + @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('fill', [None, 'A']) + def test_eq_for_strings(self, dt, fill): + # Test the equality of structured arrays + a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill) + test = (a == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b == a[0]) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt', ['S', 'U']) + @pytest.mark.parametrize('fill', [None, 'A']) + def test_ne_for_strings(self, dt, fill): + # Test the equality of structured arrays + a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill) + test = (a != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == 
True) + + test = (b != a[0]) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + def test_eq_for_numeric(self, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = (a == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b == a[0]) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize("op", [operator.eq, operator.lt]) + def test_eq_broadcast_with_unmasked(self, op): + a = array([0, 1], mask=[0, 1]) + b = np.arange(10).reshape(5, 2) + result = op(a, b) + assert_(result.mask.shape == b.shape) + assert_equal(result.mask, np.zeros(b.shape, bool) | a.mask) + + @pytest.mark.parametrize("op", [operator.eq, operator.gt]) + def test_comp_no_mask_not_broadcast(self, op): + # Regression test for failing doctest in MaskedArray.nonzero + # after gh-24556. + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + result = op(a, 3) + assert_(not result.mask.shape) + assert_(result.mask is nomask) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + def test_ne_for_numeric(self, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = (a != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b != a[0]) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + @pytest.mark.parametrize('op', + [operator.le, operator.lt, operator.ge, operator.gt]) + def test_comparisons_for_numeric(self, op, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = op(a, a) + assert_equal(test.data, op(a._data, a._data)) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = op(a, a[0]) + assert_equal(test.data, op(a._data, a._data[0])) + 
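
These comparison tests all encode the same propagation rule: the result of `==`, `!=`, `<`, and friends on masked arrays is itself a masked array, masked wherever either operand is masked, while `.data` carries the raw elementwise comparison. A small sketch of the rule, using only public `numpy.ma` calls on made-up inputs:

    import numpy as np

    a = np.ma.array([0, 1], mask=[0, 1])
    b = np.ma.array([0, 1], mask=[1, 0])
    eq = (a == b)
    print(eq.mask.tolist())   # [True, True]: masked wherever either side is masked
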
assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = op(a, b) + assert_equal(test.data, op(a._data, b._data)) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = op(a[0], b) + assert_equal(test.data, op(a._data[0], b._data)) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = op(b, a[0]) + assert_equal(test.data, op(b._data, a._data[0])) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('op', + [operator.le, operator.lt, operator.ge, operator.gt]) + @pytest.mark.parametrize('fill', [None, "N/A"]) + def test_comparisons_strings(self, op, fill): + # See gh-21770, mask propagation is broken for strings (and some other + # cases) so we explicitly test strings here. + # In principle only == and != may need special handling... + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill) + assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) + + def test_eq_with_None(self): + # Really, comparisons with None should not be done, but check them + # anyway. Note that pep8 will flag these tests. + # Deprecation is in place for arrays, and when it happens this + # test will fail (and have to be changed accordingly). + + # With partial mask + with suppress_warnings() as sup: + sup.filter(FutureWarning, "Comparison to `None`") + a = array([None, 1], mask=[0, 1]) + assert_equal(a == None, array([True, False], mask=[0, 1])) + assert_equal(a.data == None, [True, False]) + assert_equal(a != None, array([False, True], mask=[0, 1])) + # With nomask + a = array([None, 1], mask=False) + assert_equal(a == None, [True, False]) + assert_equal(a != None, [False, True]) + # With complete mask + a = array([None, 2], mask=True) + assert_equal(a == None, array([False, True], mask=True)) + assert_equal(a != None, array([True, False], mask=True)) + # Fully masked, even comparison to None should return "masked" + a = masked + assert_equal(a == None, masked) + + def test_eq_with_scalar(self): + a = array(1) + assert_equal(a == 1, True) + assert_equal(a == 0, False) + assert_equal(a != 1, False) + assert_equal(a != 0, True) + b = array(1, mask=True) + assert_equal(b == 0, masked) + assert_equal(b == 1, masked) + assert_equal(b != 0, masked) + assert_equal(b != 1, masked) + + def test_eq_different_dimensions(self): + m1 = array([1, 1], mask=[0, 1]) + # test comparison with both masked and regular arrays. 
+ for m2 in (array([[0, 1], [1, 2]]), + np.array([[0, 1], [1, 2]])): + test = (m1 == m2) + assert_equal(test.data, [[False, False], + [True, False]]) + assert_equal(test.mask, [[False, True], + [False, True]]) + + def test_numpyarithmetic(self): + # Check that the mask is not back-propagated when using numpy functions + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + control = masked_array([np.nan, np.nan, 0, np.log(2), -1], + mask=[1, 1, 0, 0, 1]) + + test = log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + + test = np.log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + + +class TestMaskedArrayAttributes: + + def test_keepmask(self): + # Tests the keep mask flag + x = masked_array([1, 2, 3], mask=[1, 0, 0]) + mx = masked_array(x) + assert_equal(mx.mask, x.mask) + mx = masked_array(x, mask=[0, 1, 0], keep_mask=False) + assert_equal(mx.mask, [0, 1, 0]) + mx = masked_array(x, mask=[0, 1, 0], keep_mask=True) + assert_equal(mx.mask, [1, 1, 0]) + # We default to true + mx = masked_array(x, mask=[0, 1, 0]) + assert_equal(mx.mask, [1, 1, 0]) + + def test_hardmask(self): + # Test hard_mask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + # We need to copy, to avoid updating d in xh ! + xs = array(d, mask=m, hard_mask=False, copy=True) + xh[[1, 4]] = [10, 40] + xs[[1, 4]] = [10, 40] + assert_equal(xh._data, [0, 10, 2, 3, 4]) + assert_equal(xs._data, [0, 10, 2, 3, 40]) + assert_equal(xs.mask, [0, 0, 0, 1, 0]) + assert_(xh._hardmask) + assert_(not xs._hardmask) + xh[1:4] = [10, 20, 30] + xs[1:4] = [10, 20, 30] + assert_equal(xh._data, [0, 10, 20, 3, 4]) + assert_equal(xs._data, [0, 10, 20, 30, 40]) + assert_equal(xs.mask, nomask) + xh[0] = masked + xs[0] = masked + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, [1, 0, 0, 0, 0]) + xh[:] = 1 + xs[:] = 1 + assert_equal(xh._data, [0, 1, 1, 3, 4]) + assert_equal(xs._data, [1, 1, 1, 1, 1]) + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, nomask) + # Switch to soft mask + xh.soften_mask() + xh[:] = arange(5) + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh.mask, nomask) + # Switch back to hard mask + xh.harden_mask() + xh[xh < 3] = masked + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + xh[filled(xh > 1, False)] = 5 + assert_equal(xh._data, [0, 1, 2, 5, 5]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + + xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True) + xh[0] = 0 + assert_equal(xh._data, [[1, 0], [3, 4]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[-1, -1] = 5 + assert_equal(xh._data, [[1, 0], [3, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[filled(xh < 5, False)] = 2 + assert_equal(xh._data, [[1, 2], [2, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + + def test_hardmask_again(self): + # Another test of hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + xh[4:5] = 999 + xh[0:1] = 999 + assert_equal(xh._data, [999, 1, 2, 3, 4]) + + def test_hardmask_oncemore_yay(self): + # OK, yet another test of hardmask + # Make sure that harden_mask/soften_mask//unshare_mask returns self + a = array([1, 2, 3], mask=[1, 0, 0]) + b = a.harden_mask() + assert_equal(a, b) + b[0] = 0 + assert_equal(a, b) + assert_equal(b, array([1, 2, 3], mask=[1, 0, 0])) + a = b.soften_mask() + a[0] = 0 + assert_equal(a, b) + 
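
The hard-mask tests above hinge on one behavior: with `hard_mask=True`, assignments into masked slots are silently dropped, whereas after `soften_mask()` an assignment unmasks and overwrites. Sketched standalone with illustrative values:

    import numpy as np

    x = np.ma.array([1, 2, 3], mask=[1, 0, 0], hard_mask=True)
    x[0] = 99                 # ignored: the hard mask protects masked cells
    print(x)                  # [-- 2 3]
    x.soften_mask()
    x[0] = 99                 # now the write lands and clears the mask
    print(x)                  # [99 2 3]
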
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0])) + + def test_smallmask(self): + # Checks the behaviour of _smallmask + a = arange(10) + a[1] = masked + a[1] = 1 + assert_equal(a._mask, nomask) + a = arange(10) + a._smallmask = False + a[1] = masked + a[1] = 1 + assert_equal(a._mask, zeros(10)) + + def test_shrink_mask(self): + # Tests .shrink_mask() + a = array([1, 2, 3], mask=[0, 0, 0]) + b = a.shrink_mask() + assert_equal(a, b) + assert_equal(a.mask, nomask) + + # Mask cannot be shrunk on structured types, so is a no-op + a = np.ma.array([(1, 2.0)], [('a', int), ('b', float)]) + b = a.copy() + a.shrink_mask() + assert_equal(a.mask, b.mask) + + def test_flat(self): + # Test that flat can return all types of items [#4585, #4615] + # test 2-D record array + # ... on structured array w/ masked records + x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')], + [(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]], + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x['a'][0, 1] = masked + x['b'][1, 0] = masked + x['c'][0, 2] = masked + x[-1, -1] = masked + xflat = x.flat + assert_equal(xflat[0], x[0, 0]) + assert_equal(xflat[1], x[0, 1]) + assert_equal(xflat[2], x[0, 2]) + assert_equal(xflat[:3], x[0]) + assert_equal(xflat[3], x[1, 0]) + assert_equal(xflat[4], x[1, 1]) + assert_equal(xflat[5], x[1, 2]) + assert_equal(xflat[3:], x[1]) + assert_equal(xflat[-1], x[-1, -1]) + i = 0 + j = 0 + for xf in xflat: + assert_equal(xf, x[j, i]) + i += 1 + if i >= x.shape[-1]: + i = 0 + j += 1 + + def test_assign_dtype(self): + # check that the mask's dtype is updated when dtype is changed + a = np.zeros(4, dtype='f4,i4') + + m = np.ma.array(a) + m.dtype = np.dtype('f4') + repr(m) # raises? + assert_equal(m.dtype, np.dtype('f4')) + + # check that dtype changes that change shape of mask too much + # are not allowed + def assign(): + m = np.ma.array(a) + m.dtype = np.dtype('f8') + assert_raises(ValueError, assign) + + b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises? 
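
For reference, the shrink-mask behavior exercised a few tests up collapses an all-False mask to the `nomask` sentinel, which is what the `mask is nomask`-style assertions check for. A standalone sketch:

    import numpy as np

    a = np.ma.array([1, 2, 3], mask=[0, 0, 0])
    a.shrink_mask()                  # an all-False mask collapses to the sentinel
    assert a.mask is np.ma.nomask
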
+ assert_equal(b.dtype, np.dtype('f4')) + + # check that nomask is preserved + a = np.zeros(4, dtype='f4') + m = np.ma.array(a) + m.dtype = np.dtype('f4,i4') + assert_equal(m.dtype, np.dtype('f4,i4')) + assert_equal(m._mask, np.ma.nomask) + + +class TestFillingValues: + + def test_check_on_scalar(self): + # Test _check_fill_value set to valid and invalid values + _check_fill_value = np.ma.core._check_fill_value + + fval = _check_fill_value(0, int) + assert_equal(fval, 0) + fval = _check_fill_value(None, int) + assert_equal(fval, default_fill_value(0)) + + fval = _check_fill_value(0, "|S3") + assert_equal(fval, b"0") + fval = _check_fill_value(None, "|S3") + assert_equal(fval, default_fill_value(b"camelot!")) + assert_raises(TypeError, _check_fill_value, 1e+20, int) + assert_raises(TypeError, _check_fill_value, 'stuff', int) + + def test_check_on_fields(self): + # Tests _check_fill_value with records + _check_fill_value = np.ma.core._check_fill_value + ndtype = [('a', int), ('b', float), ('c', "|S3")] + # A check on a list should return a single record + fval = _check_fill_value([-999, -12345678.9, "???"], ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + # A check on None should output the defaults + fval = _check_fill_value(None, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [default_fill_value(0), + default_fill_value(0.), + asbytes(default_fill_value("0"))]) + #.....Using a structured type as fill_value should work + fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + + #.....Using a flexible type w/ a different type shouldn't matter + # BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured + # types by position + fill_val = np.array((-999, -12345678.9, "???"), + dtype=[("A", int), ("B", float), ("C", "|S3")]) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + + #.....Using an object-array shouldn't matter either + fill_val = np.ndarray(shape=(1,), dtype=object) + fill_val[0] = (-999, -12345678.9, b"???") + fval = _check_fill_value(fill_val, object) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + # NOTE: This test was never run properly as "fill_value" rather than + # "fill_val" was assigned. Written properly, it fails. 
+ #fill_val = np.array((-999, -12345678.9, "???")) + #fval = _check_fill_value(fill_val, ndtype) + #assert_(isinstance(fval, ndarray)) + #assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + #.....One-field-only flexible type should work as well + ndtype = [("a", int)] + fval = _check_fill_value(-999999999, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), (-999999999,)) + + def test_fillvalue_conversion(self): + # Tests the behavior of fill_value during conversion + # We had a tailored comment to make sure special attributes are + # properly dealt with + a = array([b'3', b'4', b'5']) + a._optinfo.update({'comment':"updated!"}) + + b = array(a, dtype=int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + + b = array(a, dtype=float) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0.)) + + b = a.astype(int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + assert_equal(b._optinfo['comment'], "updated!") + + b = a.astype([('a', '|S3')]) + assert_equal(b['a']._data, a._data) + assert_equal(b['a'].fill_value, a.fill_value) + + def test_default_fill_value(self): + # check all calling conventions + f1 = default_fill_value(1.) + f2 = default_fill_value(np.array(1.)) + f3 = default_fill_value(np.array(1.).dtype) + assert_equal(f1, f2) + assert_equal(f1, f3) + + def test_default_fill_value_structured(self): + fields = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + + f1 = default_fill_value(fields) + f2 = default_fill_value(fields.dtype) + expected = np.array((default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.)), dtype=fields.dtype) + assert_equal(f1, expected) + assert_equal(f2, expected) + + def test_default_fill_value_void(self): + dt = np.dtype([('v', 'V7')]) + f = default_fill_value(dt) + assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v'])) + + def test_fillvalue(self): + # Yet more fun with the fill_value + data = masked_array([1, 2, 3], fill_value=-999) + series = data[[0, 2, 1]] + assert_equal(series._fill_value, data._fill_value) + + mtype = [('f', float), ('s', '|S3')] + x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype) + x.fill_value = 999 + assert_equal(x.fill_value.item(), [999., b'999']) + assert_equal(x['f'].fill_value, 999) + assert_equal(x['s'].fill_value, b'999') + + x.fill_value = (9, '???') + assert_equal(x.fill_value.item(), (9, b'???')) + assert_equal(x['f'].fill_value, 9) + assert_equal(x['s'].fill_value, b'???') + + x = array([1, 2, 3.1]) + x.fill_value = 999 + assert_equal(np.asarray(x.fill_value).dtype, float) + assert_equal(x.fill_value, 999.) 
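
The fill-value tests revolve around one contract: `fill_value` is the stand-in that `filled()` substitutes for masked slots, and a reassigned fill value is cast to the array's dtype. Minimal sketch with made-up data:

    import numpy as np

    x = np.ma.array([1, 2, 3], mask=[0, 1, 0], fill_value=-999)
    print(x.filled())         # [   1 -999    3]
    x.fill_value = 0
    print(x.filled())         # [1 0 3]
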
+ assert_equal(x._fill_value, np.array(999.)) + + def test_subarray_fillvalue(self): + # gh-10483 test multi-field index fill value + fields = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + with suppress_warnings() as sup: + sup.filter(FutureWarning, "Numpy has detected") + subfields = fields[['i', 'f']] + assert_equal(tuple(subfields.fill_value), (999999, 1.e+20)) + # test comparison does not raise: + subfields[1:] == subfields[:-1] + + def test_fillvalue_exotic_dtype(self): + # Tests yet more exotic flexible dtypes + _check_fill_value = np.ma.core._check_fill_value + ndtype = [('i', int), ('s', '|S8'), ('f', float)] + control = np.array((default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.),), + dtype=ndtype) + assert_equal(_check_fill_value(None, ndtype), control) + # The shape shouldn't matter + ndtype = [('f0', float, (2, 2))] + control = np.array((default_fill_value(0.),), + dtype=[('f0', float)]).astype(ndtype) + assert_equal(_check_fill_value(None, ndtype), control) + control = np.array((0,), dtype=[('f0', float)]).astype(ndtype) + assert_equal(_check_fill_value(0, ndtype), control) + + ndtype = np.dtype("int, (2,3)float, float") + control = np.array((default_fill_value(0), + default_fill_value(0.), + default_fill_value(0.),), + dtype="int, float, float").astype(ndtype) + test = _check_fill_value(None, ndtype) + assert_equal(test, control) + control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype) + assert_equal(_check_fill_value(0, ndtype), control) + # but when indexing, fill value should become scalar not tuple + # See issue #6723 + M = masked_array(control) + assert_equal(M["f1"].fill_value.ndim, 0) + + def test_fillvalue_datetime_timedelta(self): + # Test default fillvalue for datetime64 and timedelta64 types. + # See issue #4476, this would return '?' which would cause errors + # elsewhere + + for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m", + "h", "D", "W", "M", "Y"): + control = numpy.datetime64("NaT", timecode) + test = default_fill_value(numpy.dtype(" 0 + + # test different unary domains + sqrt(m) + log(m) + tan(m) + arcsin(m) + arccos(m) + arccosh(m) + + # test binary domains + divide(m, 2) + + # also check that allclose uses ma ufuncs, to avoid warning + allclose(m, 0.5) + +class TestMaskedArrayInPlaceArithmetic: + # Test MaskedArray Arithmetic + + def setup_method(self): + x = arange(10) + y = arange(10) + xm = arange(10) + xm[2] = masked + self.intdata = (x, y, xm) + self.floatdata = (x.astype(float), y.astype(float), xm.astype(float)) + self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + self.othertypes = [np.dtype(_).type for _ in self.othertypes] + self.uint8data = ( + x.astype(np.uint8), + y.astype(np.uint8), + xm.astype(np.uint8) + ) + + def test_inplace_addition_scalar(self): + # Test of inplace additions + (x, y, xm) = self.intdata + xm[2] = masked + x += 1 + assert_equal(x, y + 1) + xm += 1 + assert_equal(xm, y + 1) + + (x, _, xm) = self.floatdata + id1 = x.data.ctypes.data + x += 1. + assert_(id1 == x.data.ctypes.data) + assert_equal(x, y + 1.) 
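
The in-place arithmetic tests that follow check two things at once: augmented assignment updates the data buffer in place (same address), and masked slots stay masked through the operation. A standalone sketch of both checks:

    import numpy as np

    xm = np.ma.array(np.arange(10.))
    xm[2] = np.ma.masked
    buf = xm.data.ctypes.data
    xm += 1                                # in place: buffer address unchanged
    assert xm.data.ctypes.data == buf
    assert bool(xm.mask[2])                # the masked slot stays masked
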
+ + def test_inplace_addition_array(self): + # Test of inplace additions + (x, y, xm) = self.intdata + m = xm.mask + a = arange(10, dtype=np.int16) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_subtraction_scalar(self): + # Test of inplace subtractions + (x, y, xm) = self.intdata + x -= 1 + assert_equal(x, y - 1) + xm -= 1 + assert_equal(xm, y - 1) + + def test_inplace_subtraction_array(self): + # Test of inplace subtractions + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_multiplication_scalar(self): + # Test of inplace multiplication + (x, y, xm) = self.floatdata + x *= 2.0 + assert_equal(x, y * 2) + xm *= 2.0 + assert_equal(xm, y * 2) + + def test_inplace_multiplication_array(self): + # Test of inplace multiplication + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_division_scalar_int(self): + # Test of inplace division + (x, y, xm) = self.intdata + x = arange(10) * 2 + xm = arange(10) * 2 + xm[2] = masked + x //= 2 + assert_equal(x, y) + xm //= 2 + assert_equal(xm, y) + + def test_inplace_division_scalar_float(self): + # Test of inplace division + (x, y, xm) = self.floatdata + x /= 2.0 + assert_equal(x, y / 2.0) + xm /= arange(10) + assert_equal(xm, ones((10,))) + + def test_inplace_division_array_float(self): + # Test of inplace division + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x /= a + xm /= a + assert_equal(x, y / a) + assert_equal(xm, y / a) + assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0))) + + def test_inplace_division_misc(self): + + x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.] + y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.] 
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + + z = xm / ym + assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + + xm = xm.copy() + xm /= ym + assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + + def test_datafriendly_add(self): + # Test keeping data w/ (inplace) addition + x = array([1, 2, 3], mask=[0, 0, 1]) + # Test add w/ scalar + xx = x + 1 + assert_equal(xx.data, [2, 3, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test iadd w/ scalar + x += 1 + assert_equal(x.data, [2, 3, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test add w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x + array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 4, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test iadd w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x += array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 4, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_sub(self): + # Test keeping data w/ (inplace) subtraction + # Test sub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - 1 + assert_equal(xx.data, [0, 1, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test isub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= 1 + assert_equal(x.data, [0, 1, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test sub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 0, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test isub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 0, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_mul(self): + # Test keeping data w/ (inplace) multiplication + # Test mul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * 2 + assert_equal(xx.data, [2, 4, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test imul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= 2 + assert_equal(x.data, [2, 4, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test mul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 40, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test imul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(x.data, [1, 40, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_div(self): + # Test keeping data w/ (inplace) division + # Test div on scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x / 2. + assert_equal(xx.data, [1 / 2., 2 / 2., 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test idiv on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= 2. + assert_equal(x.data, [1 / 2., 2 / 2., 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test div on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x / array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(xx.data, [1., 2. 
/ 20., 3.]) + assert_equal(xx.mask, [1, 0, 1]) + # Test idiv on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(x.data, [1., 2 / 20., 3.]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_pow(self): + # Test keeping data w/ (inplace) power + # Test pow on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x ** 2.5 + assert_equal(xx.data, [1., 2. ** 2.5, 3.]) + assert_equal(xx.mask, [0, 0, 1]) + # Test ipow on scalar + x **= 2.5 + assert_equal(x.data, [1., 2. ** 2.5, 3]) + assert_equal(x.mask, [0, 0, 1]) + + def test_datafriendly_add_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_sub_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_mul_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_inplace_addition_scalar_type(self): + # Test of inplace additions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + xm[2] = masked + x += t(1) + assert_equal(x, y + t(1)) + xm += t(1) + assert_equal(xm, y + t(1)) + + def test_inplace_addition_array_type(self): + # Test of inplace additions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_subtraction_scalar_type(self): + # Test of inplace subtractions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x -= t(1) + assert_equal(x, y - t(1)) + xm -= t(1) + assert_equal(xm, y - t(1)) + + def test_inplace_subtraction_array_type(self): + # Test of inplace subtractions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_multiplication_scalar_type(self): + # Test of inplace multiplication + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x *= t(2) + assert_equal(x, y * t(2)) + xm *= t(2) + assert_equal(xm, y * t(2)) + + def test_inplace_multiplication_array_type(self): + # Test of inplace 
multiplication + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_floor_division_scalar_type(self): + # Test of inplace division + # Check for TypeError in case of unsupported types + unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x = arange(10, dtype=t) * t(2) + xm = arange(10, dtype=t) * t(2) + xm[2] = masked + try: + x //= t(2) + xm //= t(2) + assert_equal(x, y) + assert_equal(xm, y) + except TypeError: + msg = f"Supported type {t} throwing TypeError" + assert t in unsupported, msg + + def test_inplace_floor_division_array_type(self): + # Test of inplace division + # Check for TypeError in case of unsupported types + unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + try: + x //= a + xm //= a + assert_equal(x, y // a) + assert_equal(xm, y // a) + assert_equal( + xm.mask, + mask_or(mask_or(m, a.mask), (a == t(0))) + ) + except TypeError: + msg = f"Supported type {t} throwing TypeError" + assert t in unsupported, msg + + def test_inplace_division_scalar_type(self): + # Test of inplace division + for t in self.othertypes: + with suppress_warnings() as sup: + sup.record(UserWarning) + + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x = arange(10, dtype=t) * t(2) + xm = arange(10, dtype=t) * t(2) + xm[2] = masked + + # May get a DeprecationWarning or a TypeError. + # + # This is a consequence of the fact that this is true divide + # and will require casting to float for calculation and + # casting back to the original type. This will only be raised + # with integers. Whether it is an error or warning is only + # dependent on how stringent the casting rules are. + # + # Will handle the same way. + try: + x /= t(2) + assert_equal(x, y) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + try: + xm /= t(2) + assert_equal(xm, y) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + + if issubclass(t, np.integer): + assert_equal(len(sup.log), 2, f'Failed on type={t}.') + else: + assert_equal(len(sup.log), 0, f'Failed on type={t}.') + + def test_inplace_division_array_type(self): + # Test of inplace division + for t in self.othertypes: + with suppress_warnings() as sup: + sup.record(UserWarning) + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + + # May get a DeprecationWarning or a TypeError. + # + # This is a consequence of the fact that this is true divide + # and will require casting to float for calculation and + # casting back to the original type. This will only be raised + # with integers. Whether it is an error or warning is only + # dependent on how stringent the casting rules are. + # + # Will handle the same way. 
+ try: + x /= a + assert_equal(x, y / a) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + try: + xm /= a + assert_equal(xm, y / a) + assert_equal( + xm.mask, + mask_or(mask_or(m, a.mask), (a == t(0))) + ) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + + if issubclass(t, np.integer): + assert_equal(len(sup.log), 2, f'Failed on type={t}.') + else: + assert_equal(len(sup.log), 0, f'Failed on type={t}.') + + def test_inplace_pow_type(self): + # Test keeping data w/ (inplace) power + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + # Test pow on scalar + x = array([1, 2, 3], mask=[0, 0, 1], dtype=t) + xx = x ** t(2) + xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t) + assert_equal(xx.data, xx_r.data) + assert_equal(xx.mask, xx_r.mask) + # Test ipow on scalar + x **= t(2) + assert_equal(x.data, xx_r.data) + assert_equal(x.mask, xx_r.mask) + + +class TestMaskedArrayMethods: + # Test class for miscellaneous MaskedArrays methods. + def setup_method(self): + # Base data definition. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_generic_methods(self): + # Tests some MaskedArray methods. 
+ a = array([1, 3, 2]) + assert_equal(a.any(), a._data.any()) + assert_equal(a.all(), a._data.all()) + assert_equal(a.argmax(), a._data.argmax()) + assert_equal(a.argmin(), a._data.argmin()) + assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4)) + assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])) + assert_equal(a.conj(), a._data.conj()) + assert_equal(a.conjugate(), a._data.conjugate()) + + m = array([[1, 2], [3, 4]]) + assert_equal(m.diagonal(), m._data.diagonal()) + assert_equal(a.sum(), a._data.sum()) + assert_equal(a.take([1, 2]), a._data.take([1, 2])) + assert_equal(m.transpose(), m._data.transpose()) + + def test_allclose(self): + # Tests allclose on arrays + a = np.random.rand(10) + b = a + np.random.rand(10) * 1e-8 + assert_(allclose(a, b)) + # Test allclose w/ infs + a[0] = np.inf + assert_(not allclose(a, b)) + b[0] = np.inf + assert_(allclose(a, b)) + # Test allclose w/ masked + a = masked_array(a) + a[-1] = masked + assert_(allclose(a, b, masked_equal=True)) + assert_(not allclose(a, b, masked_equal=False)) + # Test comparison w/ scalar + a *= 1e-8 + a[0] = 0 + assert_(allclose(a, 0, masked_equal=True)) + + # Test that the function works for MIN_INT integer typed arrays + a = masked_array([np.iinfo(np.int_).min], dtype=np.int_) + assert_(allclose(a, a)) + + def test_allclose_timedelta(self): + # Allclose currently works for timedelta64 as long as `atol` is + # an integer or also a timedelta64 + a = np.array([[1, 2, 3, 4]], dtype="m8[ns]") + assert allclose(a, a, atol=0) + assert allclose(a, a, atol=np.timedelta64(1, "ns")) + + def test_allany(self): + # Checks the any/all methods/functions. + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool_) + mx = masked_array(x, mask=m) + mxbig = (mx > 0.5) + mxsmall = (mx < 0.5) + + assert_(not mxbig.all()) + assert_(mxbig.any()) + assert_equal(mxbig.all(0), [False, False, True]) + assert_equal(mxbig.all(1), [False, False, True]) + assert_equal(mxbig.any(0), [False, False, True]) + assert_equal(mxbig.any(1), [True, True, True]) + + assert_(not mxsmall.all()) + assert_(mxsmall.any()) + assert_equal(mxsmall.all(0), [True, True, False]) + assert_equal(mxsmall.all(1), [False, False, False]) + assert_equal(mxsmall.any(0), [True, True, False]) + assert_equal(mxsmall.any(1), [True, True, False]) + + def test_allany_oddities(self): + # Some fun with all and any + store = empty((), dtype=bool) + full = array([1, 2, 3], mask=True) + + assert_(full.all() is masked) + full.all(out=store) + assert_(store) + assert_(store._mask, True) + assert_(store is not masked) + + store = empty((), dtype=bool) + assert_(full.any() is masked) + full.any(out=store) + assert_(not store) + assert_(store._mask, True) + assert_(store is not masked) + + def test_argmax_argmin(self): + # Tests argmin & argmax on MaskedArrays. 
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + + assert_equal(mx.argmin(), 35) + assert_equal(mX.argmin(), 35) + assert_equal(m2x.argmin(), 4) + assert_equal(m2X.argmin(), 4) + assert_equal(mx.argmax(), 28) + assert_equal(mX.argmax(), 28) + assert_equal(m2x.argmax(), 31) + assert_equal(m2X.argmax(), 31) + + assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5]) + assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4]) + assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0]) + assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0]) + + assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ]) + assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3]) + assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1]) + assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1]) + + def test_clip(self): + # Tests clip on MaskedArrays. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) + mx = array(x, mask=m) + clipped = mx.clip(2, 8) + assert_equal(clipped.mask, mx.mask) + assert_equal(clipped._data, x.clip(2, 8)) + assert_equal(clipped._data, mx._data.clip(2, 8)) + + def test_clip_out(self): + # gh-14140 + a = np.arange(10) + m = np.ma.MaskedArray(a, mask=[0, 1] * 5) + m.clip(0, 5, out=m) + assert_equal(m.mask, [0, 1] * 5) + + def test_compress(self): + # test compress + a = masked_array([1., 2., 3., 4., 5.], fill_value=9999) + condition = (a > 1.5) & (a < 3.5) + assert_equal(a.compress(condition), [2., 3.]) + + a[[2, 3]] = masked + b = a.compress(condition) + assert_equal(b._data, [2., 3.]) + assert_equal(b._mask, [0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + + condition = (a < 4.) 
+ b = a.compress(condition) + assert_equal(b._data, [1., 2., 3.]) + assert_equal(b._mask, [0, 0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + + a = masked_array([[10, 20, 30], [40, 50, 60]], + mask=[[0, 0, 1], [1, 0, 0]]) + b = a.compress(a.ravel() >= 22) + assert_equal(b._data, [30, 40, 50, 60]) + assert_equal(b._mask, [1, 1, 0, 0]) + + x = np.array([3, 1, 2]) + b = a.compress(x >= 2, axis=1) + assert_equal(b._data, [[10, 30], [40, 60]]) + assert_equal(b._mask, [[0, 1], [1, 0]]) + + def test_compressed(self): + # Tests compressed + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0]) + b = a.compressed() + assert_equal(b, a) + a[0] = masked + b = a.compressed() + assert_equal(b, [2, 3, 4]) + + def test_empty(self): + # Tests empty/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = empty_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = empty(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check empty_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = empty_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view(masked_array) + assert_(np.may_share_memory(a.mask, b.mask)) + + def test_zeros(self): + # Tests zeros/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = zeros(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = zeros_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check zeros_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = zeros_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view() + assert_(np.may_share_memory(a.mask, b.mask)) + + def test_ones(self): + # Tests ones/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = ones(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = ones_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check ones_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = ones_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view() + assert_(np.may_share_memory(a.mask, b.mask)) + + @suppress_copy_mask_on_assignment + def test_put(self): + # Tests put. 
+ d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + x = array(d, mask=m) + assert_(x[3] is masked) + assert_(x[4] is masked) + x[[1, 4]] = [10, 40] + assert_(x[3] is masked) + assert_(x[4] is not masked) + assert_equal(x, [0, 10, 2, -1, 40]) + + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + i = [0, 2, 4, 6] + x.put(i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + put(x, i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + def test_put_nomask(self): + # GitHub issue 6425 + x = zeros(10) + z = array([3., -1.], mask=[False, True]) + + x.put([1, 2], z) + assert_(x[0] is not masked) + assert_equal(x[0], 0) + assert_(x[1] is not masked) + assert_equal(x[1], 3) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_equal(x[3], 0) + + def test_put_hardmask(self): + # Tests put on hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d + 1, mask=m, hard_mask=True, copy=True) + xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5]) + assert_equal(xh._data, [3, 4, 2, 4, 5]) + + def test_putmask(self): + x = arange(6) + 1 + mx = array(x, mask=[0, 0, 0, 1, 1, 1]) + mask = [0, 0, 1, 0, 0, 1] + # w/o mask, w/o masked values + xx = x.copy() + putmask(xx, mask, 99) + assert_equal(xx, [1, 2, 99, 4, 5, 99]) + # w/ mask, w/o masked values + mxx = mx.copy() + putmask(mxx, mask, 99) + assert_equal(mxx._data, [1, 2, 99, 4, 5, 99]) + assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0]) + # w/o mask, w/ masked values + values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0]) + xx = x.copy() + putmask(xx, mask, values) + assert_equal(xx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(xx._mask, [0, 0, 1, 0, 0, 0]) + # w/ mask, w/ masked values + mxx = mx.copy() + putmask(mxx, mask, values) + assert_equal(mxx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0]) + # w/ mask, w/ masked values + hardmask + mxx = mx.copy() + mxx.harden_mask() + putmask(mxx, mask, values) + assert_equal(mxx, [1, 2, 30, 4, 5, 60]) + + def test_ravel(self): + # Tests ravel + a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, aravel.shape) + a = array([0, 0], mask=[1, 1]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, a.shape) + # Checks that small_mask is preserved + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False) + assert_equal(a.ravel()._mask, [0, 0, 0, 0]) + # Test that the fill_value is preserved + a.fill_value = -99 + a.shape = (2, 2) + ar = a.ravel() + assert_equal(ar._mask, [0, 0, 0, 0]) + assert_equal(ar._data, [1, 2, 3, 4]) + assert_equal(ar.fill_value, -99) + # Test index ordering + assert_equal(a.ravel(order='C'), [1, 2, 3, 4]) + assert_equal(a.ravel(order='F'), [1, 3, 2, 4]) + + @pytest.mark.parametrize("order", "AKCF") + @pytest.mark.parametrize("data_order", "CF") + def test_ravel_order(self, order, data_order): + # Ravelling must ravel mask and data in the same order always to avoid + # misaligning the two in the ravel result. 
+        arr = np.ones((5, 10), order=data_order)
+        arr[0, :] = 0
+        mask = np.ones((10, 5), dtype=bool, order=data_order).T
+        mask[0, :] = False
+        x = array(arr, mask=mask)
+        assert x._data.flags.fnc != x._mask.flags.fnc
+        assert (x.filled(0) == 0).all()
+        raveled = x.ravel(order)
+        assert (raveled.filled(0) == 0).all()
+
+        # NOTE: Can be wrong if arr order is neither C nor F and `order="K"`
+        assert_array_equal(arr.ravel(order), x.ravel(order)._data)
+
+    def test_reshape(self):
+        # Tests reshape
+        x = arange(4)
+        x[0] = masked
+        y = x.reshape(2, 2)
+        assert_equal(y.shape, (2, 2,))
+        assert_equal(y._mask.shape, (2, 2,))
+        assert_equal(x.shape, (4,))
+        assert_equal(x._mask.shape, (4,))
+
+    def test_sort(self):
+        # Test sort
+        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+
+        sortedx = sort(x)
+        assert_equal(sortedx._data, [1, 2, 3, 4])
+        assert_equal(sortedx._mask, [0, 0, 0, 1])
+
+        sortedx = sort(x, endwith=False)
+        assert_equal(sortedx._data, [4, 1, 2, 3])
+        assert_equal(sortedx._mask, [1, 0, 0, 0])
+
+        x.sort()
+        assert_equal(x._data, [1, 2, 3, 4])
+        assert_equal(x._mask, [0, 0, 0, 1])
+
+        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+        x.sort(endwith=False)
+        assert_equal(x._data, [4, 1, 2, 3])
+        assert_equal(x._mask, [1, 0, 0, 0])
+
+        x = [1, 4, 2, 3]
+        sortedx = sort(x)
+        # check the sort result, not the builtin `sorted`
+        assert_(not isinstance(sortedx, MaskedArray))
+
+        x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
+        sortedx = sort(x, endwith=False)
+        assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
+        x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
+        sortedx = sort(x, endwith=False)
+        assert_equal(sortedx._data, [1, 2, -2, -1, 0])
+        assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
+
+        x = array([0, -1], dtype=np.int8)
+        sortedx = sort(x, kind="stable")
+        assert_equal(sortedx, array([-1, 0], dtype=np.int8))
+
+    def test_stable_sort(self):
+        x = array([1, 2, 3, 1, 2, 3], dtype=np.uint8)
+        expected = array([0, 3, 1, 4, 2, 5])
+        computed = argsort(x, kind='stable')
+        assert_equal(computed, expected)
+
+    def test_argsort_matches_sort(self):
+        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+
+        for kwargs in [dict(),
+                       dict(endwith=True),
+                       dict(endwith=False),
+                       dict(fill_value=2),
+                       dict(fill_value=2, endwith=True),
+                       dict(fill_value=2, endwith=False)]:
+            sortedx = sort(x, **kwargs)
+            argsortedx = x[argsort(x, **kwargs)]
+            assert_equal(sortedx._data, argsortedx._data)
+            assert_equal(sortedx._mask, argsortedx._mask)
+
+    def test_sort_2d(self):
+        # Check sort of 2D array.
+ # 2D array w/o mask + a = masked_array([[8, 4, 1], [2, 0, 9]]) + a.sort(0) + assert_equal(a, [[2, 0, 1], [8, 4, 9]]) + a = masked_array([[8, 4, 1], [2, 0, 9]]) + a.sort(1) + assert_equal(a, [[1, 4, 8], [0, 2, 9]]) + # 2D array w/mask + a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) + a.sort(0) + assert_equal(a, [[2, 0, 1], [8, 4, 9]]) + assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]]) + a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) + a.sort(1) + assert_equal(a, [[1, 4, 8], [0, 2, 9]]) + assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]]) + # 3D + a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]], + [[1, 2, 3], [7, 8, 9], [4, 5, 6]], + [[7, 8, 9], [1, 2, 3], [4, 5, 6]], + [[4, 5, 6], [1, 2, 3], [7, 8, 9]]]) + a[a % 4 == 0] = masked + am = a.copy() + an = a.filled(99) + am.sort(0) + an.sort(0) + assert_equal(am, an) + am = a.copy() + an = a.filled(99) + am.sort(1) + an.sort(1) + assert_equal(am, an) + am = a.copy() + an = a.filled(99) + am.sort(2) + an.sort(2) + assert_equal(am, an) + + def test_sort_flexible(self): + # Test sort on structured dtype. + a = array( + data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)], + mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)], + dtype=[('A', int), ('B', int)]) + mask_last = array( + data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)], + mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)], + dtype=[('A', int), ('B', int)]) + mask_first = array( + data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)], + mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)], + dtype=[('A', int), ('B', int)]) + + test = sort(a) + assert_equal(test, mask_last) + assert_equal(test.mask, mask_last.mask) + + test = sort(a, endwith=False) + assert_equal(test, mask_first) + assert_equal(test.mask, mask_first.mask) + + # Test sort on dtype with subarray (gh-8069) + # Just check that the sort does not error, structured array subarrays + # are treated as byte strings and that leads to differing behavior + # depending on endianness and `endwith`. + dt = np.dtype([('v', int, 2)]) + a = a.view(dt) + test = sort(a) + test = sort(a, endwith=False) + + def test_argsort(self): + # Test argsort + a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0]) + assert_equal(np.argsort(a), argsort(a)) + + def test_squeeze(self): + # Check squeeze + data = masked_array([[1, 2, 3]]) + assert_equal(data.squeeze(), [1, 2, 3]) + data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]]) + assert_equal(data.squeeze(), [1, 2, 3]) + assert_equal(data.squeeze()._mask, [1, 1, 1]) + + # normal ndarrays return a view + arr = np.array([[1]]) + arr_sq = arr.squeeze() + assert_equal(arr_sq, 1) + arr_sq[...] = 2 + assert_equal(arr[0,0], 2) + + # so maskedarrays should too + m_arr = masked_array([[1]], mask=True) + m_arr_sq = m_arr.squeeze() + assert_(m_arr_sq is not np.ma.masked) + assert_equal(m_arr_sq.mask, True) + m_arr_sq[...] = 2 + assert_equal(m_arr[0,0], 2) + + def test_swapaxes(self): + # Tests swapaxes on MaskedArrays. 
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mX = array(x, mask=m).reshape(6, 6) + mXX = mX.reshape(3, 2, 2, 3) + + mXswapped = mX.swapaxes(0, 1) + assert_equal(mXswapped[-1], mX[:, -1]) + + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_take(self): + # Tests take + x = masked_array([10, 20, 30, 40], [0, 1, 0, 1]) + assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1])) + assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]]) + assert_equal(x.take([[0, 1], [0, 1]]), + masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]])) + + # assert_equal crashes when passed np.ma.mask + assert_(x[1] is np.ma.masked) + assert_(x.take(1) is np.ma.masked) + + x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]]) + assert_equal(x.take([0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + assert_equal(take(x, [0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + + def test_take_masked_indices(self): + # Test take w/ masked indices + a = np.array((40, 18, 37, 9, 22)) + indices = np.arange(3)[None,:] + np.arange(5)[:, None] + mindices = array(indices, mask=(indices >= len(a))) + # No mask + test = take(a, mindices, mode='clip') + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 22], + [22, 22, 22]]) + assert_equal(test, ctrl) + # Masked indices + test = take(a, mindices) + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 40], + [22, 40, 40]]) + ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # Masked input + masked indices + a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0)) + test = take(a, mindices) + ctrl[0, 1] = ctrl[1, 0] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + def test_tolist(self): + # Tests to list + # ... on 1D + x = array(np.arange(12)) + x[[1, -2]] = masked + xlist = x.tolist() + assert_(xlist[1] is None) + assert_(xlist[-2] is None) + # ... on 2D + x.shape = (3, 4) + xlist = x.tolist() + ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]] + assert_equal(xlist[0], [0, None, 2, 3]) + assert_equal(xlist[1], [4, 5, 6, 7]) + assert_equal(xlist[2], [8, 9, None, 11]) + assert_equal(xlist, ctrl) + # ... on structured array w/ masked records + x = array(list(zip([1, 2, 3], + [1.1, 2.2, 3.3], + ['one', 'two', 'thr'])), + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x[-1] = masked + assert_equal(x.tolist(), + [(1, 1.1, b'one'), + (2, 2.2, b'two'), + (None, None, None)]) + # ... on structured array w/ masked fields + a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)], + dtype=[('a', int), ('b', int)]) + test = a.tolist() + assert_equal(test, [[1, None], [3, 4]]) + # ... 
on mvoid + a = a[0] + test = a.tolist() + assert_equal(test, [1, None]) + + def test_tolist_specialcase(self): + # Test mvoid.tolist: make sure we return a standard Python object + a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)]) + # w/o mask: each entry is a np.void whose elements are standard Python + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + # w/ mask: each entry is a ma.void whose elements should be + # standard Python + a.mask[0] = (0, 1) + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + + def test_toflex(self): + # Test the conversion to records + data = arange(10) + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + ndtype = [('i', int), ('s', '|S3'), ('f', float)] + data = array([(i, s, f) for (i, s, f) in zip(np.arange(10), + 'ABCDEFGHIJKLM', + np.random.rand(10))], + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + ndtype = np.dtype("int, (2,3)float, float") + data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10), + np.random.rand(10), + np.random.rand(10))], + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal_records(record['_data'], data._data) + assert_equal_records(record['_mask'], data._mask) + + def test_fromflex(self): + # Test the reconstruction of a masked_array from a record + a = array([1, 2, 3]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + + a = array([1, 2, 3], mask=[0, 0, 1]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + + a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)], + dtype=[('A', int), ('B', float)]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.data, a.data) + + def test_arraymethod(self): + # Test a _arraymethod w/ n argument + marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0]) + control = masked_array([[1], [2], [3], [4], [5]], + mask=[0, 0, 1, 0, 0]) + assert_equal(marray.T, control) + assert_equal(marray.transpose(), control) + + assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0)) + + def test_arraymethod_0d(self): + # gh-9430 + x = np.ma.array(42, mask=True) + assert_equal(x.T.mask, x.mask) + assert_equal(x.T.data, x.data) + + def test_transpose_view(self): + x = np.ma.array([[1, 2, 3], [4, 5, 6]]) + x[0,1] = np.ma.masked + xt = x.T + + xt[1,0] = 10 + xt[0,1] = np.ma.masked + + assert_equal(x.data, xt.T.data) + assert_equal(x.mask, xt.T.mask) + + def test_diagonal_view(self): + x = np.ma.zeros((3,3)) + x[0,0] = 10 + x[1,1] = np.ma.masked + x[2,2] = 20 + xd = x.diagonal() + x[1,1] = 15 + assert_equal(xd.mask, x.diagonal().mask) + assert_equal(xd.data, x.diagonal().data) + + +class TestMaskedArrayMathMethods: + + def setup_method(self): + # Base data definition. 
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_cumsumprod(self): + # Tests cumsum & cumprod on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mXcp = mX.cumsum(0) + assert_equal(mXcp._data, mX.filled(0).cumsum(0)) + mXcp = mX.cumsum(1) + assert_equal(mXcp._data, mX.filled(0).cumsum(1)) + + mXcp = mX.cumprod(0) + assert_equal(mXcp._data, mX.filled(1).cumprod(0)) + mXcp = mX.cumprod(1) + assert_equal(mXcp._data, mX.filled(1).cumprod(1)) + + def test_cumsumprod_with_output(self): + # Tests cumsum/cumprod w/ output + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + for funcname in ('cumsum', 'cumprod'): + npfunc = getattr(np, funcname) + xmmeth = getattr(xm, funcname) + + # A ndarray as explicit input + output = np.empty((3, 4), dtype=float) + output.fill(-9999) + result = npfunc(xm, axis=0, out=output) + # ... the result should be the given output + assert_(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + + output = empty((3, 4), dtype=int) + result = xmmeth(axis=0, out=output) + assert_(result is output) + + def test_ptp(self): + # Tests ptp on MaskedArrays. 
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + (n, m) = X.shape + assert_equal(mx.ptp(), mx.compressed().ptp()) + rows = np.zeros(n, float) + cols = np.zeros(m, float) + for k in range(m): + cols[k] = mX[:, k].compressed().ptp() + for k in range(n): + rows[k] = mX[k].compressed().ptp() + assert_equal(mX.ptp(0), cols) + assert_equal(mX.ptp(1), rows) + + def test_add_object(self): + x = masked_array(['a', 'b'], mask=[1, 0], dtype=object) + y = x + 'x' + assert_equal(y[1], 'bx') + assert_(y.mask[0]) + + def test_sum_object(self): + # Test sum on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) + assert_equal(a.sum(), 5) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.sum(axis=0), [5, 7, 9]) + + def test_prod_object(self): + # Test prod on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) + assert_equal(a.prod(), 2 * 3) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.prod(axis=0), [4, 10, 18]) + + def test_meananom_object(self): + # Test mean/anom on object dtype + a = masked_array([1, 2, 3], dtype=object) + assert_equal(a.mean(), 2) + assert_equal(a.anom(), [-1, 0, 1]) + + def test_anom_shape(self): + a = masked_array([1, 2, 3]) + assert_equal(a.anom().shape, a.shape) + a.mask = True + assert_equal(a.anom().shape, a.shape) + assert_(np.ma.is_masked(a.anom())) + + def test_anom(self): + a = masked_array(np.arange(1, 7).reshape(2, 3)) + assert_almost_equal(a.anom(), + [[-2.5, -1.5, -0.5], [0.5, 1.5, 2.5]]) + assert_almost_equal(a.anom(axis=0), + [[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]]) + assert_almost_equal(a.anom(axis=1), + [[-1., 0., 1.], [-1., 0., 1.]]) + a.mask = [[0, 0, 1], [0, 1, 0]] + mval = -99 + assert_almost_equal(a.anom().filled(mval), + [[-2.25, -1.25, mval], [0.75, mval, 2.75]]) + assert_almost_equal(a.anom(axis=0).filled(mval), + [[-1.5, 0.0, mval], [1.5, mval, 0.0]]) + assert_almost_equal(a.anom(axis=1).filled(mval), + [[-0.5, 0.5, mval], [-1.0, mval, 1.0]]) + + def test_trace(self): + # Tests trace on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_almost_equal(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0)) + assert_equal(np.trace(mX), mX.trace()) + + # gh-5560 + arr = np.arange(2*4*4).reshape(2,4,4) + m_arr = np.ma.masked_array(arr, False) + assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2)) + + def test_dot(self): + # Tests dot on MaskedArrays. 
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + fx = mx.filled(0) + r = mx.dot(mx) + assert_almost_equal(r.filled(0), fx.dot(fx)) + assert_(r.mask is nomask) + + fX = mX.filled(0) + r = mX.dot(mX) + assert_almost_equal(r.filled(0), fX.dot(fX)) + assert_(r.mask[1,3]) + r1 = empty_like(r) + mX.dot(mX, out=r1) + assert_almost_equal(r, r1) + + mYY = mXX.swapaxes(-1, -2) + fXX, fYY = mXX.filled(0), mYY.filled(0) + r = mXX.dot(mYY) + assert_almost_equal(r.filled(0), fXX.dot(fYY)) + r1 = empty_like(r) + mXX.dot(mYY, out=r1) + assert_almost_equal(r, r1) + + def test_dot_shape_mismatch(self): + # regression test + x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) + y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]]) + z = masked_array([[0,1],[3,3]]) + x.dot(y, out=z) + assert_almost_equal(z.filled(0), [[1, 0], [15, 16]]) + assert_almost_equal(z.mask, [[0, 1], [0, 0]]) + + def test_varmean_nomask(self): + # gh-5769 + foo = array([1,2,3,4], dtype='f8') + bar = array([1,2,3,4], dtype='f8') + assert_equal(type(foo.mean()), np.float64) + assert_equal(type(foo.var()), np.float64) + assert((foo.mean() == bar.mean()) is np.bool_(True)) + + # check array type is preserved and out works + foo = array(np.arange(16).reshape((4,4)), dtype='f8') + bar = empty(4, dtype='f4') + assert_equal(type(foo.mean(axis=1)), MaskedArray) + assert_equal(type(foo.var(axis=1)), MaskedArray) + assert_(foo.mean(axis=1, out=bar) is bar) + assert_(foo.var(axis=1, out=bar) is bar) + + def test_varstd(self): + # Tests var & std on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_almost_equal(mX.std(axis=None, ddof=1), + mX.compressed().std(ddof=1)) + assert_almost_equal(mX.var(axis=None, ddof=1), + mX.compressed().var(ddof=1)) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + @suppress_copy_mask_on_assignment + def test_varstd_specialcases(self): + # Test a special case for var + nout = np.array(-1, dtype=float) + mout = array(-1, dtype=float) + + x = array(arange(10), mask=True) + for methodname in ('var', 'std'): + method = getattr(x, methodname) + assert_(method() is masked) + assert_(method(0) is masked) + assert_(method(-1) is masked) + # Using a masked array as explicit output + method(out=mout) + assert_(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout) + assert_(np.isnan(nout)) + + x = array(arange(10), mask=True) + x[-1] = 9 + for methodname in ('var', 'std'): + method = getattr(x, methodname) + assert_(method(ddof=1) is masked) + assert_(method(0, ddof=1) is masked) + assert_(method(-1, ddof=1) is masked) + # Using a masked array as explicit output + method(out=mout, ddof=1) + assert_(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout, ddof=1) + assert_(np.isnan(nout)) + + def test_varstd_ddof(self): + a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]]) + 
test = a.std(axis=0, ddof=0) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=1) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=2) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [1, 1, 1]) + + def test_diag(self): + # Test diag + x = arange(9).reshape((3, 3)) + x[1, 1] = masked + out = np.diag(x) + assert_equal(out, [0, 4, 8]) + out = diag(x) + assert_equal(out, [0, 4, 8]) + assert_equal(out.mask, [0, 1, 0]) + out = diag(out) + control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], + mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(out, control) + + def test_axis_methods_nomask(self): + # Test the combination nomask & methods w/ axis + a = array([[1, 2, 3], [4, 5, 6]]) + + assert_equal(a.sum(0), [5, 7, 9]) + assert_equal(a.sum(-1), [6, 15]) + assert_equal(a.sum(1), [6, 15]) + + assert_equal(a.prod(0), [4, 10, 18]) + assert_equal(a.prod(-1), [6, 120]) + assert_equal(a.prod(1), [6, 120]) + + assert_equal(a.min(0), [1, 2, 3]) + assert_equal(a.min(-1), [1, 4]) + assert_equal(a.min(1), [1, 4]) + + assert_equal(a.max(0), [4, 5, 6]) + assert_equal(a.max(-1), [3, 6]) + assert_equal(a.max(1), [3, 6]) + + @requires_memory(free_bytes=2 * 10000 * 1000 * 2) + def test_mean_overflow(self): + # Test overflow in masked arrays + # gh-20272 + a = masked_array(np.full((10000, 10000), 65535, dtype=np.uint16), + mask=np.zeros((10000, 10000))) + assert_equal(a.mean(), 65535.0) + + def test_diff_with_prepend(self): + # GH 22465 + x = np.array([1, 2, 2, 3, 4, 2, 1, 1]) + + a = np.ma.masked_equal(x[3:], value=2) + a_prep = np.ma.masked_equal(x[:3], value=2) + diff1 = np.ma.diff(a, prepend=a_prep, axis=0) + + b = np.ma.masked_equal(x, value=2) + diff2 = np.ma.diff(b, axis=0) + + assert_(np.ma.allequal(diff1, diff2)) + + def test_diff_with_append(self): + # GH 22465 + x = np.array([1, 2, 2, 3, 4, 2, 1, 1]) + + a = np.ma.masked_equal(x[:3], value=2) + a_app = np.ma.masked_equal(x[3:], value=2) + diff1 = np.ma.diff(a, append=a_app, axis=0) + + b = np.ma.masked_equal(x, value=2) + diff2 = np.ma.diff(b, axis=0) + + assert_(np.ma.allequal(diff1, diff2)) + + def test_diff_with_dim_0(self): + with pytest.raises( + ValueError, + match="diff requires input that is at least one dimensional" + ): + np.ma.diff(np.array(1)) + + def test_diff_with_n_0(self): + a = np.ma.masked_equal([1, 2, 2, 3, 4, 2, 1, 1], value=2) + diff = np.ma.diff(a, n=0, axis=0) + + assert_(np.ma.allequal(a, diff)) + + +class TestMaskedArrayMathMethodsComplex: + # Test class for miscellaneous MaskedArrays methods. + def setup_method(self): + # Base data definition. 
+ x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479j, + 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_varstd(self): + # Tests var & std on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + +class TestMaskedArrayFunctions: + # Test class for miscellaneous functions. + + def setup_method(self): + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + self.info = (xm, ym) + + def test_masked_where_bool(self): + x = [1, 2] + y = masked_where(False, x) + assert_equal(y, [1, 2]) + assert_equal(y[1], 2) + + def test_masked_equal_wlist(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [0, 0, 1]) + mx = masked_not_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [1, 1, 0]) + + def test_masked_equal_fill_value(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx._mask, [0, 0, 1]) + assert_equal(mx.fill_value, 3) + + def test_masked_where_condition(self): + # Tests masking functions. + x = array([1., 2., 3., 4., 5.]) + x[2] = masked + assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2)) + assert_equal(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2)) + assert_equal(masked_where(less(x, 2), x), masked_less(x, 2)) + assert_equal(masked_where(less_equal(x, 2), x), + masked_less_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5]) + + def test_masked_where_oddities(self): + # Tests some generic features. 
+        atest = ones((10, 10, 10), dtype=float)
+        btest = zeros(atest.shape, MaskType)
+        ctest = masked_where(btest, atest)
+        assert_equal(atest, ctest)
+
+    def test_masked_where_shape_constraint(self):
+        a = arange(10)
+        with assert_raises(IndexError):
+            masked_equal(1, a)
+        test = masked_equal(a, 1)
+        assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
+
+    def test_masked_where_structured(self):
+        # test that masked_where on a structured array sets a structured
+        # mask (see issue #2972)
+        a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
+        am = np.ma.masked_where(a["A"] < 5, a)
+        assert_equal(am.mask.dtype.names, am.dtype.names)
+        assert_equal(am["A"],
+                     np.ma.masked_array(np.zeros(10), np.ones(10)))
+
+    def test_masked_where_mismatch(self):
+        # gh-4520
+        x = np.arange(10)
+        y = np.arange(5)
+        assert_raises(IndexError, masked_where, y > 6, x)
+
+    def test_masked_otherfunctions(self):
+        assert_equal(masked_inside(list(range(5)), 1, 3),
+                     [0, 199, 199, 199, 4])
+        assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
+        assert_equal(masked_inside(array(list(range(5)),
+                                         mask=[1, 0, 0, 0, 0]), 1, 3).mask,
+                     [1, 1, 1, 1, 0])
+        assert_equal(masked_outside(array(list(range(5)),
+                                          mask=[0, 1, 0, 0, 0]), 1, 3).mask,
+                     [1, 1, 0, 0, 1])
+        assert_equal(masked_equal(array(list(range(5)),
+                                        mask=[1, 0, 0, 0, 0]), 2).mask,
+                     [1, 0, 1, 0, 0])
+        assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
+                                            mask=[1, 0, 0, 0, 0]), 2).mask,
+                     [1, 0, 1, 0, 1])
+
+    def test_round(self):
+        a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
+                  mask=[0, 1, 0, 0, 0])
+        assert_equal(a.round(), [1., 2., 3., 5., 6.])
+        assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
+        assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
+        b = empty_like(a)
+        a.round(out=b)
+        assert_equal(b, [1., 2., 3., 5., 6.])
+
+        x = array([1., 2., 3., 4., 5.])
+        c = array([1, 1, 1, 0, 0])
+        x[2] = masked
+        z = where(c, x, -x)
+        assert_equal(z, [1., 2., 0., -4., -5])
+        c[0] = masked
+        z = where(c, x, -x)
+        assert_equal(z, [1., 2., 0., -4., -5])
+        assert_(z[0] is masked)
+        assert_(z[1] is not masked)
+        assert_(z[2] is masked)
+
+    def test_round_with_output(self):
+        # Testing round with an explicit output
+
+        xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
+        xm[:, 0] = xm[0] = xm[-1, -1] = masked
+
+        # A ndarray as explicit input
+        output = np.empty((3, 4), dtype=float)
+        output.fill(-9999)
+        result = np.round(xm, decimals=2, out=output)
+        # ...
the result should be the given output + assert_(result is output) + assert_equal(result, xm.round(decimals=2, out=output)) + + output = empty((3, 4), dtype=float) + result = xm.round(decimals=2, out=output) + assert_(result is output) + + def test_round_with_scalar(self): + # Testing round with scalar/zero dimension input + # GH issue 2244 + a = array(1.1, mask=[False]) + assert_equal(a.round(), 1) + + a = array(1.1, mask=[True]) + assert_(a.round() is masked) + + a = array(1.1, mask=[False]) + output = np.empty(1, dtype=float) + output.fill(-9999) + a.round(out=output) + assert_equal(output, 1) + + a = array(1.1, mask=[False]) + output = array(-9999., mask=[True]) + a.round(out=output) + assert_equal(output[()], 1) + + a = array(1.1, mask=[True]) + output = array(-9999., mask=[False]) + a.round(out=output) + assert_(output[()] is masked) + + def test_identity(self): + a = identity(5) + assert_(isinstance(a, MaskedArray)) + assert_equal(a, np.identity(5)) + + def test_power(self): + x = -1.1 + assert_almost_equal(power(x, 2.), 1.21) + assert_(power(x, masked) is masked) + x = array([-1.1, -1.1, 1.1, 1.1, 0.]) + b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) + y = power(x, b) + assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.]) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + b.mask = nomask + y = power(x, b) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + z = x ** b + assert_equal(z._mask, y._mask) + assert_almost_equal(z, y) + assert_almost_equal(z._data, y._data) + x **= b + assert_equal(x._mask, y._mask) + assert_almost_equal(x, y) + assert_almost_equal(x._data, y._data) + + def test_power_with_broadcasting(self): + # Test power w/ broadcasting + a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) + a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) + b1 = np.array([2, 4, 3]) + b2 = np.array([b1, b1]) + b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]]) + + ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], + mask=[[1, 1, 0], [0, 1, 1]]) + # No broadcasting, base & exp w/ mask + test = a2m ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # No broadcasting, base w/ mask, exp w/o mask + test = a2m ** b2 + assert_equal(test, ctrl) + assert_equal(test.mask, a2m.mask) + # No broadcasting, base w/o mask, exp w/ mask + test = a2 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, b2m.mask) + + ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], + mask=[[0, 1, 0], [0, 1, 0]]) + test = b1 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + test = b2m ** b1 + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_where(self): + # Test the where function + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + + d = where(xm > 2, xm, -9) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + assert_equal(d._mask, xm._mask) + d = where(xm > 2, -9, ym) + assert_equal(d, [5., 0., 3., 2., -1., -9., + -9., -10., -9., 1., 0., -9.]) + assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]) + d = where(xm > 2, xm, masked) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + tmp = xm._mask.copy() + tmp[(xm <= 2).filled(True)] = 
True
+        assert_equal(d._mask, tmp)
+
+        with np.errstate(invalid="warn"):
+            # The fill value is 1e20; it cannot be converted to `int`:
+            with pytest.warns(RuntimeWarning, match="invalid value"):
+                ixm = xm.astype(int)
+        d = where(ixm > 2, ixm, masked)
+        assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])
+        assert_equal(d.dtype, ixm.dtype)
+
+    def test_where_object(self):
+        a = np.array(None)
+        b = masked_array(None)
+        r = b.copy()
+        assert_equal(np.ma.where(True, a, a), r)
+        assert_equal(np.ma.where(True, b, b), r)
+
+    def test_where_with_masked_choice(self):
+        x = arange(10)
+        x[3] = masked
+        c = x >= 8
+        # Set False to masked
+        z = where(c, x, masked)
+        assert_(z.dtype is x.dtype)
+        assert_(z[3] is masked)
+        assert_(z[4] is masked)
+        assert_(z[7] is masked)
+        assert_(z[8] is not masked)
+        assert_(z[9] is not masked)
+        assert_equal(x, z)
+        # Set True to masked
+        z = where(c, masked, x)
+        assert_(z.dtype is x.dtype)
+        assert_(z[3] is masked)
+        assert_(z[4] is not masked)
+        assert_(z[7] is not masked)
+        assert_(z[8] is masked)
+        assert_(z[9] is masked)
+
+    def test_where_with_masked_condition(self):
+        x = array([1., 2., 3., 4., 5.])
+        c = array([1, 1, 1, 0, 0])
+        x[2] = masked
+        z = where(c, x, -x)
+        assert_equal(z, [1., 2., 0., -4., -5])
+        c[0] = masked
+        z = where(c, x, -x)
+        assert_equal(z, [1., 2., 0., -4., -5])
+        assert_(z[0] is masked)
+        assert_(z[1] is not masked)
+        assert_(z[2] is masked)
+
+        x = arange(1, 6)
+        x[-1] = masked
+        y = arange(1, 6) * 10
+        y[2] = masked
+        c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0])
+        cm = c.filled(1)
+        z = where(c, x, y)
+        zm = where(cm, x, y)
+        assert_equal(z, zm)
+        assert_(getmask(zm) is nomask)
+        assert_equal(zm, [1, 2, 3, 40, 50])
+        z = where(c, masked, 1)
+        assert_equal(z, [99, 99, 99, 1, 1])
+        z = where(c, 1, masked)
+        assert_equal(z, [99, 1, 1, 99, 99])
+
+    def test_where_type(self):
+        # Test the type conservation with where
+        x = np.arange(4, dtype=np.int32)
+        y = np.arange(4, dtype=np.float32) * 2.2
+        test = where(x > 1.5, y, x).dtype
+        control = np.result_type(np.int32, np.float32)
+        assert_equal(test, control)
+
+    def test_where_broadcast(self):
+        # Issue 8599
+        x = np.arange(9).reshape(3, 3)
+        y = np.zeros(3)
+        core = np.where([1, 0, 1], x, y)
+        ma = where([1, 0, 1], x, y)
+
+        assert_equal(core, ma)
+        assert_equal(core.dtype, ma.dtype)
+
+    def test_where_structured(self):
+        # Issue 8600
+        dt = np.dtype([('a', int), ('b', int)])
+        x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt)
+        y = np.array((10, 20), dtype=dt)
+        core = np.where([0, 1, 1], x, y)
+        ma = where([0, 1, 1], x, y)
+
+        assert_equal(core, ma)
+        assert_equal(core.dtype, ma.dtype)
+
+    def test_where_structured_masked(self):
+        dt = np.dtype([('a', int), ('b', int)])
+        x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt)
+
+        ma = where([0, 1, 1], x, masked)
+        expected = masked_where([1, 0, 0], x)
+
+        assert_equal(ma.dtype, expected.dtype)
+        assert_equal(ma, expected)
+        assert_equal(ma.mask, expected.mask)
+
+    def test_masked_invalid_error(self):
+        a = np.arange(5, dtype=object)
+        a[3] = np.PINF
+        a[2] = np.NaN
+        with pytest.raises(TypeError,
+                           match="not supported for the input types"):
+            np.ma.masked_invalid(a)
+
+    def test_masked_invalid_pandas(self):
+        # getdata() used to be bad for pandas series due to its _data
+        # attribute. This is mainly a regression test and may be removed
+        # if getdata() is adjusted.
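+        # A minimal duck-type stand-in follows: its `_data` attribute is
+        # deliberately nonsense, so the assertions only hold if
+        # masked_invalid reaches the values through `__array__` instead of
+        # trusting `_data`.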
+        class Series():
+            _data = "nonsense"
+
+            def __array__(self):
+                return np.array([5, np.nan, np.inf])
+
+        arr = np.ma.masked_invalid(Series())
+        assert_array_equal(arr._data, np.array(Series()))
+        assert_array_equal(arr._mask, [False, True, True])
+
+    @pytest.mark.parametrize("copy", [True, False])
+    def test_masked_invalid_full_mask(self, copy):
+        # Matplotlib relied on masked_invalid always returning a full mask
+        # (Also some astropy projects, but they were ok with the change:
+        # gh-22720 and gh-22842)
+        a = np.ma.array([1, 2, 3, 4])
+        assert a._mask is nomask
+        res = np.ma.masked_invalid(a, copy=copy)
+        assert res.mask is not nomask
+        # mask of a should not be mutated
+        assert a.mask is nomask
+        assert np.may_share_memory(a._data, res._data) != copy
+
+    def test_choose(self):
+        # Test choose
+        choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+                   [20, 21, 22, 23], [30, 31, 32, 33]]
+        chosen = choose([2, 3, 1, 0], choices)
+        assert_equal(chosen, array([20, 31, 12, 3]))
+        chosen = choose([2, 4, 1, 0], choices, mode='clip')
+        assert_equal(chosen, array([20, 31, 12, 3]))
+        chosen = choose([2, 4, 1, 0], choices, mode='wrap')
+        assert_equal(chosen, array([20, 1, 12, 3]))
+        # Check with some masked indices
+        indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
+        chosen = choose(indices_, choices, mode='wrap')
+        assert_equal(chosen, array([99, 1, 12, 99]))
+        assert_equal(chosen.mask, [1, 0, 0, 1])
+        # Check with some masked choices
+        choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
+                                       [1, 0, 0, 0], [0, 0, 0, 0]])
+        indices_ = [2, 3, 1, 0]
+        chosen = choose(indices_, choices, mode='wrap')
+        assert_equal(chosen, array([20, 31, 12, 3]))
+        assert_equal(chosen.mask, [1, 0, 0, 1])
+
+    def test_choose_with_out(self):
+        # Test choose with an explicit out keyword
+        choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+                   [20, 21, 22, 23], [30, 31, 32, 33]]
+        store = empty(4, dtype=int)
+        chosen = choose([2, 3, 1, 0], choices, out=store)
+        assert_equal(store, array([20, 31, 12, 3]))
+        assert_(store is chosen)
+        # Check with some masked indices + out
+        store = empty(4, dtype=int)
+        indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
+        chosen = choose(indices_, choices, mode='wrap', out=store)
+        assert_equal(store, array([99, 31, 12, 99]))
+        assert_equal(store.mask, [1, 0, 0, 1])
+        # Check with some masked choices + out in an ndarray
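+        # (A plain-ndarray output buffer has nowhere to record the mask,
+        # so masked entries land as the default int fill value, 999999,
+        # as asserted below.)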
+ choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], + [1, 0, 0, 0], [0, 0, 0, 0]]) + indices_ = [2, 3, 1, 0] + store = empty(4, dtype=int).view(ndarray) + chosen = choose(indices_, choices, mode='wrap', out=store) + assert_equal(store, array([999999, 31, 12, 999999])) + + def test_reshape(self): + a = arange(10) + a[0] = masked + # Try the default + b = a.reshape((5, 2)) + assert_equal(b.shape, (5, 2)) + assert_(b.flags['C']) + # Try w/ arguments as list instead of tuple + b = a.reshape(5, 2) + assert_equal(b.shape, (5, 2)) + assert_(b.flags['C']) + # Try w/ order + b = a.reshape((5, 2), order='F') + assert_equal(b.shape, (5, 2)) + assert_(b.flags['F']) + # Try w/ order + b = a.reshape(5, 2, order='F') + assert_equal(b.shape, (5, 2)) + assert_(b.flags['F']) + + c = np.reshape(a, (2, 5)) + assert_(isinstance(c, MaskedArray)) + assert_equal(c.shape, (2, 5)) + assert_(c[0, 0] is masked) + assert_(c.flags['C']) + + def test_make_mask_descr(self): + # Flexible + ntype = [('a', float), ('b', float)] + test = make_mask_descr(ntype) + assert_equal(test, [('a', bool), ('b', bool)]) + assert_(test is make_mask_descr(test)) + + # Standard w/ shape + ntype = (float, 2) + test = make_mask_descr(ntype) + assert_equal(test, (bool, 2)) + assert_(test is make_mask_descr(test)) + + # Standard standard + ntype = float + test = make_mask_descr(ntype) + assert_equal(test, np.dtype(bool)) + assert_(test is make_mask_descr(test)) + + # Nested + ntype = [('a', float), ('b', [('ba', float), ('bb', float)])] + test = make_mask_descr(ntype) + control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) + assert_equal(test, control) + assert_(test is make_mask_descr(test)) + + # Named+ shape + ntype = [('a', (float, 2))] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([('a', (bool, 2))])) + assert_(test is make_mask_descr(test)) + + # 2 names + ntype = [(('A', 'a'), float)] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([(('A', 'a'), bool)])) + assert_(test is make_mask_descr(test)) + + # nested boolean types should preserve identity + base_type = np.dtype([('a', int, 3)]) + base_mtype = make_mask_descr(base_type) + sub_type = np.dtype([('a', int), ('b', base_mtype)]) + test = make_mask_descr(sub_type) + assert_equal(test, np.dtype([('a', bool), ('b', [('a', bool, 3)])])) + assert_(test.fields['b'][0] is base_mtype) + + def test_make_mask(self): + # Test make_mask + # w/ a list as an input + mask = [0, 1] + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a ndarray as an input + mask = np.array([0, 1], dtype=bool) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a flexible-type ndarray as an input - use default + mdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [1, 1]) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, mdtype) + assert_equal(test, mask) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', float), ('b', float)] + bdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, bdtype) + assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) + # Ensure this 
also works for void + mask = np.array((False, True), dtype='?,?')[()] + assert_(isinstance(mask, np.void)) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test, mask) + assert_(test is not mask) + mask = np.array((0, 1), dtype='i4,i4')[()] + test2 = make_mask(mask, dtype=mask.dtype) + assert_equal(test2, test) + # test that nomask is returned when m is nomask. + bools = [True, False] + dtypes = [MaskType, float] + msgformat = 'copy=%s, shrink=%s, dtype=%s' + for cpy, shr, dt in itertools.product(bools, bools, dtypes): + res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt) + assert_(res is nomask, msgformat % (cpy, shr, dt)) + + def test_mask_or(self): + # Initialize + mtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) + # Test using nomask as input + test = mask_or(mask, nomask) + assert_equal(test, mask) + test = mask_or(nomask, mask) + assert_equal(test, mask) + # Using False as input + test = mask_or(mask, False) + assert_equal(test, mask) + # Using another array w / the same dtype + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) + test = mask_or(mask, other) + control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) + assert_equal(test, control) + # Using another array w / a different dtype + othertype = [('A', bool), ('B', bool)] + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) + try: + test = mask_or(mask, other) + except ValueError: + pass + # Using nested arrays + dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) + bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) + cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) + assert_equal(mask_or(amask, bmask), cntrl) + + def test_flatten_mask(self): + # Tests flatten mask + # Standard dtype + mask = np.array([0, 0, 1], dtype=bool) + assert_equal(flatten_mask(mask), mask) + # Flexible dtype + mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + data = [(0, (0, 0)), (0, (0, 1))] + mask = np.array(data, dtype=mdtype) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + def test_on_ndarray(self): + # Test functions on ndarrays + a = np.array([1, 2, 3, 4]) + m = array(a, mask=False) + test = anom(a) + assert_equal(test, m.anom()) + test = reshape(a, (2, 2)) + assert_equal(test, m.reshape(2, 2)) + + def test_compress(self): + # Test compress function on ndarray and masked array + # Address Github #2495. + arr = np.arange(8) + arr.shape = 4, 2 + cond = np.array([True, False, True, True]) + control = arr[[0, 2, 3]] + test = np.ma.compress(cond, arr, axis=0) + assert_equal(test, control) + marr = np.ma.array(arr) + test = np.ma.compress(cond, marr, axis=0) + assert_equal(test, control) + + def test_compressed(self): + # Test ma.compressed function. 
+ # Address gh-4026 + a = np.ma.array([1, 2]) + test = np.ma.compressed(a) + assert_(type(test) is np.ndarray) + + # Test case when input data is ndarray subclass + class A(np.ndarray): + pass + + a = np.ma.array(A(shape=0)) + test = np.ma.compressed(a) + assert_(type(test) is A) + + # Test that compress flattens + test = np.ma.compressed([[1],[2]]) + assert_equal(test.ndim, 1) + test = np.ma.compressed([[[[[1]]]]]) + assert_equal(test.ndim, 1) + + # Test case when input is MaskedArray subclass + class M(MaskedArray): + pass + + test = np.ma.compressed(M([[[]], [[]]])) + assert_equal(test.ndim, 1) + + # with .compressed() overridden + class M(MaskedArray): + def compressed(self): + return 42 + + test = np.ma.compressed(M([[[]], [[]]])) + assert_equal(test, 42) + + def test_convolve(self): + a = masked_equal(np.arange(5), 2) + b = np.array([1, 1]) + test = np.ma.convolve(a, b) + assert_equal(test, masked_equal([0, 1, -1, -1, 7, 4], -1)) + + test = np.ma.convolve(a, b, propagate_mask=False) + assert_equal(test, masked_equal([0, 1, 1, 3, 7, 4], -1)) + + test = np.ma.convolve([1, 1], [1, 1, 1]) + assert_equal(test, masked_equal([1, 2, 2, 1], -1)) + + a = [1, 1] + b = masked_equal([1, -1, -1, 1], -1) + test = np.ma.convolve(a, b, propagate_mask=False) + assert_equal(test, masked_equal([1, 1, -1, 1, 1], -1)) + test = np.ma.convolve(a, b, propagate_mask=True) + assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1)) + + +class TestMaskedFields: + + def setup_method(self): + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = ['one', 'two', 'three', 'four', 'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mdtype = [('a', bool), ('b', bool), ('c', bool)] + mask = [0, 1, 0, 0, 1] + base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype) + + def test_set_records_masks(self): + base = self.data['base'] + mdtype = self.data['mdtype'] + # Set w/ nomask or masked + base.mask = nomask + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = masked + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ simple boolean + base.mask = False + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = True + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ list + base.mask = [0, 0, 0, 1, 1] + assert_equal_records(base._mask, + np.array([(x, x, x) for x in [0, 0, 0, 1, 1]], + dtype=mdtype)) + + def test_set_record_element(self): + # Check setting an element of a record) + base = self.data['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[0] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, int) + assert_equal(base_a._data, [3, 2, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + [b'pi', b'two', b'three', b'four', b'five']) + + def test_set_record_slice(self): + base = self.data['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[:3] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, int) + assert_equal(base_a._data, [3, 3, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + [b'pi', b'pi', b'pi', b'four', b'five']) + + def test_mask_element(self): + "Check record access" + base = self.data['base'] + base[0] 
= masked
+
+        for n in ('a', 'b', 'c'):
+            assert_equal(base[n].mask, [1, 1, 0, 0, 1])
+            assert_equal(base[n]._data, base._data[n])
+
+    def test_getmaskarray(self):
+        # Test getmaskarray on flexible dtype
+        ndtype = [('a', int), ('b', float)]
+        test = empty(3, dtype=ndtype)
+        assert_equal(getmaskarray(test),
+                     np.array([(0, 0), (0, 0), (0, 0)],
+                              dtype=[('a', '|b1'), ('b', '|b1')]))
+        test[:] = masked
+        assert_equal(getmaskarray(test),
+                     np.array([(1, 1), (1, 1), (1, 1)],
+                              dtype=[('a', '|b1'), ('b', '|b1')]))
+
+    def test_view(self):
+        # Test view w/ flexible dtype
+        iterator = list(zip(np.arange(10), np.random.rand(10)))
+        data = np.array(iterator)
+        a = array(iterator, dtype=[('a', float), ('b', float)])
+        a.mask[0] = (1, 0)
+        controlmask = np.array([1] + 19 * [0], dtype=bool)
+        # Transform globally to simple dtype
+        test = a.view(float)
+        assert_equal(test, data.ravel())
+        assert_equal(test.mask, controlmask)
+        # Transform globally to a subarray dtype, (float, 2)
+        test = a.view((float, 2))
+        assert_equal(test, data)
+        assert_equal(test.mask, controlmask.reshape(-1, 2))
+
+    def test_getitem(self):
+        ndtype = [('a', float), ('b', float)]
+        a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
+        a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
+                                   [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),
+                          dtype=[('a', bool), ('b', bool)])
+
+        def _test_index(i):
+            assert_equal(type(a[i]), mvoid)
+            assert_equal_records(a[i]._data, a._data[i])
+            assert_equal_records(a[i]._mask, a._mask[i])
+
+            assert_equal(type(a[i, ...]), MaskedArray)
+            assert_equal_records(a[i,...]._data, a._data[i,...])
+            assert_equal_records(a[i,...]._mask, a._mask[i,...])
+
+        _test_index(1)   # No mask
+        _test_index(0)   # One element masked
+        _test_index(-2)  # All elements masked
+
+    def test_setitem(self):
+        # Issue 4866: check that one can set individual items in [record][col]
+        # and [col][record] order
+        ndtype = np.dtype([('a', float), ('b', int)])
+        ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype)
+        ma['a'][1] = 3.0
+        assert_equal(ma['a'], np.array([1.0, 3.0]))
+        ma[1]['a'] = 4.0
+        assert_equal(ma['a'], np.array([1.0, 4.0]))
+        # Issue 2403
+        mdtype = np.dtype([('a', bool), ('b', bool)])
+        # soft mask
+        control = np.array([(False, True), (True, True)], dtype=mdtype)
+        a = np.ma.masked_all((2,), dtype=ndtype)
+        a['a'][0] = 2
+        assert_equal(a.mask, control)
+        a = np.ma.masked_all((2,), dtype=ndtype)
+        a[0]['a'] = 2
+        assert_equal(a.mask, control)
+        # hard mask
+        control = np.array([(True, True), (True, True)], dtype=mdtype)
+        a = np.ma.masked_all((2,), dtype=ndtype)
+        a.harden_mask()
+        a['a'][0] = 2
+        assert_equal(a.mask, control)
+        a = np.ma.masked_all((2,), dtype=ndtype)
+        a.harden_mask()
+        a[0]['a'] = 2
+        assert_equal(a.mask, control)
+
+    def test_setitem_scalar(self):
+        # 8510
+        mask_0d = np.ma.masked_array(1, mask=True)
+        arr = np.ma.arange(3)
+        arr[0] = mask_0d
+        assert_array_equal(arr.mask, [True, False, False])
+
+    def test_element_len(self):
+        # check that len() works for mvoid (Github issue #576)
+        for rec in self.data['base']:
+            assert_equal(len(rec), len(self.data['ddtype']))
+
+
+class TestMaskedObjectArray:
+
+    def test_getitem(self):
+        arr = np.ma.array([None, None])
+        for dt in [float, object]:
+            a0 = np.eye(2).astype(dt)
+            a1 = np.eye(3).astype(dt)
+            arr[0] = a0
+            arr[1] = a1
+
+            assert_(arr[0] is a0)
+            assert_(arr[1] is a1)
+            assert_(isinstance(arr[0,...], MaskedArray))
+            assert_(isinstance(arr[1,...], MaskedArray))
+            assert_(arr[0,...][()] is a0)
+            assert_(arr[1,...][()] is a1)
+
+            arr[0] = np.ma.masked
+
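+            # Masking element 0 must not disturb its neighbour: the checks
+            # below verify identity for arr[1] and a full mask for arr[0].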
assert_(arr[1] is a1) + assert_(isinstance(arr[0,...], MaskedArray)) + assert_(isinstance(arr[1,...], MaskedArray)) + assert_equal(arr[0,...].mask, True) + assert_(arr[1,...][()] is a1) + + # gh-5962 - object arrays of arrays do something special + assert_equal(arr[0].data, a0) + assert_equal(arr[0].mask, True) + assert_equal(arr[0,...][()].data, a0) + assert_equal(arr[0,...][()].mask, True) + + def test_nested_ma(self): + + arr = np.ma.array([None, None]) + # set the first object to be an unmasked masked constant. A little fiddly + arr[0,...] = np.array([np.ma.masked], object)[0,...] + + # check the above line did what we were aiming for + assert_(arr.data[0] is np.ma.masked) + + # test that getitem returned the value by identity + assert_(arr[0] is np.ma.masked) + + # now mask the masked value! + arr[0] = np.ma.masked + assert_(arr[0] is np.ma.masked) + + +class TestMaskedView: + + def setup_method(self): + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + self.data = (data, a, controlmask) + + def test_view_to_nothing(self): + (data, a, controlmask) = self.data + test = a.view() + assert_(isinstance(test, MaskedArray)) + assert_equal(test._data, a._data) + assert_equal(test._mask, a._mask) + + def test_view_to_type(self): + (data, a, controlmask) = self.data + test = a.view(np.ndarray) + assert_(not isinstance(test, MaskedArray)) + assert_equal(test, a._data) + assert_equal_records(test, data.view(a.dtype).squeeze()) + + def test_view_to_simple_dtype(self): + (data, a, controlmask) = self.data + # View globally + test = a.view(float) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data.ravel()) + assert_equal(test.mask, controlmask) + + def test_view_to_flexible_dtype(self): + (data, a, controlmask) = self.data + + test = a.view([('A', float), ('B', float)]) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a']) + assert_equal(test['B'], a['b']) + + test = a[0].view([('A', float), ('B', float)]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][0]) + assert_equal(test['B'], a['b'][0]) + + test = a[-1].view([('A', float), ('B', float)]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][-1]) + assert_equal(test['B'], a['b'][-1]) + + def test_view_to_subdtype(self): + (data, a, controlmask) = self.data + # View globally + test = a.view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data) + assert_equal(test.mask, controlmask.reshape(-1, 2)) + # View on 1 masked element + test = a[0].view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data[0]) + assert_equal(test.mask, (1, 0)) + # View on 1 unmasked element + test = a[-1].view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data[-1]) + + def test_view_to_dtype_and_type(self): + (data, a, controlmask) = self.data + + test = a.view((float, 2), np.recarray) + assert_equal(test, data) + assert_(isinstance(test, np.recarray)) + assert_(not isinstance(test, MaskedArray)) + + +class TestOptionalArgs: + def test_ndarrayfuncs(self): + # test axis arg behaves the same as ndarray (including multiple axes) + + d = np.arange(24.0).reshape((2,3,4)) + m = np.zeros(24, dtype=bool).reshape((2,3,4)) + # mask out last element of last 
dimension + m[:,:,-1] = True + a = np.ma.array(d, mask=m) + + def testaxis(f, a, d): + numpy_f = numpy.__getattribute__(f) + ma_f = np.ma.__getattribute__(f) + + # test axis arg + assert_equal(ma_f(a, axis=1)[...,:-1], numpy_f(d[...,:-1], axis=1)) + assert_equal(ma_f(a, axis=(0,1))[...,:-1], + numpy_f(d[...,:-1], axis=(0,1))) + + def testkeepdims(f, a, d): + numpy_f = numpy.__getattribute__(f) + ma_f = np.ma.__getattribute__(f) + + # test keepdims arg + assert_equal(ma_f(a, keepdims=True).shape, + numpy_f(d, keepdims=True).shape) + assert_equal(ma_f(a, keepdims=False).shape, + numpy_f(d, keepdims=False).shape) + + # test both at once + assert_equal(ma_f(a, axis=1, keepdims=True)[...,:-1], + numpy_f(d[...,:-1], axis=1, keepdims=True)) + assert_equal(ma_f(a, axis=(0,1), keepdims=True)[...,:-1], + numpy_f(d[...,:-1], axis=(0,1), keepdims=True)) + + for f in ['sum', 'prod', 'mean', 'var', 'std']: + testaxis(f, a, d) + testkeepdims(f, a, d) + + for f in ['min', 'max']: + testaxis(f, a, d) + + d = (np.arange(24).reshape((2,3,4))%2 == 0) + a = np.ma.array(d, mask=m) + for f in ['all', 'any']: + testaxis(f, a, d) + testkeepdims(f, a, d) + + def test_count(self): + # test np.ma.count specially + + d = np.arange(24.0).reshape((2,3,4)) + m = np.zeros(24, dtype=bool).reshape((2,3,4)) + m[:,0,:] = True + a = np.ma.array(d, mask=m) + + assert_equal(count(a), 16) + assert_equal(count(a, axis=1), 2*ones((2,4))) + assert_equal(count(a, axis=(0,1)), 4*ones((4,))) + assert_equal(count(a, keepdims=True), 16*ones((1,1,1))) + assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4))) + assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4))) + assert_equal(count(a, axis=-2), 2*ones((2,4))) + assert_raises(ValueError, count, a, axis=(1,1)) + assert_raises(np.AxisError, count, a, axis=3) + + # check the 'nomask' path + a = np.ma.array(d, mask=nomask) + + assert_equal(count(a), 24) + assert_equal(count(a, axis=1), 3*ones((2,4))) + assert_equal(count(a, axis=(0,1)), 6*ones((4,))) + assert_equal(count(a, keepdims=True), 24*ones((1,1,1))) + assert_equal(np.ndim(count(a, keepdims=True)), 3) + assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4))) + assert_equal(count(a, axis=(0,1), keepdims=True), 6*ones((1,1,4))) + assert_equal(count(a, axis=-2), 3*ones((2,4))) + assert_raises(ValueError, count, a, axis=(1,1)) + assert_raises(np.AxisError, count, a, axis=3) + + # check the 'masked' singleton + assert_equal(count(np.ma.masked), 0) + + # check 0-d arrays do not allow axis > 0 + assert_raises(np.AxisError, count, np.ma.array(1), axis=1) + + +class TestMaskedConstant: + def _do_add_test(self, add): + # sanity check + assert_(add(np.ma.masked, 1) is np.ma.masked) + + # now try with a vector + vector = np.array([1, 2, 3]) + result = add(np.ma.masked, vector) + + # lots of things could go wrong here + assert_(result is not np.ma.masked) + assert_(not isinstance(result, np.ma.core.MaskedConstant)) + assert_equal(result.shape, vector.shape) + assert_equal(np.ma.getmask(result), np.ones(vector.shape, dtype=bool)) + + def test_ufunc(self): + self._do_add_test(np.add) + + def test_operator(self): + self._do_add_test(lambda a, b: a + b) + + def test_ctor(self): + m = np.ma.array(np.ma.masked) + + # most importantly, we do not want to create a new MaskedConstant + # instance + assert_(not isinstance(m, np.ma.core.MaskedConstant)) + assert_(m is not np.ma.masked) + + def test_repr(self): + # copies should not exist, but if they do, it should be obvious that + # something is wrong + 
assert_equal(repr(np.ma.masked), 'masked') + + # create a new instance in a weird way + masked2 = np.ma.MaskedArray.__new__(np.ma.core.MaskedConstant) + assert_not_equal(repr(masked2), 'masked') + + def test_pickle(self): + from io import BytesIO + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + with BytesIO() as f: + pickle.dump(np.ma.masked, f, protocol=proto) + f.seek(0) + res = pickle.load(f) + assert_(res is np.ma.masked) + + def test_copy(self): + # gh-9328 + # copy is a no-op, like it is with np.True_ + assert_equal( + np.ma.masked.copy() is np.ma.masked, + np.True_.copy() is np.True_) + + def test__copy(self): + import copy + assert_( + copy.copy(np.ma.masked) is np.ma.masked) + + def test_deepcopy(self): + import copy + assert_( + copy.deepcopy(np.ma.masked) is np.ma.masked) + + def test_immutable(self): + orig = np.ma.masked + assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1) + assert_raises(ValueError,operator.setitem, orig.data, (), 1) + assert_raises(ValueError, operator.setitem, orig.mask, (), False) + + view = np.ma.masked.view(np.ma.MaskedArray) + assert_raises(ValueError, operator.setitem, view, (), 1) + assert_raises(ValueError, operator.setitem, view.data, (), 1) + assert_raises(ValueError, operator.setitem, view.mask, (), False) + + def test_coercion_int(self): + a_i = np.zeros((), int) + assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked) + assert_raises(MaskError, int, np.ma.masked) + + def test_coercion_float(self): + a_f = np.zeros((), float) + assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) + assert_(np.isnan(a_f[()])) + + @pytest.mark.xfail(reason="See gh-9750") + def test_coercion_unicode(self): + a_u = np.zeros((), 'U10') + a_u[()] = np.ma.masked + assert_equal(a_u[()], '--') + + @pytest.mark.xfail(reason="See gh-9750") + def test_coercion_bytes(self): + a_b = np.zeros((), 'S10') + a_b[()] = np.ma.masked + assert_equal(a_b[()], b'--') + + def test_subclass(self): + # https://github.com/astropy/astropy/issues/6645 + class Sub(type(np.ma.masked)): pass + + a = Sub() + assert_(a is Sub()) + assert_(a is not np.ma.masked) + assert_not_equal(repr(a), 'masked') + + def test_attributes_readonly(self): + assert_raises(AttributeError, setattr, np.ma.masked, 'shape', (1,)) + assert_raises(AttributeError, setattr, np.ma.masked, 'dtype', np.int64) + + +class TestMaskedWhereAliases: + + # TODO: Test masked_object, masked_equal, ... 
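+
+    def test_masked_values_approx_match(self):
+        # Illustrative case added for clarity (not tied to a gh issue):
+        # with the default tolerances, entries approximately equal to the
+        # given value are masked.
+        res = masked_values([1.0, 1.1, 2.0], 1.1)
+        assert_equal(res.mask, [False, True, False])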
+
+    def test_masked_values(self):
+        res = masked_values(np.array([-32768.0]), np.int16(-32768))
+        assert_equal(res.mask, [True])
+
+        res = masked_values(np.inf, np.inf)
+        assert_equal(res.mask, True)
+
+        res = np.ma.masked_values(np.inf, -np.inf)
+        assert_equal(res.mask, False)
+
+        res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=True)
+        assert_(res.mask is np.ma.nomask)
+
+        res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=False)
+        assert_equal(res.mask, [False] * 4)
+
+
+def test_masked_array():
+    a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
+    assert_equal(np.argwhere(a), [[1], [3]])
+
+def test_masked_array_no_copy():
+    # check nomask array is updated in place
+    a = np.ma.array([1, 2, 3, 4])
+    _ = np.ma.masked_where(a == 3, a, copy=False)
+    assert_array_equal(a.mask, [False, False, True, False])
+    # check masked array is updated in place
+    a = np.ma.array([1, 2, 3, 4], mask=[1, 0, 0, 0])
+    _ = np.ma.masked_where(a == 3, a, copy=False)
+    assert_array_equal(a.mask, [True, False, True, False])
+    # check masked array with masked_invalid is updated in place
+    a = np.ma.array([np.inf, 1, 2, 3, 4])
+    _ = np.ma.masked_invalid(a, copy=False)
+    assert_array_equal(a.mask, [True, False, False, False, False])
+
+def test_append_masked_array():
+    a = np.ma.masked_equal([1,2,3], value=2)
+    b = np.ma.masked_equal([4,3,2], value=2)
+
+    result = np.ma.append(a, b)
+    expected_data = [1, 2, 3, 4, 3, 2]
+    expected_mask = [False, True, False, False, False, True]
+    assert_array_equal(result.data, expected_data)
+    assert_array_equal(result.mask, expected_mask)
+
+    a = np.ma.masked_all((2,2))
+    b = np.ma.ones((3,1))
+
+    result = np.ma.append(a, b)
+    expected_data = [1] * 3
+    expected_mask = [True] * 4 + [False] * 3
+    assert_array_equal(result.data[-3:], expected_data)
+    assert_array_equal(result.mask, expected_mask)
+
+    result = np.ma.append(a, b, axis=None)
+    assert_array_equal(result.data[-3:], expected_data)
+    assert_array_equal(result.mask, expected_mask)
+
+
+def test_append_masked_array_along_axis():
+    a = np.ma.masked_equal([1,2,3], value=2)
+    b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
+
+    # When `axis` is specified, `values` must have the correct shape.
+    assert_raises(ValueError, np.ma.append, a, b, axis=0)
+
+    result = np.ma.append(a[np.newaxis,:], b, axis=0)
+    expected = np.ma.arange(1, 10)
+    expected[[1, 6]] = np.ma.masked
+    expected = expected.reshape((3,3))
+    assert_array_equal(result.data, expected.data)
+    assert_array_equal(result.mask, expected.mask)
+
+def test_default_fill_value_complex():
+    # regression test for Python 3, where 'unicode' was not defined
+    assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j)
+
+
+def test_ufunc_with_output():
+    # check that giving an output argument always returns that output.
+    # Regression test for gh-8416.
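+    # The key property: np.add must hand back the very `out` object it was
+    # given, not a fresh MaskedArray wrapping it.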
+ x = array([1., 2., 3.], mask=[0, 0, 1]) + y = np.add(x, 1., out=x) + assert_(y is x) + + +def test_ufunc_with_out_varied(): + """ Test that masked arrays are immune to gh-10459 """ + # the mask of the output should not affect the result, however it is passed + a = array([ 1, 2, 3], mask=[1, 0, 0]) + b = array([10, 20, 30], mask=[1, 0, 0]) + out = array([ 0, 0, 0], mask=[0, 0, 1]) + expected = array([11, 22, 33], mask=[1, 0, 0]) + + out_pos = out.copy() + res_pos = np.add(a, b, out_pos) + + out_kw = out.copy() + res_kw = np.add(a, b, out=out_kw) + + out_tup = out.copy() + res_tup = np.add(a, b, out=(out_tup,)) + + assert_equal(res_kw.mask, expected.mask) + assert_equal(res_kw.data, expected.data) + assert_equal(res_tup.mask, expected.mask) + assert_equal(res_tup.data, expected.data) + assert_equal(res_pos.mask, expected.mask) + assert_equal(res_pos.data, expected.data) + + +def test_astype_mask_ordering(): + descr = np.dtype([('v', int, 3), ('x', [('y', float)])]) + x = array([ + [([1, 2, 3], (1.0,)), ([1, 2, 3], (2.0,))], + [([1, 2, 3], (3.0,)), ([1, 2, 3], (4.0,))]], dtype=descr) + x[0]['v'][0] = np.ma.masked + + x_a = x.astype(descr) + assert x_a.dtype.names == np.dtype(descr).names + assert x_a.mask.dtype.names == np.dtype(descr).names + assert_equal(x, x_a) + + assert_(x is x.astype(x.dtype, copy=False)) + assert_equal(type(x.astype(x.dtype, subok=False)), np.ndarray) + + x_f = x.astype(x.dtype, order='F') + assert_(x_f.flags.f_contiguous) + assert_(x_f.mask.flags.f_contiguous) + + # Also test the same indirectly, via np.array + x_a2 = np.array(x, dtype=descr, subok=True) + assert x_a2.dtype.names == np.dtype(descr).names + assert x_a2.mask.dtype.names == np.dtype(descr).names + assert_equal(x, x_a2) + + assert_(x is np.array(x, dtype=descr, copy=False, subok=True)) + + x_f2 = np.array(x, dtype=x.dtype, order='F', subok=True) + assert_(x_f2.flags.f_contiguous) + assert_(x_f2.mask.flags.f_contiguous) + + +@pytest.mark.parametrize('dt1', num_dts, ids=num_ids) +@pytest.mark.parametrize('dt2', num_dts, ids=num_ids) +@pytest.mark.filterwarnings('ignore::numpy.ComplexWarning') +def test_astype_basic(dt1, dt2): + # See gh-12070 + src = np.ma.array(ones(3, dt1), fill_value=1) + dst = src.astype(dt2) + + assert_(src.fill_value == 1) + assert_(src.dtype == dt1) + assert_(src.fill_value.dtype == dt1) + + assert_(dst.fill_value == 1) + assert_(dst.dtype == dt2) + assert_(dst.fill_value.dtype == dt2) + + assert_equal(src, dst) + + +def test_fieldless_void(): + dt = np.dtype([]) # a void dtype with no fields + x = np.empty(4, dt) + + # these arrays contain no values, so there's little to test - but this + # shouldn't crash + mx = np.ma.array(x) + assert_equal(mx.dtype, x.dtype) + assert_equal(mx.shape, x.shape) + + mx = np.ma.array(x, mask=x) + assert_equal(mx.dtype, x.dtype) + assert_equal(mx.shape, x.shape) + + +def test_mask_shape_assignment_does_not_break_masked(): + a = np.ma.masked + b = np.ma.array(1, mask=a.mask) + b.shape = (1,) + assert_equal(a.mask.shape, ()) + +@pytest.mark.skipif(sys.flags.optimize > 1, + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") +def test_doc_note(): + def method(self): + """This docstring + + Has multiple lines + + And notes + + Notes + ----- + original note + """ + pass + + expected_doc = """This docstring + +Has multiple lines + +And notes + +Notes +----- +note + +original note""" + + assert_equal(np.ma.core.doc_note(method.__doc__, "note"), expected_doc) + + +def test_gh_22556(): + source = np.ma.array([0, [0, 1, 2]], 
dtype=object) + deepcopy = copy.deepcopy(source) + deepcopy[1].append('this should not appear in source') + assert len(source[1]) == 3 + + +def test_gh_21022(): + # testing for absence of reported error + source = np.ma.masked_array(data=[-1, -1], mask=True, dtype=np.float64) + axis = np.array(0) + result = np.prod(source, axis=axis, keepdims=False) + result = np.ma.masked_array(result, + mask=np.ones(result.shape, dtype=np.bool_)) + array = np.ma.masked_array(data=-1, mask=True, dtype=np.float64) + copy.deepcopy(array) + copy.deepcopy(result) + + +def test_deepcopy_2d_obj(): + source = np.ma.array([[0, "dog"], + [1, 1], + [[1, 2], "cat"]], + mask=[[0, 1], + [0, 0], + [0, 0]], + dtype=object) + deepcopy = copy.deepcopy(source) + deepcopy[2, 0].extend(['this should not appear in source', 3]) + assert len(source[2, 0]) == 2 + assert len(deepcopy[2, 0]) == 4 + assert_equal(deepcopy._mask, source._mask) + deepcopy._mask[0, 0] = 1 + assert source._mask[0, 0] == 0 + + +def test_deepcopy_0d_obj(): + source = np.ma.array(0, mask=[0], dtype=object) + deepcopy = copy.deepcopy(source) + deepcopy[...] = 17 + assert_equal(source, 0) + assert_equal(deepcopy, 17) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_deprecations.py b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_deprecations.py new file mode 100644 index 00000000..40c8418f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_deprecations.py @@ -0,0 +1,84 @@ +"""Test deprecation and future warnings. + +""" +import pytest +import numpy as np +from numpy.testing import assert_warns +from numpy.ma.testutils import assert_equal +from numpy.ma.core import MaskedArrayFutureWarning +import io +import textwrap + +class TestArgsort: + """ gh-8701 """ + def _test_base(self, argsort, cls): + arr_0d = np.array(1).view(cls) + argsort(arr_0d) + + arr_1d = np.array([1, 2, 3]).view(cls) + argsort(arr_1d) + + # argsort has a bad default for >1d arrays + arr_2d = np.array([[1, 2], [3, 4]]).view(cls) + result = assert_warns( + np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) + assert_equal(result, argsort(arr_2d, axis=None)) + + # should be no warnings for explicitly specifying it + argsort(arr_2d, axis=None) + argsort(arr_2d, axis=-1) + + def test_function_ndarray(self): + return self._test_base(np.ma.argsort, np.ndarray) + + def test_function_maskedarray(self): + return self._test_base(np.ma.argsort, np.ma.MaskedArray) + + def test_method(self): + return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray) + + +class TestMinimumMaximum: + + def test_axis_default(self): + # NumPy 1.13, 2017-05-06 + + data1d = np.ma.arange(6) + data2d = data1d.reshape(2, 3) + + ma_min = np.ma.minimum.reduce + ma_max = np.ma.maximum.reduce + + # check that the default axis is still None, but warns on 2d arrays + result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d) + assert_equal(result, ma_max(data2d, axis=None)) + + result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d) + assert_equal(result, ma_min(data2d, axis=None)) + + # no warnings on 1d, as both new and old defaults are equivalent + result = ma_min(data1d) + assert_equal(result, ma_min(data1d, axis=None)) + assert_equal(result, ma_min(data1d, axis=0)) + + result = ma_max(data1d) + assert_equal(result, ma_max(data1d, axis=None)) + assert_equal(result, ma_max(data1d, axis=0)) + + +class TestFromtextfile: + def test_fromtextfile_delimitor(self): + # NumPy 1.22.0, 2021-09-23 + + textfile = io.StringIO(textwrap.dedent( + """ + A,B,C,D + 
'string 1';1;1.0;'mixed column' + 'string 2';2;2.0; + 'string 3';3;3.0;123 + 'string 4';4;4.0;3.14 + """ + )) + + with pytest.warns(DeprecationWarning): + result = np.ma.mrecords.fromtextfile(textfile, delimitor=';') diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_extras.py b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_extras.py new file mode 100644 index 00000000..d09a50fe --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_extras.py @@ -0,0 +1,1870 @@ +# pylint: disable-msg=W0611, W0612, W0511 +"""Tests suite for MaskedArray. +Adapted from the original test_ma by Pierre Gerard-Marchant + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +import warnings +import itertools +import pytest + +import numpy as np +from numpy.core.numeric import normalize_axis_tuple +from numpy.testing import ( + assert_warns, suppress_warnings + ) +from numpy.ma.testutils import ( + assert_, assert_array_equal, assert_equal, assert_almost_equal + ) +from numpy.ma.core import ( + array, arange, masked, MaskedArray, masked_array, getmaskarray, shape, + nomask, ones, zeros, count + ) +from numpy.ma.extras import ( + atleast_1d, atleast_2d, atleast_3d, mr_, dot, polyfit, cov, corrcoef, + median, average, unique, setxor1d, setdiff1d, union1d, intersect1d, in1d, + ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols, + mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous, + notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin, + diagflat, ndenumerate, stack, vstack + ) + + +class TestGeneric: + # + def test_masked_all(self): + # Tests masked_all + # Standard dtype + test = masked_all((2,), dtype=float) + control = array([1, 1], mask=[1, 1], dtype=float) + assert_equal(test, control) + # Flexible dtype + dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) + test = masked_all((2,), dtype=dt) + control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) + assert_equal(test, control) + test = masked_all((2, 2), dtype=dt) + control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]], + mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]], + dtype=dt) + assert_equal(test, control) + # Nested dtype + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((1, 1), dtype=dt) + control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt) + assert_equal(test, control) + + def test_masked_all_with_object_nested(self): + # Test masked_all works with nested array with dtype of an 'object' + # refers to issue #15895 + my_dtype = np.dtype([('b', ([('c', object)], (1,)))]) + masked_arr = np.ma.masked_all((1,), my_dtype) + + assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray) + assert_equal(type(masked_arr['b']['c']), np.ma.core.MaskedArray) + assert_equal(len(masked_arr['b']['c']), 1) + assert_equal(masked_arr['b']['c'].shape, (1, 1)) + assert_equal(masked_arr['b']['c']._fill_value.shape, ()) + + def test_masked_all_with_object(self): + # same as above except that the array is not nested + my_dtype = np.dtype([('b', (object, (1,)))]) + masked_arr = np.ma.masked_all((1,), 
my_dtype)
+
+        assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray)
+        assert_equal(len(masked_arr['b']), 1)
+        assert_equal(masked_arr['b'].shape, (1, 1))
+        assert_equal(masked_arr['b']._fill_value.shape, ())
+
+    def test_masked_all_like(self):
+        # Tests masked_all_like
+        # Standard dtype
+        base = array([1, 2], dtype=float)
+        test = masked_all_like(base)
+        control = array([1, 1], mask=[1, 1], dtype=float)
+        assert_equal(test, control)
+        # Flexible dtype
+        dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']})
+        base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt)
+        test = masked_all_like(base)
+        control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt)
+        assert_equal(test, control)
+        # Nested dtype
+        dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])])
+        control = array([(1, (1, 1)), (1, (1, 1))],
+                        mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
+        test = masked_all_like(control)
+        assert_equal(test, control)
+
+    def check_clump(self, f):
+        for i in range(1, 7):
+            for j in range(2**i):
+                k = np.arange(i, dtype=int)
+                ja = np.full(i, j, dtype=int)
+                a = masked_array(2**k)
+                a.mask = (ja & (2**k)) != 0
+                s = 0
+                for sl in f(a):
+                    s += a.data[sl].sum()
+                if f == clump_unmasked:
+                    assert_equal(a.compressed().sum(), s)
+                else:
+                    a.mask = ~a.mask
+                    assert_equal(a.compressed().sum(), s)
+
+    def test_clump_masked(self):
+        # Test clump_masked
+        a = masked_array(np.arange(10))
+        a[[0, 1, 2, 6, 8, 9]] = masked
+        #
+        test = clump_masked(a)
+        control = [slice(0, 3), slice(6, 7), slice(8, 10)]
+        assert_equal(test, control)
+
+        self.check_clump(clump_masked)
+
+    def test_clump_unmasked(self):
+        # Test clump_unmasked
+        a = masked_array(np.arange(10))
+        a[[0, 1, 2, 6, 8, 9]] = masked
+        test = clump_unmasked(a)
+        control = [slice(3, 6), slice(7, 8), ]
+        assert_equal(test, control)
+
+        self.check_clump(clump_unmasked)
+
+    def test_flatnotmasked_contiguous(self):
+        # Test flatnotmasked_contiguous
+        a = arange(10)
+        # No mask
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [slice(0, a.size)])
+        # mask of all false
+        a.mask = np.zeros(10, dtype=bool)
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [slice(0, a.size)])
+        # Some mask
+        a[(a < 3) | (a > 8) | (a == 5)] = masked
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [slice(3, 5), slice(6, 9)])
+        #
+        a[:] = masked
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [])
+
+
+class TestAverage:
+    # Several tests of average. Why so many? Good point...
+    def test_testAverage1(self):
+        # Test of average.
+        ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
+        assert_equal(2.0, average(ott, axis=0))
+        assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.]))
+        result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
+        assert_equal(2.0, result)
+        assert_(wts == 4.0)
+        ott[:] = masked
+        assert_equal(average(ott, axis=0).mask, [True])
+        ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
+        ott = ott.reshape(2, 2)
+        ott[:, 1] = masked
+        assert_equal(average(ott, axis=0), [2.0, 0.0])
+        assert_equal(average(ott, axis=1).mask[0], [True])
+        assert_equal([2., 0.], average(ott, axis=0))
+        result, wts = average(ott, axis=0, returned=True)
+        assert_equal(wts, [1., 0.])
+
+    def test_testAverage2(self):
+        # More tests of average.
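+        # With x = 0..5 and w1 = [0, 1, 1, 1, 1, 0], the weighted mean is
+        # sum(w * x) / sum(w) = (1 + 2 + 3 + 4) / 4 = 2.5, the same as the
+        # unweighted mean of the full range.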
+ w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = arange(6, dtype=np.float_) + assert_equal(average(x, axis=0), 2.5) + assert_equal(average(x, axis=0, weights=w1), 2.5) + y = array([arange(6, dtype=np.float_), 2.0 * arange(6)]) + assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.) + assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.) + assert_equal(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0]) + assert_equal(average(y, None, weights=w2), 20. / 6.) + assert_equal(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.]) + assert_equal(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0]) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + assert_equal(average(masked_array(x, m1), axis=0), 2.5) + assert_equal(average(masked_array(x, m2), axis=0), 2.5) + assert_equal(average(masked_array(x, m4), axis=0).mask, [True]) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + assert_equal(average(z, None), 20. / 6.) + assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) + assert_equal(average(z, axis=1), [2.5, 5.0]) + assert_equal(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0]) + + def test_testAverage3(self): + # Yet more tests of average! + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=True) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[False, False], [True, False]]) + a2da = average(a2d, axis=0) + assert_equal(a2da, [0.5, 3.0]) + a2dma = average(a2dm, axis=0) + assert_equal(a2dma, [1.0, 3.0]) + a2dma = average(a2dm, axis=None) + assert_equal(a2dma, 7. / 3.) + a2dma = average(a2dm, axis=1) + assert_equal(a2dma, [1.5, 4.0]) + + def test_testAverage4(self): + # Test that `keepdims` works with average + x = np.array([2, 3, 4]).reshape(3, 1) + b = np.ma.array(x, mask=[[False], [False], [True]]) + w = np.array([4, 5, 6]).reshape(3, 1) + actual = average(b, weights=w, axis=1, keepdims=True) + desired = masked_array([[2.], [3.], [4.]], [[False], [False], [True]]) + assert_equal(actual, desired) + + def test_onintegers_with_mask(self): + # Test average on integers with mask + a = average(array([1, 2])) + assert_equal(a, 1.5) + a = average(array([1, 2, 3, 4], mask=[False, False, True, True])) + assert_equal(a, 1.5) + + def test_complex(self): + # Test with complex data. 
+ # (Regression test for https://github.com/numpy/numpy/issues/2684) + mask = np.array([[0, 0, 0, 1, 0], + [0, 1, 0, 0, 0]], dtype=bool) + a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j], + [9j, 0+1j, 2+3j, 4+5j, 7+7j]], + mask=mask) + + av = average(a) + expected = np.average(a.compressed()) + assert_almost_equal(av.real, expected.real) + assert_almost_equal(av.imag, expected.imag) + + av0 = average(a, axis=0) + expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j + assert_almost_equal(av0.real, expected0.real) + assert_almost_equal(av0.imag, expected0.imag) + + av1 = average(a, axis=1) + expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j + assert_almost_equal(av1.real, expected1.real) + assert_almost_equal(av1.imag, expected1.imag) + + # Test with the 'weights' argument. + wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5], + [1.0, 1.0, 1.0, 1.0, 1.0]]) + wav = average(a, weights=wts) + expected = np.average(a.compressed(), weights=wts[~mask]) + assert_almost_equal(wav.real, expected.real) + assert_almost_equal(wav.imag, expected.imag) + + wav0 = average(a, weights=wts, axis=0) + expected0 = (average(a.real, weights=wts, axis=0) + + average(a.imag, weights=wts, axis=0)*1j) + assert_almost_equal(wav0.real, expected0.real) + assert_almost_equal(wav0.imag, expected0.imag) + + wav1 = average(a, weights=wts, axis=1) + expected1 = (average(a.real, weights=wts, axis=1) + + average(a.imag, weights=wts, axis=1)*1j) + assert_almost_equal(wav1.real, expected1.real) + assert_almost_equal(wav1.imag, expected1.imag) + + @pytest.mark.parametrize( + 'x, axis, expected_avg, weights, expected_wavg, expected_wsum', + [([1, 2, 3], None, [2.0], [3, 4, 1], [1.75], [8.0]), + ([[1, 2, 5], [1, 6, 11]], 0, [[1.0, 4.0, 8.0]], + [1, 3], [[1.0, 5.0, 9.5]], [[4, 4, 4]])], + ) + def test_basic_keepdims(self, x, axis, expected_avg, + weights, expected_wavg, expected_wsum): + avg = np.ma.average(x, axis=axis, keepdims=True) + assert avg.shape == np.shape(expected_avg) + assert_array_equal(avg, expected_avg) + + wavg = np.ma.average(x, axis=axis, weights=weights, keepdims=True) + assert wavg.shape == np.shape(expected_wavg) + assert_array_equal(wavg, expected_wavg) + + wavg, wsum = np.ma.average(x, axis=axis, weights=weights, + returned=True, keepdims=True) + assert wavg.shape == np.shape(expected_wavg) + assert_array_equal(wavg, expected_wavg) + assert wsum.shape == np.shape(expected_wsum) + assert_array_equal(wsum, expected_wsum) + + def test_masked_weights(self): + # Test with masked weights. + # (Regression test for https://github.com/numpy/numpy/issues/10438) + a = np.ma.array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [1, 0, 0], [0, 0, 0]]) + weights_unmasked = masked_array([5, 28, 31], mask=False) + weights_masked = masked_array([5, 28, 31], mask=[1, 0, 0]) + + avg_unmasked = average(a, axis=0, + weights=weights_unmasked, returned=False) + expected_unmasked = np.array([6.0, 5.21875, 6.21875]) + assert_almost_equal(avg_unmasked, expected_unmasked) + + avg_masked = average(a, axis=0, weights=weights_masked, returned=False) + expected_masked = np.array([6.0, 5.576271186440678, 6.576271186440678]) + assert_almost_equal(avg_masked, expected_masked) + + # weights should be masked if needed + # depending on the array mask. 
This is to avoid summing + # masked nan or other values that are not cancelled by a zero + a = np.ma.array([1.0, 2.0, 3.0, 4.0], + mask=[False, False, True, True]) + avg_unmasked = average(a, weights=[1, 1, 1, np.nan]) + + assert_almost_equal(avg_unmasked, 1.5) + + a = np.ma.array([ + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [9.0, 1.0, 2.0, 3.0], + ], mask=[ + [False, True, True, False], + [True, False, True, True], + [True, False, True, False], + ]) + + avg_masked = np.ma.average(a, weights=[1, np.nan, 1], axis=0) + avg_expected = np.ma.array([1.0, np.nan, np.nan, 3.5], + mask=[False, True, True, False]) + + assert_almost_equal(avg_masked, avg_expected) + assert_equal(avg_masked.mask, avg_expected.mask) + + +class TestConcatenator: + # Tests for mr_, the equivalent of r_ for masked arrays. + + def test_1d(self): + # Tests mr_ on 1D arrays. + assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6])) + b = ones(5) + m = [1, 0, 0, 0, 0] + d = masked_array(b, mask=m) + c = mr_[d, 0, 0, d] + assert_(isinstance(c, MaskedArray)) + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + assert_array_equal(c.mask, mr_[m, 0, 0, m]) + + def test_2d(self): + # Tests mr_ on 2D arrays. + a_1 = np.random.rand(5, 5) + a_2 = np.random.rand(5, 5) + m_1 = np.round(np.random.rand(5, 5), 0) + m_2 = np.round(np.random.rand(5, 5), 0) + b_1 = masked_array(a_1, mask=m_1) + b_2 = masked_array(a_2, mask=m_2) + # append columns + d = mr_['1', b_1, b_2] + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b_1) + assert_array_equal(d[:, 5:], b_2) + assert_array_equal(d.mask, np.r_['1', m_1, m_2]) + d = mr_[b_1, b_2] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5,:], b_1) + assert_array_equal(d[5:,:], b_2) + assert_array_equal(d.mask, np.r_[m_1, m_2]) + + def test_masked_constant(self): + actual = mr_[np.ma.masked, 1] + assert_equal(actual.mask, [True, False]) + assert_equal(actual.data[1], 1) + + actual = mr_[[1, 2], np.ma.masked] + assert_equal(actual.mask, [False, False, True]) + assert_equal(actual.data[:2], [1, 2]) + + +class TestNotMasked: + # Tests notmasked_edges and notmasked_contiguous. 
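+    # notmasked_edges reports the first/last unmasked positions (flat
+    # indices for axis=None, index arrays per axis otherwise), while
+    # notmasked_contiguous returns the unmasked runs as slices.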
+ + def test_edges(self): + # Tests unmasked_edges + data = masked_array(np.arange(25).reshape(5, 5), + mask=[[0, 0, 1, 0, 0], + [0, 0, 0, 1, 1], + [1, 1, 0, 0, 0], + [0, 0, 0, 0, 0], + [1, 1, 1, 0, 0]],) + test = notmasked_edges(data, None) + assert_equal(test, [0, 24]) + test = notmasked_edges(data, 0) + assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data, 1) + assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)]) + assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)]) + # + test = notmasked_edges(data.data, None) + assert_equal(test, [0, 24]) + test = notmasked_edges(data.data, 0) + assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data.data, -1) + assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)]) + assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)]) + # + data[-2] = masked + test = notmasked_edges(data, 0) + assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data, -1) + assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)]) + assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)]) + + def test_contiguous(self): + # Tests notmasked_contiguous + a = masked_array(np.arange(24).reshape(3, 8), + mask=[[0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 1, 0]]) + tmp = notmasked_contiguous(a, None) + assert_equal(tmp, [ + slice(0, 4, None), + slice(16, 22, None), + slice(23, 24, None) + ]) + + tmp = notmasked_contiguous(a, 0) + assert_equal(tmp, [ + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(2, 3, None)], + [slice(2, 3, None)], + [], + [slice(2, 3, None)] + ]) + # + tmp = notmasked_contiguous(a, 1) + assert_equal(tmp, [ + [slice(0, 4, None)], + [], + [slice(0, 6, None), slice(7, 8, None)] + ]) + + +class TestCompressFunctions: + + def test_compress_nd(self): + # Tests compress_nd + x = np.array(list(range(3*4*5))).reshape(3, 4, 5) + m = np.zeros((3,4,5)).astype(bool) + m[1,1,1] = True + x = array(x, mask=m) + + # axis=None + a = compress_nd(x) + assert_equal(a, [[[ 0, 2, 3, 4], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[40, 42, 43, 44], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + # axis=0 + a = compress_nd(x, 0) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[40, 41, 42, 43, 44], + [45, 46, 47, 48, 49], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + + # axis=1 + a = compress_nd(x, 1) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[20, 21, 22, 23, 24], + [30, 31, 32, 33, 34], + [35, 36, 37, 38, 39]], + [[40, 41, 42, 43, 44], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + + a2 = compress_nd(x, (1,)) + a3 = compress_nd(x, -2) + a4 = compress_nd(x, (-2,)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=2 + a = compress_nd(x, 2) + assert_equal(a, [[[ 0, 2, 3, 4], + [ 5, 7, 8, 9], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[20, 22, 23, 24], + [25, 27, 28, 29], + [30, 32, 33, 34], + [35, 37, 38, 39]], + [[40, 42, 43, 44], + [45, 47, 48, 49], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (2,)) + a3 = compress_nd(x, -1) + a4 = compress_nd(x, (-1,)) + assert_equal(a, a2) + assert_equal(a, a3) 
+ assert_equal(a, a4) + + # axis=(0, 1) + a = compress_nd(x, (0, 1)) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[40, 41, 42, 43, 44], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + a2 = compress_nd(x, (0, -2)) + assert_equal(a, a2) + + # axis=(1, 2) + a = compress_nd(x, (1, 2)) + assert_equal(a, [[[ 0, 2, 3, 4], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[20, 22, 23, 24], + [30, 32, 33, 34], + [35, 37, 38, 39]], + [[40, 42, 43, 44], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (-2, 2)) + a3 = compress_nd(x, (1, -1)) + a4 = compress_nd(x, (-2, -1)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=(0, 2) + a = compress_nd(x, (0, 2)) + assert_equal(a, [[[ 0, 2, 3, 4], + [ 5, 7, 8, 9], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[40, 42, 43, 44], + [45, 47, 48, 49], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (0, -1)) + assert_equal(a, a2) + + def test_compress_rowcols(self): + # Tests compress_rowcols + x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[4, 5], [7, 8]]) + assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[0, 2], [6, 8]]) + assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[8]]) + assert_equal(compress_rowcols(x, 0), [[6, 7, 8]]) + assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert_equal(compress_rowcols(x).size, 0) + assert_equal(compress_rowcols(x, 0).size, 0) + assert_equal(compress_rowcols(x, 1).size, 0) + + def test_mask_rowcols(self): + # Tests mask_rowcols. 
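+        # A minimal sketch first, on a hypothetical 2x2 input: one masked
+        # cell is enough to mask its entire row and column.
+        demo = array(np.arange(4).reshape(2, 2), mask=[[1, 0], [0, 0]])
+        assert_equal(mask_rowcols(demo).mask, [[1, 1], [1, 0]])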
+ x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[0, 0, 0], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[0, 1, 0], [0, 1, 0], [0, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 1, 1], [1, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1,).mask, + [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert_(mask_rowcols(x).all() is masked) + assert_(mask_rowcols(x, 0).all() is masked) + assert_(mask_rowcols(x, 1).all() is masked) + assert_(mask_rowcols(x).mask.all()) + assert_(mask_rowcols(x, 0).mask.all()) + assert_(mask_rowcols(x, 1).mask.all()) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize(["func", "rowcols_axis"], + [(np.ma.mask_rows, 0), (np.ma.mask_cols, 1)]) + def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis): + # Test deprecation of the axis argument to `mask_rows` and `mask_cols` + x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + + with assert_warns(DeprecationWarning): + res = func(x, axis=axis) + assert_equal(res, mask_rowcols(x, rowcols_axis)) + + def test_dot(self): + # Tests dot product + n = np.arange(1, 7) + # + m = [1, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [1, 0]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 1] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[0, 1], [1, 1]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + assert_equal(c, dot(a, b)) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b) + assert_equal(c.mask, nomask) + c = dot(b, a) + assert_equal(c.mask, nomask) + # + a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [0, 0]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[0, 0], [1, 1]]) + c = dot(a, b) + 
assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 0], [1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[0, 0], [0, 0]], [[0, 0], [0, 1]]]) + c = dot(a, b, strict=True) + assert_equal(c.mask, + [[[[1, 1], [1, 1]], [[0, 0], [0, 1]]], + [[[0, 0], [0, 1]], [[0, 0], [0, 1]]]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, + [[[[0, 0], [0, 1]], [[0, 0], [0, 0]]], + [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, + [[[[1, 0], [0, 0]], [[1, 0], [0, 0]]], + [[[1, 0], [0, 0]], [[1, 1], [1, 1]]]]) + c = dot(b, a, strict=False) + assert_equal(c.mask, + [[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], + [[[0, 0], [0, 0]], [[1, 0], [0, 0]]]]) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = 5. + c = dot(a, b, strict=True) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(b, a, strict=False) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = masked_array(np.arange(2), mask=[0, 1]) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, [[1, 0], [0, 0]]) + + def test_dot_returns_maskedarray(self): + # See gh-6611 + a = np.eye(3) + b = array(a) + assert_(type(dot(a, a)) is MaskedArray) + assert_(type(dot(a, b)) is MaskedArray) + assert_(type(dot(b, a)) is MaskedArray) + assert_(type(dot(b, b)) is MaskedArray) + + def test_dot_out(self): + a = array(np.eye(3)) + out = array(np.zeros((3, 3))) + res = dot(a, a, out=out) + assert_(res is out) + assert_equal(a, res) + + +class TestApplyAlongAxis: + # Tests 2D functions + def test_3d(self): + a = arange(12.).reshape(2, 2, 3) + + def myfunc(b): + return b[1] + + xa = apply_along_axis(myfunc, 2, a) + assert_equal(xa, [[1, 4], [7, 10]]) + + # Tests kwargs functions + def test_3d_kwargs(self): + a = arange(12).reshape(2, 2, 3) + + def myfunc(b, offset=0): + return b[1+offset] + + xa = apply_along_axis(myfunc, 2, a, offset=1) + assert_equal(xa, [[2, 5], [8, 11]]) + + +class TestApplyOverAxes: + # Tests apply_over_axes + def test_basic(self): + a = arange(24).reshape(2, 3, 4) + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[60], [92], [124]]]) + assert_equal(test, ctrl) + a[(a % 2).astype(bool)] = masked + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[28], [44], [60]]]) + assert_equal(test, ctrl) + + +class TestMedian: + def test_pytype(self): + r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1) + assert_equal(r, np.inf) + + def test_inf(self): 
+        # Test that the even-length case, which averages the two middle
+        # values (an inf/2 division), returns inf rather than masked for
+        # rows of inf.
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]]), axis=-1)
+        assert_equal(r, np.inf)
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]]), axis=None)
+        assert_equal(r, np.inf)
+        # all masked
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]], mask=True),
+                         axis=-1)
+        assert_equal(r.mask, True)
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]], mask=True),
+                         axis=None)
+        assert_equal(r.mask, True)
+
+    def test_non_masked(self):
+        x = np.arange(9)
+        assert_equal(np.ma.median(x), 4.)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = range(8)
+        assert_equal(np.ma.median(x), 3.5)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = 5
+        assert_equal(np.ma.median(x), 5.)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        # integer
+        x = np.arange(9 * 8).reshape(9, 8)
+        assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
+        assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
+        assert_(np.ma.median(x, axis=1) is not MaskedArray)
+        # float
+        x = np.arange(9 * 8.).reshape(9, 8)
+        assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
+        assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
+        assert_(np.ma.median(x, axis=1) is not MaskedArray)
+
+    def test_docstring_examples(self):
+        "test the examples given in the docstring of ma.median"
+        x = array(np.arange(8), mask=[0]*4 + [1]*4)
+        assert_equal(np.ma.median(x), 1.5)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
+        assert_equal(np.ma.median(x), 2.5)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        ma_x = np.ma.median(x, axis=-1, overwrite_input=True)
+        assert_equal(ma_x, [2., 5.])
+        assert_equal(ma_x.shape, (2,), "shape mismatch")
+        assert_(type(ma_x) is MaskedArray)
+
+    def test_axis_argument_errors(self):
+        msg = "mask = %s, ndim = %s, axis = %s, overwrite_input = %s"
+        for ndmin in range(5):
+            for mask in [False, True]:
+                x = array(1, ndmin=ndmin, mask=mask)
+
+                # Valid axis values should not raise exception
+                args = itertools.product(range(-ndmin, ndmin), [False, True])
+                for axis, over in args:
+                    try:
+                        np.ma.median(x, axis=axis, overwrite_input=over)
+                    except Exception:
+                        raise AssertionError(msg % (mask, ndmin, axis, over))
+
+                # Invalid axis values should raise exception
+                args = itertools.product([-(ndmin + 1), ndmin], [False, True])
+                for axis, over in args:
+                    try:
+                        np.ma.median(x, axis=axis, overwrite_input=over)
+                    except np.AxisError:
+                        pass
+                    else:
+                        raise AssertionError(msg % (mask, ndmin, axis, over))
+
+    def test_masked_0d(self):
+        # Check values
+        x = array(1, mask=False)
+        assert_equal(np.ma.median(x), 1)
+        x = array(1, mask=True)
+        assert_equal(np.ma.median(x), np.ma.masked)
+
+    def test_masked_1d(self):
+        x = array(np.arange(5), mask=True)
+        assert_equal(np.ma.median(x), np.ma.masked)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is np.ma.core.MaskedConstant)
+        x = array(np.arange(5), mask=False)
+        assert_equal(np.ma.median(x), 2.)
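+        # (The masked variants below drop masked entries before taking the
+        # median: mask=[0, 1, 0, 0, 0] leaves [0, 2, 3, 4], whose median is
+        # the average of the two middle survivors, 2.5.)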
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(5), mask=[0,1,0,0,0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(5), mask=[0,1,1,1,1]) + assert_equal(np.ma.median(x), 0.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = array(np.arange(5), mask=[0,1,1,0,0]) + assert_equal(np.ma.median(x), 3.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # float + x = array(np.arange(5.), mask=[0,1,1,0,0]) + assert_equal(np.ma.median(x), 3.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = array(np.arange(6), mask=[0,1,1,1,1,0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # float + x = array(np.arange(6.), mask=[0,1,1,1,1,0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + + def test_1d_shape_consistency(self): + assert_equal(np.ma.median(array([1,2,3],mask=[0,0,0])).shape, + np.ma.median(array([1,2,3],mask=[0,1,0])).shape ) + + def test_2d(self): + # Tests median w/ 2D + (n, p) = (101, 30) + x = masked_array(np.linspace(-1., 1., n),) + x[:10] = x[-10:] = masked + z = masked_array(np.empty((n, p), dtype=float)) + z[:, 0] = x[:] + idx = np.arange(len(x)) + for i in range(1, p): + np.random.shuffle(idx) + z[:, i] = x[idx] + assert_equal(median(z[:, 0]), 0) + assert_equal(median(z), 0) + assert_equal(median(z, axis=0), np.zeros(p)) + assert_equal(median(z.T, axis=1), np.zeros(p)) + + def test_2d_waxis(self): + # Tests median w/ 2D arrays and different axis. + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x), 14.5) + assert_(type(np.ma.median(x)) is not MaskedArray) + assert_equal(median(x, axis=0), [13.5, 14.5, 15.5]) + assert_(type(np.ma.median(x, axis=0)) is MaskedArray) + assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0]) + assert_(type(np.ma.median(x, axis=1)) is MaskedArray) + assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1]) + + def test_3d(self): + # Tests median w/ 3D + x = np.ma.arange(24).reshape(3, 4, 2) + x[x % 3 == 0] = masked + assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) + x.shape = (4, 3, 2) + assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) + x = np.ma.arange(24).reshape(4, 3, 2) + x[x % 5 == 0] = masked + assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]]) + + def test_neg_axis(self): + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x, axis=-1), median(x, axis=1)) + + def test_out_1d(self): + # integer float even odd + for v in (30, 30., 31, 31.): + x = masked_array(np.arange(v)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(())) + r = median(x, out=out) + if v == 30: + assert_equal(out, 14.5) + else: + assert_equal(out, 15.) 
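+            # (v in (30, 30.) leaves an even number of unmasked entries, so
+            # the median averages the middle pair; the 31-element cases have
+            # an odd count and land exactly on an element.)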
+ assert_(r is out) + assert_(type(r) is MaskedArray) + + def test_out(self): + # integer float even odd + for v in (40, 40., 30, 30.): + x = masked_array(np.arange(v).reshape(10, -1)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(10)) + r = median(x, axis=1, out=out) + if v == 30: + e = masked_array([0.]*3 + [10, 13, 16, 19] + [0.]*3, + mask=[True] * 3 + [False] * 4 + [True] * 3) + else: + e = masked_array([0.]*3 + [13.5, 17.5, 21.5, 25.5] + [0.]*3, + mask=[True]*3 + [False]*4 + [True]*3) + assert_equal(r, e) + assert_(r is out) + assert_(type(r) is MaskedArray) + + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1, ), + (0, 1), + (-3, -1), + ] + ) + def test_keepdims_out(self, axis): + mask = np.zeros((3, 5, 7, 11), dtype=bool) + # Randomly set some elements to True: + w = np.random.random((4, 200)) * np.array(mask.shape)[:, None] + w = w.astype(np.intp) + mask[tuple(w)] = np.nan + d = masked_array(np.ones(mask.shape), mask=mask) + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + out = masked_array(np.empty(shape_out)) + result = median(d, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_single_non_masked_value_on_axis(self): + data = [[1., 0.], + [0., 3.], + [0., 0.]] + masked_arr = np.ma.masked_equal(data, 0) + expected = [1., 3.] + assert_array_equal(np.ma.median(masked_arr, axis=0), + expected) + + def test_nan(self): + for mask in (False, np.zeros(6, dtype=bool)): + dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) + dm.mask = mask + + # scalar result + r = np.ma.median(dm, axis=None) + assert_(np.isscalar(r)) + assert_array_equal(r, np.nan) + r = np.ma.median(dm.ravel(), axis=0) + assert_(np.isscalar(r)) + assert_array_equal(r, np.nan) + + r = np.ma.median(dm, axis=0) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [1, np.nan, 3]) + r = np.ma.median(dm, axis=1) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [np.nan, 2]) + r = np.ma.median(dm, axis=-1) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [np.nan, 2]) + + dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) + dm[:, 2] = np.ma.masked + assert_array_equal(np.ma.median(dm, axis=None), np.nan) + assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3]) + assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5]) + + def test_out_nan(self): + o = np.ma.masked_array(np.zeros((4,))) + d = np.ma.masked_array(np.ones((3, 4))) + d[2, 1] = np.nan + d[2, 2] = np.ma.masked + assert_equal(np.ma.median(d, 0, out=o), o) + o = np.ma.masked_array(np.zeros((3,))) + assert_equal(np.ma.median(d, 1, out=o), o) + o = np.ma.masked_array(np.zeros(())) + assert_equal(np.ma.median(d, out=o), o) + + def test_nan_behavior(self): + a = np.ma.masked_array(np.arange(24, dtype=float)) + a[::3] = np.ma.masked + a[2] = np.nan + assert_array_equal(np.ma.median(a), np.nan) + assert_array_equal(np.ma.median(a, axis=0), np.nan) + + a = np.ma.masked_array(np.arange(24, dtype=float).reshape(2, 3, 4)) + a.mask = np.arange(a.size) % 2 == 1 + aorig = a.copy() + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_array_equal(np.ma.median(a), np.nan) + assert_(np.isscalar(np.ma.median(a))) + + # axis0 + b = np.ma.median(aorig, axis=0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.ma.median(a, 0), b) + + # axis1 + b = np.ma.median(aorig, axis=1) + b[1, 3] = np.nan + b[1, 2] = 
np.nan + assert_equal(np.ma.median(a, 1), b) + + # axis02 + b = np.ma.median(aorig, axis=(0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.ma.median(a, (0, 2)), b) + + def test_ambigous_fill(self): + # 255 is max value, used as filler for sort + a = np.array([[3, 3, 255], [3, 3, 255]], dtype=np.uint8) + a = np.ma.masked_array(a, mask=a == 3) + assert_array_equal(np.ma.median(a, axis=1), 255) + assert_array_equal(np.ma.median(a, axis=1).mask, False) + assert_array_equal(np.ma.median(a, axis=0), a[0]) + assert_array_equal(np.ma.median(a), 255) + + def test_special(self): + for inf in [np.inf, -np.inf]: + a = np.array([[inf, np.nan], [np.nan, np.nan]]) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_equal(np.ma.median(a, axis=0), [inf, np.nan]) + assert_equal(np.ma.median(a, axis=1), [inf, np.nan]) + assert_equal(np.ma.median(a), inf) + + a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]]) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_array_equal(np.ma.median(a, axis=1), inf) + assert_array_equal(np.ma.median(a, axis=1).mask, False) + assert_array_equal(np.ma.median(a, axis=0), a[0]) + assert_array_equal(np.ma.median(a), inf) + + # no mask + a = np.array([[inf, inf], [inf, inf]]) + assert_equal(np.ma.median(a), inf) + assert_equal(np.ma.median(a, axis=0), inf) + assert_equal(np.ma.median(a, axis=1), inf) + + a = np.array([[inf, 7, -inf, -9], + [-10, np.nan, np.nan, 5], + [4, np.nan, np.nan, inf]], + dtype=np.float32) + a = np.ma.masked_array(a, mask=np.isnan(a)) + if inf > 0: + assert_equal(np.ma.median(a, axis=0), [4., 7., -inf, 5.]) + assert_equal(np.ma.median(a), 4.5) + else: + assert_equal(np.ma.median(a, axis=0), [-10., 7., -inf, -9.]) + assert_equal(np.ma.median(a), -2.5) + assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf]) + + for i in range(0, 10): + for j in range(1, 10): + a = np.array([([np.nan] * i) + ([inf] * j)] * 2) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_equal(np.ma.median(a), inf) + assert_equal(np.ma.median(a, axis=1), inf) + assert_equal(np.ma.median(a, axis=0), + ([np.nan] * i) + [inf] * j) + + def test_empty(self): + # empty arrays + a = np.ma.masked_array(np.array([], dtype=float)) + with suppress_warnings() as w: + w.record(RuntimeWarning) + assert_array_equal(np.ma.median(a), np.nan) + assert_(w.log[0].category is RuntimeWarning) + + # multiple dimensions + a = np.ma.masked_array(np.array([], dtype=float, ndmin=3)) + # no axis + with suppress_warnings() as w: + w.record(RuntimeWarning) + warnings.filterwarnings('always', '', RuntimeWarning) + assert_array_equal(np.ma.median(a), np.nan) + assert_(w.log[0].category is RuntimeWarning) + + # axis 0 and 1 + b = np.ma.masked_array(np.array([], dtype=float, ndmin=2)) + assert_equal(np.ma.median(a, axis=0), b) + assert_equal(np.ma.median(a, axis=1), b) + + # axis 2 + b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2)) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.ma.median(a, axis=2), b) + assert_(w[0].category is RuntimeWarning) + + def test_object(self): + o = np.ma.masked_array(np.arange(7.)) + assert_(type(np.ma.median(o.astype(object))), float) + o[2] = np.nan + assert_(type(np.ma.median(o.astype(object))), float) + + +class TestCov: + + def setup_method(self): + self.data = array(np.random.rand(12)) + + def test_1d_without_missing(self): + # Test cov on 1D variable w/o missing values + x = self.data + assert_almost_equal(np.cov(x), cov(x)) + assert_almost_equal(np.cov(x, 
rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(x, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + + def test_2d_without_missing(self): + # Test cov on 1 2D variable w/o missing values + x = self.data.reshape(3, 4) + assert_almost_equal(np.cov(x), cov(x)) + assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(x, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + + def test_1d_with_missing(self): + # Test cov 1 1D variable w/missing values + x = self.data + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.cov(nx), cov(x)) + assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(nx, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + # + try: + cov(x, allow_masked=False) + except ValueError: + pass + # + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1])) + assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False), + cov(x, x[::-1], rowvar=False)) + assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True), + cov(x, x[::-1], rowvar=False, bias=True)) + + def test_2d_with_missing(self): + # Test cov on 2D variable w/ missing value + x = self.data + x[-1] = masked + x = x.reshape(3, 4) + valid = np.logical_not(getmaskarray(x)).astype(int) + frac = np.dot(valid, valid.T) + xf = (x - x.mean(1)[:, None]).filled(0) + assert_almost_equal(cov(x), + np.cov(xf) * (x.shape[1] - 1) / (frac - 1.)) + assert_almost_equal(cov(x, bias=True), + np.cov(xf, bias=True) * x.shape[1] / frac) + frac = np.dot(valid.T, valid) + xf = (x - x.mean(0)).filled(0) + assert_almost_equal(cov(x, rowvar=False), + (np.cov(xf, rowvar=False) * + (x.shape[0] - 1) / (frac - 1.))) + assert_almost_equal(cov(x, rowvar=False, bias=True), + (np.cov(xf, rowvar=False, bias=True) * + x.shape[0] / frac)) + + +class TestCorrcoef: + + def setup_method(self): + self.data = array(np.random.rand(12)) + self.data2 = array(np.random.rand(12)) + + def test_ddof(self): + # ddof raises DeprecationWarning + x, y = self.data, self.data2 + expected = np.corrcoef(x) + expected2 = np.corrcoef(x, y) + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, x, ddof=-1) + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof has no or negligible effect on the function + assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) + assert_almost_equal(corrcoef(x, ddof=-1), expected) + assert_almost_equal(corrcoef(x, y, ddof=-1), expected2) + assert_almost_equal(corrcoef(x, ddof=3), expected) + assert_almost_equal(corrcoef(x, y, ddof=3), expected2) + + def test_bias(self): + x, y = self.data, self.data2 + expected = np.corrcoef(x) + # bias raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, x, y, True, False) + assert_warns(DeprecationWarning, corrcoef, x, y, True, True) + assert_warns(DeprecationWarning, corrcoef, x, bias=False) + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # bias has no or negligible effect on the function + assert_almost_equal(corrcoef(x, bias=1), expected) + + def test_1d_without_missing(self): + # Test cov on 1D variable w/o missing values + x = self.data + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + 
sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + + def test_2d_without_missing(self): + # Test corrcoef on 1 2D variable w/o missing values + x = self.data.reshape(3, 4) + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + + def test_1d_with_missing(self): + # Test corrcoef 1 1D variable w/missing values + x = self.data + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.corrcoef(nx), corrcoef(x)) + assert_almost_equal(np.corrcoef(nx, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + try: + corrcoef(x, allow_masked=False) + except ValueError: + pass + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) + assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), + corrcoef(x, x[::-1], rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof and bias have no or negligible effect on the function + assert_almost_equal(np.corrcoef(nx, nx[::-1]), + corrcoef(x, x[::-1], bias=1)) + assert_almost_equal(np.corrcoef(nx, nx[::-1]), + corrcoef(x, x[::-1], ddof=2)) + + def test_2d_with_missing(self): + # Test corrcoef on 2D variable w/ missing value + x = self.data + x[-1] = masked + x = x.reshape(3, 4) + + test = corrcoef(x) + control = np.corrcoef(x) + assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof and bias have no or negligible effect on the function + assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], + control[:-1, :-1]) + assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1], + control[:-1, :-1]) + assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1], + control[:-1, :-1]) + + +class TestPolynomial: + # + def test_polyfit(self): + # Tests polyfit + # On ndarrays + x = np.random.rand(10) + y = np.random.rand(20).reshape(-1, 2) + assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3)) + # ON 1D maskedarrays + x = x.view(MaskedArray) + x[0] = masked + y = y.view(MaskedArray) + y[0, 0] = y[-1, -1] = masked + # + (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3, + full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + (C, R, K, S, D) = polyfit(x, y, 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + # + w = np.random.rand(10) + 1 + wo = w.copy() + xs = x[1:-1] + ys = y[1:-1] + ws = w[1:-1] + (C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w) + (c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws) + 
assert_equal(w, wo) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + + def test_polyfit_with_masked_NaNs(self): + x = np.random.rand(10) + y = np.random.rand(20).reshape(-1, 2) + + x[0] = np.nan + y[-1,-1] = np.nan + x = x.view(MaskedArray) + y = y.view(MaskedArray) + x[0] = masked + y[-1,-1] = masked + + (C, R, K, S, D) = polyfit(x, y, 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): + assert_almost_equal(a, a_) + + +class TestArraySetOps: + + def test_unique_onlist(self): + # Test unique on list + data = [1, 1, 1, 2, 2, 3] + test = unique(data, return_index=True, return_inverse=True) + assert_(isinstance(test[0], MaskedArray)) + assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0])) + assert_equal(test[1], [0, 3, 5]) + assert_equal(test[2], [0, 0, 0, 1, 1, 2]) + + def test_unique_onmaskedarray(self): + # Test unique on masked data w/use_mask=True + data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0]) + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) + assert_equal(test[1], [0, 3, 5, 2]) + assert_equal(test[2], [0, 0, 3, 1, 3, 2]) + # + data.fill_value = 3 + data = masked_array(data=[1, 1, 1, 2, 2, 3], + mask=[0, 0, 1, 0, 1, 0], fill_value=3) + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) + assert_equal(test[1], [0, 3, 5, 2]) + assert_equal(test[2], [0, 0, 3, 1, 3, 2]) + + def test_unique_allmasked(self): + # Test all masked + data = masked_array([1, 1, 1], mask=True) + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, ], mask=[True])) + assert_equal(test[1], [0]) + assert_equal(test[2], [0, 0, 0]) + # + # Test masked + data = masked + test = unique(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array(masked)) + assert_equal(test[1], [0]) + assert_equal(test[2], [0]) + + def test_ediff1d(self): + # Tests mediff1d + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + control = array([1, 1, 1, 4], mask=[1, 0, 0, 1]) + test = ediff1d(x) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_tobegin(self): + # Test ediff1d w/ to_begin + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_begin=masked) + control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_begin=[1, 2, 3]) + control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_toend(self): + # Test ediff1d w/ to_end + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_end=masked) + control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1, 2, 3]) + control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_tobegin_toend(self): 
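+        # A hedged mini-example with hypothetical values before the masked
+        # cases: to_begin/to_end are attached around the differenced data.
+        demo = ediff1d(masked_array([1, 4, 9]), to_begin=0, to_end=99)
+        assert_equal(demo, [0, 3, 5, 99])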
+ # Test ediff1d w/ to_begin and to_end + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked) + control = array([0, 1, 1, 1, 4, 1, 2, 3], + mask=[1, 1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_ndarray(self): + # Test ediff1d w/ a ndarray + x = np.arange(5) + test = ediff1d(x) + control = array([1, 1, 1, 1], mask=[0, 0, 0, 0]) + assert_equal(test, control) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_intersect1d(self): + # Test intersect1d + x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + test = intersect1d(x, y) + control = array([1, 3, -1], mask=[0, 0, 1]) + assert_equal(test, control) + + def test_setxor1d(self): + # Test setxor1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7])) + # + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array([1, 2, 3]) + b = array([6, 5, 4]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + assert_array_equal([], setxor1d([], [])) + + def test_isin(self): + # the tests for in1d cover most of isin's behavior + # if in1d is removed, would need to change those tests to test + # isin instead. 
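+        # Unlike in1d, isin preserves the shape of its first argument; a
+        # tiny sketch with hypothetical values:
+        demo = isin(array([[1, 2], [3, 4]]), [2, 4])
+        assert_equal(demo, [[False, True], [False, True]])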
+ a = np.arange(24).reshape([2, 3, 4]) + mask = np.zeros([2, 3, 4]) + mask[1, 2, 0] = 1 + a = array(a, mask=mask) + b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33], + mask=[0, 1, 0, 1, 0, 1, 0, 1, 0]) + ec = zeros((2, 3, 4), dtype=bool) + ec[0, 0, 0] = True + ec[0, 0, 1] = True + ec[0, 2, 3] = True + c = isin(a, b) + assert_(isinstance(c, MaskedArray)) + assert_array_equal(c, ec) + #compare results of np.isin to ma.isin + d = np.isin(a, b[~b.mask]) & ~a.mask + assert_array_equal(c, d) + + def test_in1d(self): + # Test in1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, True, False, True]) + # + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, False, True, True]) + # + assert_array_equal([], in1d([], [])) + + def test_in1d_invert(self): + # Test in1d's invert parameter + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + assert_array_equal([], in1d([], [], invert=True)) + + def test_union1d(self): + # Test union1d + a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = union1d(a, b) + control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) + assert_equal(test, control) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]]) + y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1]) + ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1]) + z = union1d(x, y) + assert_equal(z, ez) + # + assert_array_equal([], union1d([], [])) + + def test_setdiff1d(self): + # Test setdiff1d + a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1]) + b = array([2, 4, 3, 3, 2, 1, 5]) + test = setdiff1d(a, b) + assert_equal(test, array([6, 7, -1], mask=[0, 0, 1])) + # + a = arange(10) + b = arange(8) + assert_equal(setdiff1d(a, b), array([8, 9])) + a = array([], np.uint32, mask=[]) + assert_equal(setdiff1d(a, []).dtype, np.uint32) + + def test_setdiff1d_char_array(self): + # Test setdiff1d_charray + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) + + +class TestShapeBase: + + def test_atleast_2d(self): + # Test atleast_2d + a = masked_array([0, 1, 2], mask=[0, 1, 0]) + b = atleast_2d(a) + assert_equal(b.shape, (1, 3)) + assert_equal(b.mask.shape, b.data.shape) + assert_equal(a.shape, (3,)) + assert_equal(a.mask.shape, a.data.shape) + assert_equal(b.mask.shape, b.data.shape) + + def test_shape_scalar(self): + # the atleast and diagflat function should work with scalars + # GitHub issue #3367 + # Additionally, the atleast functions should accept multiple scalars + # correctly + b = atleast_1d(1.0) + assert_equal(b.shape, (1,)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_1d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1,)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = atleast_2d(1.0) + assert_equal(b.shape, (1, 1)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) 
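+        # (With several scalar arguments, each atleast_* call below returns
+        # a list of masked arrays, one per argument, mirroring np.atleast_*.)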
+ + b = atleast_2d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1, 1)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = atleast_3d(1.0) + assert_equal(b.shape, (1, 1, 1)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_3d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1, 1, 1)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = diagflat(1.0) + assert_equal(b.shape, (1, 1)) + assert_equal(b.mask.shape, b.data.shape) + + +class TestNDEnumerate: + + def test_ndenumerate_nomasked(self): + ordinary = np.arange(6.).reshape((1, 3, 2)) + empty_mask = np.zeros_like(ordinary, dtype=bool) + with_mask = masked_array(ordinary, mask=empty_mask) + assert_equal(list(np.ndenumerate(ordinary)), + list(ndenumerate(ordinary))) + assert_equal(list(ndenumerate(ordinary)), + list(ndenumerate(with_mask))) + assert_equal(list(ndenumerate(with_mask)), + list(ndenumerate(with_mask, compressed=False))) + + def test_ndenumerate_allmasked(self): + a = masked_all(()) + b = masked_all((100,)) + c = masked_all((2, 3, 4)) + assert_equal(list(ndenumerate(a)), []) + assert_equal(list(ndenumerate(b)), []) + assert_equal(list(ndenumerate(b, compressed=False)), + list(zip(np.ndindex((100,)), 100 * [masked]))) + assert_equal(list(ndenumerate(c)), []) + assert_equal(list(ndenumerate(c, compressed=False)), + list(zip(np.ndindex((2, 3, 4)), 2 * 3 * 4 * [masked]))) + + def test_ndenumerate_mixedmasked(self): + a = masked_array(np.arange(12).reshape((3, 4)), + mask=[[1, 1, 1, 1], + [1, 1, 0, 1], + [0, 0, 0, 0]]) + items = [((1, 2), 6), + ((2, 0), 8), ((2, 1), 9), ((2, 2), 10), ((2, 3), 11)] + assert_equal(list(ndenumerate(a)), items) + assert_equal(len(list(ndenumerate(a, compressed=False))), a.size) + for coordinate, value in ndenumerate(a, compressed=False): + assert_equal(a[coordinate], value) + + +class TestStack: + + def test_stack_1d(self): + a = masked_array([0, 1, 2], mask=[0, 1, 0]) + b = masked_array([9, 8, 7], mask=[1, 0, 0]) + + c = stack([a, b], axis=0) + assert_equal(c.shape, (2, 3)) + assert_array_equal(a.mask, c[0].mask) + assert_array_equal(b.mask, c[1].mask) + + d = vstack([a, b]) + assert_array_equal(c.data, d.data) + assert_array_equal(c.mask, d.mask) + + c = stack([a, b], axis=1) + assert_equal(c.shape, (3, 2)) + assert_array_equal(a.mask, c[:, 0].mask) + assert_array_equal(b.mask, c[:, 1].mask) + + def test_stack_masks(self): + a = masked_array([0, 1, 2], mask=True) + b = masked_array([9, 8, 7], mask=False) + + c = stack([a, b], axis=0) + assert_equal(c.shape, (2, 3)) + assert_array_equal(a.mask, c[0].mask) + assert_array_equal(b.mask, c[1].mask) + + d = vstack([a, b]) + assert_array_equal(c.data, d.data) + assert_array_equal(c.mask, d.mask) + + c = stack([a, b], axis=1) + assert_equal(c.shape, (3, 2)) + assert_array_equal(a.mask, c[:, 0].mask) + assert_array_equal(b.mask, c[:, 1].mask) + + def test_stack_nd(self): + # 2D + shp = (3, 2) + d1 = np.random.randint(0, 10, shp) + d2 = np.random.randint(0, 10, shp) + m1 = np.random.randint(0, 2, shp).astype(bool) + m2 = np.random.randint(0, 2, shp).astype(bool) + a1 = masked_array(d1, mask=m1) + a2 = masked_array(d2, mask=m2) + + c = stack([a1, a2], axis=0) + c_shp = (2,) + shp + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[0].mask) + assert_array_equal(a2.mask, c[1].mask) + + c = stack([a1, a2], axis=-1) + c_shp = shp + (2,) + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[..., 0].mask) + assert_array_equal(a2.mask, 
c[..., 1].mask) + + # 4D + shp = (3, 2, 4, 5,) + d1 = np.random.randint(0, 10, shp) + d2 = np.random.randint(0, 10, shp) + m1 = np.random.randint(0, 2, shp).astype(bool) + m2 = np.random.randint(0, 2, shp).astype(bool) + a1 = masked_array(d1, mask=m1) + a2 = masked_array(d2, mask=m2) + + c = stack([a1, a2], axis=0) + c_shp = (2,) + shp + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[0].mask) + assert_array_equal(a2.mask, c[1].mask) + + c = stack([a1, a2], axis=-1) + c_shp = shp + (2,) + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[..., 0].mask) + assert_array_equal(a2.mask, c[..., 1].mask) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_mrecords.py b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_mrecords.py new file mode 100644 index 00000000..77123c3c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_mrecords.py @@ -0,0 +1,493 @@ +# pylint: disable-msg=W0611, W0612, W0511,R0201 +"""Tests suite for mrecords. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +import numpy as np +import numpy.ma as ma +from numpy import recarray +from numpy.ma import masked, nomask +from numpy.testing import temppath +from numpy.core.records import ( + fromrecords as recfromrecords, fromarrays as recfromarrays + ) +from numpy.ma.mrecords import ( + MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords, + addfield + ) +from numpy.ma.testutils import ( + assert_, assert_equal, + assert_equal_records, + ) +from numpy.compat import pickle + + +class TestMRecords: + + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = [b'one', b'two', b'three', b'four', b'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mask = [0, 1, 0, 0, 1] + base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + + def test_byview(self): + # Test creation by view + base = self.base + mbase = base.view(mrecarray) + assert_equal(mbase.recordmask, base.recordmask) + assert_equal_records(mbase._mask, base._mask) + assert_(isinstance(mbase._data, recarray)) + assert_equal_records(mbase._data, base._data.view(recarray)) + for field in ('a', 'b', 'c'): + assert_equal(base[field], mbase[field]) + assert_equal_records(mbase.view(mrecarray), mbase) + + def test_get(self): + # Tests fields retrieval + base = self.base.copy() + mbase = base.view(mrecarray) + # As fields.......... + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase, field), mbase[field]) + assert_equal(base[field], mbase[field]) + # as elements ....... + mbase_first = mbase[0] + assert_(isinstance(mbase_first, mrecarray)) + assert_equal(mbase_first.dtype, mbase.dtype) + assert_equal(mbase_first.tolist(), (1, 1.1, b'one')) + # Used to be mask, now it's recordmask + assert_equal(mbase_first.recordmask, nomask) + assert_equal(mbase_first._mask.item(), (False, False, False)) + assert_equal(mbase_first['a'], mbase['a'][0]) + mbase_last = mbase[-1] + assert_(isinstance(mbase_last, mrecarray)) + assert_equal(mbase_last.dtype, mbase.dtype) + assert_equal(mbase_last.tolist(), (None, None, None)) + # Used to be mask, now it's recordmask + assert_equal(mbase_last.recordmask, True) + assert_equal(mbase_last._mask.item(), (True, True, True)) + assert_equal(mbase_last['a'], mbase['a'][-1]) + assert_((mbase_last['a'] is masked)) + # as slice .......... 
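+        # (Slicing keeps the per-field mask, so the fully masked second
+        # record shows up as recordmask [0, 1] below.)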
+ mbase_sl = mbase[:2] + assert_(isinstance(mbase_sl, mrecarray)) + assert_equal(mbase_sl.dtype, mbase.dtype) + # Used to be mask, now it's recordmask + assert_equal(mbase_sl.recordmask, [0, 1]) + assert_equal_records(mbase_sl.mask, + np.array([(False, False, False), + (True, True, True)], + dtype=mbase._mask.dtype)) + assert_equal_records(mbase_sl, base[:2].view(mrecarray)) + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase_sl, field), base[:2][field]) + + def test_set_fields(self): + # Tests setting fields. + base = self.base.copy() + mbase = base.view(mrecarray) + mbase = mbase.copy() + mbase.fill_value = (999999, 1e20, 'N/A') + # Change the data, the mask should be conserved + mbase.a._data[:] = 5 + assert_equal(mbase['a']._data, [5, 5, 5, 5, 5]) + assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1]) + # Change the elements, and the mask will follow + mbase.a = 1 + assert_equal(mbase['a']._data, [1]*5) + assert_equal(ma.getmaskarray(mbase['a']), [0]*5) + # Use to be _mask, now it's recordmask + assert_equal(mbase.recordmask, [False]*5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 0), + (0, 1, 1), + (0, 0, 0), + (0, 0, 0), + (0, 1, 1)], + dtype=bool)) + # Set a field to mask ........................ + mbase.c = masked + # Use to be mask, and now it's still mask ! + assert_equal(mbase.c.mask, [1]*5) + assert_equal(mbase.c.recordmask, [1]*5) + assert_equal(ma.getmaskarray(mbase['c']), [1]*5) + assert_equal(ma.getdata(mbase['c']), [b'N/A']*5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 1), + (0, 1, 1), + (0, 0, 1), + (0, 0, 1), + (0, 1, 1)], + dtype=bool)) + # Set fields by slices ....................... + mbase = base.view(mrecarray).copy() + mbase.a[3:] = 5 + assert_equal(mbase.a, [1, 2, 3, 5, 5]) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 0]) + mbase.b[3:] = masked + assert_equal(mbase.b, base['b']) + assert_equal(mbase.b._mask, [0, 1, 0, 1, 1]) + # Set fields globally.......................... + ndtype = [('alpha', '|S1'), ('num', int)] + data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype) + rdata = data.view(MaskedRecords) + val = ma.array([10, 20, 30], mask=[1, 0, 0]) + + rdata['num'] = val + assert_equal(rdata.num, val) + assert_equal(rdata.num.mask, [1, 0, 0]) + + def test_set_fields_mask(self): + # Tests setting the mask of a field. + base = self.base.copy() + # This one has already a mask.... + mbase = base.view(mrecarray) + mbase['a'][-2] = masked + assert_equal(mbase.a, [1, 2, 3, 4, 5]) + assert_equal(mbase.a._mask, [0, 1, 0, 1, 1]) + # This one has not yet + mbase = fromarrays([np.arange(5), np.random.rand(5)], + dtype=[('a', int), ('b', float)]) + mbase['a'][-2] = masked + assert_equal(mbase.a, [0, 1, 2, 3, 4]) + assert_equal(mbase.a._mask, [0, 0, 0, 1, 0]) + + def test_set_mask(self): + base = self.base.copy() + mbase = base.view(mrecarray) + # Set the mask to True ....................... + mbase.mask = masked + assert_equal(ma.getmaskarray(mbase['b']), [1]*5) + assert_equal(mbase['a']._mask, mbase['b']._mask) + assert_equal(mbase['a']._mask, mbase['c']._mask) + assert_equal(mbase._mask.tolist(), + np.array([(1, 1, 1)]*5, dtype=bool)) + # Delete the mask ............................ 
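+        # Assigning nomask clears every field's mask at once; on a plain
+        # masked array (hypothetical values) the same idiom gives:
+        demo = ma.array([1, 2], mask=[1, 0])
+        demo.mask = nomask
+        assert_equal(ma.getmaskarray(demo), [0, 0])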
+ mbase.mask = nomask + assert_equal(ma.getmaskarray(mbase['c']), [0]*5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 0)]*5, dtype=bool)) + + def test_set_mask_fromarray(self): + base = self.base.copy() + mbase = base.view(mrecarray) + # Sets the mask w/ an array + mbase.mask = [1, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [1, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [1, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [1, 0, 0, 0, 1]) + # Yay, once more ! + mbase.mask = [0, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [0, 0, 0, 0, 1]) + + def test_set_mask_fromfields(self): + mbase = self.base.copy().view(mrecarray) + + nmask = np.array( + [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)], + dtype=[('a', bool), ('b', bool), ('c', bool)]) + mbase.mask = nmask + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) + # Reinitialize and redo + mbase.mask = False + mbase.fieldmask = nmask + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) + + def test_set_elements(self): + base = self.base.copy() + # Set an element to mask ..................... + mbase = base.view(mrecarray).copy() + mbase[-2] = masked + assert_equal( + mbase._mask.tolist(), + np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)], + dtype=bool)) + # Used to be mask, now it's recordmask! + assert_equal(mbase.recordmask, [0, 1, 0, 1, 1]) + # Set slices ................................. + mbase = base.view(mrecarray).copy() + mbase[:2] = (5, 5, 5) + assert_equal(mbase.a._data, [5, 5, 3, 4, 5]) + assert_equal(mbase.a._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c._data, + [b'5', b'5', b'three', b'four', b'five']) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) + + mbase = base.view(mrecarray).copy() + mbase[:2] = masked + assert_equal(mbase.a._data, [1, 2, 3, 4, 5]) + assert_equal(mbase.a._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.c._data, + [b'one', b'two', b'three', b'four', b'five']) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) + + def test_setslices_hardmask(self): + # Tests setting slices w/ hardmask. + base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + try: + mbase[-2:] = (5, 5, 5) + assert_equal(mbase.a._data, [1, 2, 3, 5, 5]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5]) + assert_equal(mbase.c._data, + [b'one', b'two', b'three', b'5', b'five']) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 1]) + assert_equal(mbase.b._mask, mbase.a._mask) + assert_equal(mbase.b._mask, mbase.c._mask) + except NotImplementedError: + # OK, not implemented yet... 
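+            # (A hard mask must survive slice assignment; until flexible
+            # hard masks are implemented, NotImplementedError is tolerated.)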
+            pass
+        except AssertionError:
+            raise
+        else:
+            raise Exception("Flexible hard masks should be supported !")
+        # Not using a tuple should crash
+        try:
+            mbase[-2:] = 3
+        except (NotImplementedError, TypeError):
+            pass
+        else:
+            raise TypeError("Should have expected a readable buffer object!")
+
+    def test_hardmask(self):
+        # Test hardmask
+        base = self.base.copy()
+        mbase = base.view(mrecarray)
+        mbase.harden_mask()
+        assert_(mbase._hardmask)
+        mbase.mask = nomask
+        assert_equal_records(mbase._mask, base._mask)
+        mbase.soften_mask()
+        assert_(not mbase._hardmask)
+        mbase.mask = nomask
+        # So, the mask of a field is no longer set to nomask...
+        assert_equal_records(mbase._mask,
+                             ma.make_mask_none(base.shape, base.dtype))
+        assert_(ma.make_mask(mbase['b']._mask) is nomask)
+        assert_equal(mbase['a']._mask, mbase['b']._mask)
+
+    def test_pickling(self):
+        # Test pickling
+        base = self.base.copy()
+        mrec = base.view(mrecarray)
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            _ = pickle.dumps(mrec, protocol=proto)
+            mrec_ = pickle.loads(_)
+            assert_equal(mrec_.dtype, mrec.dtype)
+            assert_equal_records(mrec_._data, mrec._data)
+            assert_equal(mrec_._mask, mrec._mask)
+            assert_equal_records(mrec_._mask, mrec._mask)
+
+    def test_filled(self):
+        # Test filling the array
+        _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
+        _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
+        _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
+        ddtype = [('a', int), ('b', float), ('c', '|S8')]
+        mrec = fromarrays([_a, _b, _c], dtype=ddtype,
+                          fill_value=(99999, 99999., 'N/A'))
+        mrecfilled = mrec.filled()
+        assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int))
+        assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.),
+                                               dtype=float))
+        assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'),
+                                               dtype='|S8'))
+
+    def test_tolist(self):
+        # Test tolist.
+        _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
+        _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
+        _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8')
+        ddtype = [('a', int), ('b', float), ('c', '|S8')]
+        mrec = fromarrays([_a, _b, _c], dtype=ddtype,
+                          fill_value=(99999, 99999., 'N/A'))
+
+        assert_equal(mrec.tolist(),
+                     [(1, 1.1, None), (2, 2.2, b'two'),
+                      (None, None, b'three')])
+
+    def test_withnames(self):
+        # Test the creation w/ format and names
+        x = mrecarray(1, formats=float, names='base')
+        x[0]['base'] = 10
+        assert_equal(x['base'][0], 10)
+
+    def test_exotic_formats(self):
+        # Test that 'exotic' formats are processed properly
+        easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)])
+        easy[0] = masked
+        assert_equal(easy.filled(1).item(), (1, b'1', 1.))
+
+        solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))])

[... gap in the source: the remainder of test_mrecords.py and the beginning of numpy/ma/tests/test_old_ma.py (module docstring, imports, and the first tests) are missing; the diff resumes mid-method in test_old_ma.py ...]

+        if len(s) > 1:
+            assert_(eq(np.concatenate((x, y), 1),
+                       concatenate((xm, ym), 1)))
+            assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
+            assert_(eq(np.sum(x, 1), sum(x, 1)))
+            assert_(eq(np.prod(x, 1), product(x, 1)))
+
+    def test_testCI(self):
+        # Test of conversions and indexing
+        x1 = np.array([1, 2, 4, 3])
+        x2 = array(x1, mask=[1, 0, 0, 0])
+        x3 = array(x1, mask=[0, 1, 0, 1])
+        x4 = array(x1)
+        # test conversion to strings
+        str(x2)  # raises?
+        repr(x2)  # raises?
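+        # (The two calls above are smoke tests: building the string forms
+        # of a masked array must not raise, so the results are discarded.)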
+ assert_(eq(np.sort(x1), sort(x2, fill_value=0))) + # tests of indexing + assert_(type(x2[1]) is type(x1[1])) + assert_(x1[1] == x2[1]) + assert_(x2[0] is masked) + assert_(eq(x1[2], x2[2])) + assert_(eq(x1[2:5], x2[2:5])) + assert_(eq(x1[:], x2[:])) + assert_(eq(x1[1:], x3[1:])) + x1[2] = 9 + x2[2] = 9 + assert_(eq(x1, x2)) + x1[1:3] = 99 + x2[1:3] = 99 + assert_(eq(x1, x2)) + x2[1] = masked + assert_(eq(x1, x2)) + x2[1:3] = masked + assert_(eq(x1, x2)) + x2[:] = x1 + x2[1] = masked + assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) + x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) + x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) + assert_(allequal(x4, array([1, 2, 3, 4]))) + x1 = np.arange(5) * 1.0 + x2 = masked_values(x1, 3.0) + assert_(eq(x1, x2)) + assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) + assert_(eq(3.0, x2.fill_value)) + x1 = array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + s1 = x1[1] + s2 = x2[1] + assert_equal(type(s2), str) + assert_equal(type(s1), str) + assert_equal(s1, s2) + assert_(x1[1:1].shape == (0,)) + + def test_testCopySize(self): + # Tests of some subtle points of copying and sizing. + n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + assert_(m is m2) + m3 = make_mask(m, copy=True) + assert_(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + assert_(y1._data is not x1) + assert_(allequal(x1, y1._data)) + assert_(y1._mask is m) + + y1a = array(y1, copy=0) + # For copy=False, one might expect that the array would just + # passed on, i.e., that it would be "is" instead of "==". + # See gh-4043 for discussion. + assert_(y1a._mask.__array_interface__ == + y1._mask.__array_interface__) + + y2 = array(x1, mask=m3, copy=0) + assert_(y2._mask is m3) + assert_(y2[2] is masked) + y2[2] = 9 + assert_(y2[2] is not masked) + assert_(y2._mask is m3) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a._mask is not m) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + assert_(y2a._mask is not m) + assert_(allequal(y2a.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + assert_(eq(concatenate([x4, x4]), y4)) + assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3])) + y6 = repeat(x4, 2, axis=0) + assert_(eq(y5, y6)) + + def test_testPut(self): + # Test of put + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + m2 = m.copy() + x = array(d, mask=m) + assert_(x[3] is masked) + assert_(x[4] is masked) + x[[1, 4]] = [10, 40] + assert_(x._mask is m) + assert_(x[3] is masked) + assert_(x[4] is not masked) + assert_(eq(x, [0, 10, 2, -1, 40])) + + x = array(d, mask=m2, copy=True) + x.put([0, 1, 2], [-1, 100, 200]) + assert_(x._mask is not m2) + assert_(x[3] is masked) + assert_(x[4] is masked) + assert_(eq(x, [-1, 100, 200, 0, 0])) + + def test_testPut2(self): + # Test of put + d = arange(5) + x = array(d, mask=[0, 0, 0, 0, 0]) + z = array([10, 40], mask=[1, 0]) + assert_(x[2] is not masked) + assert_(x[3] is not masked) + x[2:4] = z + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) + + d = arange(5) + x = array(d, mask=[0, 0, 0, 0, 0]) + y = x[2:4] + z = array([10, 40], mask=[1, 0]) + assert_(x[2] is not masked) + assert_(x[3] is 
not masked) + y[:] = z + assert_(y[0] is masked) + assert_(y[1] is not masked) + assert_(eq(y, [10, 40])) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) + + def test_testMaPut(self): + (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] + i = np.nonzero(m)[0] + put(ym, i, zm) + assert_(all(take(ym, i, axis=0) == zm)) + + def test_testOddFeatures(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_(eq(z.real, x)) + assert_(eq(z.imag, 10 * x)) + assert_(eq((z * conjugate(z)).real, 101 * x * x)) + z.imag[...] = 0.0 + + x = arange(10) + x[3] = masked + assert_(str(x[3]) == str(masked)) + c = x >= 8 + assert_(count(where(c, masked, masked)) == 0) + assert_(shape(where(c, masked, masked)) == c.shape) + z = where(c, x, masked) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is masked) + assert_(z[7] is masked) + assert_(z[8] is not masked) + assert_(z[9] is not masked) + assert_(eq(x, z)) + z = where(c, masked, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + z = masked_where(c, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + assert_(eq(x, z)) + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_(eq(z, [1., 2., 0., -4., -5])) + c[0] = masked + z = where(c, x, -x) + assert_(eq(z, [1., 2., 0., -4., -5])) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2))) + assert_(eq(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2))) + assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2))) + assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2))) + assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) + assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2))) + assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) + assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4])) + assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])) + assert_(eq(masked_inside(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 1, 3).mask, + [1, 1, 1, 1, 0])) + assert_(eq(masked_outside(array(list(range(5)), + mask=[0, 1, 0, 0, 0]), 1, 3).mask, + [1, 1, 0, 0, 1])) + assert_(eq(masked_equal(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 0])) + assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1], + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 1])) + assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5])) + atest = ones((10, 10, 10), dtype=np.float32) + btest = zeros(atest.shape, MaskType) + ctest = masked_where(btest, atest) + assert_(eq(atest, ctest)) + z = choose(c, (-x, x)) + assert_(eq(z, [1., 2., 0., -4., -5])) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + x = arange(6) + x[5] = masked + y = arange(6) * 10 + y[2] = masked + c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0]) + cm = c.filled(1) + z = where(c, x, y) + zm = where(cm, x, y) + assert_(eq(z, zm)) + assert_(getmask(zm) is nomask) + assert_(eq(zm, [0, 1, 2, 30, 40, 50])) + z = where(c, 
masked, 1) + assert_(eq(z, [99, 99, 99, 1, 1, 1])) + z = where(c, 1, masked) + assert_(eq(z, [99, 1, 1, 99, 99, 99])) + + def test_testMinMax2(self): + # Test of minimum, maximum. + assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])) + assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])) + x = arange(5) + y = arange(5) - 2 + x[3] = masked + y[0] = masked + assert_(eq(minimum(x, y), where(less(x, y), x, y))) + assert_(eq(maximum(x, y), where(greater(x, y), x, y))) + assert_(minimum.reduce(x) == 0) + assert_(maximum.reduce(x) == 4) + + def test_testTakeTransposeInnerOuter(self): + # Test of take, transpose, inner, outer products + x = arange(24) + y = np.arange(24) + x[5:6] = masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))) + assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))) + assert_(eq(np.inner(filled(x, 0), filled(y, 0)), + inner(x, y))) + assert_(eq(np.outer(filled(x, 0), filled(y, 0)), + outer(x, y))) + y = array(['abc', 1, 'def', 2, 3], object) + y[2] = masked + t = take(y, [0, 3, 4]) + assert_(t[0] == 'abc') + assert_(t[1] == 2) + assert_(t[2] == 3) + + def test_testInplace(self): + # Test of inplace operations and rich comparisons + y = arange(10) + + x = arange(10) + xm = arange(10) + xm[2] = masked + x += 1 + assert_(eq(x, y + 1)) + xm += 1 + assert_(eq(x, y + 1)) + + x = arange(10) + xm = arange(10) + xm[2] = masked + x -= 1 + assert_(eq(x, y - 1)) + xm -= 1 + assert_(eq(xm, y - 1)) + + x = arange(10) * 1.0 + xm = arange(10) * 1.0 + xm[2] = masked + x *= 2.0 + assert_(eq(x, y * 2)) + xm *= 2.0 + assert_(eq(xm, y * 2)) + + x = arange(10) * 2 + xm = arange(10) + xm[2] = masked + x //= 2 + assert_(eq(x, y)) + xm //= 2 + assert_(eq(x, y)) + + x = arange(10) * 1.0 + xm = arange(10) * 1.0 + xm[2] = masked + x /= 2.0 + assert_(eq(x, y / 2.0)) + xm /= arange(10) + assert_(eq(xm, ones((10,)))) + + x = arange(10).astype(np.float32) + xm = arange(10) + xm[2] = masked + x += 1. + assert_(eq(x, y + 1.)) + + def test_testPickle(self): + # Test of pickling + x = arange(12) + x[4:10:2] = masked + x = x.reshape(4, 3) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + s = pickle.dumps(x, protocol=proto) + y = pickle.loads(s) + assert_(eq(x, y)) + + def test_testMasked(self): + # Test of masked element + xx = arange(6) + xx[1] = masked + assert_(str(masked) == '--') + assert_(xx[1] is masked) + assert_equal(filled(xx[1], 0), 0) + + def test_testAverage1(self): + # Test of average. + ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + assert_(eq(2.0, average(ott, axis=0))) + assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.]))) + result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True) + assert_(eq(2.0, result)) + assert_(wts == 4.0) + ott[:] = masked + assert_(average(ott, axis=0) is masked) + ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + ott = ott.reshape(2, 2) + ott[:, 1] = masked + assert_(eq(average(ott, axis=0), [2.0, 0.0])) + assert_(average(ott, axis=1)[0] is masked) + assert_(eq([2., 0.], average(ott, axis=0))) + result, wts = average(ott, axis=0, returned=True) + assert_(eq(wts, [1., 0.])) + + def test_testAverage2(self): + # More tests of average. + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = arange(6) + assert_(allclose(average(x, axis=0), 2.5)) + assert_(allclose(average(x, axis=0, weights=w1), 2.5)) + y = array([arange(6), 2.0 * arange(6)]) + assert_(allclose(average(y, None), + np.add.reduce(np.arange(6)) * 3. 
/ 12.)) + assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) + assert_(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0)*2.0])) + assert_(allclose(average(y, None, weights=w2), 20. / 6.)) + assert_(allclose(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.])) + assert_(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0)*2.0])) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + assert_(allclose(average(masked_array(x, m1), axis=0), 2.5)) + assert_(allclose(average(masked_array(x, m2), axis=0), 2.5)) + assert_(average(masked_array(x, m4), axis=0) is masked) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + assert_(allclose(average(z, None), 20. / 6.)) + assert_(allclose(average(z, axis=0), + [0., 1., 99., 99., 4.0, 7.5])) + assert_(allclose(average(z, axis=1), [2.5, 5.0])) + assert_(allclose(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0])) + + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=True) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True) + assert_(shape(w2) == shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[0, 0], [1, 0]]) + a2da = average(a2d, axis=0) + assert_(eq(a2da, [0.5, 3.0])) + a2dma = average(a2dm, axis=0) + assert_(eq(a2dma, [1.0, 3.0])) + a2dma = average(a2dm, axis=None) + assert_(eq(a2dma, 7. 
/ 3.)) + a2dma = average(a2dm, axis=1) + assert_(eq(a2dma, [1.5, 4.0])) + + def test_testToPython(self): + assert_equal(1, int(array(1))) + assert_equal(1.0, float(array(1))) + assert_equal(1, int(array([[[1]]]))) + assert_equal(1.0, float(array([[1]]))) + assert_raises(TypeError, float, array([1, 1])) + assert_raises(ValueError, bool, array([0, 1])) + assert_raises(ValueError, bool, array([0, 0], mask=[0, 1])) + + def test_testScalarArithmetic(self): + xm = array(0, mask=1) + #TODO FIXME: Find out what the following raises a warning in r8247 + with np.errstate(divide='ignore'): + assert_((1 / array(0)).mask) + assert_((1 + xm).mask) + assert_((-xm).mask) + assert_((-xm).mask) + assert_(maximum(xm, xm).mask) + assert_(minimum(xm, xm).mask) + assert_(xm.filled().dtype is xm._data.dtype) + x = array(0, mask=0) + assert_(x.filled() == x._data) + assert_equal(str(xm), str(masked_print_option)) + + def test_testArrayMethods(self): + a = array([1, 3, 2]) + assert_(eq(a.any(), a._data.any())) + assert_(eq(a.all(), a._data.all())) + assert_(eq(a.argmax(), a._data.argmax())) + assert_(eq(a.argmin(), a._data.argmin())) + assert_(eq(a.choose(0, 1, 2, 3, 4), + a._data.choose(0, 1, 2, 3, 4))) + assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))) + assert_(eq(a.conj(), a._data.conj())) + assert_(eq(a.conjugate(), a._data.conjugate())) + m = array([[1, 2], [3, 4]]) + assert_(eq(m.diagonal(), m._data.diagonal())) + assert_(eq(a.sum(), a._data.sum())) + assert_(eq(a.take([1, 2]), a._data.take([1, 2]))) + assert_(eq(m.transpose(), m._data.transpose())) + + def test_testArrayAttributes(self): + a = array([1, 3, 2]) + assert_equal(a.ndim, 1) + + def test_testAPI(self): + assert_(not [m for m in dir(np.ndarray) + if m not in dir(MaskedArray) and + not m.startswith('_')]) + + def test_testSingleElementSubscript(self): + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a[0].shape, ()) + assert_equal(b[0].shape, ()) + assert_equal(b[1].shape, ()) + + def test_assignment_by_condition(self): + # Test for gh-18951 + a = array([1, 2, 3, 4], mask=[1, 0, 1, 0]) + c = a >= 3 + a[c] = 5 + assert_(a[2] is masked) + + def test_assignment_by_condition_2(self): + # gh-19721 + a = masked_array([0, 1], mask=[False, False]) + b = masked_array([0, 1], mask=[True, True]) + mask = a < 1 + b[mask] = a[mask] + expected_mask = [False, True] + assert_equal(b.mask, expected_mask) + + +class TestUfuncs: + def setup_method(self): + self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), + array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) + + def test_testUfuncRegression(self): + f_invalid_ignore = [ + 'sqrt', 'arctanh', 'arcsin', 'arccos', + 'arccosh', 'arctanh', 'log', 'log10', 'divide', + 'true_divide', 'floor_divide', 'remainder', 'fmod'] + for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', + 'sin', 'cos', 'tan', + 'arcsin', 'arccos', 'arctan', + 'sinh', 'cosh', 'tanh', + 'arcsinh', + 'arccosh', + 'arctanh', + 'absolute', 'fabs', 'negative', + 'floor', 'ceil', + 'logical_not', + 'add', 'subtract', 'multiply', + 'divide', 'true_divide', 'floor_divide', + 'remainder', 'fmod', 'hypot', 'arctan2', + 'equal', 'not_equal', 'less_equal', 'greater_equal', + 'less', 'greater', + 'logical_and', 'logical_or', 'logical_xor']: + try: + uf = getattr(umath, f) + except AttributeError: + uf = getattr(fromnumeric, f) + mf = getattr(np.ma, f) + args = self.d[:uf.nin] + with np.errstate(): + if f in f_invalid_ignore: + np.seterr(invalid='ignore') + if f in ['arctanh', 'log', 'log10']: + 
np.seterr(divide='ignore') + ur = uf(*args) + mr = mf(*args) + assert_(eq(ur.filled(0), mr.filled(0), f)) + assert_(eqmask(ur.mask, mr.mask)) + + def test_reduce(self): + a = self.d[0] + assert_(not alltrue(a, axis=0)) + assert_(sometrue(a, axis=0)) + assert_equal(sum(a[:3], axis=0), 0) + assert_equal(product(a, axis=0), 0) + + def test_minmax(self): + a = arange(1, 13).reshape(3, 4) + amask = masked_where(a < 5, a) + assert_equal(amask.max(), a.max()) + assert_equal(amask.min(), 5) + assert_((amask.max(0) == a.max(0)).all()) + assert_((amask.min(0) == [5, 6, 7, 8]).all()) + assert_(amask.max(1)[0].mask) + assert_(amask.min(1)[0].mask) + + def test_nonzero(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = array([1, 0, 2, 0], mask=[0, 0, 1, 1]) + assert_(eq(nonzero(x), [0])) + + +class TestArrayMethods: + + def setup_method(self): + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + self.d = (x, X, XX, m, mx, mX, mXX) + + def test_trace(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_(eq(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0))) + + def test_clip(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + clipped = mx.clip(2, 8) + assert_(eq(clipped.mask, mx.mask)) + assert_(eq(clipped._data, x.clip(2, 8))) + assert_(eq(clipped._data, mx._data.clip(2, 8))) + + def test_ptp(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + (n, m) = X.shape + assert_equal(mx.ptp(), mx.compressed().ptp()) + rows = np.zeros(n, np.float_) + cols = np.zeros(m, np.float_) + for k in range(m): + cols[k] = mX[:, k].compressed().ptp() + for k in range(n): + rows[k] = mX[k].compressed().ptp() + assert_(eq(mX.ptp(0), cols)) + assert_(eq(mX.ptp(1), rows)) + + def test_swapaxes(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXswapped = mX.swapaxes(0, 1) + assert_(eq(mXswapped[-1], mX[:, -1])) + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_cumprod(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXcp = mX.cumprod(0) + assert_(eq(mXcp._data, mX.filled(1).cumprod(0))) + mXcp = mX.cumprod(1) + assert_(eq(mXcp._data, mX.filled(1).cumprod(1))) + + def test_cumsum(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXcp = mX.cumsum(0) + assert_(eq(mXcp._data, mX.filled(0).cumsum(0))) + mXcp = mX.cumsum(1) + assert_(eq(mXcp._data, mX.filled(0).cumsum(1))) + + def test_varstd(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + assert_(eq(mX.var(axis=None), mX.compressed().var())) + assert_(eq(mX.std(axis=None), mX.compressed().std())) + assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) + assert_(eq(mX.var().shape, X.var().shape)) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + for k in range(6): + assert_(eq(mXvar1[k], mX[k].compressed().var())) + assert_(eq(mXvar0[k], mX[:, k].compressed().var())) + assert_(eq(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std())) + + +def eqmask(m1, m2): + if m1 is nomask: + return m2 is nomask + if m2 is nomask: + 
return m1 is nomask + return (m1 == m2).all() diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_regression.py b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_regression.py new file mode 100644 index 00000000..f4f32cc7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_regression.py @@ -0,0 +1,97 @@ +import numpy as np +from numpy.testing import ( + assert_, assert_array_equal, assert_allclose, suppress_warnings + ) + + +class TestRegression: + def test_masked_array_create(self): + # Ticket #17 + x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6], + mask=[0, 0, 0, 1, 1, 1, 0, 0]) + assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]]) + + def test_masked_array(self): + # Ticket #61 + np.ma.array(1, mask=[1]) + + def test_mem_masked_where(self): + # Ticket #62 + from numpy.ma import masked_where, MaskType + a = np.zeros((1, 1)) + b = np.zeros(a.shape, MaskType) + c = masked_where(b, a) + a-c + + def test_masked_array_multiply(self): + # Ticket #254 + a = np.ma.zeros((4, 1)) + a[2, 0] = np.ma.masked + b = np.zeros((4, 2)) + a*b + b*a + + def test_masked_array_repeat(self): + # Ticket #271 + np.ma.array([1], mask=False).repeat(10) + + def test_masked_array_repr_unicode(self): + # Ticket #1256 + repr(np.ma.array("Unicode")) + + def test_atleast_2d(self): + # Ticket #1559 + a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False]) + b = np.atleast_2d(a) + assert_(a.mask.ndim == 1) + assert_(b.mask.ndim == 2) + + def test_set_fill_value_unicode_py3(self): + # Ticket #2733 + a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0]) + a.fill_value = 'X' + assert_(a.fill_value == 'X') + + def test_var_sets_maskedarray_scalar(self): + # Issue gh-2757 + a = np.ma.array(np.arange(5), mask=True) + mout = np.ma.array(-1, dtype=float) + a.var(out=mout) + assert_(mout._data == 0) + + def test_ddof_corrcoef(self): + # See gh-3336 + x = np.ma.masked_equal([1, 2, 3, 4, 5], 4) + y = np.array([2, 2.5, 3.1, 3, 5]) + # this test can be removed after deprecation. + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + r0 = np.ma.corrcoef(x, y, ddof=0) + r1 = np.ma.corrcoef(x, y, ddof=1) + # ddof should not have an effect (it gets cancelled out) + assert_allclose(r0.data, r1.data) + + def test_mask_not_backmangled(self): + # See gh-10314. Test case taken from gh-3140. + a = np.ma.MaskedArray([1., 2.], mask=[False, False]) + assert_(a.mask.shape == (2,)) + b = np.tile(a, (2, 1)) + # Check that the above no longer changes a.shape to (1, 2) + assert_(a.mask.shape == (2,)) + assert_(b.shape == (2, 2)) + assert_(b.mask.shape == (2, 2)) + + def test_empty_list_on_structured(self): + # See gh-12464. Indexing with empty list should give empty result. 
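+        # ma[[]] should produce an empty masked array that keeps the
+        # structured 'i4,f4' dtype, identical to the empty slice ma[:0].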
+ ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4') + assert_array_equal(ma[[]], ma[:0]) + + def test_masked_array_tobytes_fortran(self): + ma = np.ma.arange(4).reshape((2,2)) + assert_array_equal(ma.tobytes(order='F'), ma.T.tobytes()) + + def test_structured_array(self): + # see gh-22041 + np.ma.array((1, (b"", b"")), + dtype=[("x", np.int_), + ("y", [("i", np.void), ("j", np.void)])]) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_subclassing.py b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_subclassing.py new file mode 100644 index 00000000..e3c88525 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/tests/test_subclassing.py @@ -0,0 +1,460 @@ +# pylint: disable-msg=W0611, W0612, W0511,R0201 +"""Tests suite for MaskedArray & subclassing. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $ + +""" +import numpy as np +from numpy.lib.mixins import NDArrayOperatorsMixin +from numpy.testing import assert_, assert_raises +from numpy.ma.testutils import assert_equal +from numpy.ma.core import ( + array, arange, masked, MaskedArray, masked_array, log, add, hypot, + divide, asarray, asanyarray, nomask + ) +# from numpy.ma.core import ( + +def assert_startswith(a, b): + # produces a better error message than assert_(a.startswith(b)) + assert_equal(a[:len(b)], b) + +class SubArray(np.ndarray): + # Defines a generic np.ndarray subclass, that stores some metadata + # in the dictionary `info`. + def __new__(cls,arr,info={}): + x = np.asanyarray(arr).view(cls) + x.info = info.copy() + return x + + def __array_finalize__(self, obj): + super().__array_finalize__(obj) + self.info = getattr(obj, 'info', {}).copy() + return + + def __add__(self, other): + result = super().__add__(other) + result.info['added'] = result.info.get('added', 0) + 1 + return result + + def __iadd__(self, other): + result = super().__iadd__(other) + result.info['iadded'] = result.info.get('iadded', 0) + 1 + return result + + +subarray = SubArray + + +class SubMaskedArray(MaskedArray): + """Pure subclass of MaskedArray, keeping some info on subclass.""" + def __new__(cls, info=None, **kwargs): + obj = super().__new__(cls, **kwargs) + obj._optinfo['info'] = info + return obj + + +class MSubArray(SubArray, MaskedArray): + + def __new__(cls, data, info={}, mask=nomask): + subarr = SubArray(data, info) + _data = MaskedArray.__new__(cls, data=subarr, mask=mask) + _data.info = subarr.info + return _data + + @property + def _series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + +msubarray = MSubArray + + +# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing +# setting to non-class values (and thus np.ma.core.masked_print_option) +# and overrides __array_wrap__, updating the info dict, to check that this +# doesn't get destroyed by MaskedArray._update_from. But this one also needs +# its own iterator... 
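+# In outline: ndarray.flat yields plain ndarray items, so a subclass that
+# customises __getitem__/__setitem__ (ComplicatedSubArray below) would lose
+# its validation and view semantics when accessed through .flat. CSAIterator
+# re-views every item it returns as the original subclass and routes every
+# assignment through _validate_input.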
+class CSAIterator: + """ + Flat iterator object that uses its own setter/getter + (works around ndarray.flat not propagating subclass setters/getters + see https://github.com/numpy/numpy/issues/4564) + roughly following MaskedIterator + """ + def __init__(self, a): + self._original = a + self._dataiter = a.view(np.ndarray).flat + + def __iter__(self): + return self + + def __getitem__(self, indx): + out = self._dataiter.__getitem__(indx) + if not isinstance(out, np.ndarray): + out = out.__array__() + out = out.view(type(self._original)) + return out + + def __setitem__(self, index, value): + self._dataiter[index] = self._original._validate_input(value) + + def __next__(self): + return next(self._dataiter).__array__().view(type(self._original)) + + +class ComplicatedSubArray(SubArray): + + def __str__(self): + return f'myprefix {self.view(SubArray)} mypostfix' + + def __repr__(self): + # Return a repr that does not start with 'name(' + return f'<{self.__class__.__name__} {self}>' + + def _validate_input(self, value): + if not isinstance(value, ComplicatedSubArray): + raise ValueError("Can only set to MySubArray values") + return value + + def __setitem__(self, item, value): + # validation ensures direct assignment with ndarray or + # masked_print_option will fail + super().__setitem__(item, self._validate_input(value)) + + def __getitem__(self, item): + # ensure getter returns our own class also for scalars + value = super().__getitem__(item) + if not isinstance(value, np.ndarray): # scalar + value = value.__array__().view(ComplicatedSubArray) + return value + + @property + def flat(self): + return CSAIterator(self) + + @flat.setter + def flat(self, value): + y = self.ravel() + y[:] = value + + def __array_wrap__(self, obj, context=None): + obj = super().__array_wrap__(obj, context) + if context is not None and context[0] is np.multiply: + obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1 + + return obj + + +class WrappedArray(NDArrayOperatorsMixin): + """ + Wrapping a MaskedArray rather than subclassing to test that + ufunc deferrals are commutative. + See: https://github.com/numpy/numpy/issues/15200) + """ + __slots__ = ('_array', 'attrs') + __array_priority__ = 20 + + def __init__(self, array, **attrs): + self._array = array + self.attrs = attrs + + def __repr__(self): + return f"{self.__class__.__name__}(\n{self._array}\n{self.attrs}\n)" + + def __array__(self): + return np.asarray(self._array) + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + if method == '__call__': + inputs = [arg._array if isinstance(arg, self.__class__) else arg + for arg in inputs] + return self.__class__(ufunc(*inputs, **kwargs), **self.attrs) + else: + return NotImplemented + + +class TestSubclassing: + # Test suite for masked subclasses of ndarray. + + def setup_method(self): + x = np.arange(5, dtype='float') + mx = msubarray(x, mask=[0, 1, 0, 0, 0]) + self.data = (x, mx) + + def test_data_subclassing(self): + # Tests whether the subclass is kept. 
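+        # Wrapping SubArray data in masked_array should leave ._data an
+        # instance of SubArray while the wrapper is a plain MaskedArray.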
+ x = np.arange(5) + m = [0, 0, 1, 0, 0] + xsub = SubArray(x) + xmsub = masked_array(xsub, mask=m) + assert_(isinstance(xmsub, MaskedArray)) + assert_equal(xmsub._data, xsub) + assert_(isinstance(xmsub._data, SubArray)) + + def test_maskedarray_subclassing(self): + # Tests subclassing MaskedArray + (x, mx) = self.data + assert_(isinstance(mx._data, subarray)) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (x, mx) = self.data + with np.errstate(divide='ignore'): + assert_(isinstance(log(mx), msubarray)) + assert_equal(log(x), np.log(x)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (x, mx) = self.data + # Result should be a msubarray + assert_(isinstance(add(mx, mx), msubarray)) + assert_(isinstance(add(mx, x), msubarray)) + # Result should work + assert_equal(add(mx, x), mx+x) + assert_(isinstance(add(mx, mx)._data, subarray)) + assert_(isinstance(add.outer(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, x), msubarray)) + + def test_masked_binary_operations2(self): + # Tests domained_masked_binary_operation + (x, mx) = self.data + xmx = masked_array(mx.data.__array__(), mask=mx.mask) + assert_(isinstance(divide(mx, mx), msubarray)) + assert_(isinstance(divide(mx, x), msubarray)) + assert_equal(divide(mx, mx), divide(xmx, xmx)) + + def test_attributepropagation(self): + x = array(arange(5), mask=[0]+[1]*4) + my = masked_array(subarray(x)) + ym = msubarray(x) + # + z = (my+1) + assert_(isinstance(z, MaskedArray)) + assert_(not isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_equal(z._data.info, {}) + # + z = (ym+1) + assert_(isinstance(z, MaskedArray)) + assert_(isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_(z._data.info['added'] > 0) + # Test that inplace methods from data get used (gh-4617) + ym += 1 + assert_(isinstance(ym, MaskedArray)) + assert_(isinstance(ym, MSubArray)) + assert_(isinstance(ym._data, SubArray)) + assert_(ym._data.info['iadded'] > 0) + # + ym._set_mask([1, 0, 0, 0, 1]) + assert_equal(ym._mask, [1, 0, 0, 0, 1]) + ym._series._set_mask([0, 0, 0, 0, 1]) + assert_equal(ym._mask, [0, 0, 0, 0, 1]) + # + xsub = subarray(x, info={'name':'x'}) + mxsub = masked_array(xsub) + assert_(hasattr(mxsub, 'info')) + assert_equal(mxsub.info, xsub.info) + + def test_subclasspreservation(self): + # Checks that masked_array(...,subok=True) preserves the class. 
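+        # subok=False (like asarray) should downcast to a plain MaskedArray;
+        # subok=True (like asanyarray) should return an MSubArray and keep
+        # its info dict intact.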
+ x = np.arange(5) + m = [0, 0, 1, 0, 0] + xinfo = [(i, j) for (i, j) in zip(x, m)] + xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) + # + mxsub = masked_array(xsub, subok=False) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = asarray(xsub) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = masked_array(xsub, subok=True) + assert_(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, xsub._mask) + # + mxsub = asanyarray(xsub) + assert_(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, m) + + def test_subclass_items(self): + """test that getter and setter go via baseclass""" + x = np.arange(5) + xcsub = ComplicatedSubArray(x) + mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) + # getter should return a ComplicatedSubArray, even for single item + # first check we wrote ComplicatedSubArray correctly + assert_(isinstance(xcsub[1], ComplicatedSubArray)) + assert_(isinstance(xcsub[1,...], ComplicatedSubArray)) + assert_(isinstance(xcsub[1:4], ComplicatedSubArray)) + + # now that it propagates inside the MaskedArray + assert_(isinstance(mxcsub[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray)) + assert_(mxcsub[0] is masked) + assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray)) + + # also for flattened version (which goes via MaskedIterator) + assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray)) + assert_(mxcsub.flat[0] is masked) + assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray)) + + # setter should only work with ComplicatedSubArray input + # first check we wrote ComplicatedSubArray correctly + assert_raises(ValueError, xcsub.__setitem__, 1, x[4]) + # now that it propagates inside the MaskedArray + assert_raises(ValueError, mxcsub.__setitem__, 1, x[4]) + assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4]) + mxcsub[1] = xcsub[4] + mxcsub[1:4] = xcsub[1:4] + # also for flattened version (which goes via MaskedIterator) + assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4]) + assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4]) + mxcsub.flat[1] = xcsub[4] + mxcsub.flat[1:4] = xcsub[1:4] + + def test_subclass_nomask_items(self): + x = np.arange(5) + xcsub = ComplicatedSubArray(x) + mxcsub_nomask = masked_array(xcsub) + + assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray)) + + assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray)) + + def test_subclass_repr(self): + """test that repr uses the name of the subclass + and 'array' for np.ndarray""" + x = np.arange(5) + mx = masked_array(x, mask=[True, False, True, False, False]) + assert_startswith(repr(mx), 'masked_array') + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + assert_startswith(repr(mxsub), + f'masked_{SubArray.__name__}(data=[--, 1, --, 3, 4]') + + def test_subclass_str(self): + """test str with subclass that has overridden str, setitem""" + # first without override + x = np.arange(5) + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + assert_equal(str(mxsub), '[-- 1 -- 3 
4]') + + xcsub = ComplicatedSubArray(x) + assert_raises(ValueError, xcsub.__setitem__, 0, + np.ma.core.masked_print_option) + mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) + assert_equal(str(mxcsub), 'myprefix [-- 1 -- 3 4] mypostfix') + + def test_pure_subclass_info_preservation(self): + # Test that ufuncs and methods conserve extra information consistently; + # see gh-7122. + arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6]) + arr2 = SubMaskedArray(data=[0,1,2,3,4,5]) + diff1 = np.subtract(arr1, arr2) + assert_('info' in diff1._optinfo) + assert_(diff1._optinfo['info'] == 'test') + diff2 = arr1 - arr2 + assert_('info' in diff2._optinfo) + assert_(diff2._optinfo['info'] == 'test') + + +class ArrayNoInheritance: + """Quantity-like class that does not inherit from ndarray""" + def __init__(self, data, units): + self.magnitude = data + self.units = units + + def __getattr__(self, attr): + return getattr(self.magnitude, attr) + + +def test_array_no_inheritance(): + data_masked = np.ma.array([1, 2, 3], mask=[True, False, True]) + data_masked_units = ArrayNoInheritance(data_masked, 'meters') + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units) + assert_equal(data_masked.data, new_array.data) + assert_equal(data_masked.mask, new_array.mask) + # Test sharing the mask + data_masked.mask = [True, False, False] + assert_equal(data_masked.mask, new_array.mask) + assert_(new_array.sharedmask) + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units, copy=True) + assert_equal(data_masked.data, new_array.data) + assert_equal(data_masked.mask, new_array.mask) + # Test that the mask is not shared when copy=True + data_masked.mask = [True, False, True] + assert_equal([True, False, False], new_array.mask) + assert_(not new_array.sharedmask) + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units, keep_mask=False) + assert_equal(data_masked.data, new_array.data) + # The change did not affect the original mask + assert_equal(data_masked.mask, [True, False, True]) + # Test that the mask is False and not shared when keep_mask=False + assert_(not new_array.mask) + assert_(not new_array.sharedmask) + + +class TestClassWrapping: + # Test suite for classes that wrap MaskedArrays + + def setup_method(self): + m = np.ma.masked_array([1, 3, 5], mask=[False, True, False]) + wm = WrappedArray(m) + self.data = (m, wm) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (m, wm) = self.data + with np.errstate(divide='ignore'): + assert_(isinstance(np.log(wm), WrappedArray)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (m, wm) = self.data + # Result should be a WrappedArray + assert_(isinstance(np.add(wm, wm), WrappedArray)) + assert_(isinstance(np.add(m, wm), WrappedArray)) + assert_(isinstance(np.add(wm, m), WrappedArray)) + # add and '+' should call the same ufunc + assert_equal(np.add(m, wm), m + wm) + assert_(isinstance(np.hypot(m, wm), WrappedArray)) + assert_(isinstance(np.hypot(wm, m), WrappedArray)) + # Test domained binary operations + assert_(isinstance(np.divide(wm, m), WrappedArray)) + assert_(isinstance(np.divide(m, wm), WrappedArray)) + assert_equal(np.divide(wm, m) * m, np.divide(m, m) * wm) + # Test broadcasting + m2 = np.stack([m, m]) + assert_(isinstance(np.divide(wm, m2), WrappedArray)) + assert_(isinstance(np.divide(m2, wm), WrappedArray)) + 
assert_equal(np.divide(m2, wm), np.divide(wm, m2)) + + def test_mixins_have_slots(self): + mixin = NDArrayOperatorsMixin() + # Should raise an error + assert_raises(AttributeError, mixin.__setattr__, "not_a_real_attr", 1) + + m = np.ma.masked_array([1, 3, 5], mask=[False, True, False]) + wm = WrappedArray(m) + assert_raises(AttributeError, wm.__setattr__, "not_an_attr", 2) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/testutils.py b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/testutils.py new file mode 100644 index 00000000..7a633906 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/testutils.py @@ -0,0 +1,288 @@ +"""Miscellaneous functions for testing masked arrays and subclasses + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $ + +""" +import operator + +import numpy as np +from numpy import ndarray, float_ +import numpy.core.umath as umath +import numpy.testing +from numpy.testing import ( + assert_, assert_allclose, assert_array_almost_equal_nulp, + assert_raises, build_err_msg + ) +from .core import mask_or, getmask, masked_array, nomask, masked, filled + +__all__masked = [ + 'almost', 'approx', 'assert_almost_equal', 'assert_array_almost_equal', + 'assert_array_approx_equal', 'assert_array_compare', + 'assert_array_equal', 'assert_array_less', 'assert_close', + 'assert_equal', 'assert_equal_records', 'assert_mask_equal', + 'assert_not_equal', 'fail_if_array_equal', + ] + +# Include some normal test functions to avoid breaking other projects who +# have mistakenly included them from this file. SciPy is one. That is +# unfortunate, as some of these functions are not intended to work with +# masked arrays. But there was no way to tell before. +from unittest import TestCase +__some__from_testing = [ + 'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp', + 'assert_raises' + ] + +__all__ = __all__masked + __some__from_testing + + +def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8): + """ + Returns true if all components of a and b are equal to given tolerances. + + If fill_value is True, masked values considered equal. Otherwise, + masked values are considered unequal. The relative error rtol should + be positive and << 1.0 The absolute error atol comes into play for + those elements of b that are very small or zero; it says how small a + must be also. + + """ + m = mask_or(getmask(a), getmask(b)) + d1 = filled(a) + d2 = filled(b) + if d1.dtype.char == "O" or d2.dtype.char == "O": + return np.equal(d1, d2).ravel() + x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_) + y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_) + d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y)) + return d.ravel() + + +def almost(a, b, decimal=6, fill_value=True): + """ + Returns True if a and b are equal up to decimal places. + + If fill_value is True, masked values considered equal. Otherwise, + masked values are considered unequal. 
+ + """ + m = mask_or(getmask(a), getmask(b)) + d1 = filled(a) + d2 = filled(b) + if d1.dtype.char == "O" or d2.dtype.char == "O": + return np.equal(d1, d2).ravel() + x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_) + y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_) + d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal) + return d.ravel() + + +def _assert_equal_on_sequences(actual, desired, err_msg=''): + """ + Asserts the equality of two non-array sequences. + + """ + assert_equal(len(actual), len(desired), err_msg) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') + return + + +def assert_equal_records(a, b): + """ + Asserts that two records are equal. + + Pretty crude for now. + + """ + assert_equal(a.dtype, b.dtype) + for f in a.dtype.names: + (af, bf) = (operator.getitem(a, f), operator.getitem(b, f)) + if not (af is masked) and not (bf is masked): + assert_equal(operator.getitem(a, f), operator.getitem(b, f)) + return + + +def assert_equal(actual, desired, err_msg=''): + """ + Asserts that two items are equal. + + """ + # Case #1: dictionary ..... + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(f"{k} not in {actual}") + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + return + # Case #2: lists ..... + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + return _assert_equal_on_sequences(actual, desired, err_msg='') + if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)): + msg = build_err_msg([actual, desired], err_msg,) + if not desired == actual: + raise AssertionError(msg) + return + # Case #4. arrays or equivalent + if ((actual is masked) and not (desired is masked)) or \ + ((desired is masked) and not (actual is masked)): + msg = build_err_msg([actual, desired], + err_msg, header='', names=('x', 'y')) + raise ValueError(msg) + actual = np.asanyarray(actual) + desired = np.asanyarray(desired) + (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype) + if actual_dtype.char == "S" and desired_dtype.char == "S": + return _assert_equal_on_sequences(actual.tolist(), + desired.tolist(), + err_msg='') + return assert_array_equal(actual, desired, err_msg) + + +def fail_if_equal(actual, desired, err_msg='',): + """ + Raises an assertion error if two items are equal. + + """ + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + fail_if_equal(len(actual), len(desired), err_msg) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(repr(k)) + fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}') + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + fail_if_equal(len(actual), len(desired), err_msg) + for k in range(len(desired)): + fail_if_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') + return + if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray): + return fail_if_array_equal(actual, desired, err_msg) + msg = build_err_msg([actual, desired], err_msg) + if not desired != actual: + raise AssertionError(msg) + + +assert_not_equal = fail_if_equal + + +def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): + """ + Asserts that two items are almost equal. 
+
+    The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal).
+
+    """
+    if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
+        return assert_array_almost_equal(actual, desired, decimal=decimal,
+                                         err_msg=err_msg, verbose=verbose)
+    msg = build_err_msg([actual, desired],
+                        err_msg=err_msg, verbose=verbose)
+    if not round(abs(desired - actual), decimal) == 0:
+        raise AssertionError(msg)
+
+
+assert_close = assert_almost_equal
+
+
+def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
+                         fill_value=True):
+    """
+    Asserts that comparison between two masked arrays is satisfied.
+
+    The comparison is elementwise.
+
+    """
+    # Allocate a common mask and refill
+    m = mask_or(getmask(x), getmask(y))
+    x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False)
+    y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False)
+    if ((x is masked) and not (y is masked)) or \
+            ((y is masked) and not (x is masked)):
+        msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose,
+                            header=header, names=('x', 'y'))
+        raise ValueError(msg)
+    # OK, now run the basic tests on filled versions
+    return np.testing.assert_array_compare(comparison,
+                                           x.filled(fill_value),
+                                           y.filled(fill_value),
+                                           err_msg=err_msg,
+                                           verbose=verbose, header=header)
+
+
+def assert_array_equal(x, y, err_msg='', verbose=True):
+    """
+    Checks the elementwise equality of two masked arrays.
+
+    """
+    assert_array_compare(operator.__eq__, x, y,
+                         err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not equal')
+
+
+def fail_if_array_equal(x, y, err_msg='', verbose=True):
+    """
+    Raises an assertion error if two masked arrays are not equal elementwise.
+
+    """
+    def compare(x, y):
+        return (not np.all(approx(x, y)))
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not equal')
+
+
+def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True):
+    """
+    Checks the equality of two masked arrays, up to the given number of decimals.
+
+    The equality is checked elementwise.
+
+    """
+    def compare(x, y):
+        "Returns the result of the loose comparison between x and y."
+        return approx(x, y, rtol=10. ** -decimal)
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not almost equal')
+
+
+def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
+    """
+    Checks the equality of two masked arrays, up to the given number of decimals.
+
+    The equality is checked elementwise.
+
+    """
+    def compare(x, y):
+        "Returns the result of the loose comparison between x and y."
+        return almost(x, y, decimal)
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not almost equal')
+
+
+def assert_array_less(x, y, err_msg='', verbose=True):
+    """
+    Checks that x is smaller than y elementwise.
+
+    """
+    assert_array_compare(operator.__lt__, x, y,
+                         err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not less-ordered')
+
+
+def assert_mask_equal(m1, m2, err_msg=''):
+    """
+    Asserts the equality of two masks.
+ + """ + if m1 is nomask: + assert_(m2 is nomask) + if m2 is nomask: + assert_(m1 is nomask) + assert_array_equal(m1, m2, err_msg=err_msg) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/ma/timer_comparison.py b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/timer_comparison.py new file mode 100644 index 00000000..9eb1a23c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/ma/timer_comparison.py @@ -0,0 +1,443 @@ +import timeit +from functools import reduce + +import numpy as np +from numpy import float_ +import numpy.core.fromnumeric as fromnumeric + +from numpy.testing import build_err_msg + + +pi = np.pi + +class ModuleTester: + def __init__(self, module): + self.module = module + self.allequal = module.allequal + self.arange = module.arange + self.array = module.array + self.concatenate = module.concatenate + self.count = module.count + self.equal = module.equal + self.filled = module.filled + self.getmask = module.getmask + self.getmaskarray = module.getmaskarray + self.id = id + self.inner = module.inner + self.make_mask = module.make_mask + self.masked = module.masked + self.masked_array = module.masked_array + self.masked_values = module.masked_values + self.mask_or = module.mask_or + self.nomask = module.nomask + self.ones = module.ones + self.outer = module.outer + self.repeat = module.repeat + self.resize = module.resize + self.sort = module.sort + self.take = module.take + self.transpose = module.transpose + self.zeros = module.zeros + self.MaskType = module.MaskType + try: + self.umath = module.umath + except AttributeError: + self.umath = module.core.umath + self.testnames = [] + + def assert_array_compare(self, comparison, x, y, err_msg='', header='', + fill_value=True): + """ + Assert that a comparison of two masked arrays is satisfied elementwise. + + """ + xf = self.filled(x) + yf = self.filled(y) + m = self.mask_or(self.getmask(x), self.getmask(y)) + + x = self.filled(self.masked_array(xf, mask=m), fill_value) + y = self.filled(self.masked_array(yf, mask=m), fill_value) + if (x.dtype.char != "O"): + x = x.astype(float_) + if isinstance(x, np.ndarray) and x.size > 1: + x[np.isnan(x)] = 0 + elif np.isnan(x): + x = 0 + if (y.dtype.char != "O"): + y = y.astype(float_) + if isinstance(y, np.ndarray) and y.size > 1: + y[np.isnan(y)] = 0 + elif np.isnan(y): + y = 0 + try: + cond = (x.shape == () or y.shape == ()) or x.shape == y.shape + if not cond: + msg = build_err_msg([x, y], + err_msg + + f'\n(shapes {x.shape}, {y.shape} mismatch)', + header=header, + names=('x', 'y')) + assert cond, msg + val = comparison(x, y) + if m is not self.nomask and fill_value: + val = self.masked_array(val, mask=m) + if isinstance(val, bool): + cond = val + reduced = [0] + else: + reduced = val.ravel() + cond = reduced.all() + reduced = reduced.tolist() + if not cond: + match = 100-100.0*reduced.count(1)/len(reduced) + msg = build_err_msg([x, y], + err_msg + + '\n(mismatch %s%%)' % (match,), + header=header, + names=('x', 'y')) + assert cond, msg + except ValueError as e: + msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y')) + raise ValueError(msg) from e + + def assert_array_equal(self, x, y, err_msg=''): + """ + Checks the elementwise equality of two masked arrays. 
+ + """ + self.assert_array_compare(self.equal, x, y, err_msg=err_msg, + header='Arrays are not equal') + + @np.errstate(all='ignore') + def test_0(self): + """ + Tests creation + + """ + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + xm = self.masked_array(x, mask=m) + xm[0] + + @np.errstate(all='ignore') + def test_1(self): + """ + Tests creation + + """ + x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = self.masked_array(x, mask=m1) + ym = self.masked_array(y, mask=m2) + xf = np.where(m1, 1.e+20, x) + xm.set_fill_value(1.e+20) + + assert((xm-ym).filled(0).any()) + s = x.shape + assert(xm.size == reduce(lambda x, y:x*y, s)) + assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + + for s in [(4, 3), (6, 2)]: + x.shape = s + y.shape = s + xm.shape = s + ym.shape = s + xf.shape = s + assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + + @np.errstate(all='ignore') + def test_2(self): + """ + Tests conversions and indexing. + + """ + x1 = np.array([1, 2, 4, 3]) + x2 = self.array(x1, mask=[1, 0, 0, 0]) + x3 = self.array(x1, mask=[0, 1, 0, 1]) + x4 = self.array(x1) + # test conversion to strings, no errors + str(x2) + repr(x2) + # tests of indexing + assert type(x2[1]) is type(x1[1]) + assert x1[1] == x2[1] + x1[2] = 9 + x2[2] = 9 + self.assert_array_equal(x1, x2) + x1[1:3] = 99 + x2[1:3] = 99 + x2[1] = self.masked + x2[1:3] = self.masked + x2[:] = x1 + x2[1] = self.masked + x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + x1 = np.arange(5)*1.0 + x2 = self.masked_values(x1, 3.0) + x1 = self.array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + # check that no error occurs. + x1[1] + x2[1] + assert x1[1:1].shape == (0,) + # Tests copy-size + n = [0, 0, 1, 0, 0] + m = self.make_mask(n) + m2 = self.make_mask(m) + assert(m is m2) + m3 = self.make_mask(m, copy=1) + assert(m is not m3) + + @np.errstate(all='ignore') + def test_3(self): + """ + Tests resize/repeat + + """ + x4 = self.arange(4) + x4[2] = self.masked + y4 = self.resize(x4, (8,)) + assert self.allequal(self.concatenate([x4, x4]), y4) + assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) + y5 = self.repeat(x4, (2, 2, 2, 2), axis=0) + self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) + y6 = self.repeat(x4, 2, axis=0) + assert self.allequal(y5, y6) + y7 = x4.repeat((2, 2, 2, 2), axis=0) + assert self.allequal(y5, y7) + y8 = x4.repeat(2, 0) + assert self.allequal(y5, y8) + + @np.errstate(all='ignore') + def test_4(self): + """ + Test of take, transpose, inner, outer products. 
+ + """ + x = self.arange(24) + y = np.arange(24) + x[5:6] = self.masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1))) + assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1)) + assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)), + self.inner(x, y)) + assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)), + self.outer(x, y)) + y = self.array(['abc', 1, 'def', 2, 3], object) + y[2] = self.masked + t = self.take(y, [0, 3, 4]) + assert t[0] == 'abc' + assert t[1] == 2 + assert t[2] == 3 + + @np.errstate(all='ignore') + def test_5(self): + """ + Tests inplace w/ scalar + + """ + x = self.arange(10) + y = self.arange(10) + xm = self.arange(10) + xm[2] = self.masked + x += 1 + assert self.allequal(x, y+1) + xm += 1 + assert self.allequal(xm, y+1) + + x = self.arange(10) + xm = self.arange(10) + xm[2] = self.masked + x -= 1 + assert self.allequal(x, y-1) + xm -= 1 + assert self.allequal(xm, y-1) + + x = self.arange(10)*1.0 + xm = self.arange(10)*1.0 + xm[2] = self.masked + x *= 2.0 + assert self.allequal(x, y*2) + xm *= 2.0 + assert self.allequal(xm, y*2) + + x = self.arange(10)*2 + xm = self.arange(10)*2 + xm[2] = self.masked + x /= 2 + assert self.allequal(x, y) + xm /= 2 + assert self.allequal(xm, y) + + x = self.arange(10)*1.0 + xm = self.arange(10)*1.0 + xm[2] = self.masked + x /= 2.0 + assert self.allequal(x, y/2.0) + xm /= self.arange(10) + self.assert_array_equal(xm, self.ones((10,))) + + x = self.arange(10).astype(float_) + xm = self.arange(10) + xm[2] = self.masked + x += 1. + assert self.allequal(x, y + 1.) + + @np.errstate(all='ignore') + def test_6(self): + """ + Tests inplace w/ array + + """ + x = self.arange(10, dtype=float_) + y = self.arange(10) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x += a + xm += a + assert self.allequal(x, y+a) + assert self.allequal(xm, y+a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=float_) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x -= a + xm -= a + assert self.allequal(x, y-a) + assert self.allequal(xm, y-a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=float_) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x *= a + xm *= a + assert self.allequal(x, y*a) + assert self.allequal(xm, y*a) + assert self.allequal(xm.mask, self.mask_or(m, a.mask)) + + x = self.arange(10, dtype=float_) + xm = self.arange(10, dtype=float_) + xm[2] = self.masked + m = xm.mask + a = self.arange(10, dtype=float_) + a[-1] = self.masked + x /= a + xm /= a + + @np.errstate(all='ignore') + def test_7(self): + "Tests ufunc" + d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6), + self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),) + for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', +# 'sin', 'cos', 'tan', +# 'arcsin', 'arccos', 'arctan', +# 'sinh', 'cosh', 'tanh', +# 'arcsinh', +# 'arccosh', +# 'arctanh', +# 'absolute', 'fabs', 'negative', +# # 'nonzero', 'around', +# 'floor', 'ceil', +# # 'sometrue', 'alltrue', +# 'logical_not', +# 'add', 'subtract', 'multiply', +# 'divide', 'true_divide', 'floor_divide', +# 'remainder', 'fmod', 'hypot', 'arctan2', +# 'equal', 'not_equal', 'less_equal', 
'greater_equal', +# 'less', 'greater', +# 'logical_and', 'logical_or', 'logical_xor', + ]: + try: + uf = getattr(self.umath, f) + except AttributeError: + uf = getattr(fromnumeric, f) + mf = getattr(self.module, f) + args = d[:uf.nin] + ur = uf(*args) + mr = mf(*args) + self.assert_array_equal(ur.filled(0), mr.filled(0), f) + self.assert_array_equal(ur._mask, mr._mask) + + @np.errstate(all='ignore') + def test_99(self): + # test average + ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + self.assert_array_equal(2.0, self.average(ott, axis=0)) + self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.])) + result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1) + self.assert_array_equal(2.0, result) + assert(wts == 4.0) + ott[:] = self.masked + assert(self.average(ott, axis=0) is self.masked) + ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + ott = ott.reshape(2, 2) + ott[:, 1] = self.masked + self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0]) + assert(self.average(ott, axis=1)[0] is self.masked) + self.assert_array_equal([2., 0.], self.average(ott, axis=0)) + result, wts = self.average(ott, axis=0, returned=1) + self.assert_array_equal(wts, [1., 0.]) + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = self.arange(6) + self.assert_array_equal(self.average(x, axis=0), 2.5) + self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5) + y = self.array([self.arange(6), 2.0*self.arange(6)]) + self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.) + self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.) + self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) + self.assert_array_equal(self.average(y, None, weights=w2), 20./6.) + self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.]) + self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) + m1 = self.zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = self.ones(6) + m5 = [0, 1, 1, 1, 1, 1] + self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5) + self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5) + self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0) + self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0) + z = self.masked_array(y, m3) + self.assert_array_equal(self.average(z, None), 20./6.) 
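+        # Columns 2 and 3 are masked in every row of m3, so they stay masked
+        # in the axis-0 averages; the 99. entries below are placeholders at
+        # those positions and are never actually compared.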
+ self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) + self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0]) + self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0]) + + @np.errstate(all='ignore') + def test_A(self): + x = self.arange(24) + x[5:6] = self.masked + x = x.reshape(2, 3, 4) + + +if __name__ == '__main__': + setup_base = ("from __main__ import ModuleTester \n" + "import numpy\n" + "tester = ModuleTester(module)\n") + setup_cur = "import numpy.ma.core as module\n" + setup_base + (nrepeat, nloop) = (10, 10) + + for i in range(1, 8): + func = 'tester.test_%i()' % i + cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10) + cur = np.sort(cur) + print("#%i" % i + 50*'.') + print(eval("ModuleTester.test_%i.__doc__" % i)) + print(f'core_current : {cur[0]:.3f} - {cur[1]:.3f}') diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/matlib.py b/dbdpy-env/lib/python3.9/site-packages/numpy/matlib.py new file mode 100644 index 00000000..e929fd9b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/matlib.py @@ -0,0 +1,378 @@ +import warnings + +# 2018-05-29, PendingDeprecationWarning added to matrix.__new__ +# 2020-01-23, numpy 1.19.0 PendingDeprecatonWarning +warnings.warn("Importing from numpy.matlib is deprecated since 1.19.0. " + "The matrix subclass is not the recommended way to represent " + "matrices or deal with linear algebra (see " + "https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). " + "Please adjust your code to use regular ndarray. ", + PendingDeprecationWarning, stacklevel=2) + +import numpy as np +from numpy.matrixlib.defmatrix import matrix, asmatrix +# Matlib.py contains all functions in the numpy namespace with a few +# replacements. See doc/source/reference/routines.matlib.rst for details. +# Need * as we're copying the numpy namespace. +from numpy import * # noqa: F403 + +__version__ = np.__version__ + +__all__ = np.__all__[:] # copy numpy namespace +__all__ += ['rand', 'randn', 'repmat'] + +def empty(shape, dtype=None, order='C'): + """Return a new matrix of given shape and type, without initializing entries. + + Parameters + ---------- + shape : int or tuple of int + Shape of the empty matrix. + dtype : data-type, optional + Desired output data-type. + order : {'C', 'F'}, optional + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + + See Also + -------- + empty_like, zeros + + Notes + ----- + `empty`, unlike `zeros`, does not set the matrix values to zero, + and may therefore be marginally faster. On the other hand, it requires + the user to manually set all the values in the array, and should be + used with caution. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.empty((2, 2)) # filled with random data + matrix([[ 6.76425276e-320, 9.79033856e-307], # random + [ 7.39337286e-309, 3.22135945e-309]]) + >>> np.matlib.empty((2, 2), dtype=int) + matrix([[ 6600475, 0], # random + [ 6586976, 22740995]]) + + """ + return ndarray.__new__(matrix, shape, dtype, order=order) + +def ones(shape, dtype=None, order='C'): + """ + Matrix of ones. + + Return a matrix of given shape and type, filled with ones. + + Parameters + ---------- + shape : {sequence of ints, int} + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is np.float64. + order : {'C', 'F'}, optional + Whether to store matrix in C- or Fortran-contiguous order, + default is 'C'. 
+ + Returns + ------- + out : matrix + Matrix of ones of given shape, dtype, and order. + + See Also + -------- + ones : Array of ones. + matlib.zeros : Zero matrix. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> np.matlib.ones((2,3)) + matrix([[1., 1., 1.], + [1., 1., 1.]]) + + >>> np.matlib.ones(2) + matrix([[1., 1.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(1) + return a + +def zeros(shape, dtype=None, order='C'): + """ + Return a matrix of given shape and type, filled with zeros. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is float. + order : {'C', 'F'}, optional + Whether to store the result in C- or Fortran-contiguous order, + default is 'C'. + + Returns + ------- + out : matrix + Zero matrix of given shape, dtype, and order. + + See Also + -------- + numpy.zeros : Equivalent array function. + matlib.ones : Return a matrix of ones. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.zeros((2, 3)) + matrix([[0., 0., 0.], + [0., 0., 0.]]) + + >>> np.matlib.zeros(2) + matrix([[0., 0.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(0) + return a + +def identity(n,dtype=None): + """ + Returns the square identity matrix of given size. + + Parameters + ---------- + n : int + Size of the returned identity matrix. + dtype : data-type, optional + Data-type of the output. Defaults to ``float``. + + Returns + ------- + out : matrix + `n` x `n` matrix with its main diagonal set to one, + and all other elements zero. + + See Also + -------- + numpy.identity : Equivalent array function. + matlib.eye : More general matrix identity function. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.identity(3, dtype=int) + matrix([[1, 0, 0], + [0, 1, 0], + [0, 0, 1]]) + + """ + a = array([1]+n*[0], dtype=dtype) + b = empty((n, n), dtype=dtype) + b.flat = a + return b + +def eye(n,M=None, k=0, dtype=float, order='C'): + """ + Return a matrix with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + n : int + Number of rows in the output. + M : int, optional + Number of columns in the output, defaults to `n`. + k : int, optional + Index of the diagonal: 0 refers to the main diagonal, + a positive value refers to an upper diagonal, + and a negative value to a lower diagonal. + dtype : dtype, optional + Data-type of the returned matrix. + order : {'C', 'F'}, optional + Whether the output should be stored in row-major (C-style) or + column-major (Fortran-style) order in memory. + + .. versionadded:: 1.14.0 + + Returns + ------- + I : matrix + A `n` x `M` matrix where all elements are equal to zero, + except for the `k`-th diagonal, whose values are equal to one. + + See Also + -------- + numpy.eye : Equivalent array function. + identity : Square identity matrix. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.eye(3, k=1, dtype=float) + matrix([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) + + """ + return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order)) + +def rand(*args): + """ + Return a matrix of random values with given shape. 
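+    Convenience wrapper around ``numpy.random.rand`` that casts the result to `matrix`.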
+ + Create a matrix of the given shape and populate it with + random samples from a uniform distribution over ``[0, 1)``. + + Parameters + ---------- + \\*args : Arguments + Shape of the output. + If given as N integers, each integer specifies the size of one + dimension. + If given as a tuple, this tuple gives the complete shape. + + Returns + ------- + out : ndarray + The matrix of random values with shape given by `\\*args`. + + See Also + -------- + randn, numpy.random.RandomState.rand + + Examples + -------- + >>> np.random.seed(123) + >>> import numpy.matlib + >>> np.matlib.rand(2, 3) + matrix([[0.69646919, 0.28613933, 0.22685145], + [0.55131477, 0.71946897, 0.42310646]]) + >>> np.matlib.rand((2, 3)) + matrix([[0.9807642 , 0.68482974, 0.4809319 ], + [0.39211752, 0.34317802, 0.72904971]]) + + If the first argument is a tuple, other arguments are ignored: + + >>> np.matlib.rand((2, 3), 4) + matrix([[0.43857224, 0.0596779 , 0.39804426], + [0.73799541, 0.18249173, 0.17545176]]) + + """ + if isinstance(args[0], tuple): + args = args[0] + return asmatrix(np.random.rand(*args)) + +def randn(*args): + """ + Return a random matrix with data from the "standard normal" distribution. + + `randn` generates a matrix filled with random floats sampled from a + univariate "normal" (Gaussian) distribution of mean 0 and variance 1. + + Parameters + ---------- + \\*args : Arguments + Shape of the output. + If given as N integers, each integer specifies the size of one + dimension. If given as a tuple, this tuple gives the complete shape. + + Returns + ------- + Z : matrix of floats + A matrix of floating-point samples drawn from the standard normal + distribution. + + See Also + -------- + rand, numpy.random.RandomState.randn + + Notes + ----- + For random samples from the normal distribution with mean ``mu`` and + standard deviation ``sigma``, use:: + + sigma * np.matlib.randn(...) + mu + + Examples + -------- + >>> np.random.seed(123) + >>> import numpy.matlib + >>> np.matlib.randn(1) + matrix([[-1.0856306]]) + >>> np.matlib.randn(1, 2, 3) + matrix([[ 0.99734545, 0.2829785 , -1.50629471], + [-0.57860025, 1.65143654, -2.42667924]]) + + Two-by-four matrix of samples from the normal distribution with + mean 3 and standard deviation 2.5: + + >>> 2.5 * np.matlib.randn((2, 4)) + 3 + matrix([[1.92771843, 6.16484065, 0.83314899, 1.30278462], + [2.76322758, 6.72847407, 1.40274501, 1.8900451 ]]) + + """ + if isinstance(args[0], tuple): + args = args[0] + return asmatrix(np.random.randn(*args)) + +def repmat(a, m, n): + """ + Repeat a 0-D to 2-D array or matrix MxN times. + + Parameters + ---------- + a : array_like + The array or matrix to be repeated. + m, n : int + The number of times `a` is repeated along the first and second axes. + + Returns + ------- + out : ndarray + The result of repeating `a`. 
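+ +    Notes +    ----- +    A 0-D input is treated as a 1x1 array and a 1-D input as a single row before tiling; that is why ``repmat(np.arange(4), 2, 2)`` in the examples below yields a 2x8 array.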
+ + Examples + -------- + >>> import numpy.matlib + >>> a0 = np.array(1) + >>> np.matlib.repmat(a0, 2, 3) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> a1 = np.arange(4) + >>> np.matlib.repmat(a1, 2, 2) + array([[0, 1, 2, 3, 0, 1, 2, 3], + [0, 1, 2, 3, 0, 1, 2, 3]]) + + >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3)) + >>> np.matlib.repmat(a2, 2, 3) + matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5], + [0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5]]) + + """ + a = asanyarray(a) + ndim = a.ndim + if ndim == 0: + origrows, origcols = (1, 1) + elif ndim == 1: + origrows, origcols = (1, a.shape[0]) + else: + origrows, origcols = a.shape + rows = origrows * m + cols = origcols * n + c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0) + return c.reshape(rows, cols) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/__init__.py new file mode 100644 index 00000000..8a7597d3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/__init__.py @@ -0,0 +1,11 @@ +"""Sub-package containing the matrix class and related functions. + +""" +from . import defmatrix +from .defmatrix import * + +__all__ = defmatrix.__all__ + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/__init__.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/__init__.pyi new file mode 100644 index 00000000..b0ca8c9c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/__init__.pyi @@ -0,0 +1,15 @@ +from numpy._pytesttester import PytestTester + +from numpy import ( + matrix as matrix, +) + +from numpy.matrixlib.defmatrix import ( + bmat as bmat, + mat as mat, + asmatrix as asmatrix, +) + +__all__: list[str] +__path__: list[str] +test: PytestTester diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.py b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.py new file mode 100644 index 00000000..d029b13f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.py @@ -0,0 +1,1114 @@ +__all__ = ['matrix', 'bmat', 'mat', 'asmatrix'] + +import sys +import warnings +import ast + +from .._utils import set_module +import numpy.core.numeric as N +from numpy.core.numeric import concatenate, isscalar +# While not in __all__, matrix_power used to be defined here, so we import +# it for backward compatibility. +from numpy.linalg import matrix_power + + +def _convert_from_string(data): + for char in '[]': + data = data.replace(char, '') + + rows = data.split(';') + newdata = [] + count = 0 + for row in rows: + trow = row.split(',') + newrow = [] + for col in trow: + temp = col.split() + newrow.extend(map(ast.literal_eval, temp)) + if count == 0: + Ncols = len(newrow) + elif len(newrow) != Ncols: + raise ValueError("Rows not the same size.") + count += 1 + newdata.append(newrow) + return newdata + + +@set_module('numpy') +def asmatrix(data, dtype=None): + """ + Interpret the input as a matrix. + + Unlike `matrix`, `asmatrix` does not make a copy if the input is already + a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``. + + Parameters + ---------- + data : array_like + Input data. + dtype : data-type + Data-type of the output matrix. + + Returns + ------- + mat : matrix + `data` interpreted as a matrix. 
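+ +    Notes +    ----- +    Because no copy is made, writes through either the returned matrix or the original array are visible in the other, as the example below demonstrates.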
+ + Examples + -------- + >>> x = np.array([[1, 2], [3, 4]]) + + >>> m = np.asmatrix(x) + + >>> x[0,0] = 5 + + >>> m + matrix([[5, 2], + [3, 4]]) + + """ + return matrix(data, dtype=dtype, copy=False) + + +@set_module('numpy') +class matrix(N.ndarray): + """ + matrix(data, dtype=None, copy=True) + + .. note:: It is no longer recommended to use this class, even for linear + algebra. Instead use regular arrays. The class may be removed + in the future. + + Returns a matrix from an array-like object, or from a string of data. + A matrix is a specialized 2-D array that retains its 2-D nature + through operations. It has certain special operators, such as ``*`` + (matrix multiplication) and ``**`` (matrix power). + + Parameters + ---------- + data : array_like or string + If `data` is a string, it is interpreted as a matrix with commas + or spaces separating columns, and semicolons separating rows. + dtype : data-type + Data-type of the output matrix. + copy : bool + If `data` is already an `ndarray`, then this flag determines + whether the data is copied (the default), or whether a view is + constructed. + + See Also + -------- + array + + Examples + -------- + >>> a = np.matrix('1 2; 3 4') + >>> a + matrix([[1, 2], + [3, 4]]) + + >>> np.matrix([[1, 2], [3, 4]]) + matrix([[1, 2], + [3, 4]]) + + """ + __array_priority__ = 10.0 + def __new__(subtype, data, dtype=None, copy=True): + warnings.warn('the matrix subclass is not the recommended way to ' + 'represent matrices or deal with linear algebra (see ' + 'https://docs.scipy.org/doc/numpy/user/' + 'numpy-for-matlab-users.html). ' + 'Please adjust your code to use regular ndarray.', + PendingDeprecationWarning, stacklevel=2) + if isinstance(data, matrix): + dtype2 = data.dtype + if (dtype is None): + dtype = dtype2 + if (dtype2 == dtype) and (not copy): + return data + return data.astype(dtype) + + if isinstance(data, N.ndarray): + if dtype is None: + intype = data.dtype + else: + intype = N.dtype(dtype) + new = data.view(subtype) + if intype != data.dtype: + return new.astype(intype) + if copy: return new.copy() + else: return new + + if isinstance(data, str): + data = _convert_from_string(data) + + # now convert data to an array + arr = N.array(data, dtype=dtype, copy=copy) + ndim = arr.ndim + shape = arr.shape + if (ndim > 2): + raise ValueError("matrix must be 2-dimensional") + elif ndim == 0: + shape = (1, 1) + elif ndim == 1: + shape = (1, shape[0]) + + order = 'C' + if (ndim == 2) and arr.flags.fortran: + order = 'F' + + if not (order or arr.flags.contiguous): + arr = arr.copy() + + ret = N.ndarray.__new__(subtype, shape, arr.dtype, + buffer=arr, + order=order) + return ret + + def __array_finalize__(self, obj): + self._getitem = False + if (isinstance(obj, matrix) and obj._getitem): return + ndim = self.ndim + if (ndim == 2): + return + if (ndim > 2): + newshape = tuple([x for x in self.shape if x > 1]) + ndim = len(newshape) + if ndim == 2: + self.shape = newshape + return + elif (ndim > 2): + raise ValueError("shape too large to be a matrix.") + else: + newshape = self.shape + if ndim == 0: + self.shape = (1, 1) + elif ndim == 1: + self.shape = (1, newshape[0]) + return + + def __getitem__(self, index): + self._getitem = True + + try: + out = N.ndarray.__getitem__(self, index) + finally: + self._getitem = False + + if not isinstance(out, N.ndarray): + return out + + if out.ndim == 0: + return out[()] + if out.ndim == 1: + sh = out.shape[0] + # Determine when we should have a column array + try: + n = len(index) + except Exception: + n = 
0 + if n > 1 and isscalar(index[1]): + out.shape = (sh, 1) + else: + out.shape = (1, sh) + return out + + def __mul__(self, other): + if isinstance(other, (N.ndarray, list, tuple)) : + # This promotes 1-D vectors to row vectors + return N.dot(self, asmatrix(other)) + if isscalar(other) or not hasattr(other, '__rmul__') : + return N.dot(self, other) + return NotImplemented + + def __rmul__(self, other): + return N.dot(other, self) + + def __imul__(self, other): + self[:] = self * other + return self + + def __pow__(self, other): + return matrix_power(self, other) + + def __ipow__(self, other): + self[:] = self ** other + return self + + def __rpow__(self, other): + return NotImplemented + + def _align(self, axis): + """A convenience function for operations that need to preserve axis + orientation. + """ + if axis is None: + return self[0, 0] + elif axis==0: + return self + elif axis==1: + return self.transpose() + else: + raise ValueError("unsupported axis") + + def _collapse(self, axis): + """A convenience function for operations that want to collapse + to a scalar like _align, but are using keepdims=True + """ + if axis is None: + return self[0, 0] + else: + return self + + # Necessary because base-class tolist expects dimension + # reduction by x[0] + def tolist(self): + """ + Return the matrix as a (possibly nested) list. + + See `ndarray.tolist` for full documentation. + + See Also + -------- + ndarray.tolist + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.tolist() + [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + + """ + return self.__array__().tolist() + + # To preserve orientation of result... + def sum(self, axis=None, dtype=None, out=None): + """ + Returns the sum of the matrix elements, along the given axis. + + Refer to `numpy.sum` for full documentation. + + See Also + -------- + numpy.sum + + Notes + ----- + This is the same as `ndarray.sum`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix([[1, 2], [4, 3]]) + >>> x.sum() + 10 + >>> x.sum(axis=1) + matrix([[3], + [7]]) + >>> x.sum(axis=1, dtype='float') + matrix([[3.], + [7.]]) + >>> out = np.zeros((2, 1), dtype='float') + >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out)) + matrix([[3.], + [7.]]) + + """ + return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis) + + + # To update docstring from array to matrix... + def squeeze(self, axis=None): + """ + Return a possibly reshaped matrix. + + Refer to `numpy.squeeze` for more documentation. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Selects a subset of the axes of length one in the shape. + If an axis is selected with shape entry greater than one, + an error is raised. + + Returns + ------- + squeezed : matrix + The matrix, but as a (1, N) matrix if it had shape (N, 1). + + See Also + -------- + numpy.squeeze : related function + + Notes + ----- + If `m` has a single column then that column is returned + as the single row of a matrix. Otherwise `m` is returned. + The returned matrix is always either `m` itself or a view into `m`. + Supplying an axis keyword argument will not affect the returned matrix + but it may cause an error to be raised. 
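+        In particular, the result can never be reduced to one dimension: a matrix is inherently 2-D.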
+ + Examples + -------- + >>> c = np.matrix([[1], [2]]) + >>> c + matrix([[1], + [2]]) + >>> c.squeeze() + matrix([[1, 2]]) + >>> r = c.T + >>> r + matrix([[1, 2]]) + >>> r.squeeze() + matrix([[1, 2]]) + >>> m = np.matrix([[1, 2], [3, 4]]) + >>> m.squeeze() + matrix([[1, 2], + [3, 4]]) + + """ + return N.ndarray.squeeze(self, axis=axis) + + + # To update docstring from array to matrix... + def flatten(self, order='C'): + """ + Return a flattened copy of the matrix. + + All `N` elements of the matrix are placed into a single row. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. 'F' means to + flatten in column-major (Fortran-style) order. 'A' means to + flatten in column-major order if `m` is Fortran *contiguous* in + memory, row-major order otherwise. 'K' means to flatten `m` in + the order the elements occur in memory. The default is 'C'. + + Returns + ------- + y : matrix + A copy of the matrix, flattened to a `(1, N)` matrix where `N` + is the number of elements in the original matrix. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the matrix. + + Examples + -------- + >>> m = np.matrix([[1,2], [3,4]]) + >>> m.flatten() + matrix([[1, 2, 3, 4]]) + >>> m.flatten('F') + matrix([[1, 3, 2, 4]]) + + """ + return N.ndarray.flatten(self, order=order) + + def mean(self, axis=None, dtype=None, out=None): + """ + Returns the average of the matrix elements along the given axis. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.mean + + Notes + ----- + Same as `ndarray.mean` except that, where that returns an `ndarray`, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.mean() + 5.5 + >>> x.mean(0) + matrix([[4., 5., 6., 7.]]) + >>> x.mean(1) + matrix([[ 1.5], + [ 5.5], + [ 9.5]]) + + """ + return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def std(self, axis=None, dtype=None, out=None, ddof=0): + """ + Return the standard deviation of the array elements along the given axis. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.std + + Notes + ----- + This is the same as `ndarray.std`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.std() + 3.4520525295346629 # may vary + >>> x.std(0) + matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) # may vary + >>> x.std(1) + matrix([[ 1.11803399], + [ 1.11803399], + [ 1.11803399]]) + + """ + return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) + + def var(self, axis=None, dtype=None, out=None, ddof=0): + """ + Returns the variance of the matrix elements, along the given axis. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.var + + Notes + ----- + This is the same as `ndarray.var`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. 
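+        As with ``numpy.var``, the default ``ddof=0`` gives the population variance; pass ``ddof=1`` for the unbiased sample estimate.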
+ + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.var() + 11.916666666666666 + >>> x.var(0) + matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) # may vary + >>> x.var(1) + matrix([[1.25], + [1.25], + [1.25]]) + + """ + return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) + + def prod(self, axis=None, dtype=None, out=None): + """ + Return the product of the array elements over the given axis. + + Refer to `prod` for full documentation. + + See Also + -------- + prod, ndarray.prod + + Notes + ----- + Same as `ndarray.prod`, except, where that returns an `ndarray`, this + returns a `matrix` object instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.prod() + 0 + >>> x.prod(0) + matrix([[ 0, 45, 120, 231]]) + >>> x.prod(1) + matrix([[ 0], + [ 840], + [7920]]) + + """ + return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def any(self, axis=None, out=None): + """ + Test whether any array element along a given axis evaluates to True. + + Refer to `numpy.any` for full documentation. + + Parameters + ---------- + axis : int, optional + Axis along which logical OR is performed + out : ndarray, optional + Output to existing array instead of creating new one, must have + same shape as expected output + + Returns + ------- + any : bool, ndarray + Returns a single bool if `axis` is ``None``; otherwise, + returns `ndarray` + + """ + return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis) + + def all(self, axis=None, out=None): + """ + Test whether all matrix elements along a given axis evaluate to True. + + Parameters + ---------- + See `numpy.all` for complete descriptions + + See Also + -------- + numpy.all + + Notes + ----- + This is the same as `ndarray.all`, but it returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> y = x[0]; y + matrix([[0, 1, 2, 3]]) + >>> (x == y) + matrix([[ True, True, True, True], + [False, False, False, False], + [False, False, False, False]]) + >>> (x == y).all() + False + >>> (x == y).all(0) + matrix([[False, False, False, False]]) + >>> (x == y).all(1) + matrix([[ True], + [False], + [False]]) + + """ + return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis) + + def max(self, axis=None, out=None): + """ + Return the maximum value along an axis. + + Parameters + ---------- + See `amax` for complete descriptions + + See Also + -------- + amax, ndarray.max + + Notes + ----- + This is the same as `ndarray.max`, but returns a `matrix` object + where `ndarray.max` would return an ndarray. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.max() + 11 + >>> x.max(0) + matrix([[ 8, 9, 10, 11]]) + >>> x.max(1) + matrix([[ 3], + [ 7], + [11]]) + + """ + return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis) + + def argmax(self, axis=None, out=None): + """ + Indexes of the maximum values along an axis. + + Return the indexes of the first occurrences of the maximum values + along the specified axis. If axis is None, the index is for the + flattened matrix. 
+ + Parameters + ---------- + See `numpy.argmax` for complete descriptions + + See Also + -------- + numpy.argmax + + Notes + ----- + This is the same as `ndarray.argmax`, but returns a `matrix` object + where `ndarray.argmax` would return an `ndarray`. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.argmax() + 11 + >>> x.argmax(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmax(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmax(self, axis, out)._align(axis) + + def min(self, axis=None, out=None): + """ + Return the minimum value along an axis. + + Parameters + ---------- + See `amin` for complete descriptions. + + See Also + -------- + amin, ndarray.min + + Notes + ----- + This is the same as `ndarray.min`, but returns a `matrix` object + where `ndarray.min` would return an ndarray. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.min() + -11 + >>> x.min(0) + matrix([[ -8, -9, -10, -11]]) + >>> x.min(1) + matrix([[ -3], + [ -7], + [-11]]) + + """ + return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis) + + def argmin(self, axis=None, out=None): + """ + Indexes of the minimum values along an axis. + + Return the indexes of the first occurrences of the minimum values + along the specified axis. If axis is None, the index is for the + flattened matrix. + + Parameters + ---------- + See `numpy.argmin` for complete descriptions. + + See Also + -------- + numpy.argmin + + Notes + ----- + This is the same as `ndarray.argmin`, but returns a `matrix` object + where `ndarray.argmin` would return an `ndarray`. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.argmin() + 11 + >>> x.argmin(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmin(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmin(self, axis, out)._align(axis) + + def ptp(self, axis=None, out=None): + """ + Peak-to-peak (maximum - minimum) value along the given axis. + + Refer to `numpy.ptp` for full documentation. + + See Also + -------- + numpy.ptp + + Notes + ----- + Same as `ndarray.ptp`, except, where that would return an `ndarray` object, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.ptp() + 11 + >>> x.ptp(0) + matrix([[8, 8, 8, 8]]) + >>> x.ptp(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.ptp(self, axis, out)._align(axis) + + @property + def I(self): + """ + Returns the (multiplicative) inverse of invertible `self`. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + If `self` is non-singular, `ret` is such that ``ret * self`` == + ``self * ret`` == ``np.matrix(np.eye(self[0,:].size))`` all return + ``True``. + + Raises + ------ + numpy.linalg.LinAlgError: Singular matrix + If `self` is singular. + + See Also + -------- + linalg.inv + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]'); m + matrix([[1, 2], + [3, 4]]) + >>> m.getI() + matrix([[-2. , 1. 
], + [ 1.5, -0.5]]) + >>> m.getI() * m + matrix([[ 1., 0.], # may vary + [ 0., 1.]]) + + """ + M, N = self.shape + if M == N: + from numpy.linalg import inv as func + else: + from numpy.linalg import pinv as func + return asmatrix(func(self)) + + @property + def A(self): + """ + Return `self` as an `ndarray` object. + + Equivalent to ``np.asarray(self)``. + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self` as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA() + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + + """ + return self.__array__() + + @property + def A1(self): + """ + Return `self` as a flattened `ndarray`. + + Equivalent to ``np.asarray(x).ravel()`` + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self`, 1-D, as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA1() + array([ 0, 1, 2, ..., 9, 10, 11]) + + + """ + return self.__array__().ravel() + + + def ravel(self, order='C'): + """ + Return a flattened matrix. + + Refer to `numpy.ravel` for more documentation. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `m` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. + 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + + Returns + ------- + ret : matrix + Return the matrix flattened to shape `(1, N)` where `N` + is the number of elements in the original matrix. + A copy is made only if necessary. + + See Also + -------- + matrix.flatten : returns a similar output matrix but always a copy + matrix.flat : a flat iterator on the array. + numpy.ravel : related function which returns an ndarray + + """ + return N.ndarray.ravel(self, order=order) + + @property + def T(self): + """ + Returns the transpose of the matrix. + + Does *not* conjugate! For the complex conjugate transpose, use ``.H``. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + The (non-conjugated) transpose of the matrix. + + See Also + -------- + transpose, getH + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]') + >>> m + matrix([[1, 2], + [3, 4]]) + >>> m.getT() + matrix([[1, 3], + [2, 4]]) + + """ + return self.transpose() + + @property + def H(self): + """ + Returns the (complex) conjugate transpose of `self`. + + Equivalent to ``np.transpose(self)`` if `self` is real-valued. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + complex conjugate transpose of `self` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))) + >>> z = x - 1j*x; z + matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j], + [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j], + [ 8. -8.j, 9. 
-9.j, 10.-10.j, 11.-11.j]]) + >>> z.getH() + matrix([[ 0. -0.j, 4. +4.j, 8. +8.j], + [ 1. +1.j, 5. +5.j, 9. +9.j], + [ 2. +2.j, 6. +6.j, 10.+10.j], + [ 3. +3.j, 7. +7.j, 11.+11.j]]) + + """ + if issubclass(self.dtype.type, N.complexfloating): + return self.transpose().conjugate() + else: + return self.transpose() + + # kept for compatibility + getT = T.fget + getA = A.fget + getA1 = A1.fget + getH = H.fget + getI = I.fget + +def _from_string(str, gdict, ldict): + rows = str.split(';') + rowtup = [] + for row in rows: + trow = row.split(',') + newrow = [] + for x in trow: + newrow.extend(x.split()) + trow = newrow + coltup = [] + for col in trow: + col = col.strip() + try: + thismat = ldict[col] + except KeyError: + try: + thismat = gdict[col] + except KeyError as e: + raise NameError(f"name {col!r} is not defined") from None + + coltup.append(thismat) + rowtup.append(concatenate(coltup, axis=-1)) + return concatenate(rowtup, axis=0) + + +@set_module('numpy') +def bmat(obj, ldict=None, gdict=None): + """ + Build a matrix object from a string, nested sequence, or array. + + Parameters + ---------- + obj : str or array_like + Input data. If a string, variables in the current scope may be + referenced by name. + ldict : dict, optional + A dictionary that replaces local operands in current frame. + Ignored if `obj` is not a string or `gdict` is None. + gdict : dict, optional + A dictionary that replaces global operands in current frame. + Ignored if `obj` is not a string. + + Returns + ------- + out : matrix + Returns a matrix object, which is a specialized 2-D array. + + See Also + -------- + block : + A generalization of this function for N-d arrays, that returns normal + ndarrays. + + Examples + -------- + >>> A = np.mat('1 1; 1 1') + >>> B = np.mat('2 2; 2 2') + >>> C = np.mat('3 4; 5 6') + >>> D = np.mat('7 8; 9 0') + + All the following expressions construct the same block matrix: + + >>> np.bmat([[A, B], [C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat('A,B; C,D') + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + + """ + if isinstance(obj, str): + if gdict is None: + # get previous frame + frame = sys._getframe().f_back + glob_dict = frame.f_globals + loc_dict = frame.f_locals + else: + glob_dict = gdict + loc_dict = ldict + + return matrix(_from_string(obj, glob_dict, loc_dict)) + + if isinstance(obj, (tuple, list)): + # [[A,B],[C,D]] + arr_rows = [] + for row in obj: + if isinstance(row, N.ndarray): # not 2-d + return matrix(concatenate(obj, axis=-1)) + else: + arr_rows.append(concatenate(row, axis=-1)) + return matrix(concatenate(arr_rows, axis=0)) + if isinstance(obj, N.ndarray): + return matrix(obj) + +mat = asmatrix diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.pyi new file mode 100644 index 00000000..9d0d1ee5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.pyi @@ -0,0 +1,16 @@ +from collections.abc import Sequence, Mapping +from typing import Any +from numpy import matrix as matrix +from numpy._typing import ArrayLike, DTypeLike, NDArray + +__all__: list[str] + +def bmat( + obj: str | Sequence[ArrayLike] | NDArray[Any], + ldict: None | Mapping[str, Any] = ..., + gdict: None | Mapping[str, Any] = ..., +) -> matrix[Any, Any]: ... 
+ +def asmatrix(data: ArrayLike, dtype: DTypeLike = ...) -> matrix[Any, Any]: ... + +mat = asmatrix diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/setup.py b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/setup.py new file mode 100644 index 00000000..4fed75de --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/setup.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('matrixlib', parent_package, top_path) + config.add_subpackage('tests') + config.add_data_files('*.pyi') + return config + +if __name__ == "__main__": + from numpy.distutils.core import setup + config = configuration(top_path='').todict() + setup(**config) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_defmatrix.py b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_defmatrix.py new file mode 100644 index 00000000..4cb5f3a3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_defmatrix.py @@ -0,0 +1,453 @@ +import collections.abc + +import numpy as np +from numpy import matrix, asmatrix, bmat +from numpy.testing import ( + assert_, assert_equal, assert_almost_equal, assert_array_equal, + assert_array_almost_equal, assert_raises + ) +from numpy.linalg import matrix_power +from numpy.matrixlib import mat + +class TestCtor: + def test_basic(self): + A = np.array([[1, 2], [3, 4]]) + mA = matrix(A) + assert_(np.all(mA.A == A)) + + B = bmat("A,A;A,A") + C = bmat([[A, A], [A, A]]) + D = np.array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + assert_(np.all(B.A == D)) + assert_(np.all(C.A == D)) + + E = np.array([[5, 6], [7, 8]]) + AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]]) + assert_(np.all(bmat([A, E]) == AEresult)) + + vec = np.arange(5) + mvec = matrix(vec) + assert_(mvec.shape == (1, 5)) + + def test_exceptions(self): + # Check for ValueError when called with invalid string data. + assert_raises(ValueError, matrix, "invalid") + + def test_bmat_nondefault_str(self): + A = np.array([[1, 2], [3, 4]]) + B = np.array([[5, 6], [7, 8]]) + Aresult = np.array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + mixresult = np.array([[1, 2, 5, 6], + [3, 4, 7, 8], + [5, 6, 1, 2], + [7, 8, 3, 4]]) + assert_(np.all(bmat("A,A;A,A") == Aresult)) + assert_(np.all(bmat("A,A;A,A", ldict={'A':B}) == Aresult)) + assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B}) + assert_( + np.all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult)) + b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A}) + assert_(np.all(b2 == mixresult)) + + +class TestProperties: + def test_sum(self): + """Test whether matrix.sum(axis=1) preserves orientation. + Fails in NumPy <= 0.9.6.2127. 
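+        (``axis=1`` should yield a column matrix rather than collapsing to 1-D; ``sum1`` below is therefore transposed.)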
+ """ + M = matrix([[1, 2, 0, 0], + [3, 4, 0, 0], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + sum0 = matrix([8, 12, 4, 6]) + sum1 = matrix([3, 7, 6, 14]).T + sumall = 30 + assert_array_equal(sum0, M.sum(axis=0)) + assert_array_equal(sum1, M.sum(axis=1)) + assert_equal(sumall, M.sum()) + + assert_array_equal(sum0, np.sum(M, axis=0)) + assert_array_equal(sum1, np.sum(M, axis=1)) + assert_equal(sumall, np.sum(M)) + + def test_prod(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.prod(), 720) + assert_equal(x.prod(0), matrix([[4, 10, 18]])) + assert_equal(x.prod(1), matrix([[6], [120]])) + + assert_equal(np.prod(x), 720) + assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]])) + assert_equal(np.prod(x, axis=1), matrix([[6], [120]])) + + y = matrix([0, 1, 3]) + assert_(y.prod() == 0) + + def test_max(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.max(), 6) + assert_equal(x.max(0), matrix([[4, 5, 6]])) + assert_equal(x.max(1), matrix([[3], [6]])) + + assert_equal(np.max(x), 6) + assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]])) + assert_equal(np.max(x, axis=1), matrix([[3], [6]])) + + def test_min(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.min(), 1) + assert_equal(x.min(0), matrix([[1, 2, 3]])) + assert_equal(x.min(1), matrix([[1], [4]])) + + assert_equal(np.min(x), 1) + assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]])) + assert_equal(np.min(x, axis=1), matrix([[1], [4]])) + + def test_ptp(self): + x = np.arange(4).reshape((2, 2)) + assert_(x.ptp() == 3) + assert_(np.all(x.ptp(0) == np.array([2, 2]))) + assert_(np.all(x.ptp(1) == np.array([1, 1]))) + + def test_var(self): + x = np.arange(9).reshape((3, 3)) + mx = x.view(np.matrix) + assert_equal(x.var(ddof=0), mx.var(ddof=0)) + assert_equal(x.var(ddof=1), mx.var(ddof=1)) + + def test_basic(self): + import numpy.linalg as linalg + + A = np.array([[1., 2.], + [3., 4.]]) + mA = matrix(A) + assert_(np.allclose(linalg.inv(A), mA.I)) + assert_(np.all(np.array(np.transpose(A) == mA.T))) + assert_(np.all(np.array(np.transpose(A) == mA.H))) + assert_(np.all(A == mA.A)) + + B = A + 2j*A + mB = matrix(B) + assert_(np.allclose(linalg.inv(B), mB.I)) + assert_(np.all(np.array(np.transpose(B) == mB.T))) + assert_(np.all(np.array(np.transpose(B).conj() == mB.H))) + + def test_pinv(self): + x = matrix(np.arange(6).reshape(2, 3)) + xpinv = matrix([[-0.77777778, 0.27777778], + [-0.11111111, 0.11111111], + [ 0.55555556, -0.05555556]]) + assert_almost_equal(x.I, xpinv) + + def test_comparisons(self): + A = np.arange(100).reshape(10, 10) + mA = matrix(A) + mB = matrix(A) + 0.1 + assert_(np.all(mB == A+0.1)) + assert_(np.all(mB == matrix(A+0.1))) + assert_(not np.any(mB == matrix(A-0.1))) + assert_(np.all(mA < mB)) + assert_(np.all(mA <= mB)) + assert_(np.all(mA <= mA)) + assert_(not np.any(mA < mA)) + + assert_(not np.any(mB < mA)) + assert_(np.all(mB >= mA)) + assert_(np.all(mB >= mB)) + assert_(not np.any(mB > mB)) + + assert_(np.all(mA == mA)) + assert_(not np.any(mA == mB)) + assert_(np.all(mB != mA)) + + assert_(not np.all(abs(mA) > 0)) + assert_(np.all(abs(mB > 0))) + + def test_asmatrix(self): + A = np.arange(100).reshape(10, 10) + mA = asmatrix(A) + A[0, 0] = -10 + assert_(A[0, 0] == mA[0, 0]) + + def test_noaxis(self): + A = matrix([[1, 0], [0, 1]]) + assert_(A.sum() == matrix(2)) + assert_(A.mean() == matrix(0.5)) + + def test_repr(self): + A = matrix([[1, 0], [0, 1]]) + assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])") + + def test_make_bool_matrix_from_str(self): + A = matrix('True; True; False') + B = 
matrix([[True], [True], [False]]) + assert_array_equal(A, B) + +class TestCasting: + def test_basic(self): + A = np.arange(100).reshape(10, 10) + mA = matrix(A) + + mB = mA.copy() + O = np.ones((10, 10), np.float64) * 0.1 + mB = mB + O + assert_(mB.dtype.type == np.float64) + assert_(np.all(mA != mB)) + assert_(np.all(mB == mA+0.1)) + + mC = mA.copy() + O = np.ones((10, 10), np.complex128) + mC = mC * O + assert_(mC.dtype.type == np.complex128) + assert_(np.all(mA != mB)) + + +class TestAlgebra: + def test_basic(self): + import numpy.linalg as linalg + + A = np.array([[1., 2.], [3., 4.]]) + mA = matrix(A) + + B = np.identity(2) + for i in range(6): + assert_(np.allclose((mA ** i).A, B)) + B = np.dot(B, A) + + Ainv = linalg.inv(A) + B = np.identity(2) + for i in range(6): + assert_(np.allclose((mA ** -i).A, B)) + B = np.dot(B, Ainv) + + assert_(np.allclose((mA * mA).A, np.dot(A, A))) + assert_(np.allclose((mA + mA).A, (A + A))) + assert_(np.allclose((3*mA).A, (3*A))) + + mA2 = matrix(A) + mA2 *= 3 + assert_(np.allclose(mA2.A, 3*A)) + + def test_pow(self): + """Test raising a matrix to an integer power works as expected.""" + m = matrix("1. 2.; 3. 4.") + m2 = m.copy() + m2 **= 2 + mi = m.copy() + mi **= -1 + m4 = m2.copy() + m4 **= 2 + assert_array_almost_equal(m2, m**2) + assert_array_almost_equal(m4, np.dot(m2, m2)) + assert_array_almost_equal(np.dot(mi, m), np.eye(2)) + + def test_scalar_type_pow(self): + m = matrix([[1, 2], [3, 4]]) + for scalar_t in [np.int8, np.uint8]: + two = scalar_t(2) + assert_array_almost_equal(m ** 2, m ** two) + + def test_notimplemented(self): + '''Check that 'not implemented' operations produce a failure.''' + A = matrix([[1., 2.], + [3., 4.]]) + + # __rpow__ + with assert_raises(TypeError): + 1.0**A + + # __mul__ with something not a list, ndarray, tuple, or scalar + with assert_raises(TypeError): + A*object() + + +class TestMatrixReturn: + def test_instance_methods(self): + a = matrix([1.0], dtype='f8') + methodargs = { + 'astype': ('intc',), + 'clip': (0.0, 1.0), + 'compress': ([1],), + 'repeat': (1,), + 'reshape': (1,), + 'swapaxes': (0, 0), + 'dot': np.array([1.0]), + } + excluded_methods = [ + 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', + 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', + 'searchsorted', 'setflags', 'setfield', 'sort', + 'partition', 'argpartition', + 'take', 'tofile', 'tolist', 'tostring', 'tobytes', 'all', 'any', + 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', + 'prod', 'std', 'ctypes', 'itemset', + ] + for attrib in dir(a): + if attrib.startswith('_') or attrib in excluded_methods: + continue + f = getattr(a, attrib) + if isinstance(f, collections.abc.Callable): + # reset contents of a + a.astype('f8') + a.fill(1.0) + if attrib in methodargs: + args = methodargs[attrib] + else: + args = () + b = f(*args) + assert_(type(b) is matrix, "%s" % attrib) + assert_(type(a.real) is matrix) + assert_(type(a.imag) is matrix) + c, d = matrix([0.0]).nonzero() + assert_(type(c) is np.ndarray) + assert_(type(d) is np.ndarray) + + +class TestIndexing: + def test_basic(self): + x = asmatrix(np.zeros((3, 2), float)) + y = np.zeros((3, 1), float) + y[:, 0] = [0.8, 0.2, 0.3] + x[:, 1] = y > 0.5 + assert_equal(x, [[0, 1], [0, 0], [0, 0]]) + + +class TestNewScalarIndexing: + a = matrix([[1, 2], [3, 4]]) + + def test_dimensions(self): + a = self.a + x = a[0] + assert_equal(x.ndim, 2) + + def test_array_from_matrix_list(self): + a = self.a + x = np.array([a, a]) + assert_equal(x.shape, [2, 2, 2]) + + def 
test_array_to_list(self): + a = self.a + assert_equal(a.tolist(), [[1, 2], [3, 4]]) + + def test_fancy_indexing(self): + a = self.a + x = a[1, [0, 1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4, 3]])) + x = a[[1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4], [1, 2]])) + x = a[[[1], [0]], [[1, 0], [0, 1]]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[4, 3], [1, 2]])) + + def test_matrix_element(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x[0][0], matrix([[1, 2, 3]])) + assert_equal(x[0][0].shape, (1, 3)) + assert_equal(x[0].shape, (1, 3)) + assert_equal(x[:, 0].shape, (2, 1)) + + x = matrix(0) + assert_equal(x[0, 0], 0) + assert_equal(x[0], 0) + assert_equal(x[:, 0].shape, x.shape) + + def test_scalar_indexing(self): + x = asmatrix(np.zeros((3, 2), float)) + assert_equal(x[0, 0], x[0][0]) + + def test_row_column_indexing(self): + x = asmatrix(np.eye(2)) + assert_array_equal(x[0,:], [[1, 0]]) + assert_array_equal(x[1,:], [[0, 1]]) + assert_array_equal(x[:, 0], [[1], [0]]) + assert_array_equal(x[:, 1], [[0], [1]]) + + def test_boolean_indexing(self): + A = np.arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, np.array([True, False])], x[:, 0]) + assert_array_equal(x[np.array([True, False, False]),:], x[0,:]) + + def test_list_indexing(self): + A = np.arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, [1, 0]], x[:, ::-1]) + assert_array_equal(x[[2, 1, 0],:], x[::-1,:]) + + +class TestPower: + def test_returntype(self): + a = np.array([[0, 1], [0, 0]]) + assert_(type(matrix_power(a, 2)) is np.ndarray) + a = mat(a) + assert_(type(matrix_power(a, 2)) is matrix) + + def test_list(self): + assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]]) + + +class TestShape: + + a = np.array([[1], [2]]) + m = matrix([[1], [2]]) + + def test_shape(self): + assert_equal(self.a.shape, (2, 1)) + assert_equal(self.m.shape, (2, 1)) + + def test_numpy_ravel(self): + assert_equal(np.ravel(self.a).shape, (2,)) + assert_equal(np.ravel(self.m).shape, (2,)) + + def test_member_ravel(self): + assert_equal(self.a.ravel().shape, (2,)) + assert_equal(self.m.ravel().shape, (1, 2)) + + def test_member_flatten(self): + assert_equal(self.a.flatten().shape, (2,)) + assert_equal(self.m.flatten().shape, (1, 2)) + + def test_numpy_ravel_order(self): + x = np.array([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + + def test_matrix_ravel_order(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.ravel(), [[1, 2, 3, 4, 5, 6]]) + assert_equal(x.ravel(order='F'), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(order='A'), [[1, 2, 3, 4, 5, 6]]) + + def test_array_memory_sharing(self): + assert_(np.may_share_memory(self.a, self.a.ravel())) + assert_(not np.may_share_memory(self.a, self.a.flatten())) + + def test_matrix_memory_sharing(self): + assert_(np.may_share_memory(self.m, self.m.ravel())) + assert_(not np.may_share_memory(self.m, self.m.flatten())) + + def test_expand_dims_matrix(self): + # 
matrices are always 2d - so expand_dims only makes sense when the + # type is changed away from matrix. + a = np.arange(10).reshape((2, 5)).view(np.matrix) + expanded = np.expand_dims(a, axis=1) + assert_equal(expanded.ndim, 3) + assert_(not isinstance(expanded, np.matrix)) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_interaction.py b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_interaction.py new file mode 100644 index 00000000..5154bd62 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_interaction.py @@ -0,0 +1,354 @@ +"""Tests of interaction of matrix with other parts of numpy. + +Note that tests with MaskedArray and linalg are done in separate files. +""" +import pytest + +import textwrap +import warnings + +import numpy as np +from numpy.testing import (assert_, assert_equal, assert_raises, + assert_raises_regex, assert_array_equal, + assert_almost_equal, assert_array_almost_equal) + + +def test_fancy_indexing(): + # The matrix class messes with the shape. While this is always + # weird (getitem is not used, it does not have setitem nor knows + # about fancy indexing), this tests gh-3110 + # 2018-04-29: moved here from core.tests.test_index. + m = np.matrix([[1, 2], [3, 4]]) + + assert_(isinstance(m[[0, 1, 0], :], np.matrix)) + + # gh-3110. Note the transpose currently because matrices do *not* + # support dimension fixing for fancy indexing correctly. + x = np.asmatrix(np.arange(50).reshape(5, 10)) + assert_equal(x[:2, np.array(-1)], x[:2, -1].T) + + +def test_polynomial_mapdomain(): + # test that polynomial preserved matrix subtype. + # 2018-04-29: moved here from polynomial.tests.polyutils. + dom1 = [0, 4] + dom2 = [1, 3] + x = np.matrix([dom1, dom1]) + res = np.polynomial.polyutils.mapdomain(x, dom1, dom2) + assert_(isinstance(res, np.matrix)) + + +def test_sort_matrix_none(): + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.sort(a, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(expected) is np.matrix) + + +def test_partition_matrix_none(): + # gh-4301 + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.partition(a, 1, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(expected) is np.matrix) + + +def test_dot_scalar_and_matrix_of_objects(): + # Ticket #2469 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.dot(arr, 3), desired) + assert_equal(np.dot(3, arr), desired) + + +def test_inner_scalar_and_matrix(): + # 2018-04-29: moved here from core.tests.test_multiarray + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + arr = np.matrix([[1, 2], [3, 4]], dtype=dt) + desired = np.matrix([[3, 6], [9, 12]], dtype=dt) + assert_equal(np.inner(arr, sca), desired) + assert_equal(np.inner(sca, arr), desired) + + +def test_inner_scalar_and_matrix_of_objects(): + # Ticket #4482 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.inner(arr, 3), desired) + assert_equal(np.inner(3, arr), desired) + + +def test_iter_allocate_output_subtype(): + # Make sure that the subtype with priority wins + # 2018-04-29: moved here from 
core.tests.test_nditer, given the + # matrix specific shape test. + + # matrix vs ndarray + a = np.matrix([[1, 2], [3, 4]]) + b = np.arange(4).reshape(2, 2).T + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_(type(i.operands[2]) is np.matrix) + assert_(type(i.operands[2]) is not np.ndarray) + assert_equal(i.operands[2].shape, (2, 2)) + + # matrix always wants things to be 2D + b = np.arange(4).reshape(1, 2, 2) + assert_raises(RuntimeError, np.nditer, [a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + # but if subtypes are disabled, the result can still work + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], + ['writeonly', 'allocate', 'no_subtype']]) + assert_(type(i.operands[2]) is np.ndarray) + assert_(type(i.operands[2]) is not np.matrix) + assert_equal(i.operands[2].shape, (1, 2, 2)) + + +def like_function(): + # 2018-04-29: moved here from core.tests.test_numeric + a = np.matrix([[1, 2], [3, 4]]) + for like_function in np.zeros_like, np.ones_like, np.empty_like: + b = like_function(a) + assert_(type(b) is np.matrix) + + c = like_function(a, subok=False) + assert_(type(c) is not np.matrix) + + +def test_array_astype(): + # 2018-04-29: copied here from core.tests.test_api + # subok=True passes through a matrix + a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4') + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), np.matrix) + + # subok=False never returns a matrix + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not np.matrix) + + +def test_stack(): + # 2018-04-29: copied here from core.tests.test_shape_base + # check np.matrix cannot be stacked + m = np.matrix([[1, 2], [3, 4]]) + assert_raises_regex(ValueError, 'shape too large to be a matrix', + np.stack, [m, m]) + + +def test_object_scalar_multiply(): + # Tickets #2469 and #4482 + # 2018-04-29: moved here from core.tests.test_ufunc + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.multiply(arr, 3), desired) + assert_equal(np.multiply(3, arr), desired) + + +def test_nanfunctions_matrices(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in [np.nanmin, np.nanmax]: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + # check that rows of nan are dealt with for subclasses (#4628) + mat[1] = np.nan + for f in [np.nanmin, np.nanmax]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(not np.any(np.isnan(res))) + assert_(len(w) == 0) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0]) + and not np.isnan(res[2, 0])) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat) + assert_(np.isscalar(res)) + 
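+            # (NaN never compares equal to anything, so test with np.isnan rather than comparing against np.nan)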
assert_(not np.isnan(res)) + assert_(len(w) == 0) + + +def test_nanfunctions_matrices_general(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod, + np.nanmean, np.nanvar, np.nanstd): + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + + for f in np.nancumsum, np.nancumprod: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3*3)) + + +def test_average_matrix(): + # 2018-04-29: moved here from core.tests.test_function_base. + y = np.matrix(np.random.rand(5, 5)) + assert_array_equal(y.mean(0), np.average(y, 0)) + + a = np.matrix([[1, 2], [3, 4]]) + w = np.matrix([[1, 2], [3, 4]]) + + r = np.average(a, axis=0, weights=w) + assert_equal(type(r), np.matrix) + assert_equal(r, [[2.5, 10.0/3]]) + + +def test_trapz_matrix(): + # Test to make sure matrices give the same answer as ndarrays + # 2018-04-29: moved here from core.tests.test_function_base. + x = np.linspace(0, 5) + y = x * x + r = np.trapz(y, x) + mx = np.matrix(x) + my = np.matrix(y) + mr = np.trapz(my, mx) + assert_almost_equal(mr, r) + + +def test_ediff1d_matrix(): + # 2018-04-29: moved here from core.tests.test_arraysetops. + assert(isinstance(np.ediff1d(np.matrix(1)), np.matrix)) + assert(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix)) + + +def test_apply_along_axis_matrix(): + # this test is particularly malicious because matrix + # refuses to become 1d + # 2018-04-29: moved here from core.tests.test_shape_base. + def double(row): + return row * 2 + + m = np.matrix([[0, 1], [2, 3]]) + expected = np.matrix([[0, 2], [4, 6]]) + + result = np.apply_along_axis(double, 0, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + result = np.apply_along_axis(double, 1, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + +def test_kron_matrix(): + # 2018-04-29: moved here from core.tests.test_shape_base. + a = np.ones([2, 2]) + m = np.asmatrix(a) + assert_equal(type(np.kron(a, a)), np.ndarray) + assert_equal(type(np.kron(m, m)), np.matrix) + assert_equal(type(np.kron(a, m)), np.matrix) + assert_equal(type(np.kron(m, a)), np.matrix) + + +class TestConcatenatorMatrix: + # 2018-04-29: moved here from core.tests.test_index_tricks. 
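+    # np.r_['r', ...] builds a row matrix and np.r_['c', ...] a column matrix; combining the two ('rc') is invalid, as the tests below exercise.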
+ def test_matrix(self): + a = [1, 2] + b = [3, 4] + + ab_r = np.r_['r', a, b] + ab_c = np.r_['c', a, b] + + assert_equal(type(ab_r), np.matrix) + assert_equal(type(ab_c), np.matrix) + + assert_equal(np.array(ab_r), [[1, 2, 3, 4]]) + assert_equal(np.array(ab_c), [[1], [2], [3], [4]]) + + assert_raises(ValueError, lambda: np.r_['rc', a, b]) + + def test_matrix_scalar(self): + r = np.r_['r', [1, 2], 3] + assert_equal(type(r), np.matrix) + assert_equal(np.array(r), [[1, 2, 3]]) + + def test_matrix_builder(self): + a = np.array([1]) + b = np.array([2]) + c = np.array([3]) + d = np.array([4]) + actual = np.r_['a, b; c, d'] + expected = np.bmat([[a, b], [c, d]]) + + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + +def test_array_equal_error_message_matrix(): + # 2018-04-29: moved here from testing.tests.test_utils. + with pytest.raises(AssertionError) as exc_info: + assert_equal(np.array([1, 2]), np.matrix([1, 2])) + msg = str(exc_info.value) + msg_reference = textwrap.dedent("""\ + + Arrays are not equal + + (shapes (2,), (1, 2) mismatch) + x: array([1, 2]) + y: matrix([[1, 2]])""") + assert_equal(msg, msg_reference) + + +def test_array_almost_equal_matrix(): + # Matrix slicing keeps things 2-D, while array does not necessarily. + # See gh-8452. + # 2018-04-29: moved here from testing.tests.test_utils. + m1 = np.matrix([[1., 2.]]) + m2 = np.matrix([[1., np.nan]]) + m3 = np.matrix([[1., -np.inf]]) + m4 = np.matrix([[np.nan, np.inf]]) + m5 = np.matrix([[1., 2.], [np.nan, np.inf]]) + for assert_func in assert_array_almost_equal, assert_almost_equal: + for m in m1, m2, m3, m4, m5: + assert_func(m, m) + a = np.array(m) + assert_func(a, m) + assert_func(m, a) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_masked_matrix.py b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_masked_matrix.py new file mode 100644 index 00000000..d0ce357a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_masked_matrix.py @@ -0,0 +1,231 @@ +import numpy as np +from numpy.testing import assert_warns +from numpy.ma.testutils import (assert_, assert_equal, assert_raises, + assert_array_equal) +from numpy.ma.core import (masked_array, masked_values, masked, allequal, + MaskType, getmask, MaskedArray, nomask, + log, add, hypot, divide) +from numpy.ma.extras import mr_ +from numpy.compat import pickle + + +class MMatrix(MaskedArray, np.matrix,): + + def __new__(cls, data, mask=nomask): + mat = np.matrix(data) + _data = MaskedArray.__new__(cls, data=mat, mask=mask) + return _data + + def __array_finalize__(self, obj): + np.matrix.__array_finalize__(self, obj) + MaskedArray.__array_finalize__(self, obj) + return + + @property + def _series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + + +class TestMaskedMatrix: + def test_matrix_indexing(self): + # Tests conversions and indexing + x1 = np.matrix([[1, 2, 3], [4, 3, 2]]) + x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]]) + x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]]) + x4 = masked_array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? 
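+        # (neither str() nor repr() should raise for a masked matrix)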
+ # tests of indexing + assert_(type(x2[1, 0]) is type(x1[1, 0])) + assert_(x1[1, 0] == x2[1, 0]) + assert_(x2[1, 1] is masked) + assert_equal(x1[0, 2], x2[0, 2]) + assert_equal(x1[0, 1:], x2[0, 1:]) + assert_equal(x1[:, 2], x2[:, 2]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[0, 2] = 9 + x2[0, 2] = 9 + assert_equal(x1, x2) + x1[0, 1:] = 99 + x2[0, 1:] = 99 + assert_equal(x1, x2) + x2[0, 1] = masked + assert_equal(x1, x2) + x2[0, 1:] = masked + assert_equal(x1, x2) + x2[0, :] = x1[0, :] + x2[0, 1] = masked + assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]]))) + x3[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0]))) + assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0]))) + x4[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0]))) + assert_(allequal(x4[1], masked_array([1, 2, 3]))) + x1 = np.matrix(np.arange(5) * 1.0) + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType), + x2.mask)) + assert_equal(3.0, x2.fill_value) + + def test_pickling_subbaseclass(self): + # Test pickling w/ a subclass of ndarray + a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + assert_(isinstance(a_pickled._data, np.matrix)) + + def test_count_mean_with_matrix(self): + m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2))) + + assert_equal(m.count(axis=0).shape, (1, 2)) + assert_equal(m.count(axis=1).shape, (2, 1)) + + # Make sure broadcasting inside mean and var work + assert_equal(m.mean(axis=0), [[2., 3.]]) + assert_equal(m.mean(axis=1), [[1.5], [3.5]]) + + def test_flat(self): + # Test that flat can return items even for matrices [#4585, #4615] + # test simple access + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + assert_equal(test.flat[1], 2) + assert_equal(test.flat[2], masked) + assert_(np.all(test.flat[0:2] == test[0, 0:2])) + # Test flat on masked_matrices + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) + control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) + assert_equal(test, control) + # Test setting + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + testflat = test.flat + testflat[:] = testflat[[2, 1, 0]] + assert_equal(test, control) + testflat[0] = 9 + # test that matrices keep the correct shape (#4615) + a = masked_array(np.matrix(np.eye(2)), mask=0) + b = a.flat + b01 = b[:2] + assert_equal(b01.data, np.array([[1., 0.]])) + assert_equal(b01.mask, np.array([[False, False]])) + + def test_allany_onmatrices(self): + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + X = np.matrix(x) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool_) + mX = masked_array(X, mask=m) + mXbig = (mX > 0.5) + mXsmall = (mX < 0.5) + + assert_(not mXbig.all()) + assert_(mXbig.any()) + assert_equal(mXbig.all(0), np.matrix([False, False, True])) + assert_equal(mXbig.all(1), np.matrix([False, False, True]).T) + assert_equal(mXbig.any(0), np.matrix([False, False, True])) + assert_equal(mXbig.any(1), np.matrix([True, True, True]).T) + + assert_(not mXsmall.all()) + assert_(mXsmall.any()) + assert_equal(mXsmall.all(0), 
np.matrix([True, True, False]))
+        assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
+        assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
+        assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
+
+    def test_compressed(self):
+        a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
+        b = a.compressed()
+        assert_equal(b, a)
+        assert_(isinstance(b, np.matrix))
+        a[0, 0] = masked
+        b = a.compressed()
+        assert_equal(b, [[2, 3, 4]])
+
+    def test_ravel(self):
+        a = masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
+        aravel = a.ravel()
+        assert_equal(aravel.shape, (1, 5))
+        assert_equal(aravel._mask.shape, a.shape)
+
+    def test_view(self):
+        # Test view w/ flexible dtype
+        iterator = list(zip(np.arange(10), np.random.rand(10)))
+        data = np.array(iterator)
+        a = masked_array(iterator, dtype=[('a', float), ('b', float)])
+        a.mask[0] = (1, 0)
+        test = a.view((float, 2), np.matrix)
+        assert_equal(test, data)
+        assert_(isinstance(test, np.matrix))
+        assert_(not isinstance(test, MaskedArray))
+
+
+class TestSubclassing:
+    # Test suite for masked subclasses of ndarray.
+
+    def setup_method(self):
+        x = np.arange(5, dtype='float')
+        mx = MMatrix(x, mask=[0, 1, 0, 0, 0])
+        self.data = (x, mx)
+
+    def test_maskedarray_subclassing(self):
+        # Tests subclassing MaskedArray
+        (x, mx) = self.data
+        assert_(isinstance(mx._data, np.matrix))
+
+    def test_masked_unary_operations(self):
+        # Tests masked_unary_operation
+        (x, mx) = self.data
+        with np.errstate(divide='ignore'):
+            assert_(isinstance(log(mx), MMatrix))
+            assert_equal(log(x), np.log(x))
+
+    def test_masked_binary_operations(self):
+        # Tests masked_binary_operation
+        (x, mx) = self.data
+        # Result should be a MMatrix
+        assert_(isinstance(add(mx, mx), MMatrix))
+        assert_(isinstance(add(mx, x), MMatrix))
+        # Result should work
+        assert_equal(add(mx, x), mx+x)
+        assert_(isinstance(add(mx, mx)._data, np.matrix))
+        with assert_warns(DeprecationWarning):
+            assert_(isinstance(add.outer(mx, mx), MMatrix))
+        assert_(isinstance(hypot(mx, mx), MMatrix))
+        assert_(isinstance(hypot(mx, x), MMatrix))
+
+    def test_masked_binary_operations2(self):
+        # Tests domained_masked_binary_operation
+        (x, mx) = self.data
+        xmx = masked_array(mx.data.__array__(), mask=mx.mask)
+        assert_(isinstance(divide(mx, mx), MMatrix))
+        assert_(isinstance(divide(mx, x), MMatrix))
+        assert_equal(divide(mx, mx), divide(xmx, xmx))
+
+
+class TestConcatenator:
+    # Tests for mr_, the equivalent of r_ for masked arrays.
+
+    def test_matrix_builder(self):
+        assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4'])
+
+    def test_matrix(self):
+        # Test consistency with unmasked version. If we ever deprecate
+        # matrix, this test should either still pass, or both actual and
+        # expected should fail to be built.
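+        # mr_ mirrors np.r_ for masked arrays, so the two builders below
+        # should agree in both type and contents.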
+ actual = mr_['r', 1, 2, 3] + expected = np.ma.array(np.r_['r', 1, 2, 3]) + assert_array_equal(actual, expected) + + # outer type is masked array, inner type is matrix + assert_equal(type(actual), type(expected)) + assert_equal(type(actual.data), type(expected.data)) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py new file mode 100644 index 00000000..106c2e38 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py @@ -0,0 +1,93 @@ +""" Test functions for linalg module using the matrix class.""" +import numpy as np + +from numpy.linalg.tests.test_linalg import ( + LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase, + _TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base, + SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases, + PinvCases, DetCases, LstsqCases) + + +CASES = [] + +# square test cases +CASES += apply_tag('square', [ + LinalgCase("0x0_matrix", + np.empty((0, 0), dtype=np.double).view(np.matrix), + np.empty((0, 1), dtype=np.double).view(np.matrix), + tags={'size-0'}), + LinalgCase("matrix_b_only", + np.array([[1., 2.], [3., 4.]]), + np.matrix([2., 1.]).T), + LinalgCase("matrix_a_and_b", + np.matrix([[1., 2.], [3., 4.]]), + np.matrix([2., 1.]).T), +]) + +# hermitian test-cases +CASES += apply_tag('hermitian', [ + LinalgCase("hmatrix_a_and_b", + np.matrix([[1., 2.], [2., 1.]]), + None), +]) +# No need to make generalized or strided cases for matrices. + + +class MatrixTestCase(LinalgTestCase): + TEST_CASES = CASES + + +class TestSolveMatrix(SolveCases, MatrixTestCase): + pass + + +class TestInvMatrix(InvCases, MatrixTestCase): + pass + + +class TestEigvalsMatrix(EigvalsCases, MatrixTestCase): + pass + + +class TestEigMatrix(EigCases, MatrixTestCase): + pass + + +class TestSVDMatrix(SVDCases, MatrixTestCase): + pass + + +class TestCondMatrix(CondCases, MatrixTestCase): + pass + + +class TestPinvMatrix(PinvCases, MatrixTestCase): + pass + + +class TestDetMatrix(DetCases, MatrixTestCase): + pass + + +class TestLstsqMatrix(LstsqCases, MatrixTestCase): + pass + + +class _TestNorm2DMatrix(_TestNorm2D): + array = np.matrix + + +class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase): + pass + + +class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase): + pass + + +class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base): + pass + + +class TestQRMatrix(_TestQR): + array = np.matrix diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_multiarray.py b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_multiarray.py new file mode 100644 index 00000000..638d0d15 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/matrixlib/tests/test_multiarray.py @@ -0,0 +1,16 @@ +import numpy as np +from numpy.testing import assert_, assert_equal, assert_array_equal + +class TestView: + def test_type(self): + x = np.array([1, 2, 3]) + assert_(isinstance(x.view(np.matrix), np.matrix)) + + def test_keywords(self): + x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) + # We must be specific about the endianness here: + y = x.view(dtype='>> from numpy.polynomial import Chebyshev + >>> c = Chebyshev.fit(xdata, ydata, deg=1) + +is preferred over the `chebyshev.chebfit` function from the +``np.polynomial.chebyshev`` module:: + + >>> from numpy.polynomial.chebyshev import chebfit + >>> c = chebfit(xdata, ydata, 
deg=1) + +See :doc:`routines.polynomials.classes` for more details. + +Convenience Classes +=================== + +The following lists the various constants and methods common to all of +the classes representing the various kinds of polynomials. In the following, +the term ``Poly`` represents any one of the convenience classes (e.g. +`~polynomial.Polynomial`, `~chebyshev.Chebyshev`, `~hermite.Hermite`, etc.) +while the lowercase ``p`` represents an **instance** of a polynomial class. + +Constants +--------- + +- ``Poly.domain`` -- Default domain +- ``Poly.window`` -- Default window +- ``Poly.basis_name`` -- String used to represent the basis +- ``Poly.maxpower`` -- Maximum value ``n`` such that ``p**n`` is allowed +- ``Poly.nickname`` -- String used in printing + +Creation +-------- + +Methods for creating polynomial instances. + +- ``Poly.basis(degree)`` -- Basis polynomial of given degree +- ``Poly.identity()`` -- ``p`` where ``p(x) = x`` for all ``x`` +- ``Poly.fit(x, y, deg)`` -- ``p`` of degree ``deg`` with coefficients + determined by the least-squares fit to the data ``x``, ``y`` +- ``Poly.fromroots(roots)`` -- ``p`` with specified roots +- ``p.copy()`` -- Create a copy of ``p`` + +Conversion +---------- + +Methods for converting a polynomial instance of one kind to another. + +- ``p.cast(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` +- ``p.convert(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` or map + between ``domain`` and ``window`` + +Calculus +-------- +- ``p.deriv()`` -- Take the derivative of ``p`` +- ``p.integ()`` -- Integrate ``p`` + +Validation +---------- +- ``Poly.has_samecoef(p1, p2)`` -- Check if coefficients match +- ``Poly.has_samedomain(p1, p2)`` -- Check if domains match +- ``Poly.has_sametype(p1, p2)`` -- Check if types match +- ``Poly.has_samewindow(p1, p2)`` -- Check if windows match + +Misc +---- +- ``p.linspace()`` -- Return ``x, p(x)`` at equally-spaced points in ``domain`` +- ``p.mapparms()`` -- Return the parameters for the linear mapping between + ``domain`` and ``window``. +- ``p.roots()`` -- Return the roots of `p`. +- ``p.trim()`` -- Remove trailing coefficients. +- ``p.cutdeg(degree)`` -- Truncate p to given degree +- ``p.truncate(size)`` -- Truncate p to given size + +""" +from .polynomial import Polynomial +from .chebyshev import Chebyshev +from .legendre import Legendre +from .hermite import Hermite +from .hermite_e import HermiteE +from .laguerre import Laguerre + +__all__ = [ + "set_default_printstyle", + "polynomial", "Polynomial", + "chebyshev", "Chebyshev", + "legendre", "Legendre", + "hermite", "Hermite", + "hermite_e", "HermiteE", + "laguerre", "Laguerre", +] + + +def set_default_printstyle(style): + """ + Set the default format for the string representation of polynomials. + + Values for ``style`` must be valid inputs to ``__format__``, i.e. 'ascii' + or 'unicode'. + + Parameters + ---------- + style : str + Format string for default printing style. Must be either 'ascii' or + 'unicode'. + + Notes + ----- + The default format depends on the platform: 'unicode' is used on + Unix-based systems and 'ascii' on Windows. This determination is based on + default font support for the unicode superscript and subscript ranges. 
+ + Examples + -------- + >>> p = np.polynomial.Polynomial([1, 2, 3]) + >>> c = np.polynomial.Chebyshev([1, 2, 3]) + >>> np.polynomial.set_default_printstyle('unicode') + >>> print(p) + 1.0 + 2.0·x + 3.0·x² + >>> print(c) + 1.0 + 2.0·T₁(x) + 3.0·T₂(x) + >>> np.polynomial.set_default_printstyle('ascii') + >>> print(p) + 1.0 + 2.0 x + 3.0 x**2 + >>> print(c) + 1.0 + 2.0 T_1(x) + 3.0 T_2(x) + >>> # Formatting supersedes all class/package-level defaults + >>> print(f"{p:unicode}") + 1.0 + 2.0·x + 3.0·x² + """ + if style not in ('unicode', 'ascii'): + raise ValueError( + f"Unsupported format string '{style}'. Valid options are 'ascii' " + f"and 'unicode'" + ) + _use_unicode = True + if style == 'ascii': + _use_unicode = False + from ._polybase import ABCPolyBase + ABCPolyBase._use_unicode = _use_unicode + + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/__init__.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/__init__.pyi new file mode 100644 index 00000000..c9d1c27a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/__init__.pyi @@ -0,0 +1,22 @@ +from numpy._pytesttester import PytestTester + +from numpy.polynomial import ( + chebyshev as chebyshev, + hermite as hermite, + hermite_e as hermite_e, + laguerre as laguerre, + legendre as legendre, + polynomial as polynomial, +) +from numpy.polynomial.chebyshev import Chebyshev as Chebyshev +from numpy.polynomial.hermite import Hermite as Hermite +from numpy.polynomial.hermite_e import HermiteE as HermiteE +from numpy.polynomial.laguerre import Laguerre as Laguerre +from numpy.polynomial.legendre import Legendre as Legendre +from numpy.polynomial.polynomial import Polynomial as Polynomial + +__all__: list[str] +__path__: list[str] +test: PytestTester + +def set_default_printstyle(style): ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/_polybase.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/_polybase.py new file mode 100644 index 00000000..9730574c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/_polybase.py @@ -0,0 +1,1206 @@ +""" +Abstract base class for the various polynomial Classes. + +The ABCPolyBase class provides the methods needed to implement the common API +for the various polynomial classes. It operates as a mixin, but uses the +abc module from the stdlib, hence it is only available for Python >= 2.6. + +""" +import os +import abc +import numbers + +import numpy as np +from . import polyutils as pu + +__all__ = ['ABCPolyBase'] + +class ABCPolyBase(abc.ABC): + """An abstract base class for immutable series classes. + + ABCPolyBase provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the + methods listed below. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + coef : array_like + Series coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where + ``P_i`` is the basis polynomials of degree ``i``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is the derived class domain. + window : (2,) array_like, optional + Window, see domain for its use. The default value is the + derived class window. 
+ symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + Attributes + ---------- + coef : (N,) ndarray + Series coefficients in order of increasing degree. + domain : (2,) ndarray + Domain that is mapped to window. + window : (2,) ndarray + Window that domain is mapped to. + symbol : str + Symbol representing the independent variable. + + Class Attributes + ---------------- + maxpower : int + Maximum power allowed, i.e., the largest number ``n`` such that + ``p(x)**n`` is allowed. This is to limit runaway polynomial size. + domain : (2,) ndarray + Default domain of the class. + window : (2,) ndarray + Default window of the class. + + """ + + # Not hashable + __hash__ = None + + # Opt out of numpy ufuncs and Python ops with ndarray subclasses. + __array_ufunc__ = None + + # Limit runaway size. T_n^m has degree n*m + maxpower = 100 + + # Unicode character mappings for improved __str__ + _superscript_mapping = str.maketrans({ + "0": "⁰", + "1": "¹", + "2": "²", + "3": "³", + "4": "⁴", + "5": "⁵", + "6": "⁶", + "7": "⁷", + "8": "⁸", + "9": "⁹" + }) + _subscript_mapping = str.maketrans({ + "0": "₀", + "1": "₁", + "2": "₂", + "3": "₃", + "4": "₄", + "5": "₅", + "6": "₆", + "7": "₇", + "8": "₈", + "9": "₉" + }) + # Some fonts don't support full unicode character ranges necessary for + # the full set of superscripts and subscripts, including common/default + # fonts in Windows shells/terminals. Therefore, default to ascii-only + # printing on windows. + _use_unicode = not os.name == 'nt' + + @property + def symbol(self): + return self._symbol + + @property + @abc.abstractmethod + def domain(self): + pass + + @property + @abc.abstractmethod + def window(self): + pass + + @property + @abc.abstractmethod + def basis_name(self): + pass + + @staticmethod + @abc.abstractmethod + def _add(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _sub(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _mul(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _div(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _pow(c, pow, maxpower=None): + pass + + @staticmethod + @abc.abstractmethod + def _val(x, c): + pass + + @staticmethod + @abc.abstractmethod + def _int(c, m, k, lbnd, scl): + pass + + @staticmethod + @abc.abstractmethod + def _der(c, m, scl): + pass + + @staticmethod + @abc.abstractmethod + def _fit(x, y, deg, rcond, full): + pass + + @staticmethod + @abc.abstractmethod + def _line(off, scl): + pass + + @staticmethod + @abc.abstractmethod + def _roots(c): + pass + + @staticmethod + @abc.abstractmethod + def _fromroots(r): + pass + + def has_samecoef(self, other): + """Check if coefficients match. + + .. versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``coef`` attribute. + + Returns + ------- + bool : boolean + True if the coefficients are the same, False otherwise. + + """ + if len(self.coef) != len(other.coef): + return False + elif not np.all(self.coef == other.coef): + return False + else: + return True + + def has_samedomain(self, other): + """Check if domains match. + + .. versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``domain`` attribute. + + Returns + ------- + bool : boolean + True if the domains are the same, False otherwise. 
+ + """ + return np.all(self.domain == other.domain) + + def has_samewindow(self, other): + """Check if windows match. + + .. versionadded:: 1.6.0 + + Parameters + ---------- + other : class instance + The other class must have the ``window`` attribute. + + Returns + ------- + bool : boolean + True if the windows are the same, False otherwise. + + """ + return np.all(self.window == other.window) + + def has_sametype(self, other): + """Check if types match. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + other : object + Class instance. + + Returns + ------- + bool : boolean + True if other is same class as self + + """ + return isinstance(other, self.__class__) + + def _get_coefficients(self, other): + """Interpret other as polynomial coefficients. + + The `other` argument is checked to see if it is of the same + class as self with identical domain and window. If so, + return its coefficients, otherwise return `other`. + + .. versionadded:: 1.9.0 + + Parameters + ---------- + other : anything + Object to be checked. + + Returns + ------- + coef + The coefficients of`other` if it is a compatible instance, + of ABCPolyBase, otherwise `other`. + + Raises + ------ + TypeError + When `other` is an incompatible instance of ABCPolyBase. + + """ + if isinstance(other, ABCPolyBase): + if not isinstance(other, self.__class__): + raise TypeError("Polynomial types differ") + elif not np.all(self.domain == other.domain): + raise TypeError("Domains differ") + elif not np.all(self.window == other.window): + raise TypeError("Windows differ") + elif self.symbol != other.symbol: + raise ValueError("Polynomial symbols differ") + return other.coef + return other + + def __init__(self, coef, domain=None, window=None, symbol='x'): + [coef] = pu.as_series([coef], trim=False) + self.coef = coef + + if domain is not None: + [domain] = pu.as_series([domain], trim=False) + if len(domain) != 2: + raise ValueError("Domain has wrong number of elements.") + self.domain = domain + + if window is not None: + [window] = pu.as_series([window], trim=False) + if len(window) != 2: + raise ValueError("Window has wrong number of elements.") + self.window = window + + # Validation for symbol + try: + if not symbol.isidentifier(): + raise ValueError( + "Symbol string must be a valid Python identifier" + ) + # If a user passes in something other than a string, the above + # results in an AttributeError. Catch this and raise a more + # informative exception + except AttributeError: + raise TypeError("Symbol must be a non-empty string") + + self._symbol = symbol + + def __repr__(self): + coef = repr(self.coef)[6:-1] + domain = repr(self.domain)[6:-1] + window = repr(self.window)[6:-1] + name = self.__class__.__name__ + return (f"{name}({coef}, domain={domain}, window={window}, " + f"symbol='{self.symbol}')") + + def __format__(self, fmt_str): + if fmt_str == '': + return self.__str__() + if fmt_str not in ('ascii', 'unicode'): + raise ValueError( + f"Unsupported format string '{fmt_str}' passed to " + f"{self.__class__}.__format__. 
Valid options are " + f"'ascii' and 'unicode'" + ) + if fmt_str == 'ascii': + return self._generate_string(self._str_term_ascii) + return self._generate_string(self._str_term_unicode) + + def __str__(self): + if self._use_unicode: + return self._generate_string(self._str_term_unicode) + return self._generate_string(self._str_term_ascii) + + def _generate_string(self, term_method): + """ + Generate the full string representation of the polynomial, using + ``term_method`` to generate each polynomial term. + """ + # Get configuration for line breaks + linewidth = np.get_printoptions().get('linewidth', 75) + if linewidth < 1: + linewidth = 1 + out = pu.format_float(self.coef[0]) + for i, coef in enumerate(self.coef[1:]): + out += " " + power = str(i + 1) + # Polynomial coefficient + # The coefficient array can be an object array with elements that + # will raise a TypeError with >= 0 (e.g. strings or Python + # complex). In this case, represent the coefficient as-is. + try: + if coef >= 0: + next_term = f"+ " + pu.format_float(coef, parens=True) + else: + next_term = f"- " + pu.format_float(-coef, parens=True) + except TypeError: + next_term = f"+ {coef}" + # Polynomial term + next_term += term_method(power, self.symbol) + # Length of the current line with next term added + line_len = len(out.split('\n')[-1]) + len(next_term) + # If not the last term in the polynomial, it will be two + # characters longer due to the +/- with the next term + if i < len(self.coef[1:]) - 1: + line_len += 2 + # Handle linebreaking + if line_len >= linewidth: + next_term = next_term.replace(" ", "\n", 1) + out += next_term + return out + + @classmethod + def _str_term_unicode(cls, i, arg_str): + """ + String representation of single polynomial term using unicode + characters for superscripts and subscripts. + """ + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis_name, or override " + "_str_term_unicode(cls, i, arg_str)" + ) + return (f"·{cls.basis_name}{i.translate(cls._subscript_mapping)}" + f"({arg_str})") + + @classmethod + def _str_term_ascii(cls, i, arg_str): + """ + String representation of a single polynomial term using ** and _ to + represent superscripts and subscripts, respectively. 
+ """ + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis_name, or override " + "_str_term_ascii(cls, i, arg_str)" + ) + return f" {cls.basis_name}_{i}({arg_str})" + + @classmethod + def _repr_latex_term(cls, i, arg_str, needs_parens): + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis name, or override " + "_repr_latex_term(i, arg_str, needs_parens)") + # since we always add parens, we don't care if the expression needs them + return f"{{{cls.basis_name}}}_{{{i}}}({arg_str})" + + @staticmethod + def _repr_latex_scalar(x, parens=False): + # TODO: we're stuck with disabling math formatting until we handle + # exponents in this function + return r'\text{{{}}}'.format(pu.format_float(x, parens=parens)) + + def _repr_latex_(self): + # get the scaled argument string to the basis functions + off, scale = self.mapparms() + if off == 0 and scale == 1: + term = self.symbol + needs_parens = False + elif scale == 1: + term = f"{self._repr_latex_scalar(off)} + {self.symbol}" + needs_parens = True + elif off == 0: + term = f"{self._repr_latex_scalar(scale)}{self.symbol}" + needs_parens = True + else: + term = ( + f"{self._repr_latex_scalar(off)} + " + f"{self._repr_latex_scalar(scale)}{self.symbol}" + ) + needs_parens = True + + mute = r"\color{{LightGray}}{{{}}}".format + + parts = [] + for i, c in enumerate(self.coef): + # prevent duplication of + and - signs + if i == 0: + coef_str = f"{self._repr_latex_scalar(c)}" + elif not isinstance(c, numbers.Real): + coef_str = f" + ({self._repr_latex_scalar(c)})" + elif not np.signbit(c): + coef_str = f" + {self._repr_latex_scalar(c, parens=True)}" + else: + coef_str = f" - {self._repr_latex_scalar(-c, parens=True)}" + + # produce the string for the term + term_str = self._repr_latex_term(i, term, needs_parens) + if term_str == '1': + part = coef_str + else: + part = rf"{coef_str}\,{term_str}" + + if c == 0: + part = mute(part) + + parts.append(part) + + if parts: + body = ''.join(parts) + else: + # in case somehow there are no coefficients at all + body = '0' + + return rf"${self.symbol} \mapsto {body}$" + + + + # Pickle and copy + + def __getstate__(self): + ret = self.__dict__.copy() + ret['coef'] = self.coef.copy() + ret['domain'] = self.domain.copy() + ret['window'] = self.window.copy() + ret['symbol'] = self.symbol + return ret + + def __setstate__(self, dict): + self.__dict__ = dict + + # Call + + def __call__(self, arg): + off, scl = pu.mapparms(self.domain, self.window) + arg = off + scl*arg + return self._val(arg, self.coef) + + def __iter__(self): + return iter(self.coef) + + def __len__(self): + return len(self.coef) + + # Numeric properties. 
+ + def __neg__(self): + return self.__class__( + -self.coef, self.domain, self.window, self.symbol + ) + + def __pos__(self): + return self + + def __add__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._add(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __sub__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._sub(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __mul__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._mul(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __truediv__(self, other): + # there is no true divide if the rhs is not a Number, although it + # could return the first n elements of an infinite series. + # It is hard to see where n would come from, though. + if not isinstance(other, numbers.Number) or isinstance(other, bool): + raise TypeError( + f"unsupported types for true division: " + f"'{type(self)}', '{type(other)}'" + ) + return self.__floordiv__(other) + + def __floordiv__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __mod__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __divmod__(self, other): + othercoef = self._get_coefficients(other) + try: + quo, rem = self._div(self.coef, othercoef) + except ZeroDivisionError: + raise + except Exception: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window, self.symbol) + rem = self.__class__(rem, self.domain, self.window, self.symbol) + return quo, rem + + def __pow__(self, other): + coef = self._pow(self.coef, other, maxpower=self.maxpower) + res = self.__class__(coef, self.domain, self.window, self.symbol) + return res + + def __radd__(self, other): + try: + coef = self._add(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rsub__(self, other): + try: + coef = self._sub(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rmul__(self, other): + try: + coef = self._mul(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rdiv__(self, other): + # set to __floordiv__ /. + return self.__rfloordiv__(other) + + def __rtruediv__(self, other): + # An instance of ABCPolyBase is not considered a + # Number. 
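+        # (a scalar divided by a series has no polynomial representation)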
+ return NotImplemented + + def __rfloordiv__(self, other): + res = self.__rdivmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __rmod__(self, other): + res = self.__rdivmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __rdivmod__(self, other): + try: + quo, rem = self._div(other, self.coef) + except ZeroDivisionError: + raise + except Exception: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window, self.symbol) + rem = self.__class__(rem, self.domain, self.window, self.symbol) + return quo, rem + + def __eq__(self, other): + res = (isinstance(other, self.__class__) and + np.all(self.domain == other.domain) and + np.all(self.window == other.window) and + (self.coef.shape == other.coef.shape) and + np.all(self.coef == other.coef) and + (self.symbol == other.symbol)) + return res + + def __ne__(self, other): + return not self.__eq__(other) + + # + # Extra methods. + # + + def copy(self): + """Return a copy. + + Returns + ------- + new_series : series + Copy of self. + + """ + return self.__class__(self.coef, self.domain, self.window, self.symbol) + + def degree(self): + """The degree of the series. + + .. versionadded:: 1.5.0 + + Returns + ------- + degree : int + Degree of the series, one less than the number of coefficients. + + Examples + -------- + + Create a polynomial object for ``1 + 7*x + 4*x**2``: + + >>> poly = np.polynomial.Polynomial([1, 7, 4]) + >>> print(poly) + 1.0 + 7.0·x + 4.0·x² + >>> poly.degree() + 2 + + Note that this method does not check for non-zero coefficients. + You must trim the polynomial to remove any trailing zeroes: + + >>> poly = np.polynomial.Polynomial([1, 7, 0]) + >>> print(poly) + 1.0 + 7.0·x + 0.0·x² + >>> poly.degree() + 2 + >>> poly.trim().degree() + 1 + + """ + return len(self) - 1 + + def cutdeg(self, deg): + """Truncate series to the given degree. + + Reduce the degree of the series to `deg` by discarding the + high order terms. If `deg` is greater than the current degree a + copy of the current series is returned. This can be useful in least + squares where the coefficients of the high degree terms may be very + small. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + deg : non-negative int + The series is reduced to degree `deg` by discarding the high + order terms. The value of `deg` must be a non-negative integer. + + Returns + ------- + new_series : series + New instance of series with reduced degree. + + """ + return self.truncate(deg + 1) + + def trim(self, tol=0): + """Remove trailing coefficients + + Remove trailing coefficients until a coefficient is reached whose + absolute value greater than `tol` or the beginning of the series is + reached. If all the coefficients would be removed the series is set + to ``[0]``. A new series instance is returned with the new + coefficients. The current instance remains unchanged. + + Parameters + ---------- + tol : non-negative number. + All trailing coefficients less than `tol` will be removed. + + Returns + ------- + new_series : series + New instance of series with trimmed coefficients. + + """ + coef = pu.trimcoef(self.coef, tol) + return self.__class__(coef, self.domain, self.window, self.symbol) + + def truncate(self, size): + """Truncate series to length `size`. + + Reduce the series to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. This + can be useful in least squares where the coefficients of the + high degree terms may be very small. 
+ + Parameters + ---------- + size : positive int + The series is reduced to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. + + Returns + ------- + new_series : series + New instance of series with truncated coefficients. + + """ + isize = int(size) + if isize != size or isize < 1: + raise ValueError("size must be a positive integer") + if isize >= len(self.coef): + coef = self.coef + else: + coef = self.coef[:isize] + return self.__class__(coef, self.domain, self.window, self.symbol) + + def convert(self, domain=None, kind=None, window=None): + """Convert series to a different kind and/or domain and/or window. + + Parameters + ---------- + domain : array_like, optional + The domain of the converted series. If the value is None, + the default domain of `kind` is used. + kind : class, optional + The polynomial series type class to which the current instance + should be converted. If kind is None, then the class of the + current instance is used. + window : array_like, optional + The window of the converted series. If the value is None, + the default window of `kind` is used. + + Returns + ------- + new_series : series + The returned class can be of different type than the current + instance and/or have a different domain and/or different + window. + + Notes + ----- + Conversion between domains and class types can result in + numerically ill defined series. + + """ + if kind is None: + kind = self.__class__ + if domain is None: + domain = kind.domain + if window is None: + window = kind.window + return self(kind.identity(domain, window=window, symbol=self.symbol)) + + def mapparms(self): + """Return the mapping parameters. + + The returned values define a linear map ``off + scl*x`` that is + applied to the input arguments before the series is evaluated. The + map depends on the ``domain`` and ``window``; if the current + ``domain`` is equal to the ``window`` the resulting map is the + identity. If the coefficients of the series instance are to be + used by themselves outside this class, then the linear function + must be substituted for the ``x`` in the standard representation of + the base polynomials. + + Returns + ------- + off, scl : float or complex + The mapping function is defined by ``off + scl*x``. + + Notes + ----- + If the current domain is the interval ``[l1, r1]`` and the window + is ``[l2, r2]``, then the linear mapping function ``L`` is + defined by the equations:: + + L(l1) = l2 + L(r1) = r2 + + """ + return pu.mapparms(self.domain, self.window) + + def integ(self, m=1, k=[], lbnd=None): + """Integrate. + + Return a series instance that is the definite integral of the + current series. + + Parameters + ---------- + m : non-negative int + The number of integrations to perform. + k : array_like + Integration constants. The first constant is applied to the + first integration, the second to the second, and so on. The + list of values must less than or equal to `m` in length and any + missing values are set to zero. + lbnd : Scalar + The lower bound of the definite integral. + + Returns + ------- + new_series : series + A new series representing the integral. The domain is the same + as the domain of the integrated series. + + """ + off, scl = self.mapparms() + if lbnd is None: + lbnd = 0 + else: + lbnd = off + scl*lbnd + coef = self._int(self.coef, m, k, lbnd, 1./scl) + return self.__class__(coef, self.domain, self.window, self.symbol) + + def deriv(self, m=1): + """Differentiate. 
+ + Return a series instance of that is the derivative of the current + series. + + Parameters + ---------- + m : non-negative int + Find the derivative of order `m`. + + Returns + ------- + new_series : series + A new series representing the derivative. The domain is the same + as the domain of the differentiated series. + + """ + off, scl = self.mapparms() + coef = self._der(self.coef, m, scl) + return self.__class__(coef, self.domain, self.window, self.symbol) + + def roots(self): + """Return the roots of the series polynomial. + + Compute the roots for the series. Note that the accuracy of the + roots decreases the further outside the `domain` they lie. + + Returns + ------- + roots : ndarray + Array containing the roots of the series. + + """ + roots = self._roots(self.coef) + return pu.mapdomain(roots, self.window, self.domain) + + def linspace(self, n=100, domain=None): + """Return x, y values at equally spaced points in domain. + + Returns the x, y values at `n` linearly spaced points across the + domain. Here y is the value of the polynomial at the points x. By + default the domain is the same as that of the series instance. + This method is intended mostly as a plotting aid. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + n : int, optional + Number of point pairs to return. The default value is 100. + domain : {None, array_like}, optional + If not None, the specified domain is used instead of that of + the calling instance. It should be of the form ``[beg,end]``. + The default is None which case the class domain is used. + + Returns + ------- + x, y : ndarray + x is equal to linspace(self.domain[0], self.domain[1], n) and + y is the series evaluated at element of x. + + """ + if domain is None: + domain = self.domain + x = np.linspace(domain[0], domain[1], n) + y = self(x) + return x, y + + @classmethod + def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, + window=None, symbol='x'): + """Least squares fit to data. + + Return a series instance that is the least squares fit to the data + `y` sampled at `x`. The domain of the returned instance can be + specified and this will often result in a superior fit with less + chance of ill conditioning. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) + y-coordinates of the M sample points ``(x[i], y[i])``. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + domain : {None, [beg, end], []}, optional + Domain to use for the returned series. If ``None``, + then a minimal domain that covers the points `x` is chosen. If + ``[]`` the class domain is used. The default value was the + class domain in NumPy 1.4 and ``None`` in later versions. + The ``[]`` option was added in numpy 1.5.0. + rcond : float, optional + Relative condition number of the fit. Singular values smaller + than this relative to the largest singular value will be + ignored. The default value is len(x)*eps, where eps is the + relative precision of the float type, about 2e-16 in most + cases. + full : bool, optional + Switch determining nature of return value. When it is False + (the default) just the coefficients are returned, when True + diagnostic information from the singular value decomposition is + also returned. 
+ w : array_like, shape (M,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have + the same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + .. versionadded:: 1.5.0 + window : {[beg, end]}, optional + Window to use for the returned series. The default + value is the default class domain + + .. versionadded:: 1.6.0 + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + A series that represents the least squares fit to the data and + has the domain and window specified in the call. If the + coefficients for the unscaled and unshifted basis polynomials are + of interest, do ``new_series.convert().coef``. + + [resid, rank, sv, rcond] : list + These values are only returned if ``full == True`` + + - resid -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - sv -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `linalg.lstsq`. + + """ + if domain is None: + domain = pu.getdomain(x) + elif type(domain) is list and len(domain) == 0: + domain = cls.domain + + if window is None: + window = cls.window + + xnew = pu.mapdomain(x, domain, window) + res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full) + if full: + [coef, status] = res + return ( + cls(coef, domain=domain, window=window, symbol=symbol), status + ) + else: + coef = res + return cls(coef, domain=domain, window=window, symbol=symbol) + + @classmethod + def fromroots(cls, roots, domain=[], window=None, symbol='x'): + """Return series instance that has the specified roots. + + Returns a series representing the product + ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a + list of roots. + + Parameters + ---------- + roots : array_like + List of roots. + domain : {[], None, array_like}, optional + Domain for the resulting series. If None the domain is the + interval from the smallest root to the largest. If [] the + domain is the class domain. The default is []. + window : {None, array_like}, optional + Window for the returned series. If None the class window is + used. The default is None. + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + Series with the specified roots. + + """ + [roots] = pu.as_series([roots], trim=False) + if domain is None: + domain = pu.getdomain(roots) + elif type(domain) is list and len(domain) == 0: + domain = cls.domain + + if window is None: + window = cls.window + + deg = len(roots) + off, scl = pu.mapparms(domain, window) + rnew = off + scl*roots + coef = cls._fromroots(rnew) / scl**deg + return cls(coef, domain=domain, window=window, symbol=symbol) + + @classmethod + def identity(cls, domain=None, window=None, symbol='x'): + """Identity function. + + If ``p`` is the returned series, then ``p(x) == x`` for all + values of x. + + Parameters + ---------- + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. 
+ window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + Series of representing the identity. + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + off, scl = pu.mapparms(window, domain) + coef = cls._line(off, scl) + return cls(coef, domain, window, symbol) + + @classmethod + def basis(cls, deg, domain=None, window=None, symbol='x'): + """Series basis polynomial of degree `deg`. + + Returns the series representing the basis polynomial of degree `deg`. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + deg : int + Degree of the basis polynomial for the series. Must be >= 0. + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + symbol : str, optional + Symbol representing the independent variable. Default is 'x'. + + Returns + ------- + new_series : series + A series with the coefficient of the `deg` term set to one and + all others zero. + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + ideg = int(deg) + + if ideg != deg or ideg < 0: + raise ValueError("deg must be non-negative integer") + return cls([0]*ideg + [1], domain, window, symbol) + + @classmethod + def cast(cls, series, domain=None, window=None): + """Convert series to series of this class. + + The `series` is expected to be an instance of some polynomial + series of one of the types supported by by the numpy.polynomial + module, but could be some other class that supports the convert + method. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + series : series + The series instance to be converted. + domain : {None, array_like}, optional + If given, the array must be of the form ``[beg, end]``, where + ``beg`` and ``end`` are the endpoints of the domain. If None is + given then the class domain is used. The default is None. + window : {None, array_like}, optional + If given, the resulting array must be if the form + ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of + the window. If None is given then the class window is used. The + default is None. + + Returns + ------- + new_series : series + A series of the same kind as the calling class and equal to + `series` when evaluated. 
+ + See Also + -------- + convert : similar instance method + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + return series.convert(domain, cls, window) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/_polybase.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/_polybase.pyi new file mode 100644 index 00000000..25c740db --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/_polybase.pyi @@ -0,0 +1,71 @@ +import abc +from typing import Any, ClassVar + +__all__: list[str] + +class ABCPolyBase(abc.ABC): + __hash__: ClassVar[None] # type: ignore[assignment] + __array_ufunc__: ClassVar[None] + maxpower: ClassVar[int] + coef: Any + @property + def symbol(self) -> str: ... + @property + @abc.abstractmethod + def domain(self): ... + @property + @abc.abstractmethod + def window(self): ... + @property + @abc.abstractmethod + def basis_name(self): ... + def has_samecoef(self, other): ... + def has_samedomain(self, other): ... + def has_samewindow(self, other): ... + def has_sametype(self, other): ... + def __init__(self, coef, domain=..., window=..., symbol: str = ...) -> None: ... + def __format__(self, fmt_str): ... + def __call__(self, arg): ... + def __iter__(self): ... + def __len__(self): ... + def __neg__(self): ... + def __pos__(self): ... + def __add__(self, other): ... + def __sub__(self, other): ... + def __mul__(self, other): ... + def __truediv__(self, other): ... + def __floordiv__(self, other): ... + def __mod__(self, other): ... + def __divmod__(self, other): ... + def __pow__(self, other): ... + def __radd__(self, other): ... + def __rsub__(self, other): ... + def __rmul__(self, other): ... + def __rdiv__(self, other): ... + def __rtruediv__(self, other): ... + def __rfloordiv__(self, other): ... + def __rmod__(self, other): ... + def __rdivmod__(self, other): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def copy(self): ... + def degree(self): ... + def cutdeg(self, deg): ... + def trim(self, tol=...): ... + def truncate(self, size): ... + def convert(self, domain=..., kind=..., window=...): ... + def mapparms(self): ... + def integ(self, m=..., k = ..., lbnd=...): ... + def deriv(self, m=...): ... + def roots(self): ... + def linspace(self, n=..., domain=...): ... + @classmethod + def fit(cls, x, y, deg, domain=..., rcond=..., full=..., w=..., window=...): ... + @classmethod + def fromroots(cls, roots, domain = ..., window=...): ... + @classmethod + def identity(cls, domain=..., window=...): ... + @classmethod + def basis(cls, deg, domain=..., window=...): ... + @classmethod + def cast(cls, series, domain=..., window=...): ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/chebyshev.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/chebyshev.py new file mode 100644 index 00000000..efbe13e0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/chebyshev.py @@ -0,0 +1,2082 @@ +""" +==================================================== +Chebyshev Series (:mod:`numpy.polynomial.chebyshev`) +==================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Chebyshev series, including a `Chebyshev` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- + +.. 
autosummary:: + :toctree: generated/ + + Chebyshev + + +Constants +--------- + +.. autosummary:: + :toctree: generated/ + + chebdomain + chebzero + chebone + chebx + +Arithmetic +---------- + +.. autosummary:: + :toctree: generated/ + + chebadd + chebsub + chebmulx + chebmul + chebdiv + chebpow + chebval + chebval2d + chebval3d + chebgrid2d + chebgrid3d + +Calculus +-------- + +.. autosummary:: + :toctree: generated/ + + chebder + chebint + +Misc Functions +-------------- + +.. autosummary:: + :toctree: generated/ + + chebfromroots + chebroots + chebvander + chebvander2d + chebvander3d + chebgauss + chebweight + chebcompanion + chebfit + chebpts1 + chebpts2 + chebtrim + chebline + cheb2poly + poly2cheb + chebinterpolate + +See also +-------- +`numpy.polynomial` + +Notes +----- +The implementations of multiplication, division, integration, and +differentiation use the algebraic identities [1]_: + +.. math:: + T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\ + z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}. + +where + +.. math:: x = \\frac{z + z^{-1}}{2}. + +These identities allow a Chebyshev series to be expressed as a finite, +symmetric Laurent series. In this module, this sort of Laurent series +is referred to as a "z-series." + +References +---------- +.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev + Polynomials," *Journal of Statistical Planning and Inference 14*, 2008 + (https://web.archive.org/web/20080221202153/https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4) + +""" +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd', + 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval', + 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', + 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1', + 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d', + 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion', + 'chebgauss', 'chebweight', 'chebinterpolate'] + +chebtrim = pu.trimcoef + +# +# A collection of functions for manipulating z-series. These are private +# functions and do minimal error checking. +# + +def _cseries_to_zseries(c): + """Convert Chebyshev series to z-series. + + Convert a Chebyshev series to the equivalent z-series. The result is + never an empty array. The dtype of the return is the same as that of + the input. No checks are run on the arguments as this routine is for + internal use. + + Parameters + ---------- + c : 1-D ndarray + Chebyshev coefficients, ordered from low to high + + Returns + ------- + zs : 1-D ndarray + Odd length symmetric z-series, ordered from low to high. + + """ + n = c.size + zs = np.zeros(2*n-1, dtype=c.dtype) + zs[n-1:] = c/2 + return zs + zs[::-1] + + +def _zseries_to_cseries(zs): + """Convert z-series to a Chebyshev series. + + Convert a z series to the equivalent Chebyshev series. The result is + never an empty array. The dtype of the return is the same as that of + the input. No checks are run on the arguments as this routine is for + internal use. + + Parameters + ---------- + zs : 1-D ndarray + Odd length symmetric z-series, ordered from low to high. + + Returns + ------- + c : 1-D ndarray + Chebyshev coefficients, ordered from low to high. 
+ + """ + n = (zs.size + 1)//2 + c = zs[n-1:].copy() + c[1:n] *= 2 + return c + + +def _zseries_mul(z1, z2): + """Multiply two z-series. + + Multiply two z-series to produce a z-series. + + Parameters + ---------- + z1, z2 : 1-D ndarray + The arrays must be 1-D but this is not checked. + + Returns + ------- + product : 1-D ndarray + The product z-series. + + Notes + ----- + This is simply convolution. If symmetric/anti-symmetric z-series are + denoted by S/A then the following rules apply: + + S*S, A*A -> S + S*A, A*S -> A + + """ + return np.convolve(z1, z2) + + +def _zseries_div(z1, z2): + """Divide the first z-series by the second. + + Divide `z1` by `z2` and return the quotient and remainder as z-series. + Warning: this implementation only applies when both z1 and z2 have the + same symmetry, which is sufficient for present purposes. + + Parameters + ---------- + z1, z2 : 1-D ndarray + The arrays must be 1-D and have the same symmetry, but this is not + checked. + + Returns + ------- + + (quotient, remainder) : 1-D ndarrays + Quotient and remainder as z-series. + + Notes + ----- + This is not the same as polynomial division on account of the desired form + of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A + then the following rules apply: + + S/S -> S,S + A/A -> S,A + + The restriction to types of the same symmetry could be fixed but seems like + unneeded generality. There is no natural form for the remainder in the case + where there is no symmetry. + + """ + z1 = z1.copy() + z2 = z2.copy() + lc1 = len(z1) + lc2 = len(z2) + if lc2 == 1: + z1 /= z2 + return z1, z1[:1]*0 + elif lc1 < lc2: + return z1[:1]*0, z1 + else: + dlen = lc1 - lc2 + scl = z2[0] + z2 /= scl + quo = np.empty(dlen + 1, dtype=z1.dtype) + i = 0 + j = dlen + while i < j: + r = z1[i] + quo[i] = z1[i] + quo[dlen - i] = r + tmp = r*z2 + z1[i:i+lc2] -= tmp + z1[j:j+lc2] -= tmp + i += 1 + j -= 1 + r = z1[i] + quo[i] = r + tmp = r*z2 + z1[i:i+lc2] -= tmp + quo /= scl + rem = z1[i+1:i-1+lc2].copy() + return quo, rem + + +def _zseries_der(zs): + """Differentiate a z-series. + + The derivative is with respect to x, not z. This is achieved using the + chain rule and the value of dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to differentiate. + + Returns + ------- + derivative : z-series + The derivative + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + multiplying the value of zs by two also so that the two cancels in the + division. + + """ + n = len(zs)//2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs *= np.arange(-n, n+1)*2 + d, r = _zseries_div(zs, ns) + return d + + +def _zseries_int(zs): + """Integrate a z-series. + + The integral is with respect to x, not z. This is achieved by a change + of variable using dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to integrate + + Returns + ------- + integral : z-series + The indefinite integral + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + dividing the resulting zs by two. 
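+
+    Examples
+    --------
+    For example, integrating the z-series of the constant ``T_0`` gives
+    the z-series of ``x`` (that is, of ``T_1``):
+
+    >>> _zseries_int(np.array([1.]))
+    array([0.5, 0. , 0.5])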
+ + """ + n = 1 + len(zs)//2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs = _zseries_mul(zs, ns) + div = np.arange(-n, n+1)*2 + zs[:n] /= div[:n] + zs[n+1:] /= div[n+1:] + zs[n] = 0 + return zs + +# +# Chebyshev series functions +# + + +def poly2cheb(pol): + """ + Convert a polynomial to a Chebyshev series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Chebyshev series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Chebyshev + series. + + See Also + -------- + cheb2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> p = P.Polynomial(range(4)) + >>> p + Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) + >>> c = p.convert(kind=P.Chebyshev) + >>> c + Chebyshev([1. , 3.25, 1. , 0.75], domain=[-1., 1.], window=[-1., 1.]) + >>> P.chebyshev.poly2cheb(range(4)) + array([1. , 3.25, 1. , 0.75]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = chebadd(chebmulx(res), pol[i]) + return res + + +def cheb2poly(c): + """ + Convert a Chebyshev series to a polynomial. + + Convert an array representing the coefficients of a Chebyshev series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Chebyshev series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2cheb + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> c = P.Chebyshev(range(4)) + >>> c + Chebyshev([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([-2., -8., 4., 12.], domain=[-1., 1.], window=[-1., 1.]) + >>> P.chebyshev.cheb2poly(range(4)) + array([-2., -8., 4., 12.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1) + c1 = polyadd(tmp, polymulx(c1)*2) + return polyadd(c0, polymulx(c1)) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Chebyshev default domain. +chebdomain = np.array([-1, 1]) + +# Chebyshev coefficients representing zero. +chebzero = np.array([0]) + +# Chebyshev coefficients representing one. +chebone = np.array([1]) + +# Chebyshev coefficients representing the identity x. +chebx = np.array([0, 1]) + + +def chebline(off, scl): + """ + Chebyshev series whose graph is a straight line. 
+ + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Chebyshev series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebline(3,2) + array([3, 2]) + >>> C.chebval(-3, C.chebline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def chebfromroots(roots): + """ + Generate a Chebyshev series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Chebyshev form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Chebyshev form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.25, 0. , 0.25]) + >>> j = complex(0,1) + >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([1.5+0.j, 0. +0.j, 0.5+0.j]) + + """ + return pu._fromroots(chebline, chebmul, roots) + + +def chebadd(c1, c2): + """ + Add one Chebyshev series to another. + + Returns the sum of two Chebyshev series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Chebyshev series of their sum. + + See Also + -------- + chebsub, chebmulx, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Chebyshev series + is a Chebyshev series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebadd(c1,c2) + array([4., 4., 4.]) + + """ + return pu._add(c1, c2) + + +def chebsub(c1, c2): + """ + Subtract one Chebyshev series from another. + + Returns the difference of two Chebyshev series `c1` - `c2`. 
The
+    sequences of coefficients are from lowest order term to highest, i.e.,
+    [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Chebyshev series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Of Chebyshev series coefficients representing their difference.
+
+    See Also
+    --------
+    chebadd, chebmulx, chebmul, chebdiv, chebpow
+
+    Notes
+    -----
+    Unlike multiplication, division, etc., the difference of two Chebyshev
+    series is a Chebyshev series (without having to "reproject" the result
+    onto the basis set) so subtraction, just like that of "standard"
+    polynomials, is simply "component-wise."
+
+    Examples
+    --------
+    >>> from numpy.polynomial import chebyshev as C
+    >>> c1 = (1,2,3)
+    >>> c2 = (3,2,1)
+    >>> C.chebsub(c1,c2)
+    array([-2.,  0.,  2.])
+    >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)
+    array([ 2.,  0., -2.])
+
+    """
+    return pu._sub(c1, c2)
+
+
+def chebmulx(c):
+    """Multiply a Chebyshev series by x.
+
+    Multiply the polynomial `c` by x, where x is the independent
+    variable.
+
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of Chebyshev series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Array representing the result of the multiplication.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.5.0
+
+    Examples
+    --------
+    >>> from numpy.polynomial import chebyshev as C
+    >>> C.chebmulx([1,2,3])
+    array([1. , 2.5, 1. , 1.5])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    # The zero series needs special treatment
+    if len(c) == 1 and c[0] == 0:
+        return c
+
+    prd = np.empty(len(c) + 1, dtype=c.dtype)
+    prd[0] = c[0]*0
+    prd[1] = c[0]
+    if len(c) > 1:
+        tmp = c[1:]/2
+        prd[2:] = tmp
+        prd[0:-2] += tmp
+    return prd
+
+
+def chebmul(c1, c2):
+    """
+    Multiply one Chebyshev series by another.
+
+    Returns the product of two Chebyshev series `c1` * `c2`. The arguments
+    are sequences of coefficients, from lowest order "term" to highest,
+    e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Chebyshev series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Of Chebyshev series coefficients representing their product.
+
+    See Also
+    --------
+    chebadd, chebsub, chebmulx, chebdiv, chebpow
+
+    Notes
+    -----
+    In general, the (polynomial) product of two C-series results in terms
+    that are not in the Chebyshev polynomial basis set. Thus, to express
+    the product as a C-series, it is typically necessary to "reproject"
+    the product onto said basis set, which typically produces
+    "unintuitive" (but correct) results; see Examples section below.
+
+    Examples
+    --------
+    >>> from numpy.polynomial import chebyshev as C
+    >>> c1 = (1,2,3)
+    >>> c2 = (3,2,1)
+    >>> C.chebmul(c1,c2) # multiplication requires "reprojection"
+    array([ 6.5, 12. , 12. ,  4. ,  1.5])
+
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = pu.as_series([c1, c2])
+    z1 = _cseries_to_zseries(c1)
+    z2 = _cseries_to_zseries(c2)
+    prd = _zseries_mul(z1, z2)
+    ret = _zseries_to_cseries(prd)
+    return pu.trimseq(ret)
+
+
+def chebdiv(c1, c2):
+    """
+    Divide one Chebyshev series by another.
+
+    Returns the quotient-with-remainder of two Chebyshev series
+    `c1` / `c2`. The arguments are sequences of coefficients from lowest
+    order "term" to highest, e.g., [1,2,3] represents the series
+    ``T_0 + 2*T_1 + 3*T_2``.
+ + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Chebyshev series coefficients representing the quotient and + remainder. + + See Also + -------- + chebadd, chebsub, chebmulx, chebmul, chebpow + + Notes + ----- + In general, the (polynomial) division of one C-series by another + results in quotient and remainder terms that are not in the Chebyshev + polynomial basis set. Thus, to express these results as C-series, it + is typically necessary to "reproject" the results onto said basis + set, which typically produces "unintuitive" (but correct) results; + see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not + (array([3.]), array([-8., -4.])) + >>> c2 = (0,1,2,3) + >>> C.chebdiv(c2,c1) # neither "intuitive" + (array([0., 2.]), array([-2., -4.])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + # note: this is more efficient than `pu._div(chebmul, c1, c2)` + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + z1 = _cseries_to_zseries(c1) + z2 = _cseries_to_zseries(c2) + quo, rem = _zseries_div(z1, z2) + quo = pu.trimseq(_zseries_to_cseries(quo)) + rem = pu.trimseq(_zseries_to_cseries(rem)) + return quo, rem + + +def chebpow(c, pow, maxpower=16): + """Raise a Chebyshev series to a power. + + Returns the Chebyshev series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Chebyshev series of power. + + See Also + -------- + chebadd, chebsub, chebmulx, chebmul, chebdiv + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebpow([1, 2, 3, 4], 2) + array([15.5, 22. , 16. , ..., 12.5, 12. , 8. ]) + + """ + # note: this is more efficient than `pu._pow(chebmul, c1, c2)`, as it + # avoids converting between z and c series repeatedly + + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + zs = _cseries_to_zseries(c) + prd = zs + for i in range(2, power + 1): + prd = np.convolve(prd, zs) + return _zseries_to_cseries(prd) + + +def chebder(c, m=1, scl=1, axis=0): + """ + Differentiate a Chebyshev series. + + Returns the Chebyshev series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). 
The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2`` + while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + + 2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Chebyshev series of the derivative. + + See Also + -------- + chebint + + Notes + ----- + In general, the result of differentiating a C-series needs to be + "reprojected" onto the C-series basis set. Thus, typically, the + result of this function is "unintuitive," albeit correct; see Examples + section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3,4) + >>> C.chebder(c) + array([14., 12., 24.]) + >>> C.chebder(c,3) + array([96.]) + >>> C.chebder(c,scl=-1) + array([-14., -12., -24.]) + >>> C.chebder(c,2,-1) + array([12., 96.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2*j)*c[j] + c[j - 2] += (j*c[j])/(j - 2) + if n > 1: + der[1] = 4*c[2] + der[0] = c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Chebyshev series. + + Returns the Chebyshev series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]] + represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) + + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). 
The value of the first integral at zero + is the first value in the list, the value of the second integral + at zero is the second value, etc. If ``k == []`` (the default), + all constants are set to zero. If ``m == 1``, a single scalar can + be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + C-series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + chebder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a`- perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3) + >>> C.chebint(c) + array([ 0.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,3) + array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, # may vary + 0.00625 ]) + >>> C.chebint(c, k=3) + array([ 3.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,lbnd=-2) + array([ 8.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,scl=-2) + array([-1., 1., -1., -1.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1]/4 + for j in range(2, n): + tmp[j + 1] = c[j]/(2*(j + 1)) + tmp[j - 1] -= c[j]/(2*(j - 1)) + tmp[0] += k[i] - chebval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def chebval(x, c, tensor=True): + """ + Evaluate a Chebyshev series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. 
If
+    `c` is multidimensional, then the shape of the result depends on the
+    value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
+    x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
+    scalars have shape ``()``.
+
+    Trailing zeros in the coefficients will be used in the evaluation, so
+    they should be avoided if efficiency is a concern.
+
+    Parameters
+    ----------
+    x : array_like, compatible object
+        If `x` is a list or tuple, it is converted to an ndarray, otherwise
+        it is left unchanged and treated as a scalar. In either case, `x`
+        or its elements must support addition and multiplication with
+        themselves and with the elements of `c`.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree n are contained in c[n]. If `c` is multidimensional the
+        remaining indices enumerate multiple polynomials. In the two
+        dimensional case the coefficients may be thought of as stored in
+        the columns of `c`.
+    tensor : boolean, optional
+        If True, the shape of the coefficient array is extended with ones
+        on the right, one for each dimension of `x`. Scalars have dimension 0
+        for this action. The result is that every column of coefficients in
+        `c` is evaluated for every element of `x`. If False, `x` is broadcast
+        over the columns of `c` for the evaluation. This keyword is useful
+        when `c` is multidimensional. The default value is True.
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    values : ndarray, algebra_like
+        The shape of the return value is described above.
+
+    See Also
+    --------
+    chebval2d, chebgrid2d, chebval3d, chebgrid3d
+
+    Notes
+    -----
+    The evaluation uses Clenshaw recursion, aka synthetic division.
+
+    """
+    c = np.array(c, ndmin=1, copy=True)
+    if c.dtype.char in '?bBhHiIlLqQpP':
+        c = c.astype(np.double)
+    if isinstance(x, (tuple, list)):
+        x = np.asarray(x)
+    if isinstance(x, np.ndarray) and tensor:
+        c = c.reshape(c.shape + (1,)*x.ndim)
+
+    if len(c) == 1:
+        c0 = c[0]
+        c1 = 0
+    elif len(c) == 2:
+        c0 = c[0]
+        c1 = c[1]
+    else:
+        x2 = 2*x
+        c0 = c[-2]
+        c1 = c[-1]
+        for i in range(3, len(c) + 1):
+            tmp = c0
+            c0 = c[-i] - c1
+            c1 = tmp + c1*x2
+    return c0 + c1*x
+
+
+def chebval2d(x, y, c):
+    """
+    Evaluate a 2-D Chebyshev series at points (x, y).
+
+    This function returns the values:
+
+    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y)
+
+    The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars, and they
+    must have the same shape after conversion. In either case, either `x`
+    and `y` or their elements must support multiplication and addition both
+    with themselves and with the elements of `c`.
+
+    If `c` is a 1-D array, a one is implicitly appended to its shape to make
+    it 2-D. The shape of the result will be c.shape[2:] + x.shape.
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points `(x, y)`,
+        where `x` and `y` must have the same shape. If `x` or `y` is a list
+        or tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and if it isn't an ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term
+        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
+        dimension greater than 2 the remaining indices enumerate multiple
+        sets of coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional Chebyshev series at points formed
+        from pairs of corresponding values from `x` and `y`.
+
+    See Also
+    --------
+    chebval, chebgrid2d, chebval3d, chebgrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._valnd(chebval, c, x, y)
+
+
+def chebgrid2d(x, y, c):
+    """
+    Evaluate a 2-D Chebyshev series on the Cartesian product of x and y.
+
+    This function returns the values:
+
+    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b),
+
+    where the points `(a, b)` consist of all pairs formed by taking
+    `a` from `x` and `b` from `y`. The resulting points form a grid with
+    `x` in the first dimension and `y` in the second.
+
+    The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars. In either
+    case, either `x` and `y` or their elements must support multiplication
+    and addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than two dimensions, ones are implicitly appended to
+    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+    x.shape + y.shape.
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points in the
+        Cartesian product of `x` and `y`. If `x` or `y` is a list or
+        tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and, if it isn't an ndarray, it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
+        greater than two the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional Chebyshev series at points in the
+        Cartesian product of `x` and `y`.
+
+    See Also
+    --------
+    chebval, chebval2d, chebval3d, chebgrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._gridnd(chebval, c, x, y)
+
+
+def chebval3d(x, y, z, c):
+    """
+    Evaluate a 3-D Chebyshev series at points (x, y, z).
+
+    This function returns the values:
+
+    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z)
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if
+    they are tuples or lists, otherwise they are treated as scalars and
+    they must have the same shape after conversion. In either case, either
+    `x`, `y`, and `z` or their elements must support multiplication and
+    addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+    shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible object
+        The three dimensional series is evaluated at the points
+        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
+        any of `x`, `y`, or `z` is a list or tuple, it is first converted
+        to an ndarray, otherwise it is left unchanged and if it isn't an
+        ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than 3 the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the multidimensional polynomial on points formed with
+        triples of corresponding values from `x`, `y`, and `z`.
+
+    See Also
+    --------
+    chebval, chebval2d, chebgrid2d, chebgrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._valnd(chebval, c, x, y, z)
+
+
+def chebgrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c)
+
+    where the points `(a, b, c)` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional Chebyshev series at points in
+        the Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    chebval, chebval2d, chebgrid2d, chebval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._gridnd(chebval, c, x, y, z)
+
+
+def chebvander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = T_i(x),
+
+    where `0 <= i <= deg`. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the Chebyshev polynomial.
+
+    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+    matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and
+    ``chebval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of Chebyshev series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding Chebyshev polynomial. The dtype will be the same as
+        the converted `x`.
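+
+    Examples
+    --------
+    For example, the columns below hold ``T_0``, ``T_1``, and ``T_2``
+    evaluated at three sample points:
+
+    >>> import numpy.polynomial.chebyshev as C
+    >>> C.chebvander([-1., 0., 1.], 2)
+    array([[ 1., -1.,  1.],
+           [ 1.,  0., -1.],
+           [ 1.,  1.,  1.]])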
+
+    """
+    ideg = pu._deprecate_as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=False, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    # Use forward recursion to generate the entries.
+    v[0] = x*0 + 1
+    if ideg > 0:
+        x2 = 2*x
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = v[i-1]*x2 - v[i-2]
+    return np.moveaxis(v, 0, -1)
+
+
+def chebvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = T_i(x) * T_j(y),
+
+    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the degrees of
+    the Chebyshev polynomials.
+
+    If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D Chebyshev
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    chebvander, chebvander3d, chebval2d, chebval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._vander_nd_flat((chebvander, chebvander), (x, y), deg)
+
+
+def chebvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z),
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the degrees of the Chebyshev polynomials.
+
+    If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D Chebyshev
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    chebvander, chebvander2d, chebval2d, chebval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._vander_nd_flat((chebvander, chebvander, chebvander), (x, y, z), deg)
+
+
+def chebfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least squares fit of Chebyshev series to data.
+
+    Return the coefficients of a Chebyshev series of degree `deg` that is the
+    least squares fit to the data values `y` given at points `x`. If `y` is
+    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+    fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (M,)
+        x-coordinates of the M sample points ``(x[i], y[i])``.
+    y : array_like, shape (M,) or (M, K)
+        y-coordinates of the sample points. Several data sets of sample
+        points sharing the same x-coordinates can be fitted at once by
+        passing in a 2D-array that contains one dataset per column.
+    deg : int or 1-D array_like
+        Degree(s) of the fitting polynomials. If `deg` is a single integer,
+        all terms up to and including the `deg`'th term are included in the
+        fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+        degrees of the terms to include may be used instead.
+    rcond : float, optional
+        Relative condition number of the fit. Singular values smaller than
+        this relative to the largest singular value will be ignored. The
+        default value is len(x)*eps, where eps is the relative precision of
+        the float type, about 2e-16 in most cases.
+    full : bool, optional
+        Switch determining nature of return value. When it is False (the
+        default) just the coefficients are returned, when True diagnostic
+        information from the singular value decomposition is also returned.
+    w : array_like, shape (`M`,), optional
+        Weights. If not None, the weight ``w[i]`` applies to the unsquared
+        residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+        chosen so that the errors of the products ``w[i]*y[i]`` all have the
+        same variance. When using inverse-variance weighting, use
+        ``w[i] = 1/sigma(y[i])``. The default value is None.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
+        Chebyshev coefficients ordered from low to high. If `y` was 2-D,
+        the coefficients for the data in column k of `y` are in column
+        `k`.
+
+    [residuals, rank, singular_values, rcond] : list
+        These values are only returned if ``full == True``
+
+        - residuals -- sum of squared residuals of the least squares fit
+        - rank -- the numerical rank of the scaled Vandermonde matrix
+        - singular_values -- singular values of the scaled Vandermonde matrix
+        - rcond -- value of `rcond`.
+
+        For more details, see `numpy.linalg.lstsq`.
+
+    Warns
+    -----
+    RankWarning
+        The rank of the coefficient matrix in the least-squares fit is
+        deficient. The warning is only raised if ``full == False``.
The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + chebval : Evaluates a Chebyshev series. + chebvander : Vandermonde matrix of Chebyshev series. + chebweight : Chebyshev weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Chebyshev series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Chebyshev series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate splines may be a good alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + + """ + return pu._fit(chebvander, x, y, deg, rcond, full, w) + + +def chebcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is a Chebyshev basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.array([1.] + [np.sqrt(.5)]*(n-1)) + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[0] = np.sqrt(.5) + top[1:] = 1/2 + bot[...] = top + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5 + return mat + + +def chebroots(c): + """ + Compute the roots of a Chebyshev series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * T_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. 
If all the roots are real,
+        then `out` is also real, otherwise it is complex.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyroots
+    numpy.polynomial.legendre.legroots
+    numpy.polynomial.laguerre.lagroots
+    numpy.polynomial.hermite.hermroots
+    numpy.polynomial.hermite_e.hermeroots
+
+    Notes
+    -----
+    The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+    errors due to the numerical instability of the series for such
+    values. Roots with multiplicity greater than 1 will also show larger
+    errors as the value of the series near such points is relatively
+    insensitive to errors in the roots. Isolated roots near the origin can
+    be improved by a few iterations of Newton's method.
+
+    The Chebyshev series basis polynomials aren't powers of `x` so the
+    results of this function may seem unintuitive.
+
+    Examples
+    --------
+    >>> import numpy.polynomial.chebyshev as cheb
+    >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
+    array([ -5.00000000e-01,   2.60860684e-17,   1.00000000e+00]) # may vary
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        return np.array([], dtype=c.dtype)
+    if len(c) == 2:
+        return np.array([-c[0]/c[1]])
+
+    # rotated companion matrix reduces error
+    m = chebcompanion(c)[::-1,::-1]
+    r = la.eigvals(m)
+    r.sort()
+    return r
+
+
+def chebinterpolate(func, deg, args=()):
+    """Interpolate a function at the Chebyshev points of the first kind.
+
+    Returns the Chebyshev series that interpolates `func` at the Chebyshev
+    points of the first kind in the interval [-1, 1]. The interpolating
+    series tends to a minimax approximation to `func` with increasing `deg`
+    if the function is continuous in the interval.
+
+    .. versionadded:: 1.14.0
+
+    Parameters
+    ----------
+    func : function
+        The function to be approximated. It must be a function of a single
+        variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are
+        extra arguments passed in the `args` parameter.
+    deg : int
+        Degree of the interpolating polynomial
+    args : tuple, optional
+        Extra arguments to be used in the function call. Default is no extra
+        arguments.
+
+    Returns
+    -------
+    coef : ndarray, shape (deg + 1,)
+        Chebyshev coefficients of the interpolating series ordered from low to
+        high.
+
+    Examples
+    --------
+    >>> import numpy.polynomial.chebyshev as C
+    >>> C.chebinterpolate(lambda x: np.tanh(x) + 0.5, 8)
+    array([  5.00000000e-01,   8.11675684e-01,  -9.86864911e-17,
+            -5.42457905e-02,  -2.71387850e-16,   4.51658839e-03,
+             2.46716228e-17,  -3.79694221e-04,  -3.26899002e-16])
+
+    Notes
+    -----
+
+    The Chebyshev polynomials used in the interpolation are orthogonal when
+    sampled at the Chebyshev points of the first kind. If it is desired to
+    constrain some of the coefficients they can simply be set to the desired
+    value after the interpolation, no new interpolation or fit is needed. This
+    is especially useful if it is known a priori that some of the coefficients
+    are zero. For instance, if the function is even then the coefficients of
+    the terms of odd degree in the result can be set to zero.
+
+    """
+    deg = np.asarray(deg)
+
+    # check arguments.
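+    # deg must be a non-negative integer scalar. The coefficients computed
+    # below follow from the discrete orthogonality of the Chebyshev
+    # polynomials sampled at the points of the first kind: V^T * y gives the
+    # raw projections, which are then normalized by n for T_0 and by n/2 for
+    # the higher-degree terms.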
+    if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0:
+        raise TypeError("deg must be an int")
+    if deg < 0:
+        raise ValueError("expected deg >= 0")
+
+    order = deg + 1
+    xcheb = chebpts1(order)
+    yfunc = func(xcheb, *args)
+    m = chebvander(xcheb, deg)
+    c = np.dot(m.T, yfunc)
+    c[0] /= order
+    c[1:] /= 0.5*order
+
+    return c
+
+
+def chebgauss(deg):
+    """
+    Gauss-Chebyshev quadrature.
+
+    Computes the sample points and weights for Gauss-Chebyshev quadrature.
+    These sample points and weights will correctly integrate polynomials of
+    degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
+    the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`.
+
+    Parameters
+    ----------
+    deg : int
+        Number of sample points and weights. It must be >= 1.
+
+    Returns
+    -------
+    x : ndarray
+        1-D ndarray containing the sample points.
+    w : ndarray
+        1-D ndarray containing the weights.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    The results have only been tested up to degree 100; higher degrees may
+    be problematic. For Gauss-Chebyshev there are closed form solutions for
+    the sample points and weights. If n = `deg`, then
+
+    .. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n))
+
+    .. math:: w_i = \\pi / n
+
+    """
+    ideg = pu._deprecate_as_int(deg, "deg")
+    if ideg <= 0:
+        raise ValueError("deg must be a positive integer")
+
+    x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg))
+    w = np.ones(ideg)*(np.pi/ideg)
+
+    return x, w
+
+
+def chebweight(x):
+    """
+    The weight function of the Chebyshev polynomials.
+
+    The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of
+    integration is :math:`[-1, 1]`. The Chebyshev polynomials are
+    orthogonal, but not normalized, with respect to this weight function.
+
+    Parameters
+    ----------
+    x : array_like
+        Values at which the weight function will be computed.
+
+    Returns
+    -------
+    w : ndarray
+        The weight function at `x`.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x))
+    return w
+
+
+def chebpts1(npts):
+    """
+    Chebyshev points of the first kind.
+
+    The Chebyshev points of the first kind are the points ``cos(x)``,
+    where ``x = [pi*(k + .5)/npts for k in range(npts)]``.
+
+    Parameters
+    ----------
+    npts : int
+        Number of sample points desired.
+
+    Returns
+    -------
+    pts : ndarray
+        The Chebyshev points of the first kind.
+
+    See Also
+    --------
+    chebpts2
+
+    Notes
+    -----
+
+    .. versionadded:: 1.5.0
+
+    """
+    _npts = int(npts)
+    if _npts != npts:
+        raise ValueError("npts must be integer")
+    if _npts < 1:
+        raise ValueError("npts must be >= 1")
+
+    x = 0.5 * np.pi / _npts * np.arange(-_npts+1, _npts+1, 2)
+    return np.sin(x)
+
+
+def chebpts2(npts):
+    """
+    Chebyshev points of the second kind.
+
+    The Chebyshev points of the second kind are the points ``cos(x)``,
+    where ``x = [pi*k/(npts - 1) for k in range(npts)]`` sorted in ascending
+    order.
+
+    Parameters
+    ----------
+    npts : int
+        Number of sample points desired.
+
+    Returns
+    -------
+    pts : ndarray
+        The Chebyshev points of the second kind.
+
+    Notes
+    -----
+
+    .. versionadded:: 1.5.0
+
+    """
+    _npts = int(npts)
+    if _npts != npts:
+        raise ValueError("npts must be integer")
+    if _npts < 2:
+        raise ValueError("npts must be >= 2")
+
+    x = np.linspace(-np.pi, 0, _npts)
+    return np.cos(x)
+
+
+#
+# Chebyshev series class
+#
+
+class Chebyshev(ABCPolyBase):
+    """A Chebyshev series class.
+
+    The Chebyshev class provides the standard Python numerical methods
+    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+    methods listed below.
+
+    Parameters
+    ----------
+    coef : array_like
+        Chebyshev coefficients in order of increasing degree, i.e.,
+        ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``.
+    domain : (2,) array_like, optional
+        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+        to the interval ``[window[0], window[1]]`` by shifting and scaling.
+        The default value is [-1, 1].
+    window : (2,) array_like, optional
+        Window, see `domain` for its use. The default value is [-1, 1].
+
+        .. versionadded:: 1.6.0
+    symbol : str, optional
+        Symbol used to represent the independent variable in string
+        representations of the polynomial expression, e.g. for printing.
+        The symbol must be a valid Python identifier. Default value is 'x'.
+
+        .. versionadded:: 1.24
+
+    """
+    # Virtual Functions
+    _add = staticmethod(chebadd)
+    _sub = staticmethod(chebsub)
+    _mul = staticmethod(chebmul)
+    _div = staticmethod(chebdiv)
+    _pow = staticmethod(chebpow)
+    _val = staticmethod(chebval)
+    _int = staticmethod(chebint)
+    _der = staticmethod(chebder)
+    _fit = staticmethod(chebfit)
+    _line = staticmethod(chebline)
+    _roots = staticmethod(chebroots)
+    _fromroots = staticmethod(chebfromroots)
+
+    @classmethod
+    def interpolate(cls, func, deg, domain=None, args=()):
+        """Interpolate a function at the Chebyshev points of the first kind.
+
+        Returns the series that interpolates `func` at the Chebyshev points of
+        the first kind scaled and shifted to the `domain`. The resulting series
+        tends to a minimax approximation of `func` when the function is
+        continuous in the domain.
+
+        .. versionadded:: 1.14.0
+
+        Parameters
+        ----------
+        func : function
+            The function to be interpolated. It must be a function of a single
+            variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are
+            extra arguments passed in the `args` parameter.
+        deg : int
+            Degree of the interpolating polynomial.
+        domain : {None, [beg, end]}, optional
+            Domain over which `func` is interpolated. The default is None, in
+            which case the domain is [-1, 1].
+        args : tuple, optional
+            Extra arguments to be used in the function call. Default is no
+            extra arguments.
+
+        Returns
+        -------
+        polynomial : Chebyshev instance
+            Interpolating Chebyshev instance.
+
+        Notes
+        -----
+        See `numpy.polynomial.chebyshev.chebinterpolate` for more details.
+
+        """
+        if domain is None:
+            domain = cls.domain
+        xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args)
+        coef = chebinterpolate(xfunc, deg)
+        return cls(coef, domain=domain)
+
+    # Virtual properties
+    domain = np.array(chebdomain)
+    window = np.array(chebdomain)
+    basis_name = 'T'
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/chebyshev.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/chebyshev.pyi
new file mode 100644
index 00000000..e8113dba
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/chebyshev.pyi
@@ -0,0 +1,51 @@
+from typing import Any
+
+from numpy import ndarray, dtype, int_
+from numpy.polynomial._polybase import ABCPolyBase
+from numpy.polynomial.polyutils import trimcoef
+
+__all__: list[str]
+
+chebtrim = trimcoef
+
+def poly2cheb(pol): ...
+def cheb2poly(c): ...
+
+chebdomain: ndarray[Any, dtype[int_]]
+chebzero: ndarray[Any, dtype[int_]]
+chebone: ndarray[Any, dtype[int_]]
+chebx: ndarray[Any, dtype[int_]]
+
+def chebline(off, scl): ...
+def chebfromroots(roots): ...
+def chebadd(c1, c2): ... +def chebsub(c1, c2): ... +def chebmulx(c): ... +def chebmul(c1, c2): ... +def chebdiv(c1, c2): ... +def chebpow(c, pow, maxpower=...): ... +def chebder(c, m=..., scl=..., axis=...): ... +def chebint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def chebval(x, c, tensor=...): ... +def chebval2d(x, y, c): ... +def chebgrid2d(x, y, c): ... +def chebval3d(x, y, z, c): ... +def chebgrid3d(x, y, z, c): ... +def chebvander(x, deg): ... +def chebvander2d(x, y, deg): ... +def chebvander3d(x, y, z, deg): ... +def chebfit(x, y, deg, rcond=..., full=..., w=...): ... +def chebcompanion(c): ... +def chebroots(c): ... +def chebinterpolate(func, deg, args = ...): ... +def chebgauss(deg): ... +def chebweight(x): ... +def chebpts1(npts): ... +def chebpts2(npts): ... + +class Chebyshev(ABCPolyBase): + @classmethod + def interpolate(cls, func, deg, domain=..., args = ...): ... + domain: Any + window: Any + basis_name: Any diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/hermite.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/hermite.py new file mode 100644 index 00000000..210df25f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/hermite.py @@ -0,0 +1,1703 @@ +""" +============================================================== +Hermite Series, "Physicists" (:mod:`numpy.polynomial.hermite`) +============================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite series, including a `Hermite` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Hermite + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + hermdomain + hermzero + hermone + hermx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + hermadd + hermsub + hermmulx + hermmul + hermdiv + hermpow + hermval + hermval2d + hermval3d + hermgrid2d + hermgrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + hermder + hermint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + hermfromroots + hermroots + hermvander + hermvander2d + hermvander3d + hermgauss + hermweight + hermcompanion + hermfit + hermtrim + hermline + herm2poly + poly2herm + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd', + 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval', + 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', + 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite', + 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', + 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight'] + +hermtrim = pu.trimcoef + + +def poly2herm(pol): + """ + poly2herm(pol) + + Convert a polynomial to a Hermite series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. 
+ + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herm2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import poly2herm + >>> poly2herm(np.arange(4)) + array([1. , 2.75 , 0.5 , 0.375]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermadd(hermmulx(res), pol[i]) + return res + + +def herm2poly(c): + """ + Convert a Hermite series to a polynomial. + + Convert an array representing the coefficients of a Hermite series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Hermite series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2herm + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import herm2poly + >>> herm2poly([ 1. , 2.75 , 0.5 , 0.375]) + array([0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + if n == 2: + c[1] *= 2 + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1*(2*(i - 1))) + c1 = polyadd(tmp, polymulx(c1)*2) + return polyadd(c0, polymulx(c1)*2) + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Hermite +hermdomain = np.array([-1, 1]) + +# Hermite coefficients representing zero. +hermzero = np.array([0]) + +# Hermite coefficients representing one. +hermone = np.array([1]) + +# Hermite coefficients representing the identity x. +hermx = np.array([0, 1/2]) + + +def hermline(off, scl): + """ + Hermite series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Hermite series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial.hermite import hermline, hermval + >>> hermval(0,hermline(3, 2)) + 3.0 + >>> hermval(1,hermline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off, scl/2]) + else: + return np.array([off]) + + +def hermfromroots(roots): + """ + Generate a Hermite series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Hermite form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. 
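Editor's note: a short sketch tying together `poly2herm`, `herm2poly`, and `hermline` from above; the coefficients are the docstrings' own examples.

```python
import numpy as np
from numpy.polynomial.hermite import poly2herm, herm2poly, hermline, hermval

# Round trip between the power basis and the physicists' Hermite basis.
c = poly2herm([0, 1, 2, 3])            # x + 2*x**2 + 3*x**3 in Hermite form
print(c)                               # [1.    2.75  0.5   0.375]
print(herm2poly(c))                    # back to [0., 1., 2., 3.]

# hermline(off, scl) stores scl/2 in the H_1 slot because H_1(x) = 2x.
print(hermline(3, 2))                  # [3., 1.]
print(hermval(1.0, hermline(3, 2)))    # 5.0
```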
+ For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Hermite form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> from numpy.polynomial.hermite import hermfromroots, hermval + >>> coef = hermfromroots((-1, 0, 1)) + >>> hermval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = hermfromroots((-1j, 1j)) + >>> hermval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(hermline, hermmul, roots) + + +def hermadd(c1, c2): + """ + Add one Hermite series to another. + + Returns the sum of two Hermite series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Hermite series of their sum. + + See Also + -------- + hermsub, hermmulx, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Hermite series + is a Hermite series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermadd + >>> hermadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + """ + return pu._add(c1, c2) + + +def hermsub(c1, c2): + """ + Subtract one Hermite series from another. + + Returns the difference of two Hermite series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their difference. + + See Also + -------- + hermadd, hermmulx, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Hermite + series is a Hermite series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermsub + >>> hermsub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def hermmulx(c): + """Multiply a Hermite series by x. + + Multiply the Hermite series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. 
+ + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + hermadd, hermsub, hermmul, hermdiv, hermpow + + Notes + ----- + The multiplication uses the recursion relationship for Hermite + polynomials in the form + + .. math:: + + xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmulx + >>> hermmulx([1, 2, 3]) + array([2. , 6.5, 1. , 1.5]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1] = c[0]/2 + for i in range(1, len(c)): + prd[i + 1] = c[i]/2 + prd[i - 1] += c[i]*i + return prd + + +def hermmul(c1, c2): + """ + Multiply one Hermite series by another. + + Returns the product of two Hermite series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their product. + + See Also + -------- + hermadd, hermsub, hermmulx, hermdiv, hermpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Hermite polynomial basis set. Thus, to express + the product as a Hermite series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmul + >>> hermmul([1, 2, 3], [0, 1, 2]) + array([52., 29., 52., 7., 6.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1))) + c1 = hermadd(tmp, hermmulx(c1)*2) + return hermadd(c0, hermmulx(c1)*2) + + +def hermdiv(c1, c2): + """ + Divide one Hermite series by another. + + Returns the quotient-with-remainder of two Hermite series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Hermite series coefficients representing the quotient and + remainder. + + See Also + -------- + hermadd, hermsub, hermmulx, hermmul, hermpow + + Notes + ----- + In general, the (polynomial) division of one Hermite series by another + results in quotient and remainder terms that are not in the Hermite + polynomial basis set. Thus, to express these results as a Hermite + series, it is necessary to "reproject" the results onto the Hermite + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. 
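Editor's note: a quick numerical sanity sketch of the `hermmulx` recursion quoted above (the test series and evaluation points are illustrative).

```python
import numpy as np
from numpy.polynomial.hermite import hermmulx, hermval

# Check of the recursion x*H_i(x) = H_{i+1}(x)/2 + i*H_{i-1}(x): multiplying
# the series by x via hermmulx must agree with pointwise x*p(x).
c = np.array([1.0, 2.0, 3.0])
x = np.linspace(-2.0, 2.0, 7)
print(hermmulx(c))                                              # [2., 6.5, 1., 1.5]
print(np.allclose(hermval(x, hermmulx(c)), x * hermval(x, c)))  # True
```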
+ + Examples + -------- + >>> from numpy.polynomial.hermite import hermdiv + >>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([2., 2.])) + >>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 1.])) + + """ + return pu._div(hermmul, c1, c2) + + +def hermpow(c, pow, maxpower=16): + """Raise a Hermite series to a power. + + Returns the Hermite series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. + + See Also + -------- + hermadd, hermsub, hermmulx, hermmul, hermdiv + + Examples + -------- + >>> from numpy.polynomial.hermite import hermpow + >>> hermpow([1, 2, 3], 2) + array([81., 52., 82., 12., 9.]) + + """ + return pu._pow(hermmul, c, pow, maxpower) + + +def hermder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite series. + + Returns the Hermite series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2`` + while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + + 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If `c` is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermder + >>> hermder([ 1. 
, 0.5, 0.5, 0.5]) + array([1., 2., 3.]) + >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = (2*j)*c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite series. + + Returns the Hermite series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Hermite series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + hermder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermint + >>> hermint([1,2,3]) # integrate once, value 0 at 0. 
+ array([1. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary + >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0. + array([2. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1 + array([-2. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1) + array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0]/2 + for j in range(1, n): + tmp[j + 1] = c[j]/(2*(j + 1)) + tmp[0] += k[i] - hermval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermval(x, c, tensor=True): + """ + Evaluate an Hermite series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. 
versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + hermval2d, hermgrid2d, hermval3d, hermgrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermval + >>> coef = [1,2,3] + >>> hermval(1, coef) + 11.0 + >>> hermval([[1,2],[3,4]], coef) + array([[ 11., 51.], + [115., 203.]]) + + """ + c = np.array(c, ndmin=1, copy=False) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + x2 = x*2 + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1*(2*(nd - 1)) + c1 = tmp + c1*x2 + return c0 + c1*x2 + + +def hermval2d(x, y, c): + """ + Evaluate a 2-D Hermite series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + hermval, hermgrid2d, hermval3d, hermgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._valnd(hermval, c, x, y) + + +def hermgrid2d(x, y, c): + """ + Evaluate a 2-D Hermite series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. 
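Editor's note: a minimal evaluation/calculus sketch for `hermval`, `hermder`, and `hermint` from above, using the docstrings' example series.

```python
import numpy as np
from numpy.polynomial.hermite import hermval, hermder, hermint

c = [1.0, 2.0, 3.0]
print(hermval(1.0, c))    # 11.0: 1*H_0(1) + 2*H_1(1) + 3*H_2(1) = 1 + 4 + 6

# hermder undoes hermint (up to the integration constant hermint chooses).
print(np.allclose(hermder(hermint(c)), c))   # True
```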
+ + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermval, hermval2d, hermval3d, hermgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._gridnd(hermval, c, x, y) + + +def hermval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + hermval, hermval2d, hermgrid2d, hermgrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._valnd(hermval, c, x, y, z) + + +def hermgrid3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c) + + where the points `(a, b, c)` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. 
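Editor's note: a small shape-semantics sketch contrasting the pointwise and Cartesian-product evaluators described above; the coefficient array and grids are illustrative.

```python
import numpy as np
from numpy.polynomial.hermite import hermval2d, hermgrid2d

c = np.ones((3, 3))              # coefficients c[i, j] multiplying H_i(x)*H_j(y)
x = np.linspace(-1, 1, 4)
y = np.linspace(-1, 1, 5)

print(hermval2d(x, x, c).shape)  # (4,)   -- evaluated at the pairs (x[k], x[k])
print(hermgrid2d(x, y, c).shape) # (4, 5) -- evaluated on the full x-by-y grid
```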
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`.  If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermval, hermval2d, hermgrid2d, hermval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._gridnd(hermval, c, x, y, z)
+
+
+def hermvander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = H_i(x),
+
+    where `0 <= i <= deg`. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the Hermite polynomial.
+
+    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+    array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and
+    ``hermval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of Hermite series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding Hermite polynomial.  The dtype will be the same as
+        the converted `x`.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite import hermvander
+    >>> x = np.array([-1, 0, 1])
+    >>> hermvander(x, 3)
+    array([[ 1., -2.,  2.,  4.],
+           [ 1.,  0., -2., -0.],
+           [ 1.,  2.,  2., -4.]])
+
+    """
+    ideg = pu._deprecate_as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=False, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x*0 + 1
+    if ideg > 0:
+        x2 = x*2
+        v[1] = x2
+        for i in range(2, ideg + 1):
+            v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1)))
+    return np.moveaxis(v, 0, -1)
+
+
+def hermvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = H_i(x) * H_j(y),
+
+    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the degrees of
+    the Hermite polynomials.
+
+    If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D Hermite
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`.  The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    hermvander, hermvander3d, hermval2d, hermval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._vander_nd_flat((hermvander, hermvander), (x, y), deg)
+
+
+def hermvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z),
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`.  The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the degrees of the Hermite polynomials.
+
+    If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D Hermite
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`.  The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermvander, hermvander2d, hermval2d, hermval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg)
+
+
+def hermfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least squares fit of Hermite series to data.
+
+    Return the coefficients of a Hermite series of degree `deg` that is the
+    least squares fit to the data values `y` given at points `x`. If `y` is
+    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+    fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math::  p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x),
+
+    where `n` is `deg`.
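Editor's note: a minimal fit sketch for `hermfit`, also exercising the Vandermonde identity quoted above; the noise level and seed are illustrative.

```python
import numpy as np
from numpy.polynomial.hermite import hermfit, hermval, hermvander

rng = np.random.default_rng(0)
x = np.linspace(-1, 1, 50)
y = hermval(x, [1.0, 2.0, 3.0]) + 0.01 * rng.standard_normal(x.size)

coef = hermfit(x, y, 2)          # recovers roughly [1, 2, 3]
print(coef)

# The Vandermonde identity: V @ c equals hermval(x, c) up to roundoff.
V = hermvander(x, 2)
print(np.allclose(V @ coef, hermval(x, coef)))   # True
```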
+ + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.polynomial.polyfit + numpy.polynomial.hermite_e.hermefit + hermval : Evaluates a Hermite series. + hermvander : Vandermonde matrix of Hermite series. + hermweight : Hermite weight function + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Hermite series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. 
This means that the
+    coefficient values may be poorly determined. Using a lower order fit
+    will usually get rid of the warning.  The `rcond` parameter can also be
+    set to a value smaller than its default, but the resulting fit may be
+    spurious and have large contributions from roundoff error.
+
+    Fits using Hermite series are probably most useful when the data can be
+    approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Hermite
+    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
+    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
+    available as `hermweight`.
+
+    References
+    ----------
+    .. [1] Wikipedia, "Curve fitting",
+           https://en.wikipedia.org/wiki/Curve_fitting
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite import hermfit, hermval
+    >>> x = np.linspace(-10, 10)
+    >>> err = np.random.randn(len(x))/10
+    >>> y = hermval(x, [1, 2, 3]) + err
+    >>> hermfit(x, y, 2)
+    array([1.0218, 1.9986, 2.9999]) # may vary
+
+    """
+    return pu._fit(hermvander, x, y, deg, rcond, full, w)
+
+
+def hermcompanion(c):
+    """Return the scaled companion matrix of c.
+
+    The basis polynomials are scaled so that the companion matrix is
+    symmetric when `c` is an Hermite basis polynomial. This provides
+    better eigenvalue estimates than the unscaled case and for basis
+    polynomials the eigenvalues are guaranteed to be real if
+    `numpy.linalg.eigvalsh` is used to obtain them.
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of Hermite series coefficients ordered from low to high
+        degree.
+
+    Returns
+    -------
+    mat : ndarray
+        Scaled companion matrix of dimensions (deg, deg).
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        raise ValueError('Series must have maximum degree of at least 1.')
+    if len(c) == 2:
+        return np.array([[-.5*c[0]/c[1]]])
+
+    n = len(c) - 1
+    mat = np.zeros((n, n), dtype=c.dtype)
+    scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1))))
+    scl = np.multiply.accumulate(scl)[::-1]
+    top = mat.reshape(-1)[1::n+1]
+    bot = mat.reshape(-1)[n::n+1]
+    top[...] = np.sqrt(.5*np.arange(1, n))
+    bot[...] = top
+    mat[:, -1] -= scl*c[:-1]/(2.0*c[-1])
+    return mat
+
+
+def hermroots(c):
+    """
+    Compute the roots of a Hermite series.
+
+    Return the roots (a.k.a. "zeros") of the polynomial
+
+    .. math:: p(x) = \\sum_i c[i] * H_i(x).
+
+    Parameters
+    ----------
+    c : 1-D array_like
+        1-D array of coefficients.
+
+    Returns
+    -------
+    out : ndarray
+        Array of the roots of the series. If all the roots are real,
+        then `out` is also real, otherwise it is complex.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyroots
+    numpy.polynomial.legendre.legroots
+    numpy.polynomial.laguerre.lagroots
+    numpy.polynomial.chebyshev.chebroots
+    numpy.polynomial.hermite_e.hermeroots
+
+    Notes
+    -----
+    The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+    errors due to the numerical instability of the series for such
+    values. Roots with multiplicity greater than 1 will also show larger
+    errors as the value of the series near such points is relatively
+    insensitive to errors in the roots. Isolated roots near the origin can
+    be improved by a few iterations of Newton's method.
+
+    The Hermite series basis polynomials aren't powers of `x` so the
+    results of this function may seem unintuitive.
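Editor's note: the Notes above suggest polishing roots near the origin with Newton's method; here is a minimal sketch of one such iteration (the series and the single-step refinement are illustrative choices).

```python
import numpy as np
from numpy.polynomial.hermite import hermfromroots, hermroots, hermval, hermder

coef = hermfromroots([-1.0, 0.0, 1.0])
r = hermroots(coef)                      # companion-matrix eigenvalue estimates

# One Newton step per root, r <- r - p(r)/p'(r), with p' from hermder.
r = r - hermval(r, coef) / hermval(r, hermder(coef))
print(r)                                 # ~[-1., 0., 1.]
```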
+ + Examples + -------- + >>> from numpy.polynomial.hermite import hermroots, hermfromroots + >>> coef = hermfromroots([-1, 0, 1]) + >>> coef + array([0. , 0.25 , 0. , 0.125]) + >>> hermroots(coef) + array([-1.00000000e+00, -1.38777878e-17, 1.00000000e+00]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-.5*c[0]/c[1]]) + + # rotated companion matrix reduces error + m = hermcompanion(c)[::-1,::-1] + r = la.eigvals(m) + r.sort() + return r + + +def _normed_hermite_n(x, n): + """ + Evaluate a normalized Hermite polynomial. + + Compute the value of the normalized Hermite polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. + Points at which to evaluate the function + n : int + Degree of the normalized Hermite function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + .. versionadded:: 1.10.0 + + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard Hermite functions + overflow when n >= 207. + + """ + if n == 0: + return np.full(x.shape, 1/np.sqrt(np.sqrt(np.pi))) + + c0 = 0. + c1 = 1./np.sqrt(np.sqrt(np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1*np.sqrt((nd - 1.)/nd) + c1 = tmp + c1*x*np.sqrt(2./nd) + nd = nd - 1.0 + return c0 + c1*x*np.sqrt(2) + + +def hermgauss(deg): + """ + Gauss-Hermite quadrature. + + Computes the sample points and weights for Gauss-Hermite quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` + with the weight function :math:`f(x) = \\exp(-x^2)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`H_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = pu._deprecate_as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0]*deg + [1], dtype=np.float64) + m = hermcompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = _normed_hermite_n(x, ideg) + df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = _normed_hermite_n(x, ideg - 1) + fm /= np.abs(fm).max() + w = 1/(fm * fm) + + # for Hermite we can also symmetrize + w = (w + w[::-1])/2 + x = (x - x[::-1])/2 + + # scale w to get the right value + w *= np.sqrt(np.pi) / w.sum() + + return x, w + + +def hermweight(x): + """ + Weight function of the Hermite polynomials. + + The weight function is :math:`\\exp(-x^2)` and the interval of + integration is :math:`[-\\inf, \\inf]`. 
the Hermite polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + w = np.exp(-x**2) + return w + + +# +# Hermite series class +# + +class Hermite(ABCPolyBase): + """An Hermite series class. + + The Hermite class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed in the `ABCPolyBase` documentation. + + Parameters + ---------- + coef : array_like + Hermite coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(X) + 3*H_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(hermadd) + _sub = staticmethod(hermsub) + _mul = staticmethod(hermmul) + _div = staticmethod(hermdiv) + _pow = staticmethod(hermpow) + _val = staticmethod(hermval) + _int = staticmethod(hermint) + _der = staticmethod(hermder) + _fit = staticmethod(hermfit) + _line = staticmethod(hermline) + _roots = staticmethod(hermroots) + _fromroots = staticmethod(hermfromroots) + + # Virtual properties + domain = np.array(hermdomain) + window = np.array(hermdomain) + basis_name = 'H' diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/hermite.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/hermite.pyi new file mode 100644 index 00000000..0d3556d6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/hermite.pyi @@ -0,0 +1,46 @@ +from typing import Any + +from numpy import ndarray, dtype, int_, float_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +hermtrim = trimcoef + +def poly2herm(pol): ... +def herm2poly(c): ... + +hermdomain: ndarray[Any, dtype[int_]] +hermzero: ndarray[Any, dtype[int_]] +hermone: ndarray[Any, dtype[int_]] +hermx: ndarray[Any, dtype[float_]] + +def hermline(off, scl): ... +def hermfromroots(roots): ... +def hermadd(c1, c2): ... +def hermsub(c1, c2): ... +def hermmulx(c): ... +def hermmul(c1, c2): ... +def hermdiv(c1, c2): ... +def hermpow(c, pow, maxpower=...): ... +def hermder(c, m=..., scl=..., axis=...): ... +def hermint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def hermval(x, c, tensor=...): ... +def hermval2d(x, y, c): ... +def hermgrid2d(x, y, c): ... +def hermval3d(x, y, z, c): ... +def hermgrid3d(x, y, z, c): ... +def hermvander(x, deg): ... +def hermvander2d(x, y, deg): ... +def hermvander3d(x, y, z, deg): ... +def hermfit(x, y, deg, rcond=..., full=..., w=...): ... +def hermcompanion(c): ... +def hermroots(c): ... +def hermgauss(deg): ... +def hermweight(x): ... 
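Editor's note: a short quadrature sketch for `hermgauss` as documented above; the degree and test integrand are illustrative.

```python
import numpy as np
from numpy.polynomial.hermite import hermgauss

# A 20-point rule integrates exp(-x**2)*p(x) exactly for deg(p) <= 39.
x, w = hermgauss(20)
print(np.sum(w * x**2))      # ~0.8862269
print(np.sqrt(np.pi) / 2)    # exact value of the integral of exp(-x**2)*x**2
```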
+ +class Hermite(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/hermite_e.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/hermite_e.py new file mode 100644 index 00000000..bdf29405 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/hermite_e.py @@ -0,0 +1,1695 @@ +""" +=================================================================== +HermiteE Series, "Probabilists" (:mod:`numpy.polynomial.hermite_e`) +=================================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite_e series, including a `HermiteE` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + HermiteE + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + hermedomain + hermezero + hermeone + hermex + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + hermeadd + hermesub + hermemulx + hermemul + hermediv + hermepow + hermeval + hermeval2d + hermeval3d + hermegrid2d + hermegrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + hermeder + hermeint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + hermefromroots + hermeroots + hermevander + hermevander2d + hermevander3d + hermegauss + hermeweight + hermecompanion + hermefit + hermetrim + hermeline + herme2poly + poly2herme + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', + 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', + 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', + 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', + 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', + 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', + 'hermegauss', 'hermeweight'] + +hermetrim = pu.trimcoef + + +def poly2herme(pol): + """ + poly2herme(pol) + + Convert a polynomial to a Hermite series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herme2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import poly2herme + >>> poly2herme(np.arange(4)) + array([ 2., 10., 2., 3.]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermeadd(hermemulx(res), pol[i]) + return res + + +def herme2poly(c): + """ + Convert a Hermite series to a polynomial. 
+
+    Convert an array representing the coefficients of a Hermite series,
+    ordered from lowest degree to highest, to an array of the coefficients
+    of the equivalent polynomial (relative to the "standard" basis) ordered
+    from lowest to highest degree.
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array containing the Hermite series coefficients, ordered
+        from lowest order term to highest.
+
+    Returns
+    -------
+    pol : ndarray
+        1-D array containing the coefficients of the equivalent polynomial
+        (relative to the "standard" basis) ordered from lowest order term
+        to highest.
+
+    See Also
+    --------
+    poly2herme
+
+    Notes
+    -----
+    The easy way to do conversions between polynomial basis sets
+    is to use the convert method of a class instance.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import herme2poly
+    >>> herme2poly([  2.,  10.,   2.,   3.])
+    array([0., 1., 2., 3.])
+
+    """
+    from .polynomial import polyadd, polysub, polymulx
+
+    [c] = pu.as_series([c])
+    n = len(c)
+    if n == 1:
+        return c
+    if n == 2:
+        return c
+    else:
+        c0 = c[-2]
+        c1 = c[-1]
+        # i is the current degree of c1
+        for i in range(n - 1, 1, -1):
+            tmp = c0
+            c0 = polysub(c[i - 2], c1*(i - 1))
+            c1 = polyadd(tmp, polymulx(c1))
+        return polyadd(c0, polymulx(c1))
+
+#
+# These constant arrays are of integer type so as to be compatible
+# with the widest range of other types, such as Decimal.
+#
+
+# Hermite
+hermedomain = np.array([-1, 1])
+
+# Hermite coefficients representing zero.
+hermezero = np.array([0])
+
+# Hermite coefficients representing one.
+hermeone = np.array([1])
+
+# Hermite coefficients representing the identity x.
+hermex = np.array([0, 1])
+
+
+def hermeline(off, scl):
+    """
+    Hermite series whose graph is a straight line.
+
+    Parameters
+    ----------
+    off, scl : scalars
+        The specified line is given by ``off + scl*x``.
+
+    Returns
+    -------
+    y : ndarray
+        This module's representation of the Hermite series for
+        ``off + scl*x``.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyline
+    numpy.polynomial.chebyshev.chebline
+    numpy.polynomial.legendre.legline
+    numpy.polynomial.laguerre.lagline
+    numpy.polynomial.hermite.hermline
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermeline, hermeval
+    >>> hermeval(0,hermeline(3, 2))
+    3.0
+    >>> hermeval(1,hermeline(3, 2))
+    5.0
+
+    """
+    if scl != 0:
+        return np.array([off, scl])
+    else:
+        return np.array([off])
+
+
+def hermefromroots(roots):
+    """
+    Generate a HermiteE series with given roots.
+
+    The function returns the coefficients of the polynomial
+
+    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+    in HermiteE form, where the `r_n` are the roots specified in `roots`.
+    If a zero has multiplicity n, then it must appear in `roots` n times.
+    For instance, if 2 is a root of multiplicity three and 3 is a root of
+    multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
+    roots can appear in any order.
+
+    If the returned coefficients are `c`, then
+
+    .. math:: p(x) = c_0 + c_1 * He_1(x) + ... +  c_n * He_n(x)
+
+    The coefficient of the last term is not generally 1 for monic
+    polynomials in HermiteE form.
+
+    Parameters
+    ----------
+    roots : array_like
+        Sequence containing the roots.
+
+    Returns
+    -------
+    out : ndarray
+        1-D array of coefficients.
If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.chebyshev.chebfromroots + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval + >>> coef = hermefromroots((-1, 0, 1)) + >>> hermeval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = hermefromroots((-1j, 1j)) + >>> hermeval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(hermeline, hermemul, roots) + + +def hermeadd(c1, c2): + """ + Add one Hermite series to another. + + Returns the sum of two Hermite series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Hermite series of their sum. + + See Also + -------- + hermesub, hermemulx, hermemul, hermediv, hermepow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Hermite series + is a Hermite series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeadd + >>> hermeadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + """ + return pu._add(c1, c2) + + +def hermesub(c1, c2): + """ + Subtract one Hermite series from another. + + Returns the difference of two Hermite series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their difference. + + See Also + -------- + hermeadd, hermemulx, hermemul, hermediv, hermepow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Hermite + series is a Hermite series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermesub + >>> hermesub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def hermemulx(c): + """Multiply a Hermite series by x. + + Multiply the Hermite series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + Notes + ----- + The multiplication uses the recursion relationship for Hermite + polynomials in the form + + .. 
math::
+
+        xP_i(x) = P_{i + 1}(x) + iP_{i - 1}(x)
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermemulx
+    >>> hermemulx([1, 2, 3])
+    array([2., 7., 2., 3.])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    # The zero series needs special treatment
+    if len(c) == 1 and c[0] == 0:
+        return c
+
+    prd = np.empty(len(c) + 1, dtype=c.dtype)
+    prd[0] = c[0]*0
+    prd[1] = c[0]
+    for i in range(1, len(c)):
+        prd[i + 1] = c[i]
+        prd[i - 1] += c[i]*i
+    return prd
+
+
+def hermemul(c1, c2):
+    """
+    Multiply one Hermite series by another.
+
+    Returns the product of two Hermite series `c1` * `c2`.  The arguments
+    are sequences of coefficients, from lowest order "term" to highest,
+    e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Of Hermite series coefficients representing their product.
+
+    See Also
+    --------
+    hermeadd, hermesub, hermemulx, hermediv, hermepow
+
+    Notes
+    -----
+    In general, the (polynomial) product of two C-series results in terms
+    that are not in the Hermite polynomial basis set. Thus, to express
+    the product as a Hermite series, it is necessary to "reproject" the
+    product onto said basis set, which may produce "unintuitive" (but
+    correct) results; see Examples section below.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermemul
+    >>> hermemul([1, 2, 3], [0, 1, 2])
+    array([14., 15., 28.,  7.,  6.])
+
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = pu.as_series([c1, c2])
+
+    if len(c1) > len(c2):
+        c = c2
+        xs = c1
+    else:
+        c = c1
+        xs = c2
+
+    if len(c) == 1:
+        c0 = c[0]*xs
+        c1 = 0
+    elif len(c) == 2:
+        c0 = c[0]*xs
+        c1 = c[1]*xs
+    else:
+        nd = len(c)
+        c0 = c[-2]*xs
+        c1 = c[-1]*xs
+        for i in range(3, len(c) + 1):
+            tmp = c0
+            nd = nd - 1
+            c0 = hermesub(c[-i]*xs, c1*(nd - 1))
+            c1 = hermeadd(tmp, hermemulx(c1))
+    return hermeadd(c0, hermemulx(c1))
+
+
+def hermediv(c1, c2):
+    """
+    Divide one Hermite series by another.
+
+    Returns the quotient-with-remainder of two Hermite series
+    `c1` / `c2`.  The arguments are sequences of coefficients from lowest
+    order "term" to highest, e.g., [1,2,3] represents the series
+    ``P_0 + 2*P_1 + 3*P_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    [quo, rem] : ndarrays
+        Of Hermite series coefficients representing the quotient and
+        remainder.
+
+    See Also
+    --------
+    hermeadd, hermesub, hermemulx, hermemul, hermepow
+
+    Notes
+    -----
+    In general, the (polynomial) division of one Hermite series by another
+    results in quotient and remainder terms that are not in the Hermite
+    polynomial basis set. Thus, to express these results as a Hermite
+    series, it is necessary to "reproject" the results onto the Hermite
+    basis set, which may produce "unintuitive" (but correct) results; see
+    Examples section below.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermediv
+    >>> hermediv([ 14.,  15.,  28.,   7.,   6.], [0, 1, 2])
+    (array([1., 2., 3.]), array([0.]))
+    >>> hermediv([ 15.,  17.,  28.,   7.,   6.], [0, 1, 2])
+    (array([1., 2., 3.]), array([1., 2.]))
+
+    """
+    return pu._div(hermemul, c1, c2)
+
+
+def hermepow(c, pow, maxpower=16):
+    """Raise a Hermite series to a power.
+
+    Returns the Hermite series `c` raised to the power `pow`.
The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermemul, hermediv + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermepow + >>> hermepow([1, 2, 3], 2) + array([23., 28., 46., 12., 9.]) + + """ + return pu._pow(hermemul, c, pow, maxpower) + + +def hermeder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite_e series. + + Returns the series coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2`` + while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) + + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 + is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermeint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeder + >>> hermeder([ 1., 1., 1., 1.]) + array([1., 2., 3.]) + >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + return c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = j*c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite_e series. + + Returns the Hermite_e series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. 
+ The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Hermite_e series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + hermeder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeint + >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0. + array([1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) # may vary + >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0. + array([2., 1., 1., 1.]) + >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1 + array([-1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1) + array([ 1.83333333, 0. 
, 0.5 , 0.33333333, 0.25 ]) # may vary
+
+    """
+    c = np.array(c, ndmin=1, copy=True)
+    if c.dtype.char in '?bBhHiIlLqQpP':
+        c = c.astype(np.double)
+    if not np.iterable(k):
+        k = [k]
+    cnt = pu._deprecate_as_int(m, "the order of integration")
+    iaxis = pu._deprecate_as_int(axis, "the axis")
+    if cnt < 0:
+        raise ValueError("The order of integration must be non-negative")
+    if len(k) > cnt:
+        raise ValueError("Too many integration constants")
+    if np.ndim(lbnd) != 0:
+        raise ValueError("lbnd must be a scalar.")
+    if np.ndim(scl) != 0:
+        raise ValueError("scl must be a scalar.")
+    iaxis = normalize_axis_index(iaxis, c.ndim)
+
+    if cnt == 0:
+        return c
+
+    c = np.moveaxis(c, iaxis, 0)
+    k = list(k) + [0]*(cnt - len(k))
+    for i in range(cnt):
+        n = len(c)
+        c *= scl
+        if n == 1 and np.all(c[0] == 0):
+            c[0] += k[i]
+        else:
+            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
+            tmp[0] = c[0]*0
+            tmp[1] = c[0]
+            for j in range(1, n):
+                tmp[j + 1] = c[j]/(j + 1)
+            tmp[0] += k[i] - hermeval(lbnd, tmp)
+            c = tmp
+    c = np.moveaxis(c, 0, iaxis)
+    return c
+
+
+def hermeval(x, c, tensor=True):
+    """
+    Evaluate an HermiteE series at points x.
+
+    If `c` is of length `n + 1`, this function returns the value:
+
+    .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x)
+
+    The parameter `x` is converted to an array only if it is a tuple or a
+    list, otherwise it is treated as a scalar. In either case, either `x`
+    or its elements must support multiplication and addition both with
+    themselves and with the elements of `c`.
+
+    If `c` is a 1-D array, then `p(x)` will have the same shape as `x`.  If
+    `c` is multidimensional, then the shape of the result depends on the
+    value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
+    x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
+    scalars have shape ().
+
+    Trailing zeros in the coefficients will be used in the evaluation, so
+    they should be avoided if efficiency is a concern.
+
+    Parameters
+    ----------
+    x : array_like, compatible object
+        If `x` is a list or tuple, it is converted to an ndarray, otherwise
+        it is left unchanged and treated as a scalar. In either case, `x`
+        or its elements must support addition and multiplication with
+        themselves and with the elements of `c`.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree n are contained in c[n]. If `c` is multidimensional the
+        remaining indices enumerate multiple polynomials. In the two
+        dimensional case the coefficients may be thought of as stored in
+        the columns of `c`.
+    tensor : boolean, optional
+        If True, the shape of the coefficient array is extended with ones
+        on the right, one for each dimension of `x`. Scalars have dimension 0
+        for this action. The result is that every column of coefficients in
+        `c` is evaluated for every element of `x`. If False, `x` is broadcast
+        over the columns of `c` for the evaluation. This keyword is useful
+        when `c` is multidimensional. The default value is True.
+
+        .. versionadded:: 1.7.0
+
+    Returns
+    -------
+    values : ndarray, algebra_like
+        The shape of the return value is described above.
+
+    See Also
+    --------
+    hermeval2d, hermegrid2d, hermeval3d, hermegrid3d
+
+    Notes
+    -----
+    The evaluation uses Clenshaw recursion, aka synthetic division.
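+
+    For reference, with the HermiteE recurrence
+    ``He_{k+1}(x) = x*He_k(x) - k*He_{k-1}(x)``, the recursion sets
+    ``b_n = c_n`` and ``b_{n-1} = c_{n-1} + x*b_n``, then iterates
+
+    .. math:: b_k = c_k + x b_{k+1} - (k + 1) b_{k+2},
+              \\quad k = n - 2, \\ldots, 0,
+
+    after which ``p(x) = b_0``. (This restates the loop in the code below;
+    the names ``b_k`` are illustrative only.)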
+ + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeval + >>> coef = [1,2,3] + >>> hermeval(1, coef) + 3.0 + >>> hermeval([[1,2],[3,4]], coef) + array([[ 3., 14.], + [31., 54.]]) + + """ + c = np.array(c, ndmin=1, copy=False) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1*(nd - 1) + c1 = tmp + c1*x + return c0 + c1*x + + +def hermeval2d(x, y, c): + """ + Evaluate a 2-D HermiteE series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + hermeval, hermegrid2d, hermeval3d, hermegrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._valnd(hermeval, c, x, y) + + +def hermegrid2d(x, y, c): + """ + Evaluate a 2-D HermiteE series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. 
+ c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermeval, hermeval2d, hermeval3d, hermegrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._gridnd(hermeval, c, x, y) + + +def hermeval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite_e series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + hermeval, hermeval2d, hermegrid2d, hermegrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._valnd(hermeval, c, x, y, z) + + +def hermegrid3d(x, y, z, c): + """ + Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c) + + where the points `(a, b, c)` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. 
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermeval, hermeval2d, hermegrid2d, hermeval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._gridnd(hermeval, c, x, y, z)
+
+
+def hermevander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = He_i(x),
+
+    where `0 <= i <= deg`. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the HermiteE polynomial.
+
+    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+    array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and
+    ``hermeval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of HermiteE series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding HermiteE polynomial. The dtype will be the same as
+        the converted `x`.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermevander
+    >>> x = np.array([-1, 0, 1])
+    >>> hermevander(x, 3)
+    array([[ 1., -1.,  0.,  2.],
+           [ 1.,  0., -1., -0.],
+           [ 1.,  1.,  0., -2.]])
+
+    """
+    ideg = pu._deprecate_as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=False, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x*0 + 1
+    if ideg > 0:
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = (v[i-1]*x - v[i-2]*(i - 1))
+    return np.moveaxis(v, 0, -1)
+
+
+def hermevander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y),
+
+    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the degrees of
+    the HermiteE polynomials.
+
+    If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D HermiteE
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape.
The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`.  The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    hermevander, hermevander3d, hermeval2d, hermeval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._vander_nd_flat((hermevander, hermevander), (x, y), deg)
+
+
+def hermevander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`.  The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the degrees of the HermiteE polynomials.
+
+    If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D HermiteE
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`.  The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermevander, hermevander2d, hermeval2d, hermeval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._vander_nd_flat((hermevander, hermevander, hermevander), (x, y, z), deg)
+
+
+def hermefit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least squares fit of Hermite series to data.
+
+    Return the coefficients of a HermiteE series of degree `deg` that is
+    the least squares fit to the data values `y` given at points `x`. If
+    `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
+    multiple fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x),
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (M,)
+        x-coordinates of the M sample points ``(x[i], y[i])``.
+    y : array_like, shape (M,) or (M, K)
+        y-coordinates of the sample points. Several data sets of sample
+        points sharing the same x-coordinates can be fitted at once by
+        passing in a 2D-array that contains one dataset per column.
+    deg : int or 1-D array_like
+        Degree(s) of the fitting polynomials.
If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full = False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.polynomial.polyfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.laguerre.lagfit + hermeval : Evaluates a Hermite series. + hermevander : pseudo Vandermonde matrix of Hermite series. + hermeweight : HermiteE weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the HermiteE series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c` + are the coefficients to be solved for, and the elements of `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. 
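+
+    As a rough sketch of the solve (this ignores the column scaling that
+    the implementation applies to the Vandermonde matrix before calling
+    `numpy.linalg.lstsq`), the unweighted case is equivalent to::
+
+        coef = np.linalg.lstsq(hermevander(x, deg), y, rcond)[0]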
+ + Fits using HermiteE series are probably most useful when the data can + be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE + weight. In that case the weight ``sqrt(w(x[i]))`` should be used + together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is + available as `hermeweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermefit, hermeval + >>> x = np.linspace(-10, 10) + >>> np.random.seed(123) + >>> err = np.random.randn(len(x))/10 + >>> y = hermeval(x, [1, 2, 3]) + err + >>> hermefit(x, y, 2) + array([ 1.01690445, 1.99951418, 2.99948696]) # may vary + + """ + return pu._fit(hermevander, x, y, deg, rcond, full, w) + + +def hermecompanion(c): + """ + Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an HermiteE basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of HermiteE series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1)))) + scl = np.multiply.accumulate(scl)[::-1] + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[...] = np.sqrt(np.arange(1, n)) + bot[...] = top + mat[:, -1] -= scl*c[:-1]/c[-1] + return mat + + +def hermeroots(c): + """ + Compute the roots of a HermiteE series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * He_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.chebyshev.chebroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The HermiteE series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. 
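+
+    As an illustration of that last remark (not something this function
+    does itself), a single Newton step for a root estimate ``r`` is
+    ``r - hermeval(r, c)/hermeval(r, hermeder(c))``.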
+ + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots + >>> coef = hermefromroots([-1, 0, 1]) + >>> coef + array([0., 2., 0., 1.]) + >>> hermeroots(coef) + array([-1., 0., 1.]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0]/c[1]]) + + # rotated companion matrix reduces error + m = hermecompanion(c)[::-1,::-1] + r = la.eigvals(m) + r.sort() + return r + + +def _normed_hermite_e_n(x, n): + """ + Evaluate a normalized HermiteE polynomial. + + Compute the value of the normalized HermiteE polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. + Points at which to evaluate the function + n : int + Degree of the normalized HermiteE function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + .. versionadded:: 1.10.0 + + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard HermiteE functions + overflow when n >= 207. + + """ + if n == 0: + return np.full(x.shape, 1/np.sqrt(np.sqrt(2*np.pi))) + + c0 = 0. + c1 = 1./np.sqrt(np.sqrt(2*np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1*np.sqrt((nd - 1.)/nd) + c1 = tmp + c1*x*np.sqrt(1./nd) + nd = nd - 1.0 + return c0 + c1*x + + +def hermegauss(deg): + """ + Gauss-HermiteE quadrature. + + Computes the sample points and weights for Gauss-HermiteE quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` + with the weight function :math:`f(x) = \\exp(-x^2/2)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`He_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = pu._deprecate_as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0]*deg + [1]) + m = hermecompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = _normed_hermite_e_n(x, ideg) + df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = _normed_hermite_e_n(x, ideg - 1) + fm /= np.abs(fm).max() + w = 1/(fm * fm) + + # for Hermite_e we can also symmetrize + w = (w + w[::-1])/2 + x = (x - x[::-1])/2 + + # scale w to get the right value + w *= np.sqrt(2*np.pi) / w.sum() + + return x, w + + +def hermeweight(x): + """Weight function of the Hermite_e polynomials. + + The weight function is :math:`\\exp(-x^2/2)` and the interval of + integration is :math:`[-\\inf, \\inf]`. 
the HermiteE polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + w = np.exp(-.5*x**2) + return w + + +# +# HermiteE series class +# + +class HermiteE(ABCPolyBase): + """An HermiteE series class. + + The HermiteE class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed in the `ABCPolyBase` documentation. + + Parameters + ---------- + coef : array_like + HermiteE coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(hermeadd) + _sub = staticmethod(hermesub) + _mul = staticmethod(hermemul) + _div = staticmethod(hermediv) + _pow = staticmethod(hermepow) + _val = staticmethod(hermeval) + _int = staticmethod(hermeint) + _der = staticmethod(hermeder) + _fit = staticmethod(hermefit) + _line = staticmethod(hermeline) + _roots = staticmethod(hermeroots) + _fromroots = staticmethod(hermefromroots) + + # Virtual properties + domain = np.array(hermedomain) + window = np.array(hermedomain) + basis_name = 'He' diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/hermite_e.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/hermite_e.pyi new file mode 100644 index 00000000..0b7152a2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/hermite_e.pyi @@ -0,0 +1,46 @@ +from typing import Any + +from numpy import ndarray, dtype, int_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +hermetrim = trimcoef + +def poly2herme(pol): ... +def herme2poly(c): ... + +hermedomain: ndarray[Any, dtype[int_]] +hermezero: ndarray[Any, dtype[int_]] +hermeone: ndarray[Any, dtype[int_]] +hermex: ndarray[Any, dtype[int_]] + +def hermeline(off, scl): ... +def hermefromroots(roots): ... +def hermeadd(c1, c2): ... +def hermesub(c1, c2): ... +def hermemulx(c): ... +def hermemul(c1, c2): ... +def hermediv(c1, c2): ... +def hermepow(c, pow, maxpower=...): ... +def hermeder(c, m=..., scl=..., axis=...): ... +def hermeint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def hermeval(x, c, tensor=...): ... +def hermeval2d(x, y, c): ... +def hermegrid2d(x, y, c): ... +def hermeval3d(x, y, z, c): ... +def hermegrid3d(x, y, z, c): ... +def hermevander(x, deg): ... +def hermevander2d(x, y, deg): ... +def hermevander3d(x, y, z, deg): ... +def hermefit(x, y, deg, rcond=..., full=..., w=...): ... +def hermecompanion(c): ... +def hermeroots(c): ... +def hermegauss(deg): ... +def hermeweight(x): ... 
+ +class HermiteE(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/laguerre.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/laguerre.py new file mode 100644 index 00000000..925d4898 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/laguerre.py @@ -0,0 +1,1651 @@ +""" +================================================== +Laguerre Series (:mod:`numpy.polynomial.laguerre`) +================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Laguerre series, including a `Laguerre` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Laguerre + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + lagdomain + lagzero + lagone + lagx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + lagadd + lagsub + lagmulx + lagmul + lagdiv + lagpow + lagval + lagval2d + lagval3d + laggrid2d + laggrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + lagder + lagint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + lagfromroots + lagroots + lagvander + lagvander2d + lagvander3d + laggauss + lagweight + lagcompanion + lagfit + lagtrim + lagline + lag2poly + poly2lag + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd', + 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder', + 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander', + 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', + 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion', + 'laggauss', 'lagweight'] + +lagtrim = pu.trimcoef + + +def poly2lag(pol): + """ + poly2lag(pol) + + Convert a polynomial to a Laguerre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Laguerre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Laguerre + series. + + See Also + -------- + lag2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.laguerre import poly2lag + >>> poly2lag(np.arange(4)) + array([ 23., -63., 58., -18.]) + + """ + [pol] = pu.as_series([pol]) + res = 0 + for p in pol[::-1]: + res = lagadd(lagmulx(res), p) + return res + + +def lag2poly(c): + """ + Convert a Laguerre series to a polynomial. + + Convert an array representing the coefficients of a Laguerre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. 
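+
+    For reference, the first few Laguerre polynomials are ``L_0 = 1``,
+    ``L_1 = 1 - x`` and ``L_2 = (x**2 - 4*x + 2)/2``; the recursion in the
+    code below unwinds this expansion via the Laguerre recurrence.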
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array containing the Laguerre series coefficients, ordered
+        from lowest order term to highest.
+
+    Returns
+    -------
+    pol : ndarray
+        1-D array containing the coefficients of the equivalent polynomial
+        (relative to the "standard" basis) ordered from lowest order term
+        to highest.
+
+    See Also
+    --------
+    poly2lag
+
+    Notes
+    -----
+    The easy way to do conversions between polynomial basis sets
+    is to use the convert method of a class instance.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import lag2poly
+    >>> lag2poly([ 23., -63., 58., -18.])
+    array([0., 1., 2., 3.])
+
+    """
+    from .polynomial import polyadd, polysub, polymulx
+
+    [c] = pu.as_series([c])
+    n = len(c)
+    if n == 1:
+        return c
+    else:
+        c0 = c[-2]
+        c1 = c[-1]
+        # i is the current degree of c1
+        for i in range(n - 1, 1, -1):
+            tmp = c0
+            c0 = polysub(c[i - 2], (c1*(i - 1))/i)
+            c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i)
+        return polyadd(c0, polysub(c1, polymulx(c1)))
+
+#
+# These constant arrays are of integer type so as to be compatible
+# with the widest range of other types, such as Decimal.
+#
+
+# Laguerre
+lagdomain = np.array([0, 1])
+
+# Laguerre coefficients representing zero.
+lagzero = np.array([0])
+
+# Laguerre coefficients representing one.
+lagone = np.array([1])
+
+# Laguerre coefficients representing the identity x.
+lagx = np.array([1, -1])
+
+
+def lagline(off, scl):
+    """
+    Laguerre series whose graph is a straight line.
+
+    Parameters
+    ----------
+    off, scl : scalars
+        The specified line is given by ``off + scl*x``.
+
+    Returns
+    -------
+    y : ndarray
+        This module's representation of the Laguerre series for
+        ``off + scl*x``.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyline
+    numpy.polynomial.chebyshev.chebline
+    numpy.polynomial.legendre.legline
+    numpy.polynomial.hermite.hermline
+    numpy.polynomial.hermite_e.hermeline
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import lagline, lagval
+    >>> lagval(0,lagline(3, 2))
+    3.0
+    >>> lagval(1,lagline(3, 2))
+    5.0
+
+    """
+    if scl != 0:
+        return np.array([off + scl, -scl])
+    else:
+        return np.array([off])
+
+
+def lagfromroots(roots):
+    """
+    Generate a Laguerre series with given roots.
+
+    The function returns the coefficients of the polynomial
+
+    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+    in Laguerre form, where the `r_n` are the roots specified in `roots`.
+    If a zero has multiplicity n, then it must appear in `roots` n times.
+    For instance, if 2 is a root of multiplicity three and 3 is a root of
+    multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
+    roots can appear in any order.
+
+    If the returned coefficients are `c`, then
+
+    .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
+
+    The coefficient of the last term is not generally 1 for monic
+    polynomials in Laguerre form.
+
+    Parameters
+    ----------
+    roots : array_like
+        Sequence containing the roots.
+
+    Returns
+    -------
+    out : ndarray
+        1-D array of coefficients.  If all roots are real then `out` is a
+        real array, if some of the roots are complex, then `out` is complex
+        even if all the coefficients in the result are real (see Examples
+        below).
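+
+    As a concrete instance of the remark above, for ``n`` real roots the
+    coefficient of ``L_n`` in the result is ``(-1)**n * n!`` (since the
+    leading coefficient of ``L_n`` is ``(-1)**n / n!``).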
+ + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagfromroots, lagval + >>> coef = lagfromroots((-1, 0, 1)) + >>> lagval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = lagfromroots((-1j, 1j)) + >>> lagval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(lagline, lagmul, roots) + + +def lagadd(c1, c2): + """ + Add one Laguerre series to another. + + Returns the sum of two Laguerre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Laguerre series of their sum. + + See Also + -------- + lagsub, lagmulx, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Laguerre series + is a Laguerre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagadd + >>> lagadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + + """ + return pu._add(c1, c2) + + +def lagsub(c1, c2): + """ + Subtract one Laguerre series from another. + + Returns the difference of two Laguerre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their difference. + + See Also + -------- + lagadd, lagmulx, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Laguerre + series is a Laguerre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagsub + >>> lagsub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def lagmulx(c): + """Multiply a Laguerre series by x. + + Multiply the Laguerre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + lagadd, lagsub, lagmul, lagdiv, lagpow + + Notes + ----- + The multiplication uses the recursion relationship for Laguerre + polynomials in the form + + .. 
math:: + + xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmulx + >>> lagmulx([1, 2, 3]) + array([-1., -1., 11., -9.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] + prd[1] = -c[0] + for i in range(1, len(c)): + prd[i + 1] = -c[i]*(i + 1) + prd[i] += c[i]*(2*i + 1) + prd[i - 1] -= c[i]*i + return prd + + +def lagmul(c1, c2): + """ + Multiply one Laguerre series by another. + + Returns the product of two Laguerre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their product. + + See Also + -------- + lagadd, lagsub, lagmulx, lagdiv, lagpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Laguerre polynomial basis set. Thus, to express + the product as a Laguerre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmul + >>> lagmul([1, 2, 3], [0, 1, 2]) + array([ 8., -13., 38., -51., 36.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd) + c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd) + return lagadd(c0, lagsub(c1, lagmulx(c1))) + + +def lagdiv(c1, c2): + """ + Divide one Laguerre series by another. + + Returns the quotient-with-remainder of two Laguerre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Laguerre series coefficients representing the quotient and + remainder. + + See Also + -------- + lagadd, lagsub, lagmulx, lagmul, lagpow + + Notes + ----- + In general, the (polynomial) division of one Laguerre series by another + results in quotient and remainder terms that are not in the Laguerre + polynomial basis set. Thus, to express these results as a Laguerre + series, it is necessary to "reproject" the results onto the Laguerre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagdiv + >>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 1.])) + + """ + return pu._div(lagmul, c1, c2) + + +def lagpow(c, pow, maxpower=16): + """Raise a Laguerre series to a power. 
+ + Returns the Laguerre series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Laguerre series of power. + + See Also + -------- + lagadd, lagsub, lagmulx, lagmul, lagdiv + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagpow + >>> lagpow([1, 2, 3], 2) + array([ 14., -16., 56., -72., 54.]) + + """ + return pu._pow(lagmul, c, pow, maxpower) + + +def lagder(c, m=1, scl=1, axis=0): + """ + Differentiate a Laguerre series. + + Returns the Laguerre series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Laguerre series of the derivative. + + See Also + -------- + lagint + + Notes + ----- + In general, the result of differentiating a Laguerre series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagder + >>> lagder([ 1., 1., 1., -3.]) + array([1., 2., 3.]) + >>> lagder([ 1., 0., 0., -4., 3.], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 1, -1): + der[j - 1] = -c[j] + c[j - 1] += c[j] + der[0] = -c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Laguerre series. + + Returns the Laguerre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. 
At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Laguerre series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + lagder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagint + >>> lagint([1,2,3]) + array([ 1., 1., 1., -3.]) + >>> lagint([1,2,3], m=2) + array([ 1., 0., 0., -4., 3.]) + >>> lagint([1,2,3], k=1) + array([ 2., 1., 1., -3.]) + >>> lagint([1,2,3], lbnd=-1) + array([11.5, 1. , 1. , -3. ]) + >>> lagint([1,2], m=2, k=[1,2], lbnd=-1) + array([ 11.16666667, -5. , -3. , 2. 
]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] + tmp[1] = -c[0] + for j in range(1, n): + tmp[j] += c[j] + tmp[j + 1] = -c[j] + tmp[0] += k[i] - lagval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def lagval(x, c, tensor=True): + """ + Evaluate a Laguerre series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + lagval2d, laggrid2d, lagval3d, laggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagval + >>> coef = [1,2,3] + >>> lagval(1, coef) + -0.5 + >>> lagval([[1,2],[3,4]], coef) + array([[-0.5, -4. ], + [-4.5, -2. 
]]) + + """ + c = np.array(c, ndmin=1, copy=False) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - (c1*(nd - 1))/nd + c1 = tmp + (c1*((2*nd - 1) - x))/nd + return c0 + c1*(1 - x) + + +def lagval2d(x, y, c): + """ + Evaluate a 2-D Laguerre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + lagval, laggrid2d, lagval3d, laggrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._valnd(lagval, c, x, y) + + +def laggrid2d(x, y, c): + """ + Evaluate a 2-D Laguerre series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b) + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in `c[i,j]`. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. 
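(A quick editorial sketch, not part of the patch, of the distinction drawn above between pointwise evaluation with ``lagval2d`` and Cartesian-product evaluation with ``laggrid2d``:)

    import numpy as np
    from numpy.polynomial.laguerre import lagval2d, laggrid2d

    c = np.array([[1., 2.], [3., 4.]])  # c[i, j] multiplies L_i(x) * L_j(y)
    x = np.array([0.0, 1.0, 2.0])
    y = np.array([0.5, 1.5])

    # laggrid2d pairs every x with every y -> shape x.shape + y.shape
    grid = laggrid2d(x, y, c)
    print(grid.shape)  # (3, 2)
    # each grid entry is a pointwise lagval2d evaluation
    assert np.allclose(grid[2, 1], lagval2d(x[2], y[1], c))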
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the two dimensional Laguerre series at points in the
+ Cartesian product of `x` and `y`.
+
+ See Also
+ --------
+ lagval, lagval2d, lagval3d, laggrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._gridnd(lagval, c, x, y)
+
+
+def lagval3d(x, y, z, c):
+ """
+ Evaluate a 3-D Laguerre series at points (x, y, z).
+
+ This function returns the values:
+
+ .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
+
+ The parameters `x`, `y`, and `z` are converted to arrays only if
+ they are tuples or lists, otherwise they are treated as scalars and
+ they must have the same shape after conversion. In either case, either
+ `x`, `y`, and `z` or their elements must support multiplication and
+ addition both with themselves and with the elements of `c`.
+
+ If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+ shape to make it 3-D. The shape of the result will be c.shape[3:] +
+ x.shape.
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible object
+ The three dimensional series is evaluated at the points
+ `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
+ any of `x`, `y`, or `z` is a list or tuple, it is first converted
+ to an ndarray, otherwise it is left unchanged and if it isn't an
+ ndarray it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term of
+ multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+ greater than 3 the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the multidimensional polynomial on points formed with
+ triples of corresponding values from `x`, `y`, and `z`.
+
+ See Also
+ --------
+ lagval, lagval2d, laggrid2d, laggrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._valnd(lagval, c, x, y, z)
+
+
+def laggrid3d(x, y, z, c):
+ """
+ Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z.
+
+ This function returns the values:
+
+ .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
+
+ where the points `(a, b, c)` consist of all triples formed by taking
+ `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+ a grid with `x` in the first dimension, `y` in the second, and `z` in
+ the third.
+
+ The parameters `x`, `y`, and `z` are converted to arrays only if they
+ are tuples or lists, otherwise they are treated as scalars. In
+ either case, either `x`, `y`, and `z` or their elements must support
+ multiplication and addition both with themselves and with the elements
+ of `c`.
+
+ If `c` has fewer than three dimensions, ones are implicitly appended to
+ its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+ x.shape + y.shape + z.shape.
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible objects
+ The three dimensional series is evaluated at the points in the
+ Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+ list or tuple, it is first converted to an ndarray, otherwise it is
+ left unchanged and, if it isn't an ndarray, it is treated as a
+ scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term of
+ multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+ greater than three the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the three dimensional polynomial at points in the
+ Cartesian product of `x`, `y`, and `z`.
+
+ See Also
+ --------
+ lagval, lagval2d, laggrid2d, lagval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._gridnd(lagval, c, x, y, z)
+
+
+def lagvander(x, deg):
+ """Pseudo-Vandermonde matrix of given degree.
+
+ Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+ `x`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., i] = L_i(x)
+
+ where `0 <= i <= deg`. The leading indices of `V` index the elements of
+ `x` and the last index is the degree of the Laguerre polynomial.
+
+ If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+ array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and
+ ``lagval(x, c)`` are the same up to roundoff. This equivalence is
+ useful both for least squares fitting and for the evaluation of a large
+ number of Laguerre series of the same degree and sample points.
+
+ Parameters
+ ----------
+ x : array_like
+ Array of points. The dtype is converted to float64 or complex128
+ depending on whether any of the elements are complex. If `x` is
+ scalar it is converted to a 1-D array.
+ deg : int
+ Degree of the resulting matrix.
+
+ Returns
+ -------
+ vander : ndarray
+ The pseudo-Vandermonde matrix. The shape of the returned matrix is
+ ``x.shape + (deg + 1,)``, where the last index is the degree of the
+ corresponding Laguerre polynomial. The dtype will be the same as
+ the converted `x`.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagvander
+ >>> x = np.array([0, 1, 2])
+ >>> lagvander(x, 3)
+ array([[ 1. , 1. , 1. , 1. ],
+ [ 1. , 0. , -0.5 , -0.66666667],
+ [ 1. , -1. , -1. , -0.33333333]])
+
+ """
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg < 0:
+ raise ValueError("deg must be non-negative")
+
+ x = np.array(x, copy=False, ndmin=1) + 0.0
+ dims = (ideg + 1,) + x.shape
+ dtyp = x.dtype
+ v = np.empty(dims, dtype=dtyp)
+ v[0] = x*0 + 1
+ if ideg > 0:
+ v[1] = 1 - x
+ for i in range(2, ideg + 1):
+ v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i
+ return np.moveaxis(v, 0, -1)
+
+
+def lagvander2d(x, y, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
+
+ where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+ `V` index the points `(x, y)` and the last index encodes the degrees of
+ the Laguerre polynomials.
+
+ If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+ correspond to the elements of a 2-D coefficient array `c` of shape
+ (xdeg + 1, ydeg + 1) in the order
+
+ .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+ and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same
+ up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 2-D Laguerre
+ series of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes
+ will be converted to either float64 or complex128 depending on
+ whether any of the elements are complex. Scalars are converted to
+ 1-D arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg].
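(A small editorial sketch, assuming only the public API above, of the row-major column ordering ``lagvander2d`` uses, verifying the ``np.dot(V, c.flat)`` identity quoted in the docstring:)

    import numpy as np
    from numpy.polynomial.laguerre import lagvander2d, lagval2d

    x = np.array([0.0, 1.0, 2.5])
    y = np.array([0.3, 0.7, 1.1])
    c = np.arange(6.0).reshape(2, 3)   # degrees [1, 2] -> 2*3 coefficients

    V = lagvander2d(x, y, [1, 2])      # shape (3, (1+1)*(2+1)) == (3, 6)
    # columns run c00, c01, c02, c10, c11, c12, so flattening c row-major
    # reproduces direct evaluation up to roundoff
    assert np.allclose(np.dot(V, c.ravel()), lagval2d(x, y, c))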
+
+ Returns
+ -------
+ vander2d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+ as the converted `x` and `y`.
+
+ See Also
+ --------
+ lagvander, lagvander3d, lagval2d, lagval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg)
+
+
+def lagvander3d(x, y, z, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+ then the pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
+
+ where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+ indices of `V` index the points `(x, y, z)` and the last index encodes
+ the degrees of the Laguerre polynomials.
+
+ If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+ of `V` correspond to the elements of a 3-D coefficient array `c` of
+ shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+ .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+ and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the
+ same up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 3-D Laguerre
+ series of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y, z : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes will
+ be converted to either float64 or complex128 depending on whether
+ any of the elements are complex. Scalars are converted to 1-D
+ arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+ Returns
+ -------
+ vander3d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+ be the same as the converted `x`, `y`, and `z`.
+
+ See Also
+ --------
+ lagvander, lagvander2d, lagval2d, lagval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._vander_nd_flat((lagvander, lagvander, lagvander), (x, y, z), deg)
+
+
+def lagfit(x, y, deg, rcond=None, full=False, w=None):
+ """
+ Least squares fit of Laguerre series to data.
+
+ Return the coefficients of a Laguerre series of degree `deg` that is the
+ least squares fit to the data values `y` given at points `x`. If `y` is
+ 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+ fits are done, one for each column of `y`, and the resulting
+ coefficients are stored in the corresponding columns of a 2-D return.
+ The fitted polynomial(s) are in the form
+
+ .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
+
+ where ``n`` is `deg`.
+
+ Parameters
+ ----------
+ x : array_like, shape (M,)
+ x-coordinates of the M sample points ``(x[i], y[i])``.
+ y : array_like, shape (M,) or (M, K)
+ y-coordinates of the sample points. Several data sets of sample
+ points sharing the same x-coordinates can be fitted at once by
+ passing in a 2D-array that contains one dataset per column.
+ deg : int or 1-D array_like
+ Degree(s) of the fitting polynomials. If `deg` is a single integer
+ all terms up to and including the `deg`'th term are included in the
+ fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+ degrees of the terms to include may be used instead.
+ rcond : float, optional
+ Relative condition number of the fit.
Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Laguerre coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column *k* of `y` are in column + *k*. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.legendre.legfit + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + lagval : Evaluates a Laguerre series. + lagvander : pseudo Vandermonde matrix of Laguerre series. + lagweight : Laguerre weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Laguerre series ``p`` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where ``V`` is the weighted pseudo Vandermonde matrix of `x`, ``c`` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of ``V``. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Laguerre series are probably most useful when the data can + be approximated by ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the Laguerre + weight. In that case the weight ``sqrt(w(x[i]))`` should be used + together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is + available as `lagweight`. + + References + ---------- + .. 
[1] Wikipedia, "Curve fitting",
+ https://en.wikipedia.org/wiki/Curve_fitting
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagfit, lagval
+ >>> x = np.linspace(0, 10)
+ >>> err = np.random.randn(len(x))/10
+ >>> y = lagval(x, [1, 2, 3]) + err
+ >>> lagfit(x, y, 2)
+ array([ 0.96971004, 2.00193749, 3.00288744]) # may vary
+
+ """
+ return pu._fit(lagvander, x, y, deg, rcond, full, w)
+
+
+def lagcompanion(c):
+ """
+ Return the companion matrix of c.
+
+ The usual companion matrix of the Laguerre polynomials is already
+ symmetric when `c` is a basis Laguerre polynomial, so no scaling is
+ applied.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Laguerre series coefficients ordered from low to high
+ degree.
+
+ Returns
+ -------
+ mat : ndarray
+ Companion matrix of dimensions (deg, deg).
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) < 2:
+ raise ValueError('Series must have maximum degree of at least 1.')
+ if len(c) == 2:
+ return np.array([[1 + c[0]/c[1]]])
+
+ n = len(c) - 1
+ mat = np.zeros((n, n), dtype=c.dtype)
+ top = mat.reshape(-1)[1::n+1]
+ mid = mat.reshape(-1)[0::n+1]
+ bot = mat.reshape(-1)[n::n+1]
+ top[...] = -np.arange(1, n)
+ mid[...] = 2.*np.arange(n) + 1.
+ bot[...] = top
+ mat[:, -1] += (c[:-1]/c[-1])*n
+ return mat
+
+
+def lagroots(c):
+ """
+ Compute the roots of a Laguerre series.
+
+ Return the roots (a.k.a. "zeros") of the polynomial
+
+ .. math:: p(x) = \\sum_i c[i] * L_i(x).
+
+ Parameters
+ ----------
+ c : 1-D array_like
+ 1-D array of coefficients.
+
+ Returns
+ -------
+ out : ndarray
+ Array of the roots of the series. If all the roots are real,
+ then `out` is also real, otherwise it is complex.
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyroots
+ numpy.polynomial.legendre.legroots
+ numpy.polynomial.chebyshev.chebroots
+ numpy.polynomial.hermite.hermroots
+ numpy.polynomial.hermite_e.hermeroots
+
+ Notes
+ -----
+ The root estimates are obtained as the eigenvalues of the companion
+ matrix. Roots far from the origin of the complex plane may have large
+ errors due to the numerical instability of the series for such
+ values. Roots with multiplicity greater than 1 will also show larger
+ errors as the value of the series near such points is relatively
+ insensitive to errors in the roots. Isolated roots near the origin can
+ be improved by a few iterations of Newton's method.
+
+ The Laguerre series basis polynomials aren't powers of `x` so the
+ results of this function may seem unintuitive.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagroots, lagfromroots
+ >>> coef = lagfromroots([0, 1, 2])
+ >>> coef
+ array([ 2., -8., 12., -6.])
+ >>> lagroots(coef)
+ array([-4.4408921e-16, 1.0000000e+00, 2.0000000e+00])
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) <= 1:
+ return np.array([], dtype=c.dtype)
+ if len(c) == 2:
+ return np.array([1 + c[0]/c[1]])
+
+ # rotated companion matrix reduces error
+ m = lagcompanion(c)[::-1,::-1]
+ r = la.eigvals(m)
+ r.sort()
+ return r
+
+
+def laggauss(deg):
+ """
+ Gauss-Laguerre quadrature.
+
+ Computes the sample points and weights for Gauss-Laguerre quadrature.
+ These sample points and weights will correctly integrate polynomials of
+ degree :math:`2*deg - 1` or less over the interval :math:`[0, \\infty]`
+ with the weight function :math:`f(x) = \\exp(-x)`.
+
+ Parameters
+ ----------
+ deg : int
+ Number of sample points and weights. It must be >= 1.
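(To make the degree-``2*deg - 1`` exactness claim above concrete, a minimal editorial quadrature check, not part of the vendored module:)

    import numpy as np
    from numpy.polynomial.laguerre import laggauss

    x, w = laggauss(3)
    # a 3-point rule is exact for polynomials up to degree 5 against the
    # weight exp(-x) on [0, inf); e.g. integral of x**2 * exp(-x) is 2! = 2
    approx = np.sum(w * x**2)
    assert np.isclose(approx, 2.0)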
+
+ Returns
+ -------
+ x : ndarray
+ 1-D ndarray containing the sample points.
+ w : ndarray
+ 1-D ndarray containing the weights.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ The results have only been tested up to degree 100; higher degrees may
+ be problematic. The weights are determined by using the fact that
+
+ .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
+
+ where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
+ is the k'th root of :math:`L_n`, and then scaling the results to get
+ the right value when integrating 1.
+
+ """
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg <= 0:
+ raise ValueError("deg must be a positive integer")
+
+ # first approximation of roots. We use the fact that the companion
+ # matrix is symmetric in this case in order to obtain better zeros.
+ c = np.array([0]*deg + [1])
+ m = lagcompanion(c)
+ x = la.eigvalsh(m)
+
+ # improve roots by one application of Newton
+ dy = lagval(x, c)
+ df = lagval(x, lagder(c))
+ x -= dy/df
+
+ # compute the weights. We scale the factor to avoid possible numerical
+ # overflow.
+ fm = lagval(x, c[1:])
+ fm /= np.abs(fm).max()
+ df /= np.abs(df).max()
+ w = 1/(fm * df)
+
+ # scale w to get the right value, 1 in this case
+ w /= w.sum()
+
+ return x, w
+
+
+def lagweight(x):
+ """Weight function of the Laguerre polynomials.
+
+ The weight function is :math:`\\exp(-x)` and the interval of integration
+ is :math:`[0, \\infty]`. The Laguerre polynomials are orthogonal, but not
+ normalized, with respect to this weight function.
+
+ Parameters
+ ----------
+ x : array_like
+ Values at which the weight function will be computed.
+
+ Returns
+ -------
+ w : ndarray
+ The weight function at `x`.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ w = np.exp(-x)
+ return w
+
+#
+# Laguerre series class
+#
+
+class Laguerre(ABCPolyBase):
+ """A Laguerre series class.
+
+ The Laguerre class provides the standard Python numerical methods
+ '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+ attributes and methods listed in the `ABCPolyBase` documentation.
+
+ Parameters
+ ----------
+ coef : array_like
+ Laguerre coefficients in order of increasing degree, i.e.,
+ ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(x) + 3*L_2(x)``.
+ domain : (2,) array_like, optional
+ Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+ to the interval ``[window[0], window[1]]`` by shifting and scaling.
+ The default value is [0, 1].
+ window : (2,) array_like, optional
+ Window, see `domain` for its use. The default value is [0, 1].
+
+ .. versionadded:: 1.6.0
+ symbol : str, optional
+ Symbol used to represent the independent variable in string
+ representations of the polynomial expression, e.g. for printing.
+ The symbol must be a valid Python identifier. Default value is 'x'.
+
+ ..
versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(lagadd) + _sub = staticmethod(lagsub) + _mul = staticmethod(lagmul) + _div = staticmethod(lagdiv) + _pow = staticmethod(lagpow) + _val = staticmethod(lagval) + _int = staticmethod(lagint) + _der = staticmethod(lagder) + _fit = staticmethod(lagfit) + _line = staticmethod(lagline) + _roots = staticmethod(lagroots) + _fromroots = staticmethod(lagfromroots) + + # Virtual properties + domain = np.array(lagdomain) + window = np.array(lagdomain) + basis_name = 'L' diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/laguerre.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/laguerre.pyi new file mode 100644 index 00000000..e546bc20 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/laguerre.pyi @@ -0,0 +1,46 @@ +from typing import Any + +from numpy import ndarray, dtype, int_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +lagtrim = trimcoef + +def poly2lag(pol): ... +def lag2poly(c): ... + +lagdomain: ndarray[Any, dtype[int_]] +lagzero: ndarray[Any, dtype[int_]] +lagone: ndarray[Any, dtype[int_]] +lagx: ndarray[Any, dtype[int_]] + +def lagline(off, scl): ... +def lagfromroots(roots): ... +def lagadd(c1, c2): ... +def lagsub(c1, c2): ... +def lagmulx(c): ... +def lagmul(c1, c2): ... +def lagdiv(c1, c2): ... +def lagpow(c, pow, maxpower=...): ... +def lagder(c, m=..., scl=..., axis=...): ... +def lagint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def lagval(x, c, tensor=...): ... +def lagval2d(x, y, c): ... +def laggrid2d(x, y, c): ... +def lagval3d(x, y, z, c): ... +def laggrid3d(x, y, z, c): ... +def lagvander(x, deg): ... +def lagvander2d(x, y, deg): ... +def lagvander3d(x, y, z, deg): ... +def lagfit(x, y, deg, rcond=..., full=..., w=...): ... +def lagcompanion(c): ... +def lagroots(c): ... +def laggauss(deg): ... +def lagweight(x): ... + +class Laguerre(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/legendre.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/legendre.py new file mode 100644 index 00000000..8e9c19d9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/legendre.py @@ -0,0 +1,1664 @@ +""" +================================================== +Legendre Series (:mod:`numpy.polynomial.legendre`) +================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Legendre series, including a `Legendre` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Legendre + +Constants +--------- + +.. autosummary:: + :toctree: generated/ + + legdomain + legzero + legone + legx + +Arithmetic +---------- + +.. autosummary:: + :toctree: generated/ + + legadd + legsub + legmulx + legmul + legdiv + legpow + legval + legval2d + legval3d + leggrid2d + leggrid3d + +Calculus +-------- + +.. autosummary:: + :toctree: generated/ + + legder + legint + +Misc Functions +-------------- + +.. 
autosummary:: + :toctree: generated/ + + legfromroots + legroots + legvander + legvander2d + legvander3d + leggauss + legweight + legcompanion + legfit + legtrim + legline + leg2poly + poly2leg + +See also +-------- +numpy.polynomial + +""" +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', + 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', + 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander', + 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', + 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion', + 'leggauss', 'legweight'] + +legtrim = pu.trimcoef + + +def poly2leg(pol): + """ + Convert a polynomial to a Legendre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Legendre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Legendre + series. + + See Also + -------- + leg2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> p = P.Polynomial(np.arange(4)) + >>> p + Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) + >>> c = P.Legendre(P.legendre.poly2leg(p.coef)) + >>> c + Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) # may vary + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = legadd(legmulx(res), pol[i]) + return res + + +def leg2poly(c): + """ + Convert a Legendre series to a polynomial. + + Convert an array representing the coefficients of a Legendre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Legendre series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2leg + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> c = P.Legendre(range(4)) + >>> c + Legendre([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([-1. , -3.5, 3. , 7.5], domain=[-1., 1.], window=[-1., 1.]) + >>> P.legendre.leg2poly(range(4)) + array([-1. , -3.5, 3. 
, 7.5]) + + + """ + from .polynomial import polyadd, polysub, polymulx + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], (c1*(i - 1))/i) + c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i) + return polyadd(c0, polymulx(c1)) + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Legendre +legdomain = np.array([-1, 1]) + +# Legendre coefficients representing zero. +legzero = np.array([0]) + +# Legendre coefficients representing one. +legone = np.array([1]) + +# Legendre coefficients representing the identity x. +legx = np.array([0, 1]) + + +def legline(off, scl): + """ + Legendre series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Legendre series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> import numpy.polynomial.legendre as L + >>> L.legline(3,2) + array([3, 2]) + >>> L.legval(-3, L.legline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def legfromroots(roots): + """ + Generate a Legendre series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Legendre form, where the `r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Legendre form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> import numpy.polynomial.legendre as L + >>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.4, 0. , 0.4]) + >>> j = complex(0,1) + >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) # may vary + + """ + return pu._fromroots(legline, legmul, roots) + + +def legadd(c1, c2): + """ + Add one Legendre series to another. + + Returns the sum of two Legendre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. 
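(A short editorial check of the coefficient convention just stated, evaluating ``P_0 + 2*P_1 + 3*P_2`` directly from the classical Legendre polynomials:)

    import numpy as np
    from numpy.polynomial.legendre import legval

    # P_0(x) = 1, P_1(x) = x, P_2(x) = (3*x**2 - 1)/2
    x = 0.5
    by_hand = 1 + 2*x + 3*(3*x**2 - 1)/2
    assert np.isclose(legval(x, [1, 2, 3]), by_hand)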
+ + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Legendre series of their sum. + + See Also + -------- + legsub, legmulx, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Legendre series + is a Legendre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legadd(c1,c2) + array([4., 4., 4.]) + + """ + return pu._add(c1, c2) + + +def legsub(c1, c2): + """ + Subtract one Legendre series from another. + + Returns the difference of two Legendre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their difference. + + See Also + -------- + legadd, legmulx, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Legendre + series is a Legendre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legsub(c1,c2) + array([-2., 0., 2.]) + >>> L.legsub(c2,c1) # -C.legsub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def legmulx(c): + """Multiply a Legendre series by x. + + Multiply the Legendre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + legadd, legmul, legdiv, legpow + + Notes + ----- + The multiplication uses the recursion relationship for Legendre + polynomials in the form + + .. math:: + + xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> L.legmulx([1,2,3]) + array([ 0.66666667, 2.2, 1.33333333, 1.8]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1] = c[0] + for i in range(1, len(c)): + j = i + 1 + k = i - 1 + s = i + j + prd[j] = (c[i]*j)/s + prd[k] += (c[i]*i)/s + return prd + + +def legmul(c1, c2): + """ + Multiply one Legendre series by another. + + Returns the product of two Legendre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their product. 
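(One editorial way to see that this product is correct despite the reprojection discussed in the Notes below: convert to the power basis with ``leg2poly`` and compare against ordinary polynomial multiplication.)

    import numpy as np
    from numpy.polynomial.legendre import legmul, leg2poly
    from numpy.polynomial.polynomial import polymul

    c1, c2 = [1, 2, 3], [3, 2]
    # multiply in the Legendre basis, then convert to the power basis ...
    lhs = leg2poly(legmul(c1, c2))
    # ... and compare with converting first, then multiplying
    rhs = polymul(leg2poly(c1), leg2poly(c2))
    assert np.allclose(lhs, rhs)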
+ + See Also + -------- + legadd, legsub, legmulx, legdiv, legpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Legendre polynomial basis set. Thus, to express + the product as a Legendre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2) + >>> L.legmul(c1,c2) # multiplication requires "reprojection" + array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) # may vary + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0]*xs + c1 = 0 + elif len(c) == 2: + c0 = c[0]*xs + c1 = c[1]*xs + else: + nd = len(c) + c0 = c[-2]*xs + c1 = c[-1]*xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd) + c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd) + return legadd(c0, legmulx(c1)) + + +def legdiv(c1, c2): + """ + Divide one Legendre series by another. + + Returns the quotient-with-remainder of two Legendre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + quo, rem : ndarrays + Of Legendre series coefficients representing the quotient and + remainder. + + See Also + -------- + legadd, legsub, legmulx, legmul, legpow + + Notes + ----- + In general, the (polynomial) division of one Legendre series by another + results in quotient and remainder terms that are not in the Legendre + polynomial basis set. Thus, to express these results as a Legendre + series, it is necessary to "reproject" the results onto the Legendre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not + (array([3.]), array([-8., -4.])) + >>> c2 = (0,1,2,3) + >>> L.legdiv(c2,c1) # neither "intuitive" + (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) # may vary + + """ + return pu._div(legmul, c1, c2) + + +def legpow(c, pow, maxpower=16): + """Raise a Legendre series to a power. + + Returns the Legendre series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Legendre series of power. + + See Also + -------- + legadd, legsub, legmulx, legmul, legdiv + + """ + return pu._pow(legmul, c, pow, maxpower) + + +def legder(c, m=1, scl=1, axis=0): + """ + Differentiate a Legendre series. + + Returns the Legendre series coefficients `c` differentiated `m` times + along `axis`. 
At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Legendre series of the derivative. + + See Also + -------- + legint + + Notes + ----- + In general, the result of differentiating a Legendre series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3,4) + >>> L.legder(c) + array([ 6., 9., 20.]) + >>> L.legder(c, 3) + array([60.]) + >>> L.legder(c, scl=-1) + array([ -6., -9., -20.]) + >>> L.legder(c, 2,-1) + array([ 9., 60.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2*j - 1)*c[j] + c[j - 2] += c[j] + if n > 1: + der[1] = 3*c[2] + der[0] = c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Legendre series. + + Returns the Legendre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. 
(Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Legendre series coefficient array of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + legder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3) + >>> L.legint(c) + array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, 3) + array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, # may vary + -1.73472348e-18, 1.90476190e-02, 9.52380952e-03]) + >>> L.legint(c, k=3) + array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, lbnd=-2) + array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, scl=2) + array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0]*0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1]/3 + for j in range(2, n): + t = c[j]/(2*j + 1) + tmp[j + 1] = t + tmp[j - 1] -= t + tmp[0] += k[i] - legval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def legval(x, c, tensor=True): + """ + Evaluate a Legendre series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. 
In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + legval2d, leggrid2d, legval3d, leggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + """ + c = np.array(c, ndmin=1, copy=False) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - (c1*(nd - 1))/nd + c1 = tmp + (c1*x*(2*nd - 1))/nd + return c0 + c1*x + + +def legval2d(x, y, c): + """ + Evaluate a 2-D Legendre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. 
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term
+        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
+        dimension greater than two the remaining indices enumerate multiple
+        sets of coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional Legendre series at points formed
+        from pairs of corresponding values from `x` and `y`.
+
+    See Also
+    --------
+    legval, leggrid2d, legval3d, leggrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._valnd(legval, c, x, y)
+
+
+def leggrid2d(x, y, c):
+    """
+    Evaluate a 2-D Legendre series on the Cartesian product of x and y.
+
+    This function returns the values:
+
+    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
+
+    where the points `(a, b)` consist of all pairs formed by taking
+    `a` from `x` and `b` from `y`. The resulting points form a grid with
+    `x` in the first dimension and `y` in the second.
+
+    The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars. In either
+    case, either `x` and `y` or their elements must support multiplication
+    and addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than two dimensions, ones are implicitly appended to
+    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+    x.shape + y.shape.
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points in the
+        Cartesian product of `x` and `y`. If `x` or `y` is a list or
+        tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and, if it isn't an ndarray, it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
+        greater than two the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional Legendre series at points in the
+        Cartesian product of `x` and `y`.
+
+    See Also
+    --------
+    legval, legval2d, legval3d, leggrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._gridnd(legval, c, x, y)
+
+
+def legval3d(x, y, z, c):
+    """
+    Evaluate a 3-D Legendre series at points (x, y, z).
+
+    This function returns the values:
+
+    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if
+    they are tuples or lists, otherwise they are treated as scalars and
+    they must have the same shape after conversion. In either case, either
+    `x`, `y`, and `z` or their elements must support multiplication and
+    addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+    shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible object
+        The three dimensional series is evaluated at the points
+        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
+        any of `x`, `y`, or `z` is a list or tuple, it is first converted
+        to an ndarray, otherwise it is left unchanged and if it isn't an
+        ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``.
+        If `c` has dimension greater than 3 the remaining indices
+        enumerate multiple sets of coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the multidimensional polynomial on points formed with
+        triples of corresponding values from `x`, `y`, and `z`.
+
+    See Also
+    --------
+    legval, legval2d, leggrid2d, leggrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._valnd(legval, c, x, y, z)
+
+
+def leggrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
+
+    where the points `(a, b, c)` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        multi-degree i,j,k are contained in ``c[i,j,k]``. If `c` has
+        dimension greater than three the remaining indices enumerate
+        multiple sets of coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional Legendre series at points in
+        the Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    legval, legval2d, leggrid2d, legval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._gridnd(legval, c, x, y, z)
+
+
+def legvander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = L_i(x)
+
+    where `0 <= i <= deg`. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the Legendre polynomial.
+
+    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+    array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
+    ``legval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of Legendre series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding Legendre polynomial. The dtype will be the same as
+        the converted `x`.
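+
+    A minimal doctest-style sketch of the equivalence described above,
+    with arbitrarily chosen sample points and coefficients:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import legendre as L
+    >>> x = np.array([-1.0, 0.0, 1.0])
+    >>> V = L.legvander(x, 2)          # columns are L_0(x), L_1(x), L_2(x)
+    >>> V
+    array([[ 1. , -1. ,  1. ],
+           [ 1. ,  0. , -0.5],
+           [ 1. ,  1. ,  1. ]])
+    >>> c = np.array([1.0, 2.0, 3.0])
+    >>> np.allclose(np.dot(V, c), L.legval(x, c))
+    True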
+ + """ + ideg = pu._deprecate_as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=False, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + # Use forward recursion to generate the entries. This is not as accurate + # as reverse recursion in this application but it is more efficient. + v[0] = x*0 + 1 + if ideg > 0: + v[1] = x + for i in range(2, ideg + 1): + v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i + return np.moveaxis(v, 0, -1) + + +def legvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y)`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y), + + where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of + `V` index the points `(x, y)` and the last index encodes the degrees of + the Legendre polynomials. + + If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Legendre + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + legvander, legvander3d, legval2d, legval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._vander_nd_flat((legvander, legvander), (x, y), deg) + + +def legvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`, + then The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z), + + where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading + indices of `V` index the points `(x, y, z)` and the last index encodes + the degrees of the Legendre polynomials. + + If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Legendre + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. 
Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + legvander, legvander3d, legval2d, legval3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._vander_nd_flat((legvander, legvander, legvander), (x, y, z), deg) + + +def legfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Legendre series to data. + + Return the coefficients of a Legendre series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + .. versionadded:: 1.5.0 + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Legendre coefficients ordered from low to high. If `y` was + 2-D, the coefficients for the data in column k of `y` are in + column `k`. If `deg` is specified as a list, coefficients for + terms not included in the fit are set equal to zero in the + returned `coef`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. 
The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + legval : Evaluates a Legendre series. + legvander : Vandermonde matrix of Legendre series. + legweight : Legendre weight function (= 1). + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Legendre series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `RankWarning` will be issued. This means that the + coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Legendre series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate splines may be a good alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + + """ + return pu._fit(legvander, x, y, deg, rcond, full, w) + + +def legcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an Legendre basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0]/c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = 1./np.sqrt(2*np.arange(n) + 1) + top = mat.reshape(-1)[1::n+1] + bot = mat.reshape(-1)[n::n+1] + top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n] + bot[...] = top + mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1)) + return mat + + +def legroots(c): + """ + Compute the roots of a Legendre series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * L_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. 
+ + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.chebyshev.chebroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such values. + Roots with multiplicity greater than 1 will also show larger errors as + the value of the series near such points is relatively insensitive to + errors in the roots. Isolated roots near the origin can be improved by + a few iterations of Newton's method. + + The Legendre series basis polynomials aren't powers of ``x`` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> import numpy.polynomial.legendre as leg + >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots + array([-0.85099543, -0.11407192, 0.51506735]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0]/c[1]]) + + # rotated companion matrix reduces error + m = legcompanion(c)[::-1,::-1] + r = la.eigvals(m) + r.sort() + return r + + +def leggauss(deg): + """ + Gauss-Legendre quadrature. + + Computes the sample points and weights for Gauss-Legendre quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with + the weight function :math:`f(x) = 1`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + + .. versionadded:: 1.7.0 + + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`L_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = pu._deprecate_as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0]*deg + [1]) + m = legcompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = legval(x, c) + df = legval(x, legder(c)) + x -= dy/df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = legval(x, c[1:]) + fm /= np.abs(fm).max() + df /= np.abs(df).max() + w = 1/(fm * df) + + # for Legendre we can also symmetrize + w = (w + w[::-1])/2 + x = (x - x[::-1])/2 + + # scale w to get the right value + w *= 2. / w.sum() + + return x, w + + +def legweight(x): + """ + Weight function of the Legendre polynomials. + + The weight function is :math:`1` and the interval of integration is + :math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not + normalized, with respect to this weight function. 
+ + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + w = x*0.0 + 1.0 + return w + +# +# Legendre series class +# + +class Legendre(ABCPolyBase): + """A Legendre series class. + + The Legendre class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed in the `ABCPolyBase` documentation. + + Parameters + ---------- + coef : array_like + Legendre coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1, 1]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1, 1]. + + .. versionadded:: 1.6.0 + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(legadd) + _sub = staticmethod(legsub) + _mul = staticmethod(legmul) + _div = staticmethod(legdiv) + _pow = staticmethod(legpow) + _val = staticmethod(legval) + _int = staticmethod(legint) + _der = staticmethod(legder) + _fit = staticmethod(legfit) + _line = staticmethod(legline) + _roots = staticmethod(legroots) + _fromroots = staticmethod(legfromroots) + + # Virtual properties + domain = np.array(legdomain) + window = np.array(legdomain) + basis_name = 'P' diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/legendre.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/legendre.pyi new file mode 100644 index 00000000..63a1c3f3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/legendre.pyi @@ -0,0 +1,46 @@ +from typing import Any + +from numpy import ndarray, dtype, int_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +legtrim = trimcoef + +def poly2leg(pol): ... +def leg2poly(c): ... + +legdomain: ndarray[Any, dtype[int_]] +legzero: ndarray[Any, dtype[int_]] +legone: ndarray[Any, dtype[int_]] +legx: ndarray[Any, dtype[int_]] + +def legline(off, scl): ... +def legfromroots(roots): ... +def legadd(c1, c2): ... +def legsub(c1, c2): ... +def legmulx(c): ... +def legmul(c1, c2): ... +def legdiv(c1, c2): ... +def legpow(c, pow, maxpower=...): ... +def legder(c, m=..., scl=..., axis=...): ... +def legint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... +def legval(x, c, tensor=...): ... +def legval2d(x, y, c): ... +def leggrid2d(x, y, c): ... +def legval3d(x, y, z, c): ... +def leggrid3d(x, y, z, c): ... +def legvander(x, deg): ... +def legvander2d(x, y, deg): ... +def legvander3d(x, y, z, deg): ... +def legfit(x, y, deg, rcond=..., full=..., w=...): ... +def legcompanion(c): ... +def legroots(c): ... +def leggauss(deg): ... +def legweight(x): ... 
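+# Illustrative usage sketch of the concrete class stubbed below; the
+# coefficient values here are arbitrary examples:
+#
+#   >>> from numpy.polynomial import Legendre
+#   >>> p = Legendre([1, 2, 3])   # 1*P_0(x) + 2*P_1(x) + 3*P_2(x)
+#   >>> p(0.5)                    # P_2(0.5) = -0.125, so 1 + 1 - 0.375
+#   1.625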
+ +class Legendre(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/polynomial.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/polynomial.py new file mode 100644 index 00000000..ceadff0b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/polynomial.py @@ -0,0 +1,1542 @@ +""" +================================================= +Power Series (:mod:`numpy.polynomial.polynomial`) +================================================= + +This module provides a number of objects (mostly functions) useful for +dealing with polynomials, including a `Polynomial` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with polynomial objects is in +the docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Polynomial + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + polydomain + polyzero + polyone + polyx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + polyadd + polysub + polymulx + polymul + polydiv + polypow + polyval + polyval2d + polyval3d + polygrid2d + polygrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + polyder + polyint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + polyfromroots + polyroots + polyvalfromroots + polyvander + polyvander2d + polyvander3d + polycompanion + polyfit + polytrim + polyline + +See Also +-------- +`numpy.polynomial` + +""" +__all__ = [ + 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', + 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', + 'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander', + 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', + 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d'] + +import numpy as np +import numpy.linalg as la +from numpy.core.multiarray import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +polytrim = pu.trimcoef + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Polynomial default domain. +polydomain = np.array([-1, 1]) + +# Polynomial coefficients representing zero. +polyzero = np.array([0]) + +# Polynomial coefficients representing one. +polyone = np.array([1]) + +# Polynomial coefficients representing the identity x. +polyx = np.array([0, 1]) + +# +# Polynomial series functions +# + + +def polyline(off, scl): + """ + Returns an array representing a linear polynomial. + + Parameters + ---------- + off, scl : scalars + The "y-intercept" and "slope" of the line, respectively. + + Returns + ------- + y : ndarray + This module's representation of the linear polynomial ``off + + scl*x``. + + See Also + -------- + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyline(1,-1) + array([ 1, -1]) + >>> P.polyval(1, P.polyline(1,-1)) # should be 0 + 0.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def polyfromroots(roots): + """ + Generate a monic polynomial with given roots. 
+ + Return the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + where the ``r_n`` are the roots specified in `roots`. If a zero has + multiplicity n, then it must appear in `roots` n times. For instance, + if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, + then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear + in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * x + ... + x^n + + The coefficient of the last term is 1 for monic polynomials in this + form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of the polynomial's coefficients If all the roots are + real, then `out` is also real, otherwise it is complex. (see + Examples below). + + See Also + -------- + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Notes + ----- + The coefficients are determined by multiplying together linear factors + of the form ``(x - r_i)``, i.e. + + .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n) + + where ``n == len(roots) - 1``; note that this implies that ``1`` is always + returned for :math:`a_n`. + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x + array([ 0., -1., 0., 1.]) + >>> j = complex(0,1) + >>> P.polyfromroots((-j,j)) # complex returned, though values are real + array([1.+0.j, 0.+0.j, 1.+0.j]) + + """ + return pu._fromroots(polyline, polymul, roots) + + +def polyadd(c1, c2): + """ + Add one polynomial to another. + + Returns the sum of two polynomials `c1` + `c2`. The arguments are + sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + out : ndarray + The coefficient array representing their sum. + + See Also + -------- + polysub, polymulx, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> sum = P.polyadd(c1,c2); sum + array([4., 4., 4.]) + >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) + 28.0 + + """ + return pu._add(c1, c2) + + +def polysub(c1, c2): + """ + Subtract one polynomial from another. + + Returns the difference of two polynomials `c1` - `c2`. The arguments + are sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of coefficients representing their difference. + + See Also + -------- + polyadd, polymulx, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> P.polysub(c1,c2) + array([-2., 0., 2.]) + >>> P.polysub(c2,c1) # -P.polysub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def polymulx(c): + """Multiply a polynomial by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. 
+ + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + polyadd, polysub, polymul, polydiv, polypow + + Notes + ----- + + .. versionadded:: 1.5.0 + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0]*0 + prd[1:] = c + return prd + + +def polymul(c1, c2): + """ + Multiply one polynomial by another. + + Returns the product of two polynomials `c1` * `c2`. The arguments are + sequences of coefficients, from lowest order term to highest, e.g., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.`` + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of coefficients representing a polynomial, relative to the + "standard" basis, and ordered from lowest order term to highest. + + Returns + ------- + out : ndarray + Of the coefficients of their product. + + See Also + -------- + polyadd, polysub, polymulx, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> P.polymul(c1,c2) + array([ 3., 8., 14., 8., 3.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + ret = np.convolve(c1, c2) + return pu.trimseq(ret) + + +def polydiv(c1, c2): + """ + Divide one polynomial by another. + + Returns the quotient-with-remainder of two polynomials `c1` / `c2`. + The arguments are sequences of coefficients, from lowest order term + to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + [quo, rem] : ndarrays + Of coefficient series representing the quotient and remainder. + + See Also + -------- + polyadd, polysub, polymulx, polymul, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> P.polydiv(c1,c2) + (array([3.]), array([-8., -4.])) + >>> P.polydiv(c2,c1) + (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) # may vary + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + # note: this is more efficient than `pu._div(polymul, c1, c2)` + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + dlen = lc1 - lc2 + scl = c2[-1] + c2 = c2[:-1]/scl + i = dlen + j = lc1 - 1 + while i >= 0: + c1[i:j] -= c2*c1[j] + i -= 1 + j -= 1 + return c1[j+1:]/scl, pu.trimseq(c1[:j+1]) + + +def polypow(c, pow, maxpower=None): + """Raise a polynomial to a power. + + Returns the polynomial `c` raised to the power `pow`. The argument + `c` is a sequence of coefficients ordered from low to high. i.e., + [1,2,3] is the series ``1 + 2*x + 3*x**2.`` + + Parameters + ---------- + c : array_like + 1-D array of array of series coefficients ordered from low to + high degree. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Power series of power. 
+ + See Also + -------- + polyadd, polysub, polymulx, polymul, polydiv + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polypow([1,2,3], 2) + array([ 1., 4., 10., 12., 9.]) + + """ + # note: this is more efficient than `pu._pow(polymul, c1, c2)`, as it + # avoids calling `as_series` repeatedly + return pu._pow(np.convolve, c, pow, maxpower) + + +def polyder(c, m=1, scl=1, axis=0): + """ + Differentiate a polynomial. + + Returns the polynomial coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The + argument `c` is an array of coefficients from low to high degree along + each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2`` + while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is + ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of polynomial coefficients. If c is multidimensional the + different axis correspond to different variables with the degree + in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change + of variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + der : ndarray + Polynomial coefficients of the derivative. + + See Also + -------- + polyint + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3 + >>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2 + array([ 2., 6., 12.]) + >>> P.polyder(c,3) # (d**3/dx**3)(c) = 24 + array([24.]) + >>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2 + array([ -2., -6., -12.]) + >>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x + array([ 6., 24.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + cdt = c.dtype + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1]*0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=cdt) + for j in range(n, 0, -1): + der[j - 1] = j*c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a polynomial. + + Returns the polynomial coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients, from low to high degree along each axis, e.g., [1,2,3] + represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]] + represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is + ``y``. 
+ + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients, ordered from low to high. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at zero + is the first value in the list, the value of the second integral + at zero is the second value, etc. If ``k == []`` (the default), + all constants are set to zero. If ``m == 1``, a single scalar can + be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + .. versionadded:: 1.7.0 + + Returns + ------- + S : ndarray + Coefficient array of the integral. + + Raises + ------ + ValueError + If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + polyder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. Why + is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1,2,3) + >>> P.polyint(c) # should return array([0, 1, 1, 1]) + array([0., 1., 1., 1.]) + >>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20]) + array([ 0. , 0. , 0. , 0.16666667, 0.08333333, # may vary + 0.05 ]) + >>> P.polyint(c,k=3) # should return array([3, 1, 1, 1]) + array([3., 1., 1., 1.]) + >>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1]) + array([6., 1., 1., 1.]) + >>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2]) + array([ 0., -2., -2., -2.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype doesn't preserve mask attribute. + c = c + 0.0 + cdt = c.dtype + if not np.iterable(k): + k = [k] + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + k = list(k) + [0]*(cnt - len(k)) + c = np.moveaxis(c, iaxis, 0) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt) + tmp[0] = c[0]*0 + tmp[1] = c[0] + for j in range(1, n): + tmp[j + 1] = c[j]/(j + 1) + tmp[0] += k[i] - polyval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def polyval(x, c, tensor=True): + """ + Evaluate a polynomial at points x. + + If `c` is of length `n + 1`, this function returns the value + + .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. 
+ + If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + .. versionadded:: 1.7.0 + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above. + + See Also + -------- + polyval2d, polygrid2d, polyval3d, polygrid3d + + Notes + ----- + The evaluation uses Horner's method. + + Examples + -------- + >>> from numpy.polynomial.polynomial import polyval + >>> polyval(1, [1,2,3]) + 6.0 + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> polyval(a, [1,2,3]) + array([[ 1., 6.], + [17., 34.]]) + >>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients + >>> coef + array([[0, 1], + [2, 3]]) + >>> polyval([1,2], coef, tensor=True) + array([[2., 4.], + [4., 7.]]) + >>> polyval([1,2], coef, tensor=False) + array([2., 7.]) + + """ + c = np.array(c, ndmin=1, copy=False) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,)*x.ndim) + + c0 = c[-1] + x*0 + for i in range(2, len(c) + 1): + c0 = c[-i] + c0*x + return c0 + + +def polyvalfromroots(x, r, tensor=True): + """ + Evaluate a polynomial specified by its roots at points x. + + If `r` is of length `N`, this function returns the value + + .. math:: p(x) = \\prod_{n=1}^{N} (x - r_n) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `r`. + + If `r` is a 1-D array, then `p(x)` will have the same shape as `x`. If `r` + is multidimensional, then the shape of the result depends on the value of + `tensor`. If `tensor` is ``True`` the shape will be r.shape[1:] + x.shape; + that is, each polynomial is evaluated at every value of `x`. If `tensor` is + ``False``, the shape will be r.shape[1:]; that is, each polynomial is + evaluated only for the corresponding broadcast value of `x`. 
Note that + scalars have shape (,). + + .. versionadded:: 1.12 + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `r`. + r : array_like + Array of roots. If `r` is multidimensional the first index is the + root index, while the remaining indices enumerate multiple + polynomials. For instance, in the two dimensional case the roots + of each polynomial may be thought of as stored in the columns of `r`. + tensor : boolean, optional + If True, the shape of the roots array is extended with ones on the + right, one for each dimension of `x`. Scalars have dimension 0 for this + action. The result is that every column of coefficients in `r` is + evaluated for every element of `x`. If False, `x` is broadcast over the + columns of `r` for the evaluation. This keyword is useful when `r` is + multidimensional. The default value is True. + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above. + + See Also + -------- + polyroots, polyfromroots, polyval + + Examples + -------- + >>> from numpy.polynomial.polynomial import polyvalfromroots + >>> polyvalfromroots(1, [1,2,3]) + 0.0 + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> polyvalfromroots(a, [-1, 0, 1]) + array([[-0., 0.], + [ 6., 24.]]) + >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients + >>> r # each column of r defines one polynomial + array([[-2, -1], + [ 0, 1]]) + >>> b = [-2, 1] + >>> polyvalfromroots(b, r, tensor=True) + array([[-0., 3.], + [ 3., 0.]]) + >>> polyvalfromroots(b, r, tensor=False) + array([-0., 0.]) + """ + r = np.array(r, ndmin=1, copy=False) + if r.dtype.char in '?bBhHiIlLqQpP': + r = r.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray): + if tensor: + r = r.reshape(r.shape + (1,)*x.ndim) + elif x.ndim >= r.ndim: + raise ValueError("x.ndim must be < r.ndim when tensor == False") + return np.prod(x - r, axis=0) + + +def polyval2d(x, y, c): + """ + Evaluate a 2-D polynomial at points (x, y). + + This function returns the value + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points `(x, y)`, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in `c[i,j]`. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. 
+ + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + polyval, polygrid2d, polyval3d, polygrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._valnd(polyval, c, x, y) + + +def polygrid2d(x, y, c): + """ + Evaluate a 2-D polynomial on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + polyval, polyval2d, polyval3d, polygrid3d + + Notes + ----- + + .. versionadded:: 1.7.0 + + """ + return pu._gridnd(polyval, c, x, y) + + +def polyval3d(x, y, z, c): + """ + Evaluate a 3-D polynomial at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. 
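+
+    A minimal doctest sketch with arbitrarily chosen coefficients; the
+    coefficient of x**i * y**j * z**k sits at ``c[i][j][k]``:
+
+    >>> from numpy.polynomial import polynomial as P
+    >>> c = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
+    >>> P.polyval3d(1, 2, 3, c)
+    122.0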
+
+    See Also
+    --------
+    polyval, polyval2d, polygrid2d, polygrid3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._valnd(polyval, c, x, y, z)
+
+
+def polygrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D polynomial on the Cartesian product of x, y and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k
+
+    where the points `(a, b, c)` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        multi-degree i,j,k are contained in ``c[i,j,k]``. If `c` has
+        dimension greater than three the remaining indices enumerate
+        multiple sets of coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    polyval, polyval2d, polygrid2d, polyval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._gridnd(polyval, c, x, y, z)
+
+
+def polyvander(x, deg):
+    """Vandermonde matrix of given degree.
+
+    Returns the Vandermonde matrix of degree `deg` and sample points
+    `x`. The Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = x^i,
+
+    where `0 <= i <= deg`. The leading indices of `V` index the elements of
+    `x` and the last index is the power of `x`.
+
+    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+    matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and
+    ``polyval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of polynomials of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the power of `x`.
+        The dtype will be the same as the converted `x`.
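+
+    A minimal doctest sketch with arbitrary sample points; each row holds
+    the powers x**0, x**1, x**2 of one point:
+
+    >>> from numpy.polynomial import polynomial as P
+    >>> P.polyvander([1, 2, 3], 2)
+    array([[1., 1., 1.],
+           [1., 2., 4.],
+           [1., 3., 9.]])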
+
+    See Also
+    --------
+    polyvander2d, polyvander3d
+
+    """
+    ideg = pu._deprecate_as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=False, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x*0 + 1
+    if ideg > 0:
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = v[i-1]*x
+    return np.moveaxis(v, 0, -1)
+
+
+def polyvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = x^i * y^j,
+
+    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+    `V` index the points `(x, y)` and the last index encodes the powers of
+    `x` and `y`.
+
+    If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D polynomials
+    of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    polyvander, polyvander3d, polyval2d, polyval3d
+
+    """
+    return pu._vander_nd_flat((polyvander, polyvander), (x, y), deg)
+
+
+def polyvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k,
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+    indices of `V` index the points `(x, y, z)` and the last index encodes
+    the powers of `x`, `y`, and `z`.
+
+    If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D polynomials
+    of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    polyvander, polyvander2d, polyval2d, polyval3d
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    return pu._vander_nd_flat((polyvander, polyvander, polyvander), (x, y, z), deg)
+
+
+def polyfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least-squares fit of a polynomial to data.
+
+    Return the coefficients of a polynomial of degree `deg` that is the
+    least squares fit to the data values `y` given at points `x`. If `y` is
+    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+    fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (`M`,)
+        x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
+    y : array_like, shape (`M`,) or (`M`, `K`)
+        y-coordinates of the sample points. Several sets of sample points
+        sharing the same x-coordinates can be (independently) fit with one
+        call to `polyfit` by passing in for `y` a 2-D array that contains
+        one data set per column.
+    deg : int or 1-D array_like
+        Degree(s) of the fitting polynomials. If `deg` is a single integer
+        all terms up to and including the `deg`'th term are included in the
+        fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+        degrees of the terms to include may be used instead.
+    rcond : float, optional
+        Relative condition number of the fit. Singular values smaller
+        than `rcond`, relative to the largest singular value, will be
+        ignored. The default value is ``len(x)*eps``, where `eps` is the
+        relative precision of the platform's float type, about 2e-16 in
+        most cases.
+    full : bool, optional
+        Switch determining the nature of the return value. When ``False``
+        (the default) just the coefficients are returned; when ``True``,
+        diagnostic information from the singular value decomposition (used
+        to solve the fit's matrix equation) is also returned.
+    w : array_like, shape (`M`,), optional
+        Weights. If not None, the weight ``w[i]`` applies to the unsquared
+        residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+        chosen so that the errors of the products ``w[i]*y[i]`` all have the
+        same variance. When using inverse-variance weighting, use
+        ``w[i] = 1/sigma(y[i])``. The default value is None.
+
+        .. versionadded:: 1.5.0
+
+    Returns
+    -------
+    coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
+        Polynomial coefficients ordered from low to high. If `y` was 2-D,
+        the coefficients in column `k` of `coef` represent the polynomial
+        fit to the data in `y`'s `k`-th column.
+
+    [residuals, rank, singular_values, rcond] : list
+        These values are only returned if ``full == True``
+
+        - residuals -- sum of squared residuals of the least squares fit
+        - rank -- the numerical rank of the scaled Vandermonde matrix
+        - singular_values -- singular values of the scaled Vandermonde matrix
+        - rcond -- value of `rcond`.
+
+        For more details, see `numpy.linalg.lstsq`.
+
+    Raises
+    ------
+    RankWarning
+        Raised if the matrix in the least-squares fit is rank deficient.
+        The warning is only raised if ``full == False``. 
The warnings can + be turned off by: + + >>> import warnings + >>> warnings.simplefilter('ignore', np.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + polyval : Evaluates a polynomial. + polyvander : Vandermonde matrix for powers. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the polynomial `p` that minimizes + the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) over-determined matrix equation: + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected (and `full` == ``False``), a `RankWarning` will be raised. + This means that the coefficient values may be poorly determined. + Fitting to a lower order polynomial will usually get rid of the warning + (but may not be what you want, of course; if you have independent + reason(s) for choosing the degree which isn't working, you may have to: + a) reconsider those reasons, and/or b) reconsider the quality of your + data). The `rcond` parameter can also be set to a value smaller than + its default, but the resulting fit may be spurious and have large + contributions from roundoff error. + + Polynomial fits using double precision tend to "fail" at about + (polynomial) degree 20. Fits using Chebyshev or Legendre series are + generally better conditioned, but much can still depend on the + distribution of the sample points and the smoothness of the data. If + the quality of the fit is inadequate, splines may be a good + alternative. + + Examples + -------- + >>> np.random.seed(123) + >>> from numpy.polynomial import polynomial as P + >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] + >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + Gaussian noise + >>> c, stats = P.polyfit(x,y,3,full=True) + >>> np.random.seed(123) + >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1 + array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) # may vary + >>> stats # note the large SSR, explaining the rather poor results + [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, # may vary + 0.28853036]), 1.1324274851176597e-014] + + Same thing without the added noise + + >>> y = x**3 - x + >>> c, stats = P.polyfit(x,y,3,full=True) + >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1 + array([-6.36925336e-18, -1.00000000e+00, -4.08053781e-16, 1.00000000e+00]) + >>> stats # note the minuscule SSR + [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, # may vary + 0.50443316, 0.28853036]), 1.1324274851176597e-014] + + """ + return pu._fit(polyvander, x, y, deg, rcond, full, w) + + +def polycompanion(c): + """ + Return the companion matrix of c. + + The companion matrix for power series cannot be made symmetric by + scaling the basis, so this function differs from those for the + orthogonal polynomials. 
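+
+    The eigenvalues of the companion matrix are the roots of the
+    polynomial; `polyroots` uses it for exactly that purpose.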
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of polynomial coefficients ordered from low to high
+        degree.
+
+    Returns
+    -------
+    mat : ndarray
+        Companion matrix of dimensions (deg, deg).
+
+    Notes
+    -----
+
+    .. versionadded:: 1.7.0
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        raise ValueError('Series must have maximum degree of at least 1.')
+    if len(c) == 2:
+        return np.array([[-c[0]/c[1]]])
+
+    n = len(c) - 1
+    mat = np.zeros((n, n), dtype=c.dtype)
+    bot = mat.reshape(-1)[n::n+1]
+    bot[...] = 1
+    mat[:, -1] -= c[:-1]/c[-1]
+    return mat
+
+
+def polyroots(c):
+    """
+    Compute the roots of a polynomial.
+
+    Return the roots (a.k.a. "zeros") of the polynomial
+
+    .. math:: p(x) = \\sum_i c[i] * x^i.
+
+    Parameters
+    ----------
+    c : 1-D array_like
+        1-D array of polynomial coefficients.
+
+    Returns
+    -------
+    out : ndarray
+        Array of the roots of the polynomial. If all the roots are real,
+        then `out` is also real, otherwise it is complex.
+
+    See Also
+    --------
+    numpy.polynomial.chebyshev.chebroots
+    numpy.polynomial.legendre.legroots
+    numpy.polynomial.laguerre.lagroots
+    numpy.polynomial.hermite.hermroots
+    numpy.polynomial.hermite_e.hermeroots
+
+    Notes
+    -----
+    The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+    errors due to the numerical instability of the power series for such
+    values. Roots with multiplicity greater than 1 will also show larger
+    errors as the value of the series near such points is relatively
+    insensitive to errors in the roots. Isolated roots near the origin can
+    be improved by a few iterations of Newton's method.
+
+    Examples
+    --------
+    >>> import numpy.polynomial.polynomial as poly
+    >>> poly.polyroots(poly.polyfromroots((-1,0,1)))
+    array([-1., 0., 1.])
+    >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype
+    dtype('float64')
+    >>> j = complex(0,1)
+    >>> poly.polyroots(poly.polyfromroots((-j,0,j)))
+    array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j]) # may vary
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        return np.array([], dtype=c.dtype)
+    if len(c) == 2:
+        return np.array([-c[0]/c[1]])
+
+    # rotated companion matrix reduces error
+    m = polycompanion(c)[::-1,::-1]
+    r = la.eigvals(m)
+    r.sort()
+    return r
+
+
+#
+# polynomial class
+#
+
+class Polynomial(ABCPolyBase):
+    """A power series class.
+
+    The Polynomial class provides the standard Python numerical methods
+    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+    attributes and methods listed in the `ABCPolyBase` documentation.
+
+    Parameters
+    ----------
+    coef : array_like
+        Polynomial coefficients in order of increasing degree, i.e.,
+        ``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``.
+    domain : (2,) array_like, optional
+        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+        to the interval ``[window[0], window[1]]`` by shifting and scaling.
+        The default value is [-1, 1].
+    window : (2,) array_like, optional
+        Window, see `domain` for its use. The default value is [-1, 1].
+
+        .. versionadded:: 1.6.0
+    symbol : str, optional
+        Symbol used to represent the independent variable in string
+        representations of the polynomial expression, e.g. for printing.
+        The symbol must be a valid Python identifier. Default value is 'x'.
+
+        .. 
versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(polyadd) + _sub = staticmethod(polysub) + _mul = staticmethod(polymul) + _div = staticmethod(polydiv) + _pow = staticmethod(polypow) + _val = staticmethod(polyval) + _int = staticmethod(polyint) + _der = staticmethod(polyder) + _fit = staticmethod(polyfit) + _line = staticmethod(polyline) + _roots = staticmethod(polyroots) + _fromroots = staticmethod(polyfromroots) + + # Virtual properties + domain = np.array(polydomain) + window = np.array(polydomain) + basis_name = None + + @classmethod + def _str_term_unicode(cls, i, arg_str): + if i == '1': + return f"·{arg_str}" + else: + return f"·{arg_str}{i.translate(cls._superscript_mapping)}" + + @staticmethod + def _str_term_ascii(i, arg_str): + if i == '1': + return f" {arg_str}" + else: + return f" {arg_str}**{i}" + + @staticmethod + def _repr_latex_term(i, arg_str, needs_parens): + if needs_parens: + arg_str = rf"\left({arg_str}\right)" + if i == 0: + return '1' + elif i == 1: + return arg_str + else: + return f"{arg_str}^{{{i}}}" diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/polynomial.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/polynomial.pyi new file mode 100644 index 00000000..3c87f9d2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/polynomial.pyi @@ -0,0 +1,41 @@ +from typing import Any + +from numpy import ndarray, dtype, int_ +from numpy.polynomial._polybase import ABCPolyBase +from numpy.polynomial.polyutils import trimcoef + +__all__: list[str] + +polytrim = trimcoef + +polydomain: ndarray[Any, dtype[int_]] +polyzero: ndarray[Any, dtype[int_]] +polyone: ndarray[Any, dtype[int_]] +polyx: ndarray[Any, dtype[int_]] + +def polyline(off, scl): ... +def polyfromroots(roots): ... +def polyadd(c1, c2): ... +def polysub(c1, c2): ... +def polymulx(c): ... +def polymul(c1, c2): ... +def polydiv(c1, c2): ... +def polypow(c, pow, maxpower=...): ... +def polyder(c, m=..., scl=..., axis=...): ... +def polyint(c, m=..., k=..., lbnd=..., scl=..., axis=...): ... +def polyval(x, c, tensor=...): ... +def polyvalfromroots(x, r, tensor=...): ... +def polyval2d(x, y, c): ... +def polygrid2d(x, y, c): ... +def polyval3d(x, y, z, c): ... +def polygrid3d(x, y, z, c): ... +def polyvander(x, deg): ... +def polyvander2d(x, y, deg): ... +def polyvander3d(x, y, z, deg): ... +def polyfit(x, y, deg, rcond=..., full=..., w=...): ... +def polyroots(c): ... + +class Polynomial(ABCPolyBase): + domain: Any + window: Any + basis_name: Any diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/polyutils.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/polyutils.py new file mode 100644 index 00000000..48291389 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/polyutils.py @@ -0,0 +1,789 @@ +""" +Utility classes and functions for the polynomial modules. + +This module provides: error and warning objects; a polynomial base class; +and some routines used in both the `polynomial` and `chebyshev` modules. + +Warning objects +--------------- + +.. autosummary:: + :toctree: generated/ + + RankWarning raised in least-squares fit for rank-deficient matrix. + +Functions +--------- + +.. autosummary:: + :toctree: generated/ + + as_series convert list of array_likes into 1-D arrays of common type. + trimseq remove trailing zeros. + trimcoef remove small trailing coefficients. + getdomain return the domain appropriate for a given set of abscissae. + mapdomain maps points between domains. 
+
+   mapparms        parameters of the linear map between domains.
+
+"""
+import operator
+import functools
+import warnings
+
+import numpy as np
+
+from numpy.core.multiarray import dragon4_positional, dragon4_scientific
+from numpy.core.umath import absolute
+
+__all__ = [
+    'RankWarning', 'as_series', 'trimseq',
+    'trimcoef', 'getdomain', 'mapdomain', 'mapparms',
+    'format_float']
+
+#
+# Warnings and Exceptions
+#
+
+class RankWarning(UserWarning):
+    """Issued by the least-squares fit routines when the design matrix
+    is rank deficient."""
+    pass
+
+#
+# Helper functions to convert inputs to 1-D arrays
+#
+def trimseq(seq):
+    """Remove trailing zeros from a sequence of Poly series coefficients.
+
+    Parameters
+    ----------
+    seq : sequence
+        Sequence of Poly series coefficients. Empty sequences are
+        returned unchanged.
+
+    Returns
+    -------
+    series : sequence
+        Subsequence with trailing zeros removed. If the resulting sequence
+        would be empty, return the first element. The returned sequence may
+        or may not be a view.
+
+    Notes
+    -----
+    Do not lose the type info if the sequence contains unknown objects.
+
+    """
+    if len(seq) == 0:
+        return seq
+    else:
+        for i in range(len(seq) - 1, -1, -1):
+            if seq[i] != 0:
+                break
+        return seq[:i+1]
+
+
+def as_series(alist, trim=True):
+    """
+    Return argument as a list of 1-d arrays.
+
+    The returned list contains array(s) of dtype double, complex double, or
+    object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of
+    size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays
+    of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array
+    raises a ValueError if it is not first reshaped into either a 1-d or 2-d
+    array.
+
+    Parameters
+    ----------
+    alist : array_like
+        A 1- or 2-d array_like
+    trim : boolean, optional
+        When True, trailing zeros are removed from the inputs.
+        When False, the inputs are passed through intact.
+
+    Returns
+    -------
+    [a1, a2,...] : list of 1-D arrays
+        A copy of the input data as a list of 1-d arrays.
+
+    Raises
+    ------
+    ValueError
+        Raised when `as_series` cannot convert its input to 1-d arrays, or at
+        least one of the resulting arrays is empty.
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polyutils as pu
+    >>> a = np.arange(4)
+    >>> pu.as_series(a)
+    [array([0.]), array([1.]), array([2.]), array([3.])]
+    >>> b = np.arange(6).reshape((2,3))
+    >>> pu.as_series(b)
+    [array([0., 1., 2.]), array([3., 4., 5.])]
+
+    >>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16)))
+    [array([1.]), array([0., 1., 2.]), array([0., 1.])]
+
+    >>> pu.as_series([2, [1.1, 0.]])
+    [array([2.]), array([1.1])]
+
+    >>> pu.as_series([2, [1.1, 0.]], trim=False)
+    [array([2.]), array([1.1, 0. ])]
+
+    """
+    arrays = [np.array(a, ndmin=1, copy=False) for a in alist]
+    if min([a.size for a in arrays]) == 0:
+        raise ValueError("Coefficient array is empty")
+    if any(a.ndim != 1 for a in arrays):
+        raise ValueError("Coefficient array is not 1-d")
+    if trim:
+        arrays = [trimseq(a) for a in arrays]
+
+    if any(a.dtype == np.dtype(object) for a in arrays):
+        ret = []
+        for a in arrays:
+            if a.dtype != np.dtype(object):
+                tmp = np.empty(len(a), dtype=np.dtype(object))
+                tmp[:] = a[:]
+                ret.append(tmp)
+            else:
+                ret.append(a.copy())
+    else:
+        try:
+            dtype = np.common_type(*arrays)
+        except Exception as e:
+            raise ValueError("Coefficient arrays have no common type") from e
+        ret = [np.array(a, copy=True, dtype=dtype) for a in arrays]
+    return ret
+
+
+def trimcoef(c, tol=0):
+    """
+    Remove "small" "trailing" coefficients from a polynomial. 
+ + "Small" means "small in absolute value" and is controlled by the + parameter `tol`; "trailing" means highest order coefficient(s), e.g., in + ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``) + both the 3-rd and 4-th order coefficients would be "trimmed." + + Parameters + ---------- + c : array_like + 1-d array of coefficients, ordered from lowest order to highest. + tol : number, optional + Trailing (i.e., highest order) elements with absolute value less + than or equal to `tol` (default value is zero) are removed. + + Returns + ------- + trimmed : ndarray + 1-d array with trailing zeros removed. If the resulting series + would be empty, a series containing a single zero is returned. + + Raises + ------ + ValueError + If `tol` < 0 + + See Also + -------- + trimseq + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> pu.trimcoef((0,0,3,0,5,0,0)) + array([0., 0., 3., 0., 5.]) + >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed + array([0.]) + >>> i = complex(0,1) # works for complex + >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) + array([0.0003+0.j , 0.001 -0.001j]) + + """ + if tol < 0: + raise ValueError("tol must be non-negative") + + [c] = as_series([c]) + [ind] = np.nonzero(np.abs(c) > tol) + if len(ind) == 0: + return c[:1]*0 + else: + return c[:ind[-1] + 1].copy() + +def getdomain(x): + """ + Return a domain suitable for given abscissae. + + Find a domain suitable for a polynomial or Chebyshev series + defined at the values supplied. + + Parameters + ---------- + x : array_like + 1-d array of abscissae whose domain will be determined. + + Returns + ------- + domain : ndarray + 1-d array containing two values. If the inputs are complex, then + the two returned points are the lower left and upper right corners + of the smallest rectangle (aligned with the axes) in the complex + plane containing the points `x`. If the inputs are real, then the + two points are the ends of the smallest interval containing the + points `x`. + + See Also + -------- + mapparms, mapdomain + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> points = np.arange(4)**2 - 5; points + array([-5, -4, -1, 4]) + >>> pu.getdomain(points) + array([-5., 4.]) + >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle + >>> pu.getdomain(c) + array([-1.-1.j, 1.+1.j]) + + """ + [x] = as_series([x], trim=False) + if x.dtype.char in np.typecodes['Complex']: + rmin, rmax = x.real.min(), x.real.max() + imin, imax = x.imag.min(), x.imag.max() + return np.array((complex(rmin, imin), complex(rmax, imax))) + else: + return np.array((x.min(), x.max())) + +def mapparms(old, new): + """ + Linear map parameters between domains. + + Return the parameters of the linear map ``offset + scale*x`` that maps + `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``. + + Parameters + ---------- + old, new : array_like + Domains. Each domain must (successfully) convert to a 1-d array + containing precisely two values. + + Returns + ------- + offset, scale : scalars + The map ``L(x) = offset + scale*x`` maps the first domain to the + second. + + See Also + -------- + getdomain, mapdomain + + Notes + ----- + Also works for complex numbers, and thus can be used to calculate the + parameters required to map any line in the complex plane to any other + line therein. 
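+
+    The parameters follow from solving ``off + scl*old[0] == new[0]`` and
+    ``off + scl*old[1] == new[1]``, which gives
+    ``scl = (new[1] - new[0])/(old[1] - old[0])`` and
+    ``off = (old[1]*new[0] - old[0]*new[1])/(old[1] - old[0])``, exactly
+    the expressions used in the code.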
+ + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> pu.mapparms((-1,1),(-1,1)) + (0.0, 1.0) + >>> pu.mapparms((1,-1),(-1,1)) + (-0.0, -1.0) + >>> i = complex(0,1) + >>> pu.mapparms((-i,-1),(1,i)) + ((1+1j), (1-0j)) + + """ + oldlen = old[1] - old[0] + newlen = new[1] - new[0] + off = (old[1]*new[0] - old[0]*new[1])/oldlen + scl = newlen/oldlen + return off, scl + +def mapdomain(x, old, new): + """ + Apply linear map to input points. + + The linear map ``offset + scale*x`` that maps the domain `old` to + the domain `new` is applied to the points `x`. + + Parameters + ---------- + x : array_like + Points to be mapped. If `x` is a subtype of ndarray the subtype + will be preserved. + old, new : array_like + The two domains that determine the map. Each must (successfully) + convert to 1-d arrays containing precisely two values. + + Returns + ------- + x_out : ndarray + Array of points of the same shape as `x`, after application of the + linear map between the two domains. + + See Also + -------- + getdomain, mapparms + + Notes + ----- + Effectively, this implements: + + .. math:: + x\\_out = new[0] + m(x - old[0]) + + where + + .. math:: + m = \\frac{new[1]-new[0]}{old[1]-old[0]} + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> old_domain = (-1,1) + >>> new_domain = (0,2*np.pi) + >>> x = np.linspace(-1,1,6); x + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ]) + >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out + array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, # may vary + 6.28318531]) + >>> x - pu.mapdomain(x_out, new_domain, old_domain) + array([0., 0., 0., 0., 0., 0.]) + + Also works for complex numbers (and thus can be used to map any line in + the complex plane to any other line therein). + + >>> i = complex(0,1) + >>> old = (-1 - i, 1 + i) + >>> new = (-1 + i, 1 - i) + >>> z = np.linspace(old[0], old[1], 6); z + array([-1. -1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1. +1.j ]) + >>> new_z = pu.mapdomain(z, old, new); new_z + array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary + + """ + x = np.asanyarray(x) + off, scl = mapparms(old, new) + return off + scl*x + + +def _nth_slice(i, ndim): + sl = [np.newaxis] * ndim + sl[i] = slice(None) + return tuple(sl) + + +def _vander_nd(vander_fs, points, degrees): + r""" + A generalization of the Vandermonde matrix for N dimensions + + The result is built by combining the results of 1d Vandermonde matrices, + + .. math:: + W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{V_k(x_k)[i_0, \ldots, i_M, j_k]} + + where + + .. math:: + N &= \texttt{len(points)} = \texttt{len(degrees)} = \texttt{len(vander\_fs)} \\ + M &= \texttt{points[k].ndim} \\ + V_k &= \texttt{vander\_fs[k]} \\ + x_k &= \texttt{points[k]} \\ + 0 \le j_k &\le \texttt{degrees[k]} + + Expanding the one-dimensional :math:`V_k` functions gives: + + .. math:: + W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{B_{k, j_k}(x_k[i_0, \ldots, i_M])} + + where :math:`B_{k,m}` is the m'th basis of the polynomial construction used along + dimension :math:`k`. For a regular polynomial, :math:`B_{k, m}(x) = P_m(x) = x^m`. + + Parameters + ---------- + vander_fs : Sequence[function(array_like, int) -> ndarray] + The 1d vander function to use for each axis, such as ``polyvander`` + points : Sequence[array_like] + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. 
Scalars are converted to
+        1-D arrays.
+        This must be the same length as `vander_fs`.
+    degrees : Sequence[int]
+        The maximum degree (inclusive) to use for each axis.
+        This must be the same length as `vander_fs`.
+
+    Returns
+    -------
+    vander_nd : ndarray
+        An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``.
+    """
+    n_dims = len(vander_fs)
+    if n_dims != len(points):
+        raise ValueError(
+            f"Expected {n_dims} dimensions of sample points, got {len(points)}")
+    if n_dims != len(degrees):
+        raise ValueError(
+            f"Expected {n_dims} dimensions of degrees, got {len(degrees)}")
+    if n_dims == 0:
+        raise ValueError("Unable to guess a dtype or shape when no points are given")
+
+    # convert to the same shape and type
+    points = tuple(np.array(tuple(points), copy=False) + 0.0)
+
+    # produce the vandermonde matrix for each dimension, placing the last
+    # axis of each in an independent trailing axis of the output
+    vander_arrays = (
+        vander_fs[i](points[i], degrees[i])[(...,) + _nth_slice(i, n_dims)]
+        for i in range(n_dims)
+    )
+
+    # we checked this wasn't empty already, so no `initial` needed
+    return functools.reduce(operator.mul, vander_arrays)
+
+
+def _vander_nd_flat(vander_fs, points, degrees):
+    """
+    Like `_vander_nd`, but flattens the last ``len(degrees)`` axes into a single axis
+
+    Used to implement the public ``<type>vander<n>d`` functions.
+    """
+    v = _vander_nd(vander_fs, points, degrees)
+    return v.reshape(v.shape[:-len(degrees)] + (-1,))
+
+
+def _fromroots(line_f, mul_f, roots):
+    """
+    Helper function used to implement the ``<type>fromroots`` functions.
+
+    Parameters
+    ----------
+    line_f : function(float, float) -> ndarray
+        The ``<type>line`` function, such as ``polyline``
+    mul_f : function(array_like, array_like) -> ndarray
+        The ``<type>mul`` function, such as ``polymul``
+    roots
+        See the ``<type>fromroots`` functions for more detail
+    """
+    if len(roots) == 0:
+        return np.ones(1)
+    else:
+        [roots] = as_series([roots], trim=False)
+        roots.sort()
+        p = [line_f(-r, 1) for r in roots]
+        n = len(p)
+        while n > 1:
+            m, r = divmod(n, 2)
+            tmp = [mul_f(p[i], p[i+m]) for i in range(m)]
+            if r:
+                tmp[0] = mul_f(tmp[0], p[-1])
+            p = tmp
+            n = m
+        return p[0]
+
+
+def _valnd(val_f, c, *args):
+    """
+    Helper function used to implement the ``<type>val<n>d`` functions.
+
+    Parameters
+    ----------
+    val_f : function(array_like, array_like, tensor: bool) -> array_like
+        The ``<type>val`` function, such as ``polyval``
+    c, args
+        See the ``<type>val<n>d`` functions for more detail
+    """
+    args = [np.asanyarray(a) for a in args]
+    shape0 = args[0].shape
+    if not all((a.shape == shape0 for a in args[1:])):
+        if len(args) == 3:
+            raise ValueError('x, y, z are incompatible')
+        elif len(args) == 2:
+            raise ValueError('x, y are incompatible')
+        else:
+            raise ValueError('ordinates are incompatible')
+    it = iter(args)
+    x0 = next(it)
+
+    # use tensor on only the first
+    c = val_f(x0, c)
+    for xi in it:
+        c = val_f(xi, c, tensor=False)
+    return c
+
+
+def _gridnd(val_f, c, *args):
+    """
+    Helper function used to implement the ``<type>grid<n>d`` functions.
+
+    Parameters
+    ----------
+    val_f : function(array_like, array_like, tensor: bool) -> array_like
+        The ``<type>val`` function, such as ``polyval``
+    c, args
+        See the ``<type>grid<n>d`` functions for more detail
+    """
+    for xi in args:
+        c = val_f(xi, c)
+    return c
+
+
+def _div(mul_f, c1, c2):
+    """
+    Helper function used to implement the ``<type>div`` functions.
+
+    Implementation uses repeated subtraction of c2 multiplied by the nth basis.
+    For some polynomial types, a more efficient approach may be possible. 
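+    Each pass of the loop eliminates the current highest-degree term of the
+    remainder, so at most ``len(c1) - len(c2) + 1`` subtraction steps are
+    performed.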
+
+    Parameters
+    ----------
+    mul_f : function(array_like, array_like) -> array_like
+        The ``<type>mul`` function, such as ``polymul``
+    c1, c2
+        See the ``<type>div`` functions for more detail
+    """
+    # c1, c2 are trimmed copies
+    [c1, c2] = as_series([c1, c2])
+    if c2[-1] == 0:
+        raise ZeroDivisionError()
+
+    lc1 = len(c1)
+    lc2 = len(c2)
+    if lc1 < lc2:
+        return c1[:1]*0, c1
+    elif lc2 == 1:
+        return c1/c2[-1], c1[:1]*0
+    else:
+        quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
+        rem = c1
+        for i in range(lc1 - lc2, - 1, -1):
+            p = mul_f([0]*i + [1], c2)
+            q = rem[-1]/p[-1]
+            rem = rem[:-1] - q*p[:-1]
+            quo[i] = q
+        return quo, trimseq(rem)
+
+
+def _add(c1, c2):
+    """ Helper function used to implement the ``<type>add`` functions. """
+    # c1, c2 are trimmed copies
+    [c1, c2] = as_series([c1, c2])
+    if len(c1) > len(c2):
+        c1[:c2.size] += c2
+        ret = c1
+    else:
+        c2[:c1.size] += c1
+        ret = c2
+    return trimseq(ret)
+
+
+def _sub(c1, c2):
+    """ Helper function used to implement the ``<type>sub`` functions. """
+    # c1, c2 are trimmed copies
+    [c1, c2] = as_series([c1, c2])
+    if len(c1) > len(c2):
+        c1[:c2.size] -= c2
+        ret = c1
+    else:
+        c2 = -c2
+        c2[:c1.size] += c1
+        ret = c2
+    return trimseq(ret)
+
+
+def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None):
+    """
+    Helper function used to implement the ``<type>fit`` functions.
+
+    Parameters
+    ----------
+    vander_f : function(array_like, int) -> ndarray
+        The 1d vander function, such as ``polyvander``
+    x, y, deg, rcond, full, w
+        See the ``<type>fit`` functions for more detail
+    """
+    x = np.asarray(x) + 0.0
+    y = np.asarray(y) + 0.0
+    deg = np.asarray(deg)
+
+    # check arguments.
+    if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
+        raise TypeError("deg must be an int or non-empty 1-D array of int")
+    if deg.min() < 0:
+        raise ValueError("expected deg >= 0")
+    if x.ndim != 1:
+        raise TypeError("expected 1D vector for x")
+    if x.size == 0:
+        raise TypeError("expected non-empty vector for x")
+    if y.ndim < 1 or y.ndim > 2:
+        raise TypeError("expected 1D or 2D array for y")
+    if len(x) != len(y):
+        raise TypeError("expected x and y to have same length")
+
+    if deg.ndim == 0:
+        lmax = deg
+        order = lmax + 1
+        van = vander_f(x, lmax)
+    else:
+        deg = np.sort(deg)
+        lmax = deg[-1]
+        order = len(deg)
+        van = vander_f(x, lmax)[:, deg]
+
+    # set up the least squares matrices in transposed form
+    lhs = van.T
+    rhs = y.T
+    if w is not None:
+        w = np.asarray(w) + 0.0
+        if w.ndim != 1:
+            raise TypeError("expected 1D vector for w")
+        if len(x) != len(w):
+            raise TypeError("expected x and w to have same length")
+        # apply weights. Don't use inplace operations as they
+        # can cause problems with NA.
+        lhs = lhs * w
+        rhs = rhs * w
+
+    # set rcond
+    if rcond is None:
+        rcond = len(x)*np.finfo(x.dtype).eps
+
+    # Determine the norms of the design matrix columns.
+    if issubclass(lhs.dtype.type, np.complexfloating):
+        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
+    else:
+        scl = np.sqrt(np.square(lhs).sum(1))
+    scl[scl == 0] = 1
+
+    # Solve the least squares problem. 
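+    # The design matrix columns were normalized to unit Euclidean norm
+    # above (scl), so the rcond-based truncation of small singular values
+    # reflects the conditioning of the problem rather than differing
+    # column magnitudes; the scaling is undone on the solution below.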
+    c, resids, rank, s = np.linalg.lstsq(lhs.T/scl, rhs.T, rcond)
+    c = (c.T/scl).T
+
+    # Expand c to include non-fitted coefficients which are set to zero
+    if deg.ndim > 0:
+        if c.ndim == 2:
+            cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
+        else:
+            cc = np.zeros(lmax+1, dtype=c.dtype)
+        cc[deg] = c
+        c = cc
+
+    # warn on rank reduction
+    if rank != order and not full:
+        msg = "The fit may be poorly conditioned"
+        warnings.warn(msg, RankWarning, stacklevel=2)
+
+    if full:
+        return c, [resids, rank, s, rcond]
+    else:
+        return c
+
+
+def _pow(mul_f, c, pow, maxpower):
+    """
+    Helper function used to implement the ``<type>pow`` functions.
+
+    Parameters
+    ----------
+    mul_f : function(array_like, array_like) -> ndarray
+        The ``<type>mul`` function, such as ``polymul``
+    c : array_like
+        1-D array of series coefficients
+    pow, maxpower
+        See the ``<type>pow`` functions for more detail
+    """
+    # c is a trimmed copy
+    [c] = as_series([c])
+    power = int(pow)
+    if power != pow or power < 0:
+        raise ValueError("Power must be a non-negative integer.")
+    elif maxpower is not None and power > maxpower:
+        raise ValueError("Power is too large")
+    elif power == 0:
+        return np.array([1], dtype=c.dtype)
+    elif power == 1:
+        return c
+    else:
+        # This can be made more efficient by using powers of two
+        # in the usual way.
+        prd = c
+        for i in range(2, power + 1):
+            prd = mul_f(prd, c)
+        return prd
+
+
+def _deprecate_as_int(x, desc):
+    """
+    Like `operator.index`, but emits a deprecation warning when passed a float
+
+    Parameters
+    ----------
+    x : int-like, or float with integral value
+        Value to interpret as an integer
+    desc : str
+        description to include in any error message
+
+    Raises
+    ------
+    TypeError : if x is a non-integral float or non-numeric
+    DeprecationWarning : if x is an integral float
+    """
+    try:
+        return operator.index(x)
+    except TypeError as e:
+        # Numpy 1.17.0, 2019-03-11
+        try:
+            ix = int(x)
+        except TypeError:
+            pass
+        else:
+            if ix == x:
+                warnings.warn(
+                    f"In future, this will raise TypeError, as {desc} will "
+                    "need to be an integer not just an integral float.",
+                    DeprecationWarning,
+                    stacklevel=3
+                )
+                return ix
+
+        raise TypeError(f"{desc} must be an integer") from e
+
+
+def format_float(x, parens=False):
+    if not np.issubdtype(type(x), np.floating):
+        return str(x)
+
+    opts = np.get_printoptions()
+
+    if np.isnan(x):
+        return opts['nanstr']
+    elif np.isinf(x):
+        return opts['infstr']
+
+    exp_format = False
+    if x != 0:
+        a = absolute(x)
+        if a >= 1.e8 or a < 10**min(0, -(opts['precision']-1)//2):
+            exp_format = True
+
+    trim, unique = '0', True
+    if opts['floatmode'] == 'fixed':
+        trim, unique = 'k', False
+
+    if exp_format:
+        s = dragon4_scientific(x, precision=opts['precision'],
+                               unique=unique, trim=trim,
+                               sign=opts['sign'] == '+')
+        if parens:
+            s = '(' + s + ')'
+    else:
+        s = dragon4_positional(x, precision=opts['precision'],
+                               fractional=True,
+                               unique=unique, trim=trim,
+                               sign=opts['sign'] == '+')
+    return s
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/polyutils.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/polyutils.pyi
new file mode 100644
index 00000000..c0bcc678
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/polyutils.pyi
@@ -0,0 +1,11 @@
+__all__: list[str]
+
+class RankWarning(UserWarning): ...
+
+def trimseq(seq): ...
+def as_series(alist, trim=...): ...
+def trimcoef(c, tol=...): ...
+def getdomain(x): ...
+def mapparms(old, new): ...
+def mapdomain(x, old, new): ... 
+def format_float(x, parens=...): ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/setup.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/setup.py new file mode 100644 index 00000000..b58e867a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/setup.py @@ -0,0 +1,10 @@ +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('polynomial', parent_package, top_path) + config.add_subpackage('tests') + config.add_data_files('*.pyi') + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_chebyshev.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_chebyshev.py new file mode 100644 index 00000000..2f54bebf --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_chebyshev.py @@ -0,0 +1,619 @@ +"""Tests for chebyshev module. + +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.chebyshev as cheb +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + + +def trim(x): + return cheb.chebtrim(x, tol=1e-6) + +T0 = [1] +T1 = [0, 1] +T2 = [-1, 0, 2] +T3 = [0, -3, 0, 4] +T4 = [1, 0, -8, 0, 8] +T5 = [0, 5, 0, -20, 0, 16] +T6 = [-1, 0, 18, 0, -48, 0, 32] +T7 = [0, -7, 0, 56, 0, -112, 0, 64] +T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] +T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] + +Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] + + +class TestPrivate: + + def test__cseries_to_zseries(self): + for i in range(5): + inp = np.array([2] + [1]*i, np.double) + tgt = np.array([.5]*i + [2] + [.5]*i, np.double) + res = cheb._cseries_to_zseries(inp) + assert_equal(res, tgt) + + def test__zseries_to_cseries(self): + for i in range(5): + inp = np.array([.5]*i + [2] + [.5]*i, np.double) + tgt = np.array([2] + [1]*i, np.double) + res = cheb._zseries_to_cseries(inp) + assert_equal(res, tgt) + + +class TestConstants: + + def test_chebdomain(self): + assert_equal(cheb.chebdomain, [-1, 1]) + + def test_chebzero(self): + assert_equal(cheb.chebzero, [0]) + + def test_chebone(self): + assert_equal(cheb.chebone, [1]) + + def test_chebx(self): + assert_equal(cheb.chebx, [0, 1]) + + +class TestArithmetic: + + def test_chebadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = cheb.chebadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = cheb.chebsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebmulx(self): + assert_equal(cheb.chebmulx([0]), [0]) + assert_equal(cheb.chebmulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [.5, 0, .5] + assert_equal(cheb.chebmulx(ser), tgt) + + def test_chebmul(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(i + j + 1) + tgt[i + j] += .5 + tgt[abs(i - 
j)] += .5 + res = cheb.chebmul([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = cheb.chebadd(ci, cj) + quo, rem = cheb.chebdiv(tgt, ci) + res = cheb.chebadd(cheb.chebmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(cheb.chebmul, [c]*j, np.array([1])) + res = cheb.chebpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2.5, 2., 1.5]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_chebval(self): + #check empty input + assert_equal(cheb.chebval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Tlist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = cheb.chebval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(cheb.chebval(x, [1]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims) + + def test_chebval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = cheb.chebval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_chebval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = cheb.chebval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_chebgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = cheb.chebgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_chebgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = cheb.chebgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = cheb.chebgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_chebint(self): + # check exceptions + assert_raises(TypeError, cheb.chebint, [0], .5) + assert_raises(ValueError, cheb.chebint, [0], -1) + assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0]) + assert_raises(ValueError, cheb.chebint, [0], lbnd=[0]) + assert_raises(ValueError, cheb.chebint, [0], scl=[0]) + assert_raises(TypeError, cheb.chebint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = cheb.chebint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl 
= i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i]) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(cheb.chebval(-1, chebint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1) + res = cheb.chebint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k]) + res = cheb.chebint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1) + res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], scl=2) + res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T + res = cheb.chebint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c) for c in c2d]) + res = cheb.chebint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d]) + res = cheb.chebint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_chebder(self): + # check exceptions + assert_raises(TypeError, cheb.chebder, [0], .5) + assert_raises(ValueError, cheb.chebder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = cheb.chebder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T + res = cheb.chebder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebder(c) for c in c2d]) + res = cheb.chebder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = 
np.random.random((3, 5))*2 - 1 + + def test_chebvander(self): + # check for 1d x + x = np.arange(3) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + def test_chebvander2d(self): + # also tests chebval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = cheb.chebvander2d(x1, x2, [1, 2]) + tgt = cheb.chebval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_chebvander3d(self): + # also tests chebval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3]) + tgt = cheb.chebval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_chebfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, cheb.chebfit, [1], [1], -1) + assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0) + assert_raises(TypeError, cheb.chebfit, [], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0) + assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,]) + assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, cheb.chebfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = cheb.chebfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(cheb.chebval(x, coef3), y) + coef3 = cheb.chebfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(cheb.chebval(x, coef3), y) + # + coef4 = cheb.chebfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + # + coef2d = cheb.chebfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = cheb.chebfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + 
# is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1]) + assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1]) + # test fitting only even polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = cheb.chebfit(x, y, 4) + assert_almost_equal(cheb.chebval(x, coef1), y) + coef2 = cheb.chebfit(x, y, [0, 2, 4]) + assert_almost_equal(cheb.chebval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestInterpolate: + + def f(self, x): + return x * (x - 1) * (x - 2) + + def test_raises(self): + assert_raises(ValueError, cheb.chebinterpolate, self.f, -1) + assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.) + + def test_dimensions(self): + for deg in range(1, 5): + assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,)) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(-1, 1, 10) + for deg in range(0, 10): + for p in range(0, deg + 1): + c = cheb.chebinterpolate(powx, deg, (p,)) + assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, cheb.chebcompanion, []) + assert_raises(ValueError, cheb.chebcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(cheb.chebcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = cheb.chebgauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. + v = cheb.chebvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.pi + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_chebfromroots(self): + res = cheb.chebfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + tgt = [0]*i + [1] + res = cheb.chebfromroots(roots)*2**(i-1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebroots(self): + assert_almost_equal(cheb.chebroots([1]), []) + assert_almost_equal(cheb.chebroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = cheb.chebroots(cheb.chebfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, cheb.chebtrim, coef, -1) + + # Test results + assert_equal(cheb.chebtrim(coef), coef[:-1]) + assert_equal(cheb.chebtrim(coef, 1), coef[:-3]) + assert_equal(cheb.chebtrim(coef, 2), [0]) + + def test_chebline(self): + assert_equal(cheb.chebline(3, 4), [3, 4]) + + def test_cheb2poly(self): + for i in range(10): + assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i]) + + def test_poly2cheb(self): + for i in range(10): + assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11)[1:-1] + tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x)) + res = cheb.chebweight(x) + assert_almost_equal(res, tgt) + + def test_chebpts1(self): + #test exceptions + assert_raises(ValueError, cheb.chebpts1, 1.5) + assert_raises(ValueError, cheb.chebpts1, 0) + + #test points + tgt = [0] + assert_almost_equal(cheb.chebpts1(1), tgt) + tgt = [-0.70710678118654746, 0.70710678118654746] + 
assert_almost_equal(cheb.chebpts1(2), tgt) + tgt = [-0.86602540378443871, 0, 0.86602540378443871] + assert_almost_equal(cheb.chebpts1(3), tgt) + tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] + assert_almost_equal(cheb.chebpts1(4), tgt) + + def test_chebpts2(self): + #test exceptions + assert_raises(ValueError, cheb.chebpts2, 1.5) + assert_raises(ValueError, cheb.chebpts2, 1) + + #test points + tgt = [-1, 1] + assert_almost_equal(cheb.chebpts2(2), tgt) + tgt = [-1, 0, 1] + assert_almost_equal(cheb.chebpts2(3), tgt) + tgt = [-1, -0.5, .5, 1] + assert_almost_equal(cheb.chebpts2(4), tgt) + tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0] + assert_almost_equal(cheb.chebpts2(5), tgt) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_classes.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_classes.py new file mode 100644 index 00000000..6322062f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_classes.py @@ -0,0 +1,600 @@ +"""Test inter-conversion of different polynomial classes. + +This tests the convert and cast methods of all the polynomial classes. + +""" +import operator as op +from numbers import Number + +import pytest +import numpy as np +from numpy.polynomial import ( + Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE) +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) +from numpy.polynomial.polyutils import RankWarning + +# +# fixtures +# + +classes = ( + Polynomial, Legendre, Chebyshev, Laguerre, + Hermite, HermiteE + ) +classids = tuple(cls.__name__ for cls in classes) + +@pytest.fixture(params=classes, ids=classids) +def Poly(request): + return request.param + +# +# helper functions +# +random = np.random.random + + +def assert_poly_almost_equal(p1, p2, msg=""): + try: + assert_(np.all(p1.domain == p2.domain)) + assert_(np.all(p1.window == p2.window)) + assert_almost_equal(p1.coef, p2.coef) + except AssertionError: + msg = f"Result: {p1}\nTarget: {p2}" + raise AssertionError(msg) + + +# +# Test conversion methods that depend on combinations of two classes. 
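+# `Poly1` and `Poly2` are aliases of the `Poly` fixture; pytest resolves
+# each fixture argument independently, so these tests run over the full
+# cross product of polynomial classes.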
+# + +Poly1 = Poly +Poly2 = Poly + + +def test_conversion(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,))*.25 + w1 = Poly1.window + random((2,))*.25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,))*.25 + w2 = Poly2.window + random((2,))*.25 + p2 = p1.convert(kind=Poly2, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +def test_cast(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,))*.25 + w1 = Poly1.window + random((2,))*.25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,))*.25 + w2 = Poly2.window + random((2,))*.25 + p2 = Poly2.cast(p1, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +# +# test methods that depend on one class +# + + +def test_identity(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + x = np.linspace(d[0], d[1], 11) + p = Poly.identity(domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_almost_equal(p(x), x) + + +def test_basis(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly.basis(5, domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_equal(p.coef, [0]*5 + [1]) + + +def test_fromroots(Poly): + # check that requested roots are zeros of a polynomial + # of correct degree, domain, and window. + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + r = random((5,)) + p1 = Poly.fromroots(r, domain=d, window=w) + assert_equal(p1.degree(), len(r)) + assert_equal(p1.domain, d) + assert_equal(p1.window, w) + assert_almost_equal(p1(r), 0) + + # check that polynomial is monic + pdom = Polynomial.domain + pwin = Polynomial.window + p2 = Polynomial.cast(p1, domain=pdom, window=pwin) + assert_almost_equal(p2.coef[-1], 1) + + +def test_bad_conditioned_fit(Poly): + + x = [0., 0., 1.] + y = [1., 2., 3.] + + # check RankWarning is raised + with pytest.warns(RankWarning) as record: + Poly.fit(x, y, 2) + assert record[0].message.args[0] == "The fit may be poorly conditioned" + + +def test_fit(Poly): + + def f(x): + return x*(x - 1)*(x - 2) + x = np.linspace(0, 3) + y = f(x) + + # check default value of domain and window + p = Poly.fit(x, y, 3) + assert_almost_equal(p.domain, [0, 3]) + assert_almost_equal(p(x), y) + assert_equal(p.degree(), 3) + + # check with given domains and window + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly.fit(x, y, 3, domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + + # check with class domain default + p = Poly.fit(x, y, 3, []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + p = Poly.fit(x, y, [0, 1, 2, 3], []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + + # check that fit accepts weights. 
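+    # Zero-weight points are excluded from the fit, so fitting only the
+    # even-indexed (weight 1) points directly must agree with the weighted
+    # fit over all points.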
+ w = np.zeros_like(x) + z = y + random(y.shape)*.25 + w[::2] = 1 + p1 = Poly.fit(x[::2], z[::2], 3) + p2 = Poly.fit(x, z, 3, w=w) + p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w) + assert_almost_equal(p1(x), p2(x)) + assert_almost_equal(p2(x), p3(x)) + + +def test_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(p1 == p1) + assert_(not p1 == p2) + assert_(not p1 == p3) + assert_(not p1 == p4) + + +def test_not_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(not p1 != p1) + assert_(p1 != p2) + assert_(p1 != p3) + assert_(p1 != p4) + + +def test_add(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 + p2 + assert_poly_almost_equal(p2 + p1, p3) + assert_poly_almost_equal(p1 + c2, p3) + assert_poly_almost_equal(c2 + p1, p3) + assert_poly_almost_equal(p1 + tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) + p1, p3) + assert_poly_almost_equal(p1 + np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) + p1, p3) + assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.add, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.add, p1, Polynomial([0])) + + +def test_sub(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 - p2 + assert_poly_almost_equal(p2 - p1, -p3) + assert_poly_almost_equal(p1 - c2, p3) + assert_poly_almost_equal(c2 - p1, -p3) + assert_poly_almost_equal(p1 - tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) - p1, -p3) + assert_poly_almost_equal(p1 - np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) - p1, -p3) + assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.sub, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.sub, p1, Polynomial([0])) + + +def test_mul(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 * p2 + assert_poly_almost_equal(p2 * p1, p3) + assert_poly_almost_equal(p1 * c2, p3) + assert_poly_almost_equal(c2 * p1, p3) + assert_poly_almost_equal(p1 * tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) * p1, p3) + assert_poly_almost_equal(p1 * np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) * p1, p3) + assert_poly_almost_equal(p1 * 2, p1 * Poly([2])) + assert_poly_almost_equal(2 * p1, p1 * Poly([2])) + assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mul, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mul, p1, Polynomial([0])) + + +def test_floordiv(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + 
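The floordiv, mod, and divmod tests here all build `p4 = p1*p2 + p3` with deg(p3) < deg(p2), so Euclidean division must recover the factor and the remainder. A standalone sketch of that identity with the concrete `Polynomial` class:

import numpy as np
from numpy.polynomial import Polynomial

p1 = Polynomial([1., 2., 3., 4.])
p2 = Polynomial([1., 2., 3.])
p3 = Polynomial([1., 2.])        # degree < deg(p2), so it is the remainder
p4 = p1*p2 + p3
quo, rem = divmod(p4, p2)
assert np.allclose(quo.coef, p1.coef)
assert np.allclose(rem.coef, p3.coef)

Note that `/` is deliberately not the inverse of `*` here: scalar division such as `p2 / 2` works, but true division by another polynomial raises `TypeError`, which is what `test_truediv` below checks.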
assert_poly_almost_equal(p4 // p2, p1) + assert_poly_almost_equal(p4 // c2, p1) + assert_poly_almost_equal(c4 // p2, p1) + assert_poly_almost_equal(p4 // tuple(c2), p1) + assert_poly_almost_equal(tuple(c4) // p2, p1) + assert_poly_almost_equal(p4 // np.array(c2), p1) + assert_poly_almost_equal(np.array(c4) // p2, p1) + assert_poly_almost_equal(2 // p2, Poly([0])) + assert_poly_almost_equal(p2 // 2, 0.5*p2) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.floordiv, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.floordiv, p1, Polynomial([0])) + + +def test_truediv(Poly): + # true division is valid only if the denominator is a Number and + # not a python bool. + p1 = Poly([1,2,3]) + p2 = p1 * 5 + + for stype in np.ScalarType: + if not issubclass(stype, Number) or issubclass(stype, bool): + continue + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in (int, float): + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in [complex]: + s = stype(5, 0) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for s in [tuple(), list(), dict(), bool(), np.array([1])]: + assert_raises(TypeError, op.truediv, p2, s) + assert_raises(TypeError, op.truediv, s, p2) + for ptype in classes: + assert_raises(TypeError, op.truediv, p2, ptype(1)) + + +def test_mod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 % p2, p3) + assert_poly_almost_equal(p4 % c2, p3) + assert_poly_almost_equal(c4 % p2, p3) + assert_poly_almost_equal(p4 % tuple(c2), p3) + assert_poly_almost_equal(tuple(c4) % p2, p3) + assert_poly_almost_equal(p4 % np.array(c2), p3) + assert_poly_almost_equal(np.array(c4) % p2, p3) + assert_poly_almost_equal(2 % p2, Poly([2])) + assert_poly_almost_equal(p2 % 2, Poly([0])) + assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mod, p1, Polynomial([0])) + + +def test_divmod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + quo, rem = divmod(p4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, c2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(c4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, tuple(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(tuple(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, np.array(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(np.array(c4), p2) + assert_poly_almost_equal(quo, p1) + 
assert_poly_almost_equal(rem, p3)
+    quo, rem = divmod(p2, 2)
+    assert_poly_almost_equal(quo, 0.5*p2)
+    assert_poly_almost_equal(rem, Poly([0]))
+    quo, rem = divmod(2, p2)
+    assert_poly_almost_equal(quo, Poly([0]))
+    assert_poly_almost_equal(rem, Poly([2]))
+    assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1))
+    assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1))
+    if Poly is Polynomial:
+        assert_raises(TypeError, divmod, p1, Chebyshev([0]))
+    else:
+        assert_raises(TypeError, divmod, p1, Polynomial([0]))
+
+
+def test_roots(Poly):
+    d = Poly.domain * 1.25 + .25
+    w = Poly.window
+    tgt = np.linspace(d[0], d[1], 5)
+    res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots())
+    assert_almost_equal(res, tgt)
+    # default domain and window
+    res = np.sort(Poly.fromroots(tgt).roots())
+    assert_almost_equal(res, tgt)
+
+
+def test_degree(Poly):
+    p = Poly.basis(5)
+    assert_equal(p.degree(), 5)
+
+
+def test_copy(Poly):
+    p1 = Poly.basis(5)
+    p2 = p1.copy()
+    assert_(p1 == p2)
+    assert_(p1 is not p2)
+    assert_(p1.coef is not p2.coef)
+    assert_(p1.domain is not p2.domain)
+    assert_(p1.window is not p2.window)
+
+
+def test_integ(Poly):
+    P = Polynomial
+    # Check defaults
+    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
+    p1 = P.cast(p0.integ())
+    p2 = P.cast(p0.integ(2))
+    assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
+    # Check with k
+    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
+    p1 = P.cast(p0.integ(k=1))
+    p2 = P.cast(p0.integ(2, k=[1, 1]))
+    assert_poly_almost_equal(p1, P([1, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1]))
+    # Check with lbnd
+    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
+    p1 = P.cast(p0.integ(lbnd=1))
+    p2 = P.cast(p0.integ(2, lbnd=1))
+    assert_poly_almost_equal(p1, P([-9, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1]))
+    # Check scaling
+    d = 2*Poly.domain
+    p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d)
+    p1 = P.cast(p0.integ())
+    p2 = P.cast(p0.integ(2))
+    assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
+
+
+def test_deriv(Poly):
+    # Check that the derivative is the inverse of integration. It is
+    # assumed that the integration has been checked elsewhere.
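For instance, two integrations followed by two derivatives must return the original coefficients; a quick standalone sketch with `Chebyshev` as a stand-in for any of the classes:

import numpy as np
from numpy.polynomial import Chebyshev

p = Chebyshev([1., 2., 3.])
q = p.integ(2, k=[1, 2])        # two antiderivatives, constants 1 and 2
assert np.allclose(q.deriv(2).coef, p.coef)   # deriv undoes integ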
+ d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p1 = Poly([1, 2, 3], domain=d, window=w) + p2 = p1.integ(2, k=[1, 2]) + p3 = p1.integ(1, k=[1]) + assert_almost_equal(p2.deriv(1).coef, p3.coef) + assert_almost_equal(p2.deriv(2).coef, p1.coef) + # default domain and window + p1 = Poly([1, 2, 3]) + p2 = p1.integ(2, k=[1, 2]) + p3 = p1.integ(1, k=[1]) + assert_almost_equal(p2.deriv(1).coef, p3.coef) + assert_almost_equal(p2.deriv(2).coef, p1.coef) + + +def test_linspace(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + p = Poly([1, 2, 3], domain=d, window=w) + # check default domain + xtgt = np.linspace(d[0], d[1], 20) + ytgt = p(xtgt) + xres, yres = p.linspace(20) + assert_almost_equal(xres, xtgt) + assert_almost_equal(yres, ytgt) + # check specified domain + xtgt = np.linspace(0, 2, 20) + ytgt = p(xtgt) + xres, yres = p.linspace(20, domain=[0, 2]) + assert_almost_equal(xres, xtgt) + assert_almost_equal(yres, ytgt) + + +def test_pow(Poly): + d = Poly.domain + random((2,))*.25 + w = Poly.window + random((2,))*.25 + tgt = Poly([1], domain=d, window=w) + tst = Poly([1, 2, 3], domain=d, window=w) + for i in range(5): + assert_poly_almost_equal(tst**i, tgt) + tgt = tgt * tst + # default domain and window + tgt = Poly([1]) + tst = Poly([1, 2, 3]) + for i in range(5): + assert_poly_almost_equal(tst**i, tgt) + tgt = tgt * tst + # check error for invalid powers + assert_raises(ValueError, op.pow, tgt, 1.5) + assert_raises(ValueError, op.pow, tgt, -1) + + +def test_call(Poly): + P = Polynomial + d = Poly.domain + x = np.linspace(d[0], d[1], 11) + + # Check defaults + p = Poly.cast(P([1, 2, 3])) + tgt = 1 + x*(2 + 3*x) + res = p(x) + assert_almost_equal(res, tgt) + + +def test_cutdeg(Poly): + p = Poly([1, 2, 3]) + assert_raises(ValueError, p.cutdeg, .5) + assert_raises(ValueError, p.cutdeg, -1) + assert_equal(len(p.cutdeg(3)), 3) + assert_equal(len(p.cutdeg(2)), 3) + assert_equal(len(p.cutdeg(1)), 2) + assert_equal(len(p.cutdeg(0)), 1) + + +def test_truncate(Poly): + p = Poly([1, 2, 3]) + assert_raises(ValueError, p.truncate, .5) + assert_raises(ValueError, p.truncate, 0) + assert_equal(len(p.truncate(4)), 3) + assert_equal(len(p.truncate(3)), 3) + assert_equal(len(p.truncate(2)), 2) + assert_equal(len(p.truncate(1)), 1) + + +def test_trim(Poly): + c = [1, 1e-6, 1e-12, 0] + p = Poly(c) + assert_equal(p.trim().coef, c[:3]) + assert_equal(p.trim(1e-10).coef, c[:2]) + assert_equal(p.trim(1e-5).coef, c[:1]) + + +def test_mapparms(Poly): + # check with defaults. Should be identity. + d = Poly.domain + w = Poly.window + p = Poly([1], domain=d, window=w) + assert_almost_equal([0, 1], p.mapparms()) + # + w = 2*d + 1 + p = Poly([1], domain=d, window=w) + assert_almost_equal([1, 2], p.mapparms()) + + +def test_ufunc_override(Poly): + p = Poly([1, 2, 3]) + x = np.ones(3) + assert_raises(TypeError, np.add, p, x) + assert_raises(TypeError, np.add, x, p) + + +# +# Test class method that only exists for some classes +# + + +class TestInterpolate: + + def f(self, x): + return x * (x - 1) * (x - 2) + + def test_raises(self): + assert_raises(ValueError, Chebyshev.interpolate, self.f, -1) + assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.) 
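`Chebyshev.interpolate` is the class method exercised here that the other classes do not provide: it samples the function at Chebyshev points of the requested degree. A short standalone sketch; a cubic is recovered exactly by a degree-3 interpolant:

import numpy as np
from numpy.polynomial import Chebyshev

def f(x):
    return x*(x - 1)*(x - 2)

p = Chebyshev.interpolate(f, 3, domain=[0, 3])   # degree-3 interpolant
x = np.linspace(0, 3, 7)
assert np.allclose(p(x), f(x))                   # exact up to rounding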
+ + def test_dimensions(self): + for deg in range(1, 5): + assert_(Chebyshev.interpolate(self.f, deg).degree() == deg) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(0, 2, 10) + for deg in range(0, 10): + for t in range(0, deg + 1): + p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,)) + assert_almost_equal(p(x), powx(x, t), decimal=11) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite.py new file mode 100644 index 00000000..53ee0844 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite.py @@ -0,0 +1,555 @@ +"""Tests for hermite module. + +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.hermite as herm +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +H0 = np.array([1]) +H1 = np.array([0, 2]) +H2 = np.array([-2, 0, 4]) +H3 = np.array([0, -12, 0, 8]) +H4 = np.array([12, 0, -48, 0, 16]) +H5 = np.array([0, 120, 0, -160, 0, 32]) +H6 = np.array([-120, 0, 720, 0, -480, 0, 64]) +H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128]) +H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256]) +H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512]) + +Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9] + + +def trim(x): + return herm.hermtrim(x, tol=1e-6) + + +class TestConstants: + + def test_hermdomain(self): + assert_equal(herm.hermdomain, [-1, 1]) + + def test_hermzero(self): + assert_equal(herm.hermzero, [0]) + + def test_hermone(self): + assert_equal(herm.hermone, [1]) + + def test_hermx(self): + assert_equal(herm.hermx, [0, .5]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_hermadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herm.hermadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herm.hermsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermmulx(self): + assert_equal(herm.hermmulx([0]), [0]) + assert_equal(herm.hermmulx([1]), [0, .5]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i, 0, .5] + assert_equal(herm.hermmulx(ser), tgt) + + def test_hermmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = herm.hermval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0]*j + [1] + val2 = herm.hermval(self.x, pol2) + pol3 = herm.hermmul(pol1, pol2) + val3 = herm.hermval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_hermdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = herm.hermadd(ci, cj) + quo, rem = herm.hermdiv(tgt, ci) + res = herm.hermadd(herm.hermmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(herm.hermmul, [c]*j, np.array([1])) + res = herm.hermpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class 
TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2.5, 1., .75]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_hermval(self): + #check empty input + assert_equal(herm.hermval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Hlist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = herm.hermval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(herm.hermval(x, [1]).shape, dims) + assert_equal(herm.hermval(x, [1, 0]).shape, dims) + assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims) + + def test_hermval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = herm.hermval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = herm.hermval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herm.hermgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_hermgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herm.hermgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herm.hermgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_hermint(self): + # check exceptions + assert_raises(TypeError, herm.hermint, [0], .5) + assert_raises(ValueError, herm.hermint, [0], -1) + assert_raises(ValueError, herm.hermint, [0], 1, [0, 0]) + assert_raises(ValueError, herm.hermint, [0], lbnd=[0]) + assert_raises(ValueError, herm.hermint, [0], scl=[0]) + assert_raises(TypeError, herm.hermint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = herm.hermint([0], m=i, k=k) + assert_almost_equal(res, [0, .5]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i]) + res = herm.herm2poly(hermint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herm.hermval(-1, hermint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + hermpol = herm.poly2herm(pol) + 
hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) + res = herm.herm2poly(hermint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1) + res = herm.hermint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k]) + res = herm.hermint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) + res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], scl=2) + res = herm.hermint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T + res = herm.hermint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c) for c in c2d]) + res = herm.hermint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c, k=3) for c in c2d]) + res = herm.hermint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_hermder(self): + # check exceptions + assert_raises(TypeError, herm.hermder, [0], .5) + assert_raises(ValueError, herm.hermder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = herm.hermder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herm.hermder(herm.hermint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T + res = herm.hermder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermder(c) for c in c2d]) + res = herm.hermder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_hermvander(self): + # check for 1d x + x = np.arange(3) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + def test_hermvander2d(self): + # also tests hermval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = herm.hermvander2d(x1, x2, [1, 2]) + tgt = 
herm.hermval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herm.hermvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermvander3d(self): + # also tests hermval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herm.hermvander3d(x1, x2, x3, [1, 2, 3]) + tgt = herm.hermval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_hermfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, herm.hermfit, [1], [1], -1) + assert_raises(TypeError, herm.hermfit, [[1]], [1], 0) + assert_raises(TypeError, herm.hermfit, [], [1], 0) + assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0) + assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0) + assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herm.hermfit, [1], [1], [-1,]) + assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, herm.hermfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = herm.hermfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(herm.hermval(x, coef3), y) + coef3 = herm.hermfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(herm.hermval(x, coef3), y) + # + coef4 = herm.hermfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(herm.hermval(x, coef4), y) + # + coef2d = herm.hermfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = herm.hermfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. 
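The comment above is terse: for the four points used next, the plain sum of squares, sum(x**2), is exactly zero, so the check appears to guard the degenerate case of norm-based column scaling in the least-squares fit. A standalone restatement of the case (mirroring the assertion that follows):

import numpy as np
import numpy.polynomial.hermite as herm

x = np.array([1, 1j, -1, -1j])
assert (x**2).sum() == 0                              # degenerate scaling case
assert np.allclose(herm.hermfit(x, x, 1), [0, 0.5])   # y = x is 0.5*H1(x)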
+ x = [1, 1j, -1, -1j] + assert_almost_equal(herm.hermfit(x, x, 1), [0, .5]) + assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = herm.hermfit(x, y, 4) + assert_almost_equal(herm.hermval(x, coef1), y) + coef2 = herm.hermfit(x, y, [0, 2, 4]) + assert_almost_equal(herm.hermval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, herm.hermcompanion, []) + assert_raises(ValueError, herm.hermcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(herm.hermcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(herm.hermcompanion([1, 2])[0, 0] == -.25) + + +class TestGauss: + + def test_100(self): + x, w = herm.hermgauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. + v = herm.hermvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.sqrt(np.pi) + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_hermfromroots(self): + res = herm.hermfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = herm.hermfromroots(roots) + res = herm.hermval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(herm.herm2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_hermroots(self): + assert_almost_equal(herm.hermroots([1]), []) + assert_almost_equal(herm.hermroots([1, 1]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = herm.hermroots(herm.hermfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, herm.hermtrim, coef, -1) + + # Test results + assert_equal(herm.hermtrim(coef), coef[:-1]) + assert_equal(herm.hermtrim(coef, 1), coef[:-3]) + assert_equal(herm.hermtrim(coef, 2), [0]) + + def test_hermline(self): + assert_equal(herm.hermline(3, 4), [3, 2]) + + def test_herm2poly(self): + for i in range(10): + assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i]) + + def test_poly2herm(self): + for i in range(10): + assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-5, 5, 11) + tgt = np.exp(-x**2) + res = herm.hermweight(x) + assert_almost_equal(res, tgt) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite_e.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite_e.py new file mode 100644 index 00000000..2d262a33 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite_e.py @@ -0,0 +1,556 @@ +"""Tests for hermite_e module. 
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.hermite_e as herme +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +He0 = np.array([1]) +He1 = np.array([0, 1]) +He2 = np.array([-1, 0, 1]) +He3 = np.array([0, -3, 0, 1]) +He4 = np.array([3, 0, -6, 0, 1]) +He5 = np.array([0, 15, 0, -10, 0, 1]) +He6 = np.array([-15, 0, 45, 0, -15, 0, 1]) +He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1]) +He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1]) +He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1]) + +Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9] + + +def trim(x): + return herme.hermetrim(x, tol=1e-6) + + +class TestConstants: + + def test_hermedomain(self): + assert_equal(herme.hermedomain, [-1, 1]) + + def test_hermezero(self): + assert_equal(herme.hermezero, [0]) + + def test_hermeone(self): + assert_equal(herme.hermeone, [1]) + + def test_hermex(self): + assert_equal(herme.hermex, [0, 1]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_hermeadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herme.hermeadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermesub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herme.hermesub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermemulx(self): + assert_equal(herme.hermemulx([0]), [0]) + assert_equal(herme.hermemulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i, 0, 1] + assert_equal(herme.hermemulx(ser), tgt) + + def test_hermemul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = herme.hermeval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0]*j + [1] + val2 = herme.hermeval(self.x, pol2) + pol3 = herme.hermemul(pol1, pol2) + val3 = herme.hermeval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_hermediv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = herme.hermeadd(ci, cj) + quo, rem = herme.hermediv(tgt, ci) + res = herme.hermeadd(herme.hermemul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermepow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(herme.hermemul, [c]*j, np.array([1])) + res = herme.hermepow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([4., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_hermeval(self): + #check empty input + assert_equal(herme.hermeval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Helist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = herme.hermeval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(herme.hermeval(x, 
[1]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims) + + def test_hermeval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = herme.hermeval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermeval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermeval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = herme.hermeval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermeval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermegrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herme.hermegrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermegrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_hermegrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herme.hermegrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = herme.hermegrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_hermeint(self): + # check exceptions + assert_raises(TypeError, herme.hermeint, [0], .5) + assert_raises(ValueError, herme.hermeint, [0], -1) + assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0]) + assert_raises(ValueError, herme.hermeint, [0], lbnd=[0]) + assert_raises(ValueError, herme.hermeint, [0], scl=[0]) + assert_raises(TypeError, herme.hermeint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = herme.hermeint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i]) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herme.hermeval(-1, hermeint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1) + res = herme.hermeint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k]) + res = herme.hermeint(pol, m=j, k=list(range(j))) + 
assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1) + res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], scl=2) + res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T + res = herme.hermeint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c) for c in c2d]) + res = herme.hermeint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d]) + res = herme.hermeint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_hermeder(self): + # check exceptions + assert_raises(TypeError, herme.hermeder, [0], .5) + assert_raises(ValueError, herme.hermeder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = herme.hermeder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = herme.hermeder( + herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T + res = herme.hermeder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeder(c) for c in c2d]) + res = herme.hermeder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_hermevander(self): + # check for 1d x + x = np.arange(3) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + def test_hermevander2d(self): + # also tests hermeval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = herme.hermevander2d(x1, x2, [1, 2]) + tgt = herme.hermeval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermevander3d(self): + # also tests hermeval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herme.hermevander3d(x1, x2, x3, [1, 2, 3]) + tgt = herme.hermeval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3]) 
+ assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_hermefit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, herme.hermefit, [1], [1], -1) + assert_raises(TypeError, herme.hermefit, [[1]], [1], 0) + assert_raises(TypeError, herme.hermefit, [], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0) + assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herme.hermefit, [1], [1], [-1,]) + assert_raises(ValueError, herme.hermefit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, herme.hermefit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = herme.hermefit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(herme.hermeval(x, coef3), y) + coef3 = herme.hermefit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(herme.hermeval(x, coef3), y) + # + coef4 = herme.hermefit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + coef4 = herme.hermefit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = herme.hermefit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + # + coef2d = herme.hermefit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = herme.hermefit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = herme.hermefit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = herme.hermefit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(herme.hermefit(x, x, 1), [0, 1]) + assert_almost_equal(herme.hermefit(x, x, [0, 1]), [0, 1]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = herme.hermefit(x, y, 4) + assert_almost_equal(herme.hermeval(x, coef1), y) + coef2 = herme.hermefit(x, y, [0, 2, 4]) + assert_almost_equal(herme.hermeval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, herme.hermecompanion, []) + assert_raises(ValueError, herme.hermecompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(herme.hermecompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(herme.hermecompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = herme.hermegauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
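For orientation before the 100-point orthogonality check below: Gauss-HermiteE quadrature with n points integrates p(x)*exp(-x**2/2) exactly for polynomials p up to degree 2*n - 1, so the weights alone integrate p = 1. A tiny standalone sketch:

import numpy as np
import numpy.polynomial.hermite_e as herme

x, w = herme.hermegauss(20)
assert np.allclose(w.sum(), np.sqrt(2*np.pi))          # integral of exp(-x**2/2)
assert np.allclose((w*x**2).sum(), np.sqrt(2*np.pi))   # second moment of the weight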
+ v = herme.hermevander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.sqrt(2*np.pi) + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_hermefromroots(self): + res = herme.hermefromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = herme.hermefromroots(roots) + res = herme.hermeval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(herme.herme2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_hermeroots(self): + assert_almost_equal(herme.hermeroots([1]), []) + assert_almost_equal(herme.hermeroots([1, 1]), [-1]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = herme.hermeroots(herme.hermefromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermetrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, herme.hermetrim, coef, -1) + + # Test results + assert_equal(herme.hermetrim(coef), coef[:-1]) + assert_equal(herme.hermetrim(coef, 1), coef[:-3]) + assert_equal(herme.hermetrim(coef, 2), [0]) + + def test_hermeline(self): + assert_equal(herme.hermeline(3, 4), [3, 4]) + + def test_herme2poly(self): + for i in range(10): + assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i]) + + def test_poly2herme(self): + for i in range(10): + assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-5, 5, 11) + tgt = np.exp(-.5*x**2) + res = herme.hermeweight(x) + assert_almost_equal(res, tgt) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_laguerre.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_laguerre.py new file mode 100644 index 00000000..227ef3c5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_laguerre.py @@ -0,0 +1,537 @@ +"""Tests for laguerre module. 
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.laguerre as lag +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +L0 = np.array([1])/1 +L1 = np.array([1, -1])/1 +L2 = np.array([2, -4, 1])/2 +L3 = np.array([6, -18, 9, -1])/6 +L4 = np.array([24, -96, 72, -16, 1])/24 +L5 = np.array([120, -600, 600, -200, 25, -1])/120 +L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1])/720 + +Llist = [L0, L1, L2, L3, L4, L5, L6] + + +def trim(x): + return lag.lagtrim(x, tol=1e-6) + + +class TestConstants: + + def test_lagdomain(self): + assert_equal(lag.lagdomain, [0, 1]) + + def test_lagzero(self): + assert_equal(lag.lagzero, [0]) + + def test_lagone(self): + assert_equal(lag.lagone, [1]) + + def test_lagx(self): + assert_equal(lag.lagx, [1, -1]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_lagadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = lag.lagadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = lag.lagsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagmulx(self): + assert_equal(lag.lagmulx([0]), [0]) + assert_equal(lag.lagmulx([1]), [1, -1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)] + assert_almost_equal(lag.lagmulx(ser), tgt) + + def test_lagmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = lag.lagval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0]*j + [1] + val2 = lag.lagval(self.x, pol2) + pol3 = lag.lagmul(pol1, pol2) + val3 = lag.lagval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_lagdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = lag.lagadd(ci, cj) + quo, rem = lag.lagdiv(tgt, ci) + res = lag.lagadd(lag.lagmul(quo, ci), rem) + assert_almost_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(lag.lagmul, [c]*j, np.array([1])) + res = lag.lagpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([9., -14., 6.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_lagval(self): + #check empty input + assert_equal(lag.lagval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(7): + msg = f"At i={i}" + tgt = y[i] + res = lag.lagval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(lag.lagval(x, [1]).shape, dims) + assert_equal(lag.lagval(x, [1, 0]).shape, dims) + assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims) + + def test_lagval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + 
assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = lag.lagval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.lagval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_lagval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = lag.lagval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.lagval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_laggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = lag.laggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.laggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_laggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = lag.laggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = lag.laggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_lagint(self): + # check exceptions + assert_raises(TypeError, lag.lagint, [0], .5) + assert_raises(ValueError, lag.lagint, [0], -1) + assert_raises(ValueError, lag.lagint, [0], 1, [0, 0]) + assert_raises(ValueError, lag.lagint, [0], lbnd=[0]) + assert_raises(ValueError, lag.lagint, [0], scl=[0]) + assert_raises(TypeError, lag.lagint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = lag.lagint([0], m=i, k=k) + assert_almost_equal(res, [1, -1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i]) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(lag.lagval(-1, lagint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], scl=2) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1) + res = lag.lagint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k]) + res = lag.lagint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1) + res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in 
range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], scl=2) + res = lag.lagint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T + res = lag.lagint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c) for c in c2d]) + res = lag.lagint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c, k=3) for c in c2d]) + res = lag.lagint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_lagder(self): + # check exceptions + assert_raises(TypeError, lag.lagder, [0], .5) + assert_raises(ValueError, lag.lagder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = lag.lagder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = lag.lagder(lag.lagint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T + res = lag.lagder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagder(c) for c in c2d]) + res = lag.lagder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_lagvander(self): + # check for 1d x + x = np.arange(3) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + def test_lagvander2d(self): + # also tests lagval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = lag.lagvander2d(x1, x2, [1, 2]) + tgt = lag.lagval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_lagvander3d(self): + # also tests lagval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = lag.lagvander3d(x1, x2, x3, [1, 2, 3]) + tgt = lag.lagval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_lagfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + # Test exceptions + assert_raises(ValueError, lag.lagfit, [1], [1], -1) + assert_raises(TypeError, lag.lagfit, [[1]], [1], 0) + assert_raises(TypeError, lag.lagfit, [], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0) + assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) 
+ assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, lag.lagfit, [1], [1], [-1,]) + assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, lag.lagfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = lag.lagfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(lag.lagval(x, coef3), y) + coef3 = lag.lagfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(lag.lagval(x, coef3), y) + # + coef4 = lag.lagfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(lag.lagval(x, coef4), y) + coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(lag.lagval(x, coef4), y) + # + coef2d = lag.lagfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = lag.lagfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(lag.lagfit(x, x, 1), [1, -1]) + assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1]) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, lag.lagcompanion, []) + assert_raises(ValueError, lag.lagcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(lag.lagcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5) + + +class TestGauss: + + def test_100(self): + x, w = lag.laggauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
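As with the Hermite variants above, the orthogonality check below rests on Gauss-Laguerre quadrature: n nodes and weights integrate p(x)*exp(-x) on [0, inf) exactly for polynomials p up to degree 2*n - 1. A tiny standalone sketch:

import numpy as np
import numpy.polynomial.laguerre as lag

x, w = lag.laggauss(20)
assert np.allclose(w.sum(), 1.0)       # integral of exp(-x) on [0, inf)
assert np.allclose((w*x).sum(), 1.0)   # integral of x*exp(-x) is Gamma(2) = 1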
+ v = lag.lagvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = 1.0 + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_lagfromroots(self): + res = lag.lagfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = lag.lagfromroots(roots) + res = lag.lagval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(lag.lag2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_lagroots(self): + assert_almost_equal(lag.lagroots([1]), []) + assert_almost_equal(lag.lagroots([0, 1]), [1]) + for i in range(2, 5): + tgt = np.linspace(0, 3, i) + res = lag.lagroots(lag.lagfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, lag.lagtrim, coef, -1) + + # Test results + assert_equal(lag.lagtrim(coef), coef[:-1]) + assert_equal(lag.lagtrim(coef, 1), coef[:-3]) + assert_equal(lag.lagtrim(coef, 2), [0]) + + def test_lagline(self): + assert_equal(lag.lagline(3, 4), [7, -4]) + + def test_lag2poly(self): + for i in range(7): + assert_almost_equal(lag.lag2poly([0]*i + [1]), Llist[i]) + + def test_poly2lag(self): + for i in range(7): + assert_almost_equal(lag.poly2lag(Llist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(0, 10, 11) + tgt = np.exp(-x) + res = lag.lagweight(x) + assert_almost_equal(res, tgt) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_legendre.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_legendre.py new file mode 100644 index 00000000..92399c16 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_legendre.py @@ -0,0 +1,568 @@ +"""Tests for legendre module. 
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.legendre as leg +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + +L0 = np.array([1]) +L1 = np.array([0, 1]) +L2 = np.array([-1, 0, 3])/2 +L3 = np.array([0, -3, 0, 5])/2 +L4 = np.array([3, 0, -30, 0, 35])/8 +L5 = np.array([0, 15, 0, -70, 0, 63])/8 +L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16 +L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16 +L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128 +L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128 + +Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9] + + +def trim(x): + return leg.legtrim(x, tol=1e-6) + + +class TestConstants: + + def test_legdomain(self): + assert_equal(leg.legdomain, [-1, 1]) + + def test_legzero(self): + assert_equal(leg.legzero, [0]) + + def test_legone(self): + assert_equal(leg.legone, [1]) + + def test_legx(self): + assert_equal(leg.legx, [0, 1]) + + +class TestArithmetic: + x = np.linspace(-1, 1, 100) + + def test_legadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = leg.legadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = leg.legsub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legmulx(self): + assert_equal(leg.legmulx([0]), [0]) + assert_equal(leg.legmulx([1]), [0, 1]) + for i in range(1, 5): + tmp = 2*i + 1 + ser = [0]*i + [1] + tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp] + assert_equal(leg.legmulx(ser), tgt) + + def test_legmul(self): + # check values of result + for i in range(5): + pol1 = [0]*i + [1] + val1 = leg.legval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0]*j + [1] + val2 = leg.legval(self.x, pol2) + pol3 = leg.legmul(pol1, pol2) + val3 = leg.legval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1*val2, err_msg=msg) + + def test_legdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1] + cj = [0]*j + [1] + tgt = leg.legadd(ci, cj) + quo, rem = leg.legdiv(tgt, ci) + res = leg.legadd(leg.legmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(leg.legmul, [c]*j, np.array([1])) + res = leg.legpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2., 2., 2.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_legval(self): + #check empty input + assert_equal(leg.legval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = leg.legval(x, [0]*i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(leg.legval(x, [1]).shape, dims) + assert_equal(leg.legval(x, [1, 0]).shape, 
dims) + assert_equal(leg.legval(x, [1, 0, 0]).shape, dims) + + def test_legval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = leg.legval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.legval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_legval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = leg.legval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.legval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_leggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = leg.leggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.leggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_leggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = leg.leggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = leg.leggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_legint(self): + # check exceptions + assert_raises(TypeError, leg.legint, [0], .5) + assert_raises(ValueError, leg.legint, [0], -1) + assert_raises(ValueError, leg.legint, [0], 1, [0, 0]) + assert_raises(ValueError, leg.legint, [0], lbnd=[0]) + assert_raises(ValueError, leg.legint, [0], scl=[0]) + assert_raises(TypeError, leg.legint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = leg.legint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i]) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(leg.legval(-1, legint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], scl=2) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1) + res = leg.legint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k]) + res = leg.legint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1) + res = 
leg.legint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], scl=2) + res = leg.legint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legint(c) for c in c2d.T]).T + res = leg.legint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c) for c in c2d]) + res = leg.legint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c, k=3) for c in c2d]) + res = leg.legint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + def test_legint_zerointord(self): + assert_equal(leg.legint((1, 2, 3), 0), (1, 2, 3)) + + +class TestDerivative: + + def test_legder(self): + # check exceptions + assert_raises(TypeError, leg.legder, [0], .5) + assert_raises(ValueError, leg.legder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = leg.legder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = leg.legder(leg.legint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legder(c) for c in c2d.T]).T + res = leg.legder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legder(c) for c in c2d]) + res = leg.legder(c2d, axis=1) + assert_almost_equal(res, tgt) + + def test_legder_orderhigherthancoeff(self): + c = (1, 2, 3, 4) + assert_equal(leg.legder(c, 4), [0]) + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_legvander(self): + # check for 1d x + x = np.arange(3) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + def test_legvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = leg.legvander2d(x1, x2, [1, 2]) + tgt = leg.legval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_legvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = leg.legvander3d(x1, x2, x3, [1, 2, 3]) + tgt = leg.legval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + def test_legvander_negdeg(self): + assert_raises(ValueError, leg.legvander, (1, 2, 3), -1) + + +class TestFitting: + + def test_legfit(self): + def 
f(x):
+            return x*(x - 1)*(x - 2)
+
+        def f2(x):
+            return x**4 + x**2 + 1
+
+        # Test exceptions
+        assert_raises(ValueError, leg.legfit, [1], [1], -1)
+        assert_raises(TypeError, leg.legfit, [[1]], [1], 0)
+        assert_raises(TypeError, leg.legfit, [], [1], 0)
+        assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0)
+        assert_raises(TypeError, leg.legfit, [1, 2], [1], 0)
+        assert_raises(TypeError, leg.legfit, [1], [1, 2], 0)
+        assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]])
+        assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1])
+        assert_raises(ValueError, leg.legfit, [1], [1], [-1,])
+        assert_raises(ValueError, leg.legfit, [1], [1], [2, -1, 6])
+        assert_raises(TypeError, leg.legfit, [1], [1], [])
+
+        # Test fit
+        x = np.linspace(0, 2)
+        y = f(x)
+        #
+        coef3 = leg.legfit(x, y, 3)
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(leg.legval(x, coef3), y)
+        coef3 = leg.legfit(x, y, [0, 1, 2, 3])
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(leg.legval(x, coef3), y)
+        #
+        coef4 = leg.legfit(x, y, 4)
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(leg.legval(x, coef4), y)
+        coef4 = leg.legfit(x, y, [0, 1, 2, 3, 4])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(leg.legval(x, coef4), y)
+        # check things still work if deg is not in strictly increasing order
+        coef4 = leg.legfit(x, y, [2, 3, 4, 1, 0])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(leg.legval(x, coef4), y)
+        #
+        coef2d = leg.legfit(x, np.array([y, y]).T, 3)
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        coef2d = leg.legfit(x, np.array([y, y]).T, [0, 1, 2, 3])
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        # test weighting: zero-weight points are corrupted and must be
+        # ignored by the fit
+        w = np.zeros_like(x)
+        yw = y.copy()
+        w[1::2] = 1
+        yw[0::2] = 0
+        wcoef3 = leg.legfit(x, yw, 3, w=w)
+        assert_almost_equal(wcoef3, coef3)
+        wcoef3 = leg.legfit(x, yw, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef3, coef3)
+        #
+        wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        wcoef2d = leg.legfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test scaling with complex x values whose squares sum to zero
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(leg.legfit(x, x, 1), [0, 1])
+        assert_almost_equal(leg.legfit(x, x, [0, 1]), [0, 1])
+        # test fitting only even Legendre polynomials
+        x = np.linspace(-1, 1)
+        y = f2(x)
+        coef1 = leg.legfit(x, y, 4)
+        assert_almost_equal(leg.legval(x, coef1), y)
+        coef2 = leg.legfit(x, y, [0, 2, 4])
+        assert_almost_equal(leg.legval(x, coef2), y)
+        assert_almost_equal(coef1, coef2)
+
+
+class TestCompanion:
+
+    def test_raises(self):
+        assert_raises(ValueError, leg.legcompanion, [])
+        assert_raises(ValueError, leg.legcompanion, [1])
+
+    def test_dimensions(self):
+        for i in range(1, 5):
+            coef = [0]*i + [1]
+            assert_(leg.legcompanion(coef).shape == (i, i))
+
+    def test_linear_root(self):
+        assert_(leg.legcompanion([1, 2])[0, 0] == -.5)
+
+
+class TestGauss:
+
+    def test_100(self):
+        x, w = leg.leggauss(100)
+
+        # test orthogonality. Note that the results need to be normalized
+        # before comparison, since the Legendre basis is orthogonal but
+        # not orthonormal.
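+        # (integral_{-1}^{1} P_n(x)**2 dx = 2/(2*n + 1), so vv has a
+        # non-unit diagonal that the rescaling below normalizes away.)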
+ v = leg.legvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1/np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = 2.0 + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_legfromroots(self): + res = leg.legfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + pol = leg.legfromroots(roots) + res = leg.legval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(leg.leg2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_legroots(self): + assert_almost_equal(leg.legroots([1]), []) + assert_almost_equal(leg.legroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = leg.legroots(leg.legfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, leg.legtrim, coef, -1) + + # Test results + assert_equal(leg.legtrim(coef), coef[:-1]) + assert_equal(leg.legtrim(coef, 1), coef[:-3]) + assert_equal(leg.legtrim(coef, 2), [0]) + + def test_legline(self): + assert_equal(leg.legline(3, 4), [3, 4]) + + def test_legline_zeroscl(self): + assert_equal(leg.legline(3, 0), [3]) + + def test_leg2poly(self): + for i in range(10): + assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i]) + + def test_poly2leg(self): + for i in range(10): + assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11) + tgt = 1. + res = leg.legweight(x) + assert_almost_equal(res, tgt) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_polynomial.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_polynomial.py new file mode 100644 index 00000000..6b3ef238 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_polynomial.py @@ -0,0 +1,611 @@ +"""Tests for polynomial module. 
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.polynomial as poly +import pickle +from copy import deepcopy +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + assert_warns, assert_array_equal, assert_raises_regex) + + +def trim(x): + return poly.polytrim(x, tol=1e-6) + +T0 = [1] +T1 = [0, 1] +T2 = [-1, 0, 2] +T3 = [0, -3, 0, 4] +T4 = [1, 0, -8, 0, 8] +T5 = [0, 5, 0, -20, 0, 16] +T6 = [-1, 0, 18, 0, -48, 0, 32] +T7 = [0, -7, 0, 56, 0, -112, 0, 64] +T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] +T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] + +Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] + + +class TestConstants: + + def test_polydomain(self): + assert_equal(poly.polydomain, [-1, 1]) + + def test_polyzero(self): + assert_equal(poly.polyzero, [0]) + + def test_polyone(self): + assert_equal(poly.polyone, [1]) + + def test_polyx(self): + assert_equal(poly.polyx, [0, 1]) + + def test_copy(self): + x = poly.Polynomial([1, 2, 3]) + y = deepcopy(x) + assert_equal(x, y) + + def test_pickle(self): + x = poly.Polynomial([1, 2, 3]) + y = pickle.loads(pickle.dumps(x)) + assert_equal(x, y) + +class TestArithmetic: + + def test_polyadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = poly.polyadd([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polysub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = poly.polysub([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polymulx(self): + assert_equal(poly.polymulx([0]), [0]) + assert_equal(poly.polymulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0]*i + [1] + tgt = [0]*(i + 1) + [1] + assert_equal(poly.polymulx(ser), tgt) + + def test_polymul(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(i + j + 1) + tgt[i + j] += 1 + res = poly.polymul([0]*i + [1], [0]*j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polydiv(self): + # check zero division + assert_raises(ZeroDivisionError, poly.polydiv, [1], [0]) + + # check scalar division + quo, rem = poly.polydiv([2], [2]) + assert_equal((quo, rem), (1, 0)) + quo, rem = poly.polydiv([2, 2], [2]) + assert_equal((quo, rem), ((1, 1), 0)) + + # check rest. 
+ for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0]*i + [1, 2] + cj = [0]*j + [1, 2] + tgt = poly.polyadd(ci, cj) + quo, rem = poly.polydiv(tgt, ci) + res = poly.polyadd(poly.polymul(quo, ci), rem) + assert_equal(res, tgt, err_msg=msg) + + def test_polypow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(poly.polymul, [c]*j, np.array([1])) + res = poly.polypow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([1., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + y = poly.polyval(x, [1., 2., 3.]) + + def test_polyval(self): + #check empty input + assert_equal(poly.polyval([], [1]).size, 0) + + #check normal input) + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(5): + tgt = y[i] + res = poly.polyval(x, [0]*i + [1]) + assert_almost_equal(res, tgt) + tgt = x*(x**2 - 1) + res = poly.polyval(x, [0, -1, 0, 1]) + assert_almost_equal(res, tgt) + + #check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(poly.polyval(x, [1]).shape, dims) + assert_equal(poly.polyval(x, [1, 0]).shape, dims) + assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) + + #check masked arrays are processed correctly + mask = [False, True, False] + mx = np.ma.array([1, 2, 3], mask=mask) + res = np.polyval([7, 5, 3], mx) + assert_array_equal(res.mask, mask) + + #check subtypes of ndarray are preserved + class C(np.ndarray): + pass + + cx = np.array([1, 2, 3]).view(C) + assert_equal(type(np.polyval([2, 3, 4], cx)), C) + + def test_polyvalfromroots(self): + # check exception for broadcasting x values over root array with + # too few dimensions + assert_raises(ValueError, poly.polyvalfromroots, + [1], [1], tensor=False) + + # check empty input + assert_equal(poly.polyvalfromroots([], [1]).size, 0) + assert_(poly.polyvalfromroots([], [1]).shape == (0,)) + + # check empty input + multidimensional roots + assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0) + assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0)) + + # check scalar input + assert_equal(poly.polyvalfromroots(1, 1), 0) + assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,)) + + # check normal input) + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(1, 5): + tgt = y[i] + res = poly.polyvalfromroots(x, [0]*i) + assert_almost_equal(res, tgt) + tgt = x*(x - 1)*(x + 1) + res = poly.polyvalfromroots(x, [-1, 0, 1]) + assert_almost_equal(res, tgt) + + # check that shape is preserved + for i in range(3): + dims = [2]*i + x = np.zeros(dims) + assert_equal(poly.polyvalfromroots(x, [1]).shape, dims) + assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims) + assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims) + + # check compatibility with factorization + ptest = [15, 2, -16, -2, 1] + r = poly.polyroots(ptest) + x = np.linspace(-1, 1) + assert_almost_equal(poly.polyval(x, ptest), + poly.polyvalfromroots(x, r)) + + # check multidimensional arrays of roots and values + # check tensor=False + rshape = (3, 5) + x = np.arange(-3, 2) + r = np.random.randint(-5, 5, size=rshape) + res = poly.polyvalfromroots(x, r, tensor=False) + tgt = np.empty(r.shape[1:]) + for ii in range(tgt.size): + tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii]) + assert_equal(res, tgt) + + 
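+        # (with tensor=True below, every column of roots r[:, ii] is
+        # evaluated at every element of x, so once x is stacked to shape
+        # (2, 5) the result has shape r.shape[1:] + x.shape == (5, 2, 5))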
# check tensor=True + x = np.vstack([x, 2*x]) + res = poly.polyvalfromroots(x, r, tensor=True) + tgt = np.empty(r.shape[1:] + x.shape) + for ii in range(r.shape[1]): + for jj in range(x.shape[0]): + tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii]) + assert_equal(res, tgt) + + def test_polyval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises_regex(ValueError, 'incompatible', + poly.polyval2d, x1, x2[:2], self.c2d) + + #test values + tgt = y1*y2 + res = poly.polyval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polyval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_polyval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test exceptions + assert_raises_regex(ValueError, 'incompatible', + poly.polyval3d, x1, x2, x3[:2], self.c3d) + + #test values + tgt = y1*y2*y3 + res = poly.polyval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polyval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_polygrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j->ij', y1, y2) + res = poly.polygrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polygrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3)*2) + + def test_polygrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + #test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = poly.polygrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + #test shape + z = np.ones((2, 3)) + res = poly.polygrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)*3) + + +class TestIntegral: + + def test_polyint(self): + # check exceptions + assert_raises(TypeError, poly.polyint, [0], .5) + assert_raises(ValueError, poly.polyint, [0], -1) + assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) + assert_raises(ValueError, poly.polyint, [0], lbnd=[0]) + assert_raises(ValueError, poly.polyint, [0], scl=[0]) + assert_raises(TypeError, poly.polyint, [0], axis=.5) + with assert_warns(DeprecationWarning): + poly.polyint([1, 1], 1.) 
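+        # (an integral-valued float order like 1. is deprecated; m must be
+        # a non-negative integer.  For reference, a single integration of
+        # x**2, i.e. poly.polyint([0, 0, 1]), yields [0, 0, 0, 1/3]: the
+        # antiderivative x**3/3 with a zero integration constant.)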
+ + # test integration of zero polynomial + for i in range(2, 5): + k = [0]*(i - 2) + [1] + res = poly.polyint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [1/scl] + res = poly.polyint(pol, m=1, k=[i]) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + res = poly.polyint(pol, m=1, k=[i], lbnd=-1) + assert_almost_equal(poly.polyval(-1, res), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0]*i + [1] + tgt = [i] + [0]*i + [2/scl] + res = poly.polyint(pol, m=1, k=[i], scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1) + res = poly.polyint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k]) + res = poly.polyint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) + res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0]*i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], scl=2) + res = poly.polyint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T + res = poly.polyint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c) for c in c2d]) + res = poly.polyint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c, k=3) for c in c2d]) + res = poly.polyint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_polyder(self): + # check exceptions + assert_raises(TypeError, poly.polyder, [0], .5) + assert_raises(ValueError, poly.polyder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0]*i + [1] + res = poly.polyder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = poly.polyder(poly.polyint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0]*i + [1] + res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T + res = poly.polyder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyder(c) for c in c2d]) + res = poly.polyder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class 
TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5))*2 - 1 + + def test_polyvander(self): + # check for 1d x + x = np.arange(3) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0]*i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + def test_polyvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = poly.polyvander2d(x1, x2, [1, 2]) + tgt = poly.polyval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_polyvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = poly.polyvander3d(x1, x2, x3, [1, 2, 3]) + tgt = poly.polyval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + def test_polyvandernegdeg(self): + x = np.arange(3) + assert_raises(ValueError, poly.polyvander, x, -1) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, poly.polycompanion, []) + assert_raises(ValueError, poly.polycompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0]*i + [1] + assert_(poly.polycompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(poly.polycompanion([1, 2])[0, 0] == -.5) + + +class TestMisc: + + def test_polyfromroots(self): + res = poly.polyfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2]) + tgt = Tlist[i] + res = poly.polyfromroots(roots)*2**(i-1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyroots(self): + assert_almost_equal(poly.polyroots([1]), []) + assert_almost_equal(poly.polyroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = poly.polyroots(poly.polyfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyfit(self): + def f(x): + return x*(x - 1)*(x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, poly.polyfit, [1], [1], -1) + assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) + assert_raises(TypeError, poly.polyfit, [], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) + assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, poly.polyfit, [1], [1], [-1,]) + assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, poly.polyfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = poly.polyfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(poly.polyval(x, coef3), y) + coef3 = poly.polyfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(poly.polyval(x, coef3), y) + # + coef4 = poly.polyfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(poly.polyval(x, coef4), y) + coef4 = 
poly.polyfit(x, y, [0, 1, 2, 3, 4])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(poly.polyval(x, coef4), y)
+        #
+        coef2d = poly.polyfit(x, np.array([y, y]).T, 3)
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3])
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        # test weighting
+        w = np.zeros_like(x)
+        yw = y.copy()
+        w[1::2] = 1
+        yw[0::2] = 0
+        wcoef3 = poly.polyfit(x, yw, 3, w=w)
+        assert_almost_equal(wcoef3, coef3)
+        wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef3, coef3)
+        #
+        wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test scaling with complex x values whose squares sum to zero
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])
+        assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1])
+        # test fitting only even polynomials
+        x = np.linspace(-1, 1)
+        y = f2(x)
+        coef1 = poly.polyfit(x, y, 4)
+        assert_almost_equal(poly.polyval(x, coef1), y)
+        coef2 = poly.polyfit(x, y, [0, 2, 4])
+        assert_almost_equal(poly.polyval(x, coef2), y)
+        assert_almost_equal(coef1, coef2)
+
+    def test_polytrim(self):
+        coef = [2, -1, 1, 0]
+
+        # Test exceptions
+        assert_raises(ValueError, poly.polytrim, coef, -1)
+
+        # Test results
+        assert_equal(poly.polytrim(coef), coef[:-1])
+        assert_equal(poly.polytrim(coef, 1), coef[:-3])
+        assert_equal(poly.polytrim(coef, 2), [0])
+
+    def test_polyline(self):
+        assert_equal(poly.polyline(3, 4), [3, 4])
+
+    def test_polyline_zero(self):
+        assert_equal(poly.polyline(3, 0), [3])
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_polyutils.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_polyutils.py
new file mode 100644
index 00000000..cc630790
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_polyutils.py
@@ -0,0 +1,121 @@
+"""Tests for polyutils module.
+ +""" +import numpy as np +import numpy.polynomial.polyutils as pu +from numpy.testing import ( + assert_almost_equal, assert_raises, assert_equal, assert_, + ) + + +class TestMisc: + + def test_trimseq(self): + for i in range(5): + tgt = [1] + res = pu.trimseq([1] + [0]*5) + assert_equal(res, tgt) + + def test_as_series(self): + # check exceptions + assert_raises(ValueError, pu.as_series, [[]]) + assert_raises(ValueError, pu.as_series, [[[1, 2]]]) + assert_raises(ValueError, pu.as_series, [[1], ['a']]) + # check common types + types = ['i', 'd', 'O'] + for i in range(len(types)): + for j in range(i): + ci = np.ones(1, types[i]) + cj = np.ones(1, types[j]) + [resi, resj] = pu.as_series([ci, cj]) + assert_(resi.dtype.char == resj.dtype.char) + assert_(resj.dtype.char == types[i]) + + def test_trimcoef(self): + coef = [2, -1, 1, 0] + # Test exceptions + assert_raises(ValueError, pu.trimcoef, coef, -1) + # Test results + assert_equal(pu.trimcoef(coef), coef[:-1]) + assert_equal(pu.trimcoef(coef, 1), coef[:-3]) + assert_equal(pu.trimcoef(coef, 2), [0]) + + def test_vander_nd_exception(self): + # n_dims != len(points) + assert_raises(ValueError, pu._vander_nd, (), (1, 2, 3), [90]) + # n_dims != len(degrees) + assert_raises(ValueError, pu._vander_nd, (), (), [90.65]) + # n_dims == 0 + assert_raises(ValueError, pu._vander_nd, (), (), []) + + def test_div_zerodiv(self): + # c2[-1] == 0 + assert_raises(ZeroDivisionError, pu._div, pu._div, (1, 2, 3), [0]) + + def test_pow_too_large(self): + # power > maxpower + assert_raises(ValueError, pu._pow, (), [1, 2, 3], 5, 4) + +class TestDomain: + + def test_getdomain(self): + # test for real values + x = [1, 10, 3, -1] + tgt = [-1, 10] + res = pu.getdomain(x) + assert_almost_equal(res, tgt) + + # test for complex values + x = [1 + 1j, 1 - 1j, 0, 2] + tgt = [-1j, 2 + 1j] + res = pu.getdomain(x) + assert_almost_equal(res, tgt) + + def test_mapdomain(self): + # test for real values + dom1 = [0, 4] + dom2 = [1, 3] + tgt = dom2 + res = pu.mapdomain(dom1, dom1, dom2) + assert_almost_equal(res, tgt) + + # test for complex values + dom1 = [0 - 1j, 2 + 1j] + dom2 = [-2, 2] + tgt = dom2 + x = dom1 + res = pu.mapdomain(x, dom1, dom2) + assert_almost_equal(res, tgt) + + # test for multidimensional arrays + dom1 = [0, 4] + dom2 = [1, 3] + tgt = np.array([dom2, dom2]) + x = np.array([dom1, dom1]) + res = pu.mapdomain(x, dom1, dom2) + assert_almost_equal(res, tgt) + + # test that subtypes are preserved. + class MyNDArray(np.ndarray): + pass + + dom1 = [0, 4] + dom2 = [1, 3] + x = np.array([dom1, dom1]).view(MyNDArray) + res = pu.mapdomain(x, dom1, dom2) + assert_(isinstance(res, MyNDArray)) + + def test_mapparms(self): + # test for real values + dom1 = [0, 4] + dom2 = [1, 3] + tgt = [1, .5] + res = pu. 
mapparms(dom1, dom2) + assert_almost_equal(res, tgt) + + # test for complex values + dom1 = [0 - 1j, 2 + 1j] + dom2 = [-2, 2] + tgt = [-1 + 1j, 1 - 1j] + res = pu.mapparms(dom1, dom2) + assert_almost_equal(res, tgt) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_printing.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_printing.py new file mode 100644 index 00000000..6f2a5092 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_printing.py @@ -0,0 +1,530 @@ +from math import nan, inf +import pytest +from numpy.core import array, arange, printoptions +import numpy.polynomial as poly +from numpy.testing import assert_equal, assert_ + +# For testing polynomial printing with object arrays +from fractions import Fraction +from decimal import Decimal + + +class TestStrUnicodeSuperSubscripts: + + @pytest.fixture(scope='class', autouse=True) + def use_unicode(self): + poly.set_default_printstyle('unicode') + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·x + 3.0·x²"), + ([-1, 0, 3, -1], "-1.0 + 0.0·x + 3.0·x² - 1.0·x³"), + (arange(12), ("0.0 + 1.0·x + 2.0·x² + 3.0·x³ + 4.0·x⁴ + 5.0·x⁵ + " + "6.0·x⁶ + 7.0·x⁷ +\n8.0·x⁸ + 9.0·x⁹ + 10.0·x¹⁰ + " + "11.0·x¹¹")), + )) + def test_polynomial_str(self, inp, tgt): + res = str(poly.Polynomial(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·T₁(x) + 3.0·T₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·T₁(x) + 3.0·T₂(x) - 1.0·T₃(x)"), + (arange(12), ("0.0 + 1.0·T₁(x) + 2.0·T₂(x) + 3.0·T₃(x) + 4.0·T₄(x) + " + "5.0·T₅(x) +\n6.0·T₆(x) + 7.0·T₇(x) + 8.0·T₈(x) + " + "9.0·T₉(x) + 10.0·T₁₀(x) + 11.0·T₁₁(x)")), + )) + def test_chebyshev_str(self, inp, tgt): + res = str(poly.Chebyshev(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·P₁(x) + 3.0·P₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·P₁(x) + 3.0·P₂(x) - 1.0·P₃(x)"), + (arange(12), ("0.0 + 1.0·P₁(x) + 2.0·P₂(x) + 3.0·P₃(x) + 4.0·P₄(x) + " + "5.0·P₅(x) +\n6.0·P₆(x) + 7.0·P₇(x) + 8.0·P₈(x) + " + "9.0·P₉(x) + 10.0·P₁₀(x) + 11.0·P₁₁(x)")), + )) + def test_legendre_str(self, inp, tgt): + res = str(poly.Legendre(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·H₁(x) + 3.0·H₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·H₁(x) + 3.0·H₂(x) - 1.0·H₃(x)"), + (arange(12), ("0.0 + 1.0·H₁(x) + 2.0·H₂(x) + 3.0·H₃(x) + 4.0·H₄(x) + " + "5.0·H₅(x) +\n6.0·H₆(x) + 7.0·H₇(x) + 8.0·H₈(x) + " + "9.0·H₉(x) + 10.0·H₁₀(x) + 11.0·H₁₁(x)")), + )) + def test_hermite_str(self, inp, tgt): + res = str(poly.Hermite(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·He₁(x) + 3.0·He₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·He₁(x) + 3.0·He₂(x) - 1.0·He₃(x)"), + (arange(12), ("0.0 + 1.0·He₁(x) + 2.0·He₂(x) + 3.0·He₃(x) + " + "4.0·He₄(x) + 5.0·He₅(x) +\n6.0·He₆(x) + 7.0·He₇(x) + " + "8.0·He₈(x) + 9.0·He₉(x) + 10.0·He₁₀(x) +\n" + "11.0·He₁₁(x)")), + )) + def test_hermiteE_str(self, inp, tgt): + res = str(poly.HermiteE(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·L₁(x) + 3.0·L₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·L₁(x) + 3.0·L₂(x) - 1.0·L₃(x)"), + (arange(12), ("0.0 + 1.0·L₁(x) + 2.0·L₂(x) + 3.0·L₃(x) + 4.0·L₄(x) + " + "5.0·L₅(x) +\n6.0·L₆(x) + 7.0·L₇(x) + 8.0·L₈(x) + " + "9.0·L₉(x) + 10.0·L₁₀(x) + 11.0·L₁₁(x)")), + )) + def test_laguerre_str(self, inp, tgt): + res = str(poly.Laguerre(inp)) + 
assert_equal(res, tgt) + + +class TestStrAscii: + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 x + 3.0 x**2"), + ([-1, 0, 3, -1], "-1.0 + 0.0 x + 3.0 x**2 - 1.0 x**3"), + (arange(12), ("0.0 + 1.0 x + 2.0 x**2 + 3.0 x**3 + 4.0 x**4 + " + "5.0 x**5 + 6.0 x**6 +\n7.0 x**7 + 8.0 x**8 + " + "9.0 x**9 + 10.0 x**10 + 11.0 x**11")), + )) + def test_polynomial_str(self, inp, tgt): + res = str(poly.Polynomial(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 T_1(x) + 3.0 T_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 T_1(x) + 3.0 T_2(x) - 1.0 T_3(x)"), + (arange(12), ("0.0 + 1.0 T_1(x) + 2.0 T_2(x) + 3.0 T_3(x) + " + "4.0 T_4(x) + 5.0 T_5(x) +\n6.0 T_6(x) + 7.0 T_7(x) + " + "8.0 T_8(x) + 9.0 T_9(x) + 10.0 T_10(x) +\n" + "11.0 T_11(x)")), + )) + def test_chebyshev_str(self, inp, tgt): + res = str(poly.Chebyshev(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 P_1(x) + 3.0 P_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 P_1(x) + 3.0 P_2(x) - 1.0 P_3(x)"), + (arange(12), ("0.0 + 1.0 P_1(x) + 2.0 P_2(x) + 3.0 P_3(x) + " + "4.0 P_4(x) + 5.0 P_5(x) +\n6.0 P_6(x) + 7.0 P_7(x) + " + "8.0 P_8(x) + 9.0 P_9(x) + 10.0 P_10(x) +\n" + "11.0 P_11(x)")), + )) + def test_legendre_str(self, inp, tgt): + res = str(poly.Legendre(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 H_1(x) + 3.0 H_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 H_1(x) + 3.0 H_2(x) - 1.0 H_3(x)"), + (arange(12), ("0.0 + 1.0 H_1(x) + 2.0 H_2(x) + 3.0 H_3(x) + " + "4.0 H_4(x) + 5.0 H_5(x) +\n6.0 H_6(x) + 7.0 H_7(x) + " + "8.0 H_8(x) + 9.0 H_9(x) + 10.0 H_10(x) +\n" + "11.0 H_11(x)")), + )) + def test_hermite_str(self, inp, tgt): + res = str(poly.Hermite(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 He_1(x) + 3.0 He_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 He_1(x) + 3.0 He_2(x) - 1.0 He_3(x)"), + (arange(12), ("0.0 + 1.0 He_1(x) + 2.0 He_2(x) + 3.0 He_3(x) + " + "4.0 He_4(x) +\n5.0 He_5(x) + 6.0 He_6(x) + " + "7.0 He_7(x) + 8.0 He_8(x) + 9.0 He_9(x) +\n" + "10.0 He_10(x) + 11.0 He_11(x)")), + )) + def test_hermiteE_str(self, inp, tgt): + res = str(poly.HermiteE(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 L_1(x) + 3.0 L_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 L_1(x) + 3.0 L_2(x) - 1.0 L_3(x)"), + (arange(12), ("0.0 + 1.0 L_1(x) + 2.0 L_2(x) + 3.0 L_3(x) + " + "4.0 L_4(x) + 5.0 L_5(x) +\n6.0 L_6(x) + 7.0 L_7(x) + " + "8.0 L_8(x) + 9.0 L_9(x) + 10.0 L_10(x) +\n" + "11.0 L_11(x)")), + )) + def test_laguerre_str(self, inp, tgt): + res = str(poly.Laguerre(inp)) + assert_equal(res, tgt) + + +class TestLinebreaking: + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + def test_single_line_one_less(self): + # With 'ascii' style, len(str(p)) is default linewidth - 1 (i.e. 
74) + p = poly.Polynomial([12345678, 12345678, 12345678, 12345678, 123]) + assert_equal(len(str(p)), 74) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 + 123.0 x**4' + )) + + def test_num_chars_is_linewidth(self): + # len(str(p)) == default linewidth == 75 + p = poly.Polynomial([12345678, 12345678, 12345678, 12345678, 1234]) + assert_equal(len(str(p)), 75) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 +\n1234.0 x**4' + )) + + def test_first_linebreak_multiline_one_less_than_linewidth(self): + # Multiline str where len(first_line) + len(next_term) == lw - 1 == 74 + p = poly.Polynomial( + [12345678, 12345678, 12345678, 12345678, 1, 12345678] + ) + assert_equal(len(str(p).split('\n')[0]), 74) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 + 1.0 x**4 +\n12345678.0 x**5' + )) + + def test_first_linebreak_multiline_on_linewidth(self): + # First line is one character longer than previous test + p = poly.Polynomial( + [12345678, 12345678, 12345678, 12345678.12, 1, 12345678] + ) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.12 x**3 +\n1.0 x**4 + 12345678.0 x**5' + )) + + @pytest.mark.parametrize(('lw', 'tgt'), ( + (75, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + ' + '500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + ' + '900.0 x**9')), + (45, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 +\n40000.0 x**4 + ' + '500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 +\n' + '900.0 x**9')), + (132, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + ' + '500000.0 x**5 + 600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + ' + '900.0 x**9')), + )) + def test_linewidth_printoption(self, lw, tgt): + p = poly.Polynomial( + [0, 10, 200, 3000, 40000, 500000, 600000, 70000, 8000, 900] + ) + with printoptions(linewidth=lw): + assert_equal(str(p), tgt) + for line in str(p).split('\n'): + assert_(len(line) < lw) + + +def test_set_default_printoptions(): + p = poly.Polynomial([1, 2, 3]) + c = poly.Chebyshev([1, 2, 3]) + poly.set_default_printstyle('ascii') + assert_equal(str(p), "1.0 + 2.0 x + 3.0 x**2") + assert_equal(str(c), "1.0 + 2.0 T_1(x) + 3.0 T_2(x)") + poly.set_default_printstyle('unicode') + assert_equal(str(p), "1.0 + 2.0·x + 3.0·x²") + assert_equal(str(c), "1.0 + 2.0·T₁(x) + 3.0·T₂(x)") + with pytest.raises(ValueError): + poly.set_default_printstyle('invalid_input') + + +def test_complex_coefficients(): + """Test both numpy and built-in complex.""" + coefs = [0+1j, 1+1j, -2+2j, 3+0j] + # numpy complex + p1 = poly.Polynomial(coefs) + # Python complex + p2 = poly.Polynomial(array(coefs, dtype=object)) + poly.set_default_printstyle('unicode') + assert_equal(str(p1), "1j + (1+1j)·x - (2-2j)·x² + (3+0j)·x³") + assert_equal(str(p2), "1j + (1+1j)·x + (-2+2j)·x² + (3+0j)·x³") + poly.set_default_printstyle('ascii') + assert_equal(str(p1), "1j + (1+1j) x - (2-2j) x**2 + (3+0j) x**3") + assert_equal(str(p2), "1j + (1+1j) x + (-2+2j) x**2 + (3+0j) x**3") + + +@pytest.mark.parametrize(('coefs', 'tgt'), ( + (array([Fraction(1, 2), Fraction(3, 4)], dtype=object), ( + "1/2 + 3/4·x" + )), + (array([1, 2, Fraction(5, 7)], dtype=object), ( + "1 + 2·x + 5/7·x²" + )), + (array([Decimal('1.00'), Decimal('2.2'), 3], dtype=object), ( + "1.00 + 2.2·x + 3·x²" + )), +)) +def test_numeric_object_coefficients(coefs, tgt): + p = poly.Polynomial(coefs) + poly.set_default_printstyle('unicode') + assert_equal(str(p), tgt) + 
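+# Note: for object-dtype coefficient arrays such as the cases above, the
+# printing code falls back to each coefficient's own str() (Fraction(1, 2)
+# renders as '1/2'); float coefficients instead go through numpy's
+# printoptions-controlled formatting (see TestPrintOptions below).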
+ +@pytest.mark.parametrize(('coefs', 'tgt'), ( + (array([1, 2, 'f'], dtype=object), '1 + 2·x + f·x²'), + (array([1, 2, [3, 4]], dtype=object), '1 + 2·x + [3, 4]·x²'), +)) +def test_nonnumeric_object_coefficients(coefs, tgt): + """ + Test coef fallback for object arrays of non-numeric coefficients. + """ + p = poly.Polynomial(coefs) + poly.set_default_printstyle('unicode') + assert_equal(str(p), tgt) + + +class TestFormat: + def test_format_unicode(self): + poly.set_default_printstyle('ascii') + p = poly.Polynomial([1, 2, 0, -1]) + assert_equal(format(p, 'unicode'), "1.0 + 2.0·x + 0.0·x² - 1.0·x³") + + def test_format_ascii(self): + poly.set_default_printstyle('unicode') + p = poly.Polynomial([1, 2, 0, -1]) + assert_equal( + format(p, 'ascii'), "1.0 + 2.0 x + 0.0 x**2 - 1.0 x**3" + ) + + def test_empty_formatstr(self): + poly.set_default_printstyle('ascii') + p = poly.Polynomial([1, 2, 3]) + assert_equal(format(p), "1.0 + 2.0 x + 3.0 x**2") + assert_equal(f"{p}", "1.0 + 2.0 x + 3.0 x**2") + + def test_bad_formatstr(self): + p = poly.Polynomial([1, 2, 0, -1]) + with pytest.raises(ValueError): + format(p, '.2f') + + +@pytest.mark.parametrize(('poly', 'tgt'), ( + (poly.Polynomial, '1.0 + 2.0·z + 3.0·z²'), + (poly.Chebyshev, '1.0 + 2.0·T₁(z) + 3.0·T₂(z)'), + (poly.Hermite, '1.0 + 2.0·H₁(z) + 3.0·H₂(z)'), + (poly.HermiteE, '1.0 + 2.0·He₁(z) + 3.0·He₂(z)'), + (poly.Laguerre, '1.0 + 2.0·L₁(z) + 3.0·L₂(z)'), + (poly.Legendre, '1.0 + 2.0·P₁(z) + 3.0·P₂(z)'), +)) +def test_symbol(poly, tgt): + p = poly([1, 2, 3], symbol='z') + assert_equal(f"{p:unicode}", tgt) + + +class TestRepr: + def test_polynomial_str(self): + res = repr(poly.Polynomial([0, 1])) + tgt = ( + "Polynomial([0., 1.], domain=[-1, 1], window=[-1, 1], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_chebyshev_str(self): + res = repr(poly.Chebyshev([0, 1])) + tgt = ( + "Chebyshev([0., 1.], domain=[-1, 1], window=[-1, 1], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_legendre_repr(self): + res = repr(poly.Legendre([0, 1])) + tgt = ( + "Legendre([0., 1.], domain=[-1, 1], window=[-1, 1], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_hermite_repr(self): + res = repr(poly.Hermite([0, 1])) + tgt = ( + "Hermite([0., 1.], domain=[-1, 1], window=[-1, 1], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_hermiteE_repr(self): + res = repr(poly.HermiteE([0, 1])) + tgt = ( + "HermiteE([0., 1.], domain=[-1, 1], window=[-1, 1], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_laguerre_repr(self): + res = repr(poly.Laguerre([0, 1])) + tgt = ( + "Laguerre([0., 1.], domain=[0, 1], window=[0, 1], " + "symbol='x')" + ) + assert_equal(res, tgt) + + +class TestLatexRepr: + """Test the latex repr used by Jupyter""" + + def as_latex(self, obj): + # right now we ignore the formatting of scalars in our tests, since + # it makes them too verbose. 
Ideally, the formatting of scalars will + # be fixed such that tests below continue to pass + obj._repr_latex_scalar = lambda x, parens=False: str(x) + try: + return obj._repr_latex_() + finally: + del obj._repr_latex_scalar + + def test_simple_polynomial(self): + # default input + p = poly.Polynomial([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$') + + # translated input + p = poly.Polynomial([1, 2, 3], domain=[-2, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') + + # scaled input + p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$') + + # affine input + p = poly.Polynomial([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') + + def test_basis_func(self): + p = poly.Chebyshev([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$') + # affine input - check no surplus parens are added + p = poly.Chebyshev([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') + + def test_multichar_basis_func(self): + p = poly.HermiteE([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$') + + def test_symbol_basic(self): + # default input + p = poly.Polynomial([1, 2, 3], symbol='z') + assert_equal(self.as_latex(p), + r'$z \mapsto 1.0 + 2.0\,z + 3.0\,z^{2}$') + + # translated input + p = poly.Polynomial([1, 2, 3], domain=[-2, 0], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(1.0 + z\right) + 3.0\,' + r'\left(1.0 + z\right)^{2}$' + ), + ) + + # scaled input + p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(2.0z\right) + 3.0\,' + r'\left(2.0z\right)^{2}$' + ), + ) + + # affine input + p = poly.Polynomial([1, 2, 3], domain=[-1, 0], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(1.0 + 2.0z\right) + 3.0\,' + r'\left(1.0 + 2.0z\right)^{2}$' + ), + ) + + +SWITCH_TO_EXP = ( + '1.0 + (1.0e-01) x + (1.0e-02) x**2', + '1.2 + (1.2e-01) x + (1.2e-02) x**2', + '1.23 + 0.12 x + (1.23e-02) x**2 + (1.23e-03) x**3', + '1.235 + 0.123 x + (1.235e-02) x**2 + (1.235e-03) x**3', + '1.2346 + 0.1235 x + 0.0123 x**2 + (1.2346e-03) x**3 + (1.2346e-04) x**4', + '1.23457 + 0.12346 x + 0.01235 x**2 + (1.23457e-03) x**3 + ' + '(1.23457e-04) x**4', + '1.234568 + 0.123457 x + 0.012346 x**2 + 0.001235 x**3 + ' + '(1.234568e-04) x**4 + (1.234568e-05) x**5', + '1.2345679 + 0.1234568 x + 0.0123457 x**2 + 0.0012346 x**3 + ' + '(1.2345679e-04) x**4 + (1.2345679e-05) x**5') + +class TestPrintOptions: + """ + Test the output is properly configured via printoptions. + The exponential notation is enabled automatically when the values + are too small or too large. 
+ """ + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + def test_str(self): + p = poly.Polynomial([1/2, 1/7, 1/7*10**8, 1/7*10**9]) + assert_equal(str(p), '0.5 + 0.14285714 x + 14285714.28571429 x**2 ' + '+ (1.42857143e+08) x**3') + + with printoptions(precision=3): + assert_equal(str(p), '0.5 + 0.143 x + 14285714.286 x**2 ' + '+ (1.429e+08) x**3') + + def test_latex(self): + p = poly.Polynomial([1/2, 1/7, 1/7*10**8, 1/7*10**9]) + assert_equal(p._repr_latex_(), + r'$x \mapsto \text{0.5} + \text{0.14285714}\,x + ' + r'\text{14285714.28571429}\,x^{2} + ' + r'\text{(1.42857143e+08)}\,x^{3}$') + + with printoptions(precision=3): + assert_equal(p._repr_latex_(), + r'$x \mapsto \text{0.5} + \text{0.143}\,x + ' + r'\text{14285714.286}\,x^{2} + \text{(1.429e+08)}\,x^{3}$') + + def test_fixed(self): + p = poly.Polynomial([1/2]) + assert_equal(str(p), '0.5') + + with printoptions(floatmode='fixed'): + assert_equal(str(p), '0.50000000') + + with printoptions(floatmode='fixed', precision=4): + assert_equal(str(p), '0.5000') + + def test_switch_to_exp(self): + for i, s in enumerate(SWITCH_TO_EXP): + with printoptions(precision=i): + p = poly.Polynomial([1.23456789*10**-i + for i in range(i//2+3)]) + assert str(p).replace('\n', ' ') == s + + def test_non_finite(self): + p = poly.Polynomial([nan, inf]) + assert str(p) == 'nan + inf x' + assert p._repr_latex_() == r'$x \mapsto \text{nan} + \text{inf}\,x$' + with printoptions(nanstr='NAN', infstr='INF'): + assert str(p) == 'NAN + INF x' + assert p._repr_latex_() == \ + r'$x \mapsto \text{NAN} + \text{INF}\,x$' diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_symbol.py b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_symbol.py new file mode 100644 index 00000000..4ea6035e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/polynomial/tests/test_symbol.py @@ -0,0 +1,216 @@ +""" +Tests related to the ``symbol`` attribute of the ABCPolyBase class. +""" + +import pytest +import numpy.polynomial as poly +from numpy.core import array +from numpy.testing import assert_equal, assert_raises, assert_ + + +class TestInit: + """ + Test polynomial creation with symbol kwarg. + """ + c = [1, 2, 3] + + def test_default_symbol(self): + p = poly.Polynomial(self.c) + assert_equal(p.symbol, 'x') + + @pytest.mark.parametrize(('bad_input', 'exception'), ( + ('', ValueError), + ('3', ValueError), + (None, TypeError), + (1, TypeError), + )) + def test_symbol_bad_input(self, bad_input, exception): + with pytest.raises(exception): + p = poly.Polynomial(self.c, symbol=bad_input) + + @pytest.mark.parametrize('symbol', ( + 'x', + 'x_1', + 'A', + 'xyz', + 'β', + )) + def test_valid_symbols(self, symbol): + """ + Values for symbol that should pass input validation. + """ + p = poly.Polynomial(self.c, symbol=symbol) + assert_equal(p.symbol, symbol) + + def test_property(self): + """ + 'symbol' attribute is read only. 
+ """ + p = poly.Polynomial(self.c, symbol='x') + with pytest.raises(AttributeError): + p.symbol = 'z' + + def test_change_symbol(self): + p = poly.Polynomial(self.c, symbol='y') + # Create new polynomial from p with different symbol + pt = poly.Polynomial(p.coef, symbol='t') + assert_equal(pt.symbol, 't') + + +class TestUnaryOperators: + p = poly.Polynomial([1, 2, 3], symbol='z') + + def test_neg(self): + n = -self.p + assert_equal(n.symbol, 'z') + + def test_scalarmul(self): + out = self.p * 10 + assert_equal(out.symbol, 'z') + + def test_rscalarmul(self): + out = 10 * self.p + assert_equal(out.symbol, 'z') + + def test_pow(self): + out = self.p ** 3 + assert_equal(out.symbol, 'z') + + +@pytest.mark.parametrize( + 'rhs', + ( + poly.Polynomial([4, 5, 6], symbol='z'), + array([4, 5, 6]), + ), +) +class TestBinaryOperatorsSameSymbol: + """ + Ensure symbol is preserved for numeric operations on polynomials with + the same symbol + """ + p = poly.Polynomial([1, 2, 3], symbol='z') + + def test_add(self, rhs): + out = self.p + rhs + assert_equal(out.symbol, 'z') + + def test_sub(self, rhs): + out = self.p - rhs + assert_equal(out.symbol, 'z') + + def test_polymul(self, rhs): + out = self.p * rhs + assert_equal(out.symbol, 'z') + + def test_divmod(self, rhs): + for out in divmod(self.p, rhs): + assert_equal(out.symbol, 'z') + + def test_radd(self, rhs): + out = rhs + self.p + assert_equal(out.symbol, 'z') + + def test_rsub(self, rhs): + out = rhs - self.p + assert_equal(out.symbol, 'z') + + def test_rmul(self, rhs): + out = rhs * self.p + assert_equal(out.symbol, 'z') + + def test_rdivmod(self, rhs): + for out in divmod(rhs, self.p): + assert_equal(out.symbol, 'z') + + +class TestBinaryOperatorsDifferentSymbol: + p = poly.Polynomial([1, 2, 3], symbol='x') + other = poly.Polynomial([4, 5, 6], symbol='y') + ops = (p.__add__, p.__sub__, p.__mul__, p.__floordiv__, p.__mod__) + + @pytest.mark.parametrize('f', ops) + def test_binops_fails(self, f): + assert_raises(ValueError, f, self.other) + + +class TestEquality: + p = poly.Polynomial([1, 2, 3], symbol='x') + + def test_eq(self): + other = poly.Polynomial([1, 2, 3], symbol='x') + assert_(self.p == other) + + def test_neq(self): + other = poly.Polynomial([1, 2, 3], symbol='y') + assert_(not self.p == other) + + +class TestExtraMethods: + """ + Test other methods for manipulating/creating polynomial objects. 
+    """
+    p = poly.Polynomial([1, 2, 3, 0], symbol='z')
+
+    def test_copy(self):
+        other = self.p.copy()
+        assert_equal(other.symbol, 'z')
+
+    def test_trim(self):
+        other = self.p.trim()
+        assert_equal(other.symbol, 'z')
+
+    def test_truncate(self):
+        other = self.p.truncate(2)
+        assert_equal(other.symbol, 'z')
+
+    @pytest.mark.parametrize('kwarg', (
+        {'domain': [-10, 10]},
+        {'window': [-10, 10]},
+        {'kind': poly.Chebyshev},
+        ))
+    def test_convert(self, kwarg):
+        other = self.p.convert(**kwarg)
+        assert_equal(other.symbol, 'z')
+
+    def test_integ(self):
+        other = self.p.integ()
+        assert_equal(other.symbol, 'z')
+
+    def test_deriv(self):
+        other = self.p.deriv()
+        assert_equal(other.symbol, 'z')
+
+
+def test_composition():
+    p = poly.Polynomial([3, 2, 1], symbol="t")
+    q = poly.Polynomial([5, 1, 0, -1], symbol="λ_1")
+    r = p(q)
+    assert r.symbol == "λ_1"
+
+
+#
+# Class methods that result in new polynomial class instances
+#
+
+
+def test_fit():
+    x, y = (range(10),)*2
+    p = poly.Polynomial.fit(x, y, deg=1, symbol='z')
+    assert_equal(p.symbol, 'z')
+
+
+def test_fromroots():
+    roots = [-2, 2]
+    p = poly.Polynomial.fromroots(roots, symbol='z')
+    assert_equal(p.symbol, 'z')
+
+
+def test_identity():
+    p = poly.Polynomial.identity(domain=[-1, 1], window=[5, 20], symbol='z')
+    assert_equal(p.symbol, 'z')
+
+
+def test_basis():
+    p = poly.Polynomial.basis(3, symbol='z')
+    assert_equal(p.symbol, 'z')
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/py.typed b/dbdpy-env/lib/python3.9/site-packages/numpy/py.typed
new file mode 100644
index 00000000..e69de29b
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/LICENSE.md b/dbdpy-env/lib/python3.9/site-packages/numpy/random/LICENSE.md
new file mode 100644
index 00000000..a6cf1b17
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/LICENSE.md
@@ -0,0 +1,71 @@
+**This software is dual-licensed under the University of Illinois/NCSA
+Open Source License (NCSA) and The 3-Clause BSD License**
+
+# NCSA Open Source License
+**Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
+
+Developed by: Kevin Sheppard
+[http://www.kevinsheppard.com](http://www.kevinsheppard.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimers.
+
+Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimers in the documentation and/or
+other materials provided with the distribution.
+
+Neither the names of Kevin Sheppard, nor the names of any contributors may be
+used to endorse or promote products derived from this Software without specific
+prior written permission.
+
+**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH +THE SOFTWARE.** + + +# 3-Clause BSD License +**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +**THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE.** + +# Components + +Many parts of this module have been derived from original sources, +often the algorithm's designer. Component licenses are located with +the component code. diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/__init__.pxd b/dbdpy-env/lib/python3.9/site-packages/numpy/random/__init__.pxd new file mode 100644 index 00000000..1f905729 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/__init__.pxd @@ -0,0 +1,14 @@ +cimport numpy as np +from libc.stdint cimport uint32_t, uint64_t + +cdef extern from "numpy/random/bitgen.h": + struct bitgen: + void *state + uint64_t (*next_uint64)(void *st) nogil + uint32_t (*next_uint32)(void *st) nogil + double (*next_double)(void *st) nogil + uint64_t (*next_raw)(void *st) nogil + + ctypedef bitgen bitgen_t + +from numpy.random.bit_generator cimport BitGenerator, SeedSequence diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/__init__.py new file mode 100644 index 00000000..2e8f99fe --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/__init__.py @@ -0,0 +1,215 @@ +""" +======================== +Random Number Generation +======================== + +Use ``default_rng()`` to create a `Generator` and call its methods. 
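+
+A minimal usage sketch (the seed value is arbitrary):
+
+    >>> import numpy as np
+    >>> rng = np.random.default_rng(seed=42)
+    >>> vals = rng.standard_normal(3)        # three draws from N(0, 1)
+    >>> ints = rng.integers(0, 10, size=5)   # five ints from [0, 10)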
+
+=============== =========================================================
+Generator
+--------------- ---------------------------------------------------------
+Generator       Class implementing all of the random number distributions
+default_rng     Default constructor for ``Generator``
+=============== =========================================================
+
+============================================= ===
+BitGenerator                                  Streams that work with Generator
+--------------------------------------------- ---
+MT19937
+PCG64
+PCG64DXSM
+Philox
+SFC64
+============================================= ===
+
+============================================= ===
+Getting entropy to initialize a BitGenerator
+--------------------------------------------- ---
+SeedSequence
+============================================= ===
+
+
+Legacy
+------
+
+For backwards compatibility with versions of numpy before 1.17, the various
+aliases to the global `RandomState` methods are left alone and do not use the
+new `Generator` API.
+
+==================== =========================================================
+Utility functions
+-------------------- ---------------------------------------------------------
+random               Uniformly distributed floats over ``[0, 1)``.
+bytes                Uniformly distributed random bytes.
+permutation          Randomly permute a sequence / generate a random sequence.
+shuffle              Randomly permute a sequence in place.
+choice               Random sample from 1-D array.
+==================== =========================================================
+
+==================== =========================================================
+Compatibility
+functions - removed
+in the new API
+-------------------- ---------------------------------------------------------
+rand                 Uniformly distributed values.
+randn                Normally distributed values.
+ranf                 Uniformly distributed floating point numbers.
+random_integers      Uniformly distributed integers in a given range.
+                     (deprecated, use ``integers(..., closed=True)`` instead)
+random_sample        Uniformly distributed floats over ``[0, 1)``.
+randint              Uniformly distributed integers in a given range.
+seed                 Seed the legacy random number generator.
+==================== =========================================================
+
+==================== =========================================================
+Univariate
+distributions
+-------------------- ---------------------------------------------------------
+beta                 Beta distribution over ``[0, 1]``.
+binomial             Binomial distribution.
+chisquare            :math:`\\chi^2` distribution.
+exponential          Exponential distribution.
+f                    F (Fisher-Snedecor) distribution.
+gamma                Gamma distribution.
+geometric            Geometric distribution.
+gumbel               Gumbel distribution.
+hypergeometric       Hypergeometric distribution.
+laplace              Laplace distribution.
+logistic             Logistic distribution.
+lognormal            Log-normal distribution.
+logseries            Logarithmic series distribution.
+negative_binomial    Negative binomial distribution.
+noncentral_chisquare Non-central chi-square distribution.
+noncentral_f         Non-central F distribution.
+normal               Normal / Gaussian distribution.
+pareto               Pareto distribution.
+poisson              Poisson distribution.
+power                Power distribution.
+rayleigh             Rayleigh distribution.
+triangular           Triangular distribution.
+uniform              Uniform distribution.
+vonmises             Von Mises circular distribution.
+wald                 Wald (inverse Gaussian) distribution.
+weibull              Weibull distribution.
+zipf                 Zipf's distribution over ranked data.
+==================== ========================================================= + +==================== ========================================================== +Multivariate +distributions +-------------------- ---------------------------------------------------------- +dirichlet Multivariate generalization of Beta distribution. +multinomial Multivariate generalization of the binomial distribution. +multivariate_normal Multivariate generalization of the normal distribution. +==================== ========================================================== + +==================== ========================================================= +Standard +distributions +-------------------- --------------------------------------------------------- +standard_cauchy Standard Cauchy-Lorentz distribution. +standard_exponential Standard exponential distribution. +standard_gamma Standard Gamma distribution. +standard_normal Standard normal distribution. +standard_t Standard Student's t-distribution. +==================== ========================================================= + +==================== ========================================================= +Internal functions +-------------------- --------------------------------------------------------- +get_state Get tuple representing internal state of generator. +set_state Set state of generator. +==================== ========================================================= + + +""" +__all__ = [ + 'beta', + 'binomial', + 'bytes', + 'chisquare', + 'choice', + 'dirichlet', + 'exponential', + 'f', + 'gamma', + 'geometric', + 'get_state', + 'gumbel', + 'hypergeometric', + 'laplace', + 'logistic', + 'lognormal', + 'logseries', + 'multinomial', + 'multivariate_normal', + 'negative_binomial', + 'noncentral_chisquare', + 'noncentral_f', + 'normal', + 'pareto', + 'permutation', + 'poisson', + 'power', + 'rand', + 'randint', + 'randn', + 'random', + 'random_integers', + 'random_sample', + 'ranf', + 'rayleigh', + 'sample', + 'seed', + 'set_state', + 'shuffle', + 'standard_cauchy', + 'standard_exponential', + 'standard_gamma', + 'standard_normal', + 'standard_t', + 'triangular', + 'uniform', + 'vonmises', + 'wald', + 'weibull', + 'zipf', +] + +# add these for module-freeze analysis (like PyInstaller) +from . import _pickle +from . import _common +from . import _bounded_integers + +from ._generator import Generator, default_rng +from .bit_generator import SeedSequence, BitGenerator +from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .mtrand import * + +__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937', + 'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng', + 'BitGenerator'] + + +def __RandomState_ctor(): + """Return a RandomState instance. + + This function exists solely to assist (un)pickling. + + Note that the state of the RandomState returned here is irrelevant, as this + function's entire purpose is to return a newly allocated RandomState whose + state pickle can set. Consequently the RandomState returned by this function + is a freshly allocated copy with a seed=0. 
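+
+    A round-trip sketch of the behaviour this supports (illustrative only):
+
+        >>> import pickle
+        >>> import numpy as np
+        >>> rs = np.random.RandomState(123)
+        >>> rs2 = pickle.loads(pickle.dumps(rs))  # state survives the trip
+        >>> rs.randint(10) == rs2.randint(10)
+        True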
+ + See https://github.com/numpy/numpy/issues/4763 for a detailed discussion + + """ + return RandomState(seed=0) + + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/__init__.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/random/__init__.pyi new file mode 100644 index 00000000..99ef6f3e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/__init__.pyi @@ -0,0 +1,72 @@ +from numpy._pytesttester import PytestTester + +from numpy.random._generator import Generator as Generator +from numpy.random._generator import default_rng as default_rng +from numpy.random._mt19937 import MT19937 as MT19937 +from numpy.random._pcg64 import ( + PCG64 as PCG64, + PCG64DXSM as PCG64DXSM, +) +from numpy.random._philox import Philox as Philox +from numpy.random._sfc64 import SFC64 as SFC64 +from numpy.random.bit_generator import BitGenerator as BitGenerator +from numpy.random.bit_generator import SeedSequence as SeedSequence +from numpy.random.mtrand import ( + RandomState as RandomState, + beta as beta, + binomial as binomial, + bytes as bytes, + chisquare as chisquare, + choice as choice, + dirichlet as dirichlet, + exponential as exponential, + f as f, + gamma as gamma, + geometric as geometric, + get_bit_generator as get_bit_generator, + get_state as get_state, + gumbel as gumbel, + hypergeometric as hypergeometric, + laplace as laplace, + logistic as logistic, + lognormal as lognormal, + logseries as logseries, + multinomial as multinomial, + multivariate_normal as multivariate_normal, + negative_binomial as negative_binomial, + noncentral_chisquare as noncentral_chisquare, + noncentral_f as noncentral_f, + normal as normal, + pareto as pareto, + permutation as permutation, + poisson as poisson, + power as power, + rand as rand, + randint as randint, + randn as randn, + random as random, + random_integers as random_integers, + random_sample as random_sample, + ranf as ranf, + rayleigh as rayleigh, + sample as sample, + seed as seed, + set_bit_generator as set_bit_generator, + set_state as set_state, + shuffle as shuffle, + standard_cauchy as standard_cauchy, + standard_exponential as standard_exponential, + standard_gamma as standard_gamma, + standard_normal as standard_normal, + standard_t as standard_t, + triangular as triangular, + uniform as uniform, + vonmises as vonmises, + wald as wald, + weibull as weibull, + zipf as zipf, +) + +__all__: list[str] +__path__: list[str] +test: PytestTester diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_bounded_integers.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_bounded_integers.cpython-39-darwin.so new file mode 100755 index 00000000..b77727be Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_bounded_integers.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_bounded_integers.pxd b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_bounded_integers.pxd new file mode 100644 index 00000000..7e41463a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_bounded_integers.pxd @@ -0,0 +1,29 @@ +from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t, + int8_t, int16_t, int32_t, int64_t, intptr_t) +import numpy as np +cimport numpy as np +ctypedef np.npy_bool bool_t + +from numpy.random cimport bitgen_t + +cdef inline uint64_t _gen_mask(uint64_t max_val) nogil: + """Mask generator for use in 
bounded random numbers""" + # Smallest bit mask >= max + cdef uint64_t mask = max_val + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + mask |= mask >> 32 + return mask + +cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_common.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_common.cpython-39-darwin.so new file mode 100755 index 00000000..d1c2a64c Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_common.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_common.pxd b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_common.pxd new file mode 100644 index 00000000..659da0d2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_common.pxd @@ -0,0 +1,106 @@ +#cython: language_level=3 + +from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t + +import numpy as np +cimport numpy as np + +from numpy.random cimport bitgen_t + +cdef double POISSON_LAM_MAX +cdef double LEGACY_POISSON_LAM_MAX +cdef uint64_t MAXSIZE + +cdef enum ConstraintType: + CONS_NONE + CONS_NON_NEGATIVE + CONS_POSITIVE + CONS_POSITIVE_NOT_NAN + CONS_BOUNDED_0_1 + CONS_BOUNDED_GT_0_1 + CONS_BOUNDED_LT_0_1 + CONS_GT_1 + CONS_GTE_1 + CONS_POISSON + LEGACY_CONS_POISSON + +ctypedef ConstraintType constraint_type + +cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method) +cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output) +cdef object prepare_cffi(bitgen_t *bitgen) +cdef object prepare_ctypes(bitgen_t *bitgen) +cdef int check_constraint(double val, object name, constraint_type cons) except -1 +cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1 + +cdef extern from "include/aligned_malloc.h": + cdef void *PyArray_realloc_aligned(void *p, size_t n) + cdef void *PyArray_malloc_aligned(size_t n) + cdef void *PyArray_calloc_aligned(size_t n, size_t s) + cdef void PyArray_free_aligned(void *p) + +ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) noexcept nogil +ctypedef double (*random_double_0)(void *state) noexcept nogil +ctypedef double (*random_double_1)(void *state, double a) noexcept nogil +ctypedef double (*random_double_2)(void *state, double a, double b) noexcept nogil +ctypedef double (*random_double_3)(void *state, 
double a, double b, double c) noexcept nogil + +ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) noexcept nogil +ctypedef float (*random_float_0)(bitgen_t *state) noexcept nogil +ctypedef float (*random_float_1)(bitgen_t *state, float a) noexcept nogil + +ctypedef int64_t (*random_uint_0)(void *state) noexcept nogil +ctypedef int64_t (*random_uint_d)(void *state, double a) noexcept nogil +ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) noexcept nogil +ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) noexcept nogil +ctypedef int64_t (*random_uint_i)(void *state, int64_t a) noexcept nogil +ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) noexcept nogil + +ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) noexcept nogil +ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) noexcept nogil + +ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) noexcept nogil +ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) noexcept nogil + +cdef double kahan_sum(double *darr, np.npy_intp n) noexcept + +cdef inline double uint64_to_double(uint64_t rnd) noexcept nogil: + return (rnd >> 11) * (1.0 / 9007199254740992.0) + +cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object wrap_int(object val, object bits) + +cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size) + +cdef validate_output_shape(iter_shape, np.ndarray output) + +cdef object cont(void *func, void *state, object size, object lock, int narg, + object a, object a_name, constraint_type a_constraint, + object b, object b_name, constraint_type b_constraint, + object c, object c_name, constraint_type c_constraint, + object out) + +cdef object disc(void *func, void *state, object size, object lock, + int narg_double, int narg_int64, + object a, object a_name, constraint_type a_constraint, + object b, object b_name, constraint_type b_constraint, + object c, object c_name, constraint_type c_constraint) + +cdef object cont_f(void *func, bitgen_t *state, object size, object lock, + object a, object a_name, constraint_type a_constraint, + object out) + +cdef object cont_broadcast_3(void *func, void *state, object size, object lock, + np.ndarray a_arr, object a_name, constraint_type a_constraint, + np.ndarray b_arr, object b_name, constraint_type b_constraint, + np.ndarray c_arr, object c_name, constraint_type c_constraint) + +cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock, + np.ndarray a_arr, object a_name, constraint_type a_constraint, + np.ndarray b_arr, object b_name, constraint_type b_constraint, + np.ndarray c_arr, object c_name, constraint_type c_constraint) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cffi/extending.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cffi/extending.py new file mode 100644 index 00000000..8440d400 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cffi/extending.py @@ -0,0 +1,40 @@ +""" +Use cffi to access any of the underlying C functions from distributions.h +""" +import os +import numpy as np +import cffi +from .parse import parse_distributions_h +ffi = 
cffi.FFI() + +inc_dir = os.path.join(np.get_include(), 'numpy') + +# Basic numpy types +ffi.cdef(''' + typedef intptr_t npy_intp; + typedef unsigned char npy_bool; + +''') + +parse_distributions_h(ffi, inc_dir) + +lib = ffi.dlopen(np.random._generator.__file__) + +# Compare the distributions.h random_standard_normal_fill to +# Generator.standard_random +bit_gen = np.random.PCG64() +rng = np.random.Generator(bit_gen) +state = bit_gen.state + +interface = rng.bit_generator.cffi +n = 100 +vals_cffi = ffi.new('double[%d]' % n) +lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi) + +# reset the state +bit_gen.state = state + +vals = rng.standard_normal(n) + +for i in range(n): + assert vals[i] == vals_cffi[i] diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cffi/parse.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cffi/parse.py new file mode 100644 index 00000000..d41c4c2d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cffi/parse.py @@ -0,0 +1,54 @@ +import os + + +def parse_distributions_h(ffi, inc_dir): + """ + Parse distributions.h located in inc_dir for CFFI, filling in the ffi.cdef + + Read the function declarations without the "#define ..." macros that will + be filled in when loading the library. + """ + + with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid: + s = [] + for line in fid: + # massage the include file + if line.strip().startswith('#'): + continue + s.append(line) + ffi.cdef('\n'.join(s)) + + with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid: + s = [] + in_skip = 0 + ignoring = False + for line in fid: + # check for and remove extern "C" guards + if ignoring: + if line.strip().startswith('#endif'): + ignoring = False + continue + if line.strip().startswith('#ifdef __cplusplus'): + ignoring = True + + # massage the include file + if line.strip().startswith('#'): + continue + + # skip any inlined function definition + # which starts with 'static inline xxx(...) 
{'
+            # and ends with a closing '}'
+            if line.strip().startswith('static inline'):
+                in_skip += line.count('{')
+                continue
+            elif in_skip > 0:
+                in_skip += line.count('{')
+                in_skip -= line.count('}')
+                continue
+
+            # replace defines with their value or remove them
+            line = line.replace('DECLDIR', '')
+            line = line.replace('RAND_INT_TYPE', 'int64_t')
+            s.append(line)
+        ffi.cdef('\n'.join(s))
+
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cython/extending.pyx b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cython/extending.pyx
new file mode 100644
index 00000000..30efd744
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cython/extending.pyx
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+#cython: language_level=3
+
+from libc.stdint cimport uint32_t
+from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+from numpy.random cimport bitgen_t
+from numpy.random import PCG64
+
+np.import_array()
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def uniform_mean(Py_ssize_t n):
+    cdef Py_ssize_t i
+    cdef bitgen_t *rng
+    cdef const char *capsule_name = "BitGenerator"
+    cdef double[::1] random_values
+    cdef np.ndarray randoms
+
+    x = PCG64()
+    capsule = x.capsule
+    if not PyCapsule_IsValid(capsule, capsule_name):
+        raise ValueError("Invalid pointer to anon_func_state")
+    rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+    random_values = np.empty(n)
+    # Best practice is to acquire the lock whenever generating random values.
+    # This prevents other threads from modifying the state. Acquiring the lock
+    # is only necessary if the GIL is also released, as in this example.
+    with x.lock, nogil:
+        for i in range(n):
+            random_values[i] = rng.next_double(rng.state)
+    randoms = np.asarray(random_values)
+    return randoms.mean()
+
+
+# This function is declared nogil so it can be used without the GIL below
+cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil:
+    cdef uint32_t mask, delta, val
+    mask = delta = ub - lb
+    mask |= mask >> 1
+    mask |= mask >> 2
+    mask |= mask >> 4
+    mask |= mask >> 8
+    mask |= mask >> 16
+
+    val = rng.next_uint32(rng.state) & mask
+    while val > delta:
+        val = rng.next_uint32(rng.state) & mask
+
+    return lb + val
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n):
+    cdef Py_ssize_t i
+    cdef bitgen_t *rng
+    cdef uint32_t[::1] out
+    cdef const char *capsule_name = "BitGenerator"
+
+    x = PCG64()
+    out = np.empty(n, dtype=np.uint32)
+    capsule = x.capsule
+
+    if not PyCapsule_IsValid(capsule, capsule_name):
+        raise ValueError("Invalid pointer to anon_func_state")
+    rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+
+    with x.lock, nogil:
+        for i in range(n):
+            out[i] = bounded_uint(lb, ub, rng)
+    return np.asarray(out)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cython/extending_distributions.pyx b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cython/extending_distributions.pyx
new file mode 100644
index 00000000..d908e92d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cython/extending_distributions.pyx
@@ -0,0 +1,117 @@
+#!/usr/bin/env python3
+#cython: language_level=3
+"""
+This file shows how to use a BitGenerator to create a distribution.
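+
+A usage sketch once the extension is built (see the meson.build in this
+directory; the names here are this file's own):
+
+    >>> from extending_distributions import uniforms
+    >>> u = uniforms(1000)   # 1000 float64 draws in [0, 1)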
+""" +import numpy as np +cimport numpy as np +cimport cython +from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer +from libc.stdint cimport uint16_t, uint64_t +from numpy.random cimport bitgen_t +from numpy.random import PCG64 +from numpy.random.c_distributions cimport ( + random_standard_uniform_fill, random_standard_uniform_fill_f) + + +@cython.boundscheck(False) +@cython.wraparound(False) +def uniforms(Py_ssize_t n): + """ + Create an array of `n` uniformly distributed doubles. + A 'real' distribution would want to process the values into + some non-uniform distribution + """ + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef double[::1] random_values + + x = PCG64() + capsule = x.capsule + # Optional check that the capsule if from a BitGenerator + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + # Cast the pointer + rng = PyCapsule_GetPointer(capsule, capsule_name) + random_values = np.empty(n, dtype='float64') + with x.lock, nogil: + for i in range(n): + # Call the function + random_values[i] = rng.next_double(rng.state) + randoms = np.asarray(random_values) + + return randoms + +# cython example 2 +@cython.boundscheck(False) +@cython.wraparound(False) +def uint10_uniforms(Py_ssize_t n): + """Uniform 10 bit integers stored as 16-bit unsigned integers""" + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef uint16_t[::1] random_values + cdef int bits_remaining + cdef int width = 10 + cdef uint64_t buff, mask = 0x3FF + + x = PCG64() + capsule = x.capsule + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + rng = PyCapsule_GetPointer(capsule, capsule_name) + random_values = np.empty(n, dtype='uint16') + # Best practice is to release GIL and acquire the lock + bits_remaining = 0 + with x.lock, nogil: + for i in range(n): + if bits_remaining < width: + buff = rng.next_uint64(rng.state) + random_values[i] = buff & mask + buff >>= width + + randoms = np.asarray(random_values) + return randoms + +# cython example 3 +def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64): + """ + Create an array of `n` uniformly distributed doubles via a "fill" function. + + A 'real' distribution would want to process the values into + some non-uniform distribution + + Parameters + ---------- + bit_generator: BitGenerator instance + n: int + Output vector length + dtype: {str, dtype}, optional + Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). 
The + default dtype value is 'd' + """ + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef np.ndarray randoms + + capsule = bit_generator.capsule + # Optional check that the capsule if from a BitGenerator + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + # Cast the pointer + rng = PyCapsule_GetPointer(capsule, capsule_name) + + _dtype = np.dtype(dtype) + randoms = np.empty(n, dtype=_dtype) + if _dtype == np.float32: + with bit_generator.lock: + random_standard_uniform_fill_f(rng, n, np.PyArray_DATA(randoms)) + elif _dtype == np.float64: + with bit_generator.lock: + random_standard_uniform_fill(rng, n, np.PyArray_DATA(randoms)) + else: + raise TypeError('Unsupported dtype %r for random' % _dtype) + return randoms + diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cython/meson.build b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cython/meson.build new file mode 100644 index 00000000..c00837d4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/cython/meson.build @@ -0,0 +1,45 @@ +project('random-build-examples', 'c', 'cpp', 'cython') + +py_mod = import('python') +py3 = py_mod.find_installation(pure: false) + +cc = meson.get_compiler('c') +cy = meson.get_compiler('cython') + +if not cy.version().version_compare('>=0.29.35') + error('tests requires Cython >= 0.29.35') +endif + +_numpy_abs = run_command(py3, ['-c', + 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include() + "../../.."))'], + check: true).stdout().strip() + +npymath_path = _numpy_abs / 'core' / 'lib' +npy_include_path = _numpy_abs / 'core' / 'include' +npyrandom_path = _numpy_abs / 'random' / 'lib' +npymath_lib = cc.find_library('npymath', dirs: npymath_path) +npyrandom_lib = cc.find_library('npyrandom', dirs: npyrandom_path) + +py3.extension_module( + 'extending_distributions', + 'extending_distributions.pyx', + install: false, + include_directories: [npy_include_path], + dependencies: [npyrandom_lib, npymath_lib], +) +py3.extension_module( + 'extending', + 'extending.pyx', + install: false, + include_directories: [npy_include_path], + dependencies: [npyrandom_lib, npymath_lib], +) +py3.extension_module( + 'extending_cpp', + 'extending_distributions.pyx', + install: false, + override_options : ['cython_language=cpp'], + cython_args: ['--module-name', 'extending_cpp'], + include_directories: [npy_include_path], + dependencies: [npyrandom_lib, npymath_lib], +) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/numba/extending.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/numba/extending.py new file mode 100644 index 00000000..f387db69 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/numba/extending.py @@ -0,0 +1,84 @@ +import numpy as np +import numba as nb + +from numpy.random import PCG64 +from timeit import timeit + +bit_gen = PCG64() +next_d = bit_gen.cffi.next_double +state_addr = bit_gen.cffi.state_address + +def normals(n, state): + out = np.empty(n) + for i in range((n + 1) // 2): + x1 = 2.0 * next_d(state) - 1.0 + x2 = 2.0 * next_d(state) - 1.0 + r2 = x1 * x1 + x2 * x2 + while r2 >= 1.0 or r2 == 0.0: + x1 = 2.0 * next_d(state) - 1.0 + x2 = 2.0 * next_d(state) - 1.0 + r2 = x1 * x1 + x2 * x2 + f = np.sqrt(-2.0 * np.log(r2) / r2) + out[2 * i] = f * x1 + if 2 * i + 1 < n: + out[2 * i + 1] = f * x2 + return out + +# Compile using Numba +normalsj = 
nb.jit(normals, nopython=True)
+# Must use the state address, not the state object, with numba
+n = 10000
+
+def numbacall():
+    return normalsj(n, state_addr)
+
+rg = np.random.Generator(PCG64())
+
+def numpycall():
+    return rg.normal(size=n)
+
+# Check that the functions work
+r1 = numbacall()
+r2 = numpycall()
+assert r1.shape == (n,)
+assert r1.shape == r2.shape
+
+t1 = timeit(numbacall, number=1000)
+print(f'{t1:.2f} secs for {n} PCG64 (Numba/PCG64) gaussian randoms')
+t2 = timeit(numpycall, number=1000)
+print(f'{t2:.2f} secs for {n} PCG64 (NumPy/PCG64) gaussian randoms')
+
+# example 2
+
+next_u32 = bit_gen.ctypes.next_uint32
+ctypes_state = bit_gen.ctypes.state
+
+@nb.jit(nopython=True)
+def bounded_uint(lb, ub, state):
+    mask = delta = ub - lb
+    mask |= mask >> 1
+    mask |= mask >> 2
+    mask |= mask >> 4
+    mask |= mask >> 8
+    mask |= mask >> 16
+
+    val = next_u32(state) & mask
+    while val > delta:
+        val = next_u32(state) & mask
+
+    return lb + val
+
+
+print(bounded_uint(323, 2394691, ctypes_state.value))
+
+
+@nb.jit(nopython=True)
+def bounded_uints(lb, ub, n, state):
+    out = np.empty(n, dtype=np.uint32)
+    for i in range(n):
+        out[i] = bounded_uint(lb, ub, state)
+    return out
+
+
+bounded_uints(323, 2394691, 10000000, ctypes_state.value)
+
+
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/numba/extending_distributions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/numba/extending_distributions.py
new file mode 100644
index 00000000..7cf8bf0b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_examples/numba/extending_distributions.py
@@ -0,0 +1,67 @@
+r"""
+Building the required library in this example requires a source distribution
+of NumPy or a clone of the NumPy git repository, since distributions.c is not
+included in binary distributions.
+ +On *nix, execute in numpy/random/src/distributions + +export ${PYTHON_VERSION}=3.8 # Python version +export PYTHON_INCLUDE=#path to Python's include folder, usually \ + ${PYTHON_HOME}/include/python${PYTHON_VERSION}m +export NUMPY_INCLUDE=#path to numpy's include folder, usually \ + ${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include +gcc -shared -o libdistributions.so -fPIC distributions.c \ + -I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE} +mv libdistributions.so ../../_examples/numba/ + +On Windows + +rem PYTHON_HOME and PYTHON_VERSION are setup dependent, this is an example +set PYTHON_HOME=c:\Anaconda +set PYTHON_VERSION=38 +cl.exe /LD .\distributions.c -DDLL_EXPORT \ + -I%PYTHON_HOME%\lib\site-packages\numpy\core\include \ + -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib +move distributions.dll ../../_examples/numba/ +""" +import os + +import numba as nb +import numpy as np +from cffi import FFI + +from numpy.random import PCG64 + +ffi = FFI() +if os.path.exists('./distributions.dll'): + lib = ffi.dlopen('./distributions.dll') +elif os.path.exists('./libdistributions.so'): + lib = ffi.dlopen('./libdistributions.so') +else: + raise RuntimeError('Required DLL/so file was not found.') + +ffi.cdef(""" +double random_standard_normal(void *bitgen_state); +""") +x = PCG64() +xffi = x.cffi +bit_generator = xffi.bit_generator + +random_standard_normal = lib.random_standard_normal + + +def normals(n, bit_generator): + out = np.empty(n) + for i in range(n): + out[i] = random_standard_normal(bit_generator) + return out + + +normalsj = nb.jit(normals, nopython=True) + +# Numba requires a memory address for void * +# Can also get address from x.ctypes.bit_generator.value +bit_generator_address = int(ffi.cast('uintptr_t', bit_generator)) + +norm = normalsj(1000, bit_generator_address) +print(norm[:12]) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_generator.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_generator.cpython-39-darwin.so new file mode 100755 index 00000000..c08ee0e7 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_generator.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_generator.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_generator.pyi new file mode 100644 index 00000000..e1cdefb1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_generator.pyi @@ -0,0 +1,681 @@ +from collections.abc import Callable +from typing import Any, Union, overload, TypeVar, Literal + +from numpy import ( + bool_, + dtype, + float32, + float64, + int8, + int16, + int32, + int64, + int_, + ndarray, + uint, + uint8, + uint16, + uint32, + uint64, +) +from numpy.random import BitGenerator, SeedSequence +from numpy._typing import ( + ArrayLike, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _DoubleCodes, + _DTypeLikeBool, + _DTypeLikeInt, + _DTypeLikeUInt, + _Float32Codes, + _Float64Codes, + _FloatLike_co, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCodes, + _ShapeLike, + _SingleCodes, + _SupportsDType, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCodes, +) + +_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) + +_DTypeLikeFloat32 = Union[ + dtype[float32], + _SupportsDType[dtype[float32]], + type[float32], + _Float32Codes, + _SingleCodes, +] + +_DTypeLikeFloat64 = Union[ + dtype[float64], + _SupportsDType[dtype[float64]], + type[float], + type[float64], + 
_Float64Codes, + _DoubleCodes, +] + +class Generator: + def __init__(self, bit_generator: BitGenerator) -> None: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def __getstate__(self) -> dict[str, Any]: ... + def __setstate__(self, state: dict[str, Any]) -> None: ... + def __reduce__(self) -> tuple[Callable[[str], Generator], tuple[str], dict[str, Any]]: ... + @property + def bit_generator(self) -> BitGenerator: ... + def spawn(self, n_children: int) -> list[Generator]: ... + def bytes(self, length: int) -> bytes: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + out: None | ndarray[Any, dtype[float32]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + out: None | ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def permutation(self, x: int, axis: int = ...) -> ndarray[Any, dtype[int64]]: ... + @overload + def permutation(self, x: ArrayLike, axis: int = ...) -> ndarray[Any, Any]: ... + @overload + def standard_exponential( # type: ignore[misc] + self, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + method: Literal["zig", "inv"] = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_exponential( + self, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + *, + method: Literal["zig", "inv"] = ..., + out: None | ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + method: Literal["zig", "inv"] = ..., + out: None | ndarray[Any, dtype[float32]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + method: Literal["zig", "inv"] = ..., + out: None | ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def random( # type: ignore[misc] + self, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = ..., + ) -> float: ... + @overload + def random( + self, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + *, + out: None | ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + out: None | ndarray[Any, dtype[float32]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... 
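+    # Each supported dtype gets its own overload below so that the returned
+    # ndarray is precisely typed; the same pattern repeats for `integers`
+    # and the distribution methods.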
+ @overload + def random( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + out: None | ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def beta( + self, + a: _FloatLike_co, + b: _FloatLike_co, + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def beta( + self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def exponential( + self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + ) -> int: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: _DTypeLikeBool = ..., + endpoint: bool = ..., + ) -> bool: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: _DTypeLikeInt | _DTypeLikeUInt = ..., + endpoint: bool = ..., + ) -> int: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: _DTypeLikeBool = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[bool_]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int8]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int16]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int32]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint8]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint16]]: ... 
+ @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint32]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint64]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def integers( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., + endpoint: bool = ..., + ) -> ndarray[Any, dtype[uint]]: ... + # TODO: Use a TypeVar _T here to get away from Any output? Should be int->ndarray[Any,dtype[int64]], ArrayLike[_T] -> _T | ndarray[Any,Any] + @overload + def choice( + self, + a: int, + size: None = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> int: ... + @overload + def choice( + self, + a: int, + size: _ShapeLike = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def choice( + self, + a: ArrayLike, + size: None = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> Any: ... + @overload + def choice( + self, + a: ArrayLike, + size: _ShapeLike = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> ndarray[Any, Any]: ... + @overload + def uniform( + self, + low: _FloatLike_co = ..., + high: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def uniform( + self, + low: _ArrayLikeFloat_co = ..., + high: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def normal( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_gamma( # type: ignore[misc] + self, + shape: _FloatLike_co, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + *, + out: ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + out: None | ndarray[Any, dtype[float32]] = ..., + ) -> ndarray[Any, dtype[float32]]: ... 
+ @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + out: None | ndarray[Any, dtype[float64]] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def gamma(self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def f( + self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def noncentral_f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def chisquare( + self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def noncentral_chisquare(self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def noncentral_chisquare( + self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: None = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def vonmises(self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def vonmises( + self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def pareto( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def weibull( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def power( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def laplace( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... 
# type: ignore[misc] + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def gumbel( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def logistic( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def lognormal( + self, + mean: _FloatLike_co = ..., + sigma: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = ..., + sigma: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def rayleigh( + self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def wald(self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def wald( + self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def triangular( + self, + left: _FloatLike_co, + mode: _FloatLike_co, + right: _FloatLike_co, + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def binomial( + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def negative_binomial(self, n: _FloatLike_co, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def negative_binomial( + self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc] + @overload + def poisson( + self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def zipf( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def geometric( + self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... 
# type: ignore[misc] + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def logseries( + self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int64]]: ... + def multivariate_normal( + self, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + check_valid: Literal["warn", "raise", "ignore"] = ..., + tol: float = ..., + *, + method: Literal["svd", "eigh", "cholesky"] = ..., + ) -> ndarray[Any, dtype[float64]]: ... + def multinomial( + self, n: _ArrayLikeInt_co, + pvals: _ArrayLikeFloat_co, + size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int64]]: ... + def multivariate_hypergeometric( + self, + colors: _ArrayLikeInt_co, + nsample: int, + size: None | _ShapeLike = ..., + method: Literal["marginals", "count"] = ..., + ) -> ndarray[Any, dtype[int64]]: ... + def dirichlet( + self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + def permuted( + self, x: ArrayLike, *, axis: None | int = ..., out: None | ndarray[Any, Any] = ... + ) -> ndarray[Any, Any]: ... + def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ... + +def default_rng( + seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator = ... +) -> Generator: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_mt19937.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_mt19937.cpython-39-darwin.so new file mode 100755 index 00000000..35d6ed91 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_mt19937.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_mt19937.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_mt19937.pyi new file mode 100644 index 00000000..55cfb2db --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_mt19937.pyi @@ -0,0 +1,22 @@ +from typing import Any, TypedDict + +from numpy import dtype, ndarray, uint32 +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy._typing import _ArrayLikeInt_co + +class _MT19937Internal(TypedDict): + key: ndarray[Any, dtype[uint32]] + pos: int + +class _MT19937State(TypedDict): + bit_generator: str + state: _MT19937Internal + +class MT19937(BitGenerator): + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... + def jumped(self, jumps: int = ...) -> MT19937: ... + @property + def state(self) -> _MT19937State: ... + @state.setter + def state(self, value: _MT19937State) -> None: ... 
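
The _mt19937.pyi stub above types the picklable MT19937 state as a pair of TypedDicts (a uint32 key array plus an integer pos) and declares jumped() for producing far-separated streams. As a minimal illustrative sketch of that surface using only the public numpy.random API (the seed and counts are arbitrary; the snippet is an editorial example, not part of the committed files):

import numpy as np
from numpy.random import MT19937, Generator

bg = MT19937(12345)
rng = Generator(bg)

saved = bg.state                 # dict shaped like _MT19937State above
first = rng.random(3)
bg.state = saved                 # restoring the state replays the same draws
assert np.allclose(rng.random(3), first)

other = Generator(bg.jumped())   # as if 2**128 draws were skipped: an independent stream
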
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_pcg64.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_pcg64.cpython-39-darwin.so new file mode 100755 index 00000000..1f82ad5f Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_pcg64.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_pcg64.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_pcg64.pyi new file mode 100644 index 00000000..470aee86 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_pcg64.pyi @@ -0,0 +1,42 @@ +from typing import TypedDict + +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy._typing import _ArrayLikeInt_co + +class _PCG64Internal(TypedDict): + state: int + inc: int + +class _PCG64State(TypedDict): + bit_generator: str + state: _PCG64Internal + has_uint32: int + uinteger: int + +class PCG64(BitGenerator): + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + def jumped(self, jumps: int = ...) -> PCG64: ... + @property + def state( + self, + ) -> _PCG64State: ... + @state.setter + def state( + self, + value: _PCG64State, + ) -> None: ... + def advance(self, delta: int) -> PCG64: ... + +class PCG64DXSM(BitGenerator): + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + def jumped(self, jumps: int = ...) -> PCG64DXSM: ... + @property + def state( + self, + ) -> _PCG64State: ... + @state.setter + def state( + self, + value: _PCG64State, + ) -> None: ... + def advance(self, delta: int) -> PCG64DXSM: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_philox.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_philox.cpython-39-darwin.so new file mode 100755 index 00000000..63e2a9d0 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_philox.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_philox.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_philox.pyi new file mode 100644 index 00000000..26ce726e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_philox.pyi @@ -0,0 +1,36 @@ +from typing import Any, TypedDict + +from numpy import dtype, ndarray, uint64 +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy._typing import _ArrayLikeInt_co + +class _PhiloxInternal(TypedDict): + counter: ndarray[Any, dtype[uint64]] + key: ndarray[Any, dtype[uint64]] + +class _PhiloxState(TypedDict): + bit_generator: str + state: _PhiloxInternal + buffer: ndarray[Any, dtype[uint64]] + buffer_pos: int + has_uint32: int + uinteger: int + +class Philox(BitGenerator): + def __init__( + self, + seed: None | _ArrayLikeInt_co | SeedSequence = ..., + counter: None | _ArrayLikeInt_co = ..., + key: None | _ArrayLikeInt_co = ..., + ) -> None: ... + @property + def state( + self, + ) -> _PhiloxState: ... + @state.setter + def state( + self, + value: _PhiloxState, + ) -> None: ... + def jumped(self, jumps: int = ...) -> Philox: ... + def advance(self, delta: int) -> Philox: ... 
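
Unlike MT19937, the PCG64, PCG64DXSM, and Philox stubs above also type advance(), which repositions the stream without drawing, and Philox additionally accepts explicit counter/key values matching its _PhiloxState layout. A small sketch of standard usage (values are arbitrary; this is illustration, not code from the diff):

from numpy.random import PCG64, Philox, Generator

pcg = PCG64(2024)
pcg.advance(2**64)                                       # skip ahead without generating values
streams = [Generator(pcg.jumped(i)) for i in (1, 2, 3)]  # well-separated substreams

ph = Philox(key=0xDEADBEEF)      # counter-based: state is a counter/key pair
state = ph.state                 # dict shaped like _PhiloxState above
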
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_pickle.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_pickle.py
new file mode 100644
index 00000000..07399372
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_pickle.py
@@ -0,0 +1,80 @@
+from .mtrand import RandomState
+from ._philox import Philox
+from ._pcg64 import PCG64, PCG64DXSM
+from ._sfc64 import SFC64
+
+from ._generator import Generator
+from ._mt19937 import MT19937
+
+BitGenerators = {'MT19937': MT19937,
+                 'PCG64': PCG64,
+                 'PCG64DXSM': PCG64DXSM,
+                 'Philox': Philox,
+                 'SFC64': SFC64,
+                 }
+
+
+def __bit_generator_ctor(bit_generator_name='MT19937'):
+    """
+    Pickling helper function that returns a bit generator object
+
+    Parameters
+    ----------
+    bit_generator_name : str
+        String containing the name of the BitGenerator
+
+    Returns
+    -------
+    bit_generator : BitGenerator
+        BitGenerator instance
+    """
+    if bit_generator_name in BitGenerators:
+        bit_generator = BitGenerators[bit_generator_name]
+    else:
+        raise ValueError(str(bit_generator_name) + ' is not a known '
+                         'BitGenerator module.')
+
+    return bit_generator()
+
+
+def __generator_ctor(bit_generator_name="MT19937",
+                     bit_generator_ctor=__bit_generator_ctor):
+    """
+    Pickling helper function that returns a Generator object
+
+    Parameters
+    ----------
+    bit_generator_name : str
+        String containing the core BitGenerator's name
+    bit_generator_ctor : callable, optional
+        Callable function that takes bit_generator_name as its only argument
+        and returns an instantiated bit generator.
+
+    Returns
+    -------
+    rg : Generator
+        Generator using the named core BitGenerator
+    """
+    return Generator(bit_generator_ctor(bit_generator_name))
+
+
+def __randomstate_ctor(bit_generator_name="MT19937",
+                       bit_generator_ctor=__bit_generator_ctor):
+    """
+    Pickling helper function that returns a legacy RandomState-like object
+
+    Parameters
+    ----------
+    bit_generator_name : str
+        String containing the core BitGenerator's name
+    bit_generator_ctor : callable, optional
+        Callable function that takes bit_generator_name as its only argument
+        and returns an instantiated bit generator.
+
+    Returns
+    -------
+    rs : RandomState
+        Legacy RandomState using the named core BitGenerator
+    """
+
+    return RandomState(bit_generator_ctor(bit_generator_name))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_sfc64.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_sfc64.cpython-39-darwin.so
new file mode 100755
index 00000000..464b8fed
Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_sfc64.cpython-39-darwin.so differ
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/_sfc64.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_sfc64.pyi
new file mode 100644
index 00000000..e1810e7d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/_sfc64.pyi
@@ -0,0 +1,28 @@
+from typing import Any, TypedDict
+
+from numpy import dtype as dtype
+from numpy import ndarray as ndarray
+from numpy import uint64
+from numpy.random.bit_generator import BitGenerator, SeedSequence
+from numpy._typing import _ArrayLikeInt_co
+
+class _SFC64Internal(TypedDict):
+    state: ndarray[Any, dtype[uint64]]
+
+class _SFC64State(TypedDict):
+    bit_generator: str
+    state: _SFC64Internal
+    has_uint32: int
+    uinteger: int
+
+class SFC64(BitGenerator):
+    def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
+    @property
+    def state(
+        self,
+    ) -> _SFC64State: ...
+ @state.setter + def state( + self, + value: _SFC64State, + ) -> None: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/bit_generator.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/random/bit_generator.cpython-39-darwin.so new file mode 100755 index 00000000..777eddec Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/random/bit_generator.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/bit_generator.pxd b/dbdpy-env/lib/python3.9/site-packages/numpy/random/bit_generator.pxd new file mode 100644 index 00000000..dfa7d0a7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/bit_generator.pxd @@ -0,0 +1,35 @@ +cimport numpy as np +from libc.stdint cimport uint32_t, uint64_t + +cdef extern from "numpy/random/bitgen.h": + struct bitgen: + void *state + uint64_t (*next_uint64)(void *st) nogil + uint32_t (*next_uint32)(void *st) nogil + double (*next_double)(void *st) nogil + uint64_t (*next_raw)(void *st) nogil + + ctypedef bitgen bitgen_t + +cdef class BitGenerator(): + cdef readonly object _seed_seq + cdef readonly object lock + cdef bitgen_t _bitgen + cdef readonly object _ctypes + cdef readonly object _cffi + cdef readonly object capsule + + +cdef class SeedSequence(): + cdef readonly object entropy + cdef readonly tuple spawn_key + cdef readonly Py_ssize_t pool_size + cdef readonly object pool + cdef readonly uint32_t n_children_spawned + + cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer, + np.ndarray[np.npy_uint32, ndim=1] entropy_array) + cdef get_assembled_entropy(self) + +cdef class SeedlessSequence(): + pass diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/bit_generator.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/random/bit_generator.pyi new file mode 100644 index 00000000..8b9779ca --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/bit_generator.pyi @@ -0,0 +1,112 @@ +import abc +from threading import Lock +from collections.abc import Callable, Mapping, Sequence +from typing import ( + Any, + NamedTuple, + TypedDict, + TypeVar, + Union, + overload, + Literal, +) + +from numpy import dtype, ndarray, uint32, uint64 +from numpy._typing import _ArrayLikeInt_co, _ShapeLike, _SupportsDType, _UInt32Codes, _UInt64Codes + +_T = TypeVar("_T") + +_DTypeLikeUint32 = Union[ + dtype[uint32], + _SupportsDType[dtype[uint32]], + type[uint32], + _UInt32Codes, +] +_DTypeLikeUint64 = Union[ + dtype[uint64], + _SupportsDType[dtype[uint64]], + type[uint64], + _UInt64Codes, +] + +class _SeedSeqState(TypedDict): + entropy: None | int | Sequence[int] + spawn_key: tuple[int, ...] + pool_size: int + n_children_spawned: int + +class _Interface(NamedTuple): + state_address: Any + state: Any + next_uint64: Any + next_uint32: Any + next_double: Any + bit_generator: Any + +class ISeedSequence(abc.ABC): + @abc.abstractmethod + def generate_state( + self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... + ) -> ndarray[Any, dtype[uint32 | uint64]]: ... + +class ISpawnableSeedSequence(ISeedSequence): + @abc.abstractmethod + def spawn(self: _T, n_children: int) -> list[_T]: ... + +class SeedlessSeedSequence(ISpawnableSeedSequence): + def generate_state( + self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... + ) -> ndarray[Any, dtype[uint32 | uint64]]: ... + def spawn(self: _T, n_children: int) -> list[_T]: ... 
+ +class SeedSequence(ISpawnableSeedSequence): + entropy: None | int | Sequence[int] + spawn_key: tuple[int, ...] + pool_size: int + n_children_spawned: int + pool: ndarray[Any, dtype[uint32]] + def __init__( + self, + entropy: None | int | Sequence[int] | _ArrayLikeInt_co = ..., + *, + spawn_key: Sequence[int] = ..., + pool_size: int = ..., + n_children_spawned: int = ..., + ) -> None: ... + def __repr__(self) -> str: ... + @property + def state( + self, + ) -> _SeedSeqState: ... + def generate_state( + self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... + ) -> ndarray[Any, dtype[uint32 | uint64]]: ... + def spawn(self, n_children: int) -> list[SeedSequence]: ... + +class BitGenerator(abc.ABC): + lock: Lock + def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... + def __getstate__(self) -> dict[str, Any]: ... + def __setstate__(self, state: dict[str, Any]) -> None: ... + def __reduce__( + self, + ) -> tuple[Callable[[str], BitGenerator], tuple[str], tuple[dict[str, Any]]]: ... + @abc.abstractmethod + @property + def state(self) -> Mapping[str, Any]: ... + @state.setter + def state(self, value: Mapping[str, Any]) -> None: ... + @property + def seed_seq(self) -> ISeedSequence: ... + def spawn(self, n_children: int) -> list[BitGenerator]: ... + @overload + def random_raw(self, size: None = ..., output: Literal[True] = ...) -> int: ... # type: ignore[misc] + @overload + def random_raw(self, size: _ShapeLike = ..., output: Literal[True] = ...) -> ndarray[Any, dtype[uint64]]: ... # type: ignore[misc] + @overload + def random_raw(self, size: None | _ShapeLike = ..., output: Literal[False] = ...) -> None: ... # type: ignore[misc] + def _benchmark(self, cnt: int, method: str = ...) -> None: ... + @property + def ctypes(self) -> _Interface: ... + @property + def cffi(self) -> _Interface: ... 
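
bit_generator.pyi above encodes the spawning protocol: ISpawnableSeedSequence.spawn() yields child SeedSequence objects with extended spawn_key tuples, which is the supported way to derive reproducible parallel streams. A minimal sketch of the protocol in use (standard numpy.random API; the entropy value is arbitrary and the snippet is not part of the committed files):

from numpy.random import SeedSequence, PCG64, Generator

root = SeedSequence(42)
children = root.spawn(4)                        # list[SeedSequence], per the stub
rngs = [Generator(PCG64(child)) for child in children]

words = root.generate_state(2)                  # uint32 words by default; uint64 via dtype=
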
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/c_distributions.pxd b/dbdpy-env/lib/python3.9/site-packages/numpy/random/c_distributions.pxd
new file mode 100644
index 00000000..b978d135
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/c_distributions.pxd
@@ -0,0 +1,120 @@
+#!python
+#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
+from numpy cimport npy_intp
+
+from libc.stdint cimport (uint64_t, int32_t, int64_t)
+from numpy.random cimport bitgen_t
+
+cdef extern from "numpy/random/distributions.h":
+
+    struct s_binomial_t:
+        int has_binomial
+        double psave
+        int64_t nsave
+        double r
+        double q
+        double fm
+        int64_t m
+        double p1
+        double xm
+        double xl
+        double xr
+        double c
+        double laml
+        double lamr
+        double p2
+        double p3
+        double p4
+
+    ctypedef s_binomial_t binomial_t
+
+    float random_standard_uniform_f(bitgen_t *bitgen_state) nogil
+    double random_standard_uniform(bitgen_t *bitgen_state) nogil
+    void random_standard_uniform_fill(bitgen_t* bitgen_state, npy_intp cnt, double *out) nogil
+    void random_standard_uniform_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
+
+    double random_standard_exponential(bitgen_t *bitgen_state) nogil
+    float random_standard_exponential_f(bitgen_t *bitgen_state) nogil
+    void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
+    void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
+    void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
+    void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
+
+    double random_standard_normal(bitgen_t* bitgen_state) nogil
+    float random_standard_normal_f(bitgen_t *bitgen_state) nogil
+    void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp count, double *out) nogil
+    void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp count, float *out) nogil
+    double random_standard_gamma(bitgen_t *bitgen_state, double shape) nogil
+    float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
+
+    float random_standard_uniform_f(bitgen_t *bitgen_state) nogil
+    void random_standard_uniform_fill_f(bitgen_t* bitgen_state, npy_intp cnt, float *out) nogil
+    float random_standard_normal_f(bitgen_t* bitgen_state) nogil
+    float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
+
+    int64_t random_positive_int64(bitgen_t *bitgen_state) nogil
+    int32_t random_positive_int32(bitgen_t *bitgen_state) nogil
+    int64_t random_positive_int(bitgen_t *bitgen_state) nogil
+    uint64_t random_uint(bitgen_t *bitgen_state) nogil
+
+    double random_normal(bitgen_t *bitgen_state, double loc, double scale) nogil
+
+    double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil
+    float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) nogil
+
+    double random_exponential(bitgen_t *bitgen_state, double scale) nogil
+    double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil
+    double random_beta(bitgen_t *bitgen_state, double a, double b) nogil
+    double random_chisquare(bitgen_t *bitgen_state, double df) nogil
+    double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil
+    double random_standard_cauchy(bitgen_t *bitgen_state) nogil
+    double random_pareto(bitgen_t *bitgen_state, double a) nogil
+    double random_weibull(bitgen_t *bitgen_state, double a) nogil
+    double random_power(bitgen_t *bitgen_state, double a) nogil
+    double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil
+    double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil
+    double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil
+    double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil
+    double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil
+    double random_standard_t(bitgen_t *bitgen_state, double df) nogil
+    double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
+                                       double nonc) nogil
+    double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
+                               double dfden, double nonc) nogil
+    double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil
+    double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil
+    double random_triangular(bitgen_t *bitgen_state, double left, double mode,
+                             double right) nogil
+
+    int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil
+    int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil
+    int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil
+    int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil
+    int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil
+    int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil
+    int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil
+    int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil
+    int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad,
+                                  int64_t sample) nogil
+
+    uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil
+
+    # Generate random uint64 numbers in closed interval [off, off + rng].
+ uint64_t random_bounded_uint64(bitgen_t *bitgen_state, + uint64_t off, uint64_t rng, + uint64_t mask, bint use_masked) nogil + + void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix, + double *pix, npy_intp d, binomial_t *binomial) nogil + + int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates) nogil + void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates) nogil + diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/lib/libnpyrandom.a b/dbdpy-env/lib/python3.9/site-packages/numpy/random/lib/libnpyrandom.a new file mode 100644 index 00000000..7fb9753a Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/random/lib/libnpyrandom.a differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/mtrand.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/numpy/random/mtrand.cpython-39-darwin.so new file mode 100755 index 00000000..a6b5e378 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/numpy/random/mtrand.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/mtrand.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/random/mtrand.pyi new file mode 100644 index 00000000..b5f60065 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/mtrand.pyi @@ -0,0 +1,571 @@ +import builtins +from collections.abc import Callable +from typing import Any, Union, overload, Literal + +from numpy import ( + bool_, + dtype, + float32, + float64, + int8, + int16, + int32, + int64, + int_, + ndarray, + uint, + uint8, + uint16, + uint32, + uint64, +) +from numpy.random.bit_generator import BitGenerator +from numpy._typing import ( + ArrayLike, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _DoubleCodes, + _DTypeLikeBool, + _DTypeLikeInt, + _DTypeLikeUInt, + _Float32Codes, + _Float64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCodes, + _ShapeLike, + _SingleCodes, + _SupportsDType, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCodes, +) + +_DTypeLikeFloat32 = Union[ + dtype[float32], + _SupportsDType[dtype[float32]], + type[float32], + _Float32Codes, + _SingleCodes, +] + +_DTypeLikeFloat64 = Union[ + dtype[float64], + _SupportsDType[dtype[float64]], + type[float], + type[float64], + _Float64Codes, + _DoubleCodes, +] + +class RandomState: + _bit_generator: BitGenerator + def __init__(self, seed: None | _ArrayLikeInt_co | BitGenerator = ...) -> None: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def __getstate__(self) -> dict[str, Any]: ... + def __setstate__(self, state: dict[str, Any]) -> None: ... + def __reduce__(self) -> tuple[Callable[[str], RandomState], tuple[str], dict[str, Any]]: ... + def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None: ... + @overload + def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ... + @overload + def get_state( + self, legacy: Literal[True] = ... + ) -> dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]: ... + def set_state( + self, state: dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float] + ) -> None: ... + @overload + def random_sample(self, size: None = ...) -> float: ... 
# type: ignore[misc] + @overload + def random_sample(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def random(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def random(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def beta( + self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def exponential( + self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_exponential(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def tomaxint(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[int_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + ) -> int: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: _DTypeLikeBool = ..., + ) -> bool: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: _DTypeLikeInt | _DTypeLikeUInt = ..., + ) -> int: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: _DTypeLikeBool = ..., + ) -> ndarray[Any, dtype[bool_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., + ) -> ndarray[Any, dtype[int8]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., + ) -> ndarray[Any, dtype[int16]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + ) -> ndarray[Any, dtype[int32]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., + ) -> ndarray[Any, dtype[int64]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., + ) -> ndarray[Any, dtype[uint8]]: ... 
+ @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + ) -> ndarray[Any, dtype[uint16]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., + ) -> ndarray[Any, dtype[uint32]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., + ) -> ndarray[Any, dtype[uint64]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., + ) -> ndarray[Any, dtype[uint]]: ... + def bytes(self, length: int) -> builtins.bytes: ... + @overload + def choice( + self, + a: int, + size: None = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + ) -> int: ... + @overload + def choice( + self, + a: int, + size: _ShapeLike = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def choice( + self, + a: ArrayLike, + size: None = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + ) -> Any: ... + @overload + def choice( + self, + a: ArrayLike, + size: _ShapeLike = ..., + replace: bool = ..., + p: None | _ArrayLikeFloat_co = ..., + ) -> ndarray[Any, Any]: ... + @overload + def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def uniform( + self, + low: _ArrayLikeFloat_co = ..., + high: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def rand(self) -> float: ... + @overload + def rand(self, *args: int) -> ndarray[Any, dtype[float64]]: ... + @overload + def randn(self) -> float: ... + @overload + def randn(self, *args: int) -> ndarray[Any, dtype[float64]]: ... + @overload + def random_integers(self, low: int, high: None | int = ..., size: None = ...) -> int: ... # type: ignore[misc] + @overload + def random_integers( + self, + low: _ArrayLikeInt_co, + high: None | _ArrayLikeInt_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_normal( # type: ignore[misc] + self, size: _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... 
+ @overload + def standard_gamma( # type: ignore[misc] + self, + shape: float, + size: None = ..., + ) -> float: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def f( + self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def chisquare( + self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def noncentral_chisquare( + self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: None = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def vonmises( + self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def pareto( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def weibull( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def power( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ... + @overload + def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... 
# type: ignore[misc] + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = ..., + sigma: _ArrayLikeFloat_co = ..., + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def rayleigh( + self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def wald( + self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[float64]]: ... + @overload + def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def binomial( + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def negative_binomial( + self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc] + @overload + def poisson( + self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def zipf( + self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def geometric( + self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: None | _ShapeLike = ..., + ) -> ndarray[Any, dtype[int_]]: ... + @overload + def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def logseries( + self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int_]]: ... 
+ def multivariate_normal( + self, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: None | _ShapeLike = ..., + check_valid: Literal["warn", "raise", "ignore"] = ..., + tol: float = ..., + ) -> ndarray[Any, dtype[float64]]: ... + def multinomial( + self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[int_]]: ... + def dirichlet( + self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + ) -> ndarray[Any, dtype[float64]]: ... + def shuffle(self, x: ArrayLike) -> None: ... + @overload + def permutation(self, x: int) -> ndarray[Any, dtype[int_]]: ... + @overload + def permutation(self, x: ArrayLike) -> ndarray[Any, Any]: ... + +_rand: RandomState + +beta = _rand.beta +binomial = _rand.binomial +bytes = _rand.bytes +chisquare = _rand.chisquare +choice = _rand.choice +dirichlet = _rand.dirichlet +exponential = _rand.exponential +f = _rand.f +gamma = _rand.gamma +get_state = _rand.get_state +geometric = _rand.geometric +gumbel = _rand.gumbel +hypergeometric = _rand.hypergeometric +laplace = _rand.laplace +logistic = _rand.logistic +lognormal = _rand.lognormal +logseries = _rand.logseries +multinomial = _rand.multinomial +multivariate_normal = _rand.multivariate_normal +negative_binomial = _rand.negative_binomial +noncentral_chisquare = _rand.noncentral_chisquare +noncentral_f = _rand.noncentral_f +normal = _rand.normal +pareto = _rand.pareto +permutation = _rand.permutation +poisson = _rand.poisson +power = _rand.power +rand = _rand.rand +randint = _rand.randint +randn = _rand.randn +random = _rand.random +random_integers = _rand.random_integers +random_sample = _rand.random_sample +rayleigh = _rand.rayleigh +seed = _rand.seed +set_state = _rand.set_state +shuffle = _rand.shuffle +standard_cauchy = _rand.standard_cauchy +standard_exponential = _rand.standard_exponential +standard_gamma = _rand.standard_gamma +standard_normal = _rand.standard_normal +standard_t = _rand.standard_t +triangular = _rand.triangular +uniform = _rand.uniform +vonmises = _rand.vonmises +wald = _rand.wald +weibull = _rand.weibull +zipf = _rand.zipf +# Two legacy that are trivial wrappers around random_sample +sample = _rand.random_sample +ranf = _rand.random_sample + +def set_bit_generator(bitgen: BitGenerator) -> None: + ... + +def get_bit_generator() -> BitGenerator: + ... 
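
mtrand.pyi above types the legacy RandomState class and then aliases nearly every method to a hidden module-level instance (_rand), which is why the functional numpy.random API (seed, randint, shuffle, ...) shares one global stream. A short sketch of the legacy get_state/set_state round trip that the stub's overloads describe (standard numpy behavior; not code from this diff):

import numpy as np

np.random.seed(0)                  # seeds the hidden module-level RandomState
state = np.random.get_state()      # legacy tuple: ('MT19937', key array, pos, has_gauss, cached)
a = np.random.randint(0, 10, size=5)

np.random.set_state(state)         # rewind the shared stream
b = np.random.randint(0, 10, size=5)
assert (a == b).all()
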
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_direct.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_direct.py
new file mode 100644
index 00000000..fa2ae866
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_direct.py
@@ -0,0 +1,518 @@
+import os
+from os.path import join
+import sys
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_array_equal,
+                           assert_raises)
+import pytest
+
+from numpy.random import (
+    Generator, MT19937, PCG64, PCG64DXSM, Philox, RandomState, SeedSequence,
+    SFC64, default_rng
+)
+from numpy.random._common import interface
+
+try:
+    import cffi  # noqa: F401
+
+    MISSING_CFFI = False
+except ImportError:
+    MISSING_CFFI = True
+
+try:
+    import ctypes  # noqa: F401
+
+    MISSING_CTYPES = False
+except ImportError:
+    # mark ctypes as missing when the import fails
+    MISSING_CTYPES = True
+
+if sys.flags.optimize > 1:
+    # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
+    # cffi cannot succeed
+    MISSING_CFFI = True
+
+
+pwd = os.path.dirname(os.path.abspath(__file__))
+
+
+def assert_state_equal(actual, target):
+    for key in actual:
+        if isinstance(actual[key], dict):
+            assert_state_equal(actual[key], target[key])
+        elif isinstance(actual[key], np.ndarray):
+            assert_array_equal(actual[key], target[key])
+        else:
+            assert actual[key] == target[key]
+
+
+def uint32_to_float32(u):
+    return ((u >> np.uint32(8)) * (1.0 / 2**24)).astype(np.float32)
+
+
+def uniform32_from_uint64(x):
+    x = np.uint64(x)
+    upper = np.array(x >> np.uint64(32), dtype=np.uint32)
+    lower = np.uint64(0xffffffff)
+    lower = np.array(x & lower, dtype=np.uint32)
+    joined = np.column_stack([lower, upper]).ravel()
+    return uint32_to_float32(joined)
+
+
+def uniform32_from_uint53(x):
+    x = np.uint64(x) >> np.uint64(16)
+    x = np.uint32(x & np.uint64(0xffffffff))
+    return uint32_to_float32(x)
+
+
+def uniform32_from_uint32(x):
+    return uint32_to_float32(x)
+
+
+def uniform32_from_uint(x, bits):
+    if bits == 64:
+        return uniform32_from_uint64(x)
+    elif bits == 53:
+        return uniform32_from_uint53(x)
+    elif bits == 32:
+        return uniform32_from_uint32(x)
+    else:
+        raise NotImplementedError
+
+
+def uniform_from_uint(x, bits):
+    if bits in (64, 63, 53):
+        return uniform_from_uint64(x)
+    elif bits == 32:
+        return uniform_from_uint32(x)
+
+
+def uniform_from_uint64(x):
+    return (x >> np.uint64(11)) * (1.0 / 9007199254740992.0)
+
+
+def uniform_from_uint32(x):
+    out = np.empty(len(x) // 2)
+    for i in range(0, len(x), 2):
+        a = x[i] >> 5
+        b = x[i + 1] >> 6
+        out[i // 2] = (a * 67108864.0 + b) / 9007199254740992.0
+    return out
+
+
+def uniform_from_dsfmt(x):
+    return x.view(np.double) - 1.0
+
+
+def gauss_from_uint(x, n, bits):
+    if bits in (64, 63):
+        doubles = uniform_from_uint64(x)
+    elif bits == 32:
+        doubles = uniform_from_uint32(x)
+    else:  # bits == 'dsfmt'
+        doubles = uniform_from_dsfmt(x)
+    gauss = []
+    loc = 0
+    x1 = x2 = 0.0
+    while len(gauss) < n:
+        r2 = 2
+        while r2 >= 1.0 or r2 == 0.0:
+            x1 = 2.0 * doubles[loc] - 1.0
+            x2 = 2.0 * doubles[loc + 1] - 1.0
+            r2 = x1 * x1 + x2 * x2
+            loc += 2
+
+        f = np.sqrt(-2.0 * np.log(r2) / r2)
+        gauss.append(f * x2)
+        gauss.append(f * x1)
+
+    return gauss[:n]
+
+
+def test_seedsequence():
+    from numpy.random.bit_generator import (ISeedSequence,
+                                            ISpawnableSeedSequence,
+                                            SeedlessSeedSequence)
+ + s1 = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6) + s1.spawn(10) + s2 = SeedSequence(**s1.state) + assert_equal(s1.state, s2.state) + assert_equal(s1.n_children_spawned, s2.n_children_spawned) + + # The interfaces cannot be instantiated themselves. + assert_raises(TypeError, ISeedSequence) + assert_raises(TypeError, ISpawnableSeedSequence) + dummy = SeedlessSeedSequence() + assert_raises(NotImplementedError, dummy.generate_state, 10) + assert len(dummy.spawn(10)) == 10 + + +def test_generator_spawning(): + """ Test spawning new generators and bit_generators directly. + """ + rng = np.random.default_rng() + seq = rng.bit_generator.seed_seq + new_ss = seq.spawn(5) + expected_keys = [seq.spawn_key + (i,) for i in range(5)] + assert [c.spawn_key for c in new_ss] == expected_keys + + new_bgs = rng.bit_generator.spawn(5) + expected_keys = [seq.spawn_key + (i,) for i in range(5, 10)] + assert [bg.seed_seq.spawn_key for bg in new_bgs] == expected_keys + + new_rngs = rng.spawn(5) + expected_keys = [seq.spawn_key + (i,) for i in range(10, 15)] + found_keys = [rng.bit_generator.seed_seq.spawn_key for rng in new_rngs] + assert found_keys == expected_keys + + # Sanity check that streams are actually different: + assert new_rngs[0].uniform() != new_rngs[1].uniform() + + +def test_non_spawnable(): + from numpy.random.bit_generator import ISeedSequence + + class FakeSeedSequence: + def generate_state(self, n_words, dtype=np.uint32): + return np.zeros(n_words, dtype=dtype) + + ISeedSequence.register(FakeSeedSequence) + + rng = np.random.default_rng(FakeSeedSequence()) + + with pytest.raises(TypeError, match="The underlying SeedSequence"): + rng.spawn(5) + + with pytest.raises(TypeError, match="The underlying SeedSequence"): + rng.bit_generator.spawn(5) + + +class Base: + dtype = np.uint64 + data2 = data1 = {} + + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.seed_error_type = TypeError + cls.invalid_init_types = [] + cls.invalid_init_values = [] + + @classmethod + def _read_csv(cls, filename): + with open(filename) as csv: + seed = csv.readline() + seed = seed.split(',') + seed = [int(s.strip(), 0) for s in seed[1:]] + data = [] + for line in csv: + data.append(int(line.split(',')[-1].strip(), 0)) + return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)} + + def test_raw(self): + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw(1000) + assert_equal(uints, self.data1['data']) + + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw() + assert_equal(uints, self.data1['data'][0]) + + bit_generator = self.bit_generator(*self.data2['seed']) + uints = bit_generator.random_raw(1000) + assert_equal(uints, self.data2['data']) + + def test_random_raw(self): + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw(output=False) + assert uints is None + uints = bit_generator.random_raw(1000, output=False) + assert uints is None + + def test_gauss_inv(self): + n = 25 + rs = RandomState(self.bit_generator(*self.data1['seed'])) + gauss = rs.standard_normal(n) + assert_allclose(gauss, + gauss_from_uint(self.data1['data'], n, self.bits)) + + rs = RandomState(self.bit_generator(*self.data2['seed'])) + gauss = rs.standard_normal(25) + assert_allclose(gauss, + gauss_from_uint(self.data2['data'], n, self.bits)) + + def test_uniform_double(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + vals = 
uniform_from_uint(self.data1['data'], self.bits) + uniforms = rs.random(len(vals)) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float64) + + rs = Generator(self.bit_generator(*self.data2['seed'])) + vals = uniform_from_uint(self.data2['data'], self.bits) + uniforms = rs.random(len(vals)) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float64) + + def test_uniform_float(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + vals = uniform32_from_uint(self.data1['data'], self.bits) + uniforms = rs.random(len(vals), dtype=np.float32) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float32) + + rs = Generator(self.bit_generator(*self.data2['seed'])) + vals = uniform32_from_uint(self.data2['data'], self.bits) + uniforms = rs.random(len(vals), dtype=np.float32) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float32) + + def test_repr(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + assert 'Generator' in repr(rs) + assert f'{id(rs):#x}'.upper().replace('X', 'x') in repr(rs) + + def test_str(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + assert 'Generator' in str(rs) + assert str(self.bit_generator.__name__) in str(rs) + assert f'{id(rs):#x}'.upper().replace('X', 'x') not in str(rs) + + def test_pickle(self): + import pickle + + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + bitgen_pkl = pickle.dumps(bit_generator) + reloaded = pickle.loads(bitgen_pkl) + reloaded_state = reloaded.state + assert_array_equal(Generator(bit_generator).standard_normal(1000), + Generator(reloaded).standard_normal(1000)) + assert bit_generator is not reloaded + assert_state_equal(reloaded_state, state) + + ss = SeedSequence(100) + aa = pickle.loads(pickle.dumps(ss)) + assert_equal(ss.state, aa.state) + + def test_invalid_state_type(self): + bit_generator = self.bit_generator(*self.data1['seed']) + with pytest.raises(TypeError): + bit_generator.state = {'1'} + + def test_invalid_state_value(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + state['bit_generator'] = 'otherBitGenerator' + with pytest.raises(ValueError): + bit_generator.state = state + + def test_invalid_init_type(self): + bit_generator = self.bit_generator + for st in self.invalid_init_types: + with pytest.raises(TypeError): + bit_generator(*st) + + def test_invalid_init_values(self): + bit_generator = self.bit_generator + for st in self.invalid_init_values: + with pytest.raises((ValueError, OverflowError)): + bit_generator(*st) + + def test_benchmark(self): + bit_generator = self.bit_generator(*self.data1['seed']) + bit_generator._benchmark(1) + bit_generator._benchmark(1, 'double') + with pytest.raises(ValueError): + bit_generator._benchmark(1, 'int32') + + @pytest.mark.skipif(MISSING_CFFI, reason='cffi not available') + def test_cffi(self): + bit_generator = self.bit_generator(*self.data1['seed']) + cffi_interface = bit_generator.cffi + assert isinstance(cffi_interface, interface) + other_cffi_interface = bit_generator.cffi + assert other_cffi_interface is cffi_interface + + @pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available') + def test_ctypes(self): + bit_generator = self.bit_generator(*self.data1['seed']) + ctypes_interface = bit_generator.ctypes + assert isinstance(ctypes_interface, interface) + other_ctypes_interface = bit_generator.ctypes + assert other_ctypes_interface is ctypes_interface + + def 
test_getstate(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + alt_state = bit_generator.__getstate__() + assert_state_equal(state, alt_state) + + +class TestPhilox(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = Philox + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv( + join(pwd, './data/philox-testset-1.csv')) + cls.data2 = cls._read_csv( + join(pwd, './data/philox-testset-2.csv')) + cls.seed_error_type = TypeError + cls.invalid_init_types = [] + cls.invalid_init_values = [(1, None, 1), (-1,), (None, None, 2 ** 257 + 1)] + + def test_set_key(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + keyed = self.bit_generator(counter=state['state']['counter'], + key=state['state']['key']) + assert_state_equal(bit_generator.state, keyed.state) + + +class TestPCG64(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + def test_advance_symmetry(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + state = rs.bit_generator.state + step = -0x9e3779b97f4a7c150000000000000000 + rs.bit_generator.advance(step) + val_neg = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(2**128 + step) + val_pos = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(10 * 2**128 + step) + val_big = rs.integers(10) + assert val_neg == val_pos + assert val_big == val_pos + + def test_advange_large(self): + rs = Generator(self.bit_generator(38219308213743)) + pcg = rs.bit_generator + state = pcg.state["state"] + initial_state = 287608843259529770491897792873167516365 + assert state["state"] == initial_state + pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1))) + state = pcg.state["state"] + advanced_state = 135275564607035429730177404003164635391 + assert state["state"] == advanced_state + + +class TestPCG64DXSM(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64DXSM + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + def test_advance_symmetry(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + state = rs.bit_generator.state + step = -0x9e3779b97f4a7c150000000000000000 + rs.bit_generator.advance(step) + val_neg = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(2**128 + step) + val_pos = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(10 * 2**128 + step) + val_big = rs.integers(10) + assert val_neg == val_pos + assert val_big == val_pos + + def test_advange_large(self): + rs = Generator(self.bit_generator(38219308213743)) + pcg = rs.bit_generator + state = pcg.state + initial_state = 287608843259529770491897792873167516365 + assert state["state"]["state"] == initial_state + pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1))) + state = pcg.state["state"] + advanced_state = 277778083536782149546677086420637664879 + 
assert state["state"] == advanced_state + + +class TestMT19937(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = MT19937 + cls.bits = 32 + cls.dtype = np.uint32 + cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv')) + cls.seed_error_type = ValueError + cls.invalid_init_types = [] + cls.invalid_init_values = [(-1,)] + + def test_seed_float_array(self): + assert_raises(TypeError, self.bit_generator, np.array([np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([-np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([0, np.pi])) + assert_raises(TypeError, self.bit_generator, [np.pi]) + assert_raises(TypeError, self.bit_generator, [0, np.pi]) + + def test_state_tuple(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + bit_generator = rs.bit_generator + state = bit_generator.state + desired = rs.integers(2 ** 16) + tup = (state['bit_generator'], state['state']['key'], + state['state']['pos']) + bit_generator.state = tup + actual = rs.integers(2 ** 16) + assert_equal(actual, desired) + tup = tup + (0, 0.0) + bit_generator.state = tup + actual = rs.integers(2 ** 16) + assert_equal(actual, desired) + + +class TestSFC64(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = SFC64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv( + join(pwd, './data/sfc64-testset-1.csv')) + cls.data2 = cls._read_csv( + join(pwd, './data/sfc64-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + +class TestDefaultRNG: + def test_seed(self): + for args in [(), (None,), (1234,), ([1234, 5678],)]: + rg = default_rng(*args) + assert isinstance(rg.bit_generator, PCG64) + + def test_passthrough(self): + bg = Philox() + rg = default_rng(bg) + assert rg.bit_generator is bg + rg2 = default_rng(rg) + assert rg2 is rg + assert rg2.bit_generator is bg diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_extending.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_extending.py new file mode 100644 index 00000000..2783d1cd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_extending.py @@ -0,0 +1,118 @@ +from importlib.util import spec_from_file_location, module_from_spec +import os +import pathlib +import pytest +import shutil +import subprocess +import sys +import sysconfig +import textwrap +import warnings + +import numpy as np +from numpy.testing import IS_WASM + + +try: + import cffi +except ImportError: + cffi = None + +if sys.flags.optimize > 1: + # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1 + # cffi cannot succeed + cffi = None + +try: + with warnings.catch_warnings(record=True) as w: + # numba issue gh-4733 + warnings.filterwarnings('always', '', DeprecationWarning) + import numba +except (ImportError, SystemError): + # Certain numpy/numba versions trigger a SystemError due to a numba bug + numba = None + +try: + import cython + from Cython.Compiler.Version import version as cython_version +except ImportError: + cython = None +else: + from numpy._utils import _pep440 + # Cython 0.29.30 is required for Python 3.11 and there are + # other fixes in the 0.29 series that are needed even for earlier + # Python versions. 
+ # Note: keep in sync with the one in pyproject.toml + required_version = '0.29.35' + if _pep440.parse(cython_version) < _pep440.Version(required_version): + # too old or wrong cython, skip the test + cython = None + + +@pytest.mark.skipif( + sys.platform == "win32" and sys.maxsize < 2**32, + reason="Failing in 32-bit Windows wheel build job, skip for now" +) +@pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") +@pytest.mark.skipif(cython is None, reason="requires cython") +@pytest.mark.slow +def test_cython(tmp_path): + import glob + # build the examples in a temporary directory + srcdir = os.path.join(os.path.dirname(__file__), '..') + shutil.copytree(srcdir, tmp_path / 'random') + build_dir = tmp_path / 'random' / '_examples' / 'cython' + target_dir = build_dir / "build" + os.makedirs(target_dir, exist_ok=True) + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--vsenv", str(build_dir)], + cwd=target_dir, + ) + else: + subprocess.check_call(["meson", "setup", str(build_dir)], + cwd=target_dir + ) + subprocess.check_call(["meson", "compile", "-vv"], cwd=target_dir) + + # gh-16162: make sure numpy's __init__.pxd was used for cython + # not really part of this test, but it is a convenient place to check + + g = glob.glob(str(target_dir / "*" / "extending.pyx.c")) + with open(g[0]) as fid: + txt_to_find = 'NumPy API declarations from "numpy/__init__' + for i, line in enumerate(fid): + if txt_to_find in line: + break + else: + assert False, ("Could not find '{}' in C file, " + "wrong pxd used".format(txt_to_find)) + # import without adding the directory to sys.path + suffix = sysconfig.get_config_var('EXT_SUFFIX') + + def load(modname): + so = (target_dir / modname).with_suffix(suffix) + spec = spec_from_file_location(modname, so) + mod = module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + # test that the module can be imported + load("extending") + load("extending_cpp") + # actually test the cython c-extension + extending_distributions = load("extending_distributions") + from numpy.random import PCG64 + values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd') + assert values.shape == (10,) + assert values.dtype == np.float64 + +@pytest.mark.skipif(numba is None or cffi is None, + reason="requires numba and cffi") +def test_numba(): + from numpy.random._examples.numba import extending # noqa: F401 + +@pytest.mark.skipif(cffi is None, reason="requires cffi") +def test_cffi(): + from numpy.random._examples.cffi import extending # noqa: F401 diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937.py new file mode 100644 index 00000000..e744f5ba --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937.py @@ -0,0 +1,2746 @@ +import sys +import hashlib + +import pytest + +import numpy as np +from numpy.linalg import LinAlgError +from numpy.testing import ( + assert_, assert_raises, assert_equal, assert_allclose, + assert_warns, assert_no_warnings, assert_array_equal, + assert_array_almost_equal, suppress_warnings, IS_WASM) + +from numpy.random import Generator, MT19937, SeedSequence, RandomState + +random = Generator(MT19937()) + +JUMP_TEST_DATA = [ + { + "seed": 0, + "steps": 10, + "initial": {"key_sha256": "bb1636883c2707b51c5b7fc26c6927af4430f2e0785a8c7bc886337f919f9edf", "pos": 9}, + "jumped": {"key_sha256": 
"ff682ac12bb140f2d72fba8d3506cf4e46817a0db27aae1683867629031d8d55", "pos": 598}, + }, + { + "seed":384908324, + "steps":312, + "initial": {"key_sha256": "16b791a1e04886ccbbb4d448d6ff791267dc458ae599475d08d5cced29d11614", "pos": 311}, + "jumped": {"key_sha256": "a0110a2cf23b56be0feaed8f787a7fc84bef0cb5623003d75b26bdfa1c18002c", "pos": 276}, + }, + { + "seed": [839438204, 980239840, 859048019, 821], + "steps": 511, + "initial": {"key_sha256": "d306cf01314d51bd37892d874308200951a35265ede54d200f1e065004c3e9ea", "pos": 510}, + "jumped": {"key_sha256": "0e00ab449f01a5195a83b4aee0dfbc2ce8d46466a640b92e33977d2e42f777f8", "pos": 475}, + }, +] + + +@pytest.fixture(scope='module', params=[True, False]) +def endpoint(request): + return request.param + + +class TestSeed: + def test_scalar(self): + s = Generator(MT19937(0)) + assert_equal(s.integers(1000), 479) + s = Generator(MT19937(4294967295)) + assert_equal(s.integers(1000), 324) + + def test_array(self): + s = Generator(MT19937(range(10))) + assert_equal(s.integers(1000), 465) + s = Generator(MT19937(np.arange(10))) + assert_equal(s.integers(1000), 465) + s = Generator(MT19937([0])) + assert_equal(s.integers(1000), 479) + s = Generator(MT19937([4294967295])) + assert_equal(s.integers(1000), 324) + + def test_seedsequence(self): + s = MT19937(SeedSequence(0)) + assert_equal(s.random_raw(1), 2058676884) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, MT19937, -0.5) + assert_raises(ValueError, MT19937, -1) + + def test_invalid_array(self): + # seed must be an unsigned integer + assert_raises(TypeError, MT19937, [-0.5]) + assert_raises(ValueError, MT19937, [-1]) + assert_raises(ValueError, MT19937, [1, -2, 4294967296]) + + def test_noninstantized_bitgen(self): + assert_raises(ValueError, Generator, MT19937) + + +class TestBinomial: + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. 
+ assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial: + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.integers(-5, -1) < -1) + x = random.integers(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, random.multinomial, 1, p, + float(1)) + + def test_invalid_prob(self): + assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2]) + assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9]) + + def test_invalid_n(self): + assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2]) + assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2]) + + def test_p_non_contiguous(self): + p = np.arange(15.) + p /= np.sum(p[1::3]) + pvals = p[1::3] + random = Generator(MT19937(1432985819)) + non_contig = random.multinomial(100, pvals=pvals) + random = Generator(MT19937(1432985819)) + contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) + assert_array_equal(non_contig, contig) + + def test_multinomial_pvals_float32(self): + x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09, + 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32) + pvals = x / x.sum() + random = Generator(MT19937(1432985819)) + match = r"[\w\s]*pvals array is cast to 64-bit floating" + with pytest.raises(ValueError, match=match): + random.multinomial(1, pvals) + + +class TestMultivariateHypergeometric: + + def setup_method(self): + self.seed = 8675309 + + def test_argument_validation(self): + # Error cases... + + # `colors` must be a 1-d sequence + assert_raises(ValueError, random.multivariate_hypergeometric, + 10, 4) + + # Negative nsample + assert_raises(ValueError, random.multivariate_hypergeometric, + [2, 3, 4], -1) + + # Negative color + assert_raises(ValueError, random.multivariate_hypergeometric, + [-1, 2, 3], 2) + + # nsample exceeds sum(colors) + assert_raises(ValueError, random.multivariate_hypergeometric, + [2, 3, 4], 10) + + # nsample exceeds sum(colors) (edge case of empty colors) + assert_raises(ValueError, random.multivariate_hypergeometric, + [], 1) + + # Validation errors associated with very large values in colors. + assert_raises(ValueError, random.multivariate_hypergeometric, + [999999999, 101], 5, 1, 'marginals') + + int64_info = np.iinfo(np.int64) + max_int64 = int64_info.max + max_int64_index = max_int64 // int64_info.dtype.itemsize + assert_raises(ValueError, random.multivariate_hypergeometric, + [max_int64_index - 100, 101], 5, 1, 'count') + + @pytest.mark.parametrize('method', ['count', 'marginals']) + def test_edge_cases(self, method): + # Set the seed, but in fact, all the results in this test are + # deterministic, so we don't really need this. 
+ random = Generator(MT19937(self.seed)) + + x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method) + assert_array_equal(x, [0, 0, 0]) + + x = random.multivariate_hypergeometric([], 0, method=method) + assert_array_equal(x, []) + + x = random.multivariate_hypergeometric([], 0, size=1, method=method) + assert_array_equal(x, np.empty((1, 0), dtype=np.int64)) + + x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method) + assert_array_equal(x, [0, 0, 0]) + + x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method) + assert_array_equal(x, [3, 0, 0]) + + colors = [1, 1, 0, 1, 1] + x = random.multivariate_hypergeometric(colors, sum(colors), + method=method) + assert_array_equal(x, colors) + + x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3, + method=method) + assert_array_equal(x, [[3, 4, 5]]*3) + + # Cases for nsample: + # nsample < 10 + # 10 <= nsample < colors.sum()/2 + # colors.sum()/2 < nsample < colors.sum() - 10 + # colors.sum() - 10 < nsample < colors.sum() + @pytest.mark.parametrize('nsample', [8, 25, 45, 55]) + @pytest.mark.parametrize('method', ['count', 'marginals']) + @pytest.mark.parametrize('size', [5, (2, 3), 150000]) + def test_typical_cases(self, nsample, method, size): + random = Generator(MT19937(self.seed)) + + colors = np.array([10, 5, 20, 25]) + sample = random.multivariate_hypergeometric(colors, nsample, size, + method=method) + if isinstance(size, int): + expected_shape = (size,) + colors.shape + else: + expected_shape = size + colors.shape + assert_equal(sample.shape, expected_shape) + assert_((sample >= 0).all()) + assert_((sample <= colors).all()) + assert_array_equal(sample.sum(axis=-1), + np.full(size, fill_value=nsample, dtype=int)) + if isinstance(size, int) and size >= 100000: + # This sample is large enough to compare its mean to + # the expected values. + assert_allclose(sample.mean(axis=0), + nsample * colors / colors.sum(), + rtol=1e-3, atol=0.005) + + def test_repeatability1(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5, + method='count') + expected = np.array([[2, 1, 2], + [2, 1, 2], + [1, 1, 3], + [2, 0, 3], + [2, 1, 2]]) + assert_array_equal(sample, expected) + + def test_repeatability2(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([20, 30, 50], 50, + size=5, + method='marginals') + expected = np.array([[ 9, 17, 24], + [ 7, 13, 30], + [ 9, 15, 26], + [ 9, 17, 24], + [12, 14, 24]]) + assert_array_equal(sample, expected) + + def test_repeatability3(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([20, 30, 50], 12, + size=5, + method='marginals') + expected = np.array([[2, 3, 7], + [5, 3, 4], + [2, 5, 5], + [5, 3, 4], + [1, 5, 6]]) + assert_array_equal(sample, expected) + + +class TestSetState: + def setup_method(self): + self.seed = 1234567890 + self.rg = Generator(MT19937(self.seed)) + self.bit_generator = self.rg.bit_generator + self.state = self.bit_generator.state + self.legacy_state = (self.state['bit_generator'], + self.state['state']['key'], + self.state['state']['pos']) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. 
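+        # (Editor's note, hedged: Generator.standard_normal is ziggurat-based
+        #  and keeps no cached second value the way legacy Box-Muller code
+        #  did; the round trip below still verifies that restoring the bit
+        #  generator state reproduces the stream exactly.)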
+ old = self.rg.standard_normal(size=3) + self.bit_generator.state = self.state + new = self.rg.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. + + self.rg.standard_normal() + state = self.bit_generator.state + old = self.rg.standard_normal(size=3) + self.bit_generator.state = state + new = self.rg.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. + self.rg.negative_binomial(0.5, 0.5) + + +class TestIntegers: + rfunc = random.integers + + # valid integer/boolean types + itype = [bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self, endpoint): + assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float) + + def test_bounds_checking(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, lbnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint, + dtype=dt) + + assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1], + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [ubnd], [lbnd], + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, [0], + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [ubnd+1], [ubnd], + endpoint=endpoint, dtype=dt) + + def test_bounds_checking_array(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint) + + assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [lbnd] * 2, + [ubnd + 1] * 2, endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [1] * 2, 0, + endpoint=endpoint, dtype=dt) + + def test_rng_zero_and_extremes(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + is_open = not endpoint + + tgt = ubnd - 1 + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc([tgt], tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000, + endpoint=endpoint, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc([tgt], [tgt + is_open], + size=1000, endpoint=endpoint, dtype=dt), + tgt) + + def test_rng_zero_and_extremes_array(self, endpoint): + size = 1000 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd 
= 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + tgt = ubnd - 1 + assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + def test_full_range(self, endpoint): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + try: + self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_full_range_array(self, endpoint): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + try: + self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self, endpoint): + # Don't use fixed seed + random = Generator(MT19937()) + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16, + endpoint=endpoint, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint, + dtype=bool) + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_scalar_array_equiv(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + size = 1000 + random = Generator(MT19937(1234)) + scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint, + dtype=dt) + + random = Generator(MT19937(1234)) + scalar_array = random.integers([lbnd], [ubnd], size=size, + endpoint=endpoint, dtype=dt) + + random = Generator(MT19937(1234)) + array = random.integers([lbnd] * size, [ubnd] * + size, size=size, endpoint=endpoint, dtype=dt) + assert_array_equal(scalar, scalar_array) + assert_array_equal(scalar, array) + + def test_repeatability(self, endpoint): + # We use a sha256 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. 
+ tgt = {'bool': '053594a9b82d656f967c54869bc6970aa0358cf94ad469c81478459c6a90eee3', + 'int16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', + 'int32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', + 'int64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', + 'int8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1', + 'uint16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', + 'uint32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', + 'uint64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', + 'uint8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1'} + + for dt in self.itype[1:]: + random = Generator(MT19937(1234)) + + # view as little endian for hash + if sys.byteorder == 'little': + val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint, + dtype=dt) + else: + val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint, + dtype=dt).byteswap() + + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(dt).name] == res) + + # bools do not depend on endianness + random = Generator(MT19937(1234)) + val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint, + dtype=bool).view(np.int8) + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(bool).name] == res) + + def test_repeatability_broadcasting(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min + ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + # view as little endian for hash + random = Generator(MT19937(1234)) + val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint, + dtype=dt) + + random = Generator(MT19937(1234)) + val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint, + dtype=dt) + + assert_array_equal(val, val_bc) + + random = Generator(MT19937(1234)) + val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000, + endpoint=endpoint, dtype=dt) + + assert_array_equal(val, val_bc) + + @pytest.mark.parametrize( + 'bound, expected', + [(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612, + 3769704066, 1170797179, 4108474671])), + (2**32, np.array([517043487, 1364798666, 1733884390, 1353720613, + 3769704067, 1170797180, 4108474672])), + (2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673, + 1831631863, 1215661561, 3869512430]))] + ) + def test_repeatability_32bit_boundary(self, bound, expected): + for size in [None, len(expected)]: + random = Generator(MT19937(1234)) + x = random.integers(bound, size=size) + assert_equal(x, expected if size is not None else expected[0]) + + def test_repeatability_32bit_boundary_broadcasting(self): + desired = np.array([[[1622936284, 3620788691, 1659384060], + [1417365545, 760222891, 1909653332], + [3788118662, 660249498, 4092002593]], + [[3625610153, 2979601262, 3844162757], + [ 685800658, 120261497, 2694012896], + [1207779440, 1586594375, 3854335050]], + [[3004074748, 2310761796, 3012642217], + [2067714190, 2786677879, 1363865881], + [ 791663441, 1867303284, 2169727960]], + [[1939603804, 1250951100, 298950036], + [1040128489, 3791912209, 3317053765], + [3155528714, 61360675, 2305155588]], + [[ 817688762, 1335621943, 3288952434], + [1770890872, 1102951817, 1957607470], + [3099996017, 798043451, 48334215]]]) + for size in [None, (5, 3, 3)]: + random = Generator(MT19937(12345)) + x = random.integers([[-1], [0], [1]], + [2**32 - 1, 2**32, 2**32 + 1], + 
size=size) + assert_array_equal(x, desired if size is not None else desired[0]) + + def test_int64_uint64_broadcast_exceptions(self, endpoint): + configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)), + np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0), + (-2**63-1, -2**63-1))} + for dtype in configs: + for config in configs[dtype]: + low, high = config + high = high - endpoint + low_a = np.array([[low]*10]) + high_a = np.array([high] * 10) + assert_raises(ValueError, random.integers, low, high, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low_a, high, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low, high_a, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low_a, high_a, + endpoint=endpoint, dtype=dtype) + + low_o = np.array([[low]*10], dtype=object) + high_o = np.array([high] * 10, dtype=object) + assert_raises(ValueError, random.integers, low_o, high, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low, high_o, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low_o, high_o, + endpoint=endpoint, dtype=dtype) + + def test_int64_uint64_corner_case(self, endpoint): + # When stored in Numpy arrays, `lbnd` is cast + # as np.int64, and `ubnd` is cast as np.uint64. + # Checking whether `lbnd` >= `ubnd` used to be + # done solely via direct comparison, which is incorrect + # because when Numpy tries to compare both numbers, + # it casts both to np.float64 because there is + # no integer superset of np.int64 and np.uint64. However, + # `ubnd` is too large to be represented in np.float64, + # causing it to be rounded down to np.iinfo(np.int64).max, + # leading to a ValueError because `lbnd` now equals + # the new `ubnd`. + + dt = np.int64 + tgt = np.iinfo(np.int64).max + lbnd = np.int64(np.iinfo(np.int64).max) + ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint) + + # None of these function calls should + # generate a ValueError now. 
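+        # (Illustration, hedged: float(2**63 - 1) == float(2**63), both
+        #  rounding to 9223372036854775808.0, so a float64-mediated
+        #  comparison cannot distinguish lbnd from ubnd here; the bounds
+        #  check has to compare them exactly, e.g. as arbitrary-precision
+        #  Python ints.)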
+ actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert_equal(actual, tgt) + + def test_respect_dtype_singleton(self, endpoint): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + dt = np.bool_ if dt is bool else dt + + sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert_equal(sample.dtype, dt) + + for dt in (bool, int): + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + # gh-7284: Ensure that we get Python data types + sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert not hasattr(sample, 'dtype') + assert_equal(type(sample), dt) + + def test_respect_dtype_array(self, endpoint): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + dt = np.bool_ if dt is bool else dt + + sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt) + assert_equal(sample.dtype, dt) + sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint, + dtype=dt) + assert_equal(sample.dtype, dt) + + def test_zero_size(self, endpoint): + # See gh-7203 + for dt in self.itype: + sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt) + assert sample.shape == (3, 0, 4) + assert sample.dtype == dt + assert self.rfunc(0, -10, 0, endpoint=endpoint, + dtype=dt).shape == (0,) + assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_equal(random.integers(0, -10, size=0).shape, (0,)) + assert_equal(random.integers(10, 10, size=0).shape, (0,)) + + def test_error_byteorder(self): + other_byteord_dt = 'i4' + with pytest.raises(ValueError): + random.integers(0, 200, size=10, dtype=other_byteord_dt) + + # chi2max is the maximum acceptable chi-squared value. + @pytest.mark.slow + @pytest.mark.parametrize('sample_size,high,dtype,chi2max', + [(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25 + (5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30 + (10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25 + (50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25 + ]) + def test_integers_small_dtype_chisquared(self, sample_size, high, + dtype, chi2max): + # Regression test for gh-14774. + samples = random.integers(high, size=sample_size, dtype=dtype) + + values, counts = np.unique(samples, return_counts=True) + expected = sample_size / high + chi2 = ((counts - expected)**2 / expected).sum() + assert chi2 < chi2max + + +class TestRandomDist: + # Make sure the random distribution returns the correct value for a + # given seed + + def setup_method(self): + self.seed = 1234567890 + + def test_integers(self): + random = Generator(MT19937(self.seed)) + actual = random.integers(-99, 99, size=(3, 2)) + desired = np.array([[-80, -56], [41, 37], [-83, -16]]) + assert_array_equal(actual, desired) + + def test_integers_masked(self): + # Test masked rejection sampling algorithm to generate array of + # uint32 in an interval. 
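+        # (Sketch of the idea, hedged: draw raw 32-bit words, AND them with
+        #  the smallest all-ones mask covering the interval width, and
+        #  reject-and-redraw any masked value that still falls outside the
+        #  interval; accepted draws are exactly uniform over it.)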
+ random = Generator(MT19937(self.seed)) + actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32) + desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32) + assert_array_equal(actual, desired) + + def test_integers_closed(self): + random = Generator(MT19937(self.seed)) + actual = random.integers(-99, 99, size=(3, 2), endpoint=True) + desired = np.array([[-80, -56], [ 41, 38], [-83, -15]]) + assert_array_equal(actual, desired) + + def test_integers_max_int(self): + # Tests whether integers with endpoint=True can generate the + # maximum allowed Python int that can be converted + # into a C long. Previous implementations of this + # method have thrown an OverflowError when attempting + # to generate this integer. + actual = random.integers(np.iinfo('l').max, np.iinfo('l').max, + endpoint=True) + + desired = np.iinfo('l').max + assert_equal(actual, desired) + + def test_random(self): + random = Generator(MT19937(self.seed)) + actual = random.random((3, 2)) + desired = np.array([[0.096999199829214, 0.707517457682192], + [0.084364834598269, 0.767731206553125], + [0.665069021359413, 0.715487190596693]]) + assert_array_almost_equal(actual, desired, decimal=15) + + random = Generator(MT19937(self.seed)) + actual = random.random() + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_random_float(self): + random = Generator(MT19937(self.seed)) + actual = random.random((3, 2)) + desired = np.array([[0.0969992 , 0.70751746], + [0.08436483, 0.76773121], + [0.66506902, 0.71548719]]) + assert_array_almost_equal(actual, desired, decimal=7) + + def test_random_float_scalar(self): + random = Generator(MT19937(self.seed)) + actual = random.random(dtype=np.float32) + desired = 0.0969992 + assert_array_almost_equal(actual, desired, decimal=7) + + @pytest.mark.parametrize('dtype, uint_view_type', + [(np.float32, np.uint32), + (np.float64, np.uint64)]) + def test_random_distribution_of_lsb(self, dtype, uint_view_type): + random = Generator(MT19937(self.seed)) + sample = random.random(100000, dtype=dtype) + num_ones_in_lsb = np.count_nonzero(sample.view(uint_view_type) & 1) + # The probability of a 1 in the least significant bit is 0.25. + # With a sample size of 100000, the probability that num_ones_in_lsb + # is outside the following range is less than 5e-11. 
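+        # (Why 0.25, hedged: each sample is k * 2**-p for a uniform p-bit
+        #  integer k (p = 24 for float32, 53 for float64). For k in the top
+        #  half of the range all p bits land in the significand, so the
+        #  stored LSB is uniform; smaller k are normalized by left shifts
+        #  that fill the LSB with zeros, giving P(LSB == 1) = 1/2 * 1/2.)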
+ assert 24100 < num_ones_in_lsb < 25900 + + def test_random_unsupported_type(self): + assert_raises(TypeError, random.random, dtype='int32') + + def test_choice_uniform_replace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 4) + desired = np.array([0, 0, 2, 2], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([0, 1, 0, 1], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 3, replace=False) + desired = np.array([2, 0, 3], dtype=np.int64) + assert_array_equal(actual, desired) + actual = random.choice(4, 4, replace=False, shuffle=False) + desired = np.arange(4, dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([0, 2, 3], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['a', 'a', 'c', 'c']) + assert_array_equal(actual, desired) + + def test_choice_multidimensional_default_axis(self): + random = Generator(MT19937(self.seed)) + actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3) + desired = np.array([[0, 1], [0, 1], [4, 5]]) + assert_array_equal(actual, desired) + + def test_choice_multidimensional_custom_axis(self): + random = Generator(MT19937(self.seed)) + actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1) + desired = np.array([[0], [2], [4], [6]]) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(random.choice(2, replace=True))) + assert_(np.isscalar(random.choice(2, replace=False))) + assert_(np.isscalar(random.choice(2, replace=True, p=p))) + assert_(np.isscalar(random.choice(2, replace=False, p=p))) + assert_(np.isscalar(random.choice([1, 2], replace=True))) + assert_(random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, replace=True) is a) + + # Check 0-d array + s = tuple() + assert_(not np.isscalar(random.choice(2, s, replace=True))) + assert_(not np.isscalar(random.choice(2, s, replace=False))) + assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(random.choice(2, s, replace=False, p=p))) + 
assert_(not np.isscalar(random.choice([1, 2], s, replace=True))) + assert_(random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_equal(random.choice(6, s, replace=True).shape, s) + assert_equal(random.choice(6, s, replace=False).shape, s) + assert_equal(random.choice(6, s, replace=True, p=p).shape, s) + assert_equal(random.choice(6, s, replace=False, p=p).shape, s) + assert_equal(random.choice(np.arange(6), s, replace=True).shape, s) + + # Check zero-size + assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) + assert_equal(random.integers(0, -10, size=0).shape, (0,)) + assert_equal(random.integers(10, 10, size=0).shape, (0,)) + assert_equal(random.choice(0, size=0).shape, (0,)) + assert_equal(random.choice([], size=(0,)).shape, (0,)) + assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_raises(ValueError, random.choice, [], 10) + + def test_choice_nan_probabilities(self): + a = np.array([42, 1, 2]) + p = [None, None, None] + assert_raises(ValueError, random.choice, a, p=p) + + def test_choice_p_non_contiguous(self): + p = np.ones(10) / 5 + p[1::2] = 3.0 + random = Generator(MT19937(self.seed)) + non_contig = random.choice(5, 3, p=p[::2]) + random = Generator(MT19937(self.seed)) + contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) + assert_array_equal(non_contig, contig) + + def test_choice_return_type(self): + # gh 9867 + p = np.ones(4) / 4. + actual = random.choice(4, 2) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, replace=False) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, p=p) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, p=p, replace=False) + assert actual.dtype == np.int64 + + def test_choice_large_sample(self): + choice_hash = '4266599d12bfcfb815213303432341c06b4349f5455890446578877bb322e222' + random = Generator(MT19937(self.seed)) + actual = random.choice(10000, 5000, replace=False) + if sys.byteorder != 'little': + actual = actual.byteswap() + res = hashlib.sha256(actual.view(np.int8)).hexdigest() + assert_(choice_hash == res) + + def test_bytes(self): + random = Generator(MT19937(self.seed)) + actual = random.bytes(10) + desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd' + assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), + lambda x: [(i, i) for i in x], + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-11442 + lambda x: (np.asarray([(i, i) for i in x], + [("a", int), ("b", int)]) + .view(np.recarray)), + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", object, (1,)), + ("b", np.int32, (1,))])]: + random = Generator(MT19937(self.seed)) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + random.shuffle(alist) + actual = alist + desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7]) + assert_array_equal(actual, desired) + + def test_shuffle_custom_axis(self): + random = Generator(MT19937(self.seed)) + actual = np.arange(16).reshape((4, 4)) + random.shuffle(actual, axis=1) + 
desired = np.array([[ 0, 3, 1, 2], + [ 4, 7, 5, 6], + [ 8, 11, 9, 10], + [12, 15, 13, 14]]) + assert_array_equal(actual, desired) + random = Generator(MT19937(self.seed)) + actual = np.arange(16).reshape((4, 4)) + random.shuffle(actual, axis=-1) + assert_array_equal(actual, desired) + + def test_shuffle_custom_axis_empty(self): + random = Generator(MT19937(self.seed)) + desired = np.array([]).reshape((0, 6)) + for axis in (0, 1): + actual = np.array([]).reshape((0, 6)) + random.shuffle(actual, axis=axis) + assert_array_equal(actual, desired) + + def test_shuffle_axis_nonsquare(self): + y1 = np.arange(20).reshape(2, 10) + y2 = y1.copy() + random = Generator(MT19937(self.seed)) + random.shuffle(y1, axis=1) + random = Generator(MT19937(self.seed)) + random.shuffle(y2.T) + assert_array_equal(y1, y2) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + a_orig = a.copy() + b_orig = b.copy() + for i in range(50): + random.shuffle(a) + assert_equal( + sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) + random.shuffle(b) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + + def test_shuffle_exceptions(self): + random = Generator(MT19937(self.seed)) + arr = np.arange(10) + assert_raises(np.AxisError, random.shuffle, arr, 1) + arr = np.arange(9).reshape((3, 3)) + assert_raises(np.AxisError, random.shuffle, arr, 3) + assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None)) + arr = [[1, 2, 3], [4, 5, 6]] + assert_raises(NotImplementedError, random.shuffle, arr, 1) + + arr = np.array(3) + assert_raises(TypeError, random.shuffle, arr) + arr = np.ones((3, 2)) + assert_raises(np.AxisError, random.shuffle, arr, 2) + + def test_shuffle_not_writeable(self): + random = Generator(MT19937(self.seed)) + a = np.zeros(5) + a.flags.writeable = False + with pytest.raises(ValueError, match='read-only'): + random.shuffle(a) + + def test_permutation(self): + random = Generator(MT19937(self.seed)) + alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] + actual = random.permutation(alist) + desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7] + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T + actual = random.permutation(arr_2d) + assert_array_equal(actual, np.atleast_2d(desired).T) + + bad_x_str = "abcd" + assert_raises(np.AxisError, random.permutation, bad_x_str) + + bad_x_float = 1.2 + assert_raises(np.AxisError, random.permutation, bad_x_float) + + random = Generator(MT19937(self.seed)) + integer_val = 10 + desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6] + + actual = random.permutation(integer_val) + assert_array_equal(actual, desired) + + def test_permutation_custom_axis(self): + a = np.arange(16).reshape((4, 4)) + desired = np.array([[ 0, 3, 1, 2], + [ 4, 7, 5, 6], + [ 8, 11, 9, 10], + [12, 15, 13, 14]]) + random = Generator(MT19937(self.seed)) + actual = random.permutation(a, axis=1) + assert_array_equal(actual, desired) + random = Generator(MT19937(self.seed)) + actual = random.permutation(a, axis=-1) + assert_array_equal(actual, desired) + + def test_permutation_exceptions(self): + random = Generator(MT19937(self.seed)) + arr = np.arange(10) + assert_raises(np.AxisError, random.permutation, arr, 1) + arr = np.arange(9).reshape((3, 3)) + assert_raises(np.AxisError, random.permutation, arr, 3) + assert_raises(TypeError, random.permutation, arr, slice(1, 2, None)) + + 
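+    # (Editor's note, hedged: shuffle/permutation with an axis argument move
+    #  whole subarrays along that axis as units, whereas permuted, exercised
+    #  below, independently rearranges the contents of each 1-D slice along
+    #  the given axis.)
+ 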
@pytest.mark.parametrize("dtype", [int, object]) + @pytest.mark.parametrize("axis, expected", + [(None, np.array([[3, 7, 0, 9, 10, 11], + [8, 4, 2, 5, 1, 6]])), + (0, np.array([[6, 1, 2, 9, 10, 11], + [0, 7, 8, 3, 4, 5]])), + (1, np.array([[ 5, 3, 4, 0, 2, 1], + [11, 9, 10, 6, 8, 7]]))]) + def test_permuted(self, dtype, axis, expected): + random = Generator(MT19937(self.seed)) + x = np.arange(12).reshape(2, 6).astype(dtype) + random.permuted(x, axis=axis, out=x) + assert_array_equal(x, expected) + + random = Generator(MT19937(self.seed)) + x = np.arange(12).reshape(2, 6).astype(dtype) + y = random.permuted(x, axis=axis) + assert y.dtype == dtype + assert_array_equal(y, expected) + + def test_permuted_with_strides(self): + random = Generator(MT19937(self.seed)) + x0 = np.arange(22).reshape(2, 11) + x1 = x0.copy() + x = x0[:, ::3] + y = random.permuted(x, axis=1, out=x) + expected = np.array([[0, 9, 3, 6], + [14, 20, 11, 17]]) + assert_array_equal(y, expected) + x1[:, ::3] = expected + # Verify that the original x0 was modified in-place as expected. + assert_array_equal(x1, x0) + + def test_permuted_empty(self): + y = random.permuted([]) + assert_array_equal(y, []) + + @pytest.mark.parametrize('outshape', [(2, 3), 5]) + def test_permuted_out_with_wrong_shape(self, outshape): + a = np.array([1, 2, 3]) + out = np.zeros(outshape, dtype=a.dtype) + with pytest.raises(ValueError, match='same shape'): + random.permuted(a, out=out) + + def test_permuted_out_with_wrong_type(self): + out = np.zeros((3, 5), dtype=np.int32) + x = np.ones((3, 5)) + with pytest.raises(TypeError, match='Cannot cast'): + random.permuted(x, axis=1, out=out) + + def test_permuted_not_writeable(self): + x = np.zeros((2, 5)) + x.flags.writeable = False + with pytest.raises(ValueError, match='read-only'): + random.permuted(x, axis=1, out=x) + + def test_beta(self): + random = Generator(MT19937(self.seed)) + actual = random.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.083029353267698e-10, 2.449965303168024e-11], + [2.397085162969853e-02, 3.590779671820755e-08], + [2.830254190078299e-04, 1.744709918330393e-01]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + random = Generator(MT19937(self.seed)) + actual = random.binomial(100.123, .456, size=(3, 2)) + desired = np.array([[42, 41], + [42, 48], + [44, 50]]) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.binomial(100.123, .456) + desired = 42 + assert_array_equal(actual, desired) + + def test_chisquare(self): + random = Generator(MT19937(self.seed)) + actual = random.chisquare(50, size=(3, 2)) + desired = np.array([[32.9850547060149, 39.0219480493301], + [56.2006134779419, 57.3474165711485], + [55.4243733880198, 55.4209797925213]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + random = Generator(MT19937(self.seed)) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.5439892869558927, 0.45601071304410745], + [0.5588917345860708, 0.4411082654139292 ]], + [[0.5632074165063435, 0.43679258349365657], + [0.54862581112627, 0.45137418887373015]], + [[0.49961831357047226, 0.5003816864295278 ], + [0.52374806183482, 0.47625193816517997]]]) + assert_array_almost_equal(actual, desired, decimal=15) + bad_alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, bad_alpha) + + random = Generator(MT19937(self.seed)) + alpha = 
np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha) + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, alpha) + + # gh-15876 + assert_raises(ValueError, random.dirichlet, [[5, 1]]) + assert_raises(ValueError, random.dirichlet, [[5], [1]]) + assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]]) + assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]])) + + def test_dirichlet_alpha_non_contiguous(self): + a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) + alpha = a[::2] + random = Generator(MT19937(self.seed)) + non_contig = random.dirichlet(alpha, size=(3, 2)) + random = Generator(MT19937(self.seed)) + contig = random.dirichlet(np.ascontiguousarray(alpha), + size=(3, 2)) + assert_array_almost_equal(non_contig, contig) + + def test_dirichlet_small_alpha(self): + eps = 1.0e-9 # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc. + alpha = eps * np.array([1., 1.0e-3]) + random = Generator(MT19937(self.seed)) + actual = random.dirichlet(alpha, size=(3, 2)) + expected = np.array([ + [[1., 0.], + [1., 0.]], + [[1., 0.], + [1., 0.]], + [[1., 0.], + [1., 0.]] + ]) + assert_array_almost_equal(actual, expected, decimal=15) + + @pytest.mark.slow + def test_dirichlet_moderately_small_alpha(self): + # Use alpha.max() < 0.1 to trigger stick breaking code path + alpha = np.array([0.02, 0.04, 0.03]) + exact_mean = alpha / alpha.sum() + random = Generator(MT19937(self.seed)) + sample = random.dirichlet(alpha, size=20000000) + sample_mean = sample.mean(axis=0) + assert_allclose(sample_mean, exact_mean, rtol=1e-3) + + # This set of parameters includes inputs with alpha.max() >= 0.1 and + # alpha.max() < 0.1 to exercise both generation methods within the + # dirichlet code. + @pytest.mark.parametrize( + 'alpha', + [[5, 9, 0, 8], + [0.5, 0, 0, 0], + [1, 5, 0, 0, 1.5, 0, 0, 0], + [0.01, 0.03, 0, 0.005], + [1e-5, 0, 0, 0], + [0.002, 0.015, 0, 0, 0.04, 0, 0, 0], + [0.0], + [0, 0, 0]], + ) + def test_dirichlet_multiple_zeros_in_alpha(self, alpha): + alpha = np.array(alpha) + y = random.dirichlet(alpha) + assert_equal(y[alpha == 0], 0.0) + + def test_exponential(self): + random = Generator(MT19937(self.seed)) + actual = random.exponential(1.1234, size=(3, 2)) + desired = np.array([[0.098845481066258, 1.560752510746964], + [0.075730916041636, 1.769098974710777], + [1.488602544592235, 2.49684815275751 ]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(random.exponential(scale=0), 0) + assert_raises(ValueError, random.exponential, scale=-0.) 
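+
+    def test_exponential_inverse_cdf_sketch(self):
+        # Hypothetical illustrative test added by the editor; not part of
+        # upstream NumPy. Inverse-CDF sampling maps a uniform u in [0, 1)
+        # to an Exponential(scale) variate via x = -scale * log(1 - u);
+        # Generator's own exponential path is ziggurat-based, so only the
+        # transform's basic properties are sanity-checked here.
+        random = Generator(MT19937(self.seed))
+        u = random.random(1000)
+        x = -1.1234 * np.log(1.0 - u)
+        assert np.all(x >= 0)
+        assert np.isfinite(x).all()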
+ + def test_f(self): + random = Generator(MT19937(self.seed)) + actual = random.f(12, 77, size=(3, 2)) + desired = np.array([[0.461720027077085, 1.100441958872451], + [1.100337455217484, 0.91421736740018 ], + [0.500811891303113, 0.826802454552058]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + random = Generator(MT19937(self.seed)) + actual = random.gamma(5, 3, size=(3, 2)) + desired = np.array([[ 5.03850858902096, 7.9228656732049 ], + [18.73983605132985, 19.57961681699238], + [18.17897755150825, 18.17653912505234]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + random = Generator(MT19937(self.seed)) + actual = random.geometric(.123456789, size=(3, 2)) + desired = np.array([[1, 11], + [1, 12], + [11, 17]]) + assert_array_equal(actual, desired) + + def test_geometric_exceptions(self): + assert_raises(ValueError, random.geometric, 1.1) + assert_raises(ValueError, random.geometric, [1.1] * 10) + assert_raises(ValueError, random.geometric, -0.1) + assert_raises(ValueError, random.geometric, [-0.1] * 10) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.geometric, np.nan) + assert_raises(ValueError, random.geometric, [np.nan] * 10) + + def test_gumbel(self): + random = Generator(MT19937(self.seed)) + actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[ 4.688397515056245, -0.289514845417841], + [ 4.981176042584683, -0.633224272589149], + [-0.055915275687488, -0.333962478257953]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(random.gumbel(scale=0), 0) + assert_raises(ValueError, random.gumbel, scale=-0.) + + def test_hypergeometric(self): + random = Generator(MT19937(self.seed)) + actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + desired = np.array([[ 9, 9], + [ 9, 9], + [10, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = random.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = random.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + random = Generator(MT19937(self.seed)) + actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-3.156353949272393, 1.195863024830054], + [-3.435458081645966, 1.656882398925444], + [ 0.924824032467446, 1.251116432209336]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(random.laplace(scale=0), 0) + assert_raises(ValueError, random.laplace, scale=-0.) 
+ + def test_logistic(self): + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-4.338584631510999, 1.890171436749954], + [-4.64547787337966 , 2.514545562919217], + [ 1.495389489198666, 1.967827627577474]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + random = Generator(MT19937(self.seed)) + actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[ 0.0268252166335, 13.9534486483053], + [ 0.1204014788936, 2.2422077497792], + [ 4.2484199496128, 12.0093343977523]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_lognormal_0(self): + assert_equal(random.lognormal(sigma=0), 1) + assert_raises(ValueError, random.lognormal, sigma=-0.) + + def test_logseries(self): + random = Generator(MT19937(self.seed)) + actual = random.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[14, 17], + [3, 18], + [5, 1]]) + assert_array_equal(actual, desired) + + def test_logseries_zero(self): + random = Generator(MT19937(self.seed)) + assert random.logseries(0) == 1 + + @pytest.mark.parametrize("value", [np.nextafter(0., -1), 1., np.nan, 5.]) + def test_logseries_exceptions(self, value): + random = Generator(MT19937(self.seed)) + with np.errstate(invalid="ignore"): + with pytest.raises(ValueError): + random.logseries(value) + with pytest.raises(ValueError): + # contiguous path: + random.logseries(np.array([value] * 10)) + with pytest.raises(ValueError): + # non-contiguous path: + random.logseries(np.array([value] * 10)[::2]) + + def test_multinomial(self): + random = Generator(MT19937(self.seed)) + actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2)) + desired = np.array([[[1, 5, 1, 6, 4, 3], + [4, 2, 6, 2, 4, 2]], + [[5, 3, 2, 6, 3, 1], + [4, 4, 0, 2, 3, 7]], + [[6, 3, 1, 5, 3, 2], + [5, 5, 3, 1, 2, 4]]]) + assert_array_equal(actual, desired) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"]) + def test_multivariate_normal(self, method): + random = Generator(MT19937(self.seed)) + mean = (.123456789, 10) + cov = [[1, 0], [0, 1]] + size = (3, 2) + actual = random.multivariate_normal(mean, cov, size, method=method) + desired = np.array([[[-1.747478062846581, 11.25613495182354 ], + [-0.9967333370066214, 10.342002097029821 ]], + [[ 0.7850019631242964, 11.181113712443013 ], + [ 0.8901349653255224, 8.873825399642492 ]], + [[ 0.7130260107430003, 9.551628690083056 ], + [ 0.7127098726541128, 11.991709234143173 ]]]) + + assert_array_almost_equal(actual, desired, decimal=15) + + # Check for default size, was raising deprecation warning + actual = random.multivariate_normal(mean, cov, method=method) + desired = np.array([0.233278563284287, 9.424140804347195]) + assert_array_almost_equal(actual, desired, decimal=15) + # Check that non symmetric covariance input raises exception when + # check_valid='raises' if using default svd method. 
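+        # (Background, hedged: each method factors cov into A @ A.T -- via
+        #  SVD, eigendecomposition, or Cholesky -- and transforms standard
+        #  normal draws z into mean + z @ A.T. Cholesky alone requires a
+        #  strictly positive definite cov, which is why it raises
+        #  LinAlgError on the singular covariance further below; note the
+        #  keyword spelling is check_valid='raise'.)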
+        mean = [0, 0]
+        cov = [[1, 2], [1, 2]]
+        assert_raises(ValueError, random.multivariate_normal, mean, cov,
+                      check_valid='raise')
+
+        # Check that a non-positive-semidefinite covariance warns with
+        # RuntimeWarning
+        cov = [[1, 2], [2, 1]]
+        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
+        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
+                     method='eigh')
+        assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
+                      method='cholesky')
+
+        # and that it doesn't warn when check_valid='ignore'
+        assert_no_warnings(random.multivariate_normal, mean, cov,
+                           check_valid='ignore')
+
+        # and that it raises a ValueError when check_valid='raise'
+        assert_raises(ValueError, random.multivariate_normal, mean, cov,
+                      check_valid='raise')
+        assert_raises(ValueError, random.multivariate_normal, mean, cov,
+                      check_valid='raise', method='eigh')
+
+        # check degenerate samples from singular covariance matrix
+        cov = [[1, 1], [1, 1]]
+        if method in ('svd', 'eigh'):
+            samples = random.multivariate_normal(mean, cov, size=(3, 2),
+                                                 method=method)
+            assert_array_almost_equal(samples[..., 0], samples[..., 1],
+                                      decimal=6)
+        else:
+            assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
+                          method='cholesky')
+
+        cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
+        with suppress_warnings() as sup:
+            random.multivariate_normal(mean, cov, method=method)
+            w = sup.record(RuntimeWarning)
+            assert len(w) == 0
+
+        mu = np.zeros(2)
+        cov = np.eye(2)
+        assert_raises(ValueError, random.multivariate_normal, mean, cov,
+                      check_valid='other')
+        assert_raises(ValueError, random.multivariate_normal,
+                      np.zeros((2, 1, 1)), cov)
+        assert_raises(ValueError, random.multivariate_normal,
+                      mu, np.empty((3, 2)))
+        assert_raises(ValueError, random.multivariate_normal,
+                      mu, np.eye(3))
+
+    @pytest.mark.parametrize('mean, cov', [([0], [[1+1j]]), ([0j], [[1]])])
+    def test_multivariate_normal_disallow_complex(self, mean, cov):
+        random = Generator(MT19937(self.seed))
+        with pytest.raises(TypeError, match="must not be complex"):
+            random.multivariate_normal(mean, cov)
+
+    @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
+    def test_multivariate_normal_basic_stats(self, method):
+        random = Generator(MT19937(self.seed))
+        n_s = 1000
+        mean = np.array([1, 2])
+        cov = np.array([[2, 1], [1, 2]])
+        s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
+        s_center = s - mean
+        cov_emp = (s_center.T @ s_center) / (n_s - 1)
+        # these are pretty loose and are only designed to detect major errors
+        assert np.all(np.abs(s_center.mean(-2)) < 0.1)
+        assert np.all(np.abs(cov_emp - cov) < 0.2)
+
+    def test_negative_binomial(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
+        desired = np.array([[543, 727],
+                            [775, 760],
+                            [600, 674]])
+        assert_array_equal(actual, desired)
+
+    def test_negative_binomial_exceptions(self):
+        with np.errstate(invalid='ignore'):
+            assert_raises(ValueError, random.negative_binomial, 100, np.nan)
+            assert_raises(ValueError, random.negative_binomial, 100,
+                          [np.nan] * 10)
+
+    def test_negative_binomial_p0_exception(self):
+        # Verify that p=0 raises an exception.
+        with assert_raises(ValueError):
+            x = random.negative_binomial(1, 0)
+
+    def test_negative_binomial_invalid_p_n_combination(self):
+        # Verify that values of p and n that would result in an overflow
+        # or infinite loop raise an exception.
+ with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.negative_binomial, 2**62, 0.1) + assert_raises(ValueError, random.negative_binomial, [2**62], [0.1]) + + def test_noncentral_chisquare(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + desired = np.array([[ 1.70561552362133, 15.97378184942111], + [13.71483425173724, 20.17859633310629], + [11.3615477156643 , 3.67891108738029]]) + assert_array_almost_equal(actual, desired, decimal=14) + + actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04], + [1.14554372041263e+00, 1.38187755933435e-03], + [1.90659181905387e+00, 1.21772577941822e+00]]) + assert_array_almost_equal(actual, desired, decimal=14) + + random = Generator(MT19937(self.seed)) + actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[0.82947954590419, 1.80139670767078], + [6.58720057417794, 7.00491463609814], + [6.31101879073157, 6.30982307753005]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[0.060310671139 , 0.23866058175939], + [0.86860246709073, 0.2668510459738 ], + [0.23375780078364, 1.88922102885943]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f_nan(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan) + assert np.isnan(actual) + + def test_normal(self): + random = Generator(MT19937(self.seed)) + actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-3.618412914693162, 2.635726692647081], + [-2.116923463013243, 0.807460983059643], + [ 1.446547137248593, 2.485684213886024]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(random.normal(scale=0), 0) + assert_raises(ValueError, random.normal, scale=-0.) + + def test_pareto(self): + random = Generator(MT19937(self.seed)) + actual = random.pareto(a=.123456789, size=(3, 2)) + desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04], + [7.2640150889064703e-01, 3.4650454783825594e+05], + [4.5852344481994740e+04, 6.5851383009539105e+07]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. 
Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + random = Generator(MT19937(self.seed)) + actual = random.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [0, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('int64').max + lamneg = -1 + assert_raises(ValueError, random.poisson, lamneg) + assert_raises(ValueError, random.poisson, [lamneg] * 10) + assert_raises(ValueError, random.poisson, lambig) + assert_raises(ValueError, random.poisson, [lambig] * 10) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.poisson, np.nan) + assert_raises(ValueError, random.poisson, [np.nan] * 10) + + def test_power(self): + random = Generator(MT19937(self.seed)) + actual = random.power(a=.123456789, size=(3, 2)) + desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02], + [2.482442984543471e-10, 1.527108843266079e-01], + [8.188283434244285e-02, 3.950547209346948e-01]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + random = Generator(MT19937(self.seed)) + actual = random.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[4.19494429102666, 16.66920198906598], + [3.67184544902662, 17.74695521962917], + [16.27935397855501, 21.08355560691792]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(random.rayleigh(scale=0), 0) + assert_raises(ValueError, random.rayleigh, scale=-0.) 
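+
+    # Illustrative sketch (not from the upstream NumPy test suite):
+    # test_pareto above compares in units in the last place (nulp) rather
+    # than decimal places. One ulp around a float x is np.spacing(x), and
+    # np.nextafter moves by exactly one ulp, so a one-ulp neighbour passes
+    # a nulp=1 comparison.
+    def test_nulp_tolerance_sketch(self):
+        x = np.float64(1.0)
+        one_ulp_away = np.nextafter(x, 2.0)
+        np.testing.assert_array_almost_equal_nulp(x, one_ulp_away, nulp=1)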
+
+    def test_standard_cauchy(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_cauchy(size=(3, 2))
+        desired = np.array([[-1.489437778266206, -3.275389641569784],
+                            [ 0.560102864910406, -0.680780916282552],
+                            [-1.314912905226277,  0.295852965660225]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_standard_exponential(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_exponential(size=(3, 2), method='inv')
+        desired = np.array([[0.102031839440643, 1.229350298474972],
+                            [0.088137284693098, 1.459859985522667],
+                            [1.093830802293668, 1.256977002164613]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_standard_exponential_type_error(self):
+        assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
+
+    def test_standard_gamma(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_gamma(shape=3, size=(3, 2))
+        desired = np.array([[0.62970724056362, 1.22379851271008],
+                            [3.899412530884  , 4.12479964250139],
+                            [3.74994102464584, 3.74929307690815]])
+        assert_array_almost_equal(actual, desired, decimal=14)
+
+    def test_standard_gamma_scalar_float(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_gamma(3, dtype=np.float32)
+        desired = 2.9242148399353027
+        assert_array_almost_equal(actual, desired, decimal=6)
+
+    def test_standard_gamma_float(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.standard_gamma(shape=3, size=(3, 2))
+        desired = np.array([[0.62971, 1.2238 ],
+                            [3.89941, 4.1248 ],
+                            [3.74994, 3.74929]])
+        assert_array_almost_equal(actual, desired, decimal=5)
+
+    def test_standard_gamma_float_out(self):
+        actual = np.zeros((3, 2), dtype=np.float32)
+        random = Generator(MT19937(self.seed))
+        random.standard_gamma(10.0, out=actual, dtype=np.float32)
+        desired = np.array([[10.14987,  7.87012],
+                            [ 9.46284, 12.56832],
+                            [13.82495,  7.81533]], dtype=np.float32)
+        assert_array_almost_equal(actual, desired, decimal=5)
+
+        random = Generator(MT19937(self.seed))
+        random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
+        assert_array_almost_equal(actual, desired, decimal=5)
+
+    def test_standard_gamma_unknown_type(self):
+        assert_raises(TypeError, random.standard_gamma, 1.,
+                      dtype='int32')
+
+    def test_out_size_mismatch(self):
+        out = np.zeros(10)
+        assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
+                      out=out)
+        assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
+                      out=out)
+
+    def test_standard_gamma_0(self):
+        assert_equal(random.standard_gamma(shape=0), 0)
+        assert_raises(ValueError, random.standard_gamma, shape=-0.)
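+
+    # Illustrative sketch (not from the upstream NumPy test suite): the
+    # out= keyword exercised in test_standard_gamma_float_out above fills a
+    # preallocated array in place, and the call returns that same array
+    # object, which is why those tests can inspect `actual` without
+    # capturing a return value.
+    def test_standard_gamma_out_identity_sketch(self):
+        random = Generator(MT19937(self.seed))
+        buf = np.empty((3, 2))
+        returned = random.standard_gamma(3.0, out=buf)
+        assert returned is buf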
+ + def test_standard_normal(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_normal(size=(3, 2)) + desired = np.array([[-1.870934851846581, 1.25613495182354 ], + [-1.120190126006621, 0.342002097029821], + [ 0.661545174124296, 1.181113712443012]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_normal_unsupported_type(self): + assert_raises(TypeError, random.standard_normal, dtype=np.int32) + + def test_standard_t(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_t(df=10, size=(3, 2)) + desired = np.array([[-1.484666193042647, 0.30597891831161 ], + [ 1.056684299648085, -0.407312602088507], + [ 0.130704414281157, -2.038053410490321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + random = Generator(MT19937(self.seed)) + actual = random.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[ 7.86664070590917, 13.6313848513185 ], + [ 7.68152445215983, 14.36169131136546], + [13.16105603911429, 13.72341621856971]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + random = Generator(MT19937(self.seed)) + actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[2.13306255040998 , 7.816987531021207], + [2.015436610109887, 8.377577533009589], + [7.421792588856135, 7.891185744455209]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_uniform_zero_range(self): + func = random.uniform + result = func(1.5, 1.5) + assert_allclose(result, 1.5) + result = func([0.0, np.pi], [0.0, np.pi]) + assert_allclose(result, [0.0, np.pi]) + result = func([[2145.12], [2145.12]], [2145.12, 2145.12]) + assert_allclose(result, 2145.12 + np.zeros((2, 2))) + + def test_uniform_neg_range(self): + func = random.uniform + assert_raises(ValueError, func, 2, 1) + assert_raises(ValueError, func, [1, 2], [1, 1]) + assert_raises(ValueError, func, [[0, 1],[2, 3]], 2) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. 
+ # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, random.uniform, throwing_float, + throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + random = Generator(MT19937(self.seed)) + actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[ 1.107972248690106, 2.841536476232361], + [ 1.832602376042457, 1.945511926976032], + [-0.260147475776542, 2.058047492231698]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + random = Generator(MT19937(self.seed)) + r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + assert_(np.isfinite(r).all()) + + def test_vonmises_nan(self): + random = Generator(MT19937(self.seed)) + r = random.vonmises(mu=0., kappa=np.nan) + assert_(np.isnan(r)) + + @pytest.mark.parametrize("kappa", [1e4, 1e15]) + def test_vonmises_large_kappa(self, kappa): + random = Generator(MT19937(self.seed)) + rs = RandomState(random.bit_generator) + state = random.bit_generator.state + + random_state_vals = rs.vonmises(0, kappa, size=10) + random.bit_generator.state = state + gen_vals = random.vonmises(0, kappa, size=10) + if kappa < 1e6: + assert_allclose(random_state_vals, gen_vals) + else: + assert np.all(random_state_vals != gen_vals) + + @pytest.mark.parametrize("mu", [-7., -np.pi, -3.1, np.pi, 3.2]) + @pytest.mark.parametrize("kappa", [1e-9, 1e-6, 1, 1e3, 1e15]) + def test_vonmises_large_kappa_range(self, mu, kappa): + random = Generator(MT19937(self.seed)) + r = random.vonmises(mu, kappa, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_wald(self): + random = Generator(MT19937(self.seed)) + actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[0.26871721804551, 3.2233942732115 ], + [2.20328374987066, 2.40958405189353], + [2.07093587449261, 0.73073890064369]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_weibull(self): + random = Generator(MT19937(self.seed)) + actual = random.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.138613914769468, 1.306463419753191], + [0.111623365934763, 1.446570494646721], + [1.257145775276011, 1.914247725027957]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + random = Generator(MT19937(self.seed)) + assert_equal(random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, random.weibull, a=-0.) 
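+
+    # Illustrative sketch (not from the upstream NumPy test suite):
+    # test_vonmises_large_kappa above shares one bit generator between a
+    # RandomState and a Generator and replays it via bit_generator.state.
+    # Saving and restoring that state reproduces the underlying stream.
+    def test_bit_generator_state_replay_sketch(self):
+        random = Generator(MT19937(self.seed))
+        state = random.bit_generator.state
+        first = random.standard_normal(4)
+        random.bit_generator.state = state
+        assert_array_equal(first, random.standard_normal(4))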
+ + def test_zipf(self): + random = Generator(MT19937(self.seed)) + actual = random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[ 1, 1], + [ 10, 867], + [354, 2]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + def setup_method(self): + self.seed = 123456789 + + def test_uniform(self): + random = Generator(MT19937(self.seed)) + low = [0] + high = [1] + uniform = random.uniform + desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095]) + + random = Generator(MT19937(self.seed)) + actual = random.uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + random = Generator(MT19937(self.seed)) + actual = random.uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + random = Generator(MT19937(self.seed)) + desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097]) + + random = Generator(MT19937(self.seed)) + actual = random.normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.normal, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + normal = random.normal + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455]) + + random = Generator(MT19937(self.seed)) + beta = random.beta + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + random = Generator(MT19937(self.seed)) + actual = random.beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + actual = random.exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + std_gamma = random.standard_gamma + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258]) + + random = Generator(MT19937(self.seed)) + gamma = random.gamma + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + gamma = random.gamma + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + desired = np.array([0.07765056244107, 
7.72951397913186, 0.05786093891763]) + + random = Generator(MT19937(self.seed)) + f = random.f + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + random = Generator(MT19937(self.seed)) + f = random.f + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629]) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) + + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + random = Generator(MT19937(self.seed)) + desired = np.array([0.04714867120827, 0.1239390327694]) + actual = random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589]) + + random = Generator(MT19937(self.seed)) + actual = random.chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399]) + + random = Generator(MT19937(self.seed)) + nonc_chi = random.noncentral_chisquare + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_chi = random.noncentral_chisquare + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983]) + + random = Generator(MT19937(self.seed)) + actual = random.standard_t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.standard_t, 
bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326]) + + random = Generator(MT19937(self.seed)) + actual = random.vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa) + + random = Generator(MT19937(self.seed)) + actual = random.vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013]) + + random = Generator(MT19937(self.seed)) + actual = random.pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + actual = random.weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807]) + + random = Generator(MT19937(self.seed)) + actual = random.power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202]) + + random = Generator(MT19937(self.seed)) + laplace = random.laplace + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + laplace = random.laplace + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081]) + + random = Generator(MT19937(self.seed)) + gumbel = random.gumbel + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + gumbel = random.gumbel + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397]) + + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.logistic, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.logistic, loc, bad_scale * 3) + assert_equal(random.logistic(1.0, 0.0), 1.0) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276]) + + random = Generator(MT19937(self.seed)) + lognormal = random.lognormal + actual = 
lognormal(mean * 3, sigma)
+        assert_array_almost_equal(actual, desired, decimal=14)
+        assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
+
+        random = Generator(MT19937(self.seed))
+        actual = random.lognormal(mean, sigma * 3)
+        assert_array_almost_equal(actual, desired, decimal=14)
+        assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
+
+    def test_rayleigh(self):
+        scale = [1]
+        bad_scale = [-1]
+        desired = np.array(
+            [1.1597068009872629,
+             0.6539188836253857,
+             1.1981526554349398]
+        )
+
+        random = Generator(MT19937(self.seed))
+        actual = random.rayleigh(scale * 3)
+        assert_array_almost_equal(actual, desired, decimal=14)
+        assert_raises(ValueError, random.rayleigh, bad_scale * 3)
+
+    def test_wald(self):
+        mean = [0.5]
+        scale = [1]
+        bad_mean = [0]
+        bad_scale = [-2]
+        desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
+
+        random = Generator(MT19937(self.seed))
+        actual = random.wald(mean * 3, scale)
+        assert_array_almost_equal(actual, desired, decimal=14)
+        assert_raises(ValueError, random.wald, bad_mean * 3, scale)
+        assert_raises(ValueError, random.wald, mean * 3, bad_scale)
+
+        random = Generator(MT19937(self.seed))
+        actual = random.wald(mean, scale * 3)
+        assert_array_almost_equal(actual, desired, decimal=14)
+        assert_raises(ValueError, random.wald, bad_mean, scale * 3)
+        assert_raises(ValueError, random.wald, mean, bad_scale * 3)
+
+    def test_triangular(self):
+        left = [1]
+        right = [3]
+        mode = [2]
+        bad_left_one = [3]
+        bad_mode_one = [4]
+        bad_left_two, bad_mode_two = right * 2
+        desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
+
+        random = Generator(MT19937(self.seed))
+        triangular = random.triangular
+        actual = triangular(left * 3, mode, right)
+        assert_array_almost_equal(actual, desired, decimal=14)
+        assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
+        assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
+        assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
+                      right)
+
+        random = Generator(MT19937(self.seed))
+        triangular = random.triangular
+        actual = triangular(left, mode * 3, right)
+        assert_array_almost_equal(actual, desired, decimal=14)
+        assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
+        assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
+        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
+                      right)
+
+        random = Generator(MT19937(self.seed))
+        triangular = random.triangular
+        actual = triangular(left, mode, right * 3)
+        assert_array_almost_equal(actual, desired, decimal=14)
+        assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
+        assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
+        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
+                      right * 3)
+
+        assert_raises(ValueError, triangular, 10., 0., 20.)
+        assert_raises(ValueError, triangular, 10., 25., 20.)
+        assert_raises(ValueError, triangular, 10., 10., 10.)
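+
+    # Illustrative sketch (not from the upstream NumPy test suite): the
+    # `arg * 3` idiom used throughout this class is plain Python list
+    # repetition ([0] * 3 == [0, 0, 0]); the remaining length-1 argument
+    # then broadcasts against it, so each call above yields three variates.
+    def test_param_broadcast_shape_sketch(self):
+        random = Generator(MT19937(self.seed))
+        out = random.normal([0] * 3, [1])
+        assert out.shape == (3,)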
+ + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([0, 0, 1]) + + random = Generator(MT19937(self.seed)) + binom = random.binomial + actual = binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n * 3, p) + assert_raises(ValueError, binom, n * 3, bad_p_one) + assert_raises(ValueError, binom, n * 3, bad_p_two) + + random = Generator(MT19937(self.seed)) + actual = random.binomial(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n, p * 3) + assert_raises(ValueError, binom, n, bad_p_one * 3) + assert_raises(ValueError, binom, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([0, 2, 1], dtype=np.int64) + + random = Generator(MT19937(self.seed)) + neg_binom = random.negative_binomial + actual = neg_binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n * 3, p) + assert_raises(ValueError, neg_binom, n * 3, bad_p_one) + assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + + random = Generator(MT19937(self.seed)) + neg_binom = random.negative_binomial + actual = neg_binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n, p * 3) + assert_raises(ValueError, neg_binom, n, bad_p_one * 3) + assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + + def test_poisson(self): + + lam = [1] + bad_lam_one = [-1] + desired = np.array([0, 0, 3]) + + random = Generator(MT19937(self.seed)) + max_lam = random._poisson_lam_max + bad_lam_two = [max_lam * 2] + poisson = random.poisson + actual = poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, poisson, bad_lam_one * 3) + assert_raises(ValueError, poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + desired = np.array([1, 8, 1]) + + random = Generator(MT19937(self.seed)) + zipf = random.zipf + actual = zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, zipf, np.nan) + assert_raises(ValueError, zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + desired = np.array([1, 1, 3]) + + random = Generator(MT19937(self.seed)) + geometric = random.geometric + actual = geometric(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, geometric, bad_p_one * 3) + assert_raises(ValueError, geometric, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [-1] + bad_nsample_two = [4] + desired = np.array([0, 0, 1]) + + random = Generator(MT19937(self.seed)) + actual = random.hypergeometric(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two) + + random = Generator(MT19937(self.seed)) + actual = random.hypergeometric(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample) + 
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two) + + random = Generator(MT19937(self.seed)) + hypergeom = random.hypergeometric + actual = hypergeom(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + + assert_raises(ValueError, hypergeom, -1, 10, 20) + assert_raises(ValueError, hypergeom, 10, -1, 20) + assert_raises(ValueError, hypergeom, 10, 10, -1) + assert_raises(ValueError, hypergeom, 10, 10, 25) + + # ValueError for arguments that are too big. + assert_raises(ValueError, hypergeom, 2**30, 10, 20) + assert_raises(ValueError, hypergeom, 999, 2**31, 50) + assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + desired = np.array([1, 1, 1]) + + random = Generator(MT19937(self.seed)) + logseries = random.logseries + actual = logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, logseries, bad_p_one * 3) + assert_raises(ValueError, logseries, bad_p_two * 3) + + def test_multinomial(self): + random = Generator(MT19937(self.seed)) + actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2)) + desired = np.array([[[0, 0, 2, 1, 2, 0], + [2, 3, 6, 4, 2, 3]], + [[1, 0, 1, 0, 2, 1], + [7, 2, 2, 1, 4, 4]], + [[0, 2, 0, 1, 2, 0], + [3, 2, 3, 3, 4, 5]]], dtype=np.int64) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.multinomial([5, 20], [1 / 6.] * 6) + desired = np.array([[0, 0, 2, 1, 2, 0], + [2, 3, 6, 4, 2, 3]], dtype=np.int64) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.multinomial([5, 20], [[1 / 6.] * 6] * 2) + desired = np.array([[0, 0, 2, 1, 2, 0], + [2, 3, 6, 4, 2, 3]], dtype=np.int64) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.multinomial([[5], [20]], [[1 / 6.] 
* 6] * 2) + desired = np.array([[[0, 0, 2, 1, 2, 0], + [0, 0, 2, 1, 1, 1]], + [[4, 2, 3, 3, 5, 3], + [7, 2, 2, 1, 4, 4]]], dtype=np.int64) + assert_array_equal(actual, desired) + + @pytest.mark.parametrize("n", [10, + np.array([10, 10]), + np.array([[[10]], [[10]]]) + ] + ) + def test_multinomial_pval_broadcast(self, n): + random = Generator(MT19937(self.seed)) + pvals = np.array([1 / 4] * 4) + actual = random.multinomial(n, pvals) + n_shape = tuple() if isinstance(n, int) else n.shape + expected_shape = n_shape + (4,) + assert actual.shape == expected_shape + pvals = np.vstack([pvals, pvals]) + actual = random.multinomial(n, pvals) + expected_shape = np.broadcast_shapes(n_shape, pvals.shape[:-1]) + (4,) + assert actual.shape == expected_shape + + pvals = np.vstack([[pvals], [pvals]]) + actual = random.multinomial(n, pvals) + expected_shape = np.broadcast_shapes(n_shape, pvals.shape[:-1]) + assert actual.shape == expected_shape + (4,) + actual = random.multinomial(n, pvals, size=(3, 2) + expected_shape) + assert actual.shape == (3, 2) + expected_shape + (4,) + + with pytest.raises(ValueError): + # Ensure that size is not broadcast + actual = random.multinomial(n, pvals, size=(1,) * 6) + + def test_invalid_pvals_broadcast(self): + random = Generator(MT19937(self.seed)) + pvals = [[1 / 6] * 6, [1 / 4] * 6] + assert_raises(ValueError, random.multinomial, 1, pvals) + assert_raises(ValueError, random.multinomial, 6, 0.5) + + def test_empty_outputs(self): + random = Generator(MT19937(self.seed)) + actual = random.multinomial(np.empty((10, 0, 6), "i8"), [1 / 6] * 6) + assert actual.shape == (10, 0, 6, 6) + actual = random.multinomial(12, np.empty((10, 0, 10))) + assert actual.shape == (10, 0, 10) + actual = random.multinomial(np.empty((3, 0, 7), "i8"), + np.empty((3, 0, 7, 4))) + assert actual.shape == (3, 0, 7, 4) + + +@pytest.mark.skipif(IS_WASM, reason="can't start thread") +class TestThread: + # make sure each state produces the same sequence even in threads + def setup_method(self): + self.seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(Generator(MT19937(s)), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(Generator(MT19937(s)), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + def test_normal(self): + def gen_random(state, out): + out[...] = state.normal(size=10000) + + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1 / 6.] 
* 6, size=10000) + + self.check_function(gen_random, sz=(10000, 6)) + + +# See Issue #4263 +class TestSingleEltArrayInput: + def setup_method(self): + self.argOne = np.array([2]) + self.argTwo = np.array([3]) + self.argThree = np.array([4]) + self.tgtShape = (1,) + + def test_one_arg_funcs(self): + funcs = (random.exponential, random.standard_gamma, + random.chisquare, random.standard_t, + random.pareto, random.weibull, + random.power, random.rayleigh, + random.poisson, random.zipf, + random.geometric, random.logseries) + + probfuncs = (random.geometric, random.logseries) + + for func in funcs: + if func in probfuncs: # p < 1.0 + out = func(np.array([0.5])) + + else: + out = func(self.argOne) + + assert_equal(out.shape, self.tgtShape) + + def test_two_arg_funcs(self): + funcs = (random.uniform, random.normal, + random.beta, random.gamma, + random.f, random.noncentral_chisquare, + random.vonmises, random.laplace, + random.gumbel, random.logistic, + random.lognormal, random.wald, + random.binomial, random.negative_binomial) + + probfuncs = (random.binomial, random.negative_binomial) + + for func in funcs: + if func in probfuncs: # p <= 1 + argTwo = np.array([0.5]) + + else: + argTwo = self.argTwo + + out = func(self.argOne, argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, argTwo[0]) + assert_equal(out.shape, self.tgtShape) + + def test_integers(self, endpoint): + itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + func = random.integers + high = np.array([1]) + low = np.array([0]) + + for dt in itype: + out = func(low, high, endpoint=endpoint, dtype=dt) + assert_equal(out.shape, self.tgtShape) + + out = func(low[0], high, endpoint=endpoint, dtype=dt) + assert_equal(out.shape, self.tgtShape) + + out = func(low, high[0], endpoint=endpoint, dtype=dt) + assert_equal(out.shape, self.tgtShape) + + def test_three_arg_funcs(self): + funcs = [random.noncentral_f, random.triangular, + random.hypergeometric] + + for func in funcs: + out = func(self.argOne, self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, self.argTwo[0], self.argThree) + assert_equal(out.shape, self.tgtShape) + + +@pytest.mark.parametrize("config", JUMP_TEST_DATA) +def test_jumped(config): + # Each config contains the initial seed, a number of raw steps + # the sha256 hashes of the initial and the final states' keys and + # the position of the initial and the final state. + # These were produced using the original C implementation. 
+ seed = config["seed"] + steps = config["steps"] + + mt19937 = MT19937(seed) + # Burn step + mt19937.random_raw(steps) + key = mt19937.state["state"]["key"] + if sys.byteorder == 'big': + key = key.byteswap() + sha256 = hashlib.sha256(key) + assert mt19937.state["state"]["pos"] == config["initial"]["pos"] + assert sha256.hexdigest() == config["initial"]["key_sha256"] + + jumped = mt19937.jumped() + key = jumped.state["state"]["key"] + if sys.byteorder == 'big': + key = key.byteswap() + sha256 = hashlib.sha256(key) + assert jumped.state["state"]["pos"] == config["jumped"]["pos"] + assert sha256.hexdigest() == config["jumped"]["key_sha256"] + + +def test_broadcast_size_error(): + mu = np.ones(3) + sigma = np.ones((4, 3)) + size = (10, 4, 2) + assert random.normal(mu, sigma, size=(5, 4, 3)).shape == (5, 4, 3) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=size) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=(1, 3)) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=(4, 1, 1)) + # 1 arg + shape = np.ones((4, 3)) + with pytest.raises(ValueError): + random.standard_gamma(shape, size=size) + with pytest.raises(ValueError): + random.standard_gamma(shape, size=(3,)) + with pytest.raises(ValueError): + random.standard_gamma(shape, size=3) + # Check out + out = np.empty(size) + with pytest.raises(ValueError): + random.standard_gamma(shape, out=out) + + # 2 arg + with pytest.raises(ValueError): + random.binomial(1, [0.3, 0.7], size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], 0.3, size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], [0.3, 0.7], size=(2, 1)) + with pytest.raises(ValueError): + random.multinomial([2, 2], [.3, .7], size=(2, 1)) + + # 3 arg + a = random.chisquare(5, size=3) + b = random.chisquare(5, size=(4, 3)) + c = random.chisquare(5, size=(5, 4, 3)) + assert random.noncentral_f(a, b, c).shape == (5, 4, 3) + with pytest.raises(ValueError, match=r"Output size \(6, 5, 1, 1\) is"): + random.noncentral_f(a, b, c, size=(6, 5, 1, 1)) + + +def test_broadcast_size_scalar(): + mu = np.ones(3) + sigma = np.ones(3) + random.normal(mu, sigma, size=3) + with pytest.raises(ValueError): + random.normal(mu, sigma, size=2) + + +def test_ragged_shuffle(): + # GH 18142 + seq = [[], [], 1] + gen = Generator(MT19937(0)) + assert_no_warnings(gen.shuffle, seq) + assert seq == [1, [], []] + + +@pytest.mark.parametrize("high", [-2, [-2]]) +@pytest.mark.parametrize("endpoint", [True, False]) +def test_single_arg_integer_exception(high, endpoint): + # GH 14333 + gen = Generator(MT19937(0)) + msg = 'high < 0' if endpoint else 'high <= 0' + with pytest.raises(ValueError, match=msg): + gen.integers(high, endpoint=endpoint) + msg = 'low > high' if endpoint else 'low >= high' + with pytest.raises(ValueError, match=msg): + gen.integers(-1, high, endpoint=endpoint) + with pytest.raises(ValueError, match=msg): + gen.integers([-1], high, endpoint=endpoint) + + +@pytest.mark.parametrize("dtype", ["f4", "f8"]) +def test_c_contig_req_out(dtype): + # GH 18704 + out = np.empty((2, 3), order="F", dtype=dtype) + shape = [1, 2, 3] + with pytest.raises(ValueError, match="Supplied output array"): + random.standard_gamma(shape, out=out, dtype=dtype) + with pytest.raises(ValueError, match="Supplied output array"): + random.standard_gamma(shape, out=out, size=out.shape, dtype=dtype) + + +@pytest.mark.parametrize("dtype", ["f4", "f8"]) +@pytest.mark.parametrize("order", ["F", "C"]) +@pytest.mark.parametrize("dist", [random.standard_normal, 
random.random]) +def test_contig_req_out(dist, order, dtype): + # GH 18704 + out = np.empty((2, 3), dtype=dtype, order=order) + variates = dist(out=out, dtype=dtype) + assert variates is out + variates = dist(out=out, dtype=dtype, size=out.shape) + assert variates is out + + +def test_generator_ctor_old_style_pickle(): + rg = np.random.Generator(np.random.PCG64DXSM(0)) + rg.standard_normal(1) + # Directly call reduce which is used in pickling + ctor, args, state_a = rg.__reduce__() + # Simulate unpickling an old pickle that only has the name + assert args[:1] == ("PCG64DXSM",) + b = ctor(*args[:1]) + b.bit_generator.state = state_a + state_b = b.bit_generator.state + assert state_a == state_b diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py new file mode 100644 index 00000000..f16af2b2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py @@ -0,0 +1,165 @@ +from numpy.testing import (assert_, assert_array_equal) +import numpy as np +import pytest +from numpy.random import Generator, MT19937 + + +class TestRegression: + + def setup_method(self): + self.mt19937 = Generator(MT19937(121263137472525314065)) + + def test_vonmises_range(self): + # Make sure generated random variables are in [-pi, pi]. + # Regression test for ticket #986. + for mu in np.linspace(-7., 7., 5): + r = self.mt19937.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + assert_(np.all(self.mt19937.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(self.mt19937.hypergeometric(18, 3, 11, size=10) > 0)) + + # Test for ticket #5623 + args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems + assert_(self.mt19937.hypergeometric(*args) > 0) + + def test_logseries_convergence(self): + # Test for ticket #923 + N = 1000 + rvsn = self.mt19937.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / N + msg = f'Frequency was {freq:f}, should be > 0.45' + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / N + msg = f'Frequency was {freq:f}, should be < 0.23' + assert_(freq < 0.23, msg) + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + mt19937 = Generator(MT19937(12345)) + shuffled = np.array(t, dtype=object) + mt19937.shuffle(shuffled) + expected = np.array([t[2], t[0], t[3], t[1]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) + + def test_call_within_randomstate(self): + # Check that custom BitGenerator does not call into global state + res = np.array([1, 8, 0, 1, 5, 3, 3, 8, 1, 4]) + for i in range(3): + mt19937 = Generator(MT19937(i)) + m = Generator(MT19937(4321)) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. 
+ self.mt19937.multivariate_normal([0], [[0]], size=1) + self.mt19937.multivariate_normal([0], [[0]], size=np.int_(1)) + self.mt19937.multivariate_normal([0], [[0]], size=np.int64(1)) + + def test_beta_small_parameters(self): + # Test that beta with small a and b parameters does not produce + # NaNs due to roundoff errors causing 0 / 0, gh-5851 + x = self.mt19937.beta(0.0001, 0.0001, size=100) + assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta') + + def test_beta_very_small_parameters(self): + # gh-24203: beta would hang with very small parameters. + self.mt19937.beta(1e-49, 1e-40) + + def test_beta_ridiculously_small_parameters(self): + # gh-24266: beta would generate nan when the parameters + # were subnormal or a small multiple of the smallest normal. + tiny = np.finfo(1.0).tiny + x = self.mt19937.beta(tiny/32, tiny/40, size=50) + assert not np.any(np.isnan(x)) + + def test_choice_sum_of_probs_tolerance(self): + # The sum of probs should be 1.0 with some tolerance. + # For low precision dtypes the tolerance was too tight. + # See numpy github issue 6123. + a = [1, 2, 3] + counts = [4, 4, 2] + for dt in np.float16, np.float32, np.float64: + probs = np.array(counts, dtype=dt) / sum(counts) + c = self.mt19937.choice(a, p=probs) + assert_(c in a) + with pytest.raises(ValueError): + self.mt19937.choice(a, p=probs*0.9) + + def test_shuffle_of_array_of_different_length_strings(self): + # Test that permuting an array of different length strings + # will not cause a segfault on garbage collection + # Tests gh-7710 + + a = np.array(['a', 'a' * 1000]) + + for _ in range(100): + self.mt19937.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_shuffle_of_array_of_objects(self): + # Test that permuting an array of objects will not cause + # a segfault on garbage collection. + # See gh-7719 + a = np.array([np.arange(1), np.arange(4)], dtype=object) + + for _ in range(1000): + self.mt19937.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_permutation_subclass(self): + + class N(np.ndarray): + pass + + mt19937 = Generator(MT19937(1)) + orig = np.arange(3).view(N) + perm = mt19937.permutation(orig) + assert_array_equal(perm, np.array([2, 0, 1])) + assert_array_equal(orig, np.arange(3).view(N)) + + class M: + a = np.arange(5) + + def __array__(self): + return self.a + + mt19937 = Generator(MT19937(1)) + m = M() + perm = mt19937.permutation(m) + assert_array_equal(perm, np.array([4, 1, 3, 0, 2])) + assert_array_equal(m.__array__(), np.arange(5)) + + def test_gamma_0(self): + assert self.mt19937.standard_gamma(0.0) == 0.0 + assert_array_equal(self.mt19937.standard_gamma([0.0]), 0.0) + + actual = self.mt19937.standard_gamma([0.0], dtype='float') + expected = np.array([0.], dtype=np.float32) + assert_array_equal(actual, expected) + + def test_geometric_tiny_prob(self): + # Regression test for gh-17007. + # When p = 1e-30, the probability that a sample will exceed 2**63-1 + # is 0.9999999999907766, so we expect the result to be all 2**63-1. 
+ assert_array_equal(self.mt19937.geometric(p=1e-30, size=3), + np.iinfo(np.int64).max) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_random.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_random.py new file mode 100644 index 00000000..3d081fe1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_random.py @@ -0,0 +1,1750 @@ +import warnings + +import pytest + +import numpy as np +from numpy.testing import ( + assert_, assert_raises, assert_equal, assert_warns, + assert_no_warnings, assert_array_equal, assert_array_almost_equal, + suppress_warnings, IS_WASM + ) +from numpy import random +import sys + + +class TestSeed: + def test_scalar(self): + s = np.random.RandomState(0) + assert_equal(s.randint(1000), 684) + s = np.random.RandomState(4294967295) + assert_equal(s.randint(1000), 419) + + def test_array(self): + s = np.random.RandomState(range(10)) + assert_equal(s.randint(1000), 468) + s = np.random.RandomState(np.arange(10)) + assert_equal(s.randint(1000), 468) + s = np.random.RandomState([0]) + assert_equal(s.randint(1000), 973) + s = np.random.RandomState([4294967295]) + assert_equal(s.randint(1000), 265) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, np.random.RandomState, -0.5) + assert_raises(ValueError, np.random.RandomState, -1) + + def test_invalid_array(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, np.random.RandomState, [-0.5]) + assert_raises(ValueError, np.random.RandomState, [-1]) + assert_raises(ValueError, np.random.RandomState, [4294967296]) + assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296]) + assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296]) + + def test_invalid_array_shape(self): + # gh-9832 + assert_raises(ValueError, np.random.RandomState, + np.array([], dtype=np.int64)) + assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]]) + assert_raises(ValueError, np.random.RandomState, [[1, 2, 3], + [4, 5, 6]]) + + +class TestBinomial: + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. 
+ assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial: + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.randint(-5, -1) < -1) + x = random.randint(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, np.random.multinomial, 1, p, + float(1)) + + def test_multidimensional_pvals(self): + assert_raises(ValueError, np.random.multinomial, 10, [[0, 1]]) + assert_raises(ValueError, np.random.multinomial, 10, [[0], [1]]) + assert_raises(ValueError, np.random.multinomial, 10, [[[0], [1]], [[1], [0]]]) + assert_raises(ValueError, np.random.multinomial, 10, np.array([[0, 1], [1, 0]])) + + +class TestSetState: + def setup_method(self): + self.seed = 1234567890 + self.prng = random.RandomState(self.seed) + self.state = self.prng.get_state() + + def test_basic(self): + old = self.prng.tomaxint(16) + self.prng.set_state(self.state) + new = self.prng.tomaxint(16) + assert_(np.all(old == new)) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. + old = self.prng.standard_normal(size=3) + self.prng.set_state(self.state) + new = self.prng.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. + + self.prng.standard_normal() + state = self.prng.get_state() + old = self.prng.standard_normal(size=3) + self.prng.set_state(state) + new = self.prng.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_backwards_compatibility(self): + # Make sure we can accept old state tuples that do not have the + # cached Gaussian value. + old_state = self.state[:-2] + x1 = self.prng.standard_normal(size=16) + self.prng.set_state(old_state) + x2 = self.prng.standard_normal(size=16) + self.prng.set_state(self.state) + x3 = self.prng.standard_normal(size=16) + assert_(np.all(x1 == x2)) + assert_(np.all(x1 == x3)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. 
+ self.prng.negative_binomial(0.5, 0.5) + + def test_set_invalid_state(self): + # gh-25402 + with pytest.raises(IndexError): + self.prng.set_state(()) + + +class TestRandint: + + rfunc = np.random.randint + + # valid integer/boolean types + itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self): + assert_raises(TypeError, self.rfunc, 1, dtype=float) + + def test_bounds_checking(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) + + def test_rng_zero_and_extremes(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + + tgt = ubnd - 1 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = (lbnd + ubnd)//2 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + def test_full_range(self): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + + try: + self.rfunc(lbnd, ubnd, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self): + # Don't use fixed seed + np.random.seed() + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_) + + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_repeatability(self): + import hashlib + # We use a sha256 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. 
+ tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71',
+ 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
+ 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
+ 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
+ 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404',
+ 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
+ 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
+ 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
+ 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'}
+
+ for dt in self.itype[1:]:
+ np.random.seed(1234)
+
+ # view as little endian for hash
+ if sys.byteorder == 'little':
+ val = self.rfunc(0, 6, size=1000, dtype=dt)
+ else:
+ val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
+
+ res = hashlib.sha256(val.view(np.int8)).hexdigest()
+ assert_(tgt[np.dtype(dt).name] == res)
+
+ # bools do not depend on endianness
+ np.random.seed(1234)
+ val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
+ res = hashlib.sha256(val).hexdigest()
+ assert_(tgt[np.dtype(bool).name] == res)
+
+ def test_int64_uint64_corner_case(self):
+ # When stored in Numpy arrays, `lbnd` is cast
+ # to np.int64, and `ubnd` is cast to np.uint64.
+ # Checking whether `lbnd` >= `ubnd` used to be
+ # done solely via direct comparison, which is incorrect
+ # because when Numpy tries to compare both numbers,
+ # it casts both to np.float64 because there is
+ # no integer superset of np.int64 and np.uint64. However,
+ # `ubnd` is too large to be represented in np.float64,
+ # causing it to be rounded down to np.iinfo(np.int64).max,
+ # leading to a ValueError because `lbnd` now equals
+ # the new `ubnd`.
+
+ dt = np.int64
+ tgt = np.iinfo(np.int64).max
+ lbnd = np.int64(np.iinfo(np.int64).max)
+ ubnd = np.uint64(np.iinfo(np.int64).max + 1)
+
+ # None of these function calls should
+ # generate a ValueError now.
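+ # (Concretely: np.float64(2**63 - 1) == np.float64(2**63) evaluates
+ # to True, because 2**63 - 1 has no exact float64 representation.)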
+ actual = np.random.randint(lbnd, ubnd, dtype=dt) + assert_equal(actual, tgt) + + def test_respect_dtype_singleton(self): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + + sample = self.rfunc(lbnd, ubnd, dtype=dt) + assert_equal(sample.dtype, np.dtype(dt)) + + for dt in (bool, int): + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + + # gh-7284: Ensure that we get Python data types + sample = self.rfunc(lbnd, ubnd, dtype=dt) + assert_(not hasattr(sample, 'dtype')) + assert_equal(type(sample), dt) + + +class TestRandomDist: + # Make sure the random distribution returns the correct value for a + # given seed + + def setup_method(self): + self.seed = 1234567890 + + def test_rand(self): + np.random.seed(self.seed) + actual = np.random.rand(3, 2) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randn(self): + np.random.seed(self.seed) + actual = np.random.randn(3, 2) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randint(self): + np.random.seed(self.seed) + actual = np.random.randint(-99, 99, size=(3, 2)) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + assert_array_equal(actual, desired) + + def test_random_integers(self): + np.random.seed(self.seed) + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = np.random.random_integers(-99, 99, size=(3, 2)) + assert_(len(w) == 1) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + assert_array_equal(actual, desired) + + def test_random_integers_max_int(self): + # Tests whether random_integers can generate the + # maximum allowed Python int that can be converted + # into a C long. Previous implementations of this + # method have thrown an OverflowError when attempting + # to generate this integer. 
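+ # np.iinfo('l') describes the platform C long, so the bound exercised
+ # here is 2**31 - 1 on Windows (where long is 32-bit even in 64-bit
+ # builds) and 2**63 - 1 on most 64-bit Unix platforms.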
+ with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = np.random.random_integers(np.iinfo('l').max, + np.iinfo('l').max) + assert_(len(w) == 1) + + desired = np.iinfo('l').max + assert_equal(actual, desired) + + def test_random_integers_deprecated(self): + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + + # DeprecationWarning raised with high == None + assert_raises(DeprecationWarning, + np.random.random_integers, + np.iinfo('l').max) + + # DeprecationWarning raised with high != None + assert_raises(DeprecationWarning, + np.random.random_integers, + np.iinfo('l').max, np.iinfo('l').max) + + def test_random(self): + np.random.seed(self.seed) + actual = np.random.random((3, 2)) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_choice_uniform_replace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 4) + desired = np.array([2, 3, 2, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([1, 1, 2, 2]) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 3, replace=False) + desired = np.array([0, 1, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + np.random.seed(self.seed) + actual = np.random.choice(4, 3, replace=False, + p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([2, 3, 1]) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + np.random.seed(self.seed) + actual = np.random.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['c', 'd', 'c', 'd']) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = np.random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(np.random.choice(2, replace=True))) + assert_(np.isscalar(np.random.choice(2, replace=False))) + assert_(np.isscalar(np.random.choice(2, replace=True, p=p))) + assert_(np.isscalar(np.random.choice(2, replace=False, p=p))) + assert_(np.isscalar(np.random.choice([1, 2], replace=True))) + assert_(np.random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(np.random.choice(arr, replace=True) is a) + + # Check 0-d array + s = tuple() + assert_(not np.isscalar(np.random.choice(2, s, replace=True))) + assert_(not 
np.isscalar(np.random.choice(2, s, replace=False))) + assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p))) + assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True))) + assert_(np.random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(np.random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_equal(np.random.choice(6, s, replace=True).shape, s) + assert_equal(np.random.choice(6, s, replace=False).shape, s) + assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s) + assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s) + assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s) + + # Check zero-size + assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) + assert_equal(np.random.randint(0, -10, size=0).shape, (0,)) + assert_equal(np.random.randint(10, 10, size=0).shape, (0,)) + assert_equal(np.random.choice(0, size=0).shape, (0,)) + assert_equal(np.random.choice([], size=(0,)).shape, (0,)) + assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_raises(ValueError, np.random.choice, [], 10) + + def test_choice_nan_probabilities(self): + a = np.array([42, 1, 2]) + p = [None, None, None] + assert_raises(ValueError, np.random.choice, a, p=p) + + def test_bytes(self): + np.random.seed(self.seed) + actual = np.random.bytes(10) + desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' + assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), + lambda x: [(i, i) for i in x], + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-11442 + lambda x: (np.asarray([(i, i) for i in x], + [("a", int), ("b", int)]) + .view(np.recarray)), + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", object), ("b", np.int32)])]: + np.random.seed(self.seed) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + np.random.shuffle(alist) + actual = alist + desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) + assert_array_equal(actual, desired) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + a_orig = a.copy() + b_orig = b.copy() + for i in range(50): + np.random.shuffle(a) + assert_equal( + sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) + np.random.shuffle(b) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + + @pytest.mark.parametrize("random", + [np.random, np.random.RandomState(), np.random.default_rng()]) + def test_shuffle_untyped_warning(self, random): + # Create a dict works like a sequence but isn't one + values = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6} + with pytest.warns(UserWarning, + match="you are shuffling a 'dict' object") as rec: + random.shuffle(values) + assert "test_random" in rec[0].filename + + @pytest.mark.parametrize("random", + [np.random, np.random.RandomState(), np.random.default_rng()]) + @pytest.mark.parametrize("use_array_like", [True, 
False]) + def test_shuffle_no_object_unpacking(self, random, use_array_like): + class MyArr(np.ndarray): + pass + + items = [ + None, np.array([3]), np.float64(3), np.array(10), np.float64(7) + ] + arr = np.array(items, dtype=object) + item_ids = {id(i) for i in items} + if use_array_like: + arr = arr.view(MyArr) + + # The array was created fine, and did not modify any objects: + assert all(id(i) in item_ids for i in arr) + + if use_array_like and not isinstance(random, np.random.Generator): + # The old API gives incorrect results, but warns about it. + with pytest.warns(UserWarning, + match="Shuffling a one dimensional array.*"): + random.shuffle(arr) + else: + random.shuffle(arr) + assert all(id(i) in item_ids for i in arr) + + def test_shuffle_memoryview(self): + # gh-18273 + # allow graceful handling of memoryviews + # (treat the same as arrays) + np.random.seed(self.seed) + a = np.arange(5).data + np.random.shuffle(a) + assert_equal(np.asarray(a), [0, 1, 4, 3, 2]) + rng = np.random.RandomState(self.seed) + rng.shuffle(a) + assert_equal(np.asarray(a), [0, 1, 2, 3, 4]) + rng = np.random.default_rng(self.seed) + rng.shuffle(a) + assert_equal(np.asarray(a), [4, 1, 0, 3, 2]) + + def test_shuffle_not_writeable(self): + a = np.zeros(3) + a.flags.writeable = False + with pytest.raises(ValueError, match='read-only'): + np.random.shuffle(a) + + def test_beta(self): + np.random.seed(self.seed) + actual = np.random.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.45341850513746058e-02, 5.31297615662868145e-04], + [1.85366619058432324e-06, 4.19214516800110563e-03], + [1.58405155108498093e-04, 1.26252891949397652e-04]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + np.random.seed(self.seed) + actual = np.random.binomial(100, .456, size=(3, 2)) + desired = np.array([[37, 43], + [42, 48], + [46, 45]]) + assert_array_equal(actual, desired) + + def test_chisquare(self): + np.random.seed(self.seed) + actual = np.random.chisquare(50, size=(3, 2)) + desired = np.array([[63.87858175501090585, 68.68407748911370447], + [65.77116116901505904, 47.09686762438974483], + [72.3828403199695174, 74.18408615260374006]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + np.random.seed(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = np.random.mtrand.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.54539444573611562, 0.45460555426388438], + [0.62345816822039413, 0.37654183177960598]], + [[0.55206000085785778, 0.44793999914214233], + [0.58964023305154301, 0.41035976694845688]], + [[0.59266909280647828, 0.40733090719352177], + [0.56974431743975207, 0.43025568256024799]]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, np.random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, np.random.mtrand.dirichlet, alpha) + + # gh-15876 + assert_raises(ValueError, 
random.dirichlet, [[5, 1]]) + assert_raises(ValueError, random.dirichlet, [[5], [1]]) + assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]]) + assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]])) + + def test_exponential(self): + np.random.seed(self.seed) + actual = np.random.exponential(1.1234, size=(3, 2)) + desired = np.array([[1.08342649775011624, 1.00607889924557314], + [2.46628830085216721, 2.49668106809923884], + [0.68717433461363442, 1.69175666993575979]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(np.random.exponential(scale=0), 0) + assert_raises(ValueError, np.random.exponential, scale=-0.) + + def test_f(self): + np.random.seed(self.seed) + actual = np.random.f(12, 77, size=(3, 2)) + desired = np.array([[1.21975394418575878, 1.75135759791559775], + [1.44803115017146489, 1.22108959480396262], + [1.02176975757740629, 1.34431827623300415]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + np.random.seed(self.seed) + actual = np.random.gamma(5, 3, size=(3, 2)) + desired = np.array([[24.60509188649287182, 28.54993563207210627], + [26.13476110204064184, 12.56988482927716078], + [31.71863275789960568, 33.30143302795922011]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(np.random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + np.random.seed(self.seed) + actual = np.random.geometric(.123456789, size=(3, 2)) + desired = np.array([[8, 7], + [17, 17], + [5, 12]]) + assert_array_equal(actual, desired) + + def test_gumbel(self): + np.random.seed(self.seed) + actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.19591898743416816, 0.34405539668096674], + [-1.4492522252274278, -1.47374816298446865], + [1.10651090478803416, -0.69535848626236174]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(np.random.gumbel(scale=0), 0) + assert_raises(ValueError, np.random.gumbel, scale=-0.) + + def test_hypergeometric(self): + np.random.seed(self.seed) + actual = np.random.hypergeometric(10, 5, 14, size=(3, 2)) + desired = np.array([[10, 10], + [10, 10], + [9, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = np.random.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = np.random.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = np.random.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = np.random.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + np.random.seed(self.seed) + actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.66599721112760157, 0.52829452552221945], + [3.12791959514407125, 3.18202813572992005], + [-0.05391065675859356, 1.74901336242837324]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(np.random.laplace(scale=0), 0) + assert_raises(ValueError, np.random.laplace, scale=-0.) 
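+
+ # A minimal sketch (name illustrative) of the contract every
+ # fixed-value test in this class relies on: reseeding with the same
+ # seed replays the identical MT19937 stream, so hard-coded expected
+ # values stay valid.
+ def test_reseed_replays_stream_sketch(self):
+ np.random.seed(self.seed)
+ first = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
+ np.random.seed(self.seed)
+ second = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
+ assert_array_equal(first, second)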
+ + def test_logistic(self): + np.random.seed(self.seed) + actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[1.09232835305011444, 0.8648196662399954], + [4.27818590694950185, 4.33897006346929714], + [-0.21682183359214885, 2.63373365386060332]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + np.random.seed(self.seed) + actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[16.50698631688883822, 36.54846706092654784], + [22.67886599981281748, 0.71617561058995771], + [65.72798501792723869, 86.84341601437161273]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_lognormal_0(self): + assert_equal(np.random.lognormal(sigma=0), 1) + assert_raises(ValueError, np.random.lognormal, sigma=-0.) + + def test_logseries(self): + np.random.seed(self.seed) + actual = np.random.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[2, 2], + [6, 17], + [3, 6]]) + assert_array_equal(actual, desired) + + def test_multinomial(self): + np.random.seed(self.seed) + actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2)) + desired = np.array([[[4, 3, 5, 4, 2, 2], + [5, 2, 8, 2, 2, 1]], + [[3, 4, 3, 6, 0, 4], + [2, 1, 4, 3, 6, 4]], + [[4, 4, 2, 5, 2, 3], + [4, 3, 4, 2, 3, 4]]]) + assert_array_equal(actual, desired) + + def test_multivariate_normal(self): + np.random.seed(self.seed) + mean = (.123456789, 10) + cov = [[1, 0], [0, 1]] + size = (3, 2) + actual = np.random.multivariate_normal(mean, cov, size) + desired = np.array([[[1.463620246718631, 11.73759122771936], + [1.622445133300628, 9.771356667546383]], + [[2.154490787682787, 12.170324946056553], + [1.719909438201865, 9.230548443648306]], + [[0.689515026297799, 9.880729819607714], + [-0.023054015651998, 9.201096623542879]]]) + + assert_array_almost_equal(actual, desired, decimal=15) + + # Check for default size, was raising deprecation warning + actual = np.random.multivariate_normal(mean, cov) + desired = np.array([0.895289569463708, 9.17180864067987]) + assert_array_almost_equal(actual, desired, decimal=15) + + # Check that non positive-semidefinite covariance warns with + # RuntimeWarning + mean = [0, 0] + cov = [[1, 2], [2, 1]] + assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) + + # and that it doesn't warn with RuntimeWarning check_valid='ignore' + assert_no_warnings(np.random.multivariate_normal, mean, cov, + check_valid='ignore') + + # and that it raises with RuntimeWarning check_valid='raises' + assert_raises(ValueError, np.random.multivariate_normal, mean, cov, + check_valid='raise') + + cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) + with suppress_warnings() as sup: + np.random.multivariate_normal(mean, cov) + w = sup.record(RuntimeWarning) + assert len(w) == 0 + + def test_negative_binomial(self): + np.random.seed(self.seed) + actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2)) + desired = np.array([[848, 841], + [892, 611], + [779, 647]]) + assert_array_equal(actual, desired) + + def test_noncentral_chisquare(self): + np.random.seed(self.seed) + actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + desired = np.array([[23.91905354498517511, 13.35324692733826346], + [31.22452661329736401, 16.60047399466177254], + [5.03461598262724586, 17.94973089023519464]]) + assert_array_almost_equal(actual, desired, decimal=14) + + actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + desired = np.array([[1.47145377828516666, 
0.15052899268012659], + [0.00943803056963588, 1.02647251615666169], + [0.332334982684171, 0.15451287602753125]]) + assert_array_almost_equal(actual, desired, decimal=14) + + np.random.seed(self.seed) + actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[9.597154162763948, 11.725484450296079], + [10.413711048138335, 3.694475922923986], + [13.484222138963087, 14.377255424602957]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + np.random.seed(self.seed) + actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[1.40598099674926669, 0.34207973179285761], + [3.57715069265772545, 7.92632662577829805], + [0.43741599463544162, 1.1774208752428319]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + np.random.seed(self.seed) + actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[2.80378370443726244, 3.59863924443872163], + [3.121433477601256, -0.33382987590723379], + [4.18552478636557357, 4.46410668111310471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(np.random.normal(scale=0), 0) + assert_raises(ValueError, np.random.normal, scale=-0.) + + def test_pareto(self): + np.random.seed(self.seed) + actual = np.random.pareto(a=.123456789, size=(3, 2)) + desired = np.array( + [[2.46852460439034849e+03, 1.41286880810518346e+03], + [5.28287797029485181e+07, 6.57720981047328785e+07], + [1.40840323350391515e+02, 1.98390255135251704e+05]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + np.random.seed(self.seed) + actual = np.random.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [1, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('l').max + lamneg = -1 + assert_raises(ValueError, np.random.poisson, lamneg) + assert_raises(ValueError, np.random.poisson, [lamneg]*10) + assert_raises(ValueError, np.random.poisson, lambig) + assert_raises(ValueError, np.random.poisson, [lambig]*10) + + def test_power(self): + np.random.seed(self.seed) + actual = np.random.power(a=.123456789, size=(3, 2)) + desired = np.array([[0.02048932883240791, 0.01424192241128213], + [0.38446073748535298, 0.39499689943484395], + [0.00177699707563439, 0.13115505880863756]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + np.random.seed(self.seed) + actual = np.random.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[13.8882496494248393, 13.383318339044731], + [20.95413364294492098, 21.08285015800712614], + [11.06066537006854311, 17.35468505778271009]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(np.random.rayleigh(scale=0), 0) + assert_raises(ValueError, np.random.rayleigh, scale=-0.) 
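+
+ # A minimal sketch (name illustrative) of the looser nulp-based
+ # comparison used by test_pareto above: two values one unit in the
+ # last place apart are accepted by a budget of nulp=1.
+ def test_nulp_comparison_sketch(self):
+ x = np.array([1.0])
+ y = np.array([1.0 + np.finfo(np.float64).eps])
+ np.testing.assert_array_almost_equal_nulp(x, y, nulp=1)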
+ + def test_standard_cauchy(self): + np.random.seed(self.seed) + actual = np.random.standard_cauchy(size=(3, 2)) + desired = np.array([[0.77127660196445336, -6.55601161955910605], + [0.93582023391158309, -2.07479293013759447], + [-4.74601644297011926, 0.18338989290760804]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_exponential(self): + np.random.seed(self.seed) + actual = np.random.standard_exponential(size=(3, 2)) + desired = np.array([[0.96441739162374596, 0.89556604882105506], + [2.1953785836319808, 2.22243285392490542], + [0.6116915921431676, 1.50592546727413201]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_gamma(self): + np.random.seed(self.seed) + actual = np.random.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[5.50841531318455058, 6.62953470301903103], + [5.93988484943779227, 2.31044849402133989], + [7.54838614231317084, 8.012756093271868]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_standard_gamma_0(self): + assert_equal(np.random.standard_gamma(shape=0), 0) + assert_raises(ValueError, np.random.standard_gamma, shape=-0.) + + def test_standard_normal(self): + np.random.seed(self.seed) + actual = np.random.standard_normal(size=(3, 2)) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_t(self): + np.random.seed(self.seed) + actual = np.random.standard_t(df=10, size=(3, 2)) + desired = np.array([[0.97140611862659965, -0.08830486548450577], + [1.36311143689505321, -0.55317463909867071], + [-0.18473749069684214, 0.61181537341755321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + np.random.seed(self.seed) + actual = np.random.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[12.68117178949215784, 12.4129206149193152], + [16.20131377335158263, 16.25692138747600524], + [11.20400690911820263, 14.4978144835829923]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + np.random.seed(self.seed) + actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[6.99097932346268003, 6.73801597444323974], + [9.50364421400426274, 9.53130618907631089], + [5.48995325769805476, 8.47493103280052118]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = np.random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. 
+ # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, np.random.uniform, throwing_float, + throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + __index__ = __int__ + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + np.random.seed(self.seed) + actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[2.28567572673902042, 2.89163838442285037], + [0.38198375564286025, 2.57638023113890746], + [1.19153771588353052, 1.83509849681825354]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + np.random.seed(self.seed) + r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + np.testing.assert_(np.isfinite(r).all()) + + def test_wald(self): + np.random.seed(self.seed) + actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[3.82935265715889983, 5.13125249184285526], + [0.35045403618358717, 1.50832396872003538], + [0.24124319895843183, 0.22031101461955038]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_weibull(self): + np.random.seed(self.seed) + actual = np.random.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.97097342648766727, 0.91422896443565516], + [1.89517770034962929, 1.91414357960479564], + [0.67057783752390987, 1.39494046635066793]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + np.random.seed(self.seed) + assert_equal(np.random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, np.random.weibull, a=-0.) 
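+
+ # The a=-0. rejection above is deliberate: negative zero compares
+ # equal to zero, so the validation evidently keys on the sign of the
+ # float rather than an ordinary a < 0 comparison. A compact
+ # restatement (name illustrative):
+ def test_negative_zero_rejected_sketch(self):
+ assert -0. == 0.
+ assert_raises(ValueError, np.random.weibull, a=-0.)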
+ + def test_zipf(self): + np.random.seed(self.seed) + actual = np.random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[66, 29], + [1, 1], + [3, 13]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + def setup_method(self): + self.seed = 123456789 + + def setSeed(self): + np.random.seed(self.seed) + + # TODO: Include test for randint once it can broadcast + # Can steal the test written in PR #6938 + + def test_uniform(self): + low = [0] + high = [1] + uniform = np.random.uniform + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.setSeed() + actual = uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + self.setSeed() + actual = uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + normal = np.random.normal + desired = np.array([2.2129019979039612, + 2.1283977976520019, + 1.8417114045748335]) + + self.setSeed() + actual = normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc * 3, bad_scale) + + self.setSeed() + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + beta = np.random.beta + desired = np.array([0.19843558305989056, + 0.075230336409423643, + 0.24976865978980844]) + + self.setSeed() + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + self.setSeed() + actual = beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a, b * 3) + assert_raises(ValueError, beta, a, bad_b * 3) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + exponential = np.random.exponential + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + std_gamma = np.random.standard_gamma + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + gamma = np.random.gamma + desired = np.array([1.5221370731769048, + 1.5277256455738331, + 1.4248762625178359]) + + self.setSeed() + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + self.setSeed() + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + f = np.random.f + desired = np.array([0.80038951638264799, + 
0.86768719635363512, + 2.7251095168386801]) + + self.setSeed() + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + self.setSeed() + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + nonc_f = np.random.noncentral_f + desired = np.array([9.1393943263705211, + 13.025456344595602, + 8.8018098359100545]) + + self.setSeed() + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + self.setSeed() + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + self.setSeed() + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + self.setSeed() + desired = np.array([6.869638627492048, 0.785880199263955]) + actual = np.random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + chisquare = np.random.chisquare + desired = np.array([0.57022801133088286, + 0.51947702108840776, + 0.1320969254923558]) + + self.setSeed() + actual = chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + nonc_chi = np.random.noncentral_chisquare + desired = np.array([9.0015599467913763, + 4.5804135049718742, + 6.0872302432834564]) + + self.setSeed() + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + self.setSeed() + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + t = np.random.standard_t + desired = np.array([3.0702872575217643, + 5.8560725167361607, + 1.0274791436474273]) + + self.setSeed() + actual = t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, t, bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + vonmises = np.random.vonmises + desired = np.array([2.9883443664201312, + -2.7064099483995943, + -1.8672476700665914]) + + self.setSeed() + actual = vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu * 3, 
bad_kappa) + + self.setSeed() + actual = vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + pareto = np.random.pareto + desired = np.array([1.1405622680198362, + 1.1465519762044529, + 1.0389564467453547]) + + self.setSeed() + actual = pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + weibull = np.random.weibull + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + power = np.random.power + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.setSeed() + actual = power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + laplace = np.random.laplace + desired = np.array([0.067921356028507157, + 0.070715642226971326, + 0.019290950698972624]) + + self.setSeed() + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + self.setSeed() + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + gumbel = np.random.gumbel + desired = np.array([0.2730318639556768, + 0.26936705726291116, + 0.33906220393037939]) + + self.setSeed() + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + self.setSeed() + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + logistic = np.random.logistic + desired = np.array([0.13152135837586171, + 0.13675915696285773, + 0.038216792802833396]) + + self.setSeed() + actual = logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc * 3, bad_scale) + + self.setSeed() + actual = logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc, bad_scale * 3) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + lognormal = np.random.lognormal + desired = np.array([9.1422086044848427, + 8.4013952870126261, + 6.3073234116578671]) + + self.setSeed() + actual = lognormal(mean * 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + + self.setSeed() + actual = lognormal(mean, sigma * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + rayleigh = np.random.rayleigh + desired = np.array([1.2337491937897689, + 1.2360119924878694, + 1.1936818095781789]) + + self.setSeed() + actual = rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + 
assert_raises(ValueError, rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + wald = np.random.wald + desired = np.array([0.11873681120271318, + 0.12450084820795027, + 0.9096122728408238]) + + self.setSeed() + actual = wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean * 3, scale) + assert_raises(ValueError, wald, mean * 3, bad_scale) + + self.setSeed() + actual = wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean, scale * 3) + assert_raises(ValueError, wald, mean, bad_scale * 3) + assert_raises(ValueError, wald, 0.0, 1) + assert_raises(ValueError, wald, 0.5, 0.0) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + triangular = np.random.triangular + desired = np.array([2.03339048710429, + 2.0347400359389356, + 2.0095991069536208]) + + self.setSeed() + actual = triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + right) + + self.setSeed() + actual = triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + right) + + self.setSeed() + actual = triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + right * 3) + + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + binom = np.random.binomial + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n * 3, p) + assert_raises(ValueError, binom, n * 3, bad_p_one) + assert_raises(ValueError, binom, n * 3, bad_p_two) + + self.setSeed() + actual = binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n, p * 3) + assert_raises(ValueError, binom, n, bad_p_one * 3) + assert_raises(ValueError, binom, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + neg_binom = np.random.negative_binomial + desired = np.array([1, 0, 1]) + + self.setSeed() + actual = neg_binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n * 3, p) + assert_raises(ValueError, neg_binom, n * 3, bad_p_one) + assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + + self.setSeed() + actual = neg_binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n, p * 3) + assert_raises(ValueError, neg_binom, n, bad_p_one * 3) + assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + + def test_poisson(self): + max_lam = np.random.RandomState()._poisson_lam_max + + lam = [1] + bad_lam_one = [-1] + 
bad_lam_two = [max_lam * 2] + poisson = np.random.poisson + desired = np.array([1, 1, 0]) + + self.setSeed() + actual = poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, poisson, bad_lam_one * 3) + assert_raises(ValueError, poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + zipf = np.random.zipf + desired = np.array([2, 2, 1]) + + self.setSeed() + actual = zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, zipf, np.nan) + assert_raises(ValueError, zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + geom = np.random.geometric + desired = np.array([2, 2, 2]) + + self.setSeed() + actual = geom(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, geom, bad_p_one * 3) + assert_raises(ValueError, geom, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [0] + bad_nsample_two = [4] + hypergeom = np.random.hypergeometric + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = hypergeom(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + + self.setSeed() + actual = hypergeom(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + + self.setSeed() + actual = hypergeom(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + logseries = np.random.logseries + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, logseries, bad_p_one * 3) + assert_raises(ValueError, logseries, bad_p_two * 3) + + +@pytest.mark.skipif(IS_WASM, reason="can't start thread") +class TestThread: + # make sure each state produces the same sequence even in threads + def setup_method(self): + self.seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(np.random.RandomState(s), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(np.random.RandomState(s), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + 
def test_normal(self): + def gen_random(state, out): + out[...] = state.normal(size=10000) + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1/6.]*6, size=10000) + self.check_function(gen_random, sz=(10000, 6)) + + +# See Issue #4263 +class TestSingleEltArrayInput: + def setup_method(self): + self.argOne = np.array([2]) + self.argTwo = np.array([3]) + self.argThree = np.array([4]) + self.tgtShape = (1,) + + def test_one_arg_funcs(self): + funcs = (np.random.exponential, np.random.standard_gamma, + np.random.chisquare, np.random.standard_t, + np.random.pareto, np.random.weibull, + np.random.power, np.random.rayleigh, + np.random.poisson, np.random.zipf, + np.random.geometric, np.random.logseries) + + probfuncs = (np.random.geometric, np.random.logseries) + + for func in funcs: + if func in probfuncs: # p < 1.0 + out = func(np.array([0.5])) + + else: + out = func(self.argOne) + + assert_equal(out.shape, self.tgtShape) + + def test_two_arg_funcs(self): + funcs = (np.random.uniform, np.random.normal, + np.random.beta, np.random.gamma, + np.random.f, np.random.noncentral_chisquare, + np.random.vonmises, np.random.laplace, + np.random.gumbel, np.random.logistic, + np.random.lognormal, np.random.wald, + np.random.binomial, np.random.negative_binomial) + + probfuncs = (np.random.binomial, np.random.negative_binomial) + + for func in funcs: + if func in probfuncs: # p <= 1 + argTwo = np.array([0.5]) + + else: + argTwo = self.argTwo + + out = func(self.argOne, argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, argTwo[0]) + assert_equal(out.shape, self.tgtShape) + + def test_randint(self): + itype = [bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + func = np.random.randint + high = np.array([1]) + low = np.array([0]) + + for dt in itype: + out = func(low, high, dtype=dt) + assert_equal(out.shape, self.tgtShape) + + out = func(low[0], high, dtype=dt) + assert_equal(out.shape, self.tgtShape) + + out = func(low, high[0], dtype=dt) + assert_equal(out.shape, self.tgtShape) + + def test_three_arg_funcs(self): + funcs = [np.random.noncentral_f, np.random.triangular, + np.random.hypergeometric] + + for func in funcs: + out = func(self.argOne, self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, self.argTwo[0], self.argThree) + assert_equal(out.shape, self.tgtShape) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_randomstate.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_randomstate.py new file mode 100644 index 00000000..c77bfce8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_randomstate.py @@ -0,0 +1,2121 @@ +import hashlib +import pickle +import sys +import warnings + +import numpy as np +import pytest +from numpy.testing import ( + assert_, assert_raises, assert_equal, assert_warns, + assert_no_warnings, assert_array_equal, assert_array_almost_equal, + suppress_warnings, IS_WASM + ) + +from numpy.random import MT19937, PCG64 +from numpy import random + +INT_FUNCS = {'binomial': 
(100.0, 0.6), + 'geometric': (.5,), + 'hypergeometric': (20, 20, 10), + 'logseries': (.5,), + 'multinomial': (20, np.ones(6) / 6.0), + 'negative_binomial': (100, .5), + 'poisson': (10.0,), + 'zipf': (2,), + } + +if np.iinfo(int).max < 2**32: + # Windows and some 32-bit platforms, e.g., ARM + INT_FUNC_HASHES = {'binomial': '2fbead005fc63942decb5326d36a1f32fe2c9d32c904ee61e46866b88447c263', + 'logseries': '23ead5dcde35d4cfd4ef2c105e4c3d43304b45dc1b1444b7823b9ee4fa144ebb', + 'geometric': '0d764db64f5c3bad48c8c33551c13b4d07a1e7b470f77629bef6c985cac76fcf', + 'hypergeometric': '7b59bf2f1691626c5815cdcd9a49e1dd68697251d4521575219e4d2a1b8b2c67', + 'multinomial': 'd754fa5b92943a38ec07630de92362dd2e02c43577fc147417dc5b9db94ccdd3', + 'negative_binomial': '8eb216f7cb2a63cf55605422845caaff002fddc64a7dc8b2d45acd477a49e824', + 'poisson': '70c891d76104013ebd6f6bcf30d403a9074b886ff62e4e6b8eb605bf1a4673b7', + 'zipf': '01f074f97517cd5d21747148ac6ca4074dde7fcb7acbaec0a936606fecacd93f', + } +else: + INT_FUNC_HASHES = {'binomial': '8626dd9d052cb608e93d8868de0a7b347258b199493871a1dc56e2a26cacb112', + 'geometric': '8edd53d272e49c4fc8fbbe6c7d08d563d62e482921f3131d0a0e068af30f0db9', + 'hypergeometric': '83496cc4281c77b786c9b7ad88b74d42e01603a55c60577ebab81c3ba8d45657', + 'logseries': '65878a38747c176bc00e930ebafebb69d4e1e16cd3a704e264ea8f5e24f548db', + 'multinomial': '7a984ae6dca26fd25374479e118b22f55db0aedccd5a0f2584ceada33db98605', + 'negative_binomial': 'd636d968e6a24ae92ab52fe11c46ac45b0897e98714426764e820a7d77602a61', + 'poisson': '956552176f77e7c9cb20d0118fc9cf690be488d790ed4b4c4747b965e61b0bb4', + 'zipf': 'f84ba7feffda41e606e20b28dfc0f1ea9964a74574513d4a4cbc98433a8bfa45', + } + + +@pytest.fixture(scope='module', params=INT_FUNCS) +def int_func(request): + return (request.param, INT_FUNCS[request.param], + INT_FUNC_HASHES[request.param]) + + +@pytest.fixture +def restore_singleton_bitgen(): + """Ensures that the singleton bitgen is restored after a test""" + orig_bitgen = np.random.get_bit_generator() + yield + np.random.set_bit_generator(orig_bitgen) + + +def assert_mt19937_state_equal(a, b): + assert_equal(a['bit_generator'], b['bit_generator']) + assert_array_equal(a['state']['key'], b['state']['key']) + assert_array_equal(a['state']['pos'], b['state']['pos']) + assert_equal(a['has_gauss'], b['has_gauss']) + assert_equal(a['gauss'], b['gauss']) + + +class TestSeed: + def test_scalar(self): + s = random.RandomState(0) + assert_equal(s.randint(1000), 684) + s = random.RandomState(4294967295) + assert_equal(s.randint(1000), 419) + + def test_array(self): + s = random.RandomState(range(10)) + assert_equal(s.randint(1000), 468) + s = random.RandomState(np.arange(10)) + assert_equal(s.randint(1000), 468) + s = random.RandomState([0]) + assert_equal(s.randint(1000), 973) + s = random.RandomState([4294967295]) + assert_equal(s.randint(1000), 265) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, random.RandomState, -0.5) + assert_raises(ValueError, random.RandomState, -1) + + def test_invalid_array(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, random.RandomState, [-0.5]) + assert_raises(ValueError, random.RandomState, [-1]) + assert_raises(ValueError, random.RandomState, [4294967296]) + assert_raises(ValueError, random.RandomState, [1, 2, 4294967296]) + assert_raises(ValueError, random.RandomState, [1, -2, 4294967296]) + + def test_invalid_array_shape(self): + # gh-9832 + assert_raises(ValueError, random.RandomState, 
np.array([], + dtype=np.int64)) + assert_raises(ValueError, random.RandomState, [[1, 2, 3]]) + assert_raises(ValueError, random.RandomState, [[1, 2, 3], + [4, 5, 6]]) + + def test_cannot_seed(self): + rs = random.RandomState(PCG64(0)) + with assert_raises(TypeError): + rs.seed(1234) + + def test_invalid_initialization(self): + assert_raises(ValueError, random.RandomState, MT19937) + + +class TestBinomial: + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. + assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial: + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.randint(-5, -1) < -1) + x = random.randint(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, random.multinomial, 1, p, + float(1)) + + def test_invalid_prob(self): + assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2]) + assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9]) + + def test_invalid_n(self): + assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2]) + + def test_p_non_contiguous(self): + p = np.arange(15.) + p /= np.sum(p[1::3]) + pvals = p[1::3] + random.seed(1432985819) + non_contig = random.multinomial(100, pvals=pvals) + random.seed(1432985819) + contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) + assert_array_equal(non_contig, contig) + + def test_multinomial_pvals_float32(self): + x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09, + 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32) + pvals = x / x.sum() + match = r"[\w\s]*pvals array is cast to 64-bit floating" + with pytest.raises(ValueError, match=match): + random.multinomial(1, pvals) + + def test_multinomial_n_float(self): + # Non-index integer types should gracefully truncate floats + random.multinomial(100.5, [0.2, 0.8]) + +class TestSetState: + def setup_method(self): + self.seed = 1234567890 + self.random_state = random.RandomState(self.seed) + self.state = self.random_state.get_state() + + def test_basic(self): + old = self.random_state.tomaxint(16) + self.random_state.set_state(self.state) + new = self.random_state.tomaxint(16) + assert_(np.all(old == new)) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. + old = self.random_state.standard_normal(size=3) + self.random_state.set_state(self.state) + new = self.random_state.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. 
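+ # (The cache is visible in the legacy state tuple itself: its last
+ # two entries are the has_gauss flag and the cached value, which is
+ # why test_backwards_compatibility below can truncate it to
+ # state[:-2].)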
+ + self.random_state.standard_normal() + state = self.random_state.get_state() + old = self.random_state.standard_normal(size=3) + self.random_state.set_state(state) + new = self.random_state.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_backwards_compatibility(self): + # Make sure we can accept old state tuples that do not have the + # cached Gaussian value. + old_state = self.state[:-2] + x1 = self.random_state.standard_normal(size=16) + self.random_state.set_state(old_state) + x2 = self.random_state.standard_normal(size=16) + self.random_state.set_state(self.state) + x3 = self.random_state.standard_normal(size=16) + assert_(np.all(x1 == x2)) + assert_(np.all(x1 == x3)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. + self.random_state.negative_binomial(0.5, 0.5) + + def test_get_state_warning(self): + rs = random.RandomState(PCG64()) + with suppress_warnings() as sup: + w = sup.record(RuntimeWarning) + state = rs.get_state() + assert_(len(w) == 1) + assert isinstance(state, dict) + assert state['bit_generator'] == 'PCG64' + + def test_invalid_legacy_state_setting(self): + state = self.random_state.get_state() + new_state = ('Unknown', ) + state[1:] + assert_raises(ValueError, self.random_state.set_state, new_state) + assert_raises(TypeError, self.random_state.set_state, + np.array(new_state, dtype=object)) + state = self.random_state.get_state(legacy=False) + del state['bit_generator'] + assert_raises(ValueError, self.random_state.set_state, state) + + def test_pickle(self): + self.random_state.seed(0) + self.random_state.random_sample(100) + self.random_state.standard_normal() + pickled = self.random_state.get_state(legacy=False) + assert_equal(pickled['has_gauss'], 1) + rs_unpick = pickle.loads(pickle.dumps(self.random_state)) + unpickled = rs_unpick.get_state(legacy=False) + assert_mt19937_state_equal(pickled, unpickled) + + def test_state_setting(self): + attr_state = self.random_state.__getstate__() + self.random_state.standard_normal() + self.random_state.__setstate__(attr_state) + state = self.random_state.get_state(legacy=False) + assert_mt19937_state_equal(attr_state, state) + + def test_repr(self): + assert repr(self.random_state).startswith('RandomState(MT19937)') + + +class TestRandint: + + rfunc = random.randint + + # valid integer/boolean types + itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self): + assert_raises(TypeError, self.rfunc, 1, dtype=float) + + def test_bounds_checking(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) + + def test_rng_zero_and_extremes(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + + tgt = ubnd - 1 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = (lbnd + ubnd)//2 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + def test_full_range(self): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 
0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + + try: + self.rfunc(lbnd, ubnd, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self): + # Don't use fixed seed + random.seed() + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_) + + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_repeatability(self): + # We use a sha256 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. + tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', + 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', + 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', + 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', + 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', + 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', + 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', + 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', + 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} + + for dt in self.itype[1:]: + random.seed(1234) + + # view as little endian for hash + if sys.byteorder == 'little': + val = self.rfunc(0, 6, size=1000, dtype=dt) + else: + val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + + res = hashlib.sha256(val.view(np.int8)).hexdigest() + assert_(tgt[np.dtype(dt).name] == res) + + # bools do not depend on endianness + random.seed(1234) + val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(bool).name] == res) + + @pytest.mark.skipif(np.iinfo('l').max < 2**32, + reason='Cannot test with 32-bit C long') + def test_repeatability_32bit_boundary_broadcasting(self): + desired = np.array([[[3992670689, 2438360420, 2557845020], + [4107320065, 4142558326, 3216529513], + [1605979228, 2807061240, 665605495]], + [[3211410639, 4128781000, 457175120], + [1712592594, 1282922662, 3081439808], + [3997822960, 2008322436, 1563495165]], + [[1398375547, 4269260146, 115316740], + [3414372578, 3437564012, 2112038651], + [3572980305, 2260248732, 3908238631]], + [[2561372503, 223155946, 3127879445], + [ 441282060, 3514786552, 2148440361], + [1629275283, 3479737011, 3003195987]], + [[ 412181688, 940383289, 3047321305], + [2978368172, 764731833, 2282559898], + [ 105711276, 720447391, 3596512484]]]) + for size in [None, (5, 3, 3)]: + random.seed(12345) + x = self.rfunc([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1], + size=size) + assert_array_equal(x, desired if size is not None else desired[0]) + + def test_int64_uint64_corner_case(self): + # When stored in Numpy arrays, `lbnd` is casted + # as np.int64, and `ubnd` is casted as np.uint64. + # Checking whether `lbnd` >= `ubnd` used to be + # done solely via direct comparison, which is incorrect + # because when Numpy tries to compare both numbers, + # it casts both to np.float64 because there is + # no integer superset of np.int64 and np.uint64. 
However, + # `ubnd` is too large to be represented in np.float64, + # causing it be round down to np.iinfo(np.int64).max, + # leading to a ValueError because `lbnd` now equals + # the new `ubnd`. + + dt = np.int64 + tgt = np.iinfo(np.int64).max + lbnd = np.int64(np.iinfo(np.int64).max) + ubnd = np.uint64(np.iinfo(np.int64).max + 1) + + # None of these function calls should + # generate a ValueError now. + actual = random.randint(lbnd, ubnd, dtype=dt) + assert_equal(actual, tgt) + + def test_respect_dtype_singleton(self): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min + ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 + + sample = self.rfunc(lbnd, ubnd, dtype=dt) + assert_equal(sample.dtype, np.dtype(dt)) + + for dt in (bool, int): + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + + # gh-7284: Ensure that we get Python data types + sample = self.rfunc(lbnd, ubnd, dtype=dt) + assert_(not hasattr(sample, 'dtype')) + assert_equal(type(sample), dt) + + +class TestRandomDist: + # Make sure the random distribution returns the correct value for a + # given seed + + def setup_method(self): + self.seed = 1234567890 + + def test_rand(self): + random.seed(self.seed) + actual = random.rand(3, 2) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rand_singleton(self): + random.seed(self.seed) + actual = random.rand() + desired = 0.61879477158567997 + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randn(self): + random.seed(self.seed) + actual = random.randn(3, 2) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + random.seed(self.seed) + actual = random.randn() + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_randint(self): + random.seed(self.seed) + actual = random.randint(-99, 99, size=(3, 2)) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + assert_array_equal(actual, desired) + + def test_random_integers(self): + random.seed(self.seed) + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = random.random_integers(-99, 99, size=(3, 2)) + assert_(len(w) == 1) + desired = np.array([[31, 3], + [-52, 41], + [-48, -66]]) + assert_array_equal(actual, desired) + + random.seed(self.seed) + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = random.random_integers(198, size=(3, 2)) + assert_(len(w) == 1) + assert_array_equal(actual, desired + 100) + + def test_tomaxint(self): + random.seed(self.seed) + rs = random.RandomState(self.seed) + actual = rs.tomaxint(size=(3, 2)) + if np.iinfo(int).max == 2147483647: + desired = np.array([[1328851649, 731237375], + [1270502067, 320041495], + [1908433478, 499156889]], dtype=np.int64) + else: + desired = np.array([[5707374374421908479, 5456764827585442327], + [8196659375100692377, 8224063923314595285], + [4220315081820346526, 7177518203184491332]], + dtype=np.int64) + + assert_equal(actual, desired) + + rs.seed(self.seed) + actual = rs.tomaxint() + assert_equal(actual, desired[0, 0]) + + def test_random_integers_max_int(self): + # Tests whether random_integers can generate the + # maximum allowed Python int that can be 
converted + # into a C long. Previous implementations of this + # method have thrown an OverflowError when attempting + # to generate this integer. + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = random.random_integers(np.iinfo('l').max, + np.iinfo('l').max) + assert_(len(w) == 1) + + desired = np.iinfo('l').max + assert_equal(actual, desired) + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + typer = np.dtype('l').type + actual = random.random_integers(typer(np.iinfo('l').max), + typer(np.iinfo('l').max)) + assert_(len(w) == 1) + assert_equal(actual, desired) + + def test_random_integers_deprecated(self): + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + + # DeprecationWarning raised with high == None + assert_raises(DeprecationWarning, + random.random_integers, + np.iinfo('l').max) + + # DeprecationWarning raised with high != None + assert_raises(DeprecationWarning, + random.random_integers, + np.iinfo('l').max, np.iinfo('l').max) + + def test_random_sample(self): + random.seed(self.seed) + actual = random.random_sample((3, 2)) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + random.seed(self.seed) + actual = random.random_sample() + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_choice_uniform_replace(self): + random.seed(self.seed) + actual = random.choice(4, 4) + desired = np.array([2, 3, 2, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + random.seed(self.seed) + actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([1, 1, 2, 2]) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + random.seed(self.seed) + actual = random.choice(4, 3, replace=False) + desired = np.array([0, 1, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + random.seed(self.seed) + actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([2, 3, 1]) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + random.seed(self.seed) + actual = random.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['c', 'd', 'c', 'd']) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(random.choice(2, replace=True))) + assert_(np.isscalar(random.choice(2, replace=False))) + 
assert_(np.isscalar(random.choice(2, replace=True, p=p))) + assert_(np.isscalar(random.choice(2, replace=False, p=p))) + assert_(np.isscalar(random.choice([1, 2], replace=True))) + assert_(random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, replace=True) is a) + + # Check 0-d array + s = tuple() + assert_(not np.isscalar(random.choice(2, s, replace=True))) + assert_(not np.isscalar(random.choice(2, s, replace=False))) + assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(random.choice(2, s, replace=False, p=p))) + assert_(not np.isscalar(random.choice([1, 2], s, replace=True))) + assert_(random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_equal(random.choice(6, s, replace=True).shape, s) + assert_equal(random.choice(6, s, replace=False).shape, s) + assert_equal(random.choice(6, s, replace=True, p=p).shape, s) + assert_equal(random.choice(6, s, replace=False, p=p).shape, s) + assert_equal(random.choice(np.arange(6), s, replace=True).shape, s) + + # Check zero-size + assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) + assert_equal(random.randint(0, -10, size=0).shape, (0,)) + assert_equal(random.randint(10, 10, size=0).shape, (0,)) + assert_equal(random.choice(0, size=0).shape, (0,)) + assert_equal(random.choice([], size=(0,)).shape, (0,)) + assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_raises(ValueError, random.choice, [], 10) + + def test_choice_nan_probabilities(self): + a = np.array([42, 1, 2]) + p = [None, None, None] + assert_raises(ValueError, random.choice, a, p=p) + + def test_choice_p_non_contiguous(self): + p = np.ones(10) / 5 + p[1::2] = 3.0 + random.seed(self.seed) + non_contig = random.choice(5, 3, p=p[::2]) + random.seed(self.seed) + contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) + assert_array_equal(non_contig, contig) + + def test_bytes(self): + random.seed(self.seed) + actual = random.bytes(10) + desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' + assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), + lambda x: [(i, i) for i in x], + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-11442 + lambda x: (np.asarray([(i, i) for i in x], + [("a", int), ("b", int)]) + .view(np.recarray)), + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", object, (1,)), + ("b", np.int32, (1,))])]: + random.seed(self.seed) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + random.shuffle(alist) + actual = alist + desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) + assert_array_equal(actual, desired) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + a_orig = a.copy() + b_orig = b.copy() + for i in range(50): + random.shuffle(a) + assert_equal( + sorted(a.data[~a.mask]), 
sorted(a_orig.data[~a_orig.mask])) + random.shuffle(b) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + + def test_shuffle_invalid_objects(self): + x = np.array(3) + assert_raises(TypeError, random.shuffle, x) + + def test_permutation(self): + random.seed(self.seed) + alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] + actual = random.permutation(alist) + desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3] + assert_array_equal(actual, desired) + + random.seed(self.seed) + arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T + actual = random.permutation(arr_2d) + assert_array_equal(actual, np.atleast_2d(desired).T) + + random.seed(self.seed) + bad_x_str = "abcd" + assert_raises(IndexError, random.permutation, bad_x_str) + + random.seed(self.seed) + bad_x_float = 1.2 + assert_raises(IndexError, random.permutation, bad_x_float) + + integer_val = 10 + desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2] + + random.seed(self.seed) + actual = random.permutation(integer_val) + assert_array_equal(actual, desired) + + def test_beta(self): + random.seed(self.seed) + actual = random.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.45341850513746058e-02, 5.31297615662868145e-04], + [1.85366619058432324e-06, 4.19214516800110563e-03], + [1.58405155108498093e-04, 1.26252891949397652e-04]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + random.seed(self.seed) + actual = random.binomial(100.123, .456, size=(3, 2)) + desired = np.array([[37, 43], + [42, 48], + [46, 45]]) + assert_array_equal(actual, desired) + + random.seed(self.seed) + actual = random.binomial(100.123, .456) + desired = 37 + assert_array_equal(actual, desired) + + def test_chisquare(self): + random.seed(self.seed) + actual = random.chisquare(50, size=(3, 2)) + desired = np.array([[63.87858175501090585, 68.68407748911370447], + [65.77116116901505904, 47.09686762438974483], + [72.3828403199695174, 74.18408615260374006]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + random.seed(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.54539444573611562, 0.45460555426388438], + [0.62345816822039413, 0.37654183177960598]], + [[0.55206000085785778, 0.44793999914214233], + [0.58964023305154301, 0.41035976694845688]], + [[0.59266909280647828, 0.40733090719352177], + [0.56974431743975207, 0.43025568256024799]]]) + assert_array_almost_equal(actual, desired, decimal=15) + bad_alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, bad_alpha) + + random.seed(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha) + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, alpha) + + def 
test_dirichlet_alpha_non_contiguous(self): + a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) + alpha = a[::2] + random.seed(self.seed) + non_contig = random.dirichlet(alpha, size=(3, 2)) + random.seed(self.seed) + contig = random.dirichlet(np.ascontiguousarray(alpha), + size=(3, 2)) + assert_array_almost_equal(non_contig, contig) + + def test_exponential(self): + random.seed(self.seed) + actual = random.exponential(1.1234, size=(3, 2)) + desired = np.array([[1.08342649775011624, 1.00607889924557314], + [2.46628830085216721, 2.49668106809923884], + [0.68717433461363442, 1.69175666993575979]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(random.exponential(scale=0), 0) + assert_raises(ValueError, random.exponential, scale=-0.) + + def test_f(self): + random.seed(self.seed) + actual = random.f(12, 77, size=(3, 2)) + desired = np.array([[1.21975394418575878, 1.75135759791559775], + [1.44803115017146489, 1.22108959480396262], + [1.02176975757740629, 1.34431827623300415]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + random.seed(self.seed) + actual = random.gamma(5, 3, size=(3, 2)) + desired = np.array([[24.60509188649287182, 28.54993563207210627], + [26.13476110204064184, 12.56988482927716078], + [31.71863275789960568, 33.30143302795922011]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + random.seed(self.seed) + actual = random.geometric(.123456789, size=(3, 2)) + desired = np.array([[8, 7], + [17, 17], + [5, 12]]) + assert_array_equal(actual, desired) + + def test_geometric_exceptions(self): + assert_raises(ValueError, random.geometric, 1.1) + assert_raises(ValueError, random.geometric, [1.1] * 10) + assert_raises(ValueError, random.geometric, -0.1) + assert_raises(ValueError, random.geometric, [-0.1] * 10) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + assert_raises(ValueError, random.geometric, np.nan) + assert_raises(ValueError, random.geometric, [np.nan] * 10) + + def test_gumbel(self): + random.seed(self.seed) + actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.19591898743416816, 0.34405539668096674], + [-1.4492522252274278, -1.47374816298446865], + [1.10651090478803416, -0.69535848626236174]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(random.gumbel(scale=0), 0) + assert_raises(ValueError, random.gumbel, scale=-0.) 
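+
+    def test_gumbel_degenerate_loc(self):
+        # Illustrative sketch, not part of the upstream suite: with
+        # scale=0 every Gumbel draw collapses to exactly the loc
+        # parameter, complementing the loc=0 check above.
+        assert_equal(random.gumbel(loc=1.5, scale=0, size=3),
+                     np.full(3, 1.5))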
+ + def test_hypergeometric(self): + random.seed(self.seed) + actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + desired = np.array([[10, 10], + [10, 10], + [9, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = random.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = random.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + random.seed(self.seed) + actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.66599721112760157, 0.52829452552221945], + [3.12791959514407125, 3.18202813572992005], + [-0.05391065675859356, 1.74901336242837324]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(random.laplace(scale=0), 0) + assert_raises(ValueError, random.laplace, scale=-0.) + + def test_logistic(self): + random.seed(self.seed) + actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[1.09232835305011444, 0.8648196662399954], + [4.27818590694950185, 4.33897006346929714], + [-0.21682183359214885, 2.63373365386060332]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + random.seed(self.seed) + actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[16.50698631688883822, 36.54846706092654784], + [22.67886599981281748, 0.71617561058995771], + [65.72798501792723869, 86.84341601437161273]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_lognormal_0(self): + assert_equal(random.lognormal(sigma=0), 1) + assert_raises(ValueError, random.lognormal, sigma=-0.) + + def test_logseries(self): + random.seed(self.seed) + actual = random.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[2, 2], + [6, 17], + [3, 6]]) + assert_array_equal(actual, desired) + + def test_logseries_zero(self): + assert random.logseries(0) == 1 + + @pytest.mark.parametrize("value", [np.nextafter(0., -1), 1., np.nan, 5.]) + def test_logseries_exceptions(self, value): + with np.errstate(invalid="ignore"): + with pytest.raises(ValueError): + random.logseries(value) + with pytest.raises(ValueError): + # contiguous path: + random.logseries(np.array([value] * 10)) + with pytest.raises(ValueError): + # non-contiguous path: + random.logseries(np.array([value] * 10)[::2]) + + def test_multinomial(self): + random.seed(self.seed) + actual = random.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) + desired = np.array([[[4, 3, 5, 4, 2, 2], + [5, 2, 8, 2, 2, 1]], + [[3, 4, 3, 6, 0, 4], + [2, 1, 4, 3, 6, 4]], + [[4, 4, 2, 5, 2, 3], + [4, 3, 4, 2, 3, 4]]]) + assert_array_equal(actual, desired) + + def test_multivariate_normal(self): + random.seed(self.seed) + mean = (.123456789, 10) + cov = [[1, 0], [0, 1]] + size = (3, 2) + actual = random.multivariate_normal(mean, cov, size) + desired = np.array([[[1.463620246718631, 11.73759122771936], + [1.622445133300628, 9.771356667546383]], + [[2.154490787682787, 12.170324946056553], + [1.719909438201865, 9.230548443648306]], + [[0.689515026297799, 9.880729819607714], + [-0.023054015651998, 9.201096623542879]]]) + + assert_array_almost_equal(actual, desired, decimal=15) + + # Check for default size, was raising deprecation warning + actual = random.multivariate_normal(mean, cov) + desired = np.array([0.895289569463708, 9.17180864067987]) + assert_array_almost_equal(actual, desired, decimal=15) + + # Check that non positive-semidefinite covariance warns with + # RuntimeWarning + mean = [0, 0] + cov = [[1, 2], [2, 1]] + assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) + + # and that it doesn't warn with RuntimeWarning check_valid='ignore' + assert_no_warnings(random.multivariate_normal, mean, cov, + check_valid='ignore') + + # and that it raises with RuntimeWarning check_valid='raises' + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='raise') + + cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) + with suppress_warnings() as sup: + random.multivariate_normal(mean, cov) + w = sup.record(RuntimeWarning) + assert len(w) == 0 + + mu = np.zeros(2) + cov = np.eye(2) + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='other') + assert_raises(ValueError, random.multivariate_normal, + np.zeros((2, 1, 1)), cov) + assert_raises(ValueError, random.multivariate_normal, + mu, np.empty((3, 2))) + assert_raises(ValueError, random.multivariate_normal, + mu, np.eye(3)) + + def test_negative_binomial(self): + random.seed(self.seed) + actual = random.negative_binomial(n=100, p=.12345, size=(3, 2)) + desired = np.array([[848, 841], + [892, 611], + [779, 647]]) + assert_array_equal(actual, desired) + + def test_negative_binomial_exceptions(self): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + assert_raises(ValueError, random.negative_binomial, 100, np.nan) + assert_raises(ValueError, random.negative_binomial, 100, + [np.nan] * 10) + + def test_noncentral_chisquare(self): + random.seed(self.seed) + actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + desired = np.array([[23.91905354498517511, 13.35324692733826346], + [31.22452661329736401, 16.60047399466177254], + [5.03461598262724586, 17.94973089023519464]]) + assert_array_almost_equal(actual, desired, decimal=14) + + actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + desired = np.array([[1.47145377828516666, 0.15052899268012659], + [0.00943803056963588, 1.02647251615666169], + [0.332334982684171, 0.15451287602753125]]) + assert_array_almost_equal(actual, desired, decimal=14) + + random.seed(self.seed) + actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[9.597154162763948, 11.725484450296079], + [10.413711048138335, 3.694475922923986], + [13.484222138963087, 14.377255424602957]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + random.seed(self.seed) + actual = random.noncentral_f(dfnum=5, 
dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[1.40598099674926669, 0.34207973179285761], + [3.57715069265772545, 7.92632662577829805], + [0.43741599463544162, 1.1774208752428319]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f_nan(self): + random.seed(self.seed) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan) + assert np.isnan(actual) + + def test_normal(self): + random.seed(self.seed) + actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[2.80378370443726244, 3.59863924443872163], + [3.121433477601256, -0.33382987590723379], + [4.18552478636557357, 4.46410668111310471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(random.normal(scale=0), 0) + assert_raises(ValueError, random.normal, scale=-0.) + + def test_pareto(self): + random.seed(self.seed) + actual = random.pareto(a=.123456789, size=(3, 2)) + desired = np.array( + [[2.46852460439034849e+03, 1.41286880810518346e+03], + [5.28287797029485181e+07, 6.57720981047328785e+07], + [1.40840323350391515e+02, 1.98390255135251704e+05]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + random.seed(self.seed) + actual = random.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [1, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('l').max + lamneg = -1 + assert_raises(ValueError, random.poisson, lamneg) + assert_raises(ValueError, random.poisson, [lamneg] * 10) + assert_raises(ValueError, random.poisson, lambig) + assert_raises(ValueError, random.poisson, [lambig] * 10) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + assert_raises(ValueError, random.poisson, np.nan) + assert_raises(ValueError, random.poisson, [np.nan] * 10) + + def test_power(self): + random.seed(self.seed) + actual = random.power(a=.123456789, size=(3, 2)) + desired = np.array([[0.02048932883240791, 0.01424192241128213], + [0.38446073748535298, 0.39499689943484395], + [0.00177699707563439, 0.13115505880863756]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + random.seed(self.seed) + actual = random.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[13.8882496494248393, 13.383318339044731], + [20.95413364294492098, 21.08285015800712614], + [11.06066537006854311, 17.35468505778271009]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(random.rayleigh(scale=0), 0) + assert_raises(ValueError, random.rayleigh, scale=-0.) 
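+
+    # Note on tolerances: these tests pin the exact seeded stream, but the
+    # transcendental-heavy paths use decimal=13/14 rather than 15,
+    # presumably to absorb last-ulp libm differences across platforms.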
+ + def test_standard_cauchy(self): + random.seed(self.seed) + actual = random.standard_cauchy(size=(3, 2)) + desired = np.array([[0.77127660196445336, -6.55601161955910605], + [0.93582023391158309, -2.07479293013759447], + [-4.74601644297011926, 0.18338989290760804]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_exponential(self): + random.seed(self.seed) + actual = random.standard_exponential(size=(3, 2)) + desired = np.array([[0.96441739162374596, 0.89556604882105506], + [2.1953785836319808, 2.22243285392490542], + [0.6116915921431676, 1.50592546727413201]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_gamma(self): + random.seed(self.seed) + actual = random.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[5.50841531318455058, 6.62953470301903103], + [5.93988484943779227, 2.31044849402133989], + [7.54838614231317084, 8.012756093271868]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_standard_gamma_0(self): + assert_equal(random.standard_gamma(shape=0), 0) + assert_raises(ValueError, random.standard_gamma, shape=-0.) + + def test_standard_normal(self): + random.seed(self.seed) + actual = random.standard_normal(size=(3, 2)) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randn_singleton(self): + random.seed(self.seed) + actual = random.randn() + desired = np.array(1.34016345771863121) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_t(self): + random.seed(self.seed) + actual = random.standard_t(df=10, size=(3, 2)) + desired = np.array([[0.97140611862659965, -0.08830486548450577], + [1.36311143689505321, -0.55317463909867071], + [-0.18473749069684214, 0.61181537341755321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + random.seed(self.seed) + actual = random.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[12.68117178949215784, 12.4129206149193152], + [16.20131377335158263, 16.25692138747600524], + [11.20400690911820263, 14.4978144835829923]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + random.seed(self.seed) + actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[6.99097932346268003, 6.73801597444323974], + [9.50364421400426274, 9.53130618907631089], + [5.48995325769805476, 8.47493103280052118]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. 
+ # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, random.uniform, throwing_float, + throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + random.seed(self.seed) + actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[2.28567572673902042, 2.89163838442285037], + [0.38198375564286025, 2.57638023113890746], + [1.19153771588353052, 1.83509849681825354]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + random.seed(self.seed) + r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + assert_(np.isfinite(r).all()) + + def test_vonmises_large(self): + # guard against changes in RandomState when Generator is fixed + random.seed(self.seed) + actual = random.vonmises(mu=0., kappa=1e7, size=3) + desired = np.array([4.634253748521111e-04, + 3.558873596114509e-04, + -2.337119622577433e-04]) + assert_array_almost_equal(actual, desired, decimal=8) + + def test_vonmises_nan(self): + random.seed(self.seed) + r = random.vonmises(mu=0., kappa=np.nan) + assert_(np.isnan(r)) + + def test_wald(self): + random.seed(self.seed) + actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[3.82935265715889983, 5.13125249184285526], + [0.35045403618358717, 1.50832396872003538], + [0.24124319895843183, 0.22031101461955038]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_weibull(self): + random.seed(self.seed) + actual = random.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.97097342648766727, 0.91422896443565516], + [1.89517770034962929, 1.91414357960479564], + [0.67057783752390987, 1.39494046635066793]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + random.seed(self.seed) + assert_equal(random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, random.weibull, a=-0.) 
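+
+    # zipf below, like the other integer-valued draws above (geometric,
+    # logseries, poisson), is compared exactly with assert_array_equal;
+    # only the floating-point distributions need a decimal tolerance.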
+ + def test_zipf(self): + random.seed(self.seed) + actual = random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[66, 29], + [1, 1], + [3, 13]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + def setup_method(self): + self.seed = 123456789 + + def set_seed(self): + random.seed(self.seed) + + def test_uniform(self): + low = [0] + high = [1] + uniform = random.uniform + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.set_seed() + actual = uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + self.set_seed() + actual = uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + normal = random.normal + desired = np.array([2.2129019979039612, + 2.1283977976520019, + 1.8417114045748335]) + + self.set_seed() + actual = normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc * 3, bad_scale) + + self.set_seed() + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + beta = random.beta + desired = np.array([0.19843558305989056, + 0.075230336409423643, + 0.24976865978980844]) + + self.set_seed() + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + self.set_seed() + actual = beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a, b * 3) + assert_raises(ValueError, beta, a, bad_b * 3) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + exponential = random.exponential + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.set_seed() + actual = exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + std_gamma = random.standard_gamma + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.set_seed() + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + gamma = random.gamma + desired = np.array([1.5221370731769048, + 1.5277256455738331, + 1.4248762625178359]) + + self.set_seed() + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + self.set_seed() + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + f = random.f + desired = np.array([0.80038951638264799, + 0.86768719635363512, + 2.7251095168386801]) + + self.set_seed() + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, 
desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + self.set_seed() + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + nonc_f = random.noncentral_f + desired = np.array([9.1393943263705211, + 13.025456344595602, + 8.8018098359100545]) + + self.set_seed() + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) + + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + self.set_seed() + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + self.set_seed() + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + self.set_seed() + desired = np.array([6.869638627492048, 0.785880199263955]) + actual = random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + chisquare = random.chisquare + desired = np.array([0.57022801133088286, + 0.51947702108840776, + 0.1320969254923558]) + + self.set_seed() + actual = chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + nonc_chi = random.noncentral_chisquare + desired = np.array([9.0015599467913763, + 4.5804135049718742, + 6.0872302432834564]) + + self.set_seed() + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + self.set_seed() + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + t = random.standard_t + desired = np.array([3.0702872575217643, + 5.8560725167361607, + 1.0274791436474273]) + + self.set_seed() + actual = t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, t, bad_df * 3) + assert_raises(ValueError, random.standard_t, bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + vonmises = random.vonmises + desired = np.array([2.9883443664201312, + -2.7064099483995943, + -1.8672476700665914]) + + self.set_seed() + actual = vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu * 3, bad_kappa) + + 
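+        # same expected values, now broadcasting kappa instead of mu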
self.set_seed() + actual = vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + pareto = random.pareto + desired = np.array([1.1405622680198362, + 1.1465519762044529, + 1.0389564467453547]) + + self.set_seed() + actual = pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, pareto, bad_a * 3) + assert_raises(ValueError, random.pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + weibull = random.weibull + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.set_seed() + actual = weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, weibull, bad_a * 3) + assert_raises(ValueError, random.weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + power = random.power + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.set_seed() + actual = power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, power, bad_a * 3) + assert_raises(ValueError, random.power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + laplace = random.laplace + desired = np.array([0.067921356028507157, + 0.070715642226971326, + 0.019290950698972624]) + + self.set_seed() + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + self.set_seed() + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + gumbel = random.gumbel + desired = np.array([0.2730318639556768, + 0.26936705726291116, + 0.33906220393037939]) + + self.set_seed() + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + self.set_seed() + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + logistic = random.logistic + desired = np.array([0.13152135837586171, + 0.13675915696285773, + 0.038216792802833396]) + + self.set_seed() + actual = logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc * 3, bad_scale) + + self.set_seed() + actual = logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc, bad_scale * 3) + assert_equal(random.logistic(1.0, 0.0), 1.0) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + lognormal = random.lognormal + desired = np.array([9.1422086044848427, + 8.4013952870126261, + 6.3073234116578671]) + + self.set_seed() + actual = lognormal(mean * 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma) + + self.set_seed() + actual = lognormal(mean, sigma * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + assert_raises(ValueError, 
random.lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + rayleigh = random.rayleigh + desired = np.array([1.2337491937897689, + 1.2360119924878694, + 1.1936818095781789]) + + self.set_seed() + actual = rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + wald = random.wald + desired = np.array([0.11873681120271318, + 0.12450084820795027, + 0.9096122728408238]) + + self.set_seed() + actual = wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean * 3, scale) + assert_raises(ValueError, wald, mean * 3, bad_scale) + assert_raises(ValueError, random.wald, bad_mean * 3, scale) + assert_raises(ValueError, random.wald, mean * 3, bad_scale) + + self.set_seed() + actual = wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean, scale * 3) + assert_raises(ValueError, wald, mean, bad_scale * 3) + assert_raises(ValueError, wald, 0.0, 1) + assert_raises(ValueError, wald, 0.5, 0.0) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + triangular = random.triangular + desired = np.array([2.03339048710429, + 2.0347400359389356, + 2.0095991069536208]) + + self.set_seed() + actual = triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + right) + + self.set_seed() + actual = triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + right) + + self.set_seed() + actual = triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + right * 3) + + assert_raises(ValueError, triangular, 10., 0., 20.) + assert_raises(ValueError, triangular, 10., 25., 20.) + assert_raises(ValueError, triangular, 10., 10., 10.) 
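+
+    def test_broadcast_output_shape_sketch(self):
+        # Illustrative sketch, not part of the upstream suite: whichever
+        # argument carries the length-3 list, the draw broadcasts to a
+        # length-3 output.
+        self.set_seed()
+        assert_equal(random.normal([0., 0., 0.], 1).shape, (3,))
+        self.set_seed()
+        assert_equal(random.normal(0., [1., 1., 1.]).shape, (3,))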
+ + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + binom = random.binomial + desired = np.array([1, 1, 1]) + + self.set_seed() + actual = binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n * 3, p) + assert_raises(ValueError, binom, n * 3, bad_p_one) + assert_raises(ValueError, binom, n * 3, bad_p_two) + + self.set_seed() + actual = binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n, p * 3) + assert_raises(ValueError, binom, n, bad_p_one * 3) + assert_raises(ValueError, binom, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + neg_binom = random.negative_binomial + desired = np.array([1, 0, 1]) + + self.set_seed() + actual = neg_binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n * 3, p) + assert_raises(ValueError, neg_binom, n * 3, bad_p_one) + assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + + self.set_seed() + actual = neg_binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n, p * 3) + assert_raises(ValueError, neg_binom, n, bad_p_one * 3) + assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + + def test_poisson(self): + max_lam = random.RandomState()._poisson_lam_max + + lam = [1] + bad_lam_one = [-1] + bad_lam_two = [max_lam * 2] + poisson = random.poisson + desired = np.array([1, 1, 0]) + + self.set_seed() + actual = poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, poisson, bad_lam_one * 3) + assert_raises(ValueError, poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + zipf = random.zipf + desired = np.array([2, 2, 1]) + + self.set_seed() + actual = zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, zipf, np.nan) + assert_raises(ValueError, zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + geom = random.geometric + desired = np.array([2, 2, 2]) + + self.set_seed() + actual = geom(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, geom, bad_p_one * 3) + assert_raises(ValueError, geom, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [0] + bad_nsample_two = [4] + hypergeom = random.hypergeometric + desired = np.array([1, 1, 1]) + + self.set_seed() + actual = hypergeom(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + + self.set_seed() + actual = hypergeom(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + + self.set_seed() + actual = hypergeom(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, 
hypergeom, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + + assert_raises(ValueError, hypergeom, -1, 10, 20) + assert_raises(ValueError, hypergeom, 10, -1, 20) + assert_raises(ValueError, hypergeom, 10, 10, 0) + assert_raises(ValueError, hypergeom, 10, 10, 25) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + logseries = random.logseries + desired = np.array([1, 1, 1]) + + self.set_seed() + actual = logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, logseries, bad_p_one * 3) + assert_raises(ValueError, logseries, bad_p_two * 3) + + +@pytest.mark.skipif(IS_WASM, reason="can't start thread") +class TestThread: + # make sure each state produces the same sequence even in threads + def setup_method(self): + self.seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(random.RandomState(s), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(random.RandomState(s), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + def test_normal(self): + def gen_random(state, out): + out[...] = state.normal(size=10000) + + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1 / 6.] 
* 6, size=10000) + + self.check_function(gen_random, sz=(10000, 6)) + + +# See Issue #4263 +class TestSingleEltArrayInput: + def setup_method(self): + self.argOne = np.array([2]) + self.argTwo = np.array([3]) + self.argThree = np.array([4]) + self.tgtShape = (1,) + + def test_one_arg_funcs(self): + funcs = (random.exponential, random.standard_gamma, + random.chisquare, random.standard_t, + random.pareto, random.weibull, + random.power, random.rayleigh, + random.poisson, random.zipf, + random.geometric, random.logseries) + + probfuncs = (random.geometric, random.logseries) + + for func in funcs: + if func in probfuncs: # p < 1.0 + out = func(np.array([0.5])) + + else: + out = func(self.argOne) + + assert_equal(out.shape, self.tgtShape) + + def test_two_arg_funcs(self): + funcs = (random.uniform, random.normal, + random.beta, random.gamma, + random.f, random.noncentral_chisquare, + random.vonmises, random.laplace, + random.gumbel, random.logistic, + random.lognormal, random.wald, + random.binomial, random.negative_binomial) + + probfuncs = (random.binomial, random.negative_binomial) + + for func in funcs: + if func in probfuncs: # p <= 1 + argTwo = np.array([0.5]) + + else: + argTwo = self.argTwo + + out = func(self.argOne, argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, argTwo[0]) + assert_equal(out.shape, self.tgtShape) + + def test_three_arg_funcs(self): + funcs = [random.noncentral_f, random.triangular, + random.hypergeometric] + + for func in funcs: + out = func(self.argOne, self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, self.argTwo[0], self.argThree) + assert_equal(out.shape, self.tgtShape) + + +# Ensure returned array dtype is correct for platform +def test_integer_dtype(int_func): + random.seed(123456789) + fname, args, sha256 = int_func + f = getattr(random, fname) + actual = f(*args, size=2) + assert_(actual.dtype == np.dtype('l')) + + +def test_integer_repeat(int_func): + random.seed(123456789) + fname, args, sha256 = int_func + f = getattr(random, fname) + val = f(*args, size=1000000) + if sys.byteorder != 'little': + val = val.byteswap() + res = hashlib.sha256(val.view(np.int8)).hexdigest() + assert_(res == sha256) + + +def test_broadcast_size_error(): + # GH-16833 + with pytest.raises(ValueError): + random.binomial(1, [0.3, 0.7], size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], 0.3, size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], [0.3, 0.7], size=(2, 1)) + + +def test_randomstate_ctor_old_style_pickle(): + rs = np.random.RandomState(MT19937(0)) + rs.standard_normal(1) + # Directly call reduce which is used in pickling + ctor, args, state_a = rs.__reduce__() + # Simulate unpickling an old pickle that only has the name + assert args[:1] == ("MT19937",) + b = ctor(*args[:1]) + b.set_state(state_a) + state_b = b.get_state(legacy=False) + + assert_equal(state_a['bit_generator'], state_b['bit_generator']) + assert_array_equal(state_a['state']['key'], state_b['state']['key']) + assert_array_equal(state_a['state']['pos'], state_b['state']['pos']) + assert_equal(state_a['has_gauss'], state_b['has_gauss']) + assert_equal(state_a['gauss'], state_b['gauss']) + + +def test_hot_swap(restore_singleton_bitgen): + # GH 21808 + def_bg = np.random.default_rng(0) + bg = def_bg.bit_generator + 
np.random.set_bit_generator(bg)
+    assert isinstance(np.random.mtrand._rand._bit_generator, type(bg))
+
+    second_bg = np.random.get_bit_generator()
+    assert bg is second_bg
+
+
+def test_seed_alt_bit_gen(restore_singleton_bitgen):
+    # GH 21808
+    bg = PCG64(0)
+    np.random.set_bit_generator(bg)
+    state = np.random.get_state(legacy=False)
+    np.random.seed(1)
+    new_state = np.random.get_state(legacy=False)
+    print(state)
+    print(new_state)
+    assert state["bit_generator"] == "PCG64"
+    assert state["state"]["state"] != new_state["state"]["state"]
+    assert state["state"]["inc"] != new_state["state"]["inc"]
+
+
+def test_state_error_alt_bit_gen(restore_singleton_bitgen):
+    # GH 21808
+    state = np.random.get_state()
+    bg = PCG64(0)
+    np.random.set_bit_generator(bg)
+    with pytest.raises(ValueError, match="state must be for a PCG64"):
+        np.random.set_state(state)
+
+
+def test_swap_worked(restore_singleton_bitgen):
+    # GH 21808
+    np.random.seed(98765)
+    vals = np.random.randint(0, 2 ** 30, 10)
+    bg = PCG64(0)
+    state = bg.state
+    np.random.set_bit_generator(bg)
+    state_direct = np.random.get_state(legacy=False)
+    for field in state:
+        assert state[field] == state_direct[field]
+    np.random.seed(98765)
+    pcg_vals = np.random.randint(0, 2 ** 30, 10)
+    assert not np.all(vals == pcg_vals)
+    new_state = bg.state
+    assert new_state["state"]["state"] != state["state"]["state"]
+    assert new_state["state"]["inc"] == state["state"]["inc"]
+
+
+def test_swapped_singleton_against_direct(restore_singleton_bitgen):
+    np.random.set_bit_generator(PCG64(98765))
+    singleton_vals = np.random.randint(0, 2 ** 30, 10)
+    rg = np.random.RandomState(PCG64(98765))
+    non_singleton_vals = rg.randint(0, 2 ** 30, 10)
+    assert_equal(non_singleton_vals, singleton_vals)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_randomstate_regression.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_randomstate_regression.py
new file mode 100644
index 00000000..7ad19ab5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_randomstate_regression.py
@@ -0,0 +1,216 @@
+import sys
+
+import pytest
+
+from numpy.testing import (
+    assert_, assert_array_equal, assert_raises,
+    )
+import numpy as np
+
+from numpy import random
+
+
+class TestRegression:
+
+    def test_VonMises_range(self):
+        # Make sure generated random variables are in [-pi, pi].
+        # Regression test for ticket #986.
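+        # vonmises draws are angles on a circle, so every value must land
+        # in (-pi, pi] no matter how far outside that interval mu lies.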
+ for mu in np.linspace(-7., 7., 5): + r = random.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0)) + + # Test for ticket #5623 + args = [ + (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems + ] + is_64bits = sys.maxsize > 2**32 + if is_64bits and sys.platform != 'win32': + # Check for 64-bit systems + args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) + for arg in args: + assert_(random.hypergeometric(*arg) > 0) + + def test_logseries_convergence(self): + # Test for ticket #923 + N = 1000 + random.seed(0) + rvsn = random.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / N + msg = f'Frequency was {freq:f}, should be > 0.45' + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / N + msg = f'Frequency was {freq:f}, should be < 0.23' + assert_(freq < 0.23, msg) + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + random.seed(12345) + shuffled = list(t) + random.shuffle(shuffled) + expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) + + def test_call_within_randomstate(self): + # Check that custom RandomState does not call into global state + m = random.RandomState() + res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3]) + for i in range(3): + random.seed(i) + m.seed(4321) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. + random.multivariate_normal([0], [[0]], size=1) + random.multivariate_normal([0], [[0]], size=np.int_(1)) + random.multivariate_normal([0], [[0]], size=np.int64(1)) + + def test_beta_small_parameters(self): + # Test that beta with small a and b parameters does not produce + # NaNs due to roundoff errors causing 0 / 0, gh-5851 + random.seed(1234567890) + x = random.beta(0.0001, 0.0001, size=100) + assert_(not np.any(np.isnan(x)), 'Nans in random.beta') + + def test_choice_sum_of_probs_tolerance(self): + # The sum of probs should be 1.0 with some tolerance. + # For low precision dtypes the tolerance was too tight. + # See numpy github issue 6123. + random.seed(1234) + a = [1, 2, 3] + counts = [4, 4, 2] + for dt in np.float16, np.float32, np.float64: + probs = np.array(counts, dtype=dt) / sum(counts) + c = random.choice(a, p=probs) + assert_(c in a) + assert_raises(ValueError, random.choice, a, p=probs*0.9) + + def test_shuffle_of_array_of_different_length_strings(self): + # Test that permuting an array of different length strings + # will not cause a segfault on garbage collection + # Tests gh-7710 + random.seed(1234) + + a = np.array(['a', 'a' * 1000]) + + for _ in range(100): + random.shuffle(a) + + # Force Garbage Collection - should not segfault. 
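+        # The historical failure mode was a crash during collection, so an
+        # explicit gc pass makes any regression fail here rather than in
+        # whichever test happens to trigger collection next.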
+        import gc
+        gc.collect()
+
+    def test_shuffle_of_array_of_objects(self):
+        # Test that permuting an array of objects will not cause
+        # a segfault on garbage collection.
+        # See gh-7719
+        random.seed(1234)
+        a = np.array([np.arange(1), np.arange(4)], dtype=object)
+
+        for _ in range(1000):
+            random.shuffle(a)
+
+        # Force Garbage Collection - should not segfault.
+        import gc
+        gc.collect()
+
+    def test_permutation_subclass(self):
+        class N(np.ndarray):
+            pass
+
+        random.seed(1)
+        orig = np.arange(3).view(N)
+        perm = random.permutation(orig)
+        assert_array_equal(perm, np.array([0, 2, 1]))
+        assert_array_equal(orig, np.arange(3).view(N))
+
+        class M:
+            a = np.arange(5)
+
+            def __array__(self):
+                return self.a
+
+        random.seed(1)
+        m = M()
+        perm = random.permutation(m)
+        assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
+        assert_array_equal(m.__array__(), np.arange(5))
+
+    def test_warns_byteorder(self):
+        # GH 13159
+        other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
+        with pytest.deprecated_call(match='non-native byteorder is not'):
+            random.randint(0, 200, size=10, dtype=other_byteord_dt)
+
+    def test_named_argument_initialization(self):
+        # GH 13669
+        rs1 = np.random.RandomState(123456789)
+        rs2 = np.random.RandomState(seed=123456789)
+        assert rs1.randint(0, 100) == rs2.randint(0, 100)
+
+    def test_choice_return_dtype(self):
+        # GH 9867
+        c = np.random.choice(10, p=[.1]*10, size=2)
+        assert c.dtype == np.dtype(int)
+        c = np.random.choice(10, p=[.1]*10, replace=False, size=2)
+        assert c.dtype == np.dtype(int)
+        c = np.random.choice(10, size=2)
+        assert c.dtype == np.dtype(int)
+        c = np.random.choice(10, replace=False, size=2)
+        assert c.dtype == np.dtype(int)
+
+    @pytest.mark.skipif(np.iinfo('l').max < 2**32,
+                        reason='Cannot test with 32-bit C long')
+    def test_randint_117(self):
+        # GH 14189
+        random.seed(0)
+        expected = np.array([2357136044, 2546248239, 3071714933, 3626093760,
+                             2588848963, 3684848379, 2340255427, 3638918503,
+                             1819583497, 2678185683], dtype='int64')
+        actual = random.randint(2**32, size=10)
+        assert_array_equal(actual, expected)
+
+    def test_p_zero_stream(self):
+        # Regression test for gh-14522. Ensure that future versions
+        # generate the same variates as version 1.16.
+        np.random.seed(12345)
+        assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
+                           [0, 0, 0, 1, 1])
+
+    def test_n_zero_stream(self):
+        # Regression test for gh-14522. Ensure that future versions
+        # generate the same variates as version 1.16.
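+        # The n=0 row must be all zeros, while the n=10 row must still
+        # reproduce the historical 1.16 stream of variates.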
+ np.random.seed(8675309) + expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]]) + assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)), + expected) + + +def test_multinomial_empty(): + # gh-20483 + # Ensure that empty p-vals are correctly handled + assert random.multinomial(10, []).shape == (0,) + assert random.multinomial(3, [], size=(7, 5, 3)).shape == (7, 5, 3, 0) + + +def test_multinomial_1d_pval(): + # gh-20483 + with pytest.raises(TypeError, match="pvals must be a 1-d"): + random.multinomial(10, 0.3) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_regression.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_regression.py new file mode 100644 index 00000000..8bf41987 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_regression.py @@ -0,0 +1,149 @@ +import sys +from numpy.testing import ( + assert_, assert_array_equal, assert_raises, + ) +from numpy import random +import numpy as np + + +class TestRegression: + + def test_VonMises_range(self): + # Make sure generated random variables are in [-pi, pi]. + # Regression test for ticket #986. + for mu in np.linspace(-7., 7., 5): + r = random.mtrand.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0)) + + # Test for ticket #5623 + args = [ + (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems + ] + is_64bits = sys.maxsize > 2**32 + if is_64bits and sys.platform != 'win32': + # Check for 64-bit systems + args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) + for arg in args: + assert_(np.random.hypergeometric(*arg) > 0) + + def test_logseries_convergence(self): + # Test for ticket #923 + N = 1000 + np.random.seed(0) + rvsn = np.random.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / N + msg = f'Frequency was {freq:f}, should be > 0.45' + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / N + msg = f'Frequency was {freq:f}, should be < 0.23' + assert_(freq < 0.23, msg) + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + np.random.seed(12345) + shuffled = list(t) + random.shuffle(shuffled) + expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) + + def test_call_within_randomstate(self): + # Check that custom RandomState does not call into global state + m = np.random.RandomState() + res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3]) + for i in range(3): + np.random.seed(i) + m.seed(4321) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. 
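+        # Accept a plain Python int, np.int_, and np.int64 alike; each
+        # call below should simply succeed without raising.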
+ np.random.multivariate_normal([0], [[0]], size=1) + np.random.multivariate_normal([0], [[0]], size=np.int_(1)) + np.random.multivariate_normal([0], [[0]], size=np.int64(1)) + + def test_beta_small_parameters(self): + # Test that beta with small a and b parameters does not produce + # NaNs due to roundoff errors causing 0 / 0, gh-5851 + np.random.seed(1234567890) + x = np.random.beta(0.0001, 0.0001, size=100) + assert_(not np.any(np.isnan(x)), 'Nans in np.random.beta') + + def test_choice_sum_of_probs_tolerance(self): + # The sum of probs should be 1.0 with some tolerance. + # For low precision dtypes the tolerance was too tight. + # See numpy github issue 6123. + np.random.seed(1234) + a = [1, 2, 3] + counts = [4, 4, 2] + for dt in np.float16, np.float32, np.float64: + probs = np.array(counts, dtype=dt) / sum(counts) + c = np.random.choice(a, p=probs) + assert_(c in a) + assert_raises(ValueError, np.random.choice, a, p=probs*0.9) + + def test_shuffle_of_array_of_different_length_strings(self): + # Test that permuting an array of different length strings + # will not cause a segfault on garbage collection + # Tests gh-7710 + np.random.seed(1234) + + a = np.array(['a', 'a' * 1000]) + + for _ in range(100): + np.random.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_shuffle_of_array_of_objects(self): + # Test that permuting an array of objects will not cause + # a segfault on garbage collection. + # See gh-7719 + np.random.seed(1234) + a = np.array([np.arange(1), np.arange(4)], dtype=object) + + for _ in range(1000): + np.random.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_permutation_subclass(self): + class N(np.ndarray): + pass + + np.random.seed(1) + orig = np.arange(3).view(N) + perm = np.random.permutation(orig) + assert_array_equal(perm, np.array([0, 2, 1])) + assert_array_equal(orig, np.arange(3).view(N)) + + class M: + a = np.arange(5) + + def __array__(self): + return self.a + + np.random.seed(1) + m = M() + perm = np.random.permutation(m) + assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) + assert_array_equal(m.__array__(), np.arange(5)) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_seed_sequence.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_seed_sequence.py new file mode 100644 index 00000000..f08cf80f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_seed_sequence.py @@ -0,0 +1,80 @@ +import numpy as np +from numpy.testing import assert_array_equal, assert_array_compare + +from numpy.random import SeedSequence + + +def test_reference_data(): + """ Check that SeedSequence generates data the same as the C++ reference. 
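+    Each input entropy vector below must hash to the pinned uint32 words,
+    and to the pinned 64-bit words when generate_state is asked for
+    dtype=np.uint64.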
+ + https://gist.github.com/imneme/540829265469e673d045 + """ + inputs = [ + [3735928559, 195939070, 229505742, 305419896], + [3668361503, 4165561550, 1661411377, 3634257570], + [164546577, 4166754639, 1765190214, 1303880213], + [446610472, 3941463886, 522937693, 1882353782], + [1864922766, 1719732118, 3882010307, 1776744564], + [4141682960, 3310988675, 553637289, 902896340], + [1134851934, 2352871630, 3699409824, 2648159817], + [1240956131, 3107113773, 1283198141, 1924506131], + [2669565031, 579818610, 3042504477, 2774880435], + [2766103236, 2883057919, 4029656435, 862374500], + ] + outputs = [ + [3914649087, 576849849, 3593928901, 2229911004], + [2240804226, 3691353228, 1365957195, 2654016646], + [3562296087, 3191708229, 1147942216, 3726991905], + [1403443605, 3591372999, 1291086759, 441919183], + [1086200464, 2191331643, 560336446, 3658716651], + [3249937430, 2346751812, 847844327, 2996632307], + [2584285912, 4034195531, 3523502488, 169742686], + [959045797, 3875435559, 1886309314, 359682705], + [3978441347, 432478529, 3223635119, 138903045], + [296367413, 4262059219, 13109864, 3283683422], + ] + outputs64 = [ + [2477551240072187391, 9577394838764454085], + [15854241394484835714, 11398914698975566411], + [13708282465491374871, 16007308345579681096], + [15424829579845884309, 1898028439751125927], + [9411697742461147792, 15714068361935982142], + [10079222287618677782, 12870437757549876199], + [17326737873898640088, 729039288628699544], + [16644868984619524261, 1544825456798124994], + [1857481142255628931, 596584038813451439], + [18305404959516669237, 14103312907920476776], + ] + for seed, expected, expected64 in zip(inputs, outputs, outputs64): + expected = np.array(expected, dtype=np.uint32) + ss = SeedSequence(seed) + state = ss.generate_state(len(expected)) + assert_array_equal(state, expected) + state64 = ss.generate_state(len(expected64), dtype=np.uint64) + assert_array_equal(state64, expected64) + + +def test_zero_padding(): + """ Ensure that the implicit zero-padding does not cause problems. + """ + # Ensure that large integers are inserted in little-endian fashion to avoid + # trailing 0s. + ss0 = SeedSequence(42) + ss1 = SeedSequence(42 << 32) + assert_array_compare( + np.not_equal, + ss0.generate_state(4), + ss1.generate_state(4)) + + # Ensure backwards compatibility with the original 0.17 release for small + # integers and no spawn key. + expected42 = np.array([3444837047, 2669555309, 2046530742, 3581440988], + dtype=np.uint32) + assert_array_equal(SeedSequence(42).generate_state(4), expected42) + + # Regression test for gh-16539 to ensure that the implicit 0s don't + # conflict with spawn keys. 
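+    # For example, SeedSequence(42, spawn_key=(0,)) appends words to the
+    # entropy pool, so its state must differ from plain SeedSequence(42)
+    # even though the appended words are all zero: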
+ assert_array_compare( + np.not_equal, + SeedSequence(42, spawn_key=(0,)).generate_state(4), + expected42) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_smoke.py b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_smoke.py new file mode 100644 index 00000000..9becc434 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/random/tests/test_smoke.py @@ -0,0 +1,818 @@ +import pickle +from functools import partial + +import numpy as np +import pytest +from numpy.testing import assert_equal, assert_, assert_array_equal +from numpy.random import (Generator, MT19937, PCG64, PCG64DXSM, Philox, SFC64) + +@pytest.fixture(scope='module', + params=(np.bool_, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64)) +def dtype(request): + return request.param + + +def params_0(f): + val = f() + assert_(np.isscalar(val)) + val = f(10) + assert_(val.shape == (10,)) + val = f((10, 10)) + assert_(val.shape == (10, 10)) + val = f((10, 10, 10)) + assert_(val.shape == (10, 10, 10)) + val = f(size=(5, 5)) + assert_(val.shape == (5, 5)) + + +def params_1(f, bounded=False): + a = 5.0 + b = np.arange(2.0, 12.0) + c = np.arange(2.0, 102.0).reshape((10, 10)) + d = np.arange(2.0, 1002.0).reshape((10, 10, 10)) + e = np.array([2.0, 3.0]) + g = np.arange(2.0, 12.0).reshape((1, 10, 1)) + if bounded: + a = 0.5 + b = b / (1.5 * b.max()) + c = c / (1.5 * c.max()) + d = d / (1.5 * d.max()) + e = e / (1.5 * e.max()) + g = g / (1.5 * g.max()) + + # Scalar + f(a) + # Scalar - size + f(a, size=(10, 10)) + # 1d + f(b) + # 2d + f(c) + # 3d + f(d) + # 1d size + f(b, size=10) + # 2d - size - broadcast + f(e, size=(10, 2)) + # 3d - size + f(g, size=(10, 10, 10)) + + +def comp_state(state1, state2): + identical = True + if isinstance(state1, dict): + for key in state1: + identical &= comp_state(state1[key], state2[key]) + elif type(state1) != type(state2): + identical &= type(state1) == type(state2) + else: + if (isinstance(state1, (list, tuple, np.ndarray)) and isinstance( + state2, (list, tuple, np.ndarray))): + for s1, s2 in zip(state1, state2): + identical &= comp_state(s1, s2) + else: + identical &= state1 == state2 + return identical + + +def warmup(rg, n=None): + if n is None: + n = 11 + np.random.randint(0, 20) + rg.standard_normal(n) + rg.standard_normal(n) + rg.standard_normal(n, dtype=np.float32) + rg.standard_normal(n, dtype=np.float32) + rg.integers(0, 2 ** 24, n, dtype=np.uint64) + rg.integers(0, 2 ** 48, n, dtype=np.uint64) + rg.standard_gamma(11.0, n) + rg.standard_gamma(11.0, n, dtype=np.float32) + rg.random(n, dtype=np.float64) + rg.random(n, dtype=np.float32) + + +class RNG: + @classmethod + def setup_class(cls): + # Overridden in test classes. 
Place holder to silence IDE noise + cls.bit_generator = PCG64 + cls.advance = None + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + @classmethod + def _extra_setup(cls): + cls.vec_1d = np.arange(2.0, 102.0) + cls.vec_2d = np.arange(2.0, 102.0)[None, :] + cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100)) + cls.seed_error = TypeError + + def _reset_state(self): + self.rg.bit_generator.state = self.initial_state + + def test_init(self): + rg = Generator(self.bit_generator()) + state = rg.bit_generator.state + rg.standard_normal(1) + rg.standard_normal(1) + rg.bit_generator.state = state + new_state = rg.bit_generator.state + assert_(comp_state(state, new_state)) + + def test_advance(self): + state = self.rg.bit_generator.state + if hasattr(self.rg.bit_generator, 'advance'): + self.rg.bit_generator.advance(self.advance) + assert_(not comp_state(state, self.rg.bit_generator.state)) + else: + bitgen_name = self.rg.bit_generator.__class__.__name__ + pytest.skip(f'Advance is not supported by {bitgen_name}') + + def test_jump(self): + state = self.rg.bit_generator.state + if hasattr(self.rg.bit_generator, 'jumped'): + bit_gen2 = self.rg.bit_generator.jumped() + jumped_state = bit_gen2.state + assert_(not comp_state(state, jumped_state)) + self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17) + self.rg.bit_generator.state = state + bit_gen3 = self.rg.bit_generator.jumped() + rejumped_state = bit_gen3.state + assert_(comp_state(jumped_state, rejumped_state)) + else: + bitgen_name = self.rg.bit_generator.__class__.__name__ + if bitgen_name not in ('SFC64',): + raise AttributeError(f'no "jumped" in {bitgen_name}') + pytest.skip(f'Jump is not supported by {bitgen_name}') + + def test_uniform(self): + r = self.rg.uniform(-1.0, 0.0, size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + + def test_uniform_array(self): + r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + r = self.rg.uniform(np.array([-1.0] * 10), + np.array([0.0] * 10), size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10) + assert_(len(r) == 10) + assert_((r > -1).all()) + assert_((r <= 0).all()) + + def test_random(self): + assert_(len(self.rg.random(10)) == 10) + params_0(self.rg.random) + + def test_standard_normal_zig(self): + assert_(len(self.rg.standard_normal(10)) == 10) + + def test_standard_normal(self): + assert_(len(self.rg.standard_normal(10)) == 10) + params_0(self.rg.standard_normal) + + def test_standard_gamma(self): + assert_(len(self.rg.standard_gamma(10, 10)) == 10) + assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10) + params_1(self.rg.standard_gamma) + + def test_standard_exponential(self): + assert_(len(self.rg.standard_exponential(10)) == 10) + params_0(self.rg.standard_exponential) + + def test_standard_exponential_float(self): + randoms = self.rg.standard_exponential(10, dtype='float32') + assert_(len(randoms) == 10) + assert randoms.dtype == np.float32 + params_0(partial(self.rg.standard_exponential, dtype='float32')) + + def test_standard_exponential_float_log(self): + randoms = self.rg.standard_exponential(10, dtype='float32', + method='inv') + assert_(len(randoms) == 10) + assert randoms.dtype == np.float32 + params_0(partial(self.rg.standard_exponential, 
dtype='float32',
+                         method='inv'))
+
+    def test_standard_cauchy(self):
+        assert_(len(self.rg.standard_cauchy(10)) == 10)
+        params_0(self.rg.standard_cauchy)
+
+    def test_standard_t(self):
+        assert_(len(self.rg.standard_t(10, 10)) == 10)
+        params_1(self.rg.standard_t)
+
+    def test_binomial(self):
+        assert_(self.rg.binomial(10, .5) >= 0)
+        assert_(self.rg.binomial(1000, .5) >= 0)
+
+    def test_reset_state(self):
+        state = self.rg.bit_generator.state
+        int_1 = self.rg.integers(2**31)
+        self.rg.bit_generator.state = state
+        int_2 = self.rg.integers(2**31)
+        assert_(int_1 == int_2)
+
+    def test_entropy_init(self):
+        rg = Generator(self.bit_generator())
+        rg2 = Generator(self.bit_generator())
+        assert_(not comp_state(rg.bit_generator.state,
+                               rg2.bit_generator.state))
+
+    def test_seed(self):
+        rg = Generator(self.bit_generator(*self.seed))
+        rg2 = Generator(self.bit_generator(*self.seed))
+        rg.random()
+        rg2.random()
+        assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
+
+    def test_reset_state_gauss(self):
+        rg = Generator(self.bit_generator(*self.seed))
+        rg.standard_normal()
+        state = rg.bit_generator.state
+        n1 = rg.standard_normal(size=10)
+        rg2 = Generator(self.bit_generator())
+        rg2.bit_generator.state = state
+        n2 = rg2.standard_normal(size=10)
+        assert_array_equal(n1, n2)
+
+    def test_reset_state_uint32(self):
+        rg = Generator(self.bit_generator(*self.seed))
+        rg.integers(0, 2 ** 24, 120, dtype=np.uint32)
+        state = rg.bit_generator.state
+        n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32)
+        rg2 = Generator(self.bit_generator())
+        rg2.bit_generator.state = state
+        n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32)
+        assert_array_equal(n1, n2)
+
+    def test_reset_state_float(self):
+        rg = Generator(self.bit_generator(*self.seed))
+        rg.random(dtype='float32')
+        state = rg.bit_generator.state
+        n1 = rg.random(size=10, dtype='float32')
+        rg2 = Generator(self.bit_generator())
+        rg2.bit_generator.state = state
+        n2 = rg2.random(size=10, dtype='float32')
+        assert_((n1 == n2).all())
+
+    def test_shuffle(self):
+        original = np.arange(200, 0, -1)
+        shuffled = original.copy()
+        self.rg.shuffle(shuffled)
+        assert_((original != shuffled).any())
+
+    def test_permutation(self):
+        original = np.arange(200, 0, -1)
+        permuted = self.rg.permutation(original)
+        assert_((original != permuted).any())
+
+    def test_beta(self):
+        vals = self.rg.beta(2.0, 2.0, 10)
+        assert_(len(vals) == 10)
+        vals = self.rg.beta(np.array([2.0] * 10), 2.0)
+        assert_(len(vals) == 10)
+        vals = self.rg.beta(2.0, np.array([2.0] * 10))
+        assert_(len(vals) == 10)
+        vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10))
+        assert_(len(vals) == 10)
+        vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10))
+        assert_(vals.shape == (10, 10))
+
+    def test_bytes(self):
+        vals = self.rg.bytes(10)
+        assert_(len(vals) == 10)
+
+    def test_chisquare(self):
+        vals = self.rg.chisquare(2.0, 10)
+        assert_(len(vals) == 10)
+        params_1(self.rg.chisquare)
+
+    def test_exponential(self):
+        vals = self.rg.exponential(2.0, 10)
+        assert_(len(vals) == 10)
+        params_1(self.rg.exponential)
+
+    def test_f(self):
+        vals = self.rg.f(3, 1000, 10)
+        assert_(len(vals) == 10)
+
+    def test_gamma(self):
+        vals = self.rg.gamma(3, 2, 10)
+        assert_(len(vals) == 10)
+
+    def test_geometric(self):
+        vals = self.rg.geometric(0.5, 10)
+        assert_(len(vals) == 10)
+        params_1(self.rg.geometric, bounded=True)
+
+    def test_gumbel(self):
+        vals = self.rg.gumbel(2.0, 2.0, 10)
+        assert_(len(vals) == 10)
+
+    def test_laplace(self):
+        vals = self.rg.laplace(2.0, 2.0, 10)
+        assert_(len(vals) == 10)
+
+    def test_logistic(self):
+        vals = self.rg.logistic(2.0, 2.0, 10)
+        assert_(len(vals) == 10)
+
+    def test_logseries(self):
+        vals = self.rg.logseries(0.5, 10)
+        assert_(len(vals) == 10)
+
+    def test_negative_binomial(self):
+        vals = self.rg.negative_binomial(10, 0.2, 10)
+        assert_(len(vals) == 10)
+
+    def test_noncentral_chisquare(self):
+        vals = self.rg.noncentral_chisquare(10, 2, 10)
+        assert_(len(vals) == 10)
+
+    def test_noncentral_f(self):
+        vals = self.rg.noncentral_f(3, 1000, 2, 10)
+        assert_(len(vals) == 10)
+        vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2)
+        assert_(len(vals) == 10)
+        vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2)
+        assert_(len(vals) == 10)
+        vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10))
+        assert_(len(vals) == 10)
+
+    def test_normal(self):
+        vals = self.rg.normal(10, 0.2, 10)
+        assert_(len(vals) == 10)
+
+    def test_pareto(self):
+        vals = self.rg.pareto(3.0, 10)
+        assert_(len(vals) == 10)
+
+    def test_poisson(self):
+        vals = self.rg.poisson(10, 10)
+        assert_(len(vals) == 10)
+        vals = self.rg.poisson(np.array([10] * 10))
+        assert_(len(vals) == 10)
+        params_1(self.rg.poisson)
+
+    def test_power(self):
+        vals = self.rg.power(0.2, 10)
+        assert_(len(vals) == 10)
+
+    def test_integers(self):
+        vals = self.rg.integers(10, 20, 10)
+        assert_(len(vals) == 10)
+
+    def test_rayleigh(self):
+        vals = self.rg.rayleigh(0.2, 10)
+        assert_(len(vals) == 10)
+        params_1(self.rg.rayleigh, bounded=True)
+
+    def test_vonmises(self):
+        vals = self.rg.vonmises(10, 0.2, 10)
+        assert_(len(vals) == 10)
+
+    def test_wald(self):
+        vals = self.rg.wald(1.0, 1.0, 10)
+        assert_(len(vals) == 10)
+
+    def test_weibull(self):
+        vals = self.rg.weibull(1.0, 10)
+        assert_(len(vals) == 10)
+
+    def test_zipf(self):
+        vals = self.rg.zipf(10, 10)
+        assert_(len(vals) == 10)
+        vals = self.rg.zipf(self.vec_1d)
+        assert_(len(vals) == 100)
+        vals = self.rg.zipf(self.vec_2d)
+        assert_(vals.shape == (1, 100))
+        vals = self.rg.zipf(self.mat)
+        assert_(vals.shape == (100, 100))
+
+    def test_hypergeometric(self):
+        vals = self.rg.hypergeometric(25, 25, 20)
+        assert_(np.isscalar(vals))
+        vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20)
+        assert_(vals.shape == (10,))
+
+    def test_triangular(self):
+        vals = self.rg.triangular(-5, 0, 5)
+        assert_(np.isscalar(vals))
+        vals = self.rg.triangular(-5, np.array([0] * 10), 5)
+        assert_(vals.shape == (10,))
+
+    def test_multivariate_normal(self):
+        mean = [0, 0]
+        cov = [[1, 0], [0, 100]]  # diagonal covariance
+        x = self.rg.multivariate_normal(mean, cov, 5000)
+        assert_(x.shape == (5000, 2))
+        x_zig = self.rg.multivariate_normal(mean, cov, 5000)
+        assert_(x_zig.shape == (5000, 2))
+        x_inv = self.rg.multivariate_normal(mean, cov, 5000)
+        assert_(x_inv.shape == (5000, 2))
+        assert_((x_zig != x_inv).any())
+
+    def test_multinomial(self):
+        vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3])
+        assert_(vals.shape == (2,))
+        vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10)
+        assert_(vals.shape == (10, 2))
+
+    def test_dirichlet(self):
+        s = self.rg.dirichlet((10, 5, 3), 20)
+        assert_(s.shape == (20, 3))
+
+    def test_pickle(self):
+        pick = pickle.dumps(self.rg)
+        unpick = pickle.loads(pick)
+        assert_((type(self.rg) == type(unpick)))
+        assert_(comp_state(self.rg.bit_generator.state,
+                           unpick.bit_generator.state))
+
+        pick = pickle.dumps(self.rg)
+        unpick = pickle.loads(pick)
+        assert_((type(self.rg) == type(unpick)))
+        assert_(comp_state(self.rg.bit_generator.state,
+                           unpick.bit_generator.state))
+
+    def
test_seed_array(self): + if self.seed_vector_bits is None: + bitgen_name = self.bit_generator.__name__ + pytest.skip(f'Vector seeding is not supported by {bitgen_name}') + + if self.seed_vector_bits == 32: + dtype = np.uint32 + else: + dtype = np.uint64 + seed = np.array([1], dtype=dtype) + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(1) + state2 = bg.state + assert_(comp_state(state1, state2)) + + seed = np.arange(4, dtype=dtype) + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + seed = np.arange(1500, dtype=dtype) + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + seed = 2 ** np.mod(np.arange(1500, dtype=dtype), + self.seed_vector_bits - 1) + 1 + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + def test_uniform_float(self): + rg = Generator(self.bit_generator(12345)) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.random(11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.random(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_gamma_floats(self): + rg = Generator(self.bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_gamma(4.0, 11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_normal_floats(self): + rg = Generator(self.bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_normal(11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_normal(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_normal_zig_floats(self): + rg = Generator(self.bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_normal(11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_normal(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_output_fill(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.standard_normal(out=existing) + rg.bit_generator.state = state + direct = rg.standard_normal(size=size) + assert_equal(direct, existing) + + sized = np.empty(size) + rg.bit_generator.state = state + rg.standard_normal(out=sized, size=sized.shape) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_normal(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_normal(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_uniform(self): + rg = self.rg + state = 
rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.random(out=existing) + rg.bit_generator.state = state + direct = rg.random(size=size) + assert_equal(direct, existing) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.random(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.random(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_exponential(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.standard_exponential(out=existing) + rg.bit_generator.state = state + direct = rg.standard_exponential(size=size) + assert_equal(direct, existing) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_exponential(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_exponential(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_gamma(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.zeros(size) + rg.bit_generator.state = state + rg.standard_gamma(1.0, out=existing) + rg.bit_generator.state = state + direct = rg.standard_gamma(1.0, size=size) + assert_equal(direct, existing) + + existing = np.zeros(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_gamma(1.0, out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_gamma(1.0, size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_gamma_broadcast(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + mu = np.arange(97.0) + 1.0 + existing = np.zeros(size) + rg.bit_generator.state = state + rg.standard_gamma(mu, out=existing) + rg.bit_generator.state = state + direct = rg.standard_gamma(mu, size=size) + assert_equal(direct, existing) + + existing = np.zeros(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_gamma(mu, out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_gamma(mu, size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_fill_error(self): + rg = self.rg + size = (31, 7, 97) + existing = np.empty(size) + with pytest.raises(TypeError): + rg.standard_normal(out=existing, dtype=np.float32) + with pytest.raises(ValueError): + rg.standard_normal(out=existing[::3]) + existing = np.empty(size, dtype=np.float32) + with pytest.raises(TypeError): + rg.standard_normal(out=existing, dtype=np.float64) + + existing = np.zeros(size, dtype=np.float32) + with pytest.raises(TypeError): + rg.standard_gamma(1.0, out=existing, dtype=np.float64) + with pytest.raises(ValueError): + rg.standard_gamma(1.0, out=existing[::3], dtype=np.float32) + existing = np.zeros(size, dtype=np.float64) + with pytest.raises(TypeError): + rg.standard_gamma(1.0, out=existing, dtype=np.float32) + with pytest.raises(ValueError): + rg.standard_gamma(1.0, out=existing[::3]) + + def test_integers_broadcast(self, dtype): + if dtype == np.bool_: + upper = 2 + lower = 0 + else: + info = np.iinfo(dtype) + upper = int(info.max) + 1 + lower = info.min + self._reset_state() + a = self.rg.integers(lower, [upper] * 10, dtype=dtype) + self._reset_state() + b = self.rg.integers([lower] * 10, upper, dtype=dtype) + assert_equal(a, b) + self._reset_state() + c = self.rg.integers(lower, upper, size=10, dtype=dtype) 
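+        # every spelling above starts from the same reset state, so the
+        # broadcast draws must match the explicit size=10 draw exactly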
+ assert_equal(a, c) + self._reset_state() + d = self.rg.integers(np.array( + [lower] * 10), np.array([upper], dtype=object), size=10, + dtype=dtype) + assert_equal(a, d) + self._reset_state() + e = self.rg.integers( + np.array([lower] * 10), np.array([upper] * 10), size=10, + dtype=dtype) + assert_equal(a, e) + + self._reset_state() + a = self.rg.integers(0, upper, size=10, dtype=dtype) + self._reset_state() + b = self.rg.integers([upper] * 10, dtype=dtype) + assert_equal(a, b) + + def test_integers_numpy(self, dtype): + high = np.array([1]) + low = np.array([0]) + + out = self.rg.integers(low, high, dtype=dtype) + assert out.shape == (1,) + + out = self.rg.integers(low[0], high, dtype=dtype) + assert out.shape == (1,) + + out = self.rg.integers(low, high[0], dtype=dtype) + assert out.shape == (1,) + + def test_integers_broadcast_errors(self, dtype): + if dtype == np.bool_: + upper = 2 + lower = 0 + else: + info = np.iinfo(dtype) + upper = int(info.max) + 1 + lower = info.min + with pytest.raises(ValueError): + self.rg.integers(lower, [upper + 1] * 10, dtype=dtype) + with pytest.raises(ValueError): + self.rg.integers(lower - 1, [upper] * 10, dtype=dtype) + with pytest.raises(ValueError): + self.rg.integers([lower - 1], [upper] * 10, dtype=dtype) + with pytest.raises(ValueError): + self.rg.integers([0], [0], dtype=dtype) + + +class TestMT19937(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = MT19937 + cls.advance = None + cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 32 + cls._extra_setup() + cls.seed_error = ValueError + + def test_numpy_state(self): + nprg = np.random.RandomState() + nprg.standard_normal(99) + state = nprg.get_state() + self.rg.bit_generator.state = state + state2 = self.rg.bit_generator.state + assert_((state[1] == state2['state']['key']).all()) + assert_((state[2] == state2['state']['pos'])) + + +class TestPhilox(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = Philox + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + +class TestSFC64(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = SFC64 + cls.advance = None + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 192 + cls._extra_setup() + + +class TestPCG64(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64 + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + +class TestPCG64DXSM(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64DXSM + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + +class TestDefaultRNG(RNG): + @classmethod + def setup_class(cls): + # This will duplicate some tests that directly instantiate a fresh + # Generator(), but that's okay. 
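+        # default_rng is currently backed by PCG64, so the shared RNG
+        # test matrix applies to it unchanged.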
+ cls.bit_generator = PCG64 + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = np.random.default_rng(*cls.seed) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + def test_default_is_pcg64(self): + # In order to change the default BitGenerator, we'll go through + # a deprecation cycle to move to a different function. + assert_(isinstance(self.rg.bit_generator, PCG64)) + + def test_seed(self): + np.random.default_rng() + np.random.default_rng(None) + np.random.default_rng(12345) + np.random.default_rng(0) + np.random.default_rng(43660444402423911716352051725018508569) + np.random.default_rng([43660444402423911716352051725018508569, + 279705150948142787361475340226491943209]) + with pytest.raises(ValueError): + np.random.default_rng(-1) + with pytest.raises(ValueError): + np.random.default_rng([12345, -1]) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/testing/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/__init__.py new file mode 100644 index 00000000..8a34221e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/__init__.py @@ -0,0 +1,22 @@ +"""Common test support for all numpy test scripts. + +This single module should provide all the common functionality for numpy tests +in a single location, so that test scripts can just import it and work right +away. + +""" +from unittest import TestCase + +from . import _private +from ._private.utils import * +from ._private.utils import (_assert_valid_refcount, _gen_alignment_data) +from ._private import extbuild +from . import overrides + +__all__ = ( + _private.utils.__all__ + ['TestCase', 'overrides'] +) + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/testing/__init__.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/__init__.pyi new file mode 100644 index 00000000..d65860cc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/__init__.pyi @@ -0,0 +1,50 @@ +from numpy._pytesttester import PytestTester + +from unittest import ( + TestCase as TestCase, +) + +from numpy.testing._private.utils import ( + assert_equal as assert_equal, + assert_almost_equal as assert_almost_equal, + assert_approx_equal as assert_approx_equal, + assert_array_equal as assert_array_equal, + assert_array_less as assert_array_less, + assert_string_equal as assert_string_equal, + assert_array_almost_equal as assert_array_almost_equal, + assert_raises as assert_raises, + build_err_msg as build_err_msg, + decorate_methods as decorate_methods, + jiffies as jiffies, + memusage as memusage, + print_assert_equal as print_assert_equal, + rundocs as rundocs, + runstring as runstring, + verbose as verbose, + measure as measure, + assert_ as assert_, + assert_array_almost_equal_nulp as assert_array_almost_equal_nulp, + assert_raises_regex as assert_raises_regex, + assert_array_max_ulp as assert_array_max_ulp, + assert_warns as assert_warns, + assert_no_warnings as assert_no_warnings, + assert_allclose as assert_allclose, + IgnoreException as IgnoreException, + clear_and_catch_warnings as clear_and_catch_warnings, + SkipTest as SkipTest, + KnownFailureException as KnownFailureException, + temppath as temppath, + tempdir as tempdir, + IS_PYPY as IS_PYPY, + IS_PYSTON as IS_PYSTON, + HAS_REFCOUNT as HAS_REFCOUNT, + suppress_warnings as suppress_warnings, + assert_array_compare as assert_array_compare, + assert_no_gc_cycles as 
assert_no_gc_cycles, + break_cycles as break_cycles, + HAS_LAPACK64 as HAS_LAPACK64, +) + +__all__: list[str] +__path__: list[str] +test: PytestTester diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/testing/_private/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/_private/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/testing/_private/extbuild.py b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/_private/extbuild.py new file mode 100644 index 00000000..541f5511 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/_private/extbuild.py @@ -0,0 +1,248 @@ +""" +Build a c-extension module on-the-fly in tests. +See build_and_import_extensions for usage hints + +""" + +import os +import pathlib +import subprocess +import sys +import sysconfig +import textwrap + +__all__ = ['build_and_import_extension', 'compile_extension_module'] + + +def build_and_import_extension( + modname, functions, *, prologue="", build_dir=None, + include_dirs=[], more_init=""): + """ + Build and imports a c-extension module `modname` from a list of function + fragments `functions`. + + + Parameters + ---------- + functions : list of fragments + Each fragment is a sequence of func_name, calling convention, snippet. + prologue : string + Code to precede the rest, usually extra ``#include`` or ``#define`` + macros. + build_dir : pathlib.Path + Where to build the module, usually a temporary directory + include_dirs : list + Extra directories to find include files when compiling + more_init : string + Code to appear in the module PyMODINIT_FUNC + + Returns + ------- + out: module + The module will have been loaded and is ready for use + + Examples + -------- + >>> functions = [("test_bytes", "METH_O", \"\"\" + if ( !PyBytesCheck(args)) { + Py_RETURN_FALSE; + } + Py_RETURN_TRUE; + \"\"\")] + >>> mod = build_and_import_extension("testme", functions) + >>> assert not mod.test_bytes(u'abc') + >>> assert mod.test_bytes(b'abc') + """ + body = prologue + _make_methods(functions, modname) + init = """PyObject *mod = PyModule_Create(&moduledef); + """ + if not build_dir: + build_dir = pathlib.Path('.') + if more_init: + init += """#define INITERROR return NULL + """ + init += more_init + init += "\nreturn mod;" + source_string = _make_source(modname, init, body) + try: + mod_so = compile_extension_module( + modname, build_dir, include_dirs, source_string) + except Exception as e: + # shorten the exception chain + raise RuntimeError(f"could not compile in {build_dir}:") from e + import importlib.util + spec = importlib.util.spec_from_file_location(modname, mod_so) + foo = importlib.util.module_from_spec(spec) + spec.loader.exec_module(foo) + return foo + + +def compile_extension_module( + name, builddir, include_dirs, + source_string, libraries=[], library_dirs=[]): + """ + Build an extension module and return the filename of the resulting + native code file. + + Parameters + ---------- + name : string + name of the module, possibly including dots if it is a module inside a + package. 
builddir : pathlib.Path
+        Where to build the module, usually a temporary directory
+    include_dirs : list
+        Extra directories to find include files when compiling
+    libraries : list
+        Libraries to link into the extension module
+    library_dirs: list
+        Where to find the libraries, ``-L`` passed to the linker
+    """
+    modname = name.split('.')[-1]
+    dirname = builddir / name
+    dirname.mkdir(exist_ok=True)
+    cfile = _convert_str_to_file(source_string, dirname)
+    include_dirs = include_dirs + [sysconfig.get_config_var('INCLUDEPY')]
+
+    return _c_compile(
+        cfile, outputfilename=dirname / modname,
+        include_dirs=include_dirs, libraries=libraries,
+        library_dirs=library_dirs,
+        )
+
+
+def _convert_str_to_file(source, dirname):
+    """Helper function to create a file ``source.c`` in `dirname` that contains
+    the string in `source`. Returns the file name
+    """
+    filename = dirname / 'source.c'
+    with filename.open('w') as f:
+        f.write(str(source))
+    return filename
+
+
+def _make_methods(functions, modname):
+    """ Turns the name, signature, code in functions into complete functions
+    and lists them in a methods_table. Then turns the methods_table into a
+    ``PyMethodDef`` structure and returns the resulting code fragment ready
+    for compilation
+    """
+    methods_table = []
+    codes = []
+    for funcname, flags, code in functions:
+        cfuncname = "%s_%s" % (modname, funcname)
+        if 'METH_KEYWORDS' in flags:
+            signature = '(PyObject *self, PyObject *args, PyObject *kwargs)'
+        else:
+            signature = '(PyObject *self, PyObject *args)'
+        methods_table.append(
+            "{\"%s\", (PyCFunction)%s, %s}," % (funcname, cfuncname, flags))
+        func_code = """
+        static PyObject* {cfuncname}{signature}
+        {{
+        {code}
+        }}
+        """.format(cfuncname=cfuncname, signature=signature, code=code)
+        codes.append(func_code)
+
+    body = "\n".join(codes) + """
+    static PyMethodDef methods[] = {
+    %(methods)s
+    { NULL }
+    };
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "%(modname)s",  /* m_name */
+        NULL,           /* m_doc */
+        -1,             /* m_size */
+        methods,        /* m_methods */
+    };
+    """ % dict(methods='\n'.join(methods_table), modname=modname)
+    return body
+
+
+def _make_source(name, init, body):
+    """ Combines the code fragments into source code ready to be compiled
+    """
+    code = """
+    #include <Python.h>
+
+    %(body)s
+
+    PyMODINIT_FUNC
+    PyInit_%(name)s(void) {
+    %(init)s
+    }
+    """ % dict(
+        name=name, init=init, body=body,
+    )
+    return code
+
+
+def _c_compile(cfile, outputfilename, include_dirs=[], libraries=[],
+               library_dirs=[]):
+    if sys.platform == 'win32':
+        compile_extra = ["/we4013"]
+        link_extra = ["/LIBPATH:" + os.path.join(sys.base_prefix, 'libs')]
+    elif sys.platform.startswith('linux'):
+        compile_extra = [
+            "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"]
+        link_extra = []
+    else:
+        compile_extra = link_extra = []
+    if sys.platform == 'win32':
+        link_extra = link_extra + ['/DEBUG']  # generate .pdb file
+    if sys.platform == 'darwin':
+        # support Fink & Darwinports
+        for s in ('/sw/', '/opt/local/'):
+            if (s + 'include' not in include_dirs
+                    and os.path.exists(s + 'include')):
+                include_dirs.append(s + 'include')
+            if s + 'lib' not in library_dirs and os.path.exists(s + 'lib'):
+                library_dirs.append(s + 'lib')
+
+    outputfilename = outputfilename.with_suffix(get_so_suffix())
+    build(
+        cfile, outputfilename,
+        compile_extra, link_extra,
+        include_dirs, libraries, library_dirs)
+    return outputfilename
+
+
+def build(cfile, outputfilename, compile_extra, link_extra,
+          include_dirs, libraries, library_dirs):
+    "use meson to build"
+
+
build_dir = cfile.parent / "build" + os.makedirs(build_dir, exist_ok=True) + so_name = outputfilename.parts[-1] + with open(cfile.parent / "meson.build", "wt") as fid: + includes = ['-I' + d for d in include_dirs] + link_dirs = ['-L' + d for d in library_dirs] + fid.write(textwrap.dedent(f"""\ + project('foo', 'c') + shared_module('{so_name}', '{cfile.parts[-1]}', + c_args: {includes} + {compile_extra}, + link_args: {link_dirs} + {link_extra}, + link_with: {libraries}, + name_prefix: '', + name_suffix: 'dummy', + ) + """)) + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--vsenv", ".."], + cwd=build_dir, + ) + else: + subprocess.check_call(["meson", "setup", "--vsenv", ".."], + cwd=build_dir + ) + subprocess.check_call(["meson", "compile"], cwd=build_dir) + os.rename(str(build_dir / so_name) + ".dummy", cfile.parent / so_name) + +def get_so_suffix(): + ret = sysconfig.get_config_var('EXT_SUFFIX') + assert ret + return ret diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/testing/_private/utils.py b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/_private/utils.py new file mode 100644 index 00000000..28dd656c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/_private/utils.py @@ -0,0 +1,2509 @@ +""" +Utility function to facilitate testing. + +""" +import os +import sys +import platform +import re +import gc +import operator +import warnings +from functools import partial, wraps +import shutil +import contextlib +from tempfile import mkdtemp, mkstemp +from unittest.case import SkipTest +from warnings import WarningMessage +import pprint +import sysconfig + +import numpy as np +from numpy.core import ( + intp, float32, empty, arange, array_repr, ndarray, isnat, array) +from numpy import isfinite, isnan, isinf +import numpy.linalg._umath_linalg + +from io import StringIO + +__all__ = [ + 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', + 'assert_array_equal', 'assert_array_less', 'assert_string_equal', + 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', + 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', + 'rundocs', 'runstring', 'verbose', 'measure', + 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', + 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', + 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', + 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', + 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare', + 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', + '_OLD_PROMOTION', 'IS_MUSL', '_SUPPORTS_SVE' + ] + + +class KnownFailureException(Exception): + '''Raise this exception to mark a test as a known failing test.''' + pass + + +KnownFailureTest = KnownFailureException # backwards compat +verbose = 0 + +IS_WASM = platform.machine() in ["wasm32", "wasm64"] +IS_PYPY = sys.implementation.name == 'pypy' +IS_PYSTON = hasattr(sys, "pyston_version_info") +HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON +HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 + +_OLD_PROMOTION = lambda: np._get_promotion_state() == 'legacy' + +IS_MUSL = False +# alternate way is +# from packaging.tags import sys_tags +# _tags = list(sys_tags()) +# if 'musllinux' in _tags[0].platform: +_v = sysconfig.get_config_var('HOST_GNU_TYPE') or '' +if 'musl' in _v: + IS_MUSL = True + + +def assert_(val, msg=''): + """ + Assert that works in release mode. 
+ Accepts callable msg to allow deferring evaluation until failure. + + The Python built-in ``assert`` does not work when executing code in + optimized mode (the ``-O`` flag) - no byte-code is generated for it. + + For documentation on usage, refer to the Python documentation. + + """ + __tracebackhide__ = True # Hide traceback for py.test + if not val: + try: + smsg = msg() + except TypeError: + smsg = msg + raise AssertionError(smsg) + + +if os.name == 'nt': + # Code "stolen" from enthought/debug/memusage.py + def GetPerformanceAttributes(object, counter, instance=None, + inum=-1, format=None, machine=None): + # NOTE: Many counters require 2 samples to give accurate results, + # including "% Processor Time" (as by definition, at any instant, a + # thread's CPU usage is either 0 or 100). To read counters like this, + # you should copy this function, but keep the counter open, and call + # CollectQueryData() each time you need to know. + # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link) + # My older explanation for this was that the "AddCounter" process + # forced the CPU to 100%, but the above makes more sense :) + import win32pdh + if format is None: + format = win32pdh.PDH_FMT_LONG + path = win32pdh.MakeCounterPath( (machine, object, instance, None, + inum, counter)) + hq = win32pdh.OpenQuery() + try: + hc = win32pdh.AddCounter(hq, path) + try: + win32pdh.CollectQueryData(hq) + type, val = win32pdh.GetFormattedCounterValue(hc, format) + return val + finally: + win32pdh.RemoveCounter(hc) + finally: + win32pdh.CloseQuery(hq) + + def memusage(processName="python", instance=0): + # from win32pdhutil, part of the win32all package + import win32pdh + return GetPerformanceAttributes("Process", "Virtual Bytes", + processName, instance, + win32pdh.PDH_FMT_LONG, None) +elif sys.platform[:5] == 'linux': + + def memusage(_proc_pid_stat=f'/proc/{os.getpid()}/stat'): + """ + Return virtual memory size in bytes of the running python. + + """ + try: + with open(_proc_pid_stat) as f: + l = f.readline().split(' ') + return int(l[22]) + except Exception: + return +else: + def memusage(): + """ + Return memory usage of running python. [Not implemented] + + """ + raise NotImplementedError + + +if sys.platform[:5] == 'linux': + def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]): + """ + Return number of jiffies elapsed. + + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. + + """ + import time + if not _load_time: + _load_time.append(time.time()) + try: + with open(_proc_pid_stat) as f: + l = f.readline().split(' ') + return int(l[13]) + except Exception: + return int(100*(time.time()-_load_time[0])) +else: + # os.getpid is not in all platforms available. + # Using time is safe but inaccurate, especially when process + # was suspended or sleeping. + def jiffies(_load_time=[]): + """ + Return number of jiffies elapsed. + + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. 
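+
+        On platforms without ``/proc`` this fallback uses wall-clock time,
+        which is inaccurate when the process was suspended or sleeping
+        (see the comment above this definition).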
+ + """ + import time + if not _load_time: + _load_time.append(time.time()) + return int(100*(time.time()-_load_time[0])) + + +def build_err_msg(arrays, err_msg, header='Items are not equal:', + verbose=True, names=('ACTUAL', 'DESIRED'), precision=8): + msg = ['\n' + header] + if err_msg: + if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header): + msg = [msg[0] + ' ' + err_msg] + else: + msg.append(err_msg) + if verbose: + for i, a in enumerate(arrays): + + if isinstance(a, ndarray): + # precision argument is only needed if the objects are ndarrays + r_func = partial(array_repr, precision=precision) + else: + r_func = repr + + try: + r = r_func(a) + except Exception as exc: + r = f'[repr failed for <{type(a).__name__}>: {exc}]' + if r.count('\n') > 3: + r = '\n'.join(r.splitlines()[:3]) + r += '...' + msg.append(f' {names[i]}: {r}') + return '\n'.join(msg) + + +def assert_equal(actual, desired, err_msg='', verbose=True): + """ + Raises an AssertionError if two objects are not equal. + + Given two objects (scalars, lists, tuples, dictionaries or numpy arrays), + check that all elements of these objects are equal. An exception is raised + at the first conflicting values. + + When one of `actual` and `desired` is a scalar and the other is array_like, + the function checks that each element of the array_like object is equal to + the scalar. + + This function handles NaN comparisons as if NaN was a "normal" number. + That is, AssertionError is not raised if both objects have NaNs in the same + positions. This is in contrast to the IEEE standard on NaNs, which says + that NaN compared to anything must return False. + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal. + + Examples + -------- + >>> np.testing.assert_equal([4,5], [4,6]) + Traceback (most recent call last): + ... + AssertionError: + Items are not equal: + item=1 + ACTUAL: 5 + DESIRED: 6 + + The following comparison does not raise an exception. There are NaNs + in the inputs, but they are in the same positions. 
+ + >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg, verbose) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(repr(k)) + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}', + verbose) + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + assert_equal(len(actual), len(desired), err_msg, verbose) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}', + verbose) + return + from numpy.core import ndarray, isscalar, signbit + from numpy.lib import iscomplexobj, real, imag + if isinstance(actual, ndarray) or isinstance(desired, ndarray): + return assert_array_equal(actual, desired, err_msg, verbose) + msg = build_err_msg([actual, desired], err_msg, verbose=verbose) + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except (ValueError, TypeError): + usecomplex = False + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_equal(actualr, desiredr) + assert_equal(actuali, desiredi) + except AssertionError: + raise AssertionError(msg) + + # isscalar test to check cases such as [np.nan] != np.nan + if isscalar(desired) != isscalar(actual): + raise AssertionError(msg) + + try: + isdesnat = isnat(desired) + isactnat = isnat(actual) + dtypes_match = (np.asarray(desired).dtype.type == + np.asarray(actual).dtype.type) + if isdesnat and isactnat: + # If both are NaT (and have the same dtype -- datetime or + # timedelta) they are considered equal. + if dtypes_match: + return + else: + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + # Inf/nan/negative zero handling + try: + isdesnan = isnan(desired) + isactnan = isnan(actual) + if isdesnan and isactnan: + return # both nan, so equal + + # handle signed zero specially for floats + array_actual = np.asarray(actual) + array_desired = np.asarray(desired) + if (array_actual.dtype.char in 'Mm' or + array_desired.dtype.char in 'Mm'): + # version 1.18 + # until this version, isnan failed for datetime64 and timedelta64. + # Now it succeeds but comparison to scalar with a different type + # emits a DeprecationWarning. 
+ # Avoid that by skipping the next check + raise NotImplementedError('cannot compare to a scalar ' + 'with a different type') + + if desired == 0 and actual == 0: + if not signbit(desired) == signbit(actual): + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + try: + # Explicitly use __eq__ for comparison, gh-2552 + if not (desired == actual): + raise AssertionError(msg) + + except (DeprecationWarning, FutureWarning) as e: + # this handles the case when the two types are not even comparable + if 'elementwise == comparison' in e.args[0]: + raise AssertionError(msg) + else: + raise + + +def print_assert_equal(test_string, actual, desired): + """ + Test if two objects are equal, and print an error message if test fails. + + The test is performed with ``actual == desired``. + + Parameters + ---------- + test_string : str + The message supplied to AssertionError. + actual : object + The object to test for equality against `desired`. + desired : object + The expected result. + + Examples + -------- + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) + Traceback (most recent call last): + ... + AssertionError: Test XYZ of func xyz failed + ACTUAL: + [0, 1] + DESIRED: + [0, 2] + + """ + __tracebackhide__ = True # Hide traceback for py.test + import pprint + + if not (actual == desired): + msg = StringIO() + msg.write(test_string) + msg.write(' failed\nACTUAL: \n') + pprint.pprint(actual, msg) + msg.write('DESIRED: \n') + pprint.pprint(desired, msg) + raise AssertionError(msg.getvalue()) + + +@np._no_nep50_warning() +def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): + """ + Raises an AssertionError if two items are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + The test verifies that the elements of `actual` and `desired` satisfy. + + ``abs(desired-actual) < float64(1.5 * 10**(-decimal))`` + + That is a looser test than originally documented, but agrees with what the + actual implementation in `assert_array_almost_equal` did up to rounding + vagaries. An exception is raised at conflicting values. For ndarrays this + delegates to assert_array_almost_equal + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + decimal : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> from numpy.testing import assert_almost_equal + >>> assert_almost_equal(2.3333333333333, 2.33333334) + >>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 10 decimals + ACTUAL: 2.3333333333333 + DESIRED: 2.33333334 + + >>> assert_almost_equal(np.array([1.0,2.3333333333333]), + ... 
np.array([1.0,2.33333334]), decimal=9) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 9 decimals + + Mismatched elements: 1 / 2 (50%) + Max absolute difference: 6.66669964e-09 + Max relative difference: 2.85715698e-09 + x: array([1. , 2.333333333]) + y: array([1. , 2.33333334]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy.core import ndarray + from numpy.lib import iscomplexobj, real, imag + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + def _build_err_msg(): + header = ('Arrays are not almost equal to %d decimals' % decimal) + return build_err_msg([actual, desired], err_msg, verbose=verbose, + header=header) + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_almost_equal(actualr, desiredr, decimal=decimal) + assert_almost_equal(actuali, desiredi, decimal=decimal) + except AssertionError: + raise AssertionError(_build_err_msg()) + + if isinstance(actual, (ndarray, tuple, list)) \ + or isinstance(desired, (ndarray, tuple, list)): + return assert_array_almost_equal(actual, desired, decimal, err_msg) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (isfinite(desired) and isfinite(actual)): + if isnan(desired) or isnan(actual): + if not (isnan(desired) and isnan(actual)): + raise AssertionError(_build_err_msg()) + else: + if not desired == actual: + raise AssertionError(_build_err_msg()) + return + except (NotImplementedError, TypeError): + pass + if abs(desired - actual) >= np.float64(1.5 * 10.0**(-decimal)): + raise AssertionError(_build_err_msg()) + + +@np._no_nep50_warning() +def assert_approx_equal(actual, desired, significant=7, err_msg='', + verbose=True): + """ + Raises an AssertionError if two items are not equal up to significant + digits. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + Given two numbers, check that they are approximately equal. + Approximately equal is defined as the number of significant digits + that agree. + + Parameters + ---------- + actual : scalar + The object to check. + desired : scalar + The expected object. + significant : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20, + ... 
significant=8) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20, + ... significant=8) + Traceback (most recent call last): + ... + AssertionError: + Items are not equal to 8 significant digits: + ACTUAL: 1.234567e-21 + DESIRED: 1.2345672e-21 + + the evaluated condition that raises the exception is + + >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1) + True + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + (actual, desired) = map(float, (actual, desired)) + if desired == actual: + return + # Normalized the numbers to be in range (-10.0,10.0) + # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) + with np.errstate(invalid='ignore'): + scale = 0.5*(np.abs(desired) + np.abs(actual)) + scale = np.power(10, np.floor(np.log10(scale))) + try: + sc_desired = desired/scale + except ZeroDivisionError: + sc_desired = 0.0 + try: + sc_actual = actual/scale + except ZeroDivisionError: + sc_actual = 0.0 + msg = build_err_msg( + [actual, desired], err_msg, + header='Items are not equal to %d significant digits:' % significant, + verbose=verbose) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (isfinite(desired) and isfinite(actual)): + if isnan(desired) or isnan(actual): + if not (isnan(desired) and isnan(actual)): + raise AssertionError(msg) + else: + if not desired == actual: + raise AssertionError(msg) + return + except (TypeError, NotImplementedError): + pass + if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)): + raise AssertionError(msg) + + +@np._no_nep50_warning() +def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', + precision=6, equal_nan=True, equal_inf=True, + *, strict=False): + __tracebackhide__ = True # Hide traceback for py.test + from numpy.core import (array2string, isnan, inf, bool_, errstate, + all, max, object_) + + x = np.asanyarray(x) + y = np.asanyarray(y) + + # original array for output formatting + ox, oy = x, y + + def isnumber(x): + return x.dtype.char in '?bhilqpBHILQPefdgFDG' + + def istime(x): + return x.dtype.char in "Mm" + + def func_assert_same_pos(x, y, func=isnan, hasval='nan'): + """Handling nan/inf. + + Combine results of running func on x and y, checking that they are True + at the same locations. + + """ + __tracebackhide__ = True # Hide traceback for py.test + + x_id = func(x) + y_id = func(y) + # We include work-arounds here to handle three types of slightly + # pathological ndarray subclasses: + # (1) all() on `masked` array scalars can return masked arrays, so we + # use != True + # (2) __eq__ on some ndarray subclasses returns Python booleans + # instead of element-wise comparisons, so we cast to bool_() and + # use isinstance(..., bool) checks + # (3) subclasses with bare-bones __array_function__ implementations may + # not implement np.all(), so favor using the .all() method + # We are not committed to supporting such subclasses, but it's nice to + # support them if possible. + if bool_(x_id == y_id).all() != True: + msg = build_err_msg([x, y], + err_msg + '\nx and y %s location mismatch:' + % (hasval), verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise AssertionError(msg) + # If there is a scalar, then here we know the array has the same + # flag as it everywhere, so we should return the scalar flag. 
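+        # (e.g. comparing x = array([nan, nan]) against the scalar y = nan:
+        # the 0-d flag stands for "NaN at every position" on that side, so
+        # handing back the scalar flag keeps the masking step below valid.)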
+ if isinstance(x_id, bool) or x_id.ndim == 0: + return bool_(x_id) + elif isinstance(y_id, bool) or y_id.ndim == 0: + return bool_(y_id) + else: + return y_id + + try: + if strict: + cond = x.shape == y.shape and x.dtype == y.dtype + else: + cond = (x.shape == () or y.shape == ()) or x.shape == y.shape + if not cond: + if x.shape != y.shape: + reason = f'\n(shapes {x.shape}, {y.shape} mismatch)' + else: + reason = f'\n(dtypes {x.dtype}, {y.dtype} mismatch)' + msg = build_err_msg([x, y], + err_msg + + reason, + verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise AssertionError(msg) + + flagged = bool_(False) + if isnumber(x) and isnumber(y): + if equal_nan: + flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan') + + if equal_inf: + flagged |= func_assert_same_pos(x, y, + func=lambda xy: xy == +inf, + hasval='+inf') + flagged |= func_assert_same_pos(x, y, + func=lambda xy: xy == -inf, + hasval='-inf') + + elif istime(x) and istime(y): + # If one is datetime64 and the other timedelta64 there is no point + if equal_nan and x.dtype.type == y.dtype.type: + flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT") + + if flagged.ndim > 0: + x, y = x[~flagged], y[~flagged] + # Only do the comparison if actual values are left + if x.size == 0: + return + elif flagged: + # no sense doing comparison if everything is flagged. + return + + val = comparison(x, y) + + if isinstance(val, bool): + cond = val + reduced = array([val]) + else: + reduced = val.ravel() + cond = reduced.all() + + # The below comparison is a hack to ensure that fully masked + # results, for which val.ravel().all() returns np.ma.masked, + # do not trigger a failure (np.ma.masked != True evaluates as + # np.ma.masked, which is falsy). + if cond != True: + n_mismatch = reduced.size - reduced.sum(dtype=intp) + n_elements = flagged.size if flagged.ndim != 0 else reduced.size + percent_mismatch = 100 * n_mismatch / n_elements + remarks = [ + 'Mismatched elements: {} / {} ({:.3g}%)'.format( + n_mismatch, n_elements, percent_mismatch)] + + with errstate(all='ignore'): + # ignore errors for non-numeric types + with contextlib.suppress(TypeError): + error = abs(x - y) + if np.issubdtype(x.dtype, np.unsignedinteger): + error2 = abs(y - x) + np.minimum(error, error2, out=error) + max_abs_error = max(error) + if getattr(error, 'dtype', object_) == object_: + remarks.append('Max absolute difference: ' + + str(max_abs_error)) + else: + remarks.append('Max absolute difference: ' + + array2string(max_abs_error)) + + # note: this definition of relative error matches that one + # used by assert_allclose (found in np.isclose) + # Filter values where the divisor would be zero + nonzero = bool_(y != 0) + if all(~nonzero): + max_rel_error = array(inf) + else: + max_rel_error = max(error[nonzero] / abs(y[nonzero])) + if getattr(error, 'dtype', object_) == object_: + remarks.append('Max relative difference: ' + + str(max_rel_error)) + else: + remarks.append('Max relative difference: ' + + array2string(max_rel_error)) + + err_msg += '\n' + '\n'.join(remarks) + msg = build_err_msg([ox, oy], err_msg, + verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise AssertionError(msg) + except ValueError: + import traceback + efmt = traceback.format_exc() + header = f'error during assertion:\n\n{efmt}\n\n{header}' + + msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise ValueError(msg) + + +def assert_array_equal(x, y, err_msg='', 
verbose=True, *, strict=False): + """ + Raises an AssertionError if two array_like objects are not equal. + + Given two array_like objects, check that the shape is equal and all + elements of these objects are equal (but see the Notes for the special + handling of a scalar). An exception is raised at shape mismatch or + conflicting values. In contrast to the standard usage in numpy, NaNs + are compared like numbers, no assertion is raised if both objects have + NaNs in the same positions. + + The usual caution for verifying equality with floating point numbers is + advised. + + Parameters + ---------- + x : array_like + The actual object to check. + y : array_like + The desired, expected object. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + strict : bool, optional + If True, raise an AssertionError when either the shape or the data + type of the array_like objects does not match. The special + handling for scalars mentioned in the Notes section is disabled. + + .. versionadded:: 1.24.0 + + Raises + ------ + AssertionError + If actual and desired objects are not equal. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Notes + ----- + When one of `x` and `y` is a scalar and the other is array_like, the + function checks that each element of the array_like object is equal to + the scalar. This behaviour can be disabled with the `strict` parameter. + + Examples + -------- + The first assert does not raise an exception: + + >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], + ... [np.exp(0),2.33333, np.nan]) + + Assert fails with numerical imprecision with floats: + + >>> np.testing.assert_array_equal([1.0,np.pi,np.nan], + ... [1, np.sqrt(np.pi)**2, np.nan]) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + Mismatched elements: 1 / 3 (33.3%) + Max absolute difference: 4.4408921e-16 + Max relative difference: 1.41357986e-16 + x: array([1. , 3.141593, nan]) + y: array([1. , 3.141593, nan]) + + Use `assert_allclose` or one of the nulp (number of floating point values) + functions for these cases instead: + + >>> np.testing.assert_allclose([1.0,np.pi,np.nan], + ... [1, np.sqrt(np.pi)**2, np.nan], + ... rtol=1e-10, atol=0) + + As mentioned in the Notes section, `assert_array_equal` has special + handling for scalars. Here the test checks that each value in `x` is 3: + + >>> x = np.full((2, 5), fill_value=3) + >>> np.testing.assert_array_equal(x, 3) + + Use `strict` to raise an AssertionError when comparing a scalar with an + array: + + >>> np.testing.assert_array_equal(x, 3, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + (shapes (2, 5), () mismatch) + x: array([[3, 3, 3, 3, 3], + [3, 3, 3, 3, 3]]) + y: array(3) + + The `strict` parameter also ensures that the array data types match: + + >>> x = np.array([2, 2, 2]) + >>> y = np.array([2., 2., 2.], dtype=np.float32) + >>> np.testing.assert_array_equal(x, y, strict=True) + Traceback (most recent call last): + ... 
+ AssertionError: + Arrays are not equal + + (dtypes int64, float32 mismatch) + x: array([2, 2, 2]) + y: array([2., 2., 2.], dtype=float32) + """ + __tracebackhide__ = True # Hide traceback for py.test + assert_array_compare(operator.__eq__, x, y, err_msg=err_msg, + verbose=verbose, header='Arrays are not equal', + strict=strict) + + +@np._no_nep50_warning() +def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): + """ + Raises an AssertionError if two objects are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + The test verifies identical shapes and that the elements of ``actual`` and + ``desired`` satisfy. + + ``abs(desired-actual) < 1.5 * 10**(-decimal)`` + + That is a looser test than originally documented, but agrees with what the + actual implementation did up to rounding vagaries. An exception is raised + at shape mismatch or conflicting values. In contrast to the standard usage + in numpy, NaNs are compared like numbers, no assertion is raised if both + objects have NaNs in the same positions. + + Parameters + ---------- + x : array_like + The actual object to check. + y : array_like + The desired, expected object. + decimal : int, optional + Desired precision, default is 6. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + the first assert does not raise an exception + + >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan], + ... [1.0,2.333,np.nan]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33339,np.nan], decimal=5) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 5 decimals + + Mismatched elements: 1 / 3 (33.3%) + Max absolute difference: 6.e-05 + Max relative difference: 2.57136612e-05 + x: array([1. , 2.33333, nan]) + y: array([1. , 2.33339, nan]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33333, 5], decimal=5) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 5 decimals + + x and y nan location mismatch: + x: array([1. , 2.33333, nan]) + y: array([1. , 2.33333, 5. ]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy.core import number, float_, result_type + from numpy.core.numerictypes import issubdtype + from numpy.core.fromnumeric import any as npany + + def compare(x, y): + try: + if npany(isinf(x)) or npany(isinf(y)): + xinfid = isinf(x) + yinfid = isinf(y) + if not (xinfid == yinfid).all(): + return False + # if one item, x and y is +- inf + if x.size == y.size == 1: + return x == y + x = x[~xinfid] + y = y[~yinfid] + except (TypeError, NotImplementedError): + pass + + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + dtype = result_type(y, 1.) 
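+        # (result_type with the Python float 1. promotes integer input to
+        # an inexact type, e.g. int64 -> float64, so abs(x - y) below cannot
+        # overflow on values such as np.iinfo(np.int64).min.)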
+        y = np.asanyarray(y, dtype)
+        z = abs(x - y)
+
+        if not issubdtype(z.dtype, number):
+            z = z.astype(float_)  # handle object arrays
+
+        return z < 1.5 * 10.0**(-decimal)
+
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+             header=('Arrays are not almost equal to %d decimals' % decimal),
+             precision=decimal)
+
+
+def assert_array_less(x, y, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two array_like objects are not ordered by less
+    than.
+
+    Given two array_like objects, check that the shape is equal and all
+    elements of the first object are strictly smaller than those of the
+    second object. An exception is raised at shape mismatch or incorrectly
+    ordered values. Shape mismatch does not raise if an object has zero
+    dimension. In contrast to the standard usage in numpy, NaNs are
+    compared, no assertion is raised if both objects have NaNs in the same
+    positions.
+
+    Parameters
+    ----------
+    x : array_like
+        The smaller object to check.
+    y : array_like
+        The larger object to compare.
+    err_msg : string
+        The error message to be printed in case of failure.
+    verbose : bool
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+        If x is not strictly smaller than y, element-wise.
+
+    See Also
+    --------
+    assert_array_equal: tests objects for equality
+    assert_array_almost_equal: test objects for equality up to precision
+
+    Examples
+    --------
+    >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
+    >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
+    Traceback (most recent call last):
+    ...
+    AssertionError:
+    Arrays are not less-ordered
+
+    Mismatched elements: 1 / 3 (33.3%)
+    Max absolute difference: 1.
+    Max relative difference: 0.5
+     x: array([ 1.,  1., nan])
+     y: array([ 1.,  2., nan])
+
+    >>> np.testing.assert_array_less([1.0, 4.0], 3)
+    Traceback (most recent call last):
+    ...
+    AssertionError:
+    Arrays are not less-ordered
+
+    Mismatched elements: 1 / 2 (50%)
+    Max absolute difference: 2.
+    Max relative difference: 0.66666667
+     x: array([1., 4.])
+     y: array(3)
+
+    >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
+    Traceback (most recent call last):
+    ...
+    AssertionError:
+    Arrays are not less-ordered
+
+    (shapes (3,), (1,) mismatch)
+     x: array([1., 2., 3.])
+     y: array([4])
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
+                         verbose=verbose,
+                         header='Arrays are not less-ordered',
+                         equal_inf=False)
+
+
+def runstring(astr, dict):
+    exec(astr, dict)
+
+
+def assert_string_equal(actual, desired):
+    """
+    Test if two strings are equal.
+
+    If the given strings are equal, `assert_string_equal` does nothing.
+    If they are not equal, an AssertionError is raised, and the diff
+    between the strings is shown.
+
+    Parameters
+    ----------
+    actual : str
+        The string to test for equality against the expected string.
+    desired : str
+        The expected string.
+
+    Examples
+    --------
+    >>> np.testing.assert_string_equal('abc', 'abc')
+    >>> np.testing.assert_string_equal('abc', 'abcd')
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ...
+    AssertionError: Differences in strings:
+    - abc
+    + abcd
+    ?    +
+ + + """ + # delay import of difflib to reduce startup time + __tracebackhide__ = True # Hide traceback for py.test + import difflib + + if not isinstance(actual, str): + raise AssertionError(repr(type(actual))) + if not isinstance(desired, str): + raise AssertionError(repr(type(desired))) + if desired == actual: + return + + diff = list(difflib.Differ().compare(actual.splitlines(True), + desired.splitlines(True))) + diff_list = [] + while diff: + d1 = diff.pop(0) + if d1.startswith(' '): + continue + if d1.startswith('- '): + l = [d1] + d2 = diff.pop(0) + if d2.startswith('? '): + l.append(d2) + d2 = diff.pop(0) + if not d2.startswith('+ '): + raise AssertionError(repr(d2)) + l.append(d2) + if diff: + d3 = diff.pop(0) + if d3.startswith('? '): + l.append(d3) + else: + diff.insert(0, d3) + if d2[2:] == d1[2:]: + continue + diff_list.extend(l) + continue + raise AssertionError(repr(d1)) + if not diff_list: + return + msg = f"Differences in strings:\n{''.join(diff_list).rstrip()}" + if actual != desired: + raise AssertionError(msg) + + +def rundocs(filename=None, raise_on_error=True): + """ + Run doctests found in the given file. + + By default `rundocs` raises an AssertionError on failure. + + Parameters + ---------- + filename : str + The path to the file for which the doctests are run. + raise_on_error : bool + Whether to raise an AssertionError when a doctest fails. Default is + True. + + Notes + ----- + The doctests can be run by the user/developer by adding the ``doctests`` + argument to the ``test()`` call. For example, to run all tests (including + doctests) for `numpy.lib`: + + >>> np.lib.test(doctests=True) # doctest: +SKIP + """ + from numpy.distutils.misc_util import exec_mod_from_location + import doctest + if filename is None: + f = sys._getframe(1) + filename = f.f_globals['__file__'] + name = os.path.splitext(os.path.basename(filename))[0] + m = exec_mod_from_location(name, filename) + + tests = doctest.DocTestFinder().find(m) + runner = doctest.DocTestRunner(verbose=False) + + msg = [] + if raise_on_error: + out = lambda s: msg.append(s) + else: + out = None + + for test in tests: + runner.run(test, out=out) + + if runner.failures > 0 and raise_on_error: + raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) + + +def check_support_sve(): + """ + gh-22982 + """ + + import subprocess + cmd = 'lscpu' + try: + output = subprocess.run(cmd, capture_output=True, text=True) + return 'sve' in output.stdout + except OSError: + return False + + +_SUPPORTS_SVE = check_support_sve() + +# +# assert_raises and assert_raises_regex are taken from unittest. +# +import unittest + + +class _Dummy(unittest.TestCase): + def nop(self): + pass + + +_d = _Dummy('nop') + + +def assert_raises(*args, **kwargs): + """ + assert_raises(exception_class, callable, *args, **kwargs) + assert_raises(exception_class) + + Fail unless an exception of class exception_class is thrown + by callable when invoked with arguments args and keyword + arguments kwargs. If a different type of exception is + thrown, it will not be caught, and the test case will be + deemed to have suffered an error, exactly as for an + unexpected exception. + + Alternatively, `assert_raises` can be used as a context manager: + + >>> from numpy.testing import assert_raises + >>> with assert_raises(ZeroDivisionError): + ... 1 / 0 + + is equivalent to + + >>> def div(x, y): + ... 
return x / y + >>> assert_raises(ZeroDivisionError, div, 1, 0) + + """ + __tracebackhide__ = True # Hide traceback for py.test + return _d.assertRaises(*args, **kwargs) + + +def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): + """ + assert_raises_regex(exception_class, expected_regexp, callable, *args, + **kwargs) + assert_raises_regex(exception_class, expected_regexp) + + Fail unless an exception of class exception_class and with message that + matches expected_regexp is thrown by callable when invoked with arguments + args and keyword arguments kwargs. + + Alternatively, can be used as a context manager like `assert_raises`. + + Notes + ----- + .. versionadded:: 1.9.0 + + """ + __tracebackhide__ = True # Hide traceback for py.test + return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs) + + +def decorate_methods(cls, decorator, testmatch=None): + """ + Apply a decorator to all methods in a class matching a regular expression. + + The given decorator is applied to all public methods of `cls` that are + matched by the regular expression `testmatch` + (``testmatch.search(methodname)``). Methods that are private, i.e. start + with an underscore, are ignored. + + Parameters + ---------- + cls : class + Class whose methods to decorate. + decorator : function + Decorator to apply to methods + testmatch : compiled regexp or str, optional + The regular expression. Default value is None, in which case the + nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``) + is used. + If `testmatch` is a string, it is compiled to a regular expression + first. + + """ + if testmatch is None: + testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) + else: + testmatch = re.compile(testmatch) + cls_attr = cls.__dict__ + + # delayed import to reduce startup time + from inspect import isfunction + + methods = [_m for _m in cls_attr.values() if isfunction(_m)] + for function in methods: + try: + if hasattr(function, 'compat_func_name'): + funcname = function.compat_func_name + else: + funcname = function.__name__ + except AttributeError: + # not a function + continue + if testmatch.search(funcname) and not funcname.startswith('_'): + setattr(cls, funcname, decorator(function)) + return + + +def measure(code_str, times=1, label=None): + """ + Return elapsed time for executing code in the namespace of the caller. + + The supplied code string is compiled with the Python builtin ``compile``. + The precision of the timing is 10 milli-seconds. If the code will execute + fast on this timescale, it can be executed many times to get reasonable + timing accuracy. + + Parameters + ---------- + code_str : str + The code to be timed. + times : int, optional + The number of times the code is executed. Default is 1. The code is + only compiled once. + label : str, optional + A label to identify `code_str` with. This is passed into ``compile`` + as the second argument (for run-time error messages). + + Returns + ------- + elapsed : float + Total elapsed time in seconds for executing `code_str` `times` times. 
+ + Examples + -------- + >>> times = 10 + >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times) + >>> print("Time for a single execution : ", etime / times, "s") # doctest: +SKIP + Time for a single execution : 0.005 s + + """ + frame = sys._getframe(1) + locs, globs = frame.f_locals, frame.f_globals + + code = compile(code_str, f'Test name: {label} ', 'exec') + i = 0 + elapsed = jiffies() + while i < times: + i += 1 + exec(code, globs, locs) + elapsed = jiffies() - elapsed + return 0.01*elapsed + + +def _assert_valid_refcount(op): + """ + Check that ufuncs don't mishandle refcount of object `1`. + Used in a few regression tests. + """ + if not HAS_REFCOUNT: + return True + + import gc + import numpy as np + + b = np.arange(100*100).reshape(100, 100) + c = b + i = 1 + + gc.disable() + try: + rc = sys.getrefcount(i) + for j in range(15): + d = op(b, c) + assert_(sys.getrefcount(i) >= rc) + finally: + gc.enable() + del d # for pyflakes + + +def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, + err_msg='', verbose=True): + """ + Raises an AssertionError if two objects are not equal up to desired + tolerance. + + Given two array_like objects, check that their shapes and all elements + are equal (but see the Notes for the special handling of a scalar). An + exception is raised if the shapes mismatch or any values conflict. In + contrast to the standard usage in numpy, NaNs are compared like numbers, + no assertion is raised if both objects have NaNs in the same positions. + + The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note + that ``allclose`` has different default values). It compares the difference + between `actual` and `desired` to ``atol + rtol * abs(desired)``. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + actual : array_like + Array obtained. + desired : array_like + Array desired. + rtol : float, optional + Relative tolerance. + atol : float, optional + Absolute tolerance. + equal_nan : bool, optional. + If True, NaNs will compare equal. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_array_almost_equal_nulp, assert_array_max_ulp + + Notes + ----- + When one of `actual` and `desired` is a scalar and the other is + array_like, the function checks that each element of the array_like + object is equal to the scalar. + + Examples + -------- + >>> x = [1e-5, 1e-3, 1e-1] + >>> y = np.arccos(np.cos(x)) + >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + def compare(x, y): + return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol, + equal_nan=equal_nan) + + actual, desired = np.asanyarray(actual), np.asanyarray(desired) + header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}' + assert_array_compare(compare, actual, desired, err_msg=str(err_msg), + verbose=verbose, header=header, equal_nan=equal_nan) + + +def assert_array_almost_equal_nulp(x, y, nulp=1): + """ + Compare two arrays relatively to their spacing. + + This is a relatively robust method to compare two arrays whose amplitude + is variable. + + Parameters + ---------- + x, y : array_like + Input arrays. 
+ nulp : int, optional + The maximum number of unit in the last place for tolerance (see Notes). + Default is 1. + + Returns + ------- + None + + Raises + ------ + AssertionError + If the spacing between `x` and `y` for one or more elements is larger + than `nulp`. + + See Also + -------- + assert_array_max_ulp : Check that all items of arrays differ in at most + N Units in the Last Place. + spacing : Return the distance between x and the nearest adjacent number. + + Notes + ----- + An assertion is raised if the following condition is not met:: + + abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y))) + + Examples + -------- + >>> x = np.array([1., 1e-10, 1e-20]) + >>> eps = np.finfo(x.dtype).eps + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) + + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) + Traceback (most recent call last): + ... + AssertionError: X and Y are not equal to 1 ULP (max is 2) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + ax = np.abs(x) + ay = np.abs(y) + ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) + if not np.all(np.abs(x-y) <= ref): + if np.iscomplexobj(x) or np.iscomplexobj(y): + msg = "X and Y are not equal to %d ULP" % nulp + else: + max_nulp = np.max(nulp_diff(x, y)) + msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp) + raise AssertionError(msg) + + +def assert_array_max_ulp(a, b, maxulp=1, dtype=None): + """ + Check that all items of arrays differ in at most N Units in the Last Place. + + Parameters + ---------- + a, b : array_like + Input arrays to be compared. + maxulp : int, optional + The maximum number of units in the last place that elements of `a` and + `b` can differ. Default is 1. + dtype : dtype, optional + Data-type to convert `a` and `b` to if given. Default is None. + + Returns + ------- + ret : ndarray + Array containing number of representable floating point numbers between + items in `a` and `b`. + + Raises + ------ + AssertionError + If one or more elements differ by more than `maxulp`. + + Notes + ----- + For computing the ULP difference, this API does not differentiate between + various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000 + is zero). + + See Also + -------- + assert_array_almost_equal_nulp : Compare two arrays relatively to their + spacing. + + Examples + -------- + >>> a = np.linspace(0., 1., 100) + >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + ret = nulp_diff(a, b, dtype) + if not np.all(ret <= maxulp): + raise AssertionError("Arrays are not almost equal up to %g " + "ULP (max difference is %g ULP)" % + (maxulp, np.max(ret))) + return ret + + +def nulp_diff(x, y, dtype=None): + """For each item in x and y, return the number of representable floating + points between them. + + Parameters + ---------- + x : array_like + first input array + y : array_like + second input array + dtype : dtype, optional + Data-type to convert `x` and `y` to if given. Default is None. + + Returns + ------- + nulp : array_like + number of representable floating point numbers between each item in x + and y. + + Notes + ----- + For computing the ULP difference, this API does not differentiate between + various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000 + is zero). 
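+    (0x7fc00000 and 0xffc00000 are the single-precision quiet NaN bit
+    patterns that differ only in the sign bit.)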
+ + Examples + -------- + # By definition, epsilon is the smallest number such as 1 + eps != 1, so + # there should be exactly one ULP between 1 and 1 + eps + >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) + 1.0 + """ + import numpy as np + if dtype: + x = np.asarray(x, dtype=dtype) + y = np.asarray(y, dtype=dtype) + else: + x = np.asarray(x) + y = np.asarray(y) + + t = np.common_type(x, y) + if np.iscomplexobj(x) or np.iscomplexobj(y): + raise NotImplementedError("_nulp not implemented for complex array") + + x = np.array([x], dtype=t) + y = np.array([y], dtype=t) + + x[np.isnan(x)] = np.nan + y[np.isnan(y)] = np.nan + + if not x.shape == y.shape: + raise ValueError("x and y do not have the same shape: %s - %s" % + (x.shape, y.shape)) + + def _diff(rx, ry, vdt): + diff = np.asarray(rx-ry, dtype=vdt) + return np.abs(diff) + + rx = integer_repr(x) + ry = integer_repr(y) + return _diff(rx, ry, t) + + +def _integer_repr(x, vdt, comp): + # Reinterpret binary representation of the float as sign-magnitude: + # take into account two-complement representation + # See also + # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ + rx = x.view(vdt) + if not (rx.size == 1): + rx[rx < 0] = comp - rx[rx < 0] + else: + if rx < 0: + rx = comp - rx + + return rx + + +def integer_repr(x): + """Return the signed-magnitude interpretation of the binary representation + of x.""" + import numpy as np + if x.dtype == np.float16: + return _integer_repr(x, np.int16, np.int16(-2**15)) + elif x.dtype == np.float32: + return _integer_repr(x, np.int32, np.int32(-2**31)) + elif x.dtype == np.float64: + return _integer_repr(x, np.int64, np.int64(-2**63)) + else: + raise ValueError(f'Unsupported dtype {x.dtype}') + + +@contextlib.contextmanager +def _assert_warns_context(warning_class, name=None): + __tracebackhide__ = True # Hide traceback for py.test + with suppress_warnings() as sup: + l = sup.record(warning_class) + yield + if not len(l) > 0: + name_str = f' when calling {name}' if name is not None else '' + raise AssertionError("No warning raised" + name_str) + + +def assert_warns(warning_class, *args, **kwargs): + """ + Fail unless the given callable throws the specified warning. + + A warning of class warning_class should be thrown by the callable when + invoked with arguments args and keyword arguments kwargs. + If a different type of warning is thrown, it will not be caught. + + If called with all arguments other than the warning class omitted, may be + used as a context manager: + + with assert_warns(SomeWarning): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + .. versionadded:: 1.4.0 + + Parameters + ---------- + warning_class : class + The class defining the warning that `func` is expected to throw. + func : callable, optional + Callable to test + *args : Arguments + Arguments for `func`. + **kwargs : Kwargs + Keyword arguments for `func`. + + Returns + ------- + The value returned by `func`. + + Examples + -------- + >>> import warnings + >>> def deprecated_func(num): + ... warnings.warn("Please upgrade", DeprecationWarning) + ... return num*num + >>> with np.testing.assert_warns(DeprecationWarning): + ... 
assert deprecated_func(4) == 16 + >>> # or passing a func + >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4) + >>> assert ret == 16 + """ + if not args: + return _assert_warns_context(warning_class) + + func = args[0] + args = args[1:] + with _assert_warns_context(warning_class, name=func.__name__): + return func(*args, **kwargs) + + +@contextlib.contextmanager +def _assert_no_warnings_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + with warnings.catch_warnings(record=True) as l: + warnings.simplefilter('always') + yield + if len(l) > 0: + name_str = f' when calling {name}' if name is not None else '' + raise AssertionError(f'Got warnings{name_str}: {l}') + + +def assert_no_warnings(*args, **kwargs): + """ + Fail if the given callable produces any warnings. + + If called with all arguments omitted, may be used as a context manager: + + with assert_no_warnings(): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + The value returned by `func`. + + """ + if not args: + return _assert_no_warnings_context() + + func = args[0] + args = args[1:] + with _assert_no_warnings_context(name=func.__name__): + return func(*args, **kwargs) + + +def _gen_alignment_data(dtype=float32, type='binary', max_size=24): + """ + generator producing data with different alignment and offsets + to test simd vectorization + + Parameters + ---------- + dtype : dtype + data type to produce + type : string + 'unary': create data for unary operations, creates one input + and output array + 'binary': create data for unary operations, creates two input + and output array + max_size : integer + maximum size of data to produce + + Returns + ------- + if type is 'unary' yields one output, one input array and a message + containing information on the data + if type is 'binary' yields one output array, two input array and a message + containing information on the data + + """ + ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s' + bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s' + for o in range(3): + for s in range(o + 2, max(o + 3, max_size)): + if type == 'unary': + inp = lambda: arange(s, dtype=dtype)[o:] + out = empty((s,), dtype=dtype)[o:] + yield out, inp(), ufmt % (o, o, s, dtype, 'out of place') + d = inp() + yield d, d, ufmt % (o, o, s, dtype, 'in place') + yield out[1:], inp()[:-1], ufmt % \ + (o + 1, o, s - 1, dtype, 'out of place') + yield out[:-1], inp()[1:], ufmt % \ + (o, o + 1, s - 1, dtype, 'out of place') + yield inp()[:-1], inp()[1:], ufmt % \ + (o, o + 1, s - 1, dtype, 'aliased') + yield inp()[1:], inp()[:-1], ufmt % \ + (o + 1, o, s - 1, dtype, 'aliased') + if type == 'binary': + inp1 = lambda: arange(s, dtype=dtype)[o:] + inp2 = lambda: arange(s, dtype=dtype)[o:] + out = empty((s,), dtype=dtype)[o:] + yield out, inp1(), inp2(), bfmt % \ + (o, o, o, s, dtype, 'out of place') + d = inp1() + yield d, d, inp2(), bfmt % \ + (o, o, o, s, dtype, 'in place1') + d = inp2() + yield d, inp1(), d, bfmt % \ + (o, o, o, s, dtype, 'in place2') + yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \ + (o + 1, o, o, s - 1, dtype, 'out of place') + yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \ + (o, o + 1, o, s - 1, dtype, 'out of place') + yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \ 
+ (o, o, o + 1, s - 1, dtype, 'out of place') + yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \ + (o + 1, o, o, s - 1, dtype, 'aliased') + yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \ + (o, o + 1, o, s - 1, dtype, 'aliased') + yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \ + (o, o, o + 1, s - 1, dtype, 'aliased') + + +class IgnoreException(Exception): + "Ignoring this exception due to disabled feature" + pass + + +@contextlib.contextmanager +def tempdir(*args, **kwargs): + """Context manager to provide a temporary test folder. + + All arguments are passed as this to the underlying tempfile.mkdtemp + function. + + """ + tmpdir = mkdtemp(*args, **kwargs) + try: + yield tmpdir + finally: + shutil.rmtree(tmpdir) + + +@contextlib.contextmanager +def temppath(*args, **kwargs): + """Context manager for temporary files. + + Context manager that returns the path to a closed temporary file. Its + parameters are the same as for tempfile.mkstemp and are passed directly + to that function. The underlying file is removed when the context is + exited, so it should be closed at that time. + + Windows does not allow a temporary file to be opened if it is already + open, so the underlying file must be closed after opening before it + can be opened again. + + """ + fd, path = mkstemp(*args, **kwargs) + os.close(fd) + try: + yield path + finally: + os.remove(path) + + +class clear_and_catch_warnings(warnings.catch_warnings): + """ Context manager that resets warning registry for catching warnings + + Warnings can be slippery, because, whenever a warning is triggered, Python + adds a ``__warningregistry__`` member to the *calling* module. This makes + it impossible to retrigger the warning in this module, whatever you put in + the warnings filters. This context manager accepts a sequence of `modules` + as a keyword argument to its constructor and: + + * stores and removes any ``__warningregistry__`` entries in given `modules` + on entry; + * resets ``__warningregistry__`` to its previous state on exit. + + This makes it possible to trigger any warning afresh inside the context + manager without disturbing the state of warnings outside. + + For compatibility with Python 3.0, please consider all arguments to be + keyword-only. + + Parameters + ---------- + record : bool, optional + Specifies whether warnings should be captured by a custom + implementation of ``warnings.showwarning()`` and be appended to a list + returned by the context manager. Otherwise None is returned by the + context manager. The objects appended to the list are arguments whose + attributes mirror the arguments to ``showwarning()``. + modules : sequence, optional + Sequence of modules for which to reset warnings registry on entry and + restore on exit. To work correctly, all 'ignore' filters should + filter by one of these modules. + + Examples + -------- + >>> import warnings + >>> with np.testing.clear_and_catch_warnings( + ... modules=[np.core.fromnumeric]): + ... warnings.simplefilter('always') + ... warnings.filterwarnings('ignore', module='np.core.fromnumeric') + ... # do something that raises a warning but ignore those in + ... 
# np.core.fromnumeric
+    """
+    class_modules = ()
+
+    def __init__(self, record=False, modules=()):
+        self.modules = set(modules).union(self.class_modules)
+        self._warnreg_copies = {}
+        super().__init__(record=record)
+
+    def __enter__(self):
+        for mod in self.modules:
+            if hasattr(mod, '__warningregistry__'):
+                mod_reg = mod.__warningregistry__
+                self._warnreg_copies[mod] = mod_reg.copy()
+                mod_reg.clear()
+        return super().__enter__()
+
+    def __exit__(self, *exc_info):
+        super().__exit__(*exc_info)
+        for mod in self.modules:
+            if hasattr(mod, '__warningregistry__'):
+                mod.__warningregistry__.clear()
+            if mod in self._warnreg_copies:
+                mod.__warningregistry__.update(self._warnreg_copies[mod])
+
+
+class suppress_warnings:
+    """
+    Context manager and decorator doing much the same as
+    ``warnings.catch_warnings``.
+
+    However, it also provides a filter mechanism to work around
+    https://bugs.python.org/issue4180.
+
+    This bug causes Python before 3.4 to not reliably show warnings again
+    after they have been ignored once (even within catch_warnings). It
+    means that no "ignore" filter can be used easily, since following
+    tests might need to see the warning. Additionally it allows easier
+    specificity for testing warnings and can be nested.
+
+    Parameters
+    ----------
+    forwarding_rule : str, optional
+        One of "always", "once", "module", or "location". Analogous to
+        the usual warnings module filter mode, it is useful to reduce
+        noise mostly on the outermost level. Unsuppressed and unrecorded
+        warnings will be forwarded based on this rule. Defaults to "always".
+        "location" is equivalent to the warnings "default", matching by the
+        exact location the warning originated from.
+
+    Notes
+    -----
+    Filters added inside the context manager will be discarded again
+    when leaving it. Upon entering all filters defined outside a
+    context will be applied automatically.
+
+    When a recording filter is added, matching warnings are stored in the
+    ``log`` attribute as well as in the list returned by ``record``.
+
+    If filters are added and the ``module`` keyword is given, the
+    warning registry of this module will additionally be cleared when
+    applying it, entering the context, or exiting it. This could cause
+    warnings to appear a second time after leaving the context if they
+    were configured to be printed once (default) and were already
+    printed before the context was entered.
+
+    Nesting this context manager will work as expected when the
+    forwarding rule is "always" (default). Unfiltered and unrecorded
+    warnings will be passed out and be matched by the outer level.
+    On the outermost level they will be printed (or caught by another
+    warnings context). The forwarding rule argument can modify this
+    behaviour.
+
+    Like ``catch_warnings`` this context manager is not thread-safe.
+
+    Examples
+    --------
+
+    With a context manager::
+
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Some text")
+            sup.filter(module=np.ma.core)
+            log = sup.record(FutureWarning, "Does this occur?")
+            command_giving_warnings()
+            # The FutureWarning was given once, the filtered warnings were
+            # ignored.
All other warnings abide outside settings (may be + # printed/error) + assert_(len(log) == 1) + assert_(len(sup.log) == 1) # also stored in log attribute + + Or as a decorator:: + + sup = np.testing.suppress_warnings() + sup.filter(module=np.ma.core) # module must match exactly + @sup + def some_function(): + # do something which causes a warning in np.ma.core + pass + """ + def __init__(self, forwarding_rule="always"): + self._entered = False + + # Suppressions are either instance or defined inside one with block: + self._suppressions = [] + + if forwarding_rule not in {"always", "module", "once", "location"}: + raise ValueError("unsupported forwarding rule.") + self._forwarding_rule = forwarding_rule + + def _clear_registries(self): + if hasattr(warnings, "_filters_mutated"): + # clearing the registry should not be necessary on new pythons, + # instead the filters should be mutated. + warnings._filters_mutated() + return + # Simply clear the registry, this should normally be harmless, + # note that on new pythons it would be invalidated anyway. + for module in self._tmp_modules: + if hasattr(module, "__warningregistry__"): + module.__warningregistry__.clear() + + def _filter(self, category=Warning, message="", module=None, record=False): + if record: + record = [] # The log where to store warnings + else: + record = None + if self._entered: + if module is None: + warnings.filterwarnings( + "always", category=category, message=message) + else: + module_regex = module.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=category, message=message, + module=module_regex) + self._tmp_modules.add(module) + self._clear_registries() + + self._tmp_suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + else: + self._suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + + return record + + def filter(self, category=Warning, message="", module=None): + """ + Add a new suppressing filter or apply it if the state is entered. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. + """ + self._filter(category=category, message=message, module=module, + record=False) + + def record(self, category=Warning, message="", module=None): + """ + Append a new recording filter or apply it if the state is entered. + + All warnings matching will be appended to the ``log`` attribute. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Returns + ------- + log : list + A list which will be filled with all matched warnings. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. 
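+
+        A minimal doctest-style sketch (editorial addition; assumes NumPy's
+        default floating-point warning behaviour, where ``np.log(-1.)``
+        emits ``RuntimeWarning: invalid value encountered in log``)::
+
+            with np.testing.suppress_warnings() as sup:
+                log = sup.record(RuntimeWarning, "invalid value")
+                np.log(-1.)        # matched by category and message regex
+            assert len(log) == 1   # also available as sup.log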
+ """ + return self._filter(category=category, message=message, module=module, + record=True) + + def __enter__(self): + if self._entered: + raise RuntimeError("cannot enter suppress_warnings twice.") + + self._orig_show = warnings.showwarning + self._filters = warnings.filters + warnings.filters = self._filters[:] + + self._entered = True + self._tmp_suppressions = [] + self._tmp_modules = set() + self._forwarded = set() + + self.log = [] # reset global log (no need to keep same list) + + for cat, mess, _, mod, log in self._suppressions: + if log is not None: + del log[:] # clear the log + if mod is None: + warnings.filterwarnings( + "always", category=cat, message=mess) + else: + module_regex = mod.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=cat, message=mess, + module=module_regex) + self._tmp_modules.add(mod) + warnings.showwarning = self._showwarning + self._clear_registries() + + return self + + def __exit__(self, *exc_info): + warnings.showwarning = self._orig_show + warnings.filters = self._filters + self._clear_registries() + self._entered = False + del self._orig_show + del self._filters + + def _showwarning(self, message, category, filename, lineno, + *args, use_warnmsg=None, **kwargs): + for cat, _, pattern, mod, rec in ( + self._suppressions + self._tmp_suppressions)[::-1]: + if (issubclass(category, cat) and + pattern.match(message.args[0]) is not None): + if mod is None: + # Message and category match, either recorded or ignored + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + # Use startswith, because warnings strips the c or o from + # .pyc/.pyo files. + elif mod.__file__.startswith(filename): + # The message and module (filename) match + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + + # There is no filter in place, so pass to the outside handler + # unless we should only pass it once + if self._forwarding_rule == "always": + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, + *args, **kwargs) + else: + self._orig_showmsg(use_warnmsg) + return + + if self._forwarding_rule == "once": + signature = (message.args, category) + elif self._forwarding_rule == "module": + signature = (message.args, category, filename) + elif self._forwarding_rule == "location": + signature = (message.args, category, filename, lineno) + + if signature in self._forwarded: + return + self._forwarded.add(signature) + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, *args, + **kwargs) + else: + self._orig_showmsg(use_warnmsg) + + def __call__(self, func): + """ + Function decorator to apply certain suppressions to a whole + function. 
+ """ + @wraps(func) + def new_func(*args, **kwargs): + with self: + return func(*args, **kwargs) + + return new_func + + +@contextlib.contextmanager +def _assert_no_gc_cycles_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + + # not meaningful to test if there is no refcounting + if not HAS_REFCOUNT: + yield + return + + assert_(gc.isenabled()) + gc.disable() + gc_debug = gc.get_debug() + try: + for i in range(100): + if gc.collect() == 0: + break + else: + raise RuntimeError( + "Unable to fully collect garbage - perhaps a __del__ method " + "is creating more reference cycles?") + + gc.set_debug(gc.DEBUG_SAVEALL) + yield + # gc.collect returns the number of unreachable objects in cycles that + # were found -- we are checking that no cycles were created in the context + n_objects_in_cycles = gc.collect() + objects_in_cycles = gc.garbage[:] + finally: + del gc.garbage[:] + gc.set_debug(gc_debug) + gc.enable() + + if n_objects_in_cycles: + name_str = f' when calling {name}' if name is not None else '' + raise AssertionError( + "Reference cycles were found{}: {} objects were collected, " + "of which {} are shown below:{}" + .format( + name_str, + n_objects_in_cycles, + len(objects_in_cycles), + ''.join( + "\n {} object with id={}:\n {}".format( + type(o).__name__, + id(o), + pprint.pformat(o).replace('\n', '\n ') + ) for o in objects_in_cycles + ) + ) + ) + + +def assert_no_gc_cycles(*args, **kwargs): + """ + Fail if the given callable produces any reference cycles. + + If called with all arguments omitted, may be used as a context manager: + + with assert_no_gc_cycles(): + do_something() + + .. versionadded:: 1.15.0 + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + Nothing. The result is deliberately discarded to ensure that all cycles + are found. + + """ + if not args: + return _assert_no_gc_cycles_context() + + func = args[0] + args = args[1:] + with _assert_no_gc_cycles_context(name=func.__name__): + func(*args, **kwargs) + + +def break_cycles(): + """ + Break reference cycles by calling gc.collect + Objects can call other objects' methods (for instance, another object's + __del__) inside their own __del__. On PyPy, the interpreter only runs + between calls to gc.collect, so multiple calls are needed to completely + release all cycles. + """ + + gc.collect() + if IS_PYPY: + # a few more, just to make sure all the finalizers are called + gc.collect() + gc.collect() + gc.collect() + gc.collect() + + +def requires_memory(free_bytes): + """Decorator to skip a test if not enough memory is available""" + import pytest + + def decorator(func): + @wraps(func) + def wrapper(*a, **kw): + msg = check_free_memory(free_bytes) + if msg is not None: + pytest.skip(msg) + + try: + return func(*a, **kw) + except MemoryError: + # Probably ran out of memory regardless: don't regard as failure + pytest.xfail("MemoryError raised") + + return wrapper + + return decorator + + +def check_free_memory(free_bytes): + """ + Check whether `free_bytes` amount of memory is currently free. 
+ Returns: None if enough memory available, otherwise error message + """ + env_var = 'NPY_AVAILABLE_MEM' + env_value = os.environ.get(env_var) + if env_value is not None: + try: + mem_free = _parse_size(env_value) + except ValueError as exc: + raise ValueError(f'Invalid environment variable {env_var}: {exc}') + + msg = (f'{free_bytes/1e9} GB memory required, but environment variable ' + f'NPY_AVAILABLE_MEM={env_value} set') + else: + mem_free = _get_mem_available() + + if mem_free is None: + msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM " + "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run " + "the test.") + mem_free = -1 + else: + msg = f'{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available' + + return msg if mem_free < free_bytes else None + + +def _parse_size(size_str): + """Convert memory size strings ('12 GB' etc.) to float""" + suffixes = {'': 1, 'b': 1, + 'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4, + 'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4, + 'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4} + + size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format( + '|'.join(suffixes.keys())), re.I) + + m = size_re.match(size_str.lower()) + if not m or m.group(2) not in suffixes: + raise ValueError(f'value {size_str!r} not a valid size') + return int(float(m.group(1)) * suffixes[m.group(2)]) + + +def _get_mem_available(): + """Return available memory in bytes, or None if unknown.""" + try: + import psutil + return psutil.virtual_memory().available + except (ImportError, AttributeError): + pass + + if sys.platform.startswith('linux'): + info = {} + with open('/proc/meminfo') as f: + for line in f: + p = line.split() + info[p[0].strip(':').lower()] = int(p[1]) * 1024 + + if 'memavailable' in info: + # Linux >= 3.14 + return info['memavailable'] + else: + return info['memfree'] + info['cached'] + + return None + + +def _no_tracing(func): + """ + Decorator to temporarily turn off tracing for the duration of a test. 
+ Needed in tests that check refcounting, otherwise the tracing itself + influences the refcounts + """ + if not hasattr(sys, 'gettrace'): + return func + else: + @wraps(func) + def wrapper(*args, **kwargs): + original_trace = sys.gettrace() + try: + sys.settrace(None) + return func(*args, **kwargs) + finally: + sys.settrace(original_trace) + return wrapper + + +def _get_glibc_version(): + try: + ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1] + except Exception: + ver = '0.0' + + return ver + + +_glibcver = _get_glibc_version() +_glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x) + diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/testing/_private/utils.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/_private/utils.pyi new file mode 100644 index 00000000..6baefd83 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/_private/utils.pyi @@ -0,0 +1,402 @@ +import os +import sys +import ast +import types +import warnings +import unittest +import contextlib +from re import Pattern +from collections.abc import Callable, Iterable, Sequence +from typing import ( + Literal as L, + Any, + AnyStr, + ClassVar, + NoReturn, + overload, + type_check_only, + TypeVar, + Union, + Final, + SupportsIndex, +) +if sys.version_info >= (3, 10): + from typing import ParamSpec +else: + from typing_extensions import ParamSpec + +from numpy import generic, dtype, number, object_, bool_, _FloatValue +from numpy._typing import ( + NDArray, + ArrayLike, + DTypeLike, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, +) + +from unittest.case import ( + SkipTest as SkipTest, +) + +_P = ParamSpec("_P") +_T = TypeVar("_T") +_ET = TypeVar("_ET", bound=BaseException) +_FT = TypeVar("_FT", bound=Callable[..., Any]) + +# Must return a bool or an ndarray/generic type +# that is supported by `np.logical_and.reduce` +_ComparisonFunc = Callable[ + [NDArray[Any], NDArray[Any]], + Union[ + bool, + bool_, + number[Any], + NDArray[Union[bool_, number[Any], object_]], + ], +] + +__all__: list[str] + +class KnownFailureException(Exception): ... +class IgnoreException(Exception): ... + +class clear_and_catch_warnings(warnings.catch_warnings): + class_modules: ClassVar[tuple[types.ModuleType, ...]] + modules: set[types.ModuleType] + @overload + def __new__( + cls, + record: L[False] = ..., + modules: Iterable[types.ModuleType] = ..., + ) -> _clear_and_catch_warnings_without_records: ... + @overload + def __new__( + cls, + record: L[True], + modules: Iterable[types.ModuleType] = ..., + ) -> _clear_and_catch_warnings_with_records: ... + @overload + def __new__( + cls, + record: bool, + modules: Iterable[types.ModuleType] = ..., + ) -> clear_and_catch_warnings: ... + def __enter__(self) -> None | list[warnings.WarningMessage]: ... + def __exit__( + self, + __exc_type: None | type[BaseException] = ..., + __exc_val: None | BaseException = ..., + __exc_tb: None | types.TracebackType = ..., + ) -> None: ... + +# Type-check only `clear_and_catch_warnings` subclasses for both values of the +# `record` parameter. Copied from the stdlib `warnings` stubs. + +@type_check_only +class _clear_and_catch_warnings_with_records(clear_and_catch_warnings): + def __enter__(self) -> list[warnings.WarningMessage]: ... + +@type_check_only +class _clear_and_catch_warnings_without_records(clear_and_catch_warnings): + def __enter__(self) -> None: ... 
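+
+# Editorial sketch of how the ``record`` overloads above play out at
+# runtime (a hypothetical illustration, not part of the stub file):
+#
+#     with clear_and_catch_warnings(record=True) as w:    # list[WarningMessage]
+#         warnings.warn("example")
+#     assert len(w) == 1
+#     with clear_and_catch_warnings(record=False) as w:   # None
+#         warnings.warn("example")
+#     assert w is None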
+ +class suppress_warnings: + log: list[warnings.WarningMessage] + def __init__( + self, + forwarding_rule: L["always", "module", "once", "location"] = ..., + ) -> None: ... + def filter( + self, + category: type[Warning] = ..., + message: str = ..., + module: None | types.ModuleType = ..., + ) -> None: ... + def record( + self, + category: type[Warning] = ..., + message: str = ..., + module: None | types.ModuleType = ..., + ) -> list[warnings.WarningMessage]: ... + def __enter__(self: _T) -> _T: ... + def __exit__( + self, + __exc_type: None | type[BaseException] = ..., + __exc_val: None | BaseException = ..., + __exc_tb: None | types.TracebackType = ..., + ) -> None: ... + def __call__(self, func: _FT) -> _FT: ... + +verbose: int +IS_PYPY: Final[bool] +IS_PYSTON: Final[bool] +HAS_REFCOUNT: Final[bool] +HAS_LAPACK64: Final[bool] + +def assert_(val: object, msg: str | Callable[[], str] = ...) -> None: ... + +# Contrary to runtime we can't do `os.name` checks while type checking, +# only `sys.platform` checks +if sys.platform == "win32" or sys.platform == "cygwin": + def memusage(processName: str = ..., instance: int = ...) -> int: ... +elif sys.platform == "linux": + def memusage(_proc_pid_stat: str | bytes | os.PathLike[Any] = ...) -> None | int: ... +else: + def memusage() -> NoReturn: ... + +if sys.platform == "linux": + def jiffies( + _proc_pid_stat: str | bytes | os.PathLike[Any] = ..., + _load_time: list[float] = ..., + ) -> int: ... +else: + def jiffies(_load_time: list[float] = ...) -> int: ... + +def build_err_msg( + arrays: Iterable[object], + err_msg: str, + header: str = ..., + verbose: bool = ..., + names: Sequence[str] = ..., + precision: None | SupportsIndex = ..., +) -> str: ... + +def assert_equal( + actual: object, + desired: object, + err_msg: str = ..., + verbose: bool = ..., +) -> None: ... + +def print_assert_equal( + test_string: str, + actual: object, + desired: object, +) -> None: ... + +def assert_almost_equal( + actual: _ArrayLikeNumber_co | _ArrayLikeObject_co, + desired: _ArrayLikeNumber_co | _ArrayLikeObject_co, + decimal: int = ..., + err_msg: str = ..., + verbose: bool = ..., +) -> None: ... + +# Anything that can be coerced into `builtins.float` +def assert_approx_equal( + actual: _FloatValue, + desired: _FloatValue, + significant: int = ..., + err_msg: str = ..., + verbose: bool = ..., +) -> None: ... + +def assert_array_compare( + comparison: _ComparisonFunc, + x: ArrayLike, + y: ArrayLike, + err_msg: str = ..., + verbose: bool = ..., + header: str = ..., + precision: SupportsIndex = ..., + equal_nan: bool = ..., + equal_inf: bool = ..., + *, + strict: bool = ... +) -> None: ... + +def assert_array_equal( + x: ArrayLike, + y: ArrayLike, + err_msg: str = ..., + verbose: bool = ..., + *, + strict: bool = ... +) -> None: ... + +def assert_array_almost_equal( + x: _ArrayLikeNumber_co | _ArrayLikeObject_co, + y: _ArrayLikeNumber_co | _ArrayLikeObject_co, + decimal: float = ..., + err_msg: str = ..., + verbose: bool = ..., +) -> None: ... + +@overload +def assert_array_less( + x: _ArrayLikeNumber_co | _ArrayLikeObject_co, + y: _ArrayLikeNumber_co | _ArrayLikeObject_co, + err_msg: str = ..., + verbose: bool = ..., +) -> None: ... +@overload +def assert_array_less( + x: _ArrayLikeTD64_co, + y: _ArrayLikeTD64_co, + err_msg: str = ..., + verbose: bool = ..., +) -> None: ... +@overload +def assert_array_less( + x: _ArrayLikeDT64_co, + y: _ArrayLikeDT64_co, + err_msg: str = ..., + verbose: bool = ..., +) -> None: ... 
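+
+# Editorial sketch of the ``strict`` keyword accepted above (runtime
+# behaviour, mirroring numpy/testing/tests/test_utils.py; illustrative only):
+#
+#     assert_array_equal(np.full(3, 1.0), 1.0)                # broadcasts, passes
+#     assert_array_equal(np.full(3, 1.0), 1.0, strict=True)   # AssertionError: shape
+#     assert_array_equal(np.ones(3, int), np.ones(3, float),
+#                        strict=True)                         # AssertionError: dtype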
+ +def runstring( + astr: str | bytes | types.CodeType, + dict: None | dict[str, Any], +) -> Any: ... + +def assert_string_equal(actual: str, desired: str) -> None: ... + +def rundocs( + filename: None | str | os.PathLike[str] = ..., + raise_on_error: bool = ..., +) -> None: ... + +def raises(*args: type[BaseException]) -> Callable[[_FT], _FT]: ... + +@overload +def assert_raises( # type: ignore + expected_exception: type[BaseException] | tuple[type[BaseException], ...], + callable: Callable[_P, Any], + /, + *args: _P.args, + **kwargs: _P.kwargs, +) -> None: ... +@overload +def assert_raises( + expected_exception: type[_ET] | tuple[type[_ET], ...], + *, + msg: None | str = ..., +) -> unittest.case._AssertRaisesContext[_ET]: ... + +@overload +def assert_raises_regex( + expected_exception: type[BaseException] | tuple[type[BaseException], ...], + expected_regex: str | bytes | Pattern[Any], + callable: Callable[_P, Any], + /, + *args: _P.args, + **kwargs: _P.kwargs, +) -> None: ... +@overload +def assert_raises_regex( + expected_exception: type[_ET] | tuple[type[_ET], ...], + expected_regex: str | bytes | Pattern[Any], + *, + msg: None | str = ..., +) -> unittest.case._AssertRaisesContext[_ET]: ... + +def decorate_methods( + cls: type[Any], + decorator: Callable[[Callable[..., Any]], Any], + testmatch: None | str | bytes | Pattern[Any] = ..., +) -> None: ... + +def measure( + code_str: str | bytes | ast.mod | ast.AST, + times: int = ..., + label: None | str = ..., +) -> float: ... + +@overload +def assert_allclose( + actual: _ArrayLikeNumber_co | _ArrayLikeObject_co, + desired: _ArrayLikeNumber_co | _ArrayLikeObject_co, + rtol: float = ..., + atol: float = ..., + equal_nan: bool = ..., + err_msg: str = ..., + verbose: bool = ..., +) -> None: ... +@overload +def assert_allclose( + actual: _ArrayLikeTD64_co, + desired: _ArrayLikeTD64_co, + rtol: float = ..., + atol: float = ..., + equal_nan: bool = ..., + err_msg: str = ..., + verbose: bool = ..., +) -> None: ... + +def assert_array_almost_equal_nulp( + x: _ArrayLikeNumber_co, + y: _ArrayLikeNumber_co, + nulp: float = ..., +) -> None: ... + +def assert_array_max_ulp( + a: _ArrayLikeNumber_co, + b: _ArrayLikeNumber_co, + maxulp: float = ..., + dtype: DTypeLike = ..., +) -> NDArray[Any]: ... + +@overload +def assert_warns( + warning_class: type[Warning], +) -> contextlib._GeneratorContextManager[None]: ... +@overload +def assert_warns( + warning_class: type[Warning], + func: Callable[_P, _T], + /, + *args: _P.args, + **kwargs: _P.kwargs, +) -> _T: ... + +@overload +def assert_no_warnings() -> contextlib._GeneratorContextManager[None]: ... +@overload +def assert_no_warnings( + func: Callable[_P, _T], + /, + *args: _P.args, + **kwargs: _P.kwargs, +) -> _T: ... + +@overload +def tempdir( + suffix: None = ..., + prefix: None = ..., + dir: None = ..., +) -> contextlib._GeneratorContextManager[str]: ... +@overload +def tempdir( + suffix: None | AnyStr = ..., + prefix: None | AnyStr = ..., + dir: None | AnyStr | os.PathLike[AnyStr] = ..., +) -> contextlib._GeneratorContextManager[AnyStr]: ... + +@overload +def temppath( + suffix: None = ..., + prefix: None = ..., + dir: None = ..., + text: bool = ..., +) -> contextlib._GeneratorContextManager[str]: ... +@overload +def temppath( + suffix: None | AnyStr = ..., + prefix: None | AnyStr = ..., + dir: None | AnyStr | os.PathLike[AnyStr] = ..., + text: bool = ..., +) -> contextlib._GeneratorContextManager[AnyStr]: ... + +@overload +def assert_no_gc_cycles() -> contextlib._GeneratorContextManager[None]: ... 
+@overload
+def assert_no_gc_cycles(
+    func: Callable[_P, Any],
+    /,
+    *args: _P.args,
+    **kwargs: _P.kwargs,
+) -> None: ...
+
+def break_cycles() -> None: ...
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/testing/overrides.py b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/overrides.py
new file mode 100644
index 00000000..edc7132c
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/overrides.py
@@ -0,0 +1,83 @@
+"""Tools for testing implementations of __array_function__ and ufunc overrides
+
+
+"""
+
+from numpy.core.overrides import ARRAY_FUNCTIONS as _array_functions
+from numpy import ufunc as _ufunc
+import numpy.core.umath as _umath
+
+def get_overridable_numpy_ufuncs():
+    """List all numpy ufuncs overridable via `__array_ufunc__`
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    set
+        A set containing all overridable ufuncs in the public numpy API.
+    """
+    ufuncs = {obj for obj in _umath.__dict__.values()
+              if isinstance(obj, _ufunc)}
+    return ufuncs
+
+
+def allows_array_ufunc_override(func):
+    """Determine if a function can be overridden via `__array_ufunc__`
+
+    Parameters
+    ----------
+    func : callable
+        Function that may be overridable via `__array_ufunc__`
+
+    Returns
+    -------
+    bool
+        `True` if `func` is overridable via `__array_ufunc__` and
+        `False` otherwise.
+
+    Notes
+    -----
+    This function is equivalent to ``isinstance(func, np.ufunc)`` and
+    will work correctly for ufuncs defined outside of NumPy.
+
+    """
+    return isinstance(func, _ufunc)
+
+
+def get_overridable_numpy_array_functions():
+    """List all numpy functions overridable via `__array_function__`
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    set
+        A set containing all functions in the public numpy API that are
+        overridable via `__array_function__`.
+
+    """
+    # 'import numpy' doesn't import recfunctions, so make sure it's imported
+    # so the functions defined there show up in the function listing
+    from numpy.lib import recfunctions
+    return _array_functions.copy()
+
+def allows_array_function_override(func):
+    """Determine if a NumPy function can be overridden via `__array_function__`
+
+    Parameters
+    ----------
+    func : callable
+        Function that may be overridable via `__array_function__`
+
+    Returns
+    -------
+    bool
+        `True` if `func` is a function in the NumPy API that is
+        overridable via `__array_function__` and `False` otherwise.
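+
+    A doctest-style sketch (editorial addition; ``np.add`` is a ufunc, so
+    it is dispatched via ``__array_ufunc__`` rather than
+    ``__array_function__``):
+
+    >>> import numpy as np
+    >>> from numpy.testing.overrides import allows_array_function_override
+    >>> allows_array_function_override(np.mean)
+    True
+    >>> allows_array_function_override(np.add)
+    False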
+ """ + return func in _array_functions diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/testing/print_coercion_tables.py b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/print_coercion_tables.py new file mode 100755 index 00000000..c1d4cdff --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/print_coercion_tables.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python3 +"""Prints type-coercion tables for the built-in NumPy types + +""" +import numpy as np +from collections import namedtuple + +# Generic object that can be added, but doesn't do anything else +class GenericObject: + def __init__(self, v): + self.v = v + + def __add__(self, other): + return self + + def __radd__(self, other): + return self + + dtype = np.dtype('O') + +def print_cancast_table(ntypes): + print('X', end=' ') + for char in ntypes: + print(char, end=' ') + print() + for row in ntypes: + print(row, end=' ') + for col in ntypes: + if np.can_cast(row, col, "equiv"): + cast = "#" + elif np.can_cast(row, col, "safe"): + cast = "=" + elif np.can_cast(row, col, "same_kind"): + cast = "~" + elif np.can_cast(row, col, "unsafe"): + cast = "." + else: + cast = " " + print(cast, end=' ') + print() + +def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False): + print('+', end=' ') + for char in ntypes: + print(char, end=' ') + print() + for row in ntypes: + if row == 'O': + rowtype = GenericObject + else: + rowtype = np.obj2sctype(row) + + print(row, end=' ') + for col in ntypes: + if col == 'O': + coltype = GenericObject + else: + coltype = np.obj2sctype(col) + try: + if firstarray: + rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype) + else: + rowvalue = rowtype(inputfirstvalue) + colvalue = coltype(inputsecondvalue) + if use_promote_types: + char = np.promote_types(rowvalue.dtype, colvalue.dtype).char + else: + value = np.add(rowvalue, colvalue) + if isinstance(value, np.ndarray): + char = value.dtype.char + else: + char = np.dtype(type(value)).char + except ValueError: + char = '!' + except OverflowError: + char = '@' + except TypeError: + char = '#' + print(char, end=' ') + print() + + +def print_new_cast_table(*, can_cast=True, legacy=False, flags=False): + """Prints new casts, the values given are default "can-cast" values, not + actual ones. + """ + from numpy.core._multiarray_tests import get_all_cast_information + + cast_table = { + -1: " ", + 0: "#", # No cast (classify as equivalent here) + 1: "#", # equivalent casting + 2: "=", # safe casting + 3: "~", # same-kind casting + 4: ".", # unsafe casting + } + flags_table = { + 0 : "▗", 7: "█", + 1: "▚", 2: "▐", 4: "▄", + 3: "▜", 5: "▙", + 6: "▟", + } + + cast_info = namedtuple("cast_info", ["can_cast", "legacy", "flags"]) + no_cast_info = cast_info(" ", " ", " ") + + casts = get_all_cast_information() + table = {} + dtypes = set() + for cast in casts: + dtypes.add(cast["from"]) + dtypes.add(cast["to"]) + + if cast["from"] not in table: + table[cast["from"]] = {} + to_dict = table[cast["from"]] + + can_cast = cast_table[cast["casting"]] + legacy = "L" if cast["legacy"] else "." + flags = 0 + if cast["requires_pyapi"]: + flags |= 1 + if cast["supports_unaligned"]: + flags |= 2 + if cast["no_floatingpoint_errors"]: + flags |= 4 + + flags = flags_table[flags] + to_dict[cast["to"]] = cast_info(can_cast=can_cast, legacy=legacy, flags=flags) + + # The np.dtype(x.type) is a bit strange, because dtype classes do + # not expose much yet. 
+ types = np.typecodes["All"] + def sorter(x): + # This is a bit weird hack, to get a table as close as possible to + # the one printing all typecodes (but expecting user-dtypes). + dtype = np.dtype(x.type) + try: + indx = types.index(dtype.char) + except ValueError: + indx = np.inf + return (indx, dtype.char) + + dtypes = sorted(dtypes, key=sorter) + + def print_table(field="can_cast"): + print('X', end=' ') + for dt in dtypes: + print(np.dtype(dt.type).char, end=' ') + print() + for from_dt in dtypes: + print(np.dtype(from_dt.type).char, end=' ') + row = table.get(from_dt, {}) + for to_dt in dtypes: + print(getattr(row.get(to_dt, no_cast_info), field), end=' ') + print() + + if can_cast: + # Print the actual table: + print() + print("Casting: # is equivalent, = is safe, ~ is same-kind, and . is unsafe") + print() + print_table("can_cast") + + if legacy: + print() + print("L denotes a legacy cast . a non-legacy one.") + print() + print_table("legacy") + + if flags: + print() + print(f"{flags_table[0]}: no flags, {flags_table[1]}: PyAPI, " + f"{flags_table[2]}: supports unaligned, {flags_table[4]}: no-float-errors") + print() + print_table("flags") + + +if __name__ == '__main__': + print("can cast") + print_cancast_table(np.typecodes['All']) + print() + print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'") + print() + print("scalar + scalar") + print_coercion_table(np.typecodes['All'], 0, 0, False) + print() + print("scalar + neg scalar") + print_coercion_table(np.typecodes['All'], 0, -1, False) + print() + print("array + scalar") + print_coercion_table(np.typecodes['All'], 0, 0, True) + print() + print("array + neg scalar") + print_coercion_table(np.typecodes['All'], 0, -1, True) + print() + print("promote_types") + print_coercion_table(np.typecodes['All'], 0, 0, False, True) + print("New casting type promotion:") + print_new_cast_table(can_cast=True, legacy=True, flags=True) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/testing/setup.py b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/setup.py new file mode 100755 index 00000000..6f203e87 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/setup.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python3 + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('testing', parent_package, top_path) + + config.add_subpackage('_private') + config.add_subpackage('tests') + config.add_data_files('*.pyi') + config.add_data_files('_private/*.pyi') + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(maintainer="NumPy Developers", + maintainer_email="numpy-dev@numpy.org", + description="NumPy test module", + url="https://www.numpy.org", + license="NumPy License (BSD Style)", + configuration=configuration, + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/testing/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/testing/tests/test_utils.py b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/tests/test_utils.py new file mode 100644 index 00000000..0aaa508e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/testing/tests/test_utils.py @@ -0,0 +1,1626 @@ +import warnings +import sys +import os +import itertools +import pytest +import weakref + +import numpy as np +from numpy.testing import ( + assert_equal, 
assert_array_equal, assert_almost_equal,
+    assert_array_almost_equal, assert_array_less, build_err_msg,
+    assert_raises, assert_warns, assert_no_warnings, assert_allclose,
+    assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,
+    clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
+    tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
+    )
+
+
+class _GenericTest:
+
+    def _test_equal(self, a, b):
+        self._assert_func(a, b)
+
+    def _test_not_equal(self, a, b):
+        with assert_raises(AssertionError):
+            self._assert_func(a, b)
+
+    def test_array_rank1_eq(self):
+        """Test that two equal arrays of rank 1 are found equal."""
+        a = np.array([1, 2])
+        b = np.array([1, 2])
+
+        self._test_equal(a, b)
+
+    def test_array_rank1_noteq(self):
+        """Test that two different arrays of rank 1 are found not equal."""
+        a = np.array([1, 2])
+        b = np.array([2, 2])
+
+        self._test_not_equal(a, b)
+
+    def test_array_rank2_eq(self):
+        """Test that two equal arrays of rank 2 are found equal."""
+        a = np.array([[1, 2], [3, 4]])
+        b = np.array([[1, 2], [3, 4]])
+
+        self._test_equal(a, b)
+
+    def test_array_diffshape(self):
+        """Test that two arrays with different shapes are found not equal."""
+        a = np.array([1, 2])
+        b = np.array([[1, 2], [1, 2]])
+
+        self._test_not_equal(a, b)
+
+    def test_objarray(self):
+        """Test object arrays."""
+        a = np.array([1, 1], dtype=object)
+        self._test_equal(a, 1)
+
+    def test_array_likes(self):
+        self._test_equal([1, 2, 3], (1, 2, 3))
+
+
+class TestArrayEqual(_GenericTest):
+
+    def setup_method(self):
+        self._assert_func = assert_array_equal
+
+    def test_generic_rank1(self):
+        """Test rank 1 arrays for all dtypes."""
+        def foo(t):
+            a = np.empty(2, t)
+            a.fill(1)
+            b = a.copy()
+            c = a.copy()
+            c.fill(0)
+            self._test_equal(a, b)
+            self._test_not_equal(c, b)
+
+        # Test numeric types and object
+        for t in '?bhilqpBHILQPfdgFDG':
+            foo(t)
+
+        # Test strings
+        for t in ['S1', 'U1']:
+            foo(t)
+
+    def test_0_ndim_array(self):
+        x = np.array(473963742225900817127911193656584771)
+        y = np.array(18535119325151578301457182298393896)
+        assert_raises(AssertionError, self._assert_func, x, y)
+
+        y = x
+        self._assert_func(x, y)
+
+        x = np.array(43)
+        y = np.array(10)
+        assert_raises(AssertionError, self._assert_func, x, y)
+
+        y = x
+        self._assert_func(x, y)
+
+    def test_generic_rank3(self):
+        """Test rank 3 arrays for all dtypes."""
+        def foo(t):
+            a = np.empty((4, 2, 3), t)
+            a.fill(1)
+            b = a.copy()
+            c = a.copy()
+            c.fill(0)
+            self._test_equal(a, b)
+            self._test_not_equal(c, b)
+
+        # Test numeric types and object
+        for t in '?bhilqpBHILQPfdgFDG':
+            foo(t)
+
+        # Test strings
+        for t in ['S1', 'U1']:
+            foo(t)
+
+    def test_nan_array(self):
+        """Test arrays with nan values in them."""
+        a = np.array([1, 2, np.nan])
+        b = np.array([1, 2, np.nan])
+
+        self._test_equal(a, b)
+
+        c = np.array([1, 2, 3])
+        self._test_not_equal(c, b)
+
+    def test_string_arrays(self):
+        """Test that equal string arrays are found equal and different
+        ones are not."""
+        a = np.array(['floupi', 'floupa'])
+        b = np.array(['floupi', 'floupa'])
+
+        self._test_equal(a, b)
+
+        c = np.array(['floupipi', 'floupa'])
+
+        self._test_not_equal(c, b)
+
+    def test_recarrays(self):
+        """Test record arrays."""
+        a = np.empty(2, [('floupi', float), ('floupa', float)])
+        a['floupi'] = [1, 2]
+        a['floupa'] = [1, 2]
+        b = a.copy()
+
+        self._test_equal(a, b)
+
+        c = np.empty(2, [('floupipi', float),
+                         ('floupi', float), ('floupa', float)])
+        c['floupipi'] = a['floupi'].copy()
+        c['floupa'] = a['floupa'].copy()
+
+        with pytest.raises(TypeError):
+            self._test_not_equal(c, b)
+
+    def test_masked_nan_inf(self):
+        # Regression test for gh-11121
+        a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])
+        b = np.array([3., np.nan, 6.5])
+        self._test_equal(a, b)
+        self._test_equal(b, a)
+        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])
+        b = np.array([np.inf, 4., 6.5])
+        self._test_equal(a, b)
+        self._test_equal(b, a)
+
+    def test_subclass_that_overrides_eq(self):
+        # While we cannot guarantee testing functions will always work for
+        # subclasses, the tests should ideally rely only on subclasses having
+        # comparison operators, not on them being able to store booleans
+        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
+        class MyArray(np.ndarray):
+            def __eq__(self, other):
+                return bool(np.equal(self, other).all())
+
+            def __ne__(self, other):
+                return not self == other
+
+        a = np.array([1., 2.]).view(MyArray)
+        b = np.array([2., 3.]).view(MyArray)
+        assert_(isinstance(a == a, bool))
+        assert_(a == a)
+        assert_(a != b)
+        self._test_equal(a, a)
+        self._test_not_equal(a, b)
+        self._test_not_equal(b, a)
+
+    def test_subclass_that_does_not_implement_npall(self):
+        class MyArray(np.ndarray):
+            def __array_function__(self, *args, **kwargs):
+                return NotImplemented
+
+        a = np.array([1., 2.]).view(MyArray)
+        b = np.array([2., 3.]).view(MyArray)
+        with assert_raises(TypeError):
+            np.all(a)
+        self._test_equal(a, a)
+        self._test_not_equal(a, b)
+        self._test_not_equal(b, a)
+
+    def test_suppress_overflow_warnings(self):
+        # Based on issue #18992
+        with pytest.raises(AssertionError):
+            with np.errstate(all="raise"):
+                np.testing.assert_array_equal(
+                    np.array([1, 2, 3], np.float32),
+                    np.array([1, 1e-40, 3], np.float32))
+
+    def test_array_vs_scalar_is_equal(self):
+        """Test comparing an array with a scalar when all values are equal."""
+        a = np.array([1., 1., 1.])
+        b = 1.
+
+        self._test_equal(a, b)
+
+    def test_array_vs_scalar_not_equal(self):
+        """Test comparing an array with a scalar when not all values are equal."""
+        a = np.array([1., 2., 3.])
+        b = 1.
+
+        self._test_not_equal(a, b)
+
+    def test_array_vs_scalar_strict(self):
+        """Test comparing an array with a scalar with the strict option."""
+        a = np.array([1., 1., 1.])
+        b = 1.
+ + with pytest.raises(AssertionError): + assert_array_equal(a, b, strict=True) + + def test_array_vs_array_strict(self): + """Test comparing two arrays with strict option.""" + a = np.array([1., 1., 1.]) + b = np.array([1., 1., 1.]) + + assert_array_equal(a, b, strict=True) + + def test_array_vs_float_array_strict(self): + """Test comparing two arrays with strict option.""" + a = np.array([1, 1, 1]) + b = np.array([1., 1., 1.]) + + with pytest.raises(AssertionError): + assert_array_equal(a, b, strict=True) + + +class TestBuildErrorMessage: + + def test_build_err_msg_defaults(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, ' + '2.00003, 3.00004])') + assert_equal(a, b) + + def test_build_err_msg_no_verbose(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, verbose=False) + b = '\nItems are not equal: There is a mismatch' + assert_equal(a, b) + + def test_build_err_msg_custom_names(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR')) + b = ('\nItems are not equal: There is a mismatch\n FOO: array([' + '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, ' + '3.00004])') + assert_equal(a, b) + + def test_build_err_msg_custom_precision(self): + x = np.array([1.000000001, 2.00002, 3.00003]) + y = np.array([1.000000002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, precision=10) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array([' + '1.000000002, 2.00003 , 3.00004 ])') + assert_equal(a, b) + + +class TestEqual(TestArrayEqual): + + def setup_method(self): + self._assert_func = assert_equal + + def test_nan_items(self): + self._assert_func(np.nan, np.nan) + self._assert_func([np.nan], [np.nan]) + self._test_not_equal(np.nan, [np.nan]) + self._test_not_equal(np.nan, 1) + + def test_inf_items(self): + self._assert_func(np.inf, np.inf) + self._assert_func([np.inf], [np.inf]) + self._test_not_equal(np.inf, [np.inf]) + + def test_datetime(self): + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "s") + ) + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "m") + ) + + # gh-10081 + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "s") + ) + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "m") + ) + + def test_nat_items(self): + # not a datetime + nadt_no_unit = np.datetime64("NaT") + nadt_s = np.datetime64("NaT", "s") + nadt_d = np.datetime64("NaT", "ns") + # not a timedelta + natd_no_unit = np.timedelta64("NaT") + natd_s = np.timedelta64("NaT", "s") + natd_d = np.timedelta64("NaT", "ns") + + dts = [nadt_no_unit, nadt_s, nadt_d] + tds = [natd_no_unit, natd_s, natd_d] + for a, b in itertools.product(dts, dts): + self._assert_func(a, b) + self._assert_func([a], [b]) + self._test_not_equal([a], b) + + for a, b in itertools.product(tds, tds): + self._assert_func(a, b) + self._assert_func([a], [b]) + self._test_not_equal([a], b) + + for a, b in 
itertools.product(tds, dts): + self._test_not_equal(a, b) + self._test_not_equal(a, [b]) + self._test_not_equal([a], [b]) + self._test_not_equal([a], np.datetime64("2017-01-01", "s")) + self._test_not_equal([b], np.datetime64("2017-01-01", "s")) + self._test_not_equal([a], np.timedelta64(123, "s")) + self._test_not_equal([b], np.timedelta64(123, "s")) + + def test_non_numeric(self): + self._assert_func('ab', 'ab') + self._test_not_equal('ab', 'abb') + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_negative_zero(self): + self._test_not_equal(np.PZERO, np.NZERO) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + + def test_object(self): + #gh-12942 + import datetime + a = np.array([datetime.datetime(2000, 1, 1), + datetime.datetime(2000, 1, 2)]) + self._test_not_equal(a, a[::-1]) + + +class TestArrayAlmostEqual(_GenericTest): + + def setup_method(self): + self._assert_func = assert_array_almost_equal + + def test_closeness(self): + # Note that in the course of time we ended up with + # `abs(x - y) < 1.5 * 10**(-decimal)` + # instead of the previously documented + # `abs(x - y) < 0.5 * 10**(-decimal)` + # so this check serves to preserve the wrongness. + + # test scalars + self._assert_func(1.499999, 0.0, decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func(1.5, 0.0, decimal=0)) + + # test arrays + self._assert_func([1.499999], [0.0], decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func([1.5], [0.0], decimal=0)) + + def test_simple(self): + x = np.array([1234.2222]) + y = np.array([1234.2223]) + + self._assert_func(x, y, decimal=3) + self._assert_func(x, y, decimal=4) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, decimal=5)) + + def test_nan(self): + anan = np.array([np.nan]) + aone = np.array([1]) + ainf = np.array([np.inf]) + self._assert_func(anan, anan) + assert_raises(AssertionError, + lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, + lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, + lambda: self._assert_func(ainf, anan)) + + def test_inf(self): + a = np.array([[1., 2.], [3., 4.]]) + b = a.copy() + a[0, 0] = np.inf + assert_raises(AssertionError, + lambda: self._assert_func(a, b)) + b[0, 0] = -np.inf + assert_raises(AssertionError, + lambda: self._assert_func(a, b)) + + def test_subclass(self): + a = np.array([[1., 2.], [3., 4.]]) + b = np.ma.masked_array([[1., 2.], [0., 4.]], + [[False, False], [True, False]]) + self._assert_func(a, b) + self._assert_func(b, a) + self._assert_func(b, b) + + # Test fully masked as well (see gh-11123). + a = np.ma.MaskedArray(3.5, mask=True) + b = np.array([3., 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.masked + b = np.array([3., 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) + b = np.array([1., 2., 3.]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) + b = np.array(1.) 
+ self._test_equal(a, b) + self._test_equal(b, a) + + def test_subclass_that_cannot_be_bool(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. + class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + raise NotImplementedError + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + +class TestAlmostEqual(_GenericTest): + + def setup_method(self): + self._assert_func = assert_almost_equal + + def test_closeness(self): + # Note that in the course of time we ended up with + # `abs(x - y) < 1.5 * 10**(-decimal)` + # instead of the previously documented + # `abs(x - y) < 0.5 * 10**(-decimal)` + # so this check serves to preserve the wrongness. + + # test scalars + self._assert_func(1.499999, 0.0, decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func(1.5, 0.0, decimal=0)) + + # test arrays + self._assert_func([1.499999], [0.0], decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func([1.5], [0.0], decimal=0)) + + def test_nan_item(self): + self._assert_func(np.nan, np.nan) + assert_raises(AssertionError, + lambda: self._assert_func(np.nan, 1)) + assert_raises(AssertionError, + lambda: self._assert_func(np.nan, np.inf)) + assert_raises(AssertionError, + lambda: self._assert_func(np.inf, np.nan)) + + def test_inf_item(self): + self._assert_func(np.inf, np.inf) + self._assert_func(-np.inf, -np.inf) + assert_raises(AssertionError, + lambda: self._assert_func(np.inf, 1)) + assert_raises(AssertionError, + lambda: self._assert_func(-np.inf, np.inf)) + + def test_simple_item(self): + self._test_not_equal(1, 2) + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + z = np.array([complex(1, 2), complex(np.nan, 1)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + self._test_not_equal(x, z) + + def test_error_message(self): + """Check the message is formatted correctly for the decimal value. + Also check the message when input includes inf or nan (gh12200)""" + x = np.array([1.00000000001, 2.00000000002, 3.00003]) + y = np.array([1.00000000002, 2.00000000003, 3.00004]) + + # Test with a different amount of decimal digits + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y, decimal=12) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 1.e-05') + assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06') + assert_equal( + msgs[6], + ' x: array([1.00000000001, 2.00000000002, 3.00003 ])') + assert_equal( + msgs[7], + ' y: array([1.00000000002, 2.00000000003, 3.00004 ])') + + # With the default value of decimal digits, only the 3rd element + # differs. 
Note that we only check for the formatting of the arrays + # themselves. + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)') + assert_equal(msgs[4], 'Max absolute difference: 1.e-05') + assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06') + assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])') + assert_equal(msgs[7], ' y: array([1. , 2. , 3.00004])') + + # Check the error message when input includes inf + x = np.array([np.inf, 0]) + y = np.array([np.inf, 1]) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)') + assert_equal(msgs[4], 'Max absolute difference: 1.') + assert_equal(msgs[5], 'Max relative difference: 1.') + assert_equal(msgs[6], ' x: array([inf, 0.])') + assert_equal(msgs[7], ' y: array([inf, 1.])') + + # Check the error message when dividing by zero + x = np.array([1, 2]) + y = np.array([0, 0]) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 2') + assert_equal(msgs[5], 'Max relative difference: inf') + + def test_error_message_2(self): + """Check the message is formatted correctly when either x or y is a scalar.""" + x = 2 + y = np.ones(20) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 1.') + assert_equal(msgs[5], 'Max relative difference: 1.') + + y = 2 + x = np.ones(20) + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msgs = str(exc_info.value).split('\n') + assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)') + assert_equal(msgs[4], 'Max absolute difference: 1.') + assert_equal(msgs[5], 'Max relative difference: 0.5') + + def test_subclass_that_cannot_be_bool(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. 
+ class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + raise NotImplementedError + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + +class TestApproxEqual: + + def setup_method(self): + self._assert_func = assert_approx_equal + + def test_simple_0d_arrays(self): + x = np.array(1234.22) + y = np.array(1234.23) + + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_simple_items(self): + x = 1234.22 + y = 1234.23 + + self._assert_func(x, y, significant=4) + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_nan_array(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + def test_nan_items(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + +class TestArrayAssertLess: + + def setup_method(self): + self._assert_func = assert_array_less + + def test_simple_arrays(self): + x = np.array([1.1, 2.2]) + y = np.array([1.2, 2.3]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([1.0, 2.3]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_rank2(self): + x = np.array([[1.1, 2.2], [3.3, 4.4]]) + y = np.array([[1.2, 2.3], [3.4, 4.5]]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([[1.0, 2.3], [3.4, 4.5]]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_rank3(self): + x = np.ones(shape=(2, 2, 2)) + y = np.ones(shape=(2, 2, 2))+1 + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y[0, 0, 0] = 0 + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_simple_items(self): + x = 1.1 + y = 2.2 + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([2.2, 3.3]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([1.0, 3.3]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + + def test_nan_noncompare(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(aone, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: 
self._assert_func(ainf, anan)) + + def test_nan_noncompare_array(self): + x = np.array([1.1, 2.2, 3.3]) + anan = np.array(np.nan) + + assert_raises(AssertionError, lambda: self._assert_func(x, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, x)) + + x = np.array([1.1, 2.2, np.nan]) + + assert_raises(AssertionError, lambda: self._assert_func(x, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, x)) + + y = np.array([1.0, 2.0, np.nan]) + + self._assert_func(y, x) + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + + def test_inf_compare(self): + aone = np.array(1) + ainf = np.array(np.inf) + + self._assert_func(aone, ainf) + self._assert_func(-ainf, aone) + self._assert_func(-ainf, ainf) + assert_raises(AssertionError, lambda: self._assert_func(ainf, aone)) + assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf)) + + def test_inf_compare_array(self): + x = np.array([1.1, 2.2, np.inf]) + ainf = np.array(np.inf) + + assert_raises(AssertionError, lambda: self._assert_func(x, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, x)) + assert_raises(AssertionError, lambda: self._assert_func(x, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf)) + assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x)) + self._assert_func(-ainf, x) + + +class TestWarns: + + def test_warn(self): + def f(): + warnings.warn("yo") + return 3 + + before_filters = sys.modules['warnings'].filters[:] + assert_equal(assert_warns(UserWarning, f), 3) + after_filters = sys.modules['warnings'].filters + + assert_raises(AssertionError, assert_no_warnings, f) + assert_equal(assert_no_warnings(lambda x: x, 1), 1) + + # Check that the warnings state is unchanged + assert_equal(before_filters, after_filters, + "assert_warns does not preserver warnings state") + + def test_context_manager(self): + + before_filters = sys.modules['warnings'].filters[:] + with assert_warns(UserWarning): + warnings.warn("yo") + after_filters = sys.modules['warnings'].filters + + def no_warnings(): + with assert_no_warnings(): + warnings.warn("yo") + + assert_raises(AssertionError, no_warnings) + assert_equal(before_filters, after_filters, + "assert_warns does not preserver warnings state") + + def test_warn_wrong_warning(self): + def f(): + warnings.warn("yo", DeprecationWarning) + + failed = False + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + try: + # Should raise a DeprecationWarning + assert_warns(UserWarning, f) + failed = True + except DeprecationWarning: + pass + + if failed: + raise AssertionError("wrong warning caught by assert_warn") + + +class TestAssertAllclose: + + def test_simple(self): + x = 1e-3 + y = 1e-9 + + assert_allclose(x, y, atol=1) + assert_raises(AssertionError, assert_allclose, x, y) + + a = np.array([x, y, x, y]) + b = np.array([x, y, x, x]) + + assert_allclose(a, b, atol=1) + assert_raises(AssertionError, assert_allclose, a, b) + + b[-1] = y * (1 + 1e-8) + assert_allclose(a, b) + assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9) + + assert_allclose(6, 10, rtol=0.5) + assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5) + + def test_min_int(self): + a = np.array([np.iinfo(np.int_).min], dtype=np.int_) + # Should not raise: + 
+        assert_allclose(a, a)
+
+    def test_report_fail_percentage(self):
+        a = np.array([1, 1, 1, 1])
+        b = np.array([1, 1, 1, 2])
+
+        with pytest.raises(AssertionError) as exc_info:
+            assert_allclose(a, b)
+        msg = str(exc_info.value)
+        assert_('Mismatched elements: 1 / 4 (25%)\n'
+                'Max absolute difference: 1\n'
+                'Max relative difference: 0.5' in msg)
+
+    def test_equal_nan(self):
+        a = np.array([np.nan])
+        b = np.array([np.nan])
+        # Should not raise:
+        assert_allclose(a, b, equal_nan=True)
+
+    def test_not_equal_nan(self):
+        a = np.array([np.nan])
+        b = np.array([np.nan])
+        assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)
+
+    def test_equal_nan_default(self):
+        # Make sure equal_nan default behavior remains unchanged. (All
+        # of these functions use assert_array_compare under the hood.)
+        # None of these should raise.
+        a = np.array([np.nan])
+        b = np.array([np.nan])
+        assert_array_equal(a, b)
+        assert_array_almost_equal(a, b)
+        assert_array_less(a, b)
+        assert_allclose(a, b)
+
+    def test_report_max_relative_error(self):
+        a = np.array([0, 1])
+        b = np.array([0, 2])
+
+        with pytest.raises(AssertionError) as exc_info:
+            assert_allclose(a, b)
+        msg = str(exc_info.value)
+        assert_('Max relative difference: 0.5' in msg)
+
+    def test_timedelta(self):
+        # see gh-18286
+        a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]")
+        assert_allclose(a, a)
+
+    def test_error_message_unsigned(self):
+        """Check that the message is formatted correctly when overflow can occur
+        (gh-21768)"""
+        # Ensure we test for potential overflow in the case of:
+        #        x - y
+        # and
+        #        y - x
+        x = np.asarray([0, 1, 8], dtype='uint8')
+        y = np.asarray([4, 4, 4], dtype='uint8')
+        with pytest.raises(AssertionError) as exc_info:
+            assert_allclose(x, y, atol=3)
+        msgs = str(exc_info.value).split('\n')
+        assert_equal(msgs[4], 'Max absolute difference: 4')
+
+
+class TestArrayAlmostEqualNulp:
+
+    def test_float64_pass(self):
+        # The number of units of least precision
+        # In this case, use a few places above the lowest level (ie nulp=1)
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float64)
+        x = 10**x
+        x = np.r_[-x, x]
+
+        # Addition
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp/2.
+        assert_array_almost_equal_nulp(x, y, nulp)
+
+        # Subtraction
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp/2.
+        assert_array_almost_equal_nulp(x, y, nulp)
+
+    def test_float64_fail(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float64)
+        x = 10**x
+        x = np.r_[-x, x]
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      x, y, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      x, y, nulp)
+
+    def test_float64_ignore_nan(self):
+        # Ignore ULP differences between various NAN's
+        # Note that MIPS may reverse quiet and signaling nans
+        # so we use the builtin version as a base.
+        offset = np.uint64(0xffffffff)
+        nan1_i64 = np.array(np.nan, dtype=np.float64).view(np.uint64)
+        nan2_i64 = nan1_i64 ^ offset  # nan payload on MIPS is all ones.
+        nan1_f64 = nan1_i64.view(np.float64)
+        nan2_f64 = nan2_i64.view(np.float64)
+        assert_array_max_ulp(nan1_f64, nan2_f64, 0)
+
+    def test_float32_pass(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float32)
+        x = 10**x
+        x = np.r_[-x, x]
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp/2.
+        assert_array_almost_equal_nulp(x, y, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp/2.
+        assert_array_almost_equal_nulp(x, y, nulp)
+
+    def test_float32_fail(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float32)
+        x = 10**x
+        x = np.r_[-x, x]
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      x, y, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      x, y, nulp)
+
+    def test_float32_ignore_nan(self):
+        # Ignore ULP differences between various NAN's
+        # Note that MIPS may reverse quiet and signaling nans
+        # so we use the builtin version as a base.
+        offset = np.uint32(0xffff)
+        nan1_i32 = np.array(np.nan, dtype=np.float32).view(np.uint32)
+        nan2_i32 = nan1_i32 ^ offset  # nan payload on MIPS is all ones.
+        nan1_f32 = nan1_i32.view(np.float32)
+        nan2_f32 = nan2_i32.view(np.float32)
+        assert_array_max_ulp(nan1_f32, nan2_f32, 0)
+
+    def test_float16_pass(self):
+        nulp = 5
+        x = np.linspace(-4, 4, 10, dtype=np.float16)
+        x = 10**x
+        x = np.r_[-x, x]
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp/2.
+        assert_array_almost_equal_nulp(x, y, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp/2.
+        assert_array_almost_equal_nulp(x, y, nulp)
+
+    def test_float16_fail(self):
+        nulp = 5
+        x = np.linspace(-4, 4, 10, dtype=np.float16)
+        x = 10**x
+        x = np.r_[-x, x]
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      x, y, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      x, y, nulp)
+
+    def test_float16_ignore_nan(self):
+        # Ignore ULP differences between various NAN's
+        # Note that MIPS may reverse quiet and signaling nans
+        # so we use the builtin version as a base.
+        offset = np.uint16(0xff)
+        nan1_i16 = np.array(np.nan, dtype=np.float16).view(np.uint16)
+        nan2_i16 = nan1_i16 ^ offset  # nan payload on MIPS is all ones.
+        nan1_f16 = nan1_i16.view(np.float16)
+        nan2_f16 = nan2_i16.view(np.float16)
+        assert_array_max_ulp(nan1_f16, nan2_f16, 0)
+
+    def test_complex128_pass(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float64)
+        x = 10**x
+        x = np.r_[-x, x]
+        xi = x + x*1j
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp/2.
+        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
+        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
+        # The test condition needs to be at least a factor of sqrt(2) smaller
+        # because the real and imaginary parts both change
+        y = x + x*eps*nulp/4.
+        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp/2.
+        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
+        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
+        y = x - x*epsneg*nulp/4.
+        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
+
+    def test_complex128_fail(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float64)
+        x = 10**x
+        x = np.r_[-x, x]
+        xi = x + x*1j
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, x + y*1j, nulp)
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + x*1j, nulp)
+        # The test condition needs to be at least a factor of sqrt(2) smaller
+        # because the real and imaginary parts both change
+        y = x + x*eps*nulp
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + y*1j, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, x + y*1j, nulp)
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + x*1j, nulp)
+        y = x - x*epsneg*nulp
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + y*1j, nulp)
+
+    def test_complex64_pass(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float32)
+        x = 10**x
+        x = np.r_[-x, x]
+        xi = x + x*1j
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp/2.
+        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
+        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
+        y = x + x*eps*nulp/4.
+        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp/2.
+        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
+        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
+        y = x - x*epsneg*nulp/4.
+        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
+
+    def test_complex64_fail(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float32)
+        x = 10**x
+        x = np.r_[-x, x]
+        xi = x + x*1j
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, x + y*1j, nulp)
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + x*1j, nulp)
+        y = x + x*eps*nulp
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + y*1j, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, x + y*1j, nulp)
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + x*1j, nulp)
+        y = x - x*epsneg*nulp
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + y*1j, nulp)
+
+
+class TestULP:
+
+    def test_equal(self):
+        x = np.random.randn(10)
+        assert_array_max_ulp(x, x, maxulp=0)
+
+    def test_single(self):
+        # Generate 1 + small deviation, check that adding eps gives a few ULP
+        x = np.ones(10).astype(np.float32)
+        x += 0.01 * np.random.randn(10).astype(np.float32)
+        eps = np.finfo(np.float32).eps
+        assert_array_max_ulp(x, x+eps, maxulp=20)
+
+    def test_double(self):
+        # Generate 1 + small deviation, check that adding eps gives a few ULP
+        x = np.ones(10).astype(np.float64)
+        x += 0.01 * np.random.randn(10).astype(np.float64)
+        eps = np.finfo(np.float64).eps
+        assert_array_max_ulp(x, x+eps, maxulp=200)
+
+    def test_inf(self):
+        for dt in [np.float32, np.float64]:
+            inf = np.array([np.inf]).astype(dt)
+            big = np.array([np.finfo(dt).max])
+            assert_array_max_ulp(inf, big, maxulp=200)
+
+    def test_nan(self):
+        # Test that nan is 'far' from small, tiny, inf, max and min
+        for dt in [np.float32, np.float64]:
+            if dt == np.float32:
+                maxulp = 1e6
+            else:
+                maxulp = 1e12
+            inf = np.array([np.inf]).astype(dt)
+            nan = np.array([np.nan]).astype(dt)
+            big = np.array([np.finfo(dt).max])
+            tiny = np.array([np.finfo(dt).tiny])
+            zero = np.array([np.PZERO]).astype(dt)
+            nzero = np.array([np.NZERO]).astype(dt)
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, inf,
+                                                       maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, big,
+                                                       maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, tiny,
+                                                       maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, zero,
+                                                       maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, nzero,
+                                                       maxulp=maxulp))
+
+
+class TestStringEqual:
+    def test_simple(self):
+        assert_string_equal("hello", "hello")
assert_string_equal("hello\nmultiline", "hello\nmultiline") + + with pytest.raises(AssertionError) as exc_info: + assert_string_equal("foo\nbar", "hello\nbar") + msg = str(exc_info.value) + assert_equal(msg, "Differences in strings:\n- foo\n+ hello") + + assert_raises(AssertionError, + lambda: assert_string_equal("foo", "hello")) + + def test_regex(self): + assert_string_equal("a+*b", "a+*b") + + assert_raises(AssertionError, + lambda: assert_string_equal("aaa", "a+b")) + + +def assert_warn_len_equal(mod, n_in_context): + try: + mod_warns = mod.__warningregistry__ + except AttributeError: + # the lack of a __warningregistry__ + # attribute means that no warning has + # occurred; this can be triggered in + # a parallel test scenario, while in + # a serial test scenario an initial + # warning (and therefore the attribute) + # are always created first + mod_warns = {} + + num_warns = len(mod_warns) + + if 'version' in mod_warns: + # Python 3 adds a 'version' entry to the registry, + # do not count it. + num_warns -= 1 + + assert_equal(num_warns, n_in_context) + + +def test_warn_len_equal_call_scenarios(): + # assert_warn_len_equal is called under + # varying circumstances depending on serial + # vs. parallel test scenarios; this test + # simply aims to probe both code paths and + # check that no assertion is uncaught + + # parallel scenario -- no warning issued yet + class mod: + pass + + mod_inst = mod() + + assert_warn_len_equal(mod=mod_inst, + n_in_context=0) + + # serial test scenario -- the __warningregistry__ + # attribute should be present + class mod: + def __init__(self): + self.__warningregistry__ = {'warning1':1, + 'warning2':2} + + mod_inst = mod() + assert_warn_len_equal(mod=mod_inst, + n_in_context=2) + + +def _get_fresh_mod(): + # Get this module, with warning registry empty + my_mod = sys.modules[__name__] + try: + my_mod.__warningregistry__.clear() + except AttributeError: + # will not have a __warningregistry__ unless warning has been + # raised in the module at some point + pass + return my_mod + + +def test_clear_and_catch_warnings(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + with clear_and_catch_warnings(modules=[my_mod]): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_equal(my_mod.__warningregistry__, {}) + # Without specified modules, don't clear warnings during context. + # catch_warnings doesn't make an entry for 'ignore'. + with clear_and_catch_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + # Manually adding two warnings to the registry: + my_mod.__warningregistry__ = {'warning1': 1, + 'warning2': 2} + + # Confirm that specifying module keeps old warning, does not add new + with clear_and_catch_warnings(modules=[my_mod]): + warnings.simplefilter('ignore') + warnings.warn('Another warning') + assert_warn_len_equal(my_mod, 2) + + # Another warning, no module spec it clears up registry + with clear_and_catch_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Another warning') + assert_warn_len_equal(my_mod, 0) + + +def test_suppress_warnings_module(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + + def warn_other_module(): + # Apply along axis is implemented in python; stacklevel=2 means + # we end up inside its module, not ours. 
+        def warn(arr):
+            warnings.warn("Some warning 2", stacklevel=2)
+            return arr
+        np.apply_along_axis(warn, 0, [0])
+
+    # Test module based warning suppression:
+    assert_warn_len_equal(my_mod, 0)
+    with suppress_warnings() as sup:
+        sup.record(UserWarning)
+        # suppress warning from other module (may have .pyc ending);
+        # this filter will need updating if apply_along_axis is moved.
+        sup.filter(module=np.lib.shape_base)
+        warnings.warn("Some warning")
+        warn_other_module()
+    # Check that the suppression did test the file correctly (this module
+    # got filtered)
+    assert_equal(len(sup.log), 1)
+    assert_equal(sup.log[0].message.args[0], "Some warning")
+    assert_warn_len_equal(my_mod, 0)
+    sup = suppress_warnings()
+    # Will have to be changed if apply_along_axis is moved:
+    sup.filter(module=my_mod)
+    with sup:
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+    # And test repeat works:
+    sup.filter(module=my_mod)
+    with sup:
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+
+    # Without specified modules
+    with suppress_warnings():
+        warnings.simplefilter('ignore')
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+
+
+def test_suppress_warnings_type():
+    # Initial state of module, no warnings
+    my_mod = _get_fresh_mod()
+    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
+
+    # Test module based warning suppression:
+    with suppress_warnings() as sup:
+        sup.filter(UserWarning)
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+    sup = suppress_warnings()
+    sup.filter(UserWarning)
+    with sup:
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+    # And test repeat works:
+    sup.filter(module=my_mod)
+    with sup:
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+
+    # Without specified modules
+    with suppress_warnings():
+        warnings.simplefilter('ignore')
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+
+
+def test_suppress_warnings_decorate_no_record():
+    sup = suppress_warnings()
+    sup.filter(UserWarning)
+
+    @sup
+    def warn(category):
+        warnings.warn('Some warning', category)
+
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter("always")
+        warn(UserWarning)  # should be suppressed
+        warn(RuntimeWarning)
+        assert_equal(len(w), 1)
+
+
+def test_suppress_warnings_record():
+    sup = suppress_warnings()
+    log1 = sup.record()
+
+    with sup:
+        log2 = sup.record(message='Some other warning 2')
+        sup.filter(message='Some warning')
+        warnings.warn('Some warning')
+        warnings.warn('Some other warning')
+        warnings.warn('Some other warning 2')
+
+    assert_equal(len(sup.log), 2)
+    assert_equal(len(log1), 1)
+    assert_equal(len(log2), 1)
+    assert_equal(log2[0].message.args[0], 'Some other warning 2')
+
+    # Do it again, with the same context to see if some warnings survived:
+    with sup:
+        log2 = sup.record(message='Some other warning 2')
+        sup.filter(message='Some warning')
+        warnings.warn('Some warning')
+        warnings.warn('Some other warning')
+        warnings.warn('Some other warning 2')
+
+    assert_equal(len(sup.log), 2)
+    assert_equal(len(log1), 1)
+    assert_equal(len(log2), 1)
+    assert_equal(log2[0].message.args[0], 'Some other warning 2')
+
+    # Test nested:
+    with suppress_warnings() as sup:
+        sup.record()
+        with suppress_warnings() as sup2:
+            sup2.record(message='Some warning')
+            warnings.warn('Some warning')
+            warnings.warn('Some other warning')
+            assert_equal(len(sup2.log), 1)
+        assert_equal(len(sup.log), 1)
+
+
+def test_suppress_warnings_forwarding():
+    def warn_other_module():
+        # Apply along axis is implemented in python; stacklevel=2 means
+        # we end up inside its module, not ours.
+        def warn(arr):
+            warnings.warn("Some warning", stacklevel=2)
+            return arr
+        np.apply_along_axis(warn, 0, [0])
+
+    with suppress_warnings() as sup:
+        sup.record()
+        with suppress_warnings("always"):
+            for i in range(2):
+                warnings.warn("Some warning")
+
+    assert_equal(len(sup.log), 2)
+
+    with suppress_warnings() as sup:
+        sup.record()
+        with suppress_warnings("location"):
+            for i in range(2):
+                warnings.warn("Some warning")
+            warnings.warn("Some warning")
+
+    assert_equal(len(sup.log), 2)
+
+    with suppress_warnings() as sup:
+        sup.record()
+        with suppress_warnings("module"):
+            for i in range(2):
+                warnings.warn("Some warning")
+                warnings.warn("Some warning")
+            warn_other_module()
+
+    assert_equal(len(sup.log), 2)
+
+    with suppress_warnings() as sup:
+        sup.record()
+        with suppress_warnings("once"):
+            for i in range(2):
+                warnings.warn("Some warning")
+                warnings.warn("Some other warning")
+            warn_other_module()
+
+    assert_equal(len(sup.log), 2)
+
+
+def test_tempdir():
+    with tempdir() as tdir:
+        fpath = os.path.join(tdir, 'tmp')
+        with open(fpath, 'w'):
+            pass
+    assert_(not os.path.isdir(tdir))
+
+    raised = False
+    try:
+        with tempdir() as tdir:
+            raise ValueError()
+    except ValueError:
+        raised = True
+    assert_(raised)
+    assert_(not os.path.isdir(tdir))
+
+
+def test_temppath():
+    with temppath() as fpath:
+        with open(fpath, 'w'):
+            pass
+    assert_(not os.path.isfile(fpath))
+
+    raised = False
+    try:
+        with temppath() as fpath:
+            raise ValueError()
+    except ValueError:
+        raised = True
+    assert_(raised)
+    assert_(not os.path.isfile(fpath))
+
+
+class my_cacw(clear_and_catch_warnings):
+
+    class_modules = (sys.modules[__name__],)
+
+
+def test_clear_and_catch_warnings_inherit():
+    # Test can subclass and add default modules
+    my_mod = _get_fresh_mod()
+    with my_cacw():
+        warnings.simplefilter('ignore')
+        warnings.warn('Some warning')
+    assert_equal(my_mod.__warningregistry__, {})
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+class TestAssertNoGcCycles:
+    """ Test assert_no_gc_cycles """
+    def test_passes(self):
+        def no_cycle():
+            b = []
+            b.append([])
+            return b
+
+        with assert_no_gc_cycles():
+            no_cycle()
+
+        assert_no_gc_cycles(no_cycle)
+
+    def test_asserts(self):
+        def make_cycle():
+            a = []
+            a.append(a)
+            a.append(a)
+            return a
+
+        with assert_raises(AssertionError):
+            with assert_no_gc_cycles():
+                make_cycle()
+
+        with assert_raises(AssertionError):
+            assert_no_gc_cycles(make_cycle)
+
+    @pytest.mark.slow
+    def test_fails(self):
+        """
+        Test that in cases where the garbage cannot be collected, we raise an
+        error, instead of hanging forever trying to clear it.
+        """
+
+        class ReferenceCycleInDel:
+            """
+            An object that not only contains a reference cycle, but creates new
+            cycles whenever it's garbage-collected and its __del__ runs
+            """
+            make_cycle = True
+
+            def __init__(self):
+                self.cycle = self
+
+            def __del__(self):
+                # break the current cycle so that `self` can be freed
+                self.cycle = None
+
+                if ReferenceCycleInDel.make_cycle:
+                    # but create a new one so that the garbage collector has more
+                    # work to do.
+                    ReferenceCycleInDel()
+
+        try:
+            w = weakref.ref(ReferenceCycleInDel())
+            try:
+                with assert_raises(RuntimeError):
+                    # this will be unable to get a baseline empty garbage
+                    assert_no_gc_cycles(lambda: None)
+            except AssertionError:
+                # the above test is only necessary if the GC actually tried to
+                # free our object anyway, which python 2.7 does not.
+                if w() is not None:
+                    pytest.skip("GC does not call __del__ on cyclic objects")
+                raise
+
+        finally:
+            # make sure that we stop creating reference cycles
+            ReferenceCycleInDel.make_cycle = False
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test__all__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test__all__.py
new file mode 100644
index 00000000..e44bda3d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test__all__.py
@@ -0,0 +1,9 @@
+
+import collections
+import numpy as np
+
+
+def test_no_duplicates_in_np__all__():
+    # Regression test for gh-10198.
+    dups = {k: v for k, v in collections.Counter(np.__all__).items() if v > 1}
+    assert len(dups) == 0
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_ctypeslib.py b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_ctypeslib.py
new file mode 100644
index 00000000..965e547e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_ctypeslib.py
@@ -0,0 +1,370 @@
+import sys
+import sysconfig
+import weakref
+from pathlib import Path
+
+import pytest
+
+import numpy as np
+from numpy.ctypeslib import ndpointer, load_library, as_array
+from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal
+
+try:
+    import ctypes
+except ImportError:
+    ctypes = None
+else:
+    cdll = None
+    test_cdll = None
+    if hasattr(sys, 'gettotalrefcount'):
+        try:
+            cdll = load_library('_multiarray_umath_d', np.core._multiarray_umath.__file__)
+        except OSError:
+            pass
+        try:
+            test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
+        except OSError:
+            pass
+    if cdll is None:
+        cdll = load_library('_multiarray_umath', np.core._multiarray_umath.__file__)
+    if test_cdll is None:
+        test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
+
+    c_forward_pointer = test_cdll.forward_pointer
+
+
+@pytest.mark.skipif(ctypes is None,
+                    reason="ctypes not available in this python")
+@pytest.mark.skipif(sys.platform == 'cygwin',
+                    reason="Known to fail on cygwin")
+class TestLoadLibrary:
+    def test_basic(self):
+        loader_path = np.core._multiarray_umath.__file__
+
+        out1 = load_library('_multiarray_umath', loader_path)
+        out2 = load_library(Path('_multiarray_umath'), loader_path)
+        out3 = load_library('_multiarray_umath', Path(loader_path))
+        out4 = load_library(b'_multiarray_umath', loader_path)
+
+        assert isinstance(out1, ctypes.CDLL)
+        assert out1 is out2 is out3 is out4
+
+    def test_basic2(self):
+        # Regression for #801: load_library with a full library name
+        # (including extension) does not work.
+        try:
+            so_ext = sysconfig.get_config_var('EXT_SUFFIX')
+            load_library('_multiarray_umath%s' % so_ext,
+                         np.core._multiarray_umath.__file__)
+        except ImportError as e:
+            msg = ("ctypes is not available on this python: skipping the test"
+                   " (import error was: %s)" % str(e))
+            print(msg)
+
+
+class TestNdpointer:
+    def test_dtype(self):
+        dt = np.intc
+        p = ndpointer(dtype=dt)
+        assert_(p.from_param(np.array([1], dt)))
+        dt = '<i4'
+        p = ndpointer(dtype=dt)
+        assert_(p.from_param(np.array([1], dt)))
+        dt = np.dtype('>i4')
+        p = ndpointer(dtype=dt)
+        p.from_param(np.array([1], dt))
+        assert_raises(TypeError, p.from_param,
+                      np.array([1], dt.newbyteorder('swap')))
+        dtnames = ['x', 'y']
+        dtformats = [np.intc, np.float64]
+        dtdescr = {'names': dtnames, 'formats': dtformats}
+        dt = np.dtype(dtdescr)
+        p = ndpointer(dtype=dt)
+        assert_(p.from_param(np.zeros((10,), dt)))
+        samedt = np.dtype(dtdescr)
+        p = ndpointer(dtype=samedt)
+        assert_(p.from_param(np.zeros((10,), dt)))
+        dt2 = np.dtype(dtdescr, align=True)
+        if dt.itemsize != dt2.itemsize:
+            assert_raises(TypeError, p.from_param, np.zeros((10,), dt2))
+        else:
+            assert_(p.from_param(np.zeros((10,), dt2)))
+
+    def test_ndim(self):
+        p = ndpointer(ndim=0)
+        assert_(p.from_param(np.array(1)))
+        assert_raises(TypeError, p.from_param, np.array([1]))
+        p = ndpointer(ndim=1)
+        assert_raises(TypeError, p.from_param, np.array(1))
+        assert_(p.from_param(np.array([1])))
+        p = ndpointer(ndim=2)
+        assert_(p.from_param(np.array([[1]])))
+
+    def test_shape(self):
+        p = ndpointer(shape=(1, 2))
+        assert_(p.from_param(np.array([[1, 2]])))
+        assert_raises(TypeError, p.from_param, np.array([[1], [2]]))
+        p = ndpointer(shape=())
+        assert_(p.from_param(np.array(1)))
+
+    def test_flags(self):
+        x = np.array([[1, 2], [3, 4]], order='F')
+        p = ndpointer(flags='FORTRAN')
+        assert_(p.from_param(x))
+        p = ndpointer(flags='CONTIGUOUS')
+        assert_raises(TypeError, p.from_param, x)
+        p = ndpointer(flags=x.flags.num)
+        assert_(p.from_param(x))
+        assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
+
+    def test_cache(self):
+        assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64))
+
+        # shapes are normalized
+        assert_(ndpointer(shape=2) is ndpointer(shape=(2,)))
+
+        # 1.12 <= v < 1.16 had a bug that made these fail
+        assert_(ndpointer(shape=2) is not ndpointer(ndim=2))
+        assert_(ndpointer(ndim=2) is not ndpointer(shape=2))
+
+@pytest.mark.skipif(ctypes is None,
+                    reason="ctypes not available on this python installation")
+class TestNdpointerCFunc:
+    def test_arguments(self):
+        """ Test that arguments are coerced from arrays """
+        c_forward_pointer.restype = ctypes.c_void_p
+        c_forward_pointer.argtypes = (ndpointer(ndim=2),)
+
+        c_forward_pointer(np.zeros((2, 3)))
+        # too many dimensions
+        assert_raises(
+            ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4)))
+
+    @pytest.mark.parametrize(
+        'dt', [
+            float,
+            np.dtype(dict(
+                formats=['<i4', '<i4'],
+                names=['a', 'b'],
+                offsets=[0, 2],
+                itemsize=6
+            ))
+        ], ids=[
+            'float',
+            'overlapping-fields',
+        ]
+    )
+    def test_return(self, dt):
+        """ Test that return values are coerced to arrays """
+        arr = np.zeros((2, 3), dt)
+        ptr_type = ndpointer(shape=arr.shape, dtype=arr.dtype)
+
+        c_forward_pointer.restype = ptr_type
+        c_forward_pointer.argtypes = (ptr_type,)
+
+        # check that the returned array is an equivalent view on the same data
+        arr2 = c_forward_pointer(arr)
+        assert_equal(arr2.dtype, arr.dtype)
+        assert_equal(arr2.shape, arr.shape)
+
+
+@pytest.mark.skipif(ctypes is None,
+                    reason="ctypes not available in this python")
+class TestAsCtypesType:
+    """ Test conversion from dtypes to ctypes types """
+    def test_scalar(self):
+        dt = np.dtype('<u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16.__ctype_le__)
+
+        dt = np.dtype('>u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16.__ctype_be__)
+
+        dt = np.dtype('u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16)
+
+    def test_subarray(self):
+        dt = np.dtype((np.int32, (2, 3)))
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, 2 * (3 * ctypes.c_int32))
+
+    def test_structure(self):
+        dt = np.dtype([
+            ('a', np.uint16),
+            ('b', np.uint32),
+        ])
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Structure))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('b', ctypes.c_uint32),
+        ])
+
+    def test_structure_aligned(self):
+        dt = np.dtype([
+            ('a', np.uint16),
+            ('b', np.uint32),
+        ], align=True)
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Structure))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('', ctypes.c_char * 2),  # padding
+            ('b', ctypes.c_uint32),
+        ])
+
+    def test_union(self):
+        dt = np.dtype(dict(
+            names=['a', 'b'],
+            offsets=[0, 0],
+            formats=[np.uint16, np.uint32]
+        ))
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Union))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('b', ctypes.c_uint32),
+        ])
+
+    def test_padded_union(self):
+        dt = np.dtype(dict(
+            names=['a', 'b'],
+            offsets=[0, 0],
+            formats=[np.uint16, np.uint32],
+            itemsize=5,
+        ))
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Union))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('b', ctypes.c_uint32),
+            ('', ctypes.c_char * 5),  # padding
+        ])
+
+    def test_overlapping(self):
+        dt = np.dtype(dict(
+            names=['a', 'b'],
+            offsets=[0, 2],
+            formats=[np.uint32, np.uint32]
+        ))
+        assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_lazyloading.py b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_lazyloading.py
new file mode 100644
index 00000000..f31a4eab
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_lazyloading.py
@@ -0,0 +1,38 @@
+import sys
+import importlib
+from importlib.util import LazyLoader, find_spec, module_from_spec
+import pytest
+
+
+# Warning raised by _reload_guard() in numpy/__init__.py
+@pytest.mark.filterwarnings("ignore:The NumPy module was reloaded")
+def test_lazy_load():
+    # gh-22045. lazyload doesn't import submodule names into the namespace
+    # muck with sys.modules to test the importing system
+    old_numpy = sys.modules.pop("numpy")
+
+    numpy_modules = {}
+    for mod_name, mod in list(sys.modules.items()):
+        if mod_name[:6] == "numpy.":
+            numpy_modules[mod_name] = mod
+            sys.modules.pop(mod_name)
+
+    try:
+        # create lazy load of numpy as np
+        spec = find_spec("numpy")
+        module = module_from_spec(spec)
+        sys.modules["numpy"] = module
+        loader = LazyLoader(spec.loader)
+        loader.exec_module(module)
+        np = module
+
+        # test a subpackage import
+        from numpy.lib import recfunctions
+
+        # test triggering the import of the package
+        np.ndarray
+
+    finally:
+        if old_numpy:
+            sys.modules["numpy"] = old_numpy
+            sys.modules.update(numpy_modules)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_matlib.py b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_matlib.py
new file mode 100644
index 00000000..0e93c484
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_matlib.py
@@ -0,0 +1,58 @@
+import numpy as np
+import numpy.matlib
+from numpy.testing import assert_array_equal, assert_equal, assert_
+
+def test_empty():
+    x = numpy.matlib.empty((2,))
+    assert_(isinstance(x, np.matrix))
+    assert_equal(x.shape, (1, 2))
+
+def test_ones():
+    assert_array_equal(numpy.matlib.ones((2, 3)),
+                       np.matrix([[ 1.,  1.,  1.],
+                                  [ 1.,  1.,  1.]]))
+
+    assert_array_equal(numpy.matlib.ones(2), np.matrix([[ 1.,  1.]]))
+
+def test_zeros():
+    assert_array_equal(numpy.matlib.zeros((2, 3)),
+                       np.matrix([[ 0.,  0.,  0.],
+                                  [ 0.,  0.,  0.]]))
+
+    assert_array_equal(numpy.matlib.zeros(2), np.matrix([[ 0.,  0.]]))
+
+def test_identity():
+    x = numpy.matlib.identity(2, dtype=int)
+    assert_array_equal(x, np.matrix([[1, 0], [0, 1]]))
+
+def test_eye():
+    xc = numpy.matlib.eye(3, k=1, dtype=int)
+    assert_array_equal(xc, np.matrix([[ 0,  1,  0],
+                                      [ 0,  0,  1],
+                                      [ 0,  0,  0]]))
+    assert xc.flags.c_contiguous
+    assert not xc.flags.f_contiguous
+
+    xf = numpy.matlib.eye(3, 4, dtype=int, order='F')
+    assert_array_equal(xf, np.matrix([[ 1,  0,  0,  0],
+                                      [ 0,  1,  0,  0],
+                                      [ 0,  0,  1,  0]]))
+    assert not xf.flags.c_contiguous
+    assert xf.flags.f_contiguous
+
+def test_rand():
+    x = numpy.matlib.rand(3)
+    # check matrix type, array would have shape (3,)
+    assert_(x.ndim == 2)
+
+def test_randn():
+    x = np.matlib.randn(3)
+    # check matrix type, array would have shape (3,)
+    assert_(x.ndim == 2)
+
+def test_repmat():
+    a1 = np.arange(4)
+    x = numpy.matlib.repmat(a1, 2, 2)
+    y = np.array([[0, 1, 2, 3, 0, 1, 2, 3],
+                  [0, 1, 2, 3, 0, 1, 2, 3]])
+    assert_array_equal(x, y)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_numpy_config.py b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_numpy_config.py
new file mode 100644
index 00000000..82c1ad70
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_numpy_config.py
@@ -0,0 +1,44 @@
+"""
+Check the numpy config is valid.
+""" +import numpy as np +import pytest +from unittest.mock import Mock, patch + +pytestmark = pytest.mark.skipif( + not hasattr(np.__config__, "_built_with_meson"), + reason="Requires Meson builds", +) + + +class TestNumPyConfigs: + REQUIRED_CONFIG_KEYS = [ + "Compilers", + "Machine Information", + "Python Information", + ] + + @patch("numpy.__config__._check_pyyaml") + def test_pyyaml_not_found(self, mock_yaml_importer): + mock_yaml_importer.side_effect = ModuleNotFoundError() + with pytest.warns(UserWarning): + np.show_config() + + def test_dict_mode(self): + config = np.show_config(mode="dicts") + + assert isinstance(config, dict) + assert all([key in config for key in self.REQUIRED_CONFIG_KEYS]), ( + "Required key missing," + " see index of `False` with `REQUIRED_CONFIG_KEYS`" + ) + + def test_invalid_mode(self): + with pytest.raises(AttributeError): + np.show_config(mode="foo") + + def test_warn_to_add_tests(self): + assert len(np.__config__.DisplayModes) == 2, ( + "New mode detected," + " please add UT if applicable and increment this count" + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_numpy_version.py b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_numpy_version.py new file mode 100644 index 00000000..61643426 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_numpy_version.py @@ -0,0 +1,41 @@ +""" +Check the numpy version is valid. + +Note that a development version is marked by the presence of 'dev0' or '+' +in the version string, all else is treated as a release. The version string +itself is set from the output of ``git describe`` which relies on tags. + +Examples +-------- + +Valid Development: 1.22.0.dev0 1.22.0.dev0+5-g7999db4df2 1.22.0+5-g7999db4df2 +Valid Release: 1.21.0.rc1, 1.21.0.b1, 1.21.0 +Invalid: 1.22.0.dev, 1.22.0.dev0-5-g7999db4dfB, 1.21.0.d1, 1.21.a + +Note that a release is determined by the version string, which in turn +is controlled by the result of the ``git describe`` command. +""" +import re + +import numpy as np +from numpy.testing import assert_ + + +def test_valid_numpy_version(): + # Verify that the numpy version is a valid one (no .post suffix or other + # nonsense). See gh-6431 for an issue caused by an invalid version. + version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(a[0-9]|b[0-9]|rc[0-9])?" + dev_suffix = r"(\.dev[0-9]+(\+git[0-9]+\.[0-9a-f]+)?)?" 
+    res = re.match(version_pattern + dev_suffix + '$', np.__version__)
+
+    assert_(res is not None, np.__version__)
+
+
+def test_short_version():
+    # Check numpy.short_version actually exists
+    if np.version.release:
+        assert_(np.__version__ == np.version.short_version,
+                "short_version mismatch in release version")
+    else:
+        assert_(np.__version__.split("+")[0] == np.version.short_version,
+                "short_version mismatch in development version")
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_public_api.py b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_public_api.py
new file mode 100644
index 00000000..54bf3dac
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_public_api.py
@@ -0,0 +1,551 @@
+import sys
+import sysconfig
+import subprocess
+import pkgutil
+import types
+import importlib
+import warnings
+
+import numpy as np
+import numpy
+import pytest
+from numpy.testing import IS_WASM
+
+try:
+    import ctypes
+except ImportError:
+    ctypes = None
+
+
+def check_dir(module, module_name=None):
+    """Returns a mapping of all objects with the wrong __module__ attribute."""
+    if module_name is None:
+        module_name = module.__name__
+    results = {}
+    for name in dir(module):
+        item = getattr(module, name)
+        if (hasattr(item, '__module__') and hasattr(item, '__name__')
+                and item.__module__ != module_name):
+            results[name] = item.__module__ + '.' + item.__name__
+    return results
+
+
+def test_numpy_namespace():
+    # None of these objects are publicly documented to be part of the main
+    # NumPy namespace (some are useful though, others need to be cleaned up)
+    undocumented = {
+        '_add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
+        'add_docstring': 'numpy.core._multiarray_umath.add_docstring',
+        'add_newdoc': 'numpy.core.function_base.add_newdoc',
+        'add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
+        'byte_bounds': 'numpy.lib.utils.byte_bounds',
+        'compare_chararrays': 'numpy.core._multiarray_umath.compare_chararrays',
+        'deprecate': 'numpy.lib.utils.deprecate',
+        'deprecate_with_doc': 'numpy.lib.utils.deprecate_with_doc',
+        'disp': 'numpy.lib.function_base.disp',
+        'fastCopyAndTranspose': 'numpy.core._multiarray_umath.fastCopyAndTranspose',
+        'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap',
+        'get_include': 'numpy.lib.utils.get_include',
+        'recfromcsv': 'numpy.lib.npyio.recfromcsv',
+        'recfromtxt': 'numpy.lib.npyio.recfromtxt',
+        'safe_eval': 'numpy.lib.utils.safe_eval',
+        'set_string_function': 'numpy.core.arrayprint.set_string_function',
+        'show_config': 'numpy.__config__.show',
+        'show_runtime': 'numpy.lib.utils.show_runtime',
+        'who': 'numpy.lib.utils.who',
+    }
+    # We override dir to not show these members
+    allowlist = undocumented
+    bad_results = check_dir(np)
+    # pytest gives better error messages with the builtin assert than with
+    # assert_equal
+    assert bad_results == allowlist
+
+
+@pytest.mark.skipif(IS_WASM, reason="can't start subprocess")
+@pytest.mark.parametrize('name', ['testing'])
+def test_import_lazy_import(name):
+    """Make sure we can actually use the modules we lazy load.
+
+    While not exported as part of the public API, it was accessible.  With
+    the use of __getattr__ and __dir__ this isn't always true; it can happen
+    that an infinite recursion occurs.
+
+    This is the only way I found that would force the failure to appear on the
+    badly implemented code.
+
+    We also test for the presence of the lazily imported modules in dir.
+    """
+    exe = (sys.executable, '-c', "import numpy; numpy." + name)
+    result = subprocess.check_output(exe)
+    assert not result
+
+    # Make sure they are still in the __dir__
+    assert name in dir(np)
+
+
+def test_dir_testing():
+    """Assert that output of dir has only one "testing/tester"
+    attribute without duplicate"""
+    assert len(dir(np)) == len(set(dir(np)))
+
+
+def test_numpy_linalg():
+    bad_results = check_dir(np.linalg)
+    assert bad_results == {}
+
+
+def test_numpy_fft():
+    bad_results = check_dir(np.fft)
+    assert bad_results == {}
+
+
+@pytest.mark.skipif(ctypes is None,
+                    reason="ctypes not available in this python")
+def test_NPY_NO_EXPORT():
+    cdll = ctypes.CDLL(np.core._multiarray_tests.__file__)
+    # Make sure an arbitrary NPY_NO_EXPORT function is actually hidden
+    f = getattr(cdll, 'test_not_exported', None)
+    assert f is None, ("'test_not_exported' is mistakenly exported, "
+                       "NPY_NO_EXPORT does not work")
+
+
+# Historically NumPy has not used leading underscores for private submodules
+# much.  This has resulted in lots of things that look like public modules
+# (i.e. things that can be imported as `import numpy.somesubmodule.somefile`),
+# but were never intended to be public.  The PUBLIC_MODULES list contains
+# modules that are either public because they were meant to be, or because they
+# contain public functions/objects that aren't present in any other namespace
+# for whatever reason and therefore should be treated as public.
+#
+# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack
+# of underscores) but should not be used.  For many of those modules the
+# current status is fine.  For others it may make sense to work on making them
+# private, to clean up our public API and avoid confusion.
+PUBLIC_MODULES = ['numpy.' + s for s in [
+    "array_api",
+    "array_api.linalg",
+    "ctypeslib",
+    "doc",
+    "doc.constants",
+    "doc.ufuncs",
+    "dtypes",
+    "exceptions",
+    "f2py",
+    "fft",
+    "lib",
+    "lib.format",  # was this meant to be public?
+    "lib.mixins",
+    "lib.recfunctions",
+    "lib.scimath",
+    "lib.stride_tricks",
+    "linalg",
+    "ma",
+    "ma.extras",
+    "ma.mrecords",
+    "matlib",
+    "polynomial",
+    "polynomial.chebyshev",
+    "polynomial.hermite",
+    "polynomial.hermite_e",
+    "polynomial.laguerre",
+    "polynomial.legendre",
+    "polynomial.polynomial",
+    "random",
+    "testing",
+    "testing.overrides",
+    "typing",
+    "typing.mypy_plugin",
+    "version"  # Should be removed for NumPy 2.0
+]]
+if sys.version_info < (3, 12):
+    PUBLIC_MODULES += [
+        'numpy.' + s for s in [
+            "distutils",
+            "distutils.cpuinfo",
+            "distutils.exec_command",
+            "distutils.misc_util",
+            "distutils.log",
+            "distutils.system_info",
+        ]
+    ]
+
+
+PUBLIC_ALIASED_MODULES = [
+    "numpy.char",
+    "numpy.emath",
+    "numpy.rec",
+]
+
+
+PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
+    "compat",
+    "compat.py3k",
+    "conftest",
+    "core",
+    "core.arrayprint",
+    "core.defchararray",
+    "core.einsumfunc",
+    "core.fromnumeric",
+    "core.function_base",
+    "core.getlimits",
+    "core.memmap",
+    "core.multiarray",
+    "core.numeric",
+    "core.numerictypes",
+    "core.overrides",
+    "core.records",
+    "core.shape_base",
+    "core.umath",
+    "f2py.auxfuncs",
+    "f2py.capi_maps",
+    "f2py.cb_rules",
+    "f2py.cfuncs",
+    "f2py.common_rules",
+    "f2py.crackfortran",
+    "f2py.diagnose",
+    "f2py.f2py2e",
+    "f2py.f90mod_rules",
+    "f2py.func2subr",
+    "f2py.rules",
+    "f2py.symbolic",
+    "f2py.use_rules",
+    "fft.helper",
+    "lib.arraypad",
+    "lib.arraysetops",
+    "lib.arrayterator",
+    "lib.function_base",
+    "lib.histograms",
+    "lib.index_tricks",
+    "lib.nanfunctions",
+    "lib.npyio",
+    "lib.polynomial",
+    "lib.shape_base",
+    "lib.twodim_base",
+    "lib.type_check",
+    "lib.ufunclike",
+    "lib.user_array",  # note: not in np.lib, but probably should just be deleted
+    "lib.utils",
+    "linalg.lapack_lite",
+    "linalg.linalg",
+    "ma.core",
+    "ma.testutils",
+    "ma.timer_comparison",
+    "matrixlib",
+    "matrixlib.defmatrix",
+    "polynomial.polyutils",
+    "random.mtrand",
+    "random.bit_generator",
+    "testing.print_coercion_tables",
+]]
+if sys.version_info < (3, 12):
+    PRIVATE_BUT_PRESENT_MODULES += [
+        'numpy.' + s for s in [
+            "distutils.armccompiler",
+            "distutils.fujitsuccompiler",
+            "distutils.ccompiler",
+            'distutils.ccompiler_opt',
+            "distutils.command",
+            "distutils.command.autodist",
+            "distutils.command.bdist_rpm",
+            "distutils.command.build",
+            "distutils.command.build_clib",
+            "distutils.command.build_ext",
+            "distutils.command.build_py",
+            "distutils.command.build_scripts",
+            "distutils.command.build_src",
+            "distutils.command.config",
+            "distutils.command.config_compiler",
+            "distutils.command.develop",
+            "distutils.command.egg_info",
+            "distutils.command.install",
+            "distutils.command.install_clib",
+            "distutils.command.install_data",
+            "distutils.command.install_headers",
+            "distutils.command.sdist",
+            "distutils.conv_template",
+            "distutils.core",
+            "distutils.extension",
+            "distutils.fcompiler",
+            "distutils.fcompiler.absoft",
+            "distutils.fcompiler.arm",
+            "distutils.fcompiler.compaq",
+            "distutils.fcompiler.environment",
+            "distutils.fcompiler.g95",
+            "distutils.fcompiler.gnu",
+            "distutils.fcompiler.hpux",
+            "distutils.fcompiler.ibm",
+            "distutils.fcompiler.intel",
+            "distutils.fcompiler.lahey",
+            "distutils.fcompiler.mips",
+            "distutils.fcompiler.nag",
+            "distutils.fcompiler.none",
+            "distutils.fcompiler.pathf95",
+            "distutils.fcompiler.pg",
+            "distutils.fcompiler.nv",
+            "distutils.fcompiler.sun",
+            "distutils.fcompiler.vast",
+            "distutils.fcompiler.fujitsu",
+            "distutils.from_template",
+            "distutils.intelccompiler",
+            "distutils.lib2def",
+            "distutils.line_endings",
+            "distutils.mingw32ccompiler",
+            "distutils.msvccompiler",
+            "distutils.npy_pkg_config",
+            "distutils.numpy_distribution",
+            "distutils.pathccompiler",
+            "distutils.unixccompiler",
+        ]
+    ]
+
+
+def is_unexpected(name):
+    """Check if this needs to be considered."""
+    if '._' in name or '.tests' in name or '.setup' in name:
+        return False
+
+    if name in PUBLIC_MODULES:
+        return False
+
+    if name in PUBLIC_ALIASED_MODULES:
+        return False
+
+    if name in PRIVATE_BUT_PRESENT_MODULES:
+        return False
+
+    return True
+
+
+# These are present in a directory with an __init__.py but cannot be imported
+# code_generators/ isn't installed, but present for an inplace build
+SKIP_LIST = [
+    "numpy.core.code_generators",
"numpy.core.code_generators.genapi", + "numpy.core.code_generators.generate_umath", + "numpy.core.code_generators.ufunc_docstrings", + "numpy.core.code_generators.generate_numpy_api", + "numpy.core.code_generators.generate_ufunc_api", + "numpy.core.code_generators.numpy_api", + "numpy.core.code_generators.generate_umath_doc", + "numpy.core.code_generators.verify_c_api_version", + "numpy.core.cversions", + "numpy.core.generate_numpy_api", + "numpy.core.umath_tests", +] +if sys.version_info < (3, 12): + SKIP_LIST += ["numpy.distutils.msvc9compiler"] + + +# suppressing warnings from deprecated modules +@pytest.mark.filterwarnings("ignore:.*np.compat.*:DeprecationWarning") +def test_all_modules_are_expected(): + """ + Test that we don't add anything that looks like a new public module by + accident. Check is based on filenames. + """ + + modnames = [] + for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__, + prefix=np.__name__ + '.', + onerror=None): + if is_unexpected(modname) and modname not in SKIP_LIST: + # We have a name that is new. If that's on purpose, add it to + # PUBLIC_MODULES. We don't expect to have to add anything to + # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name! + modnames.append(modname) + + if modnames: + raise AssertionError(f'Found unexpected modules: {modnames}') + + +# Stuff that clearly shouldn't be in the API and is detected by the next test +# below +SKIP_LIST_2 = [ + 'numpy.math', + 'numpy.doc.constants.re', + 'numpy.doc.constants.textwrap', + 'numpy.lib.emath', + 'numpy.lib.math', + 'numpy.matlib.char', + 'numpy.matlib.rec', + 'numpy.matlib.emath', + 'numpy.matlib.exceptions', + 'numpy.matlib.math', + 'numpy.matlib.linalg', + 'numpy.matlib.fft', + 'numpy.matlib.random', + 'numpy.matlib.ctypeslib', + 'numpy.matlib.ma', +] +if sys.version_info < (3, 12): + SKIP_LIST_2 += [ + 'numpy.distutils.log.sys', + 'numpy.distutils.log.logging', + 'numpy.distutils.log.warnings', + ] + + +def test_all_modules_are_expected_2(): + """ + Method checking all objects. The pkgutil-based method in + `test_all_modules_are_expected` does not catch imports into a namespace, + only filenames. So this test is more thorough, and checks this like: + + import .lib.scimath as emath + + To check if something in a module is (effectively) public, one can check if + there's anything in that namespace that's a public function/object but is + not exposed in a higher-level namespace. For example for a `numpy.lib` + submodule:: + + mod = np.lib.mixins + for obj in mod.__all__: + if obj in np.__all__: + continue + elif obj in np.lib.__all__: + continue + + else: + print(obj) + + """ + + def find_unexpected_members(mod_name): + members = [] + module = importlib.import_module(mod_name) + if hasattr(module, '__all__'): + objnames = module.__all__ + else: + objnames = dir(module) + + for objname in objnames: + if not objname.startswith('_'): + fullobjname = mod_name + '.' 
+                if isinstance(getattr(module, objname), types.ModuleType):
+                    if is_unexpected(fullobjname):
+                        if fullobjname not in SKIP_LIST_2:
+                            members.append(fullobjname)
+
+        return members
+
+    unexpected_members = find_unexpected_members("numpy")
+    for modname in PUBLIC_MODULES:
+        unexpected_members.extend(find_unexpected_members(modname))
+
+    if unexpected_members:
+        raise AssertionError("Found unexpected object(s) that look like "
+                             "modules: {}".format(unexpected_members))
+
+
+def test_api_importable():
+    """
+    Check that all submodules listed higher up in this file can be imported.
+
+    Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
+    simply need to be removed from the list (deprecation may or may not be
+    needed - apply common sense).
+    """
+    def check_importable(module_name):
+        try:
+            importlib.import_module(module_name)
+        except (ImportError, AttributeError):
+            return False
+
+        return True
+
+    module_names = []
+    for module_name in PUBLIC_MODULES:
+        if not check_importable(module_name):
+            module_names.append(module_name)
+
+    if module_names:
+        raise AssertionError("Modules in the public API that cannot be "
+                             "imported: {}".format(module_names))
+
+    for module_name in PUBLIC_ALIASED_MODULES:
+        try:
+            eval(module_name)
+        except AttributeError:
+            module_names.append(module_name)
+
+    if module_names:
+        raise AssertionError("Modules in the public API that were not "
+                             "found: {}".format(module_names))
+
+    with warnings.catch_warnings(record=True) as w:
+        warnings.filterwarnings('always', category=DeprecationWarning)
+        warnings.filterwarnings('always', category=ImportWarning)
+        for module_name in PRIVATE_BUT_PRESENT_MODULES:
+            if not check_importable(module_name):
+                module_names.append(module_name)
+
+    if module_names:
+        raise AssertionError("Modules that are not really public but looked "
+                             "public and can not be imported: "
+                             "{}".format(module_names))
+
+
+@pytest.mark.xfail(
+    sysconfig.get_config_var("Py_DEBUG") not in (None, 0, "0"),
+    reason=(
+        "NumPy possibly built with `USE_DEBUG=True ./tools/travis-test.sh`, "
+        "which does not expose the `array_api` entry point. "
+        "See https://github.com/numpy/numpy/pull/19800"
+    ),
+)
+def test_array_api_entry_point():
+    """
+    Entry point for Array API implementation can be found with importlib and
+    returns the numpy.array_api namespace.
+    """
+    # For a development install that did not go through meson-python,
+    # the entrypoint will not have been installed.  So ensure this test
+    # fails only if numpy is inside site-packages.
+    numpy_in_sitepackages = sysconfig.get_path('platlib') in np.__file__
+
+    eps = importlib.metadata.entry_points()
+    try:
+        xp_eps = eps.select(group="array_api")
+    except AttributeError:
+        # The select interface for entry_points was introduced in py3.10,
+        # deprecating its dict interface.  We fallback to dict keys for
+        # finding Array API entry points so that running this test in <=3.9
+        # will still work - see https://github.com/numpy/numpy/pull/19800.
+        xp_eps = eps.get("array_api", [])
+    if len(xp_eps) == 0:
+        if numpy_in_sitepackages:
+            msg = "No entry points for 'array_api' found"
+            raise AssertionError(msg) from None
+        return
+
+    try:
+        ep = next(ep for ep in xp_eps if ep.name == "numpy")
+    except StopIteration:
+        if numpy_in_sitepackages:
+            msg = "'numpy' not in array_api entry points"
+            raise AssertionError(msg) from None
+        return
+
+    xp = ep.load()
+    msg = (
+        f"numpy entry point value '{ep.value}' "
+        "does not point to our Array API implementation"
+    )
+    assert xp is numpy.array_api, msg
+
+
+@pytest.mark.parametrize("name", [
+        'ModuleDeprecationWarning', 'VisibleDeprecationWarning',
+        'ComplexWarning', 'TooHardError', 'AxisError'])
+def test_moved_exceptions(name):
+    # These were moved to the exceptions namespace, but currently still
+    # available
+    assert name in np.__all__
+    assert name not in np.__dir__()
+    # Fetching works, but __module__ is set correctly:
+    assert getattr(np, name).__module__ == "numpy.exceptions"
+    assert name in np.exceptions.__all__
+    getattr(np.exceptions, name)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_reloading.py b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_reloading.py
new file mode 100644
index 00000000..a1f36008
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_reloading.py
@@ -0,0 +1,72 @@
+from numpy.testing import (
+    assert_raises,
+    assert_warns,
+    assert_,
+    assert_equal,
+    IS_WASM,
+)
+from numpy.compat import pickle
+
+import pytest
+import sys
+import subprocess
+import textwrap
+from importlib import reload
+
+
+def test_numpy_reloading():
+    # gh-7844.  Also check that relevant globals retain their identity.
+    import numpy as np
+    import numpy._globals
+
+    _NoValue = np._NoValue
+    VisibleDeprecationWarning = np.VisibleDeprecationWarning
+    ModuleDeprecationWarning = np.ModuleDeprecationWarning
+
+    with assert_warns(UserWarning):
+        reload(np)
+    assert_(_NoValue is np._NoValue)
+    assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
+    assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
+
+    assert_raises(RuntimeError, reload, numpy._globals)
+    with assert_warns(UserWarning):
+        reload(np)
+    assert_(_NoValue is np._NoValue)
+    assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
+    assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
+
+def test_novalue():
+    import numpy as np
+    for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+        assert_equal(repr(np._NoValue), '<no value>')
+        assert_(pickle.loads(pickle.dumps(np._NoValue,
+                                          protocol=proto)) is np._NoValue)
+
+
+@pytest.mark.skipif(IS_WASM, reason="can't start subprocess")
+def test_full_reimport():
+    """At the time of writing this, it is *not* truly supported, but
+    apparently enough users rely on it, for it to be an annoying change
+    when it started failing previously.
+    """
+    # Test within a new process, to ensure that we do not mess with the
+    # global state during the test run (could lead to cryptic test
+    # failures).  This is generally unsafe, especially, since we also
+    # reload the C-modules.
+    code = textwrap.dedent(r"""
+        import sys
+        from pytest import warns
+        import numpy as np
+
+        for k in list(sys.modules.keys()):
+            if "numpy" in k:
+                del sys.modules[k]
+
+        with warns(UserWarning):
+            import numpy as np
+        """)
+    p = subprocess.run([sys.executable, '-c', code], capture_output=True)
+    if p.returncode:
+        raise AssertionError(
+            f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}"
+        )
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_scripts.py b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_scripts.py
new file mode 100644
index 00000000..892c04ee
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_scripts.py
@@ -0,0 +1,47 @@
+""" Test scripts
+
+Test that we can run executable scripts that have been installed with numpy.
+"""
+import sys
+import os
+import pytest
+from os.path import join as pathjoin, isfile, dirname
+import subprocess
+
+import numpy as np
+from numpy.testing import assert_equal, IS_WASM
+
+is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
+
+
+def find_f2py_commands():
+    if sys.platform == 'win32':
+        exe_dir = dirname(sys.executable)
+        if exe_dir.endswith('Scripts'):  # virtualenv
+            return [os.path.join(exe_dir, 'f2py')]
+        else:
+            return [os.path.join(exe_dir, "Scripts", 'f2py')]
+    else:
+        # Three scripts are installed in Unix-like systems:
+        # 'f2py', 'f2py{major}', and 'f2py{major.minor}'.  For example,
+        # if installed with python3.9 the scripts would be named
+        # 'f2py', 'f2py3', and 'f2py3.9'.
+        version = sys.version_info
+        major = str(version.major)
+        minor = str(version.minor)
+        return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor]
+
+
+@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace")
+@pytest.mark.xfail(reason="Test is unreliable")
+@pytest.mark.parametrize('f2py_cmd', find_f2py_commands())
+def test_f2py(f2py_cmd):
+    # test that we can run f2py script
+    stdout = subprocess.check_output([f2py_cmd, '-v'])
+    assert_equal(stdout.strip(), np.__version__.encode('ascii'))
+
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
+def test_pep338():
+    stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v'])
+    assert_equal(stdout.strip(), np.__version__.encode('ascii'))
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_warnings.py b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_warnings.py
new file mode 100644
index 00000000..ee5124c5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/tests/test_warnings.py
@@ -0,0 +1,77 @@
+"""
+Tests which scan for certain occurrences in the code; they may not find
+all of these occurrences but should catch almost all.
+""" +import pytest + +from pathlib import Path +import sys +import ast +import tokenize +import numpy + +class ParseCall(ast.NodeVisitor): + def __init__(self): + self.ls = [] + + def visit_Attribute(self, node): + ast.NodeVisitor.generic_visit(self, node) + self.ls.append(node.attr) + + def visit_Name(self, node): + self.ls.append(node.id) + + +class FindFuncs(ast.NodeVisitor): + def __init__(self, filename): + super().__init__() + self.__filename = filename + + def visit_Call(self, node): + p = ParseCall() + p.visit(node.func) + ast.NodeVisitor.generic_visit(self, node) + + if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': + if node.args[0].s == "ignore": + raise AssertionError( + "warnings should have an appropriate stacklevel; found in " + "{} on line {}".format(self.__filename, node.lineno)) + + if p.ls[-1] == 'warn' and ( + len(p.ls) == 1 or p.ls[-2] == 'warnings'): + + if "testing/tests/test_warnings.py" == self.__filename: + # This file + return + + # See if stacklevel exists: + if len(node.args) == 3: + return + args = {kw.arg for kw in node.keywords} + if "stacklevel" in args: + return + raise AssertionError( + "warnings should have an appropriate stacklevel; found in " + "{} on line {}".format(self.__filename, node.lineno)) + + +@pytest.mark.slow +@pytest.mark.skipif(sys.version_info >= (3, 12), + reason="Deprecation warning in ast") +def test_warning_calls(): + # combined "ignore" and stacklevel error + base = Path(numpy.__file__).parent + + for path in base.rglob("*.py"): + if base / "testing" in path.parents: + continue + if path == base / "__init__.py": + continue + if path == base / "random" / "__init__.py": + continue + # use tokenize to auto-detect encoding on systems where no + # default encoding is defined (e.g. LANG='C') + with tokenize.open(str(path)) as file: + tree = ast.parse(file.read()) + FindFuncs(path).visit(tree) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/typing/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/typing/__init__.py new file mode 100644 index 00000000..5cf02fe8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/typing/__init__.py @@ -0,0 +1,175 @@ +""" +============================ +Typing (:mod:`numpy.typing`) +============================ + +.. versionadded:: 1.20 + +Large parts of the NumPy API have :pep:`484`-style type annotations. In +addition a number of type aliases are available to users, most prominently +the two below: + +- `ArrayLike`: objects that can be converted to arrays +- `DTypeLike`: objects that can be converted to dtypes + +.. _typing-extensions: https://pypi.org/project/typing-extensions/ + +Mypy plugin +----------- + +.. versionadded:: 1.21 + +.. automodule:: numpy.typing.mypy_plugin + +.. currentmodule:: numpy.typing + +Differences from the runtime NumPy API +-------------------------------------- + +NumPy is very flexible. Trying to describe the full range of +possibilities statically would result in types that are not very +helpful. For that reason, the typed NumPy API is often stricter than +the runtime NumPy API. This section describes some notable +differences. + +ArrayLike +~~~~~~~~~ + +The `ArrayLike` type tries to avoid creating object arrays. For +example, + +.. code-block:: python + + >>> np.array(x**2 for x in range(10)) + array( at ...>, dtype=object) + +is valid NumPy code which will create a 0-dimensional object +array. Type checkers will complain about the above example when using +the NumPy types however. 
If you really intended to do the above, then
+you can either use a ``# type: ignore`` comment:
+
+.. code-block:: python
+
+    >>> np.array(x**2 for x in range(10))  # type: ignore
+
+or explicitly type the array-like object as `~typing.Any`:
+
+.. code-block:: python
+
+    >>> from typing import Any
+    >>> array_like: Any = (x**2 for x in range(10))
+    >>> np.array(array_like)
+    array(<generator object <genexpr> at ...>, dtype=object)
+
+ndarray
+~~~~~~~
+
+It's possible to mutate the dtype of an array at runtime. For example,
+the following code is valid:
+
+.. code-block:: python
+
+    >>> x = np.array([1, 2])
+    >>> x.dtype = np.bool_
+
+This sort of mutation is not allowed by the types. Users who want to
+write statically typed code should instead use the `numpy.ndarray.view`
+method to create a view of the array with a different dtype.
+
+DTypeLike
+~~~~~~~~~
+
+The `DTypeLike` type tries to avoid creation of dtype objects using
+a dictionary of fields like below:
+
+.. code-block:: python
+
+    >>> x = np.dtype({"field1": (float, 1), "field2": (int, 3)})
+
+Although this is valid NumPy code, the type checker will complain about it,
+since its usage is discouraged.
+Please see :ref:`Data type objects <arrays.dtypes>`.
+
+Number precision
+~~~~~~~~~~~~~~~~
+
+The precision of `numpy.number` subclasses is treated as a covariant generic
+parameter (see :class:`~NBitBase`), simplifying the annotating of processes
+involving precision-based casting.
+
+.. code-block:: python
+
+    >>> from typing import TypeVar
+    >>> import numpy as np
+    >>> import numpy.typing as npt
+
+    >>> T = TypeVar("T", bound=npt.NBitBase)
+    >>> def func(a: "np.floating[T]", b: "np.floating[T]") -> "np.floating[T]":
+    ...     ...
+
+Consequently, the likes of `~numpy.float16`, `~numpy.float32` and
+`~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to
+runtime, they're not necessarily considered as sub-classes.
+
+Timedelta64
+~~~~~~~~~~~
+
+The `~numpy.timedelta64` class is not considered a subclass of
+`~numpy.signedinteger`, the former only inheriting from `~numpy.generic`
+during static type checking.
+
+0D arrays
+~~~~~~~~~
+
+During runtime numpy aggressively casts any passed 0D arrays into their
+corresponding `~numpy.generic` instance. Until the introduction of shape
+typing (see :pep:`646`) it is unfortunately not possible to make the
+necessary distinction between 0D and >0D arrays. While thus not strictly
+correct, all operations that can potentially perform a 0D-array -> scalar
+cast are currently annotated as exclusively returning an `ndarray`.
+
+If it is known in advance that an operation _will_ perform a
+0D-array -> scalar cast, then one can consider manually remedying the
+situation with either `typing.cast` or a ``# type: ignore`` comment.
+
+Record array dtypes
+~~~~~~~~~~~~~~~~~~~
+
+The dtype of `numpy.recarray`, and the `numpy.rec` functions in general,
+can be specified in one of two ways:
+
+* Directly via the ``dtype`` argument.
+* With up to five helper arguments that operate via `numpy.format_parser`:
+  ``formats``, ``names``, ``titles``, ``aligned`` and ``byteorder``.
+
+These two approaches are currently typed as being mutually exclusive,
+*i.e.* if ``dtype`` is specified then one may not specify ``formats``.
+While this mutual exclusivity is not (strictly) enforced during runtime,
+combining both dtype specifiers can lead to unexpected or even downright
+buggy behavior.
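+
+A brief, hypothetical sketch of the two routes described above (an
+editorial illustration rather than upstream text; the ``"a"``/``"b"``
+record layout is invented for the example):
+
+.. code-block:: python
+
+    >>> import numpy as np
+
+    >>> # Route 1: a ready-made dtype passed via the ``dtype`` argument
+    >>> r1 = np.rec.fromrecords([(1, 2.0)], dtype=[("a", "i8"), ("b", "f8")])
+
+    >>> # Route 2: the same layout built by `numpy.format_parser` from the
+    >>> # ``formats``/``names`` helpers; never combined with ``dtype``
+    >>> r2 = np.rec.fromrecords([(1, 2.0)], formats="i8,f8", names="a,b")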
+ +API +--- + +""" +# NOTE: The API section will be appended with additional entries +# further down in this file + +from numpy._typing import ( + ArrayLike, + DTypeLike, + NBitBase, + NDArray, +) + +__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] + +if __doc__ is not None: + from numpy._typing._add_docstring import _docstrings + __doc__ += _docstrings + __doc__ += '\n.. autoclass:: numpy.typing.NBitBase\n' + del _docstrings + +from numpy._pytesttester import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/typing/mypy_plugin.py b/dbdpy-env/lib/python3.9/site-packages/numpy/typing/mypy_plugin.py new file mode 100644 index 00000000..8ec96370 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/typing/mypy_plugin.py @@ -0,0 +1,196 @@ +"""A mypy_ plugin for managing a number of platform-specific annotations. +Its functionality can be split into three distinct parts: + +* Assigning the (platform-dependent) precisions of certain `~numpy.number` + subclasses, including the likes of `~numpy.int_`, `~numpy.intp` and + `~numpy.longlong`. See the documentation on + :ref:`scalar types ` for a comprehensive overview + of the affected classes. Without the plugin the precision of all relevant + classes will be inferred as `~typing.Any`. +* Removing all extended-precision `~numpy.number` subclasses that are + unavailable for the platform in question. Most notably this includes the + likes of `~numpy.float128` and `~numpy.complex256`. Without the plugin *all* + extended-precision types will, as far as mypy is concerned, be available + to all platforms. +* Assigning the (platform-dependent) precision of `~numpy.ctypeslib.c_intp`. + Without the plugin the type will default to `ctypes.c_int64`. + + .. versionadded:: 1.22 + +Examples +-------- +To enable the plugin, one must add it to their mypy `configuration file`_: + +.. code-block:: ini + + [mypy] + plugins = numpy.typing.mypy_plugin + +.. _mypy: http://mypy-lang.org/ +.. 
_configuration file: https://mypy.readthedocs.io/en/stable/config_file.html
+
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterable
+from typing import Final, TYPE_CHECKING, Callable
+
+import numpy as np
+
+try:
+    import mypy.types
+    from mypy.types import Type
+    from mypy.plugin import Plugin, AnalyzeTypeContext
+    from mypy.nodes import MypyFile, ImportFrom, Statement
+    from mypy.build import PRI_MED
+
+    _HookFunc = Callable[[AnalyzeTypeContext], Type]
+    MYPY_EX: None | ModuleNotFoundError = None
+except ModuleNotFoundError as ex:
+    MYPY_EX = ex
+
+__all__: list[str] = []
+
+
+def _get_precision_dict() -> dict[str, str]:
+    names = [
+        ("_NBitByte", np.byte),
+        ("_NBitShort", np.short),
+        ("_NBitIntC", np.intc),
+        ("_NBitIntP", np.intp),
+        ("_NBitInt", np.int_),
+        ("_NBitLongLong", np.longlong),
+
+        ("_NBitHalf", np.half),
+        ("_NBitSingle", np.single),
+        ("_NBitDouble", np.double),
+        ("_NBitLongDouble", np.longdouble),
+    ]
+    ret = {}
+    for name, typ in names:
+        n: int = 8 * typ().dtype.itemsize
+        ret[f'numpy._typing._nbit.{name}'] = f"numpy._{n}Bit"
+    return ret
+
+
+def _get_extended_precision_list() -> list[str]:
+    extended_names = [
+        "uint128",
+        "uint256",
+        "int128",
+        "int256",
+        "float80",
+        "float96",
+        "float128",
+        "float256",
+        "complex160",
+        "complex192",
+        "complex256",
+        "complex512",
+    ]
+    return [i for i in extended_names if hasattr(np, i)]
+
+
+def _get_c_intp_name() -> str:
+    # Adapted from `np.core._internal._getintp_ctype`
+    char = np.dtype('p').char
+    if char == 'i':
+        return "c_int"
+    elif char == 'l':
+        return "c_long"
+    elif char == 'q':
+        return "c_longlong"
+    else:
+        return "c_long"
+
+
+#: A dictionary mapping type-aliases in `numpy._typing._nbit` to
+#: concrete `numpy.typing.NBitBase` subclasses.
+_PRECISION_DICT: Final = _get_precision_dict()
+
+#: A list with the names of all extended precision `np.number` subclasses.
+_EXTENDED_PRECISION_LIST: Final = _get_extended_precision_list()
+
+#: The name of the ctypes equivalent of `np.intp`
+_C_INTP: Final = _get_c_intp_name()
+
+
+def _hook(ctx: AnalyzeTypeContext) -> Type:
+    """Replace a type-alias with a concrete ``NBitBase`` subclass."""
+    typ, _, api = ctx
+    name = typ.name.split(".")[-1]
+    name_new = _PRECISION_DICT[f"numpy._typing._nbit.{name}"]
+    return api.named_type(name_new)
+
+
+if TYPE_CHECKING or MYPY_EX is None:
+    def _index(iterable: Iterable[Statement], id: str) -> int:
+        """Identify the first ``ImportFrom`` instance with the specified `id`."""
+        for i, value in enumerate(iterable):
+            if getattr(value, "id", None) == id:
+                return i
+        raise ValueError("Failed to identify an `ImportFrom` instance "
+                         f"with the following id: {id!r}")
+
+    def _override_imports(
+        file: MypyFile,
+        module: str,
+        imports: list[tuple[str, None | str]],
+    ) -> None:
+        """Override the first `module`-based import with new `imports`."""
+        # Construct a new `from module import y` statement
+        import_obj = ImportFrom(module, 0, names=imports)
+        import_obj.is_top_level = True
+
+        # Replace the first `module`-based import statement with `import_obj`
+        for lst in [file.defs, file.imports]:  # type: list[Statement]
+            i = _index(lst, module)
+            lst[i] = import_obj
+
+    class _NumpyPlugin(Plugin):
+        """A mypy plugin for handling various numpy-specific typing tasks."""
+
+        def get_type_analyze_hook(self, fullname: str) -> None | _HookFunc:
+            """Set the precision of platform-specific `numpy.number`
+            subclasses.
+
+            For example: `numpy.int_`, `numpy.longlong` and `numpy.longdouble`.
+ """ + if fullname in _PRECISION_DICT: + return _hook + return None + + def get_additional_deps( + self, file: MypyFile + ) -> list[tuple[int, str, int]]: + """Handle all import-based overrides. + + * Import platform-specific extended-precision `numpy.number` + subclasses (*e.g.* `numpy.float96`, `numpy.float128` and + `numpy.complex256`). + * Import the appropriate `ctypes` equivalent to `numpy.intp`. + + """ + ret = [(PRI_MED, file.fullname, -1)] + + if file.fullname == "numpy": + _override_imports( + file, "numpy._typing._extended_precision", + imports=[(v, v) for v in _EXTENDED_PRECISION_LIST], + ) + elif file.fullname == "numpy.ctypeslib": + _override_imports( + file, "ctypes", + imports=[(_C_INTP, "_c_intp")], + ) + return ret + + def plugin(version: str) -> type[_NumpyPlugin]: + """An entry-point for mypy.""" + return _NumpyPlugin + +else: + def plugin(version: str) -> type[_NumpyPlugin]: + """An entry-point for mypy.""" + raise MYPY_EX diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/typing/setup.py b/dbdpy-env/lib/python3.9/site-packages/numpy/typing/setup.py new file mode 100644 index 00000000..c444e769 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/typing/setup.py @@ -0,0 +1,11 @@ +def configuration(parent_package='', top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration('typing', parent_package, top_path) + config.add_subpackage('tests') + config.add_data_dir('tests/data') + return config + + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(configuration=configuration) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/test_isfile.py b/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/test_isfile.py new file mode 100644 index 00000000..2ca2c9b2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/test_isfile.py @@ -0,0 +1,32 @@ +import os +import sys +from pathlib import Path + +import numpy as np +from numpy.testing import assert_ + +ROOT = Path(np.__file__).parents[0] +FILES = [ + ROOT / "py.typed", + ROOT / "__init__.pyi", + ROOT / "ctypeslib.pyi", + ROOT / "core" / "__init__.pyi", + ROOT / "f2py" / "__init__.pyi", + ROOT / "fft" / "__init__.pyi", + ROOT / "lib" / "__init__.pyi", + ROOT / "linalg" / "__init__.pyi", + ROOT / "ma" / "__init__.pyi", + ROOT / "matrixlib" / "__init__.pyi", + ROOT / "polynomial" / "__init__.pyi", + ROOT / "random" / "__init__.pyi", + ROOT / "testing" / "__init__.pyi", +] +if sys.version_info < (3, 12): + FILES += [ROOT / "distutils" / "__init__.pyi"] + + +class TestIsFile: + def test_isfile(self): + """Test if all ``.pyi`` files are properly installed.""" + for file in FILES: + assert_(os.path.isfile(file)) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/test_runtime.py b/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/test_runtime.py new file mode 100644 index 00000000..c32c5db3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/test_runtime.py @@ -0,0 +1,109 @@ +"""Test the runtime usage of `numpy.typing`.""" + +from __future__ import annotations + +from typing import ( + get_type_hints, + Union, + NamedTuple, + get_args, + get_origin, + Any, +) + +import pytest +import numpy as np +import numpy.typing as npt +import numpy._typing as _npt + + +class 
TypeTup(NamedTuple): + typ: type + args: tuple[type, ...] + origin: None | type + + +NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray) + +TYPES = { + "ArrayLike": TypeTup(npt.ArrayLike, npt.ArrayLike.__args__, Union), + "DTypeLike": TypeTup(npt.DTypeLike, npt.DTypeLike.__args__, Union), + "NBitBase": TypeTup(npt.NBitBase, (), None), + "NDArray": NDArrayTup, +} + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_args(name: type, tup: TypeTup) -> None: + """Test `typing.get_args`.""" + typ, ref = tup.typ, tup.args + out = get_args(typ) + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_origin(name: type, tup: TypeTup) -> None: + """Test `typing.get_origin`.""" + typ, ref = tup.typ, tup.origin + out = get_origin(typ) + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_type_hints(name: type, tup: TypeTup) -> None: + """Test `typing.get_type_hints`.""" + typ = tup.typ + + # Explicitly set `__annotations__` in order to circumvent the + # stringification performed by `from __future__ import annotations` + def func(a): pass + func.__annotations__ = {"a": typ, "return": None} + + out = get_type_hints(func) + ref = {"a": typ, "return": type(None)} + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_type_hints_str(name: type, tup: TypeTup) -> None: + """Test `typing.get_type_hints` with string-representation of types.""" + typ_str, typ = f"npt.{name}", tup.typ + + # Explicitly set `__annotations__` in order to circumvent the + # stringification performed by `from __future__ import annotations` + def func(a): pass + func.__annotations__ = {"a": typ_str, "return": None} + + out = get_type_hints(func) + ref = {"a": typ, "return": type(None)} + assert out == ref + + +def test_keys() -> None: + """Test that ``TYPES.keys()`` and ``numpy.typing.__all__`` are synced.""" + keys = TYPES.keys() + ref = set(npt.__all__) + assert keys == ref + + +PROTOCOLS: dict[str, tuple[type[Any], object]] = { + "_SupportsDType": (_npt._SupportsDType, np.int64(1)), + "_SupportsArray": (_npt._SupportsArray, np.arange(10)), + "_SupportsArrayFunc": (_npt._SupportsArrayFunc, np.arange(10)), + "_NestedSequence": (_npt._NestedSequence, [1]), +} + + +@pytest.mark.parametrize("cls,obj", PROTOCOLS.values(), ids=PROTOCOLS.keys()) +class TestRuntimeProtocol: + def test_isinstance(self, cls: type[Any], obj: object) -> None: + assert isinstance(obj, cls) + assert not isinstance(None, cls) + + def test_issubclass(self, cls: type[Any], obj: object) -> None: + if cls is _npt._SupportsDType: + pytest.xfail( + "Protocols with non-method members don't support issubclass()" + ) + assert issubclass(type(obj), cls) + assert not issubclass(type(None), cls) diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/test_typing.py b/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/test_typing.py new file mode 100644 index 00000000..68c6f5d0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/numpy/typing/tests/test_typing.py @@ -0,0 +1,302 @@ +from __future__ import annotations + +import importlib.util +import os +import re +import shutil +from collections import defaultdict +from collections.abc import Iterator +from typing import TYPE_CHECKING + +import pytest +from numpy.typing.mypy_plugin import _EXTENDED_PRECISION_LIST + + +# Only trigger a full `mypy` run if this environment variable is set +# Note 
that these tests tend to take over a minute even on a macOS M1 CPU,
+# and more than that in CI.
+RUN_MYPY = "NPY_RUN_MYPY_IN_TESTSUITE" in os.environ
+if RUN_MYPY and RUN_MYPY not in ('0', '', 'false'):
+    RUN_MYPY = True
+
+# Skips all functions in this file
+pytestmark = pytest.mark.skipif(
+    not RUN_MYPY,
+    reason="`NPY_RUN_MYPY_IN_TESTSUITE` not set"
+)
+
+
+try:
+    from mypy import api
+except ImportError:
+    NO_MYPY = True
+else:
+    NO_MYPY = False
+
+if TYPE_CHECKING:
+    # We need this as annotation, but it's located in a private namespace.
+    # As a compromise, do *not* import it during runtime
+    from _pytest.mark.structures import ParameterSet
+
+DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
+PASS_DIR = os.path.join(DATA_DIR, "pass")
+FAIL_DIR = os.path.join(DATA_DIR, "fail")
+REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
+MISC_DIR = os.path.join(DATA_DIR, "misc")
+MYPY_INI = os.path.join(DATA_DIR, "mypy.ini")
+CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
+
+#: A dictionary with file names as keys and lists of the mypy stdout as values.
+#: To-be populated by `run_mypy`.
+OUTPUT_MYPY: defaultdict[str, list[str]] = defaultdict(list)
+
+
+def _key_func(key: str) -> str:
+    """Split at the first occurrence of the ``:`` character.
+
+    Windows drive-letters (*e.g.* ``C:``) are ignored herein.
+    """
+    drive, tail = os.path.splitdrive(key)
+    return os.path.join(drive, tail.split(":", 1)[0])
+
+
+def _strip_filename(msg: str) -> tuple[int, str]:
+    """Strip the filename and line number from a mypy message."""
+    _, tail = os.path.splitdrive(msg)
+    _, lineno, msg = tail.split(":", 2)
+    return int(lineno), msg.strip()
+
+
+def strip_func(match: re.Match[str]) -> str:
+    """`re.sub` helper function for stripping module names."""
+    return match.groups()[1]
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+@pytest.fixture(scope="module", autouse=True)
+def run_mypy() -> None:
+    """Clear the cache and run mypy before running any of the typing tests.
+
+    The mypy results are cached in `OUTPUT_MYPY` for further use.
+ + The cache refresh can be skipped using + + NUMPY_TYPING_TEST_CLEAR_CACHE=0 pytest numpy/typing/tests + """ + if ( + os.path.isdir(CACHE_DIR) + and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True)) + ): + shutil.rmtree(CACHE_DIR) + + split_pattern = re.compile(r"(\s+)?\^(\~+)?") + for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR): + # Run mypy + stdout, stderr, exit_code = api.run([ + "--config-file", + MYPY_INI, + "--cache-dir", + CACHE_DIR, + directory, + ]) + if stderr: + pytest.fail(f"Unexpected mypy standard error\n\n{stderr}") + elif exit_code not in {0, 1}: + pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}") + + str_concat = "" + filename: str | None = None + for i in stdout.split("\n"): + if "note:" in i: + continue + if filename is None: + filename = _key_func(i) + + str_concat += f"{i}\n" + if split_pattern.match(i) is not None: + OUTPUT_MYPY[filename].append(str_concat) + str_concat = "" + filename = None + + +def get_test_cases(directory: str) -> Iterator[ParameterSet]: + for root, _, files in os.walk(directory): + for fname in files: + short_fname, ext = os.path.splitext(fname) + if ext in (".pyi", ".py"): + fullpath = os.path.join(root, fname) + yield pytest.param(fullpath, id=short_fname) + + +@pytest.mark.slow +@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) +def test_success(path) -> None: + # Alias `OUTPUT_MYPY` so that it appears in the local namespace + output_mypy = OUTPUT_MYPY + if path in output_mypy: + msg = "Unexpected mypy output\n\n" + msg += "\n".join(_strip_filename(v)[1] for v in output_mypy[path]) + raise AssertionError(msg) + + +@pytest.mark.slow +@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR)) +def test_fail(path: str) -> None: + __tracebackhide__ = True + + with open(path) as fin: + lines = fin.readlines() + + errors = defaultdict(lambda: "") + + output_mypy = OUTPUT_MYPY + assert path in output_mypy + + for error_line in output_mypy[path]: + lineno, error_line = _strip_filename(error_line) + errors[lineno] += f'{error_line}\n' + + for i, line in enumerate(lines): + lineno = i + 1 + if ( + line.startswith('#') + or (" E:" not in line and lineno not in errors) + ): + continue + + target_line = lines[lineno - 1] + if "# E:" in target_line: + expression, _, marker = target_line.partition(" # E: ") + expected_error = errors[lineno].strip() + marker = marker.strip() + _test_fail(path, expression, marker, expected_error, lineno) + else: + pytest.fail( + f"Unexpected mypy output at line {lineno}\n\n{errors[lineno]}" + ) + + +_FAIL_MSG1 = """Extra error at line {} + +Expression: {} +Extra error: {!r} +""" + +_FAIL_MSG2 = """Error mismatch at line {} + +Expression: {} +Expected error: {} +Observed error: {!r} +""" + + +def _test_fail( + path: str, + expression: str, + error: str, + expected_error: None | str, + lineno: int, +) -> None: + if expected_error is None: + raise AssertionError(_FAIL_MSG1.format(lineno, expression, error)) + elif error not in expected_error: + raise AssertionError(_FAIL_MSG2.format( + lineno, expression, expected_error, error + )) + + +_REVEAL_MSG = """Reveal mismatch at line {} + +{} +""" + + +@pytest.mark.slow +@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR)) +def test_reveal(path: str) -> None: + """Validate that mypy correctly infers the return-types of + the expressions in 
`path`.
+    """
+    __tracebackhide__ = True
+
+    output_mypy = OUTPUT_MYPY
+    if path not in output_mypy:
+        return
+
+    for error_line in output_mypy[path]:
+        lineno, error_line = _strip_filename(error_line)
+        raise AssertionError(_REVEAL_MSG.format(lineno, error_line))
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
+def test_code_runs(path: str) -> None:
+    """Validate that the code in `path` runs properly at runtime."""
+    path_without_extension, _ = os.path.splitext(path)
+    dirname, filename = path.split(os.sep)[-2:]
+
+    spec = importlib.util.spec_from_file_location(
+        f"{dirname}.{filename}", path
+    )
+    assert spec is not None
+    assert spec.loader is not None
+
+    test_module = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(test_module)
+
+
+LINENO_MAPPING = {
+    11: "uint128",
+    12: "uint256",
+    14: "int128",
+    15: "int256",
+    17: "float80",
+    18: "float96",
+    19: "float128",
+    20: "float256",
+    22: "complex160",
+    23: "complex192",
+    24: "complex256",
+    25: "complex512",
+}
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+def test_extended_precision() -> None:
+    path = os.path.join(MISC_DIR, "extended_precision.pyi")
+    output_mypy = OUTPUT_MYPY
+    assert path in output_mypy
+
+    with open(path) as f:
+        expression_list = f.readlines()
+
+    for _msg in output_mypy[path]:
+        lineno, msg = _strip_filename(_msg)
+        expression = expression_list[lineno - 1].rstrip("\n")
+
+        if LINENO_MAPPING[lineno] in _EXTENDED_PRECISION_LIST:
+            raise AssertionError(_REVEAL_MSG.format(lineno, msg))
+        elif "error" not in msg:
+            _test_fail(
+                path, expression, msg, 'Expression is of type "Any"', lineno
+            )
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/version.py b/dbdpy-env/lib/python3.9/site-packages/numpy/version.py
new file mode 100644
index 00000000..9277ef55
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/version.py
@@ -0,0 +1,8 @@
+
+version = "1.26.3"
+__version__ = version
+full_version = version
+
+git_revision = "b4bf93b936802618ebb49ee43e382b576b29a0a6"
+release = 'dev' not in version and '+' not in version
+short_version = version.split("+")[0]
diff --git a/dbdpy-env/lib/python3.9/site-packages/pip-21.2.4.dist-info/top_level.txt b/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/INSTALLER
similarity index 100%
rename from dbdpy-env/lib/python3.9/site-packages/pip-21.2.4.dist-info/top_level.txt
rename to dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/INSTALLER
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/LICENSE b/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/LICENSE
new file mode 100644
index 00000000..cdfa749d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/LICENSE
@@ -0,0 +1,31 @@
+BSD 3-Clause License
+
+Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team
+All rights reserved.
+
+Copyright (c) 2011-2023, Open source contributors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/METADATA b/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/METADATA new file mode 100644 index 00000000..033775f7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/METADATA @@ -0,0 +1,344 @@ +Metadata-Version: 2.1 +Name: pandas +Version: 2.1.4 +Summary: Powerful data structures for data analysis, time series, and statistics +Home-page: https://pandas.pydata.org +Author-Email: The Pandas Development Team +License: BSD 3-Clause License + + Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team + All rights reserved. + + Copyright (c) 2011-2023, Open source contributors. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Cython +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Scientific/Engineering +Project-URL: Homepage, https://pandas.pydata.org +Project-URL: Documentation, https://pandas.pydata.org/docs/ +Project-URL: Repository, https://github.com/pandas-dev/pandas +Requires-Python: >=3.9 +Requires-Dist: numpy<2,>=1.22.4; python_version < "3.11" +Requires-Dist: numpy<2,>=1.23.2; python_version == "3.11" +Requires-Dist: numpy<2,>=1.26.0; python_version >= "3.12" +Requires-Dist: python-dateutil>=2.8.2 +Requires-Dist: pytz>=2020.1 +Requires-Dist: tzdata>=2022.1 +Requires-Dist: hypothesis>=6.46.1; extra == "test" +Requires-Dist: pytest>=7.3.2; extra == "test" +Requires-Dist: pytest-xdist>=2.2.0; extra == "test" +Requires-Dist: bottleneck>=1.3.4; extra == "performance" +Requires-Dist: numba>=0.55.2; extra == "performance" +Requires-Dist: numexpr>=2.8.0; extra == "performance" +Requires-Dist: scipy>=1.8.1; extra == "computation" +Requires-Dist: xarray>=2022.03.0; extra == "computation" +Requires-Dist: fsspec>=2022.05.0; extra == "fss" +Requires-Dist: s3fs>=2022.05.0; extra == "aws" +Requires-Dist: gcsfs>=2022.05.0; extra == "gcp" +Requires-Dist: pandas-gbq>=0.17.5; extra == "gcp" +Requires-Dist: odfpy>=1.4.1; extra == "excel" +Requires-Dist: openpyxl>=3.0.10; extra == "excel" +Requires-Dist: pyxlsb>=1.0.9; extra == "excel" +Requires-Dist: xlrd>=2.0.1; extra == "excel" +Requires-Dist: xlsxwriter>=3.0.3; extra == "excel" +Requires-Dist: pyarrow>=7.0.0; extra == "parquet" +Requires-Dist: pyarrow>=7.0.0; extra == "feather" +Requires-Dist: tables>=3.7.0; extra == "hdf5" +Requires-Dist: pyreadstat>=1.1.5; extra == "spss" +Requires-Dist: SQLAlchemy>=1.4.36; extra == "postgresql" +Requires-Dist: psycopg2>=2.9.3; extra == "postgresql" +Requires-Dist: SQLAlchemy>=1.4.36; extra == "mysql" +Requires-Dist: pymysql>=1.0.2; extra == "mysql" +Requires-Dist: SQLAlchemy>=1.4.36; extra == "sql-other" +Requires-Dist: beautifulsoup4>=4.11.1; extra == "html" +Requires-Dist: html5lib>=1.1; extra == "html" +Requires-Dist: lxml>=4.8.0; extra == "html" +Requires-Dist: lxml>=4.8.0; extra == "xml" +Requires-Dist: matplotlib>=3.6.1; extra == "plot" +Requires-Dist: jinja2>=3.1.2; extra == "output-formatting" +Requires-Dist: tabulate>=0.8.10; extra == "output-formatting" +Requires-Dist: PyQt5>=5.15.6; extra == "clipboard" +Requires-Dist: qtpy>=2.2.0; extra == "clipboard" +Requires-Dist: zstandard>=0.17.0; extra == "compression" +Requires-Dist: dataframe-api-compat>=0.1.7; extra == "consortium-standard" +Requires-Dist: beautifulsoup4>=4.11.1; extra == "all" +Requires-Dist: bottleneck>=1.3.4; extra == "all" +Requires-Dist: dataframe-api-compat>=0.1.7; extra == "all" +Requires-Dist: fastparquet>=0.8.1; extra == "all" +Requires-Dist: fsspec>=2022.05.0; extra == "all" +Requires-Dist: gcsfs>=2022.05.0; extra == "all" +Requires-Dist: html5lib>=1.1; extra == "all" +Requires-Dist: hypothesis>=6.46.1; extra == "all" +Requires-Dist: jinja2>=3.1.2; extra == "all" +Requires-Dist: lxml>=4.8.0; extra == 
"all" +Requires-Dist: matplotlib>=3.6.1; extra == "all" +Requires-Dist: numba>=0.55.2; extra == "all" +Requires-Dist: numexpr>=2.8.0; extra == "all" +Requires-Dist: odfpy>=1.4.1; extra == "all" +Requires-Dist: openpyxl>=3.0.10; extra == "all" +Requires-Dist: pandas-gbq>=0.17.5; extra == "all" +Requires-Dist: psycopg2>=2.9.3; extra == "all" +Requires-Dist: pyarrow>=7.0.0; extra == "all" +Requires-Dist: pymysql>=1.0.2; extra == "all" +Requires-Dist: PyQt5>=5.15.6; extra == "all" +Requires-Dist: pyreadstat>=1.1.5; extra == "all" +Requires-Dist: pytest>=7.3.2; extra == "all" +Requires-Dist: pytest-xdist>=2.2.0; extra == "all" +Requires-Dist: pyxlsb>=1.0.9; extra == "all" +Requires-Dist: qtpy>=2.2.0; extra == "all" +Requires-Dist: scipy>=1.8.1; extra == "all" +Requires-Dist: s3fs>=2022.05.0; extra == "all" +Requires-Dist: SQLAlchemy>=1.4.36; extra == "all" +Requires-Dist: tables>=3.7.0; extra == "all" +Requires-Dist: tabulate>=0.8.10; extra == "all" +Requires-Dist: xarray>=2022.03.0; extra == "all" +Requires-Dist: xlrd>=2.0.1; extra == "all" +Requires-Dist: xlsxwriter>=3.0.3; extra == "all" +Requires-Dist: zstandard>=0.17.0; extra == "all" +Provides-Extra: test +Provides-Extra: performance +Provides-Extra: computation +Provides-Extra: fss +Provides-Extra: aws +Provides-Extra: gcp +Provides-Extra: excel +Provides-Extra: parquet +Provides-Extra: feather +Provides-Extra: hdf5 +Provides-Extra: spss +Provides-Extra: postgresql +Provides-Extra: mysql +Provides-Extra: sql-other +Provides-Extra: html +Provides-Extra: xml +Provides-Extra: plot +Provides-Extra: output-formatting +Provides-Extra: clipboard +Provides-Extra: compression +Provides-Extra: consortium-standard +Provides-Extra: all +Description-Content-Type: text/markdown + +
+<div align="center">
+  <img src="https://pandas.pydata.org/static/img/pandas.svg"><br>
+</div>
+ +----------------- + +# pandas: powerful Python data analysis toolkit + +| | | +| --- | --- | +| Testing | [![CI - Test](https://github.com/pandas-dev/pandas/actions/workflows/unit-tests.yml/badge.svg)](https://github.com/pandas-dev/pandas/actions/workflows/unit-tests.yml) [![Coverage](https://codecov.io/github/pandas-dev/pandas/coverage.svg?branch=main)](https://codecov.io/gh/pandas-dev/pandas) | +| Package | [![PyPI Latest Release](https://img.shields.io/pypi/v/pandas.svg)](https://pypi.org/project/pandas/) [![PyPI Downloads](https://img.shields.io/pypi/dm/pandas.svg?label=PyPI%20downloads)](https://pypi.org/project/pandas/) [![Conda Latest Release](https://anaconda.org/conda-forge/pandas/badges/version.svg)](https://anaconda.org/conda-forge/pandas) [![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/pandas.svg?label=Conda%20downloads)](https://anaconda.org/conda-forge/pandas) | +| Meta | [![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3509134.svg)](https://doi.org/10.5281/zenodo.3509134) [![License - BSD 3-Clause](https://img.shields.io/pypi/l/pandas.svg)](https://github.com/pandas-dev/pandas/blob/main/LICENSE) [![Slack](https://img.shields.io/badge/join_Slack-information-brightgreen.svg?logo=slack)](https://pandas.pydata.org/docs/dev/development/community.html?highlight=slack#community-slack) | + + +## What is it? + +**pandas** is a Python package that provides fast, flexible, and expressive data +structures designed to make working with "relational" or "labeled" data both +easy and intuitive. It aims to be the fundamental high-level building block for +doing practical, **real world** data analysis in Python. Additionally, it has +the broader goal of becoming **the most powerful and flexible open source data +analysis / manipulation tool available in any language**. It is already well on +its way towards this goal. + +## Table of Contents + +- [Main Features](#main-features) +- [Where to get it](#where-to-get-it) +- [Dependencies](#dependencies) +- [Installation from sources](#installation-from-sources) +- [License](#license) +- [Documentation](#documentation) +- [Background](#background) +- [Getting Help](#getting-help) +- [Discussion and Development](#discussion-and-development) +- [Contributing to pandas](#contributing-to-pandas) + +## Main Features +Here are just a few of the things that pandas does well: + + - Easy handling of [**missing data**][missing-data] (represented as + `NaN`, `NA`, or `NaT`) in floating point as well as non-floating point data + - Size mutability: columns can be [**inserted and + deleted**][insertion-deletion] from DataFrame and higher dimensional + objects + - Automatic and explicit [**data alignment**][alignment]: objects can + be explicitly aligned to a set of labels, or the user can simply + ignore the labels and let `Series`, `DataFrame`, etc. 
automatically + align the data for you in computations + - Powerful, flexible [**group by**][groupby] functionality to perform + split-apply-combine operations on data sets, for both aggregating + and transforming data + - Make it [**easy to convert**][conversion] ragged, + differently-indexed data in other Python and NumPy data structures + into DataFrame objects + - Intelligent label-based [**slicing**][slicing], [**fancy + indexing**][fancy-indexing], and [**subsetting**][subsetting] of + large data sets + - Intuitive [**merging**][merging] and [**joining**][joining] data + sets + - Flexible [**reshaping**][reshape] and [**pivoting**][pivot-table] of + data sets + - [**Hierarchical**][mi] labeling of axes (possible to have multiple + labels per tick) + - Robust IO tools for loading data from [**flat files**][flat-files] + (CSV and delimited), [**Excel files**][excel], [**databases**][db], + and saving/loading data from the ultrafast [**HDF5 format**][hdfstore] + - [**Time series**][timeseries]-specific functionality: date range + generation and frequency conversion, moving window statistics, + date shifting and lagging + + + [missing-data]: https://pandas.pydata.org/pandas-docs/stable/user_guide/missing_data.html + [insertion-deletion]: https://pandas.pydata.org/pandas-docs/stable/user_guide/dsintro.html#column-selection-addition-deletion + [alignment]: https://pandas.pydata.org/pandas-docs/stable/user_guide/dsintro.html?highlight=alignment#intro-to-data-structures + [groupby]: https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#group-by-split-apply-combine + [conversion]: https://pandas.pydata.org/pandas-docs/stable/user_guide/dsintro.html#dataframe + [slicing]: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#slicing-ranges + [fancy-indexing]: https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#advanced + [subsetting]: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#boolean-indexing + [merging]: https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html#database-style-dataframe-or-named-series-joining-merging + [joining]: https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html#joining-on-index + [reshape]: https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html + [pivot-table]: https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html + [mi]: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#hierarchical-indexing-multiindex + [flat-files]: https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#csv-text-files + [excel]: https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#excel-files + [db]: https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#sql-queries + [hdfstore]: https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#hdf5-pytables + [timeseries]: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#time-series-date-functionality + +## Where to get it +The source code is currently hosted on GitHub at: +https://github.com/pandas-dev/pandas + +Binary installers for the latest released version are available at the [Python +Package Index (PyPI)](https://pypi.org/project/pandas) and on [Conda](https://docs.conda.io/en/latest/). + +```sh +# conda +conda install -c conda-forge pandas +``` + +```sh +# or PyPI +pip install pandas +``` + +The list of changes to pandas between each release can be found +[here](https://pandas.pydata.org/pandas-docs/stable/whatsnew/index.html). 
For full +details, see the commit logs at https://github.com/pandas-dev/pandas. + +## Dependencies +- [NumPy - Adds support for large, multi-dimensional arrays, matrices and high-level mathematical functions to operate on these arrays](https://www.numpy.org) +- [python-dateutil - Provides powerful extensions to the standard datetime module](https://dateutil.readthedocs.io/en/stable/index.html) +- [pytz - Brings the Olson tz database into Python which allows accurate and cross platform timezone calculations](https://github.com/stub42/pytz) + +See the [full installation instructions](https://pandas.pydata.org/pandas-docs/stable/install.html#dependencies) for minimum supported versions of required, recommended and optional dependencies. + +## Installation from sources +To install pandas from source you need [Cython](https://cython.org/) in addition to the normal +dependencies above. Cython can be installed from PyPI: + +```sh +pip install cython +``` + +In the `pandas` directory (same one where you found this file after +cloning the git repo), execute: + +```sh +pip install . +``` + +or for installing in [development mode](https://pip.pypa.io/en/latest/cli/pip_install/#install-editable): + + +```sh +python -m pip install -ve . --no-build-isolation --config-settings=editable-verbose=true +``` + +See the full instructions for [installing from source](https://pandas.pydata.org/docs/dev/development/contributing_environment.html). + +## License +[BSD 3](LICENSE) + +## Documentation +The official documentation is hosted on [PyData.org](https://pandas.pydata.org/pandas-docs/stable/). + +## Background +Work on ``pandas`` started at [AQR](https://www.aqr.com/) (a quantitative hedge fund) in 2008 and +has been under active development since then. + +## Getting Help + +For usage questions, the best place to go to is [StackOverflow](https://stackoverflow.com/questions/tagged/pandas). +Further, general questions and discussions can also take place on the [pydata mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata). + +## Discussion and Development +Most development discussions take place on GitHub in this repo, via the [GitHub issue tracker](https://github.com/pandas-dev/pandas/issues). + +Further, the [pandas-dev mailing list](https://mail.python.org/mailman/listinfo/pandas-dev) can also be used for specialized discussions or design issues, and a [Slack channel](https://pandas.pydata.org/docs/dev/development/community.html?highlight=slack#community-slack) is available for quick development related questions. + +There are also frequent [community meetings](https://pandas.pydata.org/docs/dev/development/community.html#community-meeting) for project maintainers open to the community as well as monthly [new contributor meetings](https://pandas.pydata.org/docs/dev/development/community.html#new-contributor-meeting) to help support new contributors. + +Additional information on the communication channels can be found on the [contributor community](https://pandas.pydata.org/docs/development/community.html) page. + +## Contributing to pandas + +[![Open Source Helpers](https://www.codetriage.com/pandas-dev/pandas/badges/users.svg)](https://www.codetriage.com/pandas-dev/pandas) + +All contributions, bug reports, bug fixes, documentation improvements, enhancements, and ideas are welcome. + +A detailed overview on how to contribute can be found in the **[contributing guide](https://pandas.pydata.org/docs/dev/development/contributing.html)**. 
+ +If you are simply looking to start working with the pandas codebase, navigate to the [GitHub "issues" tab](https://github.com/pandas-dev/pandas/issues) and start looking through interesting issues. There are a number of issues listed under [Docs](https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open) and [good first issue](https://github.com/pandas-dev/pandas/issues?labels=good+first+issue&sort=updated&state=open) where you could start out. + +You can also triage issues which may include reproducing bug reports, or asking for vital information such as version numbers or reproduction instructions. If you would like to start triaging issues, one easy way to get started is to [subscribe to pandas on CodeTriage](https://www.codetriage.com/pandas-dev/pandas). + +Or maybe through using pandas you have an idea of your own or are looking for something in the documentation and thinking ‘this can be improved’...you can do something about it! + +Feel free to ask questions on the [mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata) or on [Slack](https://pandas.pydata.org/docs/dev/development/community.html?highlight=slack#community-slack). + +As contributors and maintainers to this project, you are expected to abide by pandas' code of conduct. More information can be found at: [Contributor Code of Conduct](https://github.com/pandas-dev/.github/blob/master/CODE_OF_CONDUCT.md) + +
+ +[Go to Top](#table-of-contents) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/RECORD b/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/RECORD new file mode 100644 index 00000000..a02ef830 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/RECORD @@ -0,0 +1,2844 @@ +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_config/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_config/config.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_config/dates.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_config/display.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_config/localization.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/_hypothesis.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/_io.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/_warnings.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/asserters.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/compat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/contexts.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_typing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_version.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/_version_meson.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/api/__init__.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/api/extensions/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/api/indexers/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/api/interchange/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/api/types/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/api/typing/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/arrays/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/compat/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/compat/_constants.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/compat/_optional.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/compat/compressors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/compat/numpy/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/compat/numpy/function.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/compat/pickle_compat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/compat/pyarrow.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/conftest.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/executor.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/mean_.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/min_max_.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/shared.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/sum_.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/var_.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/accessor.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/algorithms.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/api.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/apply.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/datetimelike_accumulations.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/masked_accumulations.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/masked_reductions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/putmask.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/quantile.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/replace.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/take.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/transforms.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arraylike.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/_arrow_string_mixins.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/_mixins.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/_ranges.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/_arrow_utils.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/array.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/extension_types.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/boolean.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/categorical.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/datetimelike.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/datetimes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/floating.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/integer.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/interval.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/masked.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/numeric.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/numpy_.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/period.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/accessor.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/array.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/scipy_sparse.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/string_.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/string_arrow.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/timedeltas.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/common.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/align.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/api.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/check.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/common.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/engines.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/eval.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/expr.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/expressions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/ops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/parsing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/pytables.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/scope.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/config_init.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/construction.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/api.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/astype.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/cast.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/common.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/concat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/dtypes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/generic.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/inference.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/missing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/flags.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/frame.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/generic.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/categorical.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/generic.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/groupby.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/grouper.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/numba_.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/ops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexers/__init__.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexers/objects.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexers/utils.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/accessors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/api.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/category.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/datetimelike.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/datetimes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/extension.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/frozen.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/interval.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/multi.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/period.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/range.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/timedeltas.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/buffer.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/column.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/dataframe.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/dataframe_protocol.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/from_dataframe.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/utils.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/api.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/array_manager.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/blocks.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/concat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/construction.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/managers.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/ops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/describe.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/selectn.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/to_dict.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/missing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/nanops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/array_ops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/common.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/dispatch.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/docstrings.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/invalid.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/mask_ops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/missing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/resample.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/api.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/concat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/encoding.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/melt.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/merge.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/pivot.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/reshape.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/tile.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/util.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/roperator.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/sample.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/series.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/shared_docs.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/sorting.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/sparse/__init__.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/sparse/api.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/accessor.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/object_array.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/datetimes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/numeric.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/timedeltas.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/times.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/util/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/util/hashing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/util/numba_.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/common.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/doc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/ewm.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/expanding.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/numba_.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/online.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/rolling.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/errors/__init__.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/_util.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/api.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/clipboard/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/clipboards.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/common.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_odfreader.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_odswriter.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_openpyxl.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_pyxlsb.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_util.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_xlrd.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_xlsxwriter.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/feather_format.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/_color_data.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/console.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/css.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/csvs.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/excel.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/format.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/html.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/info.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/printing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/string.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/style.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/style_render.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/xml.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/gbq.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/html.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/_json.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/_normalize.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/_table_schema.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/orc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/parquet.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/arrow_parser_wrapper.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/base_parser.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/c_parser_wrapper.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/python_parser.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/readers.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/pickle.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/pytables.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sas7bdat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sas_constants.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sas_xport.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sasreader.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/spss.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/sql.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/stata.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/io/xml.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_core.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/boxplot.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/converter.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/core.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/groupby.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/hist.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/misc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/style.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/timeseries.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/tools.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_misc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/testing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/api/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/api/test_api.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/api/test_types.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/common.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/conftest.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_frame_apply.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_frame_apply_relabeling.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_frame_transform.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_invalid_arg.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_series_apply.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_series_apply_relabeling.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_series_transform.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_str.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/common.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/conftest.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_array_ops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_categorical.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_datetime64.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_interval.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_numeric.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_object.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_period.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_timedelta64.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_arithmetic.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_astype.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_comparison.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_construction.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_function.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_logical.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_ops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_reduction.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_repr.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/conftest.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_algos.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_analytics.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_api.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_astype.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_constructors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_dtypes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_map.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_missing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_operators.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_replace.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_repr.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_sorting.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_subclass.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_take.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_warnings.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/__init__.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_constructors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_cumulative.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_reductions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/conftest.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_arithmetic.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_astype.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_comparison.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_concat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_construction.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_contains.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_function.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_repr.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_to_numpy.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/conftest.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_arithmetic.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_comparison.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_concat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_construction.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_dtypes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_function.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_reduction.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_repr.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_astype.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_interval.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_ops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_arithmetic.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_arrow_compat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_function.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked_shared.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/numpy_/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/numpy_/test_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/numpy_/test_numpy.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_arrow_compat.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_astype.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_constructors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_reductions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_accessor.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_arithmetics.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_array.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_astype.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_combine_concat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_constructors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_dtype.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_libsparse.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_reductions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_unary.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/string_/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/string_/test_string.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/string_/test_string_arrow.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_array.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_datetimelike.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_datetimes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_ndarray_backed.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_period.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_timedeltas.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/test_constructors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/test_cumulative.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/test_reductions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/common.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_constructors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_conversion.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_fillna.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_misc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_transpose.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_unique.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_value_counts.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/computation/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/computation/test_compat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/computation/test_eval.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/config/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/config/test_config.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/config/test_localization.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/construction/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/construction/test_extract_array.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_datetimeindex.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_index.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_periodindex.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_timedeltaindex.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_array.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_astype.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_clip.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_constructors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_core_functionalities.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_functions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_internals.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_interp_fillna.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_methods.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_replace.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_setitem.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_util.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/util.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_can_hold_element.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_construct_from_scalar.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_construct_ndarray.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_construct_object_arr.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_dict_compat.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_downcast.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_find_common_type.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_infer_datetimelike.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_infer_dtype.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_maybe_box_native.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_promote.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_common.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_concat.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_dtypes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_generic.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_inference.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_missing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/array.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/test_array_with_attr.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/accumulate.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/base.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/casting.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/constructors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/dim2.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/dtype.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/getitem.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/groupby.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/index.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/interface.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/io.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/methods.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/missing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/ops.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/printing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/reduce.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/reshaping.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/setitem.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/conftest.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/date/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/date/array.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/decimal/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/decimal/array.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/decimal/test_decimal.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/json/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/json/array.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/json/test_json.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/list/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/list/array.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/list/test_list.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_arrow.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_categorical.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_common.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_datetime.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_extension.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_interval.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_masked.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_numpy.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_period.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_sparse.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_string.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/common.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/conftest.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/constructors/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/constructors/test_from_dict.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/constructors/test_from_records.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_coercion.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_delitem.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_get.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_get_value.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_getitem.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_indexing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_insert.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_mask.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_set_value.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_setitem.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_take.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_where.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_xs.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_add_prefix_suffix.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_align.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_asfreq.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_asof.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_assign.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_astype.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_at_time.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_between_time.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_clip.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_combine.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_combine_first.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_compare.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_convert_dtypes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_copy.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_count.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_cov_corr.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_describe.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_diff.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dot.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_drop.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_drop_duplicates.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_droplevel.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dropna.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dtypes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_duplicated.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_equals.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_explode.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_fillna.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_filter.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_first_and_last.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_first_valid_index.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_get_numeric_data.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_head_tail.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_infer_objects.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_interpolate.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_is_homogeneous_dtype.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_isetitem.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_isin.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_iterrows.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_join.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_map.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_matmul.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_nlargest.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pct_change.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pipe.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pop.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_quantile.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rank.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reindex.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reindex_like.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rename.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rename_axis.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reorder_levels.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_replace.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reset_index.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_round.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_sample.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_select_dtypes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_set_axis.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_set_index.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_shift.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_size.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_sort_index.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_sort_values.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_swapaxes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_swaplevel.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_csv.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_dict.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_dict_of_blocks.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_numpy.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_period.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_records.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_timestamp.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_transpose.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_truncate.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_tz_convert.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_tz_localize.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_update.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_value_counts.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_values.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_alter_axes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_api.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_arithmetic.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_block_internals.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_constructors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_cumulative.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_iteration.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_logical_ops.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_nonunique_indexes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_npfuncs.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_query_eval.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_reductions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_repr_info.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_stack_unstack.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_subclass.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_ufunc.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_unary.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_validate.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_duplicate_labels.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_finalize.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_frame.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_generic.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_label_or_level_utils.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_series.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_to_xarray.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_aggregate.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_cython.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_numba.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_other.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/conftest.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_any_all.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_api.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_apply.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_apply_mutate.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_bin_groupby.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_categorical.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_counting.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_filters.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_function.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby_dropna.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby_shift_diff.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby_subclass.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_grouping.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_index_as_string.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_indexing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_libgroupby.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_min_max.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_missing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_nth.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_numba.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_nunique.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_pipe.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_quantile.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_raises.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_rank.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_sample.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_size.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_skew.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_timegrouper.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_value_counts.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/transform/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/transform/test_numba.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/transform/test_transform.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_constructors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_formats.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_indexing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_pickle.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_reshape.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_setops.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_where.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_append.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_astype.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_category.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_constructors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_equals.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_fillna.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_formats.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_indexing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_map.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_reindex.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/conftest.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_drop_duplicates.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_equals.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_indexing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_is_monotonic.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_nat.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_sort_values.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_value_counts.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_astype.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_factorize.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_fillna.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_insert.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_isocalendar.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_repeat.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_shift.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_snap.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_to_frame.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_to_period.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_to_series.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_asof.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_constructors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_date_range.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_datetime.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_datetimelike.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_delete.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_formats.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_freq_attr.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_indexing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_join.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_map.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_misc.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_npfuncs.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_ops.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_partial_slicing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_pickle.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_reindex.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_scalar_compat.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_setops.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_timezones.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_unique.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_astype.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_base.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_constructors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_equals.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_formats.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_indexing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval_range.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval_tree.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_join.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_pickle.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_setops.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/conftest.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_analytics.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_astype.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_compat.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_constructors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_conversion.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_copy.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_drop.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_duplicates.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_equivalence.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_formats.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_get_level_values.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_get_set.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_indexing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_integrity.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_isin.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_join.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_lexsort.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_missing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_monotonic.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_names.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_partial_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_pickle.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_reindex.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_reshape.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_setops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_sorting.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_take.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_astype.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_join.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_numeric.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_setops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/object/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/object/test_astype.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/object/test_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_asfreq.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_astype.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_factorize.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_fillna.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_insert.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_is_full.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_repeat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_shift.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_to_timestamp.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_constructors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_formats.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_freq_attr.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_join.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_monotonic.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_partial_slicing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_period.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_period_range.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_pickle.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_resolution.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_scalar_compat.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_searchsorted.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_setops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_tools.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_constructors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_join.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_range.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_setops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_any_index.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_common.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_datetimelike.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_engines.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_frozen.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_index_new.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_numpy_compat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_old_base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_setops.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_subclass.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_astype.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_factorize.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_fillna.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_insert.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_repeat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_shift.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_constructors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_delete.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_formats.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_freq_attr.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_join.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_ops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_pickle.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_scalar_compat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_searchsorted.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_setops.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_timedelta.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_timedelta_range.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/common.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/conftest.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/interval/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/interval/test_interval.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/interval/test_interval_new.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_chaining_and_caching.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_datetime.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_getitem.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_iloc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_indexing_slow.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_loc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_multiindex.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_partial.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_setitem.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_slice.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_sorted.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_at.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_categorical.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_chaining_and_caching.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_check_indexer.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_coercion.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_datetime.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_floats.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_iat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_iloc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_indexers.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_loc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_na_indexing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_partial.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_scalar.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/test_impl.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/test_spec_conformance.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/test_utils.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/test_api.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/test_internals.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/test_managers.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/conftest.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/conftest.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_odf.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_odswriter.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_openpyxl.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_readers.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_style.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_writers.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_xlrd.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_xlsxwriter.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_bar.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_exceptions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_format.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_highlight.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_html.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_matplotlib.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_non_unique.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_style.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_to_latex.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_to_string.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_tooltip.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/test_console.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/test_css.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/test_eng_formatting.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/test_format.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/test_info.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/test_printing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/test_series_info.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/test_to_csv.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/test_to_excel.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/test_to_html.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/test_to_latex.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/test_to_markdown.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/test_to_string.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/generate_legacy_storage_files.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/json/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/json/conftest.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/json/test_compression.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/json/test_deprecated_kwargs.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/json/test_json_table_schema.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/json/test_json_table_schema_ext_dtype.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/json/test_normalize.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/json/test_pandas.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/json/test_readlines.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/json/test_ujson.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/common/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/common/test_chunksize.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/common/test_common_basic.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/common/test_data_list.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/common/test_decimal.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/common/test_file_buffer_url.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/common/test_float.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/common/test_index.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/common/test_inf.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/common/test_ints.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/common/test_iterator.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/common/test_read_errors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/common/test_verbose.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/conftest.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/dtypes/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/dtypes/test_categorical.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/dtypes/test_dtypes_basic.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/dtypes/test_empty.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_c_parser_only.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_comment.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_compression.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_concatenate_chunks.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_converters.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_dialect.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_encoding.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_header.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_index_col.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_mangle_dupes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_multi_thread.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_na_values.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_network.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_parse_dates.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_python_parser_only.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_quoting.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_read_fwf.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_skiprows.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_textreader.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_unsupported.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/test_upcast.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/usecols/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/usecols/test_parse_dates.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/usecols/test_strings.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/parser/usecols/test_usecols_basic.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/common.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/conftest.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_append.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_categorical.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_compat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_complex.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_errors.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_file_handling.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_keys.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_put.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_pytables_missing.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_read.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_retain_attributes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_round_trip.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_select.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_store.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_subclass.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_time_series.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/pytables/test_timezones.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/sas/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/sas/test_byteswap.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/sas/test_sas.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/sas/test_sas7bdat.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/sas/test_xport.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_clipboard.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_common.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_compression.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_feather.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_fsspec.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_gcs.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_html.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_orc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_parquet.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_pickle.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_s3.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_spss.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_sql.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_stata.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/test_user_agent.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/xml/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/xml/conftest.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/xml/test_to_xml.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/xml/test_xml.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/xml/test_xml_dtypes.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/libs/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/libs/test_hashtable.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/libs/test_join.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/libs/test_lib.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/common.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/conftest.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/frame/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/frame/test_frame.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/frame/test_frame_color.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/frame/test_frame_groupby.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/frame/test_frame_legend.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/frame/test_frame_subplots.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/frame/test_hist_box_by.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/test_backend.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/test_boxplot_method.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/test_common.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/test_converter.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/test_datetimelike.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/test_groupby.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/test_hist_method.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/test_misc.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/test_series.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/plotting/test_style.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reductions/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reductions/test_reductions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reductions/test_stat_reductions.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/resample/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/resample/conftest.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/resample/test_base.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/resample/test_datetime_index.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/resample/test_period_index.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/resample/test_resample_api.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/resample/test_resampler_grouper.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/resample/test_time_grouper.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/resample/test_timedelta.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/__init__.cpython-39.pyc,, +../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/concat/__init__.cpython-39.pyc,, 
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/concat/conftest.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/concat/test_append.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/concat/test_append_common.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/concat/test_categorical.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/concat/test_concat.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/concat/test_dataframe.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/concat/test_datetimes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/concat/test_empty.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/concat/test_index.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/concat/test_invalid.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/concat/test_series.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/concat/test_sort.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/merge/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/merge/test_join.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/merge/test_merge.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/merge/test_merge_asof.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/merge/test_merge_cross.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/merge/test_merge_index_as_string.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/merge/test_merge_ordered.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/merge/test_multi.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/test_crosstab.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/test_cut.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/test_from_dummies.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/test_get_dummies.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/test_melt.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/test_pivot.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/test_pivot_multilevel.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/test_qcut.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/test_union_categoricals.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/reshape/test_util.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/interval/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/interval/test_arithmetic.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/interval/test_interval.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/interval/test_ops.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/period/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/period/test_asfreq.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/period/test_period.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/test_na_scalar.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/test_nat.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/timedelta/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/timedelta/test_arithmetic.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/timedelta/test_constructors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/timedelta/test_formats.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/timedelta/test_timedelta.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/timestamp/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/timestamp/test_arithmetic.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/timestamp/test_comparisons.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/timestamp/test_constructors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/timestamp/test_formats.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/timestamp/test_rendering.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/timestamp/test_timestamp.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/timestamp/test_timezones.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/scalar/timestamp/test_unary_ops.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/accessors/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/accessors/test_cat_accessor.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/accessors/test_dt_accessor.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/accessors/test_sparse_accessor.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/accessors/test_str_accessor.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/indexing/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/indexing/test_datetime.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/indexing/test_delitem.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/indexing/test_get.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/indexing/test_getitem.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/indexing/test_indexing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/indexing/test_mask.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/indexing/test_set_value.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/indexing/test_setitem.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/indexing/test_take.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/indexing/test_where.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/indexing/test_xs.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_add_prefix_suffix.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_align.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_argsort.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_asof.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_astype.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_autocorr.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_between.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_clip.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_combine.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_combine_first.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_compare.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_convert_dtypes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_copy.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_count.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_cov_corr.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_describe.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_diff.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_drop.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_drop_duplicates.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_dropna.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_dtypes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_duplicated.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_equals.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_explode.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_fillna.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_get_numeric_data.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_head_tail.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_infer_objects.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_interpolate.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_is_monotonic.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_is_unique.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_isin.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_isna.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_item.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_map.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_matmul.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_nlargest.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_nunique.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_pct_change.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_pop.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_quantile.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_rank.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_reindex.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_reindex_like.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_rename.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_rename_axis.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_repeat.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_replace.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_reset_index.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_round.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_searchsorted.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_set_name.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_size.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_sort_index.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_sort_values.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_to_csv.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_to_dict.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_to_frame.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_to_numpy.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_tolist.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_truncate.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_tz_localize.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_unique.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_unstack.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_update.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_value_counts.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_values.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/methods/test_view.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/test_api.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/test_arithmetic.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/test_constructors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/test_cumulative.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/test_iteration.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/test_logical_ops.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/test_missing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/test_npfuncs.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/test_reductions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/test_repr.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/test_subclass.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/test_ufunc.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/test_unary.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/series/test_validate.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/strings/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/strings/conftest.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/strings/test_api.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/strings/test_case_justify.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/strings/test_cat.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/strings/test_extract.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/strings/test_find_replace.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/strings/test_get_dummies.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/strings/test_split_partition.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/strings/test_string_array.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/strings/test_strings.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/test_aggregation.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/test_algos.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/test_common.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/test_downstream.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/test_errors.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/test_expressions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/test_flags.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/test_multilevel.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/test_nanops.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/test_optional_dependency.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/test_register_accessor.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/test_sorting.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/test_take.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tools/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tools/test_to_datetime.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tools/test_to_numeric.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tools/test_to_time.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tools/test_to_timedelta.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/frequencies/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/frequencies/test_freq_code.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/frequencies/test_frequencies.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/frequencies/test_inference.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/holiday/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/holiday/test_calendar.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/holiday/test_federal.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/holiday/test_holiday.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/holiday/test_observance.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/common.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/conftest.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_business_day.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_business_hour.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_business_month.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_business_quarter.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_business_year.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_common.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_custom_business_day.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_custom_business_hour.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_custom_business_month.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_dst.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_easter.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_fiscal.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_index.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_month.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_offsets.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_offsets_properties.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_quarter.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_ticks.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_week.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_year.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_api.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_array_to_datetime.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_ccalendar.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_conversion.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_fields.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_libfrequencies.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_liboffsets.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_np_datetime.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_parse_iso8601.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_parsing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_period_asfreq.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_resolution.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_timedeltas.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_timezones.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_to_offset.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/tslibs/test_tzconversion.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/conftest.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_assert_almost_equal.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_assert_attr_equal.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_assert_categorical_equal.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_assert_extension_array_equal.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_assert_frame_equal.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_assert_index_equal.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_assert_interval_array_equal.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_assert_numpy_array_equal.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_assert_produces_warning.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_assert_series_equal.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_deprecate.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_deprecate_kwarg.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_deprecate_nonkeyword_arguments.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_doc.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_hashing.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_make_objects.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_numba.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_rewrite_warning.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_safe_import.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_shares_memory.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_show_versions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_util.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_validate_args.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_validate_args_and_kwargs.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_validate_inclusive.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/util/test_validate_kwargs.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/conftest.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/moments/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/moments/conftest.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/moments/test_moments_consistency_ewm.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/moments/test_moments_consistency_expanding.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/moments/test_moments_consistency_rolling.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_api.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_apply.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_base_indexer.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_cython_aggregations.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_dtypes.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_ewm.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_expanding.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_groupby.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_numba.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_online.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_pairwise.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_rolling.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_rolling_functions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_rolling_quantile.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_rolling_skew_kurt.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_timeseries_window.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tests/window/test_win_type.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tseries/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tseries/api.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tseries/frequencies.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tseries/holiday.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/tseries/offsets.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/util/__init__.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/util/_decorators.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/util/_doctools.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/util/_exceptions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/util/_print_versions.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/util/_test_decorators.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/util/_tester.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/util/_validators.cpython-39.pyc,,
+../../../../../../Library/Caches/com.apple.python/Users/billchen/Desktop/dbdpy/dbdpy-env/lib/python3.9/site-packages/pandas/util/version/__init__.cpython-39.pyc,,
+pandas-2.1.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pandas-2.1.4.dist-info/LICENSE,sha256=Uz620LmOW-Pd0S3Ol7413REoL1xHzfjQjIF1b9XXCiY,1634
+pandas-2.1.4.dist-info/METADATA,sha256=QJL6X_86sek_WyREoKglLQYA2pNX-NoNSdRbPjGRaQg,18837
+pandas-2.1.4.dist-info/RECORD,,
+pandas-2.1.4.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pandas-2.1.4.dist-info/WHEEL,sha256=rFEbl7VGpdcTiWMLyPHKRieKvxwiToMCxVXFuj50F_A,91
+pandas-2.1.4.dist-info/entry_points.txt,sha256=OVLKNEPs-Q7IWypWBL6fxv56_zt4sRnEI7zawo6y_0w,69
+pandas/__init__.py,sha256=_EAHN_0ZzKV3Mui7dPywJIcgufLCw3s337k2XtO7T18,8201
+pandas/_config/__init__.py,sha256=p0blNtwff5qc4pxqlQ34kizoqP0NLXstSoKxUDdxFpQ,1179
+pandas/_config/config.py,sha256=V6UiP3P0ksCD9auJ2boizOo-FW1HQc7ekJQH2wRtxzc,25435
+pandas/_config/dates.py,sha256=HgZFPT02hugJO7uhSTjwebcKOd34JkcYY2gSPtOydmg,668
+pandas/_config/display.py,sha256=xv_TetWUhFlVpog23QzyhMYsScops_OOsWIAGnmKdJ8,1804
+pandas/_config/localization.py,sha256=79Q2KU1aHxX6Q8Wn8EGOEUAyv3XIjQ4YaTaEzeFbtwM,5190
+pandas/_libs/__init__.py,sha256=AVt9-0_xycboh1kBrMMJIgWrnwfJCSSgQjeh0TOp5eQ,691
+pandas/_libs/algos.cpython-39-darwin.so,sha256=m_GMSYhbKFgc2C-yQAQKO5fXjdu-mFIVdRegn2vdzMs,1837767
+pandas/_libs/algos.pyi,sha256=KEF48zZLn3TSUCmd8thdo4DzYvJ5zaCK60hYX6nzyZI,15182
+pandas/_libs/arrays.cpython-39-darwin.so,sha256=wMzA3fwGnkcLYuwFYZJxiXYk5h-OM_c5zHP2cdXcM2c,114472
+pandas/_libs/arrays.pyi,sha256=d_VNFX4IMTUIBEAvJwnFlPNT4tQqrBGMNUUaoahblRk,1094
+pandas/_libs/byteswap.cpython-39-darwin.so,sha256=ixlO9bkHcbiV-AC2wmNzG-KK-_65XidJ82JYNbhxT9s,72682
+pandas/_libs/byteswap.pyi,sha256=SxL2I1rKqe73WZgkO511PWJx20P160V4hrws1TG0JTk,423
+pandas/_libs/groupby.cpython-39-darwin.so,sha256=7L-feVmGwUUXcCxemFvXkMdIgMfSS6jfNHZEt0B-O5g,1747305
+pandas/_libs/groupby.pyi,sha256=n1VH08XOkA9ZmFX04u8VfgDguvgITAmZIopALGh6qiY,6854
+pandas/_libs/hashing.cpython-39-darwin.so,sha256=tye2VQmY0Y4GwpAlUM83RdKnoRjsWgRMVTtgOsKK4fI,171401
+pandas/_libs/hashing.pyi,sha256=cdNwppEilaMnVN77ABt3TBadjUawMtMFgSQb1PCqwQk,181
+pandas/_libs/hashtable.cpython-39-darwin.so,sha256=tggNs8xB-wY74iGD5dUEVGghhO338Y2XiQeCJf68erA,1661851
+pandas/_libs/hashtable.pyi,sha256=jhAlqw1hAJuaDH4AALRYOgOkPqrpnDw3T5PPVKT2eEI,7394
+pandas/_libs/index.cpython-39-darwin.so,sha256=yPknYoH45dve5sFD4fSF2qW8LFnci__uWFWXQKQ1cRI,798199
+pandas/_libs/index.pyi,sha256=ZJr_i63Zt3zeoZ0_u3VEW5Mmbo7c4eoUvQ6ySC_cJ9g,3943
+pandas/_libs/indexing.cpython-39-darwin.so,sha256=lmTcnD5OUcSnS6wn70SpbCkZbWtSx5I0ADNOze-P-m0,76586
+pandas/_libs/indexing.pyi,sha256=hlJwakbofPRdt1Lm31bfQ3CvHW-nMxm0nrInSWAey58,427
+pandas/_libs/internals.cpython-39-darwin.so,sha256=LJqprvScPKfD-FhE93tuE_VS-phabWGhAw6piDtNOJw,322555
+pandas/_libs/internals.pyi,sha256=NzQbcE8LFv0eyD2U9ap1nwYNBUYc_olPiVEMZjOKsQs,3093
+pandas/_libs/interval.cpython-39-darwin.so,sha256=iyl_fDFVDkkfGKEQIetRqK1AaYyZa-QipMo0OjDaMSM,1165002
+pandas/_libs/interval.pyi,sha256=cotxOfoqp7DX7XgIeKrGd31mfAeNerW1WD-yBrLfTlE,5378
+pandas/_libs/join.cpython-39-darwin.so,sha256=1GJJ63boa5AAn1E2N1WUwq1gKjHxsHxn1daPIkcyI90,2103862
+pandas/_libs/join.pyi,sha256=aGqsWVmoYLZK8FahsBxeksvAqIlnLtdo7dYy2vcfstw,2740
+pandas/_libs/json.cpython-39-darwin.so,sha256=RR3nXxTmvfSW_aFsgxCQBsotLu3OLnz8OLm04-S10HY,93958
+pandas/_libs/json.pyi,sha256=kbqlmh7HTk4cc2hIDWdXZSFqOfh0cqGpBwcys3m32XM,496
+pandas/_libs/lib.cpython-39-darwin.so,sha256=rLTvtJp7rifxol3wPklzHFKz-JuvRFrKr2GBtlB1itk,641493
+pandas/_libs/lib.pyi,sha256=TtboY8Po_ICQ1ptl7idDOyZN8NErBaZjY8y2dP_SisQ,7047
+pandas/_libs/missing.cpython-39-darwin.so,sha256=OQtS_ivm9omV6H8nNWe57wyUCdmfwDtyjTXq5Edr-sM,206825
+pandas/_libs/missing.pyi,sha256=ZKaXutHk52FAQ0NdOR0YBHZueQD6CcOZ4ZiFCOwVhTo,591
+pandas/_libs/ops.cpython-39-darwin.so,sha256=lx3E33sUpOqkLSSxyeVMDk1k_HUKbbWjcw2LWYNZPBA,222885
+pandas/_libs/ops.pyi,sha256=jGotp0htINzMDK1th_WaSWCV5w4J5qzefm0-B1UMk-4,1302
+pandas/_libs/ops_dispatch.cpython-39-darwin.so,sha256=9K0U9atpX1ZW6MqqUNW_tLHy_iRAhIydi1i7WjI60_Y,78078
+pandas/_libs/ops_dispatch.pyi,sha256=Yxq3SUJ-qoMZ8ErL7wfHfCsTTcETOuu0FuoCOyhmGl0,124
+pandas/_libs/pandas_datetime.cpython-39-darwin.so,sha256=LlF-TG7CvAU_eDW385DIru-Ngtnab9YEmVlKTYcxhqY,69537
+pandas/_libs/pandas_parser.cpython-39-darwin.so,sha256=UyZEs15iyPDAIYL2GrgulENAatPiO9EltTXvK_XdrAo,70271
+pandas/_libs/parsers.cpython-39-darwin.so,sha256=AFqQza9HNtRAOC2HWrVN5XfyHCz8pOEagAvw-MHScfk,460265
+pandas/_libs/parsers.pyi,sha256=raoGhPLoRKLQAthm9JQT5rTjLR1PGFDS179aqtQdgnY,2378
+pandas/_libs/properties.cpython-39-darwin.so,sha256=zkr1SsP7wYTClLuUtODQ58uWgo6Xq_O0Ux7C-xU3bqc,95388
+pandas/_libs/properties.pyi,sha256=HF93vy5OSNtQKz5NL_zwTnOj6tzBtW9Cog-5Zk2bnAA,717
+pandas/_libs/reshape.cpython-39-darwin.so,sha256=YcfAjK5848sx6gowlxwQXIu2p9YYBTcRhu4kZSPu2Ak,224825
+pandas/_libs/reshape.pyi,sha256=xaU-NNnRhXVT9AVrksVXrbKfAC7Ny9p-Vwp6srRoGns,419
+pandas/_libs/sas.cpython-39-darwin.so,sha256=zSm5aWiMAN2STVFrgoahPLGYnPvVe8-1kwwLswwrVK8,207477
+pandas/_libs/sas.pyi,sha256=qkrJiuBd7GQbw3DQyhH9M6cMfNSkovArOXRdhJ8PFDA,224
+pandas/_libs/sparse.cpython-39-darwin.so,sha256=O28_Qc8GToCv3BDzrnMxZn1ifCJ0grjtu8TBs4NfwD0,712568
+pandas/_libs/sparse.pyi,sha256=3SPDg0lQNgEwGEKFoVCFfQ4yJOmQDD5JY_kvlMs0Lp4,1331
+pandas/_libs/testing.cpython-39-darwin.so,sha256=3lzKk26m_Fvbme0CSRe_1ExZXuOcjpr2CpPJxek7tNs,112665
+pandas/_libs/testing.pyi,sha256=_fpEWiBmlWGR_3QUj1RU42WCTtW2Ug-EXHpM-kP6vB0,243
+pandas/_libs/tslib.cpython-39-darwin.so,sha256=09A2Z55qe2A98CED_f375GB5ztvuxrB60i88EhhePrE,242759
+pandas/_libs/tslib.pyi,sha256=lNhmfEyVtCnbEUA7oQ8aTmpEJsC_mTHMIG2DPhBUAgA,885
+pandas/_libs/tslibs/__init__.py,sha256=B4SMpujmg-X9vfsO_--3mSfC70MLe5gTV8BPhdL-p5Q,2034
+pandas/_libs/tslibs/base.cpython-39-darwin.so,sha256=v96l1xl428GM7D4OzS5z1mGMrY1JjBy93_F2xEPRTO0,75366
+pandas/_libs/tslibs/ccalendar.cpython-39-darwin.so,sha256=F96oFwBchNfzVvm6Vts3oXtYkfif3mtzJoEOrN72E0A,76875
+pandas/_libs/tslibs/ccalendar.pyi,sha256=dizWWmYtxWa5Lc4Hv69iRaJoazRhegJaDGWYgWtJu-U,502
+pandas/_libs/tslibs/conversion.cpython-39-darwin.so,sha256=1Mqud6ZhJpK1kFA-_9LVATZkomnyxQ2Xl6A1_Az5WoY,226492
+pandas/_libs/tslibs/conversion.pyi,sha256=pazB4ETu95szlLCmNiHbk8HBa9-t9E97-f88GV7F1ck,275
+pandas/_libs/tslibs/dtypes.cpython-39-darwin.so,sha256=vnAu9ZSAeINFXKIkRZhns7QZ9ULSjuV7ZQlGhpCWvb0,157448
+pandas/_libs/tslibs/dtypes.pyi,sha256=FbCXVsc2k3dSTiuCiKH6K-d1TTM9a5coJ1f0bMvP3sw,2174
+pandas/_libs/tslibs/fields.cpython-39-darwin.so,sha256=l3irftDtx9h9T1VYI0xJoEIISnTbrlYy4rody8EZHaw,279832
+pandas/_libs/tslibs/fields.pyi,sha256=LOke0XZ9XJnzX2MC9nL3u-JpbmddBfpy0UQ_d-_NvN8,1860
+pandas/_libs/tslibs/nattype.cpython-39-darwin.so,sha256=gMFCjhxwas03Ath7gwM6feloasvdGu_WE6P0TMLvHYY,245897
+pandas/_libs/tslibs/nattype.pyi,sha256=BRZ78AHnzWcDgkC8ZSOy4Mf2bWulAOZoqbLbLrdve30,3763
+pandas/_libs/tslibs/np_datetime.cpython-39-darwin.so,sha256=45GF373v-0pEXSCh45fkYWffDU2JMQirk1wWXb-tYBQ,132189
+pandas/_libs/tslibs/np_datetime.pyi,sha256=8zVSkqjWd6YEU2dAtvy2GK14bcIYD3duwiZMkeTmymo,596
+pandas/_libs/tslibs/offsets.cpython-39-darwin.so,sha256=cRx40X94ufZYOhz2zb2T1f9rpUiXR9_pB_00fKFBq28,914905
+pandas/_libs/tslibs/offsets.pyi,sha256=yWPr8GI508RM7Aoi1xKCXplqh9i1w67vY6IR5FBX7Z4,8208
+pandas/_libs/tslibs/parsing.cpython-39-darwin.so,sha256=TMrCgo6nQoh3EekUHn9UNrbOCg7ICmppRTm5cFSg1_A,373353
+pandas/_libs/tslibs/parsing.pyi,sha256=K-u0fVDxuZnin-d3Ku2hGDuB9VDoH0wVbi3ejaSkTwk,1120
+pandas/_libs/tslibs/period.cpython-39-darwin.so,sha256=g9xRHYojt3ntC7pJ2ePm8zMxvQjuNjioOHmCZJanCNY,410616
+pandas/_libs/tslibs/period.pyi,sha256=_prQRtfaiUX5tiGtWnDAjsy5KYI-Gm5esl7Kme12KbQ,3884
+pandas/_libs/tslibs/strptime.cpython-39-darwin.so,sha256=ra2o8DqXOdQERWjlXOEDQMJ_Nsb2JFpm75By3zMjQ-E,297082
+pandas/_libs/tslibs/strptime.pyi,sha256=TsIU5Eof9ntF4vhDtU9iS7CBkKydMpL3CgQUlaDXmt8,307
+pandas/_libs/tslibs/timedeltas.cpython-39-darwin.so,sha256=56LEsu65LBQJNvXsYMJMP_gkKakn2YmJrLFjOTWjk6o,532908
+pandas/_libs/tslibs/timedeltas.pyi,sha256=YfRRhn1E1lsaX8AeTWRYznFByacV9Nlm5mNtxzX75Hw,4803
+pandas/_libs/tslibs/timestamps.cpython-39-darwin.so,sha256=MbPgTAfbv4G7ofmjS6THdBFVnaSf1SFdX7u2jkZJ7mM,573244
+pandas/_libs/tslibs/timestamps.pyi,sha256=-z8KnFYkV2IsALIw8NpqKCqPgUNbu4Bmj5IYM6Ak7X4,7811
+pandas/_libs/tslibs/timezones.cpython-39-darwin.so,sha256=5DuZmdCyGYTGjxCs14F1ZpCOulUht01lI2Hj74c6d70,225627
+pandas/_libs/tslibs/timezones.pyi,sha256=MZ9kC5E1J3XlVqyBwFuVd7NsqL8STztzT8W8NK-_2r0,600
+pandas/_libs/tslibs/tzconversion.cpython-39-darwin.so,sha256=IgbTGq6SO0VcNNDauEGvlDW-nqTkwfGaCOIu4Zkdj7k,258654
+pandas/_libs/tslibs/tzconversion.pyi,sha256=ZOVW__gQIn1GhjRRieU5d_QpLgSjp-JNJorML8E4858,556
+pandas/_libs/tslibs/vectorized.cpython-39-darwin.so,sha256=wb7sFGGem-ylIppjQtZ07KHkblVjFTiyebzU8RVTQpk,188924
+pandas/_libs/tslibs/vectorized.pyi,sha256=hAgI4zsG0GopoYFqMnzf4dyV5ULJCspLr-PYIPGXm6E,1236
+pandas/_libs/window/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pandas/_libs/window/aggregations.cpython-39-darwin.so,sha256=FOVR8SSRbINVqG6IVPOI_yQePZJ8QFMAmtptT9d4pfo,319358
+pandas/_libs/window/aggregations.pyi,sha256=VAaBpwBcpLwKVjydF0Q3VJF0Cl3HEteYs6Ym4EGYnNo,4042
+pandas/_libs/window/indexers.cpython-39-darwin.so,sha256=wGLXRJXAHChcmlgOoJhQym0rIiXp82LG1uY2FzqJsdM,170266
+pandas/_libs/window/indexers.pyi,sha256=53aBxew7jBcAc9sbSoOlvpQHhiLDSWPXFcVbCeJDbQA,319
+pandas/_libs/writers.cpython-39-darwin.so,sha256=KEpHqlDsdf1O2FUOvbfUR0smcBnWS7z2vrHfCpOoZBc,191849
+pandas/_libs/writers.pyi,sha256=RvwFCzrsU4RkKm7Mc3wo12RqdGdo-PuANkMo3Z9hLiU,516
+pandas/_testing/__init__.py,sha256=MT5-sPbnDOwOZpdZ66IzfrPNdnqxwNdS4h_jLtcbiwY,33557
+pandas/_testing/_hypothesis.py,sha256=jkn3plK-9OeSH8m9l-sJnUwRh0ax24qpdV6YUV_6a1s,2310
+pandas/_testing/_io.py,sha256=vZ8UwLF_92E1ACgL8qAwLBy4OlNq10q5ZiyepSrRP9g,4440
+pandas/_testing/_warnings.py,sha256=Nx40foDSLW0TCrpwowm62-5rR-GalAp32jaKwZJpXlU,8046
+pandas/_testing/asserters.py,sha256=2Q3_8oA78KaU2-6sM_HVON-QJGO7TpZ7MKVns2OEZes,44439
+pandas/_testing/compat.py,sha256=0o_biVI-wLh7kcw9FHvbwYyzNvM0PI06QRD2ZhiD2Fs,658
+pandas/_testing/contexts.py,sha256=Nyo8ilLJF0tsz0EPzhGMeQ3qVyckKECYj_OGBTgmu64,5358
+pandas/_typing.py,sha256=unm40VEBgn-yrH_CRsdQCxCzwFVTm9CmIXEPuZXnIe4,13110
+pandas/_version.py,sha256=DtyYo75o-X-GbjJI1IpQ-d52T-4HtKbuE9tIN7XpfPs,23605
+pandas/_version_meson.py,sha256=VjwyiViz42s-qEqIsbvRF_2cZCK65oRqV2psrFaPEWw,79
+pandas/api/__init__.py,sha256=QnoYVW828TM17uq-3ELeethZm8XN2Y0DkEaTc3sLr3Q,219
+pandas/api/extensions/__init__.py,sha256=O7tmzpvIT0uv9H5K-yMTKcwZpml9cEaB5CLVMiUkRCk,685
+pandas/api/indexers/__init__.py,sha256=kNbZv9nja9iLVmGZU2D6w2dqB2ndsbqTfcsZsGz_Yo0,357
+pandas/api/interchange/__init__.py,sha256=J2hQIYAvL7gyh8hG9r3XYPX69lK7nJS3IIHZl4FESjw,230
+pandas/api/types/__init__.py,sha256=bOU3TUuskT12Dpp-SsCYtCWdHvBDp3MWf3Etq4ZMdT8,447
+pandas/api/typing/__init__.py,sha256=IC4_ZmjsX4804Nnu-lQDccQr0zt5mzIZEaB3Bzdva8Y,1244
+pandas/arrays/__init__.py,sha256=_Riw-dORJXdnCzKM_DkvRq2mj07j7G4CitP24JIsBxc,1198
+pandas/compat/__init__.py,sha256=EkijD4fdJsE1Jb5koACvmi13tyFelks3dH0ZUb34QQ4,4274
+pandas/compat/_constants.py,sha256=3_ryOkmiJTO-iTQAla_ApEJfp3V_lClbnepSM3Gi9S4,536
+pandas/compat/_optional.py,sha256=ghzHDHSboh_yYHZJ1hXX1w5ANqcbRoMGr622eP3EIu0,4876
+pandas/compat/compressors.py,sha256=GdDWdKzWqkImjdwzuVBwW2JvI7aMzpPV8QyhxWgJo0g,1975
+pandas/compat/numpy/__init__.py,sha256=gY8rgiIir2ouZsnMbFoPJKMMcc_LtvqvQqERLXctIEc,1328
+pandas/compat/numpy/function.py,sha256=m9alc4uhDait-u01mV3pySlyP03QBbNiU-nYFmDa82g,13218
+pandas/compat/pickle_compat.py,sha256=h-PbSMZS6mcgkG592F-8p3SshLpi8RLuiQkF4qecJvw,7715
+pandas/compat/pyarrow.py,sha256=PGHEjgXyLxjnSmCKNOB5u_a7h8fBnih4-DlwWazw-wc,1082
+pandas/conftest.py,sha256=KG9qTu7b0RS7DuAaLEkStq70MObwoTTKFC5z26MmRZ0,50219
+pandas/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pandas/core/_numba/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pandas/core/_numba/executor.py,sha256=zmS22q-_980LrSvnzcYeOzH3iDvbu265WM4TsCy5U3k,6227
+pandas/core/_numba/kernels/__init__.py,sha256=Z1t4IUC2MO0a5KbA0LurWfRZL4wNksHVBDLprGtPLlo,520
+pandas/core/_numba/kernels/mean_.py,sha256=BesqY1gwFXPIeuXAQtDvvDBZuegsszFVTnl4lxguXEA,5646
+pandas/core/_numba/kernels/min_max_.py,sha256=tJ7OSKhne7jXpy4XSBpQS0tkP_0LggkH6iqWlxQ-FeE,3284
+pandas/core/_numba/kernels/shared.py,sha256=JUBa96LX4NmXhgXNyo859IwMXEl29EyhmRdMoQo1n78,611
+pandas/core/_numba/kernels/sum_.py,sha256=FeKOQl22qO6kN4hAmwmA3wXihrph5S03ucSt65GBquU,6488
+pandas/core/_numba/kernels/var_.py,sha256=J-yuf8ZCNSgeTauRtE6s5UnFH99GIBMsn5IG365L9V8,7061
+pandas/core/accessor.py,sha256=In9OfyQqRNgCtdwe-fN7LRzjgmtmAWHw2MORNzK0ESA,10008
+pandas/core/algorithms.py,sha256=4ZcwmutYMjxVA6JPoqIgMeTVnDoO52n-8FG01kEPQqc,57656
+pandas/core/api.py,sha256=9tm275sTpOKtdUvsFCXYQHmBdeJczGNBV1QGv3TQOOc,2911
+pandas/core/apply.py,sha256=jjY_ohxvMLr25y3M2uXwcYwJYrKtb6mzsCA8V_-p0JI,58084
+pandas/core/array_algos/__init__.py,sha256=8YLlO6TysEPxltfbNKdG9MlVXeDLfTIGNo2nUR-Zwl0,408
+pandas/core/array_algos/datetimelike_accumulations.py,sha256=BCy87HXqI2WO0_cCGK-redvi2STJzCxswYYs06YdxB4,1686
+pandas/core/array_algos/masked_accumulations.py,sha256=PL-ZAMai7H1PIXLKE2f9LSL2Ow6WZqkusSQkFfIE8d4,2618
+pandas/core/array_algos/masked_reductions.py,sha256=iUFmp_Fu3-BXM0EBiFfiPERteITlIFFI7IEpHXVkvoY,4855
+pandas/core/array_algos/putmask.py,sha256=g02wtMt5MTIuT4IS6ukE1Eh8KWb3Hi932hc47dszqJ4,4593
+pandas/core/array_algos/quantile.py,sha256=lIz08CzXmWHCbIWF7Q4bcMMfpWd5PqNQEUrAqXkvens,6578
+pandas/core/array_algos/replace.py,sha256=p8CdDslj7WwVNYjpLsT_36e8dmrxfeWzh5ECHe4uxCQ,3918
+pandas/core/array_algos/take.py,sha256=4BE_e072WcmpZHkEgyDNQVVJtMuZT8dvxYiRJ8j4U24,20894
+pandas/core/array_algos/transforms.py,sha256=TPpSPX5CiePVGTFUwnimpcC5YeBOtjAPK20wQvG92QI,1104
+pandas/core/arraylike.py,sha256=12znroVBPcTHQ_29gT_aapUpTVpNMJJFLNaQ_9HRBP0,17600
+pandas/core/arrays/__init__.py,sha256=dE6WRTblcq40JKhXJQDsOwvhFPJstj_8cegiLthH0ks,1314
+pandas/core/arrays/_arrow_string_mixins.py,sha256=QZ0ZV2XvVk5Jl-aQl88zaZDCb_1HIEXyv07t4a3n5aY,2606
+pandas/core/arrays/_mixins.py,sha256=voRGy9aryYHe0U_9HhTWQq-vELu7tqr7ZItRlrWxIAo,16599
+pandas/core/arrays/_ranges.py,sha256=T-LOLzSu2CmblQKR0lbo0n9XjdJbk4ty4IqaHfWtJEM,7149
+pandas/core/arrays/arrow/__init__.py,sha256=LjphesrvQINfg4UVIN2xGCBW4Gi9sOFEEQ8bAVNiaHE,98
+pandas/core/arrays/arrow/_arrow_utils.py,sha256=KjsV7ts963RSyNEGLGQliypzHJ_hs3mTslWPMXZpGpE,2151
+pandas/core/arrays/arrow/array.py,sha256=X6zYZq9Mp9fzLQ6PYekEAXsAh91K7StuKPvnpI_juFo,91627
+pandas/core/arrays/arrow/extension_types.py,sha256=4npcEV9P5fT0HaYLq-1iGXF_KNu__BvCBWEw5d-Dngo,5427
+pandas/core/arrays/base.py,sha256=YAtzFoLb63STqPkGdzZSQUFoNZmhBY3Db4r8KeHiDb4,80690
+pandas/core/arrays/boolean.py,sha256=Nb9IpnzdQSO_IxZzlDmqSJyACg6RI4rZwO_rAGh4-pI,12411
+pandas/core/arrays/categorical.py,sha256=13ZRLrQuhWG12bWloYf-6mby7a7JQsVN89sQkB7AqNE,97427
+pandas/core/arrays/datetimelike.py,sha256=KvEmq5XHvWl3Dw1aImVfC2mtlAXlxwBpGSBJGr5w3AE,84424
+pandas/core/arrays/datetimes.py,sha256=hgpzk-9qXruUivj-Wxh7qMz4TL-xPGZo8lNjJAPmzm4,91480
+pandas/core/arrays/floating.py,sha256=afSIlaWGdIYiYXpIc8l1FE1cz1vQgYNG_-8a9uYdgpo,4256
+pandas/core/arrays/integer.py,sha256=TDD_6MzardGVmVv_l7VFqHGNG-H-PIoB6dMKCj2OGVE,6321
+pandas/core/arrays/interval.py,sha256=rjrXYiqIMKcpJt79qMA6LgCttiks0wXKBL0eJnVEEkI,63419
+pandas/core/arrays/masked.py,sha256=m3tCsj-WwdfSp9vVJpkOnrjcZEts6u4J29YBuusABvo,51739
+pandas/core/arrays/numeric.py,sha256=z5GUIVINpAr-UbpGI_7o6prLJ2oJokfKrDEhdXvwDto,8985
+pandas/core/arrays/numpy_.py,sha256=3ULDIeEE0FtsybaDtn6rfSPKqKMQ5c-vRfTY8Wyj_00,17528
+pandas/core/arrays/period.py,sha256=Mz7aM82994NFTIKZcDR_q7Q5JeyGBCcWBq82t4BHQMo,39603
+pandas/core/arrays/sparse/__init__.py,sha256=iwvVqa2GG9TjYrd1rxCBjdLeGQBoRqUO2fZnILElbZg,356
+pandas/core/arrays/sparse/accessor.py,sha256=TdVXoYTWNodsGiWRmKlPRVdkrOyTlyPI_Z5G4gCxg-g,12522
+pandas/core/arrays/sparse/array.py,sha256=FkZqkz9Q_aIXAfezwntug_Ot8U0D0qSv1lWC26YXYYc,63097
+pandas/core/arrays/sparse/scipy_sparse.py,sha256=rVaj3PtVRrMPlzkoVFSkIopWV0xg0GJnpt1YljWT_zg,6462
+pandas/core/arrays/string_.py,sha256=GRWI-Lny3YKlcCARiFxyTB_oNM2LBOg8dQECgUtend4,21209
+pandas/core/arrays/string_arrow.py,sha256=yLEfHtt_7neHLMPOwDApksO2GzVvu72YIZLJmgIfq9Q,21428
+pandas/core/arrays/timedeltas.py,sha256=b1oX04ZTiXqbFGXvoTK-ghBm0MQKRbN0JStO_1tCxW0,38821
+pandas/core/base.py,sha256=-VXQ6If1hl9i-p2A2bywojoCOjGEoLU60Rn9d0xkv2g,40915
+pandas/core/common.py,sha256=oZ4w-eA9O_ILXUwgzHFI_KRTQSwbnfUuqxa6yBxtw7I,17203
+pandas/core/computation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pandas/core/computation/align.py,sha256=FlztKI7Mp-37LYacMYY7rZfpozyur1RDFLe4mFmWdfI,6173
+pandas/core/computation/api.py,sha256=CQ2AF0hwydcgTHycMCFiyZIAU57RcZT-TVid17SIsV4,65
+pandas/core/computation/check.py,sha256=nWdO0qx_42z-XPTabg1jYA_4gKgMdHO37dDKGGw0yJ4,337
+pandas/core/computation/common.py,sha256=-2EHScxo2jfEQ1oqnnlQ_2eOvtAIn8O2krBaveSwmjs,1442
+pandas/core/computation/engines.py,sha256=g9eiyVCUtNmJGbexh7KvTreAKKhs5mQaWx4Z5UeOZ5s,3314
+pandas/core/computation/eval.py,sha256=uxynIA9MP2LZChwFq9xUwrME8D9nZyWYr121NJvr9-M,14253
+pandas/core/computation/expr.py,sha256=0_JEOTFG4UJy1MZDYKBw04RsYfJamG_cGdN9I0-knhw,25073
+pandas/core/computation/expressions.py,sha256=K0vu_v8JBVjJn6eQqNocC4ciNKsIYnEZrq8xwvhik2M,7503
+pandas/core/computation/ops.py,sha256=gIg_XjbK9mnSj42KEhGynXIKI4LOgnS1f5s6tCkpd5U,16160
+pandas/core/computation/parsing.py,sha256=VhYh3en2onhyJkzTelz32-U4Vc3XadyjTwOVctsqlEI,6399
+pandas/core/computation/pytables.py,sha256=E2GyJKhFMiTww852Ra8hUY5UAlT3nxlaZ9kyc1Ju6ww,20062
+pandas/core/computation/scope.py,sha256=eyMdfx-gcgJaVIRY2NBgQDt2nW5KSdUZ3M9VRPYUJtU,10203
+pandas/core/config_init.py,sha256=6nuKvoPFeWyPdYcJkmnCbK0ektzsyBcJDV9-nlzafpQ,25633
+pandas/core/construction.py,sha256=ynf_dqaM9BhbyMQ1Y-cUxodwbY-b7WJxLhvQ9pZml0A,26073
+pandas/core/dtypes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pandas/core/dtypes/api.py,sha256=5mtML1OspdDbsWShw1fsDq93pg2pmuUGSBrvQWQcCgg,1819
+pandas/core/dtypes/astype.py,sha256=sDFk-AIxI1FnnrcYwlaBAzvWSJnJZJB33g2BMA6BtXU,9244
+pandas/core/dtypes/base.py,sha256=Ab7kSjfpUtCVHWxbV0eP8crkHk5bYYYpWvwA53A40WY,15808
+pandas/core/dtypes/cast.py,sha256=fHhaaJ9BT8KdW5bBzJenCCKGFslH5DVrCsaeRfikqgM,60141
+pandas/core/dtypes/common.py,sha256=0nwicWxTvXWoLw2iWT-3LUKlrbqb2TGA3b3bmXf4eWs,47049
+pandas/core/dtypes/concat.py,sha256=Lnul3nFxIVcD3yNPeCzFU8L5pGmf2naPdsylCHU1iwo,12197
+pandas/core/dtypes/dtypes.py,sha256=R0EARuWMoo42dLDZ8LgxEXfN68VsMFPJKeWkkXSqWA4,74415
+pandas/core/dtypes/generic.py,sha256=avKoJBzIQ0pJiFg9mmQ1D5ltkZsYxu8uPa46Hat70Ro,4122
+pandas/core/dtypes/inference.py,sha256=yd60KpxbNizmg5gNSDv_ybAh_tnZB7Qrj1ctyHSqEvc,9004
+pandas/core/dtypes/missing.py,sha256=4uW6j7-No0tpvIxXm7iwfoi7_NTHE8aIm3WUI7Em2SI,22581
+pandas/core/flags.py,sha256=39KzIE4y1JHNcRTatmeVxrOavf517LS7Kh6YiSxfh5U,3792
+pandas/core/frame.py,sha256=kCHCdSo-VNGAjwTH3PfgvEfEi-i-uVj409_jlVRAQSU,431097
+pandas/core/generic.py,sha256=fhtpVlIv2g1ED2RUxf_Zot7k_MiA9j8q77q5c61G7-o,451940
+pandas/core/groupby/__init__.py,sha256=KamY9WI5B4cMap_3wZ5ycMdXM_rOxGSL7RtoKKPfjAo,301
+pandas/core/groupby/base.py,sha256=OrqG2_h_Bp8Z_MeLrAGWGROG-MtSloGqeaJ79qYbJm0,2740
+pandas/core/groupby/categorical.py,sha256=iCsl3d_unK4zAh_lR3eDIBVOhwsv9Bj9X1wbnaR90pw,3047
+pandas/core/groupby/generic.py,sha256=ZRijItkqX8tXovcztaIGW5rhi_XzAgVoxd1D8_6lLWs,97544
+pandas/core/groupby/groupby.py,sha256=69jujygOdxVBFWKx7-v55cDAx6iyOwvmSUnpBI-_-y8,184443
+pandas/core/groupby/grouper.py,sha256=NZupW8cqa9gQnpW_XlAfGJOt4WI8rCPbCm3clABq30s,37370
+pandas/core/groupby/indexing.py,sha256=QY4GZ4wDd-1K-we0EfdiFvmdAZ_VxVgPrYB0kBZf6wU,9510
+pandas/core/groupby/numba_.py,sha256=XjfPfYGbYJgkIKYFiq7Gjnr5wwZ8mKrkeHKTW42HZMg,4894
+pandas/core/groupby/ops.py,sha256=qG_9aW7MCjHAPceRR9us-xBMh8zZe1LHYUOFp3t74Jk,37915
+pandas/core/indexers/__init__.py,sha256=M4CyNLiQoQ5ohoAMH5HES9Rh2lpryAM1toL-b1TJXj0,736
+pandas/core/indexers/objects.py,sha256=wnh_XeMqRAiP24HwDtWo-SXSqSUmaCdk418xWCvM-xY,14641
+pandas/core/indexers/utils.py,sha256=TgVCAX9r4MZw3QPH6aE-d55gRZcKN9H9X-MTZ4u-LiY,16069
+pandas/core/indexes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pandas/core/indexes/accessors.py,sha256=vax2wxz0SHFss5k55VbgkqCddz1FfceJoXY5Vq3f5Os,18262
+pandas/core/indexes/api.py,sha256=cbVFABdKC4pbDgRzC2yFJ4HXuIgSNNohbq5WGXcWhB0,10160
+pandas/core/indexes/base.py,sha256=5wi3fdb6TtxnjDm7DVWJwJoyqrh4EUk19vWGXpQdsbs,258184
+pandas/core/indexes/category.py,sha256=yW3kw-efYah3JHoSjR7-sa2gqlfijo8W3Q9mS7Z8H8Y,16450
+pandas/core/indexes/datetimelike.py,sha256=8ST2Pecovlqm7W4B2NLM1xwAk-09SALAXjK55nUdz2g,27321
+pandas/core/indexes/datetimes.py,sha256=W_qm8VpZqX3znNHkbpovk4fea0pJ75O7gcdRsYhNOIE,38297
+pandas/core/indexes/extension.py,sha256=Wy4XfMrJdc4HxuApZw4D-Xr3RyBlGCOKbI27L16tHEE,5188
+pandas/core/indexes/frozen.py,sha256=V5nNWoxvoJclxTkCYXDAqp3zQ8961K3J1Vua8glI4LU,3398
+pandas/core/indexes/interval.py,sha256=jzJOpusqDohG4CdJ_xTNAgIzaFxOYmDpPeK0a_FZdEg,39025
+pandas/core/indexes/multi.py,sha256=lHGAsTfesx3GW-gC7AGYYoH2VzyeOghRIXvRN2bCkcs,138722
+pandas/core/indexes/period.py,sha256=Y3X7bXrIpUj8XXuedp8KkXGPG7M95BOnikV2RurYyLQ,16623
+pandas/core/indexes/range.py,sha256=-_n1W_W1GRPhDT7vsgETsIILpREcbnFrFoQWmOY8PyY,38318
+pandas/core/indexes/timedeltas.py,sha256=zLNuTJH_VO2cK2Acv_zeI8UYRnP1unLbx_35mqTRAR0,10889
+pandas/core/indexing.py,sha256=gvT66Cn1pvRjNvsTeTzXeRHiWn8qERHHqTS-xXCNUHo,92923
+pandas/core/interchange/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pandas/core/interchange/buffer.py,sha256=4w7opAVY5iNRvhIRD2JVLzW_Fwq15gANPUEnUj8APFg,2247 +pandas/core/interchange/column.py,sha256=z7v9E23pZGadUh-9hTs-6THUIpSDOHbif7Pt7rE0b38,14265 +pandas/core/interchange/dataframe.py,sha256=9SsNToKrQkEI3xUe4E6fR0eeJgKvGv_D6aaNx3AZ6Mg,3951 +pandas/core/interchange/dataframe_protocol.py,sha256=L9Wy8vB5oTsuYJQ9NBY4RIEAWXBclnTOH3I_txkIbZk,16177 +pandas/core/interchange/from_dataframe.py,sha256=7opT_HD5X0MqqGmQIq4pfmIsuFvPBSl41D7H-ppAQMI,16994 +pandas/core/interchange/utils.py,sha256=20S9rutUShMRsyqVa57bDdgIMZK59jdkz056jsvCssk,3587 +pandas/core/internals/__init__.py,sha256=HTf7c4j_Pw_cxDx2EX7WRSnj-q6D9Tp6zVBOzDNx5iE,1618 +pandas/core/internals/api.py,sha256=rLYyzxn0HNGuKu5xSb9Q2kyvfmEVzOr4WMuId_222W0,3322 +pandas/core/internals/array_manager.py,sha256=4epfZddKpJTepUolwOg6kqwfYIcr25lCCXFM9pLdPC4,43610 +pandas/core/internals/base.py,sha256=l5WEhLE5ExhN5wmApEv6IEN24935YwslpMCGy0gZ-m0,10135 +pandas/core/internals/blocks.py,sha256=Tk1Ao7RsgYUIEfpi6LI8QmfLU-k6FhVl7Hz44rHxFaM,88328 +pandas/core/internals/concat.py,sha256=Q_MnHIKSMBvIvA6DpMNkcsQSv8aU9DivUn1mlA_9zEs,19151 +pandas/core/internals/construction.py,sha256=VVe3Z5kSOVbGzP1_g88pv7mAEOBoDfvOfhEwU-F-9aQ,33858 +pandas/core/internals/managers.py,sha256=ilR0WdOpjtUD5nYbp7Su_upu2SjVUwSaY3sI8kdNg-E,79783 +pandas/core/internals/ops.py,sha256=Rh2-gWjeSwXnjkiacohSNM5iNvqQqBiAqgblwP6rD9o,5145 +pandas/core/methods/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/methods/describe.py,sha256=rFOrCVZh-GrTTRCa97JhKqM0lPCgS5LklOcFEvOSnsA,11960 +pandas/core/methods/selectn.py,sha256=sbe2VZQJcxbzmjz0ZIFx_9IOpojZw1AaH-lMM395hVQ,7565 +pandas/core/methods/to_dict.py,sha256=5OCW4ZNi2chthSD3LgxgrMRiECJgOPl8fNfdU0USfNw,7203 +pandas/core/missing.py,sha256=AcMgb7ZDPl_1D5xOinAaxWPW6ESV55bN82XaE651ug4,32131 +pandas/core/nanops.py,sha256=_Hziwfzj1F2JJzCn0KMts6bntGonx7Mhg2_4aiRfgAY,50442 +pandas/core/ops/__init__.py,sha256=CQ7tQB-QPUxD6ZnbS2SzFVjjvCD7-ciglexkdbbn7y8,1620 +pandas/core/ops/array_ops.py,sha256=U5FxeihFurIjGAVPRO3MsWeZ7uDZjnPonFdz-d9m8bY,19074 +pandas/core/ops/common.py,sha256=jVf_L_oN6bKcUOuH6FgaKOx18se9C3Hl2JPd0Uoj4t4,3500 +pandas/core/ops/dispatch.py,sha256=5XFIr7HV1Dicohgm0ZJu-6argn2Qd0OwES2bBxQwCj0,635 +pandas/core/ops/docstrings.py,sha256=WlGWcWjNsldPW73krxbgRwQvkacmKqRqJsN4VVz-FXU,18448 +pandas/core/ops/invalid.py,sha256=5-gRzdBfk2F8qIZ_vzUlnI-vo1HsAh2F5BYJUEN--m0,1433 +pandas/core/ops/mask_ops.py,sha256=0sm9L1LB_USp8DxNBuCdoB8cJ_MzzvSAb_u3QQmQrKI,5409 +pandas/core/ops/missing.py,sha256=0WlqN_us0LU5RAdoitM-Ko_4xghJ_HBRkteLQ53fU14,5140 +pandas/core/resample.py,sha256=swOmfdIJJoebSuv8D1SL7P_RDrDA_1mtQqKpXmNV5_A,89850 +pandas/core/reshape/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/reshape/api.py,sha256=Qk5y-D5-OdRYKkCgc-ktcxKGNGSCPteISEsByXFWI9M,680 +pandas/core/reshape/concat.py,sha256=t3LnTS1GcwUHXarbJt-0so5HIoxkWfelnEX7i19YgHs,27654 +pandas/core/reshape/encoding.py,sha256=G3bYXKxWf1Uhl6d9_BjqBh8Ip-q0e6V6GcxGYZZhoBw,18179 +pandas/core/reshape/melt.py,sha256=OT6ajjyNUgD0qwVhta8Zes2cfy2gBsWquBPeYPumt_I,18008 +pandas/core/reshape/merge.py,sha256=7HWSR_3Lpx90n3ZXfBMQM7VmjvKbn6MKiBb6yZcBUEw,98453 +pandas/core/reshape/pivot.py,sha256=J2sP967vDxZI-SOZEMaTdmxuSldW_Hcp_P7ngYLgCGw,27985 +pandas/core/reshape/reshape.py,sha256=PvoxzGfakmFkURJoQjJVeg7H2KDAZshd5hO59QzyLpA,34634 +pandas/core/reshape/tile.py,sha256=uhD3zYGWR3JYB250twLhItBsBGyXprIErGXrCeM8RI8,21858 +pandas/core/reshape/util.py,sha256=waGxF-iy59KedyPtrsieH3mMDfPNyPdE2ADkc8yBFog,2058 
+pandas/core/roperator.py,sha256=ljko3iHhBm5ZvEVqrGEbwGV4z0cXd4TE1uSzf-LZlQ8,1114 +pandas/core/sample.py,sha256=QEPzbFmeMRMxAIqfkRrJLnIjUZgSupbP8YUEezW-Pcw,4626 +pandas/core/series.py,sha256=Fwu_UZSBtZLrSSj0l49ApzpHbDji_N94xmjylTeIqDw,199651 +pandas/core/shared_docs.py,sha256=RpHK33ywDEV-MZ8FdomREfSCFC4M45JdKw9IN8X79QU,29333 +pandas/core/sorting.py,sha256=Mi2DOFUERdOQ6wVAnU896DYZgN4AJSoSKRkl2wlpjDM,25405 +pandas/core/sparse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/sparse/api.py,sha256=y0onCpBKCj_5Iaybw5e-gxk8zAa9d1p5Zu58RLzPT1k,143 +pandas/core/strings/__init__.py,sha256=KYCMtwb7XWzZXsIZGijtjw9ofs2DIqE9psfKoxRsHuw,1087 +pandas/core/strings/accessor.py,sha256=TkEOX9xOXP8aB1t9xZTrQnPEnYMIGdj-MWj2dEUoRqw,111646 +pandas/core/strings/base.py,sha256=AdPlNkPgT218Mffx6Blt4aJF1GGxSYII3mem6EjWntY,5528 +pandas/core/strings/object_array.py,sha256=Wzmj-g2qmKSu-gZXYY8XPwhj5_jfBP_NGqhW8tGsej4,15420 +pandas/core/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/tools/datetimes.py,sha256=zSxK7Qg6x78RY2PTBd5U1vos1c7J9bgwRuHPiDlaXvI,46337 +pandas/core/tools/numeric.py,sha256=TkJj1vrfXgH69TE1OrsAGOigrTZ-YQ2XBz5qgLKUY3I,10658 +pandas/core/tools/timedeltas.py,sha256=BULgskYVdwzHju3iqppWonAN5IvMgB5DT66v6LhZ4Cw,8748 +pandas/core/tools/times.py,sha256=PF-_xQUlCuuSYEEBL1fifm_47T2qv3jbhLceKsW63dk,4967 +pandas/core/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/core/util/hashing.py,sha256=LlYoJfn80z0zj0xNt5P3PYRVFJafXI3bRnSYV361Avs,9657 +pandas/core/util/numba_.py,sha256=rNjwx_otohQjDm9iBexqe569Nl9uYgDG1UYDPMvFLe8,2309 +pandas/core/window/__init__.py,sha256=DewB8XXkLGEDgtQqICYPmnkZZ3Y4tN6zPoTYvpNuJGE,450 +pandas/core/window/common.py,sha256=LZBddjEy7C_nb-9gmsk2wQr-FsF1WBMsGKd8ptmMdug,6714 +pandas/core/window/doc.py,sha256=iCAs_hJ_pwstet2FHwSilVSXoTaKRuuMHwyZ9l2dz_c,4158 +pandas/core/window/ewm.py,sha256=ngr1yHmxGQspUJg9l-sJBv3JGP0hssmoInsJSieBMUc,34778 +pandas/core/window/expanding.py,sha256=MnepmpreeY11OX9nQHj5TxgYdnOPJIRC-Cr3MyDnC38,27845 +pandas/core/window/numba_.py,sha256=7x9RvcIvPab0C5uXT4U9cP1VNaI7Yym0CevTsMIu27U,10606 +pandas/core/window/online.py,sha256=ljzNUk8E0UCzKTOFmX6B0B_CC_yjJK_wJM-PJimY-FY,3728 +pandas/core/window/rolling.py,sha256=K9AmZO8GSrxDDoD2vjIGWl3yBQNiwEsIe0P13mOPSIk,95090 +pandas/errors/__init__.py,sha256=R0Je9JEKC94MY93xSOijJY81Ql7bUt9_rTvTrNQal5c,24910 +pandas/io/__init__.py,sha256=4YJcSmLT6iTWceVgxGNSyRJq91wxhrgsNr47uc4Rw-I,293 +pandas/io/_util.py,sha256=0_dKFBocN0FV3XTzhOlDP55ToeHCre22RIKe6d6tRZs,961 +pandas/io/api.py,sha256=w7Ux3U8PI-SeP13hD3PMjWMf3YbOGog6zCDqj0nfnpI,1264 +pandas/io/clipboard/__init__.py,sha256=Lm47X7MO_YY8-_Vnee8S3N9T1nto-XLN8JL-r4dz1q0,21778 +pandas/io/clipboards.py,sha256=t88NnxP8TOpmM1V438o6jgvlEMzlRLaqWBxUQiH_EQ8,6320 +pandas/io/common.py,sha256=po0u9zxMn7XZICD-cJ7IzoGCho2Nmip9Hm7luxD-2Pg,40395 +pandas/io/excel/__init__.py,sha256=w62gHQ9nF3XgBOmjhM8eHmV-YXF7gflz1lFqxFq7io8,486 +pandas/io/excel/_base.py,sha256=MGlfHEjbA1k62qG6bj48Uxqjv7XC_mm0pxJaJnqM_RQ,58670 +pandas/io/excel/_odfreader.py,sha256=ZEB8SzjkJ-fdEl3kIXo2Ergnr6jochDQeLiXFyISy7U,8337 +pandas/io/excel/_odswriter.py,sha256=RbCXhc8aLg-QHqQeyTe5OAg92R-M0YXG_8J6ZlKOVXA,10939 +pandas/io/excel/_openpyxl.py,sha256=DnTF2npgtGKuhvGpP32W6oidME3ZeG9lUaMbML-APsU,19842 +pandas/io/excel/_pyxlsb.py,sha256=74huu-7ISIsfvguwDID84B3KIooHtU53XOP3PFkX6ts,4358 +pandas/io/excel/_util.py,sha256=1fwMlNjLSd_qlCGLGBcXDPLnZ_SOpAZTIaUgYUVr0_0,8105 +pandas/io/excel/_xlrd.py,sha256=jsrgRQS5LrvOOP0RttU-vPu13RcanZUIXimCZdOrIS0,4418 
+pandas/io/excel/_xlsxwriter.py,sha256=GR8Fwvbiys5x9ZPso_dQLPBWFLKRgngFTt09xl0y57Y,9216 +pandas/io/feather_format.py,sha256=3aTKeEFQoCvoAXiouozNArcCB6KSf7LviA9bvIM2m7A,4515 +pandas/io/formats/__init__.py,sha256=MGhPbyRcirFXg_uAGxyQ_q8Bky6ZUpBZ0nHXQa5LYd8,238 +pandas/io/formats/_color_data.py,sha256=fZ_QluvMFUNKUE4-T32x7Pn0nulQgxmsEMHB9URcBOY,4332 +pandas/io/formats/console.py,sha256=dcoFM-rirR8qdc1bvgJySPhZvk23S6Nkz3-2Lc30pMk,2748 +pandas/io/formats/css.py,sha256=gCSjRV6QatAMY-La26wnrQmyF78G4BruMfpWrDIKIkk,12793 +pandas/io/formats/csvs.py,sha256=2oUlcVu_GCQLdJ0DUn34yIBSHGuz497JP5hP-xF-qHE,10434 +pandas/io/formats/excel.py,sha256=CebnwOen2C61vja08hcwE8N57CEHIYbTXwixh8OCHH4,33221 +pandas/io/formats/format.py,sha256=RTn5O57Khp9dSy7P5Kv-Uuo-6cL2vAVByC6yFxBQOh4,71977 +pandas/io/formats/html.py,sha256=mWXFf8fMFG33-QWpnIwdFh7id29sQzDBFzasQocc9lc,24021 +pandas/io/formats/info.py,sha256=sOvzyn_LNJdQjZYyqu1RxIPrcRFWJDEwXmdy2fKgd78,32584 +pandas/io/formats/printing.py,sha256=YdieM9sGsW9286jlCh5XDDC7x9qZMRWDENOUxquGbsc,15722 +pandas/io/formats/string.py,sha256=3cRzp6R6AlL5m5XmGJep_uAaZKQyA60cSYI_I7-I3TE,6713 +pandas/io/formats/style.py,sha256=_uv2B6ImaOTaDHpGQDpG8lnCZP7Ebh2Ad08NuPo5ktQ,155885 +pandas/io/formats/style_render.py,sha256=Dx9g00zTAHuAkRtT8bSfD5TOkiJKQsaCjMiguk6kG2I,90813 +pandas/io/formats/templates/html.tpl,sha256=KA-w_npfnHM_1c5trtJtkd3OD9j8hqtoQAY4GCC5UgI,412 +pandas/io/formats/templates/html_style.tpl,sha256=_gCqktLyUGAo5TzL3I-UCp1Njj8KyeLCWunHz4nYHsE,694 +pandas/io/formats/templates/html_table.tpl,sha256=MJxwJFwOa4KNli-ix7vYAGjRzw59FLAmYKHMy9nC32k,1811 +pandas/io/formats/templates/latex.tpl,sha256=m-YMxqKVJ52kLd61CA9V2MiC_Dtwwa-apvU8YtH8TYU,127 +pandas/io/formats/templates/latex_longtable.tpl,sha256=opn-JNfuMX81g1UOWYFJLKdQSUwoSP_UAKbK4kYRph4,2877 +pandas/io/formats/templates/latex_table.tpl,sha256=YNvnvjtwYXrWFVXndQZdJqKFIXYTUj8f1YOUdMmxXmQ,2221 +pandas/io/formats/templates/string.tpl,sha256=Opr87f1tY8yp_G7GOY8ouFllR_7vffN_ok7Ndf98joE,344 +pandas/io/formats/xml.py,sha256=Yo6y3DletZ1QhjtI7lSD7QTiHY8G2IYjZ_8EvG_KCYs,15674 +pandas/io/gbq.py,sha256=xheqv174YvQgDzg3YuXknqozXYIjgVuMrh1vJB0duRg,8655 +pandas/io/html.py,sha256=RxCXef5sKS_c6wOjQqUQMbPwuD4MWjjZJ1zork_elXo,39534 +pandas/io/json/__init__.py,sha256=xLQhyOvbR-uicDsltb9jijfWblNesFGWOFzucDPMhtY,276 +pandas/io/json/_json.py,sha256=AnhlPGKwGXD8KmucPP07aXv1GjqY_7KODha76dZJECc,47223 +pandas/io/json/_normalize.py,sha256=rbyrEKwuxotrABiv6Jmb9JN6k6rCXd99ONrEZv2IbXI,17212 +pandas/io/json/_table_schema.py,sha256=hWfnJLoAuEwv1J3tlkptjtoOBPBAecghU983sjJFCKY,11271 +pandas/io/orc.py,sha256=6KB4v7ne_qxXptoBk5ymQf4cO6xdncMeFosZ7HyaJQs,9091 +pandas/io/parquet.py,sha256=mxDEkbEqwOcRYwfk9QqF27eIE4kYRV0sEVpkwp1E8SA,23831 +pandas/io/parsers/__init__.py,sha256=7BLx4kn9y5ipgfZUWZ4y_MLEUNgX6MQ5DyDwshhJxVM,204 +pandas/io/parsers/arrow_parser_wrapper.py,sha256=lr33zBpeFVC_7d26H77Z32qBVYtp626m7v_TVvK261o,8407 +pandas/io/parsers/base_parser.py,sha256=LumibELH86hfMxPALLeSv61RDTmyOMDWywsyc7ow7HI,48453 +pandas/io/parsers/c_parser_wrapper.py,sha256=yXK-ZrUOxZcXdZ9rtINgRl7l426tdoch8GyZIS_nCMI,14199 +pandas/io/parsers/python_parser.py,sha256=rs93Vb3Vuk1at9XgZqQjznNw8PiHccD1j0Wa7sd2cBM,48317 +pandas/io/parsers/readers.py,sha256=Df55CQixT9CTueT4uCWuvBp91GW4WK8pAjhGNrNlHGI,79848 +pandas/io/pickle.py,sha256=c407mX8geb5X_NK-nWhxAU3xlKbo7Wh_aSG23CyT6AI,6648 +pandas/io/pytables.py,sha256=az0EqP9uSZDx8bUmwhohbx77K-42XvS8Srv_FqWpC4o,176270 +pandas/io/sas/__init__.py,sha256=AIAudC9f784kcEzuho8GiXU63vj2ThRitKznl7Imkq4,69 
+pandas/io/sas/sas7bdat.py,sha256=c7x8yrhh5nbFjbhuCQWCq_kyThcPIrjISH4Ruw1yPEU,27195 +pandas/io/sas/sas_constants.py,sha256=CM1wSNzXn6nkjLMSTeBhBJlL6d0hU-1YdNwEO8HE-9U,8719 +pandas/io/sas/sas_xport.py,sha256=HaSGz4NXj39AHxU1mvbOidBefhQ2b46BIG4egMgOqLk,15126 +pandas/io/sas/sasreader.py,sha256=4p2TzdIZrfk2PCf0rEbeyhbmmX63TwFFPY_u0ZoZ4LM,4981 +pandas/io/spss.py,sha256=ggl2fARjISOdiej6i7TnwxEzz-DzYmoRNXO2ekAn-6M,2142 +pandas/io/sql.py,sha256=kAY68zK9HW-TKNWZtL15xO_9ldHlv6MqFts-PImBVl8,87060 +pandas/io/stata.py,sha256=9ot-aE_RpCNWJck1BBPXovmfYZkKDiugzVjQO3eezZI,136929 +pandas/io/xml.py,sha256=7HKwjdM3XxRaguXz0e9t13WP91nezufvlGM7S-yb58A,37760 +pandas/plotting/__init__.py,sha256=W_2wP9v02mNCK4lV5ekG1iJHYSF8dD1NbByJiNq3g8I,2826 +pandas/plotting/_core.py,sha256=22NIHO5wxzXpNK-eQdcaLHpkIgow8C9H_vtOcOmpQvE,66567 +pandas/plotting/_matplotlib/__init__.py,sha256=jGq_ouunQTV3zzX_crl9kCVX2ztk1p62McqD2WVRnAk,2044 +pandas/plotting/_matplotlib/boxplot.py,sha256=FmhkzWJz1bmMLGILY3Ch1i1yHI8ar1L9Ihk45jpSWwI,17763 +pandas/plotting/_matplotlib/converter.py,sha256=6NDYukB7Gzp7lYTrgd6WH4QDip0jXI4jff4t4GvwRaA,37361 +pandas/plotting/_matplotlib/core.py,sha256=Ouj-J6VPLI-Ky1CvF_sPdO5JVvmQh4O7xkmywGRNMjY,64348 +pandas/plotting/_matplotlib/groupby.py,sha256=2aRbNAjM9q4jFF-AtTYyJqQlppfRfnnairP5pZ6gxnY,4271 +pandas/plotting/_matplotlib/hist.py,sha256=dkvdfEPX5IrhB9QrFO-v96MsLDcCBSRbf0-u0oiNHWs,15530 +pandas/plotting/_matplotlib/misc.py,sha256=tzbAVRDGc1Ep6BR3QbYAEKEHgkX2vwMBX9k9uwN-j8c,13358 +pandas/plotting/_matplotlib/style.py,sha256=HyYt7a93osagb795vBcUO6PwELGZ5HyNExncB8sZT9M,8180 +pandas/plotting/_matplotlib/timeseries.py,sha256=FNua7BFYJmB-JupCsApo8GHzp3tL9Bx7-NkdM05Huvo,10781 +pandas/plotting/_matplotlib/tools.py,sha256=YVInBxWBuOXbruDvHGyEF1zRvwb4LIw1FNgx5h_lAmI,15068 +pandas/plotting/_misc.py,sha256=VRy3A9KTVrNQ3RmzkKhTsXMiJ19__Is9rvBYoM3lYWw,20911 +pandas/pyproject.toml,sha256=FYvbzMJuj1n5Ciluv5NUrlkNIvnrCPkYuUQqtB_E5yg,23608 +pandas/testing.py,sha256=3XTHuY440lezW7rxw4LW9gfxzDEa7s0l16cdnkRYwwM,313 +pandas/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/api/test_api.py,sha256=ZQI3_TgIuolTfuKy-a4eds0io74Q4kvy8fG6NZDoj-M,9394 +pandas/tests/api/test_types.py,sha256=ZR8n_efaY7HWGY6XnRZKNIiRWmaszpNU8p22kvAbyEQ,1711 +pandas/tests/apply/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/apply/common.py,sha256=A8TqjvKR4h4WaLtovGR9hDULpWs4rV-1Jx_Q4Zz5Dew,298 +pandas/tests/apply/conftest.py,sha256=mwVPfC41ZkqEOH6yccseMVDSniNWPsG1ePeO8VYlzsw,399 +pandas/tests/apply/test_frame_apply.py,sha256=ivm87Cy4AX4TS4Prpenf6gT6VrL0waOPBe8DKzDYtGQ,50522 +pandas/tests/apply/test_frame_apply_relabeling.py,sha256=jHfewakLcFvc1nartXtElv7HM5eGUIelIcm-McXX2KQ,3772 +pandas/tests/apply/test_frame_transform.py,sha256=pYHAzqdu9XkmcNzMfbQIm-uCGxUXL8nbJv5EosBnwIA,8028 +pandas/tests/apply/test_invalid_arg.py,sha256=ZKteYeo9EUM8AbcVsZ0yL4Kj8LuE92cDVh-sU7Vmfeg,10844 +pandas/tests/apply/test_series_apply.py,sha256=5BdCPy7ZzEnF_EfVuQkyvFDXdhbXQjv6dKeRbxiBOe4,22054 +pandas/tests/apply/test_series_apply_relabeling.py,sha256=_HkoIybNJQFEpIaafHvD1Q0nx_U9J2aL8ualcwhp5Fs,1510 +pandas/tests/apply/test_series_transform.py,sha256=rrJO-C5HagNKJo542h32eB5TOWVDxirJv1u5PXJkh_I,2404 +pandas/tests/apply/test_str.py,sha256=QA1BdlEmIMH5q8jK358rtjUw3u6H1_hJiE1stiUl5Xw,10560 +pandas/tests/arithmetic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+pandas/tests/arithmetic/common.py,sha256=C_s1Zc2_0U_oBciQNt5xJp-8FaLmkscEdmnX2Nq16UY,4362 +pandas/tests/arithmetic/conftest.py,sha256=HGI64yXIv2z9D6FK9svlbrA5DHXy3xE3qWmsLb1HkH8,5771 +pandas/tests/arithmetic/test_array_ops.py,sha256=4lmZRZAlbJEnphzzwfcvsO4kEv1LG9l3uCmaF_8kcAA,1064 +pandas/tests/arithmetic/test_categorical.py,sha256=lK5fXv4cRIu69ocvOHfKL5bjeK0jDdW3psvrrssjDoA,742 +pandas/tests/arithmetic/test_datetime64.py,sha256=cOgfS-t3D6SWNt_3gGwsqOfJEnKj0Ay_C4hmmT1kMRM,89391 +pandas/tests/arithmetic/test_interval.py,sha256=2TG1Lh4VZXaxwjs5y5RjXzIukOfoVetyLfPlOo5h4vQ,10951 +pandas/tests/arithmetic/test_numeric.py,sha256=2lWaNixugDH1sDk58fZ_G8pV7zBudoLvSU3YNA3WaYE,53263 +pandas/tests/arithmetic/test_object.py,sha256=Ic_IiJCcuq3kC3wFQDFVbt62Jv8JfiYnYxy1Df5RIUM,13080 +pandas/tests/arithmetic/test_period.py,sha256=MqBdH5l2qGzoq7mRrbig2_iu-fboxd1tO9cSX4dCQJo,57357 +pandas/tests/arithmetic/test_timedelta64.py,sha256=GczTezMa8Z_2q-pR7lWfvKvA0qvxEXC4d8wd_1AAtw4,79106 +pandas/tests/arrays/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/boolean/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/boolean/test_arithmetic.py,sha256=y7lw5kCLywP1cZ51FQdAb05wenb9tLQTYhWX-mJ_jkQ,3964 +pandas/tests/arrays/boolean/test_astype.py,sha256=0AEVw8lNNjHomdqgpQ7ZYCauUb23QHvxY3NPDe7vIQw,1614 +pandas/tests/arrays/boolean/test_comparison.py,sha256=QIX85ffCwMvtzXtLkWePFQkso_mVtIffWpbgy4ykEz0,1976 +pandas/tests/arrays/boolean/test_construction.py,sha256=_NwX72fhihM7MMJNTInA8sSUwesII9cegIJ1PBwIgEY,12410 +pandas/tests/arrays/boolean/test_function.py,sha256=eAVsu1XUeokLh7Ko0-bDNUQqmVrGAyOvv9vJdWCQj0M,4061 +pandas/tests/arrays/boolean/test_indexing.py,sha256=BorrK8_ZJbN5HWcIX9fCP-BbTCaJsgAGUiza5IwhYr4,361 +pandas/tests/arrays/boolean/test_logical.py,sha256=7kJTl0KbLA7n8dOV0PZtiZ7gPm65Ggc3p0tHOF5i0d0,9335 +pandas/tests/arrays/boolean/test_ops.py,sha256=iM_FRYMtvvdEpMtLUSuBd_Ww5nHr284v2fRxHaydvIM,975 +pandas/tests/arrays/boolean/test_reduction.py,sha256=eBdonU5n9zsbC86AscHCLxF68XqiqhWWyBJV-7YCOdA,2183 +pandas/tests/arrays/boolean/test_repr.py,sha256=RRljPIDi6jDNhUdbjKMc75Mst-wm92l-H6b5Y-lCCJA,437 +pandas/tests/arrays/categorical/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/categorical/conftest.py,sha256=bpaY2AaezX3c1bd6XvlSqZN1EueYwDdKL7ZBcBzNHj8,359 +pandas/tests/arrays/categorical/test_algos.py,sha256=SLguZHlE5eyi14kRoMUGpIohPJM7jQqboKlnTvidpg0,2710 +pandas/tests/arrays/categorical/test_analytics.py,sha256=Bl7A_lPouoS7uK8EnybqvtMXp6WatI7U89OQwMecWVY,13213 +pandas/tests/arrays/categorical/test_api.py,sha256=6IGO0GoufiKpSYxjvfHgT0mJgPritzzd1ndZ_8P3EIk,19802 +pandas/tests/arrays/categorical/test_astype.py,sha256=cynPqUGtYUstwrYJkHRQ81DTansqMi1mCyIExEk-z0w,5539 +pandas/tests/arrays/categorical/test_constructors.py,sha256=QomKImiJusGyjxGi1jGF92Bx1ZuLFpRrlBcqTyc6cLc,30508 +pandas/tests/arrays/categorical/test_dtypes.py,sha256=h1ZhuPvbHp9aFA4doAkmQ96zQW4A5UX6y6Yv2G5QTb8,5523 +pandas/tests/arrays/categorical/test_indexing.py,sha256=9nLaQ1oNXspmSnoXEzvLGpslqKfGloy0bzYbCCa30uA,12792 +pandas/tests/arrays/categorical/test_map.py,sha256=TO6GY6B2n2dhkcNRQinbvID9eBfwtVnWsT1yexQg00U,5152 +pandas/tests/arrays/categorical/test_missing.py,sha256=5KdSj982_KUkfB8Cg-l7Jcir5I8n7Gz6SbnHnIqmu8A,7814 +pandas/tests/arrays/categorical/test_operators.py,sha256=_FzkmoVH4J9buEDXSgcG0S38KlrrnBsW7DIiKq0sjrM,15816 +pandas/tests/arrays/categorical/test_replace.py,sha256=s9gMHjbaUsRR90tK96JPX8JEFYsIHDOcD46bEXE2a0E,3327 
+pandas/tests/arrays/categorical/test_repr.py,sha256=EVwlZRcGFOFD6aZWgOSpwsCAxcr_P7_ko9GHFmHxuyw,26514 +pandas/tests/arrays/categorical/test_sorting.py,sha256=gEhLklhDxhqf8UDOB17TMKhrabxS5n0evPg9DWSMd5s,5052 +pandas/tests/arrays/categorical/test_subclass.py,sha256=V3OSKErKdD_3kEbYLl0i8FhJCyjukOm9AVI3nMmdT74,868 +pandas/tests/arrays/categorical/test_take.py,sha256=WNAku8I6fNHhcaN6rSIPNv3ZYi9yCpTu58TXzbMwFOc,3349 +pandas/tests/arrays/categorical/test_warnings.py,sha256=XqvGeAb9lrXP1VdwKSOvbDuytqDuJ5VSDsLKQAa5gIk,682 +pandas/tests/arrays/datetimes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/datetimes/test_constructors.py,sha256=sJBX-Km4IVfOPtXwAKBQCa-TTI88rpDAaElUW66mEcc,8954 +pandas/tests/arrays/datetimes/test_cumulative.py,sha256=DcdVsskzOS4u_Y9F2snzxCMjJPuHCrB0Ubb-FjBPCoU,1311 +pandas/tests/arrays/datetimes/test_reductions.py,sha256=Vw_9fBZJStFgGuavTu0HBAkJr99ck8WmF4A2VSorD1w,5770 +pandas/tests/arrays/floating/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/floating/conftest.py,sha256=PkAOd0oDvePBtXL-N0MnmEGCmDMP3_Dw-YwpxgNfl-k,1161 +pandas/tests/arrays/floating/test_arithmetic.py,sha256=h4hec6yPaBq-cr8Uh4MLgzT4Nn2K5QVO7YBbxSlLrGQ,8059 +pandas/tests/arrays/floating/test_astype.py,sha256=pvgAFQ0bTRyuoBpgmiyQza_zPOXBC7RYdGJc7F6tP4c,4047 +pandas/tests/arrays/floating/test_comparison.py,sha256=C-rwNTv5FtUvo3oWB8XNquCOa_XQHf6R9JRYX6JVAG0,2071 +pandas/tests/arrays/floating/test_concat.py,sha256=-RO-pwRRY93FQnOjBLs1fMVf7uBCoEGRkGWPAdX8ltU,573 +pandas/tests/arrays/floating/test_construction.py,sha256=weDvGh2hSfHmVnQ-6Kc5QmAUaGTF9mvEI3qtZSEHHAk,6455 +pandas/tests/arrays/floating/test_contains.py,sha256=oTsN_kyhRi7hHdKRzi9PzwSu2gHiE3EP4FkuR31BZFM,204 +pandas/tests/arrays/floating/test_function.py,sha256=YiXRdFHEU2iAGXwd68kDyfsjBZ8ztoC8fikZU6AnbRE,6403 +pandas/tests/arrays/floating/test_repr.py,sha256=N_BX7NbU8Pljiz2bouWMzrP22xh_6w_8pHePEB2ycVw,1157 +pandas/tests/arrays/floating/test_to_numpy.py,sha256=j06KcX-U4OWoj6qLmAqiQuZXxGNv4wzhaUkP8YfKY48,4987 +pandas/tests/arrays/integer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/integer/conftest.py,sha256=TejO1KxvoPETsN-ZdefGePhwJ-szaoYanP9AQXHgY18,1555 +pandas/tests/arrays/integer/test_arithmetic.py,sha256=9SFZO4gc1v1s44NGp_v7aonBQq_3UJc6-VQrOGM2hl0,11942 +pandas/tests/arrays/integer/test_comparison.py,sha256=jUr8dmk_6FQsTNjDkYsazWnioHis4cLi94noy4txG54,1212 +pandas/tests/arrays/integer/test_concat.py,sha256=TmHNsCxxvp-KDLD5SaTmeEuWJDzUS51Eg04uSWet9Pg,2351 +pandas/tests/arrays/integer/test_construction.py,sha256=5gNXvyAVFKeX18WRBm5eWnpxfmeOvcqYulJNOnT_KUk,7664 +pandas/tests/arrays/integer/test_dtypes.py,sha256=EeTyOZz2jwtoLCLkOi5DEG11RtUl-LUJ3g3c83deC1M,8794 +pandas/tests/arrays/integer/test_function.py,sha256=hCqZIrrISPtn_7mlX92wpQNItAF1o-q-g56W93wnyhI,6627 +pandas/tests/arrays/integer/test_indexing.py,sha256=rgwcafGbwJztl_N4CalvAnW6FKfKVNzJcE-RjcXMpR8,498 +pandas/tests/arrays/integer/test_reduction.py,sha256=XOgHPBOTRNaE7sx-py3K6t_52QZ9iMPlYAoesbFp9ZI,4100 +pandas/tests/arrays/integer/test_repr.py,sha256=fLTZusgFHPXO4orpygmHIOG6JQLzYcdbTJHRvvsN0sM,1652 +pandas/tests/arrays/interval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/interval/test_astype.py,sha256=8rb7rssqvIoSztzCfFb5pY4oIH_GjDStKrXkC6bnUZk,776 +pandas/tests/arrays/interval/test_interval.py,sha256=DCaIsTQVjf-28DAPe9uZUaVzrXmUYH8nkZNZxrQsTz8,13964 
+pandas/tests/arrays/interval/test_ops.py,sha256=4QNJBVY5Fb150Rf3lS5a6p_ScHy8U-sAuWTWetbCmVc,3279 +pandas/tests/arrays/masked/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/masked/test_arithmetic.py,sha256=wchNK8BesRBPSclagK_egl_EG9J4KPCquzL9iRZOK20,8175 +pandas/tests/arrays/masked/test_arrow_compat.py,sha256=TziOFeF8P3kP3sfg5_-P7S5NYv6tjxh_KlYNOZemeTQ,7100 +pandas/tests/arrays/masked/test_function.py,sha256=wUM1D1dDTDHda7rsEPnfClhxRAK8lOMc-HUTd5F_kNw,1489 +pandas/tests/arrays/masked/test_indexing.py,sha256=xjr8EECp7WStcIeEY8YNhmkZ90Q2o-l3izolkLpG2W0,1916 +pandas/tests/arrays/masked_shared.py,sha256=ANp_CU9Hcly9-NBxknm7g-uWxljstTmriq3S8f5kPsM,5194 +pandas/tests/arrays/numpy_/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/numpy_/test_indexing.py,sha256=-0lB-Mw-gzM4Mpe-SRCj-w4C6QxLfp3BH65U_DVULNY,1452 +pandas/tests/arrays/numpy_/test_numpy.py,sha256=56QYJmxvcQ_Z_UTYFBYdwWGlWXMF8f7lvBkqd_nwhsk,8763 +pandas/tests/arrays/period/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/period/test_arrow_compat.py,sha256=b4NL6y_6q2U3V768ZPZOZSjK3BNYiGJBMLWscZbLL0U,3614 +pandas/tests/arrays/period/test_astype.py,sha256=gMjT5iIblB7olJdX_G43yYR0LigvuboKIqbl4t6xVH0,2331 +pandas/tests/arrays/period/test_constructors.py,sha256=46ou2R2KmUaVfcRAxRNlfetzHWBJTMHKY4ujdzQN9rI,4748 +pandas/tests/arrays/period/test_reductions.py,sha256=gYiheQK3Z0Bwdo-0UaHIyfXGpmL1_UvoMP9FVIpztlM,1050 +pandas/tests/arrays/sparse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/sparse/test_accessor.py,sha256=EReITkC1ib-_36L6gS5UfjWai_Brp8Iaf4w7WObJZjM,9025 +pandas/tests/arrays/sparse/test_arithmetics.py,sha256=TC2Af6gA4OkRIxDTWy_5jmHNIrgsqWGmOVF707wOn8M,20152 +pandas/tests/arrays/sparse/test_array.py,sha256=HbW0y7KLlWPz3QI6gtE44ZRZF5vS8ZwjM3IjOQfNNSQ,16794 +pandas/tests/arrays/sparse/test_astype.py,sha256=JwcFBWzfg2KOv9_6GsP0oV4WWDmFugT8dHrXDWCLZwM,4763 +pandas/tests/arrays/sparse/test_combine_concat.py,sha256=3NMQXaRQc7Bxn5HhSHffcUE24GZi_VYflnFLnixOgbs,2651 +pandas/tests/arrays/sparse/test_constructors.py,sha256=N5GJ8SrwVZ4hNGaM_QlALl283EM13nSVbtO8uBRSAwY,10835 +pandas/tests/arrays/sparse/test_dtype.py,sha256=xcZIrh0SPqvPzMt9EbMF04ADSu5Xueemvl81llkjq64,6122 +pandas/tests/arrays/sparse/test_indexing.py,sha256=QlKqCPx2WXwMDenA12rvTxZx5KEhRvMwEG_ifjYR5tM,10022 +pandas/tests/arrays/sparse/test_libsparse.py,sha256=_hfr36t-jm-QOhI9Gwbd6sQZI5aVWMMixHY-OYOqKuM,19293 +pandas/tests/arrays/sparse/test_reductions.py,sha256=D7R_jhlFtmH8l-tERmhtP1K3KbcAyPuyIy_Y_gVcN6Q,9721 +pandas/tests/arrays/sparse/test_unary.py,sha256=GtqeMdylKdtu-0HPxmTDVjo32riOcEtqPhjI_XK5LkM,2864 +pandas/tests/arrays/string_/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/string_/test_string.py,sha256=kYa8h3rrSWl7KFYMhOHbwUktfIbjPeXFBOF4u6bYRY8,22773 +pandas/tests/arrays/string_/test_string_arrow.py,sha256=_HUtytojBkVGyhO1aMgiUVvIXnLYpTwJZ18W57WG8uM,8783 +pandas/tests/arrays/test_array.py,sha256=qik2G5JwOJAgAmUm2ymBC2nFn3aCT0WO4YXPBPEUhCI,14491 +pandas/tests/arrays/test_datetimelike.py,sha256=EDcYrAePndQJjOOfaeFWv_Fdyf6N9-26Hli1LdDAfoA,44828 +pandas/tests/arrays/test_datetimes.py,sha256=XX7rVZ4UMVdSJpu8q-QHNDvO7zP9RwiZTE2nzTITFqQ,26090 +pandas/tests/arrays/test_ndarray_backed.py,sha256=6unFuF9S6hG5FDJDjiqbKg3rL8ItzJQHwY9vMdju4-0,2331 +pandas/tests/arrays/test_period.py,sha256=0Iv5uAhrzDIOsUT8BkuprNY6fzvnf_6Wh7s9P5BeQmw,5572 
+pandas/tests/arrays/test_timedeltas.py,sha256=qZAWGh9yAbkjH2yU7sQUYqFsEcR9UbVTeHXBxW0cOYo,10643 +pandas/tests/arrays/timedeltas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/arrays/timedeltas/test_constructors.py,sha256=EBDW0Lbq-RNvGYHQHE8qZO0RAc_L0_s9YNgeaDbP1Jk,2353 +pandas/tests/arrays/timedeltas/test_cumulative.py,sha256=AXeC2lMVWiiBguVH_kH2c5Pv3kes3IvTPm0Ru7_SH9M,647 +pandas/tests/arrays/timedeltas/test_reductions.py,sha256=rMoKvgf6wsiBSl6fedjN-P6LLOn-CHMF3PMfow9KE-g,6434 +pandas/tests/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/base/common.py,sha256=-cLXvhzuQi0XMfU-NdqTQAiruN0MU9A9HE2goo7ZzJQ,266 +pandas/tests/base/test_constructors.py,sha256=EhQ8iFnHFdAqtDRJhyCWyavNsHmBmSqwvLjvZJNA06E,5113 +pandas/tests/base/test_conversion.py,sha256=xzmrYiq58Svz7sBj3KNuAlmP6jGAi3UWaEVp38UyUiQ,16967 +pandas/tests/base/test_fillna.py,sha256=q9LZhUp2HXaVQw4wSxK0VU4Z9z62WI12r9ivsZu0gOg,1522 +pandas/tests/base/test_misc.py,sha256=edVVjG5KHMwXjm_DJzMFTlNU0uTm7b4Cte3ICvc8imQ,5807 +pandas/tests/base/test_transpose.py,sha256=138_O_JwwdCmfmyjp47PSVa-4Sr7SOuLprr0PzRm6BQ,1694 +pandas/tests/base/test_unique.py,sha256=TBreSGIvlY3y5U3gBDGFDpC89PncHIqweMnN-n_roXg,4241 +pandas/tests/base/test_value_counts.py,sha256=pRINilZG2eFE0wvJm7lKDNAnMNW9W3eKLBCzONrd-0o,10846 +pandas/tests/computation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/computation/test_compat.py,sha256=dHstyvdaXybrwm1WQndV9aQBwOsOvCIVZb5pxLXsYfM,872 +pandas/tests/computation/test_eval.py,sha256=eeDRzjX-MxAUj_Iihcumu3ZeNLva06MMcAAESZWzDQs,69664 +pandas/tests/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/config/test_config.py,sha256=T3PKV_lWTp_4ZU566fpWt_N9_tr3BfsxHlJ_vqnQiiQ,15858 +pandas/tests/config/test_localization.py,sha256=xC7SJfih_Kus5WGpSWZdwyAQR3ttgpsxxlNesbwrYfM,4479 +pandas/tests/construction/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/construction/test_extract_array.py,sha256=L3fEjATPsAy3a6zrdQJaXXaQ7FvR2LOeiPJMjGNkwKQ,637 +pandas/tests/copy_view/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/copy_view/index/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/copy_view/index/test_datetimeindex.py,sha256=NqzOzce51_sUU-OszlAtyEBKWzivo8vFxFLqbbw5q2Q,1883 +pandas/tests/copy_view/index/test_index.py,sha256=2cmyzQPvxuKOOT3vz92ynJQXN8uM2NA3vOO4a_RCNYI,4907 +pandas/tests/copy_view/index/test_periodindex.py,sha256=p1GRBCLeEaFkcWfQp2L3k-EAKxUbjp3EgjUNpjdghyE,556 +pandas/tests/copy_view/index/test_timedeltaindex.py,sha256=PQa9rEKiDLgplDmOmMrKOXWaTrlVoSwxtMPSsmICa5M,564 +pandas/tests/copy_view/test_array.py,sha256=FdmvpjFfbk2MBX5JaoCkNUjbWcp88IEuOTyJWocxxyE,5680 +pandas/tests/copy_view/test_astype.py,sha256=4X0aJTYYVm78DG_5VHfktFloh5zHSY5SR9L21lCeFtA,8724 +pandas/tests/copy_view/test_clip.py,sha256=nUlXcsLPZPZyqckFVg1wLdA-8GJzk1WEZp8vL9CWUKQ,2435 +pandas/tests/copy_view/test_constructors.py,sha256=aGIrNKFDWm8Fo5EaZbMfDuccQWS3EdrEARd7wS42vT8,12557 +pandas/tests/copy_view/test_core_functionalities.py,sha256=w9-8cwH-OJUlfFaNgW4wTz5ewZ6hDr77jxT2OKQ1eXA,3185 +pandas/tests/copy_view/test_functions.py,sha256=FZP92GSOEUNCVogDxngdGS2eodNwhw7w7Xs6jQgZGyg,15505 +pandas/tests/copy_view/test_indexing.py,sha256=vFS9LGe-6NHEwJWuILnnKajd9Bb2B52BT8QvGv7j5qY,37598 +pandas/tests/copy_view/test_internals.py,sha256=mBEJH08zBch3LBtSzU7wXqBKc01uH2GTzZgUx3otcC8,5020 
+pandas/tests/copy_view/test_interp_fillna.py,sha256=uRVKDboJjig-k_WE4bmnpH9QcxC9wh6UI5AMo0wykr8,12893 +pandas/tests/copy_view/test_methods.py,sha256=Bhd_fidQkpPWAgvvs-2nWdwsGi0Sgf0V0XXa_VUqrj0,65907 +pandas/tests/copy_view/test_replace.py,sha256=7-V689Y86Fp9n8mIeQXYe-RdJI_LkALsTUvOAlJSXkI,15040 +pandas/tests/copy_view/test_setitem.py,sha256=dXoE8HwMe3eh7zGCX3OzS0-b-idkymPba7_Xz5XCL8w,4371 +pandas/tests/copy_view/test_util.py,sha256=ClWLprMJhf6okUNu9AX6Ar9IXZgKkY0nNuDzHRO70Hk,385 +pandas/tests/copy_view/util.py,sha256=oNtCgxmTmkiM1DiUxjnzTeAxCj_7jjeewtby-3gdoo0,899 +pandas/tests/dtypes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/dtypes/cast/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/dtypes/cast/test_can_hold_element.py,sha256=2zASUgxB7l8ttG2fKjCpIjtt_TQ7j4NJ2L9xFzcyUPU,2408 +pandas/tests/dtypes/cast/test_construct_from_scalar.py,sha256=INdOiQ7MowXLr6ZReCiq0JykUeFvRWocxk3f-ilk9v0,1780 +pandas/tests/dtypes/cast/test_construct_ndarray.py,sha256=YXylbW1pq_tt4lgp33H5_rTicbxc5z6bkkVH2RC5dgc,1101 +pandas/tests/dtypes/cast/test_construct_object_arr.py,sha256=eOmUu4q0ihGTbYpCleoCnYtvwh1TBCEZQQjLeJaUMNA,717 +pandas/tests/dtypes/cast/test_dict_compat.py,sha256=qyn7kP5b14MywtqOUL5C-NOvjf2qK4PsXGpCvqmo-4E,476 +pandas/tests/dtypes/cast/test_downcast.py,sha256=FeDtnzR-oBOwDwLa-x0bXX_F3Ir2H4PslluetWEecmw,2766 +pandas/tests/dtypes/cast/test_find_common_type.py,sha256=aqXcBVOLm53vUj-7U98gSpelOJki92IN-v8vrSuZAdA,5226 +pandas/tests/dtypes/cast/test_infer_datetimelike.py,sha256=6vor_eqEbMKcBLEkfayXzVzwwf5BZcCvQhFZuqhvyKU,603 +pandas/tests/dtypes/cast/test_infer_dtype.py,sha256=apTTrXlotmE1g1Og0Qiptm7umPsXn7QpnHXSyvaf4GI,5736 +pandas/tests/dtypes/cast/test_maybe_box_native.py,sha256=uEkoLnSVi4kR8-c5FMhpEba7luZum3PeRIrxIdeGeM4,996 +pandas/tests/dtypes/cast/test_promote.py,sha256=ZFW9PQMgMqdoXRiYH3rY6rm0EVMHwKpcnPzW8JY8Afk,20699 +pandas/tests/dtypes/test_common.py,sha256=G1oKHden_APT4_VEygVk1fvoaWus2baCKZ8VSJl8L00,26217 +pandas/tests/dtypes/test_concat.py,sha256=vlsumyKcJ7b8EdJKONU5txCA34zMaoKDvA0KmcuP8XU,1799 +pandas/tests/dtypes/test_dtypes.py,sha256=4ez16d-CnDlcTTJOUiZtPRUDfdlNMg11tqV6HUwqk8g,43199 +pandas/tests/dtypes/test_generic.py,sha256=qKS9tORYqHLjZpq1fVn7rTcbV65fntEGBUSDMCW8Rfs,4811 +pandas/tests/dtypes/test_inference.py,sha256=hoMPGwNtyzWvq91MlSB3T-C-4wY4TgY4dqgIdTqoZI4,68693 +pandas/tests/dtypes/test_missing.py,sha256=zlkI9KSHpcFiCOITnsLOY0hQL8FBepD4MuJlvfuLoSA,30214 +pandas/tests/extension/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/extension/array_with_attr/__init__.py,sha256=bXkwWSW6GRX8Xw221iMyaQOQVaWmyuRP3tGhvjXtiV8,149 +pandas/tests/extension/array_with_attr/array.py,sha256=VsqxWsjJjmeVv5tN33wsW_iK13Cf5vrCvYZ-RxBmsVw,2398 +pandas/tests/extension/array_with_attr/test_array_with_attr.py,sha256=TuuBA1lCxjVOgWsWM9jhgc-PyGuXzajO3UWWKZEquZA,1373 +pandas/tests/extension/base/__init__.py,sha256=yQtUNnjRE2wlozzf9wcLafI4Bgm9pnFc0DO2mosaGUE,3075 +pandas/tests/extension/base/accumulate.py,sha256=OnG8rM8KNJ1IhGrssp7iA5CWyXOFKsj95DlirW-tHtc,1479 +pandas/tests/extension/base/base.py,sha256=aSfTPvuvzzQUxEIrGUASWuwcVv6Uw5bvkFXvqjhRV1M,35 +pandas/tests/extension/base/casting.py,sha256=KWGZGeC1Kh2mDXUto7Xap6lkSja8661Qi1g58HgFpSM,3077 +pandas/tests/extension/base/constructors.py,sha256=UQ3kCHj9dE0tPR6dGncrB8f3KzsNTnJYg-wUsZpg1H4,5591 +pandas/tests/extension/base/dim2.py,sha256=qWwdi6U09pflhVT68r7SSSnAsJ_DCK5JIgwtpkZKVIQ,11459 
+pandas/tests/extension/base/dtype.py,sha256=DUBaPmRwh2fusxGViv25MoBv0sGvdq8MmU12zKKt_ug,3888 +pandas/tests/extension/base/getitem.py,sha256=leq9dxp_KexAv7mhexLCWXcIMKNBPOVfhFv6Nuc5PkQ,15673 +pandas/tests/extension/base/groupby.py,sha256=vIqbS1bXl0YlHQ3nhP4X7dM8X2UhDvzpg4Hj27fCTQs,5891 +pandas/tests/extension/base/index.py,sha256=fD5Jugbt_39nZ1eVjPNdAgoDRuNXTcnZB9lA4w687vM,517 +pandas/tests/extension/base/interface.py,sha256=jMknymZnUiQd-FFJhtzXMOu1JGrM47mlycDf1LGbo_g,4402 +pandas/tests/extension/base/io.py,sha256=n9WJ-hcKGqPFUT1jTv8zun1J4zw9VvV-_bayDSsEar0,571 +pandas/tests/extension/base/methods.py,sha256=SQyNmXl5A1IodFADh9jL9paAqa1zmMdaziglq_s-Xtk,26069 +pandas/tests/extension/base/missing.py,sha256=bk6hmbOUUaHPixTif3xJDNzVW7Hxy4PQZxZ9pjmiIag,5577 +pandas/tests/extension/base/ops.py,sha256=C4poPXr-Y1jlzetQ-X0knQtN7JqVru8GI4sHDGKtFVM,9717 +pandas/tests/extension/base/printing.py,sha256=pVwGn1id_vO_b9nrz3M9Q_Qh9vqDqC0eZHom0_oGr-A,1109 +pandas/tests/extension/base/reduce.py,sha256=ExiCUik5rQBidXz2dChw2CqJFH2YHiV3-9wk4RyKS6Q,5628 +pandas/tests/extension/base/reshaping.py,sha256=a_aMR8QUF_TlhFelgSRvbFoUg5Vlvgm2doOrEYhWuKk,13732 +pandas/tests/extension/base/setitem.py,sha256=YyE0jkCbtK9pX7yuQnSchPOVtYTe4lb_5IX5WoFeeVI,14764 +pandas/tests/extension/conftest.py,sha256=h7WNXB_L4m_-1thzN3vBz7T7i8BTpcjxI1C4OoAtidI,4852 +pandas/tests/extension/date/__init__.py,sha256=-pIaBe_vmgnM_ok6T_-t-wVHetXtNw30SOMWVWNDqLI,118 +pandas/tests/extension/date/array.py,sha256=ZfQRH2qIR7yXeePBGQFTvzxduPIiJxM8VA3DE_5L0H0,5798 +pandas/tests/extension/decimal/__init__.py,sha256=wgvjyfS3v3AHfh3sEfb5C8rSuOyo2satof8ESijM7bw,191 +pandas/tests/extension/decimal/array.py,sha256=u2Rk1yimffhRh7ydVfxHDv93OuclO445Wadqf5UvwfQ,9624 +pandas/tests/extension/decimal/test_decimal.py,sha256=KRxj5CcmpsWTehqEQFfwq801BBiAdh24j7IEoiqqFzA,18383 +pandas/tests/extension/json/__init__.py,sha256=JvjCnVMfzIUSoHKL-umrkT9H5T8J3Alt8-QoKXMSB4I,146 +pandas/tests/extension/json/array.py,sha256=aSxWxEc4wxRUa63v9FADI5uG7Nti93Z4_XLwiSoB2TQ,7874 +pandas/tests/extension/json/test_json.py,sha256=_rHw-MVkWrZ8gOHaCoHD0wQexRINVjW3zaizwqwhPO8,12176 +pandas/tests/extension/list/__init__.py,sha256=FlpTrgdAMl_5puN2zDjvdmosw8aTvaCD-Hi2GtIK-k0,146 +pandas/tests/extension/list/array.py,sha256=V1j7PGez0zlfICAv5HlxfvYiQlhcYTxoUjs0lJ_QMqA,3903 +pandas/tests/extension/list/test_list.py,sha256=XyGJ1tWEgjIZVtZ3gP0x6sAgK_8w87Kfu91I1PbVCy8,668 +pandas/tests/extension/test_arrow.py,sha256=NHfBc3mueyW8mMIwZMW5fNVRwOzJnR6j90IN2J058dw,106218 +pandas/tests/extension/test_categorical.py,sha256=hGhBaxRnG9IC19nCuxkT3JNk7kCw7ZdN4_awM5ELr5Q,6983 +pandas/tests/extension/test_common.py,sha256=KUcrkiwK7bW1AgocIj5QrARr_3UgxvEm5mVR6MRMe1s,2870 +pandas/tests/extension/test_datetime.py,sha256=vTgMn9kUvNxYUTixzWoesDjkyVA8ElYVlDM1QO-QyOk,4107 +pandas/tests/extension/test_extension.py,sha256=eyLZa4imT1Qdd7PCbDX9l0EtDu39T80eCrSre2wmTuE,559 +pandas/tests/extension/test_interval.py,sha256=YsNkD48QWPhw6bJQUIBZl1mDdQ1_L9Ifpv-T56EMFjE,3046 +pandas/tests/extension/test_masked.py,sha256=g6jhFiCDM0nw7LUZszaplE9BhmKC6sK1dOYbiNi5TUA,14123 +pandas/tests/extension/test_numpy.py,sha256=8rijY-vVqD7-W4wb0RWkwW3pKQGS-4RthJRaQO4OtPg,14920 +pandas/tests/extension/test_period.py,sha256=Eo3V3JF1VLRvCFuw4t9I2EWY7G8xCA3fGNssMwoJDXI,3537 +pandas/tests/extension/test_sparse.py,sha256=ahGMdLAo6dsmNRYZ_AOCnbVHeFG6bPsVpI6Te4G-nm0,15555 +pandas/tests/extension/test_string.py,sha256=1imXTYevhbv-gSm2Od26PPg2XWAWcZOAM4j8xCnzOtY,7428 +pandas/tests/frame/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+pandas/tests/frame/common.py,sha256=BmnEMlREF7G0B5zdaJRsdzqIRdh8diiTisBbCVI6Fp0,1873 +pandas/tests/frame/conftest.py,sha256=4A8qUs4pJbdzpOXn1J_F0SkIgRS6-sVwIVxXrwtagdg,8497 +pandas/tests/frame/constructors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/frame/constructors/test_from_dict.py,sha256=s7KETyaC7YF1tI2CuqhM6EmTh71U1uJqLQcD4Zyv4fg,7375 +pandas/tests/frame/constructors/test_from_records.py,sha256=yHRO7-igllxRgAGwcxZEucgjGEzKwO930rFZ07fQc_o,18366 +pandas/tests/frame/indexing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/frame/indexing/test_coercion.py,sha256=rHCkOLIlUkukh-P0XzPMtD4B8Lha3i1hqdvvZwCIAm8,5991 +pandas/tests/frame/indexing/test_delitem.py,sha256=-YERBfZbhTZ3eKzjmWln8AjoQEO7Yvae6elau4njhM0,1832 +pandas/tests/frame/indexing/test_get.py,sha256=N00_igU25_HjYuvAqDQKqBpqbz6HjB97o9Exvbo9BzM,662 +pandas/tests/frame/indexing/test_get_value.py,sha256=A-GbCHlbDfVPGB10dNGnGg4DtrKrlRbRspYfuDTUmPM,679 +pandas/tests/frame/indexing/test_getitem.py,sha256=IV1UCq9MNBftz1C3t0ohnKzYwPoY_Dw38fV1tmba_aU,15121 +pandas/tests/frame/indexing/test_indexing.py,sha256=dtC8Mn_9_YjSI_K_wDPmCowhUS33BZGIvpZjM7ah8JA,68175 +pandas/tests/frame/indexing/test_insert.py,sha256=Frq5nt-l0aKpa9EQ13ghO-gMYouKfD-o0haiWe-AR1A,4114 +pandas/tests/frame/indexing/test_mask.py,sha256=1Bql-TBfyBDmlXkECYXk-ZH_y4SPSOZYjCR2Ex7Km1k,4862 +pandas/tests/frame/indexing/test_set_value.py,sha256=cnKKXuy4o0YibDM35Y9VmI9nWzmhIR2GvG4vtF-zNqo,2635 +pandas/tests/frame/indexing/test_setitem.py,sha256=KuFN8tlfM1Rva6HGLCdrg600RkzkrxmJFATvEvoHeRc,48905 +pandas/tests/frame/indexing/test_take.py,sha256=SMBM5BO7ybxTq8gTAX1Qg1UW8vcNiRrHTQwrt1f-Rig,3230 +pandas/tests/frame/indexing/test_where.py,sha256=8qKvrhmPY6HK_LtwX1o6knj9cfxsxnBcE1_EZ71fug8,36783 +pandas/tests/frame/indexing/test_xs.py,sha256=PUZaKsEGbQKMyNWKlUQLdGDlJaUPYG7Eoyzb3FDX3r8,15590 +pandas/tests/frame/methods/__init__.py,sha256=M6dCS5d750Fzf9GX7xyNka-SZ2wJFCL66y5j-moHhwo,229 +pandas/tests/frame/methods/test_add_prefix_suffix.py,sha256=iPfzSPx0CArx79na7xcI9ZcPTAwq73IdOCcREVO7k4E,1910 +pandas/tests/frame/methods/test_align.py,sha256=402ShhQXLUzSxhN_rb0sBAOi_GlLs1q-Pa5t8mYHlOo,18428 +pandas/tests/frame/methods/test_asfreq.py,sha256=99hUcSjhjG-hOGfsdcCSFZJIvUH1hH6L0SiSh95XL2A,8307 +pandas/tests/frame/methods/test_asof.py,sha256=9ZvpMj08IevoRh_3xTKw1M2gh9MksI1iuoXWyyWZ_l8,6732 +pandas/tests/frame/methods/test_assign.py,sha256=xFGREzLhP1wj3MowBimeYbMWBNiII0280DiOXI6WDB0,2982 +pandas/tests/frame/methods/test_astype.py,sha256=_knN2hJtSCLShVBHZngRXEuXo9JzqBcJQb_0up6BYAo,31407 +pandas/tests/frame/methods/test_at_time.py,sha256=uO9Hsdnmx5Q1FB4du6Zjc-fg5oCCnb_OKYbfQxog-ek,4708 +pandas/tests/frame/methods/test_between_time.py,sha256=TJSKAQZW9U7H3Rl3MEn7x9sDxtXTLGVGYsN6xYLj1yY,8083 +pandas/tests/frame/methods/test_clip.py,sha256=r5yKx_hoIf0PAkZKQ8cGPVv_ix1zxowXndtXwHu8i-0,7095 +pandas/tests/frame/methods/test_combine.py,sha256=wNaQqokqHsJmrZ9NQIao58ZT0hSkkTH14I7_Oq8tADs,1359 +pandas/tests/frame/methods/test_combine_first.py,sha256=o9plrT5SVcwJTOBFwdKSuXZFKkjeC4tlimc26nBLZYI,19123 +pandas/tests/frame/methods/test_compare.py,sha256=j7Z_-yBVts4-xl1fVsJtOBAXYbLao2hwzI2x3aniFz0,9615 +pandas/tests/frame/methods/test_convert_dtypes.py,sha256=LScBE1cudmEG7roS1jpVMturHf6DusXu0vvByUPT9UA,7070 +pandas/tests/frame/methods/test_copy.py,sha256=GGDnze2TD4d9GbZXwqKJPVu8qDiILwIIsXVqbwoy4PY,1876 +pandas/tests/frame/methods/test_count.py,sha256=avzIu1dZ3pls4SM6g173M7Q4i8zMUzeAVI2EeIzWC0c,1083 
+pandas/tests/frame/methods/test_cov_corr.py,sha256=Ec27q7rJdVbc8MMrU5OvRf15zngRR-l_rgNuNjuG64I,17377 +pandas/tests/frame/methods/test_describe.py,sha256=weftB0nmdAZN__k3nKHbH_ImUd3pxB1VpQC9qnOAzdI,14500 +pandas/tests/frame/methods/test_diff.py,sha256=vf9S4hD90zJ0qvTO1YhGyBwQAXQY-Sd0yci_bsiITcs,9930 +pandas/tests/frame/methods/test_dot.py,sha256=tfZD1HWlbO78DEgdjpBctgjWHtzjC3K9essVl_5XBMA,4623 +pandas/tests/frame/methods/test_drop.py,sha256=nAuD9pig8VOknKC5uy4A7zvNH2gNIFS21wKGV6tuZeI,20324 +pandas/tests/frame/methods/test_drop_duplicates.py,sha256=XiPnDzIQg9UoqtSIM-sQUimpBJqGE9w5ZGraV-LqBWE,14512 +pandas/tests/frame/methods/test_droplevel.py,sha256=L1gAMjYYPB6eYmSppXfbwPVKa3HCNofqPVUZ3gxLldA,1253 +pandas/tests/frame/methods/test_dropna.py,sha256=9l8GBOLpvmEowzFaq0kRxN3815gJCuNamX4S5dn5Mmw,10315 +pandas/tests/frame/methods/test_dtypes.py,sha256=Hm5h_cQryeGDEaiaxxZyXPfGfPlfFw7NlAInDi7y9LQ,4973 +pandas/tests/frame/methods/test_duplicated.py,sha256=sgRx3P11WwR7AxlMhF3Z6tx4pidKhHYzbjhiD-Mu8-o,3314 +pandas/tests/frame/methods/test_equals.py,sha256=qukOrpsiPFAPC_8aFtA0qQkueesvTDN-eMnu1-Dj8qk,2945 +pandas/tests/frame/methods/test_explode.py,sha256=fPBGh0sUnJYLrE_R3xEJvz6UVPmsSYKld_Rif0HCKYI,8810 +pandas/tests/frame/methods/test_fillna.py,sha256=3OCwymQxeTWAVBjtot0l5g3fAWwcFrSpCFCiLn1fueI,30246 +pandas/tests/frame/methods/test_filter.py,sha256=oT63-WLaQv3isFsWJFtqZwxiw2J-7xZwyOOxpn-kTNo,5422 +pandas/tests/frame/methods/test_first_and_last.py,sha256=b5B8jPo-gJj0AeDs09n45NR6CZe-EptUUCXA-9nceXA,4605 +pandas/tests/frame/methods/test_first_valid_index.py,sha256=xj-5BjT8LyA_0D7HR8rxfmHtZmy_kmynwGPsVUycaLo,2545 +pandas/tests/frame/methods/test_get_numeric_data.py,sha256=3fGtzi7paa9D8ddpP6NBfuocfbmKRzd4OAc1G9kw49o,3215 +pandas/tests/frame/methods/test_head_tail.py,sha256=quuFkpS5IgonJDSb9_Po4eO3Wi5wlcNKq723EMYL6Ns,1935 +pandas/tests/frame/methods/test_infer_objects.py,sha256=LNOf2VJsV17FDT9ogEDba6la414yUmm5z_7B97nLN24,1241 +pandas/tests/frame/methods/test_interpolate.py,sha256=iaNRQLtkFJfaVt7bNCBabItL1m5AIQYvs5_4d9YWNYc,18373 +pandas/tests/frame/methods/test_is_homogeneous_dtype.py,sha256=NNyf83FGWwcQyaysOSPyRSUR-okaNUY2L0n8Bils9ac,1422 +pandas/tests/frame/methods/test_isetitem.py,sha256=VoxA-yXow_CRikJ1tlni1PsAAOT1D2X8PtTZyJOGQXU,1428 +pandas/tests/frame/methods/test_isin.py,sha256=P2TVUsL_p366aSxwWcq27VlT9zFstOXlsJSTFlw2n20,7599 +pandas/tests/frame/methods/test_iterrows.py,sha256=hfFRA20tRYmXJAoJZLGI04J131Z7QaaEbINm3FwfVbQ,338 +pandas/tests/frame/methods/test_join.py,sha256=5xATtwJyggaoQg2MSpyJwVsZgPZ6EafcGwqsTOmn4fs,17521 +pandas/tests/frame/methods/test_map.py,sha256=aXXeVZm3TavNORiXc-L1EsjBIHTfMSrRoneDgNmAC4U,5998 +pandas/tests/frame/methods/test_matmul.py,sha256=i1BG41S9da2R0nATvc3kZXsiwl5t6MHDFIb0IJ4lAbQ,3137 +pandas/tests/frame/methods/test_nlargest.py,sha256=xcYC8luGLUmU2khFwN0ke6XgBRCnGg5N3yaj744nYt4,8190 +pandas/tests/frame/methods/test_pct_change.py,sha256=s0Ho617mHdRHBEV-9cRAz3_Z_Q5BzTd_cd6MuobTlbo,6530 +pandas/tests/frame/methods/test_pipe.py,sha256=ts5ghk8g6PYXKpdsBdovBXxPGO2qq75FEVzBgjAVfRw,1023 +pandas/tests/frame/methods/test_pop.py,sha256=pXEuon1ds2Kut_Kq8q-MW0zEiCZ-RAFzAG4gHyeA-p8,2143 +pandas/tests/frame/methods/test_quantile.py,sha256=x8MbRBe7lxpIEAhyoh3E3YuSDvCUKAKPErhAu25twTA,36288 +pandas/tests/frame/methods/test_rank.py,sha256=2A94OpvfWVIkOp_j5IEh-oabuCAySH7jOOZN-evbOvg,17282 +pandas/tests/frame/methods/test_reindex.py,sha256=ZKbS_iv89oercPqS-60LXWZYtHtWhGdXLjqH7ubEQLY,48030 +pandas/tests/frame/methods/test_reindex_like.py,sha256=2qgqaHDSEKYO1hwE9MaPTFJhl4m7rejHyuOcrmvqaBg,1187 
+pandas/tests/frame/methods/test_rename.py,sha256=unaZGlItfXk183TqsHoh1_im_X36YKbZBJC5pslyavY,15351 +pandas/tests/frame/methods/test_rename_axis.py,sha256=90QFtDi0p-8bxEdFfLs75EtJQtJEOTmCdXoiS7h9F-Y,4091 +pandas/tests/frame/methods/test_reorder_levels.py,sha256=VJVEdltyRoz89mQR1Xp0A9yKlTeEFIpsPaKWQujT-C8,2729 +pandas/tests/frame/methods/test_replace.py,sha256=E1h3j1TJqLIdWwf86zanALnSokRIVCMD_IDoda8HwUI,59335 +pandas/tests/frame/methods/test_reset_index.py,sha256=kWJE4qtR1ZhFB_lORT0h4OR1f8yYp6WWvKM-Emyk0XM,28767 +pandas/tests/frame/methods/test_round.py,sha256=dcPlBxHqpKJ6JTBJskvw2CE3IYfa-Xt020jfSslwLjs,7978 +pandas/tests/frame/methods/test_sample.py,sha256=vPDSUU6oBD5X2C5rKUhIHk6o2xftm0zzMTwvuipelRM,13431 +pandas/tests/frame/methods/test_select_dtypes.py,sha256=tyVDCbySXhDo3xD3b8mjaK-T4_cG6a9nAHWYCdK27Nk,16535 +pandas/tests/frame/methods/test_set_axis.py,sha256=xiyZyjgDIO0B5HWGLeV_fVDyXj3YMDBfLyEDh5rQvcw,4608 +pandas/tests/frame/methods/test_set_index.py,sha256=lJ_MWDRDGyFpDhrs2GaTxu61kwUj0gtcK9KjROqvBqc,25360 +pandas/tests/frame/methods/test_shift.py,sha256=tMOakIajui4m-1KF_wDYzLAPqCLIjSjCU0hF5Ychis4,27382 +pandas/tests/frame/methods/test_size.py,sha256=zFzVSvOpjHkA9_tEB2mPnfq9PJIBuBa4lCi6BvXbBDE,571 +pandas/tests/frame/methods/test_sort_index.py,sha256=ZlwyJq1IS-qETUPK42uHar0B6Kw77x3SGtC3rdO4jpM,33790 +pandas/tests/frame/methods/test_sort_values.py,sha256=4OKMPqMRKr0sGzzNIgy1fad9Cn21wA0CoBfQvwepI9o,32990 +pandas/tests/frame/methods/test_swapaxes.py,sha256=-IuPIvjEz7X8-qxnWy1no5hG2WklPn6qERkmQQ-gAv0,1466 +pandas/tests/frame/methods/test_swaplevel.py,sha256=Y8npUpIQM0lSdIwY7auGcLJaF21JOb-KlVU3cvSLsOg,1277 +pandas/tests/frame/methods/test_to_csv.py,sha256=qBh5wO3MBpSQAuOD9BEnp-OmPaX_3AikGx_EfCp1DuE,48909 +pandas/tests/frame/methods/test_to_dict.py,sha256=WlQhFjpTZIstKDHsMLSGfpO3UQ13B-yEq8XMJASAGfc,17293 +pandas/tests/frame/methods/test_to_dict_of_blocks.py,sha256=zFxUTBZdFGRg05HKlWw4aGLAg6lgrWVzKBFYVVz4aEg,3036 +pandas/tests/frame/methods/test_to_numpy.py,sha256=47-d29xA6qzZYnd08lBaKK3yj9aBZ9TKkoqgguGl1oQ,1795 +pandas/tests/frame/methods/test_to_period.py,sha256=Xiebi3IA_vUKrFNftLBkhF4N0gMbpI76ZCQpqhgO4iU,2863 +pandas/tests/frame/methods/test_to_records.py,sha256=5m7Gq48a-3Lv7jTZLGFcsgvJl3OtjTs__reLVeW3kf0,18553 +pandas/tests/frame/methods/test_to_timestamp.py,sha256=GiC7hqgssghwEYc49syg3hQlZxx_EJXnKRYxJMpm9bg,5857 +pandas/tests/frame/methods/test_transpose.py,sha256=wchpypmImr7xE4X6ueY2bop2rsrZPXz71ktJDY-c6mw,5704 +pandas/tests/frame/methods/test_truncate.py,sha256=T2o8iFzcBzXPplMvJxj7n5vvfjfqIajWgcn5ro5ccew,5216 +pandas/tests/frame/methods/test_tz_convert.py,sha256=96K0xOlbFLshlhUpqBNV_U0oMxOLZ8ErPciSxW-GXXU,4708 +pandas/tests/frame/methods/test_tz_localize.py,sha256=2idbifvuEIQISOIjKWsOnpH11U3TiTEZz7CIZMWmXQk,2084 +pandas/tests/frame/methods/test_update.py,sha256=sDdlH7HpS4E_uNFz_OMUT_SKcUjbvGj2IdtZr3e0xoM,5928 +pandas/tests/frame/methods/test_value_counts.py,sha256=JdUDmRnWUQy62c-I_JnYKNGkl-wqlrVoz7uGuqcByFk,5131 +pandas/tests/frame/methods/test_values.py,sha256=ASljAwM9CEBMX6bA3FqWoSv4sOcRjuz8ZTfLSjo_F6Y,9406 +pandas/tests/frame/test_alter_axes.py,sha256=yHyCho1zs84UETsGGtw-gf3eTIyPj9zYUUA7wHTdRVk,873 +pandas/tests/frame/test_api.py,sha256=vVaA_dXS9dfOVwnO4fQIUhfifyUtJ_t3QKmAy-RFxE4,11904 +pandas/tests/frame/test_arithmetic.py,sha256=V2UMDFZs2J-nXC8vsRq4sW_39DWHP1oVj_HUsZ2lFaE,72856 +pandas/tests/frame/test_block_internals.py,sha256=OaELLl-x_Bo2ET_GQrpH5ZN-YI3tDAWF5mDZfPX0b5g,15936 +pandas/tests/frame/test_constructors.py,sha256=t4G1uBZ9JoGheVp5bBdBEzypEiCus715eloNjIwNUkI,120923 
+pandas/tests/frame/test_cumulative.py,sha256=Ku20LYWW1hrycH8gslF8oNwXMv88RmaJC7x0a5GPbYw,2389 +pandas/tests/frame/test_iteration.py,sha256=F4-UXT4xbIGSZgM5tTbl3Wsrh1_B7_oWr7IgdgGiQFM,5184 +pandas/tests/frame/test_logical_ops.py,sha256=Z0RURFogUEs8nm8vg5XLaLWyDM6BTlG9_c2v1W-xbMs,7042 +pandas/tests/frame/test_nonunique_indexes.py,sha256=LbrK-qq2G9-wNQwQA021CCII0jlMKscd_kRoPu-uAQU,11863 +pandas/tests/frame/test_npfuncs.py,sha256=DRLl7MSP7e5vRrVs3FgOooI4pZNmECurbVqkAAqvlUI,2751 +pandas/tests/frame/test_query_eval.py,sha256=5QBFZGSctvUhOLmZh0jiPMpAm6x6k0Ab1vKRPqoy9Eg,54063 +pandas/tests/frame/test_reductions.py,sha256=qyTHNs0BIXWrctvK69MxX516_KgIuONPXghB394hOE4,73815 +pandas/tests/frame/test_repr_info.py,sha256=SjCI7bT5yN5i8hIB7PPbK_B09iIiUVwRzSKqZM23lc8,14569 +pandas/tests/frame/test_stack_unstack.py,sha256=DrF--ZAQfwe6TEHJpEqA6jQgTvjRbXR83frSA-WbzeE,91719 +pandas/tests/frame/test_subclass.py,sha256=RCda_f7mhpcZkfTaTWQ7qdN9Wxl7Y25W42fryTJc0d4,27486 +pandas/tests/frame/test_ufunc.py,sha256=DoZOFU7XfLTZOEb0t4R9oy3Y1gPSU-KE0bcdHD4meuA,10566 +pandas/tests/frame/test_unary.py,sha256=HuY-VS0QfKdKGBpKNE0YnvWMQ1VoPilcsA5RCJx0qa0,6218 +pandas/tests/frame/test_validate.py,sha256=hSQAfdZOKBe2MnbTBgWULmtA459zctixj7Qjy6bRg20,1094 +pandas/tests/generic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/generic/test_duplicate_labels.py,sha256=uyUq36RBTPexDnda0CCkM25SVLuKalQRLNDnZOF4hlA,14452 +pandas/tests/generic/test_finalize.py,sha256=qJU0Dg33ut-nErmy6epVHha2-uTbwObvfjeTInhKj9g,28998 +pandas/tests/generic/test_frame.py,sha256=FzBoLl4n91wB4fmKgX8BWmSEGH1AMqMlXkz4GCQRqrQ,7330 +pandas/tests/generic/test_generic.py,sha256=sftKQ4cHn9gSYDN5XpJr987PXTcJzoN35crxI4kFfG8,15956 +pandas/tests/generic/test_label_or_level_utils.py,sha256=PhsVWjYjOHPZRqX4mwUc7jlOH3tnd7p9pkMFh87CtKU,10244 +pandas/tests/generic/test_series.py,sha256=-3BG-oTHs_h1MsiuqMFbwaCCJpjHtlA_16akgxCz60Q,5671 +pandas/tests/generic/test_to_xarray.py,sha256=wuVehXkQ8zNDwY0M-Bg0_NpA4TfeAPx8ZwlWjEmHNJk,4058 +pandas/tests/groupby/__init__.py,sha256=O41hwVGLyFtIhv-zbe2JBZiqD3heGA7LOk10RuxfcKc,659 +pandas/tests/groupby/aggregate/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/groupby/aggregate/test_aggregate.py,sha256=yT-BuIdrub9_EtylkZivf5CiYg01o9CurWyiE631Dfk,54681 +pandas/tests/groupby/aggregate/test_cython.py,sha256=Fiv1ysUPjidYRcllw1P8CaMsnSpDz1qys7C_av1yRUk,12798 +pandas/tests/groupby/aggregate/test_numba.py,sha256=Ba1zZzFC2-cjXE4OMOAStDvh_CeHy3hZwUhwDLDGkcY,13039 +pandas/tests/groupby/aggregate/test_other.py,sha256=jRH9PEZPqWIPTdKg-dabwK0_LgPcl2TyCPDEuBfz7Ew,20311 +pandas/tests/groupby/conftest.py,sha256=RFE_UVpTpFYL2sKPdpAhe003SMd4Qh1OS53iI7d8-tg,5136 +pandas/tests/groupby/test_any_all.py,sha256=JlPUUQmJ0SPF_eq1r7Sn-MCezf6gklNR7ZMwSvYfPTk,5672 +pandas/tests/groupby/test_api.py,sha256=ODh1SRQK3xmzYKrqY450s6PRPVT8WFrEvWRFlSHfhyQ,8269 +pandas/tests/groupby/test_apply.py,sha256=kKGKnSBpyZ_62LBy18rvFksmc5U6HO7GaSAu6cjpzpg,44281 +pandas/tests/groupby/test_apply_mutate.py,sha256=ulaldNIm7tkaM_bDgOgKijbPbtEmKM96NRPMpmdfCh0,4093 +pandas/tests/groupby/test_bin_groupby.py,sha256=nZGe01NsuZmS88cMqq8fGFbKl-umvmWjXd8BGmR3jTo,1769 +pandas/tests/groupby/test_categorical.py,sha256=DPNTQ_dKPJEZF-p9jjJhUIBeXHQo9hzgePGLDmm5fhk,72182 +pandas/tests/groupby/test_counting.py,sha256=ds8a1nBpxUlrWFcmtLJK3LCvhWY-u5Y1b4Wqze42vAw,13473 +pandas/tests/groupby/test_filters.py,sha256=-fqMvq0XwU9-Y5WaU-Na5hZdxjVY8i38Qu45vhaNOjA,21773 
+pandas/tests/groupby/test_function.py,sha256=TzuK3S-F44flQNjGjGICcSoRObcZOFAPhZypTAiT5Uk,59440 +pandas/tests/groupby/test_groupby.py,sha256=87Hh_aEa1Oy6Jm67J7C3mb4iJHiGXWn1Ci_q8zI9MMI,101673 +pandas/tests/groupby/test_groupby_dropna.py,sha256=qntuCFxc6mDfIVqLYjONruF781RG5XjEJlmwVP6CUSg,23344 +pandas/tests/groupby/test_groupby_shift_diff.py,sha256=nHP88-hkyvlx7zFuzazHSVvVTFjjDzZuUTvNIovlISk,7862 +pandas/tests/groupby/test_groupby_subclass.py,sha256=OheHSWGShXz5qc4_tyl7-49733Gnbs7Xf8grgMSQQbM,3480 +pandas/tests/groupby/test_grouping.py,sha256=vqdIstE1UyyAZDREkPUxwpX6pvvPtOoBuphXpz1FQRw,42984 +pandas/tests/groupby/test_index_as_string.py,sha256=bwAMXa4aSzVDUY1t3HmzK4y-jO5jIwbbRu85Jmb8-U0,2274 +pandas/tests/groupby/test_indexing.py,sha256=Ln_43WnuxtAVrWoaUHWh1IqUSY0i42nY9VnEnw86oXg,9521 +pandas/tests/groupby/test_libgroupby.py,sha256=xiFJcUw_cwTUpQh6E9L47EZm8HopmDrKuYSTI0gHnDs,10457 +pandas/tests/groupby/test_min_max.py,sha256=BXq4nFkhqRPqHwsZRiI8X94gBhQ8O9j2w-MBJz64k7Y,8380 +pandas/tests/groupby/test_missing.py,sha256=G3kI7dIkkllkFn9gZenypMGKLidlcub9izETyvpmgu8,5251 +pandas/tests/groupby/test_nth.py,sha256=c-FzZa3q1hpD6cXw3y34Ie6UAnDHhFfgDnJ1uybyCDI,27016 +pandas/tests/groupby/test_numba.py,sha256=B2ygkBddeTyLE7a6okHM_CbFwsOaqMceHh4h6fmmQNg,3260 +pandas/tests/groupby/test_nunique.py,sha256=oMMCC9tXvj1Kf4GBq47_JiWyIi1IBN7kZgVnKWSkOWA,6095 +pandas/tests/groupby/test_pipe.py,sha256=BpMDqw-ZGT-tHUJN7k6XoWz2H46sBqSxmouppbWMHsU,2098 +pandas/tests/groupby/test_quantile.py,sha256=Xhqc_31c_CRHKdPcByWYYFm0OTqkic3xGqD9-p4jpps,16628 +pandas/tests/groupby/test_raises.py,sha256=hAujqsEODk6f_ytkc8JyNSURaTNnuDUfo5fpPCzsR3g,21093 +pandas/tests/groupby/test_rank.py,sha256=5qzGPZpulnyT4w6GH0VYLuukV9rEYDnpyYMEf-vus-I,23894 +pandas/tests/groupby/test_sample.py,sha256=n_dLYblQo9MWnpngMRIIGLZFGEGOeAfEqsL9c9gLCKg,5155 +pandas/tests/groupby/test_size.py,sha256=PQ2op8vrqyDhNYwQyM2x19v2jJzrTvUH0GCSv0xE_eU,4250 +pandas/tests/groupby/test_skew.py,sha256=_FTlnXtE_fic6ZZ322S583IXUY5hEQggi-3Xbuboahw,841 +pandas/tests/groupby/test_timegrouper.py,sha256=x_rxeFluPfOeV1kKS4hznel4dI4flV5nwWubrwxHB24,33372 +pandas/tests/groupby/test_value_counts.py,sha256=ZPC0yDqGsDuUCIVGhwNuMLh2B2uzQzGXzoUIbQSOU38,37880 +pandas/tests/groupby/transform/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/groupby/transform/test_numba.py,sha256=6GJOeWL6kOIJQQaBCAD9ajv_-m6NmCrpxB9wwoCSr0A,9684 +pandas/tests/groupby/transform/test_transform.py,sha256=khY9oq-viYafDA0kqJALx0a7ipBIJ5fP3E-oHmPysoo,54598 +pandas/tests/indexes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/base_class/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/base_class/test_constructors.py,sha256=aCaUds7GeFX0wDYctr0Y6D0f3_EhCZ25awmrWMuvAbE,1997 +pandas/tests/indexes/base_class/test_formats.py,sha256=xNzgJAG9JtdV7oMSuBWymTC3SXI2vOL07JkKV7EV6Gs,5611 +pandas/tests/indexes/base_class/test_indexing.py,sha256=1zbBHv-nJCIfXRicDPXPtyLBL3Iy-LvH5bkamnoFGrI,3687 +pandas/tests/indexes/base_class/test_pickle.py,sha256=ANKn2SirZRA2AHaZoCDHCB1AjLEuUTgXU2mXI6n3Tvw,309 +pandas/tests/indexes/base_class/test_reshape.py,sha256=HnOE42zKp_IUPE-0V1Ugprj0hy53sUTpFsdl6VQXUN8,3061 +pandas/tests/indexes/base_class/test_setops.py,sha256=WfHaTfTQF1IDJMKjIQ3evIm43cgzhRLMOqMFyyvATf4,8885 +pandas/tests/indexes/base_class/test_where.py,sha256=uq7oB-lk7rsgYQer8qeUsqD5aSECtRPSEUfKzn91BiE,341 +pandas/tests/indexes/categorical/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+pandas/tests/indexes/categorical/test_append.py,sha256=LjLMq8GkNrsIVNfTrujLv_TlKo79oA_XbpNUFs-pqVQ,2191 +pandas/tests/indexes/categorical/test_astype.py,sha256=9pBn4l1NQiDYPlf4TqbYodYzt2mZtuygSm5XYFiczMs,2846 +pandas/tests/indexes/categorical/test_category.py,sha256=cBdu3Hk3QJuuhKepnghfdIXJhi1NzIKatPfPzayAd7o,14455 +pandas/tests/indexes/categorical/test_constructors.py,sha256=g3hEVtOS576z11miVwakwud3cLXkFI2ErImUaFW9N6U,5536 +pandas/tests/indexes/categorical/test_equals.py,sha256=AIrr-W5WeqDj5KbELqjHm3-hqqx3q8YxBrv1z2oco94,3569 +pandas/tests/indexes/categorical/test_fillna.py,sha256=sH68aWCabI2qy5dbgxQCXeTfvn1NQgDfM1OT4ojFmaU,1850 +pandas/tests/indexes/categorical/test_formats.py,sha256=-6n70kxUK9jAsSAeBnrOKGtbMWfN3KcT2GyMHC-pc0Q,5994 +pandas/tests/indexes/categorical/test_indexing.py,sha256=zBvryPgX3VF5P4HqUQ1h1FD2warHLfSvb0nBq6rxjrc,14978 +pandas/tests/indexes/categorical/test_map.py,sha256=VHsSFGWEBmgQLvvquC6-y3QDq3lwzSpqPWZHTLiGdzw,4664 +pandas/tests/indexes/categorical/test_reindex.py,sha256=XWBAMQChIpEiORZKQzXzRcGA2_SpKjSXsqPPd4Q2lSU,2954 +pandas/tests/indexes/conftest.py,sha256=gMw_DOvDGIe-prz_KVdKoBioI33HgFR1-c1KIlZAO-w,1481 +pandas/tests/indexes/datetimelike_/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/datetimelike_/test_drop_duplicates.py,sha256=MdjY3XwZt2fggZ5cyx3ANdh3WzmUykZuWIMUxH_RT3Y,2596 +pandas/tests/indexes/datetimelike_/test_equals.py,sha256=xwiDo78zjpi7GmwipTnrg8oS9gPF08kpJwi78KJ-ftI,6316 +pandas/tests/indexes/datetimelike_/test_indexing.py,sha256=Y38s7zHSY86KSkSrYM2L_I-e2oInjl6xRD8wZCo2c48,1294 +pandas/tests/indexes/datetimelike_/test_is_monotonic.py,sha256=TSxFR2oyOC88-UANueky4q2aOvQKA2-nOTVx2NGVGMg,1522 +pandas/tests/indexes/datetimelike_/test_nat.py,sha256=6-Yr-n4JskfsjbaEPFgaRPKX4S7R-LhQOEQSC7cBybw,1335 +pandas/tests/indexes/datetimelike_/test_sort_values.py,sha256=RE8C8doqoMhnOropFCAyI0ra8YLRUKzxSxfCjIaBH3I,11463 +pandas/tests/indexes/datetimelike_/test_value_counts.py,sha256=1rFBzF1cZj_3BcuLyr-PkL9EaypeJPmVHZcjVawuhFs,3150 +pandas/tests/indexes/datetimes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/datetimes/methods/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/datetimes/methods/test_astype.py,sha256=2Isc5YIaajNwz9H7tBbEzcxKSZfDkeTWPyxktMsvutg,11452 +pandas/tests/indexes/datetimes/methods/test_factorize.py,sha256=xybBvRrsnW2pDltySoPfJkE2qzUXiRDWYbbGna16fvw,4467 +pandas/tests/indexes/datetimes/methods/test_fillna.py,sha256=eESnVTQ8J3iBL24bWKt7TmHxC5FJiLZMpKjw1V376qY,2004 +pandas/tests/indexes/datetimes/methods/test_insert.py,sha256=amCV3pFTxJgAO8BXug0BiSAq8zF8YK0AAMHM3WiyhsU,8952 +pandas/tests/indexes/datetimes/methods/test_isocalendar.py,sha256=Fn46hJpQtTNew2ThFpjzQm_lQ1MacKq_TmMEvCGgaZg,674 +pandas/tests/indexes/datetimes/methods/test_repeat.py,sha256=oYwWHoEg8sPH5wn8WtIaxrZKr5vBttp4pBboN9Dm0tk,2397 +pandas/tests/indexes/datetimes/methods/test_shift.py,sha256=ZMBOK-kpIusmod8UjSuwP2LHdoL0dUBk_yVK6TiKW_Y,5475 +pandas/tests/indexes/datetimes/methods/test_snap.py,sha256=smwfWvN33B6UgLagKaBQkllTuGAm7Wiaq87M9nxu8g8,1305 +pandas/tests/indexes/datetimes/methods/test_to_frame.py,sha256=C6glyGdxSs-hMDQSt9jkftmRlTGPMCGdIQlfChR9iGk,998 +pandas/tests/indexes/datetimes/methods/test_to_period.py,sha256=iGyi98T207mhJ2zbbDTn8VEEJ_SMQTIP_l6MU_CqQ54,6764 +pandas/tests/indexes/datetimes/methods/test_to_series.py,sha256=8ZW3AxMkHj3IV1wVgM797SH_rRLKQ9zld1UVkhk1C8Q,493 
+pandas/tests/indexes/datetimes/test_asof.py,sha256=-fxHseqPYK14ugv7nc3x_WBHx8eY_UrhLosw4XIPRHM,751 +pandas/tests/indexes/datetimes/test_constructors.py,sha256=9pB1-nBYgueh_BIiLM50okOSi3V29vMH-Rnm8AlIhSw,40309 +pandas/tests/indexes/datetimes/test_date_range.py,sha256=xicuJdMymvDdtL0zhDyiijrD6kPTlFWs8KdvuKBdtck,45848 +pandas/tests/indexes/datetimes/test_datetime.py,sha256=risuz0zlj-ovWe8cl4zDQhilBW9PqO4R0wQVi16qkpQ,6982 +pandas/tests/indexes/datetimes/test_datetimelike.py,sha256=qDjHgNXJ8HUBBVNKsIzRxVPRuG8Z-ef5BwtsGkyIPuU,290 +pandas/tests/indexes/datetimes/test_delete.py,sha256=O1cbea-LEF4l8yHLLrNaLBI67KXrfKUvYlYzQ_4DGfo,4594 +pandas/tests/indexes/datetimes/test_formats.py,sha256=LJQv6oVzaGr_u1ljKRXXpIaWh99rHrE_RtR5yv9x7EE,10241 +pandas/tests/indexes/datetimes/test_freq_attr.py,sha256=GbLRYe-E-Jraq7UVQrdnuwwUEHyy9vA_DQLK1cDvUz0,1732 +pandas/tests/indexes/datetimes/test_indexing.py,sha256=lqL8Tj8BiJNVMxVePA1oFAxt2hl8xt0i4tmSSNeKi9s,25205 +pandas/tests/indexes/datetimes/test_join.py,sha256=0gYiZIz2MmIrrbuqnHwV8C-fC79OtjtiyJRQyrtaw9c,4913 +pandas/tests/indexes/datetimes/test_map.py,sha256=JILLZ1zcVd7jXKYWrgek7CtymjbTaEQajLMfVwZBr4A,1370 +pandas/tests/indexes/datetimes/test_misc.py,sha256=e9t1OGbKFFdykq631xzcG5_jpWqYN066YuMirRQc6rw,11638 +pandas/tests/indexes/datetimes/test_npfuncs.py,sha256=cjjuxeekM2IUf-nx3WKVonrwNAuhZnVgQHNAXdhglog,384 +pandas/tests/indexes/datetimes/test_ops.py,sha256=ADlYknXyZpYXXnSe5LRlHjk3hGIVkfhZWgLeUUWdK4Y,2175 +pandas/tests/indexes/datetimes/test_partial_slicing.py,sha256=Loc1zw2Pjdf2zei9po3k3U9EdzgF8aBtNkoDp8w75FQ,16449 +pandas/tests/indexes/datetimes/test_pickle.py,sha256=cpuQl8fsaqJhP4qroLU0LUQjqFQ0uaX3sHql2UYOSg4,1358 +pandas/tests/indexes/datetimes/test_reindex.py,sha256=s1pt3OlK_JdWcaHsxlsvSh34mqFsR4wrONAwFBo5yVw,2145 +pandas/tests/indexes/datetimes/test_scalar_compat.py,sha256=de5uirfUAIX0WoIPz9Ol5Xum96A-lpObX4Ir9COclbI,12012 +pandas/tests/indexes/datetimes/test_setops.py,sha256=RrsZs5G-ZXqLsYWmqItGShpNS_C-bVoGg8Em-TIDejk,21184 +pandas/tests/indexes/datetimes/test_timezones.py,sha256=B0UVf1dlnqYizXf_7rDFNPYwCfWw2GUREneurV_C7Gc,45295 +pandas/tests/indexes/datetimes/test_unique.py,sha256=QVhLkL7u5g-0ATQe-E-RQp8rXpUyuDQaeteMWUg4td8,2065 +pandas/tests/indexes/interval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/interval/test_astype.py,sha256=7h7n8euKiXPnRU2d-4FYTAf-6iqPDR703dU7Oq10qwM,8809 +pandas/tests/indexes/interval/test_base.py,sha256=Am74UvKJHnsLHWVBkb-GMlN2FBEuXByz9n1ZU5rK4fg,1872 +pandas/tests/indexes/interval/test_constructors.py,sha256=g-4smR60ae7M26BQe1VKPfoRlKo6Z2lUu78DpFssPi4,17595 +pandas/tests/indexes/interval/test_equals.py,sha256=a7GA_whLbOiS4WxUdtDrqKOUhsfqq3TL0nkhqPccuss,1226 +pandas/tests/indexes/interval/test_formats.py,sha256=PkTjDkOzy9JLMNIVn2HCzuv6JWto-o3S0q-E92JP9LM,3244 +pandas/tests/indexes/interval/test_indexing.py,sha256=9qmJgpqHSzOiId5XEdWKd0Cy3-H73d3iOUkcfow47O0,22969 +pandas/tests/indexes/interval/test_interval.py,sha256=kmdJcFIcXoptDIoxdYfXveVsRzKHW6BH3hgW8E3-aI8,35324 +pandas/tests/indexes/interval/test_interval_range.py,sha256=-awbVH7W6vFHq2Jdg37-cLQQhYQc82IcpEpAfQmgVb8,13612 +pandas/tests/indexes/interval/test_interval_tree.py,sha256=RBYySgTeDaItmudzMkPYvfiirvmj6NpXlYguAmuKNao,7612 +pandas/tests/indexes/interval/test_join.py,sha256=HQJQLS9-RT7de6nBHsw50lBo4arBmXEVZhVMt4iuHyg,1148 +pandas/tests/indexes/interval/test_pickle.py,sha256=Jsmm_p3_qQpfJ9OqCpD3uLMzBkpsxufj1w6iUorYqmk,435 +pandas/tests/indexes/interval/test_setops.py,sha256=Bxr__XGHJyfpOZFZeXkcT95Bw-5qk_pNB6aq8vfUU6M,8118 
+pandas/tests/indexes/multi/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/multi/conftest.py,sha256=ZgvdOQaEdSuZlqaxOPruOT82EYGixYOZkky5KKC3TBI,2152 +pandas/tests/indexes/multi/test_analytics.py,sha256=TvZ7YyKz_x9BL-2ZKXMil3co_zgVoqJrsKr1fK6zUmU,6708 +pandas/tests/indexes/multi/test_astype.py,sha256=YmTnPF6qXwvYY82wZfQ8XFwVwOYYsIls3LSrdADDW-4,924 +pandas/tests/indexes/multi/test_compat.py,sha256=NqzR1udCxVYzrAsZVehtAvf35P8ikDWML3Wp-O3c2BQ,3918 +pandas/tests/indexes/multi/test_constructors.py,sha256=sdOfrFEGfH6ruB1Sl2KoxHKjnCksgoTmFKbkjWUsyWM,26784 +pandas/tests/indexes/multi/test_conversion.py,sha256=8okPvlaOQgJzneUiy3MTwHU4Z9_th4cadqAxPiV-nLc,4957 +pandas/tests/indexes/multi/test_copy.py,sha256=9Xperk7a4yBTQKo8fgk3gCa2SwJr30mH2JYYMYWguWY,2405 +pandas/tests/indexes/multi/test_drop.py,sha256=Mv5FB-riRSuwwvVFJ60GwxRGbuFkU_LU5DPW8KY8NTk,6089 +pandas/tests/indexes/multi/test_duplicates.py,sha256=GpNLQklOTHPn7vI80J_oZ6qk9JDmIKEIVKgTK_qf4s4,11056 +pandas/tests/indexes/multi/test_equivalence.py,sha256=LKBMAg82PbzkuMMy18u6Iktjzuavo1PIY-IxtPGBpZE,8530 +pandas/tests/indexes/multi/test_formats.py,sha256=8H35ibB08QSRb0sH4ThW57xvTwUP8_Tm9L7CQkDPrmE,8275 +pandas/tests/indexes/multi/test_get_level_values.py,sha256=4nK1QSCRHxWITdQK0y745cY7ZAp92GokMdwTF_avZZo,3970 +pandas/tests/indexes/multi/test_get_set.py,sha256=QQa0L3aGdFMAk-MZ7MJpgWLj2Qg47bnT6xY-3PLTSxA,12574 +pandas/tests/indexes/multi/test_indexing.py,sha256=oS2IH3frVpjJPq1Y7kH07VuRcJxBVdoywWbSxzsl-F4,35168 +pandas/tests/indexes/multi/test_integrity.py,sha256=HwJyW1Mm4tsPTHKFcTQrghIzZ_hFom9NXzeWdRDK2Qg,8648 +pandas/tests/indexes/multi/test_isin.py,sha256=OtlwJ9zZDvwgZOgbeY_oidWPOUmii_JBCCBpHnLw8us,3426 +pandas/tests/indexes/multi/test_join.py,sha256=_SPH0NZ3QHpEHuhT512wJd-DjW1976ICYpXybTbN8s8,8495 +pandas/tests/indexes/multi/test_lexsort.py,sha256=KbwMnYF6GTIdefQ7eACQusNNuehbtiuqzBMqsOSfDU0,1358 +pandas/tests/indexes/multi/test_missing.py,sha256=hHjKWxl5vkG5k9B9fxglrYB4eQldKamkMbACAu6OvUY,3348 +pandas/tests/indexes/multi/test_monotonic.py,sha256=5xlESrQOEcFWdr0iB3OipJtA6-RzriU3Yq2OQGgP0M4,7007 +pandas/tests/indexes/multi/test_names.py,sha256=D1DGqxnYlihIDVMJuTqZ0lQ3KJ8luHWXJwCnUYbvxAk,6618 +pandas/tests/indexes/multi/test_partial_indexing.py,sha256=5nR6tybKW-LJ7oBePNyFJdoWYjGw5BW2rixkYKA1cy0,4768 +pandas/tests/indexes/multi/test_pickle.py,sha256=ZJVZo0DcXDtV6BAUuPAKbwMV8aGfazJLU7Lw6lRmBcw,259 +pandas/tests/indexes/multi/test_reindex.py,sha256=Em0HI2ePjB0cJsMDPvHjlKwspb0wrrM-LlGouAd5EMw,5782 +pandas/tests/indexes/multi/test_reshape.py,sha256=yRcnTGS0M5749jUZGEZA8_UxSZ-CeOeCsWYBbTS0nTY,6711 +pandas/tests/indexes/multi/test_setops.py,sha256=VhCsycHAOEjx5YuRE7Ra0nNMuf-VggVoQvhmY2_0BGY,25200 +pandas/tests/indexes/multi/test_sorting.py,sha256=rZXsoJQxtPFLVplz5L4aiJTaDGxBknNPihOZjloNfV4,10426 +pandas/tests/indexes/multi/test_take.py,sha256=4MaxPM4ZJQPXJKiqgwEwhZ71TyH4KQfIs5LgS40vvLM,2487 +pandas/tests/indexes/numeric/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/numeric/test_astype.py,sha256=P19W9zZl8tN0EK-PaEi2gIFHLwCbruTMEUm7_ALGH9Q,3618 +pandas/tests/indexes/numeric/test_indexing.py,sha256=nDzkrokWvcmHkeHWjE8umPfxX4lR6AnQorAV7ppElCI,22761 +pandas/tests/indexes/numeric/test_join.py,sha256=P-8YL2vSV_Mf5mWA0tfXDxY90YOxgBeCUilzD4EexjY,15039 +pandas/tests/indexes/numeric/test_numeric.py,sha256=-2vrHcOTGCQR3L_Ts1wa_01nYiFJ1drEE42RtnibEho,17765 +pandas/tests/indexes/numeric/test_setops.py,sha256=6supAkqLe5ekHC44ns1HA1hwBFVHf5U2U5u2_hozO34,5763 
+pandas/tests/indexes/object/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/object/test_astype.py,sha256=hoVg_-U_F-vNQHVKEp1YTuyqSLz_fnQITjyQMA4LPrc,1046 +pandas/tests/indexes/object/test_indexing.py,sha256=vHdaQBiqsaofx1VST0pUdxJ55aCTPqfv2UHg9s_GtSk,8447 +pandas/tests/indexes/period/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/period/methods/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/period/methods/test_asfreq.py,sha256=-6nZTXF6asLzISApyN1in7qv2pgjFuepnD-IehBoVfg,5755 +pandas/tests/indexes/period/methods/test_astype.py,sha256=XJINQ34ltWdoaQmhKqOtOucGXvZB7okfsRDv8kah3TE,5389 +pandas/tests/indexes/period/methods/test_factorize.py,sha256=9Mx-xl1iAWCAXi7PvYMEOIetB4DxfLks7crRuDXcwsM,1921 +pandas/tests/indexes/period/methods/test_fillna.py,sha256=BsNanStMuVV5T8S4tPNC7BJSExKOY2zmTws45qTkBGE,1125 +pandas/tests/indexes/period/methods/test_insert.py,sha256=JT9lBhbF90m2zRgIwarhPqPtVbrvkLiihZxO-4WHvTU,482 +pandas/tests/indexes/period/methods/test_is_full.py,sha256=hQgnnd22PyTFp68XVlsfcARnC-wzrkYJ3ejjdTGRQM4,570 +pandas/tests/indexes/period/methods/test_repeat.py,sha256=1Nwn-ePYBEXWY4N9pFdHaqcZoKhWuinKdFJ-EjZtFlY,772 +pandas/tests/indexes/period/methods/test_shift.py,sha256=RZuixemQ-4CWpXnjPXq4azk2t336kgn7ctwk84_j4pM,4405 +pandas/tests/indexes/period/methods/test_to_timestamp.py,sha256=ya4SeZn6BgL-6j9O7vyitThtGHSleRtuXYpM8_otPPU,4667 +pandas/tests/indexes/period/test_constructors.py,sha256=DRxIZj5D6UvmaXvXeD0n4jOdUF61jd39GhsO01tWEM8,21534 +pandas/tests/indexes/period/test_formats.py,sha256=U39tLdSx1icWbzRvhhGMDapA9ovJO2fHB63CIrJQikA,6587 +pandas/tests/indexes/period/test_freq_attr.py,sha256=KL1xaip5r7nY-3oLW16bmogfkYljsGJEJGKxn6w72Fo,646 +pandas/tests/indexes/period/test_indexing.py,sha256=c9_mMwjzdhG6QwSWGiaQLyrLuR5ESU9R4xTkDIVGlo8,27893 +pandas/tests/indexes/period/test_join.py,sha256=2UbZyM0F2q9Qyq9ngxPUHHWYo2JJ6X5tzNHD8PCkBac,1836 +pandas/tests/indexes/period/test_monotonic.py,sha256=9Sb4WOykj99hn3MQOfm_MqYRxO5kADZt6OuakhSukp4,1258 +pandas/tests/indexes/period/test_partial_slicing.py,sha256=zoWzpZU1_K8XaDHPAAl2iU27eOBYcxMRAABq0uJ22Co,7355 +pandas/tests/indexes/period/test_period.py,sha256=uKsuGUl0K8R1gP97VKmBTAuQT0w2fOQ4H6biZn-myX4,11217 +pandas/tests/indexes/period/test_period_range.py,sha256=6t7JYdgPAAEY6Q3VaEne4eEVagVRSkF2u4gbyzv7frM,4259 +pandas/tests/indexes/period/test_pickle.py,sha256=KPeO9sWtcl78h0fqapzEW_CUCwPhiYhbK_zhfXWy9lk,692 +pandas/tests/indexes/period/test_resolution.py,sha256=bTh8yDI26eG73SVQH1exf9TA5Vt4XiWu70f3fb8i2L4,567 +pandas/tests/indexes/period/test_scalar_compat.py,sha256=6JhdX0MjbZTSaeotjrHzGaJod1MH8eDPrO7Z-47s6As,1349 +pandas/tests/indexes/period/test_searchsorted.py,sha256=AF1ruU22wQWnDiDEjKD_lg6en9cJRQbky9Z6z-3QZCM,2604 +pandas/tests/indexes/period/test_setops.py,sha256=UU6biJpupp1A_3_Q8z783iaCU1T-D9fJEZe41sToN4w,12471 +pandas/tests/indexes/period/test_tools.py,sha256=gYGMJ20HOaC7xbiWqnKUfglyHlIMaR_PNyrJyckkU5Q,1356 +pandas/tests/indexes/ranges/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/ranges/test_constructors.py,sha256=ceX79fbjGyc5VNkmz29Q1N7WGXLj40BvTuz5PfNAw4I,5328 +pandas/tests/indexes/ranges/test_indexing.py,sha256=WCJFjnEzFIqQUv_i2cy-wHRQ4Txfi8uq4UBp20s4LRw,5171 +pandas/tests/indexes/ranges/test_join.py,sha256=lniHRyuEJWY7UGc0TpJ20xzUftn6BpYJbZQPo2I0dxE,6268 +pandas/tests/indexes/ranges/test_range.py,sha256=dULaQMePxOA029xVqWWoVQ9mVAc2NaWj8XqnKF0X0xo,19893 
+pandas/tests/indexes/ranges/test_setops.py,sha256=yuiXAKlZJ5c3LkjPzFltAKFQmhVqaBleiJ7nzXs4_eA,17534 +pandas/tests/indexes/test_any_index.py,sha256=QgHuIfkF_E3BFaNveFThmGAbrMpyR_UL-KQ0FhPFTyY,5131 +pandas/tests/indexes/test_base.py,sha256=alr1adEboRCxXIBAlwan1zWZF7AhJwcleFE_bgW1bZo,57157 +pandas/tests/indexes/test_common.py,sha256=f0lNehPGKYR4IBPuZH62b06ZxKQieY5LSeXoMW27soA,17677 +pandas/tests/indexes/test_datetimelike.py,sha256=BGe09scZwP9P-nJYlqtyPI8T092WohKHdZSpG1afwS8,5468 +pandas/tests/indexes/test_engines.py,sha256=rq3JzDXNc2mZS5ZC2mQLpTeydheOX9OLoq1FLR53wbI,6699 +pandas/tests/indexes/test_frozen.py,sha256=ocwmaa3rzwC7UrU2Ng6o9xxQgxc8lDnrlAhlGNvQE0E,3125 +pandas/tests/indexes/test_index_new.py,sha256=5xvKj-R0ikyrgx_UDk3zV4MnMClStJ04O51a1rUuu2M,13891 +pandas/tests/indexes/test_indexing.py,sha256=jwcq_dujP7z8tfnLqQ-G2NoJ0CxrDIa33jWwRLKk-8w,11309 +pandas/tests/indexes/test_numpy_compat.py,sha256=fnrc8fNrV7v3BRTY7Huu9cyrBw2aNUrv5i4UUEublFE,5776 +pandas/tests/indexes/test_old_base.py,sha256=PGkgpfJguR5hHaIMwjzJgGm8tB29xrvQ1nNVLc8MTNo,38210 +pandas/tests/indexes/test_setops.py,sha256=KzXruTpkMZ3SFlXjkGdm-7FPyt4g-1vz0Iw-77VoEZE,31331 +pandas/tests/indexes/test_subclass.py,sha256=lmZHuQ8OSlwP3xcR8Xy2Mfvjxp2ry2zUL4DO2P4hbnk,1058 +pandas/tests/indexes/timedeltas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/timedeltas/methods/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexes/timedeltas/methods/test_astype.py,sha256=5MDcsuzwKr8oRjNhLPMF6xTiZOSbjlOo_YghNVCdYCY,4135 +pandas/tests/indexes/timedeltas/methods/test_factorize.py,sha256=aqhhwRKZvfGxa3v09X5vZ7uBup8n5OjaUadfJpV6FoI,1292 +pandas/tests/indexes/timedeltas/methods/test_fillna.py,sha256=F7fBoEG-mnu16ypWYmK5wbIovQJKL0h86C1MzGkhPoE,597 +pandas/tests/indexes/timedeltas/methods/test_insert.py,sha256=fDYCuOIefgjNBJ7zhAUYniNVl5SltSs275XaNoL0S-s,4713 +pandas/tests/indexes/timedeltas/methods/test_repeat.py,sha256=vPcNBkY4H2RxsykW1bjTg-FSlTlQ2H1yLb-ZsYffsEg,926 +pandas/tests/indexes/timedeltas/methods/test_shift.py,sha256=W3Kb9MAAm3uUWPsf1pVvOkFIiOO-dOa_n17ticU8chA,2750 +pandas/tests/indexes/timedeltas/test_constructors.py,sha256=_cE4YExuP1pc-C1N8Z2OE6puF-ELy2paJe1OfWWbM7I,9810 +pandas/tests/indexes/timedeltas/test_delete.py,sha256=-5uYhDUCD55zv5I3Z8aVFEBzdChSWtbPNSP05nqUEiA,2398 +pandas/tests/indexes/timedeltas/test_formats.py,sha256=3U2kMrD4Jmhrj8u5pkxM6yRlptxenAZ3vBBPD9GQFL4,3293 +pandas/tests/indexes/timedeltas/test_freq_attr.py,sha256=nKgOcnnetwZ2z5ccW1GkEL_SYLQth_d5KApaYqSobQQ,2176 +pandas/tests/indexes/timedeltas/test_indexing.py,sha256=nr1FwBRfPfVx0-MWxW8OkOxmlo2S3yLAl587u0UDxEg,12160 +pandas/tests/indexes/timedeltas/test_join.py,sha256=9m_8w7IjKycSZR-xb4WHaxREgPUaJAkSTLOWI_-wmXk,1569 +pandas/tests/indexes/timedeltas/test_ops.py,sha256=nfGyNJvNy7_jmWebKjevLKhyAMNvI5jytkZTNlpEC-g,393 +pandas/tests/indexes/timedeltas/test_pickle.py,sha256=QesBThE22Ba17eUdG21lWNqPRvBhyupLnPsXueLazHw,302 +pandas/tests/indexes/timedeltas/test_scalar_compat.py,sha256=46KGdJ7q37JcEhkDET2RIGsqiUtinkuMlmD59S-jWGw,4571 +pandas/tests/indexes/timedeltas/test_searchsorted.py,sha256=kCE0PkuPk1CxkZHODe3aZ54V-Hc1AiHkyNNVjN5REIM,967 +pandas/tests/indexes/timedeltas/test_setops.py,sha256=MRv-uVp_wwBRnSlARtoz_yo3zdFnb7YKcPB7G3G3q6Y,9402 +pandas/tests/indexes/timedeltas/test_timedelta.py,sha256=aV88z3yUwnZTxa1FzN4hGpur41-dgmDuak1VwWrgs5g,5158 +pandas/tests/indexes/timedeltas/test_timedelta_range.py,sha256=4tfbM2RHLb2DzybhHWCpoPyaaFelK4K1kcjHhCjNMK0,4158 
+pandas/tests/indexing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexing/common.py,sha256=LtCDO4TeMhLWAiTGiJET3YP8RO6T3OQqmdpJ8JH391g,1021 +pandas/tests/indexing/conftest.py,sha256=9C84qvdnHzbM5C0KIVw3ueQhHzuUMoAlw07dVJqCAmQ,2677 +pandas/tests/indexing/interval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexing/interval/test_interval.py,sha256=KjsDEm-Akpbg14BIkjlI6M_1VfCDs55DaoqthXS1ZE4,5940 +pandas/tests/indexing/interval/test_interval_new.py,sha256=kuAbIv_RVpiDDtf-wXvELAf7VeKF0kqAVYpk3XmebAo,7961 +pandas/tests/indexing/multiindex/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/indexing/multiindex/test_chaining_and_caching.py,sha256=FIKANTbi95DzJnfnKSdfcsNKMH44LTalDLxSBmq_kRA,2544 +pandas/tests/indexing/multiindex/test_datetime.py,sha256=tl1yr3h50R0t7uvwTcfsRW-jt1n9vsqf4BWp4dNTdd8,1234 +pandas/tests/indexing/multiindex/test_getitem.py,sha256=VPUOBnItOtdpZhvh90Ek5K3yTIBP7qD--M5s1Fe9evM,12662 +pandas/tests/indexing/multiindex/test_iloc.py,sha256=G2CUPRhd5pRImZpH0uOVIPid7fzB4OuJZjH8arQMrE0,4918 +pandas/tests/indexing/multiindex/test_indexing_slow.py,sha256=nMfW1LQn7YlJauNceeR-uo_yPxRG2E8hcbgqTBMxaH4,3335 +pandas/tests/indexing/multiindex/test_loc.py,sha256=H-ZhuAbFegaTPgZ1bf3AwNx9cs9mgyqdlSMNOKTqRAw,32322 +pandas/tests/indexing/multiindex/test_multiindex.py,sha256=ixvS18gCccgCZe-wcYhd3e3uzB7M3_GIC3DRNOWwHX4,7947 +pandas/tests/indexing/multiindex/test_partial.py,sha256=bGSWpkwNyuaErs-PqHNXuUX6TEoRyyKyfj6I2zLowgs,8705 +pandas/tests/indexing/multiindex/test_setitem.py,sha256=uzcR2xB88GENqYuW0WdjtGxfhLgg5IoWE6eLqmIWUz0,19036 +pandas/tests/indexing/multiindex/test_slice.py,sha256=0I1cmPd24BYwXgrW3qWSyAlryZzIuLgNwa2wsK1cicY,27076 +pandas/tests/indexing/multiindex/test_sorted.py,sha256=xCdmS_0DBN2yoTVcSB-x6Ecwcw93p6erw3bTiU6_J3s,5192 +pandas/tests/indexing/test_at.py,sha256=eQhts-_Z5PWS7BpwfC3-e3YUEBm2pHsxcUY781OVQfg,8092 +pandas/tests/indexing/test_categorical.py,sha256=QlzNP37VWgQ4jI0Ue3_B7t2HOLO5p6hMnFo8vhh52lU,19287 +pandas/tests/indexing/test_chaining_and_caching.py,sha256=0TQoa-RkwKVRvenUSzHGJSctlCR-AI74OGUUUHPKT-Y,22685 +pandas/tests/indexing/test_check_indexer.py,sha256=tfr2a1h6uokN2MJDE7TKiZ0iRaHvfSWPPC-86RqaaDU,3159 +pandas/tests/indexing/test_coercion.py,sha256=NMjSvLmvTG_KkHoGp_nDLJ0KsfedbPeKZMR_K-f_ahY,30980 +pandas/tests/indexing/test_datetime.py,sha256=YgXTSlOHnk5ZO-VcMSNur5xaVHiAalPSihvZacztdSU,5645 +pandas/tests/indexing/test_floats.py,sha256=CwpkyFbyTaUfdtoa-QV7CWBSRa8wQtbsCxBvSKNDq1k,20462 +pandas/tests/indexing/test_iat.py,sha256=OHtnjp9F-lNkz6_roG0PMOcQfwW4-Q87hIdrmOYZM-U,1325 +pandas/tests/indexing/test_iloc.py,sha256=zUAogQ1-2prjDxd2ELZv5BJKBZUeKjTtAxEaOX6Sn4A,50457 +pandas/tests/indexing/test_indexers.py,sha256=agN_MCo403fOvqapKi_WYQli9AkDFAk4TDB5XpbJ8js,1661 +pandas/tests/indexing/test_indexing.py,sha256=ABNJV9QWFK58XDFwBepnLX3Zv7mFl79pqvWlHEnjjVU,39352 +pandas/tests/indexing/test_loc.py,sha256=meFv6ismCPxf1KMTTEBR58tuAAhcsiYdCiTG7t-VBSM,116073 +pandas/tests/indexing/test_na_indexing.py,sha256=Ek_7A7ctm_WB-32NePbODbQ5LDMZBAmCvDgPKbIUOcg,2322 +pandas/tests/indexing/test_partial.py,sha256=BTO8pwPT5JyNRh6r985fFPhnxjrSDHtlb1SrQAP8gxI,24300 +pandas/tests/indexing/test_scalar.py,sha256=neZNxk7NlC7Jry0VqDrfdURphCjWCJkggo1uOyeHrZs,9476 +pandas/tests/interchange/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/interchange/test_impl.py,sha256=rDqUzrghgY5cplgJH9vT-KCr1kr3h37QWo1o1UWHA98,11487 
+pandas/tests/interchange/test_spec_conformance.py,sha256=JnE2kQOLr4EjUCH6Nzc1fCEXhbZ52WzKbioW6f6EVxo,5593 +pandas/tests/interchange/test_utils.py,sha256=15liIDJirQDoP7TxxQkmZJ9gCAVNCd2BwShW_GlwL2A,2965 +pandas/tests/internals/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/internals/test_api.py,sha256=LrmLmyAm4A-FCE0gdBzoQbkypEtwXukOrFXhfhX9M9U,1184 +pandas/tests/internals/test_internals.py,sha256=w2w2kp57YuaIbyeo44djpN8Ovf-eIFVWsXATahDT3gs,50478 +pandas/tests/internals/test_managers.py,sha256=kSk5OIuZ2P5P7DkR1Nf7Pwr7Q8CK14SArEYquvEjP_0,2525 +pandas/tests/io/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/conftest.py,sha256=DT78DKUuv1qYTCZ2Vcc9CtqqSnPiuuN-9JIBOrwYwlo,6794 +pandas/tests/io/excel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/excel/conftest.py,sha256=qWmJwooP_4cTxnWpNpT_b7d8z-L-17UXvWFOoaxOPYo,850 +pandas/tests/io/excel/test_odf.py,sha256=iPVcsuHTUoV0Le4V8iWJWNiNsK1EPpnUO2zYQaiLUkI,1416 +pandas/tests/io/excel/test_odswriter.py,sha256=r6KnQ-k2dOWTe4DHjvE_ymX9bc0DmlPZln7gfm4EYWc,1519 +pandas/tests/io/excel/test_openpyxl.py,sha256=6JY-bGvpl1h23H8L2Jp2zpvO0E1MkmnRKSwGwOPmVzM,14120 +pandas/tests/io/excel/test_readers.py,sha256=f92XaQJaZDXeiTmUyfvXU9W4jALWeKyV4Nt-yWNd4M4,62788 +pandas/tests/io/excel/test_style.py,sha256=h-ry_ePObRAPMs5osIeyrlXaeMNJVXYORSyeVk-f9hs,11171 +pandas/tests/io/excel/test_writers.py,sha256=h51D-hGDw-t-ZIzrKPYh_H_7tqEYIgH_vARpIuohcRE,49761 +pandas/tests/io/excel/test_xlrd.py,sha256=BgWUUxXapajmlBWgS0-g1Q2Id33gYTVKysMrXeooMps,1564 +pandas/tests/io/excel/test_xlsxwriter.py,sha256=PbucNqDm4JlTPBezLo370-MGj6tiYyTrktxlguM6z8M,2667 +pandas/tests/io/formats/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/formats/style/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/formats/style/test_bar.py,sha256=5aq5hdiEJ2_I48VhXaMP5g6BoEsg80iCs01J_-b8o7g,10281 +pandas/tests/io/formats/style/test_exceptions.py,sha256=qm62Nu_E61TOrGXzxMSYm5Ciqm7qKhCFaTDP0QJmjJo,1002 +pandas/tests/io/formats/style/test_format.py,sha256=9siaXSHvCrA-YEuRI0-zun0gwQf2fVZwSPMIrb7CLTE,21154 +pandas/tests/io/formats/style/test_highlight.py,sha256=p2vRhU8aefAfmqLptxNO4XYbrVsccERvFQRd1OowC10,7003 +pandas/tests/io/formats/style/test_html.py,sha256=FvW0Zh6U8CkOKo0Plvz8W-udOgsczg9qawyVq-xzKqc,32702 +pandas/tests/io/formats/style/test_matplotlib.py,sha256=KPTvs_DbJlT5u7xQiQW3Ct-0jmpFHuah_lfQgZkiuQw,11649 +pandas/tests/io/formats/style/test_non_unique.py,sha256=JG_rE5A5Zk5exlfivZHnOI3Upzm8dJjmKKHkwEje4LQ,4366 +pandas/tests/io/formats/style/test_style.py,sha256=x7r8-nhnYdifw_PjopT0a4t99MTGzlOBv-g38HOHxik,58095 +pandas/tests/io/formats/style/test_to_latex.py,sha256=0H0dWqQANhiEu-7sD9FCR4th79bD_j7zyGRrgGxZPfE,32960 +pandas/tests/io/formats/style/test_to_string.py,sha256=8UZoCGo3mHDT2-ucN0pJUK5dijSH05k0tvGfPnVnz4U,1853 +pandas/tests/io/formats/style/test_tooltip.py,sha256=GMqwXrXi9Ppp0khfZHEwgeRqahwju5U2iIhZan3ndZE,2899 +pandas/tests/io/formats/test_console.py,sha256=jAk1wudhPiLBhhtydTNRlZ43961LqFu3uYt6cVA_jV0,2435 +pandas/tests/io/formats/test_css.py,sha256=YFHK3UFe2jcnz6AhmOFb7ZU1jd5Y_LYxIx5PBrJXNLQ,8669 +pandas/tests/io/formats/test_eng_formatting.py,sha256=2hSUlSSQ-NYwPU0E4P1V1P7M9fKF7rtK7PQ2fm30WOY,8137 +pandas/tests/io/formats/test_format.py,sha256=-Jy5YQUOMFWfRGUn5IDxjKSUDJjXOGEKo8AL_th4EmU,129882 +pandas/tests/io/formats/test_info.py,sha256=uLNAEiVHrSWSKuME4pXqoS2EwN9eMnq1pqrtdXRXxz8,15684 
+pandas/tests/io/formats/test_printing.py,sha256=d9kvMmbXBjLiFkaUQYVMdARwlAYRDjWxqfL9B9mz2oE,8510 +pandas/tests/io/formats/test_series_info.py,sha256=7wVrUCg0LVMCODxKVmvcp6dUYuA1q2KxbrZGzfgAGVY,4908 +pandas/tests/io/formats/test_to_csv.py,sha256=71ADzbAx1201TiI0VF1vX8HCrfaurOYXmZeXUYBMaj8,26711 +pandas/tests/io/formats/test_to_excel.py,sha256=ecNeSrVd2mSPsdIqm3lM911b4mPwLIVkoz3MnJFZE3g,15320 +pandas/tests/io/formats/test_to_html.py,sha256=mWE4HnBxSmV6pPF1RDV1lfFNEl8wNNuxyZG9Eqoji9k,31307 +pandas/tests/io/formats/test_to_latex.py,sha256=b2NVvlx9NXSYOjeLwQCQT_Pdg6zFil8ny5D1o46MTew,41130 +pandas/tests/io/formats/test_to_markdown.py,sha256=glqmclVQfGHZFUqYE8qdc7h7RXzk6ucEUYYeJ4nm_1k,2321 +pandas/tests/io/formats/test_to_string.py,sha256=68TS9OkJL1jRGPtiezBzLYqaV42mF4dHG9p3WRwmMNw,9765 +pandas/tests/io/generate_legacy_storage_files.py,sha256=hRLqkSJO6TreyxtRHe1Va2-aGl8MFDUKhLQsXTFh0QM,9914 +pandas/tests/io/json/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/json/conftest.py,sha256=a7fOknNcQX1o2dZTj_hG4xJ7bj49ttaiCMty8Ns5QTs,377 +pandas/tests/io/json/test_compression.py,sha256=wTzfwLg6q9DgnXoTAWbX-_PD3Zu2r76IIFu3yAT9cBo,4271 +pandas/tests/io/json/test_deprecated_kwargs.py,sha256=DKuEh2V2IkJOu-BnurWvax8Mq5EcQHtG-K-zncGZRpo,690 +pandas/tests/io/json/test_json_table_schema.py,sha256=UBsZrbfFVOHVoUTLGbeYOL5FXGapwH_vXQh2MWVjr8I,29623 +pandas/tests/io/json/test_json_table_schema_ext_dtype.py,sha256=mTwJ_IpOBewvrLU98eLo-_yibYtOqD64LKLI_WIr5n0,9500 +pandas/tests/io/json/test_normalize.py,sha256=U1U55CZyUP5fB17qRIPw21afnDU0S0ZiIVKimzXW0do,30837 +pandas/tests/io/json/test_pandas.py,sha256=RzSDeFL8rkY9OS4uPLDop8VDS3x64hZ5YqdvKd2bKSU,75134 +pandas/tests/io/json/test_readlines.py,sha256=6ATYnh5lWq01dzsQeYOQqC-FRmBrG0HfY4ILZQh5EKA,18565 +pandas/tests/io/json/test_ujson.py,sha256=KtsPVYbtCu3IezkQimz6Gf1OogYNY_vPO4qakcfLv7c,36120 +pandas/tests/io/parser/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/parser/common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/parser/common/test_chunksize.py,sha256=qLLfPVaA490mVRLe92HCfBombgftSNTo65r5QDqWhbw,7757 +pandas/tests/io/parser/common/test_common_basic.py,sha256=QmbYKytrTQpnVVpHrXMivHVK6aJFvPtVPdSiU294Xa4,26037 +pandas/tests/io/parser/common/test_data_list.py,sha256=SJTVxZsQzJUmId6ZqXP7bm82NLWMUBKYrPQxsWEcHxY,2116 +pandas/tests/io/parser/common/test_decimal.py,sha256=QuFpFpw7cfUFf-drWp-OpghaNT21nm5zQYKQugJn8zA,1588 +pandas/tests/io/parser/common/test_file_buffer_url.py,sha256=r1sPHwAIYzlgY2YmffNRu3w_g-6xcSQCGy3kEExdzKs,11483 +pandas/tests/io/parser/common/test_float.py,sha256=7P8p1G0gmVFXrVlkUvFPKgaLjlaWBOpIyztuTy2tzdk,2152 +pandas/tests/io/parser/common/test_index.py,sha256=HsBCRshT8aAamkvwpfGkSY2AWarC9z7YpiyAwi0CHg4,8030 +pandas/tests/io/parser/common/test_inf.py,sha256=ODKFUPVYOANoMq64knNGPij2oPEVpAwSoYlP2StgdJ0,1777 +pandas/tests/io/parser/common/test_ints.py,sha256=k-WG0wVqDFwKCDMmoSjZfPS4CJv1LAfR_yhT__zSHSQ,6502 +pandas/tests/io/parser/common/test_iterator.py,sha256=8KF39K24m9_6jLr0JtXv-aJu0RsVAxHD1W3wCQl1bOc,2741 +pandas/tests/io/parser/common/test_read_errors.py,sha256=23t8VJ57PWPoXBLdnwSx_WcPeRS5H2zNs5AFc4oLTbw,7692 +pandas/tests/io/parser/common/test_verbose.py,sha256=nIdnxbNR0Nirx51p0Y62ykQirpVI7BXPF1NInKG-HLc,1317 +pandas/tests/io/parser/conftest.py,sha256=6HDgTV8Onq2XBRgAFz6BhEt2AGe7SkynhvV9de5ndZU,8167 +pandas/tests/io/parser/dtypes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+pandas/tests/io/parser/dtypes/test_categorical.py,sha256=vHtAGX4K-YVvgD3f3xdmNr_quIHCn_YA22FEJWnpuUc,8820 +pandas/tests/io/parser/dtypes/test_dtypes_basic.py,sha256=yrlflpXLyB3h3lTgxFQ_HHe1ygopIwaIyijfKJXbonM,15919 +pandas/tests/io/parser/dtypes/test_empty.py,sha256=qIdwn5Pl5p2glMXych2kXHWdWOr9p8PfQD0vmyZiHDQ,4854 +pandas/tests/io/parser/test_c_parser_only.py,sha256=hiBlFPKxiEphpUmgZkBSjxeVPEYMm0I7rGNPJoEnWmc,21240 +pandas/tests/io/parser/test_comment.py,sha256=TpFoZQjs8a88s9SEgPxtE32_vmYHu44oTWC65bDrp5U,4824 +pandas/tests/io/parser/test_compression.py,sha256=SlnJBvRjbAzZEKHThwN_-72ty6P0R0zhZplpmwJjnXg,6491 +pandas/tests/io/parser/test_concatenate_chunks.py,sha256=RD1MUklgLBtBNvJu5J92cVZbrO3n38UzdQvh4BAvAqI,1128 +pandas/tests/io/parser/test_converters.py,sha256=Uc1WnuqqJMZW8bw99MLIRV-vlpD4QFK-bXzcAwsuhgQ,4983 +pandas/tests/io/parser/test_dialect.py,sha256=3Wxee3glu8U_hSfsmcqZc7fxbeHThUjqBihGyHNnPLg,4292 +pandas/tests/io/parser/test_encoding.py,sha256=0qCDFl4Fi8mUh1lyiE6QZOqLbOpzkblVYhKbbQrZcnk,9845 +pandas/tests/io/parser/test_header.py,sha256=3kqnIfkQKCd8OSfV0tZzclM7nf6gI238kBLWdc4yPqk,18597 +pandas/tests/io/parser/test_index_col.py,sha256=4OxKWCBNn7UmP5r1ChxwU7Ca3JfE96Dw2d1_Mnk2nBw,10723 +pandas/tests/io/parser/test_mangle_dupes.py,sha256=zPhOMpKBEvOfk02Al8yzlExyfIbAHfdnLEZ--nRtIQA,5021 +pandas/tests/io/parser/test_multi_thread.py,sha256=2493wkVXYMs43SrWZBJe26lzet-IX_JqFC3jy1JTHP4,3839 +pandas/tests/io/parser/test_na_values.py,sha256=yThpzaCTRXSJeCWa4XRwktODVxUEj7AQV6MLg3D6G-U,17464 +pandas/tests/io/parser/test_network.py,sha256=s6Qz75Zz1YgQjUt2r9tlm4f-Nvj6SKmns9um3zKhS6A,12677 +pandas/tests/io/parser/test_parse_dates.py,sha256=aF3Jv4uKH5nvH8YNQkPZDJvTG9KLRXmOdd6PyXdtLMA,66129 +pandas/tests/io/parser/test_python_parser_only.py,sha256=XQR_qtK4oc0q5PB5xXFQPSzrwGtv5f3mgnLaXIW5qJA,15845 +pandas/tests/io/parser/test_quoting.py,sha256=YR4jcB2dOHNHlCEl58pmcYrb5skAcAmB39HOPBeaYas,5482 +pandas/tests/io/parser/test_read_fwf.py,sha256=jjwYg8tGVt7shlGLARDpJAHcFrG3Zl9SF1syEdx_O4I,29555 +pandas/tests/io/parser/test_skiprows.py,sha256=qdQJI9Of1G5xNji7s3_R5t7LAPJ9nLauielWHt97Q4I,7845 +pandas/tests/io/parser/test_textreader.py,sha256=zE6YACsvUSiHx4SV4oQL8o3L9UeHG5dYugeV4ekSD7Y,10651 +pandas/tests/io/parser/test_unsupported.py,sha256=T4RVy0WMe11GH4tOH372BxNK-IsLRz4ZHHEp-CmsqL0,7440 +pandas/tests/io/parser/test_upcast.py,sha256=XEjHUvgExlKwxTCSjSfWMxjwge0HeW9q2BMIQGuxfTk,3141 +pandas/tests/io/parser/usecols/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/parser/usecols/test_parse_dates.py,sha256=SAPzpRVn59UBe513Qa1zNGPjP5L2b1dKiiZ0mQCpKsk,3817 +pandas/tests/io/parser/usecols/test_strings.py,sha256=BmS16i1PDoJ_GyrlAI_W_36HQNIIyeUzdbeFg9T9G4c,2476 +pandas/tests/io/parser/usecols/test_usecols_basic.py,sha256=pcq1DY4Ih0BjiyeQlUTpHIbVncPeHPX4Ser49mgCcgU,13399 +pandas/tests/io/pytables/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/pytables/common.py,sha256=m3IH26TCzLDpS8ctvzJKLA8x414ur5jlX3sdT4sB4m8,1264 +pandas/tests/io/pytables/conftest.py,sha256=vQgspEHypJUvbAU3P0I5BDBW2vRK4CgmcNqY5ZXksns,136 +pandas/tests/io/pytables/test_append.py,sha256=7grVyolhOZ6eqsm4CMRwSlLU1Zl_TPkcw69vgCZXNXM,34007 +pandas/tests/io/pytables/test_categorical.py,sha256=3LE76h20pVoXpl0dWV3jdhvZqpXyAnyrMJaBBNmRwfc,6978 +pandas/tests/io/pytables/test_compat.py,sha256=qsaDgIDMQOOMA_ZYv7r9r9sBUUbA9Fe2jb2j8XAeY_s,2547 +pandas/tests/io/pytables/test_complex.py,sha256=rp0vo-h5XzG3rDxVYI2yNGcDs4tkGBG9sAdhizanXvg,5904 
+pandas/tests/io/pytables/test_errors.py,sha256=dBbFUB2KIbuzZUN68IYv0VRKpBFtk_HdbeckFC-wqUo,7637 +pandas/tests/io/pytables/test_file_handling.py,sha256=hiekYWZcyJw8GSmxNReiNs9xVKkpJN7IWdNfsA9Q1yI,12701 +pandas/tests/io/pytables/test_keys.py,sha256=qagoEPILUP_9xHGeKMKAol3fLDn4l29-c5axIVjROi4,2297 +pandas/tests/io/pytables/test_put.py,sha256=4zgw4cE_2jklcZIJ8NBpxs4ocErYwvAob-uhdfuSWlk,11531 +pandas/tests/io/pytables/test_pytables_missing.py,sha256=mS4LkjqTPsAovK9V_aKLLMPlEi055_sp-5zykczITRA,341 +pandas/tests/io/pytables/test_read.py,sha256=tFlVgYMqSU3O4X0Dta9z_1fwV4g7OsBRzUxaf4_yJgM,13021 +pandas/tests/io/pytables/test_retain_attributes.py,sha256=XoUey8OFNJd6N52fgY1bekrPntdG-fnU1UcZsewh6mo,3075 +pandas/tests/io/pytables/test_round_trip.py,sha256=fT5Ep-ZVY3jVwKlYJ3geGH2YrFZ6MbfKRhSJH3cBBQc,17267 +pandas/tests/io/pytables/test_select.py,sha256=v9qKuCMfpPSFS9PNHmlfxFEjDOXuTXPGNikAsAMbKQw,34143 +pandas/tests/io/pytables/test_store.py,sha256=jNfe3uE2OAezthUtbM8Ilc0wIm9SCpULwJD-d5KpP24,31451 +pandas/tests/io/pytables/test_subclass.py,sha256=i4iHg-sAjOVb-VfVuY33hVMU3JRWY0xGFv2v1CtWpkI,1361 +pandas/tests/io/pytables/test_time_series.py,sha256=44PRKqisN-LwsQYHW7258AYlbHomFbeDRVM6TUo_Il8,2243 +pandas/tests/io/pytables/test_timezones.py,sha256=sxle2rq9bgBi74xjEbo6gL712Kz8f3XYS52tDJitBfI,11645 +pandas/tests/io/sas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/sas/test_byteswap.py,sha256=fIqzF9LZs3TLm7JI4tEk4JxkynmWqZ5TydCmc12sGQs,1987 +pandas/tests/io/sas/test_sas.py,sha256=M9OeR39l3-DGJSBr84IVmnYMpMs_3xVfCgSSR8u7m-k,1057 +pandas/tests/io/sas/test_sas7bdat.py,sha256=73fWCcg0orYq1YoGw_tePqpksl7J-Ps49xz1d5yIOEM,14274 +pandas/tests/io/sas/test_xport.py,sha256=-gNRR9_2QZS2dQ7Zu756Omg5Bpaz-2I5nCovqEqJVwU,5728 +pandas/tests/io/test_clipboard.py,sha256=vvtY2mhEL4phvvyBesC_9zR67mMs7rB89Y4mMbO_guk,14724 +pandas/tests/io/test_common.py,sha256=fT-7j9loMTQ6fu3Ex7frkE3phzfM2qXUW_7vJx4ArDc,22455 +pandas/tests/io/test_compression.py,sha256=XrO5RvTw4-PNsfQAW6gnnR7tBMqgOfrhjs6tqOB9QQw,11811 +pandas/tests/io/test_feather.py,sha256=HUoUJCYbWvj_7-jGdB28Lyna0246nSXjyp7E6NxLRBY,8345 +pandas/tests/io/test_fsspec.py,sha256=dZDKPN8oWZiW0IhoB1dP825msEo4rdFLbsR3oJX7aYw,9650 +pandas/tests/io/test_gcs.py,sha256=UgqoYxNbmNBWmJVNH8I6dQNj58KwUqDHdBAxk0tgY_Q,6787 +pandas/tests/io/test_html.py,sha256=PJBHd6A2vv4_l08e6QMViEnwb4Hhjm9j_n00AdHp2-o,54543 +pandas/tests/io/test_orc.py,sha256=6iJvhbgg8RhZ1UrVo0ETwaRoL0wdaJQaLZ9PMi8ZWzE,13551 +pandas/tests/io/test_parquet.py,sha256=9dRdsOsyEMPqQ_x27Rt0kX09F2np2KGHFERs_WFybdA,50432 +pandas/tests/io/test_pickle.py,sha256=b7r2pn1x1CSM0JqeCIHbtCJxPUzmLCmO5TrbLpuEfPo,18149 +pandas/tests/io/test_s3.py,sha256=vjUi3XP6hzANckna0wuEvaJ3InmMVCiovmHGn0ikX4U,1451 +pandas/tests/io/test_spss.py,sha256=Y4eTQ7GkWUKP1a4GGaChQ1ljIKgfAnvdSxOH827uNKs,4142 +pandas/tests/io/test_sql.py,sha256=24B_v5Q927-zydD-cmWBe8hbSiY906aRsNu8dQVyJBE,126406 +pandas/tests/io/test_stata.py,sha256=PChggJp4qRTXjGOxCnvsIg6w8kLKEV8fRZdS2g6nwz4,90083 +pandas/tests/io/test_user_agent.py,sha256=LGsT19K4f8nNxxoE4by4gh02QXfHr4c4F1CElbsS69g,12398 +pandas/tests/io/xml/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/io/xml/conftest.py,sha256=-rCyda1S_p28p836G7ih3rfkp0pOYfP8bMaf6h4y_aA,833 +pandas/tests/io/xml/test_to_xml.py,sha256=IxG7rT8KV0BghiUMvVMyd5GkbDR9xqWSmSDqT3CUAKM,35612 +pandas/tests/io/xml/test_xml.py,sha256=m0VyejIFj866i9zc77Sh0cArtalfvyP20fsS_ehXebI,60652 +pandas/tests/io/xml/test_xml_dtypes.py,sha256=lrmkC2eufsl0onMH7LpGoICCJJYZIhu4Qs54qG4KBPM,13199 
+pandas/tests/libs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/libs/test_hashtable.py,sha256=tb7QLENzCsD8RubZaT_94Pm15nxm6RFBFCvHB1cWD7E,25645 +pandas/tests/libs/test_join.py,sha256=z5JeLRMmF_vu4wwOpi3cG6k-p6lkhjAKPad6ShMqS30,10811 +pandas/tests/libs/test_lib.py,sha256=iiYT79WGEiF-nHJuz7k-AoKwxd9x0BjcGry4j5SCFrc,10592 +pandas/tests/plotting/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/plotting/common.py,sha256=rfTz4Uv56ln4zPBL-w-bgk2itb6jQM6uV-svjeMxZVA,16905 +pandas/tests/plotting/conftest.py,sha256=WGxjahxQkw-Gk4DlnLW0rDsei0dmuoCuZusNMepwty0,1531 +pandas/tests/plotting/frame/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/plotting/frame/test_frame.py,sha256=FFiceWOKojl2XHAwby12IzcbmDkTUqD7XTVQCcUz4cc,94387 +pandas/tests/plotting/frame/test_frame_color.py,sha256=gBkX_6DMH-joE-4GjwZpIYgWHJkrWPPDJ8R9gKuHqH8,28488 +pandas/tests/plotting/frame/test_frame_groupby.py,sha256=JNd4J9E4BEtcU5ed47_SZK5p77P6vthENn_shRPbAJQ,2547 +pandas/tests/plotting/frame/test_frame_legend.py,sha256=mwMC0RmOKSzipTT7yYs5j0N9iM3y8GGKSKGmzqkCEwE,10440 +pandas/tests/plotting/frame/test_frame_subplots.py,sha256=i0SPuVTEgX8JY7DMKS_NWP_eC0aeuaH0PaGOmgmq_qU,28983 +pandas/tests/plotting/frame/test_hist_box_by.py,sha256=8jqVQfLrE5AKvn7iKMX7L5Gbe7e4rv6Ic8MnNp7NALI,10969 +pandas/tests/plotting/test_backend.py,sha256=c_xYZTnIMEta8plZjZkAzvAU8ijIZEjN_nsCz_upDzk,3362 +pandas/tests/plotting/test_boxplot_method.py,sha256=E8YVWugnCbgfz69dExxUir7MP6yoPpdyeg8fwUQPLMw,28617 +pandas/tests/plotting/test_common.py,sha256=if9WnxryRdUhub-3yjdTEKO2PME-Yhf5YIG8e2nvAXU,1869 +pandas/tests/plotting/test_converter.py,sha256=0653UunEuOlI9Esji3t_EwWbe2ju63kXdRj_5IRyovc,13181 +pandas/tests/plotting/test_datetimelike.py,sha256=dNRcQCw4hUH4_5lXI2iy_H6phHEGR6Qeq6OLtBpKVY4,62139 +pandas/tests/plotting/test_groupby.py,sha256=mcM2bOmfvJteLz9H0qMawxN3Yef-Nj2zCa_MUUBWF_c,5735 +pandas/tests/plotting/test_hist_method.py,sha256=VCrzCehr-H2NoRotV0tTZboPlDkOi_xgSMZ0Y0MyzeE,34849 +pandas/tests/plotting/test_misc.py,sha256=uYP_TY26CnT2lxYwBy-VhJcdWp0G15J3zlGjD_KPcsw,23877 +pandas/tests/plotting/test_series.py,sha256=EClu8ZmgoSpoQZlB66m7ntCrCVZXxC0q1dYZUbck50k,35141 +pandas/tests/plotting/test_style.py,sha256=3YMcq45IgmIomuihBowBT-lyJfpJR_Q8fbMOEQXUkao,5172 +pandas/tests/reductions/__init__.py,sha256=vflo8yMcocx2X1Rdw9vt8NpiZ4ZFq9xZRC3PW6Gp-Cs,125 +pandas/tests/reductions/test_reductions.py,sha256=JY2Qaxx3FFoiaeKNU8y6Fwg_PPMIGxMJzf97HcNslKs,56640 +pandas/tests/reductions/test_stat_reductions.py,sha256=JZGwo0R0-RvWMalcWAV5BV5E2zj04HibliHJ6GH7oao,9431 +pandas/tests/resample/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/resample/conftest.py,sha256=hul1iiA5oG_dticu9z-Zmq_ZN1Q8nM759kGhcH7W9to,4536 +pandas/tests/resample/test_base.py,sha256=7jmhC_InOMP_-jyjnDPFiQAyk2OIHz6tkJ112GD8FTQ,11153 +pandas/tests/resample/test_datetime_index.py,sha256=9ZvV1z7caFnBOSUhg1CIHAEPSzbLPPnJ35TVgwTMm9g,66477 +pandas/tests/resample/test_period_index.py,sha256=-0YF_kpzA8cAVZ8U-5weaVjTdH-4nbZugr6JAj-WBww,34900 +pandas/tests/resample/test_resample_api.py,sha256=gyZE1VpL-qlTh8PzVPGQ1CJUIOQv7z68ah5uLkg4ea8,34894 +pandas/tests/resample/test_resampler_grouper.py,sha256=60_tCpCZiTs6kVuuPbeOPZ8gFbLqyQcJWW0PybgRYeA,21133 +pandas/tests/resample/test_time_grouper.py,sha256=1wFoHjuEEU3vlTIzN-5nYOlgw3hEc2skd-_n_aeu4ko,11804 +pandas/tests/resample/test_timedelta.py,sha256=dk4h_dFcjdhEA90Ro0U7k5g043EJ_SrcfiOx_bwCVso,7005 
+pandas/tests/reshape/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/reshape/concat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/reshape/concat/conftest.py,sha256=s94n_rOGHsQKdP2KbCAQEfZeQpesYmhH_d-RNNTkvYc,162 +pandas/tests/reshape/concat/test_append.py,sha256=mCBndbLvwmM8qTbwH7HoyZjFGLQWOsOMGjn1I1Mz8PA,14299 +pandas/tests/reshape/concat/test_append_common.py,sha256=ZnzgE9cMcAAUD4BmgiIAvRomlZxn4FZcIkkbct7kEDw,27762 +pandas/tests/reshape/concat/test_categorical.py,sha256=UUR6EtT0ezcAiemOOCb6A0yaD01KNLo49jy8-03QVP8,9434 +pandas/tests/reshape/concat/test_concat.py,sha256=pddCeW3EAh1MLLKgHBZe8X7rZnDX2oEIgClXD2gvzS0,30728 +pandas/tests/reshape/concat/test_dataframe.py,sha256=iVvSVbCoZwI_dAoKG0KJlx1RjgB-JeQBq65gSfawsy4,8869 +pandas/tests/reshape/concat/test_datetimes.py,sha256=toQ1OwOCIGdww7P4J9Ba59UhF-gk-NQJHg_WyjZ4JZk,20755 +pandas/tests/reshape/concat/test_empty.py,sha256=7VVtsJNjaCnSJrpmlZ6VoLCsJeyYaOAOrnlvaXIVZKg,10082 +pandas/tests/reshape/concat/test_index.py,sha256=BReX5IGZn0IzikEOJTbs4AzKkFSjwYmqJoV5uEUboq0,17301 +pandas/tests/reshape/concat/test_invalid.py,sha256=t2UTFfFPnQP3OmRnssjvz_qJPZ7N3mRg-JZ8ux1MoXE,1630 +pandas/tests/reshape/concat/test_series.py,sha256=ua7QXw1BPZYK4RXxnfmiGUa87Wf4MPWMCQf19T_NU80,5852 +pandas/tests/reshape/concat/test_sort.py,sha256=RuXIJduLa56IJDmUQaCwyYOz_U0KXMDWf04WEzi8y7E,4350 +pandas/tests/reshape/merge/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/reshape/merge/test_join.py,sha256=LCz3gaeP2fZ-2Oz9_qgeEPAGcnI9GD02xOBsImG-hj4,35835 +pandas/tests/reshape/merge/test_merge.py,sha256=NSq_jJ4QyRuXCqpnrdN8oCAO-axBPMCrE10vruqF7jk,101038 +pandas/tests/reshape/merge/test_merge_asof.py,sha256=58jDtFKHpHEdGGoway_oFPnZrZr5B82sz6BspLwMZ4w,57528 +pandas/tests/reshape/merge/test_merge_cross.py,sha256=9BVH6HWJRh-dHKDTBy8Q2it97gjVW79FgPC99HNLIc4,3146 +pandas/tests/reshape/merge/test_merge_index_as_string.py,sha256=w_9BccpqfB7yPhy_TBlMGx2BPOBwPhfg-pYRKA4HEC8,5357 +pandas/tests/reshape/merge/test_merge_ordered.py,sha256=9z2vohDOB6je8FZBg6XV0afKs0fnUigv5SqbbQWTxbc,6560 +pandas/tests/reshape/merge/test_multi.py,sha256=oy74dUIi5UbavUSshoZ3Ow9J2NS_RlX3mH1k1IDtitA,30256 +pandas/tests/reshape/test_crosstab.py,sha256=rgFaYR9j837aGqzTJ3v4DeBqqNp25G_c3EVpDcF-gBM,32776 +pandas/tests/reshape/test_cut.py,sha256=iRpRilGueXd_J2D9wQUadX62CFVoYz0Gji7LPjGPWmY,23075 +pandas/tests/reshape/test_from_dummies.py,sha256=92sBfZd-jj5BSihyoRXxb0pTXUUb2bodjjHYFjKXxIc,13151 +pandas/tests/reshape/test_get_dummies.py,sha256=iM34i7jxgOihVer1HulzNWDzSPrdxn8wkXhp9ThR5pM,25908 +pandas/tests/reshape/test_melt.py,sha256=OzPURAQ2_HFqUOwgVlnmjLmZiMSZiskiPsc4yDvh8TQ,38798 +pandas/tests/reshape/test_pivot.py,sha256=7bX-sySWormHZFb5uqsgeOzYPvOZPGKtDuNhgLGZWo0,91112 +pandas/tests/reshape/test_pivot_multilevel.py,sha256=DYp3BZ0h80UEgqFs0sNVqnUWBWgYU4622wp62SdCDdI,7549 +pandas/tests/reshape/test_qcut.py,sha256=fJOkR8BkSjXliFX9iH7DX1dtSN5k23usg9zulwrPpdc,8278 +pandas/tests/reshape/test_union_categoricals.py,sha256=pxmeVsuAQ1Wm6HgVb8J12HBtUEw3UFnE9bqKLaxAL9g,15004 +pandas/tests/reshape/test_util.py,sha256=mk60VTWL9YPWNPAmVBHwkOAOtrHIDU6L3EAnlasx6IQ,2897 +pandas/tests/scalar/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/scalar/interval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/scalar/interval/test_arithmetic.py,sha256=Hu-HBZGYo6m9JrAl6ccoVaPaV_ZSZMKYX5Qywki8BVU,1837 
+pandas/tests/scalar/interval/test_interval.py,sha256=uKa8JMBrKHtIQWCl472wfofDkFYjeRpzvEXfHWI56Fk,8711 +pandas/tests/scalar/interval/test_ops.py,sha256=wtRHnuxgZzFvsXz4XfGqiBdZCIQRCNm6Lxdwm0xHaic,4170 +pandas/tests/scalar/period/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/scalar/period/test_asfreq.py,sha256=IcjoObHYocZWJ6GZchfz-nlBl4kspb1GEu2eurT3f2k,38070 +pandas/tests/scalar/period/test_period.py,sha256=udJedhe0YwPtnnOgiPH6s77tjUU1xwqM_pyIZJVFAgI,55594 +pandas/tests/scalar/test_na_scalar.py,sha256=0t4r9nDTQtXUSeXRBxDfgWegznLM6TvMk2pK0gLScJc,7227 +pandas/tests/scalar/test_nat.py,sha256=01vztEnGlTeG33znF9i-5yaSo79u3aSAeWqRNxJ71Us,19591 +pandas/tests/scalar/timedelta/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/scalar/timedelta/test_arithmetic.py,sha256=6cTlhYeykDcZdsmu7AIbbybEnosul7e3xXYcuqsuJaI,38016 +pandas/tests/scalar/timedelta/test_constructors.py,sha256=1zWhcU5nzC2wXTKiQu8ssgfb_S3leOP3MZLn_N6fiII,17267 +pandas/tests/scalar/timedelta/test_formats.py,sha256=afiVjnkmjtnprcbtxg0v70VqMVnolTWyFJBXMlWaIY8,1261 +pandas/tests/scalar/timedelta/test_timedelta.py,sha256=ma1eFeOvVIFdKvBD6xhOMAch5DYEYMOfCJGqnKF-8oc,35286 +pandas/tests/scalar/timestamp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/scalar/timestamp/test_arithmetic.py,sha256=cj4SodETJFyatlaZiL9ZpQMpknExU57YmTNQgAzZGRk,9965 +pandas/tests/scalar/timestamp/test_comparisons.py,sha256=zxzSqDtYxP7Fc4vXcIqxYq0Yg7KeKEdAn3iwbgAv-ns,10059 +pandas/tests/scalar/timestamp/test_constructors.py,sha256=UFaiHqnj84Q1wrY4f-SYKPueFePIggCrsWoNnCGj3s0,32760 +pandas/tests/scalar/timestamp/test_formats.py,sha256=LIlcteUcqqIEmbLniLNRn16m3IR3pd03_hX4aZY1fJc,2162 +pandas/tests/scalar/timestamp/test_rendering.py,sha256=1VsIozZ9CXIt7FXlI4PJDFD6jpPtD5fsfScZkZufqm4,3173 +pandas/tests/scalar/timestamp/test_timestamp.py,sha256=eLMIh0p_Zi-UmvSdRBV0AZqjCgqQ7Ly0HH7aOxkvahc,40135 +pandas/tests/scalar/timestamp/test_timezones.py,sha256=BDl7v2Ql_iDbmFX1O1BXAudwL6DB32beH1ImucwnRV8,17717 +pandas/tests/scalar/timestamp/test_unary_ops.py,sha256=tPi_G0-zFIjs3GOEuGbwy589MVCtJiN38ql-Rri3jg0,21586 +pandas/tests/series/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/series/accessors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/series/accessors/test_cat_accessor.py,sha256=1-ZRI4h_lsBclkXljCrYFwGIYXbhrpE1iET-MjNKngk,9611 +pandas/tests/series/accessors/test_dt_accessor.py,sha256=rC7YAkMPUfUgDJ_qHmuHsAgwF30UJgjt8wYBLO25f94,29450 +pandas/tests/series/accessors/test_sparse_accessor.py,sha256=yPxK1Re7RDPLi5v2r9etrgsUfSL9NN45CAvuR3tYVwA,296 +pandas/tests/series/accessors/test_str_accessor.py,sha256=M29X62c2ekvH1FTv56yye2TLcXyYUCM5AegAQVWLFc8,853 +pandas/tests/series/indexing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/series/indexing/test_datetime.py,sha256=20Hy6GUFTpealD4dXSEE-epGzknA6gRgBdnQUg16buM,14578 +pandas/tests/series/indexing/test_delitem.py,sha256=bQwJNiGqH3GQQUkq7linphR9PL2oXOQSeAitqupiRRQ,1979 +pandas/tests/series/indexing/test_get.py,sha256=g81xoprF31o9hWUo-xtiMmJkP4Qp5o1a9xV7CJrgeFY,5670 +pandas/tests/series/indexing/test_getitem.py,sha256=4HIugezhQbKuIwESUnIMAI8Yi4sYCb7q0lX13KGKdcQ,24366 +pandas/tests/series/indexing/test_indexing.py,sha256=7q1ZEpMAFKrTwL1NL52Wi8h6uhJEsX1kZfH7BzQuGco,16323 +pandas/tests/series/indexing/test_mask.py,sha256=ecPdJ-CM8HbaaZoGUfwcoOuo0eIz7aEq-x8wL0PZWbE,1711 
+pandas/tests/series/indexing/test_set_value.py,sha256=UwVNpW3Fh3PKhNiFzZiVK07W871CmFM2fGtC6CTW5z0,991 +pandas/tests/series/indexing/test_setitem.py,sha256=QkN_ktLGfDzmOvjCPlxZMwj0SaM_QBPCS9D9vUDuwr0,58617 +pandas/tests/series/indexing/test_take.py,sha256=574cgL0w0fj-YnZma9b188Y0mTWs-Go6ZzB9zQSdpAk,1353 +pandas/tests/series/indexing/test_where.py,sha256=JmB0oVK68IQXuUpeM2bqJQ_OziyKc-m3bKXwDJ8ZStk,12959 +pandas/tests/series/indexing/test_xs.py,sha256=8EKGIgnK86_hsBjPIY5lednYnzatv14O6rq3LjR_KxI,2760 +pandas/tests/series/methods/__init__.py,sha256=zVXqGxDIQ-ebxxcetI9KcJ9ZEHeIC4086CoDvyc8CNM,225 +pandas/tests/series/methods/test_add_prefix_suffix.py,sha256=PeUIeDHa9rGggraEbVJRtLi2GcnNcXkrXb0otlthOC4,1556 +pandas/tests/series/methods/test_align.py,sha256=lHlTUaTyBhgSh0Z9-hg0k1SShB4tpKhaJWtstR4YHRw,7700 +pandas/tests/series/methods/test_argsort.py,sha256=B-tk3s7wtMqQLAvfqgzW-Vb0639VcPfaK1igCZGUlq8,2761 +pandas/tests/series/methods/test_asof.py,sha256=C_Odg0CV7GMDYIcSvvv9D6w0VPjYMjWPPJo_QN2Pq0Q,6324 +pandas/tests/series/methods/test_astype.py,sha256=qRQdSRyHNC4Ln1t2j6d7zotvrcFCGuSYMcV6FS0Bdy4,23708 +pandas/tests/series/methods/test_autocorr.py,sha256=SnxELB9bcE8H68tYUDN3UKMMPu-sEfbwTlLUn8WirV8,1015 +pandas/tests/series/methods/test_between.py,sha256=QsQgMbGS2Bq6vMOClUpPIGiIt9fRBFnIL8I--S545G4,2585 +pandas/tests/series/methods/test_clip.py,sha256=3viA5lsy_5RrL2z0h4zRWXlOWqk63miqcIXh8_hkPSE,4777 +pandas/tests/series/methods/test_combine.py,sha256=ye8pwpjolpG_kUKSFTC8ZoRdj3ze8qtJXvDUZ5gpap4,627 +pandas/tests/series/methods/test_combine_first.py,sha256=j2PqB-Xe4Yzm4jFbfQn0bFsUwUrOpockvX-BlCN7Iwc,5310 +pandas/tests/series/methods/test_compare.py,sha256=uRA4CKyOTPSzW3sihILLvxpxdSD1hb7mHrSydGFV2J4,4658 +pandas/tests/series/methods/test_convert_dtypes.py,sha256=7aRS8HAMuaKJZtYLvjhTu71emFpmWYb56_UL1d_fLXo,8302 +pandas/tests/series/methods/test_copy.py,sha256=E3YqECoD3R31d0m4P3jfODizCKbug7y7H2GNiiu8qnk,2984 +pandas/tests/series/methods/test_count.py,sha256=mju3vjyHXg8qRH85cRLWvRL8lFnF7HGdETjt2e_pK7M,938 +pandas/tests/series/methods/test_cov_corr.py,sha256=28Btj4dOOcjhdFcZsCrKNJMpNayY060B5inq6CAZd0k,5464 +pandas/tests/series/methods/test_describe.py,sha256=brDSZ2qicnLANI2ReYiYQiXzu6m9VxFr4DVULEyGgSA,6646 +pandas/tests/series/methods/test_diff.py,sha256=ctmz7_gFctiDK-C7YqqeeRnF3FPOcFIcG7ln7E4P-N4,2425 +pandas/tests/series/methods/test_drop.py,sha256=nqTXYfvY76BZ2cl46kUb8mkkll5StdCzBaTn_YkGfIk,3394 +pandas/tests/series/methods/test_drop_duplicates.py,sha256=P6jHz77EAtuiI2IE25pNjBx3pXteUc0JUMoj2mWo8T4,9235 +pandas/tests/series/methods/test_dropna.py,sha256=D15V4c9k3xiqA1QzZEk83yXOsnR3bMQ11UKh4coL1eQ,3414 +pandas/tests/series/methods/test_dtypes.py,sha256=IkYkFl0o2LQ5qurobwoPgp4jqi2uKU7phoAk3oZtiYo,209 +pandas/tests/series/methods/test_duplicated.py,sha256=ACzVs9IJY4lC2SQb6frHVe4dGd6YLFID5UAw4BuZa7c,2059 +pandas/tests/series/methods/test_equals.py,sha256=eUB1_euVjZmlia8F-JWA1wdP6DOhnDAiON-A4OaW2TQ,4020 +pandas/tests/series/methods/test_explode.py,sha256=IEULfWfndz_gCHhblAU8q0_DgFUgau-Seut62PZuBn4,4704 +pandas/tests/series/methods/test_fillna.py,sha256=eR0_fmqmFgeQYaRnkLQzOTPa-ksRKtQQEVZHnx-MN5A,34888 +pandas/tests/series/methods/test_get_numeric_data.py,sha256=XvdjfI_hKghaIHcFTtqOnQWelRCKEyc2sCUECNutUss,1084 +pandas/tests/series/methods/test_head_tail.py,sha256=1EWojjTzcLvYH34VvyvEHxczDy7zL3dMTyayFHsVSzY,343 +pandas/tests/series/methods/test_infer_objects.py,sha256=qjI71XDxabpvuyg4_4qWo0X6mXwAdqq5yl6huGWwPk8,1903 
+pandas/tests/series/methods/test_interpolate.py,sha256=yFeZ6Qd8FapcaVBVChT_LTNOEt0QKJg19Ov5EfLJHdk,34262 +pandas/tests/series/methods/test_is_monotonic.py,sha256=vvyWZFxiSybq88peF0zN5dM16rH2SgCEEA-gT2rRSSY,838 +pandas/tests/series/methods/test_is_unique.py,sha256=d3aLS5q491IVZkfKx8HTc4jkgTtuN0SOaUVfkyBTImE,953 +pandas/tests/series/methods/test_isin.py,sha256=juZ6Q0xjrx0Z46zeR0Co0gJbFE4vWPY7mv9lUj3o8HM,8156 +pandas/tests/series/methods/test_isna.py,sha256=TzNID2_dMG6ChWSwOMIqlF9AWcc1UjtjCHLNmT0vlBE,940 +pandas/tests/series/methods/test_item.py,sha256=z9gMBXHmc-Xhpyad9O0fT2RySMhlTa6MSrz2jPSUHxc,1627 +pandas/tests/series/methods/test_map.py,sha256=zcSA1NKiqvIkVVbXN6CVHb5IT9CeBoFW5Omertfy_Mo,17654 +pandas/tests/series/methods/test_matmul.py,sha256=cIj2nJctMnOvEDgTefpB3jypWJ6-RHasqtxywrxXw0g,2767 +pandas/tests/series/methods/test_nlargest.py,sha256=oIkyZ6Z2NiUL09sSTvAFK7IlcfQDiVgwssFe6NtsyIE,8442 +pandas/tests/series/methods/test_nunique.py,sha256=6B7fs9niuN2QYyxjVNX33WLBJvF2SJZRCn6SInTIz0g,481 +pandas/tests/series/methods/test_pct_change.py,sha256=6U_yMCrOYlyNafAdA5m7ue9p2np-pL8NUJrUlFVkpAU,4329 +pandas/tests/series/methods/test_pop.py,sha256=xr9ZuFCI7O2gTW8a3WBr-ooQcOhBzoUK4N1x0K5G380,295 +pandas/tests/series/methods/test_quantile.py,sha256=0EeomT8JtE0LY_2XoxiYHkJNpBjL5okHgI3qijQuuws,8035 +pandas/tests/series/methods/test_rank.py,sha256=PokA09Wyiil9JGQ5CBNqEtRP_uvZlwTWPd-8TsGsrfw,18104 +pandas/tests/series/methods/test_reindex.py,sha256=aX9tIr1M6AIYCGaHOwu_BUOAtT3wA4Xzvz0iopSWM0o,14336 +pandas/tests/series/methods/test_reindex_like.py,sha256=e_nuGo4QLgsdpnZrC49xDVfcz_prTGAOXGyjEEbkKM4,1245 +pandas/tests/series/methods/test_rename.py,sha256=NCobZF4vLYPSozGQUvviQrX7uBSNsB8lgdsqZsr4hv0,5855 +pandas/tests/series/methods/test_rename_axis.py,sha256=TqGeZdhB3Ektvj48JfbX2Jr_qsCovtoWimpfX_ViJyg,1520 +pandas/tests/series/methods/test_repeat.py,sha256=WvER_QkoVNYU4bg5hQbLdCXIWxqVnSmJ6K3_3OLLLAI,1274 +pandas/tests/series/methods/test_replace.py,sha256=rEvnns4vMV56mEYm2Hsf4KfmBdO5ZcHLDWB_YDKGQa0,29540 +pandas/tests/series/methods/test_reset_index.py,sha256=BeL_XZ2rbKYwtQzLHXcGgTrAoOLux9LqH7YMYUM-Iec,7203 +pandas/tests/series/methods/test_round.py,sha256=DgFQ4IJTE9XSunMKKLi5CxvrAHjjb5Az_nT-O_vQFa8,2273 +pandas/tests/series/methods/test_searchsorted.py,sha256=2nk-hXPbFjgZfKm4bO_TiKm2xjd4hj0L9hiqR4nZ2Ss,2493 +pandas/tests/series/methods/test_set_name.py,sha256=rt1BK8BnWMd8D8vrO7yQNN4o-Fnapq5bRmlHyrYpxk4,595 +pandas/tests/series/methods/test_size.py,sha256=3-LfpWtTLM_dPAHFG_mmCxAk3dJY9WIe13czw1d9Fn4,566 +pandas/tests/series/methods/test_sort_index.py,sha256=NYWSNTCfwlFiM0G-YQGjBtt8ff3IwnRw6k2H60BfSGI,12040 +pandas/tests/series/methods/test_sort_values.py,sha256=jIvHYYMz-RySUtJnB9aFLR88s-M20-B5E5PwK9VQhns,9372 +pandas/tests/series/methods/test_to_csv.py,sha256=Zfs6_R7XM7aQhjuJ1Q1zAx48ptRIXxVAQWQJSQfNJ8s,6332 +pandas/tests/series/methods/test_to_dict.py,sha256=dIzABUIwzHmhh7po9mYnx3dYF6qvmft7phy1aABCydo,1168 +pandas/tests/series/methods/test_to_frame.py,sha256=nUkHQTpMTffkpDR7w3EcQvQAevEfflD6tHm3pTBxpTI,1992 +pandas/tests/series/methods/test_to_numpy.py,sha256=YNCq5rU8aGD9o-hf2xC1wuHb2Akn1EEoMP9A_dSE_wY,623 +pandas/tests/series/methods/test_tolist.py,sha256=5F0VAYJTPDUTlqb5zDNEec-BeBY25ZjnjqYHFQq5GPU,1115 +pandas/tests/series/methods/test_truncate.py,sha256=suMKI1jMEVVSd_b5rlLM2iqsQ08c8a9CbN8mbNKdNEU,2307 +pandas/tests/series/methods/test_tz_localize.py,sha256=H9HAKzuEYpqFHbEmDKtxIMwGE6prZibgOqT4MMC1LyM,4305 +pandas/tests/series/methods/test_unique.py,sha256=MQB5s4KVopor1V1CgvF6lZNUSX6ZcOS2_H5JRYf7emU,2219 
+pandas/tests/series/methods/test_unstack.py,sha256=go9V8rzyVtaO-ftyPGXAvFHBWuh9bBMl56-JluUl5BU,4939 +pandas/tests/series/methods/test_update.py,sha256=SXU6PT7FB8RMbqNuKTccaSacwAXZJ9THX9Y7Z0A0uRs,5194 +pandas/tests/series/methods/test_value_counts.py,sha256=acS6QcT5NsSlRmfCKtYMC36ubxOpB7uYJlzpjiTZX7Y,9377 +pandas/tests/series/methods/test_values.py,sha256=Q2jACWauws0GxIc_QzxbAOgMrJR6Qs7oyx_6LK7zVt8,747 +pandas/tests/series/methods/test_view.py,sha256=C8dwXCYdRVgs4ZR3UExOgB1TFCO46KULFffn3VEbzk0,1699 +pandas/tests/series/test_api.py,sha256=fnHWS1YkBlhfMwePz0I_qIzlCviWT3UdVdbHvHTqiS0,10007 +pandas/tests/series/test_arithmetic.py,sha256=NujmfNzJRM0WgnvKr2ArX0MfWQeD3Ylj2C9xUCJADKM,32724 +pandas/tests/series/test_constructors.py,sha256=xLU2jVXSJ1TptriXWAwGsGu8_L3D5FBJSezK1Co0DJM,82537 +pandas/tests/series/test_cumulative.py,sha256=lYFRlmwTQBWBP-svJnt6e55b_wnCdDVZVhuvP0ezcR8,5034 +pandas/tests/series/test_iteration.py,sha256=LKCUh0-OueVvxOr7uEG8U9cQxrAk7X-WDwfgEIKUekI,1408 +pandas/tests/series/test_logical_ops.py,sha256=pYyvfk7rVOUn1v78ubGMqeQsL3mSc2LVH1CAS4ozbYE,18852 +pandas/tests/series/test_missing.py,sha256=6TtIBFZgw-vrOYqRzSxhYCIBngoVX8r8-sT5jFgkWKM,3277 +pandas/tests/series/test_npfuncs.py,sha256=OvtX42j2-yLjjnQpI-BPOPmEozGPgEmsZTA1vhMjWyQ,776 +pandas/tests/series/test_reductions.py,sha256=bM79xKJHiFNCUJNOymIanE3OcR9A6EBdM6HLJZLDV5U,5453 +pandas/tests/series/test_repr.py,sha256=_wwvMf3LqywlsgpcMbGREpKXI30yt2XUriq6muSkc4w,16192 +pandas/tests/series/test_subclass.py,sha256=aL5tgGGXZPPIXWIgpCPBrc7Q5KS8h1ipZNKCwciw-jY,2667 +pandas/tests/series/test_ufunc.py,sha256=rgT_TJ20yA2n9RlVSrJsdbdPlt8y67uq8yza-jb8iSg,14722 +pandas/tests/series/test_unary.py,sha256=Sbe_6gjcgMNCfy5dx1QRDxlLvHjNdDdWL3cBrz4x9x0,1622 +pandas/tests/series/test_validate.py,sha256=ziCmKi_jYuGyxcnsVaJpVgwSCjBgpHDJ0dbzWLa1-kA,668 +pandas/tests/strings/__init__.py,sha256=_uWelCEA7j9QwfQkgZomjbpFbuB_FlQO1sdMXak8Zn4,367 +pandas/tests/strings/conftest.py,sha256=srGhNnfZgVQloQ0hEGXFO5wwXJ-a7QJI69sep6VlVK0,5214 +pandas/tests/strings/test_api.py,sha256=rBInf2g-RgZP0dAznfrUF4zEkiz4l5vQEUK_3kmiQjU,4627 +pandas/tests/strings/test_case_justify.py,sha256=8CZvi18KX7yWOfnVpSdfKZlpRM9xs5kD0qeTszJ_Wx8,13282 +pandas/tests/strings/test_cat.py,sha256=DvCgrgIl_Gyn15Vtery19P5xOefv-PfC40DpIMYxp5g,12310 +pandas/tests/strings/test_extract.py,sha256=6ngkNivsXdthQespg4Pj1vECXVQZlKo5hYa59q_m1xI,26330 +pandas/tests/strings/test_find_replace.py,sha256=Nbwn76HXhgC0Bz7gnBxzzwl07Ko0JTqyIbJ4O5opTLA,34521 +pandas/tests/strings/test_get_dummies.py,sha256=LyWHwMrb5pgX69t4b9ouHflXKp4gBXadTCkaZSk_HB4,1608 +pandas/tests/strings/test_split_partition.py,sha256=nv4CfL7JpkN7JMsMz5phhlCaEpiU57POok1iAxH6-mI,23178 +pandas/tests/strings/test_string_array.py,sha256=C4ATNd7u-2NspzvTkNS4MQvh2ACB5iXfpWBi6Cr66yA,3467 +pandas/tests/strings/test_strings.py,sha256=ZT7exj-YhXTVKQ3z2blPcaacefsnamgvPywL-B4cbuo,25356 +pandas/tests/test_aggregation.py,sha256=-9GlIUg7qPr3Ppj_TNbBF85oKjSIMAv056hfcYZvhWw,2779 +pandas/tests/test_algos.py,sha256=XfJD7UIYy7L9_7442njsdUe3iLLoJl2KX7-hrsIItWI,82793 +pandas/tests/test_common.py,sha256=SHkM8XyjSNxUJquSiEDa3lqE0GJ7tLsfwdro0x2leAg,7695 +pandas/tests/test_downstream.py,sha256=wPHHSWlvXsjO5OYZD1QkTRYELaow6N01e-ofwZdW3F0,10752 +pandas/tests/test_errors.py,sha256=4WVxQSyv6okTRVQC9LC9thX5ZjXVMrX-3l93bEd9KZ8,2789 +pandas/tests/test_expressions.py,sha256=Ps-b6Dl8-VcP-RBnjpJEdruNNzyg9zL3ZWuVxt55pdA,13918 +pandas/tests/test_flags.py,sha256=Dsu6pvQ5A6Manyt1VlQLK8pRpZtr-S2T3ubJvRQaRlA,1550 
+pandas/tests/test_multilevel.py,sha256=3-Gmz-7nEzWFDYT5k_nzRL17xLCj2ZF3q69dzHO5sL8,12206 +pandas/tests/test_nanops.py,sha256=_kokHPt4dMrZZWLSw8PxLudRTPCeQK80uKij3RZQ9qc,42132 +pandas/tests/test_optional_dependency.py,sha256=tT5SDWQaIRBCxX8-USqnMA68FVSOdUJfUA7TapBtsK0,2684 +pandas/tests/test_register_accessor.py,sha256=6ShgolLRlqXP6aBuLBO_C2X6iwPIh3LKmYsQlly0yrI,2763 +pandas/tests/test_sorting.py,sha256=dbnO7tBkUz7HYzmOgjr5-Q8Y3SUVqxqI9z2Tl2mjRuA,16871 +pandas/tests/test_take.py,sha256=YSMLvpggEaY_MOT3PkVtQYUw0MfwN4bVvI3EgmOgxfA,11539 +pandas/tests/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/tools/test_to_datetime.py,sha256=F_ZzaBlPAiVN7mbWsMbtpw55qZaddxe6G98uDfDyc64,139180 +pandas/tests/tools/test_to_numeric.py,sha256=S2oajUwwVp9iHQ8cIf2brpsfA0bwLpiOW1c_HNT0aa0,28419 +pandas/tests/tools/test_to_time.py,sha256=CHVErvV7H_lY2WvQ2CMQqf4g00CEaDWyrJqr-RPeZF0,2300 +pandas/tests/tools/test_to_timedelta.py,sha256=7yh-Nt3CVxPC-mAS37exQliDioTGu9GjUsUEWtywtDA,11305 +pandas/tests/tseries/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/tseries/frequencies/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/tseries/frequencies/test_freq_code.py,sha256=p6h32RFKW-Mj0-1MDFtTmU66io31nZne83iTewT9W9w,2474 +pandas/tests/tseries/frequencies/test_frequencies.py,sha256=tyI9e6ve7sEXdALy9GYjMV3mAQHmQF2IqW-xFzPdgjY,821 +pandas/tests/tseries/frequencies/test_inference.py,sha256=peUtwqrtNt-swhLWPpgwJMeyHRzLpwB-fII2M1SqZLU,14607 +pandas/tests/tseries/holiday/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/tseries/holiday/test_calendar.py,sha256=HBXCzENK_gROEDauPW5xrznHMgLkaob57j8mjVvibSM,3543 +pandas/tests/tseries/holiday/test_federal.py,sha256=ukOOSRoUdcfUOlAT10AWVj8uxiD-88_H8xd--WpOsG0,1948 +pandas/tests/tseries/holiday/test_holiday.py,sha256=bqnoFmOqY7-lkmYNbF6zfyW-4dg-Xh1pXrw6Ly5bgII,10478 +pandas/tests/tseries/holiday/test_observance.py,sha256=GJBqIF4W6QG4k3Yzz6_13WMOR4nHSVzPbixHxO8Tukw,2723 +pandas/tests/tseries/offsets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/tseries/offsets/common.py,sha256=D3D8mcwwzW2kSEB8uX8gO6ARX4dB4PEu3_953APlRmk,900 +pandas/tests/tseries/offsets/conftest.py,sha256=0WCK7rSljU53z8oZFv6i5jnUGM9lLFQxtCPp_WAbuds,881 +pandas/tests/tseries/offsets/test_business_day.py,sha256=dqOwIoAq3Mcxrc0EEeqJnnDvJYCFz5lA0JewVuODhBc,6808 +pandas/tests/tseries/offsets/test_business_hour.py,sha256=BP56jBBM4XACku2GktiEL-cX2c_5HYchuzFKt_8AbFQ,59141 +pandas/tests/tseries/offsets/test_business_month.py,sha256=tG8ztJYScgDN3KCkECu21EgGbS86Rv3GNiIVkpJDLA4,6715 +pandas/tests/tseries/offsets/test_business_quarter.py,sha256=_ZTJSIppdvjLqdW1ZFArmkLK1PqeEz5Q7tqf5Tmoj08,12290 +pandas/tests/tseries/offsets/test_business_year.py,sha256=OBs55t5gGKSPhTsnGafi5Uqsrjmq1cKpfuwWLUBR8Uo,6436 +pandas/tests/tseries/offsets/test_common.py,sha256=GMDM6UzN86fMAJiXBLrL2ePMMi5dJLGLNJRzT7Bpmcg,7387 +pandas/tests/tseries/offsets/test_custom_business_day.py,sha256=YNN53-HvTW4JrbLYwyUiM10rQqIof1iA_W1uYkiHw7w,3180 +pandas/tests/tseries/offsets/test_custom_business_hour.py,sha256=a65J0d16JnK-MEzQ0UV42yYqaVsoQwcSTmYF_cG3N0Q,12312 +pandas/tests/tseries/offsets/test_custom_business_month.py,sha256=p7ptYFCOIwgMcqLfSA5vr2QBchHYeLkzNvQlfNwgJ7c,14108 +pandas/tests/tseries/offsets/test_dst.py,sha256=RRK52_UMYCRqEexPfdhG_5hcuECVn2OQUuKKKkzKyq8,7962 +pandas/tests/tseries/offsets/test_easter.py,sha256=oZlJ3lESuLTEv6A_chVDsD3Pa_cqgbVc4_zxrEE7cvc,1150 
+pandas/tests/tseries/offsets/test_fiscal.py,sha256=yBAcT8wbPe2P_dlk24mKHiF8_3bSSu4FnSTMWMhJBHk,26542 +pandas/tests/tseries/offsets/test_index.py,sha256=2e-wN5uf_y7SzO11Z7Jo6EjDC5fFPTVZLtx7G7H6ZWA,1145 +pandas/tests/tseries/offsets/test_month.py,sha256=csFAHZn7STCrICMRGqxSvIkeoAWS82FcOpT0p_y0EiI,23727 +pandas/tests/tseries/offsets/test_offsets.py,sha256=efkIekdpAHKPBT05L5lf3bxLWjqILV8j2fa9A19pZig,37896 +pandas/tests/tseries/offsets/test_offsets_properties.py,sha256=P_16zBX7ocaGN-br0pEQBGTlewfiDpJsnf5R1ei83JQ,1971 +pandas/tests/tseries/offsets/test_quarter.py,sha256=eae9t4k60ftPYO5-YCs_QUexCHA6SVj_c60I6kyaj9E,11540 +pandas/tests/tseries/offsets/test_ticks.py,sha256=F1x-BQ6kdLAPnEcBayZhxRy1o4uMCYnEglvx82z2oHs,10871 +pandas/tests/tseries/offsets/test_week.py,sha256=oPsSTLkNAkU7b0nvmJL9SWRqRAXITqgajNqKlnXVHdA,12169 +pandas/tests/tseries/offsets/test_year.py,sha256=EM9DThnH2c6CMw518YpxkrpJixPmH3OVQ_Qp8iMIHPQ,10455 +pandas/tests/tslibs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/tslibs/test_api.py,sha256=WtQZ72rSe-VOBCg9WbH9KOp6WZd6i7wXXJD0q7et25k,1492 +pandas/tests/tslibs/test_array_to_datetime.py,sha256=y7JknjwUq1oyLcOSMzK-CnxcyH5Ec7Kv2VD89Ap0w9Y,6231 +pandas/tests/tslibs/test_ccalendar.py,sha256=Rl2OjoB8pHaOyXW5MmshsHmm8nNMuHQvS_Du1L6ODqw,1903 +pandas/tests/tslibs/test_conversion.py,sha256=616kdPnHoPxhHpf3Px7P6Wyk_5kdZUEhKufxLzN1zY0,4554 +pandas/tests/tslibs/test_fields.py,sha256=BQKlBXOC4LsXe7eT2CK5mRGR_25g9qYykQZ6ojoGjbE,1352 +pandas/tests/tslibs/test_libfrequencies.py,sha256=1aQnyjAA2F2-xfTlTa081uVE3dTBb2CdkYv8Cry5Gn0,769 +pandas/tests/tslibs/test_liboffsets.py,sha256=958cVv4vva5nawrYcmSinfu62NIL7lYOXOHN7yU-gAE,5108 +pandas/tests/tslibs/test_np_datetime.py,sha256=n7MNYHw7i03w4ZcVTM6GkoRN7Y7UIGxnshjHph2eDPs,7889 +pandas/tests/tslibs/test_parse_iso8601.py,sha256=XGQ_GBOCosTiOFFjK4rYoDDZcIBitnyIb_0SXxKF9yo,4535 +pandas/tests/tslibs/test_parsing.py,sha256=wXkjtgvUV3yl13NJAPyTv0gW_Lb6Gbld29TgC36erZ0,12440 +pandas/tests/tslibs/test_period_asfreq.py,sha256=LQP7Er-5P2tBq1yDFXCJz0vHSEV23MpsQj7gwocRVDo,3119 +pandas/tests/tslibs/test_resolution.py,sha256=TfTpo9aGRlSU1JqTkSUWnXAL-pSS4bolKkZB1lLxsVY,641 +pandas/tests/tslibs/test_timedeltas.py,sha256=DaaxCrPg5Usv1UtpaVWpiYWixUtNT1FqjtS26MJq9PI,4662 +pandas/tests/tslibs/test_timezones.py,sha256=Hb56aLljCgRtBmXp7N_TaXM55ODLs6Mvl851dncnpsQ,4724 +pandas/tests/tslibs/test_to_offset.py,sha256=V5Xv79KEnCgxNpM-lyftRXzbzdx959uMWzLcDpu1htI,4786 +pandas/tests/tslibs/test_tzconversion.py,sha256=6Ouplo1p8ArDrxCzPNyH9xpYkxERNPvbd4C_-WmTNd4,953 +pandas/tests/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/util/conftest.py,sha256=loEbQsEtHtv-T4Umeq_UeV6R7s8SO01GHbW6gn8lvlo,476 +pandas/tests/util/test_assert_almost_equal.py,sha256=B1QXukp_xTKmwGIIi_kfvc9hDxBVTx6IjDNWRohTPGs,16804 +pandas/tests/util/test_assert_attr_equal.py,sha256=ZXTojP4V5Kle96QOFhxCZjq-dQf6gHvNOorYyOuFP1I,1045 +pandas/tests/util/test_assert_categorical_equal.py,sha256=yDmVzU22k5k5txSHixGfRJ4nKeP46FdNoh3CY1xEwEM,2728 +pandas/tests/util/test_assert_extension_array_equal.py,sha256=NYDyksC73o4dSEHtldxv1oNxPV6rQlOvdGcYh4OxQWI,3462 +pandas/tests/util/test_assert_frame_equal.py,sha256=KuvcO_MCM1gRO6tzqHOR26g6wHUQdBJqUuZkAcRinhI,13081 +pandas/tests/util/test_assert_index_equal.py,sha256=xsjnkqD4p4yIb_9flEqC7E7EyGKoE3QuGjAcQZMHIjc,10047 +pandas/tests/util/test_assert_interval_array_equal.py,sha256=ITqL0Z8AAy5D1knACPOHodI64AHxmNzxiG-i9FeU0b8,2158 
+pandas/tests/util/test_assert_numpy_array_equal.py,sha256=fgb8GdUwX4EYiR3PWbjJULNfAJz4DfJ8RJXchssygO4,6624 +pandas/tests/util/test_assert_produces_warning.py,sha256=A-pN3V12hnIqlbFYArYbdU-992RgJ-fqsaKbM0yvYPw,8412 +pandas/tests/util/test_assert_series_equal.py,sha256=B2QADQg3X38Qn07Q06WkI3I82yTvSE2CS9CqmfngMv0,12920 +pandas/tests/util/test_deprecate.py,sha256=1hGoeUQTew5o0DnCjLV5-hOfEuSoIGOXGByq5KpAP7A,1617 +pandas/tests/util/test_deprecate_kwarg.py,sha256=7T2QkCxXUoJHhCxUjAH_5_hM-BHC6nPWG635LFY35lo,2043 +pandas/tests/util/test_deprecate_nonkeyword_arguments.py,sha256=0UkqIi4ehxD3aoA3z7y8-3dpOs6o30_Gp8rZvFX1W9Q,3623 +pandas/tests/util/test_doc.py,sha256=u0fxCg4zZWhB4SkJYc2huQ0xv7sKKAt0OlpWldmhh_M,1492 +pandas/tests/util/test_hashing.py,sha256=oxlRxUuSSTMKKHzkHDtEkAStcPcXaN_V0J-7k5eTQTQ,13032 +pandas/tests/util/test_make_objects.py,sha256=S6VsvnLIokB8joEL6tsd-enLSg8qwxjRq3IkW0JKgyU,269 +pandas/tests/util/test_numba.py,sha256=6eOVcokESth7h6yyeehVizx61FtwDdVbF8wV8j3t-Ic,308 +pandas/tests/util/test_rewrite_warning.py,sha256=AUHz_OT0HS6kXs-9e59GflBCP3Tb5jy8jl9FxBg5rDs,1151 +pandas/tests/util/test_safe_import.py,sha256=UxH90Ju9wyQ7Rs7SduRj3dkxroyehIwaWbBEz3ZzvEw,1020 +pandas/tests/util/test_shares_memory.py,sha256=pohzczmtzQtM9wOa-dUkJVCOYPb5VFTxrjlUJL9xmlA,345 +pandas/tests/util/test_show_versions.py,sha256=FjYUrUMAF7hOzphaXED__8yjeF0HTccZS6q05__rH44,2096 +pandas/tests/util/test_util.py,sha256=uozcwrFUkjhT1UKrRIIQp2crEoepMk7QtGfioE9dSH0,1194 +pandas/tests/util/test_validate_args.py,sha256=9Z4zTqnKAWn1q9KZNvuO3DF6oszHjQrQgtOOimurWcs,1907 +pandas/tests/util/test_validate_args_and_kwargs.py,sha256=d_XcMRAQ9r--yIAAWSdJML6KeWgksy5qRNFXaY1BMQA,2456 +pandas/tests/util/test_validate_inclusive.py,sha256=w2twetJgIedm6KGQ4WmdmGC_6-RShFjXBMBVxR0gcME,896 +pandas/tests/util/test_validate_kwargs.py,sha256=NAZi-4Z0DrlQKZkkcKrWxoHxzWuKFxY8iphCBweA9jk,1808 +pandas/tests/window/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/window/conftest.py,sha256=rlS3eILzfTByRmmm7HLjk-FHEIbdTVVE9c0Dq-nfxa4,3137 +pandas/tests/window/moments/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pandas/tests/window/moments/conftest.py,sha256=xSkyyVltsAkJETLDHJSksjRkjcVHsnhfyCiNvhsQ3no,1595 +pandas/tests/window/moments/test_moments_consistency_ewm.py,sha256=4FPmIGVQuOUg13aT5c9l_DN7j7K3J9QEU0KXeO2Qrt0,8107 +pandas/tests/window/moments/test_moments_consistency_expanding.py,sha256=eUa5UFG7UAqmG56XsYmihGvesbDNrj0DPV7eJgpxksY,5541 +pandas/tests/window/moments/test_moments_consistency_rolling.py,sha256=4bcg6lGfz096yOU_AcI5qR5BKIjwULusj7ZALlFe8DU,7825 +pandas/tests/window/test_api.py,sha256=iZQH9RRfssneuXLbVGGUMWuzSgxv2_Ufe67F1477rKQ,13171 +pandas/tests/window/test_apply.py,sha256=TCm3O4RY6zqGyQzWXm7geU141OZpbgXCEZAGiedvsdE,9783 +pandas/tests/window/test_base_indexer.py,sha256=Fz81kU5x1g6OnNmRra6PRarPpq5HEYuA8XX0sR_y6LI,15954 +pandas/tests/window/test_cython_aggregations.py,sha256=wPAk76yfrG9D1-IzI0kDklpiTVqgp4xsEGjONe9lCY4,3967 +pandas/tests/window/test_dtypes.py,sha256=a3Xnqcq_jO0kczZmhmuBKkmCsKHOOufy9h6yNCPHlMk,5785 +pandas/tests/window/test_ewm.py,sha256=QTMavyNBFb5UA78W0-xLi0vFDZ8ggm77xYittZFYKRY,22986 +pandas/tests/window/test_expanding.py,sha256=Kz-2wSWxj4E31kd6y4jo7T7gE7aSe7yGHMYE7b4Bq18,24239 +pandas/tests/window/test_groupby.py,sha256=7zVQpDKxVnthyI8uAnWb-ePsqGBDAtW1jPOHsqH6scI,44457 +pandas/tests/window/test_numba.py,sha256=UZ3gzfo71WBPt6_C5BRPus3txhOmRSOZcjs0OhD1ZbI,16185 +pandas/tests/window/test_online.py,sha256=vD9JQ84yS7s7IksowtV9erDj5g9cO2HX584NXdnBXAs,3701 
+pandas/tests/window/test_pairwise.py,sha256=NwFsbGhuwzMx812TU2HyeeZrBFXpCpMJxBC-Wl4OXco,16032 +pandas/tests/window/test_rolling.py,sha256=kSZRImALKA0DyCbrCGj_0bBjisJVJJhImmQDgCUrZjI,59557 +pandas/tests/window/test_rolling_functions.py,sha256=9jXqaRL7k69jhevLMa54wCr3ODzOX4elNuhmuqmHxKA,17880 +pandas/tests/window/test_rolling_quantile.py,sha256=AvsqMR5YrVAlAFfhL0lHHAZIazXnzI1VkoVuPuiDEro,5516 +pandas/tests/window/test_rolling_skew_kurt.py,sha256=Emw9AJhTZyuVnxPg-nfYxpRNGJToWJ-he7obTSOy8iU,7807 +pandas/tests/window/test_timeseries_window.py,sha256=Cf8UA7ZbtDelJP0Ak5E_jtcw7MsRNYn6hlXr0qiG9W4,23706 +pandas/tests/window/test_win_type.py,sha256=GRu_7tF1tQAEH8hcb6kZPSG2FJihUTE1_85tH1iYaN8,17522 +pandas/tseries/__init__.py,sha256=CM1Forog6FJC_5YY4IueiWfQ9cATlSDJ4hF23RTniBQ,293 +pandas/tseries/api.py,sha256=OZHjOUxEVMuy-B5a83GM3iBczALddZomTUHCeZ_7MN0,146 +pandas/tseries/frequencies.py,sha256=JkrDznbxDoslk_W9aM61erf5PAyjzwI_YeSFj6FtM2w,17770 +pandas/tseries/holiday.py,sha256=q5e8IdYzOA289pM9W9zFMM3XGV4pwGEgmgnUWx9b6m0,18601 +pandas/tseries/offsets.py,sha256=wLWH1_fg7dYGDsHDRyBxc62788G9CDhLcpDeZHt5ixI,1531 +pandas/util/__init__.py,sha256=Al9mLq6l8MHkcPhg_FKwekgBfioAv6eVDZB-NsAE-i8,760 +pandas/util/_decorators.py,sha256=VXtxippH9t2IS4bEcZ9HAgPuyW6ouVEmIS-3ru0vM1Q,17119 +pandas/util/_doctools.py,sha256=Es1FLqrmsOLpJ_7Y24q_vqdXGw5Vy6vcajcfbIi_FCo,6819 +pandas/util/_exceptions.py,sha256=Xxc-hSfIgfYnnlZMJd1nY0LYUByWPwvj_p71q-NygCQ,2648 +pandas/util/_print_versions.py,sha256=3rf49cPXhW9HEWa2wy_onygdd_qOsqtB03KsMMi0Krg,4766 +pandas/util/_test_decorators.py,sha256=WtOSWrj4N878rGk8bJeIm0b_J11xUEJ5y-r0K0lZ3Dw,6601 +pandas/util/_tester.py,sha256=Mluqpd_YwVdcdgZfSu-_oVdadk_JjX9FuPGFjn_S6ZA,1462 +pandas/util/_validators.py,sha256=gKlRueyyvuJwMgLzJtgrQe_bhiQ-sJAA8mzY8_eS38g,14276 +pandas/util/version/__init__.py,sha256=1Px2m-0ZscGc4q6_96pCU7a7WCxZRshuwA6Wcx2dvL8,16432 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/REQUESTED b/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/REQUESTED new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/WHEEL b/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/WHEEL new file mode 100644 index 00000000..7991c975 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: meson +Root-Is-Purelib: false +Tag: cp39-cp39-macosx_11_0_arm64 \ No newline at end of file diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/entry_points.txt b/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/entry_points.txt new file mode 100644 index 00000000..3c1b523d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas-2.1.4.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[pandas_plotting_backends] +matplotlib = pandas:plotting._matplotlib + diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/__init__.py new file mode 100644 index 00000000..d11a4299 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/__init__.py @@ -0,0 +1,353 @@ +from __future__ import annotations + +__docformat__ = "restructuredtext" + +# Let users know if they're missing any of our hard dependencies +_hard_dependencies = ("numpy", "pytz", "dateutil") +_missing_dependencies = [] + +for _dependency in _hard_dependencies: + try: + __import__(_dependency) + except ImportError as _e: # pragma: no cover + 
_missing_dependencies.append(f"{_dependency}: {_e}") + +if _missing_dependencies: # pragma: no cover + raise ImportError( + "Unable to import required dependencies:\n" + "\n".join(_missing_dependencies) + ) +del _hard_dependencies, _dependency, _missing_dependencies + +try: + # numpy compat + from pandas.compat import ( + is_numpy_dev as _is_numpy_dev, # pyright: ignore[reportUnusedImport] # noqa: F401,E501 + ) +except ImportError as _err: # pragma: no cover + _module = _err.name + raise ImportError( + f"C extension: {_module} not built. If you want to import " + "pandas from the source directory, you may need to run " + "'python setup.py build_ext' to build the C extensions first." + ) from _err + +from pandas._config import ( + get_option, + set_option, + reset_option, + describe_option, + option_context, + options, +) + +# let init-time option registration happen +import pandas.core.config_init # pyright: ignore[reportUnusedImport] # noqa: F401 + +from pandas.core.api import ( + # dtype + ArrowDtype, + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, + Float32Dtype, + Float64Dtype, + CategoricalDtype, + PeriodDtype, + IntervalDtype, + DatetimeTZDtype, + StringDtype, + BooleanDtype, + # missing + NA, + isna, + isnull, + notna, + notnull, + # indexes + Index, + CategoricalIndex, + RangeIndex, + MultiIndex, + IntervalIndex, + TimedeltaIndex, + DatetimeIndex, + PeriodIndex, + IndexSlice, + # tseries + NaT, + Period, + period_range, + Timedelta, + timedelta_range, + Timestamp, + date_range, + bdate_range, + Interval, + interval_range, + DateOffset, + # conversion + to_numeric, + to_datetime, + to_timedelta, + # misc + Flags, + Grouper, + factorize, + unique, + value_counts, + NamedAgg, + array, + Categorical, + set_eng_float_format, + Series, + DataFrame, +) + +from pandas.core.dtypes.dtypes import SparseDtype + +from pandas.tseries.api import infer_freq +from pandas.tseries import offsets + +from pandas.core.computation.api import eval + +from pandas.core.reshape.api import ( + concat, + lreshape, + melt, + wide_to_long, + merge, + merge_asof, + merge_ordered, + crosstab, + pivot, + pivot_table, + get_dummies, + from_dummies, + cut, + qcut, +) + +from pandas import api, arrays, errors, io, plotting, tseries +from pandas import testing +from pandas.util._print_versions import show_versions + +from pandas.io.api import ( + # excel + ExcelFile, + ExcelWriter, + read_excel, + # parsers + read_csv, + read_fwf, + read_table, + # pickle + read_pickle, + to_pickle, + # pytables + HDFStore, + read_hdf, + # sql + read_sql, + read_sql_query, + read_sql_table, + # misc + read_clipboard, + read_parquet, + read_orc, + read_feather, + read_gbq, + read_html, + read_xml, + read_json, + read_stata, + read_sas, + read_spss, +) + +from pandas.io.json._normalize import json_normalize + +from pandas.util._tester import test + +# use the closest tagged version if possible +_built_with_meson = False +try: + from pandas._version_meson import ( # pyright: ignore [reportMissingImports] + __version__, + __git_version__, + ) + + _built_with_meson = True +except ImportError: + from pandas._version import get_versions + + v = get_versions() + __version__ = v.get("closest-tag", v["version"]) + __git_version__ = v.get("full-revisionid") + del get_versions, v + + +# module level doc-string +__doc__ = """ +pandas - a powerful data analysis and manipulation library for Python +===================================================================== + +**pandas** is a 
Python package providing fast, flexible, and expressive data +structures designed to make working with "relational" or "labeled" data both +easy and intuitive. It aims to be the fundamental high-level building block for +doing practical, **real world** data analysis in Python. Additionally, it has +the broader goal of becoming **the most powerful and flexible open source data +analysis / manipulation tool available in any language**. It is already well on +its way toward this goal. + +Main Features +------------- +Here are just a few of the things that pandas does well: + + - Easy handling of missing data in floating point as well as non-floating + point data. + - Size mutability: columns can be inserted and deleted from DataFrame and + higher dimensional objects + - Automatic and explicit data alignment: objects can be explicitly aligned + to a set of labels, or the user can simply ignore the labels and let + `Series`, `DataFrame`, etc. automatically align the data for you in + computations. + - Powerful, flexible group by functionality to perform split-apply-combine + operations on data sets, for both aggregating and transforming data. + - Make it easy to convert ragged, differently-indexed data in other Python + and NumPy data structures into DataFrame objects. + - Intelligent label-based slicing, fancy indexing, and subsetting of large + data sets. + - Intuitive merging and joining data sets. + - Flexible reshaping and pivoting of data sets. + - Hierarchical labeling of axes (possible to have multiple labels per tick). + - Robust IO tools for loading data from flat files (CSV and delimited), + Excel files, databases, and saving/loading data from the ultrafast HDF5 + format. + - Time series-specific functionality: date range generation and frequency + conversion, moving window statistics, date shifting and lagging. +""" + +# Use __all__ to let type checkers know what is part of the public API. +# Pandas is not (yet) a py.typed library: the public API is determined +# based on the documentation. 
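+#
+# For illustration (an editorial gloss, not upstream pandas code): with the
+# export list below, a star-import binds exactly these public names, e.g.
+#
+#     from pandas import *
+#     df = DataFrame({"a": [1, 2]})  # DataFrame is exported via __all__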
+__all__ = [ + "ArrowDtype", + "BooleanDtype", + "Categorical", + "CategoricalDtype", + "CategoricalIndex", + "DataFrame", + "DateOffset", + "DatetimeIndex", + "DatetimeTZDtype", + "ExcelFile", + "ExcelWriter", + "Flags", + "Float32Dtype", + "Float64Dtype", + "Grouper", + "HDFStore", + "Index", + "IndexSlice", + "Int16Dtype", + "Int32Dtype", + "Int64Dtype", + "Int8Dtype", + "Interval", + "IntervalDtype", + "IntervalIndex", + "MultiIndex", + "NA", + "NaT", + "NamedAgg", + "Period", + "PeriodDtype", + "PeriodIndex", + "RangeIndex", + "Series", + "SparseDtype", + "StringDtype", + "Timedelta", + "TimedeltaIndex", + "Timestamp", + "UInt16Dtype", + "UInt32Dtype", + "UInt64Dtype", + "UInt8Dtype", + "api", + "array", + "arrays", + "bdate_range", + "concat", + "crosstab", + "cut", + "date_range", + "describe_option", + "errors", + "eval", + "factorize", + "get_dummies", + "from_dummies", + "get_option", + "infer_freq", + "interval_range", + "io", + "isna", + "isnull", + "json_normalize", + "lreshape", + "melt", + "merge", + "merge_asof", + "merge_ordered", + "notna", + "notnull", + "offsets", + "option_context", + "options", + "period_range", + "pivot", + "pivot_table", + "plotting", + "qcut", + "read_clipboard", + "read_csv", + "read_excel", + "read_feather", + "read_fwf", + "read_gbq", + "read_hdf", + "read_html", + "read_json", + "read_orc", + "read_parquet", + "read_pickle", + "read_sas", + "read_spss", + "read_sql", + "read_sql_query", + "read_sql_table", + "read_stata", + "read_table", + "read_xml", + "reset_option", + "set_eng_float_format", + "set_option", + "show_versions", + "test", + "testing", + "timedelta_range", + "to_datetime", + "to_numeric", + "to_pickle", + "to_timedelta", + "tseries", + "unique", + "value_counts", + "wide_to_long", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_config/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_config/__init__.py new file mode 100644 index 00000000..daeb135f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_config/__init__.py @@ -0,0 +1,45 @@ +""" +pandas._config is considered explicitly upstream of everything else in pandas, +should have no intra-pandas dependencies. + +importing `dates` and `display` ensures that keys needed by _libs +are initialized. 
+"""
+__all__ = [
+    "config",
+    "detect_console_encoding",
+    "get_option",
+    "set_option",
+    "reset_option",
+    "describe_option",
+    "option_context",
+    "options",
+    "using_copy_on_write",
+]
+from pandas._config import config
+from pandas._config import dates  # pyright: ignore[reportUnusedImport]  # noqa: F401
+from pandas._config.config import (
+    _global_config,
+    describe_option,
+    get_option,
+    option_context,
+    options,
+    reset_option,
+    set_option,
+)
+from pandas._config.display import detect_console_encoding
+
+
+def using_copy_on_write() -> bool:
+    _mode_options = _global_config["mode"]
+    return _mode_options["copy_on_write"] and _mode_options["data_manager"] == "block"
+
+
+def using_nullable_dtypes() -> bool:
+    _mode_options = _global_config["mode"]
+    return _mode_options["nullable_dtypes"]
+
+
+def using_pyarrow_string_dtype() -> bool:
+    _mode_options = _global_config["future"]
+    return _mode_options["infer_string"]
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_config/config.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_config/config.py
new file mode 100644
index 00000000..a7768257
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_config/config.py
@@ -0,0 +1,946 @@
+"""
+The config module holds package-wide configurables and provides
+a uniform API for working with them.
+
+Overview
+========
+
+This module supports the following requirements:
+- options are referenced using keys in dot.notation, e.g. "x.y.option - z".
+- keys are case-insensitive.
+- functions should accept partial/regex keys, when unambiguous.
+- options can be registered by modules at import time.
+- options can be registered at init-time (via core.config_init).
+- options have a default value, and (optionally) a description and
+  validation function associated with them.
+- options can be deprecated, in which case referencing them
+  should produce a warning.
+- deprecated options can optionally be rerouted to a replacement
+  so that accessing a deprecated option reroutes to a differently
+  named option.
+- options can be reset to their default value.
+- all options can be reset to their default value at once.
+- all options in a certain sub-namespace can be reset at once.
+- the user can set / get / reset or ask for the description of an option.
+- a developer can register and mark an option as deprecated.
+- you can register a callback to be invoked when the option value
+  is set or reset. Changing the stored value is considered misuse, but
+  is not verboten.
+
+Implementation
+==============
+
+- Data is stored using nested dictionaries, and should be accessed
+  through the provided API.
+
+- "Registered options" and "Deprecated options" have metadata associated
+  with them, which are stored in auxiliary dictionaries keyed on the
+  fully-qualified key, e.g. "x.y.z.option".
+
+- the config_init module is imported by the package's __init__.py file.
+  Placing any register_option() calls there will ensure those options
+  are available as soon as pandas is loaded. If you use register_option
+  in a module, it will only be available after that module is imported,
+  which you should be aware of.
+
+- `config_prefix` is a context manager (for use with the `with` keyword)
+  which can save developers some typing, see the docstring.
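+
+A minimal usage sketch (illustrative only; "x.y.z" is a placeholder key,
+not an option registered by pandas itself):
+
+    import pandas._config.config as cf
+
+    cf.register_option("x.y.z", defval=0, doc="demo option", validator=cf.is_int)
+    cf.get_option("x.y.z")          # 0; lookup is case-insensitive
+    cf.set_option("x.y.z", 5)       # the validator rejects non-int values
+    cf.reset_option("x.y.z")        # back to the default, 0
+    cf.deprecate_option("x.y.z", msg="x.y.z is deprecated")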
+ +""" + +from __future__ import annotations + +from contextlib import ( + ContextDecorator, + contextmanager, +) +import re +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Generic, + NamedTuple, + cast, +) +import warnings + +from pandas._typing import ( + F, + T, +) +from pandas.util._exceptions import find_stack_level + +if TYPE_CHECKING: + from collections.abc import ( + Generator, + Iterable, + ) + + +class DeprecatedOption(NamedTuple): + key: str + msg: str | None + rkey: str | None + removal_ver: str | None + + +class RegisteredOption(NamedTuple): + key: str + defval: object + doc: str + validator: Callable[[object], Any] | None + cb: Callable[[str], Any] | None + + +# holds deprecated option metadata +_deprecated_options: dict[str, DeprecatedOption] = {} + +# holds registered option metadata +_registered_options: dict[str, RegisteredOption] = {} + +# holds the current values for registered options +_global_config: dict[str, Any] = {} + +# keys which have a special meaning +_reserved_keys: list[str] = ["all"] + + +class OptionError(AttributeError, KeyError): + """ + Exception raised for pandas.options. + + Backwards compatible with KeyError checks. + + Examples + -------- + >>> pd.options.context + Traceback (most recent call last): + OptionError: No such option + """ + + +# +# User API + + +def _get_single_key(pat: str, silent: bool) -> str: + keys = _select_options(pat) + if len(keys) == 0: + if not silent: + _warn_if_deprecated(pat) + raise OptionError(f"No such keys(s): {repr(pat)}") + if len(keys) > 1: + raise OptionError("Pattern matched multiple keys") + key = keys[0] + + if not silent: + _warn_if_deprecated(key) + + key = _translate_key(key) + + return key + + +def _get_option(pat: str, silent: bool = False) -> Any: + key = _get_single_key(pat, silent) + + # walk the nested dict + root, k = _get_root(key) + return root[k] + + +def _set_option(*args, **kwargs) -> None: + # must at least 1 arg deal with constraints later + nargs = len(args) + if not nargs or nargs % 2 != 0: + raise ValueError("Must provide an even number of non-keyword arguments") + + # default to false + silent = kwargs.pop("silent", False) + + if kwargs: + kwarg = next(iter(kwargs.keys())) + raise TypeError(f'_set_option() got an unexpected keyword argument "{kwarg}"') + + for k, v in zip(args[::2], args[1::2]): + key = _get_single_key(k, silent) + + o = _get_registered_option(key) + if o and o.validator: + o.validator(v) + + # walk the nested dict + root, k_root = _get_root(key) + root[k_root] = v + + if o.cb: + if silent: + with warnings.catch_warnings(record=True): + o.cb(key) + else: + o.cb(key) + + +def _describe_option(pat: str = "", _print_desc: bool = True) -> str | None: + keys = _select_options(pat) + if len(keys) == 0: + raise OptionError("No such keys(s)") + + s = "\n".join([_build_option_description(k) for k in keys]) + + if _print_desc: + print(s) + return None + return s + + +def _reset_option(pat: str, silent: bool = False) -> None: + keys = _select_options(pat) + + if len(keys) == 0: + raise OptionError("No such keys(s)") + + if len(keys) > 1 and len(pat) < 4 and pat != "all": + raise ValueError( + "You must specify at least 4 characters when " + "resetting multiple keys, use the special keyword " + '"all" to reset all the options to their default value' + ) + + for k in keys: + _set_option(k, _registered_options[k].defval, silent=silent) + + +def get_default_val(pat: str): + key = _get_single_key(pat, silent=True) + return _get_registered_option(key).defval + + +class 
DictWrapper:
+    """provide attribute-style access to a nested dict"""
+
+    def __init__(self, d: dict[str, Any], prefix: str = "") -> None:
+        object.__setattr__(self, "d", d)
+        object.__setattr__(self, "prefix", prefix)
+
+    def __setattr__(self, key: str, val: Any) -> None:
+        prefix = object.__getattribute__(self, "prefix")
+        if prefix:
+            prefix += "."
+        prefix += key
+        # you can't set new keys
+        # and you can't overwrite subtrees
+        if key in self.d and not isinstance(self.d[key], dict):
+            _set_option(prefix, val)
+        else:
+            raise OptionError("You can only set the value of existing options")
+
+    def __getattr__(self, key: str):
+        prefix = object.__getattribute__(self, "prefix")
+        if prefix:
+            prefix += "."
+        prefix += key
+        try:
+            v = object.__getattribute__(self, "d")[key]
+        except KeyError as err:
+            raise OptionError("No such option") from err
+        if isinstance(v, dict):
+            return DictWrapper(v, prefix)
+        else:
+            return _get_option(prefix)
+
+    def __dir__(self) -> Iterable[str]:
+        return list(self.d.keys())
+
+
+# For user convenience, we'd like to have the available options described
+# in the docstring. For dev convenience we'd like to generate the docstrings
+# dynamically instead of maintaining them by hand. To this end, we use the
+# class below, which wraps functions inside a callable and converts
+# __doc__ into a property function. The docstrings below are templates
+# using the py2.6+ advanced formatting syntax to plug in a concise list
+# of options, and option descriptions.
+
+
+class CallableDynamicDoc(Generic[T]):
+    def __init__(self, func: Callable[..., T], doc_tmpl: str) -> None:
+        self.__doc_tmpl__ = doc_tmpl
+        self.__func__ = func
+
+    def __call__(self, *args, **kwds) -> T:
+        return self.__func__(*args, **kwds)
+
+    # error: Signature of "__doc__" incompatible with supertype "object"
+    @property
+    def __doc__(self) -> str:  # type: ignore[override]
+        opts_desc = _describe_option("all", _print_desc=False)
+        opts_list = pp_options_list(list(_registered_options.keys()))
+        return self.__doc_tmpl__.format(opts_desc=opts_desc, opts_list=opts_list)
+
+
+_get_option_tmpl = """
+get_option(pat)
+
+Retrieves the value of the specified option.
+
+Available options:
+
+{opts_list}
+
+Parameters
+----------
+pat : str
+    Regexp which should match a single option.
+    Note: partial matches are supported for convenience, but unless you use the
+    full option name (e.g. x.y.z.option_name), your code may break in future
+    versions if new options with similar names are introduced.
+
+Returns
+-------
+result : the value of the option
+
+Raises
+------
+OptionError : if no such option exists
+
+Notes
+-----
+Please reference the :ref:`User Guide <options>` for more information.
+
+The available options with their descriptions:
+
+{opts_desc}
+
+Examples
+--------
+>>> pd.get_option('display.max_columns')  # doctest: +SKIP
+4
+"""
+
+_set_option_tmpl = """
+set_option(pat, value)
+
+Sets the value of the specified option.
+
+Available options:
+
+{opts_list}
+
+Parameters
+----------
+pat : str
+    Regexp which should match a single option.
+    Note: partial matches are supported for convenience, but unless you use the
+    full option name (e.g. x.y.z.option_name), your code may break in future
+    versions if new options with similar names are introduced.
+value : object
+    New value of option.
+
+Returns
+-------
+None
+
+Raises
+------
+OptionError if no such option exists
+
+Notes
+-----
+Please reference the :ref:`User Guide <options>` for more information.
+
+The available options with their descriptions:
+
+{opts_desc}
+
+Examples
+--------
+>>> pd.set_option('display.max_columns', 4)
+>>> df = pd.DataFrame([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
+>>> df
+   0  1  ...  3   4
+0  1  2  ...  4   5
+1  6  7  ...  9  10
+[2 rows x 5 columns]
+>>> pd.reset_option('display.max_columns')
+"""

+_describe_option_tmpl = """
+describe_option(pat, _print_desc=True)
+
+Prints the description for one or more registered options.
+
+Call with no arguments to get a listing for all registered options.
+
+Available options:
+
+{opts_list}
+
+Parameters
+----------
+pat : str
+    Regexp pattern. All matching keys will have their description displayed.
+_print_desc : bool, default True
+    If True (default) the description(s) will be printed to stdout.
+    Otherwise, the description(s) will be returned as a unicode string
+    (for testing).
+
+Returns
+-------
+None by default, the description(s) as a unicode string if _print_desc
+is False
+
+Notes
+-----
+Please reference the :ref:`User Guide <options>` for more information.
+
+The available options with their descriptions:
+
+{opts_desc}
+
+Examples
+--------
+>>> pd.describe_option('display.max_columns')  # doctest: +SKIP
+display.max_columns : int
+    If max_cols is exceeded, switch to truncate view...
+"""
+
+_reset_option_tmpl = """
+reset_option(pat)
+
+Reset one or more options to their default value.
+
+Pass "all" as argument to reset all options.
+
+Available options:
+
+{opts_list}
+
+Parameters
+----------
+pat : str/regex
+    If specified only options matching `prefix*` will be reset.
+    Note: partial matches are supported for convenience, but unless you
+    use the full option name (e.g. x.y.z.option_name), your code may break
+    in future versions if new options with similar names are introduced.
+
+Returns
+-------
+None
+
+Notes
+-----
+Please reference the :ref:`User Guide <options>` for more information.
+
+The available options with their descriptions:
+
+{opts_desc}
+
+Examples
+--------
+>>> pd.reset_option('display.max_columns')  # doctest: +SKIP
+"""
+
+# bind the functions with their docstrings into a Callable
+# and use that as the functions exposed in pd.api
+get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
+set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
+reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
+describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
+options = DictWrapper(_global_config)
+
+#
+# Functions for use by pandas developers, in addition to the user API
+
+
+class option_context(ContextDecorator):
+    """
+    Context manager to temporarily set options in the `with` statement context.
+
+    You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
+
+    Examples
+    --------
+    >>> from pandas import option_context
+    >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
+    ...     pass
+    """
+
+    def __init__(self, *args) -> None:
+        if len(args) % 2 != 0 or len(args) < 2:
+            raise ValueError(
+                "Need to invoke as option_context(pat, val, [(pat, val), ...])."
+            )
+
+        self.ops = list(zip(args[::2], args[1::2]))
+
+    def __enter__(self) -> None:
+        self.undo = [(pat, _get_option(pat)) for pat, val in self.ops]
+
+        for pat, val in self.ops:
+            _set_option(pat, val, silent=True)
+
+    def __exit__(self, *args) -> None:
+        if self.undo:
+            for pat, val in self.undo:
+                _set_option(pat, val, silent=True)
+
+
+def register_option(
+    key: str,
+    defval: object,
+    doc: str = "",
+    validator: Callable[[object], Any] | None = None,
+    cb: Callable[[str], Any] | None = None,
+) -> None:
+    """
+    Register an option in the package-wide pandas config object.
+
+    Parameters
+    ----------
+    key : str
+        Fully-qualified key, e.g. "x.y.option - z".
+    defval : object
+        Default value of the option.
+    doc : str
+        Description of the option.
+    validator : Callable, optional
+        Function of a single argument, should raise `ValueError` if
+        called with a value which is not a legal value for the option.
+    cb
+        A function of a single argument "key", which is called
+        immediately after an option value is set/reset. key is
+        the full name of the option.
+
+    Raises
+    ------
+    ValueError if `validator` is specified and `defval` is not a valid value.
+
+    """
+    import keyword
+    import tokenize
+
+    key = key.lower()
+
+    if key in _registered_options:
+        raise OptionError(f"Option '{key}' has already been registered")
+    if key in _reserved_keys:
+        raise OptionError(f"Option '{key}' is a reserved key")
+
+    # the default value should be legal
+    if validator:
+        validator(defval)
+
+    # walk the nested dict, creating dicts as needed along the path
+    path = key.split(".")
+
+    for k in path:
+        if not re.match("^" + tokenize.Name + "$", k):
+            raise ValueError(f"{k} is not a valid identifier")
+        if keyword.iskeyword(k):
+            raise ValueError(f"{k} is a python keyword")
+
+    cursor = _global_config
+    msg = "Path prefix to option '{option}' is already an option"
+
+    for i, p in enumerate(path[:-1]):
+        if not isinstance(cursor, dict):
+            raise OptionError(msg.format(option=".".join(path[:i])))
+        if p not in cursor:
+            cursor[p] = {}
+        cursor = cursor[p]
+
+    if not isinstance(cursor, dict):
+        raise OptionError(msg.format(option=".".join(path[:-1])))
+
+    cursor[path[-1]] = defval  # initialize
+
+    # save the option metadata
+    _registered_options[key] = RegisteredOption(
+        key=key, defval=defval, doc=doc, validator=validator, cb=cb
+    )
+
+
+def deprecate_option(
+    key: str,
+    msg: str | None = None,
+    rkey: str | None = None,
+    removal_ver: str | None = None,
+) -> None:
+    """
+    Mark option `key` as deprecated; if code attempts to access this option,
+    a warning will be produced, using `msg` if given, or a default message
+    if not.
+    If `rkey` is given, any access to the key will be re-routed to `rkey`.
+
+    Neither the existence of `key` nor that of `rkey` is checked. If they
+    do not exist, any subsequent access will fail as usual, after the
+    deprecation warning is given.
+
+    Parameters
+    ----------
+    key : str
+        Name of the option to be deprecated.
+        Must be a fully-qualified option name (e.g. "x.y.z.rkey").
+    msg : str, optional
+        Warning message to output when the key is referenced.
+        If no message is given a default message will be emitted.
+    rkey : str, optional
+        Name of an option to reroute access to.
+        If specified, any referenced `key` will be
+        re-routed to `rkey` including set/get/reset.
+        rkey must be a fully-qualified option name (e.g. "x.y.z.rkey").
+        Used by the default message if no `msg` is specified.
+    removal_ver : str, optional
+        Specifies the version in which this option will
+        be removed.
Used by the default message if no `msg` is specified.
+
+    Raises
+    ------
+    OptionError
+        If the specified key has already been deprecated.
+    """
+    key = key.lower()
+
+    if key in _deprecated_options:
+        raise OptionError(f"Option '{key}' has already been defined as deprecated.")
+
+    _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
+
+
+#
+# functions internal to the module
+
+
+def _select_options(pat: str) -> list[str]:
+    """
+    returns a list of keys matching `pat`
+
+    if pat=="all", returns all registered options
+    """
+    # short-circuit for exact key
+    if pat in _registered_options:
+        return [pat]
+
+    # else look through all of them
+    keys = sorted(_registered_options.keys())
+    if pat == "all":  # reserved key
+        return keys
+
+    return [k for k in keys if re.search(pat, k, re.I)]
+
+
+def _get_root(key: str) -> tuple[dict[str, Any], str]:
+    path = key.split(".")
+    cursor = _global_config
+    for p in path[:-1]:
+        cursor = cursor[p]
+    return cursor, path[-1]
+
+
+def _is_deprecated(key: str) -> bool:
+    """Returns True if the given option has been deprecated"""
+    key = key.lower()
+    return key in _deprecated_options
+
+
+def _get_deprecated_option(key: str):
+    """
+    Retrieves the metadata for a deprecated option, if `key` is deprecated.
+
+    Returns
+    -------
+    DeprecatedOption (namedtuple) if key is deprecated, None otherwise
+    """
+    try:
+        d = _deprecated_options[key]
+    except KeyError:
+        return None
+    else:
+        return d
+
+
+def _get_registered_option(key: str):
+    """
+    Retrieves the option metadata if `key` is a registered option.
+
+    Returns
+    -------
+    RegisteredOption (namedtuple) if key is a registered option, None otherwise
+    """
+    return _registered_options.get(key)
+
+
+def _translate_key(key: str) -> str:
+    """
+    if key is deprecated and a replacement key defined, will return the
+    replacement key, otherwise returns `key` as-is
+    """
+    d = _get_deprecated_option(key)
+    if d:
+        return d.rkey or key
+    else:
+        return key
+
+
+def _warn_if_deprecated(key: str) -> bool:
+    """
+    Checks if `key` is a deprecated option and if so, issues a warning.
+
+    Returns
+    -------
+    bool - True if `key` is deprecated, False otherwise.
+    """
+    d = _get_deprecated_option(key)
+    if d:
+        if d.msg:
+            warnings.warn(
+                d.msg,
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
+        else:
+            msg = f"'{key}' is deprecated"
+            if d.removal_ver:
+                msg += f" and will be removed in {d.removal_ver}"
+            if d.rkey:
+                msg += f", please use '{d.rkey}' instead."
+            else:
+                msg += ", please refrain from using it."
+
+            warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
+        return True
+    return False
+
+
+def _build_option_description(k: str) -> str:
+    """Builds a formatted description of a registered option"""
+    o = _get_registered_option(k)
+    d = _get_deprecated_option(k)
+
+    s = f"{k} "
+
+    if o.doc:
+        s += "\n".join(o.doc.strip().split("\n"))
+    else:
+        s += "No description available."
+
+    if o:
+        s += f"\n    [default: {o.defval}] [currently: {_get_option(k, True)}]"
+
+    if d:
+        rkey = d.rkey or ""
+        s += "\n    (Deprecated"
+        s += f", use `{rkey}` instead."
+ s += ")" + + return s + + +def pp_options_list(keys: Iterable[str], width: int = 80, _print: bool = False): + """Builds a concise listing of available options, grouped by prefix""" + from itertools import groupby + from textwrap import wrap + + def pp(name: str, ks: Iterable[str]) -> list[str]: + pfx = "- " + name + ".[" if name else "" + ls = wrap( + ", ".join(ks), + width, + initial_indent=pfx, + subsequent_indent=" ", + break_long_words=False, + ) + if ls and ls[-1] and name: + ls[-1] = ls[-1] + "]" + return ls + + ls: list[str] = [] + singles = [x for x in sorted(keys) if x.find(".") < 0] + if singles: + ls += pp("", singles) + keys = [x for x in keys if x.find(".") >= 0] + + for k, g in groupby(sorted(keys), lambda x: x[: x.rfind(".")]): + ks = [x[len(k) + 1 :] for x in list(g)] + ls += pp(k, ks) + s = "\n".join(ls) + if _print: + print(s) + else: + return s + + +# +# helpers + + +@contextmanager +def config_prefix(prefix: str) -> Generator[None, None, None]: + """ + contextmanager for multiple invocations of API with a common prefix + + supported API functions: (register / get / set )__option + + Warning: This is not thread - safe, and won't work properly if you import + the API functions into your module using the "from x import y" construct. + + Example + ------- + import pandas._config.config as cf + with cf.config_prefix("display.font"): + cf.register_option("color", "red") + cf.register_option("size", " 5 pt") + cf.set_option(size, " 6 pt") + cf.get_option(size) + ... + + etc' + + will register options "display.font.color", "display.font.size", set the + value of "display.font.size"... and so on. + """ + # Note: reset_option relies on set_option, and on key directly + # it does not fit in to this monkey-patching scheme + + global register_option, get_option, set_option + + def wrap(func: F) -> F: + def inner(key: str, *args, **kwds): + pkey = f"{prefix}.{key}" + return func(pkey, *args, **kwds) + + return cast(F, inner) + + _register_option = register_option + _get_option = get_option + _set_option = set_option + set_option = wrap(set_option) + get_option = wrap(get_option) + register_option = wrap(register_option) + try: + yield + finally: + set_option = _set_option + get_option = _get_option + register_option = _register_option + + +# These factories and methods are handy for use as the validator +# arg in register_option + + +def is_type_factory(_type: type[Any]) -> Callable[[Any], None]: + """ + + Parameters + ---------- + `_type` - a type to be compared against (e.g. 
type(x) == `_type`)
+
+    Returns
+    -------
+    validator - a function of a single argument x, which raises
+        ValueError if type(x) is not equal to `_type`
+
+    """
+
+    def inner(x) -> None:
+        if type(x) != _type:
+            raise ValueError(f"Value must have type '{_type}'")
+
+    return inner
+
+
+def is_instance_factory(_type) -> Callable[[Any], None]:
+    """
+
+    Parameters
+    ----------
+    `_type` - the type to be checked against
+
+    Returns
+    -------
+    validator - a function of a single argument x, which raises
+        ValueError if x is not an instance of `_type`
+
+    """
+    if isinstance(_type, (tuple, list)):
+        _type = tuple(_type)
+        type_repr = "|".join(map(str, _type))
+    else:
+        type_repr = f"'{_type}'"
+
+    def inner(x) -> None:
+        if not isinstance(x, _type):
+            raise ValueError(f"Value must be an instance of {type_repr}")
+
+    return inner
+
+
+def is_one_of_factory(legal_values) -> Callable[[Any], None]:
+    callables = [c for c in legal_values if callable(c)]
+    legal_values = [c for c in legal_values if not callable(c)]
+
+    def inner(x) -> None:
+        if x not in legal_values:
+            if not any(c(x) for c in callables):
+                uvals = [str(lval) for lval in legal_values]
+                pp_values = "|".join(uvals)
+                msg = f"Value must be one of {pp_values}"
+                if len(callables):
+                    msg += " or a callable"
+                raise ValueError(msg)
+
+    return inner
+
+
+def is_nonnegative_int(value: object) -> None:
+    """
+    Verify that value is None or a nonnegative int.
+
+    Parameters
+    ----------
+    value : None or int
+        The `value` to be checked.
+
+    Raises
+    ------
+    ValueError
+        When the value is neither None nor a nonnegative integer
+    """
+    if value is None:
+        return
+
+    elif isinstance(value, int):
+        if value >= 0:
+            return
+
+    msg = "Value must be a nonnegative integer or None"
+    raise ValueError(msg)
+
+
+# common type validators, for convenience
+# usage: register_option(... , validator = is_int)
+is_int = is_type_factory(int)
+is_bool = is_type_factory(bool)
+is_float = is_type_factory(float)
+is_str = is_type_factory(str)
+is_text = is_instance_factory((str, bytes))
+
+
+def is_callable(obj) -> bool:
+    """
+
+    Parameters
+    ----------
+    `obj` - the object to be checked
+
+    Returns
+    -------
+    validator - returns True if object is callable
+        raises ValueError otherwise.
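+
+    Examples
+    --------
+    >>> is_callable(lambda x: x)
+    True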
+ + """ + if not callable(obj): + raise ValueError("Value must be a callable") + return True diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_config/dates.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_config/dates.py new file mode 100644 index 00000000..b37831f9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_config/dates.py @@ -0,0 +1,25 @@ +""" +config for datetime formatting +""" +from __future__ import annotations + +from pandas._config import config as cf + +pc_date_dayfirst_doc = """ +: boolean + When True, prints and parses dates with the day first, eg 20/01/2005 +""" + +pc_date_yearfirst_doc = """ +: boolean + When True, prints and parses dates with the year first, eg 2005/01/20 +""" + +with cf.config_prefix("display"): + # Needed upstream of `_libs` because these are used in tslibs.parsing + cf.register_option( + "date_dayfirst", False, pc_date_dayfirst_doc, validator=cf.is_bool + ) + cf.register_option( + "date_yearfirst", False, pc_date_yearfirst_doc, validator=cf.is_bool + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_config/display.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_config/display.py new file mode 100644 index 00000000..df2c3ad3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_config/display.py @@ -0,0 +1,62 @@ +""" +Unopinionated display configuration. +""" + +from __future__ import annotations + +import locale +import sys + +from pandas._config import config as cf + +# ----------------------------------------------------------------------------- +# Global formatting options +_initial_defencoding: str | None = None + + +def detect_console_encoding() -> str: + """ + Try to find the most capable encoding supported by the console. + slightly modified from the way IPython handles the same issue. + """ + global _initial_defencoding + + encoding = None + try: + encoding = sys.stdout.encoding or sys.stdin.encoding + except (AttributeError, OSError): + pass + + # try again for something better + if not encoding or "ascii" in encoding.lower(): + try: + encoding = locale.getpreferredencoding() + except locale.Error: + # can be raised by locale.setlocale(), which is + # called by getpreferredencoding + # (on some systems, see stdlib locale docs) + pass + + # when all else fails. this will usually be "ascii" + if not encoding or "ascii" in encoding.lower(): + encoding = sys.getdefaultencoding() + + # GH#3360, save the reported defencoding at import time + # MPL backends may change it. Make available for debugging. + if not _initial_defencoding: + _initial_defencoding = sys.getdefaultencoding() + + return encoding + + +pc_encoding_doc = """ +: str/unicode + Defaults to the detected encoding of the console. + Specifies the encoding to be used for strings returned by to_string, + these are generally strings meant to be displayed on the console. +""" + +with cf.config_prefix("display"): + cf.register_option( + "encoding", detect_console_encoding(), pc_encoding_doc, validator=cf.is_text + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_config/localization.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_config/localization.py new file mode 100644 index 00000000..5c1a0ff1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_config/localization.py @@ -0,0 +1,172 @@ +""" +Helpers for configuring locale settings. + +Name `localization` is chosen to avoid overlap with builtin `locale` module. 
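+
+A minimal sketch (illustrative; which locales are installed varies by system):
+
+    from pandas._config.localization import can_set_locale, set_locale
+
+    if can_set_locale("sv_SE.UTF-8"):
+        with set_locale("sv_SE.UTF-8"):
+            ...  # locale-sensitive code; the previous locale is restored on exit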
+""" +from __future__ import annotations + +from contextlib import contextmanager +import locale +import platform +import re +import subprocess +from typing import TYPE_CHECKING + +from pandas._config.config import options + +if TYPE_CHECKING: + from collections.abc import Generator + + +@contextmanager +def set_locale( + new_locale: str | tuple[str, str], lc_var: int = locale.LC_ALL +) -> Generator[str | tuple[str, str], None, None]: + """ + Context manager for temporarily setting a locale. + + Parameters + ---------- + new_locale : str or tuple + A string of the form .. For example to set + the current locale to US English with a UTF8 encoding, you would pass + "en_US.UTF-8". + lc_var : int, default `locale.LC_ALL` + The category of the locale being set. + + Notes + ----- + This is useful when you want to run a particular block of code under a + particular locale, without globally setting the locale. This probably isn't + thread-safe. + """ + # getlocale is not always compliant with setlocale, use setlocale. GH#46595 + current_locale = locale.setlocale(lc_var) + + try: + locale.setlocale(lc_var, new_locale) + normalized_code, normalized_encoding = locale.getlocale() + if normalized_code is not None and normalized_encoding is not None: + yield f"{normalized_code}.{normalized_encoding}" + else: + yield new_locale + finally: + locale.setlocale(lc_var, current_locale) + + +def can_set_locale(lc: str, lc_var: int = locale.LC_ALL) -> bool: + """ + Check to see if we can set a locale, and subsequently get the locale, + without raising an Exception. + + Parameters + ---------- + lc : str + The locale to attempt to set. + lc_var : int, default `locale.LC_ALL` + The category of the locale being set. + + Returns + ------- + bool + Whether the passed locale can be set + """ + try: + with set_locale(lc, lc_var=lc_var): + pass + except (ValueError, locale.Error): + # horrible name for a Exception subclass + return False + else: + return True + + +def _valid_locales(locales: list[str] | str, normalize: bool) -> list[str]: + """ + Return a list of normalized locales that do not throw an ``Exception`` + when set. + + Parameters + ---------- + locales : str + A string where each locale is separated by a newline. + normalize : bool + Whether to call ``locale.normalize`` on each locale. + + Returns + ------- + valid_locales : list + A list of valid locales. + """ + return [ + loc + for loc in ( + locale.normalize(loc.strip()) if normalize else loc.strip() + for loc in locales + ) + if can_set_locale(loc) + ] + + +def get_locales( + prefix: str | None = None, + normalize: bool = True, +) -> list[str]: + """ + Get all the locales that are available on the system. + + Parameters + ---------- + prefix : str + If not ``None`` then return only those locales with the prefix + provided. For example to get all English language locales (those that + start with ``"en"``), pass ``prefix="en"``. + normalize : bool + Call ``locale.normalize`` on the resulting list of available locales. + If ``True``, only locales that can be set without throwing an + ``Exception`` are returned. + + Returns + ------- + locales : list of strings + A list of locale strings that can be set with ``locale.setlocale()``. + For example:: + + locale.setlocale(locale.LC_ALL, locale_string) + + On error will return an empty list (no locale available, e.g. Windows) + + """ + if platform.system() in ("Linux", "Darwin"): + raw_locales = subprocess.check_output(["locale", "-a"]) + else: + # Other platforms e.g. 
Windows don't provide "locale -a"
+        # Note: is_platform_windows causes circular import here
+        return []
+
+    try:
+        # raw_locales is "\n" separated list of locales
+        # it may contain non-decodable parts, so split
+        # extract what we can and then rejoin.
+        split_raw_locales = raw_locales.split(b"\n")
+        out_locales = []
+        for x in split_raw_locales:
+            try:
+                out_locales.append(str(x, encoding=options.display.encoding))
+            except UnicodeError:
+                # 'locale -a' is used to populate 'raw_locales' and on
+                # Redhat 7 Linux (and maybe others) prints locale names
+                # using windows-1252 encoding.  Bug only triggered by
+                # a few special characters and when there is an
+                # extensive list of installed locales.
+                out_locales.append(str(x, encoding="windows-1252"))
+
+    except TypeError:
+        pass
+
+    if prefix is None:
+        return _valid_locales(out_locales, normalize)
+
+    pattern = re.compile(f"{prefix}.*")
+    found = pattern.findall("\n".join(out_locales))
+    return _valid_locales(found, normalize)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/__init__.py
new file mode 100644
index 00000000..b084a259
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/__init__.py
@@ -0,0 +1,27 @@
+__all__ = [
+    "NaT",
+    "NaTType",
+    "OutOfBoundsDatetime",
+    "Period",
+    "Timedelta",
+    "Timestamp",
+    "iNaT",
+    "Interval",
+]
+
+
+# Below imports need to happen first to ensure pandas top level
+# module gets monkeypatched with the pandas_datetime_CAPI
+# see pandas_datetime_exec in pd_datetime.c
+import pandas._libs.pandas_parser  # noqa: E501 # isort: skip # type: ignore[reportUnusedImport]
+import pandas._libs.pandas_datetime  # noqa: F401,E501 # isort: skip # type: ignore[reportUnusedImport]
+from pandas._libs.interval import Interval
+from pandas._libs.tslibs import (
+    NaT,
+    NaTType,
+    OutOfBoundsDatetime,
+    Period,
+    Timedelta,
+    Timestamp,
+    iNaT,
+)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/algos.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/algos.cpython-39-darwin.so
new file mode 100755
index 00000000..7e9e8f77
Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/algos.cpython-39-darwin.so differ
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/algos.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/algos.pyi
new file mode 100644
index 00000000..caf5425d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/algos.pyi
@@ -0,0 +1,416 @@
+from typing import Any
+
+import numpy as np
+
+from pandas._typing import npt
+
+class Infinity:
+    def __eq__(self, other) -> bool: ...
+    def __ne__(self, other) -> bool: ...
+    def __lt__(self, other) -> bool: ...
+    def __le__(self, other) -> bool: ...
+    def __gt__(self, other) -> bool: ...
+    def __ge__(self, other) -> bool: ...
+
+class NegInfinity:
+    def __eq__(self, other) -> bool: ...
+    def __ne__(self, other) -> bool: ...
+    def __lt__(self, other) -> bool: ...
+    def __le__(self, other) -> bool: ...
+    def __gt__(self, other) -> bool: ...
+    def __ge__(self, other) -> bool: ...
+
+def unique_deltas(
+    arr: np.ndarray,  # const int64_t[:]
+) -> np.ndarray: ...  # np.ndarray[np.int64, ndim=1]
+def is_lexsorted(list_of_arrays: list[npt.NDArray[np.int64]]) -> bool: ...
+def groupsort_indexer(
+    index: np.ndarray,  # const int64_t[:]
+    ngroups: int,
+) -> tuple[
+    np.ndarray,  # ndarray[int64_t, ndim=1]
+    np.ndarray,  # ndarray[int64_t, ndim=1]
+]: ...
+def kth_smallest( + arr: np.ndarray, # numeric[:] + k: int, +) -> Any: ... # numeric + +# ---------------------------------------------------------------------- +# Pairwise correlation/covariance + +def nancorr( + mat: npt.NDArray[np.float64], # const float64_t[:, :] + cov: bool = ..., + minp: int | None = ..., +) -> npt.NDArray[np.float64]: ... # ndarray[float64_t, ndim=2] +def nancorr_spearman( + mat: npt.NDArray[np.float64], # ndarray[float64_t, ndim=2] + minp: int = ..., +) -> npt.NDArray[np.float64]: ... # ndarray[float64_t, ndim=2] + +# ---------------------------------------------------------------------- + +def validate_limit(nobs: int | None, limit=...) -> int: ... +def get_fill_indexer( + mask: npt.NDArray[np.bool_], + limit: int | None = None, +) -> npt.NDArray[np.intp]: ... +def pad( + old: np.ndarray, # ndarray[numeric_object_t] + new: np.ndarray, # ndarray[numeric_object_t] + limit=..., +) -> npt.NDArray[np.intp]: ... # np.ndarray[np.intp, ndim=1] +def pad_inplace( + values: np.ndarray, # numeric_object_t[:] + mask: np.ndarray, # uint8_t[:] + limit=..., +) -> None: ... +def pad_2d_inplace( + values: np.ndarray, # numeric_object_t[:, :] + mask: np.ndarray, # const uint8_t[:, :] + limit=..., +) -> None: ... +def backfill( + old: np.ndarray, # ndarray[numeric_object_t] + new: np.ndarray, # ndarray[numeric_object_t] + limit=..., +) -> npt.NDArray[np.intp]: ... # np.ndarray[np.intp, ndim=1] +def backfill_inplace( + values: np.ndarray, # numeric_object_t[:] + mask: np.ndarray, # uint8_t[:] + limit=..., +) -> None: ... +def backfill_2d_inplace( + values: np.ndarray, # numeric_object_t[:, :] + mask: np.ndarray, # const uint8_t[:, :] + limit=..., +) -> None: ... +def is_monotonic( + arr: np.ndarray, # ndarray[numeric_object_t, ndim=1] + timelike: bool, +) -> tuple[bool, bool, bool]: ... + +# ---------------------------------------------------------------------- +# rank_1d, rank_2d +# ---------------------------------------------------------------------- + +def rank_1d( + values: np.ndarray, # ndarray[numeric_object_t, ndim=1] + labels: np.ndarray | None = ..., # const int64_t[:]=None + is_datetimelike: bool = ..., + ties_method=..., + ascending: bool = ..., + pct: bool = ..., + na_option=..., + mask: npt.NDArray[np.bool_] | None = ..., +) -> np.ndarray: ... # np.ndarray[float64_t, ndim=1] +def rank_2d( + in_arr: np.ndarray, # ndarray[numeric_object_t, ndim=2] + axis: int = ..., + is_datetimelike: bool = ..., + ties_method=..., + ascending: bool = ..., + na_option=..., + pct: bool = ..., +) -> np.ndarray: ... # np.ndarray[float64_t, ndim=1] +def diff_2d( + arr: np.ndarray, # ndarray[diff_t, ndim=2] + out: np.ndarray, # ndarray[out_t, ndim=2] + periods: int, + axis: int, + datetimelike: bool = ..., +) -> None: ... +def ensure_platform_int(arr: object) -> npt.NDArray[np.intp]: ... +def ensure_object(arr: object) -> npt.NDArray[np.object_]: ... +def ensure_float64(arr: object) -> npt.NDArray[np.float64]: ... +def ensure_int8(arr: object) -> npt.NDArray[np.int8]: ... +def ensure_int16(arr: object) -> npt.NDArray[np.int16]: ... +def ensure_int32(arr: object) -> npt.NDArray[np.int32]: ... +def ensure_int64(arr: object) -> npt.NDArray[np.int64]: ... +def ensure_uint64(arr: object) -> npt.NDArray[np.uint64]: ... +def take_1d_int8_int8( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int8_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... 
+def take_1d_int8_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int8_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_int16( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int32_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int32_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int64_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_float32_float32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_float32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_float64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_object_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_bool_bool( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_bool_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_int8( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_int16( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int32_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int32_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... 
+def take_2d_axis0_int32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int64_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_float32_float32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_float32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_float64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_object_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_bool_bool( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_bool_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_int8( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_int16( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int32_int32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int32_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int64_int64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_float32_float32( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_float32_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_float64_float64( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_object_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... 
+def take_2d_axis1_bool_bool( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_bool_object( + values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int8_int8( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int8_int32( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int8_int64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int8_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int16_int16( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int16_int32( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int16_int64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int16_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int32_int32( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int32_int64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int32_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int64_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_float32_float32( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_float32_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_float64_float64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_object_object( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_bool_bool( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_bool_object( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... +def take_2d_multi_int64_int64( + values: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value=..., +) -> None: ... 
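Editor's note: the long run of take_1d_*/take_2d_* stubs above describes dtype-specialized fancy-indexing kernels that appear to be generated from a single template. Each one fills a preallocated out array from values at the positions in indexer, writing fill_value wherever the indexer is -1. A minimal sketch of that contract in plain NumPy (take_1d_sketch is a hypothetical illustration, not the compiled implementation):

import numpy as np
import pandas as pd

def take_1d_sketch(values, indexer, out, fill_value=np.nan):
    # Same contract as the take_1d_* stubs: -1 in the indexer means
    # "missing", and that slot receives fill_value.
    for i, idx in enumerate(indexer):
        out[i] = fill_value if idx == -1 else values[idx]

values = np.array([10, 20, 30], dtype=np.int64)
out = np.empty(3, dtype=np.float64)
take_1d_sketch(values, np.array([2, -1, 0], dtype=np.intp), out)
print(out)  # [30. nan 10.]

# The supported public entry point with the same -1/fill semantics:
print(pd.api.extensions.take(values, [2, -1, 0], allow_fill=True, fill_value=np.nan))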
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/arrays.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/arrays.cpython-39-darwin.so new file mode 100755 index 00000000..1e009705 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/arrays.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/arrays.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/arrays.pyi new file mode 100644 index 00000000..78fee8f0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/arrays.pyi @@ -0,0 +1,40 @@ +from typing import Sequence + +import numpy as np + +from pandas._typing import ( + AxisInt, + DtypeObj, + Self, + Shape, +) + +class NDArrayBacked: + _dtype: DtypeObj + _ndarray: np.ndarray + def __init__(self, values: np.ndarray, dtype: DtypeObj) -> None: ... + @classmethod + def _simple_new(cls, values: np.ndarray, dtype: DtypeObj): ... + def _from_backing_data(self, values: np.ndarray): ... + def __setstate__(self, state): ... + def __len__(self) -> int: ... + @property + def shape(self) -> Shape: ... + @property + def ndim(self) -> int: ... + @property + def size(self) -> int: ... + @property + def nbytes(self) -> int: ... + def copy(self): ... + def delete(self, loc, axis=...): ... + def swapaxes(self, axis1, axis2): ... + def repeat(self, repeats: int | Sequence[int], axis: int | None = ...): ... + def reshape(self, *args, **kwargs): ... + def ravel(self, order=...): ... + @property + def T(self): ... + @classmethod + def _concat_same_type( + cls, to_concat: Sequence[Self], axis: AxisInt = ... + ) -> Self: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/byteswap.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/byteswap.cpython-39-darwin.so new file mode 100755 index 00000000..4e38152c Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/byteswap.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/byteswap.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/byteswap.pyi new file mode 100644 index 00000000..bb0dbfc6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/byteswap.pyi @@ -0,0 +1,5 @@ +def read_float_with_byteswap(data: bytes, offset: int, byteswap: bool) -> float: ... +def read_double_with_byteswap(data: bytes, offset: int, byteswap: bool) -> float: ... +def read_uint16_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ... +def read_uint32_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ... +def read_uint64_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ... 
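Editor's note: byteswap.pyi covers small helpers used by the SAS reader to decode numbers whose on-disk endianness may differ from the host's. A rough standard-library equivalent follows; this is a sketch of the apparent semantics, not the compiled code, and it assumes a little-endian host so that byteswap=True means "read as big-endian":

import struct

def read_double_sketch(data: bytes, offset: int, byteswap: bool) -> float:
    # Assumption: little-endian host, so swapping selects the big-endian view.
    fmt = ">d" if byteswap else "<d"
    return struct.unpack_from(fmt, data, offset)[0]

buf = struct.pack("<d", 1.5)
print(read_double_sketch(buf, 0, byteswap=False))  # 1.5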
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/groupby.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/groupby.cpython-39-darwin.so new file mode 100755 index 00000000..9cd9961a Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/groupby.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/groupby.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/groupby.pyi new file mode 100644 index 00000000..d165ddd6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/groupby.pyi @@ -0,0 +1,203 @@ +from typing import Literal + +import numpy as np + +from pandas._typing import npt + +def group_median_float64( + out: np.ndarray, # ndarray[float64_t, ndim=2] + counts: npt.NDArray[np.int64], + values: np.ndarray, # ndarray[float64_t, ndim=2] + labels: npt.NDArray[np.int64], + min_count: int = ..., # Py_ssize_t + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_cumprod( + out: np.ndarray, # float64_t[:, ::1] + values: np.ndarray, # const float64_t[:, :] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + skipna: bool = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_cumsum( + out: np.ndarray, # int64float_t[:, ::1] + values: np.ndarray, # ndarray[int64float_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + skipna: bool = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_shift_indexer( + out: np.ndarray, # int64_t[::1] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + periods: int, +) -> None: ... +def group_fillna_indexer( + out: np.ndarray, # ndarray[intp_t] + labels: np.ndarray, # ndarray[int64_t] + sorted_labels: npt.NDArray[np.intp], + mask: npt.NDArray[np.uint8], + direction: Literal["ffill", "bfill"], + limit: int, # int64_t + dropna: bool, +) -> None: ... +def group_any_all( + out: np.ndarray, # uint8_t[::1] + values: np.ndarray, # const uint8_t[::1] + labels: np.ndarray, # const int64_t[:] + mask: np.ndarray, # const uint8_t[::1] + val_test: Literal["any", "all"], + skipna: bool, + nullable: bool, +) -> None: ... +def group_sum( + out: np.ndarray, # complexfloatingintuint_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[complexfloatingintuint_t, ndim=2] + labels: np.ndarray, # const intp_t[:] + mask: np.ndarray | None, + result_mask: np.ndarray | None = ..., + min_count: int = ..., + is_datetimelike: bool = ..., +) -> None: ... +def group_prod( + out: np.ndarray, # int64float_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[int64float_t, ndim=2] + labels: np.ndarray, # const intp_t[:] + mask: np.ndarray | None, + result_mask: np.ndarray | None = ..., + min_count: int = ..., +) -> None: ... +def group_var( + out: np.ndarray, # floating[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[floating, ndim=2] + labels: np.ndarray, # const intp_t[:] + min_count: int = ..., # Py_ssize_t + ddof: int = ..., # int64_t + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., + is_datetimelike: bool = ..., + name: str = ..., +) -> None: ... 
+def group_skew( + out: np.ndarray, # float64_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[float64_T, ndim=2] + labels: np.ndarray, # const intp_t[::1] + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., + skipna: bool = ..., +) -> None: ... +def group_mean( + out: np.ndarray, # floating[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[floating, ndim=2] + labels: np.ndarray, # const intp_t[:] + min_count: int = ..., # Py_ssize_t + is_datetimelike: bool = ..., # bint + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_ohlc( + out: np.ndarray, # floatingintuint_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[floatingintuint_t, ndim=2] + labels: np.ndarray, # const intp_t[:] + min_count: int = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_quantile( + out: npt.NDArray[np.float64], + values: np.ndarray, # ndarray[numeric, ndim=1] + labels: npt.NDArray[np.intp], + mask: npt.NDArray[np.uint8], + qs: npt.NDArray[np.float64], # const + starts: npt.NDArray[np.int64], + ends: npt.NDArray[np.int64], + interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"], + result_mask: np.ndarray | None, + is_datetimelike: bool, +) -> None: ... +def group_last( + out: np.ndarray, # rank_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[rank_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + mask: npt.NDArray[np.bool_] | None, + result_mask: npt.NDArray[np.bool_] | None = ..., + min_count: int = ..., # Py_ssize_t + is_datetimelike: bool = ..., +) -> None: ... +def group_nth( + out: np.ndarray, # rank_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[rank_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + mask: npt.NDArray[np.bool_] | None, + result_mask: npt.NDArray[np.bool_] | None = ..., + min_count: int = ..., # int64_t + rank: int = ..., # int64_t + is_datetimelike: bool = ..., +) -> None: ... +def group_rank( + out: np.ndarray, # float64_t[:, ::1] + values: np.ndarray, # ndarray[rank_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + ties_method: Literal["average", "min", "max", "first", "dense"] = ..., + ascending: bool = ..., + pct: bool = ..., + na_option: Literal["keep", "top", "bottom"] = ..., + mask: npt.NDArray[np.bool_] | None = ..., +) -> None: ... +def group_max( + out: np.ndarray, # groupby_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + min_count: int = ..., + is_datetimelike: bool = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_min( + out: np.ndarray, # groupby_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + min_count: int = ..., + is_datetimelike: bool = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., +) -> None: ... +def group_cummin( + out: np.ndarray, # groupby_t[:, ::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., + skipna: bool = ..., +) -> None: ... 
+def group_cummax( + out: np.ndarray, # groupby_t[:, ::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + ngroups: int, + is_datetimelike: bool, + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., + skipna: bool = ..., +) -> None: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/hashing.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/hashing.cpython-39-darwin.so new file mode 100755 index 00000000..59562936 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/hashing.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/hashing.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/hashing.pyi new file mode 100644 index 00000000..8361026e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/hashing.pyi @@ -0,0 +1,9 @@ +import numpy as np + +from pandas._typing import npt + +def hash_object_array( + arr: npt.NDArray[np.object_], + key: str, + encoding: str = ..., +) -> npt.NDArray[np.uint64]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/hashtable.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/hashtable.cpython-39-darwin.so new file mode 100755 index 00000000..e6abaa3f Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/hashtable.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/hashtable.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/hashtable.pyi new file mode 100644 index 00000000..2bc6d74f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/hashtable.pyi @@ -0,0 +1,251 @@ +from typing import ( + Any, + Hashable, + Literal, +) + +import numpy as np + +from pandas._typing import npt + +def unique_label_indices( + labels: np.ndarray, # const int64_t[:] +) -> np.ndarray: ... + +class Factorizer: + count: int + uniques: Any + def __init__(self, size_hint: int) -> None: ... + def get_count(self) -> int: ... + def factorize( + self, + values: np.ndarray, + sort: bool = ..., + na_sentinel=..., + na_value=..., + mask=..., + ) -> npt.NDArray[np.intp]: ... + +class ObjectFactorizer(Factorizer): + table: PyObjectHashTable + uniques: ObjectVector + +class Int64Factorizer(Factorizer): + table: Int64HashTable + uniques: Int64Vector + +class UInt64Factorizer(Factorizer): + table: UInt64HashTable + uniques: UInt64Vector + +class Int32Factorizer(Factorizer): + table: Int32HashTable + uniques: Int32Vector + +class UInt32Factorizer(Factorizer): + table: UInt32HashTable + uniques: UInt32Vector + +class Int16Factorizer(Factorizer): + table: Int16HashTable + uniques: Int16Vector + +class UInt16Factorizer(Factorizer): + table: UInt16HashTable + uniques: UInt16Vector + +class Int8Factorizer(Factorizer): + table: Int8HashTable + uniques: Int8Vector + +class UInt8Factorizer(Factorizer): + table: UInt8HashTable + uniques: UInt8Vector + +class Float64Factorizer(Factorizer): + table: Float64HashTable + uniques: Float64Vector + +class Float32Factorizer(Factorizer): + table: Float32HashTable + uniques: Float32Vector + +class Complex64Factorizer(Factorizer): + table: Complex64HashTable + uniques: Complex64Vector + +class Complex128Factorizer(Factorizer): + table: Complex128HashTable + uniques: Complex128Vector + +class Int64Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.int64]: ... 
+ +class Int32Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.int32]: ... + +class Int16Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.int16]: ... + +class Int8Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.int8]: ... + +class UInt64Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.uint64]: ... + +class UInt32Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.uint32]: ... + +class UInt16Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.uint16]: ... + +class UInt8Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.uint8]: ... + +class Float64Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.float64]: ... + +class Float32Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.float32]: ... + +class Complex128Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.complex128]: ... + +class Complex64Vector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.complex64]: ... + +class StringVector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.object_]: ... + +class ObjectVector: + def __init__(self, *args) -> None: ... + def __len__(self) -> int: ... + def to_array(self) -> npt.NDArray[np.object_]: ... + +class HashTable: + # NB: The base HashTable class does _not_ actually have these methods; + # we are putting them here for the sake of mypy to avoid + # reproducing them in each subclass below. + def __init__(self, size_hint: int = ..., uses_mask: bool = ...) -> None: ... + def __len__(self) -> int: ... + def __contains__(self, key: Hashable) -> bool: ... + def sizeof(self, deep: bool = ...) -> int: ... + def get_state(self) -> dict[str, int]: ... + # TODO: `item` type is subclass-specific + def get_item(self, item): ... # TODO: return type? + def set_item(self, item, val) -> None: ... + def get_na(self): ... # TODO: return type? + def set_na(self, val) -> None: ... + def map_locations( + self, + values: np.ndarray, # np.ndarray[subclass-specific] + mask: npt.NDArray[np.bool_] | None = ..., + ) -> None: ... + def lookup( + self, + values: np.ndarray, # np.ndarray[subclass-specific] + mask: npt.NDArray[np.bool_] | None = ..., + ) -> npt.NDArray[np.intp]: ... + def get_labels( + self, + values: np.ndarray, # np.ndarray[subclass-specific] + uniques, # SubclassTypeVector + count_prior: int = ..., + na_sentinel: int = ..., + na_value: object = ..., + mask=..., + ) -> npt.NDArray[np.intp]: ... + def unique( + self, + values: np.ndarray, # np.ndarray[subclass-specific] + return_inverse: bool = ..., + ) -> ( + tuple[ + np.ndarray, # np.ndarray[subclass-specific] + npt.NDArray[np.intp], + ] + | np.ndarray + ): ... 
# np.ndarray[subclass-specific] + def factorize( + self, + values: np.ndarray, # np.ndarray[subclass-specific] + na_sentinel: int = ..., + na_value: object = ..., + mask=..., + ) -> tuple[np.ndarray, npt.NDArray[np.intp]]: ... # np.ndarray[subclass-specific] + +class Complex128HashTable(HashTable): ... +class Complex64HashTable(HashTable): ... +class Float64HashTable(HashTable): ... +class Float32HashTable(HashTable): ... + +class Int64HashTable(HashTable): + # Only Int64HashTable has get_labels_groupby, map_keys_to_values + def get_labels_groupby( + self, + values: npt.NDArray[np.int64], # const int64_t[:] + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]: ... + def map_keys_to_values( + self, + keys: npt.NDArray[np.int64], + values: npt.NDArray[np.int64], # const int64_t[:] + ) -> None: ... + +class Int32HashTable(HashTable): ... +class Int16HashTable(HashTable): ... +class Int8HashTable(HashTable): ... +class UInt64HashTable(HashTable): ... +class UInt32HashTable(HashTable): ... +class UInt16HashTable(HashTable): ... +class UInt8HashTable(HashTable): ... +class StringHashTable(HashTable): ... +class PyObjectHashTable(HashTable): ... +class IntpHashTable(HashTable): ... + +def duplicated( + values: np.ndarray, + keep: Literal["last", "first", False] = ..., + mask: npt.NDArray[np.bool_] | None = ..., +) -> npt.NDArray[np.bool_]: ... +def mode( + values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = ... +) -> np.ndarray: ... +def value_count( + values: np.ndarray, + dropna: bool, + mask: npt.NDArray[np.bool_] | None = ..., +) -> tuple[np.ndarray, npt.NDArray[np.int64]]: ... # np.ndarray[same-as-values] + +# arr and values should have same dtype +def ismember( + arr: np.ndarray, + values: np.ndarray, +) -> npt.NDArray[np.bool_]: ... +def object_hash(obj) -> int: ... +def objects_are_equal(a, b) -> bool: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/index.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/index.cpython-39-darwin.so new file mode 100755 index 00000000..fa029c2b Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/index.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/index.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/index.pyi new file mode 100644 index 00000000..8321200a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/index.pyi @@ -0,0 +1,107 @@ +import numpy as np + +from pandas._typing import npt + +from pandas import MultiIndex +from pandas.core.arrays import ExtensionArray + +multiindex_nulls_shift: int + +class IndexEngine: + over_size_threshold: bool + def __init__(self, values: np.ndarray) -> None: ... + def __contains__(self, val: object) -> bool: ... + + # -> int | slice | np.ndarray[bool] + def get_loc(self, val: object) -> int | slice | np.ndarray: ... + def sizeof(self, deep: bool = ...) -> int: ... + def __sizeof__(self) -> int: ... + @property + def is_unique(self) -> bool: ... + @property + def is_monotonic_increasing(self) -> bool: ... + @property + def is_monotonic_decreasing(self) -> bool: ... + @property + def is_mapping_populated(self) -> bool: ... + def clear_mapping(self): ... + def get_indexer(self, values: np.ndarray) -> npt.NDArray[np.intp]: ... + def get_indexer_non_unique( + self, + targets: np.ndarray, + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... + +class MaskedIndexEngine(IndexEngine): + def __init__(self, values: object) -> None: ... 
+ def get_indexer_non_unique( + self, targets: object + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... + +class Float64Engine(IndexEngine): ... +class Float32Engine(IndexEngine): ... +class Complex128Engine(IndexEngine): ... +class Complex64Engine(IndexEngine): ... +class Int64Engine(IndexEngine): ... +class Int32Engine(IndexEngine): ... +class Int16Engine(IndexEngine): ... +class Int8Engine(IndexEngine): ... +class UInt64Engine(IndexEngine): ... +class UInt32Engine(IndexEngine): ... +class UInt16Engine(IndexEngine): ... +class UInt8Engine(IndexEngine): ... +class ObjectEngine(IndexEngine): ... +class DatetimeEngine(Int64Engine): ... +class TimedeltaEngine(DatetimeEngine): ... +class PeriodEngine(Int64Engine): ... +class BoolEngine(UInt8Engine): ... +class MaskedFloat64Engine(MaskedIndexEngine): ... +class MaskedFloat32Engine(MaskedIndexEngine): ... +class MaskedComplex128Engine(MaskedIndexEngine): ... +class MaskedComplex64Engine(MaskedIndexEngine): ... +class MaskedInt64Engine(MaskedIndexEngine): ... +class MaskedInt32Engine(MaskedIndexEngine): ... +class MaskedInt16Engine(MaskedIndexEngine): ... +class MaskedInt8Engine(MaskedIndexEngine): ... +class MaskedUInt64Engine(MaskedIndexEngine): ... +class MaskedUInt32Engine(MaskedIndexEngine): ... +class MaskedUInt16Engine(MaskedIndexEngine): ... +class MaskedUInt8Engine(MaskedIndexEngine): ... +class MaskedBoolEngine(MaskedUInt8Engine): ... + +class BaseMultiIndexCodesEngine: + levels: list[np.ndarray] + offsets: np.ndarray # ndarray[uint64_t, ndim=1] + + def __init__( + self, + levels: list[np.ndarray], # all entries hashable + labels: list[np.ndarray], # all entries integer-dtyped + offsets: np.ndarray, # np.ndarray[np.uint64, ndim=1] + ) -> None: ... + def get_indexer(self, target: npt.NDArray[np.object_]) -> npt.NDArray[np.intp]: ... + def _extract_level_codes(self, target: MultiIndex) -> np.ndarray: ... + def get_indexer_with_fill( + self, + target: np.ndarray, # np.ndarray[object] of tuples + values: np.ndarray, # np.ndarray[object] of tuples + method: str, + limit: int | None, + ) -> npt.NDArray[np.intp]: ... + +class ExtensionEngine: + def __init__(self, values: ExtensionArray) -> None: ... + def __contains__(self, val: object) -> bool: ... + def get_loc(self, val: object) -> int | slice | np.ndarray: ... + def get_indexer(self, values: np.ndarray) -> npt.NDArray[np.intp]: ... + def get_indexer_non_unique( + self, + targets: np.ndarray, + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... + @property + def is_unique(self) -> bool: ... + @property + def is_monotonic_increasing(self) -> bool: ... + @property + def is_monotonic_decreasing(self) -> bool: ... + def sizeof(self, deep: bool = ...) -> int: ... + def clear_mapping(self): ... 
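Editor's note: the engine classes in index.pyi are the per-dtype lookup backends for Index. An engine hashes the values once, caches whether they are unique and monotonic, and then answers get_loc/get_indexer queries. They are internal, but their behavior is visible through the public Index methods:

import pandas as pd

idx = pd.Index([10, 20, 30])          # int64 data, served by Int64Engine internally
print(idx.get_loc(20))                # 1
print(idx.get_indexer([30, 10, 99]))  # [ 2  0 -1]; -1 marks "not found",
                                      # matching the npt.NDArray[np.intp] returns above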
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/indexing.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/indexing.cpython-39-darwin.so new file mode 100755 index 00000000..808c9c8d Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/indexing.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/indexing.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/indexing.pyi new file mode 100644 index 00000000..3ae5c504 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/indexing.pyi @@ -0,0 +1,17 @@ +from typing import ( + Generic, + TypeVar, +) + +from pandas.core.indexing import IndexingMixin + +_IndexingMixinT = TypeVar("_IndexingMixinT", bound=IndexingMixin) + +class NDFrameIndexerBase(Generic[_IndexingMixinT]): + name: str + # in practice obj is either a DataFrame or a Series + obj: _IndexingMixinT + + def __init__(self, name: str, obj: _IndexingMixinT) -> None: ... + @property + def ndim(self) -> int: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/internals.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/internals.cpython-39-darwin.so new file mode 100755 index 00000000..a7fafc4e Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/internals.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/internals.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/internals.pyi new file mode 100644 index 00000000..ce112413 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/internals.pyi @@ -0,0 +1,106 @@ +from typing import ( + Iterator, + Sequence, + final, + overload, +) +import weakref + +import numpy as np + +from pandas._typing import ( + ArrayLike, + Self, + npt, +) + +from pandas import Index +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray +from pandas.core.internals.blocks import Block as B + +def slice_len(slc: slice, objlen: int = ...) -> int: ... +def get_concat_blkno_indexers( + blknos_list: list[npt.NDArray[np.intp]], +) -> list[tuple[npt.NDArray[np.intp], BlockPlacement]]: ... +def get_blkno_indexers( + blknos: np.ndarray, # int64_t[:] + group: bool = ..., +) -> list[tuple[int, slice | np.ndarray]]: ... +def get_blkno_placements( + blknos: np.ndarray, + group: bool = ..., +) -> Iterator[tuple[int, BlockPlacement]]: ... +def update_blklocs_and_blknos( + blklocs: npt.NDArray[np.intp], + blknos: npt.NDArray[np.intp], + loc: int, + nblocks: int, +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... +@final +class BlockPlacement: + def __init__(self, val: int | slice | np.ndarray) -> None: ... + @property + def indexer(self) -> np.ndarray | slice: ... + @property + def as_array(self) -> np.ndarray: ... + @property + def as_slice(self) -> slice: ... + @property + def is_slice_like(self) -> bool: ... + @overload + def __getitem__( + self, loc: slice | Sequence[int] | npt.NDArray[np.intp] + ) -> BlockPlacement: ... + @overload + def __getitem__(self, loc: int) -> int: ... + def __iter__(self) -> Iterator[int]: ... + def __len__(self) -> int: ... + def delete(self, loc) -> BlockPlacement: ... + def add(self, other) -> BlockPlacement: ... + def append(self, others: list[BlockPlacement]) -> BlockPlacement: ... + def tile_for_unstack(self, factor: int) -> npt.NDArray[np.intp]: ... 
+ +class SharedBlock: + _mgr_locs: BlockPlacement + ndim: int + values: ArrayLike + refs: BlockValuesRefs + def __init__( + self, + values: ArrayLike, + placement: BlockPlacement, + ndim: int, + refs: BlockValuesRefs | None = ..., + ) -> None: ... + +class NumpyBlock(SharedBlock): + values: np.ndarray + @final + def slice_block_rows(self, slicer: slice) -> Self: ... + +class NDArrayBackedBlock(SharedBlock): + values: NDArrayBackedExtensionArray + @final + def slice_block_rows(self, slicer: slice) -> Self: ... + +class Block(SharedBlock): ... + +class BlockManager: + blocks: tuple[B, ...] + axes: list[Index] + _known_consolidated: bool + _is_consolidated: bool + _blknos: np.ndarray + _blklocs: np.ndarray + def __init__( + self, blocks: tuple[B, ...], axes: list[Index], verify_integrity=... + ) -> None: ... + def get_slice(self, slobj: slice, axis: int = ...) -> Self: ... + def _rebuild_blknos_and_blklocs(self) -> None: ... + +class BlockValuesRefs: + referenced_blocks: list[weakref.ref] + def __init__(self, blk: SharedBlock | None = ...) -> None: ... + def add_reference(self, blk: SharedBlock) -> None: ... + def add_index_reference(self, index: Index) -> None: ... + def has_reference(self) -> bool: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/interval.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/interval.cpython-39-darwin.so new file mode 100755 index 00000000..e9d784d4 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/interval.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/interval.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/interval.pyi new file mode 100644 index 00000000..587fdf84 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/interval.pyi @@ -0,0 +1,174 @@ +from typing import ( + Any, + Generic, + TypeVar, + overload, +) + +import numpy as np +import numpy.typing as npt + +from pandas._typing import ( + IntervalClosedType, + Timedelta, + Timestamp, +) + +VALID_CLOSED: frozenset[str] + +_OrderableScalarT = TypeVar("_OrderableScalarT", int, float) +_OrderableTimesT = TypeVar("_OrderableTimesT", Timestamp, Timedelta) +_OrderableT = TypeVar("_OrderableT", int, float, Timestamp, Timedelta) + +class _LengthDescriptor: + @overload + def __get__( + self, instance: Interval[_OrderableScalarT], owner: Any + ) -> _OrderableScalarT: ... + @overload + def __get__( + self, instance: Interval[_OrderableTimesT], owner: Any + ) -> Timedelta: ... + +class _MidDescriptor: + @overload + def __get__(self, instance: Interval[_OrderableScalarT], owner: Any) -> float: ... + @overload + def __get__( + self, instance: Interval[_OrderableTimesT], owner: Any + ) -> _OrderableTimesT: ... + +class IntervalMixin: + @property + def closed_left(self) -> bool: ... + @property + def closed_right(self) -> bool: ... + @property + def open_left(self) -> bool: ... + @property + def open_right(self) -> bool: ... + @property + def is_empty(self) -> bool: ... + def _check_closed_matches(self, other: IntervalMixin, name: str = ...) -> None: ... + +class Interval(IntervalMixin, Generic[_OrderableT]): + @property + def left(self: Interval[_OrderableT]) -> _OrderableT: ... + @property + def right(self: Interval[_OrderableT]) -> _OrderableT: ... + @property + def closed(self) -> IntervalClosedType: ... + mid: _MidDescriptor + length: _LengthDescriptor + def __init__( + self, + left: _OrderableT, + right: _OrderableT, + closed: IntervalClosedType = ..., + ) -> None: ... 
+ def __hash__(self) -> int: ... + @overload + def __contains__( + self: Interval[Timedelta], key: Timedelta | Interval[Timedelta] + ) -> bool: ... + @overload + def __contains__( + self: Interval[Timestamp], key: Timestamp | Interval[Timestamp] + ) -> bool: ... + @overload + def __contains__( + self: Interval[_OrderableScalarT], + key: _OrderableScalarT | Interval[_OrderableScalarT], + ) -> bool: ... + @overload + def __add__( + self: Interval[_OrderableTimesT], y: Timedelta + ) -> Interval[_OrderableTimesT]: ... + @overload + def __add__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __add__(self: Interval[float], y: float) -> Interval[float]: ... + @overload + def __radd__( + self: Interval[_OrderableTimesT], y: Timedelta + ) -> Interval[_OrderableTimesT]: ... + @overload + def __radd__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __radd__(self: Interval[float], y: float) -> Interval[float]: ... + @overload + def __sub__( + self: Interval[_OrderableTimesT], y: Timedelta + ) -> Interval[_OrderableTimesT]: ... + @overload + def __sub__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __sub__(self: Interval[float], y: float) -> Interval[float]: ... + @overload + def __rsub__( + self: Interval[_OrderableTimesT], y: Timedelta + ) -> Interval[_OrderableTimesT]: ... + @overload + def __rsub__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __rsub__(self: Interval[float], y: float) -> Interval[float]: ... + @overload + def __mul__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __mul__(self: Interval[float], y: float) -> Interval[float]: ... + @overload + def __rmul__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __rmul__(self: Interval[float], y: float) -> Interval[float]: ... + @overload + def __truediv__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __truediv__(self: Interval[float], y: float) -> Interval[float]: ... + @overload + def __floordiv__( + self: Interval[int], y: _OrderableScalarT + ) -> Interval[_OrderableScalarT]: ... + @overload + def __floordiv__(self: Interval[float], y: float) -> Interval[float]: ... + def overlaps(self: Interval[_OrderableT], other: Interval[_OrderableT]) -> bool: ... + +def intervals_to_interval_bounds( + intervals: np.ndarray, validate_closed: bool = ... +) -> tuple[np.ndarray, np.ndarray, IntervalClosedType]: ... + +class IntervalTree(IntervalMixin): + def __init__( + self, + left: np.ndarray, + right: np.ndarray, + closed: IntervalClosedType = ..., + leaf_size: int = ..., + ) -> None: ... + @property + def mid(self) -> np.ndarray: ... + @property + def length(self) -> np.ndarray: ... + def get_indexer(self, target) -> npt.NDArray[np.intp]: ... + def get_indexer_non_unique( + self, target + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... + _na_count: int + @property + def is_overlapping(self) -> bool: ... + @property + def is_monotonic_increasing(self) -> bool: ... + def clear_mapping(self) -> None: ... 
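Editor's note: interval.pyi types Interval as generic over its endpoint type, and the _LengthDescriptor/_MidDescriptor overloads encode that numeric intervals have numeric length and mid while Timestamp intervals have a Timedelta length. The overloaded arithmetic corresponds to the public behavior:

import pandas as pd

iv = pd.Interval(0, 5, closed="right")
print(3 in iv)             # True
print(0 in iv)             # False: the left endpoint is open when closed="right"
print(iv.length, iv.mid)   # 5 2.5
print(iv + 10)             # Interval(10, 15, closed='right')

tiv = pd.Interval(pd.Timestamp("2024-01-01"), pd.Timestamp("2024-01-02"))
print(tiv.length)          # Timedelta('1 days 00:00:00')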
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/join.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/join.cpython-39-darwin.so new file mode 100755 index 00000000..dceb7d35 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/join.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/join.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/join.pyi new file mode 100644 index 00000000..7ee649a5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/join.pyi @@ -0,0 +1,78 @@ +import numpy as np + +from pandas._typing import npt + +def inner_join( + left: np.ndarray, # const intp_t[:] + right: np.ndarray, # const intp_t[:] + max_groups: int, +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... +def left_outer_join( + left: np.ndarray, # const intp_t[:] + right: np.ndarray, # const intp_t[:] + max_groups: int, + sort: bool = ..., +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... +def full_outer_join( + left: np.ndarray, # const intp_t[:] + right: np.ndarray, # const intp_t[:] + max_groups: int, +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... +def ffill_indexer( + indexer: np.ndarray, # const intp_t[:] +) -> npt.NDArray[np.intp]: ... +def left_join_indexer_unique( + left: np.ndarray, # ndarray[join_t] + right: np.ndarray, # ndarray[join_t] +) -> npt.NDArray[np.intp]: ... +def left_join_indexer( + left: np.ndarray, # ndarray[join_t] + right: np.ndarray, # ndarray[join_t] +) -> tuple[ + np.ndarray, # np.ndarray[join_t] + npt.NDArray[np.intp], + npt.NDArray[np.intp], +]: ... +def inner_join_indexer( + left: np.ndarray, # ndarray[join_t] + right: np.ndarray, # ndarray[join_t] +) -> tuple[ + np.ndarray, # np.ndarray[join_t] + npt.NDArray[np.intp], + npt.NDArray[np.intp], +]: ... +def outer_join_indexer( + left: np.ndarray, # ndarray[join_t] + right: np.ndarray, # ndarray[join_t] +) -> tuple[ + np.ndarray, # np.ndarray[join_t] + npt.NDArray[np.intp], + npt.NDArray[np.intp], +]: ... +def asof_join_backward_on_X_by_Y( + left_values: np.ndarray, # ndarray[numeric_t] + right_values: np.ndarray, # ndarray[numeric_t] + left_by_values: np.ndarray, # ndarray[by_t] + right_by_values: np.ndarray, # ndarray[by_t] + allow_exact_matches: bool = ..., + tolerance: np.number | float | None = ..., + use_hashtable: bool = ..., +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... +def asof_join_forward_on_X_by_Y( + left_values: np.ndarray, # ndarray[numeric_t] + right_values: np.ndarray, # ndarray[numeric_t] + left_by_values: np.ndarray, # ndarray[by_t] + right_by_values: np.ndarray, # ndarray[by_t] + allow_exact_matches: bool = ..., + tolerance: np.number | float | None = ..., + use_hashtable: bool = ..., +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... +def asof_join_nearest_on_X_by_Y( + left_values: np.ndarray, # ndarray[numeric_t] + right_values: np.ndarray, # ndarray[numeric_t] + left_by_values: np.ndarray, # ndarray[by_t] + right_by_values: np.ndarray, # ndarray[by_t] + allow_exact_matches: bool = ..., + tolerance: np.number | float | None = ..., + use_hashtable: bool = ..., +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... 
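Editor's note: the join helpers above all return position arrays (np.intp) into the left and right inputs rather than joined values. The same indexers are reachable through the public Index.join API, which makes the -1 "no match" convention easy to see:

import pandas as pd

left, right = pd.Index([1, 2, 4]), pd.Index([2, 3, 4])
joined, lidx, ridx = left.join(right, how="outer", return_indexers=True)
print(list(joined))  # [1, 2, 3, 4]
print(lidx)          # [ 0  1 -1  2]: position in `left`, or -1 if absent
print(ridx)          # [-1  0  1  2]: position in `right`, or -1 if absent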
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/json.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/json.cpython-39-darwin.so new file mode 100755 index 00000000..6a3f4f77 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/json.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/json.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/json.pyi new file mode 100644 index 00000000..bc4fe685 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/json.pyi @@ -0,0 +1,23 @@ +from typing import ( + Any, + Callable, +) + +def ujson_dumps( + obj: Any, + ensure_ascii: bool = ..., + double_precision: int = ..., + indent: int = ..., + orient: str = ..., + date_unit: str = ..., + iso_dates: bool = ..., + default_handler: None + | Callable[[Any], str | float | bool | list | dict | None] = ..., +) -> str: ... +def ujson_loads( + s: str, + precise_float: bool = ..., + numpy: bool = ..., + dtype: None = ..., + labelled: bool = ..., +) -> Any: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/lib.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/lib.cpython-39-darwin.so new file mode 100755 index 00000000..0de3d6bb Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/lib.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/lib.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/lib.pyi new file mode 100644 index 00000000..15bd5a73 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/lib.pyi @@ -0,0 +1,207 @@ +# TODO(npdtypes): Many types specified here can be made more specific/accurate; +# the more specific versions are specified in comments +from decimal import Decimal +from typing import ( + Any, + Callable, + Final, + Generator, + Hashable, + Literal, + TypeAlias, + overload, +) + +import numpy as np + +from pandas._libs.interval import Interval +from pandas._libs.tslibs import Period +from pandas._typing import ( + ArrayLike, + DtypeObj, + TypeGuard, + npt, +) + +# placeholder until we can specify np.ndarray[object, ndim=2] +ndarray_obj_2d = np.ndarray + +from enum import Enum + +class _NoDefault(Enum): + no_default = ... + +no_default: Final = _NoDefault.no_default +NoDefault: TypeAlias = Literal[_NoDefault.no_default] + +i8max: int +u8max: int + +def is_np_dtype(dtype: object, kinds: str | None = ...) -> TypeGuard[np.dtype]: ... +def item_from_zerodim(val: object) -> object: ... +def infer_dtype(value: object, skipna: bool = ...) -> str: ... +def is_iterator(obj: object) -> bool: ... +def is_scalar(val: object) -> bool: ... +def is_list_like(obj: object, allow_sets: bool = ...) -> bool: ... +def is_pyarrow_array(obj: object) -> bool: ... +def is_period(val: object) -> TypeGuard[Period]: ... +def is_interval(val: object) -> TypeGuard[Interval]: ... +def is_decimal(val: object) -> TypeGuard[Decimal]: ... +def is_complex(val: object) -> TypeGuard[complex]: ... +def is_bool(val: object) -> TypeGuard[bool | np.bool_]: ... +def is_integer(val: object) -> TypeGuard[int | np.integer]: ... +def is_int_or_none(obj) -> bool: ... +def is_float(val: object) -> TypeGuard[float]: ... +def is_interval_array(values: np.ndarray) -> bool: ... +def is_datetime64_array(values: np.ndarray) -> bool: ... +def is_timedelta_or_timedelta64_array(values: np.ndarray) -> bool: ... +def is_datetime_with_singletz_array(values: np.ndarray) -> bool: ... 
+def is_time_array(values: np.ndarray, skipna: bool = ...): ... +def is_date_array(values: np.ndarray, skipna: bool = ...): ... +def is_datetime_array(values: np.ndarray, skipna: bool = ...): ... +def is_string_array(values: np.ndarray, skipna: bool = ...): ... +def is_float_array(values: np.ndarray, skipna: bool = ...): ... +def is_integer_array(values: np.ndarray, skipna: bool = ...): ... +def is_bool_array(values: np.ndarray, skipna: bool = ...): ... +def fast_multiget(mapping: dict, keys: np.ndarray, default=...) -> np.ndarray: ... +def fast_unique_multiple_list_gen(gen: Generator, sort: bool = ...) -> list: ... +def fast_unique_multiple_list(lists: list, sort: bool | None = ...) -> list: ... +def map_infer( + arr: np.ndarray, + f: Callable[[Any], Any], + convert: bool = ..., + ignore_na: bool = ..., +) -> np.ndarray: ... +@overload +def maybe_convert_objects( + objects: npt.NDArray[np.object_], + *, + try_float: bool = ..., + safe: bool = ..., + convert_numeric: bool = ..., + convert_non_numeric: Literal[False] = ..., + convert_to_nullable_dtype: Literal[False] = ..., + dtype_if_all_nat: DtypeObj | None = ..., +) -> npt.NDArray[np.object_ | np.number]: ... +@overload +def maybe_convert_objects( + objects: npt.NDArray[np.object_], + *, + try_float: bool = ..., + safe: bool = ..., + convert_numeric: bool = ..., + convert_non_numeric: bool = ..., + convert_to_nullable_dtype: Literal[True] = ..., + dtype_if_all_nat: DtypeObj | None = ..., +) -> ArrayLike: ... +@overload +def maybe_convert_objects( + objects: npt.NDArray[np.object_], + *, + try_float: bool = ..., + safe: bool = ..., + convert_numeric: bool = ..., + convert_non_numeric: bool = ..., + convert_to_nullable_dtype: bool = ..., + dtype_if_all_nat: DtypeObj | None = ..., +) -> ArrayLike: ... +@overload +def maybe_convert_numeric( + values: npt.NDArray[np.object_], + na_values: set, + convert_empty: bool = ..., + coerce_numeric: bool = ..., + convert_to_masked_nullable: Literal[False] = ..., +) -> tuple[np.ndarray, None]: ... +@overload +def maybe_convert_numeric( + values: npt.NDArray[np.object_], + na_values: set, + convert_empty: bool = ..., + coerce_numeric: bool = ..., + *, + convert_to_masked_nullable: Literal[True], +) -> tuple[np.ndarray, np.ndarray]: ... + +# TODO: restrict `arr`? +def ensure_string_array( + arr, + na_value: object = ..., + convert_na_value: bool = ..., + copy: bool = ..., + skipna: bool = ..., +) -> npt.NDArray[np.object_]: ... +def convert_nans_to_NA( + arr: npt.NDArray[np.object_], +) -> npt.NDArray[np.object_]: ... +def fast_zip(ndarrays: list) -> npt.NDArray[np.object_]: ... + +# TODO: can we be more specific about rows? +def to_object_array_tuples(rows: object) -> ndarray_obj_2d: ... +def tuples_to_object_array( + tuples: npt.NDArray[np.object_], +) -> ndarray_obj_2d: ... + +# TODO: can we be more specific about rows? +def to_object_array(rows: object, min_width: int = ...) -> ndarray_obj_2d: ... +def dicts_to_array(dicts: list, columns: list) -> ndarray_obj_2d: ... +def maybe_booleans_to_slice( + mask: npt.NDArray[np.uint8], +) -> slice | npt.NDArray[np.uint8]: ... +def maybe_indices_to_slice( + indices: npt.NDArray[np.intp], + max_len: int, +) -> slice | npt.NDArray[np.intp]: ... +def is_all_arraylike(obj: list) -> bool: ... + +# ----------------------------------------------------------------- +# Functions which in reality take memoryviews + +def memory_usage_of_objects(arr: np.ndarray) -> int: ... 
# object[:] # np.int64 +def map_infer_mask( + arr: np.ndarray, + f: Callable[[Any], Any], + mask: np.ndarray, # const uint8_t[:] + convert: bool = ..., + na_value: Any = ..., + dtype: np.dtype = ..., +) -> np.ndarray: ... +def indices_fast( + index: npt.NDArray[np.intp], + labels: np.ndarray, # const int64_t[:] + keys: list, + sorted_labels: list[npt.NDArray[np.int64]], +) -> dict[Hashable, npt.NDArray[np.intp]]: ... +def generate_slices( + labels: np.ndarray, ngroups: int # const intp_t[:] +) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: ... +def count_level_2d( + mask: np.ndarray, # ndarray[uint8_t, ndim=2, cast=True], + labels: np.ndarray, # const intp_t[:] + max_bin: int, +) -> np.ndarray: ... # np.ndarray[np.int64, ndim=2] +def get_level_sorter( + label: np.ndarray, # const int64_t[:] + starts: np.ndarray, # const intp_t[:] +) -> np.ndarray: ... # np.ndarray[np.intp, ndim=1] +def generate_bins_dt64( + values: npt.NDArray[np.int64], + binner: np.ndarray, # const int64_t[:] + closed: object = ..., + hasnans: bool = ..., +) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1] +def array_equivalent_object( + left: npt.NDArray[np.object_], + right: npt.NDArray[np.object_], +) -> bool: ... +def has_infs(arr: np.ndarray) -> bool: ... # const floating[:] +def has_only_ints_or_nan(arr: np.ndarray) -> bool: ... # const floating[:] +def get_reverse_indexer( + indexer: np.ndarray, # const intp_t[:] + length: int, +) -> npt.NDArray[np.intp]: ... +def is_bool_list(obj: list) -> bool: ... +def dtypes_all_equal(types: list[DtypeObj]) -> bool: ... +def is_range_indexer( + left: np.ndarray, n: int # np.ndarray[np.int64, ndim=1] +) -> bool: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/missing.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/missing.cpython-39-darwin.so new file mode 100755 index 00000000..1dac14ef Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/missing.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/missing.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/missing.pyi new file mode 100644 index 00000000..d5c9f134 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/missing.pyi @@ -0,0 +1,17 @@ +import numpy as np +from numpy import typing as npt + +class NAType: + def __new__(cls, *args, **kwargs): ... + +NA: NAType + +def is_matching_na( + left: object, right: object, nan_matches_none: bool = ... +) -> bool: ... +def isposinf_scalar(val: object) -> bool: ... +def isneginf_scalar(val: object) -> bool: ... +def checknull(val: object, inf_as_na: bool = ...) -> bool: ... +def isnaobj(arr: np.ndarray, inf_as_na: bool = ...) -> npt.NDArray[np.bool_]: ... +def is_numeric_na(values: np.ndarray) -> npt.NDArray[np.bool_]: ... +def is_float_nan(values: np.ndarray) -> npt.NDArray[np.bool_]: ... 
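Editor's note: missing.pyi declares the NAType singleton behind pd.NA; the __new__ stub exists because NAType always hands back the one shared instance. Its propagating comparison semantics, which the checkers above (checknull, is_matching_na, ...) have to account for, look like this:

import pandas as pd

print(pd.NA is type(pd.NA)())  # True: NAType.__new__ returns the singleton
print(pd.NA == 1)              # <NA>: comparisons propagate rather than return bool
print(pd.isna(pd.NA))          # True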
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/ops.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/ops.cpython-39-darwin.so new file mode 100755 index 00000000..d0576e32 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/ops.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/ops.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/ops.pyi new file mode 100644 index 00000000..515f7aa5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/ops.pyi @@ -0,0 +1,51 @@ +from typing import ( + Any, + Callable, + Iterable, + Literal, + TypeAlias, + overload, +) + +import numpy as np + +from pandas._typing import npt + +_BinOp: TypeAlias = Callable[[Any, Any], Any] +_BoolOp: TypeAlias = Callable[[Any, Any], bool] + +def scalar_compare( + values: np.ndarray, # object[:] + val: object, + op: _BoolOp, # {operator.eq, operator.ne, ...} +) -> npt.NDArray[np.bool_]: ... +def vec_compare( + left: npt.NDArray[np.object_], + right: npt.NDArray[np.object_], + op: _BoolOp, # {operator.eq, operator.ne, ...} +) -> npt.NDArray[np.bool_]: ... +def scalar_binop( + values: np.ndarray, # object[:] + val: object, + op: _BinOp, # binary operator +) -> np.ndarray: ... +def vec_binop( + left: np.ndarray, # object[:] + right: np.ndarray, # object[:] + op: _BinOp, # binary operator +) -> np.ndarray: ... +@overload +def maybe_convert_bool( + arr: npt.NDArray[np.object_], + true_values: Iterable = ..., + false_values: Iterable = ..., + convert_to_masked_nullable: Literal[False] = ..., +) -> tuple[np.ndarray, None]: ... +@overload +def maybe_convert_bool( + arr: npt.NDArray[np.object_], + true_values: Iterable = ..., + false_values: Iterable = ..., + *, + convert_to_masked_nullable: Literal[True], +) -> tuple[np.ndarray, np.ndarray]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/ops_dispatch.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/ops_dispatch.cpython-39-darwin.so new file mode 100755 index 00000000..44847805 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/ops_dispatch.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/ops_dispatch.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/ops_dispatch.pyi new file mode 100644 index 00000000..91b5a4db --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/ops_dispatch.pyi @@ -0,0 +1,5 @@ +import numpy as np + +def maybe_dispatch_ufunc_to_dunder_op( + self, ufunc: np.ufunc, method: str, *inputs, **kwargs +): ... 
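Editor's note: maybe_dispatch_ufunc_to_dunder_op is used inside pandas' __array_ufunc__ handling to reroute NumPy ufunc calls on pandas objects to the matching dunder method, so extension dtypes keep their semantics. A quick public-API demonstration:

import numpy as np
import pandas as pd

s = pd.Series([1, 2, None], dtype="Int64")
print(np.add(s, 1))
# 0       2
# 1       3
# 2    <NA>
# dtype: Int64 -> the ufunc was dispatched to Series.__add__,
# preserving the nullable dtype instead of coercing to float/object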
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/pandas_datetime.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/pandas_datetime.cpython-39-darwin.so new file mode 100755 index 00000000..6478797c Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/pandas_datetime.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/pandas_parser.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/pandas_parser.cpython-39-darwin.so new file mode 100755 index 00000000..35b0f117 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/pandas_parser.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/parsers.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/parsers.cpython-39-darwin.so new file mode 100755 index 00000000..d8f2581e Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/parsers.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/parsers.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/parsers.pyi new file mode 100644 index 00000000..253bb730 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/parsers.pyi @@ -0,0 +1,77 @@ +from typing import ( + Hashable, + Literal, +) + +import numpy as np + +from pandas._typing import ( + ArrayLike, + Dtype, + npt, +) + +STR_NA_VALUES: set[str] +DEFAULT_BUFFER_HEURISTIC: int + +def sanitize_objects( + values: npt.NDArray[np.object_], + na_values: set, +) -> int: ... + +class TextReader: + unnamed_cols: set[str] + table_width: int # int64_t + leading_cols: int # int64_t + header: list[list[int]] # non-negative integers + def __init__( + self, + source, + delimiter: bytes | str = ..., # single-character only + header=..., + header_start: int = ..., # int64_t + header_end: int = ..., # uint64_t + index_col=..., + names=..., + tokenize_chunksize: int = ..., # int64_t + delim_whitespace: bool = ..., + converters=..., + skipinitialspace: bool = ..., + escapechar: bytes | str | None = ..., # single-character only + doublequote: bool = ..., + quotechar: str | bytes | None = ..., # at most 1 character + quoting: int = ..., + lineterminator: bytes | str | None = ..., # at most 1 character + comment=..., + decimal: bytes | str = ..., # single-character only + thousands: bytes | str | None = ..., # single-character only + dtype: Dtype | dict[Hashable, Dtype] = ..., + usecols=..., + error_bad_lines: bool = ..., + warn_bad_lines: bool = ..., + na_filter: bool = ..., + na_values=..., + na_fvalues=..., + keep_default_na: bool = ..., + true_values=..., + false_values=..., + allow_leading_cols: bool = ..., + skiprows=..., + skipfooter: int = ..., # int64_t + verbose: bool = ..., + float_precision: Literal["round_trip", "legacy", "high"] | None = ..., + skip_blank_lines: bool = ..., + encoding_errors: bytes | str = ..., + ) -> None: ... + def set_noconvert(self, i: int) -> None: ... + def remove_noconvert(self, i: int) -> None: ... + def close(self) -> None: ... + def read(self, rows: int | None = ...) -> dict[int, ArrayLike]: ... + def read_low_memory(self, rows: int | None) -> list[dict[int, ArrayLike]]: ... + +# _maybe_upcast, na_values are only exposed for testing +na_values: dict + +def _maybe_upcast( + arr, use_dtype_backend: bool = ..., dtype_backend: str = ... +) -> np.ndarray: ... 
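`TextReader` above is the C engine behind `read_csv`; most of its stubbed constructor options (delimiter, `na_values`, `dtype`, `float_precision`, ...) surface as `read_csv` keyword arguments. A short sketch through the public entry point, with illustrative data:

    import io
    import pandas as pd

    buf = io.StringIO("a,b\n1,x\n2,\n")
    df = pd.read_csv(buf, engine="c", dtype={"a": "int64"}, na_filter=True)
    print(df)
    print(df.dtypes)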
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/properties.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/properties.cpython-39-darwin.so new file mode 100755 index 00000000..16030b44 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/properties.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/properties.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/properties.pyi new file mode 100644 index 00000000..aaa44a0c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/properties.pyi @@ -0,0 +1,27 @@ +from typing import ( + Sequence, + overload, +) + +from pandas._typing import ( + AnyArrayLike, + DataFrame, + Index, + Series, +) + +# note: this is a lie to make type checkers happy (they special +# case property). cache_readonly uses attribute names similar to +# property (fget) but it does not provide fset and fdel. +cache_readonly = property + +class AxisProperty: + axis: int + def __init__(self, axis: int = ..., doc: str = ...) -> None: ... + @overload + def __get__(self, obj: DataFrame | Series, type) -> Index: ... + @overload + def __get__(self, obj: None, type) -> AxisProperty: ... + def __set__( + self, obj: DataFrame | Series, value: AnyArrayLike | Sequence + ) -> None: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/reshape.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/reshape.cpython-39-darwin.so new file mode 100755 index 00000000..9097257f Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/reshape.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/reshape.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/reshape.pyi new file mode 100644 index 00000000..110687fc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/reshape.pyi @@ -0,0 +1,16 @@ +import numpy as np + +from pandas._typing import npt + +def unstack( + values: np.ndarray, # reshape_t[:, :] + mask: np.ndarray, # const uint8_t[:] + stride: int, + length: int, + width: int, + new_values: np.ndarray, # reshape_t[:, :] + new_mask: np.ndarray, # uint8_t[:, :] +) -> None: ... +def explode( + values: npt.NDArray[np.object_], +) -> tuple[npt.NDArray[np.object_], npt.NDArray[np.int64]]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/sas.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/sas.cpython-39-darwin.so new file mode 100755 index 00000000..299792cd Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/sas.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/sas.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/sas.pyi new file mode 100644 index 00000000..5d65e2b5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/sas.pyi @@ -0,0 +1,7 @@ +from pandas.io.sas.sas7bdat import SAS7BDATReader + +class Parser: + def __init__(self, parser: SAS7BDATReader) -> None: ... + def read(self, nrows: int) -> None: ... + +def get_subheader_index(signature: bytes) -> int: ... 
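Of the three stubs above, `reshape.pyi`'s `explode` is the kernel behind `Series.explode`: it flattens list-likes in an object array and returns the repeat counts used to rebuild the index (`properties.pyi` similarly types the cached `df.index`/`df.columns` descriptors). A minimal public-API sketch:

    import pandas as pd

    s = pd.Series([[1, 2], [], [3]])
    print(s.explode())  # empty lists become a single NaN row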
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/sparse.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/sparse.cpython-39-darwin.so new file mode 100755 index 00000000..05e1fd16 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/sparse.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/sparse.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/sparse.pyi new file mode 100644 index 00000000..9e5cecc6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/sparse.pyi @@ -0,0 +1,47 @@ +from typing import Sequence + +import numpy as np + +from pandas._typing import ( + Self, + npt, +) + +class SparseIndex: + length: int + npoints: int + def __init__(self) -> None: ... + @property + def ngaps(self) -> int: ... + @property + def nbytes(self) -> int: ... + @property + def indices(self) -> npt.NDArray[np.int32]: ... + def equals(self, other) -> bool: ... + def lookup(self, index: int) -> np.int32: ... + def lookup_array(self, indexer: npt.NDArray[np.int32]) -> npt.NDArray[np.int32]: ... + def to_int_index(self) -> IntIndex: ... + def to_block_index(self) -> BlockIndex: ... + def intersect(self, y_: SparseIndex) -> Self: ... + def make_union(self, y_: SparseIndex) -> Self: ... + +class IntIndex(SparseIndex): + indices: npt.NDArray[np.int32] + def __init__( + self, length: int, indices: Sequence[int], check_integrity: bool = ... + ) -> None: ... + +class BlockIndex(SparseIndex): + nblocks: int + blocs: np.ndarray + blengths: np.ndarray + def __init__( + self, length: int, blocs: np.ndarray, blengths: np.ndarray + ) -> None: ... + +def make_mask_object_ndarray( + arr: npt.NDArray[np.object_], fill_value +) -> npt.NDArray[np.bool_]: ... +def get_blocks( + indices: npt.NDArray[np.int32], +) -> tuple[npt.NDArray[np.int32], npt.NDArray[np.int32]]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/testing.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/testing.cpython-39-darwin.so new file mode 100755 index 00000000..50c9d932 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/testing.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/testing.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/testing.pyi new file mode 100644 index 00000000..01da4969 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/testing.pyi @@ -0,0 +1,12 @@ +def assert_dict_equal(a, b, compare_keys: bool = ...): ... +def assert_almost_equal( + a, + b, + rtol: float = ..., + atol: float = ..., + check_dtype: bool = ..., + obj=..., + lobj=..., + robj=..., + index_values=..., +): ... 
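A small sketch of the two stubs above. `IntIndex`/`BlockIndex` record where the non-fill values of a `SparseArray` live, and `testing.pyi`'s `assert_almost_equal` backs the public `pandas.testing` helpers (the tolerance values below are illustrative):

    import pandas as pd

    sp = pd.arrays.SparseArray([0, 0, 1, 2, 0, 3], fill_value=0)
    print(sp.sp_index)        # an IntIndex by default
    print(sp.sp_index.ngaps)  # count of fill positions (3 here)

    # Approximate comparison via the public wrapper:
    pd.testing.assert_series_equal(
        pd.Series([1.0]), pd.Series([1.0 + 1e-9]), check_exact=False
    )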
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslib.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslib.cpython-39-darwin.so new file mode 100755 index 00000000..5bcd1212 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslib.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslib.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslib.pyi new file mode 100644 index 00000000..9819b517 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslib.pyi @@ -0,0 +1,32 @@ +from datetime import tzinfo + +import numpy as np + +from pandas._typing import npt + +def format_array_from_datetime( + values: npt.NDArray[np.int64], + tz: tzinfo | None = ..., + format: str | None = ..., + na_rep: str | float = ..., + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.object_]: ... +def array_with_unit_to_datetime( + values: npt.NDArray[np.object_], + unit: str, + errors: str = ..., +) -> tuple[np.ndarray, tzinfo | None]: ... +def first_non_null(values: np.ndarray) -> int: ... +def array_to_datetime( + values: npt.NDArray[np.object_], + errors: str = ..., + dayfirst: bool = ..., + yearfirst: bool = ..., + utc: bool = ..., +) -> tuple[np.ndarray, tzinfo | None]: ... + +# returned ndarray may be object dtype or datetime64[ns] + +def array_to_datetime_with_tz( + values: npt.NDArray[np.object_], tz: tzinfo +) -> npt.NDArray[np.int64]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/__init__.py new file mode 100644 index 00000000..2cabbe3f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/__init__.py @@ -0,0 +1,85 @@ +__all__ = [ + "dtypes", + "localize_pydatetime", + "NaT", + "NaTType", + "iNaT", + "nat_strings", + "OutOfBoundsDatetime", + "OutOfBoundsTimedelta", + "IncompatibleFrequency", + "Period", + "Resolution", + "Timedelta", + "normalize_i8_timestamps", + "is_date_array_normalized", + "dt64arr_to_periodarr", + "delta_to_nanoseconds", + "ints_to_pydatetime", + "ints_to_pytimedelta", + "get_resolution", + "Timestamp", + "tz_convert_from_utc_single", + "tz_convert_from_utc", + "to_offset", + "Tick", + "BaseOffset", + "tz_compare", + "is_unitless", + "astype_overflowsafe", + "get_unit_from_dtype", + "periods_per_day", + "periods_per_second", + "is_supported_unit", + "npy_unit_to_abbrev", + "get_supported_reso", +] + +from pandas._libs.tslibs import dtypes # pylint: disable=import-self +from pandas._libs.tslibs.conversion import localize_pydatetime +from pandas._libs.tslibs.dtypes import ( + Resolution, + get_supported_reso, + is_supported_unit, + npy_unit_to_abbrev, + periods_per_day, + periods_per_second, +) +from pandas._libs.tslibs.nattype import ( + NaT, + NaTType, + iNaT, + nat_strings, +) +from pandas._libs.tslibs.np_datetime import ( + OutOfBoundsDatetime, + OutOfBoundsTimedelta, + astype_overflowsafe, + is_unitless, + py_get_unit_from_dtype as get_unit_from_dtype, +) +from pandas._libs.tslibs.offsets import ( + BaseOffset, + Tick, + to_offset, +) +from pandas._libs.tslibs.period import ( + IncompatibleFrequency, + Period, +) +from pandas._libs.tslibs.timedeltas import ( + Timedelta, + delta_to_nanoseconds, + ints_to_pytimedelta, +) +from pandas._libs.tslibs.timestamps import Timestamp +from pandas._libs.tslibs.timezones import tz_compare +from pandas._libs.tslibs.tzconversion import tz_convert_from_utc_single +from 
pandas._libs.tslibs.vectorized import ( + dt64arr_to_periodarr, + get_resolution, + ints_to_pydatetime, + is_date_array_normalized, + normalize_i8_timestamps, + tz_convert_from_utc, +) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/base.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/base.cpython-39-darwin.so new file mode 100755 index 00000000..45257f4e Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/base.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/ccalendar.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/ccalendar.cpython-39-darwin.so new file mode 100755 index 00000000..2e57fc5e Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/ccalendar.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/ccalendar.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/ccalendar.pyi new file mode 100644 index 00000000..993f18a6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/ccalendar.pyi @@ -0,0 +1,12 @@ +DAYS: list[str] +MONTH_ALIASES: dict[int, str] +MONTH_NUMBERS: dict[str, int] +MONTHS: list[str] +int_to_weekday: dict[int, str] + +def get_firstbday(year: int, month: int) -> int: ... +def get_lastbday(year: int, month: int) -> int: ... +def get_day_of_year(year: int, month: int, day: int) -> int: ... +def get_iso_calendar(year: int, month: int, day: int) -> tuple[int, int, int]: ... +def get_week_of_year(year: int, month: int, day: int) -> int: ... +def get_days_in_month(year: int, month: int) -> int: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/conversion.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/conversion.cpython-39-darwin.so new file mode 100755 index 00000000..80a8e035 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/conversion.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/conversion.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/conversion.pyi new file mode 100644 index 00000000..d564d767 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/conversion.pyi @@ -0,0 +1,14 @@ +from datetime import ( + datetime, + tzinfo, +) + +import numpy as np + +DT64NS_DTYPE: np.dtype +TD64NS_DTYPE: np.dtype + +def precision_from_unit( + unit: str, +) -> tuple[int, int]: ... # (int64_t, _) +def localize_pydatetime(dt: datetime, tz: tzinfo | None) -> datetime: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/dtypes.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/dtypes.cpython-39-darwin.so new file mode 100755 index 00000000..9774cbf6 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/dtypes.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/dtypes.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/dtypes.pyi new file mode 100644 index 00000000..bea3e182 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/dtypes.pyi @@ -0,0 +1,88 @@ +from enum import Enum + +# These are not public API, but are exposed in the .pyi file because they +# are imported in tests. 
+_attrname_to_abbrevs: dict[str, str] +_period_code_map: dict[str, int] + +def periods_per_day(reso: int) -> int: ... +def periods_per_second(reso: int) -> int: ... +def is_supported_unit(reso: int) -> bool: ... +def npy_unit_to_abbrev(reso: int) -> str: ... +def get_supported_reso(reso: int) -> int: ... +def abbrev_to_npy_unit(abbrev: str) -> int: ... + +class PeriodDtypeBase: + _dtype_code: int # PeriodDtypeCode + _n: int + + # actually __cinit__ + def __new__(cls, code: int, n: int): ... + @property + def _freq_group_code(self) -> int: ... + @property + def _resolution_obj(self) -> Resolution: ... + def _get_to_timestamp_base(self) -> int: ... + @property + def _freqstr(self) -> str: ... + def __hash__(self) -> int: ... + def _is_tick_like(self) -> bool: ... + @property + def _creso(self) -> int: ... + @property + def _td64_unit(self) -> str: ... + +class FreqGroup(Enum): + FR_ANN: int + FR_QTR: int + FR_MTH: int + FR_WK: int + FR_BUS: int + FR_DAY: int + FR_HR: int + FR_MIN: int + FR_SEC: int + FR_MS: int + FR_US: int + FR_NS: int + FR_UND: int + @staticmethod + def from_period_dtype_code(code: int) -> FreqGroup: ... + +class Resolution(Enum): + RESO_NS: int + RESO_US: int + RESO_MS: int + RESO_SEC: int + RESO_MIN: int + RESO_HR: int + RESO_DAY: int + RESO_MTH: int + RESO_QTR: int + RESO_YR: int + def __lt__(self, other: Resolution) -> bool: ... + def __ge__(self, other: Resolution) -> bool: ... + @property + def attrname(self) -> str: ... + @classmethod + def from_attrname(cls, attrname: str) -> Resolution: ... + @classmethod + def get_reso_from_freqstr(cls, freq: str) -> Resolution: ... + @property + def attr_abbrev(self) -> str: ... + +class NpyDatetimeUnit(Enum): + NPY_FR_Y: int + NPY_FR_M: int + NPY_FR_W: int + NPY_FR_D: int + NPY_FR_h: int + NPY_FR_m: int + NPY_FR_s: int + NPY_FR_ms: int + NPY_FR_us: int + NPY_FR_ns: int + NPY_FR_ps: int + NPY_FR_fs: int + NPY_FR_as: int + NPY_FR_GENERIC: int diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/fields.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/fields.cpython-39-darwin.so new file mode 100755 index 00000000..8a83419f Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/fields.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/fields.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/fields.pyi new file mode 100644 index 00000000..c6cfd44e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/fields.pyi @@ -0,0 +1,62 @@ +import numpy as np + +from pandas._typing import npt + +def build_field_sarray( + dtindex: npt.NDArray[np.int64], # const int64_t[:] + reso: int, # NPY_DATETIMEUNIT +) -> np.ndarray: ... +def month_position_check(fields, weekdays) -> str | None: ... +def get_date_name_field( + dtindex: npt.NDArray[np.int64], # const int64_t[:] + field: str, + locale: str | None = ..., + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.object_]: ... +def get_start_end_field( + dtindex: npt.NDArray[np.int64], + field: str, + freqstr: str | None = ..., + month_kw: int = ..., + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.bool_]: ... +def get_date_field( + dtindex: npt.NDArray[np.int64], # const int64_t[:] + field: str, + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.int32]: ... 
+def get_timedelta_field( + tdindex: npt.NDArray[np.int64], # const int64_t[:] + field: str, + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.int32]: ... +def get_timedelta_days( + tdindex: npt.NDArray[np.int64], # const int64_t[:] + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.int64]: ... +def isleapyear_arr( + years: np.ndarray, +) -> npt.NDArray[np.bool_]: ... +def build_isocalendar_sarray( + dtindex: npt.NDArray[np.int64], # const int64_t[:] + reso: int, # NPY_DATETIMEUNIT +) -> np.ndarray: ... +def _get_locale_names(name_type: str, locale: str | None = ...): ... + +class RoundTo: + @property + def MINUS_INFTY(self) -> int: ... + @property + def PLUS_INFTY(self) -> int: ... + @property + def NEAREST_HALF_EVEN(self) -> int: ... + @property + def NEAREST_HALF_PLUS_INFTY(self) -> int: ... + @property + def NEAREST_HALF_MINUS_INFTY(self) -> int: ... + +def round_nsint64( + values: npt.NDArray[np.int64], + mode: RoundTo, + nanos: int, +) -> npt.NDArray[np.int64]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/nattype.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/nattype.cpython-39-darwin.so new file mode 100755 index 00000000..d85d62ff Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/nattype.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/nattype.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/nattype.pyi new file mode 100644 index 00000000..437b5ab6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/nattype.pyi @@ -0,0 +1,135 @@ +from datetime import ( + datetime, + timedelta, + tzinfo as _tzinfo, +) +import typing + +import numpy as np + +from pandas._libs.tslibs.period import Period + +NaT: NaTType +iNaT: int +nat_strings: set[str] + +_NaTComparisonTypes: typing.TypeAlias = ( + datetime | timedelta | Period | np.datetime64 | np.timedelta64 +) + +class _NatComparison: + def __call__(self, other: _NaTComparisonTypes) -> bool: ... + +class NaTType: + _value: np.int64 + @property + def value(self) -> int: ... + @property + def asm8(self) -> np.datetime64: ... + def to_datetime64(self) -> np.datetime64: ... + def to_numpy( + self, dtype: np.dtype | str | None = ..., copy: bool = ... + ) -> np.datetime64 | np.timedelta64: ... + @property + def is_leap_year(self) -> bool: ... + @property + def is_month_start(self) -> bool: ... + @property + def is_quarter_start(self) -> bool: ... + @property + def is_year_start(self) -> bool: ... + @property + def is_month_end(self) -> bool: ... + @property + def is_quarter_end(self) -> bool: ... + @property + def is_year_end(self) -> bool: ... + @property + def day_of_year(self) -> float: ... + @property + def dayofyear(self) -> float: ... + @property + def days_in_month(self) -> float: ... + @property + def daysinmonth(self) -> float: ... + @property + def day_of_week(self) -> float: ... + @property + def dayofweek(self) -> float: ... + @property + def week(self) -> float: ... + @property + def weekofyear(self) -> float: ... + def day_name(self) -> float: ... + def month_name(self) -> float: ... + def weekday(self) -> float: ... + def isoweekday(self) -> float: ... + def total_seconds(self) -> float: ... + def today(self, *args, **kwargs) -> NaTType: ... + def now(self, *args, **kwargs) -> NaTType: ... + def to_pydatetime(self) -> NaTType: ... + def date(self) -> NaTType: ... + def round(self) -> NaTType: ... + def floor(self) -> NaTType: ... 
+ def ceil(self) -> NaTType: ... + @property + def tzinfo(self) -> None: ... + @property + def tz(self) -> None: ... + def tz_convert(self, tz: _tzinfo | str | None) -> NaTType: ... + def tz_localize( + self, + tz: _tzinfo | str | None, + ambiguous: str = ..., + nonexistent: str = ..., + ) -> NaTType: ... + def replace( + self, + year: int | None = ..., + month: int | None = ..., + day: int | None = ..., + hour: int | None = ..., + minute: int | None = ..., + second: int | None = ..., + microsecond: int | None = ..., + nanosecond: int | None = ..., + tzinfo: _tzinfo | None = ..., + fold: int | None = ..., + ) -> NaTType: ... + @property + def year(self) -> float: ... + @property + def quarter(self) -> float: ... + @property + def month(self) -> float: ... + @property + def day(self) -> float: ... + @property + def hour(self) -> float: ... + @property + def minute(self) -> float: ... + @property + def second(self) -> float: ... + @property + def millisecond(self) -> float: ... + @property + def microsecond(self) -> float: ... + @property + def nanosecond(self) -> float: ... + # inject Timedelta properties + @property + def days(self) -> float: ... + @property + def microseconds(self) -> float: ... + @property + def nanoseconds(self) -> float: ... + # inject Period properties + @property + def qyear(self) -> float: ... + def __eq__(self, other: object) -> bool: ... + def __ne__(self, other: object) -> bool: ... + __lt__: _NatComparison + __le__: _NatComparison + __gt__: _NatComparison + __ge__: _NatComparison + def as_unit(self, unit: str, round_ok: bool = ...) -> NaTType: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/np_datetime.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/np_datetime.cpython-39-darwin.so new file mode 100755 index 00000000..8ab59817 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/np_datetime.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/np_datetime.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/np_datetime.pyi new file mode 100644 index 00000000..0cb0e3b0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/np_datetime.pyi @@ -0,0 +1,21 @@ +import numpy as np + +from pandas._typing import npt + +class OutOfBoundsDatetime(ValueError): ... +class OutOfBoundsTimedelta(ValueError): ... + +# only exposed for testing +def py_get_unit_from_dtype(dtype: np.dtype): ... +def py_td64_to_tdstruct(td64: int, unit: int) -> dict: ... +def astype_overflowsafe( + arr: np.ndarray, + dtype: np.dtype, + copy: bool = ..., + round_ok: bool = ..., + is_coerce: bool = ..., +) -> np.ndarray: ... +def is_unitless(dtype: np.dtype) -> bool: ... +def compare_mismatched_resolutions( + left: np.ndarray, right: np.ndarray, op +) -> npt.NDArray[np.bool_]: ... 
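Two behaviors worth seeing next to the `nattype.pyi` and `np_datetime.pyi` stubs above: `NaT` never compares equal (hence the `_NatComparison` machinery), and its calendar fields are float `nan` (hence the `-> float` return types). A sketch, with the overflow example assuming pandas >= 2.0 where non-nanosecond timestamps exist:

    import pandas as pd

    assert pd.NaT != pd.NaT
    print(pd.NaT.day)  # nan

    # astype_overflowsafe backs casts that can raise OutOfBoundsDatetime:
    try:
        pd.Timestamp("1677-01-01").as_unit("ns")  # below the ns-epoch range
    except pd.errors.OutOfBoundsDatetime as exc:
        print("out of ns bounds:", exc)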
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/offsets.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/offsets.cpython-39-darwin.so new file mode 100755 index 00000000..ec7bac4d Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/offsets.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/offsets.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/offsets.pyi new file mode 100644 index 00000000..1a474211 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/offsets.pyi @@ -0,0 +1,283 @@ +from datetime import ( + datetime, + time, + timedelta, +) +from typing import ( + Any, + Collection, + Literal, + TypeVar, + overload, +) + +import numpy as np + +from pandas._libs.tslibs.nattype import NaTType +from pandas._typing import ( + OffsetCalendar, + Self, + npt, +) + +from .timedeltas import Timedelta + +_BaseOffsetT = TypeVar("_BaseOffsetT", bound=BaseOffset) +_DatetimeT = TypeVar("_DatetimeT", bound=datetime) +_TimedeltaT = TypeVar("_TimedeltaT", bound=timedelta) + +_relativedelta_kwds: set[str] +prefix_mapping: dict[str, type] + +class ApplyTypeError(TypeError): ... + +class BaseOffset: + n: int + def __init__(self, n: int = ..., normalize: bool = ...) -> None: ... + def __eq__(self, other) -> bool: ... + def __ne__(self, other) -> bool: ... + def __hash__(self) -> int: ... + @property + def kwds(self) -> dict: ... + @property + def base(self) -> BaseOffset: ... + @overload + def __add__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ... + @overload + def __add__(self, other: BaseOffset) -> Self: ... + @overload + def __add__(self, other: _DatetimeT) -> _DatetimeT: ... + @overload + def __add__(self, other: _TimedeltaT) -> _TimedeltaT: ... + @overload + def __radd__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ... + @overload + def __radd__(self, other: BaseOffset) -> Self: ... + @overload + def __radd__(self, other: _DatetimeT) -> _DatetimeT: ... + @overload + def __radd__(self, other: _TimedeltaT) -> _TimedeltaT: ... + @overload + def __radd__(self, other: NaTType) -> NaTType: ... + def __sub__(self, other: BaseOffset) -> Self: ... + @overload + def __rsub__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ... + @overload + def __rsub__(self, other: BaseOffset): ... + @overload + def __rsub__(self, other: _DatetimeT) -> _DatetimeT: ... + @overload + def __rsub__(self, other: _TimedeltaT) -> _TimedeltaT: ... + @overload + def __mul__(self, other: np.ndarray) -> np.ndarray: ... + @overload + def __mul__(self, other: int): ... + @overload + def __rmul__(self, other: np.ndarray) -> np.ndarray: ... + @overload + def __rmul__(self, other: int) -> Self: ... + def __neg__(self) -> Self: ... + def copy(self) -> Self: ... + @property + def name(self) -> str: ... + @property + def rule_code(self) -> str: ... + @property + def freqstr(self) -> str: ... + def _apply(self, other): ... + def _apply_array(self, dtarr) -> None: ... + def rollback(self, dt: datetime) -> datetime: ... + def rollforward(self, dt: datetime) -> datetime: ... + def is_on_offset(self, dt: datetime) -> bool: ... + def __setstate__(self, state) -> None: ... + def __getstate__(self): ... + @property + def nanos(self) -> int: ... + def is_anchored(self) -> bool: ... + +def _get_offset(name: str) -> BaseOffset: ... 
+ +class SingleConstructorOffset(BaseOffset): + @classmethod + def _from_name(cls, suffix: None = ...): ... + def __reduce__(self): ... + +@overload +def to_offset(freq: None) -> None: ... +@overload +def to_offset(freq: _BaseOffsetT) -> _BaseOffsetT: ... +@overload +def to_offset(freq: timedelta | str) -> BaseOffset: ... + +class Tick(SingleConstructorOffset): + _creso: int + _prefix: str + def __init__(self, n: int = ..., normalize: bool = ...) -> None: ... + @property + def delta(self) -> Timedelta: ... + @property + def nanos(self) -> int: ... + +def delta_to_tick(delta: timedelta) -> Tick: ... + +class Day(Tick): ... +class Hour(Tick): ... +class Minute(Tick): ... +class Second(Tick): ... +class Milli(Tick): ... +class Micro(Tick): ... +class Nano(Tick): ... + +class RelativeDeltaOffset(BaseOffset): + def __init__(self, n: int = ..., normalize: bool = ..., **kwds: Any) -> None: ... + +class BusinessMixin(SingleConstructorOffset): + def __init__( + self, n: int = ..., normalize: bool = ..., offset: timedelta = ... + ) -> None: ... + +class BusinessDay(BusinessMixin): ... + +class BusinessHour(BusinessMixin): + def __init__( + self, + n: int = ..., + normalize: bool = ..., + start: str | time | Collection[str | time] = ..., + end: str | time | Collection[str | time] = ..., + offset: timedelta = ..., + ) -> None: ... + +class WeekOfMonthMixin(SingleConstructorOffset): + def __init__( + self, n: int = ..., normalize: bool = ..., weekday: int = ... + ) -> None: ... + +class YearOffset(SingleConstructorOffset): + def __init__( + self, n: int = ..., normalize: bool = ..., month: int | None = ... + ) -> None: ... + +class BYearEnd(YearOffset): ... +class BYearBegin(YearOffset): ... +class YearEnd(YearOffset): ... +class YearBegin(YearOffset): ... + +class QuarterOffset(SingleConstructorOffset): + def __init__( + self, n: int = ..., normalize: bool = ..., startingMonth: int | None = ... + ) -> None: ... + +class BQuarterEnd(QuarterOffset): ... +class BQuarterBegin(QuarterOffset): ... +class QuarterEnd(QuarterOffset): ... +class QuarterBegin(QuarterOffset): ... +class MonthOffset(SingleConstructorOffset): ... +class MonthEnd(MonthOffset): ... +class MonthBegin(MonthOffset): ... +class BusinessMonthEnd(MonthOffset): ... +class BusinessMonthBegin(MonthOffset): ... + +class SemiMonthOffset(SingleConstructorOffset): + def __init__( + self, n: int = ..., normalize: bool = ..., day_of_month: int | None = ... + ) -> None: ... + +class SemiMonthEnd(SemiMonthOffset): ... +class SemiMonthBegin(SemiMonthOffset): ... + +class Week(SingleConstructorOffset): + def __init__( + self, n: int = ..., normalize: bool = ..., weekday: int | None = ... + ) -> None: ... + +class WeekOfMonth(WeekOfMonthMixin): + def __init__( + self, n: int = ..., normalize: bool = ..., week: int = ..., weekday: int = ... + ) -> None: ... + +class LastWeekOfMonth(WeekOfMonthMixin): ... + +class FY5253Mixin(SingleConstructorOffset): + def __init__( + self, + n: int = ..., + normalize: bool = ..., + weekday: int = ..., + startingMonth: int = ..., + variation: Literal["nearest", "last"] = ..., + ) -> None: ... + +class FY5253(FY5253Mixin): ... + +class FY5253Quarter(FY5253Mixin): + def __init__( + self, + n: int = ..., + normalize: bool = ..., + weekday: int = ..., + startingMonth: int = ..., + qtr_with_extra_week: int = ..., + variation: Literal["nearest", "last"] = ..., + ) -> None: ... + +class Easter(SingleConstructorOffset): ... 
+ +class _CustomBusinessMonth(BusinessMixin): + def __init__( + self, + n: int = ..., + normalize: bool = ..., + weekmask: str = ..., + holidays: list | None = ..., + calendar: OffsetCalendar | None = ..., + offset: timedelta = ..., + ) -> None: ... + +class CustomBusinessDay(BusinessDay): + def __init__( + self, + n: int = ..., + normalize: bool = ..., + weekmask: str = ..., + holidays: list | None = ..., + calendar: OffsetCalendar | None = ..., + offset: timedelta = ..., + ) -> None: ... + +class CustomBusinessHour(BusinessHour): + def __init__( + self, + n: int = ..., + normalize: bool = ..., + weekmask: str = ..., + holidays: list | None = ..., + calendar: OffsetCalendar | None = ..., + start: str | time | Collection[str | time] = ..., + end: str | time | Collection[str | time] = ..., + offset: timedelta = ..., + ) -> None: ... + +class CustomBusinessMonthEnd(_CustomBusinessMonth): ... +class CustomBusinessMonthBegin(_CustomBusinessMonth): ... +class OffsetMeta(type): ... +class DateOffset(RelativeDeltaOffset, metaclass=OffsetMeta): ... + +BDay = BusinessDay +BMonthEnd = BusinessMonthEnd +BMonthBegin = BusinessMonthBegin +CBMonthEnd = CustomBusinessMonthEnd +CBMonthBegin = CustomBusinessMonthBegin +CDay = CustomBusinessDay + +def roll_qtrday( + other: datetime, n: int, month: int, day_opt: str, modby: int +) -> int: ... + +INVALID_FREQ_ERR_MSG: Literal["Invalid frequency: {0}"] + +def shift_months( + dtindex: npt.NDArray[np.int64], months: int, day_opt: str | None = ... +) -> npt.NDArray[np.int64]: ... + +_offset_map: dict[str, BaseOffset] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/parsing.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/parsing.cpython-39-darwin.so new file mode 100755 index 00000000..e1091a04 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/parsing.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/parsing.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/parsing.pyi new file mode 100644 index 00000000..83a5b008 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/parsing.pyi @@ -0,0 +1,38 @@ +from datetime import datetime + +import numpy as np + +from pandas._typing import npt + +class DateParseError(ValueError): ... + +def py_parse_datetime_string( + date_string: str, + dayfirst: bool = ..., + yearfirst: bool = ..., +) -> datetime: ... +def parse_datetime_string_with_reso( + date_string: str, + freq: str | None = ..., + dayfirst: bool | None = ..., + yearfirst: bool | None = ..., +) -> tuple[datetime, str]: ... +def _does_string_look_like_datetime(py_string: str) -> bool: ... +def quarter_to_myear(year: int, quarter: int, freq: str) -> tuple[int, int]: ... +def try_parse_dates( + values: npt.NDArray[np.object_], # object[:] + parser, +) -> npt.NDArray[np.object_]: ... +def try_parse_year_month_day( + years: npt.NDArray[np.object_], # object[:] + months: npt.NDArray[np.object_], # object[:] + days: npt.NDArray[np.object_], # object[:] +) -> npt.NDArray[np.object_]: ... +def guess_datetime_format( + dt_str, + dayfirst: bool | None = ..., +) -> str | None: ... +def concat_date_cols( + date_cols: tuple, +) -> npt.NDArray[np.object_]: ... +def get_rule_month(source: str) -> str: ... 
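The `offsets.pyi` hierarchy above is reachable through `to_offset`, which normalizes frequency aliases into `BaseOffset` subclasses that compose arithmetically with timestamps; `BDay` is one of the aliases bound at the bottom of the stub. A brief sketch with illustrative dates:

    import pandas as pd
    from pandas.tseries.frequencies import to_offset
    from pandas.tseries.offsets import BDay  # alias for BusinessDay

    print(to_offset("2W"))                       # <2 * Weeks: weekday=6>
    print(pd.Timestamp("2024-01-05") + BDay(1))  # Friday -> Monday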
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/period.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/period.cpython-39-darwin.so new file mode 100755 index 00000000..1d1ab748 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/period.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/period.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/period.pyi new file mode 100644 index 00000000..8826757e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/period.pyi @@ -0,0 +1,135 @@ +from datetime import timedelta +from typing import Literal + +import numpy as np + +from pandas._libs.tslibs.dtypes import PeriodDtypeBase +from pandas._libs.tslibs.nattype import NaTType +from pandas._libs.tslibs.offsets import BaseOffset +from pandas._libs.tslibs.timestamps import Timestamp +from pandas._typing import ( + Frequency, + npt, +) + +INVALID_FREQ_ERR_MSG: str +DIFFERENT_FREQ: str + +class IncompatibleFrequency(ValueError): ... + +def periodarr_to_dt64arr( + periodarr: npt.NDArray[np.int64], # const int64_t[:] + freq: int, +) -> npt.NDArray[np.int64]: ... +def period_asfreq_arr( + arr: npt.NDArray[np.int64], + freq1: int, + freq2: int, + end: bool, +) -> npt.NDArray[np.int64]: ... +def get_period_field_arr( + field: str, + arr: npt.NDArray[np.int64], # const int64_t[:] + freq: int, +) -> npt.NDArray[np.int64]: ... +def from_ordinals( + values: npt.NDArray[np.int64], # const int64_t[:] + freq: timedelta | BaseOffset | str, +) -> npt.NDArray[np.int64]: ... +def extract_ordinals( + values: npt.NDArray[np.object_], + freq: Frequency | int, +) -> npt.NDArray[np.int64]: ... +def extract_freq( + values: npt.NDArray[np.object_], +) -> BaseOffset: ... +def period_array_strftime( + values: npt.NDArray[np.int64], + dtype_code: int, + na_rep, + date_format: str | None, +) -> npt.NDArray[np.object_]: ... + +# exposed for tests +def period_asfreq(ordinal: int, freq1: int, freq2: int, end: bool) -> int: ... +def period_ordinal( + y: int, m: int, d: int, h: int, min: int, s: int, us: int, ps: int, freq: int +) -> int: ... +def freq_to_dtype_code(freq: BaseOffset) -> int: ... +def validate_end_alias(how: str) -> Literal["E", "S"]: ... + +class PeriodMixin: + @property + def end_time(self) -> Timestamp: ... + @property + def start_time(self) -> Timestamp: ... + def _require_matching_freq(self, other, base: bool = ...) -> None: ... + +class Period(PeriodMixin): + ordinal: int # int64_t + freq: BaseOffset + _dtype: PeriodDtypeBase + + # error: "__new__" must return a class instance (got "Union[Period, NaTType]") + def __new__( # type: ignore[misc] + cls, + value=..., + freq: int | str | BaseOffset | None = ..., + ordinal: int | None = ..., + year: int | None = ..., + month: int | None = ..., + quarter: int | None = ..., + day: int | None = ..., + hour: int | None = ..., + minute: int | None = ..., + second: int | None = ..., + ) -> Period | NaTType: ... + @classmethod + def _maybe_convert_freq(cls, freq) -> BaseOffset: ... + @classmethod + def _from_ordinal(cls, ordinal: int, freq) -> Period: ... + @classmethod + def now(cls, freq: BaseOffset = ...) -> Period: ... + def strftime(self, fmt: str) -> str: ... + def to_timestamp( + self, + freq: str | BaseOffset | None = ..., + how: str = ..., + ) -> Timestamp: ... + def asfreq(self, freq: str | BaseOffset, how: str = ...) -> Period: ... + @property + def freqstr(self) -> str: ... 
+ @property + def is_leap_year(self) -> bool: ... + @property + def daysinmonth(self) -> int: ... + @property + def days_in_month(self) -> int: ... + @property + def qyear(self) -> int: ... + @property + def quarter(self) -> int: ... + @property + def day_of_year(self) -> int: ... + @property + def weekday(self) -> int: ... + @property + def day_of_week(self) -> int: ... + @property + def week(self) -> int: ... + @property + def weekofyear(self) -> int: ... + @property + def second(self) -> int: ... + @property + def minute(self) -> int: ... + @property + def hour(self) -> int: ... + @property + def day(self) -> int: ... + @property + def month(self) -> int: ... + @property + def year(self) -> int: ... + def __sub__(self, other) -> Period | BaseOffset: ... + def __add__(self, other) -> Period: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/strptime.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/strptime.cpython-39-darwin.so new file mode 100755 index 00000000..a4c3ad85 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/strptime.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/strptime.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/strptime.pyi new file mode 100644 index 00000000..4565bb7e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/strptime.pyi @@ -0,0 +1,13 @@ +import numpy as np + +from pandas._typing import npt + +def array_strptime( + values: npt.NDArray[np.object_], + fmt: str | None, + exact: bool = ..., + errors: str = ..., + utc: bool = ..., +) -> tuple[np.ndarray, np.ndarray]: ... + +# first ndarray is M8[ns], second is object ndarray of tzinfo | None diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timedeltas.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timedeltas.cpython-39-darwin.so new file mode 100755 index 00000000..67b0376f Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timedeltas.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timedeltas.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timedeltas.pyi new file mode 100644 index 00000000..aba9b25b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timedeltas.pyi @@ -0,0 +1,169 @@ +from datetime import timedelta +from typing import ( + ClassVar, + Literal, + TypeAlias, + TypeVar, + overload, +) + +import numpy as np + +from pandas._libs.tslibs import ( + NaTType, + Tick, +) +from pandas._typing import ( + Self, + npt, +) + +# This should be kept consistent with the keys in the dict timedelta_abbrevs +# in pandas/_libs/tslibs/timedeltas.pyx +UnitChoices: TypeAlias = Literal[ + "Y", + "y", + "M", + "W", + "w", + "D", + "d", + "days", + "day", + "hours", + "hour", + "hr", + "h", + "m", + "minute", + "min", + "minutes", + "T", + "t", + "s", + "seconds", + "sec", + "second", + "ms", + "milliseconds", + "millisecond", + "milli", + "millis", + "L", + "l", + "us", + "microseconds", + "microsecond", + "µs", + "micro", + "micros", + "u", + "ns", + "nanoseconds", + "nano", + "nanos", + "nanosecond", + "n", +] +_S = TypeVar("_S", bound=timedelta) + +def ints_to_pytimedelta( + arr: npt.NDArray[np.timedelta64], + box: bool = ..., +) -> npt.NDArray[np.object_]: ... 
+def array_to_timedelta64( + values: npt.NDArray[np.object_], + unit: str | None = ..., + errors: str = ..., +) -> np.ndarray: ... # np.ndarray[m8ns] +def parse_timedelta_unit(unit: str | None) -> UnitChoices: ... +def delta_to_nanoseconds( + delta: np.timedelta64 | timedelta | Tick, + reso: int = ..., # NPY_DATETIMEUNIT + round_ok: bool = ..., +) -> int: ... +def floordiv_object_array( + left: np.ndarray, right: npt.NDArray[np.object_] +) -> np.ndarray: ... +def truediv_object_array( + left: np.ndarray, right: npt.NDArray[np.object_] +) -> np.ndarray: ... + +class Timedelta(timedelta): + _creso: int + min: ClassVar[Timedelta] + max: ClassVar[Timedelta] + resolution: ClassVar[Timedelta] + value: int # np.int64 + _value: int # np.int64 + # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]") + def __new__( # type: ignore[misc] + cls: type[_S], + value=..., + unit: str | None = ..., + **kwargs: float | np.integer | np.floating, + ) -> _S | NaTType: ... + @classmethod + def _from_value_and_reso(cls, value: np.int64, reso: int) -> Timedelta: ... + @property + def days(self) -> int: ... + @property + def seconds(self) -> int: ... + @property + def microseconds(self) -> int: ... + def total_seconds(self) -> float: ... + def to_pytimedelta(self) -> timedelta: ... + def to_timedelta64(self) -> np.timedelta64: ... + @property + def asm8(self) -> np.timedelta64: ... + # TODO: round/floor/ceil could return NaT? + def round(self, freq: str) -> Self: ... + def floor(self, freq: str) -> Self: ... + def ceil(self, freq: str) -> Self: ... + @property + def resolution_string(self) -> str: ... + def __add__(self, other: timedelta) -> Timedelta: ... + def __radd__(self, other: timedelta) -> Timedelta: ... + def __sub__(self, other: timedelta) -> Timedelta: ... + def __rsub__(self, other: timedelta) -> Timedelta: ... + def __neg__(self) -> Timedelta: ... + def __pos__(self) -> Timedelta: ... + def __abs__(self) -> Timedelta: ... + def __mul__(self, other: float) -> Timedelta: ... + def __rmul__(self, other: float) -> Timedelta: ... + # error: Signature of "__floordiv__" incompatible with supertype "timedelta" + @overload # type: ignore[override] + def __floordiv__(self, other: timedelta) -> int: ... + @overload + def __floordiv__(self, other: float) -> Timedelta: ... + @overload + def __floordiv__( + self, other: npt.NDArray[np.timedelta64] + ) -> npt.NDArray[np.intp]: ... + @overload + def __floordiv__( + self, other: npt.NDArray[np.number] + ) -> npt.NDArray[np.timedelta64] | Timedelta: ... + @overload + def __rfloordiv__(self, other: timedelta | str) -> int: ... + @overload + def __rfloordiv__(self, other: None | NaTType) -> NaTType: ... + @overload + def __rfloordiv__(self, other: np.ndarray) -> npt.NDArray[np.timedelta64]: ... + @overload + def __truediv__(self, other: timedelta) -> float: ... + @overload + def __truediv__(self, other: float) -> Timedelta: ... + def __mod__(self, other: timedelta) -> Timedelta: ... + def __divmod__(self, other: timedelta) -> tuple[int, Timedelta]: ... + def __le__(self, other: timedelta) -> bool: ... + def __lt__(self, other: timedelta) -> bool: ... + def __ge__(self, other: timedelta) -> bool: ... + def __gt__(self, other: timedelta) -> bool: ... + def __hash__(self) -> int: ... + def isoformat(self) -> str: ... + def to_numpy(self) -> np.timedelta64: ... + def view(self, dtype: npt.DTypeLike = ...) -> object: ... + @property + def unit(self) -> str: ... + def as_unit(self, unit: str, round_ok: bool = ...) -> Timedelta: ... 
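A short sketch of the `Timedelta` surface stubbed above: it extends `datetime.timedelta` with the unit aliases in `UnitChoices`, frequency-based `round`/`floor`/`ceil`, and (for pandas >= 2.0) non-nanosecond resolutions via `as_unit`:

    import pandas as pd

    td = pd.Timedelta("1 day 2h 30min")
    print(td.total_seconds())  # 95400.0
    print(td.floor("h"))       # 1 days 02:00:00
    print(td.as_unit("s"))     # second resolution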
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timestamps.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timestamps.cpython-39-darwin.so new file mode 100755 index 00000000..77dc498b Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timestamps.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timestamps.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timestamps.pyi new file mode 100644 index 00000000..36ae2d6d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timestamps.pyi @@ -0,0 +1,240 @@ +from datetime import ( + date as _date, + datetime, + time as _time, + timedelta, + tzinfo as _tzinfo, +) +from time import struct_time +from typing import ( + ClassVar, + TypeVar, + overload, +) + +import numpy as np + +from pandas._libs.tslibs import ( + BaseOffset, + NaTType, + Period, + Tick, + Timedelta, +) +from pandas._typing import ( + Self, + TimestampNonexistent, +) + +_DatetimeT = TypeVar("_DatetimeT", bound=datetime) + +def integer_op_not_supported(obj: object) -> TypeError: ... + +class Timestamp(datetime): + _creso: int + min: ClassVar[Timestamp] + max: ClassVar[Timestamp] + + resolution: ClassVar[Timedelta] + _value: int # np.int64 + # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]") + def __new__( # type: ignore[misc] + cls: type[_DatetimeT], + ts_input: np.integer | float | str | _date | datetime | np.datetime64 = ..., + year: int | None = ..., + month: int | None = ..., + day: int | None = ..., + hour: int | None = ..., + minute: int | None = ..., + second: int | None = ..., + microsecond: int | None = ..., + tzinfo: _tzinfo | None = ..., + *, + nanosecond: int | None = ..., + tz: str | _tzinfo | None | int = ..., + unit: str | int | None = ..., + fold: int | None = ..., + ) -> _DatetimeT | NaTType: ... + @classmethod + def _from_value_and_reso( + cls, value: int, reso: int, tz: _tzinfo | None + ) -> Timestamp: ... + @property + def value(self) -> int: ... # np.int64 + @property + def year(self) -> int: ... + @property + def month(self) -> int: ... + @property + def day(self) -> int: ... + @property + def hour(self) -> int: ... + @property + def minute(self) -> int: ... + @property + def second(self) -> int: ... + @property + def microsecond(self) -> int: ... + @property + def nanosecond(self) -> int: ... + @property + def tzinfo(self) -> _tzinfo | None: ... + @property + def tz(self) -> _tzinfo | None: ... + @property + def fold(self) -> int: ... + @classmethod + def fromtimestamp(cls, ts: float, tz: _tzinfo | None = ...) -> Self: ... + @classmethod + def utcfromtimestamp(cls, ts: float) -> Self: ... + @classmethod + def today(cls, tz: _tzinfo | str | None = ...) -> Self: ... + @classmethod + def fromordinal( + cls, + ordinal: int, + tz: _tzinfo | str | None = ..., + ) -> Self: ... + @classmethod + def now(cls, tz: _tzinfo | str | None = ...) -> Self: ... + @classmethod + def utcnow(cls) -> Self: ... + # error: Signature of "combine" incompatible with supertype "datetime" + @classmethod + def combine( # type: ignore[override] + cls, date: _date, time: _time + ) -> datetime: ... + @classmethod + def fromisoformat(cls, date_string: str) -> Self: ... + def strftime(self, format: str) -> str: ... + def __format__(self, fmt: str) -> str: ... + def toordinal(self) -> int: ... + def timetuple(self) -> struct_time: ... + def timestamp(self) -> float: ... 
+ def utctimetuple(self) -> struct_time: ... + def date(self) -> _date: ... + def time(self) -> _time: ... + def timetz(self) -> _time: ... + # LSP violation: nanosecond is not present in datetime.datetime.replace + # and has positional args following it + def replace( # type: ignore[override] + self, + year: int | None = ..., + month: int | None = ..., + day: int | None = ..., + hour: int | None = ..., + minute: int | None = ..., + second: int | None = ..., + microsecond: int | None = ..., + nanosecond: int | None = ..., + tzinfo: _tzinfo | type[object] | None = ..., + fold: int | None = ..., + ) -> Self: ... + # LSP violation: datetime.datetime.astimezone has a default value for tz + def astimezone(self, tz: _tzinfo | None) -> Self: ... # type: ignore[override] + def ctime(self) -> str: ... + def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ... + @classmethod + def strptime( + # Note: strptime is actually disabled and raises NotImplementedError + cls, + date_string: str, + format: str, + ) -> Self: ... + def utcoffset(self) -> timedelta | None: ... + def tzname(self) -> str | None: ... + def dst(self) -> timedelta | None: ... + def __le__(self, other: datetime) -> bool: ... # type: ignore[override] + def __lt__(self, other: datetime) -> bool: ... # type: ignore[override] + def __ge__(self, other: datetime) -> bool: ... # type: ignore[override] + def __gt__(self, other: datetime) -> bool: ... # type: ignore[override] + # error: Signature of "__add__" incompatible with supertype "date"/"datetime" + @overload # type: ignore[override] + def __add__(self, other: np.ndarray) -> np.ndarray: ... + @overload + def __add__(self, other: timedelta | np.timedelta64 | Tick) -> Self: ... + def __radd__(self, other: timedelta) -> Self: ... + @overload # type: ignore[override] + def __sub__(self, other: datetime) -> Timedelta: ... + @overload + def __sub__(self, other: timedelta | np.timedelta64 | Tick) -> Self: ... + def __hash__(self) -> int: ... + def weekday(self) -> int: ... + def isoweekday(self) -> int: ... + # Return type "Tuple[int, int, int]" of "isocalendar" incompatible with return + # type "_IsoCalendarDate" in supertype "date" + def isocalendar(self) -> tuple[int, int, int]: ... # type: ignore[override] + @property + def is_leap_year(self) -> bool: ... + @property + def is_month_start(self) -> bool: ... + @property + def is_quarter_start(self) -> bool: ... + @property + def is_year_start(self) -> bool: ... + @property + def is_month_end(self) -> bool: ... + @property + def is_quarter_end(self) -> bool: ... + @property + def is_year_end(self) -> bool: ... + def to_pydatetime(self, warn: bool = ...) -> datetime: ... + def to_datetime64(self) -> np.datetime64: ... + def to_period(self, freq: BaseOffset | str = ...) -> Period: ... + def to_julian_date(self) -> np.float64: ... + @property + def asm8(self) -> np.datetime64: ... + def tz_convert(self, tz: _tzinfo | str | None) -> Self: ... + # TODO: could return NaT? + def tz_localize( + self, + tz: _tzinfo | str | None, + ambiguous: str = ..., + nonexistent: TimestampNonexistent = ..., + ) -> Self: ... + def normalize(self) -> Self: ... + # TODO: round/floor/ceil could return NaT? + def round( + self, + freq: str, + ambiguous: bool | str = ..., + nonexistent: TimestampNonexistent = ..., + ) -> Self: ... + def floor( + self, + freq: str, + ambiguous: bool | str = ..., + nonexistent: TimestampNonexistent = ..., + ) -> Self: ... 
+ def ceil( + self, + freq: str, + ambiguous: bool | str = ..., + nonexistent: TimestampNonexistent = ..., + ) -> Self: ... + def day_name(self, locale: str | None = ...) -> str: ... + def month_name(self, locale: str | None = ...) -> str: ... + @property + def day_of_week(self) -> int: ... + @property + def dayofweek(self) -> int: ... + @property + def day_of_year(self) -> int: ... + @property + def dayofyear(self) -> int: ... + @property + def quarter(self) -> int: ... + @property + def week(self) -> int: ... + def to_numpy( + self, dtype: np.dtype | None = ..., copy: bool = ... + ) -> np.datetime64: ... + @property + def _date_repr(self) -> str: ... + @property + def days_in_month(self) -> int: ... + @property + def daysinmonth(self) -> int: ... + @property + def unit(self) -> str: ... + def as_unit(self, unit: str, round_ok: bool = ...) -> Timestamp: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timezones.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timezones.cpython-39-darwin.so new file mode 100755 index 00000000..05deea35 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timezones.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timezones.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timezones.pyi new file mode 100644 index 00000000..4e9f0c6a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/timezones.pyi @@ -0,0 +1,21 @@ +from datetime import ( + datetime, + tzinfo, +) +from typing import Callable + +import numpy as np + +# imported from dateutil.tz +dateutil_gettz: Callable[[str], tzinfo] + +def tz_standardize(tz: tzinfo) -> tzinfo: ... +def tz_compare(start: tzinfo | None, end: tzinfo | None) -> bool: ... +def infer_tzinfo( + start: datetime | None, + end: datetime | None, +) -> tzinfo | None: ... +def maybe_get_tz(tz: str | int | np.int64 | tzinfo | None) -> tzinfo | None: ... +def get_timezone(tz: tzinfo) -> tzinfo | str: ... +def is_utc(tz: tzinfo | None) -> bool: ... +def is_fixed_offset(tz: tzinfo) -> bool: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/tzconversion.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/tzconversion.cpython-39-darwin.so new file mode 100755 index 00000000..a4b9b63a Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/tzconversion.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/tzconversion.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/tzconversion.pyi new file mode 100644 index 00000000..a354765a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/tzconversion.pyi @@ -0,0 +1,21 @@ +from datetime import ( + timedelta, + tzinfo, +) +from typing import Iterable + +import numpy as np + +from pandas._typing import npt + +# tz_convert_from_utc_single exposed for testing +def tz_convert_from_utc_single( + val: np.int64, tz: tzinfo, creso: int = ... +) -> np.int64: ... +def tz_localize_to_utc( + vals: npt.NDArray[np.int64], + tz: tzinfo | None, + ambiguous: str | bool | Iterable[bool] | None = ..., + nonexistent: str | timedelta | np.timedelta64 | None = ..., + creso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.int64]: ... 
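Tying together the `timestamps.pyi`, `timezones.pyi`, and `tzconversion.pyi` stubs above: `tz_localize` attaches a zone (computing UTC offsets via `tz_localize_to_utc`), `tz_convert` re-expresses the same instant, and wall times inside a DST gap route through the stubbed `nonexistent` policy. A sketch using the 2024 US spring-forward date as an illustrative example:

    import pandas as pd

    ts = pd.Timestamp("2024-03-10 01:30").tz_localize("America/New_York")
    print(ts.tz_convert("UTC"))

    # 02:30 does not exist on this date in New York (clocks jump 02:00->03:00):
    gap = pd.Timestamp("2024-03-10 02:30")
    print(gap.tz_localize("America/New_York", nonexistent="shift_forward"))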
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/vectorized.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/vectorized.cpython-39-darwin.so new file mode 100755 index 00000000..c8ee5aeb Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/vectorized.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/vectorized.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/vectorized.pyi new file mode 100644 index 00000000..3fd9e250 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/tslibs/vectorized.pyi @@ -0,0 +1,43 @@ +""" +For cython types that cannot be represented precisely, closest-available +python equivalents are used, and the precise types kept as adjacent comments. +""" +from datetime import tzinfo + +import numpy as np + +from pandas._libs.tslibs.dtypes import Resolution +from pandas._typing import npt + +def dt64arr_to_periodarr( + stamps: npt.NDArray[np.int64], + freq: int, + tz: tzinfo | None, + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.int64]: ... +def is_date_array_normalized( + stamps: npt.NDArray[np.int64], + tz: tzinfo | None, + reso: int, # NPY_DATETIMEUNIT +) -> bool: ... +def normalize_i8_timestamps( + stamps: npt.NDArray[np.int64], + tz: tzinfo | None, + reso: int, # NPY_DATETIMEUNIT +) -> npt.NDArray[np.int64]: ... +def get_resolution( + stamps: npt.NDArray[np.int64], + tz: tzinfo | None = ..., + reso: int = ..., # NPY_DATETIMEUNIT +) -> Resolution: ... +def ints_to_pydatetime( + arr: npt.NDArray[np.int64], + tz: tzinfo | None = ..., + box: str = ..., + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.object_]: ... +def tz_convert_from_utc( + stamps: npt.NDArray[np.int64], + tz: tzinfo | None, + reso: int = ..., # NPY_DATETIMEUNIT +) -> npt.NDArray[np.int64]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/aggregations.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/aggregations.cpython-39-darwin.so new file mode 100755 index 00000000..5469c497 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/aggregations.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/aggregations.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/aggregations.pyi new file mode 100644 index 00000000..b926a7cb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/aggregations.pyi @@ -0,0 +1,127 @@ +from typing import ( + Any, + Callable, + Literal, +) + +import numpy as np + +from pandas._typing import ( + WindowingRankType, + npt, +) + +def roll_sum( + values: np.ndarray, # const float64_t[:] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t +) -> np.ndarray: ... # np.ndarray[float] +def roll_mean( + values: np.ndarray, # const float64_t[:] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t +) -> np.ndarray: ... 
# np.ndarray[float] +def roll_var( + values: np.ndarray, # const float64_t[:] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t + ddof: int = ..., +) -> np.ndarray: ... # np.ndarray[float] +def roll_skew( + values: np.ndarray, # np.ndarray[np.float64] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t +) -> np.ndarray: ... # np.ndarray[float] +def roll_kurt( + values: np.ndarray, # np.ndarray[np.float64] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t +) -> np.ndarray: ... # np.ndarray[float] +def roll_median_c( + values: np.ndarray, # np.ndarray[np.float64] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t +) -> np.ndarray: ... # np.ndarray[float] +def roll_max( + values: np.ndarray, # np.ndarray[np.float64] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t +) -> np.ndarray: ... # np.ndarray[float] +def roll_min( + values: np.ndarray, # np.ndarray[np.float64] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t +) -> np.ndarray: ... # np.ndarray[float] +def roll_quantile( + values: np.ndarray, # const float64_t[:] + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t + quantile: float, # float64_t + interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"], +) -> np.ndarray: ... # np.ndarray[float] +def roll_rank( + values: np.ndarray, + start: np.ndarray, + end: np.ndarray, + minp: int, + percentile: bool, + method: WindowingRankType, + ascending: bool, +) -> np.ndarray: ... # np.ndarray[float] +def roll_apply( + obj: object, + start: np.ndarray, # np.ndarray[np.int64] + end: np.ndarray, # np.ndarray[np.int64] + minp: int, # int64_t + function: Callable[..., Any], + raw: bool, + args: tuple[Any, ...], + kwargs: dict[str, Any], +) -> npt.NDArray[np.float64]: ... +def roll_weighted_sum( + values: np.ndarray, # const float64_t[:] + weights: np.ndarray, # const float64_t[:] + minp: int, +) -> np.ndarray: ... # np.ndarray[np.float64] +def roll_weighted_mean( + values: np.ndarray, # const float64_t[:] + weights: np.ndarray, # const float64_t[:] + minp: int, +) -> np.ndarray: ... # np.ndarray[np.float64] +def roll_weighted_var( + values: np.ndarray, # const float64_t[:] + weights: np.ndarray, # const float64_t[:] + minp: int, # int64_t + ddof: int, # unsigned int +) -> np.ndarray: ... # np.ndarray[np.float64] +def ewm( + vals: np.ndarray, # const float64_t[:] + start: np.ndarray, # const int64_t[:] + end: np.ndarray, # const int64_t[:] + minp: int, + com: float, # float64_t + adjust: bool, + ignore_na: bool, + deltas: np.ndarray, # const float64_t[:] + normalize: bool, +) -> np.ndarray: ... # np.ndarray[np.float64] +def ewmcov( + input_x: np.ndarray, # const float64_t[:] + start: np.ndarray, # const int64_t[:] + end: np.ndarray, # const int64_t[:] + minp: int, + input_y: np.ndarray, # const float64_t[:] + com: float, # float64_t + adjust: bool, + ignore_na: bool, + bias: bool, +) -> np.ndarray: ... 
# np.ndarray[np.float64] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/indexers.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/indexers.cpython-39-darwin.so new file mode 100755 index 00000000..f7083b5f Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/indexers.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/indexers.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/indexers.pyi new file mode 100644 index 00000000..c9bc64be --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/window/indexers.pyi @@ -0,0 +1,12 @@ +import numpy as np + +from pandas._typing import npt + +def calculate_variable_window_bounds( + num_values: int, # int64_t + window_size: int, # int64_t + min_periods, + center: bool, + closed: str | None, + index: np.ndarray, # const int64_t[:] +) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: ... diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/writers.cpython-39-darwin.so b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/writers.cpython-39-darwin.so new file mode 100755 index 00000000..da1ce6e2 Binary files /dev/null and b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/writers.cpython-39-darwin.so differ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/writers.pyi b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/writers.pyi new file mode 100644 index 00000000..7b418565 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_libs/writers.pyi @@ -0,0 +1,20 @@ +import numpy as np + +from pandas._typing import ArrayLike + +def write_csv_rows( + data: list[ArrayLike], + data_index: np.ndarray, + nlevels: int, + cols: np.ndarray, + writer: object, # _csv.writer +) -> None: ... +def convert_json_to_lines(arr: str) -> str: ... +def max_len_string_array( + arr: np.ndarray, # pandas_string[:] +) -> int: ... +def word_len(val: object) -> int: ... +def string_array_replace_from_nan_rep( + arr: np.ndarray, # np.ndarray[object, ndim=1] + nan_rep: object, +) -> None: ... 
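The window stubs above (`roll_sum` through `ewmcov`, plus `calculate_variable_window_bounds`) declare the Cython kernels behind the public rolling API: pandas precomputes integer start/end bounds per window and a `min_periods` threshold, then hands the float64 values to a kernel. A minimal editorial sketch of that surface, using only public calls on arbitrary data:

import numpy as np
import pandas as pd

s = pd.Series(np.arange(6, dtype="float64"))

# Each call below dispatches to one of the stubbed kernels (roll_mean,
# roll_var, roll_quantile) with precomputed window bounds and min_periods.
print(s.rolling(window=3, min_periods=1).mean())
print(s.rolling(window=3).var(ddof=1))
print(s.rolling(window=3).quantile(0.5, interpolation="linear"))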
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/__init__.py new file mode 100644 index 00000000..73835252 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/__init__.py @@ -0,0 +1,1184 @@ +from __future__ import annotations + +import collections +from collections import Counter +from datetime import datetime +from decimal import Decimal +import operator +import os +import re +import string +from sys import byteorder +from typing import ( + TYPE_CHECKING, + Callable, + ContextManager, + cast, +) + +import numpy as np + +from pandas._config.localization import ( + can_set_locale, + get_locales, + set_locale, +) + +from pandas.compat import pa_version_under7p0 + +from pandas.core.dtypes.common import ( + is_float_dtype, + is_sequence, + is_signed_integer_dtype, + is_unsigned_integer_dtype, + pandas_dtype, +) + +import pandas as pd +from pandas import ( + ArrowDtype, + Categorical, + CategoricalIndex, + DataFrame, + DatetimeIndex, + Index, + IntervalIndex, + MultiIndex, + RangeIndex, + Series, + bdate_range, +) +from pandas._testing._io import ( + round_trip_localpath, + round_trip_pathlib, + round_trip_pickle, + write_to_compressed, +) +from pandas._testing._warnings import ( + assert_produces_warning, + maybe_produces_warning, +) +from pandas._testing.asserters import ( + assert_almost_equal, + assert_attr_equal, + assert_categorical_equal, + assert_class_equal, + assert_contains_all, + assert_copy, + assert_datetime_array_equal, + assert_dict_equal, + assert_equal, + assert_extension_array_equal, + assert_frame_equal, + assert_index_equal, + assert_indexing_slices_equivalent, + assert_interval_array_equal, + assert_is_sorted, + assert_is_valid_plot_return_object, + assert_metadata_equivalent, + assert_numpy_array_equal, + assert_period_array_equal, + assert_series_equal, + assert_sp_array_equal, + assert_timedelta_array_equal, + raise_assert_detail, +) +from pandas._testing.compat import ( + get_dtype, + get_obj, +) +from pandas._testing.contexts import ( + decompress_file, + ensure_clean, + raises_chained_assignment_error, + set_timezone, + use_numexpr, + with_csv_dialect, +) +from pandas.core.arrays import ( + BaseMaskedArray, + ExtensionArray, + NumpyExtensionArray, +) +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray +from pandas.core.construction import extract_array + +if TYPE_CHECKING: + from collections.abc import Iterable + + from pandas._typing import ( + Dtype, + Frequency, + NpDtype, + ) + + from pandas import ( + PeriodIndex, + TimedeltaIndex, + ) + from pandas.core.arrays import ArrowExtensionArray + +_N = 30 +_K = 4 + +UNSIGNED_INT_NUMPY_DTYPES: list[NpDtype] = ["uint8", "uint16", "uint32", "uint64"] +UNSIGNED_INT_EA_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"] +SIGNED_INT_NUMPY_DTYPES: list[NpDtype] = [int, "int8", "int16", "int32", "int64"] +SIGNED_INT_EA_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"] +ALL_INT_NUMPY_DTYPES = UNSIGNED_INT_NUMPY_DTYPES + SIGNED_INT_NUMPY_DTYPES +ALL_INT_EA_DTYPES = UNSIGNED_INT_EA_DTYPES + SIGNED_INT_EA_DTYPES +ALL_INT_DTYPES: list[Dtype] = [*ALL_INT_NUMPY_DTYPES, *ALL_INT_EA_DTYPES] + +FLOAT_NUMPY_DTYPES: list[NpDtype] = [float, "float32", "float64"] +FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"] +ALL_FLOAT_DTYPES: list[Dtype] = [*FLOAT_NUMPY_DTYPES, *FLOAT_EA_DTYPES] + +COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"] +STRING_DTYPES: list[Dtype] = [str, "str", 
"U"] + +DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"] +TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"] + +BOOL_DTYPES: list[Dtype] = [bool, "bool"] +BYTES_DTYPES: list[Dtype] = [bytes, "bytes"] +OBJECT_DTYPES: list[Dtype] = [object, "object"] + +ALL_REAL_NUMPY_DTYPES = FLOAT_NUMPY_DTYPES + ALL_INT_NUMPY_DTYPES +ALL_REAL_EXTENSION_DTYPES = FLOAT_EA_DTYPES + ALL_INT_EA_DTYPES +ALL_REAL_DTYPES: list[Dtype] = [*ALL_REAL_NUMPY_DTYPES, *ALL_REAL_EXTENSION_DTYPES] +ALL_NUMERIC_DTYPES: list[Dtype] = [*ALL_REAL_DTYPES, *COMPLEX_DTYPES] + +ALL_NUMPY_DTYPES = ( + ALL_REAL_NUMPY_DTYPES + + COMPLEX_DTYPES + + STRING_DTYPES + + DATETIME64_DTYPES + + TIMEDELTA64_DTYPES + + BOOL_DTYPES + + OBJECT_DTYPES + + BYTES_DTYPES +) + +NARROW_NP_DTYPES = [ + np.float16, + np.float32, + np.int8, + np.int16, + np.int32, + np.uint8, + np.uint16, + np.uint32, +] + +PYTHON_DATA_TYPES = [ + str, + int, + float, + complex, + list, + tuple, + range, + dict, + set, + frozenset, + bool, + bytes, + bytearray, + memoryview, +] + +ENDIAN = {"little": "<", "big": ">"}[byteorder] + +NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")] +NP_NAT_OBJECTS = [ + cls("NaT", unit) + for cls in [np.datetime64, np.timedelta64] + for unit in [ + "Y", + "M", + "W", + "D", + "h", + "m", + "s", + "ms", + "us", + "ns", + "ps", + "fs", + "as", + ] +] + +if not pa_version_under7p0: + import pyarrow as pa + + UNSIGNED_INT_PYARROW_DTYPES = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()] + SIGNED_INT_PYARROW_DTYPES = [pa.int8(), pa.int16(), pa.int32(), pa.int64()] + ALL_INT_PYARROW_DTYPES = UNSIGNED_INT_PYARROW_DTYPES + SIGNED_INT_PYARROW_DTYPES + ALL_INT_PYARROW_DTYPES_STR_REPR = [ + str(ArrowDtype(typ)) for typ in ALL_INT_PYARROW_DTYPES + ] + + # pa.float16 doesn't seem supported + # https://github.com/apache/arrow/blob/master/python/pyarrow/src/arrow/python/helpers.cc#L86 + FLOAT_PYARROW_DTYPES = [pa.float32(), pa.float64()] + FLOAT_PYARROW_DTYPES_STR_REPR = [ + str(ArrowDtype(typ)) for typ in FLOAT_PYARROW_DTYPES + ] + DECIMAL_PYARROW_DTYPES = [pa.decimal128(7, 3)] + STRING_PYARROW_DTYPES = [pa.string()] + BINARY_PYARROW_DTYPES = [pa.binary()] + + TIME_PYARROW_DTYPES = [ + pa.time32("s"), + pa.time32("ms"), + pa.time64("us"), + pa.time64("ns"), + ] + DATE_PYARROW_DTYPES = [pa.date32(), pa.date64()] + DATETIME_PYARROW_DTYPES = [ + pa.timestamp(unit=unit, tz=tz) + for unit in ["s", "ms", "us", "ns"] + for tz in [None, "UTC", "US/Pacific", "US/Eastern"] + ] + TIMEDELTA_PYARROW_DTYPES = [pa.duration(unit) for unit in ["s", "ms", "us", "ns"]] + + BOOL_PYARROW_DTYPES = [pa.bool_()] + + # TODO: Add container like pyarrow types: + # https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions + ALL_PYARROW_DTYPES = ( + ALL_INT_PYARROW_DTYPES + + FLOAT_PYARROW_DTYPES + + DECIMAL_PYARROW_DTYPES + + STRING_PYARROW_DTYPES + + BINARY_PYARROW_DTYPES + + TIME_PYARROW_DTYPES + + DATE_PYARROW_DTYPES + + DATETIME_PYARROW_DTYPES + + TIMEDELTA_PYARROW_DTYPES + + BOOL_PYARROW_DTYPES + ) +else: + FLOAT_PYARROW_DTYPES_STR_REPR = [] + ALL_INT_PYARROW_DTYPES_STR_REPR = [] + ALL_PYARROW_DTYPES = [] + + +EMPTY_STRING_PATTERN = re.compile("^$") + + +arithmetic_dunder_methods = [ + "__add__", + "__radd__", + "__sub__", + "__rsub__", + "__mul__", + "__rmul__", + "__floordiv__", + "__rfloordiv__", + "__truediv__", + "__rtruediv__", + "__pow__", + "__rpow__", + "__mod__", + "__rmod__", +] + +comparison_dunder_methods = ["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"] + + +def 
reset_display_options() -> None:
+    """
+    Reset the display options for printing and representing objects.
+    """
+    pd.reset_option("^display.", silent=True)
+
+
+# -----------------------------------------------------------------------------
+# Comparators
+
+
+def equalContents(arr1, arr2) -> bool:
+    """
+    Checks if the sets of unique elements of arr1 and arr2 are equivalent.
+    """
+    return frozenset(arr1) == frozenset(arr2)
+
+
+def box_expected(expected, box_cls, transpose: bool = True):
+    """
+    Helper function to wrap the expected output of a test in a given box_cls.
+
+    Parameters
+    ----------
+    expected : np.ndarray, Index, Series
+    box_cls : {Index, Series, DataFrame}
+
+    Returns
+    -------
+    subclass of box_cls
+    """
+    if box_cls is pd.array:
+        if isinstance(expected, RangeIndex):
+            # pd.array would return an IntegerArray
+            expected = NumpyExtensionArray(np.asarray(expected._values))
+        else:
+            expected = pd.array(expected, copy=False)
+    elif box_cls is Index:
+        expected = Index(expected)
+    elif box_cls is Series:
+        expected = Series(expected)
+    elif box_cls is DataFrame:
+        expected = Series(expected).to_frame()
+        if transpose:
+            # for vector operations, we need a DataFrame to be a single-row,
+            # not a single-column, in order to operate against non-DataFrame
+            # vectors of the same length. But convert to two rows to avoid
+            # single-row special cases in datetime arithmetic
+            expected = expected.T
+            expected = pd.concat([expected] * 2, ignore_index=True)
+    elif box_cls is np.ndarray or box_cls is np.array:
+        expected = np.array(expected)
+    elif box_cls is to_array:
+        expected = to_array(expected)
+    else:
+        raise NotImplementedError(box_cls)
+    return expected
+
+
+def to_array(obj):
+    """
+    Similar to pd.array, but does not cast numpy dtypes to nullable dtypes.
+    """
+    # temporary implementation until we get pd.array in place
+    dtype = getattr(obj, "dtype", None)
+
+    if dtype is None:
+        return np.asarray(obj)
+
+    return extract_array(obj, extract_numpy=True)
+
+
+# -----------------------------------------------------------------------------
+# Others
+
+
+def rands_array(
+    nchars, size: int, dtype: NpDtype = "O", replace: bool = True
+) -> np.ndarray:
+    """
+    Generate an array of random alphanumeric strings.
+ """ + chars = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1)) + retval = ( + np.random.default_rng(2) + .choice(chars, size=nchars * np.prod(size), replace=replace) + .view((np.str_, nchars)) + .reshape(size) + ) + return retval.astype(dtype) + + +def getCols(k) -> str: + return string.ascii_uppercase[:k] + + +# make index +def makeStringIndex(k: int = 10, name=None) -> Index: + return Index(rands_array(nchars=10, size=k), name=name) + + +def makeCategoricalIndex( + k: int = 10, n: int = 3, name=None, **kwargs +) -> CategoricalIndex: + """make a length k index or n categories""" + x = rands_array(nchars=4, size=n, replace=False) + return CategoricalIndex( + Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs + ) + + +def makeIntervalIndex(k: int = 10, name=None, **kwargs) -> IntervalIndex: + """make a length k IntervalIndex""" + x = np.linspace(0, 100, num=(k + 1)) + return IntervalIndex.from_breaks(x, name=name, **kwargs) + + +def makeBoolIndex(k: int = 10, name=None) -> Index: + if k == 1: + return Index([True], name=name) + elif k == 2: + return Index([False, True], name=name) + return Index([False, True] + [False] * (k - 2), name=name) + + +def makeNumericIndex(k: int = 10, *, name=None, dtype: Dtype | None) -> Index: + dtype = pandas_dtype(dtype) + assert isinstance(dtype, np.dtype) + + if dtype.kind in "iu": + values = np.arange(k, dtype=dtype) + if is_unsigned_integer_dtype(dtype): + values += 2 ** (dtype.itemsize * 8 - 1) + elif dtype.kind == "f": + values = np.random.default_rng(2).random(k) - np.random.default_rng(2).random(1) + values.sort() + values = values * (10 ** np.random.default_rng(2).integers(0, 9)) + else: + raise NotImplementedError(f"wrong dtype {dtype}") + + return Index(values, dtype=dtype, name=name) + + +def makeIntIndex(k: int = 10, *, name=None, dtype: Dtype = "int64") -> Index: + dtype = pandas_dtype(dtype) + if not is_signed_integer_dtype(dtype): + raise TypeError(f"Wrong dtype {dtype}") + return makeNumericIndex(k, name=name, dtype=dtype) + + +def makeUIntIndex(k: int = 10, *, name=None, dtype: Dtype = "uint64") -> Index: + dtype = pandas_dtype(dtype) + if not is_unsigned_integer_dtype(dtype): + raise TypeError(f"Wrong dtype {dtype}") + return makeNumericIndex(k, name=name, dtype=dtype) + + +def makeRangeIndex(k: int = 10, name=None, **kwargs) -> RangeIndex: + return RangeIndex(0, k, 1, name=name, **kwargs) + + +def makeFloatIndex(k: int = 10, *, name=None, dtype: Dtype = "float64") -> Index: + dtype = pandas_dtype(dtype) + if not is_float_dtype(dtype): + raise TypeError(f"Wrong dtype {dtype}") + return makeNumericIndex(k, name=name, dtype=dtype) + + +def makeDateIndex( + k: int = 10, freq: Frequency = "B", name=None, **kwargs +) -> DatetimeIndex: + dt = datetime(2000, 1, 1) + dr = bdate_range(dt, periods=k, freq=freq, name=name) + return DatetimeIndex(dr, name=name, **kwargs) + + +def makeTimedeltaIndex( + k: int = 10, freq: Frequency = "D", name=None, **kwargs +) -> TimedeltaIndex: + return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs) + + +def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex: + dt = datetime(2000, 1, 1) + pi = pd.period_range(start=dt, periods=k, freq="D", name=name, **kwargs) + return pi + + +def makeMultiIndex(k: int = 10, names=None, **kwargs): + N = (k // 2) + 1 + rng = range(N) + mi = MultiIndex.from_product([("foo", "bar"), rng], names=names, **kwargs) + assert len(mi) >= k # GH#38795 + return mi[:k] + + +def 
index_subclass_makers_generator(): + make_index_funcs = [ + makeDateIndex, + makePeriodIndex, + makeTimedeltaIndex, + makeRangeIndex, + makeIntervalIndex, + makeCategoricalIndex, + makeMultiIndex, + ] + yield from make_index_funcs + + +def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]: + """ + Generator which can be iterated over to get instances of all the classes + which represent time-series. + + Parameters + ---------- + k: length of each of the index instances + """ + make_index_funcs: list[Callable[..., Index]] = [ + makeDateIndex, + makePeriodIndex, + makeTimedeltaIndex, + ] + for make_index_func in make_index_funcs: + yield make_index_func(k=k) + + +# make series +def make_rand_series(name=None, dtype=np.float64) -> Series: + index = makeStringIndex(_N) + data = np.random.default_rng(2).standard_normal(_N) + with np.errstate(invalid="ignore"): + data = data.astype(dtype, copy=False) + return Series(data, index=index, name=name) + + +def makeFloatSeries(name=None) -> Series: + return make_rand_series(name=name) + + +def makeStringSeries(name=None) -> Series: + return make_rand_series(name=name) + + +def makeObjectSeries(name=None) -> Series: + data = makeStringIndex(_N) + data = Index(data, dtype=object) + index = makeStringIndex(_N) + return Series(data, index=index, name=name) + + +def getSeriesData() -> dict[str, Series]: + index = makeStringIndex(_N) + return { + c: Series(np.random.default_rng(i).standard_normal(_N), index=index) + for i, c in enumerate(getCols(_K)) + } + + +def makeTimeSeries(nper=None, freq: Frequency = "B", name=None) -> Series: + if nper is None: + nper = _N + return Series( + np.random.default_rng(2).standard_normal(nper), + index=makeDateIndex(nper, freq=freq), + name=name, + ) + + +def makePeriodSeries(nper=None, name=None) -> Series: + if nper is None: + nper = _N + return Series( + np.random.default_rng(2).standard_normal(nper), + index=makePeriodIndex(nper), + name=name, + ) + + +def getTimeSeriesData(nper=None, freq: Frequency = "B") -> dict[str, Series]: + return {c: makeTimeSeries(nper, freq) for c in getCols(_K)} + + +def getPeriodData(nper=None) -> dict[str, Series]: + return {c: makePeriodSeries(nper) for c in getCols(_K)} + + +# make frame +def makeTimeDataFrame(nper=None, freq: Frequency = "B") -> DataFrame: + data = getTimeSeriesData(nper, freq) + return DataFrame(data) + + +def makeDataFrame() -> DataFrame: + data = getSeriesData() + return DataFrame(data) + + +def getMixedTypeDict(): + index = Index(["a", "b", "c", "d", "e"]) + + data = { + "A": [0.0, 1.0, 2.0, 3.0, 4.0], + "B": [0.0, 1.0, 0.0, 1.0, 0.0], + "C": ["foo1", "foo2", "foo3", "foo4", "foo5"], + "D": bdate_range("1/1/2009", periods=5), + } + + return index, data + + +def makeMixedDataFrame() -> DataFrame: + return DataFrame(getMixedTypeDict()[1]) + + +def makePeriodFrame(nper=None) -> DataFrame: + data = getPeriodData(nper) + return DataFrame(data) + + +def makeCustomIndex( + nentries, + nlevels, + prefix: str = "#", + names: bool | str | list[str] | None = False, + ndupe_l=None, + idx_type=None, +) -> Index: + """ + Create an index/multindex with given dimensions, levels, names, etc' + + nentries - number of entries in index + nlevels - number of levels (> 1 produces multindex) + prefix - a string prefix for labels + names - (Optional), bool or list of strings. if True will use default + names, if false will use no names, if a list is given, the name of + each level in the index will be taken from the list. 
+    ndupe_l - (Optional), list of ints, the number of rows for which the
+        label will be repeated at the corresponding level, you can specify just
+        the first few, the rest will use the default ndupe_l of 1.
+        len(ndupe_l) <= nlevels.
+    idx_type - "i"/"f"/"s"/"dt"/"p"/"td".
+        If idx_type is not None, `idx_nlevels` must be 1.
+        "i"/"f" creates an integer/float index,
+        "s" creates a string index,
+        "dt" creates a datetime index,
+        "td" creates a timedelta index.
+
+        if unspecified, string labels will be generated.
+    """
+    if ndupe_l is None:
+        ndupe_l = [1] * nlevels
+    assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
+    assert names is None or names is False or names is True or len(names) == nlevels
+    assert idx_type is None or (
+        idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
+    )
+
+    if names is True:
+        # build default names
+        names = [prefix + str(i) for i in range(nlevels)]
+    if names is False:
+        # pass None to index constructor for no name
+        names = None
+
+    # make singleton case uniform
+    if isinstance(names, str) and nlevels == 1:
+        names = [names]
+
+    # specific 1D index type requested?
+    idx_func_dict: dict[str, Callable[..., Index]] = {
+        "i": makeIntIndex,
+        "f": makeFloatIndex,
+        "s": makeStringIndex,
+        "dt": makeDateIndex,
+        "td": makeTimedeltaIndex,
+        "p": makePeriodIndex,
+    }
+    idx_func = idx_func_dict.get(idx_type)
+    if idx_func:
+        idx = idx_func(nentries)
+        # but we need to fill in the name
+        if names:
+            idx.name = names[0]
+        return idx
+    elif idx_type is not None:
+        raise ValueError(
+            f"{repr(idx_type)} is not a legal value for `idx_type`, "
+            "use 'i'/'f'/'s'/'dt'/'p'/'td'."
+        )
+
+    if len(ndupe_l) < nlevels:
+        ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
+    assert len(ndupe_l) == nlevels
+
+    assert all(x > 0 for x in ndupe_l)
+
+    list_of_lists = []
+    for i in range(nlevels):
+
+        def keyfunc(x):
+            numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
+            return [int(num) for num in numeric_tuple]
+
+        # build a list of lists to create the index from
+        div_factor = nentries // ndupe_l[i] + 1
+
+        # Deprecated since version 3.9: collections.Counter now supports []. See PEP 585
+        # and Generic Alias Type.
+        cnt: Counter[str] = collections.Counter()
+        for j in range(div_factor):
+            label = f"{prefix}_l{i}_g{j}"
+            cnt[label] = ndupe_l[i]
+        # cute Counter trick
+        result = sorted(cnt.elements(), key=keyfunc)[:nentries]
+        list_of_lists.append(result)
+
+    tuples = list(zip(*list_of_lists))
+
+    # convert tuples to index
+    if nentries == 1:
+        # we have a single level of tuples, i.e. a regular Index
+        name = None if names is None else names[0]
+        index = Index(tuples[0], name=name)
+    elif nlevels == 1:
+        name = None if names is None else names[0]
+        index = Index((x[0] for x in tuples), name=name)
+    else:
+        index = MultiIndex.from_tuples(tuples, names=names)
+    return index
+
+
+def makeCustomDataframe(
+    nrows,
+    ncols,
+    c_idx_names: bool | list[str] = True,
+    r_idx_names: bool | list[str] = True,
+    c_idx_nlevels: int = 1,
+    r_idx_nlevels: int = 1,
+    data_gen_f=None,
+    c_ndupe_l=None,
+    r_ndupe_l=None,
+    dtype=None,
+    c_idx_type=None,
+    r_idx_type=None,
+) -> DataFrame:
+    """
+    Create a DataFrame using supplied parameters.
+
+    Parameters
+    ----------
+    nrows, ncols - number of data rows/cols
+    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
+        default names or uses the provided names for the levels of the
+        corresponding index. You can provide a single string when
+        c_idx_nlevels == 1.
+    c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
+    r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
+    data_gen_f - a function f(row,col) which returns the data value
+        at that position, the default generator used yields values of the form
+        "RxCy" based on position.
+    c_ndupe_l, r_ndupe_l - list of integers, determines the number
+        of duplicates for each label at a given level of the corresponding
+        index. The default `None` value produces a multiplicity of 1 across
+        all levels, i.e. a unique index. Will accept a partial list of length
+        N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
+        nrows/ncols, the last label might have lower multiplicity.
+    dtype - passed to the DataFrame constructor as is, in case you wish to
+        have more control in conjunction with a custom `data_gen_f`
+    r_idx_type, c_idx_type - "i"/"f"/"s"/"dt"/"p"/"td".
+        If idx_type is not None, `idx_nlevels` must be 1.
+        "i"/"f" creates an integer/float index,
+        "s" creates a string index,
+        "dt" creates a datetime index,
+        "td" creates a timedelta index.
+
+        if unspecified, string labels will be generated.
+
+    Examples
+    --------
+    # 5 rows, 3 columns, default names on both, single index on both axes
+    >> makeCustomDataframe(5,3)
+
+    # make the data a random int between 1 and 100
+    >> makeCustomDataframe(5,3,data_gen_f=lambda r,c:randint(1,100))
+
+    # 2-level multiindex on rows with each label duplicated
+    # twice on first level, default names on both axes, single
+    # index on both axes
+    >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
+
+    # DatetimeIndex on rows, index with string labels on columns,
+    # no names on either axis
+    >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
+                             r_idx_type="dt",c_idx_type="s")
+
+    # 4-level multiindex on rows with names provided, 2-level multiindex
+    # on columns with default labels and default names.
+    >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
+                             r_idx_names=["FEE","FIH","FOH","FUM"],
+                             c_idx_nlevels=2)
+
+    >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
+    """
+    assert c_idx_nlevels > 0
+    assert r_idx_nlevels > 0
+    assert r_idx_type is None or (
+        r_idx_type in ("i", "f", "s", "dt", "p", "td") and r_idx_nlevels == 1
+    )
+    assert c_idx_type is None or (
+        c_idx_type in ("i", "f", "s", "dt", "p", "td") and c_idx_nlevels == 1
+    )
+
+    columns = makeCustomIndex(
+        ncols,
+        nlevels=c_idx_nlevels,
+        prefix="C",
+        names=c_idx_names,
+        ndupe_l=c_ndupe_l,
+        idx_type=c_idx_type,
+    )
+    index = makeCustomIndex(
+        nrows,
+        nlevels=r_idx_nlevels,
+        prefix="R",
+        names=r_idx_names,
+        ndupe_l=r_ndupe_l,
+        idx_type=r_idx_type,
+    )
+
+    # by default, generate data based on location
+    if data_gen_f is None:
+        data_gen_f = lambda r, c: f"R{r}C{c}"
+
+    data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
+
+    return DataFrame(data, index, columns, dtype=dtype)
+
+
+class SubclassedSeries(Series):
+    _metadata = ["testattr", "name"]
+
+    @property
+    def _constructor(self):
+        # For testing, those properties return a generic callable, and not
+        # the actual class.
In this case that is equivalent, but it is to + # ensure we don't rely on the property returning a class + # See https://github.com/pandas-dev/pandas/pull/46018 and + # https://github.com/pandas-dev/pandas/issues/32638 and linked issues + return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs) + + @property + def _constructor_expanddim(self): + return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs) + + +class SubclassedDataFrame(DataFrame): + _metadata = ["testattr"] + + @property + def _constructor(self): + return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs) + + @property + def _constructor_sliced(self): + return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs) + + +class SubclassedCategorical(Categorical): + pass + + +def _make_skipna_wrapper(alternative, skipna_alternative=None): + """ + Create a function for calling on an array. + + Parameters + ---------- + alternative : function + The function to be called on the array with no NaNs. + Only used when 'skipna_alternative' is None. + skipna_alternative : function + The function to be called on the original array + + Returns + ------- + function + """ + if skipna_alternative: + + def skipna_wrapper(x): + return skipna_alternative(x.values) + + else: + + def skipna_wrapper(x): + nona = x.dropna() + if len(nona) == 0: + return np.nan + return alternative(nona) + + return skipna_wrapper + + +def convert_rows_list_to_csv_str(rows_list: list[str]) -> str: + """ + Convert list of CSV rows to single CSV-formatted string for current OS. + + This method is used for creating expected value of to_csv() method. + + Parameters + ---------- + rows_list : List[str] + Each element represents the row of csv. + + Returns + ------- + str + Expected output of to_csv() in current OS. + """ + sep = os.linesep + return sep.join(rows_list) + sep + + +def external_error_raised(expected_exception: type[Exception]) -> ContextManager: + """ + Helper function to mark pytest.raises that have an external error message. + + Parameters + ---------- + expected_exception : Exception + Expected error to raise. + + Returns + ------- + Callable + Regular `pytest.raises` function with `match` equal to `None`. + """ + import pytest + + return pytest.raises(expected_exception, match=None) + + +cython_table = pd.core.common._cython_table.items() + + +def get_cython_table_params(ndframe, func_names_and_expected): + """ + Combine frame, functions from com._cython_table + keys and expected result. + + Parameters + ---------- + ndframe : DataFrame or Series + func_names_and_expected : Sequence of two items + The first item is a name of a NDFrame method ('sum', 'prod') etc. + The second item is the expected return value. + + Returns + ------- + list + List of three items (DataFrame, function, expected result) + """ + results = [] + for func_name, expected in func_names_and_expected: + results.append((ndframe, func_name, expected)) + results += [ + (ndframe, func, expected) + for func, name in cython_table + if name == func_name + ] + return results + + +def get_op_from_name(op_name: str) -> Callable: + """ + The operator function for a given op name. + + Parameters + ---------- + op_name : str + The op name, in form of "add" or "__add__". + + Returns + ------- + function + A function performing the operation. 
+ """ + short_opname = op_name.strip("_") + try: + op = getattr(operator, short_opname) + except AttributeError: + # Assume it is the reverse operator + rop = getattr(operator, short_opname[1:]) + op = lambda x, y: rop(y, x) + + return op + + +# ----------------------------------------------------------------------------- +# Indexing test helpers + + +def getitem(x): + return x + + +def setitem(x): + return x + + +def loc(x): + return x.loc + + +def iloc(x): + return x.iloc + + +def at(x): + return x.at + + +def iat(x): + return x.iat + + +# ----------------------------------------------------------------------------- + + +def shares_memory(left, right) -> bool: + """ + Pandas-compat for np.shares_memory. + """ + if isinstance(left, np.ndarray) and isinstance(right, np.ndarray): + return np.shares_memory(left, right) + elif isinstance(left, np.ndarray): + # Call with reversed args to get to unpacking logic below. + return shares_memory(right, left) + + if isinstance(left, RangeIndex): + return False + if isinstance(left, MultiIndex): + return shares_memory(left._codes, right) + if isinstance(left, (Index, Series)): + return shares_memory(left._values, right) + + if isinstance(left, NDArrayBackedExtensionArray): + return shares_memory(left._ndarray, right) + if isinstance(left, pd.core.arrays.SparseArray): + return shares_memory(left.sp_values, right) + if isinstance(left, pd.core.arrays.IntervalArray): + return shares_memory(left._left, right) or shares_memory(left._right, right) + + if isinstance(left, ExtensionArray) and left.dtype == "string[pyarrow]": + # https://github.com/pandas-dev/pandas/pull/43930#discussion_r736862669 + left = cast("ArrowExtensionArray", left) + if isinstance(right, ExtensionArray) and right.dtype == "string[pyarrow]": + right = cast("ArrowExtensionArray", right) + left_pa_data = left._pa_array + right_pa_data = right._pa_array + left_buf1 = left_pa_data.chunk(0).buffers()[1] + right_buf1 = right_pa_data.chunk(0).buffers()[1] + return left_buf1 == right_buf1 + + if isinstance(left, BaseMaskedArray) and isinstance(right, BaseMaskedArray): + # By convention, we'll say these share memory if they share *either* + # the _data or the _mask + return np.shares_memory(left._data, right._data) or np.shares_memory( + left._mask, right._mask + ) + + if isinstance(left, DataFrame) and len(left._mgr.arrays) == 1: + arr = left._mgr.arrays[0] + return shares_memory(arr, right) + + raise NotImplementedError(type(left), type(right)) + + +__all__ = [ + "ALL_INT_EA_DTYPES", + "ALL_INT_NUMPY_DTYPES", + "ALL_NUMPY_DTYPES", + "ALL_REAL_NUMPY_DTYPES", + "all_timeseries_index_generator", + "assert_almost_equal", + "assert_attr_equal", + "assert_categorical_equal", + "assert_class_equal", + "assert_contains_all", + "assert_copy", + "assert_datetime_array_equal", + "assert_dict_equal", + "assert_equal", + "assert_extension_array_equal", + "assert_frame_equal", + "assert_index_equal", + "assert_indexing_slices_equivalent", + "assert_interval_array_equal", + "assert_is_sorted", + "assert_is_valid_plot_return_object", + "assert_metadata_equivalent", + "assert_numpy_array_equal", + "assert_period_array_equal", + "assert_produces_warning", + "assert_series_equal", + "assert_sp_array_equal", + "assert_timedelta_array_equal", + "at", + "BOOL_DTYPES", + "box_expected", + "BYTES_DTYPES", + "can_set_locale", + "COMPLEX_DTYPES", + "convert_rows_list_to_csv_str", + "DATETIME64_DTYPES", + "decompress_file", + "EMPTY_STRING_PATTERN", + "ENDIAN", + "ensure_clean", + "equalContents", + 
"external_error_raised", + "FLOAT_EA_DTYPES", + "FLOAT_NUMPY_DTYPES", + "getCols", + "get_cython_table_params", + "get_dtype", + "getitem", + "get_locales", + "getMixedTypeDict", + "get_obj", + "get_op_from_name", + "getPeriodData", + "getSeriesData", + "getTimeSeriesData", + "iat", + "iloc", + "index_subclass_makers_generator", + "loc", + "makeBoolIndex", + "makeCategoricalIndex", + "makeCustomDataframe", + "makeCustomIndex", + "makeDataFrame", + "makeDateIndex", + "makeFloatIndex", + "makeFloatSeries", + "makeIntervalIndex", + "makeIntIndex", + "makeMixedDataFrame", + "makeMultiIndex", + "makeNumericIndex", + "makeObjectSeries", + "makePeriodFrame", + "makePeriodIndex", + "makePeriodSeries", + "make_rand_series", + "makeRangeIndex", + "makeStringIndex", + "makeStringSeries", + "makeTimeDataFrame", + "makeTimedeltaIndex", + "makeTimeSeries", + "makeUIntIndex", + "maybe_produces_warning", + "NARROW_NP_DTYPES", + "NP_NAT_OBJECTS", + "NULL_OBJECTS", + "OBJECT_DTYPES", + "raise_assert_detail", + "reset_display_options", + "raises_chained_assignment_error", + "round_trip_localpath", + "round_trip_pathlib", + "round_trip_pickle", + "setitem", + "set_locale", + "set_timezone", + "shares_memory", + "SIGNED_INT_EA_DTYPES", + "SIGNED_INT_NUMPY_DTYPES", + "STRING_DTYPES", + "SubclassedCategorical", + "SubclassedDataFrame", + "SubclassedSeries", + "TIMEDELTA64_DTYPES", + "to_array", + "UNSIGNED_INT_EA_DTYPES", + "UNSIGNED_INT_NUMPY_DTYPES", + "use_numexpr", + "with_csv_dialect", + "write_to_compressed", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/_hypothesis.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/_hypothesis.py new file mode 100644 index 00000000..5256a303 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/_hypothesis.py @@ -0,0 +1,89 @@ +""" +Hypothesis data generator helpers. 
+""" +from datetime import datetime + +from hypothesis import strategies as st +from hypothesis.extra.dateutil import timezones as dateutil_timezones +from hypothesis.extra.pytz import timezones as pytz_timezones + +from pandas.compat import is_platform_windows + +import pandas as pd + +from pandas.tseries.offsets import ( + BMonthBegin, + BMonthEnd, + BQuarterBegin, + BQuarterEnd, + BYearBegin, + BYearEnd, + MonthBegin, + MonthEnd, + QuarterBegin, + QuarterEnd, + YearBegin, + YearEnd, +) + +OPTIONAL_INTS = st.lists(st.one_of(st.integers(), st.none()), max_size=10, min_size=3) + +OPTIONAL_FLOATS = st.lists(st.one_of(st.floats(), st.none()), max_size=10, min_size=3) + +OPTIONAL_TEXT = st.lists(st.one_of(st.none(), st.text()), max_size=10, min_size=3) + +OPTIONAL_DICTS = st.lists( + st.one_of(st.none(), st.dictionaries(st.text(), st.integers())), + max_size=10, + min_size=3, +) + +OPTIONAL_LISTS = st.lists( + st.one_of(st.none(), st.lists(st.text(), max_size=10, min_size=3)), + max_size=10, + min_size=3, +) + +OPTIONAL_ONE_OF_ALL = st.one_of( + OPTIONAL_DICTS, OPTIONAL_FLOATS, OPTIONAL_INTS, OPTIONAL_LISTS, OPTIONAL_TEXT +) + +if is_platform_windows(): + DATETIME_NO_TZ = st.datetimes(min_value=datetime(1900, 1, 1)) +else: + DATETIME_NO_TZ = st.datetimes() + +DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes( + min_value=pd.Timestamp(1900, 1, 1).to_pydatetime(), + max_value=pd.Timestamp(1900, 1, 1).to_pydatetime(), + timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()), +) + +DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ = st.datetimes( + min_value=pd.Timestamp.min.to_pydatetime(warn=False), + max_value=pd.Timestamp.max.to_pydatetime(warn=False), +) + +INT_NEG_999_TO_POS_999 = st.integers(-999, 999) + +# The strategy for each type is registered in conftest.py, as they don't carry +# enough runtime information (e.g. type hints) to infer how to build them. +YQM_OFFSET = st.one_of( + *map( + st.from_type, + [ + MonthBegin, + MonthEnd, + BMonthBegin, + BMonthEnd, + QuarterBegin, + QuarterEnd, + BQuarterBegin, + BQuarterEnd, + YearBegin, + YearEnd, + BYearBegin, + BYearEnd, + ], + ) +) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/_io.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/_io.py new file mode 100644 index 00000000..edbba945 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/_io.py @@ -0,0 +1,170 @@ +from __future__ import annotations + +import gzip +import io +import pathlib +import tarfile +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) +import uuid +import zipfile + +from pandas.compat import ( + get_bz2_file, + get_lzma_file, +) +from pandas.compat._optional import import_optional_dependency + +import pandas as pd +from pandas._testing.contexts import ensure_clean + +if TYPE_CHECKING: + from pandas._typing import ( + FilePath, + ReadPickleBuffer, + ) + + from pandas import ( + DataFrame, + Series, + ) + +# ------------------------------------------------------------------ +# File-IO + + +def round_trip_pickle( + obj: Any, path: FilePath | ReadPickleBuffer | None = None +) -> DataFrame | Series: + """ + Pickle an object and then read it again. + + Parameters + ---------- + obj : any object + The object to pickle and then re-read. + path : str, path object or file-like object, default None + The path where the pickled object is written and then read. + + Returns + ------- + pandas object + The original object that was pickled and then re-read. 
+ """ + _path = path + if _path is None: + _path = f"__{uuid.uuid4()}__.pickle" + with ensure_clean(_path) as temp_path: + pd.to_pickle(obj, temp_path) + return pd.read_pickle(temp_path) + + +def round_trip_pathlib(writer, reader, path: str | None = None): + """ + Write an object to file specified by a pathlib.Path and read it back + + Parameters + ---------- + writer : callable bound to pandas object + IO writing function (e.g. DataFrame.to_csv ) + reader : callable + IO reading function (e.g. pd.read_csv ) + path : str, default None + The path where the object is written and then read. + + Returns + ------- + pandas object + The original object that was serialized and then re-read. + """ + Path = pathlib.Path + if path is None: + path = "___pathlib___" + with ensure_clean(path) as path: + writer(Path(path)) # type: ignore[arg-type] + obj = reader(Path(path)) # type: ignore[arg-type] + return obj + + +def round_trip_localpath(writer, reader, path: str | None = None): + """ + Write an object to file specified by a py.path LocalPath and read it back. + + Parameters + ---------- + writer : callable bound to pandas object + IO writing function (e.g. DataFrame.to_csv ) + reader : callable + IO reading function (e.g. pd.read_csv ) + path : str, default None + The path where the object is written and then read. + + Returns + ------- + pandas object + The original object that was serialized and then re-read. + """ + import pytest + + LocalPath = pytest.importorskip("py.path").local + if path is None: + path = "___localpath___" + with ensure_clean(path) as path: + writer(LocalPath(path)) + obj = reader(LocalPath(path)) + return obj + + +def write_to_compressed(compression, path, data, dest: str = "test"): + """ + Write data to a compressed file. + + Parameters + ---------- + compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd'} + The compression type to use. + path : str + The file path to write the data. + data : str + The data to write. + dest : str, default "test" + The destination file (for ZIP only) + + Raises + ------ + ValueError : An invalid compression value was passed in. + """ + args: tuple[Any, ...] 
= (data,)
+    mode = "wb"
+    method = "write"
+    compress_method: Callable
+
+    if compression == "zip":
+        compress_method = zipfile.ZipFile
+        mode = "w"
+        args = (dest, data)
+        method = "writestr"
+    elif compression == "tar":
+        compress_method = tarfile.TarFile
+        mode = "w"
+        file = tarfile.TarInfo(name=dest)
+        buf = io.BytesIO(data)
+        file.size = len(data)
+        args = (file, buf)
+        method = "addfile"
+    elif compression == "gzip":
+        compress_method = gzip.GzipFile
+    elif compression == "bz2":
+        compress_method = get_bz2_file()
+    elif compression == "zstd":
+        compress_method = import_optional_dependency("zstandard").open
+    elif compression == "xz":
+        compress_method = get_lzma_file()
+    else:
+        raise ValueError(f"Unrecognized compression type: {compression}")
+
+    with compress_method(path, mode=mode) as f:
+        getattr(f, method)(*args)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/_warnings.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/_warnings.py
new file mode 100644
index 00000000..c1c70605
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/_warnings.py
@@ -0,0 +1,227 @@
+from __future__ import annotations
+
+from contextlib import (
+    contextmanager,
+    nullcontext,
+)
+import re
+import sys
+from typing import (
+    TYPE_CHECKING,
+    Literal,
+    cast,
+)
+import warnings
+
+from pandas.compat import PY311
+
+if TYPE_CHECKING:
+    from collections.abc import (
+        Generator,
+        Sequence,
+    )
+
+
+@contextmanager
+def assert_produces_warning(
+    expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None = Warning,
+    filter_level: Literal[
+        "error", "ignore", "always", "default", "module", "once"
+    ] = "always",
+    check_stacklevel: bool = True,
+    raise_on_extra_warnings: bool = True,
+    match: str | None = None,
+) -> Generator[list[warnings.WarningMessage], None, None]:
+    """
+    Context manager for running code expected to either raise a specific warning,
+    multiple specific warnings, or not raise any warnings. Verifies that the code
+    raises the expected warning(s), and that it does not raise any other unexpected
+    warnings. It is basically a wrapper around ``warnings.catch_warnings``.
+
+    Parameters
+    ----------
+    expected_warning : {Warning, False, tuple[Warning, ...], None}, default Warning
+        The type of warning to check for. ``Warning`` is the base
+        class for all warnings. To check for multiple warning types,
+        pass them as a tuple. To check that no warning is returned,
+        specify ``False`` or ``None``.
+    filter_level : str or None, default "always"
+        Specifies whether warnings are ignored, displayed, or turned
+        into errors.
+        Valid values are:
+
+        * "error" - turns matching warnings into exceptions
+        * "ignore" - discard the warning
+        * "always" - always emit a warning
+        * "default" - print the warning the first time it is generated
+          from each location
+        * "module" - print the warning the first time it is generated
+          from each module
+        * "once" - print the warning the first time it is generated
+
+    check_stacklevel : bool, default True
+        If True, displays the line that called the function containing
+        the warning to show where the function is called. Otherwise, the
+        line that implements the function is displayed.
+    raise_on_extra_warnings : bool, default True
+        Whether extra warnings not of the type `expected_warning` should
+        cause the test to fail.
+    match : str, optional
+        Match warning message.
+
+    Examples
+    --------
+    >>> import warnings
+    >>> with assert_produces_warning():
+    ...     warnings.warn(UserWarning())
+    ...
+    >>> with assert_produces_warning(False):
+    ...     warnings.warn(RuntimeWarning())
+    ...
+    Traceback (most recent call last):
+        ...
+    AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
+    >>> with assert_produces_warning(UserWarning):
+    ...     warnings.warn(RuntimeWarning())
+    Traceback (most recent call last):
+        ...
+    AssertionError: Did not see expected warning of class 'UserWarning'.
+
+    .. warning:: This is *not* thread-safe.
+    """
+    __tracebackhide__ = True
+
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter(filter_level)
+        try:
+            yield w
+        finally:
+            if expected_warning:
+                expected_warning = cast(type[Warning], expected_warning)
+                _assert_caught_expected_warning(
+                    caught_warnings=w,
+                    expected_warning=expected_warning,
+                    match=match,
+                    check_stacklevel=check_stacklevel,
+                )
+            if raise_on_extra_warnings:
+                _assert_caught_no_extra_warnings(
+                    caught_warnings=w,
+                    expected_warning=expected_warning,
+                )
+
+
+def maybe_produces_warning(warning: type[Warning], condition: bool, **kwargs):
+    """
+    Return a context manager that possibly checks a warning based on the condition.
+    """
+    if condition:
+        return assert_produces_warning(warning, **kwargs)
+    else:
+        return nullcontext()
+
+
+def _assert_caught_expected_warning(
+    *,
+    caught_warnings: Sequence[warnings.WarningMessage],
+    expected_warning: type[Warning],
+    match: str | None,
+    check_stacklevel: bool,
+) -> None:
+    """Assert that there was the expected warning among the caught warnings."""
+    saw_warning = False
+    matched_message = False
+    unmatched_messages = []
+
+    for actual_warning in caught_warnings:
+        if issubclass(actual_warning.category, expected_warning):
+            saw_warning = True
+
+            if check_stacklevel:
+                _assert_raised_with_correct_stacklevel(actual_warning)
+
+            if match is not None:
+                if re.search(match, str(actual_warning.message)):
+                    matched_message = True
+                else:
+                    unmatched_messages.append(actual_warning.message)
+
+    if not saw_warning:
+        raise AssertionError(
+            f"Did not see expected warning of class "
+            f"{repr(expected_warning.__name__)}"
+        )
+
+    if match and not matched_message:
+        raise AssertionError(
+            f"Did not see warning {repr(expected_warning.__name__)} "
+            f"matching '{match}'. The emitted warning messages are "
+            f"{unmatched_messages}"
+        )
+
+
+def _assert_caught_no_extra_warnings(
+    *,
+    caught_warnings: Sequence[warnings.WarningMessage],
+    expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None,
+) -> None:
+    """Assert that no extra warnings apart from the expected ones are caught."""
+    extra_warnings = []
+
+    for actual_warning in caught_warnings:
+        if _is_unexpected_warning(actual_warning, expected_warning):
+            # GH#38630 pytest.filterwarnings does not suppress these.
+            if actual_warning.category == ResourceWarning:
+                # GH 44732: Don't make the CI flaky by filtering SSL-related
+                # ResourceWarning from dependencies
+                if "unclosed <ssl.SSLSocket" in str(actual_warning.message):
+                    continue
+                # GH 44844: Matplotlib leaves font files open during the entire
+                # process upon import. Don't make CI flaky if ResourceWarning
+                # raised due to these open files.
+                if any("matplotlib" in mod for mod in sys.modules):
+                    continue
+            if PY311 and actual_warning.category == EncodingWarning:
+                # EncodingWarnings are checked in the CI
+                # pyproject.toml errors on EncodingWarnings in pandas
+                # Ignore EncodingWarnings from other libraries
+                continue
+            extra_warnings.append(
+                (
+                    actual_warning.category.__name__,
+                    actual_warning.message,
+                    actual_warning.filename,
+                    actual_warning.lineno,
+                )
+            )
+    if extra_warnings:
+        raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}")
+
+
+def _is_unexpected_warning(
+    actual_warning: warnings.WarningMessage,
+    expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None,
+) -> bool:
+    """Check if the actual warning issued is unexpected."""
+    if actual_warning and not expected_warning:
+        return True
+    expected_warning = cast(type[Warning], expected_warning)
+    return bool(not issubclass(actual_warning.category, expected_warning))
+
+
+def _assert_raised_with_correct_stacklevel(
+    actual_warning: warnings.WarningMessage,
+) -> None:
+    from inspect import (
+        getframeinfo,
+        stack,
+    )
+
+    caller = getframeinfo(stack()[4][0])
+    msg = (
+        "Warning not set with correct stacklevel. "
+        f"File where warning is raised: {actual_warning.filename} != "
+        f"{caller.filename}. 
Warning message: {actual_warning.message}" + ) + assert actual_warning.filename == caller.filename, msg diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/asserters.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/asserters.py new file mode 100644 index 00000000..0591394f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/asserters.py @@ -0,0 +1,1365 @@ +from __future__ import annotations + +import operator +from typing import ( + TYPE_CHECKING, + Literal, + cast, +) + +import numpy as np + +from pandas._libs.missing import is_matching_na +from pandas._libs.sparse import SparseIndex +import pandas._libs.testing as _testing +from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions + +from pandas.core.dtypes.common import ( + is_bool, + is_integer_dtype, + is_number, + is_numeric_dtype, + needs_i8_conversion, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + NumpyEADtype, +) +from pandas.core.dtypes.missing import array_equivalent + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + Index, + IntervalDtype, + IntervalIndex, + MultiIndex, + PeriodIndex, + RangeIndex, + Series, + TimedeltaIndex, +) +from pandas.core.algorithms import take_nd +from pandas.core.arrays import ( + DatetimeArray, + ExtensionArray, + IntervalArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin +from pandas.core.arrays.string_ import StringDtype +from pandas.core.indexes.api import safe_sort_index + +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from pandas._typing import DtypeObj + + +def assert_almost_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = "equiv", + rtol: float = 1.0e-5, + atol: float = 1.0e-8, + **kwargs, +) -> None: + """ + Check that the left and right objects are approximately equal. + + By approximately equal, we refer to objects that are numbers or that + contain numbers which may be equivalent to specific levels of precision. + + Parameters + ---------- + left : object + right : object + check_dtype : bool or {'equiv'}, default 'equiv' + Check dtype if both a and b are the same type. If 'equiv' is passed in, + then `RangeIndex` and `Index` with int64 dtype are also considered + equivalent when doing type checking. + rtol : float, default 1e-5 + Relative tolerance. + atol : float, default 1e-8 + Absolute tolerance. + """ + if isinstance(left, Index): + assert_index_equal( + left, + right, + check_exact=False, + exact=check_dtype, + rtol=rtol, + atol=atol, + **kwargs, + ) + + elif isinstance(left, Series): + assert_series_equal( + left, + right, + check_exact=False, + check_dtype=check_dtype, + rtol=rtol, + atol=atol, + **kwargs, + ) + + elif isinstance(left, DataFrame): + assert_frame_equal( + left, + right, + check_exact=False, + check_dtype=check_dtype, + rtol=rtol, + atol=atol, + **kwargs, + ) + + else: + # Other sequences. + if check_dtype: + if is_number(left) and is_number(right): + # Do not compare numeric classes, like np.float64 and float. + pass + elif is_bool(left) and is_bool(right): + # Do not compare bool classes, like np.bool_ and bool. 
+ pass + else: + if isinstance(left, np.ndarray) or isinstance(right, np.ndarray): + obj = "numpy array" + else: + obj = "Input" + assert_class_equal(left, right, obj=obj) + + # if we have "equiv", this becomes True + _testing.assert_almost_equal( + left, right, check_dtype=bool(check_dtype), rtol=rtol, atol=atol, **kwargs + ) + + +def _check_isinstance(left, right, cls): + """ + Helper method for our assert_* methods that ensures that + the two objects being compared have the right type before + proceeding with the comparison. + + Parameters + ---------- + left : The first object being compared. + right : The second object being compared. + cls : The class type to check against. + + Raises + ------ + AssertionError : Either `left` or `right` is not an instance of `cls`. + """ + cls_name = cls.__name__ + + if not isinstance(left, cls): + raise AssertionError( + f"{cls_name} Expected type {cls}, found {type(left)} instead" + ) + if not isinstance(right, cls): + raise AssertionError( + f"{cls_name} Expected type {cls}, found {type(right)} instead" + ) + + +def assert_dict_equal(left, right, compare_keys: bool = True) -> None: + _check_isinstance(left, right, dict) + _testing.assert_dict_equal(left, right, compare_keys=compare_keys) + + +def assert_index_equal( + left: Index, + right: Index, + exact: bool | str = "equiv", + check_names: bool = True, + check_exact: bool = True, + check_categorical: bool = True, + check_order: bool = True, + rtol: float = 1.0e-5, + atol: float = 1.0e-8, + obj: str = "Index", +) -> None: + """ + Check that left and right Index are equal. + + Parameters + ---------- + left : Index + right : Index + exact : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. If 'equiv', then RangeIndex can be substituted for + Index with an int64 dtype as well. + check_names : bool, default True + Whether to check the names attribute. + check_exact : bool, default True + Whether to compare number exactly. + check_categorical : bool, default True + Whether to compare internal Categorical exactly. + check_order : bool, default True + Whether to compare the order of index entries as well as their values. + If True, both indexes must contain the same elements, in the same order. + If False, both indexes must contain the same elements, but in any order. + + .. versionadded:: 1.2.0 + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'Index' + Specify object name being compared, internally used to show appropriate + assertion message. 
+ + Examples + -------- + >>> from pandas import testing as tm + >>> a = pd.Index([1, 2, 3]) + >>> b = pd.Index([1, 2, 3]) + >>> tm.assert_index_equal(a, b) + """ + __tracebackhide__ = True + + def _check_types(left, right, obj: str = "Index") -> None: + if not exact: + return + + assert_class_equal(left, right, exact=exact, obj=obj) + assert_attr_equal("inferred_type", left, right, obj=obj) + + # Skip exact dtype checking when `check_categorical` is False + if isinstance(left.dtype, CategoricalDtype) and isinstance( + right.dtype, CategoricalDtype + ): + if check_categorical: + assert_attr_equal("dtype", left, right, obj=obj) + assert_index_equal(left.categories, right.categories, exact=exact) + return + + assert_attr_equal("dtype", left, right, obj=obj) + + def _get_ilevel_values(index, level): + # accept level number only + unique = index.levels[level] + level_codes = index.codes[level] + filled = take_nd(unique._values, level_codes, fill_value=unique._na_value) + return unique._shallow_copy(filled, name=index.names[level]) + + # instance validation + _check_isinstance(left, right, Index) + + # class / dtype comparison + _check_types(left, right, obj=obj) + + # level comparison + if left.nlevels != right.nlevels: + msg1 = f"{obj} levels are different" + msg2 = f"{left.nlevels}, {left}" + msg3 = f"{right.nlevels}, {right}" + raise_assert_detail(obj, msg1, msg2, msg3) + + # length comparison + if len(left) != len(right): + msg1 = f"{obj} length are different" + msg2 = f"{len(left)}, {left}" + msg3 = f"{len(right)}, {right}" + raise_assert_detail(obj, msg1, msg2, msg3) + + # If order doesn't matter then sort the index entries + if not check_order: + left = safe_sort_index(left) + right = safe_sort_index(right) + + # MultiIndex special comparison for little-friendly error messages + if isinstance(left, MultiIndex): + right = cast(MultiIndex, right) + + for level in range(left.nlevels): + # cannot use get_level_values here because it can change dtype + llevel = _get_ilevel_values(left, level) + rlevel = _get_ilevel_values(right, level) + + lobj = f"MultiIndex level [{level}]" + assert_index_equal( + llevel, + rlevel, + exact=exact, + check_names=check_names, + check_exact=check_exact, + check_categorical=check_categorical, + rtol=rtol, + atol=atol, + obj=lobj, + ) + # get_level_values may change dtype + _check_types(left.levels[level], right.levels[level], obj=obj) + + # skip exact index checking when `check_categorical` is False + elif check_exact and check_categorical: + if not left.equals(right): + mismatch = left._values != right._values + + if not isinstance(mismatch, np.ndarray): + mismatch = cast("ExtensionArray", mismatch).fillna(True) + + diff = np.sum(mismatch.astype(int)) * 100.0 / len(left) + msg = f"{obj} values are different ({np.round(diff, 5)} %)" + raise_assert_detail(obj, msg, left, right) + else: + # if we have "equiv", this becomes True + exact_bool = bool(exact) + _testing.assert_almost_equal( + left.values, + right.values, + rtol=rtol, + atol=atol, + check_dtype=exact_bool, + obj=obj, + lobj=left, + robj=right, + ) + + # metadata comparison + if check_names: + assert_attr_equal("names", left, right, obj=obj) + if isinstance(left, PeriodIndex) or isinstance(right, PeriodIndex): + assert_attr_equal("dtype", left, right, obj=obj) + if isinstance(left, IntervalIndex) or isinstance(right, IntervalIndex): + assert_interval_array_equal(left._values, right._values) + + if check_categorical: + if isinstance(left.dtype, CategoricalDtype) or isinstance( + right.dtype, 
CategoricalDtype
+        ):
+            assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
+
+
+def assert_class_equal(
+    left, right, exact: bool | str = True, obj: str = "Input"
+) -> None:
+    """
+    Checks classes are equal.
+    """
+    __tracebackhide__ = True
+
+    def repr_class(x):
+        if isinstance(x, Index):
+            # return Index as it is to include values in the error message
+            return x
+
+        return type(x).__name__
+
+    def is_class_equiv(idx: Index) -> bool:
+        """Classes that are a RangeIndex (sub-)instance or exactly an `Index`.
+
+        This only checks class equivalence. There is a separate check that the
+        dtype is int64.
+        """
+        return type(idx) is Index or isinstance(idx, RangeIndex)
+
+    if type(left) == type(right):
+        return
+
+    if exact == "equiv":
+        if is_class_equiv(left) and is_class_equiv(right):
+            return
+
+    msg = f"{obj} classes are different"
+    raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
+
+
+def assert_attr_equal(attr: str, left, right, obj: str = "Attributes") -> None:
+    """
+    Check attributes are equal. Both objects must have the attribute.
+
+    Parameters
+    ----------
+    attr : str
+        Attribute name being compared.
+    left : object
+    right : object
+    obj : str, default 'Attributes'
+        Specify object name being compared, internally used to show appropriate
+        assertion message
+    """
+    __tracebackhide__ = True
+
+    left_attr = getattr(left, attr)
+    right_attr = getattr(right, attr)
+
+    if left_attr is right_attr or is_matching_na(left_attr, right_attr):
+        # e.g. both np.nan, both NaT, both pd.NA, ...
+        return None
+
+    try:
+        result = left_attr == right_attr
+    except TypeError:
+        # datetimetz on rhs may raise TypeError
+        result = False
+    if (left_attr is pd.NA) ^ (right_attr is pd.NA):
+        result = False
+    elif not isinstance(result, bool):
+        result = result.all()
+
+    if not result:
+        msg = f'Attribute "{attr}" are different'
+        raise_assert_detail(obj, msg, left_attr, right_attr)
+    return None
+
+
+def assert_is_valid_plot_return_object(objs) -> None:
+    import matplotlib.pyplot as plt
+
+    if isinstance(objs, (Series, np.ndarray)):
+        for el in objs.ravel():
+            msg = (
+                "one of 'objs' is not a matplotlib Axes instance, "
+                f"type encountered {repr(type(el).__name__)}"
+            )
+            assert isinstance(el, (plt.Axes, dict)), msg
+    else:
+        msg = (
+            "objs is neither an ndarray of Artist instances nor a single "
+            "Artist instance, tuple, or dict, 'objs' is a "
+            f"{repr(type(objs).__name__)}"
+        )
+        assert isinstance(objs, (plt.Artist, tuple, dict)), msg
+
+
+def assert_is_sorted(seq) -> None:
+    """Assert that the sequence is sorted."""
+    if isinstance(seq, (Index, Series)):
+        seq = seq.values
+    # sorting does not change precisions
+    assert_numpy_array_equal(seq, np.sort(np.array(seq)))
+
+
+def assert_categorical_equal(
+    left,
+    right,
+    check_dtype: bool = True,
+    check_category_order: bool = True,
+    obj: str = "Categorical",
+) -> None:
+    """
+    Test that Categoricals are equivalent.
+
+    Parameters
+    ----------
+    left : Categorical
+    right : Categorical
+    check_dtype : bool, default True
+        Check that the integer dtype of the codes is the same.
+    check_category_order : bool, default True
+        Whether the order of the categories should be compared, which
+        implies identical integer codes. If False, only the resulting
+        values are compared. The ordered attribute is
+        checked regardless.
+    obj : str, default 'Categorical'
+        Specify object name being compared, internally used to show appropriate
+        assertion message.
+ """ + _check_isinstance(left, right, Categorical) + + exact: bool | str + if isinstance(left.categories, RangeIndex) or isinstance( + right.categories, RangeIndex + ): + exact = "equiv" + else: + # We still want to require exact matches for Index + exact = True + + if check_category_order: + assert_index_equal( + left.categories, right.categories, obj=f"{obj}.categories", exact=exact + ) + assert_numpy_array_equal( + left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes" + ) + else: + try: + lc = left.categories.sort_values() + rc = right.categories.sort_values() + except TypeError: + # e.g. '<' not supported between instances of 'int' and 'str' + lc, rc = left.categories, right.categories + assert_index_equal(lc, rc, obj=f"{obj}.categories", exact=exact) + assert_index_equal( + left.categories.take(left.codes), + right.categories.take(right.codes), + obj=f"{obj}.values", + exact=exact, + ) + + assert_attr_equal("ordered", left, right, obj=obj) + + +def assert_interval_array_equal( + left, right, exact: bool | Literal["equiv"] = "equiv", obj: str = "IntervalArray" +) -> None: + """ + Test that two IntervalArrays are equivalent. + + Parameters + ---------- + left, right : IntervalArray + The IntervalArrays to compare. + exact : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. If 'equiv', then RangeIndex can be substituted for + Index with an int64 dtype as well. + obj : str, default 'IntervalArray' + Specify object name being compared, internally used to show appropriate + assertion message + """ + _check_isinstance(left, right, IntervalArray) + + kwargs = {} + if left._left.dtype.kind in "mM": + # We have a DatetimeArray or TimedeltaArray + kwargs["check_freq"] = False + + assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs) + assert_equal(left._right, right._right, obj=f"{obj}.left", **kwargs) + + assert_attr_equal("closed", left, right, obj=obj) + + +def assert_period_array_equal(left, right, obj: str = "PeriodArray") -> None: + _check_isinstance(left, right, PeriodArray) + + assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray") + assert_attr_equal("dtype", left, right, obj=obj) + + +def assert_datetime_array_equal( + left, right, obj: str = "DatetimeArray", check_freq: bool = True +) -> None: + __tracebackhide__ = True + _check_isinstance(left, right, DatetimeArray) + + assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray") + if check_freq: + assert_attr_equal("freq", left, right, obj=obj) + assert_attr_equal("tz", left, right, obj=obj) + + +def assert_timedelta_array_equal( + left, right, obj: str = "TimedeltaArray", check_freq: bool = True +) -> None: + __tracebackhide__ = True + _check_isinstance(left, right, TimedeltaArray) + assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray") + if check_freq: + assert_attr_equal("freq", left, right, obj=obj) + + +def raise_assert_detail( + obj, message, left, right, diff=None, first_diff=None, index_values=None +): + __tracebackhide__ = True + + msg = f"""{obj} are different + +{message}""" + + if isinstance(index_values, np.ndarray): + msg += f"\n[index]: {pprint_thing(index_values)}" + + if isinstance(left, np.ndarray): + left = pprint_thing(left) + elif isinstance(left, (CategoricalDtype, NumpyEADtype, StringDtype)): + left = repr(left) + + if isinstance(right, np.ndarray): + right = pprint_thing(right) + elif isinstance(right, (CategoricalDtype, NumpyEADtype, StringDtype)): + 
right = repr(right) + + msg += f""" +[left]: {left} +[right]: {right}""" + + if diff is not None: + msg += f"\n[diff]: {diff}" + + if first_diff is not None: + msg += f"\n{first_diff}" + + raise AssertionError(msg) + + +def assert_numpy_array_equal( + left, + right, + strict_nan: bool = False, + check_dtype: bool | Literal["equiv"] = True, + err_msg=None, + check_same=None, + obj: str = "numpy array", + index_values=None, +) -> None: + """ + Check that 'np.ndarray' is equivalent. + + Parameters + ---------- + left, right : numpy.ndarray or iterable + The two arrays to be compared. + strict_nan : bool, default False + If True, consider NaN and None to be different. + check_dtype : bool, default True + Check dtype if both a and b are np.ndarray. + err_msg : str, default None + If provided, used as assertion message. + check_same : None|'copy'|'same', default None + Ensure left and right refer/do not refer to the same memory area. + obj : str, default 'numpy array' + Specify object name being compared, internally used to show appropriate + assertion message. + index_values : numpy.ndarray, default None + optional index (shared by both left and right), used in output. + """ + __tracebackhide__ = True + + # instance validation + # Show a detailed error message when classes are different + assert_class_equal(left, right, obj=obj) + # both classes must be an np.ndarray + _check_isinstance(left, right, np.ndarray) + + def _get_base(obj): + return obj.base if getattr(obj, "base", None) is not None else obj + + left_base = _get_base(left) + right_base = _get_base(right) + + if check_same == "same": + if left_base is not right_base: + raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}") + elif check_same == "copy": + if left_base is right_base: + raise AssertionError(f"{repr(left_base)} is {repr(right_base)}") + + def _raise(left, right, err_msg): + if err_msg is None: + if left.shape != right.shape: + raise_assert_detail( + obj, f"{obj} shapes are different", left.shape, right.shape + ) + + diff = 0 + for left_arr, right_arr in zip(left, right): + # count up differences + if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan): + diff += 1 + + diff = diff * 100.0 / left.size + msg = f"{obj} values are different ({np.round(diff, 5)} %)" + raise_assert_detail(obj, msg, left, right, index_values=index_values) + + raise AssertionError(err_msg) + + # compare shape and values + if not array_equivalent(left, right, strict_nan=strict_nan): + _raise(left, right, err_msg) + + if check_dtype: + if isinstance(left, np.ndarray) and isinstance(right, np.ndarray): + assert_attr_equal("dtype", left, right, obj=obj) + + +def assert_extension_array_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = True, + index_values=None, + check_exact: bool = False, + rtol: float = 1.0e-5, + atol: float = 1.0e-8, + obj: str = "ExtensionArray", +) -> None: + """ + Check that left and right ExtensionArrays are equal. + + Parameters + ---------- + left, right : ExtensionArray + The two arrays to compare. + check_dtype : bool, default True + Whether to check if the ExtensionArray dtypes are identical. + index_values : numpy.ndarray, default None + Optional index (shared by both left and right), used in output. + check_exact : bool, default False + Whether to compare number exactly. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. 
+    obj : str, default 'ExtensionArray'
+        Specify object name being compared, internally used to show appropriate
+        assertion message.
+
+        .. versionadded:: 2.0.0
+
+    Notes
+    -----
+    Missing values are checked separately from valid values.
+    A mask of missing values is computed for each and checked to match.
+    The remaining all-valid values are cast to object dtype and checked.
+
+    Examples
+    --------
+    >>> from pandas import testing as tm
+    >>> a = pd.Series([1, 2, 3, 4])
+    >>> b, c = a.array, a.array
+    >>> tm.assert_extension_array_equal(b, c)
+    """
+    assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
+    assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
+    if check_dtype:
+        assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
+
+    if (
+        isinstance(left, DatetimeLikeArrayMixin)
+        and isinstance(right, DatetimeLikeArrayMixin)
+        and type(right) == type(left)
+    ):
+        # GH 52449
+        if not check_dtype and left.dtype.kind in "mM":
+            if not isinstance(left.dtype, np.dtype):
+                l_unit = cast(DatetimeTZDtype, left.dtype).unit
+            else:
+                l_unit = np.datetime_data(left.dtype)[0]
+            if not isinstance(right.dtype, np.dtype):
+                r_unit = cast(DatetimeTZDtype, right.dtype).unit
+            else:
+                r_unit = np.datetime_data(right.dtype)[0]
+            if (
+                l_unit != r_unit
+                and compare_mismatched_resolutions(
+                    left._ndarray, right._ndarray, operator.eq
+                ).all()
+            ):
+                return
+        # Avoid slow object-dtype comparisons
+        # np.asarray for case where we have a np.MaskedArray
+        assert_numpy_array_equal(
+            np.asarray(left.asi8),
+            np.asarray(right.asi8),
+            index_values=index_values,
+            obj=obj,
+        )
+        return
+
+    left_na = np.asarray(left.isna())
+    right_na = np.asarray(right.isna())
+    assert_numpy_array_equal(
+        left_na, right_na, obj=f"{obj} NA mask", index_values=index_values
+    )
+
+    left_valid = left[~left_na].to_numpy(dtype=object)
+    right_valid = right[~right_na].to_numpy(dtype=object)
+    if check_exact:
+        assert_numpy_array_equal(
+            left_valid, right_valid, obj=obj, index_values=index_values
+        )
+    else:
+        _testing.assert_almost_equal(
+            left_valid,
+            right_valid,
+            check_dtype=bool(check_dtype),
+            rtol=rtol,
+            atol=atol,
+            obj=obj,
+            index_values=index_values,
+        )
+
+
+# This could be refactored to use the NDFrame.equals method
+def assert_series_equal(
+    left,
+    right,
+    check_dtype: bool | Literal["equiv"] = True,
+    check_index_type: bool | Literal["equiv"] = "equiv",
+    check_series_type: bool = True,
+    check_names: bool = True,
+    check_exact: bool = False,
+    check_datetimelike_compat: bool = False,
+    check_categorical: bool = True,
+    check_category_order: bool = True,
+    check_freq: bool = True,
+    check_flags: bool = True,
+    rtol: float = 1.0e-5,
+    atol: float = 1.0e-8,
+    obj: str = "Series",
+    *,
+    check_index: bool = True,
+    check_like: bool = False,
+) -> None:
+    """
+    Check that left and right Series are equal.
+
+    Parameters
+    ----------
+    left : Series
+    right : Series
+    check_dtype : bool, default True
+        Whether to check the Series dtype is identical.
+    check_index_type : bool or {'equiv'}, default 'equiv'
+        Whether to check the Index class, dtype and inferred_type
+        are identical.
+    check_series_type : bool, default True
+        Whether to check the Series class is identical.
+    check_names : bool, default True
+        Whether to check the Series and Index names attribute.
+    check_exact : bool, default False
+        Whether to compare number exactly.
+    check_datetimelike_compat : bool, default False
+        Compare datetime-like which is comparable ignoring dtype.
+ check_categorical : bool, default True + Whether to compare internal Categorical exactly. + check_category_order : bool, default True + Whether to compare category order of internal Categoricals. + check_freq : bool, default True + Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex. + check_flags : bool, default True + Whether to check the `flags` attribute. + + .. versionadded:: 1.2.0 + + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'Series' + Specify object name being compared, internally used to show appropriate + assertion message. + check_index : bool, default True + Whether to check index equivalence. If False, then compare only values. + + .. versionadded:: 1.3.0 + check_like : bool, default False + If True, ignore the order of the index. Must be False if check_index is False. + Note: same labels must be with the same data. + + .. versionadded:: 1.5.0 + + Examples + -------- + >>> from pandas import testing as tm + >>> a = pd.Series([1, 2, 3, 4]) + >>> b = pd.Series([1, 2, 3, 4]) + >>> tm.assert_series_equal(a, b) + """ + __tracebackhide__ = True + + if not check_index and check_like: + raise ValueError("check_like must be False if check_index is False") + + # instance validation + _check_isinstance(left, right, Series) + + if check_series_type: + assert_class_equal(left, right, obj=obj) + + # length comparison + if len(left) != len(right): + msg1 = f"{len(left)}, {left.index}" + msg2 = f"{len(right)}, {right.index}" + raise_assert_detail(obj, "Series length are different", msg1, msg2) + + if check_flags: + assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}" + + if check_index: + # GH #38183 + assert_index_equal( + left.index, + right.index, + exact=check_index_type, + check_names=check_names, + check_exact=check_exact, + check_categorical=check_categorical, + check_order=not check_like, + rtol=rtol, + atol=atol, + obj=f"{obj}.index", + ) + + if check_like: + left = left.reindex_like(right) + + if check_freq and isinstance(left.index, (DatetimeIndex, TimedeltaIndex)): + lidx = left.index + ridx = right.index + assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq) + + if check_dtype: + # We want to skip exact dtype checking when `check_categorical` + # is False. We'll still raise if only one is a `Categorical`, + # regardless of `check_categorical` + if ( + isinstance(left.dtype, CategoricalDtype) + and isinstance(right.dtype, CategoricalDtype) + and not check_categorical + ): + pass + else: + assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}") + + if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype): + left_values = left._values + right_values = right._values + # Only check exact if dtype is numeric + if isinstance(left_values, ExtensionArray) and isinstance( + right_values, ExtensionArray + ): + assert_extension_array_equal( + left_values, + right_values, + check_dtype=check_dtype, + index_values=np.asarray(left.index), + obj=str(obj), + ) + else: + assert_numpy_array_equal( + left_values, + right_values, + check_dtype=check_dtype, + obj=str(obj), + index_values=np.asarray(left.index), + ) + elif check_datetimelike_compat and ( + needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype) + ): + # we want to check only if we have compat dtypes + # e.g. 
integer and M|m are NOT compat, but we can simply check + # the values in that case + + # datetimelike may have different objects (e.g. datetime.datetime + # vs Timestamp) but will compare equal + if not Index(left._values).equals(Index(right._values)): + msg = ( + f"[datetimelike_compat=True] {left._values} " + f"is not equal to {right._values}." + ) + raise AssertionError(msg) + elif isinstance(left.dtype, IntervalDtype) and isinstance( + right.dtype, IntervalDtype + ): + assert_interval_array_equal(left.array, right.array) + elif isinstance(left.dtype, CategoricalDtype) or isinstance( + right.dtype, CategoricalDtype + ): + _testing.assert_almost_equal( + left._values, + right._values, + rtol=rtol, + atol=atol, + check_dtype=bool(check_dtype), + obj=str(obj), + index_values=np.asarray(left.index), + ) + elif isinstance(left.dtype, ExtensionDtype) and isinstance( + right.dtype, ExtensionDtype + ): + assert_extension_array_equal( + left._values, + right._values, + rtol=rtol, + atol=atol, + check_dtype=check_dtype, + index_values=np.asarray(left.index), + obj=str(obj), + ) + elif is_extension_array_dtype_and_needs_i8_conversion( + left.dtype, right.dtype + ) or is_extension_array_dtype_and_needs_i8_conversion(right.dtype, left.dtype): + assert_extension_array_equal( + left._values, + right._values, + check_dtype=check_dtype, + index_values=np.asarray(left.index), + obj=str(obj), + ) + elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype): + # DatetimeArray or TimedeltaArray + assert_extension_array_equal( + left._values, + right._values, + check_dtype=check_dtype, + index_values=np.asarray(left.index), + obj=str(obj), + ) + else: + _testing.assert_almost_equal( + left._values, + right._values, + rtol=rtol, + atol=atol, + check_dtype=bool(check_dtype), + obj=str(obj), + index_values=np.asarray(left.index), + ) + + # metadata comparison + if check_names: + assert_attr_equal("name", left, right, obj=obj) + + if check_categorical: + if isinstance(left.dtype, CategoricalDtype) or isinstance( + right.dtype, CategoricalDtype + ): + assert_categorical_equal( + left._values, + right._values, + obj=f"{obj} category", + check_category_order=check_category_order, + ) + + +# This could be refactored to use the NDFrame.equals method +def assert_frame_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = True, + check_index_type: bool | Literal["equiv"] = "equiv", + check_column_type: bool | Literal["equiv"] = "equiv", + check_frame_type: bool = True, + check_names: bool = True, + by_blocks: bool = False, + check_exact: bool = False, + check_datetimelike_compat: bool = False, + check_categorical: bool = True, + check_like: bool = False, + check_freq: bool = True, + check_flags: bool = True, + rtol: float = 1.0e-5, + atol: float = 1.0e-8, + obj: str = "DataFrame", +) -> None: + """ + Check that left and right DataFrame are equal. + + This function is intended to compare two DataFrames and output any + differences. It is mostly intended for use in unit tests. + Additional parameters allow varying the strictness of the + equality checks performed. + + Parameters + ---------- + left : DataFrame + First DataFrame to compare. + right : DataFrame + Second DataFrame to compare. + check_dtype : bool, default True + Whether to check the DataFrame dtype is identical. + check_index_type : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. 
+ check_column_type : bool or {'equiv'}, default 'equiv' + Whether to check the columns class, dtype and inferred_type + are identical. Is passed as the ``exact`` argument of + :func:`assert_index_equal`. + check_frame_type : bool, default True + Whether to check the DataFrame class is identical. + check_names : bool, default True + Whether to check that the `names` attribute for both the `index` + and `column` attributes of the DataFrame is identical. + by_blocks : bool, default False + Specify how to compare internal data. If False, compare by columns. + If True, compare by blocks. + check_exact : bool, default False + Whether to compare number exactly. + check_datetimelike_compat : bool, default False + Compare datetime-like which is comparable ignoring dtype. + check_categorical : bool, default True + Whether to compare internal Categorical exactly. + check_like : bool, default False + If True, ignore the order of index & columns. + Note: index labels must match their respective rows + (same as in columns) - same labels must be with the same data. + check_freq : bool, default True + Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex. + check_flags : bool, default True + Whether to check the `flags` attribute. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'DataFrame' + Specify object name being compared, internally used to show appropriate + assertion message. + + See Also + -------- + assert_series_equal : Equivalent method for asserting Series equality. + DataFrame.equals : Check DataFrame equality. + + Examples + -------- + This example shows comparing two DataFrames that are equal + but with columns of differing dtypes. + + >>> from pandas.testing import assert_frame_equal + >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}) + >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]}) + + df1 equals itself. + + >>> assert_frame_equal(df1, df1) + + df1 differs from df2 as column 'b' is of a different type. + + >>> assert_frame_equal(df1, df2) + Traceback (most recent call last): + ... + AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different + + Attribute "dtype" are different + [left]: int64 + [right]: float64 + + Ignore differing dtypes in columns with check_dtype. 
+ + >>> assert_frame_equal(df1, df2, check_dtype=False) + """ + __tracebackhide__ = True + + # instance validation + _check_isinstance(left, right, DataFrame) + + if check_frame_type: + assert isinstance(left, type(right)) + # assert_class_equal(left, right, obj=obj) + + # shape comparison + if left.shape != right.shape: + raise_assert_detail( + obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}" + ) + + if check_flags: + assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}" + + # index comparison + assert_index_equal( + left.index, + right.index, + exact=check_index_type, + check_names=check_names, + check_exact=check_exact, + check_categorical=check_categorical, + check_order=not check_like, + rtol=rtol, + atol=atol, + obj=f"{obj}.index", + ) + + # column comparison + assert_index_equal( + left.columns, + right.columns, + exact=check_column_type, + check_names=check_names, + check_exact=check_exact, + check_categorical=check_categorical, + check_order=not check_like, + rtol=rtol, + atol=atol, + obj=f"{obj}.columns", + ) + + if check_like: + left = left.reindex_like(right) + + # compare by blocks + if by_blocks: + rblocks = right._to_dict_of_blocks(copy=False) + lblocks = left._to_dict_of_blocks(copy=False) + for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))): + assert dtype in lblocks + assert dtype in rblocks + assert_frame_equal( + lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj + ) + + # compare by columns + else: + for i, col in enumerate(left.columns): + # We have already checked that columns match, so we can do + # fast location-based lookups + lcol = left._ixs(i, axis=1) + rcol = right._ixs(i, axis=1) + + # GH #38183 + # use check_index=False, because we do not want to run + # assert_index_equal for each column, + # as we already checked it for the whole dataframe before. + assert_series_equal( + lcol, + rcol, + check_dtype=check_dtype, + check_index_type=check_index_type, + check_exact=check_exact, + check_names=check_names, + check_datetimelike_compat=check_datetimelike_compat, + check_categorical=check_categorical, + check_freq=check_freq, + obj=f'{obj}.iloc[:, {i}] (column name="{col}")', + rtol=rtol, + atol=atol, + check_index=False, + check_flags=False, + ) + + +def assert_equal(left, right, **kwargs) -> None: + """ + Wrapper for tm.assert_*_equal to dispatch to the appropriate test function. + + Parameters + ---------- + left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray + The two items to be compared. + **kwargs + All keyword arguments are passed through to the underlying assert method. 
+ """ + __tracebackhide__ = True + + if isinstance(left, Index): + assert_index_equal(left, right, **kwargs) + if isinstance(left, (DatetimeIndex, TimedeltaIndex)): + assert left.freq == right.freq, (left.freq, right.freq) + elif isinstance(left, Series): + assert_series_equal(left, right, **kwargs) + elif isinstance(left, DataFrame): + assert_frame_equal(left, right, **kwargs) + elif isinstance(left, IntervalArray): + assert_interval_array_equal(left, right, **kwargs) + elif isinstance(left, PeriodArray): + assert_period_array_equal(left, right, **kwargs) + elif isinstance(left, DatetimeArray): + assert_datetime_array_equal(left, right, **kwargs) + elif isinstance(left, TimedeltaArray): + assert_timedelta_array_equal(left, right, **kwargs) + elif isinstance(left, ExtensionArray): + assert_extension_array_equal(left, right, **kwargs) + elif isinstance(left, np.ndarray): + assert_numpy_array_equal(left, right, **kwargs) + elif isinstance(left, str): + assert kwargs == {} + assert left == right + else: + assert kwargs == {} + assert_almost_equal(left, right) + + +def assert_sp_array_equal(left, right) -> None: + """ + Check that the left and right SparseArray are equal. + + Parameters + ---------- + left : SparseArray + right : SparseArray + """ + _check_isinstance(left, right, pd.arrays.SparseArray) + + assert_numpy_array_equal(left.sp_values, right.sp_values) + + # SparseIndex comparison + assert isinstance(left.sp_index, SparseIndex) + assert isinstance(right.sp_index, SparseIndex) + + left_index = left.sp_index + right_index = right.sp_index + + if not left_index.equals(right_index): + raise_assert_detail( + "SparseArray.index", "index are not equal", left_index, right_index + ) + else: + # Just ensure a + pass + + assert_attr_equal("fill_value", left, right) + assert_attr_equal("dtype", left, right) + assert_numpy_array_equal(left.to_dense(), right.to_dense()) + + +def assert_contains_all(iterable, dic) -> None: + for k in iterable: + assert k in dic, f"Did not contain item: {repr(k)}" + + +def assert_copy(iter1, iter2, **eql_kwargs) -> None: + """ + iter1, iter2: iterables that produce elements + comparable with assert_almost_equal + + Checks that the elements are equal, but not + the same object. (Does not check that items + in sequences are also not the same object) + """ + for elem1, elem2 in zip(iter1, iter2): + assert_almost_equal(elem1, elem2, **eql_kwargs) + msg = ( + f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be " + "different objects, but they were the same object." + ) + assert elem1 is not elem2, msg + + +def is_extension_array_dtype_and_needs_i8_conversion( + left_dtype: DtypeObj, right_dtype: DtypeObj +) -> bool: + """ + Checks that we have the combination of an ExtensionArraydtype and + a dtype that should be converted to int64 + + Returns + ------- + bool + + Related to issue #37609 + """ + return isinstance(left_dtype, ExtensionDtype) and needs_i8_conversion(right_dtype) + + +def assert_indexing_slices_equivalent(ser: Series, l_slc: slice, i_slc: slice) -> None: + """ + Check that ser.iloc[i_slc] matches ser.loc[l_slc] and, if applicable, + ser[l_slc]. + """ + expected = ser.iloc[i_slc] + + assert_series_equal(ser.loc[l_slc], expected) + + if not is_integer_dtype(ser.index): + # For integer indices, .loc and plain getitem are position-based. 
+        assert_series_equal(ser[l_slc], expected)
+
+
+def assert_metadata_equivalent(
+    left: DataFrame | Series, right: DataFrame | Series | None = None
+) -> None:
+    """
+    Check that ._metadata attributes are equivalent.
+    """
+    for attr in left._metadata:
+        val = getattr(left, attr, None)
+        if right is None:
+            assert val is None
+        else:
+            assert val == getattr(right, attr, None)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/compat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/compat.py
new file mode 100644
index 00000000..cc352ba7
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/compat.py
@@ -0,0 +1,29 @@
+"""
+Helpers for sharing tests between DataFrame/Series
+"""
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from pandas import DataFrame
+
+if TYPE_CHECKING:
+    from pandas._typing import DtypeObj
+
+
+def get_dtype(obj) -> DtypeObj:
+    if isinstance(obj, DataFrame):
+        # Note: we are assuming only one column
+        return obj.dtypes.iat[0]
+    else:
+        return obj.dtype
+
+
+def get_obj(df: DataFrame, klass):
+    """
+    For sharing tests using frame_or_series, either return the DataFrame
+    unchanged or return its first column as a Series.
+    """
+    if klass is DataFrame:
+        return df
+    return df._ixs(0, axis=1)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/contexts.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/contexts.py
new file mode 100644
index 00000000..b2bb8e71
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_testing/contexts.py
@@ -0,0 +1,216 @@
+from __future__ import annotations
+
+from contextlib import contextmanager
+import os
+from pathlib import Path
+import tempfile
+from typing import (
+    IO,
+    TYPE_CHECKING,
+    Any,
+)
+import uuid
+
+from pandas.compat import PYPY
+from pandas.errors import ChainedAssignmentError
+
+from pandas import set_option
+
+from pandas.io.common import get_handle
+
+if TYPE_CHECKING:
+    from collections.abc import Generator
+
+    from pandas._typing import (
+        BaseBuffer,
+        CompressionOptions,
+        FilePath,
+    )
+
+
+@contextmanager
+def decompress_file(
+    path: FilePath | BaseBuffer, compression: CompressionOptions
+) -> Generator[IO[bytes], None, None]:
+    """
+    Open a compressed file and return a file object.
+
+    Parameters
+    ----------
+    path : str
+        The path where the file is read from.
+
+    compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd', None}
+        Name of the decompression to use
+
+    Returns
+    -------
+    file object
+    """
+    with get_handle(path, "rb", compression=compression, is_text=False) as handle:
+        yield handle.handle
+
+
+@contextmanager
+def set_timezone(tz: str) -> Generator[None, None, None]:
+    """
+    Context manager for temporarily setting a timezone.
+
+    Parameters
+    ----------
+    tz : str
+        A string representing a valid timezone.
+
+    Examples
+    --------
+    >>> from datetime import datetime
+    >>> from dateutil.tz import tzlocal
+    >>> tzlocal().tzname(datetime(2021, 1, 1))  # doctest: +SKIP
+    'IST'
+
+    >>> with set_timezone('US/Eastern'):
+    ...     tzlocal().tzname(datetime(2021, 1, 1))
+    ...
+ 'EST' + """ + import time + + def setTZ(tz) -> None: + if tz is None: + try: + del os.environ["TZ"] + except KeyError: + pass + else: + os.environ["TZ"] = tz + time.tzset() + + orig_tz = os.environ.get("TZ") + setTZ(tz) + try: + yield + finally: + setTZ(orig_tz) + + +@contextmanager +def ensure_clean( + filename=None, return_filelike: bool = False, **kwargs: Any +) -> Generator[Any, None, None]: + """ + Gets a temporary path and agrees to remove on close. + + This implementation does not use tempfile.mkstemp to avoid having a file handle. + If the code using the returned path wants to delete the file itself, windows + requires that no program has a file handle to it. + + Parameters + ---------- + filename : str (optional) + suffix of the created file. + return_filelike : bool (default False) + if True, returns a file-like which is *always* cleaned. Necessary for + savefig and other functions which want to append extensions. + **kwargs + Additional keywords are passed to open(). + + """ + folder = Path(tempfile.gettempdir()) + + if filename is None: + filename = "" + filename = str(uuid.uuid4()) + filename + path = folder / filename + + path.touch() + + handle_or_str: str | IO = str(path) + encoding = kwargs.pop("encoding", None) + if return_filelike: + kwargs.setdefault("mode", "w+b") + if encoding is None and "b" not in kwargs["mode"]: + encoding = "utf-8" + handle_or_str = open(path, encoding=encoding, **kwargs) + + try: + yield handle_or_str + finally: + if not isinstance(handle_or_str, str): + handle_or_str.close() + if path.is_file(): + path.unlink() + + +@contextmanager +def with_csv_dialect(name: str, **kwargs) -> Generator[None, None, None]: + """ + Context manager to temporarily register a CSV dialect for parsing CSV. + + Parameters + ---------- + name : str + The name of the dialect. + kwargs : mapping + The parameters for the dialect. + + Raises + ------ + ValueError : the name of the dialect conflicts with a builtin one. + + See Also + -------- + csv : Python's CSV library. 
+ """ + import csv + + _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"} + + if name in _BUILTIN_DIALECTS: + raise ValueError("Cannot override builtin dialect.") + + csv.register_dialect(name, **kwargs) + try: + yield + finally: + csv.unregister_dialect(name) + + +@contextmanager +def use_numexpr(use, min_elements=None) -> Generator[None, None, None]: + from pandas.core.computation import expressions as expr + + if min_elements is None: + min_elements = expr._MIN_ELEMENTS + + olduse = expr.USE_NUMEXPR + oldmin = expr._MIN_ELEMENTS + set_option("compute.use_numexpr", use) + expr._MIN_ELEMENTS = min_elements + try: + yield + finally: + expr._MIN_ELEMENTS = oldmin + set_option("compute.use_numexpr", olduse) + + +def raises_chained_assignment_error(extra_warnings=(), extra_match=()): + from pandas._testing import assert_produces_warning + + if PYPY and not extra_warnings: + from contextlib import nullcontext + + return nullcontext() + elif PYPY and extra_warnings: + return assert_produces_warning( + extra_warnings, + match="|".join(extra_match), + ) + else: + match = ( + "A value is trying to be set on a copy of a DataFrame or Series " + "through chained assignment" + ) + return assert_produces_warning( + (ChainedAssignmentError, *extra_warnings), + match="|".join((match, *extra_match)), + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_typing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_typing.py new file mode 100644 index 00000000..743815b9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_typing.py @@ -0,0 +1,476 @@ +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterator, + Mapping, + Sequence, +) +from datetime import ( + date, + datetime, + timedelta, + tzinfo, +) +from os import PathLike +import sys +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + Optional, + Protocol, + Type as type_t, + TypeVar, + Union, +) + +import numpy as np + +# To prevent import cycles place any internal imports in the branch below +# and use a string literal forward reference to it in subsequent types +# https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles +if TYPE_CHECKING: + import numpy.typing as npt + + from pandas._libs import ( + NaTType, + Period, + Timedelta, + Timestamp, + ) + from pandas._libs.tslibs import BaseOffset + + from pandas.core.dtypes.dtypes import ExtensionDtype + + from pandas import Interval + from pandas.arrays import ( + DatetimeArray, + TimedeltaArray, + ) + from pandas.core.arrays.base import ExtensionArray + from pandas.core.frame import DataFrame + from pandas.core.generic import NDFrame + from pandas.core.groupby.generic import ( + DataFrameGroupBy, + GroupBy, + SeriesGroupBy, + ) + from pandas.core.indexes.base import Index + from pandas.core.internals import ( + ArrayManager, + BlockManager, + SingleArrayManager, + SingleBlockManager, + ) + from pandas.core.resample import Resampler + from pandas.core.series import Series + from pandas.core.window.rolling import BaseWindow + + from pandas.io.formats.format import EngFormatter + from pandas.tseries.holiday import AbstractHolidayCalendar + + ScalarLike_co = Union[ + int, + float, + complex, + str, + bytes, + np.generic, + ] + + # numpy compatible types + NumpyValueArrayLike = Union[ScalarLike_co, npt.ArrayLike] + # Name "npt._ArrayLikeInt_co" is not defined [name-defined] + NumpySorter = Optional[npt._ArrayLikeInt_co] # type: ignore[name-defined] + + if sys.version_info >= (3, 10): + from typing import TypeGuard # 
pyright: ignore[reportUnusedImport] + else: + from typing_extensions import TypeGuard # pyright: ignore[reportUnusedImport] + + if sys.version_info >= (3, 11): + from typing import Self # pyright: ignore[reportUnusedImport] + else: + from typing_extensions import Self # pyright: ignore[reportUnusedImport] +else: + npt: Any = None + Self: Any = None + TypeGuard: Any = None + +HashableT = TypeVar("HashableT", bound=Hashable) + +# array-like + +ArrayLike = Union["ExtensionArray", np.ndarray] +AnyArrayLike = Union[ArrayLike, "Index", "Series"] +TimeArrayLike = Union["DatetimeArray", "TimedeltaArray"] + +# list-like + +# Cannot use `Sequence` because a string is a sequence, and we don't want to +# accept that. Could refine if https://github.com/python/typing/issues/256 is +# resolved to differentiate between Sequence[str] and str +ListLike = Union[AnyArrayLike, list, range] + +# scalars + +PythonScalar = Union[str, float, bool] +DatetimeLikeScalar = Union["Period", "Timestamp", "Timedelta"] +PandasScalar = Union["Period", "Timestamp", "Timedelta", "Interval"] +Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, date] +IntStrT = TypeVar("IntStrT", int, str) + + +# timestamp and timedelta convertible types + +TimestampConvertibleTypes = Union[ + "Timestamp", date, np.datetime64, np.int64, float, str +] +TimestampNonexistent = Union[ + Literal["shift_forward", "shift_backward", "NaT", "raise"], timedelta +] +TimedeltaConvertibleTypes = Union[ + "Timedelta", timedelta, np.timedelta64, np.int64, float, str +] +Timezone = Union[str, tzinfo] + +ToTimestampHow = Literal["s", "e", "start", "end"] + +# NDFrameT is stricter and ensures that the same subclass of NDFrame always is +# used. E.g. `def func(a: NDFrameT) -> NDFrameT: ...` means that if a +# Series is passed into a function, a Series is always returned and if a DataFrame is +# passed in, a DataFrame is always returned. +NDFrameT = TypeVar("NDFrameT", bound="NDFrame") + +NumpyIndexT = TypeVar("NumpyIndexT", np.ndarray, "Index") + +AxisInt = int +Axis = Union[AxisInt, Literal["index", "columns", "rows"]] +IndexLabel = Union[Hashable, Sequence[Hashable]] +Level = Hashable +Shape = tuple[int, ...] 
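+# [Editor's note] Illustrative sketch only, not part of the upstream pandas
+# file: NDFrameT (defined above, bound to NDFrame) preserves the concrete
+# subclass across a call, unlike a plain NDFrame -> NDFrame annotation. A
+# hypothetical helper shows the difference:
+#
+#     def head_one(obj: NDFrameT) -> NDFrameT:
+#         # inferred as Series -> Series or DataFrame -> DataFrame
+#         return obj.iloc[:1]
+#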
+Suffixes = tuple[Optional[str], Optional[str]]
+Ordered = Optional[bool]
+JSONSerializable = Optional[Union[PythonScalar, list, dict]]
+Frequency = Union[str, "BaseOffset"]
+Axes = ListLike
+
+RandomState = Union[
+    int,
+    np.ndarray,
+    np.random.Generator,
+    np.random.BitGenerator,
+    np.random.RandomState,
+]
+
+# dtypes
+NpDtype = Union[str, np.dtype, type_t[Union[str, complex, bool, object]]]
+Dtype = Union["ExtensionDtype", NpDtype]
+AstypeArg = Union["ExtensionDtype", "npt.DTypeLike"]
+# DtypeArg specifies all allowable dtypes in a function's dtype argument
+DtypeArg = Union[Dtype, dict[Hashable, Dtype]]
+DtypeObj = Union[np.dtype, "ExtensionDtype"]
+
+# converters
+ConvertersArg = dict[Hashable, Callable[[Dtype], Dtype]]
+
+# parse_dates
+ParseDatesArg = Union[
+    bool, list[Hashable], list[list[Hashable]], dict[Hashable, list[Hashable]]
+]
+
+# For functions like rename that convert one label to another
+Renamer = Union[Mapping[Any, Hashable], Callable[[Any], Hashable]]
+
+# to maintain type information across generic functions and parametrization
+T = TypeVar("T")
+
+# used in decorators to preserve the signature of the function it decorates
+# see https://mypy.readthedocs.io/en/stable/generics.html#declaring-decorators
+FuncType = Callable[..., Any]
+F = TypeVar("F", bound=FuncType)
+
+# types of vectorized key functions for DataFrame::sort_values and
+# DataFrame::sort_index, among others
+ValueKeyFunc = Optional[Callable[["Series"], Union["Series", AnyArrayLike]]]
+IndexKeyFunc = Optional[Callable[["Index"], Union["Index", AnyArrayLike]]]
+
+# types of `func` kwarg for DataFrame.aggregate and Series.aggregate
+AggFuncTypeBase = Union[Callable, str]
+AggFuncTypeDict = dict[Hashable, Union[AggFuncTypeBase, list[AggFuncTypeBase]]]
+AggFuncType = Union[
+    AggFuncTypeBase,
+    list[AggFuncTypeBase],
+    AggFuncTypeDict,
+]
+AggObjType = Union[
+    "Series",
+    "DataFrame",
+    "GroupBy",
+    "SeriesGroupBy",
+    "DataFrameGroupBy",
+    "BaseWindow",
+    "Resampler",
+]
+
+PythonFuncType = Callable[[Any], Any]
+
+# filenames and file-like-objects
+AnyStr_co = TypeVar("AnyStr_co", str, bytes, covariant=True)
+AnyStr_contra = TypeVar("AnyStr_contra", str, bytes, contravariant=True)
+
+
+class BaseBuffer(Protocol):
+    @property
+    def mode(self) -> str:
+        # for _get_filepath_or_buffer
+        ...
+
+    def seek(self, __offset: int, __whence: int = ...) -> int:
+        # with one argument: gzip.GzipFile, bz2.BZ2File
+        # with two arguments: zip.ZipFile, read_sas
+        ...
+
+    def seekable(self) -> bool:
+        # for bz2.BZ2File
+        ...
+
+    def tell(self) -> int:
+        # for zip.ZipFile, read_stata, to_stata
+        ...
+
+
+class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]):
+    def read(self, __n: int = ...) -> AnyStr_co:
+        # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File
+        ...
+
+
+class WriteBuffer(BaseBuffer, Protocol[AnyStr_contra]):
+    def write(self, __b: AnyStr_contra) -> Any:
+        # for gzip.GzipFile, bz2.BZ2File
+        ...
+
+    def flush(self) -> Any:
+        # for gzip.GzipFile, bz2.BZ2File
+        ...
+
+
+class ReadPickleBuffer(ReadBuffer[bytes], Protocol):
+    def readline(self) -> bytes:
+        ...
+
+
+class WriteExcelBuffer(WriteBuffer[bytes], Protocol):
+    def truncate(self, size: int | None = ...) -> int:
+        ...
+
+
+class ReadCsvBuffer(ReadBuffer[AnyStr_co], Protocol):
+    def __iter__(self) -> Iterator[AnyStr_co]:
+        # for engine=python
+        ...
+
+    def fileno(self) -> int:
+        # for _MMapWrapper
+        ...
+
+    def readline(self) -> AnyStr_co:
+        # for engine=python
+        ...
+
+    @property
+    def closed(self) -> bool:
+        # for engine=pyarrow
+        ...
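+# [Editor's note] Illustrative sketch only, not part of the upstream pandas
+# file: the buffer classes above are structural Protocols, so any object with
+# the right attributes and methods is accepted without subclassing. For
+# example, a binary file handle exposes mode/read/seek/seekable/tell/readline/
+# fileno/closed and __iter__, so it should satisfy ReadCsvBuffer[bytes] under
+# a static checker:
+#
+#     with open("data.csv", "rb") as f:  # hypothetical path
+#         buf: ReadCsvBuffer[bytes] = f
+#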
+
+
+FilePath = Union[str, "PathLike[str]"]
+
+# for arbitrary kwargs passed during reading/writing files
+StorageOptions = Optional[dict[str, Any]]
+
+
+# compression keywords and compression
+CompressionDict = dict[str, Any]
+CompressionOptions = Optional[
+    Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict]
+]
+
+# types in DataFrameFormatter
+FormattersType = Union[
+    list[Callable], tuple[Callable, ...], Mapping[Union[str, int], Callable]
+]
+ColspaceType = Mapping[Hashable, Union[str, int]]
+FloatFormatType = Union[str, Callable, "EngFormatter"]
+ColspaceArgType = Union[
+    str, int, Sequence[Union[str, int]], Mapping[Hashable, Union[str, int]]
+]
+
+# Arguments for fillna()
+FillnaOptions = Literal["backfill", "bfill", "ffill", "pad"]
+InterpolateOptions = Literal[
+    "linear",
+    "time",
+    "index",
+    "values",
+    "nearest",
+    "zero",
+    "slinear",
+    "quadratic",
+    "cubic",
+    "barycentric",
+    "polynomial",
+    "krogh",
+    "piecewise_polynomial",
+    "spline",
+    "pchip",
+    "akima",
+    "cubicspline",
+    "from_derivatives",
+]
+
+# internals
+Manager = Union[
+    "ArrayManager", "SingleArrayManager", "BlockManager", "SingleBlockManager"
+]
+SingleManager = Union["SingleArrayManager", "SingleBlockManager"]
+Manager2D = Union["ArrayManager", "BlockManager"]
+
+# indexing
+# PositionalIndexer -> valid 1D positional indexer, e.g. can pass
+# to ndarray.__getitem__
+# ScalarIndexer is for a single value as the index
+# SequenceIndexer is for list like or slices (but not tuples)
+# PositionalIndexerTuple extends the PositionalIndexer for 2D arrays
+# These are used in various __getitem__ overloads
+# TODO(typing#684): add Ellipsis, see
+# https://github.com/python/typing/issues/684#issuecomment-548203158
+# https://bugs.python.org/issue41810
+# Using List[int] here rather than Sequence[int] to disallow tuples.
+ScalarIndexer = Union[int, np.integer]
+SequenceIndexer = Union[slice, list[int], np.ndarray]
+PositionalIndexer = Union[ScalarIndexer, SequenceIndexer]
+PositionalIndexerTuple = tuple[PositionalIndexer, PositionalIndexer]
+PositionalIndexer2D = Union[PositionalIndexer, PositionalIndexerTuple]
+if TYPE_CHECKING:
+    TakeIndexer = Union[Sequence[int], Sequence[np.integer], npt.NDArray[np.integer]]
+else:
+    TakeIndexer = Any
+
+# Shared by functions such as drop and astype
+IgnoreRaise = Literal["ignore", "raise"]
+
+# Windowing rank methods
+WindowingRankType = Literal["average", "min", "max"]
+
+# read_csv engines
+CSVEngine = Literal["c", "python", "pyarrow", "python-fwf"]
+
+# read_json engines
+JSONEngine = Literal["ujson", "pyarrow"]
+
+# read_xml parsers
+XMLParsers = Literal["lxml", "etree"]
+
+# Interval closed type
+IntervalLeftRight = Literal["left", "right"]
+IntervalClosedType = Union[IntervalLeftRight, Literal["both", "neither"]]
+
+# datetime and NaTType
+DatetimeNaTType = Union[datetime, "NaTType"]
+DateTimeErrorChoices = Union[IgnoreRaise, Literal["coerce"]]
+
+# sort_index
+SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"]
+NaPosition = Literal["first", "last"]
+
+# Arguments for nsmallest and nlargest
+NsmallestNlargestKeep = Literal["first", "last", "all"]
+
+# quantile interpolation
+QuantileInterpolation = Literal["linear", "lower", "higher", "midpoint", "nearest"]
+
+# plotting
+PlottingOrientation = Literal["horizontal", "vertical"]
+
+# dropna
+AnyAll = Literal["any", "all"]
+
+# merge
+MergeHow = Literal["left", "right", "inner", "outer", "cross"]
+MergeValidate = Literal[
+    "one_to_one",
+    "1:1",
+    "one_to_many",
+    "1:m",
+    "many_to_one",
+    "m:1",
+    "many_to_many",
+    "m:m",
+]
+
+# join
+JoinHow = Literal["left", "right", "inner", "outer"]
+JoinValidate = Literal[
+    "one_to_one",
+    "1:1",
+    "one_to_many",
+    "1:m",
+    "many_to_one",
+    "m:1",
+    "many_to_many",
+    "m:m",
+]
+
+# reindex
+ReindexMethod = Union[FillnaOptions, Literal["nearest"]]
+
+MatplotlibColor = Union[str, Sequence[float]]
+TimeGrouperOrigin = Union[
+    "Timestamp", Literal["epoch", "start", "start_day", "end", "end_day"]
+]
+TimeAmbiguous = Union[Literal["infer", "NaT", "raise"], "npt.NDArray[np.bool_]"]
+TimeNonexistent = Union[
+    Literal["shift_forward", "shift_backward", "NaT", "raise"], timedelta
+]
+DropKeep = Literal["first", "last", False]
+CorrelationMethod = Union[
+    Literal["pearson", "kendall", "spearman"], Callable[[np.ndarray, np.ndarray], float]
+]
+AlignJoin = Literal["outer", "inner", "left", "right"]
+DtypeBackend = Literal["pyarrow", "numpy_nullable"]
+
+TimeUnit = Literal["s", "ms", "us", "ns"]
+OpenFileErrors = Literal[
+    "strict",
+    "ignore",
+    "replace",
+    "surrogateescape",
+    "xmlcharrefreplace",
+    "backslashreplace",
+    "namereplace",
+]
+
+# update
+UpdateJoin = Literal["left"]
+
+# applymap
+NaAction = Literal["ignore"]
+
+# from_dict
+FromDictOrient = Literal["columns", "index", "tight"]
+
+# to_gbq
+ToGbqIfexist = Literal["fail", "replace", "append"]
+
+# to_stata
+ToStataByteorder = Literal[">", "<", "little", "big"]
+
+# ExcelWriter
+ExcelWriterIfSheetExists = Literal["error", "new", "replace", "overlay"]
+
+# Offsets
+OffsetCalendar = Union[np.busdaycalendar, "AbstractHolidayCalendar"]
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_version.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_version.py
new file mode 100644
index 00000000..5d610b5e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_version.py
@@ -0,0 +1,692 @@
+# This file helps to compute a version number in source trees obtained from
+# git-archive tarball (such as those provided by GitHub's download-from-tag
+# feature). Distribution tarballs (built by setup.py sdist) and build
+# directories (produced by setup.py build) will contain a much shorter file
+# that just contains the computed version number.
+
+# This file is released into the public domain.
+# Generated by versioneer-0.28
+# https://github.com/python-versioneer/python-versioneer
+
+"""Git implementation of _version.py."""
+
+import errno
+import functools
+import os
+import re
+import subprocess
+import sys
+from typing import Callable
+
+
+def get_keywords():
+    """Get the keywords needed to look up the version information."""
+    # these strings will be replaced by git during git-archive.
+    # setup.py/versioneer.py will grep for the variable names, so they must
+    # each be defined on a line of their own. _version.py will just call
+    # get_keywords().
+    git_refnames = "$Format:%d$"
+    git_full = "$Format:%H$"
+    git_date = "$Format:%ci$"
+    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
+    return keywords
+
+
+class VersioneerConfig:
+    """Container for Versioneer configuration parameters."""
+
+
+def get_config():
+    """Create, populate and return the VersioneerConfig() object."""
+    # these strings are filled in when 'setup.py versioneer' creates
+    # _version.py
+    cfg = VersioneerConfig()
+    cfg.VCS = "git"
+    cfg.style = "pep440"
+    cfg.tag_prefix = "v"
+    cfg.parentdir_prefix = "pandas-"
+    cfg.versionfile_source = "pandas/_version.py"
+    cfg.verbose = False
+    return cfg
+
+
+class NotThisMethod(Exception):
+    """Exception raised if a method is not valid for the current scenario."""
+
+
+LONG_VERSION_PY: dict[str, str] = {}
+HANDLERS: dict[str, dict[str, Callable]] = {}
+
+
+def register_vcs_handler(vcs, method):  # decorator
+    """Create decorator to mark a method as the handler of a VCS."""
+
+    def decorate(f):
+        """Store f in HANDLERS[vcs][method]."""
+        if vcs not in HANDLERS:
+            HANDLERS[vcs] = {}
+        HANDLERS[vcs][method] = f
+        return f
+
+    return decorate
+
+
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
+    """Call the given command(s)."""
+    assert isinstance(commands, list)
+    process = None
+
+    popen_kwargs = {}
+    if sys.platform == "win32":
+        # This hides the console window if pythonw.exe is used
+        startupinfo = subprocess.STARTUPINFO()
+        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+        popen_kwargs["startupinfo"] = startupinfo
+
+    for command in commands:
+        dispcmd = str([command] + args)
+        try:
+            # remember shell=False, so use git.cmd on windows, not just git
+            process = subprocess.Popen(
+                [command] + args,
+                cwd=cwd,
+                env=env,
+                stdout=subprocess.PIPE,
+                stderr=(subprocess.PIPE if hide_stderr else None),
+                **popen_kwargs,
+            )
+            break
+        except OSError:
+            e = sys.exc_info()[1]
+            if e.errno == errno.ENOENT:
+                continue
+            if verbose:
+                print(f"unable to run {dispcmd}")
+                print(e)
+            return None, None
+    else:
+        if verbose:
+            print(f"unable to find command, tried {commands}")
+        return None, None
+    stdout = process.communicate()[0].strip().decode()
+    if process.returncode != 0:
+        if verbose:
+            print(f"unable to run {dispcmd} (error)")
+            print(f"stdout was {stdout}")
+        return None, process.returncode
+    return stdout, process.returncode
+
+
+def versions_from_parentdir(parentdir_prefix, root, verbose):
+    """Try to determine the version from the parent directory name.
+ + Source tarballs conventionally unpack into a directory that includes both + the project name and a version string. We will also support searching up + two directory levels for an appropriately named parent directory + """ + rootdirs = [] + + for _ in range(3): + dirname = os.path.basename(root) + if dirname.startswith(parentdir_prefix): + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + "date": None, + } + rootdirs.append(root) + root = os.path.dirname(root) # up a level + + if verbose: + print( + f"Tried directories {str(rootdirs)} \ + but none started with prefix {parentdir_prefix}" + ) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs): + """Extract version information from the given file.""" + # the code embedded in _version.py can just fetch the value of these + # keywords. When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. + keywords = {} + try: + with open(versionfile_abs, encoding="utf-8") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): + """Get version information from git keywords.""" + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") + date = keywords.get("date") + if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant + # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 + # -like" string, which we must then edit to make compliant), because + # it's been around since git-1.5.3, and it's too difficult to + # discover which version we're using, or to work around using an + # older one. + date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = {r.strip() for r in refnames.strip("()").split(",")} + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. The old git %d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". 
+ tags = {r for r in refs if re.search(r"\d", r)} + if verbose: + print(f"discarding '{','.join(refs - tags)}', no digits") + if verbose: + print(f"likely tags: {','.join(sorted(tags))}") + for ref in sorted(tags): + # sorting will prefer e.g. "2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix) :] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r"\d", r): + continue + if verbose: + print(f"picking {r}") + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + "date": date, + } + # no suitable tags, so version is "0+unknown", but full hex is still there + if verbose: + print("no suitable tags, using unknown + full revision id") + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + "date": None, + } + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): + """Get version from 'git describe' in the root of the source tree. + + This only gets called if the git-archive 'subst' keywords were *not* + expanded, and _version.py hasn't already been rewritten with a short + version string, meaning we're inside a checked out source tree. + """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + + # GIT_DIR can interfere with correct operation of Versioneer. + # It may be intended to be passed to the Versioneer-versioned project, + # but that should not change where we get our version from. + env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose) + if rc != 0: + if verbose: + print(f"Directory {root} not under git control") + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = runner( + GITS, + [ + "describe", + "--tags", + "--dirty", + "--always", + "--long", + "--match", + f"{tag_prefix}[[:digit:]]*", + ], + cwd=root, + ) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. 
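+        # Rough sketch of the detached-HEAD case: "git branch --contains"
+        # may print "* (HEAD detached at abc1234)" followed by the branches
+        # containing the commit; the first line is dropped and the leading
+        # two-character markers are stripped below.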
+ branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. + branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[: git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) + if not mo: + # unparsable. Maybe git-describe is misbehaving? + pieces["error"] = f"unable to parse git-describe output: '{describe_out}'" + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces[ + "error" + ] = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'" + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix) :] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def plus_or_dot(pieces): + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += f"{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = f"0+untagged.{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_branch(pieces): + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). + + Exceptions: + 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += f"{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += f"+untagged.{pieces['distance']}.g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver): + """Split pep440 version string at the post-release segment. + + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces): + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: + if pieces["distance"]: + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += f".post{post_version + 1}.dev{pieces['distance']}" + else: + rendered += f".post0.dev{pieces['distance']}" + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] + else: + # exception #1 + rendered = f"0.post0.dev{pieces['distance']}" + return rendered + + +def render_pep440_post(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += f".post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += f"g{pieces['short']}" + else: + # exception #1 + rendered = f"0.post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + rendered += f"+g{pieces['short']}" + return rendered + + +def render_pep440_post_branch(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += f".post{pieces['distance']}" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += f"g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = f"0.post{pieces['distance']}" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += f"+g{pieces['short']}" + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_old(pieces): + """TAG[.postDISTANCE[.dev0]] . + + The ".dev0" means dirty. + + Exceptions: + 1: no tags. 
0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += f"0.post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = f"0.post{pieces['distance']}" + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces): + """TAG[-DISTANCE-gHEX][-dirty]. + + Like 'git describe --tags --dirty --always'. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += f"-{pieces['distance']}-g{pieces['short']}" + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + """TAG-DISTANCE-gHEX[-dirty]. + + Like 'git describe --tags --dirty --always -long'. + The distance/hash is unconditional. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += f"-{pieces['distance']}-g{pieces['short']}" + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None, + } + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError(f"unknown style '{style}'") + + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + "date": pieces.get("date"), + } + + +def get_versions(): + """Get version information or return default if unable to do so.""" + # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have + # __file__, we can work backwards from there to the root. Some + # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which + # case we can only use expanded keywords. + + cfg = get_config() + verbose = cfg.verbose + + try: + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) + except NotThisMethod: + pass + + try: + root = os.path.realpath(__file__) + # versionfile_source is the relative path from the top of the source + # tree (where the .git directory might live) to this file. Invert + # this to find the root from __file__. 
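+        # Worked example for the pandas layout: versionfile_source is
+        # "pandas/_version.py", so the loop below takes two os.path.dirname()
+        # hops from this file's path, landing on the directory that would
+        # contain .git in a source checkout.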
+ for _ in cfg.versionfile_source.split("/"): + root = os.path.dirname(root) + except NameError: + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None, + } + + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass + + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + "date": None, + } diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/_version_meson.py b/dbdpy-env/lib/python3.9/site-packages/pandas/_version_meson.py new file mode 100644 index 00000000..32119b66 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/_version_meson.py @@ -0,0 +1,2 @@ +__version__="2.1.4" +__git_version__="a671b5a8bf5dd13fb19f0e88edc679bc9e15c673" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/api/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/api/__init__.py new file mode 100644 index 00000000..a0d42b65 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/api/__init__.py @@ -0,0 +1,16 @@ +""" public toolkit API """ +from pandas.api import ( + extensions, + indexers, + interchange, + types, + typing, +) + +__all__ = [ + "interchange", + "extensions", + "indexers", + "types", + "typing", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/api/extensions/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/api/extensions/__init__.py new file mode 100644 index 00000000..ea5f1ba9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/api/extensions/__init__.py @@ -0,0 +1,33 @@ +""" +Public API for extending pandas objects. +""" + +from pandas._libs.lib import no_default + +from pandas.core.dtypes.base import ( + ExtensionDtype, + register_extension_dtype, +) + +from pandas.core.accessor import ( + register_dataframe_accessor, + register_index_accessor, + register_series_accessor, +) +from pandas.core.algorithms import take +from pandas.core.arrays import ( + ExtensionArray, + ExtensionScalarOpsMixin, +) + +__all__ = [ + "no_default", + "ExtensionDtype", + "register_extension_dtype", + "register_dataframe_accessor", + "register_index_accessor", + "register_series_accessor", + "take", + "ExtensionArray", + "ExtensionScalarOpsMixin", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/api/indexers/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/api/indexers/__init__.py new file mode 100644 index 00000000..78357f11 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/api/indexers/__init__.py @@ -0,0 +1,17 @@ +""" +Public API for Rolling Window Indexers. +""" + +from pandas.core.indexers import check_array_indexer +from pandas.core.indexers.objects import ( + BaseIndexer, + FixedForwardWindowIndexer, + VariableOffsetWindowIndexer, +) + +__all__ = [ + "check_array_indexer", + "BaseIndexer", + "FixedForwardWindowIndexer", + "VariableOffsetWindowIndexer", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/api/interchange/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/api/interchange/__init__.py new file mode 100644 index 00000000..2f3a73bc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/api/interchange/__init__.py @@ -0,0 +1,8 @@ +""" +Public API for DataFrame interchange protocol. 
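+
+A minimal usage sketch (``df`` here stands for any object exposing the
+protocol's ``__dataframe__`` method): ``from_dataframe(df)`` materializes
+it as a pandas DataFrame.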
+""" + +from pandas.core.interchange.dataframe_protocol import DataFrame +from pandas.core.interchange.from_dataframe import from_dataframe + +__all__ = ["from_dataframe", "DataFrame"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/api/types/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/api/types/__init__.py new file mode 100644 index 00000000..c601086b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/api/types/__init__.py @@ -0,0 +1,23 @@ +""" +Public toolkit API. +""" + +from pandas._libs.lib import infer_dtype + +from pandas.core.dtypes.api import * # noqa: F403 +from pandas.core.dtypes.concat import union_categoricals +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + IntervalDtype, + PeriodDtype, +) + +__all__ = [ + "infer_dtype", + "union_categoricals", + "CategoricalDtype", + "DatetimeTZDtype", + "IntervalDtype", + "PeriodDtype", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/api/typing/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/api/typing/__init__.py new file mode 100644 index 00000000..9b5d2cb0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/api/typing/__init__.py @@ -0,0 +1,55 @@ +""" +Public API classes that store intermediate results useful for type-hinting. +""" + +from pandas._libs import NaTType +from pandas._libs.missing import NAType + +from pandas.core.groupby import ( + DataFrameGroupBy, + SeriesGroupBy, +) +from pandas.core.resample import ( + DatetimeIndexResamplerGroupby, + PeriodIndexResamplerGroupby, + Resampler, + TimedeltaIndexResamplerGroupby, + TimeGrouper, +) +from pandas.core.window import ( + Expanding, + ExpandingGroupby, + ExponentialMovingWindow, + ExponentialMovingWindowGroupby, + Rolling, + RollingGroupby, + Window, +) + +# TODO: Can't import Styler without importing jinja2 +# from pandas.io.formats.style import Styler +from pandas.io.json._json import JsonReader +from pandas.io.stata import StataReader + +__all__ = [ + "DataFrameGroupBy", + "DatetimeIndexResamplerGroupby", + "Expanding", + "ExpandingGroupby", + "ExponentialMovingWindow", + "ExponentialMovingWindowGroupby", + "JsonReader", + "NaTType", + "NAType", + "PeriodIndexResamplerGroupby", + "Resampler", + "Rolling", + "RollingGroupby", + "SeriesGroupBy", + "StataReader", + # See TODO above + # "Styler", + "TimedeltaIndexResamplerGroupby", + "TimeGrouper", + "Window", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/arrays/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/arrays/__init__.py new file mode 100644 index 00000000..32e2afc0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/arrays/__init__.py @@ -0,0 +1,53 @@ +""" +All of pandas' ExtensionArrays. + +See :ref:`extending.extension-types` for more. 
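+
+For example, ``pd.array([1, 2, None], dtype="Int64")`` constructs an
+:class:`IntegerArray`, one of the types re-exported here.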
+""" +from pandas.core.arrays import ( + ArrowExtensionArray, + ArrowStringArray, + BooleanArray, + Categorical, + DatetimeArray, + FloatingArray, + IntegerArray, + IntervalArray, + NumpyExtensionArray, + PeriodArray, + SparseArray, + StringArray, + TimedeltaArray, +) + +__all__ = [ + "ArrowExtensionArray", + "ArrowStringArray", + "BooleanArray", + "Categorical", + "DatetimeArray", + "FloatingArray", + "IntegerArray", + "IntervalArray", + "NumpyExtensionArray", + "PeriodArray", + "SparseArray", + "StringArray", + "TimedeltaArray", +] + + +def __getattr__(name: str): + if name == "PandasArray": + # GH#53694 + import warnings + + from pandas.util._exceptions import find_stack_level + + warnings.warn( + "PandasArray has been renamed NumpyExtensionArray. Use that " + "instead. This alias will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return NumpyExtensionArray + raise AttributeError(f"module 'pandas.arrays' has no attribute '{name}'") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/compat/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/__init__.py new file mode 100644 index 00000000..68969453 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/__init__.py @@ -0,0 +1,199 @@ +""" +compat +====== + +Cross-compatible functions for different versions of Python. + +Other items: +* platform checker +""" +from __future__ import annotations + +import os +import platform +import sys +from typing import TYPE_CHECKING + +from pandas.compat._constants import ( + IS64, + ISMUSL, + PY310, + PY311, + PY312, + PYPY, +) +import pandas.compat.compressors +from pandas.compat.numpy import is_numpy_dev +from pandas.compat.pyarrow import ( + pa_version_under7p0, + pa_version_under8p0, + pa_version_under9p0, + pa_version_under11p0, + pa_version_under13p0, + pa_version_under14p0, + pa_version_under14p1, +) + +if TYPE_CHECKING: + from pandas._typing import F + + +def set_function_name(f: F, name: str, cls: type) -> F: + """ + Bind the name/qualname attributes of the function. + """ + f.__name__ = name + f.__qualname__ = f"{cls.__name__}.{name}" + f.__module__ = cls.__module__ + return f + + +def is_platform_little_endian() -> bool: + """ + Checking if the running platform is little endian. + + Returns + ------- + bool + True if the running platform is little endian. + """ + return sys.byteorder == "little" + + +def is_platform_windows() -> bool: + """ + Checking if the running platform is windows. + + Returns + ------- + bool + True if the running platform is windows. + """ + return sys.platform in ["win32", "cygwin"] + + +def is_platform_linux() -> bool: + """ + Checking if the running platform is linux. + + Returns + ------- + bool + True if the running platform is linux. + """ + return sys.platform == "linux" + + +def is_platform_mac() -> bool: + """ + Checking if the running platform is mac. + + Returns + ------- + bool + True if the running platform is mac. + """ + return sys.platform == "darwin" + + +def is_platform_arm() -> bool: + """ + Checking if the running platform use ARM architecture. + + Returns + ------- + bool + True if the running platform uses ARM architecture. + """ + return platform.machine() in ("arm64", "aarch64") or platform.machine().startswith( + "armv" + ) + + +def is_platform_power() -> bool: + """ + Checking if the running platform use Power architecture. + + Returns + ------- + bool + True if the running platform uses ARM architecture. 
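+        (Concretely, the check below returns True when platform.machine()
+        reports "ppc64" or "ppc64le".)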
+ """ + return platform.machine() in ("ppc64", "ppc64le") + + +def is_ci_environment() -> bool: + """ + Checking if running in a continuous integration environment by checking + the PANDAS_CI environment variable. + + Returns + ------- + bool + True if the running in a continuous integration environment. + """ + return os.environ.get("PANDAS_CI", "0") == "1" + + +def get_lzma_file() -> type[pandas.compat.compressors.LZMAFile]: + """ + Importing the `LZMAFile` class from the `lzma` module. + + Returns + ------- + class + The `LZMAFile` class from the `lzma` module. + + Raises + ------ + RuntimeError + If the `lzma` module was not imported correctly, or didn't exist. + """ + if not pandas.compat.compressors.has_lzma: + raise RuntimeError( + "lzma module not available. " + "A Python re-install with the proper dependencies, " + "might be required to solve this issue." + ) + return pandas.compat.compressors.LZMAFile + + +def get_bz2_file() -> type[pandas.compat.compressors.BZ2File]: + """ + Importing the `BZ2File` class from the `bz2` module. + + Returns + ------- + class + The `BZ2File` class from the `bz2` module. + + Raises + ------ + RuntimeError + If the `bz2` module was not imported correctly, or didn't exist. + """ + if not pandas.compat.compressors.has_bz2: + raise RuntimeError( + "bz2 module not available. " + "A Python re-install with the proper dependencies, " + "might be required to solve this issue." + ) + return pandas.compat.compressors.BZ2File + + +__all__ = [ + "is_numpy_dev", + "pa_version_under7p0", + "pa_version_under8p0", + "pa_version_under9p0", + "pa_version_under11p0", + "pa_version_under13p0", + "pa_version_under14p0", + "pa_version_under14p1", + "IS64", + "ISMUSL", + "PY310", + "PY311", + "PY312", + "PYPY", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/compat/_constants.py b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/_constants.py new file mode 100644 index 00000000..7bc3fbaa --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/_constants.py @@ -0,0 +1,30 @@ +""" +_constants +====== + +Constants relevant for the Python implementation. +""" + +from __future__ import annotations + +import platform +import sys +import sysconfig + +IS64 = sys.maxsize > 2**32 + +PY310 = sys.version_info >= (3, 10) +PY311 = sys.version_info >= (3, 11) +PY312 = sys.version_info >= (3, 12) +PYPY = platform.python_implementation() == "PyPy" +ISMUSL = "musl" in (sysconfig.get_config_var("HOST_GNU_TYPE") or "") +REF_COUNT = 2 if PY311 else 3 + +__all__ = [ + "IS64", + "ISMUSL", + "PY310", + "PY311", + "PY312", + "PYPY", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/compat/_optional.py b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/_optional.py new file mode 100644 index 00000000..c5792fa1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/_optional.py @@ -0,0 +1,163 @@ +from __future__ import annotations + +import importlib +import sys +from typing import TYPE_CHECKING +import warnings + +from pandas.util._exceptions import find_stack_level + +from pandas.util.version import Version + +if TYPE_CHECKING: + import types + +# Update install.rst & setup.cfg when updating versions! 
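+
+# Sketch of how this table is consumed (these are real entries below):
+# import_optional_dependency("bs4") imports the module and, with the default
+# errors="raise", raises ImportError if the installed beautifulsoup4 is
+# older than VERSIONS["bs4"].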
+ +VERSIONS = { + "bs4": "4.11.1", + "blosc": "1.21.0", + "bottleneck": "1.3.4", + "dataframe-api-compat": "0.1.7", + "fastparquet": "0.8.1", + "fsspec": "2022.05.0", + "html5lib": "1.1", + "hypothesis": "6.46.1", + "gcsfs": "2022.05.0", + "jinja2": "3.1.2", + "lxml.etree": "4.8.0", + "matplotlib": "3.6.1", + "numba": "0.55.2", + "numexpr": "2.8.0", + "odfpy": "1.4.1", + "openpyxl": "3.0.10", + "pandas_gbq": "0.17.5", + "psycopg2": "2.9.3", # (dt dec pq3 ext lo64) + "pymysql": "1.0.2", + "pyarrow": "7.0.0", + "pyreadstat": "1.1.5", + "pytest": "7.3.2", + "pyxlsb": "1.0.9", + "s3fs": "2022.05.0", + "scipy": "1.8.1", + "sqlalchemy": "1.4.36", + "tables": "3.7.0", + "tabulate": "0.8.10", + "xarray": "2022.03.0", + "xlrd": "2.0.1", + "xlsxwriter": "3.0.3", + "zstandard": "0.17.0", + "tzdata": "2022.1", + "qtpy": "2.2.0", + "pyqt5": "5.15.6", +} + +# A mapping from import name to package name (on PyPI) for packages where +# these two names are different. + +INSTALL_MAPPING = { + "bs4": "beautifulsoup4", + "bottleneck": "Bottleneck", + "jinja2": "Jinja2", + "lxml.etree": "lxml", + "odf": "odfpy", + "pandas_gbq": "pandas-gbq", + "sqlalchemy": "SQLAlchemy", + "tables": "pytables", +} + + +def get_version(module: types.ModuleType) -> str: + version = getattr(module, "__version__", None) + + if version is None: + raise ImportError(f"Can't determine version for {module.__name__}") + if module.__name__ == "psycopg2": + # psycopg2 appends " (dt dec pq3 ext lo64)" to it's version + version = version.split()[0] + return version + + +def import_optional_dependency( + name: str, + extra: str = "", + errors: str = "raise", + min_version: str | None = None, +): + """ + Import an optional dependency. + + By default, if a dependency is missing an ImportError with a nice + message will be raised. If a dependency is present, but too old, + we raise. + + Parameters + ---------- + name : str + The module name. + extra : str + Additional text to include in the ImportError message. + errors : str {'raise', 'warn', 'ignore'} + What to do when a dependency is not found or its version is too old. + + * raise : Raise an ImportError + * warn : Only applicable when a module's version is to old. + Warns that the version is too old and returns None + * ignore: If the module is not installed, return None, otherwise, + return the module, even if the version is too old. + It's expected that users validate the version locally when + using ``errors="ignore"`` (see. ``io/html.py``) + min_version : str, default None + Specify a minimum version that is different from the global pandas + minimum version required. + Returns + ------- + maybe_module : Optional[ModuleType] + The imported module, when found and the version is correct. + None is returned when the package is not found and `errors` + is False, or when the package's version is too old and `errors` + is ``'warn'``. + """ + + assert errors in {"warn", "raise", "ignore"} + + package_name = INSTALL_MAPPING.get(name) + install_name = package_name if package_name is not None else name + + msg = ( + f"Missing optional dependency '{install_name}'. {extra} " + f"Use pip or conda to install {install_name}." 
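+        # (e.g. for name="lxml.etree", INSTALL_MAPPING makes the message
+        #  suggest installing "lxml")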
+ ) + try: + module = importlib.import_module(name) + except ImportError: + if errors == "raise": + raise ImportError(msg) + return None + + # Handle submodules: if we have submodule, grab parent module from sys.modules + parent = name.split(".")[0] + if parent != name: + install_name = parent + module_to_get = sys.modules[install_name] + else: + module_to_get = module + minimum_version = min_version if min_version is not None else VERSIONS.get(parent) + if minimum_version: + version = get_version(module_to_get) + if version and Version(version) < Version(minimum_version): + msg = ( + f"Pandas requires version '{minimum_version}' or newer of '{parent}' " + f"(version '{version}' currently installed)." + ) + if errors == "warn": + warnings.warn( + msg, + UserWarning, + stacklevel=find_stack_level(), + ) + return None + elif errors == "raise": + raise ImportError(msg) + + return module diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/compat/compressors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/compressors.py new file mode 100644 index 00000000..1f31e34c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/compressors.py @@ -0,0 +1,77 @@ +""" +Patched ``BZ2File`` and ``LZMAFile`` to handle pickle protocol 5. +""" + +from __future__ import annotations + +from pickle import PickleBuffer + +from pandas.compat._constants import PY310 + +try: + import bz2 + + has_bz2 = True +except ImportError: + has_bz2 = False + +try: + import lzma + + has_lzma = True +except ImportError: + has_lzma = False + + +def flatten_buffer( + b: bytes | bytearray | memoryview | PickleBuffer, +) -> bytes | bytearray | memoryview: + """ + Return some 1-D `uint8` typed buffer. + + Coerces anything that does not match that description to one that does + without copying if possible (otherwise will copy). + """ + + if isinstance(b, (bytes, bytearray)): + return b + + if not isinstance(b, PickleBuffer): + b = PickleBuffer(b) + + try: + # coerce to 1-D `uint8` C-contiguous `memoryview` zero-copy + return b.raw() + except BufferError: + # perform in-memory copy if buffer is not contiguous + return memoryview(b).tobytes("A") + + +if has_bz2: + + class BZ2File(bz2.BZ2File): + if not PY310: + + def write(self, b) -> int: + # Workaround issue where `bz2.BZ2File` expects `len` + # to return the number of bytes in `b` by converting + # `b` into something that meets that constraint with + # minimal copying. + # + # Note: This is fixed in Python 3.10. + return super().write(flatten_buffer(b)) + + +if has_lzma: + + class LZMAFile(lzma.LZMAFile): + if not PY310: + + def write(self, b) -> int: + # Workaround issue where `lzma.LZMAFile` expects `len` + # to return the number of bytes in `b` by converting + # `b` into something that meets that constraint with + # minimal copying. + # + # Note: This is fixed in Python 3.10. 
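+                # Sketch: flatten_buffer(memoryview(b"ab")) yields a 1-D,
+                # contiguous uint8 view of length 2, satisfying the len()
+                # expectation noted above.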
+ return super().write(flatten_buffer(b)) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/compat/numpy/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/numpy/__init__.py new file mode 100644 index 00000000..0552301b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/numpy/__init__.py @@ -0,0 +1,52 @@ +""" support numpy compatibility across versions """ +import warnings + +import numpy as np + +from pandas.util.version import Version + +# numpy versioning +_np_version = np.__version__ +_nlv = Version(_np_version) +np_version_gte1p24 = _nlv >= Version("1.24") +np_version_gte1p24p3 = _nlv >= Version("1.24.3") +np_version_gte1p25 = _nlv >= Version("1.25") +np_version_gt2 = _nlv >= Version("2.0.0.dev0") +is_numpy_dev = _nlv.dev is not None +_min_numpy_ver = "1.22.4" + + +if _nlv < Version(_min_numpy_ver): + raise ImportError( + f"this version of pandas is incompatible with numpy < {_min_numpy_ver}\n" + f"your numpy version is {_np_version}.\n" + f"Please upgrade numpy to >= {_min_numpy_ver} to use this pandas version" + ) + + +np_long: type +np_ulong: type + +if np_version_gt2: + try: + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + r".*In the future `np\.long` will be defined as.*", + FutureWarning, + ) + np_long = np.long # type: ignore[attr-defined] + np_ulong = np.ulong # type: ignore[attr-defined] + except AttributeError: + np_long = np.int_ + np_ulong = np.uint +else: + np_long = np.int_ + np_ulong = np.uint + + +__all__ = [ + "np", + "_np_version", + "is_numpy_dev", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/compat/numpy/function.py b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/numpy/function.py new file mode 100644 index 00000000..a36e25a9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/numpy/function.py @@ -0,0 +1,416 @@ +""" +For compatibility with numpy libraries, pandas functions or methods have to +accept '*args' and '**kwargs' parameters to accommodate numpy arguments that +are not actually used or respected in the pandas implementation. + +To ensure that users do not abuse these parameters, validation is performed in +'validators.py' to make sure that any extra parameters passed correspond ONLY +to those in the numpy signature. Part of that validation includes whether or +not the user attempted to pass in non-default values for these extraneous +parameters. As we want to discourage users from relying on these parameters +when calling the pandas implementation, we want them only to pass in the +default values for these parameters. + +This module provides a set of commonly used default arguments for functions and +methods that are spread throughout the codebase. This module will make it +easier to adjust to future upstream changes in the analogous numpy signatures. 
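+
+As a rough illustration of the pattern: a call like ``np.sum(some_series)``
+forwards numpy-only arguments into the pandas method's ``*args``/``**kwargs``,
+and validators such as ``validate_sum`` below reject any non-default values
+for ``dtype``, ``out``, ``keepdims`` and friends.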
+""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + TypeVar, + cast, + overload, +) + +import numpy as np +from numpy import ndarray + +from pandas._libs.lib import ( + is_bool, + is_integer, +) +from pandas.errors import UnsupportedFunctionCall +from pandas.util._validators import ( + validate_args, + validate_args_and_kwargs, + validate_kwargs, +) + +if TYPE_CHECKING: + from pandas._typing import ( + Axis, + AxisInt, + ) + + AxisNoneT = TypeVar("AxisNoneT", Axis, None) + + +class CompatValidator: + def __init__( + self, + defaults, + fname=None, + method: str | None = None, + max_fname_arg_count=None, + ) -> None: + self.fname = fname + self.method = method + self.defaults = defaults + self.max_fname_arg_count = max_fname_arg_count + + def __call__( + self, + args, + kwargs, + fname=None, + max_fname_arg_count=None, + method: str | None = None, + ) -> None: + if not args and not kwargs: + return None + + fname = self.fname if fname is None else fname + max_fname_arg_count = ( + self.max_fname_arg_count + if max_fname_arg_count is None + else max_fname_arg_count + ) + method = self.method if method is None else method + + if method == "args": + validate_args(fname, args, max_fname_arg_count, self.defaults) + elif method == "kwargs": + validate_kwargs(fname, kwargs, self.defaults) + elif method == "both": + validate_args_and_kwargs( + fname, args, kwargs, max_fname_arg_count, self.defaults + ) + else: + raise ValueError(f"invalid validation method '{method}'") + + +ARGMINMAX_DEFAULTS = {"out": None} +validate_argmin = CompatValidator( + ARGMINMAX_DEFAULTS, fname="argmin", method="both", max_fname_arg_count=1 +) +validate_argmax = CompatValidator( + ARGMINMAX_DEFAULTS, fname="argmax", method="both", max_fname_arg_count=1 +) + + +def process_skipna(skipna: bool | ndarray | None, args) -> tuple[bool, Any]: + if isinstance(skipna, ndarray) or skipna is None: + args = (skipna,) + args + skipna = True + + return skipna, args + + +def validate_argmin_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool: + """ + If 'Series.argmin' is called via the 'numpy' library, the third parameter + in its signature is 'out', which takes either an ndarray or 'None', so + check if the 'skipna' parameter is either an instance of ndarray or is + None, since 'skipna' itself should be a boolean + """ + skipna, args = process_skipna(skipna, args) + validate_argmin(args, kwargs) + return skipna + + +def validate_argmax_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool: + """ + If 'Series.argmax' is called via the 'numpy' library, the third parameter + in its signature is 'out', which takes either an ndarray or 'None', so + check if the 'skipna' parameter is either an instance of ndarray or is + None, since 'skipna' itself should be a boolean + """ + skipna, args = process_skipna(skipna, args) + validate_argmax(args, kwargs) + return skipna + + +ARGSORT_DEFAULTS: dict[str, int | str | None] = {} +ARGSORT_DEFAULTS["axis"] = -1 +ARGSORT_DEFAULTS["kind"] = "quicksort" +ARGSORT_DEFAULTS["order"] = None +ARGSORT_DEFAULTS["kind"] = None + + +validate_argsort = CompatValidator( + ARGSORT_DEFAULTS, fname="argsort", max_fname_arg_count=0, method="both" +) + +# two different signatures of argsort, this second validation for when the +# `kind` param is supported +ARGSORT_DEFAULTS_KIND: dict[str, int | None] = {} +ARGSORT_DEFAULTS_KIND["axis"] = -1 +ARGSORT_DEFAULTS_KIND["order"] = None +validate_argsort_kind = CompatValidator( + ARGSORT_DEFAULTS_KIND, 
fname="argsort", max_fname_arg_count=0, method="both" +) + + +def validate_argsort_with_ascending(ascending: bool | int | None, args, kwargs) -> bool: + """ + If 'Categorical.argsort' is called via the 'numpy' library, the first + parameter in its signature is 'axis', which takes either an integer or + 'None', so check if the 'ascending' parameter has either integer type or is + None, since 'ascending' itself should be a boolean + """ + if is_integer(ascending) or ascending is None: + args = (ascending,) + args + ascending = True + + validate_argsort_kind(args, kwargs, max_fname_arg_count=3) + ascending = cast(bool, ascending) + return ascending + + +CLIP_DEFAULTS: dict[str, Any] = {"out": None} +validate_clip = CompatValidator( + CLIP_DEFAULTS, fname="clip", method="both", max_fname_arg_count=3 +) + + +@overload +def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None: + ... + + +@overload +def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT: + ... + + +def validate_clip_with_axis( + axis: ndarray | AxisNoneT, args, kwargs +) -> AxisNoneT | None: + """ + If 'NDFrame.clip' is called via the numpy library, the third parameter in + its signature is 'out', which can takes an ndarray, so check if the 'axis' + parameter is an instance of ndarray, since 'axis' itself should either be + an integer or None + """ + if isinstance(axis, ndarray): + args = (axis,) + args + # error: Incompatible types in assignment (expression has type "None", + # variable has type "Union[ndarray[Any, Any], str, int]") + axis = None # type: ignore[assignment] + + validate_clip(args, kwargs) + # error: Incompatible return value type (got "Union[ndarray[Any, Any], + # str, int]", expected "Union[str, int, None]") + return axis # type: ignore[return-value] + + +CUM_FUNC_DEFAULTS: dict[str, Any] = {} +CUM_FUNC_DEFAULTS["dtype"] = None +CUM_FUNC_DEFAULTS["out"] = None +validate_cum_func = CompatValidator( + CUM_FUNC_DEFAULTS, method="both", max_fname_arg_count=1 +) +validate_cumsum = CompatValidator( + CUM_FUNC_DEFAULTS, fname="cumsum", method="both", max_fname_arg_count=1 +) + + +def validate_cum_func_with_skipna(skipna: bool, args, kwargs, name) -> bool: + """ + If this function is called via the 'numpy' library, the third parameter in + its signature is 'dtype', which takes either a 'numpy' dtype or 'None', so + check if the 'skipna' parameter is a boolean or not + """ + if not is_bool(skipna): + args = (skipna,) + args + skipna = True + elif isinstance(skipna, np.bool_): + skipna = bool(skipna) + + validate_cum_func(args, kwargs, fname=name) + return skipna + + +ALLANY_DEFAULTS: dict[str, bool | None] = {} +ALLANY_DEFAULTS["dtype"] = None +ALLANY_DEFAULTS["out"] = None +ALLANY_DEFAULTS["keepdims"] = False +ALLANY_DEFAULTS["axis"] = None +validate_all = CompatValidator( + ALLANY_DEFAULTS, fname="all", method="both", max_fname_arg_count=1 +) +validate_any = CompatValidator( + ALLANY_DEFAULTS, fname="any", method="both", max_fname_arg_count=1 +) + +LOGICAL_FUNC_DEFAULTS = {"out": None, "keepdims": False} +validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method="kwargs") + +MINMAX_DEFAULTS = {"axis": None, "dtype": None, "out": None, "keepdims": False} +validate_min = CompatValidator( + MINMAX_DEFAULTS, fname="min", method="both", max_fname_arg_count=1 +) +validate_max = CompatValidator( + MINMAX_DEFAULTS, fname="max", method="both", max_fname_arg_count=1 +) + +RESHAPE_DEFAULTS: dict[str, str] = {"order": "C"} +validate_reshape = CompatValidator( + RESHAPE_DEFAULTS, 
fname="reshape", method="both", max_fname_arg_count=1 +) + +REPEAT_DEFAULTS: dict[str, Any] = {"axis": None} +validate_repeat = CompatValidator( + REPEAT_DEFAULTS, fname="repeat", method="both", max_fname_arg_count=1 +) + +ROUND_DEFAULTS: dict[str, Any] = {"out": None} +validate_round = CompatValidator( + ROUND_DEFAULTS, fname="round", method="both", max_fname_arg_count=1 +) + +SORT_DEFAULTS: dict[str, int | str | None] = {} +SORT_DEFAULTS["axis"] = -1 +SORT_DEFAULTS["kind"] = "quicksort" +SORT_DEFAULTS["order"] = None +validate_sort = CompatValidator(SORT_DEFAULTS, fname="sort", method="kwargs") + +STAT_FUNC_DEFAULTS: dict[str, Any | None] = {} +STAT_FUNC_DEFAULTS["dtype"] = None +STAT_FUNC_DEFAULTS["out"] = None + +SUM_DEFAULTS = STAT_FUNC_DEFAULTS.copy() +SUM_DEFAULTS["axis"] = None +SUM_DEFAULTS["keepdims"] = False +SUM_DEFAULTS["initial"] = None + +PROD_DEFAULTS = SUM_DEFAULTS.copy() + +MEAN_DEFAULTS = SUM_DEFAULTS.copy() + +MEDIAN_DEFAULTS = STAT_FUNC_DEFAULTS.copy() +MEDIAN_DEFAULTS["overwrite_input"] = False +MEDIAN_DEFAULTS["keepdims"] = False + +STAT_FUNC_DEFAULTS["keepdims"] = False + +validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS, method="kwargs") +validate_sum = CompatValidator( + SUM_DEFAULTS, fname="sum", method="both", max_fname_arg_count=1 +) +validate_prod = CompatValidator( + PROD_DEFAULTS, fname="prod", method="both", max_fname_arg_count=1 +) +validate_mean = CompatValidator( + MEAN_DEFAULTS, fname="mean", method="both", max_fname_arg_count=1 +) +validate_median = CompatValidator( + MEDIAN_DEFAULTS, fname="median", method="both", max_fname_arg_count=1 +) + +STAT_DDOF_FUNC_DEFAULTS: dict[str, bool | None] = {} +STAT_DDOF_FUNC_DEFAULTS["dtype"] = None +STAT_DDOF_FUNC_DEFAULTS["out"] = None +STAT_DDOF_FUNC_DEFAULTS["keepdims"] = False +validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS, method="kwargs") + +TAKE_DEFAULTS: dict[str, str | None] = {} +TAKE_DEFAULTS["out"] = None +TAKE_DEFAULTS["mode"] = "raise" +validate_take = CompatValidator(TAKE_DEFAULTS, fname="take", method="kwargs") + + +def validate_take_with_convert(convert: ndarray | bool | None, args, kwargs) -> bool: + """ + If this function is called via the 'numpy' library, the third parameter in + its signature is 'axis', which takes either an ndarray or 'None', so check + if the 'convert' parameter is either an instance of ndarray or is None + """ + if isinstance(convert, ndarray) or convert is None: + args = (convert,) + args + convert = True + + validate_take(args, kwargs, max_fname_arg_count=3, method="both") + return convert + + +TRANSPOSE_DEFAULTS = {"axes": None} +validate_transpose = CompatValidator( + TRANSPOSE_DEFAULTS, fname="transpose", method="both", max_fname_arg_count=0 +) + + +def validate_groupby_func(name: str, args, kwargs, allowed=None) -> None: + """ + 'args' and 'kwargs' should be empty, except for allowed kwargs because all + of their necessary parameters are explicitly listed in the function + signature + """ + if allowed is None: + allowed = [] + + kwargs = set(kwargs) - set(allowed) + + if len(args) + len(kwargs) > 0: + raise UnsupportedFunctionCall( + "numpy operations are not valid with groupby. 
" + f"Use .groupby(...).{name}() instead" + ) + + +RESAMPLER_NUMPY_OPS = ("min", "max", "sum", "prod", "mean", "std", "var") + + +def validate_resampler_func(method: str, args, kwargs) -> None: + """ + 'args' and 'kwargs' should be empty because all of their necessary + parameters are explicitly listed in the function signature + """ + if len(args) + len(kwargs) > 0: + if method in RESAMPLER_NUMPY_OPS: + raise UnsupportedFunctionCall( + "numpy operations are not valid with resample. " + f"Use .resample(...).{method}() instead" + ) + raise TypeError("too many arguments passed in") + + +def validate_minmax_axis(axis: AxisInt | None, ndim: int = 1) -> None: + """ + Ensure that the axis argument passed to min, max, argmin, or argmax is zero + or None, as otherwise it will be incorrectly ignored. + + Parameters + ---------- + axis : int or None + ndim : int, default 1 + + Raises + ------ + ValueError + """ + if axis is None: + return + if axis >= ndim or (axis < 0 and ndim + axis < 0): + raise ValueError(f"`axis` must be fewer than the number of dimensions ({ndim})") + + +_validation_funcs = { + "median": validate_median, + "mean": validate_mean, + "min": validate_min, + "max": validate_max, + "sum": validate_sum, + "prod": validate_prod, +} + + +def validate_func(fname, args, kwargs) -> None: + if fname not in _validation_funcs: + return validate_stat_func(args, kwargs, fname=fname) + + validation_func = _validation_funcs[fname] + return validation_func(args, kwargs) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/compat/pickle_compat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/pickle_compat.py new file mode 100644 index 00000000..e3e8c03e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/pickle_compat.py @@ -0,0 +1,262 @@ +""" +Support pre-0.12 series pickle compatibility. +""" +from __future__ import annotations + +import contextlib +import copy +import io +import pickle as pkl +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs.arrays import NDArrayBacked +from pandas._libs.tslibs import BaseOffset + +from pandas import Index +from pandas.core.arrays import ( + DatetimeArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.internals import BlockManager + +if TYPE_CHECKING: + from collections.abc import Generator + + +def load_reduce(self): + stack = self.stack + args = stack.pop() + func = stack[-1] + + try: + stack[-1] = func(*args) + return + except TypeError as err: + # If we have a deprecated function, + # try to replace and try again. + + msg = "_reconstruct: First argument must be a sub-type of ndarray" + + if msg in str(err): + try: + cls = args[0] + stack[-1] = object.__new__(cls) + return + except TypeError: + pass + elif args and isinstance(args[0], type) and issubclass(args[0], BaseOffset): + # TypeError: object.__new__(Day) is not safe, use Day.__new__() + cls = args[0] + stack[-1] = cls.__new__(*args) + return + elif args and issubclass(args[0], PeriodArray): + cls = args[0] + stack[-1] = NDArrayBacked.__new__(*args) + return + + raise + + +# If classes are moved, provide compat here. 
+_class_locations_map = { + ("pandas.core.sparse.array", "SparseArray"): ("pandas.core.arrays", "SparseArray"), + # 15477 + ("pandas.core.base", "FrozenNDArray"): ("numpy", "ndarray"), + # Re-routing unpickle block logic to go through _unpickle_block instead + # for pandas <= 1.3.5 + ("pandas.core.internals.blocks", "new_block"): ( + "pandas._libs.internals", + "_unpickle_block", + ), + ("pandas.core.indexes.frozen", "FrozenNDArray"): ("numpy", "ndarray"), + ("pandas.core.base", "FrozenList"): ("pandas.core.indexes.frozen", "FrozenList"), + # 10890 + ("pandas.core.series", "TimeSeries"): ("pandas.core.series", "Series"), + ("pandas.sparse.series", "SparseTimeSeries"): ( + "pandas.core.sparse.series", + "SparseSeries", + ), + # 12588, extensions moving + ("pandas._sparse", "BlockIndex"): ("pandas._libs.sparse", "BlockIndex"), + ("pandas.tslib", "Timestamp"): ("pandas._libs.tslib", "Timestamp"), + # 18543 moving period + ("pandas._period", "Period"): ("pandas._libs.tslibs.period", "Period"), + ("pandas._libs.period", "Period"): ("pandas._libs.tslibs.period", "Period"), + # 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype + ("pandas.tslib", "__nat_unpickle"): ( + "pandas._libs.tslibs.nattype", + "__nat_unpickle", + ), + ("pandas._libs.tslib", "__nat_unpickle"): ( + "pandas._libs.tslibs.nattype", + "__nat_unpickle", + ), + # 15998 top-level dirs moving + ("pandas.sparse.array", "SparseArray"): ( + "pandas.core.arrays.sparse", + "SparseArray", + ), + ("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"), + ("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"), + ("pandas.indexes.numeric", "Int64Index"): ( + "pandas.core.indexes.base", + "Index", # updated in 50775 + ), + ("pandas.indexes.range", "RangeIndex"): ("pandas.core.indexes.range", "RangeIndex"), + ("pandas.indexes.multi", "MultiIndex"): ("pandas.core.indexes.multi", "MultiIndex"), + ("pandas.tseries.index", "_new_DatetimeIndex"): ( + "pandas.core.indexes.datetimes", + "_new_DatetimeIndex", + ), + ("pandas.tseries.index", "DatetimeIndex"): ( + "pandas.core.indexes.datetimes", + "DatetimeIndex", + ), + ("pandas.tseries.period", "PeriodIndex"): ( + "pandas.core.indexes.period", + "PeriodIndex", + ), + # 19269, arrays moving + ("pandas.core.categorical", "Categorical"): ("pandas.core.arrays", "Categorical"), + # 19939, add timedeltaindex, float64index compat from 15998 move + ("pandas.tseries.tdi", "TimedeltaIndex"): ( + "pandas.core.indexes.timedeltas", + "TimedeltaIndex", + ), + ("pandas.indexes.numeric", "Float64Index"): ( + "pandas.core.indexes.base", + "Index", # updated in 50775 + ), + # 50775, remove Int64Index, UInt64Index & Float64Index from codabase + ("pandas.core.indexes.numeric", "Int64Index"): ( + "pandas.core.indexes.base", + "Index", + ), + ("pandas.core.indexes.numeric", "UInt64Index"): ( + "pandas.core.indexes.base", + "Index", + ), + ("pandas.core.indexes.numeric", "Float64Index"): ( + "pandas.core.indexes.base", + "Index", + ), + ("pandas.core.arrays.sparse.dtype", "SparseDtype"): ( + "pandas.core.dtypes.dtypes", + "SparseDtype", + ), +} + + +# our Unpickler sub-class to override methods and some dispatcher +# functions for compat and uses a non-public class of the pickle module. 
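+
+# Sketch of the effect: a legacy pickle referencing
+# ("pandas.indexes.base", "Index") is re-routed by find_class() below to
+# ("pandas.core.indexes.base", "Index") via _class_locations_map.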
+ + +class Unpickler(pkl._Unpickler): + def find_class(self, module, name): + # override superclass + key = (module, name) + module, name = _class_locations_map.get(key, key) + return super().find_class(module, name) + + +Unpickler.dispatch = copy.copy(Unpickler.dispatch) +Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce + + +def load_newobj(self) -> None: + args = self.stack.pop() + cls = self.stack[-1] + + # compat + if issubclass(cls, Index): + obj = object.__new__(cls) + elif issubclass(cls, DatetimeArray) and not args: + arr = np.array([], dtype="M8[ns]") + obj = cls.__new__(cls, arr, arr.dtype) + elif issubclass(cls, TimedeltaArray) and not args: + arr = np.array([], dtype="m8[ns]") + obj = cls.__new__(cls, arr, arr.dtype) + elif cls is BlockManager and not args: + obj = cls.__new__(cls, (), [], False) + else: + obj = cls.__new__(cls, *args) + + self.stack[-1] = obj + + +Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj + + +def load_newobj_ex(self) -> None: + kwargs = self.stack.pop() + args = self.stack.pop() + cls = self.stack.pop() + + # compat + if issubclass(cls, Index): + obj = object.__new__(cls) + else: + obj = cls.__new__(cls, *args, **kwargs) + self.append(obj) + + +try: + Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex +except (AttributeError, KeyError): + pass + + +def load(fh, encoding: str | None = None, is_verbose: bool = False): + """ + Load a pickle, with a provided encoding, + + Parameters + ---------- + fh : a filelike object + encoding : an optional encoding + is_verbose : show exception output + """ + try: + fh.seek(0) + if encoding is not None: + up = Unpickler(fh, encoding=encoding) + else: + up = Unpickler(fh) + # "Unpickler" has no attribute "is_verbose" [attr-defined] + up.is_verbose = is_verbose # type: ignore[attr-defined] + + return up.load() + except (ValueError, TypeError): + raise + + +def loads( + bytes_object: bytes, + *, + fix_imports: bool = True, + encoding: str = "ASCII", + errors: str = "strict", +): + """ + Analogous to pickle._loads. + """ + fd = io.BytesIO(bytes_object) + return Unpickler( + fd, fix_imports=fix_imports, encoding=encoding, errors=errors + ).load() + + +@contextlib.contextmanager +def patch_pickle() -> Generator[None, None, None]: + """ + Temporarily patch pickle to use our unpickler. 
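+
+    A minimal usage sketch (``legacy_bytes`` is a placeholder name)::
+
+        with patch_pickle():
+            obj = pkl.loads(legacy_bytes)  # routed through the compat loads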
+ """ + orig_loads = pkl.loads + try: + setattr(pkl, "loads", loads) + yield + finally: + setattr(pkl, "loads", orig_loads) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/compat/pyarrow.py b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/pyarrow.py new file mode 100644 index 00000000..be3a038d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/compat/pyarrow.py @@ -0,0 +1,31 @@ +""" support pyarrow compatibility across versions """ + +from __future__ import annotations + +from pandas.util.version import Version + +try: + import pyarrow as pa + + _palv = Version(Version(pa.__version__).base_version) + pa_version_under7p0 = _palv < Version("7.0.0") + pa_version_under8p0 = _palv < Version("8.0.0") + pa_version_under9p0 = _palv < Version("9.0.0") + pa_version_under10p0 = _palv < Version("10.0.0") + pa_version_under11p0 = _palv < Version("11.0.0") + pa_version_under12p0 = _palv < Version("12.0.0") + pa_version_under13p0 = _palv < Version("13.0.0") + pa_version_under14p0 = _palv < Version("14.0.0") + pa_version_under14p1 = _palv < Version("14.0.1") + pa_version_under15p0 = _palv < Version("15.0.0") +except ImportError: + pa_version_under7p0 = True + pa_version_under8p0 = True + pa_version_under9p0 = True + pa_version_under10p0 = True + pa_version_under11p0 = True + pa_version_under12p0 = True + pa_version_under13p0 = True + pa_version_under14p0 = True + pa_version_under14p1 = True + pa_version_under15p0 = True diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/conftest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/conftest.py new file mode 100644 index 00000000..b1b35448 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/conftest.py @@ -0,0 +1,2011 @@ +""" +This file is very long and growing, but it was decided to not split it yet, as +it's still manageable (2020-03-17, ~1.1k LoC). See gh-31989 + +Instead of splitting it was decided to define sections here: +- Configuration / Settings +- Autouse fixtures +- Common arguments +- Missing values & co. 
+- Classes +- Indices +- Series' +- DataFrames +- Operators & Operations +- Data sets/files +- Time zones +- Dtypes +- Misc +""" +from __future__ import annotations + +from collections import abc +from datetime import ( + date, + datetime, + time, + timedelta, + timezone, +) +from decimal import Decimal +import operator +import os +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Callable, +) + +from dateutil.tz import ( + tzlocal, + tzutc, +) +import hypothesis +from hypothesis import strategies as st +import numpy as np +import pytest +from pytz import ( + FixedOffset, + utc, +) + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + IntervalDtype, +) + +import pandas as pd +from pandas import ( + DataFrame, + Interval, + Period, + Series, + Timedelta, + Timestamp, +) +import pandas._testing as tm +from pandas.core import ops +from pandas.core.indexes.api import ( + Index, + MultiIndex, +) +from pandas.util.version import Version + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + ) + +try: + import pyarrow as pa +except ImportError: + has_pyarrow = False +else: + del pa + has_pyarrow = True + +import zoneinfo + +try: + zoneinfo.ZoneInfo("UTC") +except zoneinfo.ZoneInfoNotFoundError: + zoneinfo = None # type: ignore[assignment] + + +# ---------------------------------------------------------------- +# Configuration / Settings +# ---------------------------------------------------------------- +# pytest + + +def pytest_addoption(parser) -> None: + parser.addoption( + "--no-strict-data-files", + action="store_false", + help="Don't fail if a test is skipped for missing data file.", + ) + + +def ignore_doctest_warning(item: pytest.Item, path: str, message: str) -> None: + """Ignore doctest warning. + + Parameters + ---------- + item : pytest.Item + pytest test item. + path : str + Module path to Python object, e.g. "pandas.core.frame.DataFrame.append". A + warning will be filtered when item.name ends with in given path. So it is + sufficient to specify e.g. "DataFrame.append". + message : str + Message to be filtered. + """ + if item.name.endswith(path): + item.add_marker(pytest.mark.filterwarnings(f"ignore:{message}")) + + +def pytest_collection_modifyitems(items, config) -> None: + is_doctest = config.getoption("--doctest-modules") or config.getoption( + "--doctest-cython", default=False + ) + + # Warnings from doctests that can be ignored; place reason in comment above. 
+    # Each entry specifies (path, message) - see the ignore_doctest_warning function
+    ignored_doctest_warnings = [
+        ("is_int64_dtype", "is_int64_dtype is deprecated"),
+        ("is_interval_dtype", "is_interval_dtype is deprecated"),
+        ("is_period_dtype", "is_period_dtype is deprecated"),
+        ("is_datetime64tz_dtype", "is_datetime64tz_dtype is deprecated"),
+        ("is_categorical_dtype", "is_categorical_dtype is deprecated"),
+        ("is_sparse", "is_sparse is deprecated"),
+        ("NDFrame.replace", "The 'method' keyword"),
+        ("NDFrame.replace", "Series.replace without 'value'"),
+        ("Series.idxmin", "The behavior of Series.idxmin"),
+        ("Series.idxmax", "The behavior of Series.idxmax"),
+        ("SeriesGroupBy.idxmin", "The behavior of Series.idxmin"),
+        ("SeriesGroupBy.idxmax", "The behavior of Series.idxmax"),
+        # Docstring divides by zero to show behavior difference
+        ("missing.mask_zero_div_zero", "divide by zero encountered"),
+        (
+            "to_pydatetime",
+            "The behavior of DatetimeProperties.to_pydatetime is deprecated",
+        ),
+        (
+            "pandas.core.generic.NDFrame.bool",
+            "(Series|DataFrame).bool is now deprecated and will be removed "
+            "in future version of pandas",
+        ),
+        (
+            "pandas.core.generic.NDFrame.first",
+            "first is deprecated and will be removed in a future version. "
+            "Please create a mask and filter using `.loc` instead",
+        ),
+        (
+            "Resampler.fillna",
+            "DatetimeIndexResampler.fillna is deprecated",
+        ),
+        (
+            "DataFrameGroupBy.fillna",
+            "DataFrameGroupBy.fillna with 'method' is deprecated",
+        ),
+        (
+            "DataFrameGroupBy.fillna",
+            "DataFrame.fillna with 'method' is deprecated",
+        ),
+    ]
+
+    for item in items:
+        if is_doctest:
+            # autouse=True for the add_doctest_imports can lead to expensive teardowns
+            # since doctest_namespace is a session fixture
+            item.add_marker(pytest.mark.usefixtures("add_doctest_imports"))
+
+            for path, message in ignored_doctest_warnings:
+                ignore_doctest_warning(item, path, message)
+
+        # mark all tests in the pandas/tests/frame directory with "arraymanager"
+        if "/frame/" in item.nodeid:
+            item.add_marker(pytest.mark.arraymanager)
+
+
+hypothesis_health_checks = [hypothesis.HealthCheck.too_slow]
+if Version(hypothesis.__version__) >= Version("6.83.2"):
+    hypothesis_health_checks.append(hypothesis.HealthCheck.differing_executors)
+
+# Hypothesis
+hypothesis.settings.register_profile(
+    "ci",
+    # Hypothesis timing checks are tuned for scalars by default, so we bump
+    # them from 200ms to 500ms per test case as the global default. If this
+    # is too short for a specific test, (a) try to make it faster, and (b)
+    # if it really is slow add `@settings(deadline=...)` with a working value,
+    # or `deadline=None` to entirely disable timeouts for that test.
+    # 2022-02-09: Changed deadline from 500 -> None. Deadline leads to
+    # non-actionable, flaky CI failures (GH 24641, 44969, 45118)
+    deadline=None,
+    suppress_health_check=tuple(hypothesis_health_checks),
+)
+hypothesis.settings.load_profile("ci")
+
+# Registering these strategies makes them globally available via st.from_type,
+# which is used for offsets in tests/tseries/offsets/test_offsets_properties.py
+for name in "MonthBegin MonthEnd BMonthBegin BMonthEnd".split():
+    cls = getattr(pd.tseries.offsets, name)
+    st.register_type_strategy(
+        cls, st.builds(cls, n=st.integers(-99, 99), normalize=st.booleans())
+    )
+
+for name in "YearBegin YearEnd BYearBegin BYearEnd".split():
+    cls = getattr(pd.tseries.offsets, name)
+    st.register_type_strategy(
+        cls,
+        st.builds(
+            cls,
+            n=st.integers(-5, 5),
+            normalize=st.booleans(),
+            month=st.integers(min_value=1, max_value=12),
+        ),
+    )
+
+for name in "QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd".split():
+    cls = getattr(pd.tseries.offsets, name)
+    st.register_type_strategy(
+        cls,
+        st.builds(
+            cls,
+            n=st.integers(-24, 24),
+            normalize=st.booleans(),
+            startingMonth=st.integers(min_value=1, max_value=12),
+        ),
+    )
+
+
+@pytest.fixture
+def add_doctest_imports(doctest_namespace) -> None:
+    """
+    Make `np` and `pd` names available for doctests.
+    """
+    doctest_namespace["np"] = np
+    doctest_namespace["pd"] = pd
+
+
+# ----------------------------------------------------------------
+# Autouse fixtures
+# ----------------------------------------------------------------
+@pytest.fixture(autouse=True)
+def configure_tests() -> None:
+    """
+    Configure settings for all tests and test modules.
+    """
+    pd.set_option("chained_assignment", "raise")
+
+
+# ----------------------------------------------------------------
+# Common arguments
+# ----------------------------------------------------------------
+@pytest.fixture(params=[0, 1, "index", "columns"], ids=lambda x: f"axis={repr(x)}")
+def axis(request):
+    """
+    Fixture for returning the axis numbers of a DataFrame.
+    """
+    return request.param
+
+
+axis_frame = axis
+
+
+@pytest.fixture(params=[1, "columns"], ids=lambda x: f"axis={repr(x)}")
+def axis_1(request):
+    """
+    Fixture for returning aliases of axis 1 of a DataFrame.
+    """
+    return request.param
+
+
+@pytest.fixture(params=[True, False, None])
+def observed(request):
+    """
+    Pass in the observed keyword to groupby for [True, False].
+    This indicates whether categoricals should return values for
+    values which are not in the grouper [False / None], or only values which
+    appear in the grouper [True]. [None] is supported for future compatibility
+    if we decide to change the default (and would need to warn if this
+    parameter is not passed).
+    """
+    return request.param
+
+
+@pytest.fixture(params=[True, False, None])
+def ordered(request):
+    """
+    Boolean 'ordered' parameter for Categorical.
+    """
+    return request.param
+
+
+@pytest.fixture(params=[True, False])
+def skipna(request):
+    """
+    Boolean 'skipna' parameter.
+    """
+    return request.param
+
+
+@pytest.fixture(params=["first", "last", False])
+def keep(request):
+    """
+    Valid values for the 'keep' parameter used in
+    .duplicated or .drop_duplicates
+    """
+    return request.param
+
+
+@pytest.fixture(params=["both", "neither", "left", "right"])
+def inclusive_endpoints_fixture(request):
+    """
+    Fixture for trying all interval 'inclusive' parameters.
+    """
+    return request.param
+
+
+@pytest.fixture(params=["left", "right", "both", "neither"])
+def closed(request):
+    """
+    Fixture for trying all interval closed parameters.
+ """ + return request.param + + +@pytest.fixture(params=["left", "right", "both", "neither"]) +def other_closed(request): + """ + Secondary closed fixture to allow parametrizing over all pairs of closed. + """ + return request.param + + +@pytest.fixture( + params=[ + None, + "gzip", + "bz2", + "zip", + "xz", + "tar", + pytest.param("zstd", marks=td.skip_if_no("zstandard")), + ] +) +def compression(request): + """ + Fixture for trying common compression types in compression tests. + """ + return request.param + + +@pytest.fixture( + params=[ + "gzip", + "bz2", + "zip", + "xz", + "tar", + pytest.param("zstd", marks=td.skip_if_no("zstandard")), + ] +) +def compression_only(request): + """ + Fixture for trying common compression types in compression tests excluding + uncompressed case. + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def writable(request): + """ + Fixture that an array is writable. + """ + return request.param + + +@pytest.fixture(params=["inner", "outer", "left", "right"]) +def join_type(request): + """ + Fixture for trying all types of join operations. + """ + return request.param + + +@pytest.fixture(params=["nlargest", "nsmallest"]) +def nselect_method(request): + """ + Fixture for trying all nselect methods. + """ + return request.param + + +# ---------------------------------------------------------------- +# Missing values & co. +# ---------------------------------------------------------------- +@pytest.fixture(params=tm.NULL_OBJECTS, ids=lambda x: type(x).__name__) +def nulls_fixture(request): + """ + Fixture for each null type in pandas. + """ + return request.param + + +nulls_fixture2 = nulls_fixture # Generate cartesian product of nulls_fixture + + +@pytest.fixture(params=[None, np.nan, pd.NaT]) +def unique_nulls_fixture(request): + """ + Fixture for each null type in pandas, each null type exactly once. + """ + return request.param + + +# Generate cartesian product of unique_nulls_fixture: +unique_nulls_fixture2 = unique_nulls_fixture + + +@pytest.fixture(params=tm.NP_NAT_OBJECTS, ids=lambda x: type(x).__name__) +def np_nat_fixture(request): + """ + Fixture for each NaT type in numpy. + """ + return request.param + + +# Generate cartesian product of np_nat_fixture: +np_nat_fixture2 = np_nat_fixture + + +# ---------------------------------------------------------------- +# Classes +# ---------------------------------------------------------------- + + +@pytest.fixture(params=[DataFrame, Series]) +def frame_or_series(request): + """ + Fixture to parametrize over DataFrame and Series. 
+ """ + return request.param + + +@pytest.fixture(params=[Index, Series], ids=["index", "series"]) +def index_or_series(request): + """ + Fixture to parametrize over Index and Series, made necessary by a mypy + bug, giving an error: + + List item 0 has incompatible type "Type[Series]"; expected "Type[PandasObject]" + + See GH#29725 + """ + return request.param + + +# Generate cartesian product of index_or_series fixture: +index_or_series2 = index_or_series + + +@pytest.fixture(params=[Index, Series, pd.array], ids=["index", "series", "array"]) +def index_or_series_or_array(request): + """ + Fixture to parametrize over Index, Series, and ExtensionArray + """ + return request.param + + +@pytest.fixture(params=[Index, Series, DataFrame, pd.array], ids=lambda x: x.__name__) +def box_with_array(request): + """ + Fixture to test behavior for Index, Series, DataFrame, and pandas Array + classes + """ + return request.param + + +box_with_array2 = box_with_array + + +@pytest.fixture +def dict_subclass(): + """ + Fixture for a dictionary subclass. + """ + + class TestSubDict(dict): + def __init__(self, *args, **kwargs) -> None: + dict.__init__(self, *args, **kwargs) + + return TestSubDict + + +@pytest.fixture +def non_dict_mapping_subclass(): + """ + Fixture for a non-mapping dictionary subclass. + """ + + class TestNonDictMapping(abc.Mapping): + def __init__(self, underlying_dict) -> None: + self._data = underlying_dict + + def __getitem__(self, key): + return self._data.__getitem__(key) + + def __iter__(self) -> Iterator: + return self._data.__iter__() + + def __len__(self) -> int: + return self._data.__len__() + + return TestNonDictMapping + + +# ---------------------------------------------------------------- +# Indices +# ---------------------------------------------------------------- +@pytest.fixture +def multiindex_year_month_day_dataframe_random_data(): + """ + DataFrame with 3 level MultiIndex (year, month, day) covering + first 100 business days from 2000-01-01 with random data + """ + tdf = tm.makeTimeDataFrame(100) + ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum() + # use int64 Index, to make sure things work + ymd.index = ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels]) + ymd.index.set_names(["year", "month", "day"], inplace=True) + return ymd + + +@pytest.fixture +def lexsorted_two_level_string_multiindex() -> MultiIndex: + """ + 2-level MultiIndex, lexsorted, with string names. 
+ """ + return MultiIndex( + levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], + codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=["first", "second"], + ) + + +@pytest.fixture +def multiindex_dataframe_random_data( + lexsorted_two_level_string_multiindex, +) -> DataFrame: + """DataFrame with 2 level MultiIndex with random data""" + index = lexsorted_two_level_string_multiindex + return DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), + index=index, + columns=Index(["A", "B", "C"], name="exp"), + ) + + +def _create_multiindex(): + """ + MultiIndex used to test the general functionality of this object + """ + + # See Also: tests.multi.conftest.idx + major_axis = Index(["foo", "bar", "baz", "qux"]) + minor_axis = Index(["one", "two"]) + + major_codes = np.array([0, 0, 1, 2, 3, 3]) + minor_codes = np.array([0, 1, 0, 1, 0, 1]) + index_names = ["first", "second"] + return MultiIndex( + levels=[major_axis, minor_axis], + codes=[major_codes, minor_codes], + names=index_names, + verify_integrity=False, + ) + + +def _create_mi_with_dt64tz_level(): + """ + MultiIndex with a level that is a tzaware DatetimeIndex. + """ + # GH#8367 round trip with pickle + return MultiIndex.from_product( + [[1, 2], ["a", "b"], pd.date_range("20130101", periods=3, tz="US/Eastern")], + names=["one", "two", "three"], + ) + + +indices_dict = { + "string": tm.makeStringIndex(100), + "datetime": tm.makeDateIndex(100), + "datetime-tz": tm.makeDateIndex(100, tz="US/Pacific"), + "period": tm.makePeriodIndex(100), + "timedelta": tm.makeTimedeltaIndex(100), + "range": tm.makeRangeIndex(100), + "int8": tm.makeIntIndex(100, dtype="int8"), + "int16": tm.makeIntIndex(100, dtype="int16"), + "int32": tm.makeIntIndex(100, dtype="int32"), + "int64": tm.makeIntIndex(100, dtype="int64"), + "uint8": tm.makeUIntIndex(100, dtype="uint8"), + "uint16": tm.makeUIntIndex(100, dtype="uint16"), + "uint32": tm.makeUIntIndex(100, dtype="uint32"), + "uint64": tm.makeUIntIndex(100, dtype="uint64"), + "float32": tm.makeFloatIndex(100, dtype="float32"), + "float64": tm.makeFloatIndex(100, dtype="float64"), + "bool-object": tm.makeBoolIndex(10).astype(object), + "bool-dtype": Index(np.random.default_rng(2).standard_normal(10) < 0), + "complex64": tm.makeNumericIndex(100, dtype="float64").astype("complex64"), + "complex128": tm.makeNumericIndex(100, dtype="float64").astype("complex128"), + "categorical": tm.makeCategoricalIndex(100), + "interval": tm.makeIntervalIndex(100), + "empty": Index([]), + "tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])), + "mi-with-dt64tz-level": _create_mi_with_dt64tz_level(), + "multi": _create_multiindex(), + "repeats": Index([0, 0, 1, 1, 2, 2]), + "nullable_int": Index(np.arange(100), dtype="Int64"), + "nullable_uint": Index(np.arange(100), dtype="UInt16"), + "nullable_float": Index(np.arange(100), dtype="Float32"), + "nullable_bool": Index(np.arange(100).astype(bool), dtype="boolean"), + "string-python": Index(pd.array(tm.makeStringIndex(100), dtype="string[python]")), +} +if has_pyarrow: + idx = Index(pd.array(tm.makeStringIndex(100), dtype="string[pyarrow]")) + indices_dict["string-pyarrow"] = idx + + +@pytest.fixture(params=indices_dict.keys()) +def index(request): + """ + Fixture for many "simple" kinds of indices. + + These indices are unlikely to cover corner cases, e.g. + - no names + - no NaTs/NaNs + - no values near implementation bounds + - ... + """ + # copy to avoid mutation, e.g. 
setting .name + return indices_dict[request.param].copy() + + +# Needed to generate cartesian product of indices +index_fixture2 = index + + +@pytest.fixture( + params=[ + key for key, value in indices_dict.items() if not isinstance(value, MultiIndex) + ] +) +def index_flat(request): + """ + index fixture, but excluding MultiIndex cases. + """ + key = request.param + return indices_dict[key].copy() + + +# Alias so we can test with cartesian product of index_flat +index_flat2 = index_flat + + +@pytest.fixture( + params=[ + key + for key, value in indices_dict.items() + if not ( + key.startswith(("int", "uint", "float")) + or key in ["range", "empty", "repeats", "bool-dtype"] + ) + and not isinstance(value, MultiIndex) + ] +) +def index_with_missing(request): + """ + Fixture for indices with missing values. + + Integer-dtype and empty cases are excluded because they cannot hold missing + values. + + MultiIndex is excluded because isna() is not defined for MultiIndex. + """ + + # GH 35538. Use deep copy to avoid illusive bug on np-dev + # GHA pipeline that writes into indices_dict despite copy + ind = indices_dict[request.param].copy(deep=True) + vals = ind.values.copy() + if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]: + # For setting missing values in the top level of MultiIndex + vals = ind.tolist() + vals[0] = (None,) + vals[0][1:] + vals[-1] = (None,) + vals[-1][1:] + return MultiIndex.from_tuples(vals) + else: + vals[0] = None + vals[-1] = None + return type(ind)(vals) + + +# ---------------------------------------------------------------- +# Series' +# ---------------------------------------------------------------- +@pytest.fixture +def string_series() -> Series: + """ + Fixture for Series of floats with Index of unique strings + """ + s = tm.makeStringSeries() + s.name = "series" + return s + + +@pytest.fixture +def object_series() -> Series: + """ + Fixture for Series of dtype object with Index of unique strings + """ + s = tm.makeObjectSeries() + s.name = "objects" + return s + + +@pytest.fixture +def datetime_series() -> Series: + """ + Fixture for Series of floats with DatetimeIndex + """ + s = tm.makeTimeSeries() + s.name = "ts" + return s + + +def _create_series(index): + """Helper for the _series dict""" + size = len(index) + data = np.random.default_rng(2).standard_normal(size) + return Series(data, index=index, name="a", copy=False) + + +_series = { + f"series-with-{index_id}-index": _create_series(index) + for index_id, index in indices_dict.items() +} + + +@pytest.fixture +def series_with_simple_index(index) -> Series: + """ + Fixture for tests on series with changing types of indices. + """ + return _create_series(index) + + +@pytest.fixture +def series_with_multilevel_index() -> Series: + """ + Fixture with a Series with a 2-level MultiIndex. 
+ """ + arrays = [ + ["bar", "bar", "baz", "baz", "qux", "qux", "foo", "foo"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + tuples = zip(*arrays) + index = MultiIndex.from_tuples(tuples) + data = np.random.default_rng(2).standard_normal(8) + ser = Series(data, index=index) + ser.iloc[3] = np.nan + return ser + + +_narrow_series = { + f"{dtype.__name__}-series": tm.make_rand_series(name="a", dtype=dtype) + for dtype in tm.NARROW_NP_DTYPES +} + + +_index_or_series_objs = {**indices_dict, **_series, **_narrow_series} + + +@pytest.fixture(params=_index_or_series_objs.keys()) +def index_or_series_obj(request): + """ + Fixture for tests on indexes, series and series with a narrow dtype + copy to avoid mutation, e.g. setting .name + """ + return _index_or_series_objs[request.param].copy(deep=True) + + +_typ_objects_series = { + f"{dtype.__name__}-series": Series(dtype) for dtype in tm.PYTHON_DATA_TYPES +} + + +_index_or_series_memory_objs = { + **indices_dict, + **_series, + **_narrow_series, + **_typ_objects_series, +} + + +@pytest.fixture(params=_index_or_series_memory_objs.keys()) +def index_or_series_memory_obj(request): + """ + Fixture for tests on indexes, series, series with a narrow dtype and + series with empty objects type + copy to avoid mutation, e.g. setting .name + """ + return _index_or_series_memory_objs[request.param].copy(deep=True) + + +# ---------------------------------------------------------------- +# DataFrames +# ---------------------------------------------------------------- +@pytest.fixture +def int_frame() -> DataFrame: + """ + Fixture for DataFrame of ints with index of unique strings + + Columns are ['A', 'B', 'C', 'D'] + + A B C D + vpBeWjM651 1 0 1 0 + 5JyxmrP1En -1 0 0 0 + qEDaoD49U2 -1 1 0 0 + m66TkTfsFe 0 0 0 0 + EHPaNzEUFm -1 0 -1 0 + fpRJCevQhi 2 0 0 0 + OlQvnmfi3Q 0 0 -2 0 + ... .. .. .. .. + uB1FPlz4uP 0 0 0 1 + EcSe6yNzCU 0 0 -1 0 + L50VudaiI8 -1 1 -2 0 + y3bpw4nwIp 0 -1 0 0 + H0RdLLwrCT 1 1 0 0 + rY82K0vMwm 0 0 0 0 + 1OPIUjnkjk 2 0 0 0 + + [30 rows x 4 columns] + """ + return DataFrame(tm.getSeriesData()).astype("int64") + + +@pytest.fixture +def datetime_frame() -> DataFrame: + """ + Fixture for DataFrame of floats with DatetimeIndex + + Columns are ['A', 'B', 'C', 'D'] + + A B C D + 2000-01-03 -1.122153 0.468535 0.122226 1.693711 + 2000-01-04 0.189378 0.486100 0.007864 -1.216052 + 2000-01-05 0.041401 -0.835752 -0.035279 -0.414357 + 2000-01-06 0.430050 0.894352 0.090719 0.036939 + 2000-01-07 -0.620982 -0.668211 -0.706153 1.466335 + 2000-01-10 -0.752633 0.328434 -0.815325 0.699674 + 2000-01-11 -2.236969 0.615737 -0.829076 -1.196106 + ... ... ... ... ... + 2000-02-03 1.642618 -0.579288 0.046005 1.385249 + 2000-02-04 -0.544873 -1.160962 -0.284071 -1.418351 + 2000-02-07 -2.656149 -0.601387 1.410148 0.444150 + 2000-02-08 -1.201881 -1.289040 0.772992 -1.445300 + 2000-02-09 1.377373 0.398619 1.008453 -0.928207 + 2000-02-10 0.473194 -0.636677 0.984058 0.511519 + 2000-02-11 -0.965556 0.408313 -1.312844 -0.381948 + + [30 rows x 4 columns] + """ + return DataFrame(tm.getTimeSeriesData()) + + +@pytest.fixture +def float_frame() -> DataFrame: + """ + Fixture for DataFrame of floats with index of unique strings + + Columns are ['A', 'B', 'C', 'D']. 
+ + A B C D + P7GACiRnxd -0.465578 -0.361863 0.886172 -0.053465 + qZKh6afn8n -0.466693 -0.373773 0.266873 1.673901 + tkp0r6Qble 0.148691 -0.059051 0.174817 1.598433 + wP70WOCtv8 0.133045 -0.581994 -0.992240 0.261651 + M2AeYQMnCz -1.207959 -0.185775 0.588206 0.563938 + QEPzyGDYDo -0.381843 -0.758281 0.502575 -0.565053 + r78Jwns6dn -0.653707 0.883127 0.682199 0.206159 + ... ... ... ... ... + IHEGx9NO0T -0.277360 0.113021 -1.018314 0.196316 + lPMj8K27FA -1.313667 -0.604776 -1.305618 -0.863999 + qa66YMWQa5 1.110525 0.475310 -0.747865 0.032121 + yOa0ATsmcE -0.431457 0.067094 0.096567 -0.264962 + 65znX3uRNG 1.528446 0.160416 -0.109635 -0.032987 + eCOBvKqf3e 0.235281 1.622222 0.781255 0.392871 + xSucinXxuV -1.263557 0.252799 -0.552247 0.400426 + + [30 rows x 4 columns] + """ + return DataFrame(tm.getSeriesData()) + + +@pytest.fixture +def mixed_type_frame() -> DataFrame: + """ + Fixture for DataFrame of float/int/string columns with RangeIndex + Columns are ['a', 'b', 'c', 'float32', 'int32']. + """ + return DataFrame( + { + "a": 1.0, + "b": 2, + "c": "foo", + "float32": np.array([1.0] * 10, dtype="float32"), + "int32": np.array([1] * 10, dtype="int32"), + }, + index=np.arange(10), + ) + + +@pytest.fixture +def rand_series_with_duplicate_datetimeindex() -> Series: + """ + Fixture for Series with a DatetimeIndex that has duplicates. + """ + dates = [ + datetime(2000, 1, 2), + datetime(2000, 1, 2), + datetime(2000, 1, 2), + datetime(2000, 1, 3), + datetime(2000, 1, 3), + datetime(2000, 1, 3), + datetime(2000, 1, 4), + datetime(2000, 1, 4), + datetime(2000, 1, 4), + datetime(2000, 1, 5), + ] + + return Series(np.random.default_rng(2).standard_normal(len(dates)), index=dates) + + +# ---------------------------------------------------------------- +# Scalars +# ---------------------------------------------------------------- +@pytest.fixture( + params=[ + (Interval(left=0, right=5), IntervalDtype("int64", "right")), + (Interval(left=0.1, right=0.5), IntervalDtype("float64", "right")), + (Period("2012-01", freq="M"), "period[M]"), + (Period("2012-02-01", freq="D"), "period[D]"), + ( + Timestamp("2011-01-01", tz="US/Eastern"), + DatetimeTZDtype(unit="s", tz="US/Eastern"), + ), + (Timedelta(seconds=500), "timedelta64[ns]"), + ] +) +def ea_scalar_and_dtype(request): + return request.param + + +# ---------------------------------------------------------------- +# Operators & Operations +# ---------------------------------------------------------------- + + +@pytest.fixture(params=tm.arithmetic_dunder_methods) +def all_arithmetic_operators(request): + """ + Fixture for dunder names for common arithmetic operations. + """ + return request.param + + +@pytest.fixture( + params=[ + operator.add, + ops.radd, + operator.sub, + ops.rsub, + operator.mul, + ops.rmul, + operator.truediv, + ops.rtruediv, + operator.floordiv, + ops.rfloordiv, + operator.mod, + ops.rmod, + operator.pow, + ops.rpow, + operator.eq, + operator.ne, + operator.lt, + operator.le, + operator.gt, + operator.ge, + operator.and_, + ops.rand_, + operator.xor, + ops.rxor, + operator.or_, + ops.ror_, + ] +) +def all_binary_operators(request): + """ + Fixture for operator and roperator arithmetic, comparison, and logical ops. 
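+
+    Unlike all_arithmetic_functions below, the comparison and logical
+    dunders are included here as well.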
+ """ + return request.param + + +@pytest.fixture( + params=[ + operator.add, + ops.radd, + operator.sub, + ops.rsub, + operator.mul, + ops.rmul, + operator.truediv, + ops.rtruediv, + operator.floordiv, + ops.rfloordiv, + operator.mod, + ops.rmod, + operator.pow, + ops.rpow, + ] +) +def all_arithmetic_functions(request): + """ + Fixture for operator and roperator arithmetic functions. + + Notes + ----- + This includes divmod and rdivmod, whereas all_arithmetic_operators + does not. + """ + return request.param + + +_all_numeric_reductions = [ + "count", + "sum", + "max", + "min", + "mean", + "prod", + "std", + "var", + "median", + "kurt", + "skew", + "sem", +] + + +@pytest.fixture(params=_all_numeric_reductions) +def all_numeric_reductions(request): + """ + Fixture for numeric reduction names. + """ + return request.param + + +_all_boolean_reductions = ["all", "any"] + + +@pytest.fixture(params=_all_boolean_reductions) +def all_boolean_reductions(request): + """ + Fixture for boolean reduction names. + """ + return request.param + + +_all_reductions = _all_numeric_reductions + _all_boolean_reductions + + +@pytest.fixture(params=_all_reductions) +def all_reductions(request): + """ + Fixture for all (boolean + numeric) reduction names. + """ + return request.param + + +@pytest.fixture( + params=[ + operator.eq, + operator.ne, + operator.gt, + operator.ge, + operator.lt, + operator.le, + ] +) +def comparison_op(request): + """ + Fixture for operator module comparison functions. + """ + return request.param + + +@pytest.fixture(params=["__le__", "__lt__", "__ge__", "__gt__"]) +def compare_operators_no_eq_ne(request): + """ + Fixture for dunder names for compare operations except == and != + + * >= + * > + * < + * <= + """ + return request.param + + +@pytest.fixture( + params=["__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__"] +) +def all_logical_operators(request): + """ + Fixture for dunder names for common logical operations + + * | + * & + * ^ + """ + return request.param + + +_all_numeric_accumulations = ["cumsum", "cumprod", "cummin", "cummax"] + + +@pytest.fixture(params=_all_numeric_accumulations) +def all_numeric_accumulations(request): + """ + Fixture for numeric accumulation names + """ + return request.param + + +# ---------------------------------------------------------------- +# Data sets/files +# ---------------------------------------------------------------- +@pytest.fixture +def strict_data_files(pytestconfig): + """ + Returns the configuration for the test setting `--no-strict-data-files`. + """ + return pytestconfig.getoption("--no-strict-data-files") + + +@pytest.fixture +def tests_path() -> Path: + return Path(__file__).parent / "tests" + + +@pytest.fixture +def tests_io_data_path(tests_path) -> Path: + return tests_path / "io" / "data" + + +@pytest.fixture +def datapath(strict_data_files: str) -> Callable[..., str]: + """ + Get the path to a data file. + + Parameters + ---------- + path : str + Path to the file, relative to ``pandas/tests/`` + + Returns + ------- + path including ``pandas/tests``. + + Raises + ------ + ValueError + If the path doesn't exist and the --no-strict-data-files option is not set. + """ + BASE_PATH = os.path.join(os.path.dirname(__file__), "tests") + + def deco(*args): + path = os.path.join(BASE_PATH, *args) + if not os.path.exists(path): + if strict_data_files: + raise ValueError( + f"Could not find file {path} and --no-strict-data-files is not set." 
+                )
+            pytest.skip(f"Could not find {path}.")
+        return path
+
+    return deco
+
+
+@pytest.fixture
+def iris(datapath) -> DataFrame:
+    """
+    The iris dataset as a DataFrame.
+    """
+    return pd.read_csv(datapath("io", "data", "csv", "iris.csv"))
+
+
+# ----------------------------------------------------------------
+# Time zones
+# ----------------------------------------------------------------
+TIMEZONES = [
+    None,
+    "UTC",
+    "US/Eastern",
+    "Asia/Tokyo",
+    "dateutil/US/Pacific",
+    "dateutil/Asia/Singapore",
+    "+01:15",
+    "-02:15",
+    "UTC+01:15",
+    "UTC-02:15",
+    tzutc(),
+    tzlocal(),
+    FixedOffset(300),
+    FixedOffset(0),
+    FixedOffset(-300),
+    timezone.utc,
+    timezone(timedelta(hours=1)),
+    timezone(timedelta(hours=-1), name="foo"),
+]
+if zoneinfo is not None:
+    TIMEZONES.extend(
+        [
+            zoneinfo.ZoneInfo("US/Pacific"),  # type: ignore[list-item]
+            zoneinfo.ZoneInfo("UTC"),  # type: ignore[list-item]
+        ]
+    )
+TIMEZONE_IDS = [repr(i) for i in TIMEZONES]
+
+
+@td.parametrize_fixture_doc(str(TIMEZONE_IDS))
+@pytest.fixture(params=TIMEZONES, ids=TIMEZONE_IDS)
+def tz_naive_fixture(request):
+    """
+    Fixture for trying timezones including default (None): {0}
+    """
+    return request.param
+
+
+@td.parametrize_fixture_doc(str(TIMEZONE_IDS[1:]))
+@pytest.fixture(params=TIMEZONES[1:], ids=TIMEZONE_IDS[1:])
+def tz_aware_fixture(request):
+    """
+    Fixture for trying explicit timezones: {0}
+    """
+    return request.param
+
+
+# Generate cartesian product of tz_aware_fixture:
+tz_aware_fixture2 = tz_aware_fixture
+
+
+_UTCS = ["utc", "dateutil/UTC", utc, tzutc(), timezone.utc]
+if zoneinfo is not None:
+    _UTCS.append(zoneinfo.ZoneInfo("UTC"))
+
+
+@pytest.fixture(params=_UTCS)
+def utc_fixture(request):
+    """
+    Fixture to provide variants of UTC timezone strings and tzinfo objects.
+    """
+    return request.param
+
+
+utc_fixture2 = utc_fixture
+
+
+# ----------------------------------------------------------------
+# Dtypes
+# ----------------------------------------------------------------
+@pytest.fixture(params=tm.STRING_DTYPES)
+def string_dtype(request):
+    """
+    Parametrized fixture for string dtypes.
+
+    * str
+    * 'str'
+    * 'U'
+    """
+    return request.param
+
+
+@pytest.fixture(
+    params=[
+        "string[python]",
+        pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")),
+    ]
+)
+def nullable_string_dtype(request):
+    """
+    Parametrized fixture for string dtypes.
+
+    * 'string[python]'
+    * 'string[pyarrow]'
+    """
+    return request.param
+
+
+@pytest.fixture(
+    params=[
+        "python",
+        pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")),
+        pytest.param("pyarrow_numpy", marks=td.skip_if_no("pyarrow")),
+    ]
+)
+def string_storage(request):
+    """
+    Parametrized fixture for pd.options.mode.string_storage.
+
+    * 'python'
+    * 'pyarrow'
+    * 'pyarrow_numpy'
+    """
+    return request.param
+
+
+@pytest.fixture(
+    params=[
+        "numpy_nullable",
+        pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")),
+    ]
+)
+def dtype_backend(request):
+    """
+    Parametrized fixture for dtype backends.
+
+    * 'numpy_nullable'
+    * 'pyarrow'
+    """
+    return request.param
+
+
+# Alias so we can test with cartesian product of string_storage
+string_storage2 = string_storage
+
+
+@pytest.fixture(params=tm.BYTES_DTYPES)
+def bytes_dtype(request):
+    """
+    Parametrized fixture for bytes dtypes.
+
+    * bytes
+    * 'bytes'
+    """
+    return request.param
+
+
+@pytest.fixture(params=tm.OBJECT_DTYPES)
+def object_dtype(request):
+    """
+    Parametrized fixture for object dtypes.
+ + * object + * 'object' + """ + return request.param + + +@pytest.fixture( + params=[ + "object", + "string[python]", + pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")), + pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")), + ] +) +def any_string_dtype(request): + """ + Parametrized fixture for string dtypes. + * 'object' + * 'string[python]' + * 'string[pyarrow]' + """ + return request.param + + +@pytest.fixture(params=tm.DATETIME64_DTYPES) +def datetime64_dtype(request): + """ + Parametrized fixture for datetime64 dtypes. + + * 'datetime64[ns]' + * 'M8[ns]' + """ + return request.param + + +@pytest.fixture(params=tm.TIMEDELTA64_DTYPES) +def timedelta64_dtype(request): + """ + Parametrized fixture for timedelta64 dtypes. + + * 'timedelta64[ns]' + * 'm8[ns]' + """ + return request.param + + +@pytest.fixture +def fixed_now_ts() -> Timestamp: + """ + Fixture emits fixed Timestamp.now() + """ + return Timestamp( + year=2021, month=1, day=1, hour=12, minute=4, second=13, microsecond=22 + ) + + +@pytest.fixture(params=tm.FLOAT_NUMPY_DTYPES) +def float_numpy_dtype(request): + """ + Parameterized fixture for float dtypes. + + * float + * 'float32' + * 'float64' + """ + return request.param + + +@pytest.fixture(params=tm.FLOAT_EA_DTYPES) +def float_ea_dtype(request): + """ + Parameterized fixture for float dtypes. + + * 'Float32' + * 'Float64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_FLOAT_DTYPES) +def any_float_dtype(request): + """ + Parameterized fixture for float dtypes. + + * float + * 'float32' + * 'float64' + * 'Float32' + * 'Float64' + """ + return request.param + + +@pytest.fixture(params=tm.COMPLEX_DTYPES) +def complex_dtype(request): + """ + Parameterized fixture for complex dtypes. + + * complex + * 'complex64' + * 'complex128' + """ + return request.param + + +@pytest.fixture(params=tm.SIGNED_INT_NUMPY_DTYPES) +def any_signed_int_numpy_dtype(request): + """ + Parameterized fixture for signed integer dtypes. + + * int + * 'int8' + * 'int16' + * 'int32' + * 'int64' + """ + return request.param + + +@pytest.fixture(params=tm.UNSIGNED_INT_NUMPY_DTYPES) +def any_unsigned_int_numpy_dtype(request): + """ + Parameterized fixture for unsigned integer dtypes. + + * 'uint8' + * 'uint16' + * 'uint32' + * 'uint64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_INT_NUMPY_DTYPES) +def any_int_numpy_dtype(request): + """ + Parameterized fixture for any integer dtype. + + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_INT_EA_DTYPES) +def any_int_ea_dtype(request): + """ + Parameterized fixture for any nullable integer dtype. + + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_INT_DTYPES) +def any_int_dtype(request): + """ + Parameterized fixture for any nullable integer dtype. + + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_INT_EA_DTYPES + tm.FLOAT_EA_DTYPES) +def any_numeric_ea_dtype(request): + """ + Parameterized fixture for any nullable integer dtype and + any float ea dtypes. 
+ + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + * 'Float32' + * 'Float64' + """ + return request.param + + +# Unsupported operand types for + ("List[Union[str, ExtensionDtype, dtype[Any], +# Type[object]]]" and "List[str]") +@pytest.fixture( + params=tm.ALL_INT_EA_DTYPES + + tm.FLOAT_EA_DTYPES + + tm.ALL_INT_PYARROW_DTYPES_STR_REPR + + tm.FLOAT_PYARROW_DTYPES_STR_REPR # type: ignore[operator] +) +def any_numeric_ea_and_arrow_dtype(request): + """ + Parameterized fixture for any nullable integer dtype and + any float ea dtypes. + + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + * 'Float32' + * 'Float64' + * 'uint8[pyarrow]' + * 'int8[pyarrow]' + * 'uint16[pyarrow]' + * 'int16[pyarrow]' + * 'uint32[pyarrow]' + * 'int32[pyarrow]' + * 'uint64[pyarrow]' + * 'int64[pyarrow]' + * 'float32[pyarrow]' + * 'float64[pyarrow]' + """ + return request.param + + +@pytest.fixture(params=tm.SIGNED_INT_EA_DTYPES) +def any_signed_int_ea_dtype(request): + """ + Parameterized fixture for any signed nullable integer dtype. + + * 'Int8' + * 'Int16' + * 'Int32' + * 'Int64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_REAL_NUMPY_DTYPES) +def any_real_numpy_dtype(request): + """ + Parameterized fixture for any (purely) real numeric dtype. + + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_REAL_DTYPES) +def any_real_numeric_dtype(request): + """ + Parameterized fixture for any (purely) real numeric dtype. + + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + + and associated ea dtypes. + """ + return request.param + + +@pytest.fixture(params=tm.ALL_NUMPY_DTYPES) +def any_numpy_dtype(request): + """ + Parameterized fixture for all numpy dtypes. + + * bool + * 'bool' + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + * complex + * 'complex64' + * 'complex128' + * str + * 'str' + * 'U' + * bytes + * 'bytes' + * 'datetime64[ns]' + * 'M8[ns]' + * 'timedelta64[ns]' + * 'm8[ns]' + * object + * 'object' + """ + return request.param + + +@pytest.fixture(params=tm.ALL_NUMERIC_DTYPES) +def any_numeric_dtype(request): + """ + Parameterized fixture for all numeric dtypes. 
+ + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * float + * 'float32' + * 'float64' + * complex + * 'complex64' + * 'complex128' + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + * 'Float32' + * 'Float64' + """ + return request.param + + +# categoricals are handled separately +_any_skipna_inferred_dtype = [ + ("string", ["a", np.nan, "c"]), + ("string", ["a", pd.NA, "c"]), + ("mixed", ["a", pd.NaT, "c"]), # pd.NaT not considered valid by is_string_array + ("bytes", [b"a", np.nan, b"c"]), + ("empty", [np.nan, np.nan, np.nan]), + ("empty", []), + ("mixed-integer", ["a", np.nan, 2]), + ("mixed", ["a", np.nan, 2.0]), + ("floating", [1.0, np.nan, 2.0]), + ("integer", [1, np.nan, 2]), + ("mixed-integer-float", [1, np.nan, 2.0]), + ("decimal", [Decimal(1), np.nan, Decimal(2)]), + ("boolean", [True, np.nan, False]), + ("boolean", [True, pd.NA, False]), + ("datetime64", [np.datetime64("2013-01-01"), np.nan, np.datetime64("2018-01-01")]), + ("datetime", [Timestamp("20130101"), np.nan, Timestamp("20180101")]), + ("date", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]), + ("complex", [1 + 1j, np.nan, 2 + 2j]), + # The following dtype is commented out due to GH 23554 + # ('timedelta64', [np.timedelta64(1, 'D'), + # np.nan, np.timedelta64(2, 'D')]), + ("timedelta", [timedelta(1), np.nan, timedelta(2)]), + ("time", [time(1), np.nan, time(2)]), + ("period", [Period(2013), pd.NaT, Period(2018)]), + ("interval", [Interval(0, 1), np.nan, Interval(0, 2)]), +] +ids, _ = zip(*_any_skipna_inferred_dtype) # use inferred type as fixture-id + + +@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids) +def any_skipna_inferred_dtype(request): + """ + Fixture for all inferred dtypes from _libs.lib.infer_dtype + + The covered (inferred) types are: + * 'string' + * 'empty' + * 'bytes' + * 'mixed' + * 'mixed-integer' + * 'mixed-integer-float' + * 'floating' + * 'integer' + * 'decimal' + * 'boolean' + * 'datetime64' + * 'datetime' + * 'date' + * 'timedelta' + * 'time' + * 'period' + * 'interval' + + Returns + ------- + inferred_dtype : str + The string for the inferred dtype from _libs.lib.infer_dtype + values : np.ndarray + An array of object dtype that will be inferred to have + `inferred_dtype` + + Examples + -------- + >>> from pandas._libs import lib + >>> + >>> def test_something(any_skipna_inferred_dtype): + ... inferred_dtype, values = any_skipna_inferred_dtype + ... # will pass + ... assert lib.infer_dtype(values, skipna=True) == inferred_dtype + """ + inferred_dtype, values = request.param + values = np.array(values, dtype=object) # object dtype to avoid casting + + # correctness of inference tested in tests/dtypes/test_inference.py + return inferred_dtype, values + + +# ---------------------------------------------------------------- +# Misc +# ---------------------------------------------------------------- +@pytest.fixture +def ip(): + """ + Get an instance of IPython.InteractiveShell. + + Will raise a skip if IPython is not installed. 
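+
+    The minimum version enforced below is IPython 6.0.0.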
+ """ + pytest.importorskip("IPython", minversion="6.0.0") + from IPython.core.interactiveshell import InteractiveShell + + # GH#35711 make sure sqlite history file handle is not leaked + from traitlets.config import Config # isort:skip + + c = Config() + c.HistoryManager.hist_file = ":memory:" + + return InteractiveShell(config=c) + + +@pytest.fixture(params=["bsr", "coo", "csc", "csr", "dia", "dok", "lil"]) +def spmatrix(request): + """ + Yields scipy sparse matrix classes. + """ + sparse = pytest.importorskip("scipy.sparse") + + return getattr(sparse, request.param + "_matrix") + + +@pytest.fixture( + params=[ + getattr(pd.offsets, o) + for o in pd.offsets.__all__ + if issubclass(getattr(pd.offsets, o), pd.offsets.Tick) and o != "Tick" + ] +) +def tick_classes(request): + """ + Fixture for Tick based datetime offsets available for a time series. + """ + return request.param + + +@pytest.fixture(params=[None, lambda x: x]) +def sort_by_key(request): + """ + Simple fixture for testing keys in sorting methods. + Tests None (no key) and the identity key. + """ + return request.param + + +@pytest.fixture() +def fsspectest(): + pytest.importorskip("fsspec") + from fsspec import register_implementation + from fsspec.implementations.memory import MemoryFileSystem + from fsspec.registry import _registry as registry + + class TestMemoryFS(MemoryFileSystem): + protocol = "testmem" + test = [None] + + def __init__(self, **kwargs) -> None: + self.test[0] = kwargs.pop("test", None) + super().__init__(**kwargs) + + register_implementation("testmem", TestMemoryFS, clobber=True) + yield TestMemoryFS() + registry.pop("testmem", None) + TestMemoryFS.test[0] = None + TestMemoryFS.store.clear() + + +@pytest.fixture( + params=[ + ("foo", None, None), + ("Egon", "Venkman", None), + ("NCC1701D", "NCC1701D", "NCC1701D"), + # possibly-matching NAs + (np.nan, np.nan, np.nan), + (np.nan, pd.NaT, None), + (np.nan, pd.NA, None), + (pd.NA, pd.NA, pd.NA), + ] +) +def names(request) -> tuple[Hashable, Hashable, Hashable]: + """ + A 3-tuple of names, the first two for operands, the last for a result. + """ + return request.param + + +@pytest.fixture(params=[tm.setitem, tm.loc, tm.iloc]) +def indexer_sli(request): + """ + Parametrize over __setitem__, loc.__setitem__, iloc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.loc, tm.iloc]) +def indexer_li(request): + """ + Parametrize over loc.__getitem__, iloc.__getitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.setitem, tm.iloc]) +def indexer_si(request): + """ + Parametrize over __setitem__, iloc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.setitem, tm.loc]) +def indexer_sl(request): + """ + Parametrize over __setitem__, loc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.at, tm.loc]) +def indexer_al(request): + """ + Parametrize over at.__setitem__, loc.__setitem__ + """ + return request.param + + +@pytest.fixture(params=[tm.iat, tm.iloc]) +def indexer_ial(request): + """ + Parametrize over iat.__setitem__, iloc.__setitem__ + """ + return request.param + + +@pytest.fixture +def using_array_manager() -> bool: + """ + Fixture to check if the array manager is being used. + """ + return pd.options.mode.data_manager == "array" + + +@pytest.fixture +def using_copy_on_write() -> bool: + """ + Fixture to check if Copy-on-Write is enabled. 
+ """ + return pd.options.mode.copy_on_write and pd.options.mode.data_manager == "block" + + +warsaws = ["Europe/Warsaw", "dateutil/Europe/Warsaw"] +if zoneinfo is not None: + warsaws.append(zoneinfo.ZoneInfo("Europe/Warsaw")) # type: ignore[arg-type] + + +@pytest.fixture(params=warsaws) +def warsaw(request) -> str: + """ + tzinfo for Europe/Warsaw using pytz, dateutil, or zoneinfo. + """ + return request.param + + +@pytest.fixture() +def arrow_string_storage(): + return ("pyarrow", "pyarrow_numpy") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/executor.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/executor.py new file mode 100644 index 00000000..5cd47799 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/executor.py @@ -0,0 +1,200 @@ +from __future__ import annotations + +import functools +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) + +if TYPE_CHECKING: + from pandas._typing import Scalar + +import numpy as np + +from pandas.compat._optional import import_optional_dependency + + +@functools.cache +def make_looper(func, result_dtype, is_grouped_kernel, nopython, nogil, parallel): + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + if is_grouped_kernel: + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def column_looper( + values: np.ndarray, + labels: np.ndarray, + ngroups: int, + min_periods: int, + *args, + ): + result = np.empty((values.shape[0], ngroups), dtype=result_dtype) + na_positions = {} + for i in numba.prange(values.shape[0]): + output, na_pos = func( + values[i], result_dtype, labels, ngroups, min_periods, *args + ) + result[i] = output + if len(na_pos) > 0: + na_positions[i] = np.array(na_pos) + return result, na_positions + + else: + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def column_looper( + values: np.ndarray, + start: np.ndarray, + end: np.ndarray, + min_periods: int, + *args, + ): + result = np.empty((values.shape[0], len(start)), dtype=result_dtype) + na_positions = {} + for i in numba.prange(values.shape[0]): + output, na_pos = func( + values[i], result_dtype, start, end, min_periods, *args + ) + result[i] = output + if len(na_pos) > 0: + na_positions[i] = np.array(na_pos) + return result, na_positions + + return column_looper + + +default_dtype_mapping: dict[np.dtype, Any] = { + np.dtype("int8"): np.int64, + np.dtype("int16"): np.int64, + np.dtype("int32"): np.int64, + np.dtype("int64"): np.int64, + np.dtype("uint8"): np.uint64, + np.dtype("uint16"): np.uint64, + np.dtype("uint32"): np.uint64, + np.dtype("uint64"): np.uint64, + np.dtype("float32"): np.float64, + np.dtype("float64"): np.float64, + np.dtype("complex64"): np.complex128, + np.dtype("complex128"): np.complex128, +} + + +# TODO: Preserve complex dtypes + +float_dtype_mapping: dict[np.dtype, Any] = { + np.dtype("int8"): np.float64, + np.dtype("int16"): np.float64, + np.dtype("int32"): np.float64, + np.dtype("int64"): np.float64, + np.dtype("uint8"): np.float64, + np.dtype("uint16"): np.float64, + np.dtype("uint32"): np.float64, + np.dtype("uint64"): np.float64, + 
np.dtype("float32"): np.float64, + np.dtype("float64"): np.float64, + np.dtype("complex64"): np.float64, + np.dtype("complex128"): np.float64, +} + +identity_dtype_mapping: dict[np.dtype, Any] = { + np.dtype("int8"): np.int8, + np.dtype("int16"): np.int16, + np.dtype("int32"): np.int32, + np.dtype("int64"): np.int64, + np.dtype("uint8"): np.uint8, + np.dtype("uint16"): np.uint16, + np.dtype("uint32"): np.uint32, + np.dtype("uint64"): np.uint64, + np.dtype("float32"): np.float32, + np.dtype("float64"): np.float64, + np.dtype("complex64"): np.complex64, + np.dtype("complex128"): np.complex128, +} + + +def generate_shared_aggregator( + func: Callable[..., Scalar], + dtype_mapping: dict[np.dtype, np.dtype], + is_grouped_kernel: bool, + nopython: bool, + nogil: bool, + parallel: bool, +): + """ + Generate a Numba function that loops over the columns 2D object and applies + a 1D numba kernel over each column. + + Parameters + ---------- + func : function + aggregation function to be applied to each column + dtype_mapping: dict or None + If not None, maps a dtype to a result dtype. + Otherwise, will fall back to default mapping. + is_grouped_kernel: bool, default False + Whether func operates using the group labels (True) + or using starts/ends arrays + + If true, you also need to pass the number of groups to this function + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + + # A wrapper around the looper function, + # to dispatch based on dtype since numba is unable to do that in nopython mode + + # It also post-processes the values by inserting nans where number of observations + # is less than min_periods + # Cannot do this in numba nopython mode + # (you'll run into type-unification error when you cast int -> float) + def looper_wrapper( + values, + start=None, + end=None, + labels=None, + ngroups=None, + min_periods: int = 0, + **kwargs, + ): + result_dtype = dtype_mapping[values.dtype] + column_looper = make_looper( + func, result_dtype, is_grouped_kernel, nopython, nogil, parallel + ) + # Need to unpack kwargs since numba only supports *args + if is_grouped_kernel: + result, na_positions = column_looper( + values, labels, ngroups, min_periods, *kwargs.values() + ) + else: + result, na_positions = column_looper( + values, start, end, min_periods, *kwargs.values() + ) + if result.dtype.kind == "i": + # Look if na_positions is not empty + # If so, convert the whole block + # This is OK since int dtype cannot hold nan, + # so if min_periods not satisfied for 1 col, it is not satisfied for + # all columns at that index + for na_pos in na_positions.values(): + if len(na_pos) > 0: + result = result.astype("float64") + break + # TODO: Optimize this + for i, na_pos in na_positions.items(): + if len(na_pos) > 0: + result[i, na_pos] = np.nan + return result + + return looper_wrapper diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/__init__.py new file mode 100644 index 00000000..1116c61c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/__init__.py @@ -0,0 +1,27 @@ +from pandas.core._numba.kernels.mean_ import ( + grouped_mean, + sliding_mean, +) +from pandas.core._numba.kernels.min_max_ import ( + grouped_min_max, + sliding_min_max, +) +from pandas.core._numba.kernels.sum_ import ( + grouped_sum, + 
sliding_sum, +) +from pandas.core._numba.kernels.var_ import ( + grouped_var, + sliding_var, +) + +__all__ = [ + "sliding_mean", + "grouped_mean", + "sliding_sum", + "grouped_sum", + "sliding_var", + "grouped_var", + "sliding_min_max", + "grouped_min_max", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/mean_.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/mean_.py new file mode 100644 index 00000000..f4158047 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/mean_.py @@ -0,0 +1,196 @@ +""" +Numba 1D mean kernels that can be shared by +* Dataframe / Series +* groupby +* rolling / expanding + +Mirrors pandas/_libs/window/aggregation.pyx +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numba +import numpy as np + +from pandas.core._numba.kernels.shared import is_monotonic_increasing +from pandas.core._numba.kernels.sum_ import grouped_kahan_sum + +if TYPE_CHECKING: + from pandas._typing import npt + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def add_mean( + val: float, + nobs: int, + sum_x: float, + neg_ct: int, + compensation: float, + num_consecutive_same_value: int, + prev_value: float, +) -> tuple[int, float, int, float, int, float]: + if not np.isnan(val): + nobs += 1 + y = val - compensation + t = sum_x + y + compensation = t - sum_x - y + sum_x = t + if val < 0: + neg_ct += 1 + + if val == prev_value: + num_consecutive_same_value += 1 + else: + num_consecutive_same_value = 1 + prev_value = val + + return nobs, sum_x, neg_ct, compensation, num_consecutive_same_value, prev_value + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def remove_mean( + val: float, nobs: int, sum_x: float, neg_ct: int, compensation: float +) -> tuple[int, float, int, float]: + if not np.isnan(val): + nobs -= 1 + y = -val - compensation + t = sum_x + y + compensation = t - sum_x - y + sum_x = t + if val < 0: + neg_ct -= 1 + return nobs, sum_x, neg_ct, compensation + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def sliding_mean( + values: np.ndarray, + result_dtype: np.dtype, + start: np.ndarray, + end: np.ndarray, + min_periods: int, +) -> tuple[np.ndarray, list[int]]: + N = len(start) + nobs = 0 + sum_x = 0.0 + neg_ct = 0 + compensation_add = 0.0 + compensation_remove = 0.0 + + is_monotonic_increasing_bounds = is_monotonic_increasing( + start + ) and is_monotonic_increasing(end) + + output = np.empty(N, dtype=result_dtype) + + for i in range(N): + s = start[i] + e = end[i] + if i == 0 or not is_monotonic_increasing_bounds: + prev_value = values[s] + num_consecutive_same_value = 0 + + for j in range(s, e): + val = values[j] + ( + nobs, + sum_x, + neg_ct, + compensation_add, + num_consecutive_same_value, + prev_value, + ) = add_mean( + val, + nobs, + sum_x, + neg_ct, + compensation_add, + num_consecutive_same_value, + prev_value, # pyright: ignore[reportGeneralTypeIssues] + ) + else: + for j in range(start[i - 1], s): + val = values[j] + nobs, sum_x, neg_ct, compensation_remove = remove_mean( + val, nobs, sum_x, neg_ct, compensation_remove + ) + + for j in range(end[i - 1], e): + val = values[j] + ( + nobs, + sum_x, + neg_ct, + compensation_add, + num_consecutive_same_value, + prev_value, + ) = add_mean( + val, + nobs, + sum_x, + neg_ct, + compensation_add, + num_consecutive_same_value, + prev_value, # pyright: ignore[reportGeneralTypeIssues] + ) + + if nobs >= min_periods and nobs > 0: + result = sum_x / nobs + if num_consecutive_same_value >= 
nobs: + result = prev_value + elif neg_ct == 0 and result < 0: + result = 0 + elif neg_ct == nobs and result > 0: + result = 0 + else: + result = np.nan + + output[i] = result + + if not is_monotonic_increasing_bounds: + nobs = 0 + sum_x = 0.0 + neg_ct = 0 + compensation_remove = 0.0 + + # na_position is empty list since float64 can already hold nans + # Do list comprehension, since numba cannot figure out that na_pos is + # empty list of ints on its own + na_pos = [0 for i in range(0)] + return output, na_pos + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def grouped_mean( + values: np.ndarray, + result_dtype: np.dtype, + labels: npt.NDArray[np.intp], + ngroups: int, + min_periods: int, +) -> tuple[np.ndarray, list[int]]: + output, nobs_arr, comp_arr, consecutive_counts, prev_vals = grouped_kahan_sum( + values, result_dtype, labels, ngroups + ) + + # Post-processing, replace sums that don't satisfy min_periods + for lab in range(ngroups): + nobs = nobs_arr[lab] + num_consecutive_same_value = consecutive_counts[lab] + prev_value = prev_vals[lab] + sum_x = output[lab] + if nobs >= min_periods: + if num_consecutive_same_value >= nobs: + result = prev_value * nobs + else: + result = sum_x + else: + result = np.nan + result /= nobs + output[lab] = result + + # na_position is empty list since float64 can already hold nans + # Do list comprehension, since numba cannot figure out that na_pos is + # empty list of ints on its own + na_pos = [0 for i in range(0)] + return output, na_pos diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/min_max_.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/min_max_.py new file mode 100644 index 00000000..c9803980 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/min_max_.py @@ -0,0 +1,125 @@ +""" +Numba 1D min/max kernels that can be shared by +* Dataframe / Series +* groupby +* rolling / expanding + +Mirrors pandas/_libs/window/aggregation.pyx +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numba +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import npt + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def sliding_min_max( + values: np.ndarray, + result_dtype: np.dtype, + start: np.ndarray, + end: np.ndarray, + min_periods: int, + is_max: bool, +) -> tuple[np.ndarray, list[int]]: + N = len(start) + nobs = 0 + output = np.empty(N, dtype=result_dtype) + na_pos = [] + # Use deque once numba supports it + # https://github.com/numba/numba/issues/7417 + Q: list = [] + W: list = [] + for i in range(N): + curr_win_size = end[i] - start[i] + if i == 0: + st = start[i] + else: + st = end[i - 1] + + for k in range(st, end[i]): + ai = values[k] + if not np.isnan(ai): + nobs += 1 + elif is_max: + ai = -np.inf + else: + ai = np.inf + # Discard previous entries if we find new min or max + if is_max: + while Q and ((ai >= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]): + Q.pop() + else: + while Q and ((ai <= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]): + Q.pop() + Q.append(k) + W.append(k) + + # Discard entries outside and left of current window + while Q and Q[0] <= start[i] - 1: + Q.pop(0) + while W and W[0] <= start[i] - 1: + if not np.isnan(values[W[0]]): + nobs -= 1 + W.pop(0) + + # Save output based on index in input value array + if Q and curr_win_size > 0 and nobs >= min_periods: + output[i] = values[Q[0]] + else: + if values.dtype.kind != "i": + output[i] = np.nan + else: + na_pos.append(i) + + return 
output, na_pos + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def grouped_min_max( + values: np.ndarray, + result_dtype: np.dtype, + labels: npt.NDArray[np.intp], + ngroups: int, + min_periods: int, + is_max: bool, +) -> tuple[np.ndarray, list[int]]: + N = len(labels) + nobs = np.zeros(ngroups, dtype=np.int64) + na_pos = [] + output = np.empty(ngroups, dtype=result_dtype) + + for i in range(N): + lab = labels[i] + val = values[i] + if lab < 0: + continue + + if values.dtype.kind == "i" or not np.isnan(val): + nobs[lab] += 1 + else: + # NaN value cannot be a min/max value + continue + + if nobs[lab] == 1: + # First element in group, set output equal to this + output[lab] = val + continue + + if is_max: + if val > output[lab]: + output[lab] = val + else: + if val < output[lab]: + output[lab] = val + + # Set labels that don't satisfy min_periods as np.nan + for lab, count in enumerate(nobs): + if count < min_periods: + na_pos.append(lab) + + return output, na_pos diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/shared.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/shared.py new file mode 100644 index 00000000..c52372fe --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/shared.py @@ -0,0 +1,29 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numba + +if TYPE_CHECKING: + import numpy as np + + +@numba.jit( + # error: Any? not callable + numba.boolean(numba.int64[:]), # type: ignore[misc] + nopython=True, + nogil=True, + parallel=False, +) +def is_monotonic_increasing(bounds: np.ndarray) -> bool: + """Check if int64 values are monotonically increasing.""" + n = len(bounds) + if n < 2: + return True + prev = bounds[0] + for i in range(1, n): + cur = bounds[i] + if cur < prev: + return False + prev = cur + return True diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/sum_.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/sum_.py new file mode 100644 index 00000000..94db8426 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/sum_.py @@ -0,0 +1,244 @@ +""" +Numba 1D sum kernels that can be shared by +* Dataframe / Series +* groupby +* rolling / expanding + +Mirrors pandas/_libs/window/aggregation.pyx +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +import numba +from numba.extending import register_jitable +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import npt + +from pandas.core._numba.kernels.shared import is_monotonic_increasing + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def add_sum( + val: Any, + nobs: int, + sum_x: Any, + compensation: Any, + num_consecutive_same_value: int, + prev_value: Any, +) -> tuple[int, Any, Any, int, Any]: + if not np.isnan(val): + nobs += 1 + y = val - compensation + t = sum_x + y + compensation = t - sum_x - y + sum_x = t + + if val == prev_value: + num_consecutive_same_value += 1 + else: + num_consecutive_same_value = 1 + prev_value = val + + return nobs, sum_x, compensation, num_consecutive_same_value, prev_value + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def remove_sum( + val: Any, nobs: int, sum_x: Any, compensation: Any +) -> tuple[int, Any, Any]: + if not np.isnan(val): + nobs -= 1 + y = -val - compensation + t = sum_x + y + compensation = t - sum_x - y + sum_x = t + return nobs, sum_x, compensation + + +@numba.jit(nopython=True, nogil=True, 
parallel=False) +def sliding_sum( + values: np.ndarray, + result_dtype: np.dtype, + start: np.ndarray, + end: np.ndarray, + min_periods: int, +) -> tuple[np.ndarray, list[int]]: + dtype = values.dtype + + na_val: object = np.nan + if dtype.kind == "i": + na_val = 0 + + N = len(start) + nobs = 0 + sum_x = 0 + compensation_add = 0 + compensation_remove = 0 + na_pos = [] + + is_monotonic_increasing_bounds = is_monotonic_increasing( + start + ) and is_monotonic_increasing(end) + + output = np.empty(N, dtype=result_dtype) + + for i in range(N): + s = start[i] + e = end[i] + if i == 0 or not is_monotonic_increasing_bounds: + prev_value = values[s] + num_consecutive_same_value = 0 + + for j in range(s, e): + val = values[j] + ( + nobs, + sum_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) = add_sum( + val, + nobs, + sum_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) + else: + for j in range(start[i - 1], s): + val = values[j] + nobs, sum_x, compensation_remove = remove_sum( + val, nobs, sum_x, compensation_remove + ) + + for j in range(end[i - 1], e): + val = values[j] + ( + nobs, + sum_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) = add_sum( + val, + nobs, + sum_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) + + if nobs == 0 == min_periods: + result: object = 0 + elif nobs >= min_periods: + if num_consecutive_same_value >= nobs: + result = prev_value * nobs + else: + result = sum_x + else: + result = na_val + if dtype.kind == "i": + na_pos.append(i) + + output[i] = result + + if not is_monotonic_increasing_bounds: + nobs = 0 + sum_x = 0 + compensation_remove = 0 + + return output, na_pos + + +# Mypy/pyright don't like the fact that the decorator is untyped +@register_jitable # type: ignore[misc] +def grouped_kahan_sum( + values: np.ndarray, + result_dtype: np.dtype, + labels: npt.NDArray[np.intp], + ngroups: int, +) -> tuple[ + np.ndarray, npt.NDArray[np.int64], np.ndarray, npt.NDArray[np.int64], np.ndarray +]: + N = len(labels) + + nobs_arr = np.zeros(ngroups, dtype=np.int64) + comp_arr = np.zeros(ngroups, dtype=values.dtype) + consecutive_counts = np.zeros(ngroups, dtype=np.int64) + prev_vals = np.zeros(ngroups, dtype=values.dtype) + output = np.zeros(ngroups, dtype=result_dtype) + + for i in range(N): + lab = labels[i] + val = values[i] + + if lab < 0: + continue + + sum_x = output[lab] + nobs = nobs_arr[lab] + compensation_add = comp_arr[lab] + num_consecutive_same_value = consecutive_counts[lab] + prev_value = prev_vals[lab] + + ( + nobs, + sum_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) = add_sum( + val, + nobs, + sum_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) + + output[lab] = sum_x + consecutive_counts[lab] = num_consecutive_same_value + prev_vals[lab] = prev_value + comp_arr[lab] = compensation_add + nobs_arr[lab] = nobs + return output, nobs_arr, comp_arr, consecutive_counts, prev_vals + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def grouped_sum( + values: np.ndarray, + result_dtype: np.dtype, + labels: npt.NDArray[np.intp], + ngroups: int, + min_periods: int, +) -> tuple[np.ndarray, list[int]]: + na_pos = [] + + output, nobs_arr, comp_arr, consecutive_counts, prev_vals = grouped_kahan_sum( + values, result_dtype, labels, ngroups + ) + + # Post-processing, replace sums that don't satisfy min_periods + for lab in range(ngroups): + nobs = nobs_arr[lab] + num_consecutive_same_value = consecutive_counts[lab] + prev_value = 
prev_vals[lab] + sum_x = output[lab] + if nobs >= min_periods: + if num_consecutive_same_value >= nobs: + result = prev_value * nobs + else: + result = sum_x + else: + result = sum_x # Don't change val, will be replaced by nan later + na_pos.append(lab) + output[lab] = result + + return output, na_pos diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/var_.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/var_.py new file mode 100644 index 00000000..e150c719 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/_numba/kernels/var_.py @@ -0,0 +1,245 @@ +""" +Numba 1D var kernels that can be shared by +* Dataframe / Series +* groupby +* rolling / expanding + +Mirrors pandas/_libs/window/aggregation.pyx +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numba +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import npt + +from pandas.core._numba.kernels.shared import is_monotonic_increasing + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def add_var( + val: float, + nobs: int, + mean_x: float, + ssqdm_x: float, + compensation: float, + num_consecutive_same_value: int, + prev_value: float, +) -> tuple[int, float, float, float, int, float]: + if not np.isnan(val): + if val == prev_value: + num_consecutive_same_value += 1 + else: + num_consecutive_same_value = 1 + prev_value = val + + nobs += 1 + prev_mean = mean_x - compensation + y = val - compensation + t = y - mean_x + compensation = t + mean_x - y + delta = t + if nobs: + mean_x += delta / nobs + else: + mean_x = 0 + ssqdm_x += (val - prev_mean) * (val - mean_x) + return nobs, mean_x, ssqdm_x, compensation, num_consecutive_same_value, prev_value + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def remove_var( + val: float, nobs: int, mean_x: float, ssqdm_x: float, compensation: float +) -> tuple[int, float, float, float]: + if not np.isnan(val): + nobs -= 1 + if nobs: + prev_mean = mean_x - compensation + y = val - compensation + t = y - mean_x + compensation = t + mean_x - y + delta = t + mean_x -= delta / nobs + ssqdm_x -= (val - prev_mean) * (val - mean_x) + else: + mean_x = 0 + ssqdm_x = 0 + return nobs, mean_x, ssqdm_x, compensation + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def sliding_var( + values: np.ndarray, + result_dtype: np.dtype, + start: np.ndarray, + end: np.ndarray, + min_periods: int, + ddof: int = 1, +) -> tuple[np.ndarray, list[int]]: + N = len(start) + nobs = 0 + mean_x = 0.0 + ssqdm_x = 0.0 + compensation_add = 0.0 + compensation_remove = 0.0 + + min_periods = max(min_periods, 1) + is_monotonic_increasing_bounds = is_monotonic_increasing( + start + ) and is_monotonic_increasing(end) + + output = np.empty(N, dtype=result_dtype) + + for i in range(N): + s = start[i] + e = end[i] + if i == 0 or not is_monotonic_increasing_bounds: + prev_value = values[s] + num_consecutive_same_value = 0 + + for j in range(s, e): + val = values[j] + ( + nobs, + mean_x, + ssqdm_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) = add_var( + val, + nobs, + mean_x, + ssqdm_x, + compensation_add, + num_consecutive_same_value, + prev_value, # pyright: ignore[reportGeneralTypeIssues] + ) + else: + for j in range(start[i - 1], s): + val = values[j] + nobs, mean_x, ssqdm_x, compensation_remove = remove_var( + val, nobs, mean_x, ssqdm_x, compensation_remove + ) + + for j in range(end[i - 1], e): + val = values[j] + ( + nobs, + mean_x, + ssqdm_x, + compensation_add, + 
num_consecutive_same_value, + prev_value, + ) = add_var( + val, + nobs, + mean_x, + ssqdm_x, + compensation_add, + num_consecutive_same_value, + prev_value, # pyright: ignore[reportGeneralTypeIssues] + ) + + if nobs >= min_periods and nobs > ddof: + if nobs == 1 or num_consecutive_same_value >= nobs: + result = 0.0 + else: + result = ssqdm_x / (nobs - ddof) + else: + result = np.nan + + output[i] = result + + if not is_monotonic_increasing_bounds: + nobs = 0 + mean_x = 0.0 + ssqdm_x = 0.0 + compensation_remove = 0.0 + + # na_position is empty list since float64 can already hold nans + # Do list comprehension, since numba cannot figure out that na_pos is + # empty list of ints on its own + na_pos = [0 for i in range(0)] + return output, na_pos + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def grouped_var( + values: np.ndarray, + result_dtype: np.dtype, + labels: npt.NDArray[np.intp], + ngroups: int, + min_periods: int, + ddof: int = 1, +) -> tuple[np.ndarray, list[int]]: + N = len(labels) + + nobs_arr = np.zeros(ngroups, dtype=np.int64) + comp_arr = np.zeros(ngroups, dtype=values.dtype) + consecutive_counts = np.zeros(ngroups, dtype=np.int64) + prev_vals = np.zeros(ngroups, dtype=values.dtype) + output = np.zeros(ngroups, dtype=result_dtype) + means = np.zeros(ngroups, dtype=result_dtype) + + for i in range(N): + lab = labels[i] + val = values[i] + + if lab < 0: + continue + + mean_x = means[lab] + ssqdm_x = output[lab] + nobs = nobs_arr[lab] + compensation_add = comp_arr[lab] + num_consecutive_same_value = consecutive_counts[lab] + prev_value = prev_vals[lab] + + ( + nobs, + mean_x, + ssqdm_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) = add_var( + val, + nobs, + mean_x, + ssqdm_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) + + output[lab] = ssqdm_x + means[lab] = mean_x + consecutive_counts[lab] = num_consecutive_same_value + prev_vals[lab] = prev_value + comp_arr[lab] = compensation_add + nobs_arr[lab] = nobs + + # Post-processing, replace vars that don't satisfy min_periods + for lab in range(ngroups): + nobs = nobs_arr[lab] + num_consecutive_same_value = consecutive_counts[lab] + ssqdm_x = output[lab] + if nobs >= min_periods and nobs > ddof: + if nobs == 1 or num_consecutive_same_value >= nobs: + result = 0.0 + else: + result = ssqdm_x / (nobs - ddof) + else: + result = np.nan + output[lab] = result + + # Second pass to get the std.dev + # na_position is empty list since float64 can already hold nans + # Do list comprehension, since numba cannot figure out that na_pos is + # empty list of ints on its own + na_pos = [0 for i in range(0)] + return output, na_pos diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/accessor.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/accessor.py new file mode 100644 index 00000000..1b366599 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/accessor.py @@ -0,0 +1,340 @@ +""" + +accessor.py contains base classes for implementing accessor properties +that can be mixed into or pinned onto other pandas classes. + +""" +from __future__ import annotations + +from typing import ( + Callable, + final, +) +import warnings + +from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level + + +class DirNamesMixin: + _accessors: set[str] = set() + _hidden_attrs: frozenset[str] = frozenset() + + @final + def _dir_deletions(self) -> set[str]: + """ + Delete unwanted __dir__ for this object. 
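A note on the sum_.py and var_.py kernels vendored above: both are built on compensated (Kahan) summation. Each add_sum/add_var call carries the rounding error of the previous addition in a `compensation` term, and remove_sum/remove_var stream values back out of the window with the same trick. A minimal sketch of the update rule outside numba (the helper name `kahan_add` is illustrative, not pandas API):

import math

def kahan_add(val, total, comp):
    # Same algebra as add_sum above: `comp` captures the low-order bits
    # that `total + y` rounds away, so they can be re-applied next call.
    y = val - comp
    t = total + y
    comp = (t - total) - y
    return t, comp

values = [1e16] + [1.0] * 16
total = comp = 0.0
for v in values:
    total, comp = kahan_add(v, total, comp)

print(sum(values))        # 1e+16 -- naive summation loses all sixteen 1.0s
print(total)              # 1.0000000000000016e+16
print(math.fsum(values))  # 1.0000000000000016e+16 (exact reference)

add_var layers a Welford-style running mean and sum of squared differences on top of the same compensation term, which is why sliding_var can subtract old observations without re-scanning the window.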
+ """ + return self._accessors | self._hidden_attrs + + def _dir_additions(self) -> set[str]: + """ + Add additional __dir__ for this object. + """ + return {accessor for accessor in self._accessors if hasattr(self, accessor)} + + def __dir__(self) -> list[str]: + """ + Provide method name lookup and completion. + + Notes + ----- + Only provide 'public' methods. + """ + rv = set(super().__dir__()) + rv = (rv - self._dir_deletions()) | self._dir_additions() + return sorted(rv) + + +class PandasDelegate: + """ + Abstract base class for delegating methods/properties. + """ + + def _delegate_property_get(self, name: str, *args, **kwargs): + raise TypeError(f"You cannot access the property {name}") + + def _delegate_property_set(self, name: str, value, *args, **kwargs): + raise TypeError(f"The property {name} cannot be set") + + def _delegate_method(self, name: str, *args, **kwargs): + raise TypeError(f"You cannot call method {name}") + + @classmethod + def _add_delegate_accessors( + cls, + delegate, + accessors: list[str], + typ: str, + overwrite: bool = False, + accessor_mapping: Callable[[str], str] = lambda x: x, + raise_on_missing: bool = True, + ) -> None: + """ + Add accessors to cls from the delegate class. + + Parameters + ---------- + cls + Class to add the methods/properties to. + delegate + Class to get methods/properties and doc-strings. + accessors : list of str + List of accessors to add. + typ : {'property', 'method'} + overwrite : bool, default False + Overwrite the method/property in the target class if it exists. + accessor_mapping: Callable, default lambda x: x + Callable to map the delegate's function to the cls' function. + raise_on_missing: bool, default True + Raise if an accessor does not exist on delegate. + False skips the missing accessor. + """ + + def _create_delegator_property(name: str): + def _getter(self): + return self._delegate_property_get(name) + + def _setter(self, new_values): + return self._delegate_property_set(name, new_values) + + _getter.__name__ = name + _setter.__name__ = name + + return property( + fget=_getter, + fset=_setter, + doc=getattr(delegate, accessor_mapping(name)).__doc__, + ) + + def _create_delegator_method(name: str): + def f(self, *args, **kwargs): + return self._delegate_method(name, *args, **kwargs) + + f.__name__ = name + f.__doc__ = getattr(delegate, accessor_mapping(name)).__doc__ + + return f + + for name in accessors: + if ( + not raise_on_missing + and getattr(delegate, accessor_mapping(name), None) is None + ): + continue + + if typ == "property": + f = _create_delegator_property(name) + else: + f = _create_delegator_method(name) + + # don't overwrite existing methods/properties + if overwrite or not hasattr(cls, name): + setattr(cls, name, f) + + +def delegate_names( + delegate, + accessors: list[str], + typ: str, + overwrite: bool = False, + accessor_mapping: Callable[[str], str] = lambda x: x, + raise_on_missing: bool = True, +): + """ + Add delegated names to a class using a class decorator. This provides + an alternative usage to directly calling `_add_delegate_accessors` + below a class definition. + + Parameters + ---------- + delegate : object + The class to get methods/properties & doc-strings. + accessors : Sequence[str] + List of accessor to add. + typ : {'property', 'method'} + overwrite : bool, default False + Overwrite the method/property in the target class if it exists. + accessor_mapping: Callable, default lambda x: x + Callable to map the delegate's function to the cls' function. 
+ raise_on_missing: bool, default True + Raise if an accessor does not exist on delegate. + False skips the missing accessor. + + Returns + ------- + callable + A class decorator. + + Examples + -------- + @delegate_names(Categorical, ["categories", "ordered"], "property") + class CategoricalAccessor(PandasDelegate): + [...] + """ + + def add_delegate_accessors(cls): + cls._add_delegate_accessors( + delegate, + accessors, + typ, + overwrite=overwrite, + accessor_mapping=accessor_mapping, + raise_on_missing=raise_on_missing, + ) + return cls + + return add_delegate_accessors + + +# Ported with modifications from xarray +# https://github.com/pydata/xarray/blob/master/xarray/core/extensions.py +# 1. We don't need to catch and re-raise AttributeErrors as RuntimeErrors +# 2. We use a UserWarning instead of a custom Warning + + +class CachedAccessor: + """ + Custom property-like object. + + A descriptor for caching accessors. + + Parameters + ---------- + name : str + Namespace that will be accessed under, e.g. ``df.foo``. + accessor : cls + Class with the extension methods. + + Notes + ----- + For accessor, The class's __init__ method assumes that one of + ``Series``, ``DataFrame`` or ``Index`` as the + single argument ``data``. + """ + + def __init__(self, name: str, accessor) -> None: + self._name = name + self._accessor = accessor + + def __get__(self, obj, cls): + if obj is None: + # we're accessing the attribute of the class, i.e., Dataset.geo + return self._accessor + accessor_obj = self._accessor(obj) + # Replace the property with the accessor object. Inspired by: + # https://www.pydanny.com/cached-property.html + # We need to use object.__setattr__ because we overwrite __setattr__ on + # NDFrame + object.__setattr__(obj, self._name, accessor_obj) + return accessor_obj + + +@doc(klass="", others="") +def _register_accessor(name: str, cls): + """ + Register a custom accessor on {klass} objects. + + Parameters + ---------- + name : str + Name under which the accessor should be registered. A warning is issued + if this name conflicts with a preexisting attribute. + + Returns + ------- + callable + A class decorator. + + See Also + -------- + register_dataframe_accessor : Register a custom accessor on DataFrame objects. + register_series_accessor : Register a custom accessor on Series objects. + register_index_accessor : Register a custom accessor on Index objects. + + Notes + ----- + When accessed, your accessor will be initialized with the pandas object + the user is interacting with. So the signature must be + + .. code-block:: python + + def __init__(self, pandas_object): # noqa: E999 + ... + + For consistency with pandas methods, you should raise an ``AttributeError`` + if the data passed to your accessor has an incorrect dtype. + + >>> pd.Series(['a', 'b']).dt + Traceback (most recent call last): + ... + AttributeError: Can only use .dt accessor with datetimelike values + + Examples + -------- + In your library code:: + + import pandas as pd + + @pd.api.extensions.register_dataframe_accessor("geo") + class GeoAccessor: + def __init__(self, pandas_obj): + self._obj = pandas_obj + + @property + def center(self): + # return the geographic center point of this DataFrame + lat = self._obj.latitude + lon = self._obj.longitude + return (float(lon.mean()), float(lat.mean())) + + def plot(self): + # plot this array's data on a map, e.g., using Cartopy + pass + + Back in an interactive IPython session: + + .. 
code-block:: ipython + + In [1]: ds = pd.DataFrame({{"longitude": np.linspace(0, 10), + ...: "latitude": np.linspace(0, 20)}}) + In [2]: ds.geo.center + Out[2]: (5.0, 10.0) + In [3]: ds.geo.plot() # plots data on a map + """ + + def decorator(accessor): + if hasattr(cls, name): + warnings.warn( + f"registration of accessor {repr(accessor)} under name " + f"{repr(name)} for type {repr(cls)} is overriding a preexisting " + f"attribute with the same name.", + UserWarning, + stacklevel=find_stack_level(), + ) + setattr(cls, name, CachedAccessor(name, accessor)) + cls._accessors.add(name) + return accessor + + return decorator + + +@doc(_register_accessor, klass="DataFrame") +def register_dataframe_accessor(name: str): + from pandas import DataFrame + + return _register_accessor(name, DataFrame) + + +@doc(_register_accessor, klass="Series") +def register_series_accessor(name: str): + from pandas import Series + + return _register_accessor(name, Series) + + +@doc(_register_accessor, klass="Index") +def register_index_accessor(name: str): + from pandas import Index + + return _register_accessor(name, Index) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/algorithms.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/algorithms.py new file mode 100644 index 00000000..5f9fced2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/algorithms.py @@ -0,0 +1,1818 @@ +""" +Generic data algorithms. This module is experimental at the moment and not +intended for public consumption +""" +from __future__ import annotations + +import operator +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Literal, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import ( + algos, + hashtable as htable, + iNaT, + lib, +) +from pandas._typing import ( + AnyArrayLike, + ArrayLike, + AxisInt, + DtypeObj, + TakeIndexer, + npt, +) +from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import ( + construct_1d_object_array_from_listlike, + np_find_common_type, +) +from pandas.core.dtypes.common import ( + ensure_float64, + ensure_object, + ensure_platform_int, + is_array_like, + is_bool_dtype, + is_complex_dtype, + is_dict_like, + is_extension_array_dtype, + is_float_dtype, + is_integer, + is_integer_dtype, + is_list_like, + is_object_dtype, + is_signed_integer_dtype, + needs_i8_conversion, +) +from pandas.core.dtypes.concat import concat_compat +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + BaseMaskedDtype, + CategoricalDtype, + ExtensionDtype, + NumpyEADtype, +) +from pandas.core.dtypes.generic import ( + ABCDatetimeArray, + ABCExtensionArray, + ABCIndex, + ABCMultiIndex, + ABCSeries, + ABCTimedeltaArray, +) +from pandas.core.dtypes.missing import ( + isna, + na_value_for_dtype, +) + +from pandas.core.array_algos.take import take_nd +from pandas.core.construction import ( + array as pd_array, + ensure_wrapped_if_datetimelike, + extract_array, +) +from pandas.core.indexers import validate_indices + +if TYPE_CHECKING: + from pandas._typing import ( + ListLike, + NumpySorter, + NumpyValueArrayLike, + ) + + from pandas import ( + Categorical, + Index, + Series, + ) + from pandas.core.arrays import ( + BaseMaskedArray, + ExtensionArray, + ) + + +# --------------- # +# dtype access # +# --------------- # +def _ensure_data(values: ArrayLike) -> np.ndarray: + """ + routine to ensure that our data is of the correct + input dtype for lower-level routines + + This will coerce: + - 
ints -> int64 + - uint -> uint64 + - bool -> uint8 + - datetimelike -> i8 + - datetime64tz -> i8 (in local tz) + - categorical -> codes + + Parameters + ---------- + values : np.ndarray or ExtensionArray + + Returns + ------- + np.ndarray + """ + + if not isinstance(values, ABCMultiIndex): + # extract_array would raise + values = extract_array(values, extract_numpy=True) + + if is_object_dtype(values.dtype): + return ensure_object(np.asarray(values)) + + elif isinstance(values.dtype, BaseMaskedDtype): + # i.e. BooleanArray, FloatingArray, IntegerArray + values = cast("BaseMaskedArray", values) + if not values._hasna: + # No pd.NAs -> We can avoid an object-dtype cast (and copy) GH#41816 + # recurse to avoid re-implementing logic for eg bool->uint8 + return _ensure_data(values._data) + return np.asarray(values) + + elif isinstance(values.dtype, CategoricalDtype): + # NB: cases that go through here should NOT be using _reconstruct_data + # on the back-end. + values = cast("Categorical", values) + return values.codes + + elif is_bool_dtype(values.dtype): + if isinstance(values, np.ndarray): + # i.e. actually dtype == np.dtype("bool") + return np.asarray(values).view("uint8") + else: + # e.g. Sparse[bool, False] # TODO: no test cases get here + return np.asarray(values).astype("uint8", copy=False) + + elif is_integer_dtype(values.dtype): + return np.asarray(values) + + elif is_float_dtype(values.dtype): + # Note: checking `values.dtype == "float128"` raises on Windows and 32bit + # error: Item "ExtensionDtype" of "Union[Any, ExtensionDtype, dtype[Any]]" + # has no attribute "itemsize" + if values.dtype.itemsize in [2, 12, 16]: # type: ignore[union-attr] + # we dont (yet) have float128 hashtable support + return ensure_float64(values) + return np.asarray(values) + + elif is_complex_dtype(values.dtype): + return cast(np.ndarray, values) + + # datetimelike + elif needs_i8_conversion(values.dtype): + npvalues = values.view("i8") + npvalues = cast(np.ndarray, npvalues) + return npvalues + + # we have failed, return object + values = np.asarray(values, dtype=object) + return ensure_object(values) + + +def _reconstruct_data( + values: ArrayLike, dtype: DtypeObj, original: AnyArrayLike +) -> ArrayLike: + """ + reverse of _ensure_data + + Parameters + ---------- + values : np.ndarray or ExtensionArray + dtype : np.dtype or ExtensionDtype + original : AnyArrayLike + + Returns + ------- + ExtensionArray or np.ndarray + """ + if isinstance(values, ABCExtensionArray) and values.dtype == dtype: + # Catch DatetimeArray/TimedeltaArray + return values + + if not isinstance(dtype, np.dtype): + # i.e. ExtensionDtype; note we have ruled out above the possibility + # that values.dtype == dtype + cls = dtype.construct_array_type() + + values = cls._from_sequence(values, dtype=dtype) + + else: + values = values.astype(dtype, copy=False) + + return values + + +def _ensure_arraylike(values, func_name: str) -> ArrayLike: + """ + ensure that we are arraylike if not already + """ + if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)): + # GH#52986 + if func_name != "isin-targets": + # Make an exception for the comps argument in isin. 
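The coercion table in _ensure_data's docstring above is easiest to see with a concrete round trip; a rough NumPy-only illustration of the datetimelike and bool legs (plain views, not the pandas helpers themselves):

import numpy as np

# datetimelike -> i8: the hashtables only ever see raw int64 nanoseconds ...
stamps = np.array(["2016-01-01", "2016-01-02"], dtype="datetime64[ns]")
as_i8 = stamps.view("i8")
print(as_i8)  # [1451606400000000000 1451692800000000000]

# ... and the astype in _reconstruct_data restores the dtype afterwards.
print(np.array_equal(as_i8.astype("M8[ns]"), stamps))  # True

# bool -> uint8: the same trick, a zero-copy view.
print(np.array([True, False]).view("uint8"))  # [1 0]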
+ warnings.warn( + f"{func_name} with argument that is not not a Series, Index, " + "ExtensionArray, or np.ndarray is deprecated and will raise in a " + "future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + inferred = lib.infer_dtype(values, skipna=False) + if inferred in ["mixed", "string", "mixed-integer"]: + # "mixed-integer" to ensure we do not cast ["ss", 42] to str GH#22160 + if isinstance(values, tuple): + values = list(values) + values = construct_1d_object_array_from_listlike(values) + else: + values = np.asarray(values) + return values + + +_hashtables = { + "complex128": htable.Complex128HashTable, + "complex64": htable.Complex64HashTable, + "float64": htable.Float64HashTable, + "float32": htable.Float32HashTable, + "uint64": htable.UInt64HashTable, + "uint32": htable.UInt32HashTable, + "uint16": htable.UInt16HashTable, + "uint8": htable.UInt8HashTable, + "int64": htable.Int64HashTable, + "int32": htable.Int32HashTable, + "int16": htable.Int16HashTable, + "int8": htable.Int8HashTable, + "string": htable.StringHashTable, + "object": htable.PyObjectHashTable, +} + + +def _get_hashtable_algo(values: np.ndarray): + """ + Parameters + ---------- + values : np.ndarray + + Returns + ------- + htable : HashTable subclass + values : ndarray + """ + values = _ensure_data(values) + + ndtype = _check_object_for_strings(values) + hashtable = _hashtables[ndtype] + return hashtable, values + + +def _check_object_for_strings(values: np.ndarray) -> str: + """ + Check if we can use string hashtable instead of object hashtable. + + Parameters + ---------- + values : ndarray + + Returns + ------- + str + """ + ndtype = values.dtype.name + if ndtype == "object": + # it's cheaper to use a String Hash Table than Object; we infer + # including nulls because that is the only difference between + # StringHashTable and ObjectHashtable + if lib.is_string_array(values, skipna=False): + ndtype = "string" + return ndtype + + +# --------------- # +# top-level algos # +# --------------- # + + +def unique(values): + """ + Return unique values based on a hash table. + + Uniques are returned in order of appearance. This does NOT sort. + + Significantly faster than numpy.unique for long enough sequences. + Includes NA values. + + Parameters + ---------- + values : 1d array-like + + Returns + ------- + numpy.ndarray or ExtensionArray + + The return can be: + + * Index : when the input is an Index + * Categorical : when the input is a Categorical dtype + * ndarray : when the input is a Series/ndarray + + Return numpy.ndarray or ExtensionArray. + + See Also + -------- + Index.unique : Return unique values from an Index. + Series.unique : Return unique values of Series object. + + Examples + -------- + >>> pd.unique(pd.Series([2, 1, 3, 3])) + array([2, 1, 3]) + + >>> pd.unique(pd.Series([2] + [1] * 5)) + array([2, 1]) + + >>> pd.unique(pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")])) + array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') + + >>> pd.unique( + ... pd.Series( + ... [ + ... pd.Timestamp("20160101", tz="US/Eastern"), + ... pd.Timestamp("20160101", tz="US/Eastern"), + ... ] + ... ) + ... ) + + ['2016-01-01 00:00:00-05:00'] + Length: 1, dtype: datetime64[ns, US/Eastern] + + >>> pd.unique( + ... pd.Index( + ... [ + ... pd.Timestamp("20160101", tz="US/Eastern"), + ... pd.Timestamp("20160101", tz="US/Eastern"), + ... ] + ... ) + ... 
) + DatetimeIndex(['2016-01-01 00:00:00-05:00'], + dtype='datetime64[ns, US/Eastern]', + freq=None) + + >>> pd.unique(np.array(list("baabc"), dtype="O")) + array(['b', 'a', 'c'], dtype=object) + + An unordered Categorical will return categories in the + order of appearance. + + >>> pd.unique(pd.Series(pd.Categorical(list("baabc")))) + ['b', 'a', 'c'] + Categories (3, object): ['a', 'b', 'c'] + + >>> pd.unique(pd.Series(pd.Categorical(list("baabc"), categories=list("abc")))) + ['b', 'a', 'c'] + Categories (3, object): ['a', 'b', 'c'] + + An ordered Categorical preserves the category ordering. + + >>> pd.unique( + ... pd.Series( + ... pd.Categorical(list("baabc"), categories=list("abc"), ordered=True) + ... ) + ... ) + ['b', 'a', 'c'] + Categories (3, object): ['a' < 'b' < 'c'] + + An array of tuples + + >>> pd.unique(pd.Series([("a", "b"), ("b", "a"), ("a", "c"), ("b", "a")]).values) + array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object) + """ + return unique_with_mask(values) + + +def nunique_ints(values: ArrayLike) -> int: + """ + Return the number of unique values for integer array-likes. + + Significantly faster than pandas.unique for long enough sequences. + No checks are done to ensure input is integral. + + Parameters + ---------- + values : 1d array-like + + Returns + ------- + int : The number of unique values in ``values`` + """ + if len(values) == 0: + return 0 + values = _ensure_data(values) + # bincount requires intp + result = (np.bincount(values.ravel().astype("intp")) != 0).sum() + return result + + +def unique_with_mask(values, mask: npt.NDArray[np.bool_] | None = None): + """See algorithms.unique for docs. Takes a mask for masked arrays.""" + values = _ensure_arraylike(values, func_name="unique") + + if isinstance(values.dtype, ExtensionDtype): + # Dispatch to extension dtype's unique. + return values.unique() + + original = values + hashtable, values = _get_hashtable_algo(values) + + table = hashtable(len(values)) + if mask is None: + uniques = table.unique(values) + uniques = _reconstruct_data(uniques, original.dtype, original) + return uniques + + else: + uniques, mask = table.unique(values, mask=mask) + uniques = _reconstruct_data(uniques, original.dtype, original) + assert mask is not None # for mypy + return uniques, mask.astype("bool") + + +unique1d = unique + + +_MINIMUM_COMP_ARR_LEN = 1_000_000 + + +def isin(comps: ListLike, values: ListLike) -> npt.NDArray[np.bool_]: + """ + Compute the isin boolean array. + + Parameters + ---------- + comps : list-like + values : list-like + + Returns + ------- + ndarray[bool] + Same length as `comps`. 
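A point worth underlining from the unique() docstring above: uniques come back in first-appearance order because the implementation is hashtable-based, whereas numpy.unique sorts. A toy model of that contract:

import numpy as np

data = np.array(list("baabc"), dtype=object)

# First-appearance order, like pd.unique (dicts preserve insertion order).
print(list(dict.fromkeys(data)))  # ['b', 'a', 'c']

# np.unique sorts instead.
print(np.unique(data))            # ['a' 'b' 'c']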
+ """ + if not is_list_like(comps): + raise TypeError( + "only list-like objects are allowed to be passed " + f"to isin(), you passed a `{type(comps).__name__}`" + ) + if not is_list_like(values): + raise TypeError( + "only list-like objects are allowed to be passed " + f"to isin(), you passed a `{type(values).__name__}`" + ) + + if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)): + orig_values = list(values) + values = _ensure_arraylike(orig_values, func_name="isin-targets") + + if ( + len(values) > 0 + and values.dtype.kind in "iufcb" + and not is_signed_integer_dtype(comps) + ): + # GH#46485 Use object to avoid upcast to float64 later + # TODO: Share with _find_common_type_compat + values = construct_1d_object_array_from_listlike(orig_values) + + elif isinstance(values, ABCMultiIndex): + # Avoid raising in extract_array + values = np.array(values) + else: + values = extract_array(values, extract_numpy=True, extract_range=True) + + comps_array = _ensure_arraylike(comps, func_name="isin") + comps_array = extract_array(comps_array, extract_numpy=True) + if not isinstance(comps_array, np.ndarray): + # i.e. Extension Array + return comps_array.isin(values) + + elif needs_i8_conversion(comps_array.dtype): + # Dispatch to DatetimeLikeArrayMixin.isin + return pd_array(comps_array).isin(values) + elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps_array.dtype): + # e.g. comps_array are integers and values are datetime64s + return np.zeros(comps_array.shape, dtype=bool) + # TODO: not quite right ... Sparse/Categorical + elif needs_i8_conversion(values.dtype): + return isin(comps_array, values.astype(object)) + + elif isinstance(values.dtype, ExtensionDtype): + return isin(np.asarray(comps_array), np.asarray(values)) + + # GH16012 + # Ensure np.isin doesn't get object types or it *may* throw an exception + # Albeit hashmap has O(1) look-up (vs. O(logn) in sorted array), + # isin is faster for small sizes + if ( + len(comps_array) > _MINIMUM_COMP_ARR_LEN + and len(values) <= 26 + and comps_array.dtype != object + ): + # If the values include nan we need to check for nan explicitly + # since np.nan it not equal to np.nan + if isna(values).any(): + + def f(c, v): + return np.logical_or(np.isin(c, v).ravel(), np.isnan(c)) + + else: + f = lambda a, b: np.isin(a, b).ravel() + + else: + common = np_find_common_type(values.dtype, comps_array.dtype) + values = values.astype(common, copy=False) + comps_array = comps_array.astype(common, copy=False) + f = htable.ismember + + return f(comps_array, values) + + +def factorize_array( + values: np.ndarray, + use_na_sentinel: bool = True, + size_hint: int | None = None, + na_value: object = None, + mask: npt.NDArray[np.bool_] | None = None, +) -> tuple[npt.NDArray[np.intp], np.ndarray]: + """ + Factorize a numpy array to codes and uniques. + + This doesn't do any coercion of types or unboxing before factorization. + + Parameters + ---------- + values : ndarray + use_na_sentinel : bool, default True + If True, the sentinel -1 will be used for NaN values. If False, + NaN values will be encoded as non-negative integers and will not drop the + NaN from the uniques of the values. + size_hint : int, optional + Passed through to the hashtable's 'get_labels' method + na_value : object, optional + A value in `values` to consider missing. Note: only use this + parameter when you know that you don't have any values pandas would + consider missing in the array (NaN for float data, iNaT for + datetimes, etc.). 
+ mask : ndarray[bool], optional + If not None, the mask is used as indicator for missing values + (True = missing, False = valid) instead of `na_value` or + condition "val != val". + + Returns + ------- + codes : ndarray[np.intp] + uniques : ndarray + """ + original = values + if values.dtype.kind in "mM": + # _get_hashtable_algo will cast dt64/td64 to i8 via _ensure_data, so we + # need to do the same to na_value. We are assuming here that the passed + # na_value is an appropriately-typed NaT. + # e.g. test_where_datetimelike_categorical + na_value = iNaT + + hash_klass, values = _get_hashtable_algo(values) + + table = hash_klass(size_hint or len(values)) + uniques, codes = table.factorize( + values, + na_sentinel=-1, + na_value=na_value, + mask=mask, + ignore_na=use_na_sentinel, + ) + + # re-cast e.g. i8->dt64/td64, uint8->bool + uniques = _reconstruct_data(uniques, original.dtype, original) + + codes = ensure_platform_int(codes) + return codes, uniques + + +@doc( + values=dedent( + """\ + values : sequence + A 1-D sequence. Sequences that aren't pandas objects are + coerced to ndarrays before factorization. + """ + ), + sort=dedent( + """\ + sort : bool, default False + Sort `uniques` and shuffle `codes` to maintain the + relationship. + """ + ), + size_hint=dedent( + """\ + size_hint : int, optional + Hint to the hashtable sizer. + """ + ), +) +def factorize( + values, + sort: bool = False, + use_na_sentinel: bool = True, + size_hint: int | None = None, +) -> tuple[np.ndarray, np.ndarray | Index]: + """ + Encode the object as an enumerated type or categorical variable. + + This method is useful for obtaining a numeric representation of an + array when all that matters is identifying distinct values. `factorize` + is available as both a top-level function :func:`pandas.factorize`, + and as a method :meth:`Series.factorize` and :meth:`Index.factorize`. + + Parameters + ---------- + {values}{sort} + use_na_sentinel : bool, default True + If True, the sentinel -1 will be used for NaN values. If False, + NaN values will be encoded as non-negative integers and will not drop the + NaN from the uniques of the values. + + .. versionadded:: 1.5.0 + {size_hint}\ + + Returns + ------- + codes : ndarray + An integer ndarray that's an indexer into `uniques`. + ``uniques.take(codes)`` will have the same values as `values`. + uniques : ndarray, Index, or Categorical + The unique valid values. When `values` is Categorical, `uniques` + is a Categorical. When `values` is some other pandas object, an + `Index` is returned. Otherwise, a 1-D ndarray is returned. + + .. note:: + + Even if there's a missing value in `values`, `uniques` will + *not* contain an entry for it. + + See Also + -------- + cut : Discretize continuous-valued array. + unique : Find the unique value in an array. + + Notes + ----- + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + These examples all show factorize as a top-level method like + ``pd.factorize(values)``. The results are identical for methods like + :meth:`Series.factorize`. + + >>> codes, uniques = pd.factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype="O")) + >>> codes + array([0, 0, 1, 2, 0]) + >>> uniques + array(['b', 'a', 'c'], dtype=object) + + With ``sort=True``, the `uniques` will be sorted, and `codes` will be + shuffled so that the relationship is the maintained. + + >>> codes, uniques = pd.factorize(np.array(['b', 'b', 'a', 'c', 'b'], dtype="O"), + ... 
sort=True) + >>> codes + array([1, 1, 0, 2, 1]) + >>> uniques + array(['a', 'b', 'c'], dtype=object) + + When ``use_na_sentinel=True`` (the default), missing values are indicated in + the `codes` with the sentinel value ``-1`` and missing values are not + included in `uniques`. + + >>> codes, uniques = pd.factorize(np.array(['b', None, 'a', 'c', 'b'], dtype="O")) + >>> codes + array([ 0, -1, 1, 2, 0]) + >>> uniques + array(['b', 'a', 'c'], dtype=object) + + Thus far, we've only factorized lists (which are internally coerced to + NumPy arrays). When factorizing pandas objects, the type of `uniques` + will differ. For Categoricals, a `Categorical` is returned. + + >>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c']) + >>> codes, uniques = pd.factorize(cat) + >>> codes + array([0, 0, 1]) + >>> uniques + ['a', 'c'] + Categories (3, object): ['a', 'b', 'c'] + + Notice that ``'b'`` is in ``uniques.categories``, despite not being + present in ``cat.values``. + + For all other pandas objects, an Index of the appropriate type is + returned. + + >>> cat = pd.Series(['a', 'a', 'c']) + >>> codes, uniques = pd.factorize(cat) + >>> codes + array([0, 0, 1]) + >>> uniques + Index(['a', 'c'], dtype='object') + + If NaN is in the values, and we want to include NaN in the uniques of the + values, it can be achieved by setting ``use_na_sentinel=False``. + + >>> values = np.array([1, 2, 1, np.nan]) + >>> codes, uniques = pd.factorize(values) # default: use_na_sentinel=True + >>> codes + array([ 0, 1, 0, -1]) + >>> uniques + array([1., 2.]) + + >>> codes, uniques = pd.factorize(values, use_na_sentinel=False) + >>> codes + array([0, 1, 0, 2]) + >>> uniques + array([ 1., 2., nan]) + """ + # Implementation notes: This method is responsible for 3 things + # 1.) coercing data to array-like (ndarray, Index, extension array) + # 2.) factorizing codes and uniques + # 3.) Maybe boxing the uniques in an Index + # + # Step 2 is dispatched to extension types (like Categorical). They are + # responsible only for factorization. All data coercion, sorting and boxing + # should happen here. + if isinstance(values, (ABCIndex, ABCSeries)): + return values.factorize(sort=sort, use_na_sentinel=use_na_sentinel) + + values = _ensure_arraylike(values, func_name="factorize") + original = values + + if ( + isinstance(values, (ABCDatetimeArray, ABCTimedeltaArray)) + and values.freq is not None + ): + # The presence of 'freq' means we can fast-path sorting and know there + # aren't NAs + codes, uniques = values.factorize(sort=sort) + return codes, uniques + + elif not isinstance(values, np.ndarray): + # i.e. ExtensionArray + codes, uniques = values.factorize(use_na_sentinel=use_na_sentinel) + + else: + values = np.asarray(values) # convert DTA/TDA/MultiIndex + + if not use_na_sentinel and values.dtype == object: + # factorize can now handle differentiating various types of null values. + # These can only occur when the array has object dtype. + # However, for backwards compatibility we only use the null for the + # provided dtype. This may be revisited in the future, see GH#48476. 
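The sort=True branch near the end of factorize above does not re-hash anything: safe_sort sorts the uniques and pushes the existing codes through the inverse permutation. Roughly the following, ignoring the -1 NA sentinel (which safe_sort masks out first):

import numpy as np

codes = np.array([0, 0, 1, 2, 0])        # factorized ['b','b','a','c','b']
uniques = np.array(["b", "a", "c"])

order = np.argsort(uniques)              # [1, 0, 2]: positions sorted by value
inverse = np.empty_like(order)
inverse[order] = np.arange(len(order))   # maps old code -> new code

print(uniques[order])   # ['a' 'b' 'c']
print(inverse[codes])   # [1 1 0 2 1] -- same pairing, now against sorted uniques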
+ null_mask = isna(values) + if null_mask.any(): + na_value = na_value_for_dtype(values.dtype, compat=False) + # Don't modify (potentially user-provided) array + values = np.where(null_mask, na_value, values) + + codes, uniques = factorize_array( + values, + use_na_sentinel=use_na_sentinel, + size_hint=size_hint, + ) + + if sort and len(uniques) > 0: + uniques, codes = safe_sort( + uniques, + codes, + use_na_sentinel=use_na_sentinel, + assume_unique=True, + verify=False, + ) + + uniques = _reconstruct_data(uniques, original.dtype, original) + + return codes, uniques + + +def value_counts( + values, + sort: bool = True, + ascending: bool = False, + normalize: bool = False, + bins=None, + dropna: bool = True, +) -> Series: + """ + Compute a histogram of the counts of non-null values. + + Parameters + ---------- + values : ndarray (1-d) + sort : bool, default True + Sort by values + ascending : bool, default False + Sort in ascending order + normalize: bool, default False + If True then compute a relative histogram + bins : integer, optional + Rather than count values, group them into half-open bins, + convenience for pd.cut, only works with numeric data + dropna : bool, default True + Don't include counts of NaN + + Returns + ------- + Series + """ + warnings.warn( + # GH#53493 + "pandas.value_counts is deprecated and will be removed in a " + "future version. Use pd.Series(obj).value_counts() instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return value_counts_internal( + values, + sort=sort, + ascending=ascending, + normalize=normalize, + bins=bins, + dropna=dropna, + ) + + +def value_counts_internal( + values, + sort: bool = True, + ascending: bool = False, + normalize: bool = False, + bins=None, + dropna: bool = True, +) -> Series: + from pandas import ( + Index, + Series, + ) + + index_name = getattr(values, "name", None) + name = "proportion" if normalize else "count" + + if bins is not None: + from pandas.core.reshape.tile import cut + + if isinstance(values, Series): + values = values._values + + try: + ii = cut(values, bins, include_lowest=True) + except TypeError as err: + raise TypeError("bins argument only works with numeric data.") from err + + # count, remove nulls (from the index), and but the bins + result = ii.value_counts(dropna=dropna) + result.name = name + result = result[result.index.notna()] + result.index = result.index.astype("interval") + result = result.sort_index() + + # if we are dropna and we have NO values + if dropna and (result._values == 0).all(): + result = result.iloc[0:0] + + # normalizing is by len of all (regardless of dropna) + counts = np.array([len(ii)]) + + else: + if is_extension_array_dtype(values): + # handle Categorical and sparse, + result = Series(values, copy=False)._values.value_counts(dropna=dropna) + result.name = name + result.index.name = index_name + counts = result._values + if not isinstance(counts, np.ndarray): + # e.g. 
ArrowExtensionArray + counts = np.asarray(counts) + + elif isinstance(values, ABCMultiIndex): + # GH49558 + levels = list(range(values.nlevels)) + result = ( + Series(index=values, name=name) + .groupby(level=levels, dropna=dropna) + .size() + ) + result.index.names = values.names + counts = result._values + + else: + values = _ensure_arraylike(values, func_name="value_counts") + keys, counts = value_counts_arraylike(values, dropna) + if keys.dtype == np.float16: + keys = keys.astype(np.float32) + + # For backwards compatibility, we let Index do its normal type + # inference, _except_ for if if infers from object to bool. + idx = Index(keys) + if idx.dtype == bool and keys.dtype == object: + idx = idx.astype(object) + idx.name = index_name + + result = Series(counts, index=idx, name=name, copy=False) + + if sort: + result = result.sort_values(ascending=ascending) + + if normalize: + result = result / counts.sum() + + return result + + +# Called once from SparseArray, otherwise could be private +def value_counts_arraylike( + values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = None +) -> tuple[ArrayLike, npt.NDArray[np.int64]]: + """ + Parameters + ---------- + values : np.ndarray + dropna : bool + mask : np.ndarray[bool] or None, default None + + Returns + ------- + uniques : np.ndarray + counts : np.ndarray[np.int64] + """ + original = values + values = _ensure_data(values) + + keys, counts = htable.value_count(values, dropna, mask=mask) + + if needs_i8_conversion(original.dtype): + # datetime, timedelta, or period + + if dropna: + mask = keys != iNaT + keys, counts = keys[mask], counts[mask] + + res_keys = _reconstruct_data(keys, original.dtype, original) + return res_keys, counts + + +def duplicated( + values: ArrayLike, keep: Literal["first", "last", False] = "first" +) -> npt.NDArray[np.bool_]: + """ + Return boolean ndarray denoting duplicate values. + + Parameters + ---------- + values : nd.array, ExtensionArray or Series + Array over which to check for duplicate values. + keep : {'first', 'last', False}, default 'first' + - ``first`` : Mark duplicates as ``True`` except for the first + occurrence. + - ``last`` : Mark duplicates as ``True`` except for the last + occurrence. + - False : Mark all duplicates as ``True``. + + Returns + ------- + duplicated : ndarray[bool] + """ + if hasattr(values, "dtype"): + if isinstance(values.dtype, ArrowDtype) and values.dtype.kind in "ifub": + values = values._to_masked() # type: ignore[union-attr] + + if isinstance(values.dtype, BaseMaskedDtype): + values = cast("BaseMaskedArray", values) + return htable.duplicated(values._data, keep=keep, mask=values._mask) + + values = _ensure_data(values) + return htable.duplicated(values, keep=keep) + + +def mode( + values: ArrayLike, dropna: bool = True, mask: npt.NDArray[np.bool_] | None = None +) -> ArrayLike: + """ + Returns the mode(s) of an array. + + Parameters + ---------- + values : array-like + Array over which to check for duplicate values. + dropna : bool, default True + Don't consider counts of NaN/NaT. + + Returns + ------- + np.ndarray or ExtensionArray + """ + values = _ensure_arraylike(values, func_name="mode") + original = values + + if needs_i8_conversion(values.dtype): + # Got here with ndarray; dispatch to DatetimeArray/TimedeltaArray. 
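duplicated above is another hashtable dispatch, but its keep semantics are simple to model: keep='first' flags every occurrence after the first, keep='last' is the same scan from the right, and keep=False flags anything that occurs more than once. A sketch (toy_duplicated is illustrative only):

from collections import Counter

def toy_duplicated(values, keep="first"):
    if keep == "last":
        # Scan from the right by reusing the 'first' logic on the reversal.
        return toy_duplicated(values[::-1], "first")[::-1]
    if keep is False:
        counts = Counter(values)
        return [counts[v] > 1 for v in values]
    seen = set()
    out = []
    for v in values:
        out.append(v in seen)  # True once the value was already seen
        seen.add(v)
    return out

data = ["a", "b", "a", "c", "a"]
print(toy_duplicated(data))               # [False, False, True, False, True]
print(toy_duplicated(data, keep="last"))  # [True, False, True, False, False]
print(toy_duplicated(data, keep=False))   # [True, False, True, False, True]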
+ values = ensure_wrapped_if_datetimelike(values) + values = cast("ExtensionArray", values) + return values._mode(dropna=dropna) + + values = _ensure_data(values) + + npresult = htable.mode(values, dropna=dropna, mask=mask) + try: + npresult = np.sort(npresult) + except TypeError as err: + warnings.warn( + f"Unable to sort modes: {err}", + stacklevel=find_stack_level(), + ) + + result = _reconstruct_data(npresult, original.dtype, original) + return result + + +def rank( + values: ArrayLike, + axis: AxisInt = 0, + method: str = "average", + na_option: str = "keep", + ascending: bool = True, + pct: bool = False, +) -> npt.NDArray[np.float64]: + """ + Rank the values along a given axis. + + Parameters + ---------- + values : np.ndarray or ExtensionArray + Array whose values will be ranked. The number of dimensions in this + array must not exceed 2. + axis : int, default 0 + Axis over which to perform rankings. + method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' + The method by which tiebreaks are broken during the ranking. + na_option : {'keep', 'top'}, default 'keep' + The method by which NaNs are placed in the ranking. + - ``keep``: rank each NaN value with a NaN ranking + - ``top``: replace each NaN with either +/- inf so that they + there are ranked at the top + ascending : bool, default True + Whether or not the elements should be ranked in ascending order. + pct : bool, default False + Whether or not to the display the returned rankings in integer form + (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1). + """ + is_datetimelike = needs_i8_conversion(values.dtype) + values = _ensure_data(values) + + if values.ndim == 1: + ranks = algos.rank_1d( + values, + is_datetimelike=is_datetimelike, + ties_method=method, + ascending=ascending, + na_option=na_option, + pct=pct, + ) + elif values.ndim == 2: + ranks = algos.rank_2d( + values, + axis=axis, + is_datetimelike=is_datetimelike, + ties_method=method, + ascending=ascending, + na_option=na_option, + pct=pct, + ) + else: + raise TypeError("Array with ndim > 2 are not supported.") + + return ranks + + +def checked_add_with_arr( + arr: npt.NDArray[np.int64], + b: int | npt.NDArray[np.int64], + arr_mask: npt.NDArray[np.bool_] | None = None, + b_mask: npt.NDArray[np.bool_] | None = None, +) -> npt.NDArray[np.int64]: + """ + Perform array addition that checks for underflow and overflow. + + Performs the addition of an int64 array and an int64 integer (or array) + but checks that they do not result in overflow first. For elements that + are indicated to be NaN, whether or not there is overflow for that element + is automatically ignored. + + Parameters + ---------- + arr : np.ndarray[int64] addend. + b : array or scalar addend. + arr_mask : np.ndarray[bool] or None, default None + array indicating which elements to exclude from checking + b_mask : np.ndarray[bool] or None, default None + array or scalar indicating which element(s) to exclude from checking + + Returns + ------- + sum : An array for elements x + b for each element x in arr if b is + a scalar or an array for elements x + y for each element pair + (x, y) in (arr, b). + + Raises + ------ + OverflowError if any x + y exceeds the maximum or minimum int64 value. + """ + # For performance reasons, we broadcast 'b' to the new array 'b2' + # so that it has the same size as 'arr'. + b2 = np.broadcast_to(b, arr.shape) + if b_mask is not None: + # We do the same broadcasting for b_mask as well. 
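rank above forwards to the cython rank_1d/rank_2d kernels; its default method='average' is nonetheless easy to model with a double argsort plus tie averaging (a sketch only: NaN handling and the other tie methods are omitted):

import numpy as np

def toy_rank_average(values):
    values = np.asarray(values, dtype=float)
    order = np.argsort(values, kind="stable")
    ordinal = np.empty(len(values))
    ordinal[order] = np.arange(1, len(values) + 1)  # 1-based ordinal ranks
    out = ordinal.copy()
    for v in np.unique(values):
        mask = values == v
        out[mask] = ordinal[mask].mean()            # ties share their mean rank
    return out

print(toy_rank_average([3, 1, 4, 1, 5]))  # [3.  1.5 4.  1.5 5. ]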
+ b2_mask = np.broadcast_to(b_mask, arr.shape) + else: + b2_mask = None + + # For elements that are NaN, regardless of their value, we should + # ignore whether they overflow or not when doing the checked add. + if arr_mask is not None and b2_mask is not None: + not_nan = np.logical_not(arr_mask | b2_mask) + elif arr_mask is not None: + not_nan = np.logical_not(arr_mask) + elif b_mask is not None: + # error: Argument 1 to "__call__" of "_UFunc_Nin1_Nout1" has + # incompatible type "Optional[ndarray[Any, dtype[bool_]]]"; + # expected "Union[_SupportsArray[dtype[Any]], _NestedSequence + # [_SupportsArray[dtype[Any]]], bool, int, float, complex, str + # , bytes, _NestedSequence[Union[bool, int, float, complex, str + # , bytes]]]" + not_nan = np.logical_not(b2_mask) # type: ignore[arg-type] + else: + not_nan = np.empty(arr.shape, dtype=bool) + not_nan.fill(True) + + # gh-14324: For each element in 'arr' and its corresponding element + # in 'b2', we check the sign of the element in 'b2'. If it is positive, + # we then check whether its sum with the element in 'arr' exceeds + # np.iinfo(np.int64).max. If so, we have an overflow error. If it + # it is negative, we then check whether its sum with the element in + # 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow + # error as well. + i8max = lib.i8max + i8min = iNaT + + mask1 = b2 > 0 + mask2 = b2 < 0 + + if not mask1.any(): + to_raise = ((i8min - b2 > arr) & not_nan).any() + elif not mask2.any(): + to_raise = ((i8max - b2 < arr) & not_nan).any() + else: + to_raise = ((i8max - b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or ( + (i8min - b2[mask2] > arr[mask2]) & not_nan[mask2] + ).any() + + if to_raise: + raise OverflowError("Overflow in int64 addition") + + result = arr + b + if arr_mask is not None or b2_mask is not None: + np.putmask(result, ~not_nan, iNaT) + + return result + + +# ---- # +# take # +# ---- # + + +def take( + arr, + indices: TakeIndexer, + axis: AxisInt = 0, + allow_fill: bool = False, + fill_value=None, +): + """ + Take elements from an array. + + Parameters + ---------- + arr : array-like or scalar value + Non array-likes (sequences/scalars without a dtype) are coerced + to an ndarray. + + .. deprecated:: 2.1.0 + Passing an argument other than a numpy.ndarray, ExtensionArray, + Index, or Series is deprecated. + + indices : sequence of int or one-dimensional np.ndarray of int + Indices to be taken. + axis : int, default 0 + The axis over which to select values. + allow_fill : bool, default False + How to handle negative values in `indices`. + + * False: negative values in `indices` indicate positional indices + from the right (the default). This is similar to :func:`numpy.take`. + + * True: negative values in `indices` indicate + missing values. These values are set to `fill_value`. Any other + negative values raise a ``ValueError``. + + fill_value : any, optional + Fill value to use for NA-indices when `allow_fill` is True. + This may be ``None``, in which case the default NA value for + the type (``self.dtype.na_value``) is used. + + For multi-dimensional `arr`, each *element* is filled with + `fill_value`. + + Returns + ------- + ndarray or ExtensionArray + Same type as the input. + + Raises + ------ + IndexError + When `indices` is out of bounds for the array. + ValueError + When the indexer contains negative values other than ``-1`` + and `allow_fill` is True. + + Notes + ----- + When `allow_fill` is False, `indices` may be whatever dimensionality + is accepted by NumPy for `arr`. 
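The trick in checked_add_with_arr above is that the overflow test itself must never overflow: rather than computing arr + b and inspecting the result, it compares arr against i8max - b when b is positive and against i8min - b when b is negative. A scalar sketch of the same predicate (Python ints don't wrap, so this only models the int64 logic):

import numpy as np

i8max = np.iinfo(np.int64).max
i8min = np.iinfo(np.int64).min  # == iNaT, the integer NaT sentinel

def checked_add(x: int, b: int) -> int:
    # Rearranged bounds checks keep every intermediate in int64 range.
    if b > 0 and x > i8max - b:
        raise OverflowError("Overflow in int64 addition")
    if b < 0 and x < i8min - b:
        raise OverflowError("Overflow in int64 addition")
    return x + b

print(checked_add(2, 3))  # 5
try:
    checked_add(i8max, 1)
except OverflowError as exc:
    print(exc)            # Overflow in int64 addition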
+
+ When `allow_fill` is True, `indices` should be 1-D.
+
+ See Also
+ --------
+ numpy.take : Take elements from an array along an axis.
+
+ Examples
+ --------
+ >>> import pandas as pd
+
+ With the default ``allow_fill=False``, negative numbers indicate
+ positional indices from the right.
+
+ >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1])
+ array([10, 10, 30])
+
+ Setting ``allow_fill=True`` will place `fill_value` in those positions.
+
+ >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
+ array([10., 10., nan])
+
+ >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
+ ... fill_value=-10)
+ array([ 10, 10, -10])
+ """
+ if not isinstance(arr, (np.ndarray, ABCExtensionArray, ABCIndex, ABCSeries)):
+ # GH#52981
+ warnings.warn(
+ "pd.api.extensions.take accepting non-standard inputs is deprecated "
+ "and will raise in a future version. Pass either a numpy.ndarray, "
+ "ExtensionArray, Index, or Series instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ if not is_array_like(arr):
+ arr = np.asarray(arr)
+
+ indices = ensure_platform_int(indices)
+
+ if allow_fill:
+ # Pandas style, -1 means NA
+ validate_indices(indices, arr.shape[axis])
+ result = take_nd(
+ arr, indices, axis=axis, allow_fill=True, fill_value=fill_value
+ )
+ else:
+ # NumPy style
+ result = arr.take(indices, axis=axis)
+ return result
+
+
+# ------------ #
+# searchsorted #
+# ------------ #
+
+
+def searchsorted(
+ arr: ArrayLike,
+ value: NumpyValueArrayLike | ExtensionArray,
+ side: Literal["left", "right"] = "left",
+ sorter: NumpySorter | None = None,
+) -> npt.NDArray[np.intp] | np.intp:
+ """
+ Find indices where elements should be inserted to maintain order.
+
+ Find the indices into a sorted array `arr` such that, if the
+ corresponding elements in `value` were inserted before the indices,
+ the order of `arr` would be preserved.
+
+ Assuming that `arr` is sorted:
+
+ ====== ================================
+ `side` returned index `i` satisfies
+ ====== ================================
+ left ``arr[i-1] < value <= arr[i]``
+ right ``arr[i-1] <= value < arr[i]``
+ ====== ================================
+
+ Parameters
+ ----------
+ arr : np.ndarray, ExtensionArray, Series
+ Input array. If `sorter` is None, then it must be sorted in
+ ascending order, otherwise `sorter` must be an array of indices
+ that sort it.
+ value : array-like or scalar
+ Values to insert into `arr`.
+ side : {'left', 'right'}, optional
+ If 'left', the index of the first suitable location found is given.
+ If 'right', return the last such index. If there is no suitable
+ index, return either 0 or N (where N is the length of `arr`).
+ sorter : 1-D array-like, optional
+ Optional array of integer indices that sort `arr` into ascending
+ order. They are typically the result of argsort.
+
+ Returns
+ -------
+ array of ints or int
+ If value is array-like, array of insertion points.
+ If value is scalar, a single integer.
+
+ See Also
+ --------
+ numpy.searchsorted : Similar method from NumPy.
+ """
+ if sorter is not None:
+ sorter = ensure_platform_int(sorter)
+
+ if (
+ isinstance(arr, np.ndarray)
+ and arr.dtype.kind in "iu"
+ and (is_integer(value) or is_integer_dtype(value))
+ ):
+ # if `arr` and `value` have different dtypes, `arr` would be
+ # recast by numpy, causing a slow search.
+ # Before searching below, we therefore try to give `value` the
+ # same dtype as `arr`, while guarding against integer overflows.
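+ # For intuition, an assumed example: searching for 300 in an int8
+ # array must not cast 300 down to int8, so arr's dtype is only
+ # adopted when the value fits within np.iinfo(arr.dtype) bounds.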
+ iinfo = np.iinfo(arr.dtype.type)
+ value_arr = np.array([value]) if is_integer(value) else np.array(value)
+ if (value_arr >= iinfo.min).all() and (value_arr <= iinfo.max).all():
+ # value within bounds, so no overflow, so can convert value dtype
+ # to dtype of arr
+ dtype = arr.dtype
+ else:
+ dtype = value_arr.dtype
+
+ if is_integer(value):
+ # We know that value is int
+ value = cast(int, dtype.type(value))
+ else:
+ value = pd_array(cast(ArrayLike, value), dtype=dtype)
+ else:
+ # E.g. if `arr` is an array with dtype='datetime64[ns]'
+ # and `value` is a pd.Timestamp, we may need to convert value
+ arr = ensure_wrapped_if_datetimelike(arr)
+
+ # Argument 1 to "searchsorted" of "ndarray" has incompatible type
+ # "Union[NumpyValueArrayLike, ExtensionArray]"; expected "NumpyValueArrayLike"
+ return arr.searchsorted(value, side=side, sorter=sorter) # type: ignore[arg-type]
+
+
+# ---- #
+# diff #
+# ---- #
+
+_diff_special = {"float64", "float32", "int64", "int32", "int16", "int8"}
+
+
+def diff(arr, n: int, axis: AxisInt = 0):
+ """
+ Compute the difference of ``n`` periods, analogous to ``s - s.shift(n)``.
+
+ Parameters
+ ----------
+ arr : ndarray or ExtensionArray
+ n : int
+ number of periods
+ axis : {0, 1}
+ axis to shift on
+
+ Returns
+ -------
+ shifted
+ """
+
+ n = int(n)
+ na = np.nan
+ dtype = arr.dtype
+
+ is_bool = is_bool_dtype(dtype)
+ if is_bool:
+ op = operator.xor
+ else:
+ op = operator.sub
+
+ if isinstance(dtype, NumpyEADtype):
+ # NumpyExtensionArray cannot necessarily hold shifted versions of itself.
+ arr = arr.to_numpy()
+ dtype = arr.dtype
+
+ if not isinstance(arr, np.ndarray):
+ # i.e. ExtensionArray
+ if hasattr(arr, f"__{op.__name__}__"):
+ if axis != 0:
+ raise ValueError(f"cannot diff {type(arr).__name__} on axis={axis}")
+ return op(arr, arr.shift(n))
+ else:
+ raise TypeError(
+ f"{type(arr).__name__} has no 'diff' method. "
+ "Convert to a suitable dtype prior to calling 'diff'."
+ )
+
+ is_timedelta = False
+ if arr.dtype.kind in "mM":
+ dtype = np.int64
+ arr = arr.view("i8")
+ na = iNaT
+ is_timedelta = True
+
+ elif is_bool:
+ # We have to cast in order to be able to hold np.nan
+ dtype = np.object_
+
+ elif dtype.kind in "iu":
+ # We have to cast in order to be able to hold np.nan
+
+ # int8, int16 are incompatible with float64,
+ # see https://github.com/cython/cython/issues/2646
+ if arr.dtype.name in ["int8", "int16"]:
+ dtype = np.float32
+ else:
+ dtype = np.float64
+
+ orig_ndim = arr.ndim
+ if orig_ndim == 1:
+ # reshape so we can always use algos.diff_2d
+ arr = arr.reshape(-1, 1)
+ # TODO: require axis == 0
+
+ dtype = np.dtype(dtype)
+ out_arr = np.empty(arr.shape, dtype=dtype)
+
+ na_indexer = [slice(None)] * 2
+ na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
+ out_arr[tuple(na_indexer)] = na
+
+ if arr.dtype.name in _diff_special:
+ # TODO: can diff_2d dtype specialization troubles be fixed by defining
+ # out_arr inside diff_2d?
+ algos.diff_2d(arr, out_arr, n, axis, datetimelike=is_timedelta)
+ else:
+ # To keep mypy happy, _res_indexer is a list while res_indexer is
+ # a tuple, ditto for lag_indexer.
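+ # For intuition (illustrative only): with n=2 and axis=0, the slices
+ # built below pair arr[2:] with arr[:-2], so each output element is
+ # arr[i] - arr[i - 2], mirroring s - s.shift(2).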
+ _res_indexer = [slice(None)] * 2
+ _res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
+ res_indexer = tuple(_res_indexer)
+
+ _lag_indexer = [slice(None)] * 2
+ _lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
+ lag_indexer = tuple(_lag_indexer)
+
+ out_arr[res_indexer] = op(arr[res_indexer], arr[lag_indexer])
+
+ if is_timedelta:
+ out_arr = out_arr.view("timedelta64[ns]")
+
+ if orig_ndim == 1:
+ out_arr = out_arr[:, 0]
+ return out_arr
+
+
+# --------------------------------------------------------------------
+# Helper functions
+
+
+# Note: safe_sort is in algorithms.py instead of sorting.py because it is
+# low-dependency, is used in this module, and uses private methods from
+# this module.
+def safe_sort(
+ values: Index | ArrayLike,
+ codes: npt.NDArray[np.intp] | None = None,
+ use_na_sentinel: bool = True,
+ assume_unique: bool = False,
+ verify: bool = True,
+) -> AnyArrayLike | tuple[AnyArrayLike, np.ndarray]:
+ """
+ Sort ``values`` and reorder corresponding ``codes``.
+
+ ``values`` should be unique if ``codes`` is not None.
+ Safe for use with mixed types (int, str), orders ints before strs.
+
+ Parameters
+ ----------
+ values : list-like
+ Sequence; must be unique if ``codes`` is not None.
+ codes : np.ndarray[intp] or None, default None
+ Indices to ``values``. All out of bound indices are treated as
+ "not found" and will be masked with ``-1``.
+ use_na_sentinel : bool, default True
+ If True, the sentinel -1 will be used for NaN values. If False,
+ NaN values will be encoded as non-negative integers and the NaN
+ will not be dropped from the uniques of the values.
+ assume_unique : bool, default False
+ When True, ``values`` are assumed to be unique, which can speed up
+ the calculation. Ignored when ``codes`` is None.
+ verify : bool, default True
+ Check if codes are out of bound for the values and put out of bound
+ codes equal to ``-1``. If ``verify=False``, it is assumed there
+ are no out of bound codes. Ignored when ``codes`` is None.
+
+ Returns
+ -------
+ ordered : AnyArrayLike
+ Sorted ``values``
+ new_codes : ndarray
+ Reordered ``codes``; returned when ``codes`` is not None.
+
+ Raises
+ ------
+ TypeError
+ * If ``values`` is not list-like or if ``codes`` is neither None
+ nor list-like
+ * If ``values`` cannot be sorted
+ ValueError
+ * If ``codes`` is not None and ``values`` contain duplicates.
+ """
+ if not isinstance(values, (np.ndarray, ABCExtensionArray, ABCIndex)):
+ raise TypeError(
+ "Only np.ndarray, ExtensionArray, and Index objects are allowed to "
+ "be passed to safe_sort as values"
+ )
+
+ sorter = None
+ ordered: AnyArrayLike
+
+ if (
+ not isinstance(values.dtype, ExtensionDtype)
+ and lib.infer_dtype(values, skipna=False) == "mixed-integer"
+ ):
+ ordered = _sort_mixed(values)
+ else:
+ try:
+ sorter = values.argsort()
+ ordered = values.take(sorter)
+ except TypeError:
+ # Previous sorters failed or were not applicable, try `_sort_mixed`
+ # which would work, but which fails for the special case of 1d arrays
+ # with tuples.
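+ # Illustrative trigger: np.array([1, "a"], dtype=object).argsort()
+ # raises TypeError because int and str do not compare, landing here.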
+ if values.size and isinstance(values[0], tuple): + # error: Argument 1 to "_sort_tuples" has incompatible type + # "Union[Index, ExtensionArray, ndarray[Any, Any]]"; expected + # "ndarray[Any, Any]" + ordered = _sort_tuples(values) # type: ignore[arg-type] + else: + ordered = _sort_mixed(values) + + # codes: + + if codes is None: + return ordered + + if not is_list_like(codes): + raise TypeError( + "Only list-like objects or None are allowed to " + "be passed to safe_sort as codes" + ) + codes = ensure_platform_int(np.asarray(codes)) + + if not assume_unique and not len(unique(values)) == len(values): + raise ValueError("values should be unique if codes is not None") + + if sorter is None: + # mixed types + # error: Argument 1 to "_get_hashtable_algo" has incompatible type + # "Union[Index, ExtensionArray, ndarray[Any, Any]]"; expected + # "ndarray[Any, Any]" + hash_klass, values = _get_hashtable_algo(values) # type: ignore[arg-type] + t = hash_klass(len(values)) + t.map_locations(values) + sorter = ensure_platform_int(t.lookup(ordered)) + + if use_na_sentinel: + # take_nd is faster, but only works for na_sentinels of -1 + order2 = sorter.argsort() + new_codes = take_nd(order2, codes, fill_value=-1) + if verify: + mask = (codes < -len(values)) | (codes >= len(values)) + else: + mask = None + else: + reverse_indexer = np.empty(len(sorter), dtype=int) + reverse_indexer.put(sorter, np.arange(len(sorter))) + # Out of bound indices will be masked with `-1` next, so we + # may deal with them here without performance loss using `mode='wrap'` + new_codes = reverse_indexer.take(codes, mode="wrap") + + if use_na_sentinel: + mask = codes == -1 + if verify: + mask = mask | (codes < -len(values)) | (codes >= len(values)) + + if use_na_sentinel and mask is not None: + np.putmask(new_codes, mask, -1) + + return ordered, ensure_platform_int(new_codes) + + +def _sort_mixed(values) -> AnyArrayLike: + """order ints before strings before nulls in 1d arrays""" + str_pos = np.array([isinstance(x, str) for x in values], dtype=bool) + null_pos = np.array([isna(x) for x in values], dtype=bool) + num_pos = ~str_pos & ~null_pos + str_argsort = np.argsort(values[str_pos]) + num_argsort = np.argsort(values[num_pos]) + # convert boolean arrays to positional indices, then order by underlying values + str_locs = str_pos.nonzero()[0].take(str_argsort) + num_locs = num_pos.nonzero()[0].take(num_argsort) + null_locs = null_pos.nonzero()[0] + locs = np.concatenate([num_locs, str_locs, null_locs]) + return values.take(locs) + + +def _sort_tuples(values: np.ndarray) -> np.ndarray: + """ + Convert array of tuples (1d) to array of arrays (2d). + We need to keep the columns separately as they contain different types and + nans (can't use `np.sort` as it may fail when str and nan are mixed in a + column as types cannot be compared). + """ + from pandas.core.internals.construction import to_arrays + from pandas.core.sorting import lexsort_indexer + + arrays, _ = to_arrays(values, None) + indexer = lexsort_indexer(arrays, orders=True) + return values[indexer] + + +def union_with_duplicates( + lvals: ArrayLike | Index, rvals: ArrayLike | Index +) -> ArrayLike | Index: + """ + Extracts the union from lvals and rvals with respect to duplicates and nans in + both arrays. + + Parameters + ---------- + lvals: np.ndarray or ExtensionArray + left values which is ordered in front. + rvals: np.ndarray or ExtensionArray + right values ordered after lvals. 
+ + Returns + ------- + np.ndarray or ExtensionArray + Containing the unsorted union of both arrays. + + Notes + ----- + Caller is responsible for ensuring lvals.dtype == rvals.dtype. + """ + from pandas import Series + + l_count = value_counts_internal(lvals, dropna=False) + r_count = value_counts_internal(rvals, dropna=False) + l_count, r_count = l_count.align(r_count, fill_value=0) + final_count = np.maximum(l_count.values, r_count.values) + final_count = Series(final_count, index=l_count.index, dtype="int", copy=False) + if isinstance(lvals, ABCMultiIndex) and isinstance(rvals, ABCMultiIndex): + unique_vals = lvals.append(rvals).unique() + else: + if isinstance(lvals, ABCIndex): + lvals = lvals._values + if isinstance(rvals, ABCIndex): + rvals = rvals._values + # error: List item 0 has incompatible type "Union[ExtensionArray, + # ndarray[Any, Any], Index]"; expected "Union[ExtensionArray, + # ndarray[Any, Any]]" + combined = concat_compat([lvals, rvals]) # type: ignore[list-item] + unique_vals = unique(combined) + unique_vals = ensure_wrapped_if_datetimelike(unique_vals) + repeats = final_count.reindex(unique_vals).values + return np.repeat(unique_vals, repeats) + + +def map_array( + arr: ArrayLike, + mapper, + na_action: Literal["ignore"] | None = None, + convert: bool = True, +) -> np.ndarray | ExtensionArray | Index: + """ + Map values using an input mapping or function. + + Parameters + ---------- + mapper : function, dict, or Series + Mapping correspondence. + na_action : {None, 'ignore'}, default None + If 'ignore', propagate NA values, without passing them to the + mapping correspondence. + convert : bool, default True + Try to find better dtype for elementwise function results. If + False, leave as dtype=object. + + Returns + ------- + Union[ndarray, Index, ExtensionArray] + The output of the mapping function applied to the array. + If the function returns a tuple with more than one element + a MultiIndex will be returned. + """ + if na_action not in (None, "ignore"): + msg = f"na_action must either be 'ignore' or None, {na_action} was passed" + raise ValueError(msg) + + # we can fastpath dict/Series to an efficient map + # as we know that we are not going to have to yield + # python types + if is_dict_like(mapper): + if isinstance(mapper, dict) and hasattr(mapper, "__missing__"): + # If a dictionary subclass defines a default value method, + # convert mapper to a lookup function (GH #15999). + dict_with_default = mapper + mapper = lambda x: dict_with_default[ + np.nan if isinstance(x, float) and np.isnan(x) else x + ] + else: + # Dictionary does not have a default. Thus it's safe to + # convert to an Series for efficiency. + # we specify the keys here to handle the + # possibility that they are tuples + + # The return value of mapping with an empty mapper is + # expected to be pd.Series(np.nan, ...). 
As np.nan is + # of dtype float64 the return value of this method should + # be float64 as well + from pandas import Series + + if len(mapper) == 0: + mapper = Series(mapper, dtype=np.float64) + else: + mapper = Series(mapper) + + if isinstance(mapper, ABCSeries): + if na_action == "ignore": + mapper = mapper[mapper.index.notna()] + + # Since values were input this means we came from either + # a dict or a series and mapper should be an index + indexer = mapper.index.get_indexer(arr) + new_values = take_nd(mapper._values, indexer) + + return new_values + + if not len(arr): + return arr.copy() + + # we must convert to python types + values = arr.astype(object, copy=False) + if na_action is None: + return lib.map_infer(values, mapper, convert=convert) + else: + return lib.map_infer_mask( + values, mapper, mask=isna(values).view(np.uint8), convert=convert + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/api.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/api.py new file mode 100644 index 00000000..2cfe5ffc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/api.py @@ -0,0 +1,140 @@ +from pandas._libs import ( + NaT, + Period, + Timedelta, + Timestamp, +) +from pandas._libs.missing import NA + +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + CategoricalDtype, + DatetimeTZDtype, + IntervalDtype, + PeriodDtype, +) +from pandas.core.dtypes.missing import ( + isna, + isnull, + notna, + notnull, +) + +from pandas.core.algorithms import ( + factorize, + unique, + value_counts, +) +from pandas.core.arrays import Categorical +from pandas.core.arrays.boolean import BooleanDtype +from pandas.core.arrays.floating import ( + Float32Dtype, + Float64Dtype, +) +from pandas.core.arrays.integer import ( + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, +) +from pandas.core.arrays.string_ import StringDtype +from pandas.core.construction import array +from pandas.core.flags import Flags +from pandas.core.groupby import ( + Grouper, + NamedAgg, +) +from pandas.core.indexes.api import ( + CategoricalIndex, + DatetimeIndex, + Index, + IntervalIndex, + MultiIndex, + PeriodIndex, + RangeIndex, + TimedeltaIndex, +) +from pandas.core.indexes.datetimes import ( + bdate_range, + date_range, +) +from pandas.core.indexes.interval import ( + Interval, + interval_range, +) +from pandas.core.indexes.period import period_range +from pandas.core.indexes.timedeltas import timedelta_range +from pandas.core.indexing import IndexSlice +from pandas.core.series import Series +from pandas.core.tools.datetimes import to_datetime +from pandas.core.tools.numeric import to_numeric +from pandas.core.tools.timedeltas import to_timedelta + +from pandas.io.formats.format import set_eng_float_format +from pandas.tseries.offsets import DateOffset + +# DataFrame needs to be imported after NamedAgg to avoid a circular import +from pandas.core.frame import DataFrame # isort:skip + +__all__ = [ + "array", + "ArrowDtype", + "bdate_range", + "BooleanDtype", + "Categorical", + "CategoricalDtype", + "CategoricalIndex", + "DataFrame", + "DateOffset", + "date_range", + "DatetimeIndex", + "DatetimeTZDtype", + "factorize", + "Flags", + "Float32Dtype", + "Float64Dtype", + "Grouper", + "Index", + "IndexSlice", + "Int16Dtype", + "Int32Dtype", + "Int64Dtype", + "Int8Dtype", + "Interval", + "IntervalDtype", + "IntervalIndex", + "interval_range", + "isna", + "isnull", + "MultiIndex", + "NA", + "NamedAgg", + "NaT", + "notna", + "notnull", + 
"Period", + "PeriodDtype", + "PeriodIndex", + "period_range", + "RangeIndex", + "Series", + "set_eng_float_format", + "StringDtype", + "Timedelta", + "TimedeltaIndex", + "timedelta_range", + "Timestamp", + "to_datetime", + "to_numeric", + "to_timedelta", + "UInt16Dtype", + "UInt32Dtype", + "UInt64Dtype", + "UInt8Dtype", + "unique", + "value_counts", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/apply.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/apply.py new file mode 100644 index 00000000..43bc26f6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/apply.py @@ -0,0 +1,1835 @@ +from __future__ import annotations + +import abc +from collections import defaultdict +from functools import partial +import inspect +from typing import ( + TYPE_CHECKING, + Any, + Callable, + DefaultDict, + Literal, + cast, +) +import warnings + +import numpy as np + +from pandas._config import option_context + +from pandas._libs import lib +from pandas._typing import ( + AggFuncType, + AggFuncTypeBase, + AggFuncTypeDict, + AggObjType, + Axis, + AxisInt, + NDFrameT, + npt, +) +from pandas.errors import SpecificationError +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import is_nested_object +from pandas.core.dtypes.common import ( + is_dict_like, + is_list_like, + is_sequence, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + ExtensionDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCNDFrame, + ABCSeries, +) + +import pandas.core.common as com +from pandas.core.construction import ensure_wrapped_if_datetimelike + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterable, + Iterator, + Sequence, + ) + + from pandas import ( + DataFrame, + Index, + Series, + ) + from pandas.core.groupby import GroupBy + from pandas.core.resample import Resampler + from pandas.core.window.rolling import BaseWindow + + +ResType = dict[int, Any] + + +def frame_apply( + obj: DataFrame, + func: AggFuncType, + axis: Axis = 0, + raw: bool = False, + result_type: str | None = None, + by_row: Literal[False, "compat"] = "compat", + args=None, + kwargs=None, +) -> FrameApply: + """construct and return a row or column based frame apply object""" + axis = obj._get_axis_number(axis) + klass: type[FrameApply] + if axis == 0: + klass = FrameRowApply + elif axis == 1: + klass = FrameColumnApply + + _, func, _, _ = reconstruct_func(func, **kwargs) + assert func is not None + + return klass( + obj, + func, + raw=raw, + result_type=result_type, + by_row=by_row, + args=args, + kwargs=kwargs, + ) + + +class Apply(metaclass=abc.ABCMeta): + axis: AxisInt + + def __init__( + self, + obj: AggObjType, + func: AggFuncType, + raw: bool, + result_type: str | None, + *, + by_row: Literal[False, "compat", "_compat"] = "compat", + args, + kwargs, + ) -> None: + self.obj = obj + self.raw = raw + + assert by_row is False or by_row in ["compat", "_compat"] + self.by_row = by_row + + self.args = args or () + self.kwargs = kwargs or {} + + if result_type not in [None, "reduce", "broadcast", "expand"]: + raise ValueError( + "invalid value for result_type, must be one " + "of {None, 'reduce', 'broadcast', 'expand'}" + ) + + self.result_type = result_type + + self.func = func + + @abc.abstractmethod + def apply(self) -> DataFrame | Series: + pass + + @abc.abstractmethod + def agg_or_apply_list_like( + self, op_name: Literal["agg", "apply"] + ) -> DataFrame | Series: + pass + + 
@abc.abstractmethod + def agg_or_apply_dict_like( + self, op_name: Literal["agg", "apply"] + ) -> DataFrame | Series: + pass + + def agg(self) -> DataFrame | Series | None: + """ + Provide an implementation for the aggregators. + + Returns + ------- + Result of aggregation, or None if agg cannot be performed by + this method. + """ + obj = self.obj + func = self.func + args = self.args + kwargs = self.kwargs + + if isinstance(func, str): + return self.apply_str() + + if is_dict_like(func): + return self.agg_dict_like() + elif is_list_like(func): + # we require a list, but not a 'str' + return self.agg_list_like() + + if callable(func): + f = com.get_cython_func(func) + if f and not args and not kwargs: + warn_alias_replacement(obj, func, f) + return getattr(obj, f)() + + # caller can react + return None + + def transform(self) -> DataFrame | Series: + """ + Transform a DataFrame or Series. + + Returns + ------- + DataFrame or Series + Result of applying ``func`` along the given axis of the + Series or DataFrame. + + Raises + ------ + ValueError + If the transform function fails or does not transform. + """ + obj = self.obj + func = self.func + axis = self.axis + args = self.args + kwargs = self.kwargs + + is_series = obj.ndim == 1 + + if obj._get_axis_number(axis) == 1: + assert not is_series + return obj.T.transform(func, 0, *args, **kwargs).T + + if is_list_like(func) and not is_dict_like(func): + func = cast(list[AggFuncTypeBase], func) + # Convert func equivalent dict + if is_series: + func = {com.get_callable_name(v) or v: v for v in func} + else: + func = {col: func for col in obj} + + if is_dict_like(func): + func = cast(AggFuncTypeDict, func) + return self.transform_dict_like(func) + + # func is either str or callable + func = cast(AggFuncTypeBase, func) + try: + result = self.transform_str_or_callable(func) + except TypeError: + raise + except Exception as err: + raise ValueError("Transform function failed") from err + + # Functions that transform may return empty Series/DataFrame + # when the dtype is not appropriate + if ( + isinstance(result, (ABCSeries, ABCDataFrame)) + and result.empty + and not obj.empty + ): + raise ValueError("Transform function failed") + # error: Argument 1 to "__get__" of "AxisProperty" has incompatible type + # "Union[Series, DataFrame, GroupBy[Any], SeriesGroupBy, + # DataFrameGroupBy, BaseWindow, Resampler]"; expected "Union[DataFrame, + # Series]" + if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals( + obj.index # type: ignore[arg-type] + ): + raise ValueError("Function did not transform") + + return result + + def transform_dict_like(self, func): + """ + Compute transform in the case of a dict-like func + """ + from pandas.core.reshape.concat import concat + + obj = self.obj + args = self.args + kwargs = self.kwargs + + # transform is currently only for Series/DataFrame + assert isinstance(obj, ABCNDFrame) + + if len(func) == 0: + raise ValueError("No transform functions were provided") + + func = self.normalize_dictlike_arg("transform", obj, func) + + results: dict[Hashable, DataFrame | Series] = {} + for name, how in func.items(): + colg = obj._gotitem(name, ndim=1) + results[name] = colg.transform(how, 0, *args, **kwargs) + return concat(results, axis=1) + + def transform_str_or_callable(self, func) -> DataFrame | Series: + """ + Compute transform in the case of a string or callable func + """ + obj = self.obj + args = self.args + kwargs = self.kwargs + + if isinstance(func, str): + return self._apply_str(obj, func, 
*args, **kwargs)
+
+ if not args and not kwargs:
+ f = com.get_cython_func(func)
+ if f:
+ warn_alias_replacement(obj, func, f)
+ return getattr(obj, f)()
+
+ # Two possible ways to use a UDF - apply or call directly
+ try:
+ return obj.apply(func, args=args, **kwargs)
+ except Exception:
+ return func(obj, *args, **kwargs)
+
+ def agg_list_like(self) -> DataFrame | Series:
+ """
+ Compute aggregation in the case of a list-like argument.
+
+ Returns
+ -------
+ Result of aggregation.
+ """
+ return self.agg_or_apply_list_like(op_name="agg")
+
+ def compute_list_like(
+ self,
+ op_name: Literal["agg", "apply"],
+ selected_obj: Series | DataFrame,
+ kwargs: dict[str, Any],
+ ) -> tuple[list[Hashable], list[Any]]:
+ """
+ Compute agg/apply results for list-like input.
+
+ Parameters
+ ----------
+ op_name : {"agg", "apply"}
+ Operation being performed.
+ selected_obj : Series or DataFrame
+ Data to perform operation on.
+ kwargs : dict
+ Keyword arguments to pass to the functions.
+
+ Returns
+ -------
+ keys : list[hashable]
+ Index labels for result.
+ results : list
+ Data for result. When aggregating with a Series, this can contain any
+ Python objects.
+ """
+ func = cast(list[AggFuncTypeBase], self.func)
+ obj = self.obj
+
+ results = []
+ keys = []
+
+ # degenerate case
+ if selected_obj.ndim == 1:
+ for a in func:
+ colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
+ args = (
+ [self.axis, *self.args]
+ if include_axis(op_name, colg)
+ else self.args
+ )
+ new_res = getattr(colg, op_name)(a, *args, **kwargs)
+ results.append(new_res)
+
+ # make sure we find a good name
+ name = com.get_callable_name(a) or a
+ keys.append(name)
+
+ else:
+ indices = []
+ for index, col in enumerate(selected_obj):
+ colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
+ args = (
+ [self.axis, *self.args]
+ if include_axis(op_name, colg)
+ else self.args
+ )
+ new_res = getattr(colg, op_name)(func, *args, **kwargs)
+ results.append(new_res)
+ indices.append(index)
+ keys = selected_obj.columns.take(indices)
+
+ return keys, results
+
+ def wrap_results_list_like(
+ self, keys: list[Hashable], results: list[Series | DataFrame]
+ ):
+ from pandas.core.reshape.concat import concat
+
+ obj = self.obj
+
+ try:
+ return concat(results, keys=keys, axis=1, sort=False)
+ except TypeError as err:
+ # we are concatting non-NDFrame objects,
+ # e.g. a list of scalars
+ from pandas import Series
+
+ result = Series(results, index=keys, name=obj.name)
+ if is_nested_object(result):
+ raise ValueError(
+ "cannot combine transform and aggregation operations"
+ ) from err
+ return result
+
+ def agg_dict_like(self) -> DataFrame | Series:
+ """
+ Compute aggregation in the case of a dict-like argument.
+
+ Returns
+ -------
+ Result of aggregation.
+ """
+ return self.agg_or_apply_dict_like(op_name="agg")
+
+ def compute_dict_like(
+ self,
+ op_name: Literal["agg", "apply"],
+ selected_obj: Series | DataFrame,
+ selection: Hashable | Sequence[Hashable],
+ kwargs: dict[str, Any],
+ ) -> tuple[list[Hashable], list[Any]]:
+ """
+ Compute agg/apply results for dict-like input.
+
+ Parameters
+ ----------
+ op_name : {"agg", "apply"}
+ Operation being performed.
+ selected_obj : Series or DataFrame
+ Data to perform operation on.
+ selection : hashable or sequence of hashables
+ Used by GroupBy, Window, and Resample if selection is applied to the object.
+ kwargs : dict
+ Keyword arguments to pass to the functions.
+
+ Returns
+ -------
+ keys : list[hashable]
+ Index labels for result.
+ results : list + Data for result. When aggregating with a Series, this can contain any + Python object. + """ + from pandas.core.groupby.generic import ( + DataFrameGroupBy, + SeriesGroupBy, + ) + + obj = self.obj + is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy)) + func = cast(AggFuncTypeDict, self.func) + func = self.normalize_dictlike_arg(op_name, selected_obj, func) + + is_non_unique_col = ( + selected_obj.ndim == 2 + and selected_obj.columns.nunique() < len(selected_obj.columns) + ) + + if selected_obj.ndim == 1: + # key only used for output + colg = obj._gotitem(selection, ndim=1) + results = [getattr(colg, op_name)(how, **kwargs) for _, how in func.items()] + keys = list(func.keys()) + elif not is_groupby and is_non_unique_col: + # key used for column selection and output + # GH#51099 + results = [] + keys = [] + for key, how in func.items(): + indices = selected_obj.columns.get_indexer_for([key]) + labels = selected_obj.columns.take(indices) + label_to_indices = defaultdict(list) + for index, label in zip(indices, labels): + label_to_indices[label].append(index) + + key_data = [ + getattr(selected_obj._ixs(indice, axis=1), op_name)(how, **kwargs) + for label, indices in label_to_indices.items() + for indice in indices + ] + + keys += [key] * len(key_data) + results += key_data + else: + # key used for column selection and output + results = [ + getattr(obj._gotitem(key, ndim=1), op_name)(how, **kwargs) + for key, how in func.items() + ] + keys = list(func.keys()) + + return keys, results + + def wrap_results_dict_like( + self, + selected_obj: Series | DataFrame, + result_index: list[Hashable], + result_data: list, + ): + from pandas import Index + from pandas.core.reshape.concat import concat + + obj = self.obj + + # Avoid making two isinstance calls in all and any below + is_ndframe = [isinstance(r, ABCNDFrame) for r in result_data] + + if all(is_ndframe): + results = dict(zip(result_index, result_data)) + keys_to_use: Iterable[Hashable] + keys_to_use = [k for k in result_index if not results[k].empty] + # Have to check, if at least one DataFrame is not empty. + keys_to_use = keys_to_use if keys_to_use != [] else result_index + if selected_obj.ndim == 2: + # keys are columns, so we can preserve names + ktu = Index(keys_to_use) + ktu._set_names(selected_obj.columns.names) + keys_to_use = ktu + + axis: AxisInt = 0 if isinstance(obj, ABCSeries) else 1 + result = concat( + {k: results[k] for k in keys_to_use}, + axis=axis, + keys=keys_to_use, + ) + elif any(is_ndframe): + # There is a mix of NDFrames and scalars + raise ValueError( + "cannot perform both aggregation " + "and transformation operations " + "simultaneously" + ) + else: + from pandas import Series + + # we have a list of scalars + # GH 36212 use name only if obj is a series + if obj.ndim == 1: + obj = cast("Series", obj) + name = obj.name + else: + name = None + + result = Series(result_data, index=result_index, name=name) + + return result + + def apply_str(self) -> DataFrame | Series: + """ + Compute apply in case of a string. + + Returns + ------- + result: Series or DataFrame + """ + # Caller is responsible for checking isinstance(self.f, str) + func = cast(str, self.func) + + obj = self.obj + + from pandas.core.groupby.generic import ( + DataFrameGroupBy, + SeriesGroupBy, + ) + + # Support for `frame.transform('method')` + # Some methods (shift, etc.) require the axis argument, others + # don't, so inspect and insert if necessary. 
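+ # Illustrative: for func="shift", "axis" appears in
+ # inspect.getfullargspec(obj.shift).args, so the axis keyword is
+ # forwarded; a method lacking "axis" raises below when axis=1 is used.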
+ method = getattr(obj, func, None) + if callable(method): + sig = inspect.getfullargspec(method) + arg_names = (*sig.args, *sig.kwonlyargs) + if self.axis != 0 and ( + "axis" not in arg_names or func in ("corrwith", "skew") + ): + raise ValueError(f"Operation {func} does not support axis=1") + if "axis" in arg_names: + if isinstance(obj, (SeriesGroupBy, DataFrameGroupBy)): + # Try to avoid FutureWarning for deprecated axis keyword; + # If self.axis matches the axis we would get by not passing + # axis, we safely exclude the keyword. + + default_axis = 0 + if func in ["idxmax", "idxmin"]: + # DataFrameGroupBy.idxmax, idxmin axis defaults to self.axis, + # whereas other axis keywords default to 0 + default_axis = self.obj.axis + + if default_axis != self.axis: + self.kwargs["axis"] = self.axis + else: + self.kwargs["axis"] = self.axis + return self._apply_str(obj, func, *self.args, **self.kwargs) + + def apply_list_or_dict_like(self) -> DataFrame | Series: + """ + Compute apply in case of a list-like or dict-like. + + Returns + ------- + result: Series, DataFrame, or None + Result when self.func is a list-like or dict-like, None otherwise. + """ + if self.axis == 1 and isinstance(self.obj, ABCDataFrame): + return self.obj.T.apply(self.func, 0, args=self.args, **self.kwargs).T + + func = self.func + kwargs = self.kwargs + + if is_dict_like(func): + result = self.agg_or_apply_dict_like(op_name="apply") + else: + result = self.agg_or_apply_list_like(op_name="apply") + + result = reconstruct_and_relabel_result(result, func, **kwargs) + + return result + + def normalize_dictlike_arg( + self, how: str, obj: DataFrame | Series, func: AggFuncTypeDict + ) -> AggFuncTypeDict: + """ + Handler for dict-like argument. + + Ensures that necessary columns exist if obj is a DataFrame, and + that a nested renamer is not passed. Also normalizes to all lists + when values consists of a mix of list and non-lists. + """ + assert how in ("apply", "agg", "transform") + + # Can't use func.values(); wouldn't work for a Series + if ( + how == "agg" + and isinstance(obj, ABCSeries) + and any(is_list_like(v) for _, v in func.items()) + ) or (any(is_dict_like(v) for _, v in func.items())): + # GH 15931 - deprecation of renaming keys + raise SpecificationError("nested renamer is not supported") + + if obj.ndim != 1: + # Check for missing columns on a frame + from pandas import Index + + cols = Index(list(func.keys())).difference(obj.columns, sort=True) + if len(cols) > 0: + raise KeyError(f"Column(s) {list(cols)} do not exist") + + aggregator_types = (list, tuple, dict) + + # if we have a dict of any non-scalars + # eg. 
{'A' : ['mean']}, normalize all to + # be list-likes + # Cannot use func.values() because arg may be a Series + if any(isinstance(x, aggregator_types) for _, x in func.items()): + new_func: AggFuncTypeDict = {} + for k, v in func.items(): + if not isinstance(v, aggregator_types): + new_func[k] = [v] + else: + new_func[k] = v + func = new_func + return func + + def _apply_str(self, obj, func: str, *args, **kwargs): + """ + if arg is a string, then try to operate on it: + - try to find a function (or attribute) on obj + - try to find a numpy function + - raise + """ + assert isinstance(func, str) + + if hasattr(obj, func): + f = getattr(obj, func) + if callable(f): + return f(*args, **kwargs) + + # people may aggregate on a non-callable attribute + # but don't let them think they can pass args to it + assert len(args) == 0 + assert len([kwarg for kwarg in kwargs if kwarg not in ["axis"]]) == 0 + return f + elif hasattr(np, func) and hasattr(obj, "__array__"): + # in particular exclude Window + f = getattr(np, func) + return f(obj, *args, **kwargs) + else: + msg = f"'{func}' is not a valid function for '{type(obj).__name__}' object" + raise AttributeError(msg) + + +class NDFrameApply(Apply): + """ + Methods shared by FrameApply and SeriesApply but + not GroupByApply or ResamplerWindowApply + """ + + obj: DataFrame | Series + + @property + def index(self) -> Index: + return self.obj.index + + @property + def agg_axis(self) -> Index: + return self.obj._get_agg_axis(self.axis) + + def agg_or_apply_list_like( + self, op_name: Literal["agg", "apply"] + ) -> DataFrame | Series: + obj = self.obj + kwargs = self.kwargs + + if op_name == "apply": + if isinstance(self, FrameApply): + by_row = self.by_row + + elif isinstance(self, SeriesApply): + by_row = "_compat" if self.by_row else False + else: + by_row = False + kwargs = {**kwargs, "by_row": by_row} + + if getattr(obj, "axis", 0) == 1: + raise NotImplementedError("axis other than 0 is not supported") + + keys, results = self.compute_list_like(op_name, obj, kwargs) + result = self.wrap_results_list_like(keys, results) + return result + + def agg_or_apply_dict_like( + self, op_name: Literal["agg", "apply"] + ) -> DataFrame | Series: + assert op_name in ["agg", "apply"] + obj = self.obj + + kwargs = {} + if op_name == "apply": + by_row = "_compat" if self.by_row else False + kwargs.update({"by_row": by_row}) + + if getattr(obj, "axis", 0) == 1: + raise NotImplementedError("axis other than 0 is not supported") + + selection = None + result_index, result_data = self.compute_dict_like( + op_name, obj, selection, kwargs + ) + result = self.wrap_results_dict_like(obj, result_index, result_data) + return result + + +class FrameApply(NDFrameApply): + obj: DataFrame + + def __init__( + self, + obj: AggObjType, + func: AggFuncType, + raw: bool, + result_type: str | None, + *, + by_row: Literal[False, "compat"] = False, + args, + kwargs, + ) -> None: + if by_row is not False and by_row != "compat": + raise ValueError(f"by_row={by_row} not allowed") + super().__init__( + obj, func, raw, result_type, by_row=by_row, args=args, kwargs=kwargs + ) + + # --------------------------------------------------------------- + # Abstract Methods + + @property + @abc.abstractmethod + def result_index(self) -> Index: + pass + + @property + @abc.abstractmethod + def result_columns(self) -> Index: + pass + + @property + @abc.abstractmethod + def series_generator(self) -> Iterator[Series]: + pass + + @abc.abstractmethod + def wrap_results_for_axis( + self, results: ResType, 
res_index: Index + ) -> DataFrame | Series: + pass + + # --------------------------------------------------------------- + + @property + def res_columns(self) -> Index: + return self.result_columns + + @property + def columns(self) -> Index: + return self.obj.columns + + @cache_readonly + def values(self): + return self.obj.values + + def apply(self) -> DataFrame | Series: + """compute the results""" + # dispatch to handle list-like or dict-like + if is_list_like(self.func): + return self.apply_list_or_dict_like() + + # all empty + if len(self.columns) == 0 and len(self.index) == 0: + return self.apply_empty_result() + + # string dispatch + if isinstance(self.func, str): + return self.apply_str() + + # ufunc + elif isinstance(self.func, np.ufunc): + with np.errstate(all="ignore"): + results = self.obj._mgr.apply("apply", func=self.func) + # _constructor will retain self.index and self.columns + return self.obj._constructor_from_mgr(results, axes=results.axes) + + # broadcasting + if self.result_type == "broadcast": + return self.apply_broadcast(self.obj) + + # one axis empty + elif not all(self.obj.shape): + return self.apply_empty_result() + + # raw + elif self.raw: + return self.apply_raw() + + return self.apply_standard() + + def agg(self): + obj = self.obj + axis = self.axis + + # TODO: Avoid having to change state + self.obj = self.obj if self.axis == 0 else self.obj.T + self.axis = 0 + + result = None + try: + result = super().agg() + finally: + self.obj = obj + self.axis = axis + + if axis == 1: + result = result.T if result is not None else result + + if result is None: + result = self.obj.apply(self.func, axis, args=self.args, **self.kwargs) + + return result + + def apply_empty_result(self): + """ + we have an empty result; at least 1 axis is 0 + + we will try to apply the function to an empty + series in order to see if this is a reduction function + """ + assert callable(self.func) + + # we are not asked to reduce or infer reduction + # so just return a copy of the existing object + if self.result_type not in ["reduce", None]: + return self.obj.copy() + + # we may need to infer + should_reduce = self.result_type == "reduce" + + from pandas import Series + + if not should_reduce: + try: + if self.axis == 0: + r = self.func( + Series([], dtype=np.float64), *self.args, **self.kwargs + ) + else: + r = self.func( + Series(index=self.columns, dtype=np.float64), + *self.args, + **self.kwargs, + ) + except Exception: + pass + else: + should_reduce = not isinstance(r, Series) + + if should_reduce: + if len(self.agg_axis): + r = self.func(Series([], dtype=np.float64), *self.args, **self.kwargs) + else: + r = np.nan + + return self.obj._constructor_sliced(r, index=self.agg_axis) + else: + return self.obj.copy() + + def apply_raw(self): + """apply to the values as a numpy array""" + + def wrap_function(func): + """ + Wrap user supplied function to work around numpy issue. 
+ + see https://github.com/numpy/numpy/issues/8352 + """ + + def wrapper(*args, **kwargs): + result = func(*args, **kwargs) + if isinstance(result, str): + result = np.array(result, dtype=object) + return result + + return wrapper + + result = np.apply_along_axis( + wrap_function(self.func), self.axis, self.values, *self.args, **self.kwargs + ) + + # TODO: mixed type case + if result.ndim == 2: + return self.obj._constructor(result, index=self.index, columns=self.columns) + else: + return self.obj._constructor_sliced(result, index=self.agg_axis) + + def apply_broadcast(self, target: DataFrame) -> DataFrame: + assert callable(self.func) + + result_values = np.empty_like(target.values) + + # axis which we want to compare compliance + result_compare = target.shape[0] + + for i, col in enumerate(target.columns): + res = self.func(target[col], *self.args, **self.kwargs) + ares = np.asarray(res).ndim + + # must be a scalar or 1d + if ares > 1: + raise ValueError("too many dims to broadcast") + if ares == 1: + # must match return dim + if result_compare != len(res): + raise ValueError("cannot broadcast result") + + result_values[:, i] = res + + # we *always* preserve the original index / columns + result = self.obj._constructor( + result_values, index=target.index, columns=target.columns + ) + return result + + def apply_standard(self): + results, res_index = self.apply_series_generator() + + # wrap results + return self.wrap_results(results, res_index) + + def apply_series_generator(self) -> tuple[ResType, Index]: + assert callable(self.func) + + series_gen = self.series_generator + res_index = self.result_index + + results = {} + + with option_context("mode.chained_assignment", None): + for i, v in enumerate(series_gen): + # ignore SettingWithCopy here in case the user mutates + results[i] = self.func(v, *self.args, **self.kwargs) + if isinstance(results[i], ABCSeries): + # If we have a view on v, we need to make a copy because + # series_generator will swap out the underlying data + results[i] = results[i].copy(deep=False) + + return results, res_index + + def wrap_results(self, results: ResType, res_index: Index) -> DataFrame | Series: + from pandas import Series + + # see if we can infer the results + if len(results) > 0 and 0 in results and is_sequence(results[0]): + return self.wrap_results_for_axis(results, res_index) + + # dict of scalars + + # the default dtype of an empty Series is `object`, but this + # code can be hit by df.mean() where the result should have dtype + # float64 even if it's an empty Series. 
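+ # Illustrative: when no results were produced, a plain empty Series
+ # would default to object dtype; forcing float64 keeps reductions
+ # such as an empty mean() numeric.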
+ constructor_sliced = self.obj._constructor_sliced + if len(results) == 0 and constructor_sliced is Series: + result = constructor_sliced(results, dtype=np.float64) + else: + result = constructor_sliced(results) + result.index = res_index + + return result + + def apply_str(self) -> DataFrame | Series: + # Caller is responsible for checking isinstance(self.func, str) + # TODO: GH#39993 - Avoid special-casing by replacing with lambda + if self.func == "size": + # Special-cased because DataFrame.size returns a single scalar + obj = self.obj + value = obj.shape[self.axis] + return obj._constructor_sliced(value, index=self.agg_axis) + return super().apply_str() + + +class FrameRowApply(FrameApply): + axis: AxisInt = 0 + + @property + def series_generator(self): + return (self.obj._ixs(i, axis=1) for i in range(len(self.columns))) + + @property + def result_index(self) -> Index: + return self.columns + + @property + def result_columns(self) -> Index: + return self.index + + def wrap_results_for_axis( + self, results: ResType, res_index: Index + ) -> DataFrame | Series: + """return the results for the rows""" + + if self.result_type == "reduce": + # e.g. test_apply_dict GH#8735 + res = self.obj._constructor_sliced(results) + res.index = res_index + return res + + elif self.result_type is None and all( + isinstance(x, dict) for x in results.values() + ): + # Our operation was a to_dict op e.g. + # test_apply_dict GH#8735, test_apply_reduce_to_dict GH#25196 #37544 + res = self.obj._constructor_sliced(results) + res.index = res_index + return res + + try: + result = self.obj._constructor(data=results) + except ValueError as err: + if "All arrays must be of the same length" in str(err): + # e.g. result = [[2, 3], [1.5], ['foo', 'bar']] + # see test_agg_listlike_result GH#29587 + res = self.obj._constructor_sliced(results) + res.index = res_index + return res + else: + raise + + if not isinstance(results[0], ABCSeries): + if len(result.index) == len(self.res_columns): + result.index = self.res_columns + + if len(result.columns) == len(res_index): + result.columns = res_index + + return result + + +class FrameColumnApply(FrameApply): + axis: AxisInt = 1 + + def apply_broadcast(self, target: DataFrame) -> DataFrame: + result = super().apply_broadcast(target.T) + return result.T + + @property + def series_generator(self): + values = self.values + values = ensure_wrapped_if_datetimelike(values) + assert len(values) > 0 + + # We create one Series object, and will swap out the data inside + # of it. Kids: don't do this at home. 
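+ # The single Series below acts as a reusable window onto each row:
+ # mgr.set_values(arr) repoints its backing array in place, avoiding
+ # the cost of constructing one Series per row of the frame.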
+ ser = self.obj._ixs(0, axis=0) + mgr = ser._mgr + + if isinstance(ser.dtype, ExtensionDtype): + # values will be incorrect for this block + # TODO(EA2D): special case would be unnecessary with 2D EAs + obj = self.obj + for i in range(len(obj)): + yield obj._ixs(i, axis=0) + + else: + for arr, name in zip(values, self.index): + # GH#35462 re-pin mgr in case setitem changed it + ser._mgr = mgr + mgr.set_values(arr) + object.__setattr__(ser, "_name", name) + yield ser + + @property + def result_index(self) -> Index: + return self.index + + @property + def result_columns(self) -> Index: + return self.columns + + def wrap_results_for_axis( + self, results: ResType, res_index: Index + ) -> DataFrame | Series: + """return the results for the columns""" + result: DataFrame | Series + + # we have requested to expand + if self.result_type == "expand": + result = self.infer_to_same_shape(results, res_index) + + # we have a non-series and don't want inference + elif not isinstance(results[0], ABCSeries): + result = self.obj._constructor_sliced(results) + result.index = res_index + + # we may want to infer results + else: + result = self.infer_to_same_shape(results, res_index) + + return result + + def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame: + """infer the results to the same shape as the input object""" + result = self.obj._constructor(data=results) + result = result.T + + # set the index + result.index = res_index + + # infer dtypes + result = result.infer_objects(copy=False) + + return result + + +class SeriesApply(NDFrameApply): + obj: Series + axis: AxisInt = 0 + by_row: Literal[False, "compat", "_compat"] # only relevant for apply() + + def __init__( + self, + obj: Series, + func: AggFuncType, + *, + convert_dtype: bool | lib.NoDefault = lib.no_default, + by_row: Literal[False, "compat", "_compat"] = "compat", + args, + kwargs, + ) -> None: + if convert_dtype is lib.no_default: + convert_dtype = True + else: + warnings.warn( + "the convert_dtype parameter is deprecated and will be removed in a " + "future version. Do ``ser.astype(object).apply()`` " + "instead if you want ``convert_dtype=False``.", + FutureWarning, + stacklevel=find_stack_level(), + ) + self.convert_dtype = convert_dtype + + super().__init__( + obj, + func, + raw=False, + result_type=None, + by_row=by_row, + args=args, + kwargs=kwargs, + ) + + def apply(self) -> DataFrame | Series: + obj = self.obj + + if len(obj) == 0: + return self.apply_empty_result() + + # dispatch to handle list-like or dict-like + if is_list_like(self.func): + return self.apply_list_or_dict_like() + + if isinstance(self.func, str): + # if we are a string, try to dispatch + return self.apply_str() + + if self.by_row == "_compat": + return self.apply_compat() + + # self.func is Callable + return self.apply_standard() + + def agg(self): + result = super().agg() + if result is None: + obj = self.obj + func = self.func + # string, list-like, and dict-like are entirely handled in super + assert callable(func) + + # GH53325: The setup below is just to keep current behavior while emitting a + # deprecation message. In the future this will all be replaced with a simple + # `result = f(self.obj, *self.args, **self.kwargs)`. + try: + result = obj.apply(func, args=self.args, **self.kwargs) + except (ValueError, AttributeError, TypeError): + result = func(obj, *self.args, **self.kwargs) + else: + msg = ( + f"using {func} in {type(obj).__name__}.agg cannot aggregate and " + f"has been deprecated. 
Use {type(obj).__name__}.transform to " + f"keep behavior unchanged." + ) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + + return result + + def apply_empty_result(self) -> Series: + obj = self.obj + return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__( + obj, method="apply" + ) + + def apply_compat(self): + """compat apply method for funcs in listlikes and dictlikes. + + Used for each callable when giving listlikes and dictlikes of callables to + apply. Needed for compatibility with Pandas < v2.1. + + .. versionadded:: 2.1.0 + """ + obj = self.obj + func = self.func + + if callable(func): + f = com.get_cython_func(func) + if f and not self.args and not self.kwargs: + return obj.apply(func, by_row=False) + + try: + result = obj.apply(func, by_row="compat") + except (ValueError, AttributeError, TypeError): + result = obj.apply(func, by_row=False) + return result + + def apply_standard(self) -> DataFrame | Series: + # caller is responsible for ensuring that f is Callable + func = cast(Callable, self.func) + obj = self.obj + + if isinstance(func, np.ufunc): + with np.errstate(all="ignore"): + return func(obj, *self.args, **self.kwargs) + elif not self.by_row: + return func(obj, *self.args, **self.kwargs) + + if self.args or self.kwargs: + # _map_values does not support args/kwargs + def curried(x): + return func(x, *self.args, **self.kwargs) + + else: + curried = func + + # row-wise access + # apply doesn't have a `na_action` keyword and for backward compat reasons + # we need to give `na_action="ignore"` for categorical data. + # TODO: remove the `na_action="ignore"` when that default has been changed in + # Categorical (GH51645). + action = "ignore" if isinstance(obj.dtype, CategoricalDtype) else None + mapped = obj._map_values( + mapper=curried, na_action=action, convert=self.convert_dtype + ) + + if len(mapped) and isinstance(mapped[0], ABCSeries): + # GH#43986 Need to do list(mapped) in order to get treated as nested + # See also GH#25959 regarding EA support + return obj._constructor_expanddim(list(mapped), index=obj.index) + else: + return obj._constructor(mapped, index=obj.index).__finalize__( + obj, method="apply" + ) + + +class GroupByApply(Apply): + obj: GroupBy | Resampler | BaseWindow + + def __init__( + self, + obj: GroupBy[NDFrameT], + func: AggFuncType, + *, + args, + kwargs, + ) -> None: + kwargs = kwargs.copy() + self.axis = obj.obj._get_axis_number(kwargs.get("axis", 0)) + super().__init__( + obj, + func, + raw=False, + result_type=None, + args=args, + kwargs=kwargs, + ) + + def apply(self): + raise NotImplementedError + + def transform(self): + raise NotImplementedError + + def agg_or_apply_list_like( + self, op_name: Literal["agg", "apply"] + ) -> DataFrame | Series: + obj = self.obj + kwargs = self.kwargs + if op_name == "apply": + kwargs = {**kwargs, "by_row": False} + + if getattr(obj, "axis", 0) == 1: + raise NotImplementedError("axis other than 0 is not supported") + + if obj._selected_obj.ndim == 1: + # For SeriesGroupBy this matches _obj_with_exclusions + selected_obj = obj._selected_obj + else: + selected_obj = obj._obj_with_exclusions + + # Only set as_index=True on groupby objects, not Window or Resample + # that inherit from this class. 
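+ # com.temp_setattr is a context manager: when condition is True it
+ # sets obj.as_index = True for the duration of the block and restores
+ # the prior value on exit; for Window/Resample objects (which have no
+ # as_index attribute) the condition is False and it is a no-op.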
+ with com.temp_setattr( + obj, "as_index", True, condition=hasattr(obj, "as_index") + ): + keys, results = self.compute_list_like(op_name, selected_obj, kwargs) + result = self.wrap_results_list_like(keys, results) + return result + + def agg_or_apply_dict_like( + self, op_name: Literal["agg", "apply"] + ) -> DataFrame | Series: + from pandas.core.groupby.generic import ( + DataFrameGroupBy, + SeriesGroupBy, + ) + + assert op_name in ["agg", "apply"] + + obj = self.obj + kwargs = {} + if op_name == "apply": + by_row = "_compat" if self.by_row else False + kwargs.update({"by_row": by_row}) + + if getattr(obj, "axis", 0) == 1: + raise NotImplementedError("axis other than 0 is not supported") + + selected_obj = obj._selected_obj + selection = obj._selection + + is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy)) + + # Numba Groupby engine/engine-kwargs passthrough + if is_groupby: + engine = self.kwargs.get("engine", None) + engine_kwargs = self.kwargs.get("engine_kwargs", None) + kwargs.update({"engine": engine, "engine_kwargs": engine_kwargs}) + + with com.temp_setattr( + obj, "as_index", True, condition=hasattr(obj, "as_index") + ): + result_index, result_data = self.compute_dict_like( + op_name, selected_obj, selection, kwargs + ) + result = self.wrap_results_dict_like(selected_obj, result_index, result_data) + return result + + +class ResamplerWindowApply(GroupByApply): + axis: AxisInt = 0 + obj: Resampler | BaseWindow + + def __init__( + self, + obj: Resampler | BaseWindow, + func: AggFuncType, + *, + args, + kwargs, + ) -> None: + super(GroupByApply, self).__init__( + obj, + func, + raw=False, + result_type=None, + args=args, + kwargs=kwargs, + ) + + def apply(self): + raise NotImplementedError + + def transform(self): + raise NotImplementedError + + +def reconstruct_func( + func: AggFuncType | None, **kwargs +) -> tuple[bool, AggFuncType, list[str] | None, npt.NDArray[np.intp] | None]: + """ + This is the internal function to reconstruct func given if there is relabeling + or not and also normalize the keyword to get new order of columns. + + If named aggregation is applied, `func` will be None, and kwargs contains the + column and aggregation function information to be parsed; + If named aggregation is not applied, `func` is either string (e.g. 'min') or + Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name + and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]}) + + If relabeling is True, will return relabeling, reconstructed func, column + names, and the reconstructed order of columns. + If relabeling is False, the columns and order will be None. + + Parameters + ---------- + func: agg function (e.g. 'min' or Callable) or list of agg functions + (e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}). 
+    **kwargs: dict, kwargs used in is_multi_agg_with_relabel and
+        normalize_keyword_aggregation function for relabelling
+
+    Returns
+    -------
+    relabelling: bool, if there is relabelling or not
+    func: normalized and mangled func
+    columns: list of column names
+    order: array of columns indices
+
+    Examples
+    --------
+    >>> reconstruct_func(None, **{"foo": ("col", "min")})
+    (True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))
+
+    >>> reconstruct_func("min")
+    (False, 'min', None, None)
+    """
+    relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
+    columns: list[str] | None = None
+    order: npt.NDArray[np.intp] | None = None
+
+    if not relabeling:
+        if isinstance(func, list) and len(func) > len(set(func)):
+            # GH 28426 will raise error if duplicated function names are used and
+            # there is no reassigned name
+            raise SpecificationError(
+                "Function names must be unique if there is no new column names "
+                "assigned"
+            )
+        if func is None:
+            # nicer error message
+            raise TypeError("Must provide 'func' or tuples of '(column, aggfunc)'.")
+
+    if relabeling:
+        func, columns, order = normalize_keyword_aggregation(kwargs)
+        assert func is not None
+
+    return relabeling, func, columns, order
+
+
+def is_multi_agg_with_relabel(**kwargs) -> bool:
+    """
+    Check whether kwargs passed to .agg look like multi-agg with relabeling.
+
+    Parameters
+    ----------
+    **kwargs : dict
+
+    Returns
+    -------
+    bool
+
+    Examples
+    --------
+    >>> is_multi_agg_with_relabel(a="max")
+    False
+    >>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
+    True
+    >>> is_multi_agg_with_relabel()
+    False
+    """
+    return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
+        len(kwargs) > 0
+    )
+
+
+def normalize_keyword_aggregation(
+    kwargs: dict,
+) -> tuple[dict, list[str], npt.NDArray[np.intp]]:
+    """
+    Normalize user-provided "named aggregation" kwargs.
+    Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
+    to the old Dict[str, List[scalar]]].
+
+    Parameters
+    ----------
+    kwargs : dict
+
+    Returns
+    -------
+    aggspec : dict
+        The transformed kwargs.
+    columns : List[str]
+        The user-provided keys.
+    col_idx_order : List[int]
+        List of columns indices.
+
+    Examples
+    --------
+    >>> normalize_keyword_aggregation({"output": ("input", "sum")})
+    (defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0]))
+    """
+    from pandas.core.indexes.base import Index
+
+    # Normalize the aggregation functions as Mapping[column, List[func]],
+    # process normally, then fixup the names.
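+    # For example (mirroring the doctest above),
+    # {"one": ("a", "min"), "two": ("a", "max")} is normalized to
+    #     aggspec       == {"a": ["min", "max"]}
+    #     columns       == ("one", "two")
+    #     col_idx_order == array([0, 1])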
+    # TODO: aggspec type: typing.Dict[str, List[AggScalar]]
+    # May be hitting https://github.com/python/mypy/issues/5958
+    # saying it doesn't have an attribute __name__
+    aggspec: DefaultDict = defaultdict(list)
+    order = []
+    columns, pairs = list(zip(*kwargs.items()))
+
+    for column, aggfunc in pairs:
+        aggspec[column].append(aggfunc)
+        order.append((column, com.get_callable_name(aggfunc) or aggfunc))
+
+    # uniquify aggfunc name if duplicated in order list
+    uniquified_order = _make_unique_kwarg_list(order)
+
+    # GH 25719, due to aggspec will change the order of assigned columns in aggregation
+    # uniquified_aggspec will store uniquified order list and will compare it with order
+    # based on index
+    aggspec_order = [
+        (column, com.get_callable_name(aggfunc) or aggfunc)
+        for column, aggfuncs in aggspec.items()
+        for aggfunc in aggfuncs
+    ]
+    uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)
+
+    # get the new index of columns by comparison
+    col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
+    return aggspec, columns, col_idx_order
+
+
+def _make_unique_kwarg_list(
+    seq: Sequence[tuple[Any, Any]]
+) -> Sequence[tuple[Any, Any]]:
+    """
+    Uniquify aggfunc name of the pairs in the order list
+
+    Examples:
+    --------
+    >>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
+    >>> _make_unique_kwarg_list(kwarg_list)
+    [('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
+    """
+    return [
+        (pair[0], f"{pair[1]}_{seq[:i].count(pair)}") if seq.count(pair) > 1 else pair
+        for i, pair in enumerate(seq)
+    ]
+
+
+def relabel_result(
+    result: DataFrame | Series,
+    func: dict[str, list[Callable | str]],
+    columns: Iterable[Hashable],
+    order: Iterable[int],
+) -> dict[Hashable, Series]:
+    """
+    Internal function to reorder result if relabelling is True for
+    dataframe.agg, and return the reordered result in dict.
+
+    Parameters:
+    ----------
+    result: Result from aggregation
+    func: Dict of (column name, funcs)
+    columns: New columns name for relabelling
+    order: New order for relabelling
+
+    Examples
+    --------
+    >>> from pandas.core.apply import relabel_result
+    >>> result = pd.DataFrame(
+    ...     {"A": [np.nan, 2, np.nan], "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]},
+    ...     index=["max", "mean", "min"]
+    ... )
+    >>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
+    >>> columns = ("foo", "aab", "bar", "dat")
+    >>> order = [0, 1, 2, 3]
+    >>> result_in_dict = relabel_result(result, funcs, columns, order)
+    >>> pd.DataFrame(result_in_dict, index=columns)
+           A    C    B
+    foo  2.0  NaN  NaN
+    aab  NaN  6.0  NaN
+    bar  NaN  NaN  4.0
+    dat  NaN  NaN  2.5
+    """
+    from pandas.core.indexes.base import Index
+
+    reordered_indexes = [
+        pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
+    ]
+    reordered_result_in_dict: dict[Hashable, Series] = {}
+    idx = 0
+
+    reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
+    for col, fun in func.items():
+        s = result[col].dropna()
+
+        # In the `_aggregate`, the callable names are obtained and used in `result`, and
+        # these names are ordered alphabetically. e.g.
+        #           C2   C1
+        # <lambda>   1  NaN
+        # amax     NaN  4.0
+        # max      NaN  4.0
+        # sum     18.0  6.0
+        # Therefore, the order of functions for each column could be shuffled
+        # accordingly, so we need to get the callable name if it is not the parsed
+        # name, and reorder the aggregated result for each column.
+        # e.g. if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), correct order is
+        # [sum, <lambda>], but in `result`, it will be [<lambda>, sum], and we need to
+        # reorder so that aggregated values map to their functions regarding the order.
+
+        # However, if there is only one column being used for aggregation, there is
+        # no need to reorder since the index is not sorted; keep it as is in `funcs`,
+        # e.g.
+        #         A
+        # min   1.0
+        # mean  1.5
+        # mean  1.5
+        if reorder_mask:
+            fun = [
+                com.get_callable_name(f) if not isinstance(f, str) else f for f in fun
+            ]
+            col_idx_order = Index(s.index).get_indexer(fun)
+            s = s.iloc[col_idx_order]
+
+        # assign the new user-provided "named aggregation" as index names, and reindex
+        # it based on the whole user-provided names.
+        s.index = reordered_indexes[idx : idx + len(fun)]
+        reordered_result_in_dict[col] = s.reindex(columns, copy=False)
+        idx = idx + len(fun)
+    return reordered_result_in_dict
+
+
+def reconstruct_and_relabel_result(result, func, **kwargs) -> DataFrame | Series:
+    from pandas import DataFrame
+
+    relabeling, func, columns, order = reconstruct_func(func, **kwargs)
+
+    if relabeling:
+        # This is to keep the order to columns occurrence unchanged, and also
+        # keep the order of new columns occurrence unchanged
+
+        # For the return values of reconstruct_func, if relabeling is
+        # False, columns and order will be None.
+        assert columns is not None
+        assert order is not None
+
+        result_in_dict = relabel_result(result, func, columns, order)
+        result = DataFrame(result_in_dict, index=columns)
+
+    return result
+
+
+# TODO: Can't use, because mypy doesn't like us setting __name__
+#   error: "partial[Any]" has no attribute "__name__"
+# the type is:
+#   typing.Sequence[Callable[..., ScalarResult]]
+#     -> typing.Sequence[Callable[..., ScalarResult]]:
+
+
+def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
+    """
+    Possibly mangle a list of aggfuncs.
+
+    Parameters
+    ----------
+    aggfuncs : Sequence
+
+    Returns
+    -------
+    mangled: list-like
+        A new AggSpec sequence, where lambdas have been converted
+        to have unique names.
+
+    Notes
+    -----
+    If just one aggfunc is passed, the name will not be mangled.
+    """
+    if len(aggfuncs) <= 1:
+        # don't mangle for .agg([lambda x: .])
+        return aggfuncs
+    i = 0
+    mangled_aggfuncs = []
+    for aggfunc in aggfuncs:
+        if com.get_callable_name(aggfunc) == "<lambda>":
+            aggfunc = partial(aggfunc)
+            aggfunc.__name__ = f"<lambda_{i}>"
+            i += 1
+        mangled_aggfuncs.append(aggfunc)
+
+    return mangled_aggfuncs
+
+
+def maybe_mangle_lambdas(agg_spec: Any) -> Any:
+    """
+    Make new lambdas with unique names.
+
+    Parameters
+    ----------
+    agg_spec : Any
+        An argument to GroupBy.agg.
+        Non-dict-like `agg_spec` are passed through as is.
+        For dict-like `agg_spec` a new spec is returned
+        with name-mangled lambdas.
+
+    Returns
+    -------
+    mangled : Any
+        Same type as the input.
+
+    Examples
+    --------
+    >>> maybe_mangle_lambdas('sum')
+    'sum'
+    >>> maybe_mangle_lambdas([lambda: 1, lambda: 2])  # doctest: +SKIP
+    [<function __main__.<lambda_0>()>,
+     <function pandas...f(*args, **kwargs)>]
+    """
+    is_dict = is_dict_like(agg_spec)
+    if not (is_dict or is_list_like(agg_spec)):
+        return agg_spec
+    mangled_aggspec = type(agg_spec)()  # dict or OrderedDict
+
+    if is_dict:
+        for key, aggfuncs in agg_spec.items():
+            if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
+                mangled_aggfuncs = _managle_lambda_list(aggfuncs)
+            else:
+                mangled_aggfuncs = aggfuncs
+
+            mangled_aggspec[key] = mangled_aggfuncs
+    else:
+        mangled_aggspec = _managle_lambda_list(agg_spec)
+
+    return mangled_aggspec
+
+
+def validate_func_kwargs(
+    kwargs: dict,
+) -> tuple[list[str], list[str | Callable[..., Any]]]:
+    """
+    Validates types of user-provided "named aggregation" kwargs.
+    `TypeError` is raised if aggfunc is not `str` or callable.
+
+    Parameters
+    ----------
+    kwargs : dict
+
+    Returns
+    -------
+    columns : List[str]
+        List of user-provided keys.
+    func : List[Union[str, callable[...,Any]]]
+        List of user-provided aggfuncs
+
+    Examples
+    --------
+    >>> validate_func_kwargs({'one': 'min', 'two': 'max'})
+    (['one', 'two'], ['min', 'max'])
+    """
+    tuple_given_message = "func is expected but received {} in **kwargs."
+    columns = list(kwargs)
+    func = []
+    for col_func in kwargs.values():
+        if not (isinstance(col_func, str) or callable(col_func)):
+            raise TypeError(tuple_given_message.format(type(col_func).__name__))
+        func.append(col_func)
+    if not columns:
+        no_arg_message = "Must provide 'func' or named aggregation **kwargs."
+        raise TypeError(no_arg_message)
+    return columns, func
+
+
+def include_axis(op_name: Literal["agg", "apply"], colg: Series | DataFrame) -> bool:
+    return isinstance(colg, ABCDataFrame) or (
+        isinstance(colg, ABCSeries) and op_name == "agg"
+    )
+
+
+def warn_alias_replacement(
+    obj: AggObjType,
+    func: Callable,
+    alias: str,
+) -> None:
+    if alias.startswith("np."):
+        full_alias = alias
+    else:
+        full_alias = f"{type(obj).__name__}.{alias}"
+        alias = f'"{alias}"'
+    warnings.warn(
+        f"The provided callable {func} is currently using "
+        f"{full_alias}. In a future version of pandas, "
+        f"the provided callable will be used directly. To keep current "
+        f"behavior pass the string {alias} instead.",
+        category=FutureWarning,
+        stacklevel=find_stack_level(),
+    )
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/__init__.py
new file mode 100644
index 00000000..a7655a01
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/__init__.py
@@ -0,0 +1,9 @@
+"""
+core.array_algos is for algorithms that operate on ndarray and ExtensionArray.
+These should:
+
+- Assume that any Index, Series, or DataFrame objects have already been unwrapped.
+- Assume that any list arguments have already been cast to ndarray/EA.
+- Not depend on Index, Series, or DataFrame, nor import any of these.
+- May dispatch to ExtensionArray methods, but should not import from core.arrays.
+""" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/datetimelike_accumulations.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/datetimelike_accumulations.py new file mode 100644 index 00000000..825fe60e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/datetimelike_accumulations.py @@ -0,0 +1,67 @@ +""" +datetimelke_accumulations.py is for accumulations of datetimelike extension arrays +""" + +from __future__ import annotations + +from typing import Callable + +import numpy as np + +from pandas._libs import iNaT + +from pandas.core.dtypes.missing import isna + + +def _cum_func( + func: Callable, + values: np.ndarray, + *, + skipna: bool = True, +): + """ + Accumulations for 1D datetimelike arrays. + + Parameters + ---------- + func : np.cumsum, np.maximum.accumulate, np.minimum.accumulate + values : np.ndarray + Numpy array with the values (can be of any dtype that support the + operation). Values is changed is modified inplace. + skipna : bool, default True + Whether to skip NA. + """ + try: + fill_value = { + np.maximum.accumulate: np.iinfo(np.int64).min, + np.cumsum: 0, + np.minimum.accumulate: np.iinfo(np.int64).max, + }[func] + except KeyError: + raise ValueError(f"No accumulation for {func} implemented on BaseMaskedArray") + + mask = isna(values) + y = values.view("i8") + y[mask] = fill_value + + if not skipna: + mask = np.maximum.accumulate(mask) + + result = func(y) + result[mask] = iNaT + + if values.dtype.kind in "mM": + return result.view(values.dtype.base) + return result + + +def cumsum(values: np.ndarray, *, skipna: bool = True) -> np.ndarray: + return _cum_func(np.cumsum, values, skipna=skipna) + + +def cummin(values: np.ndarray, *, skipna: bool = True): + return _cum_func(np.minimum.accumulate, values, skipna=skipna) + + +def cummax(values: np.ndarray, *, skipna: bool = True): + return _cum_func(np.maximum.accumulate, values, skipna=skipna) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/masked_accumulations.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/masked_accumulations.py new file mode 100644 index 00000000..ad9e96d3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/masked_accumulations.py @@ -0,0 +1,90 @@ +""" +masked_accumulations.py is for accumulation algorithms using a mask-based approach +for missing values. +""" + +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Callable, +) + +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import npt + + +def _cum_func( + func: Callable, + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, +): + """ + Accumulations for 1D masked array. + + We will modify values in place to replace NAs with the appropriate fill value. + + Parameters + ---------- + func : np.cumsum, np.cumprod, np.maximum.accumulate, np.minimum.accumulate + values : np.ndarray + Numpy array with the values (can be of any dtype that support the + operation). + mask : np.ndarray + Boolean numpy array (True values indicate missing values). + skipna : bool, default True + Whether to skip NA. + """ + dtype_info: np.iinfo | np.finfo + if values.dtype.kind == "f": + dtype_info = np.finfo(values.dtype.type) + elif values.dtype.kind in "iu": + dtype_info = np.iinfo(values.dtype.type) + elif values.dtype.kind == "b": + # Max value of bool is 1, but since we are setting into a boolean + # array, 255 is fine as well. 
Min value has to be 0 when setting + # into the boolean array. + dtype_info = np.iinfo(np.uint8) + else: + raise NotImplementedError( + f"No masked accumulation defined for dtype {values.dtype.type}" + ) + try: + fill_value = { + np.cumprod: 1, + np.maximum.accumulate: dtype_info.min, + np.cumsum: 0, + np.minimum.accumulate: dtype_info.max, + }[func] + except KeyError: + raise NotImplementedError( + f"No accumulation for {func} implemented on BaseMaskedArray" + ) + + values[mask] = fill_value + + if not skipna: + mask = np.maximum.accumulate(mask) + + values = func(values) + return values, mask + + +def cumsum(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.cumsum, values, mask, skipna=skipna) + + +def cumprod(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.cumprod, values, mask, skipna=skipna) + + +def cummin(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.minimum.accumulate, values, mask, skipna=skipna) + + +def cummax(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.maximum.accumulate, values, mask, skipna=skipna) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/masked_reductions.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/masked_reductions.py new file mode 100644 index 00000000..335fa1af --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/masked_reductions.py @@ -0,0 +1,197 @@ +""" +masked_reductions.py is for reduction algorithms using a mask-based approach +for missing values. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Callable, +) +import warnings + +import numpy as np + +from pandas._libs import missing as libmissing + +from pandas.core.nanops import check_below_min_count + +if TYPE_CHECKING: + from pandas._typing import ( + AxisInt, + npt, + ) + + +def _reductions( + func: Callable, + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + min_count: int = 0, + axis: AxisInt | None = None, + **kwargs, +): + """ + Sum, mean or product for 1D masked array. + + Parameters + ---------- + func : np.sum or np.prod + values : np.ndarray + Numpy array with the values (can be of any dtype that support the + operation). + mask : np.ndarray[bool] + Boolean numpy array (True values indicate missing values). + skipna : bool, default True + Whether to skip NA. + min_count : int, default 0 + The required number of valid values to perform the operation. If fewer than + ``min_count`` non-NA values are present the result will be NA. 
+ axis : int, optional, default None + """ + if not skipna: + if mask.any() or check_below_min_count(values.shape, None, min_count): + return libmissing.NA + else: + return func(values, axis=axis, **kwargs) + else: + if check_below_min_count(values.shape, mask, min_count) and ( + axis is None or values.ndim == 1 + ): + return libmissing.NA + + return func(values, where=~mask, axis=axis, **kwargs) + + +def sum( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + min_count: int = 0, + axis: AxisInt | None = None, +): + return _reductions( + np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis + ) + + +def prod( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + min_count: int = 0, + axis: AxisInt | None = None, +): + return _reductions( + np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis + ) + + +def _minmax( + func: Callable, + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + axis: AxisInt | None = None, +): + """ + Reduction for 1D masked array. + + Parameters + ---------- + func : np.min or np.max + values : np.ndarray + Numpy array with the values (can be of any dtype that support the + operation). + mask : np.ndarray[bool] + Boolean numpy array (True values indicate missing values). + skipna : bool, default True + Whether to skip NA. + axis : int, optional, default None + """ + if not skipna: + if mask.any() or not values.size: + # min/max with empty array raise in numpy, pandas returns NA + return libmissing.NA + else: + return func(values, axis=axis) + else: + subset = values[~mask] + if subset.size: + return func(subset, axis=axis) + else: + # min/max with empty array raise in numpy, pandas returns NA + return libmissing.NA + + +def min( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + axis: AxisInt | None = None, +): + return _minmax(np.min, values=values, mask=mask, skipna=skipna, axis=axis) + + +def max( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + axis: AxisInt | None = None, +): + return _minmax(np.max, values=values, mask=mask, skipna=skipna, axis=axis) + + +def mean( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + axis: AxisInt | None = None, +): + if not values.size or mask.all(): + return libmissing.NA + return _reductions(np.mean, values=values, mask=mask, skipna=skipna, axis=axis) + + +def var( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + axis: AxisInt | None = None, + ddof: int = 1, +): + if not values.size or mask.all(): + return libmissing.NA + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", RuntimeWarning) + return _reductions( + np.var, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof + ) + + +def std( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + axis: AxisInt | None = None, + ddof: int = 1, +): + if not values.size or mask.all(): + return libmissing.NA + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", RuntimeWarning) + return _reductions( + np.std, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/putmask.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/putmask.py new file mode 100644 index 00000000..f65d2d20 --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/putmask.py @@ -0,0 +1,149 @@ +""" +EA-compatible analogue to np.putmask +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np + +from pandas._libs import lib + +from pandas.core.dtypes.cast import infer_dtype_from +from pandas.core.dtypes.common import is_list_like + +from pandas.core.arrays import ExtensionArray + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + npt, + ) + + from pandas import MultiIndex + + +def putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) -> None: + """ + ExtensionArray-compatible implementation of np.putmask. The main + difference is we do not handle repeating or truncating like numpy. + + Parameters + ---------- + values: np.ndarray or ExtensionArray + mask : np.ndarray[bool] + We assume extract_bool_array has already been called. + value : Any + """ + + if ( + not isinstance(values, np.ndarray) + or (values.dtype == object and not lib.is_scalar(value)) + # GH#43424: np.putmask raises TypeError if we cannot cast between types with + # rule = "safe", a stricter guarantee we may not have here + or ( + isinstance(value, np.ndarray) and not np.can_cast(value.dtype, values.dtype) + ) + ): + # GH#19266 using np.putmask gives unexpected results with listlike value + # along with object dtype + if is_list_like(value) and len(value) == len(values): + values[mask] = value[mask] + else: + values[mask] = value + else: + # GH#37833 np.putmask is more performant than __setitem__ + np.putmask(values, mask, value) + + +def putmask_without_repeat( + values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any +) -> None: + """ + np.putmask will truncate or repeat if `new` is a listlike with + len(new) != len(values). We require an exact match. + + Parameters + ---------- + values : np.ndarray + mask : np.ndarray[bool] + new : Any + """ + if getattr(new, "ndim", 0) >= 1: + new = new.astype(values.dtype, copy=False) + + # TODO: this prob needs some better checking for 2D cases + nlocs = mask.sum() + if nlocs > 0 and is_list_like(new) and getattr(new, "ndim", 1) == 1: + shape = np.shape(new) + # np.shape compat for if setitem_datetimelike_compat + # changed arraylike to list e.g. test_where_dt64_2d + if nlocs == shape[-1]: + # GH#30567 + # If length of ``new`` is less than the length of ``values``, + # `np.putmask` would first repeat the ``new`` array and then + # assign the masked values hence produces incorrect result. + # `np.place` on the other hand uses the ``new`` values at it is + # to place in the masked locations of ``values`` + np.place(values, mask, new) + # i.e. values[mask] = new + elif mask.shape[-1] == shape[-1] or shape[-1] == 1: + np.putmask(values, mask, new) + else: + raise ValueError("cannot assign mismatch length to masked array") + else: + np.putmask(values, mask, new) + + +def validate_putmask( + values: ArrayLike | MultiIndex, mask: np.ndarray +) -> tuple[npt.NDArray[np.bool_], bool]: + """ + Validate mask and check if this putmask operation is a no-op. + """ + mask = extract_bool_array(mask) + if mask.shape != values.shape: + raise ValueError("putmask: mask and data must be the same size") + + noop = not mask.any() + return mask, noop + + +def extract_bool_array(mask: ArrayLike) -> npt.NDArray[np.bool_]: + """ + If we have a SparseArray or BooleanArray, convert it to ndarray[bool]. + """ + if isinstance(mask, ExtensionArray): + # We could have BooleanArray, Sparse[bool], ... 
+ # Except for BooleanArray, this is equivalent to just + # np.asarray(mask, dtype=bool) + mask = mask.to_numpy(dtype=bool, na_value=False) + + mask = np.asarray(mask, dtype=bool) + return mask + + +def setitem_datetimelike_compat(values: np.ndarray, num_set: int, other): + """ + Parameters + ---------- + values : np.ndarray + num_set : int + For putmask, this is mask.sum() + other : Any + """ + if values.dtype == object: + dtype, _ = infer_dtype_from(other) + + if lib.is_np_dtype(dtype, "mM"): + # https://github.com/numpy/numpy/issues/12550 + # timedelta64 will incorrectly cast to int + if not is_list_like(other): + other = [other] * num_set + else: + other = list(other) + + return other diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/quantile.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/quantile.py new file mode 100644 index 00000000..ee6f00b2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/quantile.py @@ -0,0 +1,226 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.core.dtypes.missing import ( + isna, + na_value_for_dtype, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + Scalar, + npt, + ) + + +def quantile_compat( + values: ArrayLike, qs: npt.NDArray[np.float64], interpolation: str +) -> ArrayLike: + """ + Compute the quantiles of the given values for each quantile in `qs`. + + Parameters + ---------- + values : np.ndarray or ExtensionArray + qs : np.ndarray[float64] + interpolation : str + + Returns + ------- + np.ndarray or ExtensionArray + """ + if isinstance(values, np.ndarray): + fill_value = na_value_for_dtype(values.dtype, compat=False) + mask = isna(values) + return quantile_with_mask(values, mask, fill_value, qs, interpolation) + else: + return values._quantile(qs, interpolation) + + +def quantile_with_mask( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + fill_value, + qs: npt.NDArray[np.float64], + interpolation: str, +) -> np.ndarray: + """ + Compute the quantiles of the given values for each quantile in `qs`. + + Parameters + ---------- + values : np.ndarray + For ExtensionArray, this is _values_for_factorize()[0] + mask : np.ndarray[bool] + mask = isna(values) + For ExtensionArray, this is computed before calling _value_for_factorize + fill_value : Scalar + The value to interpret fill NA entries with + For ExtensionArray, this is _values_for_factorize()[1] + qs : np.ndarray[float64] + interpolation : str + Type of interpolation + + Returns + ------- + np.ndarray + + Notes + ----- + Assumes values is already 2D. For ExtensionArray this means np.atleast_2d + has been called on _values_for_factorize()[0] + + Quantile is computed along axis=1. 
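+
+    Examples
+    --------
+    A minimal sketch with no missing values (so the mask is all-False):
+
+    >>> values = np.array([[1.0, 2.0, 3.0, 4.0]])
+    >>> mask = np.zeros_like(values, dtype=bool)
+    >>> quantile_with_mask(values, mask, np.nan, np.array([0.5]), "linear")
+    array([[2.5]])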
+ """ + assert values.shape == mask.shape + if values.ndim == 1: + # unsqueeze, operate, re-squeeze + values = np.atleast_2d(values) + mask = np.atleast_2d(mask) + res_values = quantile_with_mask(values, mask, fill_value, qs, interpolation) + return res_values[0] + + assert values.ndim == 2 + + is_empty = values.shape[1] == 0 + + if is_empty: + # create the array of na_values + # 2d len(values) * len(qs) + flat = np.array([fill_value] * len(qs)) + result = np.repeat(flat, len(values)).reshape(len(values), len(qs)) + else: + result = _nanpercentile( + values, + qs * 100.0, + na_value=fill_value, + mask=mask, + interpolation=interpolation, + ) + + result = np.array(result, copy=False) + result = result.T + + return result + + +def _nanpercentile_1d( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + qs: npt.NDArray[np.float64], + na_value: Scalar, + interpolation: str, +) -> Scalar | np.ndarray: + """ + Wrapper for np.percentile that skips missing values, specialized to + 1-dimensional case. + + Parameters + ---------- + values : array over which to find quantiles + mask : ndarray[bool] + locations in values that should be considered missing + qs : np.ndarray[float64] of quantile indices to find + na_value : scalar + value to return for empty or all-null values + interpolation : str + + Returns + ------- + quantiles : scalar or array + """ + # mask is Union[ExtensionArray, ndarray] + values = values[~mask] + + if len(values) == 0: + # Can't pass dtype=values.dtype here bc we might have na_value=np.nan + # with values.dtype=int64 see test_quantile_empty + # equiv: 'np.array([na_value] * len(qs))' but much faster + return np.full(len(qs), na_value) + + return np.percentile( + values, + qs, + # error: No overload variant of "percentile" matches argument + # types "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]" + # , "Dict[str, str]" [call-overload] + method=interpolation, # type: ignore[call-overload] + ) + + +def _nanpercentile( + values: np.ndarray, + qs: npt.NDArray[np.float64], + *, + na_value, + mask: npt.NDArray[np.bool_], + interpolation: str, +): + """ + Wrapper for np.percentile that skips missing values. + + Parameters + ---------- + values : np.ndarray[ndim=2] over which to find quantiles + qs : np.ndarray[float64] of quantile indices to find + na_value : scalar + value to return for empty or all-null values + mask : np.ndarray[bool] + locations in values that should be considered missing + interpolation : str + + Returns + ------- + quantiles : scalar or array + """ + + if values.dtype.kind in "mM": + # need to cast to integer to avoid rounding errors in numpy + result = _nanpercentile( + values.view("i8"), + qs=qs, + na_value=na_value.view("i8"), + mask=mask, + interpolation=interpolation, + ) + + # Note: we have to do `astype` and not view because in general we + # have float result at this point, not i8 + return result.astype(values.dtype) + + if mask.any(): + # Caller is responsible for ensuring mask shape match + assert mask.shape == values.shape + result = [ + _nanpercentile_1d(val, m, qs, na_value, interpolation=interpolation) + for (val, m) in zip(list(values), list(mask)) + ] + if values.dtype.kind == "f": + # preserve itemsize + result = np.array(result, dtype=values.dtype, copy=False).T + else: + result = np.array(result, copy=False).T + if ( + result.dtype != values.dtype + and not mask.all() + and (result == result.astype(values.dtype, copy=False)).all() + ): + # mask.all() will never get cast back to int + # e.g. 
values id integer dtype and result is floating dtype, + # only cast back to integer dtype if result values are all-integer. + result = result.astype(values.dtype, copy=False) + return result + else: + return np.percentile( + values, + qs, + axis=1, + # error: No overload variant of "percentile" matches argument types + # "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]", + # "int", "Dict[str, str]" [call-overload] + method=interpolation, # type: ignore[call-overload] + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/replace.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/replace.py new file mode 100644 index 00000000..5f377276 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/replace.py @@ -0,0 +1,152 @@ +""" +Methods used by Block.replace and related methods. +""" +from __future__ import annotations + +import operator +import re +from re import Pattern +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np + +from pandas.core.dtypes.common import ( + is_bool, + is_re, + is_re_compilable, +) +from pandas.core.dtypes.missing import isna + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + Scalar, + npt, + ) + + +def should_use_regex(regex: bool, to_replace: Any) -> bool: + """ + Decide whether to treat `to_replace` as a regular expression. + """ + if is_re(to_replace): + regex = True + + regex = regex and is_re_compilable(to_replace) + + # Don't use regex if the pattern is empty. + regex = regex and re.compile(to_replace).pattern != "" + return regex + + +def compare_or_regex_search( + a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: npt.NDArray[np.bool_] +) -> ArrayLike: + """ + Compare two array-like inputs of the same shape or two scalar values + + Calls operator.eq or re.search, depending on regex argument. If regex is + True, perform an element-wise regex matching. + + Parameters + ---------- + a : array-like + b : scalar or regex pattern + regex : bool + mask : np.ndarray[bool] + + Returns + ------- + mask : array-like of bool + """ + if isna(b): + return ~mask + + def _check_comparison_types( + result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern + ): + """ + Raises an error if the two arrays (a,b) cannot be compared. + Otherwise, returns the comparison result as expected. + """ + if is_bool(result) and isinstance(a, np.ndarray): + type_names = [type(a).__name__, type(b).__name__] + + type_names[0] = f"ndarray(dtype={a.dtype})" + + raise TypeError( + f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}" + ) + + if not regex or not should_use_regex(regex, b): + # TODO: should use missing.mask_missing? + op = lambda x: operator.eq(x, b) + else: + op = np.vectorize( + lambda x: bool(re.search(b, x)) + if isinstance(x, str) and isinstance(b, (str, Pattern)) + else False + ) + + # GH#32621 use mask to avoid comparing to NAs + if isinstance(a, np.ndarray): + a = a[mask] + + result = op(a) + + if isinstance(result, np.ndarray) and mask is not None: + # The shape of the mask can differ to that of the result + # since we may compare only a subset of a's or b's elements + tmp = np.zeros(mask.shape, dtype=np.bool_) + np.place(tmp, mask, result) + result = tmp + + _check_comparison_types(result, a, b) + return result + + +def replace_regex( + values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None +) -> None: + """ + Parameters + ---------- + values : ArrayLike + Object dtype. 
+ rx : re.Pattern + value : Any + mask : np.ndarray[bool], optional + + Notes + ----- + Alters values in-place. + """ + + # deal with replacing values with objects (strings) that match but + # whose replacement is not a string (numeric, nan, object) + if isna(value) or not isinstance(value, str): + + def re_replacer(s): + if is_re(rx) and isinstance(s, str): + return value if rx.search(s) is not None else s + else: + return s + + else: + # value is guaranteed to be a string here, s can be either a string + # or null if it's null it gets returned + def re_replacer(s): + if is_re(rx) and isinstance(s, str): + return rx.sub(value, s) + else: + return s + + f = np.vectorize(re_replacer, otypes=[np.object_]) + + if mask is None: + values[:] = f(values) + else: + values[mask] = f(values[mask]) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/take.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/take.py new file mode 100644 index 00000000..8ea70e26 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/take.py @@ -0,0 +1,595 @@ +from __future__ import annotations + +import functools +from typing import ( + TYPE_CHECKING, + cast, + overload, +) + +import numpy as np + +from pandas._libs import ( + algos as libalgos, + lib, +) + +from pandas.core.dtypes.cast import maybe_promote +from pandas.core.dtypes.common import ( + ensure_platform_int, + is_1d_only_ea_dtype, +) +from pandas.core.dtypes.missing import na_value_for_dtype + +from pandas.core.construction import ensure_wrapped_if_datetimelike + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + AxisInt, + npt, + ) + + from pandas.core.arrays._mixins import NDArrayBackedExtensionArray + from pandas.core.arrays.base import ExtensionArray + + +@overload +def take_nd( + arr: np.ndarray, + indexer, + axis: AxisInt = ..., + fill_value=..., + allow_fill: bool = ..., +) -> np.ndarray: + ... + + +@overload +def take_nd( + arr: ExtensionArray, + indexer, + axis: AxisInt = ..., + fill_value=..., + allow_fill: bool = ..., +) -> ArrayLike: + ... + + +def take_nd( + arr: ArrayLike, + indexer, + axis: AxisInt = 0, + fill_value=lib.no_default, + allow_fill: bool = True, +) -> ArrayLike: + """ + Specialized Cython take which sets NaN values in one pass + + This dispatches to ``take`` defined on ExtensionArrays. It does not + currently dispatch to ``SparseArray.take`` for sparse ``arr``. + + Note: this function assumes that the indexer is a valid(ated) indexer with + no out of bound indices. + + Parameters + ---------- + arr : np.ndarray or ExtensionArray + Input array. + indexer : ndarray + 1-D array of indices to take, subarrays corresponding to -1 value + indices are filed with fill_value + axis : int, default 0 + Axis to take from + fill_value : any, default np.nan + Fill value to replace -1 values with + allow_fill : bool, default True + If False, indexer is assumed to contain no -1 values so no filling + will be done. This short-circuits computation of a mask. Result is + undefined if allow_fill == False and -1 is present in indexer. + + Returns + ------- + subarray : np.ndarray or ExtensionArray + May be the same type as the input, or cast to an ndarray. 
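+
+    Examples
+    --------
+    A small sketch: ``-1`` entries in the indexer are filled with NaN,
+    the default fill value for a float64 array.
+
+    >>> import numpy as np
+    >>> take_nd(np.array([1.0, 2.0, 3.0]), np.array([0, 2, -1]))
+    array([ 1.,  3., nan])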
+ """ + if fill_value is lib.no_default: + fill_value = na_value_for_dtype(arr.dtype, compat=False) + elif lib.is_np_dtype(arr.dtype, "mM"): + dtype, fill_value = maybe_promote(arr.dtype, fill_value) + if arr.dtype != dtype: + # EA.take is strict about returning a new object of the same type + # so for that case cast upfront + arr = arr.astype(dtype) + + if not isinstance(arr, np.ndarray): + # i.e. ExtensionArray, + # includes for EA to catch DatetimeArray, TimedeltaArray + if not is_1d_only_ea_dtype(arr.dtype): + # i.e. DatetimeArray, TimedeltaArray + arr = cast("NDArrayBackedExtensionArray", arr) + return arr.take( + indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis + ) + + return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) + + arr = np.asarray(arr) + return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill) + + +def _take_nd_ndarray( + arr: np.ndarray, + indexer: npt.NDArray[np.intp] | None, + axis: AxisInt, + fill_value, + allow_fill: bool, +) -> np.ndarray: + if indexer is None: + indexer = np.arange(arr.shape[axis], dtype=np.intp) + dtype, fill_value = arr.dtype, arr.dtype.type() + else: + indexer = ensure_platform_int(indexer) + + dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( + arr, indexer, fill_value, allow_fill + ) + + flip_order = False + if arr.ndim == 2 and arr.flags.f_contiguous: + flip_order = True + + if flip_order: + arr = arr.T + axis = arr.ndim - axis - 1 + + # at this point, it's guaranteed that dtype can hold both the arr values + # and the fill_value + out_shape_ = list(arr.shape) + out_shape_[axis] = len(indexer) + out_shape = tuple(out_shape_) + if arr.flags.f_contiguous and axis == arr.ndim - 1: + # minor tweak that can make an order-of-magnitude difference + # for dataframes initialized directly from 2-d ndarrays + # (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its + # f-contiguous transpose) + out = np.empty(out_shape, dtype=dtype, order="F") + else: + out = np.empty(out_shape, dtype=dtype) + + func = _get_take_nd_function( + arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info + ) + func(arr, indexer, out, fill_value) + + if flip_order: + out = out.T + return out + + +def take_1d( + arr: ArrayLike, + indexer: npt.NDArray[np.intp], + fill_value=None, + allow_fill: bool = True, + mask: npt.NDArray[np.bool_] | None = None, +) -> ArrayLike: + """ + Specialized version for 1D arrays. Differences compared to `take_nd`: + + - Assumes input array has already been converted to numpy array / EA + - Assumes indexer is already guaranteed to be intp dtype ndarray + - Only works for 1D arrays + + To ensure the lowest possible overhead. + + Note: similarly to `take_nd`, this function assumes that the indexer is + a valid(ated) indexer with no out of bound indices. + + Parameters + ---------- + arr : np.ndarray or ExtensionArray + Input array. + indexer : ndarray + 1-D array of indices to take (validated indices, intp dtype). + fill_value : any, default np.nan + Fill value to replace -1 values with + allow_fill : bool, default True + If False, indexer is assumed to contain no -1 values so no filling + will be done. This short-circuits computation of a mask. Result is + undefined if allow_fill == False and -1 is present in indexer. + mask : np.ndarray, optional, default None + If `allow_fill` is True, and the mask (where indexer == -1) is already + known, it can be passed to avoid recomputation. 
+ """ + if not isinstance(arr, np.ndarray): + # ExtensionArray -> dispatch to their method + return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) + + if not allow_fill: + return arr.take(indexer) + + dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( + arr, indexer, fill_value, True, mask + ) + + # at this point, it's guaranteed that dtype can hold both the arr values + # and the fill_value + out = np.empty(indexer.shape, dtype=dtype) + + func = _get_take_nd_function( + arr.ndim, arr.dtype, out.dtype, axis=0, mask_info=mask_info + ) + func(arr, indexer, out, fill_value) + + return out + + +def take_2d_multi( + arr: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + fill_value=np.nan, +) -> np.ndarray: + """ + Specialized Cython take which sets NaN values in one pass. + """ + # This is only called from one place in DataFrame._reindex_multi, + # so we know indexer is well-behaved. + assert indexer is not None + assert indexer[0] is not None + assert indexer[1] is not None + + row_idx, col_idx = indexer + + row_idx = ensure_platform_int(row_idx) + col_idx = ensure_platform_int(col_idx) + indexer = row_idx, col_idx + mask_info = None + + # check for promotion based on types only (do this first because + # it's faster than computing a mask) + dtype, fill_value = maybe_promote(arr.dtype, fill_value) + if dtype != arr.dtype: + # check if promotion is actually required based on indexer + row_mask = row_idx == -1 + col_mask = col_idx == -1 + row_needs = row_mask.any() + col_needs = col_mask.any() + mask_info = (row_mask, col_mask), (row_needs, col_needs) + + if not (row_needs or col_needs): + # if not, then depromote, set fill_value to dummy + # (it won't be used but we don't want the cython code + # to crash when trying to cast it to dtype) + dtype, fill_value = arr.dtype, arr.dtype.type() + + # at this point, it's guaranteed that dtype can hold both the arr values + # and the fill_value + out_shape = len(row_idx), len(col_idx) + out = np.empty(out_shape, dtype=dtype) + + func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None) + if func is None and arr.dtype != out.dtype: + func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None) + if func is not None: + func = _convert_wrapper(func, out.dtype) + + if func is not None: + func(arr, indexer, out=out, fill_value=fill_value) + else: + # test_reindex_multi + _take_2d_multi_object( + arr, indexer, out, fill_value=fill_value, mask_info=mask_info + ) + + return out + + +@functools.lru_cache +def _get_take_nd_function_cached( + ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt +): + """ + Part of _get_take_nd_function below that doesn't need `mask_info` and thus + can be cached (mask_info potentially contains a numpy ndarray which is not + hashable and thus cannot be used as argument for cached function). + """ + tup = (arr_dtype.name, out_dtype.name) + if ndim == 1: + func = _take_1d_dict.get(tup, None) + elif ndim == 2: + if axis == 0: + func = _take_2d_axis0_dict.get(tup, None) + else: + func = _take_2d_axis1_dict.get(tup, None) + if func is not None: + return func + + # We get here with string, uint, float16, and complex dtypes that could + # potentially be handled in algos_take_helper. 
+ # Also a couple with (M8[ns], object) and (m8[ns], object) + tup = (out_dtype.name, out_dtype.name) + if ndim == 1: + func = _take_1d_dict.get(tup, None) + elif ndim == 2: + if axis == 0: + func = _take_2d_axis0_dict.get(tup, None) + else: + func = _take_2d_axis1_dict.get(tup, None) + if func is not None: + func = _convert_wrapper(func, out_dtype) + return func + + return None + + +def _get_take_nd_function( + ndim: int, + arr_dtype: np.dtype, + out_dtype: np.dtype, + axis: AxisInt = 0, + mask_info=None, +): + """ + Get the appropriate "take" implementation for the given dimension, axis + and dtypes. + """ + func = None + if ndim <= 2: + # for this part we don't need `mask_info` -> use the cached algo lookup + func = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis) + + if func is None: + + def func(arr, indexer, out, fill_value=np.nan) -> None: + indexer = ensure_platform_int(indexer) + _take_nd_object( + arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info + ) + + return func + + +def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None): + def wrapper( + arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan + ) -> None: + if arr_dtype is not None: + arr = arr.view(arr_dtype) + if out_dtype is not None: + out = out.view(out_dtype) + if fill_wrap is not None: + # FIXME: if we get here with dt64/td64 we need to be sure we have + # matching resos + if fill_value.dtype.kind == "m": + fill_value = fill_value.astype("m8[ns]") + else: + fill_value = fill_value.astype("M8[ns]") + fill_value = fill_wrap(fill_value) + + f(arr, indexer, out, fill_value=fill_value) + + return wrapper + + +def _convert_wrapper(f, conv_dtype): + def wrapper( + arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan + ) -> None: + if conv_dtype == object: + # GH#39755 avoid casting dt64/td64 to integers + arr = ensure_wrapped_if_datetimelike(arr) + arr = arr.astype(conv_dtype) + f(arr, indexer, out, fill_value=fill_value) + + return wrapper + + +_take_1d_dict = { + ("int8", "int8"): libalgos.take_1d_int8_int8, + ("int8", "int32"): libalgos.take_1d_int8_int32, + ("int8", "int64"): libalgos.take_1d_int8_int64, + ("int8", "float64"): libalgos.take_1d_int8_float64, + ("int16", "int16"): libalgos.take_1d_int16_int16, + ("int16", "int32"): libalgos.take_1d_int16_int32, + ("int16", "int64"): libalgos.take_1d_int16_int64, + ("int16", "float64"): libalgos.take_1d_int16_float64, + ("int32", "int32"): libalgos.take_1d_int32_int32, + ("int32", "int64"): libalgos.take_1d_int32_int64, + ("int32", "float64"): libalgos.take_1d_int32_float64, + ("int64", "int64"): libalgos.take_1d_int64_int64, + ("int64", "float64"): libalgos.take_1d_int64_float64, + ("float32", "float32"): libalgos.take_1d_float32_float32, + ("float32", "float64"): libalgos.take_1d_float32_float64, + ("float64", "float64"): libalgos.take_1d_float64_float64, + ("object", "object"): libalgos.take_1d_object_object, + ("bool", "bool"): _view_wrapper(libalgos.take_1d_bool_bool, np.uint8, np.uint8), + ("bool", "object"): _view_wrapper(libalgos.take_1d_bool_object, np.uint8, None), + ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( + libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64 + ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64 + ), +} + +_take_2d_axis0_dict = { + ("int8", "int8"): libalgos.take_2d_axis0_int8_int8, + ("int8", "int32"): libalgos.take_2d_axis0_int8_int32, + ("int8", "int64"): 
libalgos.take_2d_axis0_int8_int64, + ("int8", "float64"): libalgos.take_2d_axis0_int8_float64, + ("int16", "int16"): libalgos.take_2d_axis0_int16_int16, + ("int16", "int32"): libalgos.take_2d_axis0_int16_int32, + ("int16", "int64"): libalgos.take_2d_axis0_int16_int64, + ("int16", "float64"): libalgos.take_2d_axis0_int16_float64, + ("int32", "int32"): libalgos.take_2d_axis0_int32_int32, + ("int32", "int64"): libalgos.take_2d_axis0_int32_int64, + ("int32", "float64"): libalgos.take_2d_axis0_int32_float64, + ("int64", "int64"): libalgos.take_2d_axis0_int64_int64, + ("int64", "float64"): libalgos.take_2d_axis0_int64_float64, + ("float32", "float32"): libalgos.take_2d_axis0_float32_float32, + ("float32", "float64"): libalgos.take_2d_axis0_float32_float64, + ("float64", "float64"): libalgos.take_2d_axis0_float64_float64, + ("object", "object"): libalgos.take_2d_axis0_object_object, + ("bool", "bool"): _view_wrapper( + libalgos.take_2d_axis0_bool_bool, np.uint8, np.uint8 + ), + ("bool", "object"): _view_wrapper( + libalgos.take_2d_axis0_bool_object, np.uint8, None + ), + ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( + libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), +} + +_take_2d_axis1_dict = { + ("int8", "int8"): libalgos.take_2d_axis1_int8_int8, + ("int8", "int32"): libalgos.take_2d_axis1_int8_int32, + ("int8", "int64"): libalgos.take_2d_axis1_int8_int64, + ("int8", "float64"): libalgos.take_2d_axis1_int8_float64, + ("int16", "int16"): libalgos.take_2d_axis1_int16_int16, + ("int16", "int32"): libalgos.take_2d_axis1_int16_int32, + ("int16", "int64"): libalgos.take_2d_axis1_int16_int64, + ("int16", "float64"): libalgos.take_2d_axis1_int16_float64, + ("int32", "int32"): libalgos.take_2d_axis1_int32_int32, + ("int32", "int64"): libalgos.take_2d_axis1_int32_int64, + ("int32", "float64"): libalgos.take_2d_axis1_int32_float64, + ("int64", "int64"): libalgos.take_2d_axis1_int64_int64, + ("int64", "float64"): libalgos.take_2d_axis1_int64_float64, + ("float32", "float32"): libalgos.take_2d_axis1_float32_float32, + ("float32", "float64"): libalgos.take_2d_axis1_float32_float64, + ("float64", "float64"): libalgos.take_2d_axis1_float64_float64, + ("object", "object"): libalgos.take_2d_axis1_object_object, + ("bool", "bool"): _view_wrapper( + libalgos.take_2d_axis1_bool_bool, np.uint8, np.uint8 + ), + ("bool", "object"): _view_wrapper( + libalgos.take_2d_axis1_bool_object, np.uint8, None + ), + ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( + libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), +} + +_take_2d_multi_dict = { + ("int8", "int8"): libalgos.take_2d_multi_int8_int8, + ("int8", "int32"): libalgos.take_2d_multi_int8_int32, + ("int8", "int64"): libalgos.take_2d_multi_int8_int64, + ("int8", "float64"): libalgos.take_2d_multi_int8_float64, + ("int16", "int16"): libalgos.take_2d_multi_int16_int16, + ("int16", "int32"): libalgos.take_2d_multi_int16_int32, + ("int16", "int64"): libalgos.take_2d_multi_int16_int64, + ("int16", "float64"): libalgos.take_2d_multi_int16_float64, + ("int32", "int32"): libalgos.take_2d_multi_int32_int32, + ("int32", "int64"): libalgos.take_2d_multi_int32_int64, + ("int32", "float64"): libalgos.take_2d_multi_int32_float64, + ("int64", 
"int64"): libalgos.take_2d_multi_int64_int64, + ("int64", "float64"): libalgos.take_2d_multi_int64_float64, + ("float32", "float32"): libalgos.take_2d_multi_float32_float32, + ("float32", "float64"): libalgos.take_2d_multi_float32_float64, + ("float64", "float64"): libalgos.take_2d_multi_float64_float64, + ("object", "object"): libalgos.take_2d_multi_object_object, + ("bool", "bool"): _view_wrapper( + libalgos.take_2d_multi_bool_bool, np.uint8, np.uint8 + ), + ("bool", "object"): _view_wrapper( + libalgos.take_2d_multi_bool_object, np.uint8, None + ), + ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( + libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), +} + + +def _take_nd_object( + arr: np.ndarray, + indexer: npt.NDArray[np.intp], + out: np.ndarray, + axis: AxisInt, + fill_value, + mask_info, +) -> None: + if mask_info is not None: + mask, needs_masking = mask_info + else: + mask = indexer == -1 + needs_masking = mask.any() + if arr.dtype != out.dtype: + arr = arr.astype(out.dtype) + if arr.shape[axis] > 0: + arr.take(indexer, axis=axis, out=out) + if needs_masking: + outindexer = [slice(None)] * arr.ndim + outindexer[axis] = mask + out[tuple(outindexer)] = fill_value + + +def _take_2d_multi_object( + arr: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value, + mask_info, +) -> None: + # this is not ideal, performance-wise, but it's better than raising + # an exception (best to optimize in Cython to avoid getting here) + row_idx, col_idx = indexer # both np.intp + if mask_info is not None: + (row_mask, col_mask), (row_needs, col_needs) = mask_info + else: + row_mask = row_idx == -1 + col_mask = col_idx == -1 + row_needs = row_mask.any() + col_needs = col_mask.any() + if fill_value is not None: + if row_needs: + out[row_mask, :] = fill_value + if col_needs: + out[:, col_mask] = fill_value + for i, u_ in enumerate(row_idx): + if u_ != -1: + for j, v in enumerate(col_idx): + if v != -1: + out[i, j] = arr[u_, v] + + +def _take_preprocess_indexer_and_fill_value( + arr: np.ndarray, + indexer: npt.NDArray[np.intp], + fill_value, + allow_fill: bool, + mask: npt.NDArray[np.bool_] | None = None, +): + mask_info: tuple[np.ndarray | None, bool] | None = None + + if not allow_fill: + dtype, fill_value = arr.dtype, arr.dtype.type() + mask_info = None, False + else: + # check for promotion based on types only (do this first because + # it's faster than computing a mask) + dtype, fill_value = maybe_promote(arr.dtype, fill_value) + if dtype != arr.dtype: + # check if promotion is actually required based on indexer + if mask is not None: + needs_masking = True + else: + mask = indexer == -1 + needs_masking = bool(mask.any()) + mask_info = mask, needs_masking + if not needs_masking: + # if not, then depromote, set fill_value to dummy + # (it won't be used but we don't want the cython code + # to crash when trying to cast it to dtype) + dtype, fill_value = arr.dtype, arr.dtype.type() + + return dtype, fill_value, mask_info diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/transforms.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/transforms.py new file mode 100644 index 00000000..ec672449 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/array_algos/transforms.py @@ -0,0 +1,50 @@ +""" +transforms.py is for shape-preserving 
functions. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import ( + AxisInt, + Scalar, + ) + + +def shift( + values: np.ndarray, periods: int, axis: AxisInt, fill_value: Scalar +) -> np.ndarray: + new_values = values + + if periods == 0 or values.size == 0: + return new_values.copy() + + # make sure array sent to np.roll is c_contiguous + f_ordered = values.flags.f_contiguous + if f_ordered: + new_values = new_values.T + axis = new_values.ndim - axis - 1 + + if new_values.size: + new_values = np.roll( + new_values, + np.intp(periods), + axis=axis, + ) + + axis_indexer = [slice(None)] * values.ndim + if periods > 0: + axis_indexer[axis] = slice(None, periods) + else: + axis_indexer[axis] = slice(periods, None) + new_values[tuple(axis_indexer)] = fill_value + + # restore original order + if f_ordered: + new_values = new_values.T + + return new_values diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arraylike.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arraylike.py new file mode 100644 index 00000000..62f6737d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arraylike.py @@ -0,0 +1,527 @@ +""" +Methods that can be shared by many array-like classes or subclasses: + Series + Index + ExtensionArray +""" +from __future__ import annotations + +import operator +from typing import Any + +import numpy as np + +from pandas._libs import lib +from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op + +from pandas.core.dtypes.generic import ABCNDFrame + +from pandas.core import roperator +from pandas.core.construction import extract_array +from pandas.core.ops.common import unpack_zerodim_and_defer + +REDUCTION_ALIASES = { + "maximum": "max", + "minimum": "min", + "add": "sum", + "multiply": "prod", +} + + +class OpsMixin: + # ------------------------------------------------------------- + # Comparisons + + def _cmp_method(self, other, op): + return NotImplemented + + @unpack_zerodim_and_defer("__eq__") + def __eq__(self, other): + return self._cmp_method(other, operator.eq) + + @unpack_zerodim_and_defer("__ne__") + def __ne__(self, other): + return self._cmp_method(other, operator.ne) + + @unpack_zerodim_and_defer("__lt__") + def __lt__(self, other): + return self._cmp_method(other, operator.lt) + + @unpack_zerodim_and_defer("__le__") + def __le__(self, other): + return self._cmp_method(other, operator.le) + + @unpack_zerodim_and_defer("__gt__") + def __gt__(self, other): + return self._cmp_method(other, operator.gt) + + @unpack_zerodim_and_defer("__ge__") + def __ge__(self, other): + return self._cmp_method(other, operator.ge) + + # ------------------------------------------------------------- + # Logical Methods + + def _logical_method(self, other, op): + return NotImplemented + + @unpack_zerodim_and_defer("__and__") + def __and__(self, other): + return self._logical_method(other, operator.and_) + + @unpack_zerodim_and_defer("__rand__") + def __rand__(self, other): + return self._logical_method(other, roperator.rand_) + + @unpack_zerodim_and_defer("__or__") + def __or__(self, other): + return self._logical_method(other, operator.or_) + + @unpack_zerodim_and_defer("__ror__") + def __ror__(self, other): + return self._logical_method(other, roperator.ror_) + + @unpack_zerodim_and_defer("__xor__") + def __xor__(self, other): + return self._logical_method(other, operator.xor) + + @unpack_zerodim_and_defer("__rxor__") + def __rxor__(self, other): + 
return self._logical_method(other, roperator.rxor) + + # ------------------------------------------------------------- + # Arithmetic Methods + + def _arith_method(self, other, op): + return NotImplemented + + @unpack_zerodim_and_defer("__add__") + def __add__(self, other): + """ + Get Addition of DataFrame and other, column-wise. + + Equivalent to ``DataFrame.add(other)``. + + Parameters + ---------- + other : scalar, sequence, Series, dict or DataFrame + Object to be added to the DataFrame. + + Returns + ------- + DataFrame + The result of adding ``other`` to DataFrame. + + See Also + -------- + DataFrame.add : Add a DataFrame and another object, with option for index- + or column-oriented addition. + + Examples + -------- + >>> df = pd.DataFrame({'height': [1.5, 2.6], 'weight': [500, 800]}, + ... index=['elk', 'moose']) + >>> df + height weight + elk 1.5 500 + moose 2.6 800 + + Adding a scalar affects all rows and columns. + + >>> df[['height', 'weight']] + 1.5 + height weight + elk 3.0 501.5 + moose 4.1 801.5 + + Each element of a list is added to a column of the DataFrame, in order. + + >>> df[['height', 'weight']] + [0.5, 1.5] + height weight + elk 2.0 501.5 + moose 3.1 801.5 + + Keys of a dictionary are aligned to the DataFrame, based on column names; + each value in the dictionary is added to the corresponding column. + + >>> df[['height', 'weight']] + {'height': 0.5, 'weight': 1.5} + height weight + elk 2.0 501.5 + moose 3.1 801.5 + + When `other` is a :class:`Series`, the index of `other` is aligned with the + columns of the DataFrame. + + >>> s1 = pd.Series([0.5, 1.5], index=['weight', 'height']) + >>> df[['height', 'weight']] + s1 + height weight + elk 3.0 500.5 + moose 4.1 800.5 + + Even when the index of `other` is the same as the index of the DataFrame, + the :class:`Series` will not be reoriented. If index-wise alignment is desired, + :meth:`DataFrame.add` should be used with `axis='index'`. + + >>> s2 = pd.Series([0.5, 1.5], index=['elk', 'moose']) + >>> df[['height', 'weight']] + s2 + elk height moose weight + elk NaN NaN NaN NaN + moose NaN NaN NaN NaN + + >>> df[['height', 'weight']].add(s2, axis='index') + height weight + elk 2.0 500.5 + moose 4.1 801.5 + + When `other` is a :class:`DataFrame`, both columns names and the + index are aligned. + + >>> other = pd.DataFrame({'height': [0.2, 0.4, 0.6]}, + ... 
index=['elk', 'moose', 'deer']) + >>> df[['height', 'weight']] + other + height weight + deer NaN NaN + elk 1.7 NaN + moose 3.0 NaN + """ + return self._arith_method(other, operator.add) + + @unpack_zerodim_and_defer("__radd__") + def __radd__(self, other): + return self._arith_method(other, roperator.radd) + + @unpack_zerodim_and_defer("__sub__") + def __sub__(self, other): + return self._arith_method(other, operator.sub) + + @unpack_zerodim_and_defer("__rsub__") + def __rsub__(self, other): + return self._arith_method(other, roperator.rsub) + + @unpack_zerodim_and_defer("__mul__") + def __mul__(self, other): + return self._arith_method(other, operator.mul) + + @unpack_zerodim_and_defer("__rmul__") + def __rmul__(self, other): + return self._arith_method(other, roperator.rmul) + + @unpack_zerodim_and_defer("__truediv__") + def __truediv__(self, other): + return self._arith_method(other, operator.truediv) + + @unpack_zerodim_and_defer("__rtruediv__") + def __rtruediv__(self, other): + return self._arith_method(other, roperator.rtruediv) + + @unpack_zerodim_and_defer("__floordiv__") + def __floordiv__(self, other): + return self._arith_method(other, operator.floordiv) + + @unpack_zerodim_and_defer("__rfloordiv__") + def __rfloordiv__(self, other): + return self._arith_method(other, roperator.rfloordiv) + + @unpack_zerodim_and_defer("__mod__") + def __mod__(self, other): + return self._arith_method(other, operator.mod) + + @unpack_zerodim_and_defer("__rmod__") + def __rmod__(self, other): + return self._arith_method(other, roperator.rmod) + + @unpack_zerodim_and_defer("__divmod__") + def __divmod__(self, other): + return self._arith_method(other, divmod) + + @unpack_zerodim_and_defer("__rdivmod__") + def __rdivmod__(self, other): + return self._arith_method(other, roperator.rdivmod) + + @unpack_zerodim_and_defer("__pow__") + def __pow__(self, other): + return self._arith_method(other, operator.pow) + + @unpack_zerodim_and_defer("__rpow__") + def __rpow__(self, other): + return self._arith_method(other, roperator.rpow) + + +# ----------------------------------------------------------------------------- +# Helpers to implement __array_ufunc__ + + +def array_ufunc(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any): + """ + Compatibility with numpy ufuncs. + + See also + -------- + numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__ + """ + from pandas.core.frame import ( + DataFrame, + Series, + ) + from pandas.core.generic import NDFrame + from pandas.core.internals import BlockManager + + cls = type(self) + + kwargs = _standardize_out_kwarg(**kwargs) + + # for binary ops, use our custom dunder methods + result = maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs) + if result is not NotImplemented: + return result + + # Determine if we should defer. + no_defer = ( + np.ndarray.__array_ufunc__, + cls.__array_ufunc__, + ) + + for item in inputs: + higher_priority = ( + hasattr(item, "__array_priority__") + and item.__array_priority__ > self.__array_priority__ + ) + has_array_ufunc = ( + hasattr(item, "__array_ufunc__") + and type(item).__array_ufunc__ not in no_defer + and not isinstance(item, self._HANDLED_TYPES) + ) + if higher_priority or has_array_ufunc: + return NotImplemented + + # align all the inputs. + types = tuple(type(x) for x in inputs) + alignable = [x for x, t in zip(inputs, types) if issubclass(t, NDFrame)] + + if len(alignable) > 1: + # This triggers alignment.
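+ # Editorial illustration (not upstream pandas code): with two alignable + # Series, the union of the indexes is taken and both operands are + # reindexed before the ufunc runs, e.g. + # >>> s1 = pd.Series([1, 2], index=["a", "b"]) + # >>> s2 = pd.Series([3, 4], index=["b", "c"]) + # >>> np.add(s1, s2) + # a NaN + # b 5.0 + # c NaN + # dtype: float64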
+ # At the moment, there aren't any ufuncs with more than two inputs + # so this ends up just being x1.index | x2.index, but we write + # it to handle *args. + set_types = set(types) + if len(set_types) > 1 and {DataFrame, Series}.issubset(set_types): + # We currently don't handle ufunc(DataFrame, Series) + # well. Previously this raised an internal ValueError. We might + # support it someday, so raise a NotImplementedError. + raise NotImplementedError( + f"Cannot apply ufunc {ufunc} to mixed DataFrame and Series inputs." + ) + axes = self.axes + for obj in alignable[1:]: + # this relies on the fact that we aren't handling mixed + # series / frame ufuncs. + for i, (ax1, ax2) in enumerate(zip(axes, obj.axes)): + axes[i] = ax1.union(ax2) + + reconstruct_axes = dict(zip(self._AXIS_ORDERS, axes)) + inputs = tuple( + x.reindex(**reconstruct_axes) if issubclass(t, NDFrame) else x + for x, t in zip(inputs, types) + ) + else: + reconstruct_axes = dict(zip(self._AXIS_ORDERS, self.axes)) + + if self.ndim == 1: + names = [getattr(x, "name") for x in inputs if hasattr(x, "name")] + name = names[0] if len(set(names)) == 1 else None + reconstruct_kwargs = {"name": name} + else: + reconstruct_kwargs = {} + + def reconstruct(result): + if ufunc.nout > 1: + # np.modf, np.frexp, np.divmod + return tuple(_reconstruct(x) for x in result) + + return _reconstruct(result) + + def _reconstruct(result): + if lib.is_scalar(result): + return result + + if result.ndim != self.ndim: + if method == "outer": + raise NotImplementedError + return result + if isinstance(result, BlockManager): + # we went through BlockManager.apply e.g. np.sqrt + result = self._constructor_from_mgr(result, axes=result.axes) + else: + # we converted an array, lost our axes + result = self._constructor( + result, **reconstruct_axes, **reconstruct_kwargs, copy=False + ) + # TODO: When we support multiple values in __finalize__, this + # should pass alignable to `__finalize__` instead of self. + # Then `np.add(a, b)` would consider attrs from both a and b + # when a and b are NDFrames. + if len(alignable) == 1: + result = result.__finalize__(self) + return result + + if "out" in kwargs: + # e.g. test_multiindex_get_loc + result = dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs) + return reconstruct(result) + + if method == "reduce": + # e.g. test.series.test_ufunc.test_reduce + result = dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs) + if result is not NotImplemented: + return result + + # We still get here with kwargs `axis` for e.g. np.maximum.accumulate + # and `dtype` and `keepdims` for np.ptp + + if self.ndim > 1 and (len(inputs) > 1 or ufunc.nout > 1): + # Just give up on preserving types in the complex case. + # In theory we could preserve them in both cases: + # * nout>1 is doable if BlockManager.apply took nout and + # returned a Tuple[BlockManager]. + # * len(inputs) > 1 is doable when we know that we have + # aligned blocks / dtypes. + + # e.g. my_ufunc, modf, logaddexp, heaviside, subtract, add + inputs = tuple(np.asarray(x) for x in inputs) + # Note: we can't use default_array_ufunc here bc reindexing means + # that `self` may not be among `inputs` + result = getattr(ufunc, method)(*inputs, **kwargs) + elif self.ndim == 1: + # ufunc(series, ...) + inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs) + result = getattr(ufunc, method)(*inputs, **kwargs) + else: + # ufunc(dataframe) + if method == "__call__" and not kwargs: + # for np.<ufunc>(..)
calls + # kwargs cannot necessarily be handled block-by-block, so only + # take this path if there are no kwargs + mgr = inputs[0]._mgr + result = mgr.apply(getattr(ufunc, method)) + else: + # otherwise specific ufunc methods (eg np.<ufunc>.accumulate(..)) + # Those can have an axis keyword and thus can't be called block-by-block + result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs) + # e.g. np.negative (only one reached), with "where" and "out" in kwargs + + result = reconstruct(result) + return result + + +def _standardize_out_kwarg(**kwargs) -> dict: + """ + If kwargs contain "out1" and "out2", replace that with a tuple "out" + + np.divmod, np.modf, np.frexp can have either `out=(out1, out2)` or + `out1=out1, out2=out2` + """ + if "out" not in kwargs and "out1" in kwargs and "out2" in kwargs: + out1 = kwargs.pop("out1") + out2 = kwargs.pop("out2") + out = (out1, out2) + kwargs["out"] = out + return kwargs + + +def dispatch_ufunc_with_out(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + """ + If we have an `out` keyword, then call the ufunc without `out` and then + set the result into the given `out`. + """ + + # Note: we assume _standardize_out_kwarg has already been called. + out = kwargs.pop("out") + where = kwargs.pop("where", None) + + result = getattr(ufunc, method)(*inputs, **kwargs) + + if result is NotImplemented: + return NotImplemented + + if isinstance(result, tuple): + # i.e. np.divmod, np.modf, np.frexp + if not isinstance(out, tuple) or len(out) != len(result): + raise NotImplementedError + + for arr, res in zip(out, result): + _assign_where(arr, res, where) + + return out + + if isinstance(out, tuple): + if len(out) == 1: + out = out[0] + else: + raise NotImplementedError + + _assign_where(out, result, where) + return out + + +def _assign_where(out, result, where) -> None: + """ + Set a ufunc result into 'out', masking with a 'where' argument if necessary. + """ + if where is None: + # no 'where' arg passed to ufunc + out[:] = result + else: + np.putmask(out, where, result) + + +def default_array_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + """ + Fallback to the behavior we would get if we did not define __array_ufunc__. + + Notes + ----- + We are assuming that `self` is among `inputs`. + """ + if not any(x is self for x in inputs): + raise NotImplementedError + + new_inputs = [x if x is not self else np.asarray(x) for x in inputs] + + return getattr(ufunc, method)(*new_inputs, **kwargs) + + +def dispatch_reduction_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + """ + Dispatch ufunc reductions to self's reduction methods. + """ + assert method == "reduce" + + if len(inputs) != 1 or inputs[0] is not self: + return NotImplemented + + if ufunc.__name__ not in REDUCTION_ALIASES: + return NotImplemented + + method_name = REDUCTION_ALIASES[ufunc.__name__] + + # NB: we are assuming that min/max represent minimum/maximum methods, + # which would not be accurate for e.g. Timestamp.min + if not hasattr(self, method_name): + return NotImplemented + + if self.ndim > 1: + if isinstance(self, ABCNDFrame): + # TODO: test cases where this doesn't hold, i.e. 2D DTA/TDA + kwargs["numeric_only"] = False + + if "axis" not in kwargs: + # For DataFrame reductions we don't want the default axis=0 + # Note: np.min is not a ufunc, but uses array_function_dispatch, + # so calls DataFrame.min (without ever getting here) with the np.min + # default of axis=None, which DataFrame.min catches and changes to axis=0.
+ # np.minimum.reduce(df) gets here bc axis is not in kwargs, + # so we set axis=0 to match the behavior of np.minimum.reduce(df.values) + kwargs["axis"] = 0 + + # By default, numpy's reductions do not skip NaNs, so we have to + # pass skipna=False + return getattr(self, method_name)(skipna=False, **kwargs) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/__init__.py new file mode 100644 index 00000000..245a171f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/__init__.py @@ -0,0 +1,43 @@ +from pandas.core.arrays.arrow import ArrowExtensionArray +from pandas.core.arrays.base import ( + ExtensionArray, + ExtensionOpsMixin, + ExtensionScalarOpsMixin, +) +from pandas.core.arrays.boolean import BooleanArray +from pandas.core.arrays.categorical import Categorical +from pandas.core.arrays.datetimes import DatetimeArray +from pandas.core.arrays.floating import FloatingArray +from pandas.core.arrays.integer import IntegerArray +from pandas.core.arrays.interval import IntervalArray +from pandas.core.arrays.masked import BaseMaskedArray +from pandas.core.arrays.numpy_ import NumpyExtensionArray +from pandas.core.arrays.period import ( + PeriodArray, + period_array, +) +from pandas.core.arrays.sparse import SparseArray +from pandas.core.arrays.string_ import StringArray +from pandas.core.arrays.string_arrow import ArrowStringArray +from pandas.core.arrays.timedeltas import TimedeltaArray + +__all__ = [ + "ArrowExtensionArray", + "ExtensionArray", + "ExtensionOpsMixin", + "ExtensionScalarOpsMixin", + "ArrowStringArray", + "BaseMaskedArray", + "BooleanArray", + "Categorical", + "DatetimeArray", + "FloatingArray", + "IntegerArray", + "IntervalArray", + "NumpyExtensionArray", + "PeriodArray", + "period_array", + "SparseArray", + "StringArray", + "TimedeltaArray", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/_arrow_string_mixins.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/_arrow_string_mixins.py new file mode 100644 index 00000000..63db0334 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/_arrow_string_mixins.py @@ -0,0 +1,84 @@ +from __future__ import annotations + +from typing import Literal + +import numpy as np + +from pandas.compat import pa_version_under7p0 + +if not pa_version_under7p0: + import pyarrow as pa + import pyarrow.compute as pc + + +class ArrowStringArrayMixin: + _pa_array = None + + def __init__(self, *args, **kwargs) -> None: + raise NotImplementedError + + def _str_pad( + self, + width: int, + side: Literal["left", "right", "both"] = "left", + fillchar: str = " ", + ): + if side == "left": + pa_pad = pc.utf8_lpad + elif side == "right": + pa_pad = pc.utf8_rpad + elif side == "both": + pa_pad = pc.utf8_center + else: + raise ValueError( + f"Invalid side: {side}.
Side must be one of 'left', 'right', 'both'" + ) + return type(self)(pa_pad(self._pa_array, width=width, padding=fillchar)) + + def _str_get(self, i: int): + lengths = pc.utf8_length(self._pa_array) + if i >= 0: + out_of_bounds = pc.greater_equal(i, lengths) + start = i + stop = i + 1 + step = 1 + else: + out_of_bounds = pc.greater(-i, lengths) + start = i + stop = i - 1 + step = -1 + not_out_of_bounds = pc.invert(out_of_bounds.fill_null(True)) + selected = pc.utf8_slice_codeunits( + self._pa_array, start=start, stop=stop, step=step + ) + null_value = pa.scalar( + None, type=self._pa_array.type # type: ignore[attr-defined] + ) + result = pc.if_else(not_out_of_bounds, selected, null_value) + return type(self)(result) + + def _str_slice_replace( + self, start: int | None = None, stop: int | None = None, repl: str | None = None + ): + if repl is None: + repl = "" + if start is None: + start = 0 + if stop is None: + stop = np.iinfo(np.int64).max + return type(self)(pc.utf8_replace_slice(self._pa_array, start, stop, repl)) + + def _str_capitalize(self): + return type(self)(pc.utf8_capitalize(self._pa_array)) + + def _str_title(self): + return type(self)(pc.utf8_title(self._pa_array)) + + def _str_swapcase(self): + return type(self)(pc.utf8_swapcase(self._pa_array)) + + def _str_removesuffix(self, suffix: str): + ends_with = pc.ends_with(self._pa_array, pattern=suffix) + removed = pc.utf8_slice_codeunits(self._pa_array, 0, stop=-len(suffix)) + result = pc.if_else(ends_with, removed, self._pa_array) + return type(self)(result) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/_mixins.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/_mixins.py new file mode 100644 index 00000000..6d21f4a1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/_mixins.py @@ -0,0 +1,528 @@ +from __future__ import annotations + +from functools import wraps +from typing import ( + TYPE_CHECKING, + Any, + Literal, + cast, + overload, +) + +import numpy as np + +from pandas._libs import lib +from pandas._libs.arrays import NDArrayBacked +from pandas._typing import ( + ArrayLike, + AxisInt, + Dtype, + F, + FillnaOptions, + PositionalIndexer2D, + PositionalIndexerTuple, + ScalarIndexer, + Self, + SequenceIndexer, + Shape, + TakeIndexer, + npt, +) +from pandas.errors import AbstractMethodError +from pandas.util._decorators import doc +from pandas.util._validators import ( + validate_bool_kwarg, + validate_fillna_kwargs, + validate_insert_loc, +) + +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + ExtensionDtype, + PeriodDtype, +) +from pandas.core.dtypes.missing import array_equivalent + +from pandas.core import missing +from pandas.core.algorithms import ( + take, + unique, + value_counts_internal as value_counts, +) +from pandas.core.array_algos.quantile import quantile_with_mask +from pandas.core.array_algos.transforms import shift +from pandas.core.arrays.base import ExtensionArray +from pandas.core.construction import extract_array +from pandas.core.indexers import check_array_indexer +from pandas.core.sorting import nargminmax + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + NumpySorter, + NumpyValueArrayLike, + ) + + from pandas import Series + + +def ravel_compat(meth: F) -> F: + """ + Decorator to ravel a 2D array before passing it to a cython operation, + then reshape the result to our own shape. 
+ """ + + @wraps(meth) + def method(self, *args, **kwargs): + if self.ndim == 1: + return meth(self, *args, **kwargs) + + flags = self._ndarray.flags + flat = self.ravel("K") + result = meth(flat, *args, **kwargs) + order = "F" if flags.f_contiguous else "C" + return result.reshape(self.shape, order=order) + + return cast(F, method) + + +class NDArrayBackedExtensionArray(NDArrayBacked, ExtensionArray): + """ + ExtensionArray that is backed by a single NumPy ndarray. + """ + + _ndarray: np.ndarray + + # scalar used to denote NA value inside our self._ndarray, e.g. -1 + # for Categorical, iNaT for Period. Outside of object dtype, + # self.isna() should be exactly locations in self._ndarray with + # _internal_fill_value. + _internal_fill_value: Any + + def _box_func(self, x): + """ + Wrap numpy type in our dtype.type if necessary. + """ + return x + + def _validate_scalar(self, value): + # used by NDArrayBackedExtensionIndex.insert + raise AbstractMethodError(self) + + # ------------------------------------------------------------------------ + + def view(self, dtype: Dtype | None = None) -> ArrayLike: + # We handle datetime64, datetime64tz, timedelta64, and period + # dtypes here. Everything else we pass through to the underlying + # ndarray. + if dtype is None or dtype is self.dtype: + return self._from_backing_data(self._ndarray) + + if isinstance(dtype, type): + # we sometimes pass non-dtype objects, e.g np.ndarray; + # pass those through to the underlying ndarray + return self._ndarray.view(dtype) + + dtype = pandas_dtype(dtype) + arr = self._ndarray + + if isinstance(dtype, (PeriodDtype, DatetimeTZDtype)): + cls = dtype.construct_array_type() + return cls(arr.view("i8"), dtype=dtype) + elif dtype == "M8[ns]": + from pandas.core.arrays import DatetimeArray + + return DatetimeArray(arr.view("i8"), dtype=dtype) + elif dtype == "m8[ns]": + from pandas.core.arrays import TimedeltaArray + + return TimedeltaArray(arr.view("i8"), dtype=dtype) + + # error: Argument "dtype" to "view" of "_ArrayOrScalarCommon" has incompatible + # type "Union[ExtensionDtype, dtype[Any]]"; expected "Union[dtype[Any], None, + # type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, + # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" + return arr.view(dtype=dtype) # type: ignore[arg-type] + + def take( + self, + indices: TakeIndexer, + *, + allow_fill: bool = False, + fill_value: Any = None, + axis: AxisInt = 0, + ) -> Self: + if allow_fill: + fill_value = self._validate_scalar(fill_value) + + new_data = take( + self._ndarray, + indices, + allow_fill=allow_fill, + fill_value=fill_value, + axis=axis, + ) + return self._from_backing_data(new_data) + + # ------------------------------------------------------------------------ + + def equals(self, other) -> bool: + if type(self) is not type(other): + return False + if self.dtype != other.dtype: + return False + return bool(array_equivalent(self._ndarray, other._ndarray, dtype_equal=True)) + + @classmethod + def _from_factorized(cls, values, original): + assert values.dtype == original._ndarray.dtype + return original._from_backing_data(values) + + def _values_for_argsort(self) -> np.ndarray: + return self._ndarray + + def _values_for_factorize(self): + return self._ndarray, self._internal_fill_value + + def _hash_pandas_object( + self, *, encoding: str, hash_key: str, categorize: bool + ) -> npt.NDArray[np.uint64]: + from pandas.core.util.hashing import hash_array + + values = self._ndarray + return hash_array( + values, encoding=encoding, 
hash_key=hash_key, categorize=categorize + ) + + # Signature of "argmin" incompatible with supertype "ExtensionArray" + def argmin(self, axis: AxisInt = 0, skipna: bool = True): # type: ignore[override] + # override base class by adding axis keyword + validate_bool_kwarg(skipna, "skipna") + if not skipna and self._hasna: + raise NotImplementedError + return nargminmax(self, "argmin", axis=axis) + + # Signature of "argmax" incompatible with supertype "ExtensionArray" + def argmax(self, axis: AxisInt = 0, skipna: bool = True): # type: ignore[override] + # override base class by adding axis keyword + validate_bool_kwarg(skipna, "skipna") + if not skipna and self._hasna: + raise NotImplementedError + return nargminmax(self, "argmax", axis=axis) + + def unique(self) -> Self: + new_data = unique(self._ndarray) + return self._from_backing_data(new_data) + + @classmethod + @doc(ExtensionArray._concat_same_type) + def _concat_same_type( + cls, + to_concat: Sequence[Self], + axis: AxisInt = 0, + ) -> Self: + if not lib.dtypes_all_equal([x.dtype for x in to_concat]): + dtypes = {str(x.dtype) for x in to_concat} + raise ValueError("to_concat must have the same dtype", dtypes) + + return super()._concat_same_type(to_concat, axis=axis) + + @doc(ExtensionArray.searchsorted) + def searchsorted( + self, + value: NumpyValueArrayLike | ExtensionArray, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + npvalue = self._validate_setitem_value(value) + return self._ndarray.searchsorted(npvalue, side=side, sorter=sorter) + + @doc(ExtensionArray.shift) + def shift(self, periods: int = 1, fill_value=None): + # NB: shift is always along axis=0 + axis = 0 + fill_value = self._validate_scalar(fill_value) + new_values = shift(self._ndarray, periods, axis, fill_value) + + return self._from_backing_data(new_values) + + def __setitem__(self, key, value) -> None: + key = check_array_indexer(self, key) + value = self._validate_setitem_value(value) + self._ndarray[key] = value + + def _validate_setitem_value(self, value): + return value + + @overload + def __getitem__(self, key: ScalarIndexer) -> Any: + ... + + @overload + def __getitem__( + self, + key: SequenceIndexer | PositionalIndexerTuple, + ) -> Self: + ... 
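+ # Editorial note (not upstream pandas code): per the overloads above, a + # scalar key returns a boxed value while a sequence key returns an array + # of the same type, e.g. for Categorical (an NDArrayBacked subclass): + # >>> cat = pd.Categorical(["a", "b", "a"]) + # >>> cat[0] + # 'a' + # >>> cat[[0, 2]] + # ['a', 'a'] + # Categories (2, object): ['a', 'b']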
+ + def __getitem__( + self, + key: PositionalIndexer2D, + ) -> Self | Any: + if lib.is_integer(key): + # fast-path + result = self._ndarray[key] + if self.ndim == 1: + return self._box_func(result) + return self._from_backing_data(result) + + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type "Union[int, slice, ndarray]") + key = extract_array(key, extract_numpy=True) # type: ignore[assignment] + key = check_array_indexer(self, key) + result = self._ndarray[key] + if lib.is_scalar(result): + return self._box_func(result) + + result = self._from_backing_data(result) + return result + + def _fill_mask_inplace( + self, method: str, limit: int | None, mask: npt.NDArray[np.bool_] + ) -> None: + # (for now) when self.ndim == 2, we assume axis=0 + func = missing.get_fill_func(method, ndim=self.ndim) + func(self._ndarray.T, limit=limit, mask=mask.T) + + def _pad_or_backfill( + self, *, method: FillnaOptions, limit: int | None = None, copy: bool = True + ) -> Self: + mask = self.isna() + if mask.any(): + # (for now) when self.ndim == 2, we assume axis=0 + func = missing.get_fill_func(method, ndim=self.ndim) + + npvalues = self._ndarray.T + if copy: + npvalues = npvalues.copy() + func(npvalues, limit=limit, mask=mask.T) + npvalues = npvalues.T + + if copy: + new_values = self._from_backing_data(npvalues) + else: + new_values = self + + else: + if copy: + new_values = self.copy() + else: + new_values = self + return new_values + + @doc(ExtensionArray.fillna) + def fillna( + self, value=None, method=None, limit: int | None = None, copy: bool = True + ) -> Self: + value, method = validate_fillna_kwargs( + value, method, validate_scalar_dict_value=False + ) + + mask = self.isna() + # error: Argument 2 to "check_value_size" has incompatible type + # "ExtensionArray"; expected "ndarray" + value = missing.check_value_size( + value, mask, len(self) # type: ignore[arg-type] + ) + + if mask.any(): + if method is not None: + # (for now) when self.ndim == 2, we assume axis=0 + func = missing.get_fill_func(method, ndim=self.ndim) + npvalues = self._ndarray.T + if copy: + npvalues = npvalues.copy() + func(npvalues, limit=limit, mask=mask.T) + npvalues = npvalues.T + + # TODO: NumpyExtensionArray didn't used to copy, need tests + # for this + new_values = self._from_backing_data(npvalues) + else: + # fill with value + if copy: + new_values = self.copy() + else: + new_values = self[:] + new_values[mask] = value + else: + # We validate the fill_value even if there is nothing to fill + if value is not None: + self._validate_setitem_value(value) + + if not copy: + new_values = self[:] + else: + new_values = self.copy() + return new_values + + # ------------------------------------------------------------------------ + # Reductions + + def _wrap_reduction_result(self, axis: AxisInt | None, result): + if axis is None or self.ndim == 1: + return self._box_func(result) + return self._from_backing_data(result) + + # ------------------------------------------------------------------------ + # __array_function__ methods + + def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: + """ + Analogue to np.putmask(self, mask, value) + + Parameters + ---------- + mask : np.ndarray[bool] + value : scalar or listlike + + Raises + ------ + TypeError + If value cannot be cast to self.dtype. 
+ """ + value = self._validate_setitem_value(value) + + np.putmask(self._ndarray, mask, value) + + def _where(self: Self, mask: npt.NDArray[np.bool_], value) -> Self: + """ + Analogue to np.where(mask, self, value) + + Parameters + ---------- + mask : np.ndarray[bool] + value : scalar or listlike + + Raises + ------ + TypeError + If value cannot be cast to self.dtype. + """ + value = self._validate_setitem_value(value) + + res_values = np.where(mask, self._ndarray, value) + return self._from_backing_data(res_values) + + # ------------------------------------------------------------------------ + # Index compat methods + + def insert(self, loc: int, item) -> Self: + """ + Make new ExtensionArray inserting new item at location. Follows + Python list.append semantics for negative values. + + Parameters + ---------- + loc : int + item : object + + Returns + ------- + type(self) + """ + loc = validate_insert_loc(loc, len(self)) + + code = self._validate_scalar(item) + + new_vals = np.concatenate( + ( + self._ndarray[:loc], + np.asarray([code], dtype=self._ndarray.dtype), + self._ndarray[loc:], + ) + ) + return self._from_backing_data(new_vals) + + # ------------------------------------------------------------------------ + # Additional array methods + # These are not part of the EA API, but we implement them because + # pandas assumes they're there. + + def value_counts(self, dropna: bool = True) -> Series: + """ + Return a Series containing counts of unique values. + + Parameters + ---------- + dropna : bool, default True + Don't include counts of NA values. + + Returns + ------- + Series + """ + if self.ndim != 1: + raise NotImplementedError + + from pandas import ( + Index, + Series, + ) + + if dropna: + # error: Unsupported operand type for ~ ("ExtensionArray") + values = self[~self.isna()]._ndarray # type: ignore[operator] + else: + values = self._ndarray + + result = value_counts(values, sort=False, dropna=dropna) + + index_arr = self._from_backing_data(np.asarray(result.index._data)) + index = Index(index_arr, name=result.index.name) + return Series(result._values, index=index, name=result.name, copy=False) + + def _quantile( + self, + qs: npt.NDArray[np.float64], + interpolation: str, + ) -> Self: + # TODO: disable for Categorical if not ordered? + + mask = np.asarray(self.isna()) + arr = self._ndarray + fill_value = self._internal_fill_value + + res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation) + + res_values = self._cast_quantile_result(res_values) + return self._from_backing_data(res_values) + + # TODO: see if we can share this with other dispatch-wrapping methods + def _cast_quantile_result(self, res_values: np.ndarray) -> np.ndarray: + """ + Cast the result of quantile_with_mask to an appropriate dtype + to pass to _from_backing_data in _quantile. 
+ """ + return res_values + + # ------------------------------------------------------------------------ + # numpy-like methods + + @classmethod + def _empty(cls, shape: Shape, dtype: ExtensionDtype) -> Self: + """ + Analogous to np.empty(shape, dtype=dtype) + + Parameters + ---------- + shape : tuple[int] + dtype : ExtensionDtype + """ + # The base implementation uses a naive approach to find the dtype + # for the backing ndarray + arr = cls._from_sequence([], dtype=dtype) + backing = np.empty(shape, dtype=arr._ndarray.dtype) + return arr._from_backing_data(backing) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/_ranges.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/_ranges.py new file mode 100644 index 00000000..5f040918 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/_ranges.py @@ -0,0 +1,209 @@ +""" +Helper functions to generate range-like data for DatetimeArray +(and possibly TimedeltaArray/PeriodArray) +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs.lib import i8max +from pandas._libs.tslibs import ( + BaseOffset, + OutOfBoundsDatetime, + Timedelta, + Timestamp, + iNaT, +) + +if TYPE_CHECKING: + from pandas._typing import npt + + +def generate_regular_range( + start: Timestamp | Timedelta | None, + end: Timestamp | Timedelta | None, + periods: int | None, + freq: BaseOffset, + unit: str = "ns", +) -> npt.NDArray[np.intp]: + """ + Generate a range of dates or timestamps with the spans between dates + described by the given `freq` DateOffset. + + Parameters + ---------- + start : Timedelta, Timestamp or None + First point of produced date range. + end : Timedelta, Timestamp or None + Last point of produced date range. + periods : int or None + Number of periods in produced date range. + freq : Tick + Describes space between dates in produced date range. + unit : str, default "ns" + The resolution the output is meant to represent. + + Returns + ------- + ndarray[np.int64] + Representing the given resolution. + """ + istart = start._value if start is not None else None + iend = end._value if end is not None else None + freq.nanos # raises if non-fixed frequency + td = Timedelta(freq) + b: int | np.int64 | np.uint64 + e: int | np.int64 | np.uint64 + try: + td = td.as_unit( # pyright: ignore[reportGeneralTypeIssues] + unit, round_ok=False + ) + except ValueError as err: + raise ValueError( + f"freq={freq} is incompatible with unit={unit}. " + "Use a lower freq or a higher unit instead." + ) from err + stride = int(td._value) + + if periods is None and istart is not None and iend is not None: + b = istart + # cannot just use e = Timestamp(end) + 1 because arange breaks when + # stride is too large, see GH10887 + e = b + (iend - b) // stride * stride + stride // 2 + 1 + elif istart is not None and periods is not None: + b = istart + e = _generate_range_overflow_safe(b, periods, stride, side="start") + elif iend is not None and periods is not None: + e = iend + stride + b = _generate_range_overflow_safe(e, periods, stride, side="end") + else: + raise ValueError( + "at least 'start' or 'end' should be specified if a 'period' is given." + ) + + with np.errstate(over="raise"): + # If the range is sufficiently large, np.arange may overflow + # and incorrectly return an empty array if not caught. 
+ try: + values = np.arange(b, e, stride, dtype=np.int64) + except FloatingPointError: + xdr = [b] + while xdr[-1] != e: + xdr.append(xdr[-1] + stride) + values = np.array(xdr[:-1], dtype=np.int64) + return values + + +def _generate_range_overflow_safe( + endpoint: int, periods: int, stride: int, side: str = "start" +) -> np.int64 | np.uint64: + """ + Calculate the second endpoint for passing to np.arange, checking + to avoid an integer overflow. Catch OverflowError and re-raise + as OutOfBoundsDatetime. + + Parameters + ---------- + endpoint : int + nanosecond timestamp of the known endpoint of the desired range + periods : int + number of periods in the desired range + stride : int + nanoseconds between periods in the desired range + side : {'start', 'end'} + which end of the range `endpoint` refers to + + Returns + ------- + other_end : np.int64 | np.uint64 + + Raises + ------ + OutOfBoundsDatetime + """ + # GH#14187 raise instead of incorrectly wrapping around + assert side in ["start", "end"] + + i64max = np.uint64(i8max) + msg = f"Cannot generate range with {side}={endpoint} and periods={periods}" + + with np.errstate(over="raise"): + # if periods * strides cannot be multiplied within the *uint64* bounds, + # we cannot salvage the operation by recursing, so raise + try: + addend = np.uint64(periods) * np.uint64(np.abs(stride)) + except FloatingPointError as err: + raise OutOfBoundsDatetime(msg) from err + + if np.abs(addend) <= i64max: + # relatively easy case without casting concerns + return _generate_range_overflow_safe_signed(endpoint, periods, stride, side) + + elif (endpoint > 0 and side == "start" and stride > 0) or ( + endpoint < 0 < stride and side == "end" + ): + # no chance of not-overflowing + raise OutOfBoundsDatetime(msg) + + elif side == "end" and endpoint - stride <= i64max < endpoint: + # in _generate_regular_range we added `stride` thereby overflowing + # the bounds. Adjust to fix this. + return _generate_range_overflow_safe( + endpoint - stride, periods - 1, stride, side + ) + + # split into smaller pieces + mid_periods = periods // 2 + remaining = periods - mid_periods + assert 0 < remaining < periods, (remaining, periods, endpoint, stride) + + midpoint = int(_generate_range_overflow_safe(endpoint, mid_periods, stride, side)) + return _generate_range_overflow_safe(midpoint, remaining, stride, side) + + +def _generate_range_overflow_safe_signed( + endpoint: int, periods: int, stride: int, side: str +) -> np.int64 | np.uint64: + """ + A special case for _generate_range_overflow_safe where `periods * stride` + can be calculated without overflowing int64 bounds. + """ + assert side in ["start", "end"] + if side == "end": + stride *= -1 + + with np.errstate(over="raise"): + addend = np.int64(periods) * np.int64(stride) + try: + # easy case with no overflows + result = np.int64(endpoint) + addend + if result == iNaT: + # Putting this into a DatetimeArray/TimedeltaArray + # would incorrectly be interpreted as NaT + raise OverflowError + return result + except (FloatingPointError, OverflowError): + # with endpoint negative and addend positive we risk + # FloatingPointError; with reversed signed we risk OverflowError + pass + + # if stride and endpoint had opposite signs, then endpoint + addend + # should never overflow. 
so they must have the same signs + assert (stride > 0 and endpoint >= 0) or (stride < 0 and endpoint <= 0) + + if stride > 0: + # watch out for very special case in which we just slightly + # exceed implementation bounds, but when passing the result to + # np.arange will get a result slightly within the bounds + + uresult = np.uint64(endpoint) + np.uint64(addend) + i64max = np.uint64(i8max) + assert uresult > i64max + if uresult <= i64max + np.uint64(stride): + return uresult + + raise OutOfBoundsDatetime( + f"Cannot generate range with {side}={endpoint} and periods={periods}" + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/__init__.py new file mode 100644 index 00000000..58b268cb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/__init__.py @@ -0,0 +1,3 @@ +from pandas.core.arrays.arrow.array import ArrowExtensionArray + +__all__ = ["ArrowExtensionArray"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/_arrow_utils.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/_arrow_utils.py new file mode 100644 index 00000000..2a053fac --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/_arrow_utils.py @@ -0,0 +1,66 @@ +from __future__ import annotations + +import warnings + +import numpy as np +import pyarrow + +from pandas.errors import PerformanceWarning +from pandas.util._exceptions import find_stack_level + + +def fallback_performancewarning(version: str | None = None) -> None: + """ + Raise a PerformanceWarning for falling back to ExtensionArray's + non-pyarrow method + """ + msg = "Falling back on a non-pyarrow code path which may decrease performance." + if version is not None: + msg += f" Upgrade to pyarrow >={version} to possibly suppress this warning." + warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level()) + + +def pyarrow_array_to_numpy_and_mask( + arr, dtype: np.dtype +) -> tuple[np.ndarray, np.ndarray]: + """ + Convert a primitive pyarrow.Array to a numpy array and boolean mask based + on the buffers of the Array. + + At the moment pyarrow.BooleanArray is not supported. + + Parameters + ---------- + arr : pyarrow.Array + dtype : numpy.dtype + + Returns + ------- + (data, mask) + Tuple of two numpy arrays with the raw data (with specified dtype) and + a boolean mask (validity mask, so False means missing) + """ + dtype = np.dtype(dtype) + + if pyarrow.types.is_null(arr.type): + # No initialization of data is needed since everything is null + data = np.empty(len(arr), dtype=dtype) + mask = np.zeros(len(arr), dtype=bool) + return data, mask + buflist = arr.buffers() + # Since Arrow buffers might contain padding and the data might be offset, + # the buffer gets sliced here before handing it to numpy. 
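+ # Editorial worked example (not upstream pandas code): a sliced int64 + # array with arr.offset == 2 and len(arr) == 3 has dtype.itemsize == 8, + # so offset = 16 and length = 24, i.e. bytes [16:40) of the data buffer.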
+ # See also https://github.com/pandas-dev/pandas/issues/40896 + offset = arr.offset * dtype.itemsize + length = len(arr) * dtype.itemsize + data_buf = buflist[1][offset : offset + length] + data = np.frombuffer(data_buf, dtype=dtype) + bitmask = buflist[0] + if bitmask is not None: + mask = pyarrow.BooleanArray.from_buffers( + pyarrow.bool_(), len(arr), [None, bitmask], offset=arr.offset + ) + mask = np.asarray(mask) + else: + mask = np.ones(len(arr), dtype=bool) + return data, mask diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/array.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/array.py new file mode 100644 index 00000000..119f37ce --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/array.py @@ -0,0 +1,2631 @@ +from __future__ import annotations + +import functools +import operator +import re +import textwrap +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + cast, +) +import unicodedata + +import numpy as np + +from pandas._libs import lib +from pandas._libs.tslibs import ( + Timedelta, + Timestamp, + timezones, +) +from pandas.compat import ( + pa_version_under7p0, + pa_version_under8p0, + pa_version_under9p0, + pa_version_under11p0, + pa_version_under13p0, +) +from pandas.util._decorators import doc +from pandas.util._validators import validate_fillna_kwargs + +from pandas.core.dtypes.cast import infer_dtype_from_scalar +from pandas.core.dtypes.common import ( + CategoricalDtype, + is_array_like, + is_bool_dtype, + is_integer, + is_list_like, + is_object_dtype, + is_scalar, +) +from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.missing import isna + +from pandas.core import ( + missing, + roperator, +) +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays._arrow_string_mixins import ArrowStringArrayMixin +from pandas.core.arrays.base import ( + ExtensionArray, + ExtensionArraySupportsAnyAll, +) +from pandas.core.arrays.masked import BaseMaskedArray +from pandas.core.arrays.string_ import StringDtype +import pandas.core.common as com +from pandas.core.indexers import ( + check_array_indexer, + unpack_tuple_and_ellipses, + validate_indices, +) +from pandas.core.strings.base import BaseStringArrayMethods + +from pandas.io._util import _arrow_dtype_mapping +from pandas.tseries.frequencies import to_offset + +if not pa_version_under7p0: + import pyarrow as pa + import pyarrow.compute as pc + + from pandas.core.dtypes.dtypes import ArrowDtype + + ARROW_CMP_FUNCS = { + "eq": pc.equal, + "ne": pc.not_equal, + "lt": pc.less, + "gt": pc.greater, + "le": pc.less_equal, + "ge": pc.greater_equal, + } + + ARROW_LOGICAL_FUNCS = { + "and_": pc.and_kleene, + "rand_": lambda x, y: pc.and_kleene(y, x), + "or_": pc.or_kleene, + "ror_": lambda x, y: pc.or_kleene(y, x), + "xor": pc.xor, + "rxor": lambda x, y: pc.xor(y, x), + } + + ARROW_BIT_WISE_FUNCS = { + "and_": pc.bit_wise_and, + "rand_": lambda x, y: pc.bit_wise_and(y, x), + "or_": pc.bit_wise_or, + "ror_": lambda x, y: pc.bit_wise_or(y, x), + "xor": pc.bit_wise_xor, + "rxor": lambda x, y: pc.bit_wise_xor(y, x), + } + + def cast_for_truediv( + arrow_array: pa.ChunkedArray, pa_object: pa.Array | pa.Scalar + ) -> pa.ChunkedArray: + # Ensure int / int -> float mirroring Python/Numpy behavior + # as pc.divide_checked(int, int) -> int + if pa.types.is_integer(arrow_array.type) and pa.types.is_integer( + pa_object.type + ): + return arrow_array.cast(pa.float64()) + return arrow_array + + def floordiv_compat( + 
left: pa.ChunkedArray | pa.Array | pa.Scalar, + right: pa.ChunkedArray | pa.Array | pa.Scalar, + ) -> pa.ChunkedArray: + # Ensure int // int -> int mirroring Python/Numpy behavior + # as pc.floor(pc.divide_checked(int, int)) -> float + converted_left = cast_for_truediv(left, right) + result = pc.floor(pc.divide(converted_left, right)) + if pa.types.is_integer(left.type) and pa.types.is_integer(right.type): + result = result.cast(left.type) + return result + + ARROW_ARITHMETIC_FUNCS = { + "add": pc.add_checked, + "radd": lambda x, y: pc.add_checked(y, x), + "sub": pc.subtract_checked, + "rsub": lambda x, y: pc.subtract_checked(y, x), + "mul": pc.multiply_checked, + "rmul": lambda x, y: pc.multiply_checked(y, x), + "truediv": lambda x, y: pc.divide(cast_for_truediv(x, y), y), + "rtruediv": lambda x, y: pc.divide(y, cast_for_truediv(x, y)), + "floordiv": lambda x, y: floordiv_compat(x, y), + "rfloordiv": lambda x, y: floordiv_compat(y, x), + "mod": NotImplemented, + "rmod": NotImplemented, + "divmod": NotImplemented, + "rdivmod": NotImplemented, + "pow": pc.power_checked, + "rpow": lambda x, y: pc.power_checked(y, x), + } + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + ArrayLike, + AxisInt, + Dtype, + FillnaOptions, + Iterator, + NpDtype, + NumpySorter, + NumpyValueArrayLike, + PositionalIndexer, + Scalar, + Self, + SortKind, + TakeIndexer, + TimeAmbiguous, + TimeNonexistent, + npt, + ) + + from pandas import Series + from pandas.core.arrays.datetimes import DatetimeArray + from pandas.core.arrays.timedeltas import TimedeltaArray + + +def get_unit_from_pa_dtype(pa_dtype): + # https://github.com/pandas-dev/pandas/pull/50998#discussion_r1100344804 + if pa_version_under11p0: + unit = str(pa_dtype).split("[", 1)[-1][:-1] + if unit not in ["s", "ms", "us", "ns"]: + raise ValueError(pa_dtype) + return unit + return pa_dtype.unit + + +def to_pyarrow_type( + dtype: ArrowDtype | pa.DataType | Dtype | None, +) -> pa.DataType | None: + """ + Convert dtype to a pyarrow type instance. + """ + if isinstance(dtype, ArrowDtype): + return dtype.pyarrow_dtype + elif isinstance(dtype, pa.DataType): + return dtype + elif isinstance(dtype, DatetimeTZDtype): + return pa.timestamp(dtype.unit, dtype.tz) + elif dtype: + try: + # Accepts python types too + # Doesn't handle all numpy types + return pa.from_numpy_dtype(dtype) + except pa.ArrowNotImplementedError: + pass + return None + + +class ArrowExtensionArray( + OpsMixin, + ExtensionArraySupportsAnyAll, + ArrowStringArrayMixin, + BaseStringArrayMethods, +): + """ + Pandas ExtensionArray backed by a PyArrow ChunkedArray. + + .. warning:: + + ArrowExtensionArray is considered experimental. The implementation and + parts of the API may change without warning. + + Parameters + ---------- + values : pyarrow.Array or pyarrow.ChunkedArray + + Attributes + ---------- + None + + Methods + ------- + None + + Returns + ------- + ArrowExtensionArray + + Notes + ----- + Most methods are implemented using `pyarrow compute functions. <https://arrow.apache.org/docs/python/api/compute.html>`__ + Some methods may either raise an exception or raise a ``PerformanceWarning`` if an + associated compute function is not available based on the installed version of PyArrow. + + Please install the latest version of PyArrow to enable the best functionality and avoid + potential bugs in prior versions of PyArrow.
+ + Examples + -------- + Create an ArrowExtensionArray with :func:`pandas.array`: + + >>> pd.array([1, 1, None], dtype="int64[pyarrow]") + <ArrowExtensionArray> + [1, 1, <NA>] + Length: 3, dtype: int64[pyarrow] + """ # noqa: E501 (http link too long) + + _pa_array: pa.ChunkedArray + _dtype: ArrowDtype + + def __init__(self, values: pa.Array | pa.ChunkedArray) -> None: + if pa_version_under7p0: + msg = "pyarrow>=7.0.0 is required for PyArrow backed ArrowExtensionArray." + raise ImportError(msg) + if isinstance(values, pa.Array): + self._pa_array = pa.chunked_array([values]) + elif isinstance(values, pa.ChunkedArray): + self._pa_array = values + else: + raise ValueError( + f"Unsupported type '{type(values)}' for ArrowExtensionArray" + ) + self._dtype = ArrowDtype(self._pa_array.type) + + @classmethod + def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False): + """ + Construct a new ExtensionArray from a sequence of scalars. + """ + pa_type = to_pyarrow_type(dtype) + pa_array = cls._box_pa_array(scalars, pa_type=pa_type, copy=copy) + arr = cls(pa_array) + return arr + + @classmethod + def _from_sequence_of_strings( + cls, strings, *, dtype: Dtype | None = None, copy: bool = False + ): + """ + Construct a new ExtensionArray from a sequence of strings. + """ + pa_type = to_pyarrow_type(dtype) + if ( + pa_type is None + or pa.types.is_binary(pa_type) + or pa.types.is_string(pa_type) + ): + # pa_type is None: Let pa.array infer + # pa_type is string/binary: scalars already correct type + scalars = strings + elif pa.types.is_timestamp(pa_type): + from pandas.core.tools.datetimes import to_datetime + + scalars = to_datetime(strings, errors="raise") + elif pa.types.is_date(pa_type): + from pandas.core.tools.datetimes import to_datetime + + scalars = to_datetime(strings, errors="raise").date + elif pa.types.is_duration(pa_type): + from pandas.core.tools.timedeltas import to_timedelta + + scalars = to_timedelta(strings, errors="raise") + if pa_type.unit != "ns": + # GH51175: test_from_sequence_of_strings_pa_array + # attempt to parse as int64 reflecting pyarrow's + # duration to string casting behavior + mask = isna(scalars) + if not isinstance(strings, (pa.Array, pa.ChunkedArray)): + strings = pa.array(strings, type=pa.string(), from_pandas=True) + strings = pc.if_else(mask, None, strings) + try: + scalars = strings.cast(pa.int64()) + except pa.ArrowInvalid: + pass + elif pa.types.is_time(pa_type): + from pandas.core.tools.times import to_time + + # "coerce" to allow "null times" (None) to not raise + scalars = to_time(strings, errors="coerce") + elif pa.types.is_boolean(pa_type): + # pyarrow string->bool casting is case-insensitive: + # "true" or "1" -> True + # "false" or "0" -> False + # Note: BooleanArray was previously used to parse these strings + # and allows "1.0" and "0.0". Pyarrow casting does not support + # this, but we allow it here. + if isinstance(strings, (pa.Array, pa.ChunkedArray)): + scalars = strings + else: + scalars = pa.array(strings, type=pa.string(), from_pandas=True) + scalars = pc.if_else(pc.equal(scalars, "1.0"), "1", scalars) + scalars = pc.if_else(pc.equal(scalars, "0.0"), "0", scalars) + scalars = scalars.cast(pa.bool_()) + elif ( + pa.types.is_integer(pa_type) + or pa.types.is_floating(pa_type) + or pa.types.is_decimal(pa_type) + ): + from pandas.core.tools.numeric import to_numeric + + scalars = to_numeric(strings, errors="raise") + else: + raise NotImplementedError( + f"Converting strings to {pa_type} is not implemented."
+ ) + return cls._from_sequence(scalars, dtype=pa_type, copy=copy) + + @classmethod + def _box_pa( + cls, value, pa_type: pa.DataType | None = None + ) -> pa.Array | pa.ChunkedArray | pa.Scalar: + """ + Box value into a pyarrow Array, ChunkedArray or Scalar. + + Parameters + ---------- + value : any + pa_type : pa.DataType | None + + Returns + ------- + pa.Array or pa.ChunkedArray or pa.Scalar + """ + if isinstance(value, pa.Scalar) or not is_list_like(value): + return cls._box_pa_scalar(value, pa_type) + return cls._box_pa_array(value, pa_type) + + @classmethod + def _box_pa_scalar(cls, value, pa_type: pa.DataType | None = None) -> pa.Scalar: + """ + Box value into a pyarrow Scalar. + + Parameters + ---------- + value : any + pa_type : pa.DataType | None + + Returns + ------- + pa.Scalar + """ + if isinstance(value, pa.Scalar): + pa_scalar = value + elif isna(value): + pa_scalar = pa.scalar(None, type=pa_type) + else: + # Workaround https://github.com/apache/arrow/issues/37291 + if isinstance(value, Timedelta): + if pa_type is None: + pa_type = pa.duration(value.unit) + elif value.unit != pa_type.unit: + value = value.as_unit(pa_type.unit) + value = value._value + elif isinstance(value, Timestamp): + if pa_type is None: + pa_type = pa.timestamp(value.unit, tz=value.tz) + elif value.unit != pa_type.unit: + value = value.as_unit(pa_type.unit) + value = value._value + + pa_scalar = pa.scalar(value, type=pa_type, from_pandas=True) + + if pa_type is not None and pa_scalar.type != pa_type: + pa_scalar = pa_scalar.cast(pa_type) + + return pa_scalar + + @classmethod + def _box_pa_array( + cls, value, pa_type: pa.DataType | None = None, copy: bool = False + ) -> pa.Array | pa.ChunkedArray: + """ + Box value into a pyarrow Array or ChunkedArray. + + Parameters + ---------- + value : Sequence + pa_type : pa.DataType | None + + Returns + ------- + pa.Array or pa.ChunkedArray + """ + if isinstance(value, cls): + pa_array = value._pa_array + elif isinstance(value, (pa.Array, pa.ChunkedArray)): + pa_array = value + elif isinstance(value, BaseMaskedArray): + # GH 52625 + if copy: + value = value.copy() + pa_array = value.__arrow_array__() + else: + if ( + isinstance(value, np.ndarray) + and pa_type is not None + and ( + pa.types.is_large_binary(pa_type) + or pa.types.is_large_string(pa_type) + ) + ): + # See https://github.com/apache/arrow/issues/35289 + value = value.tolist() + elif copy and is_array_like(value): + # pa array should not get updated when numpy array is updated + value = value.copy() + + if ( + pa_type is not None + and pa.types.is_duration(pa_type) + and (not isinstance(value, np.ndarray) or value.dtype.kind not in "mi") + ): + # Workaround https://github.com/apache/arrow/issues/37291 + from pandas.core.tools.timedeltas import to_timedelta + + value = to_timedelta(value, unit=pa_type.unit).as_unit(pa_type.unit) + value = value.to_numpy() + + try: + pa_array = pa.array(value, type=pa_type, from_pandas=True) + except (pa.ArrowInvalid, pa.ArrowTypeError): + # GH50430: let pyarrow infer type, then cast + pa_array = pa.array(value, from_pandas=True) + + if pa_type is None and pa.types.is_duration(pa_array.type): + # Workaround https://github.com/apache/arrow/issues/37291 + from pandas.core.tools.timedeltas import to_timedelta + + value = to_timedelta(value) + value = value.to_numpy() + pa_array = pa.array(value, type=pa_type, from_pandas=True) + + if pa.types.is_duration(pa_array.type) and pa_array.null_count > 0: + # GH52843: upstream bug for duration types when originally + # constructed 
with data containing numpy NaT. + # https://github.com/apache/arrow/issues/35088 + arr = cls(pa_array) + arr = arr.fillna(arr.dtype.na_value) + pa_array = arr._pa_array + + if pa_type is not None and pa_array.type != pa_type: + if pa.types.is_dictionary(pa_type): + pa_array = pa_array.dictionary_encode() + else: + pa_array = pa_array.cast(pa_type) + + return pa_array + + def __getitem__(self, item: PositionalIndexer): + """Select a subset of self. + + Parameters + ---------- + item : int, slice, or ndarray + * int: The position in 'self' to get. + * slice: A slice object, where 'start', 'stop', and 'step' are + integers or None + * ndarray: A 1-d boolean NumPy ndarray the same length as 'self' + + Returns + ------- + item : scalar or ExtensionArray + + Notes + ----- + For scalar ``item``, return a scalar value suitable for the array's + type. This should be an instance of ``self.dtype.type``. + For slice ``key``, return an instance of ``ExtensionArray``, even + if the slice is length 0 or 1. + For a boolean mask, return an instance of ``ExtensionArray``, filtered + to the values where ``item`` is True. + """ + item = check_array_indexer(self, item) + + if isinstance(item, np.ndarray): + if not len(item): + # Removable once we migrate StringDtype[pyarrow] to ArrowDtype[string] + if self._dtype.name == "string" and self._dtype.storage in ( + "pyarrow", + "pyarrow_numpy", + ): + pa_dtype = pa.string() + else: + pa_dtype = self._dtype.pyarrow_dtype + return type(self)(pa.chunked_array([], type=pa_dtype)) + elif item.dtype.kind in "iu": + return self.take(item) + elif item.dtype.kind == "b": + return type(self)(self._pa_array.filter(item)) + else: + raise IndexError( + "Only integers, slices and integer or " + "boolean arrays are valid indices." + ) + elif isinstance(item, tuple): + item = unpack_tuple_and_ellipses(item) + + if item is Ellipsis: + # TODO: should be handled by pyarrow? + item = slice(None) + + if is_scalar(item) and not is_integer(item): + # e.g. "foo" or 2.5 + # exception message copied from numpy + raise IndexError( + r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis " + r"(`None`) and integer or boolean arrays are valid indices" + ) + # We are not an array indexer, so maybe e.g. a slice or integer + # indexer. We dispatch to pyarrow. + if isinstance(item, slice): + # Arrow bug https://github.com/apache/arrow/issues/38768 + if item.start == item.stop: + pass + elif ( + item.stop is not None + and item.stop < -len(self) + and item.step is not None + and item.step < 0 + ): + item = slice(item.start, None, item.step) + + value = self._pa_array[item] + if isinstance(value, pa.ChunkedArray): + return type(self)(value) + else: + pa_type = self._pa_array.type + scalar = value.as_py() + if scalar is None: + return self._dtype.na_value + elif pa.types.is_timestamp(pa_type) and pa_type.unit != "ns": + # GH 53326 + return Timestamp(scalar).as_unit(pa_type.unit) + elif pa.types.is_duration(pa_type) and pa_type.unit != "ns": + # GH 53326 + return Timedelta(scalar).as_unit(pa_type.unit) + else: + return scalar + + def __iter__(self) -> Iterator[Any]: + """ + Iterate over elements of the array. 
+ """ + na_value = self._dtype.na_value + # GH 53326 + pa_type = self._pa_array.type + box_timestamp = pa.types.is_timestamp(pa_type) and pa_type.unit != "ns" + box_timedelta = pa.types.is_duration(pa_type) and pa_type.unit != "ns" + for value in self._pa_array: + val = value.as_py() + if val is None: + yield na_value + elif box_timestamp: + yield Timestamp(val).as_unit(pa_type.unit) + elif box_timedelta: + yield Timedelta(val).as_unit(pa_type.unit) + else: + yield val + + def __arrow_array__(self, type=None): + """Convert myself to a pyarrow ChunkedArray.""" + return self._pa_array + + def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: + """Correctly construct numpy arrays when passed to `np.asarray()`.""" + return self.to_numpy(dtype=dtype) + + def __invert__(self) -> Self: + # This is a bit wise op for integer types + if pa.types.is_integer(self._pa_array.type): + return type(self)(pc.bit_wise_not(self._pa_array)) + else: + return type(self)(pc.invert(self._pa_array)) + + def __neg__(self) -> Self: + return type(self)(pc.negate_checked(self._pa_array)) + + def __pos__(self) -> Self: + return type(self)(self._pa_array) + + def __abs__(self) -> Self: + return type(self)(pc.abs_checked(self._pa_array)) + + # GH 42600: __getstate__/__setstate__ not necessary once + # https://issues.apache.org/jira/browse/ARROW-10739 is addressed + def __getstate__(self): + state = self.__dict__.copy() + state["_pa_array"] = self._pa_array.combine_chunks() + return state + + def __setstate__(self, state) -> None: + if "_data" in state: + data = state.pop("_data") + else: + data = state["_pa_array"] + state["_pa_array"] = pa.chunked_array(data) + self.__dict__.update(state) + + def _cmp_method(self, other, op): + pc_func = ARROW_CMP_FUNCS[op.__name__] + if isinstance( + other, (ArrowExtensionArray, np.ndarray, list, BaseMaskedArray) + ) or isinstance(getattr(other, "dtype", None), CategoricalDtype): + result = pc_func(self._pa_array, self._box_pa(other)) + elif is_scalar(other): + try: + result = pc_func(self._pa_array, self._box_pa(other)) + except (pa.lib.ArrowNotImplementedError, pa.lib.ArrowInvalid): + mask = isna(self) | isna(other) + valid = ~mask + result = np.zeros(len(self), dtype="bool") + result[valid] = op(np.array(self)[valid], other) + result = pa.array(result, type=pa.bool_()) + result = pc.if_else(valid, result, None) + else: + raise NotImplementedError( + f"{op.__name__} not implemented for {type(other)}" + ) + return ArrowExtensionArray(result) + + def _evaluate_op_method(self, other, op, arrow_funcs): + pa_type = self._pa_array.type + other = self._box_pa(other) + + if (pa.types.is_string(pa_type) or pa.types.is_binary(pa_type)) and op in [ + operator.add, + roperator.radd, + ]: + sep = pa.scalar("", type=pa_type) + if op is operator.add: + result = pc.binary_join_element_wise(self._pa_array, other, sep) + else: + result = pc.binary_join_element_wise(other, self._pa_array, sep) + return type(self)(result) + + if ( + isinstance(other, pa.Scalar) + and pc.is_null(other).as_py() + and op.__name__ in ARROW_LOGICAL_FUNCS + ): + # pyarrow kleene ops require null to be typed + other = other.cast(pa_type) + + pc_func = arrow_funcs[op.__name__] + if pc_func is NotImplemented: + raise NotImplementedError(f"{op.__name__} not implemented.") + + result = pc_func(self._pa_array, other) + return type(self)(result) + + def _logical_method(self, other, op): + # For integer types `^`, `|`, `&` are bitwise operators and return + # integer types. Otherwise these are boolean ops. 
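+        # Editorial sketch (illustrative, not pandas source): the dispatch
+        # below means `&`, `|` and `^` act bit-wise on integer dtypes but use
+        # Kleene logic on boolean dtypes. Assuming pandas with pyarrow
+        # installed:
+        #
+        #   >>> import pandas as pd
+        #   >>> a = pd.array([1, 2], dtype="int64[pyarrow]")
+        #   >>> (a | pd.array([4, 1], dtype="int64[pyarrow]")).tolist()
+        #   [5, 3]
+        #   >>> b = pd.array([True, pd.NA], dtype="boolean[pyarrow]")
+        #   >>> (b | True).tolist()  # Kleene: True | NA is True
+        #   [True, True]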
+        if pa.types.is_integer(self._pa_array.type):
+            return self._evaluate_op_method(other, op, ARROW_BIT_WISE_FUNCS)
+        else:
+            return self._evaluate_op_method(other, op, ARROW_LOGICAL_FUNCS)
+
+    def _arith_method(self, other, op):
+        return self._evaluate_op_method(other, op, ARROW_ARITHMETIC_FUNCS)
+
+    def equals(self, other) -> bool:
+        if not isinstance(other, ArrowExtensionArray):
+            return False
+        # I'm told that pyarrow makes __eq__ behave like pandas' equals;
+        # TODO: is this documented somewhere?
+        return self._pa_array == other._pa_array
+
+    @property
+    def dtype(self) -> ArrowDtype:
+        """
+        An instance of 'ExtensionDtype'.
+        """
+        return self._dtype
+
+    @property
+    def nbytes(self) -> int:
+        """
+        The number of bytes needed to store this object in memory.
+        """
+        return self._pa_array.nbytes
+
+    def __len__(self) -> int:
+        """
+        Length of this array.
+
+        Returns
+        -------
+        length : int
+        """
+        return len(self._pa_array)
+
+    def __contains__(self, key) -> bool:
+        # https://github.com/pandas-dev/pandas/pull/51307#issuecomment-1426372604
+        if isna(key) and key is not self.dtype.na_value:
+            if self.dtype.kind == "f" and lib.is_float(key):
+                return pc.any(pc.is_nan(self._pa_array)).as_py()
+
+            # e.g. date or timestamp types we do not allow None here to match pd.NA
+            return False
+            # TODO: maybe complex? object?
+
+        return bool(super().__contains__(key))
+
+    @property
+    def _hasna(self) -> bool:
+        return self._pa_array.null_count > 0
+
+    def isna(self) -> npt.NDArray[np.bool_]:
+        """
+        Boolean NumPy array indicating if each value is missing.
+
+        This should return a 1-D array the same length as 'self'.
+        """
+        # GH51630: fast paths
+        null_count = self._pa_array.null_count
+        if null_count == 0:
+            return np.zeros(len(self), dtype=np.bool_)
+        elif null_count == len(self):
+            return np.ones(len(self), dtype=np.bool_)
+
+        return self._pa_array.is_null().to_numpy()
+
+    def any(self, *, skipna: bool = True, **kwargs):
+        """
+        Return whether any element is truthy.
+
+        Returns False unless there is at least one element that is truthy.
+        By default, NAs are skipped. If ``skipna=False`` is specified and
+        missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
+        is used as for logical operations.
+
+        Parameters
+        ----------
+        skipna : bool, default True
+            Exclude NA values. If the entire array is NA and `skipna` is
+            True, then the result will be False, as for an empty array.
+            If `skipna` is False, the result will still be True if there is
+            at least one element that is truthy, otherwise NA will be returned
+            if there are NA's present.
+
+        Returns
+        -------
+        bool or :attr:`pandas.NA`
+
+        See Also
+        --------
+        ArrowExtensionArray.all : Return whether all elements are truthy.
+
+        Examples
+        --------
+        The result indicates whether any element is truthy (and by default
+        skips NAs):
+
+        >>> pd.array([True, False, True], dtype="boolean[pyarrow]").any()
+        True
+        >>> pd.array([True, False, pd.NA], dtype="boolean[pyarrow]").any()
+        True
+        >>> pd.array([False, False, pd.NA], dtype="boolean[pyarrow]").any()
+        False
+        >>> pd.array([], dtype="boolean[pyarrow]").any()
+        False
+        >>> pd.array([pd.NA], dtype="boolean[pyarrow]").any()
+        False
+        >>> pd.array([pd.NA], dtype="float64[pyarrow]").any()
+        False
+
+        With ``skipna=False``, the result can be NA if this is logically
+        required (whether ``pd.NA`` is True or False influences the result):
+
+        >>> pd.array([True, False, pd.NA], dtype="boolean[pyarrow]").any(skipna=False)
+        True
+        >>> pd.array([1, 0, pd.NA], dtype="boolean[pyarrow]").any(skipna=False)
+        True
+        >>> pd.array([False, False, pd.NA], dtype="boolean[pyarrow]").any(skipna=False)
+        <NA>
+        >>> pd.array([0, 0, pd.NA], dtype="boolean[pyarrow]").any(skipna=False)
+        <NA>
+        """
+        return self._reduce("any", skipna=skipna, **kwargs)
+
+    def all(self, *, skipna: bool = True, **kwargs):
+        """
+        Return whether all elements are truthy.
+
+        Returns True unless there is at least one element that is falsey.
+        By default, NAs are skipped. If ``skipna=False`` is specified and
+        missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
+        is used as for logical operations.
+
+        Parameters
+        ----------
+        skipna : bool, default True
+            Exclude NA values. If the entire array is NA and `skipna` is
+            True, then the result will be True, as for an empty array.
+            If `skipna` is False, the result will still be False if there is
+            at least one element that is falsey, otherwise NA will be returned
+            if there are NA's present.
+
+        Returns
+        -------
+        bool or :attr:`pandas.NA`
+
+        See Also
+        --------
+        ArrowExtensionArray.any : Return whether any element is truthy.
+
+        Examples
+        --------
+        The result indicates whether all elements are truthy (and by default
+        skips NAs):
+
+        >>> pd.array([True, True, pd.NA], dtype="boolean[pyarrow]").all()
+        True
+        >>> pd.array([1, 1, pd.NA], dtype="boolean[pyarrow]").all()
+        True
+        >>> pd.array([True, False, pd.NA], dtype="boolean[pyarrow]").all()
+        False
+        >>> pd.array([], dtype="boolean[pyarrow]").all()
+        True
+        >>> pd.array([pd.NA], dtype="boolean[pyarrow]").all()
+        True
+        >>> pd.array([pd.NA], dtype="float64[pyarrow]").all()
+        True
+
+        With ``skipna=False``, the result can be NA if this is logically
+        required (whether ``pd.NA`` is True or False influences the result):
+
+        >>> pd.array([True, True, pd.NA], dtype="boolean[pyarrow]").all(skipna=False)
+        <NA>
+        >>> pd.array([1, 1, pd.NA], dtype="boolean[pyarrow]").all(skipna=False)
+        <NA>
+        >>> pd.array([True, False, pd.NA], dtype="boolean[pyarrow]").all(skipna=False)
+        False
+        >>> pd.array([1, 0, pd.NA], dtype="boolean[pyarrow]").all(skipna=False)
+        False
+        """
+        return self._reduce("all", skipna=skipna, **kwargs)
+
+    def argsort(
+        self,
+        *,
+        ascending: bool = True,
+        kind: SortKind = "quicksort",
+        na_position: str = "last",
+        **kwargs,
+    ) -> np.ndarray:
+        order = "ascending" if ascending else "descending"
+        null_placement = {"last": "at_end", "first": "at_start"}.get(na_position, None)
+        if null_placement is None:
+            raise ValueError(f"invalid na_position: {na_position}")
+
+        result = pc.array_sort_indices(
+            self._pa_array, order=order, null_placement=null_placement
+        )
+        np_result = result.to_numpy()
+        return np_result.astype(np.intp, copy=False)
+
+    def _argmin_max(self, skipna: bool, method: str) -> int:
+        if self._pa_array.length() in (0, self._pa_array.null_count) or (
+            self._hasna and not skipna
+        ):
+            # For empty or all null, pyarrow returns -1 but pandas expects TypeError
+            # For skipna=False and data w/ null, pandas expects NotImplementedError
+            # let ExtensionArray.arg{max|min} raise
+            return getattr(super(), f"arg{method}")(skipna=skipna)
+
+        data = self._pa_array
+        if pa.types.is_duration(data.type):
+            data = data.cast(pa.int64())
+
+        value = getattr(pc, method)(data, skip_nulls=skipna)
+        return pc.index(data, value).as_py()
+
+    def argmin(self, skipna: bool = True) -> int:
+        return self._argmin_max(skipna, "min")
+
+    def argmax(self, skipna: bool = True) -> int:
+        return self._argmin_max(skipna, "max")
+
+    def copy(self) -> Self:
+        """
+        Return a shallow copy of the array.
+
+        Underlying ChunkedArray is immutable, so a deep copy is unnecessary.
+
+        Returns
+        -------
+        type(self)
+        """
+        return type(self)(self._pa_array)
+
+    def dropna(self) -> Self:
+        """
+        Return ArrowExtensionArray without NA values.
+
+        Returns
+        -------
+        ArrowExtensionArray
+        """
+        return type(self)(pc.drop_null(self._pa_array))
+
+    def _pad_or_backfill(
+        self, *, method: FillnaOptions, limit: int | None = None, copy: bool = True
+    ) -> Self:
+        if not self._hasna:
+            # TODO(CoW): Not necessary anymore when CoW is the default
+            return self.copy()
+
+        if limit is None:
+            method = missing.clean_fill_method(method)
+            try:
+                if method == "pad":
+                    return type(self)(pc.fill_null_forward(self._pa_array))
+                elif method == "backfill":
+                    return type(self)(pc.fill_null_backward(self._pa_array))
+            except pa.ArrowNotImplementedError:
+                # ArrowNotImplementedError: Function 'coalesce' has no kernel
+                # matching input types (duration[ns], duration[ns])
+                # TODO: remove try/except wrapper if/when pyarrow implements
+                # a kernel for duration types.
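+                # Editorial sketch (illustrative, not pandas source): the fast
+                # path above maps ffill/bfill onto Arrow kernels, e.g. assuming
+                # pyarrow is installed:
+                #   >>> import pandas as pd
+                #   >>> s = pd.Series([1.0, None, 3.0], dtype="float64[pyarrow]")
+                #   >>> s.ffill().tolist()  # pc.fill_null_forward under the hood
+                #   [1.0, 1.0, 3.0]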
+ pass + + # TODO(3.0): after EA.fillna 'method' deprecation is enforced, we can remove + # this method entirely. + return super()._pad_or_backfill(method=method, limit=limit, copy=copy) + + @doc(ExtensionArray.fillna) + def fillna( + self, + value: object | ArrayLike | None = None, + method: FillnaOptions | None = None, + limit: int | None = None, + copy: bool = True, + ) -> Self: + value, method = validate_fillna_kwargs(value, method) + + if not self._hasna: + # TODO(CoW): Not necessary anymore when CoW is the default + return self.copy() + + if limit is not None: + return super().fillna(value=value, method=method, limit=limit, copy=copy) + + if method is not None: + return super().fillna(method=method, limit=limit, copy=copy) + + if isinstance(value, (np.ndarray, ExtensionArray)): + # Similar to check_value_size, but we do not mask here since we may + # end up passing it to the super() method. + if len(value) != len(self): + raise ValueError( + f"Length of 'value' does not match. Got ({len(value)}) " + f" expected {len(self)}" + ) + + try: + fill_value = self._box_pa(value, pa_type=self._pa_array.type) + except pa.ArrowTypeError as err: + msg = f"Invalid value '{str(value)}' for dtype {self.dtype}" + raise TypeError(msg) from err + + try: + return type(self)(pc.fill_null(self._pa_array, fill_value=fill_value)) + except pa.ArrowNotImplementedError: + # ArrowNotImplementedError: Function 'coalesce' has no kernel + # matching input types (duration[ns], duration[ns]) + # TODO: remove try/except wrapper if/when pyarrow implements + # a kernel for duration types. + pass + + return super().fillna(value=value, method=method, limit=limit, copy=copy) + + def isin(self, values) -> npt.NDArray[np.bool_]: + # short-circuit to return all False array. + if not len(values): + return np.zeros(len(self), dtype=bool) + + result = pc.is_in(self._pa_array, value_set=pa.array(values, from_pandas=True)) + # pyarrow 2.0.0 returned nulls, so we explicitly specify dtype to convert nulls + # to False + return np.array(result, dtype=np.bool_) + + def _values_for_factorize(self) -> tuple[np.ndarray, Any]: + """ + Return an array and missing value suitable for factorization. + + Returns + ------- + values : ndarray + na_value : pd.NA + + Notes + ----- + The values returned by this method are also used in + :func:`pandas.util.hash_pandas_object`. 
+        """
+        values = self._pa_array.to_numpy()
+        return values, self.dtype.na_value
+
+    @doc(ExtensionArray.factorize)
+    def factorize(
+        self,
+        use_na_sentinel: bool = True,
+    ) -> tuple[np.ndarray, ExtensionArray]:
+        null_encoding = "mask" if use_na_sentinel else "encode"
+
+        data = self._pa_array
+        pa_type = data.type
+        if pa_version_under11p0 and pa.types.is_duration(pa_type):
+            # https://github.com/apache/arrow/issues/15226#issuecomment-1376578323
+            data = data.cast(pa.int64())
+
+        if pa.types.is_dictionary(data.type):
+            encoded = data
+        else:
+            encoded = data.dictionary_encode(null_encoding=null_encoding)
+        if encoded.length() == 0:
+            indices = np.array([], dtype=np.intp)
+            uniques = type(self)(pa.chunked_array([], type=encoded.type.value_type))
+        else:
+            # GH 54844
+            combined = encoded.combine_chunks()
+            pa_indices = combined.indices
+            if pa_indices.null_count > 0:
+                pa_indices = pc.fill_null(pa_indices, -1)
+            indices = pa_indices.to_numpy(zero_copy_only=False, writable=True).astype(
+                np.intp, copy=False
+            )
+            uniques = type(self)(combined.dictionary)
+
+        if pa_version_under11p0 and pa.types.is_duration(pa_type):
+            uniques = cast(ArrowExtensionArray, uniques.astype(self.dtype))
+        return indices, uniques
+
+    def reshape(self, *args, **kwargs):
+        raise NotImplementedError(
+            f"{type(self)} does not support reshape "
+            f"as backed by a 1D pyarrow.ChunkedArray."
+        )
+
+    def round(self, decimals: int = 0, *args, **kwargs) -> Self:
+        """
+        Round each value in the array to the given number of decimals.
+
+        Parameters
+        ----------
+        decimals : int, default 0
+            Number of decimal places to round to. If decimals is negative,
+            it specifies the number of positions to the left of the decimal point.
+        *args, **kwargs
+            Additional arguments and keywords have no effect.
+
+        Returns
+        -------
+        ArrowExtensionArray
+            Rounded values of the ArrowExtensionArray.
+
+        See Also
+        --------
+        DataFrame.round : Round values of a DataFrame.
+        Series.round : Round values of a Series.
+        """
+        return type(self)(pc.round(self._pa_array, ndigits=decimals))
+
+    @doc(ExtensionArray.searchsorted)
+    def searchsorted(
+        self,
+        value: NumpyValueArrayLike | ExtensionArray,
+        side: Literal["left", "right"] = "left",
+        sorter: NumpySorter | None = None,
+    ) -> npt.NDArray[np.intp] | np.intp:
+        if self._hasna:
+            raise ValueError(
+                "searchsorted requires array to be sorted, which is impossible "
+                "with NAs present."
+            )
+        if isinstance(value, ExtensionArray):
+            value = value.astype(object)
+        # Base class searchsorted would cast to object, which is *much* slower.
+        return self.to_numpy().searchsorted(value, side=side, sorter=sorter)
+
+    def take(
+        self,
+        indices: TakeIndexer,
+        allow_fill: bool = False,
+        fill_value: Any = None,
+    ) -> ArrowExtensionArray:
+        """
+        Take elements from an array.
+
+        Parameters
+        ----------
+        indices : sequence of int or one-dimensional np.ndarray of int
+            Indices to be taken.
+        allow_fill : bool, default False
+            How to handle negative values in `indices`.
+
+            * False: negative values in `indices` indicate positional indices
+              from the right (the default). This is similar to
+              :func:`numpy.take`.
+
+            * True: negative values in `indices` indicate
+              missing values. These values are set to `fill_value`. Any
+              other negative values raise a ``ValueError``.
+
+        fill_value : any, optional
+            Fill value to use for NA-indices when `allow_fill` is True.
+            This may be ``None``, in which case the default NA value for
+            the type, ``self.dtype.na_value``, is used.
+ + For many ExtensionArrays, there will be two representations of + `fill_value`: a user-facing "boxed" scalar, and a low-level + physical NA value. `fill_value` should be the user-facing version, + and the implementation should handle translating that to the + physical version for processing the take if necessary. + + Returns + ------- + ExtensionArray + + Raises + ------ + IndexError + When the indices are out of bounds for the array. + ValueError + When `indices` contains negative values other than ``-1`` + and `allow_fill` is True. + + See Also + -------- + numpy.take + api.extensions.take + + Notes + ----- + ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``, + ``iloc``, when `indices` is a sequence of values. Additionally, + it's called by :meth:`Series.reindex`, or any other method + that causes realignment, with a `fill_value`. + """ + indices_array = np.asanyarray(indices) + + if len(self._pa_array) == 0 and (indices_array >= 0).any(): + raise IndexError("cannot do a non-empty take") + if indices_array.size > 0 and indices_array.max() >= len(self._pa_array): + raise IndexError("out of bounds value in 'indices'.") + + if allow_fill: + fill_mask = indices_array < 0 + if fill_mask.any(): + validate_indices(indices_array, len(self._pa_array)) + # TODO(ARROW-9433): Treat negative indices as NULL + indices_array = pa.array(indices_array, mask=fill_mask) + result = self._pa_array.take(indices_array) + if isna(fill_value): + return type(self)(result) + # TODO: ArrowNotImplementedError: Function fill_null has no + # kernel matching input types (array[string], scalar[string]) + result = type(self)(result) + result[fill_mask] = fill_value + return result + # return type(self)(pc.fill_null(result, pa.scalar(fill_value))) + else: + # Nothing to fill + return type(self)(self._pa_array.take(indices)) + else: # allow_fill=False + # TODO(ARROW-9432): Treat negative indices as indices from the right. 
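+            # Editorial sketch (illustrative, not pandas source): in this
+            # branch negative indices count from the right, numpy-style; with
+            # allow_fill=True above, -1 instead marks a missing value:
+            #   >>> import pandas as pd
+            #   >>> arr = pd.array([10, 20, 30], dtype="int64[pyarrow]")
+            #   >>> arr.take([0, -1]).tolist()
+            #   [10, 30]
+            #   >>> arr.take([0, -1], allow_fill=True).tolist()
+            #   [10, <NA>]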
+ if (indices_array < 0).any(): + # Don't modify in-place + indices_array = np.copy(indices_array) + indices_array[indices_array < 0] += len(self._pa_array) + return type(self)(self._pa_array.take(indices_array)) + + def _maybe_convert_datelike_array(self): + """Maybe convert to a datelike array.""" + pa_type = self._pa_array.type + if pa.types.is_timestamp(pa_type): + return self._to_datetimearray() + elif pa.types.is_duration(pa_type): + return self._to_timedeltaarray() + return self + + def _to_datetimearray(self) -> DatetimeArray: + """Convert a pyarrow timestamp typed array to a DatetimeArray.""" + from pandas.core.arrays.datetimes import ( + DatetimeArray, + tz_to_dtype, + ) + + pa_type = self._pa_array.type + assert pa.types.is_timestamp(pa_type) + np_dtype = np.dtype(f"M8[{pa_type.unit}]") + dtype = tz_to_dtype(pa_type.tz, pa_type.unit) + np_array = self._pa_array.to_numpy() + np_array = np_array.astype(np_dtype) + return DatetimeArray._simple_new(np_array, dtype=dtype) + + def _to_timedeltaarray(self) -> TimedeltaArray: + """Convert a pyarrow duration typed array to a TimedeltaArray.""" + from pandas.core.arrays.timedeltas import TimedeltaArray + + pa_type = self._pa_array.type + assert pa.types.is_duration(pa_type) + np_dtype = np.dtype(f"m8[{pa_type.unit}]") + np_array = self._pa_array.to_numpy() + np_array = np_array.astype(np_dtype) + return TimedeltaArray._simple_new(np_array, dtype=np_dtype) + + @doc(ExtensionArray.to_numpy) + def to_numpy( + self, + dtype: npt.DTypeLike | None = None, + copy: bool = False, + na_value: object = lib.no_default, + ) -> np.ndarray: + if dtype is not None: + dtype = np.dtype(dtype) + elif self._hasna: + dtype = np.dtype(object) + + if na_value is lib.no_default: + na_value = self.dtype.na_value + + pa_type = self._pa_array.type + if pa.types.is_timestamp(pa_type) or pa.types.is_duration(pa_type): + result = self._maybe_convert_datelike_array() + if dtype is None or dtype.kind == "O": + result = result.to_numpy(dtype=object, na_value=na_value) + else: + result = result.to_numpy(dtype=dtype) + return result + elif pa.types.is_time(pa_type) or pa.types.is_date(pa_type): + # convert to list of python datetime.time objects before + # wrapping in ndarray + result = np.array(list(self), dtype=dtype) + elif is_object_dtype(dtype) and self._hasna: + result = np.empty(len(self), dtype=object) + mask = ~self.isna() + result[mask] = np.asarray(self[mask]._pa_array) + elif pa.types.is_null(self._pa_array.type): + fill_value = None if isna(na_value) else na_value + return np.full(len(self), fill_value=fill_value, dtype=dtype) + elif self._hasna: + data = self.fillna(na_value) + result = data._pa_array.to_numpy() + if dtype is not None: + result = result.astype(dtype, copy=False) + return result + else: + result = self._pa_array.to_numpy() + if dtype is not None: + result = result.astype(dtype, copy=False) + if copy: + result = result.copy() + return result + if self._hasna: + result[self.isna()] = na_value + return result + + def unique(self) -> Self: + """ + Compute the ArrowExtensionArray of unique values. 
+ + Returns + ------- + ArrowExtensionArray + """ + pa_type = self._pa_array.type + + if pa_version_under11p0 and pa.types.is_duration(pa_type): + # https://github.com/apache/arrow/issues/15226#issuecomment-1376578323 + data = self._pa_array.cast(pa.int64()) + else: + data = self._pa_array + + pa_result = pc.unique(data) + + if pa_version_under11p0 and pa.types.is_duration(pa_type): + pa_result = pa_result.cast(pa_type) + + return type(self)(pa_result) + + def value_counts(self, dropna: bool = True) -> Series: + """ + Return a Series containing counts of each unique value. + + Parameters + ---------- + dropna : bool, default True + Don't include counts of missing values. + + Returns + ------- + counts : Series + + See Also + -------- + Series.value_counts + """ + pa_type = self._pa_array.type + if pa_version_under11p0 and pa.types.is_duration(pa_type): + # https://github.com/apache/arrow/issues/15226#issuecomment-1376578323 + data = self._pa_array.cast(pa.int64()) + else: + data = self._pa_array + + from pandas import ( + Index, + Series, + ) + + vc = data.value_counts() + + values = vc.field(0) + counts = vc.field(1) + if dropna and data.null_count > 0: + mask = values.is_valid() + values = values.filter(mask) + counts = counts.filter(mask) + + if pa_version_under11p0 and pa.types.is_duration(pa_type): + values = values.cast(pa_type) + + counts = ArrowExtensionArray(counts) + + index = Index(type(self)(values)) + + return Series(counts, index=index, name="count", copy=False) + + @classmethod + def _concat_same_type(cls, to_concat) -> Self: + """ + Concatenate multiple ArrowExtensionArrays. + + Parameters + ---------- + to_concat : sequence of ArrowExtensionArrays + + Returns + ------- + ArrowExtensionArray + """ + chunks = [array for ea in to_concat for array in ea._pa_array.iterchunks()] + if to_concat[0].dtype == "string": + # StringDtype has no attribute pyarrow_dtype + pa_dtype = pa.string() + else: + pa_dtype = to_concat[0].dtype.pyarrow_dtype + arr = pa.chunked_array(chunks, type=pa_dtype) + return cls(arr) + + def _accumulate( + self, name: str, *, skipna: bool = True, **kwargs + ) -> ArrowExtensionArray | ExtensionArray: + """ + Return an ExtensionArray performing an accumulation operation. + + The underlying data type might change. + + Parameters + ---------- + name : str + Name of the function, supported values are: + - cummin + - cummax + - cumsum + - cumprod + skipna : bool, default True + If True, skip NA values. + **kwargs + Additional keyword arguments passed to the accumulation function. + Currently, there is no supported kwarg. 
+ + Returns + ------- + array + + Raises + ------ + NotImplementedError : subclass does not define accumulations + """ + pyarrow_name = { + "cummax": "cumulative_max", + "cummin": "cumulative_min", + "cumprod": "cumulative_prod_checked", + "cumsum": "cumulative_sum_checked", + }.get(name, name) + pyarrow_meth = getattr(pc, pyarrow_name, None) + if pyarrow_meth is None: + return super()._accumulate(name, skipna=skipna, **kwargs) + + data_to_accum = self._pa_array + + pa_dtype = data_to_accum.type + + convert_to_int = ( + pa.types.is_temporal(pa_dtype) and name in ["cummax", "cummin"] + ) or (pa.types.is_duration(pa_dtype) and name == "cumsum") + + if convert_to_int: + if pa_dtype.bit_width == 32: + data_to_accum = data_to_accum.cast(pa.int32()) + else: + data_to_accum = data_to_accum.cast(pa.int64()) + + result = pyarrow_meth(data_to_accum, skip_nulls=skipna, **kwargs) + + if convert_to_int: + result = result.cast(pa_dtype) + + return type(self)(result) + + def _reduce_pyarrow(self, name: str, *, skipna: bool = True, **kwargs) -> pa.Scalar: + """ + Return a pyarrow scalar result of performing the reduction operation. + + Parameters + ---------- + name : str + Name of the function, supported values are: + { any, all, min, max, sum, mean, median, prod, + std, var, sem, kurt, skew }. + skipna : bool, default True + If True, skip NaN values. + **kwargs + Additional keyword arguments passed to the reduction function. + Currently, `ddof` is the only supported kwarg. + + Returns + ------- + pyarrow scalar + + Raises + ------ + TypeError : subclass does not define reductions + """ + pa_type = self._pa_array.type + + data_to_reduce = self._pa_array + + cast_kwargs = {} if pa_version_under13p0 else {"safe": False} + + if name in ["any", "all"] and ( + pa.types.is_integer(pa_type) + or pa.types.is_floating(pa_type) + or pa.types.is_duration(pa_type) + or pa.types.is_decimal(pa_type) + ): + # pyarrow only supports any/all for boolean dtype, we allow + # for other dtypes, matching our non-pyarrow behavior + + if pa.types.is_duration(pa_type): + data_to_cmp = self._pa_array.cast(pa.int64()) + else: + data_to_cmp = self._pa_array + + not_eq = pc.not_equal(data_to_cmp, 0) + data_to_reduce = not_eq + + elif name in ["min", "max", "sum"] and pa.types.is_duration(pa_type): + data_to_reduce = self._pa_array.cast(pa.int64()) + + elif name in ["median", "mean", "std", "sem"] and pa.types.is_temporal(pa_type): + nbits = pa_type.bit_width + if nbits == 32: + data_to_reduce = self._pa_array.cast(pa.int32()) + else: + data_to_reduce = self._pa_array.cast(pa.int64()) + + if name == "sem": + + def pyarrow_meth(data, skip_nulls, **kwargs): + numerator = pc.stddev(data, skip_nulls=skip_nulls, **kwargs) + denominator = pc.sqrt_checked(pc.count(self._pa_array)) + return pc.divide_checked(numerator, denominator) + + else: + pyarrow_name = { + "median": "quantile", + "prod": "product", + "std": "stddev", + "var": "variance", + }.get(name, name) + # error: Incompatible types in assignment + # (expression has type "Optional[Any]", variable has type + # "Callable[[Any, Any, KwArg(Any)], Any]") + pyarrow_meth = getattr(pc, pyarrow_name, None) # type: ignore[assignment] + if pyarrow_meth is None: + # Let ExtensionArray._reduce raise the TypeError + return super()._reduce(name, skipna=skipna, **kwargs) + + # GH51624: pyarrow defaults to min_count=1, pandas behavior is min_count=0 + if name in ["any", "all"] and "min_count" not in kwargs: + kwargs["min_count"] = 0 + elif name == "median": + # GH 52679: Use quantile instead of 
approximate_median + kwargs["q"] = 0.5 + + try: + result = pyarrow_meth(data_to_reduce, skip_nulls=skipna, **kwargs) + except (AttributeError, NotImplementedError, TypeError) as err: + msg = ( + f"'{type(self).__name__}' with dtype {self.dtype} " + f"does not support reduction '{name}' with pyarrow " + f"version {pa.__version__}. '{name}' may be supported by " + f"upgrading pyarrow." + ) + raise TypeError(msg) from err + if name == "median": + # GH 52679: Use quantile instead of approximate_median; returns array + result = result[0] + if pc.is_null(result).as_py(): + return result + + if name in ["min", "max", "sum"] and pa.types.is_duration(pa_type): + result = result.cast(pa_type) + if name in ["median", "mean"] and pa.types.is_temporal(pa_type): + if not pa_version_under13p0: + nbits = pa_type.bit_width + if nbits == 32: + result = result.cast(pa.int32(), **cast_kwargs) + else: + result = result.cast(pa.int64(), **cast_kwargs) + result = result.cast(pa_type) + if name in ["std", "sem"] and pa.types.is_temporal(pa_type): + result = result.cast(pa.int64(), **cast_kwargs) + if pa.types.is_duration(pa_type): + result = result.cast(pa_type) + elif pa.types.is_time(pa_type): + unit = get_unit_from_pa_dtype(pa_type) + result = result.cast(pa.duration(unit)) + elif pa.types.is_date(pa_type): + # go with closest available unit, i.e. "s" + result = result.cast(pa.duration("s")) + else: + # i.e. timestamp + result = result.cast(pa.duration(pa_type.unit)) + + return result + + def _reduce( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + """ + Return a scalar result of performing the reduction operation. + + Parameters + ---------- + name : str + Name of the function, supported values are: + { any, all, min, max, sum, mean, median, prod, + std, var, sem, kurt, skew }. + skipna : bool, default True + If True, skip NaN values. + **kwargs + Additional keyword arguments passed to the reduction function. + Currently, `ddof` is the only supported kwarg. + + Returns + ------- + scalar + + Raises + ------ + TypeError : subclass does not define reductions + """ + result = self._reduce_calc(name, skipna=skipna, keepdims=keepdims, **kwargs) + if isinstance(result, pa.Array): + return type(self)(result) + else: + return result + + def _reduce_calc( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + pa_result = self._reduce_pyarrow(name, skipna=skipna, **kwargs) + + if keepdims: + if isinstance(pa_result, pa.Scalar): + result = pa.array([pa_result.as_py()], type=pa_result.type) + else: + result = pa.array( + [pa_result], + type=to_pyarrow_type(infer_dtype_from_scalar(pa_result)[0]), + ) + return result + + if pc.is_null(pa_result).as_py(): + return self.dtype.na_value + elif isinstance(pa_result, pa.Scalar): + return pa_result.as_py() + else: + return pa_result + + def _explode(self): + """ + See Series.explode.__doc__. + """ + values = self + counts = pa.compute.list_value_length(values._pa_array) + counts = counts.fill_null(1).to_numpy() + fill_value = pa.scalar([None], type=self._pa_array.type) + mask = counts == 0 + if mask.any(): + values = values.copy() + values[mask] = fill_value + counts = counts.copy() + counts[mask] = 1 + values = values.fillna(fill_value) + values = type(self)(pa.compute.list_flatten(values._pa_array)) + return values, counts + + def __setitem__(self, key, value) -> None: + """Set one or more values inplace. + + Parameters + ---------- + key : int, ndarray, or slice + When called from, e.g. 
``Series.__setitem__``, ``key`` will be + one of + + * scalar int + * ndarray of integers. + * boolean ndarray + * slice object + + value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object + value or values to be set of ``key``. + + Returns + ------- + None + """ + # GH50085: unwrap 1D indexers + if isinstance(key, tuple) and len(key) == 1: + key = key[0] + + key = check_array_indexer(self, key) + value = self._maybe_convert_setitem_value(value) + + if com.is_null_slice(key): + # fast path (GH50248) + data = self._if_else(True, value, self._pa_array) + + elif is_integer(key): + # fast path + key = cast(int, key) + n = len(self) + if key < 0: + key += n + if not 0 <= key < n: + raise IndexError( + f"index {key} is out of bounds for axis 0 with size {n}" + ) + if isinstance(value, pa.Scalar): + value = value.as_py() + elif is_list_like(value): + raise ValueError("Length of indexer and values mismatch") + chunks = [ + *self._pa_array[:key].chunks, + pa.array([value], type=self._pa_array.type, from_pandas=True), + *self._pa_array[key + 1 :].chunks, + ] + data = pa.chunked_array(chunks).combine_chunks() + + elif is_bool_dtype(key): + key = np.asarray(key, dtype=np.bool_) + data = self._replace_with_mask(self._pa_array, key, value) + + elif is_scalar(value) or isinstance(value, pa.Scalar): + mask = np.zeros(len(self), dtype=np.bool_) + mask[key] = True + data = self._if_else(mask, value, self._pa_array) + + else: + indices = np.arange(len(self))[key] + if len(indices) != len(value): + raise ValueError("Length of indexer and values mismatch") + if len(indices) == 0: + return + argsort = np.argsort(indices) + indices = indices[argsort] + value = value.take(argsort) + mask = np.zeros(len(self), dtype=np.bool_) + mask[indices] = True + data = self._replace_with_mask(self._pa_array, mask, value) + + if isinstance(data, pa.Array): + data = pa.chunked_array([data]) + self._pa_array = data + + def _rank_calc( + self, + *, + axis: AxisInt = 0, + method: str = "average", + na_option: str = "keep", + ascending: bool = True, + pct: bool = False, + ): + if pa_version_under9p0 or axis != 0: + ranked = super()._rank( + axis=axis, + method=method, + na_option=na_option, + ascending=ascending, + pct=pct, + ) + # keep dtypes consistent with the implementation below + if method == "average" or pct: + pa_type = pa.float64() + else: + pa_type = pa.uint64() + result = pa.array(ranked, type=pa_type, from_pandas=True) + return result + + data = self._pa_array.combine_chunks() + sort_keys = "ascending" if ascending else "descending" + null_placement = "at_start" if na_option == "top" else "at_end" + tiebreaker = "min" if method == "average" else method + + result = pc.rank( + data, + sort_keys=sort_keys, + null_placement=null_placement, + tiebreaker=tiebreaker, + ) + + if na_option == "keep": + mask = pc.is_null(self._pa_array) + null = pa.scalar(None, type=result.type) + result = pc.if_else(mask, null, result) + + if method == "average": + result_max = pc.rank( + data, + sort_keys=sort_keys, + null_placement=null_placement, + tiebreaker="max", + ) + result_max = result_max.cast(pa.float64()) + result_min = result.cast(pa.float64()) + result = pc.divide(pc.add(result_min, result_max), 2) + + if pct: + if not pa.types.is_floating(result.type): + result = result.cast(pa.float64()) + if method == "dense": + divisor = pc.max(result) + else: + divisor = pc.count(result) + result = pc.divide(result, divisor) + + return result + + def _rank( + self, + *, + axis: AxisInt = 0, + method: str = "average", + na_option: 
str = "keep", + ascending: bool = True, + pct: bool = False, + ): + """ + See Series.rank.__doc__. + """ + return type(self)( + self._rank_calc( + axis=axis, + method=method, + na_option=na_option, + ascending=ascending, + pct=pct, + ) + ) + + def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self: + """ + Compute the quantiles of self for each quantile in `qs`. + + Parameters + ---------- + qs : np.ndarray[float64] + interpolation: str + + Returns + ------- + same type as self + """ + pa_dtype = self._pa_array.type + + data = self._pa_array + if pa.types.is_temporal(pa_dtype): + # https://github.com/apache/arrow/issues/33769 in these cases + # we can cast to ints and back + nbits = pa_dtype.bit_width + if nbits == 32: + data = data.cast(pa.int32()) + else: + data = data.cast(pa.int64()) + + result = pc.quantile(data, q=qs, interpolation=interpolation) + + if pa.types.is_temporal(pa_dtype): + if pa.types.is_floating(result.type): + result = pc.floor(result) + nbits = pa_dtype.bit_width + if nbits == 32: + result = result.cast(pa.int32()) + else: + result = result.cast(pa.int64()) + result = result.cast(pa_dtype) + + return type(self)(result) + + def _mode(self, dropna: bool = True) -> Self: + """ + Returns the mode(s) of the ExtensionArray. + + Always returns `ExtensionArray` even if only one value. + + Parameters + ---------- + dropna : bool, default True + Don't consider counts of NA values. + + Returns + ------- + same type as self + Sorted, if possible. + """ + pa_type = self._pa_array.type + if pa.types.is_temporal(pa_type): + nbits = pa_type.bit_width + if nbits == 32: + data = self._pa_array.cast(pa.int32()) + elif nbits == 64: + data = self._pa_array.cast(pa.int64()) + else: + raise NotImplementedError(pa_type) + else: + data = self._pa_array + + if dropna: + data = data.drop_null() + + res = pc.value_counts(data) + most_common = res.field("values").filter( + pc.equal(res.field("counts"), pc.max(res.field("counts"))) + ) + + if pa.types.is_temporal(pa_type): + most_common = most_common.cast(pa_type) + + most_common = most_common.take(pc.array_sort_indices(most_common)) + return type(self)(most_common) + + def _maybe_convert_setitem_value(self, value): + """Maybe convert value to be pyarrow compatible.""" + try: + value = self._box_pa(value, self._pa_array.type) + except pa.ArrowTypeError as err: + msg = f"Invalid value '{str(value)}' for dtype {self.dtype}" + raise TypeError(msg) from err + return value + + @classmethod + def _if_else( + cls, + cond: npt.NDArray[np.bool_] | bool, + left: ArrayLike | Scalar, + right: ArrayLike | Scalar, + ): + """ + Choose values based on a condition. + + Analogous to pyarrow.compute.if_else, with logic + to fallback to numpy for unsupported types. 
+ + Parameters + ---------- + cond : npt.NDArray[np.bool_] or bool + left : ArrayLike | Scalar + right : ArrayLike | Scalar + + Returns + ------- + pa.Array + """ + try: + return pc.if_else(cond, left, right) + except pa.ArrowNotImplementedError: + pass + + def _to_numpy_and_type(value) -> tuple[np.ndarray, pa.DataType | None]: + if isinstance(value, (pa.Array, pa.ChunkedArray)): + pa_type = value.type + elif isinstance(value, pa.Scalar): + pa_type = value.type + value = value.as_py() + else: + pa_type = None + return np.array(value, dtype=object), pa_type + + left, left_type = _to_numpy_and_type(left) + right, right_type = _to_numpy_and_type(right) + pa_type = left_type or right_type + result = np.where(cond, left, right) + return pa.array(result, type=pa_type, from_pandas=True) + + @classmethod + def _replace_with_mask( + cls, + values: pa.Array | pa.ChunkedArray, + mask: npt.NDArray[np.bool_] | bool, + replacements: ArrayLike | Scalar, + ): + """ + Replace items selected with a mask. + + Analogous to pyarrow.compute.replace_with_mask, with logic + to fallback to numpy for unsupported types. + + Parameters + ---------- + values : pa.Array or pa.ChunkedArray + mask : npt.NDArray[np.bool_] or bool + replacements : ArrayLike or Scalar + Replacement value(s) + + Returns + ------- + pa.Array or pa.ChunkedArray + """ + if isinstance(replacements, pa.ChunkedArray): + # replacements must be array or scalar, not ChunkedArray + replacements = replacements.combine_chunks() + if pa_version_under8p0: + # pc.replace_with_mask seems to be a bit unreliable for versions < 8.0: + # version <= 7: segfaults with various types + # version <= 6: fails to replace nulls + if isinstance(replacements, pa.Array): + indices = np.full(len(values), None) + indices[mask] = np.arange(len(replacements)) + indices = pa.array(indices, type=pa.int64()) + replacements = replacements.take(indices) + return cls._if_else(mask, replacements, values) + if isinstance(values, pa.ChunkedArray) and pa.types.is_boolean(values.type): + # GH#52059 replace_with_mask segfaults for chunked array + # https://github.com/apache/arrow/issues/34634 + values = values.combine_chunks() + try: + return pc.replace_with_mask(values, mask, replacements) + except pa.ArrowNotImplementedError: + pass + if isinstance(replacements, pa.Array): + replacements = np.array(replacements, dtype=object) + elif isinstance(replacements, pa.Scalar): + replacements = replacements.as_py() + result = np.array(values, dtype=object) + result[mask] = replacements + return pa.array(result, type=values.type, from_pandas=True) + + # ------------------------------------------------------------------ + # GroupBy Methods + + def _to_masked(self): + pa_dtype = self._pa_array.type + + if pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype): + na_value = 1 + elif pa.types.is_boolean(pa_dtype): + na_value = True + else: + raise NotImplementedError + + dtype = _arrow_dtype_mapping()[pa_dtype] + mask = self.isna() + arr = self.to_numpy(dtype=dtype.numpy_dtype, na_value=na_value) + return dtype.construct_array_type()(arr, mask) + + def _groupby_op( + self, + *, + how: str, + has_dropped_na: bool, + min_count: int, + ngroups: int, + ids: npt.NDArray[np.intp], + **kwargs, + ): + if isinstance(self.dtype, StringDtype): + return super()._groupby_op( + how=how, + has_dropped_na=has_dropped_na, + min_count=min_count, + ngroups=ngroups, + ids=ids, + **kwargs, + ) + + masked = self._to_masked() + + result = masked._groupby_op( + how=how, + has_dropped_na=has_dropped_na, + 
min_count=min_count, + ngroups=ngroups, + ids=ids, + **kwargs, + ) + if isinstance(result, np.ndarray): + return result + return type(self)._from_sequence(result, copy=False) + + def _apply_elementwise(self, func: Callable) -> list[list[Any]]: + """Apply a callable to each element while maintaining the chunking structure.""" + return [ + [ + None if val is None else func(val) + for val in chunk.to_numpy(zero_copy_only=False) + ] + for chunk in self._pa_array.iterchunks() + ] + + def _str_count(self, pat: str, flags: int = 0): + if flags: + raise NotImplementedError(f"count not implemented with {flags=}") + return type(self)(pc.count_substring_regex(self._pa_array, pat)) + + def _str_contains( + self, pat, case: bool = True, flags: int = 0, na=None, regex: bool = True + ): + if flags: + raise NotImplementedError(f"contains not implemented with {flags=}") + + if regex: + pa_contains = pc.match_substring_regex + else: + pa_contains = pc.match_substring + result = pa_contains(self._pa_array, pat, ignore_case=not case) + if not isna(na): + result = result.fill_null(na) + return type(self)(result) + + def _str_startswith(self, pat: str, na=None): + result = pc.starts_with(self._pa_array, pattern=pat) + if not isna(na): + result = result.fill_null(na) + return type(self)(result) + + def _str_endswith(self, pat: str, na=None): + result = pc.ends_with(self._pa_array, pattern=pat) + if not isna(na): + result = result.fill_null(na) + return type(self)(result) + + def _str_replace( + self, + pat: str | re.Pattern, + repl: str | Callable, + n: int = -1, + case: bool = True, + flags: int = 0, + regex: bool = True, + ): + if isinstance(pat, re.Pattern) or callable(repl) or not case or flags: + raise NotImplementedError( + "replace is not supported with a re.Pattern, callable repl, " + "case=False, or flags!=0" + ) + + func = pc.replace_substring_regex if regex else pc.replace_substring + result = func(self._pa_array, pattern=pat, replacement=repl, max_replacements=n) + return type(self)(result) + + def _str_repeat(self, repeats: int | Sequence[int]): + if not isinstance(repeats, int): + raise NotImplementedError( + f"repeat is not implemented when repeats is {type(repeats).__name__}" + ) + else: + return type(self)(pc.binary_repeat(self._pa_array, repeats)) + + def _str_match( + self, pat: str, case: bool = True, flags: int = 0, na: Scalar | None = None + ): + if not pat.startswith("^"): + pat = f"^{pat}" + return self._str_contains(pat, case, flags, na, regex=True) + + def _str_fullmatch( + self, pat, case: bool = True, flags: int = 0, na: Scalar | None = None + ): + if not pat.endswith("$") or pat.endswith("//$"): + pat = f"{pat}$" + return self._str_match(pat, case, flags, na) + + def _str_find(self, sub: str, start: int = 0, end: int | None = None): + if start != 0 and end is not None: + slices = pc.utf8_slice_codeunits(self._pa_array, start, stop=end) + result = pc.find_substring(slices, sub) + not_found = pc.equal(result, -1) + offset_result = pc.add(result, end - start) + result = pc.if_else(not_found, result, offset_result) + elif start == 0 and end is None: + slices = self._pa_array + result = pc.find_substring(slices, sub) + else: + raise NotImplementedError( + f"find not implemented with {sub=}, {start=}, {end=}" + ) + return type(self)(result) + + def _str_join(self, sep: str): + if pa.types.is_string(self._pa_array.type): + result = self._apply_elementwise(list) + result = pa.chunked_array(result, type=pa.list_(pa.string())) + else: + result = self._pa_array + return 
type(self)(pc.binary_join(result, sep)) + + def _str_partition(self, sep: str, expand: bool): + predicate = lambda val: val.partition(sep) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_rpartition(self, sep: str, expand: bool): + predicate = lambda val: val.rpartition(sep) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_slice( + self, start: int | None = None, stop: int | None = None, step: int | None = None + ): + if start is None: + start = 0 + if step is None: + step = 1 + return type(self)( + pc.utf8_slice_codeunits(self._pa_array, start=start, stop=stop, step=step) + ) + + def _str_isalnum(self): + return type(self)(pc.utf8_is_alnum(self._pa_array)) + + def _str_isalpha(self): + return type(self)(pc.utf8_is_alpha(self._pa_array)) + + def _str_isdecimal(self): + return type(self)(pc.utf8_is_decimal(self._pa_array)) + + def _str_isdigit(self): + return type(self)(pc.utf8_is_digit(self._pa_array)) + + def _str_islower(self): + return type(self)(pc.utf8_is_lower(self._pa_array)) + + def _str_isnumeric(self): + return type(self)(pc.utf8_is_numeric(self._pa_array)) + + def _str_isspace(self): + return type(self)(pc.utf8_is_space(self._pa_array)) + + def _str_istitle(self): + return type(self)(pc.utf8_is_title(self._pa_array)) + + def _str_isupper(self): + return type(self)(pc.utf8_is_upper(self._pa_array)) + + def _str_len(self): + return type(self)(pc.utf8_length(self._pa_array)) + + def _str_lower(self): + return type(self)(pc.utf8_lower(self._pa_array)) + + def _str_upper(self): + return type(self)(pc.utf8_upper(self._pa_array)) + + def _str_strip(self, to_strip=None): + if to_strip is None: + result = pc.utf8_trim_whitespace(self._pa_array) + else: + result = pc.utf8_trim(self._pa_array, characters=to_strip) + return type(self)(result) + + def _str_lstrip(self, to_strip=None): + if to_strip is None: + result = pc.utf8_ltrim_whitespace(self._pa_array) + else: + result = pc.utf8_ltrim(self._pa_array, characters=to_strip) + return type(self)(result) + + def _str_rstrip(self, to_strip=None): + if to_strip is None: + result = pc.utf8_rtrim_whitespace(self._pa_array) + else: + result = pc.utf8_rtrim(self._pa_array, characters=to_strip) + return type(self)(result) + + def _str_removeprefix(self, prefix: str): + # TODO: Should work once https://github.com/apache/arrow/issues/14991 is fixed + # starts_with = pc.starts_with(self._pa_array, pattern=prefix) + # removed = pc.utf8_slice_codeunits(self._pa_array, len(prefix)) + # result = pc.if_else(starts_with, removed, self._pa_array) + # return type(self)(result) + predicate = lambda val: val.removeprefix(prefix) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_casefold(self): + predicate = lambda val: val.casefold() + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_encode(self, encoding: str, errors: str = "strict"): + predicate = lambda val: val.encode(encoding, errors) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_extract(self, pat: str, flags: int = 0, expand: bool = True): + raise NotImplementedError( + "str.extract not supported with pd.ArrowDtype(pa.string())." 
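+            # Editorial note (illustrative, not pandas source): with the
+            # ArrowDtype string type this raises, whereas the "string[pyarrow]"
+            # StringDtype falls back to an object-based implementation:
+            #   >>> import pandas as pd, pyarrow as pa
+            #   >>> s = pd.Series(["a1"], dtype=pd.ArrowDtype(pa.string()))
+            #   >>> s.str.extract(r"(\w)(\d)")  # raises NotImplementedError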
+ ) + + def _str_findall(self, pat: str, flags: int = 0): + regex = re.compile(pat, flags=flags) + predicate = lambda val: regex.findall(val) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_get_dummies(self, sep: str = "|"): + split = pc.split_pattern(self._pa_array, sep) + flattened_values = pc.list_flatten(split) + uniques = flattened_values.unique() + uniques_sorted = uniques.take(pa.compute.array_sort_indices(uniques)) + lengths = pc.list_value_length(split).fill_null(0).to_numpy() + n_rows = len(self) + n_cols = len(uniques) + indices = pc.index_in(flattened_values, uniques_sorted).to_numpy() + indices = indices + np.arange(n_rows).repeat(lengths) * n_cols + dummies = np.zeros(n_rows * n_cols, dtype=np.bool_) + dummies[indices] = True + dummies = dummies.reshape((n_rows, n_cols)) + result = type(self)(pa.array(list(dummies))) + return result, uniques_sorted.to_pylist() + + def _str_index(self, sub: str, start: int = 0, end: int | None = None): + predicate = lambda val: val.index(sub, start, end) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_rindex(self, sub: str, start: int = 0, end: int | None = None): + predicate = lambda val: val.rindex(sub, start, end) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_normalize(self, form: str): + predicate = lambda val: unicodedata.normalize(form, val) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_rfind(self, sub: str, start: int = 0, end=None): + predicate = lambda val: val.rfind(sub, start, end) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_split( + self, + pat: str | None = None, + n: int | None = -1, + expand: bool = False, + regex: bool | None = None, + ): + if n in {-1, 0}: + n = None + if pat is None: + split_func = pc.utf8_split_whitespace + elif regex: + split_func = functools.partial(pc.split_pattern_regex, pattern=pat) + else: + split_func = functools.partial(pc.split_pattern, pattern=pat) + return type(self)(split_func(self._pa_array, max_splits=n)) + + def _str_rsplit(self, pat: str | None = None, n: int | None = -1): + if n in {-1, 0}: + n = None + if pat is None: + return type(self)( + pc.utf8_split_whitespace(self._pa_array, max_splits=n, reverse=True) + ) + else: + return type(self)( + pc.split_pattern(self._pa_array, pat, max_splits=n, reverse=True) + ) + + def _str_translate(self, table: dict[int, str]): + predicate = lambda val: val.translate(table) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + def _str_wrap(self, width: int, **kwargs): + kwargs["width"] = width + tw = textwrap.TextWrapper(**kwargs) + predicate = lambda val: "\n".join(tw.wrap(val)) + result = self._apply_elementwise(predicate) + return type(self)(pa.chunked_array(result)) + + @property + def _dt_year(self): + return type(self)(pc.year(self._pa_array)) + + @property + def _dt_day(self): + return type(self)(pc.day(self._pa_array)) + + @property + def _dt_day_of_week(self): + return type(self)(pc.day_of_week(self._pa_array)) + + _dt_dayofweek = _dt_day_of_week + _dt_weekday = _dt_day_of_week + + @property + def _dt_day_of_year(self): + return type(self)(pc.day_of_year(self._pa_array)) + + _dt_dayofyear = _dt_day_of_year + + @property + def _dt_hour(self): + return type(self)(pc.hour(self._pa_array)) + + def _dt_isocalendar(self): 
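+        # Editorial note (illustrative, not pandas source): pc.iso_calendar
+        # returns a struct-typed array with iso_year/iso_week/iso_day_of_week
+        # fields, e.g. for 2024-01-01 (a Monday): iso_year=2024, iso_week=1,
+        # iso_day_of_week=1.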
+ return type(self)(pc.iso_calendar(self._pa_array)) + + @property + def _dt_is_leap_year(self): + return type(self)(pc.is_leap_year(self._pa_array)) + + @property + def _dt_is_month_start(self): + return type(self)(pc.equal(pc.day(self._pa_array), 1)) + + @property + def _dt_is_month_end(self): + result = pc.equal( + pc.days_between( + pc.floor_temporal(self._pa_array, unit="day"), + pc.ceil_temporal(self._pa_array, unit="month"), + ), + 1, + ) + return type(self)(result) + + @property + def _dt_is_year_start(self): + return type(self)( + pc.and_( + pc.equal(pc.month(self._pa_array), 1), + pc.equal(pc.day(self._pa_array), 1), + ) + ) + + @property + def _dt_is_year_end(self): + return type(self)( + pc.and_( + pc.equal(pc.month(self._pa_array), 12), + pc.equal(pc.day(self._pa_array), 31), + ) + ) + + @property + def _dt_is_quarter_start(self): + result = pc.equal( + pc.floor_temporal(self._pa_array, unit="quarter"), + pc.floor_temporal(self._pa_array, unit="day"), + ) + return type(self)(result) + + @property + def _dt_is_quarter_end(self): + result = pc.equal( + pc.days_between( + pc.floor_temporal(self._pa_array, unit="day"), + pc.ceil_temporal(self._pa_array, unit="quarter"), + ), + 1, + ) + return type(self)(result) + + @property + def _dt_days_in_month(self): + result = pc.days_between( + pc.floor_temporal(self._pa_array, unit="month"), + pc.ceil_temporal(self._pa_array, unit="month"), + ) + return type(self)(result) + + _dt_daysinmonth = _dt_days_in_month + + @property + def _dt_microsecond(self): + return type(self)(pc.microsecond(self._pa_array)) + + @property + def _dt_minute(self): + return type(self)(pc.minute(self._pa_array)) + + @property + def _dt_month(self): + return type(self)(pc.month(self._pa_array)) + + @property + def _dt_nanosecond(self): + return type(self)(pc.nanosecond(self._pa_array)) + + @property + def _dt_quarter(self): + return type(self)(pc.quarter(self._pa_array)) + + @property + def _dt_second(self): + return type(self)(pc.second(self._pa_array)) + + @property + def _dt_date(self): + return type(self)(self._pa_array.cast(pa.date32())) + + @property + def _dt_time(self): + unit = ( + self.dtype.pyarrow_dtype.unit + if self.dtype.pyarrow_dtype.unit in {"us", "ns"} + else "ns" + ) + return type(self)(self._pa_array.cast(pa.time64(unit))) + + @property + def _dt_tz(self): + return timezones.maybe_get_tz(self.dtype.pyarrow_dtype.tz) + + @property + def _dt_unit(self): + return self.dtype.pyarrow_dtype.unit + + def _dt_normalize(self): + return type(self)(pc.floor_temporal(self._pa_array, 1, "day")) + + def _dt_strftime(self, format: str): + return type(self)(pc.strftime(self._pa_array, format=format)) + + def _round_temporally( + self, + method: Literal["ceil", "floor", "round"], + freq, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ): + if ambiguous != "raise": + raise NotImplementedError("ambiguous is not supported.") + if nonexistent != "raise": + raise NotImplementedError("nonexistent is not supported.") + offset = to_offset(freq) + if offset is None: + raise ValueError(f"Must specify a valid frequency: {freq}") + pa_supported_unit = { + "A": "year", + "AS": "year", + "Q": "quarter", + "QS": "quarter", + "M": "month", + "MS": "month", + "W": "week", + "D": "day", + "H": "hour", + "T": "minute", + "S": "second", + "L": "millisecond", + "U": "microsecond", + "N": "nanosecond", + } + unit = pa_supported_unit.get(offset._prefix, None) + if unit is None: + raise ValueError(f"{freq=} is not supported") + multiple = offset.n + 
rounding_method = getattr(pc, f"{method}_temporal") + return type(self)(rounding_method(self._pa_array, multiple=multiple, unit=unit)) + + def _dt_ceil( + self, + freq, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ): + return self._round_temporally("ceil", freq, ambiguous, nonexistent) + + def _dt_floor( + self, + freq, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ): + return self._round_temporally("floor", freq, ambiguous, nonexistent) + + def _dt_round( + self, + freq, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ): + return self._round_temporally("round", freq, ambiguous, nonexistent) + + def _dt_day_name(self, locale: str | None = None): + if locale is None: + locale = "C" + return type(self)(pc.strftime(self._pa_array, format="%A", locale=locale)) + + def _dt_month_name(self, locale: str | None = None): + if locale is None: + locale = "C" + return type(self)(pc.strftime(self._pa_array, format="%B", locale=locale)) + + def _dt_to_pydatetime(self): + if pa.types.is_date(self.dtype.pyarrow_dtype): + raise ValueError( + f"to_pydatetime cannot be called with {self.dtype.pyarrow_dtype} type. " + "Convert to pyarrow timestamp type." + ) + data = self._pa_array.to_pylist() + if self._dtype.pyarrow_dtype.unit == "ns": + data = [None if ts is None else ts.to_pydatetime(warn=False) for ts in data] + return np.array(data, dtype=object) + + def _dt_tz_localize( + self, + tz, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ): + if ambiguous != "raise": + raise NotImplementedError(f"{ambiguous=} is not supported") + nonexistent_pa = { + "raise": "raise", + "shift_backward": "earliest", + "shift_forward": "latest", + }.get( + nonexistent, None # type: ignore[arg-type] + ) + if nonexistent_pa is None: + raise NotImplementedError(f"{nonexistent=} is not supported") + if tz is None: + result = self._pa_array.cast(pa.timestamp(self.dtype.pyarrow_dtype.unit)) + else: + result = pc.assume_timezone( + self._pa_array, str(tz), ambiguous=ambiguous, nonexistent=nonexistent_pa + ) + return type(self)(result) + + def _dt_tz_convert(self, tz): + if self.dtype.pyarrow_dtype.tz is None: + raise TypeError( + "Cannot convert tz-naive timestamps, use tz_localize to localize" + ) + current_unit = self.dtype.pyarrow_dtype.unit + result = self._pa_array.cast(pa.timestamp(current_unit, tz)) + return type(self)(result) + + +def transpose_homogeneous_pyarrow( + arrays: Sequence[ArrowExtensionArray], +) -> list[ArrowExtensionArray]: + """Transpose arrow extension arrays in a list, but faster. + + Input should be a list of arrays of equal length and all have the same + dtype. The caller is responsible for ensuring validity of input data. 
+ """ + arrays = list(arrays) + nrows, ncols = len(arrays[0]), len(arrays) + indices = np.arange(nrows * ncols).reshape(ncols, nrows).T.flatten() + arr = pa.chunked_array([chunk for arr in arrays for chunk in arr._pa_array.chunks]) + arr = arr.take(indices) + return [ArrowExtensionArray(arr.slice(i * ncols, ncols)) for i in range(nrows)] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/extension_types.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/extension_types.py new file mode 100644 index 00000000..36d536bf --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/arrow/extension_types.py @@ -0,0 +1,174 @@ +from __future__ import annotations + +import json +from typing import TYPE_CHECKING + +import pyarrow + +from pandas.compat import pa_version_under14p1 + +from pandas.core.dtypes.dtypes import ( + IntervalDtype, + PeriodDtype, +) + +from pandas.core.arrays.interval import VALID_CLOSED + +if TYPE_CHECKING: + from pandas._typing import IntervalClosedType + + +class ArrowPeriodType(pyarrow.ExtensionType): + def __init__(self, freq) -> None: + # attributes need to be set first before calling + # super init (as that calls serialize) + self._freq = freq + pyarrow.ExtensionType.__init__(self, pyarrow.int64(), "pandas.period") + + @property + def freq(self): + return self._freq + + def __arrow_ext_serialize__(self) -> bytes: + metadata = {"freq": self.freq} + return json.dumps(metadata).encode() + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowPeriodType: + metadata = json.loads(serialized.decode()) + return ArrowPeriodType(metadata["freq"]) + + def __eq__(self, other): + if isinstance(other, pyarrow.BaseExtensionType): + return type(self) == type(other) and self.freq == other.freq + else: + return NotImplemented + + def __ne__(self, other) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash((str(self), self.freq)) + + def to_pandas_dtype(self): + return PeriodDtype(freq=self.freq) + + +# register the type with a dummy instance +_period_type = ArrowPeriodType("D") +pyarrow.register_extension_type(_period_type) + + +class ArrowIntervalType(pyarrow.ExtensionType): + def __init__(self, subtype, closed: IntervalClosedType) -> None: + # attributes need to be set first before calling + # super init (as that calls serialize) + assert closed in VALID_CLOSED + self._closed: IntervalClosedType = closed + if not isinstance(subtype, pyarrow.DataType): + subtype = pyarrow.type_for_alias(str(subtype)) + self._subtype = subtype + + storage_type = pyarrow.struct([("left", subtype), ("right", subtype)]) + pyarrow.ExtensionType.__init__(self, storage_type, "pandas.interval") + + @property + def subtype(self): + return self._subtype + + @property + def closed(self) -> IntervalClosedType: + return self._closed + + def __arrow_ext_serialize__(self) -> bytes: + metadata = {"subtype": str(self.subtype), "closed": self.closed} + return json.dumps(metadata).encode() + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowIntervalType: + metadata = json.loads(serialized.decode()) + subtype = pyarrow.type_for_alias(metadata["subtype"]) + closed = metadata["closed"] + return ArrowIntervalType(subtype, closed) + + def __eq__(self, other): + if isinstance(other, pyarrow.BaseExtensionType): + return ( + type(self) == type(other) + and self.subtype == other.subtype + and self.closed == other.closed + ) + else: + return NotImplemented + + def __ne__(self, 
other) -> bool:
+        return not self == other
+
+    def __hash__(self) -> int:
+        return hash((str(self), str(self.subtype), self.closed))
+
+    def to_pandas_dtype(self):
+        return IntervalDtype(self.subtype.to_pandas_dtype(), self.closed)
+
+
+# register the type with a dummy instance
+_interval_type = ArrowIntervalType(pyarrow.int64(), "left")
+pyarrow.register_extension_type(_interval_type)
+
+
+_ERROR_MSG = """\
+Disallowed deserialization of 'arrow.py_extension_type':
+storage_type = {storage_type}
+serialized = {serialized}
+pickle disassembly:\n{pickle_disassembly}
+
+Reading of untrusted Parquet or Feather files with a PyExtensionType column
+allows arbitrary code execution.
+If you trust this file, you can enable reading the extension type by one of:
+
+- upgrading to pyarrow >= 14.0.1 and calling `pa.PyExtensionType.set_auto_load(True)`
+- installing pyarrow-hotfix (`pip install pyarrow-hotfix`) and disabling it by
+  running `import pyarrow_hotfix; pyarrow_hotfix.uninstall()`
+
+We strongly recommend updating your Parquet/Feather files to use extension types
+derived from `pyarrow.ExtensionType` instead, and registering this type explicitly.
+"""
+
+
+def patch_pyarrow():
+    # starting from pyarrow 14.0.1, it has its own mechanism
+    if not pa_version_under14p1:
+        return
+
+    # if https://github.com/pitrou/pyarrow-hotfix was installed and enabled
+    if getattr(pyarrow, "_hotfix_installed", False):
+        return
+
+    class ForbiddenExtensionType(pyarrow.ExtensionType):
+        def __arrow_ext_serialize__(self):
+            return b""
+
+        @classmethod
+        def __arrow_ext_deserialize__(cls, storage_type, serialized):
+            import io
+            import pickletools
+
+            out = io.StringIO()
+            pickletools.dis(serialized, out)
+            raise RuntimeError(
+                _ERROR_MSG.format(
+                    storage_type=storage_type,
+                    serialized=serialized,
+                    pickle_disassembly=out.getvalue(),
+                )
+            )
+
+    pyarrow.unregister_extension_type("arrow.py_extension_type")
+    pyarrow.register_extension_type(
+        ForbiddenExtensionType(pyarrow.null(), "arrow.py_extension_type")
+    )
+
+    pyarrow._hotfix_installed = True
+
+
+patch_pyarrow()
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/base.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/base.py
new file mode 100644
index 00000000..9a859b01
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/base.py
@@ -0,0 +1,2454 @@
+"""
+An interface for extending pandas with custom arrays.
+
+.. warning::
+
+   This is an experimental API and subject to breaking changes
+   without warning.
+""" +from __future__ import annotations + +import operator +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Literal, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import ( + algos as libalgos, + lib, +) +from pandas.compat import set_function_name +from pandas.compat.numpy import function as nv +from pandas.errors import AbstractMethodError +from pandas.util._decorators import ( + Appender, + Substitution, + cache_readonly, +) +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import ( + validate_bool_kwarg, + validate_fillna_kwargs, + validate_insert_loc, +) + +from pandas.core.dtypes.cast import maybe_cast_pointwise_result +from pandas.core.dtypes.common import ( + is_list_like, + is_scalar, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import isna + +from pandas.core import ( + arraylike, + missing, + roperator, +) +from pandas.core.algorithms import ( + factorize_array, + isin, + map_array, + mode, + rank, + unique, +) +from pandas.core.array_algos.quantile import quantile_with_mask +from pandas.core.sorting import ( + nargminmax, + nargsort, +) + +if TYPE_CHECKING: + from collections.abc import ( + Iterator, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + AstypeArg, + AxisInt, + Dtype, + FillnaOptions, + InterpolateOptions, + NumpySorter, + NumpyValueArrayLike, + PositionalIndexer, + ScalarIndexer, + Self, + SequenceIndexer, + Shape, + SortKind, + TakeIndexer, + npt, + ) + + from pandas import Index + +_extension_array_shared_docs: dict[str, str] = {} + + +class ExtensionArray: + """ + Abstract base class for custom 1-D array types. + + pandas will recognize instances of this class as proper arrays + with a custom type and will not attempt to coerce them to objects. They + may be stored directly inside a :class:`DataFrame` or :class:`Series`. + + Attributes + ---------- + dtype + nbytes + ndim + shape + + Methods + ------- + argsort + astype + copy + dropna + factorize + fillna + equals + insert + interpolate + isin + isna + ravel + repeat + searchsorted + shift + take + tolist + unique + view + _accumulate + _concat_same_type + _formatter + _from_factorized + _from_sequence + _from_sequence_of_strings + _hash_pandas_object + _pad_or_backfill + _reduce + _values_for_argsort + _values_for_factorize + + Notes + ----- + The interface includes the following abstract methods that must be + implemented by subclasses: + + * _from_sequence + * _from_factorized + * __getitem__ + * __len__ + * __eq__ + * dtype + * nbytes + * isna + * take + * copy + * _concat_same_type + * interpolate + + A default repr displaying the type, (truncated) data, length, + and dtype is provided. It can be customized or replaced by + by overriding: + + * __repr__ : A default repr for the ExtensionArray. + * _formatter : Print scalars inside a Series or DataFrame. + + Some methods require casting the ExtensionArray to an ndarray of Python + objects with ``self.astype(object)``, which may be expensive. When + performance is a concern, we highly recommend overriding the following + methods: + + * fillna + * _pad_or_backfill + * dropna + * unique + * factorize / _values_for_factorize + * argsort, argmax, argmin / _values_for_argsort + * searchsorted + * map + + The remaining methods implemented on this class should be performant, + as they only compose abstract methods. 
+    implementation may be available, and these methods can be overridden.
+
+    One can implement methods to handle array accumulations or reductions.
+
+    * _accumulate
+    * _reduce
+
+    One can implement methods to handle parsing from strings that will be used
+    in methods such as ``pandas.io.parsers.read_csv``.
+
+    * _from_sequence_of_strings
+
+    This class does not inherit from 'abc.ABCMeta' for performance reasons.
+    Methods and properties required by the interface raise
+    ``pandas.errors.AbstractMethodError`` and no ``register`` method is
+    provided for registering virtual subclasses.
+
+    ExtensionArrays are limited to 1 dimension.
+
+    They may be backed by none, one, or many NumPy arrays. For example,
+    ``pandas.Categorical`` is an extension array backed by two arrays,
+    one for codes and one for categories. An array of IPv6 addresses may
+    be backed by a NumPy structured array with two fields, one for the
+    lower 64 bits and one for the upper 64 bits. Or they may be backed
+    by some other storage type, like Python lists. Pandas makes no
+    assumptions on how the data are stored, just that it can be converted
+    to a NumPy array.
+    The ExtensionArray interface does not impose any rules on how this data
+    is stored. However, currently, the backing data cannot be stored in
+    attributes called ``.values`` or ``._values`` to ensure full compatibility
+    with pandas internals. But other names such as ``.data``, ``._data``,
+    ``._items``, ... can be freely used.
+
+    If implementing NumPy's ``__array_ufunc__`` interface, pandas expects
+    that
+
+    1. You defer by returning ``NotImplemented`` when any Series are present
+       in `inputs`. Pandas will extract the arrays and call the ufunc again.
+    2. You define a ``_HANDLED_TYPES`` tuple as an attribute on the class.
+       Pandas inspects this to determine whether the ufunc is valid for the
+       types present.
+
+    See :ref:`extending.extension.ufunc` for more.
+
+    By default, ExtensionArrays are not hashable. Immutable subclasses may
+    override this behavior.
+
+    Examples
+    --------
+    Please see the following:
+
+    https://github.com/pandas-dev/pandas/blob/main/pandas/tests/extension/list/array.py
+    """
+
+    # '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
+    # Don't override this.
+    _typ = "extension"
+
+    # similar to __array_priority__, positions ExtensionArray after Index,
+    # Series, and DataFrame. EA subclasses may override to choose which EA
+    # subclass takes priority. If overriding, the value should always be
+    # strictly less than 2000 to be below Index.__pandas_priority__.
+    __pandas_priority__ = 1000
+
+    # ------------------------------------------------------------------------
+    # Constructors
+    # ------------------------------------------------------------------------
+
+    @classmethod
+    def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False):
+        """
+        Construct a new ExtensionArray from a sequence of scalars.
+
+        Parameters
+        ----------
+        scalars : Sequence
+            Each element will be an instance of the scalar type for this
+            array, ``cls.dtype.type``, or be converted into this type in this method.
+        dtype : dtype, optional
+            Construct for this particular dtype. This should be a Dtype
+            compatible with the ExtensionArray.
+        copy : bool, default False
+            If True, copy the underlying data.
+
+        Returns
+        -------
+        ExtensionArray
+
+        Examples
+        --------
+        >>> pd.arrays.IntegerArray._from_sequence([4, 5])
+        <IntegerArray>
+        [4, 5]
+        Length: 2, dtype: Int64
+        """
+        raise AbstractMethodError(cls)
+
+    @classmethod
+    def _from_sequence_of_strings(
+        cls, strings, *, dtype: Dtype | None = None, copy: bool = False
+    ):
+        """
+        Construct a new ExtensionArray from a sequence of strings.
+
+        Parameters
+        ----------
+        strings : Sequence
+            Each element will be an instance of the scalar type for this
+            array, ``cls.dtype.type``.
+        dtype : dtype, optional
+            Construct for this particular dtype. This should be a Dtype
+            compatible with the ExtensionArray.
+        copy : bool, default False
+            If True, copy the underlying data.
+
+        Returns
+        -------
+        ExtensionArray
+
+        Examples
+        --------
+        >>> pd.arrays.IntegerArray._from_sequence_of_strings(["1", "2", "3"])
+        <IntegerArray>
+        [1, 2, 3]
+        Length: 3, dtype: Int64
+        """
+        raise AbstractMethodError(cls)
+
+    @classmethod
+    def _from_factorized(cls, values, original):
+        """
+        Reconstruct an ExtensionArray after factorization.
+
+        Parameters
+        ----------
+        values : ndarray
+            An integer ndarray with the factorized values.
+        original : ExtensionArray
+            The original ExtensionArray that factorize was called on.
+
+        See Also
+        --------
+        factorize : Top-level factorize method that dispatches here.
+        ExtensionArray.factorize : Encode the extension array as an enumerated type.
+
+        Examples
+        --------
+        >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1),
+        ...                                       pd.Interval(1, 5), pd.Interval(1, 5)])
+        >>> codes, uniques = pd.factorize(interv_arr)
+        >>> pd.arrays.IntervalArray._from_factorized(uniques, interv_arr)
+        <IntervalArray>
+        [(0, 1], (1, 5]]
+        Length: 2, dtype: interval[int64, right]
+        """
+        raise AbstractMethodError(cls)
+
+    # ------------------------------------------------------------------------
+    # Must be a Sequence
+    # ------------------------------------------------------------------------
+    @overload
+    def __getitem__(self, item: ScalarIndexer) -> Any:
+        ...
+
+    @overload
+    def __getitem__(self, item: SequenceIndexer) -> Self:
+        ...
+
+    def __getitem__(self, item: PositionalIndexer) -> Self | Any:
+        """
+        Select a subset of self.
+
+        Parameters
+        ----------
+        item : int, slice, or ndarray
+            * int: The position in 'self' to get.
+
+            * slice: A slice object, where 'start', 'stop', and 'step' are
+              integers or None
+
+            * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'
+
+            * list[int]: A list of int
+
+        Returns
+        -------
+        item : scalar or ExtensionArray
+
+        Notes
+        -----
+        For scalar ``item``, return a scalar value suitable for the array's
+        type. This should be an instance of ``self.dtype.type``.
+
+        For slice ``key``, return an instance of ``ExtensionArray``, even
+        if the slice is length 0 or 1.
+
+        For a boolean mask, return an instance of ``ExtensionArray``, filtered
+        to the values where ``item`` is True.
+        """
+        raise AbstractMethodError(self)
+
+    def __setitem__(self, key, value) -> None:
+        """
+        Set one or more values inplace.
+
+        This method is not required to satisfy the pandas extension array
+        interface.
+
+        Parameters
+        ----------
+        key : int, ndarray, or slice
+            When called from, e.g. ``Series.__setitem__``, ``key`` will be
+            one of
+
+            * scalar int
+            * ndarray of integers.
+            * boolean ndarray
+            * slice object
+
+        value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object
+            value or values to be set at ``key``.
+
+        Returns
+        -------
+        None
+        """
+        # Some notes to the ExtensionArray implementor who may have ended up
+        # here. While this method is not required for the interface, if you
+        # *do* choose to implement __setitem__, then some semantics should be
+        # observed:
+        #
+        # * Setting multiple values : ExtensionArrays should support setting
+        #   multiple values at once, 'key' will be a sequence of integers and
+        #   'value' will be a same-length sequence.
+        #
+        # * Broadcasting : For a sequence 'key' and a scalar 'value',
+        #   each position in 'key' should be set to 'value'.
+        #
+        # * Coercion : Most users will expect basic coercion to work. For
+        #   example, a string like '2018-01-01' is coerced to a datetime
+        #   when setting on a datetime64ns array. In general, if the
+        #   __init__ method coerces that value, then so should __setitem__
+        # Note, also, that Series/DataFrame.where internally use __setitem__
+        # on a copy of the data.
+        raise NotImplementedError(f"{type(self)} does not implement __setitem__.")
+
+    def __len__(self) -> int:
+        """
+        Length of this array.
+
+        Returns
+        -------
+        length : int
+        """
+        raise AbstractMethodError(self)
+
+    def __iter__(self) -> Iterator[Any]:
+        """
+        Iterate over elements of the array.
+        """
+        # This needs to be implemented so that pandas recognizes extension
+        # arrays as list-like. The default implementation makes successive
+        # calls to ``__getitem__``, which may be slower than necessary.
+        for i in range(len(self)):
+            yield self[i]
+
+    def __contains__(self, item: object) -> bool | np.bool_:
+        """
+        Return for `item in self`.
+        """
+        # GH37867
+        # comparisons of any item to pd.NA always return pd.NA, so e.g. "a" in [pd.NA]
+        # would raise a TypeError. The implementation below works around that.
+        if is_scalar(item) and isna(item):
+            if not self._can_hold_na:
+                return False
+            elif item is self.dtype.na_value or isinstance(item, self.dtype.type):
+                return self._hasna
+            else:
+                return False
+        else:
+            # error: Item "ExtensionArray" of "Union[ExtensionArray, ndarray]" has no
+            # attribute "any"
+            return (item == self).any()  # type: ignore[union-attr]
+
+    # error: Signature of "__eq__" incompatible with supertype "object"
+    def __eq__(self, other: Any) -> ArrayLike:  # type: ignore[override]
+        """
+        Return for `self == other` (element-wise equality).
+        """
+        # Implementer note: this should return a boolean numpy ndarray or
+        # a boolean ExtensionArray.
+        # When `other` is one of Series, Index, or DataFrame, this method should
+        # return NotImplemented (to ensure that those objects are responsible for
+        # first unpacking the arrays, and then dispatch the operation to the
+        # underlying arrays)
+        raise AbstractMethodError(self)
+
+    # error: Signature of "__ne__" incompatible with supertype "object"
+    def __ne__(self, other: Any) -> ArrayLike:  # type: ignore[override]
+        """
+        Return for `self != other` (element-wise inequality).
+        """
+        return ~(self == other)
+
+    def to_numpy(
+        self,
+        dtype: npt.DTypeLike | None = None,
+        copy: bool = False,
+        na_value: object = lib.no_default,
+    ) -> np.ndarray:
+        """
+        Convert to a NumPy ndarray.
+
+        This is similar to :meth:`numpy.asarray`, but may provide additional control
+        over how the conversion is done.
+
+        Parameters
+        ----------
+        dtype : str or numpy.dtype, optional
+            The dtype to pass to :meth:`numpy.asarray`.
+        copy : bool, default False
+            Whether to ensure that the returned value is not a view on
+            another array. Note that ``copy=False`` does not *ensure* that
+            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
+            a copy is made, even if not strictly necessary.
+ na_value : Any, optional + The value to use for missing values. The default value depends + on `dtype` and the type of the array. + + Returns + ------- + numpy.ndarray + """ + result = np.asarray(self, dtype=dtype) + if copy or na_value is not lib.no_default: + result = result.copy() + if na_value is not lib.no_default: + result[self.isna()] = na_value + return result + + # ------------------------------------------------------------------------ + # Required attributes + # ------------------------------------------------------------------------ + + @property + def dtype(self) -> ExtensionDtype: + """ + An instance of ExtensionDtype. + + Examples + -------- + >>> pd.array([1, 2, 3]).dtype + Int64Dtype() + """ + raise AbstractMethodError(self) + + @property + def shape(self) -> Shape: + """ + Return a tuple of the array dimensions. + + Examples + -------- + >>> arr = pd.array([1, 2, 3]) + >>> arr.shape + (3,) + """ + return (len(self),) + + @property + def size(self) -> int: + """ + The number of elements in the array. + """ + # error: Incompatible return value type (got "signedinteger[_64Bit]", + # expected "int") [return-value] + return np.prod(self.shape) # type: ignore[return-value] + + @property + def ndim(self) -> int: + """ + Extension Arrays are only allowed to be 1-dimensional. + + Examples + -------- + >>> arr = pd.array([1, 2, 3]) + >>> arr.ndim + 1 + """ + return 1 + + @property + def nbytes(self) -> int: + """ + The number of bytes needed to store this object in memory. + + Examples + -------- + >>> pd.array([1, 2, 3]).nbytes + 27 + """ + # If this is expensive to compute, return an approximate lower bound + # on the number of bytes needed. + raise AbstractMethodError(self) + + # ------------------------------------------------------------------------ + # Additional Methods + # ------------------------------------------------------------------------ + + @overload + def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: + ... + + @overload + def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: + ... + + @overload + def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: + ... + + def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike: + """ + Cast to a NumPy array or ExtensionArray with 'dtype'. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + copy : bool, default True + Whether to copy the data, even if not necessary. If False, + a copy is made only if the old dtype does not match the + new dtype. + + Returns + ------- + np.ndarray or pandas.api.extensions.ExtensionArray + An ``ExtensionArray`` if ``dtype`` is ``ExtensionDtype``, + otherwise a Numpy ndarray with ``dtype`` for its dtype. 
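+
+        Notes
+        -----
+        Casting to a NumPy ``datetime64`` or ``timedelta64`` dtype is
+        dispatched to ``DatetimeArray`` or ``TimedeltaArray`` (see the
+        implementation below), so those two NumPy dtypes also produce an
+        extension array rather than an ndarray.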
+
+        Examples
+        --------
+        >>> arr = pd.array([1, 2, 3])
+        >>> arr
+        <IntegerArray>
+        [1, 2, 3]
+        Length: 3, dtype: Int64
+
+        Casting to another ``ExtensionDtype`` returns an ``ExtensionArray``:
+
+        >>> arr1 = arr.astype('Float64')
+        >>> arr1
+        <FloatingArray>
+        [1.0, 2.0, 3.0]
+        Length: 3, dtype: Float64
+        >>> arr1.dtype
+        Float64Dtype()
+
+        Otherwise, we will get a Numpy ndarray:
+
+        >>> arr2 = arr.astype('float64')
+        >>> arr2
+        array([1., 2., 3.])
+        >>> arr2.dtype
+        dtype('float64')
+        """
+        dtype = pandas_dtype(dtype)
+        if dtype == self.dtype:
+            if not copy:
+                return self
+            else:
+                return self.copy()
+
+        if isinstance(dtype, ExtensionDtype):
+            cls = dtype.construct_array_type()
+            return cls._from_sequence(self, dtype=dtype, copy=copy)
+
+        elif lib.is_np_dtype(dtype, "M"):
+            from pandas.core.arrays import DatetimeArray
+
+            return DatetimeArray._from_sequence(self, dtype=dtype, copy=copy)
+
+        elif lib.is_np_dtype(dtype, "m"):
+            from pandas.core.arrays import TimedeltaArray
+
+            return TimedeltaArray._from_sequence(self, dtype=dtype, copy=copy)
+
+        return np.array(self, dtype=dtype, copy=copy)
+
+    def isna(self) -> np.ndarray | ExtensionArraySupportsAnyAll:
+        """
+        A 1-D array indicating if each value is missing.
+
+        Returns
+        -------
+        numpy.ndarray or pandas.api.extensions.ExtensionArray
+            In most cases, this should return a NumPy ndarray. For
+            exceptional cases like ``SparseArray``, where returning
+            an ndarray would be expensive, an ExtensionArray may be
+            returned.
+
+        Notes
+        -----
+        If returning an ExtensionArray, then
+
+        * ``na_values._is_boolean`` should be True
+        * `na_values` should implement :func:`ExtensionArray._reduce`
+        * ``na_values.any`` and ``na_values.all`` should be implemented
+
+        Examples
+        --------
+        >>> arr = pd.array([1, 2, np.nan, np.nan])
+        >>> arr.isna()
+        array([False, False,  True,  True])
+        """
+        raise AbstractMethodError(self)
+
+    @property
+    def _hasna(self) -> bool:
+        # GH#22680
+        """
+        Equivalent to `self.isna().any()`.
+
+        Some ExtensionArray subclasses may be able to optimize this check.
+        """
+        return bool(self.isna().any())
+
+    def _values_for_argsort(self) -> np.ndarray:
+        """
+        Return values for sorting.
+
+        Returns
+        -------
+        ndarray
+            The transformed values should maintain the ordering between values
+            within the array.
+
+        See Also
+        --------
+        ExtensionArray.argsort : Return the indices that would sort this array.
+
+        Notes
+        -----
+        The caller is responsible for *not* modifying these values in-place, so
+        it is safe for implementors to give views on ``self``.
+
+        Functions that use this (e.g. ``ExtensionArray.argsort``) should ignore
+        entries with missing values in the original array (according to
+        ``self.isna()``). This means that the corresponding entries in the returned
+        array don't need to be modified to sort correctly.
+
+        Examples
+        --------
+        In most cases, this is the underlying Numpy array of the ``ExtensionArray``:
+
+        >>> arr = pd.array([1, 2, 3])
+        >>> arr._values_for_argsort()
+        array([1, 2, 3])
+        """
+        # Note: this is used in `ExtensionArray.argsort/argmin/argmax`.
+        return np.array(self)
+
+    def argsort(
+        self,
+        *,
+        ascending: bool = True,
+        kind: SortKind = "quicksort",
+        na_position: str = "last",
+        **kwargs,
+    ) -> np.ndarray:
+        """
+        Return the indices that would sort this array.
+
+        Parameters
+        ----------
+        ascending : bool, default True
+            Whether the indices should result in an ascending
+            or descending sort.
+        kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+            Sorting algorithm.
+ na_position : {'first', 'last'}, default 'last' + If ``'first'``, put ``NaN`` values at the beginning. + If ``'last'``, put ``NaN`` values at the end. + *args, **kwargs: + Passed through to :func:`numpy.argsort`. + + Returns + ------- + np.ndarray[np.intp] + Array of indices that sort ``self``. If NaN values are contained, + NaN values are placed at the end. + + See Also + -------- + numpy.argsort : Sorting implementation used internally. + + Examples + -------- + >>> arr = pd.array([3, 1, 2, 5, 4]) + >>> arr.argsort() + array([1, 2, 0, 4, 3]) + """ + # Implementor note: You have two places to override the behavior of + # argsort. + # 1. _values_for_argsort : construct the values passed to np.argsort + # 2. argsort : total control over sorting. In case of overriding this, + # it is recommended to also override argmax/argmin + ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs) + + values = self._values_for_argsort() + return nargsort( + values, + kind=kind, + ascending=ascending, + na_position=na_position, + mask=np.asarray(self.isna()), + ) + + def argmin(self, skipna: bool = True) -> int: + """ + Return the index of minimum value. + + In case of multiple occurrences of the minimum value, the index + corresponding to the first occurrence is returned. + + Parameters + ---------- + skipna : bool, default True + + Returns + ------- + int + + See Also + -------- + ExtensionArray.argmax : Return the index of the maximum value. + + Examples + -------- + >>> arr = pd.array([3, 1, 2, 5, 4]) + >>> arr.argmin() + 1 + """ + # Implementor note: You have two places to override the behavior of + # argmin. + # 1. _values_for_argsort : construct the values used in nargminmax + # 2. argmin itself : total control over sorting. + validate_bool_kwarg(skipna, "skipna") + if not skipna and self._hasna: + raise NotImplementedError + return nargminmax(self, "argmin") + + def argmax(self, skipna: bool = True) -> int: + """ + Return the index of maximum value. + + In case of multiple occurrences of the maximum value, the index + corresponding to the first occurrence is returned. + + Parameters + ---------- + skipna : bool, default True + + Returns + ------- + int + + See Also + -------- + ExtensionArray.argmin : Return the index of the minimum value. + + Examples + -------- + >>> arr = pd.array([3, 1, 2, 5, 4]) + >>> arr.argmax() + 3 + """ + # Implementor note: You have two places to override the behavior of + # argmax. + # 1. _values_for_argsort : construct the values used in nargminmax + # 2. argmax itself : total control over sorting. + validate_bool_kwarg(skipna, "skipna") + if not skipna and self._hasna: + raise NotImplementedError + return nargminmax(self, "argmax") + + def interpolate( + self, + *, + method: InterpolateOptions, + axis: int, + index: Index, + limit, + limit_direction, + limit_area, + copy: bool, + **kwargs, + ) -> Self: + """ + See DataFrame.interpolate.__doc__. + + Examples + -------- + >>> arr = pd.arrays.NumpyExtensionArray(np.array([0, 1, np.nan, 3])) + >>> arr.interpolate(method="linear", + ... limit=3, + ... limit_direction="forward", + ... index=pd.Index([1, 2, 3, 4]), + ... fill_value=1, + ... copy=False, + ... axis=0, + ... limit_area="inside" + ... 
)
+        <NumpyExtensionArray>
+        [0.0, 1.0, 2.0, 3.0]
+        Length: 4, dtype: float64
+        """
+        # NB: we return type(self) even if copy=False
+        raise NotImplementedError(
+            f"{type(self).__name__} does not implement interpolate"
+        )
+
+    def _pad_or_backfill(
+        self, *, method: FillnaOptions, limit: int | None = None, copy: bool = True
+    ) -> Self:
+        """
+        Pad or backfill values, used by Series/DataFrame ffill and bfill.
+
+        Parameters
+        ----------
+        method : {'backfill', 'bfill', 'pad', 'ffill'}
+            Method to use for filling holes in reindexed Series:
+
+            * pad / ffill: propagate last valid observation forward to next valid.
+            * backfill / bfill: use NEXT valid observation to fill gap.
+
+        limit : int, default None
+            This is the maximum number of consecutive
+            NaN values to forward/backward fill. In other words, if there is
+            a gap with more than this number of consecutive NaNs, it will only
+            be partially filled. If method is not specified, this is the
+            maximum number of entries along the entire axis where NaNs will be
+            filled.
+
+        copy : bool, default True
+            Whether to make a copy of the data before filling. If False, then
+            the original should be modified and no new memory should be allocated.
+            For ExtensionArray subclasses that cannot do this, it is at the
+            author's discretion whether to ignore "copy=False" or to raise.
+            The base class implementation ignores the keyword if any NAs are
+            present.
+
+        Returns
+        -------
+        Same type as self
+
+        Examples
+        --------
+        >>> arr = pd.array([np.nan, np.nan, 2, 3, np.nan, np.nan])
+        >>> arr._pad_or_backfill(method="backfill", limit=1)
+        <IntegerArray>
+        [<NA>, 2, 2, 3, <NA>, <NA>]
+        Length: 6, dtype: Int64
+        """
+
+        # If a 3rd-party EA has implemented this functionality in fillna,
+        # we warn that they need to implement _pad_or_backfill instead.
+        if (
+            type(self).fillna is not ExtensionArray.fillna
+            and type(self)._pad_or_backfill is ExtensionArray._pad_or_backfill
+        ):
+            # Check for _pad_or_backfill here allows us to call
+            # super()._pad_or_backfill without getting this warning
+            warnings.warn(
+                "ExtensionArray.fillna 'method' keyword is deprecated. "
+                "In a future version, arr._pad_or_backfill will be called "
+                "instead. 3rd-party ExtensionArray authors need to implement "
+                "_pad_or_backfill.",
+                DeprecationWarning,
+                stacklevel=find_stack_level(),
+            )
+            return self.fillna(method=method, limit=limit)
+
+        mask = self.isna()
+
+        if mask.any():
+            # NB: the base class does not respect the "copy" keyword
+            meth = missing.clean_fill_method(method)
+
+            npmask = np.asarray(mask)
+            if meth == "pad":
+                indexer = libalgos.get_fill_indexer(npmask, limit=limit)
+                return self.take(indexer, allow_fill=True)
+            else:
+                # i.e. meth == "backfill"
+                indexer = libalgos.get_fill_indexer(npmask[::-1], limit=limit)[::-1]
+                return self[::-1].take(indexer, allow_fill=True)
+
+        else:
+            if not copy:
+                return self
+            new_values = self.copy()
+            return new_values
+
+    def fillna(
+        self,
+        value: object | ArrayLike | None = None,
+        method: FillnaOptions | None = None,
+        limit: int | None = None,
+        copy: bool = True,
+    ) -> Self:
+        """
+        Fill NA/NaN values using the specified method.
+
+        Parameters
+        ----------
+        value : scalar, array-like
+            If a scalar value is passed it is used to fill all missing values.
+            Alternatively, an array-like "value" can be given. It's expected
+            that the array-like have the same length as 'self'.
+        method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
+            Method to use for filling holes in reindexed Series:
+
+            * pad / ffill: propagate last valid observation forward to next valid.
+            * backfill / bfill: use NEXT valid observation to fill gap.
+
+            .. deprecated:: 2.1.0
+
+        limit : int, default None
+            If method is specified, this is the maximum number of consecutive
+            NaN values to forward/backward fill. In other words, if there is
+            a gap with more than this number of consecutive NaNs, it will only
+            be partially filled. If method is not specified, this is the
+            maximum number of entries along the entire axis where NaNs will be
+            filled.
+
+            .. deprecated:: 2.1.0
+
+        copy : bool, default True
+            Whether to make a copy of the data before filling. If False, then
+            the original should be modified and no new memory should be allocated.
+            For ExtensionArray subclasses that cannot do this, it is at the
+            author's discretion whether to ignore "copy=False" or to raise.
+            The base class implementation ignores the keyword in pad/backfill
+            cases.
+
+        Returns
+        -------
+        ExtensionArray
+            With NA/NaN filled.
+
+        Examples
+        --------
+        >>> arr = pd.array([np.nan, np.nan, 2, 3, np.nan, np.nan])
+        >>> arr.fillna(0)
+        <IntegerArray>
+        [0, 0, 2, 3, 0, 0]
+        Length: 6, dtype: Int64
+        """
+        if method is not None:
+            warnings.warn(
+                f"The 'method' keyword in {type(self).__name__}.fillna is "
+                "deprecated and will be removed in a future version.",
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
+
+        value, method = validate_fillna_kwargs(value, method)
+
+        mask = self.isna()
+        # error: Argument 2 to "check_value_size" has incompatible type
+        # "ExtensionArray"; expected "ndarray"
+        value = missing.check_value_size(
+            value, mask, len(self)  # type: ignore[arg-type]
+        )
+
+        if mask.any():
+            if method is not None:
+                meth = missing.clean_fill_method(method)
+
+                npmask = np.asarray(mask)
+                if meth == "pad":
+                    indexer = libalgos.get_fill_indexer(npmask, limit=limit)
+                    return self.take(indexer, allow_fill=True)
+                else:
+                    # i.e. meth == "backfill"
+                    indexer = libalgos.get_fill_indexer(npmask[::-1], limit=limit)[::-1]
+                    return self[::-1].take(indexer, allow_fill=True)
+            else:
+                # fill with value
+                if not copy:
+                    new_values = self[:]
+                else:
+                    new_values = self.copy()
+                new_values[mask] = value
+        else:
+            if not copy:
+                new_values = self[:]
+            else:
+                new_values = self.copy()
+        return new_values
+
+    def dropna(self) -> Self:
+        """
+        Return ExtensionArray without NA values.
+
+        Returns
+        -------
+        ExtensionArray
+
+        Examples
+        --------
+        >>> pd.array([1, 2, np.nan]).dropna()
+        <IntegerArray>
+        [1, 2]
+        Length: 2, dtype: Int64
+        """
+        # error: Unsupported operand type for ~ ("ExtensionArray")
+        return self[~self.isna()]  # type: ignore[operator]
+
+    def shift(self, periods: int = 1, fill_value: object = None) -> ExtensionArray:
+        """
+        Shift values by desired number.
+
+        Newly introduced missing values are filled with
+        ``self.dtype.na_value``.
+
+        Parameters
+        ----------
+        periods : int, default 1
+            The number of periods to shift. Negative values are allowed
+            for shifting backwards.
+
+        fill_value : object, optional
+            The scalar value to use for newly introduced missing values.
+            The default is ``self.dtype.na_value``.
+
+        Returns
+        -------
+        ExtensionArray
+            Shifted.
+
+        Notes
+        -----
+        If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is
+        returned.
+
+        If ``periods > len(self)``, then an array of size
+        len(self) is returned, with all values filled with
+        ``self.dtype.na_value``.
+
+        For 2-dimensional ExtensionArrays, we are always shifting along axis=0.
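+
+        For example, ``pd.array([1, 2, 3]).shift(5)`` returns an all-NA
+        array of length 3, since ``periods`` exceeds the length of the array.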
+
+        Examples
+        --------
+        >>> arr = pd.array([1, 2, 3])
+        >>> arr.shift(2)
+        <IntegerArray>
+        [<NA>, <NA>, 1]
+        Length: 3, dtype: Int64
+        """
+        # Note: this implementation assumes that `self.dtype.na_value` can be
+        # stored in an instance of your ExtensionArray with `self.dtype`.
+        if not len(self) or periods == 0:
+            return self.copy()
+
+        if isna(fill_value):
+            fill_value = self.dtype.na_value
+
+        empty = self._from_sequence(
+            [fill_value] * min(abs(periods), len(self)), dtype=self.dtype
+        )
+        if periods > 0:
+            a = empty
+            b = self[:-periods]
+        else:
+            a = self[abs(periods) :]
+            b = empty
+        return self._concat_same_type([a, b])
+
+    def unique(self) -> Self:
+        """
+        Compute the ExtensionArray of unique values.
+
+        Returns
+        -------
+        pandas.api.extensions.ExtensionArray
+
+        Examples
+        --------
+        >>> arr = pd.array([1, 2, 3, 1, 2, 3])
+        >>> arr.unique()
+        <IntegerArray>
+        [1, 2, 3]
+        Length: 3, dtype: Int64
+        """
+        uniques = unique(self.astype(object))
+        return self._from_sequence(uniques, dtype=self.dtype)
+
+    def searchsorted(
+        self,
+        value: NumpyValueArrayLike | ExtensionArray,
+        side: Literal["left", "right"] = "left",
+        sorter: NumpySorter | None = None,
+    ) -> npt.NDArray[np.intp] | np.intp:
+        """
+        Find indices where elements should be inserted to maintain order.
+
+        Find the indices into a sorted array `self` (a) such that, if the
+        corresponding elements in `value` were inserted before the indices,
+        the order of `self` would be preserved.
+
+        Assuming that `self` is sorted:
+
+        ======  ================================
+        `side`  returned index `i` satisfies
+        ======  ================================
+        left    ``self[i-1] < value <= self[i]``
+        right   ``self[i-1] <= value < self[i]``
+        ======  ================================
+
+        Parameters
+        ----------
+        value : array-like, list or scalar
+            Value(s) to insert into `self`.
+        side : {'left', 'right'}, optional
+            If 'left', the index of the first suitable location found is given.
+            If 'right', return the last such index. If there is no suitable
+            index, return either 0 or N (where N is the length of `self`).
+        sorter : 1-D array-like, optional
+            Optional array of integer indices that sort array a into ascending
+            order. They are typically the result of argsort.
+
+        Returns
+        -------
+        array of ints or int
+            If value is array-like, array of insertion points.
+            If value is scalar, a single integer.
+
+        See Also
+        --------
+        numpy.searchsorted : Similar method from NumPy.
+
+        Examples
+        --------
+        >>> arr = pd.array([1, 2, 3, 5])
+        >>> arr.searchsorted([4])
+        array([3])
+        """
+        # Note: the base tests provided by pandas only test the basics.
+        # We do not test
+        # 1. Values outside the range of the `data_for_sorting` fixture
+        # 2. Values between the values in the `data_for_sorting` fixture
+        # 3. Missing values.
+        arr = self.astype(object)
+        if isinstance(value, ExtensionArray):
+            value = value.astype(object)
+        return arr.searchsorted(value, side=side, sorter=sorter)
+
+    def equals(self, other: object) -> bool:
+        """
+        Return if another array is equivalent to this array.
+
+        Equivalent means that both arrays have the same shape and dtype, and
+        all values compare equal. Missing values in the same location are
+        considered equal (in contrast with normal equality).
+
+        Parameters
+        ----------
+        other : ExtensionArray
+            Array to compare to this Array.
+
+        Returns
+        -------
+        boolean
+            Whether the arrays are equivalent.
+
+        Examples
+        --------
+        >>> arr1 = pd.array([1, 2, np.nan])
+        >>> arr2 = pd.array([1, 2, np.nan])
+        >>> arr1.equals(arr2)
+        True
+        """
+        if type(self) != type(other):
+            return False
+        other = cast(ExtensionArray, other)
+        if self.dtype != other.dtype:
+            return False
+        elif len(self) != len(other):
+            return False
+        else:
+            equal_values = self == other
+            if isinstance(equal_values, ExtensionArray):
+                # boolean array with NA -> fill with False
+                equal_values = equal_values.fillna(False)
+            # error: Unsupported left operand type for & ("ExtensionArray")
+            equal_na = self.isna() & other.isna()  # type: ignore[operator]
+            return bool((equal_values | equal_na).all())
+
+    def isin(self, values) -> npt.NDArray[np.bool_]:
+        """
+        Pointwise comparison for set containment in the given values.
+
+        Roughly equivalent to `np.array([x in values for x in self])`
+
+        Parameters
+        ----------
+        values : Sequence
+
+        Returns
+        -------
+        np.ndarray[bool]
+
+        Examples
+        --------
+        >>> arr = pd.array([1, 2, 3])
+        >>> arr.isin([1])
+        <BooleanArray>
+        [True, False, False]
+        Length: 3, dtype: boolean
+        """
+        return isin(np.asarray(self), values)
+
+    def _values_for_factorize(self) -> tuple[np.ndarray, Any]:
+        """
+        Return an array and missing value suitable for factorization.
+
+        Returns
+        -------
+        values : ndarray
+            An array suitable for factorization. This should maintain order
+            and be a supported dtype (Float64, Int64, UInt64, String, Object).
+            By default, the extension array is cast to object dtype.
+        na_value : object
+            The value in `values` to consider missing. This will be treated
+            as NA in the factorization routines, so it will be coded as
+            `-1` and not included in `uniques`. By default,
+            ``np.nan`` is used.
+
+        Notes
+        -----
+        The values returned by this method are also used in
+        :func:`pandas.util.hash_pandas_object`. If needed, this can be
+        overridden in the ``self._hash_pandas_object()`` method.
+
+        Examples
+        --------
+        >>> pd.array([1, 2, 3])._values_for_factorize()
+        (array([1, 2, 3], dtype=object), nan)
+        """
+        return self.astype(object), np.nan
+
+    def factorize(
+        self,
+        use_na_sentinel: bool = True,
+    ) -> tuple[np.ndarray, ExtensionArray]:
+        """
+        Encode the extension array as an enumerated type.
+
+        Parameters
+        ----------
+        use_na_sentinel : bool, default True
+            If True, the sentinel -1 will be used for NaN values. If False,
+            NaN values will be encoded as non-negative integers and will not drop the
+            NaN from the uniques of the values.
+
+            .. versionadded:: 1.5.0
+
+        Returns
+        -------
+        codes : ndarray
+            An integer NumPy array that's an indexer into the original
+            ExtensionArray.
+        uniques : ExtensionArray
+            An ExtensionArray containing the unique values of `self`.
+
+            .. note::
+
+               uniques will *not* contain an entry for the NA value of
+               the ExtensionArray if there are any missing values present
+               in `self`.
+
+        See Also
+        --------
+        factorize : Top-level factorize method that dispatches here.
+
+        Notes
+        -----
+        :meth:`pandas.factorize` offers a `sort` keyword as well.
+
+        Examples
+        --------
+        >>> idx1 = pd.PeriodIndex(["2014-01", "2014-01", "2014-02", "2014-02",
+        ...                        "2014-03", "2014-03"], freq="M")
+        >>> arr, idx = idx1.factorize()
+        >>> arr
+        array([0, 0, 1, 1, 2, 2])
+        >>> idx
+        PeriodIndex(['2014-01', '2014-02', '2014-03'], dtype='period[M]')
+        """
+        # Implementer note: There are two ways to override the behavior of
+        # pandas.factorize
+        # 1. _values_for_factorize and _from_factorized.
+        #    Specify the values passed to pandas' internal factorization
+        #    routines, and how to convert from those values back to the
+        #    original ExtensionArray.
+        # 2. ExtensionArray.factorize.
+        #    Complete control over factorization.
+        arr, na_value = self._values_for_factorize()
+
+        codes, uniques = factorize_array(
+            arr, use_na_sentinel=use_na_sentinel, na_value=na_value
+        )
+
+        uniques_ea = self._from_factorized(uniques, self)
+        return codes, uniques_ea
+
+    _extension_array_shared_docs[
+        "repeat"
+    ] = """
+        Repeat elements of a %(klass)s.
+
+        Returns a new %(klass)s where each element of the current %(klass)s
+        is repeated consecutively a given number of times.
+
+        Parameters
+        ----------
+        repeats : int or array of ints
+            The number of repetitions for each element. This should be a
+            non-negative integer. Repeating 0 times will return an empty
+            %(klass)s.
+        axis : None
+            Must be ``None``. Has no effect but is accepted for compatibility
+            with numpy.
+
+        Returns
+        -------
+        %(klass)s
+            Newly created %(klass)s with repeated elements.
+
+        See Also
+        --------
+        Series.repeat : Equivalent function for Series.
+        Index.repeat : Equivalent function for Index.
+        numpy.repeat : Similar method for :class:`numpy.ndarray`.
+        ExtensionArray.take : Take arbitrary positions.
+
+        Examples
+        --------
+        >>> cat = pd.Categorical(['a', 'b', 'c'])
+        >>> cat
+        ['a', 'b', 'c']
+        Categories (3, object): ['a', 'b', 'c']
+        >>> cat.repeat(2)
+        ['a', 'a', 'b', 'b', 'c', 'c']
+        Categories (3, object): ['a', 'b', 'c']
+        >>> cat.repeat([1, 2, 3])
+        ['a', 'b', 'b', 'c', 'c', 'c']
+        Categories (3, object): ['a', 'b', 'c']
+        """
+
+    @Substitution(klass="ExtensionArray")
+    @Appender(_extension_array_shared_docs["repeat"])
+    def repeat(self, repeats: int | Sequence[int], axis: AxisInt | None = None) -> Self:
+        nv.validate_repeat((), {"axis": axis})
+        ind = np.arange(len(self)).repeat(repeats)
+        return self.take(ind)
+
+    # ------------------------------------------------------------------------
+    # Indexing methods
+    # ------------------------------------------------------------------------
+
+    def take(
+        self,
+        indices: TakeIndexer,
+        *,
+        allow_fill: bool = False,
+        fill_value: Any = None,
+    ) -> Self:
+        """
+        Take elements from an array.
+
+        Parameters
+        ----------
+        indices : sequence of int or one-dimensional np.ndarray of int
+            Indices to be taken.
+        allow_fill : bool, default False
+            How to handle negative values in `indices`.
+
+            * False: negative values in `indices` indicate positional indices
+              from the right (the default). This is similar to
+              :func:`numpy.take`.
+
+            * True: negative values in `indices` indicate
+              missing values. These values are set to `fill_value`. Any other
+              negative values raise a ``ValueError``.
+
+        fill_value : any, optional
+            Fill value to use for NA-indices when `allow_fill` is True.
+            This may be ``None``, in which case the default NA value for
+            the type, ``self.dtype.na_value``, is used.
+
+            For many ExtensionArrays, there will be two representations of
+            `fill_value`: a user-facing "boxed" scalar, and a low-level
+            physical NA value. `fill_value` should be the user-facing version,
+            and the implementation should handle translating that to the
+            physical version for processing the take if necessary.
+
+        Returns
+        -------
+        ExtensionArray
+
+        Raises
+        ------
+        IndexError
+            When the indices are out of bounds for the array.
+        ValueError
+            When `indices` contains negative values other than ``-1``
+            and `allow_fill` is True.
+
+        See Also
+        --------
+        numpy.take : Take elements from an array along an axis.
+        api.extensions.take : Take elements from an array.
+
+        Notes
+        -----
+        ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
+        ``iloc``, when `indices` is a sequence of values. Additionally,
+        it's called by :meth:`Series.reindex`, or any other method
+        that causes realignment, with a `fill_value`.
+
+        Examples
+        --------
+        Here's an example implementation, which relies on casting the
+        extension array to object dtype. This uses the helper method
+        :func:`pandas.api.extensions.take`.
+
+        .. code-block:: python
+
+           def take(self, indices, allow_fill=False, fill_value=None):
+               from pandas.core.algorithms import take
+
+               # If the ExtensionArray is backed by an ndarray, then
+               # just pass that here instead of coercing to object.
+               data = self.astype(object)
+
+               if allow_fill and fill_value is None:
+                   fill_value = self.dtype.na_value
+
+               # fill value should always be translated from the scalar
+               # type for the array, to the physical storage type for
+               # the data, before passing to take.
+
+               result = take(data, indices, fill_value=fill_value,
+                             allow_fill=allow_fill)
+               return self._from_sequence(result, dtype=self.dtype)
+        """
+        # Implementer note: The `fill_value` parameter should be a user-facing
+        # value, an instance of self.dtype.type. When passed `fill_value=None`,
+        # the default of `self.dtype.na_value` should be used.
+        # This may differ from the physical storage type your ExtensionArray
+        # uses. In this case, your implementation is responsible for casting
+        # the user-facing type to the storage type, before using
+        # pandas.api.extensions.take
+        raise AbstractMethodError(self)
+
+    def copy(self) -> Self:
+        """
+        Return a copy of the array.
+
+        Returns
+        -------
+        ExtensionArray
+
+        Examples
+        --------
+        >>> arr = pd.array([1, 2, 3])
+        >>> arr2 = arr.copy()
+        >>> arr[0] = 2
+        >>> arr2
+        <IntegerArray>
+        [1, 2, 3]
+        Length: 3, dtype: Int64
+        """
+        raise AbstractMethodError(self)
+
+    def view(self, dtype: Dtype | None = None) -> ArrayLike:
+        """
+        Return a view on the array.
+
+        Parameters
+        ----------
+        dtype : str, np.dtype, or ExtensionDtype, optional
+            Default None.
+
+        Returns
+        -------
+        ExtensionArray or np.ndarray
+            A view on the :class:`ExtensionArray`'s data.
+
+        Examples
+        --------
+        This gives a view on the underlying data of an ``ExtensionArray`` and is
+        not a copy. Modifications on either the view or the original
+        ``ExtensionArray`` will be reflected in the underlying data:
+
+        >>> arr = pd.array([1, 2, 3])
+        >>> arr2 = arr.view()
+        >>> arr[0] = 2
+        >>> arr2
+        <IntegerArray>
+        [2, 2, 3]
+        Length: 3, dtype: Int64
+        """
+        # NB:
+        # - This must return a *new* object referencing the same data, not self.
+        # - The only case that *must* be implemented is with dtype=None,
+        #   giving a view with the same dtype as self.
+        if dtype is not None:
+            raise NotImplementedError(dtype)
+        return self[:]
+
+    # ------------------------------------------------------------------------
+    # Printing
+    # ------------------------------------------------------------------------
+
+    def __repr__(self) -> str:
+        if self.ndim > 1:
+            return self._repr_2d()
+
+        from pandas.io.formats.printing import format_object_summary
+
+        # the short repr has no trailing newline, while the truncated
+        # repr does. So we include a newline in our template, and strip
+        # any trailing newlines from format_object_summary
+        data = format_object_summary(
+            self, self._formatter(), indent_for_name=False
+        ).rstrip(", \n")
+        class_name = f"<{type(self).__name__}>\n"
+        return f"{class_name}{data}\nLength: {len(self)}, dtype: {self.dtype}"
+
+    def _repr_2d(self) -> str:
+        from pandas.io.formats.printing import format_object_summary
+
+        # the short repr has no trailing newline, while the truncated
+        # repr does. So we include a newline in our template, and strip
+        # any trailing newlines from format_object_summary
+        lines = [
+            format_object_summary(x, self._formatter(), indent_for_name=False).rstrip(
+                ", \n"
+            )
+            for x in self
+        ]
+        data = ",\n".join(lines)
+        class_name = f"<{type(self).__name__}>"
+        return f"{class_name}\n[\n{data}\n]\nShape: {self.shape}, dtype: {self.dtype}"
+
+    def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]:
+        """
+        Formatting function for scalar values.
+
+        This is used in the default '__repr__'. The returned formatting
+        function receives instances of your scalar type.
+
+        Parameters
+        ----------
+        boxed : bool, default False
+            An indicator of whether or not your array is being printed
+            within a Series, DataFrame, or Index (True), or just by
+            itself (False). This may be useful if you want scalar values
+            to appear differently within a Series versus on its own (e.g.
+            quoted or not).
+
+        Returns
+        -------
+        Callable[[Any], str]
+            A callable that gets instances of the scalar type and
+            returns a string. By default, :func:`repr` is used
+            when ``boxed=False`` and :func:`str` is used when
+            ``boxed=True``.
+
+        Examples
+        --------
+        >>> class MyExtensionArray(pd.arrays.NumpyExtensionArray):
+        ...     def _formatter(self, boxed=False):
+        ...         return lambda x: '*' + str(x) + '*' if boxed else repr(x) + '*'
+        >>> MyExtensionArray(np.array([1, 2, 3, 4]))
+        <MyExtensionArray>
+        [1*, 2*, 3*, 4*]
+        Length: 4, dtype: int64
+        """
+        if boxed:
+            return str
+        return repr
+
+    # ------------------------------------------------------------------------
+    # Reshaping
+    # ------------------------------------------------------------------------
+
+    def transpose(self, *axes: int) -> ExtensionArray:
+        """
+        Return a transposed view on this array.
+
+        Because ExtensionArrays are always 1D, this is a no-op. It is included
+        for compatibility with np.ndarray.
+        """
+        return self[:]
+
+    @property
+    def T(self) -> ExtensionArray:
+        return self.transpose()
+
+    def ravel(self, order: Literal["C", "F", "A", "K"] | None = "C") -> ExtensionArray:
+        """
+        Return a flattened view on this array.
+
+        Parameters
+        ----------
+        order : {None, 'C', 'F', 'A', 'K'}, default 'C'
+
+        Returns
+        -------
+        ExtensionArray
+
+        Notes
+        -----
+        - Because ExtensionArrays are 1D-only, this is a no-op.
+        - The "order" argument is ignored; it exists only for compatibility
+          with NumPy.
+
+        Examples
+        --------
+        >>> pd.array([1, 2, 3]).ravel()
+        <IntegerArray>
+        [1, 2, 3]
+        Length: 3, dtype: Int64
+        """
+        return self
+
+    @classmethod
+    def _concat_same_type(cls, to_concat: Sequence[Self]) -> Self:
+        """
+        Concatenate multiple arrays of this dtype.
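+
+        All arrays in ``to_concat`` are expected to be of this class and to
+        share a single dtype; see the implementer note in the method body.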
+
+        Parameters
+        ----------
+        to_concat : sequence of this type
+
+        Returns
+        -------
+        ExtensionArray
+
+        Examples
+        --------
+        >>> arr1 = pd.array([1, 2, 3])
+        >>> arr2 = pd.array([4, 5, 6])
+        >>> pd.arrays.IntegerArray._concat_same_type([arr1, arr2])
+        <IntegerArray>
+        [1, 2, 3, 4, 5, 6]
+        Length: 6, dtype: Int64
+        """
+        # Implementer note: this method will only be called with a sequence of
+        # ExtensionArrays of this class and with the same dtype as self. This
+        # should allow "easy" concatenation (no upcasting needed), and result
+        # in a new ExtensionArray of the same dtype.
+        # Note: this strict behaviour is only guaranteed starting with pandas 1.1
+        raise AbstractMethodError(cls)
+
+    # The _can_hold_na attribute is set to True so that pandas internals
+    # will use the ExtensionDtype.na_value as the NA value in operations
+    # such as take(), reindex(), shift(), etc. In addition, those results
+    # will then be of the ExtensionArray subclass rather than an array
+    # of objects
+    @cache_readonly
+    def _can_hold_na(self) -> bool:
+        return self.dtype._can_hold_na
+
+    def _accumulate(
+        self, name: str, *, skipna: bool = True, **kwargs
+    ) -> ExtensionArray:
+        """
+        Return an ExtensionArray performing an accumulation operation.
+
+        The underlying data type might change.
+
+        Parameters
+        ----------
+        name : str
+            Name of the function, supported values are:
+            - cummin
+            - cummax
+            - cumsum
+            - cumprod
+        skipna : bool, default True
+            If True, skip NA values.
+        **kwargs
+            Additional keyword arguments passed to the accumulation function.
+            Currently, there is no supported kwarg.
+
+        Returns
+        -------
+        array
+
+        Raises
+        ------
+        NotImplementedError : subclass does not define accumulations
+
+        Examples
+        --------
+        >>> arr = pd.array([1, 2, 3])
+        >>> arr._accumulate(name='cumsum')
+        <IntegerArray>
+        [1, 3, 6]
+        Length: 3, dtype: Int64
+        """
+        raise NotImplementedError(f"cannot perform {name} with type {self.dtype}")
+
+    def _reduce(
+        self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
+    ):
+        """
+        Return a scalar result of performing the reduction operation.
+
+        Parameters
+        ----------
+        name : str
+            Name of the function, supported values are:
+            { any, all, min, max, sum, mean, median, prod,
+            std, var, sem, kurt, skew }.
+        skipna : bool, default True
+            If True, skip NaN values.
+        keepdims : bool, default False
+            If False, a scalar is returned.
+            If True, the result has dimension with size one along the reduced axis.
+
+            .. versionadded:: 2.1
+
+               This parameter is not required in the _reduce signature to keep backward
+               compatibility, but will become required in the future. If the parameter
+               is not found in the method signature, a FutureWarning will be emitted.
+        **kwargs
+            Additional keyword arguments passed to the reduction function.
+            Currently, `ddof` is the only supported kwarg.
+ + Returns + ------- + scalar + + Raises + ------ + TypeError : subclass does not define reductions + + Examples + -------- + >>> pd.array([1, 2, 3])._reduce("min") + 1 + """ + meth = getattr(self, name, None) + if meth is None: + raise TypeError( + f"'{type(self).__name__}' with dtype {self.dtype} " + f"does not support reduction '{name}'" + ) + result = meth(skipna=skipna, **kwargs) + if keepdims: + result = np.array([result]) + + return result + + # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 + # Incompatible types in assignment (expression has type "None", base class + # "object" defined the type as "Callable[[object], int]") + __hash__: ClassVar[None] # type: ignore[assignment] + + # ------------------------------------------------------------------------ + # Non-Optimized Default Methods; in the case of the private methods here, + # these are not guaranteed to be stable across pandas versions. + + def _values_for_json(self) -> np.ndarray: + """ + Specify how to render our entries in to_json. + + Notes + ----- + The dtype on the returned ndarray is not restricted, but for non-native + types that are not specifically handled in objToJSON.c, to_json is + liable to raise. In these cases, it may be safer to return an ndarray + of strings. + """ + return np.asarray(self) + + def _hash_pandas_object( + self, *, encoding: str, hash_key: str, categorize: bool + ) -> npt.NDArray[np.uint64]: + """ + Hook for hash_pandas_object. + + Default is to use the values returned by _values_for_factorize. + + Parameters + ---------- + encoding : str + Encoding for data & key when strings. + hash_key : str + Hash_key for string key to encode. + categorize : bool + Whether to first categorize object arrays before hashing. This is more + efficient when the array contains duplicate values. + + Returns + ------- + np.ndarray[uint64] + + Examples + -------- + >>> pd.array([1, 2])._hash_pandas_object(encoding='utf-8', + ... hash_key="1000000000000000", + ... categorize=False + ... ) + array([11381023671546835630, 4641644667904626417], dtype=uint64) + """ + from pandas.core.util.hashing import hash_array + + values, _ = self._values_for_factorize() + return hash_array( + values, encoding=encoding, hash_key=hash_key, categorize=categorize + ) + + def tolist(self) -> list: + """ + Return a list of the values. + + These are each a scalar type, which is a Python scalar + (for str, int, float) or a pandas scalar + (for Timestamp/Timedelta/Interval/Period) + + Returns + ------- + list + + Examples + -------- + >>> arr = pd.array([1, 2, 3]) + >>> arr.tolist() + [1, 2, 3] + """ + if self.ndim > 1: + return [x.tolist() for x in self] + return list(self) + + def delete(self, loc: PositionalIndexer) -> Self: + indexer = np.delete(np.arange(len(self)), loc) + return self.take(indexer) + + def insert(self, loc: int, item) -> Self: + """ + Insert an item at the given position. + + Parameters + ---------- + loc : int + item : scalar-like + + Returns + ------- + same type as self + + Notes + ----- + This method should be both type and dtype-preserving. If the item + cannot be held in an array of this type/dtype, either ValueError or + TypeError should be raised. + + The default implementation relies on _from_sequence to raise on invalid + items. 
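+
+        For example, inserting the string ``"a"`` into an Int64 array should
+        raise, since ``_from_sequence`` cannot coerce it; per the note above,
+        either ``ValueError`` or ``TypeError`` is acceptable.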
+
+ Examples
+ --------
+ >>> arr = pd.array([1, 2, 3])
+ >>> arr.insert(2, -1)
+ <IntegerArray>
+ [1, 2, -1, 3]
+ Length: 4, dtype: Int64
+ """
+ loc = validate_insert_loc(loc, len(self))
+
+ item_arr = type(self)._from_sequence([item], dtype=self.dtype)
+
+ return type(self)._concat_same_type([self[:loc], item_arr, self[loc:]])
+
+ def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
+ """
+ Analogue to np.putmask(self, mask, value)
+
+ Parameters
+ ----------
+ mask : np.ndarray[bool]
+ value : scalar or listlike
+ If listlike, must be arraylike with same length as self.
+
+ Returns
+ -------
+ None
+
+ Notes
+ -----
+ Unlike np.putmask, we do not repeat listlike values with mismatched length.
+ 'value' should either be a scalar or an arraylike with the same length
+ as self.
+ """
+ if is_list_like(value):
+ val = value[mask]
+ else:
+ val = value
+
+ self[mask] = val
+
+ def _where(self, mask: npt.NDArray[np.bool_], value) -> Self:
+ """
+ Analogue to np.where(mask, self, value)
+
+ Parameters
+ ----------
+ mask : np.ndarray[bool]
+ value : scalar or listlike
+
+ Returns
+ -------
+ same type as self
+ """
+ result = self.copy()
+
+ if is_list_like(value):
+ val = value[~mask]
+ else:
+ val = value
+
+ result[~mask] = val
+ return result
+
+ def _fill_mask_inplace(
+ self, method: str, limit: int | None, mask: npt.NDArray[np.bool_]
+ ) -> None:
+ """
+ Replace values in locations specified by 'mask' using pad or backfill.
+
+ See also
+ --------
+ ExtensionArray.fillna
+ """
+ func = missing.get_fill_func(method)
+ npvalues = self.astype(object)
+ # NB: if we don't copy mask here, it may be altered inplace, which
+ # would mess up the `self[mask] = ...` below.
+ func(npvalues, limit=limit, mask=mask.copy())
+ new_values = self._from_sequence(npvalues, dtype=self.dtype)
+ self[mask] = new_values[mask]
+
+ def _rank(
+ self,
+ *,
+ axis: AxisInt = 0,
+ method: str = "average",
+ na_option: str = "keep",
+ ascending: bool = True,
+ pct: bool = False,
+ ):
+ """
+ See Series.rank.__doc__.
+ """
+ if axis != 0:
+ raise NotImplementedError
+
+ return rank(
+ self._values_for_argsort(),
+ axis=axis,
+ method=method,
+ na_option=na_option,
+ ascending=ascending,
+ pct=pct,
+ )
+
+ @classmethod
+ def _empty(cls, shape: Shape, dtype: ExtensionDtype):
+ """
+ Create an ExtensionArray with the given shape and dtype.
+
+ See also
+ --------
+ ExtensionDtype.empty
+ ExtensionDtype.empty is the 'official' public version of this API.
+ """
+ # Implementer note: while ExtensionDtype.empty is the public way to
+ # call this method, it is still required to implement this `_empty`
+ # method as well (it is called internally in pandas)
+ obj = cls._from_sequence([], dtype=dtype)
+
+ taker = np.broadcast_to(np.intp(-1), shape)
+ result = obj.take(taker, allow_fill=True)
+ if not isinstance(result, cls) or dtype != result.dtype:
+ raise NotImplementedError(
+ f"Default 'empty' implementation is invalid for dtype='{dtype}'"
+ )
+ return result
+
+ def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self:
+ """
+ Compute the quantiles of self for each quantile in `qs`.
+
+ Parameters
+ ----------
+ qs : np.ndarray[float64]
+ interpolation : str
+
+ Returns
+ -------
+ same type as self
+ """
+ mask = np.asarray(self.isna())
+ arr = np.asarray(self)
+ fill_value = np.nan
+
+ res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation)
+ return type(self)._from_sequence(res_values)
+
+ def _mode(self, dropna: bool = True) -> Self:
+ """
+ Returns the mode(s) of the ExtensionArray.
+ + Always returns `ExtensionArray` even if only one value. + + Parameters + ---------- + dropna : bool, default True + Don't consider counts of NA values. + + Returns + ------- + same type as self + Sorted, if possible. + """ + # error: Incompatible return value type (got "Union[ExtensionArray, + # ndarray[Any, Any]]", expected "Self") + return mode(self, dropna=dropna) # type: ignore[return-value] + + def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + if any( + isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)) for other in inputs + ): + return NotImplemented + + result = arraylike.maybe_dispatch_ufunc_to_dunder_op( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + if "out" in kwargs: + return arraylike.dispatch_ufunc_with_out( + self, ufunc, method, *inputs, **kwargs + ) + + if method == "reduce": + result = arraylike.dispatch_reduction_ufunc( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + return arraylike.default_array_ufunc(self, ufunc, method, *inputs, **kwargs) + + def map(self, mapper, na_action=None): + """ + Map values using an input mapping or function. + + Parameters + ---------- + mapper : function, dict, or Series + Mapping correspondence. + na_action : {None, 'ignore'}, default None + If 'ignore', propagate NA values, without passing them to the + mapping correspondence. If 'ignore' is not supported, a + ``NotImplementedError`` should be raised. + + Returns + ------- + Union[ndarray, Index, ExtensionArray] + The output of the mapping function applied to the array. + If the function returns a tuple with more than one element + a MultiIndex will be returned. + """ + return map_array(self, mapper, na_action=na_action) + + # ------------------------------------------------------------------------ + # GroupBy Methods + + def _groupby_op( + self, + *, + how: str, + has_dropped_na: bool, + min_count: int, + ngroups: int, + ids: npt.NDArray[np.intp], + **kwargs, + ) -> ArrayLike: + """ + Dispatch GroupBy reduction or transformation operation. + + This is an *experimental* API to allow ExtensionArray authors to implement + reductions and transformations. The API is subject to change. + + Parameters + ---------- + how : {'any', 'all', 'sum', 'prod', 'min', 'max', 'mean', 'median', + 'median', 'var', 'std', 'sem', 'nth', 'last', 'ohlc', + 'cumprod', 'cumsum', 'cummin', 'cummax', 'rank'} + has_dropped_na : bool + min_count : int + ngroups : int + ids : np.ndarray[np.intp] + ids[i] gives the integer label for the group that self[i] belongs to. 
+ **kwargs : operation-specific + 'any', 'all' -> ['skipna'] + 'var', 'std', 'sem' -> ['ddof'] + 'cumprod', 'cumsum', 'cummin', 'cummax' -> ['skipna'] + 'rank' -> ['ties_method', 'ascending', 'na_option', 'pct'] + + Returns + ------- + np.ndarray or ExtensionArray + """ + from pandas.core.arrays.string_ import StringDtype + from pandas.core.groupby.ops import WrappedCythonOp + + kind = WrappedCythonOp.get_kind_from_how(how) + op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na) + + # GH#43682 + if isinstance(self.dtype, StringDtype): + # StringArray + if op.how not in ["any", "all"]: + # Fail early to avoid conversion to object + op._get_cython_function(op.kind, op.how, np.dtype(object), False) + npvalues = self.to_numpy(object, na_value=np.nan) + else: + raise NotImplementedError( + f"function is not implemented for this dtype: {self.dtype}" + ) + + res_values = op._cython_op_ndim_compat( + npvalues, + min_count=min_count, + ngroups=ngroups, + comp_ids=ids, + mask=None, + **kwargs, + ) + + if op.how in op.cast_blocklist: + # i.e. how in ["rank"], since other cast_blocklist methods don't go + # through cython_operation + return res_values + + if isinstance(self.dtype, StringDtype): + dtype = self.dtype + string_array_cls = dtype.construct_array_type() + return string_array_cls._from_sequence(res_values, dtype=dtype) + + else: + raise NotImplementedError + + +class ExtensionArraySupportsAnyAll(ExtensionArray): + def any(self, *, skipna: bool = True) -> bool: + raise AbstractMethodError(self) + + def all(self, *, skipna: bool = True) -> bool: + raise AbstractMethodError(self) + + +class ExtensionOpsMixin: + """ + A base class for linking the operators to their dunder names. + + .. note:: + + You may want to set ``__array_priority__`` if you want your + implementation to be called when involved in binary operations + with NumPy arrays. 
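+
+ A minimal sketch of the intended wiring (``MyArray`` and ``make_op``
+ are hypothetical names, not part of pandas)::
+
+ class MyArray(ExtensionOpsMixin, ExtensionArray):
+ __array_priority__ = 1000 # prefer our ops in mixed numpy operations
+
+ @classmethod
+ def _create_arithmetic_method(cls, op):
+ return make_op(op) # return a method implementing `op`
+
+ MyArray._add_arithmetic_ops() # binds __add__, __radd__, __sub__, ...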
+ """ + + @classmethod + def _create_arithmetic_method(cls, op): + raise AbstractMethodError(cls) + + @classmethod + def _add_arithmetic_ops(cls) -> None: + setattr(cls, "__add__", cls._create_arithmetic_method(operator.add)) + setattr(cls, "__radd__", cls._create_arithmetic_method(roperator.radd)) + setattr(cls, "__sub__", cls._create_arithmetic_method(operator.sub)) + setattr(cls, "__rsub__", cls._create_arithmetic_method(roperator.rsub)) + setattr(cls, "__mul__", cls._create_arithmetic_method(operator.mul)) + setattr(cls, "__rmul__", cls._create_arithmetic_method(roperator.rmul)) + setattr(cls, "__pow__", cls._create_arithmetic_method(operator.pow)) + setattr(cls, "__rpow__", cls._create_arithmetic_method(roperator.rpow)) + setattr(cls, "__mod__", cls._create_arithmetic_method(operator.mod)) + setattr(cls, "__rmod__", cls._create_arithmetic_method(roperator.rmod)) + setattr(cls, "__floordiv__", cls._create_arithmetic_method(operator.floordiv)) + setattr( + cls, "__rfloordiv__", cls._create_arithmetic_method(roperator.rfloordiv) + ) + setattr(cls, "__truediv__", cls._create_arithmetic_method(operator.truediv)) + setattr(cls, "__rtruediv__", cls._create_arithmetic_method(roperator.rtruediv)) + setattr(cls, "__divmod__", cls._create_arithmetic_method(divmod)) + setattr(cls, "__rdivmod__", cls._create_arithmetic_method(roperator.rdivmod)) + + @classmethod + def _create_comparison_method(cls, op): + raise AbstractMethodError(cls) + + @classmethod + def _add_comparison_ops(cls) -> None: + setattr(cls, "__eq__", cls._create_comparison_method(operator.eq)) + setattr(cls, "__ne__", cls._create_comparison_method(operator.ne)) + setattr(cls, "__lt__", cls._create_comparison_method(operator.lt)) + setattr(cls, "__gt__", cls._create_comparison_method(operator.gt)) + setattr(cls, "__le__", cls._create_comparison_method(operator.le)) + setattr(cls, "__ge__", cls._create_comparison_method(operator.ge)) + + @classmethod + def _create_logical_method(cls, op): + raise AbstractMethodError(cls) + + @classmethod + def _add_logical_ops(cls) -> None: + setattr(cls, "__and__", cls._create_logical_method(operator.and_)) + setattr(cls, "__rand__", cls._create_logical_method(roperator.rand_)) + setattr(cls, "__or__", cls._create_logical_method(operator.or_)) + setattr(cls, "__ror__", cls._create_logical_method(roperator.ror_)) + setattr(cls, "__xor__", cls._create_logical_method(operator.xor)) + setattr(cls, "__rxor__", cls._create_logical_method(roperator.rxor)) + + +class ExtensionScalarOpsMixin(ExtensionOpsMixin): + """ + A mixin for defining ops on an ExtensionArray. + + It is assumed that the underlying scalar objects have the operators + already defined. + + Notes + ----- + If you have defined a subclass MyExtensionArray(ExtensionArray), then + use MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin) to + get the arithmetic operators. After the definition of MyExtensionArray, + insert the lines + + MyExtensionArray._add_arithmetic_ops() + MyExtensionArray._add_comparison_ops() + + to link the operators to your class. + + .. note:: + + You may want to set ``__array_priority__`` if you want your + implementation to be called when involved in binary operations + with NumPy arrays. + """ + + @classmethod + def _create_method(cls, op, coerce_to_dtype: bool = True, result_dtype=None): + """ + A class method that returns a method that will correspond to an + operator for an ExtensionArray subclass, by dispatching to the + relevant operator defined on the individual elements of the + ExtensionArray. 
+ + Parameters + ---------- + op : function + An operator that takes arguments op(a, b) + coerce_to_dtype : bool, default True + boolean indicating whether to attempt to convert + the result to the underlying ExtensionArray dtype. + If it's not possible to create a new ExtensionArray with the + values, an ndarray is returned instead. + + Returns + ------- + Callable[[Any, Any], Union[ndarray, ExtensionArray]] + A method that can be bound to a class. When used, the method + receives the two arguments, one of which is the instance of + this class, and should return an ExtensionArray or an ndarray. + + Returning an ndarray may be necessary when the result of the + `op` cannot be stored in the ExtensionArray. The dtype of the + ndarray uses NumPy's normal inference rules. + + Examples + -------- + Given an ExtensionArray subclass called MyExtensionArray, use + + __add__ = cls._create_method(operator.add) + + in the class definition of MyExtensionArray to create the operator + for addition, that will be based on the operator implementation + of the underlying elements of the ExtensionArray + """ + + def _binop(self, other): + def convert_values(param): + if isinstance(param, ExtensionArray) or is_list_like(param): + ovalues = param + else: # Assume its an object + ovalues = [param] * len(self) + return ovalues + + if isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)): + # rely on pandas to unbox and dispatch to us + return NotImplemented + + lvalues = self + rvalues = convert_values(other) + + # If the operator is not defined for the underlying objects, + # a TypeError should be raised + res = [op(a, b) for (a, b) in zip(lvalues, rvalues)] + + def _maybe_convert(arr): + if coerce_to_dtype: + # https://github.com/pandas-dev/pandas/issues/22850 + # We catch all regular exceptions here, and fall back + # to an ndarray. 
+ res = maybe_cast_pointwise_result(arr, self.dtype, same_dtype=False) + if not isinstance(res, type(self)): + # exception raised in _from_sequence; ensure we have ndarray + res = np.asarray(arr) + else: + res = np.asarray(arr, dtype=result_dtype) + return res + + if op.__name__ in {"divmod", "rdivmod"}: + a, b = zip(*res) + return _maybe_convert(a), _maybe_convert(b) + + return _maybe_convert(res) + + op_name = f"__{op.__name__}__" + return set_function_name(_binop, op_name, cls) + + @classmethod + def _create_arithmetic_method(cls, op): + return cls._create_method(op) + + @classmethod + def _create_comparison_method(cls, op): + return cls._create_method(op, coerce_to_dtype=False, result_dtype=bool) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/boolean.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/boolean.py new file mode 100644 index 00000000..43344f04 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/boolean.py @@ -0,0 +1,406 @@ +from __future__ import annotations + +import numbers +from typing import ( + TYPE_CHECKING, + cast, +) + +import numpy as np + +from pandas._libs import ( + lib, + missing as libmissing, +) + +from pandas.core.dtypes.common import is_list_like +from pandas.core.dtypes.dtypes import register_extension_dtype +from pandas.core.dtypes.missing import isna + +from pandas.core import ops +from pandas.core.array_algos import masked_accumulations +from pandas.core.arrays.masked import ( + BaseMaskedArray, + BaseMaskedDtype, +) + +if TYPE_CHECKING: + import pyarrow + + from pandas._typing import ( + Dtype, + DtypeObj, + Self, + npt, + type_t, + ) + + +@register_extension_dtype +class BooleanDtype(BaseMaskedDtype): + """ + Extension dtype for boolean data. + + .. warning:: + + BooleanDtype is considered experimental. The implementation and + parts of the API may change without warning. + + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> pd.BooleanDtype() + BooleanDtype + """ + + name = "boolean" + + # https://github.com/python/mypy/issues/4125 + # error: Signature of "type" incompatible with supertype "BaseMaskedDtype" + @property + def type(self) -> type: # type: ignore[override] + return np.bool_ + + @property + def kind(self) -> str: + return "b" + + @property + def numpy_dtype(self) -> np.dtype: + return np.dtype("bool") + + @classmethod + def construct_array_type(cls) -> type_t[BooleanArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + return BooleanArray + + def __repr__(self) -> str: + return "BooleanDtype" + + @property + def _is_boolean(self) -> bool: + return True + + @property + def _is_numeric(self) -> bool: + return True + + def __from_arrow__( + self, array: pyarrow.Array | pyarrow.ChunkedArray + ) -> BooleanArray: + """ + Construct BooleanArray from pyarrow Array/ChunkedArray. 
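+
+ An illustrative round trip (requires pyarrow; shown skipped):
+
+ >>> import pyarrow as pa # doctest: +SKIP
+ >>> pd.BooleanDtype().__from_arrow__(pa.array([True, None])) # doctest: +SKIP
+ <BooleanArray>
+ [True, <NA>]
+ Length: 2, dtype: boolean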
+ """ + import pyarrow + + if array.type != pyarrow.bool_() and not pyarrow.types.is_null(array.type): + raise TypeError(f"Expected array of boolean type, got {array.type} instead") + + if isinstance(array, pyarrow.Array): + chunks = [array] + length = len(array) + else: + # pyarrow.ChunkedArray + chunks = array.chunks + length = array.length() + + if pyarrow.types.is_null(array.type): + mask = np.ones(length, dtype=bool) + # No need to init data, since all null + data = np.empty(length, dtype=bool) + return BooleanArray(data, mask) + + results = [] + for arr in chunks: + buflist = arr.buffers() + data = pyarrow.BooleanArray.from_buffers( + arr.type, len(arr), [None, buflist[1]], offset=arr.offset + ).to_numpy(zero_copy_only=False) + if arr.null_count != 0: + mask = pyarrow.BooleanArray.from_buffers( + arr.type, len(arr), [None, buflist[0]], offset=arr.offset + ).to_numpy(zero_copy_only=False) + mask = ~mask + else: + mask = np.zeros(len(arr), dtype=bool) + + bool_arr = BooleanArray(data, mask) + results.append(bool_arr) + + if not results: + return BooleanArray( + np.array([], dtype=np.bool_), np.array([], dtype=np.bool_) + ) + else: + return BooleanArray._concat_same_type(results) + + +def coerce_to_array( + values, mask=None, copy: bool = False +) -> tuple[np.ndarray, np.ndarray]: + """ + Coerce the input values array to numpy arrays with a mask. + + Parameters + ---------- + values : 1D list-like + mask : bool 1D array, optional + copy : bool, default False + if True, copy the input + + Returns + ------- + tuple of (values, mask) + """ + if isinstance(values, BooleanArray): + if mask is not None: + raise ValueError("cannot pass mask for BooleanArray input") + values, mask = values._data, values._mask + if copy: + values = values.copy() + mask = mask.copy() + return values, mask + + mask_values = None + if isinstance(values, np.ndarray) and values.dtype == np.bool_: + if copy: + values = values.copy() + elif isinstance(values, np.ndarray) and values.dtype.kind in "iufcb": + mask_values = isna(values) + + values_bool = np.zeros(len(values), dtype=bool) + values_bool[~mask_values] = values[~mask_values].astype(bool) + + if not np.all( + values_bool[~mask_values].astype(values.dtype) == values[~mask_values] + ): + raise TypeError("Need to pass bool-like values") + + values = values_bool + else: + values_object = np.asarray(values, dtype=object) + + inferred_dtype = lib.infer_dtype(values_object, skipna=True) + integer_like = ("floating", "integer", "mixed-integer-float") + if inferred_dtype not in ("boolean", "empty") + integer_like: + raise TypeError("Need to pass bool-like values") + + # mypy does not narrow the type of mask_values to npt.NDArray[np.bool_] + # within this branch, it assumes it can also be None + mask_values = cast("npt.NDArray[np.bool_]", isna(values_object)) + values = np.zeros(len(values), dtype=bool) + values[~mask_values] = values_object[~mask_values].astype(bool) + + # if the values were integer-like, validate it were actually 0/1's + if (inferred_dtype in integer_like) and not ( + np.all( + values[~mask_values].astype(float) + == values_object[~mask_values].astype(float) + ) + ): + raise TypeError("Need to pass bool-like values") + + if mask is None and mask_values is None: + mask = np.zeros(values.shape, dtype=bool) + elif mask is None: + mask = mask_values + else: + if isinstance(mask, np.ndarray) and mask.dtype == np.bool_: + if mask_values is not None: + mask = mask | mask_values + else: + if copy: + mask = mask.copy() + else: + mask = np.array(mask, 
dtype=bool)
+ if mask_values is not None:
+ mask = mask | mask_values
+
+ if values.shape != mask.shape:
+ raise ValueError("values.shape and mask.shape must match")
+
+ return values, mask
+
+
+class BooleanArray(BaseMaskedArray):
+ """
+ Array of boolean (True/False) data with missing values.
+
+ This is a pandas Extension array for boolean data, under the hood
+ represented by 2 numpy arrays: a boolean array with the data and
+ a boolean array with the mask (True indicating missing).
+
+ BooleanArray implements Kleene logic (sometimes called three-value
+ logic) for logical operations. See :ref:`boolean.kleene` for more.
+
+ To construct a BooleanArray from generic array-like input, use
+ :func:`pandas.array` specifying ``dtype="boolean"`` (see examples
+ below).
+
+ .. warning::
+
+ BooleanArray is considered experimental. The implementation and
+ parts of the API may change without warning.
+
+ Parameters
+ ----------
+ values : numpy.ndarray
+ A 1-d boolean-dtype array with the data.
+ mask : numpy.ndarray
+ A 1-d boolean-dtype array indicating missing values (True
+ indicates missing).
+ copy : bool, default False
+ Whether to copy the `values` and `mask` arrays.
+
+ Attributes
+ ----------
+ None
+
+ Methods
+ -------
+ None
+
+ Returns
+ -------
+ BooleanArray
+
+ Examples
+ --------
+ Create a BooleanArray with :func:`pandas.array`:
+
+ >>> pd.array([True, False, None], dtype="boolean")
+ <BooleanArray>
+ [True, False, <NA>]
+ Length: 3, dtype: boolean
+ """
+
+ # The value used to fill '_data' to avoid upcasting
+ _internal_fill_value = False
+ # Fill values used for any/all
+ # Incompatible types in assignment (expression has type "bool", base class
+ # "BaseMaskedArray" defined the type as "<no value>")
+ _truthy_value = True # type: ignore[assignment]
+ _falsey_value = False # type: ignore[assignment]
+ _TRUE_VALUES = {"True", "TRUE", "true", "1", "1.0"}
+ _FALSE_VALUES = {"False", "FALSE", "false", "0", "0.0"}
+
+ @classmethod
+ def _simple_new(cls, values: np.ndarray, mask: npt.NDArray[np.bool_]) -> Self:
+ result = super()._simple_new(values, mask)
+ result._dtype = BooleanDtype()
+ return result
+
+ def __init__(
+ self, values: np.ndarray, mask: np.ndarray, copy: bool = False
+ ) -> None:
+ if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
+ raise TypeError(
+ "values should be boolean numpy array. 
Use " + "the 'pd.array' function instead" + ) + self._dtype = BooleanDtype() + super().__init__(values, mask, copy=copy) + + @property + def dtype(self) -> BooleanDtype: + return self._dtype + + @classmethod + def _from_sequence_of_strings( + cls, + strings: list[str], + *, + dtype: Dtype | None = None, + copy: bool = False, + true_values: list[str] | None = None, + false_values: list[str] | None = None, + ) -> BooleanArray: + true_values_union = cls._TRUE_VALUES.union(true_values or []) + false_values_union = cls._FALSE_VALUES.union(false_values or []) + + def map_string(s) -> bool: + if s in true_values_union: + return True + elif s in false_values_union: + return False + else: + raise ValueError(f"{s} cannot be cast to bool") + + scalars = np.array(strings, dtype=object) + mask = isna(scalars) + scalars[~mask] = list(map(map_string, scalars[~mask])) + return cls._from_sequence(scalars, dtype=dtype, copy=copy) + + _HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_) + + @classmethod + def _coerce_to_array( + cls, value, *, dtype: DtypeObj, copy: bool = False + ) -> tuple[np.ndarray, np.ndarray]: + if dtype: + assert dtype == "boolean" + return coerce_to_array(value, copy=copy) + + def _logical_method(self, other, op): + assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"} + other_is_scalar = lib.is_scalar(other) + mask = None + + if isinstance(other, BooleanArray): + other, mask = other._data, other._mask + elif is_list_like(other): + other = np.asarray(other, dtype="bool") + if other.ndim > 1: + raise NotImplementedError("can only perform ops with 1-d structures") + other, mask = coerce_to_array(other, copy=False) + elif isinstance(other, np.bool_): + other = other.item() + + if other_is_scalar and other is not libmissing.NA and not lib.is_bool(other): + raise TypeError( + "'other' should be pandas.NA or a bool. " + f"Got {type(other).__name__} instead." + ) + + if not other_is_scalar and len(self) != len(other): + raise ValueError("Lengths must match") + + if op.__name__ in {"or_", "ror_"}: + result, mask = ops.kleene_or(self._data, other, self._mask, mask) + elif op.__name__ in {"and_", "rand_"}: + result, mask = ops.kleene_and(self._data, other, self._mask, mask) + else: + # i.e. xor, rxor + result, mask = ops.kleene_xor(self._data, other, self._mask, mask) + + # i.e. 
BooleanArray + return self._maybe_mask_result(result, mask) + + def _accumulate( + self, name: str, *, skipna: bool = True, **kwargs + ) -> BaseMaskedArray: + data = self._data + mask = self._mask + if name in ("cummin", "cummax"): + op = getattr(masked_accumulations, name) + data, mask = op(data, mask, skipna=skipna, **kwargs) + return self._simple_new(data, mask) + else: + from pandas.core.arrays import IntegerArray + + return IntegerArray(data.astype(int), mask)._accumulate( + name, skipna=skipna, **kwargs + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/categorical.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/categorical.py new file mode 100644 index 00000000..53c942f6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/categorical.py @@ -0,0 +1,3024 @@ +from __future__ import annotations + +from csv import QUOTE_NONNUMERIC +from functools import partial +import operator +from shutil import get_terminal_size +from typing import ( + TYPE_CHECKING, + Literal, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._config import get_option + +from pandas._libs import ( + NaT, + algos as libalgos, + lib, +) +from pandas._libs.arrays import NDArrayBacked +from pandas.compat.numpy import function as nv +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import validate_bool_kwarg + +from pandas.core.dtypes.cast import ( + coerce_indexer_dtype, + find_common_type, +) +from pandas.core.dtypes.common import ( + ensure_int64, + ensure_platform_int, + is_any_real_numeric_dtype, + is_bool_dtype, + is_dict_like, + is_hashable, + is_integer_dtype, + is_list_like, + is_scalar, + needs_i8_conversion, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + ExtensionDtype, +) +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, +) + +from pandas.core import ( + algorithms, + arraylike, + ops, +) +from pandas.core.accessor import ( + PandasDelegate, + delegate_names, +) +from pandas.core.algorithms import ( + factorize, + take_nd, +) +from pandas.core.arrays._mixins import ( + NDArrayBackedExtensionArray, + ravel_compat, +) +from pandas.core.base import ( + ExtensionArray, + NoNewAttributesMixin, + PandasObject, +) +import pandas.core.common as com +from pandas.core.construction import ( + extract_array, + sanitize_array, +) +from pandas.core.ops.common import unpack_zerodim_and_defer +from pandas.core.sorting import nargsort +from pandas.core.strings.object_array import ObjectStringArrayMixin + +from pandas.io.formats import console + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + AstypeArg, + AxisInt, + Dtype, + NpDtype, + Ordered, + Self, + Shape, + SortKind, + npt, + ) + + from pandas import ( + DataFrame, + Index, + Series, + ) + + +def _cat_compare_op(op): + opname = f"__{op.__name__}__" + fill_value = op is operator.ne + + @unpack_zerodim_and_defer(opname) + def func(self, other): + hashable = is_hashable(other) + if is_list_like(other) and len(other) != len(self) and not hashable: + # in hashable case we may have a tuple that is itself a category + raise ValueError("Lengths must match.") + + if not self.ordered: + if opname in ["__lt__", "__gt__", "__le__", "__ge__"]: + raise TypeError( + "Unordered Categoricals can only compare equality or not" + ) + if isinstance(other, 
Categorical):
+ # Two Categoricals can only be compared if the categories are
+ # the same (maybe up to ordering, depending on ordered)
+
+ msg = "Categoricals can only be compared if 'categories' are the same."
+ if not self._categories_match_up_to_permutation(other):
+ raise TypeError(msg)
+
+ if not self.ordered and not self.categories.equals(other.categories):
+ # both unordered and different order
+ other_codes = recode_for_categories(
+ other.codes, other.categories, self.categories, copy=False
+ )
+ else:
+ other_codes = other._codes
+
+ ret = op(self._codes, other_codes)
+ mask = (self._codes == -1) | (other_codes == -1)
+ if mask.any():
+ ret[mask] = fill_value
+ return ret
+
+ if hashable:
+ if other in self.categories:
+ i = self._unbox_scalar(other)
+ ret = op(self._codes, i)
+
+ if opname not in {"__eq__", "__ge__", "__gt__"}:
+ # GH#29820 performance trick; get_loc will always give i>=0,
+ # so in the cases (__ne__, __le__, __lt__) the setting
+ # here is a no-op, so can be skipped.
+ mask = self._codes == -1
+ ret[mask] = fill_value
+ return ret
+ else:
+ return ops.invalid_comparison(self, other, op)
+ else:
+ # allow categorical vs object dtype array comparisons for equality
+ # these are only positional comparisons
+ if opname not in ["__eq__", "__ne__"]:
+ raise TypeError(
+ f"Cannot compare a Categorical for op {opname} with "
+ f"type {type(other)}.\nIf you want to compare values, "
+ "use 'np.asarray(cat) <op> other'."
+ )
+
+ if isinstance(other, ExtensionArray) and needs_i8_conversion(other.dtype):
+ # We would return NotImplemented here, but that messes up
+ # ExtensionIndex's wrapped methods
+ return op(other, self)
+ return getattr(np.array(self), opname)(np.array(other))
+
+ func.__name__ = opname
+
+ return func
+
+
+def contains(cat, key, container) -> bool:
+ """
+ Helper for membership check for ``key`` in ``cat``.
+
+ This is a helper method for :meth:`__contains__`
+ and :meth:`CategoricalIndex.__contains__`.
+
+ Returns True if ``key`` is in ``cat.categories`` and the
+ location of ``key`` in ``categories`` is in ``container``.
+
+ Parameters
+ ----------
+ cat : :class:`Categorical` or :class:`CategoricalIndex`
+ key : a hashable object
+ The key to check membership for.
+ container : Container (e.g. list-like or mapping)
+ The container to check for membership in.
+
+ Returns
+ -------
+ is_in : bool
+ True if ``key`` is in ``self.categories`` and location of
+ ``key`` in ``categories`` is in ``container``, else False.
+
+ Notes
+ -----
+ This method does not check for NaN values. Do that separately
+ before calling this method.
+ """
+ hash(key)
+
+ # get location of key in categories.
+ # If a KeyError, the key isn't in categories, so logically
+ # can't be in container either.
+ try:
+ loc = cat.categories.get_loc(key)
+ except (KeyError, TypeError):
+ return False
+
+ # loc is the location of key in categories, but also the *value*
+ # for key in container. So, `key` may be in categories,
+ # but still not in `container`. Example ('b' in categories,
+ # but not in values):
+ # 'b' in Categorical(['a'], categories=['a', 'b']) # False
+ if is_scalar(loc):
+ return loc in container
+ else:
+ # if categories is an IntervalIndex, loc is an array.
+ return any(loc_ in container for loc_ in loc)
+
+
+class Categorical(NDArrayBackedExtensionArray, PandasObject, ObjectStringArrayMixin):
+ """
+ Represent a categorical variable in classic R / S-plus fashion. 
+ + `Categoricals` can only take on a limited, and usually fixed, number + of possible values (`categories`). In contrast to statistical categorical + variables, a `Categorical` might have an order, but numerical operations + (additions, divisions, ...) are not possible. + + All values of the `Categorical` are either in `categories` or `np.nan`. + Assigning values outside of `categories` will raise a `ValueError`. Order + is defined by the order of the `categories`, not lexical order of the + values. + + Parameters + ---------- + values : list-like + The values of the categorical. If categories are given, values not in + categories will be replaced with NaN. + categories : Index-like (unique), optional + The unique categories for this categorical. If not given, the + categories are assumed to be the unique values of `values` (sorted, if + possible, otherwise in the order in which they appear). + ordered : bool, default False + Whether or not this categorical is treated as a ordered categorical. + If True, the resulting categorical will be ordered. + An ordered categorical respects, when sorted, the order of its + `categories` attribute (which in turn is the `categories` argument, if + provided). + dtype : CategoricalDtype + An instance of ``CategoricalDtype`` to use for this categorical. + + Attributes + ---------- + categories : Index + The categories of this categorical. + codes : ndarray + The codes (integer positions, which point to the categories) of this + categorical, read only. + ordered : bool + Whether or not this Categorical is ordered. + dtype : CategoricalDtype + The instance of ``CategoricalDtype`` storing the ``categories`` + and ``ordered``. + + Methods + ------- + from_codes + __array__ + + Raises + ------ + ValueError + If the categories do not validate. + TypeError + If an explicit ``ordered=True`` is given but no `categories` and the + `values` are not sortable. + + See Also + -------- + CategoricalDtype : Type for categorical data. + CategoricalIndex : An Index with an underlying ``Categorical``. + + Notes + ----- + See the `user guide + `__ + for more. + + Examples + -------- + >>> pd.Categorical([1, 2, 3, 1, 2, 3]) + [1, 2, 3, 1, 2, 3] + Categories (3, int64): [1, 2, 3] + + >>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c']) + ['a', 'b', 'c', 'a', 'b', 'c'] + Categories (3, object): ['a', 'b', 'c'] + + Missing values are not included as a category. + + >>> c = pd.Categorical([1, 2, 3, 1, 2, 3, np.nan]) + >>> c + [1, 2, 3, 1, 2, 3, NaN] + Categories (3, int64): [1, 2, 3] + + However, their presence is indicated in the `codes` attribute + by code `-1`. + + >>> c.codes + array([ 0, 1, 2, 0, 1, 2, -1], dtype=int8) + + Ordered `Categoricals` can be sorted according to the custom order + of the categories and can have a min and max value. + + >>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True, + ... 
categories=['c', 'b', 'a']) + >>> c + ['a', 'b', 'c', 'a', 'b', 'c'] + Categories (3, object): ['c' < 'b' < 'a'] + >>> c.min() + 'c' + """ + + # For comparisons, so that numpy uses our implementation if the compare + # ops, which raise + __array_priority__ = 1000 + # tolist is not actually deprecated, just suppressed in the __dir__ + _hidden_attrs = PandasObject._hidden_attrs | frozenset(["tolist"]) + _typ = "categorical" + + _dtype: CategoricalDtype + + @classmethod + # error: Argument 2 of "_simple_new" is incompatible with supertype + # "NDArrayBacked"; supertype defines the argument type as + # "Union[dtype[Any], ExtensionDtype]" + def _simple_new( # type: ignore[override] + cls, codes: np.ndarray, dtype: CategoricalDtype + ) -> Self: + # NB: This is not _quite_ as simple as the "usual" _simple_new + codes = coerce_indexer_dtype(codes, dtype.categories) + dtype = CategoricalDtype(ordered=False).update_dtype(dtype) + return super()._simple_new(codes, dtype) + + def __init__( + self, + values, + categories=None, + ordered=None, + dtype: Dtype | None = None, + fastpath: bool | lib.NoDefault = lib.no_default, + copy: bool = True, + ) -> None: + if fastpath is not lib.no_default: + # GH#20110 + warnings.warn( + "The 'fastpath' keyword in Categorical is deprecated and will " + "be removed in a future version. Use Categorical.from_codes instead", + DeprecationWarning, + stacklevel=find_stack_level(), + ) + else: + fastpath = False + + dtype = CategoricalDtype._from_values_or_dtype( + values, categories, ordered, dtype + ) + # At this point, dtype is always a CategoricalDtype, but + # we may have dtype.categories be None, and we need to + # infer categories in a factorization step further below + + if fastpath: + codes = coerce_indexer_dtype(values, dtype.categories) + dtype = CategoricalDtype(ordered=False).update_dtype(dtype) + super().__init__(codes, dtype) + return + + if not is_list_like(values): + # GH#38433 + raise TypeError("Categorical input must be list-like") + + # null_mask indicates missing values we want to exclude from inference. + # This means: only missing values in list-likes (not arrays/ndframes). + null_mask = np.array(False) + + # sanitize input + vdtype = getattr(values, "dtype", None) + if isinstance(vdtype, CategoricalDtype): + if dtype.categories is None: + dtype = CategoricalDtype(values.categories, dtype.ordered) + elif not isinstance(values, (ABCIndex, ABCSeries, ExtensionArray)): + values = com.convert_to_list_like(values) + if isinstance(values, list) and len(values) == 0: + # By convention, empty lists result in object dtype: + values = np.array([], dtype=object) + elif isinstance(values, np.ndarray): + if values.ndim > 1: + # preempt sanitize_array from raising ValueError + raise NotImplementedError( + "> 1 ndim Categorical are not supported at this time" + ) + values = sanitize_array(values, None) + else: + # i.e. 
must be a list + arr = sanitize_array(values, None) + null_mask = isna(arr) + if null_mask.any(): + # We remove null values here, then below will re-insert + # them, grep "full_codes" + arr_list = [values[idx] for idx in np.where(~null_mask)[0]] + + # GH#44900 Do not cast to float if we have only missing values + if arr_list or arr.dtype == "object": + sanitize_dtype = None + else: + sanitize_dtype = arr.dtype + + arr = sanitize_array(arr_list, None, dtype=sanitize_dtype) + values = arr + + if dtype.categories is None: + if not isinstance(values, ABCIndex): + # in particular RangeIndex xref test_index_equal_range_categories + values = sanitize_array(values, None) + try: + codes, categories = factorize(values, sort=True) + except TypeError as err: + codes, categories = factorize(values, sort=False) + if dtype.ordered: + # raise, as we don't have a sortable data structure and so + # the user should give us one by specifying categories + raise TypeError( + "'values' is not ordered, please " + "explicitly specify the categories order " + "by passing in a categories argument." + ) from err + + # we're inferring from values + dtype = CategoricalDtype(categories, dtype.ordered) + + elif isinstance(values.dtype, CategoricalDtype): + old_codes = extract_array(values)._codes + codes = recode_for_categories( + old_codes, values.dtype.categories, dtype.categories, copy=copy + ) + + else: + codes = _get_codes_for_values(values, dtype.categories) + + if null_mask.any(): + # Reinsert -1 placeholders for previously removed missing values + full_codes = -np.ones(null_mask.shape, dtype=codes.dtype) + full_codes[~null_mask] = codes + codes = full_codes + + dtype = CategoricalDtype(ordered=False).update_dtype(dtype) + arr = coerce_indexer_dtype(codes, dtype.categories) + super().__init__(arr, dtype) + + @property + def dtype(self) -> CategoricalDtype: + """ + The :class:`~pandas.api.types.CategoricalDtype` for this instance. + + Examples + -------- + >>> cat = pd.Categorical(['a', 'b'], ordered=True) + >>> cat + ['a', 'b'] + Categories (2, object): ['a' < 'b'] + >>> cat.dtype + CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object) + """ + return self._dtype + + @property + def _internal_fill_value(self) -> int: + # using the specific numpy integer instead of python int to get + # the correct dtype back from _quantile in the all-NA case + dtype = self._ndarray.dtype + return dtype.type(-1) + + @classmethod + def _from_sequence( + cls, scalars, *, dtype: Dtype | None = None, copy: bool = False + ) -> Self: + return cls(scalars, dtype=dtype, copy=copy) + + @overload + def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: + ... + + @overload + def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: + ... + + @overload + def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: + ... + + def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike: + """ + Coerce this type to another dtype + + Parameters + ---------- + dtype : numpy dtype or pandas type + copy : bool, default True + By default, astype always returns a newly allocated object. + If copy is set to False and dtype is categorical, the original + object is returned. 
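+
+ Examples
+ --------
+ An illustrative cast to a NumPy dtype:
+
+ >>> cat = pd.Categorical(['a', 'b'])
+ >>> cat.astype(object)
+ array(['a', 'b'], dtype=object)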
+ """ + dtype = pandas_dtype(dtype) + if self.dtype is dtype: + result = self.copy() if copy else self + + elif isinstance(dtype, CategoricalDtype): + # GH 10696/18593/18630 + dtype = self.dtype.update_dtype(dtype) + self = self.copy() if copy else self + result = self._set_dtype(dtype) + + elif isinstance(dtype, ExtensionDtype): + return super().astype(dtype, copy=copy) + + elif dtype.kind in "iu" and self.isna().any(): + raise ValueError("Cannot convert float NaN to integer") + + elif len(self.codes) == 0 or len(self.categories) == 0: + result = np.array( + self, + dtype=dtype, + copy=copy, + ) + + else: + # GH8628 (PERF): astype category codes instead of astyping array + new_cats = self.categories._values + + try: + new_cats = new_cats.astype(dtype=dtype, copy=copy) + fill_value = self.categories._na_value + if not is_valid_na_for_dtype(fill_value, dtype): + fill_value = lib.item_from_zerodim( + np.array(self.categories._na_value).astype(dtype) + ) + except ( + TypeError, # downstream error msg for CategoricalIndex is misleading + ValueError, + ): + msg = f"Cannot cast {self.categories.dtype} dtype to {dtype}" + raise ValueError(msg) + + result = take_nd( + new_cats, ensure_platform_int(self._codes), fill_value=fill_value + ) + + return result + + def to_list(self): + """ + Alias for tolist. + """ + # GH#51254 + warnings.warn( + "Categorical.to_list is deprecated and will be removed in a future " + "version. Use obj.tolist() instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.tolist() + + @classmethod + def _from_inferred_categories( + cls, inferred_categories, inferred_codes, dtype, true_values=None + ) -> Self: + """ + Construct a Categorical from inferred values. + + For inferred categories (`dtype` is None) the categories are sorted. + For explicit `dtype`, the `inferred_categories` are cast to the + appropriate type. + + Parameters + ---------- + inferred_categories : Index + inferred_codes : Index + dtype : CategoricalDtype or 'category' + true_values : list, optional + If none are provided, the default ones are + "True", "TRUE", and "true." + + Returns + ------- + Categorical + """ + from pandas import ( + Index, + to_datetime, + to_numeric, + to_timedelta, + ) + + cats = Index(inferred_categories) + known_categories = ( + isinstance(dtype, CategoricalDtype) and dtype.categories is not None + ) + + if known_categories: + # Convert to a specialized type with `dtype` if specified. + if is_any_real_numeric_dtype(dtype.categories.dtype): + cats = to_numeric(inferred_categories, errors="coerce") + elif lib.is_np_dtype(dtype.categories.dtype, "M"): + cats = to_datetime(inferred_categories, errors="coerce") + elif lib.is_np_dtype(dtype.categories.dtype, "m"): + cats = to_timedelta(inferred_categories, errors="coerce") + elif is_bool_dtype(dtype.categories.dtype): + if true_values is None: + true_values = ["True", "TRUE", "true"] + + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "Index") + cats = cats.isin(true_values) # type: ignore[assignment] + + if known_categories: + # Recode from observation order to dtype.categories order. + categories = dtype.categories + codes = recode_for_categories(inferred_codes, cats, categories) + elif not cats.is_monotonic_increasing: + # Sort categories and recode for unknown categories. 
+ unsorted = cats.copy() + categories = cats.sort_values() + + codes = recode_for_categories(inferred_codes, unsorted, categories) + dtype = CategoricalDtype(categories, ordered=False) + else: + dtype = CategoricalDtype(cats, ordered=False) + codes = inferred_codes + + return cls._simple_new(codes, dtype=dtype) + + @classmethod + def from_codes( + cls, + codes, + categories=None, + ordered=None, + dtype: Dtype | None = None, + validate: bool = True, + ) -> Self: + """ + Make a Categorical type from codes and categories or dtype. + + This constructor is useful if you already have codes and + categories/dtype and so do not need the (computation intensive) + factorization step, which is usually done on the constructor. + + If your data does not follow this convention, please use the normal + constructor. + + Parameters + ---------- + codes : array-like of int + An integer array, where each integer points to a category in + categories or dtype.categories, or else is -1 for NaN. + categories : index-like, optional + The categories for the categorical. Items need to be unique. + If the categories are not given here, then they must be provided + in `dtype`. + ordered : bool, optional + Whether or not this categorical is treated as an ordered + categorical. If not given here or in `dtype`, the resulting + categorical will be unordered. + dtype : CategoricalDtype or "category", optional + If :class:`CategoricalDtype`, cannot be used together with + `categories` or `ordered`. + validate : bool, default True + If True, validate that the codes are valid for the dtype. + If False, don't validate that the codes are valid. Be careful about skipping + validation, as invalid codes can lead to severe problems, such as segfaults. + + .. versionadded:: 2.1.0 + + Returns + ------- + Categorical + + Examples + -------- + >>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True) + >>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype) + ['a', 'b', 'a', 'b'] + Categories (2, object): ['a' < 'b'] + """ + dtype = CategoricalDtype._from_values_or_dtype( + categories=categories, ordered=ordered, dtype=dtype + ) + if dtype.categories is None: + msg = ( + "The categories must be provided in 'categories' or " + "'dtype'. Both were None." + ) + raise ValueError(msg) + + if validate: + # beware: non-valid codes may segfault + codes = cls._validate_codes_for_dtype(codes, dtype=dtype) + + return cls._simple_new(codes, dtype=dtype) + + # ------------------------------------------------------------------ + # Categories/Codes/Ordered + + @property + def categories(self) -> Index: + """ + The categories of this categorical. + + Setting assigns new values to each category (effectively a rename of + each individual category). + + The assigned value has to be a list-like object. All items must be + unique and the number of items in the new categories must be the same + as the number of items in the old categories. + + Raises + ------ + ValueError + If the new categories do not validate as categories or if the + number of new categories is unequal the number of old categories + + See Also + -------- + rename_categories : Rename categories. + reorder_categories : Reorder categories. + add_categories : Add new categories. + remove_categories : Remove the specified categories. + remove_unused_categories : Remove categories which are not used. + set_categories : Set the categories to the specified ones. 
+ + Examples + -------- + For :class:`pandas.Series`: + + >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category') + >>> ser.cat.categories + Index(['a', 'b', 'c'], dtype='object') + + >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'a'], categories=['b', 'c', 'd']) + >>> ser = pd.Series(raw_cat) + >>> ser.cat.categories + Index(['b', 'c', 'd'], dtype='object') + + For :class:`pandas.Categorical`: + + >>> cat = pd.Categorical(['a', 'b'], ordered=True) + >>> cat.categories + Index(['a', 'b'], dtype='object') + + For :class:`pandas.CategoricalIndex`: + + >>> ci = pd.CategoricalIndex(['a', 'c', 'b', 'a', 'c', 'b']) + >>> ci.categories + Index(['a', 'b', 'c'], dtype='object') + + >>> ci = pd.CategoricalIndex(['a', 'c'], categories=['c', 'b', 'a']) + >>> ci.categories + Index(['c', 'b', 'a'], dtype='object') + """ + return self.dtype.categories + + @property + def ordered(self) -> Ordered: + """ + Whether the categories have an ordered relationship. + + Examples + -------- + For :class:`pandas.Series`: + + >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category') + >>> ser.cat.ordered + False + + >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'a'], ordered=True) + >>> ser = pd.Series(raw_cat) + >>> ser.cat.ordered + True + + For :class:`pandas.Categorical`: + + >>> cat = pd.Categorical(['a', 'b'], ordered=True) + >>> cat.ordered + True + + >>> cat = pd.Categorical(['a', 'b'], ordered=False) + >>> cat.ordered + False + + For :class:`pandas.CategoricalIndex`: + + >>> ci = pd.CategoricalIndex(['a', 'b'], ordered=True) + >>> ci.ordered + True + + >>> ci = pd.CategoricalIndex(['a', 'b'], ordered=False) + >>> ci.ordered + False + """ + return self.dtype.ordered + + @property + def codes(self) -> np.ndarray: + """ + The category codes of this categorical index. + + Codes are an array of integers which are the positions of the actual + values in the categories array. + + There is no setter, use the other categorical methods and the normal item + setter to change values in the categorical. + + Returns + ------- + ndarray[int] + A non-writable view of the ``codes`` array. + + Examples + -------- + For :class:`pandas.Categorical`: + + >>> cat = pd.Categorical(['a', 'b'], ordered=True) + >>> cat.codes + array([0, 1], dtype=int8) + + For :class:`pandas.CategoricalIndex`: + + >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c']) + >>> ci.codes + array([0, 1, 2, 0, 1, 2], dtype=int8) + + >>> ci = pd.CategoricalIndex(['a', 'c'], categories=['c', 'b', 'a']) + >>> ci.codes + array([2, 0], dtype=int8) + """ + v = self._codes.view() + v.flags.writeable = False + return v + + def _set_categories(self, categories, fastpath: bool = False) -> None: + """ + Sets new categories inplace + + Parameters + ---------- + fastpath : bool, default False + Don't perform validation of the categories for uniqueness or nulls + + Examples + -------- + >>> c = pd.Categorical(['a', 'b']) + >>> c + ['a', 'b'] + Categories (2, object): ['a', 'b'] + + >>> c._set_categories(pd.Index(['a', 'c'])) + >>> c + ['a', 'c'] + Categories (2, object): ['a', 'c'] + """ + if fastpath: + new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered) + else: + new_dtype = CategoricalDtype(categories, ordered=self.ordered) + if ( + not fastpath + and self.dtype.categories is not None + and len(new_dtype.categories) != len(self.dtype.categories) + ): + raise ValueError( + "new categories need to have the same number of " + "items as the old categories!" 
+ ) + + super().__init__(self._ndarray, new_dtype) + + def _set_dtype(self, dtype: CategoricalDtype) -> Self: + """ + Internal method for directly updating the CategoricalDtype + + Parameters + ---------- + dtype : CategoricalDtype + + Notes + ----- + We don't do any validation here. It's assumed that the dtype is + a (valid) instance of `CategoricalDtype`. + """ + codes = recode_for_categories(self.codes, self.categories, dtype.categories) + return type(self)._simple_new(codes, dtype=dtype) + + def set_ordered(self, value: bool) -> Self: + """ + Set the ordered attribute to the boolean value. + + Parameters + ---------- + value : bool + Set whether this categorical is ordered (True) or not (False). + """ + new_dtype = CategoricalDtype(self.categories, ordered=value) + cat = self.copy() + NDArrayBacked.__init__(cat, cat._ndarray, new_dtype) + return cat + + def as_ordered(self) -> Self: + """ + Set the Categorical to be ordered. + + Returns + ------- + Categorical + Ordered Categorical. + + Examples + -------- + For :class:`pandas.Series`: + + >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category') + >>> ser.cat.ordered + False + >>> ser = ser.cat.as_ordered() + >>> ser.cat.ordered + True + + For :class:`pandas.CategoricalIndex`: + + >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a']) + >>> ci.ordered + False + >>> ci = ci.as_ordered() + >>> ci.ordered + True + """ + return self.set_ordered(True) + + def as_unordered(self) -> Self: + """ + Set the Categorical to be unordered. + + Returns + ------- + Categorical + Unordered Categorical. + + Examples + -------- + For :class:`pandas.Series`: + + >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'a'], ordered=True) + >>> ser = pd.Series(raw_cat) + >>> ser.cat.ordered + True + >>> ser = ser.cat.as_unordered() + >>> ser.cat.ordered + False + + For :class:`pandas.CategoricalIndex`: + + >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a'], ordered=True) + >>> ci.ordered + True + >>> ci = ci.as_unordered() + >>> ci.ordered + False + """ + return self.set_ordered(False) + + def set_categories(self, new_categories, ordered=None, rename: bool = False): + """ + Set the categories to the specified new categories. + + ``new_categories`` can include new categories (which will result in + unused categories) or remove old categories (which results in values + set to ``NaN``). If ``rename=True``, the categories will simply be renamed + (less or more items than in old categories will result in values set to + ``NaN`` or in unused categories respectively). + + This method can be used to perform more than one action of adding, + removing, and reordering simultaneously and is therefore faster than + performing the individual steps via the more specialised methods. + + On the other hand this methods does not do checks (e.g., whether the + old categories are included in the new categories on a reorder), which + can result in surprising changes, for example when using special string + dtypes, which does not considers a S1 string equal to a single char + python string. + + Parameters + ---------- + new_categories : Index-like + The categories in new order. + ordered : bool, default False + Whether or not the categorical is treated as a ordered categorical. + If not given, do not change the ordered information. + rename : bool, default False + Whether or not the new_categories should be considered as a rename + of the old categories or as reordered categories. + + Returns + ------- + Categorical with reordered categories. 
+ + Raises + ------ + ValueError + If new_categories does not validate as categories + + See Also + -------- + rename_categories : Rename categories. + reorder_categories : Reorder categories. + add_categories : Add new categories. + remove_categories : Remove the specified categories. + remove_unused_categories : Remove categories which are not used. + + Examples + -------- + For :class:`pandas.Series`: + + >>> raw_cat = pd.Categorical(['a', 'b', 'c', 'A'], + ... categories=['a', 'b', 'c'], ordered=True) + >>> ser = pd.Series(raw_cat) + >>> ser + 0 a + 1 b + 2 c + 3 NaN + dtype: category + Categories (3, object): ['a' < 'b' < 'c'] + + >>> ser.cat.set_categories(['A', 'B', 'C'], rename=True) + 0 A + 1 B + 2 C + 3 NaN + dtype: category + Categories (3, object): ['A' < 'B' < 'C'] + + For :class:`pandas.CategoricalIndex`: + + >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'A'], + ... categories=['a', 'b', 'c'], ordered=True) + >>> ci + CategoricalIndex(['a', 'b', 'c', nan], categories=['a', 'b', 'c'], + ordered=True, dtype='category') + + >>> ci.set_categories(['A', 'b', 'c']) + CategoricalIndex([nan, 'b', 'c', nan], categories=['A', 'b', 'c'], + ordered=True, dtype='category') + >>> ci.set_categories(['A', 'b', 'c'], rename=True) + CategoricalIndex(['A', 'b', 'c', nan], categories=['A', 'b', 'c'], + ordered=True, dtype='category') + """ + + if ordered is None: + ordered = self.dtype.ordered + new_dtype = CategoricalDtype(new_categories, ordered=ordered) + + cat = self.copy() + if rename: + if cat.dtype.categories is not None and len(new_dtype.categories) < len( + cat.dtype.categories + ): + # remove all _codes which are larger and set to -1/NaN + cat._codes[cat._codes >= len(new_dtype.categories)] = -1 + codes = cat._codes + else: + codes = recode_for_categories( + cat.codes, cat.categories, new_dtype.categories + ) + NDArrayBacked.__init__(cat, codes, new_dtype) + return cat + + def rename_categories(self, new_categories) -> Self: + """ + Rename categories. + + Parameters + ---------- + new_categories : list-like, dict-like or callable + + New categories which will replace old categories. + + * list-like: all items must be unique and the number of items in + the new categories must match the existing number of categories. + + * dict-like: specifies a mapping from + old categories to new. Categories not contained in the mapping + are passed through and extra categories in the mapping are + ignored. + + * callable : a callable that is called on all items in the old + categories and whose return values comprise the new categories. + + Returns + ------- + Categorical + Categorical with renamed categories. + + Raises + ------ + ValueError + If new categories are list-like and do not have the same number of + items than the current categories or do not validate as categories + + See Also + -------- + reorder_categories : Reorder categories. + add_categories : Add new categories. + remove_categories : Remove the specified categories. + remove_unused_categories : Remove categories which are not used. + set_categories : Set the categories to the specified ones. 
+
+        Examples
+        --------
+        >>> c = pd.Categorical(['a', 'a', 'b'])
+        >>> c.rename_categories([0, 1])
+        [0, 0, 1]
+        Categories (2, int64): [0, 1]
+
+        For dict-like ``new_categories``, extra keys are ignored and
+        categories not in the dictionary are passed through:
+
+        >>> c.rename_categories({'a': 'A', 'c': 'C'})
+        ['A', 'A', 'b']
+        Categories (2, object): ['A', 'b']
+
+        You may also provide a callable to create the new categories:
+
+        >>> c.rename_categories(lambda x: x.upper())
+        ['A', 'A', 'B']
+        Categories (2, object): ['A', 'B']
+        """
+
+        if is_dict_like(new_categories):
+            new_categories = [
+                new_categories.get(item, item) for item in self.categories
+            ]
+        elif callable(new_categories):
+            new_categories = [new_categories(item) for item in self.categories]
+
+        cat = self.copy()
+        cat._set_categories(new_categories)
+        return cat
+
+    def reorder_categories(self, new_categories, ordered=None) -> Self:
+        """
+        Reorder categories as specified in new_categories.
+
+        ``new_categories`` needs to include all old categories and no new
+        category items.
+
+        Parameters
+        ----------
+        new_categories : Index-like
+           The categories in new order.
+        ordered : bool, optional
+           Whether or not the categorical is treated as an ordered categorical.
+           If not given, do not change the ordered information.
+
+        Returns
+        -------
+        Categorical
+            Categorical with reordered categories.
+
+        Raises
+        ------
+        ValueError
+            If the new categories do not contain all old category items or any
+            new ones.
+
+        See Also
+        --------
+        rename_categories : Rename categories.
+        add_categories : Add new categories.
+        remove_categories : Remove the specified categories.
+        remove_unused_categories : Remove categories which are not used.
+        set_categories : Set the categories to the specified ones.
+
+        Examples
+        --------
+        For :class:`pandas.Series`:
+
+        >>> ser = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
+        >>> ser = ser.cat.reorder_categories(['c', 'b', 'a'], ordered=True)
+        >>> ser
+        0    a
+        1    b
+        2    c
+        3    a
+        dtype: category
+        Categories (3, object): ['c' < 'b' < 'a']
+
+        >>> ser.sort_values()
+        2    c
+        1    b
+        0    a
+        3    a
+        dtype: category
+        Categories (3, object): ['c' < 'b' < 'a']
+
+        For :class:`pandas.CategoricalIndex`:
+
+        >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a'])
+        >>> ci
+        CategoricalIndex(['a', 'b', 'c', 'a'], categories=['a', 'b', 'c'],
+                         ordered=False, dtype='category')
+        >>> ci.reorder_categories(['c', 'b', 'a'], ordered=True)
+        CategoricalIndex(['a', 'b', 'c', 'a'], categories=['c', 'b', 'a'],
+                         ordered=True, dtype='category')
+        """
+        if (
+            len(self.categories) != len(new_categories)
+            or not self.categories.difference(new_categories).empty
+        ):
+            raise ValueError(
+                "items in new_categories are not the same as in old categories"
+            )
+        return self.set_categories(new_categories, ordered=ordered)
+
+    def add_categories(self, new_categories) -> Self:
+        """
+        Add new categories.
+
+        `new_categories` will be included at the last/highest place in the
+        categories and will be unused directly after this call.
+
+        Parameters
+        ----------
+        new_categories : category or list-like of category
+           The new categories to be included.
+
+        Returns
+        -------
+        Categorical
+            Categorical with new categories added.
+
+        Raises
+        ------
+        ValueError
+            If the new categories include old categories or do not validate as
+            categories.
+
+        See Also
+        --------
+        rename_categories : Rename categories.
+        reorder_categories : Reorder categories.
+        remove_categories : Remove the specified categories.
+ remove_unused_categories : Remove categories which are not used. + set_categories : Set the categories to the specified ones. + + Examples + -------- + >>> c = pd.Categorical(['c', 'b', 'c']) + >>> c + ['c', 'b', 'c'] + Categories (2, object): ['b', 'c'] + + >>> c.add_categories(['d', 'a']) + ['c', 'b', 'c'] + Categories (4, object): ['b', 'c', 'd', 'a'] + """ + + if not is_list_like(new_categories): + new_categories = [new_categories] + already_included = set(new_categories) & set(self.dtype.categories) + if len(already_included) != 0: + raise ValueError( + f"new categories must not include old categories: {already_included}" + ) + + if hasattr(new_categories, "dtype"): + from pandas import Series + + dtype = find_common_type( + [self.dtype.categories.dtype, new_categories.dtype] + ) + new_categories = Series( + list(self.dtype.categories) + list(new_categories), dtype=dtype + ) + else: + new_categories = list(self.dtype.categories) + list(new_categories) + + new_dtype = CategoricalDtype(new_categories, self.ordered) + cat = self.copy() + codes = coerce_indexer_dtype(cat._ndarray, new_dtype.categories) + NDArrayBacked.__init__(cat, codes, new_dtype) + return cat + + def remove_categories(self, removals) -> Self: + """ + Remove the specified categories. + + `removals` must be included in the old categories. Values which were in + the removed categories will be set to NaN + + Parameters + ---------- + removals : category or list of categories + The categories which should be removed. + + Returns + ------- + Categorical + Categorical with removed categories. + + Raises + ------ + ValueError + If the removals are not contained in the categories + + See Also + -------- + rename_categories : Rename categories. + reorder_categories : Reorder categories. + add_categories : Add new categories. + remove_unused_categories : Remove categories which are not used. + set_categories : Set the categories to the specified ones. + + Examples + -------- + >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd']) + >>> c + ['a', 'c', 'b', 'c', 'd'] + Categories (4, object): ['a', 'b', 'c', 'd'] + + >>> c.remove_categories(['d', 'a']) + [NaN, 'c', 'b', 'c', NaN] + Categories (2, object): ['b', 'c'] + """ + from pandas import Index + + if not is_list_like(removals): + removals = [removals] + + removals = Index(removals).unique().dropna() + new_categories = ( + self.dtype.categories.difference(removals, sort=False) + if self.dtype.ordered is True + else self.dtype.categories.difference(removals) + ) + not_included = removals.difference(self.dtype.categories) + + if len(not_included) != 0: + not_included = set(not_included) + raise ValueError(f"removals must all be in old categories: {not_included}") + + return self.set_categories(new_categories, ordered=self.ordered, rename=False) + + def remove_unused_categories(self) -> Self: + """ + Remove categories which are not used. + + Returns + ------- + Categorical + Categorical with unused categories dropped. + + See Also + -------- + rename_categories : Rename categories. + reorder_categories : Reorder categories. + add_categories : Add new categories. + remove_categories : Remove the specified categories. + set_categories : Set the categories to the specified ones. 
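+
+        Notes
+        -----
+        The remaining categories keep their original relative order. As a
+        rough sketch, this behaves like
+        ``c.set_categories(c.categories.take(np.unique(c.codes[c.codes != -1])))``.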
+ + Examples + -------- + >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd']) + >>> c + ['a', 'c', 'b', 'c', 'd'] + Categories (4, object): ['a', 'b', 'c', 'd'] + + >>> c[2] = 'a' + >>> c[4] = 'c' + >>> c + ['a', 'c', 'a', 'c', 'c'] + Categories (4, object): ['a', 'b', 'c', 'd'] + + >>> c.remove_unused_categories() + ['a', 'c', 'a', 'c', 'c'] + Categories (2, object): ['a', 'c'] + """ + idx, inv = np.unique(self._codes, return_inverse=True) + + if idx.size != 0 and idx[0] == -1: # na sentinel + idx, inv = idx[1:], inv - 1 + + new_categories = self.dtype.categories.take(idx) + new_dtype = CategoricalDtype._from_fastpath( + new_categories, ordered=self.ordered + ) + new_codes = coerce_indexer_dtype(inv, new_dtype.categories) + + cat = self.copy() + NDArrayBacked.__init__(cat, new_codes, new_dtype) + return cat + + # ------------------------------------------------------------------ + + def map( + self, + mapper, + na_action: Literal["ignore"] | None | lib.NoDefault = lib.no_default, + ): + """ + Map categories using an input mapping or function. + + Maps the categories to new categories. If the mapping correspondence is + one-to-one the result is a :class:`~pandas.Categorical` which has the + same order property as the original, otherwise a :class:`~pandas.Index` + is returned. NaN values are unaffected. + + If a `dict` or :class:`~pandas.Series` is used any unmapped category is + mapped to `NaN`. Note that if this happens an :class:`~pandas.Index` + will be returned. + + Parameters + ---------- + mapper : function, dict, or Series + Mapping correspondence. + na_action : {None, 'ignore'}, default 'ignore' + If 'ignore', propagate NaN values, without passing them to the + mapping correspondence. + + .. deprecated:: 2.1.0 + + The default value of 'ignore' has been deprecated and will be changed to + None in the future. + + Returns + ------- + pandas.Categorical or pandas.Index + Mapped categorical. + + See Also + -------- + CategoricalIndex.map : Apply a mapping correspondence on a + :class:`~pandas.CategoricalIndex`. + Index.map : Apply a mapping correspondence on an + :class:`~pandas.Index`. + Series.map : Apply a mapping correspondence on a + :class:`~pandas.Series`. + Series.apply : Apply more complex functions on a + :class:`~pandas.Series`. 
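+
+        Notes
+        -----
+        With ``na_action=None`` the mapper is applied to NaN itself, so a
+        dict containing ``np.nan`` can replace missing values. A minimal
+        sketch (hypothetical mapping):
+
+        >>> cat = pd.Categorical(['a', None])
+        >>> cat.map({'a': 'x', np.nan: 'missing'}, na_action=None)
+        Index(['x', 'missing'], dtype='object')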
+ + Examples + -------- + >>> cat = pd.Categorical(['a', 'b', 'c']) + >>> cat + ['a', 'b', 'c'] + Categories (3, object): ['a', 'b', 'c'] + >>> cat.map(lambda x: x.upper(), na_action=None) + ['A', 'B', 'C'] + Categories (3, object): ['A', 'B', 'C'] + >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'}, na_action=None) + ['first', 'second', 'third'] + Categories (3, object): ['first', 'second', 'third'] + + If the mapping is one-to-one the ordering of the categories is + preserved: + + >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True) + >>> cat + ['a', 'b', 'c'] + Categories (3, object): ['a' < 'b' < 'c'] + >>> cat.map({'a': 3, 'b': 2, 'c': 1}, na_action=None) + [3, 2, 1] + Categories (3, int64): [3 < 2 < 1] + + If the mapping is not one-to-one an :class:`~pandas.Index` is returned: + + >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'}, na_action=None) + Index(['first', 'second', 'first'], dtype='object') + + If a `dict` is used, all unmapped categories are mapped to `NaN` and + the result is an :class:`~pandas.Index`: + + >>> cat.map({'a': 'first', 'b': 'second'}, na_action=None) + Index(['first', 'second', nan], dtype='object') + """ + if na_action is lib.no_default: + warnings.warn( + "The default value of 'ignore' for the `na_action` parameter in " + "pandas.Categorical.map is deprecated and will be " + "changed to 'None' in a future version. Please set na_action to the " + "desired value to avoid seeing this warning", + FutureWarning, + stacklevel=find_stack_level(), + ) + na_action = "ignore" + + assert callable(mapper) or is_dict_like(mapper) + + new_categories = self.categories.map(mapper) + + has_nans = np.any(self._codes == -1) + + na_val = np.nan + if na_action is None and has_nans: + na_val = mapper(np.nan) if callable(mapper) else mapper.get(np.nan, np.nan) + + if new_categories.is_unique and not new_categories.hasnans and na_val is np.nan: + new_dtype = CategoricalDtype(new_categories, ordered=self.ordered) + return self.from_codes(self._codes.copy(), dtype=new_dtype, validate=False) + + if has_nans: + new_categories = new_categories.insert(len(new_categories), na_val) + + return np.take(new_categories, self._codes) + + __eq__ = _cat_compare_op(operator.eq) + __ne__ = _cat_compare_op(operator.ne) + __lt__ = _cat_compare_op(operator.lt) + __gt__ = _cat_compare_op(operator.gt) + __le__ = _cat_compare_op(operator.le) + __ge__ = _cat_compare_op(operator.ge) + + # ------------------------------------------------------------- + # Validators; ideally these can be de-duplicated + + def _validate_setitem_value(self, value): + if not is_hashable(value): + # wrap scalars and hashable-listlikes in list + return self._validate_listlike(value) + else: + return self._validate_scalar(value) + + def _validate_scalar(self, fill_value): + """ + Convert a user-facing fill_value to a representation to use with our + underlying ndarray, raising TypeError if this is not possible. 
+ + Parameters + ---------- + fill_value : object + + Returns + ------- + fill_value : int + + Raises + ------ + TypeError + """ + + if is_valid_na_for_dtype(fill_value, self.categories.dtype): + fill_value = -1 + elif fill_value in self.categories: + fill_value = self._unbox_scalar(fill_value) + else: + raise TypeError( + "Cannot setitem on a Categorical with a new " + f"category ({fill_value}), set the categories first" + ) from None + return fill_value + + @classmethod + def _validate_codes_for_dtype(cls, codes, *, dtype: CategoricalDtype) -> np.ndarray: + if isinstance(codes, ExtensionArray) and is_integer_dtype(codes.dtype): + # Avoid the implicit conversion of Int to object + if isna(codes).any(): + raise ValueError("codes cannot contain NA values") + codes = codes.to_numpy(dtype=np.int64) + else: + codes = np.asarray(codes) + if len(codes) and codes.dtype.kind not in "iu": + raise ValueError("codes need to be array-like integers") + + if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1): + raise ValueError("codes need to be between -1 and len(categories)-1") + return codes + + # ------------------------------------------------------------- + + @ravel_compat + def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: + """ + The numpy array interface. + + Returns + ------- + numpy.array + A numpy array of either the specified dtype or, + if dtype==None (default), the same dtype as + categorical.categories.dtype. + + Examples + -------- + + >>> cat = pd.Categorical(['a', 'b'], ordered=True) + + The following calls ``cat.__array__`` + + >>> np.asarray(cat) + array(['a', 'b'], dtype=object) + """ + ret = take_nd(self.categories._values, self._codes) + if dtype and np.dtype(dtype) != self.categories.dtype: + return np.asarray(ret, dtype) + # When we're a Categorical[ExtensionArray], like Interval, + # we need to ensure __array__ gets all the way to an + # ndarray. + return np.asarray(ret) + + def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + # for binary ops, use our custom dunder methods + result = arraylike.maybe_dispatch_ufunc_to_dunder_op( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + if "out" in kwargs: + # e.g. test_numpy_ufuncs_out + return arraylike.dispatch_ufunc_with_out( + self, ufunc, method, *inputs, **kwargs + ) + + if method == "reduce": + # e.g. 
TestCategoricalAnalytics::test_min_max_ordered + result = arraylike.dispatch_reduction_ufunc( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + # for all other cases, raise for now (similarly as what happens in + # Series.__array_prepare__) + raise TypeError( + f"Object with dtype {self.dtype} cannot perform " + f"the numpy op {ufunc.__name__}" + ) + + def __setstate__(self, state) -> None: + """Necessary for making this object picklable""" + if not isinstance(state, dict): + return super().__setstate__(state) + + if "_dtype" not in state: + state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"]) + + if "_codes" in state and "_ndarray" not in state: + # backward compat, changed what is property vs attribute + state["_ndarray"] = state.pop("_codes") + + super().__setstate__(state) + + @property + def nbytes(self) -> int: + return self._codes.nbytes + self.dtype.categories.values.nbytes + + def memory_usage(self, deep: bool = False) -> int: + """ + Memory usage of my values + + Parameters + ---------- + deep : bool + Introspect the data deeply, interrogate + `object` dtypes for system-level memory consumption + + Returns + ------- + bytes used + + Notes + ----- + Memory usage does not include memory consumed by elements that + are not components of the array if deep=False + + See Also + -------- + numpy.ndarray.nbytes + """ + return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep) + + def isna(self) -> npt.NDArray[np.bool_]: + """ + Detect missing values + + Missing values (-1 in .codes) are detected. + + Returns + ------- + np.ndarray[bool] of whether my values are null + + See Also + -------- + isna : Top-level isna. + isnull : Alias of isna. + Categorical.notna : Boolean inverse of Categorical.isna. + + """ + return self._codes == -1 + + isnull = isna + + def notna(self) -> npt.NDArray[np.bool_]: + """ + Inverse of isna + + Both missing values (-1 in .codes) and NA as a category are detected as + null. + + Returns + ------- + np.ndarray[bool] of whether my values are not null + + See Also + -------- + notna : Top-level notna. + notnull : Alias of notna. + Categorical.isna : Boolean inverse of Categorical.notna. + + """ + return ~self.isna() + + notnull = notna + + def value_counts(self, dropna: bool = True) -> Series: + """ + Return a Series containing counts of each category. + + Every category will have an entry, even those with a count of 0. + + Parameters + ---------- + dropna : bool, default True + Don't include counts of NaN. 
+ + Returns + ------- + counts : Series + + See Also + -------- + Series.value_counts + """ + from pandas import ( + CategoricalIndex, + Series, + ) + + code, cat = self._codes, self.categories + ncat, mask = (len(cat), code >= 0) + ix, clean = np.arange(ncat), mask.all() + + if dropna or clean: + obs = code if clean else code[mask] + count = np.bincount(obs, minlength=ncat or 0) + else: + count = np.bincount(np.where(mask, code, ncat)) + ix = np.append(ix, -1) + + ix = coerce_indexer_dtype(ix, self.dtype.categories) + ix = self._from_backing_data(ix) + + return Series( + count, index=CategoricalIndex(ix), dtype="int64", name="count", copy=False + ) + + # error: Argument 2 of "_empty" is incompatible with supertype + # "NDArrayBackedExtensionArray"; supertype defines the argument type as + # "ExtensionDtype" + @classmethod + def _empty( # type: ignore[override] + cls, shape: Shape, dtype: CategoricalDtype + ) -> Self: + """ + Analogous to np.empty(shape, dtype=dtype) + + Parameters + ---------- + shape : tuple[int] + dtype : CategoricalDtype + """ + arr = cls._from_sequence([], dtype=dtype) + + # We have to use np.zeros instead of np.empty otherwise the resulting + # ndarray may contain codes not supported by this dtype, in which + # case repr(result) could segfault. + backing = np.zeros(shape, dtype=arr._ndarray.dtype) + + return arr._from_backing_data(backing) + + def _internal_get_values(self): + """ + Return the values. + + For internal compatibility with pandas formatting. + + Returns + ------- + np.ndarray or Index + A numpy array of the same dtype as categorical.categories.dtype or + Index if datetime / periods. + """ + # if we are a datetime and period index, return Index to keep metadata + if needs_i8_conversion(self.categories.dtype): + return self.categories.take(self._codes, fill_value=NaT) + elif is_integer_dtype(self.categories.dtype) and -1 in self._codes: + return self.categories.astype("object").take(self._codes, fill_value=np.nan) + return np.array(self) + + def check_for_ordered(self, op) -> None: + """assert that we are ordered""" + if not self.ordered: + raise TypeError( + f"Categorical is not ordered for operation {op}\n" + "you can use .as_ordered() to change the " + "Categorical to an ordered one\n" + ) + + def argsort( + self, *, ascending: bool = True, kind: SortKind = "quicksort", **kwargs + ): + """ + Return the indices that would sort the Categorical. + + Missing values are sorted at the end. + + Parameters + ---------- + ascending : bool, default True + Whether the indices should result in an ascending + or descending sort. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + Sorting algorithm. + **kwargs: + passed through to :func:`numpy.argsort`. + + Returns + ------- + np.ndarray[np.intp] + + See Also + -------- + numpy.ndarray.argsort + + Notes + ----- + While an ordering is applied to the category values, arg-sorting + in this context refers more to organizing and grouping together + based on matching category values. Thus, this function can be + called on an unordered Categorical instance unlike the functions + 'Categorical.min' and 'Categorical.max'. + + Examples + -------- + >>> pd.Categorical(['b', 'b', 'a', 'c']).argsort() + array([2, 0, 1, 3]) + + >>> cat = pd.Categorical(['b', 'b', 'a', 'c'], + ... categories=['c', 'b', 'a'], + ... 
ordered=True) + >>> cat.argsort() + array([3, 0, 1, 2]) + + Missing values are placed at the end + + >>> cat = pd.Categorical([2, None, 1]) + >>> cat.argsort() + array([2, 0, 1]) + """ + return super().argsort(ascending=ascending, kind=kind, **kwargs) + + @overload + def sort_values( + self, + *, + inplace: Literal[False] = ..., + ascending: bool = ..., + na_position: str = ..., + ) -> Self: + ... + + @overload + def sort_values( + self, *, inplace: Literal[True], ascending: bool = ..., na_position: str = ... + ) -> None: + ... + + def sort_values( + self, + *, + inplace: bool = False, + ascending: bool = True, + na_position: str = "last", + ) -> Self | None: + """ + Sort the Categorical by category value returning a new + Categorical by default. + + While an ordering is applied to the category values, sorting in this + context refers more to organizing and grouping together based on + matching category values. Thus, this function can be called on an + unordered Categorical instance unlike the functions 'Categorical.min' + and 'Categorical.max'. + + Parameters + ---------- + inplace : bool, default False + Do operation in place. + ascending : bool, default True + Order ascending. Passing False orders descending. The + ordering parameter provides the method by which the + category values are organized. + na_position : {'first', 'last'} (optional, default='last') + 'first' puts NaNs at the beginning + 'last' puts NaNs at the end + + Returns + ------- + Categorical or None + + See Also + -------- + Categorical.sort + Series.sort_values + + Examples + -------- + >>> c = pd.Categorical([1, 2, 2, 1, 5]) + >>> c + [1, 2, 2, 1, 5] + Categories (3, int64): [1, 2, 5] + >>> c.sort_values() + [1, 1, 2, 2, 5] + Categories (3, int64): [1, 2, 5] + >>> c.sort_values(ascending=False) + [5, 2, 2, 1, 1] + Categories (3, int64): [1, 2, 5] + + >>> c = pd.Categorical([1, 2, 2, 1, 5]) + + 'sort_values' behaviour with NaNs. Note that 'na_position' + is independent of the 'ascending' parameter: + + >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5]) + >>> c + [NaN, 2, 2, NaN, 5] + Categories (2, int64): [2, 5] + >>> c.sort_values() + [2, 2, 5, NaN, NaN] + Categories (2, int64): [2, 5] + >>> c.sort_values(ascending=False) + [5, 2, 2, NaN, NaN] + Categories (2, int64): [2, 5] + >>> c.sort_values(na_position='first') + [NaN, NaN, 2, 2, 5] + Categories (2, int64): [2, 5] + >>> c.sort_values(ascending=False, na_position='first') + [NaN, NaN, 5, 2, 2] + Categories (2, int64): [2, 5] + """ + inplace = validate_bool_kwarg(inplace, "inplace") + if na_position not in ["last", "first"]: + raise ValueError(f"invalid na_position: {repr(na_position)}") + + sorted_idx = nargsort(self, ascending=ascending, na_position=na_position) + + if not inplace: + codes = self._codes[sorted_idx] + return self._from_backing_data(codes) + self._codes[:] = self._codes[sorted_idx] + return None + + def _rank( + self, + *, + axis: AxisInt = 0, + method: str = "average", + na_option: str = "keep", + ascending: bool = True, + pct: bool = False, + ): + """ + See Series.rank.__doc__. + """ + if axis != 0: + raise NotImplementedError + vff = self._values_for_rank() + return algorithms.rank( + vff, + axis=axis, + method=method, + na_option=na_option, + ascending=ascending, + pct=pct, + ) + + def _values_for_rank(self) -> np.ndarray: + """ + For correctly ranking ordered categorical data. See GH#15420 + + Ordered categorical data should be ranked on the basis of + codes with -1 translated to NaN. 
+ + Returns + ------- + numpy.array + + """ + from pandas import Series + + if self.ordered: + values = self.codes + mask = values == -1 + if mask.any(): + values = values.astype("float64") + values[mask] = np.nan + elif is_any_real_numeric_dtype(self.categories.dtype): + values = np.array(self) + else: + # reorder the categories (so rank can use the float codes) + # instead of passing an object array to rank + values = np.array( + self.rename_categories( + Series(self.categories, copy=False).rank().values + ) + ) + return values + + def _hash_pandas_object( + self, *, encoding: str, hash_key: str, categorize: bool + ) -> npt.NDArray[np.uint64]: + """ + Hash a Categorical by hashing its categories, and then mapping the codes + to the hashes. + + Parameters + ---------- + encoding : str + hash_key : str + categorize : bool + Ignored for Categorical. + + Returns + ------- + np.ndarray[uint64] + """ + # Note we ignore categorize, as we are already Categorical. + from pandas.core.util.hashing import hash_array + + # Convert ExtensionArrays to ndarrays + values = np.asarray(self.categories._values) + hashed = hash_array(values, encoding, hash_key, categorize=False) + + # we have uint64, as we don't directly support missing values + # we don't want to use take_nd which will coerce to float + # instead, directly construct the result with a + # max(np.uint64) as the missing value indicator + # + # TODO: GH#15362 + + mask = self.isna() + if len(hashed): + result = hashed.take(self._codes) + else: + result = np.zeros(len(mask), dtype="uint64") + + if mask.any(): + result[mask] = lib.u8max + + return result + + # ------------------------------------------------------------------ + # NDArrayBackedExtensionArray compat + + @property + def _codes(self) -> np.ndarray: + return self._ndarray + + def _box_func(self, i: int): + if i == -1: + return np.nan + return self.categories[i] + + def _unbox_scalar(self, key) -> int: + # searchsorted is very performance sensitive. By converting codes + # to same dtype as self.codes, we get much faster performance. + code = self.categories.get_loc(key) + code = self._ndarray.dtype.type(code) + return code + + # ------------------------------------------------------------------ + + def __iter__(self) -> Iterator: + """ + Returns an Iterator over the values of this Categorical. + """ + if self.ndim == 1: + return iter(self._internal_get_values().tolist()) + else: + return (self[n] for n in range(len(self))) + + def __contains__(self, key) -> bool: + """ + Returns True if `key` is in this Categorical. + """ + # if key is a NaN, check if any NaN is in self. + if is_valid_na_for_dtype(key, self.categories.dtype): + return bool(self.isna().any()) + + return contains(self, key, container=self._codes) + + # ------------------------------------------------------------------ + # Rendering Methods + + def _formatter(self, boxed: bool = False): + # Defer to CategoricalFormatter's formatter. 
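+        # (a None formatter signals callers to fall back to their default
+        # formatting path rather than calling a per-element function)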
+        return None
+
+    def _tidy_repr(self, max_vals: int = 10, footer: bool = True) -> str:
+        """
+        A short repr displaying only max_vals and an optional (but default)
+        footer.
+        """
+        num = max_vals // 2
+        head = self[:num]._get_repr(length=False, footer=False)
+        tail = self[-(max_vals - num) :]._get_repr(length=False, footer=False)
+
+        result = f"{head[:-1]}, ..., {tail[1:]}"
+        if footer:
+            result = f"{result}\n{self._repr_footer()}"
+
+        return str(result)
+
+    def _repr_categories(self) -> list[str]:
+        """
+        Return the base repr for the categories.
+        """
+        max_categories = (
+            10
+            if get_option("display.max_categories") == 0
+            else get_option("display.max_categories")
+        )
+        from pandas.io.formats import format as fmt
+
+        format_array = partial(
+            fmt.format_array, formatter=None, quoting=QUOTE_NONNUMERIC
+        )
+        if len(self.categories) > max_categories:
+            num = max_categories // 2
+            head = format_array(self.categories[:num])
+            tail = format_array(self.categories[-num:])
+            category_strs = head + ["..."] + tail
+        else:
+            category_strs = format_array(self.categories)
+
+        # Strip all leading spaces, which format_array adds for columns...
+        category_strs = [x.strip() for x in category_strs]
+        return category_strs
+
+    def _repr_categories_info(self) -> str:
+        """
+        Returns a string representation of the footer.
+        """
+        category_strs = self._repr_categories()
+        dtype = str(self.categories.dtype)
+        levheader = f"Categories ({len(self.categories)}, {dtype}): "
+        width, _ = get_terminal_size()
+        max_width = get_option("display.width") or width
+        if console.in_ipython_frontend():
+            # 0 = no breaks
+            max_width = 0
+        levstring = ""
+        start = True
+        cur_col_len = len(levheader)  # header
+        sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
+        linesep = f"{sep.rstrip()}\n"  # remove whitespace
+        for val in category_strs:
+            if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
+                levstring += linesep + (" " * (len(levheader) + 1))
+                cur_col_len = len(levheader) + 1  # header + a whitespace
+            elif not start:
+                levstring += sep
+            cur_col_len += len(val)
+            levstring += val
+            start = False
+        # replace ' < ... < ' with ' ... ' to save space
+        return f"{levheader}[{levstring.replace(' < ... < ', ' ... ')}]"
+
+    def _repr_footer(self) -> str:
+        info = self._repr_categories_info()
+        return f"Length: {len(self)}\n{info}"
+
+    def _get_repr(
+        self, length: bool = True, na_rep: str = "NaN", footer: bool = True
+    ) -> str:
+        from pandas.io.formats import format as fmt
+
+        formatter = fmt.CategoricalFormatter(
+            self, length=length, na_rep=na_rep, footer=footer
+        )
+        result = formatter.to_string()
+        return str(result)
+
+    def __repr__(self) -> str:
+        """
+        String representation.
+ """ + _maxlen = 10 + if len(self._codes) > _maxlen: + result = self._tidy_repr(_maxlen) + elif len(self._codes) > 0: + result = self._get_repr(length=len(self) > _maxlen) + else: + msg = self._get_repr(length=False, footer=True).replace("\n", ", ") + result = f"[], {msg}" + + return result + + # ------------------------------------------------------------------ + + def _validate_listlike(self, value): + # NB: here we assume scalar-like tuples have already been excluded + value = extract_array(value, extract_numpy=True) + + # require identical categories set + if isinstance(value, Categorical): + if self.dtype != value.dtype: + raise TypeError( + "Cannot set a Categorical with another, " + "without identical categories" + ) + # dtype equality implies categories_match_up_to_permutation + value = self._encode_with_my_categories(value) + return value._codes + + from pandas import Index + + # tupleize_cols=False for e.g. test_fillna_iterable_category GH#41914 + to_add = Index._with_infer(value, tupleize_cols=False).difference( + self.categories + ) + + # no assignments of values not in categories, but it's always ok to set + # something to np.nan + if len(to_add) and not isna(to_add).all(): + raise TypeError( + "Cannot setitem on a Categorical with a new " + "category, set the categories first" + ) + + codes = self.categories.get_indexer(value) + return codes.astype(self._ndarray.dtype, copy=False) + + def _reverse_indexer(self) -> dict[Hashable, npt.NDArray[np.intp]]: + """ + Compute the inverse of a categorical, returning + a dict of categories -> indexers. + + *This is an internal function* + + Returns + ------- + Dict[Hashable, np.ndarray[np.intp]] + dict of categories -> indexers + + Examples + -------- + >>> c = pd.Categorical(list('aabca')) + >>> c + ['a', 'a', 'b', 'c', 'a'] + Categories (3, object): ['a', 'b', 'c'] + >>> c.categories + Index(['a', 'b', 'c'], dtype='object') + >>> c.codes + array([0, 0, 1, 2, 0], dtype=int8) + >>> c._reverse_indexer() + {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])} + + """ + categories = self.categories + r, counts = libalgos.groupsort_indexer( + ensure_platform_int(self.codes), categories.size + ) + counts = ensure_int64(counts).cumsum() + _result = (r[start:end] for start, end in zip(counts, counts[1:])) + return dict(zip(categories, _result)) + + # ------------------------------------------------------------------ + # Reductions + + def _reduce( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + result = super()._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs) + if name in ["argmax", "argmin"]: + # don't wrap in Categorical! + return result + if keepdims: + return type(self)(result, dtype=self.dtype) + else: + return result + + def min(self, *, skipna: bool = True, **kwargs): + """ + The minimum value of the object. + + Only ordered `Categoricals` have a minimum! + + Raises + ------ + TypeError + If the `Categorical` is not `ordered`. 
+
+        Returns
+        -------
+        min : the minimum of this `Categorical`, NA value if empty
+        """
+        nv.validate_minmax_axis(kwargs.get("axis", 0))
+        nv.validate_min((), kwargs)
+        self.check_for_ordered("min")
+
+        if not len(self._codes):
+            return self.dtype.na_value
+
+        good = self._codes != -1
+        if not good.all():
+            if skipna and good.any():
+                pointer = self._codes[good].min()
+            else:
+                return np.nan
+        else:
+            pointer = self._codes.min()
+        return self._wrap_reduction_result(None, pointer)
+
+    def max(self, *, skipna: bool = True, **kwargs):
+        """
+        The maximum value of the object.
+
+        Only ordered `Categoricals` have a maximum!
+
+        Raises
+        ------
+        TypeError
+            If the `Categorical` is not `ordered`.
+
+        Returns
+        -------
+        max : the maximum of this `Categorical`, NA if array is empty
+        """
+        nv.validate_minmax_axis(kwargs.get("axis", 0))
+        nv.validate_max((), kwargs)
+        self.check_for_ordered("max")
+
+        if not len(self._codes):
+            return self.dtype.na_value
+
+        good = self._codes != -1
+        if not good.all():
+            if skipna and good.any():
+                pointer = self._codes[good].max()
+            else:
+                return np.nan
+        else:
+            pointer = self._codes.max()
+        return self._wrap_reduction_result(None, pointer)
+
+    def _mode(self, dropna: bool = True) -> Categorical:
+        codes = self._codes
+        mask = None
+        if dropna:
+            mask = self.isna()
+
+        res_codes = algorithms.mode(codes, mask=mask)
+        res_codes = cast(np.ndarray, res_codes)
+        assert res_codes.dtype == codes.dtype
+        res = self._from_backing_data(res_codes)
+        return res
+
+    # ------------------------------------------------------------------
+    # ExtensionArray Interface
+
+    def unique(self):
+        """
+        Return the ``Categorical`` whose ``categories`` and ``codes`` are
+        unique.
+
+        .. versionchanged:: 1.3.0
+
+           Previously, unused categories were dropped from the new categories.
+
+        Returns
+        -------
+        Categorical
+
+        See Also
+        --------
+        pandas.unique
+        CategoricalIndex.unique
+        Series.unique : Return unique values of Series object.
+
+        Examples
+        --------
+        >>> pd.Categorical(list("baabc")).unique()
+        ['b', 'a', 'c']
+        Categories (3, object): ['a', 'b', 'c']
+        >>> pd.Categorical(list("baab"), categories=list("abc"), ordered=True).unique()
+        ['b', 'a']
+        Categories (3, object): ['a' < 'b' < 'c']
+        """
+        # pylint: disable=useless-parent-delegation
+        return super().unique()
+
+    def _cast_quantile_result(self, res_values: np.ndarray) -> np.ndarray:
+        # make sure we have correct itemsize for resulting codes
+        assert res_values.dtype == self._ndarray.dtype
+        return res_values
+
+    def equals(self, other: object) -> bool:
+        """
+        Returns True if categorical arrays are equal.
+
+        Parameters
+        ----------
+        other : `Categorical`
+
+        Returns
+        -------
+        bool
+        """
+        if not isinstance(other, Categorical):
+            return False
+        elif self._categories_match_up_to_permutation(other):
+            other = self._encode_with_my_categories(other)
+            return np.array_equal(self._codes, other._codes)
+        return False
+
+    @classmethod
+    def _concat_same_type(cls, to_concat: Sequence[Self], axis: AxisInt = 0) -> Self:
+        from pandas.core.dtypes.concat import union_categoricals
+
+        first = to_concat[0]
+        if axis >= first.ndim:
+            raise ValueError(
+                f"axis {axis} is out of bounds for array of dimension {first.ndim}"
+            )
+
+        if axis == 1:
+            # Flatten, concatenate then reshape
+            if not all(x.ndim == 2 for x in to_concat):
+                raise ValueError
+
+            # pass correctly-shaped to union_categoricals
+            tc_flat = []
+            for obj in to_concat:
+                tc_flat.extend([obj[:, i] for i in range(obj.shape[1])])
+
+            res_flat = cls._concat_same_type(tc_flat, axis=0)
+
+            result = res_flat.reshape(len(first), -1, order="F")
+            return result
+
+        result = union_categoricals(to_concat)
+        return result
+
+    # ------------------------------------------------------------------
+
+    def _encode_with_my_categories(self, other: Categorical) -> Categorical:
+        """
+        Re-encode another categorical using this Categorical's categories.
+
+        Notes
+        -----
+        This assumes we have already checked
+        self._categories_match_up_to_permutation(other).
+        """
+        # Indexing on codes is more efficient if categories are the same,
+        # so we can apply some optimizations based on the degree of
+        # dtype-matching.
+        codes = recode_for_categories(
+            other.codes, other.categories, self.categories, copy=False
+        )
+        return self._from_backing_data(codes)
+
+    def _categories_match_up_to_permutation(self, other: Categorical) -> bool:
+        """
+        Returns True if the categoricals have the same dtype: the same
+        categories (up to a permutation) and the same ordered attribute.
+
+        Parameters
+        ----------
+        other : Categorical
+
+        Returns
+        -------
+        bool
+        """
+        return hash(self.dtype) == hash(other.dtype)
+
+    def describe(self) -> DataFrame:
+        """
+        Describes this Categorical.
+
+        Returns
+        -------
+        description : `DataFrame`
+            A dataframe with frequency and counts by category.
+        """
+        counts = self.value_counts(dropna=False)
+        freqs = counts / counts.sum()
+
+        from pandas import Index
+        from pandas.core.reshape.concat import concat
+
+        result = concat([counts, freqs], axis=1)
+        result.columns = Index(["counts", "freqs"])
+        result.index.name = "categories"
+
+        return result
+
+    def isin(self, values) -> npt.NDArray[np.bool_]:
+        """
+        Check whether `values` are contained in Categorical.
+
+        Return a boolean NumPy array showing whether each element in
+        the Categorical matches an element in the passed sequence of
+        `values` exactly.
+
+        Parameters
+        ----------
+        values : set or list-like
+            The sequence of values to test. Passing in a single string will
+            raise a ``TypeError``. Instead, turn a single string into a
+            list of one element.
+
+        Returns
+        -------
+        np.ndarray[bool]
+
+        Raises
+        ------
+        TypeError
+          * If `values` is not a set or list-like
+
+        See Also
+        --------
+        pandas.Series.isin : Equivalent method on Series.
+
+        Examples
+        --------
+        >>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
+        ...                     'hippo'])
+        >>> s.isin(['cow', 'lama'])
+        array([ True,  True,  True, False,  True, False])
+
+        Passing a single string as ``s.isin('lama')`` will raise an error.
Use + a list of one element instead: + + >>> s.isin(['lama']) + array([ True, False, True, False, True, False]) + """ + if not is_list_like(values): + values_type = type(values).__name__ + raise TypeError( + "only list-like objects are allowed to be passed " + f"to isin(), you passed a `{values_type}`" + ) + values = sanitize_array(values, None, None) + null_mask = np.asarray(isna(values)) + code_values = self.categories.get_indexer(values) + code_values = code_values[null_mask | (code_values >= 0)] + return algorithms.isin(self.codes, code_values) + + def _replace(self, *, to_replace, value, inplace: bool = False): + from pandas import Index + + inplace = validate_bool_kwarg(inplace, "inplace") + cat = self if inplace else self.copy() + + mask = isna(np.asarray(value)) + if mask.any(): + removals = np.asarray(to_replace)[mask] + removals = cat.categories[cat.categories.isin(removals)] + new_cat = cat.remove_categories(removals) + NDArrayBacked.__init__(cat, new_cat.codes, new_cat.dtype) + + ser = cat.categories.to_series() + ser = ser.replace(to_replace=to_replace, value=value) + + all_values = Index(ser) + + # GH51016: maintain order of existing categories + idxr = cat.categories.get_indexer_for(all_values) + locs = np.arange(len(ser)) + locs = np.where(idxr == -1, locs, idxr) + locs = locs.argsort() + + new_categories = ser.take(locs) + new_categories = new_categories.drop_duplicates(keep="first") + new_categories = Index(new_categories) + new_codes = recode_for_categories( + cat._codes, all_values, new_categories, copy=False + ) + new_dtype = CategoricalDtype(new_categories, ordered=self.dtype.ordered) + NDArrayBacked.__init__(cat, new_codes, new_dtype) + + if not inplace: + return cat + + # ------------------------------------------------------------------------ + # String methods interface + def _str_map( + self, f, na_value=np.nan, dtype=np.dtype("object"), convert: bool = True + ): + # Optimization to apply the callable `f` to the categories once + # and rebuild the result by `take`ing from the result with the codes. + # Returns the same type as the object-dtype implementation though. + from pandas.core.arrays import NumpyExtensionArray + + categories = self.categories + codes = self.codes + result = NumpyExtensionArray(categories.to_numpy())._str_map(f, na_value, dtype) + return take_nd(result, codes, fill_value=na_value) + + def _str_get_dummies(self, sep: str = "|"): + # sep may not be in categories. Just bail on this. 
+ from pandas.core.arrays import NumpyExtensionArray + + return NumpyExtensionArray(self.astype(str))._str_get_dummies(sep) + + # ------------------------------------------------------------------------ + # GroupBy Methods + + def _groupby_op( + self, + *, + how: str, + has_dropped_na: bool, + min_count: int, + ngroups: int, + ids: npt.NDArray[np.intp], + **kwargs, + ): + from pandas.core.groupby.ops import WrappedCythonOp + + kind = WrappedCythonOp.get_kind_from_how(how) + op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na) + + dtype = self.dtype + if how in ["sum", "prod", "cumsum", "cumprod", "skew"]: + raise TypeError(f"{dtype} type does not support {how} operations") + if how in ["min", "max", "rank"] and not dtype.ordered: + # raise TypeError instead of NotImplementedError to ensure we + # don't go down a group-by-group path, since in the empty-groups + # case that would fail to raise + raise TypeError(f"Cannot perform {how} with non-ordered Categorical") + if how not in ["rank", "any", "all", "first", "last", "min", "max"]: + if kind == "transform": + raise TypeError(f"{dtype} type does not support {how} operations") + raise TypeError(f"{dtype} dtype does not support aggregation '{how}'") + + result_mask = None + mask = self.isna() + if how == "rank": + assert self.ordered # checked earlier + npvalues = self._ndarray + elif how in ["first", "last", "min", "max"]: + npvalues = self._ndarray + result_mask = np.zeros(ngroups, dtype=bool) + else: + # any/all + npvalues = self.astype(bool) + + res_values = op._cython_op_ndim_compat( + npvalues, + min_count=min_count, + ngroups=ngroups, + comp_ids=ids, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + + if how in op.cast_blocklist: + return res_values + elif how in ["first", "last", "min", "max"]: + res_values[result_mask == 1] = -1 + return self._from_backing_data(res_values) + + +# The Series.cat accessor + + +@delegate_names( + delegate=Categorical, accessors=["categories", "ordered"], typ="property" +) +@delegate_names( + delegate=Categorical, + accessors=[ + "rename_categories", + "reorder_categories", + "add_categories", + "remove_categories", + "remove_unused_categories", + "set_categories", + "as_ordered", + "as_unordered", + ], + typ="method", +) +class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin): + """ + Accessor object for categorical properties of the Series values. 
+ + Parameters + ---------- + data : Series or CategoricalIndex + + Examples + -------- + >>> s = pd.Series(list("abbccc")).astype("category") + >>> s + 0 a + 1 b + 2 b + 3 c + 4 c + 5 c + dtype: category + Categories (3, object): ['a', 'b', 'c'] + + >>> s.cat.categories + Index(['a', 'b', 'c'], dtype='object') + + >>> s.cat.rename_categories(list("cba")) + 0 c + 1 b + 2 b + 3 a + 4 a + 5 a + dtype: category + Categories (3, object): ['c', 'b', 'a'] + + >>> s.cat.reorder_categories(list("cba")) + 0 a + 1 b + 2 b + 3 c + 4 c + 5 c + dtype: category + Categories (3, object): ['c', 'b', 'a'] + + >>> s.cat.add_categories(["d", "e"]) + 0 a + 1 b + 2 b + 3 c + 4 c + 5 c + dtype: category + Categories (5, object): ['a', 'b', 'c', 'd', 'e'] + + >>> s.cat.remove_categories(["a", "c"]) + 0 NaN + 1 b + 2 b + 3 NaN + 4 NaN + 5 NaN + dtype: category + Categories (1, object): ['b'] + + >>> s1 = s.cat.add_categories(["d", "e"]) + >>> s1.cat.remove_unused_categories() + 0 a + 1 b + 2 b + 3 c + 4 c + 5 c + dtype: category + Categories (3, object): ['a', 'b', 'c'] + + >>> s.cat.set_categories(list("abcde")) + 0 a + 1 b + 2 b + 3 c + 4 c + 5 c + dtype: category + Categories (5, object): ['a', 'b', 'c', 'd', 'e'] + + >>> s.cat.as_ordered() + 0 a + 1 b + 2 b + 3 c + 4 c + 5 c + dtype: category + Categories (3, object): ['a' < 'b' < 'c'] + + >>> s.cat.as_unordered() + 0 a + 1 b + 2 b + 3 c + 4 c + 5 c + dtype: category + Categories (3, object): ['a', 'b', 'c'] + """ + + def __init__(self, data) -> None: + self._validate(data) + self._parent = data.values + self._index = data.index + self._name = data.name + self._freeze() + + @staticmethod + def _validate(data): + if not isinstance(data.dtype, CategoricalDtype): + raise AttributeError("Can only use .cat accessor with a 'category' dtype") + + # error: Signature of "_delegate_property_get" incompatible with supertype + # "PandasDelegate" + def _delegate_property_get(self, name: str): # type: ignore[override] + return getattr(self._parent, name) + + # error: Signature of "_delegate_property_set" incompatible with supertype + # "PandasDelegate" + def _delegate_property_set(self, name: str, new_values): # type: ignore[override] + return setattr(self._parent, name, new_values) + + @property + def codes(self) -> Series: + """ + Return Series of codes as well as the index. + + Examples + -------- + >>> raw_cate = pd.Categorical(["a", "b", "c", "a"], categories=["a", "b"]) + >>> ser = pd.Series(raw_cate) + >>> ser.cat.codes + 0 0 + 1 1 + 2 -1 + 3 0 + dtype: int8 + """ + from pandas import Series + + return Series(self._parent.codes, index=self._index) + + def _delegate_method(self, name: str, *args, **kwargs): + from pandas import Series + + method = getattr(self._parent, name) + res = method(*args, **kwargs) + if res is not None: + return Series(res, index=self._index, name=self._name) + + +# utility routines + + +def _get_codes_for_values(values, categories: Index) -> np.ndarray: + """ + utility routine to turn values into codes given the specified categories + + If `values` is known to be a Categorical, use recode_for_categories instead. 
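+
+    For illustration (hypothetical inputs):
+
+    >>> _get_codes_for_values(np.array(['b', 'a', 'd']), pd.Index(['a', 'b']))
+    array([ 1,  0, -1], dtype=int8)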
+ """ + if values.ndim > 1: + flat = values.ravel() + codes = _get_codes_for_values(flat, categories) + return codes.reshape(values.shape) + + codes = categories.get_indexer_for(values) + return coerce_indexer_dtype(codes, categories) + + +def recode_for_categories( + codes: np.ndarray, old_categories, new_categories, copy: bool = True +) -> np.ndarray: + """ + Convert a set of codes for to a new set of categories + + Parameters + ---------- + codes : np.ndarray + old_categories, new_categories : Index + copy: bool, default True + Whether to copy if the codes are unchanged. + + Returns + ------- + new_codes : np.ndarray[np.int64] + + Examples + -------- + >>> old_cat = pd.Index(['b', 'a', 'c']) + >>> new_cat = pd.Index(['a', 'b']) + >>> codes = np.array([0, 1, 1, 2]) + >>> recode_for_categories(codes, old_cat, new_cat) + array([ 1, 0, 0, -1], dtype=int8) + """ + if len(old_categories) == 0: + # All null anyway, so just retain the nulls + if copy: + return codes.copy() + return codes + elif new_categories.equals(old_categories): + # Same categories, so no need to actually recode + if copy: + return codes.copy() + return codes + + indexer = coerce_indexer_dtype( + new_categories.get_indexer_for(old_categories), new_categories + ) + new_codes = take_nd(indexer, codes, fill_value=-1) + return new_codes + + +def factorize_from_iterable(values) -> tuple[np.ndarray, Index]: + """ + Factorize an input `values` into `categories` and `codes`. Preserves + categorical dtype in `categories`. + + Parameters + ---------- + values : list-like + + Returns + ------- + codes : ndarray + categories : Index + If `values` has a categorical dtype, then `categories` is + a CategoricalIndex keeping the categories and order of `values`. + """ + from pandas import CategoricalIndex + + if not is_list_like(values): + raise TypeError("Input must be list-like") + + categories: Index + + vdtype = getattr(values, "dtype", None) + if isinstance(vdtype, CategoricalDtype): + values = extract_array(values) + # The Categorical we want to build has the same categories + # as values but its codes are by def [0, ..., len(n_categories) - 1] + cat_codes = np.arange(len(values.categories), dtype=values.codes.dtype) + cat = Categorical.from_codes(cat_codes, dtype=values.dtype, validate=False) + + categories = CategoricalIndex(cat) + codes = values.codes + else: + # The value of ordered is irrelevant since we don't use cat as such, + # but only the resulting categories, the order of which is independent + # from ordered. Set ordered to False as default. See GH #15457 + cat = Categorical(values, ordered=False) + categories = cat.categories + codes = cat.codes + return codes, categories + + +def factorize_from_iterables(iterables) -> tuple[list[np.ndarray], list[Index]]: + """ + A higher-level wrapper over `factorize_from_iterable`. + + Parameters + ---------- + iterables : list-like of list-likes + + Returns + ------- + codes : list of ndarrays + categories : list of Indexes + + Notes + ----- + See `factorize_from_iterable` for more info. + """ + if len(iterables) == 0: + # For consistency, it should return two empty lists. 
+ return [], [] + + codes, categories = zip(*(factorize_from_iterable(it) for it in iterables)) + return list(codes), list(categories) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/datetimelike.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/datetimelike.py new file mode 100644 index 00000000..1a6438b2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/datetimelike.py @@ -0,0 +1,2468 @@ +from __future__ import annotations + +from datetime import ( + datetime, + timedelta, +) +from functools import wraps +import operator +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + Union, + cast, + final, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import ( + algos, + lib, +) +from pandas._libs.arrays import NDArrayBacked +from pandas._libs.tslibs import ( + BaseOffset, + IncompatibleFrequency, + NaT, + NaTType, + Period, + Resolution, + Tick, + Timedelta, + Timestamp, + astype_overflowsafe, + delta_to_nanoseconds, + get_unit_from_dtype, + iNaT, + ints_to_pydatetime, + ints_to_pytimedelta, + to_offset, +) +from pandas._libs.tslibs.fields import ( + RoundTo, + round_nsint64, +) +from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions +from pandas._libs.tslibs.timestamps import integer_op_not_supported +from pandas._typing import ( + ArrayLike, + AxisInt, + DatetimeLikeScalar, + Dtype, + DtypeObj, + F, + InterpolateOptions, + NpDtype, + PositionalIndexer2D, + PositionalIndexerTuple, + ScalarIndexer, + Self, + SequenceIndexer, + TimeAmbiguous, + TimeNonexistent, + npt, +) +from pandas.compat.numpy import function as nv +from pandas.errors import ( + AbstractMethodError, + InvalidComparison, + PerformanceWarning, +) +from pandas.util._decorators import ( + Appender, + Substitution, + cache_readonly, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_all_strings, + is_integer_dtype, + is_list_like, + is_object_dtype, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCCategorical, + ABCMultiIndex, +) +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, +) + +from pandas.core import ( + algorithms, + missing, + nanops, + ops, +) +from pandas.core.algorithms import ( + checked_add_with_arr, + isin, + map_array, + unique1d, +) +from pandas.core.array_algos import datetimelike_accumulations +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays._mixins import ( + NDArrayBackedExtensionArray, + ravel_compat, +) +from pandas.core.arrays.arrow.array import ArrowExtensionArray +from pandas.core.arrays.base import ExtensionArray +from pandas.core.arrays.integer import IntegerArray +import pandas.core.common as com +from pandas.core.construction import ( + array as pd_array, + ensure_wrapped_if_datetimelike, + extract_array, +) +from pandas.core.indexers import ( + check_array_indexer, + check_setitem_lengths, +) +from pandas.core.ops.common import unpack_zerodim_and_defer +from pandas.core.ops.invalid import ( + invalid_comparison, + make_invalid_op, +) + +from pandas.tseries import frequencies + +if TYPE_CHECKING: + from collections.abc import ( + Iterator, + Sequence, + ) + + from pandas import Index + from pandas.core.arrays import ( + DatetimeArray, + PeriodArray, + TimedeltaArray, + ) + +DTScalarOrNaT = Union[DatetimeLikeScalar, NaTType] + + 
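+# Illustrative sketch (not part of the pandas API surface): indexing any of
+# the datetimelike arrays yields either a scalar of the matching type or
+# NaT, which is the union DTScalarOrNaT describes, e.g.
+#
+#   >>> arr = pd.array(pd.to_datetime(["2020-01-01", None]))
+#   >>> arr[0]  # Timestamp('2020-01-01 00:00:00')
+#   >>> arr[1]  # NaT
+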
+def _make_unpacked_invalid_op(op_name: str): + op = make_invalid_op(op_name) + return unpack_zerodim_and_defer(op_name)(op) + + +def _period_dispatch(meth: F) -> F: + """ + For PeriodArray methods, dispatch to DatetimeArray and re-wrap the results + in PeriodArray. We cannot use ._ndarray directly for the affected + methods because the i8 data has different semantics on NaT values. + """ + + @wraps(meth) + def new_meth(self, *args, **kwargs): + if not isinstance(self.dtype, PeriodDtype): + return meth(self, *args, **kwargs) + + arr = self.view("M8[ns]") + result = meth(arr, *args, **kwargs) + if result is NaT: + return NaT + elif isinstance(result, Timestamp): + return self._box_func(result._value) + + res_i8 = result.view("i8") + return self._from_backing_data(res_i8) + + return cast(F, new_meth) + + +# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is +# incompatible with definition in base class "ExtensionArray" +class DatetimeLikeArrayMixin( # type: ignore[misc] + OpsMixin, NDArrayBackedExtensionArray +): + """ + Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray + + Assumes that __new__/__init__ defines: + _ndarray + + and that inheriting subclass implements: + freq + """ + + # _infer_matches -> which infer_dtype strings are close enough to our own + _infer_matches: tuple[str, ...] + _is_recognized_dtype: Callable[[DtypeObj], bool] + _recognized_scalars: tuple[type, ...] + _ndarray: np.ndarray + freq: BaseOffset | None + + @cache_readonly + def _can_hold_na(self) -> bool: + return True + + def __init__( + self, data, dtype: Dtype | None = None, freq=None, copy: bool = False + ) -> None: + raise AbstractMethodError(self) + + @property + def _scalar_type(self) -> type[DatetimeLikeScalar]: + """ + The scalar associated with this datelike + + * PeriodArray : Period + * DatetimeArray : Timestamp + * TimedeltaArray : Timedelta + """ + raise AbstractMethodError(self) + + def _scalar_from_string(self, value: str) -> DTScalarOrNaT: + """ + Construct a scalar type from a string. + + Parameters + ---------- + value : str + + Returns + ------- + Period, Timestamp, or Timedelta, or NaT + Whatever the type of ``self._scalar_type`` is. + + Notes + ----- + This should call ``self._check_compatible_with`` before + unboxing the result. + """ + raise AbstractMethodError(self) + + def _unbox_scalar( + self, value: DTScalarOrNaT + ) -> np.int64 | np.datetime64 | np.timedelta64: + """ + Unbox the integer value of a scalar `value`. + + Parameters + ---------- + value : Period, Timestamp, Timedelta, or NaT + Depending on subclass. + + Returns + ------- + int + + Examples + -------- + >>> arr = pd.arrays.DatetimeArray(np.array(['1970-01-01'], 'datetime64[ns]')) + >>> arr._unbox_scalar(arr[0]) + numpy.datetime64('1970-01-01T00:00:00.000000000') + """ + raise AbstractMethodError(self) + + def _check_compatible_with(self, other: DTScalarOrNaT) -> None: + """ + Verify that `self` and `other` are compatible. + + * DatetimeArray verifies that the timezones (if any) match + * PeriodArray verifies that the freq matches + * Timedelta has no verification + + In each case, NaT is considered compatible. 
+ + Parameters + ---------- + other + + Raises + ------ + Exception + """ + raise AbstractMethodError(self) + + # ------------------------------------------------------------------ + + def _box_func(self, x): + """ + box function to get object from internal representation + """ + raise AbstractMethodError(self) + + def _box_values(self, values) -> np.ndarray: + """ + apply box func to passed values + """ + return lib.map_infer(values, self._box_func, convert=False) + + def __iter__(self) -> Iterator: + if self.ndim > 1: + return (self[n] for n in range(len(self))) + else: + return (self._box_func(v) for v in self.asi8) + + @property + def asi8(self) -> npt.NDArray[np.int64]: + """ + Integer representation of the values. + + Returns + ------- + ndarray + An ndarray with int64 dtype. + """ + # do not cache or you'll create a memory leak + return self._ndarray.view("i8") + + # ---------------------------------------------------------------- + # Rendering Methods + + def _format_native_types( + self, *, na_rep: str | float = "NaT", date_format=None + ) -> npt.NDArray[np.object_]: + """ + Helper method for astype when converting to strings. + + Returns + ------- + ndarray[str] + """ + raise AbstractMethodError(self) + + def _formatter(self, boxed: bool = False): + # TODO: Remove Datetime & DatetimeTZ formatters. + return "'{}'".format + + # ---------------------------------------------------------------- + # Array-Like / EA-Interface Methods + + def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: + # used for Timedelta/DatetimeArray, overwritten by PeriodArray + if is_object_dtype(dtype): + return np.array(list(self), dtype=object) + return self._ndarray + + @overload + def __getitem__(self, item: ScalarIndexer) -> DTScalarOrNaT: + ... + + @overload + def __getitem__( + self, + item: SequenceIndexer | PositionalIndexerTuple, + ) -> Self: + ... + + def __getitem__(self, key: PositionalIndexer2D) -> Self | DTScalarOrNaT: + """ + This getitem defers to the underlying array, which by-definition can + only handle list-likes, slices, and integer scalars + """ + # Use cast as we know we will get back a DatetimeLikeArray or DTScalar, + # but skip evaluating the Union at runtime for performance + # (see https://github.com/pandas-dev/pandas/pull/44624) + result = cast("Union[Self, DTScalarOrNaT]", super().__getitem__(key)) + if lib.is_scalar(result): + return result + else: + # At this point we know the result is an array. + result = cast(Self, result) + result._freq = self._get_getitem_freq(key) + return result + + def _get_getitem_freq(self, key) -> BaseOffset | None: + """ + Find the `freq` attribute to assign to the result of a __getitem__ lookup. 
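+
+        E.g. a step-sliced result keeps a scaled freq (sketch, assuming the
+        parent array has a set freq): ``arr[::2].freq == 2 * arr.freq``.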
+ """ + is_period = isinstance(self.dtype, PeriodDtype) + if is_period: + freq = self.freq + elif self.ndim != 1: + freq = None + else: + key = check_array_indexer(self, key) # maybe ndarray[bool] -> slice + freq = None + if isinstance(key, slice): + if self.freq is not None and key.step is not None: + freq = key.step * self.freq + else: + freq = self.freq + elif key is Ellipsis: + # GH#21282 indexing with Ellipsis is similar to a full slice, + # should preserve `freq` attribute + freq = self.freq + elif com.is_bool_indexer(key): + new_key = lib.maybe_booleans_to_slice(key.view(np.uint8)) + if isinstance(new_key, slice): + return self._get_getitem_freq(new_key) + return freq + + # error: Argument 1 of "__setitem__" is incompatible with supertype + # "ExtensionArray"; supertype defines the argument type as "Union[int, + # ndarray]" + def __setitem__( + self, + key: int | Sequence[int] | Sequence[bool] | slice, + value: NaTType | Any | Sequence[Any], + ) -> None: + # I'm fudging the types a bit here. "Any" above really depends + # on type(self). For PeriodArray, it's Period (or stuff coercible + # to a period in from_sequence). For DatetimeArray, it's Timestamp... + # I don't know if mypy can do that, possibly with Generics. + # https://mypy.readthedocs.io/en/latest/generics.html + + no_op = check_setitem_lengths(key, value, self) + + # Calling super() before the no_op short-circuit means that we raise + # on invalid 'value' even if this is a no-op, e.g. wrong-dtype empty array. + super().__setitem__(key, value) + + if no_op: + return + + self._maybe_clear_freq() + + def _maybe_clear_freq(self) -> None: + # inplace operations like __setitem__ may invalidate the freq of + # DatetimeArray and TimedeltaArray + pass + + def astype(self, dtype, copy: bool = True): + # Some notes on cases we don't have to handle here in the base class: + # 1. PeriodArray.astype handles period -> period + # 2. DatetimeArray.astype handles conversion between tz. + # 3. DatetimeArray.astype handles datetime -> period + dtype = pandas_dtype(dtype) + + if dtype == object: + if self.dtype.kind == "M": + self = cast("DatetimeArray", self) + # *much* faster than self._box_values + # for e.g. test_get_loc_tuple_monotonic_above_size_cutoff + i8data = self.asi8 + converted = ints_to_pydatetime( + i8data, + tz=self.tz, + box="timestamp", + reso=self._creso, + ) + return converted + + elif self.dtype.kind == "m": + return ints_to_pytimedelta(self._ndarray, box=True) + + return self._box_values(self.asi8.ravel()).reshape(self.shape) + + elif isinstance(dtype, ExtensionDtype): + return super().astype(dtype, copy=copy) + elif is_string_dtype(dtype): + return self._format_native_types() + elif dtype.kind in "iu": + # we deliberately ignore int32 vs. int64 here. + # See https://github.com/pandas-dev/pandas/issues/24381 for more. + values = self.asi8 + if dtype != np.int64: + raise TypeError( + f"Converting from {self.dtype} to {dtype} is not supported. " + "Do obj.astype('int64').astype(dtype) instead" + ) + + if copy: + values = values.copy() + return values + elif (dtype.kind in "mM" and self.dtype != dtype) or dtype.kind == "f": + # disallow conversion between datetime/timedelta, + # and conversions for any datetimelike to float + msg = f"Cannot cast {type(self).__name__} to dtype {dtype}" + raise TypeError(msg) + else: + return np.asarray(self, dtype=dtype) + + @overload + def view(self) -> Self: + ... + + @overload + def view(self, dtype: Literal["M8[ns]"]) -> DatetimeArray: + ... 
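+    # A quick sketch of the astype rules implemented above (outputs assume
+    # this 2.1-era vendored copy): casting to object boxes the scalars,
+    # int64 exposes the raw i8 data, and any other integer width is rejected.
+    #
+    # >>> arr = pd.array(pd.date_range("2024-01-01", periods=2))
+    # >>> arr.astype(object)[0]
+    # Timestamp('2024-01-01 00:00:00')
+    # >>> arr.astype("int64")
+    # array([1704067200000000000, 1704153600000000000])
+    # >>> arr.astype("int32")
+    # Traceback (most recent call last):
+    # TypeError: Converting from datetime64[ns] to int32 is not supported. ...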
+ + @overload + def view(self, dtype: Literal["m8[ns]"]) -> TimedeltaArray: + ... + + @overload + def view(self, dtype: Dtype | None = ...) -> ArrayLike: + ... + + # pylint: disable-next=useless-parent-delegation + def view(self, dtype: Dtype | None = None) -> ArrayLike: + # we need to explicitly call super() method as long as the `@overload`s + # are present in this file. + return super().view(dtype) + + # ------------------------------------------------------------------ + # Validation Methods + # TODO: try to de-duplicate these, ensure identical behavior + + def _validate_comparison_value(self, other): + if isinstance(other, str): + try: + # GH#18435 strings get a pass from tzawareness compat + other = self._scalar_from_string(other) + except (ValueError, IncompatibleFrequency): + # failed to parse as Timestamp/Timedelta/Period + raise InvalidComparison(other) + + if isinstance(other, self._recognized_scalars) or other is NaT: + other = self._scalar_type(other) + try: + self._check_compatible_with(other) + except (TypeError, IncompatibleFrequency) as err: + # e.g. tzawareness mismatch + raise InvalidComparison(other) from err + + elif not is_list_like(other): + raise InvalidComparison(other) + + elif len(other) != len(self): + raise ValueError("Lengths must match") + + else: + try: + other = self._validate_listlike(other, allow_object=True) + self._check_compatible_with(other) + except (TypeError, IncompatibleFrequency) as err: + if is_object_dtype(getattr(other, "dtype", None)): + # We will have to operate element-wise + pass + else: + raise InvalidComparison(other) from err + + return other + + def _validate_scalar( + self, + value, + *, + allow_listlike: bool = False, + unbox: bool = True, + ): + """ + Validate that the input value can be cast to our scalar_type. + + Parameters + ---------- + value : object + allow_listlike: bool, default False + When raising an exception, whether the message should say + listlike inputs are allowed. + unbox : bool, default True + Whether to unbox the result before returning. Note: unbox=False + skips the setitem compatibility check. + + Returns + ------- + self._scalar_type or NaT + """ + if isinstance(value, self._scalar_type): + pass + + elif isinstance(value, str): + # NB: Careful about tzawareness + try: + value = self._scalar_from_string(value) + except ValueError as err: + msg = self._validation_error_message(value, allow_listlike) + raise TypeError(msg) from err + + elif is_valid_na_for_dtype(value, self.dtype): + # GH#18295 + value = NaT + + elif isna(value): + # if we are dt64tz and value is dt64("NaT"), dont cast to NaT, + # or else we'll fail to raise in _unbox_scalar + msg = self._validation_error_message(value, allow_listlike) + raise TypeError(msg) + + elif isinstance(value, self._recognized_scalars): + value = self._scalar_type(value) + + else: + msg = self._validation_error_message(value, allow_listlike) + raise TypeError(msg) + + if not unbox: + # NB: In general NDArrayBackedExtensionArray will unbox here; + # this option exists to prevent a performance hit in + # TimedeltaIndex.get_loc + return value + return self._unbox_scalar(value) + + def _validation_error_message(self, value, allow_listlike: bool = False) -> str: + """ + Construct an exception message on validation error. + + Some methods allow only scalar inputs, while others allow either scalar + or listlike. 
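+
+        The two message shapes look like this (a sketch exercising the
+        private ``_validate_scalar`` helper directly; 2.1-era copy assumed):
+
+        >>> arr = pd.array(pd.date_range("2024-01-01", periods=2))
+        >>> arr._validate_scalar(1.5)
+        Traceback (most recent call last):
+        TypeError: value should be a 'Timestamp' or 'NaT'. Got 'float' instead.
+        >>> arr._validate_scalar(1.5, allow_listlike=True)
+        Traceback (most recent call last):
+        TypeError: value should be a 'Timestamp', 'NaT', or array of those. Got 'float' instead.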
+ + Parameters + ---------- + allow_listlike: bool, default False + + Returns + ------- + str + """ + if allow_listlike: + msg = ( + f"value should be a '{self._scalar_type.__name__}', 'NaT', " + f"or array of those. Got '{type(value).__name__}' instead." + ) + else: + msg = ( + f"value should be a '{self._scalar_type.__name__}' or 'NaT'. " + f"Got '{type(value).__name__}' instead." + ) + return msg + + def _validate_listlike(self, value, allow_object: bool = False): + if isinstance(value, type(self)): + return value + + if isinstance(value, list) and len(value) == 0: + # We treat empty list as our own dtype. + return type(self)._from_sequence([], dtype=self.dtype) + + if hasattr(value, "dtype") and value.dtype == object: + # `array` below won't do inference if value is an Index or Series. + # so do so here. in the Index case, inferred_type may be cached. + if lib.infer_dtype(value) in self._infer_matches: + try: + value = type(self)._from_sequence(value) + except (ValueError, TypeError): + if allow_object: + return value + msg = self._validation_error_message(value, True) + raise TypeError(msg) + + # Do type inference if necessary up front (after unpacking + # NumpyExtensionArray) + # e.g. we passed PeriodIndex.values and got an ndarray of Periods + value = extract_array(value, extract_numpy=True) + value = pd_array(value) + value = extract_array(value, extract_numpy=True) + + if is_all_strings(value): + # We got a StringArray + try: + # TODO: Could use from_sequence_of_strings if implemented + # Note: passing dtype is necessary for PeriodArray tests + value = type(self)._from_sequence(value, dtype=self.dtype) + except ValueError: + pass + + if isinstance(value.dtype, CategoricalDtype): + # e.g. we have a Categorical holding self.dtype + if value.categories.dtype == self.dtype: + # TODO: do we need equal dtype or just comparable? + value = value._internal_get_values() + value = extract_array(value, extract_numpy=True) + + if allow_object and is_object_dtype(value.dtype): + pass + + elif not type(self)._is_recognized_dtype(value.dtype): + msg = self._validation_error_message(value, True) + raise TypeError(msg) + + return value + + def _validate_setitem_value(self, value): + if is_list_like(value): + value = self._validate_listlike(value) + else: + return self._validate_scalar(value, allow_listlike=True) + + return self._unbox(value) + + @final + def _unbox(self, other) -> np.int64 | np.datetime64 | np.timedelta64 | np.ndarray: + """ + Unbox either a scalar with _unbox_scalar or an instance of our own type. + """ + if lib.is_scalar(other): + other = self._unbox_scalar(other) + else: + # same type as self + self._check_compatible_with(other) + other = other._ndarray + return other + + # ------------------------------------------------------------------ + # Additional array methods + # These are not part of the EA API, but we implement them because + # pandas assumes they're there. + + @ravel_compat + def map(self, mapper, na_action=None): + from pandas import Index + + result = map_array(self, mapper, na_action=na_action) + result = Index(result) + + if isinstance(result, ABCMultiIndex): + return result.to_numpy() + else: + return result.array + + def isin(self, values) -> npt.NDArray[np.bool_]: + """ + Compute boolean array of whether each value is found in the + passed set of values. 
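+
+        Roughly (a sketch, 2.1-era copy assumed): strings are parsed to the
+        array's own type before matching, while numeric values never match.
+
+        >>> dti = pd.date_range("2024-01-01", periods=3)
+        >>> dti.isin(["2024-01-01", "2024-01-03"])
+        array([ True, False,  True])
+        >>> dti.isin([1, 2, 3])
+        array([False, False, False])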
+ + Parameters + ---------- + values : set or sequence of values + + Returns + ------- + ndarray[bool] + """ + if not hasattr(values, "dtype"): + values = np.asarray(values) + + if values.dtype.kind in "fiuc": + # TODO: de-duplicate with equals, validate_comparison_value + return np.zeros(self.shape, dtype=bool) + + if not isinstance(values, type(self)): + inferable = [ + "timedelta", + "timedelta64", + "datetime", + "datetime64", + "date", + "period", + ] + if values.dtype == object: + inferred = lib.infer_dtype(values, skipna=False) + if inferred not in inferable: + if inferred == "string": + pass + + elif "mixed" in inferred: + return isin(self.astype(object), values) + else: + return np.zeros(self.shape, dtype=bool) + + try: + values = type(self)._from_sequence(values) + except ValueError: + return isin(self.astype(object), values) + + if self.dtype.kind in "mM": + self = cast("DatetimeArray | TimedeltaArray", self) + values = values.as_unit(self.unit) + + try: + self._check_compatible_with(values) + except (TypeError, ValueError): + # Includes tzawareness mismatch and IncompatibleFrequencyError + return np.zeros(self.shape, dtype=bool) + + return isin(self.asi8, values.asi8) + + # ------------------------------------------------------------------ + # Null Handling + + def isna(self) -> npt.NDArray[np.bool_]: + return self._isnan + + @property # NB: override with cache_readonly in immutable subclasses + def _isnan(self) -> npt.NDArray[np.bool_]: + """ + return if each value is nan + """ + return self.asi8 == iNaT + + @property # NB: override with cache_readonly in immutable subclasses + def _hasna(self) -> bool: + """ + return if I have any nans; enables various perf speedups + """ + return bool(self._isnan.any()) + + def _maybe_mask_results( + self, result: np.ndarray, fill_value=iNaT, convert=None + ) -> np.ndarray: + """ + Parameters + ---------- + result : np.ndarray + fill_value : object, default iNaT + convert : str, dtype or None + + Returns + ------- + result : ndarray with values replace by the fill_value + + mask the result if needed, convert to the provided dtype if its not + None + + This is an internal routine. + """ + if self._hasna: + if convert: + result = result.astype(convert) + if fill_value is None: + fill_value = np.nan + np.putmask(result, self._isnan, fill_value) + return result + + # ------------------------------------------------------------------ + # Frequency Properties/Methods + + @property + def freqstr(self) -> str | None: + """ + Return the frequency object as a string if it's set, otherwise None. + + Examples + -------- + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00"], freq="D") + >>> idx.freqstr + 'D' + + The frequency can be inferred if there are more than 2 points: + + >>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"], + ... freq="infer") + >>> idx.freqstr + '2D' + + For PeriodIndex: + + >>> idx = pd.PeriodIndex(["2023-1", "2023-2", "2023-3"], freq="M") + >>> idx.freqstr + 'M' + """ + if self.freq is None: + return None + return self.freq.freqstr + + @property # NB: override with cache_readonly in immutable subclasses + def inferred_freq(self) -> str | None: + """ + Tries to return a string representing a frequency generated by infer_freq. + + Returns None if it can't autodetect the frequency. 
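+
+        In particular (a sketch): inference needs at least three points, and
+        irregular spacing yields None rather than raising.
+
+        >>> pd.DatetimeIndex(["2018-01-01", "2018-01-02"]).inferred_freq is None
+        True
+        >>> pd.DatetimeIndex(["2018-01-01", "2018-01-02", "2018-01-05"]).inferred_freq is None
+        True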
+ + Examples + -------- + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"]) + >>> idx.inferred_freq + '2D' + + For TimedeltaIndex: + + >>> tdelta_idx = pd.to_timedelta(["0 days", "10 days", "20 days"]) + >>> tdelta_idx + TimedeltaIndex(['0 days', '10 days', '20 days'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.inferred_freq + '10D' + """ + if self.ndim != 1: + return None + try: + return frequencies.infer_freq(self) + except ValueError: + return None + + @property # NB: override with cache_readonly in immutable subclasses + def _resolution_obj(self) -> Resolution | None: + freqstr = self.freqstr + if freqstr is None: + return None + try: + return Resolution.get_reso_from_freqstr(freqstr) + except KeyError: + return None + + @property # NB: override with cache_readonly in immutable subclasses + def resolution(self) -> str: + """ + Returns day, hour, minute, second, millisecond or microsecond + """ + # error: Item "None" of "Optional[Any]" has no attribute "attrname" + return self._resolution_obj.attrname # type: ignore[union-attr] + + # monotonicity/uniqueness properties are called via frequencies.infer_freq, + # see GH#23789 + + @property + def _is_monotonic_increasing(self) -> bool: + return algos.is_monotonic(self.asi8, timelike=True)[0] + + @property + def _is_monotonic_decreasing(self) -> bool: + return algos.is_monotonic(self.asi8, timelike=True)[1] + + @property + def _is_unique(self) -> bool: + return len(unique1d(self.asi8.ravel("K"))) == self.size + + # ------------------------------------------------------------------ + # Arithmetic Methods + + def _cmp_method(self, other, op): + if self.ndim > 1 and getattr(other, "shape", None) == self.shape: + # TODO: handle 2D-like listlikes + return op(self.ravel(), other.ravel()).reshape(self.shape) + + try: + other = self._validate_comparison_value(other) + except InvalidComparison: + return invalid_comparison(self, other, op) + + dtype = getattr(other, "dtype", None) + if is_object_dtype(dtype): + # We have to use comp_method_OBJECT_ARRAY instead of numpy + # comparison otherwise it would raise when comparing to None + result = ops.comp_method_OBJECT_ARRAY( + op, np.asarray(self.astype(object)), other + ) + return result + if other is NaT: + if op is operator.ne: + result = np.ones(self.shape, dtype=bool) + else: + result = np.zeros(self.shape, dtype=bool) + return result + + if not isinstance(self.dtype, PeriodDtype): + self = cast(TimelikeOps, self) + if self._creso != other._creso: + if not isinstance(other, type(self)): + # i.e. 
Timedelta/Timestamp, cast to ndarray and let + # compare_mismatched_resolutions handle broadcasting + try: + # GH#52080 see if we can losslessly cast to shared unit + other = other.as_unit(self.unit, round_ok=False) + except ValueError: + other_arr = np.array(other.asm8) + return compare_mismatched_resolutions( + self._ndarray, other_arr, op + ) + else: + other_arr = other._ndarray + return compare_mismatched_resolutions(self._ndarray, other_arr, op) + + other_vals = self._unbox(other) + # GH#37462 comparison on i8 values is almost 2x faster than M8/m8 + result = op(self._ndarray.view("i8"), other_vals.view("i8")) + + o_mask = isna(other) + mask = self._isnan | o_mask + if mask.any(): + nat_result = op is operator.ne + np.putmask(result, mask, nat_result) + + return result + + # pow is invalid for all three subclasses; TimedeltaArray will override + # the multiplication and division ops + __pow__ = _make_unpacked_invalid_op("__pow__") + __rpow__ = _make_unpacked_invalid_op("__rpow__") + __mul__ = _make_unpacked_invalid_op("__mul__") + __rmul__ = _make_unpacked_invalid_op("__rmul__") + __truediv__ = _make_unpacked_invalid_op("__truediv__") + __rtruediv__ = _make_unpacked_invalid_op("__rtruediv__") + __floordiv__ = _make_unpacked_invalid_op("__floordiv__") + __rfloordiv__ = _make_unpacked_invalid_op("__rfloordiv__") + __mod__ = _make_unpacked_invalid_op("__mod__") + __rmod__ = _make_unpacked_invalid_op("__rmod__") + __divmod__ = _make_unpacked_invalid_op("__divmod__") + __rdivmod__ = _make_unpacked_invalid_op("__rdivmod__") + + @final + def _get_i8_values_and_mask( + self, other + ) -> tuple[int | npt.NDArray[np.int64], None | npt.NDArray[np.bool_]]: + """ + Get the int64 values and b_mask to pass to checked_add_with_arr. + """ + if isinstance(other, Period): + i8values = other.ordinal + mask = None + elif isinstance(other, (Timestamp, Timedelta)): + i8values = other._value + mask = None + else: + # PeriodArray, DatetimeArray, TimedeltaArray + mask = other._isnan + i8values = other.asi8 + return i8values, mask + + @final + def _get_arithmetic_result_freq(self, other) -> BaseOffset | None: + """ + Check if we can preserve self.freq in addition or subtraction. + """ + # Adding or subtracting a Timedelta/Timestamp scalar is freq-preserving + # whenever self.freq is a Tick + if isinstance(self.dtype, PeriodDtype): + return self.freq + elif not lib.is_scalar(other): + return None + elif isinstance(self.freq, Tick): + # In these cases + return self.freq + return None + + @final + def _add_datetimelike_scalar(self, other) -> DatetimeArray: + if not lib.is_np_dtype(self.dtype, "m"): + raise TypeError( + f"cannot add {type(self).__name__} and {type(other).__name__}" + ) + + self = cast("TimedeltaArray", self) + + from pandas.core.arrays import DatetimeArray + from pandas.core.arrays.datetimes import tz_to_dtype + + assert other is not NaT + if isna(other): + # i.e. 
np.datetime64("NaT") + # In this case we specifically interpret NaT as a datetime, not + # the timedelta interpretation we would get by returning self + NaT + result = self._ndarray + NaT.to_datetime64().astype(f"M8[{self.unit}]") + # Preserve our resolution + return DatetimeArray._simple_new(result, dtype=result.dtype) + + other = Timestamp(other) + self, other = self._ensure_matching_resos(other) + self = cast("TimedeltaArray", self) + + other_i8, o_mask = self._get_i8_values_and_mask(other) + result = checked_add_with_arr( + self.asi8, other_i8, arr_mask=self._isnan, b_mask=o_mask + ) + res_values = result.view(f"M8[{self.unit}]") + + dtype = tz_to_dtype(tz=other.tz, unit=self.unit) + res_values = result.view(f"M8[{self.unit}]") + new_freq = self._get_arithmetic_result_freq(other) + return DatetimeArray._simple_new(res_values, dtype=dtype, freq=new_freq) + + @final + def _add_datetime_arraylike(self, other: DatetimeArray) -> DatetimeArray: + if not lib.is_np_dtype(self.dtype, "m"): + raise TypeError( + f"cannot add {type(self).__name__} and {type(other).__name__}" + ) + + # defer to DatetimeArray.__add__ + return other + self + + @final + def _sub_datetimelike_scalar( + self, other: datetime | np.datetime64 + ) -> TimedeltaArray: + if self.dtype.kind != "M": + raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}") + + self = cast("DatetimeArray", self) + # subtract a datetime from myself, yielding a ndarray[timedelta64[ns]] + + if isna(other): + # i.e. np.datetime64("NaT") + return self - NaT + + ts = Timestamp(other) + + self, ts = self._ensure_matching_resos(ts) + return self._sub_datetimelike(ts) + + @final + def _sub_datetime_arraylike(self, other: DatetimeArray) -> TimedeltaArray: + if self.dtype.kind != "M": + raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}") + + if len(self) != len(other): + raise ValueError("cannot add indices of unequal length") + + self = cast("DatetimeArray", self) + + self, other = self._ensure_matching_resos(other) + return self._sub_datetimelike(other) + + @final + def _sub_datetimelike(self, other: Timestamp | DatetimeArray) -> TimedeltaArray: + self = cast("DatetimeArray", self) + + from pandas.core.arrays import TimedeltaArray + + try: + self._assert_tzawareness_compat(other) + except TypeError as err: + new_message = str(err).replace("compare", "subtract") + raise type(err)(new_message) from err + + other_i8, o_mask = self._get_i8_values_and_mask(other) + res_values = checked_add_with_arr( + self.asi8, -other_i8, arr_mask=self._isnan, b_mask=o_mask + ) + res_m8 = res_values.view(f"timedelta64[{self.unit}]") + + new_freq = self._get_arithmetic_result_freq(other) + new_freq = cast("Tick | None", new_freq) + return TimedeltaArray._simple_new(res_m8, dtype=res_m8.dtype, freq=new_freq) + + @final + def _add_period(self, other: Period) -> PeriodArray: + if not lib.is_np_dtype(self.dtype, "m"): + raise TypeError(f"cannot add Period to a {type(self).__name__}") + + # We will wrap in a PeriodArray and defer to the reversed operation + from pandas.core.arrays.period import PeriodArray + + i8vals = np.broadcast_to(other.ordinal, self.shape) + dtype = PeriodDtype(other.freq) + parr = PeriodArray(i8vals, dtype=dtype) + return parr + self + + def _add_offset(self, offset): + raise AbstractMethodError(self) + + def _add_timedeltalike_scalar(self, other): + """ + Add a delta of a timedeltalike + + Returns + ------- + Same type as self + """ + if isna(other): + # i.e np.timedelta64("NaT") + new_values = 
np.empty(self.shape, dtype="i8").view(self._ndarray.dtype) + new_values.fill(iNaT) + return type(self)._simple_new(new_values, dtype=self.dtype) + + # PeriodArray overrides, so we only get here with DTA/TDA + self = cast("DatetimeArray | TimedeltaArray", self) + other = Timedelta(other) + self, other = self._ensure_matching_resos(other) + return self._add_timedeltalike(other) + + def _add_timedelta_arraylike(self, other: TimedeltaArray): + """ + Add a delta of a TimedeltaIndex + + Returns + ------- + Same type as self + """ + # overridden by PeriodArray + + if len(self) != len(other): + raise ValueError("cannot add indices of unequal length") + + self = cast("DatetimeArray | TimedeltaArray", self) + + self, other = self._ensure_matching_resos(other) + return self._add_timedeltalike(other) + + @final + def _add_timedeltalike(self, other: Timedelta | TimedeltaArray): + self = cast("DatetimeArray | TimedeltaArray", self) + + other_i8, o_mask = self._get_i8_values_and_mask(other) + new_values = checked_add_with_arr( + self.asi8, other_i8, arr_mask=self._isnan, b_mask=o_mask + ) + res_values = new_values.view(self._ndarray.dtype) + + new_freq = self._get_arithmetic_result_freq(other) + + # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has + # incompatible type "Union[dtype[datetime64], DatetimeTZDtype, + # dtype[timedelta64]]"; expected "Union[dtype[datetime64], DatetimeTZDtype]" + return type(self)._simple_new( + res_values, dtype=self.dtype, freq=new_freq # type: ignore[arg-type] + ) + + @final + def _add_nat(self): + """ + Add pd.NaT to self + """ + if isinstance(self.dtype, PeriodDtype): + raise TypeError( + f"Cannot add {type(self).__name__} and {type(NaT).__name__}" + ) + self = cast("TimedeltaArray | DatetimeArray", self) + + # GH#19124 pd.NaT is treated like a timedelta for both timedelta + # and datetime dtypes + result = np.empty(self.shape, dtype=np.int64) + result.fill(iNaT) + result = result.view(self._ndarray.dtype) # preserve reso + # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has + # incompatible type "Union[dtype[timedelta64], dtype[datetime64], + # DatetimeTZDtype]"; expected "Union[dtype[datetime64], DatetimeTZDtype]" + return type(self)._simple_new( + result, dtype=self.dtype, freq=None # type: ignore[arg-type] + ) + + @final + def _sub_nat(self): + """ + Subtract pd.NaT from self + """ + # GH#19124 Timedelta - datetime is not in general well-defined. + # We make an exception for pd.NaT, which in this case quacks + # like a timedelta. + # For datetime64 dtypes by convention we treat NaT as a datetime, so + # this subtraction returns a timedelta64 dtype. + # For period dtype, timedelta64 is a close-enough return dtype. + result = np.empty(self.shape, dtype=np.int64) + result.fill(iNaT) + if self.dtype.kind in "mM": + # We can retain unit in dtype + self = cast("DatetimeArray| TimedeltaArray", self) + return result.view(f"timedelta64[{self.unit}]") + else: + return result.view("timedelta64[ns]") + + @final + def _sub_periodlike(self, other: Period | PeriodArray) -> npt.NDArray[np.object_]: + # If the operation is well-defined, we return an object-dtype ndarray + # of DateOffsets. 
Null entries are filled with pd.NaT + if not isinstance(self.dtype, PeriodDtype): + raise TypeError( + f"cannot subtract {type(other).__name__} from {type(self).__name__}" + ) + + self = cast("PeriodArray", self) + self._check_compatible_with(other) + + other_i8, o_mask = self._get_i8_values_and_mask(other) + new_i8_data = checked_add_with_arr( + self.asi8, -other_i8, arr_mask=self._isnan, b_mask=o_mask + ) + new_data = np.array([self.freq.base * x for x in new_i8_data]) + + if o_mask is None: + # i.e. Period scalar + mask = self._isnan + else: + # i.e. PeriodArray + mask = self._isnan | o_mask + new_data[mask] = NaT + return new_data + + @final + def _addsub_object_array(self, other: npt.NDArray[np.object_], op): + """ + Add or subtract array-like of DateOffset objects + + Parameters + ---------- + other : np.ndarray[object] + op : {operator.add, operator.sub} + + Returns + ------- + np.ndarray[object] + Except in fastpath case with length 1 where we operate on the + contained scalar. + """ + assert op in [operator.add, operator.sub] + if len(other) == 1 and self.ndim == 1: + # Note: without this special case, we could annotate return type + # as ndarray[object] + # If both 1D then broadcasting is unambiguous + return op(self, other[0]) + + warnings.warn( + "Adding/subtracting object-dtype array to " + f"{type(self).__name__} not vectorized.", + PerformanceWarning, + stacklevel=find_stack_level(), + ) + + # Caller is responsible for broadcasting if necessary + assert self.shape == other.shape, (self.shape, other.shape) + + res_values = op(self.astype("O"), np.asarray(other)) + return res_values + + def _accumulate(self, name: str, *, skipna: bool = True, **kwargs) -> Self: + if name not in {"cummin", "cummax"}: + raise TypeError(f"Accumulation {name} not supported for {type(self)}") + + op = getattr(datetimelike_accumulations, name) + result = op(self.copy(), skipna=skipna, **kwargs) + + return type(self)._simple_new(result, dtype=self.dtype) + + @unpack_zerodim_and_defer("__add__") + def __add__(self, other): + other_dtype = getattr(other, "dtype", None) + other = ensure_wrapped_if_datetimelike(other) + + # scalar others + if other is NaT: + result = self._add_nat() + elif isinstance(other, (Tick, timedelta, np.timedelta64)): + result = self._add_timedeltalike_scalar(other) + elif isinstance(other, BaseOffset): + # specifically _not_ a Tick + result = self._add_offset(other) + elif isinstance(other, (datetime, np.datetime64)): + result = self._add_datetimelike_scalar(other) + elif isinstance(other, Period) and lib.is_np_dtype(self.dtype, "m"): + result = self._add_period(other) + elif lib.is_integer(other): + # This check must come after the check for np.timedelta64 + # as is_integer returns True for these + if not isinstance(self.dtype, PeriodDtype): + raise integer_op_not_supported(self) + obj = cast("PeriodArray", self) + result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.add) + + # array-like others + elif lib.is_np_dtype(other_dtype, "m"): + # TimedeltaIndex, ndarray[timedelta64] + result = self._add_timedelta_arraylike(other) + elif is_object_dtype(other_dtype): + # e.g. 
Array/Index of DateOffset objects + result = self._addsub_object_array(other, operator.add) + elif lib.is_np_dtype(other_dtype, "M") or isinstance( + other_dtype, DatetimeTZDtype + ): + # DatetimeIndex, ndarray[datetime64] + return self._add_datetime_arraylike(other) + elif is_integer_dtype(other_dtype): + if not isinstance(self.dtype, PeriodDtype): + raise integer_op_not_supported(self) + obj = cast("PeriodArray", self) + result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.add) + else: + # Includes Categorical, other ExtensionArrays + # For PeriodDtype, if self is a TimedeltaArray and other is a + # PeriodArray with a timedelta-like (i.e. Tick) freq, this + # operation is valid. Defer to the PeriodArray implementation. + # In remaining cases, this will end up raising TypeError. + return NotImplemented + + if isinstance(result, np.ndarray) and lib.is_np_dtype(result.dtype, "m"): + from pandas.core.arrays import TimedeltaArray + + return TimedeltaArray(result) + return result + + def __radd__(self, other): + # alias for __add__ + return self.__add__(other) + + @unpack_zerodim_and_defer("__sub__") + def __sub__(self, other): + other_dtype = getattr(other, "dtype", None) + other = ensure_wrapped_if_datetimelike(other) + + # scalar others + if other is NaT: + result = self._sub_nat() + elif isinstance(other, (Tick, timedelta, np.timedelta64)): + result = self._add_timedeltalike_scalar(-other) + elif isinstance(other, BaseOffset): + # specifically _not_ a Tick + result = self._add_offset(-other) + elif isinstance(other, (datetime, np.datetime64)): + result = self._sub_datetimelike_scalar(other) + elif lib.is_integer(other): + # This check must come after the check for np.timedelta64 + # as is_integer returns True for these + if not isinstance(self.dtype, PeriodDtype): + raise integer_op_not_supported(self) + obj = cast("PeriodArray", self) + result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.sub) + + elif isinstance(other, Period): + result = self._sub_periodlike(other) + + # array-like others + elif lib.is_np_dtype(other_dtype, "m"): + # TimedeltaIndex, ndarray[timedelta64] + result = self._add_timedelta_arraylike(-other) + elif is_object_dtype(other_dtype): + # e.g. Array/Index of DateOffset objects + result = self._addsub_object_array(other, operator.sub) + elif lib.is_np_dtype(other_dtype, "M") or isinstance( + other_dtype, DatetimeTZDtype + ): + # DatetimeIndex, ndarray[datetime64] + result = self._sub_datetime_arraylike(other) + elif isinstance(other_dtype, PeriodDtype): + # PeriodIndex + result = self._sub_periodlike(other) + elif is_integer_dtype(other_dtype): + if not isinstance(self.dtype, PeriodDtype): + raise integer_op_not_supported(self) + obj = cast("PeriodArray", self) + result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.sub) + else: + # Includes ExtensionArrays, float_dtype + return NotImplemented + + if isinstance(result, np.ndarray) and lib.is_np_dtype(result.dtype, "m"): + from pandas.core.arrays import TimedeltaArray + + return TimedeltaArray(result) + return result + + def __rsub__(self, other): + other_dtype = getattr(other, "dtype", None) + other_is_dt64 = lib.is_np_dtype(other_dtype, "M") or isinstance( + other_dtype, DatetimeTZDtype + ) + + if other_is_dt64 and lib.is_np_dtype(self.dtype, "m"): + # ndarray[datetime64] cannot be subtracted from self, so + # we need to wrap in DatetimeArray/Index and flip the operation + if lib.is_scalar(other): + # i.e. 
np.datetime64 object + return Timestamp(other) - self + if not isinstance(other, DatetimeLikeArrayMixin): + # Avoid down-casting DatetimeIndex + from pandas.core.arrays import DatetimeArray + + other = DatetimeArray(other) + return other - self + elif self.dtype.kind == "M" and hasattr(other, "dtype") and not other_is_dt64: + # GH#19959 datetime - datetime is well-defined as timedelta, + # but any other type - datetime is not well-defined. + raise TypeError( + f"cannot subtract {type(self).__name__} from {type(other).__name__}" + ) + elif isinstance(self.dtype, PeriodDtype) and lib.is_np_dtype(other_dtype, "m"): + # TODO: Can we simplify/generalize these cases at all? + raise TypeError(f"cannot subtract {type(self).__name__} from {other.dtype}") + elif lib.is_np_dtype(self.dtype, "m"): + self = cast("TimedeltaArray", self) + return (-self) + other + + # We get here with e.g. datetime objects + return -(self - other) + + def __iadd__(self, other) -> Self: + result = self + other + self[:] = result[:] + + if not isinstance(self.dtype, PeriodDtype): + # restore freq, which is invalidated by setitem + self._freq = result.freq + return self + + def __isub__(self, other) -> Self: + result = self - other + self[:] = result[:] + + if not isinstance(self.dtype, PeriodDtype): + # restore freq, which is invalidated by setitem + self._freq = result.freq + return self + + # -------------------------------------------------------------- + # Reductions + + @_period_dispatch + def _quantile( + self, + qs: npt.NDArray[np.float64], + interpolation: str, + ) -> Self: + return super()._quantile(qs=qs, interpolation=interpolation) + + @_period_dispatch + def min(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs): + """ + Return the minimum value of the Array or minimum along + an axis. + + See Also + -------- + numpy.ndarray.min + Index.min : Return the minimum value in an Index. + Series.min : Return the minimum value in a Series. + """ + nv.validate_min((), kwargs) + nv.validate_minmax_axis(axis, self.ndim) + + result = nanops.nanmin(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + @_period_dispatch + def max(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs): + """ + Return the maximum value of the Array or maximum along + an axis. + + See Also + -------- + numpy.ndarray.max + Index.max : Return the maximum value in an Index. + Series.max : Return the maximum value in a Series. + """ + nv.validate_max((), kwargs) + nv.validate_minmax_axis(axis, self.ndim) + + result = nanops.nanmax(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0): + """ + Return the mean value of the Array. + + Parameters + ---------- + skipna : bool, default True + Whether to ignore any NaT elements. + axis : int, optional, default 0 + + Returns + ------- + scalar + Timestamp or Timedelta. + + See Also + -------- + numpy.ndarray.mean : Returns the average of array elements along a given axis. + Series.mean : Return the mean value in a Series. + + Notes + ----- + mean is only defined for Datetime and Timedelta dtypes, not for Period. 
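+
+        For PeriodDtype the suggested workaround looks like this (a sketch;
+        2024 is a leap year, which is why the mean lands inside January 31):
+
+        >>> pidx = pd.period_range("2024-01", periods=3, freq="M")
+        >>> pidx.mean()
+        Traceback (most recent call last):
+        TypeError: mean is not implemented for PeriodArray since the meaning is ambiguous. ...
+        >>> pidx.to_timestamp(how="start").mean()
+        Timestamp('2024-01-31 08:00:00')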
+ + Examples + -------- + For :class:`pandas.DatetimeIndex`: + + >>> idx = pd.date_range('2001-01-01 00:00', periods=3) + >>> idx + DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'], + dtype='datetime64[ns]', freq='D') + >>> idx.mean() + Timestamp('2001-01-02 00:00:00') + + For :class:`pandas.TimedeltaIndex`: + + >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='D') + >>> tdelta_idx + TimedeltaIndex(['1 days', '2 days', '3 days'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.mean() + Timedelta('2 days 00:00:00') + """ + if isinstance(self.dtype, PeriodDtype): + # See discussion in GH#24757 + raise TypeError( + f"mean is not implemented for {type(self).__name__} since the " + "meaning is ambiguous. An alternative is " + "obj.to_timestamp(how='start').mean()" + ) + + result = nanops.nanmean( + self._ndarray, axis=axis, skipna=skipna, mask=self.isna() + ) + return self._wrap_reduction_result(axis, result) + + @_period_dispatch + def median(self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs): + nv.validate_median((), kwargs) + + if axis is not None and abs(axis) >= self.ndim: + raise ValueError("abs(axis) must be less than ndim") + + result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + def _mode(self, dropna: bool = True): + mask = None + if dropna: + mask = self.isna() + + i8modes = algorithms.mode(self.view("i8"), mask=mask) + npmodes = i8modes.view(self._ndarray.dtype) + npmodes = cast(np.ndarray, npmodes) + return self._from_backing_data(npmodes) + + # ------------------------------------------------------------------ + # GroupBy Methods + + def _groupby_op( + self, + *, + how: str, + has_dropped_na: bool, + min_count: int, + ngroups: int, + ids: npt.NDArray[np.intp], + **kwargs, + ): + dtype = self.dtype + if dtype.kind == "M": + # Adding/multiplying datetimes is not valid + if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]: + raise TypeError(f"datetime64 type does not support {how} operations") + if how in ["any", "all"]: + # GH#34479 + warnings.warn( + f"'{how}' with datetime64 dtypes is deprecated and will raise in a " + f"future version. Use (obj != pd.Timestamp(0)).{how}() instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + elif isinstance(dtype, PeriodDtype): + # Adding/multiplying Periods is not valid + if how in ["sum", "prod", "cumsum", "cumprod", "var", "skew"]: + raise TypeError(f"Period type does not support {how} operations") + if how in ["any", "all"]: + # GH#34479 + warnings.warn( + f"'{how}' with PeriodDtype is deprecated and will raise in a " + f"future version. Use (obj != pd.Period(0, freq)).{how}() instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + # timedeltas we can add but not multiply + if how in ["prod", "cumprod", "skew", "var"]: + raise TypeError(f"timedelta64 type does not support {how} operations") + + # All of the functions implemented here are ordinal, so we can + # operate on the tz-naive equivalents + npvalues = self._ndarray.view("M8[ns]") + + from pandas.core.groupby.ops import WrappedCythonOp + + kind = WrappedCythonOp.get_kind_from_how(how) + op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na) + + res_values = op._cython_op_ndim_compat( + npvalues, + min_count=min_count, + ngroups=ngroups, + comp_ids=ids, + mask=None, + **kwargs, + ) + + if op.how in op.cast_blocklist: + # i.e. 
how in ["rank"], since other cast_blocklist methods don't go + # through cython_operation + return res_values + + # We did a view to M8[ns] above, now we go the other direction + assert res_values.dtype == "M8[ns]" + if how in ["std", "sem"]: + from pandas.core.arrays import TimedeltaArray + + if isinstance(self.dtype, PeriodDtype): + raise TypeError("'std' and 'sem' are not valid for PeriodDtype") + self = cast("DatetimeArray | TimedeltaArray", self) + new_dtype = f"m8[{self.unit}]" + res_values = res_values.view(new_dtype) + return TimedeltaArray(res_values) + + res_values = res_values.view(self._ndarray.dtype) + return self._from_backing_data(res_values) + + +class DatelikeOps(DatetimeLikeArrayMixin): + """ + Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex. + """ + + @Substitution( + URL="https://docs.python.org/3/library/datetime.html" + "#strftime-and-strptime-behavior" + ) + def strftime(self, date_format: str) -> npt.NDArray[np.object_]: + """ + Convert to Index using specified date_format. + + Return an Index of formatted strings specified by date_format, which + supports the same string format as the python standard library. Details + of the string format can be found in `python string format + doc <%(URL)s>`__. + + Formats supported by the C `strftime` API but not by the python string format + doc (such as `"%%R"`, `"%%r"`) are not officially supported and should be + preferably replaced with their supported equivalents (such as `"%%H:%%M"`, + `"%%I:%%M:%%S %%p"`). + + Note that `PeriodIndex` support additional directives, detailed in + `Period.strftime`. + + Parameters + ---------- + date_format : str + Date format string (e.g. "%%Y-%%m-%%d"). + + Returns + ------- + ndarray[object] + NumPy ndarray of formatted strings. + + See Also + -------- + to_datetime : Convert the given argument to datetime. + DatetimeIndex.normalize : Return DatetimeIndex with times to midnight. + DatetimeIndex.round : Round the DatetimeIndex to the specified freq. + DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq. + Timestamp.strftime : Format a single Timestamp. + Period.strftime : Format a single Period. + + Examples + -------- + >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"), + ... periods=3, freq='s') + >>> rng.strftime('%%B %%d, %%Y, %%r') + Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM', + 'March 10, 2018, 09:00:02 AM'], + dtype='object') + """ + result = self._format_native_types(date_format=date_format, na_rep=np.nan) + return result.astype(object, copy=False) + + +_round_doc = """ + Perform {op} operation on the data to the specified `freq`. + + Parameters + ---------- + freq : str or Offset + The frequency level to {op} the index to. Must be a fixed + frequency like 'S' (second) not 'ME' (month end). See + :ref:`frequency aliases ` for + a list of possible `freq` values. + ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' + Only relevant for DatetimeIndex: + + - 'infer' will attempt to infer fall dst-transition hours based on + order + - bool-ndarray where True signifies a DST time, False designates + a non-DST time (note that this flag is only applicable for + ambiguous times) + - 'NaT' will return NaT where there are ambiguous times + - 'raise' will raise an AmbiguousTimeError if there are ambiguous + times. + + nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, default 'raise' + A nonexistent time does not exist in a particular timezone + where clocks moved forward due to DST. 
+ + - 'shift_forward' will shift the nonexistent time forward to the + closest existing time + - 'shift_backward' will shift the nonexistent time backward to the + closest existing time + - 'NaT' will return NaT where there are nonexistent times + - timedelta objects will shift nonexistent times by the timedelta + - 'raise' will raise an NonExistentTimeError if there are + nonexistent times. + + Returns + ------- + DatetimeIndex, TimedeltaIndex, or Series + Index of the same type for a DatetimeIndex or TimedeltaIndex, + or a Series with the same index for a Series. + + Raises + ------ + ValueError if the `freq` cannot be converted. + + Notes + ----- + If the timestamps have a timezone, {op}ing will take place relative to the + local ("wall") time and re-localized to the same timezone. When {op}ing + near daylight savings time, use ``nonexistent`` and ``ambiguous`` to + control the re-localization behavior. + + Examples + -------- + **DatetimeIndex** + + >>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min') + >>> rng + DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00', + '2018-01-01 12:01:00'], + dtype='datetime64[ns]', freq='T') + """ + +_round_example = """>>> rng.round('H') + DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00', + '2018-01-01 12:00:00'], + dtype='datetime64[ns]', freq=None) + + **Series** + + >>> pd.Series(rng).dt.round("H") + 0 2018-01-01 12:00:00 + 1 2018-01-01 12:00:00 + 2 2018-01-01 12:00:00 + dtype: datetime64[ns] + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. + + >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam") + + >>> rng_tz.floor("2H", ambiguous=False) + DatetimeIndex(['2021-10-31 02:00:00+01:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + + >>> rng_tz.floor("2H", ambiguous=True) + DatetimeIndex(['2021-10-31 02:00:00+02:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + """ + +_floor_example = """>>> rng.floor('H') + DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00', + '2018-01-01 12:00:00'], + dtype='datetime64[ns]', freq=None) + + **Series** + + >>> pd.Series(rng).dt.floor("H") + 0 2018-01-01 11:00:00 + 1 2018-01-01 12:00:00 + 2 2018-01-01 12:00:00 + dtype: datetime64[ns] + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. + + >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam") + + >>> rng_tz.floor("2H", ambiguous=False) + DatetimeIndex(['2021-10-31 02:00:00+01:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + + >>> rng_tz.floor("2H", ambiguous=True) + DatetimeIndex(['2021-10-31 02:00:00+02:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + """ + +_ceil_example = """>>> rng.ceil('H') + DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00', + '2018-01-01 13:00:00'], + dtype='datetime64[ns]', freq=None) + + **Series** + + >>> pd.Series(rng).dt.ceil("H") + 0 2018-01-01 12:00:00 + 1 2018-01-01 12:00:00 + 2 2018-01-01 13:00:00 + dtype: datetime64[ns] + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. 
+ + >>> rng_tz = pd.DatetimeIndex(["2021-10-31 01:30:00"], tz="Europe/Amsterdam") + + >>> rng_tz.ceil("H", ambiguous=False) + DatetimeIndex(['2021-10-31 02:00:00+01:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + + >>> rng_tz.ceil("H", ambiguous=True) + DatetimeIndex(['2021-10-31 02:00:00+02:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + """ + + +class TimelikeOps(DatetimeLikeArrayMixin): + """ + Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex. + """ + + _default_dtype: np.dtype + + def __init__( + self, values, dtype=None, freq=lib.no_default, copy: bool = False + ) -> None: + values = extract_array(values, extract_numpy=True) + if isinstance(values, IntegerArray): + values = values.to_numpy("int64", na_value=iNaT) + + inferred_freq = getattr(values, "_freq", None) + explicit_none = freq is None + freq = freq if freq is not lib.no_default else None + + if isinstance(values, type(self)): + if explicit_none: + # don't inherit from values + pass + elif freq is None: + freq = values.freq + elif freq and values.freq: + freq = to_offset(freq) + freq, _ = validate_inferred_freq(freq, values.freq, False) + + if dtype is not None: + dtype = pandas_dtype(dtype) + if dtype != values.dtype: + # TODO: we only have tests for this for DTA, not TDA (2022-07-01) + raise TypeError( + f"dtype={dtype} does not match data dtype {values.dtype}" + ) + + dtype = values.dtype + values = values._ndarray + + elif dtype is None: + if isinstance(values, np.ndarray) and values.dtype.kind in "Mm": + dtype = values.dtype + else: + dtype = self._default_dtype + + if not isinstance(values, np.ndarray): + raise ValueError( + f"Unexpected type '{type(values).__name__}'. 'values' must be a " + f"{type(self).__name__}, ndarray, or Series or Index " + "containing one of those." + ) + if values.ndim not in [1, 2]: + raise ValueError("Only 1-dimensional input arrays are supported.") + + if values.dtype == "i8": + # for compat with datetime/timedelta/period shared methods, + # we can sometimes get here with int64 values. These represent + # nanosecond UTC (or tz-naive) unix timestamps + values = values.view(self._default_dtype) + + dtype = self._validate_dtype(values, dtype) + + if freq == "infer": + raise ValueError( + f"Frequency inference not allowed in {type(self).__name__}.__init__. " + "Use 'pd.array()' instead." + ) + + if copy: + values = values.copy() + if freq: + freq = to_offset(freq) + if values.dtype.kind == "m" and not isinstance(freq, Tick): + raise TypeError("TimedeltaArray/Index freq must be a Tick") + + NDArrayBacked.__init__(self, values=values, dtype=dtype) + self._freq = freq + + if inferred_freq is None and freq is not None: + type(self)._validate_frequency(self, freq) + + @classmethod + def _validate_dtype(cls, values, dtype): + raise AbstractMethodError(cls) + + @property + def freq(self): + """ + Return the frequency object if it is set, otherwise None. 
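+
+        Assigning a freq re-validates it against the actual values (a sketch,
+        2.1-era copy assumed):
+
+        >>> dti = pd.DatetimeIndex(["2024-01-01", "2024-01-02", "2024-01-03"])
+        >>> dti.freq is None
+        True
+        >>> dti.freq = "D"
+        >>> dti.freqstr
+        'D'
+        >>> dti.freq = "2D"
+        Traceback (most recent call last):
+        ValueError: Inferred frequency D from passed values does not conform to passed frequency 2D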
+ """ + return self._freq + + @freq.setter + def freq(self, value) -> None: + if value is not None: + value = to_offset(value) + self._validate_frequency(self, value) + if self.dtype.kind == "m" and not isinstance(value, Tick): + raise TypeError("TimedeltaArray/Index freq must be a Tick") + + if self.ndim > 1: + raise ValueError("Cannot set freq with ndim > 1") + + self._freq = value + + @classmethod + def _validate_frequency(cls, index, freq, **kwargs): + """ + Validate that a frequency is compatible with the values of a given + Datetime Array/Index or Timedelta Array/Index + + Parameters + ---------- + index : DatetimeIndex or TimedeltaIndex + The index on which to determine if the given frequency is valid + freq : DateOffset + The frequency to validate + """ + inferred = index.inferred_freq + if index.size == 0 or inferred == freq.freqstr: + return None + + try: + on_freq = cls._generate_range( + start=index[0], + end=None, + periods=len(index), + freq=freq, + unit=index.unit, + **kwargs, + ) + if not np.array_equal(index.asi8, on_freq.asi8): + raise ValueError + except ValueError as err: + if "non-fixed" in str(err): + # non-fixed frequencies are not meaningful for timedelta64; + # we retain that error message + raise err + # GH#11587 the main way this is reached is if the `np.array_equal` + # check above is False. This can also be reached if index[0] + # is `NaT`, in which case the call to `cls._generate_range` will + # raise a ValueError, which we re-raise with a more targeted + # message. + raise ValueError( + f"Inferred frequency {inferred} from passed values " + f"does not conform to passed frequency {freq.freqstr}" + ) from err + + @classmethod + def _generate_range(cls, start, end, periods, freq, *args, **kwargs) -> Self: + raise AbstractMethodError(cls) + + # -------------------------------------------------------------- + + @cache_readonly + def _creso(self) -> int: + return get_unit_from_dtype(self._ndarray.dtype) + + @cache_readonly + def unit(self) -> str: + # e.g. "ns", "us", "ms" + # error: Argument 1 to "dtype_to_unit" has incompatible type + # "ExtensionDtype"; expected "Union[DatetimeTZDtype, dtype[Any]]" + return dtype_to_unit(self.dtype) # type: ignore[arg-type] + + def as_unit(self, unit: str) -> Self: + if unit not in ["s", "ms", "us", "ns"]: + raise ValueError("Supported units are 's', 'ms', 'us', 'ns'") + + dtype = np.dtype(f"{self.dtype.kind}8[{unit}]") + new_values = astype_overflowsafe(self._ndarray, dtype, round_ok=True) + + if isinstance(self.dtype, np.dtype): + new_dtype = new_values.dtype + else: + tz = cast("DatetimeArray", self).tz + new_dtype = DatetimeTZDtype(tz=tz, unit=unit) + + # error: Unexpected keyword argument "freq" for "_simple_new" of + # "NDArrayBacked" [call-arg] + return type(self)._simple_new( + new_values, dtype=new_dtype, freq=self.freq # type: ignore[call-arg] + ) + + # TODO: annotate other as DatetimeArray | TimedeltaArray | Timestamp | Timedelta + # with the return type matching input type. TypeVar? 
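+    # How this plays out (a sketch, 2.1-era copy assumed): mixed resolutions
+    # are reconciled to the finer unit, mirroring Timestamp/Timedelta.
+    #
+    # >>> dti = pd.date_range("2024-01-01", periods=2).as_unit("s")
+    # >>> dti.dtype
+    # dtype('<M8[s]')
+    # >>> (dti + pd.Timedelta(milliseconds=500).as_unit("ms")).dtype
+    # dtype('<M8[ms]')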
+ def _ensure_matching_resos(self, other): + if self._creso != other._creso: + # Just as with Timestamp/Timedelta, we cast to the higher resolution + if self._creso < other._creso: + self = self.as_unit(other.unit) + else: + other = other.as_unit(self.unit) + return self, other + + # -------------------------------------------------------------- + + def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + if ( + ufunc in [np.isnan, np.isinf, np.isfinite] + and len(inputs) == 1 + and inputs[0] is self + ): + # numpy 1.18 changed isinf and isnan to not raise on dt64/td64 + return getattr(ufunc, method)(self._ndarray, **kwargs) + + return super().__array_ufunc__(ufunc, method, *inputs, **kwargs) + + def _round(self, freq, mode, ambiguous, nonexistent): + # round the local times + if isinstance(self.dtype, DatetimeTZDtype): + # operate on naive timestamps, then convert back to aware + self = cast("DatetimeArray", self) + naive = self.tz_localize(None) + result = naive._round(freq, mode, ambiguous, nonexistent) + return result.tz_localize( + self.tz, ambiguous=ambiguous, nonexistent=nonexistent + ) + + values = self.view("i8") + values = cast(np.ndarray, values) + offset = to_offset(freq) + offset.nanos # raises on non-fixed frequencies + nanos = delta_to_nanoseconds(offset, self._creso) + if nanos == 0: + # GH 52761 + return self.copy() + result_i8 = round_nsint64(values, mode, nanos) + result = self._maybe_mask_results(result_i8, fill_value=iNaT) + result = result.view(self._ndarray.dtype) + return self._simple_new(result, dtype=self.dtype) + + @Appender((_round_doc + _round_example).format(op="round")) + def round( + self, + freq, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ) -> Self: + return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent) + + @Appender((_round_doc + _floor_example).format(op="floor")) + def floor( + self, + freq, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ) -> Self: + return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent) + + @Appender((_round_doc + _ceil_example).format(op="ceil")) + def ceil( + self, + freq, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ) -> Self: + return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent) + + # -------------------------------------------------------------- + # Reductions + + def any(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool: + # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype + return nanops.nanany(self._ndarray, axis=axis, skipna=skipna, mask=self.isna()) + + def all(self, *, axis: AxisInt | None = None, skipna: bool = True) -> bool: + # GH#34479 the nanops call will issue a FutureWarning for non-td64 dtype + + return nanops.nanall(self._ndarray, axis=axis, skipna=skipna, mask=self.isna()) + + # -------------------------------------------------------------- + # Frequency Methods + + def _maybe_clear_freq(self) -> None: + self._freq = None + + def _with_freq(self, freq) -> Self: + """ + Helper to get a view on the same data, with a new freq. + + Parameters + ---------- + freq : DateOffset, None, or "infer" + + Returns + ------- + Same type as self + """ + # GH#29843 + if freq is None: + # Always valid + pass + elif len(self) == 0 and isinstance(freq, BaseOffset): + # Always valid. 
In the TimedeltaArray case, we require a Tick offset + if self.dtype.kind == "m" and not isinstance(freq, Tick): + raise TypeError("TimedeltaArray/Index freq must be a Tick") + else: + # As an internal method, we can ensure this assertion always holds + assert freq == "infer" + freq = to_offset(self.inferred_freq) + + arr = self.view() + arr._freq = freq + return arr + + # -------------------------------------------------------------- + # ExtensionArray Interface + + def _values_for_json(self) -> np.ndarray: + # Small performance bump vs the base class which calls np.asarray(self) + if isinstance(self.dtype, np.dtype): + return self._ndarray + return super()._values_for_json() + + def factorize( + self, + use_na_sentinel: bool = True, + sort: bool = False, + ): + if self.freq is not None: + # We must be unique, so can short-circuit (and retain freq) + codes = np.arange(len(self), dtype=np.intp) + uniques = self.copy() # TODO: copy or view? + if sort and self.freq.n < 0: + codes = codes[::-1] + uniques = uniques[::-1] + return codes, uniques + + if sort: + # algorithms.factorize only passes sort=True here when freq is + # not None, so this should not be reached. + raise NotImplementedError( + f"The 'sort' keyword in {type(self).__name__}.factorize is " + "ignored unless arr.freq is not None. To factorize with sort, " + "call pd.factorize(obj, sort=True) instead." + ) + return super().factorize(use_na_sentinel=use_na_sentinel) + + @classmethod + def _concat_same_type( + cls, + to_concat: Sequence[Self], + axis: AxisInt = 0, + ) -> Self: + new_obj = super()._concat_same_type(to_concat, axis) + + obj = to_concat[0] + + if axis == 0: + # GH 3232: If the concat result is evenly spaced, we can retain the + # original frequency + to_concat = [x for x in to_concat if len(x)] + + if obj.freq is not None and all(x.freq == obj.freq for x in to_concat): + pairs = zip(to_concat[:-1], to_concat[1:]) + if all(pair[0][-1] + obj.freq == pair[1][0] for pair in pairs): + new_freq = obj.freq + new_obj._freq = new_freq + return new_obj + + def copy(self, order: str = "C") -> Self: + # error: Unexpected keyword argument "order" for "copy" + new_obj = super().copy(order=order) # type: ignore[call-arg] + new_obj._freq = self.freq + return new_obj + + def interpolate( + self, + *, + method: InterpolateOptions, + axis: int, + index: Index, + limit, + limit_direction, + limit_area, + copy: bool, + **kwargs, + ) -> Self: + """ + See NDFrame.interpolate.__doc__. + """ + # NB: we return type(self) even if copy=False + if method != "linear": + raise NotImplementedError + + if not copy: + out_data = self._ndarray + else: + out_data = self._ndarray.copy() + + missing.interpolate_2d_inplace( + out_data, + method=method, + axis=axis, + index=index, + limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + **kwargs, + ) + if not copy: + return self + return type(self)._simple_new(out_data, dtype=self.dtype) + + +# ------------------------------------------------------------------- +# Shared Constructor Helpers + + +def ensure_arraylike_for_datetimelike(data, copy: bool, cls_name: str): + if not hasattr(data, "dtype"): + # e.g. list, tuple + if not isinstance(data, (list, tuple)) and np.ndim(data) == 0: + # i.e. 
generator
+            data = list(data)
+        data = np.asarray(data)
+        copy = False
+    elif isinstance(data, ABCMultiIndex):
+        raise TypeError(f"Cannot create a {cls_name} from a MultiIndex.")
+    else:
+        data = extract_array(data, extract_numpy=True)
+
+    if isinstance(data, IntegerArray) or (
+        isinstance(data, ArrowExtensionArray) and data.dtype.kind in "iu"
+    ):
+        data = data.to_numpy("int64", na_value=iNaT)
+        copy = False
+    elif isinstance(data, ArrowExtensionArray):
+        data = data._maybe_convert_datelike_array()
+        data = data.to_numpy()
+        copy = False
+    elif not isinstance(data, (np.ndarray, ExtensionArray)):
+        # GH#24539 e.g. xarray, dask object
+        data = np.asarray(data)
+
+    elif isinstance(data, ABCCategorical):
+        # GH#18664 preserve tz in going DTI->Categorical->DTI
+        # TODO: cases where we need to do another pass through maybe_convert_dtype,
+        #  e.g. the categories are timedelta64s
+        data = data.categories.take(data.codes, fill_value=NaT)._values
+        copy = False
+
+    return data, copy
+
+
+@overload
+def validate_periods(periods: None) -> None:
+    ...
+
+
+@overload
+def validate_periods(periods: int | float) -> int:
+    ...
+
+
+def validate_periods(periods: int | float | None) -> int | None:
+    """
+    If a `periods` argument is passed to the Datetime/Timedelta Array/Index
+    constructor, cast it to an integer.
+
+    Parameters
+    ----------
+    periods : None, float, int
+
+    Returns
+    -------
+    periods : None or int
+
+    Raises
+    ------
+    TypeError
+        if periods is not None and is not a number (int or float)
+    """
+    if periods is not None:
+        if lib.is_float(periods):
+            periods = int(periods)
+        elif not lib.is_integer(periods):
+            raise TypeError(f"periods must be a number, got {periods}")
+    return periods
+
+
+def validate_inferred_freq(
+    freq, inferred_freq, freq_infer
+) -> tuple[BaseOffset | None, bool]:
+    """
+    If the user passes a freq and another freq is inferred from passed data,
+    require that they match.
+
+    Parameters
+    ----------
+    freq : DateOffset or None
+    inferred_freq : DateOffset or None
+    freq_infer : bool
+
+    Returns
+    -------
+    freq : DateOffset or None
+    freq_infer : bool
+
+    Notes
+    -----
+    We assume at this point that `maybe_infer_freq` has been called, so
+    `freq` is either a DateOffset object or None.
+    """
+    if inferred_freq is not None:
+        if freq is not None and freq != inferred_freq:
+            raise ValueError(
+                f"Inferred frequency {inferred_freq} from passed "
+                "values does not conform to passed frequency "
+                f"{freq.freqstr}"
+            )
+        if freq is None:
+            freq = inferred_freq
+        freq_infer = False
+
+    return freq, freq_infer
+
+
+def maybe_infer_freq(freq):
+    """
+    Comparing a DateOffset to the string "infer" raises, so we need to
+    be careful about comparisons.  Make a dummy variable `freq_infer` to
+    signify the case where the given freq is "infer" and set freq to None
+    to avoid comparison trouble later on.
+
+    Parameters
+    ----------
+    freq : {DateOffset, None, str}
+
+    Returns
+    -------
+    freq : {DateOffset, None}
+    freq_infer : bool
+        Whether we should inherit the freq of passed data.
+    """
+    freq_infer = False
+    if not isinstance(freq, BaseOffset):
+        # if a passed freq is None, don't infer automatically
+        if freq != "infer":
+            freq = to_offset(freq)
+        else:
+            freq_infer = True
+            freq = None
+    return freq, freq_infer
+
+
+def dtype_to_unit(dtype: DatetimeTZDtype | np.dtype) -> str:
+    """
+    Return the unit str corresponding to the dtype's resolution.
+
+    Parameters
+    ----------
+    dtype : DatetimeTZDtype or np.dtype
+        If np.dtype, we assume it is a datetime64 dtype.
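+        For example (illustrative): ``np.dtype("M8[s]")`` corresponds to
+        ``"s"``, and ``DatetimeTZDtype(tz="UTC", unit="ms")`` to ``"ms"``.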
+ + Returns + ------- + str + """ + if isinstance(dtype, DatetimeTZDtype): + return dtype.unit + return np.datetime_data(dtype)[0] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/datetimes.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/datetimes.py new file mode 100644 index 00000000..8ad51e4a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/datetimes.py @@ -0,0 +1,2782 @@ +from __future__ import annotations + +from datetime import ( + datetime, + timedelta, + tzinfo, +) +from typing import ( + TYPE_CHECKING, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + tslib, +) +from pandas._libs.tslibs import ( + BaseOffset, + NaT, + NaTType, + Resolution, + Timestamp, + astype_overflowsafe, + fields, + get_resolution, + get_supported_reso, + get_unit_from_dtype, + ints_to_pydatetime, + is_date_array_normalized, + is_supported_unit, + is_unitless, + normalize_i8_timestamps, + npy_unit_to_abbrev, + timezones, + to_offset, + tz_convert_from_utc, + tzconversion, +) +from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit +from pandas.errors import PerformanceWarning +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import validate_inclusive + +from pandas.core.dtypes.common import ( + DT64NS_DTYPE, + INT64_DTYPE, + is_bool_dtype, + is_float_dtype, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + ExtensionDtype, + PeriodDtype, +) +from pandas.core.dtypes.missing import isna + +from pandas.core.arrays import datetimelike as dtl +from pandas.core.arrays._ranges import generate_regular_range +import pandas.core.common as com + +from pandas.tseries.frequencies import get_period_alias +from pandas.tseries.offsets import ( + Day, + Tick, +) + +if TYPE_CHECKING: + from collections.abc import Iterator + + from pandas._typing import ( + DateTimeErrorChoices, + IntervalClosedType, + Self, + TimeAmbiguous, + TimeNonexistent, + npt, + ) + + from pandas import DataFrame + from pandas.core.arrays import PeriodArray + + +def tz_to_dtype( + tz: tzinfo | None, unit: str = "ns" +) -> np.dtype[np.datetime64] | DatetimeTZDtype: + """ + Return a datetime64[ns] dtype appropriate for the given timezone. 
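+
+    If ``tz`` is None a plain ``numpy`` datetime64 dtype of the given unit is
+    returned; otherwise a ``DatetimeTZDtype`` carrying both unit and timezone.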
+
+    Parameters
+    ----------
+    tz : tzinfo or None
+    unit : str, default "ns"
+
+    Returns
+    -------
+    np.dtype or DatetimeTZDtype
+    """
+    if tz is None:
+        return np.dtype(f"M8[{unit}]")
+    else:
+        return DatetimeTZDtype(tz=tz, unit=unit)
+
+
+def _field_accessor(name: str, field: str, docstring: str | None = None):
+    def f(self):
+        values = self._local_timestamps()
+
+        if field in self._bool_ops:
+            result: np.ndarray
+
+            if field.endswith(("start", "end")):
+                freq = self.freq
+                month_kw = 12
+                if freq:
+                    kwds = freq.kwds
+                    month_kw = kwds.get("startingMonth", kwds.get("month", 12))
+
+                result = fields.get_start_end_field(
+                    values, field, self.freqstr, month_kw, reso=self._creso
+                )
+            else:
+                result = fields.get_date_field(values, field, reso=self._creso)
+
+            # these return a boolean by-definition
+            return result
+
+        if field in self._object_ops:
+            result = fields.get_date_name_field(values, field, reso=self._creso)
+            result = self._maybe_mask_results(result, fill_value=None)
+
+        else:
+            result = fields.get_date_field(values, field, reso=self._creso)
+            result = self._maybe_mask_results(
+                result, fill_value=None, convert="float64"
+            )
+
+        return result
+
+    f.__name__ = name
+    f.__doc__ = docstring
+    return property(f)
+
+
+# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
+# incompatible with definition in base class "ExtensionArray"
+class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):  # type: ignore[misc]
+    """
+    Pandas ExtensionArray for tz-naive or tz-aware datetime data.
+
+    .. warning::
+
+       DatetimeArray is currently experimental, and its API may change
+       without warning. In particular, :attr:`DatetimeArray.dtype` is
+       expected to change to always be an instance of an ``ExtensionDtype``
+       subclass.
+
+    Parameters
+    ----------
+    values : Series, Index, DatetimeArray, ndarray
+        The datetime data.
+
+        For DatetimeArray `values` (or a Series or Index boxing one),
+        `dtype` and `freq` will be extracted from `values`.
+
+    dtype : numpy.dtype or DatetimeTZDtype
+        Note that the only NumPy dtype allowed is 'datetime64[ns]'.
+    freq : str or Offset, optional
+        The frequency.
+    copy : bool, default False
+        Whether to copy the underlying array of values.
+
+    Attributes
+    ----------
+    None
+
+    Methods
+    -------
+    None
+
+    Examples
+    --------
+    >>> pd.arrays.DatetimeArray(pd.DatetimeIndex(['2023-01-01', '2023-01-02']),
+    ...                         freq='D')
+    <DatetimeArray>
+    ['2023-01-01 00:00:00', '2023-01-02 00:00:00']
+    Length: 2, dtype: datetime64[ns]
+    """
+
+    _typ = "datetimearray"
+    _internal_fill_value = np.datetime64("NaT", "ns")
+    _recognized_scalars = (datetime, np.datetime64)
+    _is_recognized_dtype = lambda x: lib.is_np_dtype(x, "M") or isinstance(
+        x, DatetimeTZDtype
+    )
+    _infer_matches = ("datetime", "datetime64", "date")
+
+    @property
+    def _scalar_type(self) -> type[Timestamp]:
+        return Timestamp
+
+    # define my properties & methods for delegation
+    _bool_ops: list[str] = [
+        "is_month_start",
+        "is_month_end",
+        "is_quarter_start",
+        "is_quarter_end",
+        "is_year_start",
+        "is_year_end",
+        "is_leap_year",
+    ]
+    _object_ops: list[str] = ["freq", "tz"]
+    _field_ops: list[str] = [
+        "year",
+        "month",
+        "day",
+        "hour",
+        "minute",
+        "second",
+        "weekday",
+        "dayofweek",
+        "day_of_week",
+        "dayofyear",
+        "day_of_year",
+        "quarter",
+        "days_in_month",
+        "daysinmonth",
+        "microsecond",
+        "nanosecond",
+    ]
+    _other_ops: list[str] = ["date", "time", "timetz"]
+    _datetimelike_ops: list[str] = (
+        _field_ops + _object_ops + _bool_ops + _other_ops + ["unit"]
+    )
+    _datetimelike_methods: list[str] = [
+        "to_period",
+        "tz_localize",
+        "tz_convert",
+        "normalize",
+        "strftime",
+        "round",
+        "floor",
+        "ceil",
+        "month_name",
+        "day_name",
+        "as_unit",
+    ]
+
+    # ndim is inherited from ExtensionArray, must exist to ensure
+    #  Timestamp.__richcmp__(DatetimeArray) operates pointwise
+
+    # ensure that operations with numpy arrays defer to our implementation
+    __array_priority__ = 1000
+
+    # -----------------------------------------------------------------
+    # Constructors
+
+    _dtype: np.dtype[np.datetime64] | DatetimeTZDtype
+    _freq: BaseOffset | None = None
+    _default_dtype = DT64NS_DTYPE  # used in TimeLikeOps.__init__
+
+    @classmethod
+    def _validate_dtype(cls, values, dtype):
+        # used in TimeLikeOps.__init__
+        _validate_dt64_dtype(values.dtype)
+        dtype = _validate_dt64_dtype(dtype)
+        return dtype
+
+    # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
+    @classmethod
+    def _simple_new(  # type: ignore[override]
+        cls,
+        values: npt.NDArray[np.datetime64],
+        freq: BaseOffset | None = None,
+        dtype: np.dtype[np.datetime64] | DatetimeTZDtype = DT64NS_DTYPE,
+    ) -> Self:
+        assert isinstance(values, np.ndarray)
+        assert dtype.kind == "M"
+        if isinstance(dtype, np.dtype):
+            assert dtype == values.dtype
+            assert not is_unitless(dtype)
+        else:
+            # DatetimeTZDtype. If we have e.g. DatetimeTZDtype[us, UTC],
+            # then values.dtype should be M8[us].
+            assert dtype._creso == get_unit_from_dtype(values.dtype)
+
+        result = super()._simple_new(values, dtype)
+        result._freq = freq
+        return result
+
+    @classmethod
+    def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False):
+        return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy)
+
+    @classmethod
+    def _from_sequence_not_strict(
+        cls,
+        data,
+        *,
+        dtype=None,
+        copy: bool = False,
+        tz=lib.no_default,
+        freq: str | BaseOffset | lib.NoDefault | None = lib.no_default,
+        dayfirst: bool = False,
+        yearfirst: bool = False,
+        ambiguous: TimeAmbiguous = "raise",
+    ):
+        """
+        A non-strict version of _from_sequence, called from DatetimeIndex.__new__.
+        """
+        explicit_none = freq is None
+        freq = freq if freq is not lib.no_default else None
+        freq, freq_infer = dtl.maybe_infer_freq(freq)
+
+        # if the user either explicitly passes tz=None or a tz-naive dtype, we
+        #  disallow inferring a tz.
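+        # For example (illustrative): with tz=lib.no_default a tz inferred
+        # from the data is acceptable, while with an explicit tz=None any
+        # tz-aware input raises ValueError further below.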
+ explicit_tz_none = tz is None + if tz is lib.no_default: + tz = None + else: + tz = timezones.maybe_get_tz(tz) + + dtype = _validate_dt64_dtype(dtype) + # if dtype has an embedded tz, capture it + tz = _validate_tz_from_dtype(dtype, tz, explicit_tz_none) + + unit = None + if dtype is not None: + if isinstance(dtype, np.dtype): + unit = np.datetime_data(dtype)[0] + else: + # DatetimeTZDtype + unit = dtype.unit + + subarr, tz, inferred_freq = _sequence_to_dt64ns( + data, + copy=copy, + tz=tz, + dayfirst=dayfirst, + yearfirst=yearfirst, + ambiguous=ambiguous, + out_unit=unit, + ) + # We have to call this again after possibly inferring a tz above + _validate_tz_from_dtype(dtype, tz, explicit_tz_none) + if tz is not None and explicit_tz_none: + raise ValueError( + "Passed data is timezone-aware, incompatible with 'tz=None'. " + "Use obj.tz_localize(None) instead." + ) + + freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer) + if explicit_none: + freq = None + + data_unit = np.datetime_data(subarr.dtype)[0] + data_dtype = tz_to_dtype(tz, data_unit) + result = cls._simple_new(subarr, freq=freq, dtype=data_dtype) + if unit is not None and unit != result.unit: + # If unit was specified in user-passed dtype, cast to it here + result = result.as_unit(unit) + + if inferred_freq is None and freq is not None: + # this condition precludes `freq_infer` + cls._validate_frequency(result, freq, ambiguous=ambiguous) + + elif freq_infer: + # Set _freq directly to bypass duplicative _validate_frequency + # check. + result._freq = to_offset(result.inferred_freq) + + return result + + # error: Signature of "_generate_range" incompatible with supertype + # "DatetimeLikeArrayMixin" + @classmethod + def _generate_range( # type: ignore[override] + cls, + start, + end, + periods, + freq, + tz=None, + normalize: bool = False, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + inclusive: IntervalClosedType = "both", + *, + unit: str | None = None, + ) -> Self: + periods = dtl.validate_periods(periods) + if freq is None and any(x is None for x in [periods, start, end]): + raise ValueError("Must provide freq argument if no data is supplied") + + if com.count_not_none(start, end, periods, freq) != 3: + raise ValueError( + "Of the four parameters: start, end, periods, " + "and freq, exactly three must be specified" + ) + freq = to_offset(freq) + + if start is not None: + start = Timestamp(start) + + if end is not None: + end = Timestamp(end) + + if start is NaT or end is NaT: + raise ValueError("Neither `start` nor `end` can be NaT") + + if unit is not None: + if unit not in ["s", "ms", "us", "ns"]: + raise ValueError("'unit' must be one of 's', 'ms', 'us', 'ns'") + else: + unit = "ns" + + if start is not None and unit is not None: + start = start.as_unit(unit, round_ok=False) + if end is not None and unit is not None: + end = end.as_unit(unit, round_ok=False) + + left_inclusive, right_inclusive = validate_inclusive(inclusive) + start, end = _maybe_normalize_endpoints(start, end, normalize) + tz = _infer_tz_from_endpoints(start, end, tz) + + if tz is not None: + # Localize the start and end arguments + start_tz = None if start is None else start.tz + end_tz = None if end is None else end.tz + start = _maybe_localize_point( + start, start_tz, start, freq, tz, ambiguous, nonexistent + ) + end = _maybe_localize_point( + end, end_tz, end, freq, tz, ambiguous, nonexistent + ) + + if freq is not None: + # We break Day arithmetic (fixed 24 hour) here and opt for + # Day to 
mean calendar day (23/24/25 hour). Therefore, strip + # tz info from start and day to avoid DST arithmetic + if isinstance(freq, Day): + if start is not None: + start = start.tz_localize(None) + if end is not None: + end = end.tz_localize(None) + + if isinstance(freq, Tick): + i8values = generate_regular_range(start, end, periods, freq, unit=unit) + else: + xdr = _generate_range( + start=start, end=end, periods=periods, offset=freq, unit=unit + ) + i8values = np.array([x._value for x in xdr], dtype=np.int64) + + endpoint_tz = start.tz if start is not None else end.tz + + if tz is not None and endpoint_tz is None: + if not timezones.is_utc(tz): + # short-circuit tz_localize_to_utc which would make + # an unnecessary copy with UTC but be a no-op. + creso = abbrev_to_npy_unit(unit) + i8values = tzconversion.tz_localize_to_utc( + i8values, + tz, + ambiguous=ambiguous, + nonexistent=nonexistent, + creso=creso, + ) + + # i8values is localized datetime64 array -> have to convert + # start/end as well to compare + if start is not None: + start = start.tz_localize(tz, ambiguous, nonexistent) + if end is not None: + end = end.tz_localize(tz, ambiguous, nonexistent) + else: + # Create a linearly spaced date_range in local time + # Nanosecond-granularity timestamps aren't always correctly + # representable with doubles, so we limit the range that we + # pass to np.linspace as much as possible + i8values = ( + np.linspace(0, end._value - start._value, periods, dtype="int64") + + start._value + ) + if i8values.dtype != "i8": + # 2022-01-09 I (brock) am not sure if it is possible for this + # to overflow and cast to e.g. f8, but if it does we need to cast + i8values = i8values.astype("i8") + + if start == end: + if not left_inclusive and not right_inclusive: + i8values = i8values[1:-1] + else: + start_i8 = Timestamp(start)._value + end_i8 = Timestamp(end)._value + if not left_inclusive or not right_inclusive: + if not left_inclusive and len(i8values) and i8values[0] == start_i8: + i8values = i8values[1:] + if not right_inclusive and len(i8values) and i8values[-1] == end_i8: + i8values = i8values[:-1] + + dt64_values = i8values.view(f"datetime64[{unit}]") + dtype = tz_to_dtype(tz, unit=unit) + return cls._simple_new(dt64_values, freq=freq, dtype=dtype) + + # ----------------------------------------------------------------- + # DatetimeLike Interface + + def _unbox_scalar(self, value) -> np.datetime64: + if not isinstance(value, self._scalar_type) and value is not NaT: + raise ValueError("'value' should be a Timestamp.") + self._check_compatible_with(value) + if value is NaT: + return np.datetime64(value._value, self.unit) + else: + return value.as_unit(self.unit).asm8 + + def _scalar_from_string(self, value) -> Timestamp | NaTType: + return Timestamp(value, tz=self.tz) + + def _check_compatible_with(self, other) -> None: + if other is NaT: + return + self._assert_tzawareness_compat(other) + + # ----------------------------------------------------------------- + # Descriptive Properties + + def _box_func(self, x: np.datetime64) -> Timestamp | NaTType: + # GH#42228 + value = x.view("i8") + ts = Timestamp._from_value_and_reso(value, reso=self._creso, tz=self.tz) + return ts + + @property + # error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype" + # incompatible with return type "ExtensionDtype" in supertype + # "ExtensionArray" + def dtype(self) -> np.dtype[np.datetime64] | DatetimeTZDtype: # type: ignore[override] # noqa: E501 + """ + The dtype for the DatetimeArray. + + .. 
warning:: + + A future version of pandas will change dtype to never be a + ``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will + always be an instance of an ``ExtensionDtype`` subclass. + + Returns + ------- + numpy.dtype or DatetimeTZDtype + If the values are tz-naive, then ``np.dtype('datetime64[ns]')`` + is returned. + + If the values are tz-aware, then the ``DatetimeTZDtype`` + is returned. + """ + return self._dtype + + @property + def tz(self) -> tzinfo | None: + """ + Return the timezone. + + Returns + ------- + datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None + Returns None when the array is tz-naive. + + Examples + -------- + For Series: + + >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) + >>> s = pd.to_datetime(s) + >>> s + 0 2020-01-01 10:00:00+00:00 + 1 2020-02-01 11:00:00+00:00 + dtype: datetime64[ns, UTC] + >>> s.dt.tz + datetime.timezone.utc + + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", + ... "2/1/2020 11:00:00+00:00"]) + >>> idx.tz + datetime.timezone.utc + """ + # GH 18595 + return getattr(self.dtype, "tz", None) + + @tz.setter + def tz(self, value): + # GH 3746: Prevent localizing or converting the index by setting tz + raise AttributeError( + "Cannot directly set timezone. Use tz_localize() " + "or tz_convert() as appropriate" + ) + + @property + def tzinfo(self) -> tzinfo | None: + """ + Alias for tz attribute + """ + return self.tz + + @property # NB: override with cache_readonly in immutable subclasses + def is_normalized(self) -> bool: + """ + Returns True if all of the dates are at midnight ("no time") + """ + return is_date_array_normalized(self.asi8, self.tz, reso=self._creso) + + @property # NB: override with cache_readonly in immutable subclasses + def _resolution_obj(self) -> Resolution: + return get_resolution(self.asi8, self.tz, reso=self._creso) + + # ---------------------------------------------------------------- + # Array-Like / EA-Interface Methods + + def __array__(self, dtype=None) -> np.ndarray: + if dtype is None and self.tz: + # The default for tz-aware is object, to preserve tz info + dtype = object + + return super().__array__(dtype=dtype) + + def __iter__(self) -> Iterator: + """ + Return an iterator over the boxed values + + Yields + ------ + tstamp : Timestamp + """ + if self.ndim > 1: + for i in range(len(self)): + yield self[i] + else: + # convert in chunks of 10k for efficiency + data = self.asi8 + length = len(self) + chunksize = 10000 + chunks = (length // chunksize) + 1 + + for i in range(chunks): + start_i = i * chunksize + end_i = min((i + 1) * chunksize, length) + converted = ints_to_pydatetime( + data[start_i:end_i], + tz=self.tz, + box="timestamp", + reso=self._creso, + ) + yield from converted + + def astype(self, dtype, copy: bool = True): + # We handle + # --> datetime + # --> period + # DatetimeLikeArrayMixin Super handles the rest. + dtype = pandas_dtype(dtype) + + if dtype == self.dtype: + if copy: + return self.copy() + return self + + elif isinstance(dtype, ExtensionDtype): + if not isinstance(dtype, DatetimeTZDtype): + # e.g. Sparse[datetime64[ns]] + return super().astype(dtype, copy=copy) + elif self.tz is None: + # pre-2.0 this did self.tz_localize(dtype.tz), which did not match + # the Series behavior which did + # values.tz_localize("UTC").tz_convert(dtype.tz) + raise TypeError( + "Cannot use .astype to convert from timezone-naive dtype to " + "timezone-aware dtype. 
Use obj.tz_localize instead or " + "series.dt.tz_localize instead" + ) + else: + # tzaware unit conversion e.g. datetime64[s, UTC] + np_dtype = np.dtype(dtype.str) + res_values = astype_overflowsafe(self._ndarray, np_dtype, copy=copy) + return type(self)._simple_new(res_values, dtype=dtype, freq=self.freq) + + elif ( + self.tz is None + and lib.is_np_dtype(dtype, "M") + and not is_unitless(dtype) + and is_supported_unit(get_unit_from_dtype(dtype)) + ): + # unit conversion e.g. datetime64[s] + res_values = astype_overflowsafe(self._ndarray, dtype, copy=True) + return type(self)._simple_new(res_values, dtype=res_values.dtype) + # TODO: preserve freq? + + elif self.tz is not None and lib.is_np_dtype(dtype, "M"): + # pre-2.0 behavior for DTA/DTI was + # values.tz_convert("UTC").tz_localize(None), which did not match + # the Series behavior + raise TypeError( + "Cannot use .astype to convert from timezone-aware dtype to " + "timezone-naive dtype. Use obj.tz_localize(None) or " + "obj.tz_convert('UTC').tz_localize(None) instead." + ) + + elif ( + self.tz is None + and lib.is_np_dtype(dtype, "M") + and dtype != self.dtype + and is_unitless(dtype) + ): + raise TypeError( + "Casting to unit-less dtype 'datetime64' is not supported. " + "Pass e.g. 'datetime64[ns]' instead." + ) + + elif isinstance(dtype, PeriodDtype): + return self.to_period(freq=dtype.freq) + return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy) + + # ----------------------------------------------------------------- + # Rendering Methods + + def _format_native_types( + self, *, na_rep: str | float = "NaT", date_format=None, **kwargs + ) -> npt.NDArray[np.object_]: + from pandas.io.formats.format import get_format_datetime64_from_values + + fmt = get_format_datetime64_from_values(self, date_format) + + return tslib.format_array_from_datetime( + self.asi8, tz=self.tz, format=fmt, na_rep=na_rep, reso=self._creso + ) + + # ----------------------------------------------------------------- + # Comparison Methods + + def _has_same_tz(self, other) -> bool: + # vzone shouldn't be None if value is non-datetime like + if isinstance(other, np.datetime64): + # convert to Timestamp as np.datetime64 doesn't have tz attr + other = Timestamp(other) + + if not hasattr(other, "tzinfo"): + return False + other_tz = other.tzinfo + return timezones.tz_compare(self.tzinfo, other_tz) + + def _assert_tzawareness_compat(self, other) -> None: + # adapted from _Timestamp._assert_tzawareness_compat + other_tz = getattr(other, "tzinfo", None) + other_dtype = getattr(other, "dtype", None) + + if isinstance(other_dtype, DatetimeTZDtype): + # Get tzinfo from Series dtype + other_tz = other.dtype.tz + if other is NaT: + # pd.NaT quacks both aware and naive + pass + elif self.tz is None: + if other_tz is not None: + raise TypeError( + "Cannot compare tz-naive and tz-aware datetime-like objects." 
+ ) + elif other_tz is None: + raise TypeError( + "Cannot compare tz-naive and tz-aware datetime-like objects" + ) + + # ----------------------------------------------------------------- + # Arithmetic Methods + + def _add_offset(self, offset) -> Self: + assert not isinstance(offset, Tick) + + if self.tz is not None: + values = self.tz_localize(None) + else: + values = self + + try: + result = offset._apply_array(values).view(values.dtype) + except NotImplementedError: + warnings.warn( + "Non-vectorized DateOffset being applied to Series or DatetimeIndex.", + PerformanceWarning, + stacklevel=find_stack_level(), + ) + result = self.astype("O") + offset + result = type(self)._from_sequence(result).as_unit(self.unit) + if not len(self): + # GH#30336 _from_sequence won't be able to infer self.tz + return result.tz_localize(self.tz) + + else: + result = type(self)._simple_new(result, dtype=result.dtype) + if self.tz is not None: + result = result.tz_localize(self.tz) + + return result + + # ----------------------------------------------------------------- + # Timezone Conversion and Localization Methods + + def _local_timestamps(self) -> npt.NDArray[np.int64]: + """ + Convert to an i8 (unix-like nanosecond timestamp) representation + while keeping the local timezone and not using UTC. + This is used to calculate time-of-day information as if the timestamps + were timezone-naive. + """ + if self.tz is None or timezones.is_utc(self.tz): + # Avoid the copy that would be made in tzconversion + return self.asi8 + return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso) + + def tz_convert(self, tz) -> Self: + """ + Convert tz-aware Datetime Array/Index from one time zone to another. + + Parameters + ---------- + tz : str, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None + Time zone for time. Corresponding timestamps would be converted + to this time zone of the Datetime Array/Index. A `tz` of None will + convert to UTC and remove the timezone information. + + Returns + ------- + Array or Index + + Raises + ------ + TypeError + If Datetime Array/Index is tz-naive. + + See Also + -------- + DatetimeIndex.tz : A timezone that has a variable offset from UTC. + DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a + given time zone, or remove timezone from a tz-aware DatetimeIndex. + + Examples + -------- + With the `tz` parameter, we can change the DatetimeIndex + to other time zones: + + >>> dti = pd.date_range(start='2014-08-01 09:00', + ... freq='H', periods=3, tz='Europe/Berlin') + + >>> dti + DatetimeIndex(['2014-08-01 09:00:00+02:00', + '2014-08-01 10:00:00+02:00', + '2014-08-01 11:00:00+02:00'], + dtype='datetime64[ns, Europe/Berlin]', freq='H') + + >>> dti.tz_convert('US/Central') + DatetimeIndex(['2014-08-01 02:00:00-05:00', + '2014-08-01 03:00:00-05:00', + '2014-08-01 04:00:00-05:00'], + dtype='datetime64[ns, US/Central]', freq='H') + + With the ``tz=None``, we can remove the timezone (after converting + to UTC if necessary): + + >>> dti = pd.date_range(start='2014-08-01 09:00', freq='H', + ... 
periods=3, tz='Europe/Berlin') + + >>> dti + DatetimeIndex(['2014-08-01 09:00:00+02:00', + '2014-08-01 10:00:00+02:00', + '2014-08-01 11:00:00+02:00'], + dtype='datetime64[ns, Europe/Berlin]', freq='H') + + >>> dti.tz_convert(None) + DatetimeIndex(['2014-08-01 07:00:00', + '2014-08-01 08:00:00', + '2014-08-01 09:00:00'], + dtype='datetime64[ns]', freq='H') + """ + tz = timezones.maybe_get_tz(tz) + + if self.tz is None: + # tz naive, use tz_localize + raise TypeError( + "Cannot convert tz-naive timestamps, use tz_localize to localize" + ) + + # No conversion since timestamps are all UTC to begin with + dtype = tz_to_dtype(tz, unit=self.unit) + return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq) + + @dtl.ravel_compat + def tz_localize( + self, + tz, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ) -> Self: + """ + Localize tz-naive Datetime Array/Index to tz-aware Datetime Array/Index. + + This method takes a time zone (tz) naive Datetime Array/Index object + and makes this time zone aware. It does not move the time to another + time zone. + + This method can also be used to do the inverse -- to create a time + zone unaware object from an aware object. To that end, pass `tz=None`. + + Parameters + ---------- + tz : str, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None + Time zone to convert timestamps to. Passing ``None`` will + remove the time zone information preserving local time. + ambiguous : 'infer', 'NaT', bool array, default 'raise' + When clocks moved backward due to DST, ambiguous times may arise. + For example in Central European Time (UTC+01), when going from + 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at + 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the + `ambiguous` parameter dictates how ambiguous times should be + handled. + + - 'infer' will attempt to infer fall dst-transition hours based on + order + - bool-ndarray where True signifies a DST time, False signifies a + non-DST time (note that this flag is only applicable for + ambiguous times) + - 'NaT' will return NaT where there are ambiguous times + - 'raise' will raise an AmbiguousTimeError if there are ambiguous + times. + + nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \ +default 'raise' + A nonexistent time does not exist in a particular timezone + where clocks moved forward due to DST. + + - 'shift_forward' will shift the nonexistent time forward to the + closest existing time + - 'shift_backward' will shift the nonexistent time backward to the + closest existing time + - 'NaT' will return NaT where there are nonexistent times + - timedelta objects will shift nonexistent times by the timedelta + - 'raise' will raise an NonExistentTimeError if there are + nonexistent times. + + Returns + ------- + Same type as self + Array/Index converted to the specified time zone. + + Raises + ------ + TypeError + If the Datetime Array/Index is tz-aware and tz is not None. + + See Also + -------- + DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from + one time zone to another. 
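+        DatetimeIndex.tz : A timezone that has a variable offset from UTC.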
+ + Examples + -------- + >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3) + >>> tz_naive + DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00', + '2018-03-03 09:00:00'], + dtype='datetime64[ns]', freq='D') + + Localize DatetimeIndex in US/Eastern time zone: + + >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern') + >>> tz_aware + DatetimeIndex(['2018-03-01 09:00:00-05:00', + '2018-03-02 09:00:00-05:00', + '2018-03-03 09:00:00-05:00'], + dtype='datetime64[ns, US/Eastern]', freq=None) + + With the ``tz=None``, we can remove the time zone information + while keeping the local time (not converted to UTC): + + >>> tz_aware.tz_localize(None) + DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00', + '2018-03-03 09:00:00'], + dtype='datetime64[ns]', freq=None) + + Be careful with DST changes. When there is sequential data, pandas can + infer the DST time: + + >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00', + ... '2018-10-28 02:00:00', + ... '2018-10-28 02:30:00', + ... '2018-10-28 02:00:00', + ... '2018-10-28 02:30:00', + ... '2018-10-28 03:00:00', + ... '2018-10-28 03:30:00'])) + >>> s.dt.tz_localize('CET', ambiguous='infer') + 0 2018-10-28 01:30:00+02:00 + 1 2018-10-28 02:00:00+02:00 + 2 2018-10-28 02:30:00+02:00 + 3 2018-10-28 02:00:00+01:00 + 4 2018-10-28 02:30:00+01:00 + 5 2018-10-28 03:00:00+01:00 + 6 2018-10-28 03:30:00+01:00 + dtype: datetime64[ns, CET] + + In some cases, inferring the DST is impossible. In such cases, you can + pass an ndarray to the ambiguous parameter to set the DST explicitly + + >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00', + ... '2018-10-28 02:36:00', + ... '2018-10-28 03:46:00'])) + >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False])) + 0 2018-10-28 01:20:00+02:00 + 1 2018-10-28 02:36:00+02:00 + 2 2018-10-28 03:46:00+01:00 + dtype: datetime64[ns, CET] + + If the DST transition causes nonexistent times, you can shift these + dates forward or backwards with a timedelta object or `'shift_forward'` + or `'shift_backwards'`. + + >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00', + ... 
'2015-03-29 03:30:00'])) + >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward') + 0 2015-03-29 03:00:00+02:00 + 1 2015-03-29 03:30:00+02:00 + dtype: datetime64[ns, Europe/Warsaw] + + >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward') + 0 2015-03-29 01:59:59.999999999+01:00 + 1 2015-03-29 03:30:00+02:00 + dtype: datetime64[ns, Europe/Warsaw] + + >>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) + 0 2015-03-29 03:30:00+02:00 + 1 2015-03-29 03:30:00+02:00 + dtype: datetime64[ns, Europe/Warsaw] + """ + nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") + if nonexistent not in nonexistent_options and not isinstance( + nonexistent, timedelta + ): + raise ValueError( + "The nonexistent argument must be one of 'raise', " + "'NaT', 'shift_forward', 'shift_backward' or " + "a timedelta object" + ) + + if self.tz is not None: + if tz is None: + new_dates = tz_convert_from_utc(self.asi8, self.tz, reso=self._creso) + else: + raise TypeError("Already tz-aware, use tz_convert to convert.") + else: + tz = timezones.maybe_get_tz(tz) + # Convert to UTC + + new_dates = tzconversion.tz_localize_to_utc( + self.asi8, + tz, + ambiguous=ambiguous, + nonexistent=nonexistent, + creso=self._creso, + ) + new_dates_dt64 = new_dates.view(f"M8[{self.unit}]") + dtype = tz_to_dtype(tz, unit=self.unit) + + freq = None + if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates_dt64[0])): + # we can preserve freq + # TODO: Also for fixed-offsets + freq = self.freq + elif tz is None and self.tz is None: + # no-op + freq = self.freq + return self._simple_new(new_dates_dt64, dtype=dtype, freq=freq) + + # ---------------------------------------------------------------- + # Conversion Methods - Vectorized analogues of Timestamp methods + + def to_pydatetime(self) -> npt.NDArray[np.object_]: + """ + Return an ndarray of ``datetime.datetime`` objects. + + Returns + ------- + numpy.ndarray + + Examples + -------- + >>> idx = pd.date_range('2018-02-27', periods=3) + >>> idx.to_pydatetime() + array([datetime.datetime(2018, 2, 27, 0, 0), + datetime.datetime(2018, 2, 28, 0, 0), + datetime.datetime(2018, 3, 1, 0, 0)], dtype=object) + """ + return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso) + + def normalize(self) -> Self: + """ + Convert times to midnight. + + The time component of the date-time is converted to midnight i.e. + 00:00:00. This is useful in cases, when the time does not matter. + Length is unaltered. The timezones are unaffected. + + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on Datetime Array/Index. + + Returns + ------- + DatetimeArray, DatetimeIndex or Series + The same type as the original data. Series will have the same + name and index. DatetimeIndex will have the same name. + + See Also + -------- + floor : Floor the datetimes to the specified freq. + ceil : Ceil the datetimes to the specified freq. + round : Round the datetimes to the specified freq. + + Examples + -------- + >>> idx = pd.date_range(start='2014-08-01 10:00', freq='H', + ... 
periods=3, tz='Asia/Calcutta') + >>> idx + DatetimeIndex(['2014-08-01 10:00:00+05:30', + '2014-08-01 11:00:00+05:30', + '2014-08-01 12:00:00+05:30'], + dtype='datetime64[ns, Asia/Calcutta]', freq='H') + >>> idx.normalize() + DatetimeIndex(['2014-08-01 00:00:00+05:30', + '2014-08-01 00:00:00+05:30', + '2014-08-01 00:00:00+05:30'], + dtype='datetime64[ns, Asia/Calcutta]', freq=None) + """ + new_values = normalize_i8_timestamps(self.asi8, self.tz, reso=self._creso) + dt64_values = new_values.view(self._ndarray.dtype) + + dta = type(self)._simple_new(dt64_values, dtype=dt64_values.dtype) + dta = dta._with_freq("infer") + if self.tz is not None: + dta = dta.tz_localize(self.tz) + return dta + + def to_period(self, freq=None) -> PeriodArray: + """ + Cast to PeriodArray/PeriodIndex at a particular frequency. + + Converts DatetimeArray/Index to PeriodArray/PeriodIndex. + + Parameters + ---------- + freq : str or Period, optional + One of pandas' :ref:`period aliases ` + or an Period object. Will be inferred by default. + + Returns + ------- + PeriodArray/PeriodIndex + + Raises + ------ + ValueError + When converting a DatetimeArray/Index with non-regular values, + so that a frequency cannot be inferred. + + See Also + -------- + PeriodIndex: Immutable ndarray holding ordinal values. + DatetimeIndex.to_pydatetime: Return DatetimeIndex as object. + + Examples + -------- + >>> df = pd.DataFrame({"y": [1, 2, 3]}, + ... index=pd.to_datetime(["2000-03-31 00:00:00", + ... "2000-05-31 00:00:00", + ... "2000-08-31 00:00:00"])) + >>> df.index.to_period("M") + PeriodIndex(['2000-03', '2000-05', '2000-08'], + dtype='period[M]') + + Infer the daily frequency + + >>> idx = pd.date_range("2017-01-01", periods=2) + >>> idx.to_period() + PeriodIndex(['2017-01-01', '2017-01-02'], + dtype='period[D]') + """ + from pandas.core.arrays import PeriodArray + + if self.tz is not None: + warnings.warn( + "Converting to PeriodArray/Index representation " + "will drop timezone information.", + UserWarning, + stacklevel=find_stack_level(), + ) + + if freq is None: + freq = self.freqstr or self.inferred_freq + + if freq is None: + raise ValueError( + "You must pass a freq argument as current index has none." + ) + + res = get_period_alias(freq) + + # https://github.com/pandas-dev/pandas/issues/33358 + if res is None: + res = freq + + freq = res + + return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz) + + # ----------------------------------------------------------------- + # Properties - Vectorized Timestamp Properties/Methods + + def month_name(self, locale=None) -> npt.NDArray[np.object_]: + """ + Return the month names with specified locale. + + Parameters + ---------- + locale : str, optional + Locale determining the language in which to return the month name. + Default is English locale (``'en_US.utf8'``). Use the command + ``locale -a`` on your terminal on Unix systems to find your locale + language code. + + Returns + ------- + Series or Index + Series or Index of month names. 
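+
+        See Also
+        --------
+        day_name : Return the day names with specified locale.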
+ + Examples + -------- + >>> s = pd.Series(pd.date_range(start='2018-01', freq='M', periods=3)) + >>> s + 0 2018-01-31 + 1 2018-02-28 + 2 2018-03-31 + dtype: datetime64[ns] + >>> s.dt.month_name() + 0 January + 1 February + 2 March + dtype: object + + >>> idx = pd.date_range(start='2018-01', freq='M', periods=3) + >>> idx + DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], + dtype='datetime64[ns]', freq='M') + >>> idx.month_name() + Index(['January', 'February', 'March'], dtype='object') + + Using the ``locale`` parameter you can set a different locale language, + for example: ``idx.month_name(locale='pt_BR.utf8')`` will return month + names in Brazilian Portuguese language. + + >>> idx = pd.date_range(start='2018-01', freq='M', periods=3) + >>> idx + DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], + dtype='datetime64[ns]', freq='M') + >>> idx.month_name(locale='pt_BR.utf8') # doctest: +SKIP + Index(['Janeiro', 'Fevereiro', 'Março'], dtype='object') + """ + values = self._local_timestamps() + + result = fields.get_date_name_field( + values, "month_name", locale=locale, reso=self._creso + ) + result = self._maybe_mask_results(result, fill_value=None) + return result + + def day_name(self, locale=None) -> npt.NDArray[np.object_]: + """ + Return the day names with specified locale. + + Parameters + ---------- + locale : str, optional + Locale determining the language in which to return the day name. + Default is English locale (``'en_US.utf8'``). Use the command + ``locale -a`` on your terminal on Unix systems to find your locale + language code. + + Returns + ------- + Series or Index + Series or Index of day names. + + Examples + -------- + >>> s = pd.Series(pd.date_range(start='2018-01-01', freq='D', periods=3)) + >>> s + 0 2018-01-01 + 1 2018-01-02 + 2 2018-01-03 + dtype: datetime64[ns] + >>> s.dt.day_name() + 0 Monday + 1 Tuesday + 2 Wednesday + dtype: object + + >>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3) + >>> idx + DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'], + dtype='datetime64[ns]', freq='D') + >>> idx.day_name() + Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object') + + Using the ``locale`` parameter you can set a different locale language, + for example: ``idx.day_name(locale='pt_BR.utf8')`` will return day + names in Brazilian Portuguese language. + + >>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3) + >>> idx + DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'], + dtype='datetime64[ns]', freq='D') + >>> idx.day_name(locale='pt_BR.utf8') # doctest: +SKIP + Index(['Segunda', 'Terça', 'Quarta'], dtype='object') + """ + values = self._local_timestamps() + + result = fields.get_date_name_field( + values, "day_name", locale=locale, reso=self._creso + ) + result = self._maybe_mask_results(result, fill_value=None) + return result + + @property + def time(self) -> npt.NDArray[np.object_]: + """ + Returns numpy array of :class:`datetime.time` objects. + + The time part of the Timestamps. + + Examples + -------- + For Series: + + >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) + >>> s = pd.to_datetime(s) + >>> s + 0 2020-01-01 10:00:00+00:00 + 1 2020-02-01 11:00:00+00:00 + dtype: datetime64[ns, UTC] + >>> s.dt.time + 0 10:00:00 + 1 11:00:00 + dtype: object + + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", + ... 
"2/1/2020 11:00:00+00:00"]) + >>> idx.time + array([datetime.time(10, 0), datetime.time(11, 0)], dtype=object) + """ + # If the Timestamps have a timezone that is not UTC, + # convert them into their i8 representation while + # keeping their timezone and not using UTC + timestamps = self._local_timestamps() + + return ints_to_pydatetime(timestamps, box="time", reso=self._creso) + + @property + def timetz(self) -> npt.NDArray[np.object_]: + """ + Returns numpy array of :class:`datetime.time` objects with timezones. + + The time part of the Timestamps. + + Examples + -------- + For Series: + + >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) + >>> s = pd.to_datetime(s) + >>> s + 0 2020-01-01 10:00:00+00:00 + 1 2020-02-01 11:00:00+00:00 + dtype: datetime64[ns, UTC] + >>> s.dt.timetz + 0 10:00:00+00:00 + 1 11:00:00+00:00 + dtype: object + + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", + ... "2/1/2020 11:00:00+00:00"]) + >>> idx.timetz + array([datetime.time(10, 0, tzinfo=datetime.timezone.utc), + datetime.time(11, 0, tzinfo=datetime.timezone.utc)], dtype=object) + """ + return ints_to_pydatetime(self.asi8, self.tz, box="time", reso=self._creso) + + @property + def date(self) -> npt.NDArray[np.object_]: + """ + Returns numpy array of python :class:`datetime.date` objects. + + Namely, the date part of Timestamps without time and + timezone information. + + Examples + -------- + For Series: + + >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) + >>> s = pd.to_datetime(s) + >>> s + 0 2020-01-01 10:00:00+00:00 + 1 2020-02-01 11:00:00+00:00 + dtype: datetime64[ns, UTC] + >>> s.dt.date + 0 2020-01-01 + 1 2020-02-01 + dtype: object + + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", + ... "2/1/2020 11:00:00+00:00"]) + >>> idx.date + array([datetime.date(2020, 1, 1), datetime.date(2020, 2, 1)], dtype=object) + """ + # If the Timestamps have a timezone that is not UTC, + # convert them into their i8 representation while + # keeping their timezone and not using UTC + timestamps = self._local_timestamps() + + return ints_to_pydatetime(timestamps, box="date", reso=self._creso) + + def isocalendar(self) -> DataFrame: + """ + Calculate year, week, and day according to the ISO 8601 standard. + + Returns + ------- + DataFrame + With columns year, week and day. + + See Also + -------- + Timestamp.isocalendar : Function return a 3-tuple containing ISO year, + week number, and weekday for the given Timestamp object. + datetime.date.isocalendar : Return a named tuple object with + three components: year, week and weekday. + + Examples + -------- + >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4) + >>> idx.isocalendar() + year week day + 2019-12-29 2019 52 7 + 2019-12-30 2020 1 1 + 2019-12-31 2020 1 2 + 2020-01-01 2020 1 3 + >>> idx.isocalendar().week + 2019-12-29 52 + 2019-12-30 1 + 2019-12-31 1 + 2020-01-01 1 + Freq: D, Name: week, dtype: UInt32 + """ + from pandas import DataFrame + + values = self._local_timestamps() + sarray = fields.build_isocalendar_sarray(values, reso=self._creso) + iso_calendar_df = DataFrame( + sarray, columns=["year", "week", "day"], dtype="UInt32" + ) + if self._hasna: + iso_calendar_df.iloc[self._isnan] = None + return iso_calendar_df + + year = _field_accessor( + "year", + "Y", + """ + The year of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="Y") + ... 
) + >>> datetime_series + 0 2000-12-31 + 1 2001-12-31 + 2 2002-12-31 + dtype: datetime64[ns] + >>> datetime_series.dt.year + 0 2000 + 1 2001 + 2 2002 + dtype: int32 + """, + ) + month = _field_accessor( + "month", + "M", + """ + The month as January=1, December=12. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="M") + ... ) + >>> datetime_series + 0 2000-01-31 + 1 2000-02-29 + 2 2000-03-31 + dtype: datetime64[ns] + >>> datetime_series.dt.month + 0 1 + 1 2 + 2 3 + dtype: int32 + """, + ) + day = _field_accessor( + "day", + "D", + """ + The day of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="D") + ... ) + >>> datetime_series + 0 2000-01-01 + 1 2000-01-02 + 2 2000-01-03 + dtype: datetime64[ns] + >>> datetime_series.dt.day + 0 1 + 1 2 + 2 3 + dtype: int32 + """, + ) + hour = _field_accessor( + "hour", + "h", + """ + The hours of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="h") + ... ) + >>> datetime_series + 0 2000-01-01 00:00:00 + 1 2000-01-01 01:00:00 + 2 2000-01-01 02:00:00 + dtype: datetime64[ns] + >>> datetime_series.dt.hour + 0 0 + 1 1 + 2 2 + dtype: int32 + """, + ) + minute = _field_accessor( + "minute", + "m", + """ + The minutes of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="T") + ... ) + >>> datetime_series + 0 2000-01-01 00:00:00 + 1 2000-01-01 00:01:00 + 2 2000-01-01 00:02:00 + dtype: datetime64[ns] + >>> datetime_series.dt.minute + 0 0 + 1 1 + 2 2 + dtype: int32 + """, + ) + second = _field_accessor( + "second", + "s", + """ + The seconds of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="s") + ... ) + >>> datetime_series + 0 2000-01-01 00:00:00 + 1 2000-01-01 00:00:01 + 2 2000-01-01 00:00:02 + dtype: datetime64[ns] + >>> datetime_series.dt.second + 0 0 + 1 1 + 2 2 + dtype: int32 + """, + ) + microsecond = _field_accessor( + "microsecond", + "us", + """ + The microseconds of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="us") + ... ) + >>> datetime_series + 0 2000-01-01 00:00:00.000000 + 1 2000-01-01 00:00:00.000001 + 2 2000-01-01 00:00:00.000002 + dtype: datetime64[ns] + >>> datetime_series.dt.microsecond + 0 0 + 1 1 + 2 2 + dtype: int32 + """, + ) + nanosecond = _field_accessor( + "nanosecond", + "ns", + """ + The nanoseconds of the datetime. + + Examples + -------- + >>> datetime_series = pd.Series( + ... pd.date_range("2000-01-01", periods=3, freq="ns") + ... ) + >>> datetime_series + 0 2000-01-01 00:00:00.000000000 + 1 2000-01-01 00:00:00.000000001 + 2 2000-01-01 00:00:00.000000002 + dtype: datetime64[ns] + >>> datetime_series.dt.nanosecond + 0 0 + 1 1 + 2 2 + dtype: int32 + """, + ) + _dayofweek_doc = """ + The day of the week with Monday=0, Sunday=6. + + Return the day of the week. It is assumed the week starts on + Monday, which is denoted by 0 and ends on Sunday which is denoted + by 6. This method is available on both Series with datetime + values (using the `dt` accessor) or DatetimeIndex. + + Returns + ------- + Series or Index + Containing integers indicating the day number. + + See Also + -------- + Series.dt.dayofweek : Alias. + Series.dt.weekday : Alias. + Series.dt.day_name : Returns the name of the day of the week. 
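+    Series.dt.dayofyear : The ordinal day of the year.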
+ + Examples + -------- + >>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series() + >>> s.dt.dayofweek + 2016-12-31 5 + 2017-01-01 6 + 2017-01-02 0 + 2017-01-03 1 + 2017-01-04 2 + 2017-01-05 3 + 2017-01-06 4 + 2017-01-07 5 + 2017-01-08 6 + Freq: D, dtype: int32 + """ + day_of_week = _field_accessor("day_of_week", "dow", _dayofweek_doc) + dayofweek = day_of_week + weekday = day_of_week + + day_of_year = _field_accessor( + "dayofyear", + "doy", + """ + The ordinal day of the year. + + Examples + -------- + For Series: + + >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) + >>> s = pd.to_datetime(s) + >>> s + 0 2020-01-01 10:00:00+00:00 + 1 2020-02-01 11:00:00+00:00 + dtype: datetime64[ns, UTC] + >>> s.dt.dayofyear + 0 1 + 1 32 + dtype: int32 + + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", + ... "2/1/2020 11:00:00+00:00"]) + >>> idx.dayofyear + Index([1, 32], dtype='int32') + """, + ) + dayofyear = day_of_year + quarter = _field_accessor( + "quarter", + "q", + """ + The quarter of the date. + + Examples + -------- + For Series: + + >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "4/1/2020 11:00:00+00:00"]) + >>> s = pd.to_datetime(s) + >>> s + 0 2020-01-01 10:00:00+00:00 + 1 2020-04-01 11:00:00+00:00 + dtype: datetime64[ns, UTC] + >>> s.dt.quarter + 0 1 + 1 2 + dtype: int32 + + For DatetimeIndex: + + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", + ... "2/1/2020 11:00:00+00:00"]) + >>> idx.quarter + Index([1, 1], dtype='int32') + """, + ) + days_in_month = _field_accessor( + "days_in_month", + "dim", + """ + The number of days in the month. + + Examples + -------- + >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) + >>> s = pd.to_datetime(s) + >>> s + 0 2020-01-01 10:00:00+00:00 + 1 2020-02-01 11:00:00+00:00 + dtype: datetime64[ns, UTC] + >>> s.dt.daysinmonth + 0 31 + 1 29 + dtype: int32 + """, + ) + daysinmonth = days_in_month + _is_month_doc = """ + Indicates whether the date is the {first_or_last} day of the month. + + Returns + ------- + Series or array + For Series, returns a Series with boolean values. + For DatetimeIndex, returns a boolean array. + + See Also + -------- + is_month_start : Return a boolean indicating whether the date + is the first day of the month. + is_month_end : Return a boolean indicating whether the date + is the last day of the month. + + Examples + -------- + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. + + >>> s = pd.Series(pd.date_range("2018-02-27", periods=3)) + >>> s + 0 2018-02-27 + 1 2018-02-28 + 2 2018-03-01 + dtype: datetime64[ns] + >>> s.dt.is_month_start + 0 False + 1 False + 2 True + dtype: bool + >>> s.dt.is_month_end + 0 False + 1 True + 2 False + dtype: bool + + >>> idx = pd.date_range("2018-02-27", periods=3) + >>> idx.is_month_start + array([False, False, True]) + >>> idx.is_month_end + array([False, True, False]) + """ + is_month_start = _field_accessor( + "is_month_start", "is_month_start", _is_month_doc.format(first_or_last="first") + ) + + is_month_end = _field_accessor( + "is_month_end", "is_month_end", _is_month_doc.format(first_or_last="last") + ) + + is_quarter_start = _field_accessor( + "is_quarter_start", + "is_quarter_start", + """ + Indicator for whether the date is the first day of a quarter. + + Returns + ------- + is_quarter_start : Series or DatetimeIndex + The same type as the original data with boolean values. Series will + have the same name and index. 
DatetimeIndex will have the same + name. + + See Also + -------- + quarter : Return the quarter of the date. + is_quarter_end : Similar property for indicating the quarter end. + + Examples + -------- + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. + + >>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30", + ... periods=4)}) + >>> df.assign(quarter=df.dates.dt.quarter, + ... is_quarter_start=df.dates.dt.is_quarter_start) + dates quarter is_quarter_start + 0 2017-03-30 1 False + 1 2017-03-31 1 False + 2 2017-04-01 2 True + 3 2017-04-02 2 False + + >>> idx = pd.date_range('2017-03-30', periods=4) + >>> idx + DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'], + dtype='datetime64[ns]', freq='D') + + >>> idx.is_quarter_start + array([False, False, True, False]) + """, + ) + is_quarter_end = _field_accessor( + "is_quarter_end", + "is_quarter_end", + """ + Indicator for whether the date is the last day of a quarter. + + Returns + ------- + is_quarter_end : Series or DatetimeIndex + The same type as the original data with boolean values. Series will + have the same name and index. DatetimeIndex will have the same + name. + + See Also + -------- + quarter : Return the quarter of the date. + is_quarter_start : Similar property indicating the quarter start. + + Examples + -------- + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. + + >>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30", + ... periods=4)}) + >>> df.assign(quarter=df.dates.dt.quarter, + ... is_quarter_end=df.dates.dt.is_quarter_end) + dates quarter is_quarter_end + 0 2017-03-30 1 False + 1 2017-03-31 1 True + 2 2017-04-01 2 False + 3 2017-04-02 2 False + + >>> idx = pd.date_range('2017-03-30', periods=4) + >>> idx + DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'], + dtype='datetime64[ns]', freq='D') + + >>> idx.is_quarter_end + array([False, True, False, False]) + """, + ) + is_year_start = _field_accessor( + "is_year_start", + "is_year_start", + """ + Indicate whether the date is the first day of a year. + + Returns + ------- + Series or DatetimeIndex + The same type as the original data with boolean values. Series will + have the same name and index. DatetimeIndex will have the same + name. + + See Also + -------- + is_year_end : Similar property indicating the last day of the year. + + Examples + -------- + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. + + >>> dates = pd.Series(pd.date_range("2017-12-30", periods=3)) + >>> dates + 0 2017-12-30 + 1 2017-12-31 + 2 2018-01-01 + dtype: datetime64[ns] + + >>> dates.dt.is_year_start + 0 False + 1 False + 2 True + dtype: bool + + >>> idx = pd.date_range("2017-12-30", periods=3) + >>> idx + DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'], + dtype='datetime64[ns]', freq='D') + + >>> idx.is_year_start + array([False, False, True]) + """, + ) + is_year_end = _field_accessor( + "is_year_end", + "is_year_end", + """ + Indicate whether the date is the last day of the year. + + Returns + ------- + Series or DatetimeIndex + The same type as the original data with boolean values. Series will + have the same name and index. DatetimeIndex will have the same + name. + + See Also + -------- + is_year_start : Similar property indicating the start of the year. 
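+    is_leap_year : Boolean indicator if the date belongs to a leap year.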
+ + Examples + -------- + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. + + >>> dates = pd.Series(pd.date_range("2017-12-30", periods=3)) + >>> dates + 0 2017-12-30 + 1 2017-12-31 + 2 2018-01-01 + dtype: datetime64[ns] + + >>> dates.dt.is_year_end + 0 False + 1 True + 2 False + dtype: bool + + >>> idx = pd.date_range("2017-12-30", periods=3) + >>> idx + DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'], + dtype='datetime64[ns]', freq='D') + + >>> idx.is_year_end + array([False, True, False]) + """, + ) + is_leap_year = _field_accessor( + "is_leap_year", + "is_leap_year", + """ + Boolean indicator if the date belongs to a leap year. + + A leap year is a year, which has 366 days (instead of 365) including + 29th of February as an intercalary day. + Leap years are years which are multiples of four with the exception + of years divisible by 100 but not by 400. + + Returns + ------- + Series or ndarray + Booleans indicating if dates belong to a leap year. + + Examples + -------- + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. + + >>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="Y") + >>> idx + DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'], + dtype='datetime64[ns]', freq='A-DEC') + >>> idx.is_leap_year + array([ True, False, False]) + + >>> dates_series = pd.Series(idx) + >>> dates_series + 0 2012-12-31 + 1 2013-12-31 + 2 2014-12-31 + dtype: datetime64[ns] + >>> dates_series.dt.is_leap_year + 0 True + 1 False + 2 False + dtype: bool + """, + ) + + def to_julian_date(self) -> npt.NDArray[np.float64]: + """ + Convert Datetime Array to float64 ndarray of Julian Dates. + 0 Julian date is noon January 1, 4713 BC. + https://en.wikipedia.org/wiki/Julian_day + """ + + # http://mysite.verizon.net/aesir_research/date/jdalg2.htm + year = np.asarray(self.year) + month = np.asarray(self.month) + day = np.asarray(self.day) + testarr = month < 3 + year[testarr] -= 1 + month[testarr] += 12 + return ( + day + + np.fix((153 * month - 457) / 5) + + 365 * year + + np.floor(year / 4) + - np.floor(year / 100) + + np.floor(year / 400) + + 1_721_118.5 + + ( + self.hour + + self.minute / 60 + + self.second / 3600 + + self.microsecond / 3600 / 10**6 + + self.nanosecond / 3600 / 10**9 + ) + / 24 + ) + + # ----------------------------------------------------------------- + # Reductions + + def std( + self, + axis=None, + dtype=None, + out=None, + ddof: int = 1, + keepdims: bool = False, + skipna: bool = True, + ): + """ + Return sample standard deviation over requested axis. + + Normalized by `N-1` by default. This can be changed using ``ddof``. + + Parameters + ---------- + axis : int, optional + Axis for the function to be applied on. For :class:`pandas.Series` + this parameter is unused and defaults to ``None``. + ddof : int, default 1 + Degrees of Freedom. The divisor used in calculations is `N - ddof`, + where `N` represents the number of elements. + skipna : bool, default True + Exclude NA/null values. If an entire row/column is ``NA``, the result + will be ``NA``. + + Returns + ------- + Timedelta + + See Also + -------- + numpy.ndarray.std : Returns the standard deviation of the array elements + along given axis. + Series.std : Return sample standard deviation over requested axis. 
+ + Examples + -------- + For :class:`pandas.DatetimeIndex`: + + >>> idx = pd.date_range('2001-01-01 00:00', periods=3) + >>> idx + DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'], + dtype='datetime64[ns]', freq='D') + >>> idx.std() + Timedelta('1 days 00:00:00') + """ + # Because std is translation-invariant, we can get self.std + # by calculating (self - Timestamp(0)).std, and we can do it + # without creating a copy by using a view on self._ndarray + from pandas.core.arrays import TimedeltaArray + + # Find the td64 dtype with the same resolution as our dt64 dtype + dtype_str = self._ndarray.dtype.name.replace("datetime64", "timedelta64") + dtype = np.dtype(dtype_str) + + tda = TimedeltaArray._simple_new(self._ndarray.view(dtype), dtype=dtype) + + return tda.std(axis=axis, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna) + + +# ------------------------------------------------------------------- +# Constructor Helpers + + +def _sequence_to_dt64ns( + data, + *, + copy: bool = False, + tz: tzinfo | None = None, + dayfirst: bool = False, + yearfirst: bool = False, + ambiguous: TimeAmbiguous = "raise", + out_unit: str | None = None, +): + """ + Parameters + ---------- + data : list-like + copy : bool, default False + tz : tzinfo or None, default None + dayfirst : bool, default False + yearfirst : bool, default False + ambiguous : str, bool, or arraylike, default 'raise' + See pandas._libs.tslibs.tzconversion.tz_localize_to_utc. + out_unit : str or None, default None + Desired output resolution. + + Returns + ------- + result : numpy.ndarray + The sequence converted to a numpy array with dtype ``datetime64[ns]``. + tz : tzinfo or None + Either the user-provided tzinfo or one inferred from the data. + inferred_freq : Tick or None + The inferred frequency of the sequence. + + Raises + ------ + TypeError : PeriodDType data is passed + """ + inferred_freq = None + + data, copy = dtl.ensure_arraylike_for_datetimelike( + data, copy, cls_name="DatetimeArray" + ) + + if isinstance(data, DatetimeArray): + inferred_freq = data.freq + + # By this point we are assured to have either a numpy array or Index + data, copy = maybe_convert_dtype(data, copy, tz=tz) + data_dtype = getattr(data, "dtype", None) + + out_dtype = DT64NS_DTYPE + if out_unit is not None: + out_dtype = np.dtype(f"M8[{out_unit}]") + + if data_dtype == object or is_string_dtype(data_dtype): + # TODO: We do not have tests specific to string-dtypes, + # also complex or categorical or other extension + copy = False + if lib.infer_dtype(data, skipna=False) == "integer": + data = data.astype(np.int64) + elif tz is not None and ambiguous == "raise": + # TODO: yearfirst/dayfirst/etc? + obj_data = np.asarray(data, dtype=object) + i8data = tslib.array_to_datetime_with_tz(obj_data, tz) + return i8data.view(DT64NS_DTYPE), tz, None + else: + # data comes back here as either i8 to denote UTC timestamps + # or M8[ns] to denote wall times + data, inferred_tz = objects_to_datetime64ns( + data, + dayfirst=dayfirst, + yearfirst=yearfirst, + allow_object=False, + ) + if tz and inferred_tz: + # two timezones: convert to intended from base UTC repr + assert data.dtype == "i8" + # GH#42505 + # by convention, these are _already_ UTC, e.g + return data.view(DT64NS_DTYPE), tz, None + + elif inferred_tz: + tz = inferred_tz + + data_dtype = data.dtype + + # `data` may have originally been a Categorical[datetime64[ns, tz]], + # so we need to handle these types. 
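+    # Editorial note (not from upstream pandas): by this point, e.g. a plain
+    # list of ISO strings like ["2021-01-01", "2021-01-02"] has already been
+    # parsed above into a tz-naive datetime64[ns] ndarray of wall times, and
+    # the branches below dispatch purely on the resulting dtype, so the call
+    # _sequence_to_dt64ns(["2021-01-01", "2021-01-02"]) would return
+    # (datetime64[ns] ndarray, tz=None, inferred_freq=None).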
+ if isinstance(data_dtype, DatetimeTZDtype): + # DatetimeArray -> ndarray + tz = _maybe_infer_tz(tz, data.tz) + result = data._ndarray + + elif lib.is_np_dtype(data_dtype, "M"): + # tz-naive DatetimeArray or ndarray[datetime64] + data = getattr(data, "_ndarray", data) + new_dtype = data.dtype + data_unit = get_unit_from_dtype(new_dtype) + if not is_supported_unit(data_unit): + # Cast to the nearest supported unit, generally "s" + new_reso = get_supported_reso(data_unit) + new_unit = npy_unit_to_abbrev(new_reso) + new_dtype = np.dtype(f"M8[{new_unit}]") + data = astype_overflowsafe(data, dtype=new_dtype, copy=False) + data_unit = get_unit_from_dtype(new_dtype) + copy = False + + if data.dtype.byteorder == ">": + # TODO: better way to handle this? non-copying alternative? + # without this, test_constructor_datetime64_bigendian fails + data = data.astype(data.dtype.newbyteorder("<")) + new_dtype = data.dtype + copy = False + + if tz is not None: + # Convert tz-naive to UTC + # TODO: if tz is UTC, are there situations where we *don't* want a + # copy? tz_localize_to_utc always makes one. + shape = data.shape + if data.ndim > 1: + data = data.ravel() + + data = tzconversion.tz_localize_to_utc( + data.view("i8"), tz, ambiguous=ambiguous, creso=data_unit + ) + data = data.view(new_dtype) + data = data.reshape(shape) + + assert data.dtype == new_dtype, data.dtype + result = data + + else: + # must be integer dtype otherwise + # assume this data are epoch timestamps + if data.dtype != INT64_DTYPE: + data = data.astype(np.int64, copy=False) + result = data.view(out_dtype) + + if copy: + result = result.copy() + + assert isinstance(result, np.ndarray), type(result) + assert result.dtype.kind == "M" + assert result.dtype != "M8" + assert is_supported_unit(get_unit_from_dtype(result.dtype)) + return result, tz, inferred_freq + + +def objects_to_datetime64ns( + data: np.ndarray, + dayfirst, + yearfirst, + utc: bool = False, + errors: DateTimeErrorChoices = "raise", + allow_object: bool = False, +): + """ + Convert data to array of timestamps. + + Parameters + ---------- + data : np.ndarray[object] + dayfirst : bool + yearfirst : bool + utc : bool, default False + Whether to convert/localize timestamps to UTC. + errors : {'raise', 'ignore', 'coerce'} + allow_object : bool + Whether to return an object-dtype ndarray instead of raising if the + data contains more than one timezone. + + Returns + ------- + result : ndarray + np.int64 dtype if returned values represent UTC timestamps + np.datetime64[ns] if returned values represent wall times + object if mixed timezones + inferred_tz : tzinfo or None + + Raises + ------ + ValueError : if data cannot be converted to datetimes + """ + assert errors in ["raise", "ignore", "coerce"] + + # if str-dtype, convert + data = np.array(data, copy=False, dtype=np.object_) + + result, tz_parsed = tslib.array_to_datetime( + data, + errors=errors, + utc=utc, + dayfirst=dayfirst, + yearfirst=yearfirst, + ) + + if tz_parsed is not None: + # We can take a shortcut since the datetime64 numpy array + # is in UTC + # Return i8 values to denote unix timestamps + return result.view("i8"), tz_parsed + elif result.dtype.kind == "M": + # returning M8[ns] denotes wall-times; since tz is None + # the distinction is a thin one + return result, tz_parsed + elif result.dtype == object: + # GH#23675 when called via `pd.to_datetime`, returning an object-dtype + # array is allowed. 
When called via `pd.DatetimeIndex`, we can + # only accept datetime64 dtype, so raise TypeError if object-dtype + # is returned, as that indicates the values can be recognized as + # datetimes but they have conflicting timezones/awareness + if allow_object: + return result, tz_parsed + raise TypeError("DatetimeIndex has mixed timezones") + else: # pragma: no cover + # GH#23675 this TypeError should never be hit, whereas the TypeError + # in the object-dtype branch above is reachable. + raise TypeError(result) + + +def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None = None): + """ + Convert data based on dtype conventions, issuing + errors where appropriate. + + Parameters + ---------- + data : np.ndarray or pd.Index + copy : bool + tz : tzinfo or None, default None + + Returns + ------- + data : np.ndarray or pd.Index + copy : bool + + Raises + ------ + TypeError : PeriodDType data is passed + """ + if not hasattr(data, "dtype"): + # e.g. collections.deque + return data, copy + + if is_float_dtype(data.dtype): + # pre-2.0 we treated these as wall-times, inconsistent with ints + # GH#23675, GH#45573 deprecated to treat symmetrically with integer dtypes. + # Note: data.astype(np.int64) fails ARM tests, see + # https://github.com/pandas-dev/pandas/issues/49468. + data = data.astype(DT64NS_DTYPE).view("i8") + copy = False + + elif lib.is_np_dtype(data.dtype, "m") or is_bool_dtype(data.dtype): + # GH#29794 enforcing deprecation introduced in GH#23539 + raise TypeError(f"dtype {data.dtype} cannot be converted to datetime64[ns]") + elif isinstance(data.dtype, PeriodDtype): + # Note: without explicitly raising here, PeriodIndex + # test_setops.test_join_does_not_recur fails + raise TypeError( + "Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead" + ) + + elif isinstance(data.dtype, ExtensionDtype) and not isinstance( + data.dtype, DatetimeTZDtype + ): + # TODO: We have no tests for these + data = np.array(data, dtype=np.object_) + copy = False + + return data, copy + + +# ------------------------------------------------------------------- +# Validation and Inference + + +def _maybe_infer_tz(tz: tzinfo | None, inferred_tz: tzinfo | None) -> tzinfo | None: + """ + If a timezone is inferred from data, check that it is compatible with + the user-provided timezone, if any. + + Parameters + ---------- + tz : tzinfo or None + inferred_tz : tzinfo or None + + Returns + ------- + tz : tzinfo or None + + Raises + ------ + TypeError : if both timezones are present but do not match + """ + if tz is None: + tz = inferred_tz + elif inferred_tz is None: + pass + elif not timezones.tz_compare(tz, inferred_tz): + raise TypeError( + f"data is already tz-aware {inferred_tz}, unable to " + f"set specified tz: {tz}" + ) + return tz + + +def _validate_dt64_dtype(dtype): + """ + Check that a dtype, if passed, represents either a numpy datetime64[ns] + dtype or a pandas DatetimeTZDtype. + + Parameters + ---------- + dtype : object + + Returns + ------- + dtype : None, numpy.dtype, or DatetimeTZDtype + + Raises + ------ + ValueError : invalid dtype + + Notes + ----- + Unlike _validate_tz_from_dtype, this does _not_ allow non-existent + tz errors to go through + """ + if dtype is not None: + dtype = pandas_dtype(dtype) + if dtype == np.dtype("M8"): + # no precision, disallowed GH#24806 + msg = ( + "Passing in 'datetime64' dtype with no precision is not allowed. " + "Please pass in 'datetime64[ns]' instead." 
+ ) + raise ValueError(msg) + + if ( + isinstance(dtype, np.dtype) + and (dtype.kind != "M" or not is_supported_unit(get_unit_from_dtype(dtype))) + ) or not isinstance(dtype, (np.dtype, DatetimeTZDtype)): + raise ValueError( + f"Unexpected value for 'dtype': '{dtype}'. " + "Must be 'datetime64[s]', 'datetime64[ms]', 'datetime64[us]', " + "'datetime64[ns]' or DatetimeTZDtype'." + ) + + if getattr(dtype, "tz", None): + # https://github.com/pandas-dev/pandas/issues/18595 + # Ensure that we have a standard timezone for pytz objects. + # Without this, things like adding an array of timedeltas and + # a tz-aware Timestamp (with a tz specific to its datetime) will + # be incorrect(ish?) for the array as a whole + dtype = cast(DatetimeTZDtype, dtype) + dtype = DatetimeTZDtype( + unit=dtype.unit, tz=timezones.tz_standardize(dtype.tz) + ) + + return dtype + + +def _validate_tz_from_dtype( + dtype, tz: tzinfo | None, explicit_tz_none: bool = False +) -> tzinfo | None: + """ + If the given dtype is a DatetimeTZDtype, extract the implied + tzinfo object from it and check that it does not conflict with the given + tz. + + Parameters + ---------- + dtype : dtype, str + tz : None, tzinfo + explicit_tz_none : bool, default False + Whether tz=None was passed explicitly, as opposed to lib.no_default. + + Returns + ------- + tz : consensus tzinfo + + Raises + ------ + ValueError : on tzinfo mismatch + """ + if dtype is not None: + if isinstance(dtype, str): + try: + dtype = DatetimeTZDtype.construct_from_string(dtype) + except TypeError: + # Things like `datetime64[ns]`, which is OK for the + # constructors, but also nonsense, which should be validated + # but not by us. We *do* allow non-existent tz errors to + # go through + pass + dtz = getattr(dtype, "tz", None) + if dtz is not None: + if tz is not None and not timezones.tz_compare(tz, dtz): + raise ValueError("cannot supply both a tz and a dtype with a tz") + if explicit_tz_none: + raise ValueError("Cannot pass both a timezone-aware dtype and tz=None") + tz = dtz + + if tz is not None and lib.is_np_dtype(dtype, "M"): + # We also need to check for the case where the user passed a + # tz-naive dtype (i.e. datetime64[ns]) + if tz is not None and not timezones.tz_compare(tz, dtz): + raise ValueError( + "cannot supply both a tz and a " + "timezone-naive dtype (i.e. datetime64[ns])" + ) + + return tz + + +def _infer_tz_from_endpoints( + start: Timestamp, end: Timestamp, tz: tzinfo | None +) -> tzinfo | None: + """ + If a timezone is not explicitly given via `tz`, see if one can + be inferred from the `start` and `end` endpoints. If more than one + of these inputs provides a timezone, require that they all agree. 
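+
+    For example (illustrative note, not upstream text): if both `start` and
+    `end` are localized to "US/Eastern" and `tz` is None, the inferred
+    "US/Eastern" tzinfo is returned; if their zones differ, a TypeError is
+    raised.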
+ + Parameters + ---------- + start : Timestamp + end : Timestamp + tz : tzinfo or None + + Returns + ------- + tz : tzinfo or None + + Raises + ------ + TypeError : if start and end timezones do not agree + """ + try: + inferred_tz = timezones.infer_tzinfo(start, end) + except AssertionError as err: + # infer_tzinfo raises AssertionError if passed mismatched timezones + raise TypeError( + "Start and end cannot both be tz-aware with different timezones" + ) from err + + inferred_tz = timezones.maybe_get_tz(inferred_tz) + tz = timezones.maybe_get_tz(tz) + + if tz is not None and inferred_tz is not None: + if not timezones.tz_compare(inferred_tz, tz): + raise AssertionError("Inferred time zone not equal to passed time zone") + + elif inferred_tz is not None: + tz = inferred_tz + + return tz + + +def _maybe_normalize_endpoints( + start: Timestamp | None, end: Timestamp | None, normalize: bool +): + if normalize: + if start is not None: + start = start.normalize() + + if end is not None: + end = end.normalize() + + return start, end + + +def _maybe_localize_point(ts, is_none, is_not_none, freq, tz, ambiguous, nonexistent): + """ + Localize a start or end Timestamp to the timezone of the corresponding + start or end Timestamp + + Parameters + ---------- + ts : start or end Timestamp to potentially localize + is_none : argument that should be None + is_not_none : argument that should not be None + freq : Tick, DateOffset, or None + tz : str, timezone object or None + ambiguous: str, localization behavior for ambiguous times + nonexistent: str, localization behavior for nonexistent times + + Returns + ------- + ts : Timestamp + """ + # Make sure start and end are timezone localized if: + # 1) freq = a Timedelta-like frequency (Tick) + # 2) freq = None i.e. generating a linspaced range + if is_none is None and is_not_none is not None: + # Note: We can't ambiguous='infer' a singular ambiguous time; however, + # we have historically defaulted ambiguous=False + ambiguous = ambiguous if ambiguous != "infer" else False + localize_args = {"ambiguous": ambiguous, "nonexistent": nonexistent, "tz": None} + if isinstance(freq, Tick) or freq is None: + localize_args["tz"] = tz + ts = ts.tz_localize(**localize_args) + return ts + + +def _generate_range( + start: Timestamp | None, + end: Timestamp | None, + periods: int | None, + offset: BaseOffset, + *, + unit: str, +): + """ + Generates a sequence of dates corresponding to the specified time + offset. Similar to dateutil.rrule except uses pandas DateOffset + objects to represent time increments. + + Parameters + ---------- + start : Timestamp or None + end : Timestamp or None + periods : int or None + offset : DateOffset + unit : str + + Notes + ----- + * This method is faster for generating weekdays than dateutil.rrule + * At least two of (start, end, periods) must be specified. + * If both start and end are specified, the returned dates will + satisfy start <= date <= end. 
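+    * Illustrative example (not part of the upstream docstring):
+      ``list(_generate_range(Timestamp("2021-01-01"), Timestamp("2021-01-04"),
+      None, to_offset("D"), unit="ns"))`` yields the four daily Timestamps
+      from 2021-01-01 through 2021-01-04 inclusive.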
+ + Returns + ------- + dates : generator object + """ + offset = to_offset(offset) + + # Argument 1 to "Timestamp" has incompatible type "Optional[Timestamp]"; + # expected "Union[integer[Any], float, str, date, datetime64]" + start = Timestamp(start) # type: ignore[arg-type] + if start is not NaT: + start = start.as_unit(unit) + else: + start = None + + # Argument 1 to "Timestamp" has incompatible type "Optional[Timestamp]"; + # expected "Union[integer[Any], float, str, date, datetime64]" + end = Timestamp(end) # type: ignore[arg-type] + if end is not NaT: + end = end.as_unit(unit) + else: + end = None + + if start and not offset.is_on_offset(start): + # Incompatible types in assignment (expression has type "datetime", + # variable has type "Optional[Timestamp]") + start = offset.rollforward(start) # type: ignore[assignment] + + elif end and not offset.is_on_offset(end): + # Incompatible types in assignment (expression has type "datetime", + # variable has type "Optional[Timestamp]") + end = offset.rollback(end) # type: ignore[assignment] + + # Unsupported operand types for < ("Timestamp" and "None") + if periods is None and end < start and offset.n >= 0: # type: ignore[operator] + end = None + periods = 0 + + if end is None: + # error: No overload variant of "__radd__" of "BaseOffset" matches + # argument type "None" + end = start + (periods - 1) * offset # type: ignore[operator] + + if start is None: + # error: No overload variant of "__radd__" of "BaseOffset" matches + # argument type "None" + start = end - (periods - 1) * offset # type: ignore[operator] + + start = cast(Timestamp, start) + end = cast(Timestamp, end) + + cur = start + if offset.n >= 0: + while cur <= end: + yield cur + + if cur == end: + # GH#24252 avoid overflows by not performing the addition + # in offset.apply unless we have to + break + + # faster than cur + offset + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Discarding nonzero nanoseconds in conversion", + category=UserWarning, + ) + next_date = offset._apply(cur) + next_date = next_date.as_unit(unit) + if next_date <= cur: + raise ValueError(f"Offset {offset} did not increment date") + cur = next_date + else: + while cur >= end: + yield cur + + if cur == end: + # GH#24252 avoid overflows by not performing the addition + # in offset.apply unless we have to + break + + # faster than cur + offset + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Discarding nonzero nanoseconds in conversion", + category=UserWarning, + ) + next_date = offset._apply(cur) + next_date = next_date.as_unit(unit) + if next_date >= cur: + raise ValueError(f"Offset {offset} did not decrement date") + cur = next_date diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/floating.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/floating.py new file mode 100644 index 00000000..bd3c03f9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/floating.py @@ -0,0 +1,173 @@ +from __future__ import annotations + +import numpy as np + +from pandas.core.dtypes.base import register_extension_dtype +from pandas.core.dtypes.common import is_float_dtype + +from pandas.core.arrays.numeric import ( + NumericArray, + NumericDtype, +) + + +class FloatingDtype(NumericDtype): + """ + An ExtensionDtype to hold a single size of floating dtype. + + These specific implementations are subclasses of the non-public + FloatingDtype. For example we have Float32Dtype to represent float32. 
+
+    The attributes name & type are set when these subclasses are created.
+    """
+
+    _default_np_dtype = np.dtype(np.float64)
+    _checker = is_float_dtype
+
+    @classmethod
+    def construct_array_type(cls) -> type[FloatingArray]:
+        """
+        Return the array type associated with this dtype.
+
+        Returns
+        -------
+        type
+        """
+        return FloatingArray
+
+    @classmethod
+    def _get_dtype_mapping(cls) -> dict[np.dtype, FloatingDtype]:
+        return NUMPY_FLOAT_TO_DTYPE
+
+    @classmethod
+    def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
+        """
+        Safely cast the values to the given dtype.
+
+        "safe" in this context means the casting is lossless.
+        """
+        # This is really only here for compatibility with IntegerDtype
+        return values.astype(dtype, copy=copy)
+
+
+class FloatingArray(NumericArray):
+    """
+    Array of floating (optional missing) values.
+
+    .. versionadded:: 1.2.0
+
+    .. warning::
+
+       FloatingArray is currently experimental, and its API or internal
+       implementation may change without warning. Especially the behaviour
+       regarding NaN (distinct from NA missing values) is subject to change.
+
+    We represent a FloatingArray with 2 numpy arrays:
+
+    - data: contains a numpy float array of the appropriate dtype
+    - mask: a boolean array holding a mask on the data, True is missing
+
+    To construct a FloatingArray from generic array-like input, use
+    :func:`pandas.array` with one of the float dtypes (see examples).
+
+    See :ref:`integer_na` for more.
+
+    Parameters
+    ----------
+    values : numpy.ndarray
+        A 1-d float-dtype array.
+    mask : numpy.ndarray
+        A 1-d boolean-dtype array indicating missing values.
+    copy : bool, default False
+        Whether to copy the `values` and `mask`.
+
+    Attributes
+    ----------
+    None
+
+    Methods
+    -------
+    None
+
+    Returns
+    -------
+    FloatingArray
+
+    Examples
+    --------
+    Create a FloatingArray with :func:`pandas.array`:
+
+    >>> pd.array([0.1, None, 0.3], dtype=pd.Float32Dtype())
+    <FloatingArray>
+    [0.1, <NA>, 0.3]
+    Length: 3, dtype: Float32
+
+    String aliases for the dtypes are also available. They are capitalized.
+
+    >>> pd.array([0.1, None, 0.3], dtype="Float32")
+    <FloatingArray>
+    [0.1, <NA>, 0.3]
+    Length: 3, dtype: Float32
+    """
+
+    _dtype_cls = FloatingDtype
+
+    # The value used to fill '_data' to avoid upcasting
+    _internal_fill_value = np.nan
+    # Fill values used for any/all
+    # Incompatible types in assignment (expression has type "float", base class
+    # "BaseMaskedArray" defined the type as "")
+    _truthy_value = 1.0  # type: ignore[assignment]
+    _falsey_value = 0.0  # type: ignore[assignment]
+
+
+_dtype_docstring = """
+An ExtensionDtype for {dtype} data.
+
+This dtype uses ``pd.NA`` as missing value indicator.
+ +Attributes +---------- +None + +Methods +------- +None + +Examples +-------- +For Float32Dtype: + +>>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float32Dtype()) +>>> ser.dtype +Float32Dtype() + +For Float64Dtype: + +>>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float64Dtype()) +>>> ser.dtype +Float64Dtype() +""" + +# create the Dtype + + +@register_extension_dtype +class Float32Dtype(FloatingDtype): + type = np.float32 + name = "Float32" + __doc__ = _dtype_docstring.format(dtype="float32") + + +@register_extension_dtype +class Float64Dtype(FloatingDtype): + type = np.float64 + name = "Float64" + __doc__ = _dtype_docstring.format(dtype="float64") + + +NUMPY_FLOAT_TO_DTYPE: dict[np.dtype, FloatingDtype] = { + np.dtype(np.float32): Float32Dtype(), + np.dtype(np.float64): Float64Dtype(), +} diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/integer.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/integer.py new file mode 100644 index 00000000..0e6e7a48 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/integer.py @@ -0,0 +1,270 @@ +from __future__ import annotations + +import numpy as np + +from pandas.core.dtypes.base import register_extension_dtype +from pandas.core.dtypes.common import is_integer_dtype + +from pandas.core.arrays.numeric import ( + NumericArray, + NumericDtype, +) + + +class IntegerDtype(NumericDtype): + """ + An ExtensionDtype to hold a single size & kind of integer dtype. + + These specific implementations are subclasses of the non-public + IntegerDtype. For example, we have Int8Dtype to represent signed int 8s. + + The attributes name & type are set when these subclasses are created. + """ + + _default_np_dtype = np.dtype(np.int64) + _checker = is_integer_dtype + + @classmethod + def construct_array_type(cls) -> type[IntegerArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + return IntegerArray + + @classmethod + def _get_dtype_mapping(cls) -> dict[np.dtype, IntegerDtype]: + return NUMPY_INT_TO_DTYPE + + @classmethod + def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray: + """ + Safely cast the values to the given dtype. + + "safe" in this context means the casting is lossless. e.g. if 'values' + has a floating dtype, each value must be an integer. + """ + try: + return values.astype(dtype, casting="safe", copy=copy) + except TypeError as err: + casted = values.astype(dtype, copy=copy) + if (casted == values).all(): + return casted + + raise TypeError( + f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}" + ) from err + + +class IntegerArray(NumericArray): + """ + Array of integer (optional missing) values. + + Uses :attr:`pandas.NA` as the missing value. + + .. warning:: + + IntegerArray is currently experimental, and its API or internal + implementation may change without warning. + + We represent an IntegerArray with 2 numpy arrays: + + - data: contains a numpy integer array of the appropriate dtype + - mask: a boolean array holding a mask on the data, True is missing + + To construct an IntegerArray from generic array-like input, use + :func:`pandas.array` with one of the integer dtypes (see examples). + + See :ref:`integer_na` for more. + + Parameters + ---------- + values : numpy.ndarray + A 1-d integer-dtype array. + mask : numpy.ndarray + A 1-d boolean-dtype array indicating missing values. + copy : bool, default False + Whether to copy the `values` and `mask`. 
+
+    Attributes
+    ----------
+    None
+
+    Methods
+    -------
+    None
+
+    Returns
+    -------
+    IntegerArray
+
+    Examples
+    --------
+    Create an IntegerArray with :func:`pandas.array`.
+
+    >>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype())
+    >>> int_array
+    <IntegerArray>
+    [1, <NA>, 3]
+    Length: 3, dtype: Int32
+
+    String aliases for the dtypes are also available. They are capitalized.
+
+    >>> pd.array([1, None, 3], dtype='Int32')
+    <IntegerArray>
+    [1, <NA>, 3]
+    Length: 3, dtype: Int32
+
+    >>> pd.array([1, None, 3], dtype='UInt16')
+    <IntegerArray>
+    [1, <NA>, 3]
+    Length: 3, dtype: UInt16
+    """
+
+    _dtype_cls = IntegerDtype
+
+    # The value used to fill '_data' to avoid upcasting
+    _internal_fill_value = 1
+    # Fill values used for any/all
+    # Incompatible types in assignment (expression has type "int", base class
+    # "BaseMaskedArray" defined the type as "")
+    _truthy_value = 1  # type: ignore[assignment]
+    _falsey_value = 0  # type: ignore[assignment]
+
+
+_dtype_docstring = """
+An ExtensionDtype for {dtype} integer data.
+
+Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`.
+
+Attributes
+----------
+None
+
+Methods
+-------
+None
+
+Examples
+--------
+For Int8Dtype:
+
+>>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype())
+>>> ser.dtype
+Int8Dtype()
+
+For Int16Dtype:
+
+>>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype())
+>>> ser.dtype
+Int16Dtype()
+
+For Int32Dtype:
+
+>>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype())
+>>> ser.dtype
+Int32Dtype()
+
+For Int64Dtype:
+
+>>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype())
+>>> ser.dtype
+Int64Dtype()
+
+For UInt8Dtype:
+
+>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype())
+>>> ser.dtype
+UInt8Dtype()
+
+For UInt16Dtype:
+
+>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype())
+>>> ser.dtype
+UInt16Dtype()
+
+For UInt32Dtype:
+
+>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype())
+>>> ser.dtype
+UInt32Dtype()
+
+For UInt64Dtype:
+
+>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype())
+>>> ser.dtype
+UInt64Dtype()
+"""
+
+# create the Dtype
+
+
+@register_extension_dtype
+class Int8Dtype(IntegerDtype):
+    type = np.int8
+    name = "Int8"
+    __doc__ = _dtype_docstring.format(dtype="int8")
+
+
+@register_extension_dtype
+class Int16Dtype(IntegerDtype):
+    type = np.int16
+    name = "Int16"
+    __doc__ = _dtype_docstring.format(dtype="int16")
+
+
+@register_extension_dtype
+class Int32Dtype(IntegerDtype):
+    type = np.int32
+    name = "Int32"
+    __doc__ = _dtype_docstring.format(dtype="int32")
+
+
+@register_extension_dtype
+class Int64Dtype(IntegerDtype):
+    type = np.int64
+    name = "Int64"
+    __doc__ = _dtype_docstring.format(dtype="int64")
+
+
+@register_extension_dtype
+class UInt8Dtype(IntegerDtype):
+    type = np.uint8
+    name = "UInt8"
+    __doc__ = _dtype_docstring.format(dtype="uint8")
+
+
+@register_extension_dtype
+class UInt16Dtype(IntegerDtype):
+    type = np.uint16
+    name = "UInt16"
+    __doc__ = _dtype_docstring.format(dtype="uint16")
+
+
+@register_extension_dtype
+class UInt32Dtype(IntegerDtype):
+    type = np.uint32
+    name = "UInt32"
+    __doc__ = _dtype_docstring.format(dtype="uint32")
+
+
+@register_extension_dtype
+class UInt64Dtype(IntegerDtype):
+    type = np.uint64
+    name = "UInt64"
+    __doc__ = _dtype_docstring.format(dtype="uint64")
+
+
+NUMPY_INT_TO_DTYPE: dict[np.dtype, IntegerDtype] = {
+    np.dtype(np.int8): Int8Dtype(),
+    np.dtype(np.int16): Int16Dtype(),
+    np.dtype(np.int32): Int32Dtype(),
+    np.dtype(np.int64): Int64Dtype(),
+    np.dtype(np.uint8): UInt8Dtype(),
+    np.dtype(np.uint16): UInt16Dtype(),
+    np.dtype(np.uint32): UInt32Dtype(),
+
np.dtype(np.uint64): UInt64Dtype(), +} diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/interval.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/interval.py new file mode 100644 index 00000000..d0510ede --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/interval.py @@ -0,0 +1,1931 @@ +from __future__ import annotations + +import operator +from operator import ( + le, + lt, +) +import textwrap +from typing import ( + TYPE_CHECKING, + Literal, + Union, + overload, +) + +import numpy as np + +from pandas._config import get_option + +from pandas._libs import lib +from pandas._libs.interval import ( + VALID_CLOSED, + Interval, + IntervalMixin, + intervals_to_interval_bounds, +) +from pandas._libs.missing import NA +from pandas._typing import ( + ArrayLike, + AxisInt, + Dtype, + FillnaOptions, + IntervalClosedType, + NpDtype, + PositionalIndexer, + ScalarIndexer, + Self, + SequenceIndexer, + SortKind, + TimeArrayLike, + npt, +) +from pandas.compat.numpy import function as nv +from pandas.errors import IntCastingNaNError +from pandas.util._decorators import Appender + +from pandas.core.dtypes.cast import ( + LossySetitemError, + maybe_upcast_numeric_to_64bit, +) +from pandas.core.dtypes.common import ( + is_float_dtype, + is_integer_dtype, + is_list_like, + is_object_dtype, + is_scalar, + is_string_dtype, + needs_i8_conversion, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + IntervalDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCDatetimeIndex, + ABCIntervalIndex, + ABCPeriodIndex, +) +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + notna, +) + +from pandas.core.algorithms import ( + isin, + take, + unique, + value_counts_internal as value_counts, +) +from pandas.core.arrays.base import ( + ExtensionArray, + _extension_array_shared_docs, +) +from pandas.core.arrays.datetimes import DatetimeArray +from pandas.core.arrays.timedeltas import TimedeltaArray +import pandas.core.common as com +from pandas.core.construction import ( + array as pd_array, + ensure_wrapped_if_datetimelike, + extract_array, +) +from pandas.core.indexers import check_array_indexer +from pandas.core.ops import ( + invalid_comparison, + unpack_zerodim_and_defer, +) + +if TYPE_CHECKING: + from collections.abc import ( + Iterator, + Sequence, + ) + + from pandas import ( + Index, + Series, + ) + + +IntervalSideT = Union[TimeArrayLike, np.ndarray] +IntervalOrNA = Union[Interval, float] + +_interval_shared_docs: dict[str, str] = {} + +_shared_docs_kwargs = { + "klass": "IntervalArray", + "qualname": "arrays.IntervalArray", + "name": "", +} + + +_interval_shared_docs[ + "class" +] = """ +%(summary)s + +Parameters +---------- +data : array-like (1-dimensional) + Array-like (ndarray, :class:`DateTimeArray`, :class:`TimeDeltaArray`) containing + Interval objects from which to build the %(klass)s. +closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both or + neither. +dtype : dtype or None, default None + If None, dtype will be inferred. +copy : bool, default False + Copy the input data. +%(name)s\ +verify_integrity : bool, default True + Verify that the %(klass)s is valid. 
+
+Attributes
+----------
+left
+right
+closed
+mid
+length
+is_empty
+is_non_overlapping_monotonic
+%(extra_attributes)s\
+
+Methods
+-------
+from_arrays
+from_tuples
+from_breaks
+contains
+overlaps
+set_closed
+to_tuples
+%(extra_methods)s\
+
+See Also
+--------
+Index : The base pandas Index type.
+Interval : A bounded slice-like interval; the elements of an %(klass)s.
+interval_range : Function to create a fixed frequency IntervalIndex.
+cut : Bin values into discrete Intervals.
+qcut : Bin values into equal-sized Intervals based on rank or sample quantiles.
+
+Notes
+-----
+See the `user guide
+<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#intervalindex>`__
+for more.
+
+%(examples)s\
+"""
+
+
+@Appender(
+    _interval_shared_docs["class"]
+    % {
+        "klass": "IntervalArray",
+        "summary": "Pandas array for interval data that are closed on the same side.",
+        "name": "",
+        "extra_attributes": "",
+        "extra_methods": "",
+        "examples": textwrap.dedent(
+            """\
+    Examples
+    --------
+    A new ``IntervalArray`` can be constructed directly from an array-like of
+    ``Interval`` objects:
+
+    >>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
+    <IntervalArray>
+    [(0, 1], (1, 5]]
+    Length: 2, dtype: interval[int64, right]
+
+    It may also be constructed using one of the constructor
+    methods: :meth:`IntervalArray.from_arrays`,
+    :meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`.
+    """
+        ),
+    }
+)
+class IntervalArray(IntervalMixin, ExtensionArray):
+    can_hold_na = True
+    _na_value = _fill_value = np.nan
+
+    @property
+    def ndim(self) -> Literal[1]:
+        return 1
+
+    # To make mypy recognize the fields
+    _left: IntervalSideT
+    _right: IntervalSideT
+    _dtype: IntervalDtype
+
+    # ---------------------------------------------------------------------
+    # Constructors
+
+    def __new__(
+        cls,
+        data,
+        closed: IntervalClosedType | None = None,
+        dtype: Dtype | None = None,
+        copy: bool = False,
+        verify_integrity: bool = True,
+    ):
+        data = extract_array(data, extract_numpy=True)
+
+        if isinstance(data, cls):
+            left: IntervalSideT = data._left
+            right: IntervalSideT = data._right
+            closed = closed or data.closed
+            dtype = IntervalDtype(left.dtype, closed=closed)
+        else:
+            # don't allow scalars
+            if is_scalar(data):
+                msg = (
+                    f"{cls.__name__}(...) 
must be called with a collection " + f"of some kind, {data} was passed" + ) + raise TypeError(msg) + + # might need to convert empty or purely na data + data = _maybe_convert_platform_interval(data) + left, right, infer_closed = intervals_to_interval_bounds( + data, validate_closed=closed is None + ) + if left.dtype == object: + left = lib.maybe_convert_objects(left) + right = lib.maybe_convert_objects(right) + closed = closed or infer_closed + + left, right, dtype = cls._ensure_simple_new_inputs( + left, + right, + closed=closed, + copy=copy, + dtype=dtype, + ) + + if verify_integrity: + cls._validate(left, right, dtype=dtype) + + return cls._simple_new( + left, + right, + dtype=dtype, + ) + + @classmethod + def _simple_new( + cls, + left: IntervalSideT, + right: IntervalSideT, + dtype: IntervalDtype, + ) -> Self: + result = IntervalMixin.__new__(cls) + result._left = left + result._right = right + result._dtype = dtype + + return result + + @classmethod + def _ensure_simple_new_inputs( + cls, + left, + right, + closed: IntervalClosedType | None = None, + copy: bool = False, + dtype: Dtype | None = None, + ) -> tuple[IntervalSideT, IntervalSideT, IntervalDtype]: + """Ensure correctness of input parameters for cls._simple_new.""" + from pandas.core.indexes.base import ensure_index + + left = ensure_index(left, copy=copy) + left = maybe_upcast_numeric_to_64bit(left) + + right = ensure_index(right, copy=copy) + right = maybe_upcast_numeric_to_64bit(right) + + if closed is None and isinstance(dtype, IntervalDtype): + closed = dtype.closed + + closed = closed or "right" + + if dtype is not None: + # GH 19262: dtype must be an IntervalDtype to override inferred + dtype = pandas_dtype(dtype) + if isinstance(dtype, IntervalDtype): + if dtype.subtype is not None: + left = left.astype(dtype.subtype) + right = right.astype(dtype.subtype) + else: + msg = f"dtype must be an IntervalDtype, got {dtype}" + raise TypeError(msg) + + if dtype.closed is None: + # possibly loading an old pickle + dtype = IntervalDtype(dtype.subtype, closed) + elif closed != dtype.closed: + raise ValueError("closed keyword does not match dtype.closed") + + # coerce dtypes to match if needed + if is_float_dtype(left.dtype) and is_integer_dtype(right.dtype): + right = right.astype(left.dtype) + elif is_float_dtype(right.dtype) and is_integer_dtype(left.dtype): + left = left.astype(right.dtype) + + if type(left) != type(right): + msg = ( + f"must not have differing left [{type(left).__name__}] and " + f"right [{type(right).__name__}] types" + ) + raise ValueError(msg) + if isinstance(left.dtype, CategoricalDtype) or is_string_dtype(left.dtype): + # GH 19016 + msg = ( + "category, object, and string subtypes are not supported " + "for IntervalArray" + ) + raise TypeError(msg) + if isinstance(left, ABCPeriodIndex): + msg = "Period dtypes are not supported, use a PeriodIndex instead" + raise ValueError(msg) + if isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz): + msg = ( + "left and right must have the same time zone, got " + f"'{left.tz}' and '{right.tz}'" + ) + raise ValueError(msg) + + # For dt64/td64 we want DatetimeArray/TimedeltaArray instead of ndarray + left = ensure_wrapped_if_datetimelike(left) + left = extract_array(left, extract_numpy=True) + right = ensure_wrapped_if_datetimelike(right) + right = extract_array(right, extract_numpy=True) + + lbase = getattr(left, "_ndarray", left).base + rbase = getattr(right, "_ndarray", right).base + if lbase is not None and lbase is rbase: + # If these share data, 
then setitem could corrupt our IA
+            right = right.copy()
+
+        dtype = IntervalDtype(left.dtype, closed=closed)
+
+        return left, right, dtype
+
+    @classmethod
+    def _from_sequence(
+        cls,
+        scalars,
+        *,
+        dtype: Dtype | None = None,
+        copy: bool = False,
+    ) -> Self:
+        return cls(scalars, dtype=dtype, copy=copy)
+
+    @classmethod
+    def _from_factorized(cls, values: np.ndarray, original: IntervalArray) -> Self:
+        if len(values) == 0:
+            # An empty array returns object-dtype here. We can't create
+            # a new IA from an (empty) object-dtype array, so turn it into the
+            # correct dtype.
+            values = values.astype(original.dtype.subtype)
+        return cls(values, closed=original.closed)
+
+    _interval_shared_docs["from_breaks"] = textwrap.dedent(
+        """
+        Construct an %(klass)s from an array of splits.
+
+        Parameters
+        ----------
+        breaks : array-like (1-dimensional)
+            Left and right bounds for each interval.
+        closed : {'left', 'right', 'both', 'neither'}, default 'right'
+            Whether the intervals are closed on the left-side, right-side, both
+            or neither.\
+        %(name)s
+        copy : bool, default False
+            Copy the data.
+        dtype : dtype or None, default None
+            If None, dtype will be inferred.
+
+        Returns
+        -------
+        %(klass)s
+
+        See Also
+        --------
+        interval_range : Function to create a fixed frequency IntervalIndex.
+        %(klass)s.from_arrays : Construct from a left and right array.
+        %(klass)s.from_tuples : Construct from a sequence of tuples.
+
+        %(examples)s\
+        """
+    )
+
+    @classmethod
+    @Appender(
+        _interval_shared_docs["from_breaks"]
+        % {
+            "klass": "IntervalArray",
+            "name": "",
+            "examples": textwrap.dedent(
+                """\
+        Examples
+        --------
+        >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])
+        <IntervalArray>
+        [(0, 1], (1, 2], (2, 3]]
+        Length: 3, dtype: interval[int64, right]
+        """
+            ),
+        }
+    )
+    def from_breaks(
+        cls,
+        breaks,
+        closed: IntervalClosedType | None = "right",
+        copy: bool = False,
+        dtype: Dtype | None = None,
+    ) -> Self:
+        breaks = _maybe_convert_platform_interval(breaks)
+
+        return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype)
+
+    _interval_shared_docs["from_arrays"] = textwrap.dedent(
+        """
+        Construct from two arrays defining the left and right bounds.
+
+        Parameters
+        ----------
+        left : array-like (1-dimensional)
+            Left bounds for each interval.
+        right : array-like (1-dimensional)
+            Right bounds for each interval.
+        closed : {'left', 'right', 'both', 'neither'}, default 'right'
+            Whether the intervals are closed on the left-side, right-side, both
+            or neither.\
+        %(name)s
+        copy : bool, default False
+            Copy the data.
+        dtype : dtype, optional
+            If None, dtype will be inferred.
+
+        Returns
+        -------
+        %(klass)s
+
+        Raises
+        ------
+        ValueError
+            When a value is missing in only one of `left` or `right`.
+            When a value in `left` is greater than the corresponding value
+            in `right`.
+
+        See Also
+        --------
+        interval_range : Function to create a fixed frequency IntervalIndex.
+        %(klass)s.from_breaks : Construct an %(klass)s from an array of
+            splits.
+        %(klass)s.from_tuples : Construct an %(klass)s from an
+            array-like of tuples.
+
+        Notes
+        -----
+        Each element of `left` must be less than or equal to the `right`
+        element at the same position. If an element is missing, it must be
+        missing in both `left` and `right`. A TypeError is raised when
+        using an unsupported type for `left` or `right`. At the moment,
+        'category', 'object', and 'string' subtypes are not supported.
+
+        %(examples)s\
+        """
+    )
+
+    @classmethod
+    @Appender(
+        _interval_shared_docs["from_arrays"]
+        % {
+            "klass": "IntervalArray",
+            "name": "",
+            "examples": textwrap.dedent(
+                """\
+        Examples
+        --------
+        >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])
+        <IntervalArray>
+        [(0, 1], (1, 2], (2, 3]]
+        Length: 3, dtype: interval[int64, right]
+        """
+            ),
+        }
+    )
+    def from_arrays(
+        cls,
+        left,
+        right,
+        closed: IntervalClosedType | None = "right",
+        copy: bool = False,
+        dtype: Dtype | None = None,
+    ) -> Self:
+        left = _maybe_convert_platform_interval(left)
+        right = _maybe_convert_platform_interval(right)
+
+        left, right, dtype = cls._ensure_simple_new_inputs(
+            left,
+            right,
+            closed=closed,
+            copy=copy,
+            dtype=dtype,
+        )
+        cls._validate(left, right, dtype=dtype)
+
+        return cls._simple_new(left, right, dtype=dtype)
+
+    _interval_shared_docs["from_tuples"] = textwrap.dedent(
+        """
+        Construct an %(klass)s from an array-like of tuples.
+
+        Parameters
+        ----------
+        data : array-like (1-dimensional)
+            Array of tuples.
+        closed : {'left', 'right', 'both', 'neither'}, default 'right'
+            Whether the intervals are closed on the left-side, right-side, both
+            or neither.\
+        %(name)s
+        copy : bool, default False
+            By-default copy the data, this is compat only and ignored.
+        dtype : dtype or None, default None
+            If None, dtype will be inferred.
+
+        Returns
+        -------
+        %(klass)s
+
+        See Also
+        --------
+        interval_range : Function to create a fixed frequency IntervalIndex.
+        %(klass)s.from_arrays : Construct an %(klass)s from a left and
+            right array.
+        %(klass)s.from_breaks : Construct an %(klass)s from an array of
+            splits.
+
+        %(examples)s\
+        """
+    )
+
+    @classmethod
+    @Appender(
+        _interval_shared_docs["from_tuples"]
+        % {
+            "klass": "IntervalArray",
+            "name": "",
+            "examples": textwrap.dedent(
+                """\
+        Examples
+        --------
+        >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
+        <IntervalArray>
+        [(0, 1], (1, 2]]
+        Length: 2, dtype: interval[int64, right]
+        """
+            ),
+        }
+    )
+    def from_tuples(
+        cls,
+        data,
+        closed: IntervalClosedType | None = "right",
+        copy: bool = False,
+        dtype: Dtype | None = None,
+    ) -> Self:
+        if len(data):
+            left, right = [], []
+        else:
+            # ensure that empty data keeps input dtype
+            left = right = data
+
+        for d in data:
+            if not isinstance(d, tuple) and isna(d):
+                lhs = rhs = np.nan
+            else:
+                name = cls.__name__
+                try:
+                    # need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...]
+                    lhs, rhs = d
+                except ValueError as err:
+                    msg = f"{name}.from_tuples requires tuples of length 2, got {d}"
+                    raise ValueError(msg) from err
+                except TypeError as err:
+                    msg = f"{name}.from_tuples received an invalid item, {d}"
+                    raise TypeError(msg) from err
+            left.append(lhs)
+            right.append(rhs)
+
+        return cls.from_arrays(left, right, closed, copy=False, dtype=dtype)
+
+    @classmethod
+    def _validate(cls, left, right, dtype: IntervalDtype) -> None:
+        """
+        Verify that the IntervalArray is valid.
+ + Checks that + + * dtype is correct + * left and right match lengths + * left and right have the same missing values + * left is always below right + """ + if not isinstance(dtype, IntervalDtype): + msg = f"invalid dtype: {dtype}" + raise ValueError(msg) + if len(left) != len(right): + msg = "left and right must have the same length" + raise ValueError(msg) + left_mask = notna(left) + right_mask = notna(right) + if not (left_mask == right_mask).all(): + msg = ( + "missing values must be missing in the same " + "location both left and right sides" + ) + raise ValueError(msg) + if not (left[left_mask] <= right[left_mask]).all(): + msg = "left side of interval must be <= right side" + raise ValueError(msg) + + def _shallow_copy(self, left, right) -> Self: + """ + Return a new IntervalArray with the replacement attributes + + Parameters + ---------- + left : Index + Values to be used for the left-side of the intervals. + right : Index + Values to be used for the right-side of the intervals. + """ + dtype = IntervalDtype(left.dtype, closed=self.closed) + left, right, dtype = self._ensure_simple_new_inputs(left, right, dtype=dtype) + + return self._simple_new(left, right, dtype=dtype) + + # --------------------------------------------------------------------- + # Descriptive + + @property + def dtype(self) -> IntervalDtype: + return self._dtype + + @property + def nbytes(self) -> int: + return self.left.nbytes + self.right.nbytes + + @property + def size(self) -> int: + # Avoid materializing self.values + return self.left.size + + # --------------------------------------------------------------------- + # EA Interface + + def __iter__(self) -> Iterator: + return iter(np.asarray(self)) + + def __len__(self) -> int: + return len(self._left) + + @overload + def __getitem__(self, key: ScalarIndexer) -> IntervalOrNA: + ... + + @overload + def __getitem__(self, key: SequenceIndexer) -> Self: + ... 
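+
+    # Editorial note (not upstream): the two @overload stubs above exist only
+    # for static type checking -- a scalar indexer returns a single Interval
+    # (or the fill value for a missing entry), while a sequence indexer
+    # returns a new IntervalArray. The runtime implementation below handles
+    # both cases.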
+
+    def __getitem__(self, key: PositionalIndexer) -> Self | IntervalOrNA:
+        key = check_array_indexer(self, key)
+        left = self._left[key]
+        right = self._right[key]
+
+        if not isinstance(left, (np.ndarray, ExtensionArray)):
+            # scalar
+            if is_scalar(left) and isna(left):
+                return self._fill_value
+            return Interval(left, right, self.closed)
+        if np.ndim(left) > 1:
+            # GH#30588 multi-dimensional indexer disallowed
+            raise ValueError("multi-dimensional indexing not allowed")
+        # Argument 2 to "_simple_new" of "IntervalArray" has incompatible type
+        # "Union[Period, Timestamp, Timedelta, NaTType, DatetimeArray, TimedeltaArray,
+        # ndarray[Any, Any]]"; expected "Union[Union[DatetimeArray, TimedeltaArray],
+        # ndarray[Any, Any]]"
+        return self._simple_new(left, right, dtype=self.dtype)  # type: ignore[arg-type]
+
+    def __setitem__(self, key, value) -> None:
+        value_left, value_right = self._validate_setitem_value(value)
+        key = check_array_indexer(self, key)
+
+        self._left[key] = value_left
+        self._right[key] = value_right
+
+    def _cmp_method(self, other, op):
+        # ensure pandas array for list-like and eliminate non-interval scalars
+        if is_list_like(other):
+            if len(self) != len(other):
+                raise ValueError("Lengths must match to compare")
+            other = pd_array(other)
+        elif not isinstance(other, Interval):
+            # non-interval scalar -> no matches
+            if other is NA:
+                # GH#31882
+                from pandas.core.arrays import BooleanArray
+
+                arr = np.empty(self.shape, dtype=bool)
+                mask = np.ones(self.shape, dtype=bool)
+                return BooleanArray(arr, mask)
+            return invalid_comparison(self, other, op)
+
+        # determine the dtype of the elements we want to compare
+        if isinstance(other, Interval):
+            other_dtype = pandas_dtype("interval")
+        elif not isinstance(other.dtype, CategoricalDtype):
+            other_dtype = other.dtype
+        else:
+            # for categorical defer to categories for dtype
+            other_dtype = other.categories.dtype
+
+        # extract intervals if we have interval categories with matching closed
+        if isinstance(other_dtype, IntervalDtype):
+            if self.closed != other.categories.closed:
+                return invalid_comparison(self, other, op)
+
+            other = other.categories.take(
+                other.codes, allow_fill=True, fill_value=other.categories._na_value
+            )
+
+        # interval-like -> need same closed and matching endpoints
+        if isinstance(other_dtype, IntervalDtype):
+            if self.closed != other.closed:
+                return invalid_comparison(self, other, op)
+            elif not isinstance(other, Interval):
+                other = type(self)(other)
+
+            if op is operator.eq:
+                return (self._left == other.left) & (self._right == other.right)
+            elif op is operator.ne:
+                return (self._left != other.left) | (self._right != other.right)
+            elif op is operator.gt:
+                return (self._left > other.left) | (
+                    (self._left == other.left) & (self._right > other.right)
+                )
+            elif op is operator.ge:
+                return (self == other) | (self > other)
+            elif op is operator.lt:
+                return (self._left < other.left) | (
+                    (self._left == other.left) & (self._right < other.right)
+                )
+            else:
+                # operator.le
+                return (self == other) | (self < other)
+
+        # non-interval/non-object dtype -> no matches
+        if not is_object_dtype(other_dtype):
+            return invalid_comparison(self, other, op)
+
+        # object dtype -> iteratively check for intervals
+        result = np.zeros(len(self), dtype=bool)
+        for i, obj in enumerate(other):
+            try:
+                result[i] = op(self[i], obj)
+            except TypeError:
+                if obj is NA:
+                    # comparison with np.nan returns NA
+                    # github.com/pandas-dev/pandas/pull/37124#discussion_r509095092
+                    result = result.astype(object)
+                    result[i] = NA
+
else: + raise + return result + + @unpack_zerodim_and_defer("__eq__") + def __eq__(self, other): + return self._cmp_method(other, operator.eq) + + @unpack_zerodim_and_defer("__ne__") + def __ne__(self, other): + return self._cmp_method(other, operator.ne) + + @unpack_zerodim_and_defer("__gt__") + def __gt__(self, other): + return self._cmp_method(other, operator.gt) + + @unpack_zerodim_and_defer("__ge__") + def __ge__(self, other): + return self._cmp_method(other, operator.ge) + + @unpack_zerodim_and_defer("__lt__") + def __lt__(self, other): + return self._cmp_method(other, operator.lt) + + @unpack_zerodim_and_defer("__le__") + def __le__(self, other): + return self._cmp_method(other, operator.le) + + def argsort( + self, + *, + ascending: bool = True, + kind: SortKind = "quicksort", + na_position: str = "last", + **kwargs, + ) -> np.ndarray: + ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs) + + if ascending and kind == "quicksort" and na_position == "last": + # TODO: in an IntervalIndex we can re-use the cached + # IntervalTree.left_sorter + return np.lexsort((self.right, self.left)) + + # TODO: other cases we can use lexsort for? much more performant. + return super().argsort( + ascending=ascending, kind=kind, na_position=na_position, **kwargs + ) + + def min(self, *, axis: AxisInt | None = None, skipna: bool = True) -> IntervalOrNA: + nv.validate_minmax_axis(axis, self.ndim) + + if not len(self): + return self._na_value + + mask = self.isna() + if mask.any(): + if not skipna: + return self._na_value + obj = self[~mask] + else: + obj = self + + indexer = obj.argsort()[0] + return obj[indexer] + + def max(self, *, axis: AxisInt | None = None, skipna: bool = True) -> IntervalOrNA: + nv.validate_minmax_axis(axis, self.ndim) + + if not len(self): + return self._na_value + + mask = self.isna() + if mask.any(): + if not skipna: + return self._na_value + obj = self[~mask] + else: + obj = self + + indexer = obj.argsort()[-1] + return obj[indexer] + + def _pad_or_backfill( # pylint: disable=useless-parent-delegation + self, *, method: FillnaOptions, limit: int | None = None, copy: bool = True + ) -> Self: + # TODO(3.0): after EA.fillna 'method' deprecation is enforced, we can remove + # this method entirely. + return super()._pad_or_backfill(method=method, limit=limit, copy=copy) + + def fillna( + self, value=None, method=None, limit: int | None = None, copy: bool = True + ) -> Self: + """ + Fill NA/NaN values using the specified method. + + Parameters + ---------- + value : scalar, dict, Series + If a scalar value is passed it is used to fill all missing values. + Alternatively, a Series or dict can be used to fill in different + values for each index. The value should not be a list. The + value(s) passed should be either Interval objects or NA/NaN. + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + (Not implemented yet for IntervalArray) + Method to use for filling holes in reindexed Series + limit : int, default None + (Not implemented yet for IntervalArray) + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill. In other words, if there is + a gap with more than this number of consecutive NaNs, it will only + be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. + copy : bool, default True + Whether to make a copy of the data before filling. 
If False, then + the original should be modified and no new memory should be allocated. + For ExtensionArray subclasses that cannot do this, it is at the + author's discretion whether to ignore "copy=False" or to raise. + + Returns + ------- + filled : IntervalArray with NA/NaN filled + """ + if copy is False: + raise NotImplementedError + if method is not None: + return super().fillna(value=value, method=method, limit=limit) + + value_left, value_right = self._validate_scalar(value) + + left = self.left.fillna(value=value_left) + right = self.right.fillna(value=value_right) + return self._shallow_copy(left, right) + + def astype(self, dtype, copy: bool = True): + """ + Cast to an ExtensionArray or NumPy array with dtype 'dtype'. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + + copy : bool, default True + Whether to copy the data, even if not necessary. If False, + a copy is made only if the old dtype does not match the + new dtype. + + Returns + ------- + array : ExtensionArray or ndarray + ExtensionArray or NumPy ndarray with 'dtype' for its dtype. + """ + from pandas import Index + + if dtype is not None: + dtype = pandas_dtype(dtype) + + if isinstance(dtype, IntervalDtype): + if dtype == self.dtype: + return self.copy() if copy else self + + if is_float_dtype(self.dtype.subtype) and needs_i8_conversion( + dtype.subtype + ): + # This is allowed on the Index.astype but we disallow it here + msg = ( + f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible" + ) + raise TypeError(msg) + + # need to cast to different subtype + try: + # We need to use Index rules for astype to prevent casting + # np.nan entries to int subtypes + new_left = Index(self._left, copy=False).astype(dtype.subtype) + new_right = Index(self._right, copy=False).astype(dtype.subtype) + except IntCastingNaNError: + # e.g test_subtype_integer + raise + except (TypeError, ValueError) as err: + # e.g. test_subtype_integer_errors f8->u8 can be lossy + # and raises ValueError + msg = ( + f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible" + ) + raise TypeError(msg) from err + return self._shallow_copy(new_left, new_right) + else: + try: + return super().astype(dtype, copy=copy) + except (TypeError, ValueError) as err: + msg = f"Cannot cast {type(self).__name__} to dtype {dtype}" + raise TypeError(msg) from err + + def equals(self, other) -> bool: + if type(self) != type(other): + return False + + return bool( + self.closed == other.closed + and self.left.equals(other.left) + and self.right.equals(other.right) + ) + + @classmethod + def _concat_same_type(cls, to_concat: Sequence[IntervalArray]) -> Self: + """ + Concatenate multiple IntervalArray + + Parameters + ---------- + to_concat : sequence of IntervalArray + + Returns + ------- + IntervalArray + """ + closed_set = {interval.closed for interval in to_concat} + if len(closed_set) != 1: + raise ValueError("Intervals must all be closed on the same side.") + closed = closed_set.pop() + + left = np.concatenate([interval.left for interval in to_concat]) + right = np.concatenate([interval.right for interval in to_concat]) + + left, right, dtype = cls._ensure_simple_new_inputs(left, right, closed=closed) + + return cls._simple_new(left, right, dtype=dtype) + + def copy(self) -> Self: + """ + Return a copy of the array. 
+ + Returns + ------- + IntervalArray + """ + left = self._left.copy() + right = self._right.copy() + dtype = self.dtype + return self._simple_new(left, right, dtype=dtype) + + def isna(self) -> np.ndarray: + return isna(self._left) + + def shift(self, periods: int = 1, fill_value: object = None) -> IntervalArray: + if not len(self) or periods == 0: + return self.copy() + + self._validate_scalar(fill_value) + + # ExtensionArray.shift doesn't work for two reasons + # 1. IntervalArray.dtype.na_value may not be correct for the dtype. + # 2. IntervalArray._from_sequence only accepts NaN for missing values, + # not other values like NaT + + empty_len = min(abs(periods), len(self)) + if isna(fill_value): + from pandas import Index + + fill_value = Index(self._left, copy=False)._na_value + empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1)) + else: + empty = self._from_sequence([fill_value] * empty_len) + + if periods > 0: + a = empty + b = self[:-periods] + else: + a = self[abs(periods) :] + b = empty + return self._concat_same_type([a, b]) + + def take( + self, + indices, + *, + allow_fill: bool = False, + fill_value=None, + axis=None, + **kwargs, + ) -> Self: + """ + Take elements from the IntervalArray. + + Parameters + ---------- + indices : sequence of integers + Indices to be taken. + + allow_fill : bool, default False + How to handle negative values in `indices`. + + * False: negative values in `indices` indicate positional indices + from the right (the default). This is similar to + :func:`numpy.take`. + + * True: negative values in `indices` indicate + missing values. These values are set to `fill_value`. Any other + other negative values raise a ``ValueError``. + + fill_value : Interval or NA, optional + Fill value to use for NA-indices when `allow_fill` is True. + This may be ``None``, in which case the default NA value for + the type, ``self.dtype.na_value``, is used. + + For many ExtensionArrays, there will be two representations of + `fill_value`: a user-facing "boxed" scalar, and a low-level + physical NA value. `fill_value` should be the user-facing version, + and the implementation should handle translating that to the + physical version for processing the take if necessary. + + axis : any, default None + Present for compat with IntervalIndex; does nothing. + + Returns + ------- + IntervalArray + + Raises + ------ + IndexError + When the indices are out of bounds for the array. + ValueError + When `indices` contains negative values other than ``-1`` + and `allow_fill` is True. + """ + nv.validate_take((), kwargs) + + fill_left = fill_right = fill_value + if allow_fill: + fill_left, fill_right = self._validate_scalar(fill_value) + + left_take = take( + self._left, indices, allow_fill=allow_fill, fill_value=fill_left + ) + right_take = take( + self._right, indices, allow_fill=allow_fill, fill_value=fill_right + ) + + return self._shallow_copy(left_take, right_take) + + def _validate_listlike(self, value): + # list-like of intervals + try: + array = IntervalArray(value) + self._check_closed_matches(array, name="value") + value_left, value_right = array.left, array.right + except TypeError as err: + # wrong type: not interval or NA + msg = f"'value' should be an interval type, got {type(value)} instead." + raise TypeError(msg) from err + + try: + self.left._validate_fill_value(value_left) + except (LossySetitemError, TypeError) as err: + msg = ( + "'value' should be a compatible interval type, " + f"got {type(value)} instead." 
+ ) + raise TypeError(msg) from err + + return value_left, value_right + + def _validate_scalar(self, value): + if isinstance(value, Interval): + self._check_closed_matches(value, name="value") + left, right = value.left, value.right + # TODO: check subdtype match like _validate_setitem_value? + elif is_valid_na_for_dtype(value, self.left.dtype): + # GH#18295 + left = right = self.left._na_value + else: + raise TypeError( + "can only insert Interval objects and NA into an IntervalArray" + ) + return left, right + + def _validate_setitem_value(self, value): + if is_valid_na_for_dtype(value, self.left.dtype): + # na value: need special casing to set directly on numpy arrays + value = self.left._na_value + if is_integer_dtype(self.dtype.subtype): + # can't set NaN on a numpy integer array + # GH#45484 TypeError, not ValueError, matches what we get with + # non-NA un-holdable value. + raise TypeError("Cannot set float NaN to integer-backed IntervalArray") + value_left, value_right = value, value + + elif isinstance(value, Interval): + # scalar interval + self._check_closed_matches(value, name="value") + value_left, value_right = value.left, value.right + self.left._validate_fill_value(value_left) + self.left._validate_fill_value(value_right) + + else: + return self._validate_listlike(value) + + return value_left, value_right + + def value_counts(self, dropna: bool = True) -> Series: + """ + Returns a Series containing counts of each interval. + + Parameters + ---------- + dropna : bool, default True + Don't include counts of NaN. + + Returns + ------- + counts : Series + + See Also + -------- + Series.value_counts + """ + # TODO: implement this is a non-naive way! + return value_counts(np.asarray(self), dropna=dropna) + + # --------------------------------------------------------------------- + # Rendering Methods + + def _format_data(self) -> str: + # TODO: integrate with categorical and make generic + # name argument is unused here; just for compat with base / categorical + n = len(self) + max_seq_items = min((get_option("display.max_seq_items") or n) // 10, 10) + + formatter = str + + if n == 0: + summary = "[]" + elif n == 1: + first = formatter(self[0]) + summary = f"[{first}]" + elif n == 2: + first = formatter(self[0]) + last = formatter(self[-1]) + summary = f"[{first}, {last}]" + else: + if n > max_seq_items: + n = min(max_seq_items // 2, 10) + head = [formatter(x) for x in self[:n]] + tail = [formatter(x) for x in self[-n:]] + head_str = ", ".join(head) + tail_str = ", ".join(tail) + summary = f"[{head_str} ... {tail_str}]" + else: + tail = [formatter(x) for x in self] + tail_str = ", ".join(tail) + summary = f"[{tail_str}]" + + return summary + + def __repr__(self) -> str: + # the short repr has no trailing newline, while the truncated + # repr does. So we include a newline in our template, and strip + # any trailing newlines from format_object_summary + data = self._format_data() + class_name = f"<{type(self).__name__}>\n" + + template = f"{class_name}{data}\nLength: {len(self)}, dtype: {self.dtype}" + return template + + def _format_space(self) -> str: + space = " " * (len(type(self).__name__) + 1) + return f"\n{space}" + + # --------------------------------------------------------------------- + # Vectorized Interval Properties/Attributes + + @property + def left(self): + """ + Return the left endpoints of each Interval in the IntervalArray as an Index. 
+
+        Examples
+        --------
+
+        >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)])
+        >>> interv_arr
+        <IntervalArray>
+        [(0, 1], (2, 5]]
+        Length: 2, dtype: interval[int64, right]
+        >>> interv_arr.left
+        Index([0, 2], dtype='int64')
+        """
+        from pandas import Index
+
+        return Index(self._left, copy=False)
+
+    @property
+    def right(self):
+        """
+        Return the right endpoints of each Interval in the IntervalArray as an Index.
+
+        Examples
+        --------
+
+        >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(2, 5)])
+        >>> interv_arr
+        <IntervalArray>
+        [(0, 1], (2, 5]]
+        Length: 2, dtype: interval[int64, right]
+        >>> interv_arr.right
+        Index([1, 5], dtype='int64')
+        """
+        from pandas import Index
+
+        return Index(self._right, copy=False)
+
+    @property
+    def length(self) -> Index:
+        """
+        Return an Index with entries denoting the length of each Interval.
+
+        Examples
+        --------
+
+        >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
+        >>> interv_arr
+        <IntervalArray>
+        [(0, 1], (1, 5]]
+        Length: 2, dtype: interval[int64, right]
+        >>> interv_arr.length
+        Index([1, 4], dtype='int64')
+        """
+        return self.right - self.left
+
+    @property
+    def mid(self) -> Index:
+        """
+        Return the midpoint of each Interval in the IntervalArray as an Index.
+
+        Examples
+        --------
+
+        >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
+        >>> interv_arr
+        <IntervalArray>
+        [(0, 1], (1, 5]]
+        Length: 2, dtype: interval[int64, right]
+        >>> interv_arr.mid
+        Index([0.5, 3.0], dtype='float64')
+        """
+        try:
+            return 0.5 * (self.left + self.right)
+        except TypeError:
+            # datetime safe version
+            return self.left + 0.5 * self.length
+
+    _interval_shared_docs["overlaps"] = textwrap.dedent(
+        """
+        Check elementwise if an Interval overlaps the values in the %(klass)s.
+
+        Two intervals overlap if they share a common point, including closed
+        endpoints. Intervals that only have an open endpoint in common do not
+        overlap.
+
+        Parameters
+        ----------
+        other : %(klass)s
+            Interval to check against for an overlap.
+
+        Returns
+        -------
+        ndarray
+            Boolean array positionally indicating where an overlap occurs.
+
+        See Also
+        --------
+        Interval.overlaps : Check whether two Interval objects overlap.
+
+        Examples
+        --------
+        %(examples)s
+        >>> intervals.overlaps(pd.Interval(0.5, 1.5))
+        array([ True,  True, False])
+
+        Intervals that share closed endpoints overlap:
+
+        >>> intervals.overlaps(pd.Interval(1, 3, closed='left'))
+        array([ True,  True,  True])
+
+        Intervals that only have an open endpoint in common do not overlap:
+
+        >>> intervals.overlaps(pd.Interval(1, 2, closed='right'))
+        array([False,  True, False])
+        """
+    )
+
+    @Appender(
+        _interval_shared_docs["overlaps"]
+        % {
+            "klass": "IntervalArray",
+            "examples": textwrap.dedent(
+                """\
+        >>> data = [(0, 1), (1, 3), (2, 4)]
+        >>> intervals = pd.arrays.IntervalArray.from_tuples(data)
+        >>> intervals
+        <IntervalArray>
+        [(0, 1], (1, 3], (2, 4]]
+        Length: 3, dtype: interval[int64, right]
+        """
+            ),
+        }
+    )
+    def overlaps(self, other):
+        if isinstance(other, (IntervalArray, ABCIntervalIndex)):
+            raise NotImplementedError
+        if not isinstance(other, Interval):
+            msg = f"`other` must be Interval-like, got {type(other).__name__}"
+            raise TypeError(msg)
+
+        # equality is okay if both endpoints are closed (overlap at a point)
+        op1 = le if (self.closed_left and other.closed_right) else lt
+        op2 = le if (other.closed_left and self.closed_right) else lt
+
+        # overlaps is equivalent to the negation of the two intervals being
+        # disjoint:
+        #   disjoint = (A.left > B.right) or (B.left > A.right)
+        # (simplifying the negation allows this to be done in fewer operations)
+        return op1(self.left, other.right) & op2(other.left, self.right)
+
+    # ---------------------------------------------------------------------
+
+    @property
+    def closed(self) -> IntervalClosedType:
+        """
+        String describing the inclusive side of the intervals.
+
+        Either ``left``, ``right``, ``both`` or ``neither``.
+
+        Examples
+        --------
+
+        For arrays:
+
+        >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
+        >>> interv_arr
+        <IntervalArray>
+        [(0, 1], (1, 5]]
+        Length: 2, dtype: interval[int64, right]
+        >>> interv_arr.closed
+        'right'
+
+        For Interval Index:
+
+        >>> interv_idx = pd.interval_range(start=0, end=2)
+        >>> interv_idx
+        IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')
+        >>> interv_idx.closed
+        'right'
+        """
+        return self.dtype.closed
+
+    _interval_shared_docs["set_closed"] = textwrap.dedent(
+        """
+        Return an identical %(klass)s closed on the specified side.
+
+        Parameters
+        ----------
+        closed : {'left', 'right', 'both', 'neither'}
+            Whether the intervals are closed on the left-side, right-side, both
+            or neither.
+
+        Returns
+        -------
+        %(klass)s
+
+        %(examples)s\
+        """
+    )
+
+    @Appender(
+        _interval_shared_docs["set_closed"]
+        % {
+            "klass": "IntervalArray",
+            "examples": textwrap.dedent(
+                """\
+        Examples
+        --------
+        >>> index = pd.arrays.IntervalArray.from_breaks(range(4))
+        >>> index
+        <IntervalArray>
+        [(0, 1], (1, 2], (2, 3]]
+        Length: 3, dtype: interval[int64, right]
+        >>> index.set_closed('both')
+        <IntervalArray>
+        [[0, 1], [1, 2], [2, 3]]
+        Length: 3, dtype: interval[int64, both]
+        """
+            ),
+        }
+    )
+    def set_closed(self, closed: IntervalClosedType) -> Self:
+        if closed not in VALID_CLOSED:
+            msg = f"invalid option for 'closed': {closed}"
+            raise ValueError(msg)
+
+        left, right = self._left, self._right
+        dtype = IntervalDtype(left.dtype, closed=closed)
+        return self._simple_new(left, right, dtype=dtype)
+
+    _interval_shared_docs[
+        "is_non_overlapping_monotonic"
+    ] = """
+        Return a boolean indicating whether the %(klass)s is non-overlapping
+        and monotonic.
+
+        Non-overlapping means no Intervals share points, and monotonic means
+        either monotonic increasing or monotonic decreasing.
+
+        Examples
+        --------
+        For arrays:
+
+        >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
+        >>> interv_arr
+        <IntervalArray>
+        [(0, 1], (1, 5]]
+        Length: 2, dtype: interval[int64, right]
+        >>> interv_arr.is_non_overlapping_monotonic
+        True
+
+        >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1),
+        ...                                       pd.Interval(-1, 0.1)])
+        >>> interv_arr
+        <IntervalArray>
+        [(0.0, 1.0], (-1.0, 0.1]]
+        Length: 2, dtype: interval[float64, right]
+        >>> interv_arr.is_non_overlapping_monotonic
+        False
+
+        For Interval Index:
+
+        >>> interv_idx = pd.interval_range(start=0, end=2)
+        >>> interv_idx
+        IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')
+        >>> interv_idx.is_non_overlapping_monotonic
+        True
+
+        >>> interv_idx = pd.interval_range(start=0, end=2, closed='both')
+        >>> interv_idx
+        IntervalIndex([[0, 1], [1, 2]], dtype='interval[int64, both]')
+        >>> interv_idx.is_non_overlapping_monotonic
+        False
+        """
+
+    @property
+    @Appender(
+        _interval_shared_docs["is_non_overlapping_monotonic"] % _shared_docs_kwargs
+    )
+    def is_non_overlapping_monotonic(self) -> bool:
+        # must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... )
+        # or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...)
+        # we already require left <= right
+
+        # strict inequality for closed == 'both'; equality implies overlapping
+        # at a point when both sides of intervals are included
+        if self.closed == "both":
+            return bool(
+                (self._right[:-1] < self._left[1:]).all()
+                or (self._left[:-1] > self._right[1:]).all()
+            )
+
+        # non-strict inequality when closed != 'both'; at least one side is
+        # not included in the intervals, so equality does not imply overlapping
+        return bool(
+            (self._right[:-1] <= self._left[1:]).all()
+            or (self._left[:-1] >= self._right[1:]).all()
+        )
+
+    # ---------------------------------------------------------------------
+    # Conversion
+
+    def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
+        """
+        Return the IntervalArray's data as a numpy array of Interval
+        objects (with dtype='object').
+        """
+        left = self._left
+        right = self._right
+        mask = self.isna()
+        closed = self.closed
+
+        result = np.empty(len(left), dtype=object)
+        for i, left_value in enumerate(left):
+            if mask[i]:
+                result[i] = np.nan
+            else:
+                result[i] = Interval(left_value, right[i], closed)
+        return result
+
+    def __arrow_array__(self, type=None):
+        """
+        Convert myself into a pyarrow Array.
+        """
+        import pyarrow
+
+        from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
+
+        try:
+            subtype = pyarrow.from_numpy_dtype(self.dtype.subtype)
+        except TypeError as err:
+            raise TypeError(
+                f"Conversion to arrow with subtype '{self.dtype.subtype}' "
+                "is not supported"
+            ) from err
+        interval_type = ArrowIntervalType(subtype, self.closed)
+        storage_array = pyarrow.StructArray.from_arrays(
+            [
+                pyarrow.array(self._left, type=subtype, from_pandas=True),
+                pyarrow.array(self._right, type=subtype, from_pandas=True),
+            ],
+            names=["left", "right"],
+        )
+        mask = self.isna()
+        if mask.any():
+            # if there are missing values, set validity bitmap also on the array level
+            null_bitmap = pyarrow.array(~mask).buffers()[1]
+            storage_array = pyarrow.StructArray.from_buffers(
+                storage_array.type,
+                len(storage_array),
+                [null_bitmap],
+                children=[storage_array.field(0), storage_array.field(1)],
+            )
+
+        if type is not None:
+            if type.equals(interval_type.storage_type):
+                return storage_array
+            elif isinstance(type, ArrowIntervalType):
+                # ensure we have the same subtype and closed attributes
+                if not type.equals(interval_type):
+                    raise TypeError(
+                        "Not supported to convert IntervalArray to type with "
+                        f"different 'subtype' ({self.dtype.subtype} vs {type.subtype}) "
+                        f"and 'closed' ({self.closed} vs {type.closed}) attributes"
+                    )
+            else:
+                raise TypeError(
+                    f"Not supported to convert IntervalArray to '{type}' type"
+                )
+
+        return pyarrow.ExtensionArray.from_storage(interval_type, storage_array)
+
+    _interval_shared_docs["to_tuples"] = textwrap.dedent(
+        """
+        Return an %(return_type)s of tuples of the form (left, right).
+
+        Parameters
+        ----------
+        na_tuple : bool, default True
+            If ``True``, return ``NA`` as a tuple ``(nan, nan)``. If ``False``,
+            just return ``NA`` as ``nan``.
+
+        Returns
+        -------
+        tuples: %(return_type)s
+        %(examples)s\
+        """
+    )
+
+    @Appender(
+        _interval_shared_docs["to_tuples"]
+        % {
+            "return_type": (
+                "ndarray (if self is IntervalArray) or Index (if self is IntervalIndex)"
+            ),
+            "examples": textwrap.dedent(
+                """\
+
+        Examples
+        --------
+        For :class:`pandas.IntervalArray`:
+
+        >>> idx = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
+        >>> idx
+        <IntervalArray>
+        [(0, 1], (1, 2]]
+        Length: 2, dtype: interval[int64, right]
+        >>> idx.to_tuples()
+        array([(0, 1), (1, 2)], dtype=object)
+
+        For :class:`pandas.IntervalIndex`:
+
+        >>> idx = pd.interval_range(start=0, end=2)
+        >>> idx
+        IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')
+        >>> idx.to_tuples()
+        Index([(0, 1), (1, 2)], dtype='object')
+        """
+            ),
+        }
+    )
+    def to_tuples(self, na_tuple: bool = True) -> np.ndarray:
+        tuples = com.asarray_tuplesafe(zip(self._left, self._right))
+        if not na_tuple:
+            # GH 18756
+            tuples = np.where(~self.isna(), tuples, np.nan)
+        return tuples
+
+    # ---------------------------------------------------------------------
+
+    def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
+        value_left, value_right = self._validate_setitem_value(value)
+
+        if isinstance(self._left, np.ndarray):
+            np.putmask(self._left, mask, value_left)
+            assert isinstance(self._right, np.ndarray)
+            np.putmask(self._right, mask, value_right)
+        else:
+            self._left._putmask(mask, value_left)
+            assert not isinstance(self._right, np.ndarray)
+            self._right._putmask(mask, value_right)
+
+    def insert(self, loc: int, item: Interval) -> Self:
+        """
+        Return a new IntervalArray inserting new item at location. Follows
+        Python numpy.insert semantics for negative values. Only Interval
+        objects and NA can be inserted into an IntervalIndex.
+
+        Parameters
+        ----------
+        loc : int
+        item : Interval
+
+        Returns
+        -------
+        IntervalArray
+        """
+        left_insert, right_insert = self._validate_scalar(item)
+
+        new_left = self.left.insert(loc, left_insert)
+        new_right = self.right.insert(loc, right_insert)
+
+        return self._shallow_copy(new_left, new_right)
+
+    def delete(self, loc) -> Self:
+        if isinstance(self._left, np.ndarray):
+            new_left = np.delete(self._left, loc)
+            assert isinstance(self._right, np.ndarray)
+            new_right = np.delete(self._right, loc)
+        else:
+            new_left = self._left.delete(loc)
+            assert not isinstance(self._right, np.ndarray)
+            new_right = self._right.delete(loc)
+        return self._shallow_copy(left=new_left, right=new_right)
+
+    @Appender(_extension_array_shared_docs["repeat"] % _shared_docs_kwargs)
+    def repeat(
+        self,
+        repeats: int | Sequence[int],
+        axis: AxisInt | None = None,
+    ) -> Self:
+        nv.validate_repeat((), {"axis": axis})
+        left_repeat = self.left.repeat(repeats)
+        right_repeat = self.right.repeat(repeats)
+        return self._shallow_copy(left=left_repeat, right=right_repeat)
+
+    _interval_shared_docs["contains"] = textwrap.dedent(
+        """
+        Check elementwise if the Intervals contain the value.
+
+        Return a boolean mask whether the value is contained in the Intervals
+        of the %(klass)s.
+
+        Parameters
+        ----------
+        other : scalar
+            The value to check whether it is contained in the Intervals.
+
+        Returns
+        -------
+        boolean array
+
+        See Also
+        --------
+        Interval.contains : Check whether Interval object contains value.
+        %(klass)s.overlaps : Check if an Interval overlaps the values in the
+            %(klass)s.
+
+        Examples
+        --------
+        %(examples)s
+        >>> intervals.contains(0.5)
+        array([ True, False, False])
+        """
+    )
+
+    @Appender(
+        _interval_shared_docs["contains"]
+        % {
+            "klass": "IntervalArray",
+            "examples": textwrap.dedent(
+                """\
+        >>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)])
+        >>> intervals
+        <IntervalArray>
+        [(0, 1], (1, 3], (2, 4]]
+        Length: 3, dtype: interval[int64, right]
+        """
+            ),
+        }
+    )
+    def contains(self, other):
+        if isinstance(other, Interval):
+            raise NotImplementedError("contains not implemented for two intervals")
+
+        return (self._left < other if self.open_left else self._left <= other) & (
+            other < self._right if self.open_right else other <= self._right
+        )
+
+    def isin(self, values) -> npt.NDArray[np.bool_]:
+        if not hasattr(values, "dtype"):
+            values = np.array(values)
+        values = extract_array(values, extract_numpy=True)
+
+        if isinstance(values.dtype, IntervalDtype):
+            if self.closed != values.closed:
+                # not comparable -> no overlap
+                return np.zeros(self.shape, dtype=bool)
+
+            if self.dtype == values.dtype:
+                # GH#38353 instead of casting to object, operating on a
+                # complex128 ndarray is much more performant.
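+
+                # For example (values arbitrary, sketch only): viewing an
+                # (N, 2) float64 array as complex128 packs each (left, right)
+                # pair into one scalar, so np.isin compares whole intervals at
+                # once:
+                #   >>> import numpy as np
+                #   >>> np.array([[0.0, 1.0], [1.0, 2.0]]).view("complex128").ravel()
+                #   array([0.+1.j, 1.+2.j])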
+ left = self._combined.view("complex128") + right = values._combined.view("complex128") + # error: Argument 1 to "isin" has incompatible type + # "Union[ExtensionArray, ndarray[Any, Any], + # ndarray[Any, dtype[Any]]]"; expected + # "Union[_SupportsArray[dtype[Any]], + # _NestedSequence[_SupportsArray[dtype[Any]]], bool, + # int, float, complex, str, bytes, _NestedSequence[ + # Union[bool, int, float, complex, str, bytes]]]" + return np.isin(left, right).ravel() # type: ignore[arg-type] + + elif needs_i8_conversion(self.left.dtype) ^ needs_i8_conversion( + values.left.dtype + ): + # not comparable -> no overlap + return np.zeros(self.shape, dtype=bool) + + return isin(self.astype(object), values.astype(object)) + + @property + def _combined(self) -> IntervalSideT: + left = self.left._values.reshape(-1, 1) + right = self.right._values.reshape(-1, 1) + if needs_i8_conversion(left.dtype): + comb = left._concat_same_type([left, right], axis=1) + else: + comb = np.concatenate([left, right], axis=1) + return comb + + def _from_combined(self, combined: np.ndarray) -> IntervalArray: + """ + Create a new IntervalArray with our dtype from a 1D complex128 ndarray. + """ + nc = combined.view("i8").reshape(-1, 2) + + dtype = self._left.dtype + if needs_i8_conversion(dtype): + assert isinstance(self._left, (DatetimeArray, TimedeltaArray)) + new_left = type(self._left)._from_sequence(nc[:, 0], dtype=dtype) + assert isinstance(self._right, (DatetimeArray, TimedeltaArray)) + new_right = type(self._right)._from_sequence(nc[:, 1], dtype=dtype) + else: + assert isinstance(dtype, np.dtype) + new_left = nc[:, 0].view(dtype) + new_right = nc[:, 1].view(dtype) + return self._shallow_copy(left=new_left, right=new_right) + + def unique(self) -> IntervalArray: + # No overload variant of "__getitem__" of "ExtensionArray" matches argument + # type "Tuple[slice, int]" + nc = unique( + self._combined.view("complex128")[:, 0] # type: ignore[call-overload] + ) + nc = nc[:, None] + return self._from_combined(nc) + + +def _maybe_convert_platform_interval(values) -> ArrayLike: + """ + Try to do platform conversion, with special casing for IntervalArray. + Wrapper around maybe_convert_platform that alters the default return + dtype in certain cases to be compatible with IntervalArray. For example, + empty lists return with integer dtype instead of object dtype, which is + prohibited for IntervalArray. + + Parameters + ---------- + values : array-like + + Returns + ------- + array + """ + if isinstance(values, (list, tuple)) and len(values) == 0: + # GH 19016 + # empty lists/tuples get object dtype by default, but this is + # prohibited for IntervalArray, so coerce to integer instead + return np.array([], dtype=np.int64) + elif not is_list_like(values) or isinstance(values, ABCDataFrame): + # This will raise later, but we avoid passing to maybe_convert_platform + return values + elif isinstance(getattr(values, "dtype", None), CategoricalDtype): + values = np.asarray(values) + elif not hasattr(values, "dtype") and not isinstance(values, (list, tuple, range)): + # TODO: should we just cast these to list? 
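+
+        # For example (hypothetical call, sketch only): the empty-list special
+        # case at the top of this function yields an integer result rather
+        # than object dtype:
+        #   >>> _maybe_convert_platform_interval([])
+        #   array([], dtype=int64)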
+ return values + else: + values = extract_array(values, extract_numpy=True) + + if not hasattr(values, "dtype"): + values = np.asarray(values) + if values.dtype.kind in "iu" and values.dtype != np.int64: + values = values.astype(np.int64) + return values diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/masked.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/masked.py new file mode 100644 index 00000000..2cf28c28 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/masked.py @@ -0,0 +1,1531 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + missing as libmissing, +) +from pandas._libs.tslibs import ( + get_unit_from_dtype, + is_supported_unit, +) +from pandas._typing import ( + ArrayLike, + AstypeArg, + AxisInt, + DtypeObj, + FillnaOptions, + NpDtype, + PositionalIndexer, + Scalar, + ScalarIndexer, + Self, + SequenceIndexer, + Shape, + npt, +) +from pandas.compat import ( + IS64, + is_platform_windows, +) +from pandas.errors import AbstractMethodError +from pandas.util._decorators import doc +from pandas.util._validators import validate_fillna_kwargs + +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.common import ( + is_bool, + is_integer_dtype, + is_list_like, + is_scalar, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import BaseMaskedDtype +from pandas.core.dtypes.missing import ( + array_equivalent, + is_valid_na_for_dtype, + isna, + notna, +) + +from pandas.core import ( + algorithms as algos, + arraylike, + missing, + nanops, + ops, +) +from pandas.core.algorithms import ( + factorize_array, + isin, + take, +) +from pandas.core.array_algos import ( + masked_accumulations, + masked_reductions, +) +from pandas.core.array_algos.quantile import quantile_with_mask +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays.base import ExtensionArray +from pandas.core.construction import ( + array as pd_array, + ensure_wrapped_if_datetimelike, + extract_array, +) +from pandas.core.indexers import check_array_indexer +from pandas.core.ops import invalid_comparison + +if TYPE_CHECKING: + from collections.abc import ( + Iterator, + Sequence, + ) + from pandas import Series + from pandas.core.arrays import BooleanArray + from pandas._typing import ( + NumpySorter, + NumpyValueArrayLike, + ) + +from pandas.compat.numpy import function as nv + + +class BaseMaskedArray(OpsMixin, ExtensionArray): + """ + Base class for masked arrays (which use _data and _mask to store the data). + + numpy based + """ + + # The value used to fill '_data' to avoid upcasting + _internal_fill_value: Scalar + # our underlying data and mask are each ndarrays + _data: np.ndarray + _mask: npt.NDArray[np.bool_] + + # Fill values used for any/all + _truthy_value = Scalar # bool(_truthy_value) = True + _falsey_value = Scalar # bool(_falsey_value) = False + + @classmethod + def _simple_new(cls, values: np.ndarray, mask: npt.NDArray[np.bool_]) -> Self: + result = BaseMaskedArray.__new__(cls) + result._data = values + result._mask = mask + return result + + def __init__( + self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False + ) -> None: + # values is supposed to already be validated in the subclass + if not (isinstance(mask, np.ndarray) and mask.dtype == np.bool_): + raise TypeError( + "mask should be boolean numpy array. 
Use " + "the 'pd.array' function instead" + ) + if values.shape != mask.shape: + raise ValueError("values.shape must match mask.shape") + + if copy: + values = values.copy() + mask = mask.copy() + + self._data = values + self._mask = mask + + @classmethod + def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False) -> Self: + values, mask = cls._coerce_to_array(scalars, dtype=dtype, copy=copy) + return cls(values, mask) + + @classmethod + @doc(ExtensionArray._empty) + def _empty(cls, shape: Shape, dtype: ExtensionDtype): + values = np.empty(shape, dtype=dtype.type) + values.fill(cls._internal_fill_value) + mask = np.ones(shape, dtype=bool) + result = cls(values, mask) + if not isinstance(result, cls) or dtype != result.dtype: + raise NotImplementedError( + f"Default 'empty' implementation is invalid for dtype='{dtype}'" + ) + return result + + def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]: + # NEP 51: https://github.com/numpy/numpy/pull/22449 + return str + + @property + def dtype(self) -> BaseMaskedDtype: + raise AbstractMethodError(self) + + @overload + def __getitem__(self, item: ScalarIndexer) -> Any: + ... + + @overload + def __getitem__(self, item: SequenceIndexer) -> Self: + ... + + def __getitem__(self, item: PositionalIndexer) -> Self | Any: + item = check_array_indexer(self, item) + + newmask = self._mask[item] + if is_bool(newmask): + # This is a scalar indexing + if newmask: + return self.dtype.na_value + return self._data[item] + + return self._simple_new(self._data[item], newmask) + + def _pad_or_backfill( + self, *, method: FillnaOptions, limit: int | None = None, copy: bool = True + ) -> Self: + mask = self._mask + + if mask.any(): + func = missing.get_fill_func(method, ndim=self.ndim) + + npvalues = self._data.T + new_mask = mask.T + if copy: + npvalues = npvalues.copy() + new_mask = new_mask.copy() + func(npvalues, limit=limit, mask=new_mask) + if copy: + return self._simple_new(npvalues.T, new_mask.T) + else: + return self + else: + if copy: + new_values = self.copy() + else: + new_values = self + return new_values + + @doc(ExtensionArray.fillna) + def fillna( + self, value=None, method=None, limit: int | None = None, copy: bool = True + ) -> Self: + value, method = validate_fillna_kwargs(value, method) + + mask = self._mask + + value = missing.check_value_size(value, mask, len(self)) + + if mask.any(): + if method is not None: + func = missing.get_fill_func(method, ndim=self.ndim) + npvalues = self._data.T + new_mask = mask.T + if copy: + npvalues = npvalues.copy() + new_mask = new_mask.copy() + func(npvalues, limit=limit, mask=new_mask) + return self._simple_new(npvalues.T, new_mask.T) + else: + # fill with value + if copy: + new_values = self.copy() + else: + new_values = self[:] + new_values[mask] = value + else: + if copy: + new_values = self.copy() + else: + new_values = self[:] + return new_values + + @classmethod + def _coerce_to_array( + cls, values, *, dtype: DtypeObj, copy: bool = False + ) -> tuple[np.ndarray, np.ndarray]: + raise AbstractMethodError(cls) + + def _validate_setitem_value(self, value): + """ + Check if we have a scalar that we can cast losslessly. + + Raises + ------ + TypeError + """ + kind = self.dtype.kind + # TODO: get this all from np_can_hold_element? 
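+
+        # For example (values arbitrary, sketch only): an Int64 array accepts
+        # integral floats losslessly and rejects fractional ones:
+        #   >>> arr = pd.array([1, 2, None], dtype="Int64")
+        #   >>> arr[0] = 4.0   # stored as 4
+        #   >>> arr[0] = 4.5   # raises TypeError: would be lossy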
+ if kind == "b": + if lib.is_bool(value): + return value + + elif kind == "f": + if lib.is_integer(value) or lib.is_float(value): + return value + + else: + if lib.is_integer(value) or (lib.is_float(value) and value.is_integer()): + return value + # TODO: unsigned checks + + # Note: without the "str" here, the f-string rendering raises in + # py38 builds. + raise TypeError(f"Invalid value '{str(value)}' for dtype {self.dtype}") + + def __setitem__(self, key, value) -> None: + key = check_array_indexer(self, key) + + if is_scalar(value): + if is_valid_na_for_dtype(value, self.dtype): + self._mask[key] = True + else: + value = self._validate_setitem_value(value) + self._data[key] = value + self._mask[key] = False + return + + value, mask = self._coerce_to_array(value, dtype=self.dtype) + + self._data[key] = value + self._mask[key] = mask + + def __contains__(self, key) -> bool: + if isna(key) and key is not self.dtype.na_value: + # GH#52840 + if self._data.dtype.kind == "f" and lib.is_float(key): + return bool((np.isnan(self._data) & ~self._mask).any()) + + return bool(super().__contains__(key)) + + def __iter__(self) -> Iterator: + if self.ndim == 1: + if not self._hasna: + for val in self._data: + yield val + else: + na_value = self.dtype.na_value + for isna_, val in zip(self._mask, self._data): + if isna_: + yield na_value + else: + yield val + else: + for i in range(len(self)): + yield self[i] + + def __len__(self) -> int: + return len(self._data) + + @property + def shape(self) -> Shape: + return self._data.shape + + @property + def ndim(self) -> int: + return self._data.ndim + + def swapaxes(self, axis1, axis2) -> Self: + data = self._data.swapaxes(axis1, axis2) + mask = self._mask.swapaxes(axis1, axis2) + return self._simple_new(data, mask) + + def delete(self, loc, axis: AxisInt = 0) -> Self: + data = np.delete(self._data, loc, axis=axis) + mask = np.delete(self._mask, loc, axis=axis) + return self._simple_new(data, mask) + + def reshape(self, *args, **kwargs) -> Self: + data = self._data.reshape(*args, **kwargs) + mask = self._mask.reshape(*args, **kwargs) + return self._simple_new(data, mask) + + def ravel(self, *args, **kwargs) -> Self: + # TODO: need to make sure we have the same order for data/mask + data = self._data.ravel(*args, **kwargs) + mask = self._mask.ravel(*args, **kwargs) + return type(self)(data, mask) + + @property + def T(self) -> Self: + return self._simple_new(self._data.T, self._mask.T) + + def round(self, decimals: int = 0, *args, **kwargs): + """ + Round each value in the array a to the given number of decimals. + + Parameters + ---------- + decimals : int, default 0 + Number of decimal places to round to. If decimals is negative, + it specifies the number of positions to the left of the decimal point. + *args, **kwargs + Additional arguments and keywords have no effect but might be + accepted for compatibility with NumPy. + + Returns + ------- + NumericArray + Rounded values of the NumericArray. + + See Also + -------- + numpy.around : Round values of an np.array. + DataFrame.round : Round values of a DataFrame. + Series.round : Round values of a Series. 
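+
+        Examples
+        --------
+        A small sketch with arbitrary values:
+
+        >>> pd.array([1.24, 2.76, None], dtype="Float64").round(1)
+        <FloatingArray>
+        [1.2, 2.8, <NA>]
+        Length: 3, dtype: Float64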
+        """
+        nv.validate_round(args, kwargs)
+        values = np.round(self._data, decimals=decimals, **kwargs)
+
+        # Usually we'll get same type as self, but ndarray[bool] casts to float
+        return self._maybe_mask_result(values, self._mask.copy())
+
+    # ------------------------------------------------------------------
+    # Unary Methods
+
+    def __invert__(self) -> Self:
+        return self._simple_new(~self._data, self._mask.copy())
+
+    def __neg__(self) -> Self:
+        return self._simple_new(-self._data, self._mask.copy())
+
+    def __pos__(self) -> Self:
+        return self.copy()
+
+    def __abs__(self) -> Self:
+        return self._simple_new(abs(self._data), self._mask.copy())
+
+    # ------------------------------------------------------------------
+
+    def to_numpy(
+        self,
+        dtype: npt.DTypeLike | None = None,
+        copy: bool = False,
+        na_value: object = lib.no_default,
+    ) -> np.ndarray:
+        """
+        Convert to a NumPy Array.
+
+        By default converts to an object-dtype NumPy array. Specify the `dtype` and
+        `na_value` keywords to customize the conversion.
+
+        Parameters
+        ----------
+        dtype : dtype, default object
+            The numpy dtype to convert to.
+        copy : bool, default False
+            Whether to ensure that the returned value is not a view on
+            the array. Note that ``copy=False`` does not *ensure* that
+            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
+            a copy is made, even if not strictly necessary. This is typically
+            only possible when no missing values are present and `dtype`
+            is the equivalent numpy dtype.
+        na_value : scalar, optional
+            Scalar missing value indicator to use in numpy array. Defaults
+            to the native missing value indicator of this array (pd.NA).
+
+        Returns
+        -------
+        numpy.ndarray
+
+        Examples
+        --------
+        An object-dtype is the default result
+
+        >>> a = pd.array([True, False, pd.NA], dtype="boolean")
+        >>> a.to_numpy()
+        array([True, False, <NA>], dtype=object)
+
+        When no missing values are present, an equivalent dtype can be used.
+
+        >>> pd.array([True, False], dtype="boolean").to_numpy(dtype="bool")
+        array([ True, False])
+        >>> pd.array([1, 2], dtype="Int64").to_numpy("int64")
+        array([1, 2])
+
+        However, requesting such dtype will raise a ValueError if
+        missing values are present and the default missing value :attr:`NA`
+        is used.
+
+        >>> a = pd.array([True, False, pd.NA], dtype="boolean")
+        >>> a
+        <BooleanArray>
+        [True, False, <NA>]
+        Length: 3, dtype: boolean
+
+        >>> a.to_numpy(dtype="bool")
+        Traceback (most recent call last):
+        ...
+        ValueError: cannot convert to bool numpy array in presence of missing values
+
+        Specify a valid `na_value` instead
+
+        >>> a.to_numpy(dtype="bool", na_value=False)
+        array([ True, False, False])
+        """
+        if na_value is lib.no_default:
+            na_value = libmissing.NA
+        if dtype is None:
+            dtype = object
+        else:
+            dtype = np.dtype(dtype)
+        if self._hasna:
+            if (
+                dtype != object
+                and not is_string_dtype(dtype)
+                and na_value is libmissing.NA
+            ):
+                raise ValueError(
+                    f"cannot convert to '{dtype}'-dtype NumPy array "
+                    "with missing values. Specify an appropriate 'na_value' "
+                    "for this dtype."
+ ) + # don't pass copy to astype -> always need a copy since we are mutating + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + data = self._data.astype(dtype) + data[self._mask] = na_value + else: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + data = self._data.astype(dtype, copy=copy) + return data + + @doc(ExtensionArray.tolist) + def tolist(self): + if self.ndim > 1: + return [x.tolist() for x in self] + dtype = None if self._hasna else self._data.dtype + return self.to_numpy(dtype=dtype).tolist() + + @overload + def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray: + ... + + @overload + def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray: + ... + + @overload + def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike: + ... + + def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike: + dtype = pandas_dtype(dtype) + + if dtype == self.dtype: + if copy: + return self.copy() + return self + + # if we are astyping to another nullable masked dtype, we can fastpath + if isinstance(dtype, BaseMaskedDtype): + # TODO deal with NaNs for FloatingArray case + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + # TODO: Is rounding what we want long term? + data = self._data.astype(dtype.numpy_dtype, copy=copy) + # mask is copied depending on whether the data was copied, and + # not directly depending on the `copy` keyword + mask = self._mask if data is self._data else self._mask.copy() + cls = dtype.construct_array_type() + return cls(data, mask, copy=False) + + if isinstance(dtype, ExtensionDtype): + eacls = dtype.construct_array_type() + return eacls._from_sequence(self, dtype=dtype, copy=copy) + + na_value: float | np.datetime64 | lib.NoDefault + + # coerce + if dtype.kind == "f": + # In astype, we consider dtype=float to also mean na_value=np.nan + na_value = np.nan + elif dtype.kind == "M": + na_value = np.datetime64("NaT") + else: + na_value = lib.no_default + + # to_numpy will also raise, but we get somewhat nicer exception messages here + if dtype.kind in "iu" and self._hasna: + raise ValueError("cannot convert NA to integer") + if dtype.kind == "b" and self._hasna: + # careful: astype_nansafe converts np.nan to True + raise ValueError("cannot convert float NaN to bool") + + data = self.to_numpy(dtype=dtype, na_value=na_value, copy=copy) + return data + + __array_priority__ = 1000 # higher than ndarray so ops dispatch to us + + def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: + """ + the array interface, return my values + We return an object array here to preserve our scalar values + """ + return self.to_numpy(dtype=dtype) + + _HANDLED_TYPES: tuple[type, ...] + + def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + # For MaskedArray inputs, we apply the ufunc to ._data + # and mask the result. + + out = kwargs.get("out", ()) + + for x in inputs + out: + if not isinstance(x, self._HANDLED_TYPES + (BaseMaskedArray,)): + return NotImplemented + + # for binary ops, use our custom dunder methods + result = arraylike.maybe_dispatch_ufunc_to_dunder_op( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + if "out" in kwargs: + # e.g. 
test_ufunc_with_out + return arraylike.dispatch_ufunc_with_out( + self, ufunc, method, *inputs, **kwargs + ) + + if method == "reduce": + result = arraylike.dispatch_reduction_ufunc( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + mask = np.zeros(len(self), dtype=bool) + inputs2 = [] + for x in inputs: + if isinstance(x, BaseMaskedArray): + mask |= x._mask + inputs2.append(x._data) + else: + inputs2.append(x) + + def reconstruct(x: np.ndarray): + # we don't worry about scalar `x` here, since we + # raise for reduce up above. + from pandas.core.arrays import ( + BooleanArray, + FloatingArray, + IntegerArray, + ) + + if x.dtype.kind == "b": + m = mask.copy() + return BooleanArray(x, m) + elif x.dtype.kind in "iu": + m = mask.copy() + return IntegerArray(x, m) + elif x.dtype.kind == "f": + m = mask.copy() + if x.dtype == np.float16: + # reached in e.g. np.sqrt on BooleanArray + # we don't support float16 + x = x.astype(np.float32) + return FloatingArray(x, m) + else: + x[mask] = np.nan + return x + + result = getattr(ufunc, method)(*inputs2, **kwargs) + if ufunc.nout > 1: + # e.g. np.divmod + return tuple(reconstruct(x) for x in result) + elif method == "reduce": + # e.g. np.add.reduce; test_ufunc_reduce_raises + if self._mask.any(): + return self._na_value + return result + else: + return reconstruct(result) + + def __arrow_array__(self, type=None): + """ + Convert myself into a pyarrow Array. + """ + import pyarrow as pa + + return pa.array(self._data, mask=self._mask, type=type) + + @property + def _hasna(self) -> bool: + # Note: this is expensive right now! The hope is that we can + # make this faster by having an optional mask, but not have to change + # source code using it.. + + # error: Incompatible return value type (got "bool_", expected "bool") + return self._mask.any() # type: ignore[return-value] + + def _propagate_mask( + self, mask: npt.NDArray[np.bool_] | None, other + ) -> npt.NDArray[np.bool_]: + if mask is None: + mask = self._mask.copy() # TODO: need test for BooleanArray needing a copy + if other is libmissing.NA: + # GH#45421 don't alter inplace + mask = mask | True + elif is_list_like(other) and len(other) == len(mask): + mask = mask | isna(other) + else: + mask = self._mask | mask + # Incompatible return value type (got "Optional[ndarray[Any, dtype[bool_]]]", + # expected "ndarray[Any, dtype[bool_]]") + return mask # type: ignore[return-value] + + def _arith_method(self, other, op): + op_name = op.__name__ + omask = None + + if ( + not hasattr(other, "dtype") + and is_list_like(other) + and len(other) == len(self) + ): + # Try inferring masked dtype instead of casting to object + other = pd_array(other) + other = extract_array(other, extract_numpy=True) + + if isinstance(other, BaseMaskedArray): + other, omask = other._data, other._mask + + elif is_list_like(other): + if not isinstance(other, ExtensionArray): + other = np.asarray(other) + if other.ndim > 1: + raise NotImplementedError("can only perform ops with 1-d structures") + + # We wrap the non-masked arithmetic logic used for numpy dtypes + # in Series/Index arithmetic ops. + other = ops.maybe_prepare_scalar_for_op(other, (len(self),)) + pd_op = ops.get_array_op(op) + other = ensure_wrapped_if_datetimelike(other) + + if op_name in {"pow", "rpow"} and isinstance(other, np.bool_): + # Avoid DeprecationWarning: In future, it will be an error + # for 'np.bool_' scalars to be interpreted as an index + # e.g. 
test_array_scalar_like_equivalence + other = bool(other) + + mask = self._propagate_mask(omask, other) + + if other is libmissing.NA: + result = np.ones_like(self._data) + if self.dtype.kind == "b": + if op_name in { + "floordiv", + "rfloordiv", + "pow", + "rpow", + "truediv", + "rtruediv", + }: + # GH#41165 Try to match non-masked Series behavior + # This is still imperfect GH#46043 + raise NotImplementedError( + f"operator '{op_name}' not implemented for bool dtypes" + ) + if op_name in {"mod", "rmod"}: + dtype = "int8" + else: + dtype = "bool" + result = result.astype(dtype) + elif "truediv" in op_name and self.dtype.kind != "f": + # The actual data here doesn't matter since the mask + # will be all-True, but since this is division, we want + # to end up with floating dtype. + result = result.astype(np.float64) + else: + # Make sure we do this before the "pow" mask checks + # to get an expected exception message on shape mismatch. + if self.dtype.kind in "iu" and op_name in ["floordiv", "mod"]: + # TODO(GH#30188) ATM we don't match the behavior of non-masked + # types with respect to floordiv-by-zero + pd_op = op + + with np.errstate(all="ignore"): + result = pd_op(self._data, other) + + if op_name == "pow": + # 1 ** x is 1. + mask = np.where((self._data == 1) & ~self._mask, False, mask) + # x ** 0 is 1. + if omask is not None: + mask = np.where((other == 0) & ~omask, False, mask) + elif other is not libmissing.NA: + mask = np.where(other == 0, False, mask) + + elif op_name == "rpow": + # 1 ** x is 1. + if omask is not None: + mask = np.where((other == 1) & ~omask, False, mask) + elif other is not libmissing.NA: + mask = np.where(other == 1, False, mask) + # x ** 0 is 1. + mask = np.where((self._data == 0) & ~self._mask, False, mask) + + return self._maybe_mask_result(result, mask) + + _logical_method = _arith_method + + def _cmp_method(self, other, op) -> BooleanArray: + from pandas.core.arrays import BooleanArray + + mask = None + + if isinstance(other, BaseMaskedArray): + other, mask = other._data, other._mask + + elif is_list_like(other): + other = np.asarray(other) + if other.ndim > 1: + raise NotImplementedError("can only perform ops with 1-d structures") + if len(self) != len(other): + raise ValueError("Lengths must match to compare") + + if other is libmissing.NA: + # numpy does not handle pd.NA well as "other" scalar (it returns + # a scalar False instead of an array) + # This may be fixed by NA.__array_ufunc__. Revisit this check + # once that's implemented. + result = np.zeros(self._data.shape, dtype="bool") + mask = np.ones(self._data.shape, dtype="bool") + else: + with warnings.catch_warnings(): + # numpy may show a FutureWarning or DeprecationWarning: + # elementwise comparison failed; returning scalar instead, + # but in the future will perform elementwise comparison + # before returning NotImplemented. We fall back to the correct + # behavior today, so that should be fine to ignore. 
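+
+                # For example (values arbitrary, sketch only): the masked
+                # comparison ends up as a BooleanArray with NA propagated:
+                #   >>> pd.array([1, 2, None], dtype="Int64") == 2
+                #   <BooleanArray>
+                #   [False, True, <NA>]
+                #   Length: 3, dtype: boolean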
+ warnings.filterwarnings("ignore", "elementwise", FutureWarning) + warnings.filterwarnings("ignore", "elementwise", DeprecationWarning) + method = getattr(self._data, f"__{op.__name__}__") + result = method(other) + + if result is NotImplemented: + result = invalid_comparison(self._data, other, op) + + mask = self._propagate_mask(mask, other) + return BooleanArray(result, mask, copy=False) + + def _maybe_mask_result( + self, result: np.ndarray | tuple[np.ndarray, np.ndarray], mask: np.ndarray + ): + """ + Parameters + ---------- + result : array-like or tuple[array-like] + mask : array-like bool + """ + if isinstance(result, tuple): + # i.e. divmod + div, mod = result + return ( + self._maybe_mask_result(div, mask), + self._maybe_mask_result(mod, mask), + ) + + if result.dtype.kind == "f": + from pandas.core.arrays import FloatingArray + + return FloatingArray(result, mask, copy=False) + + elif result.dtype.kind == "b": + from pandas.core.arrays import BooleanArray + + return BooleanArray(result, mask, copy=False) + + elif lib.is_np_dtype(result.dtype, "m") and is_supported_unit( + get_unit_from_dtype(result.dtype) + ): + # e.g. test_numeric_arr_mul_tdscalar_numexpr_path + from pandas.core.arrays import TimedeltaArray + + result[mask] = result.dtype.type("NaT") + + if not isinstance(result, TimedeltaArray): + return TimedeltaArray._simple_new(result, dtype=result.dtype) + + return result + + elif result.dtype.kind in "iu": + from pandas.core.arrays import IntegerArray + + return IntegerArray(result, mask, copy=False) + + else: + result[mask] = np.nan + return result + + def isna(self) -> np.ndarray: + return self._mask.copy() + + @property + def _na_value(self): + return self.dtype.na_value + + @property + def nbytes(self) -> int: + return self._data.nbytes + self._mask.nbytes + + @classmethod + def _concat_same_type( + cls, + to_concat: Sequence[Self], + axis: AxisInt = 0, + ) -> Self: + data = np.concatenate([x._data for x in to_concat], axis=axis) + mask = np.concatenate([x._mask for x in to_concat], axis=axis) + return cls(data, mask) + + def take( + self, + indexer, + *, + allow_fill: bool = False, + fill_value: Scalar | None = None, + axis: AxisInt = 0, + ) -> Self: + # we always fill with 1 internally + # to avoid upcasting + data_fill_value = self._internal_fill_value if isna(fill_value) else fill_value + result = take( + self._data, + indexer, + fill_value=data_fill_value, + allow_fill=allow_fill, + axis=axis, + ) + + mask = take( + self._mask, indexer, fill_value=True, allow_fill=allow_fill, axis=axis + ) + + # if we are filling + # we only fill where the indexer is null + # not existing missing values + # TODO(jreback) what if we have a non-na float as a fill value? 
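+
+        # For example (values arbitrary, sketch only): with allow_fill=True a
+        # -1 index yields NA rather than "take from the end":
+        #   >>> pd.array([10, 20, 30], dtype="Int64").take([0, -1], allow_fill=True)
+        #   <IntegerArray>
+        #   [10, <NA>]
+        #   Length: 2, dtype: Int64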
+ if allow_fill and notna(fill_value): + fill_mask = np.asarray(indexer) == -1 + result[fill_mask] = fill_value + mask = mask ^ fill_mask + + return self._simple_new(result, mask) + + # error: Return type "BooleanArray" of "isin" incompatible with return type + # "ndarray" in supertype "ExtensionArray" + def isin(self, values) -> BooleanArray: # type: ignore[override] + from pandas.core.arrays import BooleanArray + + # algorithms.isin will eventually convert values to an ndarray, so no extra + # cost to doing it here first + values_arr = np.asarray(values) + result = isin(self._data, values_arr) + + if self._hasna: + values_have_NA = values_arr.dtype == object and any( + val is self.dtype.na_value for val in values_arr + ) + + # For now, NA does not propagate so set result according to presence of NA, + # see https://github.com/pandas-dev/pandas/pull/38379 for some discussion + result[self._mask] = values_have_NA + + mask = np.zeros(self._data.shape, dtype=bool) + return BooleanArray(result, mask, copy=False) + + def copy(self) -> Self: + data = self._data.copy() + mask = self._mask.copy() + return self._simple_new(data, mask) + + def unique(self) -> Self: + """ + Compute the BaseMaskedArray of unique values. + + Returns + ------- + uniques : BaseMaskedArray + """ + uniques, mask = algos.unique_with_mask(self._data, self._mask) + return self._simple_new(uniques, mask) + + @doc(ExtensionArray.searchsorted) + def searchsorted( + self, + value: NumpyValueArrayLike | ExtensionArray, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + if self._hasna: + raise ValueError( + "searchsorted requires array to be sorted, which is impossible " + "with NAs present." + ) + if isinstance(value, ExtensionArray): + value = value.astype(object) + # Base class searchsorted would cast to object, which is *much* slower. + return self._data.searchsorted(value, side=side, sorter=sorter) + + @doc(ExtensionArray.factorize) + def factorize( + self, + use_na_sentinel: bool = True, + ) -> tuple[np.ndarray, ExtensionArray]: + arr = self._data + mask = self._mask + + # Use a sentinel for na; recode and add NA to uniques if necessary below + codes, uniques = factorize_array(arr, use_na_sentinel=True, mask=mask) + + # check that factorize_array correctly preserves dtype. + assert uniques.dtype == self.dtype.numpy_dtype, (uniques.dtype, self.dtype) + + has_na = mask.any() + if use_na_sentinel or not has_na: + size = len(uniques) + else: + # Make room for an NA value + size = len(uniques) + 1 + uniques_mask = np.zeros(size, dtype=bool) + if not use_na_sentinel and has_na: + na_index = mask.argmax() + # Insert na with the proper code + if na_index == 0: + na_code = np.intp(0) + else: + na_code = codes[:na_index].max() + 1 + codes[codes >= na_code] += 1 + codes[codes == -1] = na_code + # dummy value for uniques; not used since uniques_mask will be True + uniques = np.insert(uniques, na_code, 0) + uniques_mask[na_code] = True + uniques_ea = self._simple_new(uniques, uniques_mask) + + return codes, uniques_ea + + @doc(ExtensionArray._values_for_argsort) + def _values_for_argsort(self) -> np.ndarray: + return self._data + + def value_counts(self, dropna: bool = True) -> Series: + """ + Returns a Series containing counts of each unique value. + + Parameters + ---------- + dropna : bool, default True + Don't include counts of missing values. 
+ + Returns + ------- + counts : Series + + See Also + -------- + Series.value_counts + """ + from pandas import ( + Index, + Series, + ) + from pandas.arrays import IntegerArray + + keys, value_counts = algos.value_counts_arraylike( + self._data, dropna=True, mask=self._mask + ) + + if dropna: + res = Series(value_counts, index=keys, name="count", copy=False) + res.index = res.index.astype(self.dtype) + res = res.astype("Int64") + return res + + # if we want nans, count the mask + counts = np.empty(len(value_counts) + 1, dtype="int64") + counts[:-1] = value_counts + counts[-1] = self._mask.sum() + + index = Index(keys, dtype=self.dtype).insert(len(keys), self.dtype.na_value) + index = index.astype(self.dtype) + + mask = np.zeros(len(counts), dtype="bool") + counts_array = IntegerArray(counts, mask) + + return Series(counts_array, index=index, name="count", copy=False) + + @doc(ExtensionArray.equals) + def equals(self, other) -> bool: + if type(self) != type(other): + return False + if other.dtype != self.dtype: + return False + + # GH#44382 if e.g. self[1] is np.nan and other[1] is pd.NA, we are NOT + # equal. + if not np.array_equal(self._mask, other._mask): + return False + + left = self._data[~self._mask] + right = other._data[~other._mask] + return array_equivalent(left, right, strict_nan=True, dtype_equal=True) + + def _quantile( + self, qs: npt.NDArray[np.float64], interpolation: str + ) -> BaseMaskedArray: + """ + Dispatch to quantile_with_mask, needed because we do not have + _from_factorized. + + Notes + ----- + We assume that all impacted cases are 1D-only. + """ + res = quantile_with_mask( + self._data, + mask=self._mask, + # TODO(GH#40932): na_value_for_dtype(self.dtype.numpy_dtype) + # instead of np.nan + fill_value=np.nan, + qs=qs, + interpolation=interpolation, + ) + + if self._hasna: + # Our result mask is all-False unless we are all-NA, in which + # case it is all-True. 
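+            # Editor note: e.g. quantiles of an all-NA Int64 array come back
+            # fully masked (backed by zeros so the integer numpy dtype is
+            # preserved), while a partially-NA array yields an all-False
+            # result mask because the NAs were already dropped by the helper.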
+ if self.ndim == 2: + # I think this should be out_mask=self.isna().all(axis=1) + # but am holding off until we have tests + raise NotImplementedError + if self.isna().all(): + out_mask = np.ones(res.shape, dtype=bool) + + if is_integer_dtype(self.dtype): + # We try to maintain int dtype if possible for not all-na case + # as well + res = np.zeros(res.shape, dtype=self.dtype.numpy_dtype) + else: + out_mask = np.zeros(res.shape, dtype=bool) + else: + out_mask = np.zeros(res.shape, dtype=bool) + return self._maybe_mask_result(res, mask=out_mask) + + # ------------------------------------------------------------------ + # Reductions + + def _reduce( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + if name in {"any", "all", "min", "max", "sum", "prod", "mean", "var", "std"}: + result = getattr(self, name)(skipna=skipna, **kwargs) + else: + # median, skew, kurt, sem + data = self._data + mask = self._mask + op = getattr(nanops, f"nan{name}") + axis = kwargs.pop("axis", None) + result = op(data, axis=axis, skipna=skipna, mask=mask, **kwargs) + + if keepdims: + if isna(result): + return self._wrap_na_result(name=name, axis=0, mask_size=(1,)) + else: + result = result.reshape(1) + mask = np.zeros(1, dtype=bool) + return self._maybe_mask_result(result, mask) + + if isna(result): + return libmissing.NA + else: + return result + + def _wrap_reduction_result(self, name: str, result, *, skipna, axis): + if isinstance(result, np.ndarray): + if skipna: + # we only retain mask for all-NA rows/columns + mask = self._mask.all(axis=axis) + else: + mask = self._mask.any(axis=axis) + + return self._maybe_mask_result(result, mask) + return result + + def _wrap_na_result(self, *, name, axis, mask_size): + mask = np.ones(mask_size, dtype=bool) + + float_dtyp = "float32" if self.dtype == "Float32" else "float64" + if name in ["mean", "median", "var", "std", "skew", "kurt"]: + np_dtype = float_dtyp + elif name in ["min", "max"] or self.dtype.itemsize == 8: + np_dtype = self.dtype.numpy_dtype.name + else: + is_windows_or_32bit = is_platform_windows() or not IS64 + int_dtyp = "int32" if is_windows_or_32bit else "int64" + uint_dtyp = "uint32" if is_windows_or_32bit else "uint64" + np_dtype = {"b": int_dtyp, "i": int_dtyp, "u": uint_dtyp, "f": float_dtyp}[ + self.dtype.kind + ] + + value = np.array([1], dtype=np_dtype) + return self._maybe_mask_result(value, mask=mask) + + def _wrap_min_count_reduction_result( + self, name: str, result, *, skipna, min_count, axis + ): + if min_count == 0 and isinstance(result, np.ndarray): + return self._maybe_mask_result(result, np.zeros(result.shape, dtype=bool)) + return self._wrap_reduction_result(name, result, skipna=skipna, axis=axis) + + def sum( + self, + *, + skipna: bool = True, + min_count: int = 0, + axis: AxisInt | None = 0, + **kwargs, + ): + nv.validate_sum((), kwargs) + + result = masked_reductions.sum( + self._data, + self._mask, + skipna=skipna, + min_count=min_count, + axis=axis, + ) + return self._wrap_min_count_reduction_result( + "sum", result, skipna=skipna, min_count=min_count, axis=axis + ) + + def prod( + self, + *, + skipna: bool = True, + min_count: int = 0, + axis: AxisInt | None = 0, + **kwargs, + ): + nv.validate_prod((), kwargs) + + result = masked_reductions.prod( + self._data, + self._mask, + skipna=skipna, + min_count=min_count, + axis=axis, + ) + return self._wrap_min_count_reduction_result( + "prod", result, skipna=skipna, min_count=min_count, axis=axis + ) + + def mean(self, *, skipna: bool = True, axis: AxisInt | 
None = 0, **kwargs): + nv.validate_mean((), kwargs) + result = masked_reductions.mean( + self._data, + self._mask, + skipna=skipna, + axis=axis, + ) + return self._wrap_reduction_result("mean", result, skipna=skipna, axis=axis) + + def var( + self, *, skipna: bool = True, axis: AxisInt | None = 0, ddof: int = 1, **kwargs + ): + nv.validate_stat_ddof_func((), kwargs, fname="var") + result = masked_reductions.var( + self._data, + self._mask, + skipna=skipna, + axis=axis, + ddof=ddof, + ) + return self._wrap_reduction_result("var", result, skipna=skipna, axis=axis) + + def std( + self, *, skipna: bool = True, axis: AxisInt | None = 0, ddof: int = 1, **kwargs + ): + nv.validate_stat_ddof_func((), kwargs, fname="std") + result = masked_reductions.std( + self._data, + self._mask, + skipna=skipna, + axis=axis, + ddof=ddof, + ) + return self._wrap_reduction_result("std", result, skipna=skipna, axis=axis) + + def min(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs): + nv.validate_min((), kwargs) + result = masked_reductions.min( + self._data, + self._mask, + skipna=skipna, + axis=axis, + ) + return self._wrap_reduction_result("min", result, skipna=skipna, axis=axis) + + def max(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs): + nv.validate_max((), kwargs) + result = masked_reductions.max( + self._data, + self._mask, + skipna=skipna, + axis=axis, + ) + return self._wrap_reduction_result("max", result, skipna=skipna, axis=axis) + + def any(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs): + """ + Return whether any element is truthy. + + Returns False unless there is at least one element that is truthy. + By default, NAs are skipped. If ``skipna=False`` is specified and + missing values are present, similar :ref:`Kleene logic ` + is used as for logical operations. + + .. versionchanged:: 1.4.0 + + Parameters + ---------- + skipna : bool, default True + Exclude NA values. If the entire array is NA and `skipna` is + True, then the result will be False, as for an empty array. + If `skipna` is False, the result will still be True if there is + at least one element that is truthy, otherwise NA will be returned + if there are NA's present. + axis : int, optional, default 0 + **kwargs : any, default None + Additional keywords have no effect but might be accepted for + compatibility with NumPy. + + Returns + ------- + bool or :attr:`pandas.NA` + + See Also + -------- + numpy.any : Numpy version of this method. + BaseMaskedArray.all : Return whether all elements are truthy. 
+
+        Examples
+        --------
+        The result indicates whether any element is truthy (and by default
+        skips NAs):
+
+        >>> pd.array([True, False, True]).any()
+        True
+        >>> pd.array([True, False, pd.NA]).any()
+        True
+        >>> pd.array([False, False, pd.NA]).any()
+        False
+        >>> pd.array([], dtype="boolean").any()
+        False
+        >>> pd.array([pd.NA], dtype="boolean").any()
+        False
+        >>> pd.array([pd.NA], dtype="Float64").any()
+        False
+
+        With ``skipna=False``, the result can be NA if this is logically
+        required (whether ``pd.NA`` is True or False influences the result):
+
+        >>> pd.array([True, False, pd.NA]).any(skipna=False)
+        True
+        >>> pd.array([1, 0, pd.NA]).any(skipna=False)
+        True
+        >>> pd.array([False, False, pd.NA]).any(skipna=False)
+        <NA>
+        >>> pd.array([0, 0, pd.NA]).any(skipna=False)
+        <NA>
+        """
+        nv.validate_any((), kwargs)
+
+        values = self._data.copy()
+        # error: Argument 3 to "putmask" has incompatible type "object";
+        # expected "Union[_SupportsArray[dtype[Any]],
+        # _NestedSequence[_SupportsArray[dtype[Any]]],
+        # bool, int, float, complex, str, bytes,
+        # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]"
+        np.putmask(values, self._mask, self._falsey_value)  # type: ignore[arg-type]
+        result = values.any()
+        if skipna:
+            return result
+        else:
+            if result or len(self) == 0 or not self._mask.any():
+                return result
+            else:
+                return self.dtype.na_value
+
+    def all(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
+        """
+        Return whether all elements are truthy.
+
+        Returns True unless there is at least one element that is falsey.
+        By default, NAs are skipped. If ``skipna=False`` is specified and
+        missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
+        is used as for logical operations.
+
+        .. versionchanged:: 1.4.0
+
+        Parameters
+        ----------
+        skipna : bool, default True
+            Exclude NA values. If the entire array is NA and `skipna` is
+            True, then the result will be True, as for an empty array.
+            If `skipna` is False, the result will still be False if there is
+            at least one element that is falsey, otherwise NA will be returned
+            if there are NA's present.
+        axis : int, optional, default 0
+        **kwargs : any, default None
+            Additional keywords have no effect but might be accepted for
+            compatibility with NumPy.
+
+        Returns
+        -------
+        bool or :attr:`pandas.NA`
+
+        See Also
+        --------
+        numpy.all : Numpy version of this method.
+        BooleanArray.any : Return whether any element is truthy.
+
+        Examples
+        --------
+        The result indicates whether all elements are truthy (and by default
+        skips NAs):
+
+        >>> pd.array([True, True, pd.NA]).all()
+        True
+        >>> pd.array([1, 1, pd.NA]).all()
+        True
+        >>> pd.array([True, False, pd.NA]).all()
+        False
+        >>> pd.array([], dtype="boolean").all()
+        True
+        >>> pd.array([pd.NA], dtype="boolean").all()
+        True
+        >>> pd.array([pd.NA], dtype="Float64").all()
+        True
+
+        With ``skipna=False``, the result can be NA if this is logically
+        required (whether ``pd.NA`` is True or False influences the result):
+
+        >>> pd.array([True, True, pd.NA]).all(skipna=False)
+        <NA>
+        >>> pd.array([1, 1, pd.NA]).all(skipna=False)
+        <NA>
+        >>> pd.array([True, False, pd.NA]).all(skipna=False)
+        False
+        >>> pd.array([1, 0, pd.NA]).all(skipna=False)
+        False
+        """
+        nv.validate_all((), kwargs)
+
+        values = self._data.copy()
+        # error: Argument 3 to "putmask" has incompatible type "object";
+        # expected "Union[_SupportsArray[dtype[Any]],
+        # _NestedSequence[_SupportsArray[dtype[Any]]],
+        # bool, int, float, complex, str, bytes,
+        # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]"
+        np.putmask(values, self._mask, self._truthy_value)  # type: ignore[arg-type]
+        result = values.all(axis=axis)
+
+        if skipna:
+            return result
+        else:
+            if not result or len(self) == 0 or not self._mask.any():
+                return result
+            else:
+                return self.dtype.na_value
+
+    def _accumulate(
+        self, name: str, *, skipna: bool = True, **kwargs
+    ) -> BaseMaskedArray:
+        data = self._data
+        mask = self._mask
+
+        op = getattr(masked_accumulations, name)
+        data, mask = op(data, mask, skipna=skipna, **kwargs)
+
+        return self._simple_new(data, mask)
+
+    # ------------------------------------------------------------------
+    # GroupBy Methods
+
+    def _groupby_op(
+        self,
+        *,
+        how: str,
+        has_dropped_na: bool,
+        min_count: int,
+        ngroups: int,
+        ids: npt.NDArray[np.intp],
+        **kwargs,
+    ):
+        from pandas.core.groupby.ops import WrappedCythonOp
+
+        kind = WrappedCythonOp.get_kind_from_how(how)
+        op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)
+
+        # libgroupby functions are responsible for NOT altering mask
+        mask = self._mask
+        if op.kind != "aggregate":
+            result_mask = mask.copy()
+        else:
+            result_mask = np.zeros(ngroups, dtype=bool)
+
+        res_values = op._cython_op_ndim_compat(
+            self._data,
+            min_count=min_count,
+            ngroups=ngroups,
+            comp_ids=ids,
+            mask=mask,
+            result_mask=result_mask,
+            **kwargs,
+        )
+
+        if op.how == "ohlc":
+            arity = op._cython_arity.get(op.how, 1)
+            result_mask = np.tile(result_mask, (arity, 1)).T
+
+        # res_values should already have the correct dtype, we just need to
+        # wrap in a MaskedArray
+        return self._maybe_mask_result(res_values, result_mask)
+
+
+def transpose_homogeneous_masked_arrays(
+    masked_arrays: Sequence[BaseMaskedArray],
+) -> list[BaseMaskedArray]:
+    """Transpose masked arrays in a list, but faster.
+
+    Input should be a list of 1-dim masked arrays of equal length and all have the
+    same dtype. The caller is responsible for ensuring validity of input data.
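+
+    Conceptually, transposing ``N`` arrays of length ``M`` yields ``M``
+    arrays of length ``N``, one per original position.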
+ """ + masked_arrays = list(masked_arrays) + values = [arr._data.reshape(1, -1) for arr in masked_arrays] + transposed_values = np.concatenate(values, axis=0) + + masks = [arr._mask.reshape(1, -1) for arr in masked_arrays] + transposed_masks = np.concatenate(masks, axis=0) + + dtype = masked_arrays[0].dtype + arr_type = dtype.construct_array_type() + transposed_arrays: list[BaseMaskedArray] = [] + for i in range(transposed_values.shape[1]): + transposed_arr = arr_type(transposed_values[:, i], mask=transposed_masks[:, i]) + transposed_arrays.append(transposed_arr) + + return transposed_arrays diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/numeric.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/numeric.py new file mode 100644 index 00000000..0e86c1ef --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/numeric.py @@ -0,0 +1,278 @@ +from __future__ import annotations + +import numbers +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) + +import numpy as np + +from pandas._libs import ( + lib, + missing as libmissing, +) +from pandas.errors import AbstractMethodError +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.common import ( + is_integer_dtype, + is_string_dtype, + pandas_dtype, +) + +from pandas.core.arrays.masked import ( + BaseMaskedArray, + BaseMaskedDtype, +) + +if TYPE_CHECKING: + from collections.abc import Mapping + + import pyarrow + + from pandas._typing import ( + Dtype, + DtypeObj, + Self, + npt, + ) + + +class NumericDtype(BaseMaskedDtype): + _default_np_dtype: np.dtype + _checker: Callable[[Any], bool] # is_foo_dtype + + def __repr__(self) -> str: + return f"{self.name}Dtype()" + + @cache_readonly + def is_signed_integer(self) -> bool: + return self.kind == "i" + + @cache_readonly + def is_unsigned_integer(self) -> bool: + return self.kind == "u" + + @property + def _is_numeric(self) -> bool: + return True + + def __from_arrow__( + self, array: pyarrow.Array | pyarrow.ChunkedArray + ) -> BaseMaskedArray: + """ + Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray. 
+ """ + import pyarrow + + from pandas.core.arrays.arrow._arrow_utils import ( + pyarrow_array_to_numpy_and_mask, + ) + + array_class = self.construct_array_type() + + pyarrow_type = pyarrow.from_numpy_dtype(self.type) + if not array.type.equals(pyarrow_type) and not pyarrow.types.is_null( + array.type + ): + # test_from_arrow_type_error raise for string, but allow + # through itemsize conversion GH#31896 + rt_dtype = pandas_dtype(array.type.to_pandas_dtype()) + if rt_dtype.kind not in "iuf": + # Could allow "c" or potentially disallow float<->int conversion, + # but at the moment we specifically test that uint<->int works + raise TypeError( + f"Expected array of {self} type, got {array.type} instead" + ) + + array = array.cast(pyarrow_type) + + if isinstance(array, pyarrow.ChunkedArray): + # TODO this "if" can be removed when requiring pyarrow >= 10.0, which fixed + # combine_chunks for empty arrays https://github.com/apache/arrow/pull/13757 + if array.num_chunks == 0: + array = pyarrow.array([], type=array.type) + else: + array = array.combine_chunks() + + data, mask = pyarrow_array_to_numpy_and_mask(array, dtype=self.numpy_dtype) + return array_class(data.copy(), ~mask, copy=False) + + @classmethod + def _get_dtype_mapping(cls) -> Mapping[np.dtype, NumericDtype]: + raise AbstractMethodError(cls) + + @classmethod + def _standardize_dtype(cls, dtype: NumericDtype | str | np.dtype) -> NumericDtype: + """ + Convert a string representation or a numpy dtype to NumericDtype. + """ + if isinstance(dtype, str) and (dtype.startswith(("Int", "UInt", "Float"))): + # Avoid DeprecationWarning from NumPy about np.dtype("Int64") + # https://github.com/numpy/numpy/pull/7476 + dtype = dtype.lower() + + if not isinstance(dtype, NumericDtype): + mapping = cls._get_dtype_mapping() + try: + dtype = mapping[np.dtype(dtype)] + except KeyError as err: + raise ValueError(f"invalid dtype specified {dtype}") from err + return dtype + + @classmethod + def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray: + """ + Safely cast the values to the given dtype. + + "safe" in this context means the casting is lossless. 
+ """ + raise AbstractMethodError(cls) + + +def _coerce_to_data_and_mask(values, mask, dtype, copy, dtype_cls, default_dtype): + checker = dtype_cls._checker + + inferred_type = None + + if dtype is None and hasattr(values, "dtype"): + if checker(values.dtype): + dtype = values.dtype + + if dtype is not None: + dtype = dtype_cls._standardize_dtype(dtype) + + cls = dtype_cls.construct_array_type() + if isinstance(values, cls): + values, mask = values._data, values._mask + if dtype is not None: + values = values.astype(dtype.numpy_dtype, copy=False) + + if copy: + values = values.copy() + mask = mask.copy() + return values, mask, dtype, inferred_type + + original = values + values = np.array(values, copy=copy) + inferred_type = None + if values.dtype == object or is_string_dtype(values.dtype): + inferred_type = lib.infer_dtype(values, skipna=True) + if inferred_type == "boolean" and dtype is None: + name = dtype_cls.__name__.strip("_") + raise TypeError(f"{values.dtype} cannot be converted to {name}") + + elif values.dtype.kind == "b" and checker(dtype): + values = np.array(values, dtype=default_dtype, copy=copy) + + elif values.dtype.kind not in "iuf": + name = dtype_cls.__name__.strip("_") + raise TypeError(f"{values.dtype} cannot be converted to {name}") + + if values.ndim != 1: + raise TypeError("values must be a 1D list-like") + + if mask is None: + if values.dtype.kind in "iu": + # fastpath + mask = np.zeros(len(values), dtype=np.bool_) + else: + mask = libmissing.is_numeric_na(values) + else: + assert len(mask) == len(values) + + if mask.ndim != 1: + raise TypeError("mask must be a 1D list-like") + + # infer dtype if needed + if dtype is None: + dtype = default_dtype + else: + dtype = dtype.type + + if is_integer_dtype(dtype) and values.dtype.kind == "f" and len(values) > 0: + if mask.all(): + values = np.ones(values.shape, dtype=dtype) + else: + idx = np.nanargmax(values) + if int(values[idx]) != original[idx]: + # We have ints that lost precision during the cast. + inferred_type = lib.infer_dtype(original, skipna=True) + if ( + inferred_type not in ["floating", "mixed-integer-float"] + and not mask.any() + ): + values = np.array(original, dtype=dtype, copy=False) + else: + values = np.array(original, dtype="object", copy=False) + + # we copy as need to coerce here + if mask.any(): + values = values.copy() + values[mask] = cls._internal_fill_value + if inferred_type in ("string", "unicode"): + # casts from str are always safe since they raise + # a ValueError if the str cannot be parsed into a float + values = values.astype(dtype, copy=copy) + else: + values = dtype_cls._safe_cast(values, dtype, copy=False) + + return values, mask, dtype, inferred_type + + +class NumericArray(BaseMaskedArray): + """ + Base class for IntegerArray and FloatingArray. + """ + + _dtype_cls: type[NumericDtype] + + def __init__( + self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False + ) -> None: + checker = self._dtype_cls._checker + if not (isinstance(values, np.ndarray) and checker(values.dtype)): + descr = ( + "floating" + if self._dtype_cls.kind == "f" # type: ignore[comparison-overlap] + else "integer" + ) + raise TypeError( + f"values should be {descr} numpy array. 
Use " + "the 'pd.array' function instead" + ) + if values.dtype == np.float16: + # If we don't raise here, then accessing self.dtype would raise + raise TypeError("FloatingArray does not support np.float16 dtype.") + + super().__init__(values, mask, copy=copy) + + @cache_readonly + def dtype(self) -> NumericDtype: + mapping = self._dtype_cls._get_dtype_mapping() + return mapping[self._data.dtype] + + @classmethod + def _coerce_to_array( + cls, value, *, dtype: DtypeObj, copy: bool = False + ) -> tuple[np.ndarray, np.ndarray]: + dtype_cls = cls._dtype_cls + default_dtype = dtype_cls._default_np_dtype + mask = None + values, mask, _, _ = _coerce_to_data_and_mask( + value, mask, dtype, copy, dtype_cls, default_dtype + ) + return values, mask + + @classmethod + def _from_sequence_of_strings( + cls, strings, *, dtype: Dtype | None = None, copy: bool = False + ) -> Self: + from pandas.core.tools.numeric import to_numeric + + scalars = to_numeric(strings, errors="raise", dtype_backend="numpy_nullable") + return cls._from_sequence(scalars, dtype=dtype, copy=copy) + + _HANDLED_TYPES = (np.ndarray, numbers.Number) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/numpy_.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/numpy_.py new file mode 100644 index 00000000..efe0c0df --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/numpy_.py @@ -0,0 +1,566 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Literal, +) + +import numpy as np + +from pandas._libs import lib +from pandas._libs.tslibs import ( + get_unit_from_dtype, + is_supported_unit, +) +from pandas.compat.numpy import function as nv + +from pandas.core.dtypes.astype import astype_array +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.dtypes import NumpyEADtype +from pandas.core.dtypes.missing import isna + +from pandas.core import ( + arraylike, + missing, + nanops, + ops, +) +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray +from pandas.core.construction import ensure_wrapped_if_datetimelike +from pandas.core.strings.object_array import ObjectStringArrayMixin + +if TYPE_CHECKING: + from pandas._typing import ( + AxisInt, + Dtype, + FillnaOptions, + InterpolateOptions, + NpDtype, + Scalar, + Self, + npt, + ) + + from pandas import Index + + +# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is +# incompatible with definition in base class "ExtensionArray" +class NumpyExtensionArray( # type: ignore[misc] + OpsMixin, + NDArrayBackedExtensionArray, + ObjectStringArrayMixin, +): + """ + A pandas ExtensionArray for NumPy data. + + This is mostly for internal compatibility, and is not especially + useful on its own. + + Parameters + ---------- + values : ndarray + The NumPy ndarray to wrap. Must be 1-dimensional. + copy : bool, default False + Whether to copy `values`. + + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> pd.arrays.NumpyExtensionArray(np.array([0, 1, 2, 3])) + + [0, 1, 2, 3] + Length: 4, dtype: int64 + """ + + # If you're wondering why pd.Series(cls) doesn't put the array in an + # ExtensionBlock, search for `ABCNumpyExtensionArray`. We check for + # that _typ to ensure that users don't unnecessarily use EAs inside + # pandas internals, which turns off things like block consolidation. 
+ _typ = "npy_extension" + __array_priority__ = 1000 + _ndarray: np.ndarray + _dtype: NumpyEADtype + _internal_fill_value = np.nan + + # ------------------------------------------------------------------------ + # Constructors + + def __init__( + self, values: np.ndarray | NumpyExtensionArray, copy: bool = False + ) -> None: + if isinstance(values, type(self)): + values = values._ndarray + if not isinstance(values, np.ndarray): + raise ValueError( + f"'values' must be a NumPy array, not {type(values).__name__}" + ) + + if values.ndim == 0: + # Technically we support 2, but do not advertise that fact. + raise ValueError("NumpyExtensionArray must be 1-dimensional.") + + if copy: + values = values.copy() + + dtype = NumpyEADtype(values.dtype) + super().__init__(values, dtype) + + @classmethod + def _from_sequence( + cls, scalars, *, dtype: Dtype | None = None, copy: bool = False + ) -> NumpyExtensionArray: + if isinstance(dtype, NumpyEADtype): + dtype = dtype._dtype + + # error: Argument "dtype" to "asarray" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], dtype[floating[_64Bit]], Type[object], + # None]"; expected "Union[dtype[Any], None, type, _SupportsDType, str, + # Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any], + # _DTypeDict, Tuple[Any, Any]]]" + result = np.asarray(scalars, dtype=dtype) # type: ignore[arg-type] + if ( + result.ndim > 1 + and not hasattr(scalars, "dtype") + and (dtype is None or dtype == object) + ): + # e.g. list-of-tuples + result = construct_1d_object_array_from_listlike(scalars) + + if copy and result is scalars: + result = result.copy() + return cls(result) + + def _from_backing_data(self, arr: np.ndarray) -> NumpyExtensionArray: + return type(self)(arr) + + # ------------------------------------------------------------------------ + # Data + + @property + def dtype(self) -> NumpyEADtype: + return self._dtype + + # ------------------------------------------------------------------------ + # NumPy Array Interface + + def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: + return np.asarray(self._ndarray, dtype=dtype) + + def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + # Lightly modified version of + # https://numpy.org/doc/stable/reference/generated/numpy.lib.mixins.NDArrayOperatorsMixin.html + # The primary modification is not boxing scalar return values + # in NumpyExtensionArray, since pandas' ExtensionArrays are 1-d. + out = kwargs.get("out", ()) + + result = arraylike.maybe_dispatch_ufunc_to_dunder_op( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + if "out" in kwargs: + # e.g. test_ufunc_unary + return arraylike.dispatch_ufunc_with_out( + self, ufunc, method, *inputs, **kwargs + ) + + if method == "reduce": + result = arraylike.dispatch_reduction_ufunc( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + # e.g. tests.series.test_ufunc.TestNumpyReductions + return result + + # Defer to the implementation of the ufunc on unwrapped values. 
+ inputs = tuple( + x._ndarray if isinstance(x, NumpyExtensionArray) else x for x in inputs + ) + if out: + kwargs["out"] = tuple( + x._ndarray if isinstance(x, NumpyExtensionArray) else x for x in out + ) + result = getattr(ufunc, method)(*inputs, **kwargs) + + if ufunc.nout > 1: + # multiple return values; re-box array-like results + return tuple(type(self)(x) for x in result) + elif method == "at": + # no return value + return None + elif method == "reduce": + if isinstance(result, np.ndarray): + # e.g. test_np_reduce_2d + return type(self)(result) + + # e.g. test_np_max_nested_tuples + return result + else: + # one return value; re-box array-like results + return type(self)(result) + + # ------------------------------------------------------------------------ + # Pandas ExtensionArray Interface + + def astype(self, dtype, copy: bool = True): + dtype = pandas_dtype(dtype) + + if dtype == self.dtype: + if copy: + return self.copy() + return self + + result = astype_array(self._ndarray, dtype=dtype, copy=copy) + return result + + def isna(self) -> np.ndarray: + return isna(self._ndarray) + + def _validate_scalar(self, fill_value): + if fill_value is None: + # Primarily for subclasses + fill_value = self.dtype.na_value + return fill_value + + def _values_for_factorize(self) -> tuple[np.ndarray, float | None]: + if self.dtype.kind in "iub": + fv = None + else: + fv = np.nan + return self._ndarray, fv + + # Base EA class (and all other EA classes) don't have limit_area keyword + # This can be removed here as well when the interpolate ffill/bfill method + # deprecation is enforced + def _pad_or_backfill( + self, + *, + method: FillnaOptions, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + copy: bool = True, + ) -> Self: + """ + ffill or bfill along axis=0. + """ + if copy: + out_data = self._ndarray.copy() + else: + out_data = self._ndarray + + meth = missing.clean_fill_method(method) + missing.pad_or_backfill_inplace( + out_data.T, + method=meth, + axis=0, + limit=limit, + limit_area=limit_area, + ) + + if not copy: + return self + return type(self)._simple_new(out_data, dtype=self.dtype) + + def interpolate( + self, + *, + method: InterpolateOptions, + axis: int, + index: Index, + limit, + limit_direction, + limit_area, + copy: bool, + **kwargs, + ) -> Self: + """ + See NDFrame.interpolate.__doc__. + """ + # NB: we return type(self) even if copy=False + if not copy: + out_data = self._ndarray + else: + out_data = self._ndarray.copy() + + # TODO: assert we have floating dtype? 
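+        # interpolate_2d_inplace mutates out_data in place; with copy=False
+        # that is self._ndarray, so the original array is updated directly.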
+ missing.interpolate_2d_inplace( + out_data, + method=method, + axis=axis, + index=index, + limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + **kwargs, + ) + if not copy: + return self + return type(self)._simple_new(out_data, dtype=self.dtype) + + # ------------------------------------------------------------------------ + # Reductions + + def any( + self, + *, + axis: AxisInt | None = None, + out=None, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_any((), {"out": out, "keepdims": keepdims}) + result = nanops.nanany(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + def all( + self, + *, + axis: AxisInt | None = None, + out=None, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_all((), {"out": out, "keepdims": keepdims}) + result = nanops.nanall(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + def min( + self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs + ) -> Scalar: + nv.validate_min((), kwargs) + result = nanops.nanmin( + values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna + ) + return self._wrap_reduction_result(axis, result) + + def max( + self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs + ) -> Scalar: + nv.validate_max((), kwargs) + result = nanops.nanmax( + values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna + ) + return self._wrap_reduction_result(axis, result) + + def sum( + self, + *, + axis: AxisInt | None = None, + skipna: bool = True, + min_count: int = 0, + **kwargs, + ) -> Scalar: + nv.validate_sum((), kwargs) + result = nanops.nansum( + self._ndarray, axis=axis, skipna=skipna, min_count=min_count + ) + return self._wrap_reduction_result(axis, result) + + def prod( + self, + *, + axis: AxisInt | None = None, + skipna: bool = True, + min_count: int = 0, + **kwargs, + ) -> Scalar: + nv.validate_prod((), kwargs) + result = nanops.nanprod( + self._ndarray, axis=axis, skipna=skipna, min_count=min_count + ) + return self._wrap_reduction_result(axis, result) + + def mean( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_mean((), {"dtype": dtype, "out": out, "keepdims": keepdims}) + result = nanops.nanmean(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + def median( + self, + *, + axis: AxisInt | None = None, + out=None, + overwrite_input: bool = False, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_median( + (), {"out": out, "overwrite_input": overwrite_input, "keepdims": keepdims} + ) + result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + def std( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + ddof: int = 1, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_stat_ddof_func( + (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std" + ) + result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof) + return self._wrap_reduction_result(axis, result) + + def var( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + ddof: int = 1, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_stat_ddof_func( + (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="var" + ) + result = 
nanops.nanvar(self._ndarray, axis=axis, skipna=skipna, ddof=ddof) + return self._wrap_reduction_result(axis, result) + + def sem( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + ddof: int = 1, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_stat_ddof_func( + (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="sem" + ) + result = nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof) + return self._wrap_reduction_result(axis, result) + + def kurt( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_stat_ddof_func( + (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="kurt" + ) + result = nanops.nankurt(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + def skew( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_stat_ddof_func( + (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="skew" + ) + result = nanops.nanskew(self._ndarray, axis=axis, skipna=skipna) + return self._wrap_reduction_result(axis, result) + + # ------------------------------------------------------------------------ + # Additional Methods + + def to_numpy( + self, + dtype: npt.DTypeLike | None = None, + copy: bool = False, + na_value: object = lib.no_default, + ) -> np.ndarray: + mask = self.isna() + if na_value is not lib.no_default and mask.any(): + result = self._ndarray.copy() + result[mask] = na_value + else: + result = self._ndarray + + result = np.asarray(result, dtype=dtype) + + if copy and result is self._ndarray: + result = result.copy() + + return result + + # ------------------------------------------------------------------------ + # Ops + + def __invert__(self) -> NumpyExtensionArray: + return type(self)(~self._ndarray) + + def __neg__(self) -> NumpyExtensionArray: + return type(self)(-self._ndarray) + + def __pos__(self) -> NumpyExtensionArray: + return type(self)(+self._ndarray) + + def __abs__(self) -> NumpyExtensionArray: + return type(self)(abs(self._ndarray)) + + def _cmp_method(self, other, op): + if isinstance(other, NumpyExtensionArray): + other = other._ndarray + + other = ops.maybe_prepare_scalar_for_op(other, (len(self),)) + pd_op = ops.get_array_op(op) + other = ensure_wrapped_if_datetimelike(other) + result = pd_op(self._ndarray, other) + + if op is divmod or op is ops.rdivmod: + a, b = result + if isinstance(a, np.ndarray): + # for e.g. op vs TimedeltaArray, we may already + # have an ExtensionArray, in which case we do not wrap + return self._wrap_ndarray_result(a), self._wrap_ndarray_result(b) + return a, b + + if isinstance(result, np.ndarray): + # for e.g. 
multiplication vs TimedeltaArray, we may already + # have an ExtensionArray, in which case we do not wrap + return self._wrap_ndarray_result(result) + return result + + _arith_method = _cmp_method + + def _wrap_ndarray_result(self, result: np.ndarray): + # If we have timedelta64[ns] result, return a TimedeltaArray instead + # of a NumpyExtensionArray + if result.dtype.kind == "m" and is_supported_unit( + get_unit_from_dtype(result.dtype) + ): + from pandas.core.arrays import TimedeltaArray + + return TimedeltaArray._simple_new(result, dtype=result.dtype) + return type(self)(result) + + # ------------------------------------------------------------------------ + # String methods interface + _str_na_value = np.nan diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/period.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/period.py new file mode 100644 index 00000000..a2e4b595 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/period.py @@ -0,0 +1,1286 @@ +from __future__ import annotations + +from datetime import timedelta +import operator +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + TypeVar, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import ( + algos as libalgos, + lib, +) +from pandas._libs.arrays import NDArrayBacked +from pandas._libs.tslibs import ( + BaseOffset, + NaT, + NaTType, + Timedelta, + astype_overflowsafe, + dt64arr_to_periodarr as c_dt64arr_to_periodarr, + get_unit_from_dtype, + iNaT, + parsing, + period as libperiod, + to_offset, +) +from pandas._libs.tslibs.dtypes import FreqGroup +from pandas._libs.tslibs.fields import isleapyear_arr +from pandas._libs.tslibs.offsets import ( + Tick, + delta_to_tick, +) +from pandas._libs.tslibs.period import ( + DIFFERENT_FREQ, + IncompatibleFrequency, + Period, + get_period_field_arr, + period_asfreq_arr, +) +from pandas.util._decorators import ( + cache_readonly, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + ensure_object, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCPeriodIndex, + ABCSeries, + ABCTimedeltaArray, +) +from pandas.core.dtypes.missing import isna + +import pandas.core.algorithms as algos +from pandas.core.arrays import datetimelike as dtl +import pandas.core.common as com + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + AnyArrayLike, + Dtype, + FillnaOptions, + NpDtype, + NumpySorter, + NumpyValueArrayLike, + Self, + npt, + ) + + from pandas.core.arrays import ( + DatetimeArray, + TimedeltaArray, + ) + from pandas.core.arrays.base import ExtensionArray + + +BaseOffsetT = TypeVar("BaseOffsetT", bound=BaseOffset) + + +_shared_doc_kwargs = { + "klass": "PeriodArray", +} + + +def _field_accessor(name: str, docstring: str | None = None): + def f(self): + base = self.dtype._dtype_code + result = get_period_field_arr(name, self.asi8, base) + return result + + f.__name__ = name + f.__doc__ = docstring + return property(f) + + +# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is +# incompatible with definition in base class "ExtensionArray" +class PeriodArray(dtl.DatelikeOps, libperiod.PeriodMixin): # type: ignore[misc] + """ + Pandas ExtensionArray for storing Period data. + + Users should use :func:`~pandas.array` to create new instances. 
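+    For example, ``pd.array(["2023-01-01"], dtype="period[D]")`` constructs
+    a PeriodArray without calling the class directly.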
+ + Parameters + ---------- + values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex] + The data to store. These should be arrays that can be directly + converted to ordinals without inference or copy (PeriodArray, + ndarray[int64]), or a box around such an array (Series[period], + PeriodIndex). + dtype : PeriodDtype, optional + A PeriodDtype instance from which to extract a `freq`. If both + `freq` and `dtype` are specified, then the frequencies must match. + freq : str or DateOffset + The `freq` to use for the array. Mostly applicable when `values` + is an ndarray of integers, when `freq` is required. When `values` + is a PeriodArray (or box around), it's checked that ``values.freq`` + matches `freq`. + copy : bool, default False + Whether to copy the ordinals before storing. + + Attributes + ---------- + None + + Methods + ------- + None + + See Also + -------- + Period: Represents a period of time. + PeriodIndex : Immutable Index for period data. + period_range: Create a fixed-frequency PeriodArray. + array: Construct a pandas array. + + Notes + ----- + There are two components to a PeriodArray + + - ordinals : integer ndarray + - freq : pd.tseries.offsets.Offset + + The values are physically stored as a 1-D ndarray of integers. These are + called "ordinals" and represent some kind of offset from a base. + + The `freq` indicates the span covered by each element of the array. + All elements in the PeriodArray have the same `freq`. + + Examples + -------- + >>> pd.arrays.PeriodArray(pd.PeriodIndex(['2023-01-01', + ... '2023-01-02'], freq='D')) + + ['2023-01-01', '2023-01-02'] + Length: 2, dtype: period[D] + """ + + # array priority higher than numpy scalars + __array_priority__ = 1000 + _typ = "periodarray" # ABCPeriodArray + _internal_fill_value = np.int64(iNaT) + _recognized_scalars = (Period,) + _is_recognized_dtype = lambda x: isinstance( + x, PeriodDtype + ) # check_compatible_with checks freq match + _infer_matches = ("period",) + + @property + def _scalar_type(self) -> type[Period]: + return Period + + # Names others delegate to us + _other_ops: list[str] = [] + _bool_ops: list[str] = ["is_leap_year"] + _object_ops: list[str] = ["start_time", "end_time", "freq"] + _field_ops: list[str] = [ + "year", + "month", + "day", + "hour", + "minute", + "second", + "weekofyear", + "weekday", + "week", + "dayofweek", + "day_of_week", + "dayofyear", + "day_of_year", + "quarter", + "qyear", + "days_in_month", + "daysinmonth", + ] + _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _datetimelike_methods: list[str] = ["strftime", "to_timestamp", "asfreq"] + + _dtype: PeriodDtype + + # -------------------------------------------------------------------- + # Constructors + + def __init__( + self, values, dtype: Dtype | None = None, freq=None, copy: bool = False + ) -> None: + if freq is not None: + # GH#52462 + warnings.warn( + "The 'freq' keyword in the PeriodArray constructor is deprecated " + "and will be removed in a future version. 
Pass 'dtype' instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + freq = validate_dtype_freq(dtype, freq) + dtype = PeriodDtype(freq) + + if dtype is not None: + dtype = pandas_dtype(dtype) + if not isinstance(dtype, PeriodDtype): + raise ValueError(f"Invalid dtype {dtype} for PeriodArray") + + if isinstance(values, ABCSeries): + values = values._values + if not isinstance(values, type(self)): + raise TypeError("Incorrect dtype") + + elif isinstance(values, ABCPeriodIndex): + values = values._values + + if isinstance(values, type(self)): + if dtype is not None and dtype != values.dtype: + raise raise_on_incompatible(values, dtype.freq) + values, dtype = values._ndarray, values.dtype + + values = np.array(values, dtype="int64", copy=copy) + if dtype is None: + raise ValueError("dtype is not specified and cannot be inferred") + dtype = cast(PeriodDtype, dtype) + NDArrayBacked.__init__(self, values, dtype) + + # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked" + @classmethod + def _simple_new( # type: ignore[override] + cls, + values: npt.NDArray[np.int64], + dtype: PeriodDtype, + ) -> Self: + # alias for PeriodArray.__init__ + assertion_msg = "Should be numpy array of type i8" + assert isinstance(values, np.ndarray) and values.dtype == "i8", assertion_msg + return cls(values, dtype=dtype) + + @classmethod + def _from_sequence( + cls, + scalars, + *, + dtype: Dtype | None = None, + copy: bool = False, + ) -> Self: + if dtype is not None: + dtype = pandas_dtype(dtype) + if dtype and isinstance(dtype, PeriodDtype): + freq = dtype.freq + else: + freq = None + + if isinstance(scalars, cls): + validate_dtype_freq(scalars.dtype, freq) + if copy: + scalars = scalars.copy() + return scalars + + periods = np.asarray(scalars, dtype=object) + + freq = freq or libperiod.extract_freq(periods) + ordinals = libperiod.extract_ordinals(periods, freq) + dtype = PeriodDtype(freq) + return cls(ordinals, dtype=dtype) + + @classmethod + def _from_sequence_of_strings( + cls, strings, *, dtype: Dtype | None = None, copy: bool = False + ) -> Self: + return cls._from_sequence(strings, dtype=dtype, copy=copy) + + @classmethod + def _from_datetime64(cls, data, freq, tz=None) -> Self: + """ + Construct a PeriodArray from a datetime64 array + + Parameters + ---------- + data : ndarray[datetime64[ns], datetime64[ns, tz]] + freq : str or Tick + tz : tzinfo, optional + + Returns + ------- + PeriodArray[freq] + """ + data, freq = dt64arr_to_periodarr(data, freq, tz) + dtype = PeriodDtype(freq) + return cls(data, dtype=dtype) + + @classmethod + def _generate_range(cls, start, end, periods, freq, fields): + periods = dtl.validate_periods(periods) + + if freq is not None: + freq = Period._maybe_convert_freq(freq) + + field_count = len(fields) + if start is not None or end is not None: + if field_count > 0: + raise ValueError( + "Can either instantiate from fields or endpoints, but not both" + ) + subarr, freq = _get_ordinal_range(start, end, periods, freq) + elif field_count > 0: + subarr, freq = _range_from_fields(freq=freq, **fields) + else: + raise ValueError("Not enough parameters to construct Period range") + + return subarr, freq + + # ----------------------------------------------------------------- + # DatetimeLike Interface + + # error: Argument 1 of "_unbox_scalar" is incompatible with supertype + # "DatetimeLikeArrayMixin"; supertype defines the argument type as + # "Union[Union[Period, Any, Timedelta], NaTType]" + def _unbox_scalar( # type: ignore[override] + self, + value: 
Period | NaTType, + ) -> np.int64: + if value is NaT: + # error: Item "Period" of "Union[Period, NaTType]" has no attribute "value" + return np.int64(value._value) # type: ignore[union-attr] + elif isinstance(value, self._scalar_type): + self._check_compatible_with(value) + return np.int64(value.ordinal) + else: + raise ValueError(f"'value' should be a Period. Got '{value}' instead.") + + def _scalar_from_string(self, value: str) -> Period: + return Period(value, freq=self.freq) + + def _check_compatible_with(self, other) -> None: + if other is NaT: + return + self._require_matching_freq(other) + + # -------------------------------------------------------------------- + # Data / Attributes + + @cache_readonly + def dtype(self) -> PeriodDtype: + return self._dtype + + # error: Cannot override writeable attribute with read-only property + @property # type: ignore[override] + def freq(self) -> BaseOffset: + """ + Return the frequency object for this PeriodArray. + """ + return self.dtype.freq + + def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: + if dtype == "i8": + return self.asi8 + elif dtype == bool: + return ~self._isnan + + # This will raise TypeError for non-object dtypes + return np.array(list(self), dtype=object) + + def __arrow_array__(self, type=None): + """ + Convert myself into a pyarrow Array. + """ + import pyarrow + + from pandas.core.arrays.arrow.extension_types import ArrowPeriodType + + if type is not None: + if pyarrow.types.is_integer(type): + return pyarrow.array(self._ndarray, mask=self.isna(), type=type) + elif isinstance(type, ArrowPeriodType): + # ensure we have the same freq + if self.freqstr != type.freq: + raise TypeError( + "Not supported to convert PeriodArray to array with different " + f"'freq' ({self.freqstr} vs {type.freq})" + ) + else: + raise TypeError( + f"Not supported to convert PeriodArray to '{type}' type" + ) + + period_type = ArrowPeriodType(self.freqstr) + storage_array = pyarrow.array(self._ndarray, mask=self.isna(), type="int64") + return pyarrow.ExtensionArray.from_storage(period_type, storage_array) + + # -------------------------------------------------------------------- + # Vectorized analogues of Period properties + + year = _field_accessor( + "year", + """ + The year of the period. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y") + >>> idx.year + Index([2023, 2024, 2025], dtype='int64') + """, + ) + month = _field_accessor( + "month", + """ + The month as January=1, December=12. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M") + >>> idx.month + Index([1, 2, 3], dtype='int64') + """, + ) + day = _field_accessor( + "day", + """ + The days of the period. + + Examples + -------- + >>> idx = pd.PeriodIndex(['2020-01-31', '2020-02-28'], freq='D') + >>> idx.day + Index([31, 28], dtype='int64') + """, + ) + hour = _field_accessor( + "hour", + """ + The hour of the period. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01-01 10:00", "2023-01-01 11:00"], freq='H') + >>> idx.hour + Index([10, 11], dtype='int64') + """, + ) + minute = _field_accessor( + "minute", + """ + The minute of the period. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01-01 10:30:00", + ... "2023-01-01 11:50:00"], freq='min') + >>> idx.minute + Index([30, 50], dtype='int64') + """, + ) + second = _field_accessor( + "second", + """ + The second of the period. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01-01 10:00:30", + ... 
"2023-01-01 10:00:31"], freq='s') + >>> idx.second + Index([30, 31], dtype='int64') + """, + ) + weekofyear = _field_accessor( + "week", + """ + The week ordinal of the year. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M") + >>> idx.week # It can be written `weekofyear` + Index([5, 9, 13], dtype='int64') + """, + ) + week = weekofyear + day_of_week = _field_accessor( + "day_of_week", + """ + The day of the week with Monday=0, Sunday=6. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01-01", "2023-01-02", "2023-01-03"], freq="D") + >>> idx.weekday + Index([6, 0, 1], dtype='int64') + """, + ) + dayofweek = day_of_week + weekday = dayofweek + dayofyear = day_of_year = _field_accessor( + "day_of_year", + """ + The ordinal day of the year. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01-10", "2023-02-01", "2023-03-01"], freq="D") + >>> idx.dayofyear + Index([10, 32, 60], dtype='int64') + + >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y") + >>> idx + PeriodIndex(['2023', '2024', '2025'], dtype='period[A-DEC]') + >>> idx.dayofyear + Index([365, 366, 365], dtype='int64') + """, + ) + quarter = _field_accessor( + "quarter", + """ + The quarter of the date. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M") + >>> idx.quarter + Index([1, 1, 1], dtype='int64') + """, + ) + qyear = _field_accessor("qyear") + days_in_month = _field_accessor( + "days_in_month", + """ + The number of days in the month. + + Examples + -------- + For Series: + + >>> period = pd.period_range('2020-1-1 00:00', '2020-3-1 00:00', freq='M') + >>> s = pd.Series(period) + >>> s + 0 2020-01 + 1 2020-02 + 2 2020-03 + dtype: period[M] + >>> s.dt.days_in_month + 0 31 + 1 29 + 2 31 + dtype: int64 + + For PeriodIndex: + + >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M") + >>> idx.days_in_month # It can be also entered as `daysinmonth` + Index([31, 28, 31], dtype='int64') + """, + ) + daysinmonth = days_in_month + + @property + def is_leap_year(self) -> npt.NDArray[np.bool_]: + """ + Logical indicating if the date belongs to a leap year. + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y") + >>> idx.is_leap_year + array([False, True, False]) + """ + return isleapyear_arr(np.asarray(self.year)) + + def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray: + """ + Cast to DatetimeArray/Index. + + Parameters + ---------- + freq : str or DateOffset, optional + Target frequency. The default is 'D' for week or longer, + 'S' otherwise. + how : {'s', 'e', 'start', 'end'} + Whether to use the start or end of the time period being converted. 
+ + Returns + ------- + DatetimeArray/Index + + Examples + -------- + >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M") + >>> idx.to_timestamp() + DatetimeIndex(['2023-01-01', '2023-02-01', '2023-03-01'], + dtype='datetime64[ns]', freq='MS') + """ + from pandas.core.arrays import DatetimeArray + + how = libperiod.validate_end_alias(how) + + end = how == "E" + if end: + if freq == "B" or self.freq == "B": + # roll forward to ensure we land on B date + adjust = Timedelta(1, "D") - Timedelta(1, "ns") + return self.to_timestamp(how="start") + adjust + else: + adjust = Timedelta(1, "ns") + return (self + self.freq).to_timestamp(how="start") - adjust + + if freq is None: + freq = self._dtype._get_to_timestamp_base() + base = freq + else: + freq = Period._maybe_convert_freq(freq) + base = freq._period_dtype_code + + new_parr = self.asfreq(freq, how=how) + + new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base) + dta = DatetimeArray(new_data) + + if self.freq.name == "B": + # See if we can retain BDay instead of Day in cases where + # len(self) is too small for infer_freq to distinguish between them + diffs = libalgos.unique_deltas(self.asi8) + if len(diffs) == 1: + diff = diffs[0] + if diff == self.dtype._n: + dta._freq = self.freq + elif diff == 1: + dta._freq = self.freq.base + # TODO: other cases? + return dta + else: + return dta._with_freq("infer") + + # -------------------------------------------------------------------- + + def _box_func(self, x) -> Period | NaTType: + return Period._from_ordinal(ordinal=x, freq=self.freq) + + @doc(**_shared_doc_kwargs, other="PeriodIndex", other_name="PeriodIndex") + def asfreq(self, freq=None, how: str = "E") -> Self: + """ + Convert the {klass} to the specified frequency `freq`. + + Equivalent to applying :meth:`pandas.Period.asfreq` with the given arguments + to each :class:`~pandas.Period` in this {klass}. + + Parameters + ---------- + freq : str + A frequency. + how : str {{'E', 'S'}}, default 'E' + Whether the elements should be aligned to the end + or start within pa period. + + * 'E', 'END', or 'FINISH' for end, + * 'S', 'START', or 'BEGIN' for start. + + January 31st ('END') vs. January 1st ('START') for example. + + Returns + ------- + {klass} + The transformed {klass} with the new frequency. + + See Also + -------- + {other}.asfreq: Convert each Period in a {other_name} to the given frequency. + Period.asfreq : Convert a :class:`~pandas.Period` object to the given frequency. 
+ + Examples + -------- + >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A') + >>> pidx + PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'], + dtype='period[A-DEC]') + + >>> pidx.asfreq('M') + PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12', + '2015-12'], dtype='period[M]') + + >>> pidx.asfreq('M', how='S') + PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01', + '2015-01'], dtype='period[M]') + """ + how = libperiod.validate_end_alias(how) + + freq = Period._maybe_convert_freq(freq) + + base1 = self._dtype._dtype_code + base2 = freq._period_dtype_code + + asi8 = self.asi8 + # self.freq.n can't be negative or 0 + end = how == "E" + if end: + ordinal = asi8 + self.dtype._n - 1 + else: + ordinal = asi8 + + new_data = period_asfreq_arr(ordinal, base1, base2, end) + + if self._hasna: + new_data[self._isnan] = iNaT + + dtype = PeriodDtype(freq) + return type(self)(new_data, dtype=dtype) + + # ------------------------------------------------------------------ + # Rendering Methods + + def _formatter(self, boxed: bool = False): + if boxed: + return str + return "'{}'".format + + def _format_native_types( + self, *, na_rep: str | float = "NaT", date_format=None, **kwargs + ) -> npt.NDArray[np.object_]: + """ + actually format my specific types + """ + return libperiod.period_array_strftime( + self.asi8, self.dtype._dtype_code, na_rep, date_format + ) + + # ------------------------------------------------------------------ + + def astype(self, dtype, copy: bool = True): + # We handle Period[T] -> Period[U] + # Our parent handles everything else. + dtype = pandas_dtype(dtype) + if dtype == self._dtype: + if not copy: + return self + else: + return self.copy() + if isinstance(dtype, PeriodDtype): + return self.asfreq(dtype.freq) + + if lib.is_np_dtype(dtype, "M") or isinstance(dtype, DatetimeTZDtype): + # GH#45038 match PeriodIndex behavior. 
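+            # e.g. astype("datetime64[ns, US/Eastern]") is handled as
+            # to_timestamp() followed by tz_localize on the result.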
+ tz = getattr(dtype, "tz", None) + return self.to_timestamp().tz_localize(tz) + + return super().astype(dtype, copy=copy) + + def searchsorted( + self, + value: NumpyValueArrayLike | ExtensionArray, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + npvalue = self._validate_setitem_value(value).view("M8[ns]") + + # Cast to M8 to get datetime-like NaT placement, + # similar to dtl._period_dispatch + m8arr = self._ndarray.view("M8[ns]") + return m8arr.searchsorted(npvalue, side=side, sorter=sorter) + + def _pad_or_backfill( + self, *, method: FillnaOptions, limit: int | None = None, copy: bool = True + ) -> Self: + # view as dt64 so we get treated as timelike in core.missing, + # similar to dtl._period_dispatch + dta = self.view("M8[ns]") + result = dta._pad_or_backfill(method=method, limit=limit, copy=copy) + if copy: + return cast("Self", result.view(self.dtype)) + else: + return self + + def fillna( + self, value=None, method=None, limit: int | None = None, copy: bool = True + ) -> Self: + if method is not None: + # view as dt64 so we get treated as timelike in core.missing, + # similar to dtl._period_dispatch + dta = self.view("M8[ns]") + result = dta.fillna(value=value, method=method, limit=limit, copy=copy) + # error: Incompatible return value type (got "Union[ExtensionArray, + # ndarray[Any, Any]]", expected "PeriodArray") + return result.view(self.dtype) # type: ignore[return-value] + return super().fillna(value=value, method=method, limit=limit, copy=copy) + + # ------------------------------------------------------------------ + # Arithmetic Methods + + def _addsub_int_array_or_scalar( + self, other: np.ndarray | int, op: Callable[[Any, Any], Any] + ) -> Self: + """ + Add or subtract array of integers. + + Parameters + ---------- + other : np.ndarray[int64] or int + op : {operator.add, operator.sub} + + Returns + ------- + result : PeriodArray + """ + assert op in [operator.add, operator.sub] + if op is operator.sub: + other = -other + res_values = algos.checked_add_with_arr(self.asi8, other, arr_mask=self._isnan) + return type(self)(res_values, dtype=self.dtype) + + def _add_offset(self, other: BaseOffset): + assert not isinstance(other, Tick) + + self._require_matching_freq(other, base=True) + return self._addsub_int_array_or_scalar(other.n, operator.add) + + # TODO: can we de-duplicate with Period._add_timedeltalike_scalar? + def _add_timedeltalike_scalar(self, other): + """ + Parameters + ---------- + other : timedelta, Tick, np.timedelta64 + + Returns + ------- + PeriodArray + """ + if not isinstance(self.freq, Tick): + # We cannot add timedelta-like to non-tick PeriodArray + raise raise_on_incompatible(self, other) + + if isna(other): + # i.e. np.timedelta64("NaT") + return super()._add_timedeltalike_scalar(other) + + td = np.asarray(Timedelta(other).asm8) + return self._add_timedelta_arraylike(td) + + def _add_timedelta_arraylike( + self, other: TimedeltaArray | npt.NDArray[np.timedelta64] + ) -> Self: + """ + Parameters + ---------- + other : TimedeltaArray or ndarray[timedelta64] + + Returns + ------- + PeriodArray + """ + if not self.dtype._is_tick_like(): + # We cannot add timedelta-like to non-tick PeriodArray + raise TypeError( + f"Cannot add or subtract timedelta64[ns] dtype from {self.dtype}" + ) + + dtype = np.dtype(f"m8[{self.dtype._td64_unit}]") + + # Similar to _check_timedeltalike_freq_compat, but we raise with a + # more specific exception message if necessary. 
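+        # Illustrative example: for an hourly PeriodArray the target dtype
+        # below is "m8[h]", so np.timedelta64(2, "h") shifts each period by
+        # two, while np.timedelta64(30, "m") cannot be cast losslessly and
+        # raises IncompatibleFrequency in the except branch that follows.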
+ try: + delta = astype_overflowsafe( + np.asarray(other), dtype=dtype, copy=False, round_ok=False + ) + except ValueError as err: + # e.g. if we have minutes freq and try to add 30s + # "Cannot losslessly convert units" + raise IncompatibleFrequency( + "Cannot add/subtract timedelta-like from PeriodArray that is " + "not an integer multiple of the PeriodArray's freq." + ) from err + + b_mask = np.isnat(delta) + + res_values = algos.checked_add_with_arr( + self.asi8, delta.view("i8"), arr_mask=self._isnan, b_mask=b_mask + ) + np.putmask(res_values, self._isnan | b_mask, iNaT) + return type(self)(res_values, dtype=self.dtype) + + def _check_timedeltalike_freq_compat(self, other): + """ + Arithmetic operations with timedelta-like scalars or array `other` + are only valid if `other` is an integer multiple of `self.freq`. + If the operation is valid, find that integer multiple. Otherwise, + raise because the operation is invalid. + + Parameters + ---------- + other : timedelta, np.timedelta64, Tick, + ndarray[timedelta64], TimedeltaArray, TimedeltaIndex + + Returns + ------- + multiple : int or ndarray[int64] + + Raises + ------ + IncompatibleFrequency + """ + assert self.dtype._is_tick_like() # checked by calling function + + dtype = np.dtype(f"m8[{self.dtype._td64_unit}]") + + if isinstance(other, (timedelta, np.timedelta64, Tick)): + td = np.asarray(Timedelta(other).asm8) + else: + td = np.asarray(other) + + try: + delta = astype_overflowsafe(td, dtype=dtype, copy=False, round_ok=False) + except ValueError as err: + raise raise_on_incompatible(self, other) from err + + delta = delta.view("i8") + return lib.item_from_zerodim(delta) + + +def raise_on_incompatible(left, right): + """ + Helper function to render a consistent error message when raising + IncompatibleFrequency. + + Parameters + ---------- + left : PeriodArray + right : None, DateOffset, Period, ndarray, or timedelta-like + + Returns + ------- + IncompatibleFrequency + Exception to be raised by the caller. + """ + # GH#24283 error message format depends on whether right is scalar + if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None: + other_freq = None + elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, BaseOffset)): + other_freq = right.freqstr + else: + other_freq = delta_to_tick(Timedelta(right)).freqstr + + msg = DIFFERENT_FREQ.format( + cls=type(left).__name__, own_freq=left.freqstr, other_freq=other_freq + ) + return IncompatibleFrequency(msg) + + +# ------------------------------------------------------------------- +# Constructor Helpers + + +def period_array( + data: Sequence[Period | str | None] | AnyArrayLike, + freq: str | Tick | BaseOffset | None = None, + copy: bool = False, +) -> PeriodArray: + """ + Construct a new PeriodArray from a sequence of Period scalars. + + Parameters + ---------- + data : Sequence of Period objects + A sequence of Period objects. These are required to all have + the same ``freq.`` Missing values can be indicated by ``None`` + or ``pandas.NaT``. + freq : str, Tick, or Offset + The frequency of every element of the array. This can be specified + to avoid inferring the `freq` from `data`. + copy : bool, default False + Whether to ensure a copy of the data is made. + + Returns + ------- + PeriodArray + + See Also + -------- + PeriodArray + pandas.PeriodIndex + + Examples + -------- + >>> period_array([pd.Period('2017', freq='A'), + ... 
pd.Period('2018', freq='A')])
+    <PeriodArray>
+    ['2017', '2018']
+    Length: 2, dtype: period[A-DEC]
+
+    >>> period_array([pd.Period('2017', freq='A'),
+    ...               pd.Period('2018', freq='A'),
+    ...               pd.NaT])
+    <PeriodArray>
+    ['2017', '2018', 'NaT']
+    Length: 3, dtype: period[A-DEC]
+
+    Integers that look like years are handled:
+
+    >>> period_array([2000, 2001, 2002], freq='D')
+    <PeriodArray>
+    ['2000-01-01', '2001-01-01', '2002-01-01']
+    Length: 3, dtype: period[D]
+
+    Datetime-like strings may also be passed:
+
+    >>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
+    <PeriodArray>
+    ['2000Q1', '2000Q2', '2000Q3', '2000Q4']
+    Length: 4, dtype: period[Q-DEC]
+    """
+    data_dtype = getattr(data, "dtype", None)
+
+    if lib.is_np_dtype(data_dtype, "M"):
+        return PeriodArray._from_datetime64(data, freq)
+    if isinstance(data_dtype, PeriodDtype):
+        out = PeriodArray(data)
+        if freq is not None:
+            if freq == data_dtype.freq:
+                return out
+            return out.asfreq(freq)
+        return out
+
+    # other iterable of some kind
+    if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)):
+        data = list(data)
+
+    arrdata = np.asarray(data)
+
+    dtype: PeriodDtype | None
+    if freq:
+        dtype = PeriodDtype(freq)
+    else:
+        dtype = None
+
+    if arrdata.dtype.kind == "f" and len(arrdata) > 0:
+        raise TypeError("PeriodIndex does not allow floating point in construction")
+
+    if arrdata.dtype.kind in "iu":
+        arr = arrdata.astype(np.int64, copy=False)
+        # error: Argument 2 to "from_ordinals" has incompatible type "Union[str,
+        # Tick, None]"; expected "Union[timedelta, BaseOffset, str]"
+        ordinals = libperiod.from_ordinals(arr, freq)  # type: ignore[arg-type]
+        return PeriodArray(ordinals, dtype=dtype)
+
+    data = ensure_object(arrdata)
+
+    return PeriodArray._from_sequence(data, dtype=dtype)
+
+
+@overload
+def validate_dtype_freq(dtype, freq: BaseOffsetT) -> BaseOffsetT:
+    ...
+
+
+@overload
+def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset:
+    ...
+
+
+def validate_dtype_freq(
+    dtype, freq: BaseOffsetT | timedelta | str | None
+) -> BaseOffsetT:
+    """
+    If both a dtype and a freq are available, ensure they match. If only
+    dtype is available, extract the implied freq.
+
+    Parameters
+    ----------
+    dtype : dtype
+    freq : DateOffset or None
+
+    Returns
+    -------
+    freq : DateOffset
+
+    Raises
+    ------
+    ValueError : non-period dtype
+    IncompatibleFrequency : mismatch between dtype and freq
+    """
+    if freq is not None:
+        # error: Incompatible types in assignment (expression has type
+        # "BaseOffset", variable has type "Union[BaseOffsetT, timedelta,
+        # str, None]")
+        freq = to_offset(freq)  # type: ignore[assignment]
+
+    if dtype is not None:
+        dtype = pandas_dtype(dtype)
+        if not isinstance(dtype, PeriodDtype):
+            raise ValueError("dtype must be PeriodDtype")
+        if freq is None:
+            freq = dtype.freq
+        elif freq != dtype.freq:
+            raise IncompatibleFrequency("specified freq and dtype are different")
+    # error: Incompatible return value type (got "Union[BaseOffset, Any, None]",
+    # expected "BaseOffset")
+    return freq  # type: ignore[return-value]
+
+
+def dt64arr_to_periodarr(
+    data, freq, tz=None
+) -> tuple[npt.NDArray[np.int64], BaseOffset]:
+    """
+    Convert a datetime-like array to an array of Period ordinals.
+
+    Parameters
+    ----------
+    data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
+    freq : Optional[Union[str, Tick]]
+        Must match the `freq` on the `data` if `data` is a DatetimeIndex
+        or Series.
+ tz : Optional[tzinfo] + + Returns + ------- + ordinals : ndarray[int64] + freq : Tick + The frequency extracted from the Series or DatetimeIndex if that's + used. + + """ + if not isinstance(data.dtype, np.dtype) or data.dtype.kind != "M": + raise ValueError(f"Wrong dtype: {data.dtype}") + + if freq is None: + if isinstance(data, ABCIndex): + data, freq = data._values, data.freq + elif isinstance(data, ABCSeries): + data, freq = data._values, data.dt.freq + + elif isinstance(data, (ABCIndex, ABCSeries)): + data = data._values + + reso = get_unit_from_dtype(data.dtype) + freq = Period._maybe_convert_freq(freq) + base = freq._period_dtype_code + return c_dt64arr_to_periodarr(data.view("i8"), base, tz, reso=reso), freq + + +def _get_ordinal_range(start, end, periods, freq, mult: int = 1): + if com.count_not_none(start, end, periods) != 2: + raise ValueError( + "Of the three parameters: start, end, and periods, " + "exactly two must be specified" + ) + + if freq is not None: + freq = to_offset(freq) + mult = freq.n + + if start is not None: + start = Period(start, freq) + if end is not None: + end = Period(end, freq) + + is_start_per = isinstance(start, Period) + is_end_per = isinstance(end, Period) + + if is_start_per and is_end_per and start.freq != end.freq: + raise ValueError("start and end must have same freq") + if start is NaT or end is NaT: + raise ValueError("start and end must not be NaT") + + if freq is None: + if is_start_per: + freq = start.freq + elif is_end_per: + freq = end.freq + else: # pragma: no cover + raise ValueError("Could not infer freq from start/end") + mult = freq.n + + if periods is not None: + periods = periods * mult + if start is None: + data = np.arange( + end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64 + ) + else: + data = np.arange( + start.ordinal, start.ordinal + periods, mult, dtype=np.int64 + ) + else: + data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) + + return data, freq + + +def _range_from_fields( + year=None, + month=None, + quarter=None, + day=None, + hour=None, + minute=None, + second=None, + freq=None, +) -> tuple[np.ndarray, BaseOffset]: + if hour is None: + hour = 0 + if minute is None: + minute = 0 + if second is None: + second = 0 + if day is None: + day = 1 + + ordinals = [] + + if quarter is not None: + if freq is None: + freq = to_offset("Q") + base = FreqGroup.FR_QTR.value + else: + freq = to_offset(freq) + base = libperiod.freq_to_dtype_code(freq) + if base != FreqGroup.FR_QTR.value: + raise AssertionError("base must equal FR_QTR") + + freqstr = freq.freqstr + year, quarter = _make_field_arrays(year, quarter) + for y, q in zip(year, quarter): + calendar_year, calendar_month = parsing.quarter_to_myear(y, q, freqstr) + val = libperiod.period_ordinal( + calendar_year, calendar_month, 1, 1, 1, 1, 0, 0, base + ) + ordinals.append(val) + else: + freq = to_offset(freq) + base = libperiod.freq_to_dtype_code(freq) + arrays = _make_field_arrays(year, month, day, hour, minute, second) + for y, mth, d, h, mn, s in zip(*arrays): + ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base)) + + return np.array(ordinals, dtype=np.int64), freq + + +def _make_field_arrays(*fields) -> list[np.ndarray]: + length = None + for x in fields: + if isinstance(x, (list, np.ndarray, ABCSeries)): + if length is not None and len(x) != length: + raise ValueError("Mismatched Period array lengths") + if length is None: + length = len(x) + + # error: Argument 2 to "repeat" has incompatible type "Optional[int]"; 
expected + # "Union[Union[int, integer[Any]], Union[bool, bool_], ndarray, Sequence[Union[int, + # integer[Any]]], Sequence[Union[bool, bool_]], Sequence[Sequence[Any]]]" + return [ + np.asarray(x) + if isinstance(x, (np.ndarray, list, ABCSeries)) + else np.repeat(x, length) # type: ignore[arg-type] + for x in fields + ] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/__init__.py new file mode 100644 index 00000000..adf83963 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/__init__.py @@ -0,0 +1,19 @@ +from pandas.core.arrays.sparse.accessor import ( + SparseAccessor, + SparseFrameAccessor, +) +from pandas.core.arrays.sparse.array import ( + BlockIndex, + IntIndex, + SparseArray, + make_sparse_index, +) + +__all__ = [ + "BlockIndex", + "IntIndex", + "make_sparse_index", + "SparseAccessor", + "SparseArray", + "SparseFrameAccessor", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/accessor.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/accessor.py new file mode 100644 index 00000000..6eb1387c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/accessor.py @@ -0,0 +1,414 @@ +"""Sparse accessor""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.compat._optional import import_optional_dependency + +from pandas.core.dtypes.cast import find_common_type +from pandas.core.dtypes.dtypes import SparseDtype + +from pandas.core.accessor import ( + PandasDelegate, + delegate_names, +) +from pandas.core.arrays.sparse.array import SparseArray + +if TYPE_CHECKING: + from pandas import ( + DataFrame, + Series, + ) + + +class BaseAccessor: + _validation_msg = "Can only use the '.sparse' accessor with Sparse data." + + def __init__(self, data=None) -> None: + self._parent = data + self._validate(data) + + def _validate(self, data): + raise NotImplementedError + + +@delegate_names( + SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property" +) +class SparseAccessor(BaseAccessor, PandasDelegate): + """ + Accessor for SparseSparse from other sparse matrix data types. + + Examples + -------- + >>> ser = pd.Series([0, 0, 2, 2, 2], dtype="Sparse[int]") + >>> ser.sparse.density + 0.6 + >>> ser.sparse.sp_values + array([2, 2, 2]) + """ + + def _validate(self, data): + if not isinstance(data.dtype, SparseDtype): + raise AttributeError(self._validation_msg) + + def _delegate_property_get(self, name: str, *args, **kwargs): + return getattr(self._parent.array, name) + + def _delegate_method(self, name: str, *args, **kwargs): + if name == "from_coo": + return self.from_coo(*args, **kwargs) + elif name == "to_coo": + return self.to_coo(*args, **kwargs) + else: + raise ValueError + + @classmethod + def from_coo(cls, A, dense_index: bool = False) -> Series: + """ + Create a Series with sparse values from a scipy.sparse.coo_matrix. + + Parameters + ---------- + A : scipy.sparse.coo_matrix + dense_index : bool, default False + If False (default), the index consists of only the + coords of the non-null entries of the original coo_matrix. + If True, the index consists of the full sorted + (row, col) coordinates of the coo_matrix. + + Returns + ------- + s : Series + A Series with sparse values. + + Examples + -------- + >>> from scipy import sparse + + >>> A = sparse.coo_matrix( + ... 
([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
+        ... )
+        >>> A
+        <3x4 sparse matrix of type '<class 'numpy.float64'>'
+        with 3 stored elements in COOrdinate format>
+
+        >>> A.todense()
+        matrix([[0., 0., 1., 2.],
+                [3., 0., 0., 0.],
+                [0., 0., 0., 0.]])
+
+        >>> ss = pd.Series.sparse.from_coo(A)
+        >>> ss
+        0  2    1.0
+           3    2.0
+        1  0    3.0
+        dtype: Sparse[float64, nan]
+        """
+        from pandas import Series
+        from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series
+
+        result = coo_to_sparse_series(A, dense_index=dense_index)
+        result = Series(result.array, index=result.index, copy=False)
+
+        return result
+
+    def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False):
+        """
+        Create a scipy.sparse.coo_matrix from a Series with MultiIndex.
+
+        Use row_levels and column_levels to determine the row and column
+        coordinates respectively. row_levels and column_levels are the names
+        (labels) or numbers of the levels. {row_levels, column_levels} must be
+        a partition of the MultiIndex level names (or numbers).
+
+        Parameters
+        ----------
+        row_levels : tuple/list
+        column_levels : tuple/list
+        sort_labels : bool, default False
+            Sort the row and column labels before forming the sparse matrix.
+            When `row_levels` and/or `column_levels` refer to a single level,
+            set to `True` for faster execution.
+
+        Returns
+        -------
+        y : scipy.sparse.coo_matrix
+        rows : list (row labels)
+        columns : list (column labels)
+
+        Examples
+        --------
+        >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
+        >>> s.index = pd.MultiIndex.from_tuples(
+        ...     [
+        ...         (1, 2, "a", 0),
+        ...         (1, 2, "a", 1),
+        ...         (1, 1, "b", 0),
+        ...         (1, 1, "b", 1),
+        ...         (2, 1, "b", 0),
+        ...         (2, 1, "b", 1)
+        ...     ],
+        ...     names=["A", "B", "C", "D"],
+        ... )
+        >>> s
+        A  B  C  D
+        1  2  a  0    3.0
+                 1    NaN
+           1  b  0    1.0
+                 1    3.0
+        2  1  b  0    NaN
+                 1    NaN
+        dtype: float64
+
+        >>> ss = s.astype("Sparse")
+        >>> ss
+        A  B  C  D
+        1  2  a  0    3.0
+                 1    NaN
+           1  b  0    1.0
+                 1    3.0
+        2  1  b  0    NaN
+                 1    NaN
+        dtype: Sparse[float64, nan]
+
+        >>> A, rows, columns = ss.sparse.to_coo(
+        ...     row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
+        ... )
+        >>> A
+        <3x4 sparse matrix of type '<class 'numpy.float64'>'
+        with 3 stored elements in COOrdinate format>
+        >>> A.todense()
+        matrix([[0., 0., 1., 3.],
+                [3., 0., 0., 0.],
+                [0., 0., 0., 0.]])
+
+        >>> rows
+        [(1, 1), (1, 2), (2, 1)]
+        >>> columns
+        [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
+        """
+        from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo
+
+        A, rows, columns = sparse_series_to_coo(
+            self._parent, row_levels, column_levels, sort_labels=sort_labels
+        )
+        return A, rows, columns
+
+    def to_dense(self) -> Series:
+        """
+        Convert a Series from sparse values to dense.
+
+        Returns
+        -------
+        Series
+            A Series with the same values, stored as a dense array.
+
+        Examples
+        --------
+        >>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
+        >>> series
+        0    0
+        1    1
+        2    0
+        dtype: Sparse[int64, 0]
+
+        >>> series.sparse.to_dense()
+        0    0
+        1    1
+        2    0
+        dtype: int64
+        """
+        from pandas import Series
+
+        return Series(
+            self._parent.array.to_dense(),
+            index=self._parent.index,
+            name=self._parent.name,
+            copy=False,
+        )
+
+
+class SparseFrameAccessor(BaseAccessor, PandasDelegate):
+    """
+    DataFrame accessor for sparse data.
+
+    Examples
+    --------
+    >>> df = pd.DataFrame({"a": [1, 2, 0, 0],
+    ...                    "b": [3, 0, 0, 4]}, dtype="Sparse[int]")
+    >>> df.sparse.density
+    0.5
+    """
+
+    def _validate(self, data):
+        dtypes = data.dtypes
+        if not all(isinstance(t, SparseDtype) for t in dtypes):
+            raise AttributeError(self._validation_msg)
+
+    @classmethod
+    def from_spmatrix(cls, data, index=None, columns=None) -> DataFrame:
+        """
+        Create a new DataFrame from a scipy sparse matrix.
+
+        Parameters
+        ----------
+        data : scipy.sparse.spmatrix
+            Must be convertible to csc format.
+        index, columns : Index, optional
+            Row and column labels to use for the resulting DataFrame.
+            Defaults to a RangeIndex.
+
+        Returns
+        -------
+        DataFrame
+            Each column of the DataFrame is stored as a
+            :class:`arrays.SparseArray`.
+
+        Examples
+        --------
+        >>> import scipy.sparse
+        >>> mat = scipy.sparse.eye(3)
+        >>> pd.DataFrame.sparse.from_spmatrix(mat)
+             0    1    2
+        0  1.0  0.0  0.0
+        1  0.0  1.0  0.0
+        2  0.0  0.0  1.0
+        """
+        from pandas._libs.sparse import IntIndex
+
+        from pandas import DataFrame
+
+        data = data.tocsc()
+        index, columns = cls._prep_index(data, index, columns)
+        n_rows, n_columns = data.shape
+        # We need to make sure indices are sorted, as we create
+        # IntIndex with no input validation (i.e. check_integrity=False).
+        # Indices may already be sorted in scipy in which case this adds
+        # a small overhead.
+        data.sort_indices()
+        indices = data.indices
+        indptr = data.indptr
+        array_data = data.data
+        dtype = SparseDtype(array_data.dtype, 0)
+        arrays = []
+        for i in range(n_columns):
+            sl = slice(indptr[i], indptr[i + 1])
+            idx = IntIndex(n_rows, indices[sl], check_integrity=False)
+            arr = SparseArray._simple_new(array_data[sl], idx, dtype)
+            arrays.append(arr)
+        return DataFrame._from_arrays(
+            arrays, columns=columns, index=index, verify_integrity=False
+        )
+
+    def to_dense(self) -> DataFrame:
+        """
+        Convert a DataFrame with sparse values to dense.
+
+        Returns
+        -------
+        DataFrame
+            A DataFrame with the same values stored as dense arrays.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})
+        >>> df.sparse.to_dense()
+           A
+        0  0
+        1  1
+        2  0
+        """
+        from pandas import DataFrame
+
+        data = {k: v.array.to_dense() for k, v in self._parent.items()}
+        return DataFrame(data, index=self._parent.index, columns=self._parent.columns)
+
+    def to_coo(self):
+        """
+        Return the contents of the frame as a sparse SciPy COO matrix.
+
+        Returns
+        -------
+        scipy.sparse.spmatrix
+            If the caller is heterogeneous and contains booleans or objects,
+            the result will be of dtype=object. See Notes.
+
+        Notes
+        -----
+        The dtype will be the lowest-common-denominator type (implicit
+        upcasting); that is to say if the dtypes (even of numeric types)
+        are mixed, the one that accommodates all will be chosen.
+
+        e.g. If the dtypes are float16 and float32, dtype will be upcast to
+        float32. By numpy.find_common_type convention, mixing int64 and
+        uint64 will result in a float64 dtype.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])})
+        >>> df.sparse.to_coo()
+        <4x1 sparse matrix of type '<class 'numpy.int64'>'
+        with 2 stored elements in COOrdinate format>
+        """
+        import_optional_dependency("scipy")
+        from scipy.sparse import coo_matrix
+
+        dtype = find_common_type(self._parent.dtypes.to_list())
+        if isinstance(dtype, SparseDtype):
+            dtype = dtype.subtype
+
+        cols, rows, data = [], [], []
+        for col, (_, ser) in enumerate(self._parent.items()):
+            sp_arr = ser.array
+            if sp_arr.fill_value != 0:
+                raise ValueError("fill value must be 0 when converting to COO matrix")
+
+            row = sp_arr.sp_index.indices
+            cols.append(np.repeat(col, len(row)))
+            rows.append(row)
+            data.append(sp_arr.sp_values.astype(dtype, copy=False))
+
+        cols = np.concatenate(cols)
+        rows = np.concatenate(rows)
+        data = np.concatenate(data)
+        return coo_matrix((data, (rows, cols)), shape=self._parent.shape)
+
+    @property
+    def density(self) -> float:
+        """
+        Ratio of non-sparse points to total (dense) data points.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])})
+        >>> df.sparse.density
+        0.5
+        """
+        tmp = np.mean([column.array.density for _, column in self._parent.items()])
+        return tmp
+
+    @staticmethod
+    def _prep_index(data, index, columns):
+        from pandas.core.indexes.api import (
+            default_index,
+            ensure_index,
+        )
+
+        N, K = data.shape
+        if index is None:
+            index = default_index(N)
+        else:
+            index = ensure_index(index)
+        if columns is None:
+            columns = default_index(K)
+        else:
+            columns = ensure_index(columns)
+
+        if len(columns) != K:
+            raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
+        if len(index) != N:
+            raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
+        return index, columns
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/array.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/array.py
new file mode 100644
index 00000000..e38fa0a3
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/array.py
@@ -0,0 +1,1908 @@
+"""
+SparseArray data structure
+"""
+from __future__ import annotations
+
+from collections import abc
+import numbers
+import operator
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Literal,
+    cast,
+    overload,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs import lib
+import pandas._libs.sparse as splib
+from pandas._libs.sparse import (
+    BlockIndex,
+    IntIndex,
+    SparseIndex,
+)
+from pandas._libs.tslibs import NaT
+from pandas.compat.numpy import function as nv
+from pandas.errors import PerformanceWarning
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import (
+    validate_bool_kwarg,
+    validate_insert_loc,
+)
+
+from pandas.core.dtypes.astype import astype_array
+from pandas.core.dtypes.cast import (
+    construct_1d_arraylike_from_scalar,
+    find_common_type,
+    maybe_box_datetimelike,
+)
+from pandas.core.dtypes.common import (
+    is_bool_dtype,
+    is_integer,
+    is_list_like,
+    is_object_dtype,
+    is_scalar,
+    is_string_dtype,
+    pandas_dtype,
+)
+from pandas.core.dtypes.dtypes import (
+    DatetimeTZDtype,
+    SparseDtype,
+)
+from pandas.core.dtypes.generic import (
+    ABCIndex,
+    ABCSeries,
+)
+from pandas.core.dtypes.missing import (
+    isna,
+    na_value_for_dtype,
+    notna,
+)
+
+from pandas.core import arraylike
+import pandas.core.algorithms as algos
+from pandas.core.arraylike import OpsMixin
+from pandas.core.arrays import ExtensionArray
+from 
pandas.core.base import PandasObject +import pandas.core.common as com +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, + sanitize_array, +) +from pandas.core.indexers import ( + check_array_indexer, + unpack_tuple_and_ellipses, +) +from pandas.core.nanops import check_below_min_count + +from pandas.io.formats import printing + +# See https://github.com/python/typing/issues/684 +if TYPE_CHECKING: + from collections.abc import Sequence + from enum import Enum + + class ellipsis(Enum): + Ellipsis = "..." + + Ellipsis = ellipsis.Ellipsis + + from scipy.sparse import spmatrix + + from pandas._typing import ( + FillnaOptions, + NumpySorter, + ) + + SparseIndexKind = Literal["integer", "block"] + + from pandas._typing import ( + ArrayLike, + AstypeArg, + Axis, + AxisInt, + Dtype, + NpDtype, + PositionalIndexer, + Scalar, + ScalarIndexer, + Self, + SequenceIndexer, + npt, + ) + + from pandas import Series + +else: + ellipsis = type(Ellipsis) + + +# ---------------------------------------------------------------------------- +# Array + +_sparray_doc_kwargs = {"klass": "SparseArray"} + + +def _get_fill(arr: SparseArray) -> np.ndarray: + """ + Create a 0-dim ndarray containing the fill value + + Parameters + ---------- + arr : SparseArray + + Returns + ------- + fill_value : ndarray + 0-dim ndarray with just the fill value. + + Notes + ----- + coerce fill_value to arr dtype if possible + int64 SparseArray can have NaN as fill_value if there is no missing + """ + try: + return np.asarray(arr.fill_value, dtype=arr.dtype.subtype) + except ValueError: + return np.asarray(arr.fill_value) + + +def _sparse_array_op( + left: SparseArray, right: SparseArray, op: Callable, name: str +) -> SparseArray: + """ + Perform a binary operation between two arrays. + + Parameters + ---------- + left : Union[SparseArray, ndarray] + right : Union[SparseArray, ndarray] + op : Callable + The binary operation to perform + name str + Name of the callable. 
+ + Returns + ------- + SparseArray + """ + if name.startswith("__"): + # For lookups in _libs.sparse we need non-dunder op name + name = name[2:-2] + + # dtype used to find corresponding sparse method + ltype = left.dtype.subtype + rtype = right.dtype.subtype + + if ltype != rtype: + subtype = find_common_type([ltype, rtype]) + ltype = SparseDtype(subtype, left.fill_value) + rtype = SparseDtype(subtype, right.fill_value) + + left = left.astype(ltype, copy=False) + right = right.astype(rtype, copy=False) + dtype = ltype.subtype + else: + dtype = ltype + + # dtype the result must have + result_dtype = None + + if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0: + with np.errstate(all="ignore"): + result = op(left.to_dense(), right.to_dense()) + fill = op(_get_fill(left), _get_fill(right)) + + if left.sp_index.ngaps == 0: + index = left.sp_index + else: + index = right.sp_index + elif left.sp_index.equals(right.sp_index): + with np.errstate(all="ignore"): + result = op(left.sp_values, right.sp_values) + fill = op(_get_fill(left), _get_fill(right)) + index = left.sp_index + else: + if name[0] == "r": + left, right = right, left + name = name[1:] + + if name in ("and", "or", "xor") and dtype == "bool": + opname = f"sparse_{name}_uint8" + # to make template simple, cast here + left_sp_values = left.sp_values.view(np.uint8) + right_sp_values = right.sp_values.view(np.uint8) + result_dtype = bool + else: + opname = f"sparse_{name}_{dtype}" + left_sp_values = left.sp_values + right_sp_values = right.sp_values + + if ( + name in ["floordiv", "mod"] + and (right == 0).any() + and left.dtype.kind in "iu" + ): + # Match the non-Sparse Series behavior + opname = f"sparse_{name}_float64" + left_sp_values = left_sp_values.astype("float64") + right_sp_values = right_sp_values.astype("float64") + + sparse_op = getattr(splib, opname) + + with np.errstate(all="ignore"): + result, index, fill = sparse_op( + left_sp_values, + left.sp_index, + left.fill_value, + right_sp_values, + right.sp_index, + right.fill_value, + ) + + if name == "divmod": + # result is a 2-tuple + # error: Incompatible return value type (got "Tuple[SparseArray, + # SparseArray]", expected "SparseArray") + return ( # type: ignore[return-value] + _wrap_result(name, result[0], index, fill[0], dtype=result_dtype), + _wrap_result(name, result[1], index, fill[1], dtype=result_dtype), + ) + + if result_dtype is None: + result_dtype = result.dtype + + return _wrap_result(name, result, index, fill, dtype=result_dtype) + + +def _wrap_result( + name: str, data, sparse_index, fill_value, dtype: Dtype | None = None +) -> SparseArray: + """ + wrap op result to have correct dtype + """ + if name.startswith("__"): + # e.g. __eq__ --> eq + name = name[2:-2] + + if name in ("eq", "ne", "lt", "gt", "le", "ge"): + dtype = bool + + fill_value = lib.item_from_zerodim(fill_value) + + if is_bool_dtype(dtype): + # fill_value may be np.bool_ + fill_value = bool(fill_value) + return SparseArray( + data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype + ) + + +class SparseArray(OpsMixin, PandasObject, ExtensionArray): + """ + An ExtensionArray for storing sparse data. + + Parameters + ---------- + data : array-like or scalar + A dense array of values to store in the SparseArray. This may contain + `fill_value`. + sparse_index : SparseIndex, optional + fill_value : scalar, optional + Elements in data that are ``fill_value`` are not stored in the + SparseArray. For memory savings, this should be the most common value + in `data`. 
By default, `fill_value` depends on the dtype of `data`: + + =========== ========== + data.dtype na_value + =========== ========== + float ``np.nan`` + int ``0`` + bool False + datetime64 ``pd.NaT`` + timedelta64 ``pd.NaT`` + =========== ========== + + The fill value is potentially specified in three ways. In order of + precedence, these are + + 1. The `fill_value` argument + 2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is + a ``SparseDtype`` + 3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype` + is not a ``SparseDtype`` and `data` is a ``SparseArray``. + + kind : str + Can be 'integer' or 'block', default is 'integer'. + The type of storage for sparse locations. + + * 'block': Stores a `block` and `block_length` for each + contiguous *span* of sparse values. This is best when + sparse data tends to be clumped together, with large + regions of ``fill-value`` values between sparse values. + * 'integer': uses an integer to store the location of + each sparse value. + + dtype : np.dtype or SparseDtype, optional + The dtype to use for the SparseArray. For numpy dtypes, this + determines the dtype of ``self.sp_values``. For SparseDtype, + this determines ``self.sp_values`` and ``self.fill_value``. + copy : bool, default False + Whether to explicitly copy the incoming `data` array. + + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> from pandas.arrays import SparseArray + >>> arr = SparseArray([0, 0, 1, 2]) + >>> arr + [0, 0, 1, 2] + Fill: 0 + IntIndex + Indices: array([2, 3], dtype=int32) + """ + + _subtyp = "sparse_array" # register ABCSparseArray + _hidden_attrs = PandasObject._hidden_attrs | frozenset([]) + _sparse_index: SparseIndex + _sparse_values: np.ndarray + _dtype: SparseDtype + + def __init__( + self, + data, + sparse_index=None, + fill_value=None, + kind: SparseIndexKind = "integer", + dtype: Dtype | None = None, + copy: bool = False, + ) -> None: + if fill_value is None and isinstance(dtype, SparseDtype): + fill_value = dtype.fill_value + + if isinstance(data, type(self)): + # disable normal inference on dtype, sparse_index, & fill_value + if sparse_index is None: + sparse_index = data.sp_index + if fill_value is None: + fill_value = data.fill_value + if dtype is None: + dtype = data.dtype + # TODO: make kind=None, and use data.kind? + data = data.sp_values + + # Handle use-provided dtype + if isinstance(dtype, str): + # Two options: dtype='int', regular numpy dtype + # or dtype='Sparse[int]', a sparse dtype + try: + dtype = SparseDtype.construct_from_string(dtype) + except TypeError: + dtype = pandas_dtype(dtype) + + if isinstance(dtype, SparseDtype): + if fill_value is None: + fill_value = dtype.fill_value + dtype = dtype.subtype + + if is_scalar(data): + warnings.warn( + f"Constructing {type(self).__name__} with scalar data is deprecated " + "and will raise in a future version. Pass a sequence instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if sparse_index is None: + npoints = 1 + else: + npoints = sparse_index.length + + data = construct_1d_arraylike_from_scalar(data, npoints, dtype=None) + dtype = data.dtype + + if dtype is not None: + dtype = pandas_dtype(dtype) + + # TODO: disentangle the fill_value dtype inference from + # dtype inference + if data is None: + # TODO: What should the empty dtype be? Object or float? 
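+            # Illustrative note: with dtype=None this produces np.array([]),
+            # i.e. an empty float64 array, so the resulting SparseArray gets
+            # a float64 subtype with nan fill_value.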
+ + # error: Argument "dtype" to "array" has incompatible type + # "Union[ExtensionDtype, dtype[Any], None]"; expected "Union[dtype[Any], + # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, + # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" + data = np.array([], dtype=dtype) # type: ignore[arg-type] + + try: + data = sanitize_array(data, index=None) + except ValueError: + # NumPy may raise a ValueError on data like [1, []] + # we retry with object dtype here. + if dtype is None: + dtype = np.dtype(object) + data = np.atleast_1d(np.asarray(data, dtype=dtype)) + else: + raise + + if copy: + # TODO: avoid double copy when dtype forces cast. + data = data.copy() + + if fill_value is None: + fill_value_dtype = data.dtype if dtype is None else dtype + if fill_value_dtype is None: + fill_value = np.nan + else: + fill_value = na_value_for_dtype(fill_value_dtype) + + if isinstance(data, type(self)) and sparse_index is None: + sparse_index = data._sparse_index + # error: Argument "dtype" to "asarray" has incompatible type + # "Union[ExtensionDtype, dtype[Any], None]"; expected "None" + sparse_values = np.asarray( + data.sp_values, dtype=dtype # type: ignore[arg-type] + ) + elif sparse_index is None: + data = extract_array(data, extract_numpy=True) + if not isinstance(data, np.ndarray): + # EA + if isinstance(data.dtype, DatetimeTZDtype): + warnings.warn( + f"Creating SparseArray from {data.dtype} data " + "loses timezone information. Cast to object before " + "sparse to retain timezone information.", + UserWarning, + stacklevel=find_stack_level(), + ) + data = np.asarray(data, dtype="datetime64[ns]") + if fill_value is NaT: + fill_value = np.datetime64("NaT", "ns") + data = np.asarray(data) + sparse_values, sparse_index, fill_value = _make_sparse( + # error: Argument "dtype" to "_make_sparse" has incompatible type + # "Union[ExtensionDtype, dtype[Any], None]"; expected + # "Optional[dtype[Any]]" + data, + kind=kind, + fill_value=fill_value, + dtype=dtype, # type: ignore[arg-type] + ) + else: + # error: Argument "dtype" to "asarray" has incompatible type + # "Union[ExtensionDtype, dtype[Any], None]"; expected "None" + sparse_values = np.asarray(data, dtype=dtype) # type: ignore[arg-type] + if len(sparse_values) != sparse_index.npoints: + raise AssertionError( + f"Non array-like type {type(sparse_values)} must " + "have the same length as the index" + ) + self._sparse_index = sparse_index + self._sparse_values = sparse_values + self._dtype = SparseDtype(sparse_values.dtype, fill_value) + + @classmethod + def _simple_new( + cls, + sparse_array: np.ndarray, + sparse_index: SparseIndex, + dtype: SparseDtype, + ) -> Self: + new = object.__new__(cls) + new._sparse_index = sparse_index + new._sparse_values = sparse_array + new._dtype = dtype + return new + + @classmethod + def from_spmatrix(cls, data: spmatrix) -> Self: + """ + Create a SparseArray from a scipy.sparse matrix. + + Parameters + ---------- + data : scipy.sparse.sp_matrix + This should be a SciPy sparse matrix where the size + of the second dimension is 1. In other words, a + sparse matrix with a single column. 
+ + Returns + ------- + SparseArray + + Examples + -------- + >>> import scipy.sparse + >>> mat = scipy.sparse.coo_matrix((4, 1)) + >>> pd.arrays.SparseArray.from_spmatrix(mat) + [0.0, 0.0, 0.0, 0.0] + Fill: 0.0 + IntIndex + Indices: array([], dtype=int32) + """ + length, ncol = data.shape + + if ncol != 1: + raise ValueError(f"'data' must have a single column, not '{ncol}'") + + # our sparse index classes require that the positions be strictly + # increasing. So we need to sort loc, and arr accordingly. + data = data.tocsc() + data.sort_indices() + arr = data.data + idx = data.indices + + zero = np.array(0, dtype=arr.dtype).item() + dtype = SparseDtype(arr.dtype, zero) + index = IntIndex(length, idx) + + return cls._simple_new(arr, index, dtype) + + def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: + fill_value = self.fill_value + + if self.sp_index.ngaps == 0: + # Compat for na dtype and int values. + return self.sp_values + if dtype is None: + # Can NumPy represent this type? + # If not, `np.result_type` will raise. We catch that + # and return object. + if self.sp_values.dtype.kind == "M": + # However, we *do* special-case the common case of + # a datetime64 with pandas NaT. + if fill_value is NaT: + # Can't put pd.NaT in a datetime64[ns] + fill_value = np.datetime64("NaT") + try: + dtype = np.result_type(self.sp_values.dtype, type(fill_value)) + except TypeError: + dtype = object + + out = np.full(self.shape, fill_value, dtype=dtype) + out[self.sp_index.indices] = self.sp_values + return out + + def __setitem__(self, key, value) -> None: + # I suppose we could allow setting of non-fill_value elements. + # TODO(SparseArray.__setitem__): remove special cases in + # ExtensionBlock.where + msg = "SparseArray does not support item assignment via setitem" + raise TypeError(msg) + + @classmethod + def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False): + return cls(scalars, dtype=dtype) + + @classmethod + def _from_factorized(cls, values, original): + return cls(values, dtype=original.dtype) + + # ------------------------------------------------------------------------ + # Data + # ------------------------------------------------------------------------ + @property + def sp_index(self) -> SparseIndex: + """ + The SparseIndex containing the location of non- ``fill_value`` points. + """ + return self._sparse_index + + @property + def sp_values(self) -> np.ndarray: + """ + An ndarray containing the non- ``fill_value`` values. + + Examples + -------- + >>> from pandas.arrays import SparseArray + >>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0) + >>> s.sp_values + array([1, 2]) + """ + return self._sparse_values + + @property + def dtype(self) -> SparseDtype: + return self._dtype + + @property + def fill_value(self): + """ + Elements in `data` that are `fill_value` are not stored. + + For memory savings, this should be the most common value in the array. + + Examples + -------- + >>> ser = pd.Series([0, 0, 2, 2, 2], dtype="Sparse[int]") + >>> ser.sparse.fill_value + 0 + >>> spa_dtype = pd.SparseDtype(dtype=np.int32, fill_value=2) + >>> ser = pd.Series([0, 0, 2, 2, 2], dtype=spa_dtype) + >>> ser.sparse.fill_value + 2 + """ + return self.dtype.fill_value + + @fill_value.setter + def fill_value(self, value) -> None: + self._dtype = SparseDtype(self.dtype.subtype, value) + + @property + def kind(self) -> SparseIndexKind: + """ + The kind of sparse index for this array. One of {'integer', 'block'}. 
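+
+        Examples
+        --------
+        >>> from pandas.arrays import SparseArray
+        >>> SparseArray([0, 1], kind="integer").kind
+        'integer'
+        >>> SparseArray([0, 1], kind="block").kind
+        'block'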
+ """ + if isinstance(self.sp_index, IntIndex): + return "integer" + else: + return "block" + + @property + def _valid_sp_values(self) -> np.ndarray: + sp_vals = self.sp_values + mask = notna(sp_vals) + return sp_vals[mask] + + def __len__(self) -> int: + return self.sp_index.length + + @property + def _null_fill_value(self) -> bool: + return self._dtype._is_na_fill_value + + def _fill_value_matches(self, fill_value) -> bool: + if self._null_fill_value: + return isna(fill_value) + else: + return self.fill_value == fill_value + + @property + def nbytes(self) -> int: + return self.sp_values.nbytes + self.sp_index.nbytes + + @property + def density(self) -> float: + """ + The percent of non- ``fill_value`` points, as decimal. + + Examples + -------- + >>> from pandas.arrays import SparseArray + >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0) + >>> s.density + 0.6 + """ + return self.sp_index.npoints / self.sp_index.length + + @property + def npoints(self) -> int: + """ + The number of non- ``fill_value`` points. + + Examples + -------- + >>> from pandas.arrays import SparseArray + >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0) + >>> s.npoints + 3 + """ + return self.sp_index.npoints + + def isna(self): + # If null fill value, we want SparseDtype[bool, true] + # to preserve the same memory usage. + dtype = SparseDtype(bool, self._null_fill_value) + if self._null_fill_value: + return type(self)._simple_new(isna(self.sp_values), self.sp_index, dtype) + mask = np.full(len(self), False, dtype=np.bool_) + mask[self.sp_index.indices] = isna(self.sp_values) + return type(self)(mask, fill_value=False, dtype=dtype) + + def _pad_or_backfill( # pylint: disable=useless-parent-delegation + self, *, method: FillnaOptions, limit: int | None = None, copy: bool = True + ) -> Self: + # TODO(3.0): We can remove this method once deprecation for fillna method + # keyword is enforced. + return super()._pad_or_backfill(method=method, limit=limit, copy=copy) + + def fillna( + self, + value=None, + method: FillnaOptions | None = None, + limit: int | None = None, + copy: bool = True, + ) -> Self: + """ + Fill missing values with `value`. + + Parameters + ---------- + value : scalar, optional + method : str, optional + + .. warning:: + + Using 'method' will result in high memory use, + as all `fill_value` methods will be converted to + an in-memory ndarray + + limit : int, optional + + copy: bool, default True + Ignored for SparseArray. + + Returns + ------- + SparseArray + + Notes + ----- + When `value` is specified, the result's ``fill_value`` depends on + ``self.fill_value``. The goal is to maintain low-memory use. + + If ``self.fill_value`` is NA, the result dtype will be + ``SparseDtype(self.dtype, fill_value=value)``. This will preserve + amount of memory used before and after filling. + + When ``self.fill_value`` is not NA, the result dtype will be + ``self.dtype``. Again, this preserves the amount of memory used. + """ + if (method is None and value is None) or ( + method is not None and value is not None + ): + raise ValueError("Must specify one of 'method' or 'value'.") + + if method is not None: + return super().fillna(method=method, limit=limit) + + else: + new_values = np.where(isna(self.sp_values), value, self.sp_values) + + if self._null_fill_value: + # This is essentially just updating the dtype. 
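+                # For example (illustrative): filling Sparse[float64, nan]
+                # with value=0 replaces any stored nans and re-labels the
+                # gaps by switching to SparseDtype("float64", fill_value=0);
+                # the sparse layout itself is unchanged.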
+ new_dtype = SparseDtype(self.dtype.subtype, fill_value=value) + else: + new_dtype = self.dtype + + return self._simple_new(new_values, self._sparse_index, new_dtype) + + def shift(self, periods: int = 1, fill_value=None) -> Self: + if not len(self) or periods == 0: + return self.copy() + + if isna(fill_value): + fill_value = self.dtype.na_value + + subtype = np.result_type(fill_value, self.dtype.subtype) + + if subtype != self.dtype.subtype: + # just coerce up front + arr = self.astype(SparseDtype(subtype, self.fill_value)) + else: + arr = self + + empty = self._from_sequence( + [fill_value] * min(abs(periods), len(self)), dtype=arr.dtype + ) + + if periods > 0: + a = empty + b = arr[:-periods] + else: + a = arr[abs(periods) :] + b = empty + return arr._concat_same_type([a, b]) + + def _first_fill_value_loc(self): + """ + Get the location of the first fill value. + + Returns + ------- + int + """ + if len(self) == 0 or self.sp_index.npoints == len(self): + return -1 + + indices = self.sp_index.indices + if not len(indices) or indices[0] > 0: + return 0 + + # a number larger than 1 should be appended to + # the last in case of fill value only appears + # in the tail of array + diff = np.r_[np.diff(indices), 2] + return indices[(diff > 1).argmax()] + 1 + + def unique(self) -> Self: + uniques = algos.unique(self.sp_values) + if len(self.sp_values) != len(self): + fill_loc = self._first_fill_value_loc() + # Inorder to align the behavior of pd.unique or + # pd.Series.unique, we should keep the original + # order, here we use unique again to find the + # insertion place. Since the length of sp_values + # is not large, maybe minor performance hurt + # is worthwhile to the correctness. + insert_loc = len(algos.unique(self.sp_values[:fill_loc])) + uniques = np.insert(uniques, insert_loc, self.fill_value) + return type(self)._from_sequence(uniques, dtype=self.dtype) + + def _values_for_factorize(self): + # Still override this for hash_pandas_object + return np.asarray(self), self.fill_value + + def factorize( + self, + use_na_sentinel: bool = True, + ) -> tuple[np.ndarray, SparseArray]: + # Currently, ExtensionArray.factorize -> Tuple[ndarray, EA] + # The sparsity on this is backwards from what Sparse would want. Want + # ExtensionArray.factorize -> Tuple[EA, EA] + # Given that we have to return a dense array of codes, why bother + # implementing an efficient factorize? + codes, uniques = algos.factorize( + np.asarray(self), use_na_sentinel=use_na_sentinel + ) + uniques_sp = SparseArray(uniques, dtype=self.dtype) + return codes, uniques_sp + + def value_counts(self, dropna: bool = True) -> Series: + """ + Returns a Series containing counts of unique values. + + Parameters + ---------- + dropna : bool, default True + Don't include counts of NaN, even if NaN is in sp_values. 
+ + Returns + ------- + counts : Series + """ + from pandas import ( + Index, + Series, + ) + + keys, counts = algos.value_counts_arraylike(self.sp_values, dropna=dropna) + fcounts = self.sp_index.ngaps + if fcounts > 0 and (not self._null_fill_value or not dropna): + mask = isna(keys) if self._null_fill_value else keys == self.fill_value + if mask.any(): + counts[mask] += fcounts + else: + # error: Argument 1 to "insert" has incompatible type "Union[ + # ExtensionArray,ndarray[Any, Any]]"; expected "Union[ + # _SupportsArray[dtype[Any]], Sequence[_SupportsArray[dtype + # [Any]]], Sequence[Sequence[_SupportsArray[dtype[Any]]]], + # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]], Sequence + # [Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]]]" + keys = np.insert(keys, 0, self.fill_value) # type: ignore[arg-type] + counts = np.insert(counts, 0, fcounts) + + if not isinstance(keys, ABCIndex): + index = Index(keys) + else: + index = keys + return Series(counts, index=index, copy=False) + + # -------- + # Indexing + # -------- + @overload + def __getitem__(self, key: ScalarIndexer) -> Any: + ... + + @overload + def __getitem__( + self, + key: SequenceIndexer | tuple[int | ellipsis, ...], + ) -> Self: + ... + + def __getitem__( + self, + key: PositionalIndexer | tuple[int | ellipsis, ...], + ) -> Self | Any: + if isinstance(key, tuple): + key = unpack_tuple_and_ellipses(key) + if key is Ellipsis: + raise ValueError("Cannot slice with Ellipsis") + + if is_integer(key): + return self._get_val_at(key) + elif isinstance(key, tuple): + # error: Invalid index type "Tuple[Union[int, ellipsis], ...]" + # for "ndarray[Any, Any]"; expected type + # "Union[SupportsIndex, _SupportsArray[dtype[Union[bool_, + # integer[Any]]]], _NestedSequence[_SupportsArray[dtype[ + # Union[bool_, integer[Any]]]]], _NestedSequence[Union[ + # bool, int]], Tuple[Union[SupportsIndex, _SupportsArray[ + # dtype[Union[bool_, integer[Any]]]], _NestedSequence[ + # _SupportsArray[dtype[Union[bool_, integer[Any]]]]], + # _NestedSequence[Union[bool, int]]], ...]]" + data_slice = self.to_dense()[key] # type: ignore[index] + elif isinstance(key, slice): + # Avoid densifying when handling contiguous slices + if key.step is None or key.step == 1: + start = 0 if key.start is None else key.start + if start < 0: + start += len(self) + + end = len(self) if key.stop is None else key.stop + if end < 0: + end += len(self) + + indices = self.sp_index.indices + keep_inds = np.flatnonzero((indices >= start) & (indices < end)) + sp_vals = self.sp_values[keep_inds] + + sp_index = indices[keep_inds].copy() + + # If we've sliced to not include the start of the array, all our indices + # should be shifted. NB: here we are careful to also not shift by a + # negative value for a case like [0, 1][-100:] where the start index + # should be treated like 0 + if start > 0: + sp_index -= start + + # Length of our result should match applying this slice to a range + # of the length of our original array + new_len = len(range(len(self))[key]) + new_sp_index = make_sparse_index(new_len, sp_index, self.kind) + return type(self)._simple_new(sp_vals, new_sp_index, self.dtype) + else: + indices = np.arange(len(self), dtype=np.int32)[key] + return self.take(indices) + + elif not is_list_like(key): + # e.g. 
"foo" or 2.5 + # exception message copied from numpy + raise IndexError( + r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis " + r"(`None`) and integer or boolean arrays are valid indices" + ) + + else: + if isinstance(key, SparseArray): + # NOTE: If we guarantee that SparseDType(bool) + # has only fill_value - true, false or nan + # (see GH PR 44955) + # we can apply mask very fast: + if is_bool_dtype(key): + if isna(key.fill_value): + return self.take(key.sp_index.indices[key.sp_values]) + if not key.fill_value: + return self.take(key.sp_index.indices) + n = len(self) + mask = np.full(n, True, dtype=np.bool_) + mask[key.sp_index.indices] = False + return self.take(np.arange(n)[mask]) + else: + key = np.asarray(key) + + key = check_array_indexer(self, key) + + if com.is_bool_indexer(key): + # mypy doesn't know we have an array here + key = cast(np.ndarray, key) + return self.take(np.arange(len(key), dtype=np.int32)[key]) + elif hasattr(key, "__len__"): + return self.take(key) + else: + raise ValueError(f"Cannot slice with '{key}'") + + return type(self)(data_slice, kind=self.kind) + + def _get_val_at(self, loc): + loc = validate_insert_loc(loc, len(self)) + + sp_loc = self.sp_index.lookup(loc) + if sp_loc == -1: + return self.fill_value + else: + val = self.sp_values[sp_loc] + val = maybe_box_datetimelike(val, self.sp_values.dtype) + return val + + def take(self, indices, *, allow_fill: bool = False, fill_value=None) -> Self: + if is_scalar(indices): + raise ValueError(f"'indices' must be an array, not a scalar '{indices}'.") + indices = np.asarray(indices, dtype=np.int32) + + dtype = None + if indices.size == 0: + result = np.array([], dtype="object") + dtype = self.dtype + elif allow_fill: + result = self._take_with_fill(indices, fill_value=fill_value) + else: + return self._take_without_fill(indices) + + return type(self)( + result, fill_value=self.fill_value, kind=self.kind, dtype=dtype + ) + + def _take_with_fill(self, indices, fill_value=None) -> np.ndarray: + if fill_value is None: + fill_value = self.dtype.na_value + + if indices.min() < -1: + raise ValueError( + "Invalid value in 'indices'. Must be between -1 " + "and the length of the array." + ) + + if indices.max() >= len(self): + raise IndexError("out of bounds value in 'indices'.") + + if len(self) == 0: + # Empty... Allow taking only if all empty + if (indices == -1).all(): + dtype = np.result_type(self.sp_values, type(fill_value)) + taken = np.empty_like(indices, dtype=dtype) + taken.fill(fill_value) + return taken + else: + raise IndexError("cannot do a non-empty take from an empty axes.") + + # sp_indexer may be -1 for two reasons + # 1.) we took for an index of -1 (new) + # 2.) we took a value that was self.fill_value (old) + sp_indexer = self.sp_index.lookup_array(indices) + new_fill_indices = indices == -1 + old_fill_indices = (sp_indexer == -1) & ~new_fill_indices + + if self.sp_index.npoints == 0 and old_fill_indices.all(): + # We've looked up all valid points on an all-sparse array. + taken = np.full( + sp_indexer.shape, fill_value=self.fill_value, dtype=self.dtype.subtype + ) + + elif self.sp_index.npoints == 0: + # Avoid taking from the empty self.sp_values + _dtype = np.result_type(self.dtype.subtype, type(fill_value)) + taken = np.full(sp_indexer.shape, fill_value=fill_value, dtype=_dtype) + else: + taken = self.sp_values.take(sp_indexer) + + # Fill in two steps. + # Old fill values + # New fill values + # potentially coercing to a new dtype at each stage. 
+ + m0 = sp_indexer[old_fill_indices] < 0 + m1 = sp_indexer[new_fill_indices] < 0 + + result_type = taken.dtype + + if m0.any(): + result_type = np.result_type(result_type, type(self.fill_value)) + taken = taken.astype(result_type) + taken[old_fill_indices] = self.fill_value + + if m1.any(): + result_type = np.result_type(result_type, type(fill_value)) + taken = taken.astype(result_type) + taken[new_fill_indices] = fill_value + + return taken + + def _take_without_fill(self, indices) -> Self: + to_shift = indices < 0 + + n = len(self) + + if (indices.max() >= n) or (indices.min() < -n): + if n == 0: + raise IndexError("cannot do a non-empty take from an empty axes.") + raise IndexError("out of bounds value in 'indices'.") + + if to_shift.any(): + indices = indices.copy() + indices[to_shift] += n + + sp_indexer = self.sp_index.lookup_array(indices) + value_mask = sp_indexer != -1 + new_sp_values = self.sp_values[sp_indexer[value_mask]] + + value_indices = np.flatnonzero(value_mask).astype(np.int32, copy=False) + + new_sp_index = make_sparse_index(len(indices), value_indices, kind=self.kind) + return type(self)._simple_new(new_sp_values, new_sp_index, dtype=self.dtype) + + def searchsorted( + self, + v: ArrayLike | object, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + msg = "searchsorted requires high memory usage." + warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level()) + v = np.asarray(v) + return np.asarray(self, dtype=self.dtype.subtype).searchsorted(v, side, sorter) + + def copy(self) -> Self: + values = self.sp_values.copy() + return self._simple_new(values, self.sp_index, self.dtype) + + @classmethod + def _concat_same_type(cls, to_concat: Sequence[Self]) -> Self: + fill_value = to_concat[0].fill_value + + values = [] + length = 0 + + if to_concat: + sp_kind = to_concat[0].kind + else: + sp_kind = "integer" + + sp_index: SparseIndex + if sp_kind == "integer": + indices = [] + + for arr in to_concat: + int_idx = arr.sp_index.indices.copy() + int_idx += length # TODO: wraparound + length += arr.sp_index.length + + values.append(arr.sp_values) + indices.append(int_idx) + + data = np.concatenate(values) + indices_arr = np.concatenate(indices) + # error: Argument 2 to "IntIndex" has incompatible type + # "ndarray[Any, dtype[signedinteger[_32Bit]]]"; + # expected "Sequence[int]" + sp_index = IntIndex(length, indices_arr) # type: ignore[arg-type] + + else: + # when concatenating block indices, we don't claim that you'll + # get an identical index as concatenating the values and then + # creating a new index. We don't want to spend the time trying + # to merge blocks across arrays in `to_concat`, so the resulting + # BlockIndex may have more blocks. + blengths = [] + blocs = [] + + for arr in to_concat: + block_idx = arr.sp_index.to_block_index() + + values.append(arr.sp_values) + blocs.append(block_idx.blocs.copy() + length) + blengths.append(block_idx.blengths) + length += arr.sp_index.length + + data = np.concatenate(values) + blocs_arr = np.concatenate(blocs) + blengths_arr = np.concatenate(blengths) + + sp_index = BlockIndex(length, blocs_arr, blengths_arr) + + return cls(data, sparse_index=sp_index, fill_value=fill_value) + + def astype(self, dtype: AstypeArg | None = None, copy: bool = True): + """ + Change the dtype of a SparseArray. + + The output will always be a SparseArray. To convert to a dense + ndarray with a certain dtype, use :meth:`numpy.asarray`. 
+ + Parameters + ---------- + dtype : np.dtype or ExtensionDtype + For SparseDtype, this changes the dtype of + ``self.sp_values`` and the ``self.fill_value``. + + For other dtypes, this only changes the dtype of + ``self.sp_values``. + + copy : bool, default True + Whether to ensure a copy is made, even if not necessary. + + Returns + ------- + SparseArray + + Examples + -------- + >>> arr = pd.arrays.SparseArray([0, 0, 1, 2]) + >>> arr + [0, 0, 1, 2] + Fill: 0 + IntIndex + Indices: array([2, 3], dtype=int32) + + >>> arr.astype(SparseDtype(np.dtype('int32'))) + [0, 0, 1, 2] + Fill: 0 + IntIndex + Indices: array([2, 3], dtype=int32) + + Using a NumPy dtype with a different kind (e.g. float) will coerce + just ``self.sp_values``. + + >>> arr.astype(SparseDtype(np.dtype('float64'))) + ... # doctest: +NORMALIZE_WHITESPACE + [nan, nan, 1.0, 2.0] + Fill: nan + IntIndex + Indices: array([2, 3], dtype=int32) + + Using a SparseDtype, you can also change the fill value as well. + + >>> arr.astype(SparseDtype("float64", fill_value=0.0)) + ... # doctest: +NORMALIZE_WHITESPACE + [0.0, 0.0, 1.0, 2.0] + Fill: 0.0 + IntIndex + Indices: array([2, 3], dtype=int32) + """ + if dtype == self._dtype: + if not copy: + return self + else: + return self.copy() + + future_dtype = pandas_dtype(dtype) + if not isinstance(future_dtype, SparseDtype): + # GH#34457 + values = np.asarray(self) + values = ensure_wrapped_if_datetimelike(values) + return astype_array(values, dtype=future_dtype, copy=False) + + dtype = self.dtype.update_dtype(dtype) + subtype = pandas_dtype(dtype._subtype_with_str) + subtype = cast(np.dtype, subtype) # ensured by update_dtype + values = ensure_wrapped_if_datetimelike(self.sp_values) + sp_values = astype_array(values, subtype, copy=copy) + sp_values = np.asarray(sp_values) + + return self._simple_new(sp_values, self.sp_index, dtype) + + def map(self, mapper, na_action=None) -> Self: + """ + Map categories using an input mapping or function. + + Parameters + ---------- + mapper : dict, Series, callable + The correspondence from old values to new. + na_action : {None, 'ignore'}, default None + If 'ignore', propagate NA values, without passing them to the + mapping correspondence. + + Returns + ------- + SparseArray + The output array will have the same density as the input. 
+        The output fill value will be the result of applying the
+        mapping to ``self.fill_value``.
+
+        Examples
+        --------
+        >>> arr = pd.arrays.SparseArray([0, 1, 2])
+        >>> arr.map(lambda x: x + 10)
+        [10, 11, 12]
+        Fill: 10
+        IntIndex
+        Indices: array([1, 2], dtype=int32)
+
+        >>> arr.map({0: 10, 1: 11, 2: 12})
+        [10, 11, 12]
+        Fill: 10
+        IntIndex
+        Indices: array([1, 2], dtype=int32)
+
+        >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2]))
+        [10, 11, 12]
+        Fill: 10
+        IntIndex
+        Indices: array([1, 2], dtype=int32)
+        """
+        is_map = isinstance(mapper, (abc.Mapping, ABCSeries))
+
+        fill_val = self.fill_value
+
+        if na_action is None or notna(fill_val):
+            fill_val = mapper.get(fill_val, fill_val) if is_map else mapper(fill_val)
+
+        def func(sp_val):
+            new_sp_val = mapper.get(sp_val, None) if is_map else mapper(sp_val)
+            # check identity and equality because nans are not equal to each other
+            if new_sp_val is fill_val or new_sp_val == fill_val:
+                msg = "fill value in the sparse values not supported"
+                raise ValueError(msg)
+            return new_sp_val
+
+        sp_values = [func(x) for x in self.sp_values]
+
+        return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_val)
+
+    def to_dense(self) -> np.ndarray:
+        """
+        Convert SparseArray to a NumPy array.
+
+        Returns
+        -------
+        arr : NumPy array
+        """
+        return np.asarray(self, dtype=self.sp_values.dtype)
+
+    def _where(self, mask, value):
+        # NB: may not preserve dtype, e.g. result may be Sparse[float64]
+        # while self is Sparse[int64]
+        naive_implementation = np.where(mask, self, value)
+        dtype = SparseDtype(naive_implementation.dtype, fill_value=self.fill_value)
+        result = type(self)._from_sequence(naive_implementation, dtype=dtype)
+        return result
+
+    # ------------------------------------------------------------------------
+    # IO
+    # ------------------------------------------------------------------------
+    def __setstate__(self, state) -> None:
+        """Necessary for making this object picklable"""
+        if isinstance(state, tuple):
+            # Compat for pandas < 0.24.0
+            nd_state, (fill_value, sp_index) = state
+            sparse_values = np.array([])
+            sparse_values.__setstate__(nd_state)
+
+            self._sparse_values = sparse_values
+            self._sparse_index = sp_index
+            self._dtype = SparseDtype(sparse_values.dtype, fill_value)
+        else:
+            self.__dict__.update(state)
+
+    def nonzero(self) -> tuple[npt.NDArray[np.int32]]:
+        if self.fill_value == 0:
+            return (self.sp_index.indices,)
+        else:
+            return (self.sp_index.indices[self.sp_values != 0],)
+
+    # ------------------------------------------------------------------------
+    # Reductions
+    # ------------------------------------------------------------------------
+
+    def _reduce(
+        self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
+    ):
+        method = getattr(self, name, None)
+
+        if method is None:
+            raise TypeError(f"cannot perform {name} with type {self.dtype}")
+
+        if skipna:
+            arr = self
+        else:
+            arr = self.dropna()
+
+        result = getattr(arr, name)(**kwargs)
+
+        if keepdims:
+            return type(self)([result], dtype=self.dtype)
+        else:
+            return result
+
+    def all(self, axis=None, *args, **kwargs):
+        """
+        Tests whether all elements evaluate to True
+
+        Returns
+        -------
+        all : bool
+
+        See Also
+        --------
+        numpy.all
+        """
+        nv.validate_all(args, kwargs)
+
+        values = self.sp_values
+
+        if len(values) != len(self) and not np.all(self.fill_value):
+            return False
+
+        return values.all()
+
+    def any(self, axis: AxisInt = 0, *args, **kwargs):
+        """
+        Tests whether at least one element evaluates to True
+
+        Returns
+
------- + any : bool + + See Also + -------- + numpy.any + """ + nv.validate_any(args, kwargs) + + values = self.sp_values + + if len(values) != len(self) and np.any(self.fill_value): + return True + + return values.any().item() + + def sum( + self, + axis: AxisInt = 0, + min_count: int = 0, + skipna: bool = True, + *args, + **kwargs, + ) -> Scalar: + """ + Sum of non-NA/null values + + Parameters + ---------- + axis : int, default 0 + Not Used. NumPy compatibility. + min_count : int, default 0 + The required number of valid values to perform the summation. If fewer + than ``min_count`` valid values are present, the result will be the missing + value indicator for subarray type. + *args, **kwargs + Not Used. NumPy compatibility. + + Returns + ------- + scalar + """ + nv.validate_sum(args, kwargs) + valid_vals = self._valid_sp_values + sp_sum = valid_vals.sum() + has_na = self.sp_index.ngaps > 0 and not self._null_fill_value + + if has_na and not skipna: + return na_value_for_dtype(self.dtype.subtype, compat=False) + + if self._null_fill_value: + if check_below_min_count(valid_vals.shape, None, min_count): + return na_value_for_dtype(self.dtype.subtype, compat=False) + return sp_sum + else: + nsparse = self.sp_index.ngaps + if check_below_min_count(valid_vals.shape, None, min_count - nsparse): + return na_value_for_dtype(self.dtype.subtype, compat=False) + return sp_sum + self.fill_value * nsparse + + def cumsum(self, axis: AxisInt = 0, *args, **kwargs) -> SparseArray: + """ + Cumulative sum of non-NA/null values. + + When performing the cumulative summation, any non-NA/null values will + be skipped. The resulting SparseArray will preserve the locations of + NaN values, but the fill value will be `np.nan` regardless. + + Parameters + ---------- + axis : int or None + Axis over which to perform the cumulative summation. If None, + perform cumulative summation over flattened array. + + Returns + ------- + cumsum : SparseArray + """ + nv.validate_cumsum(args, kwargs) + + if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour. + raise ValueError(f"axis(={axis}) out of bounds") + + if not self._null_fill_value: + return SparseArray(self.to_dense()).cumsum() + + return SparseArray( + self.sp_values.cumsum(), + sparse_index=self.sp_index, + fill_value=self.fill_value, + ) + + def mean(self, axis: Axis = 0, *args, **kwargs): + """ + Mean of non-NA/null values + + Returns + ------- + mean : float + """ + nv.validate_mean(args, kwargs) + valid_vals = self._valid_sp_values + sp_sum = valid_vals.sum() + ct = len(valid_vals) + + if self._null_fill_value: + return sp_sum / ct + else: + nsparse = self.sp_index.ngaps + return (sp_sum + self.fill_value * nsparse) / (ct + nsparse) + + def max(self, *, axis: AxisInt | None = None, skipna: bool = True): + """ + Max of array values, ignoring NA values if specified. + + Parameters + ---------- + axis : int, default 0 + Not Used. NumPy compatibility. + skipna : bool, default True + Whether to ignore NA values. + + Returns + ------- + scalar + """ + nv.validate_minmax_axis(axis, self.ndim) + return self._min_max("max", skipna=skipna) + + def min(self, *, axis: AxisInt | None = None, skipna: bool = True): + """ + Min of array values, ignoring NA values if specified. + + Parameters + ---------- + axis : int, default 0 + Not Used. NumPy compatibility. + skipna : bool, default True + Whether to ignore NA values. 
+ + Returns + ------- + scalar + """ + nv.validate_minmax_axis(axis, self.ndim) + return self._min_max("min", skipna=skipna) + + def _min_max(self, kind: Literal["min", "max"], skipna: bool) -> Scalar: + """ + Min/max of non-NA/null values + + Parameters + ---------- + kind : {"min", "max"} + skipna : bool + + Returns + ------- + scalar + """ + valid_vals = self._valid_sp_values + has_nonnull_fill_vals = not self._null_fill_value and self.sp_index.ngaps > 0 + + if len(valid_vals) > 0: + sp_min_max = getattr(valid_vals, kind)() + + # If a non-null fill value is currently present, it might be the min/max + if has_nonnull_fill_vals: + func = max if kind == "max" else min + return func(sp_min_max, self.fill_value) + elif skipna: + return sp_min_max + elif self.sp_index.ngaps == 0: + # No NAs present + return sp_min_max + else: + return na_value_for_dtype(self.dtype.subtype, compat=False) + elif has_nonnull_fill_vals: + return self.fill_value + else: + return na_value_for_dtype(self.dtype.subtype, compat=False) + + def _argmin_argmax(self, kind: Literal["argmin", "argmax"]) -> int: + values = self._sparse_values + index = self._sparse_index.indices + mask = np.asarray(isna(values)) + func = np.argmax if kind == "argmax" else np.argmin + + idx = np.arange(values.shape[0]) + non_nans = values[~mask] + non_nan_idx = idx[~mask] + + _candidate = non_nan_idx[func(non_nans)] + candidate = index[_candidate] + + if isna(self.fill_value): + return candidate + if kind == "argmin" and self[candidate] < self.fill_value: + return candidate + if kind == "argmax" and self[candidate] > self.fill_value: + return candidate + _loc = self._first_fill_value_loc() + if _loc == -1: + # fill_value doesn't exist + return candidate + else: + return _loc + + def argmax(self, skipna: bool = True) -> int: + validate_bool_kwarg(skipna, "skipna") + if not skipna and self._hasna: + raise NotImplementedError + return self._argmin_argmax("argmax") + + def argmin(self, skipna: bool = True) -> int: + validate_bool_kwarg(skipna, "skipna") + if not skipna and self._hasna: + raise NotImplementedError + return self._argmin_argmax("argmin") + + # ------------------------------------------------------------------------ + # Ufuncs + # ------------------------------------------------------------------------ + + _HANDLED_TYPES = (np.ndarray, numbers.Number) + + def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + out = kwargs.get("out", ()) + + for x in inputs + out: + if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)): + return NotImplemented + + # for binary ops, use our custom dunder methods + result = arraylike.maybe_dispatch_ufunc_to_dunder_op( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + if "out" in kwargs: + # e.g. tests.arrays.sparse.test_arithmetics.test_ndarray_inplace + res = arraylike.dispatch_ufunc_with_out( + self, ufunc, method, *inputs, **kwargs + ) + return res + + if method == "reduce": + result = arraylike.dispatch_reduction_ufunc( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + # e.g. tests.series.test_ufunc.TestNumpyReductions + return result + + if len(inputs) == 1: + # No alignment necessary. + sp_values = getattr(ufunc, method)(self.sp_values, **kwargs) + fill_value = getattr(ufunc, method)(self.fill_value, **kwargs) + + if ufunc.nout > 1: + # multiple outputs. e.g. 
modf + arrays = tuple( + self._simple_new( + sp_value, self.sp_index, SparseDtype(sp_value.dtype, fv) + ) + for sp_value, fv in zip(sp_values, fill_value) + ) + return arrays + elif method == "reduce": + # e.g. reductions + return sp_values + + return self._simple_new( + sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value) + ) + + new_inputs = tuple(np.asarray(x) for x in inputs) + result = getattr(ufunc, method)(*new_inputs, **kwargs) + if out: + if len(out) == 1: + out = out[0] + return out + + if ufunc.nout > 1: + return tuple(type(self)(x) for x in result) + elif method == "at": + # no return value + return None + else: + return type(self)(result) + + # ------------------------------------------------------------------------ + # Ops + # ------------------------------------------------------------------------ + + def _arith_method(self, other, op): + op_name = op.__name__ + + if isinstance(other, SparseArray): + return _sparse_array_op(self, other, op, op_name) + + elif is_scalar(other): + with np.errstate(all="ignore"): + fill = op(_get_fill(self), np.asarray(other)) + result = op(self.sp_values, other) + + if op_name == "divmod": + left, right = result + lfill, rfill = fill + return ( + _wrap_result(op_name, left, self.sp_index, lfill), + _wrap_result(op_name, right, self.sp_index, rfill), + ) + + return _wrap_result(op_name, result, self.sp_index, fill) + + else: + other = np.asarray(other) + with np.errstate(all="ignore"): + if len(self) != len(other): + raise AssertionError( + f"length mismatch: {len(self)} vs. {len(other)}" + ) + if not isinstance(other, SparseArray): + dtype = getattr(other, "dtype", None) + other = SparseArray(other, fill_value=self.fill_value, dtype=dtype) + return _sparse_array_op(self, other, op, op_name) + + def _cmp_method(self, other, op) -> SparseArray: + if not is_scalar(other) and not isinstance(other, type(self)): + # convert list-like to ndarray + other = np.asarray(other) + + if isinstance(other, np.ndarray): + # TODO: make this more flexible than just ndarray... 
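+            # wrapping the dense operand in a SparseArray sharing our
+            # fill_value lets _sparse_array_op align the two sparse indexes
+            # instead of densifying self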
+            other = SparseArray(other, fill_value=self.fill_value)
+
+        if isinstance(other, SparseArray):
+            if len(self) != len(other):
+                raise ValueError(
+                    f"operands have mismatched length {len(self)} and {len(other)}"
+                )
+
+            op_name = op.__name__.strip("_")
+            return _sparse_array_op(self, other, op, op_name)
+        else:
+            # scalar
+            fill_value = op(self.fill_value, other)
+            result = np.full(len(self), fill_value, dtype=np.bool_)
+            result[self.sp_index.indices] = op(self.sp_values, other)
+
+            return type(self)(
+                result,
+                fill_value=fill_value,
+                dtype=np.bool_,
+            )
+
+    _logical_method = _cmp_method
+
+    def _unary_method(self, op) -> SparseArray:
+        fill_value = op(np.array(self.fill_value)).item()
+        dtype = SparseDtype(self.dtype.subtype, fill_value)
+        # NOTE: if the fill_value doesn't change
+        # we just have to apply op to sp_values
+        if isna(self.fill_value) or fill_value == self.fill_value:
+            values = op(self.sp_values)
+            return type(self)._simple_new(values, self.sp_index, self.dtype)
+        # otherwise we have to recalculate the sparse index
+        return type(self)(op(self.to_dense()), dtype=dtype)
+
+    def __pos__(self) -> SparseArray:
+        return self._unary_method(operator.pos)
+
+    def __neg__(self) -> SparseArray:
+        return self._unary_method(operator.neg)
+
+    def __invert__(self) -> SparseArray:
+        return self._unary_method(operator.invert)
+
+    def __abs__(self) -> SparseArray:
+        return self._unary_method(operator.abs)
+
+    # ----------
+    # Formatting
+    # ----------
+    def __repr__(self) -> str:
+        pp_str = printing.pprint_thing(self)
+        pp_fill = printing.pprint_thing(self.fill_value)
+        pp_index = printing.pprint_thing(self.sp_index)
+        return f"{pp_str}\nFill: {pp_fill}\n{pp_index}"
+
+    def _formatter(self, boxed: bool = False):
+        # Defer to the formatter from the GenericArrayFormatter calling us.
+        # This will infer the correct formatter from the dtype of the values.
+        return None
+
+
+def _make_sparse(
+    arr: np.ndarray,
+    kind: SparseIndexKind = "block",
+    fill_value=None,
+    dtype: np.dtype | None = None,
+):
+    """
+    Convert ndarray to sparse format
+
+    Parameters
+    ----------
+    arr : ndarray
+    kind : {'block', 'integer'}
+    fill_value : NaN or another value
+    dtype : np.dtype, optional
+
+    Returns
+    -------
+    (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)
+    """
+    assert isinstance(arr, np.ndarray)
+
+    if arr.ndim > 1:
+        raise TypeError("expected dimension <= 1 data")
+
+    if fill_value is None:
+        fill_value = na_value_for_dtype(arr.dtype)
+
+    if isna(fill_value):
+        mask = notna(arr)
+    else:
+        # cast to object for comparison, to be safe
+        if is_string_dtype(arr.dtype):
+            arr = arr.astype(object)
+
+        if is_object_dtype(arr.dtype):
+            # NumPy's element-wise equality check does not distinguish
+            # element types, e.g. 0, 0.0, and False all compare as equal,
+            # so we have to check both the type and the value of each element.
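+            # (e.g. with fill_value=0, a plain ``arr != 0`` would wrongly
+            # treat 0.0 and False as fill values)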
+ mask = splib.make_mask_object_ndarray(arr, fill_value) + else: + mask = arr != fill_value + + length = len(arr) + if length != len(mask): + # the arr is a SparseArray + indices = mask.sp_index.indices + else: + indices = mask.nonzero()[0].astype(np.int32) + + index = make_sparse_index(length, indices, kind) + sparsified_values = arr[mask] + if dtype is not None: + sparsified_values = ensure_wrapped_if_datetimelike(sparsified_values) + sparsified_values = astype_array(sparsified_values, dtype=dtype) + sparsified_values = np.asarray(sparsified_values) + + # TODO: copy + return sparsified_values, index, fill_value + + +@overload +def make_sparse_index(length: int, indices, kind: Literal["block"]) -> BlockIndex: + ... + + +@overload +def make_sparse_index(length: int, indices, kind: Literal["integer"]) -> IntIndex: + ... + + +def make_sparse_index(length: int, indices, kind: SparseIndexKind) -> SparseIndex: + index: SparseIndex + if kind == "block": + locs, lens = splib.get_blocks(indices) + index = BlockIndex(length, locs, lens) + elif kind == "integer": + index = IntIndex(length, indices) + else: # pragma: no cover + raise ValueError("must be block or integer type") + return index diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/scipy_sparse.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/scipy_sparse.py new file mode 100644 index 00000000..71b71a97 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/sparse/scipy_sparse.py @@ -0,0 +1,207 @@ +""" +Interaction with scipy.sparse matrices. + +Currently only includes to_coo helpers. +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas._libs import lib + +from pandas.core.dtypes.missing import notna + +from pandas.core.algorithms import factorize +from pandas.core.indexes.api import MultiIndex +from pandas.core.series import Series + +if TYPE_CHECKING: + from collections.abc import Iterable + + import numpy as np + import scipy.sparse + + from pandas._typing import ( + IndexLabel, + npt, + ) + + +def _check_is_partition(parts: Iterable, whole: Iterable): + whole = set(whole) + parts = [set(x) for x in parts] + if set.intersection(*parts) != set(): + raise ValueError("Is not a partition because intersection is not null.") + if set.union(*parts) != whole: + raise ValueError("Is not a partition because union is not the whole.") + + +def _levels_to_axis( + ss, + levels: tuple[int] | list[int], + valid_ilocs: npt.NDArray[np.intp], + sort_labels: bool = False, +) -> tuple[npt.NDArray[np.intp], list[IndexLabel]]: + """ + For a MultiIndexed sparse Series `ss`, return `ax_coords` and `ax_labels`, + where `ax_coords` are the coordinates along one of the two axes of the + destination sparse matrix, and `ax_labels` are the labels from `ss`' Index + which correspond to these coordinates. + + Parameters + ---------- + ss : Series + levels : tuple/list + valid_ilocs : numpy.ndarray + Array of integer positions of valid values for the sparse matrix in ss. + sort_labels : bool, default False + Sort the axis labels before forming the sparse matrix. When `levels` + refers to a single level, set to True for a faster execution. + + Returns + ------- + ax_coords : numpy.ndarray (axis coordinates) + ax_labels : list (axis labels) + """ + # Since the labels are sorted in `Index.levels`, when we wish to sort and + # there is only one level of the MultiIndex for this axis, the desired + # output can be obtained in the following simpler, more efficient way. 
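+    # (the level's codes already index into the sorted ``Index.levels``
+    # values, so no separate factorize step is needed on this path)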
+ if sort_labels and len(levels) == 1: + ax_coords = ss.index.codes[levels[0]][valid_ilocs] + ax_labels = ss.index.levels[levels[0]] + + else: + levels_values = lib.fast_zip( + [ss.index.get_level_values(lvl).to_numpy() for lvl in levels] + ) + codes, ax_labels = factorize(levels_values, sort=sort_labels) + ax_coords = codes[valid_ilocs] + + ax_labels = ax_labels.tolist() + return ax_coords, ax_labels + + +def _to_ijv( + ss, + row_levels: tuple[int] | list[int] = (0,), + column_levels: tuple[int] | list[int] = (1,), + sort_labels: bool = False, +) -> tuple[ + np.ndarray, + npt.NDArray[np.intp], + npt.NDArray[np.intp], + list[IndexLabel], + list[IndexLabel], +]: + """ + For an arbitrary MultiIndexed sparse Series return (v, i, j, ilabels, + jlabels) where (v, (i, j)) is suitable for passing to scipy.sparse.coo + constructor, and ilabels and jlabels are the row and column labels + respectively. + + Parameters + ---------- + ss : Series + row_levels : tuple/list + column_levels : tuple/list + sort_labels : bool, default False + Sort the row and column labels before forming the sparse matrix. + When `row_levels` and/or `column_levels` refer to a single level, + set to `True` for a faster execution. + + Returns + ------- + values : numpy.ndarray + Valid values to populate a sparse matrix, extracted from + ss. + i_coords : numpy.ndarray (row coordinates of the values) + j_coords : numpy.ndarray (column coordinates of the values) + i_labels : list (row labels) + j_labels : list (column labels) + """ + # index and column levels must be a partition of the index + _check_is_partition([row_levels, column_levels], range(ss.index.nlevels)) + # From the sparse Series, get the integer indices and data for valid sparse + # entries. + sp_vals = ss.array.sp_values + na_mask = notna(sp_vals) + values = sp_vals[na_mask] + valid_ilocs = ss.array.sp_index.indices[na_mask] + + i_coords, i_labels = _levels_to_axis( + ss, row_levels, valid_ilocs, sort_labels=sort_labels + ) + + j_coords, j_labels = _levels_to_axis( + ss, column_levels, valid_ilocs, sort_labels=sort_labels + ) + + return values, i_coords, j_coords, i_labels, j_labels + + +def sparse_series_to_coo( + ss: Series, + row_levels: Iterable[int] = (0,), + column_levels: Iterable[int] = (1,), + sort_labels: bool = False, +) -> tuple[scipy.sparse.coo_matrix, list[IndexLabel], list[IndexLabel]]: + """ + Convert a sparse Series to a scipy.sparse.coo_matrix using index + levels row_levels, column_levels as the row and column + labels respectively. Returns the sparse_matrix, row and column labels. + """ + import scipy.sparse + + if ss.index.nlevels < 2: + raise ValueError("to_coo requires MultiIndex with nlevels >= 2.") + if not ss.index.is_unique: + raise ValueError( + "Duplicate index entries are not allowed in to_coo transformation." + ) + + # to keep things simple, only rely on integer indexing (not labels) + row_levels = [ss.index._get_level_number(x) for x in row_levels] + column_levels = [ss.index._get_level_number(x) for x in column_levels] + + v, i, j, rows, columns = _to_ijv( + ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels + ) + sparse_matrix = scipy.sparse.coo_matrix( + (v, (i, j)), shape=(len(rows), len(columns)) + ) + return sparse_matrix, rows, columns + + +def coo_to_sparse_series( + A: scipy.sparse.coo_matrix, dense_index: bool = False +) -> Series: + """ + Convert a scipy.sparse.coo_matrix to a Series with type sparse. 
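+
+    The row and column coordinates of ``A`` become the two levels of the
+    resulting Series' MultiIndex.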
+ + Parameters + ---------- + A : scipy.sparse.coo_matrix + dense_index : bool, default False + + Returns + ------- + Series + + Raises + ------ + TypeError if A is not a coo_matrix + """ + from pandas import SparseDtype + + try: + ser = Series(A.data, MultiIndex.from_arrays((A.row, A.col)), copy=False) + except AttributeError as err: + raise TypeError( + f"Expected coo_matrix. Got {type(A).__name__} instead." + ) from err + ser = ser.sort_index() + ser = ser.astype(SparseDtype(ser.dtype)) + if dense_index: + ind = MultiIndex.from_product([A.row, A.col]) + ser = ser.reindex(ind) + return ser diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/string_.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/string_.py new file mode 100644 index 00000000..a41214ae --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/string_.py @@ -0,0 +1,643 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Literal, +) + +import numpy as np + +from pandas._config import get_option + +from pandas._libs import ( + lib, + missing as libmissing, +) +from pandas._libs.arrays import NDArrayBacked +from pandas._libs.lib import ensure_string_array +from pandas.compat import pa_version_under7p0 +from pandas.compat.numpy import function as nv +from pandas.util._decorators import doc + +from pandas.core.dtypes.base import ( + ExtensionDtype, + StorageExtensionDtype, + register_extension_dtype, +) +from pandas.core.dtypes.common import ( + is_array_like, + is_bool_dtype, + is_integer_dtype, + is_object_dtype, + is_string_dtype, + pandas_dtype, +) + +from pandas.core import ops +from pandas.core.array_algos import masked_reductions +from pandas.core.arrays.base import ExtensionArray +from pandas.core.arrays.floating import ( + FloatingArray, + FloatingDtype, +) +from pandas.core.arrays.integer import ( + IntegerArray, + IntegerDtype, +) +from pandas.core.arrays.numpy_ import NumpyExtensionArray +from pandas.core.construction import extract_array +from pandas.core.indexers import check_array_indexer +from pandas.core.missing import isna + +if TYPE_CHECKING: + import pyarrow + + from pandas._typing import ( + AxisInt, + Dtype, + NumpySorter, + NumpyValueArrayLike, + Scalar, + npt, + type_t, + ) + + from pandas import Series + + +@register_extension_dtype +class StringDtype(StorageExtensionDtype): + """ + Extension dtype for string data. + + .. warning:: + + StringDtype is considered experimental. The implementation and + parts of the API may change without warning. + + Parameters + ---------- + storage : {"python", "pyarrow", "pyarrow_numpy"}, optional + If not given, the value of ``pd.options.mode.string_storage``. + + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> pd.StringDtype() + string[python] + + >>> pd.StringDtype(storage="pyarrow") + string[pyarrow] + """ + + name = "string" + + #: StringDtype().na_value uses pandas.NA except the implementation that + # follows NumPy semantics, which uses nan. 
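+    #: e.g. StringDtype("pyarrow_numpy").na_value is np.nan, while the
+    #: "python" and "pyarrow" storages use pd.NA.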
+    @property
+    def na_value(self) -> libmissing.NAType | float:  # type: ignore[override]
+        if self.storage == "pyarrow_numpy":
+            return np.nan
+        else:
+            return libmissing.NA
+
+    _metadata = ("storage",)
+
+    def __init__(self, storage=None) -> None:
+        if storage is None:
+            infer_string = get_option("future.infer_string")
+            if infer_string:
+                storage = "pyarrow_numpy"
+            else:
+                storage = get_option("mode.string_storage")
+        if storage not in {"python", "pyarrow", "pyarrow_numpy"}:
+            raise ValueError(
+                f"Storage must be 'python', 'pyarrow' or 'pyarrow_numpy'. "
+                f"Got {storage} instead."
+            )
+        if storage in ("pyarrow", "pyarrow_numpy") and pa_version_under7p0:
+            raise ImportError(
+                "pyarrow>=7.0.0 is required for PyArrow backed StringArray."
+            )
+        self.storage = storage
+
+    @property
+    def type(self) -> type[str]:
+        return str
+
+    @classmethod
+    def construct_from_string(cls, string):
+        """
+        Construct a StringDtype from a string.
+
+        Parameters
+        ----------
+        string : str
+            The name of the dtype. The storage type will be taken from `string`.
+            Valid options and their storage types are
+
+            ========================== ==============================================
+            string                     result storage
+            ========================== ==============================================
+            ``'string'``               pd.options.mode.string_storage, default python
+            ``'string[python]'``       python
+            ``'string[pyarrow]'``      pyarrow
+            ========================== ==============================================
+
+        Returns
+        -------
+        StringDtype
+
+        Raises
+        ------
+        TypeError
+            If the string is not a valid option.
+        """
+        if not isinstance(string, str):
+            raise TypeError(
+                f"'construct_from_string' expects a string, got {type(string)}"
+            )
+        if string == "string":
+            return cls()
+        elif string == "string[python]":
+            return cls(storage="python")
+        elif string == "string[pyarrow]":
+            return cls(storage="pyarrow")
+        elif string == "string[pyarrow_numpy]":
+            return cls(storage="pyarrow_numpy")
+        else:
+            raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
+
+    # https://github.com/pandas-dev/pandas/issues/36126
+    # error: Signature of "construct_array_type" incompatible with supertype
+    # "ExtensionDtype"
+    def construct_array_type(  # type: ignore[override]
+        self,
+    ) -> type_t[BaseStringArray]:
+        """
+        Return the array type associated with this dtype.
+
+        Returns
+        -------
+        type
+        """
+        from pandas.core.arrays.string_arrow import (
+            ArrowStringArray,
+            ArrowStringArrayNumpySemantics,
+        )
+
+        if self.storage == "python":
+            return StringArray
+        elif self.storage == "pyarrow":
+            return ArrowStringArray
+        else:
+            return ArrowStringArrayNumpySemantics
+
+    def __from_arrow__(
+        self, array: pyarrow.Array | pyarrow.ChunkedArray
+    ) -> BaseStringArray:
+        """
+        Construct StringArray from pyarrow Array/ChunkedArray.
+ """ + if self.storage == "pyarrow": + from pandas.core.arrays.string_arrow import ArrowStringArray + + return ArrowStringArray(array) + elif self.storage == "pyarrow_numpy": + from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics + + return ArrowStringArrayNumpySemantics(array) + else: + import pyarrow + + if isinstance(array, pyarrow.Array): + chunks = [array] + else: + # pyarrow.ChunkedArray + chunks = array.chunks + + results = [] + for arr in chunks: + # convert chunk by chunk to numpy and concatenate then, to avoid + # overflow for large string data when concatenating the pyarrow arrays + arr = arr.to_numpy(zero_copy_only=False) + arr = ensure_string_array(arr, na_value=libmissing.NA) + results.append(arr) + + if len(chunks) == 0: + arr = np.array([], dtype=object) + else: + arr = np.concatenate(results) + + # Bypass validation inside StringArray constructor, see GH#47781 + new_string_array = StringArray.__new__(StringArray) + NDArrayBacked.__init__( + new_string_array, + arr, + StringDtype(storage="python"), + ) + return new_string_array + + +class BaseStringArray(ExtensionArray): + """ + Mixin class for StringArray, ArrowStringArray. + """ + + @doc(ExtensionArray.tolist) + def tolist(self): + if self.ndim > 1: + return [x.tolist() for x in self] + return list(self.to_numpy()) + + +# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is +# incompatible with definition in base class "ExtensionArray" +class StringArray(BaseStringArray, NumpyExtensionArray): # type: ignore[misc] + """ + Extension array for string data. + + .. warning:: + + StringArray is considered experimental. The implementation and + parts of the API may change without warning. + + Parameters + ---------- + values : array-like + The array of data. + + .. warning:: + + Currently, this expects an object-dtype ndarray + where the elements are Python strings + or nan-likes (``None``, ``np.nan``, ``NA``). + This may change without warning in the future. Use + :meth:`pandas.array` with ``dtype="string"`` for a stable way of + creating a `StringArray` from any sequence. + + .. versionchanged:: 1.5.0 + + StringArray now accepts array-likes containing + nan-likes(``None``, ``np.nan``) for the ``values`` parameter + in addition to strings and :attr:`pandas.NA` + + copy : bool, default False + Whether to copy the array of data. + + Attributes + ---------- + None + + Methods + ------- + None + + See Also + -------- + :func:`pandas.array` + The recommended function for creating a StringArray. + Series.str + The string methods are available on Series backed by + a StringArray. + + Notes + ----- + StringArray returns a BooleanArray for comparison methods. + + Examples + -------- + >>> pd.array(['This is', 'some text', None, 'data.'], dtype="string") + + ['This is', 'some text', , 'data.'] + Length: 4, dtype: string + + Unlike arrays instantiated with ``dtype="object"``, ``StringArray`` + will convert the values to strings. + + >>> pd.array(['1', 1], dtype="object") + + ['1', 1] + Length: 2, dtype: object + >>> pd.array(['1', 1], dtype="string") + + ['1', '1'] + Length: 2, dtype: string + + However, instantiating StringArrays directly with non-strings will raise an error. 
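+
+    For example, constructing a StringArray directly from non-string values
+    raises a ``ValueError`` (abbreviated traceback shown):
+
+    >>> pd.arrays.StringArray(np.array(['1', 1], dtype="object"))
+    Traceback (most recent call last):
+    ...
+    ValueError: StringArray requires a sequence of strings or pandas.NA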
+ + For comparison methods, `StringArray` returns a :class:`pandas.BooleanArray`: + + >>> pd.array(["a", None, "c"], dtype="string") == "a" + + [True, , False] + Length: 3, dtype: boolean + """ + + # undo the NumpyExtensionArray hack + _typ = "extension" + + def __init__(self, values, copy: bool = False) -> None: + values = extract_array(values) + + super().__init__(values, copy=copy) + if not isinstance(values, type(self)): + self._validate() + NDArrayBacked.__init__(self, self._ndarray, StringDtype(storage="python")) + + def _validate(self): + """Validate that we only store NA or strings.""" + if len(self._ndarray) and not lib.is_string_array(self._ndarray, skipna=True): + raise ValueError("StringArray requires a sequence of strings or pandas.NA") + if self._ndarray.dtype != "object": + raise ValueError( + "StringArray requires a sequence of strings or pandas.NA. Got " + f"'{self._ndarray.dtype}' dtype instead." + ) + # Check to see if need to convert Na values to pd.NA + if self._ndarray.ndim > 2: + # Ravel if ndims > 2 b/c no cythonized version available + lib.convert_nans_to_NA(self._ndarray.ravel("K")) + else: + lib.convert_nans_to_NA(self._ndarray) + + @classmethod + def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False): + if dtype and not (isinstance(dtype, str) and dtype == "string"): + dtype = pandas_dtype(dtype) + assert isinstance(dtype, StringDtype) and dtype.storage == "python" + + from pandas.core.arrays.masked import BaseMaskedArray + + if isinstance(scalars, BaseMaskedArray): + # avoid costly conversion to object dtype + na_values = scalars._mask + result = scalars._data + result = lib.ensure_string_array(result, copy=copy, convert_na_value=False) + result[na_values] = libmissing.NA + + else: + if lib.is_pyarrow_array(scalars): + # pyarrow array; we cannot rely on the "to_numpy" check in + # ensure_string_array because calling scalars.to_numpy would set + # zero_copy_only to True which caused problems see GH#52076 + scalars = np.array(scalars) + # convert non-na-likes to str, and nan-likes to StringDtype().na_value + result = lib.ensure_string_array(scalars, na_value=libmissing.NA, copy=copy) + + # Manually creating new array avoids the validation step in the __init__, so is + # faster. Refactor need for validation? + new_string_array = cls.__new__(cls) + NDArrayBacked.__init__(new_string_array, result, StringDtype(storage="python")) + + return new_string_array + + @classmethod + def _from_sequence_of_strings( + cls, strings, *, dtype: Dtype | None = None, copy: bool = False + ): + return cls._from_sequence(strings, dtype=dtype, copy=copy) + + @classmethod + def _empty(cls, shape, dtype) -> StringArray: + values = np.empty(shape, dtype=object) + values[:] = libmissing.NA + return cls(values).astype(dtype, copy=False) + + def __arrow_array__(self, type=None): + """ + Convert myself into a pyarrow Array. 
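+
+        Missing values are converted to ``None`` so that pyarrow stores
+        them as nulls.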
+ """ + import pyarrow as pa + + if type is None: + type = pa.string() + + values = self._ndarray.copy() + values[self.isna()] = None + return pa.array(values, type=type, from_pandas=True) + + def _values_for_factorize(self): + arr = self._ndarray.copy() + mask = self.isna() + arr[mask] = None + return arr, None + + def __setitem__(self, key, value) -> None: + value = extract_array(value, extract_numpy=True) + if isinstance(value, type(self)): + # extract_array doesn't extract NumpyExtensionArray subclasses + value = value._ndarray + + key = check_array_indexer(self, key) + scalar_key = lib.is_scalar(key) + scalar_value = lib.is_scalar(value) + if scalar_key and not scalar_value: + raise ValueError("setting an array element with a sequence.") + + # validate new items + if scalar_value: + if isna(value): + value = libmissing.NA + elif not isinstance(value, str): + raise TypeError( + f"Cannot set non-string value '{value}' into a StringArray." + ) + else: + if not is_array_like(value): + value = np.asarray(value, dtype=object) + if len(value) and not lib.is_string_array(value, skipna=True): + raise TypeError("Must provide strings.") + + mask = isna(value) + if mask.any(): + value = value.copy() + value[isna(value)] = libmissing.NA + + super().__setitem__(key, value) + + def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: + # the super() method NDArrayBackedExtensionArray._putmask uses + # np.putmask which doesn't properly handle None/pd.NA, so using the + # base class implementation that uses __setitem__ + ExtensionArray._putmask(self, mask, value) + + def astype(self, dtype, copy: bool = True): + dtype = pandas_dtype(dtype) + + if dtype == self.dtype: + if copy: + return self.copy() + return self + + elif isinstance(dtype, IntegerDtype): + arr = self._ndarray.copy() + mask = self.isna() + arr[mask] = 0 + values = arr.astype(dtype.numpy_dtype) + return IntegerArray(values, mask, copy=False) + elif isinstance(dtype, FloatingDtype): + arr = self.copy() + mask = self.isna() + arr[mask] = "0" + values = arr.astype(dtype.numpy_dtype) + return FloatingArray(values, mask, copy=False) + elif isinstance(dtype, ExtensionDtype): + # Skip the NumpyExtensionArray.astype method + return ExtensionArray.astype(self, dtype, copy) + elif np.issubdtype(dtype, np.floating): + arr = self._ndarray.copy() + mask = self.isna() + arr[mask] = 0 + values = arr.astype(dtype) + values[mask] = np.nan + return values + + return super().astype(dtype, copy) + + def _reduce( + self, name: str, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs + ): + if name in ["min", "max"]: + return getattr(self, name)(skipna=skipna, axis=axis) + + raise TypeError(f"Cannot perform reduction '{name}' with string dtype") + + def min(self, axis=None, skipna: bool = True, **kwargs) -> Scalar: + nv.validate_min((), kwargs) + result = masked_reductions.min( + values=self.to_numpy(), mask=self.isna(), skipna=skipna + ) + return self._wrap_reduction_result(axis, result) + + def max(self, axis=None, skipna: bool = True, **kwargs) -> Scalar: + nv.validate_max((), kwargs) + result = masked_reductions.max( + values=self.to_numpy(), mask=self.isna(), skipna=skipna + ) + return self._wrap_reduction_result(axis, result) + + def value_counts(self, dropna: bool = True) -> Series: + from pandas.core.algorithms import value_counts_internal as value_counts + + result = value_counts(self._ndarray, dropna=dropna).astype("Int64") + result.index = result.index.astype(self.dtype) + return result + + def memory_usage(self, deep: bool = False) 
-> int: + result = self._ndarray.nbytes + if deep: + return result + lib.memory_usage_of_objects(self._ndarray) + return result + + @doc(ExtensionArray.searchsorted) + def searchsorted( + self, + value: NumpyValueArrayLike | ExtensionArray, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + if self._hasna: + raise ValueError( + "searchsorted requires array to be sorted, which is impossible " + "with NAs present." + ) + return super().searchsorted(value=value, side=side, sorter=sorter) + + def _cmp_method(self, other, op): + from pandas.arrays import BooleanArray + + if isinstance(other, StringArray): + other = other._ndarray + + mask = isna(self) | isna(other) + valid = ~mask + + if not lib.is_scalar(other): + if len(other) != len(self): + # prevent improper broadcasting when other is 2D + raise ValueError( + f"Lengths of operands do not match: {len(self)} != {len(other)}" + ) + + other = np.asarray(other) + other = other[valid] + + if op.__name__ in ops.ARITHMETIC_BINOPS: + result = np.empty_like(self._ndarray, dtype="object") + result[mask] = libmissing.NA + result[valid] = op(self._ndarray[valid], other) + return StringArray(result) + else: + # logical + result = np.zeros(len(self._ndarray), dtype="bool") + result[valid] = op(self._ndarray[valid], other) + return BooleanArray(result, mask) + + _arith_method = _cmp_method + + # ------------------------------------------------------------------------ + # String methods interface + # error: Incompatible types in assignment (expression has type "NAType", + # base class "NumpyExtensionArray" defined the type as "float") + _str_na_value = libmissing.NA # type: ignore[assignment] + + def _str_map( + self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True + ): + from pandas.arrays import BooleanArray + + if dtype is None: + dtype = StringDtype(storage="python") + if na_value is None: + na_value = self.dtype.na_value + + mask = isna(self) + arr = np.asarray(self) + + if is_integer_dtype(dtype) or is_bool_dtype(dtype): + constructor: type[IntegerArray] | type[BooleanArray] + if is_integer_dtype(dtype): + constructor = IntegerArray + else: + constructor = BooleanArray + + na_value_is_na = isna(na_value) + if na_value_is_na: + na_value = 1 + result = lib.map_infer_mask( + arr, + f, + mask.view("uint8"), + convert=False, + na_value=na_value, + # error: Argument 1 to "dtype" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected + # "Type[object]" + dtype=np.dtype(dtype), # type: ignore[arg-type] + ) + + if not na_value_is_na: + mask[:] = False + + return constructor(result, mask) + + elif is_string_dtype(dtype) and not is_object_dtype(dtype): + # i.e. StringDtype + result = lib.map_infer_mask( + arr, f, mask.view("uint8"), convert=False, na_value=na_value + ) + return StringArray(result) + else: + # This is when the result type is object. We reach this when + # -> We know the result type is truly object (e.g. .encode returns bytes + # or .findall returns a list). + # -> We don't know the result type. E.g. `.get` can return anything. 
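+            # the result stays a plain object ndarray here, since the mapped
+            # values are not guaranteed to be strings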
+ return lib.map_infer_mask(arr, f, mask.view("uint8")) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/string_arrow.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/string_arrow.py new file mode 100644 index 00000000..803e032b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/string_arrow.py @@ -0,0 +1,635 @@ +from __future__ import annotations + +from functools import partial +import operator +import re +from typing import ( + TYPE_CHECKING, + Callable, + Union, +) +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + missing as libmissing, +) +from pandas.compat import pa_version_under7p0 +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_integer_dtype, + is_object_dtype, + is_scalar, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.missing import isna + +from pandas.core.arrays._arrow_string_mixins import ArrowStringArrayMixin +from pandas.core.arrays.arrow import ArrowExtensionArray +from pandas.core.arrays.boolean import BooleanDtype +from pandas.core.arrays.integer import Int64Dtype +from pandas.core.arrays.numeric import NumericDtype +from pandas.core.arrays.string_ import ( + BaseStringArray, + StringDtype, +) +from pandas.core.strings.object_array import ObjectStringArrayMixin + +if not pa_version_under7p0: + import pyarrow as pa + import pyarrow.compute as pc + + from pandas.core.arrays.arrow._arrow_utils import fallback_performancewarning + + +if TYPE_CHECKING: + from pandas._typing import ( + AxisInt, + Dtype, + Scalar, + npt, + ) + + +ArrowStringScalarOrNAT = Union[str, libmissing.NAType] + + +def _chk_pyarrow_available() -> None: + if pa_version_under7p0: + msg = "pyarrow>=7.0.0 is required for PyArrow backed ArrowExtensionArray." + raise ImportError(msg) + + +# TODO: Inherit directly from BaseStringArrayMethods. Currently we inherit from +# ObjectStringArrayMixin because we want to have the object-dtype based methods as +# fallback for the ones that pyarrow doesn't yet support + + +class ArrowStringArray(ObjectStringArrayMixin, ArrowExtensionArray, BaseStringArray): + """ + Extension array for string data in a ``pyarrow.ChunkedArray``. + + .. versionadded:: 1.2.0 + + .. warning:: + + ArrowStringArray is considered experimental. The implementation and + parts of the API may change without warning. + + Parameters + ---------- + values : pyarrow.Array or pyarrow.ChunkedArray + The array of data. + + Attributes + ---------- + None + + Methods + ------- + None + + See Also + -------- + :func:`pandas.array` + The recommended function for creating a ArrowStringArray. + Series.str + The string methods are available on Series backed by + a ArrowStringArray. + + Notes + ----- + ArrowStringArray returns a BooleanArray for comparison methods. 
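+    pyarrow>=7.0.0 is required; an ImportError is raised otherwise.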
+ + Examples + -------- + >>> pd.array(['This is', 'some text', None, 'data.'], dtype="string[pyarrow]") + + ['This is', 'some text', , 'data.'] + Length: 4, dtype: string + """ + + # error: Incompatible types in assignment (expression has type "StringDtype", + # base class "ArrowExtensionArray" defined the type as "ArrowDtype") + _dtype: StringDtype # type: ignore[assignment] + _storage = "pyarrow" + + def __init__(self, values) -> None: + super().__init__(values) + self._dtype = StringDtype(storage=self._storage) + + if not pa.types.is_string(self._pa_array.type) and not ( + pa.types.is_dictionary(self._pa_array.type) + and pa.types.is_string(self._pa_array.type.value_type) + ): + raise ValueError( + "ArrowStringArray requires a PyArrow (chunked) array of string type" + ) + + def __len__(self) -> int: + """ + Length of this array. + + Returns + ------- + length : int + """ + return len(self._pa_array) + + @classmethod + def _from_sequence(cls, scalars, dtype: Dtype | None = None, copy: bool = False): + from pandas.core.arrays.masked import BaseMaskedArray + + _chk_pyarrow_available() + + if dtype and not (isinstance(dtype, str) and dtype == "string"): + dtype = pandas_dtype(dtype) + assert isinstance(dtype, StringDtype) and dtype.storage in ( + "pyarrow", + "pyarrow_numpy", + ) + + if isinstance(scalars, BaseMaskedArray): + # avoid costly conversion to object dtype in ensure_string_array and + # numerical issues with Float32Dtype + na_values = scalars._mask + result = scalars._data + result = lib.ensure_string_array(result, copy=copy, convert_na_value=False) + return cls(pa.array(result, mask=na_values, type=pa.string())) + elif isinstance(scalars, (pa.Array, pa.ChunkedArray)): + return cls(pc.cast(scalars, pa.string())) + + # convert non-na-likes to str + result = lib.ensure_string_array(scalars, copy=copy) + return cls(pa.array(result, type=pa.string(), from_pandas=True)) + + @classmethod + def _from_sequence_of_strings( + cls, strings, dtype: Dtype | None = None, copy: bool = False + ): + return cls._from_sequence(strings, dtype=dtype, copy=copy) + + @property + def dtype(self) -> StringDtype: # type: ignore[override] + """ + An instance of 'string[pyarrow]'. + """ + return self._dtype + + def insert(self, loc: int, item) -> ArrowStringArray: + if not isinstance(item, str) and item is not libmissing.NA: + raise TypeError("Scalar must be NA or str") + return super().insert(loc, item) + + @classmethod + def _result_converter(cls, values, na=None): + return BooleanDtype().__from_arrow__(values) + + def _maybe_convert_setitem_value(self, value): + """Maybe convert value to be pyarrow compatible.""" + if is_scalar(value): + if isna(value): + value = None + elif not isinstance(value, str): + raise TypeError("Scalar must be NA or str") + else: + value = np.array(value, dtype=object, copy=True) + value[isna(value)] = None + for v in value: + if not (v is None or isinstance(v, str)): + raise TypeError("Scalar must be NA or str") + return super()._maybe_convert_setitem_value(value) + + def isin(self, values) -> npt.NDArray[np.bool_]: + value_set = [ + pa_scalar.as_py() + for pa_scalar in [pa.scalar(value, from_pandas=True) for value in values] + if pa_scalar.type in (pa.string(), pa.null()) + ] + + # short-circuit to return all False array. 
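+        # (value_set retained only string and null scalars, so an empty set
+        # means nothing in `values` could match)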
+        if not len(value_set):
+            return np.zeros(len(self), dtype=bool)
+
+        result = pc.is_in(
+            self._pa_array, value_set=pa.array(value_set, type=self._pa_array.type)
+        )
+        # pyarrow 2.0.0 returned nulls, so we explicitly specify dtype to convert
+        # nulls to False
+        return np.array(result, dtype=np.bool_)
+
+    def astype(self, dtype, copy: bool = True):
+        dtype = pandas_dtype(dtype)
+
+        if dtype == self.dtype:
+            if copy:
+                return self.copy()
+            return self
+        elif isinstance(dtype, NumericDtype):
+            data = self._pa_array.cast(pa.from_numpy_dtype(dtype.numpy_dtype))
+            return dtype.__from_arrow__(data)
+        elif isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.floating):
+            return self.to_numpy(dtype=dtype, na_value=np.nan)
+
+        return super().astype(dtype, copy=copy)
+
+    @property
+    def _data(self):
+        # dask accesses ._data directly
+        warnings.warn(
+            f"{type(self).__name__}._data is deprecated and will be removed "
+            "in a future version, use ._pa_array instead",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        return self._pa_array
+
+    # ------------------------------------------------------------------------
+    # String methods interface
+
+    # error: Incompatible types in assignment (expression has type "NAType",
+    # base class "ObjectStringArrayMixin" defined the type as "float")
+    _str_na_value = libmissing.NA  # type: ignore[assignment]
+
+    def _str_map(
+        self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
+    ):
+        # TODO: de-duplicate with the StringArray method. This method is more or
+        # less copy-and-paste.
+
+        from pandas.arrays import (
+            BooleanArray,
+            IntegerArray,
+        )
+
+        if dtype is None:
+            dtype = self.dtype
+        if na_value is None:
+            na_value = self.dtype.na_value
+
+        mask = isna(self)
+        arr = np.asarray(self)
+
+        if is_integer_dtype(dtype) or is_bool_dtype(dtype):
+            constructor: type[IntegerArray] | type[BooleanArray]
+            if is_integer_dtype(dtype):
+                constructor = IntegerArray
+            else:
+                constructor = BooleanArray
+
+            na_value_is_na = isna(na_value)
+            if na_value_is_na:
+                na_value = 1
+            result = lib.map_infer_mask(
+                arr,
+                f,
+                mask.view("uint8"),
+                convert=False,
+                na_value=na_value,
+                # error: Argument 1 to "dtype" has incompatible type
+                # "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected
+                # "Type[object]"
+                dtype=np.dtype(dtype),  # type: ignore[arg-type]
+            )
+
+            if not na_value_is_na:
+                mask[:] = False
+
+            return constructor(result, mask)
+
+        elif is_string_dtype(dtype) and not is_object_dtype(dtype):
+            # i.e. StringDtype
+            result = lib.map_infer_mask(
+                arr, f, mask.view("uint8"), convert=False, na_value=na_value
+            )
+            result = pa.array(result, mask=mask, type=pa.string(), from_pandas=True)
+            return type(self)(result)
+        else:
+            # This is when the result type is object. We reach this when
+            # -> We know the result type is truly object (e.g. .encode returns bytes
+            #    or .findall returns a list).
+            # -> We don't know the result type. E.g. `.get` can return anything.
+            return lib.map_infer_mask(arr, f, mask.view("uint8"))
+
+    def _str_contains(
+        self, pat, case: bool = True, flags: int = 0, na=np.nan, regex: bool = True
+    ):
+        if flags:
+            fallback_performancewarning()
+            return super()._str_contains(pat, case, flags, na, regex)
+
+        if regex:
+            result = pc.match_substring_regex(self._pa_array, pat, ignore_case=not case)
+        else:
+            result = pc.match_substring(self._pa_array, pat, ignore_case=not case)
+        result = self._result_converter(result, na=na)
+        if not isna(na):
+            result[isna(result)] = bool(na)
+        return result
+
+    def _str_startswith(self, pat: str, na=None):
+        result = pc.starts_with(self._pa_array, pattern=pat)
+        if not isna(na):
+            result = result.fill_null(na)
+        result = self._result_converter(result)
+        if not isna(na):
+            result[isna(result)] = bool(na)
+        return result
+
+    def _str_endswith(self, pat: str, na=None):
+        result = pc.ends_with(self._pa_array, pattern=pat)
+        if not isna(na):
+            result = result.fill_null(na)
+        result = self._result_converter(result)
+        if not isna(na):
+            result[isna(result)] = bool(na)
+        return result
+
+    def _str_replace(
+        self,
+        pat: str | re.Pattern,
+        repl: str | Callable,
+        n: int = -1,
+        case: bool = True,
+        flags: int = 0,
+        regex: bool = True,
+    ):
+        if isinstance(pat, re.Pattern) or callable(repl) or not case or flags:
+            fallback_performancewarning()
+            return super()._str_replace(pat, repl, n, case, flags, regex)
+
+        func = pc.replace_substring_regex if regex else pc.replace_substring
+        result = func(self._pa_array, pattern=pat, replacement=repl, max_replacements=n)
+        return type(self)(result)
+
+    def _str_match(
+        self, pat: str, case: bool = True, flags: int = 0, na: Scalar | None = None
+    ):
+        if not pat.startswith("^"):
+            pat = f"^{pat}"
+        return self._str_contains(pat, case, flags, na, regex=True)
+
+    def _str_fullmatch(
+        self, pat, case: bool = True, flags: int = 0, na: Scalar | None = None
+    ):
+        # anchor the pattern at the end unless it already ends with an
+        # unescaped "$" ("\\$" matches a literal dollar sign)
+        if not pat.endswith("$") or pat.endswith("\\$"):
+            pat = f"{pat}$"
+        return self._str_match(pat, case, flags, na)
+
+    def _str_isalnum(self):
+        result = pc.utf8_is_alnum(self._pa_array)
+        return self._result_converter(result)
+
+    def _str_isalpha(self):
+        result = pc.utf8_is_alpha(self._pa_array)
+        return self._result_converter(result)
+
+    def _str_isdecimal(self):
+        result = pc.utf8_is_decimal(self._pa_array)
+        return self._result_converter(result)
+
+    def _str_isdigit(self):
+        result = pc.utf8_is_digit(self._pa_array)
+        return self._result_converter(result)
+
+    def _str_islower(self):
+        result = pc.utf8_is_lower(self._pa_array)
+        return self._result_converter(result)
+
+    def _str_isnumeric(self):
+        result = pc.utf8_is_numeric(self._pa_array)
+        return self._result_converter(result)
+
+    def _str_isspace(self):
+        result = pc.utf8_is_space(self._pa_array)
+        return self._result_converter(result)
+
+    def _str_istitle(self):
+        result = pc.utf8_is_title(self._pa_array)
+        return self._result_converter(result)
+
+    def _str_isupper(self):
+        result = pc.utf8_is_upper(self._pa_array)
+        return self._result_converter(result)
+
+    def _str_len(self):
+        result = pc.utf8_length(self._pa_array)
+        return Int64Dtype().__from_arrow__(result)
+
+    def _str_lower(self):
+        return type(self)(pc.utf8_lower(self._pa_array))
+
+    def _str_upper(self):
+        return type(self)(pc.utf8_upper(self._pa_array))
+
+    def _str_strip(self, to_strip=None):
+        if to_strip is None:
+            result = pc.utf8_trim_whitespace(self._pa_array)
+        else:
+            result = pc.utf8_trim(self._pa_array, characters=to_strip)
+        return type(self)(result)
+
+    def _str_lstrip(self,
to_strip=None): + if to_strip is None: + result = pc.utf8_ltrim_whitespace(self._pa_array) + else: + result = pc.utf8_ltrim(self._pa_array, characters=to_strip) + return type(self)(result) + + def _str_rstrip(self, to_strip=None): + if to_strip is None: + result = pc.utf8_rtrim_whitespace(self._pa_array) + else: + result = pc.utf8_rtrim(self._pa_array, characters=to_strip) + return type(self)(result) + + def _reduce( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + result = self._reduce_calc(name, skipna=skipna, keepdims=keepdims, **kwargs) + if name in ("argmin", "argmax") and isinstance(result, pa.Array): + return self._convert_int_dtype(result) + elif isinstance(result, pa.Array): + return type(self)(result) + else: + return result + + def _convert_int_dtype(self, result): + return Int64Dtype().__from_arrow__(result) + + def _rank( + self, + *, + axis: AxisInt = 0, + method: str = "average", + na_option: str = "keep", + ascending: bool = True, + pct: bool = False, + ): + """ + See Series.rank.__doc__. + """ + return self._convert_int_dtype( + self._rank_calc( + axis=axis, + method=method, + na_option=na_option, + ascending=ascending, + pct=pct, + ) + ) + + +class ArrowStringArrayNumpySemantics(ArrowStringArray): + _storage = "pyarrow_numpy" + + def __init__(self, values) -> None: + _chk_pyarrow_available() + + if isinstance(values, (pa.Array, pa.ChunkedArray)) and pa.types.is_large_string( + values.type + ): + values = pc.cast(values, pa.string()) + super().__init__(values) + + @classmethod + def _result_converter(cls, values, na=None): + if not isna(na): + values = values.fill_null(bool(na)) + return ArrowExtensionArray(values).to_numpy(na_value=np.nan) + + def __getattribute__(self, item): + # ArrowStringArray and we both inherit from ArrowExtensionArray, which + # creates inheritance problems (Diamond inheritance) + if item in ArrowStringArrayMixin.__dict__ and item not in ( + "_pa_array", + "__dict__", + ): + return partial(getattr(ArrowStringArrayMixin, item), self) + return super().__getattribute__(item) + + def _str_map( + self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True + ): + if dtype is None: + dtype = self.dtype + if na_value is None: + na_value = self.dtype.na_value + + mask = isna(self) + arr = np.asarray(self) + + if is_integer_dtype(dtype) or is_bool_dtype(dtype): + if is_integer_dtype(dtype): + na_value = np.nan + else: + na_value = False + try: + result = lib.map_infer_mask( + arr, + f, + mask.view("uint8"), + convert=False, + na_value=na_value, + dtype=np.dtype(dtype), # type: ignore[arg-type] + ) + return result + + except ValueError: + result = lib.map_infer_mask( + arr, + f, + mask.view("uint8"), + convert=False, + na_value=na_value, + ) + if convert and result.dtype == object: + result = lib.maybe_convert_objects(result) + return result + + elif is_string_dtype(dtype) and not is_object_dtype(dtype): + # i.e. StringDtype + result = lib.map_infer_mask( + arr, f, mask.view("uint8"), convert=False, na_value=na_value + ) + result = pa.array(result, mask=mask, type=pa.string(), from_pandas=True) + return type(self)(result) + else: + # This is when the result type is object. We reach this when + # -> We know the result type is truly object (e.g. .encode returns bytes + # or .findall returns a list). + # -> We don't know the result type. E.g. `.get` can return anything. 
+ return lib.map_infer_mask(arr, f, mask.view("uint8")) + + def _convert_int_dtype(self, result): + if isinstance(result, pa.Array): + result = result.to_numpy(zero_copy_only=False) + elif not isinstance(result, np.ndarray): + result = result.to_numpy() + if result.dtype == np.int32: + result = result.astype(np.int64) + return result + + def _str_count(self, pat: str, flags: int = 0): + if flags: + return super()._str_count(pat, flags) + result = pc.count_substring_regex(self._pa_array, pat).to_numpy() + return self._convert_int_dtype(result) + + def _str_len(self): + result = pc.utf8_length(self._pa_array).to_numpy() + return self._convert_int_dtype(result) + + def _str_find(self, sub: str, start: int = 0, end: int | None = None): + if start != 0 and end is not None: + slices = pc.utf8_slice_codeunits(self._pa_array, start, stop=end) + result = pc.find_substring(slices, sub) + not_found = pc.equal(result, -1) + offset_result = pc.add(result, end - start) + result = pc.if_else(not_found, result, offset_result) + elif start == 0 and end is None: + slices = self._pa_array + result = pc.find_substring(slices, sub) + else: + return super()._str_find(sub, start, end) + return self._convert_int_dtype(result.to_numpy()) + + def _cmp_method(self, other, op): + result = super()._cmp_method(other, op) + if op == operator.ne: + return result.to_numpy(np.bool_, na_value=True) + else: + return result.to_numpy(np.bool_, na_value=False) + + def value_counts(self, dropna: bool = True): + from pandas import Series + + result = super().value_counts(dropna) + return Series( + result._values.to_numpy(), index=result.index, name=result.name, copy=False + ) + + def _reduce( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + if name in ["any", "all"]: + if not skipna and name == "all": + nas = pc.invert(pc.is_null(self._pa_array)) + arr = pc.and_kleene(nas, pc.not_equal(self._pa_array, "")) + else: + arr = pc.not_equal(self._pa_array, "") + return ArrowExtensionArray(arr)._reduce( + name, skipna=skipna, keepdims=keepdims, **kwargs + ) + else: + return super()._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs) + + def insert(self, loc: int, item) -> ArrowStringArrayNumpySemantics: + if item is np.nan: + item = libmissing.NA + return super().insert(loc, item) # type: ignore[return-value] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/timedeltas.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/timedeltas.py new file mode 100644 index 00000000..a81609e1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/arrays/timedeltas.py @@ -0,0 +1,1212 @@ +from __future__ import annotations + +from datetime import timedelta +import operator +from typing import ( + TYPE_CHECKING, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + tslibs, +) +from pandas._libs.tslibs import ( + NaT, + NaTType, + Tick, + Timedelta, + astype_overflowsafe, + get_supported_reso, + get_unit_from_dtype, + iNaT, + is_supported_unit, + npy_unit_to_abbrev, + periods_per_second, + to_offset, +) +from pandas._libs.tslibs.conversion import precision_from_unit +from pandas._libs.tslibs.fields import ( + get_timedelta_days, + get_timedelta_field, +) +from pandas._libs.tslibs.timedeltas import ( + array_to_timedelta64, + floordiv_object_array, + ints_to_pytimedelta, + parse_timedelta_unit, + truediv_object_array, +) +from pandas.compat.numpy import function as nv +from pandas.util._validators import validate_endpoints + +from 
pandas.core.dtypes.common import ( + TD64NS_DTYPE, + is_float_dtype, + is_integer_dtype, + is_object_dtype, + is_scalar, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.missing import isna + +from pandas.core import ( + nanops, + roperator, +) +from pandas.core.array_algos import datetimelike_accumulations +from pandas.core.arrays import datetimelike as dtl +from pandas.core.arrays._ranges import generate_regular_range +import pandas.core.common as com +from pandas.core.ops.common import unpack_zerodim_and_defer + +if TYPE_CHECKING: + from collections.abc import Iterator + + from pandas._typing import ( + AxisInt, + DateTimeErrorChoices, + DtypeObj, + NpDtype, + Self, + npt, + ) + + from pandas import DataFrame + +import textwrap + + +def _field_accessor(name: str, alias: str, docstring: str): + def f(self) -> np.ndarray: + values = self.asi8 + if alias == "days": + result = get_timedelta_days(values, reso=self._creso) + else: + # error: Incompatible types in assignment ( + # expression has type "ndarray[Any, dtype[signedinteger[_32Bit]]]", + # variable has type "ndarray[Any, dtype[signedinteger[_64Bit]]] + result = get_timedelta_field(values, alias, reso=self._creso) # type: ignore[assignment] # noqa: E501 + if self._hasna: + result = self._maybe_mask_results( + result, fill_value=None, convert="float64" + ) + + return result + + f.__name__ = name + f.__doc__ = f"\n{docstring}\n" + return property(f) + + +class TimedeltaArray(dtl.TimelikeOps): + """ + Pandas ExtensionArray for timedelta data. + + .. warning:: + + TimedeltaArray is currently experimental, and its API may change + without warning. In particular, :attr:`TimedeltaArray.dtype` is + expected to change to be an instance of an ``ExtensionDtype`` + subclass. + + Parameters + ---------- + values : array-like + The timedelta data. + + dtype : numpy.dtype + Currently, only ``numpy.dtype("timedelta64[ns]")`` is accepted. + freq : Offset, optional + copy : bool, default False + Whether to copy the underlying array of data. + + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> pd.arrays.TimedeltaArray(pd.TimedeltaIndex(['1H', '2H'])) + + ['0 days 01:00:00', '0 days 02:00:00'] + Length: 2, dtype: timedelta64[ns] + """ + + _typ = "timedeltaarray" + _internal_fill_value = np.timedelta64("NaT", "ns") + _recognized_scalars = (timedelta, np.timedelta64, Tick) + _is_recognized_dtype = lambda x: lib.is_np_dtype(x, "m") + _infer_matches = ("timedelta", "timedelta64") + + @property + def _scalar_type(self) -> type[Timedelta]: + return Timedelta + + __array_priority__ = 1000 + # define my properties & methods for delegation + _other_ops: list[str] = [] + _bool_ops: list[str] = [] + _object_ops: list[str] = ["freq"] + _field_ops: list[str] = ["days", "seconds", "microseconds", "nanoseconds"] + _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + ["unit"] + _datetimelike_methods: list[str] = [ + "to_pytimedelta", + "total_seconds", + "round", + "floor", + "ceil", + "as_unit", + ] + + # Note: ndim must be defined to ensure NaT.__richcmp__(TimedeltaArray) + # operates pointwise. 
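
For orientation, a doctest-style sketch of what the `_field_accessor` factory above produces once the class is assembled (assuming a standard pandas install; the dtypes follow the field docstrings defined further below):

    >>> import pandas as pd
    >>> tdi = pd.to_timedelta(["1 days 02:00:00", "3 days 00:00:30"])
    >>> tdi.days  # property generated by _field_accessor("days", "days", ...)
    Index([1, 3], dtype='int64')
    >>> tdi.seconds  # seconds within each day (0..86399), not total seconds
    Index([7200, 30], dtype='int32')
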
+ + def _box_func(self, x: np.timedelta64) -> Timedelta | NaTType: + y = x.view("i8") + if y == NaT._value: + return NaT + return Timedelta._from_value_and_reso(y, reso=self._creso) + + @property + # error: Return type "dtype" of "dtype" incompatible with return type + # "ExtensionDtype" in supertype "ExtensionArray" + def dtype(self) -> np.dtype[np.timedelta64]: # type: ignore[override] + """ + The dtype for the TimedeltaArray. + + .. warning:: + + A future version of pandas will change dtype to be an instance + of a :class:`pandas.api.extensions.ExtensionDtype` subclass, + not a ``numpy.dtype``. + + Returns + ------- + numpy.dtype + """ + return self._ndarray.dtype + + # ---------------------------------------------------------------- + # Constructors + + _freq = None + _default_dtype = TD64NS_DTYPE # used in TimeLikeOps.__init__ + + @classmethod + def _validate_dtype(cls, values, dtype): + # used in TimeLikeOps.__init__ + _validate_td64_dtype(values.dtype) + dtype = _validate_td64_dtype(dtype) + return dtype + + # error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked" + @classmethod + def _simple_new( # type: ignore[override] + cls, + values: npt.NDArray[np.timedelta64], + freq: Tick | None = None, + dtype: np.dtype[np.timedelta64] = TD64NS_DTYPE, + ) -> Self: + # Require td64 dtype, not unit-less, matching values.dtype + assert lib.is_np_dtype(dtype, "m") + assert not tslibs.is_unitless(dtype) + assert isinstance(values, np.ndarray), type(values) + assert dtype == values.dtype + assert freq is None or isinstance(freq, Tick) + + result = super()._simple_new(values=values, dtype=dtype) + result._freq = freq + return result + + @classmethod + def _from_sequence(cls, data, *, dtype=None, copy: bool = False) -> Self: + if dtype: + dtype = _validate_td64_dtype(dtype) + + data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=None) + freq, _ = dtl.validate_inferred_freq(None, inferred_freq, False) + freq = cast("Tick | None", freq) + + if dtype is not None: + data = astype_overflowsafe(data, dtype=dtype, copy=False) + + return cls._simple_new(data, dtype=data.dtype, freq=freq) + + @classmethod + def _from_sequence_not_strict( + cls, + data, + *, + dtype=None, + copy: bool = False, + freq=lib.no_default, + unit=None, + ) -> Self: + """ + A non-strict version of _from_sequence, called from TimedeltaIndex.__new__. + """ + if dtype: + dtype = _validate_td64_dtype(dtype) + + assert unit not in ["Y", "y", "M"] # caller is responsible for checking + + explicit_none = freq is None + freq = freq if freq is not lib.no_default else None + + freq, freq_infer = dtl.maybe_infer_freq(freq) + + data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit) + freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer) + freq = cast("Tick | None", freq) + if explicit_none: + freq = None + + if dtype is not None: + data = astype_overflowsafe(data, dtype=dtype, copy=False) + + result = cls._simple_new(data, dtype=data.dtype, freq=freq) + + if inferred_freq is None and freq is not None: + # this condition precludes `freq_infer` + cls._validate_frequency(result, freq) + + elif freq_infer: + # Set _freq directly to bypass duplicative _validate_frequency + # check. 
+ result._freq = to_offset(result.inferred_freq) + + return result + + # Signature of "_generate_range" incompatible with supertype + # "DatetimeLikeArrayMixin" + @classmethod + def _generate_range( # type: ignore[override] + cls, start, end, periods, freq, closed=None, *, unit: str | None = None + ) -> Self: + periods = dtl.validate_periods(periods) + if freq is None and any(x is None for x in [periods, start, end]): + raise ValueError("Must provide freq argument if no data is supplied") + + if com.count_not_none(start, end, periods, freq) != 3: + raise ValueError( + "Of the four parameters: start, end, periods, " + "and freq, exactly three must be specified" + ) + + if start is not None: + start = Timedelta(start).as_unit("ns") + + if end is not None: + end = Timedelta(end).as_unit("ns") + + if unit is not None: + if unit not in ["s", "ms", "us", "ns"]: + raise ValueError("'unit' must be one of 's', 'ms', 'us', 'ns'") + else: + unit = "ns" + + if start is not None and unit is not None: + start = start.as_unit(unit, round_ok=False) + if end is not None and unit is not None: + end = end.as_unit(unit, round_ok=False) + + left_closed, right_closed = validate_endpoints(closed) + + if freq is not None: + index = generate_regular_range(start, end, periods, freq, unit=unit) + else: + index = np.linspace(start._value, end._value, periods).astype("i8") + + if not left_closed: + index = index[1:] + if not right_closed: + index = index[:-1] + + td64values = index.view(f"m8[{unit}]") + return cls._simple_new(td64values, dtype=td64values.dtype, freq=freq) + + # ---------------------------------------------------------------- + # DatetimeLike Interface + + def _unbox_scalar(self, value) -> np.timedelta64: + if not isinstance(value, self._scalar_type) and value is not NaT: + raise ValueError("'value' should be a Timedelta.") + self._check_compatible_with(value) + if value is NaT: + return np.timedelta64(value._value, self.unit) + else: + return value.as_unit(self.unit).asm8 + + def _scalar_from_string(self, value) -> Timedelta | NaTType: + return Timedelta(value) + + def _check_compatible_with(self, other) -> None: + # we don't have anything to validate. + pass + + # ---------------------------------------------------------------- + # Array-Like / EA-Interface Methods + + def astype(self, dtype, copy: bool = True): + # We handle + # --> timedelta64[ns] + # --> timedelta64 + # DatetimeLikeArrayMixin super call handles other cases + dtype = pandas_dtype(dtype) + + if lib.is_np_dtype(dtype, "m"): + if dtype == self.dtype: + if copy: + return self.copy() + return self + + if is_supported_unit(get_unit_from_dtype(dtype)): + # unit conversion e.g. timedelta64[s] + res_values = astype_overflowsafe(self._ndarray, dtype, copy=False) + return type(self)._simple_new( + res_values, dtype=res_values.dtype, freq=self.freq + ) + else: + raise ValueError( + f"Cannot convert from {self.dtype} to {dtype}. 
" + "Supported resolutions are 's', 'ms', 'us', 'ns'" + ) + + return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy=copy) + + def __iter__(self) -> Iterator: + if self.ndim > 1: + for i in range(len(self)): + yield self[i] + else: + # convert in chunks of 10k for efficiency + data = self._ndarray + length = len(self) + chunksize = 10000 + chunks = (length // chunksize) + 1 + for i in range(chunks): + start_i = i * chunksize + end_i = min((i + 1) * chunksize, length) + converted = ints_to_pytimedelta(data[start_i:end_i], box=True) + yield from converted + + # ---------------------------------------------------------------- + # Reductions + + def sum( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + keepdims: bool = False, + initial=None, + skipna: bool = True, + min_count: int = 0, + ): + nv.validate_sum( + (), {"dtype": dtype, "out": out, "keepdims": keepdims, "initial": initial} + ) + + result = nanops.nansum( + self._ndarray, axis=axis, skipna=skipna, min_count=min_count + ) + return self._wrap_reduction_result(axis, result) + + def std( + self, + *, + axis: AxisInt | None = None, + dtype: NpDtype | None = None, + out=None, + ddof: int = 1, + keepdims: bool = False, + skipna: bool = True, + ): + nv.validate_stat_ddof_func( + (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std" + ) + + result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof) + if axis is None or self.ndim == 1: + return self._box_func(result) + return self._from_backing_data(result) + + # ---------------------------------------------------------------- + # Accumulations + + def _accumulate(self, name: str, *, skipna: bool = True, **kwargs): + if name == "cumsum": + op = getattr(datetimelike_accumulations, name) + result = op(self._ndarray.copy(), skipna=skipna, **kwargs) + + return type(self)._simple_new(result, freq=None, dtype=self.dtype) + elif name == "cumprod": + raise TypeError("cumprod not supported for Timedelta.") + + else: + return super()._accumulate(name, skipna=skipna, **kwargs) + + # ---------------------------------------------------------------- + # Rendering Methods + + def _formatter(self, boxed: bool = False): + from pandas.io.formats.format import get_format_timedelta64 + + return get_format_timedelta64(self, box=True) + + def _format_native_types( + self, *, na_rep: str | float = "NaT", date_format=None, **kwargs + ) -> npt.NDArray[np.object_]: + from pandas.io.formats.format import get_format_timedelta64 + + # Relies on TimeDelta._repr_base + formatter = get_format_timedelta64(self._ndarray, na_rep) + # equiv: np.array([formatter(x) for x in self._ndarray]) + # but independent of dimension + return np.frompyfunc(formatter, 1, 1)(self._ndarray) + + # ---------------------------------------------------------------- + # Arithmetic Methods + + def _add_offset(self, other): + assert not isinstance(other, Tick) + raise TypeError( + f"cannot add the type {type(other).__name__} to a {type(self).__name__}" + ) + + @unpack_zerodim_and_defer("__mul__") + def __mul__(self, other) -> Self: + if is_scalar(other): + # numpy will accept float and int, raise TypeError for others + result = self._ndarray * other + freq = None + if self.freq is not None and not isna(other): + freq = self.freq * other + if freq.n == 0: + # GH#51575 Better to have no freq than an incorrect one + freq = None + return type(self)._simple_new(result, dtype=result.dtype, freq=freq) + + if not hasattr(other, "dtype"): + # list, tuple + other = np.array(other) + if 
len(other) != len(self) and not lib.is_np_dtype(other.dtype, "m"): + # Exclude timedelta64 here so we correctly raise TypeError + # for that instead of ValueError + raise ValueError("Cannot multiply with unequal lengths") + + if is_object_dtype(other.dtype): + # this multiplication will succeed only if all elements of other + # are int or float scalars, so we will end up with + # timedelta64[ns]-dtyped result + arr = self._ndarray + result = [arr[n] * other[n] for n in range(len(self))] + result = np.array(result) + return type(self)._simple_new(result, dtype=result.dtype) + + # numpy will accept float or int dtype, raise TypeError for others + result = self._ndarray * other + return type(self)._simple_new(result, dtype=result.dtype) + + __rmul__ = __mul__ + + def _scalar_divlike_op(self, other, op): + """ + Shared logic for __truediv__, __rtruediv__, __floordiv__, __rfloordiv__ + with scalar 'other'. + """ + if isinstance(other, self._recognized_scalars): + other = Timedelta(other) + # mypy assumes that __new__ returns an instance of the class + # github.com/python/mypy/issues/1020 + if cast("Timedelta | NaTType", other) is NaT: + # specifically timedelta64-NaT + res = np.empty(self.shape, dtype=np.float64) + res.fill(np.nan) + return res + + # otherwise, dispatch to Timedelta implementation + return op(self._ndarray, other) + + else: + # caller is responsible for checking lib.is_scalar(other) + # assume other is numeric, otherwise numpy will raise + + if op in [roperator.rtruediv, roperator.rfloordiv]: + raise TypeError( + f"Cannot divide {type(other).__name__} by {type(self).__name__}" + ) + + result = op(self._ndarray, other) + freq = None + + if self.freq is not None: + # Note: freq gets division, not floor-division, even if op + # is floordiv. + freq = self.freq / other + if freq.nanos == 0 and self.freq.nanos != 0: + # e.g. if self.freq is Nano(1) then dividing by 2 + # rounds down to zero + freq = None + + return type(self)._simple_new(result, dtype=result.dtype, freq=freq) + + def _cast_divlike_op(self, other): + if not hasattr(other, "dtype"): + # e.g. list, tuple + other = np.array(other) + + if len(other) != len(self): + raise ValueError("Cannot divide vectors with unequal lengths") + return other + + def _vector_divlike_op(self, other, op) -> np.ndarray | Self: + """ + Shared logic for __truediv__, __floordiv__, and their reversed versions + with timedelta64-dtype ndarray other. 
+ """ + # Let numpy handle it + result = op(self._ndarray, np.asarray(other)) + + if (is_integer_dtype(other.dtype) or is_float_dtype(other.dtype)) and op in [ + operator.truediv, + operator.floordiv, + ]: + return type(self)._simple_new(result, dtype=result.dtype) + + if op in [operator.floordiv, roperator.rfloordiv]: + mask = self.isna() | isna(other) + if mask.any(): + result = result.astype(np.float64) + np.putmask(result, mask, np.nan) + + return result + + @unpack_zerodim_and_defer("__truediv__") + def __truediv__(self, other): + # timedelta / X is well-defined for timedelta-like or numeric X + op = operator.truediv + if is_scalar(other): + return self._scalar_divlike_op(other, op) + + other = self._cast_divlike_op(other) + if ( + lib.is_np_dtype(other.dtype, "m") + or is_integer_dtype(other.dtype) + or is_float_dtype(other.dtype) + ): + return self._vector_divlike_op(other, op) + + if is_object_dtype(other.dtype): + other = np.asarray(other) + if self.ndim > 1: + res_cols = [left / right for left, right in zip(self, other)] + res_cols2 = [x.reshape(1, -1) for x in res_cols] + result = np.concatenate(res_cols2, axis=0) + else: + result = truediv_object_array(self._ndarray, other) + + return result + + else: + return NotImplemented + + @unpack_zerodim_and_defer("__rtruediv__") + def __rtruediv__(self, other): + # X / timedelta is defined only for timedelta-like X + op = roperator.rtruediv + if is_scalar(other): + return self._scalar_divlike_op(other, op) + + other = self._cast_divlike_op(other) + if lib.is_np_dtype(other.dtype, "m"): + return self._vector_divlike_op(other, op) + + elif is_object_dtype(other.dtype): + # Note: unlike in __truediv__, we do not _need_ to do type + # inference on the result. It does not raise, a numeric array + # is returned. 
GH#23829 + result_list = [other[n] / self[n] for n in range(len(self))] + return np.array(result_list) + + else: + return NotImplemented + + @unpack_zerodim_and_defer("__floordiv__") + def __floordiv__(self, other): + op = operator.floordiv + if is_scalar(other): + return self._scalar_divlike_op(other, op) + + other = self._cast_divlike_op(other) + if ( + lib.is_np_dtype(other.dtype, "m") + or is_integer_dtype(other.dtype) + or is_float_dtype(other.dtype) + ): + return self._vector_divlike_op(other, op) + + elif is_object_dtype(other.dtype): + other = np.asarray(other) + if self.ndim > 1: + res_cols = [left // right for left, right in zip(self, other)] + res_cols2 = [x.reshape(1, -1) for x in res_cols] + result = np.concatenate(res_cols2, axis=0) + else: + result = floordiv_object_array(self._ndarray, other) + + assert result.dtype == object + return result + + else: + return NotImplemented + + @unpack_zerodim_and_defer("__rfloordiv__") + def __rfloordiv__(self, other): + op = roperator.rfloordiv + if is_scalar(other): + return self._scalar_divlike_op(other, op) + + other = self._cast_divlike_op(other) + if lib.is_np_dtype(other.dtype, "m"): + return self._vector_divlike_op(other, op) + + elif is_object_dtype(other.dtype): + result_list = [other[n] // self[n] for n in range(len(self))] + result = np.array(result_list) + return result + + else: + return NotImplemented + + @unpack_zerodim_and_defer("__mod__") + def __mod__(self, other): + # Note: This is a naive implementation, can likely be optimized + if isinstance(other, self._recognized_scalars): + other = Timedelta(other) + return self - (self // other) * other + + @unpack_zerodim_and_defer("__rmod__") + def __rmod__(self, other): + # Note: This is a naive implementation, can likely be optimized + if isinstance(other, self._recognized_scalars): + other = Timedelta(other) + return other - (other // self) * self + + @unpack_zerodim_and_defer("__divmod__") + def __divmod__(self, other): + # Note: This is a naive implementation, can likely be optimized + if isinstance(other, self._recognized_scalars): + other = Timedelta(other) + + res1 = self // other + res2 = self - res1 * other + return res1, res2 + + @unpack_zerodim_and_defer("__rdivmod__") + def __rdivmod__(self, other): + # Note: This is a naive implementation, can likely be optimized + if isinstance(other, self._recognized_scalars): + other = Timedelta(other) + + res1 = other // self + res2 = other - res1 * self + return res1, res2 + + def __neg__(self) -> TimedeltaArray: + freq = None + if self.freq is not None: + freq = -self.freq + return type(self)._simple_new(-self._ndarray, dtype=self.dtype, freq=freq) + + def __pos__(self) -> TimedeltaArray: + return type(self)(self._ndarray.copy(), freq=self.freq) + + def __abs__(self) -> TimedeltaArray: + # Note: freq is not preserved + return type(self)(np.abs(self._ndarray)) + + # ---------------------------------------------------------------- + # Conversion Methods - Vectorized analogues of Timedelta methods + + def total_seconds(self) -> npt.NDArray[np.float64]: + """ + Return total duration of each element expressed in seconds. + + This method is available directly on TimedeltaArray, TimedeltaIndex + and on Series containing timedelta values under the ``.dt`` namespace. + + Returns + ------- + ndarray, Index or Series + When the calling object is a TimedeltaArray, the return type + is ndarray. When the calling object is a TimedeltaIndex, + the return type is an Index with a float64 dtype. 
When the calling object + is a Series, the return type is Series of type `float64` whose + index is the same as the original. + + See Also + -------- + datetime.timedelta.total_seconds : Standard library version + of this method. + TimedeltaIndex.components : Return a DataFrame with components of + each Timedelta. + + Examples + -------- + **Series** + + >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d')) + >>> s + 0 0 days + 1 1 days + 2 2 days + 3 3 days + 4 4 days + dtype: timedelta64[ns] + + >>> s.dt.total_seconds() + 0 0.0 + 1 86400.0 + 2 172800.0 + 3 259200.0 + 4 345600.0 + dtype: float64 + + **TimedeltaIndex** + + >>> idx = pd.to_timedelta(np.arange(5), unit='d') + >>> idx + TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], + dtype='timedelta64[ns]', freq=None) + + >>> idx.total_seconds() + Index([0.0, 86400.0, 172800.0, 259200.0, 345600.0], dtype='float64') + """ + pps = periods_per_second(self._creso) + return self._maybe_mask_results(self.asi8 / pps, fill_value=None) + + def to_pytimedelta(self) -> npt.NDArray[np.object_]: + """ + Return an ndarray of datetime.timedelta objects. + + Returns + ------- + numpy.ndarray + + Examples + -------- + >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='D') + >>> tdelta_idx + TimedeltaIndex(['1 days', '2 days', '3 days'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.to_pytimedelta() + array([datetime.timedelta(days=1), datetime.timedelta(days=2), + datetime.timedelta(days=3)], dtype=object) + """ + return ints_to_pytimedelta(self._ndarray) + + days_docstring = textwrap.dedent( + """Number of days for each element. + + Examples + -------- + For Series: + + >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='d')) + >>> ser + 0 1 days + 1 2 days + 2 3 days + dtype: timedelta64[ns] + >>> ser.dt.days + 0 1 + 1 2 + 2 3 + dtype: int64 + + For TimedeltaIndex: + + >>> tdelta_idx = pd.to_timedelta(["0 days", "10 days", "20 days"]) + >>> tdelta_idx + TimedeltaIndex(['0 days', '10 days', '20 days'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.days + Index([0, 10, 20], dtype='int64')""" + ) + days = _field_accessor("days", "days", days_docstring) + + seconds_docstring = textwrap.dedent( + """Number of seconds (>= 0 and less than 1 day) for each element. + + Examples + -------- + For Series: + + >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='S')) + >>> ser + 0 0 days 00:00:01 + 1 0 days 00:00:02 + 2 0 days 00:00:03 + dtype: timedelta64[ns] + >>> ser.dt.seconds + 0 1 + 1 2 + 2 3 + dtype: int32 + + For TimedeltaIndex: + + >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='S') + >>> tdelta_idx + TimedeltaIndex(['0 days 00:00:01', '0 days 00:00:02', '0 days 00:00:03'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.seconds + Index([1, 2, 3], dtype='int32')""" + ) + seconds = _field_accessor( + "seconds", + "seconds", + seconds_docstring, + ) + + microseconds_docstring = textwrap.dedent( + """Number of microseconds (>= 0 and less than 1 second) for each element. 
+ + Examples + -------- + For Series: + + >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='U')) + >>> ser + 0 0 days 00:00:00.000001 + 1 0 days 00:00:00.000002 + 2 0 days 00:00:00.000003 + dtype: timedelta64[ns] + >>> ser.dt.microseconds + 0 1 + 1 2 + 2 3 + dtype: int32 + + For TimedeltaIndex: + + >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='U') + >>> tdelta_idx + TimedeltaIndex(['0 days 00:00:00.000001', '0 days 00:00:00.000002', + '0 days 00:00:00.000003'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.microseconds + Index([1, 2, 3], dtype='int32')""" + ) + microseconds = _field_accessor( + "microseconds", + "microseconds", + microseconds_docstring, + ) + + nanoseconds_docstring = textwrap.dedent( + """Number of nanoseconds (>= 0 and less than 1 microsecond) for each element. + + Examples + -------- + For Series: + + >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='N')) + >>> ser + 0 0 days 00:00:00.000000001 + 1 0 days 00:00:00.000000002 + 2 0 days 00:00:00.000000003 + dtype: timedelta64[ns] + >>> ser.dt.nanoseconds + 0 1 + 1 2 + 2 3 + dtype: int32 + + For TimedeltaIndex: + + >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='N') + >>> tdelta_idx + TimedeltaIndex(['0 days 00:00:00.000000001', '0 days 00:00:00.000000002', + '0 days 00:00:00.000000003'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.nanoseconds + Index([1, 2, 3], dtype='int32')""" + ) + nanoseconds = _field_accessor( + "nanoseconds", + "nanoseconds", + nanoseconds_docstring, + ) + + @property + def components(self) -> DataFrame: + """ + Return a DataFrame of the individual resolution components of the Timedeltas. + + The components (days, hours, minutes seconds, milliseconds, microseconds, + nanoseconds) are returned as columns in a DataFrame. + + Returns + ------- + DataFrame + + Examples + -------- + >>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns']) + >>> tdelta_idx + TimedeltaIndex(['1 days 00:03:00.000002042'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.components + days hours minutes seconds milliseconds microseconds nanoseconds + 0 1 0 3 0 0 2 42 + """ + from pandas import DataFrame + + columns = [ + "days", + "hours", + "minutes", + "seconds", + "milliseconds", + "microseconds", + "nanoseconds", + ] + hasnans = self._hasna + if hasnans: + + def f(x): + if isna(x): + return [np.nan] * len(columns) + return x.components + + else: + + def f(x): + return x.components + + result = DataFrame([f(x) for x in self], columns=columns) + if not hasnans: + result = result.astype("int64") + return result + + +# --------------------------------------------------------------------- +# Constructor Helpers + + +def sequence_to_td64ns( + data, + copy: bool = False, + unit=None, + errors: DateTimeErrorChoices = "raise", +) -> tuple[np.ndarray, Tick | None]: + """ + Parameters + ---------- + data : list-like + copy : bool, default False + unit : str, optional + The timedelta unit to treat integers as multiples of. For numeric + data this defaults to ``'ns'``. + Must be un-specified if the data contains a str and ``errors=="raise"``. + errors : {"raise", "coerce", "ignore"}, default "raise" + How to handle elements that cannot be converted to timedelta64[ns]. + See ``pandas.to_timedelta`` for details. + + Returns + ------- + converted : numpy.ndarray + The sequence converted to a numpy array with dtype ``timedelta64[ns]``. + inferred_freq : Tick or None + The inferred frequency of the sequence. + + Raises + ------ + ValueError : Data cannot be converted to timedelta64[ns]. 
+ + Notes + ----- + Unlike `pandas.to_timedelta`, if setting ``errors=ignore`` will not cause + errors to be ignored; they are caught and subsequently ignored at a + higher level. + """ + assert unit not in ["Y", "y", "M"] # caller is responsible for checking + + inferred_freq = None + if unit is not None: + unit = parse_timedelta_unit(unit) + + data, copy = dtl.ensure_arraylike_for_datetimelike( + data, copy, cls_name="TimedeltaArray" + ) + + if isinstance(data, TimedeltaArray): + inferred_freq = data.freq + + # Convert whatever we have into timedelta64[ns] dtype + if data.dtype == object or is_string_dtype(data.dtype): + # no need to make a copy, need to convert if string-dtyped + data = _objects_to_td64ns(data, unit=unit, errors=errors) + copy = False + + elif is_integer_dtype(data.dtype): + # treat as multiples of the given unit + data, copy_made = _ints_to_td64ns(data, unit=unit) + copy = copy and not copy_made + + elif is_float_dtype(data.dtype): + # cast the unit, multiply base/frac separately + # to avoid precision issues from float -> int + if isinstance(data.dtype, ExtensionDtype): + mask = data._mask + data = data._data + else: + mask = np.isnan(data) + # The next few lines are effectively a vectorized 'cast_from_unit' + m, p = precision_from_unit(unit or "ns") + with warnings.catch_warnings(): + # Suppress RuntimeWarning about All-NaN slice + warnings.filterwarnings( + "ignore", "invalid value encountered in cast", RuntimeWarning + ) + base = data.astype(np.int64) + frac = data - base + if p: + frac = np.round(frac, p) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "invalid value encountered in cast", RuntimeWarning + ) + data = (base * m + (frac * m).astype(np.int64)).view("timedelta64[ns]") + data[mask] = iNaT + copy = False + + elif lib.is_np_dtype(data.dtype, "m"): + data_unit = get_unit_from_dtype(data.dtype) + if not is_supported_unit(data_unit): + # cast to closest supported unit, i.e. s or ns + new_reso = get_supported_reso(data_unit) + new_unit = npy_unit_to_abbrev(new_reso) + new_dtype = np.dtype(f"m8[{new_unit}]") + data = astype_overflowsafe(data, dtype=new_dtype, copy=False) + copy = False + + else: + # This includes datetime64-dtype, see GH#23539, GH#29794 + raise TypeError(f"dtype {data.dtype} cannot be converted to timedelta64[ns]") + + data = np.array(data, copy=copy) + + assert data.dtype.kind == "m" + assert data.dtype != "m8" # i.e. not unit-less + + return data, inferred_freq + + +def _ints_to_td64ns(data, unit: str = "ns"): + """ + Convert an ndarray with integer-dtype to timedelta64[ns] dtype, treating + the integers as multiples of the given timedelta unit. + + Parameters + ---------- + data : numpy.ndarray with integer-dtype + unit : str, default "ns" + The timedelta unit to treat integers as multiples of. 
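
A hedged sketch of this integer path: ``pd.to_timedelta`` routes integer input through ``sequence_to_td64ns`` and this helper, which views the int64 data as ``timedelta64[unit]`` and then converts to nanoseconds (a plain ``astype`` is shown below for illustration; the helper uses ``astype_overflowsafe`` so out-of-bounds values raise instead of wrapping):

    >>> import numpy as np
    >>> import pandas as pd
    >>> pd.to_timedelta(np.array([1, 2, 3]), unit="s")
    TimedeltaIndex(['0 days 00:00:01', '0 days 00:00:02', '0 days 00:00:03'],
                   dtype='timedelta64[ns]', freq=None)
    >>> np.array([1, 2], dtype=np.int64).view("timedelta64[s]").astype("timedelta64[ns]")
    array([1000000000, 2000000000], dtype='timedelta64[ns]')
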
+ + Returns + ------- + numpy.ndarray : timedelta64[ns] array converted from data + bool : whether a copy was made + """ + copy_made = False + unit = unit if unit is not None else "ns" + + if data.dtype != np.int64: + # converting to int64 makes a copy, so we can avoid + # re-copying later + data = data.astype(np.int64) + copy_made = True + + if unit != "ns": + dtype_str = f"timedelta64[{unit}]" + data = data.view(dtype_str) + + data = astype_overflowsafe(data, dtype=TD64NS_DTYPE) + + # the astype conversion makes a copy, so we can avoid re-copying later + copy_made = True + + else: + data = data.view("timedelta64[ns]") + + return data, copy_made + + +def _objects_to_td64ns(data, unit=None, errors: DateTimeErrorChoices = "raise"): + """ + Convert a object-dtyped or string-dtyped array into an + timedelta64[ns]-dtyped array. + + Parameters + ---------- + data : ndarray or Index + unit : str, default "ns" + The timedelta unit to treat integers as multiples of. + Must not be specified if the data contains a str. + errors : {"raise", "coerce", "ignore"}, default "raise" + How to handle elements that cannot be converted to timedelta64[ns]. + See ``pandas.to_timedelta`` for details. + + Returns + ------- + numpy.ndarray : timedelta64[ns] array converted from data + + Raises + ------ + ValueError : Data cannot be converted to timedelta64[ns]. + + Notes + ----- + Unlike `pandas.to_timedelta`, if setting `errors=ignore` will not cause + errors to be ignored; they are caught and subsequently ignored at a + higher level. + """ + # coerce Index to np.ndarray, converting string-dtype if necessary + values = np.array(data, dtype=np.object_, copy=False) + + result = array_to_timedelta64(values, unit=unit, errors=errors) + return result.view("timedelta64[ns]") + + +def _validate_td64_dtype(dtype) -> DtypeObj: + dtype = pandas_dtype(dtype) + if dtype == np.dtype("m8"): + # no precision disallowed GH#24806 + msg = ( + "Passing in 'timedelta' dtype with no precision is not allowed. " + "Please pass in 'timedelta64[ns]' instead." + ) + raise ValueError(msg) + + if ( + not isinstance(dtype, np.dtype) + or dtype.kind != "m" + or not is_supported_unit(get_unit_from_dtype(dtype)) + ): + raise ValueError(f"dtype {dtype} cannot be converted to timedelta64[ns]") + + return dtype diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/base.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/base.py new file mode 100644 index 00000000..d973f8f5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/base.py @@ -0,0 +1,1390 @@ +""" +Base and utility classes for pandas objects. 
+""" + +from __future__ import annotations + +import textwrap +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Literal, + cast, + final, + overload, +) +import warnings + +import numpy as np + +from pandas._config import using_copy_on_write + +from pandas._libs import lib +from pandas._typing import ( + AxisInt, + DtypeObj, + IndexLabel, + NDFrameT, + Self, + Shape, + npt, +) +from pandas.compat import PYPY +from pandas.compat.numpy import function as nv +from pandas.errors import AbstractMethodError +from pandas.util._decorators import ( + cache_readonly, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import can_hold_element +from pandas.core.dtypes.common import ( + is_object_dtype, + is_scalar, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + isna, + remove_na_arraylike, +) + +from pandas.core import ( + algorithms, + nanops, + ops, +) +from pandas.core.accessor import DirNamesMixin +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays import ExtensionArray +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + ) + + from pandas._typing import ( + DropKeep, + NumpySorter, + NumpyValueArrayLike, + ScalarLike_co, + ) + + from pandas import ( + DataFrame, + Index, + Series, + ) + + +_shared_docs: dict[str, str] = {} +_indexops_doc_kwargs = { + "klass": "IndexOpsMixin", + "inplace": "", + "unique": "IndexOpsMixin", + "duplicated": "IndexOpsMixin", +} + + +class PandasObject(DirNamesMixin): + """ + Baseclass for various pandas objects. + """ + + # results from calls to methods decorated with cache_readonly get added to _cache + _cache: dict[str, Any] + + @property + def _constructor(self): + """ + Class constructor (for this class it's just `__class__`. + """ + return type(self) + + def __repr__(self) -> str: + """ + Return a string representation for a particular object. + """ + # Should be overwritten by base classes + return object.__repr__(self) + + def _reset_cache(self, key: str | None = None) -> None: + """ + Reset cached properties. If ``key`` is passed, only clears that key. + """ + if not hasattr(self, "_cache"): + return + if key is None: + self._cache.clear() + else: + self._cache.pop(key, None) + + def __sizeof__(self) -> int: + """ + Generates the total memory usage for an object that returns + either a value or Series of values + """ + memory_usage = getattr(self, "memory_usage", None) + if memory_usage: + mem = memory_usage(deep=True) # pylint: disable=not-callable + return int(mem if is_scalar(mem) else mem.sum()) + + # no memory_usage attribute, so fall back to object's 'sizeof' + return super().__sizeof__() + + +class NoNewAttributesMixin: + """ + Mixin which prevents adding new attributes. + + Prevents additional attributes via xxx.attribute = "something" after a + call to `self.__freeze()`. Mainly used to prevent the user from using + wrong attributes on an accessor (`Series.cat/.str/.dt`). + + If you really want to add a new attribute at a later time, you need to use + `object.__setattr__(self, key, value)`. + """ + + def _freeze(self) -> None: + """ + Prevents setting additional attributes. + """ + object.__setattr__(self, "__frozen", True) + + # prevent adding any attribute via s.xxx.new_attribute = ... 
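
The freeze in practice, sketched with the ``Series.str`` accessor named in the docstring (the behavior comes from the ``__setattr__`` override just below):

    >>> import pandas as pd
    >>> s = pd.Series(["ant", "bear"])
    >>> s.str.new_attribute = "x"
    Traceback (most recent call last):
        ...
    AttributeError: You cannot add any new attribute 'new_attribute'
    >>> object.__setattr__(s.str, "new_attribute", "x")  # documented escape hatch
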
+ def __setattr__(self, key: str, value) -> None: + # _cache is used by a decorator + # We need to check both 1.) cls.__dict__ and 2.) getattr(self, key) + # because + # 1.) getattr is false for attributes that raise errors + # 2.) cls.__dict__ doesn't traverse into base classes + if getattr(self, "__frozen", False) and not ( + key == "_cache" + or key in type(self).__dict__ + or getattr(self, key, None) is not None + ): + raise AttributeError(f"You cannot add any new attribute '{key}'") + object.__setattr__(self, key, value) + + +class SelectionMixin(Generic[NDFrameT]): + """ + mixin implementing the selection & aggregation interface on a group-like + object sub-classes need to define: obj, exclusions + """ + + obj: NDFrameT + _selection: IndexLabel | None = None + exclusions: frozenset[Hashable] + _internal_names = ["_cache", "__setstate__"] + _internal_names_set = set(_internal_names) + + @final + @property + def _selection_list(self): + if not isinstance( + self._selection, (list, tuple, ABCSeries, ABCIndex, np.ndarray) + ): + return [self._selection] + return self._selection + + @cache_readonly + def _selected_obj(self): + if self._selection is None or isinstance(self.obj, ABCSeries): + return self.obj + else: + return self.obj[self._selection] + + @final + @cache_readonly + def ndim(self) -> int: + return self._selected_obj.ndim + + @final + @cache_readonly + def _obj_with_exclusions(self): + if isinstance(self.obj, ABCSeries): + return self.obj + + if self._selection is not None: + return self.obj._getitem_nocopy(self._selection_list) + + if len(self.exclusions) > 0: + # equivalent to `self.obj.drop(self.exclusions, axis=1) + # but this avoids consolidating and making a copy + # TODO: following GH#45287 can we now use .drop directly without + # making a copy? + return self.obj._drop_axis(self.exclusions, axis=1, only_slice=True) + else: + return self.obj + + def __getitem__(self, key): + if self._selection is not None: + raise IndexError(f"Column(s) {self._selection} already selected") + + if isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)): + if len(self.obj.columns.intersection(key)) != len(set(key)): + bad_keys = list(set(key).difference(self.obj.columns)) + raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}") + return self._gotitem(list(key), ndim=2) + + else: + if key not in self.obj: + raise KeyError(f"Column not found: {key}") + ndim = self.obj[key].ndim + return self._gotitem(key, ndim=ndim) + + def _gotitem(self, key, ndim: int, subset=None): + """ + sub-classes to define + return a sliced object + + Parameters + ---------- + key : str / list of selections + ndim : {1, 2} + requested ndim of result + subset : object, default None + subset to act on + """ + raise AbstractMethodError(self) + + @final + def _infer_selection(self, key, subset: Series | DataFrame): + """ + Infer the `selection` to pass to our constructor in _gotitem. 
+ """ + # Shared by Rolling and Resample + selection = None + if subset.ndim == 2 and ( + (lib.is_scalar(key) and key in subset) or lib.is_list_like(key) + ): + selection = key + elif subset.ndim == 1 and lib.is_scalar(key) and key == subset.name: + selection = key + return selection + + def aggregate(self, func, *args, **kwargs): + raise AbstractMethodError(self) + + agg = aggregate + + +class IndexOpsMixin(OpsMixin): + """ + Common ops mixin to support a unified interface / docs for Series / Index + """ + + # ndarray compatibility + __array_priority__ = 1000 + _hidden_attrs: frozenset[str] = frozenset( + ["tolist"] # tolist is not deprecated, just suppressed in the __dir__ + ) + + @property + def dtype(self) -> DtypeObj: + # must be defined here as a property for mypy + raise AbstractMethodError(self) + + @property + def _values(self) -> ExtensionArray | np.ndarray: + # must be defined here as a property for mypy + raise AbstractMethodError(self) + + @final + def transpose(self, *args, **kwargs) -> Self: + """ + Return the transpose, which is by definition self. + + Returns + ------- + %(klass)s + """ + nv.validate_transpose(args, kwargs) + return self + + T = property( + transpose, + doc=""" + Return the transpose, which is by definition self. + + Examples + -------- + For Series: + + >>> s = pd.Series(['Ant', 'Bear', 'Cow']) + >>> s + 0 Ant + 1 Bear + 2 Cow + dtype: object + >>> s.T + 0 Ant + 1 Bear + 2 Cow + dtype: object + + For Index: + + >>> idx = pd.Index([1, 2, 3]) + >>> idx.T + Index([1, 2, 3], dtype='int64') + """, + ) + + @property + def shape(self) -> Shape: + """ + Return a tuple of the shape of the underlying data. + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.shape + (3,) + """ + return self._values.shape + + def __len__(self) -> int: + # We need this defined here for mypy + raise AbstractMethodError(self) + + @property + def ndim(self) -> Literal[1]: + """ + Number of dimensions of the underlying data, by definition 1. + + Examples + -------- + >>> s = pd.Series(['Ant', 'Bear', 'Cow']) + >>> s + 0 Ant + 1 Bear + 2 Cow + dtype: object + >>> s.ndim + 1 + + For Index: + + >>> idx = pd.Index([1, 2, 3]) + >>> idx + Index([1, 2, 3], dtype='int64') + >>> idx.ndim + 1 + """ + return 1 + + @final + def item(self): + """ + Return the first element of the underlying data as a Python scalar. + + Returns + ------- + scalar + The first element of Series or Index. + + Raises + ------ + ValueError + If the data is not length = 1. + + Examples + -------- + >>> s = pd.Series([1]) + >>> s.item() + 1 + + For an index: + + >>> s = pd.Series([1], index=['a']) + >>> s.index.item() + 'a' + """ + if len(self) == 1: + return next(iter(self)) + raise ValueError("can only convert an array of size 1 to a Python scalar") + + @property + def nbytes(self) -> int: + """ + Return the number of bytes in the underlying data. + + Examples + -------- + For Series: + + >>> s = pd.Series(['Ant', 'Bear', 'Cow']) + >>> s + 0 Ant + 1 Bear + 2 Cow + dtype: object + >>> s.nbytes + 24 + + For Index: + + >>> idx = pd.Index([1, 2, 3]) + >>> idx + Index([1, 2, 3], dtype='int64') + >>> idx.nbytes + 24 + """ + return self._values.nbytes + + @property + def size(self) -> int: + """ + Return the number of elements in the underlying data. 
+ + Examples + -------- + For Series: + + >>> s = pd.Series(['Ant', 'Bear', 'Cow']) + >>> s + 0 Ant + 1 Bear + 2 Cow + dtype: object + >>> s.size + 3 + + For Index: + + >>> idx = pd.Index([1, 2, 3]) + >>> idx + Index([1, 2, 3], dtype='int64') + >>> idx.size + 3 + """ + return len(self._values) + + @property + def array(self) -> ExtensionArray: + """ + The ExtensionArray of the data backing this Series or Index. + + Returns + ------- + ExtensionArray + An ExtensionArray of the values stored within. For extension + types, this is the actual array. For NumPy native types, this + is a thin (no copy) wrapper around :class:`numpy.ndarray`. + + ``.array`` differs ``.values`` which may require converting the + data to a different form. + + See Also + -------- + Index.to_numpy : Similar method that always returns a NumPy array. + Series.to_numpy : Similar method that always returns a NumPy array. + + Notes + ----- + This table lays out the different array types for each extension + dtype within pandas. + + ================== ============================= + dtype array type + ================== ============================= + category Categorical + period PeriodArray + interval IntervalArray + IntegerNA IntegerArray + string StringArray + boolean BooleanArray + datetime64[ns, tz] DatetimeArray + ================== ============================= + + For any 3rd-party extension types, the array type will be an + ExtensionArray. + + For all remaining dtypes ``.array`` will be a + :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray + stored within. If you absolutely need a NumPy array (possibly with + copying / coercing data), then use :meth:`Series.to_numpy` instead. + + Examples + -------- + For regular NumPy types like int, and float, a NumpyExtensionArray + is returned. + + >>> pd.Series([1, 2, 3]).array + + [1, 2, 3] + Length: 3, dtype: int64 + + For extension types, like Categorical, the actual ExtensionArray + is returned + + >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a'])) + >>> ser.array + ['a', 'b', 'a'] + Categories (2, object): ['a', 'b'] + """ + raise AbstractMethodError(self) + + @final + def to_numpy( + self, + dtype: npt.DTypeLike | None = None, + copy: bool = False, + na_value: object = lib.no_default, + **kwargs, + ) -> np.ndarray: + """ + A NumPy ndarray representing the values in this Series or Index. + + Parameters + ---------- + dtype : str or numpy.dtype, optional + The dtype to pass to :meth:`numpy.asarray`. + copy : bool, default False + Whether to ensure that the returned value is not a view on + another array. Note that ``copy=False`` does not *ensure* that + ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that + a copy is made, even if not strictly necessary. + na_value : Any, optional + The value to use for missing values. The default value depends + on `dtype` and the type of the array. + **kwargs + Additional keywords passed through to the ``to_numpy`` method + of the underlying array (for extension arrays). + + Returns + ------- + numpy.ndarray + + See Also + -------- + Series.array : Get the actual data stored within. + Index.array : Get the actual data stored within. + DataFrame.to_numpy : Similar method for DataFrame. + + Notes + ----- + The returned array will be the same up to equality (values equal + in `self` will be equal in the returned array; likewise for values + that are not equal). When `self` contains an ExtensionArray, the + dtype may be different. 
For example, for a category-dtype Series, + ``to_numpy()`` will return a NumPy array and the categorical dtype + will be lost. + + For NumPy dtypes, this will be a reference to the actual data stored + in this Series or Index (assuming ``copy=False``). Modifying the result + in place will modify the data stored in the Series or Index (not that + we recommend doing that). + + For extension types, ``to_numpy()`` *may* require copying data and + coercing the result to a NumPy type (possibly object), which may be + expensive. When you need a no-copy reference to the underlying data, + :attr:`Series.array` should be used instead. + + This table lays out the different dtypes and default return types of + ``to_numpy()`` for various dtypes within pandas. + + ================== ================================ + dtype array type + ================== ================================ + category[T] ndarray[T] (same dtype as input) + period ndarray[object] (Periods) + interval ndarray[object] (Intervals) + IntegerNA ndarray[object] + datetime64[ns] datetime64[ns] + datetime64[ns, tz] ndarray[object] (Timestamps) + ================== ================================ + + Examples + -------- + >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a'])) + >>> ser.to_numpy() + array(['a', 'b', 'a'], dtype=object) + + Specify the `dtype` to control how datetime-aware data is represented. + Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp` + objects, each with the correct ``tz``. + + >>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) + >>> ser.to_numpy(dtype=object) + array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), + Timestamp('2000-01-02 00:00:00+0100', tz='CET')], + dtype=object) + + Or ``dtype='datetime64[ns]'`` to return an ndarray of native + datetime64 values. The values are converted to UTC and the timezone + info is dropped. + + >>> ser.to_numpy(dtype="datetime64[ns]") + ... # doctest: +ELLIPSIS + array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'], + dtype='datetime64[ns]') + """ + if isinstance(self.dtype, ExtensionDtype): + return self.array.to_numpy(dtype, copy=copy, na_value=na_value, **kwargs) + elif kwargs: + bad_keys = next(iter(kwargs.keys())) + raise TypeError( + f"to_numpy() got an unexpected keyword argument '{bad_keys}'" + ) + + fillna = ( + na_value is not lib.no_default + # no need to fillna with np.nan if we already have a float dtype + and not (na_value is np.nan and np.issubdtype(self.dtype, np.floating)) + ) + + values = self._values + if fillna: + if not can_hold_element(values, na_value): + # if we can't hold the na_value asarray either makes a copy or we + # error before modifying values. The asarray later on thus won't make + # another copy + values = np.asarray(values, dtype=dtype) + else: + values = values.copy() + + values[np.asanyarray(isna(self))] = na_value + + result = np.asarray(values, dtype=dtype) + + if (copy and not fillna) or (not copy and using_copy_on_write()): + if np.shares_memory(self._values[:2], result[:2]): + # Take slices to improve performance of check + if using_copy_on_write() and not copy: + result = result.view() + result.flags.writeable = False + else: + result = result.copy() + + return result + + @final + @property + def empty(self) -> bool: + return not self.size + + @doc(op="max", oppose="min", value="largest") + def argmax( + self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs + ) -> int: + """ + Return int position of the {value} value in the Series. 
+ + If the {op}imum is achieved in multiple locations, + the first row position is returned. + + Parameters + ---------- + axis : {{None}} + Unused. Parameter needed for compatibility with DataFrame. + skipna : bool, default True + Exclude NA/null values when showing the result. + *args, **kwargs + Additional arguments and keywords for compatibility with NumPy. + + Returns + ------- + int + Row position of the {op}imum value. + + See Also + -------- + Series.arg{op} : Return position of the {op}imum value. + Series.arg{oppose} : Return position of the {oppose}imum value. + numpy.ndarray.arg{op} : Equivalent method for numpy arrays. + Series.idxmax : Return index label of the maximum values. + Series.idxmin : Return index label of the minimum values. + + Examples + -------- + Consider dataset containing cereal calories + + >>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0, + ... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}}) + >>> s + Corn Flakes 100.0 + Almond Delight 110.0 + Cinnamon Toast Crunch 120.0 + Cocoa Puff 110.0 + dtype: float64 + + >>> s.argmax() + 2 + >>> s.argmin() + 0 + + The maximum cereal calories is the third element and + the minimum cereal calories is the first element, + since series is zero-indexed. + """ + delegate = self._values + nv.validate_minmax_axis(axis) + skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) + + if isinstance(delegate, ExtensionArray): + if not skipna and delegate.isna().any(): + warnings.warn( + f"The behavior of {type(self).__name__}.argmax/argmin " + "with skipna=False and NAs, or with all-NAs is deprecated. " + "In a future version this will raise ValueError.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return -1 + else: + return delegate.argmax() + else: + result = nanops.nanargmax(delegate, skipna=skipna) + if result == -1: + warnings.warn( + f"The behavior of {type(self).__name__}.argmax/argmin " + "with skipna=False and NAs, or with all-NAs is deprecated. " + "In a future version this will raise ValueError.", + FutureWarning, + stacklevel=find_stack_level(), + ) + # error: Incompatible return value type (got "Union[int, ndarray]", expected + # "int") + return result # type: ignore[return-value] + + @doc(argmax, op="min", oppose="max", value="smallest") + def argmin( + self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs + ) -> int: + delegate = self._values + nv.validate_minmax_axis(axis) + skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) + + if isinstance(delegate, ExtensionArray): + if not skipna and delegate.isna().any(): + warnings.warn( + f"The behavior of {type(self).__name__}.argmax/argmin " + "with skipna=False and NAs, or with all-NAs is deprecated. " + "In a future version this will raise ValueError.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return -1 + else: + return delegate.argmin() + else: + result = nanops.nanargmin(delegate, skipna=skipna) + if result == -1: + warnings.warn( + f"The behavior of {type(self).__name__}.argmax/argmin " + "with skipna=False and NAs, or with all-NAs is deprecated. " + "In a future version this will raise ValueError.", + FutureWarning, + stacklevel=find_stack_level(), + ) + # error: Incompatible return value type (got "Union[int, ndarray]", expected + # "int") + return result # type: ignore[return-value] + + def tolist(self): + """ + Return a list of the values. 
+ + These are each a scalar type, which is a Python scalar + (for str, int, float) or a pandas scalar + (for Timestamp/Timedelta/Interval/Period) + + Returns + ------- + list + + See Also + -------- + numpy.ndarray.tolist : Return the array as an a.ndim-levels deep + nested list of Python scalars. + + Examples + -------- + For Series + + >>> s = pd.Series([1, 2, 3]) + >>> s.to_list() + [1, 2, 3] + + For Index: + + >>> idx = pd.Index([1, 2, 3]) + >>> idx + Index([1, 2, 3], dtype='int64') + + >>> idx.to_list() + [1, 2, 3] + """ + return self._values.tolist() + + to_list = tolist + + def __iter__(self) -> Iterator: + """ + Return an iterator of the values. + + These are each a scalar type, which is a Python scalar + (for str, int, float) or a pandas scalar + (for Timestamp/Timedelta/Interval/Period) + + Returns + ------- + iterator + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> for x in s: + ... print(x) + 1 + 2 + 3 + """ + # We are explicitly making element iterators. + if not isinstance(self._values, np.ndarray): + # Check type instead of dtype to catch DTA/TDA + return iter(self._values) + else: + return map(self._values.item, range(self._values.size)) + + @cache_readonly + def hasnans(self) -> bool: + """ + Return True if there are any NaNs. + + Enables various performance speedups. + + Returns + ------- + bool + + Examples + -------- + >>> s = pd.Series([1, 2, 3, None]) + >>> s + 0 1.0 + 1 2.0 + 2 3.0 + 3 NaN + dtype: float64 + >>> s.hasnans + True + """ + # error: Item "bool" of "Union[bool, ndarray[Any, dtype[bool_]], NDFrame]" + # has no attribute "any" + return bool(isna(self).any()) # type: ignore[union-attr] + + @final + def _map_values(self, mapper, na_action=None, convert: bool = True): + """ + An internal function that maps values using the input + correspondence (which can be a dict, Series, or function). + + Parameters + ---------- + mapper : function, dict, or Series + The input correspondence object + na_action : {None, 'ignore'} + If 'ignore', propagate NA values, without passing them to the + mapping function + convert : bool, default True + Try to find better dtype for elementwise function results. If + False, leave as dtype=object. Note that the dtype is always + preserved for some extension array dtypes, such as Categorical. + + Returns + ------- + Union[Index, MultiIndex], inferred + The output of the mapping function applied to the index. + If the function returns a tuple with more than one element + a MultiIndex will be returned. + """ + arr = self._values + + if isinstance(arr, ExtensionArray): + return arr.map(mapper, na_action=na_action) + + return algorithms.map_array(arr, mapper, na_action=na_action, convert=convert) + + @final + def value_counts( + self, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + bins=None, + dropna: bool = True, + ) -> Series: + """ + Return a Series containing counts of unique values. + + The resulting object will be in descending order so that the + first element is the most frequently-occurring element. + Excludes NA values by default. + + Parameters + ---------- + normalize : bool, default False + If True then the object returned will contain the relative + frequencies of the unique values. + sort : bool, default True + Sort by frequencies when True. Preserve the order of the data when False. + ascending : bool, default False + Sort in ascending order. 
+ bins : int, optional + Rather than count values, group them into half-open bins, + a convenience for ``pd.cut``, only works with numeric data. + dropna : bool, default True + Don't include counts of NaN. + + Returns + ------- + Series + + See Also + -------- + Series.count: Number of non-NA elements in a Series. + DataFrame.count: Number of non-NA elements in a DataFrame. + DataFrame.value_counts: Equivalent method on DataFrames. + + Examples + -------- + >>> index = pd.Index([3, 1, 2, 3, 4, np.nan]) + >>> index.value_counts() + 3.0 2 + 1.0 1 + 2.0 1 + 4.0 1 + Name: count, dtype: int64 + + With `normalize` set to `True`, returns the relative frequency by + dividing all values by the sum of values. + + >>> s = pd.Series([3, 1, 2, 3, 4, np.nan]) + >>> s.value_counts(normalize=True) + 3.0 0.4 + 1.0 0.2 + 2.0 0.2 + 4.0 0.2 + Name: proportion, dtype: float64 + + **bins** + + Bins can be useful for going from a continuous variable to a + categorical variable; instead of counting unique + apparitions of values, divide the index in the specified + number of half-open bins. + + >>> s.value_counts(bins=3) + (0.996, 2.0] 2 + (2.0, 3.0] 2 + (3.0, 4.0] 1 + Name: count, dtype: int64 + + **dropna** + + With `dropna` set to `False` we can also see NaN index values. + + >>> s.value_counts(dropna=False) + 3.0 2 + 1.0 1 + 2.0 1 + 4.0 1 + NaN 1 + Name: count, dtype: int64 + """ + return algorithms.value_counts_internal( + self, + sort=sort, + ascending=ascending, + normalize=normalize, + bins=bins, + dropna=dropna, + ) + + def unique(self): + values = self._values + if not isinstance(values, np.ndarray): + # i.e. ExtensionArray + result = values.unique() + else: + result = algorithms.unique1d(values) + return result + + @final + def nunique(self, dropna: bool = True) -> int: + """ + Return number of unique elements in the object. + + Excludes NA values by default. + + Parameters + ---------- + dropna : bool, default True + Don't include NaN in the count. + + Returns + ------- + int + + See Also + -------- + DataFrame.nunique: Method nunique for DataFrame. + Series.count: Count non-NA/null observations in the Series. + + Examples + -------- + >>> s = pd.Series([1, 3, 5, 7, 7]) + >>> s + 0 1 + 1 3 + 2 5 + 3 7 + 4 7 + dtype: int64 + + >>> s.nunique() + 4 + """ + uniqs = self.unique() + if dropna: + uniqs = remove_na_arraylike(uniqs) + return len(uniqs) + + @property + def is_unique(self) -> bool: + """ + Return boolean if values in the object are unique. + + Returns + ------- + bool + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.is_unique + True + + >>> s = pd.Series([1, 2, 3, 1]) + >>> s.is_unique + False + """ + return self.nunique(dropna=False) == len(self) + + @property + def is_monotonic_increasing(self) -> bool: + """ + Return boolean if values in the object are monotonically increasing. + + Returns + ------- + bool + + Examples + -------- + >>> s = pd.Series([1, 2, 2]) + >>> s.is_monotonic_increasing + True + + >>> s = pd.Series([3, 2, 1]) + >>> s.is_monotonic_increasing + False + """ + from pandas import Index + + return Index(self).is_monotonic_increasing + + @property + def is_monotonic_decreasing(self) -> bool: + """ + Return boolean if values in the object are monotonically decreasing. 
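+
+        Monotonicity checks are delegated to ``Index``, so the presence of
+        any ``NaN`` yields ``False``; a hedged editorial doctest:
+
+        >>> pd.Series([3.0, 2.0, np.nan]).is_monotonic_decreasing
+        False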
+ + Returns + ------- + bool + + Examples + -------- + >>> s = pd.Series([3, 2, 2, 1]) + >>> s.is_monotonic_decreasing + True + + >>> s = pd.Series([1, 2, 3]) + >>> s.is_monotonic_decreasing + False + """ + from pandas import Index + + return Index(self).is_monotonic_decreasing + + @final + def _memory_usage(self, deep: bool = False) -> int: + """ + Memory usage of the values. + + Parameters + ---------- + deep : bool, default False + Introspect the data deeply, interrogate + `object` dtypes for system-level memory consumption. + + Returns + ------- + bytes used + + See Also + -------- + numpy.ndarray.nbytes : Total bytes consumed by the elements of the + array. + + Notes + ----- + Memory usage does not include memory consumed by elements that + are not components of the array if deep=False or if used on PyPy + + Examples + -------- + >>> idx = pd.Index([1, 2, 3]) + >>> idx.memory_usage() + 24 + """ + if hasattr(self.array, "memory_usage"): + return self.array.memory_usage( # pyright: ignore[reportGeneralTypeIssues] + deep=deep, + ) + + v = self.array.nbytes + if deep and is_object_dtype(self.dtype) and not PYPY: + values = cast(np.ndarray, self._values) + v += lib.memory_usage_of_objects(values) + return v + + @doc( + algorithms.factorize, + values="", + order="", + size_hint="", + sort=textwrap.dedent( + """\ + sort : bool, default False + Sort `uniques` and shuffle `codes` to maintain the + relationship. + """ + ), + ) + def factorize( + self, + sort: bool = False, + use_na_sentinel: bool = True, + ) -> tuple[npt.NDArray[np.intp], Index]: + codes, uniques = algorithms.factorize( + self._values, sort=sort, use_na_sentinel=use_na_sentinel + ) + if uniques.dtype == np.float16: + uniques = uniques.astype(np.float32) + + if isinstance(self, ABCIndex): + # preserve e.g. MultiIndex + uniques = self._constructor(uniques) + else: + from pandas import Index + + uniques = Index(uniques) + return codes, uniques + + _shared_docs[ + "searchsorted" + ] = """ + Find indices where elements should be inserted to maintain order. + + Find the indices into a sorted {klass} `self` such that, if the + corresponding elements in `value` were inserted before the indices, + the order of `self` would be preserved. + + .. note:: + + The {klass} *must* be monotonically sorted, otherwise + wrong locations will likely be returned. Pandas does *not* + check this for you. + + Parameters + ---------- + value : array-like or scalar + Values to insert into `self`. + side : {{'left', 'right'}}, optional + If 'left', the index of the first suitable location found is given. + If 'right', return the last such index. If there is no suitable + index, return either 0 or N (where N is the length of `self`). + sorter : 1-D array-like, optional + Optional array of integer indices that sort `self` into ascending + order. They are typically the result of ``np.argsort``. + + Returns + ------- + int or array of int + A scalar or array of insertion points with the + same shape as `value`. + + See Also + -------- + sort_values : Sort by the values along either axis. + numpy.searchsorted : Similar method from NumPy. + + Notes + ----- + Binary search is used to find the required insertion points. 
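+
+        That makes lookups O(log n) on already-sorted data; a small
+        editorial sketch (``ser`` assumed sorted ascending):
+
+        >>> ser = pd.Series([1, 3, 5])
+        >>> int(ser.searchsorted(4))
+        2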
+ + Examples + -------- + >>> ser = pd.Series([1, 2, 3]) + >>> ser + 0 1 + 1 2 + 2 3 + dtype: int64 + + >>> ser.searchsorted(4) + 3 + + >>> ser.searchsorted([0, 4]) + array([0, 3]) + + >>> ser.searchsorted([1, 3], side='left') + array([0, 2]) + + >>> ser.searchsorted([1, 3], side='right') + array([1, 3]) + + >>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000'])) + >>> ser + 0 2000-03-11 + 1 2000-03-12 + 2 2000-03-13 + dtype: datetime64[ns] + + >>> ser.searchsorted('3/14/2000') + 3 + + >>> ser = pd.Categorical( + ... ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True + ... ) + >>> ser + ['apple', 'bread', 'bread', 'cheese', 'milk'] + Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk'] + + >>> ser.searchsorted('bread') + 1 + + >>> ser.searchsorted(['bread'], side='right') + array([3]) + + If the values are not monotonically sorted, wrong locations + may be returned: + + >>> ser = pd.Series([2, 1, 3]) + >>> ser + 0 2 + 1 1 + 2 3 + dtype: int64 + + >>> ser.searchsorted(1) # doctest: +SKIP + 0 # wrong result, correct would be 1 + """ + + # This overload is needed so that the call to searchsorted in + # pandas.core.resample.TimeGrouper._get_period_bins picks the correct result + + @overload + # The following ignore is also present in numpy/__init__.pyi + # Possibly a mypy bug?? + # error: Overloaded function signatures 1 and 2 overlap with incompatible + # return types [misc] + def searchsorted( # type: ignore[misc] + self, + value: ScalarLike_co, + side: Literal["left", "right"] = ..., + sorter: NumpySorter = ..., + ) -> np.intp: + ... + + @overload + def searchsorted( + self, + value: npt.ArrayLike | ExtensionArray, + side: Literal["left", "right"] = ..., + sorter: NumpySorter = ..., + ) -> npt.NDArray[np.intp]: + ... + + @doc(_shared_docs["searchsorted"], klass="Index") + def searchsorted( + self, + value: NumpyValueArrayLike | ExtensionArray, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + if isinstance(value, ABCDataFrame): + msg = ( + "Value must be 1-D array-like or scalar, " + f"{type(value).__name__} is not supported" + ) + raise ValueError(msg) + + values = self._values + if not isinstance(values, np.ndarray): + # Going through EA.searchsorted directly improves performance GH#38083 + return values.searchsorted(value, side=side, sorter=sorter) + + return algorithms.searchsorted( + values, + value, + side=side, + sorter=sorter, + ) + + def drop_duplicates(self, *, keep: DropKeep = "first"): + duplicated = self._duplicated(keep=keep) + # error: Value of type "IndexOpsMixin" is not indexable + return self[~duplicated] # type: ignore[index] + + @final + def _duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: + return algorithms.duplicated(self._values, keep=keep) + + def _arith_method(self, other, op): + res_name = ops.get_op_result_name(self, other) + + lvalues = self._values + rvalues = extract_array(other, extract_numpy=True, extract_range=True) + rvalues = ops.maybe_prepare_scalar_for_op(rvalues, lvalues.shape) + rvalues = ensure_wrapped_if_datetimelike(rvalues) + if isinstance(rvalues, range): + rvalues = np.arange(rvalues.start, rvalues.stop, rvalues.step) + + with np.errstate(all="ignore"): + result = ops.arithmetic_op(lvalues, rvalues, op) + + return self._construct_result(result, name=res_name) + + def _construct_result(self, result, name): + """ + Construct an appropriately-wrapped result from the ArrayLike result + of an arithmetic-like operation. 
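+
+        Subclasses such as ``Series`` override this to rewrap the raw array
+        and carry the result ``name`` through; for instance (editorial
+        doctest):
+
+        >>> (pd.Series([1, 2], name="x") + 1).name
+        'x'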
+ """ + raise AbstractMethodError(self) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/common.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/common.py new file mode 100644 index 00000000..6d419098 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/common.py @@ -0,0 +1,645 @@ +""" +Misc tools for implementing data structures + +Note: pandas.core.common is *not* part of the public API. +""" +from __future__ import annotations + +import builtins +from collections import ( + abc, + defaultdict, +) +from collections.abc import ( + Collection, + Generator, + Hashable, + Iterable, + Sequence, +) +import contextlib +from functools import partial +import inspect +from typing import ( + TYPE_CHECKING, + Any, + Callable, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas.compat.numpy import np_version_gte1p24 + +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_integer, +) +from pandas.core.dtypes.generic import ( + ABCExtensionArray, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.inference import iterable_not_string + +if TYPE_CHECKING: + from pandas._typing import ( + AnyArrayLike, + ArrayLike, + NpDtype, + RandomState, + T, + ) + + from pandas import Index + + +def flatten(line): + """ + Flatten an arbitrarily nested sequence. + + Parameters + ---------- + line : sequence + The non string sequence to flatten + + Notes + ----- + This doesn't consider strings sequences. + + Returns + ------- + flattened : generator + """ + for element in line: + if iterable_not_string(element): + yield from flatten(element) + else: + yield element + + +def consensus_name_attr(objs): + name = objs[0].name + for obj in objs[1:]: + try: + if obj.name != name: + name = None + except ValueError: + name = None + return name + + +def is_bool_indexer(key: Any) -> bool: + """ + Check whether `key` is a valid boolean indexer. + + Parameters + ---------- + key : Any + Only list-likes may be considered boolean indexers. + All other types are not considered a boolean indexer. + For array-like input, boolean ndarrays or ExtensionArrays + with ``_is_boolean`` set are considered boolean indexers. + + Returns + ------- + bool + Whether `key` is a valid boolean indexer. + + Raises + ------ + ValueError + When the array is an object-dtype ndarray or ExtensionArray + and contains missing values. + + See Also + -------- + check_array_indexer : Check that `key` is a valid array to index, + and convert to an ndarray. + """ + if isinstance(key, (ABCSeries, np.ndarray, ABCIndex, ABCExtensionArray)): + if key.dtype == np.object_: + key_array = np.asarray(key) + + if not lib.is_bool_array(key_array): + na_msg = "Cannot mask with non-boolean array containing NA / NaN values" + if lib.is_bool_array(key_array, skipna=True): + # Don't raise on e.g. ["A", "B", np.nan], see + # test_loc_getitem_list_of_labels_categoricalindex_with_na + raise ValueError(na_msg) + return False + return True + elif is_bool_dtype(key.dtype): + return True + elif isinstance(key, list): + # check if np.array(key).dtype would be bool + if len(key) > 0: + if type(key) is not list: + # GH#42461 cython will raise TypeError if we pass a subclass + key = list(key) + return lib.is_bool_list(key) + + return False + + +def cast_scalar_indexer(val): + """ + Disallow indexing with a float key, even if that key is a round number. 
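+
+    A float key such as ``1.0`` is rejected outright rather than rounded;
+    an illustrative sketch (editorial, output abbreviated):
+
+    >>> pd.Index([10, 20, 30])[1.0]  # doctest: +SKIP
+    IndexError: Indexing with a float is no longer supported. ...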
+ + Parameters + ---------- + val : scalar + + Returns + ------- + outval : scalar + """ + # assumes lib.is_scalar(val) + if lib.is_float(val) and val.is_integer(): + raise IndexError( + # GH#34193 + "Indexing with a float is no longer supported. Manually convert " + "to an integer key instead." + ) + return val + + +def not_none(*args): + """ + Returns a generator consisting of the arguments that are not None. + """ + return (arg for arg in args if arg is not None) + + +def any_none(*args) -> bool: + """ + Returns a boolean indicating if any argument is None. + """ + return any(arg is None for arg in args) + + +def all_none(*args) -> bool: + """ + Returns a boolean indicating if all arguments are None. + """ + return all(arg is None for arg in args) + + +def any_not_none(*args) -> bool: + """ + Returns a boolean indicating if any argument is not None. + """ + return any(arg is not None for arg in args) + + +def all_not_none(*args) -> bool: + """ + Returns a boolean indicating if all arguments are not None. + """ + return all(arg is not None for arg in args) + + +def count_not_none(*args) -> int: + """ + Returns the count of arguments that are not None. + """ + return sum(x is not None for x in args) + + +@overload +def asarray_tuplesafe( + values: ArrayLike | list | tuple | zip, dtype: NpDtype | None = ... +) -> np.ndarray: + # ExtensionArray can only be returned when values is an Index, all other iterables + # will return np.ndarray. Unfortunately "all other" cannot be encoded in a type + # signature, so instead we special-case some common types. + ... + + +@overload +def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = ...) -> ArrayLike: + ... + + +def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = None) -> ArrayLike: + if not (isinstance(values, (list, tuple)) or hasattr(values, "__array__")): + values = list(values) + elif isinstance(values, ABCIndex): + return values._values + + if isinstance(values, list) and dtype in [np.object_, object]: + return construct_1d_object_array_from_listlike(values) + + try: + with warnings.catch_warnings(): + # Can remove warning filter once NumPy 1.24 is min version + if not np_version_gte1p24: + warnings.simplefilter("ignore", np.VisibleDeprecationWarning) + result = np.asarray(values, dtype=dtype) + except ValueError: + # Using try/except since it's more performant than checking is_list_like + # over each element + # error: Argument 1 to "construct_1d_object_array_from_listlike" + # has incompatible type "Iterable[Any]"; expected "Sized" + return construct_1d_object_array_from_listlike(values) # type: ignore[arg-type] + + if issubclass(result.dtype.type, str): + result = np.asarray(values, dtype=object) + + if result.ndim == 2: + # Avoid building an array of arrays: + values = [tuple(x) for x in values] + result = construct_1d_object_array_from_listlike(values) + + return result + + +def index_labels_to_array( + labels: np.ndarray | Iterable, dtype: NpDtype | None = None +) -> np.ndarray: + """ + Transform label or iterable of labels to array, for use in Index. + + Parameters + ---------- + dtype : dtype + If specified, use as dtype of the resulting array, otherwise infer. 
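+
+    A lone tuple is treated as a single label rather than unpacked; an
+    editorial doctest:
+
+    >>> index_labels_to_array(("a", "b"))
+    array([('a', 'b')], dtype=object)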
+ + Returns + ------- + array + """ + if isinstance(labels, (str, tuple)): + labels = [labels] + + if not isinstance(labels, (list, np.ndarray)): + try: + labels = list(labels) + except TypeError: # non-iterable + labels = [labels] + + labels = asarray_tuplesafe(labels, dtype=dtype) + + return labels + + +def maybe_make_list(obj): + if obj is not None and not isinstance(obj, (tuple, list)): + return [obj] + return obj + + +def maybe_iterable_to_list(obj: Iterable[T] | T) -> Collection[T] | T: + """ + If obj is Iterable but not list-like, consume into list. + """ + if isinstance(obj, abc.Iterable) and not isinstance(obj, abc.Sized): + return list(obj) + obj = cast(Collection, obj) + return obj + + +def is_null_slice(obj) -> bool: + """ + We have a null slice. + """ + return ( + isinstance(obj, slice) + and obj.start is None + and obj.stop is None + and obj.step is None + ) + + +def is_empty_slice(obj) -> bool: + """ + We have an empty slice, e.g. no values are selected. + """ + return ( + isinstance(obj, slice) + and obj.start is not None + and obj.stop is not None + and obj.start == obj.stop + ) + + +def is_true_slices(line) -> list[bool]: + """ + Find non-trivial slices in "line": return a list of booleans with same length. + """ + return [isinstance(k, slice) and not is_null_slice(k) for k in line] + + +# TODO: used only once in indexing; belongs elsewhere? +def is_full_slice(obj, line: int) -> bool: + """ + We have a full length slice. + """ + return ( + isinstance(obj, slice) + and obj.start == 0 + and obj.stop == line + and obj.step is None + ) + + +def get_callable_name(obj): + # typical case has name + if hasattr(obj, "__name__"): + return getattr(obj, "__name__") + # some objects don't; could recurse + if isinstance(obj, partial): + return get_callable_name(obj.func) + # fall back to class name + if callable(obj): + return type(obj).__name__ + # everything failed (probably because the argument + # wasn't actually callable); we return None + # instead of the empty string in this case to allow + # distinguishing between no name and a name of '' + return None + + +def apply_if_callable(maybe_callable, obj, **kwargs): + """ + Evaluate possibly callable input using obj and kwargs if it is callable, + otherwise return as it is. + + Parameters + ---------- + maybe_callable : possibly a callable + obj : NDFrame + **kwargs + """ + if callable(maybe_callable): + return maybe_callable(obj, **kwargs) + + return maybe_callable + + +def standardize_mapping(into): + """ + Helper function to standardize a supplied mapping. + + Parameters + ---------- + into : instance or subclass of collections.abc.Mapping + Must be a class, an initialized collections.defaultdict, + or an instance of a collections.abc.Mapping subclass. + + Returns + ------- + mapping : a collections.abc.Mapping subclass or other constructor + a callable object that can accept an iterator to create + the desired Mapping. + + See Also + -------- + DataFrame.to_dict + Series.to_dict + """ + if not inspect.isclass(into): + if isinstance(into, defaultdict): + return partial(defaultdict, into.default_factory) + into = type(into) + if not issubclass(into, abc.Mapping): + raise TypeError(f"unsupported type: {into}") + if into == defaultdict: + raise TypeError("to_dict() only accepts initialized defaultdicts") + return into + + +@overload +def random_state(state: np.random.Generator) -> np.random.Generator: + ... 
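+
+
+# Editorial sketch of the runtime behaviour implemented below:
+# random_state(42) returns a freshly seeded np.random.RandomState, while
+# random_state(None) simply hands back the np.random module itself.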
+ + +@overload +def random_state( + state: int | np.ndarray | np.random.BitGenerator | np.random.RandomState | None, +) -> np.random.RandomState: + ... + + +def random_state(state: RandomState | None = None): + """ + Helper function for processing random_state arguments. + + Parameters + ---------- + state : int, array-like, BitGenerator, Generator, np.random.RandomState, None. + If receives an int, array-like, or BitGenerator, passes to + np.random.RandomState() as seed. + If receives an np.random RandomState or Generator, just returns that unchanged. + If receives `None`, returns np.random. + If receives anything else, raises an informative ValueError. + + Default None. + + Returns + ------- + np.random.RandomState or np.random.Generator. If state is None, returns np.random + + """ + if is_integer(state) or isinstance(state, (np.ndarray, np.random.BitGenerator)): + return np.random.RandomState(state) + elif isinstance(state, np.random.RandomState): + return state + elif isinstance(state, np.random.Generator): + return state + elif state is None: + return np.random + else: + raise ValueError( + "random_state must be an integer, array-like, a BitGenerator, Generator, " + "a numpy RandomState, or None" + ) + + +def pipe( + obj, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs +) -> T: + """ + Apply a function ``func`` to object ``obj`` either by passing obj as the + first argument to the function or, in the case that the func is a tuple, + interpret the first element of the tuple as a function and pass the obj to + that function as a keyword argument whose key is the value of the second + element of the tuple. + + Parameters + ---------- + func : callable or tuple of (callable, str) + Function to apply to this object or, alternatively, a + ``(callable, data_keyword)`` tuple where ``data_keyword`` is a + string indicating the keyword of ``callable`` that expects the + object. + *args : iterable, optional + Positional arguments passed into ``func``. + **kwargs : dict, optional + A dictionary of keyword arguments passed into ``func``. + + Returns + ------- + object : the return type of ``func``. + """ + if isinstance(func, tuple): + func, target = func + if target in kwargs: + msg = f"{target} is both the pipe target and a keyword argument" + raise ValueError(msg) + kwargs[target] = obj + return func(*args, **kwargs) + else: + return func(obj, *args, **kwargs) + + +def get_rename_function(mapper): + """ + Returns a function that will map names/labels, dependent if mapper + is a dict, Series or just a function. + """ + + def f(x): + if x in mapper: + return mapper[x] + else: + return x + + return f if isinstance(mapper, (abc.Mapping, ABCSeries)) else mapper + + +def convert_to_list_like( + values: Hashable | Iterable | AnyArrayLike, +) -> list | AnyArrayLike: + """ + Convert list-like or scalar input to list-like. List, numpy and pandas array-like + inputs are returned unmodified whereas others are converted to list. + """ + if isinstance(values, (list, np.ndarray, ABCIndex, ABCSeries, ABCExtensionArray)): + return values + elif isinstance(values, abc.Iterable) and not isinstance(values, str): + return list(values) + + return [values] + + +@contextlib.contextmanager +def temp_setattr( + obj, attr: str, value, condition: bool = True +) -> Generator[None, None, None]: + """Temporarily set attribute on an object. + + Args: + obj: Object whose attribute will be modified. + attr: Attribute to modify. + value: Value to temporarily set attribute to. 
+ condition: Whether to set the attribute. Provided in order to not have to + conditionally use this context manager. + + Yields: + obj with modified attribute. + """ + if condition: + old_value = getattr(obj, attr) + setattr(obj, attr, value) + try: + yield obj + finally: + if condition: + setattr(obj, attr, old_value) + + +def require_length_match(data, index: Index) -> None: + """ + Check the length of data matches the length of the index. + """ + if len(data) != len(index): + raise ValueError( + "Length of values " + f"({len(data)}) " + "does not match length of index " + f"({len(index)})" + ) + + +# the ufuncs np.maximum.reduce and np.minimum.reduce default to axis=0, +# whereas np.min and np.max (which directly call obj.min and obj.max) +# default to axis=None. +_builtin_table = { + builtins.sum: np.sum, + builtins.max: np.maximum.reduce, + builtins.min: np.minimum.reduce, +} + +# GH#53425: Only for deprecation +_builtin_table_alias = { + builtins.sum: "np.sum", + builtins.max: "np.maximum.reduce", + builtins.min: "np.minimum.reduce", +} + +_cython_table = { + builtins.sum: "sum", + builtins.max: "max", + builtins.min: "min", + np.all: "all", + np.any: "any", + np.sum: "sum", + np.nansum: "sum", + np.mean: "mean", + np.nanmean: "mean", + np.prod: "prod", + np.nanprod: "prod", + np.std: "std", + np.nanstd: "std", + np.var: "var", + np.nanvar: "var", + np.median: "median", + np.nanmedian: "median", + np.max: "max", + np.nanmax: "max", + np.min: "min", + np.nanmin: "min", + np.cumprod: "cumprod", + np.nancumprod: "cumprod", + np.cumsum: "cumsum", + np.nancumsum: "cumsum", +} + + +def get_cython_func(arg: Callable) -> str | None: + """ + if we define an internal function for this argument, return it + """ + return _cython_table.get(arg) + + +def is_builtin_func(arg): + """ + if we define a builtin function for this argument, return it, + otherwise return the arg + """ + return _builtin_table.get(arg, arg) + + +def fill_missing_names(names: Sequence[Hashable | None]) -> list[Hashable]: + """ + If a name is missing then replace it by level_n, where n is the count + + .. versionadded:: 1.4.0 + + Parameters + ---------- + names : list-like + list of column names or None values. + + Returns + ------- + list + list of column names with the None values replaced. + """ + return [f"level_{i}" if name is None else name for i, name in enumerate(names)] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/align.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/align.py new file mode 100644 index 00000000..85d412d0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/align.py @@ -0,0 +1,213 @@ +""" +Core eval alignment algorithms. 
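+
+"Alignment" here means reconciling the indexes of every term in a parsed
+``pd.eval`` expression before computing: mismatched axes are outer-joined,
+much like ``a.align(b, join="outer")`` for two Series (editorial gloss).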
+""" +from __future__ import annotations + +from functools import ( + partial, + wraps, +) +from typing import ( + TYPE_CHECKING, + Callable, +) +import warnings + +import numpy as np + +from pandas.errors import PerformanceWarning +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) + +from pandas.core.base import PandasObject +import pandas.core.common as com +from pandas.core.computation.common import result_type_many + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import F + + from pandas.core.generic import NDFrame + from pandas.core.indexes.api import Index + + +def _align_core_single_unary_op( + term, +) -> tuple[partial | type[NDFrame], dict[str, Index] | None]: + typ: partial | type[NDFrame] + axes: dict[str, Index] | None = None + + if isinstance(term.value, np.ndarray): + typ = partial(np.asanyarray, dtype=term.value.dtype) + else: + typ = type(term.value) + if hasattr(term.value, "axes"): + axes = _zip_axes_from_type(typ, term.value.axes) + + return typ, axes + + +def _zip_axes_from_type( + typ: type[NDFrame], new_axes: Sequence[Index] +) -> dict[str, Index]: + return {name: new_axes[i] for i, name in enumerate(typ._AXIS_ORDERS)} + + +def _any_pandas_objects(terms) -> bool: + """ + Check a sequence of terms for instances of PandasObject. + """ + return any(isinstance(term.value, PandasObject) for term in terms) + + +def _filter_special_cases(f) -> Callable[[F], F]: + @wraps(f) + def wrapper(terms): + # single unary operand + if len(terms) == 1: + return _align_core_single_unary_op(terms[0]) + + term_values = (term.value for term in terms) + + # we don't have any pandas objects + if not _any_pandas_objects(terms): + return result_type_many(*term_values), None + + return f(terms) + + return wrapper + + +@_filter_special_cases +def _align_core(terms): + term_index = [i for i, term in enumerate(terms) if hasattr(term.value, "axes")] + term_dims = [terms[i].value.ndim for i in term_index] + + from pandas import Series + + ndims = Series(dict(zip(term_index, term_dims))) + + # initial axes are the axes of the largest-axis'd term + biggest = terms[ndims.idxmax()].value + typ = biggest._constructor + axes = biggest.axes + naxes = len(axes) + gt_than_one_axis = naxes > 1 + + for value in (terms[i].value for i in term_index): + is_series = isinstance(value, ABCSeries) + is_series_and_gt_one_axis = is_series and gt_than_one_axis + + for axis, items in enumerate(value.axes): + if is_series_and_gt_one_axis: + ax, itm = naxes - 1, value.index + else: + ax, itm = axis, items + + if not axes[ax].is_(itm): + axes[ax] = axes[ax].join(itm, how="outer") + + for i, ndim in ndims.items(): + for axis, items in zip(range(ndim), axes): + ti = terms[i].value + + if hasattr(ti, "reindex"): + transpose = isinstance(ti, ABCSeries) and naxes > 1 + reindexer = axes[naxes - 1] if transpose else items + + term_axis_size = len(ti.axes[axis]) + reindexer_size = len(reindexer) + + ordm = np.log10(max(1, abs(reindexer_size - term_axis_size))) + if ordm >= 1 and reindexer_size >= 10000: + w = ( + f"Alignment difference on axis {axis} is larger " + f"than an order of magnitude on term {repr(terms[i].name)}, " + f"by more than {ordm:.4g}; performance may suffer." 
+ ) + warnings.warn( + w, category=PerformanceWarning, stacklevel=find_stack_level() + ) + + obj = ti.reindex(reindexer, axis=axis, copy=False) + terms[i].update(obj) + + terms[i].update(terms[i].value.values) + + return typ, _zip_axes_from_type(typ, axes) + + +def align_terms(terms): + """ + Align a set of terms. + """ + try: + # flatten the parse tree (a nested list, really) + terms = list(com.flatten(terms)) + except TypeError: + # can't iterate so it must just be a constant or single variable + if isinstance(terms.value, (ABCSeries, ABCDataFrame)): + typ = type(terms.value) + return typ, _zip_axes_from_type(typ, terms.value.axes) + return np.result_type(terms.type), None + + # if all resolved variables are numeric scalars + if all(term.is_scalar for term in terms): + return result_type_many(*(term.value for term in terms)).type, None + + # perform the main alignment + typ, axes = _align_core(terms) + return typ, axes + + +def reconstruct_object(typ, obj, axes, dtype): + """ + Reconstruct an object given its type, raw value, and possibly empty + (None) axes. + + Parameters + ---------- + typ : object + A type + obj : object + The value to use in the type constructor + axes : dict + The axes to use to construct the resulting pandas object + + Returns + ------- + ret : typ + An object of type ``typ`` with the value `obj` and possible axes + `axes`. + """ + try: + typ = typ.type + except AttributeError: + pass + + res_t = np.result_type(obj.dtype, dtype) + + if not isinstance(typ, partial) and issubclass(typ, PandasObject): + return typ(obj, dtype=res_t, **axes) + + # special case for pathological things like ~True/~False + if hasattr(res_t, "type") and typ == np.bool_ and res_t != np.bool_: + ret_value = res_t.type(obj) + else: + ret_value = typ(obj).astype(res_t) + # The condition is to distinguish 0-dim array (returned in case of + # scalar) and 1 element array + # e.g. 
np.array(0) and np.array([0]) + if ( + len(obj.shape) == 1 + and len(obj) == 1 + and not isinstance(ret_value, np.ndarray) + ): + ret_value = np.array([ret_value]).astype(res_t) + + return ret_value diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/api.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/api.py new file mode 100644 index 00000000..bd3be5b3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/api.py @@ -0,0 +1,2 @@ +__all__ = ["eval"] +from pandas.core.computation.eval import eval diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/check.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/check.py new file mode 100644 index 00000000..3221b158 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/check.py @@ -0,0 +1,12 @@ +from __future__ import annotations + +from pandas.compat._optional import import_optional_dependency + +ne = import_optional_dependency("numexpr", errors="warn") +NUMEXPR_INSTALLED = ne is not None +if NUMEXPR_INSTALLED: + NUMEXPR_VERSION = ne.__version__ +else: + NUMEXPR_VERSION = None + +__all__ = ["NUMEXPR_INSTALLED", "NUMEXPR_VERSION"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/common.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/common.py new file mode 100644 index 00000000..11519182 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/common.py @@ -0,0 +1,48 @@ +from __future__ import annotations + +from functools import reduce + +import numpy as np + +from pandas._config import get_option + + +def ensure_decoded(s) -> str: + """ + If we have bytes, decode them to unicode. + """ + if isinstance(s, (np.bytes_, bytes)): + s = s.decode(get_option("display.encoding")) + return s + + +def result_type_many(*arrays_and_dtypes): + """ + Wrapper around numpy.result_type which overcomes the NPY_MAXARGS (32) + argument limit. 
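+
+    NumPy rejects calls with more than 32 operands, hence the ``reduce``
+    fallback below; an editorial doctest of the same idea:
+
+    >>> from functools import reduce
+    >>> reduce(np.result_type, [np.float32] * 40)
+    dtype('float32')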
+ """ + try: + return np.result_type(*arrays_and_dtypes) + except ValueError: + # we have > NPY_MAXARGS terms in our expression + return reduce(np.result_type, arrays_and_dtypes) + except TypeError: + from pandas.core.dtypes.cast import find_common_type + from pandas.core.dtypes.common import is_extension_array_dtype + + arr_and_dtypes = list(arrays_and_dtypes) + ea_dtypes, non_ea_dtypes = [], [] + for arr_or_dtype in arr_and_dtypes: + if is_extension_array_dtype(arr_or_dtype): + ea_dtypes.append(arr_or_dtype) + else: + non_ea_dtypes.append(arr_or_dtype) + + if non_ea_dtypes: + try: + np_dtype = np.result_type(*non_ea_dtypes) + except ValueError: + np_dtype = reduce(np.result_type, arrays_and_dtypes) + return find_common_type(ea_dtypes + [np_dtype]) + + return find_common_type(ea_dtypes) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/engines.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/engines.py new file mode 100644 index 00000000..a3a05a9d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/engines.py @@ -0,0 +1,143 @@ +""" +Engine classes for :func:`~pandas.eval` +""" +from __future__ import annotations + +import abc +from typing import TYPE_CHECKING + +from pandas.errors import NumExprClobberingError + +from pandas.core.computation.align import ( + align_terms, + reconstruct_object, +) +from pandas.core.computation.ops import ( + MATHOPS, + REDUCTIONS, +) + +from pandas.io.formats import printing + +if TYPE_CHECKING: + from pandas.core.computation.expr import Expr + +_ne_builtins = frozenset(MATHOPS + REDUCTIONS) + + +def _check_ne_builtin_clash(expr: Expr) -> None: + """ + Attempt to prevent foot-shooting in a helpful way. + + Parameters + ---------- + expr : Expr + Terms can contain + """ + names = expr.names + overlap = names & _ne_builtins + + if overlap: + s = ", ".join([repr(x) for x in overlap]) + raise NumExprClobberingError( + f'Variables in expression "{expr}" overlap with builtins: ({s})' + ) + + +class AbstractEngine(metaclass=abc.ABCMeta): + """Object serving as a base class for all engines.""" + + has_neg_frac = False + + def __init__(self, expr) -> None: + self.expr = expr + self.aligned_axes = None + self.result_type = None + + def convert(self) -> str: + """ + Convert an expression for evaluation. + + Defaults to return the expression as a string. + """ + return printing.pprint_thing(self.expr) + + def evaluate(self) -> object: + """ + Run the engine on the expression. + + This method performs alignment which is necessary no matter what engine + is being used, thus its implementation is in the base class. + + Returns + ------- + object + The result of the passed expression. + """ + if not self._is_aligned: + self.result_type, self.aligned_axes = align_terms(self.expr.terms) + + # make sure no names in resolvers and locals/globals clash + res = self._evaluate() + return reconstruct_object( + self.result_type, res, self.aligned_axes, self.expr.terms.return_type + ) + + @property + def _is_aligned(self) -> bool: + return self.aligned_axes is not None and self.result_type is not None + + @abc.abstractmethod + def _evaluate(self): + """ + Return an evaluated expression. + + Parameters + ---------- + env : Scope + The local and global environment in which to evaluate an + expression. + + Notes + ----- + Must be implemented by subclasses. 
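+
+        In this module that means either handing the converted string to an
+        external evaluator (``NumExprEngine`` calls ``numexpr.evaluate``) or
+        doing nothing (``PythonEngine`` sidesteps ``_evaluate`` entirely by
+        overriding ``evaluate``).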
+ """ + + +class NumExprEngine(AbstractEngine): + """NumExpr engine class""" + + has_neg_frac = True + + def _evaluate(self): + import numexpr as ne + + # convert the expression to a valid numexpr expression + s = self.convert() + + env = self.expr.env + scope = env.full_scope + _check_ne_builtin_clash(self.expr) + return ne.evaluate(s, local_dict=scope) + + +class PythonEngine(AbstractEngine): + """ + Evaluate an expression in Python space. + + Mostly for testing purposes. + """ + + has_neg_frac = False + + def evaluate(self): + return self.expr() + + def _evaluate(self) -> None: + pass + + +ENGINES: dict[str, type[AbstractEngine]] = { + "numexpr": NumExprEngine, + "python": PythonEngine, +} diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/eval.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/eval.py new file mode 100644 index 00000000..ce0c50a8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/eval.py @@ -0,0 +1,419 @@ +""" +Top level ``eval`` module. +""" +from __future__ import annotations + +import tokenize +from typing import TYPE_CHECKING +import warnings + +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import validate_bool_kwarg + +from pandas.core.dtypes.common import is_extension_array_dtype + +from pandas.core.computation.engines import ENGINES +from pandas.core.computation.expr import ( + PARSERS, + Expr, +) +from pandas.core.computation.parsing import tokenize_string +from pandas.core.computation.scope import ensure_scope +from pandas.core.generic import NDFrame + +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from pandas.core.computation.ops import BinOp + + +def _check_engine(engine: str | None) -> str: + """ + Make sure a valid engine is passed. + + Parameters + ---------- + engine : str + String to validate. + + Raises + ------ + KeyError + * If an invalid engine is passed. + ImportError + * If numexpr was requested but doesn't exist. + + Returns + ------- + str + Engine name. + """ + from pandas.core.computation.check import NUMEXPR_INSTALLED + from pandas.core.computation.expressions import USE_NUMEXPR + + if engine is None: + engine = "numexpr" if USE_NUMEXPR else "python" + + if engine not in ENGINES: + valid_engines = list(ENGINES.keys()) + raise KeyError( + f"Invalid engine '{engine}' passed, valid engines are {valid_engines}" + ) + + # TODO: validate this in a more general way (thinking of future engines + # that won't necessarily be import-able) + # Could potentially be done on engine instantiation + if engine == "numexpr" and not NUMEXPR_INSTALLED: + raise ImportError( + "'numexpr' is not installed or an unsupported version. Cannot use " + "engine='numexpr' for query/eval if 'numexpr' is not installed" + ) + + return engine + + +def _check_parser(parser: str): + """ + Make sure a valid parser is passed. 
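+
+    Only the keys of ``PARSERS`` ("pandas" and "python") are accepted;
+    anything else raises ``KeyError`` listing the valid choices.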
+ + Parameters + ---------- + parser : str + + Raises + ------ + KeyError + * If an invalid parser is passed + """ + if parser not in PARSERS: + raise KeyError( + f"Invalid parser '{parser}' passed, valid parsers are {PARSERS.keys()}" + ) + + +def _check_resolvers(resolvers): + if resolvers is not None: + for resolver in resolvers: + if not hasattr(resolver, "__getitem__"): + name = type(resolver).__name__ + raise TypeError( + f"Resolver of type '{name}' does not " + "implement the __getitem__ method" + ) + + +def _check_expression(expr): + """ + Make sure an expression is not an empty string + + Parameters + ---------- + expr : object + An object that can be converted to a string + + Raises + ------ + ValueError + * If expr is an empty string + """ + if not expr: + raise ValueError("expr cannot be an empty string") + + +def _convert_expression(expr) -> str: + """ + Convert an object to an expression. + + This function converts an object to an expression (a unicode string) and + checks to make sure it isn't empty after conversion. This is used to + convert operators to their string representation for recursive calls to + :func:`~pandas.eval`. + + Parameters + ---------- + expr : object + The object to be converted to a string. + + Returns + ------- + str + The string representation of an object. + + Raises + ------ + ValueError + * If the expression is empty. + """ + s = pprint_thing(expr) + _check_expression(s) + return s + + +def _check_for_locals(expr: str, stack_level: int, parser: str): + at_top_of_stack = stack_level == 0 + not_pandas_parser = parser != "pandas" + + if not_pandas_parser: + msg = "The '@' prefix is only supported by the pandas parser" + elif at_top_of_stack: + msg = ( + "The '@' prefix is not allowed in top-level eval calls.\n" + "please refer to your variables by name without the '@' prefix." + ) + + if at_top_of_stack or not_pandas_parser: + for toknum, tokval in tokenize_string(expr): + if toknum == tokenize.OP and tokval == "@": + raise SyntaxError(msg) + + +def eval( + expr: str | BinOp, # we leave BinOp out of the docstr bc it isn't for users + parser: str = "pandas", + engine: str | None = None, + local_dict=None, + global_dict=None, + resolvers=(), + level: int = 0, + target=None, + inplace: bool = False, +): + """ + Evaluate a Python expression as a string using various backends. + + The following arithmetic operations are supported: ``+``, ``-``, ``*``, + ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following + boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). + Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, + :keyword:`or`, and :keyword:`not` with the same semantics as the + corresponding bitwise operators. :class:`~pandas.Series` and + :class:`~pandas.DataFrame` objects are supported and behave as they would + with plain ol' Python evaluation. + + Parameters + ---------- + expr : str + The expression to evaluate. This string cannot contain any Python + `statements + `__, + only Python `expressions + `__. + parser : {'pandas', 'python'}, default 'pandas' + The parser to use to construct the syntax tree from the expression. The + default of ``'pandas'`` parses code slightly different than standard + Python. Alternatively, you can parse an expression using the + ``'python'`` parser to retain strict Python semantics. See the + :ref:`enhancing performance ` documentation for + more details. + engine : {'python', 'numexpr'}, default 'numexpr' + + The engine used to evaluate the expression. 
Supported engines are + + - None : tries to use ``numexpr``, falls back to ``python`` + - ``'numexpr'`` : This default engine evaluates pandas objects using + numexpr for large speed ups in complex expressions with large frames. + - ``'python'`` : Performs operations as if you had ``eval``'d in top + level python. This engine is generally not that useful. + + More backends may be available in the future. + local_dict : dict or None, optional + A dictionary of local variables, taken from locals() by default. + global_dict : dict or None, optional + A dictionary of global variables, taken from globals() by default. + resolvers : list of dict-like or None, optional + A list of objects implementing the ``__getitem__`` special method that + you can use to inject an additional collection of namespaces to use for + variable lookup. For example, this is used in the + :meth:`~DataFrame.query` method to inject the + ``DataFrame.index`` and ``DataFrame.columns`` + variables that refer to their respective :class:`~pandas.DataFrame` + instance attributes. + level : int, optional + The number of prior stack frames to traverse and add to the current + scope. Most users will **not** need to change this parameter. + target : object, optional, default None + This is the target object for assignment. It is used when there is + variable assignment in the expression. If so, then `target` must + support item assignment with string keys, and if a copy is being + returned, it must also support `.copy()`. + inplace : bool, default False + If `target` is provided, and the expression mutates `target`, whether + to modify `target` inplace. Otherwise, return a copy of `target` with + the mutation. + + Returns + ------- + ndarray, numeric scalar, DataFrame, Series, or None + The completion value of evaluating the given code or None if ``inplace=True``. + + Raises + ------ + ValueError + There are many instances where such an error can be raised: + + - `target=None`, but the expression is multiline. + - The expression is multiline, but not all them have item assignment. + An example of such an arrangement is this: + + a = b + 1 + a + 2 + + Here, there are expressions on different lines, making it multiline, + but the last line has no variable assigned to the output of `a + 2`. + - `inplace=True`, but the expression is missing item assignment. + - Item assignment is provided, but the `target` does not support + string item assignment. + - Item assignment is provided and `inplace=False`, but the `target` + does not support the `.copy()` method + + See Also + -------- + DataFrame.query : Evaluates a boolean expression to query the columns + of a frame. + DataFrame.eval : Evaluate a string describing operations on + DataFrame columns. + + Notes + ----- + The ``dtype`` of any objects involved in an arithmetic ``%`` operation are + recursively cast to ``float64``. + + See the :ref:`enhancing performance ` documentation for + more details. 
+ + Examples + -------- + >>> df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]}) + >>> df + animal age + 0 dog 10 + 1 pig 20 + + We can add a new column using ``pd.eval``: + + >>> pd.eval("double_age = df.age * 2", target=df) + animal age double_age + 0 dog 10 20 + 1 pig 20 40 + """ + inplace = validate_bool_kwarg(inplace, "inplace") + + exprs: list[str | BinOp] + if isinstance(expr, str): + _check_expression(expr) + exprs = [e.strip() for e in expr.splitlines() if e.strip() != ""] + else: + # ops.BinOp; for internal compat, not intended to be passed by users + exprs = [expr] + multi_line = len(exprs) > 1 + + if multi_line and target is None: + raise ValueError( + "multi-line expressions are only valid in the " + "context of data, use DataFrame.eval" + ) + engine = _check_engine(engine) + _check_parser(parser) + _check_resolvers(resolvers) + + ret = None + first_expr = True + target_modified = False + + for expr in exprs: + expr = _convert_expression(expr) + _check_for_locals(expr, level, parser) + + # get our (possibly passed-in) scope + env = ensure_scope( + level + 1, + global_dict=global_dict, + local_dict=local_dict, + resolvers=resolvers, + target=target, + ) + + parsed_expr = Expr(expr, engine=engine, parser=parser, env=env) + + if engine == "numexpr" and ( + is_extension_array_dtype(parsed_expr.terms.return_type) + or getattr(parsed_expr.terms, "operand_types", None) is not None + and any( + is_extension_array_dtype(elem) + for elem in parsed_expr.terms.operand_types + ) + ): + warnings.warn( + "Engine has switched to 'python' because numexpr does not support " + "extension array dtypes. Please set your engine to python manually.", + RuntimeWarning, + stacklevel=find_stack_level(), + ) + engine = "python" + + # construct the engine and evaluate the parsed expression + eng = ENGINES[engine] + eng_inst = eng(parsed_expr) + ret = eng_inst.evaluate() + + if parsed_expr.assigner is None: + if multi_line: + raise ValueError( + "Multi-line expressions are only valid " + "if all expressions contain an assignment" + ) + if inplace: + raise ValueError("Cannot operate inplace if there is no assignment") + + # assign if needed + assigner = parsed_expr.assigner + if env.target is not None and assigner is not None: + target_modified = True + + # if returning a copy, copy only on the first assignment + if not inplace and first_expr: + try: + target = env.target + if isinstance(target, NDFrame): + target = target.copy(deep=None) + else: + target = target.copy() + except AttributeError as err: + raise ValueError("Cannot return a copy of the target") from err + else: + target = env.target + + # TypeError is most commonly raised (e.g. int, list), but you + # get IndexError if you try to do this assignment on np.ndarray. + # we will ignore numpy warnings here; e.g. if trying + # to use a non-numeric indexer + try: + with warnings.catch_warnings(record=True): + # TODO: Filter the warnings we actually care about here. 
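+                # Editorial note: in-place NDFrame targets go through
+                # .loc so the assignment aligns on the index; anything
+                # else (dict-likes, fresh copies) uses plain __setitem__.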
+ if inplace and isinstance(target, NDFrame): + target.loc[:, assigner] = ret + else: + target[ # pyright: ignore[reportGeneralTypeIssues] + assigner + ] = ret + except (TypeError, IndexError) as err: + raise ValueError("Cannot assign expression output to target") from err + + if not resolvers: + resolvers = ({assigner: ret},) + else: + # existing resolver needs updated to handle + # case of mutating existing column in copy + for resolver in resolvers: + if assigner in resolver: + resolver[assigner] = ret + break + else: + resolvers += ({assigner: ret},) + + ret = None + first_expr = False + + # We want to exclude `inplace=None` as being False. + if inplace is False: + return target if target_modified else ret diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/expr.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/expr.py new file mode 100644 index 00000000..2f948567 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/expr.py @@ -0,0 +1,839 @@ +""" +:func:`~pandas.eval` parsers. +""" +from __future__ import annotations + +import ast +from functools import ( + partial, + reduce, +) +from keyword import iskeyword +import tokenize +from typing import ( + Callable, + TypeVar, +) + +import numpy as np + +from pandas.errors import UndefinedVariableError + +import pandas.core.common as com +from pandas.core.computation.ops import ( + ARITH_OPS_SYMS, + BOOL_OPS_SYMS, + CMP_OPS_SYMS, + LOCAL_TAG, + MATHOPS, + REDUCTIONS, + UNARY_OPS_SYMS, + BinOp, + Constant, + Div, + FuncNode, + Op, + Term, + UnaryOp, + is_term, +) +from pandas.core.computation.parsing import ( + clean_backtick_quoted_toks, + tokenize_string, +) +from pandas.core.computation.scope import Scope + +from pandas.io.formats import printing + + +def _rewrite_assign(tok: tuple[int, str]) -> tuple[int, str]: + """ + Rewrite the assignment operator for PyTables expressions that use ``=`` + as a substitute for ``==``. + + Parameters + ---------- + tok : tuple of int, str + ints correspond to the all caps constants in the tokenize module + + Returns + ------- + tuple of int, str + Either the input or token or the replacement values + """ + toknum, tokval = tok + return toknum, "==" if tokval == "=" else tokval + + +def _replace_booleans(tok: tuple[int, str]) -> tuple[int, str]: + """ + Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise + precedence is changed to boolean precedence. + + Parameters + ---------- + tok : tuple of int, str + ints correspond to the all caps constants in the tokenize module + + Returns + ------- + tuple of int, str + Either the input or token or the replacement values + """ + toknum, tokval = tok + if toknum == tokenize.OP: + if tokval == "&": + return tokenize.NAME, "and" + elif tokval == "|": + return tokenize.NAME, "or" + return toknum, tokval + return toknum, tokval + + +def _replace_locals(tok: tuple[int, str]) -> tuple[int, str]: + """ + Replace local variables with a syntactically valid name. + + Parameters + ---------- + tok : tuple of int, str + ints correspond to the all caps constants in the tokenize module + + Returns + ------- + tuple of int, str + Either the input or token or the replacement values + + Notes + ----- + This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as + ``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_`` + is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it. 
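+
+    An editorial doctest of the rewrite:
+
+    >>> toknum, tokval = _replace_locals((tokenize.OP, "@"))
+    >>> tokval
+    '__pd_eval_local_'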
+ """ + toknum, tokval = tok + if toknum == tokenize.OP and tokval == "@": + return tokenize.OP, LOCAL_TAG + return toknum, tokval + + +def _compose2(f, g): + """ + Compose 2 callables. + """ + return lambda *args, **kwargs: f(g(*args, **kwargs)) + + +def _compose(*funcs): + """ + Compose 2 or more callables. + """ + assert len(funcs) > 1, "At least 2 callables must be passed to compose" + return reduce(_compose2, funcs) + + +def _preparse( + source: str, + f=_compose( + _replace_locals, _replace_booleans, _rewrite_assign, clean_backtick_quoted_toks + ), +) -> str: + """ + Compose a collection of tokenization functions. + + Parameters + ---------- + source : str + A Python source code string + f : callable + This takes a tuple of (toknum, tokval) as its argument and returns a + tuple with the same structure but possibly different elements. Defaults + to the composition of ``_rewrite_assign``, ``_replace_booleans``, and + ``_replace_locals``. + + Returns + ------- + str + Valid Python source code + + Notes + ----- + The `f` parameter can be any callable that takes *and* returns input of the + form ``(toknum, tokval)``, where ``toknum`` is one of the constants from + the ``tokenize`` module and ``tokval`` is a string. + """ + assert callable(f), "f must be callable" + return tokenize.untokenize(f(x) for x in tokenize_string(source)) + + +def _is_type(t): + """ + Factory for a type checking function of type ``t`` or tuple of types. + """ + return lambda x: isinstance(x.value, t) + + +_is_list = _is_type(list) +_is_str = _is_type(str) + + +# partition all AST nodes +_all_nodes = frozenset( + node + for node in (getattr(ast, name) for name in dir(ast)) + if isinstance(node, type) and issubclass(node, ast.AST) +) + + +def _filter_nodes(superclass, all_nodes=_all_nodes): + """ + Filter out AST nodes that are subclasses of ``superclass``. 
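+
+    Despite the name, the returned frozenset *keeps* the names of the
+    subclasses; an editorial doctest:
+
+    >>> sorted(_filter_nodes(ast.unaryop))
+    ['Invert', 'Not', 'UAdd', 'USub', 'unaryop']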
+ """ + node_names = (node.__name__ for node in all_nodes if issubclass(node, superclass)) + return frozenset(node_names) + + +_all_node_names = frozenset(x.__name__ for x in _all_nodes) +_mod_nodes = _filter_nodes(ast.mod) +_stmt_nodes = _filter_nodes(ast.stmt) +_expr_nodes = _filter_nodes(ast.expr) +_expr_context_nodes = _filter_nodes(ast.expr_context) +_boolop_nodes = _filter_nodes(ast.boolop) +_operator_nodes = _filter_nodes(ast.operator) +_unary_op_nodes = _filter_nodes(ast.unaryop) +_cmp_op_nodes = _filter_nodes(ast.cmpop) +_comprehension_nodes = _filter_nodes(ast.comprehension) +_handler_nodes = _filter_nodes(ast.excepthandler) +_arguments_nodes = _filter_nodes(ast.arguments) +_keyword_nodes = _filter_nodes(ast.keyword) +_alias_nodes = _filter_nodes(ast.alias) + + +# nodes that we don't support directly but are needed for parsing +_hacked_nodes = frozenset(["Assign", "Module", "Expr"]) + + +_unsupported_expr_nodes = frozenset( + [ + "Yield", + "GeneratorExp", + "IfExp", + "DictComp", + "SetComp", + "Repr", + "Lambda", + "Set", + "AST", + "Is", + "IsNot", + ] +) + +# these nodes are low priority or won't ever be supported (e.g., AST) +_unsupported_nodes = ( + _stmt_nodes + | _mod_nodes + | _handler_nodes + | _arguments_nodes + | _keyword_nodes + | _alias_nodes + | _expr_context_nodes + | _unsupported_expr_nodes +) - _hacked_nodes + +# we're adding a different assignment in some cases to be equality comparison +# and we don't want `stmt` and friends in their so get only the class whose +# names are capitalized +_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes +intersection = _unsupported_nodes & _base_supported_nodes +_msg = f"cannot both support and not support {intersection}" +assert not intersection, _msg + + +def _node_not_implemented(node_name: str) -> Callable[..., None]: + """ + Return a function that raises a NotImplementedError with a passed node name. + """ + + def f(self, *args, **kwargs): + raise NotImplementedError(f"'{node_name}' nodes are not implemented") + + return f + + +# should be bound by BaseExprVisitor but that creates a circular dependency: +# _T is used in disallow, but disallow is used to define BaseExprVisitor +# https://github.com/microsoft/pyright/issues/2315 +_T = TypeVar("_T") + + +def disallow(nodes: set[str]) -> Callable[[type[_T]], type[_T]]: + """ + Decorator to disallow certain nodes from parsing. Raises a + NotImplementedError instead. + + Returns + ------- + callable + """ + + def disallowed(cls: type[_T]) -> type[_T]: + # error: "Type[_T]" has no attribute "unsupported_nodes" + cls.unsupported_nodes = () # type: ignore[attr-defined] + for node in nodes: + new_method = _node_not_implemented(node) + name = f"visit_{node}" + # error: "Type[_T]" has no attribute "unsupported_nodes" + cls.unsupported_nodes += (name,) # type: ignore[attr-defined] + setattr(cls, name, new_method) + return cls + + return disallowed + + +def _op_maker(op_class, op_symbol): + """ + Return a function to create an op class with its symbol already passed. + + Returns + ------- + callable + """ + + def f(self, node, *args, **kwargs): + """ + Return a partial function with an Op subclass with an operator already passed. + + Returns + ------- + callable + """ + return partial(op_class, op_symbol, *args, **kwargs) + + return f + + +_op_classes = {"binary": BinOp, "unary": UnaryOp} + + +def add_ops(op_classes): + """ + Decorator to add default implementation of ops. 
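+
+    Concretely, for each operator symbol with a mapped AST node name it
+    attaches a ``visit_<Node>`` method built by ``_op_maker``, so e.g.
+    ``visit_Add`` yields ``partial(BinOp, '+')`` ready to receive its
+    operands (editorial gloss of the loop below).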
+ """ + + def f(cls): + for op_attr_name, op_class in op_classes.items(): + ops = getattr(cls, f"{op_attr_name}_ops") + ops_map = getattr(cls, f"{op_attr_name}_op_nodes_map") + for op in ops: + op_node = ops_map[op] + if op_node is not None: + made_op = _op_maker(op_class, op) + setattr(cls, f"visit_{op_node}", made_op) + return cls + + return f + + +@disallow(_unsupported_nodes) +@add_ops(_op_classes) +class BaseExprVisitor(ast.NodeVisitor): + """ + Custom ast walker. Parsers of other engines should subclass this class + if necessary. + + Parameters + ---------- + env : Scope + engine : str + parser : str + preparser : callable + """ + + const_type: type[Term] = Constant + term_type = Term + + binary_ops = CMP_OPS_SYMS + BOOL_OPS_SYMS + ARITH_OPS_SYMS + binary_op_nodes = ( + "Gt", + "Lt", + "GtE", + "LtE", + "Eq", + "NotEq", + "In", + "NotIn", + "BitAnd", + "BitOr", + "And", + "Or", + "Add", + "Sub", + "Mult", + None, + "Pow", + "FloorDiv", + "Mod", + ) + binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes)) + + unary_ops = UNARY_OPS_SYMS + unary_op_nodes = "UAdd", "USub", "Invert", "Not" + unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes)) + + rewrite_map = { + ast.Eq: ast.In, + ast.NotEq: ast.NotIn, + ast.In: ast.In, + ast.NotIn: ast.NotIn, + } + + unsupported_nodes: tuple[str, ...] + + def __init__(self, env, engine, parser, preparser=_preparse) -> None: + self.env = env + self.engine = engine + self.parser = parser + self.preparser = preparser + self.assigner = None + + def visit(self, node, **kwargs): + if isinstance(node, str): + clean = self.preparser(node) + try: + node = ast.fix_missing_locations(ast.parse(clean)) + except SyntaxError as e: + if any(iskeyword(x) for x in clean.split()): + e.msg = "Python keyword not valid identifier in numexpr query" + raise e + + method = f"visit_{type(node).__name__}" + visitor = getattr(self, method) + return visitor(node, **kwargs) + + def visit_Module(self, node, **kwargs): + if len(node.body) != 1: + raise SyntaxError("only a single expression is allowed") + expr = node.body[0] + return self.visit(expr, **kwargs) + + def visit_Expr(self, node, **kwargs): + return self.visit(node.value, **kwargs) + + def _rewrite_membership_op(self, node, left, right): + # the kind of the operator (is actually an instance) + op_instance = node.op + op_type = type(op_instance) + + # must be two terms and the comparison operator must be ==/!=/in/not in + if is_term(left) and is_term(right) and op_type in self.rewrite_map: + left_list, right_list = map(_is_list, (left, right)) + left_str, right_str = map(_is_str, (left, right)) + + # if there are any strings or lists in the expression + if left_list or right_list or left_str or right_str: + op_instance = self.rewrite_map[op_type]() + + # pop the string variable out of locals and replace it with a list + # of one string, kind of a hack + if right_str: + name = self.env.add_tmp([right.value]) + right = self.term_type(name, self.env) + + if left_str: + name = self.env.add_tmp([left.value]) + left = self.term_type(name, self.env) + + op = self.visit(op_instance) + return op, op_instance, left, right + + def _maybe_transform_eq_ne(self, node, left=None, right=None): + if left is None: + left = self.visit(node.left, side="left") + if right is None: + right = self.visit(node.right, side="right") + op, op_class, left, right = self._rewrite_membership_op(node, left, right) + return op, op_class, left, right + + def _maybe_downcast_constants(self, left, right): + f32 = np.dtype(np.float32) + if ( + 
left.is_scalar + and hasattr(left, "value") + and not right.is_scalar + and right.return_type == f32 + ): + # right is a float32 array, left is a scalar + name = self.env.add_tmp(np.float32(left.value)) + left = self.term_type(name, self.env) + if ( + right.is_scalar + and hasattr(right, "value") + and not left.is_scalar + and left.return_type == f32 + ): + # left is a float32 array, right is a scalar + name = self.env.add_tmp(np.float32(right.value)) + right = self.term_type(name, self.env) + + return left, right + + def _maybe_eval(self, binop, eval_in_python): + # eval `in` and `not in` (for now) in "partial" python space + # things that can be evaluated in "eval" space will be turned into + # temporary variables. for example, + # [1,2] in a + 2 * b + # in that case a + 2 * b will be evaluated using numexpr, and the "in" + # call will be evaluated using isin (in python space) + return binop.evaluate( + self.env, self.engine, self.parser, self.term_type, eval_in_python + ) + + def _maybe_evaluate_binop( + self, + op, + op_class, + lhs, + rhs, + eval_in_python=("in", "not in"), + maybe_eval_in_python=("==", "!=", "<", ">", "<=", ">="), + ): + res = op(lhs, rhs) + + if res.has_invalid_return_type: + raise TypeError( + f"unsupported operand type(s) for {res.op}: " + f"'{lhs.type}' and '{rhs.type}'" + ) + + if self.engine != "pytables" and ( + res.op in CMP_OPS_SYMS + and getattr(lhs, "is_datetime", False) + or getattr(rhs, "is_datetime", False) + ): + # all date ops must be done in python bc numexpr doesn't work + # well with NaT + return self._maybe_eval(res, self.binary_ops) + + if res.op in eval_in_python: + # "in"/"not in" ops are always evaluated in python + return self._maybe_eval(res, eval_in_python) + elif self.engine != "pytables": + if ( + getattr(lhs, "return_type", None) == object + or getattr(rhs, "return_type", None) == object + ): + # evaluate "==" and "!=" in python if either of our operands + # has an object return type + return self._maybe_eval(res, eval_in_python + maybe_eval_in_python) + return res + + def visit_BinOp(self, node, **kwargs): + op, op_class, left, right = self._maybe_transform_eq_ne(node) + left, right = self._maybe_downcast_constants(left, right) + return self._maybe_evaluate_binop(op, op_class, left, right) + + def visit_Div(self, node, **kwargs): + return lambda lhs, rhs: Div(lhs, rhs) + + def visit_UnaryOp(self, node, **kwargs): + op = self.visit(node.op) + operand = self.visit(node.operand) + return op(operand) + + def visit_Name(self, node, **kwargs): + return self.term_type(node.id, self.env, **kwargs) + + # TODO(py314): deprecated since Python 3.8. Remove after Python 3.14 is min + def visit_NameConstant(self, node, **kwargs) -> Term: + return self.const_type(node.value, self.env) + + # TODO(py314): deprecated since Python 3.8. Remove after Python 3.14 is min + def visit_Num(self, node, **kwargs) -> Term: + return self.const_type(node.value, self.env) + + def visit_Constant(self, node, **kwargs) -> Term: + return self.const_type(node.value, self.env) + + # TODO(py314): deprecated since Python 3.8. 
Remove after Python 3.14 is min + def visit_Str(self, node, **kwargs): + name = self.env.add_tmp(node.s) + return self.term_type(name, self.env) + + def visit_List(self, node, **kwargs): + name = self.env.add_tmp([self.visit(e)(self.env) for e in node.elts]) + return self.term_type(name, self.env) + + visit_Tuple = visit_List + + def visit_Index(self, node, **kwargs): + """df.index[4]""" + return self.visit(node.value) + + def visit_Subscript(self, node, **kwargs): + from pandas import eval as pd_eval + + value = self.visit(node.value) + slobj = self.visit(node.slice) + result = pd_eval( + slobj, local_dict=self.env, engine=self.engine, parser=self.parser + ) + try: + # a Term instance + v = value.value[result] + except AttributeError: + # an Op instance + lhs = pd_eval( + value, local_dict=self.env, engine=self.engine, parser=self.parser + ) + v = lhs[result] + name = self.env.add_tmp(v) + return self.term_type(name, env=self.env) + + def visit_Slice(self, node, **kwargs): + """df.index[slice(4,6)]""" + lower = node.lower + if lower is not None: + lower = self.visit(lower).value + upper = node.upper + if upper is not None: + upper = self.visit(upper).value + step = node.step + if step is not None: + step = self.visit(step).value + + return slice(lower, upper, step) + + def visit_Assign(self, node, **kwargs): + """ + support a single assignment node, like + + c = a + b + + set the assigner at the top level, must be a Name node which + might or might not exist in the resolvers + + """ + if len(node.targets) != 1: + raise SyntaxError("can only assign a single expression") + if not isinstance(node.targets[0], ast.Name): + raise SyntaxError("left hand side of an assignment must be a single name") + if self.env.target is None: + raise ValueError("cannot assign without a target object") + + try: + assigner = self.visit(node.targets[0], **kwargs) + except UndefinedVariableError: + assigner = node.targets[0].id + + self.assigner = getattr(assigner, "name", assigner) + if self.assigner is None: + raise SyntaxError( + "left hand side of an assignment must be a single resolvable name" + ) + + return self.visit(node.value, **kwargs) + + def visit_Attribute(self, node, **kwargs): + attr = node.attr + value = node.value + + ctx = node.ctx + if isinstance(ctx, ast.Load): + # resolve the value + resolved = self.visit(value).value + try: + v = getattr(resolved, attr) + name = self.env.add_tmp(v) + return self.term_type(name, self.env) + except AttributeError: + # something like datetime.datetime where scope is overridden + if isinstance(value, ast.Name) and value.id == attr: + return resolved + raise + + raise ValueError(f"Invalid Attribute context {type(ctx).__name__}") + + def visit_Call(self, node, side=None, **kwargs): + if isinstance(node.func, ast.Attribute) and node.func.attr != "__call__": + res = self.visit_Attribute(node.func) + elif not isinstance(node.func, ast.Name): + raise TypeError("Only named functions are supported") + else: + try: + res = self.visit(node.func) + except UndefinedVariableError: + # Check if this is a supported function name + try: + res = FuncNode(node.func.id) + except ValueError: + # Raise original error + raise + + if res is None: + # error: "expr" has no attribute "id" + raise ValueError( + f"Invalid function call {node.func.id}" # type: ignore[attr-defined] + ) + if hasattr(res, "value"): + res = res.value + + if isinstance(res, FuncNode): + new_args = [self.visit(arg) for arg in node.args] + + if node.keywords: + raise TypeError( + f'Function "{res.name}" does not 
support keyword arguments' + ) + + return res(*new_args) + + else: + new_args = [self.visit(arg)(self.env) for arg in node.args] + + for key in node.keywords: + if not isinstance(key, ast.keyword): + # error: "expr" has no attribute "id" + raise ValueError( + "keyword error in function call " # type: ignore[attr-defined] + f"'{node.func.id}'" + ) + + if key.arg: + kwargs[key.arg] = self.visit(key.value)(self.env) + + name = self.env.add_tmp(res(*new_args, **kwargs)) + return self.term_type(name=name, env=self.env) + + def translate_In(self, op): + return op + + def visit_Compare(self, node, **kwargs): + ops = node.ops + comps = node.comparators + + # base case: we have something like a CMP b + if len(comps) == 1: + op = self.translate_In(ops[0]) + binop = ast.BinOp(op=op, left=node.left, right=comps[0]) + return self.visit(binop) + + # recursive case: we have a chained comparison, a CMP b CMP c, etc. + left = node.left + values = [] + for op, comp in zip(ops, comps): + new_node = self.visit( + ast.Compare(comparators=[comp], left=left, ops=[self.translate_In(op)]) + ) + left = comp + values.append(new_node) + return self.visit(ast.BoolOp(op=ast.And(), values=values)) + + def _try_visit_binop(self, bop): + if isinstance(bop, (Op, Term)): + return bop + return self.visit(bop) + + def visit_BoolOp(self, node, **kwargs): + def visitor(x, y): + lhs = self._try_visit_binop(x) + rhs = self._try_visit_binop(y) + + op, op_class, lhs, rhs = self._maybe_transform_eq_ne(node, lhs, rhs) + return self._maybe_evaluate_binop(op, node.op, lhs, rhs) + + operands = node.values + return reduce(visitor, operands) + + +_python_not_supported = frozenset(["Dict", "BoolOp", "In", "NotIn"]) +_numexpr_supported_calls = frozenset(REDUCTIONS + MATHOPS) + + +@disallow( + (_unsupported_nodes | _python_not_supported) + - (_boolop_nodes | frozenset(["BoolOp", "Attribute", "In", "NotIn", "Tuple"])) +) +class PandasExprVisitor(BaseExprVisitor): + def __init__( + self, + env, + engine, + parser, + preparser=partial( + _preparse, + f=_compose(_replace_locals, _replace_booleans, clean_backtick_quoted_toks), + ), + ) -> None: + super().__init__(env, engine, parser, preparser) + + +@disallow(_unsupported_nodes | _python_not_supported | frozenset(["Not"])) +class PythonExprVisitor(BaseExprVisitor): + def __init__( + self, env, engine, parser, preparser=lambda source, f=None: source + ) -> None: + super().__init__(env, engine, parser, preparser=preparser) + + +class Expr: + """ + Object encapsulating an expression. + + Parameters + ---------- + expr : str + engine : str, optional, default 'numexpr' + parser : str, optional, default 'pandas' + env : Scope, optional, default None + level : int, optional, default 2 + """ + + env: Scope + engine: str + parser: str + + def __init__( + self, + expr, + engine: str = "numexpr", + parser: str = "pandas", + env: Scope | None = None, + level: int = 0, + ) -> None: + self.expr = expr + self.env = env or Scope(level=level + 1) + self.engine = engine + self.parser = parser + self._visitor = PARSERS[parser](self.env, self.engine, self.parser) + self.terms = self.parse() + + @property + def assigner(self): + return getattr(self._visitor, "assigner", None) + + def __call__(self): + return self.terms(self.env) + + def __repr__(self) -> str: + return printing.pprint_thing(self.terms) + + def __len__(self) -> int: + return len(self.expr) + + def parse(self): + """ + Parse an expression. 
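+
+        A minimal sketch, assuming ``a`` and ``b`` resolve in the calling
+        scope:
+
+        >>> a, b = 1, 2
+        >>> Expr("a + b", engine="python", parser="python")()
+        3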
+ """ + return self._visitor.visit(self.expr) + + @property + def names(self): + """ + Get the names in an expression. + """ + if is_term(self.terms): + return frozenset([self.terms.name]) + return frozenset(term.name for term in com.flatten(self.terms)) + + +PARSERS = {"python": PythonExprVisitor, "pandas": PandasExprVisitor} diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/expressions.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/expressions.py new file mode 100644 index 00000000..6219cac4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/expressions.py @@ -0,0 +1,286 @@ +""" +Expressions +----------- + +Offer fast expression evaluation through numexpr + +""" +from __future__ import annotations + +import operator +from typing import TYPE_CHECKING +import warnings + +import numpy as np + +from pandas._config import get_option + +from pandas.util._exceptions import find_stack_level + +from pandas.core import roperator +from pandas.core.computation.check import NUMEXPR_INSTALLED + +if NUMEXPR_INSTALLED: + import numexpr as ne + +if TYPE_CHECKING: + from pandas._typing import FuncType + +_TEST_MODE: bool | None = None +_TEST_RESULT: list[bool] = [] +USE_NUMEXPR = NUMEXPR_INSTALLED +_evaluate: FuncType | None = None +_where: FuncType | None = None + +# the set of dtypes that we will allow pass to numexpr +_ALLOWED_DTYPES = { + "evaluate": {"int64", "int32", "float64", "float32", "bool"}, + "where": {"int64", "float64", "bool"}, +} + +# the minimum prod shape that we will use numexpr +_MIN_ELEMENTS = 1_000_000 + + +def set_use_numexpr(v: bool = True) -> None: + # set/unset to use numexpr + global USE_NUMEXPR + if NUMEXPR_INSTALLED: + USE_NUMEXPR = v + + # choose what we are going to do + global _evaluate, _where + + _evaluate = _evaluate_numexpr if USE_NUMEXPR else _evaluate_standard + _where = _where_numexpr if USE_NUMEXPR else _where_standard + + +def set_numexpr_threads(n=None) -> None: + # if we are using numexpr, set the threads to n + # otherwise reset + if NUMEXPR_INSTALLED and USE_NUMEXPR: + if n is None: + n = ne.detect_number_of_cores() + ne.set_num_threads(n) + + +def _evaluate_standard(op, op_str, a, b): + """ + Standard evaluation. 
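+
+    Applies ``op`` directly in Python space, bypassing numexpr:
+
+    >>> import operator
+    >>> _evaluate_standard(operator.add, "+", 1, 2)
+    3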
+ """ + if _TEST_MODE: + _store_test_result(False) + return op(a, b) + + +def _can_use_numexpr(op, op_str, a, b, dtype_check) -> bool: + """return a boolean if we WILL be using numexpr""" + if op_str is not None: + # required min elements (otherwise we are adding overhead) + if a.size > _MIN_ELEMENTS: + # check for dtype compatibility + dtypes: set[str] = set() + for o in [a, b]: + # ndarray and Series Case + if hasattr(o, "dtype"): + dtypes |= {o.dtype.name} + + # allowed are a superset + if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes: + return True + + return False + + +def _evaluate_numexpr(op, op_str, a, b): + result = None + + if _can_use_numexpr(op, op_str, a, b, "evaluate"): + is_reversed = op.__name__.strip("_").startswith("r") + if is_reversed: + # we were originally called by a reversed op method + a, b = b, a + + a_value = a + b_value = b + + try: + result = ne.evaluate( + f"a_value {op_str} b_value", + local_dict={"a_value": a_value, "b_value": b_value}, + casting="safe", + ) + except TypeError: + # numexpr raises eg for array ** array with integers + # (https://github.com/pydata/numexpr/issues/379) + pass + except NotImplementedError: + if _bool_arith_fallback(op_str, a, b): + pass + else: + raise + + if is_reversed: + # reverse order to original for fallback + a, b = b, a + + if _TEST_MODE: + _store_test_result(result is not None) + + if result is None: + result = _evaluate_standard(op, op_str, a, b) + + return result + + +_op_str_mapping = { + operator.add: "+", + roperator.radd: "+", + operator.mul: "*", + roperator.rmul: "*", + operator.sub: "-", + roperator.rsub: "-", + operator.truediv: "/", + roperator.rtruediv: "/", + # floordiv not supported by numexpr 2.x + operator.floordiv: None, + roperator.rfloordiv: None, + # we require Python semantics for mod of negative for backwards compatibility + # see https://github.com/pydata/numexpr/issues/365 + # so sticking with unaccelerated for now GH#36552 + operator.mod: None, + roperator.rmod: None, + operator.pow: "**", + roperator.rpow: "**", + operator.eq: "==", + operator.ne: "!=", + operator.le: "<=", + operator.lt: "<", + operator.ge: ">=", + operator.gt: ">", + operator.and_: "&", + roperator.rand_: "&", + operator.or_: "|", + roperator.ror_: "|", + operator.xor: "^", + roperator.rxor: "^", + divmod: None, + roperator.rdivmod: None, +} + + +def _where_standard(cond, a, b): + # Caller is responsible for extracting ndarray if necessary + return np.where(cond, a, b) + + +def _where_numexpr(cond, a, b): + # Caller is responsible for extracting ndarray if necessary + result = None + + if _can_use_numexpr(None, "where", a, b, "where"): + result = ne.evaluate( + "where(cond_value, a_value, b_value)", + local_dict={"cond_value": cond, "a_value": a, "b_value": b}, + casting="safe", + ) + + if result is None: + result = _where_standard(cond, a, b) + + return result + + +# turn myself on +set_use_numexpr(get_option("compute.use_numexpr")) + + +def _has_bool_dtype(x): + try: + return x.dtype == bool + except AttributeError: + return isinstance(x, (bool, np.bool_)) + + +_BOOL_OP_UNSUPPORTED = {"+": "|", "*": "&", "-": "^"} + + +def _bool_arith_fallback(op_str, a, b) -> bool: + """ + Check if we should fallback to the python `_evaluate_standard` in case + of an unsupported operation by numexpr, which is the case for some + boolean ops. 
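+
+    For example, ``a + b`` on two boolean arrays is not supported by numexpr;
+    this warns (suggesting ``|`` via ``_BOOL_OP_UNSUPPORTED``) and returns
+    True so the caller falls back to ``_evaluate_standard``.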
+ """ + if _has_bool_dtype(a) and _has_bool_dtype(b): + if op_str in _BOOL_OP_UNSUPPORTED: + warnings.warn( + f"evaluating in Python space because the {repr(op_str)} " + "operator is not supported by numexpr for the bool dtype, " + f"use {repr(_BOOL_OP_UNSUPPORTED[op_str])} instead.", + stacklevel=find_stack_level(), + ) + return True + return False + + +def evaluate(op, a, b, use_numexpr: bool = True): + """ + Evaluate and return the expression of the op on a and b. + + Parameters + ---------- + op : the actual operand + a : left operand + b : right operand + use_numexpr : bool, default True + Whether to try to use numexpr. + """ + op_str = _op_str_mapping[op] + if op_str is not None: + if use_numexpr: + # error: "None" not callable + return _evaluate(op, op_str, a, b) # type: ignore[misc] + return _evaluate_standard(op, op_str, a, b) + + +def where(cond, a, b, use_numexpr: bool = True): + """ + Evaluate the where condition cond on a and b. + + Parameters + ---------- + cond : np.ndarray[bool] + a : return if cond is True + b : return if cond is False + use_numexpr : bool, default True + Whether to try to use numexpr. + """ + assert _where is not None + return _where(cond, a, b) if use_numexpr else _where_standard(cond, a, b) + + +def set_test_mode(v: bool = True) -> None: + """ + Keeps track of whether numexpr was used. + + Stores an additional ``True`` for every successful use of evaluate with + numexpr since the last ``get_test_result``. + """ + global _TEST_MODE, _TEST_RESULT + _TEST_MODE = v + _TEST_RESULT = [] + + +def _store_test_result(used_numexpr: bool) -> None: + if used_numexpr: + _TEST_RESULT.append(used_numexpr) + + +def get_test_result() -> list[bool]: + """ + Get test result and reset test_results. + """ + global _TEST_RESULT + res = _TEST_RESULT + _TEST_RESULT = [] + return res diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/ops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/ops.py new file mode 100644 index 00000000..852bfae1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/ops.py @@ -0,0 +1,621 @@ +""" +Operator classes for eval. 
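+
+Terms wrap resolved names and constants, while Op subclasses (BinOp, UnaryOp,
+Div, MathCall) form the expression tree evaluated by the eval engines.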
+""" + +from __future__ import annotations + +from datetime import datetime +from functools import partial +import operator +from typing import ( + TYPE_CHECKING, + Callable, + Literal, +) + +import numpy as np + +from pandas._libs.tslibs import Timestamp + +from pandas.core.dtypes.common import ( + is_list_like, + is_scalar, +) + +import pandas.core.common as com +from pandas.core.computation.common import ( + ensure_decoded, + result_type_many, +) +from pandas.core.computation.scope import DEFAULT_GLOBALS + +from pandas.io.formats.printing import ( + pprint_thing, + pprint_thing_encoded, +) + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Iterator, + ) + +REDUCTIONS = ("sum", "prod", "min", "max") + +_unary_math_ops = ( + "sin", + "cos", + "exp", + "log", + "expm1", + "log1p", + "sqrt", + "sinh", + "cosh", + "tanh", + "arcsin", + "arccos", + "arctan", + "arccosh", + "arcsinh", + "arctanh", + "abs", + "log10", + "floor", + "ceil", +) +_binary_math_ops = ("arctan2",) + +MATHOPS = _unary_math_ops + _binary_math_ops + + +LOCAL_TAG = "__pd_eval_local_" + + +class Term: + def __new__(cls, name, env, side=None, encoding=None): + klass = Constant if not isinstance(name, str) else cls + # error: Argument 2 for "super" not an instance of argument 1 + supr_new = super(Term, klass).__new__ # type: ignore[misc] + return supr_new(klass) + + is_local: bool + + def __init__(self, name, env, side=None, encoding=None) -> None: + # name is a str for Term, but may be something else for subclasses + self._name = name + self.env = env + self.side = side + tname = str(name) + self.is_local = tname.startswith(LOCAL_TAG) or tname in DEFAULT_GLOBALS + self._value = self._resolve_name() + self.encoding = encoding + + @property + def local_name(self) -> str: + return self.name.replace(LOCAL_TAG, "") + + def __repr__(self) -> str: + return pprint_thing(self.name) + + def __call__(self, *args, **kwargs): + return self.value + + def evaluate(self, *args, **kwargs) -> Term: + return self + + def _resolve_name(self): + local_name = str(self.local_name) + is_local = self.is_local + if local_name in self.env.scope and isinstance( + self.env.scope[local_name], type + ): + is_local = False + + res = self.env.resolve(local_name, is_local=is_local) + self.update(res) + + if hasattr(res, "ndim") and res.ndim > 2: + raise NotImplementedError( + "N-dimensional objects, where N > 2, are not supported with eval" + ) + return res + + def update(self, value) -> None: + """ + search order for local (i.e., @variable) variables: + + scope, key_variable + [('locals', 'local_name'), + ('globals', 'local_name'), + ('locals', 'key'), + ('globals', 'key')] + """ + key = self.name + + # if it's a variable name (otherwise a constant) + if isinstance(key, str): + self.env.swapkey(self.local_name, key, new_value=value) + + self.value = value + + @property + def is_scalar(self) -> bool: + return is_scalar(self._value) + + @property + def type(self): + try: + # potentially very slow for large, mixed dtype frames + return self._value.values.dtype + except AttributeError: + try: + # ndarray + return self._value.dtype + except AttributeError: + # scalar + return type(self._value) + + return_type = type + + @property + def raw(self) -> str: + return f"{type(self).__name__}(name={repr(self.name)}, type={self.type})" + + @property + def is_datetime(self) -> bool: + try: + t = self.type.type + except AttributeError: + t = self.type + + return issubclass(t, (datetime, np.datetime64)) + + @property + def value(self): + return self._value 
+ + @value.setter + def value(self, new_value) -> None: + self._value = new_value + + @property + def name(self): + return self._name + + @property + def ndim(self) -> int: + return self._value.ndim + + +class Constant(Term): + def _resolve_name(self): + return self._name + + @property + def name(self): + return self.value + + def __repr__(self) -> str: + # in python 2 str() of float + # can truncate shorter than repr() + return repr(self.name) + + +_bool_op_map = {"not": "~", "and": "&", "or": "|"} + + +class Op: + """ + Hold an operator of arbitrary arity. + """ + + op: str + + def __init__(self, op: str, operands: Iterable[Term | Op], encoding=None) -> None: + self.op = _bool_op_map.get(op, op) + self.operands = operands + self.encoding = encoding + + def __iter__(self) -> Iterator: + return iter(self.operands) + + def __repr__(self) -> str: + """ + Print a generic n-ary operator and its operands using infix notation. + """ + # recurse over the operands + parened = (f"({pprint_thing(opr)})" for opr in self.operands) + return pprint_thing(f" {self.op} ".join(parened)) + + @property + def return_type(self): + # clobber types to bool if the op is a boolean operator + if self.op in (CMP_OPS_SYMS + BOOL_OPS_SYMS): + return np.bool_ + return result_type_many(*(term.type for term in com.flatten(self))) + + @property + def has_invalid_return_type(self) -> bool: + types = self.operand_types + obj_dtype_set = frozenset([np.dtype("object")]) + return self.return_type == object and types - obj_dtype_set + + @property + def operand_types(self): + return frozenset(term.type for term in com.flatten(self)) + + @property + def is_scalar(self) -> bool: + return all(operand.is_scalar for operand in self.operands) + + @property + def is_datetime(self) -> bool: + try: + t = self.return_type.type + except AttributeError: + t = self.return_type + + return issubclass(t, (datetime, np.datetime64)) + + +def _in(x, y): + """ + Compute the vectorized membership of ``x in y`` if possible, otherwise + use Python. + """ + try: + return x.isin(y) + except AttributeError: + if is_list_like(x): + try: + return y.isin(x) + except AttributeError: + pass + return x in y + + +def _not_in(x, y): + """ + Compute the vectorized membership of ``x not in y`` if possible, + otherwise use Python. 
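+
+    A sketch of the vectorized path:
+
+    >>> import pandas as pd
+    >>> _not_in(pd.Series([1, 2]), [2, 3]).tolist()
+    [True, False]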
+ """ + try: + return ~x.isin(y) + except AttributeError: + if is_list_like(x): + try: + return ~y.isin(x) + except AttributeError: + pass + return x not in y + + +CMP_OPS_SYMS = (">", "<", ">=", "<=", "==", "!=", "in", "not in") +_cmp_ops_funcs = ( + operator.gt, + operator.lt, + operator.ge, + operator.le, + operator.eq, + operator.ne, + _in, + _not_in, +) +_cmp_ops_dict = dict(zip(CMP_OPS_SYMS, _cmp_ops_funcs)) + +BOOL_OPS_SYMS = ("&", "|", "and", "or") +_bool_ops_funcs = (operator.and_, operator.or_, operator.and_, operator.or_) +_bool_ops_dict = dict(zip(BOOL_OPS_SYMS, _bool_ops_funcs)) + +ARITH_OPS_SYMS = ("+", "-", "*", "/", "**", "//", "%") +_arith_ops_funcs = ( + operator.add, + operator.sub, + operator.mul, + operator.truediv, + operator.pow, + operator.floordiv, + operator.mod, +) +_arith_ops_dict = dict(zip(ARITH_OPS_SYMS, _arith_ops_funcs)) + +SPECIAL_CASE_ARITH_OPS_SYMS = ("**", "//", "%") +_special_case_arith_ops_funcs = (operator.pow, operator.floordiv, operator.mod) +_special_case_arith_ops_dict = dict( + zip(SPECIAL_CASE_ARITH_OPS_SYMS, _special_case_arith_ops_funcs) +) + +_binary_ops_dict = {} + +for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict): + _binary_ops_dict.update(d) + + +def _cast_inplace(terms, acceptable_dtypes, dtype) -> None: + """ + Cast an expression inplace. + + Parameters + ---------- + terms : Op + The expression that should cast. + acceptable_dtypes : list of acceptable numpy.dtype + Will not cast if term's dtype in this list. + dtype : str or numpy.dtype + The dtype to cast to. + """ + dt = np.dtype(dtype) + for term in terms: + if term.type in acceptable_dtypes: + continue + + try: + new_value = term.value.astype(dt) + except AttributeError: + new_value = dt.type(term.value) + term.update(new_value) + + +def is_term(obj) -> bool: + return isinstance(obj, Term) + + +class BinOp(Op): + """ + Hold a binary operator and its operands. + + Parameters + ---------- + op : str + lhs : Term or Op + rhs : Term or Op + """ + + def __init__(self, op: str, lhs, rhs) -> None: + super().__init__(op, (lhs, rhs)) + self.lhs = lhs + self.rhs = rhs + + self._disallow_scalar_only_bool_ops() + + self.convert_values() + + try: + self.func = _binary_ops_dict[op] + except KeyError as err: + # has to be made a list for python3 + keys = list(_binary_ops_dict.keys()) + raise ValueError( + f"Invalid binary operator {repr(op)}, valid operators are {keys}" + ) from err + + def __call__(self, env): + """ + Recursively evaluate an expression in Python space. + + Parameters + ---------- + env : Scope + + Returns + ------- + object + The result of an evaluated expression. + """ + # recurse over the left/right nodes + left = self.lhs(env) + right = self.rhs(env) + + return self.func(left, right) + + def evaluate(self, env, engine: str, parser, term_type, eval_in_python): + """ + Evaluate a binary operation *before* being passed to the engine. 
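+
+        With the ``python`` engine the expression is evaluated eagerly;
+        otherwise both operands are pre-evaluated recursively and only the
+        ops listed in ``eval_in_python`` (e.g. ``in``/``not in``) are
+        executed in Python space.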
+ + Parameters + ---------- + env : Scope + engine : str + parser : str + term_type : type + eval_in_python : list + + Returns + ------- + term_type + The "pre-evaluated" expression as an instance of ``term_type`` + """ + if engine == "python": + res = self(env) + else: + # recurse over the left/right nodes + + left = self.lhs.evaluate( + env, + engine=engine, + parser=parser, + term_type=term_type, + eval_in_python=eval_in_python, + ) + + right = self.rhs.evaluate( + env, + engine=engine, + parser=parser, + term_type=term_type, + eval_in_python=eval_in_python, + ) + + # base cases + if self.op in eval_in_python: + res = self.func(left.value, right.value) + else: + from pandas.core.computation.eval import eval + + res = eval(self, local_dict=env, engine=engine, parser=parser) + + name = env.add_tmp(res) + return term_type(name, env=env) + + def convert_values(self) -> None: + """ + Convert datetimes to a comparable value in an expression. + """ + + def stringify(value): + encoder: Callable + if self.encoding is not None: + encoder = partial(pprint_thing_encoded, encoding=self.encoding) + else: + encoder = pprint_thing + return encoder(value) + + lhs, rhs = self.lhs, self.rhs + + if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar: + v = rhs.value + if isinstance(v, (int, float)): + v = stringify(v) + v = Timestamp(ensure_decoded(v)) + if v.tz is not None: + v = v.tz_convert("UTC") + self.rhs.update(v) + + if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar: + v = lhs.value + if isinstance(v, (int, float)): + v = stringify(v) + v = Timestamp(ensure_decoded(v)) + if v.tz is not None: + v = v.tz_convert("UTC") + self.lhs.update(v) + + def _disallow_scalar_only_bool_ops(self): + rhs = self.rhs + lhs = self.lhs + + # GH#24883 unwrap dtype if necessary to ensure we have a type object + rhs_rt = rhs.return_type + rhs_rt = getattr(rhs_rt, "type", rhs_rt) + lhs_rt = lhs.return_type + lhs_rt = getattr(lhs_rt, "type", lhs_rt) + if ( + (lhs.is_scalar or rhs.is_scalar) + and self.op in _bool_ops_dict + and ( + not ( + issubclass(rhs_rt, (bool, np.bool_)) + and issubclass(lhs_rt, (bool, np.bool_)) + ) + ) + ): + raise NotImplementedError("cannot evaluate scalar only bool ops") + + +def isnumeric(dtype) -> bool: + return issubclass(np.dtype(dtype).type, np.number) + + +class Div(BinOp): + """ + Div operator to special case casting. + + Parameters + ---------- + lhs, rhs : Term or Op + The Terms or Ops in the ``/`` expression. + """ + + def __init__(self, lhs, rhs) -> None: + super().__init__("/", lhs, rhs) + + if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type): + raise TypeError( + f"unsupported operand type(s) for {self.op}: " + f"'{lhs.return_type}' and '{rhs.return_type}'" + ) + + # do not upcast float32s to float64 un-necessarily + acceptable_dtypes = [np.float32, np.float64] + _cast_inplace(com.flatten(self), acceptable_dtypes, np.float64) + + +UNARY_OPS_SYMS = ("+", "-", "~", "not") +_unary_ops_funcs = (operator.pos, operator.neg, operator.invert, operator.invert) +_unary_ops_dict = dict(zip(UNARY_OPS_SYMS, _unary_ops_funcs)) + + +class UnaryOp(Op): + """ + Hold a unary operator and its operands. + + Parameters + ---------- + op : str + The token used to represent the operator. + operand : Term or Op + The Term or Op operand to the operator. + + Raises + ------ + ValueError + * If no function associated with the passed operator token is found. 
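+
+    Notes
+    -----
+    ``not`` maps to ``operator.invert`` (the same function as ``~``), so it
+    is only a true boolean negation for boolean operands.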
+ """ + + def __init__(self, op: Literal["+", "-", "~", "not"], operand) -> None: + super().__init__(op, (operand,)) + self.operand = operand + + try: + self.func = _unary_ops_dict[op] + except KeyError as err: + raise ValueError( + f"Invalid unary operator {repr(op)}, " + f"valid operators are {UNARY_OPS_SYMS}" + ) from err + + def __call__(self, env) -> MathCall: + operand = self.operand(env) + # error: Cannot call function of unknown type + return self.func(operand) # type: ignore[operator] + + def __repr__(self) -> str: + return pprint_thing(f"{self.op}({self.operand})") + + @property + def return_type(self) -> np.dtype: + operand = self.operand + if operand.return_type == np.dtype("bool"): + return np.dtype("bool") + if isinstance(operand, Op) and ( + operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict + ): + return np.dtype("bool") + return np.dtype("int") + + +class MathCall(Op): + def __init__(self, func, args) -> None: + super().__init__(func.name, args) + self.func = func + + def __call__(self, env): + # error: "Op" not callable + operands = [op(env) for op in self.operands] # type: ignore[operator] + return self.func.func(*operands) + + def __repr__(self) -> str: + operands = map(str, self.operands) + return pprint_thing(f"{self.op}({','.join(operands)})") + + +class FuncNode: + def __init__(self, name: str) -> None: + if name not in MATHOPS: + raise ValueError(f'"{name}" is not a supported function') + self.name = name + self.func = getattr(np, name) + + def __call__(self, *args): + return MathCall(self, args) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/parsing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/parsing.py new file mode 100644 index 00000000..4cfa0f2b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/parsing.py @@ -0,0 +1,198 @@ +""" +:func:`~pandas.eval` source string parsing functions +""" +from __future__ import annotations + +from io import StringIO +from keyword import iskeyword +import token +import tokenize +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + ) + +# A token value Python's tokenizer probably will never use. +BACKTICK_QUOTED_STRING = 100 + + +def create_valid_python_identifier(name: str) -> str: + """ + Create valid Python identifiers from any string. + + Check if name contains any special characters. If it contains any + special characters, the special characters will be replaced by + a special string and a prefix is added. + + Raises + ------ + SyntaxError + If the returned name is not a Python valid identifier, raise an exception. + This can happen if there is a hashtag in the name, as the tokenizer will + than terminate and not find the backtick. + But also for characters that fall out of the range of (U+0001..U+007F). + """ + if name.isidentifier() and not iskeyword(name): + return name + + # Create a dict with the special characters and their replacement string. + # EXACT_TOKEN_TYPES contains these special characters + # token.tok_name contains a readable description of the replacement string. + special_characters_replacements = { + char: f"_{token.tok_name[tokval]}_" + for char, tokval in (tokenize.EXACT_TOKEN_TYPES.items()) + } + special_characters_replacements.update( + { + " ": "_", + "?": "_QUESTIONMARK_", + "!": "_EXCLAMATIONMARK_", + "$": "_DOLLARSIGN_", + "€": "_EUROSIGN_", + "°": "_DEGREESIGN_", + # Including quotes works, but there are exceptions. 
+            "'": "_SINGLEQUOTE_",
+            '"': "_DOUBLEQUOTE_",
+            # Currently not possible. Terminates parser and won't find backtick.
+            # "#": "_HASH_",
+        }
+    )
+
+    name = "".join([special_characters_replacements.get(char, char) for char in name])
+    name = f"BACKTICK_QUOTED_STRING_{name}"
+
+    if not name.isidentifier():
+        raise SyntaxError(f"Could not convert '{name}' to a valid Python identifier.")
+
+    return name
+
+
+def clean_backtick_quoted_toks(tok: tuple[int, str]) -> tuple[int, str]:
+    """
+    Clean up a column name if surrounded by backticks.
+
+    Backtick quoted strings are indicated by a certain tokval value. If a
+    string is a backtick quoted token it will be processed by
+    :func:`create_valid_python_identifier` so that the parser can find this
+    string when the query is executed.
+    In this case the tok will get the NAME tokval.
+
+    Parameters
+    ----------
+    tok : tuple of int, str
+        ints correspond to the all caps constants in the tokenize module
+
+    Returns
+    -------
+    tok : Tuple[int, str]
+        Either the input token or the replacement values
+    """
+    toknum, tokval = tok
+    if toknum == BACKTICK_QUOTED_STRING:
+        return tokenize.NAME, create_valid_python_identifier(tokval)
+    return toknum, tokval
+
+
+def clean_column_name(name: Hashable) -> Hashable:
+    """
+    Function to emulate the cleaning of a backtick quoted name.
+
+    The purpose of this function is to see what happens to the name of an
+    identifier if it goes through the process of being parsed as Python code
+    inside a backtick quoted string and then being cleaned
+    (stripped of any special characters).
+
+    Parameters
+    ----------
+    name : hashable
+        Name to be cleaned.
+
+    Returns
+    -------
+    name : hashable
+        Returns the name after tokenizing and cleaning.
+
+    Notes
+    -----
+    For some cases, a name cannot be converted to a valid Python identifier.
+    In that case :func:`tokenize_string` raises a SyntaxError.
+    In that case, we just return the name unmodified.
+
+    If this name was used in the query string (this makes the query call impossible)
+    an error will be raised by :func:`tokenize_backtick_quoted_string` instead,
+    which is not caught and propagates to the user level.
+    """
+    try:
+        tokenized = tokenize_string(f"`{name}`")
+        tokval = next(tokenized)[1]
+        return create_valid_python_identifier(tokval)
+    except SyntaxError:
+        return name
+
+
+def tokenize_backtick_quoted_string(
+    token_generator: Iterator[tokenize.TokenInfo], source: str, string_start: int
+) -> tuple[int, str]:
+    """
+    Creates a token from a backtick quoted string.
+
+    Moves the token_generator forwards till right after the next backtick.
+
+    Parameters
+    ----------
+    token_generator : Iterator[tokenize.TokenInfo]
+        The generator that yields the tokens of the source string (Tuple[int, str]).
+        The generator is at the first token after the backtick (`)
+
+    source : str
+        The Python source code string.
+
+    string_start : int
+        This is the start of the backtick quoted string inside the source string.
+
+    Returns
+    -------
+    tok: Tuple[int, str]
+        The token that represents the backtick quoted string.
+        The integer is equal to BACKTICK_QUOTED_STRING (100).
+    """
+    for _, tokval, start, _, _ in token_generator:
+        if tokval == "`":
+            string_end = start[1]
+            break
+
+    return BACKTICK_QUOTED_STRING, source[string_start:string_end]
+
+
+def tokenize_string(source: str) -> Iterator[tuple[int, str]]:
+    """
+    Tokenize a Python source code string.
+
+    Parameters
+    ----------
+    source : str
+        The Python source code string.
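+        It may contain backtick-quoted names, e.g. as produced by
+        ``DataFrame.query``.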
+ + Returns + ------- + tok_generator : Iterator[Tuple[int, str]] + An iterator yielding all tokens with only toknum and tokval (Tuple[ing, str]). + """ + line_reader = StringIO(source).readline + token_generator = tokenize.generate_tokens(line_reader) + + # Loop over all tokens till a backtick (`) is found. + # Then, take all tokens till the next backtick to form a backtick quoted string + for toknum, tokval, start, _, _ in token_generator: + if tokval == "`": + try: + yield tokenize_backtick_quoted_string( + token_generator, source, string_start=start[1] + 1 + ) + except Exception as err: + raise SyntaxError(f"Failed to parse backticks in '{source}'.") from err + else: + yield toknum, tokval diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/pytables.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/pytables.py new file mode 100644 index 00000000..77d8d795 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/pytables.py @@ -0,0 +1,656 @@ +""" manage PyTables query interface via Expressions """ +from __future__ import annotations + +import ast +from decimal import ( + Decimal, + InvalidOperation, +) +from functools import partial +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np + +from pandas._libs.tslibs import ( + Timedelta, + Timestamp, +) +from pandas.errors import UndefinedVariableError + +from pandas.core.dtypes.common import is_list_like + +import pandas.core.common as com +from pandas.core.computation import ( + expr, + ops, + scope as _scope, +) +from pandas.core.computation.common import ensure_decoded +from pandas.core.computation.expr import BaseExprVisitor +from pandas.core.computation.ops import is_term +from pandas.core.construction import extract_array +from pandas.core.indexes.base import Index + +from pandas.io.formats.printing import ( + pprint_thing, + pprint_thing_encoded, +) + +if TYPE_CHECKING: + from pandas._typing import npt + + +class PyTablesScope(_scope.Scope): + __slots__ = ("queryables",) + + queryables: dict[str, Any] + + def __init__( + self, + level: int, + global_dict=None, + local_dict=None, + queryables: dict[str, Any] | None = None, + ) -> None: + super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict) + self.queryables = queryables or {} + + +class Term(ops.Term): + env: PyTablesScope + + def __new__(cls, name, env, side=None, encoding=None): + if isinstance(name, str): + klass = cls + else: + klass = Constant + return object.__new__(klass) + + def __init__(self, name, env: PyTablesScope, side=None, encoding=None) -> None: + super().__init__(name, env, side=side, encoding=encoding) + + def _resolve_name(self): + # must be a queryables + if self.side == "left": + # Note: The behavior of __new__ ensures that self.name is a str here + if self.name not in self.env.queryables: + raise NameError(f"name {repr(self.name)} is not defined") + return self.name + + # resolve the rhs (and allow it to be None) + try: + return self.env.resolve(self.name, is_local=False) + except UndefinedVariableError: + return self.name + + # read-only property overwriting read/write property + @property # type: ignore[misc] + def value(self): + return self._value + + +class Constant(Term): + def __init__(self, name, env: PyTablesScope, side=None, encoding=None) -> None: + assert isinstance(env, PyTablesScope), type(env) + super().__init__(name, env, side=side, encoding=encoding) + + def _resolve_name(self): + return self._name + + +class BinOp(ops.BinOp): + 
_max_selectors = 31 + + op: str + queryables: dict[str, Any] + condition: str | None + + def __init__(self, op: str, lhs, rhs, queryables: dict[str, Any], encoding) -> None: + super().__init__(op, lhs, rhs) + self.queryables = queryables + self.encoding = encoding + self.condition = None + + def _disallow_scalar_only_bool_ops(self) -> None: + pass + + def prune(self, klass): + def pr(left, right): + """create and return a new specialized BinOp from myself""" + if left is None: + return right + elif right is None: + return left + + k = klass + if isinstance(left, ConditionBinOp): + if isinstance(right, ConditionBinOp): + k = JointConditionBinOp + elif isinstance(left, k): + return left + elif isinstance(right, k): + return right + + elif isinstance(left, FilterBinOp): + if isinstance(right, FilterBinOp): + k = JointFilterBinOp + elif isinstance(left, k): + return left + elif isinstance(right, k): + return right + + return k( + self.op, left, right, queryables=self.queryables, encoding=self.encoding + ).evaluate() + + left, right = self.lhs, self.rhs + + if is_term(left) and is_term(right): + res = pr(left.value, right.value) + elif not is_term(left) and is_term(right): + res = pr(left.prune(klass), right.value) + elif is_term(left) and not is_term(right): + res = pr(left.value, right.prune(klass)) + elif not (is_term(left) or is_term(right)): + res = pr(left.prune(klass), right.prune(klass)) + + return res + + def conform(self, rhs): + """inplace conform rhs""" + if not is_list_like(rhs): + rhs = [rhs] + if isinstance(rhs, np.ndarray): + rhs = rhs.ravel() + return rhs + + @property + def is_valid(self) -> bool: + """return True if this is a valid field""" + return self.lhs in self.queryables + + @property + def is_in_table(self) -> bool: + """ + return True if this is a valid column name for generation (e.g. 
an + actual column in the table) + """ + return self.queryables.get(self.lhs) is not None + + @property + def kind(self): + """the kind of my field""" + return getattr(self.queryables.get(self.lhs), "kind", None) + + @property + def meta(self): + """the meta of my field""" + return getattr(self.queryables.get(self.lhs), "meta", None) + + @property + def metadata(self): + """the metadata of my field""" + return getattr(self.queryables.get(self.lhs), "metadata", None) + + def generate(self, v) -> str: + """create and return the op string for this TermValue""" + val = v.tostring(self.encoding) + return f"({self.lhs} {self.op} {val})" + + def convert_value(self, v) -> TermValue: + """ + convert the expression that is in the term to something that is + accepted by pytables + """ + + def stringify(value): + if self.encoding is not None: + return pprint_thing_encoded(value, encoding=self.encoding) + return pprint_thing(value) + + kind = ensure_decoded(self.kind) + meta = ensure_decoded(self.meta) + if kind in ("datetime64", "datetime"): + if isinstance(v, (int, float)): + v = stringify(v) + v = ensure_decoded(v) + v = Timestamp(v).as_unit("ns") + if v.tz is not None: + v = v.tz_convert("UTC") + return TermValue(v, v._value, kind) + elif kind in ("timedelta64", "timedelta"): + if isinstance(v, str): + v = Timedelta(v) + else: + v = Timedelta(v, unit="s") + v = v.as_unit("ns")._value + return TermValue(int(v), v, kind) + elif meta == "category": + metadata = extract_array(self.metadata, extract_numpy=True) + result: npt.NDArray[np.intp] | np.intp | int + if v not in metadata: + result = -1 + else: + result = metadata.searchsorted(v, side="left") + return TermValue(result, result, "integer") + elif kind == "integer": + try: + v_dec = Decimal(v) + except InvalidOperation: + # GH 54186 + # convert v to float to raise float's ValueError + float(v) + else: + v = int(v_dec.to_integral_exact(rounding="ROUND_HALF_EVEN")) + return TermValue(v, v, kind) + elif kind == "float": + v = float(v) + return TermValue(v, v, kind) + elif kind == "bool": + if isinstance(v, str): + v = v.strip().lower() not in [ + "false", + "f", + "no", + "n", + "none", + "0", + "[]", + "{}", + "", + ] + else: + v = bool(v) + return TermValue(v, v, kind) + elif isinstance(v, str): + # string quoting + return TermValue(v, stringify(v), "string") + else: + raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column") + + def convert_values(self) -> None: + pass + + +class FilterBinOp(BinOp): + filter: tuple[Any, Any, Index] | None = None + + def __repr__(self) -> str: + if self.filter is None: + return "Filter: Not Initialized" + return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]") + + def invert(self): + """invert the filter""" + if self.filter is not None: + self.filter = ( + self.filter[0], + self.generate_filter_op(invert=True), + self.filter[2], + ) + return self + + def format(self): + """return the actual filter format""" + return [self.filter] + + def evaluate(self): + if not self.is_valid: + raise ValueError(f"query term is not valid [{self}]") + + rhs = self.conform(self.rhs) + values = list(rhs) + + if self.is_in_table: + # if too many values to create the expression, use a filter instead + if self.op in ["==", "!="] and len(values) > self._max_selectors: + filter_op = self.generate_filter_op() + self.filter = (self.lhs, filter_op, Index(values)) + + return self + return None + + # equality conditions + if self.op in ["==", "!="]: + filter_op = self.generate_filter_op() + self.filter = 
(self.lhs, filter_op, Index(values)) + + else: + raise TypeError( + f"passing a filterable condition to a non-table indexer [{self}]" + ) + + return self + + def generate_filter_op(self, invert: bool = False): + if (self.op == "!=" and not invert) or (self.op == "==" and invert): + return lambda axis, vals: ~axis.isin(vals) + else: + return lambda axis, vals: axis.isin(vals) + + +class JointFilterBinOp(FilterBinOp): + def format(self): + raise NotImplementedError("unable to collapse Joint Filters") + + def evaluate(self): + return self + + +class ConditionBinOp(BinOp): + def __repr__(self) -> str: + return pprint_thing(f"[Condition : [{self.condition}]]") + + def invert(self): + """invert the condition""" + # if self.condition is not None: + # self.condition = "~(%s)" % self.condition + # return self + raise NotImplementedError( + "cannot use an invert condition when passing to numexpr" + ) + + def format(self): + """return the actual ne format""" + return self.condition + + def evaluate(self): + if not self.is_valid: + raise ValueError(f"query term is not valid [{self}]") + + # convert values if we are in the table + if not self.is_in_table: + return None + + rhs = self.conform(self.rhs) + values = [self.convert_value(v) for v in rhs] + + # equality conditions + if self.op in ["==", "!="]: + # too many values to create the expression? + if len(values) <= self._max_selectors: + vs = [self.generate(v) for v in values] + self.condition = f"({' | '.join(vs)})" + + # use a filter after reading + else: + return None + else: + self.condition = self.generate(values[0]) + + return self + + +class JointConditionBinOp(ConditionBinOp): + def evaluate(self): + self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})" + return self + + +class UnaryOp(ops.UnaryOp): + def prune(self, klass): + if self.op != "~": + raise NotImplementedError("UnaryOp only support invert type ops") + + operand = self.operand + operand = operand.prune(klass) + + if operand is not None and ( + issubclass(klass, ConditionBinOp) + and operand.condition is not None + or not issubclass(klass, ConditionBinOp) + and issubclass(klass, FilterBinOp) + and operand.filter is not None + ): + return operand.invert() + return None + + +class PyTablesExprVisitor(BaseExprVisitor): + const_type = Constant + term_type = Term + + def __init__(self, env, engine, parser, **kwargs) -> None: + super().__init__(env, engine, parser) + for bin_op in self.binary_ops: + bin_node = self.binary_op_nodes_map[bin_op] + setattr( + self, + f"visit_{bin_node}", + lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs), + ) + + def visit_UnaryOp(self, node, **kwargs): + if isinstance(node.op, (ast.Not, ast.Invert)): + return UnaryOp("~", self.visit(node.operand)) + elif isinstance(node.op, ast.USub): + return self.const_type(-self.visit(node.operand).value, self.env) + elif isinstance(node.op, ast.UAdd): + raise NotImplementedError("Unary addition not supported") + + def visit_Index(self, node, **kwargs): + return self.visit(node.value).value + + def visit_Assign(self, node, **kwargs): + cmpr = ast.Compare( + ops=[ast.Eq()], left=node.targets[0], comparators=[node.value] + ) + return self.visit(cmpr) + + def visit_Subscript(self, node, **kwargs): + # only allow simple subscripts + + value = self.visit(node.value) + slobj = self.visit(node.slice) + try: + value = value.value + except AttributeError: + pass + + if isinstance(slobj, Term): + # In py39 np.ndarray lookups with Term containing int raise + slobj = slobj.value + + try: + return 
self.const_type(value[slobj], self.env) + except TypeError as err: + raise ValueError( + f"cannot subscript {repr(value)} with {repr(slobj)}" + ) from err + + def visit_Attribute(self, node, **kwargs): + attr = node.attr + value = node.value + + ctx = type(node.ctx) + if ctx == ast.Load: + # resolve the value + resolved = self.visit(value) + + # try to get the value to see if we are another expression + try: + resolved = resolved.value + except AttributeError: + pass + + try: + return self.term_type(getattr(resolved, attr), self.env) + except AttributeError: + # something like datetime.datetime where scope is overridden + if isinstance(value, ast.Name) and value.id == attr: + return resolved + + raise ValueError(f"Invalid Attribute context {ctx.__name__}") + + def translate_In(self, op): + return ast.Eq() if isinstance(op, ast.In) else op + + def _rewrite_membership_op(self, node, left, right): + return self.visit(node.op), node.op, left, right + + +def _validate_where(w): + """ + Validate that the where statement is of the right type. + + The type may either be String, Expr, or list-like of Exprs. + + Parameters + ---------- + w : String term expression, Expr, or list-like of Exprs. + + Returns + ------- + where : The original where clause if the check was successful. + + Raises + ------ + TypeError : An invalid data type was passed in for w (e.g. dict). + """ + if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)): + raise TypeError( + "where must be passed as a string, PyTablesExpr, " + "or list-like of PyTablesExpr" + ) + + return w + + +class PyTablesExpr(expr.Expr): + """ + Hold a pytables-like expression, comprised of possibly multiple 'terms'. + + Parameters + ---------- + where : string term expression, PyTablesExpr, or list-like of PyTablesExprs + queryables : a "kinds" map (dict of column name -> kind), or None if column + is non-indexable + encoding : an encoding that will encode the query terms + + Returns + ------- + a PyTablesExpr object + + Examples + -------- + 'index>=date' + "columns=['A', 'D']" + 'columns=A' + 'columns==A' + "~(columns=['A','B'])" + 'index>df.index[3] & string="bar"' + '(index>df.index[3] & index<=df.index[6]) | string="bar"' + "ts>=Timestamp('2012-02-01')" + "major_axis>=20130101" + """ + + _visitor: PyTablesExprVisitor | None + env: PyTablesScope + expr: str + + def __init__( + self, + where, + queryables: dict[str, Any] | None = None, + encoding=None, + scope_level: int = 0, + ) -> None: + where = _validate_where(where) + + self.encoding = encoding + self.condition = None + self.filter = None + self.terms = None + self._visitor = None + + # capture the environment if needed + local_dict: _scope.DeepChainMap[Any, Any] | None = None + + if isinstance(where, PyTablesExpr): + local_dict = where.env.scope + _where = where.expr + + elif is_list_like(where): + where = list(where) + for idx, w in enumerate(where): + if isinstance(w, PyTablesExpr): + local_dict = w.env.scope + else: + where[idx] = _validate_where(w) + _where = " & ".join([f"({w})" for w in com.flatten(where)]) + else: + # _validate_where ensures we otherwise have a string + _where = where + + self.expr = _where + self.env = PyTablesScope(scope_level + 1, local_dict=local_dict) + + if queryables is not None and isinstance(self.expr, str): + self.env.queryables.update(queryables) + self._visitor = PyTablesExprVisitor( + self.env, + queryables=queryables, + parser="pytables", + engine="pytables", + encoding=encoding, + ) + self.terms = self.parse() + + def __repr__(self) -> str: + if 
self.terms is not None: + return pprint_thing(self.terms) + return pprint_thing(self.expr) + + def evaluate(self): + """create and return the numexpr condition and filter""" + try: + self.condition = self.terms.prune(ConditionBinOp) + except AttributeError as err: + raise ValueError( + f"cannot process expression [{self.expr}], [{self}] " + "is not a valid condition" + ) from err + try: + self.filter = self.terms.prune(FilterBinOp) + except AttributeError as err: + raise ValueError( + f"cannot process expression [{self.expr}], [{self}] " + "is not a valid filter" + ) from err + + return self.condition, self.filter + + +class TermValue: + """hold a term value the we use to construct a condition/filter""" + + def __init__(self, value, converted, kind: str) -> None: + assert isinstance(kind, str), kind + self.value = value + self.converted = converted + self.kind = kind + + def tostring(self, encoding) -> str: + """quote the string if not encoded else encode and return""" + if self.kind == "string": + if encoding is not None: + return str(self.converted) + return f'"{self.converted}"' + elif self.kind == "float": + # python 2 str(float) is not always + # round-trippable so use repr() + return repr(self.converted) + return str(self.converted) + + +def maybe_expression(s) -> bool: + """loose checking if s is a pytables-acceptable expression""" + if not isinstance(s, str): + return False + operations = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",) + + # make sure we have an op at least + return any(op in s for op in operations) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/scope.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/scope.py new file mode 100644 index 00000000..7e553ca4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/computation/scope.py @@ -0,0 +1,355 @@ +""" +Module for scope operations +""" +from __future__ import annotations + +from collections import ChainMap +import datetime +import inspect +from io import StringIO +import itertools +import pprint +import struct +import sys +from typing import TypeVar + +import numpy as np + +from pandas._libs.tslibs import Timestamp +from pandas.errors import UndefinedVariableError + +_KT = TypeVar("_KT") +_VT = TypeVar("_VT") + + +# https://docs.python.org/3/library/collections.html#chainmap-examples-and-recipes +class DeepChainMap(ChainMap[_KT, _VT]): + """ + Variant of ChainMap that allows direct updates to inner scopes. + + Only works when all passed mapping are mutable. + """ + + def __setitem__(self, key: _KT, value: _VT) -> None: + for mapping in self.maps: + if key in mapping: + mapping[key] = value + return + self.maps[0][key] = value + + def __delitem__(self, key: _KT) -> None: + """ + Raises + ------ + KeyError + If `key` doesn't exist. + """ + for mapping in self.maps: + if key in mapping: + del mapping[key] + return + raise KeyError(key) + + +def ensure_scope( + level: int, global_dict=None, local_dict=None, resolvers=(), target=None +) -> Scope: + """Ensure that we are grabbing the correct scope.""" + return Scope( + level + 1, + global_dict=global_dict, + local_dict=local_dict, + resolvers=resolvers, + target=target, + ) + + +def _replacer(x) -> str: + """ + Replace a number with its hexadecimal representation. Used to tag + temporary variables with their calling scope's id. 
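+
+    For example:
+
+    >>> _replacer(255)
+    '0xff'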
+ """ + # get the hex repr of the binary char and remove 0x and pad by pad_size + # zeros + try: + hexin = ord(x) + except TypeError: + # bytes literals masquerade as ints when iterating in py3 + hexin = x + + return hex(hexin) + + +def _raw_hex_id(obj) -> str: + """Return the padded hexadecimal id of ``obj``.""" + # interpret as a pointer since that's what really what id returns + packed = struct.pack("@P", id(obj)) + return "".join([_replacer(x) for x in packed]) + + +DEFAULT_GLOBALS = { + "Timestamp": Timestamp, + "datetime": datetime.datetime, + "True": True, + "False": False, + "list": list, + "tuple": tuple, + "inf": np.inf, + "Inf": np.inf, +} + + +def _get_pretty_string(obj) -> str: + """ + Return a prettier version of obj. + + Parameters + ---------- + obj : object + Object to pretty print + + Returns + ------- + str + Pretty print object repr + """ + sio = StringIO() + pprint.pprint(obj, stream=sio) + return sio.getvalue() + + +class Scope: + """ + Object to hold scope, with a few bells to deal with some custom syntax + and contexts added by pandas. + + Parameters + ---------- + level : int + global_dict : dict or None, optional, default None + local_dict : dict or Scope or None, optional, default None + resolvers : list-like or None, optional, default None + target : object + + Attributes + ---------- + level : int + scope : DeepChainMap + target : object + temps : dict + """ + + __slots__ = ["level", "scope", "target", "resolvers", "temps"] + level: int + scope: DeepChainMap + resolvers: DeepChainMap + temps: dict + + def __init__( + self, level: int, global_dict=None, local_dict=None, resolvers=(), target=None + ) -> None: + self.level = level + 1 + + # shallow copy because we don't want to keep filling this up with what + # was there before if there are multiple calls to Scope/_ensure_scope + self.scope = DeepChainMap(DEFAULT_GLOBALS.copy()) + self.target = target + + if isinstance(local_dict, Scope): + self.scope.update(local_dict.scope) + if local_dict.target is not None: + self.target = local_dict.target + self._update(local_dict.level) + + frame = sys._getframe(self.level) + + try: + # shallow copy here because we don't want to replace what's in + # scope when we align terms (alignment accesses the underlying + # numpy array of pandas objects) + scope_global = self.scope.new_child( + (global_dict if global_dict is not None else frame.f_globals).copy() + ) + self.scope = DeepChainMap(scope_global) + if not isinstance(local_dict, Scope): + scope_local = self.scope.new_child( + (local_dict if local_dict is not None else frame.f_locals).copy() + ) + self.scope = DeepChainMap(scope_local) + finally: + del frame + + # assumes that resolvers are going from outermost scope to inner + if isinstance(local_dict, Scope): + resolvers += tuple(local_dict.resolvers.maps) + self.resolvers = DeepChainMap(*resolvers) + self.temps = {} + + def __repr__(self) -> str: + scope_keys = _get_pretty_string(list(self.scope.keys())) + res_keys = _get_pretty_string(list(self.resolvers.keys())) + return f"{type(self).__name__}(scope={scope_keys}, resolvers={res_keys})" + + @property + def has_resolvers(self) -> bool: + """ + Return whether we have any extra scope. + + For example, DataFrames pass Their columns as resolvers during calls to + ``DataFrame.eval()`` and ``DataFrame.query()``. + + Returns + ------- + hr : bool + """ + return bool(len(self.resolvers)) + + def resolve(self, key: str, is_local: bool): + """ + Resolve a variable name in a possibly local context. 
+ + Parameters + ---------- + key : str + A variable name + is_local : bool + Flag indicating whether the variable is local or not (prefixed with + the '@' symbol) + + Returns + ------- + value : object + The value of a particular variable + """ + try: + # only look for locals in outer scope + if is_local: + return self.scope[key] + + # not a local variable so check in resolvers if we have them + if self.has_resolvers: + return self.resolvers[key] + + # if we're here that means that we have no locals and we also have + # no resolvers + assert not is_local and not self.has_resolvers + return self.scope[key] + except KeyError: + try: + # last ditch effort we look in temporaries + # these are created when parsing indexing expressions + # e.g., df[df > 0] + return self.temps[key] + except KeyError as err: + raise UndefinedVariableError(key, is_local) from err + + def swapkey(self, old_key: str, new_key: str, new_value=None) -> None: + """ + Replace a variable name, with a potentially new value. + + Parameters + ---------- + old_key : str + Current variable name to replace + new_key : str + New variable name to replace `old_key` with + new_value : object + Value to be replaced along with the possible renaming + """ + if self.has_resolvers: + maps = self.resolvers.maps + self.scope.maps + else: + maps = self.scope.maps + + maps.append(self.temps) + + for mapping in maps: + if old_key in mapping: + mapping[new_key] = new_value + return + + def _get_vars(self, stack, scopes: list[str]) -> None: + """ + Get specifically scoped variables from a list of stack frames. + + Parameters + ---------- + stack : list + A list of stack frames as returned by ``inspect.stack()`` + scopes : sequence of strings + A sequence containing valid stack frame attribute names that + evaluate to a dictionary. For example, ('locals', 'globals') + """ + variables = itertools.product(scopes, stack) + for scope, (frame, _, _, _, _, _) in variables: + try: + d = getattr(frame, f"f_{scope}") + self.scope = DeepChainMap(self.scope.new_child(d)) + finally: + # won't remove it, but DECREF it + # in Py3 this probably isn't necessary since frame won't be + # scope after the loop + del frame + + def _update(self, level: int) -> None: + """ + Update the current scope by going back `level` levels. + + Parameters + ---------- + level : int + """ + sl = level + 1 + + # add sl frames to the scope starting with the + # most distant and overwriting with more current + # makes sure that we can capture variable scope + stack = inspect.stack() + + try: + self._get_vars(stack[:sl], scopes=["locals"]) + finally: + del stack[:], stack + + def add_tmp(self, value) -> str: + """ + Add a temporary variable to the scope. + + Parameters + ---------- + value : object + An arbitrary object to be assigned to a temporary variable. + + Returns + ------- + str + The name of the temporary variable created. + """ + name = f"{type(value).__name__}_{self.ntemps}_{_raw_hex_id(self)}" + + # add to inner most scope + assert name not in self.temps + self.temps[name] = value + assert name in self.temps + + # only increment if the variable gets put in the scope + return name + + @property + def ntemps(self) -> int: + """The number of temporary variables in this scope""" + return len(self.temps) + + @property + def full_scope(self) -> DeepChainMap: + """ + Return the full scope for use with passing to engines transparently + as a mapping. + + Returns + ------- + vars : DeepChainMap + All variables in this scope. 
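+
+        Notes
+        -----
+        Maps are chained as ``[temps] + resolvers + scope``, so temporaries
+        shadow resolvers, which in turn shadow the regular scope.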
+ """ + maps = [self.temps] + self.resolvers.maps + self.scope.maps + return DeepChainMap(*maps) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/config_init.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/config_init.py new file mode 100644 index 00000000..765b24fb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/config_init.py @@ -0,0 +1,903 @@ +""" +This module is imported from the pandas package __init__.py file +in order to ensure that the core.config options registered here will +be available as soon as the user loads the package. if register_option +is invoked inside specific modules, they will not be registered until that +module is imported, which may or may not be a problem. + +If you need to make sure options are available even before a certain +module is imported, register them here rather than in the module. + +""" +from __future__ import annotations + +import os +from typing import Callable + +import pandas._config.config as cf +from pandas._config.config import ( + is_bool, + is_callable, + is_instance_factory, + is_int, + is_nonnegative_int, + is_one_of_factory, + is_str, + is_text, +) + +# compute + +use_bottleneck_doc = """ +: bool + Use the bottleneck library to accelerate if it is installed, + the default is True + Valid values: False,True +""" + + +def use_bottleneck_cb(key) -> None: + from pandas.core import nanops + + nanops.set_use_bottleneck(cf.get_option(key)) + + +use_numexpr_doc = """ +: bool + Use the numexpr library to accelerate computation if it is installed, + the default is True + Valid values: False,True +""" + + +def use_numexpr_cb(key) -> None: + from pandas.core.computation import expressions + + expressions.set_use_numexpr(cf.get_option(key)) + + +use_numba_doc = """ +: bool + Use the numba engine option for select operations if it is installed, + the default is False + Valid values: False,True +""" + + +def use_numba_cb(key) -> None: + from pandas.core.util import numba_ + + numba_.set_use_numba(cf.get_option(key)) + + +with cf.config_prefix("compute"): + cf.register_option( + "use_bottleneck", + True, + use_bottleneck_doc, + validator=is_bool, + cb=use_bottleneck_cb, + ) + cf.register_option( + "use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb + ) + cf.register_option( + "use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb + ) +# +# options from the "display" namespace + +pc_precision_doc = """ +: int + Floating point output precision in terms of number of places after the + decimal, for regular formatting as well as scientific notation. Similar + to ``precision`` in :meth:`numpy.set_printoptions`. +""" + +pc_colspace_doc = """ +: int + Default space for DataFrame columns. +""" + +pc_max_rows_doc = """ +: int + If max_rows is exceeded, switch to truncate view. Depending on + `large_repr`, objects are either centrally truncated or printed as + a summary view. 'None' value means unlimited. + + In case python/IPython is running in a terminal and `large_repr` + equals 'truncate' this can be set to 0 and pandas will auto-detect + the height of the terminal and print a truncated object which fits + the screen height. The IPython notebook, IPython qtconsole, or + IDLE do not run in a terminal and hence it is not possible to do + correct auto-detection. +""" + +pc_min_rows_doc = """ +: int + The numbers of rows to show in a truncated view (when `max_rows` is + exceeded). Ignored when `max_rows` is set to None or 0. When set to + None, follows the value of `max_rows`. 
+""" + +pc_max_cols_doc = """ +: int + If max_cols is exceeded, switch to truncate view. Depending on + `large_repr`, objects are either centrally truncated or printed as + a summary view. 'None' value means unlimited. + + In case python/IPython is running in a terminal and `large_repr` + equals 'truncate' this can be set to 0 or None and pandas will auto-detect + the width of the terminal and print a truncated object which fits + the screen width. The IPython notebook, IPython qtconsole, or IDLE + do not run in a terminal and hence it is not possible to do + correct auto-detection and defaults to 20. +""" + +pc_max_categories_doc = """ +: int + This sets the maximum number of categories pandas should output when + printing out a `Categorical` or a Series of dtype "category". +""" + +pc_max_info_cols_doc = """ +: int + max_info_columns is used in DataFrame.info method to decide if + per column information will be printed. +""" + +pc_nb_repr_h_doc = """ +: boolean + When True, IPython notebook will use html representation for + pandas objects (if it is available). +""" + +pc_pprint_nest_depth = """ +: int + Controls the number of nested levels to process when pretty-printing +""" + +pc_multi_sparse_doc = """ +: boolean + "sparsify" MultiIndex display (don't display repeated + elements in outer levels within groups) +""" + +float_format_doc = """ +: callable + The callable should accept a floating point number and return + a string with the desired format of the number. This is used + in some places like SeriesFormatter. + See formats.format.EngFormatter for an example. +""" + +max_colwidth_doc = """ +: int or None + The maximum width in characters of a column in the repr of + a pandas data structure. When the column overflows, a "..." + placeholder is embedded in the output. A 'None' value means unlimited. +""" + +colheader_justify_doc = """ +: 'left'/'right' + Controls the justification of column headers. used by DataFrameFormatter. +""" + +pc_expand_repr_doc = """ +: boolean + Whether to print out the full DataFrame repr for wide DataFrames across + multiple lines, `max_columns` is still respected, but the output will + wrap-around across multiple "pages" if its width exceeds `display.width`. +""" + +pc_show_dimensions_doc = """ +: boolean or 'truncate' + Whether to print out dimensions at the end of DataFrame repr. + If 'truncate' is specified, only print out the dimensions if the + frame is truncated (e.g. not display all rows and/or columns) +""" + +pc_east_asian_width_doc = """ +: boolean + Whether to use the Unicode East Asian Width to calculate the display text + width. + Enabling this may affect to the performance (default: False) +""" + +pc_ambiguous_as_wide_doc = """ +: boolean + Whether to handle Unicode characters belong to Ambiguous as Wide (width=2) + (default: False) +""" + +pc_table_schema_doc = """ +: boolean + Whether to publish a Table Schema representation for frontends + that support it. + (default: False) +""" + +pc_html_border_doc = """ +: int + A ``border=value`` attribute is inserted in the ```` tag + for the DataFrame HTML repr. +""" + +pc_html_use_mathjax_doc = """\ +: boolean + When True, Jupyter notebook will process table contents using MathJax, + rendering mathematical expressions enclosed by the dollar symbol. + (default: True) +""" + +pc_max_dir_items = """\ +: int + The number of items that will be added to `dir(...)`. 'None' value means + unlimited. 
Because dir is cached, changing this option will not immediately + affect already existing dataframes until a column is deleted or added. + + This is for instance used to suggest columns from a dataframe to tab + completion. +""" + +pc_width_doc = """ +: int + Width of the display in characters. In case python/IPython is running in + a terminal this can be set to None and pandas will correctly auto-detect + the width. + Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a + terminal and hence it is not possible to correctly detect the width. +""" + +pc_chop_threshold_doc = """ +: float or None + if set to a float value, all float values smaller than the given threshold + will be displayed as exactly 0 by repr and friends. +""" + +pc_max_seq_items = """ +: int or None + When pretty-printing a long sequence, no more then `max_seq_items` + will be printed. If items are omitted, they will be denoted by the + addition of "..." to the resulting string. + + If set to None, the number of items to be printed is unlimited. +""" + +pc_max_info_rows_doc = """ +: int or None + df.info() will usually show null-counts for each column. + For large frames this can be quite slow. max_info_rows and max_info_cols + limit this null check only to frames with smaller dimensions than + specified. +""" + +pc_large_repr_doc = """ +: 'truncate'/'info' + For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can + show a truncated table, or switch to the view from + df.info() (the behaviour in earlier versions of pandas). +""" + +pc_memory_usage_doc = """ +: bool, string or None + This specifies if the memory usage of a DataFrame should be displayed when + df.info() is called. Valid values True,False,'deep' +""" + + +def table_schema_cb(key) -> None: + from pandas.io.formats.printing import enable_data_resource_formatter + + enable_data_resource_formatter(cf.get_option(key)) + + +def is_terminal() -> bool: + """ + Detect if Python is running in a terminal. + + Returns True if Python is running in a terminal or False if not. 
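+
+    The check is heuristic: an undefined ``get_ipython`` means a plain
+    interpreter (assumed to be a terminal), while an IPython shell that
+    exposes a ``kernel`` attribute is treated as a Jupyter frontend.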
+ """ + try: + # error: Name 'get_ipython' is not defined + ip = get_ipython() # type: ignore[name-defined] + except NameError: # assume standard Python interpreter in a terminal + return True + else: + if hasattr(ip, "kernel"): # IPython as a Jupyter kernel + return False + else: # IPython in a terminal + return True + + +with cf.config_prefix("display"): + cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int) + cf.register_option( + "float_format", + None, + float_format_doc, + validator=is_one_of_factory([None, is_callable]), + ) + cf.register_option( + "max_info_rows", + 1690785, + pc_max_info_rows_doc, + validator=is_instance_factory((int, type(None))), + ) + cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int) + cf.register_option( + "min_rows", + 10, + pc_min_rows_doc, + validator=is_instance_factory([type(None), int]), + ) + cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int) + + cf.register_option( + "max_colwidth", + 50, + max_colwidth_doc, + validator=is_nonnegative_int, + ) + if is_terminal(): + max_cols = 0 # automatically determine optimal number of columns + else: + max_cols = 20 # cannot determine optimal number of columns + cf.register_option( + "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int + ) + cf.register_option( + "large_repr", + "truncate", + pc_large_repr_doc, + validator=is_one_of_factory(["truncate", "info"]), + ) + cf.register_option("max_info_columns", 100, pc_max_info_cols_doc, validator=is_int) + cf.register_option( + "colheader_justify", "right", colheader_justify_doc, validator=is_text + ) + cf.register_option("notebook_repr_html", True, pc_nb_repr_h_doc, validator=is_bool) + cf.register_option("pprint_nest_depth", 3, pc_pprint_nest_depth, validator=is_int) + cf.register_option("multi_sparse", True, pc_multi_sparse_doc, validator=is_bool) + cf.register_option("expand_frame_repr", True, pc_expand_repr_doc) + cf.register_option( + "show_dimensions", + "truncate", + pc_show_dimensions_doc, + validator=is_one_of_factory([True, False, "truncate"]), + ) + cf.register_option("chop_threshold", None, pc_chop_threshold_doc) + cf.register_option("max_seq_items", 100, pc_max_seq_items) + cf.register_option( + "width", 80, pc_width_doc, validator=is_instance_factory([type(None), int]) + ) + cf.register_option( + "memory_usage", + True, + pc_memory_usage_doc, + validator=is_one_of_factory([None, True, False, "deep"]), + ) + cf.register_option( + "unicode.east_asian_width", False, pc_east_asian_width_doc, validator=is_bool + ) + cf.register_option( + "unicode.ambiguous_as_wide", False, pc_east_asian_width_doc, validator=is_bool + ) + cf.register_option( + "html.table_schema", + False, + pc_table_schema_doc, + validator=is_bool, + cb=table_schema_cb, + ) + cf.register_option("html.border", 1, pc_html_border_doc, validator=is_int) + cf.register_option( + "html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool + ) + cf.register_option( + "max_dir_items", 100, pc_max_dir_items, validator=is_nonnegative_int + ) + +tc_sim_interactive_doc = """ +: boolean + Whether to simulate interactive mode for purposes of testing +""" + +with cf.config_prefix("mode"): + cf.register_option("sim_interactive", False, tc_sim_interactive_doc) + +use_inf_as_na_doc = """ +: boolean + True means treat None, NaN, INF, -INF as NA (old way), + False means None and NaN are null, but INF, -INF are not NA + (new way). 
+ + This option is deprecated in pandas 2.1.0 and will be removed in 3.0. +""" + +# We don't want to start importing everything at the global context level +# or we'll hit circular deps. + + +def use_inf_as_na_cb(key) -> None: + from pandas.core.dtypes.missing import _use_inf_as_na + + _use_inf_as_na(key) + + +with cf.config_prefix("mode"): + cf.register_option("use_inf_as_na", False, use_inf_as_na_doc, cb=use_inf_as_na_cb) + +cf.deprecate_option( + # GH#51684 + "mode.use_inf_as_na", + "use_inf_as_na option is deprecated and will be removed in a future " + "version. Convert inf values to NaN before operating instead.", +) + +data_manager_doc = """ +: string + Internal data manager type; can be "block" or "array". Defaults to "block", + unless overridden by the 'PANDAS_DATA_MANAGER' environment variable (needs + to be set before pandas is imported). +""" + + +with cf.config_prefix("mode"): + cf.register_option( + "data_manager", + # Get the default from an environment variable, if set, otherwise defaults + # to "block". This environment variable can be set for testing. + os.environ.get("PANDAS_DATA_MANAGER", "block"), + data_manager_doc, + validator=is_one_of_factory(["block", "array"]), + ) + + +# TODO better name? +copy_on_write_doc = """ +: bool + Use new copy-view behaviour using Copy-on-Write. Defaults to False, + unless overridden by the 'PANDAS_COPY_ON_WRITE' environment variable + (if set to "1" for True, needs to be set before pandas is imported). +""" + + +with cf.config_prefix("mode"): + cf.register_option( + "copy_on_write", + # Get the default from an environment variable, if set, otherwise defaults + # to False. This environment variable can be set for testing. + os.environ.get("PANDAS_COPY_ON_WRITE", "0") == "1", + copy_on_write_doc, + validator=is_bool, + ) + + +# user warnings +chained_assignment = """ +: string + Raise an exception, warn, or no action if trying to use chained assignment, + The default is warn +""" + +with cf.config_prefix("mode"): + cf.register_option( + "chained_assignment", + "warn", + chained_assignment, + validator=is_one_of_factory([None, "warn", "raise"]), + ) + + +string_storage_doc = """ +: string + The default storage for StringDtype. This option is ignored if + ``future.infer_string`` is set to True. +""" + +with cf.config_prefix("mode"): + cf.register_option( + "string_storage", + "python", + string_storage_doc, + validator=is_one_of_factory(["python", "pyarrow", "pyarrow_numpy"]), + ) + + +# Set up the io.excel specific reader configuration. +reader_engine_doc = """ +: string + The default Excel reader engine for '{ext}' files. Available options: + auto, {others}. 
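+    Set with ``pd.set_option("io.excel.{ext}.reader", ...)``.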
+""" + +_xls_options = ["xlrd"] +_xlsm_options = ["xlrd", "openpyxl"] +_xlsx_options = ["xlrd", "openpyxl"] +_ods_options = ["odf"] +_xlsb_options = ["pyxlsb"] + + +with cf.config_prefix("io.excel.xls"): + cf.register_option( + "reader", + "auto", + reader_engine_doc.format(ext="xls", others=", ".join(_xls_options)), + validator=is_one_of_factory(_xls_options + ["auto"]), + ) + +with cf.config_prefix("io.excel.xlsm"): + cf.register_option( + "reader", + "auto", + reader_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)), + validator=is_one_of_factory(_xlsm_options + ["auto"]), + ) + + +with cf.config_prefix("io.excel.xlsx"): + cf.register_option( + "reader", + "auto", + reader_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)), + validator=is_one_of_factory(_xlsx_options + ["auto"]), + ) + + +with cf.config_prefix("io.excel.ods"): + cf.register_option( + "reader", + "auto", + reader_engine_doc.format(ext="ods", others=", ".join(_ods_options)), + validator=is_one_of_factory(_ods_options + ["auto"]), + ) + +with cf.config_prefix("io.excel.xlsb"): + cf.register_option( + "reader", + "auto", + reader_engine_doc.format(ext="xlsb", others=", ".join(_xlsb_options)), + validator=is_one_of_factory(_xlsb_options + ["auto"]), + ) + +# Set up the io.excel specific writer configuration. +writer_engine_doc = """ +: string + The default Excel writer engine for '{ext}' files. Available options: + auto, {others}. +""" + +_xlsm_options = ["openpyxl"] +_xlsx_options = ["openpyxl", "xlsxwriter"] +_ods_options = ["odf"] + + +with cf.config_prefix("io.excel.xlsm"): + cf.register_option( + "writer", + "auto", + writer_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)), + validator=str, + ) + + +with cf.config_prefix("io.excel.xlsx"): + cf.register_option( + "writer", + "auto", + writer_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)), + validator=str, + ) + + +with cf.config_prefix("io.excel.ods"): + cf.register_option( + "writer", + "auto", + writer_engine_doc.format(ext="ods", others=", ".join(_ods_options)), + validator=str, + ) + + +# Set up the io.parquet specific configuration. +parquet_engine_doc = """ +: string + The default parquet reader/writer engine. Available options: + 'auto', 'pyarrow', 'fastparquet', the default is 'auto' +""" + +with cf.config_prefix("io.parquet"): + cf.register_option( + "engine", + "auto", + parquet_engine_doc, + validator=is_one_of_factory(["auto", "pyarrow", "fastparquet"]), + ) + + +# Set up the io.sql specific configuration. +sql_engine_doc = """ +: string + The default sql reader/writer engine. Available options: + 'auto', 'sqlalchemy', the default is 'auto' +""" + +with cf.config_prefix("io.sql"): + cf.register_option( + "engine", + "auto", + sql_engine_doc, + validator=is_one_of_factory(["auto", "sqlalchemy"]), + ) + +# -------- +# Plotting +# --------- + +plotting_backend_doc = """ +: str + The plotting backend to use. The default value is "matplotlib", the + backend provided with pandas. Other backends can be specified by + providing the name of the module that implements the backend. 
+""" + + +def register_plotting_backend_cb(key) -> None: + if key == "matplotlib": + # We defer matplotlib validation, since it's the default + return + from pandas.plotting._core import _get_plot_backend + + _get_plot_backend(key) + + +with cf.config_prefix("plotting"): + cf.register_option( + "backend", + defval="matplotlib", + doc=plotting_backend_doc, + validator=register_plotting_backend_cb, + ) + + +register_converter_doc = """ +: bool or 'auto'. + Whether to register converters with matplotlib's units registry for + dates, times, datetimes, and Periods. Toggling to False will remove + the converters, restoring any converters that pandas overwrote. +""" + + +def register_converter_cb(key) -> None: + from pandas.plotting import ( + deregister_matplotlib_converters, + register_matplotlib_converters, + ) + + if cf.get_option(key): + register_matplotlib_converters() + else: + deregister_matplotlib_converters() + + +with cf.config_prefix("plotting.matplotlib"): + cf.register_option( + "register_converters", + "auto", + register_converter_doc, + validator=is_one_of_factory(["auto", True, False]), + cb=register_converter_cb, + ) + +# ------ +# Styler +# ------ + +styler_sparse_index_doc = """ +: bool + Whether to sparsify the display of a hierarchical index. Setting to False will + display each explicit level element in a hierarchical key for each row. +""" + +styler_sparse_columns_doc = """ +: bool + Whether to sparsify the display of hierarchical columns. Setting to False will + display each explicit level element in a hierarchical key for each column. +""" + +styler_render_repr = """ +: str + Determine which output to use in Jupyter Notebook in {"html", "latex"}. +""" + +styler_max_elements = """ +: int + The maximum number of data-cell (", indent) + + if self.fmt.header: + self._write_col_header(indent + self.indent_delta) + + if self.show_row_idx_names: + self._write_row_header(indent + self.indent_delta) + + self.write("", indent) + + def _get_formatted_values(self) -> dict[int, list[str]]: + with option_context("display.max_colwidth", None): + fmt_values = {i: self.fmt.format_col(i) for i in range(self.ncols)} + return fmt_values + + def _write_body(self, indent: int) -> None: + self.write("", indent) + fmt_values = self._get_formatted_values() + + # write values + if self.fmt.index and isinstance(self.frame.index, MultiIndex): + self._write_hierarchical_rows(fmt_values, indent + self.indent_delta) + else: + self._write_regular_rows(fmt_values, indent + self.indent_delta) + + self.write("", indent) + + def _write_regular_rows( + self, fmt_values: Mapping[int, list[str]], indent: int + ) -> None: + is_truncated_horizontally = self.fmt.is_truncated_horizontally + is_truncated_vertically = self.fmt.is_truncated_vertically + + nrows = len(self.fmt.tr_frame) + + if self.fmt.index: + fmt = self.fmt._get_formatter("__index__") + if fmt is not None: + index_values = self.fmt.tr_frame.index.map(fmt) + else: + index_values = self.fmt.tr_frame.index.format() + + row: list[str] = [] + for i in range(nrows): + if is_truncated_vertically and i == (self.fmt.tr_row_num): + str_sep_row = ["..."] * len(row) + self.write_tr( + str_sep_row, + indent, + self.indent_delta, + tags=None, + nindex_levels=self.row_levels, + ) + + row = [] + if self.fmt.index: + row.append(index_values[i]) + # see gh-22579 + # Column misalignment also occurs for + # a standard index when the columns index is named. + # Add blank cell before data cells. 
+ elif self.show_col_idx_names: + row.append("") + row.extend(fmt_values[j][i] for j in range(self.ncols)) + + if is_truncated_horizontally: + dot_col_ix = self.fmt.tr_col_num + self.row_levels + row.insert(dot_col_ix, "...") + self.write_tr( + row, indent, self.indent_delta, tags=None, nindex_levels=self.row_levels + ) + + def _write_hierarchical_rows( + self, fmt_values: Mapping[int, list[str]], indent: int + ) -> None: + template = 'rowspan="{span}" valign="top"' + + is_truncated_horizontally = self.fmt.is_truncated_horizontally + is_truncated_vertically = self.fmt.is_truncated_vertically + frame = self.fmt.tr_frame + nrows = len(frame) + + assert isinstance(frame.index, MultiIndex) + idx_values = frame.index.format(sparsify=False, adjoin=False, names=False) + idx_values = list(zip(*idx_values)) + + if self.fmt.sparsify: + # GH3547 + sentinel = lib.no_default + levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False) + + level_lengths = get_level_lengths(levels, sentinel) + inner_lvl = len(level_lengths) - 1 + if is_truncated_vertically: + # Insert ... row and adjust idx_values and + # level_lengths to take this into account. + ins_row = self.fmt.tr_row_num + inserted = False + for lnum, records in enumerate(level_lengths): + rec_new = {} + for tag, span in list(records.items()): + if tag >= ins_row: + rec_new[tag + 1] = span + elif tag + span > ins_row: + rec_new[tag] = span + 1 + + # GH 14882 - Make sure insertion done once + if not inserted: + dot_row = list(idx_values[ins_row - 1]) + dot_row[-1] = "..." + idx_values.insert(ins_row, tuple(dot_row)) + inserted = True + else: + dot_row = list(idx_values[ins_row]) + dot_row[inner_lvl - lnum] = "..." + idx_values[ins_row] = tuple(dot_row) + else: + rec_new[tag] = span + # If ins_row lies between tags, all cols idx cols + # receive ... + if tag + span == ins_row: + rec_new[ins_row] = 1 + if lnum == 0: + idx_values.insert( + ins_row, tuple(["..."] * len(level_lengths)) + ) + + # GH 14882 - Place ... in correct level + elif inserted: + dot_row = list(idx_values[ins_row]) + dot_row[inner_lvl - lnum] = "..." + idx_values[ins_row] = tuple(dot_row) + level_lengths[lnum] = rec_new + + level_lengths[inner_lvl][ins_row] = 1 + for ix_col in fmt_values: + fmt_values[ix_col].insert(ins_row, "...") + nrows += 1 + + for i in range(nrows): + row = [] + tags = {} + + sparse_offset = 0 + j = 0 + for records, v in zip(level_lengths, idx_values[i]): + if i in records: + if records[i] > 1: + tags[j] = template.format(span=records[i]) + else: + sparse_offset += 1 + continue + + j += 1 + row.append(v) + + row.extend(fmt_values[j][i] for j in range(self.ncols)) + if is_truncated_horizontally: + row.insert( + self.row_levels - sparse_offset + self.fmt.tr_col_num, "..." 
+ ) + self.write_tr( + row, + indent, + self.indent_delta, + tags=tags, + nindex_levels=len(levels) - sparse_offset, + ) + else: + row = [] + for i in range(len(frame)): + if is_truncated_vertically and i == (self.fmt.tr_row_num): + str_sep_row = ["..."] * len(row) + self.write_tr( + str_sep_row, + indent, + self.indent_delta, + tags=None, + nindex_levels=self.row_levels, + ) + + idx_values = list( + zip(*frame.index.format(sparsify=False, adjoin=False, names=False)) + ) + row = [] + row.extend(idx_values[i]) + row.extend(fmt_values[j][i] for j in range(self.ncols)) + if is_truncated_horizontally: + row.insert(self.row_levels + self.fmt.tr_col_num, "...") + self.write_tr( + row, + indent, + self.indent_delta, + tags=None, + nindex_levels=frame.index.nlevels, + ) + + +class NotebookFormatter(HTMLFormatter): + """ + Internal class for formatting output data in html for display in Jupyter + Notebooks. This class is intended for functionality specific to + DataFrame._repr_html_() and DataFrame.to_html(notebook=True) + """ + + def _get_formatted_values(self) -> dict[int, list[str]]: + return {i: self.fmt.format_col(i) for i in range(self.ncols)} + + def _get_columns_formatted_values(self) -> list[str]: + return self.columns.format() + + def write_style(self) -> None: + # We use the "scoped" attribute here so that the desired + # style properties for the data frame are not then applied + # throughout the entire notebook. + template_first = """\ + """ + template_select = """\ + .dataframe %s { + %s: %s; + }""" + element_props = [ + ("tbody tr th:only-of-type", "vertical-align", "middle"), + ("tbody tr th", "vertical-align", "top"), + ] + if isinstance(self.columns, MultiIndex): + element_props.append(("thead tr th", "text-align", "left")) + if self.show_row_idx_names: + element_props.append( + ("thead tr:last-of-type th", "text-align", "right") + ) + else: + element_props.append(("thead th", "text-align", "right")) + template_mid = "\n\n".join(template_select % t for t in element_props) + template = dedent("\n".join((template_first, template_mid, template_last))) + self.write(template) + + def render(self) -> list[str]: + self.write("
") + self.write_style() + super().render() + self.write("
") + return self.elements diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/info.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/info.py new file mode 100644 index 00000000..d20c2a62 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/info.py @@ -0,0 +1,1101 @@ +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +import sys +from textwrap import dedent +from typing import TYPE_CHECKING + +from pandas._config import get_option + +from pandas.io.formats import format as fmt +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Iterator, + Mapping, + Sequence, + ) + + from pandas._typing import ( + Dtype, + WriteBuffer, + ) + + from pandas import ( + DataFrame, + Index, + Series, + ) + + +frame_max_cols_sub = dedent( + """\ + max_cols : int, optional + When to switch from the verbose to the truncated output. If the + DataFrame has more than `max_cols` columns, the truncated output + is used. By default, the setting in + ``pandas.options.display.max_info_columns`` is used.""" +) + + +show_counts_sub = dedent( + """\ + show_counts : bool, optional + Whether to show the non-null counts. By default, this is shown + only if the DataFrame is smaller than + ``pandas.options.display.max_info_rows`` and + ``pandas.options.display.max_info_columns``. A value of True always + shows the counts, and False never shows the counts.""" +) + + +frame_examples_sub = dedent( + """\ + >>> int_values = [1, 2, 3, 4, 5] + >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon'] + >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0] + >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values, + ... "float_col": float_values}) + >>> df + int_col text_col float_col + 0 1 alpha 0.00 + 1 2 beta 0.25 + 2 3 gamma 0.50 + 3 4 delta 0.75 + 4 5 epsilon 1.00 + + Prints information of all columns: + + >>> df.info(verbose=True) + + RangeIndex: 5 entries, 0 to 4 + Data columns (total 3 columns): + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 int_col 5 non-null int64 + 1 text_col 5 non-null object + 2 float_col 5 non-null float64 + dtypes: float64(1), int64(1), object(1) + memory usage: 248.0+ bytes + + Prints a summary of columns count and its dtypes but not per column + information: + + >>> df.info(verbose=False) + + RangeIndex: 5 entries, 0 to 4 + Columns: 3 entries, int_col to float_col + dtypes: float64(1), int64(1), object(1) + memory usage: 248.0+ bytes + + Pipe output of DataFrame.info to buffer instead of sys.stdout, get + buffer content and writes to a text file: + + >>> import io + >>> buffer = io.StringIO() + >>> df.info(buf=buffer) + >>> s = buffer.getvalue() + >>> with open("df_info.txt", "w", + ... encoding="utf-8") as f: # doctest: +SKIP + ... f.write(s) + 260 + + The `memory_usage` parameter allows deep introspection mode, specially + useful for big DataFrames and fine-tune memory optimization: + + >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6) + >>> df = pd.DataFrame({ + ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6), + ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6), + ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6) + ... 
}) + >>> df.info() + + RangeIndex: 1000000 entries, 0 to 999999 + Data columns (total 3 columns): + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 column_1 1000000 non-null object + 1 column_2 1000000 non-null object + 2 column_3 1000000 non-null object + dtypes: object(3) + memory usage: 22.9+ MB + + >>> df.info(memory_usage='deep') + + RangeIndex: 1000000 entries, 0 to 999999 + Data columns (total 3 columns): + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 column_1 1000000 non-null object + 1 column_2 1000000 non-null object + 2 column_3 1000000 non-null object + dtypes: object(3) + memory usage: 165.9 MB""" +) + + +frame_see_also_sub = dedent( + """\ + DataFrame.describe: Generate descriptive statistics of DataFrame + columns. + DataFrame.memory_usage: Memory usage of DataFrame columns.""" +) + + +frame_sub_kwargs = { + "klass": "DataFrame", + "type_sub": " and columns", + "max_cols_sub": frame_max_cols_sub, + "show_counts_sub": show_counts_sub, + "examples_sub": frame_examples_sub, + "see_also_sub": frame_see_also_sub, + "version_added_sub": "", +} + + +series_examples_sub = dedent( + """\ + >>> int_values = [1, 2, 3, 4, 5] + >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon'] + >>> s = pd.Series(text_values, index=int_values) + >>> s.info() + + Index: 5 entries, 1 to 5 + Series name: None + Non-Null Count Dtype + -------------- ----- + 5 non-null object + dtypes: object(1) + memory usage: 80.0+ bytes + + Prints a summary excluding information about its values: + + >>> s.info(verbose=False) + + Index: 5 entries, 1 to 5 + dtypes: object(1) + memory usage: 80.0+ bytes + + Pipe output of Series.info to buffer instead of sys.stdout, get + buffer content and writes to a text file: + + >>> import io + >>> buffer = io.StringIO() + >>> s.info(buf=buffer) + >>> s = buffer.getvalue() + >>> with open("df_info.txt", "w", + ... encoding="utf-8") as f: # doctest: +SKIP + ... f.write(s) + 260 + + The `memory_usage` parameter allows deep introspection mode, specially + useful for big Series and fine-tune memory optimization: + + >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6) + >>> s = pd.Series(np.random.choice(['a', 'b', 'c'], 10 ** 6)) + >>> s.info() + + RangeIndex: 1000000 entries, 0 to 999999 + Series name: None + Non-Null Count Dtype + -------------- ----- + 1000000 non-null object + dtypes: object(1) + memory usage: 7.6+ MB + + >>> s.info(memory_usage='deep') + + RangeIndex: 1000000 entries, 0 to 999999 + Series name: None + Non-Null Count Dtype + -------------- ----- + 1000000 non-null object + dtypes: object(1) + memory usage: 55.3 MB""" +) + + +series_see_also_sub = dedent( + """\ + Series.describe: Generate descriptive statistics of Series. + Series.memory_usage: Memory usage of Series.""" +) + + +series_sub_kwargs = { + "klass": "Series", + "type_sub": "", + "max_cols_sub": "", + "show_counts_sub": show_counts_sub, + "examples_sub": series_examples_sub, + "see_also_sub": series_see_also_sub, + "version_added_sub": "\n.. versionadded:: 1.4.0\n", +} + + +INFO_DOCSTRING = dedent( + """ + Print a concise summary of a {klass}. + + This method prints information about a {klass} including + the index dtype{type_sub}, non-null values and memory usage. + {version_added_sub}\ + + Parameters + ---------- + verbose : bool, optional + Whether to print the full summary. By default, the setting in + ``pandas.options.display.max_info_columns`` is followed. 
+ buf : writable buffer, defaults to sys.stdout + Where to send the output. By default, the output is printed to + sys.stdout. Pass a writable buffer if you need to further process + the output. + {max_cols_sub} + memory_usage : bool, str, optional + Specifies whether total memory usage of the {klass} + elements (including the index) should be displayed. By default, + this follows the ``pandas.options.display.memory_usage`` setting. + + True always show memory usage. False never shows memory usage. + A value of 'deep' is equivalent to "True with deep introspection". + Memory usage is shown in human-readable units (base-2 + representation). Without deep introspection a memory estimation is + made based in column dtype and number of rows assuming values + consume the same memory amount for corresponding dtypes. With deep + memory introspection, a real memory usage calculation is performed + at the cost of computational resources. See the + :ref:`Frequently Asked Questions ` for more + details. + {show_counts_sub} + + Returns + ------- + None + This method prints a summary of a {klass} and returns None. + + See Also + -------- + {see_also_sub} + + Examples + -------- + {examples_sub} + """ +) + + +def _put_str(s: str | Dtype, space: int) -> str: + """ + Make string of specified length, padding to the right if necessary. + + Parameters + ---------- + s : Union[str, Dtype] + String to be formatted. + space : int + Length to force string to be of. + + Returns + ------- + str + String coerced to given length. + + Examples + -------- + >>> pd.io.formats.info._put_str("panda", 6) + 'panda ' + >>> pd.io.formats.info._put_str("panda", 4) + 'pand' + """ + return str(s)[:space].ljust(space) + + +def _sizeof_fmt(num: float, size_qualifier: str) -> str: + """ + Return size in human readable format. + + Parameters + ---------- + num : int + Size in bytes. + size_qualifier : str + Either empty, or '+' (if lower bound). + + Returns + ------- + str + Size in human readable format. + + Examples + -------- + >>> _sizeof_fmt(23028, '') + '22.5 KB' + + >>> _sizeof_fmt(23028, '+') + '22.5+ KB' + """ + for x in ["bytes", "KB", "MB", "GB", "TB"]: + if num < 1024.0: + return f"{num:3.1f}{size_qualifier} {x}" + num /= 1024.0 + return f"{num:3.1f}{size_qualifier} PB" + + +def _initialize_memory_usage( + memory_usage: bool | str | None = None, +) -> bool | str: + """Get memory usage based on inputs and display options.""" + if memory_usage is None: + memory_usage = get_option("display.memory_usage") + return memory_usage + + +class BaseInfo(ABC): + """ + Base class for DataFrameInfo and SeriesInfo. + + Parameters + ---------- + data : DataFrame or Series + Either dataframe or series. + memory_usage : bool or str, optional + If "deep", introspect the data deeply by interrogating object dtypes + for system-level memory consumption, and include it in the returned + values. + """ + + data: DataFrame | Series + memory_usage: bool | str + + @property + @abstractmethod + def dtypes(self) -> Iterable[Dtype]: + """ + Dtypes. + + Returns + ------- + dtypes : sequence + Dtype of each of the DataFrame's columns (or one series column). + """ + + @property + @abstractmethod + def dtype_counts(self) -> Mapping[str, int]: + """Mapping dtype - number of counts.""" + + @property + @abstractmethod + def non_null_counts(self) -> Sequence[int]: + """Sequence of non-null counts for all columns or column (if series).""" + + @property + @abstractmethod + def memory_usage_bytes(self) -> int: + """ + Memory usage in bytes. 
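+
+        When ``memory_usage`` is ``"deep"``, object dtypes are introspected
+        for their real footprint; otherwise a faster estimate based on
+        dtype and row count is used.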
+ + Returns + ------- + memory_usage_bytes : int + Object's total memory usage in bytes. + """ + + @property + def memory_usage_string(self) -> str: + """Memory usage in a form of human readable string.""" + return f"{_sizeof_fmt(self.memory_usage_bytes, self.size_qualifier)}\n" + + @property + def size_qualifier(self) -> str: + size_qualifier = "" + if self.memory_usage: + if self.memory_usage != "deep": + # size_qualifier is just a best effort; not guaranteed to catch + # all cases (e.g., it misses categorical data even with object + # categories) + if ( + "object" in self.dtype_counts + or self.data.index._is_memory_usage_qualified() + ): + size_qualifier = "+" + return size_qualifier + + @abstractmethod + def render( + self, + *, + buf: WriteBuffer[str] | None, + max_cols: int | None, + verbose: bool | None, + show_counts: bool | None, + ) -> None: + pass + + +class DataFrameInfo(BaseInfo): + """ + Class storing dataframe-specific info. + """ + + def __init__( + self, + data: DataFrame, + memory_usage: bool | str | None = None, + ) -> None: + self.data: DataFrame = data + self.memory_usage = _initialize_memory_usage(memory_usage) + + @property + def dtype_counts(self) -> Mapping[str, int]: + return _get_dataframe_dtype_counts(self.data) + + @property + def dtypes(self) -> Iterable[Dtype]: + """ + Dtypes. + + Returns + ------- + dtypes + Dtype of each of the DataFrame's columns. + """ + return self.data.dtypes + + @property + def ids(self) -> Index: + """ + Column names. + + Returns + ------- + ids : Index + DataFrame's column names. + """ + return self.data.columns + + @property + def col_count(self) -> int: + """Number of columns to be summarized.""" + return len(self.ids) + + @property + def non_null_counts(self) -> Sequence[int]: + """Sequence of non-null counts for all columns or column (if series).""" + return self.data.count() + + @property + def memory_usage_bytes(self) -> int: + deep = self.memory_usage == "deep" + return self.data.memory_usage(index=True, deep=deep).sum() + + def render( + self, + *, + buf: WriteBuffer[str] | None, + max_cols: int | None, + verbose: bool | None, + show_counts: bool | None, + ) -> None: + printer = DataFrameInfoPrinter( + info=self, + max_cols=max_cols, + verbose=verbose, + show_counts=show_counts, + ) + printer.to_buffer(buf) + + +class SeriesInfo(BaseInfo): + """ + Class storing series-specific info. + """ + + def __init__( + self, + data: Series, + memory_usage: bool | str | None = None, + ) -> None: + self.data: Series = data + self.memory_usage = _initialize_memory_usage(memory_usage) + + def render( + self, + *, + buf: WriteBuffer[str] | None = None, + max_cols: int | None = None, + verbose: bool | None = None, + show_counts: bool | None = None, + ) -> None: + if max_cols is not None: + raise ValueError( + "Argument `max_cols` can only be passed " + "in DataFrame.info, not Series.info" + ) + printer = SeriesInfoPrinter( + info=self, + verbose=verbose, + show_counts=show_counts, + ) + printer.to_buffer(buf) + + @property + def non_null_counts(self) -> Sequence[int]: + return [self.data.count()] + + @property + def dtypes(self) -> Iterable[Dtype]: + return [self.data.dtypes] + + @property + def dtype_counts(self) -> Mapping[str, int]: + from pandas.core.frame import DataFrame + + return _get_dataframe_dtype_counts(DataFrame(self.data)) + + @property + def memory_usage_bytes(self) -> int: + """Memory usage in bytes. + + Returns + ------- + memory_usage_bytes : int + Object's total memory usage in bytes. 
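+
+        A usage sketch (the exact figure depends on platform and index, so
+        the doctest is skipped):
+
+        >>> info = SeriesInfo(pd.Series(["a", "b"]), memory_usage="deep")
+        >>> info.memory_usage_bytes  # doctest: +SKIP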
+ """ + deep = self.memory_usage == "deep" + return self.data.memory_usage(index=True, deep=deep) + + +class InfoPrinterAbstract: + """ + Class for printing dataframe or series info. + """ + + def to_buffer(self, buf: WriteBuffer[str] | None = None) -> None: + """Save dataframe info into buffer.""" + table_builder = self._create_table_builder() + lines = table_builder.get_lines() + if buf is None: # pragma: no cover + buf = sys.stdout + fmt.buffer_put_lines(buf, lines) + + @abstractmethod + def _create_table_builder(self) -> TableBuilderAbstract: + """Create instance of table builder.""" + + +class DataFrameInfoPrinter(InfoPrinterAbstract): + """ + Class for printing dataframe info. + + Parameters + ---------- + info : DataFrameInfo + Instance of DataFrameInfo. + max_cols : int, optional + When to switch from the verbose to the truncated output. + verbose : bool, optional + Whether to print the full summary. + show_counts : bool, optional + Whether to show the non-null counts. + """ + + def __init__( + self, + info: DataFrameInfo, + max_cols: int | None = None, + verbose: bool | None = None, + show_counts: bool | None = None, + ) -> None: + self.info = info + self.data = info.data + self.verbose = verbose + self.max_cols = self._initialize_max_cols(max_cols) + self.show_counts = self._initialize_show_counts(show_counts) + + @property + def max_rows(self) -> int: + """Maximum info rows to be displayed.""" + return get_option("display.max_info_rows", len(self.data) + 1) + + @property + def exceeds_info_cols(self) -> bool: + """Check if number of columns to be summarized does not exceed maximum.""" + return bool(self.col_count > self.max_cols) + + @property + def exceeds_info_rows(self) -> bool: + """Check if number of rows to be summarized does not exceed maximum.""" + return bool(len(self.data) > self.max_rows) + + @property + def col_count(self) -> int: + """Number of columns to be summarized.""" + return self.info.col_count + + def _initialize_max_cols(self, max_cols: int | None) -> int: + if max_cols is None: + return get_option("display.max_info_columns", self.col_count + 1) + return max_cols + + def _initialize_show_counts(self, show_counts: bool | None) -> bool: + if show_counts is None: + return bool(not self.exceeds_info_cols and not self.exceeds_info_rows) + else: + return show_counts + + def _create_table_builder(self) -> DataFrameTableBuilder: + """ + Create instance of table builder based on verbosity and display settings. + """ + if self.verbose: + return DataFrameTableBuilderVerbose( + info=self.info, + with_counts=self.show_counts, + ) + elif self.verbose is False: # specifically set to False, not necessarily None + return DataFrameTableBuilderNonVerbose(info=self.info) + elif self.exceeds_info_cols: + return DataFrameTableBuilderNonVerbose(info=self.info) + else: + return DataFrameTableBuilderVerbose( + info=self.info, + with_counts=self.show_counts, + ) + + +class SeriesInfoPrinter(InfoPrinterAbstract): + """Class for printing series info. + + Parameters + ---------- + info : SeriesInfo + Instance of SeriesInfo. + verbose : bool, optional + Whether to print the full summary. + show_counts : bool, optional + Whether to show the non-null counts. 
+ """ + + def __init__( + self, + info: SeriesInfo, + verbose: bool | None = None, + show_counts: bool | None = None, + ) -> None: + self.info = info + self.data = info.data + self.verbose = verbose + self.show_counts = self._initialize_show_counts(show_counts) + + def _create_table_builder(self) -> SeriesTableBuilder: + """ + Create instance of table builder based on verbosity. + """ + if self.verbose or self.verbose is None: + return SeriesTableBuilderVerbose( + info=self.info, + with_counts=self.show_counts, + ) + else: + return SeriesTableBuilderNonVerbose(info=self.info) + + def _initialize_show_counts(self, show_counts: bool | None) -> bool: + if show_counts is None: + return True + else: + return show_counts + + +class TableBuilderAbstract(ABC): + """ + Abstract builder for info table. + """ + + _lines: list[str] + info: BaseInfo + + @abstractmethod + def get_lines(self) -> list[str]: + """Product in a form of list of lines (strings).""" + + @property + def data(self) -> DataFrame | Series: + return self.info.data + + @property + def dtypes(self) -> Iterable[Dtype]: + """Dtypes of each of the DataFrame's columns.""" + return self.info.dtypes + + @property + def dtype_counts(self) -> Mapping[str, int]: + """Mapping dtype - number of counts.""" + return self.info.dtype_counts + + @property + def display_memory_usage(self) -> bool: + """Whether to display memory usage.""" + return bool(self.info.memory_usage) + + @property + def memory_usage_string(self) -> str: + """Memory usage string with proper size qualifier.""" + return self.info.memory_usage_string + + @property + def non_null_counts(self) -> Sequence[int]: + return self.info.non_null_counts + + def add_object_type_line(self) -> None: + """Add line with string representation of dataframe to the table.""" + self._lines.append(str(type(self.data))) + + def add_index_range_line(self) -> None: + """Add line with range of indices to the table.""" + self._lines.append(self.data.index._summary()) + + def add_dtypes_line(self) -> None: + """Add summary line with dtypes present in dataframe.""" + collected_dtypes = [ + f"{key}({val:d})" for key, val in sorted(self.dtype_counts.items()) + ] + self._lines.append(f"dtypes: {', '.join(collected_dtypes)}") + + +class DataFrameTableBuilder(TableBuilderAbstract): + """ + Abstract builder for dataframe info table. + + Parameters + ---------- + info : DataFrameInfo. + Instance of DataFrameInfo. 
+ """ + + def __init__(self, *, info: DataFrameInfo) -> None: + self.info: DataFrameInfo = info + + def get_lines(self) -> list[str]: + self._lines = [] + if self.col_count == 0: + self._fill_empty_info() + else: + self._fill_non_empty_info() + return self._lines + + def _fill_empty_info(self) -> None: + """Add lines to the info table, pertaining to empty dataframe.""" + self.add_object_type_line() + self.add_index_range_line() + self._lines.append(f"Empty {type(self.data).__name__}\n") + + @abstractmethod + def _fill_non_empty_info(self) -> None: + """Add lines to the info table, pertaining to non-empty dataframe.""" + + @property + def data(self) -> DataFrame: + """DataFrame.""" + return self.info.data + + @property + def ids(self) -> Index: + """Dataframe columns.""" + return self.info.ids + + @property + def col_count(self) -> int: + """Number of dataframe columns to be summarized.""" + return self.info.col_count + + def add_memory_usage_line(self) -> None: + """Add line containing memory usage.""" + self._lines.append(f"memory usage: {self.memory_usage_string}") + + +class DataFrameTableBuilderNonVerbose(DataFrameTableBuilder): + """ + Dataframe info table builder for non-verbose output. + """ + + def _fill_non_empty_info(self) -> None: + """Add lines to the info table, pertaining to non-empty dataframe.""" + self.add_object_type_line() + self.add_index_range_line() + self.add_columns_summary_line() + self.add_dtypes_line() + if self.display_memory_usage: + self.add_memory_usage_line() + + def add_columns_summary_line(self) -> None: + self._lines.append(self.ids._summary(name="Columns")) + + +class TableBuilderVerboseMixin(TableBuilderAbstract): + """ + Mixin for verbose info output. + """ + + SPACING: str = " " * 2 + strrows: Sequence[Sequence[str]] + gross_column_widths: Sequence[int] + with_counts: bool + + @property + @abstractmethod + def headers(self) -> Sequence[str]: + """Headers names of the columns in verbose table.""" + + @property + def header_column_widths(self) -> Sequence[int]: + """Widths of header columns (only titles).""" + return [len(col) for col in self.headers] + + def _get_gross_column_widths(self) -> Sequence[int]: + """Get widths of columns containing both headers and actual content.""" + body_column_widths = self._get_body_column_widths() + return [ + max(*widths) + for widths in zip(self.header_column_widths, body_column_widths) + ] + + def _get_body_column_widths(self) -> Sequence[int]: + """Get widths of table content columns.""" + strcols: Sequence[Sequence[str]] = list(zip(*self.strrows)) + return [max(len(x) for x in col) for col in strcols] + + def _gen_rows(self) -> Iterator[Sequence[str]]: + """ + Generator function yielding rows content. + + Each element represents a row comprising a sequence of strings. 
+ """ + if self.with_counts: + return self._gen_rows_with_counts() + else: + return self._gen_rows_without_counts() + + @abstractmethod + def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]: + """Iterator with string representation of body data with counts.""" + + @abstractmethod + def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]: + """Iterator with string representation of body data without counts.""" + + def add_header_line(self) -> None: + header_line = self.SPACING.join( + [ + _put_str(header, col_width) + for header, col_width in zip(self.headers, self.gross_column_widths) + ] + ) + self._lines.append(header_line) + + def add_separator_line(self) -> None: + separator_line = self.SPACING.join( + [ + _put_str("-" * header_colwidth, gross_colwidth) + for header_colwidth, gross_colwidth in zip( + self.header_column_widths, self.gross_column_widths + ) + ] + ) + self._lines.append(separator_line) + + def add_body_lines(self) -> None: + for row in self.strrows: + body_line = self.SPACING.join( + [ + _put_str(col, gross_colwidth) + for col, gross_colwidth in zip(row, self.gross_column_widths) + ] + ) + self._lines.append(body_line) + + def _gen_non_null_counts(self) -> Iterator[str]: + """Iterator with string representation of non-null counts.""" + for count in self.non_null_counts: + yield f"{count} non-null" + + def _gen_dtypes(self) -> Iterator[str]: + """Iterator with string representation of column dtypes.""" + for dtype in self.dtypes: + yield pprint_thing(dtype) + + +class DataFrameTableBuilderVerbose(DataFrameTableBuilder, TableBuilderVerboseMixin): + """ + Dataframe info table builder for verbose output. + """ + + def __init__( + self, + *, + info: DataFrameInfo, + with_counts: bool, + ) -> None: + self.info = info + self.with_counts = with_counts + self.strrows: Sequence[Sequence[str]] = list(self._gen_rows()) + self.gross_column_widths: Sequence[int] = self._get_gross_column_widths() + + def _fill_non_empty_info(self) -> None: + """Add lines to the info table, pertaining to non-empty dataframe.""" + self.add_object_type_line() + self.add_index_range_line() + self.add_columns_summary_line() + self.add_header_line() + self.add_separator_line() + self.add_body_lines() + self.add_dtypes_line() + if self.display_memory_usage: + self.add_memory_usage_line() + + @property + def headers(self) -> Sequence[str]: + """Headers names of the columns in verbose table.""" + if self.with_counts: + return [" # ", "Column", "Non-Null Count", "Dtype"] + return [" # ", "Column", "Dtype"] + + def add_columns_summary_line(self) -> None: + self._lines.append(f"Data columns (total {self.col_count} columns):") + + def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]: + """Iterator with string representation of body data without counts.""" + yield from zip( + self._gen_line_numbers(), + self._gen_columns(), + self._gen_dtypes(), + ) + + def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]: + """Iterator with string representation of body data with counts.""" + yield from zip( + self._gen_line_numbers(), + self._gen_columns(), + self._gen_non_null_counts(), + self._gen_dtypes(), + ) + + def _gen_line_numbers(self) -> Iterator[str]: + """Iterator with string representation of column numbers.""" + for i, _ in enumerate(self.ids): + yield f" {i}" + + def _gen_columns(self) -> Iterator[str]: + """Iterator with string representation of column names.""" + for col in self.ids: + yield pprint_thing(col) + + +class SeriesTableBuilder(TableBuilderAbstract): + """ + Abstract builder 
for series info table. + + Parameters + ---------- + info : SeriesInfo. + Instance of SeriesInfo. + """ + + def __init__(self, *, info: SeriesInfo) -> None: + self.info: SeriesInfo = info + + def get_lines(self) -> list[str]: + self._lines = [] + self._fill_non_empty_info() + return self._lines + + @property + def data(self) -> Series: + """Series.""" + return self.info.data + + def add_memory_usage_line(self) -> None: + """Add line containing memory usage.""" + self._lines.append(f"memory usage: {self.memory_usage_string}") + + @abstractmethod + def _fill_non_empty_info(self) -> None: + """Add lines to the info table, pertaining to non-empty series.""" + + +class SeriesTableBuilderNonVerbose(SeriesTableBuilder): + """ + Series info table builder for non-verbose output. + """ + + def _fill_non_empty_info(self) -> None: + """Add lines to the info table, pertaining to non-empty series.""" + self.add_object_type_line() + self.add_index_range_line() + self.add_dtypes_line() + if self.display_memory_usage: + self.add_memory_usage_line() + + +class SeriesTableBuilderVerbose(SeriesTableBuilder, TableBuilderVerboseMixin): + """ + Series info table builder for verbose output. + """ + + def __init__( + self, + *, + info: SeriesInfo, + with_counts: bool, + ) -> None: + self.info = info + self.with_counts = with_counts + self.strrows: Sequence[Sequence[str]] = list(self._gen_rows()) + self.gross_column_widths: Sequence[int] = self._get_gross_column_widths() + + def _fill_non_empty_info(self) -> None: + """Add lines to the info table, pertaining to non-empty series.""" + self.add_object_type_line() + self.add_index_range_line() + self.add_series_name_line() + self.add_header_line() + self.add_separator_line() + self.add_body_lines() + self.add_dtypes_line() + if self.display_memory_usage: + self.add_memory_usage_line() + + def add_series_name_line(self) -> None: + self._lines.append(f"Series name: {self.data.name}") + + @property + def headers(self) -> Sequence[str]: + """Headers names of the columns in verbose table.""" + if self.with_counts: + return ["Non-Null Count", "Dtype"] + return ["Dtype"] + + def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]: + """Iterator with string representation of body data without counts.""" + yield from self._gen_dtypes() + + def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]: + """Iterator with string representation of body data with counts.""" + yield from zip( + self._gen_non_null_counts(), + self._gen_dtypes(), + ) + + +def _get_dataframe_dtype_counts(df: DataFrame) -> Mapping[str, int]: + """ + Create mapping between datatypes and their number of occurrences. + """ + # groupby dtype.name to collect e.g. Categorical columns + return df.dtypes.value_counts().groupby(lambda x: x.name).sum() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/printing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/printing.py new file mode 100644 index 00000000..b57797b7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/printing.py @@ -0,0 +1,503 @@ +""" +Printing tools. 
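+
+For example (illustrative), ``pprint_thing`` below renders nested containers
+as a single string:
+
+>>> from pandas.io.formats.printing import pprint_thing
+>>> pprint_thing({"a": [1, 2, 3]})
+"{'a': [1, 2, 3]}"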
+""" +from __future__ import annotations + +from collections.abc import ( + Iterable, + Mapping, + Sequence, +) +import sys +from typing import ( + Any, + Callable, + TypeVar, + Union, +) + +from pandas._config import get_option + +from pandas.core.dtypes.inference import is_sequence + +EscapeChars = Union[Mapping[str, str], Iterable[str]] +_KT = TypeVar("_KT") +_VT = TypeVar("_VT") + + +def adjoin(space: int, *lists: list[str], **kwargs) -> str: + """ + Glues together two sets of strings using the amount of space requested. + The idea is to prettify. + + ---------- + space : int + number of spaces for padding + lists : str + list of str which being joined + strlen : callable + function used to calculate the length of each str. Needed for unicode + handling. + justfunc : callable + function used to justify str. Needed for unicode handling. + """ + strlen = kwargs.pop("strlen", len) + justfunc = kwargs.pop("justfunc", justify) + + newLists = [] + lengths = [max(map(strlen, x)) + space for x in lists[:-1]] + # not the last one + lengths.append(max(map(len, lists[-1]))) + maxLen = max(map(len, lists)) + for i, lst in enumerate(lists): + nl = justfunc(lst, lengths[i], mode="left") + nl = ([" " * lengths[i]] * (maxLen - len(lst))) + nl + newLists.append(nl) + toJoin = zip(*newLists) + return "\n".join("".join(lines) for lines in toJoin) + + +def justify(texts: Iterable[str], max_len: int, mode: str = "right") -> list[str]: + """ + Perform ljust, center, rjust against string or list-like + """ + if mode == "left": + return [x.ljust(max_len) for x in texts] + elif mode == "center": + return [x.center(max_len) for x in texts] + else: + return [x.rjust(max_len) for x in texts] + + +# Unicode consolidation +# --------------------- +# +# pprinting utility functions for generating Unicode text or +# bytes(3.x)/str(2.x) representations of objects. +# Try to use these as much as possible rather than rolling your own. +# +# When to use +# ----------- +# +# 1) If you're writing code internal to pandas (no I/O directly involved), +# use pprint_thing(). +# +# It will always return unicode text which can handled by other +# parts of the package without breakage. +# +# 2) if you need to write something out to file, use +# pprint_thing_encoded(encoding). +# +# If no encoding is specified, it defaults to utf-8. Since encoding pure +# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're +# working with straight ascii. + + +def _pprint_seq( + seq: Sequence, _nest_lvl: int = 0, max_seq_items: int | None = None, **kwds +) -> str: + """ + internal. pprinter for iterables. you should probably use pprint_thing() + rather than calling this directly. + + bounds length of printed sequence, depending on options + """ + if isinstance(seq, set): + fmt = "{{{body}}}" + else: + fmt = "[{body}]" if hasattr(seq, "__setitem__") else "({body})" + + if max_seq_items is False: + nitems = len(seq) + else: + nitems = max_seq_items or get_option("max_seq_items") or len(seq) + + s = iter(seq) + # handle sets, no slicing + r = [ + pprint_thing(next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds) + for i in range(min(nitems, len(seq))) + ] + body = ", ".join(r) + + if nitems < len(seq): + body += ", ..." + elif isinstance(seq, tuple) and len(seq) == 1: + body += "," + + return fmt.format(body=body) + + +def _pprint_dict( + seq: Mapping, _nest_lvl: int = 0, max_seq_items: int | None = None, **kwds +) -> str: + """ + internal. pprinter for iterables. 
you should probably use pprint_thing() + rather than calling this directly. + """ + fmt = "{{{things}}}" + pairs = [] + + pfmt = "{key}: {val}" + + if max_seq_items is False: + nitems = len(seq) + else: + nitems = max_seq_items or get_option("max_seq_items") or len(seq) + + for k, v in list(seq.items())[:nitems]: + pairs.append( + pfmt.format( + key=pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds), + val=pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds), + ) + ) + + if nitems < len(seq): + return fmt.format(things=", ".join(pairs) + ", ...") + else: + return fmt.format(things=", ".join(pairs)) + + +def pprint_thing( + thing: Any, + _nest_lvl: int = 0, + escape_chars: EscapeChars | None = None, + default_escapes: bool = False, + quote_strings: bool = False, + max_seq_items: int | None = None, +) -> str: + """ + This function is the sanctioned way of converting objects + to a string representation and properly handles nested sequences. + + Parameters + ---------- + thing : anything to be formatted + _nest_lvl : internal use only. pprint_thing() is mutually-recursive + with pprint_sequence, this argument is used to keep track of the + current nesting level, and limit it. + escape_chars : list or dict, optional + Characters to escape. If a dict is passed the values are the + replacements + default_escapes : bool, default False + Whether the input escape characters replaces or adds to the defaults + max_seq_items : int or None, default None + Pass through to other pretty printers to limit sequence printing + + Returns + ------- + str + """ + + def as_escaped_string( + thing: Any, escape_chars: EscapeChars | None = escape_chars + ) -> str: + translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"} + if isinstance(escape_chars, dict): + if default_escapes: + translate.update(escape_chars) + else: + translate = escape_chars + escape_chars = list(escape_chars.keys()) + else: + escape_chars = escape_chars or () + + result = str(thing) + for c in escape_chars: + result = result.replace(c, translate[c]) + return result + + if hasattr(thing, "__next__"): + return str(thing) + elif isinstance(thing, dict) and _nest_lvl < get_option( + "display.pprint_nest_depth" + ): + result = _pprint_dict( + thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items + ) + elif is_sequence(thing) and _nest_lvl < get_option("display.pprint_nest_depth"): + result = _pprint_seq( + thing, + _nest_lvl, + escape_chars=escape_chars, + quote_strings=quote_strings, + max_seq_items=max_seq_items, + ) + elif isinstance(thing, str) and quote_strings: + result = f"'{as_escaped_string(thing)}'" + else: + result = as_escaped_string(thing) + + return result + + +def pprint_thing_encoded( + object, encoding: str = "utf-8", errors: str = "replace" +) -> bytes: + value = pprint_thing(object) # get unicode representation of object + return value.encode(encoding, errors) + + +def enable_data_resource_formatter(enable: bool) -> None: + if "IPython" not in sys.modules: + # definitely not in IPython + return + from IPython import get_ipython + + ip = get_ipython() + if ip is None: + # still not in IPython + return + + formatters = ip.display_formatter.formatters + mimetype = "application/vnd.dataresource+json" + + if enable: + if mimetype not in formatters: + # define tableschema formatter + from IPython.core.formatters import BaseFormatter + from traitlets import ObjectName + + class TableSchemaFormatter(BaseFormatter): + print_method = ObjectName("_repr_data_resource_") + _return_type = (dict,) + + # 
register it: + formatters[mimetype] = TableSchemaFormatter() + # enable it if it's been disabled: + formatters[mimetype].enabled = True + # unregister tableschema mime-type + elif mimetype in formatters: + formatters[mimetype].enabled = False + + +def default_pprint(thing: Any, max_seq_items: int | None = None) -> str: + return pprint_thing( + thing, + escape_chars=("\t", "\r", "\n"), + quote_strings=True, + max_seq_items=max_seq_items, + ) + + +def format_object_summary( + obj, + formatter: Callable, + is_justify: bool = True, + name: str | None = None, + indent_for_name: bool = True, + line_break_each_value: bool = False, +) -> str: + """ + Return the formatted obj as a unicode string + + Parameters + ---------- + obj : object + must be iterable and support __getitem__ + formatter : callable + string formatter for an element + is_justify : bool + should justify the display + name : name, optional + defaults to the class name of the obj + indent_for_name : bool, default True + Whether subsequent lines should be indented to + align with the name. + line_break_each_value : bool, default False + If True, inserts a line break for each value of ``obj``. + If False, only break lines when the a line of values gets wider + than the display width. + + Returns + ------- + summary string + """ + from pandas.io.formats.console import get_console_size + from pandas.io.formats.format import get_adjustment + + display_width, _ = get_console_size() + if display_width is None: + display_width = get_option("display.width") or 80 + if name is None: + name = type(obj).__name__ + + if indent_for_name: + name_len = len(name) + space1 = f'\n{(" " * (name_len + 1))}' + space2 = f'\n{(" " * (name_len + 2))}' + else: + space1 = "\n" + space2 = "\n " # space for the opening '[' + + n = len(obj) + if line_break_each_value: + # If we want to vertically align on each value of obj, we need to + # separate values by a line break and indent the values + sep = ",\n " + " " * len(name) + else: + sep = "," + max_seq_items = get_option("display.max_seq_items") or n + + # are we a truncated display + is_truncated = n > max_seq_items + + # adj can optionally handle unicode eastern asian width + adj = get_adjustment() + + def _extend_line( + s: str, line: str, value: str, display_width: int, next_line_prefix: str + ) -> tuple[str, str]: + if adj.len(line.rstrip()) + adj.len(value.rstrip()) >= display_width: + s += line.rstrip() + line = next_line_prefix + line += value + return s, line + + def best_len(values: list[str]) -> int: + if values: + return max(adj.len(x) for x in values) + else: + return 0 + + close = ", " + + if n == 0: + summary = f"[]{close}" + elif n == 1 and not line_break_each_value: + first = formatter(obj[0]) + summary = f"[{first}]{close}" + elif n == 2 and not line_break_each_value: + first = formatter(obj[0]) + last = formatter(obj[-1]) + summary = f"[{first}, {last}]{close}" + else: + if max_seq_items == 1: + # If max_seq_items=1 show only last element + head = [] + tail = [formatter(x) for x in obj[-1:]] + elif n > max_seq_items: + n = min(max_seq_items // 2, 10) + head = [formatter(x) for x in obj[:n]] + tail = [formatter(x) for x in obj[-n:]] + else: + head = [] + tail = [formatter(x) for x in obj] + + # adjust all values to max length if needed + if is_justify: + if line_break_each_value: + # Justify each string in the values of head and tail, so the + # strings will right align when head and tail are stacked + # vertically. 
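+            # Illustrative (hypothetical values): head=[('a', 'b')] and
+            # tail=[('abc', 'abcd')] become head=[('  a', '   b')], so the
+            # columns line up when each value is printed on its own line.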
+ head, tail = _justify(head, tail) + elif is_truncated or not ( + len(", ".join(head)) < display_width + and len(", ".join(tail)) < display_width + ): + # Each string in head and tail should align with each other + max_length = max(best_len(head), best_len(tail)) + head = [x.rjust(max_length) for x in head] + tail = [x.rjust(max_length) for x in tail] + # If we are not truncated and we are only a single + # line, then don't justify + + if line_break_each_value: + # Now head and tail are of type List[Tuple[str]]. Below we + # convert them into List[str], so there will be one string per + # value. Also truncate items horizontally if wider than + # max_space + max_space = display_width - len(space2) + value = tail[0] + max_items = 1 + for num_items in reversed(range(1, len(value) + 1)): + pprinted_seq = _pprint_seq(value, max_seq_items=num_items) + if len(pprinted_seq) < max_space: + max_items = num_items + break + head = [_pprint_seq(x, max_seq_items=max_items) for x in head] + tail = [_pprint_seq(x, max_seq_items=max_items) for x in tail] + + summary = "" + line = space2 + + for head_value in head: + word = head_value + sep + " " + summary, line = _extend_line(summary, line, word, display_width, space2) + + if is_truncated: + # remove trailing space of last line + summary += line.rstrip() + space2 + "..." + line = space2 + + for tail_item in tail[:-1]: + word = tail_item + sep + " " + summary, line = _extend_line(summary, line, word, display_width, space2) + + # last value: no sep added + 1 space of width used for trailing ',' + summary, line = _extend_line(summary, line, tail[-1], display_width - 2, space2) + summary += line + + # right now close is either '' or ', ' + # Now we want to include the ']', but not the maybe space. + close = "]" + close.rstrip(" ") + summary += close + + if len(summary) > (display_width) or line_break_each_value: + summary += space1 + else: # one row + summary += " " + + # remove initial space + summary = "[" + summary[len(space2) :] + + return summary + + +def _justify( + head: list[Sequence[str]], tail: list[Sequence[str]] +) -> tuple[list[tuple[str, ...]], list[tuple[str, ...]]]: + """ + Justify items in head and tail, so they are right-aligned when stacked. + + Parameters + ---------- + head : list-like of list-likes of strings + tail : list-like of list-likes of strings + + Returns + ------- + tuple of list of tuples of strings + Same as head and tail, but items are right aligned when stacked + vertically. + + Examples + -------- + >>> _justify([['a', 'b']], [['abc', 'abcd']]) + ([(' a', ' b')], [('abc', 'abcd')]) + """ + combined = head + tail + + # For each position for the sequences in ``combined``, + # find the length of the largest string. 
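+    # e.g. combined == [('a', 'b'), ('abc', 'abcd')] gives per-position
+    # maxima [3, 4] (hypothetical values, mirroring the Examples above).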
+ max_length = [0] * len(combined[0]) + for inner_seq in combined: + length = [len(item) for item in inner_seq] + max_length = [max(x, y) for x, y in zip(max_length, length)] + + # justify each item in each list-like in head and tail using max_length + head_tuples = [ + tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in head + ] + tail_tuples = [ + tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in tail + ] + return head_tuples, tail_tuples + + +class PrettyDict(dict[_KT, _VT]): + """Dict extension to support abbreviated __repr__""" + + def __repr__(self) -> str: + return pprint_thing(self) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/string.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/string.py new file mode 100644 index 00000000..769f9dee --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/string.py @@ -0,0 +1,206 @@ +""" +Module for formatting output data in console (to string). +""" +from __future__ import annotations + +from shutil import get_terminal_size +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from collections.abc import Iterable + + from pandas.io.formats.format import DataFrameFormatter + + +class StringFormatter: + """Formatter for string representation of a dataframe.""" + + def __init__(self, fmt: DataFrameFormatter, line_width: int | None = None) -> None: + self.fmt = fmt + self.adj = fmt.adj + self.frame = fmt.frame + self.line_width = line_width + + def to_string(self) -> str: + text = self._get_string_representation() + if self.fmt.should_show_dimensions: + text = "".join([text, self.fmt.dimensions_info]) + return text + + def _get_strcols(self) -> list[list[str]]: + strcols = self.fmt.get_strcols() + if self.fmt.is_truncated: + strcols = self._insert_dot_separators(strcols) + return strcols + + def _get_string_representation(self) -> str: + if self.fmt.frame.empty: + return self._empty_info_line + + strcols = self._get_strcols() + + if self.line_width is None: + # no need to wrap around just print the whole frame + return self.adj.adjoin(1, *strcols) + + if self._need_to_wrap_around: + return self._join_multiline(strcols) + + return self._fit_strcols_to_terminal_width(strcols) + + @property + def _empty_info_line(self) -> str: + return ( + f"Empty {type(self.frame).__name__}\n" + f"Columns: {pprint_thing(self.frame.columns)}\n" + f"Index: {pprint_thing(self.frame.index)}" + ) + + @property + def _need_to_wrap_around(self) -> bool: + return bool(self.fmt.max_cols is None or self.fmt.max_cols > 0) + + def _insert_dot_separators(self, strcols: list[list[str]]) -> list[list[str]]: + str_index = self.fmt._get_formatted_index(self.fmt.tr_frame) + index_length = len(str_index) + + if self.fmt.is_truncated_horizontally: + strcols = self._insert_dot_separator_horizontal(strcols, index_length) + + if self.fmt.is_truncated_vertically: + strcols = self._insert_dot_separator_vertical(strcols, index_length) + + return strcols + + @property + def _adjusted_tr_col_num(self) -> int: + return self.fmt.tr_col_num + 1 if self.fmt.index else self.fmt.tr_col_num + + def _insert_dot_separator_horizontal( + self, strcols: list[list[str]], index_length: int + ) -> list[list[str]]: + strcols.insert(self._adjusted_tr_col_num, [" ..."] * index_length) + return strcols + + def _insert_dot_separator_vertical( + self, strcols: list[list[str]], index_length: int + ) -> list[list[str]]: + n_header_rows = 
index_length - len(self.fmt.tr_frame) + row_num = self.fmt.tr_row_num + for ix, col in enumerate(strcols): + cwidth = self.adj.len(col[row_num]) + + if self.fmt.is_truncated_horizontally: + is_dot_col = ix == self._adjusted_tr_col_num + else: + is_dot_col = False + + if cwidth > 3 or is_dot_col: + dots = "..." + else: + dots = ".." + + if ix == 0 and self.fmt.index: + dot_mode = "left" + elif is_dot_col: + cwidth = 4 + dot_mode = "right" + else: + dot_mode = "right" + + dot_str = self.adj.justify([dots], cwidth, mode=dot_mode)[0] + col.insert(row_num + n_header_rows, dot_str) + return strcols + + def _join_multiline(self, strcols_input: Iterable[list[str]]) -> str: + lwidth = self.line_width + adjoin_width = 1 + strcols = list(strcols_input) + + if self.fmt.index: + idx = strcols.pop(0) + lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width + + col_widths = [ + np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0 + for col in strcols + ] + + assert lwidth is not None + col_bins = _binify(col_widths, lwidth) + nbins = len(col_bins) + + str_lst = [] + start = 0 + for i, end in enumerate(col_bins): + row = strcols[start:end] + if self.fmt.index: + row.insert(0, idx) + if nbins > 1: + nrows = len(row[-1]) + if end <= len(strcols) and i < nbins - 1: + row.append([" \\"] + [" "] * (nrows - 1)) + else: + row.append([" "] * nrows) + str_lst.append(self.adj.adjoin(adjoin_width, *row)) + start = end + return "\n\n".join(str_lst) + + def _fit_strcols_to_terminal_width(self, strcols: list[list[str]]) -> str: + from pandas import Series + + lines = self.adj.adjoin(1, *strcols).split("\n") + max_len = Series(lines).str.len().max() + # plus truncate dot col + width, _ = get_terminal_size() + dif = max_len - width + # '+ 1' to avoid too wide repr (GH PR #17023) + adj_dif = dif + 1 + col_lens = Series([Series(ele).str.len().max() for ele in strcols]) + n_cols = len(col_lens) + counter = 0 + while adj_dif > 0 and n_cols > 1: + counter += 1 + mid = round(n_cols / 2) + mid_ix = col_lens.index[mid] + col_len = col_lens[mid_ix] + # adjoin adds one + adj_dif -= col_len + 1 + col_lens = col_lens.drop(mid_ix) + n_cols = len(col_lens) + + # subtract index column + max_cols_fitted = n_cols - self.fmt.index + # GH-21180. Ensure that we print at least two. + max_cols_fitted = max(max_cols_fitted, 2) + self.fmt.max_cols_fitted = max_cols_fitted + + # Call again _truncate to cut frame appropriately + # and then generate string representation + self.fmt.truncate() + strcols = self._get_strcols() + return self.adj.adjoin(1, *strcols) + + +def _binify(cols: list[int], line_width: int) -> list[int]: + adjoin_width = 1 + bins = [] + curr_width = 0 + i_last_column = len(cols) - 1 + for i, w in enumerate(cols): + w_adjoined = w + adjoin_width + curr_width += w_adjoined + if i_last_column == i: + wrap = curr_width + 1 > line_width and i > 0 + else: + wrap = curr_width + 2 > line_width and i > 0 + if wrap: + bins.append(i) + curr_width = w_adjoined + + bins.append(len(cols)) + return bins diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/style.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/style.py new file mode 100644 index 00000000..f883d9de --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/style.py @@ -0,0 +1,4147 @@ +""" +Module for applying conditional formatting to DataFrames and Series. 
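+
+Typical entry point (illustrative)::
+
+    df.style.highlight_max().to_html()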
+""" +from __future__ import annotations + +from contextlib import contextmanager +import copy +from functools import partial +import operator +from typing import ( + TYPE_CHECKING, + Any, + Callable, + overload, +) +import warnings + +import numpy as np + +from pandas._config import get_option + +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import ( + Substitution, + doc, +) +from pandas.util._exceptions import find_stack_level + +import pandas as pd +from pandas import ( + IndexSlice, + RangeIndex, +) +import pandas.core.common as com +from pandas.core.frame import ( + DataFrame, + Series, +) +from pandas.core.generic import NDFrame +from pandas.core.shared_docs import _shared_docs + +from pandas.io.formats.format import save_to_buffer + +jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.") + +from pandas.io.formats.style_render import ( + CSSProperties, + CSSStyles, + ExtFormatter, + StylerRenderer, + Subset, + Tooltips, + format_table_styles, + maybe_convert_css_to_tuples, + non_reducing_slice, + refactor_levels, +) + +if TYPE_CHECKING: + from collections.abc import ( + Generator, + Hashable, + Sequence, + ) + + from matplotlib.colors import Colormap + + from pandas._typing import ( + Axis, + AxisInt, + FilePath, + IndexLabel, + IntervalClosedType, + Level, + QuantileInterpolation, + Scalar, + StorageOptions, + WriteBuffer, + WriteExcelBuffer, + ) + + from pandas import ExcelWriter + +try: + import matplotlib as mpl + import matplotlib.pyplot as plt + + has_mpl = True +except ImportError: + has_mpl = False + + +@contextmanager +def _mpl(func: Callable) -> Generator[tuple[Any, Any], None, None]: + if has_mpl: + yield plt, mpl + else: + raise ImportError(f"{func.__name__} requires matplotlib.") + + +#### +# Shared Doc Strings + +subset_args = """subset : label, array-like, IndexSlice, optional + A valid 2d input to `DataFrame.loc[]`, or, in the case of a 1d input + or single key, to `DataFrame.loc[:, ]` where the columns are + prioritised, to limit ``data`` to *before* applying the function.""" + +properties_args = """props : str, default None + CSS properties to use for highlighting. If ``props`` is given, ``color`` + is not used.""" + +coloring_args = """color : str, default '{default}' + Background color to use for highlighting.""" + +buffering_args = """buf : str, path object, file-like object, optional + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``write()`` function. If ``None``, the result is + returned as a string.""" + +encoding_args = """encoding : str, optional + Character encoding setting for file output (and meta tags if available). + Defaults to ``pandas.options.styler.render.encoding`` value of "utf-8".""" + +# +### + + +class Styler(StylerRenderer): + r""" + Helps style a DataFrame or Series according to the data with HTML and CSS. + + Parameters + ---------- + data : Series or DataFrame + Data to be styled - either a Series or DataFrame. + precision : int, optional + Precision to round floats to. If not given defaults to + ``pandas.options.styler.format.precision``. + + .. versionchanged:: 1.4.0 + table_styles : list-like, default None + List of {selector: (attr, value)} dicts; see Notes. + uuid : str, default None + A unique identifier to avoid CSS collisions; generated automatically. + caption : str, tuple, default None + String caption to attach to the table. Tuple only used for LaTeX dual captions. 
+    table_attributes : str, default None
+        Items that show up in the opening ``<table>`` tag
+        in addition to automatic (by default) id.
+
+styler_max_elements = """
+: int
+    The maximum number of data-cell (<td>) elements that will be rendered before
+    trimming will occur over columns, rows or both if needed.
+"""
+
+styler_max_rows = """
+: int, optional
+    The maximum number of rows that will be rendered. May still be reduced to
+    satisfy ``max_elements``, which takes precedence.
+"""
+
+styler_max_columns = """
+: int, optional
+    The maximum number of columns that will be rendered. May still be reduced to
+    satisfy ``max_elements``, which takes precedence.
+"""
+
+styler_precision = """
+: int
+    The precision for floats and complex numbers.
+"""
+
+styler_decimal = """
+: str
+    The character representation for the decimal separator for floats and complex.
+"""
+
+styler_thousands = """
+: str, optional
+    The character representation for thousands separator for floats, int and complex.
+"""
+
+styler_na_rep = """
+: str, optional
+    The string representation for values identified as missing.
+"""
+
+styler_escape = """
+: str, optional
+    Whether to escape certain characters according to the given context; html or latex.
+"""
+
+styler_formatter = """
+: str, callable, dict, optional
+    A formatter object to be used as default within ``Styler.format``.
+"""
+
+styler_multirow_align = """
+: {"c", "t", "b"}
+    The specifier for vertical alignment of sparsified LaTeX multirows.
+"""
+
+styler_multicol_align = r"""
+: {"r", "c", "l", "naive-l", "naive-r"}
+    The specifier for horizontal alignment of sparsified LaTeX multicolumns. Pipe
+    decorators can also be added to non-naive values to draw vertical
+    rules, e.g. "\|r" will draw a rule on the left side of right aligned merged cells.
+"""
+
+styler_hrules = """
+: bool
+    Whether to add horizontal rules on top and bottom and below the headers.
+"""
+
+styler_environment = """
+: str
+    The environment to replace ``\\begin{table}``. If "longtable" is used results
+    in a specific longtable environment format.
+"""
+
+styler_encoding = """
+: str
+    The encoding used for output HTML and LaTeX files.
+"""
+
+styler_mathjax = """
+: bool
+    If False will render special CSS classes to table attributes that indicate Mathjax
+    will not be used in Jupyter Notebook.
+""" + +with cf.config_prefix("styler"): + cf.register_option("sparse.index", True, styler_sparse_index_doc, validator=is_bool) + + cf.register_option( + "sparse.columns", True, styler_sparse_columns_doc, validator=is_bool + ) + + cf.register_option( + "render.repr", + "html", + styler_render_repr, + validator=is_one_of_factory(["html", "latex"]), + ) + + cf.register_option( + "render.max_elements", + 2**18, + styler_max_elements, + validator=is_nonnegative_int, + ) + + cf.register_option( + "render.max_rows", + None, + styler_max_rows, + validator=is_nonnegative_int, + ) + + cf.register_option( + "render.max_columns", + None, + styler_max_columns, + validator=is_nonnegative_int, + ) + + cf.register_option("render.encoding", "utf-8", styler_encoding, validator=is_str) + + cf.register_option("format.decimal", ".", styler_decimal, validator=is_str) + + cf.register_option( + "format.precision", 6, styler_precision, validator=is_nonnegative_int + ) + + cf.register_option( + "format.thousands", + None, + styler_thousands, + validator=is_instance_factory([type(None), str]), + ) + + cf.register_option( + "format.na_rep", + None, + styler_na_rep, + validator=is_instance_factory([type(None), str]), + ) + + cf.register_option( + "format.escape", + None, + styler_escape, + validator=is_one_of_factory([None, "html", "latex", "latex-math"]), + ) + + cf.register_option( + "format.formatter", + None, + styler_formatter, + validator=is_instance_factory([type(None), dict, Callable, str]), + ) + + cf.register_option("html.mathjax", True, styler_mathjax, validator=is_bool) + + cf.register_option( + "latex.multirow_align", + "c", + styler_multirow_align, + validator=is_one_of_factory(["c", "t", "b", "naive"]), + ) + + val_mca = ["r", "|r|", "|r", "r|", "c", "|c|", "|c", "c|", "l", "|l|", "|l", "l|"] + val_mca += ["naive-l", "naive-r"] + cf.register_option( + "latex.multicol_align", + "r", + styler_multicol_align, + validator=is_one_of_factory(val_mca), + ) + + cf.register_option("latex.hrules", False, styler_hrules, validator=is_bool) + + cf.register_option( + "latex.environment", + None, + styler_environment, + validator=is_instance_factory([type(None), str]), + ) + + +with cf.config_prefix("future"): + cf.register_option( + "infer_string", + False, + "Whether to infer sequence of str objects as pyarrow string " + "dtype, which will be the default in pandas 3.0 " + "(at which point this option will be deprecated).", + validator=is_one_of_factory([True, False]), + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/construction.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/construction.py new file mode 100644 index 00000000..39f0ddf1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/construction.py @@ -0,0 +1,818 @@ +""" +Constructor functions intended to be shared by pd.array, Series.__init__, +and Index.__new__. + +These should not depend on core.internals. 
+""" +from __future__ import annotations + +from collections.abc import Sequence +from typing import ( + TYPE_CHECKING, + Optional, + Union, + cast, + overload, +) +import warnings + +import numpy as np +from numpy import ma + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib +from pandas._libs.tslibs import ( + Period, + get_unit_from_dtype, + is_supported_unit, +) +from pandas._typing import ( + AnyArrayLike, + ArrayLike, + Dtype, + DtypeObj, + T, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.cast import ( + construct_1d_arraylike_from_scalar, + construct_1d_object_array_from_listlike, + maybe_cast_to_datetime, + maybe_cast_to_integer_array, + maybe_convert_platform, + maybe_infer_to_datetimelike, + maybe_promote, +) +from pandas.core.dtypes.common import ( + is_list_like, + is_object_dtype, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import NumpyEADtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCExtensionArray, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import isna + +import pandas.core.common as com + +if TYPE_CHECKING: + from pandas import ( + Index, + Series, + ) + from pandas.core.arrays.base import ExtensionArray + + +def array( + data: Sequence[object] | AnyArrayLike, + dtype: Dtype | None = None, + copy: bool = True, +) -> ExtensionArray: + """ + Create an array. + + Parameters + ---------- + data : Sequence of objects + The scalars inside `data` should be instances of the + scalar type for `dtype`. It's expected that `data` + represents a 1-dimensional array of data. + + When `data` is an Index or Series, the underlying array + will be extracted from `data`. + + dtype : str, np.dtype, or ExtensionDtype, optional + The dtype to use for the array. This may be a NumPy + dtype or an extension type registered with pandas using + :meth:`pandas.api.extensions.register_extension_dtype`. + + If not specified, there are two possibilities: + + 1. When `data` is a :class:`Series`, :class:`Index`, or + :class:`ExtensionArray`, the `dtype` will be taken + from the data. + 2. Otherwise, pandas will attempt to infer the `dtype` + from the data. + + Note that when `data` is a NumPy array, ``data.dtype`` is + *not* used for inferring the array type. This is because + NumPy cannot represent all the types of data that can be + held in extension arrays. + + Currently, pandas will infer an extension dtype for sequences of + + ============================== ======================================= + Scalar Type Array Type + ============================== ======================================= + :class:`pandas.Interval` :class:`pandas.arrays.IntervalArray` + :class:`pandas.Period` :class:`pandas.arrays.PeriodArray` + :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray` + :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray` + :class:`int` :class:`pandas.arrays.IntegerArray` + :class:`float` :class:`pandas.arrays.FloatingArray` + :class:`str` :class:`pandas.arrays.StringArray` or + :class:`pandas.arrays.ArrowStringArray` + :class:`bool` :class:`pandas.arrays.BooleanArray` + ============================== ======================================= + + The ExtensionArray created when the scalar type is :class:`str` is determined by + ``pd.options.mode.string_storage`` if the dtype is not explicitly given. + + For all other cases, NumPy's usual inference rules will be used. + + .. 
versionchanged:: 1.2.0
+
+       Pandas now also infers nullable-floating dtype for float-like
+       input data
+
+    copy : bool, default True
+        Whether to copy the data, even if not necessary. Depending
+        on the type of `data`, creating the new array may require
+        copying data, even if ``copy=False``.
+
+    Returns
+    -------
+    ExtensionArray
+        The newly created array.
+
+    Raises
+    ------
+    ValueError
+        When `data` is not 1-dimensional.
+
+    See Also
+    --------
+    numpy.array : Construct a NumPy array.
+    Series : Construct a pandas Series.
+    Index : Construct a pandas Index.
+    arrays.NumpyExtensionArray : ExtensionArray wrapping a NumPy array.
+    Series.array : Extract the array stored within a Series.
+
+    Notes
+    -----
+    Omitting the `dtype` argument means pandas will attempt to infer the
+    best array type from the values in the data. As new array types are
+    added by pandas and 3rd party libraries, the "best" array type may
+    change. We recommend specifying `dtype` to ensure that
+
+    1. the correct array type for the data is returned
+    2. the returned array type doesn't change as new extension types
+       are added by pandas and third-party libraries
+
+    Additionally, if the underlying memory representation of the returned
+    array matters, we recommend specifying the `dtype` as a concrete object
+    rather than a string alias or allowing it to be inferred. For example,
+    a future version of pandas or a 3rd-party library may include a
+    dedicated ExtensionArray for string data. In this event, the following
+    would no longer return a :class:`arrays.NumpyExtensionArray` backed by a
+    NumPy array.
+
+    >>> pd.array(['a', 'b'], dtype=str)
+    <NumpyExtensionArray>
+    ['a', 'b']
+    Length: 2, dtype: str32
+
+    This would instead return the new ExtensionArray dedicated for string
+    data. If you really need the new array to be backed by a NumPy array,
+    specify that in the dtype.
+
+    >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
+    <NumpyExtensionArray>
+    ['a', 'b']
+    Length: 2, dtype: str32
+
+    Finally, Pandas has arrays that mostly overlap with NumPy
+
+      * :class:`arrays.DatetimeArray`
+      * :class:`arrays.TimedeltaArray`
+
+    When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
+    passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
+    rather than a ``NumpyExtensionArray``. This is for symmetry with the case of
+    timezone-aware data, which NumPy does not natively support.
+
+    >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
+    <DatetimeArray>
+    ['2015-01-01 00:00:00', '2016-01-01 00:00:00']
+    Length: 2, dtype: datetime64[ns]
+
+    >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]')
+    <TimedeltaArray>
+    ['0 days 01:00:00', '0 days 02:00:00']
+    Length: 2, dtype: timedelta64[ns]
+
+    Examples
+    --------
+    If a dtype is not specified, pandas will infer the best dtype from the values.
+    See the description of `dtype` for the types pandas infers for.
+
+    >>> pd.array([1, 2])
+    <IntegerArray>
+    [1, 2]
+    Length: 2, dtype: Int64
+
+    >>> pd.array([1, 2, np.nan])
+    <IntegerArray>
+    [1, 2, <NA>]
+    Length: 3, dtype: Int64
+
+    >>> pd.array([1.1, 2.2])
+    <FloatingArray>
+    [1.1, 2.2]
+    Length: 2, dtype: Float64
+
+    >>> pd.array(["a", None, "c"])
+    <StringArray>
+    ['a', <NA>, 'c']
+    Length: 3, dtype: string
+
+    >>> with pd.option_context("string_storage", "pyarrow"):
+    ...     arr = pd.array(["a", None, "c"])
+    ...
+    >>> arr
+    <ArrowStringArray>
+    ['a', <NA>, 'c']
+    Length: 3, dtype: string
+
+    >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
+    <PeriodArray>
+    ['2000-01-01', '2000-01-01']
+    Length: 2, dtype: period[D]
+
+    You can use the string alias for `dtype`
+
+    >>> pd.array(['a', 'b', 'a'], dtype='category')
+    ['a', 'b', 'a']
+    Categories (2, object): ['a', 'b']
+
+    Or specify the actual dtype
+
+    >>> pd.array(['a', 'b', 'a'],
+    ...          dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
+    ['a', 'b', 'a']
+    Categories (3, object): ['a' < 'b' < 'c']
+
+    If pandas does not infer a dedicated extension type a
+    :class:`arrays.NumpyExtensionArray` is returned.
+
+    >>> pd.array([1 + 1j, 3 + 2j])
+    <NumpyExtensionArray>
+    [(1+1j), (3+2j)]
+    Length: 2, dtype: complex128
+
+    As mentioned in the "Notes" section, new extension types may be added
+    in the future (by pandas or 3rd party libraries), causing the return
+    value to no longer be a :class:`arrays.NumpyExtensionArray`. Specify the
+    `dtype` as a NumPy dtype if you need to ensure there's no future change in
+    behavior.
+
+    >>> pd.array([1, 2], dtype=np.dtype("int32"))
+    <NumpyExtensionArray>
+    [1, 2]
+    Length: 2, dtype: int32
+
+    `data` must be 1-dimensional. A ValueError is raised when the input
+    has the wrong dimensionality.
+
+    >>> pd.array(1)
+    Traceback (most recent call last):
+        ...
+    ValueError: Cannot pass scalar '1' to 'pandas.array'.
+    """
+    from pandas.core.arrays import (
+        BooleanArray,
+        DatetimeArray,
+        ExtensionArray,
+        FloatingArray,
+        IntegerArray,
+        IntervalArray,
+        NumpyExtensionArray,
+        PeriodArray,
+        TimedeltaArray,
+    )
+    from pandas.core.arrays.string_ import StringDtype
+
+    if lib.is_scalar(data):
+        msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
+        raise ValueError(msg)
+    elif isinstance(data, ABCDataFrame):
+        raise TypeError("Cannot pass DataFrame to 'pandas.array'")
+
+    if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ExtensionArray)):
+        # Note: we exclude np.ndarray here, will do type inference on it
+        dtype = data.dtype
+
+    data = extract_array(data, extract_numpy=True)
+
+    # this returns None for not-found dtypes.
+    if dtype is not None:
+        dtype = pandas_dtype(dtype)
+
+    if isinstance(data, ExtensionArray) and (dtype is None or data.dtype == dtype):
+        # e.g.
TimedeltaArray[s], avoid casting to NumpyExtensionArray + if copy: + return data.copy() + return data + + if isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + return cls._from_sequence(data, dtype=dtype, copy=copy) + + if dtype is None: + inferred_dtype = lib.infer_dtype(data, skipna=True) + if inferred_dtype == "period": + period_data = cast(Union[Sequence[Optional[Period]], AnyArrayLike], data) + return PeriodArray._from_sequence(period_data, copy=copy) + + elif inferred_dtype == "interval": + return IntervalArray(data, copy=copy) + + elif inferred_dtype.startswith("datetime"): + # datetime, datetime64 + try: + return DatetimeArray._from_sequence(data, copy=copy) + except ValueError: + # Mixture of timezones, fall back to NumpyExtensionArray + pass + + elif inferred_dtype.startswith("timedelta"): + # timedelta, timedelta64 + return TimedeltaArray._from_sequence(data, copy=copy) + + elif inferred_dtype == "string": + # StringArray/ArrowStringArray depending on pd.options.mode.string_storage + return StringDtype().construct_array_type()._from_sequence(data, copy=copy) + + elif inferred_dtype == "integer": + return IntegerArray._from_sequence(data, copy=copy) + elif inferred_dtype == "empty" and not hasattr(data, "dtype") and not len(data): + return FloatingArray._from_sequence(data, copy=copy) + elif ( + inferred_dtype in ("floating", "mixed-integer-float") + and getattr(data, "dtype", None) != np.float16 + ): + # GH#44715 Exclude np.float16 bc FloatingArray does not support it; + # we will fall back to NumpyExtensionArray. + return FloatingArray._from_sequence(data, copy=copy) + + elif inferred_dtype == "boolean": + return BooleanArray._from_sequence(data, copy=copy) + + # Pandas overrides NumPy for + # 1. datetime64[ns,us,ms,s] + # 2. timedelta64[ns,us,ms,s] + # so that a DatetimeArray is returned. + if lib.is_np_dtype(dtype, "M") and is_supported_unit(get_unit_from_dtype(dtype)): + return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy) + if lib.is_np_dtype(dtype, "m") and is_supported_unit(get_unit_from_dtype(dtype)): + return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy) + + elif lib.is_np_dtype(dtype, "mM"): + warnings.warn( + r"datetime64 and timedelta64 dtype resolutions other than " + r"'s', 'ms', 'us', and 'ns' are deprecated. " + r"In future releases passing unsupported resolutions will " + r"raise an exception.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + return NumpyExtensionArray._from_sequence(data, dtype=dtype, copy=copy) + + +_typs = frozenset( + { + "index", + "rangeindex", + "multiindex", + "datetimeindex", + "timedeltaindex", + "periodindex", + "categoricalindex", + "intervalindex", + "series", + } +) + + +@overload +def extract_array( + obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ... +) -> ArrayLike: + ... + + +@overload +def extract_array( + obj: T, extract_numpy: bool = ..., extract_range: bool = ... +) -> T | ArrayLike: + ... + + +def extract_array( + obj: T, extract_numpy: bool = False, extract_range: bool = False +) -> T | ArrayLike: + """ + Extract the ndarray or ExtensionArray from a Series or Index. + + For all other types, `obj` is just returned as is. + + Parameters + ---------- + obj : object + For Series / Index, the underlying ExtensionArray is unboxed. + + extract_numpy : bool, default False + Whether to extract the ndarray from a NumpyExtensionArray. 
+ + extract_range : bool, default False + If we have a RangeIndex, return range._values if True + (which is a materialized integer ndarray), otherwise return unchanged. + + Returns + ------- + arr : object + + Examples + -------- + >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category')) + ['a', 'b', 'c'] + Categories (3, object): ['a', 'b', 'c'] + + Other objects like lists, arrays, and DataFrames are just passed through. + + >>> extract_array([1, 2, 3]) + [1, 2, 3] + + For an ndarray-backed Series / Index the ndarray is returned. + + >>> extract_array(pd.Series([1, 2, 3])) + array([1, 2, 3]) + + To extract all the way down to the ndarray, pass ``extract_numpy=True``. + + >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) + array([1, 2, 3]) + """ + typ = getattr(obj, "_typ", None) + if typ in _typs: + # i.e. isinstance(obj, (ABCIndex, ABCSeries)) + if typ == "rangeindex": + if extract_range: + # error: "T" has no attribute "_values" + return obj._values # type: ignore[attr-defined] + return obj + + # error: "T" has no attribute "_values" + return obj._values # type: ignore[attr-defined] + + elif extract_numpy and typ == "npy_extension": + # i.e. isinstance(obj, ABCNumpyExtensionArray) + # error: "T" has no attribute "to_numpy" + return obj.to_numpy() # type: ignore[attr-defined] + + return obj + + +def ensure_wrapped_if_datetimelike(arr): + """ + Wrap datetime64 and timedelta64 ndarrays in DatetimeArray/TimedeltaArray. + """ + if isinstance(arr, np.ndarray): + if arr.dtype.kind == "M": + from pandas.core.arrays import DatetimeArray + + return DatetimeArray._from_sequence(arr) + + elif arr.dtype.kind == "m": + from pandas.core.arrays import TimedeltaArray + + return TimedeltaArray._from_sequence(arr) + + return arr + + +def sanitize_masked_array(data: ma.MaskedArray) -> np.ndarray: + """ + Convert numpy MaskedArray to ensure mask is softened. + """ + mask = ma.getmaskarray(data) + if mask.any(): + dtype, fill_value = maybe_promote(data.dtype, np.nan) + dtype = cast(np.dtype, dtype) + data = ma.asarray(data.astype(dtype, copy=True)) + data.soften_mask() # set hardmask False if it was True + data[mask] = fill_value + else: + data = data.copy() + return data + + +def sanitize_array( + data, + index: Index | None, + dtype: DtypeObj | None = None, + copy: bool = False, + *, + allow_2d: bool = False, +) -> ArrayLike: + """ + Sanitize input data to an ndarray or ExtensionArray, copy if specified, + coerce to the dtype if specified. + + Parameters + ---------- + data : Any + index : Index or None, default None + dtype : np.dtype, ExtensionDtype, or None, default None + copy : bool, default False + allow_2d : bool, default False + If False, raise if we have a 2D Arraylike. 
+ + Returns + ------- + np.ndarray or ExtensionArray + """ + original_dtype = dtype + if isinstance(data, ma.MaskedArray): + data = sanitize_masked_array(data) + + if isinstance(dtype, NumpyEADtype): + # Avoid ending up with a NumpyExtensionArray + dtype = dtype.numpy_dtype + + object_index = False + if isinstance(data, ABCIndex) and data.dtype == object and dtype is None: + object_index = True + + # extract ndarray or ExtensionArray, ensure we have no NumpyExtensionArray + data = extract_array(data, extract_numpy=True, extract_range=True) + + if isinstance(data, np.ndarray) and data.ndim == 0: + if dtype is None: + dtype = data.dtype + data = lib.item_from_zerodim(data) + elif isinstance(data, range): + # GH#16804 + data = range_to_ndarray(data) + copy = False + + if not is_list_like(data): + if index is None: + raise ValueError("index must be specified when data is not list-like") + if ( + isinstance(data, str) + and using_pyarrow_string_dtype() + and original_dtype is None + ): + from pandas.core.arrays.string_ import StringDtype + + dtype = StringDtype("pyarrow_numpy") + data = construct_1d_arraylike_from_scalar(data, len(index), dtype) + + return data + + elif isinstance(data, ABCExtensionArray): + # it is already ensured above this is not a NumpyExtensionArray + # Until GH#49309 is fixed this check needs to come before the + # ExtensionDtype check + if dtype is not None: + subarr = data.astype(dtype, copy=copy) + elif copy: + subarr = data.copy() + else: + subarr = data + + elif isinstance(dtype, ExtensionDtype): + # create an extension array from its dtype + _sanitize_non_ordered(data) + cls = dtype.construct_array_type() + subarr = cls._from_sequence(data, dtype=dtype, copy=copy) + + # GH#846 + elif isinstance(data, np.ndarray): + if isinstance(data, np.matrix): + data = data.A + + if dtype is None: + subarr = data + if data.dtype == object: + subarr = maybe_infer_to_datetimelike(data) + if ( + object_index + and using_pyarrow_string_dtype() + and is_string_dtype(subarr) + ): + # Avoid inference when string option is set + subarr = data + elif data.dtype.kind == "U" and using_pyarrow_string_dtype(): + from pandas.core.arrays.string_ import StringDtype + + dtype = StringDtype(storage="pyarrow_numpy") + subarr = dtype.construct_array_type()._from_sequence(data, dtype=dtype) + + if subarr is data and copy: + subarr = subarr.copy() + + else: + # we will try to copy by-definition here + subarr = _try_cast(data, dtype, copy) + + elif hasattr(data, "__array__"): + # e.g. dask array GH#38645 + data = np.array(data, copy=copy) + return sanitize_array( + data, + index=index, + dtype=dtype, + copy=False, + allow_2d=allow_2d, + ) + + else: + _sanitize_non_ordered(data) + # materialize e.g. generators, convert e.g. tuples, abc.ValueView + data = list(data) + + if len(data) == 0 and dtype is None: + # We default to float64, matching numpy + subarr = np.array([], dtype=np.float64) + + elif dtype is not None: + subarr = _try_cast(data, dtype, copy) + + else: + subarr = maybe_convert_platform(data) + if subarr.dtype == object: + subarr = cast(np.ndarray, subarr) + subarr = maybe_infer_to_datetimelike(subarr) + + subarr = _sanitize_ndim(subarr, data, dtype, index, allow_2d=allow_2d) + + if isinstance(subarr, np.ndarray): + # at this point we should have dtype be None or subarr.dtype == dtype + dtype = cast(np.dtype, dtype) + subarr = _sanitize_str_dtypes(subarr, data, dtype, copy) + + return subarr + + +def range_to_ndarray(rng: range) -> np.ndarray: + """ + Cast a range object to ndarray. 
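+
+    Examples
+    --------
+    A small illustration (values chosen arbitrarily):
+
+    >>> range_to_ndarray(range(2, 10, 3))
+    array([2, 5, 8])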
+ """ + # GH#30171 perf avoid realizing range as a list in np.array + try: + arr = np.arange(rng.start, rng.stop, rng.step, dtype="int64") + except OverflowError: + # GH#30173 handling for ranges that overflow int64 + if (rng.start >= 0 and rng.step > 0) or (rng.step < 0 <= rng.stop): + try: + arr = np.arange(rng.start, rng.stop, rng.step, dtype="uint64") + except OverflowError: + arr = construct_1d_object_array_from_listlike(list(rng)) + else: + arr = construct_1d_object_array_from_listlike(list(rng)) + return arr + + +def _sanitize_non_ordered(data) -> None: + """ + Raise only for unordered sets, e.g., not for dict_keys + """ + if isinstance(data, (set, frozenset)): + raise TypeError(f"'{type(data).__name__}' type is unordered") + + +def _sanitize_ndim( + result: ArrayLike, + data, + dtype: DtypeObj | None, + index: Index | None, + *, + allow_2d: bool = False, +) -> ArrayLike: + """ + Ensure we have a 1-dimensional result array. + """ + if getattr(result, "ndim", 0) == 0: + raise ValueError("result should be arraylike with ndim > 0") + + if result.ndim == 1: + # the result that we want + result = _maybe_repeat(result, index) + + elif result.ndim > 1: + if isinstance(data, np.ndarray): + if allow_2d: + return result + raise ValueError( + f"Data must be 1-dimensional, got ndarray of shape {data.shape} instead" + ) + if is_object_dtype(dtype) and isinstance(dtype, ExtensionDtype): + # i.e. NumpyEADtype("O") + + result = com.asarray_tuplesafe(data, dtype=np.dtype("object")) + cls = dtype.construct_array_type() + result = cls._from_sequence(result, dtype=dtype) + else: + # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type + # "Union[dtype[Any], ExtensionDtype, None]"; expected "Union[str, + # dtype[Any], None]" + result = com.asarray_tuplesafe(data, dtype=dtype) # type: ignore[arg-type] + return result + + +def _sanitize_str_dtypes( + result: np.ndarray, data, dtype: np.dtype | None, copy: bool +) -> np.ndarray: + """ + Ensure we have a dtype that is supported by pandas. + """ + + # This is to prevent mixed-type Series getting all casted to + # NumPy string type, e.g. NaN --> '-1#IND'. + if issubclass(result.dtype.type, str): + # GH#16605 + # If not empty convert the data to dtype + # GH#19853: If data is a scalar, result has already the result + if not lib.is_scalar(data): + if not np.all(isna(data)): + data = np.array(data, dtype=dtype, copy=False) + result = np.array(data, dtype=object, copy=copy) + return result + + +def _maybe_repeat(arr: ArrayLike, index: Index | None) -> ArrayLike: + """ + If we have a length-1 array and an index describing how long we expect + the result to be, repeat the array. + """ + if index is not None: + if 1 == len(arr) != len(index): + arr = arr.repeat(len(index)) + return arr + + +def _try_cast( + arr: list | np.ndarray, + dtype: np.dtype, + copy: bool, +) -> ArrayLike: + """ + Convert input to numpy ndarray and optionally cast to a given dtype. + + Parameters + ---------- + arr : ndarray or list + Excludes: ExtensionArray, Series, Index. + dtype : np.dtype + copy : bool + If False, don't copy the data if not needed. 
+ + Returns + ------- + np.ndarray or ExtensionArray + """ + is_ndarray = isinstance(arr, np.ndarray) + + if dtype == object: + if not is_ndarray: + subarr = construct_1d_object_array_from_listlike(arr) + return subarr + return ensure_wrapped_if_datetimelike(arr).astype(dtype, copy=copy) + + elif dtype.kind == "U": + # TODO: test cases with arr.dtype.kind in "mM" + if is_ndarray: + arr = cast(np.ndarray, arr) + shape = arr.shape + if arr.ndim > 1: + arr = arr.ravel() + else: + shape = (len(arr),) + return lib.ensure_string_array(arr, convert_na_value=False, copy=copy).reshape( + shape + ) + + elif dtype.kind in "mM": + return maybe_cast_to_datetime(arr, dtype) + + # GH#15832: Check if we are requesting a numeric dtype and + # that we can convert the data to the requested dtype. + elif dtype.kind in "iu": + # this will raise if we have e.g. floats + + subarr = maybe_cast_to_integer_array(arr, dtype) + else: + subarr = np.array(arr, dtype=dtype, copy=copy) + + return subarr diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/api.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/api.py new file mode 100644 index 00000000..254abe33 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/api.py @@ -0,0 +1,85 @@ +from pandas.core.dtypes.common import ( + is_any_real_numeric_dtype, + is_array_like, + is_bool, + is_bool_dtype, + is_categorical_dtype, + is_complex, + is_complex_dtype, + is_datetime64_any_dtype, + is_datetime64_dtype, + is_datetime64_ns_dtype, + is_datetime64tz_dtype, + is_dict_like, + is_dtype_equal, + is_extension_array_dtype, + is_file_like, + is_float, + is_float_dtype, + is_hashable, + is_int64_dtype, + is_integer, + is_integer_dtype, + is_interval, + is_interval_dtype, + is_iterator, + is_list_like, + is_named_tuple, + is_number, + is_numeric_dtype, + is_object_dtype, + is_period_dtype, + is_re, + is_re_compilable, + is_scalar, + is_signed_integer_dtype, + is_sparse, + is_string_dtype, + is_timedelta64_dtype, + is_timedelta64_ns_dtype, + is_unsigned_integer_dtype, + pandas_dtype, +) + +__all__ = [ + "is_any_real_numeric_dtype", + "is_array_like", + "is_bool", + "is_bool_dtype", + "is_categorical_dtype", + "is_complex", + "is_complex_dtype", + "is_datetime64_any_dtype", + "is_datetime64_dtype", + "is_datetime64_ns_dtype", + "is_datetime64tz_dtype", + "is_dict_like", + "is_dtype_equal", + "is_extension_array_dtype", + "is_file_like", + "is_float", + "is_float_dtype", + "is_hashable", + "is_int64_dtype", + "is_integer", + "is_integer_dtype", + "is_interval", + "is_interval_dtype", + "is_iterator", + "is_list_like", + "is_named_tuple", + "is_number", + "is_numeric_dtype", + "is_object_dtype", + "is_period_dtype", + "is_re", + "is_re_compilable", + "is_scalar", + "is_signed_integer_dtype", + "is_sparse", + "is_string_dtype", + "is_timedelta64_dtype", + "is_timedelta64_ns_dtype", + "is_unsigned_integer_dtype", + "pandas_dtype", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/astype.py new file mode 100644 index 00000000..ac3a4427 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/astype.py @@ -0,0 +1,302 @@ +""" +Functions for implementing 'astype' methods according to pandas conventions, +particularly ones that differ 
from numpy. """ +from __future__ import annotations + +import inspect +from typing import ( + TYPE_CHECKING, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas._libs.tslibs.timedeltas import array_to_timedelta64 +from pandas.errors import IntCastingNaNError + +from pandas.core.dtypes.common import ( + is_object_dtype, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, + NumpyEADtype, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + DtypeObj, + IgnoreRaise, + ) + + from pandas.core.arrays import ExtensionArray + +_dtype_obj = np.dtype(object) + + +@overload +def _astype_nansafe( + arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ... +) -> np.ndarray: + ... + + +@overload +def _astype_nansafe( + arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ... +) -> ExtensionArray: + ... + + +def _astype_nansafe( + arr: np.ndarray, dtype: DtypeObj, copy: bool = True, skipna: bool = False +) -> ArrayLike: + """ + Cast the elements of an array to a given dtype in a nan-safe manner. + + Parameters + ---------- + arr : ndarray + dtype : np.dtype or ExtensionDtype + copy : bool, default True + If False, a view will be attempted but may fail, if + e.g. the item sizes don't align. + skipna : bool, default False + Whether we should skip NaN when casting as a string-type. + + Raises + ------ + ValueError + The dtype was a datetime64/timedelta64 dtype, but it had no unit. + """ + + # dispatch on extension dtype if needed + if isinstance(dtype, ExtensionDtype): + return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy) + + elif not isinstance(dtype, np.dtype): # pragma: no cover + raise ValueError("dtype must be np.dtype or ExtensionDtype") + + if arr.dtype.kind in "mM": + from pandas.core.construction import ensure_wrapped_if_datetimelike + + arr = ensure_wrapped_if_datetimelike(arr) + res = arr.astype(dtype, copy=copy) + return np.asarray(res) + + if issubclass(dtype.type, str): + shape = arr.shape + if arr.ndim > 1: + arr = arr.ravel() + return lib.ensure_string_array( + arr, skipna=skipna, convert_na_value=False + ).reshape(shape) + + elif np.issubdtype(arr.dtype, np.floating) and dtype.kind in "iu": + return _astype_float_to_int_nansafe(arr, dtype, copy) + + elif arr.dtype == object: + # if we have a datetime/timedelta array of objects + # then coerce to datetime64[ns] and use DatetimeArray.astype + + if lib.is_np_dtype(dtype, "M"): + from pandas import to_datetime + + dti = to_datetime(arr.ravel()) + dta = dti._data.reshape(arr.shape) + return dta.astype(dtype, copy=False)._ndarray + + elif lib.is_np_dtype(dtype, "m"): + from pandas.core.construction import ensure_wrapped_if_datetimelike + + # because we know arr.dtype == object, this is equivalent to + # `np.asarray(to_timedelta(arr))`, but using a lower-level API that + # does not require a circular import. + tdvals = array_to_timedelta64(arr).view("m8[ns]") + + tda = ensure_wrapped_if_datetimelike(tdvals) + return tda.astype(dtype, copy=False)._ndarray + + if dtype.name in ("datetime64", "timedelta64"): + msg = ( + f"The '{dtype.name}' dtype has no unit. Please pass in " + f"'{dtype.name}[ns]' instead." + ) + raise ValueError(msg) + + if copy or arr.dtype == object or dtype == object: + # Explicit copy, or required since NumPy can't view from / to object.
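+ # A view cannot reinterpret fixed-width values as PyObject pointers (or + # vice versa), so casting to or from object dtype always allocates.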
+ + return arr.astype(dtype, copy=True) + + return arr.astype(dtype, copy=copy) + + +def _astype_float_to_int_nansafe( + values: np.ndarray, dtype: np.dtype, copy: bool +) -> np.ndarray: + """ + astype with a check preventing converting NaN to a meaningless integer value. + """ + if not np.isfinite(values).all(): + raise IntCastingNaNError( + "Cannot convert non-finite values (NA or inf) to integer" + ) + if dtype.kind == "u": + # GH#45151 + if not (values >= 0).all(): + raise ValueError(f"Cannot losslessly cast from {values.dtype} to {dtype}") + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + return values.astype(dtype, copy=copy) + + +def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike: + """ + Cast array (ndarray or ExtensionArray) to the new dtype. + + Parameters + ---------- + values : ndarray or ExtensionArray + dtype : dtype object + copy : bool, default False + copy if indicated + + Returns + ------- + ndarray or ExtensionArray + """ + if values.dtype == dtype: + if copy: + return values.copy() + return values + + if not isinstance(values, np.ndarray): + # i.e. ExtensionArray + values = values.astype(dtype, copy=copy) + + else: + values = _astype_nansafe(values, dtype, copy=copy) + + # in pandas we don't store numpy str dtypes, so convert to object + if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str): + values = np.array(values, dtype=object) + + return values + + +def astype_array_safe( + values: ArrayLike, dtype, copy: bool = False, errors: IgnoreRaise = "raise" +) -> ArrayLike: + """ + Cast array (ndarray or ExtensionArray) to the new dtype. + + This basically is the implementation for DataFrame/Series.astype and + includes all custom logic for pandas (NaN-safety, converting str to object, + not allowing an ExtensionDtype class in place of an instance, and honoring + the ``errors`` keyword). + + Parameters + ---------- + values : ndarray or ExtensionArray + dtype : str, dtype convertible + copy : bool, default False + copy if indicated + errors : str, {'raise', 'ignore'}, default 'raise' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object + + Returns + ------- + ndarray or ExtensionArray + """ + errors_legal_values = ("raise", "ignore") + + if errors not in errors_legal_values: + invalid_arg = ( + "Expected value of kwarg 'errors' to be one of " + f"{list(errors_legal_values)}. Supplied value is '{errors}'" + ) + raise ValueError(invalid_arg) + + if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype): + msg = ( + f"Expected an instance of {dtype.__name__}, " + "but got the class instead. Try instantiating 'dtype'." + ) + raise TypeError(msg) + + dtype = pandas_dtype(dtype) + if isinstance(dtype, NumpyEADtype): + # Ensure we don't end up with a NumpyExtensionArray + dtype = dtype.numpy_dtype + + try: + new_values = astype_array(values, dtype, copy=copy) + except (ValueError, TypeError): + # e.g. _astype_nansafe can fail on object-dtype of strings + # trying to convert to float + if errors == "ignore": + new_values = values + else: + raise + + return new_values + + +def astype_is_view(dtype: DtypeObj, new_dtype: DtypeObj) -> bool: + """Checks if astype avoided copying the data.
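+ + Used (for example by pandas' Copy-on-Write logic) to decide whether the + result of an ``astype`` call may share memory with its input; when unsure + it errs on the side of reporting a view.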
+ + Parameters + ---------- + dtype : Original dtype + new_dtype : target dtype + + Returns + ------- + True if new data is a view or not guaranteed to be a copy, False otherwise + """ + if isinstance(dtype, np.dtype) and not isinstance(new_dtype, np.dtype): + new_dtype, dtype = dtype, new_dtype + + if dtype == new_dtype: + return True + + elif isinstance(dtype, np.dtype) and isinstance(new_dtype, np.dtype): + # Only equal numpy dtypes avoid a copy + return False + + elif is_string_dtype(dtype) and is_string_dtype(new_dtype): + # Potentially! a view when converting from object to string + return True + + elif is_object_dtype(dtype) and new_dtype.kind == "O": + # When the underlying array has dtype object, we don't have to make a copy + return True + + elif dtype.kind in "mM" and new_dtype.kind in "mM": + dtype = getattr(dtype, "numpy_dtype", dtype) + new_dtype = getattr(new_dtype, "numpy_dtype", new_dtype) + return getattr(dtype, "unit", None) == getattr(new_dtype, "unit", None) + + numpy_dtype = getattr(dtype, "numpy_dtype", None) + new_numpy_dtype = getattr(new_dtype, "numpy_dtype", None) + + if numpy_dtype is None and isinstance(dtype, np.dtype): + numpy_dtype = dtype + + if new_numpy_dtype is None and isinstance(new_dtype, np.dtype): + new_numpy_dtype = new_dtype + + if numpy_dtype is not None and new_numpy_dtype is not None: + # if both have NumPy dtype or one of them is a numpy dtype + # they are only a view when the numpy dtypes are equal, e.g. + # int64 -> Int64 or int64[pyarrow] + # int64 -> Int32 copies + return numpy_dtype == new_numpy_dtype + + # Assume this is a view since we don't know for sure if a copy was made + return True diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/base.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/base.py new file mode 100644 index 00000000..bc776434 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/base.py @@ -0,0 +1,544 @@ +""" +Extend pandas with custom array types. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + TypeVar, + cast, + overload, +) + +import numpy as np + +from pandas._libs import missing as libmissing +from pandas._libs.hashtable import object_hash +from pandas.errors import AbstractMethodError + +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from pandas._typing import ( + DtypeObj, + Self, + Shape, + npt, + type_t, + ) + + from pandas.core.arrays import ExtensionArray + + # To parameterize on same ExtensionDtype + ExtensionDtypeT = TypeVar("ExtensionDtypeT", bound="ExtensionDtype") + + +class ExtensionDtype: + """ + A custom data type, to be paired with an ExtensionArray. + + See Also + -------- + extensions.register_extension_dtype: Register an ExtensionType + with pandas as class decorator. + extensions.ExtensionArray: Abstract base class for custom 1-D array types. + + Notes + ----- + The interface includes the following abstract methods that must + be implemented by subclasses: + + * type + * name + * construct_array_type + + The following attributes and methods influence the behavior of the dtype in + pandas operations + + * _is_numeric + * _is_boolean + * _get_common_dtype + + The `na_value` class attribute can be used to set the default NA value + for this type. :attr:`numpy.nan` is used by default. + + ExtensionDtypes are required to be hashable. The base class provides + a default implementation, which relies on the ``_metadata`` class + attribute. 
``_metadata`` should be a tuple containing the strings + that define your data type. For example, with ``PeriodDtype`` that's + the ``freq`` attribute. + + **If you have a parametrized dtype you should set the ``_metadata`` + class property**. + + Ideally, the attributes in ``_metadata`` will match the + parameters to your ``ExtensionDtype.__init__`` (if any). If any of + the attributes in ``_metadata`` don't implement the standard + ``__eq__`` or ``__hash__``, the default implementations here will not + work. + + Examples + -------- + + For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method + can be implemented: this method receives a pyarrow Array or ChunkedArray + as only argument and is expected to return the appropriate pandas + ExtensionArray for this dtype and the passed values: + + >>> import pyarrow + >>> from pandas.api.extensions import ExtensionArray + >>> class ExtensionDtype: + ... def __from_arrow__( + ... self, + ... array: pyarrow.Array | pyarrow.ChunkedArray + ... ) -> ExtensionArray: + ... ... + + This class does not inherit from 'abc.ABCMeta' for performance reasons. + Methods and properties required by the interface raise + ``pandas.errors.AbstractMethodError`` and no ``register`` method is + provided for registering virtual subclasses. + """ + + _metadata: tuple[str, ...] = () + + def __str__(self) -> str: + return self.name + + def __eq__(self, other: Any) -> bool: + """ + Check whether 'other' is equal to self. + + By default, 'other' is considered equal if either + + * it's a string matching 'self.name'. + * it's an instance of this type and all of the attributes + in ``self._metadata`` are equal between `self` and `other`. + + Parameters + ---------- + other : Any + + Returns + ------- + bool + """ + if isinstance(other, str): + try: + other = self.construct_from_string(other) + except TypeError: + return False + if isinstance(other, type(self)): + return all( + getattr(self, attr) == getattr(other, attr) for attr in self._metadata + ) + return False + + def __hash__(self) -> int: + # for python>=3.10, different nan objects have different hashes + # we need to avoid that and thus use hash function with old behavior + return object_hash(tuple(getattr(self, attr) for attr in self._metadata)) + + def __ne__(self, other: Any) -> bool: + return not self.__eq__(other) + + @property + def na_value(self) -> object: + """ + Default NA value to use for this type. + + This is used in e.g. ExtensionArray.take. This should be the + user-facing "boxed" version of the NA value, not the physical NA value + for storage. e.g. for JSONArray, this is an empty dictionary. + """ + return np.nan + + @property + def type(self) -> type_t[Any]: + """ + The scalar type for the array, e.g. ``int`` + + It's expected ``ExtensionArray[item]`` returns an instance + of ``ExtensionDtype.type`` for scalar ``item``, assuming + that value is valid (not NA). NA values do not need to be + instances of `type`. + """ + raise AbstractMethodError(self) + + @property + def kind(self) -> str: + """ + A character code (one of 'biufcmMOSUV'), default 'O' + + This should match the NumPy dtype used when the array is + converted to an ndarray, which is probably 'O' for object if + the extension type cannot be represented as a built-in NumPy + type. + + See Also + -------- + numpy.dtype.kind + """ + return "O" + + @property + def name(self) -> str: + """ + A string identifying the data type. + + Will be used for display in, e.g. 
``Series.dtype`` + """ + raise AbstractMethodError(self) + + @property + def names(self) -> list[str] | None: + """ + Ordered list of field names, or None if there are no fields. + + This is for compatibility with NumPy arrays, and may be removed in the + future. + """ + return None + + @classmethod + def construct_array_type(cls) -> type_t[ExtensionArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + raise AbstractMethodError(cls) + + def empty(self, shape: Shape) -> ExtensionArray: + """ + Construct an ExtensionArray of this dtype with the given shape. + + Analogous to numpy.empty. + + Parameters + ---------- + shape : int or tuple[int] + + Returns + ------- + ExtensionArray + """ + cls = self.construct_array_type() + return cls._empty(shape, dtype=self) + + @classmethod + def construct_from_string(cls, string: str) -> Self: + r""" + Construct this type from a string. + + This is useful mainly for data types that accept parameters. + For example, a period dtype accepts a frequency parameter that + can be set as ``period[H]`` (where H means hourly frequency). + + By default, in the abstract class, just the name of the type is + expected. But subclasses can overwrite this method to accept + parameters. + + Parameters + ---------- + string : str + The name of the type, for example ``category``. + + Returns + ------- + ExtensionDtype + Instance of the dtype. + + Raises + ------ + TypeError + If a class cannot be constructed from this 'string'. + + Examples + -------- + For extension dtypes with arguments the following may be an + adequate implementation. + + >>> import re + >>> @classmethod + ... def construct_from_string(cls, string): + ... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$") + ... match = pattern.match(string) + ... if match: + ... return cls(**match.groupdict()) + ... else: + ... raise TypeError( + ... f"Cannot construct a '{cls.__name__}' from '{string}'" + ... ) + """ + if not isinstance(string, str): + raise TypeError( + f"'construct_from_string' expects a string, got {type(string)}" + ) + # error: Non-overlapping equality check (left operand type: "str", right + # operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap] + assert isinstance(cls.name, str), (cls, type(cls.name)) + if string != cls.name: + raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'") + return cls() + + @classmethod + def is_dtype(cls, dtype: object) -> bool: + """ + Check if we match 'dtype'. + + Parameters + ---------- + dtype : object + The object to check. + + Returns + ------- + bool + + Notes + ----- + The default implementation is True if + + 1. ``cls.construct_from_string(dtype)`` is an instance + of ``cls``. + 2. ``dtype`` is an object and is an instance of ``cls`` + 3. ``dtype`` has a ``dtype`` attribute, and any of the above + conditions is true for ``dtype.dtype``. + """ + dtype = getattr(dtype, "dtype", dtype) + + if isinstance(dtype, (ABCSeries, ABCIndex, ABCDataFrame, np.dtype)): + # https://github.com/pandas-dev/pandas/issues/22960 + # avoid passing data to `construct_from_string`. This could + # cause a FutureWarning from numpy about failing elementwise + # comparison from, e.g., comparing DataFrame == 'category'.
+ return False + elif dtype is None: + return False + elif isinstance(dtype, cls): + return True + if isinstance(dtype, str): + try: + return cls.construct_from_string(dtype) is not None + except TypeError: + return False + return False + + @property + def _is_numeric(self) -> bool: + """ + Whether columns with this dtype should be considered numeric. + + By default ExtensionDtypes are assumed to be non-numeric. + They'll be excluded from operations that exclude non-numeric + columns, like (groupby) reductions, plotting, etc. + """ + return False + + @property + def _is_boolean(self) -> bool: + """ + Whether this dtype should be considered boolean. + + By default, ExtensionDtypes are assumed to be non-boolean. + Setting this to True will affect the behavior of several places, + e.g. + + * is_bool + * boolean indexing + + Returns + ------- + bool + """ + return False + + def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: + """ + Return the common dtype, if one exists. + + Used in `find_common_type` implementation. This is for example used + to determine the resulting dtype in a concat operation. + + If no common dtype exists, return None (which gives the other dtypes + the chance to determine a common dtype). If all dtypes in the list + return None, then the common dtype will be "object" dtype (this means + it is never needed to return "object" dtype from this method itself). + + Parameters + ---------- + dtypes : list of dtypes + The dtypes for which to determine a common dtype. This is a list + of np.dtype or ExtensionDtype instances. + + Returns + ------- + Common dtype (np.dtype or ExtensionDtype) or None + """ + if len(set(dtypes)) == 1: + # only itself + return self + else: + return None + + @property + def _can_hold_na(self) -> bool: + """ + Can arrays of this dtype hold NA values? + """ + return True + + @property + def _is_immutable(self) -> bool: + """ + Can arrays with this dtype be modified with __setitem__? If not, return + True. + + Immutable arrays are expected to raise TypeError on __setitem__ calls. + """ + return False + + +class StorageExtensionDtype(ExtensionDtype): + """ExtensionDtype that may be backed by more than one implementation.""" + + name: str + _metadata = ("storage",) + + def __init__(self, storage: str | None = None) -> None: + self.storage = storage + + def __repr__(self) -> str: + return f"{self.name}[{self.storage}]" + + def __str__(self) -> str: + return self.name + + def __eq__(self, other: Any) -> bool: + if isinstance(other, str) and other == self.name: + return True + return super().__eq__(other) + + def __hash__(self) -> int: + # custom __eq__ so have to override __hash__ + return super().__hash__() + + @property + def na_value(self) -> libmissing.NAType: + return libmissing.NA + + +def register_extension_dtype(cls: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]: + """ + Register an ExtensionType with pandas as class decorator. + + This enables operations like ``.astype(name)`` for the name + of the ExtensionDtype. + + Returns + ------- + callable + A class decorator. + + Examples + -------- + >>> from pandas.api.extensions import register_extension_dtype, ExtensionDtype + >>> @register_extension_dtype + ... class MyExtensionDtype(ExtensionDtype): + ... name = "myextension" + """ + _registry.register(cls) + return cls + + +class Registry: + """ + Registry for dtype inference. + + The registry allows one to map a string repr of an extension + dtype to an extension dtype.
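For example, the string ``"category"`` resolves to ``CategoricalDtype``.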
The string alias can be used in several + places, including + + * Series and Index constructors + * :meth:`pandas.array` + * :meth:`pandas.Series.astype` + + Multiple extension types can be registered. + These are tried in order. + """ + + def __init__(self) -> None: + self.dtypes: list[type_t[ExtensionDtype]] = [] + + def register(self, dtype: type_t[ExtensionDtype]) -> None: + """ + Parameters + ---------- + dtype : ExtensionDtype class + """ + if not issubclass(dtype, ExtensionDtype): + raise ValueError("can only register pandas extension dtypes") + + self.dtypes.append(dtype) + + @overload + def find(self, dtype: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]: + ... + + @overload + def find(self, dtype: ExtensionDtypeT) -> ExtensionDtypeT: + ... + + @overload + def find(self, dtype: str) -> ExtensionDtype | None: + ... + + @overload + def find( + self, dtype: npt.DTypeLike + ) -> type_t[ExtensionDtype] | ExtensionDtype | None: + ... + + def find( + self, dtype: type_t[ExtensionDtype] | ExtensionDtype | npt.DTypeLike + ) -> type_t[ExtensionDtype] | ExtensionDtype | None: + """ + Parameters + ---------- + dtype : ExtensionDtype class or instance or str or numpy dtype or python type + + Returns + ------- + return the first matching dtype, otherwise return None + """ + if not isinstance(dtype, str): + dtype_type: type_t + if not isinstance(dtype, type): + dtype_type = type(dtype) + else: + dtype_type = dtype + if issubclass(dtype_type, ExtensionDtype): + # cast needed here as mypy doesn't know we have figured + # out it is an ExtensionDtype or type_t[ExtensionDtype] + return cast("ExtensionDtype | type_t[ExtensionDtype]", dtype) + + return None + + for dtype_type in self.dtypes: + try: + return dtype_type.construct_from_string(dtype) + except TypeError: + pass + + return None + + +_registry = Registry() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/cast.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/cast.py new file mode 100644 index 00000000..9a9a8ed2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/cast.py @@ -0,0 +1,1916 @@ +""" +Routines for casting. 
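+ +These helpers implement pandas' value-dependent casting rules: promoting a +dtype so it can hold missing values, checking lossless float-to-int and +integer-overflow casts, inferring dtypes from scalars, and validating +datetime/timedelta units.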
+""" + +from __future__ import annotations + +import datetime as dt +import functools +from typing import ( + TYPE_CHECKING, + Any, + Literal, + TypeVar, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib +from pandas._libs.missing import ( + NA, + NAType, + checknull, +) +from pandas._libs.tslibs import ( + NaT, + OutOfBoundsDatetime, + OutOfBoundsTimedelta, + Timedelta, + Timestamp, + get_unit_from_dtype, + is_supported_unit, +) +from pandas._libs.tslibs.timedeltas import array_to_timedelta64 +from pandas.errors import ( + IntCastingNaNError, + LossySetitemError, +) + +from pandas.core.dtypes.common import ( + ensure_int8, + ensure_int16, + ensure_int32, + ensure_int64, + ensure_object, + ensure_str, + is_bool, + is_complex, + is_float, + is_integer, + is_object_dtype, + is_scalar, + is_string_dtype, + pandas_dtype as pandas_dtype_func, +) +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + BaseMaskedDtype, + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + IntervalDtype, + PandasExtensionDtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.inference import is_list_like +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + na_value_for_dtype, + notna, +) + +from pandas.io._util import _arrow_dtype_mapping + +if TYPE_CHECKING: + from collections.abc import ( + Sequence, + Sized, + ) + + from pandas._typing import ( + ArrayLike, + Dtype, + DtypeObj, + NumpyIndexT, + Scalar, + npt, + ) + + from pandas import Index + from pandas.core.arrays import ( + Categorical, + DatetimeArray, + ExtensionArray, + IntervalArray, + PeriodArray, + TimedeltaArray, + ) + + +_int8_max = np.iinfo(np.int8).max +_int16_max = np.iinfo(np.int16).max +_int32_max = np.iinfo(np.int32).max + +_dtype_obj = np.dtype(object) + +NumpyArrayT = TypeVar("NumpyArrayT", bound=np.ndarray) + + +def maybe_convert_platform( + values: list | tuple | range | np.ndarray | ExtensionArray, +) -> ArrayLike: + """try to do platform conversion, allow ndarray or list here""" + arr: ArrayLike + + if isinstance(values, (list, tuple, range)): + arr = construct_1d_object_array_from_listlike(values) + else: + # The caller is responsible for ensuring that we have np.ndarray + # or ExtensionArray here. + arr = values + + if arr.dtype == _dtype_obj: + arr = cast(np.ndarray, arr) + arr = lib.maybe_convert_objects(arr) + + return arr + + +def is_nested_object(obj) -> bool: + """ + return a boolean if we have a nested object, e.g. a Series with 1 or + more Series elements + + This may not be necessarily be performant. + + """ + return bool( + isinstance(obj, ABCSeries) + and is_object_dtype(obj.dtype) + and any(isinstance(v, ABCSeries) for v in obj._values) + ) + + +def maybe_box_datetimelike(value: Scalar, dtype: Dtype | None = None) -> Scalar: + """ + Cast scalar to Timestamp or Timedelta if scalar is datetime-like + and dtype is not object. + + Parameters + ---------- + value : scalar + dtype : Dtype, optional + + Returns + ------- + scalar + """ + if dtype == _dtype_obj: + pass + elif isinstance(value, (np.datetime64, dt.datetime)): + value = Timestamp(value) + elif isinstance(value, (np.timedelta64, dt.timedelta)): + value = Timedelta(value) + + return value + + +def maybe_box_native(value: Scalar | None | NAType) -> Scalar | None | NAType: + """ + If passed a scalar cast the scalar to a python native type. 
+ + Parameters + ---------- + value : scalar or Series + + Returns + ------- + scalar or Series + """ + if is_float(value): + value = float(value) + elif is_integer(value): + value = int(value) + elif is_bool(value): + value = bool(value) + elif isinstance(value, (np.datetime64, np.timedelta64)): + value = maybe_box_datetimelike(value) + elif value is NA: + value = None + return value + + +def _maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar: + """ + Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting + into a numpy array. Failing to unbox would risk dropping nanoseconds. + + Notes + ----- + Caller is responsible for checking dtype.kind in "mM" + """ + if is_valid_na_for_dtype(value, dtype): + # GH#36541: can't fill array directly with pd.NaT + # > np.empty(10, dtype="datetime64[ns]").fill(pd.NaT) + # ValueError: cannot convert float NaN to integer + value = dtype.type("NaT", "ns") + elif isinstance(value, Timestamp): + if value.tz is None: + value = value.to_datetime64() + elif not isinstance(dtype, DatetimeTZDtype): + raise TypeError("Cannot unbox tzaware Timestamp to tznaive dtype") + elif isinstance(value, Timedelta): + value = value.to_timedelta64() + + _disallow_mismatched_datetimelike(value, dtype) + return value + + +def _disallow_mismatched_datetimelike(value, dtype: DtypeObj): + """ + numpy allows np.array(dt64values, dtype="timedelta64[ns]") and + vice-versa, but we do not want to allow this, so we need to + check explicitly + """ + vdtype = getattr(value, "dtype", None) + if vdtype is None: + return + elif (vdtype.kind == "m" and dtype.kind == "M") or ( + vdtype.kind == "M" and dtype.kind == "m" + ): + raise TypeError(f"Cannot cast {repr(value)} to {dtype}") + + +@overload +def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray: + ... + + +@overload +def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike: + ... + + +def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike: + """ + try to cast to the specified dtype (e.g. convert back to bool/int + or could be an astype of float64->float32 + """ + do_round = False + + if isinstance(dtype, str): + if dtype == "infer": + inferred_type = lib.infer_dtype(result, skipna=False) + if inferred_type == "boolean": + dtype = "bool" + elif inferred_type == "integer": + dtype = "int64" + elif inferred_type == "datetime64": + dtype = "datetime64[ns]" + elif inferred_type in ["timedelta", "timedelta64"]: + dtype = "timedelta64[ns]" + + # try to upcast here + elif inferred_type == "floating": + dtype = "int64" + if issubclass(result.dtype.type, np.number): + do_round = True + + else: + # TODO: complex? what if result is already non-object? 
+ dtype = "object" + + dtype = np.dtype(dtype) + + if not isinstance(dtype, np.dtype): + # enforce our signature annotation + raise TypeError(dtype) # pragma: no cover + + converted = maybe_downcast_numeric(result, dtype, do_round) + if converted is not result: + return converted + + # a datetimelike + # GH12821, iNaT is cast to float + if dtype.kind in "mM" and result.dtype.kind in "if": + result = result.astype(dtype) + + elif dtype.kind == "m" and result.dtype == _dtype_obj: + # test_where_downcast_to_td64 + result = cast(np.ndarray, result) + result = array_to_timedelta64(result) + + elif dtype == np.dtype("M8[ns]") and result.dtype == _dtype_obj: + result = cast(np.ndarray, result) + return np.asarray(maybe_cast_to_datetime(result, dtype=dtype)) + + return result + + +@overload +def maybe_downcast_numeric( + result: np.ndarray, dtype: np.dtype, do_round: bool = False +) -> np.ndarray: + ... + + +@overload +def maybe_downcast_numeric( + result: ExtensionArray, dtype: DtypeObj, do_round: bool = False +) -> ArrayLike: + ... + + +def maybe_downcast_numeric( + result: ArrayLike, dtype: DtypeObj, do_round: bool = False +) -> ArrayLike: + """ + Subset of maybe_downcast_to_dtype restricted to numeric dtypes. + + Parameters + ---------- + result : ndarray or ExtensionArray + dtype : np.dtype or ExtensionDtype + do_round : bool + + Returns + ------- + ndarray or ExtensionArray + """ + if not isinstance(dtype, np.dtype) or not isinstance(result.dtype, np.dtype): + # e.g. SparseDtype has no itemsize attr + return result + + def trans(x): + if do_round: + return x.round() + return x + + if dtype.kind == result.dtype.kind: + # don't allow upcasts here (except if empty) + if result.dtype.itemsize <= dtype.itemsize and result.size: + return result + + if dtype.kind in "biu": + if not result.size: + # if we don't have any elements, just astype it + return trans(result).astype(dtype) + + # do a test on the first element, if it fails then we are done + r = result.ravel() + arr = np.array([r[0]]) + + if isna(arr).any(): + # if we have any nulls, then we are done + return result + + elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)): + # a comparable, e.g. a Decimal may slip in here + return result + + if ( + issubclass(result.dtype.type, (np.object_, np.number)) + and notna(result).all() + ): + new_result = trans(result).astype(dtype) + if new_result.dtype.kind == "O" or result.dtype.kind == "O": + # np.allclose may raise TypeError on object-dtype + if (new_result == result).all(): + return new_result + else: + if np.allclose(new_result, result, rtol=0): + return new_result + + elif ( + issubclass(dtype.type, np.floating) + and result.dtype.kind != "b" + and not is_string_dtype(result.dtype) + ): + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "overflow encountered in cast", RuntimeWarning + ) + new_result = result.astype(dtype) + + # Adjust tolerances based on floating point size + size_tols = {4: 5e-4, 8: 5e-8, 16: 5e-16} + + atol = size_tols.get(new_result.dtype.itemsize, 0.0) + + # Check downcast float values are still equal within 7 digits when + # converting from float64 to float32 + if np.allclose(new_result, result, equal_nan=True, rtol=0.0, atol=atol): + return new_result + + elif dtype.kind == result.dtype.kind == "c": + new_result = result.astype(dtype) + + if np.array_equal(new_result, result, equal_nan=True): + # TODO: use tolerance like we do for float? 
+ return new_result + + return result + + +def maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT: + """ + If array is a int/uint/float bit size lower than 64 bit, upcast it to 64 bit. + + Parameters + ---------- + arr : ndarray or ExtensionArray + + Returns + ------- + ndarray or ExtensionArray + """ + dtype = arr.dtype + if dtype.kind == "i" and dtype != np.int64: + return arr.astype(np.int64) + elif dtype.kind == "u" and dtype != np.uint64: + return arr.astype(np.uint64) + elif dtype.kind == "f" and dtype != np.float64: + return arr.astype(np.float64) + else: + return arr + + +def maybe_cast_pointwise_result( + result: ArrayLike, + dtype: DtypeObj, + numeric_only: bool = False, + same_dtype: bool = True, +) -> ArrayLike: + """ + Try casting result of a pointwise operation back to the original dtype if + appropriate. + + Parameters + ---------- + result : array-like + Result to cast. + dtype : np.dtype or ExtensionDtype + Input Series from which result was calculated. + numeric_only : bool, default False + Whether to cast only numerics or datetimes as well. + same_dtype : bool, default True + Specify dtype when calling _from_sequence + + Returns + ------- + result : array-like + result maybe casted to the dtype. + """ + + if isinstance(dtype, ExtensionDtype): + if not isinstance(dtype, (CategoricalDtype, DatetimeTZDtype)): + # TODO: avoid this special-casing + # We have to special case categorical so as not to upcast + # things like counts back to categorical + + cls = dtype.construct_array_type() + if same_dtype: + result = _maybe_cast_to_extension_array(cls, result, dtype=dtype) + else: + result = _maybe_cast_to_extension_array(cls, result) + + elif (numeric_only and dtype.kind in "iufcb") or not numeric_only: + result = maybe_downcast_to_dtype(result, dtype) + + return result + + +def _maybe_cast_to_extension_array( + cls: type[ExtensionArray], obj: ArrayLike, dtype: ExtensionDtype | None = None +) -> ArrayLike: + """ + Call to `_from_sequence` that returns the object unchanged on Exception. + + Parameters + ---------- + cls : class, subclass of ExtensionArray + obj : arraylike + Values to pass to cls._from_sequence + dtype : ExtensionDtype, optional + + Returns + ------- + ExtensionArray or obj + """ + from pandas.core.arrays.string_ import BaseStringArray + + # Everything can be converted to StringArrays, but we may not want to convert + if issubclass(cls, BaseStringArray) and lib.infer_dtype(obj) != "string": + return obj + + try: + result = cls._from_sequence(obj, dtype=dtype) + except Exception: + # We can't predict what downstream EA constructors may raise + result = obj + return result + + +@overload +def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype: + ... + + +@overload +def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype: + ... + + +def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj: + """ + If we have a dtype that cannot hold NA values, find the best match that can. + """ + if isinstance(dtype, ExtensionDtype): + if dtype._can_hold_na: + return dtype + elif isinstance(dtype, IntervalDtype): + # TODO(GH#45349): don't special-case IntervalDtype, allow + # overriding instead of returning object below. 
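+ # e.g. IntervalDtype("int64", closed="right") becomes + # IntervalDtype("float64", closed="right"), matching the int -> float + # promotion applied to plain integer dtypes below.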
+ return IntervalDtype(np.float64, closed=dtype.closed) + return _dtype_obj + elif dtype.kind == "b": + return _dtype_obj + elif dtype.kind in "iu": + return np.dtype(np.float64) + return dtype + + +_canonical_nans = { + np.datetime64: np.datetime64("NaT", "ns"), + np.timedelta64: np.timedelta64("NaT", "ns"), + type(np.nan): np.nan, +} + + +def maybe_promote(dtype: np.dtype, fill_value=np.nan): + """ + Find the minimal dtype that can hold both the given dtype and fill_value. + + Parameters + ---------- + dtype : np.dtype + fill_value : scalar, default np.nan + + Returns + ------- + dtype + Upcasted from dtype argument if necessary. + fill_value + Upcasted from fill_value argument if necessary. + + Raises + ------ + ValueError + If fill_value is a non-scalar and dtype is not object. + """ + orig = fill_value + orig_is_nat = False + if checknull(fill_value): + # https://github.com/pandas-dev/pandas/pull/39692#issuecomment-1441051740 + # avoid cache misses with NaN/NaT values that are not singletons + if fill_value is not NA: + try: + orig_is_nat = np.isnat(fill_value) + except TypeError: + pass + + fill_value = _canonical_nans.get(type(fill_value), fill_value) + + # for performance, we are using a cached version of the actual implementation + # of the function in _maybe_promote. However, this doesn't always work (in case + # of non-hashable arguments), so we fall back to the actual implementation if needed + try: + # error: Argument 3 to "__call__" of "_lru_cache_wrapper" has incompatible type + # "Type[Any]"; expected "Hashable" [arg-type] + dtype, fill_value = _maybe_promote_cached( + dtype, fill_value, type(fill_value) # type: ignore[arg-type] + ) + except TypeError: + # if fill_value is not hashable (required for caching) + dtype, fill_value = _maybe_promote(dtype, fill_value) + + if (dtype == _dtype_obj and orig is not None) or ( + orig_is_nat and np.datetime_data(orig)[0] != "ns" + ): + # GH#51592,53497 restore our potentially non-canonical fill_value + fill_value = orig + return dtype, fill_value + + +@functools.lru_cache +def _maybe_promote_cached(dtype, fill_value, fill_value_type): + # The cached version of _maybe_promote below + # This also uses fill_value_type as an (unused) argument for the + # cache lookup -> to differentiate 1 and True + return _maybe_promote(dtype, fill_value) + + +def _maybe_promote(dtype: np.dtype, fill_value=np.nan): + # The actual implementation of the function, use `maybe_promote` above for + # a cached version. + if not is_scalar(fill_value): + # with object dtype there is nothing to promote, and the user can + # pass pretty much any weird fill_value they like + if dtype != object: + raise ValueError("fill_value must be a scalar") + dtype = _dtype_obj + return dtype, fill_value + + if is_valid_na_for_dtype(fill_value, dtype) and dtype.kind in "iufcmM": + dtype = ensure_dtype_can_hold_na(dtype) + fv = na_value_for_dtype(dtype) + return dtype, fv + + elif isinstance(dtype, CategoricalDtype): + if fill_value in dtype.categories or isna(fill_value): + return dtype, fill_value + else: + return object, ensure_object(fill_value) + + elif isna(fill_value): + dtype = _dtype_obj + if fill_value is None: + # but we retain e.g.
pd.NA + fill_value = np.nan + return dtype, fill_value + + # returns tuple of (dtype, fill_value) + if issubclass(dtype.type, np.datetime64): + inferred, fv = infer_dtype_from_scalar(fill_value) + if inferred == dtype: + return dtype, fv + + from pandas.core.arrays import DatetimeArray + + dta = DatetimeArray._from_sequence([], dtype="M8[ns]") + try: + fv = dta._validate_setitem_value(fill_value) + return dta.dtype, fv + except (ValueError, TypeError): + return _dtype_obj, fill_value + + elif issubclass(dtype.type, np.timedelta64): + inferred, fv = infer_dtype_from_scalar(fill_value) + if inferred == dtype: + return dtype, fv + + elif inferred.kind == "m": + # different unit, e.g. passed np.timedelta64(24, "h") with dtype=m8[ns] + # see if we can losslessly cast it to our dtype + unit = np.datetime_data(dtype)[0] + try: + td = Timedelta(fill_value).as_unit(unit, round_ok=False) + except OutOfBoundsTimedelta: + return _dtype_obj, fill_value + else: + return dtype, td.asm8 + + return _dtype_obj, fill_value + + elif is_float(fill_value): + if issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif issubclass(dtype.type, np.integer): + dtype = np.dtype(np.float64) + + elif dtype.kind == "f": + mst = np.min_scalar_type(fill_value) + if mst > dtype: + # e.g. mst is np.float64 and dtype is np.float32 + dtype = mst + + elif dtype.kind == "c": + mst = np.min_scalar_type(fill_value) + dtype = np.promote_types(dtype, mst) + + elif is_bool(fill_value): + if not issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif is_integer(fill_value): + if issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif issubclass(dtype.type, np.integer): + if not np_can_cast_scalar(fill_value, dtype): # type: ignore[arg-type] + # upcast to prevent overflow + mst = np.min_scalar_type(fill_value) + dtype = np.promote_types(dtype, mst) + if dtype.kind == "f": + # Case where we disagree with numpy + dtype = np.dtype(np.object_) + + elif is_complex(fill_value): + if issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif issubclass(dtype.type, (np.integer, np.floating)): + mst = np.min_scalar_type(fill_value) + dtype = np.promote_types(dtype, mst) + + elif dtype.kind == "c": + mst = np.min_scalar_type(fill_value) + if mst > dtype: + # e.g. mst is np.complex128 and dtype is np.complex64 + dtype = mst + + else: + dtype = np.dtype(np.object_) + + # in case we have a string that looked like a number + if issubclass(dtype.type, (bytes, str)): + dtype = np.dtype(np.object_) + + fill_value = _ensure_dtype_type(fill_value, dtype) + return dtype, fill_value + + +def _ensure_dtype_type(value, dtype: np.dtype): + """ + Ensure that the given value is an instance of the given dtype. + + e.g. if out dtype is np.complex64_, we should have an instance of that + as opposed to a python complex object. + + Parameters + ---------- + value : object + dtype : np.dtype + + Returns + ------- + object + """ + # Start with exceptions in which we do _not_ cast to numpy types + + if dtype == _dtype_obj: + return value + + # Note: before we get here we have already excluded isna(value) + return dtype.type(value) + + +def infer_dtype_from(val) -> tuple[DtypeObj, Any]: + """ + Interpret the dtype from a scalar or array. + + Parameters + ---------- + val : object + """ + if not is_list_like(val): + return infer_dtype_from_scalar(val) + return infer_dtype_from_array(val) + + +def infer_dtype_from_scalar(val) -> tuple[DtypeObj, Any]: + """ + Interpret the dtype from a scalar. 
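+ + For example, a Python ``int`` yields ``int64``, a timezone-aware + ``Timestamp`` yields ``DatetimeTZDtype``, and a plain string yields object + dtype (or ``StringDtype`` when the pyarrow string option is enabled).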
+ + Parameters + ---------- + val : object + """ + dtype: DtypeObj = _dtype_obj + + # a 1-element ndarray + if isinstance(val, np.ndarray): + if val.ndim != 0: + msg = "invalid ndarray passed to infer_dtype_from_scalar" + raise ValueError(msg) + + dtype = val.dtype + val = lib.item_from_zerodim(val) + + elif isinstance(val, str): + # If we create an empty array using a string to infer + # the dtype, NumPy will only allocate one character per entry + # so this is kind of bad. Alternately we could use np.repeat + # instead of np.empty (but then you still don't want things + # coming out as np.str_!) + + dtype = _dtype_obj + if using_pyarrow_string_dtype(): + from pandas.core.arrays.string_ import StringDtype + + dtype = StringDtype(storage="pyarrow_numpy") + + elif isinstance(val, (np.datetime64, dt.datetime)): + try: + val = Timestamp(val) + except OutOfBoundsDatetime: + return _dtype_obj, val + + if val is NaT or val.tz is None: + val = val.to_datetime64() + dtype = val.dtype + # TODO: test with datetime(2920, 10, 1) based on test_replace_dtypes + else: + dtype = DatetimeTZDtype(unit=val.unit, tz=val.tz) + + elif isinstance(val, (np.timedelta64, dt.timedelta)): + try: + val = Timedelta(val) + except (OutOfBoundsTimedelta, OverflowError): + dtype = _dtype_obj + else: + if val is NaT: + val = np.timedelta64("NaT", "ns") + else: + val = val.asm8 + dtype = val.dtype + + elif is_bool(val): + dtype = np.dtype(np.bool_) + + elif is_integer(val): + if isinstance(val, np.integer): + dtype = np.dtype(type(val)) + else: + dtype = np.dtype(np.int64) + + try: + np.array(val, dtype=dtype) + except OverflowError: + dtype = np.array(val).dtype + + elif is_float(val): + if isinstance(val, np.floating): + dtype = np.dtype(type(val)) + else: + dtype = np.dtype(np.float64) + + elif is_complex(val): + dtype = np.dtype(np.complex128) + + if lib.is_period(val): + dtype = PeriodDtype(freq=val.freq) + elif lib.is_interval(val): + subtype = infer_dtype_from_scalar(val.left)[0] + dtype = IntervalDtype(subtype=subtype, closed=val.closed) + + return dtype, val + + +def dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]: + """ + Convert datetimelike-keyed dicts to a Timestamp-keyed dict. + + Parameters + ---------- + d: dict-like object + + Returns + ------- + dict + """ + return {maybe_box_datetimelike(key): value for key, value in d.items()} + + +def infer_dtype_from_array(arr) -> tuple[DtypeObj, ArrayLike]: + """ + Infer the dtype from an array. + + Parameters + ---------- + arr : array + + Returns + ------- + tuple (pandas-compat dtype, array) + + + Examples + -------- + >>> np.asarray([1, '1']) + array(['1', '1'], dtype='<U21') + + >>> infer_dtype_from_array([1, '1']) + (dtype('O'), [1, '1']) + """ + if isinstance(arr, np.ndarray): + return arr.dtype, arr + + if not is_list_like(arr): + raise TypeError("'arr' must be list-like") + + arr_dtype = getattr(arr, "dtype", None) + if isinstance(arr_dtype, ExtensionDtype): + return arr.dtype, arr + + elif isinstance(arr, ABCSeries): + return arr.dtype, np.asarray(arr) + + # don't force numpy coerce with nan's + inferred = lib.infer_dtype(arr, skipna=False) + if inferred in ["string", "bytes", "mixed", "mixed-integer"]: + return (np.dtype(np.object_), arr) + + arr = np.asarray(arr) + return arr.dtype, arr + + +def _maybe_infer_dtype_type(element): + """ + Try to infer an object's dtype, for use in arithmetic ops. + + Uses `element.dtype` if that's available. + Objects implementing the iterator protocol are cast to a NumPy array, + and from there the array's type is used.
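+ + Objects with neither a ``dtype`` attribute nor the iterator protocol + yield ``None``.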
+ + Parameters + ---------- + element : object + Possibly has a `.dtype` attribute, and possibly the iterator + protocol. + + Returns + ------- + tipo : type + + Examples + -------- + >>> from collections import namedtuple + >>> Foo = namedtuple("Foo", "dtype") + >>> _maybe_infer_dtype_type(Foo(np.dtype("i8"))) + dtype('int64') + """ + tipo = None + if hasattr(element, "dtype"): + tipo = element.dtype + elif is_list_like(element): + element = np.asarray(element) + tipo = element.dtype + return tipo + + +def invalidate_string_dtypes(dtype_set: set[DtypeObj]) -> None: + """ + Change string like dtypes to object for + ``DataFrame.select_dtypes()``. + """ + # error: Argument 1 to <set> has incompatible type "Type[generic]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + # error: Argument 2 to <set> has incompatible type "Type[generic]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + non_string_dtypes = dtype_set - { + np.dtype("S").type, # type: ignore[arg-type] + np.dtype("<U").type, # type: ignore[arg-type] + } + if non_string_dtypes != dtype_set: + raise TypeError("string dtypes are not allowed, use 'object' instead") + + +def coerce_indexer_dtype(indexer, categories) -> np.ndarray: + """coerce the indexer input array to the smallest dtype possible""" + length = len(categories) + if length < _int8_max: + return ensure_int8(indexer) + elif length < _int16_max: + return ensure_int16(indexer) + elif length < _int32_max: + return ensure_int32(indexer) + return ensure_int64(indexer) + + +def convert_dtypes( + input_array: ArrayLike, + convert_string: bool = True, + convert_integer: bool = True, + convert_boolean: bool = True, + convert_floating: bool = True, + infer_objects: bool = False, + dtype_backend: Literal["numpy_nullable", "pyarrow"] = "numpy_nullable", +) -> DtypeObj: + """ + Convert objects to best possible type, and optionally, + to types supporting ``pd.NA``. + + Parameters + ---------- + input_array : ExtensionArray or np.ndarray + convert_string : bool, default True + Whether object dtypes should be converted to ``StringDtype()``. + convert_integer : bool, default True + Whether, if possible, conversion can be done to integer extension types. + convert_boolean : bool, default True + Whether object dtypes should be converted to ``BooleanDtypes()``. + convert_floating : bool, default True + Whether, if possible, conversion can be done to floating extension types. + If `convert_integer` is also True, preference will be given to integer + dtypes if the floats can be faithfully cast to integers. + infer_objects : bool, default False + Whether to also infer objects to float/int if possible. Is only hit if the + object array contains pd.NA. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + ..
versionadded:: 2.0 + + Returns + ------- + np.dtype, or ExtensionDtype + """ + inferred_dtype: str | DtypeObj + + if ( + convert_string or convert_integer or convert_boolean or convert_floating + ) and isinstance(input_array, np.ndarray): + if input_array.dtype == object: + inferred_dtype = lib.infer_dtype(input_array) + else: + inferred_dtype = input_array.dtype + + if is_string_dtype(inferred_dtype): + if not convert_string or inferred_dtype == "bytes": + inferred_dtype = input_array.dtype + else: + inferred_dtype = pandas_dtype_func("string") + + if convert_integer: + target_int_dtype = pandas_dtype_func("Int64") + + if input_array.dtype.kind in "iu": + from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE + + inferred_dtype = NUMPY_INT_TO_DTYPE.get( + input_array.dtype, target_int_dtype + ) + elif input_array.dtype.kind in "fcb": + # TODO: de-dup with maybe_cast_to_integer_array? + arr = input_array[notna(input_array)] + if (arr.astype(int) == arr).all(): + inferred_dtype = target_int_dtype + else: + inferred_dtype = input_array.dtype + elif ( + infer_objects + and input_array.dtype == object + and (isinstance(inferred_dtype, str) and inferred_dtype == "integer") + ): + inferred_dtype = target_int_dtype + + if convert_floating: + if input_array.dtype.kind in "fcb": + # i.e. numeric but not integer + from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE + + inferred_float_dtype: DtypeObj = NUMPY_FLOAT_TO_DTYPE.get( + input_array.dtype, pandas_dtype_func("Float64") + ) + # if we could also convert to integer, check if all floats + # are actually integers + if convert_integer: + # TODO: de-dup with maybe_cast_to_integer_array? + arr = input_array[notna(input_array)] + if (arr.astype(int) == arr).all(): + inferred_dtype = pandas_dtype_func("Int64") + else: + inferred_dtype = inferred_float_dtype + else: + inferred_dtype = inferred_float_dtype + elif ( + infer_objects + and input_array.dtype == object + and ( + isinstance(inferred_dtype, str) + and inferred_dtype == "mixed-integer-float" + ) + ): + inferred_dtype = pandas_dtype_func("Float64") + + if convert_boolean: + if input_array.dtype.kind == "b": + inferred_dtype = pandas_dtype_func("boolean") + elif isinstance(inferred_dtype, str) and inferred_dtype == "boolean": + inferred_dtype = pandas_dtype_func("boolean") + + if isinstance(inferred_dtype, str): + # If we couldn't do anything else, then we retain the dtype + inferred_dtype = input_array.dtype + + else: + inferred_dtype = input_array.dtype + + if dtype_backend == "pyarrow": + from pandas.core.arrays.arrow.array import to_pyarrow_type + from pandas.core.arrays.string_ import StringDtype + + assert not isinstance(inferred_dtype, str) + + if ( + (convert_integer and inferred_dtype.kind in "iu") + or (convert_floating and inferred_dtype.kind in "fc") + or (convert_boolean and inferred_dtype.kind == "b") + or (convert_string and isinstance(inferred_dtype, StringDtype)) + or ( + inferred_dtype.kind not in "iufcb" + and not isinstance(inferred_dtype, StringDtype) + ) + ): + if isinstance(inferred_dtype, PandasExtensionDtype) and not isinstance( + inferred_dtype, DatetimeTZDtype + ): + base_dtype = inferred_dtype.base + elif isinstance(inferred_dtype, (BaseMaskedDtype, ArrowDtype)): + base_dtype = inferred_dtype.numpy_dtype + elif isinstance(inferred_dtype, StringDtype): + base_dtype = np.dtype(str) + else: + base_dtype = inferred_dtype + pa_type = to_pyarrow_type(base_dtype) + if pa_type is not None: + inferred_dtype = ArrowDtype(pa_type) + elif dtype_backend == 
"numpy_nullable" and isinstance(inferred_dtype, ArrowDtype): + # GH 53648 + inferred_dtype = _arrow_dtype_mapping()[inferred_dtype.pyarrow_dtype] + + # error: Incompatible return value type (got "Union[str, Union[dtype[Any], + # ExtensionDtype]]", expected "Union[dtype[Any], ExtensionDtype]") + return inferred_dtype # type: ignore[return-value] + + +def maybe_infer_to_datetimelike( + value: npt.NDArray[np.object_], +) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray | IntervalArray: + """ + we might have a array (or single object) that is datetime like, + and no dtype is passed don't change the value unless we find a + datetime/timedelta set + + this is pretty strict in that a datetime/timedelta is REQUIRED + in addition to possible nulls/string likes + + Parameters + ---------- + value : np.ndarray[object] + + Returns + ------- + np.ndarray, DatetimeArray, TimedeltaArray, PeriodArray, or IntervalArray + + """ + if not isinstance(value, np.ndarray) or value.dtype != object: + # Caller is responsible for passing only ndarray[object] + raise TypeError(type(value)) # pragma: no cover + if value.ndim != 1: + # Caller is responsible + raise ValueError(value.ndim) # pragma: no cover + + if not len(value): + return value + + # error: Incompatible return value type (got "Union[ExtensionArray, + # ndarray[Any, Any]]", expected "Union[ndarray[Any, Any], DatetimeArray, + # TimedeltaArray, PeriodArray, IntervalArray]") + return lib.maybe_convert_objects( # type: ignore[return-value] + value, + # Here we do not convert numeric dtypes, as if we wanted that, + # numpy would have done it for us. + convert_numeric=False, + convert_non_numeric=True, + dtype_if_all_nat=np.dtype("M8[ns]"), + ) + + +def maybe_cast_to_datetime( + value: np.ndarray | list, dtype: np.dtype +) -> ExtensionArray | np.ndarray: + """ + try to cast the array/value to a datetimelike dtype, converting float + nan to iNaT + + Caller is responsible for handling ExtensionDtype cases and non dt64/td64 + cases. + """ + from pandas.core.arrays.datetimes import DatetimeArray + from pandas.core.arrays.timedeltas import TimedeltaArray + + assert dtype.kind in "mM" + if not is_list_like(value): + raise TypeError("value must be listlike") + + # TODO: _from_sequence would raise ValueError in cases where + # _ensure_nanosecond_dtype raises TypeError + _ensure_nanosecond_dtype(dtype) + + if lib.is_np_dtype(dtype, "m"): + res = TimedeltaArray._from_sequence(value, dtype=dtype) + return res + else: + try: + dta = DatetimeArray._from_sequence(value, dtype=dtype) + except ValueError as err: + # We can give a Series-specific exception message. + if "cannot supply both a tz and a timezone-naive dtype" in str(err): + raise ValueError( + "Cannot convert timezone-aware data to " + "timezone-naive dtype. Use " + "pd.Series(values).dt.tz_localize(None) instead." + ) from err + raise + + return dta + + +def _ensure_nanosecond_dtype(dtype: DtypeObj) -> None: + """ + Convert dtypes with granularity less than nanosecond to nanosecond + + >>> _ensure_nanosecond_dtype(np.dtype("M8[us]")) + + >>> _ensure_nanosecond_dtype(np.dtype("M8[D]")) + Traceback (most recent call last): + ... + TypeError: dtype=datetime64[D] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns' + + >>> _ensure_nanosecond_dtype(np.dtype("m8[ps]")) + Traceback (most recent call last): + ... + TypeError: dtype=timedelta64[ps] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns' + """ # noqa: E501 + msg = ( + f"The '{dtype.name}' dtype has no unit. 
" + f"Please pass in '{dtype.name}[ns]' instead." + ) + + # unpack e.g. SparseDtype + dtype = getattr(dtype, "subtype", dtype) + + if not isinstance(dtype, np.dtype): + # i.e. datetime64tz + pass + + elif dtype.kind in "mM": + reso = get_unit_from_dtype(dtype) + if not is_supported_unit(reso): + # pre-2.0 we would silently swap in nanos for lower-resolutions, + # raise for above-nano resolutions + if dtype.name in ["datetime64", "timedelta64"]: + raise ValueError(msg) + # TODO: ValueError or TypeError? existing test + # test_constructor_generic_timestamp_bad_frequency expects TypeError + raise TypeError( + f"dtype={dtype} is not supported. Supported resolutions are 's', " + "'ms', 'us', and 'ns'" + ) + + +# TODO: other value-dependent functions to standardize here include +# Index._find_common_type_compat +def find_result_type(left_dtype: DtypeObj, right: Any) -> DtypeObj: + """ + Find the type/dtype for the result of an operation between objects. + + This is similar to find_common_type, but looks at the right object instead + of just its dtype. This can be useful in particular when the right + object does not have a `dtype`. + + Parameters + ---------- + left_dtype : np.dtype or ExtensionDtype + right : Any + + Returns + ------- + np.dtype or ExtensionDtype + + See also + -------- + find_common_type + numpy.result_type + """ + new_dtype: DtypeObj + + if ( + isinstance(left_dtype, np.dtype) + and left_dtype.kind in "iuc" + and (lib.is_integer(right) or lib.is_float(right)) + ): + # e.g. with int8 dtype and right=512, we want to end up with + # np.int16, whereas infer_dtype_from(512) gives np.int64, + # which will make us upcast too far. + if lib.is_float(right) and right.is_integer() and left_dtype.kind != "f": + right = int(right) + new_dtype = np.result_type(left_dtype, right) + + elif is_valid_na_for_dtype(right, left_dtype): + # e.g. IntervalDtype[int] and None/np.nan + new_dtype = ensure_dtype_can_hold_na(left_dtype) + + else: + dtype, _ = infer_dtype_from(right) + new_dtype = find_common_type([left_dtype, dtype]) + + return new_dtype + + +def common_dtype_categorical_compat( + objs: Sequence[Index | ArrayLike], dtype: DtypeObj +) -> DtypeObj: + """ + Update the result of find_common_type to account for NAs in a Categorical. + + Parameters + ---------- + objs : list[np.ndarray | ExtensionArray | Index] + dtype : np.dtype or ExtensionDtype + + Returns + ------- + np.dtype or ExtensionDtype + """ + # GH#38240 + + # TODO: more generally, could do `not can_hold_na(dtype)` + if lib.is_np_dtype(dtype, "iu"): + for obj in objs: + # We don't want to accientally allow e.g. 
"categorical" str here + obj_dtype = getattr(obj, "dtype", None) + if isinstance(obj_dtype, CategoricalDtype): + if isinstance(obj, ABCIndex): + # This check may already be cached + hasnas = obj.hasnans + else: + # Categorical + hasnas = cast("Categorical", obj)._hasna + + if hasnas: + # see test_union_int_categorical_with_nan + dtype = np.dtype(np.float64) + break + return dtype + + +def np_find_common_type(*dtypes: np.dtype) -> np.dtype: + """ + np.find_common_type implementation pre-1.25 deprecation using np.result_type + https://github.com/pandas-dev/pandas/pull/49569#issuecomment-1308300065 + + Parameters + ---------- + dtypes : np.dtypes + + Returns + ------- + np.dtype + """ + try: + common_dtype = np.result_type(*dtypes) + if common_dtype.kind in "mMSU": + # NumPy promotion currently (1.25) misbehaves for for times and strings, + # so fall back to object (find_common_dtype did unless there + # was only one dtype) + common_dtype = np.dtype("O") + + except TypeError: + common_dtype = np.dtype("O") + return common_dtype + + +@overload +def find_common_type(types: list[np.dtype]) -> np.dtype: + ... + + +@overload +def find_common_type(types: list[ExtensionDtype]) -> DtypeObj: + ... + + +@overload +def find_common_type(types: list[DtypeObj]) -> DtypeObj: + ... + + +def find_common_type(types): + """ + Find a common data type among the given dtypes. + + Parameters + ---------- + types : list of dtypes + + Returns + ------- + pandas extension or numpy dtype + + See Also + -------- + numpy.find_common_type + + """ + if not types: + raise ValueError("no types given") + + first = types[0] + + # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2) + # => object + if lib.dtypes_all_equal(list(types)): + return first + + # get unique types (dict.fromkeys is used as order-preserving set()) + types = list(dict.fromkeys(types).keys()) + + if any(isinstance(t, ExtensionDtype) for t in types): + for t in types: + if isinstance(t, ExtensionDtype): + res = t._get_common_dtype(types) + if res is not None: + return res + return np.dtype("object") + + # take lowest unit + if all(lib.is_np_dtype(t, "M") for t in types): + return np.dtype(max(types)) + if all(lib.is_np_dtype(t, "m") for t in types): + return np.dtype(max(types)) + + # don't mix bool / int or float or complex + # this is different from numpy, which casts bool with float/int as int + has_bools = any(t.kind == "b" for t in types) + if has_bools: + for t in types: + if t.kind in "iufc": + return np.dtype("object") + + return np_find_common_type(*types) + + +def construct_2d_arraylike_from_scalar( + value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool +) -> np.ndarray: + shape = (length, width) + + if dtype.kind in "mM": + value = _maybe_box_and_unbox_datetimelike(value, dtype) + elif dtype == _dtype_obj: + if isinstance(value, (np.timedelta64, np.datetime64)): + # calling np.array below would cast to pytimedelta/pydatetime + out = np.empty(shape, dtype=object) + out.fill(value) + return out + + # Attempt to coerce to a numpy array + try: + arr = np.array(value, dtype=dtype, copy=copy) + except (ValueError, TypeError) as err: + raise TypeError( + f"DataFrame constructor called with incompatible data and dtype: {err}" + ) from err + + if arr.ndim != 0: + raise ValueError("DataFrame constructor not properly called!") + + return np.full(shape, arr) + + +def construct_1d_arraylike_from_scalar( + value: Scalar, length: int, dtype: DtypeObj | None +) -> ArrayLike: + """ + create a np.ndarray / pandas type of specified 
shape and dtype + filled with values + + Parameters + ---------- + value : scalar value + length : int + dtype : pandas_dtype or np.dtype + + Returns + ------- + np.ndarray / pandas type of length, filled with value + + """ + + if dtype is None: + try: + dtype, value = infer_dtype_from_scalar(value) + except OutOfBoundsDatetime: + dtype = _dtype_obj + + if isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + seq = [] if length == 0 else [value] + subarr = cls._from_sequence(seq, dtype=dtype).repeat(length) + + else: + if length and dtype.kind in "iu" and isna(value): + # coerce if we have nan for an integer dtype + dtype = np.dtype("float64") + elif lib.is_np_dtype(dtype, "US"): + # we need to coerce to object dtype to avoid + # to allow numpy to take our string as a scalar value + dtype = np.dtype("object") + if not isna(value): + value = ensure_str(value) + elif dtype.kind in "mM": + value = _maybe_box_and_unbox_datetimelike(value, dtype) + + subarr = np.empty(length, dtype=dtype) + if length: + # GH 47391: numpy > 1.24 will raise filling np.nan into int dtypes + subarr.fill(value) + + return subarr + + +def _maybe_box_and_unbox_datetimelike(value: Scalar, dtype: DtypeObj): + # Caller is responsible for checking dtype.kind in "mM" + + if isinstance(value, dt.datetime): + # we dont want to box dt64, in particular datetime64("NaT") + value = maybe_box_datetimelike(value, dtype) + + return _maybe_unbox_datetimelike(value, dtype) + + +def construct_1d_object_array_from_listlike(values: Sized) -> np.ndarray: + """ + Transform any list-like object in a 1-dimensional numpy array of object + dtype. + + Parameters + ---------- + values : any iterable which has a len() + + Raises + ------ + TypeError + * If `values` does not have a len() + + Returns + ------- + 1-dimensional numpy array of dtype object + """ + # numpy will try to interpret nested lists as further dimensions, hence + # making a 1D array that contains list-likes is a bit tricky: + result = np.empty(len(values), dtype="object") + result[:] = values + return result + + +def maybe_cast_to_integer_array(arr: list | np.ndarray, dtype: np.dtype) -> np.ndarray: + """ + Takes any dtype and returns the casted version, raising for when data is + incompatible with integer/unsigned integer dtypes. + + Parameters + ---------- + arr : np.ndarray or list + The array to cast. + dtype : np.dtype + The integer dtype to cast the array to. + + Returns + ------- + ndarray + Array of integer or unsigned integer dtype. + + Raises + ------ + OverflowError : the dtype is incompatible with the data + ValueError : loss of precision has occurred during casting + + Examples + -------- + If you try to coerce negative values to unsigned integers, it raises: + + >>> pd.Series([-1], dtype="uint64") + Traceback (most recent call last): + ... + OverflowError: Trying to coerce negative values to unsigned integers + + Also, if you try to coerce float values to integers, it raises: + + >>> maybe_cast_to_integer_array([1, 2, 3.5], dtype=np.dtype("int64")) + Traceback (most recent call last): + ... + ValueError: Trying to coerce float values to integers + """ + assert dtype.kind in "iu" + + try: + if not isinstance(arr, np.ndarray): + with warnings.catch_warnings(): + # We already disallow dtype=uint w/ negative numbers + # (test_constructor_coercion_signed_to_unsigned) so safe to ignore. 
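+                # The np.array call below then performs the cast with that
+                # warning suppressed; whether the cast was lossless is
+                # verified afterwards by the np.array_equal(arr, casted) check.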
+ warnings.filterwarnings( + "ignore", + "NumPy will stop allowing conversion of out-of-bound Python int", + DeprecationWarning, + ) + casted = np.array(arr, dtype=dtype, copy=False) + else: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + casted = arr.astype(dtype, copy=False) + except OverflowError as err: + raise OverflowError( + "The elements provided in the data cannot all be " + f"casted to the dtype {dtype}" + ) from err + + if isinstance(arr, np.ndarray) and arr.dtype == dtype: + # avoid expensive array_equal check + return casted + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + warnings.filterwarnings( + "ignore", "elementwise comparison failed", FutureWarning + ) + if np.array_equal(arr, casted): + return casted + + # We do this casting to allow for proper + # data and dtype checking. + # + # We didn't do this earlier because NumPy + # doesn't handle `uint64` correctly. + arr = np.asarray(arr) + + if np.issubdtype(arr.dtype, str): + if (casted.astype(str) == arr).all(): + return casted + raise ValueError(f"string values cannot be losslessly cast to {dtype}") + + if dtype.kind == "u" and (arr < 0).any(): + raise OverflowError("Trying to coerce negative values to unsigned integers") + + if arr.dtype.kind == "f": + if not np.isfinite(arr).all(): + raise IntCastingNaNError( + "Cannot convert non-finite values (NA or inf) to integer" + ) + raise ValueError("Trying to coerce float values to integers") + if arr.dtype == object: + raise ValueError("Trying to coerce float values to integers") + + if casted.dtype < arr.dtype: + # GH#41734 e.g. [1, 200, 923442] and dtype="int8" -> overflows + raise ValueError( + f"Values are too large to be losslessly converted to {dtype}. " + f"To cast anyway, use pd.Series(values).astype({dtype})" + ) + + if arr.dtype.kind in "mM": + # test_constructor_maskedarray_nonfloat + raise TypeError( + f"Constructing a Series or DataFrame from {arr.dtype} values and " + f"dtype={dtype} is not supported. Use values.view({dtype}) instead." + ) + + # No known cases that get here, but raising explicitly to cover our bases. + raise ValueError(f"values cannot be losslessly cast to {dtype}") + + +def can_hold_element(arr: ArrayLike, element: Any) -> bool: + """ + Can we do an inplace setitem with this element in an array with this dtype? + + Parameters + ---------- + arr : np.ndarray or ExtensionArray + element : Any + + Returns + ------- + bool + """ + dtype = arr.dtype + if not isinstance(dtype, np.dtype) or dtype.kind in "mM": + if isinstance(dtype, (PeriodDtype, IntervalDtype, DatetimeTZDtype, np.dtype)): + # np.dtype here catches datetime64ns and timedelta64ns; we assume + # in this case that we have DatetimeArray/TimedeltaArray + arr = cast( + "PeriodArray | DatetimeArray | TimedeltaArray | IntervalArray", arr + ) + try: + arr._validate_setitem_value(element) + return True + except (ValueError, TypeError): + # TODO: re-use _catch_deprecated_value_error to ensure we are + # strict about what exceptions we allow through here. + return False + + # This is technically incorrect, but maintains the behavior of + # ExtensionBlock._can_hold_element + return True + + try: + np_can_hold_element(dtype, element) + return True + except (TypeError, LossySetitemError): + return False + + +def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: + """ + Raise if we cannot losslessly set this element into an ndarray with this dtype. 
+
+    Specifically about places where we disagree with numpy, i.e. there are
+    cases where numpy will raise in doing the setitem that we do not check
+    for here, e.g. setting str "X" into a numeric ndarray.
+
+    Returns
+    -------
+    Any
+        The element, potentially cast to the dtype.
+
+    Raises
+    ------
+    LossySetitemError : If we cannot losslessly store this element with this
+        dtype.
+    """
+    if dtype == _dtype_obj:
+        return element
+
+    tipo = _maybe_infer_dtype_type(element)
+
+    if dtype.kind in "iu":
+        if isinstance(element, range):
+            if _dtype_can_hold_range(element, dtype):
+                return element
+            raise LossySetitemError
+
+        if is_integer(element) or (is_float(element) and element.is_integer()):
+            # e.g. test_setitem_series_int8 if we have a python int 1
+            # tipo may be np.int32, despite the fact that it will fit
+            # in smaller int dtypes.
+            info = np.iinfo(dtype)
+            if info.min <= element <= info.max:
+                return dtype.type(element)
+            raise LossySetitemError
+
+        if tipo is not None:
+            if tipo.kind not in "iu":
+                if isinstance(element, np.ndarray) and element.dtype.kind == "f":
+                    # If all can be losslessly cast to integers, then we can hold them
+                    with np.errstate(invalid="ignore"):
+                        # We check afterwards whether the cast was lossless,
+                        # so no need to show the warning
+                        casted = element.astype(dtype)
+                    comp = casted == element
+                    if comp.all():
+                        # Return the casted values because they can be passed to
+                        # np.putmask, whereas the raw values cannot.
+                        # see TestSetitemFloatNDarrayIntoIntegerSeries
+                        return casted
+                    raise LossySetitemError
+
+                # Anything other than integer we cannot hold
+                raise LossySetitemError
+            if (
+                dtype.kind == "u"
+                and isinstance(element, np.ndarray)
+                and element.dtype.kind == "i"
+            ):
+                # see test_where_uint64
+                casted = element.astype(dtype)
+                if (casted == element).all():
+                    # TODO: faster to check (element >= 0).all()? potential
+                    # itemsize issues there?
+                    return casted
+                raise LossySetitemError
+            if dtype.itemsize < tipo.itemsize:
+                raise LossySetitemError
+            if not isinstance(tipo, np.dtype):
+                # i.e. nullable IntegerDtype; we can put this into an ndarray
+                # losslessly iff it has no NAs
+                if element._hasna:
+                    raise LossySetitemError
+                return element
+
+            return element
+
+        raise LossySetitemError
+
+    if dtype.kind == "f":
+        if lib.is_integer(element) or lib.is_float(element):
+            casted = dtype.type(element)
+            if np.isnan(casted) or casted == element:
+                return casted
+            # otherwise e.g. overflow see TestCoercionFloat32
+            raise LossySetitemError
+
+        if tipo is not None:
+            # TODO: itemsize check?
+            if tipo.kind not in "iuf":
+                # Anything other than float/integer we cannot hold
+                raise LossySetitemError
+            if not isinstance(tipo, np.dtype):
+                # i.e. nullable IntegerDtype or FloatingDtype;
+                # we can put this into an ndarray losslessly iff it has no NAs
+                if element._hasna:
+                    raise LossySetitemError
+                return element
+            elif tipo.itemsize > dtype.itemsize or tipo.kind != dtype.kind:
+                if isinstance(element, np.ndarray):
+                    # e.g. 
TestDataFrameIndexingWhere::test_where_alignment + casted = element.astype(dtype) + if np.array_equal(casted, element, equal_nan=True): + return casted + raise LossySetitemError + + return element + + raise LossySetitemError + + if dtype.kind == "c": + if lib.is_integer(element) or lib.is_complex(element) or lib.is_float(element): + if np.isnan(element): + # see test_where_complex GH#6345 + return dtype.type(element) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + casted = dtype.type(element) + if casted == element: + return casted + # otherwise e.g. overflow see test_32878_complex_itemsize + raise LossySetitemError + + if tipo is not None: + if tipo.kind in "iufc": + return element + raise LossySetitemError + raise LossySetitemError + + if dtype.kind == "b": + if tipo is not None: + if tipo.kind == "b": + if not isinstance(tipo, np.dtype): + # i.e. we have a BooleanArray + if element._hasna: + # i.e. there are pd.NA elements + raise LossySetitemError + return element + raise LossySetitemError + if lib.is_bool(element): + return element + raise LossySetitemError + + if dtype.kind == "S": + # TODO: test tests.frame.methods.test_replace tests get here, + # need more targeted tests. xref phofl has a PR about this + if tipo is not None: + if tipo.kind == "S" and tipo.itemsize <= dtype.itemsize: + return element + raise LossySetitemError + if isinstance(element, bytes) and len(element) <= dtype.itemsize: + return element + raise LossySetitemError + + if dtype.kind == "V": + # i.e. np.void, which cannot hold _anything_ + raise LossySetitemError + + raise NotImplementedError(dtype) + + +def _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool: + """ + _maybe_infer_dtype_type infers to int64 (and float64 for very large endpoints), + but in many cases a range can be held by a smaller integer dtype. + Check if this is one of those cases. + """ + if not len(rng): + return True + return np_can_cast_scalar(rng.start, dtype) and np_can_cast_scalar(rng.stop, dtype) + + +def np_can_cast_scalar(element: Scalar, dtype: np.dtype) -> bool: + """ + np.can_cast pandas-equivalent for pre 2-0 behavior that allowed scalar + inference + + Parameters + ---------- + element : Scalar + dtype : np.dtype + + Returns + ------- + bool + """ + try: + np_can_hold_element(dtype, element) + return True + except (LossySetitemError, NotImplementedError): + return False diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/common.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/common.py new file mode 100644 index 00000000..143dc463 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/common.py @@ -0,0 +1,1736 @@ +""" +Common type operations. 
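+
+These are mostly ``is_*_dtype`` predicates, plus small helpers for resolving
+and validating dtype objects (e.g. ``pandas_dtype``).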
+""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) +import warnings + +import numpy as np + +from pandas._libs import ( + Interval, + Period, + algos, + lib, +) +from pandas._libs.tslibs import conversion +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.base import _registry as registry +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + IntervalDtype, + PeriodDtype, + SparseDtype, +) +from pandas.core.dtypes.generic import ABCIndex +from pandas.core.dtypes.inference import ( + is_array_like, + is_bool, + is_complex, + is_dataclass, + is_decimal, + is_dict_like, + is_file_like, + is_float, + is_hashable, + is_integer, + is_interval, + is_iterator, + is_list_like, + is_named_tuple, + is_nested_list_like, + is_number, + is_re, + is_re_compilable, + is_scalar, + is_sequence, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + DtypeObj, + ) + +DT64NS_DTYPE = conversion.DT64NS_DTYPE +TD64NS_DTYPE = conversion.TD64NS_DTYPE +INT64_DTYPE = np.dtype(np.int64) + +# oh the troubles to reduce import time +_is_scipy_sparse = None + +ensure_float64 = algos.ensure_float64 +ensure_int64 = algos.ensure_int64 +ensure_int32 = algos.ensure_int32 +ensure_int16 = algos.ensure_int16 +ensure_int8 = algos.ensure_int8 +ensure_platform_int = algos.ensure_platform_int +ensure_object = algos.ensure_object +ensure_uint64 = algos.ensure_uint64 + + +def ensure_str(value: bytes | Any) -> str: + """ + Ensure that bytes and non-strings get converted into ``str`` objects. + """ + if isinstance(value, bytes): + value = value.decode("utf-8") + elif not isinstance(value, str): + value = str(value) + return value + + +def ensure_python_int(value: int | np.integer) -> int: + """ + Ensure that a value is a python int. + + Parameters + ---------- + value: int or numpy.integer + + Returns + ------- + int + + Raises + ------ + TypeError: if the value isn't an int or can't be converted to one. + """ + if not (is_integer(value) or is_float(value)): + if not is_scalar(value): + raise TypeError( + f"Value needs to be a scalar value, was type {type(value).__name__}" + ) + raise TypeError(f"Wrong type {type(value)} for value {value}") + try: + new_value = int(value) + assert new_value == value + except (TypeError, ValueError, AssertionError) as err: + raise TypeError(f"Wrong type {type(value)} for value {value}") from err + return new_value + + +def classes(*klasses) -> Callable: + """Evaluate if the tipo is a subclass of the klasses.""" + return lambda tipo: issubclass(tipo, klasses) + + +def _classes_and_not_datetimelike(*klasses) -> Callable: + """ + Evaluate if the tipo is a subclass of the klasses + and not a datetimelike. + """ + return lambda tipo: ( + issubclass(tipo, klasses) + and not issubclass(tipo, (np.datetime64, np.timedelta64)) + ) + + +def is_object_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of the object dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype to check. + + Returns + ------- + boolean + Whether or not the array-like or dtype is of the object dtype. 
+ + Examples + -------- + >>> from pandas.api.types import is_object_dtype + >>> is_object_dtype(object) + True + >>> is_object_dtype(int) + False + >>> is_object_dtype(np.array([], dtype=object)) + True + >>> is_object_dtype(np.array([], dtype=int)) + False + >>> is_object_dtype([1, 2, 3]) + False + """ + return _is_dtype_type(arr_or_dtype, classes(np.object_)) + + +def is_sparse(arr) -> bool: + """ + Check whether an array-like is a 1-D pandas sparse array. + + Check that the one-dimensional array-like is a pandas sparse array. + Returns True if it is a pandas sparse array, not another type of + sparse array. + + Parameters + ---------- + arr : array-like + Array-like to check. + + Returns + ------- + bool + Whether or not the array-like is a pandas sparse array. + + Examples + -------- + Returns `True` if the parameter is a 1-D pandas sparse array. + + >>> from pandas.api.types import is_sparse + >>> is_sparse(pd.arrays.SparseArray([0, 0, 1, 0])) + True + >>> is_sparse(pd.Series(pd.arrays.SparseArray([0, 0, 1, 0]))) + True + + Returns `False` if the parameter is not sparse. + + >>> is_sparse(np.array([0, 0, 1, 0])) + False + >>> is_sparse(pd.Series([0, 1, 0, 0])) + False + + Returns `False` if the parameter is not a pandas sparse array. + + >>> from scipy.sparse import bsr_matrix + >>> is_sparse(bsr_matrix([0, 1, 0, 0])) + False + + Returns `False` if the parameter has more than one dimension. + """ + warnings.warn( + "is_sparse is deprecated and will be removed in a future " + "version. Check `isinstance(dtype, pd.SparseDtype)` instead.", + DeprecationWarning, + stacklevel=2, + ) + + dtype = getattr(arr, "dtype", arr) + return isinstance(dtype, SparseDtype) + + +def is_scipy_sparse(arr) -> bool: + """ + Check whether an array-like is a scipy.sparse.spmatrix instance. + + Parameters + ---------- + arr : array-like + The array-like to check. + + Returns + ------- + boolean + Whether or not the array-like is a scipy.sparse.spmatrix instance. + + Notes + ----- + If scipy is not installed, this function will always return False. + + Examples + -------- + >>> from scipy.sparse import bsr_matrix + >>> is_scipy_sparse(bsr_matrix([1, 2, 3])) + True + >>> is_scipy_sparse(pd.arrays.SparseArray([1, 2, 3])) + False + """ + global _is_scipy_sparse + + if _is_scipy_sparse is None: # pylint: disable=used-before-assignment + try: + from scipy.sparse import issparse as _is_scipy_sparse + except ImportError: + _is_scipy_sparse = lambda _: False + + assert _is_scipy_sparse is not None + return _is_scipy_sparse(arr) + + +def is_datetime64_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of the datetime64 dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype to check. + + Returns + ------- + boolean + Whether or not the array-like or dtype is of the datetime64 dtype. + + Examples + -------- + >>> from pandas.api.types import is_datetime64_dtype + >>> is_datetime64_dtype(object) + False + >>> is_datetime64_dtype(np.datetime64) + True + >>> is_datetime64_dtype(np.array([], dtype=int)) + False + >>> is_datetime64_dtype(np.array([], dtype=np.datetime64)) + True + >>> is_datetime64_dtype([1, 2, 3]) + False + """ + if isinstance(arr_or_dtype, np.dtype): + # GH#33400 fastpath for dtype object + return arr_or_dtype.kind == "M" + return _is_dtype_type(arr_or_dtype, classes(np.datetime64)) + + +def is_datetime64tz_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of a DatetimeTZDtype dtype. 
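+
+    .. deprecated:: 2.1.0
+        Use ``isinstance(dtype, pd.DatetimeTZDtype)`` instead.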
+ + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype to check. + + Returns + ------- + boolean + Whether or not the array-like or dtype is of a DatetimeTZDtype dtype. + + Examples + -------- + >>> from pandas.api.types import is_datetime64tz_dtype + >>> is_datetime64tz_dtype(object) + False + >>> is_datetime64tz_dtype([1, 2, 3]) + False + >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive + False + >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) + True + + >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype + >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") + >>> s = pd.Series([], dtype=dtype) + >>> is_datetime64tz_dtype(dtype) + True + >>> is_datetime64tz_dtype(s) + True + """ + # GH#52607 + warnings.warn( + "is_datetime64tz_dtype is deprecated and will be removed in a future " + "version. Check `isinstance(dtype, pd.DatetimeTZDtype)` instead.", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(arr_or_dtype, DatetimeTZDtype): + # GH#33400 fastpath for dtype object + # GH 34986 + return True + + if arr_or_dtype is None: + return False + return DatetimeTZDtype.is_dtype(arr_or_dtype) + + +def is_timedelta64_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of the timedelta64 dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype to check. + + Returns + ------- + boolean + Whether or not the array-like or dtype is of the timedelta64 dtype. + + Examples + -------- + >>> from pandas.core.dtypes.common import is_timedelta64_dtype + >>> is_timedelta64_dtype(object) + False + >>> is_timedelta64_dtype(np.timedelta64) + True + >>> is_timedelta64_dtype([1, 2, 3]) + False + >>> is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]")) + True + >>> is_timedelta64_dtype('0 days') + False + """ + if isinstance(arr_or_dtype, np.dtype): + # GH#33400 fastpath for dtype object + return arr_or_dtype.kind == "m" + + return _is_dtype_type(arr_or_dtype, classes(np.timedelta64)) + + +def is_period_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of the Period dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype to check. + + Returns + ------- + boolean + Whether or not the array-like or dtype is of the Period dtype. + + Examples + -------- + >>> from pandas.core.dtypes.common import is_period_dtype + >>> is_period_dtype(object) + False + >>> is_period_dtype(pd.PeriodDtype(freq="D")) + True + >>> is_period_dtype([1, 2, 3]) + False + >>> is_period_dtype(pd.Period("2017-01-01")) + False + >>> is_period_dtype(pd.PeriodIndex([], freq="A")) + True + """ + warnings.warn( + "is_period_dtype is deprecated and will be removed in a future version. " + "Use `isinstance(dtype, pd.PeriodDtype)` instead", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(arr_or_dtype, ExtensionDtype): + # GH#33400 fastpath for dtype object + return arr_or_dtype.type is Period + + if arr_or_dtype is None: + return False + return PeriodDtype.is_dtype(arr_or_dtype) + + +def is_interval_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of the Interval dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype to check. + + Returns + ------- + boolean + Whether or not the array-like or dtype is of the Interval dtype. 
+ + Examples + -------- + >>> from pandas.core.dtypes.common import is_interval_dtype + >>> is_interval_dtype(object) + False + >>> is_interval_dtype(pd.IntervalDtype()) + True + >>> is_interval_dtype([1, 2, 3]) + False + >>> + >>> interval = pd.Interval(1, 2, closed="right") + >>> is_interval_dtype(interval) + False + >>> is_interval_dtype(pd.IntervalIndex([interval])) + True + """ + # GH#52607 + warnings.warn( + "is_interval_dtype is deprecated and will be removed in a future version. " + "Use `isinstance(dtype, pd.IntervalDtype)` instead", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(arr_or_dtype, ExtensionDtype): + # GH#33400 fastpath for dtype object + return arr_or_dtype.type is Interval + + if arr_or_dtype is None: + return False + return IntervalDtype.is_dtype(arr_or_dtype) + + +def is_categorical_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of the Categorical dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype to check. + + Returns + ------- + boolean + Whether or not the array-like or dtype is of the Categorical dtype. + + Examples + -------- + >>> from pandas.api.types import is_categorical_dtype + >>> from pandas import CategoricalDtype + >>> is_categorical_dtype(object) + False + >>> is_categorical_dtype(CategoricalDtype()) + True + >>> is_categorical_dtype([1, 2, 3]) + False + >>> is_categorical_dtype(pd.Categorical([1, 2, 3])) + True + >>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3])) + True + """ + # GH#52527 + warnings.warn( + "is_categorical_dtype is deprecated and will be removed in a future " + "version. Use isinstance(dtype, pd.CategoricalDtype) instead", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(arr_or_dtype, ExtensionDtype): + # GH#33400 fastpath for dtype object + return arr_or_dtype.name == "category" + + if arr_or_dtype is None: + return False + return CategoricalDtype.is_dtype(arr_or_dtype) + + +def is_string_or_object_np_dtype(dtype: np.dtype) -> bool: + """ + Faster alternative to is_string_dtype, assumes we have a np.dtype object. + """ + return dtype == object or dtype.kind in "SU" + + +def is_string_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of the string dtype. + + If an array is passed with an object dtype, the elements must be + inferred as strings. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of the string dtype. + + Examples + -------- + >>> from pandas.api.types import is_string_dtype + >>> is_string_dtype(str) + True + >>> is_string_dtype(object) + True + >>> is_string_dtype(int) + False + >>> is_string_dtype(np.array(['a', 'b'])) + True + >>> is_string_dtype(pd.Series([1, 2])) + False + >>> is_string_dtype(pd.Series([1, 2], dtype=object)) + False + """ + if hasattr(arr_or_dtype, "dtype") and _get_dtype(arr_or_dtype).kind == "O": + return is_all_strings(arr_or_dtype) + + def condition(dtype) -> bool: + if is_string_or_object_np_dtype(dtype): + return True + try: + return dtype == "string" + except TypeError: + return False + + return _is_dtype(arr_or_dtype, condition) + + +def is_dtype_equal(source, target) -> bool: + """ + Check if two dtypes are equal. + + Parameters + ---------- + source : The first dtype to compare + target : The second dtype to compare + + Returns + ------- + boolean + Whether or not the two dtypes are equal. 
+ + Examples + -------- + >>> is_dtype_equal(int, float) + False + >>> is_dtype_equal("int", int) + True + >>> is_dtype_equal(object, "category") + False + >>> is_dtype_equal(CategoricalDtype(), "category") + True + >>> is_dtype_equal(DatetimeTZDtype(tz="UTC"), "datetime64") + False + """ + if isinstance(target, str): + if not isinstance(source, str): + # GH#38516 ensure we get the same behavior from + # is_dtype_equal(CDT, "category") and CDT == "category" + try: + src = _get_dtype(source) + if isinstance(src, ExtensionDtype): + return src == target + except (TypeError, AttributeError, ImportError): + return False + elif isinstance(source, str): + return is_dtype_equal(target, source) + + try: + source = _get_dtype(source) + target = _get_dtype(target) + return source == target + except (TypeError, AttributeError, ImportError): + # invalid comparison + # object == category will hit this + return False + + +def is_integer_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of an integer dtype. + + Unlike in `is_any_int_dtype`, timedelta64 instances will return False. + + The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered + as integer by this function. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of an integer dtype and + not an instance of timedelta64. + + Examples + -------- + >>> from pandas.api.types import is_integer_dtype + >>> is_integer_dtype(str) + False + >>> is_integer_dtype(int) + True + >>> is_integer_dtype(float) + False + >>> is_integer_dtype(np.uint64) + True + >>> is_integer_dtype('int8') + True + >>> is_integer_dtype('Int8') + True + >>> is_integer_dtype(pd.Int8Dtype) + True + >>> is_integer_dtype(np.datetime64) + False + >>> is_integer_dtype(np.timedelta64) + False + >>> is_integer_dtype(np.array(['a', 'b'])) + False + >>> is_integer_dtype(pd.Series([1, 2])) + True + >>> is_integer_dtype(np.array([], dtype=np.timedelta64)) + False + >>> is_integer_dtype(pd.Index([1, 2.])) # float + False + """ + return _is_dtype_type( + arr_or_dtype, _classes_and_not_datetimelike(np.integer) + ) or _is_dtype( + arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu" + ) + + +def is_signed_integer_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of a signed integer dtype. + + Unlike in `is_any_int_dtype`, timedelta64 instances will return False. + + The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered + as integer by this function. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of a signed integer dtype + and not an instance of timedelta64. 
+ + Examples + -------- + >>> from pandas.core.dtypes.common import is_signed_integer_dtype + >>> is_signed_integer_dtype(str) + False + >>> is_signed_integer_dtype(int) + True + >>> is_signed_integer_dtype(float) + False + >>> is_signed_integer_dtype(np.uint64) # unsigned + False + >>> is_signed_integer_dtype('int8') + True + >>> is_signed_integer_dtype('Int8') + True + >>> is_signed_integer_dtype(pd.Int8Dtype) + True + >>> is_signed_integer_dtype(np.datetime64) + False + >>> is_signed_integer_dtype(np.timedelta64) + False + >>> is_signed_integer_dtype(np.array(['a', 'b'])) + False + >>> is_signed_integer_dtype(pd.Series([1, 2])) + True + >>> is_signed_integer_dtype(np.array([], dtype=np.timedelta64)) + False + >>> is_signed_integer_dtype(pd.Index([1, 2.])) # float + False + >>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned + False + """ + return _is_dtype_type( + arr_or_dtype, _classes_and_not_datetimelike(np.signedinteger) + ) or _is_dtype( + arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "i" + ) + + +def is_unsigned_integer_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of an unsigned integer dtype. + + The nullable Integer dtypes (e.g. pandas.UInt64Dtype) are also + considered as integer by this function. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of an unsigned integer dtype. + + Examples + -------- + >>> from pandas.api.types import is_unsigned_integer_dtype + >>> is_unsigned_integer_dtype(str) + False + >>> is_unsigned_integer_dtype(int) # signed + False + >>> is_unsigned_integer_dtype(float) + False + >>> is_unsigned_integer_dtype(np.uint64) + True + >>> is_unsigned_integer_dtype('uint8') + True + >>> is_unsigned_integer_dtype('UInt8') + True + >>> is_unsigned_integer_dtype(pd.UInt8Dtype) + True + >>> is_unsigned_integer_dtype(np.array(['a', 'b'])) + False + >>> is_unsigned_integer_dtype(pd.Series([1, 2])) # signed + False + >>> is_unsigned_integer_dtype(pd.Index([1, 2.])) # float + False + >>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32)) + True + """ + return _is_dtype_type( + arr_or_dtype, _classes_and_not_datetimelike(np.unsignedinteger) + ) or _is_dtype( + arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "u" + ) + + +def is_int64_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of the int64 dtype. + + .. deprecated:: 2.1.0 + + is_int64_dtype is deprecated and will be removed in a future + version. Use dtype == np.int64 instead. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of the int64 dtype. + + Notes + ----- + Depending on system architecture, the return value of `is_int64_dtype( + int)` will be True if the OS uses 64-bit integers and False if the OS + uses 32-bit integers. 
+ + Examples + -------- + >>> from pandas.api.types import is_int64_dtype + >>> is_int64_dtype(str) # doctest: +SKIP + False + >>> is_int64_dtype(np.int32) # doctest: +SKIP + False + >>> is_int64_dtype(np.int64) # doctest: +SKIP + True + >>> is_int64_dtype('int8') # doctest: +SKIP + False + >>> is_int64_dtype('Int8') # doctest: +SKIP + False + >>> is_int64_dtype(pd.Int64Dtype) # doctest: +SKIP + True + >>> is_int64_dtype(float) # doctest: +SKIP + False + >>> is_int64_dtype(np.uint64) # unsigned # doctest: +SKIP + False + >>> is_int64_dtype(np.array(['a', 'b'])) # doctest: +SKIP + False + >>> is_int64_dtype(np.array([1, 2], dtype=np.int64)) # doctest: +SKIP + True + >>> is_int64_dtype(pd.Index([1, 2.])) # float # doctest: +SKIP + False + >>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned # doctest: +SKIP + False + """ + # GH#52564 + warnings.warn( + "is_int64_dtype is deprecated and will be removed in a future " + "version. Use dtype == np.int64 instead.", + DeprecationWarning, + stacklevel=2, + ) + return _is_dtype_type(arr_or_dtype, classes(np.int64)) + + +def is_datetime64_any_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of the datetime64 dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + bool + Whether or not the array or dtype is of the datetime64 dtype. + + Examples + -------- + >>> from pandas.api.types import is_datetime64_any_dtype + >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype + >>> is_datetime64_any_dtype(str) + False + >>> is_datetime64_any_dtype(int) + False + >>> is_datetime64_any_dtype(np.datetime64) # can be tz-naive + True + >>> is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern")) + True + >>> is_datetime64_any_dtype(np.array(['a', 'b'])) + False + >>> is_datetime64_any_dtype(np.array([1, 2])) + False + >>> is_datetime64_any_dtype(np.array([], dtype="datetime64[ns]")) + True + >>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]")) + True + """ + if isinstance(arr_or_dtype, (np.dtype, ExtensionDtype)): + # GH#33400 fastpath for dtype object + return arr_or_dtype.kind == "M" + + if arr_or_dtype is None: + return False + + try: + tipo = _get_dtype(arr_or_dtype) + except TypeError: + return False + return lib.is_np_dtype(tipo, "M") or isinstance(tipo, DatetimeTZDtype) + + +def is_datetime64_ns_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of the datetime64[ns] dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + bool + Whether or not the array or dtype is of the datetime64[ns] dtype. 
+ + Examples + -------- + >>> from pandas.api.types import is_datetime64_ns_dtype + >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype + >>> is_datetime64_ns_dtype(str) + False + >>> is_datetime64_ns_dtype(int) + False + >>> is_datetime64_ns_dtype(np.datetime64) # no unit + False + >>> is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern")) + True + >>> is_datetime64_ns_dtype(np.array(['a', 'b'])) + False + >>> is_datetime64_ns_dtype(np.array([1, 2])) + False + >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64")) # no unit + False + >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]")) # wrong unit + False + >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]")) + True + """ + if arr_or_dtype is None: + return False + try: + tipo = _get_dtype(arr_or_dtype) + except TypeError: + return False + return tipo == DT64NS_DTYPE or ( + isinstance(tipo, DatetimeTZDtype) and tipo.unit == "ns" + ) + + +def is_timedelta64_ns_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of the timedelta64[ns] dtype. + + This is a very specific dtype, so generic ones like `np.timedelta64` + will return False if passed into this function. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of the timedelta64[ns] dtype. + + Examples + -------- + >>> from pandas.core.dtypes.common import is_timedelta64_ns_dtype + >>> is_timedelta64_ns_dtype(np.dtype('m8[ns]')) + True + >>> is_timedelta64_ns_dtype(np.dtype('m8[ps]')) # Wrong frequency + False + >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]')) + True + >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64)) + False + """ + return _is_dtype(arr_or_dtype, lambda dtype: dtype == TD64NS_DTYPE) + + +# This exists to silence numpy deprecation warnings, see GH#29553 +def is_numeric_v_string_like(a: ArrayLike, b) -> bool: + """ + Check if we are comparing a string-like object to a numeric ndarray. + NumPy doesn't like to compare such objects, especially numeric arrays + and scalar string-likes. + + Parameters + ---------- + a : array-like, scalar + The first object to check. + b : array-like, scalar + The second object to check. + + Returns + ------- + boolean + Whether we return a comparing a string-like object to a numeric array. + + Examples + -------- + >>> is_numeric_v_string_like(np.array([1]), "foo") + True + >>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"])) + True + >>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2])) + True + >>> is_numeric_v_string_like(np.array([1]), np.array([2])) + False + >>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"])) + False + """ + is_a_array = isinstance(a, np.ndarray) + is_b_array = isinstance(b, np.ndarray) + + is_a_numeric_array = is_a_array and a.dtype.kind in ("u", "i", "f", "c", "b") + is_b_numeric_array = is_b_array and b.dtype.kind in ("u", "i", "f", "c", "b") + is_a_string_array = is_a_array and a.dtype.kind in ("S", "U") + is_b_string_array = is_b_array and b.dtype.kind in ("S", "U") + + is_b_scalar_string_like = not is_b_array and isinstance(b, str) + + return ( + (is_a_numeric_array and is_b_scalar_string_like) + or (is_a_numeric_array and is_b_string_array) + or (is_b_numeric_array and is_a_string_array) + ) + + +def needs_i8_conversion(dtype: DtypeObj | None) -> bool: + """ + Check whether the dtype should be converted to int64. 
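+    ("i8" here means int64: pandas stores datetime64, timedelta64, Period,
+    and timezone-aware datetime data as 64-bit integers internally.)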
+ + Dtype "needs" such a conversion if the dtype is of a datetime-like dtype + + Parameters + ---------- + dtype : np.dtype, ExtensionDtype, or None + + Returns + ------- + boolean + Whether or not the dtype should be converted to int64. + + Examples + -------- + >>> needs_i8_conversion(str) + False + >>> needs_i8_conversion(np.int64) + False + >>> needs_i8_conversion(np.datetime64) + False + >>> needs_i8_conversion(np.dtype(np.datetime64)) + True + >>> needs_i8_conversion(np.array(['a', 'b'])) + False + >>> needs_i8_conversion(pd.Series([1, 2])) + False + >>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]")) + False + >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) + False + >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern").dtype) + True + """ + if isinstance(dtype, np.dtype): + return dtype.kind in "mM" + return isinstance(dtype, (PeriodDtype, DatetimeTZDtype)) + + +def is_numeric_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of a numeric dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of a numeric dtype. + + Examples + -------- + >>> from pandas.api.types import is_numeric_dtype + >>> is_numeric_dtype(str) + False + >>> is_numeric_dtype(int) + True + >>> is_numeric_dtype(float) + True + >>> is_numeric_dtype(np.uint64) + True + >>> is_numeric_dtype(np.datetime64) + False + >>> is_numeric_dtype(np.timedelta64) + False + >>> is_numeric_dtype(np.array(['a', 'b'])) + False + >>> is_numeric_dtype(pd.Series([1, 2])) + True + >>> is_numeric_dtype(pd.Index([1, 2.])) + True + >>> is_numeric_dtype(np.array([], dtype=np.timedelta64)) + False + """ + return _is_dtype_type( + arr_or_dtype, _classes_and_not_datetimelike(np.number, np.bool_) + ) or _is_dtype( + arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric + ) + + +def is_any_real_numeric_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of a real number dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of a real number dtype. + + Examples + -------- + >>> from pandas.api.types import is_any_real_numeric_dtype + >>> is_any_real_numeric_dtype(int) + True + >>> is_any_real_numeric_dtype(float) + True + >>> is_any_real_numeric_dtype(object) + False + >>> is_any_real_numeric_dtype(str) + False + >>> is_any_real_numeric_dtype(complex(1, 2)) + False + >>> is_any_real_numeric_dtype(bool) + False + """ + return ( + is_numeric_dtype(arr_or_dtype) + and not is_complex_dtype(arr_or_dtype) + and not is_bool_dtype(arr_or_dtype) + ) + + +def is_float_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of a float dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of a float dtype. 
+ + Examples + -------- + >>> from pandas.api.types import is_float_dtype + >>> is_float_dtype(str) + False + >>> is_float_dtype(int) + False + >>> is_float_dtype(float) + True + >>> is_float_dtype(np.array(['a', 'b'])) + False + >>> is_float_dtype(pd.Series([1, 2])) + False + >>> is_float_dtype(pd.Index([1, 2.])) + True + """ + return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( + arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" + ) + + +def is_bool_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of a boolean dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of a boolean dtype. + + Notes + ----- + An ExtensionArray is considered boolean when the ``_is_boolean`` + attribute is set to True. + + Examples + -------- + >>> from pandas.api.types import is_bool_dtype + >>> is_bool_dtype(str) + False + >>> is_bool_dtype(int) + False + >>> is_bool_dtype(bool) + True + >>> is_bool_dtype(np.bool_) + True + >>> is_bool_dtype(np.array(['a', 'b'])) + False + >>> is_bool_dtype(pd.Series([1, 2])) + False + >>> is_bool_dtype(np.array([True, False])) + True + >>> is_bool_dtype(pd.Categorical([True, False])) + True + >>> is_bool_dtype(pd.arrays.SparseArray([True, False])) + True + """ + if arr_or_dtype is None: + return False + try: + dtype = _get_dtype(arr_or_dtype) + except (TypeError, ValueError): + return False + + if isinstance(dtype, CategoricalDtype): + arr_or_dtype = dtype.categories + # now we use the special definition for Index + + if isinstance(arr_or_dtype, ABCIndex): + # Allow Index[object] that is all-bools or Index["boolean"] + if arr_or_dtype.inferred_type == "boolean": + if not is_bool_dtype(arr_or_dtype.dtype): + # GH#52680 + warnings.warn( + "The behavior of is_bool_dtype with an object-dtype Index " + "of bool objects is deprecated. In a future version, " + "this will return False. Cast the Index to a bool dtype instead.", + DeprecationWarning, + stacklevel=2, + ) + return True + return False + elif isinstance(dtype, ExtensionDtype): + return getattr(dtype, "_is_boolean", False) + + return issubclass(dtype.type, np.bool_) + + +def is_1d_only_ea_dtype(dtype: DtypeObj | None) -> bool: + """ + Analogue to is_extension_array_dtype but excluding DatetimeTZDtype. + """ + # Note: if other EA dtypes are ever held in HybridBlock, exclude those + # here too. + # NB: need to check DatetimeTZDtype and not is_datetime64tz_dtype + # to exclude ArrowTimestampUSDtype + return isinstance(dtype, ExtensionDtype) and not isinstance( + dtype, (DatetimeTZDtype, PeriodDtype) + ) + + +def is_extension_array_dtype(arr_or_dtype) -> bool: + """ + Check if an object is a pandas extension array type. + + See the :ref:`Use Guide ` for more. + + Parameters + ---------- + arr_or_dtype : object + For array-like input, the ``.dtype`` attribute will + be extracted. + + Returns + ------- + bool + Whether the `arr_or_dtype` is an extension array type. + + Notes + ----- + This checks whether an object implements the pandas extension + array interface. In pandas, this includes: + + * Categorical + * Sparse + * Interval + * Period + * DatetimeArray + * TimedeltaArray + + Third-party libraries may implement arrays or types satisfying + this interface as well. 
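+
+    For inputs that are neither an ``ExtensionDtype`` instance nor a
+    ``np.dtype``, the check falls back to a lookup in the pandas dtype
+    registry.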
+ + Examples + -------- + >>> from pandas.api.types import is_extension_array_dtype + >>> arr = pd.Categorical(['a', 'b']) + >>> is_extension_array_dtype(arr) + True + >>> is_extension_array_dtype(arr.dtype) + True + + >>> arr = np.array(['a', 'b']) + >>> is_extension_array_dtype(arr.dtype) + False + """ + dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype) + if isinstance(dtype, ExtensionDtype): + return True + elif isinstance(dtype, np.dtype): + return False + else: + return registry.find(dtype) is not None + + +def is_ea_or_datetimelike_dtype(dtype: DtypeObj | None) -> bool: + """ + Check for ExtensionDtype, datetime64 dtype, or timedelta64 dtype. + + Notes + ----- + Checks only for dtype objects, not dtype-castable strings or types. + """ + return isinstance(dtype, ExtensionDtype) or (lib.is_np_dtype(dtype, "mM")) + + +def is_complex_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of a complex dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of a complex dtype. + + Examples + -------- + >>> from pandas.api.types import is_complex_dtype + >>> is_complex_dtype(str) + False + >>> is_complex_dtype(int) + False + >>> is_complex_dtype(np.complex128) + True + >>> is_complex_dtype(np.array(['a', 'b'])) + False + >>> is_complex_dtype(pd.Series([1, 2])) + False + >>> is_complex_dtype(np.array([1 + 1j, 5])) + True + """ + return _is_dtype_type(arr_or_dtype, classes(np.complexfloating)) + + +def _is_dtype(arr_or_dtype, condition) -> bool: + """ + Return true if the condition is satisfied for the arr_or_dtype. + + Parameters + ---------- + arr_or_dtype : array-like, str, np.dtype, or ExtensionArrayType + The array-like or dtype object whose dtype we want to extract. + condition : callable[Union[np.dtype, ExtensionDtype]] + + Returns + ------- + bool + + """ + if arr_or_dtype is None: + return False + try: + dtype = _get_dtype(arr_or_dtype) + except (TypeError, ValueError): + return False + return condition(dtype) + + +def _get_dtype(arr_or_dtype) -> DtypeObj: + """ + Get the dtype instance associated with an array + or dtype object. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype object whose dtype we want to extract. + + Returns + ------- + obj_dtype : The extract dtype instance from the + passed in array or dtype object. + + Raises + ------ + TypeError : The passed in object is None. + """ + if arr_or_dtype is None: + raise TypeError("Cannot deduce dtype from null object") + + # fastpath + if isinstance(arr_or_dtype, np.dtype): + return arr_or_dtype + elif isinstance(arr_or_dtype, type): + return np.dtype(arr_or_dtype) + + # if we have an array-like + elif hasattr(arr_or_dtype, "dtype"): + arr_or_dtype = arr_or_dtype.dtype + + return pandas_dtype(arr_or_dtype) + + +def _is_dtype_type(arr_or_dtype, condition) -> bool: + """ + Return true if the condition is satisfied for the arr_or_dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype object whose dtype we want to extract. 
+ condition : callable[Union[np.dtype, ExtensionDtypeType]] + + Returns + ------- + bool : if the condition is satisfied for the arr_or_dtype + """ + if arr_or_dtype is None: + return condition(type(None)) + + # fastpath + if isinstance(arr_or_dtype, np.dtype): + return condition(arr_or_dtype.type) + elif isinstance(arr_or_dtype, type): + if issubclass(arr_or_dtype, ExtensionDtype): + arr_or_dtype = arr_or_dtype.type + return condition(np.dtype(arr_or_dtype).type) + + # if we have an array-like + if hasattr(arr_or_dtype, "dtype"): + arr_or_dtype = arr_or_dtype.dtype + + # we are not possibly a dtype + elif is_list_like(arr_or_dtype): + return condition(type(None)) + + try: + tipo = pandas_dtype(arr_or_dtype).type + except (TypeError, ValueError): + if is_scalar(arr_or_dtype): + return condition(type(None)) + + return False + + return condition(tipo) + + +def infer_dtype_from_object(dtype) -> type: + """ + Get a numpy dtype.type-style object for a dtype object. + + This methods also includes handling of the datetime64[ns] and + datetime64[ns, TZ] objects. + + If no dtype can be found, we return ``object``. + + Parameters + ---------- + dtype : dtype, type + The dtype object whose numpy dtype.type-style + object we want to extract. + + Returns + ------- + type + """ + if isinstance(dtype, type) and issubclass(dtype, np.generic): + # Type object from a dtype + + return dtype + elif isinstance(dtype, (np.dtype, ExtensionDtype)): + # dtype object + try: + _validate_date_like_dtype(dtype) + except TypeError: + # Should still pass if we don't have a date-like + pass + if hasattr(dtype, "numpy_dtype"): + # TODO: Implement this properly + # https://github.com/pandas-dev/pandas/issues/52576 + return dtype.numpy_dtype.type + return dtype.type + + try: + dtype = pandas_dtype(dtype) + except TypeError: + pass + + if isinstance(dtype, ExtensionDtype): + return dtype.type + elif isinstance(dtype, str): + # TODO(jreback) + # should deprecate these + if dtype in ["datetimetz", "datetime64tz"]: + return DatetimeTZDtype.type + elif dtype in ["period"]: + raise NotImplementedError + + if dtype in ["datetime", "timedelta"]: + dtype += "64" + try: + return infer_dtype_from_object(getattr(np, dtype)) + except (AttributeError, TypeError): + # Handles cases like _get_dtype(int) i.e., + # Python objects that are valid dtypes + # (unlike user-defined types, in general) + # + # TypeError handles the float16 type code of 'e' + # further handle internal types + pass + + return infer_dtype_from_object(np.dtype(dtype)) + + +def _validate_date_like_dtype(dtype) -> None: + """ + Check whether the dtype is a date-like dtype. Raises an error if invalid. + + Parameters + ---------- + dtype : dtype, type + The dtype to check. + + Raises + ------ + TypeError : The dtype could not be casted to a date-like dtype. + ValueError : The dtype is an illegal date-like dtype (e.g. the + frequency provided is too specific) + """ + try: + typ = np.datetime_data(dtype)[0] + except ValueError as e: + raise TypeError(e) from e + if typ not in ["generic", "ns"]: + raise ValueError( + f"{repr(dtype.name)} is too specific of a frequency, " + f"try passing {repr(dtype.type.__name__)}" + ) + + +def validate_all_hashable(*args, error_name: str | None = None) -> None: + """ + Return None if all args are hashable, else raise a TypeError. + + Parameters + ---------- + *args + Arguments to validate. 
error_name : str, optional
+        The name to use in the error message if not all arguments are
+        hashable.
+
+    Raises
+    ------
+    TypeError : If an argument is not hashable
+
+    Returns
+    -------
+    None
+    """
+    if not all(is_hashable(arg) for arg in args):
+        if error_name:
+            raise TypeError(f"{error_name} must be a hashable type")
+        raise TypeError("All elements must be hashable")
+
+
+def pandas_dtype(dtype) -> DtypeObj:
+    """
+    Convert input into a pandas only dtype object or a numpy dtype object.
+
+    Parameters
+    ----------
+    dtype : object to be converted
+
+    Returns
+    -------
+    np.dtype or a pandas dtype
+
+    Raises
+    ------
+    TypeError if not a dtype
+
+    Examples
+    --------
+    >>> pd.api.types.pandas_dtype(int)
+    dtype('int64')
+    """
+    # short-circuit
+    if isinstance(dtype, np.ndarray):
+        return dtype.dtype
+    elif isinstance(dtype, (np.dtype, ExtensionDtype)):
+        return dtype
+
+    # registered extension types
+    result = registry.find(dtype)
+    if result is not None:
+        if isinstance(result, type):
+            # GH 31356, GH 54592
+            warnings.warn(
+                f"Instantiating {result.__name__} without any arguments. "
+                f"Pass a {result.__name__} instance to silence this warning.",
+                UserWarning,
+                stacklevel=find_stack_level(),
+            )
+            result = result()
+        return result
+
+    # try a numpy dtype
+    # raise a consistent TypeError if failed
+    try:
+        with warnings.catch_warnings():
+            # GH#51523 - Series.astype(np.integer) doesn't show
+            # numpy deprecation warning of np.integer
+            # Hence enabling DeprecationWarning
+            warnings.simplefilter("always", DeprecationWarning)
+            npdtype = np.dtype(dtype)
+    except SyntaxError as err:
+        # np.dtype uses `eval` which can raise SyntaxError
+        raise TypeError(f"data type '{dtype}' not understood") from err
+
+    # Any invalid dtype (such as pd.Timestamp) should raise an error.
+    # np.dtype(invalid_type).kind = 0 for such objects. However, this will
+    # also catch some valid dtypes such as object, np.object_ and 'object'
+    # which we safeguard against by catching them earlier and returning
+    # np.dtype(valid_dtype) before this condition is evaluated.
+    if is_hashable(dtype) and dtype in [
+        object,
+        np.object_,
+        "object",
+        "O",
+        "object_",
+    ]:
+        # check hashability to avoid errors/DeprecationWarning when we get
+        # here and `dtype` is an array
+        return npdtype
+    elif npdtype.kind == "O":
+        raise TypeError(f"dtype '{dtype}' not understood")
+
+    return npdtype
+
+
+def is_all_strings(value: ArrayLike) -> bool:
+    """
+    Check if this is an array of strings that we should try parsing.
+
+    Includes object-dtype ndarray containing all-strings, StringArray,
+    and Categorical with all-string categories.
+    Does not include numpy string dtypes.
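+
+    Examples
+    --------
+    >>> is_all_strings(np.array(["a", "b"], dtype=object))
+    True
+    >>> is_all_strings(np.array(["a", "b"]))  # numpy "U" dtype, not object
+    False
+    >>> is_all_strings(pd.Categorical(["a", "b"]))
+    True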
+ """ + dtype = value.dtype + + if isinstance(dtype, np.dtype): + return dtype == np.dtype("object") and lib.is_string_array( + np.asarray(value), skipna=False + ) + elif isinstance(dtype, CategoricalDtype): + return dtype.categories.inferred_type == "string" + return dtype == "string" + + +__all__ = [ + "classes", + "DT64NS_DTYPE", + "ensure_float64", + "ensure_python_int", + "ensure_str", + "infer_dtype_from_object", + "INT64_DTYPE", + "is_1d_only_ea_dtype", + "is_all_strings", + "is_any_real_numeric_dtype", + "is_array_like", + "is_bool", + "is_bool_dtype", + "is_categorical_dtype", + "is_complex", + "is_complex_dtype", + "is_dataclass", + "is_datetime64_any_dtype", + "is_datetime64_dtype", + "is_datetime64_ns_dtype", + "is_datetime64tz_dtype", + "is_decimal", + "is_dict_like", + "is_dtype_equal", + "is_ea_or_datetimelike_dtype", + "is_extension_array_dtype", + "is_file_like", + "is_float_dtype", + "is_int64_dtype", + "is_integer_dtype", + "is_interval", + "is_interval_dtype", + "is_iterator", + "is_named_tuple", + "is_nested_list_like", + "is_number", + "is_numeric_dtype", + "is_object_dtype", + "is_period_dtype", + "is_re", + "is_re_compilable", + "is_scipy_sparse", + "is_sequence", + "is_signed_integer_dtype", + "is_sparse", + "is_string_dtype", + "is_string_or_object_np_dtype", + "is_timedelta64_dtype", + "is_timedelta64_ns_dtype", + "is_unsigned_integer_dtype", + "needs_i8_conversion", + "pandas_dtype", + "TD64NS_DTYPE", + "validate_all_hashable", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/concat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/concat.py new file mode 100644 index 00000000..b489c14a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/concat.py @@ -0,0 +1,339 @@ +""" +Utility functions related to concat. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.astype import astype_array +from pandas.core.dtypes.cast import ( + common_dtype_categorical_compat, + find_common_type, + np_find_common_type, +) +from pandas.core.dtypes.dtypes import CategoricalDtype +from pandas.core.dtypes.generic import ( + ABCCategoricalIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeObj, + ) + + from pandas.core.arrays import ( + Categorical, + ExtensionArray, + ) + + +def _is_nonempty(x, axis) -> bool: + # filter empty arrays + # 1-d dtypes always are included here + if x.ndim <= axis: + return True + return x.shape[axis] > 0 + + +def concat_compat( + to_concat: Sequence[ArrayLike], axis: AxisInt = 0, ea_compat_axis: bool = False +) -> ArrayLike: + """ + provide concatenation of an array of arrays each of which is a single + 'normalized' dtypes (in that for example, if it's object, then it is a + non-datetimelike and provide a combined dtype for the resulting array that + preserves the overall dtype if possible) + + Parameters + ---------- + to_concat : sequence of arrays + axis : axis to provide concatenation + ea_compat_axis : bool, default False + For ExtensionArray compat, behave as if axis == 1 when determining + whether to drop empty arrays. + + Returns + ------- + a single array, preserving the combined dtypes + """ + if len(to_concat) and lib.dtypes_all_equal([obj.dtype for obj in to_concat]): + # fastpath! 
+ obj = to_concat[0] + if isinstance(obj, np.ndarray): + to_concat_arrs = cast("Sequence[np.ndarray]", to_concat) + return np.concatenate(to_concat_arrs, axis=axis) + + to_concat_eas = cast("Sequence[ExtensionArray]", to_concat) + if ea_compat_axis: + # We have 1D objects, that don't support axis keyword + return obj._concat_same_type(to_concat_eas) + elif axis == 0: + return obj._concat_same_type(to_concat_eas) + else: + # e.g. DatetimeArray + # NB: We are assuming here that ensure_wrapped_if_arraylike has + # been called where relevant. + return obj._concat_same_type( + # error: Unexpected keyword argument "axis" for "_concat_same_type" + # of "ExtensionArray" + to_concat_eas, + axis=axis, # type: ignore[call-arg] + ) + + # If all arrays are empty, there's nothing to convert, just short-cut to + # the concatenation, #3121. + # + # Creating an empty array directly is tempting, but the winnings would be + # marginal given that it would still require shape & dtype calculation and + # np.concatenate which has them both implemented is compiled. + orig = to_concat + non_empties = [x for x in to_concat if _is_nonempty(x, axis)] + if non_empties and axis == 0 and not ea_compat_axis: + # ea_compat_axis see GH#39574 + to_concat = non_empties + + any_ea, kinds, target_dtype = _get_result_dtype(to_concat, non_empties) + + if len(to_concat) < len(orig): + _, _, alt_dtype = _get_result_dtype(orig, non_empties) + if alt_dtype != target_dtype: + # GH#39122 + warnings.warn( + "The behavior of array concatenation with empty entries is " + "deprecated. In a future version, this will no longer exclude " + "empty items when determining the result dtype. " + "To retain the old behavior, exclude the empty entries before " + "the concat operation.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if target_dtype is not None: + to_concat = [astype_array(arr, target_dtype, copy=False) for arr in to_concat] + + if not isinstance(to_concat[0], np.ndarray): + # i.e. isinstance(to_concat[0], ExtensionArray) + to_concat_eas = cast("Sequence[ExtensionArray]", to_concat) + cls = type(to_concat[0]) + return cls._concat_same_type(to_concat_eas) + else: + to_concat_arrs = cast("Sequence[np.ndarray]", to_concat) + result = np.concatenate(to_concat_arrs, axis=axis) + + if not any_ea and "b" in kinds and result.dtype.kind in "iuf": + # GH#39817 cast to object instead of casting bools to numeric + result = result.astype(object, copy=False) + return result + + +def _get_result_dtype( + to_concat: Sequence[ArrayLike], non_empties: Sequence[ArrayLike] +) -> tuple[bool, set[str], DtypeObj | None]: + target_dtype = None + + dtypes = {obj.dtype for obj in to_concat} + kinds = {obj.dtype.kind for obj in to_concat} + + any_ea = any(not isinstance(x, np.ndarray) for x in to_concat) + if any_ea: + # i.e. 
any ExtensionArrays
+
+        # we ignore axis here, as internally concatting with EAs is always
+        # for axis=0
+        if len(dtypes) != 1:
+            target_dtype = find_common_type([x.dtype for x in to_concat])
+            target_dtype = common_dtype_categorical_compat(to_concat, target_dtype)
+
+    elif not len(non_empties):
+        # we have all empties, but may need to coerce the result dtype to
+        # object if we have non-numeric type operands (numpy would otherwise
+        # cast this to float)
+        if len(kinds) != 1:
+            if not len(kinds - {"i", "u", "f"}) or not len(kinds - {"b", "i", "u"}):
+                # let numpy coerce
+                pass
+            else:
+                # coerce to object
+                target_dtype = np.dtype(object)
+                kinds = {"o"}
+    else:
+        # error: Argument 1 to "np_find_common_type" has incompatible type
+        # "*Set[Union[ExtensionDtype, Any]]"; expected "dtype[Any]"
+        target_dtype = np_find_common_type(*dtypes)  # type: ignore[arg-type]
+
+    return any_ea, kinds, target_dtype
+
+
+def union_categoricals(
+    to_union, sort_categories: bool = False, ignore_order: bool = False
+) -> Categorical:
+    """
+    Combine list-like of Categorical-like, unioning categories.
+
+    All categories must have the same dtype.
+
+    Parameters
+    ----------
+    to_union : list-like
+        Categorical, CategoricalIndex, or Series with dtype='category'.
+    sort_categories : bool, default False
+        If true, resulting categories will be lexsorted, otherwise
+        they will be ordered as they appear in the data.
+    ignore_order : bool, default False
+        If true, the ordered attribute of the Categoricals will be ignored.
+        Results in an unordered categorical.
+
+    Returns
+    -------
+    Categorical
+
+    Raises
+    ------
+    TypeError
+        - all inputs do not have the same dtype
+        - all inputs do not have the same ordered property
+        - all inputs are ordered and their categories are not identical
+        - sort_categories=True and Categoricals are ordered
+    ValueError
+        Empty list of categoricals passed
+
+    Notes
+    -----
+    To learn more about categories, see `link
+    <https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html#unioning>`__
+
+    Examples
+    --------
+    If you want to combine categoricals that do not necessarily have
+    the same categories, `union_categoricals` will combine a list-like
+    of categoricals. The new categories will be the union of the
+    categories being combined.
+
+    >>> a = pd.Categorical(["b", "c"])
+    >>> b = pd.Categorical(["a", "b"])
+    >>> pd.api.types.union_categoricals([a, b])
+    ['b', 'c', 'a', 'b']
+    Categories (3, object): ['b', 'c', 'a']
+
+    By default, the resulting categories will be ordered as they appear
+    in the `categories` of the data. If you want the categories to be
+    lexsorted, use the `sort_categories=True` argument.
+
+    >>> pd.api.types.union_categoricals([a, b], sort_categories=True)
+    ['b', 'c', 'a', 'b']
+    Categories (3, object): ['a', 'b', 'c']
+
+    `union_categoricals` also works with the case of combining two
+    categoricals of the same categories and order information (e.g. what
+    you could also `append` for).
+
+    >>> a = pd.Categorical(["a", "b"], ordered=True)
+    >>> b = pd.Categorical(["a", "b", "a"], ordered=True)
+    >>> pd.api.types.union_categoricals([a, b])
+    ['a', 'b', 'a', 'b', 'a']
+    Categories (2, object): ['a' < 'b']
+
+    Raises `TypeError` because the categories are ordered and not identical.
+
+    >>> a = pd.Categorical(["a", "b"], ordered=True)
+    >>> b = pd.Categorical(["a", "b", "c"], ordered=True)
+    >>> pd.api.types.union_categoricals([a, b])
+    Traceback (most recent call last):
+        ...
+    TypeError: to union ordered Categoricals, all categories must be the same
+
+    Ordered categoricals with different categories or orderings can be
+    combined by using the `ignore_order=True` argument.
+
+    >>> a = pd.Categorical(["a", "b", "c"], ordered=True)
+    >>> b = pd.Categorical(["c", "b", "a"], ordered=True)
+    >>> pd.api.types.union_categoricals([a, b], ignore_order=True)
+    ['a', 'b', 'c', 'c', 'b', 'a']
+    Categories (3, object): ['a', 'b', 'c']
+
+    `union_categoricals` also works with a `CategoricalIndex`, or `Series`
+    containing categorical data, but note that the resulting array will
+    always be a plain `Categorical`.
+
+    >>> a = pd.Series(["b", "c"], dtype='category')
+    >>> b = pd.Series(["a", "b"], dtype='category')
+    >>> pd.api.types.union_categoricals([a, b])
+    ['b', 'c', 'a', 'b']
+    Categories (3, object): ['b', 'c', 'a']
+    """
+    from pandas import Categorical
+    from pandas.core.arrays.categorical import recode_for_categories
+
+    if len(to_union) == 0:
+        raise ValueError("No Categoricals to union")
+
+    def _maybe_unwrap(x):
+        if isinstance(x, (ABCCategoricalIndex, ABCSeries)):
+            return x._values
+        elif isinstance(x, Categorical):
+            return x
+        else:
+            raise TypeError("all components to combine must be Categorical")
+
+    to_union = [_maybe_unwrap(x) for x in to_union]
+    first = to_union[0]
+
+    if not lib.dtypes_all_equal([obj.categories.dtype for obj in to_union]):
+        raise TypeError("dtype of categories must be the same")
+
+    ordered = False
+    if all(first._categories_match_up_to_permutation(other) for other in to_union[1:]):
+        # identical categories - fastpath
+        categories = first.categories
+        ordered = first.ordered
+
+        all_codes = [first._encode_with_my_categories(x)._codes for x in to_union]
+        new_codes = np.concatenate(all_codes)
+
+        if sort_categories and not ignore_order and ordered:
+            raise TypeError("Cannot use sort_categories=True with ordered Categoricals")
+
+        if sort_categories and not categories.is_monotonic_increasing:
+            categories = categories.sort_values()
+            indexer = categories.get_indexer(first.categories)
+
+            from pandas.core.algorithms import take_nd
+
+            new_codes = take_nd(indexer, new_codes, fill_value=-1)
+    elif ignore_order or all(not c.ordered for c in to_union):
+        # different categories - union and recode
+        cats = first.categories.append([c.categories for c in to_union[1:]])
+        categories = cats.unique()
+        if sort_categories:
+            categories = categories.sort_values()
+
+        new_codes = [
+            recode_for_categories(c.codes, c.categories, categories) for c in to_union
+        ]
+        new_codes = np.concatenate(new_codes)
+    else:
+        # ordered - to show a proper error message
+        if all(c.ordered for c in to_union):
+            msg = "to union ordered Categoricals, all categories must be the same"
+            raise TypeError(msg)
+        raise TypeError("Categorical.ordered must be the same")
+
+    if ignore_order:
+        ordered = False
+
+    dtype = CategoricalDtype(categories=categories, ordered=ordered)
+    return Categorical._simple_new(new_codes, dtype=dtype)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/dtypes.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/dtypes.py
new file mode 100644
index 00000000..272e9928
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/dtypes.py
@@ -0,0 +1,2300 @@
+"""
+Define extension dtypes.
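+
+The dtypes defined here (CategoricalDtype, DatetimeTZDtype, PeriodDtype,
+IntervalDtype, SparseDtype, and related wrappers) duck-type np.dtype but
+are not actual numpy dtypes.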
+""" +from __future__ import annotations + +from datetime import ( + date, + datetime, + time, + timedelta, +) +from decimal import Decimal +import re +from typing import ( + TYPE_CHECKING, + Any, + cast, +) +import warnings + +import numpy as np +import pytz + +from pandas._libs import ( + lib, + missing as libmissing, +) +from pandas._libs.interval import Interval +from pandas._libs.properties import cache_readonly +from pandas._libs.tslibs import ( + BaseOffset, + NaT, + NaTType, + Period, + Timedelta, + Timestamp, + timezones, + to_offset, + tz_compare, +) +from pandas._libs.tslibs.dtypes import ( + PeriodDtypeBase, + abbrev_to_npy_unit, +) +from pandas._libs.tslibs.offsets import BDay +from pandas.compat import pa_version_under7p0 +from pandas.errors import PerformanceWarning +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.base import ( + ExtensionDtype, + StorageExtensionDtype, + register_extension_dtype, +) +from pandas.core.dtypes.generic import ( + ABCCategoricalIndex, + ABCIndex, +) +from pandas.core.dtypes.inference import ( + is_bool, + is_list_like, +) + +if not pa_version_under7p0: + import pyarrow as pa + +if TYPE_CHECKING: + from collections.abc import MutableMapping + from datetime import tzinfo + + import pyarrow as pa # noqa: F811, TCH004 + + from pandas._typing import ( + Dtype, + DtypeObj, + IntervalClosedType, + Ordered, + npt, + type_t, + ) + + from pandas import ( + Categorical, + Index, + ) + from pandas.core.arrays import ( + BaseMaskedArray, + DatetimeArray, + IntervalArray, + NumpyExtensionArray, + PeriodArray, + SparseArray, + ) + from pandas.core.arrays.arrow import ArrowExtensionArray + +str_type = str + + +class PandasExtensionDtype(ExtensionDtype): + """ + A np.dtype duck-typed class, suitable for holding a custom dtype. + + THIS IS NOT A REAL NUMPY DTYPE + """ + + type: Any + kind: Any + # The Any type annotations above are here only because mypy seems to have a + # problem dealing with multiple inheritance from PandasExtensionDtype + # and ExtensionDtype's @properties in the subclasses below. The kind and + # type variables in those subclasses are explicitly typed below. + subdtype = None + str: str_type + num = 100 + shape: tuple[int, ...] = () + itemsize = 8 + base: DtypeObj | None = None + isbuiltin = 0 + isnative = 0 + _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} + + def __repr__(self) -> str_type: + """ + Return a string representation for a particular object. + """ + return str(self) + + def __hash__(self) -> int: + raise NotImplementedError("sub-classes should implement an __hash__ method") + + def __getstate__(self) -> dict[str_type, Any]: + # pickle support; we don't want to pickle the cache + return {k: getattr(self, k, None) for k in self._metadata} + + @classmethod + def reset_cache(cls) -> None: + """clear the cache""" + cls._cache_dtypes = {} + + +class CategoricalDtypeType(type): + """ + the type of CategoricalDtype, this metaclass determines subclass ability + """ + + +@register_extension_dtype +class CategoricalDtype(PandasExtensionDtype, ExtensionDtype): + """ + Type for categorical data with the categories and orderedness. + + Parameters + ---------- + categories : sequence, optional + Must be unique, and must not contain any nulls. + The categories are stored in an Index, + and if an index is provided the dtype of that index will be used. + ordered : bool or None, default False + Whether or not this categorical is treated as a ordered categorical. 
+        None can be used to maintain the ordered value of existing categoricals when
+        used in operations that combine categoricals, e.g. astype, and will resolve to
+        False if there is no existing ordered to maintain.
+
+    Attributes
+    ----------
+    categories
+    ordered
+
+    Methods
+    -------
+    None
+
+    See Also
+    --------
+    Categorical : Represent a categorical variable in classic R / S-plus fashion.
+
+    Notes
+    -----
+    This class is useful for specifying the type of a ``Categorical``
+    independent of the values. See :ref:`categorical.categoricaldtype`
+    for more.
+
+    Examples
+    --------
+    >>> t = pd.CategoricalDtype(categories=['b', 'a'], ordered=True)
+    >>> pd.Series(['a', 'b', 'a', 'c'], dtype=t)
+    0      a
+    1      b
+    2      a
+    3    NaN
+    dtype: category
+    Categories (2, object): ['b' < 'a']
+
+    An empty CategoricalDtype with a specific dtype can be created
+    by providing an empty index. As follows,
+
+    >>> pd.CategoricalDtype(pd.DatetimeIndex([])).categories.dtype
+    dtype('<M8[ns]')
+    """
+
+    # TODO: Document public vs. private API
+    name = "category"
+    type: type[CategoricalDtypeType] = CategoricalDtypeType
+    kind: str_type = "O"
+    str = "|O08"
+    base = np.dtype("O")
+    _metadata = ("categories", "ordered")
+    _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
+
+    def __init__(self, categories=None, ordered: Ordered = False) -> None:
+        self._finalize(categories, ordered, fastpath=False)
+
+    @classmethod
+    def _from_fastpath(
+        cls, categories=None, ordered: bool | None = None
+    ) -> CategoricalDtype:
+        self = cls.__new__(cls)
+        self._finalize(categories, ordered, fastpath=True)
+        return self
+
+    @classmethod
+    def _from_categorical_dtype(
+        cls, dtype: CategoricalDtype, categories=None, ordered: Ordered | None = None
+    ) -> CategoricalDtype:
+        if categories is ordered is None:
+            return dtype
+        if categories is None:
+            categories = dtype.categories
+        if ordered is None:
+            ordered = dtype.ordered
+        return cls(categories, ordered)
+
+    @classmethod
+    def _from_values_or_dtype(
+        cls,
+        values=None,
+        categories=None,
+        ordered: bool | None = None,
+        dtype: Dtype | None = None,
+    ) -> CategoricalDtype:
+        """
+        Construct dtype from the input parameters used in :class:`Categorical`.
+
+        This constructor method specifically does not do the factorization
+        step, if that is needed to find the categories. This constructor may
+        therefore return ``CategoricalDtype(categories=None, ordered=None)``,
+        which may not be useful. Additional steps may therefore have to be
+        taken to create the final dtype.
+
+        The return dtype is specified from the inputs in this prioritized
+        order:
+        1. if dtype is a CategoricalDtype, return dtype
+        2. if dtype is the string 'category', create a CategoricalDtype from
+           the supplied categories and ordered parameters, and return that.
+        3. if values is a categorical, use value.dtype, but override it with
+           categories and ordered if either/both of those are not None.
+        4. if dtype is None and values is not a categorical, construct the
+           dtype from categories and ordered, even if either of those is None.
+
+        Parameters
+        ----------
+        values : list-like, optional
+            The list-like must be 1-dimensional.
+        categories : list-like, optional
+            Categories for the CategoricalDtype.
+        ordered : bool, optional
+            Designating if the categories are ordered.
+        dtype : CategoricalDtype or the string "category", optional
+            If ``CategoricalDtype``, cannot be used together with
+            `categories` or `ordered`.
+
+        Returns
+        -------
+        CategoricalDtype
+
+        Examples
+        --------
+        >>> pd.CategoricalDtype._from_values_or_dtype()
+        CategoricalDtype(categories=None, ordered=None, categories_dtype=None)
+        >>> pd.CategoricalDtype._from_values_or_dtype(
+        ...     categories=['a', 'b'], ordered=True
+        ... )
+        CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object)
+        >>> dtype1 = pd.CategoricalDtype(['a', 'b'], ordered=True)
+        >>> dtype2 = pd.CategoricalDtype(['x', 'y'], ordered=False)
+        >>> c = pd.Categorical([0, 1], dtype=dtype1)
+        >>> pd.CategoricalDtype._from_values_or_dtype(
+        ...     c, ['x', 'y'], ordered=True, dtype=dtype2
+        ... )
+        Traceback (most recent call last):
+            ...
+        ValueError: Cannot specify `categories` or `ordered` together with
+        `dtype`.
+
+        The supplied dtype takes precedence over values' dtype:
+
+        >>> pd.CategoricalDtype._from_values_or_dtype(c, dtype=dtype2)
+        CategoricalDtype(categories=['x', 'y'], ordered=False, categories_dtype=object)
+        """
+
+        if dtype is not None:
+            # The dtype argument takes precedence over values.dtype (if any)
+            if isinstance(dtype, str):
+                if dtype == "category":
+                    if ordered is None and cls.is_dtype(values):
+                        # GH#49309 preserve orderedness
+                        ordered = values.dtype.ordered
+
+                    dtype = CategoricalDtype(categories, ordered)
+                else:
+                    raise ValueError(f"Unknown dtype {repr(dtype)}")
+            elif categories is not None or ordered is not None:
+                raise ValueError(
+                    "Cannot specify `categories` or `ordered` together with `dtype`."
+                )
+            elif not isinstance(dtype, CategoricalDtype):
+                raise ValueError(f"Cannot construct CategoricalDtype from {dtype}")
+        elif cls.is_dtype(values):
+            # If no "dtype" was passed, use the one from "values", but honor
+            # the "ordered" and "categories" arguments
+            dtype = values.dtype._from_categorical_dtype(
+                values.dtype, categories, ordered
+            )
+        else:
+            # If dtype=None and values is not categorical, create a new dtype.
+            # Note: This could potentially have categories=None and
+            # ordered=None.
+            dtype = CategoricalDtype(categories, ordered)
+
+        return cast(CategoricalDtype, dtype)
+
+    @classmethod
+    def construct_from_string(cls, string: str_type) -> CategoricalDtype:
+        """
+        Construct a CategoricalDtype from a string.
+
+        Parameters
+        ----------
+        string : str
+            Must be the string "category" in order to be successfully constructed.
+
+        Returns
+        -------
+        CategoricalDtype
+            Instance of the dtype.
+
+        Raises
+        ------
+        TypeError
+            If a CategoricalDtype cannot be constructed from the input.
+        """
+        if not isinstance(string, str):
+            raise TypeError(
+                f"'construct_from_string' expects a string, got {type(string)}"
+            )
+        if string != cls.name:
+            raise TypeError(f"Cannot construct a 'CategoricalDtype' from '{string}'")
+
+        # need ordered=None to ensure that operations specifying dtype="category" don't
+        # override the ordered value for existing categoricals
+        return cls(ordered=None)
+
+    def _finalize(self, categories, ordered: Ordered, fastpath: bool = False) -> None:
+        if ordered is not None:
+            self.validate_ordered(ordered)
+
+        if categories is not None:
+            categories = self.validate_categories(categories, fastpath=fastpath)
+
+        self._categories = categories
+        self._ordered = ordered
+
+    def __setstate__(self, state: MutableMapping[str_type, Any]) -> None:
+        # for pickle compat. __getstate__ is defined in the
+        # PandasExtensionDtype superclass and uses the public properties to
+        # pickle -> need to set the settable private ones here (see GH26067)
+        self._categories = state.pop("categories", None)
+        self._ordered = state.pop("ordered", False)
+
+    def __hash__(self) -> int:
+        # _hash_categories returns a uint64, so use the negative
+        # space for when we have unknown categories to avoid a conflict
+        if self.categories is None:
+            if self.ordered:
+                return -1
+            else:
+                return -2
+        # We *do* want to include the real self.ordered here
+        return int(self._hash_categories)
+
+    def __eq__(self, other: Any) -> bool:
+        """
+        Rules for CDT equality:
+        1) Any CDT is equal to the string 'category'
+        2) Any CDT is equal to itself
+        3) Any CDT is equal to a CDT with categories=None regardless of ordered
+        4) A CDT with ordered=True is only equal to another CDT with
+           ordered=True and identical categories in the same order
+        5) A CDT with ordered={False, None} is only equal to another CDT with
+           ordered={False, None} and identical categories, but same order is
+           not required. There is no distinction between False/None.
+        6) Any other comparison returns False
+        """
+        if isinstance(other, str):
+            return other == self.name
+        elif other is self:
+            return True
+        elif not (hasattr(other, "ordered") and hasattr(other, "categories")):
+            return False
+        elif self.categories is None or other.categories is None:
+            # For non-fully-initialized dtypes, these are only equal to
+            # - the string "category" (handled above)
+            # - other CategoricalDtype with categories=None
+            return self.categories is other.categories
+        elif self.ordered or other.ordered:
+            # At least one has ordered=True; equal if both have ordered=True
+            # and the same values for categories in the same order.
+            return (self.ordered == other.ordered) and self.categories.equals(
+                other.categories
+            )
+        else:
+            # Neither has ordered=True; equal if both have the same categories,
+            # but same order is not necessary. There is no distinction between
+            # ordered=False and ordered=None: CDT(., False) and CDT(., None)
+            # will be equal if they have the same categories.
+            left = self.categories
+            right = other.categories
+
+            # GH#36280 the ordering of checks here is for performance
+            if not left.dtype == right.dtype:
+                return False
+
+            if len(left) != len(right):
+                return False
+
+            if self.categories.equals(other.categories):
+                # Check and see if they happen to be identical categories
+                return True
+
+            if left.dtype != object:
+                # Faster than calculating hash
+                indexer = left.get_indexer(right)
+                # Because left and right have the same length and are unique,
+                # `indexer` not having any -1s implies that there is a
+                # bijection between `left` and `right`.
+                return (indexer != -1).all()
+
+            # With object-dtype we need a comparison that identifies
+            # e.g. int(2) as distinct from float(2)
+            return hash(self) == hash(other)
+
+    def __repr__(self) -> str_type:
+        if self.categories is None:
+            data = "None"
+            dtype = "None"
+        else:
+            data = self.categories._format_data(name=type(self).__name__)
+            if data is None:
+                # self.categories is RangeIndex
+                data = str(self.categories._range)
+            data = data.rstrip(", ")
+            dtype = self.categories.dtype
+
+        return (
+            f"CategoricalDtype(categories={data}, ordered={self.ordered}, "
+            f"categories_dtype={dtype})"
+        )
+
+    @cache_readonly
+    def _hash_categories(self) -> int:
+        from pandas.core.util.hashing import (
+            combine_hash_arrays,
+            hash_array,
+            hash_tuples,
+        )
+
+        categories = self.categories
+        ordered = self.ordered
+
+        if len(categories) and isinstance(categories[0], tuple):
+            # assumes if any individual category is a tuple, then all are. ATM
+            # I don't really want to support just some of the categories being
+            # tuples.
+            cat_list = list(categories)  # breaks if a np.array of categories
+            cat_array = hash_tuples(cat_list)
+        else:
+            if categories.dtype == "O" and len({type(x) for x in categories}) != 1:
+                # TODO: hash_array doesn't handle mixed types. It casts
+                # everything to a str first, which means we treat
+                # {'1', '2'} the same as {'1', 2}
+                # find a better solution
+                hashed = hash((tuple(categories), ordered))
+                return hashed
+
+            if DatetimeTZDtype.is_dtype(categories.dtype):
+                # Avoid future warning.
+                categories = categories.view("datetime64[ns]")
+
+            cat_array = hash_array(np.asarray(categories), categorize=False)
+        if ordered:
+            cat_array = np.vstack(
+                [cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)]
+            )
+        else:
+            cat_array = np.array([cat_array])
+        combined_hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array))
+        return np.bitwise_xor.reduce(combined_hashed)
+
+    @classmethod
+    def construct_array_type(cls) -> type_t[Categorical]:
+        """
+        Return the array type associated with this dtype.
+
+        Returns
+        -------
+        type
+        """
+        from pandas import Categorical
+
+        return Categorical
+
+    @staticmethod
+    def validate_ordered(ordered: Ordered) -> None:
+        """
+        Validates that we have a valid ordered parameter. If
+        it is not a boolean, a TypeError will be raised.
+
+        Parameters
+        ----------
+        ordered : object
+            The parameter to be verified.
+
+        Raises
+        ------
+        TypeError
+            If 'ordered' is not a boolean.
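+
+        Examples (illustrative):
+
+        >>> CategoricalDtype.validate_ordered(True)
+        >>> CategoricalDtype.validate_ordered("yes")
+        Traceback (most recent call last):
+            ...
+        TypeError: 'ordered' must either be 'True' or 'False'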
+ """ + if not is_bool(ordered): + raise TypeError("'ordered' must either be 'True' or 'False'") + + @staticmethod + def validate_categories(categories, fastpath: bool = False) -> Index: + """ + Validates that we have good categories + + Parameters + ---------- + categories : array-like + fastpath : bool + Whether to skip nan and uniqueness checks + + Returns + ------- + categories : Index + """ + from pandas.core.indexes.base import Index + + if not fastpath and not is_list_like(categories): + raise TypeError( + f"Parameter 'categories' must be list-like, was {repr(categories)}" + ) + if not isinstance(categories, ABCIndex): + categories = Index._with_infer(categories, tupleize_cols=False) + + if not fastpath: + if categories.hasnans: + raise ValueError("Categorical categories cannot be null") + + if not categories.is_unique: + raise ValueError("Categorical categories must be unique") + + if isinstance(categories, ABCCategoricalIndex): + categories = categories.categories + + return categories + + def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype: + """ + Returns a CategoricalDtype with categories and ordered taken from dtype + if specified, otherwise falling back to self if unspecified + + Parameters + ---------- + dtype : CategoricalDtype + + Returns + ------- + new_dtype : CategoricalDtype + """ + if isinstance(dtype, str) and dtype == "category": + # dtype='category' should not change anything + return self + elif not self.is_dtype(dtype): + raise ValueError( + f"a CategoricalDtype must be passed to perform an update, " + f"got {repr(dtype)}" + ) + else: + # from here on, dtype is a CategoricalDtype + dtype = cast(CategoricalDtype, dtype) + + # update categories/ordered unless they've been explicitly passed as None + new_categories = ( + dtype.categories if dtype.categories is not None else self.categories + ) + new_ordered = dtype.ordered if dtype.ordered is not None else self.ordered + + return CategoricalDtype(new_categories, new_ordered) + + @property + def categories(self) -> Index: + """ + An ``Index`` containing the unique categories allowed. + + Examples + -------- + >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True) + >>> cat_type.categories + Index(['a', 'b'], dtype='object') + """ + return self._categories + + @property + def ordered(self) -> Ordered: + """ + Whether the categories have an ordered relationship. 
+
+        Examples
+        --------
+        >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True)
+        >>> cat_type.ordered
+        True
+
+        >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=False)
+        >>> cat_type.ordered
+        False
+        """
+        return self._ordered
+
+    @property
+    def _is_boolean(self) -> bool:
+        from pandas.core.dtypes.common import is_bool_dtype
+
+        return is_bool_dtype(self.categories)
+
+    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
+        # check if we have all categorical dtype with identical categories
+        if all(isinstance(x, CategoricalDtype) for x in dtypes):
+            first = dtypes[0]
+            if all(first == other for other in dtypes[1:]):
+                return first
+
+        # special case non-initialized categorical
+        # TODO we should figure out the expected return value in general
+        non_init_cats = [
+            isinstance(x, CategoricalDtype) and x.categories is None for x in dtypes
+        ]
+        if all(non_init_cats):
+            return self
+        elif any(non_init_cats):
+            return None
+
+        # categorical is aware of Sparse -> extract sparse subdtypes
+        dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]
+        # extract the categories' dtype
+        non_cat_dtypes = [
+            x.categories.dtype if isinstance(x, CategoricalDtype) else x for x in dtypes
+        ]
+        # TODO should categorical always give an answer?
+        from pandas.core.dtypes.cast import find_common_type
+
+        return find_common_type(non_cat_dtypes)
+
+
+@register_extension_dtype
+class DatetimeTZDtype(PandasExtensionDtype):
+    """
+    An ExtensionDtype for timezone-aware datetime data.
+
+    **This is not an actual numpy dtype**, but a duck type.
+
+    Parameters
+    ----------
+    unit : str, default "ns"
+        The precision of the datetime data. Currently limited
+        to ``"ns"``.
+    tz : str, int, or datetime.tzinfo
+        The timezone.
+
+    Attributes
+    ----------
+    unit
+    tz
+
+    Methods
+    -------
+    None
+
+    Raises
+    ------
+    ZoneInfoNotFoundError
+        When the requested timezone cannot be found.
+
+    Examples
+    --------
+    >>> from zoneinfo import ZoneInfo
+    >>> pd.DatetimeTZDtype(tz=ZoneInfo('UTC'))
+    datetime64[ns, UTC]
+
+    >>> pd.DatetimeTZDtype(tz=ZoneInfo('Europe/Paris'))
+    datetime64[ns, Europe/Paris]
+    """
+
+    type: type[Timestamp] = Timestamp
+    kind: str_type = "M"
+    num = 101
+    _metadata = ("unit", "tz")
+    _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
+    _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
+
+    @property
+    def na_value(self) -> NaTType:
+        return NaT
+
+    @cache_readonly
+    def base(self) -> DtypeObj:  # type: ignore[override]
+        return np.dtype(f"M8[{self.unit}]")
+
+    # error: Signature of "str" incompatible with supertype "PandasExtensionDtype"
+    @cache_readonly
+    def str(self) -> str:  # type: ignore[override]
+        return f"|M8[{self.unit}]"
+
+    def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None:
+        if isinstance(unit, DatetimeTZDtype):
+            # error: "str" has no attribute "tz"
+            unit, tz = unit.unit, unit.tz  # type: ignore[attr-defined]
+
+        if unit != "ns":
+            if isinstance(unit, str) and tz is None:
+                # maybe a string like datetime64[ns, tz], which we support for
+                # now.
+                result = type(self).construct_from_string(unit)
+                unit = result.unit
+                tz = result.tz
+                msg = (
+                    f"Passing a dtype alias like 'datetime64[ns, {tz}]' "
+                    "to DatetimeTZDtype is no longer supported. Use "
+                    "'DatetimeTZDtype.construct_from_string()' instead."
+                )
+                raise ValueError(msg)
+            if unit not in ["s", "ms", "us", "ns"]:
+                raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units")
+
+        if tz:
+            tz = timezones.maybe_get_tz(tz)
+            tz = timezones.tz_standardize(tz)
+        elif tz is not None:
+            raise pytz.UnknownTimeZoneError(tz)
+        if tz is None:
+            raise TypeError("A 'tz' is required.")
+
+        self._unit = unit
+        self._tz = tz
+
+    @cache_readonly
+    def _creso(self) -> int:
+        """
+        The NPY_DATETIMEUNIT corresponding to this dtype's resolution.
+        """
+        return abbrev_to_npy_unit(self.unit)
+
+    @property
+    def unit(self) -> str_type:
+        """
+        The precision of the datetime data.
+
+        Examples
+        --------
+        >>> from zoneinfo import ZoneInfo
+        >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
+        >>> dtype.unit
+        'ns'
+        """
+        return self._unit
+
+    @property
+    def tz(self) -> tzinfo:
+        """
+        The timezone.
+
+        Examples
+        --------
+        >>> from zoneinfo import ZoneInfo
+        >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles'))
+        >>> dtype.tz
+        zoneinfo.ZoneInfo(key='America/Los_Angeles')
+        """
+        return self._tz
+
+    @classmethod
+    def construct_array_type(cls) -> type_t[DatetimeArray]:
+        """
+        Return the array type associated with this dtype.
+
+        Returns
+        -------
+        type
+        """
+        from pandas.core.arrays import DatetimeArray
+
+        return DatetimeArray
+
+    @classmethod
+    def construct_from_string(cls, string: str_type) -> DatetimeTZDtype:
+        """
+        Construct a DatetimeTZDtype from a string.
+
+        Parameters
+        ----------
+        string : str
+            The string alias for this DatetimeTZDtype.
+            Should be formatted like ``datetime64[ns, <tz>]``,
+            where ``<tz>`` is the timezone name.
+
+        Examples
+        --------
+        >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]')
+        datetime64[ns, UTC]
+        """
+        if not isinstance(string, str):
+            raise TypeError(
+                f"'construct_from_string' expects a string, got {type(string)}"
+            )
+
+        msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"
+        match = cls._match.match(string)
+        if match:
+            d = match.groupdict()
+            try:
+                return cls(unit=d["unit"], tz=d["tz"])
+            except (KeyError, TypeError, ValueError) as err:
+                # KeyError if maybe_get_tz tries and fails to get a
+                # pytz timezone (actually pytz.UnknownTimeZoneError).
+                # TypeError if we pass a nonsense tz;
+                # ValueError if we pass a unit other than "ns"
+                raise TypeError(msg) from err
+        raise TypeError(msg)
+
+    def __str__(self) -> str_type:
+        return f"datetime64[{self.unit}, {self.tz}]"
+
+    @property
+    def name(self) -> str_type:
+        """A string representation of the dtype."""
+        return str(self)
+
+    def __hash__(self) -> int:
+        # make myself hashable
+        # TODO: update this.
+        return hash(str(self))
+
+    def __eq__(self, other: Any) -> bool:
+        if isinstance(other, str):
+            if other.startswith("M8["):
+                other = f"datetime64[{other[3:]}"
+            return other == self.name
+
+        return (
+            isinstance(other, DatetimeTZDtype)
+            and self.unit == other.unit
+            and tz_compare(self.tz, other.tz)
+        )
+
+    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> DatetimeArray:
+        """
+        Construct DatetimeArray from pyarrow Array/ChunkedArray.
+
+        Note: If the units in the pyarrow Array are the same as this
+        DatetimeDtype, then values corresponding to the integer representation
+        of ``NaT`` (e.g. one nanosecond before :attr:`pandas.Timestamp.min`)
+        are converted to ``NaT``, regardless of the null indicator in the
+        pyarrow array.
+
+        Parameters
+        ----------
+        array : pyarrow.Array or pyarrow.ChunkedArray
+            The Arrow array to convert to DatetimeArray.
+
+        Returns
+        -------
+        extension array : DatetimeArray
+        """
+        import pyarrow
+
+        from pandas.core.arrays import DatetimeArray
+
+        array = array.cast(pyarrow.timestamp(unit=self._unit), safe=True)
+
+        if isinstance(array, pyarrow.Array):
+            np_arr = array.to_numpy(zero_copy_only=False)
+        else:
+            np_arr = array.to_numpy()
+
+        return DatetimeArray(np_arr, dtype=self, copy=False)
+
+    def __setstate__(self, state) -> None:
+        # for pickle compat. __getstate__ is defined in the
+        # PandasExtensionDtype superclass and uses the public properties to
+        # pickle -> need to set the settable private ones here (see GH26067)
+        self._tz = state["tz"]
+        self._unit = state["unit"]
+
+
+@register_extension_dtype
+class PeriodDtype(PeriodDtypeBase, PandasExtensionDtype):
+    """
+    An ExtensionDtype for Period data.
+
+    **This is not an actual numpy dtype**, but a duck type.
+
+    Parameters
+    ----------
+    freq : str or DateOffset
+        The frequency of this PeriodDtype.
+
+    Attributes
+    ----------
+    freq
+
+    Methods
+    -------
+    None
+
+    Examples
+    --------
+    >>> pd.PeriodDtype(freq='D')
+    period[D]
+
+    >>> pd.PeriodDtype(freq=pd.offsets.MonthEnd())
+    period[M]
+    """
+
+    type: type[Period] = Period
+    kind: str_type = "O"
+    str = "|O08"
+    base = np.dtype("O")
+    num = 102
+    _metadata = ("freq",)
+    _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]")
+    # error: Incompatible types in assignment (expression has type
+    # "Dict[int, PandasExtensionDtype]", base class "PandasExtensionDtype"
+    # defined the type as "Dict[str, PandasExtensionDtype]") [assignment]
+    _cache_dtypes: dict[BaseOffset, int] = {}  # type: ignore[assignment]
+    __hash__ = PeriodDtypeBase.__hash__
+    _freq: BaseOffset
+
+    def __new__(cls, freq):
+        """
+        Parameters
+        ----------
+        freq : PeriodDtype, BaseOffset, or string
+        """
+        if isinstance(freq, PeriodDtype):
+            return freq
+
+        if not isinstance(freq, BaseOffset):
+            freq = cls._parse_dtype_strict(freq)
+
+        if isinstance(freq, BDay):
+            # GH#53446
+            warnings.warn(
+                "PeriodDtype[B] is deprecated and will be removed in a future "
+                "version. Use a DatetimeIndex with freq='B' instead",
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
+
+        try:
+            dtype_code = cls._cache_dtypes[freq]
+        except KeyError:
+            dtype_code = freq._period_dtype_code
+            cls._cache_dtypes[freq] = dtype_code
+        u = PeriodDtypeBase.__new__(cls, dtype_code, freq.n)
+        u._freq = freq
+        return u
+
+    def __reduce__(self):
+        return type(self), (self.name,)
+
+    @property
+    def freq(self):
+        """
+        The frequency object of this PeriodDtype.
+
+        Examples
+        --------
+        >>> dtype = pd.PeriodDtype(freq='D')
+        >>> dtype.freq
+        <Day>
+        """
+        return self._freq
+
+    @classmethod
+    def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset:
+        if isinstance(freq, str):  # note: freq is already of type str!
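+            # For example, "period[D]" (or "Period[D]") is unwrapped to "D"
+            # here, and to_offset("D") below then yields a <Day> offset; a
+            # bare alias like "D" is passed straight through to to_offset.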
+            if freq.startswith(("Period[", "period[")):
+                m = cls._match.search(freq)
+                if m is not None:
+                    freq = m.group("freq")
+
+        freq_offset = to_offset(freq)
+        if freq_offset is not None:
+            return freq_offset
+
+        raise TypeError(
+            "PeriodDtype argument should be string or BaseOffset, "
+            f"got {type(freq).__name__}"
+        )
+
+    @classmethod
+    def construct_from_string(cls, string: str_type) -> PeriodDtype:
+        """
+        Strict construction from a string, raise a TypeError if not
+        possible
+        """
+        if (
+            isinstance(string, str)
+            and (string.startswith(("period[", "Period[")))
+            or isinstance(string, BaseOffset)
+        ):
+            # do not parse string like U as period[U]
+            # avoid tuple to be regarded as freq
+            try:
+                return cls(freq=string)
+            except ValueError:
+                pass
+        if isinstance(string, str):
+            msg = f"Cannot construct a 'PeriodDtype' from '{string}'"
+        else:
+            msg = f"'construct_from_string' expects a string, got {type(string)}"
+        raise TypeError(msg)
+
+    def __str__(self) -> str_type:
+        return self.name
+
+    @property
+    def name(self) -> str_type:
+        return f"period[{self._freqstr}]"
+
+    @property
+    def na_value(self) -> NaTType:
+        return NaT
+
+    def __eq__(self, other: Any) -> bool:
+        if isinstance(other, str):
+            return other in [self.name, self.name.title()]
+
+        return super().__eq__(other)
+
+    def __ne__(self, other: Any) -> bool:
+        return not self.__eq__(other)
+
+    @classmethod
+    def is_dtype(cls, dtype: object) -> bool:
+        """
+        Return a boolean indicating whether the passed type is an actual
+        dtype that we can match (via string or type)
+        """
+        if isinstance(dtype, str):
+            # PeriodDtype can be instantiated from freq string like "U",
+            # but doesn't regard freq str like "U" as dtype.
+            if dtype.startswith(("period[", "Period[")):
+                try:
+                    return cls._parse_dtype_strict(dtype) is not None
+                except ValueError:
+                    return False
+            else:
+                return False
+        return super().is_dtype(dtype)
+
+    @classmethod
+    def construct_array_type(cls) -> type_t[PeriodArray]:
+        """
+        Return the array type associated with this dtype.
+
+        Returns
+        -------
+        type
+        """
+        from pandas.core.arrays import PeriodArray
+
+        return PeriodArray
+
+    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> PeriodArray:
+        """
+        Construct PeriodArray from pyarrow Array/ChunkedArray.
+        """
+        import pyarrow
+
+        from pandas.core.arrays import PeriodArray
+        from pandas.core.arrays.arrow._arrow_utils import (
+            pyarrow_array_to_numpy_and_mask,
+        )
+
+        if isinstance(array, pyarrow.Array):
+            chunks = [array]
+        else:
+            chunks = array.chunks
+
+        results = []
+        for arr in chunks:
+            data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=np.dtype(np.int64))
+            parr = PeriodArray(data.copy(), dtype=self, copy=False)
+            # error: Invalid index type "ndarray[Any, dtype[bool_]]" for "PeriodArray";
+            # expected type "Union[int, Sequence[int], Sequence[bool], slice]"
+            parr[~mask] = NaT  # type: ignore[index]
+            results.append(parr)
+
+        if not results:
+            return PeriodArray(np.array([], dtype="int64"), dtype=self, copy=False)
+        return PeriodArray._concat_same_type(results)
+
+
+@register_extension_dtype
+class IntervalDtype(PandasExtensionDtype):
+    """
+    An ExtensionDtype for Interval data.
+
+    **This is not an actual numpy dtype**, but a duck type.
+
+    Parameters
+    ----------
+    subtype : str, np.dtype
+        The dtype of the Interval bounds.
+
+    Attributes
+    ----------
+    subtype
+
+    Methods
+    -------
+    None
+
+    Examples
+    --------
+    >>> pd.IntervalDtype(subtype='int64', closed='both')
+    interval[int64, both]
+    """
+
+    name = "interval"
+    kind: str_type = "O"
+    str = "|O08"
+    base = np.dtype("O")
+    num = 103
+    _metadata = (
+        "subtype",
+        "closed",
+    )
+
+    _match = re.compile(
+        r"(I|i)nterval\[(?P<subtype>[^,]+(\[.+\])?)"
+        r"(, (?P<closed>(right|left|both|neither)))?\]"
+    )
+
+    _cache_dtypes: dict[str_type, PandasExtensionDtype] = {}
+    _subtype: None | np.dtype
+    _closed: IntervalClosedType | None
+
+    def __init__(self, subtype=None, closed: IntervalClosedType | None = None) -> None:
+        from pandas.core.dtypes.common import (
+            is_string_dtype,
+            pandas_dtype,
+        )
+
+        if closed is not None and closed not in {"right", "left", "both", "neither"}:
+            raise ValueError("closed must be one of 'right', 'left', 'both', 'neither'")
+
+        if isinstance(subtype, IntervalDtype):
+            if closed is not None and closed != subtype.closed:
+                raise ValueError(
+                    "dtype.closed and 'closed' do not match. "
+                    "Try IntervalDtype(dtype.subtype, closed) instead."
+                )
+            self._subtype = subtype._subtype
+            self._closed = subtype._closed
+        elif subtype is None:
+            # we are called as an empty constructor
+            # generally for pickle compat
+            self._subtype = None
+            self._closed = closed
+        elif isinstance(subtype, str) and subtype.lower() == "interval":
+            self._subtype = None
+            self._closed = closed
+        else:
+            if isinstance(subtype, str):
+                m = IntervalDtype._match.search(subtype)
+                if m is not None:
+                    gd = m.groupdict()
+                    subtype = gd["subtype"]
+                    if gd.get("closed", None) is not None:
+                        if closed is not None:
+                            if closed != gd["closed"]:
+                                raise ValueError(
+                                    "'closed' keyword does not match value "
+                                    "specified in dtype string"
+                                )
+                        closed = gd["closed"]  # type: ignore[assignment]
+
+            try:
+                subtype = pandas_dtype(subtype)
+            except TypeError as err:
+                raise TypeError("could not construct IntervalDtype") from err
+            if CategoricalDtype.is_dtype(subtype) or is_string_dtype(subtype):
+                # GH 19016
+                msg = (
+                    "category, object, and string subtypes are not supported "
+                    "for IntervalDtype"
+                )
+                raise TypeError(msg)
+            self._subtype = subtype
+            self._closed = closed
+
+    @cache_readonly
+    def _can_hold_na(self) -> bool:
+        subtype = self._subtype
+        if subtype is None:
+            # partially-initialized
+            raise NotImplementedError(
+                "_can_hold_na is not defined for partially-initialized IntervalDtype"
+            )
+        if subtype.kind in "iu":
+            return False
+        return True
+
+    @property
+    def closed(self) -> IntervalClosedType:
+        return self._closed  # type: ignore[return-value]
+
+    @property
+    def subtype(self):
+        """
+        The dtype of the Interval bounds.
+
+        Examples
+        --------
+        >>> dtype = pd.IntervalDtype(subtype='int64', closed='both')
+        >>> dtype.subtype
+        dtype('int64')
+        """
+        return self._subtype
+
+    @classmethod
+    def construct_array_type(cls) -> type[IntervalArray]:
+        """
+        Return the array type associated with this dtype.
+
+        Returns
+        -------
+        type
+        """
+        from pandas.core.arrays import IntervalArray
+
+        return IntervalArray
+
+    @classmethod
+    def construct_from_string(cls, string: str_type) -> IntervalDtype:
+        """
+        Attempt to construct this type from a string, raising a TypeError
+        if it's not possible.
+        """
+        if not isinstance(string, str):
+            raise TypeError(
+                f"'construct_from_string' expects a string, got {type(string)}"
+            )
+
+        if string.lower() == "interval" or cls._match.search(string) is not None:
+            return cls(string)
+
+        msg = (
+            f"Cannot construct an 'IntervalDtype' from '{string}'.\n\n"
+            "Incorrectly formatted string passed to constructor. "
+            "Valid formats include Interval or Interval[dtype] "
+            "where dtype is numeric, datetime, or timedelta"
+        )
+        raise TypeError(msg)
+
+    @property
+    def type(self) -> type[Interval]:
+        return Interval
+
+    def __str__(self) -> str_type:
+        if self.subtype is None:
+            return "interval"
+        if self.closed is None:
+            # Only partially initialized GH#38394
+            return f"interval[{self.subtype}]"
+        return f"interval[{self.subtype}, {self.closed}]"
+
+    def __hash__(self) -> int:
+        # make myself hashable
+        return hash(str(self))
+
+    def __eq__(self, other: Any) -> bool:
+        if isinstance(other, str):
+            return other.lower() in (self.name.lower(), str(self).lower())
+        elif not isinstance(other, IntervalDtype):
+            return False
+        elif self.subtype is None or other.subtype is None:
+            # None should match any subtype
+            return True
+        elif self.closed != other.closed:
+            return False
+        else:
+            return self.subtype == other.subtype
+
+    def __setstate__(self, state) -> None:
+        # for pickle compat. __getstate__ is defined in the
+        # PandasExtensionDtype superclass and uses the public properties to
+        # pickle -> need to set the settable private ones here (see GH26067)
+        self._subtype = state["subtype"]
+
+        # backward-compat older pickles won't have "closed" key
+        self._closed = state.pop("closed", None)
+
+    @classmethod
+    def is_dtype(cls, dtype: object) -> bool:
+        """
+        Return a boolean indicating whether the passed type is an actual
+        dtype that we can match (via string or type)
+        """
+        if isinstance(dtype, str):
+            if dtype.lower().startswith("interval"):
+                try:
+                    return cls.construct_from_string(dtype) is not None
+                except (ValueError, TypeError):
+                    return False
+            else:
+                return False
+        return super().is_dtype(dtype)
+
+    def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> IntervalArray:
+        """
+        Construct IntervalArray from pyarrow Array/ChunkedArray.
+ """ + import pyarrow + + from pandas.core.arrays import IntervalArray + + if isinstance(array, pyarrow.Array): + chunks = [array] + else: + chunks = array.chunks + + results = [] + for arr in chunks: + if isinstance(arr, pyarrow.ExtensionArray): + arr = arr.storage + left = np.asarray(arr.field("left"), dtype=self.subtype) + right = np.asarray(arr.field("right"), dtype=self.subtype) + iarr = IntervalArray.from_arrays(left, right, closed=self.closed) + results.append(iarr) + + if not results: + return IntervalArray.from_arrays( + np.array([], dtype=self.subtype), + np.array([], dtype=self.subtype), + closed=self.closed, + ) + return IntervalArray._concat_same_type(results) + + def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: + if not all(isinstance(x, IntervalDtype) for x in dtypes): + return None + + closed = cast("IntervalDtype", dtypes[0]).closed + if not all(cast("IntervalDtype", x).closed == closed for x in dtypes): + return np.dtype(object) + + from pandas.core.dtypes.cast import find_common_type + + common = find_common_type([cast("IntervalDtype", x).subtype for x in dtypes]) + if common == object: + return np.dtype(object) + return IntervalDtype(common, closed=closed) + + +class NumpyEADtype(ExtensionDtype): + """ + A Pandas ExtensionDtype for NumPy dtypes. + + This is mostly for internal compatibility, and is not especially + useful on its own. + + Parameters + ---------- + dtype : object + Object to be converted to a NumPy data type object. + + See Also + -------- + numpy.dtype + """ + + _metadata = ("_dtype",) + + def __init__(self, dtype: npt.DTypeLike | NumpyEADtype | None) -> None: + if isinstance(dtype, NumpyEADtype): + # make constructor univalent + dtype = dtype.numpy_dtype + self._dtype = np.dtype(dtype) + + def __repr__(self) -> str: + return f"NumpyEADtype({repr(self.name)})" + + @property + def numpy_dtype(self) -> np.dtype: + """ + The NumPy dtype this NumpyEADtype wraps. + """ + return self._dtype + + @property + def name(self) -> str: + """ + A bit-width name for this data-type. + """ + return self._dtype.name + + @property + def type(self) -> type[np.generic]: + """ + The type object used to instantiate a scalar of this NumPy data-type. + """ + return self._dtype.type + + @property + def _is_numeric(self) -> bool: + # exclude object, str, unicode, void. + return self.kind in set("biufc") + + @property + def _is_boolean(self) -> bool: + return self.kind == "b" + + @classmethod + def construct_from_string(cls, string: str) -> NumpyEADtype: + try: + dtype = np.dtype(string) + except TypeError as err: + if not isinstance(string, str): + msg = f"'construct_from_string' expects a string, got {type(string)}" + else: + msg = f"Cannot construct a 'NumpyEADtype' from '{string}'" + raise TypeError(msg) from err + return cls(dtype) + + @classmethod + def construct_array_type(cls) -> type_t[NumpyExtensionArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + from pandas.core.arrays import NumpyExtensionArray + + return NumpyExtensionArray + + @property + def kind(self) -> str: + """ + A character code (one of 'biufcmMOSUV') identifying the general kind of data. + """ + return self._dtype.kind + + @property + def itemsize(self) -> int: + """ + The element size of this data-type object. + """ + return self._dtype.itemsize + + +class BaseMaskedDtype(ExtensionDtype): + """ + Base class for dtypes for BaseMaskedArray subclasses. 
+ """ + + name: str + base = None + type: type + + @property + def na_value(self) -> libmissing.NAType: + return libmissing.NA + + @cache_readonly + def numpy_dtype(self) -> np.dtype: + """Return an instance of our numpy dtype""" + return np.dtype(self.type) + + @cache_readonly + def kind(self) -> str: + return self.numpy_dtype.kind + + @cache_readonly + def itemsize(self) -> int: + """Return the number of bytes in this dtype""" + return self.numpy_dtype.itemsize + + @classmethod + def construct_array_type(cls) -> type_t[BaseMaskedArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + raise NotImplementedError + + @classmethod + def from_numpy_dtype(cls, dtype: np.dtype) -> BaseMaskedDtype: + """ + Construct the MaskedDtype corresponding to the given numpy dtype. + """ + if dtype.kind == "b": + from pandas.core.arrays.boolean import BooleanDtype + + return BooleanDtype() + elif dtype.kind in "iu": + from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE + + return NUMPY_INT_TO_DTYPE[dtype] + elif dtype.kind == "f": + from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE + + return NUMPY_FLOAT_TO_DTYPE[dtype] + else: + raise NotImplementedError(dtype) + + def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: + # We unwrap any masked dtypes, find the common dtype we would use + # for that, then re-mask the result. + from pandas.core.dtypes.cast import find_common_type + + new_dtype = find_common_type( + [ + dtype.numpy_dtype if isinstance(dtype, BaseMaskedDtype) else dtype + for dtype in dtypes + ] + ) + if not isinstance(new_dtype, np.dtype): + # If we ever support e.g. Masked[DatetimeArray] then this will change + return None + try: + return type(self).from_numpy_dtype(new_dtype) + except (KeyError, NotImplementedError): + return None + + +@register_extension_dtype +class SparseDtype(ExtensionDtype): + """ + Dtype for data stored in :class:`SparseArray`. + + This dtype implements the pandas ExtensionDtype interface. + + Parameters + ---------- + dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64 + The dtype of the underlying array storing the non-fill value values. + fill_value : scalar, optional + The scalar value not stored in the SparseArray. By default, this + depends on `dtype`. + + =========== ========== + dtype na_value + =========== ========== + float ``np.nan`` + int ``0`` + bool ``False`` + datetime64 ``pd.NaT`` + timedelta64 ``pd.NaT`` + =========== ========== + + The default value may be overridden by specifying a `fill_value`. + + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> ser = pd.Series([1, 0, 0], dtype=pd.SparseDtype(dtype=int, fill_value=0)) + >>> ser + 0 1 + 1 0 + 2 0 + dtype: Sparse[int64, 0] + >>> ser.sparse.density + 0.3333333333333333 + """ + + _is_immutable = True + + # We include `_is_na_fill_value` in the metadata to avoid hash collisions + # between SparseDtype(float, 0.0) and SparseDtype(float, nan). + # Without is_na_fill_value in the comparison, those would be equal since + # hash(nan) is (sometimes?) 0. 
+ _metadata = ("_dtype", "_fill_value", "_is_na_fill_value") + + def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None: + if isinstance(dtype, type(self)): + if fill_value is None: + fill_value = dtype.fill_value + dtype = dtype.subtype + + from pandas.core.dtypes.common import ( + is_string_dtype, + pandas_dtype, + ) + from pandas.core.dtypes.missing import na_value_for_dtype + + dtype = pandas_dtype(dtype) + if is_string_dtype(dtype): + dtype = np.dtype("object") + if not isinstance(dtype, np.dtype): + # GH#53160 + raise TypeError("SparseDtype subtype must be a numpy dtype") + + if fill_value is None: + fill_value = na_value_for_dtype(dtype) + + self._dtype = dtype + self._fill_value = fill_value + self._check_fill_value() + + def __hash__(self) -> int: + # Python3 doesn't inherit __hash__ when a base class overrides + # __eq__, so we explicitly do it here. + return super().__hash__() + + def __eq__(self, other: Any) -> bool: + # We have to override __eq__ to handle NA values in _metadata. + # The base class does simple == checks, which fail for NA. + if isinstance(other, str): + try: + other = self.construct_from_string(other) + except TypeError: + return False + + if isinstance(other, type(self)): + subtype = self.subtype == other.subtype + if self._is_na_fill_value: + # this case is complicated by two things: + # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan) + # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT) + # i.e. we want to treat any floating-point NaN as equal, but + # not a floating-point NaN and a datetime NaT. + fill_value = ( + other._is_na_fill_value + and isinstance(self.fill_value, type(other.fill_value)) + or isinstance(other.fill_value, type(self.fill_value)) + ) + else: + with warnings.catch_warnings(): + # Ignore spurious numpy warning + warnings.filterwarnings( + "ignore", + "elementwise comparison failed", + category=DeprecationWarning, + ) + + fill_value = self.fill_value == other.fill_value + + return subtype and fill_value + return False + + @property + def fill_value(self): + """ + The fill value of the array. + + Converting the SparseArray to a dense ndarray will fill the + array with this value. + + .. warning:: + + It's possible to end up with a SparseArray that has ``fill_value`` + values in ``sp_values``. This can occur, for example, when setting + ``SparseArray.fill_value`` directly. + """ + return self._fill_value + + def _check_fill_value(self): + if not lib.is_scalar(self._fill_value): + raise ValueError( + f"fill_value must be a scalar. Got {self._fill_value} instead" + ) + + from pandas.core.dtypes.cast import can_hold_element + from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + ) + + from pandas.core.construction import ensure_wrapped_if_datetimelike + + # GH#23124 require fill_value and subtype to match + val = self._fill_value + if isna(val): + if not is_valid_na_for_dtype(val, self.subtype): + warnings.warn( + "Allowing arbitrary scalar fill_value in SparseDtype is " + "deprecated. In a future version, the fill_value must be " + "a valid value for the SparseDtype.subtype.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + dummy = np.empty(0, dtype=self.subtype) + dummy = ensure_wrapped_if_datetimelike(dummy) + + if not can_hold_element(dummy, val): + warnings.warn( + "Allowing arbitrary scalar fill_value in SparseDtype is " + "deprecated. 
In a future version, the fill_value must be " + "a valid value for the SparseDtype.subtype.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + @property + def _is_na_fill_value(self) -> bool: + from pandas import isna + + return isna(self.fill_value) + + @property + def _is_numeric(self) -> bool: + return not self.subtype == object + + @property + def _is_boolean(self) -> bool: + return self.subtype.kind == "b" + + @property + def kind(self) -> str: + """ + The sparse kind. Either 'integer', or 'block'. + """ + return self.subtype.kind + + @property + def type(self): + return self.subtype.type + + @property + def subtype(self): + return self._dtype + + @property + def name(self) -> str: + return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]" + + def __repr__(self) -> str: + return self.name + + @classmethod + def construct_array_type(cls) -> type_t[SparseArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + from pandas.core.arrays.sparse.array import SparseArray + + return SparseArray + + @classmethod + def construct_from_string(cls, string: str) -> SparseDtype: + """ + Construct a SparseDtype from a string form. + + Parameters + ---------- + string : str + Can take the following forms. + + string dtype + ================ ============================ + 'int' SparseDtype[np.int64, 0] + 'Sparse' SparseDtype[np.float64, nan] + 'Sparse[int]' SparseDtype[np.int64, 0] + 'Sparse[int, 0]' SparseDtype[np.int64, 0] + ================ ============================ + + It is not possible to specify non-default fill values + with a string. An argument like ``'Sparse[int, 1]'`` + will raise a ``TypeError`` because the default fill value + for integers is 0. + + Returns + ------- + SparseDtype + """ + if not isinstance(string, str): + raise TypeError( + f"'construct_from_string' expects a string, got {type(string)}" + ) + msg = f"Cannot construct a 'SparseDtype' from '{string}'" + if string.startswith("Sparse"): + try: + sub_type, has_fill_value = cls._parse_subtype(string) + except ValueError as err: + raise TypeError(msg) from err + else: + result = SparseDtype(sub_type) + msg = ( + f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt " + "looks like the fill_value in the string is not " + "the default for the dtype. Non-default fill_values " + "are not supported. Use the 'SparseDtype()' " + "constructor instead." + ) + if has_fill_value and str(result) != string: + raise TypeError(msg) + return result + else: + raise TypeError(msg) + + @staticmethod + def _parse_subtype(dtype: str) -> tuple[str, bool]: + """ + Parse a string to get the subtype + + Parameters + ---------- + dtype : str + A string like + + * Sparse[subtype] + * Sparse[subtype, fill_value] + + Returns + ------- + subtype : str + + Raises + ------ + ValueError + When the subtype cannot be extracted. 
+ """ + xpr = re.compile(r"Sparse\[(?P[^,]*)(, )?(?P.*?)?\]$") + m = xpr.match(dtype) + has_fill_value = False + if m: + subtype = m.groupdict()["subtype"] + has_fill_value = bool(m.groupdict()["fill_value"]) + elif dtype == "Sparse": + subtype = "float64" + else: + raise ValueError(f"Cannot parse {dtype}") + return subtype, has_fill_value + + @classmethod + def is_dtype(cls, dtype: object) -> bool: + dtype = getattr(dtype, "dtype", dtype) + if isinstance(dtype, str) and dtype.startswith("Sparse"): + sub_type, _ = cls._parse_subtype(dtype) + dtype = np.dtype(sub_type) + elif isinstance(dtype, cls): + return True + return isinstance(dtype, np.dtype) or dtype == "Sparse" + + def update_dtype(self, dtype) -> SparseDtype: + """ + Convert the SparseDtype to a new dtype. + + This takes care of converting the ``fill_value``. + + Parameters + ---------- + dtype : Union[str, numpy.dtype, SparseDtype] + The new dtype to use. + + * For a SparseDtype, it is simply returned + * For a NumPy dtype (or str), the current fill value + is converted to the new dtype, and a SparseDtype + with `dtype` and the new fill value is returned. + + Returns + ------- + SparseDtype + A new SparseDtype with the correct `dtype` and fill value + for that `dtype`. + + Raises + ------ + ValueError + When the current fill value cannot be converted to the + new `dtype` (e.g. trying to convert ``np.nan`` to an + integer dtype). + + + Examples + -------- + >>> SparseDtype(int, 0).update_dtype(float) + Sparse[float64, 0.0] + + >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan)) + Sparse[float64, nan] + """ + from pandas.core.dtypes.astype import astype_array + from pandas.core.dtypes.common import pandas_dtype + + cls = type(self) + dtype = pandas_dtype(dtype) + + if not isinstance(dtype, cls): + if not isinstance(dtype, np.dtype): + raise TypeError("sparse arrays of extension dtypes not supported") + + fv_asarray = np.atleast_1d(np.array(self.fill_value)) + fvarr = astype_array(fv_asarray, dtype) + # NB: not fv_0d.item(), as that casts dt64->int + fill_value = fvarr[0] + dtype = cls(dtype, fill_value=fill_value) + + return dtype + + @property + def _subtype_with_str(self): + """ + Whether the SparseDtype's subtype should be considered ``str``. + + Typically, pandas will store string data in an object-dtype array. + When converting values to a dtype, e.g. in ``.astype``, we need to + be more specific, we need the actual underlying type. + + Returns + ------- + >>> SparseDtype(int, 1)._subtype_with_str + dtype('int64') + + >>> SparseDtype(object, 1)._subtype_with_str + dtype('O') + + >>> dtype = SparseDtype(str, '') + >>> dtype.subtype + dtype('O') + + >>> dtype._subtype_with_str + + """ + if isinstance(self.fill_value, str): + return type(self.fill_value) + return self.subtype + + def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: + # TODO for now only handle SparseDtypes and numpy dtypes => extend + # with other compatible extension dtypes + from pandas.core.dtypes.cast import np_find_common_type + + if any( + isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype) + for x in dtypes + ): + return None + + fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)] + fill_value = fill_values[0] + + from pandas import isna + + # np.nan isn't a singleton, so we may end up with multiple + # NaNs here, so we ignore the all NA case too. 
+        if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
+            warnings.warn(
+                "Concatenating sparse arrays with multiple fill "
+                f"values: '{fill_values}'. Picking the first and "
+                "converting the rest.",
+                PerformanceWarning,
+                stacklevel=find_stack_level(),
+            )
+
+        np_dtypes = (x.subtype if isinstance(x, SparseDtype) else x for x in dtypes)
+        return SparseDtype(np_find_common_type(*np_dtypes), fill_value=fill_value)
+
+
+@register_extension_dtype
+class ArrowDtype(StorageExtensionDtype):
+    """
+    An ExtensionDtype for PyArrow data types.
+
+    .. warning::
+
+       ArrowDtype is considered experimental. The implementation and
+       parts of the API may change without warning.
+
+    While most ``dtype`` arguments can accept the "string"
+    constructor, e.g. ``"int64[pyarrow]"``, ArrowDtype is useful
+    if the data type contains parameters like ``pyarrow.timestamp``.
+
+    Parameters
+    ----------
+    pyarrow_dtype : pa.DataType
+        An instance of a `pyarrow.DataType
+        <https://arrow.apache.org/docs/python/api/datatypes.html>`__.
+
+    Attributes
+    ----------
+    pyarrow_dtype
+
+    Methods
+    -------
+    None
+
+    Returns
+    -------
+    ArrowDtype
+
+    Examples
+    --------
+    >>> import pyarrow as pa
+    >>> pd.ArrowDtype(pa.int64())
+    int64[pyarrow]
+
+    Types with parameters must be constructed with ArrowDtype.
+
+    >>> pd.ArrowDtype(pa.timestamp("s", tz="America/New_York"))
+    timestamp[s, tz=America/New_York][pyarrow]
+    >>> pd.ArrowDtype(pa.list_(pa.int64()))
+    list<item: int64>[pyarrow]
+    """
+
+    _metadata = ("storage", "pyarrow_dtype")  # type: ignore[assignment]
+
+    def __init__(self, pyarrow_dtype: pa.DataType) -> None:
+        super().__init__("pyarrow")
+        if pa_version_under7p0:
+            raise ImportError("pyarrow>=7.0.0 is required for ArrowDtype")
+        if not isinstance(pyarrow_dtype, pa.DataType):
+            raise ValueError(
+                f"pyarrow_dtype ({pyarrow_dtype}) must be an instance "
+                f"of a pyarrow.DataType. Got {type(pyarrow_dtype)} instead."
+            )
+        self.pyarrow_dtype = pyarrow_dtype
+
+    def __repr__(self) -> str:
+        return self.name
+
+    def __hash__(self) -> int:
+        # make myself hashable
+        return hash(str(self))
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, type(self)):
+            return super().__eq__(other)
+        return self.pyarrow_dtype == other.pyarrow_dtype
+
+    @property
+    def type(self):
+        """
+        Returns associated scalar type.
+        """
+        pa_type = self.pyarrow_dtype
+        if pa.types.is_integer(pa_type):
+            return int
+        elif pa.types.is_floating(pa_type):
+            return float
+        elif pa.types.is_string(pa_type) or pa.types.is_large_string(pa_type):
+            return str
+        elif (
+            pa.types.is_binary(pa_type)
+            or pa.types.is_fixed_size_binary(pa_type)
+            or pa.types.is_large_binary(pa_type)
+        ):
+            return bytes
+        elif pa.types.is_boolean(pa_type):
+            return bool
+        elif pa.types.is_duration(pa_type):
+            if pa_type.unit == "ns":
+                return Timedelta
+            else:
+                return timedelta
+        elif pa.types.is_timestamp(pa_type):
+            if pa_type.unit == "ns":
+                return Timestamp
+            else:
+                return datetime
+        elif pa.types.is_date(pa_type):
+            return date
+        elif pa.types.is_time(pa_type):
+            return time
+        elif pa.types.is_decimal(pa_type):
+            return Decimal
+        elif pa.types.is_dictionary(pa_type):
+            # TODO: Potentially change this & CategoricalDtype.type to
+            #  something more representative of the scalar
+            return CategoricalDtypeType
+        elif pa.types.is_list(pa_type) or pa.types.is_large_list(pa_type):
+            return list
+        elif pa.types.is_fixed_size_list(pa_type):
+            return list
+        elif pa.types.is_map(pa_type):
+            return list
+        elif pa.types.is_struct(pa_type):
+            return dict
+        elif pa.types.is_null(pa_type):
+            # TODO: None? pd.NA? pa.null?
+ return type(pa_type) + elif isinstance(pa_type, pa.ExtensionType): + return type(self)(pa_type.storage_type).type + raise NotImplementedError(pa_type) + + @property + def name(self) -> str: # type: ignore[override] + """ + A string identifying the data type. + """ + return f"{str(self.pyarrow_dtype)}[{self.storage}]" + + @cache_readonly + def numpy_dtype(self) -> np.dtype: + """Return an instance of the related numpy dtype""" + if pa.types.is_timestamp(self.pyarrow_dtype): + # pa.timestamp(unit).to_pandas_dtype() returns ns units + # regardless of the pyarrow timestamp units. + # This can be removed if/when pyarrow addresses it: + # https://github.com/apache/arrow/issues/34462 + return np.dtype(f"datetime64[{self.pyarrow_dtype.unit}]") + if pa.types.is_duration(self.pyarrow_dtype): + # pa.duration(unit).to_pandas_dtype() returns ns units + # regardless of the pyarrow duration units + # This can be removed if/when pyarrow addresses it: + # https://github.com/apache/arrow/issues/34462 + return np.dtype(f"timedelta64[{self.pyarrow_dtype.unit}]") + if pa.types.is_string(self.pyarrow_dtype): + # pa.string().to_pandas_dtype() = object which we don't want + return np.dtype(str) + try: + return np.dtype(self.pyarrow_dtype.to_pandas_dtype()) + except (NotImplementedError, TypeError): + return np.dtype(object) + + @cache_readonly + def kind(self) -> str: + if pa.types.is_timestamp(self.pyarrow_dtype): + # To mirror DatetimeTZDtype + return "M" + return self.numpy_dtype.kind + + @cache_readonly + def itemsize(self) -> int: + """Return the number of bytes in this dtype""" + return self.numpy_dtype.itemsize + + @classmethod + def construct_array_type(cls) -> type_t[ArrowExtensionArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + from pandas.core.arrays.arrow import ArrowExtensionArray + + return ArrowExtensionArray + + @classmethod + def construct_from_string(cls, string: str) -> ArrowDtype: + """ + Construct this type from a string. + + Parameters + ---------- + string : str + string should follow the format f"{pyarrow_type}[pyarrow]" + e.g. int64[pyarrow] + """ + if not isinstance(string, str): + raise TypeError( + f"'construct_from_string' expects a string, got {type(string)}" + ) + if not string.endswith("[pyarrow]"): + raise TypeError(f"'{string}' must end with '[pyarrow]'") + if string == "string[pyarrow]": + # Ensure Registry.find skips ArrowDtype to use StringDtype instead + raise TypeError("string[pyarrow] should be constructed by StringDtype") + + base_type = string[:-9] # get rid of "[pyarrow]" + try: + pa_dtype = pa.type_for_alias(base_type) + except ValueError as err: + has_parameters = re.search(r"[\[\(].*[\]\)]", base_type) + if has_parameters: + # Fallback to try common temporal types + try: + return cls._parse_temporal_dtype_string(base_type) + except (NotImplementedError, ValueError): + # Fall through to raise with nice exception message below + pass + + raise NotImplementedError( + "Passing pyarrow type specific parameters " + f"({has_parameters.group()}) in the string is not supported. " + "Please construct an ArrowDtype object with a pyarrow_dtype " + "instance with specific parameters." + ) from err + raise TypeError(f"'{base_type}' is not a valid pyarrow data type.") from err + return cls(pa_dtype) + + # TODO(arrow#33642): This can be removed once supported by pyarrow + @classmethod + def _parse_temporal_dtype_string(cls, string: str) -> ArrowDtype: + """ + Construct a temporal ArrowDtype from string. 
+ """ + # we assume + # 1) "[pyarrow]" has already been stripped from the end of our string. + # 2) we know "[" is present + head, tail = string.split("[", 1) + + if not tail.endswith("]"): + raise ValueError + tail = tail[:-1] + + if head == "timestamp": + assert "," in tail # otherwise type_for_alias should work + unit, tz = tail.split(",", 1) + unit = unit.strip() + tz = tz.strip() + if tz.startswith("tz="): + tz = tz[3:] + + pa_type = pa.timestamp(unit, tz=tz) + dtype = cls(pa_type) + return dtype + + raise NotImplementedError(string) + + @property + def _is_numeric(self) -> bool: + """ + Whether columns with this dtype should be considered numeric. + """ + # TODO: pa.types.is_boolean? + return ( + pa.types.is_integer(self.pyarrow_dtype) + or pa.types.is_floating(self.pyarrow_dtype) + or pa.types.is_decimal(self.pyarrow_dtype) + ) + + @property + def _is_boolean(self) -> bool: + """ + Whether this dtype should be considered boolean. + """ + return pa.types.is_boolean(self.pyarrow_dtype) + + def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: + # We unwrap any masked dtypes, find the common dtype we would use + # for that, then re-mask the result. + # Mirrors BaseMaskedDtype + from pandas.core.dtypes.cast import find_common_type + + null_dtype = type(self)(pa.null()) + + new_dtype = find_common_type( + [ + dtype.numpy_dtype if isinstance(dtype, ArrowDtype) else dtype + for dtype in dtypes + if dtype != null_dtype + ] + ) + if not isinstance(new_dtype, np.dtype): + return None + try: + pa_dtype = pa.from_numpy_dtype(new_dtype) + return type(self)(pa_dtype) + except NotImplementedError: + return None + + def __from_arrow__(self, array: pa.Array | pa.ChunkedArray): + """ + Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray. 
+ """ + array_class = self.construct_array_type() + arr = array.cast(self.pyarrow_dtype, safe=True) + return array_class(arr) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/generic.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/generic.py new file mode 100644 index 00000000..9718ad60 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/generic.py @@ -0,0 +1,147 @@ +""" define generic base classes for pandas objects """ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Type, + cast, +) + +if TYPE_CHECKING: + from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + DatetimeIndex, + Index, + IntervalIndex, + MultiIndex, + PeriodIndex, + RangeIndex, + Series, + TimedeltaIndex, + ) + from pandas.core.arrays import ( + DatetimeArray, + ExtensionArray, + NumpyExtensionArray, + PeriodArray, + TimedeltaArray, + ) + from pandas.core.generic import NDFrame + + +# define abstract base classes to enable isinstance type checking on our +# objects +def create_pandas_abc_type(name, attr, comp): + def _check(inst) -> bool: + return getattr(inst, attr, "_typ") in comp + + # https://github.com/python/mypy/issues/1006 + # error: 'classmethod' used with a non-method + @classmethod # type: ignore[misc] + def _instancecheck(cls, inst) -> bool: + return _check(inst) and not isinstance(inst, type) + + @classmethod # type: ignore[misc] + def _subclasscheck(cls, inst) -> bool: + # Raise instead of returning False + # This is consistent with default __subclasscheck__ behavior + if not isinstance(inst, type): + raise TypeError("issubclass() arg 1 must be a class") + + return _check(inst) + + dct = {"__instancecheck__": _instancecheck, "__subclasscheck__": _subclasscheck} + meta = type("ABCBase", (type,), dct) + return meta(name, (), dct) + + +ABCRangeIndex = cast( + "Type[RangeIndex]", + create_pandas_abc_type("ABCRangeIndex", "_typ", ("rangeindex",)), +) +ABCMultiIndex = cast( + "Type[MultiIndex]", + create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",)), +) +ABCDatetimeIndex = cast( + "Type[DatetimeIndex]", + create_pandas_abc_type("ABCDatetimeIndex", "_typ", ("datetimeindex",)), +) +ABCTimedeltaIndex = cast( + "Type[TimedeltaIndex]", + create_pandas_abc_type("ABCTimedeltaIndex", "_typ", ("timedeltaindex",)), +) +ABCPeriodIndex = cast( + "Type[PeriodIndex]", + create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",)), +) +ABCCategoricalIndex = cast( + "Type[CategoricalIndex]", + create_pandas_abc_type("ABCCategoricalIndex", "_typ", ("categoricalindex",)), +) +ABCIntervalIndex = cast( + "Type[IntervalIndex]", + create_pandas_abc_type("ABCIntervalIndex", "_typ", ("intervalindex",)), +) +ABCIndex = cast( + "Type[Index]", + create_pandas_abc_type( + "ABCIndex", + "_typ", + { + "index", + "rangeindex", + "multiindex", + "datetimeindex", + "timedeltaindex", + "periodindex", + "categoricalindex", + "intervalindex", + }, + ), +) + + +ABCNDFrame = cast( + "Type[NDFrame]", + create_pandas_abc_type("ABCNDFrame", "_typ", ("series", "dataframe")), +) +ABCSeries = cast( + "Type[Series]", + create_pandas_abc_type("ABCSeries", "_typ", ("series",)), +) +ABCDataFrame = cast( + "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) +) + +ABCCategorical = cast( + "Type[Categorical]", + create_pandas_abc_type("ABCCategorical", "_typ", ("categorical")), +) +ABCDatetimeArray = cast( + "Type[DatetimeArray]", + create_pandas_abc_type("ABCDatetimeArray", "_typ", ("datetimearray")), +) 
+ABCTimedeltaArray = cast( + "Type[TimedeltaArray]", + create_pandas_abc_type("ABCTimedeltaArray", "_typ", ("timedeltaarray")), +) +ABCPeriodArray = cast( + "Type[PeriodArray]", + create_pandas_abc_type("ABCPeriodArray", "_typ", ("periodarray",)), +) +ABCExtensionArray = cast( + "Type[ExtensionArray]", + create_pandas_abc_type( + "ABCExtensionArray", + "_typ", + # Note: IntervalArray and SparseArray are included bc they have _typ="extension" + {"extension", "categorical", "periodarray", "datetimearray", "timedeltaarray"}, + ), +) +ABCNumpyExtensionArray = cast( + "Type[NumpyExtensionArray]", + create_pandas_abc_type("ABCNumpyExtensionArray", "_typ", ("npy_extension",)), +) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/inference.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/inference.py new file mode 100644 index 00000000..9c04e57b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/inference.py @@ -0,0 +1,437 @@ +""" basic inference routines """ + +from __future__ import annotations + +from collections import abc +from numbers import Number +import re +from re import Pattern +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs import lib + +if TYPE_CHECKING: + from collections.abc import Hashable + + from pandas._typing import TypeGuard + +is_bool = lib.is_bool + +is_integer = lib.is_integer + +is_float = lib.is_float + +is_complex = lib.is_complex + +is_scalar = lib.is_scalar + +is_decimal = lib.is_decimal + +is_interval = lib.is_interval + +is_list_like = lib.is_list_like + +is_iterator = lib.is_iterator + + +def is_number(obj) -> TypeGuard[Number | np.number]: + """ + Check if the object is a number. + + Returns True when the object is a number, and False if is not. + + Parameters + ---------- + obj : any type + The object to check if is a number. + + Returns + ------- + bool + Whether `obj` is a number or not. + + See Also + -------- + api.types.is_integer: Checks a subgroup of numbers. + + Examples + -------- + >>> from pandas.api.types import is_number + >>> is_number(1) + True + >>> is_number(7.15) + True + + Booleans are valid because they are int subclass. + + >>> is_number(False) + True + + >>> is_number("foo") + False + >>> is_number("5") + False + """ + return isinstance(obj, (Number, np.number)) + + +def iterable_not_string(obj) -> bool: + """ + Check if the object is an iterable but not a string. + + Parameters + ---------- + obj : The object to check. + + Returns + ------- + is_iter_not_string : bool + Whether `obj` is a non-string iterable. + + Examples + -------- + >>> iterable_not_string([1, 2, 3]) + True + >>> iterable_not_string("foo") + False + >>> iterable_not_string(1) + False + """ + return isinstance(obj, abc.Iterable) and not isinstance(obj, str) + + +def is_file_like(obj) -> bool: + """ + Check if the object is a file-like object. + + For objects to be considered file-like, they must + be an iterator AND have either a `read` and/or `write` + method as an attribute. + + Note: file-like objects must be iterable, but + iterable objects need not be file-like. + + Parameters + ---------- + obj : The object to check + + Returns + ------- + bool + Whether `obj` has file-like properties. 
+ + Examples + -------- + >>> import io + >>> from pandas.api.types import is_file_like + >>> buffer = io.StringIO("data") + >>> is_file_like(buffer) + True + >>> is_file_like([1, 2, 3]) + False + """ + if not (hasattr(obj, "read") or hasattr(obj, "write")): + return False + + return bool(hasattr(obj, "__iter__")) + + +def is_re(obj) -> TypeGuard[Pattern]: + """ + Check if the object is a regex pattern instance. + + Parameters + ---------- + obj : The object to check + + Returns + ------- + bool + Whether `obj` is a regex pattern. + + Examples + -------- + >>> from pandas.api.types import is_re + >>> import re + >>> is_re(re.compile(".*")) + True + >>> is_re("foo") + False + """ + return isinstance(obj, Pattern) + + +def is_re_compilable(obj) -> bool: + """ + Check if the object can be compiled into a regex pattern instance. + + Parameters + ---------- + obj : The object to check + + Returns + ------- + bool + Whether `obj` can be compiled as a regex pattern. + + Examples + -------- + >>> from pandas.api.types import is_re_compilable + >>> is_re_compilable(".*") + True + >>> is_re_compilable(1) + False + """ + try: + re.compile(obj) + except TypeError: + return False + else: + return True + + +def is_array_like(obj) -> bool: + """ + Check if the object is array-like. + + For an object to be considered array-like, it must be list-like and + have a `dtype` attribute. + + Parameters + ---------- + obj : The object to check + + Returns + ------- + is_array_like : bool + Whether `obj` has array-like properties. + + Examples + -------- + >>> is_array_like(np.array([1, 2, 3])) + True + >>> is_array_like(pd.Series(["a", "b"])) + True + >>> is_array_like(pd.Index(["2016-01-01"])) + True + >>> is_array_like([1, 2, 3]) + False + >>> is_array_like(("a", "b")) + False + """ + return is_list_like(obj) and hasattr(obj, "dtype") + + +def is_nested_list_like(obj) -> bool: + """ + Check if the object is list-like, and that all of its elements + are also list-like. + + Parameters + ---------- + obj : The object to check + + Returns + ------- + is_list_like : bool + Whether `obj` has list-like properties. + + Examples + -------- + >>> is_nested_list_like([[1, 2, 3]]) + True + >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}]) + True + >>> is_nested_list_like(["foo"]) + False + >>> is_nested_list_like([]) + False + >>> is_nested_list_like([[1, 2, 3], 1]) + False + + Notes + ----- + This won't reliably detect whether a consumable iterator (e. g. + a generator) is a nested-list-like without consuming the iterator. + To avoid consuming it, we always return False if the outer container + doesn't define `__len__`. + + See Also + -------- + is_list_like + """ + return ( + is_list_like(obj) + and hasattr(obj, "__len__") + and len(obj) > 0 + and all(is_list_like(item) for item in obj) + ) + + +def is_dict_like(obj) -> bool: + """ + Check if the object is dict-like. + + Parameters + ---------- + obj : The object to check + + Returns + ------- + bool + Whether `obj` has dict-like properties. + + Examples + -------- + >>> from pandas.api.types import is_dict_like + >>> is_dict_like({1: 2}) + True + >>> is_dict_like([1, 2, 3]) + False + >>> is_dict_like(dict) + False + >>> is_dict_like(dict()) + True + """ + dict_like_attrs = ("__getitem__", "keys", "__contains__") + return ( + all(hasattr(obj, attr) for attr in dict_like_attrs) + # [GH 25196] exclude classes + and not isinstance(obj, type) + ) + + +def is_named_tuple(obj) -> bool: + """ + Check if the object is a named tuple. 
+ + Parameters + ---------- + obj : The object to check + + Returns + ------- + bool + Whether `obj` is a named tuple. + + Examples + -------- + >>> from collections import namedtuple + >>> from pandas.api.types import is_named_tuple + >>> Point = namedtuple("Point", ["x", "y"]) + >>> p = Point(1, 2) + >>> + >>> is_named_tuple(p) + True + >>> is_named_tuple((1, 2)) + False + """ + return isinstance(obj, abc.Sequence) and hasattr(obj, "_fields") + + +def is_hashable(obj) -> TypeGuard[Hashable]: + """ + Return True if hash(obj) will succeed, False otherwise. + + Some types will pass a test against collections.abc.Hashable but fail when + they are actually hashed with hash(). + + Distinguish between these and other types by trying the call to hash() and + seeing if they raise TypeError. + + Returns + ------- + bool + + Examples + -------- + >>> import collections + >>> from pandas.api.types import is_hashable + >>> a = ([],) + >>> isinstance(a, collections.abc.Hashable) + True + >>> is_hashable(a) + False + """ + # Unfortunately, we can't use isinstance(obj, collections.abc.Hashable), + # which can be faster than calling hash. That is because numpy scalars + # fail this test. + + # Reconsider this decision once this numpy bug is fixed: + # https://github.com/numpy/numpy/issues/5562 + + try: + hash(obj) + except TypeError: + return False + else: + return True + + +def is_sequence(obj) -> bool: + """ + Check if the object is a sequence of objects. + String types are not included as sequences here. + + Parameters + ---------- + obj : The object to check + + Returns + ------- + is_sequence : bool + Whether `obj` is a sequence of objects. + + Examples + -------- + >>> l = [1, 2, 3] + >>> + >>> is_sequence(l) + True + >>> is_sequence(iter(l)) + False + """ + try: + iter(obj) # Can iterate over it. + len(obj) # Has a length associated with it. + return not isinstance(obj, (str, bytes)) + except (TypeError, AttributeError): + return False + + +def is_dataclass(item): + """ + Checks if the object is a data-class instance + + Parameters + ---------- + item : object + + Returns + -------- + is_dataclass : bool + True if the item is an instance of a data-class, + will return false if you pass the data class itself + + Examples + -------- + >>> from dataclasses import dataclass + >>> @dataclass + ... class Point: + ... x: int + ... 
y: int + + >>> is_dataclass(Point) + False + >>> is_dataclass(Point(0,2)) + True + + """ + try: + import dataclasses + + return dataclasses.is_dataclass(item) and not isinstance(item, type) + except ImportError: + return False diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/missing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/missing.py new file mode 100644 index 00000000..8760c8ee --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/dtypes/missing.py @@ -0,0 +1,778 @@ +""" +missing types & inference +""" +from __future__ import annotations + +from decimal import Decimal +from functools import partial +from typing import ( + TYPE_CHECKING, + overload, +) +import warnings + +import numpy as np + +from pandas._config import get_option + +from pandas._libs import lib +import pandas._libs.missing as libmissing +from pandas._libs.tslibs import ( + NaT, + iNaT, +) + +from pandas.core.dtypes.common import ( + DT64NS_DTYPE, + TD64NS_DTYPE, + ensure_object, + is_scalar, + is_string_or_object_np_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + IntervalDtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCExtensionArray, + ABCIndex, + ABCMultiIndex, + ABCSeries, +) +from pandas.core.dtypes.inference import is_list_like + +if TYPE_CHECKING: + from re import Pattern + + from pandas._typing import ( + ArrayLike, + DtypeObj, + NDFrame, + NDFrameT, + Scalar, + npt, + ) + + from pandas import Series + from pandas.core.indexes.base import Index + + +isposinf_scalar = libmissing.isposinf_scalar +isneginf_scalar = libmissing.isneginf_scalar + +nan_checker = np.isnan +INF_AS_NA = False +_dtype_object = np.dtype("object") +_dtype_str = np.dtype(str) + + +@overload +def isna(obj: Scalar | Pattern) -> bool: + ... + + +@overload +def isna( + obj: ArrayLike | Index | list, +) -> npt.NDArray[np.bool_]: + ... + + +@overload +def isna(obj: NDFrameT) -> NDFrameT: + ... + + +# handle unions +@overload +def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: + ... + + +@overload +def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: + ... + + +def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: + """ + Detect missing values for an array-like object. + + This function takes a scalar or array-like object and indicates + whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` + in object arrays, ``NaT`` in datetimelike). + + Parameters + ---------- + obj : scalar or array-like + Object to check for null or missing values. + + Returns + ------- + bool or array-like of bool + For scalar input, returns a scalar boolean. + For array input, returns an array of boolean indicating whether each + corresponding element is missing. + + See Also + -------- + notna : Boolean inverse of pandas.isna. + Series.isna : Detect missing values in a Series. + DataFrame.isna : Detect missing values in a DataFrame. + Index.isna : Detect missing values in an Index. + + Examples + -------- + Scalar arguments (including strings) result in a scalar boolean. + + >>> pd.isna('dog') + False + + >>> pd.isna(pd.NA) + True + + >>> pd.isna(np.nan) + True + + ndarrays result in an ndarray of booleans. 
+ + >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) + >>> array + array([[ 1., nan, 3.], + [ 4., 5., nan]]) + >>> pd.isna(array) + array([[False, True, False], + [False, False, True]]) + + For indexes, an ndarray of booleans is returned. + + >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, + ... "2017-07-08"]) + >>> index + DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], + dtype='datetime64[ns]', freq=None) + >>> pd.isna(index) + array([False, False, True, False]) + + For Series and DataFrame, the same type is returned, containing booleans. + + >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) + >>> df + 0 1 2 + 0 ant bee cat + 1 dog None fly + >>> pd.isna(df) + 0 1 2 + 0 False False False + 1 False True False + + >>> pd.isna(df[1]) + 0 False + 1 True + Name: 1, dtype: bool + """ + return _isna(obj) + + +isnull = isna + + +def _isna(obj, inf_as_na: bool = False): + """ + Detect missing values, treating None, NaN or NA as null. Infinite + values will also be treated as null if inf_as_na is True. + + Parameters + ---------- + obj: ndarray or object value + Input array or scalar value. + inf_as_na: bool + Whether to treat infinity as null. + + Returns + ------- + boolean ndarray or boolean + """ + if is_scalar(obj): + return libmissing.checknull(obj, inf_as_na=inf_as_na) + elif isinstance(obj, ABCMultiIndex): + raise NotImplementedError("isna is not defined for MultiIndex") + elif isinstance(obj, type): + return False + elif isinstance(obj, (np.ndarray, ABCExtensionArray)): + return _isna_array(obj, inf_as_na=inf_as_na) + elif isinstance(obj, ABCIndex): + # Try to use cached isna, which also short-circuits for integer dtypes + # and avoids materializing RangeIndex._values + if not obj._can_hold_na: + return obj.isna() + return _isna_array(obj._values, inf_as_na=inf_as_na) + + elif isinstance(obj, ABCSeries): + result = _isna_array(obj._values, inf_as_na=inf_as_na) + # box + result = obj._constructor(result, index=obj.index, name=obj.name, copy=False) + return result + elif isinstance(obj, ABCDataFrame): + return obj.isna() + elif isinstance(obj, list): + return _isna_array(np.asarray(obj, dtype=object), inf_as_na=inf_as_na) + elif hasattr(obj, "__array__"): + return _isna_array(np.asarray(obj), inf_as_na=inf_as_na) + else: + return False + + +def _use_inf_as_na(key) -> None: + """ + Option change callback for na/inf behaviour. + + Choose which replacement for numpy.isnan / -numpy.isfinite is used. + + Parameters + ---------- + flag: bool + True means treat None, NaN, INF, -INF as null (old way), + False means None and NaN are null, but INF, -INF are not null + (new way). + + Notes + ----- + This approach to setting global module values is discussed and + approved here: + + * https://stackoverflow.com/questions/4859217/ + programmatically-creating-variables-in-python/4859312#4859312 + """ + inf_as_na = get_option(key) + globals()["_isna"] = partial(_isna, inf_as_na=inf_as_na) + if inf_as_na: + globals()["nan_checker"] = lambda x: ~np.isfinite(x) + globals()["INF_AS_NA"] = True + else: + globals()["nan_checker"] = np.isnan + globals()["INF_AS_NA"] = False + + +def _isna_array(values: ArrayLike, inf_as_na: bool = False): + """ + Return an array indicating which values of the input array are NaN / NA. + + Parameters + ---------- + obj: ndarray or ExtensionArray + The input array whose elements are to be checked. + inf_as_na: bool + Whether or not to treat infinite values as NA. 
+ + Returns + ------- + array-like + Array of boolean values denoting the NA status of each element. + """ + dtype = values.dtype + + if not isinstance(values, np.ndarray): + # i.e. ExtensionArray + if inf_as_na and isinstance(dtype, CategoricalDtype): + result = libmissing.isnaobj(values.to_numpy(), inf_as_na=inf_as_na) + else: + # error: Incompatible types in assignment (expression has type + # "Union[ndarray[Any, Any], ExtensionArraySupportsAnyAll]", variable has + # type "ndarray[Any, dtype[bool_]]") + result = values.isna() # type: ignore[assignment] + elif isinstance(values, np.rec.recarray): + # GH 48526 + result = _isna_recarray_dtype(values, inf_as_na=inf_as_na) + elif is_string_or_object_np_dtype(values.dtype): + result = _isna_string_dtype(values, inf_as_na=inf_as_na) + elif dtype.kind in "mM": + # this is the NaT pattern + result = values.view("i8") == iNaT + else: + if inf_as_na: + result = ~np.isfinite(values) + else: + result = np.isnan(values) + + return result + + +def _isna_string_dtype(values: np.ndarray, inf_as_na: bool) -> npt.NDArray[np.bool_]: + # Working around NumPy ticket 1542 + dtype = values.dtype + + if dtype.kind in ("S", "U"): + result = np.zeros(values.shape, dtype=bool) + else: + if values.ndim in {1, 2}: + result = libmissing.isnaobj(values, inf_as_na=inf_as_na) + else: + # 0-D, reached via e.g. mask_missing + result = libmissing.isnaobj(values.ravel(), inf_as_na=inf_as_na) + result = result.reshape(values.shape) + + return result + + +def _has_record_inf_value(record_as_array: np.ndarray) -> np.bool_: + is_inf_in_record = np.zeros(len(record_as_array), dtype=bool) + for i, value in enumerate(record_as_array): + is_element_inf = False + try: + is_element_inf = np.isinf(value) + except TypeError: + is_element_inf = False + is_inf_in_record[i] = is_element_inf + + return np.any(is_inf_in_record) + + +def _isna_recarray_dtype( + values: np.rec.recarray, inf_as_na: bool +) -> npt.NDArray[np.bool_]: + result = np.zeros(values.shape, dtype=bool) + for i, record in enumerate(values): + record_as_array = np.array(record.tolist()) + does_record_contain_nan = isna_all(record_as_array) + does_record_contain_inf = False + if inf_as_na: + does_record_contain_inf = bool(_has_record_inf_value(record_as_array)) + result[i] = np.any( + np.logical_or(does_record_contain_nan, does_record_contain_inf) + ) + + return result + + +@overload +def notna(obj: Scalar) -> bool: + ... + + +@overload +def notna( + obj: ArrayLike | Index | list, +) -> npt.NDArray[np.bool_]: + ... + + +@overload +def notna(obj: NDFrameT) -> NDFrameT: + ... + + +# handle unions +@overload +def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: + ... + + +@overload +def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: + ... + + +def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: + """ + Detect non-missing values for an array-like object. + + This function takes a scalar or array-like object and indicates + whether values are valid (not missing, which is ``NaN`` in numeric + arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). + + Parameters + ---------- + obj : array-like or object value + Object to check for *not* null or *non*-missing values. + + Returns + ------- + bool or array-like of bool + For scalar input, returns a scalar boolean. + For array input, returns an array of boolean indicating whether each + corresponding element is valid. + + See Also + -------- + isna : Boolean inverse of pandas.notna. 
+ Series.notna : Detect valid values in a Series. + DataFrame.notna : Detect valid values in a DataFrame. + Index.notna : Detect valid values in an Index. + + Examples + -------- + Scalar arguments (including strings) result in a scalar boolean. + + >>> pd.notna('dog') + True + + >>> pd.notna(pd.NA) + False + + >>> pd.notna(np.nan) + False + + ndarrays result in an ndarray of booleans. + + >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) + >>> array + array([[ 1., nan, 3.], + [ 4., 5., nan]]) + >>> pd.notna(array) + array([[ True, False, True], + [ True, True, False]]) + + For indexes, an ndarray of booleans is returned. + + >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, + ... "2017-07-08"]) + >>> index + DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], + dtype='datetime64[ns]', freq=None) + >>> pd.notna(index) + array([ True, True, False, True]) + + For Series and DataFrame, the same type is returned, containing booleans. + + >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) + >>> df + 0 1 2 + 0 ant bee cat + 1 dog None fly + >>> pd.notna(df) + 0 1 2 + 0 True True True + 1 True False True + + >>> pd.notna(df[1]) + 0 True + 1 False + Name: 1, dtype: bool + """ + res = isna(obj) + if isinstance(res, bool): + return not res + return ~res + + +notnull = notna + + +def array_equivalent( + left, + right, + strict_nan: bool = False, + dtype_equal: bool = False, +) -> bool: + """ + True if two arrays, left and right, have equal non-NaN elements, and NaNs + in corresponding locations. False otherwise. It is assumed that left and + right are NumPy arrays of the same dtype. The behavior of this function + (particularly with respect to NaNs) is not defined if the dtypes are + different. + + Parameters + ---------- + left, right : ndarrays + strict_nan : bool, default False + If True, consider NaN and None to be different. + dtype_equal : bool, default False + Whether `left` and `right` are known to have the same dtype + according to `is_dtype_equal`. Some methods like `BlockManager.equals`. + require that the dtypes match. Setting this to ``True`` can improve + performance, but will give different results for arrays that are + equal but different dtypes. + + Returns + ------- + b : bool + Returns True if the arrays are equivalent. + + Examples + -------- + >>> array_equivalent( + ... np.array([1, 2, np.nan]), + ... np.array([1, 2, np.nan])) + True + >>> array_equivalent( + ... np.array([1, np.nan, 2]), + ... np.array([1, 2, np.nan])) + False + """ + left, right = np.asarray(left), np.asarray(right) + + # shape compat + if left.shape != right.shape: + return False + + if dtype_equal: + # fastpath when we require that the dtypes match (Block.equals) + if left.dtype.kind in "fc": + return _array_equivalent_float(left, right) + elif left.dtype.kind in "mM": + return _array_equivalent_datetimelike(left, right) + elif is_string_or_object_np_dtype(left.dtype): + # TODO: fastpath for pandas' StringDtype + return _array_equivalent_object(left, right, strict_nan) + else: + return np.array_equal(left, right) + + # Slow path when we allow comparing different dtypes. + # Object arrays can contain None, NaN and NaT. + # string dtypes must be come to this path for NumPy 1.7.1 compat + if left.dtype.kind in "OSU" or right.dtype.kind in "OSU": + # Note: `in "OSU"` is non-trivially faster than `in ["O", "S", "U"]` + # or `in ("O", "S", "U")` + return _array_equivalent_object(left, right, strict_nan) + + # NaNs can occur in float and complex arrays. 
+ if left.dtype.kind in "fc": + if not (left.size and right.size): + return True + return ((left == right) | (isna(left) & isna(right))).all() + + elif left.dtype.kind in "mM" or right.dtype.kind in "mM": + # datetime64, timedelta64, Period + if left.dtype != right.dtype: + return False + + left = left.view("i8") + right = right.view("i8") + + # if we have structured dtypes, compare first + if ( + left.dtype.type is np.void or right.dtype.type is np.void + ) and left.dtype != right.dtype: + return False + + return np.array_equal(left, right) + + +def _array_equivalent_float(left: np.ndarray, right: np.ndarray) -> bool: + return bool(((left == right) | (np.isnan(left) & np.isnan(right))).all()) + + +def _array_equivalent_datetimelike(left: np.ndarray, right: np.ndarray): + return np.array_equal(left.view("i8"), right.view("i8")) + + +def _array_equivalent_object(left: np.ndarray, right: np.ndarray, strict_nan: bool): + if not strict_nan: + # isna considers NaN and None to be equivalent. + + return lib.array_equivalent_object(ensure_object(left), ensure_object(right)) + + for left_value, right_value in zip(left, right): + if left_value is NaT and right_value is not NaT: + return False + + elif left_value is libmissing.NA and right_value is not libmissing.NA: + return False + + elif isinstance(left_value, float) and np.isnan(left_value): + if not isinstance(right_value, float) or not np.isnan(right_value): + return False + else: + with warnings.catch_warnings(): + # suppress numpy's "elementwise comparison failed" + warnings.simplefilter("ignore", DeprecationWarning) + try: + if np.any(np.asarray(left_value != right_value)): + return False + except TypeError as err: + if "boolean value of NA is ambiguous" in str(err): + return False + raise + except ValueError: + # numpy can raise a ValueError if left and right cannot be + # compared (e.g. nested arrays) + return False + return True + + +def array_equals(left: ArrayLike, right: ArrayLike) -> bool: + """ + ExtensionArray-compatible implementation of array_equivalent. + """ + if left.dtype != right.dtype: + return False + elif isinstance(left, ABCExtensionArray): + return left.equals(right) + else: + return array_equivalent(left, right, dtype_equal=True) + + +def infer_fill_value(val): + """ + infer the fill value for the nan/NaT from the provided + scalar/ndarray/list-like if we are a NaT, return the correct dtyped + element to provide proper block construction + """ + if not is_list_like(val): + val = [val] + val = np.array(val, copy=False) + if val.dtype.kind in "mM": + return np.array("NaT", dtype=val.dtype) + elif val.dtype == object: + dtype = lib.infer_dtype(ensure_object(val), skipna=False) + if dtype in ["datetime", "datetime64"]: + return np.array("NaT", dtype=DT64NS_DTYPE) + elif dtype in ["timedelta", "timedelta64"]: + return np.array("NaT", dtype=TD64NS_DTYPE) + return np.array(np.nan, dtype=object) + elif val.dtype.kind == "U": + return np.array(np.nan, dtype=val.dtype) + return np.nan + + +def maybe_fill(arr: np.ndarray) -> np.ndarray: + """ + Fill numpy.ndarray with NaN, unless we have a integer or boolean dtype. 
+ """ + if arr.dtype.kind not in "iub": + arr.fill(np.nan) + return arr + + +def na_value_for_dtype(dtype: DtypeObj, compat: bool = True): + """ + Return a dtype compat na value + + Parameters + ---------- + dtype : string / dtype + compat : bool, default True + + Returns + ------- + np.dtype or a pandas dtype + + Examples + -------- + >>> na_value_for_dtype(np.dtype('int64')) + 0 + >>> na_value_for_dtype(np.dtype('int64'), compat=False) + nan + >>> na_value_for_dtype(np.dtype('float64')) + nan + >>> na_value_for_dtype(np.dtype('bool')) + False + >>> na_value_for_dtype(np.dtype('datetime64[ns]')) + numpy.datetime64('NaT') + """ + + if isinstance(dtype, ExtensionDtype): + return dtype.na_value + elif dtype.kind in "mM": + return dtype.type("NaT", "ns") + elif dtype.kind == "f": + return np.nan + elif dtype.kind in "iu": + if compat: + return 0 + return np.nan + elif dtype.kind == "b": + if compat: + return False + return np.nan + return np.nan + + +def remove_na_arraylike(arr: Series | Index | np.ndarray): + """ + Return array-like containing only true/non-NaN values, possibly empty. + """ + if isinstance(arr.dtype, ExtensionDtype): + return arr[notna(arr)] + else: + return arr[notna(np.asarray(arr))] + + +def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool: + """ + isna check that excludes incompatible dtypes + + Parameters + ---------- + obj : object + dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype + + Returns + ------- + bool + """ + if not lib.is_scalar(obj) or not isna(obj): + return False + elif dtype.kind == "M": + if isinstance(dtype, np.dtype): + # i.e. not tzaware + return not isinstance(obj, (np.timedelta64, Decimal)) + # we have to rule out tznaive dt64("NaT") + return not isinstance(obj, (np.timedelta64, np.datetime64, Decimal)) + elif dtype.kind == "m": + return not isinstance(obj, (np.datetime64, Decimal)) + elif dtype.kind in "iufc": + # Numeric + return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64)) + elif dtype.kind == "b": + # We allow pd.NA, None, np.nan in BooleanArray (same as IntervalDtype) + return lib.is_float(obj) or obj is None or obj is libmissing.NA + + elif dtype == _dtype_str: + # numpy string dtypes to avoid float np.nan + return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal, float)) + + elif dtype == _dtype_object: + # This is needed for Categorical, but is kind of weird + return True + + elif isinstance(dtype, PeriodDtype): + return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal)) + + elif isinstance(dtype, IntervalDtype): + return lib.is_float(obj) or obj is None or obj is libmissing.NA + + elif isinstance(dtype, CategoricalDtype): + return is_valid_na_for_dtype(obj, dtype.categories.dtype) + + # fallback, default to allowing NaN, None, NA, NaT + return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal)) + + +def isna_all(arr: ArrayLike) -> bool: + """ + Optimized equivalent to isna(arr).all() + """ + total_len = len(arr) + + # Usually it's enough to check but a small fraction of values to see if + # a block is NOT null, chunks should help in such cases. 
+    # parameters 1000 and 40 were chosen arbitrarily
+    chunk_len = max(total_len // 40, 1000)
+
+    dtype = arr.dtype
+    if lib.is_np_dtype(dtype, "f"):
+        checker = nan_checker
+
+    elif (lib.is_np_dtype(dtype, "mM")) or isinstance(
+        dtype, (DatetimeTZDtype, PeriodDtype)
+    ):
+        # error: Incompatible types in assignment (expression has type
+        # "Callable[[Any], Any]", variable has type "ufunc")
+        checker = lambda x: np.asarray(x.view("i8")) == iNaT  # type: ignore[assignment]
+
+    else:
+        # error: Incompatible types in assignment (expression has type "Callable[[Any],
+        # Any]", variable has type "ufunc")
+        checker = lambda x: _isna_array(  # type: ignore[assignment]
+            x, inf_as_na=INF_AS_NA
+        )
+
+    return all(
+        checker(arr[i : i + chunk_len]).all() for i in range(0, total_len, chunk_len)
+    )
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/flags.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/flags.py
new file mode 100644
index 00000000..038132f9
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/flags.py
@@ -0,0 +1,119 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+import weakref
+
+if TYPE_CHECKING:
+    from pandas.core.generic import NDFrame
+
+
+class Flags:
+    """
+    Flags that apply to pandas objects.
+
+    .. versionadded:: 1.2.0
+
+    Parameters
+    ----------
+    obj : Series or DataFrame
+        The object these flags are associated with.
+    allows_duplicate_labels : bool, default True
+        Whether to allow duplicate labels in this object. By default,
+        duplicate labels are permitted. Setting this to ``False`` will
+        cause an :class:`errors.DuplicateLabelError` to be raised when
+        `index` (or columns for DataFrame) is not unique, or any
+        subsequent operation introduces duplicates.
+        See :ref:`duplicates.disallow` for more.
+
+        .. warning::
+
+           This is an experimental feature. Currently, many methods fail to
+           propagate the ``allows_duplicate_labels`` value. In future versions
+           it is expected that every method taking or returning one or more
+           DataFrame or Series objects will propagate ``allows_duplicate_labels``.
+
+    Examples
+    --------
+    Attributes can be set in two ways:
+
+    >>> df = pd.DataFrame()
+    >>> df.flags
+    <Flags(allows_duplicate_labels=True)>
+    >>> df.flags.allows_duplicate_labels = False
+    >>> df.flags
+    <Flags(allows_duplicate_labels=False)>
+
+    >>> df.flags['allows_duplicate_labels'] = True
+    >>> df.flags
+    <Flags(allows_duplicate_labels=True)>
+    """
+
+    _keys: set[str] = {"allows_duplicate_labels"}
+
+    def __init__(self, obj: NDFrame, *, allows_duplicate_labels: bool) -> None:
+        self._allows_duplicate_labels = allows_duplicate_labels
+        self._obj = weakref.ref(obj)
+
+    @property
+    def allows_duplicate_labels(self) -> bool:
+        """
+        Whether this object allows duplicate labels.
+
+        Setting ``allows_duplicate_labels=False`` ensures that the
+        index (and columns of a DataFrame) are unique. Most methods
+        that accept and return a Series or DataFrame will propagate
+        the value of ``allows_duplicate_labels``.
+
+        See :ref:`duplicates` for more.
+
+        See Also
+        --------
+        DataFrame.attrs : Set global metadata on this object.
+        DataFrame.set_flags : Set global flags on this object.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({"A": [1, 2]}, index=['a', 'a'])
+        >>> df.flags.allows_duplicate_labels
+        True
+        >>> df.flags.allows_duplicate_labels = False
+        Traceback (most recent call last):
+            ...
+        pandas.errors.DuplicateLabelError: Index has duplicates.
+              positions
+        label
+        a        [0, 1]
+        """
+        return self._allows_duplicate_labels
+
+    @allows_duplicate_labels.setter
+    def allows_duplicate_labels(self, value: bool) -> None:
+        value = bool(value)
+        obj = self._obj()
+        if obj is None:
+            raise ValueError("This flag's object has been deleted.")
+
+        if not value:
+            for ax in obj.axes:
+                ax._maybe_check_unique()
+
+        self._allows_duplicate_labels = value
+
+    def __getitem__(self, key: str):
+        if key not in self._keys:
+            raise KeyError(key)
+
+        return getattr(self, key)
+
+    def __setitem__(self, key: str, value) -> None:
+        if key not in self._keys:
+            raise ValueError(f"Unknown flag {key}. Must be one of {self._keys}")
+        setattr(self, key, value)
+
+    def __repr__(self) -> str:
+        return f"<Flags(allows_duplicate_labels={self.allows_duplicate_labels})>"
+
+    def __eq__(self, other) -> bool:
+        if isinstance(other, type(self)):
+            return self.allows_duplicate_labels == other.allows_duplicate_labels
+        return False
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/frame.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/frame.py
new file mode 100644
index 00000000..605cf498
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/frame.py
@@ -0,0 +1,12314 @@
+"""
+DataFrame
+---------
+An efficient 2D container for potentially mixed-type time series or other
+labeled data series.
+
+Similar to its R counterpart, data.frame, except providing automatic data
+alignment and a host of useful data manipulation methods having to do with the
+labeling information
+"""
+from __future__ import annotations
+
+import collections
+from collections import abc
+from collections.abc import (
+    Hashable,
+    Iterable,
+    Iterator,
+    Mapping,
+    Sequence,
+)
+import functools
+from inspect import signature
+from io import StringIO
+import itertools
+import operator
+import sys
+from textwrap import dedent
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Literal,
+    cast,
+    overload,
+)
+import warnings
+
+import numpy as np
+from numpy import ma
+
+from pandas._config import (
+    get_option,
+    using_copy_on_write,
+)
+
+from pandas._libs import (
+    algos as libalgos,
+    lib,
+    properties,
+)
+from pandas._libs.hashtable import duplicated
+from pandas._libs.lib import is_range_indexer
+from pandas.compat import PYPY
+from pandas.compat._constants import REF_COUNT
+from pandas.compat._optional import import_optional_dependency
+from pandas.compat.numpy import function as nv
+from pandas.errors import (
+    ChainedAssignmentError,
+    InvalidIndexError,
+    _chained_assignment_method_msg,
+    _chained_assignment_msg,
+)
+from pandas.util._decorators import (
+    Appender,
+    Substitution,
+    doc,
+)
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import (
+    validate_ascending,
+    validate_bool_kwarg,
+    validate_percentile,
+)
+
+from pandas.core.dtypes.cast import (
+    LossySetitemError,
+    can_hold_element,
+    construct_1d_arraylike_from_scalar,
+    construct_2d_arraylike_from_scalar,
+    find_common_type,
+    infer_dtype_from_scalar,
+    invalidate_string_dtypes,
+    maybe_box_native,
+    maybe_downcast_to_dtype,
+)
+from pandas.core.dtypes.common import (
+    infer_dtype_from_object,
+    is_1d_only_ea_dtype,
+    is_array_like,
+    is_bool_dtype,
+    is_dataclass,
+    is_dict_like,
+    is_float,
+    is_float_dtype,
+    is_hashable,
+    is_integer,
+    is_integer_dtype,
+    is_iterator,
+    is_list_like,
+    is_scalar,
+    is_sequence,
+    needs_i8_conversion,
+    pandas_dtype,
+)
+from pandas.core.dtypes.concat import concat_compat
+from pandas.core.dtypes.dtypes import (
+    ArrowDtype,
+    BaseMaskedDtype,
+    ExtensionDtype,
+)
+from
pandas.core.dtypes.missing import ( + isna, + notna, +) + +from pandas.core import ( + algorithms, + common as com, + nanops, + ops, + roperator, +) +from pandas.core.accessor import CachedAccessor +from pandas.core.apply import reconstruct_and_relabel_result +from pandas.core.array_algos.take import take_2d_multi +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays import ( + BaseMaskedArray, + DatetimeArray, + ExtensionArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.arrays.sparse import SparseFrameAccessor +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, + sanitize_array, + sanitize_masked_array, +) +from pandas.core.generic import ( + NDFrame, + make_doc, +) +from pandas.core.indexers import check_key_length +from pandas.core.indexes.api import ( + DatetimeIndex, + Index, + PeriodIndex, + default_index, + ensure_index, + ensure_index_from_sequences, +) +from pandas.core.indexes.multi import ( + MultiIndex, + maybe_droplevels, +) +from pandas.core.indexing import ( + check_bool_indexer, + check_dict_or_set_indexers, +) +from pandas.core.internals import ( + ArrayManager, + BlockManager, +) +from pandas.core.internals.construction import ( + arrays_to_mgr, + dataclasses_to_dicts, + dict_to_mgr, + mgr_to_mgr, + ndarray_to_mgr, + nested_data_to_arrays, + rec_array_to_mgr, + reorder_arrays, + to_arrays, + treat_as_nested, +) +from pandas.core.methods import selectn +from pandas.core.reshape.melt import melt +from pandas.core.series import Series +from pandas.core.shared_docs import _shared_docs +from pandas.core.sorting import ( + get_group_index, + lexsort_indexer, + nargsort, +) + +from pandas.io.common import get_handle +from pandas.io.formats import ( + console, + format as fmt, +) +from pandas.io.formats.info import ( + INFO_DOCSTRING, + DataFrameInfo, + frame_sub_kwargs, +) +import pandas.plotting + +if TYPE_CHECKING: + import datetime + + from pandas._libs.internals import BlockValuesRefs + from pandas._typing import ( + AggFuncType, + AnyAll, + AnyArrayLike, + ArrayLike, + Axes, + Axis, + AxisInt, + ColspaceArgType, + CompressionOptions, + CorrelationMethod, + DropKeep, + Dtype, + DtypeObj, + FilePath, + FloatFormatType, + FormattersType, + Frequency, + FromDictOrient, + IgnoreRaise, + IndexKeyFunc, + IndexLabel, + JoinValidate, + Level, + MergeHow, + MergeValidate, + NaAction, + NaPosition, + NsmallestNlargestKeep, + PythonFuncType, + QuantileInterpolation, + ReadBuffer, + ReindexMethod, + Renamer, + Scalar, + Self, + SortKind, + StorageOptions, + Suffixes, + ToGbqIfexist, + ToStataByteorder, + ToTimestampHow, + UpdateJoin, + ValueKeyFunc, + WriteBuffer, + XMLParsers, + npt, + ) + + from pandas.core.groupby.generic import DataFrameGroupBy + from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg + from pandas.core.internals import SingleDataManager + + from pandas.io.formats.style import Styler + +# --------------------------------------------------------------------- +# Docstring templates + +_shared_doc_kwargs = { + "axes": "index, columns", + "klass": "DataFrame", + "axes_single_arg": "{0 or 'index', 1 or 'columns'}", + "axis": """axis : {0 or 'index', 1 or 'columns'}, default 0 + If 0 or 'index': apply function to each column. + If 1 or 'columns': apply function to each row.""", + "inplace": """ + inplace : bool, default False + Whether to modify the DataFrame rather than creating a new one.""", + "optional_by": """ +by : str or list of str + Name or list of names to sort by. 
+ + - if `axis` is 0 or `'index'` then `by` may contain index + levels and/or column labels. + - if `axis` is 1 or `'columns'` then `by` may contain column + levels and/or index labels.""", + "optional_reindex": """ +labels : array-like, optional + New labels / index to conform the axis specified by 'axis' to. +index : array-like, optional + New labels for the index. Preferably an Index object to avoid + duplicating data. +columns : array-like, optional + New labels for the columns. Preferably an Index object to avoid + duplicating data. +axis : int or str, optional + Axis to target. Can be either the axis name ('index', 'columns') + or number (0, 1).""", +} + +_merge_doc = """ +Merge DataFrame or named Series objects with a database-style join. + +A named Series object is treated as a DataFrame with a single named column. + +The join is done on columns or indexes. If joining columns on +columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes +on indexes or indexes on a column or columns, the index will be passed on. +When performing a cross merge, no column specifications to merge on are +allowed. + +.. warning:: + + If both key columns contain rows where the key is a null value, those + rows will be matched against each other. This is different from usual SQL + join behaviour and can lead to unexpected results. + +Parameters +----------%s +right : DataFrame or named Series + Object to merge with. +how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner' + Type of merge to be performed. + + * left: use only keys from left frame, similar to a SQL left outer join; + preserve key order. + * right: use only keys from right frame, similar to a SQL right outer join; + preserve key order. + * outer: use union of keys from both frames, similar to a SQL full outer + join; sort keys lexicographically. + * inner: use intersection of keys from both frames, similar to a SQL inner + join; preserve the order of the left keys. + * cross: creates the cartesian product from both frames, preserves the order + of the left keys. + + .. versionadded:: 1.2.0 + +on : label or list + Column or index level names to join on. These must be found in both + DataFrames. If `on` is None and not merging on indexes then this defaults + to the intersection of the columns in both DataFrames. +left_on : label or list, or array-like + Column or index level names to join on in the left DataFrame. Can also + be an array or list of arrays of the length of the left DataFrame. + These arrays are treated as if they are columns. +right_on : label or list, or array-like + Column or index level names to join on in the right DataFrame. Can also + be an array or list of arrays of the length of the right DataFrame. + These arrays are treated as if they are columns. +left_index : bool, default False + Use the index from the left DataFrame as the join key(s). If it is a + MultiIndex, the number of keys in the other DataFrame (either the index + or a number of columns) must match the number of levels. +right_index : bool, default False + Use the index from the right DataFrame as the join key. Same caveats as + left_index. +sort : bool, default False + Sort the join keys lexicographically in the result DataFrame. If False, + the order of the join keys depends on the join type (how keyword). +suffixes : list-like, default is ("_x", "_y") + A length-2 sequence where each element is optionally a string + indicating the suffix to add to overlapping column names in + `left` and `right` respectively. 
Pass a value of `None` instead + of a string to indicate that the column name from `left` or + `right` should be left as-is, with no suffix. At least one of the + values must not be None. +copy : bool, default True + If False, avoid copy if possible. +indicator : bool or str, default False + If True, adds a column to the output DataFrame called "_merge" with + information on the source of each row. The column can be given a different + name by providing a string argument. The column will have a Categorical + type with the value of "left_only" for observations whose merge key only + appears in the left DataFrame, "right_only" for observations + whose merge key only appears in the right DataFrame, and "both" + if the observation's merge key is found in both DataFrames. + +validate : str, optional + If specified, checks if merge is of specified type. + + * "one_to_one" or "1:1": check if merge keys are unique in both + left and right datasets. + * "one_to_many" or "1:m": check if merge keys are unique in left + dataset. + * "many_to_one" or "m:1": check if merge keys are unique in right + dataset. + * "many_to_many" or "m:m": allowed, but does not result in checks. + +Returns +------- +DataFrame + A DataFrame of the two merged objects. + +See Also +-------- +merge_ordered : Merge with optional filling/interpolation. +merge_asof : Merge on nearest keys. +DataFrame.join : Similar method using indices. + +Examples +-------- +>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], +... 'value': [1, 2, 3, 5]}) +>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], +... 'value': [5, 6, 7, 8]}) +>>> df1 + lkey value +0 foo 1 +1 bar 2 +2 baz 3 +3 foo 5 +>>> df2 + rkey value +0 foo 5 +1 bar 6 +2 baz 7 +3 foo 8 + +Merge df1 and df2 on the lkey and rkey columns. The value columns have +the default suffixes, _x and _y, appended. + +>>> df1.merge(df2, left_on='lkey', right_on='rkey') + lkey value_x rkey value_y +0 foo 1 foo 5 +1 foo 1 foo 8 +2 foo 5 foo 5 +3 foo 5 foo 8 +4 bar 2 bar 6 +5 baz 3 baz 7 + +Merge DataFrames df1 and df2 with specified left and right suffixes +appended to any overlapping columns. + +>>> df1.merge(df2, left_on='lkey', right_on='rkey', +... suffixes=('_left', '_right')) + lkey value_left rkey value_right +0 foo 1 foo 5 +1 foo 1 foo 8 +2 foo 5 foo 5 +3 foo 5 foo 8 +4 bar 2 bar 6 +5 baz 3 baz 7 + +Merge DataFrames df1 and df2, but raise an exception if the DataFrames have +any overlapping columns. + +>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False)) +Traceback (most recent call last): +... +ValueError: columns overlap but no suffix specified: + Index(['value'], dtype='object') + +>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]}) +>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]}) +>>> df1 + a b +0 foo 1 +1 bar 2 +>>> df2 + a c +0 foo 3 +1 baz 4 + +>>> df1.merge(df2, how='inner', on='a') + a b c +0 foo 1 3 + +>>> df1.merge(df2, how='left', on='a') + a b c +0 foo 1 3.0 +1 bar 2 NaN + +>>> df1 = pd.DataFrame({'left': ['foo', 'bar']}) +>>> df2 = pd.DataFrame({'right': [7, 8]}) +>>> df1 + left +0 foo +1 bar +>>> df2 + right +0 7 +1 8 + +>>> df1.merge(df2, how='cross') + left right +0 foo 7 +1 foo 8 +2 bar 7 +3 bar 8 +""" + + +# ----------------------------------------------------------------------- +# DataFrame class + + +class DataFrame(NDFrame, OpsMixin): + """ + Two-dimensional, size-mutable, potentially heterogeneous tabular data. + + Data structure also contains labeled axes (rows and columns). 
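# Illustrative sketch (not from this diff; frames, column names, and values
# are invented) of the join semantics documented in _merge_doc above.
import pandas as pd

left = pd.DataFrame({"key": ["a", "b", "b"], "lval": [1, 2, 3]})
right = pd.DataFrame({"key": ["b", "c"], "rval": [4, 5]})

# how="outer" keeps the union of keys; indicator=True adds a "_merge"
# column recording which frame each row came from.
print(left.merge(right, on="key", how="outer", indicator=True))

# validate="many_to_one" asserts that the right-hand keys are unique and
# raises pandas.errors.MergeError if the check fails.
print(left.merge(right, on="key", how="left", validate="many_to_one"))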
+ Arithmetic operations align on both row and column labels. Can be + thought of as a dict-like container for Series objects. The primary + pandas data structure. + + Parameters + ---------- + data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame + Dict can contain Series, arrays, constants, dataclass or list-like objects. If + data is a dict, column order follows insertion-order. If a dict contains Series + which have an index defined, it is aligned by its index. This alignment also + occurs if data is a Series or a DataFrame itself. Alignment is done on + Series/DataFrame inputs. + + If data is a list of dicts, column order follows insertion-order. + + index : Index or array-like + Index to use for resulting frame. Will default to RangeIndex if + no indexing information part of input data and no index provided. + columns : Index or array-like + Column labels to use for resulting frame when data does not have them, + defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, + will perform column selection instead. + dtype : dtype, default None + Data type to force. Only a single dtype is allowed. If None, infer. + copy : bool or None, default None + Copy data from inputs. + For dict data, the default of None behaves like ``copy=True``. For DataFrame + or 2d ndarray input, the default of None behaves like ``copy=False``. + If data is a dict containing one or more Series (possibly of different dtypes), + ``copy=False`` will ensure that these inputs are not copied. + + .. versionchanged:: 1.3.0 + + See Also + -------- + DataFrame.from_records : Constructor from tuples, also record arrays. + DataFrame.from_dict : From dicts of Series, arrays, or dicts. + read_csv : Read a comma-separated values (csv) file into DataFrame. + read_table : Read general delimited file into DataFrame. + read_clipboard : Read text from clipboard into DataFrame. + + Notes + ----- + Please reference the :ref:`User Guide ` for more information. + + Examples + -------- + Constructing DataFrame from a dictionary. + + >>> d = {'col1': [1, 2], 'col2': [3, 4]} + >>> df = pd.DataFrame(data=d) + >>> df + col1 col2 + 0 1 3 + 1 2 4 + + Notice that the inferred dtype is int64. + + >>> df.dtypes + col1 int64 + col2 int64 + dtype: object + + To enforce a single dtype: + + >>> df = pd.DataFrame(data=d, dtype=np.int8) + >>> df.dtypes + col1 int8 + col2 int8 + dtype: object + + Constructing DataFrame from a dictionary including Series: + + >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} + >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) + col1 col2 + 0 0 NaN + 1 1 NaN + 2 2 2.0 + 3 3 3.0 + + Constructing DataFrame from numpy ndarray: + + >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), + ... columns=['a', 'b', 'c']) + >>> df2 + a b c + 0 1 2 3 + 1 4 5 6 + 2 7 8 9 + + Constructing DataFrame from a numpy ndarray that has labeled columns: + + >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], + ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) + >>> df3 = pd.DataFrame(data, columns=['c', 'a']) + ... 
+ >>> df3 + c a + 0 3 1 + 1 6 4 + 2 9 7 + + Constructing DataFrame from dataclass: + + >>> from dataclasses import make_dataclass + >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) + >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) + x y + 0 0 0 + 1 0 3 + 2 2 3 + + Constructing DataFrame from Series/DataFrame: + + >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) + >>> df = pd.DataFrame(data=ser, index=["a", "c"]) + >>> df + 0 + a 1 + c 3 + + >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) + >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) + >>> df2 + x + a 1 + c 3 + """ + + _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set + _typ = "dataframe" + _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) + _accessors: set[str] = {"sparse"} + _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) + _mgr: BlockManager | ArrayManager + + # similar to __array_priority__, positions DataFrame before Series, Index, + # and ExtensionArray. Should NOT be overridden by subclasses. + __pandas_priority__ = 4000 + + @property + def _constructor(self) -> Callable[..., DataFrame]: + return DataFrame + + def _constructor_from_mgr(self, mgr, axes): + if self._constructor is DataFrame: + # we are pandas.DataFrame (or a subclass that doesn't override _constructor) + return DataFrame._from_mgr(mgr, axes=axes) + else: + assert axes is mgr.axes + return self._constructor(mgr) + + _constructor_sliced: Callable[..., Series] = Series + + def _sliced_from_mgr(self, mgr, axes) -> Series: + return Series._from_mgr(mgr, axes) + + def _constructor_sliced_from_mgr(self, mgr, axes): + if self._constructor_sliced is Series: + ser = self._sliced_from_mgr(mgr, axes) + ser._name = None # caller is responsible for setting real name + return ser + assert axes is mgr.axes + return self._constructor_sliced(mgr) + + # ---------------------------------------------------------------------- + # Constructors + + def __init__( + self, + data=None, + index: Axes | None = None, + columns: Axes | None = None, + dtype: Dtype | None = None, + copy: bool | None = None, + ) -> None: + if dtype is not None: + dtype = self._validate_dtype(dtype) + + if isinstance(data, DataFrame): + data = data._mgr + if not copy: + # if not copying data, ensure to still return a shallow copy + # to avoid the result sharing the same Manager + data = data.copy(deep=False) + + if isinstance(data, (BlockManager, ArrayManager)): + if using_copy_on_write(): + data = data.copy(deep=False) + # first check if a Manager is passed without any other arguments + # -> use fastpath (without checking Manager type) + if index is None and columns is None and dtype is None and not copy: + # GH#33357 fastpath + NDFrame.__init__(self, data) + return + + manager = get_option("mode.data_manager") + + # GH47215 + if isinstance(index, set): + raise ValueError("index cannot be a set") + if isinstance(columns, set): + raise ValueError("columns cannot be a set") + + if copy is None: + if isinstance(data, dict): + # retain pre-GH#38939 default behavior + copy = True + elif ( + manager == "array" + and isinstance(data, (np.ndarray, ExtensionArray)) + and data.ndim == 2 + ): + # INFO(ArrayManager) by default copy the 2D input array to get + # contiguous 1D arrays + copy = True + elif using_copy_on_write() and not isinstance( + data, (Index, DataFrame, Series) + ): + copy = True + else: + copy = False + + if data is None: + index = index if index is not None else default_index(0) + columns = columns 
if columns is not None else default_index(0) + dtype = dtype if dtype is not None else pandas_dtype(object) + data = [] + + if isinstance(data, (BlockManager, ArrayManager)): + mgr = self._init_mgr( + data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy + ) + + elif isinstance(data, dict): + # GH#38939 de facto copy defaults to False only in non-dict cases + mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) + elif isinstance(data, ma.MaskedArray): + from numpy.ma import mrecords + + # masked recarray + if isinstance(data, mrecords.MaskedRecords): + raise TypeError( + "MaskedRecords are not supported. Pass " + "{name: data[name] for name in data.dtype.names} " + "instead" + ) + + # a masked array + data = sanitize_masked_array(data) + mgr = ndarray_to_mgr( + data, + index, + columns, + dtype=dtype, + copy=copy, + typ=manager, + ) + + elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): + if data.dtype.names: + # i.e. numpy structured array + data = cast(np.ndarray, data) + mgr = rec_array_to_mgr( + data, + index, + columns, + dtype, + copy, + typ=manager, + ) + elif getattr(data, "name", None) is not None: + # i.e. Series/Index with non-None name + _copy = copy if using_copy_on_write() else True + mgr = dict_to_mgr( + # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no + # attribute "name" + {data.name: data}, # type: ignore[union-attr] + index, + columns, + dtype=dtype, + typ=manager, + copy=_copy, + ) + else: + mgr = ndarray_to_mgr( + data, + index, + columns, + dtype=dtype, + copy=copy, + typ=manager, + ) + + # For data is list-like, or Iterable (will consume into list) + elif is_list_like(data): + if not isinstance(data, abc.Sequence): + if hasattr(data, "__array__"): + # GH#44616 big perf improvement for e.g. 
pytorch tensor + data = np.asarray(data) + else: + data = list(data) + if len(data) > 0: + if is_dataclass(data[0]): + data = dataclasses_to_dicts(data) + if not isinstance(data, np.ndarray) and treat_as_nested(data): + # exclude ndarray as we may have cast it a few lines above + if columns is not None: + columns = ensure_index(columns) + arrays, columns, index = nested_data_to_arrays( + # error: Argument 3 to "nested_data_to_arrays" has incompatible + # type "Optional[Collection[Any]]"; expected "Optional[Index]" + data, + columns, + index, # type: ignore[arg-type] + dtype, + ) + mgr = arrays_to_mgr( + arrays, + columns, + index, + dtype=dtype, + typ=manager, + ) + else: + mgr = ndarray_to_mgr( + data, + index, + columns, + dtype=dtype, + copy=copy, + typ=manager, + ) + else: + mgr = dict_to_mgr( + {}, + index, + columns if columns is not None else default_index(0), + dtype=dtype, + typ=manager, + ) + # For data is scalar + else: + if index is None or columns is None: + raise ValueError("DataFrame constructor not properly called!") + + index = ensure_index(index) + columns = ensure_index(columns) + + if not dtype: + dtype, _ = infer_dtype_from_scalar(data) + + # For data is a scalar extension dtype + if isinstance(dtype, ExtensionDtype): + # TODO(EA2D): special case not needed with 2D EAs + + values = [ + construct_1d_arraylike_from_scalar(data, len(index), dtype) + for _ in range(len(columns)) + ] + mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) + else: + arr2d = construct_2d_arraylike_from_scalar( + data, + len(index), + len(columns), + dtype, + copy, + ) + + mgr = ndarray_to_mgr( + arr2d, + index, + columns, + dtype=arr2d.dtype, + copy=False, + typ=manager, + ) + + # ensure correct Manager type according to settings + mgr = mgr_to_mgr(mgr, typ=manager) + + NDFrame.__init__(self, mgr) + + # ---------------------------------------------------------------------- + + def __dataframe__( + self, nan_as_null: bool = False, allow_copy: bool = True + ) -> DataFrameXchg: + """ + Return the dataframe interchange object implementing the interchange protocol. + + Parameters + ---------- + nan_as_null : bool, default False + Whether to tell the DataFrame to overwrite null values in the data + with ``NaN`` (or ``NaT``). + allow_copy : bool, default True + Whether to allow memory copying when exporting. If set to False + it would cause non-zero-copy exports to fail. + + Returns + ------- + DataFrame interchange object + The object which consuming library can use to ingress the dataframe. + + Notes + ----- + Details on the interchange protocol: + https://data-apis.org/dataframe-protocol/latest/index.html + + `nan_as_null` currently has no effect; once support for nullable extension + dtypes is added, this value should be propagated to columns. + + Examples + -------- + >>> df_not_necessarily_pandas = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}) + >>> interchange_object = df_not_necessarily_pandas.__dataframe__() + >>> interchange_object.column_names() + Index(['A', 'B'], dtype='object') + >>> df_pandas = (pd.api.interchange.from_dataframe + ... (interchange_object.select_columns_by_name(['A']))) + >>> df_pandas + A + 0 1 + 1 2 + + These methods (``column_names``, ``select_columns_by_name``) should work + for any dataframe library which implements the interchange protocol. 
+ """ + + from pandas.core.interchange.dataframe import PandasDataFrameXchg + + return PandasDataFrameXchg(self, nan_as_null, allow_copy) + + def __dataframe_consortium_standard__( + self, *, api_version: str | None = None + ) -> Any: + """ + Provide entry point to the Consortium DataFrame Standard API. + + This is developed and maintained outside of pandas. + Please report any issues to https://github.com/data-apis/dataframe-api-compat. + """ + dataframe_api_compat = import_optional_dependency("dataframe_api_compat") + convert_to_standard_compliant_dataframe = ( + dataframe_api_compat.pandas_standard.convert_to_standard_compliant_dataframe + ) + return convert_to_standard_compliant_dataframe(self, api_version=api_version) + + # ---------------------------------------------------------------------- + + @property + def axes(self) -> list[Index]: + """ + Return a list representing the axes of the DataFrame. + + It has the row axis labels and column axis labels as the only members. + They are returned in that order. + + Examples + -------- + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) + >>> df.axes + [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], + dtype='object')] + """ + return [self.index, self.columns] + + @property + def shape(self) -> tuple[int, int]: + """ + Return a tuple representing the dimensionality of the DataFrame. + + See Also + -------- + ndarray.shape : Tuple of array dimensions. + + Examples + -------- + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) + >>> df.shape + (2, 2) + + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], + ... 'col3': [5, 6]}) + >>> df.shape + (2, 3) + """ + return len(self.index), len(self.columns) + + @property + def _is_homogeneous_type(self) -> bool: + """ + Whether all the columns in a DataFrame have the same type. + + Returns + ------- + bool + + Examples + -------- + >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type + True + >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type + False + + Items with the same type but different sizes are considered + different types. + + >>> DataFrame({ + ... "A": np.array([1, 2], dtype=np.int32), + ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type + False + """ + # The "<" part of "<=" here is for empty DataFrame cases + return len({arr.dtype for arr in self._mgr.arrays}) <= 1 + + @property + def _can_fast_transpose(self) -> bool: + """ + Can we transpose this DataFrame without creating any new array objects. + """ + if isinstance(self._mgr, ArrayManager): + return False + blocks = self._mgr.blocks + if len(blocks) != 1: + return False + + dtype = blocks[0].dtype + # TODO(EA2D) special case would be unnecessary with 2D EAs + return not is_1d_only_ea_dtype(dtype) + + @property + def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: + """ + Analogue to ._values that may return a 2D ExtensionArray. 
+ """ + mgr = self._mgr + + if isinstance(mgr, ArrayManager): + if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): + # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" + # has no attribute "reshape" + return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] + return ensure_wrapped_if_datetimelike(self.values) + + blocks = mgr.blocks + if len(blocks) != 1: + return ensure_wrapped_if_datetimelike(self.values) + + arr = blocks[0].values + if arr.ndim == 1: + # non-2D ExtensionArray + return self.values + + # more generally, whatever we allow in NDArrayBackedExtensionBlock + arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) + return arr.T + + # ---------------------------------------------------------------------- + # Rendering Methods + + def _repr_fits_vertical_(self) -> bool: + """ + Check length against max_rows. + """ + max_rows = get_option("display.max_rows") + return len(self) <= max_rows + + def _repr_fits_horizontal_(self) -> bool: + """ + Check if full repr fits in horizontal boundaries imposed by the display + options width and max_columns. + """ + width, height = console.get_console_size() + max_columns = get_option("display.max_columns") + nb_columns = len(self.columns) + + # exceed max columns + if (max_columns and nb_columns > max_columns) or ( + width and nb_columns > (width // 2) + ): + return False + + # used by repr_html under IPython notebook or scripts ignore terminal + # dims + if width is None or not console.in_interactive_session(): + return True + + if get_option("display.width") is not None or console.in_ipython_frontend(): + # check at least the column row for excessive width + max_rows = 1 + else: + max_rows = get_option("display.max_rows") + + # when auto-detecting, so width=None and not in ipython front end + # check whether repr fits horizontal by actually checking + # the width of the rendered repr + buf = StringIO() + + # only care about the stuff we'll actually print out + # and to_string on entire frame may be expensive + d = self + + if max_rows is not None: # unlimited rows + # min of two, where one may be None + d = d.iloc[: min(max_rows, len(d))] + else: + return True + + d.to_string(buf=buf) + value = buf.getvalue() + repr_width = max(len(line) for line in value.split("\n")) + + return repr_width < width + + def _info_repr(self) -> bool: + """ + True if the repr should show the info view. + """ + info_repr_option = get_option("display.large_repr") == "info" + return info_repr_option and not ( + self._repr_fits_horizontal_() and self._repr_fits_vertical_() + ) + + def __repr__(self) -> str: + """ + Return a string representation for a particular DataFrame. + """ + if self._info_repr(): + buf = StringIO() + self.info(buf=buf) + return buf.getvalue() + + repr_params = fmt.get_dataframe_repr_params() + return self.to_string(**repr_params) + + def _repr_html_(self) -> str | None: + """ + Return a html representation for a particular DataFrame. + + Mainly for IPython notebook. + """ + if self._info_repr(): + buf = StringIO() + self.info(buf=buf) + # need to escape the , should be the first line. + val = buf.getvalue().replace("<", r"<", 1) + val = val.replace(">", r">", 1) + return f"
{val}
" + + if get_option("display.notebook_repr_html"): + max_rows = get_option("display.max_rows") + min_rows = get_option("display.min_rows") + max_cols = get_option("display.max_columns") + show_dimensions = get_option("display.show_dimensions") + + formatter = fmt.DataFrameFormatter( + self, + columns=None, + col_space=None, + na_rep="NaN", + formatters=None, + float_format=None, + sparsify=None, + justify=None, + index_names=True, + header=True, + index=True, + bold_rows=True, + escape=True, + max_rows=max_rows, + min_rows=min_rows, + max_cols=max_cols, + show_dimensions=show_dimensions, + decimal=".", + ) + return fmt.DataFrameRenderer(formatter).to_html(notebook=True) + else: + return None + + @overload + def to_string( + self, + buf: None = ..., + columns: Axes | None = ..., + col_space: int | list[int] | dict[Hashable, int] | None = ..., + header: bool | list[str] = ..., + index: bool = ..., + na_rep: str = ..., + formatters: fmt.FormattersType | None = ..., + float_format: fmt.FloatFormatType | None = ..., + sparsify: bool | None = ..., + index_names: bool = ..., + justify: str | None = ..., + max_rows: int | None = ..., + max_cols: int | None = ..., + show_dimensions: bool = ..., + decimal: str = ..., + line_width: int | None = ..., + min_rows: int | None = ..., + max_colwidth: int | None = ..., + encoding: str | None = ..., + ) -> str: + ... + + @overload + def to_string( + self, + buf: FilePath | WriteBuffer[str], + columns: Axes | None = ..., + col_space: int | list[int] | dict[Hashable, int] | None = ..., + header: bool | list[str] = ..., + index: bool = ..., + na_rep: str = ..., + formatters: fmt.FormattersType | None = ..., + float_format: fmt.FloatFormatType | None = ..., + sparsify: bool | None = ..., + index_names: bool = ..., + justify: str | None = ..., + max_rows: int | None = ..., + max_cols: int | None = ..., + show_dimensions: bool = ..., + decimal: str = ..., + line_width: int | None = ..., + min_rows: int | None = ..., + max_colwidth: int | None = ..., + encoding: str | None = ..., + ) -> None: + ... + + @Substitution( + header_type="bool or list of str", + header="Write out the column names. If a list of columns " + "is given, it is assumed to be aliases for the " + "column names", + col_space_type="int, list or dict of int", + col_space="The minimum width of each column. If a list of ints is given " + "every integers corresponds with one column. If a dict is given, the key " + "references the column, while the value defines the space to use.", + ) + @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) + def to_string( + self, + buf: FilePath | WriteBuffer[str] | None = None, + columns: Axes | None = None, + col_space: int | list[int] | dict[Hashable, int] | None = None, + header: bool | list[str] = True, + index: bool = True, + na_rep: str = "NaN", + formatters: fmt.FormattersType | None = None, + float_format: fmt.FloatFormatType | None = None, + sparsify: bool | None = None, + index_names: bool = True, + justify: str | None = None, + max_rows: int | None = None, + max_cols: int | None = None, + show_dimensions: bool = False, + decimal: str = ".", + line_width: int | None = None, + min_rows: int | None = None, + max_colwidth: int | None = None, + encoding: str | None = None, + ) -> str | None: + """ + Render a DataFrame to a console-friendly tabular output. + %(shared_params)s + line_width : int, optional + Width to wrap a line in characters. 
+ min_rows : int, optional + The number of rows to display in the console in a truncated repr + (when number of rows is above `max_rows`). + max_colwidth : int, optional + Max width to truncate each column in characters. By default, no limit. + encoding : str, default "utf-8" + Set character encoding. + %(returns)s + See Also + -------- + to_html : Convert DataFrame to HTML. + + Examples + -------- + >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} + >>> df = pd.DataFrame(d) + >>> print(df.to_string()) + col1 col2 + 0 1 4 + 1 2 5 + 2 3 6 + """ + from pandas import option_context + + with option_context("display.max_colwidth", max_colwidth): + formatter = fmt.DataFrameFormatter( + self, + columns=columns, + col_space=col_space, + na_rep=na_rep, + formatters=formatters, + float_format=float_format, + sparsify=sparsify, + justify=justify, + index_names=index_names, + header=header, + index=index, + min_rows=min_rows, + max_rows=max_rows, + max_cols=max_cols, + show_dimensions=show_dimensions, + decimal=decimal, + ) + return fmt.DataFrameRenderer(formatter).to_string( + buf=buf, + encoding=encoding, + line_width=line_width, + ) + + # ---------------------------------------------------------------------- + + @property + def style(self) -> Styler: + """ + Returns a Styler object. + + Contains methods for building a styled HTML representation of the DataFrame. + + See Also + -------- + io.formats.style.Styler : Helps style a DataFrame or Series according to the + data with HTML and CSS. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 2, 3]}) + >>> df.style # doctest: +SKIP + + Please see + `Table Visualization <../../user_guide/style.ipynb>`_ for more examples. + """ + from pandas.io.formats.style import Styler + + return Styler(self) + + _shared_docs[ + "items" + ] = r""" + Iterate over (column name, Series) pairs. + + Iterates over the DataFrame columns, returning a tuple with + the column name and the content as a Series. + + Yields + ------ + label : object + The column names for the DataFrame being iterated over. + content : Series + The column entries belonging to each label, as a Series. + + See Also + -------- + DataFrame.iterrows : Iterate over DataFrame rows as + (index, Series) pairs. + DataFrame.itertuples : Iterate over DataFrame rows as namedtuples + of the values. + + Examples + -------- + >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], + ... 'population': [1864, 22000, 80000]}, + ... index=['panda', 'polar', 'koala']) + >>> df + species population + panda bear 1864 + polar bear 22000 + koala marsupial 80000 + >>> for label, content in df.items(): + ... print(f'label: {label}') + ... print(f'content: {content}', sep='\n') + ... + label: species + content: + panda bear + polar bear + koala marsupial + Name: species, dtype: object + label: population + content: + panda 1864 + polar 22000 + koala 80000 + Name: population, dtype: int64 + """ + + @Appender(_shared_docs["items"]) + def items(self) -> Iterable[tuple[Hashable, Series]]: + if self.columns.is_unique and hasattr(self, "_item_cache"): + for k in self.columns: + yield k, self._get_item_cache(k) + else: + for i, k in enumerate(self.columns): + yield k, self._ixs(i, axis=1) + + def iterrows(self) -> Iterable[tuple[Hashable, Series]]: + """ + Iterate over DataFrame rows as (index, Series) pairs. + + Yields + ------ + index : label or tuple of label + The index of the row. A tuple for a `MultiIndex`. + data : Series + The data of the row as a Series. 
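# Illustrative sketch (invented frame) exercising the to_string parameters
# documented above; note that max_colwidth is applied through option_context
# inside to_string itself.
import pandas as pd

df = pd.DataFrame({"name": ["alphabet", "beta"], "value": [1.2345, 2.5]})
print(
    df.to_string(
        float_format=lambda v: f"{v:.2f}",  # formats the float column
        max_colwidth=5,                     # truncates long cells with "..."
        index=False,
    )
)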
+ + See Also + -------- + DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. + DataFrame.items : Iterate over (column name, Series) pairs. + + Notes + ----- + 1. Because ``iterrows`` returns a Series for each row, + it does **not** preserve dtypes across the rows (dtypes are + preserved across columns for DataFrames). + + To preserve dtypes while iterating over the rows, it is better + to use :meth:`itertuples` which returns namedtuples of the values + and which is generally faster than ``iterrows``. + + 2. You should **never modify** something you are iterating over. + This is not guaranteed to work in all cases. Depending on the + data types, the iterator returns a copy and not a view, and writing + to it will have no effect. + + Examples + -------- + + >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) + >>> row = next(df.iterrows())[1] + >>> row + int 1.0 + float 1.5 + Name: 0, dtype: float64 + >>> print(row['int'].dtype) + float64 + >>> print(df['int'].dtype) + int64 + """ + columns = self.columns + klass = self._constructor_sliced + using_cow = using_copy_on_write() + for k, v in zip(self.index, self.values): + s = klass(v, index=columns, name=k).__finalize__(self) + if using_cow and self._mgr.is_single_block: + s._mgr.add_references(self._mgr) # type: ignore[arg-type] + yield k, s + + def itertuples( + self, index: bool = True, name: str | None = "Pandas" + ) -> Iterable[tuple[Any, ...]]: + """ + Iterate over DataFrame rows as namedtuples. + + Parameters + ---------- + index : bool, default True + If True, return the index as the first element of the tuple. + name : str or None, default "Pandas" + The name of the returned namedtuples or None to return regular + tuples. + + Returns + ------- + iterator + An object to iterate over namedtuples for each row in the + DataFrame with the first field possibly being the index and + following fields being the column values. + + See Also + -------- + DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) + pairs. + DataFrame.items : Iterate over (column name, Series) pairs. + + Notes + ----- + The column names will be renamed to positional names if they are + invalid Python identifiers, repeated, or start with an underscore. + + Examples + -------- + >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, + ... index=['dog', 'hawk']) + >>> df + num_legs num_wings + dog 4 0 + hawk 2 2 + >>> for row in df.itertuples(): + ... print(row) + ... + Pandas(Index='dog', num_legs=4, num_wings=0) + Pandas(Index='hawk', num_legs=2, num_wings=2) + + By setting the `index` parameter to False we can remove the index + as the first element of the tuple: + + >>> for row in df.itertuples(index=False): + ... print(row) + ... + Pandas(num_legs=4, num_wings=0) + Pandas(num_legs=2, num_wings=2) + + With the `name` parameter set we set a custom name for the yielded + namedtuples: + + >>> for row in df.itertuples(name='Animal'): + ... print(row) + ... 
+ Animal(Index='dog', num_legs=4, num_wings=0) + Animal(Index='hawk', num_legs=2, num_wings=2) + """ + arrays = [] + fields = list(self.columns) + if index: + arrays.append(self.index) + fields.insert(0, "Index") + + # use integer indexing because of possible duplicate column names + arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) + + if name is not None: + # https://github.com/python/mypy/issues/9046 + # error: namedtuple() expects a string literal as the first argument + itertuple = collections.namedtuple( # type: ignore[misc] + name, fields, rename=True + ) + return map(itertuple._make, zip(*arrays)) + + # fallback to regular tuples + return zip(*arrays) + + def __len__(self) -> int: + """ + Returns length of info axis, but here we use the index. + """ + return len(self.index) + + @overload + def dot(self, other: Series) -> Series: + ... + + @overload + def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: + ... + + def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: + """ + Compute the matrix multiplication between the DataFrame and other. + + This method computes the matrix product between the DataFrame and the + values of an other Series, DataFrame or a numpy array. + + It can also be called using ``self @ other``. + + Parameters + ---------- + other : Series, DataFrame or array-like + The other object to compute the matrix product with. + + Returns + ------- + Series or DataFrame + If other is a Series, return the matrix product between self and + other as a Series. If other is a DataFrame or a numpy.array, return + the matrix product of self and other in a DataFrame of a np.array. + + See Also + -------- + Series.dot: Similar method for Series. + + Notes + ----- + The dimensions of DataFrame and other must be compatible in order to + compute the matrix multiplication. In addition, the column names of + DataFrame and the index of other must contain the same values, as they + will be aligned prior to the multiplication. + + The dot method for Series computes the inner product, instead of the + matrix product here. + + Examples + -------- + Here we multiply a DataFrame with a Series. + + >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) + >>> s = pd.Series([1, 1, 2, 1]) + >>> df.dot(s) + 0 -4 + 1 5 + dtype: int64 + + Here we multiply a DataFrame with another DataFrame. + + >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) + >>> df.dot(other) + 0 1 + 0 1 4 + 1 2 2 + + Note that the dot method give the same result as @ + + >>> df @ other + 0 1 + 0 1 4 + 1 2 2 + + The dot method works also if other is an np.array. + + >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) + >>> df.dot(arr) + 0 1 + 0 1 4 + 1 2 2 + + Note how shuffling of the objects does not change the result. 
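# Illustrative sketch (toy frame) restating the caveats above: iterrows
# builds one Series per row, so mixed dtypes are upcast, while itertuples
# keeps scalar values and renames invalid column names positionally
# (namedtuple(..., rename=True)).
import pandas as pd

df = pd.DataFrame({"int": [1], "with space": [1.5]})

row = next(df.iterrows())[1]
print(row["int"])            # 1.0 -- upcast to float64 inside the row Series

for tup in df.itertuples(name="Row"):
    print(tup)               # "with space" becomes the positional field _2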
+ + >>> s2 = s.reindex([1, 0, 2, 3]) + >>> df.dot(s2) + 0 -4 + 1 5 + dtype: int64 + """ + if isinstance(other, (Series, DataFrame)): + common = self.columns.union(other.index) + if len(common) > len(self.columns) or len(common) > len(other.index): + raise ValueError("matrices are not aligned") + + left = self.reindex(columns=common, copy=False) + right = other.reindex(index=common, copy=False) + lvals = left.values + rvals = right._values + else: + left = self + lvals = self.values + rvals = np.asarray(other) + if lvals.shape[1] != rvals.shape[0]: + raise ValueError( + f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" + ) + + if isinstance(other, DataFrame): + common_type = find_common_type(list(self.dtypes) + list(other.dtypes)) + return self._constructor( + np.dot(lvals, rvals), + index=left.index, + columns=other.columns, + copy=False, + dtype=common_type, + ) + elif isinstance(other, Series): + common_type = find_common_type(list(self.dtypes) + [other.dtypes]) + return self._constructor_sliced( + np.dot(lvals, rvals), index=left.index, copy=False, dtype=common_type + ) + elif isinstance(rvals, (np.ndarray, Index)): + result = np.dot(lvals, rvals) + if result.ndim == 2: + return self._constructor(result, index=left.index, copy=False) + else: + return self._constructor_sliced(result, index=left.index, copy=False) + else: # pragma: no cover + raise TypeError(f"unsupported type: {type(other)}") + + @overload + def __matmul__(self, other: Series) -> Series: + ... + + @overload + def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: + ... + + def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: + """ + Matrix multiplication using binary `@` operator. + """ + return self.dot(other) + + def __rmatmul__(self, other) -> DataFrame: + """ + Matrix multiplication using binary `@` operator. + """ + try: + return self.T.dot(np.transpose(other)).T + except ValueError as err: + if "shape mismatch" not in str(err): + raise + # GH#21581 give exception message for original shapes + msg = f"shapes {np.shape(other)} and {self.shape} not aligned" + raise ValueError(msg) from err + + # ---------------------------------------------------------------------- + # IO methods (to / from other formats) + + @classmethod + def from_dict( + cls, + data: dict, + orient: FromDictOrient = "columns", + dtype: Dtype | None = None, + columns: Axes | None = None, + ) -> DataFrame: + """ + Construct DataFrame from dict of array-like or dicts. + + Creates DataFrame object from dictionary by columns or by index + allowing dtype specification. + + Parameters + ---------- + data : dict + Of the form {field : array-like} or {field : dict}. + orient : {'columns', 'index', 'tight'}, default 'columns' + The "orientation" of the data. If the keys of the passed dict + should be the columns of the resulting DataFrame, pass 'columns' + (default). Otherwise if the keys should be rows, pass 'index'. + If 'tight', assume a dict with keys ['index', 'columns', 'data', + 'index_names', 'column_names']. + + .. versionadded:: 1.4.0 + 'tight' as an allowed value for the ``orient`` argument + + dtype : dtype, default None + Data type to force after DataFrame construction, otherwise infer. + columns : list, default None + Column labels to use when ``orient='index'``. Raises a ValueError + if used with ``orient='columns'`` or ``orient='tight'``. 
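# Illustrative sketch (invented values) of the alignment rule in dot() above:
# self's columns are matched with other's index before the product is taken,
# so a shuffled operand gives the same answer.
import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
s = pd.Series([10, 1], index=["b", "a"])     # deliberately out of order

print(df.dot(s))   # row 0: 1*1 + 2*10 = 21, row 1: 3*1 + 4*10 = 43
print(df @ s)      # the @ operator routes through dot()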
+ + Returns + ------- + DataFrame + + See Also + -------- + DataFrame.from_records : DataFrame from structured ndarray, sequence + of tuples or dicts, or DataFrame. + DataFrame : DataFrame object creation using constructor. + DataFrame.to_dict : Convert the DataFrame to a dictionary. + + Examples + -------- + By default the keys of the dict become the DataFrame columns: + + >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} + >>> pd.DataFrame.from_dict(data) + col_1 col_2 + 0 3 a + 1 2 b + 2 1 c + 3 0 d + + Specify ``orient='index'`` to create the DataFrame using dictionary + keys as rows: + + >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} + >>> pd.DataFrame.from_dict(data, orient='index') + 0 1 2 3 + row_1 3 2 1 0 + row_2 a b c d + + When using the 'index' orientation, the column names can be + specified manually: + + >>> pd.DataFrame.from_dict(data, orient='index', + ... columns=['A', 'B', 'C', 'D']) + A B C D + row_1 3 2 1 0 + row_2 a b c d + + Specify ``orient='tight'`` to create the DataFrame using a 'tight' + format: + + >>> data = {'index': [('a', 'b'), ('a', 'c')], + ... 'columns': [('x', 1), ('y', 2)], + ... 'data': [[1, 3], [2, 4]], + ... 'index_names': ['n1', 'n2'], + ... 'column_names': ['z1', 'z2']} + >>> pd.DataFrame.from_dict(data, orient='tight') + z1 x y + z2 1 2 + n1 n2 + a b 1 3 + c 2 4 + """ + index = None + orient = orient.lower() # type: ignore[assignment] + if orient == "index": + if len(data) > 0: + # TODO speed up Series case + if isinstance(next(iter(data.values())), (Series, dict)): + data = _from_nested_dict(data) + else: + index = list(data.keys()) + # error: Incompatible types in assignment (expression has type + # "List[Any]", variable has type "Dict[Any, Any]") + data = list(data.values()) # type: ignore[assignment] + elif orient in ("columns", "tight"): + if columns is not None: + raise ValueError(f"cannot use columns parameter with orient='{orient}'") + else: # pragma: no cover + raise ValueError( + f"Expected 'index', 'columns' or 'tight' for orient parameter. " + f"Got '{orient}' instead" + ) + + if orient != "tight": + return cls(data, index=index, columns=columns, dtype=dtype) + else: + realdata = data["data"] + + def create_index(indexlist, namelist): + index: Index + if len(namelist) > 1: + index = MultiIndex.from_tuples(indexlist, names=namelist) + else: + index = Index(indexlist, name=namelist[0]) + return index + + index = create_index(data["index"], data["index_names"]) + columns = create_index(data["columns"], data["column_names"]) + return cls(realdata, index=index, columns=columns, dtype=dtype) + + def to_numpy( + self, + dtype: npt.DTypeLike | None = None, + copy: bool = False, + na_value: object = lib.no_default, + ) -> np.ndarray: + """ + Convert the DataFrame to a NumPy array. + + By default, the dtype of the returned array will be the common NumPy + dtype of all types in the DataFrame. For example, if the dtypes are + ``float16`` and ``float32``, the results dtype will be ``float32``. + This may require copying data and coercing values, which may be + expensive. + + Parameters + ---------- + dtype : str or numpy.dtype, optional + The dtype to pass to :meth:`numpy.asarray`. + copy : bool, default False + Whether to ensure that the returned value is not a view on + another array. Note that ``copy=False`` does not *ensure* that + ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that + a copy is made, even if not strictly necessary. + na_value : Any, optional + The value to use for missing values. 
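# Illustrative sketch (made-up frame): the 'tight' orient handled above
# round-trips a MultiIndex losslessly, unlike the plain 'dict' orient.
import pandas as pd

mi = pd.MultiIndex.from_tuples([("a", "b"), ("a", "c")], names=["n1", "n2"])
df = pd.DataFrame({"x": [1, 2], "y": [3, 4]}, index=mi)

tight = df.to_dict(orient="tight")   # index/columns/data/index_names/column_names
back = pd.DataFrame.from_dict(tight, orient="tight")
print(back.equals(df))               # True: lossless round trip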
The default value depends + on `dtype` and the dtypes of the DataFrame columns. + + Returns + ------- + numpy.ndarray + + See Also + -------- + Series.to_numpy : Similar method for Series. + + Examples + -------- + >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() + array([[1, 3], + [2, 4]]) + + With heterogeneous data, the lowest common type will have to + be used. + + >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) + >>> df.to_numpy() + array([[1. , 3. ], + [2. , 4.5]]) + + For a mix of numeric and non-numeric types, the output array will + have object dtype. + + >>> df['C'] = pd.date_range('2000', periods=2) + >>> df.to_numpy() + array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], + [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) + """ + if dtype is not None: + dtype = np.dtype(dtype) + result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) + if result.dtype is not dtype: + result = np.array(result, dtype=dtype, copy=False) + + return result + + def _create_data_for_split_and_tight_to_dict( + self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] + ) -> list: + """ + Simple helper method to create data for to ``to_dict(orient="split")`` and + ``to_dict(orient="tight")`` to create the main output data + """ + if are_all_object_dtype_cols: + data = [ + list(map(maybe_box_native, t)) + for t in self.itertuples(index=False, name=None) + ] + else: + data = [list(t) for t in self.itertuples(index=False, name=None)] + if object_dtype_indices: + # If we have object_dtype_cols, apply maybe_box_naive after list + # comprehension for perf + for row in data: + for i in object_dtype_indices: + row[i] = maybe_box_native(row[i]) + return data + + @overload + def to_dict( + self, + orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., + into: type[dict] = ..., + ) -> dict: + ... + + @overload + def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: + ... + + def to_dict( + self, + orient: Literal[ + "dict", "list", "series", "split", "tight", "records", "index" + ] = "dict", + into: type[dict] = dict, + index: bool = True, + ) -> dict | list[dict]: + """ + Convert the DataFrame to a dictionary. + + The type of the key-value pairs can be customized with the parameters + (see below). + + Parameters + ---------- + orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} + Determines the type of the values of the dictionary. + + - 'dict' (default) : dict like {column -> {index -> value}} + - 'list' : dict like {column -> [values]} + - 'series' : dict like {column -> Series(values)} + - 'split' : dict like + {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} + - 'tight' : dict like + {'index' -> [index], 'columns' -> [columns], 'data' -> [values], + 'index_names' -> [index.names], 'column_names' -> [column.names]} + - 'records' : list like + [{column -> value}, ... , {column -> value}] + - 'index' : dict like {index -> {column -> value}} + + .. versionadded:: 1.4.0 + 'tight' as an allowed value for the ``orient`` argument + + into : class, default dict + The collections.abc.Mapping subclass used for all Mappings + in the return value. Can be the actual class or an empty + instance of the mapping type you want. If you want a + collections.defaultdict, you must pass it initialized. + + index : bool, default True + Whether to include the index item (and index_names item if `orient` + is 'tight') in the returned dictionary. 
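# Illustrative sketch (toy data) of the dtype/na_value parameters of
# to_numpy documented above.
import numpy as np
import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": [3.0, np.nan]})
print(df.to_numpy())                  # common dtype float64; NaN preserved
print(df.to_numpy(dtype="float32"))   # forced dtype (may copy/coerce)
print(df.to_numpy(na_value=-1.0))     # missing values replaced on export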
Can only be ``False`` + when `orient` is 'split' or 'tight'. + + .. versionadded:: 2.0.0 + + Returns + ------- + dict, list or collections.abc.Mapping + Return a collections.abc.Mapping object representing the DataFrame. + The resulting transformation depends on the `orient` parameter. + + See Also + -------- + DataFrame.from_dict: Create a DataFrame from a dictionary. + DataFrame.to_json: Convert a DataFrame to JSON format. + + Examples + -------- + >>> df = pd.DataFrame({'col1': [1, 2], + ... 'col2': [0.5, 0.75]}, + ... index=['row1', 'row2']) + >>> df + col1 col2 + row1 1 0.50 + row2 2 0.75 + >>> df.to_dict() + {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} + + You can specify the return orientation. + + >>> df.to_dict('series') + {'col1': row1 1 + row2 2 + Name: col1, dtype: int64, + 'col2': row1 0.50 + row2 0.75 + Name: col2, dtype: float64} + + >>> df.to_dict('split') + {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], + 'data': [[1, 0.5], [2, 0.75]]} + + >>> df.to_dict('records') + [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] + + >>> df.to_dict('index') + {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} + + >>> df.to_dict('tight') + {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], + 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} + + You can also specify the mapping type. + + >>> from collections import OrderedDict, defaultdict + >>> df.to_dict(into=OrderedDict) + OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), + ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) + + If you want a `defaultdict`, you need to initialize it: + + >>> dd = defaultdict(list) + >>> df.to_dict('records', into=dd) + [defaultdict(, {'col1': 1, 'col2': 0.5}), + defaultdict(, {'col1': 2, 'col2': 0.75})] + """ + from pandas.core.methods.to_dict import to_dict + + return to_dict(self, orient, into, index) + + def to_gbq( + self, + destination_table: str, + project_id: str | None = None, + chunksize: int | None = None, + reauth: bool = False, + if_exists: ToGbqIfexist = "fail", + auth_local_webserver: bool = True, + table_schema: list[dict[str, str]] | None = None, + location: str | None = None, + progress_bar: bool = True, + credentials=None, + ) -> None: + """ + Write a DataFrame to a Google BigQuery table. + + This function requires the `pandas-gbq package + `__. + + See the `How to authenticate with Google BigQuery + `__ + guide for authentication instructions. + + Parameters + ---------- + destination_table : str + Name of table to be written, in the form ``dataset.tablename``. + project_id : str, optional + Google BigQuery Account project ID. Optional when available from + the environment. + chunksize : int, optional + Number of rows to be inserted in each chunk from the dataframe. + Set to ``None`` to load the whole dataframe at once. + reauth : bool, default False + Force Google BigQuery to re-authenticate the user. This is useful + if multiple accounts are used. + if_exists : str, default 'fail' + Behavior when the destination table exists. Value can be one of: + + ``'fail'`` + If table exists raise pandas_gbq.gbq.TableCreationError. + ``'replace'`` + If table exists, drop it, recreate it, and insert data. + ``'append'`` + If table exists, insert data. Create if does not exist. + auth_local_webserver : bool, default True + Use the `local webserver flow`_ instead of the `console flow`_ + when getting user credentials. + + .. 
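# Illustrative sketch (same shape as the docstring examples above); the
# index=False escape hatch is only legal for the 'split' and 'tight' orients.
import pandas as pd

df = pd.DataFrame({"col1": [1, 2], "col2": [0.5, 0.75]}, index=["row1", "row2"])
print(df.to_dict(orient="records"))             # list of per-row dicts
print(df.to_dict(orient="split", index=False))  # no 'index' entry in result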
_local webserver flow: + https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server + .. _console flow: + https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console + + *New in version 0.2.0 of pandas-gbq*. + + .. versionchanged:: 1.5.0 + Default value is changed to ``True``. Google has deprecated the + ``auth_local_webserver = False`` `"out of band" (copy-paste) + flow + `_. + table_schema : list of dicts, optional + List of BigQuery table fields to which according DataFrame + columns conform to, e.g. ``[{'name': 'col1', 'type': + 'STRING'},...]``. If schema is not provided, it will be + generated according to dtypes of DataFrame columns. See + BigQuery API documentation on available names of a field. + + *New in version 0.3.1 of pandas-gbq*. + location : str, optional + Location where the load job should run. See the `BigQuery locations + documentation + `__ for a + list of available locations. The location must match that of the + target dataset. + + *New in version 0.5.0 of pandas-gbq*. + progress_bar : bool, default True + Use the library `tqdm` to show the progress bar for the upload, + chunk by chunk. + + *New in version 0.5.0 of pandas-gbq*. + credentials : google.auth.credentials.Credentials, optional + Credentials for accessing Google APIs. Use this parameter to + override default credentials, such as to use Compute Engine + :class:`google.auth.compute_engine.Credentials` or Service + Account :class:`google.oauth2.service_account.Credentials` + directly. + + *New in version 0.8.0 of pandas-gbq*. + + See Also + -------- + pandas_gbq.to_gbq : This function in the pandas-gbq library. + read_gbq : Read a DataFrame from Google BigQuery. + + Examples + -------- + Example taken from `Google BigQuery documentation + `_ + + >>> project_id = "my-project" + >>> table_id = 'my_dataset.my_table' + >>> df = pd.DataFrame({ + ... "my_string": ["a", "b", "c"], + ... "my_int64": [1, 2, 3], + ... "my_float64": [4.0, 5.0, 6.0], + ... "my_bool1": [True, False, True], + ... "my_bool2": [False, True, False], + ... "my_dates": pd.date_range("now", periods=3), + ... } + ... ) + + >>> df.to_gbq(table_id, project_id=project_id) # doctest: +SKIP + """ + from pandas.io import gbq + + gbq.to_gbq( + self, + destination_table, + project_id=project_id, + chunksize=chunksize, + reauth=reauth, + if_exists=if_exists, + auth_local_webserver=auth_local_webserver, + table_schema=table_schema, + location=location, + progress_bar=progress_bar, + credentials=credentials, + ) + + @classmethod + def from_records( + cls, + data, + index=None, + exclude=None, + columns=None, + coerce_float: bool = False, + nrows: int | None = None, + ) -> DataFrame: + """ + Convert structured or record ndarray to DataFrame. + + Creates a DataFrame object from a structured ndarray, sequence of + tuples or dicts, or DataFrame. + + Parameters + ---------- + data : structured ndarray, sequence of tuples or dicts, or DataFrame + Structured input data. + + .. deprecated:: 2.1.0 + Passing a DataFrame is deprecated. + index : str, list of fields, array-like + Field of array to use as the index, alternately a specific set of + input labels to use. + exclude : sequence, default None + Columns or fields to exclude. + columns : sequence, default None + Column names to use. 
If the passed data do not have names + associated with them, this argument provides names for the + columns. Otherwise this argument indicates the order of the columns + in the result (any names not found in the data will become all-NA + columns). + coerce_float : bool, default False + Attempt to convert values of non-string, non-numeric objects (like + decimal.Decimal) to floating point, useful for SQL result sets. + nrows : int, default None + Number of rows to read if data is an iterator. + + Returns + ------- + DataFrame + + See Also + -------- + DataFrame.from_dict : DataFrame from dict of array-like or dicts. + DataFrame : DataFrame object creation using constructor. + + Examples + -------- + Data can be provided as a structured ndarray: + + >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], + ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) + >>> pd.DataFrame.from_records(data) + col_1 col_2 + 0 3 a + 1 2 b + 2 1 c + 3 0 d + + Data can be provided as a list of dicts: + + >>> data = [{'col_1': 3, 'col_2': 'a'}, + ... {'col_1': 2, 'col_2': 'b'}, + ... {'col_1': 1, 'col_2': 'c'}, + ... {'col_1': 0, 'col_2': 'd'}] + >>> pd.DataFrame.from_records(data) + col_1 col_2 + 0 3 a + 1 2 b + 2 1 c + 3 0 d + + Data can be provided as a list of tuples with corresponding columns: + + >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] + >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) + col_1 col_2 + 0 3 a + 1 2 b + 2 1 c + 3 0 d + """ + if isinstance(data, DataFrame): + warnings.warn( + "Passing a DataFrame to DataFrame.from_records is deprecated. Use " + "set_index and/or drop to modify the DataFrame instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if columns is not None: + if is_scalar(columns): + columns = [columns] + data = data[columns] + if index is not None: + data = data.set_index(index) + if exclude is not None: + data = data.drop(columns=exclude) + return data.copy(deep=False) + + result_index = None + + # Make a copy of the input columns so we can modify it + if columns is not None: + columns = ensure_index(columns) + + def maybe_reorder( + arrays: list[ArrayLike], arr_columns: Index, columns: Index, index + ) -> tuple[list[ArrayLike], Index, Index | None]: + """ + If our desired 'columns' do not match the data's pre-existing 'arr_columns', + we re-order our arrays. This is like a pre-emptive (cheap) reindex. 
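# Illustrative sketch (structured array invented): from_records with a field
# promoted to the index, per the parameters documented above.
import numpy as np
import pandas as pd

data = np.array(
    [(3, "a"), (2, "b"), (1, "c")],
    dtype=[("col_1", "i4"), ("col_2", "U1")],
)
df = pd.DataFrame.from_records(data, index="col_1")
print(df)   # col_2 column, indexed by the col_1 field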
+ """ + if len(arrays): + length = len(arrays[0]) + else: + length = 0 + + result_index = None + if len(arrays) == 0 and index is None and length == 0: + result_index = default_index(0) + + arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) + return arrays, arr_columns, result_index + + if is_iterator(data): + if nrows == 0: + return cls() + + try: + first_row = next(data) + except StopIteration: + return cls(index=index, columns=columns) + + dtype = None + if hasattr(first_row, "dtype") and first_row.dtype.names: + dtype = first_row.dtype + + values = [first_row] + + if nrows is None: + values += data + else: + values.extend(itertools.islice(data, nrows - 1)) + + if dtype is not None: + data = np.array(values, dtype=dtype) + else: + data = values + + if isinstance(data, dict): + if columns is None: + columns = arr_columns = ensure_index(sorted(data)) + arrays = [data[k] for k in columns] + else: + arrays = [] + arr_columns_list = [] + for k, v in data.items(): + if k in columns: + arr_columns_list.append(k) + arrays.append(v) + + arr_columns = Index(arr_columns_list) + arrays, arr_columns, result_index = maybe_reorder( + arrays, arr_columns, columns, index + ) + + elif isinstance(data, np.ndarray): + arrays, columns = to_arrays(data, columns) + arr_columns = columns + else: + arrays, arr_columns = to_arrays(data, columns) + if coerce_float: + for i, arr in enumerate(arrays): + if arr.dtype == object: + # error: Argument 1 to "maybe_convert_objects" has + # incompatible type "Union[ExtensionArray, ndarray]"; + # expected "ndarray" + arrays[i] = lib.maybe_convert_objects( + arr, # type: ignore[arg-type] + try_float=True, + ) + + arr_columns = ensure_index(arr_columns) + if columns is None: + columns = arr_columns + else: + arrays, arr_columns, result_index = maybe_reorder( + arrays, arr_columns, columns, index + ) + + if exclude is None: + exclude = set() + else: + exclude = set(exclude) + + if index is not None: + if isinstance(index, str) or not hasattr(index, "__iter__"): + i = columns.get_loc(index) + exclude.add(index) + if len(arrays) > 0: + result_index = Index(arrays[i], name=index) + else: + result_index = Index([], name=index) + else: + try: + index_data = [arrays[arr_columns.get_loc(field)] for field in index] + except (KeyError, TypeError): + # raised by get_loc, see GH#29258 + result_index = index + else: + result_index = ensure_index_from_sequences(index_data, names=index) + exclude.update(index) + + if any(exclude): + arr_exclude = [x for x in exclude if x in arr_columns] + to_remove = [arr_columns.get_loc(col) for col in arr_exclude] + arrays = [v for i, v in enumerate(arrays) if i not in to_remove] + + columns = columns.drop(exclude) + + manager = get_option("mode.data_manager") + mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) + + return cls(mgr) + + def to_records( + self, index: bool = True, column_dtypes=None, index_dtypes=None + ) -> np.rec.recarray: + """ + Convert DataFrame to a NumPy record array. + + Index will be included as the first field of the record array if + requested. + + Parameters + ---------- + index : bool, default True + Include index in resulting record array, stored in 'index' + field or using the index label, if set. + column_dtypes : str, type, dict, default None + If a string or type, the data type to store all columns. If + a dictionary, a mapping of column names and indices (zero-indexed) + to specific data types. 
+
+        index_dtypes : str, type, dict, default None
+            If a string or type, the data type to store all index levels. If
+            a dictionary, a mapping of index level names and indices
+            (zero-indexed) to specific data types.
+
+            This mapping is applied only if `index=True`.
+
+        Returns
+        -------
+        numpy.rec.recarray
+            NumPy ndarray with the DataFrame labels as fields and each row
+            of the DataFrame as entries.
+
+        See Also
+        --------
+        DataFrame.from_records: Convert structured or record ndarray
+            to DataFrame.
+        numpy.rec.recarray: An ndarray that allows field access using
+            attributes, analogous to typed columns in a
+            spreadsheet.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
+        ...                   index=['a', 'b'])
+        >>> df
+           A     B
+        a  1  0.50
+        b  2  0.75
+        >>> df.to_records()
+        rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
+                  dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
+
+        If the DataFrame index has no label then the recarray field name
+        is set to 'index'. If the index has a label then this is used as the
+        field name:
+
+        >>> df.index = df.index.rename("I")
+        >>> df.to_records()
+        rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
+                  dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
+
+        The index can be excluded from the record array:
+
+        >>> df.to_records(index=False)
+        rec.array([(1, 0.5 ), (2, 0.75)],
+                  dtype=[('A', '<i8'), ('B', '<f8')])
+
+        Data types can be specified for the columns:
+
+        >>> df.to_records(column_dtypes={"A": "int32"})
+        rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
+                  dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
+
+        As well as for the index:
+
+        >>> df.to_records(index_dtypes="<S2")
+        rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
+                  dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
+
+        >>> index_dtypes = f"<S{df.index.str.len().max()}"
+        >>> df.to_records(index_dtypes=index_dtypes)
+        rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
+                  dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
+        """
+        if index:
+            ix_vals = [
+                np.asarray(self.index.get_level_values(i))
+                for i in range(self.index.nlevels)
+            ]
+
+            arrays = ix_vals + [
+                np.asarray(self.iloc[:, i]) for i in range(len(self.columns))
+            ]
+
+            index_names = list(self.index.names)
+
+            if isinstance(self.index, MultiIndex):
+                index_names = com.fill_missing_names(index_names)
+            elif index_names[0] is None:
+                index_names = ["index"]
+
+            names = [str(name) for name in itertools.chain(index_names, self.columns)]
+        else:
+            arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))]
+            names = [str(c) for c in self.columns]
+            index_names = []
+
+        index_len = len(index_names)
+        formats = []
+
+        for i, v in enumerate(arrays):
+            index_int = i
+
+            # When the names and arrays are collected, we
+            # first collect those in the DataFrame's index,
+            # followed by those in its columns.
+            #
+            # Thus, the total length of the array is:
+            # len(index_names) + len(DataFrame.columns).
+            #
+            # This check allows us to see whether we are
+            # handling a name / array in the index or column.
+            if index_int < index_len:
+                dtype_mapping = index_dtypes
+                name = index_names[index_int]
+            else:
+                index_int -= index_len
+                dtype_mapping = column_dtypes
+                name = self.columns[index_int]
+
+            # We have a dictionary, so we get the data type
+            # associated with the index or column (which can
+            # be denoted by its name in the DataFrame or its
+            # position in DataFrame's array of indices or
+            # columns, whichever is applicable.
+            if is_dict_like(dtype_mapping):
+                if name in dtype_mapping:
+                    dtype_mapping = dtype_mapping[name]
+                elif index_int in dtype_mapping:
+                    dtype_mapping = dtype_mapping[index_int]
+                else:
+                    dtype_mapping = None
+
+            # If no mapping can be found, use the array's
+            # dtype attribute for formatting.
+            #
+            # A valid dtype must either be a type or
+            # string naming a type.
+            if dtype_mapping is None:
+                formats.append(v.dtype)
+            elif isinstance(dtype_mapping, (type, np.dtype, str)):
+                # error: Argument 1 to "append" of "list" has incompatible
+                # type "Union[type, dtype[Any], str]"; expected "dtype[Any]"
+                formats.append(dtype_mapping)  # type: ignore[arg-type]
+            else:
+                element = "row" if i < index_len else "column"
+                msg = f"Invalid dtype {dtype_mapping} found for {element} {name}"
+                raise ValueError(msg)
+
+        return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
+
+    @classmethod
+    def _from_arrays(
+        cls,
+        arrays,
+        columns,
+        index,
+        dtype: Dtype | None = None,
+        verify_integrity: bool = True,
+    ) -> Self:
+        """
+        Create DataFrame from a list of arrays corresponding to the columns.
+
+        Parameters
+        ----------
+        arrays : list-like of arrays
+            Each array in the list corresponds to one column, in order.
+        columns : list-like, Index
+            The column names for the resulting DataFrame.
+        index : list-like, Index
+            The row labels for the resulting DataFrame.
+        dtype : dtype, optional
+            Optional dtype to enforce for all arrays.
+        verify_integrity : bool, default True
+            Validate and homogenize all input. If set to False, it is assumed
+            that all elements of `arrays` are actual arrays, as they will be
+            stored in a block (numpy ndarray or ExtensionArray), have the same
+            length as and are aligned with the index, and that `columns` and
+            `index` are ensured to be an Index object.
+
+        Returns
+        -------
+        DataFrame
+        """
+        if dtype is not None:
+            dtype = pandas_dtype(dtype)
+
+        manager = get_option("mode.data_manager")
+        columns = ensure_index(columns)
+        if len(columns) != len(arrays):
+            raise ValueError("len(columns) must match len(arrays)")
+        mgr = arrays_to_mgr(
+            arrays,
+            columns,
+            index,
+            dtype=dtype,
+            verify_integrity=verify_integrity,
+            typ=manager,
+        )
+        return cls(mgr)
+
+    @doc(
+        storage_options=_shared_docs["storage_options"],
+        compression_options=_shared_docs["compression_options"] % "path",
+    )
+    def to_stata(
+        self,
+        path: FilePath | WriteBuffer[bytes],
+        *,
+        convert_dates: dict[Hashable, str] | None = None,
+        write_index: bool = True,
+        byteorder: ToStataByteorder | None = None,
+        time_stamp: datetime.datetime | None = None,
+        data_label: str | None = None,
+        variable_labels: dict[Hashable, str] | None = None,
+        version: int | None = 114,
+        convert_strl: Sequence[Hashable] | None = None,
+        compression: CompressionOptions = "infer",
+        storage_options: StorageOptions | None = None,
+        value_labels: dict[Hashable, dict[float, str]] | None = None,
+    ) -> None:
+        """
+        Export DataFrame object to Stata dta format.
+
+        Writes the DataFrame to a Stata dataset file.
+        "dta" files contain a Stata dataset.
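+
+        For example (illustrative only), ``df.to_stata("out.dta", version=118,
+        convert_dates={{"when": "td"}})`` would write a Stata 14+ file in which
+        a datetime column ``"when"`` is stored with day ('td') resolution; the
+        individual options are described under Parameters below.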
+
+        Parameters
+        ----------
+        path : str, path object, or buffer
+            String, path object (implementing ``os.PathLike[str]``), or file-like
+            object implementing a binary ``write()`` function.
+
+        convert_dates : dict
+            Dictionary mapping columns containing datetime types to stata
+            internal format to use when writing the dates. Options are 'tc',
+            'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
+            or a name. Datetime columns that do not have a conversion type
+            specified will be converted to 'tc'. Raises NotImplementedError if
+            a datetime column has timezone information.
+        write_index : bool
+            Write the index to Stata dataset.
+        byteorder : str
+            Can be ">", "<", "little", or "big". Default is `sys.byteorder`.
+        time_stamp : datetime
+            A datetime to use as file creation date. Default is the current
+            time.
+        data_label : str, optional
+            A label for the data set. Must be 80 characters or smaller.
+        variable_labels : dict
+            Dictionary containing columns as keys and variable labels as
+            values. Each label must be 80 characters or smaller.
+        version : {{114, 117, 118, 119, None}}, default 114
+            Version to use in the output dta file. Set to None to let pandas
+            decide between 118 or 119 formats depending on the number of
+            columns in the frame. Version 114 can be read by Stata 10 and
+            later. Version 117 can be read by Stata 13 or later. Version 118
+            is supported in Stata 14 and later. Version 119 is supported in
+            Stata 15 and later. Version 114 limits string variables to 244
+            characters or fewer while versions 117 and later allow strings
+            with lengths up to 2,000,000 characters. Versions 118 and 119
+            support Unicode characters, and version 119 supports more than
+            32,767 variables.
+
+            Version 119 should usually only be used when the number of
+            variables exceeds the capacity of dta format 118. Exporting
+            smaller datasets in format 119 may have unintended consequences,
+            and, as of November 2020, Stata SE cannot read version 119 files.
+
+        convert_strl : list, optional
+            List of column names to convert to string columns to Stata StrL
+            format. Only available if version is 117. Storing strings in the
+            StrL format can produce smaller dta files if strings have more than
+            8 characters and values are repeated.
+        {compression_options}
+
+            .. versionchanged:: 1.4.0 Zstandard support.
+
+        {storage_options}
+
+            .. versionadded:: 1.2.0
+
+        value_labels : dict of dicts
+            Dictionary containing columns as keys and dictionaries of column value
+            to labels as values. Labels for a single variable must be 32,000
+            characters or smaller.
+
+            .. versionadded:: 1.4.0
+
+        Raises
+        ------
+        NotImplementedError
+            * If datetimes contain timezone information
+            * Column dtype is not representable in Stata
+        ValueError
+            * Columns listed in convert_dates are neither datetime64[ns]
+              nor datetime.datetime
+            * Column listed in convert_dates is not in DataFrame
+            * Categorical label contains more than 32,000 characters
+
+        See Also
+        --------
+        read_stata : Import Stata data files.
+        io.stata.StataWriter : Low-level writer for Stata data files.
+        io.stata.StataWriter117 : Low-level writer for version 117 files.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
+        ...                               'parrot'],
+        ...                     'speed': [350, 18, 361, 15]}})
+        >>> df.to_stata('animals.dta')  # doctest: +SKIP
+        """
+        if version not in (114, 117, 118, 119, None):
+            raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
+        if version == 114:
+            if convert_strl is not None:
+                raise ValueError("strl is not supported in format 114")
+            from pandas.io.stata import StataWriter as statawriter
+        elif version == 117:
+            # Incompatible import of "statawriter" (imported name has type
+            # "Type[StataWriter117]", local name has type "Type[StataWriter]")
+            from pandas.io.stata import (  # type: ignore[assignment]
+                StataWriter117 as statawriter,
+            )
+        else:  # versions 118 and 119
+            # Incompatible import of "statawriter" (imported name has type
+            # "Type[StataWriter117]", local name has type "Type[StataWriter]")
+            from pandas.io.stata import (  # type: ignore[assignment]
+                StataWriterUTF8 as statawriter,
+            )
+
+        kwargs: dict[str, Any] = {}
+        if version is None or version >= 117:
+            # strl conversion is only supported >= 117
+            kwargs["convert_strl"] = convert_strl
+        if version is None or version >= 118:
+            # Specifying the version is only supported for UTF8 (118 or 119)
+            kwargs["version"] = version
+
+        writer = statawriter(
+            path,
+            self,
+            convert_dates=convert_dates,
+            byteorder=byteorder,
+            time_stamp=time_stamp,
+            data_label=data_label,
+            write_index=write_index,
+            variable_labels=variable_labels,
+            compression=compression,
+            storage_options=storage_options,
+            value_labels=value_labels,
+            **kwargs,
+        )
+        writer.write_file()
+
+    def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None:
+        """
+        Write a DataFrame to the binary Feather format.
+
+        Parameters
+        ----------
+        path : str, path object, file-like object
+            String, path object (implementing ``os.PathLike[str]``), or file-like
+            object implementing a binary ``write()`` function. If a string or a path,
+            it will be used as Root Directory path when writing a partitioned dataset.
+        **kwargs :
+            Additional keywords passed to :func:`pyarrow.feather.write_feather`.
+            This includes the `compression`, `compression_level`, `chunksize`
+            and `version` keywords.
+
+        Notes
+        -----
+        This function writes the dataframe as a `feather file
+        <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default
+        index. For saving the DataFrame with your custom index use a method that
+        supports custom indices e.g. `to_parquet`.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
+        >>> df.to_feather("file.feather")  # doctest: +SKIP
+        """
+        from pandas.io.feather_format import to_feather
+
+        to_feather(self, path, **kwargs)
+
+    @doc(
+        Series.to_markdown,
+        klass=_shared_doc_kwargs["klass"],
+        storage_options=_shared_docs["storage_options"],
+        examples="""Examples
+        --------
+        >>> df = pd.DataFrame(
+        ...     data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
+        ... )
+        >>> print(df.to_markdown())
+        |    | animal_1   | animal_2   |
+        |---:|:-----------|:-----------|
+        |  0 | elk        | dog        |
+        |  1 | pig        | quetzal    |
+
+        Output markdown with a tabulate option.
+
+        >>> print(df.to_markdown(tablefmt="grid"))
+        +----+------------+------------+
+        |    | animal_1   | animal_2   |
+        +====+============+============+
+        |  0 | elk        | dog        |
+        +----+------------+------------+
+        |  1 | pig        | quetzal    |
+        +----+------------+------------+""",
+    )
+    def to_markdown(
+        self,
+        buf: FilePath | WriteBuffer[str] | None = None,
+        mode: str = "wt",
+        index: bool = True,
+        storage_options: StorageOptions | None = None,
+        **kwargs,
+    ) -> str | None:
+        if "showindex" in kwargs:
+            raise ValueError("Pass 'index' instead of 'showindex'.")
+
+        kwargs.setdefault("headers", "keys")
+        kwargs.setdefault("tablefmt", "pipe")
+        kwargs.setdefault("showindex", index)
+        tabulate = import_optional_dependency("tabulate")
+        result = tabulate.tabulate(self, **kwargs)
+        if buf is None:
+            return result
+
+        with get_handle(buf, mode, storage_options=storage_options) as handles:
+            handles.handle.write(result)
+        return None
+
+    @overload
+    def to_parquet(
+        self,
+        path: None = ...,
+        engine: Literal["auto", "pyarrow", "fastparquet"] = ...,
+        compression: str | None = ...,
+        index: bool | None = ...,
+        partition_cols: list[str] | None = ...,
+        storage_options: StorageOptions = ...,
+        **kwargs,
+    ) -> bytes:
+        ...
+
+    @overload
+    def to_parquet(
+        self,
+        path: FilePath | WriteBuffer[bytes],
+        engine: Literal["auto", "pyarrow", "fastparquet"] = ...,
+        compression: str | None = ...,
+        index: bool | None = ...,
+        partition_cols: list[str] | None = ...,
+        storage_options: StorageOptions = ...,
+        **kwargs,
+    ) -> None:
+        ...
+
+    @doc(storage_options=_shared_docs["storage_options"])
+    def to_parquet(
+        self,
+        path: FilePath | WriteBuffer[bytes] | None = None,
+        engine: Literal["auto", "pyarrow", "fastparquet"] = "auto",
+        compression: str | None = "snappy",
+        index: bool | None = None,
+        partition_cols: list[str] | None = None,
+        storage_options: StorageOptions | None = None,
+        **kwargs,
+    ) -> bytes | None:
+        """
+        Write a DataFrame to the binary parquet format.
+
+        This function writes the dataframe as a `parquet file
+        <https://parquet.apache.org/>`_. You can choose different parquet
+        backends, and have the option of compression. See
+        :ref:`the user guide <io.parquet>` for more details.
+
+        Parameters
+        ----------
+        path : str, path object, file-like object, or None, default None
+            String, path object (implementing ``os.PathLike[str]``), or file-like
+            object implementing a binary ``write()`` function. If None, the result is
+            returned as bytes. If a string or path, it will be used as Root Directory
+            path when writing a partitioned dataset.
+
+            .. versionchanged:: 1.2.0
+
+            Previously this was "fname"
+
+        engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
+            Parquet library to use. If 'auto', then the option
+            ``io.parquet.engine`` is used. The default ``io.parquet.engine``
+            behavior is to try 'pyarrow', falling back to 'fastparquet' if
+            'pyarrow' is unavailable.
+        compression : str or None, default 'snappy'
+            Name of the compression to use. Use ``None`` for no compression.
+            Supported options: 'snappy', 'gzip', 'brotli', 'lz4', 'zstd'.
+        index : bool, default None
+            If ``True``, include the dataframe's index(es) in the file output.
+            If ``False``, they will not be written to the file.
+            If ``None``, similar to ``True`` the dataframe's index(es)
+            will be saved. However, instead of being saved as values,
+            the RangeIndex will be stored as a range in the metadata so it
+            doesn't require much space and is faster. Other indexes will
+            be included as columns in the file output.
+
+        partition_cols : list, optional, default None
+            Column names by which to partition the dataset.
+            Columns are partitioned in the order they are given.
+            Must be None if path is not a string.
+        {storage_options}
+
+            .. versionadded:: 1.2.0
+
+        **kwargs
+            Additional arguments passed to the parquet library. See
+            :ref:`pandas io <io.parquet>` for more details.
+
+        Returns
+        -------
+        bytes if no path argument is provided else None
+
+        See Also
+        --------
+        read_parquet : Read a parquet file.
+        DataFrame.to_orc : Write an orc file.
+        DataFrame.to_csv : Write a csv file.
+        DataFrame.to_sql : Write to a sql table.
+        DataFrame.to_hdf : Write to hdf.
+
+        Notes
+        -----
+        This function requires either the `fastparquet
+        <https://pypi.org/project/fastparquet>`_ or `pyarrow
+        <https://arrow.apache.org/docs/python/>`_ library.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}})
+        >>> df.to_parquet('df.parquet.gzip',
+        ...               compression='gzip')  # doctest: +SKIP
+        >>> pd.read_parquet('df.parquet.gzip')  # doctest: +SKIP
+           col1  col2
+        0     1     3
+        1     2     4
+
+        If you want to get a buffer to the parquet content you can use an io.BytesIO
+        object, as long as you don't use partition_cols, which creates multiple files.
+
+        >>> import io
+        >>> f = io.BytesIO()
+        >>> df.to_parquet(f)
+        >>> f.seek(0)
+        0
+        >>> content = f.read()
+        """
+        from pandas.io.parquet import to_parquet
+
+        return to_parquet(
+            self,
+            path,
+            engine,
+            compression=compression,
+            index=index,
+            partition_cols=partition_cols,
+            storage_options=storage_options,
+            **kwargs,
+        )
+
+    def to_orc(
+        self,
+        path: FilePath | WriteBuffer[bytes] | None = None,
+        *,
+        engine: Literal["pyarrow"] = "pyarrow",
+        index: bool | None = None,
+        engine_kwargs: dict[str, Any] | None = None,
+    ) -> bytes | None:
+        """
+        Write a DataFrame to the ORC format.
+
+        .. versionadded:: 1.5.0
+
+        Parameters
+        ----------
+        path : str, file-like object or None, default None
+            If a string, it will be used as Root Directory path
+            when writing a partitioned dataset. By file-like object,
+            we refer to objects with a write() method, such as a file handle
+            (e.g. via builtin open function). If path is None,
+            a bytes object is returned.
+        engine : {'pyarrow'}, default 'pyarrow'
+            ORC library to use. Pyarrow must be >= 7.0.0.
+        index : bool, optional
+            If ``True``, include the dataframe's index(es) in the file output.
+            If ``False``, they will not be written to the file.
+            If ``None``, similar to ``infer`` the dataframe's index(es)
+            will be saved. However, instead of being saved as values,
+            the RangeIndex will be stored as a range in the metadata so it
+            doesn't require much space and is faster. Other indexes will
+            be included as columns in the file output.
+        engine_kwargs : dict[str, Any] or None, default None
+            Additional keyword arguments passed to :func:`pyarrow.orc.write_table`.
+
+        Returns
+        -------
+        bytes if no path argument is provided else None
+
+        Raises
+        ------
+        NotImplementedError
+            Dtype of one or more columns is category, unsigned integers, interval,
+            period or sparse.
+        ValueError
+            engine is not pyarrow.
+
+        See Also
+        --------
+        read_orc : Read an ORC file.
+        DataFrame.to_parquet : Write a parquet file.
+        DataFrame.to_csv : Write a csv file.
+        DataFrame.to_sql : Write to a sql table.
+        DataFrame.to_hdf : Write to hdf.
+
+        Notes
+        -----
+        * Before using this function you should read the :ref:`user guide about
+          ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`.
+        * This function requires the `pyarrow <https://arrow.apache.org/docs/python/>`_
+          library.
+        * For supported dtypes please refer to `supported ORC features in Arrow
+          <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__.
+ * Currently timezones in datetime columns are not preserved when a + dataframe is converted into ORC files. + + Examples + -------- + >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) + >>> df.to_orc('df.orc') # doctest: +SKIP + >>> pd.read_orc('df.orc') # doctest: +SKIP + col1 col2 + 0 1 4 + 1 2 3 + + If you want to get a buffer to the orc content you can write it to io.BytesIO + + >>> import io + >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP + >>> b.seek(0) # doctest: +SKIP + 0 + >>> content = b.read() # doctest: +SKIP + """ + from pandas.io.orc import to_orc + + return to_orc( + self, path, engine=engine, index=index, engine_kwargs=engine_kwargs + ) + + @overload + def to_html( + self, + buf: FilePath | WriteBuffer[str], + columns: Axes | None = ..., + col_space: ColspaceArgType | None = ..., + header: bool = ..., + index: bool = ..., + na_rep: str = ..., + formatters: FormattersType | None = ..., + float_format: FloatFormatType | None = ..., + sparsify: bool | None = ..., + index_names: bool = ..., + justify: str | None = ..., + max_rows: int | None = ..., + max_cols: int | None = ..., + show_dimensions: bool | str = ..., + decimal: str = ..., + bold_rows: bool = ..., + classes: str | list | tuple | None = ..., + escape: bool = ..., + notebook: bool = ..., + border: int | bool | None = ..., + table_id: str | None = ..., + render_links: bool = ..., + encoding: str | None = ..., + ) -> None: + ... + + @overload + def to_html( + self, + buf: None = ..., + columns: Axes | None = ..., + col_space: ColspaceArgType | None = ..., + header: bool = ..., + index: bool = ..., + na_rep: str = ..., + formatters: FormattersType | None = ..., + float_format: FloatFormatType | None = ..., + sparsify: bool | None = ..., + index_names: bool = ..., + justify: str | None = ..., + max_rows: int | None = ..., + max_cols: int | None = ..., + show_dimensions: bool | str = ..., + decimal: str = ..., + bold_rows: bool = ..., + classes: str | list | tuple | None = ..., + escape: bool = ..., + notebook: bool = ..., + border: int | bool | None = ..., + table_id: str | None = ..., + render_links: bool = ..., + encoding: str | None = ..., + ) -> str: + ... + + @Substitution( + header_type="bool", + header="Whether to print column labels, default True", + col_space_type="str or int, list or dict of int or str", + col_space="The minimum width of each column in CSS length " + "units. An int is assumed to be px units.", + ) + @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) + def to_html( + self, + buf: FilePath | WriteBuffer[str] | None = None, + columns: Axes | None = None, + col_space: ColspaceArgType | None = None, + header: bool = True, + index: bool = True, + na_rep: str = "NaN", + formatters: FormattersType | None = None, + float_format: FloatFormatType | None = None, + sparsify: bool | None = None, + index_names: bool = True, + justify: str | None = None, + max_rows: int | None = None, + max_cols: int | None = None, + show_dimensions: bool | str = False, + decimal: str = ".", + bold_rows: bool = True, + classes: str | list | tuple | None = None, + escape: bool = True, + notebook: bool = False, + border: int | bool | None = None, + table_id: str | None = None, + render_links: bool = False, + encoding: str | None = None, + ) -> str | None: + """ + Render a DataFrame as an HTML table. + %(shared_params)s + bold_rows : bool, default True + Make the row labels bold in the output. 
+        classes : str or list or tuple, default None
+            CSS class(es) to apply to the resulting html table.
+        escape : bool, default True
+            Convert the characters <, >, and & to HTML-safe sequences.
+        notebook : {True, False}, default False
+            Whether the generated HTML is for IPython Notebook.
+        border : int
+            A ``border=border`` attribute is included in the opening
+            `<table>` tag. Default ``pd.options.display.html.border``.
+        table_id : str, optional
+            A css id is included in the opening `<table>` tag if specified.
+        render_links : bool, default False
+            Convert URLs to HTML links.
+        encoding : str, default "utf-8"
+            Set character encoding.
+        %(returns)s
+        See Also
+        --------
+        to_string : Convert DataFrame to a string.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]})
+        >>> html_string = '''<table border="1" class="dataframe">
+        ...   <thead>
+        ...     <tr style="text-align: right;">
+        ...       <th></th>
+        ...       <th>col1</th>
+        ...       <th>col2</th>
+        ...     </tr>
+        ...   </thead>
+        ...   <tbody>
+        ...     <tr>
+        ...       <th>0</th>
+        ...       <td>1</td>
+        ...       <td>4</td>
+        ...     </tr>
+        ...     <tr>
+        ...       <th>1</th>
+        ...       <td>2</td>
+        ...       <td>3</td>
+        ...     </tr>
+        ...   </tbody>
+        ... </table>'''
+        >>> assert html_string == df.to_html()
+        """
+        if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
+            raise ValueError("Invalid value for justify parameter")
+
+        formatter = fmt.DataFrameFormatter(
+            self,
+            columns=columns,
+            col_space=col_space,
+            na_rep=na_rep,
+            header=header,
+            index=index,
+            formatters=formatters,
+            float_format=float_format,
+            bold_rows=bold_rows,
+            sparsify=sparsify,
+            justify=justify,
+            index_names=index_names,
+            escape=escape,
+            decimal=decimal,
+            max_rows=max_rows,
+            max_cols=max_cols,
+            show_dimensions=show_dimensions,
+        )
+        # TODO: a generic formatter would be in DataFrameFormatter
+        return fmt.DataFrameRenderer(formatter).to_html(
+            buf=buf,
+            classes=classes,
+            notebook=notebook,
+            border=border,
+            encoding=encoding,
+            table_id=table_id,
+            render_links=render_links,
+        )
+
+    @doc(
+        storage_options=_shared_docs["storage_options"],
+        compression_options=_shared_docs["compression_options"] % "path_or_buffer",
+    )
+    def to_xml(
+        self,
+        path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
+        index: bool = True,
+        root_name: str | None = "data",
+        row_name: str | None = "row",
+        na_rep: str | None = None,
+        attr_cols: list[str] | None = None,
+        elem_cols: list[str] | None = None,
+        namespaces: dict[str | None, str] | None = None,
+        prefix: str | None = None,
+        encoding: str = "utf-8",
+        xml_declaration: bool | None = True,
+        pretty_print: bool | None = True,
+        parser: XMLParsers | None = "lxml",
+        stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None,
+        compression: CompressionOptions = "infer",
+        storage_options: StorageOptions | None = None,
+    ) -> str | None:
+        """
+        Render a DataFrame to an XML document.
+
+        .. versionadded:: 1.3.0
+
+        Parameters
+        ----------
+        path_or_buffer : str, path object, file-like object, or None, default None
+            String, path object (implementing ``os.PathLike[str]``), or file-like
+            object implementing a ``write()`` function. If None, the result is returned
+            as a string.
+        index : bool, default True
+            Whether to include index in XML document.
+        root_name : str, default 'data'
+            The name of root element in XML document.
+        row_name : str, default 'row'
+            The name of row element in XML document.
+        na_rep : str, optional
+            Missing data representation.
+        attr_cols : list-like, optional
+            List of columns to write as attributes in row element.
+            Hierarchical columns will be flattened with underscore
+            delimiting the different levels.
+        elem_cols : list-like, optional
+            List of columns to write as children in row element. By default,
+            all columns output as children of row element. Hierarchical
+            columns will be flattened with underscore delimiting the
+            different levels.
+        namespaces : dict, optional
+            All namespaces to be defined in root element. Keys of dict
+            should be prefix names and values of dict corresponding URIs.
+            Default namespaces should be given empty string key. For
+            example, ::
+
+                namespaces = {{"": "https://example.com"}}
+
+        prefix : str, optional
+            Namespace prefix to be used for every element and/or attribute
+            in document. This should be one of the keys in ``namespaces``
+            dict.
+        encoding : str, default 'utf-8'
+            Encoding of the resulting document.
+        xml_declaration : bool, default True
+            Whether to include the XML declaration at start of document.
+        pretty_print : bool, default True
+            Whether output should be pretty printed with indentation and
+            line breaks.
+        parser : {{'lxml','etree'}}, default 'lxml'
+            Parser module to use for building of tree. Only 'lxml' and
+            'etree' are supported. With 'lxml', the ability to use XSLT
+            stylesheet is supported.
+        stylesheet : str, path object or file-like object, optional
+            A URL, file-like object, or a raw string containing an XSLT
+            script used to transform the raw XML output. Script should use
+            layout of elements and attributes from original output. This
+            argument requires ``lxml`` to be installed. Only XSLT 1.0
+            scripts and not later versions are currently supported.
+        {compression_options}
+
+            .. versionchanged:: 1.4.0 Zstandard support.
+
+        {storage_options}
+
+        Returns
+        -------
+        None or str
+            If ``path_or_buffer`` is None, returns the resulting XML format as a
+            string. Otherwise returns None.
+
+        See Also
+        --------
+        to_json : Convert the pandas object to a JSON string.
+        to_html : Convert DataFrame to HTML.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'],
+        ...                    'degrees': [360, 360, 180],
+        ...                    'sides': [4, np.nan, 3]}})
+
+        >>> df.to_xml()  # doctest: +SKIP
+        <?xml version='1.0' encoding='utf-8'?>
+        <data>
+          <row>
+            <index>0</index>
+            <shape>square</shape>
+            <degrees>360</degrees>
+            <sides>4.0</sides>
+          </row>
+          <row>
+            <index>1</index>
+            <shape>circle</shape>
+            <degrees>360</degrees>
+            <sides/>
+          </row>
+          <row>
+            <index>2</index>
+            <shape>triangle</shape>
+            <degrees>180</degrees>
+            <sides>3.0</sides>
+          </row>
+        </data>
+
+        >>> df.to_xml(attr_cols=[
+        ...           'index', 'shape', 'degrees', 'sides'
+        ...           ])  # doctest: +SKIP
+        <?xml version='1.0' encoding='utf-8'?>
+        <data>
+          <row index="0" shape="square" degrees="360" sides="4.0"/>
+          <row index="1" shape="circle" degrees="360"/>
+          <row index="2" shape="triangle" degrees="180" sides="3.0"/>
+        </data>
+
+        >>> df.to_xml(namespaces={{"doc": "https://example.com"}},
+        ...           prefix="doc")  # doctest: +SKIP
+        <?xml version='1.0' encoding='utf-8'?>
+        <doc:data xmlns:doc="https://example.com">
+          <doc:row>
+            <doc:index>0</doc:index>
+            <doc:shape>square</doc:shape>
+            <doc:degrees>360</doc:degrees>
+            <doc:sides>4.0</doc:sides>
+          </doc:row>
+          <doc:row>
+            <doc:index>1</doc:index>
+            <doc:shape>circle</doc:shape>
+            <doc:degrees>360</doc:degrees>
+            <doc:sides/>
+          </doc:row>
+          <doc:row>
+            <doc:index>2</doc:index>
+            <doc:shape>triangle</doc:shape>
+            <doc:degrees>180</doc:degrees>
+            <doc:sides>3.0</doc:sides>
+          </doc:row>
+        </doc:data>
+        """
+
+        from pandas.io.formats.xml import (
+            EtreeXMLFormatter,
+            LxmlXMLFormatter,
+        )
+
+        lxml = import_optional_dependency("lxml.etree", errors="ignore")
+
+        TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter]
+
+        if parser == "lxml":
+            if lxml is not None:
+                TreeBuilder = LxmlXMLFormatter
+            else:
+                raise ImportError(
+                    "lxml not found, please install or use the etree parser."
+                )
+
+        elif parser == "etree":
+            TreeBuilder = EtreeXMLFormatter
+
+        else:
+            raise ValueError("Values for parser can only be lxml or etree.")
+
+        xml_formatter = TreeBuilder(
+            self,
+            path_or_buffer=path_or_buffer,
+            index=index,
+            root_name=root_name,
+            row_name=row_name,
+            na_rep=na_rep,
+            attr_cols=attr_cols,
+            elem_cols=elem_cols,
+            namespaces=namespaces,
+            prefix=prefix,
+            encoding=encoding,
+            xml_declaration=xml_declaration,
+            pretty_print=pretty_print,
+            stylesheet=stylesheet,
+            compression=compression,
+            storage_options=storage_options,
+        )
+
+        return xml_formatter.write_output()
+
+    # ----------------------------------------------------------------------
+    @doc(INFO_DOCSTRING, **frame_sub_kwargs)
+    def info(
+        self,
+        verbose: bool | None = None,
+        buf: WriteBuffer[str] | None = None,
+        max_cols: int | None = None,
+        memory_usage: bool | str | None = None,
+        show_counts: bool | None = None,
+    ) -> None:
+        info = DataFrameInfo(
+            data=self,
+            memory_usage=memory_usage,
+        )
+        info.render(
+            buf=buf,
+            max_cols=max_cols,
+            verbose=verbose,
+            show_counts=show_counts,
+        )
+
+    def memory_usage(self, index: bool = True, deep: bool = False) -> Series:
+        """
+        Return the memory usage of each column in bytes.
+
+        The memory usage can optionally include the contribution of
+        the index and elements of `object` dtype.
+
+        This value is displayed in `DataFrame.info` by default. This can be
+        suppressed by setting ``pandas.options.display.memory_usage`` to False.
+
+        Parameters
+        ----------
+        index : bool, default True
+            Specifies whether to include the memory usage of the DataFrame's
+            index in returned Series. If ``index=True``, the memory usage of
+            the index is the first item in the output.
+
+        deep : bool, default False
+            If True, introspect the data deeply by interrogating
+            `object` dtypes for system-level memory consumption, and include
+            it in the returned values.
+
+        Returns
+        -------
+        Series
+            A Series whose index is the original column names and whose values
+            are the memory usage of each column in bytes.
+
+        See Also
+        --------
+        numpy.ndarray.nbytes : Total bytes consumed by the elements of an
+            ndarray.
+        Series.memory_usage : Bytes consumed by a Series.
+        Categorical : Memory-efficient array for string values with
+            many repeated values.
+        DataFrame.info : Concise summary of a DataFrame.
+
+        Notes
+        -----
+        See the :ref:`Frequently Asked Questions <df-memory-usage>` for more
+        details.
+
+        Examples
+        --------
+        >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
+        >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))
+        ...              for t in dtypes])
+        >>> df = pd.DataFrame(data)
+        >>> df.head()
+           int64  float64            complex128  object  bool
+        0      1      1.0              1.0+0.0j       1  True
+        1      1      1.0              1.0+0.0j       1  True
+        2      1      1.0              1.0+0.0j       1  True
+        3      1      1.0              1.0+0.0j       1  True
+        4      1      1.0              1.0+0.0j       1  True
+
+        >>> df.memory_usage()
+        Index           128
+        int64         40000
+        float64       40000
+        complex128    80000
+        object        40000
+        bool           5000
+        dtype: int64
+
+        >>> df.memory_usage(index=False)
+        int64         40000
+        float64       40000
+        complex128    80000
+        object        40000
+        bool           5000
+        dtype: int64
+
+        The memory footprint of `object` dtype columns is ignored by default:
+
+        >>> df.memory_usage(deep=True)
+        Index            128
+        int64          40000
+        float64        40000
+        complex128     80000
+        object        180000
+        bool            5000
+        dtype: int64
+
+        Use a Categorical for efficient storage of an object-dtype column with
+        many repeated values.
+
+        >>> df['object'].astype('category').memory_usage(deep=True)
+        5244
+        """
+        result = self._constructor_sliced(
+            [c.memory_usage(index=False, deep=deep) for col, c in self.items()],
+            index=self.columns,
+            dtype=np.intp,
+        )
+        if index:
+            index_memory_usage = self._constructor_sliced(
+                self.index.memory_usage(deep=deep), index=["Index"]
+            )
+            result = index_memory_usage._append(result)
+        return result
+
+    def transpose(self, *args, copy: bool = False) -> DataFrame:
+        """
+        Transpose index and columns.
+
+        Reflect the DataFrame over its main diagonal by writing rows as columns
+        and vice-versa. The property :attr:`.T` is an accessor to the method
+        :meth:`transpose`.
+
+        Parameters
+        ----------
+        *args : tuple, optional
+            Accepted for compatibility with NumPy.
+        copy : bool, default False
+            Whether to copy the data after transposing, even for DataFrames
+            with a single dtype.
+
+            Note that a copy is always required for mixed dtype DataFrames,
+            or for DataFrames with any extension types.
+
+        Returns
+        -------
+        DataFrame
+            The transposed DataFrame.
+
+        See Also
+        --------
+        numpy.transpose : Permute the dimensions of a given array.
+
+        Notes
+        -----
+        Transposing a DataFrame with mixed dtypes will result in a homogeneous
+        DataFrame with the `object` dtype. In such a case, a copy of the data
+        is always made.
+ + Examples + -------- + **Square DataFrame with homogeneous dtype** + + >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} + >>> df1 = pd.DataFrame(data=d1) + >>> df1 + col1 col2 + 0 1 3 + 1 2 4 + + >>> df1_transposed = df1.T # or df1.transpose() + >>> df1_transposed + 0 1 + col1 1 2 + col2 3 4 + + When the dtype is homogeneous in the original DataFrame, we get a + transposed DataFrame with the same dtype: + + >>> df1.dtypes + col1 int64 + col2 int64 + dtype: object + >>> df1_transposed.dtypes + 0 int64 + 1 int64 + dtype: object + + **Non-square DataFrame with mixed dtypes** + + >>> d2 = {'name': ['Alice', 'Bob'], + ... 'score': [9.5, 8], + ... 'employed': [False, True], + ... 'kids': [0, 0]} + >>> df2 = pd.DataFrame(data=d2) + >>> df2 + name score employed kids + 0 Alice 9.5 False 0 + 1 Bob 8.0 True 0 + + >>> df2_transposed = df2.T # or df2.transpose() + >>> df2_transposed + 0 1 + name Alice Bob + score 9.5 8.0 + employed False True + kids 0 0 + + When the DataFrame has mixed dtypes, we get a transposed DataFrame with + the `object` dtype: + + >>> df2.dtypes + name object + score float64 + employed bool + kids int64 + dtype: object + >>> df2_transposed.dtypes + 0 object + 1 object + dtype: object + """ + nv.validate_transpose(args, {}) + # construct the args + + dtypes = list(self.dtypes) + + if self._can_fast_transpose: + # Note: tests pass without this, but this improves perf quite a bit. + new_vals = self._values.T + if copy and not using_copy_on_write(): + new_vals = new_vals.copy() + + result = self._constructor( + new_vals, + index=self.columns, + columns=self.index, + copy=False, + dtype=new_vals.dtype, + ) + if using_copy_on_write() and len(self) > 0: + result._mgr.add_references(self._mgr) # type: ignore[arg-type] + + elif ( + self._is_homogeneous_type + and dtypes + and isinstance(dtypes[0], ExtensionDtype) + ): + new_values: list + if isinstance(dtypes[0], BaseMaskedDtype): + # We have masked arrays with the same dtype. We can transpose faster. + from pandas.core.arrays.masked import ( + transpose_homogeneous_masked_arrays, + ) + + new_values = transpose_homogeneous_masked_arrays( + cast(Sequence[BaseMaskedArray], self._iter_column_arrays()) + ) + elif isinstance(dtypes[0], ArrowDtype): + # We have arrow EAs with the same dtype. We can transpose faster. + from pandas.core.arrays.arrow.array import ( + ArrowExtensionArray, + transpose_homogeneous_pyarrow, + ) + + new_values = transpose_homogeneous_pyarrow( + cast(Sequence[ArrowExtensionArray], self._iter_column_arrays()) + ) + else: + # We have other EAs with the same dtype. We preserve dtype in transpose. + dtyp = dtypes[0] + arr_typ = dtyp.construct_array_type() + values = self.values + new_values = [arr_typ._from_sequence(row, dtype=dtyp) for row in values] + + result = type(self)._from_arrays( + new_values, + index=self.columns, + columns=self.index, + verify_integrity=False, + ) + + else: + new_arr = self.values.T + if copy and not using_copy_on_write(): + new_arr = new_arr.copy() + result = self._constructor( + new_arr, + index=self.columns, + columns=self.index, + dtype=new_arr.dtype, + # We already made a copy (more than one block) + copy=False, + ) + + return result.__finalize__(self, method="transpose") + + @property + def T(self) -> DataFrame: + """ + The transpose of the DataFrame. + + Returns + ------- + DataFrame + The transposed DataFrame. + + See Also + -------- + DataFrame.transpose : Transpose index and columns. 
+ + Examples + -------- + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) + >>> df + col1 col2 + 0 1 3 + 1 2 4 + + >>> df.T + 0 1 + col1 1 2 + col2 3 4 + """ + return self.transpose() + + # ---------------------------------------------------------------------- + # Indexing Methods + + def _ixs(self, i: int, axis: AxisInt = 0) -> Series: + """ + Parameters + ---------- + i : int + axis : int + + Returns + ------- + Series + """ + # irow + if axis == 0: + new_mgr = self._mgr.fast_xs(i) + + # if we are a copy, mark as such + copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None + result = self._constructor_sliced_from_mgr(new_mgr, axes=new_mgr.axes) + result._name = self.index[i] + result = result.__finalize__(self) + result._set_is_copy(self, copy=copy) + return result + + # icol + else: + label = self.columns[i] + + col_mgr = self._mgr.iget(i) + result = self._box_col_values(col_mgr, i) + + # this is a cached value, mark it so + result._set_as_cached(label, self) + return result + + def _get_column_array(self, i: int) -> ArrayLike: + """ + Get the values of the i'th column (ndarray or ExtensionArray, as stored + in the Block) + + Warning! The returned array is a view but doesn't handle Copy-on-Write, + so this should be used with caution (for read-only purposes). + """ + return self._mgr.iget_values(i) + + def _iter_column_arrays(self) -> Iterator[ArrayLike]: + """ + Iterate over the arrays of all columns in order. + This returns the values as stored in the Block (ndarray or ExtensionArray). + + Warning! The returned array is a view but doesn't handle Copy-on-Write, + so this should be used with caution (for read-only purposes). + """ + if isinstance(self._mgr, ArrayManager): + yield from self._mgr.arrays + else: + for i in range(len(self.columns)): + yield self._get_column_array(i) + + def _getitem_nocopy(self, key: list): + """ + Behaves like __getitem__, but returns a view in cases where __getitem__ + would make a copy. + """ + # TODO(CoW): can be removed if/when we are always Copy-on-Write + indexer = self.columns._get_indexer_strict(key, "columns")[1] + new_axis = self.columns[indexer] + + new_mgr = self._mgr.reindex_indexer( + new_axis, + indexer, + axis=0, + allow_dups=True, + copy=False, + only_slice=True, + ) + return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes) + + def __getitem__(self, key): + check_dict_or_set_indexers(key) + key = lib.item_from_zerodim(key) + key = com.apply_if_callable(key, self) + + if is_hashable(key) and not is_iterator(key): + # is_iterator to exclude generator e.g. test_getitem_listlike + # shortcut if the key is in columns + is_mi = isinstance(self.columns, MultiIndex) + # GH#45316 Return view if key is not duplicated + # Only use drop_duplicates with duplicates for performance + if not is_mi and ( + self.columns.is_unique + and key in self.columns + or key in self.columns.drop_duplicates(keep=False) + ): + return self._get_item_cache(key) + + elif is_mi and self.columns.is_unique and key in self.columns: + return self._getitem_multilevel(key) + + # Do we have a slicer (on rows)? + if isinstance(key, slice): + return self._getitem_slice(key) + + # Do we have a (boolean) DataFrame? + if isinstance(key, DataFrame): + return self.where(key) + + # Do we have a (boolean) 1d indexer? 
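+        # For example, df[df["a"] > 0] or df[np.array([True, False, ...])]
+        # ends up here: the mask is validated (and, for a Series key,
+        # aligned) against the row index, and the rows where it is True are
+        # returned by _getitem_bool_array below.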
+ if com.is_bool_indexer(key): + return self._getitem_bool_array(key) + + # We are left with two options: a single key, and a collection of keys, + # We interpret tuples as collections only for non-MultiIndex + is_single_key = isinstance(key, tuple) or not is_list_like(key) + + if is_single_key: + if self.columns.nlevels > 1: + return self._getitem_multilevel(key) + indexer = self.columns.get_loc(key) + if is_integer(indexer): + indexer = [indexer] + else: + if is_iterator(key): + key = list(key) + indexer = self.columns._get_indexer_strict(key, "columns")[1] + + # take() does not accept boolean indexers + if getattr(indexer, "dtype", None) == bool: + indexer = np.where(indexer)[0] + + if isinstance(indexer, slice): + return self._slice(indexer, axis=1) + + data = self._take_with_is_copy(indexer, axis=1) + + if is_single_key: + # What does looking for a single key in a non-unique index return? + # The behavior is inconsistent. It returns a Series, except when + # - the key itself is repeated (test on data.shape, #9519), or + # - we have a MultiIndex on columns (test on self.columns, #21309) + if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): + # GH#26490 using data[key] can cause RecursionError + return data._get_item_cache(key) + + return data + + def _getitem_bool_array(self, key): + # also raises Exception if object array with NA values + # warning here just in case -- previously __setitem__ was + # reindexing but __getitem__ was not; it seems more reasonable to + # go with the __setitem__ behavior since that is more consistent + # with all other indexing behavior + if isinstance(key, Series) and not key.index.equals(self.index): + warnings.warn( + "Boolean Series key will be reindexed to match DataFrame index.", + UserWarning, + stacklevel=find_stack_level(), + ) + elif len(key) != len(self.index): + raise ValueError( + f"Item wrong length {len(key)} instead of {len(self.index)}." + ) + + # check_bool_indexer will throw exception if Series key cannot + # be reindexed to match DataFrame rows + key = check_bool_indexer(self.index, key) + + if key.all(): + return self.copy(deep=None) + + indexer = key.nonzero()[0] + return self._take_with_is_copy(indexer, axis=0) + + def _getitem_multilevel(self, key): + # self.columns is a MultiIndex + loc = self.columns.get_loc(key) + if isinstance(loc, (slice, np.ndarray)): + new_columns = self.columns[loc] + result_columns = maybe_droplevels(new_columns, key) + result = self.iloc[:, loc] + result.columns = result_columns + + # If there is only one column being returned, and its name is + # either an empty string, or a tuple with an empty string as its + # first element, then treat the empty string as a placeholder + # and return the column as if the user had provided that empty + # string in the key. If the result is a Series, exclude the + # implied empty string from its name. + if len(result.columns) == 1: + # e.g. test_frame_getitem_multicolumn_empty_level, + # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice + top = result.columns[0] + if isinstance(top, tuple): + top = top[0] + if top == "": + result = result[""] + if isinstance(result, Series): + result = self._constructor_sliced( + result, index=self.index, name=key + ) + + result._set_is_copy(self) + return result + else: + # loc is neither a slice nor ndarray, so must be an int + return self._ixs(loc, axis=1) + + def _get_value(self, index, col, takeable: bool = False) -> Scalar: + """ + Quickly retrieve single value at passed column and index. 
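+
+        This is the fast path behind ``DataFrame.at`` and ``DataFrame.iat``:
+        for example, ``df._get_value("a", "A")`` returns the same scalar as
+        ``df.at["a", "A"]``, and ``df._get_value(0, 0, takeable=True)`` the
+        same as ``df.iat[0, 0]``, while skipping most of the public indexing
+        machinery.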
+ + Parameters + ---------- + index : row label + col : column label + takeable : interpret the index/col as indexers, default False + + Returns + ------- + scalar + + Notes + ----- + Assumes that both `self.index._index_as_unique` and + `self.columns._index_as_unique`; Caller is responsible for checking. + """ + if takeable: + series = self._ixs(col, axis=1) + return series._values[index] + + series = self._get_item_cache(col) + engine = self.index._engine + + if not isinstance(self.index, MultiIndex): + # CategoricalIndex: Trying to use the engine fastpath may give incorrect + # results if our categories are integers that dont match our codes + # IntervalIndex: IntervalTree has no get_loc + row = self.index.get_loc(index) + return series._values[row] + + # For MultiIndex going through engine effectively restricts us to + # same-length tuples; see test_get_set_value_no_partial_indexing + loc = engine.get_loc(index) + return series._values[loc] + + def isetitem(self, loc, value) -> None: + """ + Set the given value in the column with position `loc`. + + This is a positional analogue to ``__setitem__``. + + Parameters + ---------- + loc : int or sequence of ints + Index position for the column. + value : scalar or arraylike + Value(s) for the column. + + Notes + ----- + ``frame.isetitem(loc, value)`` is an in-place method as it will + modify the DataFrame in place (not returning a new object). In contrast to + ``frame.iloc[:, i] = value`` which will try to update the existing values in + place, ``frame.isetitem(loc, value)`` will not update the values of the column + itself in place, it will instead insert a new array. + + In cases where ``frame.columns`` is unique, this is equivalent to + ``frame[frame.columns[i]] = value``. + """ + if isinstance(value, DataFrame): + if is_integer(loc): + loc = [loc] + + if len(loc) != len(value.columns): + raise ValueError( + f"Got {len(loc)} positions but value has {len(value.columns)} " + f"columns." 
+ ) + + for i, idx in enumerate(loc): + arraylike, refs = self._sanitize_column(value.iloc[:, i]) + self._iset_item_mgr(idx, arraylike, inplace=False, refs=refs) + return + + arraylike, refs = self._sanitize_column(value) + self._iset_item_mgr(loc, arraylike, inplace=False, refs=refs) + + def __setitem__(self, key, value) -> None: + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= 3: + warnings.warn( + _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 + ) + + key = com.apply_if_callable(key, self) + + # see if we can slice the rows + if isinstance(key, slice): + slc = self.index._convert_slice_indexer(key, kind="getitem") + return self._setitem_slice(slc, value) + + if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: + self._setitem_frame(key, value) + elif isinstance(key, (Series, np.ndarray, list, Index)): + self._setitem_array(key, value) + elif isinstance(value, DataFrame): + self._set_item_frame_value(key, value) + elif ( + is_list_like(value) + and not self.columns.is_unique + and 1 < len(self.columns.get_indexer_for([key])) == len(value) + ): + # Column to set is duplicated + self._setitem_array([key], value) + else: + # set column + self._set_item(key, value) + + def _setitem_slice(self, key: slice, value) -> None: + # NB: we can't just use self.loc[key] = value because that + # operates on labels and we need to operate positional for + # backwards-compat, xref GH#31469 + self._check_setitem_copy() + self.iloc[key] = value + + def _setitem_array(self, key, value): + # also raises Exception if object array with NA values + if com.is_bool_indexer(key): + # bool indexer is indexing along rows + if len(key) != len(self.index): + raise ValueError( + f"Item wrong length {len(key)} instead of {len(self.index)}!" + ) + key = check_bool_indexer(self.index, key) + indexer = key.nonzero()[0] + self._check_setitem_copy() + if isinstance(value, DataFrame): + # GH#39931 reindex since iloc does not align + value = value.reindex(self.index.take(indexer)) + self.iloc[indexer] = value + + else: + # Note: unlike self.iloc[:, indexer] = value, this will + # never try to overwrite values inplace + + if isinstance(value, DataFrame): + check_key_length(self.columns, key, value) + for k1, k2 in zip(key, value.columns): + self[k1] = value[k2] + + elif not is_list_like(value): + for col in key: + self[col] = value + + elif isinstance(value, np.ndarray) and value.ndim == 2: + self._iset_not_inplace(key, value) + + elif np.ndim(value) > 1: + # list of lists + value = DataFrame(value).values + return self._setitem_array(key, value) + + else: + self._iset_not_inplace(key, value) + + def _iset_not_inplace(self, key, value): + # GH#39510 when setting with df[key] = obj with a list-like key and + # list-like value, we iterate over those listlikes and set columns + # one at a time. This is different from dispatching to + # `self.loc[:, key]= value` because loc.__setitem__ may overwrite + # data inplace, whereas this will insert new arrays. 
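+        #
+        # For example (illustrative), df[["a", "b"]] = arr2d takes the first
+        # "column" of arr2d for "a" and the second for "b" via igetitem
+        # below, inserting fresh arrays rather than mutating existing blocks.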
+ + def igetitem(obj, i: int): + # Note: we catch DataFrame obj before getting here, but + # hypothetically would return obj.iloc[:, i] + if isinstance(obj, np.ndarray): + return obj[..., i] + else: + return obj[i] + + if self.columns.is_unique: + if np.shape(value)[-1] != len(key): + raise ValueError("Columns must be same length as key") + + for i, col in enumerate(key): + self[col] = igetitem(value, i) + + else: + ilocs = self.columns.get_indexer_non_unique(key)[0] + if (ilocs < 0).any(): + # key entries not in self.columns + raise NotImplementedError + + if np.shape(value)[-1] != len(ilocs): + raise ValueError("Columns must be same length as key") + + assert np.ndim(value) <= 2 + + orig_columns = self.columns + + # Using self.iloc[:, i] = ... may set values inplace, which + # by convention we do not do in __setitem__ + try: + self.columns = Index(range(len(self.columns))) + for i, iloc in enumerate(ilocs): + self[iloc] = igetitem(value, i) + finally: + self.columns = orig_columns + + def _setitem_frame(self, key, value): + # support boolean setting with DataFrame input, e.g. + # df[df > df2] = 0 + if isinstance(key, np.ndarray): + if key.shape != self.shape: + raise ValueError("Array conditional must be same shape as self") + key = self._constructor(key, **self._construct_axes_dict(), copy=False) + + if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): + raise TypeError( + "Must pass DataFrame or 2-d ndarray with boolean values only" + ) + + self._check_setitem_copy() + self._where(-key, value, inplace=True) + + def _set_item_frame_value(self, key, value: DataFrame) -> None: + self._ensure_valid_index(value) + + # align columns + if key in self.columns: + loc = self.columns.get_loc(key) + cols = self.columns[loc] + len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) + if len_cols != len(value.columns): + raise ValueError("Columns must be same length as key") + + # align right-hand-side columns if self.columns + # is multi-index and self[key] is a sub-frame + if isinstance(self.columns, MultiIndex) and isinstance( + loc, (slice, Series, np.ndarray, Index) + ): + cols_droplevel = maybe_droplevels(cols, key) + if len(cols_droplevel) and not cols_droplevel.equals(value.columns): + value = value.reindex(cols_droplevel, axis=1) + + for col, col_droplevel in zip(cols, cols_droplevel): + self[col] = value[col_droplevel] + return + + if is_scalar(cols): + self[cols] = value[value.columns[0]] + return + + locs: np.ndarray | list + if isinstance(loc, slice): + locs = np.arange(loc.start, loc.stop, loc.step) + elif is_scalar(loc): + locs = [loc] + else: + locs = loc.nonzero()[0] + + return self.isetitem(locs, value) + + if len(value.columns) != 1: + raise ValueError( + "Cannot set a DataFrame with multiple columns to the single " + f"column {key}" + ) + + self[key] = value[value.columns[0]] + + def _iset_item_mgr( + self, + loc: int | slice | np.ndarray, + value, + inplace: bool = False, + refs: BlockValuesRefs | None = None, + ) -> None: + # when called from _set_item_mgr loc can be anything returned from get_loc + self._mgr.iset(loc, value, inplace=inplace, refs=refs) + self._clear_item_cache() + + def _set_item_mgr( + self, key, value: ArrayLike, refs: BlockValuesRefs | None = None + ) -> None: + try: + loc = self._info_axis.get_loc(key) + except KeyError: + # This item wasn't present, just insert at end + self._mgr.insert(len(self._info_axis), key, value, refs) + else: + self._iset_item_mgr(loc, value, refs=refs) + + # check if we are modifying a copy + # 
try to set first as we want an invalid + # value exception to occur first + if len(self): + self._check_setitem_copy() + + def _iset_item(self, loc: int, value: Series, inplace: bool = True) -> None: + # We are only called from _replace_columnwise which guarantees that + # no reindex is necessary + if using_copy_on_write(): + self._iset_item_mgr( + loc, value._values, inplace=inplace, refs=value._references + ) + else: + self._iset_item_mgr(loc, value._values.copy(), inplace=True) + + # check if we are modifying a copy + # try to set first as we want an invalid + # value exception to occur first + if len(self): + self._check_setitem_copy() + + def _set_item(self, key, value) -> None: + """ + Add series to DataFrame in specified column. + + If series is a numpy-array (not a Series/TimeSeries), it must be the + same length as the DataFrames index or an error will be thrown. + + Series/TimeSeries will be conformed to the DataFrames index to + ensure homogeneity. + """ + value, refs = self._sanitize_column(value) + + if ( + key in self.columns + and value.ndim == 1 + and not isinstance(value.dtype, ExtensionDtype) + ): + # broadcast across multiple columns if necessary + if not self.columns.is_unique or isinstance(self.columns, MultiIndex): + existing_piece = self[key] + if isinstance(existing_piece, DataFrame): + value = np.tile(value, (len(existing_piece.columns), 1)).T + refs = None + + self._set_item_mgr(key, value, refs) + + def _set_value( + self, index: IndexLabel, col, value: Scalar, takeable: bool = False + ) -> None: + """ + Put single value at passed column and index. + + Parameters + ---------- + index : Label + row label + col : Label + column label + value : scalar + takeable : bool, default False + Sets whether or not index/col interpreted as indexers + """ + try: + if takeable: + icol = col + iindex = cast(int, index) + else: + icol = self.columns.get_loc(col) + iindex = self.index.get_loc(index) + self._mgr.column_setitem(icol, iindex, value, inplace_only=True) + self._clear_item_cache() + + except (KeyError, TypeError, ValueError, LossySetitemError): + # get_loc might raise a KeyError for missing labels (falling back + # to (i)loc will do expansion of the index) + # column_setitem will do validation that may raise TypeError, + # ValueError, or LossySetitemError + # set using a non-recursive method & reset the cache + if takeable: + self.iloc[index, col] = value + else: + self.loc[index, col] = value + self._item_cache.pop(col, None) + + except InvalidIndexError as ii_err: + # GH48729: Seems like you are trying to assign a value to a + # row when only scalar options are permitted + raise InvalidIndexError( + f"You can only assign a scalar value not a {type(value)}" + ) from ii_err + + def _ensure_valid_index(self, value) -> None: + """ + Ensure that if we don't have an index, that we can create one from the + passed value. 
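+
+        For example, assigning ``df["a"] = pd.Series([1, 2, 3])`` to an empty
+        ``DataFrame()`` first grows the empty index to match the value's
+        index, so the new column has rows to attach to.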
+ """ + # GH5632, make sure that we are a Series convertible + if not len(self.index) and is_list_like(value) and len(value): + if not isinstance(value, DataFrame): + try: + value = Series(value) + except (ValueError, NotImplementedError, TypeError) as err: + raise ValueError( + "Cannot set a frame with no defined index " + "and a value that cannot be converted to a Series" + ) from err + + # GH31368 preserve name of index + index_copy = value.index.copy() + if self.index.name is not None: + index_copy.name = self.index.name + + self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) + + def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: + """ + Provide boxed values for a column. + """ + # Lookup in columns so that if e.g. a str datetime was passed + # we attach the Timestamp object as the name. + name = self.columns[loc] + # We get index=self.index bc values is a SingleDataManager + obj = self._constructor_sliced_from_mgr(values, axes=values.axes) + obj._name = name + return obj.__finalize__(self) + + # ---------------------------------------------------------------------- + # Lookup Caching + + def _clear_item_cache(self) -> None: + self._item_cache.clear() + + def _get_item_cache(self, item: Hashable) -> Series: + """Return the cached item, item represents a label indexer.""" + if using_copy_on_write(): + loc = self.columns.get_loc(item) + return self._ixs(loc, axis=1) + + cache = self._item_cache + res = cache.get(item) + if res is None: + # All places that call _get_item_cache have unique columns, + # pending resolution of GH#33047 + + loc = self.columns.get_loc(item) + res = self._ixs(loc, axis=1) + + cache[item] = res + + # for a chain + res._is_copy = self._is_copy + return res + + def _reset_cacher(self) -> None: + # no-op for DataFrame + pass + + def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: + """ + The object has called back to us saying maybe it has changed. + """ + loc = self._info_axis.get_loc(item) + arraylike = value._values + + old = self._ixs(loc, axis=1) + if old._values is value._values and inplace: + # GH#46149 avoid making unnecessary copies/block-splitting + return + + self._mgr.iset(loc, arraylike, inplace=inplace) + + # ---------------------------------------------------------------------- + # Unsorted + + @overload + def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: + ... + + @overload + def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: + ... + + @overload + def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: + ... + + def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: + """ + Query the columns of a DataFrame with a boolean expression. + + Parameters + ---------- + expr : str + The query string to evaluate. + + You can refer to variables + in the environment by prefixing them with an '@' character like + ``@a + b``. + + You can refer to column names that are not valid Python variable names + by surrounding them in backticks. Thus, column names containing spaces + or punctuations (besides underscores) or starting with digits must be + surrounded by backticks. (For example, a column named "Area (cm^2)" would + be referenced as ```Area (cm^2)```). Column names which are Python keywords + (like "list", "for", "import", etc) cannot be used. + + For example, if one of your columns is called ``a a`` and you want + to sum it with ``b``, your query should be ```a a` + b``. 
+
+        inplace : bool, default False
+            Whether to modify the DataFrame rather than creating a new one.
+        **kwargs
+            See the documentation for :func:`eval` for complete details
+            on the keyword arguments accepted by :meth:`DataFrame.query`.
+
+        Returns
+        -------
+        DataFrame or None
+            DataFrame resulting from the provided query expression or
+            None if ``inplace=True``.
+
+        See Also
+        --------
+        eval : Evaluate a string describing operations on
+            DataFrame columns.
+        DataFrame.eval : Evaluate a string describing operations on
+            DataFrame columns.
+
+        Notes
+        -----
+        The result of the evaluation of this expression is first passed to
+        :attr:`DataFrame.loc` and if that fails because of a
+        multidimensional key (e.g., a DataFrame) then the result will be passed
+        to :meth:`DataFrame.__getitem__`.
+
+        This method uses the top-level :func:`eval` function to
+        evaluate the passed query.
+
+        The :meth:`~pandas.DataFrame.query` method uses a slightly
+        modified Python syntax by default. For example, the ``&`` and ``|``
+        (bitwise) operators have the precedence of their boolean cousins,
+        :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
+        however the semantics are different.
+
+        You can change the semantics of the expression by passing the keyword
+        argument ``parser='python'``. This enforces the same semantics as
+        evaluation in Python space. Likewise, you can pass ``engine='python'``
+        to evaluate an expression using Python itself as a backend. This is not
+        recommended as it is inefficient compared to using ``numexpr`` as the
+        engine.
+
+        The :attr:`DataFrame.index` and
+        :attr:`DataFrame.columns` attributes of the
+        :class:`~pandas.DataFrame` instance are placed in the query namespace
+        by default, which allows you to treat both the index and columns of the
+        frame as a column in the frame.
+        The identifier ``index`` is used for the frame index; you can also
+        use the name of the index to identify it in a query. Please note that
+        Python keywords may not be used as identifiers.
+
+        For further details and examples see the ``query`` documentation in
+        :ref:`indexing <indexing.query>`.
+
+        *Backtick quoted variables*
+
+        Backtick quoted variables are parsed as literal Python code and
+        are converted internally to a valid Python identifier.
+        This can lead to the following problems.
+
+        During parsing a number of disallowed characters inside the backtick
+        quoted string are replaced by strings that are allowed as a Python
+        identifier. These characters include all operators in Python, the space
+        character, the question mark, the exclamation mark, the dollar sign,
+        and the euro sign.
+        For other characters that fall outside the ASCII range (U+0001..U+007F)
+        and those that are not further specified in PEP 3131,
+        the query parser will raise an error.
+        This excludes whitespace other than the space character,
+        but also the hash character (as it is used for comments) and the
+        backtick itself (the backtick cannot be escaped).
+
+        In a special case, quotes that make a pair around a backtick can
+        confuse the parser.
+        For example, ```it's` > `that's``` will raise an error,
+        as it forms a quoted string (``'s > `that'``) with a backtick inside.
+
+        See also the Python documentation about lexical analysis
+        (https://docs.python.org/3/reference/lexical_analysis.html)
+        in combination with the source code in :mod:`pandas.core.computation.parsing`.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({'A': range(1, 6),
+        ...                    'B': range(10, 0, -2),
+        ...                    
'C C': range(10, 5, -1)})
+        >>> df
+           A   B  C C
+        0  1  10   10
+        1  2   8    9
+        2  3   6    8
+        3  4   4    7
+        4  5   2    6
+        >>> df.query('A > B')
+           A  B  C C
+        4  5  2    6
+
+        The previous expression is equivalent to
+
+        >>> df[df.A > df.B]
+           A  B  C C
+        4  5  2    6
+
+        For columns with spaces in their name, you can use backtick quoting.
+
+        >>> df.query('B == `C C`')
+           A   B  C C
+        0  1  10   10
+
+        The previous expression is equivalent to
+
+        >>> df[df.B == df['C C']]
+           A   B  C C
+        0  1  10   10
+        """
+        inplace = validate_bool_kwarg(inplace, "inplace")
+        if not isinstance(expr, str):
+            msg = f"expr must be a string to be evaluated, {type(expr)} given"
+            raise ValueError(msg)
+        kwargs["level"] = kwargs.pop("level", 0) + 1
+        kwargs["target"] = None
+        res = self.eval(expr, **kwargs)
+
+        try:
+            result = self.loc[res]
+        except ValueError:
+            # when res is multi-dimensional loc raises, but this is sometimes a
+            # valid query
+            result = self[res]
+
+        if inplace:
+            self._update_inplace(result)
+            return None
+        else:
+            return result
+
+    @overload
+    def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any:
+        ...
+
+    @overload
+    def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None:
+        ...
+
+    def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None:
+        """
+        Evaluate a string describing operations on DataFrame columns.
+
+        Operates on columns only, not specific rows or elements. This allows
+        `eval` to run arbitrary code, which can make you vulnerable to code
+        injection if you pass user input to this function.
+
+        Parameters
+        ----------
+        expr : str
+            The expression string to evaluate.
+        inplace : bool, default False
+            If the expression contains an assignment, whether to perform the
+            operation inplace and mutate the existing DataFrame. Otherwise,
+            a new DataFrame is returned.
+        **kwargs
+            See the documentation for :func:`eval` for complete details
+            on the keyword arguments accepted by
+            :meth:`~pandas.DataFrame.query`.
+
+        Returns
+        -------
+        ndarray, scalar, pandas object, or None
+            The result of the evaluation or None if ``inplace=True``.
+
+        See Also
+        --------
+        DataFrame.query : Evaluates a boolean expression to query the columns
+            of a frame.
+        DataFrame.assign : Can evaluate an expression or function to create new
+            values for a column.
+        eval : Evaluate a Python expression as a string using various
+            backends.
+
+        Notes
+        -----
+        For more details see the API documentation for :func:`~eval`.
+        For detailed examples see :ref:`enhancing performance with eval
+        <enhancing_performance.eval>`.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
+        >>> df
+           A   B
+        0  1  10
+        1  2   8
+        2  3   6
+        3  4   4
+        4  5   2
+        >>> df.eval('A + B')
+        0    11
+        1    10
+        2     9
+        3     8
+        4     7
+        dtype: int64
+
+        Assignment is allowed, though by default the original DataFrame is not
+        modified.
+
+        >>> df.eval('C = A + B')
+           A   B   C
+        0  1  10  11
+        1  2   8  10
+        2  3   6   9
+        3  4   4   8
+        4  5   2   7
+        >>> df
+           A   B
+        0  1  10
+        1  2   8
+        2  3   6
+        3  4   4
+        4  5   2
+
+        Multiple columns can be assigned to using multi-line expressions:
+
+        >>> df.eval(
+        ...     '''
+        ... C = A + B
+        ... D = A - B
+        ... '''
+        ... 
)
+           A   B   C  D
+        0  1  10  11 -9
+        1  2   8  10 -6
+        2  3   6   9 -3
+        3  4   4   8  0
+        4  5   2   7  3
+        """
+        from pandas.core.computation.eval import eval as _eval
+
+        inplace = validate_bool_kwarg(inplace, "inplace")
+        kwargs["level"] = kwargs.pop("level", 0) + 1
+        index_resolvers = self._get_index_resolvers()
+        column_resolvers = self._get_cleaned_column_resolvers()
+        resolvers = column_resolvers, index_resolvers
+        if "target" not in kwargs:
+            kwargs["target"] = self
+        kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers
+
+        return _eval(expr, inplace=inplace, **kwargs)
+
+    def select_dtypes(self, include=None, exclude=None) -> Self:
+        """
+        Return a subset of the DataFrame's columns based on the column dtypes.
+
+        Parameters
+        ----------
+        include, exclude : scalar or list-like
+            A selection of dtypes or strings to be included/excluded. At least
+            one of these parameters must be supplied.
+
+        Returns
+        -------
+        DataFrame
+            The subset of the frame including the dtypes in ``include`` and
+            excluding the dtypes in ``exclude``.
+
+        Raises
+        ------
+        ValueError
+            * If both of ``include`` and ``exclude`` are empty
+            * If ``include`` and ``exclude`` have overlapping elements
+            * If any kind of string dtype is passed in.
+
+        See Also
+        --------
+        DataFrame.dtypes: Return Series with the data type of each column.
+
+        Notes
+        -----
+        * To select all *numeric* types, use ``np.number`` or ``'number'``
+        * To select strings you must use the ``object`` dtype, but note that
+          this will return *all* object dtype columns
+        * See the `numpy dtype hierarchy
+          <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__
+        * To select datetimes, use ``np.datetime64``, ``'datetime'`` or
+          ``'datetime64'``
+        * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
+          ``'timedelta64'``
+        * To select Pandas categorical dtypes, use ``'category'``
+        * To select Pandas datetimetz dtypes, use ``'datetimetz'``
+          or ``'datetime64[ns, tz]'``
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({'a': [1, 2] * 3,
+        ...                    'b': [True, False] * 3,
+        ...                    
'c': [1.0, 2.0] * 3})
+        >>> df
+           a      b    c
+        0  1   True  1.0
+        1  2  False  2.0
+        2  1   True  1.0
+        3  2  False  2.0
+        4  1   True  1.0
+        5  2  False  2.0
+
+        >>> df.select_dtypes(include='bool')
+               b
+        0   True
+        1  False
+        2   True
+        3  False
+        4   True
+        5  False
+
+        >>> df.select_dtypes(include=['float64'])
+             c
+        0  1.0
+        1  2.0
+        2  1.0
+        3  2.0
+        4  1.0
+        5  2.0
+
+        >>> df.select_dtypes(exclude=['int64'])
+               b    c
+        0   True  1.0
+        1  False  2.0
+        2   True  1.0
+        3  False  2.0
+        4   True  1.0
+        5  False  2.0
+        """
+        if not is_list_like(include):
+            include = (include,) if include is not None else ()
+        if not is_list_like(exclude):
+            exclude = (exclude,) if exclude is not None else ()
+
+        selection = (frozenset(include), frozenset(exclude))
+
+        if not any(selection):
+            raise ValueError("at least one of include or exclude must be nonempty")
+
+        # convert the myriad valid dtypes object to a single representation
+        def check_int_infer_dtype(dtypes):
+            converted_dtypes: list[type] = []
+            for dtype in dtypes:
+                # NumPy maps int to different types (int32, int64) on Windows and Linux
+                # see https://github.com/numpy/numpy/issues/9464
+                if (isinstance(dtype, str) and dtype == "int") or (dtype is int):
+                    converted_dtypes.append(np.int32)
+                    converted_dtypes.append(np.int64)
+                elif dtype == "float" or dtype is float:
+                    # GH#42452 : np.dtype("float") coerces to np.float64 from NumPy 1.20
+                    converted_dtypes.extend([np.float64, np.float32])
+                else:
+                    converted_dtypes.append(infer_dtype_from_object(dtype))
+            return frozenset(converted_dtypes)
+
+        include = check_int_infer_dtype(include)
+        exclude = check_int_infer_dtype(exclude)
+
+        for dtypes in (include, exclude):
+            invalidate_string_dtypes(dtypes)
+
+        # can't both include AND exclude!
+        if not include.isdisjoint(exclude):
+            raise ValueError(f"include and exclude overlap on {(include & exclude)}")
+
+        def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool:
+            # GH 46870: BooleanDtype._is_numeric == True but should be excluded
+            dtype = dtype if not isinstance(dtype, ArrowDtype) else dtype.numpy_dtype
+            return issubclass(dtype.type, tuple(dtypes_set)) or (
+                np.number in dtypes_set
+                and getattr(dtype, "_is_numeric", False)
+                and not is_bool_dtype(dtype)
+            )
+
+        def predicate(arr: ArrayLike) -> bool:
+            dtype = arr.dtype
+            if include:
+                if not dtype_predicate(dtype, include):
+                    return False
+
+            if exclude:
+                if dtype_predicate(dtype, exclude):
+                    return False
+
+            return True
+
+        mgr = self._mgr._get_data_subset(predicate).copy(deep=None)
+        return self._constructor_from_mgr(mgr, axes=mgr.axes).__finalize__(self)
+
+    def insert(
+        self,
+        loc: int,
+        column: Hashable,
+        value: Scalar | AnyArrayLike,
+        allow_duplicates: bool | lib.NoDefault = lib.no_default,
+    ) -> None:
+        """
+        Insert column into DataFrame at specified location.
+
+        Raises a ValueError if `column` is already contained in the DataFrame,
+        unless `allow_duplicates` is set to True.
+
+        Parameters
+        ----------
+        loc : int
+            Insertion index. Must satisfy 0 <= loc <= len(columns).
+        column : str, number, or hashable object
+            Label of the inserted column.
+        value : Scalar, Series, or array-like
+        allow_duplicates : bool, optional, default lib.no_default
+
+        See Also
+        --------
+        Index.insert : Insert new item by index.
+ + Examples + -------- + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) + >>> df + col1 col2 + 0 1 3 + 1 2 4 + >>> df.insert(1, "newcol", [99, 99]) + >>> df + col1 newcol col2 + 0 1 99 3 + 1 2 99 4 + >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) + >>> df + col1 col1 newcol col2 + 0 100 1 99 3 + 1 100 2 99 4 + + Notice that pandas uses index alignment in case of `value` from type `Series`: + + >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) + >>> df + col0 col1 col1 newcol col2 + 0 NaN 100 1 99 3 + 1 5.0 100 2 99 4 + """ + if allow_duplicates is lib.no_default: + allow_duplicates = False + if allow_duplicates and not self.flags.allows_duplicate_labels: + raise ValueError( + "Cannot specify 'allow_duplicates=True' when " + "'self.flags.allows_duplicate_labels' is False." + ) + if not allow_duplicates and column in self.columns: + # Should this be a different kind of error?? + raise ValueError(f"cannot insert {column}, already exists") + if not is_integer(loc): + raise TypeError("loc must be int") + # convert non stdlib ints to satisfy typing checks + loc = int(loc) + if isinstance(value, DataFrame) and len(value.columns) > 1: + raise ValueError( + f"Expected a one-dimensional object, got a DataFrame with " + f"{len(value.columns)} columns instead." + ) + elif isinstance(value, DataFrame): + value = value.iloc[:, 0] + + value, refs = self._sanitize_column(value) + self._mgr.insert(loc, column, value, refs=refs) + + def assign(self, **kwargs) -> DataFrame: + r""" + Assign new columns to a DataFrame. + + Returns a new object with all original columns in addition to new ones. + Existing columns that are re-assigned will be overwritten. + + Parameters + ---------- + **kwargs : dict of {str: callable or Series} + The column names are keywords. If the values are + callable, they are computed on the DataFrame and + assigned to the new columns. The callable must not + change input DataFrame (though pandas doesn't check it). + If the values are not callable, (e.g. a Series, scalar, or array), + they are simply assigned. + + Returns + ------- + DataFrame + A new DataFrame with the new columns in addition to + all the existing columns. + + Notes + ----- + Assigning multiple columns within the same ``assign`` is possible. + Later items in '\*\*kwargs' may refer to newly created or modified + columns in 'df'; items are computed and assigned into 'df' in order. + + Examples + -------- + >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, + ... index=['Portland', 'Berkeley']) + >>> df + temp_c + Portland 17.0 + Berkeley 25.0 + + Where the value is a callable, evaluated on `df`: + + >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) + temp_c temp_f + Portland 17.0 62.6 + Berkeley 25.0 77.0 + + Alternatively, the same behavior can be achieved by directly + referencing an existing Series or sequence: + + >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) + temp_c temp_f + Portland 17.0 62.6 + Berkeley 25.0 77.0 + + You can create multiple columns within the same assign where one + of the columns depends on another one defined within the same assign: + + >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, + ... 
temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) + temp_c temp_f temp_k + Portland 17.0 62.6 290.15 + Berkeley 25.0 77.0 298.15 + """ + data = self.copy(deep=None) + + for k, v in kwargs.items(): + data[k] = com.apply_if_callable(v, data) + return data + + def _sanitize_column(self, value) -> tuple[ArrayLike, BlockValuesRefs | None]: + """ + Ensures new columns (which go into the BlockManager as new blocks) are + always copied (or a reference is being tracked to them under CoW) + and converted into an array. + + Parameters + ---------- + value : scalar, Series, or array-like + + Returns + ------- + tuple of numpy.ndarray or ExtensionArray and optional BlockValuesRefs + """ + self._ensure_valid_index(value) + + # Using a DataFrame would mean coercing values to one dtype + assert not isinstance(value, DataFrame) + if is_dict_like(value): + if not isinstance(value, Series): + value = Series(value) + return _reindex_for_setitem(value, self.index) + + if is_list_like(value): + com.require_length_match(value, self.index) + return sanitize_array(value, self.index, copy=True, allow_2d=True), None + + @property + def _series(self): + return { + item: Series( + self._mgr.iget(idx), index=self.index, name=item, fastpath=True + ) + for idx, item in enumerate(self.columns) + } + + # ---------------------------------------------------------------------- + # Reindexing and alignment + + def _reindex_multi( + self, axes: dict[str, Index], copy: bool, fill_value + ) -> DataFrame: + """ + We are guaranteed non-Nones in the axes. + """ + + new_index, row_indexer = self.index.reindex(axes["index"]) + new_columns, col_indexer = self.columns.reindex(axes["columns"]) + + if row_indexer is not None and col_indexer is not None: + # Fastpath. By doing two 'take's at once we avoid making an + # unnecessary copy. + # We only get here with `self._can_fast_transpose`, which (almost) + # ensures that self.values is cheap. It may be worth making this + # condition more specific. + indexer = row_indexer, col_indexer + new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) + return self._constructor( + new_values, index=new_index, columns=new_columns, copy=False + ) + else: + return self._reindex_with_indexers( + {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, + copy=copy, + fill_value=fill_value, + ) + + @Appender( + """ + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + + Change the row labels. + + >>> df.set_axis(['a', 'b', 'c'], axis='index') + A B + a 1 4 + b 2 5 + c 3 6 + + Change the column labels. 
+ + >>> df.set_axis(['I', 'II'], axis='columns') + I II + 0 1 4 + 1 2 5 + 2 3 6 + """ + ) + @Substitution( + klass=_shared_doc_kwargs["klass"], + axes_single_arg=_shared_doc_kwargs["axes_single_arg"], + extended_summary_sub=" column or", + axis_description_sub=", and 1 identifies the columns", + see_also_sub=" or columns", + ) + @Appender(NDFrame.set_axis.__doc__) + def set_axis( + self, + labels, + *, + axis: Axis = 0, + copy: bool | None = None, + ) -> DataFrame: + return super().set_axis(labels, axis=axis, copy=copy) + + @doc( + NDFrame.reindex, + klass=_shared_doc_kwargs["klass"], + optional_reindex=_shared_doc_kwargs["optional_reindex"], + ) + def reindex( + self, + labels=None, + *, + index=None, + columns=None, + axis: Axis | None = None, + method: ReindexMethod | None = None, + copy: bool | None = None, + level: Level | None = None, + fill_value: Scalar | None = np.nan, + limit: int | None = None, + tolerance=None, + ) -> DataFrame: + return super().reindex( + labels=labels, + index=index, + columns=columns, + axis=axis, + method=method, + copy=copy, + level=level, + fill_value=fill_value, + limit=limit, + tolerance=tolerance, + ) + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level = ..., + inplace: Literal[True], + errors: IgnoreRaise = ..., + ) -> None: + ... + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level = ..., + inplace: Literal[False] = ..., + errors: IgnoreRaise = ..., + ) -> DataFrame: + ... + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level = ..., + inplace: bool = ..., + errors: IgnoreRaise = ..., + ) -> DataFrame | None: + ... + + def drop( + self, + labels: IndexLabel | None = None, + *, + axis: Axis = 0, + index: IndexLabel | None = None, + columns: IndexLabel | None = None, + level: Level | None = None, + inplace: bool = False, + errors: IgnoreRaise = "raise", + ) -> DataFrame | None: + """ + Drop specified labels from rows or columns. + + Remove rows or columns by specifying label names and corresponding + axis, or by directly specifying index or column names. When using a + multi-index, labels on different levels can be removed by specifying + the level. See the :ref:`user guide ` + for more information about the now unused levels. + + Parameters + ---------- + labels : single label or list-like + Index or column labels to drop. A tuple will be used as a single + label and not treated as a list-like. + axis : {0 or 'index', 1 or 'columns'}, default 0 + Whether to drop labels from the index (0 or 'index') or + columns (1 or 'columns'). + index : single label or list-like + Alternative to specifying axis (``labels, axis=0`` + is equivalent to ``index=labels``). + columns : single label or list-like + Alternative to specifying axis (``labels, axis=1`` + is equivalent to ``columns=labels``). + level : int or level name, optional + For MultiIndex, level from which the labels will be removed. + inplace : bool, default False + If False, return a copy. Otherwise, do operation + in place and return None. + errors : {'ignore', 'raise'}, default 'raise' + If 'ignore', suppress error and only existing labels are + dropped. 
+
+        Returns
+        -------
+        DataFrame or None
+            DataFrame with the specified index or column labels removed,
+            or None if ``inplace=True``.
+
+        Raises
+        ------
+        KeyError
+            If any of the labels is not found in the selected axis.
+
+        See Also
+        --------
+        DataFrame.loc : Label-location based indexer for selection by label.
+        DataFrame.dropna : Return DataFrame with labels on given axis omitted
+            where (all or any) data are missing.
+        DataFrame.drop_duplicates : Return DataFrame with duplicate rows
+            removed, optionally only considering certain columns.
+        Series.drop : Return Series with specified index labels removed.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
+        ...                   columns=['A', 'B', 'C', 'D'])
+        >>> df
+           A  B   C   D
+        0  0  1   2   3
+        1  4  5   6   7
+        2  8  9  10  11
+
+        Drop columns
+
+        >>> df.drop(['B', 'C'], axis=1)
+           A   D
+        0  0   3
+        1  4   7
+        2  8  11
+
+        >>> df.drop(columns=['B', 'C'])
+           A   D
+        0  0   3
+        1  4   7
+        2  8  11
+
+        Drop a row by index
+
+        >>> df.drop([0, 1])
+           A  B   C   D
+        2  8  9  10  11
+
+        Drop columns and/or rows of MultiIndex DataFrame
+
+        >>> midx = pd.MultiIndex(levels=[['llama', 'cow', 'falcon'],
+        ...                              ['speed', 'weight', 'length']],
+        ...                      codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
+        ...                             [0, 1, 2, 0, 1, 2, 0, 1, 2]])
+        >>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
+        ...                   data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
+        ...                         [250, 150], [1.5, 0.8], [320, 250],
+        ...                         [1, 0.8], [0.3, 0.2]])
+        >>> df
+                        big     small
+        llama   speed   45.0    30.0
+                weight  200.0   100.0
+                length  1.5     1.0
+        cow     speed   30.0    20.0
+                weight  250.0   150.0
+                length  1.5     0.8
+        falcon  speed   320.0   250.0
+                weight  1.0     0.8
+                length  0.3     0.2
+
+        Drop a specific index combination from the MultiIndex
+        DataFrame, i.e., drop the combination ``'falcon'`` and
+        ``'weight'``, which deletes only the corresponding row
+
+        >>> df.drop(index=('falcon', 'weight'))
+                        big     small
+        llama   speed   45.0    30.0
+                weight  200.0   100.0
+                length  1.5     1.0
+        cow     speed   30.0    20.0
+                weight  250.0   150.0
+                length  1.5     0.8
+        falcon  speed   320.0   250.0
+                length  0.3     0.2
+
+        >>> df.drop(index='cow', columns='small')
+                        big
+        llama   speed   45.0
+                weight  200.0
+                length  1.5
+        falcon  speed   320.0
+                weight  1.0
+                length  0.3
+
+        >>> df.drop(index='length', level=1)
+                        big     small
+        llama   speed   45.0    30.0
+                weight  200.0   100.0
+        cow     speed   30.0    20.0
+                weight  250.0   150.0
+        falcon  speed   320.0   250.0
+                weight  1.0     0.8
+        """
+        return super().drop(
+            labels=labels,
+            axis=axis,
+            index=index,
+            columns=columns,
+            level=level,
+            inplace=inplace,
+            errors=errors,
+        )
+
+    @overload
+    def rename(
+        self,
+        mapper: Renamer | None = ...,
+        *,
+        index: Renamer | None = ...,
+        columns: Renamer | None = ...,
+        axis: Axis | None = ...,
+        copy: bool | None = ...,
+        inplace: Literal[True],
+        level: Level = ...,
+        errors: IgnoreRaise = ...,
+    ) -> None:
+        ...
+
+    @overload
+    def rename(
+        self,
+        mapper: Renamer | None = ...,
+        *,
+        index: Renamer | None = ...,
+        columns: Renamer | None = ...,
+        axis: Axis | None = ...,
+        copy: bool | None = ...,
+        inplace: Literal[False] = ...,
+        level: Level = ...,
+        errors: IgnoreRaise = ...,
+    ) -> DataFrame:
+        ...
+
+    @overload
+    def rename(
+        self,
+        mapper: Renamer | None = ...,
+        *,
+        index: Renamer | None = ...,
+        columns: Renamer | None = ...,
+        axis: Axis | None = ...,
+        copy: bool | None = ...,
+        inplace: bool = ...,
+        level: Level = ...,
+        errors: IgnoreRaise = ...,
+    ) -> DataFrame | None:
+        ...
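+    # Illustrative usage sketch (editorial addition, not pandas source): the
+    # ``rename`` overloads above only refine static typing; at runtime the
+    # method accepts either a mapper plus an axis, or explicit ``index``/
+    # ``columns`` mappings, as in the hypothetical snippet below:
+    #
+    #     df = pd.DataFrame({"A": [1, 2]}, index=["x", "y"])
+    #     df.rename(columns={"A": "a"})             # keyword form
+    #     df.rename(str.upper, axis="index")        # mapper + axis form
+    #     df.rename(columns={"B": "b"}, errors="raise")  # raises KeyError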
+ + def rename( + self, + mapper: Renamer | None = None, + *, + index: Renamer | None = None, + columns: Renamer | None = None, + axis: Axis | None = None, + copy: bool | None = None, + inplace: bool = False, + level: Level | None = None, + errors: IgnoreRaise = "ignore", + ) -> DataFrame | None: + """ + Rename columns or index labels. + + Function / dict values must be unique (1-to-1). Labels not contained in + a dict / Series will be left as-is. Extra labels listed don't throw an + error. + + See the :ref:`user guide ` for more. + + Parameters + ---------- + mapper : dict-like or function + Dict-like or function transformations to apply to + that axis' values. Use either ``mapper`` and ``axis`` to + specify the axis to target with ``mapper``, or ``index`` and + ``columns``. + index : dict-like or function + Alternative to specifying axis (``mapper, axis=0`` + is equivalent to ``index=mapper``). + columns : dict-like or function + Alternative to specifying axis (``mapper, axis=1`` + is equivalent to ``columns=mapper``). + axis : {0 or 'index', 1 or 'columns'}, default 0 + Axis to target with ``mapper``. Can be either the axis name + ('index', 'columns') or number (0, 1). The default is 'index'. + copy : bool, default True + Also copy underlying data. + inplace : bool, default False + Whether to modify the DataFrame rather than creating a new one. + If True then value of copy is ignored. + level : int or level name, default None + In case of a MultiIndex, only rename labels in the specified + level. + errors : {'ignore', 'raise'}, default 'ignore' + If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, + or `columns` contains labels that are not present in the Index + being transformed. + If 'ignore', existing keys will be renamed and extra keys will be + ignored. + + Returns + ------- + DataFrame or None + DataFrame with the renamed axis labels or None if ``inplace=True``. + + Raises + ------ + KeyError + If any of the labels is not found in the selected axis and + "errors='raise'". + + See Also + -------- + DataFrame.rename_axis : Set the name of the axis. + + Examples + -------- + ``DataFrame.rename`` supports two calling conventions + + * ``(index=index_mapper, columns=columns_mapper, ...)`` + * ``(mapper, axis={'index', 'columns'}, ...)`` + + We *highly* recommend using keyword arguments to clarify your + intent. + + Rename columns using a mapping: + + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + >>> df.rename(columns={"A": "a", "B": "c"}) + a c + 0 1 4 + 1 2 5 + 2 3 6 + + Rename index using a mapping: + + >>> df.rename(index={0: "x", 1: "y", 2: "z"}) + A B + x 1 4 + y 2 5 + z 3 6 + + Cast index labels to a different type: + + >>> df.index + RangeIndex(start=0, stop=3, step=1) + >>> df.rename(index=str).index + Index(['0', '1', '2'], dtype='object') + + >>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise") + Traceback (most recent call last): + KeyError: ['C'] not found in axis + + Using axis-style parameters: + + >>> df.rename(str.lower, axis='columns') + a b + 0 1 4 + 1 2 5 + 2 3 6 + + >>> df.rename({1: 2, 2: 4}, axis='index') + A B + 0 1 4 + 2 2 5 + 4 3 6 + """ + return super()._rename( + mapper=mapper, + index=index, + columns=columns, + axis=axis, + copy=copy, + inplace=inplace, + level=level, + errors=errors, + ) + + def pop(self, item: Hashable) -> Series: + """ + Return item and drop from frame. Raise KeyError if not found. + + Parameters + ---------- + item : label + Label of column to be popped. 
+ + Returns + ------- + Series + + Examples + -------- + >>> df = pd.DataFrame([('falcon', 'bird', 389.0), + ... ('parrot', 'bird', 24.0), + ... ('lion', 'mammal', 80.5), + ... ('monkey', 'mammal', np.nan)], + ... columns=('name', 'class', 'max_speed')) + >>> df + name class max_speed + 0 falcon bird 389.0 + 1 parrot bird 24.0 + 2 lion mammal 80.5 + 3 monkey mammal NaN + + >>> df.pop('class') + 0 bird + 1 bird + 2 mammal + 3 mammal + Name: class, dtype: object + + >>> df + name max_speed + 0 falcon 389.0 + 1 parrot 24.0 + 2 lion 80.5 + 3 monkey NaN + """ + return super().pop(item=item) + + def _replace_columnwise( + self, mapping: dict[Hashable, tuple[Any, Any]], inplace: bool, regex + ): + """ + Dispatch to Series.replace column-wise. + + Parameters + ---------- + mapping : dict + of the form {col: (target, value)} + inplace : bool + regex : bool or same types as `to_replace` in DataFrame.replace + + Returns + ------- + DataFrame or None + """ + # Operate column-wise + res = self if inplace else self.copy(deep=None) + ax = self.columns + + for i, ax_value in enumerate(ax): + if ax_value in mapping: + ser = self.iloc[:, i] + + target, value = mapping[ax_value] + newobj = ser.replace(target, value, regex=regex) + + res._iset_item(i, newobj, inplace=inplace) + + if inplace: + return + return res.__finalize__(self) + + @doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"]) + def shift( + self, + periods: int | Sequence[int] = 1, + freq: Frequency | None = None, + axis: Axis = 0, + fill_value: Hashable = lib.no_default, + suffix: str | None = None, + ) -> DataFrame: + if freq is not None and fill_value is not lib.no_default: + # GH#53832 + warnings.warn( + "Passing a 'freq' together with a 'fill_value' silently ignores " + "the fill_value and is deprecated. This will raise in a future " + "version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + fill_value = lib.no_default + + axis = self._get_axis_number(axis) + + if is_list_like(periods): + periods = cast(Sequence, periods) + if axis == 1: + raise ValueError( + "If `periods` contains multiple shifts, `axis` cannot be 1." + ) + if len(periods) == 0: + raise ValueError("If `periods` is an iterable, it cannot be empty.") + from pandas.core.reshape.concat import concat + + shifted_dataframes = [] + for period in periods: + if not is_integer(period): + raise TypeError( + f"Periods must be integer, but {period} is {type(period)}." 
+ ) + period = cast(int, period) + shifted_dataframes.append( + super() + .shift(periods=period, freq=freq, axis=axis, fill_value=fill_value) + .add_suffix(f"{suffix}_{period}" if suffix else f"_{period}") + ) + return concat(shifted_dataframes, axis=1) + elif suffix: + raise ValueError("Cannot specify `suffix` if `periods` is an int.") + periods = cast(int, periods) + + ncols = len(self.columns) + arrays = self._mgr.arrays + if axis == 1 and periods != 0 and ncols > 0 and freq is None: + if fill_value is lib.no_default: + # We will infer fill_value to match the closest column + + # Use a column that we know is valid for our column's dtype GH#38434 + label = self.columns[0] + + if periods > 0: + result = self.iloc[:, :-periods] + for col in range(min(ncols, abs(periods))): + # TODO(EA2D): doing this in a loop unnecessary with 2D EAs + # Define filler inside loop so we get a copy + filler = self.iloc[:, 0].shift(len(self)) + result.insert(0, label, filler, allow_duplicates=True) + else: + result = self.iloc[:, -periods:] + for col in range(min(ncols, abs(periods))): + # Define filler inside loop so we get a copy + filler = self.iloc[:, -1].shift(len(self)) + result.insert( + len(result.columns), label, filler, allow_duplicates=True + ) + + result.columns = self.columns.copy() + return result + elif len(arrays) > 1 or ( + # If we only have one block and we know that we can't + # keep the same dtype (i.e. the _can_hold_element check) + # then we can go through the reindex_indexer path + # (and avoid casting logic in the Block method). + not can_hold_element(arrays[0], fill_value) + ): + # GH#35488 we need to watch out for multi-block cases + # We only get here with fill_value not-lib.no_default + nper = abs(periods) + nper = min(nper, ncols) + if periods > 0: + indexer = np.array( + [-1] * nper + list(range(ncols - periods)), dtype=np.intp + ) + else: + indexer = np.array( + list(range(nper, ncols)) + [-1] * nper, dtype=np.intp + ) + mgr = self._mgr.reindex_indexer( + self.columns, + indexer, + axis=0, + fill_value=fill_value, + allow_dups=True, + ) + res_df = self._constructor_from_mgr(mgr, axes=mgr.axes) + return res_df.__finalize__(self, method="shift") + else: + return self.T.shift(periods=periods, fill_value=fill_value).T + + return super().shift( + periods=periods, freq=freq, axis=axis, fill_value=fill_value + ) + + @overload + def set_index( + self, + keys, + *, + drop: bool = ..., + append: bool = ..., + inplace: Literal[False] = ..., + verify_integrity: bool = ..., + ) -> DataFrame: + ... + + @overload + def set_index( + self, + keys, + *, + drop: bool = ..., + append: bool = ..., + inplace: Literal[True], + verify_integrity: bool = ..., + ) -> None: + ... + + def set_index( + self, + keys, + *, + drop: bool = True, + append: bool = False, + inplace: bool = False, + verify_integrity: bool = False, + ) -> DataFrame | None: + """ + Set the DataFrame index using existing columns. + + Set the DataFrame index (row labels) using one or more existing + columns or arrays (of the correct length). The index can replace the + existing index or expand on it. + + Parameters + ---------- + keys : label or array-like or list of labels/arrays + This parameter can be either a single column key, a single array of + the same length as the calling DataFrame, or a list containing an + arbitrary combination of column keys and arrays. Here, "array" + encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and + instances of :class:`~collections.abc.Iterator`. 
+ drop : bool, default True + Delete columns to be used as the new index. + append : bool, default False + Whether to append columns to existing index. + inplace : bool, default False + Whether to modify the DataFrame rather than creating a new one. + verify_integrity : bool, default False + Check the new index for duplicates. Otherwise defer the check until + necessary. Setting to False will improve the performance of this + method. + + Returns + ------- + DataFrame or None + Changed row labels or None if ``inplace=True``. + + See Also + -------- + DataFrame.reset_index : Opposite of set_index. + DataFrame.reindex : Change to new indices or expand indices. + DataFrame.reindex_like : Change to same indices as other DataFrame. + + Examples + -------- + >>> df = pd.DataFrame({'month': [1, 4, 7, 10], + ... 'year': [2012, 2014, 2013, 2014], + ... 'sale': [55, 40, 84, 31]}) + >>> df + month year sale + 0 1 2012 55 + 1 4 2014 40 + 2 7 2013 84 + 3 10 2014 31 + + Set the index to become the 'month' column: + + >>> df.set_index('month') + year sale + month + 1 2012 55 + 4 2014 40 + 7 2013 84 + 10 2014 31 + + Create a MultiIndex using columns 'year' and 'month': + + >>> df.set_index(['year', 'month']) + sale + year month + 2012 1 55 + 2014 4 40 + 2013 7 84 + 2014 10 31 + + Create a MultiIndex using an Index and a column: + + >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year']) + month sale + year + 1 2012 1 55 + 2 2014 4 40 + 3 2013 7 84 + 4 2014 10 31 + + Create a MultiIndex using two Series: + + >>> s = pd.Series([1, 2, 3, 4]) + >>> df.set_index([s, s**2]) + month year sale + 1 1 1 2012 55 + 2 4 4 2014 40 + 3 9 7 2013 84 + 4 16 10 2014 31 + """ + inplace = validate_bool_kwarg(inplace, "inplace") + self._check_inplace_and_allows_duplicate_labels(inplace) + if not isinstance(keys, list): + keys = [keys] + + err_msg = ( + 'The parameter "keys" may be a column key, one-dimensional ' + "array, or a list containing only valid column keys and " + "one-dimensional arrays." + ) + + missing: list[Hashable] = [] + for col in keys: + if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)): + # arrays are fine as long as they are one-dimensional + # iterators get converted to list below + if getattr(col, "ndim", 1) != 1: + raise ValueError(err_msg) + else: + # everything else gets tried as a key; see GH 24969 + try: + found = col in self.columns + except TypeError as err: + raise TypeError( + f"{err_msg}. 
Received column of type {type(col)}" + ) from err + else: + if not found: + missing.append(col) + + if missing: + raise KeyError(f"None of {missing} are in the columns") + + if inplace: + frame = self + else: + # GH 49473 Use "lazy copy" with Copy-on-Write + frame = self.copy(deep=None) + + arrays: list[Index] = [] + names: list[Hashable] = [] + if append: + names = list(self.index.names) + if isinstance(self.index, MultiIndex): + arrays.extend( + self.index._get_level_values(i) for i in range(self.index.nlevels) + ) + else: + arrays.append(self.index) + + to_remove: list[Hashable] = [] + for col in keys: + if isinstance(col, MultiIndex): + arrays.extend(col._get_level_values(n) for n in range(col.nlevels)) + names.extend(col.names) + elif isinstance(col, (Index, Series)): + # if Index then not MultiIndex (treated above) + + # error: Argument 1 to "append" of "list" has incompatible type + # "Union[Index, Series]"; expected "Index" + arrays.append(col) # type: ignore[arg-type] + names.append(col.name) + elif isinstance(col, (list, np.ndarray)): + # error: Argument 1 to "append" of "list" has incompatible type + # "Union[List[Any], ndarray]"; expected "Index" + arrays.append(col) # type: ignore[arg-type] + names.append(None) + elif isinstance(col, abc.Iterator): + # error: Argument 1 to "append" of "list" has incompatible type + # "List[Any]"; expected "Index" + arrays.append(list(col)) # type: ignore[arg-type] + names.append(None) + # from here, col can only be a column label + else: + arrays.append(frame[col]) + names.append(col) + if drop: + to_remove.append(col) + + if len(arrays[-1]) != len(self): + # check newest element against length of calling frame, since + # ensure_index_from_sequences would not raise for append=False. + raise ValueError( + f"Length mismatch: Expected {len(self)} rows, " + f"received array of length {len(arrays[-1])}" + ) + + index = ensure_index_from_sequences(arrays, names) + + if verify_integrity and not index.is_unique: + duplicates = index[index.duplicated()].unique() + raise ValueError(f"Index has duplicate keys: {duplicates}") + + # use set to handle duplicate column names gracefully in case of drop + for c in set(to_remove): + del frame[c] + + # clear up memory usage + index._cleanup() + + frame.index = index + + if not inplace: + return frame + return None + + @overload + def reset_index( + self, + level: IndexLabel = ..., + *, + drop: bool = ..., + inplace: Literal[False] = ..., + col_level: Hashable = ..., + col_fill: Hashable = ..., + allow_duplicates: bool | lib.NoDefault = ..., + names: Hashable | Sequence[Hashable] | None = None, + ) -> DataFrame: + ... + + @overload + def reset_index( + self, + level: IndexLabel = ..., + *, + drop: bool = ..., + inplace: Literal[True], + col_level: Hashable = ..., + col_fill: Hashable = ..., + allow_duplicates: bool | lib.NoDefault = ..., + names: Hashable | Sequence[Hashable] | None = None, + ) -> None: + ... + + @overload + def reset_index( + self, + level: IndexLabel = ..., + *, + drop: bool = ..., + inplace: bool = ..., + col_level: Hashable = ..., + col_fill: Hashable = ..., + allow_duplicates: bool | lib.NoDefault = ..., + names: Hashable | Sequence[Hashable] | None = None, + ) -> DataFrame | None: + ... 
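+    # Illustrative usage sketch (editorial addition, not pandas source):
+    # ``set_index`` above and ``reset_index`` below are inverses, so a round
+    # trip restores the original layout (modulo column order), e.g.:
+    #
+    #     df = pd.DataFrame({"k": ["a", "b"], "v": [1, 2]})
+    #     indexed = df.set_index("k")        # "k" becomes the row index
+    #     restored = indexed.reset_index()   # "k" is back as a column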
+ + def reset_index( + self, + level: IndexLabel | None = None, + *, + drop: bool = False, + inplace: bool = False, + col_level: Hashable = 0, + col_fill: Hashable = "", + allow_duplicates: bool | lib.NoDefault = lib.no_default, + names: Hashable | Sequence[Hashable] | None = None, + ) -> DataFrame | None: + """ + Reset the index, or a level of it. + + Reset the index of the DataFrame, and use the default one instead. + If the DataFrame has a MultiIndex, this method can remove one or more + levels. + + Parameters + ---------- + level : int, str, tuple, or list, default None + Only remove the given levels from the index. Removes all levels by + default. + drop : bool, default False + Do not try to insert index into dataframe columns. This resets + the index to the default integer index. + inplace : bool, default False + Whether to modify the DataFrame rather than creating a new one. + col_level : int or str, default 0 + If the columns have multiple levels, determines which level the + labels are inserted into. By default it is inserted into the first + level. + col_fill : object, default '' + If the columns have multiple levels, determines how the other + levels are named. If None then the index name is repeated. + allow_duplicates : bool, optional, default lib.no_default + Allow duplicate column labels to be created. + + .. versionadded:: 1.5.0 + + names : int, str or 1-dimensional list, default None + Using the given string, rename the DataFrame column which contains the + index data. If the DataFrame has a MultiIndex, this has to be a list or + tuple with length equal to the number of levels. + + .. versionadded:: 1.5.0 + + Returns + ------- + DataFrame or None + DataFrame with the new index or None if ``inplace=True``. + + See Also + -------- + DataFrame.set_index : Opposite of reset_index. + DataFrame.reindex : Change to new indices or expand indices. + DataFrame.reindex_like : Change to same indices as other DataFrame. + + Examples + -------- + >>> df = pd.DataFrame([('bird', 389.0), + ... ('bird', 24.0), + ... ('mammal', 80.5), + ... ('mammal', np.nan)], + ... index=['falcon', 'parrot', 'lion', 'monkey'], + ... columns=('class', 'max_speed')) + >>> df + class max_speed + falcon bird 389.0 + parrot bird 24.0 + lion mammal 80.5 + monkey mammal NaN + + When we reset the index, the old index is added as a column, and a + new sequential index is used: + + >>> df.reset_index() + index class max_speed + 0 falcon bird 389.0 + 1 parrot bird 24.0 + 2 lion mammal 80.5 + 3 monkey mammal NaN + + We can use the `drop` parameter to avoid the old index being added as + a column: + + >>> df.reset_index(drop=True) + class max_speed + 0 bird 389.0 + 1 bird 24.0 + 2 mammal 80.5 + 3 mammal NaN + + You can also use `reset_index` with `MultiIndex`. + + >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'), + ... ('bird', 'parrot'), + ... ('mammal', 'lion'), + ... ('mammal', 'monkey')], + ... names=['class', 'name']) + >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), + ... ('species', 'type')]) + >>> df = pd.DataFrame([(389.0, 'fly'), + ... (24.0, 'fly'), + ... (80.5, 'run'), + ... (np.nan, 'jump')], + ... index=index, + ... 
columns=columns) + >>> df + speed species + max type + class name + bird falcon 389.0 fly + parrot 24.0 fly + mammal lion 80.5 run + monkey NaN jump + + Using the `names` parameter, choose a name for the index column: + + >>> df.reset_index(names=['classes', 'names']) + classes names speed species + max type + 0 bird falcon 389.0 fly + 1 bird parrot 24.0 fly + 2 mammal lion 80.5 run + 3 mammal monkey NaN jump + + If the index has multiple levels, we can reset a subset of them: + + >>> df.reset_index(level='class') + class speed species + max type + name + falcon bird 389.0 fly + parrot bird 24.0 fly + lion mammal 80.5 run + monkey mammal NaN jump + + If we are not dropping the index, by default, it is placed in the top + level. We can place it in another level: + + >>> df.reset_index(level='class', col_level=1) + speed species + class max type + name + falcon bird 389.0 fly + parrot bird 24.0 fly + lion mammal 80.5 run + monkey mammal NaN jump + + When the index is inserted under another level, we can specify under + which one with the parameter `col_fill`: + + >>> df.reset_index(level='class', col_level=1, col_fill='species') + species speed species + class max type + name + falcon bird 389.0 fly + parrot bird 24.0 fly + lion mammal 80.5 run + monkey mammal NaN jump + + If we specify a nonexistent level for `col_fill`, it is created: + + >>> df.reset_index(level='class', col_level=1, col_fill='genus') + genus speed species + class max type + name + falcon bird 389.0 fly + parrot bird 24.0 fly + lion mammal 80.5 run + monkey mammal NaN jump + """ + inplace = validate_bool_kwarg(inplace, "inplace") + self._check_inplace_and_allows_duplicate_labels(inplace) + if inplace: + new_obj = self + else: + new_obj = self.copy(deep=None) + if allow_duplicates is not lib.no_default: + allow_duplicates = validate_bool_kwarg(allow_duplicates, "allow_duplicates") + + new_index = default_index(len(new_obj)) + if level is not None: + if not isinstance(level, (tuple, list)): + level = [level] + level = [self.index._get_level_number(lev) for lev in level] + if len(level) < self.index.nlevels: + new_index = self.index.droplevel(level) + + if not drop: + to_insert: Iterable[tuple[Any, Any | None]] + + default = "index" if "index" not in self else "level_0" + names = self.index._get_default_index_names(names, default) + + if isinstance(self.index, MultiIndex): + to_insert = zip(self.index.levels, self.index.codes) + else: + to_insert = ((self.index, None),) + + multi_col = isinstance(self.columns, MultiIndex) + for i, (lev, lab) in reversed(list(enumerate(to_insert))): + if level is not None and i not in level: + continue + name = names[i] + if multi_col: + col_name = list(name) if isinstance(name, tuple) else [name] + if col_fill is None: + if len(col_name) not in (1, self.columns.nlevels): + raise ValueError( + "col_fill=None is incompatible " + f"with incomplete column name {name}" + ) + col_fill = col_name[0] + + lev_num = self.columns._get_level_number(col_level) + name_lst = [col_fill] * lev_num + col_name + missing = self.columns.nlevels - len(name_lst) + name_lst += [col_fill] * missing + name = tuple(name_lst) + + # to ndarray and maybe infer different dtype + level_values = lev._values + if level_values.dtype == np.object_: + level_values = lib.maybe_convert_objects(level_values) + + if lab is not None: + # if we have the codes, extract the values with a mask + level_values = algorithms.take( + level_values, lab, allow_fill=True, fill_value=lev._na_value + ) + + new_obj.insert( + 0, + name, + 
level_values, + allow_duplicates=allow_duplicates, + ) + + new_obj.index = new_index + if not inplace: + return new_obj + + return None + + # ---------------------------------------------------------------------- + # Reindex-based selection methods + + @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) + def isna(self) -> DataFrame: + res_mgr = self._mgr.isna(func=isna) + result = self._constructor_from_mgr(res_mgr, axes=res_mgr.axes) + return result.__finalize__(self, method="isna") + + @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) + def isnull(self) -> DataFrame: + """ + DataFrame.isnull is an alias for DataFrame.isna. + """ + return self.isna() + + @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) + def notna(self) -> DataFrame: + return ~self.isna() + + @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) + def notnull(self) -> DataFrame: + """ + DataFrame.notnull is an alias for DataFrame.notna. + """ + return ~self.isna() + + @overload + def dropna( + self, + *, + axis: Axis = ..., + how: AnyAll | lib.NoDefault = ..., + thresh: int | lib.NoDefault = ..., + subset: IndexLabel = ..., + inplace: Literal[False] = ..., + ignore_index: bool = ..., + ) -> DataFrame: + ... + + @overload + def dropna( + self, + *, + axis: Axis = ..., + how: AnyAll | lib.NoDefault = ..., + thresh: int | lib.NoDefault = ..., + subset: IndexLabel = ..., + inplace: Literal[True], + ignore_index: bool = ..., + ) -> None: + ... + + def dropna( + self, + *, + axis: Axis = 0, + how: AnyAll | lib.NoDefault = lib.no_default, + thresh: int | lib.NoDefault = lib.no_default, + subset: IndexLabel | None = None, + inplace: bool = False, + ignore_index: bool = False, + ) -> DataFrame | None: + """ + Remove missing values. + + See the :ref:`User Guide ` for more on which values are + considered missing, and how to work with missing data. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns'}, default 0 + Determine if rows or columns which contain missing values are + removed. + + * 0, or 'index' : Drop rows which contain missing values. + * 1, or 'columns' : Drop columns which contain missing value. + + Only a single axis is allowed. + + how : {'any', 'all'}, default 'any' + Determine if row or column is removed from DataFrame, when we have + at least one NA or all NA. + + * 'any' : If any NA values are present, drop that row or column. + * 'all' : If all values are NA, drop that row or column. + + thresh : int, optional + Require that many non-NA values. Cannot be combined with how. + subset : column label or sequence of labels, optional + Labels along other axis to consider, e.g. if you are dropping rows + these would be a list of columns to include. + inplace : bool, default False + Whether to modify the DataFrame rather than creating a new one. + ignore_index : bool, default ``False`` + If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. + + .. versionadded:: 2.0.0 + + Returns + ------- + DataFrame or None + DataFrame with NA entries dropped from it or None if ``inplace=True``. + + See Also + -------- + DataFrame.isna: Indicate missing values. + DataFrame.notna : Indicate existing (non-missing) values. + DataFrame.fillna : Replace missing values. + Series.dropna : Drop missing values. + Index.dropna : Drop missing indices. + + Examples + -------- + >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], + ... "toy": [np.nan, 'Batmobile', 'Bullwhip'], + ... "born": [pd.NaT, pd.Timestamp("1940-04-25"), + ... 
pd.NaT]}) + >>> df + name toy born + 0 Alfred NaN NaT + 1 Batman Batmobile 1940-04-25 + 2 Catwoman Bullwhip NaT + + Drop the rows where at least one element is missing. + + >>> df.dropna() + name toy born + 1 Batman Batmobile 1940-04-25 + + Drop the columns where at least one element is missing. + + >>> df.dropna(axis='columns') + name + 0 Alfred + 1 Batman + 2 Catwoman + + Drop the rows where all elements are missing. + + >>> df.dropna(how='all') + name toy born + 0 Alfred NaN NaT + 1 Batman Batmobile 1940-04-25 + 2 Catwoman Bullwhip NaT + + Keep only the rows with at least 2 non-NA values. + + >>> df.dropna(thresh=2) + name toy born + 1 Batman Batmobile 1940-04-25 + 2 Catwoman Bullwhip NaT + + Define in which columns to look for missing values. + + >>> df.dropna(subset=['name', 'toy']) + name toy born + 1 Batman Batmobile 1940-04-25 + 2 Catwoman Bullwhip NaT + """ + if (how is not lib.no_default) and (thresh is not lib.no_default): + raise TypeError( + "You cannot set both the how and thresh arguments at the same time." + ) + + if how is lib.no_default: + how = "any" + + inplace = validate_bool_kwarg(inplace, "inplace") + if isinstance(axis, (tuple, list)): + # GH20987 + raise TypeError("supplying multiple axes to axis is no longer supported.") + + axis = self._get_axis_number(axis) + agg_axis = 1 - axis + + agg_obj = self + if subset is not None: + # subset needs to be list + if not is_list_like(subset): + subset = [subset] + ax = self._get_axis(agg_axis) + indices = ax.get_indexer_for(subset) + check = indices == -1 + if check.any(): + raise KeyError(np.array(subset)[check].tolist()) + agg_obj = self.take(indices, axis=agg_axis) + + if thresh is not lib.no_default: + count = agg_obj.count(axis=agg_axis) + mask = count >= thresh + elif how == "any": + # faster equivalent to 'agg_obj.count(agg_axis) == self.shape[agg_axis]' + mask = notna(agg_obj).all(axis=agg_axis, bool_only=False) + elif how == "all": + # faster equivalent to 'agg_obj.count(agg_axis) > 0' + mask = notna(agg_obj).any(axis=agg_axis, bool_only=False) + else: + raise ValueError(f"invalid how option: {how}") + + if np.all(mask): + result = self.copy(deep=None) + else: + result = self.loc(axis=axis)[mask] + + if ignore_index: + result.index = default_index(len(result)) + + if not inplace: + return result + self._update_inplace(result) + return None + + @overload + def drop_duplicates( + self, + subset: Hashable | Sequence[Hashable] | None = ..., + *, + keep: DropKeep = ..., + inplace: Literal[True], + ignore_index: bool = ..., + ) -> None: + ... + + @overload + def drop_duplicates( + self, + subset: Hashable | Sequence[Hashable] | None = ..., + *, + keep: DropKeep = ..., + inplace: Literal[False] = ..., + ignore_index: bool = ..., + ) -> DataFrame: + ... + + @overload + def drop_duplicates( + self, + subset: Hashable | Sequence[Hashable] | None = ..., + *, + keep: DropKeep = ..., + inplace: bool = ..., + ignore_index: bool = ..., + ) -> DataFrame | None: + ... + + def drop_duplicates( + self, + subset: Hashable | Sequence[Hashable] | None = None, + *, + keep: DropKeep = "first", + inplace: bool = False, + ignore_index: bool = False, + ) -> DataFrame | None: + """ + Return DataFrame with duplicate rows removed. + + Considering certain columns is optional. Indexes, including time indexes + are ignored. + + Parameters + ---------- + subset : column label or sequence of labels, optional + Only consider certain columns for identifying duplicates, by + default use all of the columns. 
+ keep : {'first', 'last', ``False``}, default 'first' + Determines which duplicates (if any) to keep. + + - 'first' : Drop duplicates except for the first occurrence. + - 'last' : Drop duplicates except for the last occurrence. + - ``False`` : Drop all duplicates. + + inplace : bool, default ``False`` + Whether to modify the DataFrame rather than creating a new one. + ignore_index : bool, default ``False`` + If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. + + Returns + ------- + DataFrame or None + DataFrame with duplicates removed or None if ``inplace=True``. + + See Also + -------- + DataFrame.value_counts: Count unique combinations of columns. + + Examples + -------- + Consider dataset containing ramen rating. + + >>> df = pd.DataFrame({ + ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], + ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], + ... 'rating': [4, 4, 3.5, 15, 5] + ... }) + >>> df + brand style rating + 0 Yum Yum cup 4.0 + 1 Yum Yum cup 4.0 + 2 Indomie cup 3.5 + 3 Indomie pack 15.0 + 4 Indomie pack 5.0 + + By default, it removes duplicate rows based on all columns. + + >>> df.drop_duplicates() + brand style rating + 0 Yum Yum cup 4.0 + 2 Indomie cup 3.5 + 3 Indomie pack 15.0 + 4 Indomie pack 5.0 + + To remove duplicates on specific column(s), use ``subset``. + + >>> df.drop_duplicates(subset=['brand']) + brand style rating + 0 Yum Yum cup 4.0 + 2 Indomie cup 3.5 + + To remove duplicates and keep last occurrences, use ``keep``. + + >>> df.drop_duplicates(subset=['brand', 'style'], keep='last') + brand style rating + 1 Yum Yum cup 4.0 + 2 Indomie cup 3.5 + 4 Indomie pack 5.0 + """ + if self.empty: + return self.copy(deep=None) + + inplace = validate_bool_kwarg(inplace, "inplace") + ignore_index = validate_bool_kwarg(ignore_index, "ignore_index") + + result = self[-self.duplicated(subset, keep=keep)] + if ignore_index: + result.index = default_index(len(result)) + + if inplace: + self._update_inplace(result) + return None + else: + return result + + def duplicated( + self, + subset: Hashable | Sequence[Hashable] | None = None, + keep: DropKeep = "first", + ) -> Series: + """ + Return boolean Series denoting duplicate rows. + + Considering certain columns is optional. + + Parameters + ---------- + subset : column label or sequence of labels, optional + Only consider certain columns for identifying duplicates, by + default use all of the columns. + keep : {'first', 'last', False}, default 'first' + Determines which duplicates (if any) to mark. + + - ``first`` : Mark duplicates as ``True`` except for the first occurrence. + - ``last`` : Mark duplicates as ``True`` except for the last occurrence. + - False : Mark all duplicates as ``True``. + + Returns + ------- + Series + Boolean series for each duplicated rows. + + See Also + -------- + Index.duplicated : Equivalent method on index. + Series.duplicated : Equivalent method on Series. + Series.drop_duplicates : Remove duplicate values from Series. + DataFrame.drop_duplicates : Remove duplicate values from DataFrame. + + Examples + -------- + Consider dataset containing ramen rating. + + >>> df = pd.DataFrame({ + ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], + ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], + ... 'rating': [4, 4, 3.5, 15, 5] + ... 
}) + >>> df + brand style rating + 0 Yum Yum cup 4.0 + 1 Yum Yum cup 4.0 + 2 Indomie cup 3.5 + 3 Indomie pack 15.0 + 4 Indomie pack 5.0 + + By default, for each set of duplicated values, the first occurrence + is set on False and all others on True. + + >>> df.duplicated() + 0 False + 1 True + 2 False + 3 False + 4 False + dtype: bool + + By using 'last', the last occurrence of each set of duplicated values + is set on False and all others on True. + + >>> df.duplicated(keep='last') + 0 True + 1 False + 2 False + 3 False + 4 False + dtype: bool + + By setting ``keep`` on False, all duplicates are True. + + >>> df.duplicated(keep=False) + 0 True + 1 True + 2 False + 3 False + 4 False + dtype: bool + + To find duplicates on specific column(s), use ``subset``. + + >>> df.duplicated(subset=['brand']) + 0 False + 1 True + 2 False + 3 True + 4 True + dtype: bool + """ + + if self.empty: + return self._constructor_sliced(dtype=bool) + + def f(vals) -> tuple[np.ndarray, int]: + labels, shape = algorithms.factorize(vals, size_hint=len(self)) + return labels.astype("i8", copy=False), len(shape) + + if subset is None: + # https://github.com/pandas-dev/pandas/issues/28770 + # Incompatible types in assignment (expression has type "Index", variable + # has type "Sequence[Any]") + subset = self.columns # type: ignore[assignment] + elif ( + not np.iterable(subset) + or isinstance(subset, str) + or isinstance(subset, tuple) + and subset in self.columns + ): + subset = (subset,) + + # needed for mypy since can't narrow types using np.iterable + subset = cast(Sequence, subset) + + # Verify all columns in subset exist in the queried dataframe + # Otherwise, raise a KeyError, same as if you try to __getitem__ with a + # key that doesn't exist. + diff = set(subset) - set(self.columns) + if diff: + raise KeyError(Index(diff)) + + if len(subset) == 1 and self.columns.is_unique: + # GH#45236 This is faster than get_group_index below + result = self[subset[0]].duplicated(keep) + result.name = None + else: + vals = (col.values for name, col in self.items() if name in subset) + labels, shape = map(list, zip(*map(f, vals))) + + ids = get_group_index( + labels, + # error: Argument 1 to "tuple" has incompatible type "List[_T]"; + # expected "Iterable[int]" + tuple(shape), # type: ignore[arg-type] + sort=False, + xnull=False, + ) + result = self._constructor_sliced(duplicated(ids, keep), index=self.index) + return result.__finalize__(self, method="duplicated") + + # ---------------------------------------------------------------------- + # Sorting + # error: Signature of "sort_values" incompatible with supertype "NDFrame" + @overload # type: ignore[override] + def sort_values( + self, + by: IndexLabel, + *, + axis: Axis = ..., + ascending=..., + inplace: Literal[False] = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + ignore_index: bool = ..., + key: ValueKeyFunc = ..., + ) -> DataFrame: + ... + + @overload + def sort_values( + self, + by: IndexLabel, + *, + axis: Axis = ..., + ascending=..., + inplace: Literal[True], + kind: SortKind = ..., + na_position: str = ..., + ignore_index: bool = ..., + key: ValueKeyFunc = ..., + ) -> None: + ... + + def sort_values( + self, + by: IndexLabel, + *, + axis: Axis = 0, + ascending: bool | list[bool] | tuple[bool, ...] = True, + inplace: bool = False, + kind: SortKind = "quicksort", + na_position: str = "last", + ignore_index: bool = False, + key: ValueKeyFunc | None = None, + ) -> DataFrame | None: + """ + Sort by the values along either axis. 
+
+        Parameters
+        ----------
+        by : str or list of str
+            Name or list of names to sort by.
+
+            - if `axis` is 0 or `'index'` then `by` may contain index
+              levels and/or column labels.
+            - if `axis` is 1 or `'columns'` then `by` may contain column
+              levels and/or index labels.
+        axis : {0 or 'index', 1 or 'columns'}, default 0
+            Axis to be sorted.
+        ascending : bool or list of bool, default True
+            Sort ascending vs. descending. Specify list for multiple sort
+            orders. If this is a list of bools, it must match the length of
+            `by`.
+        inplace : bool, default False
+            If True, perform operation in-place.
+        kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
+            Choice of sorting algorithm. See also :func:`numpy.sort` for more
+            information. `mergesort` and `stable` are the only stable algorithms. For
+            DataFrames, this option is only applied when sorting on a single
+            column or label.
+        na_position : {'first', 'last'}, default 'last'
+            Puts NaNs at the beginning if `first`; `last` puts NaNs at the
+            end.
+        ignore_index : bool, default False
+            If True, the resulting axis will be labeled 0, 1, …, n - 1.
+        key : callable, optional
+            Apply the key function to the values
+            before sorting. This is similar to the `key` argument in the
+            builtin :meth:`sorted` function, with the notable difference that
+            this `key` function should be *vectorized*. It should expect a
+            ``Series`` and return a Series with the same shape as the input.
+            It will be applied to each column in `by` independently.
+
+        Returns
+        -------
+        DataFrame or None
+            DataFrame with sorted values or None if ``inplace=True``.
+
+        See Also
+        --------
+        DataFrame.sort_index : Sort a DataFrame by the index.
+        Series.sort_values : Similar method for a Series.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({
+        ...     'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
+        ...     'col2': [2, 1, 9, 8, 7, 4],
+        ...     'col3': [0, 1, 9, 4, 2, 3],
+        ...     'col4': ['a', 'B', 'c', 'D', 'e', 'F']
+        ... })
+        >>> df
+          col1  col2  col3 col4
+        0    A     2     0    a
+        1    A     1     1    B
+        2    B     9     9    c
+        3  NaN     8     4    D
+        4    D     7     2    e
+        5    C     4     3    F
+
+        Sort by col1
+
+        >>> df.sort_values(by=['col1'])
+          col1  col2  col3 col4
+        0    A     2     0    a
+        1    A     1     1    B
+        2    B     9     9    c
+        5    C     4     3    F
+        4    D     7     2    e
+        3  NaN     8     4    D
+
+        Sort by multiple columns
+
+        >>> df.sort_values(by=['col1', 'col2'])
+          col1  col2  col3 col4
+        1    A     1     1    B
+        0    A     2     0    a
+        2    B     9     9    c
+        5    C     4     3    F
+        4    D     7     2    e
+        3  NaN     8     4    D
+
+        Sort Descending
+
+        >>> df.sort_values(by='col1', ascending=False)
+          col1  col2  col3 col4
+        4    D     7     2    e
+        5    C     4     3    F
+        2    B     9     9    c
+        0    A     2     0    a
+        1    A     1     1    B
+        3  NaN     8     4    D
+
+        Putting NAs first
+
+        >>> df.sort_values(by='col1', ascending=False, na_position='first')
+          col1  col2  col3 col4
+        3  NaN     8     4    D
+        4    D     7     2    e
+        5    C     4     3    F
+        2    B     9     9    c
+        0    A     2     0    a
+        1    A     1     1    B
+
+        Sorting with a key function
+
+        >>> df.sort_values(by='col4', key=lambda col: col.str.lower())
+          col1  col2  col3 col4
+        0    A     2     0    a
+        1    A     1     1    B
+        2    B     9     9    c
+        3  NaN     8     4    D
+        4    D     7     2    e
+        5    C     4     3    F
+
+        Natural sort with the key argument,
+        using the `natsort <https://github.com/SethMMorton/natsort>`__
+        package.
+
+        >>> df = pd.DataFrame({
+        ...     "time": ['0hr', '128hr', '72hr', '48hr', '96hr'],
+        ...     "value": [10, 20, 30, 40, 50]
+        ... })
+        >>> df
+            time  value
+        0    0hr     10
+        1  128hr     20
+        2   72hr     30
+        3   48hr     40
+        4   96hr     50
+        >>> from natsort import index_natsorted
+        >>> df.sort_values(
+        ...     by="time",
+        ...     key=lambda x: np.argsort(index_natsorted(df["time"]))
+        ...
) + time value + 0 0hr 10 + 3 48hr 40 + 2 72hr 30 + 4 96hr 50 + 1 128hr 20 + """ + inplace = validate_bool_kwarg(inplace, "inplace") + axis = self._get_axis_number(axis) + ascending = validate_ascending(ascending) + if not isinstance(by, list): + by = [by] + # error: Argument 1 to "len" has incompatible type "Union[bool, List[bool]]"; + # expected "Sized" + if is_sequence(ascending) and ( + len(by) != len(ascending) # type: ignore[arg-type] + ): + # error: Argument 1 to "len" has incompatible type "Union[bool, + # List[bool]]"; expected "Sized" + raise ValueError( + f"Length of ascending ({len(ascending)})" # type: ignore[arg-type] + f" != length of by ({len(by)})" + ) + if len(by) > 1: + keys = [self._get_label_or_level_values(x, axis=axis) for x in by] + + # need to rewrap columns in Series to apply key function + if key is not None: + # error: List comprehension has incompatible type List[Series]; + # expected List[ndarray] + keys = [ + Series(k, name=name) # type: ignore[misc] + for (k, name) in zip(keys, by) + ] + + indexer = lexsort_indexer( + keys, orders=ascending, na_position=na_position, key=key + ) + elif len(by): + # len(by) == 1 + + k = self._get_label_or_level_values(by[0], axis=axis) + + # need to rewrap column in Series to apply key function + if key is not None: + # error: Incompatible types in assignment (expression has type + # "Series", variable has type "ndarray") + k = Series(k, name=by[0]) # type: ignore[assignment] + + if isinstance(ascending, (tuple, list)): + ascending = ascending[0] + + indexer = nargsort( + k, kind=kind, ascending=ascending, na_position=na_position, key=key + ) + else: + if inplace: + return self._update_inplace(self) + else: + return self.copy(deep=None) + + if is_range_indexer(indexer, len(indexer)): + result = self.copy(deep=(not inplace and not using_copy_on_write())) + if ignore_index: + result.index = default_index(len(result)) + + if inplace: + return self._update_inplace(result) + else: + return result + + new_data = self._mgr.take( + indexer, axis=self._get_block_manager_axis(axis), verify=False + ) + + if ignore_index: + new_data.set_axis( + self._get_block_manager_axis(axis), default_index(len(indexer)) + ) + + result = self._constructor_from_mgr(new_data, axes=new_data.axes) + if inplace: + return self._update_inplace(result) + else: + return result.__finalize__(self, method="sort_values") + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool | Sequence[bool] = ..., + inplace: Literal[True], + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool = ..., + ignore_index: bool = ..., + key: IndexKeyFunc = ..., + ) -> None: + ... + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool | Sequence[bool] = ..., + inplace: Literal[False] = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool = ..., + ignore_index: bool = ..., + key: IndexKeyFunc = ..., + ) -> DataFrame: + ... + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool | Sequence[bool] = ..., + inplace: bool = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool = ..., + ignore_index: bool = ..., + key: IndexKeyFunc = ..., + ) -> DataFrame | None: + ... 
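+    # Note: the @overload stubs above exist only for static type checkers;
+    # they encode that ``inplace=True`` returns ``None`` while
+    # ``inplace=False`` returns a new DataFrame. A minimal, illustrative
+    # usage sketch (mirroring the docstring examples below):
+    #
+    #     >>> df = pd.DataFrame({"b": [1, 2]}, index=[10, 2])
+    #     >>> df.sort_index()
+    #         b
+    #     2   2
+    #     10  1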
+ + def sort_index( + self, + *, + axis: Axis = 0, + level: IndexLabel | None = None, + ascending: bool | Sequence[bool] = True, + inplace: bool = False, + kind: SortKind = "quicksort", + na_position: NaPosition = "last", + sort_remaining: bool = True, + ignore_index: bool = False, + key: IndexKeyFunc | None = None, + ) -> DataFrame | None: + """ + Sort object by labels (along an axis). + + Returns a new DataFrame sorted by label if `inplace` argument is + ``False``, otherwise updates the original DataFrame and returns None. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis along which to sort. The value 0 identifies the rows, + and 1 identifies the columns. + level : int or level name or list of ints or list of level names + If not None, sort on values in specified index level(s). + ascending : bool or list-like of bools, default True + Sort ascending vs. descending. When the index is a MultiIndex the + sort direction can be controlled for each level individually. + inplace : bool, default False + Whether to modify the DataFrame rather than creating a new one. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' + Choice of sorting algorithm. See also :func:`numpy.sort` for more + information. `mergesort` and `stable` are the only stable algorithms. For + DataFrames, this option is only applied when sorting on a single + column or label. + na_position : {'first', 'last'}, default 'last' + Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. + Not implemented for MultiIndex. + sort_remaining : bool, default True + If True and sorting by level and index is multilevel, sort by other + levels too (in order) after sorting by specified level. + ignore_index : bool, default False + If True, the resulting axis will be labeled 0, 1, …, n - 1. + key : callable, optional + If not None, apply the key function to the index values + before sorting. This is similar to the `key` argument in the + builtin :meth:`sorted` function, with the notable difference that + this `key` function should be *vectorized*. It should expect an + ``Index`` and return an ``Index`` of the same shape. For MultiIndex + inputs, the key is applied *per level*. + + Returns + ------- + DataFrame or None + The original DataFrame sorted by the labels or None if ``inplace=True``. + + See Also + -------- + Series.sort_index : Sort Series by the index. + DataFrame.sort_values : Sort DataFrame by the value. + Series.sort_values : Sort Series by the value. + + Examples + -------- + >>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150], + ... columns=['A']) + >>> df.sort_index() + A + 1 4 + 29 2 + 100 1 + 150 5 + 234 3 + + By default, it sorts in ascending order, to sort in descending order, + use ``ascending=False`` + + >>> df.sort_index(ascending=False) + A + 234 3 + 150 5 + 100 1 + 29 2 + 1 4 + + A key function can be specified which is applied to the index before + sorting. For a ``MultiIndex`` this is applied to each level separately. 
+ + >>> df = pd.DataFrame({"a": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd']) + >>> df.sort_index(key=lambda x: x.str.lower()) + a + A 1 + b 2 + C 3 + d 4 + """ + return super().sort_index( + axis=axis, + level=level, + ascending=ascending, + inplace=inplace, + kind=kind, + na_position=na_position, + sort_remaining=sort_remaining, + ignore_index=ignore_index, + key=key, + ) + + def value_counts( + self, + subset: IndexLabel | None = None, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + dropna: bool = True, + ) -> Series: + """ + Return a Series containing the frequency of each distinct row in the Dataframe. + + Parameters + ---------- + subset : label or list of labels, optional + Columns to use when counting unique combinations. + normalize : bool, default False + Return proportions rather than frequencies. + sort : bool, default True + Sort by frequencies when True. Sort by DataFrame column values when False. + ascending : bool, default False + Sort in ascending order. + dropna : bool, default True + Don't include counts of rows that contain NA values. + + .. versionadded:: 1.3.0 + + Returns + ------- + Series + + See Also + -------- + Series.value_counts: Equivalent method on Series. + + Notes + ----- + The returned Series will have a MultiIndex with one level per input + column but an Index (non-multi) for a single label. By default, rows + that contain any NA values are omitted from the result. By default, + the resulting Series will be in descending order so that the first + element is the most frequently-occurring row. + + Examples + -------- + >>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6], + ... 'num_wings': [2, 0, 0, 0]}, + ... index=['falcon', 'dog', 'cat', 'ant']) + >>> df + num_legs num_wings + falcon 2 2 + dog 4 0 + cat 4 0 + ant 6 0 + + >>> df.value_counts() + num_legs num_wings + 4 0 2 + 2 2 1 + 6 0 1 + Name: count, dtype: int64 + + >>> df.value_counts(sort=False) + num_legs num_wings + 2 2 1 + 4 0 2 + 6 0 1 + Name: count, dtype: int64 + + >>> df.value_counts(ascending=True) + num_legs num_wings + 2 2 1 + 6 0 1 + 4 0 2 + Name: count, dtype: int64 + + >>> df.value_counts(normalize=True) + num_legs num_wings + 4 0 0.50 + 2 2 0.25 + 6 0 0.25 + Name: proportion, dtype: float64 + + With `dropna` set to `False` we can also count rows with NA values. + + >>> df = pd.DataFrame({'first_name': ['John', 'Anne', 'John', 'Beth'], + ... 
'middle_name': ['Smith', pd.NA, pd.NA, 'Louise']}) + >>> df + first_name middle_name + 0 John Smith + 1 Anne + 2 John + 3 Beth Louise + + >>> df.value_counts() + first_name middle_name + Beth Louise 1 + John Smith 1 + Name: count, dtype: int64 + + >>> df.value_counts(dropna=False) + first_name middle_name + Anne NaN 1 + Beth Louise 1 + John Smith 1 + NaN 1 + Name: count, dtype: int64 + + >>> df.value_counts("first_name") + first_name + John 2 + Anne 1 + Beth 1 + Name: count, dtype: int64 + """ + if subset is None: + subset = self.columns.tolist() + + name = "proportion" if normalize else "count" + counts = self.groupby(subset, dropna=dropna, observed=False).grouper.size() + counts.name = name + + if sort: + counts = counts.sort_values(ascending=ascending) + if normalize: + counts /= counts.sum() + + # Force MultiIndex for a list_like subset with a single column + if is_list_like(subset) and len(subset) == 1: # type: ignore[arg-type] + counts.index = MultiIndex.from_arrays( + [counts.index], names=[counts.index.name] + ) + + return counts + + def nlargest( + self, n: int, columns: IndexLabel, keep: NsmallestNlargestKeep = "first" + ) -> DataFrame: + """ + Return the first `n` rows ordered by `columns` in descending order. + + Return the first `n` rows with the largest values in `columns`, in + descending order. The columns that are not specified are returned as + well, but not used for ordering. + + This method is equivalent to + ``df.sort_values(columns, ascending=False).head(n)``, but more + performant. + + Parameters + ---------- + n : int + Number of rows to return. + columns : label or list of labels + Column label(s) to order by. + keep : {'first', 'last', 'all'}, default 'first' + Where there are duplicate values: + + - ``first`` : prioritize the first occurrence(s) + - ``last`` : prioritize the last occurrence(s) + - ``all`` : do not drop any duplicates, even it means + selecting more than `n` items. + + Returns + ------- + DataFrame + The first `n` rows ordered by the given columns in descending + order. + + See Also + -------- + DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in + ascending order. + DataFrame.sort_values : Sort DataFrame by the values. + DataFrame.head : Return the first `n` rows without re-ordering. + + Notes + ----- + This function cannot be used with all column types. For example, when + specifying columns with `object` or `category` dtypes, ``TypeError`` is + raised. + + Examples + -------- + >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, + ... 434000, 434000, 337000, 11300, + ... 11300, 11300], + ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, + ... 17036, 182, 38, 311], + ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", + ... "IS", "NR", "TV", "AI"]}, + ... index=["Italy", "France", "Malta", + ... "Maldives", "Brunei", "Iceland", + ... "Nauru", "Tuvalu", "Anguilla"]) + >>> df + population GDP alpha-2 + Italy 59000000 1937894 IT + France 65000000 2583560 FR + Malta 434000 12011 MT + Maldives 434000 4520 MV + Brunei 434000 12128 BN + Iceland 337000 17036 IS + Nauru 11300 182 NR + Tuvalu 11300 38 TV + Anguilla 11300 311 AI + + In the following example, we will use ``nlargest`` to select the three + rows having the largest values in column "population". 
+ + >>> df.nlargest(3, 'population') + population GDP alpha-2 + France 65000000 2583560 FR + Italy 59000000 1937894 IT + Malta 434000 12011 MT + + When using ``keep='last'``, ties are resolved in reverse order: + + >>> df.nlargest(3, 'population', keep='last') + population GDP alpha-2 + France 65000000 2583560 FR + Italy 59000000 1937894 IT + Brunei 434000 12128 BN + + When using ``keep='all'``, all duplicate items are maintained: + + >>> df.nlargest(3, 'population', keep='all') + population GDP alpha-2 + France 65000000 2583560 FR + Italy 59000000 1937894 IT + Malta 434000 12011 MT + Maldives 434000 4520 MV + Brunei 434000 12128 BN + + To order by the largest values in column "population" and then "GDP", + we can specify multiple columns like in the next example. + + >>> df.nlargest(3, ['population', 'GDP']) + population GDP alpha-2 + France 65000000 2583560 FR + Italy 59000000 1937894 IT + Brunei 434000 12128 BN + """ + return selectn.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest() + + def nsmallest( + self, n: int, columns: IndexLabel, keep: NsmallestNlargestKeep = "first" + ) -> DataFrame: + """ + Return the first `n` rows ordered by `columns` in ascending order. + + Return the first `n` rows with the smallest values in `columns`, in + ascending order. The columns that are not specified are returned as + well, but not used for ordering. + + This method is equivalent to + ``df.sort_values(columns, ascending=True).head(n)``, but more + performant. + + Parameters + ---------- + n : int + Number of items to retrieve. + columns : list or str + Column name or names to order by. + keep : {'first', 'last', 'all'}, default 'first' + Where there are duplicate values: + + - ``first`` : take the first occurrence. + - ``last`` : take the last occurrence. + - ``all`` : do not drop any duplicates, even it means + selecting more than `n` items. + + Returns + ------- + DataFrame + + See Also + -------- + DataFrame.nlargest : Return the first `n` rows ordered by `columns` in + descending order. + DataFrame.sort_values : Sort DataFrame by the values. + DataFrame.head : Return the first `n` rows without re-ordering. + + Examples + -------- + >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, + ... 434000, 434000, 337000, 337000, + ... 11300, 11300], + ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, + ... 17036, 182, 38, 311], + ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", + ... "IS", "NR", "TV", "AI"]}, + ... index=["Italy", "France", "Malta", + ... "Maldives", "Brunei", "Iceland", + ... "Nauru", "Tuvalu", "Anguilla"]) + >>> df + population GDP alpha-2 + Italy 59000000 1937894 IT + France 65000000 2583560 FR + Malta 434000 12011 MT + Maldives 434000 4520 MV + Brunei 434000 12128 BN + Iceland 337000 17036 IS + Nauru 337000 182 NR + Tuvalu 11300 38 TV + Anguilla 11300 311 AI + + In the following example, we will use ``nsmallest`` to select the + three rows having the smallest values in column "population". 
+ + >>> df.nsmallest(3, 'population') + population GDP alpha-2 + Tuvalu 11300 38 TV + Anguilla 11300 311 AI + Iceland 337000 17036 IS + + When using ``keep='last'``, ties are resolved in reverse order: + + >>> df.nsmallest(3, 'population', keep='last') + population GDP alpha-2 + Anguilla 11300 311 AI + Tuvalu 11300 38 TV + Nauru 337000 182 NR + + When using ``keep='all'``, all duplicate items are maintained: + + >>> df.nsmallest(3, 'population', keep='all') + population GDP alpha-2 + Tuvalu 11300 38 TV + Anguilla 11300 311 AI + Iceland 337000 17036 IS + Nauru 337000 182 NR + + To order by the smallest values in column "population" and then "GDP", we can + specify multiple columns like in the next example. + + >>> df.nsmallest(3, ['population', 'GDP']) + population GDP alpha-2 + Tuvalu 11300 38 TV + Anguilla 11300 311 AI + Nauru 337000 182 NR + """ + return selectn.SelectNFrame(self, n=n, keep=keep, columns=columns).nsmallest() + + @doc( + Series.swaplevel, + klass=_shared_doc_kwargs["klass"], + extra_params=dedent( + """axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to swap levels on. 0 or 'index' for row-wise, 1 or + 'columns' for column-wise.""" + ), + examples=dedent( + """\ + Examples + -------- + >>> df = pd.DataFrame( + ... {"Grade": ["A", "B", "A", "C"]}, + ... index=[ + ... ["Final exam", "Final exam", "Coursework", "Coursework"], + ... ["History", "Geography", "History", "Geography"], + ... ["January", "February", "March", "April"], + ... ], + ... ) + >>> df + Grade + Final exam History January A + Geography February B + Coursework History March A + Geography April C + + In the following example, we will swap the levels of the indices. + Here, we will swap the levels column-wise, but levels can be swapped row-wise + in a similar manner. Note that column-wise is the default behaviour. + By not supplying any arguments for i and j, we swap the last and second to + last indices. + + >>> df.swaplevel() + Grade + Final exam January History A + February Geography B + Coursework March History A + April Geography C + + By supplying one argument, we can choose which index to swap the last + index with. We can for example swap the first index with the last one as + follows. + + >>> df.swaplevel(0) + Grade + January History Final exam A + February Geography Final exam B + March History Coursework A + April Geography Coursework C + + We can also define explicitly which indices we want to swap by supplying values + for both i and j. Here, we for example swap the first and second indices. + + >>> df.swaplevel(0, 1) + Grade + History Final exam January A + Geography Final exam February B + History Coursework March A + Geography Coursework April C""" + ), + ) + def swaplevel(self, i: Axis = -2, j: Axis = -1, axis: Axis = 0) -> DataFrame: + result = self.copy(deep=None) + + axis = self._get_axis_number(axis) + + if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover + raise TypeError("Can only swap levels on a hierarchical axis.") + + if axis == 0: + assert isinstance(result.index, MultiIndex) + result.index = result.index.swaplevel(i, j) + else: + assert isinstance(result.columns, MultiIndex) + result.columns = result.columns.swaplevel(i, j) + return result + + def reorder_levels(self, order: Sequence[int | str], axis: Axis = 0) -> DataFrame: + """ + Rearrange index levels using input order. May not drop or duplicate levels. + + Parameters + ---------- + order : list of int or list of str + List representing new level order. 
Reference level by number + (position) or by key (label). + axis : {0 or 'index', 1 or 'columns'}, default 0 + Where to reorder levels. + + Returns + ------- + DataFrame + + Examples + -------- + >>> data = { + ... "class": ["Mammals", "Mammals", "Reptiles"], + ... "diet": ["Omnivore", "Carnivore", "Carnivore"], + ... "species": ["Humans", "Dogs", "Snakes"], + ... } + >>> df = pd.DataFrame(data, columns=["class", "diet", "species"]) + >>> df = df.set_index(["class", "diet"]) + >>> df + species + class diet + Mammals Omnivore Humans + Carnivore Dogs + Reptiles Carnivore Snakes + + Let's reorder the levels of the index: + + >>> df.reorder_levels(["diet", "class"]) + species + diet class + Omnivore Mammals Humans + Carnivore Mammals Dogs + Reptiles Snakes + """ + axis = self._get_axis_number(axis) + if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover + raise TypeError("Can only reorder levels on a hierarchical axis.") + + result = self.copy(deep=None) + + if axis == 0: + assert isinstance(result.index, MultiIndex) + result.index = result.index.reorder_levels(order) + else: + assert isinstance(result.columns, MultiIndex) + result.columns = result.columns.reorder_levels(order) + return result + + # ---------------------------------------------------------------------- + # Arithmetic Methods + + def _cmp_method(self, other, op): + axis: Literal[1] = 1 # only relevant for Series other case + + self, other = self._align_for_op(other, axis, flex=False, level=None) + + # See GH#4537 for discussion of scalar op behavior + new_data = self._dispatch_frame_op(other, op, axis=axis) + return self._construct_result(new_data) + + def _arith_method(self, other, op): + if self._should_reindex_frame_op(other, op, 1, None, None): + return self._arith_method_with_reindex(other, op) + + axis: Literal[1] = 1 # only relevant for Series other case + other = ops.maybe_prepare_scalar_for_op(other, (self.shape[axis],)) + + self, other = self._align_for_op(other, axis, flex=True, level=None) + + with np.errstate(all="ignore"): + new_data = self._dispatch_frame_op(other, op, axis=axis) + return self._construct_result(new_data) + + _logical_method = _arith_method + + def _dispatch_frame_op( + self, right, func: Callable, axis: AxisInt | None = None + ) -> DataFrame: + """ + Evaluate the frame operation func(left, right) by evaluating + column-by-column, dispatching to the Series implementation. + + Parameters + ---------- + right : scalar, Series, or DataFrame + func : arithmetic or comparison operator + axis : {None, 0, 1} + + Returns + ------- + DataFrame + + Notes + ----- + Caller is responsible for setting np.errstate where relevant. + """ + # Get the appropriate array-op to apply to each column/block's values. + array_op = ops.get_array_op(func) + + right = lib.item_from_zerodim(right) + if not is_list_like(right): + # i.e. 
scalar, faster than checking np.ndim(right) == 0 + bm = self._mgr.apply(array_op, right=right) + return self._constructor_from_mgr(bm, axes=bm.axes) + + elif isinstance(right, DataFrame): + assert self.index.equals(right.index) + assert self.columns.equals(right.columns) + # TODO: The previous assertion `assert right._indexed_same(self)` + # fails in cases with empty columns reached via + # _frame_arith_method_with_reindex + + # TODO operate_blockwise expects a manager of the same type + bm = self._mgr.operate_blockwise( + # error: Argument 1 to "operate_blockwise" of "ArrayManager" has + # incompatible type "Union[ArrayManager, BlockManager]"; expected + # "ArrayManager" + # error: Argument 1 to "operate_blockwise" of "BlockManager" has + # incompatible type "Union[ArrayManager, BlockManager]"; expected + # "BlockManager" + right._mgr, # type: ignore[arg-type] + array_op, + ) + return self._constructor_from_mgr(bm, axes=bm.axes) + + elif isinstance(right, Series) and axis == 1: + # axis=1 means we want to operate row-by-row + assert right.index.equals(self.columns) + + right = right._values + # maybe_align_as_frame ensures we do not have an ndarray here + assert not isinstance(right, np.ndarray) + + arrays = [ + array_op(_left, _right) + for _left, _right in zip(self._iter_column_arrays(), right) + ] + + elif isinstance(right, Series): + assert right.index.equals(self.index) + right = right._values + + arrays = [array_op(left, right) for left in self._iter_column_arrays()] + + else: + raise NotImplementedError(right) + + return type(self)._from_arrays( + arrays, self.columns, self.index, verify_integrity=False + ) + + def _combine_frame(self, other: DataFrame, func, fill_value=None): + # at this point we have `self._indexed_same(other)` + + if fill_value is None: + # since _arith_op may be called in a loop, avoid function call + # overhead if possible by doing this check once + _arith_op = func + + else: + + def _arith_op(left, right): + # for the mixed_type case where we iterate over columns, + # _arith_op(left, right) is equivalent to + # left._binop(right, func, fill_value=fill_value) + left, right = ops.fill_binop(left, right, fill_value) + return func(left, right) + + new_data = self._dispatch_frame_op(other, _arith_op) + return new_data + + def _arith_method_with_reindex(self, right: DataFrame, op) -> DataFrame: + """ + For DataFrame-with-DataFrame operations that require reindexing, + operate only on shared columns, then reindex. + + Parameters + ---------- + right : DataFrame + op : binary operator + + Returns + ------- + DataFrame + """ + left = self + + # GH#31623, only operate on shared columns + cols, lcols, rcols = left.columns.join( + right.columns, how="inner", level=None, return_indexers=True + ) + + new_left = left.iloc[:, lcols] + new_right = right.iloc[:, rcols] + result = op(new_left, new_right) + + # Do the join on the columns instead of using left._align_for_op + # to avoid constructing two potentially large/sparse DataFrames + join_columns, _, _ = left.columns.join( + right.columns, how="outer", level=None, return_indexers=True + ) + + if result.columns.has_duplicates: + # Avoid reindexing with a duplicate axis. 
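+            # (reindexing an axis that has duplicate labels would raise a
+            # ValueError, so the indexer is resolved explicitly below)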
+ # https://github.com/pandas-dev/pandas/issues/35194 + indexer, _ = result.columns.get_indexer_non_unique(join_columns) + indexer = algorithms.unique1d(indexer) + result = result._reindex_with_indexers( + {1: [join_columns, indexer]}, allow_dups=True + ) + else: + result = result.reindex(join_columns, axis=1) + + return result + + def _should_reindex_frame_op(self, right, op, axis: int, fill_value, level) -> bool: + """ + Check if this is an operation between DataFrames that will need to reindex. + """ + if op is operator.pow or op is roperator.rpow: + # GH#32685 pow has special semantics for operating with null values + return False + + if not isinstance(right, DataFrame): + return False + + if fill_value is None and level is None and axis == 1: + # TODO: any other cases we should handle here? + + # Intersection is always unique so we have to check the unique columns + left_uniques = self.columns.unique() + right_uniques = right.columns.unique() + cols = left_uniques.intersection(right_uniques) + if len(cols) and not ( + len(cols) == len(left_uniques) and len(cols) == len(right_uniques) + ): + # TODO: is there a shortcut available when len(cols) == 0? + return True + + return False + + def _align_for_op( + self, + other, + axis: AxisInt, + flex: bool | None = False, + level: Level | None = None, + ): + """ + Convert rhs to meet lhs dims if input is list, tuple or np.ndarray. + + Parameters + ---------- + left : DataFrame + right : Any + axis : int + flex : bool or None, default False + Whether this is a flex op, in which case we reindex. + None indicates not to check for alignment. + level : int or level name, default None + + Returns + ------- + left : DataFrame + right : Any + """ + left, right = self, other + + def to_series(right): + msg = ( + "Unable to coerce to Series, " + "length must be {req_len}: given {given_len}" + ) + + # pass dtype to avoid doing inference, which would break consistency + # with Index/Series ops + dtype = None + if getattr(right, "dtype", None) == object: + # can't pass right.dtype unconditionally as that would break on e.g. + # datetime64[h] ndarray + dtype = object + + if axis == 0: + if len(left.index) != len(right): + raise ValueError( + msg.format(req_len=len(left.index), given_len=len(right)) + ) + right = left._constructor_sliced(right, index=left.index, dtype=dtype) + else: + if len(left.columns) != len(right): + raise ValueError( + msg.format(req_len=len(left.columns), given_len=len(right)) + ) + right = left._constructor_sliced(right, index=left.columns, dtype=dtype) + return right + + if isinstance(right, np.ndarray): + if right.ndim == 1: + right = to_series(right) + + elif right.ndim == 2: + # We need to pass dtype=right.dtype to retain object dtype + # otherwise we lose consistency with Index and array ops + dtype = None + if right.dtype == object: + # can't pass right.dtype unconditionally as that would break on e.g. 
+ # datetime64[h] ndarray + dtype = object + + if right.shape == left.shape: + right = left._constructor( + right, index=left.index, columns=left.columns, dtype=dtype + ) + + elif right.shape[0] == left.shape[0] and right.shape[1] == 1: + # Broadcast across columns + right = np.broadcast_to(right, left.shape) + right = left._constructor( + right, index=left.index, columns=left.columns, dtype=dtype + ) + + elif right.shape[1] == left.shape[1] and right.shape[0] == 1: + # Broadcast along rows + right = to_series(right[0, :]) + + else: + raise ValueError( + "Unable to coerce to DataFrame, shape " + f"must be {left.shape}: given {right.shape}" + ) + + elif right.ndim > 2: + raise ValueError( + "Unable to coerce to Series/DataFrame, " + f"dimension must be <= 2: {right.shape}" + ) + + elif is_list_like(right) and not isinstance(right, (Series, DataFrame)): + # GH#36702. Raise when attempting arithmetic with list of array-like. + if any(is_array_like(el) for el in right): + raise ValueError( + f"Unable to coerce list of {type(right[0])} to Series/DataFrame" + ) + # GH#17901 + right = to_series(right) + + if flex is not None and isinstance(right, DataFrame): + if not left._indexed_same(right): + if flex: + left, right = left.align( + right, join="outer", level=level, copy=False + ) + else: + raise ValueError( + "Can only compare identically-labeled (both index and columns) " + "DataFrame objects" + ) + elif isinstance(right, Series): + # axis=1 is default for DataFrame-with-Series op + axis = axis if axis is not None else 1 + if not flex: + if not left.axes[axis].equals(right.index): + raise ValueError( + "Operands are not aligned. Do " + "`left, right = left.align(right, axis=1, copy=False)` " + "before operating." + ) + + left, right = left.align( + right, + join="outer", + axis=axis, + level=level, + copy=False, + ) + right = left._maybe_align_series_as_frame(right, axis) + + return left, right + + def _maybe_align_series_as_frame(self, series: Series, axis: AxisInt): + """ + If the Series operand is not EA-dtype, we can broadcast to 2D and operate + blockwise. 
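+
+        With ``axis=0`` the values are reshaped to ``(len(self), 1)`` and
+        broadcast across the columns; with ``axis=1`` they are reshaped to
+        ``(1, len(self.columns))`` and broadcast down the rows.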
+ """ + rvalues = series._values + if not isinstance(rvalues, np.ndarray): + # TODO(EA2D): no need to special-case with 2D EAs + if rvalues.dtype in ("datetime64[ns]", "timedelta64[ns]"): + # We can losslessly+cheaply cast to ndarray + rvalues = np.asarray(rvalues) + else: + return series + + if axis == 0: + rvalues = rvalues.reshape(-1, 1) + else: + rvalues = rvalues.reshape(1, -1) + + rvalues = np.broadcast_to(rvalues, self.shape) + # pass dtype to avoid doing inference + return self._constructor( + rvalues, + index=self.index, + columns=self.columns, + dtype=rvalues.dtype, + ) + + def _flex_arith_method( + self, other, op, *, axis: Axis = "columns", level=None, fill_value=None + ): + axis = self._get_axis_number(axis) if axis is not None else 1 + + if self._should_reindex_frame_op(other, op, axis, fill_value, level): + return self._arith_method_with_reindex(other, op) + + if isinstance(other, Series) and fill_value is not None: + # TODO: We could allow this in cases where we end up going + # through the DataFrame path + raise NotImplementedError(f"fill_value {fill_value} not supported.") + + other = ops.maybe_prepare_scalar_for_op(other, self.shape) + self, other = self._align_for_op(other, axis, flex=True, level=level) + + with np.errstate(all="ignore"): + if isinstance(other, DataFrame): + # Another DataFrame + new_data = self._combine_frame(other, op, fill_value) + + elif isinstance(other, Series): + new_data = self._dispatch_frame_op(other, op, axis=axis) + else: + # in this case we always have `np.ndim(other) == 0` + if fill_value is not None: + self = self.fillna(fill_value) + + new_data = self._dispatch_frame_op(other, op) + + return self._construct_result(new_data) + + def _construct_result(self, result) -> DataFrame: + """ + Wrap the result of an arithmetic, comparison, or logical operation. 
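+
+        The ``columns`` and ``index`` are pinned onto the result afterwards
+        rather than passed to the constructor, which preserves non-unique
+        column labels.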
+ + Parameters + ---------- + result : DataFrame + + Returns + ------- + DataFrame + """ + out = self._constructor(result, copy=False).__finalize__(self) + # Pin columns instead of passing to constructor for compat with + # non-unique columns case + out.columns = self.columns + out.index = self.index + return out + + def __divmod__(self, other) -> tuple[DataFrame, DataFrame]: + # Naive implementation, room for optimization + div = self // other + mod = self - div * other + return div, mod + + def __rdivmod__(self, other) -> tuple[DataFrame, DataFrame]: + # Naive implementation, room for optimization + div = other // self + mod = other - div * self + return div, mod + + def _flex_cmp_method(self, other, op, *, axis: Axis = "columns", level=None): + axis = self._get_axis_number(axis) if axis is not None else 1 + + self, other = self._align_for_op(other, axis, flex=True, level=level) + + new_data = self._dispatch_frame_op(other, op, axis=axis) + return self._construct_result(new_data) + + @Appender(ops.make_flex_doc("eq", "dataframe")) + def eq(self, other, axis: Axis = "columns", level=None): + return self._flex_cmp_method(other, operator.eq, axis=axis, level=level) + + @Appender(ops.make_flex_doc("ne", "dataframe")) + def ne(self, other, axis: Axis = "columns", level=None): + return self._flex_cmp_method(other, operator.ne, axis=axis, level=level) + + @Appender(ops.make_flex_doc("le", "dataframe")) + def le(self, other, axis: Axis = "columns", level=None): + return self._flex_cmp_method(other, operator.le, axis=axis, level=level) + + @Appender(ops.make_flex_doc("lt", "dataframe")) + def lt(self, other, axis: Axis = "columns", level=None): + return self._flex_cmp_method(other, operator.lt, axis=axis, level=level) + + @Appender(ops.make_flex_doc("ge", "dataframe")) + def ge(self, other, axis: Axis = "columns", level=None): + return self._flex_cmp_method(other, operator.ge, axis=axis, level=level) + + @Appender(ops.make_flex_doc("gt", "dataframe")) + def gt(self, other, axis: Axis = "columns", level=None): + return self._flex_cmp_method(other, operator.gt, axis=axis, level=level) + + @Appender(ops.make_flex_doc("add", "dataframe")) + def add(self, other, axis: Axis = "columns", level=None, fill_value=None): + return self._flex_arith_method( + other, operator.add, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("radd", "dataframe")) + def radd(self, other, axis: Axis = "columns", level=None, fill_value=None): + return self._flex_arith_method( + other, roperator.radd, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("sub", "dataframe")) + def sub(self, other, axis: Axis = "columns", level=None, fill_value=None): + return self._flex_arith_method( + other, operator.sub, level=level, fill_value=fill_value, axis=axis + ) + + subtract = sub + + @Appender(ops.make_flex_doc("rsub", "dataframe")) + def rsub(self, other, axis: Axis = "columns", level=None, fill_value=None): + return self._flex_arith_method( + other, roperator.rsub, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("mul", "dataframe")) + def mul(self, other, axis: Axis = "columns", level=None, fill_value=None): + return self._flex_arith_method( + other, operator.mul, level=level, fill_value=fill_value, axis=axis + ) + + multiply = mul + + @Appender(ops.make_flex_doc("rmul", "dataframe")) + def rmul(self, other, axis: Axis = "columns", level=None, fill_value=None): + return self._flex_arith_method( + other, roperator.rmul, level=level, 
fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("truediv", "dataframe")) + def truediv(self, other, axis: Axis = "columns", level=None, fill_value=None): + return self._flex_arith_method( + other, operator.truediv, level=level, fill_value=fill_value, axis=axis + ) + + div = truediv + divide = truediv + + @Appender(ops.make_flex_doc("rtruediv", "dataframe")) + def rtruediv(self, other, axis: Axis = "columns", level=None, fill_value=None): + return self._flex_arith_method( + other, roperator.rtruediv, level=level, fill_value=fill_value, axis=axis + ) + + rdiv = rtruediv + + @Appender(ops.make_flex_doc("floordiv", "dataframe")) + def floordiv(self, other, axis: Axis = "columns", level=None, fill_value=None): + return self._flex_arith_method( + other, operator.floordiv, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("rfloordiv", "dataframe")) + def rfloordiv(self, other, axis: Axis = "columns", level=None, fill_value=None): + return self._flex_arith_method( + other, roperator.rfloordiv, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("mod", "dataframe")) + def mod(self, other, axis: Axis = "columns", level=None, fill_value=None): + return self._flex_arith_method( + other, operator.mod, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("rmod", "dataframe")) + def rmod(self, other, axis: Axis = "columns", level=None, fill_value=None): + return self._flex_arith_method( + other, roperator.rmod, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("pow", "dataframe")) + def pow(self, other, axis: Axis = "columns", level=None, fill_value=None): + return self._flex_arith_method( + other, operator.pow, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("rpow", "dataframe")) + def rpow(self, other, axis: Axis = "columns", level=None, fill_value=None): + return self._flex_arith_method( + other, roperator.rpow, level=level, fill_value=fill_value, axis=axis + ) + + # ---------------------------------------------------------------------- + # Combination-Related + + @doc( + _shared_docs["compare"], + dedent( + """ + Returns + ------- + DataFrame + DataFrame that shows the differences stacked side by side. + + The resulting index will be a MultiIndex with 'self' and 'other' + stacked alternately at the inner level. + + Raises + ------ + ValueError + When the two DataFrames don't have identical labels or shape. + + See Also + -------- + Series.compare : Compare with another Series and show differences. + DataFrame.equals : Test whether two objects contain the same elements. + + Notes + ----- + Matching NaNs will not appear as a difference. + + Can only compare identically-labeled + (i.e. same shape, identical row and column labels) DataFrames + + Examples + -------- + >>> df = pd.DataFrame( + ... {{ + ... "col1": ["a", "a", "b", "b", "a"], + ... "col2": [1.0, 2.0, 3.0, np.nan, 5.0], + ... "col3": [1.0, 2.0, 3.0, 4.0, 5.0] + ... }}, + ... columns=["col1", "col2", "col3"], + ... 
) + >>> df + col1 col2 col3 + 0 a 1.0 1.0 + 1 a 2.0 2.0 + 2 b 3.0 3.0 + 3 b NaN 4.0 + 4 a 5.0 5.0 + + >>> df2 = df.copy() + >>> df2.loc[0, 'col1'] = 'c' + >>> df2.loc[2, 'col3'] = 4.0 + >>> df2 + col1 col2 col3 + 0 c 1.0 1.0 + 1 a 2.0 2.0 + 2 b 3.0 4.0 + 3 b NaN 4.0 + 4 a 5.0 5.0 + + Align the differences on columns + + >>> df.compare(df2) + col1 col3 + self other self other + 0 a c NaN NaN + 2 NaN NaN 3.0 4.0 + + Assign result_names + + >>> df.compare(df2, result_names=("left", "right")) + col1 col3 + left right left right + 0 a c NaN NaN + 2 NaN NaN 3.0 4.0 + + Stack the differences on rows + + >>> df.compare(df2, align_axis=0) + col1 col3 + 0 self a NaN + other c NaN + 2 self NaN 3.0 + other NaN 4.0 + + Keep the equal values + + >>> df.compare(df2, keep_equal=True) + col1 col3 + self other self other + 0 a c 1.0 1.0 + 2 b b 3.0 4.0 + + Keep all original rows and columns + + >>> df.compare(df2, keep_shape=True) + col1 col2 col3 + self other self other self other + 0 a c NaN NaN NaN NaN + 1 NaN NaN NaN NaN NaN NaN + 2 NaN NaN NaN NaN 3.0 4.0 + 3 NaN NaN NaN NaN NaN NaN + 4 NaN NaN NaN NaN NaN NaN + + Keep all original rows and columns and also all original values + + >>> df.compare(df2, keep_shape=True, keep_equal=True) + col1 col2 col3 + self other self other self other + 0 a c 1.0 1.0 1.0 1.0 + 1 a a 2.0 2.0 2.0 2.0 + 2 b b 3.0 3.0 3.0 4.0 + 3 b b NaN NaN 4.0 4.0 + 4 a a 5.0 5.0 5.0 5.0 + """ + ), + klass=_shared_doc_kwargs["klass"], + ) + def compare( + self, + other: DataFrame, + align_axis: Axis = 1, + keep_shape: bool = False, + keep_equal: bool = False, + result_names: Suffixes = ("self", "other"), + ) -> DataFrame: + return super().compare( + other=other, + align_axis=align_axis, + keep_shape=keep_shape, + keep_equal=keep_equal, + result_names=result_names, + ) + + def combine( + self, + other: DataFrame, + func: Callable[[Series, Series], Series | Hashable], + fill_value=None, + overwrite: bool = True, + ) -> DataFrame: + """ + Perform column-wise combine with another DataFrame. + + Combines a DataFrame with `other` DataFrame using `func` + to element-wise combine columns. The row and column indexes of the + resulting DataFrame will be the union of the two. + + Parameters + ---------- + other : DataFrame + The DataFrame to merge column-wise. + func : function + Function that takes two series as inputs and return a Series or a + scalar. Used to merge the two dataframes column by columns. + fill_value : scalar value, default None + The value to fill NaNs with prior to passing any column to the + merge func. + overwrite : bool, default True + If True, columns in `self` that do not exist in `other` will be + overwritten with NaNs. + + Returns + ------- + DataFrame + Combination of the provided DataFrames. + + See Also + -------- + DataFrame.combine_first : Combine two DataFrame objects and default to + non-null values in frame calling the method. + + Examples + -------- + Combine using a simple function that chooses the smaller column. + + >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) + >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) + >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2 + >>> df1.combine(df2, take_smaller) + A B + 0 0 3 + 1 0 3 + + Example using a true element-wise combine function. + + >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]}) + >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) + >>> df1.combine(df2, np.minimum) + A B + 0 1 2 + 1 0 3 + + Using `fill_value` fills Nones prior to passing the column to the + merge function. 
+ + >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) + >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) + >>> df1.combine(df2, take_smaller, fill_value=-5) + A B + 0 0 -5.0 + 1 0 4.0 + + However, if the same element in both dataframes is None, that None + is preserved + + >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) + >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]}) + >>> df1.combine(df2, take_smaller, fill_value=-5) + A B + 0 0 -5.0 + 1 0 3.0 + + Example that demonstrates the use of `overwrite` and behavior when + the axis differ between the dataframes. + + >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) + >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2]) + >>> df1.combine(df2, take_smaller) + A B C + 0 NaN NaN NaN + 1 NaN 3.0 -10.0 + 2 NaN 3.0 1.0 + + >>> df1.combine(df2, take_smaller, overwrite=False) + A B C + 0 0.0 NaN NaN + 1 0.0 3.0 -10.0 + 2 NaN 3.0 1.0 + + Demonstrating the preference of the passed in dataframe. + + >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2]) + >>> df2.combine(df1, take_smaller) + A B C + 0 0.0 NaN NaN + 1 0.0 3.0 NaN + 2 NaN 3.0 NaN + + >>> df2.combine(df1, take_smaller, overwrite=False) + A B C + 0 0.0 NaN NaN + 1 0.0 3.0 1.0 + 2 NaN 3.0 1.0 + """ + other_idxlen = len(other.index) # save for compare + + this, other = self.align(other, copy=False) + new_index = this.index + + if other.empty and len(new_index) == len(self.index): + return self.copy() + + if self.empty and len(other) == other_idxlen: + return other.copy() + + # sorts if possible; otherwise align above ensures that these are set-equal + new_columns = this.columns.union(other.columns) + do_fill = fill_value is not None + result = {} + for col in new_columns: + series = this[col] + other_series = other[col] + + this_dtype = series.dtype + other_dtype = other_series.dtype + + this_mask = isna(series) + other_mask = isna(other_series) + + # don't overwrite columns unnecessarily + # DO propagate if this column is not in the intersection + if not overwrite and other_mask.all(): + result[col] = this[col].copy() + continue + + if do_fill: + series = series.copy() + other_series = other_series.copy() + series[this_mask] = fill_value + other_series[other_mask] = fill_value + + if col not in self.columns: + # If self DataFrame does not have col in other DataFrame, + # try to promote series, which is all NaN, as other_dtype. + new_dtype = other_dtype + try: + series = series.astype(new_dtype, copy=False) + except ValueError: + # e.g. new_dtype is integer types + pass + else: + # if we have different dtypes, possibly promote + new_dtype = find_common_type([this_dtype, other_dtype]) + series = series.astype(new_dtype, copy=False) + other_series = other_series.astype(new_dtype, copy=False) + + arr = func(series, other_series) + if isinstance(new_dtype, np.dtype): + # if new_dtype is an EA Dtype, then `func` is expected to return + # the correct dtype without any additional casting + # error: No overload variant of "maybe_downcast_to_dtype" matches + # argument types "Union[Series, Hashable]", "dtype[Any]" + arr = maybe_downcast_to_dtype( # type: ignore[call-overload] + arr, new_dtype + ) + + result[col] = arr + + # convert_objects just in case + frame_result = self._constructor(result, index=new_index, columns=new_columns) + return frame_result.__finalize__(self, method="combine") + + def combine_first(self, other: DataFrame) -> DataFrame: + """ + Update null elements with value in the same location in `other`. 
+ + Combine two DataFrame objects by filling null values in one DataFrame + with non-null values from other DataFrame. The row and column indexes + of the resulting DataFrame will be the union of the two. The resulting + dataframe contains the 'first' dataframe values and overrides the + second one values where both first.loc[index, col] and + second.loc[index, col] are not missing values, upon calling + first.combine_first(second). + + Parameters + ---------- + other : DataFrame + Provided DataFrame to use to fill null values. + + Returns + ------- + DataFrame + The result of combining the provided DataFrame with the other object. + + See Also + -------- + DataFrame.combine : Perform series-wise operation on two DataFrames + using a given function. + + Examples + -------- + >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) + >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) + >>> df1.combine_first(df2) + A B + 0 1.0 3.0 + 1 0.0 4.0 + + Null values still persist if the location of that null value + does not exist in `other` + + >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) + >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) + >>> df1.combine_first(df2) + A B C + 0 NaN 4.0 NaN + 1 0.0 3.0 1.0 + 2 NaN 3.0 1.0 + """ + from pandas.core.computation import expressions + + def combiner(x, y): + mask = extract_array(isna(x)) + + x_values = extract_array(x, extract_numpy=True) + y_values = extract_array(y, extract_numpy=True) + + # If the column y in other DataFrame is not in first DataFrame, + # just return y_values. + if y.name not in self.columns: + return y_values + + return expressions.where(mask, y_values, x_values) + + if len(other) == 0: + combined = self.reindex( + self.columns.append(other.columns.difference(self.columns)), axis=1 + ) + combined = combined.astype(other.dtypes) + else: + combined = self.combine(other, combiner, overwrite=False) + + dtypes = { + col: find_common_type([self.dtypes[col], other.dtypes[col]]) + for col in self.columns.intersection(other.columns) + if combined.dtypes[col] != self.dtypes[col] + } + + if dtypes: + combined = combined.astype(dtypes) + + return combined.__finalize__(self, method="combine_first") + + def update( + self, + other, + join: UpdateJoin = "left", + overwrite: bool = True, + filter_func=None, + errors: IgnoreRaise = "ignore", + ) -> None: + """ + Modify in place using non-NA values from another DataFrame. + + Aligns on indices. There is no return value. + + Parameters + ---------- + other : DataFrame, or object coercible into a DataFrame + Should have at least one matching index/column label + with the original DataFrame. If a Series is passed, + its name attribute must be set, and that will be + used as the column name to align with the original DataFrame. + join : {'left'}, default 'left' + Only left join is implemented, keeping the index and columns of the + original object. + overwrite : bool, default True + How to handle non-NA values for overlapping keys: + + * True: overwrite original DataFrame's values + with values from `other`. + * False: only update values that are NA in + the original DataFrame. + + filter_func : callable(1d-array) -> bool 1d-array, optional + Can choose to replace values other than NA. Return True for values + that should be updated. + errors : {'raise', 'ignore'}, default 'ignore' + If 'raise', will raise a ValueError if the DataFrame and `other` + both contain non-NA data in the same place. + + Returns + ------- + None + This method directly changes calling object. 
+ + Raises + ------ + ValueError + * When `errors='raise'` and there's overlapping non-NA data. + * When `errors` is not either `'ignore'` or `'raise'` + NotImplementedError + * If `join != 'left'` + + See Also + -------- + dict.update : Similar method for dictionaries. + DataFrame.merge : For column(s)-on-column(s) operations. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 2, 3], + ... 'B': [400, 500, 600]}) + >>> new_df = pd.DataFrame({'B': [4, 5, 6], + ... 'C': [7, 8, 9]}) + >>> df.update(new_df) + >>> df + A B + 0 1 4 + 1 2 5 + 2 3 6 + + The DataFrame's length does not increase as a result of the update, + only values at matching index/column labels are updated. + + >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], + ... 'B': ['x', 'y', 'z']}) + >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}) + >>> df.update(new_df) + >>> df + A B + 0 a d + 1 b e + 2 c f + + For Series, its name attribute must be set. + + >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], + ... 'B': ['x', 'y', 'z']}) + >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2]) + >>> df.update(new_column) + >>> df + A B + 0 a d + 1 b y + 2 c e + >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], + ... 'B': ['x', 'y', 'z']}) + >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2]) + >>> df.update(new_df) + >>> df + A B + 0 a x + 1 b d + 2 c e + + If `other` contains NaNs the corresponding values are not updated + in the original dataframe. + + >>> df = pd.DataFrame({'A': [1, 2, 3], + ... 'B': [400, 500, 600]}) + >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]}) + >>> df.update(new_df) + >>> df + A B + 0 1 4 + 1 2 500 + 2 3 6 + """ + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= REF_COUNT: + warnings.warn( + _chained_assignment_method_msg, + ChainedAssignmentError, + stacklevel=2, + ) + + from pandas.core.computation import expressions + + # TODO: Support other joins + if join != "left": # pragma: no cover + raise NotImplementedError("Only left join is supported") + if errors not in ["ignore", "raise"]: + raise ValueError("The parameter errors must be either 'ignore' or 'raise'") + + if not isinstance(other, DataFrame): + other = DataFrame(other) + + other = other.reindex(self.index) + + for col in self.columns.intersection(other.columns): + this = self[col]._values + that = other[col]._values + + if filter_func is not None: + mask = ~filter_func(this) | isna(that) + else: + if errors == "raise": + mask_this = notna(that) + mask_that = notna(this) + if any(mask_this & mask_that): + raise ValueError("Data overlaps.") + + if overwrite: + mask = isna(that) + else: + mask = notna(this) + + # don't overwrite columns unnecessarily + if mask.all(): + continue + + self.loc[:, col] = expressions.where(mask, this, that) + + # ---------------------------------------------------------------------- + # Data reshaping + @Appender( + dedent( + """ + Examples + -------- + >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon', + ... 'Parrot', 'Parrot'], + ... 'Max Speed': [380., 370., 24., 26.]}) + >>> df + Animal Max Speed + 0 Falcon 380.0 + 1 Falcon 370.0 + 2 Parrot 24.0 + 3 Parrot 26.0 + >>> df.groupby(['Animal']).mean() + Max Speed + Animal + Falcon 375.0 + Parrot 25.0 + + **Hierarchical Indexes** + + We can groupby different levels of a hierarchical index + using the `level` parameter: + + >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], + ... 
['Captive', 'Wild', 'Captive', 'Wild']]
+            >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
+            >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
+            ...                   index=index)
+            >>> df
+                            Max Speed
+            Animal Type
+            Falcon Captive      390.0
+                   Wild         350.0
+            Parrot Captive       30.0
+                   Wild          20.0
+            >>> df.groupby(level=0).mean()
+                    Max Speed
+            Animal
+            Falcon      370.0
+            Parrot       25.0
+            >>> df.groupby(level="Type").mean()
+                     Max Speed
+            Type
+            Captive      210.0
+            Wild         185.0
+
+            We can also choose to include NA in group keys or not by setting
+            the `dropna` parameter; the default setting is `True`.
+
+            >>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
+            >>> df = pd.DataFrame(l, columns=["a", "b", "c"])
+
+            >>> df.groupby(by=["b"]).sum()
+                a   c
+            b
+            1.0 2   3
+            2.0 2   5
+
+            >>> df.groupby(by=["b"], dropna=False).sum()
+                a   c
+            b
+            1.0 2   3
+            2.0 2   5
+            NaN 1   4
+
+            >>> l = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]]
+            >>> df = pd.DataFrame(l, columns=["a", "b", "c"])
+
+            >>> df.groupby(by="a").sum()
+                b     c
+            a
+            a   13.0   13.0
+            b   12.3  123.0
+
+            >>> df.groupby(by="a", dropna=False).sum()
+                b     c
+            a
+            a   13.0   13.0
+            b   12.3  123.0
+            NaN 12.3   33.0
+
+            When using ``.apply()``, use ``group_keys`` to include or exclude
+            the group keys. The ``group_keys`` argument defaults to ``True``
+            (include).
+
+            >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
+            ...                               'Parrot', 'Parrot'],
+            ...                    'Max Speed': [380., 370., 24., 26.]})
+            >>> df.groupby("Animal", group_keys=True).apply(lambda x: x)
+                      Animal  Max Speed
+            Animal
+            Falcon 0  Falcon      380.0
+                   1  Falcon      370.0
+            Parrot 2  Parrot       24.0
+                   3  Parrot       26.0
+
+            >>> df.groupby("Animal", group_keys=False).apply(lambda x: x)
+               Animal  Max Speed
+            0  Falcon      380.0
+            1  Falcon      370.0
+            2  Parrot       24.0
+            3  Parrot       26.0
+            """
+        )
+    )
+    @Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
+    def groupby(
+        self,
+        by=None,
+        axis: Axis | lib.NoDefault = lib.no_default,
+        level: IndexLabel | None = None,
+        as_index: bool = True,
+        sort: bool = True,
+        group_keys: bool = True,
+        observed: bool | lib.NoDefault = lib.no_default,
+        dropna: bool = True,
+    ) -> DataFrameGroupBy:
+        if axis is not lib.no_default:
+            axis = self._get_axis_number(axis)
+            if axis == 1:
+                warnings.warn(
+                    "DataFrame.groupby with axis=1 is deprecated. Do "
+                    "`frame.T.groupby(...)` without axis instead.",
+                    FutureWarning,
+                    stacklevel=find_stack_level(),
+                )
+            else:
+                warnings.warn(
+                    "The 'axis' keyword in DataFrame.groupby is deprecated and "
+                    "will be removed in a future version.",
+                    FutureWarning,
+                    stacklevel=find_stack_level(),
+                )
+        else:
+            axis = 0
+
+        from pandas.core.groupby.generic import DataFrameGroupBy
+
+        if level is None and by is None:
+            raise TypeError("You have to supply one of 'by' and 'level'")
+
+        return DataFrameGroupBy(
+            obj=self,
+            keys=by,
+            axis=axis,
+            level=level,
+            as_index=as_index,
+            sort=sort,
+            group_keys=group_keys,
+            observed=observed,
+            dropna=dropna,
+        )
+
+    _shared_docs[
+        "pivot"
+    ] = """
+        Return reshaped DataFrame organized by given index / column values.
+
+        Reshape data (produce a "pivot" table) based on column values. Uses
+        unique values from specified `index` / `columns` to form axes of the
+        resulting DataFrame. This function does not support data
+        aggregation; multiple values will result in a MultiIndex in the
+        columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
+
+        Parameters
+        ----------%s
+        columns : str or object or a list of str
+            Column to use to make new frame's columns.
+ index : str or object or a list of str, optional + Column to use to make new frame's index. If not given, uses existing index. + values : str, object or a list of the previous, optional + Column(s) to use for populating new frame's values. If not + specified, all remaining columns will be used and the result will + have hierarchically indexed columns. + + Returns + ------- + DataFrame + Returns reshaped DataFrame. + + Raises + ------ + ValueError: + When there are any `index`, `columns` combinations with multiple + values. `DataFrame.pivot_table` when you need to aggregate. + + See Also + -------- + DataFrame.pivot_table : Generalization of pivot that can handle + duplicate values for one index/column pair. + DataFrame.unstack : Pivot based on the index values instead of a + column. + wide_to_long : Wide panel to long format. Less flexible but more + user-friendly than melt. + + Notes + ----- + For finer-tuned control, see hierarchical indexing documentation along + with the related stack/unstack methods. + + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + >>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', + ... 'two'], + ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], + ... 'baz': [1, 2, 3, 4, 5, 6], + ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']}) + >>> df + foo bar baz zoo + 0 one A 1 x + 1 one B 2 y + 2 one C 3 z + 3 two A 4 q + 4 two B 5 w + 5 two C 6 t + + >>> df.pivot(index='foo', columns='bar', values='baz') + bar A B C + foo + one 1 2 3 + two 4 5 6 + + >>> df.pivot(index='foo', columns='bar')['baz'] + bar A B C + foo + one 1 2 3 + two 4 5 6 + + >>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo']) + baz zoo + bar A B C A B C + foo + one 1 2 3 x y z + two 4 5 6 q w t + + You could also assign a list of column names or a list of index names. + + >>> df = pd.DataFrame({ + ... "lev1": [1, 1, 1, 2, 2, 2], + ... "lev2": [1, 1, 2, 1, 1, 2], + ... "lev3": [1, 2, 1, 2, 1, 2], + ... "lev4": [1, 2, 3, 4, 5, 6], + ... "values": [0, 1, 2, 3, 4, 5]}) + >>> df + lev1 lev2 lev3 lev4 values + 0 1 1 1 1 0 + 1 1 1 2 2 1 + 2 1 2 1 3 2 + 3 2 1 2 4 3 + 4 2 1 1 5 4 + 5 2 2 2 6 5 + + >>> df.pivot(index="lev1", columns=["lev2", "lev3"], values="values") + lev2 1 2 + lev3 1 2 1 2 + lev1 + 1 0.0 1.0 2.0 NaN + 2 4.0 3.0 NaN 5.0 + + >>> df.pivot(index=["lev1", "lev2"], columns=["lev3"], values="values") + lev3 1 2 + lev1 lev2 + 1 1 0.0 1.0 + 2 2.0 NaN + 2 1 4.0 3.0 + 2 NaN 5.0 + + A ValueError is raised if there are any duplicates. + + >>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'], + ... "bar": ['A', 'A', 'B', 'C'], + ... "baz": [1, 2, 3, 4]}) + >>> df + foo bar baz + 0 one A 1 + 1 one A 2 + 2 two B 3 + 3 two C 4 + + Notice that the first two rows are the same for our `index` + and `columns` arguments. + + >>> df.pivot(index='foo', columns='bar', values='baz') + Traceback (most recent call last): + ... + ValueError: Index contains duplicate entries, cannot reshape + """ + + @Substitution("") + @Appender(_shared_docs["pivot"]) + def pivot( + self, *, columns, index=lib.no_default, values=lib.no_default + ) -> DataFrame: + from pandas.core.reshape.pivot import pivot + + return pivot(self, index=index, columns=columns, values=values) + + _shared_docs[ + "pivot_table" + ] = """ + Create a spreadsheet-style pivot table as a DataFrame. + + The levels in the pivot table will be stored in MultiIndex objects + (hierarchical indexes) on the index and columns of the result DataFrame. 
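+
+    As a quick orientation, a minimal sketch (illustrative values; the
+    Examples section below covers more cases):
+
+    >>> df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 4]})
+    >>> pd.pivot_table(df, values="val", index="key", aggfunc="sum")
+         val
+    key
+    a      3
+    b      4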
+ + Parameters + ----------%s + values : list-like or scalar, optional + Column or columns to aggregate. + index : column, Grouper, array, or list of the previous + Keys to group by on the pivot table index. If a list is passed, + it can contain any of the other types (except list). If an array is + passed, it must be the same length as the data and will be used in + the same manner as column values. + columns : column, Grouper, array, or list of the previous + Keys to group by on the pivot table column. If a list is passed, + it can contain any of the other types (except list). If an array is + passed, it must be the same length as the data and will be used in + the same manner as column values. + aggfunc : function, list of functions, dict, default "mean" + If a list of functions is passed, the resulting pivot table will have + hierarchical columns whose top level are the function names + (inferred from the function objects themselves). + If a dict is passed, the key is column to aggregate and the value is + function or list of functions. If ``margin=True``, aggfunc will be + used to calculate the partial aggregates. + fill_value : scalar, default None + Value to replace missing values with (in the resulting pivot table, + after aggregation). + margins : bool, default False + If ``margins=True``, special ``All`` columns and rows + will be added with partial group aggregates across the categories + on the rows and columns. + dropna : bool, default True + Do not include columns whose entries are all NaN. If True, + rows with a NaN value in any column will be omitted before + computing margins. + margins_name : str, default 'All' + Name of the row / column that will contain the totals + when margins is True. + observed : bool, default False + This only applies if any of the groupers are Categoricals. + If True: only show observed values for categorical groupers. + If False: show all values for categorical groupers. + + sort : bool, default True + Specifies if the result should be sorted. + + .. versionadded:: 1.3.0 + + Returns + ------- + DataFrame + An Excel style pivot table. + + See Also + -------- + DataFrame.pivot : Pivot without aggregation that can handle + non-numeric data. + DataFrame.melt: Unpivot a DataFrame from wide to long format, + optionally leaving identifiers set. + wide_to_long : Wide panel to long format. Less flexible but more + user-friendly than melt. + + Notes + ----- + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + >>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", + ... "bar", "bar", "bar", "bar"], + ... "B": ["one", "one", "one", "two", "two", + ... "one", "one", "two", "two"], + ... "C": ["small", "large", "large", "small", + ... "small", "large", "small", "small", + ... "large"], + ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + ... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}) + >>> df + A B C D E + 0 foo one small 1 2 + 1 foo one large 2 4 + 2 foo one large 2 5 + 3 foo two small 3 5 + 4 foo two small 3 6 + 5 bar one large 4 6 + 6 bar one small 5 8 + 7 bar two small 6 9 + 8 bar two large 7 9 + + This first example aggregates values by taking the sum. + + >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], + ... columns=['C'], aggfunc="sum") + >>> table + C large small + A B + bar one 4.0 5.0 + two 7.0 6.0 + foo one 4.0 1.0 + two NaN 6.0 + + We can also fill missing values using the `fill_value` parameter. + + >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], + ... 
columns=['C'], aggfunc="sum", fill_value=0) + >>> table + C large small + A B + bar one 4 5 + two 7 6 + foo one 4 1 + two 0 6 + + The next example aggregates by taking the mean across multiple columns. + + >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], + ... aggfunc={'D': "mean", 'E': "mean"}) + >>> table + D E + A C + bar large 5.500000 7.500000 + small 5.500000 8.500000 + foo large 2.000000 4.500000 + small 2.333333 4.333333 + + We can also calculate multiple types of aggregations for any given + value column. + + >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], + ... aggfunc={'D': "mean", + ... 'E': ["min", "max", "mean"]}) + >>> table + D E + mean max mean min + A C + bar large 5.500000 9 7.500000 6 + small 5.500000 9 8.500000 8 + foo large 2.000000 5 4.500000 4 + small 2.333333 6 4.333333 2 + """ + + @Substitution("") + @Appender(_shared_docs["pivot_table"]) + def pivot_table( + self, + values=None, + index=None, + columns=None, + aggfunc: AggFuncType = "mean", + fill_value=None, + margins: bool = False, + dropna: bool = True, + margins_name: Level = "All", + observed: bool = False, + sort: bool = True, + ) -> DataFrame: + from pandas.core.reshape.pivot import pivot_table + + return pivot_table( + self, + values=values, + index=index, + columns=columns, + aggfunc=aggfunc, + fill_value=fill_value, + margins=margins, + dropna=dropna, + margins_name=margins_name, + observed=observed, + sort=sort, + ) + + def stack( + self, + level: IndexLabel = -1, + dropna: bool | lib.NoDefault = lib.no_default, + sort: bool | lib.NoDefault = lib.no_default, + future_stack: bool = False, + ): + """ + Stack the prescribed level(s) from columns to index. + + Return a reshaped DataFrame or Series having a multi-level + index with one or more new inner-most levels compared to the current + DataFrame. The new inner-most levels are created by pivoting the + columns of the current dataframe: + + - if the columns have a single level, the output is a Series; + - if the columns have multiple levels, the new index + level(s) is (are) taken from the prescribed level(s) and + the output is a DataFrame. + + Parameters + ---------- + level : int, str, list, default -1 + Level(s) to stack from the column axis onto the index + axis, defined as one index or label, or a list of indices + or labels. + dropna : bool, default True + Whether to drop rows in the resulting Frame/Series with + missing values. Stacking a column level onto the index + axis can create combinations of index and column values + that are missing from the original dataframe. See Examples + section. + sort : bool, default True + Whether to sort the levels of the resulting MultiIndex. + future_stack : bool, default False + Whether to use the new implementation that will replace the current + implementation in pandas 3.0. When True, dropna and sort have no impact + on the result and must remain unspecified. See :ref:`pandas 2.1.0 Release + notes ` for more details. + + Returns + ------- + DataFrame or Series + Stacked dataframe or series. + + See Also + -------- + DataFrame.unstack : Unstack prescribed level(s) from index axis + onto column axis. + DataFrame.pivot : Reshape dataframe from long format to wide + format. + DataFrame.pivot_table : Create a spreadsheet-style pivot table + as a DataFrame. 
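+
+        A round-trip with ``unstack`` recovers the original frame when no
+        values are dropped (a minimal sketch; the column labels are chosen
+        already sorted so the round-trip also preserves their order):
+
+        >>> df = pd.DataFrame([[1, 0], [3, 2]],
+        ...                   index=['cat', 'dog'],
+        ...                   columns=['height', 'weight'])
+        >>> df.stack(future_stack=True).unstack()
+             height  weight
+        cat       1       0
+        dog       3       2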
+ + Notes + ----- + The function is named by analogy with a collection of books + being reorganized from being side by side on a horizontal + position (the columns of the dataframe) to being stacked + vertically on top of each other (in the index of the + dataframe). + + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + **Single level columns** + + >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]], + ... index=['cat', 'dog'], + ... columns=['weight', 'height']) + + Stacking a dataframe with a single level column axis returns a Series: + + >>> df_single_level_cols + weight height + cat 0 1 + dog 2 3 + >>> df_single_level_cols.stack(future_stack=True) + cat weight 0 + height 1 + dog weight 2 + height 3 + dtype: int64 + + **Multi level columns: simple case** + + >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'), + ... ('weight', 'pounds')]) + >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]], + ... index=['cat', 'dog'], + ... columns=multicol1) + + Stacking a dataframe with a multi-level column axis: + + >>> df_multi_level_cols1 + weight + kg pounds + cat 1 2 + dog 2 4 + >>> df_multi_level_cols1.stack(future_stack=True) + weight + cat kg 1 + pounds 2 + dog kg 2 + pounds 4 + + **Missing values** + + >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'), + ... ('height', 'm')]) + >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], + ... index=['cat', 'dog'], + ... columns=multicol2) + + It is common to have missing values when stacking a dataframe + with multi-level columns, as the stacked dataframe typically + has more values than the original dataframe. Missing values + are filled with NaNs: + + >>> df_multi_level_cols2 + weight height + kg m + cat 1.0 2.0 + dog 3.0 4.0 + >>> df_multi_level_cols2.stack(future_stack=True) + weight height + cat kg 1.0 NaN + m NaN 2.0 + dog kg 3.0 NaN + m NaN 4.0 + + **Prescribing the level(s) to be stacked** + + The first parameter controls which level or levels are stacked: + + >>> df_multi_level_cols2.stack(0, future_stack=True) + kg m + cat weight 1.0 NaN + height NaN 2.0 + dog weight 3.0 NaN + height NaN 4.0 + >>> df_multi_level_cols2.stack([0, 1], future_stack=True) + cat weight kg 1.0 + height m 2.0 + dog weight kg 3.0 + height m 4.0 + dtype: float64 + + **Dropping missing values** + + >>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]], + ... index=['cat', 'dog'], + ... columns=multicol2) + + Note that rows where all values are missing are dropped by + default but this behaviour can be controlled via the dropna + keyword parameter: + + >>> df_multi_level_cols3 + weight height + kg m + cat NaN 1.0 + dog 2.0 3.0 + >>> df_multi_level_cols3.stack(dropna=False) + weight height + cat kg NaN NaN + m NaN 1.0 + dog kg 2.0 NaN + m NaN 3.0 + >>> df_multi_level_cols3.stack(dropna=True) + weight height + cat m NaN 1.0 + dog kg 2.0 NaN + m NaN 3.0 + """ + if not future_stack: + from pandas.core.reshape.reshape import ( + stack, + stack_multiple, + ) + + if dropna is lib.no_default: + dropna = True + if sort is lib.no_default: + sort = True + + if isinstance(level, (tuple, list)): + result = stack_multiple(self, level, dropna=dropna, sort=sort) + else: + result = stack(self, level, dropna=dropna, sort=sort) + else: + from pandas.core.reshape.reshape import stack_v3 + + if dropna is not lib.no_default: + raise ValueError( + "dropna must be unspecified with future_stack=True as the new " + "implementation does not introduce rows of NA values. 
This " + "argument will be removed in a future version of pandas." + ) + + if sort is not lib.no_default: + raise ValueError( + "Cannot specify sort with future_stack=True, this argument will be " + "removed in a future version of pandas. Sort the result using " + ".sort_index instead." + ) + + if ( + isinstance(level, (tuple, list)) + and not all(lev in self.columns.names for lev in level) + and not all(isinstance(lev, int) for lev in level) + ): + raise ValueError( + "level should contain all level names or all level " + "numbers, not a mixture of the two." + ) + + if not isinstance(level, (tuple, list)): + level = [level] + level = [self.columns._get_level_number(lev) for lev in level] + result = stack_v3(self, level) + + return result.__finalize__(self, method="stack") + + def explode( + self, + column: IndexLabel, + ignore_index: bool = False, + ) -> DataFrame: + """ + Transform each element of a list-like to a row, replicating index values. + + Parameters + ---------- + column : IndexLabel + Column(s) to explode. + For multiple columns, specify a non-empty list with each element + be str or tuple, and all specified columns their list-like data + on same row of the frame must have matching length. + + .. versionadded:: 1.3.0 + Multi-column explode + + ignore_index : bool, default False + If True, the resulting index will be labeled 0, 1, …, n - 1. + + Returns + ------- + DataFrame + Exploded lists to rows of the subset columns; + index will be duplicated for these rows. + + Raises + ------ + ValueError : + * If columns of the frame are not unique. + * If specified columns to explode is empty list. + * If specified columns to explode have not matching count of + elements rowwise in the frame. + + See Also + -------- + DataFrame.unstack : Pivot a level of the (necessarily hierarchical) + index labels. + DataFrame.melt : Unpivot a DataFrame from wide format to long format. + Series.explode : Explode a DataFrame from list-like columns to long format. + + Notes + ----- + This routine will explode list-likes including lists, tuples, sets, + Series, and np.ndarray. The result dtype of the subset rows will + be object. Scalars will be returned unchanged, and empty list-likes will + result in a np.nan for that row. In addition, the ordering of rows in the + output will be non-deterministic when exploding sets. + + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + >>> df = pd.DataFrame({'A': [[0, 1, 2], 'foo', [], [3, 4]], + ... 'B': 1, + ... 'C': [['a', 'b', 'c'], np.nan, [], ['d', 'e']]}) + >>> df + A B C + 0 [0, 1, 2] 1 [a, b, c] + 1 foo 1 NaN + 2 [] 1 [] + 3 [3, 4] 1 [d, e] + + Single-column explode. + + >>> df.explode('A') + A B C + 0 0 1 [a, b, c] + 0 1 1 [a, b, c] + 0 2 1 [a, b, c] + 1 foo 1 NaN + 2 NaN 1 [] + 3 3 1 [d, e] + 3 4 1 [d, e] + + Multi-column explode. + + >>> df.explode(list('AC')) + A B C + 0 0 1 a + 0 1 1 b + 0 2 1 c + 1 foo 1 NaN + 2 NaN 1 NaN + 3 3 1 d + 3 4 1 e + """ + if not self.columns.is_unique: + duplicate_cols = self.columns[self.columns.duplicated()].tolist() + raise ValueError( + f"DataFrame columns must be unique. 
Duplicate columns: {duplicate_cols}" + ) + + columns: list[Hashable] + if is_scalar(column) or isinstance(column, tuple): + columns = [column] + elif isinstance(column, list) and all( + is_scalar(c) or isinstance(c, tuple) for c in column + ): + if not column: + raise ValueError("column must be nonempty") + if len(column) > len(set(column)): + raise ValueError("column must be unique") + columns = column + else: + raise ValueError("column must be a scalar, tuple, or list thereof") + + df = self.reset_index(drop=True) + if len(columns) == 1: + result = df[columns[0]].explode() + else: + mylen = lambda x: len(x) if (is_list_like(x) and len(x) > 0) else 1 + counts0 = self[columns[0]].apply(mylen) + for c in columns[1:]: + if not all(counts0 == self[c].apply(mylen)): + raise ValueError("columns must have matching element counts") + result = DataFrame({c: df[c].explode() for c in columns}) + result = df.drop(columns, axis=1).join(result) + if ignore_index: + result.index = default_index(len(result)) + else: + result.index = self.index.take(result.index) + result = result.reindex(columns=self.columns, copy=False) + + return result.__finalize__(self, method="explode") + + def unstack(self, level: IndexLabel = -1, fill_value=None, sort: bool = True): + """ + Pivot a level of the (necessarily hierarchical) index labels. + + Returns a DataFrame having a new level of column labels whose inner-most level + consists of the pivoted index labels. + + If the index is not a MultiIndex, the output will be a Series + (the analogue of stack when the columns are not a MultiIndex). + + Parameters + ---------- + level : int, str, or list of these, default -1 (last level) + Level(s) of index to unstack, can pass level name. + fill_value : int, str or dict + Replace NaN with this value if the unstack produces missing values. + sort : bool, default True + Sort the level(s) in the resulting MultiIndex columns. + + Returns + ------- + Series or DataFrame + + See Also + -------- + DataFrame.pivot : Pivot a table based on column values. + DataFrame.stack : Pivot a level of the column labels (inverse operation + from `unstack`). + + Notes + ----- + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), + ... 
('two', 'a'), ('two', 'b')]) + >>> s = pd.Series(np.arange(1.0, 5.0), index=index) + >>> s + one a 1.0 + b 2.0 + two a 3.0 + b 4.0 + dtype: float64 + + >>> s.unstack(level=-1) + a b + one 1.0 2.0 + two 3.0 4.0 + + >>> s.unstack(level=0) + one two + a 1.0 3.0 + b 2.0 4.0 + + >>> df = s.unstack(level=0) + >>> df.unstack() + one a 1.0 + b 2.0 + two a 3.0 + b 4.0 + dtype: float64 + """ + from pandas.core.reshape.reshape import unstack + + result = unstack(self, level, fill_value, sort) + + return result.__finalize__(self, method="unstack") + + @Appender(_shared_docs["melt"] % {"caller": "df.melt(", "other": "melt"}) + def melt( + self, + id_vars=None, + value_vars=None, + var_name=None, + value_name: Hashable = "value", + col_level: Level | None = None, + ignore_index: bool = True, + ) -> DataFrame: + return melt( + self, + id_vars=id_vars, + value_vars=value_vars, + var_name=var_name, + value_name=value_name, + col_level=col_level, + ignore_index=ignore_index, + ).__finalize__(self, method="melt") + + # ---------------------------------------------------------------------- + # Time series-related + + @doc( + Series.diff, + klass="DataFrame", + extra_params="axis : {0 or 'index', 1 or 'columns'}, default 0\n " + "Take difference over rows (0) or columns (1).\n", + other_klass="Series", + examples=dedent( + """ + Difference with previous row + + >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6], + ... 'b': [1, 1, 2, 3, 5, 8], + ... 'c': [1, 4, 9, 16, 25, 36]}) + >>> df + a b c + 0 1 1 1 + 1 2 1 4 + 2 3 2 9 + 3 4 3 16 + 4 5 5 25 + 5 6 8 36 + + >>> df.diff() + a b c + 0 NaN NaN NaN + 1 1.0 0.0 3.0 + 2 1.0 1.0 5.0 + 3 1.0 1.0 7.0 + 4 1.0 2.0 9.0 + 5 1.0 3.0 11.0 + + Difference with previous column + + >>> df.diff(axis=1) + a b c + 0 NaN 0 0 + 1 NaN -1 3 + 2 NaN -1 7 + 3 NaN -1 13 + 4 NaN 0 20 + 5 NaN 2 28 + + Difference with 3rd previous row + + >>> df.diff(periods=3) + a b c + 0 NaN NaN NaN + 1 NaN NaN NaN + 2 NaN NaN NaN + 3 3.0 2.0 15.0 + 4 3.0 4.0 21.0 + 5 3.0 6.0 27.0 + + Difference with following row + + >>> df.diff(periods=-1) + a b c + 0 -1.0 0.0 -3.0 + 1 -1.0 -1.0 -5.0 + 2 -1.0 -1.0 -7.0 + 3 -1.0 -2.0 -9.0 + 4 -1.0 -3.0 -11.0 + 5 NaN NaN NaN + + Overflow in input dtype + + >>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8) + >>> df.diff() + a + 0 NaN + 1 255.0""" + ), + ) + def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame: + if not lib.is_integer(periods): + if not (is_float(periods) and periods.is_integer()): + raise ValueError("periods must be an integer") + periods = int(periods) + + axis = self._get_axis_number(axis) + if axis == 1: + if periods != 0: + # in the periods == 0 case, this is equivalent diff of 0 periods + # along axis=0, and the Manager method may be somewhat more + # performant, so we dispatch in that case. + return self - self.shift(periods, axis=axis) + # With periods=0 this is equivalent to a diff with axis=0 + axis = 0 + + new_data = self._mgr.diff(n=periods) + res_df = self._constructor_from_mgr(new_data, axes=new_data.axes) + return res_df.__finalize__(self, "diff") + + # ---------------------------------------------------------------------- + # Function application + + def _gotitem( + self, + key: IndexLabel, + ndim: int, + subset: DataFrame | Series | None = None, + ) -> DataFrame | Series: + """ + Sub-classes to define. Return a sliced object. 
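+
+        A minimal sketch of the contract (internal API; illustrative values):
+
+        >>> df = pd.DataFrame({"A": [1, 2]})
+        >>> df._gotitem("A", ndim=1).tolist()
+        [1, 2]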
+ + Parameters + ---------- + key : string / list of selections + ndim : {1, 2} + requested ndim of result + subset : object, default None + subset to act on + """ + if subset is None: + subset = self + elif subset.ndim == 1: # is Series + return subset + + # TODO: _shallow_copy(subset)? + return subset[key] + + _agg_see_also_doc = dedent( + """ + See Also + -------- + DataFrame.apply : Perform any type of operations. + DataFrame.transform : Perform transformation type operations. + core.groupby.GroupBy : Perform operations over groups. + core.resample.Resampler : Perform operations over resampled bins. + core.window.Rolling : Perform operations over rolling window. + core.window.Expanding : Perform operations over expanding window. + core.window.ExponentialMovingWindow : Perform operation over exponential weighted + window. + """ + ) + + _agg_examples_doc = dedent( + """ + Examples + -------- + >>> df = pd.DataFrame([[1, 2, 3], + ... [4, 5, 6], + ... [7, 8, 9], + ... [np.nan, np.nan, np.nan]], + ... columns=['A', 'B', 'C']) + + Aggregate these functions over the rows. + + >>> df.agg(['sum', 'min']) + A B C + sum 12.0 15.0 18.0 + min 1.0 2.0 3.0 + + Different aggregations per column. + + >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']}) + A B + sum 12.0 NaN + min 1.0 2.0 + max NaN 8.0 + + Aggregate different functions over the columns and rename the index of the resulting + DataFrame. + + >>> df.agg(x=('A', 'max'), y=('B', 'min'), z=('C', 'mean')) + A B C + x 7.0 NaN NaN + y NaN 2.0 NaN + z NaN NaN 6.0 + + Aggregate over the columns. + + >>> df.agg("mean", axis="columns") + 0 2.0 + 1 5.0 + 2 8.0 + 3 NaN + dtype: float64 + """ + ) + + @doc( + _shared_docs["aggregate"], + klass=_shared_doc_kwargs["klass"], + axis=_shared_doc_kwargs["axis"], + see_also=_agg_see_also_doc, + examples=_agg_examples_doc, + ) + def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): + from pandas.core.apply import frame_apply + + axis = self._get_axis_number(axis) + + op = frame_apply(self, func=func, axis=axis, args=args, kwargs=kwargs) + result = op.agg() + result = reconstruct_and_relabel_result(result, func, **kwargs) + return result + + agg = aggregate + + @doc( + _shared_docs["transform"], + klass=_shared_doc_kwargs["klass"], + axis=_shared_doc_kwargs["axis"], + ) + def transform( + self, func: AggFuncType, axis: Axis = 0, *args, **kwargs + ) -> DataFrame: + from pandas.core.apply import frame_apply + + op = frame_apply(self, func=func, axis=axis, args=args, kwargs=kwargs) + result = op.transform() + assert isinstance(result, DataFrame) + return result + + def apply( + self, + func: AggFuncType, + axis: Axis = 0, + raw: bool = False, + result_type: Literal["expand", "reduce", "broadcast"] | None = None, + args=(), + by_row: Literal[False, "compat"] = "compat", + **kwargs, + ): + """ + Apply a function along an axis of the DataFrame. + + Objects passed to the function are Series objects whose index is + either the DataFrame's index (``axis=0``) or the DataFrame's columns + (``axis=1``). By default (``result_type=None``), the final return type + is inferred from the return type of the applied function. Otherwise, + it depends on the `result_type` argument. + + Parameters + ---------- + func : function + Function to apply to each column or row. + axis : {0 or 'index', 1 or 'columns'}, default 0 + Axis along which the function is applied: + + * 0 or 'index': apply function to each column. + * 1 or 'columns': apply function to each row. 
+ + raw : bool, default False + Determines if row or column is passed as a Series or ndarray object: + + * ``False`` : passes each row or column as a Series to the + function. + * ``True`` : the passed function will receive ndarray objects + instead. + If you are just applying a NumPy reduction function this will + achieve much better performance. + + result_type : {'expand', 'reduce', 'broadcast', None}, default None + These only act when ``axis=1`` (columns): + + * 'expand' : list-like results will be turned into columns. + * 'reduce' : returns a Series if possible rather than expanding + list-like results. This is the opposite of 'expand'. + * 'broadcast' : results will be broadcast to the original shape + of the DataFrame, the original index and columns will be + retained. + + The default behaviour (None) depends on the return value of the + applied function: list-like results will be returned as a Series + of those. However if the apply function returns a Series these + are expanded to columns. + args : tuple + Positional arguments to pass to `func` in addition to the + array/series. + by_row : False or "compat", default "compat" + Only has an effect when ``func`` is a listlike or dictlike of funcs + and the func isn't a string. + If "compat", will if possible first translate the func into pandas + methods (e.g. ``Series().apply(np.sum)`` will be translated to + ``Series().sum()``). If that doesn't work, will try call to apply again with + ``by_row=True`` and if that fails, will call apply again with + ``by_row=False`` (backward compatible). + If False, the funcs will be passed the whole Series at once. + + .. versionadded:: 2.1.0 + **kwargs + Additional keyword arguments to pass as keywords arguments to + `func`. + + Returns + ------- + Series or DataFrame + Result of applying ``func`` along the given axis of the + DataFrame. + + See Also + -------- + DataFrame.map: For elementwise operations. + DataFrame.aggregate: Only perform aggregating type operations. + DataFrame.transform: Only perform transforming type operations. + + Notes + ----- + Functions that mutate the passed object can produce unexpected + behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` + for more details. + + Examples + -------- + >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B']) + >>> df + A B + 0 4 9 + 1 4 9 + 2 4 9 + + Using a numpy universal function (in this case the same as + ``np.sqrt(df)``): + + >>> df.apply(np.sqrt) + A B + 0 2.0 3.0 + 1 2.0 3.0 + 2 2.0 3.0 + + Using a reducing function on either axis + + >>> df.apply(np.sum, axis=0) + A 12 + B 27 + dtype: int64 + + >>> df.apply(np.sum, axis=1) + 0 13 + 1 13 + 2 13 + dtype: int64 + + Returning a list-like will result in a Series + + >>> df.apply(lambda x: [1, 2], axis=1) + 0 [1, 2] + 1 [1, 2] + 2 [1, 2] + dtype: object + + Passing ``result_type='expand'`` will expand list-like results + to columns of a Dataframe + + >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand') + 0 1 + 0 1 2 + 1 1 2 + 2 1 2 + + Returning a Series inside the function is similar to passing + ``result_type='expand'``. The resulting column names + will be the Series index. + + >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1) + foo bar + 0 1 2 + 1 1 2 + 2 1 2 + + Passing ``result_type='broadcast'`` will ensure the same shape + result, whether list-like or scalar is returned by the function, + and broadcast it along the axis. The resulting column names will + be the originals. 
+ + >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast') + A B + 0 1 2 + 1 1 2 + 2 1 2 + """ + from pandas.core.apply import frame_apply + + op = frame_apply( + self, + func=func, + axis=axis, + raw=raw, + result_type=result_type, + by_row=by_row, + args=args, + kwargs=kwargs, + ) + return op.apply().__finalize__(self, method="apply") + + def map( + self, func: PythonFuncType, na_action: str | None = None, **kwargs + ) -> DataFrame: + """ + Apply a function to a Dataframe elementwise. + + .. versionadded:: 2.1.0 + + DataFrame.applymap was deprecated and renamed to DataFrame.map. + + This method applies a function that accepts and returns a scalar + to every element of a DataFrame. + + Parameters + ---------- + func : callable + Python function, returns a single value from a single value. + na_action : {None, 'ignore'}, default None + If 'ignore', propagate NaN values, without passing them to func. + **kwargs + Additional keyword arguments to pass as keywords arguments to + `func`. + + Returns + ------- + DataFrame + Transformed DataFrame. + + See Also + -------- + DataFrame.apply : Apply a function along input axis of DataFrame. + DataFrame.replace: Replace values given in `to_replace` with `value`. + Series.map : Apply a function elementwise on a Series. + + Examples + -------- + >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]]) + >>> df + 0 1 + 0 1.000 2.120 + 1 3.356 4.567 + + >>> df.map(lambda x: len(str(x))) + 0 1 + 0 3 4 + 1 5 5 + + Like Series.map, NA values can be ignored: + + >>> df_copy = df.copy() + >>> df_copy.iloc[0, 0] = pd.NA + >>> df_copy.map(lambda x: len(str(x)), na_action='ignore') + 0 1 + 0 NaN 4 + 1 5.0 5 + + Note that a vectorized version of `func` often exists, which will + be much faster. You could square each number elementwise. + + >>> df.map(lambda x: x**2) + 0 1 + 0 1.000000 4.494400 + 1 11.262736 20.857489 + + But it's better to avoid map in that case. + + >>> df ** 2 + 0 1 + 0 1.000000 4.494400 + 1 11.262736 20.857489 + """ + if na_action not in {"ignore", None}: + raise ValueError( + f"na_action must be 'ignore' or None. Got {repr(na_action)}" + ) + + if self.empty: + return self.copy() + + func = functools.partial(func, **kwargs) + + def infer(x): + return x._map_values(func, na_action=na_action) + + return self.apply(infer).__finalize__(self, "map") + + def applymap( + self, func: PythonFuncType, na_action: NaAction | None = None, **kwargs + ) -> DataFrame: + """ + Apply a function to a Dataframe elementwise. + + .. deprecated:: 2.1.0 + + DataFrame.applymap has been deprecated. Use DataFrame.map instead. + + This method applies a function that accepts and returns a scalar + to every element of a DataFrame. + + Parameters + ---------- + func : callable + Python function, returns a single value from a single value. + na_action : {None, 'ignore'}, default None + If 'ignore', propagate NaN values, without passing them to func. + **kwargs + Additional keyword arguments to pass as keywords arguments to + `func`. + + Returns + ------- + DataFrame + Transformed DataFrame. + + See Also + -------- + DataFrame.apply : Apply a function along input axis of DataFrame. + DataFrame.map : Apply a function along input axis of DataFrame. + DataFrame.replace: Replace values given in `to_replace` with `value`. + + Examples + -------- + >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]]) + >>> df + 0 1 + 0 1.000 2.120 + 1 3.356 4.567 + + >>> df.map(lambda x: len(str(x))) + 0 1 + 0 3 4 + 1 5 5 + """ + warnings.warn( + "DataFrame.applymap has been deprecated. 
Use DataFrame.map instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.map(func, na_action=na_action, **kwargs) + + # ---------------------------------------------------------------------- + # Merging / joining methods + + def _append( + self, + other, + ignore_index: bool = False, + verify_integrity: bool = False, + sort: bool = False, + ) -> DataFrame: + if isinstance(other, (Series, dict)): + if isinstance(other, dict): + if not ignore_index: + raise TypeError("Can only append a dict if ignore_index=True") + other = Series(other) + if other.name is None and not ignore_index: + raise TypeError( + "Can only append a Series if ignore_index=True " + "or if the Series has a name" + ) + + index = Index( + [other.name], + name=self.index.names + if isinstance(self.index, MultiIndex) + else self.index.name, + ) + row_df = other.to_frame().T + # infer_objects is needed for + # test_append_empty_frame_to_series_with_dateutil_tz + other = row_df.infer_objects(copy=False).rename_axis( + index.names, copy=False + ) + elif isinstance(other, list): + if not other: + pass + elif not isinstance(other[0], DataFrame): + other = DataFrame(other) + if self.index.name is not None and not ignore_index: + other.index.name = self.index.name + + from pandas.core.reshape.concat import concat + + if isinstance(other, (list, tuple)): + to_concat = [self, *other] + else: + to_concat = [self, other] + + result = concat( + to_concat, + ignore_index=ignore_index, + verify_integrity=verify_integrity, + sort=sort, + ) + return result.__finalize__(self, method="append") + + def join( + self, + other: DataFrame | Series | Iterable[DataFrame | Series], + on: IndexLabel | None = None, + how: MergeHow = "left", + lsuffix: str = "", + rsuffix: str = "", + sort: bool = False, + validate: JoinValidate | None = None, + ) -> DataFrame: + """ + Join columns of another DataFrame. + + Join columns with `other` DataFrame either on index or on a key + column. Efficiently join multiple DataFrame objects by index at once by + passing a list. + + Parameters + ---------- + other : DataFrame, Series, or a list containing any combination of them + Index should be similar to one of the columns in this one. If a + Series is passed, its name attribute must be set, and that will be + used as the column name in the resulting joined DataFrame. + on : str, list of str, or array-like, optional + Column or index level name(s) in the caller to join on the index + in `other`, otherwise joins index-on-index. If multiple + values given, the `other` DataFrame must have a MultiIndex. Can + pass an array as the join key if it is not already contained in + the calling DataFrame. Like an Excel VLOOKUP operation. + how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'left' + How to handle the operation of the two objects. + + * left: use calling frame's index (or column if on is specified) + * right: use `other`'s index. + * outer: form union of calling frame's index (or column if on is + specified) with `other`'s index, and sort it lexicographically. + * inner: form intersection of calling frame's index (or column if + on is specified) with `other`'s index, preserving the order + of the calling's one. + * cross: creates the cartesian product from both frames, preserves the order + of the left keys. + + .. versionadded:: 1.2.0 + + lsuffix : str, default '' + Suffix to use from left frame's overlapping columns. + rsuffix : str, default '' + Suffix to use from right frame's overlapping columns. 
+ sort : bool, default False + Order result DataFrame lexicographically by the join key. If False, + the order of the join key depends on the join type (how keyword). + validate : str, optional + If specified, checks if join is of specified type. + + * "one_to_one" or "1:1": check if join keys are unique in both left + and right datasets. + * "one_to_many" or "1:m": check if join keys are unique in left dataset. + * "many_to_one" or "m:1": check if join keys are unique in right dataset. + * "many_to_many" or "m:m": allowed, but does not result in checks. + + .. versionadded:: 1.5.0 + + Returns + ------- + DataFrame + A dataframe containing columns from both the caller and `other`. + + See Also + -------- + DataFrame.merge : For column(s)-on-column(s) operations. + + Notes + ----- + Parameters `on`, `lsuffix`, and `rsuffix` are not supported when + passing a list of `DataFrame` objects. + + Examples + -------- + >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], + ... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']}) + + >>> df + key A + 0 K0 A0 + 1 K1 A1 + 2 K2 A2 + 3 K3 A3 + 4 K4 A4 + 5 K5 A5 + + >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], + ... 'B': ['B0', 'B1', 'B2']}) + + >>> other + key B + 0 K0 B0 + 1 K1 B1 + 2 K2 B2 + + Join DataFrames using their indexes. + + >>> df.join(other, lsuffix='_caller', rsuffix='_other') + key_caller A key_other B + 0 K0 A0 K0 B0 + 1 K1 A1 K1 B1 + 2 K2 A2 K2 B2 + 3 K3 A3 NaN NaN + 4 K4 A4 NaN NaN + 5 K5 A5 NaN NaN + + If we want to join using the key columns, we need to set key to be + the index in both `df` and `other`. The joined DataFrame will have + key as its index. + + >>> df.set_index('key').join(other.set_index('key')) + A B + key + K0 A0 B0 + K1 A1 B1 + K2 A2 B2 + K3 A3 NaN + K4 A4 NaN + K5 A5 NaN + + Another option to join using the key columns is to use the `on` + parameter. DataFrame.join always uses `other`'s index but we can use + any column in `df`. This method preserves the original DataFrame's + index in the result. + + >>> df.join(other.set_index('key'), on='key') + key A B + 0 K0 A0 B0 + 1 K1 A1 B1 + 2 K2 A2 B2 + 3 K3 A3 NaN + 4 K4 A4 NaN + 5 K5 A5 NaN + + Using non-unique key values shows how they are matched. + + >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K1', 'K3', 'K0', 'K1'], + ... 
'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']}) + + >>> df + key A + 0 K0 A0 + 1 K1 A1 + 2 K1 A2 + 3 K3 A3 + 4 K0 A4 + 5 K1 A5 + + >>> df.join(other.set_index('key'), on='key', validate='m:1') + key A B + 0 K0 A0 B0 + 1 K1 A1 B1 + 2 K1 A2 B1 + 3 K3 A3 NaN + 4 K0 A4 B0 + 5 K1 A5 B1 + """ + from pandas.core.reshape.concat import concat + from pandas.core.reshape.merge import merge + + if isinstance(other, Series): + if other.name is None: + raise ValueError("Other Series must have a name") + other = DataFrame({other.name: other}) + + if isinstance(other, DataFrame): + if how == "cross": + return merge( + self, + other, + how=how, + on=on, + suffixes=(lsuffix, rsuffix), + sort=sort, + validate=validate, + ) + return merge( + self, + other, + left_on=on, + how=how, + left_index=on is None, + right_index=True, + suffixes=(lsuffix, rsuffix), + sort=sort, + validate=validate, + ) + else: + if on is not None: + raise ValueError( + "Joining multiple DataFrames only supported for joining on index" + ) + + if rsuffix or lsuffix: + raise ValueError( + "Suffixes not supported when joining multiple DataFrames" + ) + + # Mypy thinks the RHS is a + # "Union[DataFrame, Series, Iterable[Union[DataFrame, Series]]]" whereas + # the LHS is an "Iterable[DataFrame]", but in reality both types are + # "Iterable[Union[DataFrame, Series]]" due to the if statements + frames = [cast("DataFrame | Series", self)] + list(other) + + can_concat = all(df.index.is_unique for df in frames) + + # join indexes only using concat + if can_concat: + if how == "left": + res = concat( + frames, axis=1, join="outer", verify_integrity=True, sort=sort + ) + return res.reindex(self.index, copy=False) + else: + return concat( + frames, axis=1, join=how, verify_integrity=True, sort=sort + ) + + joined = frames[0] + + for frame in frames[1:]: + joined = merge( + joined, + frame, + how=how, + left_index=True, + right_index=True, + validate=validate, + ) + + return joined + + @Substitution("") + @Appender(_merge_doc, indents=2) + def merge( + self, + right: DataFrame | Series, + how: MergeHow = "inner", + on: IndexLabel | None = None, + left_on: IndexLabel | None = None, + right_on: IndexLabel | None = None, + left_index: bool = False, + right_index: bool = False, + sort: bool = False, + suffixes: Suffixes = ("_x", "_y"), + copy: bool | None = None, + indicator: str | bool = False, + validate: MergeValidate | None = None, + ) -> DataFrame: + from pandas.core.reshape.merge import merge + + return merge( + self, + right, + how=how, + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + sort=sort, + suffixes=suffixes, + copy=copy, + indicator=indicator, + validate=validate, + ) + + def round( + self, decimals: int | dict[IndexLabel, int] | Series = 0, *args, **kwargs + ) -> DataFrame: + """ + Round a DataFrame to a variable number of decimal places. + + Parameters + ---------- + decimals : int, dict, Series + Number of decimal places to round each column to. If an int is + given, round each column to the same number of places. + Otherwise dict and Series round to variable numbers of places. + Column names should be in the keys if `decimals` is a + dict-like, or in the index if `decimals` is a Series. Any + columns not included in `decimals` will be left as is. Elements + of `decimals` which are not columns of the input will be + ignored. + *args + Additional keywords have no effect but might be accepted for + compatibility with numpy. 
+ **kwargs + Additional keywords have no effect but might be accepted for + compatibility with numpy. + + Returns + ------- + DataFrame + A DataFrame with the affected columns rounded to the specified + number of decimal places. + + See Also + -------- + numpy.around : Round a numpy array to the given number of decimals. + Series.round : Round a Series to the given number of decimals. + + Examples + -------- + >>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)], + ... columns=['dogs', 'cats']) + >>> df + dogs cats + 0 0.21 0.32 + 1 0.01 0.67 + 2 0.66 0.03 + 3 0.21 0.18 + + By providing an integer each column is rounded to the same number + of decimal places + + >>> df.round(1) + dogs cats + 0 0.2 0.3 + 1 0.0 0.7 + 2 0.7 0.0 + 3 0.2 0.2 + + With a dict, the number of places for specific columns can be + specified with the column names as key and the number of decimal + places as value + + >>> df.round({'dogs': 1, 'cats': 0}) + dogs cats + 0 0.2 0.0 + 1 0.0 1.0 + 2 0.7 0.0 + 3 0.2 0.0 + + Using a Series, the number of places for specific columns can be + specified with the column names as index and the number of + decimal places as value + + >>> decimals = pd.Series([0, 1], index=['cats', 'dogs']) + >>> df.round(decimals) + dogs cats + 0 0.2 0.0 + 1 0.0 1.0 + 2 0.7 0.0 + 3 0.2 0.0 + """ + from pandas.core.reshape.concat import concat + + def _dict_round(df: DataFrame, decimals): + for col, vals in df.items(): + try: + yield _series_round(vals, decimals[col]) + except KeyError: + yield vals + + def _series_round(ser: Series, decimals: int) -> Series: + if is_integer_dtype(ser.dtype) or is_float_dtype(ser.dtype): + return ser.round(decimals) + return ser + + nv.validate_round(args, kwargs) + + if isinstance(decimals, (dict, Series)): + if isinstance(decimals, Series) and not decimals.index.is_unique: + raise ValueError("Index of decimals must be unique") + if is_dict_like(decimals) and not all( + is_integer(value) for _, value in decimals.items() + ): + raise TypeError("Values in decimals must be integers") + new_cols = list(_dict_round(self, decimals)) + elif is_integer(decimals): + # Dispatch to Block.round + # Argument "decimals" to "round" of "BaseBlockManager" has incompatible + # type "Union[int, integer[Any]]"; expected "int" + new_mgr = self._mgr.round( + decimals=decimals, # type: ignore[arg-type] + using_cow=using_copy_on_write(), + ) + return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__( + self, method="round" + ) + else: + raise TypeError("decimals must be an integer, a dict-like or a Series") + + if new_cols is not None and len(new_cols) > 0: + return self._constructor( + concat(new_cols, axis=1), index=self.index, columns=self.columns + ).__finalize__(self, method="round") + else: + return self.copy(deep=False) + + # ---------------------------------------------------------------------- + # Statistical methods, etc. + + def corr( + self, + method: CorrelationMethod = "pearson", + min_periods: int = 1, + numeric_only: bool = False, + ) -> DataFrame: + """ + Compute pairwise correlation of columns, excluding NA/null values. + + Parameters + ---------- + method : {'pearson', 'kendall', 'spearman'} or callable + Method of correlation: + + * pearson : standard correlation coefficient + * kendall : Kendall Tau correlation coefficient + * spearman : Spearman rank correlation + * callable: callable with input two 1d ndarrays + and returning a float. 
Note that the returned matrix from corr
+            will have 1 along the diagonals and will be symmetric
+            regardless of the callable's behavior.
+        min_periods : int, optional
+            Minimum number of observations required per pair of columns
+            to have a valid result. Currently only available for Pearson
+            and Spearman correlation.
+        numeric_only : bool, default False
+            Include only `float`, `int` or `boolean` data.
+
+            .. versionadded:: 1.5.0
+
+            .. versionchanged:: 2.0.0
+                The default value of ``numeric_only`` is now ``False``.
+
+        Returns
+        -------
+        DataFrame
+            Correlation matrix.
+
+        See Also
+        --------
+        DataFrame.corrwith : Compute pairwise correlation with another
+            DataFrame or Series.
+        Series.corr : Compute the correlation between two Series.
+
+        Notes
+        -----
+        Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations.
+
+        * `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_
+        * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_
+        * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_
+
+        Examples
+        --------
+        >>> def histogram_intersection(a, b):
+        ...     v = np.minimum(a, b).sum().round(decimals=1)
+        ...     return v
+        >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
+        ...                   columns=['dogs', 'cats'])
+        >>> df.corr(method=histogram_intersection)
+              dogs  cats
+        dogs   1.0   0.3
+        cats   0.3   1.0
+
+        >>> df = pd.DataFrame([(1, 1), (2, np.nan), (np.nan, 3), (4, 4)],
+        ...                   columns=['dogs', 'cats'])
+        >>> df.corr(min_periods=3)
+              dogs  cats
+        dogs   1.0   NaN
+        cats   NaN   1.0
+        """  # noqa: E501
+        data = self._get_numeric_data() if numeric_only else self
+        cols = data.columns
+        idx = cols.copy()
+        mat = data.to_numpy(dtype=float, na_value=np.nan, copy=False)
+
+        if method == "pearson":
+            correl = libalgos.nancorr(mat, minp=min_periods)
+        elif method == "spearman":
+            correl = libalgos.nancorr_spearman(mat, minp=min_periods)
+        elif method == "kendall" or callable(method):
+            if min_periods is None:
+                min_periods = 1
+            mat = mat.T
+            corrf = nanops.get_corr_func(method)
+            K = len(cols)
+            correl = np.empty((K, K), dtype=float)
+            mask = np.isfinite(mat)
+            for i, ac in enumerate(mat):
+                for j, bc in enumerate(mat):
+                    if i > j:
+                        continue
+
+                    valid = mask[i] & mask[j]
+                    if valid.sum() < min_periods:
+                        c = np.nan
+                    elif i == j:
+                        c = 1.0
+                    elif not valid.all():
+                        c = corrf(ac[valid], bc[valid])
+                    else:
+                        c = corrf(ac, bc)
+                    correl[i, j] = c
+                    correl[j, i] = c
+        else:
+            raise ValueError(
+                "method must be either 'pearson', "
+                "'spearman', 'kendall', or a callable, "
+                f"'{method}' was supplied"
+            )
+
+        result = self._constructor(correl, index=idx, columns=cols, copy=False)
+        return result.__finalize__(self, method="corr")
+
+    def cov(
+        self,
+        min_periods: int | None = None,
+        ddof: int | None = 1,
+        numeric_only: bool = False,
+    ) -> DataFrame:
+        """
+        Compute pairwise covariance of columns, excluding NA/null values.
+
+        Compute the pairwise covariance among the series of a DataFrame.
+        The returned data frame is the `covariance matrix
+        <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
+        of the DataFrame.
+
+        Both NA and null values are automatically excluded from the
+        calculation. (See the note below about bias from missing values.)
+        A threshold can be set for the minimum number of
+        observations for each value created. Comparisons with observations
+        below this threshold will be returned as ``NaN``.
+
+        This method is generally used for the analysis of time series data to
+        understand the relationship between different measures
+        across time.
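+
+        As a quick orientation, a minimal sketch (illustrative values; ``y``
+        is an exact linear function of ``x``, so the off-diagonal entry is
+        ``2 * var(x)``):
+
+        >>> pd.DataFrame({"x": [1.0, 2.0, 3.0], "y": [2.0, 4.0, 6.0]}).cov()
+             x    y
+        x  1.0  2.0
+        y  2.0  4.0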
+
+        Parameters
+        ----------
+        min_periods : int, optional
+            Minimum number of observations required per pair of columns
+            to have a valid result.
+
+        ddof : int, default 1
+            Delta degrees of freedom. The divisor used in calculations
+            is ``N - ddof``, where ``N`` represents the number of elements.
+            This argument is applicable only when no ``nan`` is in the dataframe.
+
+        numeric_only : bool, default False
+            Include only `float`, `int` or `boolean` data.
+
+            .. versionadded:: 1.5.0
+
+            .. versionchanged:: 2.0.0
+                The default value of ``numeric_only`` is now ``False``.
+
+        Returns
+        -------
+        DataFrame
+            The covariance matrix of the series of the DataFrame.
+
+        See Also
+        --------
+        Series.cov : Compute covariance with another Series.
+        core.window.ewm.ExponentialMovingWindow.cov : Exponential weighted sample
+            covariance.
+        core.window.expanding.Expanding.cov : Expanding sample covariance.
+        core.window.rolling.Rolling.cov : Rolling sample covariance.
+
+        Notes
+        -----
+        Returns the covariance matrix of the DataFrame's time series.
+        The covariance is normalized by ``N - ddof``.
+
+        For DataFrames that have Series that are missing data (assuming that
+        data is `missing at random
+        <https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
+        the returned covariance matrix will be an unbiased estimate
+        of the variance and covariance between the member Series.
+
+        However, for many applications this estimate may not be acceptable
+        because the estimated covariance matrix is not guaranteed to be positive
+        semi-definite. This could lead to estimated correlations having
+        absolute values which are greater than one, and/or a non-invertible
+        covariance matrix. See `Estimation of covariance matrices
+        <https://en.wikipedia.org/wiki/Estimation_of_covariance_matrices>`__ for
+        more details.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
+        ...                   columns=['dogs', 'cats'])
+        >>> df.cov()
+                  dogs      cats
+        dogs  0.666667 -1.000000
+        cats -1.000000  1.666667
+
+        >>> np.random.seed(42)
+        >>> df = pd.DataFrame(np.random.randn(1000, 5),
+        ...                   columns=['a', 'b', 'c', 'd', 'e'])
+        >>> df.cov()
+                  a         b         c         d         e
+        a  0.998438 -0.020161  0.059277 -0.008943  0.014144
+        b -0.020161  1.059352 -0.008543 -0.024738  0.009826
+        c  0.059277 -0.008543  1.010670 -0.001486 -0.000271
+        d -0.008943 -0.024738 -0.001486  0.921297 -0.013692
+        e  0.014144  0.009826 -0.000271 -0.013692  0.977795
+
+        **Minimum number of periods**
+
+        This method also supports an optional ``min_periods`` keyword
+        that specifies the required minimum number of non-NA observations for
+        each column pair in order to have a valid result:
+
+        >>> np.random.seed(42)
+        >>> df = pd.DataFrame(np.random.randn(20, 3),
+        ...
columns=['a', 'b', 'c']) + >>> df.loc[df.index[:5], 'a'] = np.nan + >>> df.loc[df.index[5:10], 'b'] = np.nan + >>> df.cov(min_periods=12) + a b c + a 0.316741 NaN -0.150812 + b NaN 1.248003 0.191417 + c -0.150812 0.191417 0.895202 + """ + data = self._get_numeric_data() if numeric_only else self + cols = data.columns + idx = cols.copy() + mat = data.to_numpy(dtype=float, na_value=np.nan, copy=False) + + if notna(mat).all(): + if min_periods is not None and min_periods > len(mat): + base_cov = np.empty((mat.shape[1], mat.shape[1])) + base_cov.fill(np.nan) + else: + base_cov = np.cov(mat.T, ddof=ddof) + base_cov = base_cov.reshape((len(cols), len(cols))) + else: + base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods) + + result = self._constructor(base_cov, index=idx, columns=cols, copy=False) + return result.__finalize__(self, method="cov") + + def corrwith( + self, + other: DataFrame | Series, + axis: Axis = 0, + drop: bool = False, + method: CorrelationMethod = "pearson", + numeric_only: bool = False, + ) -> Series: + """ + Compute pairwise correlation. + + Pairwise correlation is computed between rows or columns of + DataFrame with rows or columns of Series or DataFrame. DataFrames + are first aligned along both axes before computing the + correlations. + + Parameters + ---------- + other : DataFrame, Series + Object with which to compute correlations. + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to use. 0 or 'index' to compute row-wise, 1 or 'columns' for + column-wise. + drop : bool, default False + Drop missing indices from result. + method : {'pearson', 'kendall', 'spearman'} or callable + Method of correlation: + + * pearson : standard correlation coefficient + * kendall : Kendall Tau correlation coefficient + * spearman : Spearman rank correlation + * callable: callable with input two 1d ndarrays + and returning a float. + + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + The default value of ``numeric_only`` is now ``False``. + + Returns + ------- + Series + Pairwise correlations. + + See Also + -------- + DataFrame.corr : Compute pairwise correlation of columns. 
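+        Series.corr : Compute the correlation between two Series.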
+ + Examples + -------- + >>> index = ["a", "b", "c", "d", "e"] + >>> columns = ["one", "two", "three", "four"] + >>> df1 = pd.DataFrame(np.arange(20).reshape(5, 4), index=index, columns=columns) + >>> df2 = pd.DataFrame(np.arange(16).reshape(4, 4), index=index[:4], columns=columns) + >>> df1.corrwith(df2) + one 1.0 + two 1.0 + three 1.0 + four 1.0 + dtype: float64 + + >>> df2.corrwith(df1, axis=1) + a 1.0 + b 1.0 + c 1.0 + d 1.0 + e NaN + dtype: float64 + """ # noqa: E501 + axis = self._get_axis_number(axis) + this = self._get_numeric_data() if numeric_only else self + + if isinstance(other, Series): + return this.apply(lambda x: other.corr(x, method=method), axis=axis) + + if numeric_only: + other = other._get_numeric_data() + left, right = this.align(other, join="inner", copy=False) + + if axis == 1: + left = left.T + right = right.T + + if method == "pearson": + # mask missing values + left = left + right * 0 + right = right + left * 0 + + # demeaned data + ldem = left - left.mean(numeric_only=numeric_only) + rdem = right - right.mean(numeric_only=numeric_only) + + num = (ldem * rdem).sum() + dom = ( + (left.count() - 1) + * left.std(numeric_only=numeric_only) + * right.std(numeric_only=numeric_only) + ) + + correl = num / dom + + elif method in ["kendall", "spearman"] or callable(method): + + def c(x): + return nanops.nancorr(x[0], x[1], method=method) + + correl = self._constructor_sliced( + map(c, zip(left.values.T, right.values.T)), + index=left.columns, + copy=False, + ) + + else: + raise ValueError( + f"Invalid method {method} was passed, " + "valid methods are: 'pearson', 'kendall', " + "'spearman', or callable" + ) + + if not drop: + # Find non-matching labels along the given axis + # and append missing correlations (GH 22375) + raxis: AxisInt = 1 if axis == 0 else 0 + result_index = this._get_axis(raxis).union(other._get_axis(raxis)) + idx_diff = result_index.difference(correl.index) + + if len(idx_diff) > 0: + correl = correl._append( + Series([np.nan] * len(idx_diff), index=idx_diff) + ) + + return correl + + # ---------------------------------------------------------------------- + # ndarray-like stats methods + + def count(self, axis: Axis = 0, numeric_only: bool = False): + """ + Count non-NA cells for each column or row. + + The values `None`, `NaN`, `NaT`, ``pandas.NA`` are considered NA. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns'}, default 0 + If 0 or 'index' counts are generated for each column. + If 1 or 'columns' counts are generated for each row. + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + Returns + ------- + Series + For each column/row the number of non-NA/null entries. + + See Also + -------- + Series.count: Number of non-NA elements in a Series. + DataFrame.value_counts: Count unique combinations of columns. + DataFrame.shape: Number of DataFrame rows and columns (including NA + elements). + DataFrame.isna: Boolean same-sized DataFrame showing places of NA + elements. + + Examples + -------- + Constructing DataFrame from a dictionary: + + >>> df = pd.DataFrame({"Person": + ... ["John", "Myla", "Lewis", "John", "Myla"], + ... "Age": [24., np.nan, 21., 33, 26], + ... 
"Single": [False, True, True, True, False]}) + >>> df + Person Age Single + 0 John 24.0 False + 1 Myla NaN True + 2 Lewis 21.0 True + 3 John 33.0 True + 4 Myla 26.0 False + + Notice the uncounted NA values: + + >>> df.count() + Person 5 + Age 4 + Single 5 + dtype: int64 + + Counts for each **row**: + + >>> df.count(axis='columns') + 0 3 + 1 2 + 2 3 + 3 3 + 4 3 + dtype: int64 + """ + axis = self._get_axis_number(axis) + + if numeric_only: + frame = self._get_numeric_data() + else: + frame = self + + # GH #423 + if len(frame._get_axis(axis)) == 0: + result = self._constructor_sliced(0, index=frame._get_agg_axis(axis)) + else: + result = notna(frame).sum(axis=axis) + + return result.astype("int64", copy=False).__finalize__(self, method="count") + + def _reduce( + self, + op, + name: str, + *, + axis: Axis = 0, + skipna: bool = True, + numeric_only: bool = False, + filter_type=None, + **kwds, + ): + assert filter_type is None or filter_type == "bool", filter_type + out_dtype = "bool" if filter_type == "bool" else None + + if axis is not None: + axis = self._get_axis_number(axis) + + def func(values: np.ndarray): + # We only use this in the case that operates on self.values + return op(values, axis=axis, skipna=skipna, **kwds) + + dtype_has_keepdims: dict[ExtensionDtype, bool] = {} + + def blk_func(values, axis: Axis = 1): + if isinstance(values, ExtensionArray): + if not is_1d_only_ea_dtype(values.dtype) and not isinstance( + self._mgr, ArrayManager + ): + return values._reduce(name, axis=1, skipna=skipna, **kwds) + has_keepdims = dtype_has_keepdims.get(values.dtype) + if has_keepdims is None: + sign = signature(values._reduce) + has_keepdims = "keepdims" in sign.parameters + dtype_has_keepdims[values.dtype] = has_keepdims + if has_keepdims: + return values._reduce(name, skipna=skipna, keepdims=True, **kwds) + else: + warnings.warn( + f"{type(values)}._reduce will require a `keepdims` parameter " + "in the future", + FutureWarning, + stacklevel=find_stack_level(), + ) + result = values._reduce(name, skipna=skipna, **kwds) + return np.array([result]) + else: + return op(values, axis=axis, skipna=skipna, **kwds) + + def _get_data() -> DataFrame: + if filter_type is None: + data = self._get_numeric_data() + else: + # GH#25101, GH#24434 + assert filter_type == "bool" + data = self._get_bool_data() + return data + + # Case with EAs see GH#35881 + df = self + if numeric_only: + df = _get_data() + if axis is None: + dtype = find_common_type([arr.dtype for arr in df._mgr.arrays]) + if isinstance(dtype, ExtensionDtype): + df = df.astype(dtype, copy=False) + arr = concat_compat(list(df._iter_column_arrays())) + return arr._reduce(name, skipna=skipna, keepdims=False, **kwds) + return func(df.values) + elif axis == 1: + if len(df.index) == 0: + # Taking a transpose would result in no columns, losing the dtype. + # In the empty case, reducing along axis 0 or 1 gives the same + # result dtype, so reduce with axis=0 and ignore values + result = df._reduce( + op, + name, + axis=0, + skipna=skipna, + numeric_only=False, + filter_type=filter_type, + **kwds, + ).iloc[:0] + result.index = df.index + return result + + # kurtosis excluded since groupby does not implement it + if df.shape[1] and name != "kurt": + dtype = find_common_type([arr.dtype for arr in df._mgr.arrays]) + if isinstance(dtype, ExtensionDtype): + # GH 54341: fastpath for EA-backed axis=1 reductions + # This flattens the frame into a single 1D array while keeping + # track of the row and column indices of the original frame. 
Once + # flattened, grouping by the row indices and aggregating should + # be equivalent to transposing the original frame and aggregating + # with axis=0. + name = {"argmax": "idxmax", "argmin": "idxmin"}.get(name, name) + df = df.astype(dtype, copy=False) + arr = concat_compat(list(df._iter_column_arrays())) + nrows, ncols = df.shape + row_index = np.tile(np.arange(nrows), ncols) + col_index = np.repeat(np.arange(ncols), nrows) + ser = Series(arr, index=col_index, copy=False) + result = ser.groupby(row_index).agg(name, **kwds) + result.index = df.index + if not skipna and name not in ("any", "all"): + mask = df.isna().to_numpy(dtype=np.bool_).any(axis=1) + other = -1 if name in ("idxmax", "idxmin") else lib.no_default + result = result.mask(mask, other) + return result + + df = df.T + + # After possibly _get_data and transposing, we are now in the + # simple case where we can use BlockManager.reduce + res = df._mgr.reduce(blk_func) + out = df._constructor_from_mgr(res, axes=res.axes).iloc[0] + if out_dtype is not None and out.dtype != "boolean": + out = out.astype(out_dtype) + elif (df._mgr.get_dtypes() == object).any() and name not in ["any", "all"]: + out = out.astype(object) + elif len(self) == 0 and out.dtype == object and name in ("sum", "prod"): + # Even if we are object dtype, follow numpy and return + # float64, see test_apply_funcs_over_empty + out = out.astype(np.float64) + + return out + + def _reduce_axis1(self, name: str, func, skipna: bool) -> Series: + """ + Special case for _reduce to try to avoid a potentially-expensive transpose. + + Apply the reduction block-wise along axis=1 and then reduce the resulting + 1D arrays. + """ + if name == "all": + result = np.ones(len(self), dtype=bool) + ufunc = np.logical_and + elif name == "any": + result = np.zeros(len(self), dtype=bool) + # error: Incompatible types in assignment + # (expression has type "_UFunc_Nin2_Nout1[Literal['logical_or'], + # Literal[20], Literal[False]]", variable has type + # "_UFunc_Nin2_Nout1[Literal['logical_and'], Literal[20], + # Literal[True]]") + ufunc = np.logical_or # type: ignore[assignment] + else: + raise NotImplementedError(name) + + for arr in self._mgr.arrays: + middle = func(arr, axis=0, skipna=skipna) + result = ufunc(result, middle) + + res_ser = self._constructor_sliced(result, index=self.index, copy=False) + return res_ser + + @doc(make_doc("any", ndim=2)) + # error: Signature of "any" incompatible with supertype "NDFrame" + def any( # type: ignore[override] + self, + *, + axis: Axis = 0, + bool_only: bool = False, + skipna: bool = True, + **kwargs, + ) -> Series | bool: + result = self._logical_func( + "any", nanops.nanany, axis, bool_only, skipna, **kwargs + ) + if isinstance(result, Series): + result = result.__finalize__(self, method="any") + return result + + @doc(make_doc("all", ndim=2)) + def all( + self, + axis: Axis = 0, + bool_only: bool = False, + skipna: bool = True, + **kwargs, + ) -> Series | bool: + result = self._logical_func( + "all", nanops.nanall, axis, bool_only, skipna, **kwargs + ) + if isinstance(result, Series): + result = result.__finalize__(self, method="all") + return result + + @doc(make_doc("min", ndim=2)) + def min( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + result = super().min(axis, skipna, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="min") + return result + + @doc(make_doc("max", ndim=2)) + def max( + self, + axis: Axis | None = 0, + 
skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + result = super().max(axis, skipna, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="max") + return result + + @doc(make_doc("sum", ndim=2)) + def sum( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + min_count: int = 0, + **kwargs, + ): + result = super().sum(axis, skipna, numeric_only, min_count, **kwargs) + return result.__finalize__(self, method="sum") + + @doc(make_doc("prod", ndim=2)) + def prod( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + min_count: int = 0, + **kwargs, + ): + result = super().prod(axis, skipna, numeric_only, min_count, **kwargs) + return result.__finalize__(self, method="prod") + + @doc(make_doc("mean", ndim=2)) + def mean( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + result = super().mean(axis, skipna, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="mean") + return result + + @doc(make_doc("median", ndim=2)) + def median( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + result = super().median(axis, skipna, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="median") + return result + + @doc(make_doc("sem", ndim=2)) + def sem( + self, + axis: Axis | None = 0, + skipna: bool = True, + ddof: int = 1, + numeric_only: bool = False, + **kwargs, + ): + result = super().sem(axis, skipna, ddof, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="sem") + return result + + @doc(make_doc("var", ndim=2)) + def var( + self, + axis: Axis | None = 0, + skipna: bool = True, + ddof: int = 1, + numeric_only: bool = False, + **kwargs, + ): + result = super().var(axis, skipna, ddof, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="var") + return result + + @doc(make_doc("std", ndim=2)) + def std( + self, + axis: Axis | None = 0, + skipna: bool = True, + ddof: int = 1, + numeric_only: bool = False, + **kwargs, + ): + result = super().std(axis, skipna, ddof, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="std") + return result + + @doc(make_doc("skew", ndim=2)) + def skew( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + result = super().skew(axis, skipna, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="skew") + return result + + @doc(make_doc("kurt", ndim=2)) + def kurt( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + result = super().kurt(axis, skipna, numeric_only, **kwargs) + if isinstance(result, Series): + result = result.__finalize__(self, method="kurt") + return result + + kurtosis = kurt + product = prod + + @doc(make_doc("cummin", ndim=2)) + def cummin(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cummin(self, axis, skipna, *args, **kwargs) + + @doc(make_doc("cummax", ndim=2)) + def cummax(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cummax(self, axis, skipna, *args, **kwargs) + + @doc(make_doc("cumsum", ndim=2)) + def cumsum(self, axis: Axis | None = None, 
skipna: bool = True, *args, **kwargs): + return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) + + @doc(make_doc("cumprod", ndim=2)) + def cumprod(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) + + def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series: + """ + Count number of distinct elements in specified axis. + + Return Series with number of distinct elements. Can ignore NaN + values. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for + column-wise. + dropna : bool, default True + Don't include NaN in the counts. + + Returns + ------- + Series + + See Also + -------- + Series.nunique: Method nunique for Series. + DataFrame.count: Count non-NA cells for each column or row. + + Examples + -------- + >>> df = pd.DataFrame({'A': [4, 5, 6], 'B': [4, 1, 1]}) + >>> df.nunique() + A 3 + B 2 + dtype: int64 + + >>> df.nunique(axis=1) + 0 1 + 1 2 + 2 2 + dtype: int64 + """ + return self.apply(Series.nunique, axis=axis, dropna=dropna) + + @doc(_shared_docs["idxmin"], numeric_only_default="False") + def idxmin( + self, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False + ) -> Series: + axis = self._get_axis_number(axis) + + if self.empty and len(self.axes[axis]): + axis_dtype = self.axes[axis].dtype + return self._constructor_sliced(dtype=axis_dtype) + + if numeric_only: + data = self._get_numeric_data() + else: + data = self + + res = data._reduce( + nanops.nanargmin, "argmin", axis=axis, skipna=skipna, numeric_only=False + ) + indices = res._values + # indices will always be np.ndarray since axis is not None + + if (indices == -1).any(): + warnings.warn( + f"The behavior of {type(self).__name__}.idxmin with all-NA " + "values, or any-NA and skipna=False, is deprecated. In a future " + "version this will raise ValueError", + FutureWarning, + stacklevel=find_stack_level(), + ) + + index = data._get_axis(axis) + result = algorithms.take( + index._values, indices, allow_fill=True, fill_value=index._na_value + ) + final_result = data._constructor_sliced(result, index=data._get_agg_axis(axis)) + return final_result.__finalize__(self, method="idxmin") + + @doc(_shared_docs["idxmax"], numeric_only_default="False") + def idxmax( + self, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False + ) -> Series: + axis = self._get_axis_number(axis) + + if self.empty and len(self.axes[axis]): + axis_dtype = self.axes[axis].dtype + return self._constructor_sliced(dtype=axis_dtype) + + if numeric_only: + data = self._get_numeric_data() + else: + data = self + + res = data._reduce( + nanops.nanargmax, "argmax", axis=axis, skipna=skipna, numeric_only=False + ) + indices = res._values + # indices will always be 1d array since axis is not None + + if (indices == -1).any(): + warnings.warn( + f"The behavior of {type(self).__name__}.idxmax with all-NA " + "values, or any-NA and skipna=False, is deprecated. In a future " + "version this will raise ValueError", + FutureWarning, + stacklevel=find_stack_level(), + ) + + index = data._get_axis(axis) + result = algorithms.take( + index._values, indices, allow_fill=True, fill_value=index._na_value + ) + final_result = data._constructor_sliced(result, index=data._get_agg_axis(axis)) + return final_result.__finalize__(self, method="idxmax") + + def _get_agg_axis(self, axis_num: int) -> Index: + """ + Let's be explicit about this.
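A hedged illustration of the deprecation warning in idxmin/idxmax above; the -1 sentinel comes from nanargmin/nanargmax on all-NA input:
import numpy as np
import pandas as pd

df = pd.DataFrame({"x": [1.0, 0.5, 2.0], "y": [np.nan] * 3})
# 'y' is all-NA: its argmin is the -1 sentinel, so the pandas version in
# this diff emits a FutureWarning and fills that entry with NaN.
print(df.idxmin())  # x -> 1, y -> NaN (with a FutureWarning)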
+ """ + if axis_num == 0: + return self.columns + elif axis_num == 1: + return self.index + else: + raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})") + + def mode( + self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True + ) -> DataFrame: + """ + Get the mode(s) of each element along the selected axis. + + The mode of a set of values is the value that appears most often. + It can be multiple values. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to iterate over while searching for the mode: + + * 0 or 'index' : get mode of each column + * 1 or 'columns' : get mode of each row. + + numeric_only : bool, default False + If True, only apply to numeric columns. + dropna : bool, default True + Don't consider counts of NaN/NaT. + + Returns + ------- + DataFrame + The modes of each column or row. + + See Also + -------- + Series.mode : Return the highest frequency value in a Series. + Series.value_counts : Return the counts of values in a Series. + + Examples + -------- + >>> df = pd.DataFrame([('bird', 2, 2), + ... ('mammal', 4, np.nan), + ... ('arthropod', 8, 0), + ... ('bird', 2, np.nan)], + ... index=('falcon', 'horse', 'spider', 'ostrich'), + ... columns=('species', 'legs', 'wings')) + >>> df + species legs wings + falcon bird 2 2.0 + horse mammal 4 NaN + spider arthropod 8 0.0 + ostrich bird 2 NaN + + By default, missing values are not considered, and the mode of wings + are both 0 and 2. Because the resulting DataFrame has two rows, + the second row of ``species`` and ``legs`` contains ``NaN``. + + >>> df.mode() + species legs wings + 0 bird 2.0 0.0 + 1 NaN NaN 2.0 + + Setting ``dropna=False`` ``NaN`` values are considered and they can be + the mode (like for wings). + + >>> df.mode(dropna=False) + species legs wings + 0 bird 2 NaN + + Setting ``numeric_only=True``, only the mode of numeric columns is + computed, and columns of other types are ignored. + + >>> df.mode(numeric_only=True) + legs wings + 0 2.0 0.0 + 1 NaN 2.0 + + To compute the mode over columns and not rows, use the axis parameter: + + >>> df.mode(axis='columns', numeric_only=True) + 0 1 + falcon 2.0 NaN + horse 4.0 NaN + spider 0.0 8.0 + ostrich 2.0 NaN + """ + data = self if not numeric_only else self._get_numeric_data() + + def f(s): + return s.mode(dropna=dropna) + + data = data.apply(f, axis=axis) + # Ensure index is type stable (should always use int index) + if data.empty: + data.index = default_index(0) + + return data + + @overload + def quantile( + self, + q: float = ..., + axis: Axis = ..., + numeric_only: bool = ..., + interpolation: QuantileInterpolation = ..., + ) -> Series: + ... + + @overload + def quantile( + self, + q: AnyArrayLike | Sequence[float], + axis: Axis = ..., + numeric_only: bool = ..., + interpolation: QuantileInterpolation = ..., + ) -> Series | DataFrame: + ... + + @overload + def quantile( + self, + q: float | AnyArrayLike | Sequence[float] = ..., + axis: Axis = ..., + numeric_only: bool = ..., + interpolation: QuantileInterpolation = ..., + ) -> Series | DataFrame: + ... + + def quantile( + self, + q: float | AnyArrayLike | Sequence[float] = 0.5, + axis: Axis = 0, + numeric_only: bool = False, + interpolation: QuantileInterpolation = "linear", + method: Literal["single", "table"] = "single", + ) -> Series | DataFrame: + """ + Return values at the given quantile over requested axis. + + Parameters + ---------- + q : float or array-like, default 0.5 (50% quantile) + Value between 0 <= q <= 1, the quantile(s) to compute. 
+ axis : {0 or 'index', 1 or 'columns'}, default 0 + Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise. + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionchanged:: 2.0.0 + The default value of ``numeric_only`` is now ``False``. + + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + This optional parameter specifies the interpolation method to use, + when the desired quantile lies between two data points `i` and `j`: + + * linear: `i + (j - i) * fraction`, where `fraction` is the + fractional part of the index surrounded by `i` and `j`. + * lower: `i`. + * higher: `j`. + * nearest: `i` or `j` whichever is nearest. + * midpoint: (`i` + `j`) / 2. + method : {'single', 'table'}, default 'single' + Whether to compute quantiles per-column ('single') or over all columns + ('table'). When 'table', the only allowed interpolation methods are + 'nearest', 'lower', and 'higher'. + + Returns + ------- + Series or DataFrame + + If ``q`` is an array, a DataFrame will be returned where the + index is ``q``, the columns are the columns of self, and the + values are the quantiles. + If ``q`` is a float, a Series will be returned where the + index is the columns of self and the values are the quantiles. + + See Also + -------- + core.window.rolling.Rolling.quantile: Rolling quantile. + numpy.percentile: Numpy function to compute the percentile. + + Examples + -------- + >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), + ... columns=['a', 'b']) + >>> df.quantile(.1) + a 1.3 + b 3.7 + Name: 0.1, dtype: float64 + >>> df.quantile([.1, .5]) + a b + 0.1 1.3 3.7 + 0.5 2.5 55.0 + + Specifying `method='table'` will compute the quantile over all columns. + + >>> df.quantile(.1, method="table", interpolation="nearest") + a 1 + b 1 + Name: 0.1, dtype: int64 + >>> df.quantile([.1, .5], method="table", interpolation="nearest") + a b + 0.1 1 1 + 0.5 3 100 + + Specifying `numeric_only=False` will also compute the quantile of + datetime and timedelta data. + + >>> df = pd.DataFrame({'A': [1, 2], + ... 'B': [pd.Timestamp('2010'), + ... pd.Timestamp('2011')], + ... 'C': [pd.Timedelta('1 days'), + ... 
pd.Timedelta('2 days')]}) + >>> df.quantile(0.5, numeric_only=False) + A 1.5 + B 2010-07-02 12:00:00 + C 1 days 12:00:00 + Name: 0.5, dtype: object + """ + validate_percentile(q) + axis = self._get_axis_number(axis) + + if not is_list_like(q): + # BlockManager.quantile expects listlike, so we wrap and unwrap here + # error: List item 0 has incompatible type "Union[float, Union[Union[ + # ExtensionArray, ndarray[Any, Any]], Index, Series], Sequence[float]]"; + # expected "float" + res_df = self.quantile( # type: ignore[call-overload] + [q], + axis=axis, + numeric_only=numeric_only, + interpolation=interpolation, + method=method, + ) + if method == "single": + res = res_df.iloc[0] + else: + # cannot directly iloc over sparse arrays + res = res_df.T.iloc[:, 0] + if axis == 1 and len(self) == 0: + # GH#41544 try to get an appropriate dtype + dtype = find_common_type(list(self.dtypes)) + if needs_i8_conversion(dtype): + return res.astype(dtype) + return res + + q = Index(q, dtype=np.float64) + data = self._get_numeric_data() if numeric_only else self + + if axis == 1: + data = data.T + + if len(data.columns) == 0: + # GH#23925 _get_numeric_data may have dropped all columns + cols = Index([], name=self.columns.name) + + dtype = np.float64 + if axis == 1: + # GH#41544 try to get an appropriate dtype + cdtype = find_common_type(list(self.dtypes)) + if needs_i8_conversion(cdtype): + dtype = cdtype + + res = self._constructor([], index=q, columns=cols, dtype=dtype) + return res.__finalize__(self, method="quantile") + + valid_method = {"single", "table"} + if method not in valid_method: + raise ValueError( + f"Invalid method: {method}. Method must be in {valid_method}." + ) + if method == "single": + res = data._mgr.quantile(qs=q, interpolation=interpolation) + elif method == "table": + valid_interpolation = {"nearest", "lower", "higher"} + if interpolation not in valid_interpolation: + raise ValueError( + f"Invalid interpolation: {interpolation}. " + f"Interpolation must be in {valid_interpolation}" + ) + # handle degenerate case + if len(data) == 0: + if data.ndim == 2: + dtype = find_common_type(list(self.dtypes)) + else: + dtype = self.dtype + return self._constructor([], index=q, columns=data.columns, dtype=dtype) + + q_idx = np.quantile(np.arange(len(data)), q, method=interpolation) + + by = data.columns + if len(by) > 1: + keys = [data._get_label_or_level_values(x) for x in by] + indexer = lexsort_indexer(keys) + else: + k = data._get_label_or_level_values(by[0]) + indexer = nargsort(k) + + res = data._mgr.take(indexer[q_idx], verify=False) + res.axes[1] = q + + result = self._constructor_from_mgr(res, axes=res.axes) + return result.__finalize__(self, method="quantile") + + def to_timestamp( + self, + freq: Frequency | None = None, + how: ToTimestampHow = "start", + axis: Axis = 0, + copy: bool | None = None, + ) -> DataFrame: + """ + Cast to DatetimeIndex of timestamps, at *beginning* of period. + + Parameters + ---------- + freq : str, default frequency of PeriodIndex + Desired frequency. + how : {'s', 'e', 'start', 'end'} + Convention for converting period to timestamp; start of period + vs. end. + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to convert (the index by default). + copy : bool, default True + If False then underlying input data is not copied. + + Returns + ------- + DataFrame + The DataFrame has a DatetimeIndex. 
+ + Examples + -------- + >>> idx = pd.PeriodIndex(['2023', '2024'], freq='Y') + >>> d = {'col1': [1, 2], 'col2': [3, 4]} + >>> df1 = pd.DataFrame(data=d, index=idx) + >>> df1 + col1 col2 + 2023 1 3 + 2024 2 4 + + The resulting timestamps will be at the beginning of the year in this case + + >>> df1 = df1.to_timestamp() + >>> df1 + col1 col2 + 2023-01-01 1 3 + 2024-01-01 2 4 + >>> df1.index + DatetimeIndex(['2023-01-01', '2024-01-01'], dtype='datetime64[ns]', freq=None) + + Using `freq` which is the offset that the Timestamps will have + + >>> df2 = pd.DataFrame(data=d, index=idx) + >>> df2 = df2.to_timestamp(freq='M') + >>> df2 + col1 col2 + 2023-01-31 1 3 + 2024-01-31 2 4 + >>> df2.index + DatetimeIndex(['2023-01-31', '2024-01-31'], dtype='datetime64[ns]', freq=None) + """ + new_obj = self.copy(deep=copy and not using_copy_on_write()) + + axis_name = self._get_axis_name(axis) + old_ax = getattr(self, axis_name) + if not isinstance(old_ax, PeriodIndex): + raise TypeError(f"unsupported Type {type(old_ax).__name__}") + + new_ax = old_ax.to_timestamp(freq=freq, how=how) + + setattr(new_obj, axis_name, new_ax) + return new_obj + + def to_period( + self, freq: Frequency | None = None, axis: Axis = 0, copy: bool | None = None + ) -> DataFrame: + """ + Convert DataFrame from DatetimeIndex to PeriodIndex. + + Convert DataFrame from DatetimeIndex to PeriodIndex with desired + frequency (inferred from index if not passed). + + Parameters + ---------- + freq : str, default + Frequency of the PeriodIndex. + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to convert (the index by default). + copy : bool, default True + If False then underlying input data is not copied. + + Returns + ------- + DataFrame + The DataFrame has a PeriodIndex. + + Examples + -------- + >>> idx = pd.to_datetime( + ... [ + ... "2001-03-31 00:00:00", + ... "2002-05-31 00:00:00", + ... "2003-08-31 00:00:00", + ... ] + ... ) + + >>> idx + DatetimeIndex(['2001-03-31', '2002-05-31', '2003-08-31'], + dtype='datetime64[ns]', freq=None) + + >>> idx.to_period("M") + PeriodIndex(['2001-03', '2002-05', '2003-08'], dtype='period[M]') + + For the yearly frequency + + >>> idx.to_period("Y") + PeriodIndex(['2001', '2002', '2003'], dtype='period[A-DEC]') + """ + new_obj = self.copy(deep=copy and not using_copy_on_write()) + + axis_name = self._get_axis_name(axis) + old_ax = getattr(self, axis_name) + if not isinstance(old_ax, DatetimeIndex): + raise TypeError(f"unsupported Type {type(old_ax).__name__}") + + new_ax = old_ax.to_period(freq=freq) + + setattr(new_obj, axis_name, new_ax) + return new_obj + + def isin(self, values: Series | DataFrame | Sequence | Mapping) -> DataFrame: + """ + Whether each element in the DataFrame is contained in values. + + Parameters + ---------- + values : iterable, Series, DataFrame or dict + The result will only be true at a location if all the + labels match. If `values` is a Series, that's the index. If + `values` is a dict, the keys must be the column names, + which must match. If `values` is a DataFrame, + then both the index and column labels must match. + + Returns + ------- + DataFrame + DataFrame of booleans showing whether each element in the DataFrame + is contained in values. + + See Also + -------- + DataFrame.eq: Equality test for DataFrame. + Series.isin: Equivalent method on Series. + Series.str.contains: Test if pattern or regex is contained within a + string of a Series or Index. + + Examples + -------- + >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, + ... 
index=['falcon', 'dog']) + >>> df + num_legs num_wings + falcon 2 2 + dog 4 0 + + When ``values`` is a list check whether every value in the DataFrame + is present in the list (which animals have 0 or 2 legs or wings) + + >>> df.isin([0, 2]) + num_legs num_wings + falcon True True + dog False True + + To check if ``values`` is *not* in the DataFrame, use the ``~`` operator: + + >>> ~df.isin([0, 2]) + num_legs num_wings + falcon False False + dog True False + + When ``values`` is a dict, we can pass values to check for each + column separately: + + >>> df.isin({'num_wings': [0, 3]}) + num_legs num_wings + falcon False False + dog False True + + When ``values`` is a Series or DataFrame the index and column must + match. Note that 'falcon' does not match based on the number of legs + in other. + + >>> other = pd.DataFrame({'num_legs': [8, 3], 'num_wings': [0, 2]}, + ... index=['spider', 'falcon']) + >>> df.isin(other) + num_legs num_wings + falcon False True + dog False False + """ + if isinstance(values, dict): + from pandas.core.reshape.concat import concat + + values = collections.defaultdict(list, values) + result = concat( + ( + self.iloc[:, [i]].isin(values[col]) + for i, col in enumerate(self.columns) + ), + axis=1, + ) + elif isinstance(values, Series): + if not values.index.is_unique: + raise ValueError("cannot compute isin with a duplicate axis.") + result = self.eq(values.reindex_like(self), axis="index") + elif isinstance(values, DataFrame): + if not (values.columns.is_unique and values.index.is_unique): + raise ValueError("cannot compute isin with a duplicate axis.") + result = self.eq(values.reindex_like(self)) + else: + if not is_list_like(values): + raise TypeError( + "only list-like or dict-like objects are allowed " + "to be passed to DataFrame.isin(), " + f"you passed a '{type(values).__name__}'" + ) + + def isin_(x): + # error: Argument 2 to "isin" has incompatible type "Union[Series, + # DataFrame, Sequence[Any], Mapping[Any, Any]]"; expected + # "Union[Union[Union[ExtensionArray, ndarray[Any, Any]], Index, + # Series], List[Any], range]" + result = algorithms.isin( + x.ravel(), + values, # type: ignore[arg-type] + ) + return result.reshape(x.shape) + + res_mgr = self._mgr.apply(isin_) + result = self._constructor_from_mgr( + res_mgr, + axes=res_mgr.axes, + ) + return result.__finalize__(self, method="isin") + + # ---------------------------------------------------------------------- + # Add index and columns + _AXIS_ORDERS: list[Literal["index", "columns"]] = ["index", "columns"] + _AXIS_TO_AXIS_NUMBER: dict[Axis, int] = { + **NDFrame._AXIS_TO_AXIS_NUMBER, + 1: 1, + "columns": 1, + } + _AXIS_LEN = len(_AXIS_ORDERS) + _info_axis_number: Literal[1] = 1 + _info_axis_name: Literal["columns"] = "columns" + + index = properties.AxisProperty( + axis=1, + doc=""" + The index (row labels) of the DataFrame. + + The index of a DataFrame is a series of labels that identify each row. + The labels can be integers, strings, or any other hashable type. The index + is used for label-based access and alignment, and can be accessed or + modified using this attribute. + + Returns + ------- + pandas.Index + The index labels of the DataFrame. + + See Also + -------- + DataFrame.columns : The column labels of the DataFrame. + DataFrame.to_numpy : Convert the DataFrame to a NumPy array. + + Examples + -------- + >>> df = pd.DataFrame({'Name': ['Alice', 'Bob', 'Aritra'], + ... 'Age': [25, 30, 35], + ... 'Location': ['Seattle', 'New York', 'Kona']}, + ... 
index=([10, 20, 30])) + >>> df.index + Index([10, 20, 30], dtype='int64') + + In this example, we create a DataFrame with 3 rows and 3 columns, + including Name, Age, and Location information. We set the index labels to + be the integers 10, 20, and 30. We then access the `index` attribute of the + DataFrame, which returns an `Index` object containing the index labels. + + >>> df.index = [100, 200, 300] + >>> df + Name Age Location + 100 Alice 25 Seattle + 200 Bob 30 New York + 300 Aritra 35 Kona + + In this example, we modify the index labels of the DataFrame by assigning + a new list of labels to the `index` attribute. The DataFrame is then + updated with the new labels, and the output shows the modified DataFrame. + """, + ) + columns = properties.AxisProperty( + axis=0, + doc=dedent( + """ + The column labels of the DataFrame. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}) + >>> df + A B + 0 1 3 + 1 2 4 + >>> df.columns + Index(['A', 'B'], dtype='object') + """ + ), + ) + + # ---------------------------------------------------------------------- + # Add plotting methods to DataFrame + plot = CachedAccessor("plot", pandas.plotting.PlotAccessor) + hist = pandas.plotting.hist_frame + boxplot = pandas.plotting.boxplot_frame + sparse = CachedAccessor("sparse", SparseFrameAccessor) + + # ---------------------------------------------------------------------- + # Internal Interface Methods + + def _to_dict_of_blocks(self, copy: bool = True): + """ + Return a dict of dtype -> Constructor Types that + each is a homogeneous dtype. + + Internal ONLY - only works for BlockManager + """ + mgr = self._mgr + # convert to BlockManager if needed -> this way support ArrayManager as well + mgr = mgr_to_mgr(mgr, "block") + mgr = cast(BlockManager, mgr) + return { + k: self._constructor_from_mgr(v, axes=v.axes).__finalize__(self) + for k, v, in mgr.to_dict(copy=copy).items() + } + + @property + def values(self) -> np.ndarray: + """ + Return a Numpy representation of the DataFrame. + + .. warning:: + + We recommend using :meth:`DataFrame.to_numpy` instead. + + Only the values in the DataFrame will be returned, the axes labels + will be removed. + + Returns + ------- + numpy.ndarray + The values of the DataFrame. + + See Also + -------- + DataFrame.to_numpy : Recommended alternative to this method. + DataFrame.index : Retrieve the index labels. + DataFrame.columns : Retrieving the column names. + + Notes + ----- + The dtype will be a lower-common-denominator dtype (implicit + upcasting); that is to say if the dtypes (even of numeric types) + are mixed, the one that accommodates all will be chosen. Use this + with care if you are not dealing with the blocks. + + e.g. If the dtypes are float16 and float32, dtype will be upcast to + float32. If dtypes are int32 and uint8, dtype will be upcast to + int32. By :func:`numpy.find_common_type` convention, mixing int64 + and uint64 will result in a float64 dtype. + + Examples + -------- + A DataFrame where all columns are the same type (e.g., int64) results + in an array of the same type. + + >>> df = pd.DataFrame({'age': [ 3, 29], + ... 'height': [94, 170], + ... 'weight': [31, 115]}) + >>> df + age height weight + 0 3 94 31 + 1 29 170 115 + >>> df.dtypes + age int64 + height int64 + weight int64 + dtype: object + >>> df.values + array([[ 3, 94, 31], + [ 29, 170, 115]]) + + A DataFrame with mixed type columns(e.g., str/object, int64, float32) + results in an ndarray of the broadest type that accommodates these + mixed types (e.g., object). 
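A short sketch contrasting .values with the recommended to_numpy(); unlike .values, to_numpy() (as used by the cov() implementation earlier in this diff) lets the caller pin the dtype and NA sentinel explicitly:
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3.5, np.nan]})
arr = df.to_numpy(dtype=float, na_value=np.nan)
print(arr.dtype)  # float64, chosen explicitly rather than inferred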
+ + >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'), + ... ('lion', 80.5, 1), + ... ('monkey', np.nan, None)], + ... columns=('name', 'max_speed', 'rank')) + >>> df2.dtypes + name object + max_speed float64 + rank object + dtype: object + >>> df2.values + array([['parrot', 24.0, 'second'], + ['lion', 80.5, 1], + ['monkey', nan, None]], dtype=object) + """ + return self._mgr.as_array() + + +def _from_nested_dict(data) -> collections.defaultdict: + new_data: collections.defaultdict = collections.defaultdict(dict) + for index, s in data.items(): + for col, v in s.items(): + new_data[col][index] = v + return new_data + + +def _reindex_for_setitem( + value: DataFrame | Series, index: Index +) -> tuple[ArrayLike, BlockValuesRefs | None]: + # reindex if necessary + + if value.index.equals(index) or not len(index): + if using_copy_on_write() and isinstance(value, Series): + return value._values, value._references + return value._values.copy(), None + + # GH#4107 + try: + reindexed_value = value.reindex(index)._values + except ValueError as err: + # raised in MultiIndex.from_tuples, see test_insert_error_msmgs + if not value.index.is_unique: + # duplicate axis + raise err + + raise TypeError( + "incompatible index of inserted column with frame index" + ) from err + return reindexed_value, None diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/generic.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/generic.py new file mode 100644 index 00000000..564d5722 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/generic.py @@ -0,0 +1,13443 @@ +# pyright: reportPropertyTypeMismatch=false +from __future__ import annotations + +import collections +import datetime as dt +from functools import partial +import gc +from json import loads +import operator +import pickle +import re +import sys +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Literal, + NoReturn, + cast, + final, + overload, +) +import warnings +import weakref + +import numpy as np + +from pandas._config import ( + config, + using_copy_on_write, +) + +from pandas._libs import lib +from pandas._libs.lib import is_range_indexer +from pandas._libs.tslibs import ( + Period, + Tick, + Timestamp, + to_offset, +) +from pandas._typing import ( + AlignJoin, + AnyArrayLike, + ArrayLike, + Axes, + Axis, + AxisInt, + CompressionOptions, + DtypeArg, + DtypeBackend, + DtypeObj, + FilePath, + FillnaOptions, + FloatFormatType, + FormattersType, + Frequency, + IgnoreRaise, + IndexKeyFunc, + IndexLabel, + InterpolateOptions, + IntervalClosedType, + JSONSerializable, + Level, + Manager, + NaPosition, + NDFrameT, + OpenFileErrors, + RandomState, + ReindexMethod, + Renamer, + Scalar, + Self, + SortKind, + StorageOptions, + Suffixes, + T, + TimeAmbiguous, + TimedeltaConvertibleTypes, + TimeNonexistent, + TimestampConvertibleTypes, + TimeUnit, + ValueKeyFunc, + WriteBuffer, + WriteExcelBuffer, + npt, +) +from pandas.compat import PYPY +from pandas.compat._constants import REF_COUNT +from pandas.compat._optional import import_optional_dependency +from pandas.compat.numpy import function as nv +from pandas.errors import ( + AbstractMethodError, + ChainedAssignmentError, + InvalidIndexError, + SettingWithCopyError, + SettingWithCopyWarning, + _chained_assignment_method_msg, +) +from pandas.util._decorators import ( + deprecate_nonkeyword_arguments, + doc, +) +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import ( + check_dtype_backend, + validate_ascending, + 
validate_bool_kwarg, + validate_fillna_kwargs, + validate_inclusive, +) + +from pandas.core.dtypes.astype import astype_is_view +from pandas.core.dtypes.common import ( + ensure_object, + ensure_platform_int, + ensure_str, + is_bool, + is_bool_dtype, + is_dict_like, + is_extension_array_dtype, + is_list_like, + is_number, + is_numeric_dtype, + is_re_compilable, + is_scalar, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + ExtensionDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.inference import ( + is_hashable, + is_nested_list_like, +) +from pandas.core.dtypes.missing import ( + isna, + notna, +) + +from pandas.core import ( + algorithms as algos, + arraylike, + common, + indexing, + missing, + nanops, + sample, +) +from pandas.core.array_algos.replace import should_use_regex +from pandas.core.arrays import ExtensionArray +from pandas.core.base import PandasObject +from pandas.core.construction import extract_array +from pandas.core.flags import Flags +from pandas.core.indexes.api import ( + DatetimeIndex, + Index, + MultiIndex, + PeriodIndex, + RangeIndex, + default_index, + ensure_index, +) +from pandas.core.internals import ( + ArrayManager, + BlockManager, + SingleArrayManager, +) +from pandas.core.internals.construction import ( + mgr_to_mgr, + ndarray_to_mgr, +) +from pandas.core.methods.describe import describe_ndframe +from pandas.core.missing import ( + clean_fill_method, + clean_reindex_fill_method, + find_valid_index, +) +from pandas.core.reshape.concat import concat +from pandas.core.shared_docs import _shared_docs +from pandas.core.sorting import get_indexer_indexer +from pandas.core.window import ( + Expanding, + ExponentialMovingWindow, + Rolling, + Window, +) + +from pandas.io.formats.format import ( + DataFrameFormatter, + DataFrameRenderer, +) +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + Mapping, + Sequence, + ) + + from pandas._libs.tslibs import BaseOffset + + from pandas import ( + DataFrame, + ExcelWriter, + HDFStore, + Series, + ) + from pandas.core.indexers.objects import BaseIndexer + from pandas.core.resample import Resampler + +# goal is to be able to define the docs close to function, while still being +# able to share +_shared_docs = {**_shared_docs} +_shared_doc_kwargs = { + "axes": "keywords for axes", + "klass": "Series/DataFrame", + "axes_single_arg": "{0 or 'index'} for Series, {0 or 'index', 1 or 'columns'} for DataFrame", # noqa: E501 + "inplace": """ + inplace : bool, default False + If True, performs operation inplace and returns None.""", + "optional_by": """ + by : str or list of str + Name or list of names to sort by""", +} + + +bool_t = bool # Need alias because NDFrame has def bool: + + +class NDFrame(PandasObject, indexing.IndexingMixin): + """ + N-dimensional analogue of DataFrame. 
Store multi-dimensional data in a + size-mutable, labeled data structure + + Parameters + ---------- + data : BlockManager + axes : list + copy : bool, default False + """ + + _internal_names: list[str] = [ + "_mgr", + "_cacher", + "_item_cache", + "_cache", + "_is_copy", + "_name", + "_metadata", + "__array_struct__", + "__array_interface__", + "_flags", + ] + _internal_names_set: set[str] = set(_internal_names) + _accessors: set[str] = set() + _hidden_attrs: frozenset[str] = frozenset([]) + _metadata: list[str] = [] + _is_copy: weakref.ReferenceType[NDFrame] | str | None = None + _mgr: Manager + _attrs: dict[Hashable, Any] + _typ: str + + # ---------------------------------------------------------------------- + # Constructors + + def __init__(self, data: Manager) -> None: + object.__setattr__(self, "_is_copy", None) + object.__setattr__(self, "_mgr", data) + object.__setattr__(self, "_item_cache", {}) + object.__setattr__(self, "_attrs", {}) + object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True)) + + @final + @classmethod + def _init_mgr( + cls, + mgr: Manager, + axes: dict[Literal["index", "columns"], Axes | None], + dtype: DtypeObj | None = None, + copy: bool_t = False, + ) -> Manager: + """Passed a manager and an axes dict.""" + for a, axe in axes.items(): + if axe is not None: + axe = ensure_index(axe) + bm_axis = cls._get_block_manager_axis(a) + mgr = mgr.reindex_axis(axe, axis=bm_axis) + + # make a copy if explicitly requested + if copy: + mgr = mgr.copy() + if dtype is not None: + # avoid further copies if we can + if ( + isinstance(mgr, BlockManager) + and len(mgr.blocks) == 1 + and mgr.blocks[0].values.dtype == dtype + ): + pass + else: + mgr = mgr.astype(dtype=dtype) + return mgr + + @final + def _as_manager(self, typ: str, copy: bool_t = True) -> Self: + """ + Private helper function to create a DataFrame with specific manager. + + Parameters + ---------- + typ : {"block", "array"} + copy : bool, default True + Only controls whether the conversion from Block->ArrayManager + copies the 1D arrays (to ensure proper/contiguous memory layout). + + Returns + ------- + DataFrame + New DataFrame using specified manager type. The result is not + guaranteed to be a copy. + """ + new_mgr: Manager + new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy) + # fastpath of passing a manager doesn't check the option/manager class + return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(self) + + @classmethod + def _from_mgr(cls, mgr: Manager, axes: list[Index]) -> Self: + """ + Construct a new object of this type from a Manager object and axes. + + Parameters + ---------- + mgr : Manager + Must have the same ndim as cls. + axes : list[Index] + + Notes + ----- + The axes must match mgr.axes, but are required for future-proofing + in the event that axes are refactored out of the Manager objects. + """ + obj = cls.__new__(cls) + NDFrame.__init__(obj, mgr) + return obj + + # ---------------------------------------------------------------------- + # attrs and flags + + @property + def attrs(self) -> dict[Hashable, Any]: + """ + Dictionary of global attributes of this dataset. + + .. warning:: + + attrs is experimental and may change without warning. + + See Also + -------- + DataFrame.flags : Global flags applying to this object.
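A minimal sketch of the attrs behavior documented above: attrs generally ride along through derived objects via __finalize__ (experimental, per the docstring's warning):
import pandas as pd

df = pd.DataFrame({"A": [1, 2]})
df.attrs["source"] = "sensor-1"
# Many operations propagate attrs to their result:
print(df.head(1).attrs)  # {'source': 'sensor-1'}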
+ + Examples + -------- + For Series: + + >>> ser = pd.Series([1, 2, 3]) + >>> ser.attrs = {"A": [10, 20, 30]} + >>> ser.attrs + {'A': [10, 20, 30]} + + For DataFrame: + + >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}) + >>> df.attrs = {"A": [10, 20, 30]} + >>> df.attrs + {'A': [10, 20, 30]} + """ + return self._attrs + + @attrs.setter + def attrs(self, value: Mapping[Hashable, Any]) -> None: + self._attrs = dict(value) + + @final + @property + def flags(self) -> Flags: + """ + Get the properties associated with this pandas object. + + The available flags are + + * :attr:`Flags.allows_duplicate_labels` + + See Also + -------- + Flags : Flags that apply to pandas objects. + DataFrame.attrs : Global metadata applying to this dataset. + + Notes + ----- + "Flags" differ from "metadata". Flags reflect properties of the + pandas object (the Series or DataFrame). Metadata refer to properties + of the dataset, and should be stored in :attr:`DataFrame.attrs`. + + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2]}) + >>> df.flags + <Flags(allows_duplicate_labels=True)> + + Flags can be retrieved or set using ``.`` + + >>> df.flags.allows_duplicate_labels + True + >>> df.flags.allows_duplicate_labels = False + + Or by slicing with a key + + >>> df.flags["allows_duplicate_labels"] + False + >>> df.flags["allows_duplicate_labels"] = True + """ + return self._flags + + @final + def set_flags( + self, + *, + copy: bool_t = False, + allows_duplicate_labels: bool_t | None = None, + ) -> Self: + """ + Return a new object with updated flags. + + Parameters + ---------- + copy : bool, default False + Specify if a copy of the object should be made. + allows_duplicate_labels : bool, optional + Whether the returned object allows duplicate labels. + + Returns + ------- + Series or DataFrame + The same type as the caller. + + See Also + -------- + DataFrame.attrs : Global metadata applying to this dataset. + DataFrame.flags : Global flags applying to this object. + + Notes + ----- + This method returns a new object that's a view on the same data + as the input. Mutating the input or the output values will be reflected + in the other. + + This method is intended to be used in method chains. + + "Flags" differ from "metadata". Flags reflect properties of the + pandas object (the Series or DataFrame). Metadata refer to properties + of the dataset, and should be stored in :attr:`DataFrame.attrs`. + + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2]}) + >>> df.flags.allows_duplicate_labels + True + >>> df2 = df.set_flags(allows_duplicate_labels=False) + >>> df2.flags.allows_duplicate_labels + False + """ + df = self.copy(deep=copy and not using_copy_on_write()) + if allows_duplicate_labels is not None: + df.flags["allows_duplicate_labels"] = allows_duplicate_labels + return df + + @final + @classmethod + def _validate_dtype(cls, dtype) -> DtypeObj | None: + """validate the passed dtype""" + if dtype is not None: + dtype = pandas_dtype(dtype) + + # a compound dtype + if dtype.kind == "V": + raise NotImplementedError( + "compound dtypes are not implemented " + f"in the {cls.__name__} constructor" + ) + + return dtype + + # ---------------------------------------------------------------------- + # Construction + + @property + def _constructor(self) -> Callable[..., Self]: + """ + Used when a manipulation result has the same dimensions as the + original.
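A short sketch of what the allows_duplicate_labels flag managed by set_flags above enforces; DuplicateLabelError in pandas.errors is a ValueError subclass:
import pandas as pd

s = pd.Series([1, 2], index=["a", "a"])
try:
    s.set_flags(allows_duplicate_labels=False)  # duplicates already present
except ValueError as err:
    print(type(err).__name__)  # DuplicateLabelError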
+ """ + raise AbstractMethodError(self) + + # ---------------------------------------------------------------------- + # Internals + + @final + @property + def _data(self): + # GH#33054 retained because some downstream packages uses this, + # e.g. fastparquet + # GH#33333 + warnings.warn( + f"{type(self).__name__}._data is deprecated and will be removed in " + "a future version. Use public APIs instead.", + DeprecationWarning, + stacklevel=find_stack_level(), + ) + return self._mgr + + # ---------------------------------------------------------------------- + # Axis + _AXIS_ORDERS: list[Literal["index", "columns"]] + _AXIS_TO_AXIS_NUMBER: dict[Axis, AxisInt] = {0: 0, "index": 0, "rows": 0} + _info_axis_number: int + _info_axis_name: Literal["index", "columns"] + _AXIS_LEN: int + + @final + def _construct_axes_dict(self, axes: Sequence[Axis] | None = None, **kwargs): + """Return an axes dictionary for myself.""" + d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} + # error: Argument 1 to "update" of "MutableMapping" has incompatible type + # "Dict[str, Any]"; expected "SupportsKeysAndGetItem[Union[int, str], Any]" + d.update(kwargs) # type: ignore[arg-type] + return d + + @final + @classmethod + def _get_axis_number(cls, axis: Axis) -> AxisInt: + try: + return cls._AXIS_TO_AXIS_NUMBER[axis] + except KeyError: + raise ValueError(f"No axis named {axis} for object type {cls.__name__}") + + @final + @classmethod + def _get_axis_name(cls, axis: Axis) -> Literal["index", "columns"]: + axis_number = cls._get_axis_number(axis) + return cls._AXIS_ORDERS[axis_number] + + @final + def _get_axis(self, axis: Axis) -> Index: + axis_number = self._get_axis_number(axis) + assert axis_number in {0, 1} + return self.index if axis_number == 0 else self.columns + + @final + @classmethod + def _get_block_manager_axis(cls, axis: Axis) -> AxisInt: + """Map the axis to the block_manager axis.""" + axis = cls._get_axis_number(axis) + ndim = cls._AXIS_LEN + if ndim == 2: + # i.e. DataFrame + return 1 - axis + return axis + + @final + def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]: + # index or columns + axis_index = getattr(self, axis) + d = {} + prefix = axis[0] + + for i, name in enumerate(axis_index.names): + if name is not None: + key = level = name + else: + # prefix with 'i' or 'c' depending on the input axis + # e.g., you must do ilevel_0 for the 0th level of an unnamed + # multiiindex + key = f"{prefix}level_{i}" + level = i + + level_values = axis_index.get_level_values(level) + s = level_values.to_series() + s.index = axis_index + d[key] = s + + # put the index/columns itself in the dict + if isinstance(axis_index, MultiIndex): + dindex = axis_index + else: + dindex = axis_index.to_series() + + d[axis] = dindex + return d + + @final + def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]: + from pandas.core.computation.parsing import clean_column_name + + d: dict[str, Series | MultiIndex] = {} + for axis_name in self._AXIS_ORDERS: + d.update(self._get_axis_resolvers(axis_name)) + + return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)} + + @final + def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]: + """ + Return the special character free column resolvers of a dataframe. + + Column names with special characters are 'cleaned up' so that they can + be referred to by backtick quoting. + Used in :meth:`DataFrame.eval`. 
+ """ + from pandas.core.computation.parsing import clean_column_name + + if isinstance(self, ABCSeries): + return {clean_column_name(self.name): self} + + return { + clean_column_name(k): v for k, v in self.items() if not isinstance(k, int) + } + + @final + @property + def _info_axis(self) -> Index: + return getattr(self, self._info_axis_name) + + @property + def shape(self) -> tuple[int, ...]: + """ + Return a tuple of axis dimensions + """ + return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) + + @property + def axes(self) -> list[Index]: + """ + Return index label(s) of the internal NDFrame + """ + # we do it this way because if we have reversed axes, then + # the block manager shows then reversed + return [self._get_axis(a) for a in self._AXIS_ORDERS] + + @final + @property + def ndim(self) -> int: + """ + Return an int representing the number of axes / array dimensions. + + Return 1 if Series. Otherwise return 2 if DataFrame. + + See Also + -------- + ndarray.ndim : Number of array dimensions. + + Examples + -------- + >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) + >>> s.ndim + 1 + + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) + >>> df.ndim + 2 + """ + return self._mgr.ndim + + @final + @property + def size(self) -> int: + """ + Return an int representing the number of elements in this object. + + Return the number of rows if Series. Otherwise return the number of + rows times number of columns if DataFrame. + + See Also + -------- + ndarray.size : Number of elements in the array. + + Examples + -------- + >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) + >>> s.size + 3 + + >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) + >>> df.size + 4 + """ + + return int(np.prod(self.shape)) + + def set_axis( + self, + labels, + *, + axis: Axis = 0, + copy: bool_t | None = None, + ) -> Self: + """ + Assign desired index to given axis. + + Indexes for%(extended_summary_sub)s row labels can be changed by assigning + a list-like or Index. + + Parameters + ---------- + labels : list-like, Index + The values for the new index. + + axis : %(axes_single_arg)s, default 0 + The axis to update. The value 0 identifies the rows. For `Series` + this parameter is unused and defaults to 0. + + copy : bool, default True + Whether to make a copy of the underlying data. + + .. versionadded:: 1.5.0 + + Returns + ------- + %(klass)s + An object of type %(klass)s. + + See Also + -------- + %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. + """ + return self._set_axis_nocheck(labels, axis, inplace=False, copy=copy) + + @final + def _set_axis_nocheck( + self, labels, axis: Axis, inplace: bool_t, copy: bool_t | None + ): + if inplace: + setattr(self, self._get_axis_name(axis), labels) + else: + # With copy=False, we create a new object but don't copy the + # underlying data. + obj = self.copy(deep=copy and not using_copy_on_write()) + setattr(obj, obj._get_axis_name(axis), labels) + return obj + + @final + def _set_axis(self, axis: AxisInt, labels: AnyArrayLike | list) -> None: + """ + This is called from the cython code when we set the `index` attribute + directly, e.g. `series.index = [1, 2, 3]`. + """ + labels = ensure_index(labels) + self._mgr.set_axis(axis, labels) + self._clear_item_cache() + + @final + def swapaxes(self, axis1: Axis, axis2: Axis, copy: bool_t | None = None) -> Self: + """ + Interchange axes and swap values axes appropriately. + + .. deprecated:: 2.1.0 + ``swapaxes`` is deprecated and will be removed. + Please use ``transpose`` instead. 
+ + Returns + ------- + same as input + + Examples + -------- + Please see examples for :meth:`DataFrame.transpose`. + """ + warnings.warn( + # GH#51946 + f"'{type(self).__name__}.swapaxes' is deprecated and " + "will be removed in a future version. " + f"Please use '{type(self).__name__}.transpose' instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + i = self._get_axis_number(axis1) + j = self._get_axis_number(axis2) + + if i == j: + return self.copy(deep=copy and not using_copy_on_write()) + + mapping = {i: j, j: i} + + new_axes = [self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN)] + new_values = self._values.swapaxes(i, j) # type: ignore[union-attr] + if self._mgr.is_single_block and isinstance(self._mgr, BlockManager): + # This should only get hit in case of having a single block, otherwise a + # copy is made, we don't have to set up references. + new_mgr = ndarray_to_mgr( + new_values, + new_axes[0], + new_axes[1], + dtype=None, + copy=False, + typ="block", + ) + assert isinstance(new_mgr, BlockManager) + assert isinstance(self._mgr, BlockManager) + new_mgr.blocks[0].refs = self._mgr.blocks[0].refs + new_mgr.blocks[0].refs.add_reference( + new_mgr.blocks[0] # type: ignore[arg-type] + ) + if not using_copy_on_write() and copy is not False: + new_mgr = new_mgr.copy(deep=True) + + return self._constructor(new_mgr).__finalize__(self, method="swapaxes") + + return self._constructor( + new_values, + *new_axes, + # The no-copy case for CoW is handled above + copy=False, + ).__finalize__(self, method="swapaxes") + + @final + @doc(klass=_shared_doc_kwargs["klass"]) + def droplevel(self, level: IndexLabel, axis: Axis = 0) -> Self: + """ + Return {klass} with requested index / column level(s) removed. + + Parameters + ---------- + level : int, str, or list-like + If a string is given, must be the name of a level + If list-like, elements must be names or positional indexes + of levels. + + axis : {{0 or 'index', 1 or 'columns'}}, default 0 + Axis along which the level(s) is removed: + + * 0 or 'index': remove level(s) in column. + * 1 or 'columns': remove level(s) in row. + + For `Series` this parameter is unused and defaults to 0. + + Returns + ------- + {klass} + {klass} with requested index / column level(s) removed. + + Examples + -------- + >>> df = pd.DataFrame([ + ... [1, 2, 3, 4], + ... [5, 6, 7, 8], + ... [9, 10, 11, 12] + ... ]).set_index([0, 1]).rename_axis(['a', 'b']) + + >>> df.columns = pd.MultiIndex.from_tuples([ + ... ('c', 'e'), ('d', 'f') + ... ], names=['level_1', 'level_2']) + + >>> df + level_1 c d + level_2 e f + a b + 1 2 3 4 + 5 6 7 8 + 9 10 11 12 + + >>> df.droplevel('a') + level_1 c d + level_2 e f + b + 2 3 4 + 6 7 8 + 10 11 12 + + >>> df.droplevel('level_2', axis=1) + level_1 c d + a b + 1 2 3 4 + 5 6 7 8 + 9 10 11 12 + """ + labels = self._get_axis(axis) + new_labels = labels.droplevel(level) + return self.set_axis(new_labels, axis=axis, copy=None) + + def pop(self, item: Hashable) -> Series | Any: + result = self[item] + del self[item] + + return result + + @final + def squeeze(self, axis: Axis | None = None): + """ + Squeeze 1 dimensional axis objects into scalars. + + Series or DataFrames with a single element are squeezed to a scalar. + DataFrames with a single column or a single row are squeezed to a + Series. Otherwise the object is unchanged. + + This method is most useful when you don't know if your + object is a Series or DataFrame, but you do know it has just a single + column. 
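A tiny sketch of the pop method shown above: it removes a column in place and hands it back as a Series:
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
col = df.pop("a")        # returns column 'a' as a Series
print(list(df.columns))  # ['b'] -- 'a' was removed from the frame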
In that case you can safely call `squeeze` to ensure you have a + Series. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns', None}, default None + A specific axis to squeeze. By default, all length-1 axes are + squeezed. For `Series` this parameter is unused and defaults to `None`. + + Returns + ------- + DataFrame, Series, or scalar + The projection after squeezing `axis` or all the axes. + + See Also + -------- + Series.iloc : Integer-location based indexing for selecting scalars. + DataFrame.iloc : Integer-location based indexing for selecting Series. + Series.to_frame : Inverse of DataFrame.squeeze for a + single-column DataFrame. + + Examples + -------- + >>> primes = pd.Series([2, 3, 5, 7]) + + Slicing might produce a Series with a single value: + + >>> even_primes = primes[primes % 2 == 0] + >>> even_primes + 0 2 + dtype: int64 + + >>> even_primes.squeeze() + 2 + + Squeezing objects with more than one value in every axis does nothing: + + >>> odd_primes = primes[primes % 2 == 1] + >>> odd_primes + 1 3 + 2 5 + 3 7 + dtype: int64 + + >>> odd_primes.squeeze() + 1 3 + 2 5 + 3 7 + dtype: int64 + + Squeezing is even more effective when used with DataFrames. + + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) + >>> df + a b + 0 1 2 + 1 3 4 + + Slicing a single column will produce a DataFrame with the columns + having only one value: + + >>> df_a = df[['a']] + >>> df_a + a + 0 1 + 1 3 + + So the columns can be squeezed down, resulting in a Series: + + >>> df_a.squeeze('columns') + 0 1 + 1 3 + Name: a, dtype: int64 + + Slicing a single row from a single column will produce a single + scalar DataFrame: + + >>> df_0a = df.loc[df.index < 1, ['a']] + >>> df_0a + a + 0 1 + + Squeezing the rows produces a single scalar Series: + + >>> df_0a.squeeze('rows') + a 1 + Name: 0, dtype: int64 + + Squeezing all axes will project directly into a scalar: + + >>> df_0a.squeeze() + 1 + """ + axes = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),) + result = self.iloc[ + tuple( + 0 if i in axes and len(a) == 1 else slice(None) + for i, a in enumerate(self.axes) + ) + ] + if isinstance(result, NDFrame): + result = result.__finalize__(self, method="squeeze") + return result + + # ---------------------------------------------------------------------- + # Rename + + @final + def _rename( + self, + mapper: Renamer | None = None, + *, + index: Renamer | None = None, + columns: Renamer | None = None, + axis: Axis | None = None, + copy: bool_t | None = None, + inplace: bool_t = False, + level: Level | None = None, + errors: str = "ignore", + ) -> Self | None: + # called by Series.rename and DataFrame.rename + + if mapper is None and index is None and columns is None: + raise TypeError("must pass an index to rename") + + if index is not None or columns is not None: + if axis is not None: + raise TypeError( + "Cannot specify both 'axis' and any of 'index' or 'columns'" + ) + if mapper is not None: + raise TypeError( + "Cannot specify both 'mapper' and any of 'index' or 'columns'" + ) + else: + # use the mapper argument + if axis and self._get_axis_number(axis) == 1: + columns = mapper + else: + index = mapper + + self._check_inplace_and_allows_duplicate_labels(inplace) + result = self if inplace else self.copy(deep=copy and not using_copy_on_write()) + + for axis_no, replacements in enumerate((index, columns)): + if replacements is None: + continue + + ax = self._get_axis(axis_no) + f = common.get_rename_function(replacements) + + if level is not None: + level = 
ax._get_level_number(level)
+
+            # GH 13473
+            if not callable(replacements):
+                if ax._is_multi and level is not None:
+                    indexer = ax.get_level_values(level).get_indexer_for(replacements)
+                else:
+                    indexer = ax.get_indexer_for(replacements)
+
+                if errors == "raise" and len(indexer[indexer == -1]):
+                    missing_labels = [
+                        label
+                        for index, label in enumerate(replacements)
+                        if indexer[index] == -1
+                    ]
+                    raise KeyError(f"{missing_labels} not found in axis")
+
+            new_index = ax._transform_index(f, level=level)
+            result._set_axis_nocheck(new_index, axis=axis_no, inplace=True, copy=False)
+            result._clear_item_cache()
+
+        if inplace:
+            self._update_inplace(result)
+            return None
+        else:
+            return result.__finalize__(self, method="rename")
+
+    @overload
+    def rename_axis(
+        self,
+        mapper: IndexLabel | lib.NoDefault = ...,
+        *,
+        index=...,
+        columns=...,
+        axis: Axis = ...,
+        copy: bool_t | None = ...,
+        inplace: Literal[False] = ...,
+    ) -> Self:
+        ...
+
+    @overload
+    def rename_axis(
+        self,
+        mapper: IndexLabel | lib.NoDefault = ...,
+        *,
+        index=...,
+        columns=...,
+        axis: Axis = ...,
+        copy: bool_t | None = ...,
+        inplace: Literal[True],
+    ) -> None:
+        ...
+
+    @overload
+    def rename_axis(
+        self,
+        mapper: IndexLabel | lib.NoDefault = ...,
+        *,
+        index=...,
+        columns=...,
+        axis: Axis = ...,
+        copy: bool_t | None = ...,
+        inplace: bool_t = ...,
+    ) -> Self | None:
+        ...
+
+    def rename_axis(
+        self,
+        mapper: IndexLabel | lib.NoDefault = lib.no_default,
+        *,
+        index=lib.no_default,
+        columns=lib.no_default,
+        axis: Axis = 0,
+        copy: bool_t | None = None,
+        inplace: bool_t = False,
+    ) -> Self | None:
+        """
+        Set the name of the axis for the index or columns.
+
+        Parameters
+        ----------
+        mapper : scalar, list-like, optional
+            Value to set the axis name attribute.
+        index, columns : scalar, list-like, dict-like or function, optional
+            A scalar, list-like, dict-like or function transformation to
+            apply to that axis' values.
+            Note that the ``columns`` parameter is not allowed if the
+            object is a Series. This parameter only applies to DataFrame
+            objects.
+
+            Use either ``mapper`` and ``axis`` to
+            specify the axis to target with ``mapper``, or ``index``
+            and/or ``columns``.
+        axis : {0 or 'index', 1 or 'columns'}, default 0
+            The axis to rename. For `Series` this parameter is unused and defaults to 0.
+        copy : bool, default None
+            Also copy underlying data.
+        inplace : bool, default False
+            Modifies the object directly, instead of creating a new Series
+            or DataFrame.
+
+        Returns
+        -------
+        Series, DataFrame, or None
+            The same type as the caller or None if ``inplace=True``.
+
+        See Also
+        --------
+        Series.rename : Alter Series index labels or name.
+        DataFrame.rename : Alter DataFrame index labels or name.
+        Index.rename : Set new names on index.
+
+        Notes
+        -----
+        ``DataFrame.rename_axis`` supports two calling conventions
+
+        * ``(index=index_mapper, columns=columns_mapper, ...)``
+        * ``(mapper, axis={'index', 'columns'}, ...)``
+
+        The first calling convention will only modify the names of
+        the index and/or the names of the Index object that is the columns.
+        In this case, the parameter ``copy`` is ignored.
+
+        The second calling convention will modify the names of the
+        corresponding index if mapper is a list or a scalar.
+        However, if mapper is dict-like or a function, it will use the
+        deprecated behavior of modifying the axis *labels*.
+
+        We *highly* recommend using keyword arguments to clarify your
+        intent.
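+
+        For a scalar value the two conventions coincide; for example, the two
+        calls below both set the index name to ``"animal"``:
+
+        >>> df.rename_axis("animal")        # doctest: +SKIP
+        >>> df.rename_axis(index="animal")  # doctest: +SKIP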
+ + Examples + -------- + **Series** + + >>> s = pd.Series(["dog", "cat", "monkey"]) + >>> s + 0 dog + 1 cat + 2 monkey + dtype: object + >>> s.rename_axis("animal") + animal + 0 dog + 1 cat + 2 monkey + dtype: object + + **DataFrame** + + >>> df = pd.DataFrame({"num_legs": [4, 4, 2], + ... "num_arms": [0, 0, 2]}, + ... ["dog", "cat", "monkey"]) + >>> df + num_legs num_arms + dog 4 0 + cat 4 0 + monkey 2 2 + >>> df = df.rename_axis("animal") + >>> df + num_legs num_arms + animal + dog 4 0 + cat 4 0 + monkey 2 2 + >>> df = df.rename_axis("limbs", axis="columns") + >>> df + limbs num_legs num_arms + animal + dog 4 0 + cat 4 0 + monkey 2 2 + + **MultiIndex** + + >>> df.index = pd.MultiIndex.from_product([['mammal'], + ... ['dog', 'cat', 'monkey']], + ... names=['type', 'name']) + >>> df + limbs num_legs num_arms + type name + mammal dog 4 0 + cat 4 0 + monkey 2 2 + + >>> df.rename_axis(index={'type': 'class'}) + limbs num_legs num_arms + class name + mammal dog 4 0 + cat 4 0 + monkey 2 2 + + >>> df.rename_axis(columns=str.upper) + LIMBS num_legs num_arms + type name + mammal dog 4 0 + cat 4 0 + monkey 2 2 + """ + axes = {"index": index, "columns": columns} + + if axis is not None: + axis = self._get_axis_number(axis) + + inplace = validate_bool_kwarg(inplace, "inplace") + + if copy and using_copy_on_write(): + copy = False + + if mapper is not lib.no_default: + # Use v0.23 behavior if a scalar or list + non_mapper = is_scalar(mapper) or ( + is_list_like(mapper) and not is_dict_like(mapper) + ) + if non_mapper: + return self._set_axis_name( + mapper, axis=axis, inplace=inplace, copy=copy + ) + else: + raise ValueError("Use `.rename` to alter labels with a mapper.") + else: + # Use new behavior. Means that index and/or columns + # is specified + result = self if inplace else self.copy(deep=copy) + + for axis in range(self._AXIS_LEN): + v = axes.get(self._get_axis_name(axis)) + if v is lib.no_default: + continue + non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v)) + if non_mapper: + newnames = v + else: + f = common.get_rename_function(v) + curnames = self._get_axis(axis).names + newnames = [f(name) for name in curnames] + result._set_axis_name(newnames, axis=axis, inplace=True, copy=copy) + if not inplace: + return result + return None + + @final + def _set_axis_name( + self, name, axis: Axis = 0, inplace: bool_t = False, copy: bool_t | None = True + ): + """ + Set the name(s) of the axis. + + Parameters + ---------- + name : str or list of str + Name(s) to set. + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to set the label. The value 0 or 'index' specifies index, + and the value 1 or 'columns' specifies columns. + inplace : bool, default False + If `True`, do operation inplace and return None. + copy: + Whether to make a copy of the result. + + Returns + ------- + Series, DataFrame, or None + The same type as the caller or `None` if `inplace` is `True`. + + See Also + -------- + DataFrame.rename : Alter the axis labels of :class:`DataFrame`. + Series.rename : Alter the index labels or set the index name + of :class:`Series`. + Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`. + + Examples + -------- + >>> df = pd.DataFrame({"num_legs": [4, 4, 2]}, + ... ["dog", "cat", "monkey"]) + >>> df + num_legs + dog 4 + cat 4 + monkey 2 + >>> df._set_axis_name("animal") + num_legs + animal + dog 4 + cat 4 + monkey 2 + >>> df.index = pd.MultiIndex.from_product( + ... 
[["mammal"], ['dog', 'cat', 'monkey']]) + >>> df._set_axis_name(["type", "name"]) + num_legs + type name + mammal dog 4 + cat 4 + monkey 2 + """ + axis = self._get_axis_number(axis) + idx = self._get_axis(axis).set_names(name) + + inplace = validate_bool_kwarg(inplace, "inplace") + renamed = self if inplace else self.copy(deep=copy) + if axis == 0: + renamed.index = idx + else: + renamed.columns = idx + + if not inplace: + return renamed + + # ---------------------------------------------------------------------- + # Comparison Methods + + @final + def _indexed_same(self, other) -> bool_t: + return all( + self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS + ) + + @final + def equals(self, other: object) -> bool_t: + """ + Test whether two objects contain the same elements. + + This function allows two Series or DataFrames to be compared against + each other to see if they have the same shape and elements. NaNs in + the same location are considered equal. + + The row/column index do not need to have the same type, as long + as the values are considered equal. Corresponding columns must be of + the same dtype. + + Parameters + ---------- + other : Series or DataFrame + The other Series or DataFrame to be compared with the first. + + Returns + ------- + bool + True if all elements are the same in both objects, False + otherwise. + + See Also + -------- + Series.eq : Compare two Series objects of the same length + and return a Series where each element is True if the element + in each Series is equal, False otherwise. + DataFrame.eq : Compare two DataFrame objects of the same shape and + return a DataFrame where each element is True if the respective + element in each DataFrame is equal, False otherwise. + testing.assert_series_equal : Raises an AssertionError if left and + right are not equal. Provides an easy interface to ignore + inequality in dtypes, indexes and precision among others. + testing.assert_frame_equal : Like assert_series_equal, but targets + DataFrames. + numpy.array_equal : Return True if two arrays have the same shape + and elements, False otherwise. + + Examples + -------- + >>> df = pd.DataFrame({1: [10], 2: [20]}) + >>> df + 1 2 + 0 10 20 + + DataFrames df and exactly_equal have the same types and values for + their elements and column labels, which will return True. + + >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]}) + >>> exactly_equal + 1 2 + 0 10 20 + >>> df.equals(exactly_equal) + True + + DataFrames df and different_column_type have the same element + types and values, but have different types for the column labels, + which will still return True. + + >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]}) + >>> different_column_type + 1.0 2.0 + 0 10 20 + >>> df.equals(different_column_type) + True + + DataFrames df and different_data_type have different types for the + same values for their elements, and will return False even though + their column labels are the same values and types. 
+ + >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]}) + >>> different_data_type + 1 2 + 0 10.0 20.0 + >>> df.equals(different_data_type) + False + """ + if not (isinstance(other, type(self)) or isinstance(self, type(other))): + return False + other = cast(NDFrame, other) + return self._mgr.equals(other._mgr) + + # ------------------------------------------------------------------------- + # Unary Methods + + @final + def __neg__(self) -> Self: + def blk_func(values: ArrayLike): + if is_bool_dtype(values.dtype): + # error: Argument 1 to "inv" has incompatible type "Union + # [ExtensionArray, ndarray[Any, Any]]"; expected + # "_SupportsInversion[ndarray[Any, dtype[bool_]]]" + return operator.inv(values) # type: ignore[arg-type] + else: + # error: Argument 1 to "neg" has incompatible type "Union + # [ExtensionArray, ndarray[Any, Any]]"; expected + # "_SupportsNeg[ndarray[Any, dtype[Any]]]" + return operator.neg(values) # type: ignore[arg-type] + + new_data = self._mgr.apply(blk_func) + res = self._constructor_from_mgr(new_data, axes=new_data.axes) + return res.__finalize__(self, method="__neg__") + + @final + def __pos__(self) -> Self: + def blk_func(values: ArrayLike): + if is_bool_dtype(values.dtype): + return values.copy() + else: + # error: Argument 1 to "pos" has incompatible type "Union + # [ExtensionArray, ndarray[Any, Any]]"; expected + # "_SupportsPos[ndarray[Any, dtype[Any]]]" + return operator.pos(values) # type: ignore[arg-type] + + new_data = self._mgr.apply(blk_func) + res = self._constructor_from_mgr(new_data, axes=new_data.axes) + return res.__finalize__(self, method="__pos__") + + @final + def __invert__(self) -> Self: + if not self.size: + # inv fails with 0 len + return self.copy(deep=False) + + new_data = self._mgr.apply(operator.invert) + res = self._constructor_from_mgr(new_data, axes=new_data.axes) + return res.__finalize__(self, method="__invert__") + + @final + def __nonzero__(self) -> NoReturn: + raise ValueError( + f"The truth value of a {type(self).__name__} is ambiguous. " + "Use a.empty, a.bool(), a.item(), a.any() or a.all()." + ) + + __bool__ = __nonzero__ + + @final + def bool(self) -> bool_t: + """ + Return the bool of a single element Series or DataFrame. + + .. deprecated:: 2.1.0 + + bool is deprecated and will be removed in future version of pandas + + This must be a boolean scalar value, either True or False. It will raise a + ValueError if the Series or DataFrame does not have exactly 1 element, or that + element is not boolean (integer values 0 and 1 will also raise an exception). + + Returns + ------- + bool + The value in the Series or DataFrame. + + See Also + -------- + Series.astype : Change the data type of a Series, including to boolean. + DataFrame.astype : Change the data type of a DataFrame, including to boolean. + numpy.bool_ : NumPy boolean data type, used by pandas for boolean values. 
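+
+        Notes
+        -----
+        Since ``bool`` is deprecated, extract the scalar explicitly instead,
+        for example via ``squeeze``:
+
+        >>> bool(pd.Series([True]).squeeze())  # doctest: +SKIP
+        True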
+ + Examples + -------- + The method will only work for single element objects with a boolean value: + + >>> pd.Series([True]).bool() # doctest: +SKIP + True + >>> pd.Series([False]).bool() # doctest: +SKIP + False + + >>> pd.DataFrame({'col': [True]}).bool() # doctest: +SKIP + True + >>> pd.DataFrame({'col': [False]}).bool() # doctest: +SKIP + False + """ + + warnings.warn( + f"{type(self).__name__}.bool is now deprecated and will be removed " + "in future version of pandas", + FutureWarning, + stacklevel=find_stack_level(), + ) + v = self.squeeze() + if isinstance(v, (bool, np.bool_)): + return bool(v) + elif is_scalar(v): + raise ValueError( + "bool cannot act on a non-boolean single element " + f"{type(self).__name__}" + ) + + self.__nonzero__() + # for mypy (__nonzero__ raises) + return True + + @final + def abs(self) -> Self: + """ + Return a Series/DataFrame with absolute numeric value of each element. + + This function only applies to elements that are all numeric. + + Returns + ------- + abs + Series/DataFrame containing the absolute value of each element. + + See Also + -------- + numpy.absolute : Calculate the absolute value element-wise. + + Notes + ----- + For ``complex`` inputs, ``1.2 + 1j``, the absolute value is + :math:`\\sqrt{ a^2 + b^2 }`. + + Examples + -------- + Absolute numeric values in a Series. + + >>> s = pd.Series([-1.10, 2, -3.33, 4]) + >>> s.abs() + 0 1.10 + 1 2.00 + 2 3.33 + 3 4.00 + dtype: float64 + + Absolute numeric values in a Series with complex numbers. + + >>> s = pd.Series([1.2 + 1j]) + >>> s.abs() + 0 1.56205 + dtype: float64 + + Absolute numeric values in a Series with a Timedelta element. + + >>> s = pd.Series([pd.Timedelta('1 days')]) + >>> s.abs() + 0 1 days + dtype: timedelta64[ns] + + Select rows with data closest to certain value using argsort (from + `StackOverflow `__). + + >>> df = pd.DataFrame({ + ... 'a': [4, 5, 6, 7], + ... 'b': [10, 20, 30, 40], + ... 'c': [100, 50, -30, -50] + ... }) + >>> df + a b c + 0 4 10 100 + 1 5 20 50 + 2 6 30 -30 + 3 7 40 -50 + >>> df.loc[(df.c - 43).abs().argsort()] + a b c + 1 5 20 50 + 0 4 10 100 + 2 6 30 -30 + 3 7 40 -50 + """ + res_mgr = self._mgr.apply(np.abs) + return self._constructor_from_mgr(res_mgr, axes=res_mgr.axes).__finalize__( + self, name="abs" + ) + + @final + def __abs__(self) -> Self: + return self.abs() + + @final + def __round__(self, decimals: int = 0) -> Self: + return self.round(decimals).__finalize__(self, method="__round__") + + # ------------------------------------------------------------------------- + # Label or Level Combination Helpers + # + # A collection of helper methods for DataFrame/Series operations that + # accept a combination of column/index labels and levels. All such + # operations should utilize/extend these methods when possible so that we + # have consistent precedence and validation logic throughout the library. + + @final + def _is_level_reference(self, key: Level, axis: Axis = 0) -> bool_t: + """ + Test whether a key is a level reference for a given axis. + + To be considered a level reference, `key` must be a string that: + - (axis=0): Matches the name of an index level and does NOT match + a column label. + - (axis=1): Matches the name of a column level and does NOT match + an index label. 
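+
+        For example, in a DataFrame whose index is named ``'id'`` and which has
+        no column ``'id'``, the key ``'id'`` is a level reference for ``axis=0``.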
+ + Parameters + ---------- + key : Hashable + Potential level name for the given axis + axis : int, default 0 + Axis that levels are associated with (0 for index, 1 for columns) + + Returns + ------- + is_level : bool + """ + axis_int = self._get_axis_number(axis) + + return ( + key is not None + and is_hashable(key) + and key in self.axes[axis_int].names + and not self._is_label_reference(key, axis=axis_int) + ) + + @final + def _is_label_reference(self, key: Level, axis: Axis = 0) -> bool_t: + """ + Test whether a key is a label reference for a given axis. + + To be considered a label reference, `key` must be a string that: + - (axis=0): Matches a column label + - (axis=1): Matches an index label + + Parameters + ---------- + key : Hashable + Potential label name, i.e. Index entry. + axis : int, default 0 + Axis perpendicular to the axis that labels are associated with + (0 means search for column labels, 1 means search for index labels) + + Returns + ------- + is_label: bool + """ + axis_int = self._get_axis_number(axis) + other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) + + return ( + key is not None + and is_hashable(key) + and any(key in self.axes[ax] for ax in other_axes) + ) + + @final + def _is_label_or_level_reference(self, key: Level, axis: AxisInt = 0) -> bool_t: + """ + Test whether a key is a label or level reference for a given axis. + + To be considered either a label or a level reference, `key` must be a + string that: + - (axis=0): Matches a column label or an index level + - (axis=1): Matches an index label or a column level + + Parameters + ---------- + key : Hashable + Potential label or level name + axis : int, default 0 + Axis that levels are associated with (0 for index, 1 for columns) + + Returns + ------- + bool + """ + return self._is_level_reference(key, axis=axis) or self._is_label_reference( + key, axis=axis + ) + + @final + def _check_label_or_level_ambiguity(self, key: Level, axis: Axis = 0) -> None: + """ + Check whether `key` is ambiguous. + + By ambiguous, we mean that it matches both a level of the input + `axis` and a label of the other axis. + + Parameters + ---------- + key : Hashable + Label or level name. + axis : int, default 0 + Axis that levels are associated with (0 for index, 1 for columns). + + Raises + ------ + ValueError: `key` is ambiguous + """ + + axis_int = self._get_axis_number(axis) + other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) + + if ( + key is not None + and is_hashable(key) + and key in self.axes[axis_int].names + and any(key in self.axes[ax] for ax in other_axes) + ): + # Build an informative and grammatical warning + level_article, level_type = ( + ("an", "index") if axis_int == 0 else ("a", "column") + ) + + label_article, label_type = ( + ("a", "column") if axis_int == 0 else ("an", "index") + ) + + msg = ( + f"'{key}' is both {level_article} {level_type} level and " + f"{label_article} {label_type} label, which is ambiguous." + ) + raise ValueError(msg) + + @final + def _get_label_or_level_values(self, key: Level, axis: AxisInt = 0) -> ArrayLike: + """ + Return a 1-D array of values associated with `key`, a label or level + from the given `axis`. + + Retrieval logic: + - (axis=0): Return column values if `key` matches a column label. + Otherwise return index level values if `key` matches an index + level. + - (axis=1): Return row values if `key` matches an index label. 
+ Otherwise return column level values if 'key' matches a column + level + + Parameters + ---------- + key : Hashable + Label or level name. + axis : int, default 0 + Axis that levels are associated with (0 for index, 1 for columns) + + Returns + ------- + np.ndarray or ExtensionArray + + Raises + ------ + KeyError + if `key` matches neither a label nor a level + ValueError + if `key` matches multiple labels + """ + axis = self._get_axis_number(axis) + other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] + + if self._is_label_reference(key, axis=axis): + self._check_label_or_level_ambiguity(key, axis=axis) + values = self.xs(key, axis=other_axes[0])._values + elif self._is_level_reference(key, axis=axis): + values = self.axes[axis].get_level_values(key)._values + else: + raise KeyError(key) + + # Check for duplicates + if values.ndim > 1: + if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex): + multi_message = ( + "\n" + "For a multi-index, the label must be a " + "tuple with elements corresponding to each level." + ) + else: + multi_message = "" + + label_axis_name = "column" if axis == 0 else "index" + raise ValueError( + f"The {label_axis_name} label '{key}' is not unique.{multi_message}" + ) + + return values + + @final + def _drop_labels_or_levels(self, keys, axis: AxisInt = 0): + """ + Drop labels and/or levels for the given `axis`. + + For each key in `keys`: + - (axis=0): If key matches a column label then drop the column. + Otherwise if key matches an index level then drop the level. + - (axis=1): If key matches an index label then drop the row. + Otherwise if key matches a column level then drop the level. + + Parameters + ---------- + keys : str or list of str + labels or levels to drop + axis : int, default 0 + Axis that levels are associated with (0 for index, 1 for columns) + + Returns + ------- + dropped: DataFrame + + Raises + ------ + ValueError + if any `keys` match neither a label nor a level + """ + axis = self._get_axis_number(axis) + + # Validate keys + keys = common.maybe_make_list(keys) + invalid_keys = [ + k for k in keys if not self._is_label_or_level_reference(k, axis=axis) + ] + + if invalid_keys: + raise ValueError( + "The following keys are not valid labels or " + f"levels for axis {axis}: {invalid_keys}" + ) + + # Compute levels and labels to drop + levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)] + + labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)] + + # Perform copy upfront and then use inplace operations below. + # This ensures that we always perform exactly one copy. + # ``copy`` and/or ``inplace`` options could be added in the future. 
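+        # A shallow copy suffices here: the inplace operations below replace
+        # the internals of ``dropped`` without mutating ``self``.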
+ dropped = self.copy(deep=False) + + if axis == 0: + # Handle dropping index levels + if levels_to_drop: + dropped.reset_index(levels_to_drop, drop=True, inplace=True) + + # Handle dropping columns labels + if labels_to_drop: + dropped.drop(labels_to_drop, axis=1, inplace=True) + else: + # Handle dropping column levels + if levels_to_drop: + if isinstance(dropped.columns, MultiIndex): + # Drop the specified levels from the MultiIndex + dropped.columns = dropped.columns.droplevel(levels_to_drop) + else: + # Drop the last level of Index by replacing with + # a RangeIndex + dropped.columns = RangeIndex(dropped.columns.size) + + # Handle dropping index labels + if labels_to_drop: + dropped.drop(labels_to_drop, axis=0, inplace=True) + + return dropped + + # ---------------------------------------------------------------------- + # Iteration + + # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 + # Incompatible types in assignment (expression has type "None", base class + # "object" defined the type as "Callable[[object], int]") + __hash__: ClassVar[None] # type: ignore[assignment] + + def __iter__(self) -> Iterator: + """ + Iterate over info axis. + + Returns + ------- + iterator + Info axis as iterator. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}) + >>> for x in df: + ... print(x) + A + B + """ + return iter(self._info_axis) + + # can we get a better explanation of this? + def keys(self) -> Index: + """ + Get the 'info axis' (see Indexing for more). + + This is index for Series, columns for DataFrame. + + Returns + ------- + Index + Info axis. + + Examples + -------- + >>> d = pd.DataFrame(data={'A': [1, 2, 3], 'B': [0, 4, 8]}, + ... index=['a', 'b', 'c']) + >>> d + A B + a 1 0 + b 2 4 + c 3 8 + >>> d.keys() + Index(['A', 'B'], dtype='object') + """ + return self._info_axis + + def items(self): + """ + Iterate over (label, values) on info axis + + This is index for Series and columns for DataFrame. + + Returns + ------- + Generator + """ + for h in self._info_axis: + yield h, self[h] + + def __len__(self) -> int: + """Returns length of info axis""" + return len(self._info_axis) + + @final + def __contains__(self, key) -> bool_t: + """True if the key is in the info axis""" + return key in self._info_axis + + @property + def empty(self) -> bool_t: + """ + Indicator whether Series/DataFrame is empty. + + True if Series/DataFrame is entirely empty (no items), meaning any of the + axes are of length 0. + + Returns + ------- + bool + If Series/DataFrame is empty, return True, if not return False. + + See Also + -------- + Series.dropna : Return series without null values. + DataFrame.dropna : Return DataFrame with labels on given axis omitted + where (all or any) data are missing. + + Notes + ----- + If Series/DataFrame contains only NaNs, it is still not considered empty. See + the example below. + + Examples + -------- + An example of an actual empty DataFrame. Notice the index is empty: + + >>> df_empty = pd.DataFrame({'A' : []}) + >>> df_empty + Empty DataFrame + Columns: [A] + Index: [] + >>> df_empty.empty + True + + If we only have NaNs in our DataFrame, it is not considered empty! 
We + will need to drop the NaNs to make the DataFrame empty: + + >>> df = pd.DataFrame({'A' : [np.nan]}) + >>> df + A + 0 NaN + >>> df.empty + False + >>> df.dropna().empty + True + + >>> ser_empty = pd.Series({'A' : []}) + >>> ser_empty + A [] + dtype: object + >>> ser_empty.empty + False + >>> ser_empty = pd.Series() + >>> ser_empty.empty + True + """ + return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) + + # ---------------------------------------------------------------------- + # Array Interface + + # This is also set in IndexOpsMixin + # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented + __array_priority__: int = 1000 + + def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: + values = self._values + arr = np.asarray(values, dtype=dtype) + if ( + astype_is_view(values.dtype, arr.dtype) + and using_copy_on_write() + and self._mgr.is_single_block + ): + # Check if both conversions can be done without a copy + if astype_is_view(self.dtypes.iloc[0], values.dtype) and astype_is_view( + values.dtype, arr.dtype + ): + arr = arr.view() + arr.flags.writeable = False + return arr + + @final + def __array_ufunc__( + self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any + ): + return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) + + # ---------------------------------------------------------------------- + # Picklability + + @final + def __getstate__(self) -> dict[str, Any]: + meta = {k: getattr(self, k, None) for k in self._metadata} + return { + "_mgr": self._mgr, + "_typ": self._typ, + "_metadata": self._metadata, + "attrs": self.attrs, + "_flags": {k: self.flags[k] for k in self.flags._keys}, + **meta, + } + + @final + def __setstate__(self, state) -> None: + if isinstance(state, BlockManager): + self._mgr = state + elif isinstance(state, dict): + if "_data" in state and "_mgr" not in state: + # compat for older pickles + state["_mgr"] = state.pop("_data") + typ = state.get("_typ") + if typ is not None: + attrs = state.get("_attrs", {}) + if attrs is None: # should not happen, but better be on the safe side + attrs = {} + object.__setattr__(self, "_attrs", attrs) + flags = state.get("_flags", {"allows_duplicate_labels": True}) + object.__setattr__(self, "_flags", Flags(self, **flags)) + + # set in the order of internal names + # to avoid definitional recursion + # e.g. say fill_value needing _mgr to be + # defined + meta = set(self._internal_names + self._metadata) + for k in list(meta): + if k in state and k != "_flags": + v = state[k] + object.__setattr__(self, k, v) + + for k, v in state.items(): + if k not in meta: + object.__setattr__(self, k, v) + + else: + raise NotImplementedError("Pre-0.12 pickles are no longer supported") + elif len(state) == 2: + raise NotImplementedError("Pre-0.12 pickles are no longer supported") + + self._item_cache: dict[Hashable, Series] = {} + + # ---------------------------------------------------------------------- + # Rendering Methods + + def __repr__(self) -> str: + # string representation based upon iterating over self + # (since, by definition, `PandasContainers` are iterable) + prepr = f"[{','.join(map(pprint_thing, self))}]" + return f"{type(self).__name__}({prepr})" + + @final + def _repr_latex_(self): + """ + Returns a LaTeX representation for a particular object. + Mainly for use with nbconvert (jupyter notebook conversion to pdf). 
+ """ + if config.get_option("styler.render.repr") == "latex": + return self.to_latex() + else: + return None + + @final + def _repr_data_resource_(self): + """ + Not a real Jupyter special repr method, but we use the same + naming convention. + """ + if config.get_option("display.html.table_schema"): + data = self.head(config.get_option("display.max_rows")) + + as_json = data.to_json(orient="table") + as_json = cast(str, as_json) + return loads(as_json, object_pairs_hook=collections.OrderedDict) + + # ---------------------------------------------------------------------- + # I/O Methods + + @final + @doc( + klass="object", + storage_options=_shared_docs["storage_options"], + storage_options_versionadded="1.2.0", + ) + def to_excel( + self, + excel_writer: FilePath | WriteExcelBuffer | ExcelWriter, + sheet_name: str = "Sheet1", + na_rep: str = "", + float_format: str | None = None, + columns: Sequence[Hashable] | None = None, + header: Sequence[Hashable] | bool_t = True, + index: bool_t = True, + index_label: IndexLabel | None = None, + startrow: int = 0, + startcol: int = 0, + engine: Literal["openpyxl", "xlsxwriter"] | None = None, + merge_cells: bool_t = True, + inf_rep: str = "inf", + freeze_panes: tuple[int, int] | None = None, + storage_options: StorageOptions | None = None, + engine_kwargs: dict[str, Any] | None = None, + ) -> None: + """ + Write {klass} to an Excel sheet. + + To write a single {klass} to an Excel .xlsx file it is only necessary to + specify a target file name. To write to multiple sheets it is necessary to + create an `ExcelWriter` object with a target file name, and specify a sheet + in the file to write to. + + Multiple sheets may be written to by specifying unique `sheet_name`. + With all data written to the file it is necessary to save the changes. + Note that creating an `ExcelWriter` object with a file name that already + exists will result in the contents of the existing file being erased. + + Parameters + ---------- + excel_writer : path-like, file-like, or ExcelWriter object + File path or existing ExcelWriter. + sheet_name : str, default 'Sheet1' + Name of sheet which will contain DataFrame. + na_rep : str, default '' + Missing data representation. + float_format : str, optional + Format string for floating point numbers. For example + ``float_format="%.2f"`` will format 0.1234 to 0.12. + columns : sequence or list of str, optional + Columns to write. + header : bool or list of str, default True + Write out the column names. If a list of string is given it is + assumed to be aliases for the column names. + index : bool, default True + Write row names (index). + index_label : str or sequence, optional + Column label for index column(s) if desired. If not specified, and + `header` and `index` are True, then the index names are used. A + sequence should be given if the DataFrame uses MultiIndex. + startrow : int, default 0 + Upper left cell row to dump data frame. + startcol : int, default 0 + Upper left cell column to dump data frame. + engine : str, optional + Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this + via the options ``io.excel.xlsx.writer`` or + ``io.excel.xlsm.writer``. + + merge_cells : bool, default True + Write MultiIndex and Hierarchical Rows as merged cells. + inf_rep : str, default 'inf' + Representation for infinity (there is no native representation for + infinity in Excel). + freeze_panes : tuple of int (length 2), optional + Specifies the one-based bottommost row and rightmost column that + is to be frozen. 
+ {storage_options} + + .. versionadded:: {storage_options_versionadded} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. + + See Also + -------- + to_csv : Write DataFrame to a comma-separated values (csv) file. + ExcelWriter : Class for writing DataFrame objects into excel sheets. + read_excel : Read an Excel file into a pandas DataFrame. + read_csv : Read a comma-separated values (csv) file into DataFrame. + io.formats.style.Styler.to_excel : Add styles to Excel sheet. + + Notes + ----- + For compatibility with :meth:`~DataFrame.to_csv`, + to_excel serializes lists and dicts to strings before writing. + + Once a workbook has been saved it is not possible to write further + data without rewriting the whole workbook. + + Examples + -------- + + Create, write to and save a workbook: + + >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']], + ... index=['row 1', 'row 2'], + ... columns=['col 1', 'col 2']) + >>> df1.to_excel("output.xlsx") # doctest: +SKIP + + To specify the sheet name: + + >>> df1.to_excel("output.xlsx", + ... sheet_name='Sheet_name_1') # doctest: +SKIP + + If you wish to write to more than one sheet in the workbook, it is + necessary to specify an ExcelWriter object: + + >>> df2 = df1.copy() + >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP + ... df1.to_excel(writer, sheet_name='Sheet_name_1') + ... df2.to_excel(writer, sheet_name='Sheet_name_2') + + ExcelWriter can also be used to append to an existing Excel file: + + >>> with pd.ExcelWriter('output.xlsx', + ... mode='a') as writer: # doctest: +SKIP + ... df1.to_excel(writer, sheet_name='Sheet_name_3') + + To set the library that is used to write the Excel file, + you can pass the `engine` keyword (the default engine is + automatically chosen depending on the file extension): + + >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP + """ + if engine_kwargs is None: + engine_kwargs = {} + + df = self if isinstance(self, ABCDataFrame) else self.to_frame() + + from pandas.io.formats.excel import ExcelFormatter + + formatter = ExcelFormatter( + df, + na_rep=na_rep, + cols=columns, + header=header, + float_format=float_format, + index=index, + index_label=index_label, + merge_cells=merge_cells, + inf_rep=inf_rep, + ) + formatter.write( + excel_writer, + sheet_name=sheet_name, + startrow=startrow, + startcol=startcol, + freeze_panes=freeze_panes, + engine=engine, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + + @final + @doc( + storage_options=_shared_docs["storage_options"], + compression_options=_shared_docs["compression_options"] % "path_or_buf", + ) + def to_json( + self, + path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, + orient: Literal["split", "records", "index", "table", "columns", "values"] + | None = None, + date_format: str | None = None, + double_precision: int = 10, + force_ascii: bool_t = True, + date_unit: TimeUnit = "ms", + default_handler: Callable[[Any], JSONSerializable] | None = None, + lines: bool_t = False, + compression: CompressionOptions = "infer", + index: bool_t | None = None, + indent: int | None = None, + storage_options: StorageOptions | None = None, + mode: Literal["a", "w"] = "w", + ) -> str | None: + """ + Convert the object to a JSON string. + + Note NaN's and None will be converted to null and datetime objects + will be converted to UNIX timestamps. 
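+        The precise rendering of datetimes is controlled by the ``date_format``
+        and ``date_unit`` parameters described below.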
+
+        Parameters
+        ----------
+        path_or_buf : str, path object, file-like object, or None, default None
+            String, path object (implementing os.PathLike[str]), or file-like
+            object implementing a write() function. If None, the result is
+            returned as a string.
+        orient : str
+            Indication of expected JSON string format.
+
+            * Series:
+
+                - default is 'index'
+                - allowed values are: {{'split', 'records', 'index', 'table'}}.
+
+            * DataFrame:
+
+                - default is 'columns'
+                - allowed values are: {{'split', 'records', 'index', 'columns',
+                  'values', 'table'}}.
+
+            * The format of the JSON string:
+
+                - 'split' : dict like {{'index' -> [index], 'columns' -> [columns],
+                  'data' -> [values]}}
+                - 'records' : list like [{{column -> value}}, ... , {{column -> value}}]
+                - 'index' : dict like {{index -> {{column -> value}}}}
+                - 'columns' : dict like {{column -> {{index -> value}}}}
+                - 'values' : just the values array
+                - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}}
+
+                Describing the data, where data component is like ``orient='records'``.
+
+        date_format : {{None, 'epoch', 'iso'}}
+            Type of date conversion. 'epoch' = epoch milliseconds,
+            'iso' = ISO8601. The default depends on the `orient`. For
+            ``orient='table'``, the default is 'iso'. For all other orients,
+            the default is 'epoch'.
+        double_precision : int, default 10
+            The number of decimal places to use when encoding
+            floating point values. The possible maximal value is 15.
+            Passing double_precision greater than 15 will raise a ValueError.
+        force_ascii : bool, default True
+            Force encoded string to be ASCII.
+        date_unit : str, default 'ms' (milliseconds)
+            The time unit to encode to, governs timestamp and ISO8601
+            precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
+            microsecond, and nanosecond respectively.
+        default_handler : callable, default None
+            Handler to call if object cannot otherwise be converted to a
+            suitable format for JSON. Should receive a single argument which is
+            the object to convert and return a serialisable object.
+        lines : bool, default False
+            If 'orient' is 'records' write out line-delimited json format. Will
+            throw ValueError if incorrect 'orient' since others are not
+            list-like.
+        {compression_options}
+
+            .. versionchanged:: 1.4.0 Zstandard support.
+
+        index : bool or None, default None
+            The index is only used when 'orient' is 'split', 'index', 'columns',
+            or 'table'. Of these, 'index' and 'columns' do not support
+            `index=False`.
+
+        indent : int, optional
+            Length of whitespace used to indent each record.
+
+        {storage_options}
+
+            .. versionadded:: 1.2.0
+
+        mode : str, default 'w' (writing)
+            Specify the IO mode for output when supplying a path_or_buf.
+            Accepted args are 'w' (writing) and 'a' (append) only.
+            mode='a' is only supported when lines is True and orient is 'records'.
+
+        Returns
+        -------
+        None or str
+            If path_or_buf is None, returns the resulting json format as a
+            string. Otherwise returns None.
+
+        See Also
+        --------
+        read_json : Convert a JSON string to pandas object.
+
+        Notes
+        -----
+        The behavior of ``indent=0`` varies from the stdlib, which does not
+        indent the output but does insert newlines. Currently, ``indent=0``
+        and the default ``indent=None`` are equivalent in pandas, though this
+        may change in a future release.
+
+        ``orient='table'`` contains a 'pandas_version' field under 'schema'.
+        This stores the version of `pandas` used in the latest revision of the
+        schema.
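+
+        When ``lines=True`` is combined with ``orient='records'``, each record
+        is written on its own line (newline-delimited JSON); other orients
+        raise a ``ValueError``. For example:
+
+        >>> pd.DataFrame({{"a": [1, 2]}}).to_json(orient="records", lines=True)  # doctest: +SKIP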
+ + Examples + -------- + >>> from json import loads, dumps + >>> df = pd.DataFrame( + ... [["a", "b"], ["c", "d"]], + ... index=["row 1", "row 2"], + ... columns=["col 1", "col 2"], + ... ) + + >>> result = df.to_json(orient="split") + >>> parsed = loads(result) + >>> dumps(parsed, indent=4) # doctest: +SKIP + {{ + "columns": [ + "col 1", + "col 2" + ], + "index": [ + "row 1", + "row 2" + ], + "data": [ + [ + "a", + "b" + ], + [ + "c", + "d" + ] + ] + }} + + Encoding/decoding a Dataframe using ``'records'`` formatted JSON. + Note that index labels are not preserved with this encoding. + + >>> result = df.to_json(orient="records") + >>> parsed = loads(result) + >>> dumps(parsed, indent=4) # doctest: +SKIP + [ + {{ + "col 1": "a", + "col 2": "b" + }}, + {{ + "col 1": "c", + "col 2": "d" + }} + ] + + Encoding/decoding a Dataframe using ``'index'`` formatted JSON: + + >>> result = df.to_json(orient="index") + >>> parsed = loads(result) + >>> dumps(parsed, indent=4) # doctest: +SKIP + {{ + "row 1": {{ + "col 1": "a", + "col 2": "b" + }}, + "row 2": {{ + "col 1": "c", + "col 2": "d" + }} + }} + + Encoding/decoding a Dataframe using ``'columns'`` formatted JSON: + + >>> result = df.to_json(orient="columns") + >>> parsed = loads(result) + >>> dumps(parsed, indent=4) # doctest: +SKIP + {{ + "col 1": {{ + "row 1": "a", + "row 2": "c" + }}, + "col 2": {{ + "row 1": "b", + "row 2": "d" + }} + }} + + Encoding/decoding a Dataframe using ``'values'`` formatted JSON: + + >>> result = df.to_json(orient="values") + >>> parsed = loads(result) + >>> dumps(parsed, indent=4) # doctest: +SKIP + [ + [ + "a", + "b" + ], + [ + "c", + "d" + ] + ] + + Encoding with Table Schema: + + >>> result = df.to_json(orient="table") + >>> parsed = loads(result) + >>> dumps(parsed, indent=4) # doctest: +SKIP + {{ + "schema": {{ + "fields": [ + {{ + "name": "index", + "type": "string" + }}, + {{ + "name": "col 1", + "type": "string" + }}, + {{ + "name": "col 2", + "type": "string" + }} + ], + "primaryKey": [ + "index" + ], + "pandas_version": "1.4.0" + }}, + "data": [ + {{ + "index": "row 1", + "col 1": "a", + "col 2": "b" + }}, + {{ + "index": "row 2", + "col 1": "c", + "col 2": "d" + }} + ] + }} + """ + from pandas.io import json + + if date_format is None and orient == "table": + date_format = "iso" + elif date_format is None: + date_format = "epoch" + + config.is_nonnegative_int(indent) + indent = indent or 0 + + return json.to_json( + path_or_buf=path_or_buf, + obj=self, + orient=orient, + date_format=date_format, + double_precision=double_precision, + force_ascii=force_ascii, + date_unit=date_unit, + default_handler=default_handler, + lines=lines, + compression=compression, + index=index, + indent=indent, + storage_options=storage_options, + mode=mode, + ) + + @final + def to_hdf( + self, + path_or_buf: FilePath | HDFStore, + key: str, + mode: Literal["a", "w", "r+"] = "a", + complevel: int | None = None, + complib: Literal["zlib", "lzo", "bzip2", "blosc"] | None = None, + append: bool_t = False, + format: Literal["fixed", "table"] | None = None, + index: bool_t = True, + min_itemsize: int | dict[str, int] | None = None, + nan_rep=None, + dropna: bool_t | None = None, + data_columns: Literal[True] | list[str] | None = None, + errors: OpenFileErrors = "strict", + encoding: str = "UTF-8", + ) -> None: + """ + Write the contained data to an HDF5 file using HDFStore. + + Hierarchical Data Format (HDF) is self-describing, allowing an + application to interpret the structure and contents of a file with + no outside information. 
One HDF file can hold a mix of related objects
+        which can be accessed as a group or as individual objects.
+
+        In order to add another DataFrame or Series to an existing HDF file
+        please use append mode and a different key.
+
+        .. warning::
+
+           One can store a subclass of ``DataFrame`` or ``Series`` to HDF5,
+           but the type of the subclass is lost upon storing.
+
+        For more information see the :ref:`user guide <io.hdf5>`.
+
+        Parameters
+        ----------
+        path_or_buf : str or pandas.HDFStore
+            File path or HDFStore object.
+        key : str
+            Identifier for the group in the store.
+        mode : {'a', 'w', 'r+'}, default 'a'
+            Mode to open file:
+
+            - 'w': write, a new file is created (an existing file with
+              the same name would be deleted).
+            - 'a': append, an existing file is opened for reading and
+              writing, and if the file does not exist it is created.
+            - 'r+': similar to 'a', but the file must already exist.
+        complevel : {0-9}, default None
+            Specifies a compression level for data.
+            A value of 0 or None disables compression.
+        complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
+            Specifies the compression library to be used.
+            These additional compressors for Blosc are supported
+            (default if no compressor specified: 'blosc:blosclz'):
+            {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
+            'blosc:zlib', 'blosc:zstd'}.
+            Specifying a compression library which is not available issues
+            a ValueError.
+        append : bool, default False
+            For Table formats, append the input data to the existing.
+        format : {'fixed', 'table', None}, default 'fixed'
+            Possible values:
+
+            - 'fixed': Fixed format. Fast writing/reading. Not-appendable,
+              nor searchable.
+            - 'table': Table format. Write as a PyTables Table structure
+              which may perform worse but allow more flexible operations
+              like searching / selecting subsets of the data.
+            - If None, pd.get_option('io.hdf.default_format') is checked,
+              followed by fallback to "fixed".
+        index : bool, default True
+            Write DataFrame index as a column.
+        min_itemsize : dict or int, optional
+            Map column names to minimum string sizes for columns.
+        nan_rep : Any, optional
+            How to represent null values as str.
+            Not allowed with append=True.
+        dropna : bool, default False, optional
+            Remove missing values.
+        data_columns : list of columns or True, optional
+            List of columns to create as indexed data columns for on-disk
+            queries, or True to use all columns. By default only the axes
+            of the object are indexed. See
+            :ref:`Query via data columns <io.hdf5-query-data-columns>` for
+            more information.
+            Applicable only to format='table'.
+        errors : str, default 'strict'
+            Specifies how encoding and decoding errors are to be handled.
+            See the errors argument for :func:`open` for a full list
+            of options.
+        encoding : str, default "UTF-8"
+
+        See Also
+        --------
+        read_hdf : Read from HDF file.
+        DataFrame.to_orc : Write a DataFrame to the binary orc format.
+        DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
+        DataFrame.to_sql : Write to a SQL table.
+        DataFrame.to_feather : Write out feather-format for DataFrames.
+        DataFrame.to_csv : Write out to a csv file.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
+        ...
index=['a', 'b', 'c']) # doctest: +SKIP + >>> df.to_hdf('data.h5', key='df', mode='w') # doctest: +SKIP + + We can add another object to the same file: + + >>> s = pd.Series([1, 2, 3, 4]) # doctest: +SKIP + >>> s.to_hdf('data.h5', key='s') # doctest: +SKIP + + Reading from HDF file: + + >>> pd.read_hdf('data.h5', 'df') # doctest: +SKIP + A B + a 1 4 + b 2 5 + c 3 6 + >>> pd.read_hdf('data.h5', 's') # doctest: +SKIP + 0 1 + 1 2 + 2 3 + 3 4 + dtype: int64 + """ + from pandas.io import pytables + + # Argument 3 to "to_hdf" has incompatible type "NDFrame"; expected + # "Union[DataFrame, Series]" [arg-type] + pytables.to_hdf( + path_or_buf, + key, + self, # type: ignore[arg-type] + mode=mode, + complevel=complevel, + complib=complib, + append=append, + format=format, + index=index, + min_itemsize=min_itemsize, + nan_rep=nan_rep, + dropna=dropna, + data_columns=data_columns, + errors=errors, + encoding=encoding, + ) + + @final + @deprecate_nonkeyword_arguments( + version="3.0", allowed_args=["self", "name", "con"], name="to_sql" + ) + def to_sql( + self, + name: str, + con, + schema: str | None = None, + if_exists: Literal["fail", "replace", "append"] = "fail", + index: bool_t = True, + index_label: IndexLabel | None = None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + method: Literal["multi"] | Callable | None = None, + ) -> int | None: + """ + Write records stored in a DataFrame to a SQL database. + + Databases supported by SQLAlchemy [1]_ are supported. Tables can be + newly created, appended to, or overwritten. + + Parameters + ---------- + name : str + Name of SQL table. + con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection + Using SQLAlchemy makes it possible to use any DB supported by that + library. Legacy support is provided for sqlite3.Connection objects. The user + is responsible for engine disposal and connection closure for the SQLAlchemy + connectable. See `here \ + `_. + If passing a sqlalchemy.engine.Connection which is already in a transaction, + the transaction will not be committed. If passing a sqlite3.Connection, + it will not be possible to roll back the record insertion. + + schema : str, optional + Specify the schema (if database flavor supports this). If None, use + default schema. + if_exists : {'fail', 'replace', 'append'}, default 'fail' + How to behave if the table already exists. + + * fail: Raise a ValueError. + * replace: Drop the table before inserting new values. + * append: Insert new values to the existing table. + + index : bool, default True + Write DataFrame index as a column. Uses `index_label` as the column + name in the table. + index_label : str or sequence, default None + Column label for index column(s). If None is given (default) and + `index` is True, then the index names are used. + A sequence should be given if the DataFrame uses MultiIndex. + chunksize : int, optional + Specify the number of rows in each batch to be written at a time. + By default, all rows will be written at once. + dtype : dict or scalar, optional + Specifying the datatype for columns. If a dictionary is used, the + keys should be the column names and the values should be the + SQLAlchemy types or strings for the sqlite3 legacy mode. If a + scalar is provided, it will be applied to all columns. + method : {None, 'multi', callable}, optional + Controls the SQL insertion clause used: + + * None : Uses standard SQL ``INSERT`` clause (one per row). + * 'multi': Pass multiple values in a single ``INSERT`` clause. 
+ * callable with signature ``(pd_table, conn, keys, data_iter)``. + + Details and a sample callable implementation can be found in the + section :ref:`insert method `. + + Returns + ------- + None or int + Number of rows affected by to_sql. None is returned if the callable + passed into ``method`` does not return an integer number of rows. + + The number of returned rows affected is the sum of the ``rowcount`` + attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable which may not + reflect the exact number of written rows as stipulated in the + `sqlite3 `__ or + `SQLAlchemy `__. + + .. versionadded:: 1.4.0 + + Raises + ------ + ValueError + When the table already exists and `if_exists` is 'fail' (the + default). + + See Also + -------- + read_sql : Read a DataFrame from a table. + + Notes + ----- + Timezone aware datetime columns will be written as + ``Timestamp with timezone`` type with SQLAlchemy if supported by the + database. Otherwise, the datetimes will be stored as timezone unaware + timestamps local to the original timezone. + + References + ---------- + .. [1] https://docs.sqlalchemy.org + .. [2] https://www.python.org/dev/peps/pep-0249/ + + Examples + -------- + Create an in-memory SQLite database. + + >>> from sqlalchemy import create_engine + >>> engine = create_engine('sqlite://', echo=False) + + Create a table from scratch with 3 rows. + + >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']}) + >>> df + name + 0 User 1 + 1 User 2 + 2 User 3 + + >>> df.to_sql(name='users', con=engine) + 3 + >>> from sqlalchemy import text + >>> with engine.connect() as conn: + ... conn.execute(text("SELECT * FROM users")).fetchall() + [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')] + + An `sqlalchemy.engine.Connection` can also be passed to `con`: + + >>> with engine.begin() as connection: + ... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']}) + ... df1.to_sql(name='users', con=connection, if_exists='append') + 2 + + This is allowed to support operations that require that the same + DBAPI connection is used for the entire operation. + + >>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']}) + >>> df2.to_sql(name='users', con=engine, if_exists='append') + 2 + >>> with engine.connect() as conn: + ... conn.execute(text("SELECT * FROM users")).fetchall() + [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'), + (0, 'User 4'), (1, 'User 5'), (0, 'User 6'), + (1, 'User 7')] + + Overwrite the table with just ``df2``. + + >>> df2.to_sql(name='users', con=engine, if_exists='replace', + ... index_label='id') + 2 + >>> with engine.connect() as conn: + ... conn.execute(text("SELECT * FROM users")).fetchall() + [(0, 'User 6'), (1, 'User 7')] + + Use ``method`` to define a callable insertion method to do nothing + if there's a primary key conflict on a table in a PostgreSQL database. + + >>> from sqlalchemy.dialects.postgresql import insert + >>> def insert_on_conflict_nothing(table, conn, keys, data_iter): + ... # "a" is the primary key in "conflict_table" + ... data = [dict(zip(keys, row)) for row in data_iter] + ... stmt = insert(table.table).values(data).on_conflict_do_nothing(index_elements=["a"]) + ... result = conn.execute(stmt) + ... return result.rowcount + >>> df_conflict.to_sql(name="conflict_table", con=conn, if_exists="append", method=insert_on_conflict_nothing) # doctest: +SKIP + 0 + + For MySQL, a callable to update columns ``b`` and ``c`` if there's a conflict + on a primary key. 
+ + >>> from sqlalchemy.dialects.mysql import insert + >>> def insert_on_conflict_update(table, conn, keys, data_iter): + ... # update columns "b" and "c" on primary key conflict + ... data = [dict(zip(keys, row)) for row in data_iter] + ... stmt = ( + ... insert(table.table) + ... .values(data) + ... ) + ... stmt = stmt.on_duplicate_key_update(b=stmt.inserted.b, c=stmt.inserted.c) + ... result = conn.execute(stmt) + ... return result.rowcount + >>> df_conflict.to_sql(name="conflict_table", con=conn, if_exists="append", method=insert_on_conflict_update) # doctest: +SKIP + 2 + + Specify the dtype (especially useful for integers with missing values). + Notice that while pandas is forced to store the data as floating point, + the database supports nullable integers. When fetching the data with + Python, we get back integer scalars. + + >>> df = pd.DataFrame({"A": [1, None, 2]}) + >>> df + A + 0 1.0 + 1 NaN + 2 2.0 + + >>> from sqlalchemy.types import Integer + >>> df.to_sql(name='integers', con=engine, index=False, + ... dtype={"A": Integer()}) + 3 + + >>> with engine.connect() as conn: + ... conn.execute(text("SELECT * FROM integers")).fetchall() + [(1,), (None,), (2,)] + """ # noqa: E501 + from pandas.io import sql + + return sql.to_sql( + self, + name, + con, + schema=schema, + if_exists=if_exists, + index=index, + index_label=index_label, + chunksize=chunksize, + dtype=dtype, + method=method, + ) + + @final + @doc( + storage_options=_shared_docs["storage_options"], + compression_options=_shared_docs["compression_options"] % "path", + ) + def to_pickle( + self, + path: FilePath | WriteBuffer[bytes], + compression: CompressionOptions = "infer", + protocol: int = pickle.HIGHEST_PROTOCOL, + storage_options: StorageOptions | None = None, + ) -> None: + """ + Pickle (serialize) object to file. + + Parameters + ---------- + path : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``write()`` function. File path where + the pickled object will be stored. + {compression_options} + protocol : int + Int which indicates which protocol should be used by the pickler, + default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible + values are 0, 1, 2, 3, 4, 5. A negative value for the protocol + parameter is equivalent to setting its value to HIGHEST_PROTOCOL. + + .. [1] https://docs.python.org/3/library/pickle.html. + + {storage_options} + + .. versionadded:: 1.2.0 + + See Also + -------- + read_pickle : Load pickled pandas object (or any object) from file. + DataFrame.to_hdf : Write DataFrame to an HDF5 file. + DataFrame.to_sql : Write DataFrame to a SQL database. + DataFrame.to_parquet : Write a DataFrame to the binary parquet format. + + Examples + -------- + >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP + >>> original_df # doctest: +SKIP + foo bar + 0 0 5 + 1 1 6 + 2 2 7 + 3 3 8 + 4 4 9 + >>> original_df.to_pickle("./dummy.pkl") # doctest: +SKIP + + >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP + >>> unpickled_df # doctest: +SKIP + foo bar + 0 0 5 + 1 1 6 + 2 2 7 + 3 3 8 + 4 4 9 + """ # noqa: E501 + from pandas.io.pickle import to_pickle + + to_pickle( + self, + path, + compression=compression, + protocol=protocol, + storage_options=storage_options, + ) + + @final + def to_clipboard( + self, excel: bool_t = True, sep: str | None = None, **kwargs + ) -> None: + r""" + Copy object to the system clipboard. 
+
+        Write a text representation of object to the system clipboard.
+        This can be pasted into Excel, for example.
+
+        Parameters
+        ----------
+        excel : bool, default True
+            Produce output in a csv format for easy pasting into Excel.
+
+            - True, use the provided separator for csv pasting.
+            - False, write a string representation of the object to the clipboard.
+
+        sep : str, default ``'\t'``
+            Field delimiter.
+        **kwargs
+            These parameters will be passed to DataFrame.to_csv.
+
+        See Also
+        --------
+        DataFrame.to_csv : Write a DataFrame to a comma-separated values
+            (csv) file.
+        read_clipboard : Read text from clipboard and pass to read_csv.
+
+        Notes
+        -----
+        Requirements for your platform.
+
+          - Linux : `xclip`, or `xsel` (with `PyQt4` modules)
+          - Windows : none
+          - macOS : none
+
+        This method uses the processes developed for the package `pyperclip`. A
+        solution to render any output string format is given in the examples.
+
+        Examples
+        --------
+        Copy the contents of a DataFrame to the clipboard.
+
+        >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
+
+        >>> df.to_clipboard(sep=',')  # doctest: +SKIP
+        ... # Wrote the following to the system clipboard:
+        ... # ,A,B,C
+        ... # 0,1,2,3
+        ... # 1,4,5,6
+
+        We can omit the index by passing the keyword `index` and setting
+        it to false.
+
+        >>> df.to_clipboard(sep=',', index=False)  # doctest: +SKIP
+        ... # Wrote the following to the system clipboard:
+        ... # A,B,C
+        ... # 1,2,3
+        ... # 4,5,6
+
+        Using the original `pyperclip` package for any string output format.
+
+        .. code-block:: python
+
+           import pyperclip
+           html = df.style.to_html()
+           pyperclip.copy(html)
+        """
+        from pandas.io import clipboards
+
+        clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
+
+    @final
+    def to_xarray(self):
+        """
+        Return an xarray object from the pandas object.
+
+        Returns
+        -------
+        xarray.DataArray or xarray.Dataset
+            Data in the pandas structure converted to Dataset if the object is
+            a DataFrame, or a DataArray if the object is a Series.
+
+        See Also
+        --------
+        DataFrame.to_hdf : Write DataFrame to an HDF5 file.
+        DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
+
+        Notes
+        -----
+        See the `xarray docs <https://xarray.pydata.org/en/stable/>`__
+
+        Examples
+        --------
+        >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
+        ...                    ('parrot', 'bird', 24.0, 2),
+        ...                    ('lion', 'mammal', 80.5, 4),
+        ...                    ('monkey', 'mammal', np.nan, 4)],
+        ...                   columns=['name', 'class', 'max_speed',
+        ...                            'num_legs'])
+        >>> df
+             name   class  max_speed  num_legs
+        0  falcon    bird      389.0         2
+        1  parrot    bird       24.0         2
+        2    lion  mammal       80.5         4
+        3  monkey  mammal        NaN         4
+
+        >>> df.to_xarray()
+        <xarray.Dataset>
+        Dimensions:    (index: 4)
+        Coordinates:
+          * index      (index) int64 0 1 2 3
+        Data variables:
+            name       (index) object 'falcon' 'parrot' 'lion' 'monkey'
+            class      (index) object 'bird' 'bird' 'mammal' 'mammal'
+            max_speed  (index) float64 389.0 24.0 80.5 nan
+            num_legs   (index) int64 2 2 4 4
+
+        >>> df['max_speed'].to_xarray()
+        <xarray.DataArray 'max_speed' (index: 4)>
+        array([389. ,  24. ,  80.5,   nan])
+        Coordinates:
+          * index    (index) int64 0 1 2 3
+
+        >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
+        ...                         '2018-01-02', '2018-01-02'])
+        >>> df_multiindex = pd.DataFrame({'date': dates,
+        ...                               'animal': ['falcon', 'parrot',
+        ...                                          'falcon', 'parrot'],
+        ...                               'speed': [350, 18, 361, 15]})
+        >>> df_multiindex = df_multiindex.set_index(['date', 'animal'])
+
+        >>> df_multiindex
+                           speed
+        date       animal
+        2018-01-01 falcon    350
+                   parrot     18
+        2018-01-02 falcon    361
+                   parrot     15
+
+        >>> df_multiindex.to_xarray()
+        <xarray.Dataset>
+        Dimensions:  (date: 2, animal: 2)
+        Coordinates:
+          * date     (date) datetime64[ns] 2018-01-01 2018-01-02
+          * animal   (animal) object 'falcon' 'parrot'
+        Data variables:
+            speed    (date, animal) int64 350 18 361 15
+        """
+        xarray = import_optional_dependency("xarray")
+
+        if self.ndim == 1:
+            return xarray.DataArray.from_series(self)
+        else:
+            return xarray.Dataset.from_dataframe(self)
+
+    @overload
+    def to_latex(
+        self,
+        buf: None = ...,
+        columns: Sequence[Hashable] | None = ...,
+        header: bool_t | list[str] = ...,
+        index: bool_t = ...,
+        na_rep: str = ...,
+        formatters: FormattersType | None = ...,
+        float_format: FloatFormatType | None = ...,
+        sparsify: bool_t | None = ...,
+        index_names: bool_t = ...,
+        bold_rows: bool_t = ...,
+        column_format: str | None = ...,
+        longtable: bool_t | None = ...,
+        escape: bool_t | None = ...,
+        encoding: str | None = ...,
+        decimal: str = ...,
+        multicolumn: bool_t | None = ...,
+        multicolumn_format: str | None = ...,
+        multirow: bool_t | None = ...,
+        caption: str | tuple[str, str] | None = ...,
+        label: str | None = ...,
+        position: str | None = ...,
+    ) -> str:
+        ...
+
+    @overload
+    def to_latex(
+        self,
+        buf: FilePath | WriteBuffer[str],
+        columns: Sequence[Hashable] | None = ...,
+        header: bool_t | list[str] = ...,
+        index: bool_t = ...,
+        na_rep: str = ...,
+        formatters: FormattersType | None = ...,
+        float_format: FloatFormatType | None = ...,
+        sparsify: bool_t | None = ...,
+        index_names: bool_t = ...,
+        bold_rows: bool_t = ...,
+        column_format: str | None = ...,
+        longtable: bool_t | None = ...,
+        escape: bool_t | None = ...,
+        encoding: str | None = ...,
+        decimal: str = ...,
+        multicolumn: bool_t | None = ...,
+        multicolumn_format: str | None = ...,
+        multirow: bool_t | None = ...,
+        caption: str | tuple[str, str] | None = ...,
+        label: str | None = ...,
+        position: str | None = ...,
+    ) -> None:
+        ...
+
+    @final
+    def to_latex(
+        self,
+        buf: FilePath | WriteBuffer[str] | None = None,
+        columns: Sequence[Hashable] | None = None,
+        header: bool_t | list[str] = True,
+        index: bool_t = True,
+        na_rep: str = "NaN",
+        formatters: FormattersType | None = None,
+        float_format: FloatFormatType | None = None,
+        sparsify: bool_t | None = None,
+        index_names: bool_t = True,
+        bold_rows: bool_t = False,
+        column_format: str | None = None,
+        longtable: bool_t | None = None,
+        escape: bool_t | None = None,
+        encoding: str | None = None,
+        decimal: str = ".",
+        multicolumn: bool_t | None = None,
+        multicolumn_format: str | None = None,
+        multirow: bool_t | None = None,
+        caption: str | tuple[str, str] | None = None,
+        label: str | None = None,
+        position: str | None = None,
+    ) -> str | None:
+        r"""
+        Render object to a LaTeX tabular, longtable, or nested table.
+
+        Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted
+        into a main LaTeX document or read from an external file
+        with ``\input{{table.tex}}``.
+
+        .. versionchanged:: 1.2.0
+           Added position argument, changed meaning of caption argument.
+
+        .. versionchanged:: 2.0.0
+           Refactored to use the Styler implementation via jinja2 templating.
+
+        Parameters
+        ----------
+        buf : str, Path or StringIO-like, optional, default None
+            Buffer to write to. If None, the output is returned as a string.
+        columns : list of label, optional
+            The subset of columns to write. Writes all columns by default.
+        header : bool or list of str, default True
+            Write out the column names. If a list of strings is given,
+            it is assumed to be aliases for the column names.
+        index : bool, default True
+            Write row names (index).
+        na_rep : str, default 'NaN'
+            Missing data representation.
+        formatters : list of functions or dict of {{str: function}}, optional
+            Formatter functions to apply to columns' elements by position or
+            name. The result of each function must be a unicode string.
+            List must be of length equal to the number of columns.
+        float_format : one-parameter function or str, optional, default None
+            Formatter for floating point numbers. For example
+            ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will
+            both result in 0.1234 being formatted as 0.12.
+        sparsify : bool, optional
+            Set to False for a DataFrame with a hierarchical index to print
+            every multiindex key at each row. By default, the value will be
+            read from the config module.
+        index_names : bool, default True
+            Prints the names of the indexes.
+        bold_rows : bool, default False
+            Make the row labels bold in the output.
+        column_format : str, optional
+            The columns format as specified in `LaTeX table format
+            <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
+            columns. By default, 'l' will be used for all columns except
+            columns of numbers, which default to 'r'.
+        longtable : bool, optional
+            Use a longtable environment instead of tabular. Requires
+            adding a \usepackage{{longtable}} to your LaTeX preamble.
+            By default, the value will be read from the pandas config
+            module, and set to `True` if the option ``styler.latex.environment`` is
+            `"longtable"`.
+
+            .. versionchanged:: 2.0.0
+               The pandas option affecting this argument has changed.
+        escape : bool, optional
+            By default, the value will be read from the pandas config
+            module and set to `True` if the option ``styler.format.escape`` is
+            `"latex"`. When set to False, prevents escaping of latex special
+            characters in column names.
+
+            .. versionchanged:: 2.0.0
+               The pandas option affecting this argument has changed, as has the
+               default value to `False`.
+        encoding : str, optional
+            A string representing the encoding to use in the output file,
+            defaults to 'utf-8'.
+        decimal : str, default '.'
+            Character recognized as decimal separator, e.g. ',' in Europe.
+        multicolumn : bool, default True
+            Use \multicolumn to enhance MultiIndex columns.
+            The default will be read from the config module, and is set
+            as the option ``styler.sparse.columns``.
+
+            .. versionchanged:: 2.0.0
+               The pandas option affecting this argument has changed.
+        multicolumn_format : str, default 'r'
+            The alignment for multicolumns, similar to `column_format`.
+            The default will be read from the config module, and is set as the option
+            ``styler.latex.multicol_align``.
+
+            .. versionchanged:: 2.0.0
+               The pandas option affecting this argument has changed, as has the
+               default value to "r".
+        multirow : bool, default True
+            Use \multirow to enhance MultiIndex rows. Requires adding a
+            \usepackage{{multirow}} to your LaTeX preamble. Will print
+            centered labels (instead of top-aligned) across the contained
+            rows, separating groups via clines. The default will be read
+            from the pandas config module, and is set as the option
+            ``styler.sparse.index``.
+
+            .. versionchanged:: 2.0.0
+               The pandas option affecting this argument has changed, as has the
+               default value to `True`.
+ caption : str or tuple, optional + Tuple (full_caption, short_caption), + which results in ``\caption[short_caption]{{full_caption}}``; + if a single string is passed, no short caption will be set. + + .. versionchanged:: 1.2.0 + Optionally allow caption to be a tuple ``(full_caption, short_caption)``. + + label : str, optional + The LaTeX label to be placed inside ``\label{{}}`` in the output. + This is used with ``\ref{{}}`` in the main ``.tex`` file. + + position : str, optional + The LaTeX positional argument for tables, to be placed after + ``\begin{{}}`` in the output. + + .. versionadded:: 1.2.0 + + Returns + ------- + str or None + If buf is None, returns the result as a string. Otherwise returns None. + + See Also + -------- + io.formats.style.Styler.to_latex : Render a DataFrame to LaTeX + with conditional formatting. + DataFrame.to_string : Render a DataFrame to a console-friendly + tabular output. + DataFrame.to_html : Render a DataFrame as an HTML table. + + Notes + ----- + As of v2.0.0 this method has changed to use the Styler implementation as + part of :meth:`.Styler.to_latex` via ``jinja2`` templating. This means + that ``jinja2`` is a requirement, and needs to be installed, for this method + to function. It is advised that users switch to using Styler, since that + implementation is more frequently updated and contains much more + flexibility with the output. + + Examples + -------- + Convert a general DataFrame to LaTeX with formatting: + + >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], + ... age=[26, 45], + ... height=[181.23, 177.65])) + >>> print(df.to_latex(index=False, + ... formatters={"name": str.upper}, + ... float_format="{:.1f}".format, + ... )) # doctest: +SKIP + \begin{tabular}{lrr} + \toprule + name & age & height \\ + \midrule + RAPHAEL & 26 & 181.2 \\ + DONATELLO & 45 & 177.7 \\ + \bottomrule + \end{tabular} + """ + # Get defaults from the pandas config + if self.ndim == 1: + self = self.to_frame() + if longtable is None: + longtable = config.get_option("styler.latex.environment") == "longtable" + if escape is None: + escape = config.get_option("styler.format.escape") == "latex" + if multicolumn is None: + multicolumn = config.get_option("styler.sparse.columns") + if multicolumn_format is None: + multicolumn_format = config.get_option("styler.latex.multicol_align") + if multirow is None: + multirow = config.get_option("styler.sparse.index") + + if column_format is not None and not isinstance(column_format, str): + raise ValueError("`column_format` must be str or unicode") + length = len(self.columns) if columns is None else len(columns) + if isinstance(header, (list, tuple)) and len(header) != length: + raise ValueError(f"Writing {length} cols but got {len(header)} aliases") + + # Refactor formatters/float_format/decimal/na_rep/escape to Styler structure + base_format_ = { + "na_rep": na_rep, + "escape": "latex" if escape else None, + "decimal": decimal, + } + index_format_: dict[str, Any] = {"axis": 0, **base_format_} + column_format_: dict[str, Any] = {"axis": 1, **base_format_} + + if isinstance(float_format, str): + float_format_: Callable | None = lambda x: float_format % x + else: + float_format_ = float_format + + def _wrap(x, alt_format_): + if isinstance(x, (float, complex)) and float_format_ is not None: + return float_format_(x) + else: + return alt_format_(x) + + formatters_: list | tuple | dict | Callable | None = None + if isinstance(formatters, list): + formatters_ = { + c: partial(_wrap, alt_format_=formatters[i]) + for i, c in 
enumerate(self.columns) + } + elif isinstance(formatters, dict): + index_formatter = formatters.pop("__index__", None) + column_formatter = formatters.pop("__columns__", None) + if index_formatter is not None: + index_format_.update({"formatter": index_formatter}) + if column_formatter is not None: + column_format_.update({"formatter": column_formatter}) + + formatters_ = formatters + float_columns = self.select_dtypes(include="float").columns + for col in float_columns: + if col not in formatters.keys(): + formatters_.update({col: float_format_}) + elif formatters is None and float_format is not None: + formatters_ = partial(_wrap, alt_format_=lambda v: v) + format_index_ = [index_format_, column_format_] + + # Deal with hiding indexes and relabelling column names + hide_: list[dict] = [] + relabel_index_: list[dict] = [] + if columns: + hide_.append( + { + "subset": [c for c in self.columns if c not in columns], + "axis": "columns", + } + ) + if header is False: + hide_.append({"axis": "columns"}) + elif isinstance(header, (list, tuple)): + relabel_index_.append({"labels": header, "axis": "columns"}) + format_index_ = [index_format_] # column_format is overwritten + + if index is False: + hide_.append({"axis": "index"}) + if index_names is False: + hide_.append({"names": True, "axis": "index"}) + + render_kwargs_ = { + "hrules": True, + "sparse_index": sparsify, + "sparse_columns": sparsify, + "environment": "longtable" if longtable else None, + "multicol_align": multicolumn_format + if multicolumn + else f"naive-{multicolumn_format}", + "multirow_align": "t" if multirow else "naive", + "encoding": encoding, + "caption": caption, + "label": label, + "position": position, + "column_format": column_format, + "clines": "skip-last;data" + if (multirow and isinstance(self.index, MultiIndex)) + else None, + "bold_rows": bold_rows, + } + + return self._to_latex_via_styler( + buf, + hide=hide_, + relabel_index=relabel_index_, + format={"formatter": formatters_, **base_format_}, + format_index=format_index_, + render_kwargs=render_kwargs_, + ) + + @final + def _to_latex_via_styler( + self, + buf=None, + *, + hide: dict | list[dict] | None = None, + relabel_index: dict | list[dict] | None = None, + format: dict | list[dict] | None = None, + format_index: dict | list[dict] | None = None, + render_kwargs: dict | None = None, + ): + """ + Render object to a LaTeX tabular, longtable, or nested table. + + Uses the ``Styler`` implementation with the following, ordered, method chaining: + + .. code-block:: python + styler = Styler(DataFrame) + styler.hide(**hide) + styler.relabel_index(**relabel_index) + styler.format(**format) + styler.format_index(**format_index) + styler.to_latex(buf=buf, **render_kwargs) + + Parameters + ---------- + buf : str, Path or StringIO-like, optional, default None + Buffer to write to. If None, the output is returned as a string. + hide : dict, list of dict + Keyword args to pass to the method call of ``Styler.hide``. If a list will + call the method numerous times. + relabel_index : dict, list of dict + Keyword args to pass to the method of ``Styler.relabel_index``. If a list + will call the method numerous times. + format : dict, list of dict + Keyword args to pass to the method call of ``Styler.format``. If a list will + call the method numerous times. + format_index : dict, list of dict + Keyword args to pass to the method call of ``Styler.format_index``. If a + list will call the method numerous times. 
+ render_kwargs : dict + Keyword args to pass to the method call of ``Styler.to_latex``. + + Returns + ------- + str or None + If buf is None, returns the result as a string. Otherwise returns None. + """ + from pandas.io.formats.style import Styler + + self = cast("DataFrame", self) + styler = Styler(self, uuid="") + + for kw_name in ["hide", "relabel_index", "format", "format_index"]: + kw = vars()[kw_name] + if isinstance(kw, dict): + getattr(styler, kw_name)(**kw) + elif isinstance(kw, list): + for sub_kw in kw: + getattr(styler, kw_name)(**sub_kw) + + # bold_rows is not a direct kwarg of Styler.to_latex + render_kwargs = {} if render_kwargs is None else render_kwargs + if render_kwargs.pop("bold_rows"): + styler.map_index(lambda v: "textbf:--rwrap;") + + return styler.to_latex(buf=buf, **render_kwargs) + + @overload + def to_csv( + self, + path_or_buf: None = ..., + sep: str = ..., + na_rep: str = ..., + float_format: str | Callable | None = ..., + columns: Sequence[Hashable] | None = ..., + header: bool_t | list[str] = ..., + index: bool_t = ..., + index_label: IndexLabel | None = ..., + mode: str = ..., + encoding: str | None = ..., + compression: CompressionOptions = ..., + quoting: int | None = ..., + quotechar: str = ..., + lineterminator: str | None = ..., + chunksize: int | None = ..., + date_format: str | None = ..., + doublequote: bool_t = ..., + escapechar: str | None = ..., + decimal: str = ..., + errors: OpenFileErrors = ..., + storage_options: StorageOptions = ..., + ) -> str: + ... + + @overload + def to_csv( + self, + path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str], + sep: str = ..., + na_rep: str = ..., + float_format: str | Callable | None = ..., + columns: Sequence[Hashable] | None = ..., + header: bool_t | list[str] = ..., + index: bool_t = ..., + index_label: IndexLabel | None = ..., + mode: str = ..., + encoding: str | None = ..., + compression: CompressionOptions = ..., + quoting: int | None = ..., + quotechar: str = ..., + lineterminator: str | None = ..., + chunksize: int | None = ..., + date_format: str | None = ..., + doublequote: bool_t = ..., + escapechar: str | None = ..., + decimal: str = ..., + errors: OpenFileErrors = ..., + storage_options: StorageOptions = ..., + ) -> None: + ... + + @final + @doc( + storage_options=_shared_docs["storage_options"], + compression_options=_shared_docs["compression_options"] % "path_or_buf", + ) + def to_csv( + self, + path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, + sep: str = ",", + na_rep: str = "", + float_format: str | Callable | None = None, + columns: Sequence[Hashable] | None = None, + header: bool_t | list[str] = True, + index: bool_t = True, + index_label: IndexLabel | None = None, + mode: str = "w", + encoding: str | None = None, + compression: CompressionOptions = "infer", + quoting: int | None = None, + quotechar: str = '"', + lineterminator: str | None = None, + chunksize: int | None = None, + date_format: str | None = None, + doublequote: bool_t = True, + escapechar: str | None = None, + decimal: str = ".", + errors: OpenFileErrors = "strict", + storage_options: StorageOptions | None = None, + ) -> str | None: + r""" + Write object to a comma-separated values (csv) file. + + Parameters + ---------- + path_or_buf : str, path object, file-like object, or None, default None + String, path object (implementing os.PathLike[str]), or file-like + object implementing a write() function. If None, the result is + returned as a string. 
If a non-binary file object is passed, it should + be opened with `newline=''`, disabling universal newlines. If a binary + file object is passed, `mode` might need to contain a `'b'`. + + .. versionchanged:: 1.2.0 + + Support for binary file objects was introduced. + + sep : str, default ',' + String of length 1. Field delimiter for the output file. + na_rep : str, default '' + Missing data representation. + float_format : str, Callable, default None + Format string for floating point numbers. If a Callable is given, it takes + precedence over other numeric formatting parameters, like decimal. + columns : sequence, optional + Columns to write. + header : bool or list of str, default True + Write out the column names. If a list of strings is given it is + assumed to be aliases for the column names. + index : bool, default True + Write row names (index). + index_label : str or sequence, or False, default None + Column label for index column(s) if desired. If None is given, and + `header` and `index` are True, then the index names are used. A + sequence should be given if the object uses MultiIndex. If + False do not print fields for index names. Use index_label=False + for easier importing in R. + mode : {{'w', 'x', 'a'}}, default 'w' + Forwarded to either `open(mode=)` or `fsspec.open(mode=)` to control + the file opening. Typical values include: + + - 'w', truncate the file first. + - 'x', exclusive creation, failing if the file already exists. + - 'a', append to the end of file if it exists. + + encoding : str, optional + A string representing the encoding to use in the output file, + defaults to 'utf-8'. `encoding` is not supported if `path_or_buf` + is a non-binary file object. + {compression_options} + + May be a dict with key 'method' as compression mode + and other entries as additional compression options if + compression mode is 'zip'. + + Passing compression options as keys in dict is + supported for compression modes 'gzip', 'bz2', 'zstd', and 'zip'. + + .. versionchanged:: 1.2.0 + + Compression is supported for binary file objects. + + .. versionchanged:: 1.2.0 + + Previous versions forwarded dict entries for 'gzip' to + `gzip.open` instead of `gzip.GzipFile` which prevented + setting `mtime`. + + quoting : optional constant from csv module + Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` + then floats are converted to strings and thus csv.QUOTE_NONNUMERIC + will treat them as non-numeric. + quotechar : str, default '\"' + String of length 1. Character used to quote fields. + lineterminator : str, optional + The newline character or character sequence to use in the output + file. Defaults to `os.linesep`, which depends on the OS in which + this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.). + + .. versionchanged:: 1.5.0 + + Previously was line_terminator, changed for consistency with + read_csv and the standard library 'csv' module. + + chunksize : int or None + Rows to write at a time. + date_format : str, default None + Format string for datetime objects. + doublequote : bool, default True + Control quoting of `quotechar` inside a field. + escapechar : str, default None + String of length 1. Character used to escape `sep` and `quotechar` + when appropriate. + decimal : str, default '.' + Character recognized as decimal separator. E.g. use ',' for + European data. + errors : str, default 'strict' + Specifies how encoding and decoding errors are to be handled. + See the errors argument for :func:`open` for a full list + of options. 
+ + {storage_options} + + .. versionadded:: 1.2.0 + + Returns + ------- + None or str + If path_or_buf is None, returns the resulting csv format as a + string. Otherwise returns None. + + See Also + -------- + read_csv : Load a CSV file into a DataFrame. + to_excel : Write DataFrame to an Excel file. + + Examples + -------- + >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'], + ... 'mask': ['red', 'purple'], + ... 'weapon': ['sai', 'bo staff']}}) + >>> df.to_csv(index=False) + 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' + + Create 'out.zip' containing 'out.csv' + + >>> compression_opts = dict(method='zip', + ... archive_name='out.csv') # doctest: +SKIP + >>> df.to_csv('out.zip', index=False, + ... compression=compression_opts) # doctest: +SKIP + + To write a csv file to a new folder or nested folder you will first + need to create it using either Pathlib or os: + + >>> from pathlib import Path # doctest: +SKIP + >>> filepath = Path('folder/subfolder/out.csv') # doctest: +SKIP + >>> filepath.parent.mkdir(parents=True, exist_ok=True) # doctest: +SKIP + >>> df.to_csv(filepath) # doctest: +SKIP + + >>> import os # doctest: +SKIP + >>> os.makedirs('folder/subfolder', exist_ok=True) # doctest: +SKIP + >>> df.to_csv('folder/subfolder/out.csv') # doctest: +SKIP + """ + df = self if isinstance(self, ABCDataFrame) else self.to_frame() + + formatter = DataFrameFormatter( + frame=df, + header=header, + index=index, + na_rep=na_rep, + float_format=float_format, + decimal=decimal, + ) + + return DataFrameRenderer(formatter).to_csv( + path_or_buf, + lineterminator=lineterminator, + sep=sep, + encoding=encoding, + errors=errors, + compression=compression, + quoting=quoting, + columns=columns, + index_label=index_label, + mode=mode, + chunksize=chunksize, + quotechar=quotechar, + date_format=date_format, + doublequote=doublequote, + escapechar=escapechar, + storage_options=storage_options, + ) + + # ---------------------------------------------------------------------- + # Lookup Caching + + def _reset_cacher(self) -> None: + """ + Reset the cacher. + """ + raise AbstractMethodError(self) + + def _maybe_update_cacher( + self, + clear: bool_t = False, + verify_is_copy: bool_t = True, + inplace: bool_t = False, + ) -> None: + """ + See if we need to update our parent cacher if clear, then clear our + cache. + + Parameters + ---------- + clear : bool, default False + Clear the item cache. + verify_is_copy : bool, default True + Provide is_copy checks. + """ + if using_copy_on_write(): + return + + if verify_is_copy: + self._check_setitem_copy(t="referent") + + if clear: + self._clear_item_cache() + + def _clear_item_cache(self) -> None: + raise AbstractMethodError(self) + + # ---------------------------------------------------------------------- + # Indexing Methods + + @final + def take(self, indices, axis: Axis = 0, **kwargs) -> Self: + """ + Return the elements in the given *positional* indices along an axis. + + This means that we are not indexing according to actual values in + the index attribute of the object. We are indexing according to the + actual position of the element in the object. + + Parameters + ---------- + indices : array-like + An array of ints indicating which positions to take. + axis : {0 or 'index', 1 or 'columns', None}, default 0 + The axis on which to select elements. ``0`` means that we are + selecting rows, ``1`` means that we are selecting columns. + For `Series` this parameter is unused and defaults to 0. 
+ **kwargs + For compatibility with :meth:`numpy.take`. Has no effect on the + output. + + Returns + ------- + same type as caller + An array-like containing the elements taken from the object. + + See Also + -------- + DataFrame.loc : Select a subset of a DataFrame by labels. + DataFrame.iloc : Select a subset of a DataFrame by positions. + numpy.take : Take elements from an array along an axis. + + Examples + -------- + >>> df = pd.DataFrame([('falcon', 'bird', 389.0), + ... ('parrot', 'bird', 24.0), + ... ('lion', 'mammal', 80.5), + ... ('monkey', 'mammal', np.nan)], + ... columns=['name', 'class', 'max_speed'], + ... index=[0, 2, 3, 1]) + >>> df + name class max_speed + 0 falcon bird 389.0 + 2 parrot bird 24.0 + 3 lion mammal 80.5 + 1 monkey mammal NaN + + Take elements at positions 0 and 3 along the axis 0 (default). + + Note how the actual indices selected (0 and 1) do not correspond to + our selected indices 0 and 3. That's because we are selecting the 0th + and 3rd rows, not rows whose indices equal 0 and 3. + + >>> df.take([0, 3]) + name class max_speed + 0 falcon bird 389.0 + 1 monkey mammal NaN + + Take elements at indices 1 and 2 along the axis 1 (column selection). + + >>> df.take([1, 2], axis=1) + class max_speed + 0 bird 389.0 + 2 bird 24.0 + 3 mammal 80.5 + 1 mammal NaN + + We may take elements using negative integers for positive indices, + starting from the end of the object, just like with Python lists. + + >>> df.take([-1, -2]) + name class max_speed + 1 monkey mammal NaN + 3 lion mammal 80.5 + """ + + nv.validate_take((), kwargs) + + if not isinstance(indices, slice): + indices = np.asarray(indices, dtype=np.intp) + if ( + axis == 0 + and indices.ndim == 1 + and using_copy_on_write() + and is_range_indexer(indices, len(self)) + ): + return self.copy(deep=None) + elif self.ndim == 1: + raise TypeError( + f"{type(self).__name__}.take requires a sequence of integers, " + "not slice." + ) + else: + warnings.warn( + # GH#51539 + f"Passing a slice to {type(self).__name__}.take is deprecated " + "and will raise in a future version. Use `obj[slicer]` or pass " + "a sequence of integers instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + # We can get here with a slice via DataFrame.__getitem__ + indices = np.arange( + indices.start, indices.stop, indices.step, dtype=np.intp + ) + + new_data = self._mgr.take( + indices, + axis=self._get_block_manager_axis(axis), + verify=True, + ) + return self._constructor_from_mgr(new_data, axes=new_data.axes).__finalize__( + self, method="take" + ) + + @final + def _take_with_is_copy(self, indices, axis: Axis = 0) -> Self: + """ + Internal version of the `take` method that sets the `_is_copy` + attribute to keep track of the parent dataframe (using in indexing + for the SettingWithCopyWarning). + + For Series this does the same as the public take (it never sets `_is_copy`). + + See the docstring of `take` for full explanation of the parameters. + """ + result = self.take(indices=indices, axis=axis) + # Maybe set copy if we didn't actually change the index. + if self.ndim == 2 and not result._get_axis(axis).equals(self._get_axis(axis)): + result._set_is_copy(self) + return result + + @final + def xs( + self, + key: IndexLabel, + axis: Axis = 0, + level: IndexLabel | None = None, + drop_level: bool_t = True, + ) -> Self: + """ + Return cross-section from the Series/DataFrame. + + This method takes a `key` argument to select data at a particular + level of a MultiIndex. 
+
+        Parameters
+        ----------
+        key : label or tuple of label
+            Label contained in the index, or partially in a MultiIndex.
+        axis : {0 or 'index', 1 or 'columns'}, default 0
+            Axis to retrieve cross-section on.
+        level : object, defaults to first n levels (n=1 or len(key))
+            In case of a key partially contained in a MultiIndex, indicate
+            which levels are used. Levels can be referred by label or position.
+        drop_level : bool, default True
+            If False, returns object with same levels as self.
+
+        Returns
+        -------
+        Series or DataFrame
+            Cross-section from the original Series or DataFrame
+            corresponding to the selected index levels.
+
+        See Also
+        --------
+        DataFrame.loc : Access a group of rows and columns
+            by label(s) or a boolean array.
+        DataFrame.iloc : Purely integer-location based indexing
+            for selection by position.
+
+        Notes
+        -----
+        `xs` can not be used to set values.
+
+        MultiIndex Slicers is a generic way to get/set values on
+        any level or levels.
+        It is a superset of `xs` functionality, see
+        :ref:`MultiIndex Slicers <advanced.mi_slicers>`.
+
+        Examples
+        --------
+        >>> d = {'num_legs': [4, 4, 2, 2],
+        ...      'num_wings': [0, 0, 2, 2],
+        ...      'class': ['mammal', 'mammal', 'mammal', 'bird'],
+        ...      'animal': ['cat', 'dog', 'bat', 'penguin'],
+        ...      'locomotion': ['walks', 'walks', 'flies', 'walks']}
+        >>> df = pd.DataFrame(data=d)
+        >>> df = df.set_index(['class', 'animal', 'locomotion'])
+        >>> df
+                                   num_legs  num_wings
+        class  animal  locomotion
+        mammal cat     walks              4          0
+               dog     walks              4          0
+               bat     flies              2          2
+        bird   penguin walks              2          2
+
+        Get values at specified index
+
+        >>> df.xs('mammal')
+                           num_legs  num_wings
+        animal locomotion
+        cat    walks              4          0
+        dog    walks              4          0
+        bat    flies              2          2
+
+        Get values at several indexes
+
+        >>> df.xs(('mammal', 'dog', 'walks'))
+        num_legs     4
+        num_wings    0
+        Name: (mammal, dog, walks), dtype: int64
+
+        Get values at specified index and level
+
+        >>> df.xs('cat', level=1)
+                           num_legs  num_wings
+        class  locomotion
+        mammal walks              4          0
+
+        Get values at several indexes and levels
+
+        >>> df.xs(('bird', 'walks'),
+        ...       level=[0, 'locomotion'])
+                 num_legs  num_wings
+        animal
+        penguin         2          2
+
+        Get values at specified column and axis
+
+        >>> df.xs('num_wings', axis=1)
+        class   animal   locomotion
+        mammal  cat      walks         0
+                dog      walks         0
+                bat      flies         2
+        bird    penguin  walks         2
+        Name: num_wings, dtype: int64
+        """
+        axis = self._get_axis_number(axis)
+        labels = self._get_axis(axis)
+
+        if isinstance(key, list):
+            raise TypeError("list keys are not supported in xs, pass a tuple instead")
+
+        if level is not None:
+            if not isinstance(labels, MultiIndex):
+                raise TypeError("Index must be a MultiIndex")
+            loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
+
+            # create the tuple of the indexer
+            _indexer = [slice(None)] * self.ndim
+            _indexer[axis] = loc
+            indexer = tuple(_indexer)
+
+            result = self.iloc[indexer]
+            setattr(result, result._get_axis_name(axis), new_ax)
+            return result
+
+        if axis == 1:
+            if drop_level:
+                return self[key]
+            index = self.columns
+        else:
+            index = self.index
+
+        if isinstance(index, MultiIndex):
+            loc, new_index = index._get_loc_level(key, level=0)
+            if not drop_level:
+                if lib.is_integer(loc):
+                    # Slice index must be an integer or None
+                    new_index = index[loc : loc + 1]
+                else:
+                    new_index = index[loc]
+        else:
+            loc = index.get_loc(key)
+
+            if isinstance(loc, np.ndarray):
+                if loc.dtype == np.bool_:
+                    (inds,) = loc.nonzero()
+                    return self._take_with_is_copy(inds, axis=axis)
+                else:
+                    return self._take_with_is_copy(loc, axis=axis)
+
+            if not is_scalar(loc):
+                new_index = index[loc]
+
+        if is_scalar(loc) and axis == 0:
+            # In this case loc should be an integer
+            if self.ndim == 1:
+                # if we encounter an array-like and we only have 1 dim
+                # that means that there are list/ndarrays inside the Series!
+                # so just return them (GH 6394)
+                return self._values[loc]
+
+            new_mgr = self._mgr.fast_xs(loc)
+
+            result = self._constructor_sliced_from_mgr(new_mgr, axes=new_mgr.axes)
+            result._name = self.index[loc]
+            result = result.__finalize__(self)
+        elif is_scalar(loc):
+            result = self.iloc[:, slice(loc, loc + 1)]
+        elif axis == 1:
+            result = self.iloc[:, loc]
+        else:
+            result = self.iloc[loc]
+            result.index = new_index
+
+        # this could be a view
+        # but only in a single-dtyped view sliceable case
+        result._set_is_copy(self, copy=not result._is_view)
+        return result
+
+    def __getitem__(self, item):
+        raise AbstractMethodError(self)
+
+    @final
+    def _getitem_slice(self, key: slice) -> Self:
+        """
+        __getitem__ for the case where the key is a slice object.
+        """
+        # _convert_slice_indexer to determine if this slice is positional
+        # or label based, and if the latter, convert to positional
+        slobj = self.index._convert_slice_indexer(key, kind="getitem")
+        if isinstance(slobj, np.ndarray):
+            # reachable with DatetimeIndex
+            indexer = lib.maybe_indices_to_slice(
+                slobj.astype(np.intp, copy=False), len(self)
+            )
+            if isinstance(indexer, np.ndarray):
+                # GH#43223 If we can not convert, use take
+                return self.take(indexer, axis=0)
+            slobj = indexer
+        return self._slice(slobj)
+
+    def _slice(self, slobj: slice, axis: AxisInt = 0) -> Self:
+        """
+        Construct a slice of this container.
+
+        Slicing with this method is *always* positional.
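+
+        For example (illustrative), ``obj._slice(slice(0, 2))`` selects the
+        first two rows by position, matching ``obj.iloc[0:2]`` regardless of
+        the index labels.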
+        """
+        assert isinstance(slobj, slice), type(slobj)
+        axis = self._get_block_manager_axis(axis)
+        new_mgr = self._mgr.get_slice(slobj, axis=axis)
+        result = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes)
+        result = result.__finalize__(self)
+
+        # this could be a view
+        # but only in a single-dtyped view sliceable case
+        is_copy = axis != 0 or result._is_view
+        result._set_is_copy(self, copy=is_copy)
+        return result
+
+    @final
+    def _set_is_copy(self, ref: NDFrame, copy: bool_t = True) -> None:
+        if not copy:
+            self._is_copy = None
+        else:
+            assert ref is not None
+            self._is_copy = weakref.ref(ref)
+
+    def _check_is_chained_assignment_possible(self) -> bool_t:
+        """
+        Check if we are a view, have a cacher, and are of mixed type.
+        If so, then force a setitem_copy check.
+
+        Should be called just prior to setting a value.
+
+        Will return a boolean: True if we are a view, are cached, and are of
+        a single dtype, meaning that the cacher should be updated following
+        the setting.
+        """
+        if self._is_copy:
+            self._check_setitem_copy(t="referent")
+        return False
+
+    @final
+    def _check_setitem_copy(self, t: str = "setting", force: bool_t = False):
+        """
+
+        Parameters
+        ----------
+        t : str, the type of setting error
+        force : bool, default False
+            If True, then force showing an error.
+
+        Validate if we are doing a setitem on a chained copy.
+
+        It is technically possible to figure out that we are setting on
+        a copy even WITH a multi-dtyped pandas object. In other words, some
+        blocks may be views while others are not. Currently _is_view will ALWAYS
+        return False for multi-blocks to avoid having to handle this case.
+
+        df = DataFrame(np.arange(0,9), columns=['count'])
+        df['group'] = 'b'
+
+        # This technically need not raise SettingWithCopy if both are view
+        # (which is not generally guaranteed but is usually True). However,
+        # this is in general not a good practice and we recommend using .loc.
+ df.iloc[0:5]['group'] = 'a' + + """ + if using_copy_on_write(): + return + + # return early if the check is not needed + if not (force or self._is_copy): + return + + value = config.get_option("mode.chained_assignment") + if value is None: + return + + # see if the copy is not actually referred; if so, then dissolve + # the copy weakref + if self._is_copy is not None and not isinstance(self._is_copy, str): + r = self._is_copy() + if not gc.get_referents(r) or (r is not None and r.shape == self.shape): + self._is_copy = None + return + + # a custom message + if isinstance(self._is_copy, str): + t = self._is_copy + + elif t == "referent": + t = ( + "\n" + "A value is trying to be set on a copy of a slice from a " + "DataFrame\n\n" + "See the caveats in the documentation: " + "https://pandas.pydata.org/pandas-docs/stable/user_guide/" + "indexing.html#returning-a-view-versus-a-copy" + ) + + else: + t = ( + "\n" + "A value is trying to be set on a copy of a slice from a " + "DataFrame.\n" + "Try using .loc[row_indexer,col_indexer] = value " + "instead\n\nSee the caveats in the documentation: " + "https://pandas.pydata.org/pandas-docs/stable/user_guide/" + "indexing.html#returning-a-view-versus-a-copy" + ) + + if value == "raise": + raise SettingWithCopyError(t) + if value == "warn": + warnings.warn(t, SettingWithCopyWarning, stacklevel=find_stack_level()) + + @final + def __delitem__(self, key) -> None: + """ + Delete item + """ + deleted = False + + maybe_shortcut = False + if self.ndim == 2 and isinstance(self.columns, MultiIndex): + try: + # By using engine's __contains__ we effectively + # restrict to same-length tuples + maybe_shortcut = key not in self.columns._engine + except TypeError: + pass + + if maybe_shortcut: + # Allow shorthand to delete all columns whose first len(key) + # elements match key: + if not isinstance(key, tuple): + key = (key,) + for col in self.columns: + if isinstance(col, tuple) and col[: len(key)] == key: + del self[col] + deleted = True + if not deleted: + # If the above loop ran and didn't delete anything because + # there was no match, this call should raise the appropriate + # exception: + loc = self.axes[-1].get_loc(key) + self._mgr = self._mgr.idelete(loc) + + # delete from the caches + try: + del self._item_cache[key] + except KeyError: + pass + + # ---------------------------------------------------------------------- + # Unsorted + + @final + def _check_inplace_and_allows_duplicate_labels(self, inplace: bool_t): + if inplace and not self.flags.allows_duplicate_labels: + raise ValueError( + "Cannot specify 'inplace=True' when " + "'self.flags.allows_duplicate_labels' is False." + ) + + @final + def get(self, key, default=None): + """ + Get item from object for given key (ex: DataFrame column). + + Returns default value if not found. + + Parameters + ---------- + key : object + + Returns + ------- + same type as items contained in object + + Examples + -------- + >>> df = pd.DataFrame( + ... [ + ... [24.3, 75.7, "high"], + ... [31, 87.8, "high"], + ... [22, 71.6, "medium"], + ... [35, 95, "medium"], + ... ], + ... columns=["temp_celsius", "temp_fahrenheit", "windspeed"], + ... index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"), + ... 
) + + >>> df + temp_celsius temp_fahrenheit windspeed + 2014-02-12 24.3 75.7 high + 2014-02-13 31.0 87.8 high + 2014-02-14 22.0 71.6 medium + 2014-02-15 35.0 95.0 medium + + >>> df.get(["temp_celsius", "windspeed"]) + temp_celsius windspeed + 2014-02-12 24.3 high + 2014-02-13 31.0 high + 2014-02-14 22.0 medium + 2014-02-15 35.0 medium + + >>> ser = df['windspeed'] + >>> ser.get('2014-02-13') + 'high' + + If the key isn't found, the default value will be used. + + >>> df.get(["temp_celsius", "temp_kelvin"], default="default_value") + 'default_value' + + >>> ser.get('2014-02-10', '[unknown]') + '[unknown]' + """ + try: + return self[key] + except (KeyError, ValueError, IndexError): + return default + + @final + @property + def _is_view(self) -> bool_t: + """Return boolean indicating if self is view of another array""" + return self._mgr.is_view + + @final + def reindex_like( + self, + other, + method: Literal["backfill", "bfill", "pad", "ffill", "nearest"] | None = None, + copy: bool_t | None = None, + limit: int | None = None, + tolerance=None, + ) -> Self: + """ + Return an object with matching indices as other object. + + Conform the object to the same index on all axes. Optional + filling logic, placing NaN in locations having no value + in the previous index. A new object is produced unless the + new index is equivalent to the current one and copy=False. + + Parameters + ---------- + other : Object of the same data type + Its row and column indices are used to define the new indices + of this object. + method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} + Method to use for filling holes in reindexed DataFrame. + Please note: this is only applicable to DataFrames/Series with a + monotonically increasing/decreasing index. + + * None (default): don't fill gaps + * pad / ffill: propagate last valid observation forward to next + valid + * backfill / bfill: use next valid observation to fill gap + * nearest: use nearest valid observations to fill gap. + + copy : bool, default True + Return a new object, even if the passed indexes are the same. + limit : int, default None + Maximum number of consecutive labels to fill for inexact matches. + tolerance : optional + Maximum distance between original and new labels for inexact + matches. The values of the index at the matching locations must + satisfy the equation ``abs(index[indexer] - target) <= tolerance``. + + Tolerance may be a scalar value, which applies the same tolerance + to all values, or list-like, which applies variable tolerance per + element. List-like includes list, tuple, array, Series, and must be + the same size as the index and its dtype must exactly match the + index's type. + + Returns + ------- + Series or DataFrame + Same type as caller, but with changed indices on each axis. + + See Also + -------- + DataFrame.set_index : Set row labels. + DataFrame.reset_index : Remove row labels or move them to new columns. + DataFrame.reindex : Change to new indices or expand indices. + + Notes + ----- + Same as calling + ``.reindex(index=other.index, columns=other.columns,...)``. + + Examples + -------- + >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'], + ... [31, 87.8, 'high'], + ... [22, 71.6, 'medium'], + ... [35, 95, 'medium']], + ... columns=['temp_celsius', 'temp_fahrenheit', + ... 'windspeed'], + ... index=pd.date_range(start='2014-02-12', + ... 
end='2014-02-15', freq='D')) + + >>> df1 + temp_celsius temp_fahrenheit windspeed + 2014-02-12 24.3 75.7 high + 2014-02-13 31.0 87.8 high + 2014-02-14 22.0 71.6 medium + 2014-02-15 35.0 95.0 medium + + >>> df2 = pd.DataFrame([[28, 'low'], + ... [30, 'low'], + ... [35.1, 'medium']], + ... columns=['temp_celsius', 'windspeed'], + ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', + ... '2014-02-15'])) + + >>> df2 + temp_celsius windspeed + 2014-02-12 28.0 low + 2014-02-13 30.0 low + 2014-02-15 35.1 medium + + >>> df2.reindex_like(df1) + temp_celsius temp_fahrenheit windspeed + 2014-02-12 28.0 NaN low + 2014-02-13 30.0 NaN low + 2014-02-14 NaN NaN NaN + 2014-02-15 35.1 NaN medium + """ + d = other._construct_axes_dict( + axes=self._AXIS_ORDERS, + method=method, + copy=copy, + limit=limit, + tolerance=tolerance, + ) + + return self.reindex(**d) + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level | None = ..., + inplace: Literal[True], + errors: IgnoreRaise = ..., + ) -> None: + ... + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level | None = ..., + inplace: Literal[False] = ..., + errors: IgnoreRaise = ..., + ) -> Self: + ... + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level | None = ..., + inplace: bool_t = ..., + errors: IgnoreRaise = ..., + ) -> Self | None: + ... + + def drop( + self, + labels: IndexLabel | None = None, + *, + axis: Axis = 0, + index: IndexLabel | None = None, + columns: IndexLabel | None = None, + level: Level | None = None, + inplace: bool_t = False, + errors: IgnoreRaise = "raise", + ) -> Self | None: + inplace = validate_bool_kwarg(inplace, "inplace") + + if labels is not None: + if index is not None or columns is not None: + raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") + axis_name = self._get_axis_name(axis) + axes = {axis_name: labels} + elif index is not None or columns is not None: + axes = {"index": index} + if self.ndim == 2: + axes["columns"] = columns + else: + raise ValueError( + "Need to specify at least one of 'labels', 'index' or 'columns'" + ) + + obj = self + + for axis, labels in axes.items(): + if labels is not None: + obj = obj._drop_axis(labels, axis, level=level, errors=errors) + + if inplace: + self._update_inplace(obj) + return None + else: + return obj + + @final + def _drop_axis( + self, + labels, + axis, + level=None, + errors: IgnoreRaise = "raise", + only_slice: bool_t = False, + ) -> Self: + """ + Drop labels from specified axis. Used in the ``drop`` method + internally. + + Parameters + ---------- + labels : single label or list-like + axis : int or axis name + level : int or level name, default None + For MultiIndex + errors : {'ignore', 'raise'}, default 'raise' + If 'ignore', suppress error and existing labels are dropped. + only_slice : bool, default False + Whether indexing along columns should be view-only. 
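+
+        Returns
+        -------
+        Self
+            A new object of the same type with the requested labels dropped
+            along the given axis.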
+ + """ + axis_num = self._get_axis_number(axis) + axis = self._get_axis(axis) + + if axis.is_unique: + if level is not None: + if not isinstance(axis, MultiIndex): + raise AssertionError("axis must be a MultiIndex") + new_axis = axis.drop(labels, level=level, errors=errors) + else: + new_axis = axis.drop(labels, errors=errors) + indexer = axis.get_indexer(new_axis) + + # Case for non-unique axis + else: + is_tuple_labels = is_nested_list_like(labels) or isinstance(labels, tuple) + labels = ensure_object(common.index_labels_to_array(labels)) + if level is not None: + if not isinstance(axis, MultiIndex): + raise AssertionError("axis must be a MultiIndex") + mask = ~axis.get_level_values(level).isin(labels) + + # GH 18561 MultiIndex.drop should raise if label is absent + if errors == "raise" and mask.all(): + raise KeyError(f"{labels} not found in axis") + elif ( + isinstance(axis, MultiIndex) + and labels.dtype == "object" + and not is_tuple_labels + ): + # Set level to zero in case of MultiIndex and label is string, + # because isin can't handle strings for MultiIndexes GH#36293 + # In case of tuples we get dtype object but have to use isin GH#42771 + mask = ~axis.get_level_values(0).isin(labels) + else: + mask = ~axis.isin(labels) + # Check if label doesn't exist along axis + labels_missing = (axis.get_indexer_for(labels) == -1).any() + if errors == "raise" and labels_missing: + raise KeyError(f"{labels} not found in axis") + + if isinstance(mask.dtype, ExtensionDtype): + # GH#45860 + mask = mask.to_numpy(dtype=bool) + + indexer = mask.nonzero()[0] + new_axis = axis.take(indexer) + + bm_axis = self.ndim - axis_num - 1 + new_mgr = self._mgr.reindex_indexer( + new_axis, + indexer, + axis=bm_axis, + allow_dups=True, + copy=None, + only_slice=only_slice, + ) + result = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes) + if self.ndim == 1: + result._name = self.name + + return result.__finalize__(self) + + @final + def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: + """ + Replace self internals with result. + + Parameters + ---------- + result : same type as self + verify_is_copy : bool, default True + Provide is_copy checks. + """ + # NOTE: This does *not* call __finalize__ and that's an explicit + # decision that we may revisit in the future. + self._reset_cache() + self._clear_item_cache() + self._mgr = result._mgr + self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True) + + @final + def add_prefix(self, prefix: str, axis: Axis | None = None) -> Self: + """ + Prefix labels with string `prefix`. + + For Series, the row labels are prefixed. + For DataFrame, the column labels are prefixed. + + Parameters + ---------- + prefix : str + The string to add before each label. + axis : {0 or 'index', 1 or 'columns', None}, default None + Axis to add prefix on + + .. versionadded:: 2.0.0 + + Returns + ------- + Series or DataFrame + New Series or DataFrame with updated labels. + + See Also + -------- + Series.add_suffix: Suffix row labels with string `suffix`. + DataFrame.add_suffix: Suffix column labels with string `suffix`. 
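+
+        Notes
+        -----
+        Equivalent to a ``rename`` on the chosen axis with a function that
+        maps each label ``x`` to ``f"{prefix}{x}"`` (see the implementation
+        below).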
+ + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4]) + >>> s + 0 1 + 1 2 + 2 3 + 3 4 + dtype: int64 + + >>> s.add_prefix('item_') + item_0 1 + item_1 2 + item_2 3 + item_3 4 + dtype: int64 + + >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) + >>> df + A B + 0 1 3 + 1 2 4 + 2 3 5 + 3 4 6 + + >>> df.add_prefix('col_') + col_A col_B + 0 1 3 + 1 2 4 + 2 3 5 + 3 4 6 + """ + f = lambda x: f"{prefix}{x}" + + axis_name = self._info_axis_name + if axis is not None: + axis_name = self._get_axis_name(axis) + + mapper = {axis_name: f} + + # error: Incompatible return value type (got "Optional[Self]", + # expected "Self") + # error: Argument 1 to "rename" of "NDFrame" has incompatible type + # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" + # error: Keywords must be strings + return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] + + @final + def add_suffix(self, suffix: str, axis: Axis | None = None) -> Self: + """ + Suffix labels with string `suffix`. + + For Series, the row labels are suffixed. + For DataFrame, the column labels are suffixed. + + Parameters + ---------- + suffix : str + The string to add after each label. + axis : {0 or 'index', 1 or 'columns', None}, default None + Axis to add suffix on + + .. versionadded:: 2.0.0 + + Returns + ------- + Series or DataFrame + New Series or DataFrame with updated labels. + + See Also + -------- + Series.add_prefix: Prefix row labels with string `prefix`. + DataFrame.add_prefix: Prefix column labels with string `prefix`. + + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4]) + >>> s + 0 1 + 1 2 + 2 3 + 3 4 + dtype: int64 + + >>> s.add_suffix('_item') + 0_item 1 + 1_item 2 + 2_item 3 + 3_item 4 + dtype: int64 + + >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) + >>> df + A B + 0 1 3 + 1 2 4 + 2 3 5 + 3 4 6 + + >>> df.add_suffix('_col') + A_col B_col + 0 1 3 + 1 2 4 + 2 3 5 + 3 4 6 + """ + f = lambda x: f"{x}{suffix}" + + axis_name = self._info_axis_name + if axis is not None: + axis_name = self._get_axis_name(axis) + + mapper = {axis_name: f} + # error: Incompatible return value type (got "Optional[Self]", + # expected "Self") + # error: Argument 1 to "rename" of "NDFrame" has incompatible type + # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" + # error: Keywords must be strings + return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] + + @overload + def sort_values( + self, + *, + axis: Axis = ..., + ascending: bool_t | Sequence[bool_t] = ..., + inplace: Literal[False] = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + ignore_index: bool_t = ..., + key: ValueKeyFunc = ..., + ) -> Self: + ... + + @overload + def sort_values( + self, + *, + axis: Axis = ..., + ascending: bool_t | Sequence[bool_t] = ..., + inplace: Literal[True], + kind: SortKind = ..., + na_position: NaPosition = ..., + ignore_index: bool_t = ..., + key: ValueKeyFunc = ..., + ) -> None: + ... + + @overload + def sort_values( + self, + *, + axis: Axis = ..., + ascending: bool_t | Sequence[bool_t] = ..., + inplace: bool_t = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + ignore_index: bool_t = ..., + key: ValueKeyFunc = ..., + ) -> Self | None: + ... 
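+
+    # Note: the three ``@overload`` stubs above exist purely for static type
+    # checkers: ``inplace=False`` (the default) maps the return type to
+    # ``Self`` and ``inplace=True`` maps it to ``None``. A minimal sketch of
+    # what they promise, assuming a hypothetical DataFrame ``df`` with a
+    # column "a":
+    #
+    #     out = df.sort_values(by="a")                # out is a DataFrame
+    #     ret = df.sort_values(by="a", inplace=True)  # ret is None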
+ + def sort_values( + self, + *, + axis: Axis = 0, + ascending: bool_t | Sequence[bool_t] = True, + inplace: bool_t = False, + kind: SortKind = "quicksort", + na_position: NaPosition = "last", + ignore_index: bool_t = False, + key: ValueKeyFunc | None = None, + ) -> Self | None: + """ + Sort by the values along either axis. + + Parameters + ----------%(optional_by)s + axis : %(axes_single_arg)s, default 0 + Axis to be sorted. + ascending : bool or list of bool, default True + Sort ascending vs. descending. Specify list for multiple sort + orders. If this is a list of bools, must match the length of + the by. + inplace : bool, default False + If True, perform operation in-place. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' + Choice of sorting algorithm. See also :func:`numpy.sort` for more + information. `mergesort` and `stable` are the only stable algorithms. For + DataFrames, this option is only applied when sorting on a single + column or label. + na_position : {'first', 'last'}, default 'last' + Puts NaNs at the beginning if `first`; `last` puts NaNs at the + end. + ignore_index : bool, default False + If True, the resulting axis will be labeled 0, 1, …, n - 1. + key : callable, optional + Apply the key function to the values + before sorting. This is similar to the `key` argument in the + builtin :meth:`sorted` function, with the notable difference that + this `key` function should be *vectorized*. It should expect a + ``Series`` and return a Series with the same shape as the input. + It will be applied to each column in `by` independently. + + Returns + ------- + DataFrame or None + DataFrame with sorted values or None if ``inplace=True``. + + See Also + -------- + DataFrame.sort_index : Sort a DataFrame by the index. + Series.sort_values : Similar method for a Series. + + Examples + -------- + >>> df = pd.DataFrame({ + ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], + ... 'col2': [2, 1, 9, 8, 7, 4], + ... 'col3': [0, 1, 9, 4, 2, 3], + ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F'] + ... }) + >>> df + col1 col2 col3 col4 + 0 A 2 0 a + 1 A 1 1 B + 2 B 9 9 c + 3 NaN 8 4 D + 4 D 7 2 e + 5 C 4 3 F + + Sort by col1 + + >>> df.sort_values(by=['col1']) + col1 col2 col3 col4 + 0 A 2 0 a + 1 A 1 1 B + 2 B 9 9 c + 5 C 4 3 F + 4 D 7 2 e + 3 NaN 8 4 D + + Sort by multiple columns + + >>> df.sort_values(by=['col1', 'col2']) + col1 col2 col3 col4 + 1 A 1 1 B + 0 A 2 0 a + 2 B 9 9 c + 5 C 4 3 F + 4 D 7 2 e + 3 NaN 8 4 D + + Sort Descending + + >>> df.sort_values(by='col1', ascending=False) + col1 col2 col3 col4 + 4 D 7 2 e + 5 C 4 3 F + 2 B 9 9 c + 0 A 2 0 a + 1 A 1 1 B + 3 NaN 8 4 D + + Putting NAs first + + >>> df.sort_values(by='col1', ascending=False, na_position='first') + col1 col2 col3 col4 + 3 NaN 8 4 D + 4 D 7 2 e + 5 C 4 3 F + 2 B 9 9 c + 0 A 2 0 a + 1 A 1 1 B + + Sorting with a key function + + >>> df.sort_values(by='col4', key=lambda col: col.str.lower()) + col1 col2 col3 col4 + 0 A 2 0 a + 1 A 1 1 B + 2 B 9 9 c + 3 NaN 8 4 D + 4 D 7 2 e + 5 C 4 3 F + + Natural sort with the key argument, + using the `natsort ` package. + + >>> df = pd.DataFrame({ + ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'], + ... "value": [10, 20, 30, 40, 50] + ... }) + >>> df + time value + 0 0hr 10 + 1 128hr 20 + 2 72hr 30 + 3 48hr 40 + 4 96hr 50 + >>> from natsort import index_natsorted + >>> df.sort_values( + ... by="time", + ... key=lambda x: np.argsort(index_natsorted(df["time"])) + ... 
) + time value + 0 0hr 10 + 3 48hr 40 + 2 72hr 30 + 4 96hr 50 + 1 128hr 20 + """ + raise AbstractMethodError(self) + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool_t | Sequence[bool_t] = ..., + inplace: Literal[True], + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool_t = ..., + ignore_index: bool_t = ..., + key: IndexKeyFunc = ..., + ) -> None: + ... + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool_t | Sequence[bool_t] = ..., + inplace: Literal[False] = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool_t = ..., + ignore_index: bool_t = ..., + key: IndexKeyFunc = ..., + ) -> Self: + ... + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool_t | Sequence[bool_t] = ..., + inplace: bool_t = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool_t = ..., + ignore_index: bool_t = ..., + key: IndexKeyFunc = ..., + ) -> Self | None: + ... + + def sort_index( + self, + *, + axis: Axis = 0, + level: IndexLabel | None = None, + ascending: bool_t | Sequence[bool_t] = True, + inplace: bool_t = False, + kind: SortKind = "quicksort", + na_position: NaPosition = "last", + sort_remaining: bool_t = True, + ignore_index: bool_t = False, + key: IndexKeyFunc | None = None, + ) -> Self | None: + inplace = validate_bool_kwarg(inplace, "inplace") + axis = self._get_axis_number(axis) + ascending = validate_ascending(ascending) + + target = self._get_axis(axis) + + indexer = get_indexer_indexer( + target, level, ascending, kind, na_position, sort_remaining, key + ) + + if indexer is None: + if inplace: + result = self + else: + result = self.copy(deep=None) + + if ignore_index: + result.index = default_index(len(self)) + if inplace: + return None + else: + return result + + baxis = self._get_block_manager_axis(axis) + new_data = self._mgr.take(indexer, axis=baxis, verify=False) + + # reconstruct axis if needed + new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic()) + + if ignore_index: + axis = 1 if isinstance(self, ABCDataFrame) else 0 + new_data.set_axis(axis, default_index(len(indexer))) + + result = self._constructor_from_mgr(new_data, axes=new_data.axes) + + if inplace: + return self._update_inplace(result) + else: + return result.__finalize__(self, method="sort_index") + + @doc( + klass=_shared_doc_kwargs["klass"], + optional_reindex="", + ) + def reindex( + self, + labels=None, + *, + index=None, + columns=None, + axis: Axis | None = None, + method: ReindexMethod | None = None, + copy: bool_t | None = None, + level: Level | None = None, + fill_value: Scalar | None = np.nan, + limit: int | None = None, + tolerance=None, + ) -> Self: + """ + Conform {klass} to new index with optional filling logic. + + Places NA/NaN in locations having no value in the previous index. A new object + is produced unless the new index is equivalent to the current one and + ``copy=False``. + + Parameters + ---------- + {optional_reindex} + method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}} + Method to use for filling holes in reindexed DataFrame. + Please note: this is only applicable to DataFrames/Series with a + monotonically increasing/decreasing index. + + * None (default): don't fill gaps + * pad / ffill: Propagate last valid observation forward to next + valid. + * backfill / bfill: Use next valid observation to fill gap. 
+ * nearest: Use nearest valid observations to fill gap. + + copy : bool, default True + Return a new object, even if the passed indexes are the same. + level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level. + fill_value : scalar, default np.nan + Value to use for missing values. Defaults to NaN, but can be any + "compatible" value. + limit : int, default None + Maximum number of consecutive elements to forward or backward fill. + tolerance : optional + Maximum distance between original and new labels for inexact + matches. The values of the index at the matching locations most + satisfy the equation ``abs(index[indexer] - target) <= tolerance``. + + Tolerance may be a scalar value, which applies the same tolerance + to all values, or list-like, which applies variable tolerance per + element. List-like includes list, tuple, array, Series, and must be + the same size as the index and its dtype must exactly match the + index's type. + + Returns + ------- + {klass} with changed index. + + See Also + -------- + DataFrame.set_index : Set row labels. + DataFrame.reset_index : Remove row labels or move them to new columns. + DataFrame.reindex_like : Change to same indices as other DataFrame. + + Examples + -------- + ``DataFrame.reindex`` supports two calling conventions + + * ``(index=index_labels, columns=column_labels, ...)`` + * ``(labels, axis={{'index', 'columns'}}, ...)`` + + We *highly* recommend using keyword arguments to clarify your + intent. + + Create a dataframe with some fictional data. + + >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] + >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301], + ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}}, + ... index=index) + >>> df + http_status response_time + Firefox 200 0.04 + Chrome 200 0.02 + Safari 404 0.07 + IE10 404 0.08 + Konqueror 301 1.00 + + Create a new index and reindex the dataframe. By default + values in the new index that do not have corresponding + records in the dataframe are assigned ``NaN``. + + >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', + ... 'Chrome'] + >>> df.reindex(new_index) + http_status response_time + Safari 404.0 0.07 + Iceweasel NaN NaN + Comodo Dragon NaN NaN + IE10 404.0 0.08 + Chrome 200.0 0.02 + + We can fill in the missing values by passing a value to + the keyword ``fill_value``. Because the index is not monotonically + increasing or decreasing, we cannot use arguments to the keyword + ``method`` to fill the ``NaN`` values. + + >>> df.reindex(new_index, fill_value=0) + http_status response_time + Safari 404 0.07 + Iceweasel 0 0.00 + Comodo Dragon 0 0.00 + IE10 404 0.08 + Chrome 200 0.02 + + >>> df.reindex(new_index, fill_value='missing') + http_status response_time + Safari 404 0.07 + Iceweasel missing missing + Comodo Dragon missing missing + IE10 404 0.08 + Chrome 200 0.02 + + We can also reindex the columns. + + >>> df.reindex(columns=['http_status', 'user_agent']) + http_status user_agent + Firefox 200 NaN + Chrome 200 NaN + Safari 404 NaN + IE10 404 NaN + Konqueror 301 NaN + + Or we can use "axis-style" keyword arguments + + >>> df.reindex(['http_status', 'user_agent'], axis="columns") + http_status user_agent + Firefox 200 NaN + Chrome 200 NaN + Safari 404 NaN + IE10 404 NaN + Konqueror 301 NaN + + To further illustrate the filling functionality in + ``reindex``, we will create a dataframe with a + monotonically increasing index (for example, a sequence + of dates). 
+ + >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') + >>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}}, + ... index=date_index) + >>> df2 + prices + 2010-01-01 100.0 + 2010-01-02 101.0 + 2010-01-03 NaN + 2010-01-04 100.0 + 2010-01-05 89.0 + 2010-01-06 88.0 + + Suppose we decide to expand the dataframe to cover a wider + date range. + + >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') + >>> df2.reindex(date_index2) + prices + 2009-12-29 NaN + 2009-12-30 NaN + 2009-12-31 NaN + 2010-01-01 100.0 + 2010-01-02 101.0 + 2010-01-03 NaN + 2010-01-04 100.0 + 2010-01-05 89.0 + 2010-01-06 88.0 + 2010-01-07 NaN + + The index entries that did not have a value in the original data frame + (for example, '2009-12-29') are by default filled with ``NaN``. + If desired, we can fill in the missing values using one of several + options. + + For example, to back-propagate the last valid value to fill the ``NaN`` + values, pass ``bfill`` as an argument to the ``method`` keyword. + + >>> df2.reindex(date_index2, method='bfill') + prices + 2009-12-29 100.0 + 2009-12-30 100.0 + 2009-12-31 100.0 + 2010-01-01 100.0 + 2010-01-02 101.0 + 2010-01-03 NaN + 2010-01-04 100.0 + 2010-01-05 89.0 + 2010-01-06 88.0 + 2010-01-07 NaN + + Please note that the ``NaN`` value present in the original dataframe + (at index value 2010-01-03) will not be filled by any of the + value propagation schemes. This is because filling while reindexing + does not look at dataframe values, but only compares the original and + desired indexes. If you do want to fill in the ``NaN`` values present + in the original dataframe, use the ``fillna()`` method. + + See the :ref:`user guide ` for more. + """ + # TODO: Decide if we care about having different examples for different + # kinds + + if index is not None and columns is not None and labels is not None: + raise TypeError("Cannot specify all of 'labels', 'index', 'columns'.") + elif index is not None or columns is not None: + if axis is not None: + raise TypeError( + "Cannot specify both 'axis' and any of 'index' or 'columns'" + ) + if labels is not None: + if index is not None: + columns = labels + else: + index = labels + else: + if axis and self._get_axis_number(axis) == 1: + columns = labels + else: + index = labels + axes: dict[Literal["index", "columns"], Any] = { + "index": index, + "columns": columns, + } + method = clean_reindex_fill_method(method) + + # if all axes that are requested to reindex are equal, then only copy + # if indicated must have index names equal here as well as values + if copy and using_copy_on_write(): + copy = False + if all( + self._get_axis(axis_name).identical(ax) + for axis_name, ax in axes.items() + if ax is not None + ): + return self.copy(deep=copy) + + # check if we are a multi reindex + if self._needs_reindex_multi(axes, method, level): + return self._reindex_multi(axes, copy, fill_value) + + # perform the reindex on the axes + return self._reindex_axes( + axes, level, limit, tolerance, method, fill_value, copy + ).__finalize__(self, method="reindex") + + @final + def _reindex_axes( + self, + axes, + level: Level | None, + limit: int | None, + tolerance, + method, + fill_value: Scalar | None, + copy: bool_t | None, + ) -> Self: + """Perform the reindex for all the axes.""" + obj = self + for a in self._AXIS_ORDERS: + labels = axes[a] + if labels is None: + continue + + ax = self._get_axis(a) + new_index, indexer = ax.reindex( + labels, level=level, limit=limit, tolerance=tolerance, method=method + ) + + axis 
= self._get_axis_number(a) + obj = obj._reindex_with_indexers( + {axis: [new_index, indexer]}, + fill_value=fill_value, + copy=copy, + allow_dups=False, + ) + # If we've made a copy once, no need to make another one + copy = False + + return obj + + def _needs_reindex_multi(self, axes, method, level: Level | None) -> bool_t: + """Check if we do need a multi reindex.""" + return ( + (common.count_not_none(*axes.values()) == self._AXIS_LEN) + and method is None + and level is None + # reindex_multi calls self.values, so we only want to go + # down that path when doing so is cheap. + and self._can_fast_transpose + ) + + def _reindex_multi(self, axes, copy, fill_value): + raise AbstractMethodError(self) + + @final + def _reindex_with_indexers( + self, + reindexers, + fill_value=None, + copy: bool_t | None = False, + allow_dups: bool_t = False, + ) -> Self: + """allow_dups indicates an internal call here""" + # reindex doing multiple operations on different axes if indicated + new_data = self._mgr + for axis in sorted(reindexers.keys()): + index, indexer = reindexers[axis] + baxis = self._get_block_manager_axis(axis) + + if index is None: + continue + + index = ensure_index(index) + if indexer is not None: + indexer = ensure_platform_int(indexer) + + # TODO: speed up on homogeneous DataFrame objects (see _reindex_multi) + new_data = new_data.reindex_indexer( + index, + indexer, + axis=baxis, + fill_value=fill_value, + allow_dups=allow_dups, + copy=copy, + ) + # If we've made a copy once, no need to make another one + copy = False + + if ( + (copy or copy is None) + and new_data is self._mgr + and not using_copy_on_write() + ): + new_data = new_data.copy(deep=copy) + elif using_copy_on_write() and new_data is self._mgr: + new_data = new_data.copy(deep=False) + + return self._constructor_from_mgr(new_data, axes=new_data.axes).__finalize__( + self + ) + + def filter( + self, + items=None, + like: str | None = None, + regex: str | None = None, + axis: Axis | None = None, + ) -> Self: + """ + Subset the dataframe rows or columns according to the specified index labels. + + Note that this routine does not filter a dataframe on its + contents. The filter is applied to the labels of the index. + + Parameters + ---------- + items : list-like + Keep labels from axis which are in items. + like : str + Keep labels from axis for which "like in label == True". + regex : str (regular expression) + Keep labels from axis for which re.search(regex, label) == True. + axis : {0 or 'index', 1 or 'columns', None}, default None + The axis to filter on, expressed either as an index (int) + or axis name (str). By default this is the info axis, 'columns' for + DataFrame. For `Series` this parameter is unused and defaults to `None`. + + Returns + ------- + same type as input object + + See Also + -------- + DataFrame.loc : Access a group of rows and columns + by label(s) or a boolean array. + + Notes + ----- + The ``items``, ``like``, and ``regex`` parameters are + enforced to be mutually exclusive. + + ``axis`` defaults to the info axis that is used when indexing + with ``[]``. + + Examples + -------- + >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), + ... index=['mouse', 'rabbit'], + ... 
columns=['one', 'two', 'three']) + >>> df + one two three + mouse 1 2 3 + rabbit 4 5 6 + + >>> # select columns by name + >>> df.filter(items=['one', 'three']) + one three + mouse 1 3 + rabbit 4 6 + + >>> # select columns by regular expression + >>> df.filter(regex='e$', axis=1) + one three + mouse 1 3 + rabbit 4 6 + + >>> # select rows containing 'bbi' + >>> df.filter(like='bbi', axis=0) + one two three + rabbit 4 5 6 + """ + nkw = common.count_not_none(items, like, regex) + if nkw > 1: + raise TypeError( + "Keyword arguments `items`, `like`, or `regex` " + "are mutually exclusive" + ) + + if axis is None: + axis = self._info_axis_name + labels = self._get_axis(axis) + + if items is not None: + name = self._get_axis_name(axis) + items = Index(items).intersection(labels) + if len(items) == 0: + # Keep the dtype of labels when we are empty + items = items.astype(labels.dtype) + # error: Keywords must be strings + return self.reindex(**{name: items}) # type: ignore[misc] + elif like: + + def f(x) -> bool_t: + assert like is not None # needed for mypy + return like in ensure_str(x) + + values = labels.map(f) + return self.loc(axis=axis)[values] + elif regex: + + def f(x) -> bool_t: + return matcher.search(ensure_str(x)) is not None + + matcher = re.compile(regex) + values = labels.map(f) + return self.loc(axis=axis)[values] + else: + raise TypeError("Must pass either `items`, `like`, or `regex`") + + @final + def head(self, n: int = 5) -> Self: + """ + Return the first `n` rows. + + This function returns the first `n` rows for the object based + on position. It is useful for quickly testing if your object + has the right type of data in it. + + For negative values of `n`, this function returns all rows except + the last `|n|` rows, equivalent to ``df[:n]``. + + If n is larger than the number of rows, this function returns all rows. + + Parameters + ---------- + n : int, default 5 + Number of rows to select. + + Returns + ------- + same type as caller + The first `n` rows of the caller object. + + See Also + -------- + DataFrame.tail: Returns the last `n` rows. + + Examples + -------- + >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', + ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) + >>> df + animal + 0 alligator + 1 bee + 2 falcon + 3 lion + 4 monkey + 5 parrot + 6 shark + 7 whale + 8 zebra + + Viewing the first 5 lines + + >>> df.head() + animal + 0 alligator + 1 bee + 2 falcon + 3 lion + 4 monkey + + Viewing the first `n` lines (three in this case) + + >>> df.head(3) + animal + 0 alligator + 1 bee + 2 falcon + + For negative values of `n` + + >>> df.head(-3) + animal + 0 alligator + 1 bee + 2 falcon + 3 lion + 4 monkey + 5 parrot + """ + if using_copy_on_write(): + return self.iloc[:n].copy() + return self.iloc[:n] + + @final + def tail(self, n: int = 5) -> Self: + """ + Return the last `n` rows. + + This function returns last `n` rows from the object based on + position. It is useful for quickly verifying data, for example, + after sorting or appending rows. + + For negative values of `n`, this function returns all rows except + the first `|n|` rows, equivalent to ``df[|n|:]``. + + If n is larger than the number of rows, this function returns all rows. + + Parameters + ---------- + n : int, default 5 + Number of rows to select. + + Returns + ------- + type of caller + The last `n` rows of the caller object. + + See Also + -------- + DataFrame.head : The first `n` rows of the caller object. 
+ + Examples + -------- + >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', + ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) + >>> df + animal + 0 alligator + 1 bee + 2 falcon + 3 lion + 4 monkey + 5 parrot + 6 shark + 7 whale + 8 zebra + + Viewing the last 5 lines + + >>> df.tail() + animal + 4 monkey + 5 parrot + 6 shark + 7 whale + 8 zebra + + Viewing the last `n` lines (three in this case) + + >>> df.tail(3) + animal + 6 shark + 7 whale + 8 zebra + + For negative values of `n` + + >>> df.tail(-3) + animal + 3 lion + 4 monkey + 5 parrot + 6 shark + 7 whale + 8 zebra + """ + if using_copy_on_write(): + if n == 0: + return self.iloc[0:0].copy() + return self.iloc[-n:].copy() + if n == 0: + return self.iloc[0:0] + return self.iloc[-n:] + + @final + def sample( + self, + n: int | None = None, + frac: float | None = None, + replace: bool_t = False, + weights=None, + random_state: RandomState | None = None, + axis: Axis | None = None, + ignore_index: bool_t = False, + ) -> Self: + """ + Return a random sample of items from an axis of object. + + You can use `random_state` for reproducibility. + + Parameters + ---------- + n : int, optional + Number of items from axis to return. Cannot be used with `frac`. + Default = 1 if `frac` = None. + frac : float, optional + Fraction of axis items to return. Cannot be used with `n`. + replace : bool, default False + Allow or disallow sampling of the same row more than once. + weights : str or ndarray-like, optional + Default 'None' results in equal probability weighting. + If passed a Series, will align with target object on index. Index + values in weights not found in sampled object will be ignored and + index values in sampled object not in weights will be assigned + weights of zero. + If called on a DataFrame, will accept the name of a column + when axis = 0. + Unless weights are a Series, weights must be same length as axis + being sampled. + If weights do not sum to 1, they will be normalized to sum to 1. + Missing values in the weights column will be treated as zero. + Infinite values not allowed. + random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional + If int, array-like, or BitGenerator, seed for random number generator. + If np.random.RandomState or np.random.Generator, use as given. + + .. versionchanged:: 1.4.0 + + np.random.Generator objects now accepted + + axis : {0 or 'index', 1 or 'columns', None}, default None + Axis to sample. Accepts axis number or name. Default is stat axis + for given data type. For `Series` this parameter is unused and defaults to `None`. + ignore_index : bool, default False + If True, the resulting index will be labeled 0, 1, …, n - 1. + + .. versionadded:: 1.3.0 + + Returns + ------- + Series or DataFrame + A new object of same type as caller containing `n` items randomly + sampled from the caller object. + + See Also + -------- + DataFrameGroupBy.sample: Generates random samples from each group of a + DataFrame object. + SeriesGroupBy.sample: Generates random samples from each group of a + Series object. + numpy.random.choice: Generates a random sample from a given 1-D numpy + array. + + Notes + ----- + If `frac` > 1, `replacement` should be set to `True`. + + Examples + -------- + >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], + ... 'num_wings': [2, 0, 0, 0], + ... 'num_specimen_seen': [10, 2, 1, 8]}, + ... 
index=['falcon', 'dog', 'spider', 'fish']) + >>> df + num_legs num_wings num_specimen_seen + falcon 2 2 10 + dog 4 0 2 + spider 8 0 1 + fish 0 0 8 + + Extract 3 random elements from the ``Series`` ``df['num_legs']``: + Note that we use `random_state` to ensure the reproducibility of + the examples. + + >>> df['num_legs'].sample(n=3, random_state=1) + fish 0 + spider 8 + falcon 2 + Name: num_legs, dtype: int64 + + A random 50% sample of the ``DataFrame`` with replacement: + + >>> df.sample(frac=0.5, replace=True, random_state=1) + num_legs num_wings num_specimen_seen + dog 4 0 2 + fish 0 0 8 + + An upsample sample of the ``DataFrame`` with replacement: + Note that `replace` parameter has to be `True` for `frac` parameter > 1. + + >>> df.sample(frac=2, replace=True, random_state=1) + num_legs num_wings num_specimen_seen + dog 4 0 2 + fish 0 0 8 + falcon 2 2 10 + falcon 2 2 10 + fish 0 0 8 + dog 4 0 2 + fish 0 0 8 + dog 4 0 2 + + Using a DataFrame column as weights. Rows with larger value in the + `num_specimen_seen` column are more likely to be sampled. + + >>> df.sample(n=2, weights='num_specimen_seen', random_state=1) + num_legs num_wings num_specimen_seen + falcon 2 2 10 + fish 0 0 8 + """ # noqa: E501 + if axis is None: + axis = 0 + + axis = self._get_axis_number(axis) + obj_len = self.shape[axis] + + # Process random_state argument + rs = common.random_state(random_state) + + size = sample.process_sampling_size(n, frac, replace) + if size is None: + assert frac is not None + size = round(frac * obj_len) + + if weights is not None: + weights = sample.preprocess_weights(self, weights, axis) + + sampled_indices = sample.sample(obj_len, size, replace, weights, rs) + result = self.take(sampled_indices, axis=axis) + + if ignore_index: + result.index = default_index(len(result)) + + return result + + @final + @doc(klass=_shared_doc_kwargs["klass"]) + def pipe( + self, + func: Callable[..., T] | tuple[Callable[..., T], str], + *args, + **kwargs, + ) -> T: + r""" + Apply chainable functions that expect Series or DataFrames. + + Parameters + ---------- + func : function + Function to apply to the {klass}. + ``args``, and ``kwargs`` are passed into ``func``. + Alternatively a ``(callable, data_keyword)`` tuple where + ``data_keyword`` is a string indicating the keyword of + ``callable`` that expects the {klass}. + *args : iterable, optional + Positional arguments passed into ``func``. + **kwargs : mapping, optional + A dictionary of keyword arguments passed into ``func``. + + Returns + ------- + the return type of ``func``. + + See Also + -------- + DataFrame.apply : Apply a function along input axis of DataFrame. + DataFrame.map : Apply a function elementwise on a whole DataFrame. + Series.map : Apply a mapping correspondence on a + :class:`~pandas.Series`. + + Notes + ----- + Use ``.pipe`` when chaining together functions that expect + Series, DataFrames or GroupBy objects. + + Examples + -------- + Constructing a income DataFrame from a dictionary. + + >>> data = [[8000, 1000], [9500, np.nan], [5000, 2000]] + >>> df = pd.DataFrame(data, columns=['Salary', 'Others']) + >>> df + Salary Others + 0 8000 1000.0 + 1 9500 NaN + 2 5000 2000.0 + + Functions that perform tax reductions on an income DataFrame. + + >>> def subtract_federal_tax(df): + ... return df * 0.9 + >>> def subtract_state_tax(df, rate): + ... return df * (1 - rate) + >>> def subtract_national_insurance(df, rate, rate_increase): + ... new_rate = rate + rate_increase + ... 
return df * (1 - new_rate) + + Instead of writing + + >>> subtract_national_insurance( + ... subtract_state_tax(subtract_federal_tax(df), rate=0.12), + ... rate=0.05, + ... rate_increase=0.02) # doctest: +SKIP + + You can write + + >>> ( + ... df.pipe(subtract_federal_tax) + ... .pipe(subtract_state_tax, rate=0.12) + ... .pipe(subtract_national_insurance, rate=0.05, rate_increase=0.02) + ... ) + Salary Others + 0 5892.48 736.56 + 1 6997.32 NaN + 2 3682.80 1473.12 + + If you have a function that takes the data as (say) the second + argument, pass a tuple indicating which keyword expects the + data. For example, suppose ``national_insurance`` takes its data as ``df`` + in the second argument: + + >>> def subtract_national_insurance(rate, df, rate_increase): + ... new_rate = rate + rate_increase + ... return df * (1 - new_rate) + >>> ( + ... df.pipe(subtract_federal_tax) + ... .pipe(subtract_state_tax, rate=0.12) + ... .pipe( + ... (subtract_national_insurance, 'df'), + ... rate=0.05, + ... rate_increase=0.02 + ... ) + ... ) + Salary Others + 0 5892.48 736.56 + 1 6997.32 NaN + 2 3682.80 1473.12 + """ + if using_copy_on_write(): + return common.pipe(self.copy(deep=None), func, *args, **kwargs) + return common.pipe(self, func, *args, **kwargs) + + # ---------------------------------------------------------------------- + # Attribute access + + @final + def __finalize__(self, other, method: str | None = None, **kwargs) -> Self: + """ + Propagate metadata from other to self. + + Parameters + ---------- + other : the object from which to get the attributes that we are going + to propagate + method : str, optional + A passed method name providing context on where ``__finalize__`` + was called. + + .. warning:: + + The value passed as `method` are not currently considered + stable across pandas releases. + """ + if isinstance(other, NDFrame): + for name in other.attrs: + self.attrs[name] = other.attrs[name] + + self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels + # For subclasses using _metadata. + for name in set(self._metadata) & set(other._metadata): + assert isinstance(name, str) + object.__setattr__(self, name, getattr(other, name, None)) + + if method == "concat": + attrs = other.objs[0].attrs + check_attrs = all(objs.attrs == attrs for objs in other.objs[1:]) + if check_attrs: + for name in attrs: + self.attrs[name] = attrs[name] + + allows_duplicate_labels = all( + x.flags.allows_duplicate_labels for x in other.objs + ) + self.flags.allows_duplicate_labels = allows_duplicate_labels + + return self + + @final + def __getattr__(self, name: str): + """ + After regular attribute access, try looking up the name + This allows simpler access to columns for interactive use. + """ + # Note: obj.x will always call obj.__getattribute__('x') prior to + # calling obj.__getattr__('x'). + if ( + name not in self._internal_names_set + and name not in self._metadata + and name not in self._accessors + and self._info_axis._can_hold_identifiers_and_holds_name(name) + ): + return self[name] + return object.__getattribute__(self, name) + + @final + def __setattr__(self, name: str, value) -> None: + """ + After regular attribute access, try setting the name + This allows simpler access to columns for interactive use. + """ + # first try regular attribute access via __getattribute__, so that + # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify + # the same attribute. 
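+        # Illustrative behaviour (not executed here), assuming a DataFrame
+        # ``df`` with an existing column "a":
+        #
+        #     df.a            # resolves to the column "a" via __getattr__
+        #     df.a = other    # "a" is in the info axis -> assigns the column
+        #     df.b = [1, 2]   # new list-like attribute -> warns below and
+        #                     # sets a plain attribute, not a column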
+ + try: + object.__getattribute__(self, name) + return object.__setattr__(self, name, value) + except AttributeError: + pass + + # if this fails, go on to more involved attribute setting + # (note that this matches __getattr__, above). + if name in self._internal_names_set: + object.__setattr__(self, name, value) + elif name in self._metadata: + object.__setattr__(self, name, value) + else: + try: + existing = getattr(self, name) + if isinstance(existing, Index): + object.__setattr__(self, name, value) + elif name in self._info_axis: + self[name] = value + else: + object.__setattr__(self, name, value) + except (AttributeError, TypeError): + if isinstance(self, ABCDataFrame) and (is_list_like(value)): + warnings.warn( + "Pandas doesn't allow columns to be " + "created via a new attribute name - see " + "https://pandas.pydata.org/pandas-docs/" + "stable/indexing.html#attribute-access", + stacklevel=find_stack_level(), + ) + object.__setattr__(self, name, value) + + @final + def _dir_additions(self) -> set[str]: + """ + add the string-like attributes from the info_axis. + If info_axis is a MultiIndex, its first level values are used. + """ + additions = super()._dir_additions() + if self._info_axis._can_hold_strings: + additions.update(self._info_axis._dir_additions_for_owner) + return additions + + # ---------------------------------------------------------------------- + # Consolidation of internals + + @final + def _protect_consolidate(self, f): + """ + Consolidate _mgr -- if the blocks have changed, then clear the + cache + """ + if isinstance(self._mgr, (ArrayManager, SingleArrayManager)): + return f() + blocks_before = len(self._mgr.blocks) + result = f() + if len(self._mgr.blocks) != blocks_before: + self._clear_item_cache() + return result + + @final + def _consolidate_inplace(self) -> None: + """Consolidate data in place and return None""" + + def f() -> None: + self._mgr = self._mgr.consolidate() + + self._protect_consolidate(f) + + @final + def _consolidate(self): + """ + Compute NDFrame with "consolidated" internals (data of each dtype + grouped together in a single ndarray). + + Returns + ------- + consolidated : same type as caller + """ + f = lambda: self._mgr.consolidate() + cons_data = self._protect_consolidate(f) + return self._constructor_from_mgr(cons_data, axes=cons_data.axes).__finalize__( + self + ) + + @final + @property + def _is_mixed_type(self) -> bool_t: + if self._mgr.is_single_block: + # Includes all Series cases + return False + + if self._mgr.any_extension_types: + # Even if they have the same dtype, we can't consolidate them, + # so we pretend this is "mixed'" + return True + + return self.dtypes.nunique() > 1 + + @final + def _get_numeric_data(self) -> Self: + new_mgr = self._mgr.get_numeric_data() + return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(self) + + @final + def _get_bool_data(self): + new_mgr = self._mgr.get_bool_data() + return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(self) + + # ---------------------------------------------------------------------- + # Internal Interface Methods + + @property + def values(self): + raise AbstractMethodError(self) + + @property + def _values(self) -> ArrayLike: + """internal implementation""" + raise AbstractMethodError(self) + + @property + def dtypes(self): + """ + Return the dtypes in the DataFrame. + + This returns a Series with the data type of each column. + The result's index is the original DataFrame's columns. 
Columns + with mixed types are stored with the ``object`` dtype. See + :ref:`the User Guide ` for more. + + Returns + ------- + pandas.Series + The data type of each column. + + Examples + -------- + >>> df = pd.DataFrame({'float': [1.0], + ... 'int': [1], + ... 'datetime': [pd.Timestamp('20180310')], + ... 'string': ['foo']}) + >>> df.dtypes + float float64 + int int64 + datetime datetime64[ns] + string object + dtype: object + """ + data = self._mgr.get_dtypes() + return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_) + + @final + def astype( + self, dtype, copy: bool_t | None = None, errors: IgnoreRaise = "raise" + ) -> Self: + """ + Cast a pandas object to a specified dtype ``dtype``. + + Parameters + ---------- + dtype : str, data type, Series or Mapping of column name -> data type + Use a str, numpy.dtype, pandas.ExtensionDtype or Python type to + cast entire pandas object to the same type. Alternatively, use a + mapping, e.g. {col: dtype, ...}, where col is a column label and dtype is + a numpy.dtype or Python type to cast one or more of the DataFrame's + columns to column-specific types. + copy : bool, default True + Return a copy when ``copy=True`` (be very careful setting + ``copy=False`` as changes to values then may propagate to other + pandas objects). + errors : {'raise', 'ignore'}, default 'raise' + Control raising of exceptions on invalid data for provided dtype. + + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object. + + Returns + ------- + same type as caller + + See Also + -------- + to_datetime : Convert argument to datetime. + to_timedelta : Convert argument to timedelta. + to_numeric : Convert argument to a numeric type. + numpy.ndarray.astype : Cast a numpy array to a specified type. + + Notes + ----- + .. versionchanged:: 2.0.0 + + Using ``astype`` to convert from timezone-naive dtype to + timezone-aware dtype will raise an exception. + Use :meth:`Series.dt.tz_localize` instead. + + Examples + -------- + Create a DataFrame: + + >>> d = {'col1': [1, 2], 'col2': [3, 4]} + >>> df = pd.DataFrame(data=d) + >>> df.dtypes + col1 int64 + col2 int64 + dtype: object + + Cast all columns to int32: + + >>> df.astype('int32').dtypes + col1 int32 + col2 int32 + dtype: object + + Cast col1 to int32 using a dictionary: + + >>> df.astype({'col1': 'int32'}).dtypes + col1 int32 + col2 int64 + dtype: object + + Create a series: + + >>> ser = pd.Series([1, 2], dtype='int32') + >>> ser + 0 1 + 1 2 + dtype: int32 + >>> ser.astype('int64') + 0 1 + 1 2 + dtype: int64 + + Convert to categorical type: + + >>> ser.astype('category') + 0 1 + 1 2 + dtype: category + Categories (2, int32): [1, 2] + + Convert to ordered categorical type with custom ordering: + + >>> from pandas.api.types import CategoricalDtype + >>> cat_dtype = CategoricalDtype( + ... categories=[2, 1], ordered=True) + >>> ser.astype(cat_dtype) + 0 1 + 1 2 + dtype: category + Categories (2, int64): [2 < 1] + + Create a series of dates: + + >>> ser_date = pd.Series(pd.date_range('20200101', periods=3)) + >>> ser_date + 0 2020-01-01 + 1 2020-01-02 + 2 2020-01-03 + dtype: datetime64[ns] + """ + if copy and using_copy_on_write(): + copy = False + + if is_dict_like(dtype): + if self.ndim == 1: # i.e. Series + if len(dtype) > 1 or self.name not in dtype: + raise KeyError( + "Only the Series name can be used for " + "the key in Series dtype mappings." 
+ ) + new_type = dtype[self.name] + return self.astype(new_type, copy, errors) + + # GH#44417 cast to Series so we can use .iat below, which will be + # robust in case we + from pandas import Series + + dtype_ser = Series(dtype, dtype=object) + + for col_name in dtype_ser.index: + if col_name not in self: + raise KeyError( + "Only a column name can be used for the " + "key in a dtype mappings argument. " + f"'{col_name}' not found in columns." + ) + + dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False) + + results = [] + for i, (col_name, col) in enumerate(self.items()): + cdt = dtype_ser.iat[i] + if isna(cdt): + res_col = col.copy(deep=copy) + else: + try: + res_col = col.astype(dtype=cdt, copy=copy, errors=errors) + except ValueError as ex: + ex.args = ( + f"{ex}: Error while type casting for column '{col_name}'", + ) + raise + results.append(res_col) + + elif is_extension_array_dtype(dtype) and self.ndim > 1: + # TODO(EA2D): special case not needed with 2D EAs + dtype = pandas_dtype(dtype) + if isinstance(dtype, ExtensionDtype) and all( + arr.dtype == dtype for arr in self._mgr.arrays + ): + return self.copy(deep=copy) + # GH 18099/22869: columnwise conversion to extension dtype + # GH 24704: self.items handles duplicate column names + results = [ser.astype(dtype, copy=copy) for _, ser in self.items()] + + else: + # else, only a single dtype is given + new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) + res = self._constructor_from_mgr(new_data, axes=new_data.axes) + return res.__finalize__(self, method="astype") + + # GH 33113: handle empty frame or series + if not results: + return self.copy(deep=None) + + # GH 19920: retain column metadata after concat + result = concat(results, axis=1, copy=False) + # GH#40810 retain subclass + # error: Incompatible types in assignment + # (expression has type "Self", variable has type "DataFrame") + result = self._constructor(result) # type: ignore[assignment] + result.columns = self.columns + result = result.__finalize__(self, method="astype") + # https://github.com/python/mypy/issues/8354 + return cast(Self, result) + + @final + def copy(self, deep: bool_t | None = True) -> Self: + """ + Make a copy of this object's indices and data. + + When ``deep=True`` (default), a new object will be created with a + copy of the calling object's data and indices. Modifications to + the data or indices of the copy will not be reflected in the + original object (see notes below). + + When ``deep=False``, a new object will be created without copying + the calling object's data or index (only references to the data + and index are copied). Any changes to the data of the original + will be reflected in the shallow copy (and vice versa). + + Parameters + ---------- + deep : bool, default True + Make a deep copy, including a copy of the data and the indices. + With ``deep=False`` neither the indices nor the data are copied. + + Returns + ------- + Series or DataFrame + Object type matches caller. + + Notes + ----- + When ``deep=True``, data is copied but actual Python objects + will not be copied recursively, only the reference to the object. + This is in contrast to `copy.deepcopy` in the Standard Library, + which recursively copies object data (see examples below). + + While ``Index`` objects are copied when ``deep=True``, the underlying + numpy array is not copied for performance reasons. Since ``Index`` is + immutable, the underlying data can be safely shared and a copy + is not needed. 
+ + Since pandas is not thread safe, see the + :ref:`gotchas ` when copying in a threading + environment. + + When ``copy_on_write`` in pandas config is set to ``True``, the + ``copy_on_write`` config takes effect even when ``deep=False``. + This means that any changes to the copied data would make a new copy + of the data upon write (and vice versa). Changes made to either the + original or copied variable would not be reflected in the counterpart. + See :ref:`Copy_on_Write ` for more information. + + Examples + -------- + >>> s = pd.Series([1, 2], index=["a", "b"]) + >>> s + a 1 + b 2 + dtype: int64 + + >>> s_copy = s.copy() + >>> s_copy + a 1 + b 2 + dtype: int64 + + **Shallow copy versus default (deep) copy:** + + >>> s = pd.Series([1, 2], index=["a", "b"]) + >>> deep = s.copy() + >>> shallow = s.copy(deep=False) + + Shallow copy shares data and index with original. + + >>> s is shallow + False + >>> s.values is shallow.values and s.index is shallow.index + True + + Deep copy has own copy of data and index. + + >>> s is deep + False + >>> s.values is deep.values or s.index is deep.index + False + + Updates to the data shared by shallow copy and original is reflected + in both; deep copy remains unchanged. + + >>> s.iloc[0] = 3 + >>> shallow.iloc[1] = 4 + >>> s + a 3 + b 4 + dtype: int64 + >>> shallow + a 3 + b 4 + dtype: int64 + >>> deep + a 1 + b 2 + dtype: int64 + + Note that when copying an object containing Python objects, a deep copy + will copy the data, but will not do so recursively. Updating a nested + data object will be reflected in the deep copy. + + >>> s = pd.Series([[1, 2], [3, 4]]) + >>> deep = s.copy() + >>> s[0][0] = 10 + >>> s + 0 [10, 2] + 1 [3, 4] + dtype: object + >>> deep + 0 [10, 2] + 1 [3, 4] + dtype: object + + ** Copy-on-Write is set to true: ** + + >>> with pd.option_context("mode.copy_on_write", True): + ... s = pd.Series([1, 2], index=["a", "b"]) + ... copy = s.copy(deep=False) + ... s.iloc[0] = 100 + ... s + a 100 + b 2 + dtype: int64 + >>> copy + a 1 + b 2 + dtype: int64 + """ + data = self._mgr.copy(deep=deep) + self._clear_item_cache() + return self._constructor_from_mgr(data, axes=data.axes).__finalize__( + self, method="copy" + ) + + @final + def __copy__(self, deep: bool_t = True) -> Self: + return self.copy(deep=deep) + + @final + def __deepcopy__(self, memo=None) -> Self: + """ + Parameters + ---------- + memo, default None + Standard signature. Unused + """ + return self.copy(deep=True) + + @final + def infer_objects(self, copy: bool_t | None = None) -> Self: + """ + Attempt to infer better dtypes for object columns. + + Attempts soft conversion of object-dtyped + columns, leaving non-object and unconvertible + columns unchanged. The inference rules are the + same as during normal Series/DataFrame construction. + + Parameters + ---------- + copy : bool, default True + Whether to make a copy for non-object or non-inferable columns + or Series. + + Returns + ------- + same type as input object + + See Also + -------- + to_datetime : Convert argument to datetime. + to_timedelta : Convert argument to timedelta. + to_numeric : Convert argument to numeric type. + convert_dtypes : Convert argument to best possible dtype. 
+ + Examples + -------- + >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) + >>> df = df.iloc[1:] + >>> df + A + 1 1 + 2 2 + 3 3 + + >>> df.dtypes + A object + dtype: object + + >>> df.infer_objects().dtypes + A int64 + dtype: object + """ + new_mgr = self._mgr.convert(copy=copy) + res = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes) + return res.__finalize__(self, method="infer_objects") + + @final + def convert_dtypes( + self, + infer_objects: bool_t = True, + convert_string: bool_t = True, + convert_integer: bool_t = True, + convert_boolean: bool_t = True, + convert_floating: bool_t = True, + dtype_backend: DtypeBackend = "numpy_nullable", + ) -> Self: + """ + Convert columns to the best possible dtypes using dtypes supporting ``pd.NA``. + + Parameters + ---------- + infer_objects : bool, default True + Whether object dtypes should be converted to the best possible types. + convert_string : bool, default True + Whether object dtypes should be converted to ``StringDtype()``. + convert_integer : bool, default True + Whether, if possible, conversion can be done to integer extension types. + convert_boolean : bool, defaults True + Whether object dtypes should be converted to ``BooleanDtypes()``. + convert_floating : bool, defaults True + Whether, if possible, conversion can be done to floating extension types. + If `convert_integer` is also True, preference will be give to integer + dtypes if the floats can be faithfully casted to integers. + + .. versionadded:: 1.2.0 + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + Returns + ------- + Series or DataFrame + Copy of input object with new dtype. + + See Also + -------- + infer_objects : Infer dtypes of objects. + to_datetime : Convert argument to datetime. + to_timedelta : Convert argument to timedelta. + to_numeric : Convert argument to a numeric type. + + Notes + ----- + By default, ``convert_dtypes`` will attempt to convert a Series (or each + Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options + ``convert_string``, ``convert_integer``, ``convert_boolean`` and + ``convert_floating``, it is possible to turn off individual conversions + to ``StringDtype``, the integer extension types, ``BooleanDtype`` + or floating extension types, respectively. + + For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference + rules as during normal Series/DataFrame construction. Then, if possible, + convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer + or floating extension type, otherwise leave as ``object``. + + If the dtype is integer, convert to an appropriate integer extension type. + + If the dtype is numeric, and consists of all integers, convert to an + appropriate integer extension type. Otherwise, convert to an + appropriate floating extension type. + + .. versionchanged:: 1.2 + Starting with pandas 1.2, this method also converts float columns + to the nullable floating extension type. + + In the future, as new dtypes are added that support ``pd.NA``, the results + of this method will change to support those new dtypes. + + Examples + -------- + >>> df = pd.DataFrame( + ... { + ... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")), + ... 
"b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")), + ... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")), + ... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")), + ... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")), + ... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")), + ... } + ... ) + + Start with a DataFrame with default dtypes. + + >>> df + a b c d e f + 0 1 x True h 10.0 NaN + 1 2 y False i NaN 100.5 + 2 3 z NaN NaN 20.0 200.0 + + >>> df.dtypes + a int32 + b object + c object + d object + e float64 + f float64 + dtype: object + + Convert the DataFrame to use best possible dtypes. + + >>> dfn = df.convert_dtypes() + >>> dfn + a b c d e f + 0 1 x True h 10 + 1 2 y False i 100.5 + 2 3 z 20 200.0 + + >>> dfn.dtypes + a Int32 + b string[python] + c boolean + d string[python] + e Int64 + f Float64 + dtype: object + + Start with a Series of strings and missing data represented by ``np.nan``. + + >>> s = pd.Series(["a", "b", np.nan]) + >>> s + 0 a + 1 b + 2 NaN + dtype: object + + Obtain a Series with dtype ``StringDtype``. + + >>> s.convert_dtypes() + 0 a + 1 b + 2 + dtype: string + """ + check_dtype_backend(dtype_backend) + if self.ndim == 1: + return self._convert_dtypes( + infer_objects, + convert_string, + convert_integer, + convert_boolean, + convert_floating, + dtype_backend=dtype_backend, + ) + else: + results = [ + col._convert_dtypes( + infer_objects, + convert_string, + convert_integer, + convert_boolean, + convert_floating, + dtype_backend=dtype_backend, + ) + for col_name, col in self.items() + ] + if len(results) > 0: + result = concat(results, axis=1, copy=False, keys=self.columns) + cons = cast(type["DataFrame"], self._constructor) + result = cons(result) + result = result.__finalize__(self, method="convert_dtypes") + # https://github.com/python/mypy/issues/8354 + return cast(Self, result) + else: + return self.copy(deep=None) + + # ---------------------------------------------------------------------- + # Filling NA's + + def _deprecate_downcast(self, downcast, method_name: str): + # GH#40988 + if downcast is not lib.no_default: + warnings.warn( + f"The 'downcast' keyword in {method_name} is deprecated and " + "will be removed in a future version. 
Use " + "res.infer_objects(copy=False) to infer non-object dtype, or " + "pd.to_numeric with the 'downcast' keyword to downcast numeric " + "results.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + downcast = None + return downcast + + @final + def _pad_or_backfill( + self, + method: Literal["ffill", "bfill", "pad", "backfill"], + *, + axis: None | Axis = None, + inplace: bool_t = False, + limit: None | int = None, + downcast: dict | None = None, + ): + if axis is None: + axis = 0 + axis = self._get_axis_number(axis) + method = clean_fill_method(method) + + if not self._mgr.is_single_block and axis == 1: + if inplace: + raise NotImplementedError() + result = self.T._pad_or_backfill(method=method, limit=limit).T + + return result + + new_mgr = self._mgr.pad_or_backfill( + method=method, + axis=self._get_block_manager_axis(axis), + limit=limit, + inplace=inplace, + downcast=downcast, + ) + result = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes) + if inplace: + return self._update_inplace(result) + else: + return result.__finalize__(self, method="fillna") + + @overload + def fillna( + self, + value: Hashable | Mapping | Series | DataFrame = ..., + *, + method: FillnaOptions | None = ..., + axis: Axis | None = ..., + inplace: Literal[False] = ..., + limit: int | None = ..., + downcast: dict | None = ..., + ) -> Self: + ... + + @overload + def fillna( + self, + value: Hashable | Mapping | Series | DataFrame = ..., + *, + method: FillnaOptions | None = ..., + axis: Axis | None = ..., + inplace: Literal[True], + limit: int | None = ..., + downcast: dict | None = ..., + ) -> None: + ... + + @overload + def fillna( + self, + value: Hashable | Mapping | Series | DataFrame = ..., + *, + method: FillnaOptions | None = ..., + axis: Axis | None = ..., + inplace: bool_t = ..., + limit: int | None = ..., + downcast: dict | None = ..., + ) -> Self | None: + ... + + @final + @doc( + klass=_shared_doc_kwargs["klass"], + axes_single_arg=_shared_doc_kwargs["axes_single_arg"], + ) + def fillna( + self, + value: Hashable | Mapping | Series | DataFrame | None = None, + *, + method: FillnaOptions | None = None, + axis: Axis | None = None, + inplace: bool_t = False, + limit: int | None = None, + downcast: dict | None | lib.NoDefault = lib.no_default, + ) -> Self | None: + """ + Fill NA/NaN values using the specified method. + + Parameters + ---------- + value : scalar, dict, Series, or DataFrame + Value to use to fill holes (e.g. 0), alternately a + dict/Series/DataFrame of values specifying which value to use for + each index (for a Series) or column (for a DataFrame). Values not + in the dict/Series/DataFrame will not be filled. This value cannot + be a list. + method : {{'backfill', 'bfill', 'ffill', None}}, default None + Method to use for filling holes in reindexed Series: + + * ffill: propagate last valid observation forward to next valid. + * backfill / bfill: use next valid observation to fill gap. + + .. deprecated:: 2.1.0 + Use ffill or bfill instead. + + axis : {axes_single_arg} + Axis along which to fill missing values. For `Series` + this parameter is unused and defaults to 0. + inplace : bool, default False + If True, fill in-place. Note: this will modify any + other views on this object (e.g., a no-copy slice for a column in a + DataFrame). + limit : int, default None + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill. 
In other words, if there is + a gap with more than this number of consecutive NaNs, it will only + be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. Must be greater than 0 if not None. + downcast : dict, default is None + A dict of item->dtype of what to downcast if possible, + or the string 'infer' which will try to downcast to an appropriate + equal type (e.g. float64 to int64 if possible). + + Returns + ------- + {klass} or None + Object with missing values filled or None if ``inplace=True``. + + See Also + -------- + ffill : Fill values by propagating the last valid observation to next valid. + bfill : Fill values by using the next valid observation to fill the gap. + interpolate : Fill NaN values using interpolation. + reindex : Conform object to new index. + asfreq : Convert TimeSeries to specified frequency. + + Examples + -------- + >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], + ... [3, 4, np.nan, 1], + ... [np.nan, np.nan, np.nan, np.nan], + ... [np.nan, 3, np.nan, 4]], + ... columns=list("ABCD")) + >>> df + A B C D + 0 NaN 2.0 NaN 0.0 + 1 3.0 4.0 NaN 1.0 + 2 NaN NaN NaN NaN + 3 NaN 3.0 NaN 4.0 + + Replace all NaN elements with 0s. + + >>> df.fillna(0) + A B C D + 0 0.0 2.0 0.0 0.0 + 1 3.0 4.0 0.0 1.0 + 2 0.0 0.0 0.0 0.0 + 3 0.0 3.0 0.0 4.0 + + Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, + 2, and 3 respectively. + + >>> values = {{"A": 0, "B": 1, "C": 2, "D": 3}} + >>> df.fillna(value=values) + A B C D + 0 0.0 2.0 2.0 0.0 + 1 3.0 4.0 2.0 1.0 + 2 0.0 1.0 2.0 3.0 + 3 0.0 3.0 2.0 4.0 + + Only replace the first NaN element. + + >>> df.fillna(value=values, limit=1) + A B C D + 0 0.0 2.0 2.0 0.0 + 1 3.0 4.0 NaN 1.0 + 2 NaN 1.0 NaN 3.0 + 3 NaN 3.0 NaN 4.0 + + When filling using a DataFrame, replacement happens along + the same column names and same indices + + >>> df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE")) + >>> df.fillna(df2) + A B C D + 0 0.0 2.0 0.0 0.0 + 1 3.0 4.0 0.0 1.0 + 2 0.0 0.0 0.0 NaN + 3 0.0 3.0 0.0 4.0 + + Note that column D is not affected since it is not present in df2. + """ + inplace = validate_bool_kwarg(inplace, "inplace") + if inplace: + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= REF_COUNT: + warnings.warn( + _chained_assignment_method_msg, + ChainedAssignmentError, + stacklevel=2, + ) + + value, method = validate_fillna_kwargs(value, method) + if method is not None: + warnings.warn( + f"{type(self).__name__}.fillna with 'method' is deprecated and " + "will raise in a future version. Use obj.ffill() or obj.bfill() " + "instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + was_no_default = downcast is lib.no_default + downcast = self._deprecate_downcast(downcast, "fillna") + + # set the default here, so functions examining the signaure + # can detect if something was set (e.g. 
in groupby) (GH9221) + if axis is None: + axis = 0 + axis = self._get_axis_number(axis) + + if value is None: + return self._pad_or_backfill( + # error: Argument 1 to "_pad_or_backfill" of "NDFrame" has + # incompatible type "Optional[Literal['backfill', 'bfill', 'ffill', + # 'pad']]"; expected "Literal['ffill', 'bfill', 'pad', 'backfill']" + method, # type: ignore[arg-type] + axis=axis, + limit=limit, + inplace=inplace, + # error: Argument "downcast" to "_fillna_with_method" of "NDFrame" + # has incompatible type "Union[Dict[Any, Any], None, + # Literal[_NoDefault.no_default]]"; expected + # "Optional[Dict[Any, Any]]" + downcast=downcast, # type: ignore[arg-type] + ) + else: + if self.ndim == 1: + if isinstance(value, (dict, ABCSeries)): + if not len(value): + # test_fillna_nonscalar + if inplace: + return None + return self.copy(deep=None) + from pandas import Series + + value = Series(value) + value = value.reindex(self.index, copy=False) + value = value._values + elif not is_list_like(value): + pass + else: + raise TypeError( + '"value" parameter must be a scalar, dict ' + "or Series, but you passed a " + f'"{type(value).__name__}"' + ) + + new_data = self._mgr.fillna( + value=value, limit=limit, inplace=inplace, downcast=downcast + ) + + elif isinstance(value, (dict, ABCSeries)): + if axis == 1: + raise NotImplementedError( + "Currently only can fill " + "with dict/Series column " + "by column" + ) + if using_copy_on_write(): + result = self.copy(deep=None) + else: + result = self if inplace else self.copy() + is_dict = isinstance(downcast, dict) + for k, v in value.items(): + if k not in result: + continue + + if was_no_default: + downcast_k = lib.no_default + else: + downcast_k = ( + # error: Incompatible types in assignment (expression + # has type "Union[Dict[Any, Any], None, + # Literal[_NoDefault.no_default], Any]", variable has + # type "_NoDefault") + downcast # type: ignore[assignment] + if not is_dict + # error: Item "None" of "Optional[Dict[Any, Any]]" has + # no attribute "get" + else downcast.get(k) # type: ignore[union-attr] + ) + + res_k = result[k].fillna(v, limit=limit, downcast=downcast_k) + + if not inplace: + result[k] = res_k + else: + # We can write into our existing column(s) iff dtype + # was preserved. + if isinstance(res_k, ABCSeries): + # i.e. 'k' only shows up once in self.columns + if res_k.dtype == result[k].dtype: + result.loc[:, k] = res_k + else: + # Different dtype -> no way to do inplace. 
+ result[k] = res_k + else: + # see test_fillna_dict_inplace_nonunique_columns + locs = result.columns.get_loc(k) + if isinstance(locs, slice): + locs = np.arange(self.shape[1])[locs] + elif ( + isinstance(locs, np.ndarray) and locs.dtype.kind == "b" + ): + locs = locs.nonzero()[0] + elif not ( + isinstance(locs, np.ndarray) and locs.dtype.kind == "i" + ): + # Should never be reached, but let's cover our bases + raise NotImplementedError( + "Unexpected get_loc result, please report a bug at " + "https://github.com/pandas-dev/pandas" + ) + + for i, loc in enumerate(locs): + res_loc = res_k.iloc[:, i] + target = self.iloc[:, loc] + + if res_loc.dtype == target.dtype: + result.iloc[:, loc] = res_loc + else: + result.isetitem(loc, res_loc) + if inplace: + return self._update_inplace(result) + else: + return result + + elif not is_list_like(value): + if axis == 1: + result = self.T.fillna(value=value, limit=limit).T + new_data = result._mgr + else: + new_data = self._mgr.fillna( + value=value, limit=limit, inplace=inplace, downcast=downcast + ) + elif isinstance(value, ABCDataFrame) and self.ndim == 2: + new_data = self.where(self.notna(), value)._mgr + else: + raise ValueError(f"invalid fill value with a {type(value)}") + + result = self._constructor_from_mgr(new_data, axes=new_data.axes) + if inplace: + return self._update_inplace(result) + else: + return result.__finalize__(self, method="fillna") + + @overload + def ffill( + self, + *, + axis: None | Axis = ..., + inplace: Literal[False] = ..., + limit: None | int = ..., + downcast: dict | None | lib.NoDefault = ..., + ) -> Self: + ... + + @overload + def ffill( + self, + *, + axis: None | Axis = ..., + inplace: Literal[True], + limit: None | int = ..., + downcast: dict | None | lib.NoDefault = ..., + ) -> None: + ... + + @overload + def ffill( + self, + *, + axis: None | Axis = ..., + inplace: bool_t = ..., + limit: None | int = ..., + downcast: dict | None | lib.NoDefault = ..., + ) -> Self | None: + ... + + @final + @doc( + klass=_shared_doc_kwargs["klass"], + axes_single_arg=_shared_doc_kwargs["axes_single_arg"], + ) + def ffill( + self, + *, + axis: None | Axis = None, + inplace: bool_t = False, + limit: None | int = None, + downcast: dict | None | lib.NoDefault = lib.no_default, + ) -> Self | None: + """ + Fill NA/NaN values by propagating the last valid observation to next valid. + + Parameters + ---------- + axis : {axes_single_arg} + Axis along which to fill missing values. For `Series` + this parameter is unused and defaults to 0. + inplace : bool, default False + If True, fill in-place. Note: this will modify any + other views on this object (e.g., a no-copy slice for a column in a + DataFrame). + limit : int, default None + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill. In other words, if there is + a gap with more than this number of consecutive NaNs, it will only + be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. Must be greater than 0 if not None. + downcast : dict, default is None + A dict of item->dtype of what to downcast if possible, + or the string 'infer' which will try to downcast to an appropriate + equal type (e.g. float64 to int64 if possible). + + Returns + ------- + {klass} or None + Object with missing values filled or None if ``inplace=True``. + + Examples + -------- + >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], + ... [3, 4, np.nan, 1], + ... 
[np.nan, np.nan, np.nan, np.nan], + ... [np.nan, 3, np.nan, 4]], + ... columns=list("ABCD")) + >>> df + A B C D + 0 NaN 2.0 NaN 0.0 + 1 3.0 4.0 NaN 1.0 + 2 NaN NaN NaN NaN + 3 NaN 3.0 NaN 4.0 + + >>> df.ffill() + A B C D + 0 NaN 2.0 NaN 0.0 + 1 3.0 4.0 NaN 1.0 + 2 3.0 4.0 NaN 1.0 + 3 3.0 3.0 NaN 4.0 + + >>> ser = pd.Series([1, np.nan, 2, 3]) + >>> ser.ffill() + 0 1.0 + 1 1.0 + 2 2.0 + 3 3.0 + dtype: float64 + """ + downcast = self._deprecate_downcast(downcast, "ffill") + inplace = validate_bool_kwarg(inplace, "inplace") + if inplace: + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= REF_COUNT: + warnings.warn( + _chained_assignment_method_msg, + ChainedAssignmentError, + stacklevel=2, + ) + + return self._pad_or_backfill( + "ffill", + axis=axis, + inplace=inplace, + limit=limit, + # error: Argument "downcast" to "_fillna_with_method" of "NDFrame" + # has incompatible type "Union[Dict[Any, Any], None, + # Literal[_NoDefault.no_default]]"; expected "Optional[Dict[Any, Any]]" + downcast=downcast, # type: ignore[arg-type] + ) + + @final + @doc(klass=_shared_doc_kwargs["klass"]) + def pad( + self, + *, + axis: None | Axis = None, + inplace: bool_t = False, + limit: None | int = None, + downcast: dict | None | lib.NoDefault = lib.no_default, + ) -> Self | None: + """ + Fill NA/NaN values by propagating the last valid observation to next valid. + + .. deprecated:: 2.0 + + {klass}.pad is deprecated. Use {klass}.ffill instead. + + Returns + ------- + {klass} or None + Object with missing values filled or None if ``inplace=True``. + + Examples + -------- + Please see examples for :meth:`DataFrame.ffill` or :meth:`Series.ffill`. + """ + warnings.warn( + "DataFrame.pad/Series.pad is deprecated. Use " + "DataFrame.ffill/Series.ffill instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.ffill(axis=axis, inplace=inplace, limit=limit, downcast=downcast) + + @overload + def bfill( + self, + *, + axis: None | Axis = ..., + inplace: Literal[False] = ..., + limit: None | int = ..., + downcast: dict | None | lib.NoDefault = ..., + ) -> Self: + ... + + @overload + def bfill( + self, + *, + axis: None | Axis = ..., + inplace: Literal[True], + limit: None | int = ..., + downcast: dict | None | lib.NoDefault = ..., + ) -> None: + ... + + @overload + def bfill( + self, + *, + axis: None | Axis = ..., + inplace: bool_t = ..., + limit: None | int = ..., + downcast: dict | None | lib.NoDefault = ..., + ) -> Self | None: + ... + + @final + @doc( + klass=_shared_doc_kwargs["klass"], + axes_single_arg=_shared_doc_kwargs["axes_single_arg"], + ) + def bfill( + self, + *, + axis: None | Axis = None, + inplace: bool_t = False, + limit: None | int = None, + downcast: dict | None | lib.NoDefault = lib.no_default, + ) -> Self | None: + """ + Fill NA/NaN values by using the next valid observation to fill the gap. + + Parameters + ---------- + axis : {axes_single_arg} + Axis along which to fill missing values. For `Series` + this parameter is unused and defaults to 0. + inplace : bool, default False + If True, fill in-place. Note: this will modify any + other views on this object (e.g., a no-copy slice for a column in a + DataFrame). + limit : int, default None + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill. In other words, if there is + a gap with more than this number of consecutive NaNs, it will only + be partially filled. 
If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. Must be greater than 0 if not None. + downcast : dict, default is None + A dict of item->dtype of what to downcast if possible, + or the string 'infer' which will try to downcast to an appropriate + equal type (e.g. float64 to int64 if possible). + + Returns + ------- + {klass} or None + Object with missing values filled or None if ``inplace=True``. + + Examples + -------- + For Series: + + >>> s = pd.Series([1, None, None, 2]) + >>> s.bfill() + 0 1.0 + 1 2.0 + 2 2.0 + 3 2.0 + dtype: float64 + >>> s.bfill(limit=1) + 0 1.0 + 1 NaN + 2 2.0 + 3 2.0 + dtype: float64 + + With DataFrame: + + >>> df = pd.DataFrame({{'A': [1, None, None, 4], 'B': [None, 5, None, 7]}}) + >>> df + A B + 0 1.0 NaN + 1 NaN 5.0 + 2 NaN NaN + 3 4.0 7.0 + >>> df.bfill() + A B + 0 1.0 5.0 + 1 4.0 5.0 + 2 4.0 7.0 + 3 4.0 7.0 + >>> df.bfill(limit=1) + A B + 0 1.0 5.0 + 1 NaN 5.0 + 2 4.0 7.0 + 3 4.0 7.0 + """ + downcast = self._deprecate_downcast(downcast, "bfill") + inplace = validate_bool_kwarg(inplace, "inplace") + if inplace: + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= REF_COUNT: + warnings.warn( + _chained_assignment_method_msg, + ChainedAssignmentError, + stacklevel=2, + ) + return self._pad_or_backfill( + "bfill", + axis=axis, + inplace=inplace, + limit=limit, + # error: Argument "downcast" to "_fillna_with_method" of "NDFrame" + # has incompatible type "Union[Dict[Any, Any], None, + # Literal[_NoDefault.no_default]]"; expected "Optional[Dict[Any, Any]]" + downcast=downcast, # type: ignore[arg-type] + ) + + @final + @doc(klass=_shared_doc_kwargs["klass"]) + def backfill( + self, + *, + axis: None | Axis = None, + inplace: bool_t = False, + limit: None | int = None, + downcast: dict | None | lib.NoDefault = lib.no_default, + ) -> Self | None: + """ + Fill NA/NaN values by using the next valid observation to fill the gap. + + .. deprecated:: 2.0 + + {klass}.backfill is deprecated. Use {klass}.bfill instead. + + Returns + ------- + {klass} or None + Object with missing values filled or None if ``inplace=True``. + + Examples + -------- + Please see examples for :meth:`DataFrame.bfill` or :meth:`Series.bfill`. + """ + warnings.warn( + "DataFrame.backfill/Series.backfill is deprecated. Use " + "DataFrame.bfill/Series.bfill instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.bfill(axis=axis, inplace=inplace, limit=limit, downcast=downcast) + + @overload + def replace( + self, + to_replace=..., + value=..., + *, + inplace: Literal[False] = ..., + limit: int | None = ..., + regex: bool_t = ..., + method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., + ) -> Self: + ... + + @overload + def replace( + self, + to_replace=..., + value=..., + *, + inplace: Literal[True], + limit: int | None = ..., + regex: bool_t = ..., + method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., + ) -> None: + ... + + @overload + def replace( + self, + to_replace=..., + value=..., + *, + inplace: bool_t = ..., + limit: int | None = ..., + regex: bool_t = ..., + method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., + ) -> Self | None: + ... 
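+
+    # Editor's sketch (not pandas source): the overloads above only narrow the
+    # return type on ``inplace``; runtime behavior is identical:
+    #
+    #   >>> s = pd.Series([1, 2, 3])
+    #   >>> s.replace(1, 9)                 # returns a new Series
+    #   >>> s.replace(1, 9, inplace=True)   # returns None and mutates s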
+ + @final + @doc( + _shared_docs["replace"], + klass=_shared_doc_kwargs["klass"], + inplace=_shared_doc_kwargs["inplace"], + ) + def replace( + self, + to_replace=None, + value=lib.no_default, + *, + inplace: bool_t = False, + limit: int | None = None, + regex: bool_t = False, + method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default, + ) -> Self | None: + if method is not lib.no_default: + warnings.warn( + # GH#33302 + f"The 'method' keyword in {type(self).__name__}.replace is " + "deprecated and will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + elif limit is not None: + warnings.warn( + # GH#33302 + f"The 'limit' keyword in {type(self).__name__}.replace is " + "deprecated and will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if ( + value is lib.no_default + and method is lib.no_default + and not is_dict_like(to_replace) + and regex is False + ): + # case that goes through _replace_single and defaults to method="pad" + warnings.warn( + # GH#33302 + f"{type(self).__name__}.replace without 'value' and with " + "non-dict-like 'to_replace' is deprecated " + "and will raise in a future version. " + "Explicitly specify the new values instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if not ( + is_scalar(to_replace) + or is_re_compilable(to_replace) + or is_list_like(to_replace) + ): + raise TypeError( + "Expecting 'to_replace' to be either a scalar, array-like, " + "dict or None, got invalid type " + f"{repr(type(to_replace).__name__)}" + ) + + inplace = validate_bool_kwarg(inplace, "inplace") + if inplace: + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= REF_COUNT: + warnings.warn( + _chained_assignment_method_msg, + ChainedAssignmentError, + stacklevel=2, + ) + + if not is_bool(regex) and to_replace is not None: + raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") + + if value is lib.no_default or method is not lib.no_default: + # GH#36984 if the user explicitly passes value=None we want to + # respect that. We have the corner case where the user explicitly + # passes value=None *and* a method, which we interpret as meaning + # they want the (documented) default behavior. + if method is lib.no_default: + # TODO: get this to show up as the default in the docs? 
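+                # Editor's note (hedged): this value-less path forward-fills
+                # matched entries, e.g. pd.Series([0, 1, 2]).replace(2)
+                # yields [0, 1, 1] (and now emits the FutureWarning above).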
+ method = "pad" + + # passing a single value that is scalar like + # when value is None (GH5319), for compat + if not is_dict_like(to_replace) and not is_dict_like(regex): + to_replace = [to_replace] + + if isinstance(to_replace, (tuple, list)): + # TODO: Consider copy-on-write for non-replaced columns's here + if isinstance(self, ABCDataFrame): + from pandas import Series + + result = self.apply( + Series._replace_single, + args=(to_replace, method, inplace, limit), + ) + if inplace: + return None + return result + return self._replace_single(to_replace, method, inplace, limit) + + if not is_dict_like(to_replace): + if not is_dict_like(regex): + raise TypeError( + 'If "to_replace" and "value" are both None ' + 'and "to_replace" is not a list, then ' + "regex must be a mapping" + ) + to_replace = regex + regex = True + + items = list(to_replace.items()) + if items: + keys, values = zip(*items) + else: + keys, values = ([], []) + + are_mappings = [is_dict_like(v) for v in values] + + if any(are_mappings): + if not all(are_mappings): + raise TypeError( + "If a nested mapping is passed, all values " + "of the top level mapping must be mappings" + ) + # passed a nested dict/Series + to_rep_dict = {} + value_dict = {} + + for k, v in items: + keys, values = list(zip(*v.items())) or ([], []) + + to_rep_dict[k] = list(keys) + value_dict[k] = list(values) + + to_replace, value = to_rep_dict, value_dict + else: + to_replace, value = keys, values + + return self.replace( + to_replace, value, inplace=inplace, limit=limit, regex=regex + ) + else: + # need a non-zero len on all axes + if not self.size: + if inplace: + return None + return self.copy(deep=None) + + if is_dict_like(to_replace): + if is_dict_like(value): # {'A' : NA} -> {'A' : 0} + # Note: Checking below for `in foo.keys()` instead of + # `in foo` is needed for when we have a Series and not dict + mapping = { + col: (to_replace[col], value[col]) + for col in to_replace.keys() + if col in value.keys() and col in self + } + return self._replace_columnwise(mapping, inplace, regex) + + # {'A': NA} -> 0 + elif not is_list_like(value): + # Operate column-wise + if self.ndim == 1: + raise ValueError( + "Series.replace cannot use dict-like to_replace " + "and non-None value" + ) + mapping = { + col: (to_rep, value) for col, to_rep in to_replace.items() + } + return self._replace_columnwise(mapping, inplace, regex) + else: + raise TypeError("value argument must be scalar, dict, or Series") + + elif is_list_like(to_replace): + if not is_list_like(value): + # e.g. to_replace = [NA, ''] and value is 0, + # so we replace NA with 0 and then replace '' with 0 + value = [value] * len(to_replace) + + # e.g. we have to_replace = [NA, ''] and value = [0, 'missing'] + if len(to_replace) != len(value): + raise ValueError( + f"Replacement lists must match in length. 
" + f"Expecting {len(to_replace)} got {len(value)} " + ) + new_data = self._mgr.replace_list( + src_list=to_replace, + dest_list=value, + inplace=inplace, + regex=regex, + ) + + elif to_replace is None: + if not ( + is_re_compilable(regex) + or is_list_like(regex) + or is_dict_like(regex) + ): + raise TypeError( + f"'regex' must be a string or a compiled regular expression " + f"or a list or dict of strings or regular expressions, " + f"you passed a {repr(type(regex).__name__)}" + ) + return self.replace( + regex, value, inplace=inplace, limit=limit, regex=True + ) + else: + # dest iterable dict-like + if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} + # Operate column-wise + if self.ndim == 1: + raise ValueError( + "Series.replace cannot use dict-value and " + "non-None to_replace" + ) + mapping = {col: (to_replace, val) for col, val in value.items()} + return self._replace_columnwise(mapping, inplace, regex) + + elif not is_list_like(value): # NA -> 0 + regex = should_use_regex(regex, to_replace) + if regex: + new_data = self._mgr.replace_regex( + to_replace=to_replace, + value=value, + inplace=inplace, + ) + else: + new_data = self._mgr.replace( + to_replace=to_replace, value=value, inplace=inplace + ) + else: + raise TypeError( + f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' + ) + + result = self._constructor_from_mgr(new_data, axes=new_data.axes) + if inplace: + return self._update_inplace(result) + else: + return result.__finalize__(self, method="replace") + + @final + def interpolate( + self, + method: InterpolateOptions = "linear", + *, + axis: Axis = 0, + limit: int | None = None, + inplace: bool_t = False, + limit_direction: Literal["forward", "backward", "both"] | None = None, + limit_area: Literal["inside", "outside"] | None = None, + downcast: Literal["infer"] | None | lib.NoDefault = lib.no_default, + **kwargs, + ) -> Self | None: + """ + Fill NaN values using an interpolation method. + + Please note that only ``method='linear'`` is supported for + DataFrame/Series with a MultiIndex. + + Parameters + ---------- + method : str, default 'linear' + Interpolation technique to use. One of: + + * 'linear': Ignore the index and treat the values as equally + spaced. This is the only method supported on MultiIndexes. + * 'time': Works on daily and higher resolution data to interpolate + given length of interval. + * 'index', 'values': use the actual numerical values of the index. + * 'pad': Fill in NaNs using existing values. + * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', + 'barycentric', 'polynomial': Passed to + `scipy.interpolate.interp1d`, whereas 'spline' is passed to + `scipy.interpolate.UnivariateSpline`. These methods use the numerical + values of the index. Both 'polynomial' and 'spline' require that + you also specify an `order` (int), e.g. + ``df.interpolate(method='polynomial', order=5)``. Note that, + `slinear` method in Pandas refers to the Scipy first order `spline` + instead of Pandas first order `spline`. + * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima', + 'cubicspline': Wrappers around the SciPy interpolation methods of + similar names. See `Notes`. + * 'from_derivatives': Refers to + `scipy.interpolate.BPoly.from_derivatives`. + + axis : {{0 or 'index', 1 or 'columns', None}}, default None + Axis to interpolate along. For `Series` this parameter is unused + and defaults to 0. + limit : int, optional + Maximum number of consecutive NaNs to fill. Must be greater than + 0. 
+ inplace : bool, default False + Update the data in place if possible. + limit_direction : {{'forward', 'backward', 'both'}}, Optional + Consecutive NaNs will be filled in this direction. + + If limit is specified: + * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'. + * If 'method' is 'backfill' or 'bfill', 'limit_direction' must be + 'backwards'. + + If 'limit' is not specified: + * If 'method' is 'backfill' or 'bfill', the default is 'backward' + * else the default is 'forward' + + raises ValueError if `limit_direction` is 'forward' or 'both' and + method is 'backfill' or 'bfill'. + raises ValueError if `limit_direction` is 'backward' or 'both' and + method is 'pad' or 'ffill'. + + limit_area : {{`None`, 'inside', 'outside'}}, default None + If limit is specified, consecutive NaNs will be filled with this + restriction. + + * ``None``: No fill restriction. + * 'inside': Only fill NaNs surrounded by valid values + (interpolate). + * 'outside': Only fill NaNs outside valid values (extrapolate). + + downcast : optional, 'infer' or None, defaults to None + Downcast dtypes if possible. + + .. deprecated:: 2.1.0 + + ``**kwargs`` : optional + Keyword arguments to pass on to the interpolating function. + + Returns + ------- + Series or DataFrame or None + Returns the same object type as the caller, interpolated at + some or all ``NaN`` values or None if ``inplace=True``. + + See Also + -------- + fillna : Fill missing values using different methods. + scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials + (Akima interpolator). + scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the + Bernstein basis. + scipy.interpolate.interp1d : Interpolate a 1-D function. + scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh + interpolator). + scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic + interpolation. + scipy.interpolate.CubicSpline : Cubic spline data interpolator. + + Notes + ----- + The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima' + methods are wrappers around the respective SciPy implementations of + similar names. These use the actual numerical values of the index. + For more information on their behavior, see the + `SciPy documentation + `__. + + Examples + -------- + Filling in ``NaN`` in a :class:`~pandas.Series` via linear + interpolation. + + >>> s = pd.Series([0, 1, np.nan, 3]) + >>> s + 0 0.0 + 1 1.0 + 2 NaN + 3 3.0 + dtype: float64 + >>> s.interpolate() + 0 0.0 + 1 1.0 + 2 2.0 + 3 3.0 + dtype: float64 + + Filling in ``NaN`` in a Series via polynomial interpolation or splines: + Both 'polynomial' and 'spline' methods require that you also specify + an ``order`` (int). + + >>> s = pd.Series([0, 2, np.nan, 8]) + >>> s.interpolate(method='polynomial', order=2) + 0 0.000000 + 1 2.000000 + 2 4.666667 + 3 8.000000 + dtype: float64 + + Fill the DataFrame forward (that is, going down) along each column + using linear interpolation. + + Note how the last entry in column 'a' is interpolated differently, + because there is no entry after it to use for interpolation. + Note how the first entry in column 'b' remains ``NaN``, because there + is no entry before it to use for interpolation. + + >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0), + ... (np.nan, 2.0, np.nan, np.nan), + ... (2.0, 3.0, np.nan, 9.0), + ... (np.nan, 4.0, -4.0, 16.0)], + ... 
columns=list('abcd')) + >>> df + a b c d + 0 0.0 NaN -1.0 1.0 + 1 NaN 2.0 NaN NaN + 2 2.0 3.0 NaN 9.0 + 3 NaN 4.0 -4.0 16.0 + >>> df.interpolate(method='linear', limit_direction='forward', axis=0) + a b c d + 0 0.0 NaN -1.0 1.0 + 1 1.0 2.0 -2.0 5.0 + 2 2.0 3.0 -3.0 9.0 + 3 2.0 4.0 -4.0 16.0 + + Using polynomial interpolation. + + >>> df['d'].interpolate(method='polynomial', order=2) + 0 1.0 + 1 4.0 + 2 9.0 + 3 16.0 + Name: d, dtype: float64 + """ + if downcast is not lib.no_default: + # GH#40988 + warnings.warn( + f"The 'downcast' keyword in {type(self).__name__}.interpolate " + "is deprecated and will be removed in a future version. " + "Call result.infer_objects(copy=False) on the result instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + downcast = None + if downcast is not None and downcast != "infer": + raise ValueError("downcast must be either None or 'infer'") + + inplace = validate_bool_kwarg(inplace, "inplace") + + if inplace: + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= REF_COUNT: + warnings.warn( + _chained_assignment_method_msg, + ChainedAssignmentError, + stacklevel=2, + ) + + axis = self._get_axis_number(axis) + + if self.empty: + if inplace: + return None + return self.copy() + + if not isinstance(method, str): + raise ValueError("'method' should be a string, not None.") + + fillna_methods = ["ffill", "bfill", "pad", "backfill"] + if method.lower() in fillna_methods: + # GH#53581 + warnings.warn( + f"{type(self).__name__}.interpolate with method={method} is " + "deprecated and will raise in a future version. " + "Use obj.ffill() or obj.bfill() instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + obj, should_transpose = self, False + else: + obj, should_transpose = (self.T, True) if axis == 1 else (self, False) + if np.any(obj.dtypes == object): + # GH#53631 + if not (obj.ndim == 2 and np.all(obj.dtypes == object)): + # don't warn in cases that already raise + warnings.warn( + f"{type(self).__name__}.interpolate with object dtype is " + "deprecated and will raise in a future version. Call " + "obj.infer_objects(copy=False) before interpolating instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if method in fillna_methods and "fill_value" in kwargs: + raise ValueError( + "'fill_value' is not a valid keyword for " + f"{type(self).__name__}.interpolate with method from " + f"{fillna_methods}" + ) + + if isinstance(obj.index, MultiIndex) and method != "linear": + raise ValueError( + "Only `method=linear` interpolation is supported on MultiIndexes." + ) + + limit_direction = missing.infer_limit_direction(limit_direction, method) + + if obj.ndim == 2 and np.all(obj.dtypes == object): + raise TypeError( + "Cannot interpolate with all object-dtype columns " + "in the DataFrame. Try setting at least one " + "column to a numeric dtype." + ) + + if method.lower() in fillna_methods: + # TODO(3.0): remove this case + # TODO: warn/raise on limit_direction or kwargs which are ignored? 
+ # as of 2023-06-26 no tests get here with either + if not self._mgr.is_single_block and axis == 1: + # GH#53898 + if inplace: + raise NotImplementedError() + obj, axis, should_transpose = self.T, 1 - axis, True + + new_data = obj._mgr.pad_or_backfill( + method=method, + axis=self._get_block_manager_axis(axis), + limit=limit, + limit_area=limit_area, + inplace=inplace, + downcast=downcast, + ) + else: + index = missing.get_interp_index(method, obj.index) + new_data = obj._mgr.interpolate( + method=method, + index=index, + limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + inplace=inplace, + downcast=downcast, + **kwargs, + ) + + result = self._constructor_from_mgr(new_data, axes=new_data.axes) + if should_transpose: + result = result.T + if inplace: + return self._update_inplace(result) + else: + return result.__finalize__(self, method="interpolate") + + # ---------------------------------------------------------------------- + # Timeseries methods Methods + + @final + def asof(self, where, subset=None): + """ + Return the last row(s) without any NaNs before `where`. + + The last row (for each element in `where`, if list) without any + NaN is taken. + In case of a :class:`~pandas.DataFrame`, the last row without NaN + considering only the subset of columns (if not `None`) + + If there is no good value, NaN is returned for a Series or + a Series of NaN values for a DataFrame + + Parameters + ---------- + where : date or array-like of dates + Date(s) before which the last row(s) are returned. + subset : str or array-like of str, default `None` + For DataFrame, if not `None`, only use these columns to + check for NaNs. + + Returns + ------- + scalar, Series, or DataFrame + + The return can be: + + * scalar : when `self` is a Series and `where` is a scalar + * Series: when `self` is a Series and `where` is an array-like, + or when `self` is a DataFrame and `where` is a scalar + * DataFrame : when `self` is a DataFrame and `where` is an + array-like + + Return scalar, Series, or DataFrame. + + See Also + -------- + merge_asof : Perform an asof merge. Similar to left join. + + Notes + ----- + Dates are assumed to be sorted. Raises if this is not the case. + + Examples + -------- + A Series and a scalar `where`. + + >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) + >>> s + 10 1.0 + 20 2.0 + 30 NaN + 40 4.0 + dtype: float64 + + >>> s.asof(20) + 2.0 + + For a sequence `where`, a Series is returned. The first value is + NaN, because the first element of `where` is before the first + index value. + + >>> s.asof([5, 20]) + 5 NaN + 20 2.0 + dtype: float64 + + Missing values are not considered. The following is ``2.0``, not + NaN, even though NaN is at the index location for ``30``. + + >>> s.asof(30) + 2.0 + + Take all columns into consideration + + >>> df = pd.DataFrame({'a': [10., 20., 30., 40., 50.], + ... 'b': [None, None, None, None, 500]}, + ... index=pd.DatetimeIndex(['2018-02-27 09:01:00', + ... '2018-02-27 09:02:00', + ... '2018-02-27 09:03:00', + ... '2018-02-27 09:04:00', + ... '2018-02-27 09:05:00'])) + >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', + ... '2018-02-27 09:04:30'])) + a b + 2018-02-27 09:03:30 NaN NaN + 2018-02-27 09:04:30 NaN NaN + + Take a single column into consideration + + >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', + ... '2018-02-27 09:04:30']), + ... 
subset=['a']) + a b + 2018-02-27 09:03:30 30.0 NaN + 2018-02-27 09:04:30 40.0 NaN + """ + if isinstance(where, str): + where = Timestamp(where) + + if not self.index.is_monotonic_increasing: + raise ValueError("asof requires a sorted index") + + is_series = isinstance(self, ABCSeries) + if is_series: + if subset is not None: + raise ValueError("subset is not valid for Series") + else: + if subset is None: + subset = self.columns + if not is_list_like(subset): + subset = [subset] + + is_list = is_list_like(where) + if not is_list: + start = self.index[0] + if isinstance(self.index, PeriodIndex): + where = Period(where, freq=self.index.freq) + + if where < start: + if not is_series: + return self._constructor_sliced( + index=self.columns, name=where, dtype=np.float64 + ) + return np.nan + + # It's always much faster to use a *while* loop here for + # Series than pre-computing all the NAs. However a + # *while* loop is extremely expensive for DataFrame + # so we later pre-compute all the NAs and use the same + # code path whether *where* is a scalar or list. + # See PR: https://github.com/pandas-dev/pandas/pull/14476 + if is_series: + loc = self.index.searchsorted(where, side="right") + if loc > 0: + loc -= 1 + + values = self._values + while loc > 0 and isna(values[loc]): + loc -= 1 + return values[loc] + + if not isinstance(where, Index): + where = Index(where) if is_list else Index([where]) + + nulls = self.isna() if is_series else self[subset].isna().any(axis=1) + if nulls.all(): + if is_series: + self = cast("Series", self) + return self._constructor(np.nan, index=where, name=self.name) + elif is_list: + self = cast("DataFrame", self) + return self._constructor(np.nan, index=where, columns=self.columns) + else: + self = cast("DataFrame", self) + return self._constructor_sliced( + np.nan, index=self.columns, name=where[0] + ) + + locs = self.index.asof_locs(where, ~(nulls._values)) + + # mask the missing + mask = locs == -1 + data = self.take(locs) + data.index = where + if mask.any(): + # GH#16063 only do this setting when necessary, otherwise + # we'd cast e.g. bools to floats + data.loc[mask] = np.nan + return data if is_list else data.iloc[-1] + + # ---------------------------------------------------------------------- + # Action Methods + + @doc(klass=_shared_doc_kwargs["klass"]) + def isna(self) -> Self: + """ + Detect missing values. + + Return a boolean same-sized object indicating if the values are NA. + NA values, such as None or :attr:`numpy.NaN`, gets mapped to True + values. + Everything else gets mapped to False values. Characters such as empty + strings ``''`` or :attr:`numpy.inf` are not considered NA values + (unless you set ``pandas.options.mode.use_inf_as_na = True``). + + Returns + ------- + {klass} + Mask of bool values for each element in {klass} that + indicates whether an element is an NA value. + + See Also + -------- + {klass}.isnull : Alias of isna. + {klass}.notna : Boolean inverse of isna. + {klass}.dropna : Omit axes labels with missing values. + isna : Top-level isna. + + Examples + -------- + Show which entries in a DataFrame are NA. + + >>> df = pd.DataFrame(dict(age=[5, 6, np.nan], + ... born=[pd.NaT, pd.Timestamp('1939-05-27'), + ... pd.Timestamp('1940-04-25')], + ... name=['Alfred', 'Batman', ''], + ... 
toy=[None, 'Batmobile', 'Joker'])) + >>> df + age born name toy + 0 5.0 NaT Alfred None + 1 6.0 1939-05-27 Batman Batmobile + 2 NaN 1940-04-25 Joker + + >>> df.isna() + age born name toy + 0 False True False True + 1 False False False False + 2 True False False False + + Show which entries in a Series are NA. + + >>> ser = pd.Series([5, 6, np.nan]) + >>> ser + 0 5.0 + 1 6.0 + 2 NaN + dtype: float64 + + >>> ser.isna() + 0 False + 1 False + 2 True + dtype: bool + """ + return isna(self).__finalize__(self, method="isna") + + @doc(isna, klass=_shared_doc_kwargs["klass"]) + def isnull(self) -> Self: + return isna(self).__finalize__(self, method="isnull") + + @doc(klass=_shared_doc_kwargs["klass"]) + def notna(self) -> Self: + """ + Detect existing (non-missing) values. + + Return a boolean same-sized object indicating if the values are not NA. + Non-missing values get mapped to True. Characters such as empty + strings ``''`` or :attr:`numpy.inf` are not considered NA values + (unless you set ``pandas.options.mode.use_inf_as_na = True``). + NA values, such as None or :attr:`numpy.NaN`, get mapped to False + values. + + Returns + ------- + {klass} + Mask of bool values for each element in {klass} that + indicates whether an element is not an NA value. + + See Also + -------- + {klass}.notnull : Alias of notna. + {klass}.isna : Boolean inverse of notna. + {klass}.dropna : Omit axes labels with missing values. + notna : Top-level notna. + + Examples + -------- + Show which entries in a DataFrame are not NA. + + >>> df = pd.DataFrame(dict(age=[5, 6, np.nan], + ... born=[pd.NaT, pd.Timestamp('1939-05-27'), + ... pd.Timestamp('1940-04-25')], + ... name=['Alfred', 'Batman', ''], + ... toy=[None, 'Batmobile', 'Joker'])) + >>> df + age born name toy + 0 5.0 NaT Alfred None + 1 6.0 1939-05-27 Batman Batmobile + 2 NaN 1940-04-25 Joker + + >>> df.notna() + age born name toy + 0 True False True False + 1 True True True True + 2 False True True True + + Show which entries in a Series are not NA. + + >>> ser = pd.Series([5, 6, np.nan]) + >>> ser + 0 5.0 + 1 6.0 + 2 NaN + dtype: float64 + + >>> ser.notna() + 0 True + 1 True + 2 False + dtype: bool + """ + return notna(self).__finalize__(self, method="notna") + + @doc(notna, klass=_shared_doc_kwargs["klass"]) + def notnull(self) -> Self: + return notna(self).__finalize__(self, method="notnull") + + @final + def _clip_with_scalar(self, lower, upper, inplace: bool_t = False): + if (lower is not None and np.any(isna(lower))) or ( + upper is not None and np.any(isna(upper)) + ): + raise ValueError("Cannot use an NA value as a clip threshold") + + result = self + mask = self.isna() + + if lower is not None: + cond = mask | (self >= lower) + result = result.where( + cond, lower, inplace=inplace + ) # type: ignore[assignment] + if upper is not None: + cond = mask | (self <= upper) + result = self if inplace else result + result = result.where( + cond, upper, inplace=inplace + ) # type: ignore[assignment] + + return result + + @final + def _clip_with_one_bound(self, threshold, method, axis, inplace): + if axis is not None: + axis = self._get_axis_number(axis) + + # method is self.le for upper bound and self.ge for lower bound + if is_scalar(threshold) and is_number(threshold): + if method.__name__ == "le": + return self._clip_with_scalar(None, threshold, inplace=inplace) + return self._clip_with_scalar(threshold, None, inplace=inplace) + + # GH #15390 + # In order for where method to work, the threshold must + # be transformed to NDFrame from other array like structure. 
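+        # Editor's example (hedged): a bare list such as
+        # df.clip(lower=[0, 1, 2], axis=0) reaches this point and is aligned
+        # into a pandas object below so that ``where`` can broadcast it.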
+ if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold): + if isinstance(self, ABCSeries): + threshold = self._constructor(threshold, index=self.index) + else: + threshold = self._align_for_op(threshold, axis, flex=None)[1] + + # GH 40420 + # Treat missing thresholds as no bounds, not clipping the values + if is_list_like(threshold): + fill_value = np.inf if method.__name__ == "le" else -np.inf + threshold_inf = threshold.fillna(fill_value) + else: + threshold_inf = threshold + + subset = method(threshold_inf, axis=axis) | isna(self) + + # GH 40420 + return self.where(subset, threshold, axis=axis, inplace=inplace) + + @final + def clip( + self, + lower=None, + upper=None, + *, + axis: Axis | None = None, + inplace: bool_t = False, + **kwargs, + ) -> Self | None: + """ + Trim values at input threshold(s). + + Assigns values outside boundary to boundary values. Thresholds + can be singular values or array like, and in the latter case + the clipping is performed element-wise in the specified axis. + + Parameters + ---------- + lower : float or array-like, default None + Minimum threshold value. All values below this + threshold will be set to it. A missing + threshold (e.g `NA`) will not clip the value. + upper : float or array-like, default None + Maximum threshold value. All values above this + threshold will be set to it. A missing + threshold (e.g `NA`) will not clip the value. + axis : {{0 or 'index', 1 or 'columns', None}}, default None + Align object with lower and upper along the given axis. + For `Series` this parameter is unused and defaults to `None`. + inplace : bool, default False + Whether to perform the operation in place on the data. + *args, **kwargs + Additional keywords have no effect but might be accepted + for compatibility with numpy. + + Returns + ------- + Series or DataFrame or None + Same type as calling object with the values outside the + clip boundaries replaced or None if ``inplace=True``. + + See Also + -------- + Series.clip : Trim values at input threshold in series. + DataFrame.clip : Trim values at input threshold in dataframe. + numpy.clip : Clip (limit) the values in an array. 
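+
+        Notes
+        -----
+        Editor's sketch (not normative): with numeric scalar bounds, ``clip``
+        acts like two chained ``where`` calls, roughly
+        ``self.where(self >= lower, lower).where(self <= upper, upper)``,
+        leaving NaN entries untouched.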
+ + Examples + -------- + >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]} + >>> df = pd.DataFrame(data) + >>> df + col_0 col_1 + 0 9 -2 + 1 -3 -7 + 2 0 6 + 3 -1 8 + 4 5 -5 + + Clips per column using lower and upper thresholds: + + >>> df.clip(-4, 6) + col_0 col_1 + 0 6 -2 + 1 -3 -4 + 2 0 6 + 3 -1 6 + 4 5 -4 + + Clips using specific lower and upper thresholds per column element: + + >>> t = pd.Series([2, -4, -1, 6, 3]) + >>> t + 0 2 + 1 -4 + 2 -1 + 3 6 + 4 3 + dtype: int64 + + >>> df.clip(t, t + 4, axis=0) + col_0 col_1 + 0 6 2 + 1 -3 -4 + 2 0 3 + 3 6 8 + 4 5 3 + + Clips using specific lower threshold per column element, with missing values: + + >>> t = pd.Series([2, -4, np.nan, 6, 3]) + >>> t + 0 2.0 + 1 -4.0 + 2 NaN + 3 6.0 + 4 3.0 + dtype: float64 + + >>> df.clip(t, axis=0) + col_0 col_1 + 0 9 2 + 1 -3 -4 + 2 0 6 + 3 6 8 + 4 5 3 + """ + inplace = validate_bool_kwarg(inplace, "inplace") + + if inplace: + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= REF_COUNT: + warnings.warn( + _chained_assignment_method_msg, + ChainedAssignmentError, + stacklevel=2, + ) + + axis = nv.validate_clip_with_axis(axis, (), kwargs) + if axis is not None: + axis = self._get_axis_number(axis) + + # GH 17276 + # numpy doesn't like NaN as a clip value + # so ignore + # GH 19992 + # numpy doesn't drop a list-like bound containing NaN + isna_lower = isna(lower) + if not is_list_like(lower): + if np.any(isna_lower): + lower = None + elif np.all(isna_lower): + lower = None + isna_upper = isna(upper) + if not is_list_like(upper): + if np.any(isna_upper): + upper = None + elif np.all(isna_upper): + upper = None + + # GH 2747 (arguments were reversed) + if ( + lower is not None + and upper is not None + and is_scalar(lower) + and is_scalar(upper) + ): + lower, upper = min(lower, upper), max(lower, upper) + + # fast-path for scalars + if (lower is None or is_number(lower)) and (upper is None or is_number(upper)): + return self._clip_with_scalar(lower, upper, inplace=inplace) + + result = self + if lower is not None: + result = result._clip_with_one_bound( + lower, method=self.ge, axis=axis, inplace=inplace + ) + if upper is not None: + if inplace: + result = self + result = result._clip_with_one_bound( + upper, method=self.le, axis=axis, inplace=inplace + ) + + return result + + @final + @doc(klass=_shared_doc_kwargs["klass"]) + def asfreq( + self, + freq: Frequency, + method: FillnaOptions | None = None, + how: Literal["start", "end"] | None = None, + normalize: bool_t = False, + fill_value: Hashable | None = None, + ) -> Self: + """ + Convert time series to specified frequency. + + Returns the original data conformed to a new index with the specified + frequency. + + If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index + is the result of transforming the original index with + :meth:`PeriodIndex.asfreq ` (so the original index + will map one-to-one to the new index). + + Otherwise, the new index will be equivalent to ``pd.date_range(start, end, + freq=freq)`` where ``start`` and ``end`` are, respectively, the first and + last entries in the original index (see :func:`pandas.date_range`). The + values corresponding to any timesteps in the new index which were not present + in the original index will be null (``NaN``), unless a method for filling + such unknowns is provided (see the ``method`` parameter below). 
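+
+        For example (editor's illustration), upsampling a daily series to an
+        hourly frequency introduces 23 new null rows per day unless ``method``
+        or ``fill_value`` is supplied.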
+ + The :meth:`resample` method is more appropriate if an operation on each group of + timesteps (such as an aggregate) is necessary to represent the data at the new + frequency. + + Parameters + ---------- + freq : DateOffset or str + Frequency DateOffset or string. + method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None + Method to use for filling holes in reindexed Series (note this + does not fill NaNs that already were present): + + * 'pad' / 'ffill': propagate last valid observation forward to next + valid + * 'backfill' / 'bfill': use NEXT valid observation to fill. + how : {{'start', 'end'}}, default end + For PeriodIndex only (see PeriodIndex.asfreq). + normalize : bool, default False + Whether to reset output index to midnight. + fill_value : scalar, optional + Value to use for missing values, applied during upsampling (note + this does not fill NaNs that already were present). + + Returns + ------- + {klass} + {klass} object reindexed to the specified frequency. + + See Also + -------- + reindex : Conform DataFrame to new index with optional filling logic. + + Notes + ----- + To learn more about the frequency strings, please see `this link + `__. + + Examples + -------- + Start by creating a series with 4 one minute timestamps. + + >>> index = pd.date_range('1/1/2000', periods=4, freq='T') + >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) + >>> df = pd.DataFrame({{'s': series}}) + >>> df + s + 2000-01-01 00:00:00 0.0 + 2000-01-01 00:01:00 NaN + 2000-01-01 00:02:00 2.0 + 2000-01-01 00:03:00 3.0 + + Upsample the series into 30 second bins. + + >>> df.asfreq(freq='30S') + s + 2000-01-01 00:00:00 0.0 + 2000-01-01 00:00:30 NaN + 2000-01-01 00:01:00 NaN + 2000-01-01 00:01:30 NaN + 2000-01-01 00:02:00 2.0 + 2000-01-01 00:02:30 NaN + 2000-01-01 00:03:00 3.0 + + Upsample again, providing a ``fill value``. + + >>> df.asfreq(freq='30S', fill_value=9.0) + s + 2000-01-01 00:00:00 0.0 + 2000-01-01 00:00:30 9.0 + 2000-01-01 00:01:00 NaN + 2000-01-01 00:01:30 9.0 + 2000-01-01 00:02:00 2.0 + 2000-01-01 00:02:30 9.0 + 2000-01-01 00:03:00 3.0 + + Upsample again, providing a ``method``. + + >>> df.asfreq(freq='30S', method='bfill') + s + 2000-01-01 00:00:00 0.0 + 2000-01-01 00:00:30 NaN + 2000-01-01 00:01:00 NaN + 2000-01-01 00:01:30 2.0 + 2000-01-01 00:02:00 2.0 + 2000-01-01 00:02:30 3.0 + 2000-01-01 00:03:00 3.0 + """ + from pandas.core.resample import asfreq + + return asfreq( + self, + freq, + method=method, + how=how, + normalize=normalize, + fill_value=fill_value, + ) + + @final + def at_time(self, time, asof: bool_t = False, axis: Axis | None = None) -> Self: + """ + Select values at particular time of day (e.g., 9:30AM). + + Parameters + ---------- + time : datetime.time or str + The values to select. + axis : {0 or 'index', 1 or 'columns'}, default 0 + For `Series` this parameter is unused and defaults to 0. + + Returns + ------- + Series or DataFrame + + Raises + ------ + TypeError + If the index is not a :class:`DatetimeIndex` + + See Also + -------- + between_time : Select values between particular times of the day. + first : Select initial periods of time series based on a date offset. + last : Select final periods of time series based on a date offset. + DatetimeIndex.indexer_at_time : Get just the index locations for + values at particular time of the day. 
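+
+        Notes
+        -----
+        Editor's note: matching is exact; ``ts.at_time('12:00')`` selects rows
+        stamped at exactly 12:00:00, not a window around that time.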
+ + Examples + -------- + >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') + >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) + >>> ts + A + 2018-04-09 00:00:00 1 + 2018-04-09 12:00:00 2 + 2018-04-10 00:00:00 3 + 2018-04-10 12:00:00 4 + + >>> ts.at_time('12:00') + A + 2018-04-09 12:00:00 2 + 2018-04-10 12:00:00 4 + """ + if axis is None: + axis = 0 + axis = self._get_axis_number(axis) + + index = self._get_axis(axis) + + if not isinstance(index, DatetimeIndex): + raise TypeError("Index must be DatetimeIndex") + + indexer = index.indexer_at_time(time, asof=asof) + return self._take_with_is_copy(indexer, axis=axis) + + @final + def between_time( + self, + start_time, + end_time, + inclusive: IntervalClosedType = "both", + axis: Axis | None = None, + ) -> Self: + """ + Select values between particular times of the day (e.g., 9:00-9:30 AM). + + By setting ``start_time`` to be later than ``end_time``, + you can get the times that are *not* between the two times. + + Parameters + ---------- + start_time : datetime.time or str + Initial time as a time filter limit. + end_time : datetime.time or str + End time as a time filter limit. + inclusive : {"both", "neither", "left", "right"}, default "both" + Include boundaries; whether to set each bound as closed or open. + axis : {0 or 'index', 1 or 'columns'}, default 0 + Determine range time on index or columns value. + For `Series` this parameter is unused and defaults to 0. + + Returns + ------- + Series or DataFrame + Data from the original object filtered to the specified dates range. + + Raises + ------ + TypeError + If the index is not a :class:`DatetimeIndex` + + See Also + -------- + at_time : Select values at a particular time of the day. + first : Select initial periods of time series based on a date offset. + last : Select final periods of time series based on a date offset. + DatetimeIndex.indexer_between_time : Get just the index locations for + values between particular times of the day. 
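+
+        Notes
+        -----
+        Editor's note: ``inclusive`` only affects the endpoints; for example
+        ``inclusive="left"`` keeps rows at exactly ``start_time`` but drops
+        rows at exactly ``end_time``.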
+ + Examples + -------- + >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min') + >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) + >>> ts + A + 2018-04-09 00:00:00 1 + 2018-04-10 00:20:00 2 + 2018-04-11 00:40:00 3 + 2018-04-12 01:00:00 4 + + >>> ts.between_time('0:15', '0:45') + A + 2018-04-10 00:20:00 2 + 2018-04-11 00:40:00 3 + + You get the times that are *not* between two times by setting + ``start_time`` later than ``end_time``: + + >>> ts.between_time('0:45', '0:15') + A + 2018-04-09 00:00:00 1 + 2018-04-12 01:00:00 4 + """ + if axis is None: + axis = 0 + axis = self._get_axis_number(axis) + + index = self._get_axis(axis) + if not isinstance(index, DatetimeIndex): + raise TypeError("Index must be DatetimeIndex") + + left_inclusive, right_inclusive = validate_inclusive(inclusive) + indexer = index.indexer_between_time( + start_time, + end_time, + include_start=left_inclusive, + include_end=right_inclusive, + ) + return self._take_with_is_copy(indexer, axis=axis) + + @final + @doc(klass=_shared_doc_kwargs["klass"]) + def resample( + self, + rule, + axis: Axis | lib.NoDefault = lib.no_default, + closed: Literal["right", "left"] | None = None, + label: Literal["right", "left"] | None = None, + convention: Literal["start", "end", "s", "e"] = "start", + kind: Literal["timestamp", "period"] | None = None, + on: Level | None = None, + level: Level | None = None, + origin: str | TimestampConvertibleTypes = "start_day", + offset: TimedeltaConvertibleTypes | None = None, + group_keys: bool_t = False, + ) -> Resampler: + """ + Resample time-series data. + + Convenience method for frequency conversion and resampling of time series. + The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, + or `TimedeltaIndex`), or the caller must pass the label of a datetime-like + series/index to the ``on``/``level`` keyword parameter. + + Parameters + ---------- + rule : DateOffset, Timedelta or str + The offset string or object representing target conversion. + axis : {{0 or 'index', 1 or 'columns'}}, default 0 + Which axis to use for up- or down-sampling. For `Series` this parameter + is unused and defaults to 0. Must be + `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. + + .. deprecated:: 2.0.0 + Use frame.T.resample(...) instead. + closed : {{'right', 'left'}}, default None + Which side of bin interval is closed. The default is 'left' + for all frequency offsets except for 'M', 'A', 'Q', 'BM', + 'BA', 'BQ', and 'W' which all have a default of 'right'. + label : {{'right', 'left'}}, default None + Which bin edge label to label bucket with. The default is 'left' + for all frequency offsets except for 'M', 'A', 'Q', 'BM', + 'BA', 'BQ', and 'W' which all have a default of 'right'. + convention : {{'start', 'end', 's', 'e'}}, default 'start' + For `PeriodIndex` only, controls whether to use the start or + end of `rule`. + kind : {{'timestamp', 'period'}}, optional, default None + Pass 'timestamp' to convert the resulting index to a + `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. + By default the input representation is retained. + + on : str, optional + For a DataFrame, column to use instead of index for resampling. + Column must be datetime-like. + level : str or int, optional + For a MultiIndex, level (name or number) to use for + resampling. `level` must be datetime-like. + origin : Timestamp or str, default 'start_day' + The timestamp on which to adjust the grouping. The timezone of origin + must match the timezone of the index. 
+ If string, must be one of the following: + + - 'epoch': `origin` is 1970-01-01 + - 'start': `origin` is the first value of the timeseries + - 'start_day': `origin` is the first day at midnight of the timeseries + + - 'end': `origin` is the last value of the timeseries + - 'end_day': `origin` is the ceiling midnight of the last day + + .. versionadded:: 1.3.0 + + .. note:: + + Only takes effect for Tick-frequencies (i.e. fixed frequencies like + days, hours, and minutes, rather than months or quarters). + offset : Timedelta or str, default is None + An offset timedelta added to the origin. + + group_keys : bool, default False + Whether to include the group keys in the result index when using + ``.apply()`` on the resampled object. + + .. versionadded:: 1.5.0 + + Not specifying ``group_keys`` will retain values-dependent behavior + from pandas 1.4 and earlier (see :ref:`pandas 1.5.0 Release notes + ` for examples). + + .. versionchanged:: 2.0.0 + + ``group_keys`` now defaults to ``False``. + + Returns + ------- + pandas.api.typing.Resampler + :class:`~pandas.core.Resampler` object. + + See Also + -------- + Series.resample : Resample a Series. + DataFrame.resample : Resample a DataFrame. + groupby : Group {klass} by mapping, function, label, or list of labels. + asfreq : Reindex a {klass} with the given frequency without grouping. + + Notes + ----- + See the `user guide + `__ + for more. + + To learn more about the offset strings, please see `this link + `__. + + Examples + -------- + Start by creating a series with 9 one minute timestamps. + + >>> index = pd.date_range('1/1/2000', periods=9, freq='T') + >>> series = pd.Series(range(9), index=index) + >>> series + 2000-01-01 00:00:00 0 + 2000-01-01 00:01:00 1 + 2000-01-01 00:02:00 2 + 2000-01-01 00:03:00 3 + 2000-01-01 00:04:00 4 + 2000-01-01 00:05:00 5 + 2000-01-01 00:06:00 6 + 2000-01-01 00:07:00 7 + 2000-01-01 00:08:00 8 + Freq: T, dtype: int64 + + Downsample the series into 3 minute bins and sum the values + of the timestamps falling into a bin. + + >>> series.resample('3T').sum() + 2000-01-01 00:00:00 3 + 2000-01-01 00:03:00 12 + 2000-01-01 00:06:00 21 + Freq: 3T, dtype: int64 + + Downsample the series into 3 minute bins as above, but label each + bin using the right edge instead of the left. Please note that the + value in the bucket used as the label is not included in the bucket, + which it labels. For example, in the original series the + bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed + value in the resampled bucket with the label ``2000-01-01 00:03:00`` + does not include 3 (if it did, the summed value would be 6, not 3). + To include this value close the right side of the bin interval as + illustrated in the example below this one. + + >>> series.resample('3T', label='right').sum() + 2000-01-01 00:03:00 3 + 2000-01-01 00:06:00 12 + 2000-01-01 00:09:00 21 + Freq: 3T, dtype: int64 + + Downsample the series into 3 minute bins as above, but close the right + side of the bin interval. + + >>> series.resample('3T', label='right', closed='right').sum() + 2000-01-01 00:00:00 0 + 2000-01-01 00:03:00 6 + 2000-01-01 00:06:00 15 + 2000-01-01 00:09:00 15 + Freq: 3T, dtype: int64 + + Upsample the series into 30 second bins. 
+ + >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows + 2000-01-01 00:00:00 0.0 + 2000-01-01 00:00:30 NaN + 2000-01-01 00:01:00 1.0 + 2000-01-01 00:01:30 NaN + 2000-01-01 00:02:00 2.0 + Freq: 30S, dtype: float64 + + Upsample the series into 30 second bins and fill the ``NaN`` + values using the ``ffill`` method. + + >>> series.resample('30S').ffill()[0:5] + 2000-01-01 00:00:00 0 + 2000-01-01 00:00:30 0 + 2000-01-01 00:01:00 1 + 2000-01-01 00:01:30 1 + 2000-01-01 00:02:00 2 + Freq: 30S, dtype: int64 + + Upsample the series into 30 second bins and fill the + ``NaN`` values using the ``bfill`` method. + + >>> series.resample('30S').bfill()[0:5] + 2000-01-01 00:00:00 0 + 2000-01-01 00:00:30 1 + 2000-01-01 00:01:00 1 + 2000-01-01 00:01:30 2 + 2000-01-01 00:02:00 2 + Freq: 30S, dtype: int64 + + Pass a custom function via ``apply`` + + >>> def custom_resampler(arraylike): + ... return np.sum(arraylike) + 5 + ... + >>> series.resample('3T').apply(custom_resampler) + 2000-01-01 00:00:00 8 + 2000-01-01 00:03:00 17 + 2000-01-01 00:06:00 26 + Freq: 3T, dtype: int64 + + For a Series with a PeriodIndex, the keyword `convention` can be + used to control whether to use the start or end of `rule`. + + Resample a year by quarter using 'start' `convention`. Values are + assigned to the first quarter of the period. + + >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', + ... freq='A', + ... periods=2)) + >>> s + 2012 1 + 2013 2 + Freq: A-DEC, dtype: int64 + >>> s.resample('Q', convention='start').asfreq() + 2012Q1 1.0 + 2012Q2 NaN + 2012Q3 NaN + 2012Q4 NaN + 2013Q1 2.0 + 2013Q2 NaN + 2013Q3 NaN + 2013Q4 NaN + Freq: Q-DEC, dtype: float64 + + Resample quarters by month using 'end' `convention`. Values are + assigned to the last month of the period. + + >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', + ... freq='Q', + ... periods=4)) + >>> q + 2018Q1 1 + 2018Q2 2 + 2018Q3 3 + 2018Q4 4 + Freq: Q-DEC, dtype: int64 + >>> q.resample('M', convention='end').asfreq() + 2018-03 1.0 + 2018-04 NaN + 2018-05 NaN + 2018-06 2.0 + 2018-07 NaN + 2018-08 NaN + 2018-09 3.0 + 2018-10 NaN + 2018-11 NaN + 2018-12 4.0 + Freq: M, dtype: float64 + + For DataFrame objects, the keyword `on` can be used to specify the + column instead of the index for resampling. + + >>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], + ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} + >>> df = pd.DataFrame(d) + >>> df['week_starting'] = pd.date_range('01/01/2018', + ... periods=8, + ... freq='W') + >>> df + price volume week_starting + 0 10 50 2018-01-07 + 1 11 60 2018-01-14 + 2 9 40 2018-01-21 + 3 13 100 2018-01-28 + 4 14 50 2018-02-04 + 5 18 100 2018-02-11 + 6 17 40 2018-02-18 + 7 19 50 2018-02-25 + >>> df.resample('M', on='week_starting').mean() + price volume + week_starting + 2018-01-31 10.75 62.5 + 2018-02-28 17.00 60.0 + + For a DataFrame with MultiIndex, the keyword `level` can be used to + specify on which level the resampling needs to take place. + + >>> days = pd.date_range('1/1/2000', periods=4, freq='D') + >>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], + ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} + >>> df2 = pd.DataFrame( + ... d2, + ... index=pd.MultiIndex.from_product( + ... [days, ['morning', 'afternoon']] + ... ) + ... 
) + >>> df2 + price volume + 2000-01-01 morning 10 50 + afternoon 11 60 + 2000-01-02 morning 9 40 + afternoon 13 100 + 2000-01-03 morning 14 50 + afternoon 18 100 + 2000-01-04 morning 17 40 + afternoon 19 50 + >>> df2.resample('D', level=0).sum() + price volume + 2000-01-01 21 110 + 2000-01-02 22 140 + 2000-01-03 32 150 + 2000-01-04 36 90 + + If you want to adjust the start of the bins based on a fixed timestamp: + + >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' + >>> rng = pd.date_range(start, end, freq='7min') + >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) + >>> ts + 2000-10-01 23:30:00 0 + 2000-10-01 23:37:00 3 + 2000-10-01 23:44:00 6 + 2000-10-01 23:51:00 9 + 2000-10-01 23:58:00 12 + 2000-10-02 00:05:00 15 + 2000-10-02 00:12:00 18 + 2000-10-02 00:19:00 21 + 2000-10-02 00:26:00 24 + Freq: 7T, dtype: int64 + + >>> ts.resample('17min').sum() + 2000-10-01 23:14:00 0 + 2000-10-01 23:31:00 9 + 2000-10-01 23:48:00 21 + 2000-10-02 00:05:00 54 + 2000-10-02 00:22:00 24 + Freq: 17T, dtype: int64 + + >>> ts.resample('17min', origin='epoch').sum() + 2000-10-01 23:18:00 0 + 2000-10-01 23:35:00 18 + 2000-10-01 23:52:00 27 + 2000-10-02 00:09:00 39 + 2000-10-02 00:26:00 24 + Freq: 17T, dtype: int64 + + >>> ts.resample('17min', origin='2000-01-01').sum() + 2000-10-01 23:24:00 3 + 2000-10-01 23:41:00 15 + 2000-10-01 23:58:00 45 + 2000-10-02 00:15:00 45 + Freq: 17T, dtype: int64 + + If you want to adjust the start of the bins with an `offset` Timedelta, the two + following lines are equivalent: + + >>> ts.resample('17min', origin='start').sum() + 2000-10-01 23:30:00 9 + 2000-10-01 23:47:00 21 + 2000-10-02 00:04:00 54 + 2000-10-02 00:21:00 24 + Freq: 17T, dtype: int64 + + >>> ts.resample('17min', offset='23h30min').sum() + 2000-10-01 23:30:00 9 + 2000-10-01 23:47:00 21 + 2000-10-02 00:04:00 54 + 2000-10-02 00:21:00 24 + Freq: 17T, dtype: int64 + + If you want to take the largest Timestamp as the end of the bins: + + >>> ts.resample('17min', origin='end').sum() + 2000-10-01 23:35:00 0 + 2000-10-01 23:52:00 18 + 2000-10-02 00:09:00 27 + 2000-10-02 00:26:00 63 + Freq: 17T, dtype: int64 + + In contrast with the `start_day`, you can use `end_day` to take the ceiling + midnight of the largest Timestamp as the end of the bins and drop the bins + not containing data: + + >>> ts.resample('17min', origin='end_day').sum() + 2000-10-01 23:38:00 3 + 2000-10-01 23:55:00 15 + 2000-10-02 00:12:00 45 + 2000-10-02 00:29:00 45 + Freq: 17T, dtype: int64 + """ + from pandas.core.resample import get_resampler + + if axis is not lib.no_default: + axis = self._get_axis_number(axis) + if axis == 1: + warnings.warn( + "DataFrame.resample with axis=1 is deprecated. Do " + "`frame.T.resample(...)` without axis instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + warnings.warn( + f"The 'axis' keyword in {type(self).__name__}.resample is " + "deprecated and will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + axis = 0 + + return get_resampler( + cast("Series | DataFrame", self), + freq=rule, + label=label, + closed=closed, + axis=axis, + kind=kind, + convention=convention, + key=on, + level=level, + origin=origin, + offset=offset, + group_keys=group_keys, + ) + + @final + def first(self, offset) -> Self: + """ + Select initial periods of time series data based on a date offset. + + .. deprecated:: 2.1 + :meth:`.first` is deprecated and will be removed in a future version. + Please create a mask and filter using `.loc` instead. 
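+
+        For fixed (tick) offsets, a roughly equivalent mask (editor's sketch,
+        boundary handling may differ for anchored offsets) is
+        ``ts.loc[ts.index < ts.index[0] + pd.Timedelta("3D")]``.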
+
+        For a DataFrame with a sorted DatetimeIndex, this function can
+        select the first few rows based on a date offset.
+
+        Parameters
+        ----------
+        offset : str, DateOffset or dateutil.relativedelta
+            The offset length of the data that will be selected. For instance,
+            '1M' will display all the rows having their index within the first month.
+
+        Returns
+        -------
+        Series or DataFrame
+            A subset of the caller.
+
+        Raises
+        ------
+        TypeError
+            If the index is not a :class:`DatetimeIndex`.
+
+        See Also
+        --------
+        last : Select final periods of time series based on a date offset.
+        at_time : Select values at a particular time of the day.
+        between_time : Select values between particular times of the day.
+
+        Examples
+        --------
+        >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
+        >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
+        >>> ts
+                    A
+        2018-04-09  1
+        2018-04-11  2
+        2018-04-13  3
+        2018-04-15  4
+
+        Get the rows for the first 3 days:
+
+        >>> ts.first('3D')
+                    A
+        2018-04-09  1
+        2018-04-11  2
+
+        Notice that the data for the first 3 calendar days were returned, not
+        the first 3 days observed in the dataset, and therefore data for
+        2018-04-13 was not returned.
+        """
+        warnings.warn(
+            "first is deprecated and will be removed in a future version. "
+            "Please create a mask and filter using `.loc` instead",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        if not isinstance(self.index, DatetimeIndex):
+            raise TypeError("'first' only supports a DatetimeIndex index")
+
+        if len(self.index) == 0:
+            return self.copy(deep=False)
+
+        offset = to_offset(offset)
+        if not isinstance(offset, Tick) and offset.is_on_offset(self.index[0]):
+            # GH#29623 if first value is end of period, remove offset with n = 1
+            # before adding the real offset
+            end_date = end = self.index[0] - offset.base + offset
+        else:
+            end_date = end = self.index[0] + offset
+
+        # Tick-like, e.g. 3 weeks
+        if isinstance(offset, Tick) and end_date in self.index:
+            end = self.index.searchsorted(end_date, side="left")
+            return self.iloc[:end]
+
+        return self.loc[:end]
+
+    @final
+    def last(self, offset) -> Self:
+        """
+        Select final periods of time series data based on a date offset.
+
+        .. deprecated:: 2.1
+            :meth:`.last` is deprecated and will be removed in a future version.
+            Please create a mask and filter using `.loc` instead.
+
+        For a DataFrame with a sorted DatetimeIndex, this function
+        selects the last few rows based on a date offset.
+
+        Parameters
+        ----------
+        offset : str, DateOffset, dateutil.relativedelta
+            The offset length of the data that will be selected. For instance,
+            '3D' will display all the rows having their index within the last 3 days.
+
+        Returns
+        -------
+        Series or DataFrame
+            A subset of the caller.
+
+        Raises
+        ------
+        TypeError
+            If the index is not a :class:`DatetimeIndex`.
+
+        See Also
+        --------
+        first : Select initial periods of time series based on a date offset.
+        at_time : Select values at a particular time of the day.
+        between_time : Select values between particular times of the day.
+
+        Notes
+        -----
+        .. deprecated:: 2.1.0
+            Please create a mask and filter using `.loc` instead
+
+        Examples
+        --------
+        >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
+        >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
+        >>> ts
+                    A
+        2018-04-09  1
+        2018-04-11  2
+        2018-04-13  3
+        2018-04-15  4
+
+        Get the rows for the last 3 days:
+
+        >>> ts.last('3D')  # doctest: +SKIP
+                    A
+        2018-04-13  3
+        2018-04-15  4
+
+        Notice that the data for the last 3 calendar days were returned, not
+        the last 3 observed days in the dataset, and therefore data for
+        2018-04-11 was not returned.
+        """
+        warnings.warn(
+            "last is deprecated and will be removed in a future version. "
+            "Please create a mask and filter using `.loc` instead",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+
+        if not isinstance(self.index, DatetimeIndex):
+            raise TypeError("'last' only supports a DatetimeIndex index")
+
+        if len(self.index) == 0:
+            return self.copy(deep=False)
+
+        offset = to_offset(offset)
+
+        start_date = self.index[-1] - offset
+        start = self.index.searchsorted(start_date, side="right")
+        return self.iloc[start:]
+
+    @final
+    def rank(
+        self,
+        axis: Axis = 0,
+        method: Literal["average", "min", "max", "first", "dense"] = "average",
+        numeric_only: bool_t = False,
+        na_option: Literal["keep", "top", "bottom"] = "keep",
+        ascending: bool_t = True,
+        pct: bool_t = False,
+    ) -> Self:
+        """
+        Compute numerical data ranks (1 through n) along axis.
+
+        By default, equal values are assigned a rank that is the average of the
+        ranks of those values.
+
+        Parameters
+        ----------
+        axis : {0 or 'index', 1 or 'columns'}, default 0
+            Index to direct ranking.
+            For `Series` this parameter is unused and defaults to 0.
+        method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
+            How to rank the group of records that have the same value (i.e. ties):
+
+            * average: average rank of the group
+            * min: lowest rank in the group
+            * max: highest rank in the group
+            * first: ranks assigned in order they appear in the array
+            * dense: like 'min', but rank always increases by 1 between groups.
+
+        numeric_only : bool, default False
+            For DataFrame objects, rank only numeric columns if set to True.
+
+            .. versionchanged:: 2.0.0
+                The default value of ``numeric_only`` is now ``False``.
+
+        na_option : {'keep', 'top', 'bottom'}, default 'keep'
+            How to rank NaN values:
+
+            * keep: assign NaN rank to NaN values
+            * top: assign lowest rank to NaN values
+            * bottom: assign highest rank to NaN values
+
+        ascending : bool, default True
+            Whether or not the elements should be ranked in ascending order.
+        pct : bool, default False
+            Whether or not to display the returned rankings in percentile
+            form.
+
+        Returns
+        -------
+        same type as caller
+            Return a Series or DataFrame with data ranks as values.
+
+        See Also
+        --------
+        core.groupby.DataFrameGroupBy.rank : Rank of values within each group.
+        core.groupby.SeriesGroupBy.rank : Rank of values within each group.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
+        ...                                    'spider', 'snake'],
+        ...                         'Number_legs': [4, 2, 4, 8, np.nan]})
+        >>> df
+            Animal  Number_legs
+        0      cat          4.0
+        1  penguin          2.0
+        2      dog          4.0
+        3   spider          8.0
+        4    snake          NaN
+
+        Ties are assigned the mean of the ranks (by default) for the group.
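+        For example, in the Series below the entries labelled 'b' and 'd' share
+        a value, so each receives rank 2.5, the mean of ranks 2 and 3.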
+ + >>> s = pd.Series(range(5), index=list("abcde")) + >>> s["d"] = s["b"] + >>> s.rank() + a 1.0 + b 2.5 + c 4.0 + d 2.5 + e 5.0 + dtype: float64 + + The following example shows how the method behaves with the above + parameters: + + * default_rank: this is the default behaviour obtained without using + any parameter. + * max_rank: setting ``method = 'max'`` the records that have the + same values are ranked using the highest rank (e.g.: since 'cat' + and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.) + * NA_bottom: choosing ``na_option = 'bottom'``, if there are records + with NaN values they are placed at the bottom of the ranking. + * pct_rank: when setting ``pct = True``, the ranking is expressed as + percentile rank. + + >>> df['default_rank'] = df['Number_legs'].rank() + >>> df['max_rank'] = df['Number_legs'].rank(method='max') + >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom') + >>> df['pct_rank'] = df['Number_legs'].rank(pct=True) + >>> df + Animal Number_legs default_rank max_rank NA_bottom pct_rank + 0 cat 4.0 2.5 3.0 2.5 0.625 + 1 penguin 2.0 1.0 1.0 1.0 0.250 + 2 dog 4.0 2.5 3.0 2.5 0.625 + 3 spider 8.0 4.0 4.0 4.0 1.000 + 4 snake NaN NaN NaN 5.0 NaN + """ + axis_int = self._get_axis_number(axis) + + if na_option not in {"keep", "top", "bottom"}: + msg = "na_option must be one of 'keep', 'top', or 'bottom'" + raise ValueError(msg) + + def ranker(data): + if data.ndim == 2: + # i.e. DataFrame, we cast to ndarray + values = data.values + else: + # i.e. Series, can dispatch to EA + values = data._values + + if isinstance(values, ExtensionArray): + ranks = values._rank( + axis=axis_int, + method=method, + ascending=ascending, + na_option=na_option, + pct=pct, + ) + else: + ranks = algos.rank( + values, + axis=axis_int, + method=method, + ascending=ascending, + na_option=na_option, + pct=pct, + ) + + ranks_obj = self._constructor(ranks, **data._construct_axes_dict()) + return ranks_obj.__finalize__(self, method="rank") + + if numeric_only: + if self.ndim == 1 and not is_numeric_dtype(self.dtype): + # GH#47500 + raise TypeError( + "Series.rank does not allow numeric_only=True with " + "non-numeric dtype." + ) + data = self._get_numeric_data() + else: + data = self + + return ranker(data) + + @doc(_shared_docs["compare"], klass=_shared_doc_kwargs["klass"]) + def compare( + self, + other, + align_axis: Axis = 1, + keep_shape: bool_t = False, + keep_equal: bool_t = False, + result_names: Suffixes = ("self", "other"), + ): + if type(self) is not type(other): + cls_self, cls_other = type(self).__name__, type(other).__name__ + raise TypeError( + f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'" + ) + + mask = ~((self == other) | (self.isna() & other.isna())) + mask.fillna(True, inplace=True) + + if not keep_equal: + self = self.where(mask) + other = other.where(mask) + + if not keep_shape: + if isinstance(self, ABCDataFrame): + cmask = mask.any() + rmask = mask.any(axis=1) + self = self.loc[rmask, cmask] + other = other.loc[rmask, cmask] + else: + self = self[mask] + other = other[mask] + if not isinstance(result_names, tuple): + raise TypeError( + f"Passing 'result_names' as a {type(result_names)} is not " + "supported. Provide 'result_names' as a tuple instead." 
+ ) + + if align_axis in (1, "columns"): # This is needed for Series + axis = 1 + else: + axis = self._get_axis_number(align_axis) + + # error: List item 0 has incompatible type "NDFrame"; expected + # "Union[Series, DataFrame]" + diff = concat( + [self, other], # type: ignore[list-item] + axis=axis, + keys=result_names, + ) + + if axis >= self.ndim: + # No need to reorganize data if stacking on new axis + # This currently applies for stacking two Series on columns + return diff + + ax = diff._get_axis(axis) + ax_names = np.array(ax.names) + + # set index names to positions to avoid confusion + ax.names = np.arange(len(ax_names)) + + # bring self-other to inner level + order = list(range(1, ax.nlevels)) + [0] + if isinstance(diff, ABCDataFrame): + diff = diff.reorder_levels(order, axis=axis) + else: + diff = diff.reorder_levels(order) + + # restore the index names in order + diff._get_axis(axis=axis).names = ax_names[order] + + # reorder axis to keep things organized + indices = ( + np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten() + ) + diff = diff.take(indices, axis=axis) + + return diff + + @final + @doc( + klass=_shared_doc_kwargs["klass"], + axes_single_arg=_shared_doc_kwargs["axes_single_arg"], + ) + def align( + self, + other: NDFrameT, + join: AlignJoin = "outer", + axis: Axis | None = None, + level: Level | None = None, + copy: bool_t | None = None, + fill_value: Hashable | None = None, + method: FillnaOptions | None | lib.NoDefault = lib.no_default, + limit: int | None | lib.NoDefault = lib.no_default, + fill_axis: Axis | lib.NoDefault = lib.no_default, + broadcast_axis: Axis | None | lib.NoDefault = lib.no_default, + ) -> tuple[Self, NDFrameT]: + """ + Align two objects on their axes with the specified join method. + + Join method is specified for each axis Index. + + Parameters + ---------- + other : DataFrame or Series + join : {{'outer', 'inner', 'left', 'right'}}, default 'outer' + Type of alignment to be performed. + + * left: use only keys from left frame, preserve key order. + * right: use only keys from right frame, preserve key order. + * outer: use union of keys from both frames, sort keys lexicographically. + * inner: use intersection of keys from both frames, + preserve the order of the left keys. + + axis : allowed axis of the other object, default None + Align on index (0), columns (1), or both (None). + level : int or level name, default None + Broadcast across a level, matching Index values on the + passed MultiIndex level. + copy : bool, default True + Always returns new objects. If copy=False and no reindexing is + required then original objects are returned. + fill_value : scalar, default np.nan + Value to use for missing values. Defaults to NaN, but can be any + "compatible" value. + method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None + Method to use for filling holes in reindexed Series: + + - pad / ffill: propagate last valid observation forward to next valid. + - backfill / bfill: use NEXT valid observation to fill gap. + + .. deprecated:: 2.1 + + limit : int, default None + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill. In other words, if there is + a gap with more than this number of consecutive NaNs, it will only + be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. Must be greater than 0 if not None. + + .. 
deprecated:: 2.1 + + fill_axis : {axes_single_arg}, default 0 + Filling axis, method and limit. + + .. deprecated:: 2.1 + + broadcast_axis : {axes_single_arg}, default None + Broadcast values along this axis, if aligning two objects of + different dimensions. + + .. deprecated:: 2.1 + + Returns + ------- + tuple of ({klass}, type of other) + Aligned objects. + + Examples + -------- + >>> df = pd.DataFrame( + ... [[1, 2, 3, 4], [6, 7, 8, 9]], columns=["D", "B", "E", "A"], index=[1, 2] + ... ) + >>> other = pd.DataFrame( + ... [[10, 20, 30, 40], [60, 70, 80, 90], [600, 700, 800, 900]], + ... columns=["A", "B", "C", "D"], + ... index=[2, 3, 4], + ... ) + >>> df + D B E A + 1 1 2 3 4 + 2 6 7 8 9 + >>> other + A B C D + 2 10 20 30 40 + 3 60 70 80 90 + 4 600 700 800 900 + + Align on columns: + + >>> left, right = df.align(other, join="outer", axis=1) + >>> left + A B C D E + 1 4 2 NaN 1 3 + 2 9 7 NaN 6 8 + >>> right + A B C D E + 2 10 20 30 40 NaN + 3 60 70 80 90 NaN + 4 600 700 800 900 NaN + + We can also align on the index: + + >>> left, right = df.align(other, join="outer", axis=0) + >>> left + D B E A + 1 1.0 2.0 3.0 4.0 + 2 6.0 7.0 8.0 9.0 + 3 NaN NaN NaN NaN + 4 NaN NaN NaN NaN + >>> right + A B C D + 1 NaN NaN NaN NaN + 2 10.0 20.0 30.0 40.0 + 3 60.0 70.0 80.0 90.0 + 4 600.0 700.0 800.0 900.0 + + Finally, the default `axis=None` will align on both index and columns: + + >>> left, right = df.align(other, join="outer", axis=None) + >>> left + A B C D E + 1 4.0 2.0 NaN 1.0 3.0 + 2 9.0 7.0 NaN 6.0 8.0 + 3 NaN NaN NaN NaN NaN + 4 NaN NaN NaN NaN NaN + >>> right + A B C D E + 1 NaN NaN NaN NaN NaN + 2 10.0 20.0 30.0 40.0 NaN + 3 60.0 70.0 80.0 90.0 NaN + 4 600.0 700.0 800.0 900.0 NaN + """ + if ( + method is not lib.no_default + or limit is not lib.no_default + or fill_axis is not lib.no_default + ): + # GH#51856 + warnings.warn( + "The 'method', 'limit', and 'fill_axis' keywords in " + f"{type(self).__name__}.align are deprecated and will be removed " + "in a future version. Call fillna directly on the returned objects " + "instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if fill_axis is lib.no_default: + fill_axis = 0 + if method is lib.no_default: + method = None + if limit is lib.no_default: + limit = None + + if method is not None: + method = clean_fill_method(method) + + if broadcast_axis is not lib.no_default: + # GH#51856 + # TODO(3.0): enforcing this deprecation will close GH#13194 + msg = ( + f"The 'broadcast_axis' keyword in {type(self).__name__}.align is " + "deprecated and will be removed in a future version." + ) + if broadcast_axis is not None: + if self.ndim == 1 and other.ndim == 2: + msg += ( + " Use left = DataFrame({col: left for col in right.columns}, " + "index=right.index) before calling `left.align(right)` instead." 
+ ) + elif self.ndim == 2 and other.ndim == 1: + msg += ( + " Use right = DataFrame({col: right for col in left.columns}, " + "index=left.index) before calling `left.align(right)` instead" + ) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + else: + broadcast_axis = None + + if broadcast_axis == 1 and self.ndim != other.ndim: + if isinstance(self, ABCSeries): + # this means other is a DataFrame, and we need to broadcast + # self + cons = self._constructor_expanddim + df = cons( + {c: self for c in other.columns}, **other._construct_axes_dict() + ) + # error: Incompatible return value type (got "Tuple[DataFrame, + # DataFrame]", expected "Tuple[Self, NDFrameT]") + return df._align_frame( # type: ignore[return-value] + other, # type: ignore[arg-type] + join=join, + axis=axis, + level=level, + copy=copy, + fill_value=fill_value, + method=method, + limit=limit, + fill_axis=fill_axis, + )[:2] + elif isinstance(other, ABCSeries): + # this means self is a DataFrame, and we need to broadcast + # other + cons = other._constructor_expanddim + df = cons( + {c: other for c in self.columns}, **self._construct_axes_dict() + ) + # error: Incompatible return value type (got "Tuple[NDFrameT, + # DataFrame]", expected "Tuple[Self, NDFrameT]") + return self._align_frame( # type: ignore[return-value] + df, + join=join, + axis=axis, + level=level, + copy=copy, + fill_value=fill_value, + method=method, + limit=limit, + fill_axis=fill_axis, + )[:2] + + _right: DataFrame | Series + if axis is not None: + axis = self._get_axis_number(axis) + if isinstance(other, ABCDataFrame): + left, _right, join_index = self._align_frame( + other, + join=join, + axis=axis, + level=level, + copy=copy, + fill_value=fill_value, + method=method, + limit=limit, + fill_axis=fill_axis, + ) + + elif isinstance(other, ABCSeries): + left, _right, join_index = self._align_series( + other, + join=join, + axis=axis, + level=level, + copy=copy, + fill_value=fill_value, + method=method, + limit=limit, + fill_axis=fill_axis, + ) + else: # pragma: no cover + raise TypeError(f"unsupported type: {type(other)}") + + right = cast(NDFrameT, _right) + if self.ndim == 1 or axis == 0: + # If we are aligning timezone-aware DatetimeIndexes and the timezones + # do not match, convert both to UTC. 
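+            # (When the zones differ, the joined index is in UTC; it is assigned
+            # to both objects below so the two results stay consistent.)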
+ if isinstance(left.index.dtype, DatetimeTZDtype): + if left.index.tz != right.index.tz: + if join_index is not None: + # GH#33671 copy to ensure we don't change the index on + # our original Series + left = left.copy(deep=False) + right = right.copy(deep=False) + left.index = join_index + right.index = join_index + + left = left.__finalize__(self) + right = right.__finalize__(other) + return left, right + + @final + def _align_frame( + self, + other: DataFrame, + join: AlignJoin = "outer", + axis: Axis | None = None, + level=None, + copy: bool_t | None = None, + fill_value=None, + method=None, + limit: int | None = None, + fill_axis: Axis = 0, + ) -> tuple[Self, DataFrame, Index | None]: + # defaults + join_index, join_columns = None, None + ilidx, iridx = None, None + clidx, cridx = None, None + + is_series = isinstance(self, ABCSeries) + + if (axis is None or axis == 0) and not self.index.equals(other.index): + join_index, ilidx, iridx = self.index.join( + other.index, how=join, level=level, return_indexers=True + ) + + if ( + (axis is None or axis == 1) + and not is_series + and not self.columns.equals(other.columns) + ): + join_columns, clidx, cridx = self.columns.join( + other.columns, how=join, level=level, return_indexers=True + ) + + if is_series: + reindexers = {0: [join_index, ilidx]} + else: + reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]} + + left = self._reindex_with_indexers( + reindexers, copy=copy, fill_value=fill_value, allow_dups=True + ) + # other must be always DataFrame + right = other._reindex_with_indexers( + {0: [join_index, iridx], 1: [join_columns, cridx]}, + copy=copy, + fill_value=fill_value, + allow_dups=True, + ) + + if method is not None: + left = left._pad_or_backfill(method, axis=fill_axis, limit=limit) + right = right._pad_or_backfill(method, axis=fill_axis, limit=limit) + + return left, right, join_index + + @final + def _align_series( + self, + other: Series, + join: AlignJoin = "outer", + axis: Axis | None = None, + level=None, + copy: bool_t | None = None, + fill_value=None, + method=None, + limit: int | None = None, + fill_axis: Axis = 0, + ) -> tuple[Self, Series, Index | None]: + is_series = isinstance(self, ABCSeries) + if copy and using_copy_on_write(): + copy = False + + if (not is_series and axis is None) or axis not in [None, 0, 1]: + raise ValueError("Must specify axis=0 or 1") + + if is_series and axis == 1: + raise ValueError("cannot align series to a series other than axis 0") + + # series/series compat, other must always be a Series + if not axis: + # equal + if self.index.equals(other.index): + join_index, lidx, ridx = None, None, None + else: + join_index, lidx, ridx = self.index.join( + other.index, how=join, level=level, return_indexers=True + ) + + if is_series: + left = self._reindex_indexer(join_index, lidx, copy) + elif lidx is None or join_index is None: + left = self.copy(deep=copy) + else: + new_mgr = self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy) + left = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes) + + right = other._reindex_indexer(join_index, ridx, copy) + + else: + # one has > 1 ndim + fdata = self._mgr + join_index = self.axes[1] + lidx, ridx = None, None + if not join_index.equals(other.index): + join_index, lidx, ridx = join_index.join( + other.index, how=join, level=level, return_indexers=True + ) + + if lidx is not None: + bm_axis = self._get_block_manager_axis(1) + fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis) + + if copy and fdata is self._mgr: + fdata = 
fdata.copy() + + left = self._constructor_from_mgr(fdata, axes=fdata.axes) + + if ridx is None: + right = other.copy(deep=copy) + else: + right = other.reindex(join_index, level=level) + + # fill + fill_na = notna(fill_value) or (method is not None) + if fill_na: + fill_value, method = validate_fillna_kwargs(fill_value, method) + if method is not None: + left = left._pad_or_backfill(method, limit=limit, axis=fill_axis) + right = right._pad_or_backfill(method, limit=limit) + else: + left = left.fillna(fill_value, limit=limit, axis=fill_axis) + right = right.fillna(fill_value, limit=limit) + + return left, right, join_index + + @final + def _where( + self, + cond, + other=lib.no_default, + inplace: bool_t = False, + axis: Axis | None = None, + level=None, + ): + """ + Equivalent to public method `where`, except that `other` is not + applied as a function even if callable. Used in __setitem__. + """ + inplace = validate_bool_kwarg(inplace, "inplace") + + if axis is not None: + axis = self._get_axis_number(axis) + + # align the cond to same shape as myself + cond = common.apply_if_callable(cond, self) + if isinstance(cond, NDFrame): + # CoW: Make sure reference is not kept alive + if cond.ndim == 1 and self.ndim == 2: + cond = cond._constructor_expanddim( + {i: cond for i in range(len(self.columns))}, + copy=False, + ) + cond.columns = self.columns + cond = cond.align(self, join="right", copy=False)[0] + else: + if not hasattr(cond, "shape"): + cond = np.asanyarray(cond) + if cond.shape != self.shape: + raise ValueError("Array conditional must be same shape as self") + cond = self._constructor(cond, **self._construct_axes_dict(), copy=False) + + # make sure we are boolean + fill_value = bool(inplace) + cond = cond.fillna(fill_value) + + msg = "Boolean array expected for the condition, not {dtype}" + + if not cond.empty: + if not isinstance(cond, ABCDataFrame): + # This is a single-dimensional object. 
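+                # It has a single dtype, so one bool-dtype check suffices.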
+ if not is_bool_dtype(cond): + raise ValueError(msg.format(dtype=cond.dtype)) + else: + for _dt in cond.dtypes: + if not is_bool_dtype(_dt): + raise ValueError(msg.format(dtype=_dt)) + if cond._mgr.any_extension_types: + # GH51574: avoid object ndarray conversion later on + cond = cond._constructor( + cond.to_numpy(dtype=bool, na_value=fill_value), + **cond._construct_axes_dict(), + ) + else: + # GH#21947 we have an empty DataFrame/Series, could be object-dtype + cond = cond.astype(bool) + + cond = -cond if inplace else cond + cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False) + + # try to align with other + if isinstance(other, NDFrame): + # align with me + if other.ndim <= self.ndim: + # CoW: Make sure reference is not kept alive + other = self.align( + other, + join="left", + axis=axis, + level=level, + fill_value=None, + copy=False, + )[1] + + # if we are NOT aligned, raise as we cannot where index + if axis is None and not other._indexed_same(self): + raise InvalidIndexError + + if other.ndim < self.ndim: + # TODO(EA2D): avoid object-dtype cast in EA case GH#38729 + other = other._values + if axis == 0: + other = np.reshape(other, (-1, 1)) + elif axis == 1: + other = np.reshape(other, (1, -1)) + + other = np.broadcast_to(other, self.shape) + + # slice me out of the other + else: + raise NotImplementedError( + "cannot align with a higher dimensional NDFrame" + ) + + elif not isinstance(other, (MultiIndex, NDFrame)): + # mainly just catching Index here + other = extract_array(other, extract_numpy=True) + + if isinstance(other, (np.ndarray, ExtensionArray)): + if other.shape != self.shape: + if self.ndim != 1: + # In the ndim == 1 case we may have + # other length 1, which we treat as scalar (GH#2745, GH#4192) + # or len(other) == icond.sum(), which we treat like + # __setitem__ (GH#3235) + raise ValueError( + "other must be the same shape as self when an ndarray" + ) + + # we are the same shape, so create an actual object for alignment + else: + other = self._constructor( + other, **self._construct_axes_dict(), copy=False + ) + + if axis is None: + axis = 0 + + if self.ndim == getattr(other, "ndim", 0): + align = True + else: + align = self._get_axis_number(axis) == 1 + + if inplace: + # we may have different type blocks come out of putmask, so + # reconstruct the block manager + + new_data = self._mgr.putmask(mask=cond, new=other, align=align) + result = self._constructor_from_mgr(new_data, axes=new_data.axes) + return self._update_inplace(result) + + else: + new_data = self._mgr.where( + other=other, + cond=cond, + align=align, + ) + result = self._constructor_from_mgr(new_data, axes=new_data.axes) + return result.__finalize__(self) + + @overload + def where( + self, + cond, + other=..., + *, + inplace: Literal[False] = ..., + axis: Axis | None = ..., + level: Level = ..., + ) -> Self: + ... + + @overload + def where( + self, + cond, + other=..., + *, + inplace: Literal[True], + axis: Axis | None = ..., + level: Level = ..., + ) -> None: + ... + + @overload + def where( + self, + cond, + other=..., + *, + inplace: bool_t = ..., + axis: Axis | None = ..., + level: Level = ..., + ) -> Self | None: + ... + + @final + @doc( + klass=_shared_doc_kwargs["klass"], + cond="True", + cond_rev="False", + name="where", + name_other="mask", + ) + def where( + self, + cond, + other=np.nan, + *, + inplace: bool_t = False, + axis: Axis | None = None, + level: Level | None = None, + ) -> Self | None: + """ + Replace values where the condition is {cond_rev}. 
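+
+        Entries where `cond` is {cond} are kept; all other entries are
+        replaced with the corresponding value from `other`.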
+
+        Parameters
+        ----------
+        cond : bool {klass}, array-like, or callable
+            Where `cond` is {cond}, keep the original value. Where
+            {cond_rev}, replace with corresponding value from `other`.
+            If `cond` is callable, it is computed on the {klass} and
+            should return boolean {klass} or array. The callable must
+            not change input {klass} (though pandas doesn't check it).
+        other : scalar, {klass}, or callable
+            Entries where `cond` is {cond_rev} are replaced with
+            corresponding value from `other`.
+            If other is callable, it is computed on the {klass} and
+            should return scalar or {klass}. The callable must not
+            change input {klass} (though pandas doesn't check it).
+            If not specified, entries will be filled with the corresponding
+            NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension
+            dtypes).
+        inplace : bool, default False
+            Whether to perform the operation in place on the data.
+        axis : int, default None
+            Alignment axis if needed. For `Series` this parameter is
+            unused and defaults to 0.
+        level : int, default None
+            Alignment level if needed.
+
+        Returns
+        -------
+        Same type as caller or None if ``inplace=True``.
+
+        See Also
+        --------
+        :func:`DataFrame.{name_other}` : Return an object of same shape as
+            self.
+
+        Notes
+        -----
+        The {name} method is an application of the if-then idiom. For each
+        element in the calling DataFrame, if ``cond`` is ``{cond}`` the
+        element is used; otherwise the corresponding element from the DataFrame
+        ``other`` is used. If the axis of ``other`` does not align with axis of
+        ``cond`` {klass}, the misaligned index positions will be filled with
+        {cond_rev}.
+
+        The signature for :func:`DataFrame.where` differs from
+        :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
+        ``np.where(m, df1, df2)``.
+
+        For further details and examples see the ``{name}`` documentation in
+        :ref:`indexing <indexing.where_mask>`.
+
+        The dtype of the object takes precedence. The fill value is cast to
+        the object's dtype, if this can be done losslessly.
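+        Otherwise the result may be upcast; in the first example below, filling
+        an integer Series with NaN produces ``float64``.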
+ + Examples + -------- + >>> s = pd.Series(range(5)) + >>> s.where(s > 0) + 0 NaN + 1 1.0 + 2 2.0 + 3 3.0 + 4 4.0 + dtype: float64 + >>> s.mask(s > 0) + 0 0.0 + 1 NaN + 2 NaN + 3 NaN + 4 NaN + dtype: float64 + + >>> s = pd.Series(range(5)) + >>> t = pd.Series([True, False]) + >>> s.where(t, 99) + 0 0 + 1 99 + 2 99 + 3 99 + 4 99 + dtype: int64 + >>> s.mask(t, 99) + 0 99 + 1 1 + 2 99 + 3 99 + 4 99 + dtype: int64 + + >>> s.where(s > 1, 10) + 0 10 + 1 10 + 2 2 + 3 3 + 4 4 + dtype: int64 + >>> s.mask(s > 1, 10) + 0 0 + 1 1 + 2 10 + 3 10 + 4 10 + dtype: int64 + + >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B']) + >>> df + A B + 0 0 1 + 1 2 3 + 2 4 5 + 3 6 7 + 4 8 9 + >>> m = df % 3 == 0 + >>> df.where(m, -df) + A B + 0 0 -1 + 1 -2 3 + 2 -4 -5 + 3 6 -7 + 4 -8 9 + >>> df.where(m, -df) == np.where(m, df, -df) + A B + 0 True True + 1 True True + 2 True True + 3 True True + 4 True True + >>> df.where(m, -df) == df.mask(~m, -df) + A B + 0 True True + 1 True True + 2 True True + 3 True True + 4 True True + """ + inplace = validate_bool_kwarg(inplace, "inplace") + if inplace: + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= REF_COUNT: + warnings.warn( + _chained_assignment_method_msg, + ChainedAssignmentError, + stacklevel=2, + ) + other = common.apply_if_callable(other, self) + return self._where(cond, other, inplace, axis, level) + + @overload + def mask( + self, + cond, + other=..., + *, + inplace: Literal[False] = ..., + axis: Axis | None = ..., + level: Level = ..., + ) -> Self: + ... + + @overload + def mask( + self, + cond, + other=..., + *, + inplace: Literal[True], + axis: Axis | None = ..., + level: Level = ..., + ) -> None: + ... + + @overload + def mask( + self, + cond, + other=..., + *, + inplace: bool_t = ..., + axis: Axis | None = ..., + level: Level = ..., + ) -> Self | None: + ... + + @final + @doc( + where, + klass=_shared_doc_kwargs["klass"], + cond="False", + cond_rev="True", + name="mask", + name_other="where", + ) + def mask( + self, + cond, + other=lib.no_default, + *, + inplace: bool_t = False, + axis: Axis | None = None, + level: Level | None = None, + ) -> Self | None: + inplace = validate_bool_kwarg(inplace, "inplace") + if inplace: + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= REF_COUNT: + warnings.warn( + _chained_assignment_method_msg, + ChainedAssignmentError, + stacklevel=2, + ) + + cond = common.apply_if_callable(cond, self) + + # see gh-21891 + if not hasattr(cond, "__invert__"): + cond = np.array(cond) + + return self.where( + ~cond, + other=other, + inplace=inplace, + axis=axis, + level=level, + ) + + @doc(klass=_shared_doc_kwargs["klass"]) + def shift( + self, + periods: int | Sequence[int] = 1, + freq=None, + axis: Axis = 0, + fill_value: Hashable = lib.no_default, + suffix: str | None = None, + ) -> Self | DataFrame: + """ + Shift index by desired number of periods with an optional time `freq`. + + When `freq` is not passed, shift the index without realigning the data. + If `freq` is passed (in this case, the index must be date or datetime, + or it will raise a `NotImplementedError`), the index will be + increased using the periods and the `freq`. `freq` can be inferred + when specified as "infer" as long as either freq or inferred_freq + attribute is set in the index. + + Parameters + ---------- + periods : int or Sequence + Number of periods to shift. Can be positive or negative. + If an iterable of ints, the data will be shifted once by each int. 
+            This is equivalent to shifting by one value at a time and
+            concatenating all resulting frames. The resulting columns will have
+            the shift suffixed to their column names. For multiple periods,
+            axis must not be 1.
+        freq : DateOffset, tseries.offsets, timedelta, or str, optional
+            Offset to use from the tseries module or time rule (e.g. 'EOM').
+            If `freq` is specified then the index values are shifted but the
+            data is not realigned. That is, use `freq` if you would like to
+            extend the index when shifting and preserve the original data.
+            If `freq` is specified as "infer" then it will be inferred from
+            the freq or inferred_freq attributes of the index. If neither of
+            those attributes exist, a ValueError is thrown.
+        axis : {{0 or 'index', 1 or 'columns', None}}, default 0
+            Shift direction. For `Series` this parameter is unused and defaults to 0.
+        fill_value : object, optional
+            The scalar value to use for newly introduced missing values.
+            The default depends on the dtype of `self`.
+            For numeric data, ``np.nan`` is used.
+            For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
+            For extension dtypes, ``self.dtype.na_value`` is used.
+        suffix : str, optional
+            If str and periods is an iterable, this is added after the column
+            name and before the shift value for each shifted column name.
+
+        Returns
+        -------
+        {klass}
+            Copy of input object, shifted.
+
+        See Also
+        --------
+        Index.shift : Shift values of Index.
+        DatetimeIndex.shift : Shift values of DatetimeIndex.
+        PeriodIndex.shift : Shift values of PeriodIndex.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45],
+        ...                    "Col2": [13, 23, 18, 33, 48],
+        ...                    "Col3": [17, 27, 22, 37, 52]}},
+        ...                   index=pd.date_range("2020-01-01", "2020-01-05"))
+        >>> df
+                    Col1  Col2  Col3
+        2020-01-01    10    13    17
+        2020-01-02    20    23    27
+        2020-01-03    15    18    22
+        2020-01-04    30    33    37
+        2020-01-05    45    48    52
+
+        >>> df.shift(periods=3)
+                    Col1  Col2  Col3
+        2020-01-01   NaN   NaN   NaN
+        2020-01-02   NaN   NaN   NaN
+        2020-01-03   NaN   NaN   NaN
+        2020-01-04  10.0  13.0  17.0
+        2020-01-05  20.0  23.0  27.0
+
+        >>> df.shift(periods=1, axis="columns")
+                    Col1  Col2  Col3
+        2020-01-01   NaN    10    13
+        2020-01-02   NaN    20    23
+        2020-01-03   NaN    15    18
+        2020-01-04   NaN    30    33
+        2020-01-05   NaN    45    48
+
+        >>> df.shift(periods=3, fill_value=0)
+                    Col1  Col2  Col3
+        2020-01-01     0     0     0
+        2020-01-02     0     0     0
+        2020-01-03     0     0     0
+        2020-01-04    10    13    17
+        2020-01-05    20    23    27
+
+        >>> df.shift(periods=3, freq="D")
+                    Col1  Col2  Col3
+        2020-01-04    10    13    17
+        2020-01-05    20    23    27
+        2020-01-06    15    18    22
+        2020-01-07    30    33    37
+        2020-01-08    45    48    52
+
+        >>> df.shift(periods=3, freq="infer")
+                    Col1  Col2  Col3
+        2020-01-04    10    13    17
+        2020-01-05    20    23    27
+        2020-01-06    15    18    22
+        2020-01-07    30    33    37
+        2020-01-08    45    48    52
+
+        >>> df['Col1'].shift(periods=[0, 1, 2])
+                    Col1_0  Col1_1  Col1_2
+        2020-01-01      10     NaN     NaN
+        2020-01-02      20    10.0     NaN
+        2020-01-03      15    20.0    10.0
+        2020-01-04      30    15.0    20.0
+        2020-01-05      45    30.0    15.0
+        """
+        axis = self._get_axis_number(axis)
+
+        if freq is not None and fill_value is not lib.no_default:
+            # GH#53832
+            warnings.warn(
+                "Passing a 'freq' together with a 'fill_value' silently ignores "
+                "the fill_value and is deprecated. This will raise in a future "
+                "version.",
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
+            fill_value = lib.no_default
+
+        if periods == 0:
+            return self.copy(deep=None)
+
+        if is_list_like(periods) and isinstance(self, ABCSeries):
+            return self.to_frame().shift(
+                periods=periods, freq=freq, axis=axis, fill_value=fill_value
+            )
+        periods = cast(int, periods)
+
+        if freq is None:
+            # when freq is None, data is shifted, index is not
+            axis = self._get_axis_number(axis)
+            assert axis == 0  # axis == 1 cases handled in DataFrame.shift
+            new_data = self._mgr.shift(periods=periods, fill_value=fill_value)
+            return self._constructor_from_mgr(
+                new_data, axes=new_data.axes
+            ).__finalize__(self, method="shift")
+
+        return self._shift_with_freq(periods, axis, freq)
+
+    @final
+    def _shift_with_freq(self, periods: int, axis: int, freq) -> Self:
+        # see shift.__doc__
+        # when freq is given, index is shifted, data is not
+        index = self._get_axis(axis)
+
+        if freq == "infer":
+            freq = getattr(index, "freq", None)
+
+            if freq is None:
+                freq = getattr(index, "inferred_freq", None)
+
+            if freq is None:
+                msg = "Freq was not set in the index hence cannot be inferred"
+                raise ValueError(msg)
+
+        elif isinstance(freq, str):
+            freq = to_offset(freq)
+
+        if isinstance(index, PeriodIndex):
+            orig_freq = to_offset(index.freq)
+            if freq != orig_freq:
+                assert orig_freq is not None  # for mypy
+                raise ValueError(
+                    f"Given freq {freq.rule_code} does not match "
+                    f"PeriodIndex freq {orig_freq.rule_code}"
+                )
+            new_ax = index.shift(periods)
+        else:
+            new_ax = index.shift(periods, freq)
+
+        result = self.set_axis(new_ax, axis=axis)
+        return result.__finalize__(self, method="shift")
+
+    @final
+    def truncate(
+        self,
+        before=None,
+        after=None,
+        axis: Axis | None = None,
+        copy: bool_t | None = None,
+    ) -> Self:
+        """
+        Truncate a Series or DataFrame before and after some index value.
+
+        This is a useful shorthand for boolean indexing based on index
+        values above or below certain thresholds.
+
+        Parameters
+        ----------
+        before : date, str, int
+            Truncate all rows before this index value.
+        after : date, str, int
+            Truncate all rows after this index value.
+        axis : {0 or 'index', 1 or 'columns'}, optional
+            Axis to truncate. Truncates the index (rows) by default.
+            For `Series` this parameter is unused and defaults to 0.
+        copy : bool, default True
+            Return a copy of the truncated section.
+
+        Returns
+        -------
+        type of caller
+            The truncated Series or DataFrame.
+
+        See Also
+        --------
+        DataFrame.loc : Select a subset of a DataFrame by label.
+        DataFrame.iloc : Select a subset of a DataFrame by position.
+
+        Notes
+        -----
+        If the index being truncated contains only datetime values,
+        `before` and `after` may be specified as strings instead of
+        Timestamps.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
+        ...                    'B': ['f', 'g', 'h', 'i', 'j'],
+        ...                    'C': ['k', 'l', 'm', 'n', 'o']},
+        ...                   index=[1, 2, 3, 4, 5])
+        >>> df
+           A  B  C
+        1  a  f  k
+        2  b  g  l
+        3  c  h  m
+        4  d  i  n
+        5  e  j  o
+
+        >>> df.truncate(before=2, after=4)
+           A  B  C
+        2  b  g  l
+        3  c  h  m
+        4  d  i  n
+
+        The columns of a DataFrame can be truncated.
+
+        >>> df.truncate(before="A", after="B", axis="columns")
+           A  B
+        1  a  f
+        2  b  g
+        3  c  h
+        4  d  i
+        5  e  j
+
+        For Series, only rows can be truncated.
+
+        >>> df['A'].truncate(before=2, after=4)
+        2    b
+        3    c
+        4    d
+        Name: A, dtype: object
+
+        The index values in ``truncate`` can be datetimes or string
+        dates.
+ + >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') + >>> df = pd.DataFrame(index=dates, data={'A': 1}) + >>> df.tail() + A + 2016-01-31 23:59:56 1 + 2016-01-31 23:59:57 1 + 2016-01-31 23:59:58 1 + 2016-01-31 23:59:59 1 + 2016-02-01 00:00:00 1 + + >>> df.truncate(before=pd.Timestamp('2016-01-05'), + ... after=pd.Timestamp('2016-01-10')).tail() + A + 2016-01-09 23:59:56 1 + 2016-01-09 23:59:57 1 + 2016-01-09 23:59:58 1 + 2016-01-09 23:59:59 1 + 2016-01-10 00:00:00 1 + + Because the index is a DatetimeIndex containing only dates, we can + specify `before` and `after` as strings. They will be coerced to + Timestamps before truncation. + + >>> df.truncate('2016-01-05', '2016-01-10').tail() + A + 2016-01-09 23:59:56 1 + 2016-01-09 23:59:57 1 + 2016-01-09 23:59:58 1 + 2016-01-09 23:59:59 1 + 2016-01-10 00:00:00 1 + + Note that ``truncate`` assumes a 0 value for any unspecified time + component (midnight). This differs from partial string slicing, which + returns any partially matching dates. + + >>> df.loc['2016-01-05':'2016-01-10', :].tail() + A + 2016-01-10 23:59:55 1 + 2016-01-10 23:59:56 1 + 2016-01-10 23:59:57 1 + 2016-01-10 23:59:58 1 + 2016-01-10 23:59:59 1 + """ + if axis is None: + axis = 0 + axis = self._get_axis_number(axis) + ax = self._get_axis(axis) + + # GH 17935 + # Check that index is sorted + if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing: + raise ValueError("truncate requires a sorted index") + + # if we have a date index, convert to dates, otherwise + # treat like a slice + if ax._is_all_dates: + from pandas.core.tools.datetimes import to_datetime + + before = to_datetime(before) + after = to_datetime(after) + + if before is not None and after is not None and before > after: + raise ValueError(f"Truncate: {after} must be after {before}") + + if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1: + before, after = after, before + + slicer = [slice(None, None)] * self._AXIS_LEN + slicer[axis] = slice(before, after) + result = self.loc[tuple(slicer)] + + if isinstance(ax, MultiIndex): + setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) + + result = result.copy(deep=copy and not using_copy_on_write()) + + return result + + @final + @doc(klass=_shared_doc_kwargs["klass"]) + def tz_convert( + self, tz, axis: Axis = 0, level=None, copy: bool_t | None = None + ) -> Self: + """ + Convert tz-aware axis to target time zone. + + Parameters + ---------- + tz : str or tzinfo object or None + Target time zone. Passing ``None`` will convert to + UTC and remove the timezone information. + axis : {{0 or 'index', 1 or 'columns'}}, default 0 + The axis to convert + level : int, str, default None + If axis is a MultiIndex, convert a specific level. Otherwise + must be None. + copy : bool, default True + Also make a copy of the underlying data. + + Returns + ------- + {klass} + Object with time zone converted axis. + + Raises + ------ + TypeError + If the axis is tz-naive. + + Examples + -------- + Change to another time zone: + + >>> s = pd.Series( + ... [1], + ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']), + ... ) + >>> s.tz_convert('Asia/Shanghai') + 2018-09-15 07:30:00+08:00 1 + dtype: int64 + + Pass None to convert to UTC and get a tz-naive index: + + >>> s = pd.Series([1], + ... 
index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']))
+        >>> s.tz_convert(None)
+        2018-09-14 23:30:00    1
+        dtype: int64
+        """
+        axis = self._get_axis_number(axis)
+        ax = self._get_axis(axis)
+
+        def _tz_convert(ax, tz):
+            if not hasattr(ax, "tz_convert"):
+                if len(ax) > 0:
+                    ax_name = self._get_axis_name(axis)
+                    raise TypeError(
+                        f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
+                    )
+                ax = DatetimeIndex([], tz=tz)
+            else:
+                ax = ax.tz_convert(tz)
+            return ax
+
+        # if a level is given it must be a MultiIndex level or
+        # equivalent to the axis name
+        if isinstance(ax, MultiIndex):
+            level = ax._get_level_number(level)
+            new_level = _tz_convert(ax.levels[level], tz)
+            ax = ax.set_levels(new_level, level=level)
+        else:
+            if level not in (None, 0, ax.name):
+                raise ValueError(f"The level {level} is not valid")
+            ax = _tz_convert(ax, tz)
+
+        result = self.copy(deep=copy and not using_copy_on_write())
+        result = result.set_axis(ax, axis=axis, copy=False)
+        return result.__finalize__(self, method="tz_convert")
+
+    @final
+    @doc(klass=_shared_doc_kwargs["klass"])
+    def tz_localize(
+        self,
+        tz,
+        axis: Axis = 0,
+        level=None,
+        copy: bool_t | None = None,
+        ambiguous: TimeAmbiguous = "raise",
+        nonexistent: TimeNonexistent = "raise",
+    ) -> Self:
+        """
+        Localize tz-naive index of a Series or DataFrame to target time zone.
+
+        This operation localizes the Index. To localize the values in a
+        timezone-naive Series, use :meth:`Series.dt.tz_localize`.
+
+        Parameters
+        ----------
+        tz : str or tzinfo or None
+            Time zone to localize. Passing ``None`` will remove the
+            time zone information and preserve local time.
+        axis : {{0 or 'index', 1 or 'columns'}}, default 0
+            The axis to localize.
+        level : int, str, default None
+            If axis is a MultiIndex, localize a specific level. Otherwise
+            must be None.
+        copy : bool, default True
+            Also make a copy of the underlying data.
+        ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
+            When clocks moved backward due to DST, ambiguous times may arise.
+            For example in Central European Time (UTC+01), when going from
+            03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
+            00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
+            `ambiguous` parameter dictates how ambiguous times should be
+            handled.
+
+            - 'infer' will attempt to infer fall dst-transition hours based on
+              order
+            - bool-ndarray where True signifies a DST time, False designates
+              a non-DST time (note that this flag is only applicable for
+              ambiguous times)
+            - 'NaT' will return NaT where there are ambiguous times
+            - 'raise' will raise an AmbiguousTimeError if there are ambiguous
+              times.
+        nonexistent : str, default 'raise'
+            A nonexistent time does not exist in a particular timezone
+            where clocks moved forward due to DST. Valid values are:
+
+            - 'shift_forward' will shift the nonexistent time forward to the
+              closest existing time
+            - 'shift_backward' will shift the nonexistent time backward to the
+              closest existing time
+            - 'NaT' will return NaT where there are nonexistent times
+            - timedelta objects will shift nonexistent times by the timedelta
+            - 'raise' will raise a NonExistentTimeError if there are
+              nonexistent times.
+
+        Returns
+        -------
+        {klass}
+            Same type as the input.
+
+        Raises
+        ------
+        TypeError
+            If the TimeSeries is tz-aware and tz is not None.
+
+        Examples
+        --------
+        Localize local times:
+
+        >>> s = pd.Series(
+        ...     [1],
+        ...     index=pd.DatetimeIndex(['2018-09-15 01:30:00']),
+        ...
) + >>> s.tz_localize('CET') + 2018-09-15 01:30:00+02:00 1 + dtype: int64 + + Pass None to convert to tz-naive index and preserve local time: + + >>> s = pd.Series([1], + ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) + >>> s.tz_localize(None) + 2018-09-15 01:30:00 1 + dtype: int64 + + Be careful with DST changes. When there is sequential data, pandas + can infer the DST time: + + >>> s = pd.Series(range(7), + ... index=pd.DatetimeIndex(['2018-10-28 01:30:00', + ... '2018-10-28 02:00:00', + ... '2018-10-28 02:30:00', + ... '2018-10-28 02:00:00', + ... '2018-10-28 02:30:00', + ... '2018-10-28 03:00:00', + ... '2018-10-28 03:30:00'])) + >>> s.tz_localize('CET', ambiguous='infer') + 2018-10-28 01:30:00+02:00 0 + 2018-10-28 02:00:00+02:00 1 + 2018-10-28 02:30:00+02:00 2 + 2018-10-28 02:00:00+01:00 3 + 2018-10-28 02:30:00+01:00 4 + 2018-10-28 03:00:00+01:00 5 + 2018-10-28 03:30:00+01:00 6 + dtype: int64 + + In some cases, inferring the DST is impossible. In such cases, you can + pass an ndarray to the ambiguous parameter to set the DST explicitly + + >>> s = pd.Series(range(3), + ... index=pd.DatetimeIndex(['2018-10-28 01:20:00', + ... '2018-10-28 02:36:00', + ... '2018-10-28 03:46:00'])) + >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) + 2018-10-28 01:20:00+02:00 0 + 2018-10-28 02:36:00+02:00 1 + 2018-10-28 03:46:00+01:00 2 + dtype: int64 + + If the DST transition causes nonexistent times, you can shift these + dates forward or backward with a timedelta object or `'shift_forward'` + or `'shift_backward'`. + + >>> s = pd.Series(range(2), + ... index=pd.DatetimeIndex(['2015-03-29 02:30:00', + ... '2015-03-29 03:30:00'])) + >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') + 2015-03-29 03:00:00+02:00 0 + 2015-03-29 03:30:00+02:00 1 + dtype: int64 + >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') + 2015-03-29 01:59:59.999999999+01:00 0 + 2015-03-29 03:30:00+02:00 1 + dtype: int64 + >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) + 2015-03-29 03:30:00+02:00 0 + 2015-03-29 03:30:00+02:00 1 + dtype: int64 + """ + nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") + if nonexistent not in nonexistent_options and not isinstance( + nonexistent, dt.timedelta + ): + raise ValueError( + "The nonexistent argument must be one of 'raise', " + "'NaT', 'shift_forward', 'shift_backward' or " + "a timedelta object" + ) + + axis = self._get_axis_number(axis) + ax = self._get_axis(axis) + + def _tz_localize(ax, tz, ambiguous, nonexistent): + if not hasattr(ax, "tz_localize"): + if len(ax) > 0: + ax_name = self._get_axis_name(axis) + raise TypeError( + f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" + ) + ax = DatetimeIndex([], tz=tz) + else: + ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) + return ax + + # if a level is given it must be a MultiIndex level or + # equivalent to the axis name + if isinstance(ax, MultiIndex): + level = ax._get_level_number(level) + new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent) + ax = ax.set_levels(new_level, level=level) + else: + if level not in (None, 0, ax.name): + raise ValueError(f"The level {level} is not valid") + ax = _tz_localize(ax, tz, ambiguous, nonexistent) + + result = self.copy(deep=copy and not using_copy_on_write()) + result = result.set_axis(ax, axis=axis, copy=False) + return result.__finalize__(self, method="tz_localize") + + # ---------------------------------------------------------------------- + # 
Numeric Methods
+
+    @final
+    def describe(
+        self,
+        percentiles=None,
+        include=None,
+        exclude=None,
+    ) -> Self:
+        """
+        Generate descriptive statistics.
+
+        Descriptive statistics include those that summarize the central
+        tendency, dispersion and shape of a
+        dataset's distribution, excluding ``NaN`` values.
+
+        Analyzes both numeric and object series, as well
+        as ``DataFrame`` column sets of mixed data types. The output
+        will vary depending on what is provided. Refer to the notes
+        below for more detail.
+
+        Parameters
+        ----------
+        percentiles : list-like of numbers, optional
+            The percentiles to include in the output. All should
+            fall between 0 and 1. The default is
+            ``[.25, .5, .75]``, which returns the 25th, 50th, and
+            75th percentiles.
+        include : 'all', list-like of dtypes or None (default), optional
+            A whitelist of data types to include in the result. Ignored
+            for ``Series``. Here are the options:
+
+            - 'all' : All columns of the input will be included in the output.
+            - A list-like of dtypes : Limits the results to the
+              provided data types.
+              To limit the result to numeric types submit
+              ``numpy.number``. To limit it instead to object columns submit
+              the ``numpy.object`` data type. Strings
+              can also be used in the style of
+              ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
+              select pandas categorical columns, use ``'category'``
+            - None (default) : The result will include all numeric columns.
+        exclude : list-like of dtypes or None (default), optional
+            A blacklist of data types to omit from the result. Ignored
+            for ``Series``. Here are the options:
+
+            - A list-like of dtypes : Excludes the provided data types
+              from the result. To exclude numeric types submit
+              ``numpy.number``. To exclude object columns submit the data
+              type ``numpy.object``. Strings can also be used in the style of
+              ``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To
+              exclude pandas categorical columns, use ``'category'``
+            - None (default) : The result will exclude nothing.
+
+        Returns
+        -------
+        Series or DataFrame
+            Summary statistics of the Series or DataFrame provided.
+
+        See Also
+        --------
+        DataFrame.count: Count number of non-NA/null observations.
+        DataFrame.max: Maximum of the values in the object.
+        DataFrame.min: Minimum of the values in the object.
+        DataFrame.mean: Mean of the values.
+        DataFrame.std: Standard deviation of the observations.
+        DataFrame.select_dtypes: Subset of a DataFrame including/excluding
+            columns based on their dtype.
+
+        Notes
+        -----
+        For numeric data, the result's index will include ``count``,
+        ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
+        upper percentiles. By default the lower percentile is ``25`` and the
+        upper percentile is ``75``. The ``50`` percentile is the
+        same as the median.
+
+        For object data (e.g. strings or timestamps), the result's index
+        will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
+        is the most common value. The ``freq`` is the most common value's
+        frequency. Timestamps also include the ``first`` and ``last`` items.
+
+        If multiple object values have the highest count, then the
+        ``count`` and ``top`` results will be arbitrarily chosen from
+        among those with the highest count.
+
+        For mixed data types provided via a ``DataFrame``, the default is to
+        return only an analysis of numeric columns.
If the dataframe consists + only of object and categorical data without any numeric columns, the + default is to return an analysis of both the object and categorical + columns. If ``include='all'`` is provided as an option, the result + will include a union of attributes of each type. + + The `include` and `exclude` parameters can be used to limit + which columns in a ``DataFrame`` are analyzed for the output. + The parameters are ignored when analyzing a ``Series``. + + Examples + -------- + Describing a numeric ``Series``. + + >>> s = pd.Series([1, 2, 3]) + >>> s.describe() + count 3.0 + mean 2.0 + std 1.0 + min 1.0 + 25% 1.5 + 50% 2.0 + 75% 2.5 + max 3.0 + dtype: float64 + + Describing a categorical ``Series``. + + >>> s = pd.Series(['a', 'a', 'b', 'c']) + >>> s.describe() + count 4 + unique 3 + top a + freq 2 + dtype: object + + Describing a timestamp ``Series``. + + >>> s = pd.Series([ + ... np.datetime64("2000-01-01"), + ... np.datetime64("2010-01-01"), + ... np.datetime64("2010-01-01") + ... ]) + >>> s.describe() + count 3 + mean 2006-09-01 08:00:00 + min 2000-01-01 00:00:00 + 25% 2004-12-31 12:00:00 + 50% 2010-01-01 00:00:00 + 75% 2010-01-01 00:00:00 + max 2010-01-01 00:00:00 + dtype: object + + Describing a ``DataFrame``. By default only numeric fields + are returned. + + >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']), + ... 'numeric': [1, 2, 3], + ... 'object': ['a', 'b', 'c'] + ... }) + >>> df.describe() + numeric + count 3.0 + mean 2.0 + std 1.0 + min 1.0 + 25% 1.5 + 50% 2.0 + 75% 2.5 + max 3.0 + + Describing all columns of a ``DataFrame`` regardless of data type. + + >>> df.describe(include='all') # doctest: +SKIP + categorical numeric object + count 3 3.0 3 + unique 3 NaN 3 + top f NaN a + freq 1 NaN 1 + mean NaN 2.0 NaN + std NaN 1.0 NaN + min NaN 1.0 NaN + 25% NaN 1.5 NaN + 50% NaN 2.0 NaN + 75% NaN 2.5 NaN + max NaN 3.0 NaN + + Describing a column from a ``DataFrame`` by accessing it as + an attribute. + + >>> df.numeric.describe() + count 3.0 + mean 2.0 + std 1.0 + min 1.0 + 25% 1.5 + 50% 2.0 + 75% 2.5 + max 3.0 + Name: numeric, dtype: float64 + + Including only numeric columns in a ``DataFrame`` description. + + >>> df.describe(include=[np.number]) + numeric + count 3.0 + mean 2.0 + std 1.0 + min 1.0 + 25% 1.5 + 50% 2.0 + 75% 2.5 + max 3.0 + + Including only string columns in a ``DataFrame`` description. + + >>> df.describe(include=[object]) # doctest: +SKIP + object + count 3 + unique 3 + top a + freq 1 + + Including only categorical columns from a ``DataFrame`` description. + + >>> df.describe(include=['category']) + categorical + count 3 + unique 3 + top d + freq 1 + + Excluding numeric columns from a ``DataFrame`` description. + + >>> df.describe(exclude=[np.number]) # doctest: +SKIP + categorical object + count 3 3 + unique 3 3 + top f a + freq 1 1 + + Excluding object columns from a ``DataFrame`` description. 
+
+        >>> df.describe(exclude=[object])  # doctest: +SKIP
+               categorical  numeric
+        count            3      3.0
+        unique           3      NaN
+        top              f      NaN
+        freq             1      NaN
+        mean           NaN      2.0
+        std            NaN      1.0
+        min            NaN      1.0
+        25%            NaN      1.5
+        50%            NaN      2.0
+        75%            NaN      2.5
+        max            NaN      3.0
+        """
+        return describe_ndframe(
+            obj=self,
+            include=include,
+            exclude=exclude,
+            percentiles=percentiles,
+        ).__finalize__(self, method="describe")
+
+    @final
+    def pct_change(
+        self,
+        periods: int = 1,
+        fill_method: FillnaOptions | None | lib.NoDefault = lib.no_default,
+        limit: int | None | lib.NoDefault = lib.no_default,
+        freq=None,
+        **kwargs,
+    ) -> Self:
+        """
+        Fractional change between the current and a prior element.
+
+        Computes the fractional change from the immediately previous row by
+        default. This is useful in comparing the fraction of change in a time
+        series of elements.
+
+        .. note::
+
+            Despite the name of this method, it calculates fractional change
+            (also known as per unit change or relative change) and not
+            percentage change. If you need the percentage change, multiply
+            these values by 100.
+
+        Parameters
+        ----------
+        periods : int, default 1
+            Periods to shift for forming percent change.
+        fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad'
+            How to handle NAs **before** computing percent changes.
+
+            .. deprecated:: 2.1
+                All options of `fill_method` are deprecated except `fill_method=None`.
+
+        limit : int, default None
+            The number of consecutive NAs to fill before stopping.
+
+            .. deprecated:: 2.1
+
+        freq : DateOffset, timedelta, or str, optional
+            Increment to use from time series API (e.g. 'M' or BDay()).
+        **kwargs
+            Additional keyword arguments are passed into
+            `DataFrame.shift` or `Series.shift`.
+
+        Returns
+        -------
+        Series or DataFrame
+            The same type as the calling object.
+
+        See Also
+        --------
+        Series.diff : Compute the difference of two elements in a Series.
+        DataFrame.diff : Compute the difference of two elements in a DataFrame.
+        Series.shift : Shift the index by some number of periods.
+        DataFrame.shift : Shift the index by some number of periods.
+
+        Examples
+        --------
+        **Series**
+
+        >>> s = pd.Series([90, 91, 85])
+        >>> s
+        0    90
+        1    91
+        2    85
+        dtype: int64
+
+        >>> s.pct_change()
+        0         NaN
+        1    0.011111
+        2   -0.065934
+        dtype: float64
+
+        >>> s.pct_change(periods=2)
+        0         NaN
+        1         NaN
+        2   -0.055556
+        dtype: float64
+
+        See the percentage change in a Series where NAs are filled with the
+        last valid observation, carried forward to the next valid one.
+
+        >>> s = pd.Series([90, 91, None, 85])
+        >>> s
+        0    90.0
+        1    91.0
+        2     NaN
+        3    85.0
+        dtype: float64
+
+        >>> s.ffill().pct_change()
+        0         NaN
+        1    0.011111
+        2    0.000000
+        3   -0.065934
+        dtype: float64
+
+        **DataFrame**
+
+        Percentage change in French franc, Deutsche Mark, and Italian lira from
+        1980-01-01 to 1980-03-01.
+
+        >>> df = pd.DataFrame({
+        ...     'FR': [4.0405, 4.0963, 4.3149],
+        ...     'GR': [1.7246, 1.7482, 1.8519],
+        ...     'IT': [804.74, 810.01, 860.13]},
+        ...     index=['1980-01-01', '1980-02-01', '1980-03-01'])
+        >>> df
+                        FR      GR      IT
+        1980-01-01  4.0405  1.7246  804.74
+        1980-02-01  4.0963  1.7482  810.01
+        1980-03-01  4.3149  1.8519  860.13
+
+        >>> df.pct_change()
+                          FR        GR        IT
+        1980-01-01       NaN       NaN       NaN
+        1980-02-01  0.013810  0.013684  0.006549
+        1980-03-01  0.053365  0.059318  0.061876
+
+        Percentage change in GOOG and APPL stock volume. Shows computing
+        the percentage change between columns.
+
+        >>> df = pd.DataFrame({
+        ...     '2016': [1769950, 30586265],
+        ...     '2015': [1500923, 40912316],
+        ...     '2014': [1371819, 41403351]},
+        ...
index=['GOOG', 'APPL']) + >>> df + 2016 2015 2014 + GOOG 1769950 1500923 1371819 + APPL 30586265 40912316 41403351 + + >>> df.pct_change(axis='columns', periods=-1) + 2016 2015 2014 + GOOG 0.179241 0.094112 NaN + APPL -0.252395 -0.011860 NaN + """ + # GH#53491 + if fill_method not in (lib.no_default, None) or limit is not lib.no_default: + warnings.warn( + "The 'fill_method' keyword being not None and the 'limit' keyword in " + f"{type(self).__name__}.pct_change are deprecated and will be removed " + "in a future version. Either fill in any non-leading NA values prior " + "to calling pct_change or specify 'fill_method=None' to not fill NA " + "values.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if fill_method is lib.no_default: + if limit is lib.no_default: + cols = self.items() if self.ndim == 2 else [(None, self)] + for _, col in cols: + mask = col.isna().values + mask = mask[np.argmax(~mask) :] + if mask.any(): + warnings.warn( + "The default fill_method='pad' in " + f"{type(self).__name__}.pct_change is deprecated and will " + "be removed in a future version. Either fill in any " + "non-leading NA values prior to calling pct_change or " + "specify 'fill_method=None' to not fill NA values.", + FutureWarning, + stacklevel=find_stack_level(), + ) + break + fill_method = "pad" + if limit is lib.no_default: + limit = None + + axis = self._get_axis_number(kwargs.pop("axis", "index")) + if fill_method is None: + data = self + else: + data = self._pad_or_backfill(fill_method, axis=axis, limit=limit) + + shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs) + # Unsupported left operand type for / ("Self") + rs = data / shifted - 1 # type: ignore[operator] + if freq is not None: + # Shift method is implemented differently when freq is not None + # We want to restore the original index + rs = rs.loc[~rs.index.duplicated()] + rs = rs.reindex_like(data) + return rs.__finalize__(self, method="pct_change") + + @final + def _logical_func( + self, + name: str, + func, + axis: Axis = 0, + bool_only: bool_t = False, + skipna: bool_t = True, + **kwargs, + ) -> Series | bool_t: + nv.validate_logical_func((), kwargs, fname=name) + validate_bool_kwarg(skipna, "skipna", none_allowed=False) + + if self.ndim > 1 and axis is None: + # Reduce along one dimension then the other, to simplify DataFrame._reduce + res = self._logical_func( + name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs + ) + return res._logical_func(name, func, skipna=skipna, **kwargs) + elif axis is None: + axis = 0 + + if ( + self.ndim > 1 + and axis == 1 + and len(self._mgr.arrays) > 1 + # TODO(EA2D): special-case not needed + and all(x.ndim == 2 for x in self._mgr.arrays) + and not kwargs + ): + # Fastpath avoiding potentially expensive transpose + obj = self + if bool_only: + obj = self._get_bool_data() + return obj._reduce_axis1(name, func, skipna=skipna) + + return self._reduce( + func, + name=name, + axis=axis, + skipna=skipna, + numeric_only=bool_only, + filter_type="bool", + ) + + def any( + self, + axis: Axis = 0, + bool_only: bool_t = False, + skipna: bool_t = True, + **kwargs, + ) -> Series | bool_t: + return self._logical_func( + "any", nanops.nanany, axis, bool_only, skipna, **kwargs + ) + + def all( + self, + axis: Axis = 0, + bool_only: bool_t = False, + skipna: bool_t = True, + **kwargs, + ) -> Series | bool_t: + return self._logical_func( + "all", nanops.nanall, axis, bool_only, skipna, **kwargs + ) + + @final + def _accum_func( + self, + name: str, + func, + axis: Axis | None = 
None, + skipna: bool_t = True, + *args, + **kwargs, + ): + skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name) + if axis is None: + axis = 0 + else: + axis = self._get_axis_number(axis) + + if axis == 1: + return self.T._accum_func( + name, func, axis=0, skipna=skipna, *args, **kwargs # noqa: B026 + ).T + + def block_accum_func(blk_values): + values = blk_values.T if hasattr(blk_values, "T") else blk_values + + result: np.ndarray | ExtensionArray + if isinstance(values, ExtensionArray): + result = values._accumulate(name, skipna=skipna, **kwargs) + else: + result = nanops.na_accum_func(values, func, skipna=skipna) + + result = result.T if hasattr(result, "T") else result + return result + + result = self._mgr.apply(block_accum_func) + + return self._constructor_from_mgr(result, axes=result.axes).__finalize__( + self, method=name + ) + + def cummax(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): + return self._accum_func( + "cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs + ) + + def cummin(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): + return self._accum_func( + "cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs + ) + + def cumsum(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): + return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs) + + def cumprod(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): + return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs) + + @final + def _stat_function_ddof( + self, + name: str, + func, + axis: Axis | None | lib.NoDefault = lib.no_default, + skipna: bool_t = True, + ddof: int = 1, + numeric_only: bool_t = False, + **kwargs, + ) -> Series | float: + nv.validate_stat_ddof_func((), kwargs, fname=name) + validate_bool_kwarg(skipna, "skipna", none_allowed=False) + + if axis is None: + if self.ndim > 1: + warnings.warn( + f"The behavior of {type(self).__name__}.{name} with axis=None " + "is deprecated, in a future version this will reduce over both " + "axes and return a scalar. 
To retain the old behavior, pass " + "axis=0 (or do not pass axis)", + FutureWarning, + stacklevel=find_stack_level(), + ) + axis = 0 + elif axis is lib.no_default: + axis = 0 + + return self._reduce( + func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof + ) + + def sem( + self, + axis: Axis | None = 0, + skipna: bool_t = True, + ddof: int = 1, + numeric_only: bool_t = False, + **kwargs, + ) -> Series | float: + return self._stat_function_ddof( + "sem", nanops.nansem, axis, skipna, ddof, numeric_only, **kwargs + ) + + def var( + self, + axis: Axis | None = 0, + skipna: bool_t = True, + ddof: int = 1, + numeric_only: bool_t = False, + **kwargs, + ) -> Series | float: + return self._stat_function_ddof( + "var", nanops.nanvar, axis, skipna, ddof, numeric_only, **kwargs + ) + + def std( + self, + axis: Axis | None = 0, + skipna: bool_t = True, + ddof: int = 1, + numeric_only: bool_t = False, + **kwargs, + ) -> Series | float: + return self._stat_function_ddof( + "std", nanops.nanstd, axis, skipna, ddof, numeric_only, **kwargs + ) + + @final + def _stat_function( + self, + name: str, + func, + axis: Axis | None = 0, + skipna: bool_t = True, + numeric_only: bool_t = False, + **kwargs, + ): + assert name in ["median", "mean", "min", "max", "kurt", "skew"], name + nv.validate_func(name, (), kwargs) + + validate_bool_kwarg(skipna, "skipna", none_allowed=False) + + return self._reduce( + func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only + ) + + def min( + self, + axis: Axis | None = 0, + skipna: bool_t = True, + numeric_only: bool_t = False, + **kwargs, + ): + return self._stat_function( + "min", + nanops.nanmin, + axis, + skipna, + numeric_only, + **kwargs, + ) + + def max( + self, + axis: Axis | None = 0, + skipna: bool_t = True, + numeric_only: bool_t = False, + **kwargs, + ): + return self._stat_function( + "max", + nanops.nanmax, + axis, + skipna, + numeric_only, + **kwargs, + ) + + def mean( + self, + axis: Axis | None = 0, + skipna: bool_t = True, + numeric_only: bool_t = False, + **kwargs, + ) -> Series | float: + return self._stat_function( + "mean", nanops.nanmean, axis, skipna, numeric_only, **kwargs + ) + + def median( + self, + axis: Axis | None = 0, + skipna: bool_t = True, + numeric_only: bool_t = False, + **kwargs, + ) -> Series | float: + return self._stat_function( + "median", nanops.nanmedian, axis, skipna, numeric_only, **kwargs + ) + + def skew( + self, + axis: Axis | None = 0, + skipna: bool_t = True, + numeric_only: bool_t = False, + **kwargs, + ) -> Series | float: + return self._stat_function( + "skew", nanops.nanskew, axis, skipna, numeric_only, **kwargs + ) + + def kurt( + self, + axis: Axis | None = 0, + skipna: bool_t = True, + numeric_only: bool_t = False, + **kwargs, + ) -> Series | float: + return self._stat_function( + "kurt", nanops.nankurt, axis, skipna, numeric_only, **kwargs + ) + + kurtosis = kurt + + @final + def _min_count_stat_function( + self, + name: str, + func, + axis: Axis | None | lib.NoDefault = lib.no_default, + skipna: bool_t = True, + numeric_only: bool_t = False, + min_count: int = 0, + **kwargs, + ): + assert name in ["sum", "prod"], name + nv.validate_func(name, (), kwargs) + + validate_bool_kwarg(skipna, "skipna", none_allowed=False) + + if axis is None: + if self.ndim > 1: + warnings.warn( + f"The behavior of {type(self).__name__}.{name} with axis=None " + "is deprecated, in a future version this will reduce over both " + "axes and return a scalar. 
To retain the old behavior, pass " + "axis=0 (or do not pass axis)", + FutureWarning, + stacklevel=find_stack_level(), + ) + axis = 0 + elif axis is lib.no_default: + axis = 0 + + return self._reduce( + func, + name=name, + axis=axis, + skipna=skipna, + numeric_only=numeric_only, + min_count=min_count, + ) + + def sum( + self, + axis: Axis | None = 0, + skipna: bool_t = True, + numeric_only: bool_t = False, + min_count: int = 0, + **kwargs, + ): + return self._min_count_stat_function( + "sum", nanops.nansum, axis, skipna, numeric_only, min_count, **kwargs + ) + + def prod( + self, + axis: Axis | None = 0, + skipna: bool_t = True, + numeric_only: bool_t = False, + min_count: int = 0, + **kwargs, + ): + return self._min_count_stat_function( + "prod", + nanops.nanprod, + axis, + skipna, + numeric_only, + min_count, + **kwargs, + ) + + product = prod + + @final + @doc(Rolling) + def rolling( + self, + window: int | dt.timedelta | str | BaseOffset | BaseIndexer, + min_periods: int | None = None, + center: bool_t = False, + win_type: str | None = None, + on: str | None = None, + axis: Axis | lib.NoDefault = lib.no_default, + closed: IntervalClosedType | None = None, + step: int | None = None, + method: str = "single", + ) -> Window | Rolling: + if axis is not lib.no_default: + axis = self._get_axis_number(axis) + name = "rolling" + if axis == 1: + warnings.warn( + f"Support for axis=1 in {type(self).__name__}.{name} is " + "deprecated and will be removed in a future version. " + f"Use obj.T.{name}(...) instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + warnings.warn( + f"The 'axis' keyword in {type(self).__name__}.{name} is " + "deprecated and will be removed in a future version. " + "Call the method without the axis keyword instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + axis = 0 + + if win_type is not None: + return Window( + self, + window=window, + min_periods=min_periods, + center=center, + win_type=win_type, + on=on, + axis=axis, + closed=closed, + step=step, + method=method, + ) + + return Rolling( + self, + window=window, + min_periods=min_periods, + center=center, + win_type=win_type, + on=on, + axis=axis, + closed=closed, + step=step, + method=method, + ) + + @final + @doc(Expanding) + def expanding( + self, + min_periods: int = 1, + axis: Axis | lib.NoDefault = lib.no_default, + method: Literal["single", "table"] = "single", + ) -> Expanding: + if axis is not lib.no_default: + axis = self._get_axis_number(axis) + name = "expanding" + if axis == 1: + warnings.warn( + f"Support for axis=1 in {type(self).__name__}.{name} is " + "deprecated and will be removed in a future version. " + f"Use obj.T.{name}(...) instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + warnings.warn( + f"The 'axis' keyword in {type(self).__name__}.{name} is " + "deprecated and will be removed in a future version. 
" + "Call the method without the axis keyword instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + axis = 0 + return Expanding(self, min_periods=min_periods, axis=axis, method=method) + + @final + @doc(ExponentialMovingWindow) + def ewm( + self, + com: float | None = None, + span: float | None = None, + halflife: float | TimedeltaConvertibleTypes | None = None, + alpha: float | None = None, + min_periods: int | None = 0, + adjust: bool_t = True, + ignore_na: bool_t = False, + axis: Axis | lib.NoDefault = lib.no_default, + times: np.ndarray | DataFrame | Series | None = None, + method: Literal["single", "table"] = "single", + ) -> ExponentialMovingWindow: + if axis is not lib.no_default: + axis = self._get_axis_number(axis) + name = "ewm" + if axis == 1: + warnings.warn( + f"Support for axis=1 in {type(self).__name__}.{name} is " + "deprecated and will be removed in a future version. " + f"Use obj.T.{name}(...) instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + warnings.warn( + f"The 'axis' keyword in {type(self).__name__}.{name} is " + "deprecated and will be removed in a future version. " + "Call the method without the axis keyword instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + axis = 0 + + return ExponentialMovingWindow( + self, + com=com, + span=span, + halflife=halflife, + alpha=alpha, + min_periods=min_periods, + adjust=adjust, + ignore_na=ignore_na, + axis=axis, + times=times, + method=method, + ) + + # ---------------------------------------------------------------------- + # Arithmetic Methods + + @final + def _inplace_method(self, other, op) -> Self: + """ + Wrap arithmetic method to operate inplace. + """ + result = op(self, other) + + if self.ndim == 1 and result._indexed_same(self) and result.dtype == self.dtype: + # GH#36498 this inplace op can _actually_ be inplace. 
+
+            # Item "ArrayManager" of "Union[ArrayManager, SingleArrayManager,
+            # BlockManager, SingleBlockManager]" has no attribute "setitem_inplace"
+            self._mgr.setitem_inplace(  # type: ignore[union-attr]
+                slice(None), result._values
+            )
+            return self
+
+        # Delete cacher
+        self._reset_cacher()
+
+        # this makes sure that we are aligned like the input
+        # we are updating inplace so we want to ignore is_copy
+        self._update_inplace(
+            result.reindex_like(self, copy=False), verify_is_copy=False
+        )
+        return self
+
+    @final
+    def __iadd__(self, other) -> Self:
+        # error: Unsupported left operand type for + ("Type[NDFrame]")
+        return self._inplace_method(other, type(self).__add__)  # type: ignore[operator]
+
+    @final
+    def __isub__(self, other) -> Self:
+        # error: Unsupported left operand type for - ("Type[NDFrame]")
+        return self._inplace_method(other, type(self).__sub__)  # type: ignore[operator]
+
+    @final
+    def __imul__(self, other) -> Self:
+        # error: Unsupported left operand type for * ("Type[NDFrame]")
+        return self._inplace_method(other, type(self).__mul__)  # type: ignore[operator]
+
+    @final
+    def __itruediv__(self, other) -> Self:
+        # error: Unsupported left operand type for / ("Type[NDFrame]")
+        return self._inplace_method(
+            other, type(self).__truediv__  # type: ignore[operator]
+        )
+
+    @final
+    def __ifloordiv__(self, other) -> Self:
+        # error: Unsupported left operand type for // ("Type[NDFrame]")
+        return self._inplace_method(
+            other, type(self).__floordiv__  # type: ignore[operator]
+        )
+
+    @final
+    def __imod__(self, other) -> Self:
+        # error: Unsupported left operand type for % ("Type[NDFrame]")
+        return self._inplace_method(other, type(self).__mod__)  # type: ignore[operator]
+
+    @final
+    def __ipow__(self, other) -> Self:
+        # error: Unsupported left operand type for ** ("Type[NDFrame]")
+        return self._inplace_method(other, type(self).__pow__)  # type: ignore[operator]
+
+    @final
+    def __iand__(self, other) -> Self:
+        # error: Unsupported left operand type for & ("Type[NDFrame]")
+        return self._inplace_method(other, type(self).__and__)  # type: ignore[operator]
+
+    @final
+    def __ior__(self, other) -> Self:
+        return self._inplace_method(other, type(self).__or__)
+
+    @final
+    def __ixor__(self, other) -> Self:
+        # error: Unsupported left operand type for ^ ("Type[NDFrame]")
+        return self._inplace_method(other, type(self).__xor__)  # type: ignore[operator]
+
+    # ----------------------------------------------------------------------
+    # Misc methods
+
+    @final
+    def _find_valid_index(self, *, how: str) -> Hashable | None:
+        """
+        Retrieves the index of the first or last valid value, depending on ``how``.
+
+        Parameters
+        ----------
+        how : {'first', 'last'}
+            Use this parameter to change between the first or last valid index.
+
+        Returns
+        -------
+        idx_first_valid : type of index
+        """
+        is_valid = self.notna().values
+        idxpos = find_valid_index(how=how, is_valid=is_valid)
+        if idxpos is None:
+            return None
+        return self.index[idxpos]
+
+    @final
+    @doc(position="first", klass=_shared_doc_kwargs["klass"])
+    def first_valid_index(self) -> Hashable | None:
+        """
+        Return index for {position} non-NA value or None, if no non-NA value is found.
+
+        Returns
+        -------
+        type of index
+
+        Notes
+        -----
+        If all elements are NA/null, returns None.
+        Also returns None for empty {klass}.
+ + Examples + -------- + For Series: + + >>> s = pd.Series([None, 3, 4]) + >>> s.first_valid_index() + 1 + >>> s.last_valid_index() + 2 + + For DataFrame: + + >>> df = pd.DataFrame({{'A': [None, None, 2], 'B': [None, 3, 4]}}) + >>> df + A B + 0 NaN NaN + 1 NaN 3.0 + 2 2.0 4.0 + >>> df.first_valid_index() + 1 + >>> df.last_valid_index() + 2 + """ + return self._find_valid_index(how="first") + + @final + @doc(first_valid_index, position="last", klass=_shared_doc_kwargs["klass"]) + def last_valid_index(self) -> Hashable | None: + return self._find_valid_index(how="last") + + +_num_doc = """ +{desc} + +Parameters +---------- +axis : {axis_descr} + Axis for the function to be applied on. + For `Series` this parameter is unused and defaults to 0. + + For DataFrames, specifying ``axis=None`` will apply the aggregation + across both axes. + + .. versionadded:: 2.0.0 + +skipna : bool, default True + Exclude NA/null values when computing the result. +numeric_only : bool, default False + Include only float, int, boolean columns. Not implemented for Series. + +{min_count}\ +**kwargs + Additional keyword arguments to be passed to the function. + +Returns +------- +{name1} or scalar\ +{see_also}\ +{examples} +""" + +_num_ddof_doc = """ +{desc} + +Parameters +---------- +axis : {axis_descr} + For `Series` this parameter is unused and defaults to 0. +skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. +ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations is N - ddof, + where N represents the number of elements. +numeric_only : bool, default False + Include only float, int, boolean columns. Not implemented for Series. + +Returns +------- +{name1} or {name2} (if level specified) \ +{notes}\ +{examples} +""" + +_std_notes = """ + +Notes +----- +To have the same behaviour as `numpy.std`, use `ddof=0` (instead of the +default `ddof=1`)""" + +_std_examples = """ + +Examples +-------- +>>> df = pd.DataFrame({'person_id': [0, 1, 2, 3], +... 'age': [21, 25, 62, 43], +... 'height': [1.61, 1.87, 1.49, 2.01]} +... ).set_index('person_id') +>>> df + age height +person_id +0 21 1.61 +1 25 1.87 +2 62 1.49 +3 43 2.01 + +The standard deviation of the columns can be found as follows: + +>>> df.std() +age 18.786076 +height 0.237417 +dtype: float64 + +Alternatively, `ddof=0` can be set to normalize by N instead of N-1: + +>>> df.std(ddof=0) +age 16.269219 +height 0.205609 +dtype: float64""" + +_var_examples = """ + +Examples +-------- +>>> df = pd.DataFrame({'person_id': [0, 1, 2, 3], +... 'age': [21, 25, 62, 43], +... 'height': [1.61, 1.87, 1.49, 2.01]} +... ).set_index('person_id') +>>> df + age height +person_id +0 21 1.61 +1 25 1.87 +2 62 1.49 +3 43 2.01 + +>>> df.var() +age 352.916667 +height 0.056367 +dtype: float64 + +Alternatively, ``ddof=0`` can be set to normalize by N instead of N-1: + +>>> df.var(ddof=0) +age 264.687500 +height 0.042275 +dtype: float64""" + +_bool_doc = """ +{desc} + +Parameters +---------- +axis : {{0 or 'index', 1 or 'columns', None}}, default 0 + Indicate which axis or axes should be reduced. For `Series` this parameter + is unused and defaults to 0. + + * 0 / 'index' : reduce the index, return a Series whose index is the + original column labels. + * 1 / 'columns' : reduce the columns, return a Series whose index is the + original index. + * None : reduce all axes, return a scalar. + +bool_only : bool, default False + Include only boolean columns. Not implemented for Series. 
+skipna : bool, default True + Exclude NA/null values. If the entire row/column is NA and skipna is + True, then the result will be {empty_value}, as for an empty row/column. + If skipna is False, then NA are treated as True, because these are not + equal to zero. +**kwargs : any, default None + Additional keywords have no effect but might be accepted for + compatibility with NumPy. + +Returns +------- +{name1} or {name2} + If level is specified, then, {name2} is returned; otherwise, {name1} + is returned. + +{see_also} +{examples}""" + +_all_desc = """\ +Return whether all elements are True, potentially over an axis. + +Returns True unless there at least one element within a series or +along a Dataframe axis that is False or equivalent (e.g. zero or +empty).""" + +_all_examples = """\ +Examples +-------- +**Series** + +>>> pd.Series([True, True]).all() +True +>>> pd.Series([True, False]).all() +False +>>> pd.Series([], dtype="float64").all() +True +>>> pd.Series([np.nan]).all() +True +>>> pd.Series([np.nan]).all(skipna=False) +True + +**DataFrames** + +Create a dataframe from a dictionary. + +>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]}) +>>> df + col1 col2 +0 True True +1 True False + +Default behaviour checks if values in each column all return True. + +>>> df.all() +col1 True +col2 False +dtype: bool + +Specify ``axis='columns'`` to check if values in each row all return True. + +>>> df.all(axis='columns') +0 True +1 False +dtype: bool + +Or ``axis=None`` for whether every value is True. + +>>> df.all(axis=None) +False +""" + +_all_see_also = """\ +See Also +-------- +Series.all : Return True if all elements are True. +DataFrame.any : Return True if one (or more) elements are True. +""" + +_cnum_doc = """ +Return cumulative {desc} over a DataFrame or Series axis. + +Returns a DataFrame or Series of the same size containing the cumulative +{desc}. + +Parameters +---------- +axis : {{0 or 'index', 1 or 'columns'}}, default 0 + The index or the name of the axis. 0 is equivalent to None or 'index'. + For `Series` this parameter is unused and defaults to 0. +skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. +*args, **kwargs + Additional keywords have no effect but might be accepted for + compatibility with NumPy. + +Returns +------- +{name1} or {name2} + Return cumulative {desc} of {name1} or {name2}. + +See Also +-------- +core.window.expanding.Expanding.{accum_func_name} : Similar functionality + but ignores ``NaN`` values. +{name2}.{accum_func_name} : Return the {desc} over + {name2} axis. +{name2}.cummax : Return cumulative maximum over {name2} axis. +{name2}.cummin : Return cumulative minimum over {name2} axis. +{name2}.cumsum : Return cumulative sum over {name2} axis. +{name2}.cumprod : Return cumulative product over {name2} axis. + +{examples}""" + +_cummin_examples = """\ +Examples +-------- +**Series** + +>>> s = pd.Series([2, np.nan, 5, -1, 0]) +>>> s +0 2.0 +1 NaN +2 5.0 +3 -1.0 +4 0.0 +dtype: float64 + +By default, NA values are ignored. + +>>> s.cummin() +0 2.0 +1 NaN +2 2.0 +3 -1.0 +4 -1.0 +dtype: float64 + +To include NA values in the operation, use ``skipna=False`` + +>>> s.cummin(skipna=False) +0 2.0 +1 NaN +2 NaN +3 NaN +4 NaN +dtype: float64 + +**DataFrame** + +>>> df = pd.DataFrame([[2.0, 1.0], +... [3.0, np.nan], +... [1.0, 0.0]], +... columns=list('AB')) +>>> df + A B +0 2.0 1.0 +1 3.0 NaN +2 1.0 0.0 + +By default, iterates over rows and finds the minimum +in each column. 
This is equivalent to ``axis=None`` or ``axis='index'``. + +>>> df.cummin() + A B +0 2.0 1.0 +1 2.0 NaN +2 1.0 0.0 + +To iterate over columns and find the minimum in each row, +use ``axis=1`` + +>>> df.cummin(axis=1) + A B +0 2.0 1.0 +1 3.0 NaN +2 1.0 0.0 +""" + +_cumsum_examples = """\ +Examples +-------- +**Series** + +>>> s = pd.Series([2, np.nan, 5, -1, 0]) +>>> s +0 2.0 +1 NaN +2 5.0 +3 -1.0 +4 0.0 +dtype: float64 + +By default, NA values are ignored. + +>>> s.cumsum() +0 2.0 +1 NaN +2 7.0 +3 6.0 +4 6.0 +dtype: float64 + +To include NA values in the operation, use ``skipna=False`` + +>>> s.cumsum(skipna=False) +0 2.0 +1 NaN +2 NaN +3 NaN +4 NaN +dtype: float64 + +**DataFrame** + +>>> df = pd.DataFrame([[2.0, 1.0], +... [3.0, np.nan], +... [1.0, 0.0]], +... columns=list('AB')) +>>> df + A B +0 2.0 1.0 +1 3.0 NaN +2 1.0 0.0 + +By default, iterates over rows and finds the sum +in each column. This is equivalent to ``axis=None`` or ``axis='index'``. + +>>> df.cumsum() + A B +0 2.0 1.0 +1 5.0 NaN +2 6.0 1.0 + +To iterate over columns and find the sum in each row, +use ``axis=1`` + +>>> df.cumsum(axis=1) + A B +0 2.0 3.0 +1 3.0 NaN +2 1.0 1.0 +""" + +_cumprod_examples = """\ +Examples +-------- +**Series** + +>>> s = pd.Series([2, np.nan, 5, -1, 0]) +>>> s +0 2.0 +1 NaN +2 5.0 +3 -1.0 +4 0.0 +dtype: float64 + +By default, NA values are ignored. + +>>> s.cumprod() +0 2.0 +1 NaN +2 10.0 +3 -10.0 +4 -0.0 +dtype: float64 + +To include NA values in the operation, use ``skipna=False`` + +>>> s.cumprod(skipna=False) +0 2.0 +1 NaN +2 NaN +3 NaN +4 NaN +dtype: float64 + +**DataFrame** + +>>> df = pd.DataFrame([[2.0, 1.0], +... [3.0, np.nan], +... [1.0, 0.0]], +... columns=list('AB')) +>>> df + A B +0 2.0 1.0 +1 3.0 NaN +2 1.0 0.0 + +By default, iterates over rows and finds the product +in each column. This is equivalent to ``axis=None`` or ``axis='index'``. + +>>> df.cumprod() + A B +0 2.0 1.0 +1 6.0 NaN +2 6.0 0.0 + +To iterate over columns and find the product in each row, +use ``axis=1`` + +>>> df.cumprod(axis=1) + A B +0 2.0 2.0 +1 3.0 NaN +2 1.0 0.0 +""" + +_cummax_examples = """\ +Examples +-------- +**Series** + +>>> s = pd.Series([2, np.nan, 5, -1, 0]) +>>> s +0 2.0 +1 NaN +2 5.0 +3 -1.0 +4 0.0 +dtype: float64 + +By default, NA values are ignored. + +>>> s.cummax() +0 2.0 +1 NaN +2 5.0 +3 5.0 +4 5.0 +dtype: float64 + +To include NA values in the operation, use ``skipna=False`` + +>>> s.cummax(skipna=False) +0 2.0 +1 NaN +2 NaN +3 NaN +4 NaN +dtype: float64 + +**DataFrame** + +>>> df = pd.DataFrame([[2.0, 1.0], +... [3.0, np.nan], +... [1.0, 0.0]], +... columns=list('AB')) +>>> df + A B +0 2.0 1.0 +1 3.0 NaN +2 1.0 0.0 + +By default, iterates over rows and finds the maximum +in each column. This is equivalent to ``axis=None`` or ``axis='index'``. + +>>> df.cummax() + A B +0 2.0 1.0 +1 3.0 NaN +2 3.0 1.0 + +To iterate over columns and find the maximum in each row, +use ``axis=1`` + +>>> df.cummax(axis=1) + A B +0 2.0 2.0 +1 3.0 NaN +2 1.0 1.0 +""" + +_any_see_also = """\ +See Also +-------- +numpy.any : Numpy version of this method. +Series.any : Return whether any element is True. +Series.all : Return whether all elements are True. +DataFrame.any : Return whether any element is True over requested axis. +DataFrame.all : Return whether all elements are True over requested axis. +""" + +_any_desc = """\ +Return whether any element is True, potentially over an axis. + +Returns False unless there is at least one element within a series or +along a Dataframe axis that is True or equivalent (e.g. 
non-zero or +non-empty).""" + +_any_examples = """\ +Examples +-------- +**Series** + +For Series input, the output is a scalar indicating whether any element +is True. + +>>> pd.Series([False, False]).any() +False +>>> pd.Series([True, False]).any() +True +>>> pd.Series([], dtype="float64").any() +False +>>> pd.Series([np.nan]).any() +False +>>> pd.Series([np.nan]).any(skipna=False) +True + +**DataFrame** + +Whether each column contains at least one True element (the default). + +>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]}) +>>> df + A B C +0 1 0 0 +1 2 2 0 + +>>> df.any() +A True +B True +C False +dtype: bool + +Aggregating over the columns. + +>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]}) +>>> df + A B +0 True 1 +1 False 2 + +>>> df.any(axis='columns') +0 True +1 True +dtype: bool + +>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]}) +>>> df + A B +0 True 1 +1 False 0 + +>>> df.any(axis='columns') +0 True +1 False +dtype: bool + +Aggregating over the entire DataFrame with ``axis=None``. + +>>> df.any(axis=None) +True + +`any` for an empty DataFrame is an empty Series. + +>>> pd.DataFrame([]).any() +Series([], dtype: bool) +""" + +_shared_docs[ + "stat_func_example" +] = """ + +Examples +-------- +>>> idx = pd.MultiIndex.from_arrays([ +... ['warm', 'warm', 'cold', 'cold'], +... ['dog', 'falcon', 'fish', 'spider']], +... names=['blooded', 'animal']) +>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx) +>>> s +blooded animal +warm dog 4 + falcon 2 +cold fish 0 + spider 8 +Name: legs, dtype: int64 + +>>> s.{stat_func}() +{default_output}""" + +_sum_examples = _shared_docs["stat_func_example"].format( + stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8 +) + +_sum_examples += """ + +By default, the sum of an empty or all-NA Series is ``0``. + +>>> pd.Series([], dtype="float64").sum() # min_count=0 is the default +0.0 + +This can be controlled with the ``min_count`` parameter. For example, if +you'd like the sum of an empty series to be NaN, pass ``min_count=1``. + +>>> pd.Series([], dtype="float64").sum(min_count=1) +nan + +Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and +empty series identically. + +>>> pd.Series([np.nan]).sum() +0.0 + +>>> pd.Series([np.nan]).sum(min_count=1) +nan""" + +_max_examples: str = _shared_docs["stat_func_example"].format( + stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8 +) + +_min_examples: str = _shared_docs["stat_func_example"].format( + stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0 +) + +_stat_func_see_also = """ + +See Also +-------- +Series.sum : Return the sum. +Series.min : Return the minimum. +Series.max : Return the maximum. +Series.idxmin : Return the index of the minimum. +Series.idxmax : Return the index of the maximum. +DataFrame.sum : Return the sum over the requested axis. +DataFrame.min : Return the minimum over the requested axis. +DataFrame.max : Return the maximum over the requested axis. +DataFrame.idxmin : Return the index of the minimum over the requested axis. 
+DataFrame.idxmax : Return the index of the maximum over the requested axis.""" + +_prod_examples = """ + +Examples +-------- +By default, the product of an empty or all-NA Series is ``1`` + +>>> pd.Series([], dtype="float64").prod() +1.0 + +This can be controlled with the ``min_count`` parameter + +>>> pd.Series([], dtype="float64").prod(min_count=1) +nan + +Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and +empty series identically. + +>>> pd.Series([np.nan]).prod() +1.0 + +>>> pd.Series([np.nan]).prod(min_count=1) +nan""" + +_min_count_stub = """\ +min_count : int, default 0 + The required number of valid values to perform the operation. If fewer than + ``min_count`` non-NA values are present the result will be NA. +""" + + +def make_doc(name: str, ndim: int) -> str: + """ + Generate the docstring for a Series/DataFrame reduction. + """ + if ndim == 1: + name1 = "scalar" + name2 = "Series" + axis_descr = "{index (0)}" + else: + name1 = "Series" + name2 = "DataFrame" + axis_descr = "{index (0), columns (1)}" + + if name == "any": + base_doc = _bool_doc + desc = _any_desc + see_also = _any_see_also + examples = _any_examples + kwargs = {"empty_value": "False"} + elif name == "all": + base_doc = _bool_doc + desc = _all_desc + see_also = _all_see_also + examples = _all_examples + kwargs = {"empty_value": "True"} + elif name == "min": + base_doc = _num_doc + desc = ( + "Return the minimum of the values over the requested axis.\n\n" + "If you want the *index* of the minimum, use ``idxmin``. This is " + "the equivalent of the ``numpy.ndarray`` method ``argmin``." + ) + see_also = _stat_func_see_also + examples = _min_examples + kwargs = {"min_count": ""} + elif name == "max": + base_doc = _num_doc + desc = ( + "Return the maximum of the values over the requested axis.\n\n" + "If you want the *index* of the maximum, use ``idxmax``. This is " + "the equivalent of the ``numpy.ndarray`` method ``argmax``." + ) + see_also = _stat_func_see_also + examples = _max_examples + kwargs = {"min_count": ""} + + elif name == "sum": + base_doc = _num_doc + desc = ( + "Return the sum of the values over the requested axis.\n\n" + "This is equivalent to the method ``numpy.sum``." + ) + see_also = _stat_func_see_also + examples = _sum_examples + kwargs = {"min_count": _min_count_stub} + + elif name == "prod": + base_doc = _num_doc + desc = "Return the product of the values over the requested axis." + see_also = _stat_func_see_also + examples = _prod_examples + kwargs = {"min_count": _min_count_stub} + + elif name == "median": + base_doc = _num_doc + desc = "Return the median of the values over the requested axis." + see_also = "" + examples = """ + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.median() + 2.0 + + With a DataFrame + + >>> df = pd.DataFrame({'a': [1, 2], 'b': [2, 3]}, index=['tiger', 'zebra']) + >>> df + a b + tiger 1 2 + zebra 2 3 + >>> df.median() + a 1.5 + b 2.5 + dtype: float64 + + Using axis=1 + + >>> df.median(axis=1) + tiger 1.5 + zebra 2.5 + dtype: float64 + + In this case, `numeric_only` should be set to `True` + to avoid getting an error. + + >>> df = pd.DataFrame({'a': [1, 2], 'b': ['T', 'Z']}, + ... index=['tiger', 'zebra']) + >>> df.median(numeric_only=True) + a 1.5 + dtype: float64""" + kwargs = {"min_count": ""} + + elif name == "mean": + base_doc = _num_doc + desc = "Return the mean of the values over the requested axis." 
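+        # mean is a plain reduction, so the shared _num_doc template is
+        # rendered with an empty See Also block and no min_count stub.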
+ see_also = "" + examples = """ + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.mean() + 2.0 + + With a DataFrame + + >>> df = pd.DataFrame({'a': [1, 2], 'b': [2, 3]}, index=['tiger', 'zebra']) + >>> df + a b + tiger 1 2 + zebra 2 3 + >>> df.mean() + a 1.5 + b 2.5 + dtype: float64 + + Using axis=1 + + >>> df.mean(axis=1) + tiger 1.5 + zebra 2.5 + dtype: float64 + + In this case, `numeric_only` should be set to `True` to avoid + getting an error. + + >>> df = pd.DataFrame({'a': [1, 2], 'b': ['T', 'Z']}, + ... index=['tiger', 'zebra']) + >>> df.mean(numeric_only=True) + a 1.5 + dtype: float64""" + kwargs = {"min_count": ""} + + elif name == "var": + base_doc = _num_ddof_doc + desc = ( + "Return unbiased variance over requested axis.\n\nNormalized by " + "N-1 by default. This can be changed using the ddof argument." + ) + examples = _var_examples + see_also = "" + kwargs = {"notes": ""} + + elif name == "std": + base_doc = _num_ddof_doc + desc = ( + "Return sample standard deviation over requested axis." + "\n\nNormalized by N-1 by default. This can be changed using the " + "ddof argument." + ) + examples = _std_examples + see_also = "" + kwargs = {"notes": _std_notes} + + elif name == "sem": + base_doc = _num_ddof_doc + desc = ( + "Return unbiased standard error of the mean over requested " + "axis.\n\nNormalized by N-1 by default. This can be changed " + "using the ddof argument" + ) + examples = """ + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.sem().round(6) + 0.57735 + + With a DataFrame + + >>> df = pd.DataFrame({'a': [1, 2], 'b': [2, 3]}, index=['tiger', 'zebra']) + >>> df + a b + tiger 1 2 + zebra 2 3 + >>> df.sem() + a 0.5 + b 0.5 + dtype: float64 + + Using axis=1 + + >>> df.sem(axis=1) + tiger 0.5 + zebra 0.5 + dtype: float64 + + In this case, `numeric_only` should be set to `True` + to avoid getting an error. + + >>> df = pd.DataFrame({'a': [1, 2], 'b': ['T', 'Z']}, + ... index=['tiger', 'zebra']) + >>> df.sem(numeric_only=True) + a 0.5 + dtype: float64""" + see_also = "" + kwargs = {"notes": ""} + + elif name == "skew": + base_doc = _num_doc + desc = "Return unbiased skew over requested axis.\n\nNormalized by N-1." + see_also = "" + examples = """ + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.skew() + 0.0 + + With a DataFrame + + >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [2, 3, 4], 'c': [1, 3, 5]}, + ... index=['tiger', 'zebra', 'cow']) + >>> df + a b c + tiger 1 2 1 + zebra 2 3 3 + cow 3 4 5 + >>> df.skew() + a 0.0 + b 0.0 + c 0.0 + dtype: float64 + + Using axis=1 + + >>> df.skew(axis=1) + tiger 1.732051 + zebra -1.732051 + cow 0.000000 + dtype: float64 + + In this case, `numeric_only` should be set to `True` to avoid + getting an error. + + >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': ['T', 'Z', 'X']}, + ... index=['tiger', 'zebra', 'cow']) + >>> df.skew(numeric_only=True) + a 0.0 + dtype: float64""" + kwargs = {"min_count": ""} + elif name == "kurt": + base_doc = _num_doc + desc = ( + "Return unbiased kurtosis over requested axis.\n\n" + "Kurtosis obtained using Fisher's definition of\n" + "kurtosis (kurtosis of normal == 0.0). Normalized " + "by N-1." + ) + see_also = "" + examples = """ + + Examples + -------- + >>> s = pd.Series([1, 2, 2, 3], index=['cat', 'dog', 'dog', 'mouse']) + >>> s + cat 1 + dog 2 + dog 2 + mouse 3 + dtype: int64 + >>> s.kurt() + 1.5 + + With a DataFrame + + >>> df = pd.DataFrame({'a': [1, 2, 2, 3], 'b': [3, 4, 4, 4]}, + ... 
index=['cat', 'dog', 'dog', 'mouse']) + >>> df + a b + cat 1 3 + dog 2 4 + dog 2 4 + mouse 3 4 + >>> df.kurt() + a 1.5 + b 4.0 + dtype: float64 + + With axis=None + + >>> df.kurt(axis=None).round(6) + -0.988693 + + Using axis=1 + + >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [3, 4], 'd': [1, 2]}, + ... index=['cat', 'dog']) + >>> df.kurt(axis=1) + cat -6.0 + dog -6.0 + dtype: float64""" + kwargs = {"min_count": ""} + + elif name == "cumsum": + base_doc = _cnum_doc + desc = "sum" + see_also = "" + examples = _cumsum_examples + kwargs = {"accum_func_name": "sum"} + + elif name == "cumprod": + base_doc = _cnum_doc + desc = "product" + see_also = "" + examples = _cumprod_examples + kwargs = {"accum_func_name": "prod"} + + elif name == "cummin": + base_doc = _cnum_doc + desc = "minimum" + see_also = "" + examples = _cummin_examples + kwargs = {"accum_func_name": "min"} + + elif name == "cummax": + base_doc = _cnum_doc + desc = "maximum" + see_also = "" + examples = _cummax_examples + kwargs = {"accum_func_name": "max"} + + else: + raise NotImplementedError + + docstr = base_doc.format( + desc=desc, + name1=name1, + name2=name2, + axis_descr=axis_descr, + see_also=see_also, + examples=examples, + **kwargs, + ) + return docstr diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/__init__.py new file mode 100644 index 00000000..8248f378 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/__init__.py @@ -0,0 +1,15 @@ +from pandas.core.groupby.generic import ( + DataFrameGroupBy, + NamedAgg, + SeriesGroupBy, +) +from pandas.core.groupby.groupby import GroupBy +from pandas.core.groupby.grouper import Grouper + +__all__ = [ + "DataFrameGroupBy", + "NamedAgg", + "SeriesGroupBy", + "GroupBy", + "Grouper", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/base.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/base.py new file mode 100644 index 00000000..a4435973 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/base.py @@ -0,0 +1,121 @@ +""" +Provide basic components for groupby. +""" +from __future__ import annotations + +import dataclasses +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import Hashable + + +@dataclasses.dataclass(order=True, frozen=True) +class OutputKey: + label: Hashable + position: int + + +# special case to prevent duplicate plots when catching exceptions when +# forwarding methods from NDFrames +plotting_methods = frozenset(["plot", "hist"]) + +# cythonized transformations or canned "agg+broadcast", which do not +# require postprocessing of the result by transform. +cythonized_kernels = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"]) + +# List of aggregation/reduction functions. +# These map each group to a single numeric value +reduction_kernels = frozenset( + [ + "all", + "any", + "corrwith", + "count", + "first", + "idxmax", + "idxmin", + "last", + "max", + "mean", + "median", + "min", + "nunique", + "prod", + # as long as `quantile`'s signature accepts only + # a single quantile value, it's a reduction. + # GH#27526 might change that. + "quantile", + "sem", + "size", + "skew", + "std", + "sum", + "var", + ] +) + +# List of transformation functions. +# a transformation is a function that, for each group, +# produces a result that has the same shape as the group. 
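+# For example, "cumsum" maps a length-3 group to a length-3 result (same
+# shape), whereas a reduction kernel such as "sum" collapses each group to a
+# single value.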
+ + +transformation_kernels = frozenset( + [ + "bfill", + "cumcount", + "cummax", + "cummin", + "cumprod", + "cumsum", + "diff", + "ffill", + "fillna", + "ngroup", + "pct_change", + "rank", + "shift", + ] +) + +# these are all the public methods on Grouper which don't belong +# in either of the above lists +groupby_other_methods = frozenset( + [ + "agg", + "aggregate", + "apply", + "boxplot", + # corr and cov return ngroups*ncolumns rows, so they + # are neither a transformation nor a reduction + "corr", + "cov", + "describe", + "dtypes", + "expanding", + "ewm", + "filter", + "get_group", + "groups", + "head", + "hist", + "indices", + "ndim", + "ngroups", + "nth", + "ohlc", + "pipe", + "plot", + "resample", + "rolling", + "tail", + "take", + "transform", + "sample", + "value_counts", + ] +) +# Valid values of `name` for `groupby.transform(name)` +# NOTE: do NOT edit this directly. New additions should be inserted +# into the appropriate list above. +transform_kernel_allowlist = reduction_kernels | transformation_kernels diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/categorical.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/categorical.py new file mode 100644 index 00000000..6ab98cf4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/categorical.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +import numpy as np + +from pandas.core.algorithms import unique1d +from pandas.core.arrays.categorical import ( + Categorical, + CategoricalDtype, + recode_for_categories, +) + + +def recode_for_groupby( + c: Categorical, sort: bool, observed: bool +) -> tuple[Categorical, Categorical | None]: + """ + Code the categories to ensure we can groupby for categoricals. + + If observed=True, we return a new Categorical with the observed + categories only. + + If sort=False, return a copy of self, coded with categories as + returned by .unique(), followed by any categories not appearing in + the data. If sort=True, return self. + + This method is needed solely to ensure the categorical index of the + GroupBy result has categories in the order of appearance in the data + (GH-8868). + + Parameters + ---------- + c : Categorical + sort : bool + The value of the sort parameter groupby was called with. + observed : bool + Account only for the observed values + + Returns + ------- + Categorical + If sort=False, the new categories are set to the order of + appearance in codes (unless ordered=True, in which case the + original order is preserved), followed by any unrepresented + categories in the original order. 
+ Categorical or None + If we are observed, return the original categorical, otherwise None + """ + # we only care about observed values + if observed: + # In cases with c.ordered, this is equivalent to + # return c.remove_unused_categories(), c + + unique_codes = unique1d(c.codes) + + take_codes = unique_codes[unique_codes != -1] + if sort: + take_codes = np.sort(take_codes) + + # we recode according to the uniques + categories = c.categories.take(take_codes) + codes = recode_for_categories(c.codes, c.categories, categories) + + # return a new categorical that maps our new codes + # and categories + dtype = CategoricalDtype(categories, ordered=c.ordered) + return Categorical._simple_new(codes, dtype=dtype), c + + # Already sorted according to c.categories; all is fine + if sort: + return c, None + + # sort=False should order groups in as-encountered order (GH-8868) + + # xref GH:46909: Re-ordering codes faster than using (set|add|reorder)_categories + all_codes = np.arange(c.categories.nunique()) + # GH 38140: exclude nan from indexer for categories + unique_notnan_codes = unique1d(c.codes[c.codes != -1]) + if sort: + unique_notnan_codes = np.sort(unique_notnan_codes) + if len(all_codes) > len(unique_notnan_codes): + # GH 13179: All categories need to be present, even if missing from the data + missing_codes = np.setdiff1d(all_codes, unique_notnan_codes, assume_unique=True) + take_codes = np.concatenate((unique_notnan_codes, missing_codes)) + else: + take_codes = unique_notnan_codes + + return Categorical(c, c.unique().categories.take(take_codes)), None diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/generic.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/generic.py new file mode 100644 index 00000000..8ba644b5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/generic.py @@ -0,0 +1,2892 @@ +""" +Define the SeriesGroupBy and DataFrameGroupBy +classes that hold the groupby interfaces (and some implementations). + +These are user facing as the result of the ``df.groupby(...)`` operations, +which here returns a DataFrameGroupBy object. 
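+
+For example (illustrative, with a hypothetical ``df``)::
+
+    df.groupby("key")           # -> DataFrameGroupBy
+    df.groupby("key")["col"]    # -> SeriesGroupBy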
+""" +from __future__ import annotations + +from collections import abc +from functools import partial +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + NamedTuple, + TypeVar, + Union, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import ( + Interval, + lib, +) +from pandas.errors import SpecificationError +from pandas.util._decorators import ( + Appender, + Substitution, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + ensure_int64, + is_bool, + is_dict_like, + is_integer_dtype, + is_list_like, + is_numeric_dtype, + is_scalar, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + IntervalDtype, +) +from pandas.core.dtypes.inference import is_hashable +from pandas.core.dtypes.missing import ( + isna, + notna, +) + +from pandas.core import algorithms +from pandas.core.apply import ( + GroupByApply, + maybe_mangle_lambdas, + reconstruct_func, + validate_func_kwargs, + warn_alias_replacement, +) +import pandas.core.common as com +from pandas.core.frame import DataFrame +from pandas.core.groupby import ( + base, + ops, +) +from pandas.core.groupby.groupby import ( + GroupBy, + GroupByPlot, + _agg_template_frame, + _agg_template_series, + _apply_docs, + _transform_template, +) +from pandas.core.indexes.api import ( + Index, + MultiIndex, + all_indexes_same, + default_index, +) +from pandas.core.series import Series +from pandas.core.util.numba_ import maybe_use_numba + +from pandas.plotting import boxplot_frame_groupby + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Mapping, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + Axis, + AxisInt, + CorrelationMethod, + FillnaOptions, + IndexLabel, + Manager, + Manager2D, + SingleManager, + TakeIndexer, + ) + + from pandas import Categorical + from pandas.core.generic import NDFrame + +# TODO(typing) the return value on this callable should be any *scalar*. +AggScalar = Union[str, Callable[..., Any]] +# TODO: validate types on ScalarResult and move to _typing +# Blocked from using by https://github.com/python/mypy/issues/1484 +# See note at _mangle_lambda_list +ScalarResult = TypeVar("ScalarResult") + + +class NamedAgg(NamedTuple): + """ + Helper for column specific aggregation with control over output column names. + + Subclass of typing.NamedTuple. + + Parameters + ---------- + column : Hashable + Column label in the DataFrame to apply aggfunc. + aggfunc : function or str + Function to apply to the provided column. If string, the name of a built-in + pandas function. 
+ + Examples + -------- + >>> df = pd.DataFrame({"key": [1, 1, 2], "a": [-1, 0, 1], 1: [10, 11, 12]}) + >>> agg_a = pd.NamedAgg(column="a", aggfunc="min") + >>> agg_1 = pd.NamedAgg(column=1, aggfunc=lambda x: np.mean(x)) + >>> df.groupby("key").agg(result_a=agg_a, result_1=agg_1) + result_a result_1 + key + 1 -1 10.5 + 2 1 12.0 + """ + + column: Hashable + aggfunc: AggScalar + + +class SeriesGroupBy(GroupBy[Series]): + def _wrap_agged_manager(self, mgr: Manager) -> Series: + out = self.obj._constructor_from_mgr(mgr, axes=mgr.axes) + out._name = self.obj.name + return out + + def _get_data_to_aggregate( + self, *, numeric_only: bool = False, name: str | None = None + ) -> SingleManager: + ser = self._selected_obj + single = ser._mgr + if numeric_only and not is_numeric_dtype(ser.dtype): + # GH#41291 match Series behavior + kwd_name = "numeric_only" + raise TypeError( + f"Cannot use {kwd_name}=True with " + f"{type(self).__name__}.{name} and non-numeric dtypes." + ) + return single + + _agg_examples_doc = dedent( + """ + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4]) + + >>> s + 0 1 + 1 2 + 2 3 + 3 4 + dtype: int64 + + >>> s.groupby([1, 1, 2, 2]).min() + 1 1 + 2 3 + dtype: int64 + + >>> s.groupby([1, 1, 2, 2]).agg('min') + 1 1 + 2 3 + dtype: int64 + + >>> s.groupby([1, 1, 2, 2]).agg(['min', 'max']) + min max + 1 1 2 + 2 3 4 + + The output column names can be controlled by passing + the desired column names and aggregations as keyword arguments. + + >>> s.groupby([1, 1, 2, 2]).agg( + ... minimum='min', + ... maximum='max', + ... ) + minimum maximum + 1 1 2 + 2 3 4 + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the aggregating function. + + >>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min()) + 1 1.0 + 2 3.0 + dtype: float64 + """ + ) + + @Appender( + _apply_docs["template"].format( + input="series", examples=_apply_docs["series_examples"] + ) + ) + def apply(self, func, *args, **kwargs) -> Series: + return super().apply(func, *args, **kwargs) + + @doc(_agg_template_series, examples=_agg_examples_doc, klass="Series") + def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): + relabeling = func is None + columns = None + if relabeling: + columns, func = validate_func_kwargs(kwargs) + kwargs = {} + + if isinstance(func, str): + if maybe_use_numba(engine) and engine is not None: + # Not all agg functions support numba, only propagate numba kwargs + # if user asks for numba, and engine is not None + # (if engine is None, the called function will handle the case where + # numba is requested via the global option) + kwargs["engine"] = engine + if engine_kwargs is not None: + kwargs["engine_kwargs"] = engine_kwargs + return getattr(self, func)(*args, **kwargs) + + elif isinstance(func, abc.Iterable): + # Catch instances of lists / tuples + # but not the class list / tuple itself. 
+ func = maybe_mangle_lambdas(func) + kwargs["engine"] = engine + kwargs["engine_kwargs"] = engine_kwargs + ret = self._aggregate_multiple_funcs(func, *args, **kwargs) + if relabeling: + # columns is not narrowed by mypy from relabeling flag + assert columns is not None # for mypy + ret.columns = columns + if not self.as_index: + ret = ret.reset_index() + return ret + + else: + cyfunc = com.get_cython_func(func) + if cyfunc and not args and not kwargs: + warn_alias_replacement(self, func, cyfunc) + return getattr(self, cyfunc)() + + if maybe_use_numba(engine): + return self._aggregate_with_numba( + func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + + if self.ngroups == 0: + # e.g. test_evaluate_with_empty_groups without any groups to + # iterate over, we have no output on which to do dtype + # inference. We default to using the existing dtype. + # xref GH#51445 + obj = self._obj_with_exclusions + return self.obj._constructor( + [], + name=self.obj.name, + index=self.grouper.result_index, + dtype=obj.dtype, + ) + + if self.grouper.nkeys > 1: + return self._python_agg_general(func, *args, **kwargs) + + try: + return self._python_agg_general(func, *args, **kwargs) + except KeyError: + # KeyError raised in test_groupby.test_basic is bc the func does + # a dictionary lookup on group.name, but group name is not + # pinned in _python_agg_general, only in _aggregate_named + result = self._aggregate_named(func, *args, **kwargs) + + warnings.warn( + "Pinning the groupby key to each group in " + f"{type(self).__name__}.agg is deprecated, and cases that " + "relied on it will raise in a future version. " + "If your operation requires utilizing the groupby keys, " + "iterate over the groupby object instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + # result is a dict whose keys are the elements of result_index + result = Series(result, index=self.grouper.result_index) + result = self._wrap_aggregated_output(result) + return result + + agg = aggregate + + def _python_agg_general(self, func, *args, **kwargs): + orig_func = func + func = com.is_builtin_func(func) + if orig_func != func: + alias = com._builtin_table_alias[func] + warn_alias_replacement(self, orig_func, alias) + f = lambda x: func(x, *args, **kwargs) + + obj = self._obj_with_exclusions + result = self.grouper.agg_series(obj, f) + res = obj._constructor(result, name=obj.name) + return self._wrap_aggregated_output(res) + + def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame: + if isinstance(arg, dict): + if self.as_index: + # GH 15931 + raise SpecificationError("nested renamer is not supported") + else: + # GH#50684 - This accidentally worked in 1.x + msg = ( + "Passing a dictionary to SeriesGroupBy.agg is deprecated " + "and will raise in a future version of pandas. Pass a list " + "of aggregations instead." 
+ ) + warnings.warn( + message=msg, + category=FutureWarning, + stacklevel=find_stack_level(), + ) + arg = list(arg.items()) + elif any(isinstance(x, (tuple, list)) for x in arg): + arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg] + else: + # list of functions / function names + columns = (com.get_callable_name(f) or f for f in arg) + arg = zip(columns, arg) + + results: dict[base.OutputKey, DataFrame | Series] = {} + with com.temp_setattr(self, "as_index", True): + # Combine results using the index, need to adjust index after + # if as_index=False (GH#50724) + for idx, (name, func) in enumerate(arg): + key = base.OutputKey(label=name, position=idx) + results[key] = self.aggregate(func, *args, **kwargs) + + if any(isinstance(x, DataFrame) for x in results.values()): + from pandas import concat + + res_df = concat( + results.values(), axis=1, keys=[key.label for key in results] + ) + return res_df + + indexed_output = {key.position: val for key, val in results.items()} + output = self.obj._constructor_expanddim(indexed_output, index=None) + output.columns = Index(key.label for key in results) + + return output + + def _wrap_applied_output( + self, + data: Series, + values: list[Any], + not_indexed_same: bool = False, + is_transform: bool = False, + ) -> DataFrame | Series: + """ + Wrap the output of SeriesGroupBy.apply into the expected result. + + Parameters + ---------- + data : Series + Input data for groupby operation. + values : List[Any] + Applied output for each group. + not_indexed_same : bool, default False + Whether the applied outputs are not indexed the same as the group axes. + + Returns + ------- + DataFrame or Series + """ + if len(values) == 0: + # GH #6265 + if is_transform: + # GH#47787 see test_group_on_empty_multiindex + res_index = data.index + else: + res_index = self.grouper.result_index + + return self.obj._constructor( + [], + name=self.obj.name, + index=res_index, + dtype=data.dtype, + ) + assert values is not None + + if isinstance(values[0], dict): + # GH #823 #24880 + index = self.grouper.result_index + res_df = self.obj._constructor_expanddim(values, index=index) + res_df = self._reindex_output(res_df) + # if self.observed is False, + # keep all-NaN rows created while re-indexing + res_ser = res_df.stack(future_stack=True) + res_ser.name = self.obj.name + return res_ser + elif isinstance(values[0], (Series, DataFrame)): + result = self._concat_objects( + values, + not_indexed_same=not_indexed_same, + is_transform=is_transform, + ) + if isinstance(result, Series): + result.name = self.obj.name + if not self.as_index and not_indexed_same: + result = self._insert_inaxis_grouper(result) + result.index = default_index(len(result)) + return result + else: + # GH #6265 #24880 + result = self.obj._constructor( + data=values, index=self.grouper.result_index, name=self.obj.name + ) + if not self.as_index: + result = self._insert_inaxis_grouper(result) + result.index = default_index(len(result)) + return self._reindex_output(result) + + def _aggregate_named(self, func, *args, **kwargs): + # Note: this is very similar to _aggregate_series_pure_python, + # but that does not pin group.name + result = {} + initialized = False + + for name, group in self.grouper.get_iterator( + self._selected_obj, axis=self.axis + ): + # needed for pandas/tests/groupby/test_groupby.py::test_basic_aggregations + object.__setattr__(group, "name", name) + + output = func(group, *args, **kwargs) + output = ops.extract_result(output) + if not initialized: + # We only do this 
validation on the first iteration + ops.check_result_array(output, group.dtype) + initialized = True + result[name] = output + + return result + + __examples_series_doc = dedent( + """ + >>> ser = pd.Series( + ... [390.0, 350.0, 30.0, 20.0], + ... index=["Falcon", "Falcon", "Parrot", "Parrot"], + ... name="Max Speed") + >>> grouped = ser.groupby([1, 1, 2, 2]) + >>> grouped.transform(lambda x: (x - x.mean()) / x.std()) + Falcon 0.707107 + Falcon -0.707107 + Parrot 0.707107 + Parrot -0.707107 + Name: Max Speed, dtype: float64 + + Broadcast result of the transformation + + >>> grouped.transform(lambda x: x.max() - x.min()) + Falcon 40.0 + Falcon 40.0 + Parrot 10.0 + Parrot 10.0 + Name: Max Speed, dtype: float64 + + >>> grouped.transform("mean") + Falcon 370.0 + Falcon 370.0 + Parrot 25.0 + Parrot 25.0 + Name: Max Speed, dtype: float64 + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + for example: + + >>> grouped.transform(lambda x: x.astype(int).max()) + Falcon 390 + Falcon 390 + Parrot 30 + Parrot 30 + Name: Max Speed, dtype: int64 + """ + ) + + @Substitution(klass="Series", example=__examples_series_doc) + @Appender(_transform_template) + def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): + return self._transform( + func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs + ) + + def _cython_transform( + self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs + ): + assert axis == 0 # handled by caller + + obj = self._selected_obj + + try: + result = self.grouper._cython_operation( + "transform", obj._values, how, axis, **kwargs + ) + except NotImplementedError as err: + # e.g. test_groupby_raises_string + raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err + + return obj._constructor(result, index=self.obj.index, name=obj.name) + + def _transform_general( + self, func: Callable, engine, engine_kwargs, *args, **kwargs + ) -> Series: + """ + Transform with a callable `func`. + """ + if maybe_use_numba(engine): + return self._transform_with_numba( + func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + assert callable(func) + klass = type(self.obj) + + results = [] + for name, group in self.grouper.get_iterator( + self._selected_obj, axis=self.axis + ): + # this setattr is needed for test_transform_lambda_with_datetimetz + object.__setattr__(group, "name", name) + res = func(group, *args, **kwargs) + + results.append(klass(res, index=group.index)) + + # check for empty "results" to avoid concat ValueError + if results: + from pandas.core.reshape.concat import concat + + concatenated = concat(results) + result = self._set_result_index_ordered(concatenated) + else: + result = self.obj._constructor(dtype=np.float64) + + result.name = self.obj.name + return result + + def filter(self, func, dropna: bool = True, *args, **kwargs): + """ + Filter elements from groups that don't satisfy a criterion. + + Elements from groups are filtered if they do not satisfy the + boolean criterion specified by func. + + Parameters + ---------- + func : function + Criterion to apply to each group. Should return True or False. + dropna : bool + Drop groups that do not pass the filter. True by default; if False, + groups that evaluate False are filled with NaNs. + + Returns + ------- + Series + + Notes + ----- + Functions that mutate the passed object can produce unexpected + behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` + for more details. 
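+
+        When ``dropna=False``, the values of groups that do not pass the
+        filter are replaced with NaN, so integer columns are cast to float
+        in the result.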
+ + Examples + -------- + >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', + ... 'foo', 'bar'], + ... 'B' : [1, 2, 3, 4, 5, 6], + ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) + >>> grouped = df.groupby('A') + >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.) + 1 2 + 3 4 + 5 6 + Name: B, dtype: int64 + """ + if isinstance(func, str): + wrapper = lambda x: getattr(x, func)(*args, **kwargs) + else: + wrapper = lambda x: func(x, *args, **kwargs) + + # Interpret np.nan as False. + def true_and_notna(x) -> bool: + b = wrapper(x) + return notna(b) and b + + try: + indices = [ + self._get_index(name) + for name, group in self.grouper.get_iterator( + self._selected_obj, axis=self.axis + ) + if true_and_notna(group) + ] + except (ValueError, TypeError) as err: + raise TypeError("the filter must return a boolean result") from err + + filtered = self._apply_filter(indices, dropna) + return filtered + + def nunique(self, dropna: bool = True) -> Series | DataFrame: + """ + Return number of unique elements in the group. + + Returns + ------- + Series + Number of unique values within each group. + + Examples + -------- + For SeriesGroupby: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).nunique() + a 2 + b 1 + dtype: int64 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 3], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 3 + dtype: int64 + >>> ser.resample('MS').nunique() + 2023-01-01 2 + 2023-02-01 1 + Freq: MS, dtype: int64 + """ + ids, _, _ = self.grouper.group_info + + val = self.obj._values + + codes, _ = algorithms.factorize(val, sort=False) + sorter = np.lexsort((codes, ids)) + codes = codes[sorter] + ids = ids[sorter] + + # group boundaries are where group ids change + # unique observations are where sorted values change + idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]] + inc = np.r_[1, codes[1:] != codes[:-1]] + + # 1st item of each group is a new unique observation + mask = codes == -1 + if dropna: + inc[idx] = 1 + inc[mask] = 0 + else: + inc[mask & np.r_[False, mask[:-1]]] = 0 + inc[idx] = 1 + + out = np.add.reduceat(inc, idx).astype("int64", copy=False) + if len(ids): + # NaN/NaT group exists if the head of ids is -1, + # so remove it from res and exclude its index from idx + if ids[0] == -1: + res = out[1:] + idx = idx[np.flatnonzero(idx)] + else: + res = out + else: + res = out[1:] + ri = self.grouper.result_index + + # we might have duplications among the bins + if len(res) != len(ri): + res, out = np.zeros(len(ri), dtype=out.dtype), res + if len(ids) > 0: + # GH#21334s + res[ids[idx]] = out + + result: Series | DataFrame = self.obj._constructor( + res, index=ri, name=self.obj.name + ) + if not self.as_index: + result = self._insert_inaxis_grouper(result) + result.index = default_index(len(result)) + return self._reindex_output(result, fill_value=0) + + @doc(Series.describe) + def describe(self, **kwargs): + return super().describe(**kwargs) + + def value_counts( + self, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + bins=None, + dropna: bool = True, + ) -> Series | DataFrame: + name = "proportion" if normalize else "count" + + if bins is None: + result = self._value_counts( + normalize=normalize, sort=sort, ascending=ascending, dropna=dropna + ) + result.name = name + return result + + from pandas.core.reshape.merge import 
get_join_indexers + from pandas.core.reshape.tile import cut + + ids, _, _ = self.grouper.group_info + val = self.obj._values + + index_names = self.grouper.names + [self.obj.name] + + if isinstance(val.dtype, CategoricalDtype) or ( + bins is not None and not np.iterable(bins) + ): + # scalar bins cannot be done at top level + # in a backward compatible way + # GH38672 relates to categorical dtype + ser = self.apply( + Series.value_counts, + normalize=normalize, + sort=sort, + ascending=ascending, + bins=bins, + ) + ser.name = name + ser.index.names = index_names + return ser + + # groupby removes null keys from groupings + mask = ids != -1 + ids, val = ids[mask], val[mask] + + if bins is None: + lab, lev = algorithms.factorize(val, sort=True) + llab = lambda lab, inc: lab[inc] + else: + # lab is a Categorical with categories an IntervalIndex + cat_ser = cut(Series(val, copy=False), bins, include_lowest=True) + cat_obj = cast("Categorical", cat_ser._values) + lev = cat_obj.categories + lab = lev.take( + cat_obj.codes, + allow_fill=True, + fill_value=lev._na_value, + ) + llab = lambda lab, inc: lab[inc]._multiindex.codes[-1] + + if isinstance(lab.dtype, IntervalDtype): + # TODO: should we do this inside II? + lab_interval = cast(Interval, lab) + + sorter = np.lexsort((lab_interval.left, lab_interval.right, ids)) + else: + sorter = np.lexsort((lab, ids)) + + ids, lab = ids[sorter], lab[sorter] + + # group boundaries are where group ids change + idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0] + idx = np.r_[0, idchanges] + if not len(ids): + idx = idchanges + + # new values are where sorted labels change + lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1)) + inc = np.r_[True, lchanges] + if not len(val): + inc = lchanges + inc[idx] = True # group boundaries are also new values + out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts + + # num. of times each group should be repeated + rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx)) + + # multi-index components + codes = self.grouper.reconstructed_codes + codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)] + levels = [ping.group_index for ping in self.grouper.groupings] + [lev] + + if dropna: + mask = codes[-1] != -1 + if mask.all(): + dropna = False + else: + out, codes = out[mask], [level_codes[mask] for level_codes in codes] + + if normalize: + out = out.astype("float") + d = np.diff(np.r_[idx, len(ids)]) + if dropna: + m = ids[lab == -1] + np.add.at(d, m, -1) + acc = rep(d)[mask] + else: + acc = rep(d) + out /= acc + + if sort and bins is None: + cat = ids[inc][mask] if dropna else ids[inc] + sorter = np.lexsort((out if ascending else -out, cat)) + out, codes[-1] = out[sorter], codes[-1][sorter] + + if bins is not None: + # for compat. 
with libgroupby.value_counts need to ensure every + # bin is present at every index level, null filled with zeros + diff = np.zeros(len(out), dtype="bool") + for level_codes in codes[:-1]: + diff |= np.r_[True, level_codes[1:] != level_codes[:-1]] + + ncat, nbin = diff.sum(), len(levels[-1]) + + left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)] + + right = [diff.cumsum() - 1, codes[-1]] + + # error: Argument 1 to "get_join_indexers" has incompatible type + # "List[ndarray[Any, Any]]"; expected "List[Union[Union[ExtensionArray, + # ndarray[Any, Any]], Index, Series]] + _, idx = get_join_indexers( + left, right, sort=False, how="left" # type: ignore[arg-type] + ) + out = np.where(idx != -1, out[idx], 0) + + if sort: + sorter = np.lexsort((out if ascending else -out, left[0])) + out, left[-1] = out[sorter], left[-1][sorter] + + # build the multi-index w/ full levels + def build_codes(lev_codes: np.ndarray) -> np.ndarray: + return np.repeat(lev_codes[diff], nbin) + + codes = [build_codes(lev_codes) for lev_codes in codes[:-1]] + codes.append(left[-1]) + + mi = MultiIndex( + levels=levels, codes=codes, names=index_names, verify_integrity=False + ) + + if is_integer_dtype(out.dtype): + out = ensure_int64(out) + result = self.obj._constructor(out, index=mi, name=name) + if not self.as_index: + result = result.reset_index() + return result + + def fillna( + self, + value: object | ArrayLike | None = None, + method: FillnaOptions | None = None, + axis: Axis | None | lib.NoDefault = lib.no_default, + inplace: bool = False, + limit: int | None = None, + downcast: dict | None | lib.NoDefault = lib.no_default, + ) -> Series | None: + """ + Fill NA/NaN values using the specified method within groups. + + Parameters + ---------- + value : scalar, dict, Series, or DataFrame + Value to use to fill holes (e.g. 0), alternately a + dict/Series/DataFrame of values specifying which value to use for + each index (for a Series) or column (for a DataFrame). Values not + in the dict/Series/DataFrame will not be filled. This value cannot + be a list. Users wanting to use the ``value`` argument and not ``method`` + should prefer :meth:`.Series.fillna` as this + will produce the same result and be more performant. + method : {{'bfill', 'ffill', None}}, default None + Method to use for filling holes. ``'ffill'`` will propagate + the last valid observation forward within a group. + ``'bfill'`` will use next valid observation to fill the gap. + + .. deprecated:: 2.1.0 + Use obj.ffill or obj.bfill instead. + + axis : {0 or 'index', 1 or 'columns'} + Unused, only for compatibility with :meth:`DataFrameGroupBy.fillna`. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + inplace : bool, default False + Broken. Do not set to True. + limit : int, default None + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill within a group. In other words, + if there is a gap with more than this number of consecutive NaNs, + it will only be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. Must be greater than 0 if not None. + downcast : dict, default is None + A dict of item->dtype of what to downcast if possible, + or the string 'infer' which will try to downcast to an appropriate + equal type (e.g. float64 to int64 if possible). + + .. 
deprecated:: 2.1.0 + + Returns + ------- + Series + Object with missing values filled within groups. + + See Also + -------- + ffill : Forward fill values within a group. + bfill : Backward fill values within a group. + + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['cat', 'cat', 'cat', 'mouse', 'mouse'] + >>> ser = pd.Series([1, None, None, 2, None], index=lst) + >>> ser + cat 1.0 + cat NaN + cat NaN + mouse 2.0 + mouse NaN + dtype: float64 + >>> ser.groupby(level=0).fillna(0, limit=1) + cat 1.0 + cat 0.0 + cat NaN + mouse 2.0 + mouse 0.0 + dtype: float64 + """ + result = self._op_via_apply( + "fillna", + value=value, + method=method, + axis=axis, + inplace=inplace, + limit=limit, + downcast=downcast, + ) + return result + + def take( + self, + indices: TakeIndexer, + axis: Axis | lib.NoDefault = lib.no_default, + **kwargs, + ) -> Series: + """ + Return the elements in the given *positional* indices in each group. + + This means that we are not indexing according to actual values in + the index attribute of the object. We are indexing according to the + actual position of the element in the object. + + If a requested index does not exist for some group, this method will raise. + To get similar behavior that ignores indices that don't exist, see + :meth:`.SeriesGroupBy.nth`. + + Parameters + ---------- + indices : array-like + An array of ints indicating which positions to take in each group. + axis : {0 or 'index', 1 or 'columns', None}, default 0 + The axis on which to select elements. ``0`` means that we are + selecting rows, ``1`` means that we are selecting columns. + For `SeriesGroupBy` this parameter is unused and defaults to 0. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + **kwargs + For compatibility with :meth:`numpy.take`. Has no effect on the + output. + + Returns + ------- + Series + A Series containing the elements taken from each group. + + See Also + -------- + Series.take : Take elements from a Series along an axis. + Series.loc : Select a subset of a DataFrame by labels. + Series.iloc : Select a subset of a DataFrame by positions. + numpy.take : Take elements from an array along an axis. + SeriesGroupBy.nth : Similar to take, won't raise if indices don't exist. + + Examples + -------- + >>> df = pd.DataFrame([('falcon', 'bird', 389.0), + ... ('parrot', 'bird', 24.0), + ... ('lion', 'mammal', 80.5), + ... ('monkey', 'mammal', np.nan), + ... ('rabbit', 'mammal', 15.0)], + ... columns=['name', 'class', 'max_speed'], + ... index=[4, 3, 2, 1, 0]) + >>> df + name class max_speed + 4 falcon bird 389.0 + 3 parrot bird 24.0 + 2 lion mammal 80.5 + 1 monkey mammal NaN + 0 rabbit mammal 15.0 + >>> gb = df["name"].groupby([1, 1, 2, 2, 2]) + + Take elements at positions 0 and 1 along the axis 0 in each group (default). + + >>> gb.take([0, 1]) + 1 4 falcon + 3 parrot + 2 2 lion + 1 monkey + Name: name, dtype: object + + We may take elements using negative integers for positive indices, + starting from the end of the object, just like with Python lists. + + >>> gb.take([-1, -2]) + 1 3 parrot + 4 falcon + 2 0 rabbit + 1 monkey + Name: name, dtype: object + """ + result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs) + return result + + def skew( + self, + axis: Axis | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ) -> Series: + """ + Return unbiased skew within groups. + + Normalized by N-1. 
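+
+        Editorial note: each group is reduced with the same statistic as
+        :meth:`Series.skew`; for a group of ``n`` values this is, to the best
+        of our reading, the adjusted Fisher-Pearson coefficient
+        ``sqrt(n * (n - 1)) / (n - 2) * m3 / m2**1.5``, where ``m2`` and
+        ``m3`` are the second and third central moments of the group.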
+ + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns', None}, default 0 + Axis for the function to be applied on. + This parameter is only for compatibility with DataFrame and is unused. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + skipna : bool, default True + Exclude NA/null values when computing the result. + + numeric_only : bool, default False + Include only float, int, boolean columns. Not implemented for Series. + + **kwargs + Additional keyword arguments to be passed to the function. + + Returns + ------- + Series + + See Also + -------- + Series.skew : Return unbiased skew over requested axis. + + Examples + -------- + >>> ser = pd.Series([390., 350., 357., np.nan, 22., 20., 30.], + ... index=['Falcon', 'Falcon', 'Falcon', 'Falcon', + ... 'Parrot', 'Parrot', 'Parrot'], + ... name="Max Speed") + >>> ser + Falcon 390.0 + Falcon 350.0 + Falcon 357.0 + Falcon NaN + Parrot 22.0 + Parrot 20.0 + Parrot 30.0 + Name: Max Speed, dtype: float64 + >>> ser.groupby(level=0).skew() + Falcon 1.525174 + Parrot 1.457863 + Name: Max Speed, dtype: float64 + >>> ser.groupby(level=0).skew(skipna=False) + Falcon NaN + Parrot 1.457863 + Name: Max Speed, dtype: float64 + """ + if axis is lib.no_default: + axis = 0 + + if axis != 0: + result = self._op_via_apply( + "skew", + axis=axis, + skipna=skipna, + numeric_only=numeric_only, + **kwargs, + ) + return result + + def alt(obj): + # This should not be reached since the cython path should raise + # TypeError and not NotImplementedError. + raise TypeError(f"'skew' is not supported for dtype={obj.dtype}") + + return self._cython_agg_general( + "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs + ) + + @property + @doc(Series.plot.__doc__) + def plot(self): + result = GroupByPlot(self) + return result + + @doc(Series.nlargest.__doc__) + def nlargest( + self, n: int = 5, keep: Literal["first", "last", "all"] = "first" + ) -> Series: + f = partial(Series.nlargest, n=n, keep=keep) + data = self._selected_obj + # Don't change behavior if result index happens to be the same, i.e. + # already ordered and n >= all group sizes. + result = self._python_apply_general(f, data, not_indexed_same=True) + return result + + @doc(Series.nsmallest.__doc__) + def nsmallest( + self, n: int = 5, keep: Literal["first", "last", "all"] = "first" + ) -> Series: + f = partial(Series.nsmallest, n=n, keep=keep) + data = self._selected_obj + # Don't change behavior if result index happens to be the same, i.e. + # already ordered and n >= all group sizes. 
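+        # Editorial gloss: not_indexed_same=True tells the apply machinery
+        # that each group's result is a reordered subset of that group, so
+        # the combined output is keyed by group label (typically a MultiIndex
+        # of group key and original index label) rather than being reindexed
+        # to the original Series like a transform would be.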
+ result = self._python_apply_general(f, data, not_indexed_same=True) + return result + + @doc(Series.idxmin.__doc__) + def idxmin( + self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True + ) -> Series: + result = self._op_via_apply("idxmin", axis=axis, skipna=skipna) + return result.astype(self.obj.index.dtype) if result.empty else result + + @doc(Series.idxmax.__doc__) + def idxmax( + self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True + ) -> Series: + result = self._op_via_apply("idxmax", axis=axis, skipna=skipna) + return result.astype(self.obj.index.dtype) if result.empty else result + + @doc(Series.corr.__doc__) + def corr( + self, + other: Series, + method: CorrelationMethod = "pearson", + min_periods: int | None = None, + ) -> Series: + result = self._op_via_apply( + "corr", other=other, method=method, min_periods=min_periods + ) + return result + + @doc(Series.cov.__doc__) + def cov( + self, other: Series, min_periods: int | None = None, ddof: int | None = 1 + ) -> Series: + result = self._op_via_apply( + "cov", other=other, min_periods=min_periods, ddof=ddof + ) + return result + + @property + def is_monotonic_increasing(self) -> Series: + """ + Return whether each group's values are monotonically increasing. + + Returns + ------- + Series + + Examples + -------- + >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot']) + >>> s.groupby(level=0).is_monotonic_increasing + Falcon False + Parrot True + dtype: bool + """ + return self.apply(lambda ser: ser.is_monotonic_increasing) + + @property + def is_monotonic_decreasing(self) -> Series: + """ + Return whether each group's values are monotonically decreasing. + + Returns + ------- + Series + + Examples + -------- + >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot']) + >>> s.groupby(level=0).is_monotonic_decreasing + Falcon True + Parrot False + dtype: bool + """ + return self.apply(lambda ser: ser.is_monotonic_decreasing) + + @doc(Series.hist.__doc__) + def hist( + self, + by=None, + ax=None, + grid: bool = True, + xlabelsize: int | None = None, + xrot: float | None = None, + ylabelsize: int | None = None, + yrot: float | None = None, + figsize: tuple[int, int] | None = None, + bins: int | Sequence[int] = 10, + backend: str | None = None, + legend: bool = False, + **kwargs, + ): + result = self._op_via_apply( + "hist", + by=by, + ax=ax, + grid=grid, + xlabelsize=xlabelsize, + xrot=xrot, + ylabelsize=ylabelsize, + yrot=yrot, + figsize=figsize, + bins=bins, + backend=backend, + legend=legend, + **kwargs, + ) + return result + + @property + @doc(Series.dtype.__doc__) + def dtype(self) -> Series: + return self.apply(lambda ser: ser.dtype) + + def unique(self) -> Series: + """ + Return unique values for each group. + + It returns unique values for each of the grouped values. Returned in + order of appearance. Hash table-based unique, therefore does NOT sort. + + Returns + ------- + Series + Unique values for each of the grouped values. + + See Also + -------- + Series.unique : Return unique values of Series object. + + Examples + -------- + >>> df = pd.DataFrame([('Chihuahua', 'dog', 6.1), + ... ('Beagle', 'dog', 15.2), + ... ('Chihuahua', 'dog', 6.9), + ... ('Persian', 'cat', 9.2), + ... ('Chihuahua', 'dog', 7), + ... ('Persian', 'cat', 8.8)], + ... 
columns=['breed', 'animal', 'height_in']) + >>> df + breed animal height_in + 0 Chihuahua dog 6.1 + 1 Beagle dog 15.2 + 2 Chihuahua dog 6.9 + 3 Persian cat 9.2 + 4 Chihuahua dog 7.0 + 5 Persian cat 8.8 + >>> ser = df.groupby('animal')['breed'].unique() + >>> ser + animal + cat [Persian] + dog [Chihuahua, Beagle] + Name: breed, dtype: object + """ + result = self._op_via_apply("unique") + return result + + +class DataFrameGroupBy(GroupBy[DataFrame]): + _agg_examples_doc = dedent( + """ + Examples + -------- + >>> df = pd.DataFrame( + ... { + ... "A": [1, 1, 2, 2], + ... "B": [1, 2, 3, 4], + ... "C": [0.362838, 0.227877, 1.267767, -0.562860], + ... } + ... ) + + >>> df + A B C + 0 1 1 0.362838 + 1 1 2 0.227877 + 2 2 3 1.267767 + 3 2 4 -0.562860 + + The aggregation is for each column. + + >>> df.groupby('A').agg('min') + B C + A + 1 1 0.227877 + 2 3 -0.562860 + + Multiple aggregations + + >>> df.groupby('A').agg(['min', 'max']) + B C + min max min max + A + 1 1 2 0.227877 0.362838 + 2 3 4 -0.562860 1.267767 + + Select a column for aggregation + + >>> df.groupby('A').B.agg(['min', 'max']) + min max + A + 1 1 2 + 2 3 4 + + User-defined function for aggregation + + >>> df.groupby('A').agg(lambda x: sum(x) + 2) + B C + A + 1 5 2.590715 + 2 9 2.704907 + + Different aggregations per column + + >>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'}) + B C + min max sum + A + 1 1 2 0.590715 + 2 3 4 0.704907 + + To control the output names with different aggregations per column, + pandas supports "named aggregation" + + >>> df.groupby("A").agg( + ... b_min=pd.NamedAgg(column="B", aggfunc="min"), + ... c_sum=pd.NamedAgg(column="C", aggfunc="sum")) + b_min c_sum + A + 1 1 0.590715 + 2 3 0.704907 + + - The keywords are the *output* column names + - The values are tuples whose first element is the column to select + and the second element is the aggregation to apply to that column. + Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields + ``['column', 'aggfunc']`` to make it clearer what the arguments are. + As usual, the aggregation can be a callable or a string alias. + + See :ref:`groupby.aggregate.named` for more. + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the aggregating function. 
+ + >>> df.groupby("A")[["B"]].agg(lambda x: x.astype(float).min()) + B + A + 1 1.0 + 2 3.0 + """ + ) + + @doc(_agg_template_frame, examples=_agg_examples_doc, klass="DataFrame") + def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): + relabeling, func, columns, order = reconstruct_func(func, **kwargs) + func = maybe_mangle_lambdas(func) + + if maybe_use_numba(engine): + # Not all agg functions support numba, only propagate numba kwargs + # if user asks for numba + kwargs["engine"] = engine + kwargs["engine_kwargs"] = engine_kwargs + + op = GroupByApply(self, func, args=args, kwargs=kwargs) + result = op.agg() + if not is_dict_like(func) and result is not None: + # GH #52849 + if not self.as_index and is_list_like(func): + return result.reset_index() + else: + return result + elif relabeling: + # this should be the only (non-raising) case with relabeling + # used reordered index of columns + result = cast(DataFrame, result) + result = result.iloc[:, order] + result = cast(DataFrame, result) + # error: Incompatible types in assignment (expression has type + # "Optional[List[str]]", variable has type + # "Union[Union[Union[ExtensionArray, ndarray[Any, Any]], + # Index, Series], Sequence[Any]]") + result.columns = columns # type: ignore[assignment] + + if result is None: + # Remove the kwargs we inserted + # (already stored in engine, engine_kwargs arguments) + if "engine" in kwargs: + del kwargs["engine"] + del kwargs["engine_kwargs"] + # at this point func is not a str, list-like, dict-like, + # or a known callable(e.g. sum) + if maybe_use_numba(engine): + return self._aggregate_with_numba( + func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + # grouper specific aggregations + if self.grouper.nkeys > 1: + # test_groupby_as_index_series_scalar gets here with 'not self.as_index' + return self._python_agg_general(func, *args, **kwargs) + elif args or kwargs: + # test_pass_args_kwargs gets here (with and without as_index) + # can't return early + result = self._aggregate_frame(func, *args, **kwargs) + + elif self.axis == 1: + # _aggregate_multiple_funcs does not allow self.axis == 1 + # Note: axis == 1 precludes 'not self.as_index', see __init__ + result = self._aggregate_frame(func) + return result + + else: + # try to treat as if we are passing a list + gba = GroupByApply(self, [func], args=(), kwargs={}) + try: + result = gba.agg() + + except ValueError as err: + if "No objects to concatenate" not in str(err): + raise + # _aggregate_frame can fail with e.g. func=Series.mode, + # where it expects 1D values but would be getting 2D values + # In other tests, using aggregate_frame instead of GroupByApply + # would give correct values but incorrect dtypes + # object vs float64 in test_cython_agg_empty_buckets + # float64 vs int64 in test_category_order_apply + result = self._aggregate_frame(func) + + else: + # GH#32040, GH#35246 + # e.g. test_groupby_as_index_select_column_sum_empty_df + result = cast(DataFrame, result) + result.columns = self._obj_with_exclusions.columns.copy() + + if not self.as_index: + result = self._insert_inaxis_grouper(result) + result.index = default_index(len(result)) + + return result + + agg = aggregate + + def _python_agg_general(self, func, *args, **kwargs): + orig_func = func + func = com.is_builtin_func(func) + if orig_func != func: + alias = com._builtin_table_alias[func] + warn_alias_replacement(self, orig_func, alias) + f = lambda x: func(x, *args, **kwargs) + + if self.ngroups == 0: + # e.g. 
test_evaluate_with_empty_groups different path gets different + # result dtype in empty case. + return self._python_apply_general(f, self._selected_obj, is_agg=True) + + obj = self._obj_with_exclusions + if self.axis == 1: + obj = obj.T + + if not len(obj.columns): + # e.g. test_margins_no_values_no_cols + return self._python_apply_general(f, self._selected_obj) + + output: dict[int, ArrayLike] = {} + for idx, (name, ser) in enumerate(obj.items()): + result = self.grouper.agg_series(ser, f) + output[idx] = result + + res = self.obj._constructor(output) + res.columns = obj.columns.copy(deep=False) + return self._wrap_aggregated_output(res) + + def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame: + if self.grouper.nkeys != 1: + raise AssertionError("Number of keys must be 1") + + obj = self._obj_with_exclusions + + result: dict[Hashable, NDFrame | np.ndarray] = {} + for name, grp_df in self.grouper.get_iterator(obj, self.axis): + fres = func(grp_df, *args, **kwargs) + result[name] = fres + + result_index = self.grouper.result_index + other_ax = obj.axes[1 - self.axis] + out = self.obj._constructor(result, index=other_ax, columns=result_index) + if self.axis == 0: + out = out.T + + return out + + def _wrap_applied_output( + self, + data: DataFrame, + values: list, + not_indexed_same: bool = False, + is_transform: bool = False, + ): + if len(values) == 0: + if is_transform: + # GH#47787 see test_group_on_empty_multiindex + res_index = data.index + else: + res_index = self.grouper.result_index + + result = self.obj._constructor(index=res_index, columns=data.columns) + result = result.astype(data.dtypes, copy=False) + return result + + # GH12824 + # using values[0] here breaks test_groupby_apply_none_first + first_not_none = next(com.not_none(*values), None) + + if first_not_none is None: + # GH9684 - All values are None, return an empty frame. + return self.obj._constructor() + elif isinstance(first_not_none, DataFrame): + return self._concat_objects( + values, + not_indexed_same=not_indexed_same, + is_transform=is_transform, + ) + + key_index = self.grouper.result_index if self.as_index else None + + if isinstance(first_not_none, (np.ndarray, Index)): + # GH#1738: values is list of arrays of unequal lengths + # fall through to the outer else clause + # TODO: sure this is right? 
we used to do this + # after raising AttributeError above + # GH 18930 + if not is_hashable(self._selection): + # error: Need type annotation for "name" + name = tuple(self._selection) # type: ignore[var-annotated, arg-type] + else: + # error: Incompatible types in assignment + # (expression has type "Hashable", variable + # has type "Tuple[Any, ...]") + name = self._selection # type: ignore[assignment] + return self.obj._constructor_sliced(values, index=key_index, name=name) + elif not isinstance(first_not_none, Series): + # values are not series or array-like but scalars + # self._selection not passed through to Series as the + # result should not take the name of original selection + # of columns + if self.as_index: + return self.obj._constructor_sliced(values, index=key_index) + else: + result = self.obj._constructor(values, columns=[self._selection]) + result = self._insert_inaxis_grouper(result) + return result + else: + # values are Series + return self._wrap_applied_output_series( + values, + not_indexed_same, + first_not_none, + key_index, + is_transform, + ) + + def _wrap_applied_output_series( + self, + values: list[Series], + not_indexed_same: bool, + first_not_none, + key_index: Index | None, + is_transform: bool, + ) -> DataFrame | Series: + kwargs = first_not_none._construct_axes_dict() + backup = Series(**kwargs) + values = [x if (x is not None) else backup for x in values] + + all_indexed_same = all_indexes_same(x.index for x in values) + + if not all_indexed_same: + # GH 8467 + return self._concat_objects( + values, + not_indexed_same=True, + is_transform=is_transform, + ) + + # Combine values + # vstack+constructor is faster than concat and handles MI-columns + stacked_values = np.vstack([np.asarray(v) for v in values]) + + if self.axis == 0: + index = key_index + columns = first_not_none.index.copy() + if columns.name is None: + # GH6124 - propagate name of Series when it's consistent + names = {v.name for v in values} + if len(names) == 1: + columns.name = next(iter(names)) + else: + index = first_not_none.index + columns = key_index + stacked_values = stacked_values.T + + if stacked_values.dtype == object: + # We'll have the DataFrame constructor do inference + stacked_values = stacked_values.tolist() + result = self.obj._constructor(stacked_values, index=index, columns=columns) + + if not self.as_index: + result = self._insert_inaxis_grouper(result) + + return self._reindex_output(result) + + def _cython_transform( + self, + how: str, + numeric_only: bool = False, + axis: AxisInt = 0, + **kwargs, + ) -> DataFrame: + assert axis == 0 # handled by caller + + # With self.axis == 0, we have multi-block tests + # e.g. test_rank_min_int, test_cython_transform_frame + # test_transform_numeric_ret + # With self.axis == 1, _get_data_to_aggregate does a transpose + # so we always have a single block. 
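+        # Editorial gloss on the block-wise flow below: fetch the internal
+        # manager (optionally restricted to numeric columns), run the cython
+        # "transform" kernel on each block's values via grouped_reduce,
+        # restore the original row-axis labels, and rebuild the result
+        # DataFrame from the new manager.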
+ mgr: Manager2D = self._get_data_to_aggregate( + numeric_only=numeric_only, name=how + ) + + def arr_func(bvalues: ArrayLike) -> ArrayLike: + return self.grouper._cython_operation( + "transform", bvalues, how, 1, **kwargs + ) + + # We could use `mgr.apply` here and not have to set_axis, but + # we would have to do shape gymnastics for ArrayManager compat + res_mgr = mgr.grouped_reduce(arr_func) + res_mgr.set_axis(1, mgr.axes[1]) + + res_df = self.obj._constructor_from_mgr(res_mgr, axes=res_mgr.axes) + res_df = self._maybe_transpose_result(res_df) + return res_df + + def _transform_general(self, func, engine, engine_kwargs, *args, **kwargs): + if maybe_use_numba(engine): + return self._transform_with_numba( + func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + from pandas.core.reshape.concat import concat + + applied = [] + obj = self._obj_with_exclusions + gen = self.grouper.get_iterator(obj, axis=self.axis) + fast_path, slow_path = self._define_paths(func, *args, **kwargs) + + # Determine whether to use slow or fast path by evaluating on the first group. + # Need to handle the case of an empty generator and process the result so that + # it does not need to be computed again. + try: + name, group = next(gen) + except StopIteration: + pass + else: + # 2023-02-27 No tests broken by disabling this pinning + object.__setattr__(group, "name", name) + try: + path, res = self._choose_path(fast_path, slow_path, group) + except ValueError as err: + # e.g. test_transform_with_non_scalar_group + msg = "transform must return a scalar value for each group" + raise ValueError(msg) from err + if group.size > 0: + res = _wrap_transform_general_frame(self.obj, group, res) + applied.append(res) + + # Compute and process with the remaining groups + for name, group in gen: + if group.size == 0: + continue + # 2023-02-27 No tests broken by disabling this pinning + object.__setattr__(group, "name", name) + res = path(group) + + res = _wrap_transform_general_frame(self.obj, group, res) + applied.append(res) + + concat_index = obj.columns if self.axis == 0 else obj.index + other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1 + concatenated = concat(applied, axis=self.axis, verify_integrity=False) + concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False) + return self._set_result_index_ordered(concatenated) + + __examples_dataframe_doc = dedent( + """ + >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', + ... 'foo', 'bar'], + ... 'B' : ['one', 'one', 'two', 'three', + ... 'two', 'two'], + ... 'C' : [1, 5, 5, 2, 5, 5], + ... 'D' : [2.0, 5., 8., 1., 2., 9.]}) + >>> grouped = df.groupby('A')[['C', 'D']] + >>> grouped.transform(lambda x: (x - x.mean()) / x.std()) + C D + 0 -1.154701 -0.577350 + 1 0.577350 0.000000 + 2 0.577350 1.154701 + 3 -1.154701 -1.000000 + 4 0.577350 -0.577350 + 5 0.577350 1.000000 + + Broadcast result of the transformation + + >>> grouped.transform(lambda x: x.max() - x.min()) + C D + 0 4.0 6.0 + 1 3.0 8.0 + 2 4.0 6.0 + 3 3.0 8.0 + 4 4.0 6.0 + 5 3.0 8.0 + + >>> grouped.transform("mean") + C D + 0 3.666667 4.0 + 1 4.000000 5.0 + 2 3.666667 4.0 + 3 4.000000 5.0 + 4 3.666667 4.0 + 5 4.000000 5.0 + + .. 
versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + for example: + + >>> grouped.transform(lambda x: x.astype(int).max()) + C D + 0 5 8 + 1 5 9 + 2 5 8 + 3 5 9 + 4 5 8 + 5 5 9 + """ + ) + + @Substitution(klass="DataFrame", example=__examples_dataframe_doc) + @Appender(_transform_template) + def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): + return self._transform( + func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs + ) + + def _define_paths(self, func, *args, **kwargs): + if isinstance(func, str): + fast_path = lambda group: getattr(group, func)(*args, **kwargs) + slow_path = lambda group: group.apply( + lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis + ) + else: + fast_path = lambda group: func(group, *args, **kwargs) + slow_path = lambda group: group.apply( + lambda x: func(x, *args, **kwargs), axis=self.axis + ) + return fast_path, slow_path + + def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame): + path = slow_path + res = slow_path(group) + + if self.ngroups == 1: + # no need to evaluate multiple paths when only + # a single group exists + return path, res + + # if we make it here, test if we can use the fast path + try: + res_fast = fast_path(group) + except AssertionError: + raise # pragma: no cover + except Exception: + # GH#29631 For user-defined function, we can't predict what may be + # raised; see test_transform.test_transform_fastpath_raises + return path, res + + # verify fast path returns either: + # a DataFrame with columns equal to group.columns + # OR a Series with index equal to group.columns + if isinstance(res_fast, DataFrame): + if not res_fast.columns.equals(group.columns): + return path, res + elif isinstance(res_fast, Series): + if not res_fast.index.equals(group.columns): + return path, res + else: + return path, res + + if res_fast.equals(res): + path = fast_path + + return path, res + + def filter(self, func, dropna: bool = True, *args, **kwargs): + """ + Filter elements from groups that don't satisfy a criterion. + + Elements from groups are filtered if they do not satisfy the + boolean criterion specified by func. + + Parameters + ---------- + func : function + Criterion to apply to each group. Should return True or False. + dropna : bool + Drop groups that do not pass the filter. True by default; if False, + groups that evaluate False are filled with NaNs. + + Returns + ------- + DataFrame + + Notes + ----- + Each subframe is endowed the attribute 'name' in case you need to know + which group you are working on. + + Functions that mutate the passed object can produce unexpected + behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` + for more details. + + Examples + -------- + >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', + ... 'foo', 'bar'], + ... 'B' : [1, 2, 3, 4, 5, 6], + ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) + >>> grouped = df.groupby('A') + >>> grouped.filter(lambda x: x['B'].mean() > 3.) + A B C + 1 bar 2 5.0 + 3 bar 4 1.0 + 5 bar 6 9.0 + """ + indices = [] + + obj = self._selected_obj + gen = self.grouper.get_iterator(obj, axis=self.axis) + + for name, group in gen: + # 2023-02-27 no tests are broken this pinning, but it is documented in the + # docstring above. 
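+            # Editorial gloss: pinning ``name`` gives each sub-frame the
+            # ``.name`` attribute promised in the Notes section of the
+            # docstring, so ``func`` can tell which group it is deciding on.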
+ object.__setattr__(group, "name", name) + + res = func(group, *args, **kwargs) + + try: + res = res.squeeze() + except AttributeError: # allow e.g., scalars and frames to pass + pass + + # interpret the result of the filter + if is_bool(res) or (is_scalar(res) and isna(res)): + if notna(res) and res: + indices.append(self._get_index(name)) + else: + # non scalars aren't allowed + raise TypeError( + f"filter function returned a {type(res).__name__}, " + "but expected a scalar bool" + ) + + return self._apply_filter(indices, dropna) + + def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy: + if self.axis == 1: + # GH 37725 + raise ValueError("Cannot subset columns when using axis=1") + # per GH 23566 + if isinstance(key, tuple) and len(key) > 1: + # if len == 1, then it becomes a SeriesGroupBy and this is actually + # valid syntax, so don't raise + raise ValueError( + "Cannot subset columns with a tuple with more than one element. " + "Use a list instead." + ) + return super().__getitem__(key) + + def _gotitem(self, key, ndim: int, subset=None): + """ + sub-classes to define + return a sliced object + + Parameters + ---------- + key : string / list of selections + ndim : {1, 2} + requested ndim of result + subset : object, default None + subset to act on + """ + if ndim == 2: + if subset is None: + subset = self.obj + return DataFrameGroupBy( + subset, + self.keys, + axis=self.axis, + level=self.level, + grouper=self.grouper, + exclusions=self.exclusions, + selection=key, + as_index=self.as_index, + sort=self.sort, + group_keys=self.group_keys, + observed=self.observed, + dropna=self.dropna, + ) + elif ndim == 1: + if subset is None: + subset = self.obj[key] + return SeriesGroupBy( + subset, + self.keys, + level=self.level, + grouper=self.grouper, + exclusions=self.exclusions, + selection=key, + as_index=self.as_index, + sort=self.sort, + group_keys=self.group_keys, + observed=self.observed, + dropna=self.dropna, + ) + + raise AssertionError("invalid ndim for _gotitem") + + def _get_data_to_aggregate( + self, *, numeric_only: bool = False, name: str | None = None + ) -> Manager2D: + obj = self._obj_with_exclusions + if self.axis == 1: + mgr = obj.T._mgr + else: + mgr = obj._mgr + + if numeric_only: + mgr = mgr.get_numeric_data(copy=False) + return mgr + + def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame: + return self.obj._constructor_from_mgr(mgr, axes=mgr.axes) + + def _apply_to_column_groupbys(self, func) -> DataFrame: + from pandas.core.reshape.concat import concat + + obj = self._obj_with_exclusions + columns = obj.columns + sgbs = [ + SeriesGroupBy( + obj.iloc[:, i], + selection=colname, + grouper=self.grouper, + exclusions=self.exclusions, + observed=self.observed, + ) + for i, colname in enumerate(obj.columns) + ] + results = [func(sgb) for sgb in sgbs] + + if not len(results): + # concat would raise + res_df = DataFrame([], columns=columns, index=self.grouper.result_index) + else: + res_df = concat(results, keys=columns, axis=1) + + if not self.as_index: + res_df.index = default_index(len(res_df)) + res_df = self._insert_inaxis_grouper(res_df) + return res_df + + def nunique(self, dropna: bool = True) -> DataFrame: + """ + Return DataFrame with counts of unique elements in each position. + + Parameters + ---------- + dropna : bool, default True + Don't include NaN in the counts. + + Returns + ------- + nunique: DataFrame + + Examples + -------- + >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam', + ... 'ham', 'ham'], + ... 
'value1': [1, 5, 5, 2, 5, 5], + ... 'value2': list('abbaxy')}) + >>> df + id value1 value2 + 0 spam 1 a + 1 egg 5 b + 2 egg 5 b + 3 spam 2 a + 4 ham 5 x + 5 ham 5 y + + >>> df.groupby('id').nunique() + value1 value2 + id + egg 1 1 + ham 1 2 + spam 2 1 + + Check for rows with the same id but conflicting values: + + >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any()) + id value1 value2 + 0 spam 1 a + 3 spam 2 a + 4 ham 5 x + 5 ham 5 y + """ + + if self.axis != 0: + # see test_groupby_crash_on_nunique + return self._python_apply_general( + lambda sgb: sgb.nunique(dropna), self._obj_with_exclusions, is_agg=True + ) + + return self._apply_to_column_groupbys(lambda sgb: sgb.nunique(dropna)) + + def idxmax( + self, + axis: Axis | None | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + ) -> DataFrame: + """ + Return index of first occurrence of maximum over requested axis. + + NA/null values are excluded. + + Parameters + ---------- + axis : {{0 or 'index', 1 or 'columns'}}, default None + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. + If axis is not provided, grouper's axis is used. + + .. versionchanged:: 2.0.0 + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + Returns + ------- + Series + Indexes of maxima along the specified axis. + + Raises + ------ + ValueError + * If the row/column is empty + + See Also + -------- + Series.idxmax : Return index of the maximum element. + + Notes + ----- + This method is the DataFrame version of ``ndarray.argmax``. + + Examples + -------- + Consider a dataset containing food consumption in Argentina. + + >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48], + ... 'co2_emissions': [37.2, 19.66, 1712]}, + ... index=['Pork', 'Wheat Products', 'Beef']) + + >>> df + consumption co2_emissions + Pork 10.51 37.20 + Wheat Products 103.11 19.66 + Beef 55.48 1712.00 + + By default, it returns the index for the maximum value in each column. + + >>> df.idxmax() + consumption Wheat Products + co2_emissions Beef + dtype: object + + To return the index for the maximum value in each row, use ``axis="columns"``. + + >>> df.idxmax(axis="columns") + Pork co2_emissions + Wheat Products consumption + Beef co2_emissions + dtype: object + """ + if axis is not lib.no_default: + if axis is None: + axis = self.axis + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "idxmax") + else: + axis = self.axis + + def func(df): + return df.idxmax(axis=axis, skipna=skipna, numeric_only=numeric_only) + + func.__name__ = "idxmax" + result = self._python_apply_general( + func, self._obj_with_exclusions, not_indexed_same=True + ) + return result.astype(self.obj.index.dtype) if result.empty else result + + def idxmin( + self, + axis: Axis | None | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + ) -> DataFrame: + """ + Return index of first occurrence of minimum over requested axis. + + NA/null values are excluded. + + Parameters + ---------- + axis : {{0 or 'index', 1 or 'columns'}}, default None + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. + If axis is not provided, grouper's axis is used. + + .. 
versionchanged:: 2.0.0 + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + Returns + ------- + Series + Indexes of minima along the specified axis. + + Raises + ------ + ValueError + * If the row/column is empty + + See Also + -------- + Series.idxmin : Return index of the minimum element. + + Notes + ----- + This method is the DataFrame version of ``ndarray.argmin``. + + Examples + -------- + Consider a dataset containing food consumption in Argentina. + + >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48], + ... 'co2_emissions': [37.2, 19.66, 1712]}, + ... index=['Pork', 'Wheat Products', 'Beef']) + + >>> df + consumption co2_emissions + Pork 10.51 37.20 + Wheat Products 103.11 19.66 + Beef 55.48 1712.00 + + By default, it returns the index for the minimum value in each column. + + >>> df.idxmin() + consumption Pork + co2_emissions Wheat Products + dtype: object + + To return the index for the minimum value in each row, use ``axis="columns"``. + + >>> df.idxmin(axis="columns") + Pork consumption + Wheat Products co2_emissions + Beef consumption + dtype: object + """ + if axis is not lib.no_default: + if axis is None: + axis = self.axis + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "idxmin") + else: + axis = self.axis + + def func(df): + return df.idxmin(axis=axis, skipna=skipna, numeric_only=numeric_only) + + func.__name__ = "idxmin" + result = self._python_apply_general( + func, self._obj_with_exclusions, not_indexed_same=True + ) + return result.astype(self.obj.index.dtype) if result.empty else result + + boxplot = boxplot_frame_groupby + + def value_counts( + self, + subset: Sequence[Hashable] | None = None, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + dropna: bool = True, + ) -> DataFrame | Series: + """ + Return a Series or DataFrame containing counts of unique rows. + + .. versionadded:: 1.4.0 + + Parameters + ---------- + subset : list-like, optional + Columns to use when counting unique combinations. + normalize : bool, default False + Return proportions rather than frequencies. + sort : bool, default True + Sort by frequencies. + ascending : bool, default False + Sort in ascending order. + dropna : bool, default True + Don't include counts of rows that contain NA values. + + Returns + ------- + Series or DataFrame + Series if the groupby as_index is True, otherwise DataFrame. + + See Also + -------- + Series.value_counts: Equivalent method on Series. + DataFrame.value_counts: Equivalent method on DataFrame. + SeriesGroupBy.value_counts: Equivalent method on SeriesGroupBy. + + Notes + ----- + - If the groupby as_index is True then the returned Series will have a + MultiIndex with one level per input column. + - If the groupby as_index is False then the returned DataFrame will have an + additional column with the value_counts. The column is labelled 'count' or + 'proportion', depending on the ``normalize`` parameter. + + By default, rows that contain any NA values are omitted from + the result. + + By default, the result will be in descending order so that the + first element of each group is the most frequently-occurring row. + + Examples + -------- + >>> df = pd.DataFrame({ + ... 
'gender': ['male', 'male', 'female', 'male', 'female', 'male'], + ... 'education': ['low', 'medium', 'high', 'low', 'high', 'low'], + ... 'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR'] + ... }) + + >>> df + gender education country + 0 male low US + 1 male medium FR + 2 female high US + 3 male low FR + 4 female high FR + 5 male low FR + + >>> df.groupby('gender').value_counts() + gender education country + female high FR 1 + US 1 + male low FR 2 + US 1 + medium FR 1 + Name: count, dtype: int64 + + >>> df.groupby('gender').value_counts(ascending=True) + gender education country + female high FR 1 + US 1 + male low US 1 + medium FR 1 + low FR 2 + Name: count, dtype: int64 + + >>> df.groupby('gender').value_counts(normalize=True) + gender education country + female high FR 0.50 + US 0.50 + male low FR 0.50 + US 0.25 + medium FR 0.25 + Name: proportion, dtype: float64 + + >>> df.groupby('gender', as_index=False).value_counts() + gender education country count + 0 female high FR 1 + 1 female high US 1 + 2 male low FR 2 + 3 male low US 1 + 4 male medium FR 1 + + >>> df.groupby('gender', as_index=False).value_counts(normalize=True) + gender education country proportion + 0 female high FR 0.50 + 1 female high US 0.50 + 2 male low FR 0.50 + 3 male low US 0.25 + 4 male medium FR 0.25 + """ + return self._value_counts(subset, normalize, sort, ascending, dropna) + + def fillna( + self, + value: Hashable | Mapping | Series | DataFrame | None = None, + method: FillnaOptions | None = None, + axis: Axis | None | lib.NoDefault = lib.no_default, + inplace: bool = False, + limit: int | None = None, + downcast=lib.no_default, + ) -> DataFrame | None: + """ + Fill NA/NaN values using the specified method within groups. + + Parameters + ---------- + value : scalar, dict, Series, or DataFrame + Value to use to fill holes (e.g. 0), alternately a + dict/Series/DataFrame of values specifying which value to use for + each index (for a Series) or column (for a DataFrame). Values not + in the dict/Series/DataFrame will not be filled. This value cannot + be a list. Users wanting to use the ``value`` argument and not ``method`` + should prefer :meth:`.DataFrame.fillna` as this + will produce the same result and be more performant. + method : {{'bfill', 'ffill', None}}, default None + Method to use for filling holes. ``'ffill'`` will propagate + the last valid observation forward within a group. + ``'bfill'`` will use next valid observation to fill the gap. + axis : {0 or 'index', 1 or 'columns'} + Axis along which to fill missing values. When the :class:`DataFrameGroupBy` + ``axis`` argument is ``0``, using ``axis=1`` here will produce + the same results as :meth:`.DataFrame.fillna`. When the + :class:`DataFrameGroupBy` ``axis`` argument is ``1``, using ``axis=0`` + or ``axis=1`` here will produce the same results. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + inplace : bool, default False + Broken. Do not set to True. + limit : int, default None + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill within a group. In other words, + if there is a gap with more than this number of consecutive NaNs, + it will only be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. Must be greater than 0 if not None. 
+ downcast : dict, default is None + A dict of item->dtype of what to downcast if possible, + or the string 'infer' which will try to downcast to an appropriate + equal type (e.g. float64 to int64 if possible). + + .. deprecated:: 2.1.0 + + Returns + ------- + DataFrame + Object with missing values filled. + + See Also + -------- + ffill : Forward fill values within a group. + bfill : Backward fill values within a group. + + Examples + -------- + >>> df = pd.DataFrame( + ... { + ... "key": [0, 0, 1, 1, 1], + ... "A": [np.nan, 2, np.nan, 3, np.nan], + ... "B": [2, 3, np.nan, np.nan, np.nan], + ... "C": [np.nan, np.nan, 2, np.nan, np.nan], + ... } + ... ) + >>> df + key A B C + 0 0 NaN 2.0 NaN + 1 0 2.0 3.0 NaN + 2 1 NaN NaN 2.0 + 3 1 3.0 NaN NaN + 4 1 NaN NaN NaN + + Propagate non-null values forward or backward within each group along columns. + + >>> df.groupby("key").fillna(method="ffill") + A B C + 0 NaN 2.0 NaN + 1 2.0 3.0 NaN + 2 NaN NaN 2.0 + 3 3.0 NaN 2.0 + 4 3.0 NaN 2.0 + + >>> df.groupby("key").fillna(method="bfill") + A B C + 0 2.0 2.0 NaN + 1 2.0 3.0 NaN + 2 3.0 NaN 2.0 + 3 3.0 NaN NaN + 4 NaN NaN NaN + + Propagate non-null values forward or backward within each group along rows. + + >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="ffill").T + key A B C + 0 0.0 0.0 2.0 2.0 + 1 0.0 2.0 3.0 3.0 + 2 1.0 1.0 NaN 2.0 + 3 1.0 3.0 NaN NaN + 4 1.0 1.0 NaN NaN + + >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="bfill").T + key A B C + 0 0.0 NaN 2.0 NaN + 1 0.0 2.0 3.0 NaN + 2 1.0 NaN 2.0 2.0 + 3 1.0 3.0 NaN NaN + 4 1.0 NaN NaN NaN + + Only replace the first NaN element within a group along rows. + + >>> df.groupby("key").fillna(method="ffill", limit=1) + A B C + 0 NaN 2.0 NaN + 1 2.0 3.0 NaN + 2 NaN NaN 2.0 + 3 3.0 NaN 2.0 + 4 3.0 NaN NaN + """ + if method is not None: + warnings.warn( + f"{type(self).__name__}.fillna with 'method' is deprecated and " + "will raise in a future version. Use obj.ffill() or obj.bfill() " + "instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + result = self._op_via_apply( + "fillna", + value=value, + method=method, + axis=axis, + inplace=inplace, + limit=limit, + downcast=downcast, + ) + return result + + def take( + self, + indices: TakeIndexer, + axis: Axis | None | lib.NoDefault = lib.no_default, + **kwargs, + ) -> DataFrame: + """ + Return the elements in the given *positional* indices in each group. + + This means that we are not indexing according to actual values in + the index attribute of the object. We are indexing according to the + actual position of the element in the object. + + If a requested index does not exist for some group, this method will raise. + To get similar behavior that ignores indices that don't exist, see + :meth:`.DataFrameGroupBy.nth`. + + Parameters + ---------- + indices : array-like + An array of ints indicating which positions to take. + axis : {0 or 'index', 1 or 'columns', None}, default 0 + The axis on which to select elements. ``0`` means that we are + selecting rows, ``1`` means that we are selecting columns. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + **kwargs + For compatibility with :meth:`numpy.take`. Has no effect on the + output. + + Returns + ------- + DataFrame + An DataFrame containing the elements taken from each group. + + See Also + -------- + DataFrame.take : Take elements from a Series along an axis. + DataFrame.loc : Select a subset of a DataFrame by labels. 
+ DataFrame.iloc : Select a subset of a DataFrame by positions. + numpy.take : Take elements from an array along an axis. + + Examples + -------- + >>> df = pd.DataFrame([('falcon', 'bird', 389.0), + ... ('parrot', 'bird', 24.0), + ... ('lion', 'mammal', 80.5), + ... ('monkey', 'mammal', np.nan), + ... ('rabbit', 'mammal', 15.0)], + ... columns=['name', 'class', 'max_speed'], + ... index=[4, 3, 2, 1, 0]) + >>> df + name class max_speed + 4 falcon bird 389.0 + 3 parrot bird 24.0 + 2 lion mammal 80.5 + 1 monkey mammal NaN + 0 rabbit mammal 15.0 + >>> gb = df.groupby([1, 1, 2, 2, 2]) + + Take elements at positions 0 and 1 along the axis 0 (default). + + Note how the indices selected in the result do not correspond to + our input indices 0 and 1. That's because we are selecting the 0th + and 1st rows, not rows whose indices equal 0 and 1. + + >>> gb.take([0, 1]) + name class max_speed + 1 4 falcon bird 389.0 + 3 parrot bird 24.0 + 2 2 lion mammal 80.5 + 1 monkey mammal NaN + + The order of the specified indices influences the order in the result. + Here, the order is swapped from the previous example. + + >>> gb.take([1, 0]) + name class max_speed + 1 3 parrot bird 24.0 + 4 falcon bird 389.0 + 2 1 monkey mammal NaN + 2 lion mammal 80.5 + + Take elements at indices 1 and 2 along the axis 1 (column selection). + + We may take elements using negative integers for positive indices, + starting from the end of the object, just like with Python lists. + + >>> gb.take([-1, -2]) + name class max_speed + 1 3 parrot bird 24.0 + 4 falcon bird 389.0 + 2 0 rabbit mammal 15.0 + 1 monkey mammal NaN + """ + result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs) + return result + + def skew( + self, + axis: Axis | None | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ) -> DataFrame: + """ + Return unbiased skew within groups. + + Normalized by N-1. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns', None}, default 0 + Axis for the function to be applied on. + + Specifying ``axis=None`` will apply the aggregation across both axes. + + .. versionadded:: 2.0.0 + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + skipna : bool, default True + Exclude NA/null values when computing the result. + + numeric_only : bool, default False + Include only float, int, boolean columns. + + **kwargs + Additional keyword arguments to be passed to the function. + + Returns + ------- + DataFrame + + See Also + -------- + DataFrame.skew : Return unbiased skew over requested axis. + + Examples + -------- + >>> arrays = [['falcon', 'parrot', 'cockatoo', 'kiwi', + ... 'lion', 'monkey', 'rabbit'], + ... ['bird', 'bird', 'bird', 'bird', + ... 'mammal', 'mammal', 'mammal']] + >>> index = pd.MultiIndex.from_arrays(arrays, names=('name', 'class')) + >>> df = pd.DataFrame({'max_speed': [389.0, 24.0, 70.0, np.nan, + ... 80.5, 21.5, 15.0]}, + ... 
index=index) + >>> df + max_speed + name class + falcon bird 389.0 + parrot bird 24.0 + cockatoo bird 70.0 + kiwi bird NaN + lion mammal 80.5 + monkey mammal 21.5 + rabbit mammal 15.0 + >>> gb = df.groupby(["class"]) + >>> gb.skew() + max_speed + class + bird 1.628296 + mammal 1.669046 + >>> gb.skew(skipna=False) + max_speed + class + bird NaN + mammal 1.669046 + """ + if axis is lib.no_default: + axis = 0 + + if axis != 0: + result = self._op_via_apply( + "skew", + axis=axis, + skipna=skipna, + numeric_only=numeric_only, + **kwargs, + ) + return result + + def alt(obj): + # This should not be reached since the cython path should raise + # TypeError and not NotImplementedError. + raise TypeError(f"'skew' is not supported for dtype={obj.dtype}") + + return self._cython_agg_general( + "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs + ) + + @property + @doc(DataFrame.plot.__doc__) + def plot(self) -> GroupByPlot: + result = GroupByPlot(self) + return result + + @doc(DataFrame.corr.__doc__) + def corr( + self, + method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson", + min_periods: int = 1, + numeric_only: bool = False, + ) -> DataFrame: + result = self._op_via_apply( + "corr", method=method, min_periods=min_periods, numeric_only=numeric_only + ) + return result + + @doc(DataFrame.cov.__doc__) + def cov( + self, + min_periods: int | None = None, + ddof: int | None = 1, + numeric_only: bool = False, + ) -> DataFrame: + result = self._op_via_apply( + "cov", min_periods=min_periods, ddof=ddof, numeric_only=numeric_only + ) + return result + + @doc(DataFrame.hist.__doc__) + def hist( + self, + column: IndexLabel | None = None, + by=None, + grid: bool = True, + xlabelsize: int | None = None, + xrot: float | None = None, + ylabelsize: int | None = None, + yrot: float | None = None, + ax=None, + sharex: bool = False, + sharey: bool = False, + figsize: tuple[int, int] | None = None, + layout: tuple[int, int] | None = None, + bins: int | Sequence[int] = 10, + backend: str | None = None, + legend: bool = False, + **kwargs, + ): + result = self._op_via_apply( + "hist", + column=column, + by=by, + grid=grid, + xlabelsize=xlabelsize, + xrot=xrot, + ylabelsize=ylabelsize, + yrot=yrot, + ax=ax, + sharex=sharex, + sharey=sharey, + figsize=figsize, + layout=layout, + bins=bins, + backend=backend, + legend=legend, + **kwargs, + ) + return result + + @property + @doc(DataFrame.dtypes.__doc__) + def dtypes(self) -> Series: + # GH#51045 + warnings.warn( + f"{type(self).__name__}.dtypes is deprecated and will be removed in " + "a future version. 
Check the dtypes on the base object instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + + # error: Incompatible return value type (got "DataFrame", expected "Series") + return self._python_apply_general( # type: ignore[return-value] + lambda df: df.dtypes, self._selected_obj + ) + + @doc(DataFrame.corrwith.__doc__) + def corrwith( + self, + other: DataFrame | Series, + axis: Axis | lib.NoDefault = lib.no_default, + drop: bool = False, + method: CorrelationMethod = "pearson", + numeric_only: bool = False, + ) -> DataFrame: + result = self._op_via_apply( + "corrwith", + other=other, + axis=axis, + drop=drop, + method=method, + numeric_only=numeric_only, + ) + return result + + +def _wrap_transform_general_frame( + obj: DataFrame, group: DataFrame, res: DataFrame | Series +) -> DataFrame: + from pandas import concat + + if isinstance(res, Series): + # we need to broadcast across the + # other dimension; this will preserve dtypes + # GH14457 + if res.index.is_(obj.index): + res_frame = concat([res] * len(group.columns), axis=1) + res_frame.columns = group.columns + res_frame.index = group.index + else: + res_frame = obj._constructor( + np.tile(res.values, (len(group.index), 1)), + columns=group.columns, + index=group.index, + ) + assert isinstance(res_frame, DataFrame) + return res_frame + elif isinstance(res, DataFrame) and not res.index.is_(group.index): + return res._align_frame(group)[0] + else: + return res diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/groupby.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/groupby.py new file mode 100644 index 00000000..187c62bc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/groupby.py @@ -0,0 +1,5722 @@ +""" +Provide the groupby split-apply-combine paradigm. Define the GroupBy +class providing the base-class of operations. + +The SeriesGroupBy and DataFrameGroupBy sub-class +(defined in pandas.core.groupby.generic) +expose these user-facing objects to provide specific functionality. 
+""" +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterator, + Mapping, + Sequence, +) +import datetime +from functools import ( + partial, + wraps, +) +import inspect +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Callable, + Literal, + TypeVar, + Union, + cast, + final, +) +import warnings + +import numpy as np + +from pandas._config.config import option_context + +from pandas._libs import ( + Timestamp, + lib, +) +from pandas._libs.algos import rank_1d +import pandas._libs.groupby as libgroupby +from pandas._libs.missing import NA +from pandas._typing import ( + AnyArrayLike, + ArrayLike, + Axis, + AxisInt, + DtypeObj, + FillnaOptions, + IndexLabel, + NDFrameT, + PositionalIndexer, + RandomState, + Scalar, + T, + npt, +) +from pandas.compat.numpy import function as nv +from pandas.errors import ( + AbstractMethodError, + DataError, +) +from pandas.util._decorators import ( + Appender, + Substitution, + cache_readonly, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import ( + coerce_indexer_dtype, + ensure_dtype_can_hold_na, +) +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_float_dtype, + is_hashable, + is_integer, + is_integer_dtype, + is_list_like, + is_numeric_dtype, + is_object_dtype, + is_scalar, + needs_i8_conversion, +) +from pandas.core.dtypes.missing import ( + isna, + notna, +) + +from pandas.core import ( + algorithms, + sample, +) +from pandas.core._numba import executor +from pandas.core.apply import warn_alias_replacement +from pandas.core.arrays import ( + ArrowExtensionArray, + BaseMaskedArray, + Categorical, + ExtensionArray, + FloatingArray, + IntegerArray, + SparseArray, +) +from pandas.core.arrays.string_ import StringDtype +from pandas.core.arrays.string_arrow import ( + ArrowStringArray, + ArrowStringArrayNumpySemantics, +) +from pandas.core.base import ( + PandasObject, + SelectionMixin, +) +import pandas.core.common as com +from pandas.core.frame import DataFrame +from pandas.core.generic import NDFrame +from pandas.core.groupby import ( + base, + numba_, + ops, +) +from pandas.core.groupby.grouper import get_grouper +from pandas.core.groupby.indexing import ( + GroupByIndexingMixin, + GroupByNthSelector, +) +from pandas.core.indexes.api import ( + CategoricalIndex, + Index, + MultiIndex, + RangeIndex, + default_index, +) +from pandas.core.internals.blocks import ensure_block_shape +from pandas.core.series import Series +from pandas.core.sorting import get_group_index_sorter +from pandas.core.util.numba_ import ( + get_jit_arguments, + maybe_use_numba, +) + +if TYPE_CHECKING: + from typing import Any + + from pandas.core.window import ( + ExpandingGroupby, + ExponentialMovingWindowGroupby, + RollingGroupby, + ) + +_common_see_also = """ + See Also + -------- + Series.%(name)s : Apply a function %(name)s to a Series. + DataFrame.%(name)s : Apply a function %(name)s + to each row or column of a DataFrame. +""" + +_apply_docs = { + "template": """ + Apply function ``func`` group-wise and combine the results together. + + The function passed to ``apply`` must take a {input} as its first + argument and return a DataFrame, Series or scalar. ``apply`` will + then take care of combining the results back together into a single + dataframe or series. ``apply`` is therefore a highly flexible + grouping method. 
+ + While ``apply`` is a very flexible method, its downside is that + using it can be quite a bit slower than using more specific methods + like ``agg`` or ``transform``. Pandas offers a wide range of methods that will + be much faster than using ``apply`` for their specific purposes, so try to + use them before reaching for ``apply``. + + Parameters + ---------- + func : callable + A callable that takes a {input} as its first argument, and + returns a dataframe, a series or a scalar. In addition the + callable may take positional and keyword arguments. + args, kwargs : tuple and dict + Optional positional and keyword arguments to pass to ``func``. + + Returns + ------- + Series or DataFrame + + See Also + -------- + pipe : Apply function to the full GroupBy object instead of to each + group. + aggregate : Apply aggregate function to the GroupBy object. + transform : Apply function column-by-column to the GroupBy object. + Series.apply : Apply a function to a Series. + DataFrame.apply : Apply a function to each row or column of a DataFrame. + + Notes + ----- + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + see the examples below. + + Functions that mutate the passed object can produce unexpected + behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` + for more details. + + Examples + -------- + {examples} + """, + "dataframe_examples": """ + >>> df = pd.DataFrame({'A': 'a a b'.split(), + ... 'B': [1,2,3], + ... 'C': [4,6,5]}) + >>> g1 = df.groupby('A', group_keys=False) + >>> g2 = df.groupby('A', group_keys=True) + + Notice that ``g1`` and ``g2`` have two groups, ``a`` and ``b``, and only + differ in their ``group_keys`` argument. Calling `apply` in various ways, + we can get different grouping results: + + Example 1: The function passed to `apply` takes a DataFrame as + its argument and returns a DataFrame. `apply` combines the result for + each group together into a new DataFrame: + + >>> g1[['B', 'C']].apply(lambda x: x / x.sum()) + B C + 0 0.333333 0.4 + 1 0.666667 0.6 + 2 1.000000 1.0 + + In the above, the groups are not part of the index. We can have them included + by using ``g2`` where ``group_keys=True``: + + >>> g2[['B', 'C']].apply(lambda x: x / x.sum()) + B C + A + a 0 0.333333 0.4 + 1 0.666667 0.6 + b 2 1.000000 1.0 + + Example 2: The function passed to `apply` takes a DataFrame as + its argument and returns a Series. `apply` combines the result for + each group together into a new DataFrame. + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``. + + >>> g1[['B', 'C']].apply(lambda x: x.astype(float).max() - x.min()) + B C + A + a 1.0 2.0 + b 0.0 0.0 + + >>> g2[['B', 'C']].apply(lambda x: x.astype(float).max() - x.min()) + B C + A + a 1.0 2.0 + b 0.0 0.0 + + The ``group_keys`` argument has no effect here because the result is not + like-indexed (i.e. :ref:`a transform <groupby.transform>`) when compared + to the input. + + Example 3: The function passed to `apply` takes a DataFrame as + its argument and returns a scalar.
+ `apply` combines the result for + each group together into a Series, including setting the index as + appropriate: + + >>> g1.apply(lambda x: x.C.max() - x.B.min()) + A + a 5 + b 2 + dtype: int64""", + "series_examples": """ + >>> s = pd.Series([0, 1, 2], index='a a b'.split()) + >>> g1 = s.groupby(s.index, group_keys=False) + >>> g2 = s.groupby(s.index, group_keys=True) + + From ``s`` above we can see that ``g1`` and ``g2`` have two groups, ``a`` and + ``b``, and only differ in their ``group_keys`` argument. Calling `apply` in + various ways, we can get different grouping results: + + Example 1: The function passed to `apply` takes a Series as + its argument and returns a Series. `apply` combines the result for + each group together into a new Series. + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``. + + >>> g1.apply(lambda x: x*2 if x.name == 'a' else x/2) + a 0.0 + a 2.0 + b 1.0 + dtype: float64 + + In the above, the groups are not part of the index. We can have them included + by using ``g2`` where ``group_keys=True``: + + >>> g2.apply(lambda x: x*2 if x.name == 'a' else x/2) + a a 0.0 + a 2.0 + b b 1.0 + dtype: float64 + + Example 2: The function passed to `apply` takes a Series as + its argument and returns a scalar. `apply` combines the result for + each group together into a Series, including setting the index as + appropriate: + + >>> g1.apply(lambda x: x.max() - x.min()) + a 1 + b 0 + dtype: int64 + + The ``group_keys`` argument has no effect here because the result is not + like-indexed (i.e. :ref:`a transform <groupby.transform>`) when compared + to the input. + + >>> g2.apply(lambda x: x.max() - x.min()) + a 1 + b 0 + dtype: int64""", +} + +_groupby_agg_method_template = """ +Compute {fname} of group values. + +Parameters +---------- +numeric_only : bool, default {no} + Include only float, int, boolean columns. + + .. versionchanged:: 2.0.0 + + numeric_only no longer accepts ``None``. + +min_count : int, default {mc} + The required number of valid values to perform the operation. If fewer + than ``min_count`` non-NA values are present the result will be NA. + +Returns +------- +Series or DataFrame + Computed {fname} of values within each group. + +Examples +-------- +{example} +""" + +_groupby_agg_method_engine_template = """ +Compute {fname} of group values. + +Parameters +---------- +numeric_only : bool, default {no} + Include only float, int, boolean columns. + + .. versionchanged:: 2.0.0 + + numeric_only no longer accepts ``None``. + +min_count : int, default {mc} + The required number of valid values to perform the operation. If fewer + than ``min_count`` non-NA values are present the result will be NA. + +engine : str, default None {e} + * ``'cython'`` : Runs the aggregation through C-extensions from cython. + * ``'numba'`` : Runs the aggregation through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or the global setting ``compute.use_numba`` + +engine_kwargs : dict, default None {ek} + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be + applied to both the ``func`` and the ``apply`` groupby aggregation.
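[Editorial note, not part of the vendored diff] A sketch of how the ``engine`` and ``engine_kwargs`` parameters documented above are passed in practice; it assumes the optional numba dependency is installed and uses a hypothetical frame:

import pandas as pd

df = pd.DataFrame({"k": [0, 0, 1], "v": [1.0, 2.0, 4.0]})

# Spell out the documented numba defaults explicitly.
res = df.groupby("k")["v"].mean(
    engine="numba",
    engine_kwargs={"nopython": True, "nogil": False, "parallel": False},
)
# res: k=0 -> 1.5, k=1 -> 4.0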
+ +Returns +------- +Series or DataFrame + Computed {fname} of values within each group. + +Examples +-------- +{example} +""" + +_pipe_template = """ +Apply a ``func`` with arguments to this %(klass)s object and return its result. + +Use `.pipe` when you want to improve readability by chaining together +functions that expect Series, DataFrames, GroupBy or Resampler objects. +Instead of writing + +>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c) # doctest: +SKIP + +You can write + +>>> (df.groupby('group') +... .pipe(f) +... .pipe(g, arg1=a) +... .pipe(h, arg2=b, arg3=c)) # doctest: +SKIP + +which is much more readable. + +Parameters +---------- +func : callable or tuple of (callable, str) + Function to apply to this %(klass)s object or, alternatively, + a `(callable, data_keyword)` tuple where `data_keyword` is a + string indicating the keyword of `callable` that expects the + %(klass)s object. +args : iterable, optional + Positional arguments passed into `func`. +kwargs : dict, optional + A dictionary of keyword arguments passed into `func`. + +Returns +------- +The return type of `func`. + +See Also +-------- +Series.pipe : Apply a function with arguments to a series. +DataFrame.pipe : Apply a function with arguments to a dataframe. +apply : Apply function to each group instead of to the + full %(klass)s object. + +Notes +----- +See more `here <https://pandas.pydata.org/docs/user_guide/groupby.html#piping-function-calls>`_ + +Examples +-------- +%(examples)s +""" + +_transform_template = """ +Call function producing a same-indexed %(klass)s on each group. + +Returns a %(klass)s having the same indexes as the original object +filled with the transformed values. + +Parameters +---------- +f : function, str + Function to apply to each group. See the Notes section below for requirements. + + Accepted inputs are: + + - String + - Python function + - Numba JIT function with ``engine='numba'`` specified. + + Only passing a single function is supported with this engine. + If the ``'numba'`` engine is chosen, the function must be + a user defined function with ``values`` and ``index`` as the + first and second arguments respectively in the function signature. + Each group's index will be passed to the user defined function + and optionally available for use. + + If a string is chosen, then it needs to be the name + of the groupby method you want to use. +*args + Positional arguments to pass to func. +engine : str, default None + * ``'cython'`` : Runs the function through C-extensions from cython. + * ``'numba'`` : Runs the function through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or the global setting ``compute.use_numba`` + +engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be + applied to the function + +**kwargs + Keyword arguments to be passed into func. + +Returns +------- +%(klass)s + +See Also +-------- +%(klass)s.groupby.apply : Apply function ``func`` group-wise and combine + the results together. +%(klass)s.groupby.aggregate : Aggregate using one or more + operations over the specified axis. +%(klass)s.transform : Call ``func`` on self producing a %(klass)s with the + same axis shape as self.
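[Editorial note, not part of the vendored diff] A small sketch of what "same-indexed" means for a groupby transform, using hypothetical data:

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1.0, 3.0, 5.0]})

# A reduction passed to ``transform`` is broadcast back to the original
# rows, so the result lines up exactly with the input index.
demeaned = df["val"] - df.groupby("key")["val"].transform("mean")
# demeaned: [-1.0, 1.0, 0.0]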
+ +Notes +----- +Each group is endowed with the attribute 'name' in case you need to know +which group you are working on. + +The current implementation imposes three requirements on f: + +* f must return a value that either has the same shape as the input + subframe or can be broadcast to the shape of the input subframe. + For example, if `f` returns a scalar it will be broadcast to have the + same shape as the input subframe. +* if this is a DataFrame, f must support application column-by-column + in the subframe. If f also supports application to the entire subframe, + then a fast path is used starting from the second chunk. +* f must not mutate groups. Mutation is not supported and may + produce unexpected results. See :ref:`gotchas.udf-mutation` for more details. + +When using ``engine='numba'``, there will be no "fall back" behavior internally. +The group data and group index will be passed as numpy arrays to the JITed +user defined function, and no alternative execution attempts will be tried. + +.. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + see the examples below. + +.. versionchanged:: 2.0.0 + + When using ``.transform`` on a grouped DataFrame and the transformation function + returns a DataFrame, pandas now aligns the result's index + with the input's index. You can call ``.to_numpy()`` on the + result of the transformation function to avoid alignment. + +Examples +-------- +%(example)s""" + +_agg_template_series = """ +Aggregate using one or more operations over the specified axis. + +Parameters +---------- +func : function, str, list, dict or None + Function to use for aggregating the data. If a function, must either + work when passed a {klass} or when passed to {klass}.apply. + + Accepted combinations are: + + - function + - string function name + - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` + - None, in which case ``**kwargs`` are used with Named Aggregation. Here the + output has one column for each element in ``**kwargs``. The name of the + column is keyword, whereas the value determines the aggregation used to compute + the values in the column. + + Can also accept a Numba JIT function with + ``engine='numba'`` specified. Only passing a single function is supported + with this engine. + + If the ``'numba'`` engine is chosen, the function must be + a user defined function with ``values`` and ``index`` as the + first and second arguments respectively in the function signature. + Each group's index will be passed to the user defined function + and optionally available for use. + + .. deprecated:: 2.1.0 + + Passing a dictionary is deprecated and will raise in a future version + of pandas. Pass a list of aggregations instead. +*args + Positional arguments to pass to func. +engine : str, default None + * ``'cython'`` : Runs the function through C-extensions from cython. + * ``'numba'`` : Runs the function through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or the global setting ``compute.use_numba`` + +engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``.
The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be + applied to the function + +**kwargs + * If ``func`` is None, ``**kwargs`` are used to define the output names and + aggregations via Named Aggregation. See ``func`` entry. + * Otherwise, keyword arguments to be passed into func. + +Returns +------- +{klass} + +See Also +-------- +{klass}.groupby.apply : Apply function func group-wise + and combine the results together. +{klass}.groupby.transform : Transforms the Series on each group + based on the given function. +{klass}.aggregate : Aggregate using one or more + operations over the specified axis. + +Notes +----- +When using ``engine='numba'``, there will be no "fall back" behavior internally. +The group data and group index will be passed as numpy arrays to the JITed +user defined function, and no alternative execution attempts will be tried. + +Functions that mutate the passed object can produce unexpected +behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` +for more details. + +.. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + see the examples below. +{examples}""" + +_agg_template_frame = """ +Aggregate using one or more operations over the specified axis. + +Parameters +---------- +func : function, str, list, dict or None + Function to use for aggregating the data. If a function, must either + work when passed a {klass} or when passed to {klass}.apply. + + Accepted combinations are: + + - function + - string function name + - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` + - dict of axis labels -> functions, function names or list of such. + - None, in which case ``**kwargs`` are used with Named Aggregation. Here the + output has one column for each element in ``**kwargs``. The name of the + column is keyword, whereas the value determines the aggregation used to compute + the values in the column. + + Can also accept a Numba JIT function with + ``engine='numba'`` specified. Only passing a single function is supported + with this engine. + + If the ``'numba'`` engine is chosen, the function must be + a user defined function with ``values`` and ``index`` as the + first and second arguments respectively in the function signature. + Each group's index will be passed to the user defined function + and optionally available for use. + +*args + Positional arguments to pass to func. +engine : str, default None + * ``'cython'`` : Runs the function through C-extensions from cython. + * ``'numba'`` : Runs the function through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or the global setting ``compute.use_numba`` + +engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be + applied to the function + +**kwargs + * If ``func`` is None, ``**kwargs`` are used to define the output names and + aggregations via Named Aggregation. See ``func`` entry. + * Otherwise, keyword arguments to be passed into func. + +Returns +------- +{klass} + +See Also +-------- +{klass}.groupby.apply : Apply function func group-wise + and combine the results together.
+{klass}.groupby.transform : Transforms the Series on each group + based on the given function. +{klass}.aggregate : Aggregate using one or more + operations over the specified axis. + +Notes +----- +When using ``engine='numba'``, there will be no "fall back" behavior internally. +The group data and group index will be passed as numpy arrays to the JITed +user defined function, and no alternative execution attempts will be tried. + +Functions that mutate the passed object can produce unexpected +behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` +for more details. + +.. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + see the examples below. +{examples}""" + + +@final +class GroupByPlot(PandasObject): + """ + Class implementing the .plot attribute for groupby objects. + """ + + def __init__(self, groupby: GroupBy) -> None: + self._groupby = groupby + + def __call__(self, *args, **kwargs): + def f(self): + return self.plot(*args, **kwargs) + + f.__name__ = "plot" + return self._groupby._python_apply_general(f, self._groupby._selected_obj) + + def __getattr__(self, name: str): + def attr(*args, **kwargs): + def f(self): + return getattr(self.plot, name)(*args, **kwargs) + + return self._groupby._python_apply_general(f, self._groupby._selected_obj) + + return attr + + +_KeysArgType = Union[ + Hashable, + list[Hashable], + Callable[[Hashable], Hashable], + list[Callable[[Hashable], Hashable]], + Mapping[Hashable, Hashable], +] + + +class BaseGroupBy(PandasObject, SelectionMixin[NDFrameT], GroupByIndexingMixin): + _hidden_attrs = PandasObject._hidden_attrs | { + "as_index", + "axis", + "dropna", + "exclusions", + "grouper", + "group_keys", + "keys", + "level", + "obj", + "observed", + "sort", + } + + axis: AxisInt + grouper: ops.BaseGrouper + keys: _KeysArgType | None = None + level: IndexLabel | None = None + group_keys: bool + + @final + def __len__(self) -> int: + return len(self.groups) + + @final + def __repr__(self) -> str: + # TODO: Better repr for GroupBy object + return object.__repr__(self) + + @final + @property + def groups(self) -> dict[Hashable, np.ndarray]: + """ + Dict {group name -> group labels}. + + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> ser.groupby(level=0).groups + {'a': ['a', 'a'], 'b': ['b']} + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"]) + >>> df + a b c + 0 1 2 3 + 1 1 5 6 + 2 7 8 9 + >>> df.groupby(by=["a"]).groups + {1: [0, 1], 7: [2]} + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').groups + {Timestamp('2023-01-01 00:00:00'): 2, Timestamp('2023-02-01 00:00:00'): 4} + """ + return self.grouper.groups + + @final + @property + def ngroups(self) -> int: + return self.grouper.ngroups + + @final + @property + def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: + """ + Dict {group name -> group indices}. 
+ + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> ser.groupby(level=0).indices + {'a': array([0, 1]), 'b': array([2])} + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["owl", "toucan", "eagle"]) + >>> df + a b c + owl 1 2 3 + toucan 1 5 6 + eagle 7 8 9 + >>> df.groupby(by=["a"]).indices + {1: array([0, 1]), 7: array([2])} + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').indices + defaultdict(<class 'list'>, {Timestamp('2023-01-01 00:00:00'): [0, 1], + Timestamp('2023-02-01 00:00:00'): [2, 3]}) + """ + return self.grouper.indices + + @final + def _get_indices(self, names): + """ + Safely get multiple indices, translating datelike keys to their + underlying repr. + """ + + def get_converter(s): + # possibly convert to the actual key types + # in the indices, could be a Timestamp or a np.datetime64 + if isinstance(s, datetime.datetime): + return lambda key: Timestamp(key) + elif isinstance(s, np.datetime64): + return lambda key: Timestamp(key).asm8 + else: + return lambda key: key + + if len(names) == 0: + return [] + + if len(self.indices) > 0: + index_sample = next(iter(self.indices)) + else: + index_sample = None # Dummy sample + + name_sample = names[0] + if isinstance(index_sample, tuple): + if not isinstance(name_sample, tuple): + msg = "must supply a tuple to get_group with multiple grouping keys" + raise ValueError(msg) + if len(name_sample) != len(index_sample): + try: + # If the original grouper was a tuple + return [self.indices[name] for name in names] + except KeyError as err: + # turns out it wasn't a tuple + msg = ( + "must supply a same-length tuple to get_group " + "with multiple grouping keys" + ) + raise ValueError(msg) from err + + converters = [get_converter(s) for s in index_sample] + names = (tuple(f(n) for f, n in zip(converters, name)) for name in names) + + else: + converter = get_converter(index_sample) + names = (converter(name) for name in names) + + return [self.indices.get(name, []) for name in names] + + @final + def _get_index(self, name): + """ + Safely get a single index, translating datelike keys to their + underlying repr. + """ + return self._get_indices([name])[0] + + @final + @cache_readonly + def _selected_obj(self): + # Note: _selected_obj is always just `self.obj` for SeriesGroupBy + if isinstance(self.obj, Series): + return self.obj + + if self._selection is not None: + if is_hashable(self._selection): + # i.e. a single key, so selecting it will return a Series. + # In this case, _obj_with_exclusions would wrap the key + # in a list and return a single-column DataFrame. + return self.obj[self._selection] + + # Otherwise _selection is equivalent to _selection_list, so + # _selected_obj matches _obj_with_exclusions, so we can re-use + # that and avoid making a copy.
+ return self._obj_with_exclusions + + return self.obj + + @final + def _dir_additions(self) -> set[str]: + return self.obj._dir_additions() + + @Substitution( + klass="GroupBy", + examples=dedent( + """\ + >>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]}) + >>> df + A B + 0 a 1 + 1 b 2 + 2 a 3 + 3 b 4 + + To get the difference between each group's maximum and minimum value in one + pass, you can do + + >>> df.groupby('A').pipe(lambda x: x.max() - x.min()) + B + A + a 2 + b 2""" + ), + ) + @Appender(_pipe_template) + def pipe( + self, + func: Callable[..., T] | tuple[Callable[..., T], str], + *args, + **kwargs, + ) -> T: + return com.pipe(self, func, *args, **kwargs) + + @final + def get_group(self, name, obj=None) -> DataFrame | Series: + """ + Construct DataFrame from group with provided name. + + Parameters + ---------- + name : object + The name of the group to get as a DataFrame. + obj : DataFrame, default None + The DataFrame to take the group from. If + it is None, the object groupby was called on will + be used. + + .. deprecated:: 2.1.0 + The obj is deprecated and will be removed in a future version. + Do ``df.iloc[gb.indices.get(name)]`` + instead of ``gb.get_group(name, obj=df)``. + + Returns + ------- + same type as obj + + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> ser.groupby(level=0).get_group("a") + a 1 + a 2 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["owl", "toucan", "eagle"]) + >>> df + a b c + owl 1 2 3 + toucan 1 5 6 + eagle 7 8 9 + >>> df.groupby(by=["a"]).get_group(1) + a b c + owl 1 2 3 + toucan 1 5 6 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').get_group('2023-01-01') + 2023-01-01 1 + 2023-01-15 2 + dtype: int64 + """ + inds = self._get_index(name) + if not len(inds): + raise KeyError(name) + + if obj is None: + indexer = inds if self.axis == 0 else (slice(None), inds) + return self._selected_obj.iloc[indexer] + else: + warnings.warn( + "obj is deprecated and will be removed in a future version. " + "Do ``df.iloc[gb.indices.get(name)]`` " + "instead of ``gb.get_group(name, obj=df)``.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return obj._take_with_is_copy(inds, axis=self.axis) + + @final + def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]: + """ + Groupby iterator. + + Returns + ------- + Generator yielding sequence of (name, subsetted object) + for each group + + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> for x, y in ser.groupby(level=0): + ... print(f'{x}\\n{y}\\n') + a + a 1 + a 2 + dtype: int64 + b + b 3 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"]) + >>> df + a b c + 0 1 2 3 + 1 1 5 6 + 2 7 8 9 + >>> for x, y in df.groupby(by=["a"]): + ... print(f'{x}\\n{y}\\n') + (1,) + a b c + 0 1 2 3 + 1 1 5 6 + (7,) + a b c + 2 7 8 9 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ...
['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> for x, y in ser.resample('MS'): + ... print(f'{x}\\n{y}\\n') + 2023-01-01 00:00:00 + 2023-01-01 1 + 2023-01-15 2 + dtype: int64 + 2023-02-01 00:00:00 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + """ + keys = self.keys + level = self.level + result = self.grouper.get_iterator(self._selected_obj, axis=self.axis) + # error: Argument 1 to "len" has incompatible type "Hashable"; expected "Sized" + if is_list_like(level) and len(level) == 1: # type: ignore[arg-type] + # GH 51583 + warnings.warn( + "Creating a Groupby object with a length-1 list-like " + "level parameter will yield indexes as tuples in a future version. " + "To keep indexes as scalars, create Groupby objects with " + "a scalar level parameter instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if isinstance(keys, list) and len(keys) == 1: + # GH#42795 - when keys is a list, return tuples even when length is 1 + result = (((key,), group) for key, group in result) + return result + + +# To track operations that expand dimensions, like ohlc +OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame) + + +class GroupBy(BaseGroupBy[NDFrameT]): + """ + Class for grouping and aggregating relational data. + + See aggregate, transform, and apply functions on this object. + + It's easiest to use obj.groupby(...) to use GroupBy, but you can also do: + + :: + + grouped = groupby(obj, ...) + + Parameters + ---------- + obj : pandas object + axis : int, default 0 + level : int, default None + Level of MultiIndex + groupings : list of Grouping objects + Most users should ignore this + exclusions : array-like, optional + List of columns to exclude + name : str + Most users should ignore this + + Returns + ------- + **Attributes** + groups : dict + {group name -> group labels} + len(grouped) : int + Number of groups + + Notes + ----- + After grouping, see aggregate, apply, and transform functions. Here are + some other brief notes about usage. When grouping by multiple groups, the + result index will be a MultiIndex (hierarchical) by default. + + Iteration produces (key, group) tuples, i.e. chunking the data by group. So + you can write code like: + + :: + + grouped = obj.groupby(keys, axis=axis) + for key, group in grouped: + # do something with the data + + Function calls on GroupBy, if not specially implemented, "dispatch" to the + grouped data. So if you group a DataFrame and wish to invoke the std() + method on each group, you can simply do: + + :: + + df.groupby(mapper).std() + + rather than + + :: + + df.groupby(mapper).aggregate(np.std) + + You can pass arguments to these "wrapped" functions, too. 
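[Editorial note, not part of the vendored diff] A sketch of the dispatching described above, with a hypothetical frame:

import pandas as pd

df = pd.DataFrame({"g": ["x", "x", "y"], "v": [1.0, 2.0, 4.0]})

# Calling a method on the grouped object applies it group by group;
# the explicit aggregation spelling gives the same result.
a = df.groupby("g").std()
b = df.groupby("g").agg("std")
assert a.equals(b)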
+ + See the online documentation for full exposition on these topics and much + more + """ + + grouper: ops.BaseGrouper + as_index: bool + + @final + def __init__( + self, + obj: NDFrameT, + keys: _KeysArgType | None = None, + axis: Axis = 0, + level: IndexLabel | None = None, + grouper: ops.BaseGrouper | None = None, + exclusions: frozenset[Hashable] | None = None, + selection: IndexLabel | None = None, + as_index: bool = True, + sort: bool = True, + group_keys: bool = True, + observed: bool | lib.NoDefault = lib.no_default, + dropna: bool = True, + ) -> None: + self._selection = selection + + assert isinstance(obj, NDFrame), type(obj) + + self.level = level + + if not as_index: + if axis != 0: + raise ValueError("as_index=False only valid for axis=0") + + self.as_index = as_index + self.keys = keys + self.sort = sort + self.group_keys = group_keys + self.dropna = dropna + + if grouper is None: + grouper, exclusions, obj = get_grouper( + obj, + keys, + axis=axis, + level=level, + sort=sort, + observed=False if observed is lib.no_default else observed, + dropna=self.dropna, + ) + + if observed is lib.no_default: + if any(ping._passed_categorical for ping in grouper.groupings): + warnings.warn( + "The default of observed=False is deprecated and will be changed " + "to True in a future version of pandas. Pass observed=False to " + "retain current behavior or observed=True to adopt the future " + "default and silence this warning.", + FutureWarning, + stacklevel=find_stack_level(), + ) + observed = False + self.observed = observed + + self.obj = obj + self.axis = obj._get_axis_number(axis) + self.grouper = grouper + self.exclusions = frozenset(exclusions) if exclusions else frozenset() + + def __getattr__(self, attr: str): + if attr in self._internal_names_set: + return object.__getattribute__(self, attr) + if attr in self.obj: + return self[attr] + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{attr}'" + ) + + @final + def _deprecate_axis(self, axis: int, name: str) -> None: + if axis == 1: + warnings.warn( + f"{type(self).__name__}.{name} with axis=1 is deprecated and " + "will be removed in a future version. Operate on the un-grouped " + "DataFrame instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + warnings.warn( + f"The 'axis' keyword in {type(self).__name__}.{name} is deprecated " + "and will be removed in a future version. 
" + "Call without passing 'axis' instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + @final + def _op_via_apply(self, name: str, *args, **kwargs): + """Compute the result of an operation by using GroupBy's apply.""" + f = getattr(type(self._obj_with_exclusions), name) + sig = inspect.signature(f) + + if "axis" in kwargs and kwargs["axis"] is not lib.no_default: + axis = self.obj._get_axis_number(kwargs["axis"]) + self._deprecate_axis(axis, name) + elif "axis" in kwargs: + # exclude skew here because that was already defaulting to lib.no_default + # before this deprecation was instituted + if name == "skew": + pass + elif name == "fillna": + # maintain the behavior from before the deprecation + kwargs["axis"] = None + else: + kwargs["axis"] = 0 + + # a little trickery for aggregation functions that need an axis + # argument + if "axis" in sig.parameters: + if kwargs.get("axis", None) is None or kwargs.get("axis") is lib.no_default: + kwargs["axis"] = self.axis + + def curried(x): + return f(x, *args, **kwargs) + + # preserve the name so we can detect it when calling plot methods, + # to avoid duplicates + curried.__name__ = name + + # special case otherwise extra plots are created when catching the + # exception below + if name in base.plotting_methods: + return self._python_apply_general(curried, self._selected_obj) + + is_transform = name in base.transformation_kernels + result = self._python_apply_general( + curried, + self._obj_with_exclusions, + is_transform=is_transform, + not_indexed_same=not is_transform, + ) + + if self.grouper.has_dropped_na and is_transform: + # result will have dropped rows due to nans, fill with null + # and ensure index is ordered same as the input + result = self._set_result_index_ordered(result) + return result + + # ----------------------------------------------------------------- + # Dispatch/Wrapping + + @final + def _concat_objects( + self, + values, + not_indexed_same: bool = False, + is_transform: bool = False, + ): + from pandas.core.reshape.concat import concat + + if self.group_keys and not is_transform: + if self.as_index: + # possible MI return case + group_keys = self.grouper.result_index + group_levels = self.grouper.levels + group_names = self.grouper.names + + result = concat( + values, + axis=self.axis, + keys=group_keys, + levels=group_levels, + names=group_names, + sort=False, + ) + else: + # GH5610, returns a MI, with the first level being a + # range index + keys = list(range(len(values))) + result = concat(values, axis=self.axis, keys=keys) + + elif not not_indexed_same: + result = concat(values, axis=self.axis) + + ax = self._selected_obj._get_axis(self.axis) + if self.dropna: + labels = self.grouper.group_info[0] + mask = labels != -1 + ax = ax[mask] + + # this is a very unfortunate situation + # we can't use reindex to restore the original order + # when the ax has duplicates + # so we resort to this + # GH 14776, 30667 + # TODO: can we re-use e.g. _reindex_non_unique? + if ax.has_duplicates and not result.axes[self.axis].equals(ax): + # e.g. 
test_category_order_transformer + target = algorithms.unique1d(ax._values) + indexer, _ = result.index.get_indexer_non_unique(target) + result = result.take(indexer, axis=self.axis) + else: + result = result.reindex(ax, axis=self.axis, copy=False) + + else: + result = concat(values, axis=self.axis) + + if self.obj.ndim == 1: + name = self.obj.name + elif is_hashable(self._selection): + name = self._selection + else: + name = None + + if isinstance(result, Series) and name is not None: + result.name = name + + return result + + @final + def _set_result_index_ordered( + self, result: OutputFrameOrSeries + ) -> OutputFrameOrSeries: + # set the result index on the passed values object and + # return the new object, xref 8046 + + obj_axis = self.obj._get_axis(self.axis) + + if self.grouper.is_monotonic and not self.grouper.has_dropped_na: + # shortcut if we have an already ordered grouper + result = result.set_axis(obj_axis, axis=self.axis, copy=False) + return result + + # row order is scrambled => sort the rows by position in original index + original_positions = Index(self.grouper.result_ilocs()) + result = result.set_axis(original_positions, axis=self.axis, copy=False) + result = result.sort_index(axis=self.axis) + if self.grouper.has_dropped_na: + # Add back in any missing rows due to dropna - index here is integral + # with values referring to the row of the input so can use RangeIndex + result = result.reindex(RangeIndex(len(obj_axis)), axis=self.axis) + result = result.set_axis(obj_axis, axis=self.axis, copy=False) + + return result + + @final + def _insert_inaxis_grouper(self, result: Series | DataFrame) -> DataFrame: + if isinstance(result, Series): + result = result.to_frame() + + # zip in reverse so we can always insert at loc 0 + columns = result.columns + for name, lev, in_axis in zip( + reversed(self.grouper.names), + reversed(self.grouper.get_group_levels()), + reversed([grp.in_axis for grp in self.grouper.groupings]), + ): + # GH #28549 + # When using .apply(-), name will be in columns already + if name not in columns: + if in_axis: + result.insert(0, name, lev) + else: + msg = ( + "A grouping was used that is not in the columns of the " + "DataFrame and so was excluded from the result. This grouping " + "will be included in a future version of pandas. Add the " + "grouping as a column of the DataFrame to silence this warning." + ) + warnings.warn( + message=msg, + category=FutureWarning, + stacklevel=find_stack_level(), + ) + + return result + + @final + def _maybe_transpose_result(self, result: NDFrameT) -> NDFrameT: + if self.axis == 1: + # Only relevant for DataFrameGroupBy, no-op for SeriesGroupBy + result = result.T + if result.index.equals(self.obj.index): + # Retain e.g. DatetimeIndex/TimedeltaIndex freq + # e.g. test_groupby_crash_on_nunique + result.index = self.obj.index.copy() + return result + + @final + def _wrap_aggregated_output( + self, + result: Series | DataFrame, + qs: npt.NDArray[np.float64] | None = None, + ): + """ + Wraps the output of GroupBy aggregations into the expected result. 
+ + Parameters + ---------- + result : Series, DataFrame + + Returns + ------- + Series or DataFrame + """ + # ATM we do not get here for SeriesGroupBy; when we do, we will + # need to require that result.name already match self.obj.name + + if not self.as_index: + # `not self.as_index` is only relevant for DataFrameGroupBy, + # enforced in __init__ + result = self._insert_inaxis_grouper(result) + result = result._consolidate() + index = Index(range(self.grouper.ngroups)) + + else: + index = self.grouper.result_index + + if qs is not None: + # We get here with len(qs) != 1 and not self.as_index + # in test_pass_args_kwargs + index = _insert_quantile_level(index, qs) + + result.index = index + + # error: Argument 1 to "_maybe_transpose_result" of "GroupBy" has + # incompatible type "Union[Series, DataFrame]"; expected "NDFrameT" + res = self._maybe_transpose_result(result) # type: ignore[arg-type] + return self._reindex_output(res, qs=qs) + + def _wrap_applied_output( + self, + data, + values: list, + not_indexed_same: bool = False, + is_transform: bool = False, + ): + raise AbstractMethodError(self) + + # ----------------------------------------------------------------- + # numba + + @final + def _numba_prep(self, data: DataFrame): + ids, _, ngroups = self.grouper.group_info + sorted_index = self.grouper._sort_idx + sorted_ids = self.grouper._sorted_ids + + sorted_data = data.take(sorted_index, axis=self.axis).to_numpy() + # GH 46867 + index_data = data.index + if isinstance(index_data, MultiIndex): + if len(self.grouper.groupings) > 1: + raise NotImplementedError( + "Grouping with more than 1 grouping labels and " + "a MultiIndex is not supported with engine='numba'" + ) + group_key = self.grouper.groupings[0].name + index_data = index_data.get_level_values(group_key) + sorted_index_data = index_data.take(sorted_index).to_numpy() + + starts, ends = lib.generate_slices(sorted_ids, ngroups) + return ( + starts, + ends, + sorted_index_data, + sorted_data, + ) + + def _numba_agg_general( + self, + func: Callable, + dtype_mapping: dict[np.dtype, Any], + engine_kwargs: dict[str, bool] | None, + **aggregator_kwargs, + ): + """ + Perform groupby with a standard numerical aggregation function (e.g. mean) + with Numba. + """ + if not self.as_index: + raise NotImplementedError( + "as_index=False is not supported. Use .reset_index() instead." + ) + if self.axis == 1: + raise NotImplementedError("axis=1 is not supported.") + + data = self._obj_with_exclusions + df = data if data.ndim == 2 else data.to_frame() + + aggregator = executor.generate_shared_aggregator( + func, + dtype_mapping, + True, # is_grouped_kernel + **get_jit_arguments(engine_kwargs), + ) + # Pass group ids to kernel directly if it can handle it + # (This is faster since it doesn't require a sort) + ids, _, _ = self.grouper.group_info + ngroups = self.grouper.ngroups + + res_mgr = df._mgr.apply( + aggregator, labels=ids, ngroups=ngroups, **aggregator_kwargs + ) + res_mgr.axes[1] = self.grouper.result_index + result = df._constructor_from_mgr(res_mgr, axes=res_mgr.axes) + + if data.ndim == 1: + result = result.squeeze("columns") + result.name = data.name + else: + result.columns = data.columns + return result + + @final + def _transform_with_numba(self, func, *args, engine_kwargs=None, **kwargs): + """ + Perform groupby transform routine with the numba engine. 
+ + This routine mimics the data splitting routine of the DataSplitter class + to generate the indices of each group in the sorted data and then passes the + data and indices into a Numba jitted function. + """ + data = self._obj_with_exclusions + df = data if data.ndim == 2 else data.to_frame() + + starts, ends, sorted_index, sorted_data = self._numba_prep(df) + numba_.validate_udf(func) + numba_transform_func = numba_.generate_numba_transform_func( + func, **get_jit_arguments(engine_kwargs, kwargs) + ) + result = numba_transform_func( + sorted_data, + sorted_index, + starts, + ends, + len(df.columns), + *args, + ) + # result values needs to be resorted to their original positions since we + # evaluated the data sorted by group + result = result.take(np.argsort(sorted_index), axis=0) + index = data.index + if data.ndim == 1: + result_kwargs = {"name": data.name} + result = result.ravel() + else: + result_kwargs = {"columns": data.columns} + return data._constructor(result, index=index, **result_kwargs) + + @final + def _aggregate_with_numba(self, func, *args, engine_kwargs=None, **kwargs): + """ + Perform groupby aggregation routine with the numba engine. + + This routine mimics the data splitting routine of the DataSplitter class + to generate the indices of each group in the sorted data and then passes the + data and indices into a Numba jitted function. + """ + data = self._obj_with_exclusions + df = data if data.ndim == 2 else data.to_frame() + + starts, ends, sorted_index, sorted_data = self._numba_prep(df) + numba_.validate_udf(func) + numba_agg_func = numba_.generate_numba_agg_func( + func, **get_jit_arguments(engine_kwargs, kwargs) + ) + result = numba_agg_func( + sorted_data, + sorted_index, + starts, + ends, + len(df.columns), + *args, + ) + index = self.grouper.result_index + if data.ndim == 1: + result_kwargs = {"name": data.name} + result = result.ravel() + else: + result_kwargs = {"columns": data.columns} + res = data._constructor(result, index=index, **result_kwargs) + if not self.as_index: + res = self._insert_inaxis_grouper(res) + res.index = default_index(len(res)) + return res + + # ----------------------------------------------------------------- + # apply/agg/transform + + @Appender( + _apply_docs["template"].format( + input="dataframe", examples=_apply_docs["dataframe_examples"] + ) + ) + def apply(self, func, *args, **kwargs) -> NDFrameT: + orig_func = func + func = com.is_builtin_func(func) + if orig_func != func: + alias = com._builtin_table_alias[orig_func] + warn_alias_replacement(self, orig_func, alias) + + if isinstance(func, str): + if hasattr(self, func): + res = getattr(self, func) + if callable(res): + return res(*args, **kwargs) + elif args or kwargs: + raise ValueError(f"Cannot pass arguments to property {func}") + return res + + else: + raise TypeError(f"apply func should be callable, not '{func}'") + + elif args or kwargs: + if callable(func): + + @wraps(func) + def f(g): + return func(g, *args, **kwargs) + + else: + raise ValueError( + "func must be a callable if args or kwargs are supplied" + ) + else: + f = func + + # ignore SettingWithCopy here in case the user mutates + with option_context("mode.chained_assignment", None): + try: + result = self._python_apply_general(f, self._selected_obj) + except TypeError: + # gh-20949 + # try again, with .apply acting as a filtering + # operation, by excluding the grouping column + # This would normally not be triggered + # except if the udf is trying an operation that + # fails on *some* columns, e.g. 
a numeric operation + # on a string grouper column + + return self._python_apply_general(f, self._obj_with_exclusions) + + return result + + @final + def _python_apply_general( + self, + f: Callable, + data: DataFrame | Series, + not_indexed_same: bool | None = None, + is_transform: bool = False, + is_agg: bool = False, + ) -> NDFrameT: + """ + Apply function f in python space + + Parameters + ---------- + f : callable + Function to apply + data : Series or DataFrame + Data to apply f to + not_indexed_same: bool, optional + When specified, overrides the value of not_indexed_same. Apply behaves + differently when the result index is equal to the input index, but + this can be coincidental leading to value-dependent behavior. + is_transform : bool, default False + Indicator for whether the function is actually a transform + and should not have group keys prepended. + is_agg : bool, default False + Indicator for whether the function is an aggregation. When the + result is empty, we don't want to warn for this case. + See _GroupBy._python_agg_general. + + Returns + ------- + Series or DataFrame + data after applying f + """ + values, mutated = self.grouper.apply_groupwise(f, data, self.axis) + if not_indexed_same is None: + not_indexed_same = mutated + + return self._wrap_applied_output( + data, + values, + not_indexed_same, + is_transform, + ) + + @final + def _agg_general( + self, + numeric_only: bool = False, + min_count: int = -1, + *, + alias: str, + npfunc: Callable, + ): + result = self._cython_agg_general( + how=alias, + alt=npfunc, + numeric_only=numeric_only, + min_count=min_count, + ) + return result.__finalize__(self.obj, method="groupby") + + def _agg_py_fallback( + self, how: str, values: ArrayLike, ndim: int, alt: Callable + ) -> ArrayLike: + """ + Fallback to pure-python aggregation if _cython_operation raises + NotImplementedError. + """ + # We get here with a) EADtypes and b) object dtype + assert alt is not None + + if values.ndim == 1: + # For DataFrameGroupBy we only get here with ExtensionArray + ser = Series(values, copy=False) + else: + # We only get here with values.dtype == object + df = DataFrame(values.T, dtype=values.dtype) + # bc we split object blocks in grouped_reduce, we have only 1 col + # otherwise we'd have to worry about block-splitting GH#39329 + assert df.shape[1] == 1 + # Avoid call to self.values that can occur in DataFrame + # reductions; see GH#28949 + ser = df.iloc[:, 0] + + # We do not get here with UDFs, so we know that our dtype + # should always be preserved by the implemented aggregations + # TODO: Is this exactly right; see WrappedCythonOp get_result_dtype? 
+ try: + res_values = self.grouper.agg_series(ser, alt, preserve_dtype=True) + except Exception as err: + msg = f"agg function failed [how->{how},dtype->{ser.dtype}]" + # preserve the kind of exception that raised + raise type(err)(msg) from err + + if ser.dtype == object: + res_values = res_values.astype(object, copy=False) + + # If we are DataFrameGroupBy and went through a SeriesGroupByPath + # then we need to reshape + # GH#32223 includes case with IntegerArray values, ndarray res_values + # test_groupby_duplicate_columns with object dtype values + return ensure_block_shape(res_values, ndim=ndim) + + @final + def _cython_agg_general( + self, + how: str, + alt: Callable, + numeric_only: bool = False, + min_count: int = -1, + **kwargs, + ): + # Note: we never get here with how="ohlc" for DataFrameGroupBy; + # that goes through SeriesGroupBy + + data = self._get_data_to_aggregate(numeric_only=numeric_only, name=how) + + def array_func(values: ArrayLike) -> ArrayLike: + try: + result = self.grouper._cython_operation( + "aggregate", + values, + how, + axis=data.ndim - 1, + min_count=min_count, + **kwargs, + ) + except NotImplementedError: + # generally if we have numeric_only=False + # and non-applicable functions + # try to python agg + # TODO: shouldn't min_count matter? + # TODO: avoid special casing SparseArray here + if how in ["any", "all"] and isinstance(values, SparseArray): + pass + elif how in ["any", "all", "std", "sem"]: + raise # TODO: re-raise as TypeError? should not be reached + else: + return result + + result = self._agg_py_fallback(how, values, ndim=data.ndim, alt=alt) + return result + + new_mgr = data.grouped_reduce(array_func) + res = self._wrap_agged_manager(new_mgr) + out = self._wrap_aggregated_output(res) + if self.axis == 1: + out = out.infer_objects(copy=False) + return out + + def _cython_transform( + self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs + ): + raise AbstractMethodError(self) + + @final + def _transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): + # optimized transforms + orig_func = func + func = com.get_cython_func(func) or func + if orig_func != func: + warn_alias_replacement(self, orig_func, func) + + if not isinstance(func, str): + return self._transform_general(func, engine, engine_kwargs, *args, **kwargs) + + elif func not in base.transform_kernel_allowlist: + msg = f"'{func}' is not a valid function name for transform(name)" + raise ValueError(msg) + elif func in base.cythonized_kernels or func in base.transformation_kernels: + # cythonized transform or canned "agg+broadcast" + if engine is not None: + kwargs["engine"] = engine + kwargs["engine_kwargs"] = engine_kwargs + return getattr(self, func)(*args, **kwargs) + + else: + # i.e. func in base.reduction_kernels + + # GH#30918 Use _transform_fast only when we know func is an aggregation + # If func is a reduction, we need to broadcast the + # result to the whole group. Compute func result + # and deal with possible broadcasting below. + # Temporarily set observed for dealing with categoricals. 
+ with com.temp_setattr(self, "observed", True): + with com.temp_setattr(self, "as_index", True): + # GH#49834 - result needs groups in the index for + # _wrap_transform_fast_result + if engine is not None: + kwargs["engine"] = engine + kwargs["engine_kwargs"] = engine_kwargs + result = getattr(self, func)(*args, **kwargs) + + return self._wrap_transform_fast_result(result) + + @final + def _wrap_transform_fast_result(self, result: NDFrameT) -> NDFrameT: + """ + Fast transform path for aggregations. + """ + obj = self._obj_with_exclusions + + # for each col, reshape to size of original frame by take operation + ids, _, _ = self.grouper.group_info + result = result.reindex(self.grouper.result_index, axis=self.axis, copy=False) + + if self.obj.ndim == 1: + # i.e. SeriesGroupBy + out = algorithms.take_nd(result._values, ids) + output = obj._constructor(out, index=obj.index, name=obj.name) + else: + # `.size()` gives Series output on DataFrame input, need axis 0 + axis = 0 if result.ndim == 1 else self.axis + # GH#46209 + # Don't convert indices: negative indices need to give rise + # to null values in the result + new_ax = result.axes[axis].take(ids) + output = result._reindex_with_indexers( + {axis: (new_ax, ids)}, allow_dups=True, copy=False + ) + output = output.set_axis(obj._get_axis(self.axis), axis=axis) + return output + + # ----------------------------------------------------------------- + # Utilities + + @final + def _apply_filter(self, indices, dropna): + if len(indices) == 0: + indices = np.array([], dtype="int64") + else: + indices = np.sort(np.concatenate(indices)) + if dropna: + filtered = self._selected_obj.take(indices, axis=self.axis) + else: + mask = np.empty(len(self._selected_obj.index), dtype=bool) + mask.fill(False) + mask[indices.astype(int)] = True + # mask fails to broadcast when passed to where; broadcast manually. + mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T + filtered = self._selected_obj.where(mask) # Fill with NaNs. + return filtered + + @final + def _cumcount_array(self, ascending: bool = True) -> np.ndarray: + """ + Parameters + ---------- + ascending : bool, default True + If False, number in reverse, from length of group - 1 to 0. 
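+
+        Returns
+        -------
+        np.ndarray
+            Zero-based position of each row within its group, as int64, or
+            float64 with NaN for rows whose null group key was dropped.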
+ + Notes + ----- + this is currently implementing sort=False + (though the default is sort=True) for groupby in general + """ + ids, _, ngroups = self.grouper.group_info + sorter = get_group_index_sorter(ids, ngroups) + ids, count = ids[sorter], len(ids) + + if count == 0: + return np.empty(0, dtype=np.int64) + + run = np.r_[True, ids[:-1] != ids[1:]] + rep = np.diff(np.r_[np.nonzero(run)[0], count]) + out = (~run).cumsum() + + if ascending: + out -= np.repeat(out[run], rep) + else: + out = np.repeat(out[np.r_[run[1:], True]], rep) - out + + if self.grouper.has_dropped_na: + out = np.where(ids == -1, np.nan, out.astype(np.float64, copy=False)) + else: + out = out.astype(np.int64, copy=False) + + rev = np.empty(count, dtype=np.intp) + rev[sorter] = np.arange(count, dtype=np.intp) + return out[rev] + + # ----------------------------------------------------------------- + + @final + @property + def _obj_1d_constructor(self) -> Callable: + # GH28330 preserve subclassed Series/DataFrames + if isinstance(self.obj, DataFrame): + return self.obj._constructor_sliced + assert isinstance(self.obj, Series) + return self.obj._constructor + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def any(self, skipna: bool = True): + """ + Return True if any value in the group is truthful, else False. + + Parameters + ---------- + skipna : bool, default True + Flag to ignore nan values during truth testing. + + Returns + ------- + Series or DataFrame + DataFrame or Series of boolean values, where a value is True if any element + is True within its respective group, False otherwise. + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 0], index=lst) + >>> ser + a 1 + a 2 + b 0 + dtype: int64 + >>> ser.groupby(level=0).any() + a True + b False + dtype: bool + + For DataFrameGroupBy: + + >>> data = [[1, 0, 3], [1, 0, 6], [7, 1, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["ostrich", "penguin", "parrot"]) + >>> df + a b c + ostrich 1 0 3 + penguin 1 0 6 + parrot 7 1 9 + >>> df.groupby(by=["a"]).any() + b c + a + 1 False True + 7 True True + """ + return self._cython_agg_general( + "any", + alt=lambda x: Series(x).any(skipna=skipna), + skipna=skipna, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def all(self, skipna: bool = True): + """ + Return True if all values in the group are truthful, else False. + + Parameters + ---------- + skipna : bool, default True + Flag to ignore nan values during truth testing. + + Returns + ------- + Series or DataFrame + DataFrame or Series of boolean values, where a value is True if all elements + are True within its respective group, False otherwise. + %(see_also)s + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 0], index=lst) + >>> ser + a 1 + a 2 + b 0 + dtype: int64 + >>> ser.groupby(level=0).all() + a True + b False + dtype: bool + + For DataFrameGroupBy: + + >>> data = [[1, 0, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... 
index=["ostrich", "penguin", "parrot"]) + >>> df + a b c + ostrich 1 0 3 + penguin 1 5 6 + parrot 7 8 9 + >>> df.groupby(by=["a"]).all() + b c + a + 1 False True + 7 True True + """ + return self._cython_agg_general( + "all", + alt=lambda x: Series(x).all(skipna=skipna), + skipna=skipna, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def count(self) -> NDFrameT: + """ + Compute count of group, excluding missing values. + + Returns + ------- + Series or DataFrame + Count of values within each group. + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, np.nan], index=lst) + >>> ser + a 1.0 + a 2.0 + b NaN + dtype: float64 + >>> ser.groupby(level=0).count() + a 2 + b 0 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, np.nan, 3], [1, np.nan, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["cow", "horse", "bull"]) + >>> df + a b c + cow 1 NaN 3 + horse 1 NaN 6 + bull 7 8.0 9 + >>> df.groupby("a").count() + b c + a + 1 0 2 + 7 1 1 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').count() + 2023-01-01 2 + 2023-02-01 2 + Freq: MS, dtype: int64 + """ + data = self._get_data_to_aggregate() + ids, _, ngroups = self.grouper.group_info + mask = ids != -1 + + is_series = data.ndim == 1 + + def hfunc(bvalues: ArrayLike) -> ArrayLike: + # TODO(EA2D): reshape would not be necessary with 2D EAs + if bvalues.ndim == 1: + # EA + masked = mask & ~isna(bvalues).reshape(1, -1) + else: + masked = mask & ~isna(bvalues) + + counted = lib.count_level_2d(masked, labels=ids, max_bin=ngroups) + if isinstance(bvalues, BaseMaskedArray): + return IntegerArray( + counted[0], mask=np.zeros(counted.shape[1], dtype=np.bool_) + ) + elif isinstance(bvalues, ArrowExtensionArray) and not isinstance( + bvalues.dtype, StringDtype + ): + return type(bvalues)._from_sequence(counted[0]) + if is_series: + assert counted.ndim == 2 + assert counted.shape[0] == 1 + return counted[0] + return counted + + new_mgr = data.grouped_reduce(hfunc) + new_obj = self._wrap_agged_manager(new_mgr) + + # If we are grouping on categoricals we want unobserved categories to + # return zero, rather than the default of NaN which the reindexing in + # _wrap_aggregated_output() returns. GH 35028 + # e.g. test_dataframe_groupby_on_2_categoricals_when_observed_is_false + with com.temp_setattr(self, "observed", True): + result = self._wrap_aggregated_output(new_obj) + + return self._reindex_output(result, fill_value=0) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def mean( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + """ + Compute mean of groups, excluding missing values. + + Parameters + ---------- + numeric_only : bool, default False + Include only float, int, boolean columns. + + .. versionchanged:: 2.0.0 + + numeric_only no longer accepts ``None`` and defaults to ``False``. + + engine : str, default None + * ``'cython'`` : Runs the operation through C-extensions from cython. + * ``'numba'`` : Runs the operation through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or globally setting + ``compute.use_numba`` + + .. 
versionadded:: 1.4.0 + + engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` + + .. versionadded:: 1.4.0 + + Returns + ------- + pandas.Series or pandas.DataFrame + %(see_also)s + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2], + ... 'B': [np.nan, 2, 3, 4, 5], + ... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C']) + + Groupby one column and return the mean of the remaining columns in + each group. + + >>> df.groupby('A').mean() + B C + A + 1 3.0 1.333333 + 2 4.0 1.500000 + + Groupby two columns and return the mean of the remaining column. + + >>> df.groupby(['A', 'B']).mean() + C + A B + 1 2.0 2.0 + 4.0 1.0 + 2 3.0 1.0 + 5.0 2.0 + + Groupby one column and return the mean of only particular column in + the group. + + >>> df.groupby('A')['B'].mean() + A + 1 3.0 + 2 4.0 + Name: B, dtype: float64 + """ + + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_mean + + return self._numba_agg_general( + grouped_mean, + executor.float_dtype_mapping, + engine_kwargs, + min_periods=0, + ) + else: + result = self._cython_agg_general( + "mean", + alt=lambda x: Series(x).mean(numeric_only=numeric_only), + numeric_only=numeric_only, + ) + return result.__finalize__(self.obj, method="groupby") + + @final + def median(self, numeric_only: bool = False): + """ + Compute median of groups, excluding missing values. + + For multiple groupings, the result index will be a MultiIndex + + Parameters + ---------- + numeric_only : bool, default False + Include only float, int, boolean columns. + + .. versionchanged:: 2.0.0 + + numeric_only no longer accepts ``None`` and defaults to False. + + Returns + ------- + Series or DataFrame + Median of values within each group. + + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst) + >>> ser + a 7 + a 2 + a 8 + b 4 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).median() + a 7.0 + b 3.0 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]} + >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog', + ... 'mouse', 'mouse', 'mouse', 'mouse']) + >>> df + a b + dog 1 1 + dog 3 4 + dog 5 8 + mouse 7 4 + mouse 7 4 + mouse 8 2 + mouse 3 1 + >>> df.groupby(level=0).median() + a b + dog 3.0 4.0 + mouse 7.0 3.0 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 3, 4, 5], + ... index=pd.DatetimeIndex(['2023-01-01', + ... '2023-01-10', + ... '2023-01-15', + ... '2023-02-01', + ... '2023-02-10', + ... '2023-02-15'])) + >>> ser.resample('MS').median() + 2023-01-01 2.0 + 2023-02-01 4.0 + Freq: MS, dtype: float64 + """ + result = self._cython_agg_general( + "median", + alt=lambda x: Series(x).median(numeric_only=numeric_only), + numeric_only=numeric_only, + ) + return result.__finalize__(self.obj, method="groupby") + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def std( + self, + ddof: int = 1, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + numeric_only: bool = False, + ): + """ + Compute standard deviation of groups, excluding missing values. 
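+
+        The divisor used in calculations is ``N - ddof``, where ``N`` is the
+        number of non-NA elements in the group; with the default ``ddof=1``
+        this is the usual ``N - 1`` (sample) normalization.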
+ + For multiple groupings, the result index will be a MultiIndex. + + Parameters + ---------- + ddof : int, default 1 + Degrees of freedom. + + engine : str, default None + * ``'cython'`` : Runs the operation through C-extensions from cython. + * ``'numba'`` : Runs the operation through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or globally setting + ``compute.use_numba`` + + .. versionadded:: 1.4.0 + + engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` + + .. versionadded:: 1.4.0 + + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. + + Returns + ------- + Series or DataFrame + Standard deviation of values within each group. + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst) + >>> ser + a 7 + a 2 + a 8 + b 4 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).std() + a 3.21455 + b 0.57735 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]} + >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog', + ... 'mouse', 'mouse', 'mouse', 'mouse']) + >>> df + a b + dog 1 1 + dog 3 4 + dog 5 8 + mouse 7 4 + mouse 7 4 + mouse 8 2 + mouse 3 1 + >>> df.groupby(level=0).std() + a b + dog 2.000000 3.511885 + mouse 2.217356 1.500000 + """ + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_var + + return np.sqrt( + self._numba_agg_general( + grouped_var, + executor.float_dtype_mapping, + engine_kwargs, + min_periods=0, + ddof=ddof, + ) + ) + else: + return self._cython_agg_general( + "std", + alt=lambda x: Series(x).std(ddof=ddof), + numeric_only=numeric_only, + ddof=ddof, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def var( + self, + ddof: int = 1, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + numeric_only: bool = False, + ): + """ + Compute variance of groups, excluding missing values. + + For multiple groupings, the result index will be a MultiIndex. + + Parameters + ---------- + ddof : int, default 1 + Degrees of freedom. + + engine : str, default None + * ``'cython'`` : Runs the operation through C-extensions from cython. + * ``'numba'`` : Runs the operation through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or globally setting + ``compute.use_numba`` + + .. versionadded:: 1.4.0 + + engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` + + .. versionadded:: 1.4.0 + + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. 
+ + Returns + ------- + Series or DataFrame + Variance of values within each group. + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst) + >>> ser + a 7 + a 2 + a 8 + b 4 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).var() + a 10.333333 + b 0.333333 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]} + >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog', + ... 'mouse', 'mouse', 'mouse', 'mouse']) + >>> df + a b + dog 1 1 + dog 3 4 + dog 5 8 + mouse 7 4 + mouse 7 4 + mouse 8 2 + mouse 3 1 + >>> df.groupby(level=0).var() + a b + dog 4.000000 12.333333 + mouse 4.916667 2.250000 + """ + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_var + + return self._numba_agg_general( + grouped_var, + executor.float_dtype_mapping, + engine_kwargs, + min_periods=0, + ddof=ddof, + ) + else: + return self._cython_agg_general( + "var", + alt=lambda x: Series(x).var(ddof=ddof), + numeric_only=numeric_only, + ddof=ddof, + ) + + @final + def _value_counts( + self, + subset: Sequence[Hashable] | None = None, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + dropna: bool = True, + ) -> DataFrame | Series: + """ + Shared implementation of value_counts for SeriesGroupBy and DataFrameGroupBy. + + SeriesGroupBy additionally supports a bins argument. See the docstring of + DataFrameGroupBy.value_counts for a description of arguments. + """ + if self.axis == 1: + raise NotImplementedError( + "DataFrameGroupBy.value_counts only handles axis=0" + ) + name = "proportion" if normalize else "count" + + df = self.obj + obj = self._obj_with_exclusions + + in_axis_names = { + grouping.name for grouping in self.grouper.groupings if grouping.in_axis + } + if isinstance(obj, Series): + _name = obj.name + keys = [] if _name in in_axis_names else [obj] + else: + unique_cols = set(obj.columns) + if subset is not None: + subsetted = set(subset) + clashing = subsetted & set(in_axis_names) + if clashing: + raise ValueError( + f"Keys {clashing} in subset cannot be in " + "the groupby column keys." + ) + doesnt_exist = subsetted - unique_cols + if doesnt_exist: + raise ValueError( + f"Keys {doesnt_exist} in subset do not " + f"exist in the DataFrame." 
+ ) + else: + subsetted = unique_cols + + keys = [ + # Can't use .values because the column label needs to be preserved + obj.iloc[:, idx] + for idx, _name in enumerate(obj.columns) + if _name not in in_axis_names and _name in subsetted + ] + + groupings = list(self.grouper.groupings) + for key in keys: + grouper, _, _ = get_grouper( + df, + key=key, + axis=self.axis, + sort=self.sort, + observed=False, + dropna=dropna, + ) + groupings += list(grouper.groupings) + + # Take the size of the overall columns + gb = df.groupby( + groupings, + sort=self.sort, + observed=self.observed, + dropna=self.dropna, + ) + result_series = cast(Series, gb.size()) + result_series.name = name + + # GH-46357 Include non-observed categories + # of non-grouping columns regardless of `observed` + if any( + isinstance(grouping.grouping_vector, (Categorical, CategoricalIndex)) + and not grouping._observed + for grouping in groupings + ): + levels_list = [ping.result_index for ping in groupings] + multi_index, _ = MultiIndex.from_product( + levels_list, names=[ping.name for ping in groupings] + ).sortlevel() + result_series = result_series.reindex(multi_index, fill_value=0) + + if normalize: + # Normalize the results by dividing by the original group sizes. + # We are guaranteed to have the first N levels be the + # user-requested grouping. + levels = list( + range(len(self.grouper.groupings), result_series.index.nlevels) + ) + indexed_group_size = result_series.groupby( + result_series.index.droplevel(levels), + sort=self.sort, + dropna=self.dropna, + # GH#43999 - deprecation of observed=False + observed=False, + ).transform("sum") + result_series /= indexed_group_size + + # Handle groups of non-observed categories + result_series = result_series.fillna(0.0) + + if sort: + # Sort the values and then resort by the main grouping + index_level = range(len(self.grouper.groupings)) + result_series = result_series.sort_values(ascending=ascending).sort_index( + level=index_level, sort_remaining=False + ) + + result: Series | DataFrame + if self.as_index: + result = result_series + else: + # Convert to frame + index = result_series.index + columns = com.fill_missing_names(index.names) + if name in columns: + raise ValueError(f"Column label '{name}' is duplicate of result column") + result_series.name = name + result_series.index = index.set_names(range(len(columns))) + result_frame = result_series.reset_index() + orig_dtype = self.grouper.groupings[0].obj.columns.dtype # type: ignore[union-attr] # noqa: E501 + cols = Index(columns, dtype=orig_dtype).insert(len(columns), name) + result_frame.columns = cols + result = result_frame + return result.__finalize__(self.obj, method="value_counts") + + @final + def sem(self, ddof: int = 1, numeric_only: bool = False): + """ + Compute standard error of the mean of groups, excluding missing values. + + For multiple groupings, the result index will be a MultiIndex. + + Parameters + ---------- + ddof : int, default 1 + Degrees of freedom. + + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. + + Returns + ------- + Series or DataFrame + Standard error of the mean of values within each group. 
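+
+        See Also
+        --------
+        Series.sem : Return unbiased standard error of the mean over requested axis.
+        DataFrame.sem : Return unbiased standard error of the mean over requested axis.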
+ + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([5, 10, 8, 14], index=lst) + >>> ser + a 5 + a 10 + b 8 + b 14 + dtype: int64 + >>> ser.groupby(level=0).sem() + a 2.5 + b 3.0 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = [[1, 12, 11], [1, 15, 2], [2, 5, 8], [2, 6, 12]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tuna", "salmon", "catfish", "goldfish"]) + >>> df + a b c + tuna 1 12 11 + salmon 1 15 2 + catfish 2 5 8 + goldfish 2 6 12 + >>> df.groupby("a").sem() + b c + a + 1 1.5 4.5 + 2 0.5 2.0 + + For Resampler: + + >>> ser = pd.Series([1, 3, 2, 4, 3, 8], + ... index=pd.DatetimeIndex(['2023-01-01', + ... '2023-01-10', + ... '2023-01-15', + ... '2023-02-01', + ... '2023-02-10', + ... '2023-02-15'])) + >>> ser.resample('MS').sem() + 2023-01-01 0.577350 + 2023-02-01 1.527525 + Freq: MS, dtype: float64 + """ + if numeric_only and self.obj.ndim == 1 and not is_numeric_dtype(self.obj.dtype): + raise TypeError( + f"{type(self).__name__}.sem called with " + f"numeric_only={numeric_only} and dtype {self.obj.dtype}" + ) + return self._cython_agg_general( + "sem", + alt=lambda x: Series(x).sem(ddof=ddof), + numeric_only=numeric_only, + ddof=ddof, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def size(self) -> DataFrame | Series: + """ + Compute group sizes. + + Returns + ------- + DataFrame or Series + Number of rows in each group as a Series if as_index is True + or a DataFrame if as_index is False. + %(see_also)s + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> ser.groupby(level=0).size() + a 2 + b 1 + dtype: int64 + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["owl", "toucan", "eagle"]) + >>> df + a b c + owl 1 2 3 + toucan 1 5 6 + eagle 7 8 9 + >>> df.groupby("a").size() + a + 1 2 + 7 1 + dtype: int64 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + dtype: int64 + >>> ser.resample('MS').size() + 2023-01-01 2 + 2023-02-01 1 + Freq: MS, dtype: int64 + """ + result = self.grouper.size() + dtype_backend: None | Literal["pyarrow", "numpy_nullable"] = None + if isinstance(self.obj, Series): + if isinstance(self.obj.array, ArrowExtensionArray): + if isinstance(self.obj.array, ArrowStringArrayNumpySemantics): + dtype_backend = None + elif isinstance(self.obj.array, ArrowStringArray): + dtype_backend = "numpy_nullable" + else: + dtype_backend = "pyarrow" + elif isinstance(self.obj.array, BaseMaskedArray): + dtype_backend = "numpy_nullable" + # TODO: For DataFrames what if columns are mixed arrow/numpy/masked? + + # GH28330 preserve subclassed Series/DataFrames through calls + if isinstance(self.obj, Series): + result = self._obj_1d_constructor(result, name=self.obj.name) + else: + result = self._obj_1d_constructor(result) + + if dtype_backend is not None: + result = result.convert_dtypes( + infer_objects=False, + convert_string=False, + convert_boolean=False, + convert_floating=False, + dtype_backend=dtype_backend, + ) + + with com.temp_setattr(self, "as_index", True): + # size already has the desired behavior in GH#49519, but this makes the + # as_index=False path of _reindex_output fail on categorical groupers. 
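+            # e.g. with a categorical grouper and observed=False, unobserved
+            # categories get a size of 0 here instead of being dropped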
+ result = self._reindex_output(result, fill_value=0) + if not self.as_index: + # error: Incompatible types in assignment (expression has + # type "DataFrame", variable has type "Series") + result = result.rename("size").reset_index() # type: ignore[assignment] + return result + + @final + @doc( + _groupby_agg_method_engine_template, + fname="sum", + no=False, + mc=0, + e=None, + ek=None, + example=dedent( + """\ + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).sum() + a 3 + b 7 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tiger", "leopard", "cheetah", "lion"]) + >>> df + a b c + tiger 1 8 2 + leopard 1 2 5 + cheetah 2 5 8 + lion 2 6 9 + >>> df.groupby("a").sum() + b c + a + 1 10 7 + 2 11 17""" + ), + ) + def sum( + self, + numeric_only: bool = False, + min_count: int = 0, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_sum + + return self._numba_agg_general( + grouped_sum, + executor.default_dtype_mapping, + engine_kwargs, + min_periods=min_count, + ) + else: + # If we are grouping on categoricals we want unobserved categories to + # return zero, rather than the default of NaN which the reindexing in + # _agg_general() returns. GH #31422 + with com.temp_setattr(self, "observed", True): + result = self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="sum", + npfunc=np.sum, + ) + + return self._reindex_output(result, fill_value=0) + + @final + @doc( + _groupby_agg_method_template, + fname="prod", + no=False, + mc=0, + example=dedent( + """\ + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).prod() + a 2 + b 12 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tiger", "leopard", "cheetah", "lion"]) + >>> df + a b c + tiger 1 8 2 + leopard 1 2 5 + cheetah 2 5 8 + lion 2 6 9 + >>> df.groupby("a").prod() + b c + a + 1 16 10 + 2 30 72""" + ), + ) + def prod(self, numeric_only: bool = False, min_count: int = 0): + return self._agg_general( + numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod + ) + + @final + @doc( + _groupby_agg_method_engine_template, + fname="min", + no=False, + mc=-1, + e=None, + ek=None, + example=dedent( + """\ + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).min() + a 1 + b 3 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... 
index=["tiger", "leopard", "cheetah", "lion"]) + >>> df + a b c + tiger 1 8 2 + leopard 1 2 5 + cheetah 2 5 8 + lion 2 6 9 + >>> df.groupby("a").min() + b c + a + 1 2 2 + 2 5 8""" + ), + ) + def min( + self, + numeric_only: bool = False, + min_count: int = -1, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_min_max + + return self._numba_agg_general( + grouped_min_max, + executor.identity_dtype_mapping, + engine_kwargs, + min_periods=min_count, + is_max=False, + ) + else: + return self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="min", + npfunc=np.min, + ) + + @final + @doc( + _groupby_agg_method_engine_template, + fname="max", + no=False, + mc=-1, + e=None, + ek=None, + example=dedent( + """\ + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).max() + a 2 + b 4 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tiger", "leopard", "cheetah", "lion"]) + >>> df + a b c + tiger 1 8 2 + leopard 1 2 5 + cheetah 2 5 8 + lion 2 6 9 + >>> df.groupby("a").max() + b c + a + 1 8 5 + 2 6 9""" + ), + ) + def max( + self, + numeric_only: bool = False, + min_count: int = -1, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_min_max + + return self._numba_agg_general( + grouped_min_max, + executor.identity_dtype_mapping, + engine_kwargs, + min_periods=min_count, + is_max=True, + ) + else: + return self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="max", + npfunc=np.max, + ) + + @final + def first(self, numeric_only: bool = False, min_count: int = -1): + """ + Compute the first non-null entry of each column. + + Parameters + ---------- + numeric_only : bool, default False + Include only float, int, boolean columns. + min_count : int, default -1 + The required number of valid values to perform the operation. If fewer + than ``min_count`` non-NA values are present the result will be NA. + + Returns + ------- + Series or DataFrame + First non-null of values within each group. + + See Also + -------- + DataFrame.groupby : Apply a function groupby to each row or column of a + DataFrame. + pandas.core.groupby.DataFrameGroupBy.last : Compute the last non-null entry + of each column. + pandas.core.groupby.DataFrameGroupBy.nth : Take the nth row from each group. + + Examples + -------- + >>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[None, 5, 6], C=[1, 2, 3], + ... 
D=['3/11/2000', '3/12/2000', '3/13/2000'])) + >>> df['D'] = pd.to_datetime(df['D']) + >>> df.groupby("A").first() + B C D + A + 1 5.0 1 2000-03-11 + 3 6.0 3 2000-03-13 + >>> df.groupby("A").first(min_count=2) + B C D + A + 1 NaN 1.0 2000-03-11 + 3 NaN NaN NaT + >>> df.groupby("A").first(numeric_only=True) + B C + A + 1 5.0 1 + 3 6.0 3 + """ + + def first_compat(obj: NDFrameT, axis: AxisInt = 0): + def first(x: Series): + """Helper function for first item that isn't NA.""" + arr = x.array[notna(x.array)] + if not len(arr): + return x.array.dtype.na_value + return arr[0] + + if isinstance(obj, DataFrame): + return obj.apply(first, axis=axis) + elif isinstance(obj, Series): + return first(obj) + else: # pragma: no cover + raise TypeError(type(obj)) + + return self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="first", + npfunc=first_compat, + ) + + @final + def last(self, numeric_only: bool = False, min_count: int = -1): + """ + Compute the last non-null entry of each column. + + Parameters + ---------- + numeric_only : bool, default False + Include only float, int, boolean columns. If None, will attempt to use + everything, then use only numeric data. + min_count : int, default -1 + The required number of valid values to perform the operation. If fewer + than ``min_count`` non-NA values are present the result will be NA. + + Returns + ------- + Series or DataFrame + Last non-null of values within each group. + + See Also + -------- + DataFrame.groupby : Apply a function groupby to each row or column of a + DataFrame. + pandas.core.groupby.DataFrameGroupBy.first : Compute the first non-null entry + of each column. + pandas.core.groupby.DataFrameGroupBy.nth : Take the nth row from each group. + + Examples + -------- + >>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[5, None, 6], C=[1, 2, 3])) + >>> df.groupby("A").last() + B C + A + 1 5.0 2 + 3 6.0 3 + """ + + def last_compat(obj: NDFrameT, axis: AxisInt = 0): + def last(x: Series): + """Helper function for last item that isn't NA.""" + arr = x.array[notna(x.array)] + if not len(arr): + return x.array.dtype.na_value + return arr[-1] + + if isinstance(obj, DataFrame): + return obj.apply(last, axis=axis) + elif isinstance(obj, Series): + return last(obj) + else: # pragma: no cover + raise TypeError(type(obj)) + + return self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="last", + npfunc=last_compat, + ) + + @final + def ohlc(self) -> DataFrame: + """ + Compute open, high, low and close values of a group, excluding missing values. + + For multiple groupings, the result index will be a MultiIndex + + Returns + ------- + DataFrame + Open, high, low and close values within each group. + + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['SPX', 'CAC', 'SPX', 'CAC', 'SPX', 'CAC', 'SPX', 'CAC',] + >>> ser = pd.Series([3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 0.1, 0.5], index=lst) + >>> ser + SPX 3.4 + CAC 9.0 + SPX 7.2 + CAC 5.2 + SPX 8.8 + CAC 9.4 + SPX 0.1 + CAC 0.5 + dtype: float64 + >>> ser.groupby(level=0).ohlc() + open high low close + CAC 9.0 9.4 0.5 0.5 + SPX 3.4 8.8 0.1 0.1 + + For DataFrameGroupBy: + + >>> data = {2022: [1.2, 2.3, 8.9, 4.5, 4.4, 3, 2 , 1], + ... 2023: [3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 8.2, 1.0]} + >>> df = pd.DataFrame(data, index=['SPX', 'CAC', 'SPX', 'CAC', + ... 
'SPX', 'CAC', 'SPX', 'CAC'])
+        >>> df
+             2022  2023
+        SPX   1.2   3.4
+        CAC   2.3   9.0
+        SPX   8.9   7.2
+        CAC   4.5   5.2
+        SPX   4.4   8.8
+        CAC   3.0   9.4
+        SPX   2.0   8.2
+        CAC   1.0   1.0
+        >>> df.groupby(level=0).ohlc()
+            2022                 2023
+            open high  low close open high  low close
+        CAC  2.3  4.5  1.0   1.0  9.0  9.4  1.0   1.0
+        SPX  1.2  8.9  1.2   2.0  3.4  8.8  3.4   8.2
+
+        For Resampler:
+
+        >>> ser = pd.Series([1, 3, 2, 4, 3, 5],
+        ...                 index=pd.DatetimeIndex(['2023-01-01',
+        ...                                         '2023-01-10',
+        ...                                         '2023-01-15',
+        ...                                         '2023-02-01',
+        ...                                         '2023-02-10',
+        ...                                         '2023-02-15']))
+        >>> ser.resample('MS').ohlc()
+                    open  high  low  close
+        2023-01-01     1     3    1      2
+        2023-02-01     4     5    3      5
+        """
+        if self.obj.ndim == 1:
+            obj = self._selected_obj
+
+            is_numeric = is_numeric_dtype(obj.dtype)
+            if not is_numeric:
+                raise DataError("No numeric types to aggregate")
+
+            res_values = self.grouper._cython_operation(
+                "aggregate", obj._values, "ohlc", axis=0, min_count=-1
+            )
+
+            agg_names = ["open", "high", "low", "close"]
+            result = self.obj._constructor_expanddim(
+                res_values, index=self.grouper.result_index, columns=agg_names
+            )
+            return self._reindex_output(result)
+
+        result = self._apply_to_column_groupbys(lambda sgb: sgb.ohlc())
+        return result
+
+    @doc(DataFrame.describe)
+    def describe(
+        self,
+        percentiles=None,
+        include=None,
+        exclude=None,
+    ) -> NDFrameT:
+        obj = self._obj_with_exclusions
+
+        if len(obj) == 0:
+            described = obj.describe(
+                percentiles=percentiles, include=include, exclude=exclude
+            )
+            if obj.ndim == 1:
+                result = described
+            else:
+                result = described.unstack()
+            return result.to_frame().T.iloc[:0]
+
+        with com.temp_setattr(self, "as_index", True):
+            result = self._python_apply_general(
+                lambda x: x.describe(
+                    percentiles=percentiles, include=include, exclude=exclude
+                ),
+                obj,
+                not_indexed_same=True,
+            )
+        if self.axis == 1:
+            return result.T
+
+        # GH#49256 - properly handle the grouping column(s)
+        result = result.unstack()
+        if not self.as_index:
+            result = self._insert_inaxis_grouper(result)
+            result.index = default_index(len(result))
+
+        return result
+
+    @final
+    def resample(self, rule, *args, **kwargs):
+        """
+        Provide resampling when using a TimeGrouper.
+
+        Given a grouper, the function resamples it according to a
+        frequency string (for example, "M" for month-end frequency).
+
+        See the :ref:`frequency aliases <timeseries.offset_aliases>`
+        documentation for more details.
+
+        Parameters
+        ----------
+        rule : str or DateOffset
+            The offset string or object representing target grouper conversion.
+        *args, **kwargs
+            Possible arguments are `how`, `fill_method`, `limit`, `kind` and
+            `on`, and other arguments of `TimeGrouper`.
+
+        Returns
+        -------
+        pandas.api.typing.DatetimeIndexResamplerGroupby,
+        pandas.api.typing.PeriodIndexResamplerGroupby, or
+        pandas.api.typing.TimedeltaIndexResamplerGroupby
+            Return a new groupby object, with type depending on the data
+            being resampled.
+
+        See Also
+        --------
+        Grouper : Specify a frequency to resample with when
+            grouping by a key.
+        DatetimeIndex.resample : Frequency conversion and resampling of
+            time series.
+
+        Examples
+        --------
+        >>> idx = pd.date_range('1/1/2000', periods=4, freq='T')
+        >>> df = pd.DataFrame(data=4 * [range(2)],
+        ...                   index=idx,
+        ...                   columns=['a', 'b'])
+        >>> df.iloc[2, 0] = 5
+        >>> df
+                             a  b
+        2000-01-01 00:00:00  0  1
+        2000-01-01 00:01:00  0  1
+        2000-01-01 00:02:00  5  1
+        2000-01-01 00:03:00  0  1
+
+        Downsample the DataFrame into 3 minute bins and sum the values of
+        the timestamps falling into a bin.
+
+        >>> df.groupby('a').resample('3T').sum()
+                                 a  b
+        a
+        0   2000-01-01 00:00:00  0  2
+            2000-01-01 00:03:00  0  1
+        5   2000-01-01 00:00:00  5  1
+
+        Upsample the series into 30 second bins.
+
+        >>> df.groupby('a').resample('30S').sum()
+                                 a  b
+        a
+        0   2000-01-01 00:00:00  0  1
+            2000-01-01 00:00:30  0  0
+            2000-01-01 00:01:00  0  1
+            2000-01-01 00:01:30  0  0
+            2000-01-01 00:02:00  0  0
+            2000-01-01 00:02:30  0  0
+            2000-01-01 00:03:00  0  1
+        5   2000-01-01 00:02:00  5  1
+
+        Resample by month. Values are assigned to the month of the period.
+
+        >>> df.groupby('a').resample('M').sum()
+                      a  b
+        a
+        0 2000-01-31  0  3
+        5 2000-01-31  5  1
+
+        Downsample the series into 3 minute bins as above, but close the right
+        side of the bin interval.
+
+        >>> df.groupby('a').resample('3T', closed='right').sum()
+                                 a  b
+        a
+        0   1999-12-31 23:57:00  0  1
+            2000-01-01 00:00:00  0  2
+        5   2000-01-01 00:00:00  5  1
+
+        Downsample the series into 3 minute bins and close the right side of
+        the bin interval, but label each bin using the right edge instead of
+        the left.
+
+        >>> df.groupby('a').resample('3T', closed='right', label='right').sum()
+                                 a  b
+        a
+        0   2000-01-01 00:00:00  0  1
+            2000-01-01 00:03:00  0  2
+        5   2000-01-01 00:03:00  5  1
+        """
+        from pandas.core.resample import get_resampler_for_grouping
+
+        return get_resampler_for_grouping(self, rule, *args, **kwargs)
+
+    @final
+    def rolling(self, *args, **kwargs) -> RollingGroupby:
+        """
+        Return a rolling grouper, providing rolling functionality per group.
+
+        Parameters
+        ----------
+        window : int, timedelta, str, offset, or BaseIndexer subclass
+            Size of the moving window.
+
+            If an integer, the fixed number of observations used for
+            each window.
+
+            If a timedelta, str, or offset, the time period of each window. Each
+            window will be variable sized, based on the observations included in
+            the time-period. This is only valid for datetimelike indexes.
+            To learn more about the offsets & frequency strings, please see `this link
+            <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
+
+            If a BaseIndexer subclass, the window boundaries are computed
+            based on the defined ``get_window_bounds`` method. Additional rolling
+            keyword arguments, namely ``min_periods``, ``center``, ``closed`` and
+            ``step`` will be passed to ``get_window_bounds``.
+
+        min_periods : int, default None
+            Minimum number of observations in window required to have a value;
+            otherwise, result is ``np.nan``.
+
+            For a window that is specified by an offset,
+            ``min_periods`` will default to 1.
+
+            For a window that is specified by an integer, ``min_periods`` will default
+            to the size of the window.
+
+        center : bool, default False
+            If False, set the window labels as the right edge of the window index.
+
+            If True, set the window labels as the center of the window index.
+
+        win_type : str, default None
+            If ``None``, all points are evenly weighted.
+
+            If a string, it must be a valid `scipy.signal window function
+            <https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__.
+
+            Certain Scipy window types require additional parameters to be passed
+            in the aggregation function. The additional parameters must match
+            the keywords specified in the Scipy window type method signature.
+
+        on : str, optional
+            For a DataFrame, a column label or Index level on which
+            to calculate the rolling window, rather than the DataFrame's index.
+
+            A provided integer column is ignored and excluded from the result since
+            an integer index is not used to calculate the rolling window.
+
+        axis : int or str, default 0
+            If ``0`` or ``'index'``, roll across the rows.
+
+            If ``1`` or ``'columns'``, roll across the columns.
+
+            For `Series` this parameter is unused and defaults to 0.
+
+        closed : str, default None
+            If ``'right'``, the first point in the window is excluded from calculations.
+
+            If ``'left'``, the last point in the window is excluded from calculations.
+
+            If ``'both'``, no points in the window are excluded from calculations.
+
+            If ``'neither'``, the first and last points in the window are excluded
+            from calculations.
+
+            Default ``None`` (``'right'``).
+
+        method : str {'single', 'table'}, default 'single'
+            Execute the rolling operation per single column or row (``'single'``)
+            or over the entire object (``'table'``).
+
+            This argument is only implemented when specifying ``engine='numba'``
+            in the method call.
+
+        Returns
+        -------
+        pandas.api.typing.RollingGroupby
+            Return a new grouper with our rolling appended.
+
+        See Also
+        --------
+        Series.rolling : Calling object with Series data.
+        DataFrame.rolling : Calling object with DataFrames.
+        Series.groupby : Apply a function groupby to a Series.
+        DataFrame.groupby : Apply a function groupby.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({'A': [1, 1, 2, 2],
+        ...                    'B': [1, 2, 3, 4],
+        ...                    'C': [0.362, 0.227, 1.267, -0.562]})
+        >>> df
+           A  B      C
+        0  1  1  0.362
+        1  1  2  0.227
+        2  2  3  1.267
+        3  2  4 -0.562
+
+        >>> df.groupby('A').rolling(2).sum()
+               B      C
+        A
+        1 0  NaN    NaN
+          1  3.0  0.589
+        2 2  NaN    NaN
+          3  7.0  0.705
+
+        >>> df.groupby('A').rolling(2, min_periods=1).sum()
+               B      C
+        A
+        1 0  1.0  0.362
+          1  3.0  0.589
+        2 2  3.0  1.267
+          3  7.0  0.705
+
+        >>> df.groupby('A').rolling(2, on='B').sum()
+             B      C
+        A
+        1 0  1    NaN
+          1  2  0.589
+        2 2  3    NaN
+          3  4  0.705
+        """
+        from pandas.core.window import RollingGroupby
+
+        return RollingGroupby(
+            self._selected_obj,
+            *args,
+            _grouper=self.grouper,
+            _as_index=self.as_index,
+            **kwargs,
+        )
+
+    @final
+    @Substitution(name="groupby")
+    @Appender(_common_see_also)
+    def expanding(self, *args, **kwargs) -> ExpandingGroupby:
+        """
+        Return an expanding grouper, providing expanding
+        functionality per group.
+
+        Returns
+        -------
+        pandas.api.typing.ExpandingGroupby
+        """
+        from pandas.core.window import ExpandingGroupby
+
+        return ExpandingGroupby(
+            self._selected_obj,
+            *args,
+            _grouper=self.grouper,
+            **kwargs,
+        )
+
+    @final
+    @Substitution(name="groupby")
+    @Appender(_common_see_also)
+    def ewm(self, *args, **kwargs) -> ExponentialMovingWindowGroupby:
+        """
+        Return an ewm grouper, providing ewm functionality per group.
+
+        Returns
+        -------
+        pandas.api.typing.ExponentialMovingWindowGroupby
+        """
+        from pandas.core.window import ExponentialMovingWindowGroupby
+
+        return ExponentialMovingWindowGroupby(
+            self._selected_obj,
+            *args,
+            _grouper=self.grouper,
+            **kwargs,
+        )
+
+    @final
+    def _fill(self, direction: Literal["ffill", "bfill"], limit: int | None = None):
+        """
+        Shared function for `ffill` and `bfill` to call the Cython method.
+
+        Parameters
+        ----------
+        direction : {'ffill', 'bfill'}
+            Direction passed to underlying Cython function. `bfill` will cause
+            values to be filled backwards; `ffill` and any other values will
+            default to a forward fill.
+        limit : int, default None
+            Maximum number of consecutive values to fill. If `None`, this
+            method will convert to -1 prior to passing to Cython.
+
+        Returns
+        -------
+        `Series` or `DataFrame` with filled values
+
+        See Also
+        --------
+        ffill : Forward fill the missing values in the dataset.
+        bfill : Backward fill the missing values in the dataset.
+        """
+        # Need int value for Cython
+        if limit is None:
+            limit = -1
+
+        ids, _, _ = self.grouper.group_info
+        sorted_labels = np.argsort(ids, kind="mergesort").astype(np.intp, copy=False)
+        if direction == "bfill":
+            sorted_labels = sorted_labels[::-1]
+
+        col_func = partial(
+            libgroupby.group_fillna_indexer,
+            labels=ids,
+            sorted_labels=sorted_labels,
+            limit=limit,
+            dropna=self.dropna,
+        )
+
+        def blk_func(values: ArrayLike) -> ArrayLike:
+            mask = isna(values)
+            if values.ndim == 1:
+                indexer = np.empty(values.shape, dtype=np.intp)
+                col_func(out=indexer, mask=mask)
+                return algorithms.take_nd(values, indexer)
+
+            else:
+                # We broadcast algorithms.take_nd analogous to
+                # np.take_along_axis
+                if isinstance(values, np.ndarray):
+                    dtype = values.dtype
+                    if self.grouper.has_dropped_na:
+                        # dropped null groups give rise to nan in the result
+                        dtype = ensure_dtype_can_hold_na(values.dtype)
+                    out = np.empty(values.shape, dtype=dtype)
+                else:
+                    # Note: we only get here with backfill/pad,
+                    # so if we have a dtype that cannot hold NAs,
+                    # then there will be no -1s in indexer, so we can use
+                    # the original dtype (no need to ensure_dtype_can_hold_na)
+                    out = type(values)._empty(values.shape, dtype=values.dtype)
+
+                for i, value_element in enumerate(values):
+                    # call group_fillna_indexer column-wise
+                    indexer = np.empty(values.shape[1], dtype=np.intp)
+                    col_func(out=indexer, mask=mask[i])
+                    out[i, :] = algorithms.take_nd(value_element, indexer)
+                return out
+
+        mgr = self._get_data_to_aggregate()
+        res_mgr = mgr.apply(blk_func)
+
+        new_obj = self._wrap_agged_manager(res_mgr)
+
+        if self.axis == 1:
+            # Only relevant for DataFrameGroupBy
+            new_obj = new_obj.T
+            new_obj.columns = self.obj.columns
+
+        new_obj.index = self.obj.index
+        return new_obj
+
+    @final
+    @Substitution(name="groupby")
+    def ffill(self, limit: int | None = None):
+        """
+        Forward fill the values.
+
+        Parameters
+        ----------
+        limit : int, optional
+            Limit of how many values to fill.
+
+        Returns
+        -------
+        Series or DataFrame
+            Object with missing values filled.
+
+        See Also
+        --------
+        Series.ffill : Forward fill the missing values in the dataset.
+        DataFrame.ffill : Object with missing values filled or None if inplace=True.
+        Series.fillna : Fill NaN values of a Series.
+        DataFrame.fillna : Fill NaN values of a DataFrame.
+
+        Examples
+        --------
+
+        For SeriesGroupBy:
+
+        >>> key = [0, 0, 1, 1]
+        >>> ser = pd.Series([np.nan, 2, 3, np.nan], index=key)
+        >>> ser
+        0    NaN
+        0    2.0
+        1    3.0
+        1    NaN
+        dtype: float64
+        >>> ser.groupby(level=0).ffill()
+        0    NaN
+        0    2.0
+        1    3.0
+        1    3.0
+        dtype: float64
+
+        For DataFrameGroupBy:
+
+        >>> df = pd.DataFrame(
+        ...     {
+        ...         "key": [0, 0, 1, 1, 1],
+        ...         "A": [np.nan, 2, np.nan, 3, np.nan],
+        ...         "B": [2, 3, np.nan, np.nan, np.nan],
+        ...         "C": [np.nan, np.nan, 2, np.nan, np.nan],
+        ...     }
+        ... )
+        >>> df
+           key    A    B    C
+        0    0  NaN  2.0  NaN
+        1    0  2.0  3.0  NaN
+        2    1  NaN  NaN  2.0
+        3    1  3.0  NaN  NaN
+        4    1  NaN  NaN  NaN
+
+        Propagate non-null values forward within each group along columns.
+
+        >>> df.groupby("key").ffill()
+             A    B    C
+        0  NaN  2.0  NaN
+        1  2.0  3.0  NaN
+        2  NaN  NaN  2.0
+        3  3.0  NaN  2.0
+        4  3.0  NaN  2.0
+
+        Propagate non-null values forward within each group along rows.
+
+        >>> df.T.groupby(np.array([0, 0, 1, 1])).ffill().T
+           key    A    B    C
+        0  0.0  0.0  2.0  2.0
+        1  0.0  2.0  3.0  3.0
+        2  1.0  1.0  NaN  2.0
+        3  1.0  3.0  NaN  NaN
+        4  1.0  1.0  NaN  NaN
+
+        Only replace the first NaN element within a group along rows.
+ + >>> df.groupby("key").ffill(limit=1) + A B C + 0 NaN 2.0 NaN + 1 2.0 3.0 NaN + 2 NaN NaN 2.0 + 3 3.0 NaN 2.0 + 4 3.0 NaN NaN + """ + return self._fill("ffill", limit=limit) + + @final + @Substitution(name="groupby") + def bfill(self, limit: int | None = None): + """ + Backward fill the values. + + Parameters + ---------- + limit : int, optional + Limit of how many values to fill. + + Returns + ------- + Series or DataFrame + Object with missing values filled. + + See Also + -------- + Series.bfill : Backward fill the missing values in the dataset. + DataFrame.bfill: Backward fill the missing values in the dataset. + Series.fillna: Fill NaN values of a Series. + DataFrame.fillna: Fill NaN values of a DataFrame. + + Examples + -------- + + With Series: + + >>> index = ['Falcon', 'Falcon', 'Parrot', 'Parrot', 'Parrot'] + >>> s = pd.Series([None, 1, None, None, 3], index=index) + >>> s + Falcon NaN + Falcon 1.0 + Parrot NaN + Parrot NaN + Parrot 3.0 + dtype: float64 + >>> s.groupby(level=0).bfill() + Falcon 1.0 + Falcon 1.0 + Parrot 3.0 + Parrot 3.0 + Parrot 3.0 + dtype: float64 + >>> s.groupby(level=0).bfill(limit=1) + Falcon 1.0 + Falcon 1.0 + Parrot NaN + Parrot 3.0 + Parrot 3.0 + dtype: float64 + + With DataFrame: + + >>> df = pd.DataFrame({'A': [1, None, None, None, 4], + ... 'B': [None, None, 5, None, 7]}, index=index) + >>> df + A B + Falcon 1.0 NaN + Falcon NaN NaN + Parrot NaN 5.0 + Parrot NaN NaN + Parrot 4.0 7.0 + >>> df.groupby(level=0).bfill() + A B + Falcon 1.0 NaN + Falcon NaN NaN + Parrot 4.0 5.0 + Parrot 4.0 7.0 + Parrot 4.0 7.0 + >>> df.groupby(level=0).bfill(limit=1) + A B + Falcon 1.0 NaN + Falcon NaN NaN + Parrot NaN 5.0 + Parrot 4.0 7.0 + Parrot 4.0 7.0 + """ + return self._fill("bfill", limit=limit) + + @final + @property + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def nth(self) -> GroupByNthSelector: + """ + Take the nth row from each group if n is an int, otherwise a subset of rows. + + Can be either a call or an index. dropna is not available with index notation. + Index notation accepts a comma separated list of integers and slices. + + If dropna, will take the nth non-null row, dropna is either + 'all' or 'any'; this is equivalent to calling dropna(how=dropna) + before the groupby. + + Parameters + ---------- + n : int, slice or list of ints and slices + A single nth value for the row or a list of nth values or slices. + + .. versionchanged:: 1.4.0 + Added slice and lists containing slices. + Added index notation. + + dropna : {'any', 'all', None}, default None + Apply the specified dropna operation before counting which row is + the nth row. Only supported if n is an int. + + Returns + ------- + Series or DataFrame + N-th value within each group. + %(see_also)s + Examples + -------- + + >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2], + ... 
'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B']) + >>> g = df.groupby('A') + >>> g.nth(0) + A B + 0 1 NaN + 2 2 3.0 + >>> g.nth(1) + A B + 1 1 2.0 + 4 2 5.0 + >>> g.nth(-1) + A B + 3 1 4.0 + 4 2 5.0 + >>> g.nth([0, 1]) + A B + 0 1 NaN + 1 1 2.0 + 2 2 3.0 + 4 2 5.0 + >>> g.nth(slice(None, -1)) + A B + 0 1 NaN + 1 1 2.0 + 2 2 3.0 + + Index notation may also be used + + >>> g.nth[0, 1] + A B + 0 1 NaN + 1 1 2.0 + 2 2 3.0 + 4 2 5.0 + >>> g.nth[:-1] + A B + 0 1 NaN + 1 1 2.0 + 2 2 3.0 + + Specifying `dropna` allows ignoring ``NaN`` values + + >>> g.nth(0, dropna='any') + A B + 1 1 2.0 + 2 2 3.0 + + When the specified ``n`` is larger than any of the groups, an + empty DataFrame is returned + + >>> g.nth(3, dropna='any') + Empty DataFrame + Columns: [A, B] + Index: [] + """ + return GroupByNthSelector(self) + + def _nth( + self, + n: PositionalIndexer | tuple, + dropna: Literal["any", "all", None] = None, + ) -> NDFrameT: + if not dropna: + mask = self._make_mask_from_positional_indexer(n) + + ids, _, _ = self.grouper.group_info + + # Drop NA values in grouping + mask = mask & (ids != -1) + + out = self._mask_selected_obj(mask) + return out + + # dropna is truthy + if not is_integer(n): + raise ValueError("dropna option only supported for an integer argument") + + if dropna not in ["any", "all"]: + # Note: when agg-ing picker doesn't raise this, just returns NaN + raise ValueError( + "For a DataFrame or Series groupby.nth, dropna must be " + "either None, 'any' or 'all', " + f"(was passed {dropna})." + ) + + # old behaviour, but with all and any support for DataFrames. + # modified in GH 7559 to have better perf + n = cast(int, n) + dropped = self._selected_obj.dropna(how=dropna, axis=self.axis) + + # get a new grouper for our dropped obj + grouper: np.ndarray | Index | ops.BaseGrouper + if len(dropped) == len(self._selected_obj): + # Nothing was dropped, can use the same grouper + grouper = self.grouper + else: + # we don't have the grouper info available + # (e.g. we have selected out + # a column that is not in the current object) + axis = self.grouper.axis + grouper = self.grouper.codes_info[axis.isin(dropped.index)] + if self.grouper.has_dropped_na: + # Null groups need to still be encoded as -1 when passed to groupby + nulls = grouper == -1 + # error: No overload variant of "where" matches argument types + # "Any", "NAType", "Any" + values = np.where(nulls, NA, grouper) # type: ignore[call-overload] + grouper = Index(values, dtype="Int64") + + if self.axis == 1: + grb = dropped.T.groupby(grouper, as_index=self.as_index, sort=self.sort) + else: + grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort) + return grb.nth(n) + + @final + def quantile( + self, + q: float | AnyArrayLike = 0.5, + interpolation: str = "linear", + numeric_only: bool = False, + ): + """ + Return group values at the given quantile, a la numpy.percentile. + + Parameters + ---------- + q : float or array-like, default 0.5 (50% quantile) + Value(s) between 0 and 1 providing the quantile(s) to compute. + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + Method to use when the desired quantile falls between two points. + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. + + Returns + ------- + Series or DataFrame + Return type determined by caller of GroupBy object. + + See Also + -------- + Series.quantile : Similar method for Series. 
+ DataFrame.quantile : Similar method for DataFrame. + numpy.percentile : NumPy method to compute qth percentile. + + Examples + -------- + >>> df = pd.DataFrame([ + ... ['a', 1], ['a', 2], ['a', 3], + ... ['b', 1], ['b', 3], ['b', 5] + ... ], columns=['key', 'val']) + >>> df.groupby('key').quantile() + val + key + a 2.0 + b 3.0 + """ + mgr = self._get_data_to_aggregate(numeric_only=numeric_only, name="quantile") + obj = self._wrap_agged_manager(mgr) + if self.axis == 1: + splitter = self.grouper._get_splitter(obj.T, axis=self.axis) + sdata = splitter._sorted_data.T + else: + splitter = self.grouper._get_splitter(obj, axis=self.axis) + sdata = splitter._sorted_data + + starts, ends = lib.generate_slices(splitter._slabels, splitter.ngroups) + + def pre_processor(vals: ArrayLike) -> tuple[np.ndarray, DtypeObj | None]: + if is_object_dtype(vals.dtype): + raise TypeError( + "'quantile' cannot be performed against 'object' dtypes!" + ) + + inference: DtypeObj | None = None + if isinstance(vals, BaseMaskedArray) and is_numeric_dtype(vals.dtype): + out = vals.to_numpy(dtype=float, na_value=np.nan) + inference = vals.dtype + elif is_integer_dtype(vals.dtype): + if isinstance(vals, ExtensionArray): + out = vals.to_numpy(dtype=float, na_value=np.nan) + else: + out = vals + inference = np.dtype(np.int64) + elif is_bool_dtype(vals.dtype) and isinstance(vals, ExtensionArray): + out = vals.to_numpy(dtype=float, na_value=np.nan) + elif is_bool_dtype(vals.dtype): + # GH#51424 deprecate to match Series/DataFrame behavior + warnings.warn( + f"Allowing bool dtype in {type(self).__name__}.quantile is " + "deprecated and will raise in a future version, matching " + "the Series/DataFrame behavior. Cast to uint8 dtype before " + "calling quantile instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + out = np.asarray(vals) + elif needs_i8_conversion(vals.dtype): + inference = vals.dtype + # In this case we need to delay the casting until after the + # np.lexsort below. 
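+                # (the i8 view is taken in blk_func below; post_processor
+                # casts the result back to the dtype stored in ``inference``)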
+ # error: Incompatible return value type (got + # "Tuple[Union[ExtensionArray, ndarray[Any, Any]], Union[Any, + # ExtensionDtype]]", expected "Tuple[ndarray[Any, Any], + # Optional[Union[dtype[Any], ExtensionDtype]]]") + return vals, inference # type: ignore[return-value] + elif isinstance(vals, ExtensionArray) and is_float_dtype(vals.dtype): + inference = np.dtype(np.float64) + out = vals.to_numpy(dtype=float, na_value=np.nan) + else: + out = np.asarray(vals) + + return out, inference + + def post_processor( + vals: np.ndarray, + inference: DtypeObj | None, + result_mask: np.ndarray | None, + orig_vals: ArrayLike, + ) -> ArrayLike: + if inference: + # Check for edge case + if isinstance(orig_vals, BaseMaskedArray): + assert result_mask is not None # for mypy + + if interpolation in {"linear", "midpoint"} and not is_float_dtype( + orig_vals + ): + return FloatingArray(vals, result_mask) + else: + # Item "ExtensionDtype" of "Union[ExtensionDtype, str, + # dtype[Any], Type[object]]" has no attribute "numpy_dtype" + # [union-attr] + with warnings.catch_warnings(): + # vals.astype with nan can warn with numpy >1.24 + warnings.filterwarnings("ignore", category=RuntimeWarning) + return type(orig_vals)( + vals.astype( + inference.numpy_dtype # type: ignore[union-attr] + ), + result_mask, + ) + + elif not ( + is_integer_dtype(inference) + and interpolation in {"linear", "midpoint"} + ): + if needs_i8_conversion(inference): + # error: Item "ExtensionArray" of "Union[ExtensionArray, + # ndarray[Any, Any]]" has no attribute "_ndarray" + vals = vals.astype("i8").view( + orig_vals._ndarray.dtype # type: ignore[union-attr] + ) + # error: Item "ExtensionArray" of "Union[ExtensionArray, + # ndarray[Any, Any]]" has no attribute "_from_backing_data" + return orig_vals._from_backing_data( # type: ignore[union-attr] + vals + ) + + assert isinstance(inference, np.dtype) # for mypy + return vals.astype(inference) + + return vals + + qs = np.array(q, dtype=np.float64) + pass_qs: np.ndarray | None = qs + if is_scalar(q): + qs = np.array([q], dtype=np.float64) + pass_qs = None + + ids, _, ngroups = self.grouper.group_info + nqs = len(qs) + + func = partial( + libgroupby.group_quantile, + labels=ids, + qs=qs, + interpolation=interpolation, + starts=starts, + ends=ends, + ) + + def blk_func(values: ArrayLike) -> ArrayLike: + orig_vals = values + if isinstance(values, BaseMaskedArray): + mask = values._mask + result_mask = np.zeros((ngroups, nqs), dtype=np.bool_) + else: + mask = isna(values) + result_mask = None + + is_datetimelike = needs_i8_conversion(values.dtype) + + vals, inference = pre_processor(values) + + ncols = 1 + if vals.ndim == 2: + ncols = vals.shape[0] + + out = np.empty((ncols, ngroups, nqs), dtype=np.float64) + + if is_datetimelike: + vals = vals.view("i8") + + if vals.ndim == 1: + # EA is always 1d + func( + out[0], + values=vals, + mask=mask, + result_mask=result_mask, + is_datetimelike=is_datetimelike, + ) + else: + for i in range(ncols): + func( + out[i], + values=vals[i], + mask=mask[i], + result_mask=None, + is_datetimelike=is_datetimelike, + ) + + if vals.ndim == 1: + out = out.ravel("K") + if result_mask is not None: + result_mask = result_mask.ravel("K") + else: + out = out.reshape(ncols, ngroups * nqs) + + return post_processor(out, inference, result_mask, orig_vals) + + res_mgr = sdata._mgr.grouped_reduce(blk_func) + + res = self._wrap_agged_manager(res_mgr) + return self._wrap_aggregated_output(res, qs=pass_qs) + + @final + @Substitution(name="groupby") + def ngroup(self, ascending: 
bool = True): + """ + Number each group from 0 to the number of groups - 1. + + This is the enumerative complement of cumcount. Note that the + numbers given to the groups match the order in which the groups + would be seen when iterating over the groupby object, not the + order they are first observed. + + Groups with missing keys (where `pd.isna()` is True) will be labeled with `NaN` + and will be skipped from the count. + + Parameters + ---------- + ascending : bool, default True + If False, number in reverse, from number of group - 1 to 0. + + Returns + ------- + Series + Unique numbers for each group. + + See Also + -------- + .cumcount : Number the rows in each group. + + Examples + -------- + >>> df = pd.DataFrame({"color": ["red", None, "red", "blue", "blue", "red"]}) + >>> df + color + 0 red + 1 None + 2 red + 3 blue + 4 blue + 5 red + >>> df.groupby("color").ngroup() + 0 1.0 + 1 NaN + 2 1.0 + 3 0.0 + 4 0.0 + 5 1.0 + dtype: float64 + >>> df.groupby("color", dropna=False).ngroup() + 0 1 + 1 2 + 2 1 + 3 0 + 4 0 + 5 1 + dtype: int64 + >>> df.groupby("color", dropna=False).ngroup(ascending=False) + 0 1 + 1 0 + 2 1 + 3 2 + 4 2 + 5 1 + dtype: int64 + """ + obj = self._obj_with_exclusions + index = obj._get_axis(self.axis) + comp_ids = self.grouper.group_info[0] + + dtype: type + if self.grouper.has_dropped_na: + comp_ids = np.where(comp_ids == -1, np.nan, comp_ids) + dtype = np.float64 + else: + dtype = np.int64 + + if any(ping._passed_categorical for ping in self.grouper.groupings): + # comp_ids reflect non-observed groups, we need only observed + comp_ids = rank_1d(comp_ids, ties_method="dense") - 1 + + result = self._obj_1d_constructor(comp_ids, index, dtype=dtype) + if not ascending: + result = self.ngroups - 1 - result + return result + + @final + @Substitution(name="groupby") + def cumcount(self, ascending: bool = True): + """ + Number each item in each group from 0 to the length of that group - 1. + + Essentially this is equivalent to + + .. code-block:: python + + self.apply(lambda x: pd.Series(np.arange(len(x)), x.index)) + + Parameters + ---------- + ascending : bool, default True + If False, number in reverse, from length of group - 1 to 0. + + Returns + ------- + Series + Sequence number of each element within each group. + + See Also + -------- + .ngroup : Number the groups themselves. + + Examples + -------- + >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']], + ... columns=['A']) + >>> df + A + 0 a + 1 a + 2 a + 3 b + 4 b + 5 a + >>> df.groupby('A').cumcount() + 0 0 + 1 1 + 2 2 + 3 0 + 4 1 + 5 3 + dtype: int64 + >>> df.groupby('A').cumcount(ascending=False) + 0 3 + 1 2 + 2 1 + 3 1 + 4 0 + 5 0 + dtype: int64 + """ + index = self._obj_with_exclusions._get_axis(self.axis) + cumcounts = self._cumcount_array(ascending=ascending) + return self._obj_1d_constructor(cumcounts, index) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def rank( + self, + method: str = "average", + ascending: bool = True, + na_option: str = "keep", + pct: bool = False, + axis: AxisInt | lib.NoDefault = lib.no_default, + ) -> NDFrameT: + """ + Provide the rank of values within each group. + + Parameters + ---------- + method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' + * average: average rank of group. + * min: lowest rank in group. + * max: highest rank in group. + * first: ranks assigned in order they appear in the array. + * dense: like 'min', but rank always increases by 1 between groups. 
+ ascending : bool, default True + False for ranks by high (1) to low (N). + na_option : {'keep', 'top', 'bottom'}, default 'keep' + * keep: leave NA values where they are. + * top: smallest rank if ascending. + * bottom: smallest rank if descending. + pct : bool, default False + Compute percentage rank of data within each group. + axis : int, default 0 + The axis of the object over which to compute the rank. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + Returns + ------- + DataFrame with ranking of values within each group + %(see_also)s + Examples + -------- + >>> df = pd.DataFrame( + ... { + ... "group": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"], + ... "value": [2, 4, 2, 3, 5, 1, 2, 4, 1, 5], + ... } + ... ) + >>> df + group value + 0 a 2 + 1 a 4 + 2 a 2 + 3 a 3 + 4 a 5 + 5 b 1 + 6 b 2 + 7 b 4 + 8 b 1 + 9 b 5 + >>> for method in ['average', 'min', 'max', 'dense', 'first']: + ... df[f'{method}_rank'] = df.groupby('group')['value'].rank(method) + >>> df + group value average_rank min_rank max_rank dense_rank first_rank + 0 a 2 1.5 1.0 2.0 1.0 1.0 + 1 a 4 4.0 4.0 4.0 3.0 4.0 + 2 a 2 1.5 1.0 2.0 1.0 2.0 + 3 a 3 3.0 3.0 3.0 2.0 3.0 + 4 a 5 5.0 5.0 5.0 4.0 5.0 + 5 b 1 1.5 1.0 2.0 1.0 1.0 + 6 b 2 3.0 3.0 3.0 2.0 3.0 + 7 b 4 4.0 4.0 4.0 3.0 4.0 + 8 b 1 1.5 1.0 2.0 1.0 2.0 + 9 b 5 5.0 5.0 5.0 4.0 5.0 + """ + if na_option not in {"keep", "top", "bottom"}: + msg = "na_option must be one of 'keep', 'top', or 'bottom'" + raise ValueError(msg) + + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "rank") + else: + axis = 0 + + kwargs = { + "ties_method": method, + "ascending": ascending, + "na_option": na_option, + "pct": pct, + } + if axis != 0: + # DataFrame uses different keyword name + kwargs["method"] = kwargs.pop("ties_method") + f = lambda x: x.rank(axis=axis, numeric_only=False, **kwargs) + result = self._python_apply_general( + f, self._selected_obj, is_transform=True + ) + return result + + return self._cython_transform( + "rank", + numeric_only=False, + axis=axis, + **kwargs, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def cumprod( + self, axis: Axis | lib.NoDefault = lib.no_default, *args, **kwargs + ) -> NDFrameT: + """ + Cumulative product for each group. + + Returns + ------- + Series or DataFrame + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([6, 2, 0], index=lst) + >>> ser + a 6 + a 2 + b 0 + dtype: int64 + >>> ser.groupby(level=0).cumprod() + a 6 + a 12 + b 0 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... 
index=["cow", "horse", "bull"]) + >>> df + a b c + cow 1 8 2 + horse 1 2 5 + bull 2 6 9 + >>> df.groupby("a").groups + {1: ['cow', 'horse'], 2: ['bull']} + >>> df.groupby("a").cumprod() + b c + cow 8 2 + horse 16 10 + bull 6 9 + """ + nv.validate_groupby_func("cumprod", args, kwargs, ["numeric_only", "skipna"]) + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "cumprod") + else: + axis = 0 + + if axis != 0: + f = lambda x: x.cumprod(axis=axis, **kwargs) + return self._python_apply_general(f, self._selected_obj, is_transform=True) + + return self._cython_transform("cumprod", **kwargs) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def cumsum( + self, axis: Axis | lib.NoDefault = lib.no_default, *args, **kwargs + ) -> NDFrameT: + """ + Cumulative sum for each group. + + Returns + ------- + Series or DataFrame + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([6, 2, 0], index=lst) + >>> ser + a 6 + a 2 + b 0 + dtype: int64 + >>> ser.groupby(level=0).cumsum() + a 6 + a 8 + b 0 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["fox", "gorilla", "lion"]) + >>> df + a b c + fox 1 8 2 + gorilla 1 2 5 + lion 2 6 9 + >>> df.groupby("a").groups + {1: ['fox', 'gorilla'], 2: ['lion']} + >>> df.groupby("a").cumsum() + b c + fox 8 2 + gorilla 10 7 + lion 6 9 + """ + nv.validate_groupby_func("cumsum", args, kwargs, ["numeric_only", "skipna"]) + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "cumsum") + else: + axis = 0 + + if axis != 0: + f = lambda x: x.cumsum(axis=axis, **kwargs) + return self._python_apply_general(f, self._selected_obj, is_transform=True) + + return self._cython_transform("cumsum", **kwargs) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def cummin( + self, + axis: AxisInt | lib.NoDefault = lib.no_default, + numeric_only: bool = False, + **kwargs, + ) -> NDFrameT: + """ + Cumulative min for each group. + + Returns + ------- + Series or DataFrame + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([1, 6, 2, 3, 0, 4], index=lst) + >>> ser + a 1 + a 6 + a 2 + b 3 + b 0 + b 4 + dtype: int64 + >>> ser.groupby(level=0).cummin() + a 1 + a 1 + a 1 + b 3 + b 0 + b 0 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 0, 2], [1, 1, 5], [6, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... 
index=["snake", "rabbit", "turtle"]) + >>> df + a b c + snake 1 0 2 + rabbit 1 1 5 + turtle 6 6 9 + >>> df.groupby("a").groups + {1: ['snake', 'rabbit'], 6: ['turtle']} + >>> df.groupby("a").cummin() + b c + snake 0 2 + rabbit 0 2 + turtle 6 9 + """ + skipna = kwargs.get("skipna", True) + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "cummin") + else: + axis = 0 + + if axis != 0: + f = lambda x: np.minimum.accumulate(x, axis) + obj = self._selected_obj + if numeric_only: + obj = obj._get_numeric_data() + return self._python_apply_general(f, obj, is_transform=True) + + return self._cython_transform( + "cummin", numeric_only=numeric_only, skipna=skipna + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def cummax( + self, + axis: AxisInt | lib.NoDefault = lib.no_default, + numeric_only: bool = False, + **kwargs, + ) -> NDFrameT: + """ + Cumulative max for each group. + + Returns + ------- + Series or DataFrame + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([1, 6, 2, 3, 1, 4], index=lst) + >>> ser + a 1 + a 6 + a 2 + b 3 + b 1 + b 4 + dtype: int64 + >>> ser.groupby(level=0).cummax() + a 1 + a 6 + a 6 + b 3 + b 3 + b 4 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 1, 0], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["cow", "horse", "bull"]) + >>> df + a b c + cow 1 8 2 + horse 1 1 0 + bull 2 6 9 + >>> df.groupby("a").groups + {1: ['cow', 'horse'], 2: ['bull']} + >>> df.groupby("a").cummax() + b c + cow 8 2 + horse 8 2 + bull 6 9 + """ + skipna = kwargs.get("skipna", True) + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "cummax") + else: + axis = 0 + + if axis != 0: + f = lambda x: np.maximum.accumulate(x, axis) + obj = self._selected_obj + if numeric_only: + obj = obj._get_numeric_data() + return self._python_apply_general(f, obj, is_transform=True) + + return self._cython_transform( + "cummax", numeric_only=numeric_only, skipna=skipna + ) + + @final + @Substitution(name="groupby") + def shift( + self, + periods: int | Sequence[int] = 1, + freq=None, + axis: Axis | lib.NoDefault = lib.no_default, + fill_value=lib.no_default, + suffix: str | None = None, + ): + """ + Shift each group by periods observations. + + If freq is passed, the index will be increased using the periods and the freq. + + Parameters + ---------- + periods : int | Sequence[int], default 1 + Number of periods to shift. If a list of values, shift each group by + each period. + freq : str, optional + Frequency string. + axis : axis to shift, default 0 + Shift direction. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + fill_value : optional + The scalar value to use for newly introduced missing values. + + .. versionchanged:: 2.1.0 + Will raise a ``ValueError`` if ``freq`` is provided too. + + suffix : str, optional + A string to add to each shifted column if there are multiple periods. + Ignored otherwise. + + Returns + ------- + Series or DataFrame + Object shifted within each group. + + See Also + -------- + Index.shift : Shift values of Index. 
+ + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).shift(1) + a NaN + a 1.0 + b NaN + b 3.0 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tuna", "salmon", "catfish", "goldfish"]) + >>> df + a b c + tuna 1 2 3 + salmon 1 5 6 + catfish 2 5 8 + goldfish 2 6 9 + >>> df.groupby("a").shift(1) + b c + tuna NaN NaN + salmon 2.0 3.0 + catfish NaN NaN + goldfish 5.0 8.0 + """ + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "shift") + else: + axis = 0 + + if is_list_like(periods): + if axis == 1: + raise ValueError( + "If `periods` contains multiple shifts, `axis` cannot be 1." + ) + periods = cast(Sequence, periods) + if len(periods) == 0: + raise ValueError("If `periods` is an iterable, it cannot be empty.") + from pandas.core.reshape.concat import concat + + add_suffix = True + else: + if not is_integer(periods): + raise TypeError( + f"Periods must be integer, but {periods} is {type(periods)}." + ) + if suffix: + raise ValueError("Cannot specify `suffix` if `periods` is an int.") + periods = [cast(int, periods)] + add_suffix = False + + shifted_dataframes = [] + for period in periods: + if not is_integer(period): + raise TypeError( + f"Periods must be integer, but {period} is {type(period)}." + ) + period = cast(int, period) + if freq is not None or axis != 0: + f = lambda x: x.shift( + period, freq, axis, fill_value # pylint: disable=cell-var-from-loop + ) + shifted = self._python_apply_general( + f, self._selected_obj, is_transform=True + ) + else: + if fill_value is lib.no_default: + fill_value = None + ids, _, ngroups = self.grouper.group_info + res_indexer = np.zeros(len(ids), dtype=np.int64) + + libgroupby.group_shift_indexer(res_indexer, ids, ngroups, period) + + obj = self._obj_with_exclusions + + shifted = obj._reindex_with_indexers( + {self.axis: (obj.axes[self.axis], res_indexer)}, + fill_value=fill_value, + allow_dups=True, + ) + + if add_suffix: + if isinstance(shifted, Series): + shifted = cast(NDFrameT, shifted.to_frame()) + shifted = shifted.add_suffix( + f"{suffix}_{period}" if suffix else f"_{period}" + ) + shifted_dataframes.append(cast(Union[Series, DataFrame], shifted)) + + return ( + shifted_dataframes[0] + if len(shifted_dataframes) == 1 + else concat(shifted_dataframes, axis=1) + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def diff( + self, periods: int = 1, axis: AxisInt | lib.NoDefault = lib.no_default + ) -> NDFrameT: + """ + First discrete difference of element. + + Calculates the difference of each element compared with another + element in the group (default is element in previous row). + + Parameters + ---------- + periods : int, default 1 + Periods to shift for calculating difference, accepts negative values. + axis : axis to shift, default 0 + Take difference over rows (0) or columns (1). + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + Returns + ------- + Series or DataFrame + First differences. 
+ %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst) + >>> ser + a 7 + a 2 + a 8 + b 4 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).diff() + a NaN + a -5.0 + a 6.0 + b NaN + b -1.0 + b 0.0 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]} + >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog', + ... 'mouse', 'mouse', 'mouse', 'mouse']) + >>> df + a b + dog 1 1 + dog 3 4 + dog 5 8 + mouse 7 4 + mouse 7 4 + mouse 8 2 + mouse 3 1 + >>> df.groupby(level=0).diff() + a b + dog NaN NaN + dog 2.0 3.0 + dog 2.0 4.0 + mouse NaN NaN + mouse 0.0 0.0 + mouse 1.0 -2.0 + mouse -5.0 -1.0 + """ + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "diff") + else: + axis = 0 + + if axis != 0: + return self.apply(lambda x: x.diff(periods=periods, axis=axis)) + + obj = self._obj_with_exclusions + shifted = self.shift(periods=periods) + + # GH45562 - to retain existing behavior and match behavior of Series.diff(), + # int8 and int16 are coerced to float32 rather than float64. + dtypes_to_f32 = ["int8", "int16"] + if obj.ndim == 1: + if obj.dtype in dtypes_to_f32: + shifted = shifted.astype("float32") + else: + to_coerce = [c for c, dtype in obj.dtypes.items() if dtype in dtypes_to_f32] + if len(to_coerce): + shifted = shifted.astype({c: "float32" for c in to_coerce}) + + return obj - shifted + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def pct_change( + self, + periods: int = 1, + fill_method: FillnaOptions | None | lib.NoDefault = lib.no_default, + limit: int | None | lib.NoDefault = lib.no_default, + freq=None, + axis: Axis | lib.NoDefault = lib.no_default, + ): + """ + Calculate pct_change of each value to previous entry in group. + + Returns + ------- + Series or DataFrame + Percentage changes within each group. + %(see_also)s + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).pct_change() + a NaN + a 1.000000 + b NaN + b 0.333333 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tuna", "salmon", "catfish", "goldfish"]) + >>> df + a b c + tuna 1 2 3 + salmon 1 5 6 + catfish 2 5 8 + goldfish 2 6 9 + >>> df.groupby("a").pct_change() + b c + tuna NaN NaN + salmon 1.5 1.000 + catfish NaN NaN + goldfish 0.2 0.125 + """ + # GH#53491 + if fill_method not in (lib.no_default, None) or limit is not lib.no_default: + warnings.warn( + "The 'fill_method' keyword being not None and the 'limit' keyword in " + f"{type(self).__name__}.pct_change are deprecated and will be removed " + "in a future version. Either fill in any non-leading NA values prior " + "to calling pct_change or specify 'fill_method=None' to not fill NA " + "values.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if fill_method is lib.no_default: + if limit is lib.no_default and any( + grp.isna().values.any() for _, grp in self + ): + warnings.warn( + "The default fill_method='ffill' in " + f"{type(self).__name__}.pct_change is deprecated and will " + "be removed in a future version. 
Either fill in any " + "non-leading NA values prior to calling pct_change or " + "specify 'fill_method=None' to not fill NA values.", + FutureWarning, + stacklevel=find_stack_level(), + ) + fill_method = "ffill" + if limit is lib.no_default: + limit = None + + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "pct_change") + else: + axis = 0 + + # TODO(GH#23918): Remove this conditional for SeriesGroupBy when + # GH#23918 is fixed + if freq is not None or axis != 0: + f = lambda x: x.pct_change( + periods=periods, + fill_method=fill_method, + limit=limit, + freq=freq, + axis=axis, + ) + return self._python_apply_general(f, self._selected_obj, is_transform=True) + + if fill_method is None: # GH30463 + fill_method = "ffill" + limit = 0 + filled = getattr(self, fill_method)(limit=limit) + if self.axis == 0: + fill_grp = filled.groupby(self.grouper.codes, group_keys=self.group_keys) + else: + fill_grp = filled.T.groupby(self.grouper.codes, group_keys=self.group_keys) + shifted = fill_grp.shift(periods=periods, freq=freq) + if self.axis == 1: + shifted = shifted.T + return (filled / shifted) - 1 + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def head(self, n: int = 5) -> NDFrameT: + """ + Return first n rows of each group. + + Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows + from the original DataFrame with original index and order preserved + (``as_index`` flag is ignored). + + Parameters + ---------- + n : int + If positive: number of entries to include from start of each group. + If negative: number of entries to exclude from end of each group. + + Returns + ------- + Series or DataFrame + Subset of original Series or DataFrame as determined by n. + %(see_also)s + Examples + -------- + + >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], + ... columns=['A', 'B']) + >>> df.groupby('A').head(1) + A B + 0 1 2 + 2 5 6 + >>> df.groupby('A').head(-1) + A B + 0 1 2 + """ + mask = self._make_mask_from_positional_indexer(slice(None, n)) + return self._mask_selected_obj(mask) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def tail(self, n: int = 5) -> NDFrameT: + """ + Return last n rows of each group. + + Similar to ``.apply(lambda x: x.tail(n))``, but it returns a subset of rows + from the original DataFrame with original index and order preserved + (``as_index`` flag is ignored). + + Parameters + ---------- + n : int + If positive: number of entries to include from end of each group. + If negative: number of entries to exclude from start of each group. + + Returns + ------- + Series or DataFrame + Subset of original Series or DataFrame as determined by n. + %(see_also)s + Examples + -------- + + >>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]], + ... columns=['A', 'B']) + >>> df.groupby('A').tail(1) + A B + 1 a 2 + 3 b 2 + >>> df.groupby('A').tail(-1) + A B + 1 a 2 + 3 b 2 + """ + if n: + mask = self._make_mask_from_positional_indexer(slice(-n, None)) + else: + mask = self._make_mask_from_positional_indexer([]) + + return self._mask_selected_obj(mask) + + @final + def _mask_selected_obj(self, mask: npt.NDArray[np.bool_]) -> NDFrameT: + """ + Return _selected_obj with mask applied to the correct axis. + + Parameters + ---------- + mask : np.ndarray[bool] + Boolean mask to apply. + + Returns + ------- + Series or DataFrame + Filtered _selected_obj. 
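+
+        Notes
+        -----
+        Rows in NA groups that were dropped by the grouper (``ids == -1``)
+        are always excluded, regardless of ``mask``.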
+ """ + ids = self.grouper.group_info[0] + mask = mask & (ids != -1) + + if self.axis == 0: + return self._selected_obj[mask] + else: + return self._selected_obj.iloc[:, mask] + + @final + def _reindex_output( + self, + output: OutputFrameOrSeries, + fill_value: Scalar = np.nan, + qs: npt.NDArray[np.float64] | None = None, + ) -> OutputFrameOrSeries: + """ + If we have categorical groupers, then we might want to make sure that + we have a fully re-indexed output to the levels. This means expanding + the output space to accommodate all values in the cartesian product of + our groups, regardless of whether they were observed in the data or + not. This will expand the output space if there are missing groups. + + The method returns early without modifying the input if the number of + groupings is less than 2, self.observed == True or none of the groupers + are categorical. + + Parameters + ---------- + output : Series or DataFrame + Object resulting from grouping and applying an operation. + fill_value : scalar, default np.nan + Value to use for unobserved categories if self.observed is False. + qs : np.ndarray[float64] or None, default None + quantile values, only relevant for quantile. + + Returns + ------- + Series or DataFrame + Object (potentially) re-indexed to include all possible groups. + """ + groupings = self.grouper.groupings + if len(groupings) == 1: + return output + + # if we only care about the observed values + # we are done + elif self.observed: + return output + + # reindexing only applies to a Categorical grouper + elif not any( + isinstance(ping.grouping_vector, (Categorical, CategoricalIndex)) + for ping in groupings + ): + return output + + levels_list = [ping.group_index for ping in groupings] + names = self.grouper.names + if qs is not None: + # error: Argument 1 to "append" of "list" has incompatible type + # "ndarray[Any, dtype[floating[_64Bit]]]"; expected "Index" + levels_list.append(qs) # type: ignore[arg-type] + names = names + [None] + index = MultiIndex.from_product(levels_list, names=names) + if self.sort: + index = index.sort_values() + + if self.as_index: + # Always holds for SeriesGroupBy unless GH#36507 is implemented + d = { + self.obj._get_axis_name(self.axis): index, + "copy": False, + "fill_value": fill_value, + } + return output.reindex(**d) # type: ignore[arg-type] + + # GH 13204 + # Here, the categorical in-axis groupers, which need to be fully + # expanded, are columns in `output`. An idea is to do: + # output = output.set_index(self.grouper.names) + # .reindex(index).reset_index() + # but special care has to be taken because of possible not-in-axis + # groupers. + # So, we manually select and drop the in-axis grouper columns, + # reindex `output`, and then reset the in-axis grouper columns. 
+ + # Select in-axis groupers + in_axis_grps = [ + (i, ping.name) for (i, ping) in enumerate(groupings) if ping.in_axis + ] + if len(in_axis_grps) > 0: + g_nums, g_names = zip(*in_axis_grps) + output = output.drop(labels=list(g_names), axis=1) + + # Set a temp index and reindex (possibly expanding) + output = output.set_index(self.grouper.result_index).reindex( + index, copy=False, fill_value=fill_value + ) + + # Reset in-axis grouper columns + # (using level numbers `g_nums` because level names may not be unique) + if len(in_axis_grps) > 0: + output = output.reset_index(level=g_nums) + + return output.reset_index(drop=True) + + @final + def sample( + self, + n: int | None = None, + frac: float | None = None, + replace: bool = False, + weights: Sequence | Series | None = None, + random_state: RandomState | None = None, + ): + """ + Return a random sample of items from each group. + + You can use `random_state` for reproducibility. + + Parameters + ---------- + n : int, optional + Number of items to return for each group. Cannot be used with + `frac` and must be no larger than the smallest group unless + `replace` is True. Default is one if `frac` is None. + frac : float, optional + Fraction of items to return. Cannot be used with `n`. + replace : bool, default False + Allow or disallow sampling of the same row more than once. + weights : list-like, optional + Default None results in equal probability weighting. + If passed a list-like then values must have the same length as + the underlying DataFrame or Series object and will be used as + sampling probabilities after normalization within each group. + Values must be non-negative with at least one positive element + within each group. + random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional + If int, array-like, or BitGenerator, seed for random number generator. + If np.random.RandomState or np.random.Generator, use as given. + + .. versionchanged:: 1.4.0 + + np.random.Generator objects now accepted + + Returns + ------- + Series or DataFrame + A new object of same type as caller containing items randomly + sampled within each group from the caller object. + + See Also + -------- + DataFrame.sample: Generate random samples from a DataFrame object. + numpy.random.choice: Generate a random sample from a given 1-D numpy + array. + + Examples + -------- + >>> df = pd.DataFrame( + ... {"a": ["red"] * 2 + ["blue"] * 2 + ["black"] * 2, "b": range(6)} + ... ) + >>> df + a b + 0 red 0 + 1 red 1 + 2 blue 2 + 3 blue 3 + 4 black 4 + 5 black 5 + + Select one row at random for each distinct value in column a. The + `random_state` argument can be used to guarantee reproducibility: + + >>> df.groupby("a").sample(n=1, random_state=1) + a b + 4 black 4 + 2 blue 2 + 1 red 1 + + Set `frac` to sample fixed proportions rather than counts: + + >>> df.groupby("a")["b"].sample(frac=0.5, random_state=2) + 5 5 + 2 2 + 0 0 + Name: b, dtype: int64 + + Control sample probabilities within groups by setting weights: + + >>> df.groupby("a").sample( + ... n=1, + ... weights=[1, 1, 1, 0, 0, 1], + ... random_state=1, + ... 
) + a b + 5 black 5 + 2 blue 2 + 0 red 0 + """ # noqa: E501 + if self._selected_obj.empty: + # GH48459 prevent ValueError when object is empty + return self._selected_obj + size = sample.process_sampling_size(n, frac, replace) + if weights is not None: + weights_arr = sample.preprocess_weights( + self._selected_obj, weights, axis=self.axis + ) + + random_state = com.random_state(random_state) + + group_iterator = self.grouper.get_iterator(self._selected_obj, self.axis) + + sampled_indices = [] + for labels, obj in group_iterator: + grp_indices = self.indices[labels] + group_size = len(grp_indices) + if size is not None: + sample_size = size + else: + assert frac is not None + sample_size = round(frac * group_size) + + grp_sample = sample.sample( + group_size, + size=sample_size, + replace=replace, + weights=None if weights is None else weights_arr[grp_indices], + random_state=random_state, + ) + sampled_indices.append(grp_indices[grp_sample]) + + sampled_indices = np.concatenate(sampled_indices) + return self._selected_obj.take(sampled_indices, axis=self.axis) + + +@doc(GroupBy) +def get_groupby( + obj: NDFrame, + by: _KeysArgType | None = None, + axis: AxisInt = 0, + grouper: ops.BaseGrouper | None = None, + group_keys: bool = True, +) -> GroupBy: + klass: type[GroupBy] + if isinstance(obj, Series): + from pandas.core.groupby.generic import SeriesGroupBy + + klass = SeriesGroupBy + elif isinstance(obj, DataFrame): + from pandas.core.groupby.generic import DataFrameGroupBy + + klass = DataFrameGroupBy + else: # pragma: no cover + raise TypeError(f"invalid type: {obj}") + + return klass( + obj=obj, + keys=by, + axis=axis, + grouper=grouper, + group_keys=group_keys, + ) + + +def _insert_quantile_level(idx: Index, qs: npt.NDArray[np.float64]) -> MultiIndex: + """ + Insert the sequence 'qs' of quantiles as the inner-most level of a MultiIndex. + + The quantile level in the MultiIndex is a repeated copy of 'qs'. + + Parameters + ---------- + idx : Index + qs : np.ndarray[float64] + + Returns + ------- + MultiIndex + """ + nqs = len(qs) + lev_codes, lev = Index(qs).factorize() + lev_codes = coerce_indexer_dtype(lev_codes, lev) + + if idx._is_multi: + idx = cast(MultiIndex, idx) + levels = list(idx.levels) + [lev] + codes = [np.repeat(x, nqs) for x in idx.codes] + [np.tile(lev_codes, len(idx))] + mi = MultiIndex(levels=levels, codes=codes, names=idx.names + [None]) + else: + nidx = len(idx) + idx_codes = coerce_indexer_dtype(np.arange(nidx), idx) + levels = [idx, lev] + codes = [np.repeat(idx_codes, nqs), np.tile(lev_codes, nidx)] + mi = MultiIndex(levels=levels, codes=codes, names=[idx.name, None]) + + return mi diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/grouper.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/grouper.py new file mode 100644 index 00000000..9877ddf0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/grouper.py @@ -0,0 +1,1068 @@ +""" +Provide user facing operators for doing the split part of the +split-apply-combine paradigm. 
+""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + final, +) +import warnings + +import numpy as np + +from pandas._config import using_copy_on_write + +from pandas._libs import lib +from pandas._libs.tslibs import OutOfBoundsDatetime +from pandas.errors import InvalidIndexError +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_list_like, + is_scalar, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +from pandas.core import algorithms +from pandas.core.arrays import ( + Categorical, + ExtensionArray, +) +import pandas.core.common as com +from pandas.core.frame import DataFrame +from pandas.core.groupby import ops +from pandas.core.groupby.categorical import recode_for_groupby +from pandas.core.indexes.api import ( + CategoricalIndex, + Index, + MultiIndex, +) +from pandas.core.series import Series + +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + ) + + from pandas._typing import ( + ArrayLike, + Axis, + NDFrameT, + npt, + ) + + from pandas.core.generic import NDFrame + + +class Grouper: + """ + A Grouper allows the user to specify a groupby instruction for an object. + + This specification will select a column via the key parameter, or if the + level and/or axis parameters are given, a level of the index of the target + object. + + If `axis` and/or `level` are passed as keywords to both `Grouper` and + `groupby`, the values passed to `Grouper` take precedence. + + Parameters + ---------- + key : str, defaults to None + Groupby key, which selects the grouping column of the target. + level : name/number, defaults to None + The level for the target index. + freq : str / frequency object, defaults to None + This will groupby the specified frequency if the target selection + (via key or level) is a datetime-like object. For full specification + of available frequencies, please see `here + `_. + axis : str, int, defaults to 0 + Number/name of the axis. + sort : bool, default to False + Whether to sort the resulting labels. + closed : {'left' or 'right'} + Closed end of interval. Only when `freq` parameter is passed. + label : {'left' or 'right'} + Interval boundary to use for labeling. + Only when `freq` parameter is passed. + convention : {'start', 'end', 'e', 's'} + If grouper is PeriodIndex and `freq` parameter is passed. + + origin : Timestamp or str, default 'start_day' + The timestamp on which to adjust the grouping. The timezone of origin must + match the timezone of the index. + If string, must be one of the following: + + - 'epoch': `origin` is 1970-01-01 + - 'start': `origin` is the first value of the timeseries + - 'start_day': `origin` is the first day at midnight of the timeseries + + - 'end': `origin` is the last value of the timeseries + - 'end_day': `origin` is the ceiling midnight of the last day + + .. versionadded:: 1.3.0 + + offset : Timedelta or str, default is None + An offset timedelta added to the origin. + + dropna : bool, default True + If True, and if group keys contain NA values, NA values together with + row/column will be dropped. If False, NA values will also be treated as + the key in groups. + + .. versionadded:: 1.2.0 + + Returns + ------- + Grouper or pandas.api.typing.TimeGrouper + A TimeGrouper is returned if ``freq`` is not ``None``. Otherwise, a Grouper + is returned. 
+ + Examples + -------- + ``df.groupby(pd.Grouper(key="Animal"))`` is equivalent to ``df.groupby('Animal')`` + + >>> df = pd.DataFrame( + ... { + ... "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"], + ... "Speed": [100, 5, 200, 300, 15], + ... } + ... ) + >>> df + Animal Speed + 0 Falcon 100 + 1 Parrot 5 + 2 Falcon 200 + 3 Falcon 300 + 4 Parrot 15 + >>> df.groupby(pd.Grouper(key="Animal")).mean() + Speed + Animal + Falcon 200.0 + Parrot 10.0 + + Specify a resample operation on the column 'Publish date' + + >>> df = pd.DataFrame( + ... { + ... "Publish date": [ + ... pd.Timestamp("2000-01-02"), + ... pd.Timestamp("2000-01-02"), + ... pd.Timestamp("2000-01-09"), + ... pd.Timestamp("2000-01-16") + ... ], + ... "ID": [0, 1, 2, 3], + ... "Price": [10, 20, 30, 40] + ... } + ... ) + >>> df + Publish date ID Price + 0 2000-01-02 0 10 + 1 2000-01-02 1 20 + 2 2000-01-09 2 30 + 3 2000-01-16 3 40 + >>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean() + ID Price + Publish date + 2000-01-02 0.5 15.0 + 2000-01-09 2.0 30.0 + 2000-01-16 3.0 40.0 + + If you want to adjust the start of the bins based on a fixed timestamp: + + >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' + >>> rng = pd.date_range(start, end, freq='7min') + >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) + >>> ts + 2000-10-01 23:30:00 0 + 2000-10-01 23:37:00 3 + 2000-10-01 23:44:00 6 + 2000-10-01 23:51:00 9 + 2000-10-01 23:58:00 12 + 2000-10-02 00:05:00 15 + 2000-10-02 00:12:00 18 + 2000-10-02 00:19:00 21 + 2000-10-02 00:26:00 24 + Freq: 7T, dtype: int64 + + >>> ts.groupby(pd.Grouper(freq='17min')).sum() + 2000-10-01 23:14:00 0 + 2000-10-01 23:31:00 9 + 2000-10-01 23:48:00 21 + 2000-10-02 00:05:00 54 + 2000-10-02 00:22:00 24 + Freq: 17T, dtype: int64 + + >>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum() + 2000-10-01 23:18:00 0 + 2000-10-01 23:35:00 18 + 2000-10-01 23:52:00 27 + 2000-10-02 00:09:00 39 + 2000-10-02 00:26:00 24 + Freq: 17T, dtype: int64 + + >>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum() + 2000-10-01 23:24:00 3 + 2000-10-01 23:41:00 15 + 2000-10-01 23:58:00 45 + 2000-10-02 00:15:00 45 + Freq: 17T, dtype: int64 + + If you want to adjust the start of the bins with an `offset` Timedelta, the two + following lines are equivalent: + + >>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum() + 2000-10-01 23:30:00 9 + 2000-10-01 23:47:00 21 + 2000-10-02 00:04:00 54 + 2000-10-02 00:21:00 24 + Freq: 17T, dtype: int64 + + >>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum() + 2000-10-01 23:30:00 9 + 2000-10-01 23:47:00 21 + 2000-10-02 00:04:00 54 + 2000-10-02 00:21:00 24 + Freq: 17T, dtype: int64 + + To replace the use of the deprecated `base` argument, you can now use `offset`, + in this example it is equivalent to have `base=2`: + + >>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum() + 2000-10-01 23:16:00 0 + 2000-10-01 23:33:00 9 + 2000-10-01 23:50:00 36 + 2000-10-02 00:07:00 39 + 2000-10-02 00:24:00 24 + Freq: 17T, dtype: int64 + """ + + sort: bool + dropna: bool + _gpr_index: Index | None + _grouper: Index | None + + _attributes: tuple[str, ...] 
= ("key", "level", "freq", "axis", "sort", "dropna") + + def __new__(cls, *args, **kwargs): + if kwargs.get("freq") is not None: + from pandas.core.resample import TimeGrouper + + cls = TimeGrouper + return super().__new__(cls) + + def __init__( + self, + key=None, + level=None, + freq=None, + axis: Axis | lib.NoDefault = lib.no_default, + sort: bool = False, + dropna: bool = True, + ) -> None: + if type(self) is Grouper: + # i.e. not TimeGrouper + if axis is not lib.no_default: + warnings.warn( + "Grouper axis keyword is deprecated and will be removed in a " + "future version. To group on axis=1, use obj.T.groupby(...) " + "instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + axis = 0 + if axis is lib.no_default: + axis = 0 + + self.key = key + self.level = level + self.freq = freq + self.axis = axis + self.sort = sort + self.dropna = dropna + + self._grouper_deprecated = None + self._indexer_deprecated = None + self._obj_deprecated = None + self._gpr_index = None + self.binner = None + self._grouper = None + self._indexer = None + + def _get_grouper( + self, obj: NDFrameT, validate: bool = True + ) -> tuple[ops.BaseGrouper, NDFrameT]: + """ + Parameters + ---------- + obj : Series or DataFrame + validate : bool, default True + if True, validate the grouper + + Returns + ------- + a tuple of grouper, obj (possibly sorted) + """ + obj, _, _ = self._set_grouper(obj) + grouper, _, obj = get_grouper( + obj, + [self.key], + axis=self.axis, + level=self.level, + sort=self.sort, + validate=validate, + dropna=self.dropna, + ) + # Without setting this, subsequent lookups to .groups raise + # error: Incompatible types in assignment (expression has type "BaseGrouper", + # variable has type "None") + self._grouper_deprecated = grouper # type: ignore[assignment] + + return grouper, obj + + @final + def _set_grouper( + self, obj: NDFrame, sort: bool = False, *, gpr_index: Index | None = None + ): + """ + given an object and the specifications, setup the internal grouper + for this particular specification + + Parameters + ---------- + obj : Series or DataFrame + sort : bool, default False + whether the resulting grouper should be sorted + gpr_index : Index or None, default None + + Returns + ------- + NDFrame + Index + np.ndarray[np.intp] | None + """ + assert obj is not None + + indexer = None + + if self.key is not None and self.level is not None: + raise ValueError("The Grouper cannot specify both a key and a level!") + + # Keep self._grouper value before overriding + if self._grouper is None: + # TODO: What are we assuming about subsequent calls? + self._grouper = gpr_index + self._indexer = self._indexer_deprecated + + # the key must be a valid info item + if self.key is not None: + key = self.key + # The 'on' is already defined + if getattr(gpr_index, "name", None) == key and isinstance(obj, Series): + # Sometimes self._grouper will have been resorted while + # obj has not. In this case there is a mismatch when we + # call self._grouper.take(obj.index) so we need to undo the sorting + # before we call _grouper.take. 
+ assert self._grouper is not None + if self._indexer is not None: + reverse_indexer = self._indexer.argsort() + unsorted_ax = self._grouper.take(reverse_indexer) + ax = unsorted_ax.take(obj.index) + else: + ax = self._grouper.take(obj.index) + else: + if key not in obj._info_axis: + raise KeyError(f"The grouper name {key} is not found") + ax = Index(obj[key], name=key) + + else: + ax = obj._get_axis(self.axis) + if self.level is not None: + level = self.level + + # if a level is given it must be a mi level or + # equivalent to the axis name + if isinstance(ax, MultiIndex): + level = ax._get_level_number(level) + ax = Index(ax._get_level_values(level), name=ax.names[level]) + + else: + if level not in (0, ax.name): + raise ValueError(f"The level {level} is not valid") + + # possibly sort + if (self.sort or sort) and not ax.is_monotonic_increasing: + # use stable sort to support first, last, nth + # TODO: why does putting na_position="first" fix datetimelike cases? + indexer = self._indexer_deprecated = ax.array.argsort( + kind="mergesort", na_position="first" + ) + ax = ax.take(indexer) + obj = obj.take(indexer, axis=self.axis) + + # error: Incompatible types in assignment (expression has type + # "NDFrameT", variable has type "None") + self._obj_deprecated = obj # type: ignore[assignment] + self._gpr_index = ax + return obj, ax, indexer + + @final + @property + def ax(self) -> Index: + warnings.warn( + f"{type(self).__name__}.ax is deprecated and will be removed in a " + "future version. Use Resampler.ax instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + index = self._gpr_index + if index is None: + raise ValueError("_set_grouper must be called before ax is accessed") + return index + + @final + @property + def indexer(self): + warnings.warn( + f"{type(self).__name__}.indexer is deprecated and will be removed " + "in a future version. Use Resampler.indexer instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._indexer_deprecated + + @final + @property + def obj(self): + warnings.warn( + f"{type(self).__name__}.obj is deprecated and will be removed " + "in a future version. Use GroupBy.indexer instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._obj_deprecated + + @final + @property + def grouper(self): + warnings.warn( + f"{type(self).__name__}.grouper is deprecated and will be removed " + "in a future version. Use GroupBy.grouper instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._grouper_deprecated + + @final + @property + def groups(self): + warnings.warn( + f"{type(self).__name__}.groups is deprecated and will be removed " + "in a future version. 
Use GroupBy.groups instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + # error: "None" has no attribute "groups" + return self._grouper_deprecated.groups # type: ignore[attr-defined] + + @final + def __repr__(self) -> str: + attrs_list = ( + f"{attr_name}={repr(getattr(self, attr_name))}" + for attr_name in self._attributes + if getattr(self, attr_name) is not None + ) + attrs = ", ".join(attrs_list) + cls_name = type(self).__name__ + return f"{cls_name}({attrs})" + + +@final +class Grouping: + """ + Holds the grouping information for a single key + + Parameters + ---------- + index : Index + grouper : + obj : DataFrame or Series + name : Label + level : + observed : bool, default False + If we are a Categorical, use the observed values + in_axis : if the Grouping is a column in self.obj and hence among + Groupby.exclusions list + dropna : bool, default True + Whether to drop NA groups. + uniques : Array-like, optional + When specified, will be used for unique values. Enables including empty groups + in the result for a BinGrouper. Must not contain duplicates. + + Attributes + ------- + indices : dict + Mapping of {group -> index_list} + codes : ndarray + Group codes + group_index : Index or None + unique groups + groups : dict + Mapping of {group -> label_list} + """ + + _codes: npt.NDArray[np.signedinteger] | None = None + _group_index: Index | None = None + _all_grouper: Categorical | None + _orig_cats: Index | None + _index: Index + + def __init__( + self, + index: Index, + grouper=None, + obj: NDFrame | None = None, + level=None, + sort: bool = True, + observed: bool = False, + in_axis: bool = False, + dropna: bool = True, + uniques: ArrayLike | None = None, + ) -> None: + self.level = level + self._orig_grouper = grouper + grouping_vector = _convert_grouper(index, grouper) + self._all_grouper = None + self._orig_cats = None + self._index = index + self._sort = sort + self.obj = obj + self._observed = observed + self.in_axis = in_axis + self._dropna = dropna + self._uniques = uniques + + # we have a single grouper which may be a myriad of things, + # some of which are dependent on the passing in level + + ilevel = self._ilevel + if ilevel is not None: + # In extant tests, the new self.grouping_vector matches + # `index.get_level_values(ilevel)` whenever + # mapper is None and isinstance(index, MultiIndex) + if isinstance(index, MultiIndex): + index_level = index.get_level_values(ilevel) + else: + index_level = index + + if grouping_vector is None: + grouping_vector = index_level + else: + mapper = grouping_vector + grouping_vector = index_level.map(mapper) + + # a passed Grouper like, directly get the grouper in the same way + # as single grouper groupby, use the group_info to get codes + elif isinstance(grouping_vector, Grouper): + # get the new grouper; we already have disambiguated + # what key/level refer to exactly, don't need to + # check again as we have by this point converted these + # to an actual value (rather than a pd.Grouper) + assert self.obj is not None # for mypy + newgrouper, newobj = grouping_vector._get_grouper(self.obj, validate=False) + self.obj = newobj + + if isinstance(newgrouper, ops.BinGrouper): + # TODO: can we unwrap this and get a tighter typing + # for self.grouping_vector? + grouping_vector = newgrouper + else: + # ops.BaseGrouper + # TODO: 2023-02-03 no test cases with len(newgrouper.groupings) > 1. + # If that were to occur, would we be throwing out information? 
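+                # Illustration (hypothetical key): for pd.Grouper(key="A"),
+                # the first grouping's vector is re-wrapped below as an Index
+                # named after newgrouper.result_index, so the name "A"
+                # survives into the result.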
+ # error: Cannot determine type of "grouping_vector" [has-type] + ng = newgrouper.groupings[0].grouping_vector # type: ignore[has-type] + # use Index instead of ndarray so we can recover the name + grouping_vector = Index(ng, name=newgrouper.result_index.name) + + elif not isinstance( + grouping_vector, (Series, Index, ExtensionArray, np.ndarray) + ): + # no level passed + if getattr(grouping_vector, "ndim", 1) != 1: + t = str(type(grouping_vector)) + raise ValueError(f"Grouper for '{t}' not 1-dimensional") + + grouping_vector = index.map(grouping_vector) + + if not ( + hasattr(grouping_vector, "__len__") + and len(grouping_vector) == len(index) + ): + grper = pprint_thing(grouping_vector) + errmsg = ( + "Grouper result violates len(labels) == " + f"len(data)\nresult: {grper}" + ) + raise AssertionError(errmsg) + + if isinstance(grouping_vector, np.ndarray): + if grouping_vector.dtype.kind in "mM": + # if we have a date/time-like grouper, make sure that we have + # Timestamps like + # TODO 2022-10-08 we only have one test that gets here and + # values are already in nanoseconds in that case. + grouping_vector = Series(grouping_vector).to_numpy() + elif isinstance(getattr(grouping_vector, "dtype", None), CategoricalDtype): + # a passed Categorical + self._orig_cats = grouping_vector.categories + grouping_vector, self._all_grouper = recode_for_groupby( + grouping_vector, sort, observed + ) + + self.grouping_vector = grouping_vector + + def __repr__(self) -> str: + return f"Grouping({self.name})" + + def __iter__(self) -> Iterator: + return iter(self.indices) + + @cache_readonly + def _passed_categorical(self) -> bool: + dtype = getattr(self.grouping_vector, "dtype", None) + return isinstance(dtype, CategoricalDtype) + + @cache_readonly + def name(self) -> Hashable: + ilevel = self._ilevel + if ilevel is not None: + return self._index.names[ilevel] + + if isinstance(self._orig_grouper, (Index, Series)): + return self._orig_grouper.name + + elif isinstance(self.grouping_vector, ops.BaseGrouper): + return self.grouping_vector.result_index.name + + elif isinstance(self.grouping_vector, Index): + return self.grouping_vector.name + + # otherwise we have ndarray or ExtensionArray -> no name + return None + + @cache_readonly + def _ilevel(self) -> int | None: + """ + If necessary, converted index level name to index level position. + """ + level = self.level + if level is None: + return None + if not isinstance(level, int): + index = self._index + if level not in index.names: + raise AssertionError(f"Level {level} not in index") + return index.names.index(level) + return level + + @property + def ngroups(self) -> int: + return len(self.group_index) + + @cache_readonly + def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: + # we have a list of groupers + if isinstance(self.grouping_vector, ops.BaseGrouper): + return self.grouping_vector.indices + + values = Categorical(self.grouping_vector) + return values._reverse_indexer() + + @property + def codes(self) -> npt.NDArray[np.signedinteger]: + return self._codes_and_uniques[0] + + @cache_readonly + def group_arraylike(self) -> ArrayLike: + """ + Analogous to result_index, but holding an ArrayLike to ensure + we can retain ExtensionDtypes. 
+ """ + if self._all_grouper is not None: + # retain dtype for categories, including unobserved ones + return self.result_index._values + + elif self._passed_categorical: + return self.group_index._values + + return self._codes_and_uniques[1] + + @cache_readonly + def result_index(self) -> Index: + # result_index retains dtype for categories, including unobserved ones, + # which group_index does not + if self._all_grouper is not None: + group_idx = self.group_index + assert isinstance(group_idx, CategoricalIndex) + cats = self._orig_cats + # set_categories is dynamically added + return group_idx.set_categories(cats) # type: ignore[attr-defined] + return self.group_index + + @cache_readonly + def group_index(self) -> Index: + codes, uniques = self._codes_and_uniques + if not self._dropna and self._passed_categorical: + assert isinstance(uniques, Categorical) + if self._sort and (codes == len(uniques)).any(): + # Add NA value on the end when sorting + uniques = Categorical.from_codes( + np.append(uniques.codes, [-1]), uniques.categories, validate=False + ) + elif len(codes) > 0: + # Need to determine proper placement of NA value when not sorting + cat = self.grouping_vector + na_idx = (cat.codes < 0).argmax() + if cat.codes[na_idx] < 0: + # count number of unique codes that comes before the nan value + na_unique_idx = algorithms.nunique_ints(cat.codes[:na_idx]) + new_codes = np.insert(uniques.codes, na_unique_idx, -1) + uniques = Categorical.from_codes( + new_codes, uniques.categories, validate=False + ) + return Index._with_infer(uniques, name=self.name) + + @cache_readonly + def _codes_and_uniques(self) -> tuple[npt.NDArray[np.signedinteger], ArrayLike]: + uniques: ArrayLike + if self._passed_categorical: + # we make a CategoricalIndex out of the cat grouper + # preserving the categories / ordered attributes; + # doesn't (yet - GH#46909) handle dropna=False + cat = self.grouping_vector + categories = cat.categories + + if self._observed: + ucodes = algorithms.unique1d(cat.codes) + ucodes = ucodes[ucodes != -1] + if self._sort: + ucodes = np.sort(ucodes) + else: + ucodes = np.arange(len(categories)) + + uniques = Categorical.from_codes( + codes=ucodes, categories=categories, ordered=cat.ordered, validate=False + ) + + codes = cat.codes + if not self._dropna: + na_mask = codes < 0 + if np.any(na_mask): + if self._sort: + # Replace NA codes with `largest code + 1` + na_code = len(categories) + codes = np.where(na_mask, na_code, codes) + else: + # Insert NA code into the codes based on first appearance + # A negative code must exist, no need to check codes[na_idx] < 0 + na_idx = na_mask.argmax() + # count number of unique codes that comes before the nan value + na_code = algorithms.nunique_ints(codes[:na_idx]) + codes = np.where(codes >= na_code, codes + 1, codes) + codes = np.where(na_mask, na_code, codes) + + if not self._observed: + uniques = uniques.reorder_categories(self._orig_cats) + + return codes, uniques + + elif isinstance(self.grouping_vector, ops.BaseGrouper): + # we have a list of groupers + codes = self.grouping_vector.codes_info + uniques = self.grouping_vector.result_index._values + elif self._uniques is not None: + # GH#50486 Code grouping_vector using _uniques; allows + # including uniques that are not present in grouping_vector. 
+ cat = Categorical(self.grouping_vector, categories=self._uniques) + codes = cat.codes + uniques = self._uniques + else: + # GH35667, replace dropna=False with use_na_sentinel=False + # error: Incompatible types in assignment (expression has type "Union[ + # ndarray[Any, Any], Index]", variable has type "Categorical") + codes, uniques = algorithms.factorize( # type: ignore[assignment] + self.grouping_vector, sort=self._sort, use_na_sentinel=self._dropna + ) + return codes, uniques + + @cache_readonly + def groups(self) -> dict[Hashable, np.ndarray]: + cats = Categorical.from_codes(self.codes, self.group_index, validate=False) + return self._index.groupby(cats) + + +def get_grouper( + obj: NDFrameT, + key=None, + axis: Axis = 0, + level=None, + sort: bool = True, + observed: bool = False, + validate: bool = True, + dropna: bool = True, +) -> tuple[ops.BaseGrouper, frozenset[Hashable], NDFrameT]: + """ + Create and return a BaseGrouper, which is an internal + mapping of how to create the grouper indexers. + This may be composed of multiple Grouping objects, indicating + multiple groupers + + Groupers are ultimately index mappings. They can originate as: + index mappings, keys to columns, functions, or Groupers + + Groupers enable local references to axis,level,sort, while + the passed in axis, level, and sort are 'global'. + + This routine tries to figure out what the passing in references + are and then creates a Grouping for each one, combined into + a BaseGrouper. + + If observed & we have a categorical grouper, only show the observed + values. + + If validate, then check for key/level overlaps. + + """ + group_axis = obj._get_axis(axis) + + # validate that the passed single level is compatible with the passed + # axis of the object + if level is not None: + # TODO: These if-block and else-block are almost same. + # MultiIndex instance check is removable, but it seems that there are + # some processes only for non-MultiIndex in else-block, + # eg. `obj.index.name != level`. We have to consider carefully whether + # these are applicable for MultiIndex. Even if these are applicable, + # we need to check if it makes no side effect to subsequent processes + # on the outside of this condition. + # (GH 17621) + if isinstance(group_axis, MultiIndex): + if is_list_like(level) and len(level) == 1: + level = level[0] + + if key is None and is_scalar(level): + # Get the level values from group_axis + key = group_axis.get_level_values(level) + level = None + + else: + # allow level to be a length-one list-like object + # (e.g., level=[0]) + # GH 13901 + if is_list_like(level): + nlevels = len(level) + if nlevels == 1: + level = level[0] + elif nlevels == 0: + raise ValueError("No group keys passed!") + else: + raise ValueError("multiple levels only valid with MultiIndex") + + if isinstance(level, str): + if obj._get_axis(axis).name != level: + raise ValueError( + f"level name {level} is not the name " + f"of the {obj._get_axis_name(axis)}" + ) + elif level > 0 or level < -1: + raise ValueError("level > 0 or level < -1 only valid with MultiIndex") + + # NOTE: `group_axis` and `group_axis.get_level_values(level)` + # are same in this section. 
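+            # e.g. (illustrative) a Series whose index is named "idx" grouped
+            # with level="idx" reduces to grouping by the index itself.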
+ level = None + key = group_axis + + # a passed-in Grouper, directly convert + if isinstance(key, Grouper): + grouper, obj = key._get_grouper(obj, validate=False) + if key.key is None: + return grouper, frozenset(), obj + else: + return grouper, frozenset({key.key}), obj + + # already have a BaseGrouper, just return it + elif isinstance(key, ops.BaseGrouper): + return key, frozenset(), obj + + if not isinstance(key, list): + keys = [key] + match_axis_length = False + else: + keys = key + match_axis_length = len(keys) == len(group_axis) + + # what are we after, exactly? + any_callable = any(callable(g) or isinstance(g, dict) for g in keys) + any_groupers = any(isinstance(g, (Grouper, Grouping)) for g in keys) + any_arraylike = any( + isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys + ) + + # is this an index replacement? + if ( + not any_callable + and not any_arraylike + and not any_groupers + and match_axis_length + and level is None + ): + if isinstance(obj, DataFrame): + all_in_columns_index = all( + g in obj.columns or g in obj.index.names for g in keys + ) + else: + assert isinstance(obj, Series) + all_in_columns_index = all(g in obj.index.names for g in keys) + + if not all_in_columns_index: + keys = [com.asarray_tuplesafe(keys)] + + if isinstance(level, (tuple, list)): + if key is None: + keys = [None] * len(level) + levels = level + else: + levels = [level] * len(keys) + + groupings: list[Grouping] = [] + exclusions: set[Hashable] = set() + + # if the actual grouper should be obj[key] + def is_in_axis(key) -> bool: + if not _is_label_like(key): + if obj.ndim == 1: + return False + + # items -> .columns for DataFrame, .index for Series + items = obj.axes[-1] + try: + items.get_loc(key) + except (KeyError, TypeError, InvalidIndexError): + # TypeError shows up here if we pass e.g. an Index + return False + + return True + + # if the grouper is obj[name] + def is_in_obj(gpr) -> bool: + if not hasattr(gpr, "name"): + return False + if using_copy_on_write(): + # For the CoW case, we check the references to determine if the + # series is part of the object + try: + obj_gpr_column = obj[gpr.name] + except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime): + return False + if isinstance(gpr, Series) and isinstance(obj_gpr_column, Series): + return gpr._mgr.references_same_values( # type: ignore[union-attr] + obj_gpr_column._mgr, 0 # type: ignore[arg-type] + ) + return False + try: + return gpr is obj[gpr.name] + except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime): + # IndexError reached in e.g. test_skip_group_keys when we pass + # lambda here + # InvalidIndexError raised on key-types inappropriate for index, + # e.g. 
DatetimeIndex.get_loc(tuple()) + # OutOfBoundsDatetime raised when obj is a Series with DatetimeIndex + # and gpr.name is month str + return False + + for gpr, level in zip(keys, levels): + if is_in_obj(gpr): # df.groupby(df['name']) + in_axis = True + exclusions.add(gpr.name) + + elif is_in_axis(gpr): # df.groupby('name') + if obj.ndim != 1 and gpr in obj: + if validate: + obj._check_label_or_level_ambiguity(gpr, axis=axis) + in_axis, name, gpr = True, gpr, obj[gpr] + if gpr.ndim != 1: + # non-unique columns; raise here to get the name in the + # exception message + raise ValueError(f"Grouper for '{name}' not 1-dimensional") + exclusions.add(name) + elif obj._is_level_reference(gpr, axis=axis): + in_axis, level, gpr = False, gpr, None + else: + raise KeyError(gpr) + elif isinstance(gpr, Grouper) and gpr.key is not None: + # Add key to exclusions + exclusions.add(gpr.key) + in_axis = True + else: + in_axis = False + + # create the Grouping + # allow us to passing the actual Grouping as the gpr + ping = ( + Grouping( + group_axis, + gpr, + obj=obj, + level=level, + sort=sort, + observed=observed, + in_axis=in_axis, + dropna=dropna, + ) + if not isinstance(gpr, Grouping) + else gpr + ) + + groupings.append(ping) + + if len(groupings) == 0 and len(obj): + raise ValueError("No group keys passed!") + if len(groupings) == 0: + groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp))) + + # create the internals grouper + grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, dropna=dropna) + return grouper, frozenset(exclusions), obj + + +def _is_label_like(val) -> bool: + return isinstance(val, (str, tuple)) or (val is not None and is_scalar(val)) + + +def _convert_grouper(axis: Index, grouper): + if isinstance(grouper, dict): + return grouper.get + elif isinstance(grouper, Series): + if grouper.index.equals(axis): + return grouper._values + else: + return grouper.reindex(axis)._values + elif isinstance(grouper, MultiIndex): + return grouper._values + elif isinstance(grouper, (list, tuple, Index, Categorical, np.ndarray)): + if len(grouper) != len(axis): + raise ValueError("Grouper and axis must be same length") + + if isinstance(grouper, (list, tuple)): + grouper = com.asarray_tuplesafe(grouper) + return grouper + else: + return grouper diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/indexing.py new file mode 100644 index 00000000..a3c5ab8e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/indexing.py @@ -0,0 +1,304 @@ +from __future__ import annotations + +from collections.abc import Iterable +from typing import ( + TYPE_CHECKING, + Literal, + cast, +) + +import numpy as np + +from pandas.util._decorators import ( + cache_readonly, + doc, +) + +from pandas.core.dtypes.common import ( + is_integer, + is_list_like, +) + +if TYPE_CHECKING: + from pandas._typing import PositionalIndexer + + from pandas import ( + DataFrame, + Series, + ) + from pandas.core.groupby import groupby + + +class GroupByIndexingMixin: + """ + Mixin for adding ._positional_selector to GroupBy. + """ + + @cache_readonly + def _positional_selector(self) -> GroupByPositionalSelector: + """ + Return positional selection for each group. + + ``groupby._positional_selector[i:j]`` is similar to + ``groupby.apply(lambda x: x.iloc[i:j])`` + but much faster and preserves the original index and order. 
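For orientation, a sketch of the two key-resolution paths handled by `get_grouper` above, with illustrative data:

```python
import pandas as pd

df = pd.DataFrame({"name": ["a", "a", "b"], "val": [1, 2, 3]})

# 'name' is found in the axis (is_in_axis), becomes a Grouping, and lands in
# the exclusions set, so it is not aggregated as an ordinary column.
print(df.groupby("name").sum())

# Passing the column itself takes the is_in_obj path and groups identically.
print(df.groupby(df["name"]).sum())
```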
+ + ``_positional_selector[]`` is compatible with and extends :meth:`~GroupBy.head` + and :meth:`~GroupBy.tail`. For example: + + - ``head(5)`` + - ``_positional_selector[5:-5]`` + - ``tail(5)`` + + together return all the rows. + + Allowed inputs for the index are: + + - An integer valued iterable, e.g. ``range(2, 4)``. + - A comma separated list of integers and slices, e.g. ``5``, ``2, 4``, ``2:4``. + + The output format is the same as :meth:`~GroupBy.head` and + :meth:`~GroupBy.tail`, namely + a subset of the ``DataFrame`` or ``Series`` with the index and order preserved. + + Returns + ------- + Series + The filtered subset of the original Series. + DataFrame + The filtered subset of the original DataFrame. + + See Also + -------- + DataFrame.iloc : Purely integer-location based indexing for selection by + position. + GroupBy.head : Return first n rows of each group. + GroupBy.tail : Return last n rows of each group. + GroupBy.nth : Take the nth row from each group if n is an int, or a + subset of rows, if n is a list of ints. + + Notes + ----- + - The slice step cannot be negative. + - If the index specification results in overlaps, the item is not duplicated. + - If the index specification changes the order of items, then + they are returned in their original order. + By contrast, ``DataFrame.iloc`` can change the row order. + - ``groupby()`` parameters such as as_index and dropna are ignored. + + The differences between ``_positional_selector[]`` and :meth:`~GroupBy.nth` + with ``as_index=False`` are: + + - Input to ``_positional_selector`` can include + one or more slices whereas ``nth`` + just handles an integer or a list of integers. + - ``_positional_selector`` can accept a slice relative to the + last row of each group. + - ``_positional_selector`` does not have an equivalent to the + ``nth()`` ``dropna`` parameter. + + Examples + -------- + >>> df = pd.DataFrame([["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]], + ... columns=["A", "B"]) + >>> df.groupby("A")._positional_selector[1:2] + A B + 1 a 2 + 4 b 5 + + >>> df.groupby("A")._positional_selector[1, -1] + A B + 1 a 2 + 2 a 3 + 4 b 5 + """ + if TYPE_CHECKING: + # pylint: disable-next=used-before-assignment + groupby_self = cast(groupby.GroupBy, self) + else: + groupby_self = self + + return GroupByPositionalSelector(groupby_self) + + def _make_mask_from_positional_indexer( + self, + arg: PositionalIndexer | tuple, + ) -> np.ndarray: + if is_list_like(arg): + if all(is_integer(i) for i in cast(Iterable, arg)): + mask = self._make_mask_from_list(cast(Iterable[int], arg)) + else: + mask = self._make_mask_from_tuple(cast(tuple, arg)) + + elif isinstance(arg, slice): + mask = self._make_mask_from_slice(arg) + elif is_integer(arg): + mask = self._make_mask_from_int(cast(int, arg)) + else: + raise TypeError( + f"Invalid index {type(arg)}. 
" + "Must be integer, list-like, slice or a tuple of " + "integers and slices" + ) + + if isinstance(mask, bool): + if mask: + mask = self._ascending_count >= 0 + else: + mask = self._ascending_count < 0 + + return cast(np.ndarray, mask) + + def _make_mask_from_int(self, arg: int) -> np.ndarray: + if arg >= 0: + return self._ascending_count == arg + else: + return self._descending_count == (-arg - 1) + + def _make_mask_from_list(self, args: Iterable[int]) -> bool | np.ndarray: + positive = [arg for arg in args if arg >= 0] + negative = [-arg - 1 for arg in args if arg < 0] + + mask: bool | np.ndarray = False + + if positive: + mask |= np.isin(self._ascending_count, positive) + + if negative: + mask |= np.isin(self._descending_count, negative) + + return mask + + def _make_mask_from_tuple(self, args: tuple) -> bool | np.ndarray: + mask: bool | np.ndarray = False + + for arg in args: + if is_integer(arg): + mask |= self._make_mask_from_int(cast(int, arg)) + elif isinstance(arg, slice): + mask |= self._make_mask_from_slice(arg) + else: + raise ValueError( + f"Invalid argument {type(arg)}. Should be int or slice." + ) + + return mask + + def _make_mask_from_slice(self, arg: slice) -> bool | np.ndarray: + start = arg.start + stop = arg.stop + step = arg.step + + if step is not None and step < 0: + raise ValueError(f"Invalid step {step}. Must be non-negative") + + mask: bool | np.ndarray = True + + if step is None: + step = 1 + + if start is None: + if step > 1: + mask &= self._ascending_count % step == 0 + + elif start >= 0: + mask &= self._ascending_count >= start + + if step > 1: + mask &= (self._ascending_count - start) % step == 0 + + else: + mask &= self._descending_count < -start + + offset_array = self._descending_count + start + 1 + limit_array = ( + self._ascending_count + self._descending_count + (start + 1) + ) < 0 + offset_array = np.where(limit_array, self._ascending_count, offset_array) + + mask &= offset_array % step == 0 + + if stop is not None: + if stop >= 0: + mask &= self._ascending_count < stop + else: + mask &= self._descending_count >= -stop + + return mask + + @cache_readonly + def _ascending_count(self) -> np.ndarray: + if TYPE_CHECKING: + groupby_self = cast(groupby.GroupBy, self) + else: + groupby_self = self + + return groupby_self._cumcount_array() + + @cache_readonly + def _descending_count(self) -> np.ndarray: + if TYPE_CHECKING: + groupby_self = cast(groupby.GroupBy, self) + else: + groupby_self = self + + return groupby_self._cumcount_array(ascending=False) + + +@doc(GroupByIndexingMixin._positional_selector) +class GroupByPositionalSelector: + def __init__(self, groupby_object: groupby.GroupBy) -> None: + self.groupby_object = groupby_object + + def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series: + """ + Select by positional index per group. + + Implements GroupBy._positional_selector + + Parameters + ---------- + arg : PositionalIndexer | tuple + Allowed values are: + - int + - int valued iterable such as list or range + - slice with step either None or positive + - tuple of integers and slices + + Returns + ------- + Series + The filtered subset of the original groupby Series. + DataFrame + The filtered subset of the original groupby DataFrame. + + See Also + -------- + DataFrame.iloc : Integer-location based indexing for selection by position. + GroupBy.head : Return first n rows of each group. + GroupBy.tail : Return last n rows of each group. + GroupBy._positional_selector : Return positional selection for each group. 
+ GroupBy.nth : Take the nth row from each group if n is an int, or a + subset of rows, if n is a list of ints. + """ + mask = self.groupby_object._make_mask_from_positional_indexer(arg) + return self.groupby_object._mask_selected_obj(mask) + + +class GroupByNthSelector: + """ + Dynamically substituted for GroupBy.nth to enable both call and index + """ + + def __init__(self, groupby_object: groupby.GroupBy) -> None: + self.groupby_object = groupby_object + + def __call__( + self, + n: PositionalIndexer | tuple, + dropna: Literal["any", "all", None] = None, + ) -> DataFrame | Series: + return self.groupby_object._nth(n, dropna) + + def __getitem__(self, n: PositionalIndexer | tuple) -> DataFrame | Series: + return self.groupby_object._nth(n) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/numba_.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/numba_.py new file mode 100644 index 00000000..3b7a58e8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/numba_.py @@ -0,0 +1,181 @@ +"""Common utilities for Numba operations with groupby ops""" +from __future__ import annotations + +import functools +import inspect +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) + +import numpy as np + +from pandas.compat._optional import import_optional_dependency + +from pandas.core.util.numba_ import ( + NumbaUtilError, + jit_user_function, +) + +if TYPE_CHECKING: + from pandas._typing import Scalar + + +def validate_udf(func: Callable) -> None: + """ + Validate user defined function for ops when using Numba with groupby ops. + + The first signature arguments should include: + + def f(values, index, ...): + ... + + Parameters + ---------- + func : function, default False + user defined function + + Returns + ------- + None + + Raises + ------ + NumbaUtilError + """ + if not callable(func): + raise NotImplementedError( + "Numba engine can only be used with a single function." + ) + udf_signature = list(inspect.signature(func).parameters.keys()) + expected_args = ["values", "index"] + min_number_args = len(expected_args) + if ( + len(udf_signature) < min_number_args + or udf_signature[:min_number_args] != expected_args + ): + raise NumbaUtilError( + f"The first {min_number_args} arguments to {func.__name__} must be " + f"{expected_args}" + ) + + +@functools.cache +def generate_numba_agg_func( + func: Callable[..., Scalar], + nopython: bool, + nogil: bool, + parallel: bool, +) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]: + """ + Generate a numba jitted agg function specified by values from engine_kwargs. + + 1. jit the user's function + 2. Return a groupby agg function with the jitted function inline + + Configurations specified in engine_kwargs apply to both the user's + function _AND_ the groupby evaluation loop. 
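A sketch of the signature contract `validate_udf` enforces, assuming the optional numba dependency is installed; data and names are illustrative:

```python
import pandas as pd

# The first two positional arguments must be named `values` and `index`.
def group_sum(values, index):
    return values.sum()

df = pd.DataFrame({"key": ["a", "a", "b"], "x": [1.0, 2.0, 3.0]})
print(df.groupby("key")["x"].agg(group_sum, engine="numba"))
```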
+ + Parameters + ---------- + func : function + function to be applied to each group and will be JITed + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + numba_func = jit_user_function(func) + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def group_agg( + values: np.ndarray, + index: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + num_columns: int, + *args: Any, + ) -> np.ndarray: + assert len(begin) == len(end) + num_groups = len(begin) + + result = np.empty((num_groups, num_columns)) + for i in numba.prange(num_groups): + group_index = index[begin[i] : end[i]] + for j in numba.prange(num_columns): + group = values[begin[i] : end[i], j] + result[i, j] = numba_func(group, group_index, *args) + return result + + return group_agg + + +@functools.cache +def generate_numba_transform_func( + func: Callable[..., np.ndarray], + nopython: bool, + nogil: bool, + parallel: bool, +) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]: + """ + Generate a numba jitted transform function specified by values from engine_kwargs. + + 1. jit the user's function + 2. Return a groupby transform function with the jitted function inline + + Configurations specified in engine_kwargs apply to both the user's + function _AND_ the groupby evaluation loop. + + Parameters + ---------- + func : function + function to be applied to each window and will be JITed + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + numba_func = jit_user_function(func) + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def group_transform( + values: np.ndarray, + index: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + num_columns: int, + *args: Any, + ) -> np.ndarray: + assert len(begin) == len(end) + num_groups = len(begin) + + result = np.empty((len(values), num_columns)) + for i in numba.prange(num_groups): + group_index = index[begin[i] : end[i]] + for j in numba.prange(num_columns): + group = values[begin[i] : end[i], j] + result[begin[i] : end[i], j] = numba_func(group, group_index, *args) + return result + + return group_transform diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/ops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/ops.py new file mode 100644 index 00000000..a7c1217f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/groupby/ops.py @@ -0,0 +1,1203 @@ +""" +Provide classes to perform the groupby aggregate operations. + +These are not exposed to the user and provide implementations of the grouping +operations, primarily in cython. These classes (BaseGrouper and BinGrouper) +are contained *in* the SeriesGroupBy and DataFrameGroupBy objects. 
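By contrast with the aggregation kernel above, the transform kernel writes one value per input row; a sketch, again assuming numba is installed:

```python
import pandas as pd

# Same (values, index) contract, but the result must align row-for-row
# with the group, since group_transform assigns into result[begin:end, j].
def demean(values, index):
    return values - values.mean()

df = pd.DataFrame({"key": ["a", "a", "b"], "x": [1.0, 2.0, 3.0]})
print(df.groupby("key")["x"].transform(demean, engine="numba"))
```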
+""" +from __future__ import annotations + +import collections +import functools +from typing import ( + TYPE_CHECKING, + Callable, + Generic, + final, +) + +import numpy as np + +from pandas._libs import ( + NaT, + lib, +) +import pandas._libs.groupby as libgroupby +from pandas._typing import ( + ArrayLike, + AxisInt, + NDFrameT, + Shape, + npt, +) +from pandas.errors import AbstractMethodError +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.cast import ( + maybe_cast_pointwise_result, + maybe_downcast_to_dtype, +) +from pandas.core.dtypes.common import ( + ensure_float64, + ensure_int64, + ensure_platform_int, + ensure_uint64, + is_1d_only_ea_dtype, +) +from pandas.core.dtypes.missing import ( + isna, + maybe_fill, +) + +from pandas.core.frame import DataFrame +from pandas.core.groupby import grouper +from pandas.core.indexes.api import ( + CategoricalIndex, + Index, + MultiIndex, + ensure_index, +) +from pandas.core.series import Series +from pandas.core.sorting import ( + compress_group_index, + decons_obs_group_ids, + get_flattened_list, + get_group_index, + get_group_index_sorter, + get_indexer_dict, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + Sequence, + ) + + from pandas.core.generic import NDFrame + + +def check_result_array(obj, dtype): + # Our operation is supposed to be an aggregation/reduction. If + # it returns an ndarray, this likely means an invalid operation has + # been passed. See test_apply_without_aggregation, test_agg_must_agg + if isinstance(obj, np.ndarray): + if dtype != object: + # If it is object dtype, the function can be a reduction/aggregation + # and still return an ndarray e.g. test_agg_over_numpy_arrays + raise ValueError("Must produce aggregated value") + + +def extract_result(res): + """ + Extract the result object, it might be a 0-dim ndarray + or a len-1 0-dim, or a scalar + """ + if hasattr(res, "_values"): + # Preserve EA + res = res._values + if res.ndim == 1 and len(res) == 1: + # see test_agg_lambda_with_timezone, test_resampler_grouper.py::test_apply + res = res[0] + return res + + +class WrappedCythonOp: + """ + Dispatch logic for functions defined in _libs.groupby + + Parameters + ---------- + kind: str + Whether the operation is an aggregate or transform. + how: str + Operation name, e.g. "mean". + has_dropped_na: bool + True precisely when dropna=True and the grouper contains a null value. + """ + + # Functions for which we do _not_ attempt to cast the cython result + # back to the original dtype. 
+ cast_blocklist = frozenset( + ["any", "all", "rank", "count", "size", "idxmin", "idxmax"] + ) + + def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None: + self.kind = kind + self.how = how + self.has_dropped_na = has_dropped_na + + _CYTHON_FUNCTIONS: dict[str, dict] = { + "aggregate": { + "any": functools.partial(libgroupby.group_any_all, val_test="any"), + "all": functools.partial(libgroupby.group_any_all, val_test="all"), + "sum": "group_sum", + "prod": "group_prod", + "min": "group_min", + "max": "group_max", + "mean": "group_mean", + "median": "group_median_float64", + "var": "group_var", + "std": functools.partial(libgroupby.group_var, name="std"), + "sem": functools.partial(libgroupby.group_var, name="sem"), + "skew": "group_skew", + "first": "group_nth", + "last": "group_last", + "ohlc": "group_ohlc", + }, + "transform": { + "cumprod": "group_cumprod", + "cumsum": "group_cumsum", + "cummin": "group_cummin", + "cummax": "group_cummax", + "rank": "group_rank", + }, + } + + _cython_arity = {"ohlc": 4} # OHLC + + @classmethod + def get_kind_from_how(cls, how: str) -> str: + if how in cls._CYTHON_FUNCTIONS["aggregate"]: + return "aggregate" + return "transform" + + # Note: we make this a classmethod and pass kind+how so that caching + # works at the class level and not the instance level + @classmethod + @functools.cache + def _get_cython_function( + cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool + ): + dtype_str = dtype.name + ftype = cls._CYTHON_FUNCTIONS[kind][how] + + # see if there is a fused-type version of function + # only valid for numeric + if callable(ftype): + f = ftype + else: + f = getattr(libgroupby, ftype) + if is_numeric: + return f + elif dtype == np.dtype(object): + if how in ["median", "cumprod"]: + # no fused types -> no __signatures__ + raise NotImplementedError( + f"function is not implemented for this dtype: " + f"[how->{how},dtype->{dtype_str}]" + ) + elif how in ["std", "sem"]: + # We have a partial object that does not have __signatures__ + return f + elif how == "skew": + # _get_cython_vals will convert to float64 + pass + elif "object" not in f.__signatures__: + # raise NotImplementedError here rather than TypeError later + raise NotImplementedError( + f"function is not implemented for this dtype: " + f"[how->{how},dtype->{dtype_str}]" + ) + return f + else: + raise NotImplementedError( + "This should not be reached. Please report a bug at " + "github.com/pandas-dev/pandas/", + dtype, + ) + + def _get_cython_vals(self, values: np.ndarray) -> np.ndarray: + """ + Cast numeric dtypes to float64 for functions that only support that. 
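The dispatch table above is keyed by `how`, with `kind` inferred from membership in the aggregate table; a quick check against the internal class (internal API, may change between pandas versions):

```python
from pandas.core.groupby.ops import WrappedCythonOp

print(WrappedCythonOp.get_kind_from_how("sum"))     # aggregate
print(WrappedCythonOp.get_kind_from_how("cumsum"))  # transform
```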
+ + Parameters + ---------- + values : np.ndarray + + Returns + ------- + values : np.ndarray + """ + how = self.how + + if how in ["median", "std", "sem", "skew"]: + # median only has a float64 implementation + # We should only get here with is_numeric, as non-numeric cases + # should raise in _get_cython_function + values = ensure_float64(values) + + elif values.dtype.kind in "iu": + if how in ["var", "mean"] or ( + self.kind == "transform" and self.has_dropped_na + ): + # has_dropped_na check need for test_null_group_str_transformer + # result may still include NaN, so we have to cast + values = ensure_float64(values) + + elif how in ["sum", "ohlc", "prod", "cumsum", "cumprod"]: + # Avoid overflow during group op + if values.dtype.kind == "i": + values = ensure_int64(values) + else: + values = ensure_uint64(values) + + return values + + def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape: + how = self.how + kind = self.kind + + arity = self._cython_arity.get(how, 1) + + out_shape: Shape + if how == "ohlc": + out_shape = (ngroups, arity) + elif arity > 1: + raise NotImplementedError( + "arity of more than 1 is not supported for the 'how' argument" + ) + elif kind == "transform": + out_shape = values.shape + else: + out_shape = (ngroups,) + values.shape[1:] + return out_shape + + def _get_out_dtype(self, dtype: np.dtype) -> np.dtype: + how = self.how + + if how == "rank": + out_dtype = "float64" + else: + if dtype.kind in "iufcb": + out_dtype = f"{dtype.kind}{dtype.itemsize}" + else: + out_dtype = "object" + return np.dtype(out_dtype) + + def _get_result_dtype(self, dtype: np.dtype) -> np.dtype: + """ + Get the desired dtype of a result based on the + input dtype and how it was computed. + + Parameters + ---------- + dtype : np.dtype + + Returns + ------- + np.dtype + The desired dtype of the result. 
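The dtype rules in `_get_result_dtype` above are visible from the public API; a small sketch with illustrative data:

```python
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "flag": [True, True, False]})

# Sums of booleans come back as int64, while the mean of integer/boolean
# input is computed and returned in float64.
print(df.groupby("key")["flag"].sum().dtype)   # int64
print(df.groupby("key")["flag"].mean().dtype)  # float64
```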
+ """ + how = self.how + + if how in ["sum", "cumsum", "sum", "prod", "cumprod"]: + if dtype == np.dtype(bool): + return np.dtype(np.int64) + elif how in ["mean", "median", "var", "std", "sem"]: + if dtype.kind in "fc": + return dtype + elif dtype.kind in "iub": + return np.dtype(np.float64) + return dtype + + @final + def _cython_op_ndim_compat( + self, + values: np.ndarray, + *, + min_count: int, + ngroups: int, + comp_ids: np.ndarray, + mask: npt.NDArray[np.bool_] | None = None, + result_mask: npt.NDArray[np.bool_] | None = None, + **kwargs, + ) -> np.ndarray: + if values.ndim == 1: + # expand to 2d, dispatch, then squeeze if appropriate + values2d = values[None, :] + if mask is not None: + mask = mask[None, :] + if result_mask is not None: + result_mask = result_mask[None, :] + res = self._call_cython_op( + values2d, + min_count=min_count, + ngroups=ngroups, + comp_ids=comp_ids, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + if res.shape[0] == 1: + return res[0] + + # otherwise we have OHLC + return res.T + + return self._call_cython_op( + values, + min_count=min_count, + ngroups=ngroups, + comp_ids=comp_ids, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + + @final + def _call_cython_op( + self, + values: np.ndarray, # np.ndarray[ndim=2] + *, + min_count: int, + ngroups: int, + comp_ids: np.ndarray, + mask: npt.NDArray[np.bool_] | None, + result_mask: npt.NDArray[np.bool_] | None, + **kwargs, + ) -> np.ndarray: # np.ndarray[ndim=2] + orig_values = values + + dtype = values.dtype + is_numeric = dtype.kind in "iufcb" + + is_datetimelike = dtype.kind in "mM" + + if is_datetimelike: + values = values.view("int64") + is_numeric = True + elif dtype.kind == "b": + values = values.view("uint8") + if values.dtype == "float16": + values = values.astype(np.float32) + + if self.how in ["any", "all"]: + if mask is None: + mask = isna(values) + if dtype == object: + if kwargs["skipna"]: + # GH#37501: don't raise on pd.NA when skipna=True + if mask.any(): + # mask on original values computed separately + values = values.copy() + values[mask] = True + values = values.astype(bool, copy=False).view(np.int8) + is_numeric = True + + values = values.T + if mask is not None: + mask = mask.T + if result_mask is not None: + result_mask = result_mask.T + + out_shape = self._get_output_shape(ngroups, values) + func = self._get_cython_function(self.kind, self.how, values.dtype, is_numeric) + values = self._get_cython_vals(values) + out_dtype = self._get_out_dtype(values.dtype) + + result = maybe_fill(np.empty(out_shape, dtype=out_dtype)) + if self.kind == "aggregate": + counts = np.zeros(ngroups, dtype=np.int64) + if self.how in ["min", "max", "mean", "last", "first", "sum"]: + func( + out=result, + counts=counts, + values=values, + labels=comp_ids, + min_count=min_count, + mask=mask, + result_mask=result_mask, + is_datetimelike=is_datetimelike, + ) + elif self.how in ["sem", "std", "var", "ohlc", "prod", "median"]: + if self.how in ["std", "sem"]: + kwargs["is_datetimelike"] = is_datetimelike + func( + result, + counts, + values, + comp_ids, + min_count=min_count, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + elif self.how in ["any", "all"]: + func( + out=result, + values=values, + labels=comp_ids, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + result = result.astype(bool, copy=False) + elif self.how in ["skew"]: + func( + out=result, + counts=counts, + values=values, + labels=comp_ids, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + if dtype == object: + 
result = result.astype(object)
+
+            else:
+                raise NotImplementedError(f"{self.how} is not implemented")
+        else:
+            # TODO: min_count
+            if self.how != "rank":
+                # TODO: should rank take result_mask?
+                kwargs["result_mask"] = result_mask
+            func(
+                out=result,
+                values=values,
+                labels=comp_ids,
+                ngroups=ngroups,
+                is_datetimelike=is_datetimelike,
+                mask=mask,
+                **kwargs,
+            )
+
+        if self.kind == "aggregate":
+            # i.e. counts is defined.  Locations where count<min_count
+            # need to have the result set to np.nan, which may require casting,
+            # see GH#40767
+            if result.dtype.kind in "iu" and not is_datetimelike:
+                # if the op keeps the int dtypes, we have to use 0
+                # instead of nan for the mask
+                cutoff = max(0 if self.how in ["sum", "prod"] else 1, min_count)
+                empty_groups = counts < cutoff
+                if empty_groups.any():
+                    if result_mask is not None:
+                        assert result_mask[empty_groups].all()
+                    else:
+                        # Note: this conversion could be lossy, see GH#40767
+                        result = result.astype("float64")
+                        result[empty_groups] = np.nan
+
+        result = result.T
+
+        if self.how not in self.cast_blocklist:
+            # e.g. if we are int64 and need to restore to datetime64/timedelta64
+            #  "rank" is the only member of cast_blocklist we get here
+            res_dtype = self._get_result_dtype(orig_values.dtype)
+            op_result = maybe_downcast_to_dtype(result, res_dtype)
+        else:
+            op_result = result
+
+        return op_result
+
+    @final
+    def _validate_axis(self, axis: AxisInt, values: np.ndarray) -> None:
+        if values.ndim > 2:
+            raise NotImplementedError("number of dimensions is currently limited to 2")
+        if values.ndim == 2:
+            assert axis == 1, axis
+        elif not is_1d_only_ea_dtype(values.dtype):
+            # Note: it is *not* the case that axis is always 0 for 1-dim values,
+            #  as we can have 1D ExtensionArrays that we need to treat as 2D
+            assert axis == 0
+
+    @final
+    def cython_operation(
+        self,
+        *,
+        values: ArrayLike,
+        axis: AxisInt,
+        min_count: int = -1,
+        comp_ids: np.ndarray,
+        ngroups: int,
+        **kwargs,
+    ) -> ArrayLike:
+        """
+        Call our cython function, with appropriate pre- and post- processing.
+        """
+        self._validate_axis(axis, values)
+
+        if not isinstance(values, np.ndarray):
+            # i.e. ExtensionArray
+            return values._groupby_op(
+                how=self.how,
+                has_dropped_na=self.has_dropped_na,
+                min_count=min_count,
+                ngroups=ngroups,
+                ids=comp_ids,
+                **kwargs,
+            )
+
+        return self._cython_op_ndim_compat(
+            values,
+            min_count=min_count,
+            ngroups=ngroups,
+            comp_ids=comp_ids,
+            mask=None,
+            **kwargs,
+        )
+
+
+class BaseGrouper:
+    """
+    This is an internal Grouper class, which actually holds
+    the generated groups
+
+    Parameters
+    ----------
+    axis : Index
+    groupings : Sequence[Grouping]
+        all the grouping instances to handle in this grouper
+        for example for grouper list to groupby, need to pass the list
+    sort : bool, default True
+        whether this grouper will give sorted result or not
+
+    """
+
+    axis: Index
+
+    def __init__(
+        self,
+        axis: Index,
+        groupings: Sequence[grouper.Grouping],
+        sort: bool = True,
+        dropna: bool = True,
+    ) -> None:
+        assert isinstance(axis, Index), axis
+
+        self.axis = axis
+        self._groupings: list[grouper.Grouping] = list(groupings)
+        self._sort = sort
+        self.dropna = dropna
+
+    @property
+    def groupings(self) -> list[grouper.Grouping]:
+        return self._groupings
+
+    @property
+    def shape(self) -> Shape:
+        return tuple(ping.ngroups for ping in self.groupings)
+
+    def __iter__(self) -> Iterator[Hashable]:
+        return iter(self.indices)
+
+    @property
+    def nkeys(self) -> int:
+        return len(self.groupings)
+
+    def get_iterator(
+        self, data: NDFrameT, axis: AxisInt = 0
+    ) -> Iterator[tuple[Hashable, NDFrameT]]:
+        """
+        Groupby iterator
+
+        Returns
+        -------
+        Generator yielding sequence of (name, subsetted object)
+        for each group
+        """
+        splitter = self._get_splitter(data, axis=axis)
+        keys = self.group_keys_seq
+        yield from zip(keys, splitter)
+
+    @final
+    def _get_splitter(self, data: NDFrame, axis: AxisInt = 0) -> DataSplitter:
+        """
+        Returns
+        -------
+        Generator yielding subsetted objects
+        """
+        ids, _, ngroups = self.group_info
+        return _get_splitter(
+            data,
+            ids,
+            ngroups,
+            sorted_ids=self._sorted_ids,
+            sort_idx=self._sort_idx,
+            axis=axis,
+        )
+
+    @final
+    @cache_readonly
+    def group_keys_seq(self):
+        if len(self.groupings) == 1:
+            return self.levels[0]
+        else:
+            ids, _, ngroups = self.group_info
+
+            # provide "flattened" iterator for multi-group setting
+            return get_flattened_list(ids, ngroups, self.levels, self.codes)
+
+    @cache_readonly
+    def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:
+
"""dict {group name -> group indices}""" + if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex): + # This shows unused categories in indices GH#38642 + return self.groupings[0].indices + codes_list = [ping.codes for ping in self.groupings] + keys = [ping.group_index for ping in self.groupings] + return get_indexer_dict(codes_list, keys) + + @final + def result_ilocs(self) -> npt.NDArray[np.intp]: + """ + Get the original integer locations of result_index in the input. + """ + # Original indices are where group_index would go via sorting. + # But when dropna is true, we need to remove null values while accounting for + # any gaps that then occur because of them. + group_index = get_group_index( + self.codes, self.shape, sort=self._sort, xnull=True + ) + group_index, _ = compress_group_index(group_index, sort=self._sort) + + if self.has_dropped_na: + mask = np.where(group_index >= 0) + # Count how many gaps are caused by previous null values for each position + null_gaps = np.cumsum(group_index == -1)[mask] + group_index = group_index[mask] + + result = get_group_index_sorter(group_index, self.ngroups) + + if self.has_dropped_na: + # Shift by the number of prior null gaps + result += np.take(null_gaps, result) + + return result + + @final + @property + def codes(self) -> list[npt.NDArray[np.signedinteger]]: + return [ping.codes for ping in self.groupings] + + @property + def levels(self) -> list[Index]: + return [ping.group_index for ping in self.groupings] + + @property + def names(self) -> list[Hashable]: + return [ping.name for ping in self.groupings] + + @final + def size(self) -> Series: + """ + Compute group sizes. + """ + ids, _, ngroups = self.group_info + out: np.ndarray | list + if ngroups: + out = np.bincount(ids[ids != -1], minlength=ngroups) + else: + out = [] + return Series(out, index=self.result_index, dtype="int64") + + @cache_readonly + def groups(self) -> dict[Hashable, np.ndarray]: + """dict {group name -> group labels}""" + if len(self.groupings) == 1: + return self.groupings[0].groups + else: + to_groupby = [] + for ping in self.groupings: + gv = ping.grouping_vector + if not isinstance(gv, BaseGrouper): + to_groupby.append(gv) + else: + to_groupby.append(gv.groupings[0].grouping_vector) + index = MultiIndex.from_arrays(to_groupby) + return self.axis.groupby(index) + + @final + @cache_readonly + def is_monotonic(self) -> bool: + # return if my group orderings are monotonic + return Index(self.group_info[0]).is_monotonic_increasing + + @final + @cache_readonly + def has_dropped_na(self) -> bool: + """ + Whether grouper has null value(s) that are dropped. 
+ """ + return bool((self.group_info[0] < 0).any()) + + @cache_readonly + def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]: + comp_ids, obs_group_ids = self._get_compressed_codes() + + ngroups = len(obs_group_ids) + comp_ids = ensure_platform_int(comp_ids) + + return comp_ids, obs_group_ids, ngroups + + @cache_readonly + def codes_info(self) -> npt.NDArray[np.intp]: + # return the codes of items in original grouped axis + ids, _, _ = self.group_info + return ids + + @final + def _get_compressed_codes( + self, + ) -> tuple[npt.NDArray[np.signedinteger], npt.NDArray[np.intp]]: + # The first returned ndarray may have any signed integer dtype + if len(self.groupings) > 1: + group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True) + return compress_group_index(group_index, sort=self._sort) + # FIXME: compress_group_index's second return value is int64, not intp + + ping = self.groupings[0] + return ping.codes, np.arange(len(ping.group_index), dtype=np.intp) + + @final + @cache_readonly + def ngroups(self) -> int: + return len(self.result_index) + + @property + def reconstructed_codes(self) -> list[npt.NDArray[np.intp]]: + codes = self.codes + ids, obs_ids, _ = self.group_info + return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True) + + @cache_readonly + def result_index(self) -> Index: + if len(self.groupings) == 1: + return self.groupings[0].result_index.rename(self.names[0]) + + codes = self.reconstructed_codes + levels = [ping.result_index for ping in self.groupings] + return MultiIndex( + levels=levels, codes=codes, verify_integrity=False, names=self.names + ) + + @final + def get_group_levels(self) -> list[ArrayLike]: + # Note: only called from _insert_inaxis_grouper, which + # is only called for BaseGrouper, never for BinGrouper + if len(self.groupings) == 1: + return [self.groupings[0].group_arraylike] + + name_list = [] + for ping, codes in zip(self.groupings, self.reconstructed_codes): + codes = ensure_platform_int(codes) + levels = ping.group_arraylike.take(codes) + + name_list.append(levels) + + return name_list + + # ------------------------------------------------------------ + # Aggregation functions + + @final + def _cython_operation( + self, + kind: str, + values, + how: str, + axis: AxisInt, + min_count: int = -1, + **kwargs, + ) -> ArrayLike: + """ + Returns the values of a cython operation. + """ + assert kind in ["transform", "aggregate"] + + cy_op = WrappedCythonOp(kind=kind, how=how, has_dropped_na=self.has_dropped_na) + + ids, _, _ = self.group_info + ngroups = self.ngroups + return cy_op.cython_operation( + values=values, + axis=axis, + min_count=min_count, + comp_ids=ids, + ngroups=ngroups, + **kwargs, + ) + + @final + def agg_series( + self, obj: Series, func: Callable, preserve_dtype: bool = False + ) -> ArrayLike: + """ + Parameters + ---------- + obj : Series + func : function taking a Series and returning a scalar-like + preserve_dtype : bool + Whether the aggregation is known to be dtype-preserving. + + Returns + ------- + np.ndarray or ExtensionArray + """ + + if not isinstance(obj._values, np.ndarray): + # we can preserve a little bit more aggressively with EA dtype + # because maybe_cast_pointwise_result will do a try/except + # with _from_sequence. NB we are assuming here that _from_sequence + # is sufficiently strict that it casts appropriately. 
+ preserve_dtype = True + + result = self._aggregate_series_pure_python(obj, func) + + if len(obj) == 0 and len(result) == 0 and isinstance(obj.dtype, ExtensionDtype): + cls = obj.dtype.construct_array_type() + out = cls._from_sequence(result) + + else: + npvalues = lib.maybe_convert_objects(result, try_float=False) + if preserve_dtype: + out = maybe_cast_pointwise_result( + npvalues, obj.dtype, numeric_only=True + ) + else: + out = npvalues + return out + + @final + def _aggregate_series_pure_python( + self, obj: Series, func: Callable + ) -> npt.NDArray[np.object_]: + _, _, ngroups = self.group_info + + result = np.empty(ngroups, dtype="O") + initialized = False + + splitter = self._get_splitter(obj, axis=0) + + for i, group in enumerate(splitter): + res = func(group) + res = extract_result(res) + + if not initialized: + # We only do this validation on the first iteration + check_result_array(res, group.dtype) + initialized = True + + result[i] = res + + return result + + @final + def apply_groupwise( + self, f: Callable, data: DataFrame | Series, axis: AxisInt = 0 + ) -> tuple[list, bool]: + mutated = False + splitter = self._get_splitter(data, axis=axis) + group_keys = self.group_keys_seq + result_values = [] + + # This calls DataSplitter.__iter__ + zipped = zip(group_keys, splitter) + + for key, group in zipped: + # Pinning name is needed for + # test_group_apply_once_per_group, + # test_inconsistent_return_type, test_set_group_name, + # test_group_name_available_in_inference_pass, + # test_groupby_multi_timezone + object.__setattr__(group, "name", key) + + # group might be modified + group_axes = group.axes + res = f(group) + if not mutated and not _is_indexed_like(res, group_axes, axis): + mutated = True + result_values.append(res) + # getattr pattern for __name__ is needed for functools.partial objects + if len(group_keys) == 0 and getattr(f, "__name__", None) in [ + "skew", + "sum", + "prod", + ]: + # If group_keys is empty, then no function calls have been made, + # so we will not have raised even if this is an invalid dtype. + # So do one dummy call here to raise appropriate TypeError. 
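`apply_groupwise` pins each group's key onto the subset's `.name` before calling `f`, which is why the key is visible inside `apply`; a sketch:

```python
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "x": [1, 2, 3]})

# Each sub-frame arrives with .name set to its group key.
print(df.groupby("key")[["x"]].apply(lambda g: g.name))
```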
+ f(data.iloc[:0]) + + return result_values, mutated + + # ------------------------------------------------------------ + # Methods for sorting subsets of our GroupBy's object + + @final + @cache_readonly + def _sort_idx(self) -> npt.NDArray[np.intp]: + # Counting sort indexer + ids, _, ngroups = self.group_info + return get_group_index_sorter(ids, ngroups) + + @final + @cache_readonly + def _sorted_ids(self) -> npt.NDArray[np.intp]: + ids, _, _ = self.group_info + return ids.take(self._sort_idx) + + +class BinGrouper(BaseGrouper): + """ + This is an internal Grouper class + + Parameters + ---------- + bins : the split index of binlabels to group the item of axis + binlabels : the label list + indexer : np.ndarray[np.intp], optional + the indexer created by Grouper + some groupers (TimeGrouper) will sort its axis and its + group_info is also sorted, so need the indexer to reorder + + Examples + -------- + bins: [2, 4, 6, 8, 10] + binlabels: DatetimeIndex(['2005-01-01', '2005-01-03', + '2005-01-05', '2005-01-07', '2005-01-09'], + dtype='datetime64[ns]', freq='2D') + + the group_info, which contains the label of each item in grouped + axis, the index of label in label list, group number, is + + (array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5) + + means that, the grouped axis has 10 items, can be grouped into 5 + labels, the first and second items belong to the first label, the + third and forth items belong to the second label, and so on + + """ + + bins: npt.NDArray[np.int64] + binlabels: Index + + def __init__( + self, + bins, + binlabels, + indexer=None, + ) -> None: + self.bins = ensure_int64(bins) + self.binlabels = ensure_index(binlabels) + self.indexer = indexer + + # These lengths must match, otherwise we could call agg_series + # with empty self.bins, which would raise later. 
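`BinGrouper` is what time-based grouping such as `resample` produces; a sketch matching the bins/binlabels example in the docstring above:

```python
import pandas as pd

idx = pd.date_range("2005-01-01", periods=10)
s = pd.Series(range(10), index=idx)

# Ten daily points grouped into five 2-day bins, as in the docstring.
print(s.resample("2D").sum())
```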
+ assert len(self.binlabels) == len(self.bins) + + @cache_readonly + def groups(self): + """dict {group name -> group labels}""" + # this is mainly for compat + # GH 3881 + result = { + key: value + for key, value in zip(self.binlabels, self.bins) + if key is not NaT + } + return result + + def __iter__(self) -> Iterator[Hashable]: + return iter(self.groupings[0].grouping_vector) + + @property + def nkeys(self) -> int: + # still matches len(self.groupings), but we can hard-code + return 1 + + @cache_readonly + def codes_info(self) -> npt.NDArray[np.intp]: + # return the codes of items in original grouped axis + ids, _, _ = self.group_info + if self.indexer is not None: + sorter = np.lexsort((ids, self.indexer)) + ids = ids[sorter] + return ids + + def get_iterator(self, data: NDFrame, axis: AxisInt = 0): + """ + Groupby iterator + + Returns + ------- + Generator yielding sequence of (name, subsetted object) + for each group + """ + if axis == 0: + slicer = lambda start, edge: data.iloc[start:edge] + else: + slicer = lambda start, edge: data.iloc[:, start:edge] + + length = len(data.axes[axis]) + + start = 0 + for edge, label in zip(self.bins, self.binlabels): + if label is not NaT: + yield label, slicer(start, edge) + start = edge + + if start < length: + yield self.binlabels[-1], slicer(start, None) + + @cache_readonly + def indices(self): + indices = collections.defaultdict(list) + + i = 0 + for label, bin in zip(self.binlabels, self.bins): + if i < bin: + if label is not NaT: + indices[label] = list(range(i, bin)) + i = bin + return indices + + @cache_readonly + def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]: + ngroups = self.ngroups + obs_group_ids = np.arange(ngroups, dtype=np.intp) + rep = np.diff(np.r_[0, self.bins]) + + rep = ensure_platform_int(rep) + if ngroups == len(self.bins): + comp_ids = np.repeat(np.arange(ngroups), rep) + else: + comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep) + + return ( + ensure_platform_int(comp_ids), + obs_group_ids, + ngroups, + ) + + @cache_readonly + def reconstructed_codes(self) -> list[np.ndarray]: + # get unique result indices, and prepend 0 as groupby starts from the first + return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]] + + @cache_readonly + def result_index(self) -> Index: + if len(self.binlabels) != 0 and isna(self.binlabels[0]): + return self.binlabels[1:] + + return self.binlabels + + @property + def levels(self) -> list[Index]: + return [self.binlabels] + + @property + def names(self) -> list[Hashable]: + return [self.binlabels.name] + + @property + def groupings(self) -> list[grouper.Grouping]: + lev = self.binlabels + codes = self.group_info[0] + labels = lev.take(codes) + ping = grouper.Grouping( + labels, labels, in_axis=False, level=None, uniques=lev._values + ) + return [ping] + + +def _is_indexed_like(obj, axes, axis: AxisInt) -> bool: + if isinstance(obj, Series): + if len(axes) > 1: + return False + return obj.axes[axis].equals(axes[axis]) + elif isinstance(obj, DataFrame): + return obj.axes[axis].equals(axes[axis]) + + return False + + +# ---------------------------------------------------------------------- +# Splitting / application + + +class DataSplitter(Generic[NDFrameT]): + def __init__( + self, + data: NDFrameT, + labels: npt.NDArray[np.intp], + ngroups: int, + *, + sort_idx: npt.NDArray[np.intp], + sorted_ids: npt.NDArray[np.intp], + axis: AxisInt = 0, + ) -> None: + self.data = data + self.labels = ensure_platform_int(labels) # _should_ already be np.intp 
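`DataSplitter` below sorts rows by group id once and then yields contiguous slices, so iterating a groupby visits groups in sorted key order; a sketch:

```python
import pandas as pd

df = pd.DataFrame({"key": ["b", "a", "b"], "x": [1, 2, 3]})

for name, chunk in df.groupby("key"):
    print(name, list(chunk["x"]))  # 'a' first, then 'b'
```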
+ self.ngroups = ngroups + + self._slabels = sorted_ids + self._sort_idx = sort_idx + + self.axis = axis + assert isinstance(axis, int), axis + + def __iter__(self) -> Iterator: + sdata = self._sorted_data + + if self.ngroups == 0: + # we are inside a generator, rather than raise StopIteration + # we merely return signal the end + return + + starts, ends = lib.generate_slices(self._slabels, self.ngroups) + + for start, end in zip(starts, ends): + yield self._chop(sdata, slice(start, end)) + + @cache_readonly + def _sorted_data(self) -> NDFrameT: + return self.data.take(self._sort_idx, axis=self.axis) + + def _chop(self, sdata, slice_obj: slice) -> NDFrame: + raise AbstractMethodError(self) + + +class SeriesSplitter(DataSplitter): + def _chop(self, sdata: Series, slice_obj: slice) -> Series: + # fastpath equivalent to `sdata.iloc[slice_obj]` + mgr = sdata._mgr.get_slice(slice_obj) + ser = sdata._constructor_from_mgr(mgr, axes=mgr.axes) + ser._name = sdata.name + return ser.__finalize__(sdata, method="groupby") + + +class FrameSplitter(DataSplitter): + def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame: + # Fastpath equivalent to: + # if self.axis == 0: + # return sdata.iloc[slice_obj] + # else: + # return sdata.iloc[:, slice_obj] + mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis) + df = sdata._constructor_from_mgr(mgr, axes=mgr.axes) + return df.__finalize__(sdata, method="groupby") + + +def _get_splitter( + data: NDFrame, + labels: npt.NDArray[np.intp], + ngroups: int, + *, + sort_idx: npt.NDArray[np.intp], + sorted_ids: npt.NDArray[np.intp], + axis: AxisInt = 0, +) -> DataSplitter: + if isinstance(data, Series): + klass: type[DataSplitter] = SeriesSplitter + else: + # i.e. DataFrame + klass = FrameSplitter + + return klass( + data, labels, ngroups, sort_idx=sort_idx, sorted_ids=sorted_ids, axis=axis + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexers/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexers/__init__.py new file mode 100644 index 00000000..ba8a4f1d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexers/__init__.py @@ -0,0 +1,31 @@ +from pandas.core.indexers.utils import ( + check_array_indexer, + check_key_length, + check_setitem_lengths, + disallow_ndim_indexing, + is_empty_indexer, + is_list_like_indexer, + is_scalar_indexer, + is_valid_positional_slice, + length_of_indexer, + maybe_convert_indices, + unpack_1tuple, + unpack_tuple_and_ellipses, + validate_indices, +) + +__all__ = [ + "is_valid_positional_slice", + "is_list_like_indexer", + "is_scalar_indexer", + "is_empty_indexer", + "check_setitem_lengths", + "validate_indices", + "maybe_convert_indices", + "length_of_indexer", + "disallow_ndim_indexing", + "unpack_1tuple", + "check_key_length", + "check_array_indexer", + "unpack_tuple_and_ellipses", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexers/objects.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexers/objects.py new file mode 100644 index 00000000..b516227c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexers/objects.py @@ -0,0 +1,451 @@ +"""Indexer objects for computing start/end window bounds for rolling operations""" +from __future__ import annotations + +from datetime import timedelta + +import numpy as np + +from pandas._libs.tslibs import BaseOffset +from pandas._libs.window.indexers import calculate_variable_window_bounds +from pandas.util._decorators import Appender + +from pandas.core.dtypes.common import 
ensure_platform_int + +from pandas.core.indexes.datetimes import DatetimeIndex + +from pandas.tseries.offsets import Nano + +get_window_bounds_doc = """ +Computes the bounds of a window. + +Parameters +---------- +num_values : int, default 0 + number of values that will be aggregated over +window_size : int, default 0 + the number of rows in a window +min_periods : int, default None + min_periods passed from the top level rolling API +center : bool, default None + center passed from the top level rolling API +closed : str, default None + closed passed from the top level rolling API +step : int, default None + step passed from the top level rolling API + .. versionadded:: 1.5 +win_type : str, default None + win_type passed from the top level rolling API + +Returns +------- +A tuple of ndarray[int64]s, indicating the boundaries of each +window +""" + + +class BaseIndexer: + """ + Base class for window bounds calculations. + + Examples + -------- + >>> from pandas.api.indexers import BaseIndexer + >>> class CustomIndexer(BaseIndexer): + ... def get_window_bounds(self, num_values, min_periods, center, closed, step): + ... start = np.empty(num_values, dtype=np.int64) + ... end = np.empty(num_values, dtype=np.int64) + ... for i in range(num_values): + ... start[i] = i + ... end[i] = i + self.window_size + ... return start, end + >>> df = pd.DataFrame({"values": range(5)}) + >>> indexer = CustomIndexer(window_size=2) + >>> df.rolling(indexer).sum() + values + 0 1.0 + 1 3.0 + 2 5.0 + 3 7.0 + 4 4.0 + """ + + def __init__( + self, index_array: np.ndarray | None = None, window_size: int = 0, **kwargs + ) -> None: + self.index_array = index_array + self.window_size = window_size + # Set user defined kwargs as attributes that can be used in get_window_bounds + for key, value in kwargs.items(): + setattr(self, key, value) + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + raise NotImplementedError + + +class FixedWindowIndexer(BaseIndexer): + """Creates window boundaries that are of fixed length.""" + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + if center or self.window_size == 0: + offset = (self.window_size - 1) // 2 + else: + offset = 0 + + end = np.arange(1 + offset, num_values + 1 + offset, step, dtype="int64") + start = end - self.window_size + if closed in ["left", "both"]: + start -= 1 + if closed in ["left", "neither"]: + end -= 1 + + end = np.clip(end, 0, num_values) + start = np.clip(start, 0, num_values) + + return start, end + + +class VariableWindowIndexer(BaseIndexer): + """Creates window boundaries that are of variable length, namely for time series.""" + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + # error: Argument 4 to "calculate_variable_window_bounds" has incompatible + # type "Optional[bool]"; expected "bool" + # error: Argument 6 to "calculate_variable_window_bounds" has incompatible + # type "Optional[ndarray]"; expected "ndarray" + return calculate_variable_window_bounds( + 
num_values, + self.window_size, + min_periods, + center, # type: ignore[arg-type] + closed, + self.index_array, # type: ignore[arg-type] + ) + + +class VariableOffsetWindowIndexer(BaseIndexer): + """ + Calculate window boundaries based on a non-fixed offset such as a BusinessDay. + + Examples + -------- + >>> from pandas.api.indexers import VariableOffsetWindowIndexer + >>> df = pd.DataFrame(range(10), index=pd.date_range("2020", periods=10)) + >>> offset = pd.offsets.BDay(1) + >>> indexer = VariableOffsetWindowIndexer(index=df.index, offset=offset) + >>> df + 0 + 2020-01-01 0 + 2020-01-02 1 + 2020-01-03 2 + 2020-01-04 3 + 2020-01-05 4 + 2020-01-06 5 + 2020-01-07 6 + 2020-01-08 7 + 2020-01-09 8 + 2020-01-10 9 + >>> df.rolling(indexer).sum() + 0 + 2020-01-01 0.0 + 2020-01-02 1.0 + 2020-01-03 2.0 + 2020-01-04 3.0 + 2020-01-05 7.0 + 2020-01-06 12.0 + 2020-01-07 6.0 + 2020-01-08 7.0 + 2020-01-09 8.0 + 2020-01-10 9.0 + """ + + def __init__( + self, + index_array: np.ndarray | None = None, + window_size: int = 0, + index: DatetimeIndex | None = None, + offset: BaseOffset | None = None, + **kwargs, + ) -> None: + super().__init__(index_array, window_size, **kwargs) + if not isinstance(index, DatetimeIndex): + raise ValueError("index must be a DatetimeIndex.") + self.index = index + if not isinstance(offset, BaseOffset): + raise ValueError("offset must be a DateOffset-like object.") + self.offset = offset + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + if step is not None: + raise NotImplementedError("step not implemented for variable offset window") + if num_values <= 0: + return np.empty(0, dtype="int64"), np.empty(0, dtype="int64") + + # if windows is variable, default is 'right', otherwise default is 'both' + if closed is None: + closed = "right" if self.index is not None else "both" + + right_closed = closed in ["right", "both"] + left_closed = closed in ["left", "both"] + + if self.index[num_values - 1] < self.index[0]: + index_growth_sign = -1 + else: + index_growth_sign = 1 + offset_diff = index_growth_sign * self.offset + + start = np.empty(num_values, dtype="int64") + start.fill(-1) + end = np.empty(num_values, dtype="int64") + end.fill(-1) + + start[0] = 0 + + # right endpoint is closed + if right_closed: + end[0] = 1 + # right endpoint is open + else: + end[0] = 0 + + zero = timedelta(0) + # start is start of slice interval (including) + # end is end of slice interval (not including) + for i in range(1, num_values): + end_bound = self.index[i] + start_bound = end_bound - offset_diff + + # left endpoint is closed + if left_closed: + start_bound -= Nano(1) + + # advance the start bound until we are + # within the constraint + start[i] = i + for j in range(start[i - 1], i): + start_diff = (self.index[j] - start_bound) * index_growth_sign + if start_diff > zero: + start[i] = j + break + + # end bound is previous end + # or current index + end_diff = (self.index[end[i - 1]] - end_bound) * index_growth_sign + if end_diff <= zero: + end[i] = i + 1 + else: + end[i] = end[i - 1] + + # right endpoint is open + if not right_closed: + end[i] -= 1 + + return start, end + + +class ExpandingIndexer(BaseIndexer): + """Calculate expanding window bounds, mimicking df.expanding()""" + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = 
None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + return ( + np.zeros(num_values, dtype=np.int64), + np.arange(1, num_values + 1, dtype=np.int64), + ) + + +class FixedForwardWindowIndexer(BaseIndexer): + """ + Creates window boundaries for fixed-length windows that include the current row. + + Examples + -------- + >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) + >>> df + B + 0 0.0 + 1 1.0 + 2 2.0 + 3 NaN + 4 4.0 + + >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2) + >>> df.rolling(window=indexer, min_periods=1).sum() + B + 0 1.0 + 1 3.0 + 2 2.0 + 3 4.0 + 4 4.0 + """ + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + if center: + raise ValueError("Forward-looking windows can't have center=True") + if closed is not None: + raise ValueError( + "Forward-looking windows don't support setting the closed argument" + ) + if step is None: + step = 1 + + start = np.arange(0, num_values, step, dtype="int64") + end = start + self.window_size + if self.window_size: + end = np.clip(end, 0, num_values) + + return start, end + + +class GroupbyIndexer(BaseIndexer): + """Calculate bounds to compute groupby rolling, mimicking df.groupby().rolling()""" + + def __init__( + self, + index_array: np.ndarray | None = None, + window_size: int | BaseIndexer = 0, + groupby_indices: dict | None = None, + window_indexer: type[BaseIndexer] = BaseIndexer, + indexer_kwargs: dict | None = None, + **kwargs, + ) -> None: + """ + Parameters + ---------- + index_array : np.ndarray or None + np.ndarray of the index of the original object that we are performing + a chained groupby operation over. 
This index has been pre-sorted relative to + the groups + window_size : int or BaseIndexer + window size during the windowing operation + groupby_indices : dict or None + dict of {group label: [positional index of rows belonging to the group]} + window_indexer : BaseIndexer + BaseIndexer class determining the start and end bounds of each group + indexer_kwargs : dict or None + Custom kwargs to be passed to window_indexer + **kwargs : + keyword arguments that will be available when get_window_bounds is called + """ + self.groupby_indices = groupby_indices or {} + self.window_indexer = window_indexer + self.indexer_kwargs = indexer_kwargs.copy() if indexer_kwargs else {} + super().__init__( + index_array=index_array, + window_size=self.indexer_kwargs.pop("window_size", window_size), + **kwargs, + ) + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + # 1) For each group, get the indices that belong to the group + # 2) Use the indices to calculate the start & end bounds of the window + # 3) Append the window bounds in group order + start_arrays = [] + end_arrays = [] + window_indices_start = 0 + for key, indices in self.groupby_indices.items(): + index_array: np.ndarray | None + + if self.index_array is not None: + index_array = self.index_array.take(ensure_platform_int(indices)) + else: + index_array = self.index_array + indexer = self.window_indexer( + index_array=index_array, + window_size=self.window_size, + **self.indexer_kwargs, + ) + start, end = indexer.get_window_bounds( + len(indices), min_periods, center, closed, step + ) + start = start.astype(np.int64) + end = end.astype(np.int64) + assert len(start) == len( + end + ), "these should be equal in length from get_window_bounds" + # Cannot use groupby_indices as they might not be monotonic with the object + # we're rolling over + window_indices = np.arange( + window_indices_start, window_indices_start + len(indices) + ) + window_indices_start += len(indices) + # Extend as we'll be slicing window like [start, end) + window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype( + np.int64, copy=False + ) + start_arrays.append(window_indices.take(ensure_platform_int(start))) + end_arrays.append(window_indices.take(ensure_platform_int(end))) + if len(start_arrays) == 0: + return np.array([], dtype=np.int64), np.array([], dtype=np.int64) + start = np.concatenate(start_arrays) + end = np.concatenate(end_arrays) + return start, end + + +class ExponentialMovingWindowIndexer(BaseIndexer): + """Calculate ewm window bounds (the entire window)""" + + @Appender(get_window_bounds_doc) + def get_window_bounds( + self, + num_values: int = 0, + min_periods: int | None = None, + center: bool | None = None, + closed: str | None = None, + step: int | None = None, + ) -> tuple[np.ndarray, np.ndarray]: + return np.array([0], dtype=np.int64), np.array([num_values], dtype=np.int64) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexers/utils.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexers/utils.py new file mode 100644 index 00000000..55bb58f3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexers/utils.py @@ -0,0 +1,553 @@ +""" +Low-dependency indexing utilities. 
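+
+These helpers identify, validate, and convert indexers (slices, boolean
+masks, and integer array-likes) without importing the heavier Index
+machinery, which keeps them usable from low-level code.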
+""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np + +from pandas._libs import lib + +from pandas.core.dtypes.common import ( + is_array_like, + is_bool_dtype, + is_integer, + is_integer_dtype, + is_list_like, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from pandas._typing import AnyArrayLike + + from pandas.core.frame import DataFrame + from pandas.core.indexes.base import Index + +# ----------------------------------------------------------- +# Indexer Identification + + +def is_valid_positional_slice(slc: slice) -> bool: + """ + Check if a slice object can be interpreted as a positional indexer. + + Parameters + ---------- + slc : slice + + Returns + ------- + bool + + Notes + ----- + A valid positional slice may also be interpreted as a label-based slice + depending on the index being sliced. + """ + return ( + lib.is_int_or_none(slc.start) + and lib.is_int_or_none(slc.stop) + and lib.is_int_or_none(slc.step) + ) + + +def is_list_like_indexer(key) -> bool: + """ + Check if we have a list-like indexer that is *not* a NamedTuple. + + Parameters + ---------- + key : object + + Returns + ------- + bool + """ + # allow a list_like, but exclude NamedTuples which can be indexers + return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple) + + +def is_scalar_indexer(indexer, ndim: int) -> bool: + """ + Return True if we are all scalar indexers. + + Parameters + ---------- + indexer : object + ndim : int + Number of dimensions in the object being indexed. + + Returns + ------- + bool + """ + if ndim == 1 and is_integer(indexer): + # GH37748: allow indexer to be an integer for Series + return True + if isinstance(indexer, tuple) and len(indexer) == ndim: + return all(is_integer(x) for x in indexer) + return False + + +def is_empty_indexer(indexer) -> bool: + """ + Check if we have an empty indexer. + + Parameters + ---------- + indexer : object + + Returns + ------- + bool + """ + if is_list_like(indexer) and not len(indexer): + return True + if not isinstance(indexer, tuple): + indexer = (indexer,) + return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) + + +# ----------------------------------------------------------- +# Indexer Validation + + +def check_setitem_lengths(indexer, value, values) -> bool: + """ + Validate that value and indexer are the same length. + + An special-case is allowed for when the indexer is a boolean array + and the number of true values equals the length of ``value``. In + this case, no exception is raised. + + Parameters + ---------- + indexer : sequence + Key for the setitem. + value : array-like + Value for the setitem. + values : array-like + Values being set into. + + Returns + ------- + bool + Whether this is an empty listlike setting which is a no-op. + + Raises + ------ + ValueError + When the indexer is an ndarray or list and the lengths don't match. + """ + no_op = False + + if isinstance(indexer, (np.ndarray, list)): + # We can ignore other listlikes because they are either + # a) not necessarily 1-D indexers, e.g. tuple + # b) boolean indexers e.g. 
BoolArray + if is_list_like(value): + if len(indexer) != len(value) and values.ndim == 1: + # boolean with truth values == len of the value is ok too + if isinstance(indexer, list): + indexer = np.array(indexer) + if not ( + isinstance(indexer, np.ndarray) + and indexer.dtype == np.bool_ + and indexer.sum() == len(value) + ): + raise ValueError( + "cannot set using a list-like indexer " + "with a different length than the value" + ) + if not len(indexer): + no_op = True + + elif isinstance(indexer, slice): + if is_list_like(value): + if len(value) != length_of_indexer(indexer, values) and values.ndim == 1: + # In case of two dimensional value is used row-wise and broadcasted + raise ValueError( + "cannot set using a slice indexer with a " + "different length than the value" + ) + if not len(value): + no_op = True + + return no_op + + +def validate_indices(indices: np.ndarray, n: int) -> None: + """ + Perform bounds-checking for an indexer. + + -1 is allowed for indicating missing values. + + Parameters + ---------- + indices : ndarray + n : int + Length of the array being indexed. + + Raises + ------ + ValueError + + Examples + -------- + >>> validate_indices(np.array([1, 2]), 3) # OK + + >>> validate_indices(np.array([1, -2]), 3) + Traceback (most recent call last): + ... + ValueError: negative dimensions are not allowed + + >>> validate_indices(np.array([1, 2, 3]), 3) + Traceback (most recent call last): + ... + IndexError: indices are out-of-bounds + + >>> validate_indices(np.array([-1, -1]), 0) # OK + + >>> validate_indices(np.array([0, 1]), 0) + Traceback (most recent call last): + ... + IndexError: indices are out-of-bounds + """ + if len(indices): + min_idx = indices.min() + if min_idx < -1: + msg = f"'indices' contains values less than allowed ({min_idx} < -1)" + raise ValueError(msg) + + max_idx = indices.max() + if max_idx >= n: + raise IndexError("indices are out-of-bounds") + + +# ----------------------------------------------------------- +# Indexer Conversion + + +def maybe_convert_indices(indices, n: int, verify: bool = True) -> np.ndarray: + """ + Attempt to convert indices into valid, positive indices. + + If we have negative indices, translate to positive here. + If we have indices that are out-of-bounds, raise an IndexError. + + Parameters + ---------- + indices : array-like + Array of indices that we are to convert. + n : int + Number of elements in the array that we are indexing. + verify : bool, default True + Check that all entries are between 0 and n - 1, inclusive. + + Returns + ------- + array-like + An array-like of positive indices that correspond to the ones + that were passed in initially to this function. + + Raises + ------ + IndexError + One of the converted indices either exceeded the number of, + elements (specified by `n`), or was still negative. + """ + if isinstance(indices, list): + indices = np.array(indices) + if len(indices) == 0: + # If `indices` is empty, np.array will return a float, + # and will cause indexing errors. 
+ return np.empty(0, dtype=np.intp) + + mask = indices < 0 + if mask.any(): + indices = indices.copy() + indices[mask] += n + + if verify: + mask = (indices >= n) | (indices < 0) + if mask.any(): + raise IndexError("indices are out-of-bounds") + return indices + + +# ----------------------------------------------------------- +# Unsorted + + +def length_of_indexer(indexer, target=None) -> int: + """ + Return the expected length of target[indexer] + + Returns + ------- + int + """ + if target is not None and isinstance(indexer, slice): + target_len = len(target) + start = indexer.start + stop = indexer.stop + step = indexer.step + if start is None: + start = 0 + elif start < 0: + start += target_len + if stop is None or stop > target_len: + stop = target_len + elif stop < 0: + stop += target_len + if step is None: + step = 1 + elif step < 0: + start, stop = stop + 1, start + 1 + step = -step + return (stop - start + step - 1) // step + elif isinstance(indexer, (ABCSeries, ABCIndex, np.ndarray, list)): + if isinstance(indexer, list): + indexer = np.array(indexer) + + if indexer.dtype == bool: + # GH#25774 + return indexer.sum() + return len(indexer) + elif isinstance(indexer, range): + return (indexer.stop - indexer.start) // indexer.step + elif not is_list_like_indexer(indexer): + return 1 + raise AssertionError("cannot find the length of the indexer") + + +def disallow_ndim_indexing(result) -> None: + """ + Helper function to disallow multi-dimensional indexing on 1D Series/Index. + + GH#27125 indexer like idx[:, None] expands dim, but we cannot do that + and keep an index, so we used to return ndarray, which was deprecated + in GH#30588. + """ + if np.ndim(result) > 1: + raise ValueError( + "Multi-dimensional indexing (e.g. `obj[:, None]`) is no longer " + "supported. Convert to a numpy array before indexing instead." + ) + + +def unpack_1tuple(tup): + """ + If we have a length-1 tuple/list that contains a slice, unpack to just + the slice. + + Notes + ----- + The list case is deprecated. + """ + if len(tup) == 1 and isinstance(tup[0], slice): + # if we don't have a MultiIndex, we may still be able to handle + # a 1-tuple. see test_1tuple_without_multiindex + + if isinstance(tup, list): + # GH#31299 + raise ValueError( + "Indexing with a single-item list containing a " + "slice is not allowed. Pass a tuple instead.", + ) + + return tup[0] + return tup + + +def check_key_length(columns: Index, key, value: DataFrame) -> None: + """ + Checks if a key used as indexer has the same length as the columns it is + associated with. + + Parameters + ---------- + columns : Index The columns of the DataFrame to index. + key : A list-like of keys to index with. + value : DataFrame The value to set for the keys. + + Raises + ------ + ValueError: If the length of key is not equal to the number of columns in value + or if the number of columns referenced by key is not equal to number + of columns. 
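+
+    Examples
+    --------
+    Illustrative sketch (assumes ``pd`` is imported):
+
+    >>> from pandas.core.indexers.utils import check_key_length
+    >>> value = pd.DataFrame({"a": [1], "b": [2]})
+    >>> check_key_length(pd.Index(["a", "b"]), ["a", "b"], value)  # no error
+    >>> check_key_length(pd.Index(["a", "b"]), ["a"], value)
+    Traceback (most recent call last):
+    ...
+    ValueError: Columns must be same length as key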
+ """ + if columns.is_unique: + if len(value.columns) != len(key): + raise ValueError("Columns must be same length as key") + else: + # Missing keys in columns are represented as -1 + if len(columns.get_indexer_non_unique(key)[0]) != len(value.columns): + raise ValueError("Columns must be same length as key") + + +def unpack_tuple_and_ellipses(item: tuple): + """ + Possibly unpack arr[..., n] to arr[n] + """ + if len(item) > 1: + # Note: we are assuming this indexing is being done on a 1D arraylike + if item[0] is Ellipsis: + item = item[1:] + elif item[-1] is Ellipsis: + item = item[:-1] + + if len(item) > 1: + raise IndexError("too many indices for array.") + + item = item[0] + return item + + +# ----------------------------------------------------------- +# Public indexer validation + + +def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any: + """ + Check if `indexer` is a valid array indexer for `array`. + + For a boolean mask, `array` and `indexer` are checked to have the same + length. The dtype is validated, and if it is an integer or boolean + ExtensionArray, it is checked if there are missing values present, and + it is converted to the appropriate numpy array. Other dtypes will raise + an error. + + Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed + through as is. + + Parameters + ---------- + array : array-like + The array that is being indexed (only used for the length). + indexer : array-like or list-like + The array-like that's used to index. List-like input that is not yet + a numpy array or an ExtensionArray is converted to one. Other input + types are passed through as is. + + Returns + ------- + numpy.ndarray + The validated indexer as a numpy array that can be used to index. + + Raises + ------ + IndexError + When the lengths don't match. + ValueError + When `indexer` cannot be converted to a numpy ndarray to index + (e.g. presence of missing values). + + See Also + -------- + api.types.is_bool_dtype : Check if `key` is of boolean dtype. + + Examples + -------- + When checking a boolean mask, a boolean ndarray is returned when the + arguments are all valid. + + >>> mask = pd.array([True, False]) + >>> arr = pd.array([1, 2]) + >>> pd.api.indexers.check_array_indexer(arr, mask) + array([ True, False]) + + An IndexError is raised when the lengths don't match. + + >>> mask = pd.array([True, False, True]) + >>> pd.api.indexers.check_array_indexer(arr, mask) + Traceback (most recent call last): + ... + IndexError: Boolean index has wrong length: 3 instead of 2. + + NA values in a boolean array are treated as False. + + >>> mask = pd.array([True, pd.NA]) + >>> pd.api.indexers.check_array_indexer(arr, mask) + array([ True, False]) + + A numpy boolean mask will get passed through (if the length is correct): + + >>> mask = np.array([True, False]) + >>> pd.api.indexers.check_array_indexer(arr, mask) + array([ True, False]) + + Similarly for integer indexers, an integer ndarray is returned when it is + a valid indexer, otherwise an error is (for integer indexers, a matching + length is not required): + + >>> indexer = pd.array([0, 2], dtype="Int64") + >>> arr = pd.array([1, 2, 3]) + >>> pd.api.indexers.check_array_indexer(arr, indexer) + array([0, 2]) + + >>> indexer = pd.array([0, pd.NA], dtype="Int64") + >>> pd.api.indexers.check_array_indexer(arr, indexer) + Traceback (most recent call last): + ... 
+ ValueError: Cannot index with an integer indexer containing NA values + + For non-integer/boolean dtypes, an appropriate error is raised: + + >>> indexer = np.array([0., 2.], dtype="float64") + >>> pd.api.indexers.check_array_indexer(arr, indexer) + Traceback (most recent call last): + ... + IndexError: arrays used as indices must be of integer or boolean type + """ + from pandas.core.construction import array as pd_array + + # whatever is not an array-like is returned as-is (possible valid array + # indexers that are not array-like: integer, slice, Ellipsis, None) + # In this context, tuples are not considered as array-like, as they have + # a specific meaning in indexing (multi-dimensional indexing) + if is_list_like(indexer): + if isinstance(indexer, tuple): + return indexer + else: + return indexer + + # convert list-likes to array + if not is_array_like(indexer): + indexer = pd_array(indexer) + if len(indexer) == 0: + # empty list is converted to float array by pd.array + indexer = np.array([], dtype=np.intp) + + dtype = indexer.dtype + if is_bool_dtype(dtype): + if isinstance(dtype, ExtensionDtype): + indexer = indexer.to_numpy(dtype=bool, na_value=False) + else: + indexer = np.asarray(indexer, dtype=bool) + + # GH26658 + if len(indexer) != len(array): + raise IndexError( + f"Boolean index has wrong length: " + f"{len(indexer)} instead of {len(array)}" + ) + elif is_integer_dtype(dtype): + try: + indexer = np.asarray(indexer, dtype=np.intp) + except ValueError as err: + raise ValueError( + "Cannot index with an integer indexer containing NA values" + ) from err + else: + raise IndexError("arrays used as indices must be of integer or boolean type") + + return indexer diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/accessors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/accessors.py new file mode 100644 index 00000000..d9729835 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/accessors.py @@ -0,0 +1,608 @@ +""" +datetimelike delegation +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_integer_dtype, + is_list_like, +) +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + CategoricalDtype, + DatetimeTZDtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ABCSeries + +from pandas.core.accessor import ( + PandasDelegate, + delegate_names, +) +from pandas.core.arrays import ( + DatetimeArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.arrays.arrow.array import ArrowExtensionArray +from pandas.core.base import ( + NoNewAttributesMixin, + PandasObject, +) +from pandas.core.indexes.datetimes import DatetimeIndex +from pandas.core.indexes.timedeltas import TimedeltaIndex + +if TYPE_CHECKING: + from pandas import ( + DataFrame, + Series, + ) + + +class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin): + _hidden_attrs = PandasObject._hidden_attrs | { + "orig", + "name", + } + + def __init__(self, data: Series, orig) -> None: + if not isinstance(data, ABCSeries): + raise TypeError( + f"cannot convert an object of type {type(data)} to a datetimelike index" + ) + + self._parent = 
data + self.orig = orig + self.name = getattr(data, "name", None) + self._freeze() + + def _get_values(self): + data = self._parent + if lib.is_np_dtype(data.dtype, "M"): + return DatetimeIndex(data, copy=False, name=self.name) + + elif isinstance(data.dtype, DatetimeTZDtype): + return DatetimeIndex(data, copy=False, name=self.name) + + elif lib.is_np_dtype(data.dtype, "m"): + return TimedeltaIndex(data, copy=False, name=self.name) + + elif isinstance(data.dtype, PeriodDtype): + return PeriodArray(data, copy=False) + + raise TypeError( + f"cannot convert an object of type {type(data)} to a datetimelike index" + ) + + # error: Signature of "_delegate_property_get" incompatible with supertype + # "PandasDelegate" + def _delegate_property_get(self, name: str): # type: ignore[override] + from pandas import Series + + values = self._get_values() + + result = getattr(values, name) + + # maybe need to upcast (ints) + if isinstance(result, np.ndarray): + if is_integer_dtype(result): + result = result.astype("int64") + elif not is_list_like(result): + return result + + result = np.asarray(result) + + if self.orig is not None: + index = self.orig.index + else: + index = self._parent.index + # return the result as a Series + result = Series(result, index=index, name=self.name).__finalize__(self._parent) + + # setting this object will show a SettingWithCopyWarning/Error + result._is_copy = ( + "modifications to a property of a datetimelike " + "object are not supported and are discarded. " + "Change values on the original." + ) + + return result + + def _delegate_property_set(self, name: str, value, *args, **kwargs): + raise ValueError( + "modifications to a property of a datetimelike object are not supported. " + "Change values on the original." + ) + + def _delegate_method(self, name: str, *args, **kwargs): + from pandas import Series + + values = self._get_values() + + method = getattr(values, name) + result = method(*args, **kwargs) + + if not is_list_like(result): + return result + + result = Series(result, index=self._parent.index, name=self.name).__finalize__( + self._parent + ) + + # setting this object will show a SettingWithCopyWarning/Error + result._is_copy = ( + "modifications to a method of a datetimelike " + "object are not supported and are discarded. " + "Change values on the original." 
+ ) + + return result + + +@delegate_names( + delegate=ArrowExtensionArray, + accessors=DatetimeArray._datetimelike_ops, + typ="property", + accessor_mapping=lambda x: f"_dt_{x}", + raise_on_missing=False, +) +@delegate_names( + delegate=ArrowExtensionArray, + accessors=DatetimeArray._datetimelike_methods, + typ="method", + accessor_mapping=lambda x: f"_dt_{x}", + raise_on_missing=False, +) +class ArrowTemporalProperties(PandasDelegate, PandasObject, NoNewAttributesMixin): + def __init__(self, data: Series, orig) -> None: + if not isinstance(data, ABCSeries): + raise TypeError( + f"cannot convert an object of type {type(data)} to a datetimelike index" + ) + + self._parent = data + self._orig = orig + self._freeze() + + def _delegate_property_get(self, name: str): # type: ignore[override] + if not hasattr(self._parent.array, f"_dt_{name}"): + raise NotImplementedError( + f"dt.{name} is not supported for {self._parent.dtype}" + ) + result = getattr(self._parent.array, f"_dt_{name}") + + if not is_list_like(result): + return result + + if self._orig is not None: + index = self._orig.index + else: + index = self._parent.index + # return the result as a Series, which is by definition a copy + result = type(self._parent)( + result, index=index, name=self._parent.name + ).__finalize__(self._parent) + + return result + + def _delegate_method(self, name: str, *args, **kwargs): + if not hasattr(self._parent.array, f"_dt_{name}"): + raise NotImplementedError( + f"dt.{name} is not supported for {self._parent.dtype}" + ) + + result = getattr(self._parent.array, f"_dt_{name}")(*args, **kwargs) + + if self._orig is not None: + index = self._orig.index + else: + index = self._parent.index + # return the result as a Series, which is by definition a copy + result = type(self._parent)( + result, index=index, name=self._parent.name + ).__finalize__(self._parent) + + return result + + def to_pydatetime(self): + # GH#20306 + warnings.warn( + f"The behavior of {type(self).__name__}.to_pydatetime is deprecated, " + "in a future version this will return a Series containing python " + "datetime objects instead of an ndarray. To retain the old behavior, " + "call `np.array` on the result", + FutureWarning, + stacklevel=find_stack_level(), + ) + return cast(ArrowExtensionArray, self._parent.array)._dt_to_pydatetime() + + def isocalendar(self): + from pandas import DataFrame + + result = ( + cast(ArrowExtensionArray, self._parent.array) + ._dt_isocalendar() + ._pa_array.combine_chunks() + ) + iso_calendar_df = DataFrame( + { + col: type(self._parent.array)(result.field(i)) # type: ignore[call-arg] + for i, col in enumerate(["year", "week", "day"]) + } + ) + return iso_calendar_df + + +@delegate_names( + delegate=DatetimeArray, + accessors=DatetimeArray._datetimelike_ops + ["unit"], + typ="property", +) +@delegate_names( + delegate=DatetimeArray, + accessors=DatetimeArray._datetimelike_methods + ["as_unit"], + typ="method", +) +class DatetimeProperties(Properties): + """ + Accessor object for datetimelike properties of the Series values. 
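+
+    These properties are reached through the ``Series.dt`` namespace
+    (for example ``s.dt.hour``) rather than by instantiating this class
+    directly.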
+ + Examples + -------- + >>> seconds_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="s")) + >>> seconds_series + 0 2000-01-01 00:00:00 + 1 2000-01-01 00:00:01 + 2 2000-01-01 00:00:02 + dtype: datetime64[ns] + >>> seconds_series.dt.second + 0 0 + 1 1 + 2 2 + dtype: int32 + + >>> hours_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="h")) + >>> hours_series + 0 2000-01-01 00:00:00 + 1 2000-01-01 01:00:00 + 2 2000-01-01 02:00:00 + dtype: datetime64[ns] + >>> hours_series.dt.hour + 0 0 + 1 1 + 2 2 + dtype: int32 + + >>> quarters_series = pd.Series(pd.date_range("2000-01-01", periods=3, freq="q")) + >>> quarters_series + 0 2000-03-31 + 1 2000-06-30 + 2 2000-09-30 + dtype: datetime64[ns] + >>> quarters_series.dt.quarter + 0 1 + 1 2 + 2 3 + dtype: int32 + + Returns a Series indexed like the original Series. + Raises TypeError if the Series does not contain datetimelike values. + """ + + def to_pydatetime(self) -> np.ndarray: + """ + Return the data as an array of :class:`datetime.datetime` objects. + + .. deprecated:: 2.1.0 + + The current behavior of dt.to_pydatetime is deprecated. + In a future version this will return a Series containing python + datetime objects instead of a ndarray. + + Timezone information is retained if present. + + .. warning:: + + Python's datetime uses microsecond resolution, which is lower than + pandas (nanosecond). The values are truncated. + + Returns + ------- + numpy.ndarray + Object dtype array containing native Python datetime objects. + + See Also + -------- + datetime.datetime : Standard library value for a datetime. + + Examples + -------- + >>> s = pd.Series(pd.date_range('20180310', periods=2)) + >>> s + 0 2018-03-10 + 1 2018-03-11 + dtype: datetime64[ns] + + >>> s.dt.to_pydatetime() + array([datetime.datetime(2018, 3, 10, 0, 0), + datetime.datetime(2018, 3, 11, 0, 0)], dtype=object) + + pandas' nanosecond precision is truncated to microseconds. + + >>> s = pd.Series(pd.date_range('20180310', periods=2, freq='ns')) + >>> s + 0 2018-03-10 00:00:00.000000000 + 1 2018-03-10 00:00:00.000000001 + dtype: datetime64[ns] + + >>> s.dt.to_pydatetime() + array([datetime.datetime(2018, 3, 10, 0, 0), + datetime.datetime(2018, 3, 10, 0, 0)], dtype=object) + """ + # GH#20306 + warnings.warn( + f"The behavior of {type(self).__name__}.to_pydatetime is deprecated, " + "in a future version this will return a Series containing python " + "datetime objects instead of an ndarray. To retain the old behavior, " + "call `np.array` on the result", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._get_values().to_pydatetime() + + @property + def freq(self): + return self._get_values().inferred_freq + + def isocalendar(self) -> DataFrame: + """ + Calculate year, week, and day according to the ISO 8601 standard. + + Returns + ------- + DataFrame + With columns year, week and day. + + See Also + -------- + Timestamp.isocalendar : Function return a 3-tuple containing ISO year, + week number, and weekday for the given Timestamp object. + datetime.date.isocalendar : Return a named tuple object with + three components: year, week and weekday. 
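+
+        Notes
+        -----
+        Weeks follow the ISO 8601 convention: week 1 is the week containing
+        the first Thursday of the year, so the first days of January can
+        belong to week 52 or 53 of the previous ISO year (as in the example
+        below, where 2010-01-01 falls in week 53 of ISO year 2009).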
+ + Examples + -------- + >>> ser = pd.to_datetime(pd.Series(["2010-01-01", pd.NaT])) + >>> ser.dt.isocalendar() + year week day + 0 2009 53 5 + 1 + >>> ser.dt.isocalendar().week + 0 53 + 1 + Name: week, dtype: UInt32 + """ + return self._get_values().isocalendar().set_index(self._parent.index) + + +@delegate_names( + delegate=TimedeltaArray, accessors=TimedeltaArray._datetimelike_ops, typ="property" +) +@delegate_names( + delegate=TimedeltaArray, + accessors=TimedeltaArray._datetimelike_methods, + typ="method", +) +class TimedeltaProperties(Properties): + """ + Accessor object for datetimelike properties of the Series values. + + Returns a Series indexed like the original Series. + Raises TypeError if the Series does not contain datetimelike values. + + Examples + -------- + >>> seconds_series = pd.Series( + ... pd.timedelta_range(start="1 second", periods=3, freq="S") + ... ) + >>> seconds_series + 0 0 days 00:00:01 + 1 0 days 00:00:02 + 2 0 days 00:00:03 + dtype: timedelta64[ns] + >>> seconds_series.dt.seconds + 0 1 + 1 2 + 2 3 + dtype: int32 + """ + + def to_pytimedelta(self) -> np.ndarray: + """ + Return an array of native :class:`datetime.timedelta` objects. + + Python's standard `datetime` library uses a different representation + timedelta's. This method converts a Series of pandas Timedeltas + to `datetime.timedelta` format with the same length as the original + Series. + + Returns + ------- + numpy.ndarray + Array of 1D containing data with `datetime.timedelta` type. + + See Also + -------- + datetime.timedelta : A duration expressing the difference + between two date, time, or datetime. + + Examples + -------- + >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="d")) + >>> s + 0 0 days + 1 1 days + 2 2 days + 3 3 days + 4 4 days + dtype: timedelta64[ns] + + >>> s.dt.to_pytimedelta() + array([datetime.timedelta(0), datetime.timedelta(days=1), + datetime.timedelta(days=2), datetime.timedelta(days=3), + datetime.timedelta(days=4)], dtype=object) + """ + return self._get_values().to_pytimedelta() + + @property + def components(self): + """ + Return a Dataframe of the components of the Timedeltas. + + Returns + ------- + DataFrame + + Examples + -------- + >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='s')) + >>> s + 0 0 days 00:00:00 + 1 0 days 00:00:01 + 2 0 days 00:00:02 + 3 0 days 00:00:03 + 4 0 days 00:00:04 + dtype: timedelta64[ns] + >>> s.dt.components + days hours minutes seconds milliseconds microseconds nanoseconds + 0 0 0 0 0 0 0 0 + 1 0 0 0 1 0 0 0 + 2 0 0 0 2 0 0 0 + 3 0 0 0 3 0 0 0 + 4 0 0 0 4 0 0 0 + """ + return ( + self._get_values() + .components.set_index(self._parent.index) + .__finalize__(self._parent) + ) + + @property + def freq(self): + return self._get_values().inferred_freq + + +@delegate_names( + delegate=PeriodArray, accessors=PeriodArray._datetimelike_ops, typ="property" +) +@delegate_names( + delegate=PeriodArray, accessors=PeriodArray._datetimelike_methods, typ="method" +) +class PeriodProperties(Properties): + """ + Accessor object for datetimelike properties of the Series values. + + Returns a Series indexed like the original Series. + Raises TypeError if the Series does not contain datetimelike values. + + Examples + -------- + >>> seconds_series = pd.Series( + ... pd.period_range( + ... start="2000-01-01 00:00:00", end="2000-01-01 00:00:03", freq="s" + ... ) + ... 
) + >>> seconds_series + 0 2000-01-01 00:00:00 + 1 2000-01-01 00:00:01 + 2 2000-01-01 00:00:02 + 3 2000-01-01 00:00:03 + dtype: period[S] + >>> seconds_series.dt.second + 0 0 + 1 1 + 2 2 + 3 3 + dtype: int64 + + >>> hours_series = pd.Series( + ... pd.period_range(start="2000-01-01 00:00", end="2000-01-01 03:00", freq="h") + ... ) + >>> hours_series + 0 2000-01-01 00:00 + 1 2000-01-01 01:00 + 2 2000-01-01 02:00 + 3 2000-01-01 03:00 + dtype: period[H] + >>> hours_series.dt.hour + 0 0 + 1 1 + 2 2 + 3 3 + dtype: int64 + + >>> quarters_series = pd.Series( + ... pd.period_range(start="2000-01-01", end="2000-12-31", freq="Q-DEC") + ... ) + >>> quarters_series + 0 2000Q1 + 1 2000Q2 + 2 2000Q3 + 3 2000Q4 + dtype: period[Q-DEC] + >>> quarters_series.dt.quarter + 0 1 + 1 2 + 2 3 + 3 4 + dtype: int64 + """ + + +class CombinedDatetimelikeProperties( + DatetimeProperties, TimedeltaProperties, PeriodProperties +): + def __new__(cls, data: Series): # pyright: ignore[reportInconsistentConstructor] + # CombinedDatetimelikeProperties isn't really instantiated. Instead + # we need to choose which parent (datetime or timedelta) is + # appropriate. Since we're checking the dtypes anyway, we'll just + # do all the validation here. + + if not isinstance(data, ABCSeries): + raise TypeError( + f"cannot convert an object of type {type(data)} to a datetimelike index" + ) + + orig = data if isinstance(data.dtype, CategoricalDtype) else None + if orig is not None: + data = data._constructor( + orig.array, + name=orig.name, + copy=False, + dtype=orig._values.categories.dtype, + index=orig.index, + ) + + if isinstance(data.dtype, ArrowDtype) and data.dtype.kind == "M": + return ArrowTemporalProperties(data, orig) + if lib.is_np_dtype(data.dtype, "M"): + return DatetimeProperties(data, orig) + elif isinstance(data.dtype, DatetimeTZDtype): + return DatetimeProperties(data, orig) + elif lib.is_np_dtype(data.dtype, "m"): + return TimedeltaProperties(data, orig) + elif isinstance(data.dtype, PeriodDtype): + return PeriodProperties(data, orig) + + raise AttributeError("Can only use .dt accessor with datetimelike values") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/api.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/api.py new file mode 100644 index 00000000..781dfae7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/api.py @@ -0,0 +1,381 @@ +from __future__ import annotations + +import textwrap +from typing import ( + TYPE_CHECKING, + cast, +) + +import numpy as np + +from pandas._libs import ( + NaT, + lib, +) +from pandas.errors import InvalidIndexError + +from pandas.core.dtypes.cast import find_common_type + +from pandas.core.algorithms import safe_sort +from pandas.core.indexes.base import ( + Index, + _new_Index, + ensure_index, + ensure_index_from_sequences, + get_unanimous_names, +) +from pandas.core.indexes.category import CategoricalIndex +from pandas.core.indexes.datetimes import DatetimeIndex +from pandas.core.indexes.interval import IntervalIndex +from pandas.core.indexes.multi import MultiIndex +from pandas.core.indexes.period import PeriodIndex +from pandas.core.indexes.range import RangeIndex +from pandas.core.indexes.timedeltas import TimedeltaIndex + +if TYPE_CHECKING: + from pandas._typing import Axis +_sort_msg = textwrap.dedent( + """\ +Sorting because non-concatenation axis is not aligned. A future version +of pandas will change to not sort by default. + +To accept the future behavior, pass 'sort=False'. 
+ +To retain the current behavior and silence the warning, pass 'sort=True'. +""" +) + + +__all__ = [ + "Index", + "MultiIndex", + "CategoricalIndex", + "IntervalIndex", + "RangeIndex", + "InvalidIndexError", + "TimedeltaIndex", + "PeriodIndex", + "DatetimeIndex", + "_new_Index", + "NaT", + "ensure_index", + "ensure_index_from_sequences", + "get_objs_combined_axis", + "union_indexes", + "get_unanimous_names", + "all_indexes_same", + "default_index", + "safe_sort_index", +] + + +def get_objs_combined_axis( + objs, + intersect: bool = False, + axis: Axis = 0, + sort: bool = True, + copy: bool = False, +) -> Index: + """ + Extract combined index: return intersection or union (depending on the + value of "intersect") of indexes on given axis, or None if all objects + lack indexes (e.g. they are numpy arrays). + + Parameters + ---------- + objs : list + Series or DataFrame objects, may be mix of the two. + intersect : bool, default False + If True, calculate the intersection between indexes. Otherwise, + calculate the union. + axis : {0 or 'index', 1 or 'outer'}, default 0 + The axis to extract indexes from. + sort : bool, default True + Whether the result index should come out sorted or not. + copy : bool, default False + If True, return a copy of the combined index. + + Returns + ------- + Index + """ + obs_idxes = [obj._get_axis(axis) for obj in objs] + return _get_combined_index(obs_idxes, intersect=intersect, sort=sort, copy=copy) + + +def _get_distinct_objs(objs: list[Index]) -> list[Index]: + """ + Return a list with distinct elements of "objs" (different ids). + Preserves order. + """ + ids: set[int] = set() + res = [] + for obj in objs: + if id(obj) not in ids: + ids.add(id(obj)) + res.append(obj) + return res + + +def _get_combined_index( + indexes: list[Index], + intersect: bool = False, + sort: bool = False, + copy: bool = False, +) -> Index: + """ + Return the union or intersection of indexes. + + Parameters + ---------- + indexes : list of Index or list objects + When intersect=True, do not accept list of lists. + intersect : bool, default False + If True, calculate the intersection between indexes. Otherwise, + calculate the union. + sort : bool, default False + Whether the result index should come out sorted or not. + copy : bool, default False + If True, return a copy of the combined index. + + Returns + ------- + Index + """ + # TODO: handle index names! + indexes = _get_distinct_objs(indexes) + if len(indexes) == 0: + index = Index([]) + elif len(indexes) == 1: + index = indexes[0] + elif intersect: + index = indexes[0] + for other in indexes[1:]: + index = index.intersection(other) + else: + index = union_indexes(indexes, sort=False) + index = ensure_index(index) + + if sort: + index = safe_sort_index(index) + # GH 29879 + if copy: + index = index.copy() + + return index + + +def safe_sort_index(index: Index) -> Index: + """ + Returns the sorted index + + We keep the dtypes and the name attributes. 
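+    An index that is already monotonically increasing is returned as-is,
+    and if the values cannot be safely ordered (``safe_sort`` raises
+    ``TypeError``) the original order is kept.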
+ + Parameters + ---------- + index : an Index + + Returns + ------- + Index + """ + if index.is_monotonic_increasing: + return index + + try: + array_sorted = safe_sort(index) + except TypeError: + pass + else: + if isinstance(array_sorted, Index): + return array_sorted + + array_sorted = cast(np.ndarray, array_sorted) + if isinstance(index, MultiIndex): + index = MultiIndex.from_tuples(array_sorted, names=index.names) + else: + index = Index(array_sorted, name=index.name, dtype=index.dtype) + + return index + + +def union_indexes(indexes, sort: bool | None = True) -> Index: + """ + Return the union of indexes. + + The behavior of sort and names is not consistent. + + Parameters + ---------- + indexes : list of Index or list objects + sort : bool, default True + Whether the result index should come out sorted or not. + + Returns + ------- + Index + """ + if len(indexes) == 0: + raise AssertionError("Must have at least 1 Index to union") + if len(indexes) == 1: + result = indexes[0] + if isinstance(result, list): + result = Index(sorted(result)) + return result + + indexes, kind = _sanitize_and_check(indexes) + + def _unique_indices(inds, dtype) -> Index: + """ + Concatenate indices and remove duplicates. + + Parameters + ---------- + inds : list of Index or list objects + dtype : dtype to set for the resulting Index + + Returns + ------- + Index + """ + if all(isinstance(ind, Index) for ind in inds): + result = inds[0].append(inds[1:]).unique() + result = result.astype(dtype, copy=False) + if sort: + result = result.sort_values() + return result + + def conv(i): + if isinstance(i, Index): + i = i.tolist() + return i + + return Index( + lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort), + dtype=dtype, + ) + + def _find_common_index_dtype(inds): + """ + Finds a common type for the indexes to pass through to resulting index. + + Parameters + ---------- + inds: list of Index or list objects + + Returns + ------- + The common type or None if no indexes were given + """ + dtypes = [idx.dtype for idx in indexes if isinstance(idx, Index)] + if dtypes: + dtype = find_common_type(dtypes) + else: + dtype = None + + return dtype + + if kind == "special": + result = indexes[0] + + dtis = [x for x in indexes if isinstance(x, DatetimeIndex)] + dti_tzs = [x for x in dtis if x.tz is not None] + if len(dti_tzs) not in [0, len(dtis)]: + # TODO: this behavior is not tested (so may not be desired), + # but is kept in order to keep behavior the same when + # deprecating union_many + # test_frame_from_dict_with_mixed_indexes + raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") + + if len(dtis) == len(indexes): + sort = True + result = indexes[0] + + elif len(dtis) > 1: + # If we have mixed timezones, our casting behavior may depend on + # the order of indexes, which we don't want. + sort = False + + # TODO: what about Categorical[dt64]? 
+ # test_frame_from_dict_with_mixed_indexes + indexes = [x.astype(object, copy=False) for x in indexes] + result = indexes[0] + + for other in indexes[1:]: + result = result.union(other, sort=None if sort else False) + return result + + elif kind == "array": + dtype = _find_common_index_dtype(indexes) + index = indexes[0] + if not all(index.equals(other) for other in indexes[1:]): + index = _unique_indices(indexes, dtype) + + name = get_unanimous_names(*indexes)[0] + if name != index.name: + index = index.rename(name) + return index + else: # kind='list' + dtype = _find_common_index_dtype(indexes) + return _unique_indices(indexes, dtype) + + +def _sanitize_and_check(indexes): + """ + Verify the type of indexes and convert lists to Index. + + Cases: + + - [list, list, ...]: Return ([list, list, ...], 'list') + - [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...]) + Lists are sorted and converted to Index. + - [Index, Index, ...]: Return ([Index, Index, ...], TYPE) + TYPE = 'special' if at least one special type, 'array' otherwise. + + Parameters + ---------- + indexes : list of Index or list objects + + Returns + ------- + sanitized_indexes : list of Index or list objects + type : {'list', 'array', 'special'} + """ + kinds = list({type(index) for index in indexes}) + + if list in kinds: + if len(kinds) > 1: + indexes = [ + Index(list(x)) if not isinstance(x, Index) else x for x in indexes + ] + kinds.remove(list) + else: + return indexes, "list" + + if len(kinds) > 1 or Index not in kinds: + return indexes, "special" + else: + return indexes, "array" + + +def all_indexes_same(indexes) -> bool: + """ + Determine if all indexes contain the same elements. + + Parameters + ---------- + indexes : iterable of Index objects + + Returns + ------- + bool + True if all indexes contain the same elements, False otherwise. 
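+
+    Examples
+    --------
+    Illustrative sketch (assumes ``pd`` is imported):
+
+    >>> all_indexes_same([pd.Index([1, 2]), pd.Index([1, 2])])
+    True
+    >>> all_indexes_same([pd.Index([1, 2]), pd.Index([2, 1])])
+    False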
+ """ + itr = iter(indexes) + first = next(itr) + return all(first.equals(index) for index in itr) + + +def default_index(n: int) -> RangeIndex: + rng = range(0, n) + return RangeIndex._simple_new(rng, name=None) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/base.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/base.py new file mode 100644 index 00000000..aaea5a58 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/base.py @@ -0,0 +1,7695 @@ +from __future__ import annotations + +from collections import abc +from datetime import datetime +import functools +from itertools import zip_longest +import operator +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Literal, + NoReturn, + cast, + final, + overload, +) +import warnings + +import numpy as np + +from pandas._config import ( + get_option, + using_copy_on_write, + using_pyarrow_string_dtype, +) + +from pandas._libs import ( + NaT, + algos as libalgos, + index as libindex, + lib, +) +from pandas._libs.internals import BlockValuesRefs +import pandas._libs.join as libjoin +from pandas._libs.lib import ( + is_datetime_array, + no_default, +) +from pandas._libs.missing import is_float_nan +from pandas._libs.tslibs import ( + IncompatibleFrequency, + OutOfBoundsDatetime, + Timestamp, + tz_compare, +) +from pandas._typing import ( + AnyAll, + ArrayLike, + Axes, + Axis, + DropKeep, + DtypeObj, + F, + IgnoreRaise, + IndexLabel, + JoinHow, + Level, + NaPosition, + ReindexMethod, + Self, + Shape, + npt, +) +from pandas.compat.numpy import function as nv +from pandas.errors import ( + DuplicateLabelError, + InvalidIndexError, +) +from pandas.util._decorators import ( + Appender, + cache_readonly, + doc, +) +from pandas.util._exceptions import ( + find_stack_level, + rewrite_exception, +) + +from pandas.core.dtypes.astype import ( + astype_array, + astype_is_view, +) +from pandas.core.dtypes.cast import ( + LossySetitemError, + can_hold_element, + common_dtype_categorical_compat, + find_result_type, + infer_dtype_from, + maybe_cast_pointwise_result, + np_can_hold_element, +) +from pandas.core.dtypes.common import ( + ensure_int64, + ensure_object, + ensure_platform_int, + is_any_real_numeric_dtype, + is_bool_dtype, + is_ea_or_datetimelike_dtype, + is_float, + is_float_dtype, + is_hashable, + is_integer, + is_iterator, + is_list_like, + is_numeric_dtype, + is_object_dtype, + is_scalar, + is_signed_integer_dtype, + is_string_dtype, + needs_i8_conversion, + pandas_dtype, + validate_all_hashable, +) +from pandas.core.dtypes.concat import concat_compat +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + IntervalDtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCDatetimeIndex, + ABCMultiIndex, + ABCPeriodIndex, + ABCSeries, + ABCTimedeltaIndex, +) +from pandas.core.dtypes.inference import is_dict_like +from pandas.core.dtypes.missing import ( + array_equivalent, + is_valid_na_for_dtype, + isna, +) + +from pandas.core import ( + arraylike, + nanops, + ops, +) +from pandas.core.accessor import CachedAccessor +import pandas.core.algorithms as algos +from pandas.core.array_algos.putmask import ( + setitem_datetimelike_compat, + validate_putmask, +) +from pandas.core.arrays import ( + ArrowExtensionArray, + BaseMaskedArray, + Categorical, + ExtensionArray, +) +from pandas.core.arrays.string_ import StringArray +from pandas.core.base import ( + IndexOpsMixin, + PandasObject, +) 
+import pandas.core.common as com +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, + sanitize_array, +) +from pandas.core.indexers import ( + disallow_ndim_indexing, + is_valid_positional_slice, +) +from pandas.core.indexes.frozen import FrozenList +from pandas.core.missing import clean_reindex_fill_method +from pandas.core.ops import get_op_result_name +from pandas.core.ops.invalid import make_invalid_op +from pandas.core.sorting import ( + ensure_key_mapped, + get_group_index_sorter, + nargsort, +) +from pandas.core.strings.accessor import StringMethods + +from pandas.io.formats.printing import ( + PrettyDict, + default_pprint, + format_object_summary, + pprint_thing, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterable, + Sequence, + ) + + from pandas import ( + CategoricalIndex, + DataFrame, + MultiIndex, + Series, + ) + from pandas.core.arrays import PeriodArray + +__all__ = ["Index"] + +_unsortable_types = frozenset(("mixed", "mixed-integer")) + +_index_doc_kwargs: dict[str, str] = { + "klass": "Index", + "inplace": "", + "target_klass": "Index", + "raises_section": "", + "unique": "Index", + "duplicated": "np.ndarray", +} +_index_shared_docs: dict[str, str] = {} +str_t = str + +_dtype_obj = np.dtype("object") + +_masked_engines = { + "Complex128": libindex.MaskedComplex128Engine, + "Complex64": libindex.MaskedComplex64Engine, + "Float64": libindex.MaskedFloat64Engine, + "Float32": libindex.MaskedFloat32Engine, + "UInt64": libindex.MaskedUInt64Engine, + "UInt32": libindex.MaskedUInt32Engine, + "UInt16": libindex.MaskedUInt16Engine, + "UInt8": libindex.MaskedUInt8Engine, + "Int64": libindex.MaskedInt64Engine, + "Int32": libindex.MaskedInt32Engine, + "Int16": libindex.MaskedInt16Engine, + "Int8": libindex.MaskedInt8Engine, + "boolean": libindex.MaskedBoolEngine, + "double[pyarrow]": libindex.MaskedFloat64Engine, + "float64[pyarrow]": libindex.MaskedFloat64Engine, + "float32[pyarrow]": libindex.MaskedFloat32Engine, + "float[pyarrow]": libindex.MaskedFloat32Engine, + "uint64[pyarrow]": libindex.MaskedUInt64Engine, + "uint32[pyarrow]": libindex.MaskedUInt32Engine, + "uint16[pyarrow]": libindex.MaskedUInt16Engine, + "uint8[pyarrow]": libindex.MaskedUInt8Engine, + "int64[pyarrow]": libindex.MaskedInt64Engine, + "int32[pyarrow]": libindex.MaskedInt32Engine, + "int16[pyarrow]": libindex.MaskedInt16Engine, + "int8[pyarrow]": libindex.MaskedInt8Engine, + "bool[pyarrow]": libindex.MaskedBoolEngine, +} + + +def _maybe_return_indexers(meth: F) -> F: + """ + Decorator to simplify 'return_indexers' checks in Index.join. + """ + + @functools.wraps(meth) + def join( + self, + other: Index, + *, + how: JoinHow = "left", + level=None, + return_indexers: bool = False, + sort: bool = False, + ): + join_index, lidx, ridx = meth(self, other, how=how, level=level, sort=sort) + if not return_indexers: + return join_index + + if lidx is not None: + lidx = ensure_platform_int(lidx) + if ridx is not None: + ridx = ensure_platform_int(ridx) + return join_index, lidx, ridx + + return cast(F, join) + + +def _new_Index(cls, d): + """ + This is called upon unpickling, rather than the default which doesn't + have arguments and breaks __new__. 
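+
+    Parameters
+    ----------
+    cls : type
+        The Index subclass being reconstructed.
+    d : dict
+        The pickled state, e.g. the "data" and "name" captured by
+        ``__reduce__``.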
+ """ + # required for backward compat, because PI can't be instantiated with + # ordinals through __new__ GH #13277 + if issubclass(cls, ABCPeriodIndex): + from pandas.core.indexes.period import _new_PeriodIndex + + return _new_PeriodIndex(cls, **d) + + if issubclass(cls, ABCMultiIndex): + if "labels" in d and "codes" not in d: + # GH#23752 "labels" kwarg has been replaced with "codes" + d["codes"] = d.pop("labels") + + # Since this was a valid MultiIndex at pickle-time, we don't need to + # check validty at un-pickle time. + d["verify_integrity"] = False + + elif "dtype" not in d and "data" in d: + # Prevent Index.__new__ from conducting inference; + # "data" key not in RangeIndex + d["dtype"] = d["data"].dtype + return cls.__new__(cls, **d) + + +class Index(IndexOpsMixin, PandasObject): + """ + Immutable sequence used for indexing and alignment. + + The basic object storing axis labels for all pandas objects. + + .. versionchanged:: 2.0.0 + + Index can hold all numpy numeric dtypes (except float16). Previously only + int64/uint64/float64 dtypes were accepted. + + Parameters + ---------- + data : array-like (1-dimensional) + dtype : NumPy dtype (default: object) + If dtype is None, we find the dtype that best fits the data. + If an actual dtype is provided, we coerce to that dtype if it's safe. + Otherwise, an error will be raised. + copy : bool + Make a copy of input ndarray. + name : object + Name to be stored in the index. + tupleize_cols : bool (default: True) + When True, attempt to create a MultiIndex if possible. + + See Also + -------- + RangeIndex : Index implementing a monotonic integer range. + CategoricalIndex : Index of :class:`Categorical` s. + MultiIndex : A multi-level, or hierarchical Index. + IntervalIndex : An Index of :class:`Interval` s. + DatetimeIndex : Index of datetime64 data. + TimedeltaIndex : Index of timedelta64 data. + PeriodIndex : Index of Period data. + + Notes + ----- + An Index instance can **only** contain hashable objects. + An Index instance *can not* hold numpy float16 dtype. + + Examples + -------- + >>> pd.Index([1, 2, 3]) + Index([1, 2, 3], dtype='int64') + + >>> pd.Index(list('abc')) + Index(['a', 'b', 'c'], dtype='object') + + >>> pd.Index([1, 2, 3], dtype="uint8") + Index([1, 2, 3], dtype='uint8') + """ + + # To hand over control to subclasses + _join_precedence = 1 + + # similar to __array_priority__, positions Index after Series and DataFrame + # but before ExtensionArray. Should NOT be overridden by subclasses. 
+ __pandas_priority__ = 2000 + + # Cython methods; see github.com/cython/cython/issues/2647 + # for why we need to wrap these instead of making them class attributes + # Moreover, cython will choose the appropriate-dtyped sub-function + # given the dtypes of the passed arguments + + @final + def _left_indexer_unique(self, other: Self) -> npt.NDArray[np.intp]: + # Caller is responsible for ensuring other.dtype == self.dtype + sv = self._get_join_target() + ov = other._get_join_target() + # can_use_libjoin assures sv and ov are ndarrays + sv = cast(np.ndarray, sv) + ov = cast(np.ndarray, ov) + # similar but not identical to ov.searchsorted(sv) + return libjoin.left_join_indexer_unique(sv, ov) + + @final + def _left_indexer( + self, other: Self + ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: + # Caller is responsible for ensuring other.dtype == self.dtype + sv = self._get_join_target() + ov = other._get_join_target() + # can_use_libjoin assures sv and ov are ndarrays + sv = cast(np.ndarray, sv) + ov = cast(np.ndarray, ov) + joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov) + joined = self._from_join_target(joined_ndarray) + return joined, lidx, ridx + + @final + def _inner_indexer( + self, other: Self + ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: + # Caller is responsible for ensuring other.dtype == self.dtype + sv = self._get_join_target() + ov = other._get_join_target() + # can_use_libjoin assures sv and ov are ndarrays + sv = cast(np.ndarray, sv) + ov = cast(np.ndarray, ov) + joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov) + joined = self._from_join_target(joined_ndarray) + return joined, lidx, ridx + + @final + def _outer_indexer( + self, other: Self + ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: + # Caller is responsible for ensuring other.dtype == self.dtype + sv = self._get_join_target() + ov = other._get_join_target() + # can_use_libjoin assures sv and ov are ndarrays + sv = cast(np.ndarray, sv) + ov = cast(np.ndarray, ov) + joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov) + joined = self._from_join_target(joined_ndarray) + return joined, lidx, ridx + + _typ: str = "index" + _data: ExtensionArray | np.ndarray + _data_cls: type[ExtensionArray] | tuple[type[np.ndarray], type[ExtensionArray]] = ( + np.ndarray, + ExtensionArray, + ) + _id: object | None = None + _name: Hashable = None + # MultiIndex.levels previously allowed setting the index name. We + # don't allow this anymore, and raise if it happens rather than + # failing silently. 
+ _no_setting_name: bool = False + _comparables: list[str] = ["name"] + _attributes: list[str] = ["name"] + + @cache_readonly + def _can_hold_strings(self) -> bool: + return not is_numeric_dtype(self.dtype) + + _engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = { + np.dtype(np.int8): libindex.Int8Engine, + np.dtype(np.int16): libindex.Int16Engine, + np.dtype(np.int32): libindex.Int32Engine, + np.dtype(np.int64): libindex.Int64Engine, + np.dtype(np.uint8): libindex.UInt8Engine, + np.dtype(np.uint16): libindex.UInt16Engine, + np.dtype(np.uint32): libindex.UInt32Engine, + np.dtype(np.uint64): libindex.UInt64Engine, + np.dtype(np.float32): libindex.Float32Engine, + np.dtype(np.float64): libindex.Float64Engine, + np.dtype(np.complex64): libindex.Complex64Engine, + np.dtype(np.complex128): libindex.Complex128Engine, + } + + @property + def _engine_type( + self, + ) -> type[libindex.IndexEngine] | type[libindex.ExtensionEngine]: + return self._engine_types.get(self.dtype, libindex.ObjectEngine) + + # whether we support partial string indexing. Overridden + # in DatetimeIndex and PeriodIndex + _supports_partial_string_indexing = False + + _accessors = {"str"} + + str = CachedAccessor("str", StringMethods) + + _references = None + + # -------------------------------------------------------------------- + # Constructors + + def __new__( + cls, + data=None, + dtype=None, + copy: bool = False, + name=None, + tupleize_cols: bool = True, + ) -> Index: + from pandas.core.indexes.range import RangeIndex + + name = maybe_extract_name(name, data, cls) + + if dtype is not None: + dtype = pandas_dtype(dtype) + + data_dtype = getattr(data, "dtype", None) + + refs = None + if not copy and isinstance(data, (ABCSeries, Index)): + refs = data._references + + # range + if isinstance(data, (range, RangeIndex)): + result = RangeIndex(start=data, copy=copy, name=name) + if dtype is not None: + return result.astype(dtype, copy=False) + return result + + elif is_ea_or_datetimelike_dtype(dtype): + # non-EA dtype indexes have special casting logic, so we punt here + pass + + elif is_ea_or_datetimelike_dtype(data_dtype): + pass + + elif isinstance(data, (np.ndarray, Index, ABCSeries)): + if isinstance(data, ABCMultiIndex): + data = data._values + + if data.dtype.kind not in "iufcbmM": + # GH#11836 we need to avoid having numpy coerce + # things that look like ints/floats to ints unless + # they are actually ints, e.g. 
'0' and 0.0 + # should not be coerced + data = com.asarray_tuplesafe(data, dtype=_dtype_obj) + + elif is_scalar(data): + raise cls._raise_scalar_data_error(data) + elif hasattr(data, "__array__"): + return Index(np.asarray(data), dtype=dtype, copy=copy, name=name) + elif not is_list_like(data) and not isinstance(data, memoryview): + # 2022-11-16 the memoryview check is only necessary on some CI + # builds, not clear why + raise cls._raise_scalar_data_error(data) + + else: + if tupleize_cols: + # GH21470: convert iterable to list before determining if empty + if is_iterator(data): + data = list(data) + + if data and all(isinstance(e, tuple) for e in data): + # we must be all tuples, otherwise don't construct + # 10697 + from pandas.core.indexes.multi import MultiIndex + + return MultiIndex.from_tuples(data, names=name) + # other iterable of some kind + + if not isinstance(data, (list, tuple)): + # we allow set/frozenset, which Series/sanitize_array does not, so + # cast to list here + data = list(data) + if len(data) == 0: + # unlike Series, we default to object dtype: + data = np.array(data, dtype=object) + + if len(data) and isinstance(data[0], tuple): + # Ensure we get 1-D array of tuples instead of 2D array. + data = com.asarray_tuplesafe(data, dtype=_dtype_obj) + + try: + arr = sanitize_array(data, None, dtype=dtype, copy=copy) + except ValueError as err: + if "index must be specified when data is not list-like" in str(err): + raise cls._raise_scalar_data_error(data) from err + if "Data must be 1-dimensional" in str(err): + raise ValueError("Index data must be 1-dimensional") from err + raise + arr = ensure_wrapped_if_datetimelike(arr) + + klass = cls._dtype_to_subclass(arr.dtype) + + arr = klass._ensure_array(arr, arr.dtype, copy=False) + return klass._simple_new(arr, name, refs=refs) + + @classmethod + def _ensure_array(cls, data, dtype, copy: bool): + """ + Ensure we have a valid array to pass to _simple_new. + """ + if data.ndim > 1: + # GH#13601, GH#20285, GH#27125 + raise ValueError("Index data must be 1-dimensional") + elif dtype == np.float16: + # float16 not supported (no indexing engine) + raise NotImplementedError("float16 indexes are not supported") + + if copy: + # asarray_tuplesafe does not always copy underlying data, + # so need to make sure that this happens + data = data.copy() + return data + + @final + @classmethod + def _dtype_to_subclass(cls, dtype: DtypeObj): + # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423 + + if isinstance(dtype, ExtensionDtype): + if isinstance(dtype, DatetimeTZDtype): + from pandas import DatetimeIndex + + return DatetimeIndex + elif isinstance(dtype, CategoricalDtype): + from pandas import CategoricalIndex + + return CategoricalIndex + elif isinstance(dtype, IntervalDtype): + from pandas import IntervalIndex + + return IntervalIndex + elif isinstance(dtype, PeriodDtype): + from pandas import PeriodIndex + + return PeriodIndex + + return Index + + if dtype.kind == "M": + from pandas import DatetimeIndex + + return DatetimeIndex + + elif dtype.kind == "m": + from pandas import TimedeltaIndex + + return TimedeltaIndex + + elif dtype.kind == "O": + # NB: assuming away MultiIndex + return Index + + elif issubclass(dtype.type, str) or is_numeric_dtype(dtype): + return Index + + raise NotImplementedError(dtype) + + # NOTE for new Index creation: + + # - _simple_new: It returns new Index with the same type as the caller. + # All metadata (such as name) must be provided by caller's responsibility. 
+    #   Using _shallow_copy is recommended because it fills these metadata
+    #   otherwise specified.
+
+    # - _shallow_copy: It returns new Index with the same type (using
+    #   _simple_new), but fills caller's metadata otherwise specified. Passed
+    #   kwargs will overwrite corresponding metadata.
+
+    # See each method's docstring.
+
+    @classmethod
+    def _simple_new(
+        cls, values: ArrayLike, name: Hashable | None = None, refs=None
+    ) -> Self:
+        """
+        We require that we have a dtype compat for the values. If we are passed
+        a non-dtype compat, then coerce using the constructor.
+
+        Must be careful not to recurse.
+        """
+        assert isinstance(values, cls._data_cls), type(values)
+
+        result = object.__new__(cls)
+        result._data = values
+        result._name = name
+        result._cache = {}
+        result._reset_identity()
+        if refs is not None:
+            result._references = refs
+        else:
+            result._references = BlockValuesRefs()
+        result._references.add_index_reference(result)
+
+        return result
+
+    @classmethod
+    def _with_infer(cls, *args, **kwargs):
+        """
+        Constructor that uses the 1.0.x behavior inferring numeric dtypes
+        for ndarray[object] inputs.
+        """
+        result = cls(*args, **kwargs)
+
+        if result.dtype == _dtype_obj and not result._is_multi:
+            # error: Argument 1 to "maybe_convert_objects" has incompatible type
+            # "Union[ExtensionArray, ndarray[Any, Any]]"; expected
+            # "ndarray[Any, Any]"
+            values = lib.maybe_convert_objects(result._values)  # type: ignore[arg-type]
+            if values.dtype.kind in "iufb":
+                return Index(values, name=result.name)
+
+        return result
+
+    @cache_readonly
+    def _constructor(self) -> type[Self]:
+        return type(self)
+
+    @final
+    def _maybe_check_unique(self) -> None:
+        """
+        Check that an Index has no duplicates.
+
+        This is typically only called via
+        `NDFrame.flags.allows_duplicate_labels.setter` when it's set to
+        False (duplicates aren't allowed).
+
+        Raises
+        ------
+        DuplicateLabelError
+            When the index is not unique.
+        """
+        if not self.is_unique:
+            msg = """Index has duplicates."""
+            duplicates = self._format_duplicate_message()
+            msg += f"\n{duplicates}"
+
+            raise DuplicateLabelError(msg)
+
+    @final
+    def _format_duplicate_message(self) -> DataFrame:
+        """
+        Construct the DataFrame for a DuplicateLabelError.
+
+        This returns a DataFrame indicating the labels and positions
+        of duplicates in an index. This should only be called when it's
+        already known that duplicates are present.
+
+        Examples
+        --------
+        >>> idx = pd.Index(['a', 'b', 'a'])
+        >>> idx._format_duplicate_message()
+            positions
+        label
+        a        [0, 2]
+        """
+        from pandas import Series
+
+        duplicates = self[self.duplicated(keep="first")].unique()
+        assert len(duplicates)
+
+        out = (
+            Series(np.arange(len(self)))
+            .groupby(self, observed=False)
+            .agg(list)[duplicates]
+        )
+        if self._is_multi:
+            # test_format_duplicate_labels_message_multi
+            # error: "Type[Index]" has no attribute "from_tuples"  [attr-defined]
+            out.index = type(self).from_tuples(out.index)  # type: ignore[attr-defined]
+
+        if self.nlevels == 1:
+            out = out.rename_axis("label")
+        return out.to_frame(name="positions")
+
+    # --------------------------------------------------------------------
+    # Index Internals Methods
+
+    def _shallow_copy(self, values, name: Hashable = no_default) -> Self:
+        """
+        Create a new Index with the same class as the caller, don't copy the
+        data, use the same object attributes with passed in attributes taking
+        precedence.
+ + *this is an internal non-public method* + + Parameters + ---------- + values : the values to create the new Index, optional + name : Label, defaults to self.name + """ + name = self._name if name is no_default else name + + return self._simple_new(values, name=name, refs=self._references) + + def _view(self) -> Self: + """ + fastpath to make a shallow copy, i.e. new object with same data. + """ + result = self._simple_new(self._values, name=self._name, refs=self._references) + + result._cache = self._cache + return result + + @final + def _rename(self, name: Hashable) -> Self: + """ + fastpath for rename if new name is already validated. + """ + result = self._view() + result._name = name + return result + + @final + def is_(self, other) -> bool: + """ + More flexible, faster check like ``is`` but that works through views. + + Note: this is *not* the same as ``Index.identical()``, which checks + that metadata is also the same. + + Parameters + ---------- + other : object + Other object to compare against. + + Returns + ------- + bool + True if both have same underlying data, False otherwise. + + See Also + -------- + Index.identical : Works like ``Index.is_`` but also checks metadata. + + Examples + -------- + >>> idx1 = pd.Index(['1', '2', '3']) + >>> idx1.is_(idx1.view()) + True + + >>> idx1.is_(idx1.copy()) + False + """ + if self is other: + return True + elif not hasattr(other, "_id"): + return False + elif self._id is None or other._id is None: + return False + else: + return self._id is other._id + + @final + def _reset_identity(self) -> None: + """ + Initializes or resets ``_id`` attribute with new object. + """ + self._id = object() + + @final + def _cleanup(self) -> None: + self._engine.clear_mapping() + + @cache_readonly + def _engine( + self, + ) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine: + # For base class (object dtype) we get ObjectEngine + target_values = self._get_engine_target() + + if isinstance(self._values, ArrowExtensionArray) and self.dtype.kind in "Mm": + import pyarrow as pa + + pa_type = self._values._pa_array.type + if pa.types.is_timestamp(pa_type): + target_values = self._values._to_datetimearray() + return libindex.DatetimeEngine(target_values._ndarray) + elif pa.types.is_duration(pa_type): + target_values = self._values._to_timedeltaarray() + return libindex.TimedeltaEngine(target_values._ndarray) + + if isinstance(target_values, ExtensionArray): + if isinstance(target_values, (BaseMaskedArray, ArrowExtensionArray)): + try: + return _masked_engines[target_values.dtype.name](target_values) + except KeyError: + # Not supported yet e.g. decimal + pass + elif self._engine_type is libindex.ObjectEngine: + return libindex.ExtensionEngine(target_values) + + target_values = cast(np.ndarray, target_values) + # to avoid a reference cycle, bind `target_values` to a local variable, so + # `self` is not passed into the lambda. 
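+        # Engine selection for plain ndarray-backed indexes: bool and complex
+        # dtypes get dedicated engines, datetime-like dtypes are re-bound to
+        # their M8/m8 ndarray below, and the rest fall through to
+        # self._engine_type.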
+        if target_values.dtype == bool:
+            return libindex.BoolEngine(target_values)
+        elif target_values.dtype == np.complex64:
+            return libindex.Complex64Engine(target_values)
+        elif target_values.dtype == np.complex128:
+            return libindex.Complex128Engine(target_values)
+        elif needs_i8_conversion(self.dtype):
+            # We need to keep M8/m8 dtype when initializing the Engine,
+            # but don't want to change _get_engine_target bc it is used
+            # elsewhere
+            # error: Item "ExtensionArray" of "Union[ExtensionArray,
+            # ndarray[Any, Any]]" has no attribute "_ndarray"  [union-attr]
+            target_values = self._data._ndarray  # type: ignore[union-attr]
+
+        # error: Argument 1 to "ExtensionEngine" has incompatible type
+        # "ndarray[Any, Any]"; expected "ExtensionArray"
+        return self._engine_type(target_values)  # type: ignore[arg-type]
+
+    @final
+    @cache_readonly
+    def _dir_additions_for_owner(self) -> set[str_t]:
+        """
+        Add the string-like labels to the owner dataframe/series dir output.
+
+        If this is a MultiIndex, its first level values are used.
+        """
+        return {
+            c
+            for c in self.unique(level=0)[: get_option("display.max_dir_items")]
+            if isinstance(c, str) and c.isidentifier()
+        }
+
+    # --------------------------------------------------------------------
+    # Array-Like Methods
+
+    # ndarray compat
+    def __len__(self) -> int:
+        """
+        Return the length of the Index.
+        """
+        return len(self._data)
+
+    def __array__(self, dtype=None) -> np.ndarray:
+        """
+        The array interface, return my values.
+        """
+        return np.asarray(self._data, dtype=dtype)
+
+    def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs):
+        if any(isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs):
+            return NotImplemented
+
+        result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
+            self, ufunc, method, *inputs, **kwargs
+        )
+        if result is not NotImplemented:
+            return result
+
+        if "out" in kwargs:
+            # e.g. test_dti_isub_tdi
+            return arraylike.dispatch_ufunc_with_out(
+                self, ufunc, method, *inputs, **kwargs
+            )
+
+        if method == "reduce":
+            result = arraylike.dispatch_reduction_ufunc(
+                self, ufunc, method, *inputs, **kwargs
+            )
+            if result is not NotImplemented:
+                return result
+
+        new_inputs = [x if x is not self else x._values for x in inputs]
+        result = getattr(ufunc, method)(*new_inputs, **kwargs)
+        if ufunc.nout == 2:
+            # i.e. np.divmod, np.modf, np.frexp
+            return tuple(self.__array_wrap__(x) for x in result)
+        elif method == "reduce":
+            result = lib.item_from_zerodim(result)
+            return result
+
+        if result.dtype == np.float16:
+            result = result.astype(np.float32)
+
+        return self.__array_wrap__(result)
+
+    @final
+    def __array_wrap__(self, result, context=None):
+        """
+        Gets called after a ufunc and other functions e.g. np.split.
+        """
+        result = lib.item_from_zerodim(result)
+        if (not isinstance(result, Index) and is_bool_dtype(result.dtype)) or np.ndim(
+            result
+        ) > 1:
+            # exclude Index to avoid warning from is_bool_dtype deprecation;
+            # in the Index case it doesn't matter which path we go down.
+            # reached in plotting tests with e.g. np.nonzero(index)
+            return result
+
+        return Index(result, name=self.name)
+
+    @cache_readonly
+    def dtype(self) -> DtypeObj:
+        """
+        Return the dtype object of the underlying data.
+
+        Examples
+        --------
+        >>> idx = pd.Index([1, 2, 3])
+        >>> idx
+        Index([1, 2, 3], dtype='int64')
+        >>> idx.dtype
+        dtype('int64')
+        """
+        return self._data.dtype
+
+    @final
+    def ravel(self, order: str_t = "C") -> Self:
+        """
+        Return a view on self.
+ + Returns + ------- + Index + + See Also + -------- + numpy.ndarray.ravel : Return a flattened array. + + Examples + -------- + >>> s = pd.Series([1, 2, 3], index=['a', 'b', 'c']) + >>> s.index.ravel() + Index(['a', 'b', 'c'], dtype='object') + """ + return self[:] + + def view(self, cls=None): + # we need to see if we are subclassing an + # index type here + if cls is not None and not hasattr(cls, "_typ"): + dtype = cls + if isinstance(cls, str): + dtype = pandas_dtype(cls) + + if needs_i8_conversion(dtype): + if dtype.kind == "m" and dtype != "m8[ns]": + # e.g. m8[s] + return self._data.view(cls) + + idx_cls = self._dtype_to_subclass(dtype) + # NB: we only get here for subclasses that override + # _data_cls such that it is a type and not a tuple + # of types. + arr_cls = idx_cls._data_cls + arr = arr_cls(self._data.view("i8"), dtype=dtype) + return idx_cls._simple_new(arr, name=self.name, refs=self._references) + + result = self._data.view(cls) + else: + result = self._view() + if isinstance(result, Index): + result._id = self._id + return result + + def astype(self, dtype, copy: bool = True): + """ + Create an Index with values cast to dtypes. + + The class of a new Index is determined by dtype. When conversion is + impossible, a TypeError exception is raised. + + Parameters + ---------- + dtype : numpy dtype or pandas type + Note that any signed integer `dtype` is treated as ``'int64'``, + and any unsigned integer `dtype` is treated as ``'uint64'``, + regardless of the size. + copy : bool, default True + By default, astype always returns a newly allocated object. + If copy is set to False and internal requirements on dtype are + satisfied, the original data is used to create a new Index + or the original Index is returned. + + Returns + ------- + Index + Index with values cast to specified dtype. + + Examples + -------- + >>> idx = pd.Index([1, 2, 3]) + >>> idx + Index([1, 2, 3], dtype='int64') + >>> idx.astype('float') + Index([1.0, 2.0, 3.0], dtype='float64') + """ + if dtype is not None: + dtype = pandas_dtype(dtype) + + if self.dtype == dtype: + # Ensure that self.astype(self.dtype) is self + return self.copy() if copy else self + + values = self._data + if isinstance(values, ExtensionArray): + with rewrite_exception(type(values).__name__, type(self).__name__): + new_values = values.astype(dtype, copy=copy) + + elif isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + # Note: for RangeIndex and CategoricalDtype self vs self._values + # behaves differently here. + new_values = cls._from_sequence(self, dtype=dtype, copy=copy) + + else: + # GH#13149 specifically use astype_array instead of astype + new_values = astype_array(values, dtype=dtype, copy=copy) + + # pass copy=False because any copying will be done in the astype above + result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False) + if ( + not copy + and self._references is not None + and astype_is_view(self.dtype, dtype) + ): + result._references = self._references + result._references.add_index_reference(result) + return result + + _index_shared_docs[ + "take" + ] = """ + Return a new %(klass)s of the values selected by the indices. + + For internal compatibility with numpy arrays. + + Parameters + ---------- + indices : array-like + Indices to be taken. + axis : int, optional + The axis over which to select values, always 0. 
+ allow_fill : bool, default True + fill_value : scalar, default None + If allow_fill=True and fill_value is not None, indices specified by + -1 are regarded as NA. If Index doesn't hold NA, raise ValueError. + + Returns + ------- + Index + An index formed of elements at the given indices. Will be the same + type as self, except for RangeIndex. + + See Also + -------- + numpy.ndarray.take: Return an array formed from the + elements of a at the given indices. + + Examples + -------- + >>> idx = pd.Index(['a', 'b', 'c']) + >>> idx.take([2, 2, 1, 2]) + Index(['c', 'c', 'b', 'c'], dtype='object') + """ + + @Appender(_index_shared_docs["take"] % _index_doc_kwargs) + def take( + self, + indices, + axis: Axis = 0, + allow_fill: bool = True, + fill_value=None, + **kwargs, + ): + if kwargs: + nv.validate_take((), kwargs) + if is_scalar(indices): + raise TypeError("Expected indices to be array-like") + indices = ensure_platform_int(indices) + allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices) + + # Note: we discard fill_value and use self._na_value, only relevant + # in the case where allow_fill is True and fill_value is not None + values = self._values + if isinstance(values, np.ndarray): + taken = algos.take( + values, indices, allow_fill=allow_fill, fill_value=self._na_value + ) + else: + # algos.take passes 'axis' keyword which not all EAs accept + taken = values.take( + indices, allow_fill=allow_fill, fill_value=self._na_value + ) + return self._constructor._simple_new(taken, name=self.name) + + @final + def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool: + """ + We only use pandas-style take when allow_fill is True _and_ + fill_value is not None. + """ + if allow_fill and fill_value is not None: + # only fill if we are passing a non-None fill_value + if self._can_hold_na: + if (indices < -1).any(): + raise ValueError( + "When allow_fill=True and fill_value is not None, " + "all indices must be >= -1" + ) + else: + cls_name = type(self).__name__ + raise ValueError( + f"Unable to fill values because {cls_name} cannot contain NA" + ) + else: + allow_fill = False + return allow_fill + + _index_shared_docs[ + "repeat" + ] = """ + Repeat elements of a %(klass)s. + + Returns a new %(klass)s where each element of the current %(klass)s + is repeated consecutively a given number of times. + + Parameters + ---------- + repeats : int or array of ints + The number of repetitions for each element. This should be a + non-negative integer. Repeating 0 times will return an empty + %(klass)s. + axis : None + Must be ``None``. Has no effect but is accepted for compatibility + with numpy. + + Returns + ------- + %(klass)s + Newly created %(klass)s with repeated elements. + + See Also + -------- + Series.repeat : Equivalent function for Series. + numpy.repeat : Similar method for :class:`numpy.ndarray`. 
+
+        Examples
+        --------
+        >>> idx = pd.Index(['a', 'b', 'c'])
+        >>> idx
+        Index(['a', 'b', 'c'], dtype='object')
+        >>> idx.repeat(2)
+        Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object')
+        >>> idx.repeat([1, 2, 3])
+        Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object')
+        """
+
+    @Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
+    def repeat(self, repeats, axis=None):
+        repeats = ensure_platform_int(repeats)
+        nv.validate_repeat((), {"axis": axis})
+        res_values = self._values.repeat(repeats)
+
+        # _constructor so RangeIndex-> Index with an int64 dtype
+        return self._constructor._simple_new(res_values, name=self.name)
+
+    # --------------------------------------------------------------------
+    # Copying Methods
+
+    def copy(
+        self,
+        name: Hashable | None = None,
+        deep: bool = False,
+    ) -> Self:
+        """
+        Make a copy of this object.
+
+        Name is set on the new object.
+
+        Parameters
+        ----------
+        name : Label, optional
+            Set name for new object.
+        deep : bool, default False
+
+        Returns
+        -------
+        Index
+            Index refers to new object which is a copy of this object.
+
+        Notes
+        -----
+        In most cases, there should be no functional difference from using
+        ``deep``, but if ``deep`` is passed it will attempt to deepcopy.
+
+        Examples
+        --------
+        >>> idx = pd.Index(['a', 'b', 'c'])
+        >>> new_idx = idx.copy()
+        >>> idx is new_idx
+        False
+        """
+
+        name = self._validate_names(name=name, deep=deep)[0]
+        if deep:
+            new_data = self._data.copy()
+            new_index = type(self)._simple_new(new_data, name=name)
+        else:
+            new_index = self._rename(name=name)
+        return new_index
+
+    @final
+    def __copy__(self, **kwargs) -> Self:
+        return self.copy(**kwargs)
+
+    @final
+    def __deepcopy__(self, memo=None) -> Self:
+        """
+        Parameters
+        ----------
+        memo, default None
+            Standard signature. Unused
+        """
+        return self.copy(deep=True)
+
+    # --------------------------------------------------------------------
+    # Rendering Methods
+
+    @final
+    def __repr__(self) -> str_t:
+        """
+        Return a string representation for this object.
+        """
+        klass_name = type(self).__name__
+        data = self._format_data()
+        attrs = self._format_attrs()
+        space = self._format_space()
+        attrs_str = [f"{k}={v}" for k, v in attrs]
+        prepr = f",{space}".join(attrs_str)
+
+        # no data provided, just attributes
+        if data is None:
+            data = ""
+
+        return f"{klass_name}({data}{prepr})"
+
+    def _format_space(self) -> str_t:
+        # using space here controls if the attributes
+        # are line separated or not (the default)
+
+        # max_seq_items = get_option('display.max_seq_items')
+        # if len(self) > max_seq_items:
+        #    space = "\n%s" % (' ' * (len(klass) + 1))
+        return " "
+
+    @property
+    def _formatter_func(self):
+        """
+        Return the formatter function.
+        """
+        return default_pprint
+
+    def _format_data(self, name=None) -> str_t:
+        """
+        Return the formatted data as a unicode string.
+        """
+        # do we want to justify (only do so for non-objects)
+        is_justify = True
+
+        if self.inferred_type == "string":
+            is_justify = False
+        elif self.inferred_type == "categorical":
+            self = cast("CategoricalIndex", self)
+            if is_object_dtype(self.categories.dtype):
+                is_justify = False
+
+        return format_object_summary(
+            self,
+            self._formatter_func,
+            is_justify=is_justify,
+            name=name,
+            line_break_each_value=self._is_multi,
+        )
+
+    def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]:
+        """
+        Return a list of tuples of the (attr, formatted_value).
+ """ + attrs: list[tuple[str_t, str_t | int | bool | None]] = [] + + if not self._is_multi: + attrs.append(("dtype", f"'{self.dtype}'")) + + if self.name is not None: + attrs.append(("name", default_pprint(self.name))) + elif self._is_multi and any(x is not None for x in self.names): + attrs.append(("names", default_pprint(self.names))) + + max_seq_items = get_option("display.max_seq_items") or len(self) + if len(self) > max_seq_items: + attrs.append(("length", len(self))) + return attrs + + @final + def _get_level_names(self) -> Hashable | Sequence[Hashable]: + """ + Return a name or list of names with None replaced by the level number. + """ + if self._is_multi: + return [ + level if name is None else name for level, name in enumerate(self.names) + ] + else: + return 0 if self.name is None else self.name + + @final + def _mpl_repr(self) -> np.ndarray: + # how to represent ourselves to matplotlib + if isinstance(self.dtype, np.dtype) and self.dtype.kind != "M": + return cast(np.ndarray, self.values) + return self.astype(object, copy=False)._values + + def format( + self, + name: bool = False, + formatter: Callable | None = None, + na_rep: str_t = "NaN", + ) -> list[str_t]: + """ + Render a string representation of the Index. + """ + header = [] + if name: + header.append( + pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) + if self.name is not None + else "" + ) + + if formatter is not None: + return header + list(self.map(formatter)) + + return self._format_with_header(header, na_rep=na_rep) + + def _format_with_header(self, header: list[str_t], na_rep: str_t) -> list[str_t]: + from pandas.io.formats.format import format_array + + values = self._values + + if is_object_dtype(values.dtype) or is_string_dtype(values.dtype): + values = np.asarray(values) + values = lib.maybe_convert_objects(values, safe=True) + + result = [pprint_thing(x, escape_chars=("\t", "\r", "\n")) for x in values] + + # could have nans + mask = is_float_nan(values) + if mask.any(): + result_arr = np.array(result) + result_arr[mask] = na_rep + result = result_arr.tolist() + else: + result = trim_front(format_array(values, None, justify="left")) + return header + result + + def _format_native_types( + self, + *, + na_rep: str_t = "", + decimal: str_t = ".", + float_format=None, + date_format=None, + quoting=None, + ) -> npt.NDArray[np.object_]: + """ + Actually format specific types of the index. + """ + from pandas.io.formats.format import FloatArrayFormatter + + if is_float_dtype(self.dtype) and not isinstance(self.dtype, ExtensionDtype): + formatter = FloatArrayFormatter( + self._values, + na_rep=na_rep, + float_format=float_format, + decimal=decimal, + quoting=quoting, + fixed_width=False, + ) + return formatter.get_result_as_array() + + mask = isna(self) + if self.dtype != object and not quoting: + values = np.asarray(self).astype(str) + else: + values = np.array(self, dtype=object, copy=True) + + values[mask] = na_rep + return values + + def _summary(self, name=None) -> str_t: + """ + Return a summarized representation. + + Parameters + ---------- + name : str + name to use in the summary representation + + Returns + ------- + String with a summarized representation of the index + """ + if len(self) > 0: + head = self[0] + if hasattr(head, "format") and not isinstance(head, str): + head = head.format() + elif needs_i8_conversion(self.dtype): + # e.g. 
Timedelta, display as values, not quoted + head = self._formatter_func(head).replace("'", "") + tail = self[-1] + if hasattr(tail, "format") and not isinstance(tail, str): + tail = tail.format() + elif needs_i8_conversion(self.dtype): + # e.g. Timedelta, display as values, not quoted + tail = self._formatter_func(tail).replace("'", "") + + index_summary = f", {head} to {tail}" + else: + index_summary = "" + + if name is None: + name = type(self).__name__ + return f"{name}: {len(self)} entries{index_summary}" + + # -------------------------------------------------------------------- + # Conversion Methods + + def to_flat_index(self) -> Self: + """ + Identity method. + + This is implemented for compatibility with subclass implementations + when chaining. + + Returns + ------- + pd.Index + Caller. + + See Also + -------- + MultiIndex.to_flat_index : Subclass implementation. + """ + return self + + @final + def to_series(self, index=None, name: Hashable | None = None) -> Series: + """ + Create a Series with both index and values equal to the index keys. + + Useful with map for returning an indexer based on an index. + + Parameters + ---------- + index : Index, optional + Index of resulting Series. If None, defaults to original index. + name : str, optional + Name of resulting Series. If None, defaults to name of original + index. + + Returns + ------- + Series + The dtype will be based on the type of the Index values. + + See Also + -------- + Index.to_frame : Convert an Index to a DataFrame. + Series.to_frame : Convert Series to DataFrame. + + Examples + -------- + >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') + + By default, the original index and original name is reused. + + >>> idx.to_series() + animal + Ant Ant + Bear Bear + Cow Cow + Name: animal, dtype: object + + To enforce a new index, specify new labels to ``index``: + + >>> idx.to_series(index=[0, 1, 2]) + 0 Ant + 1 Bear + 2 Cow + Name: animal, dtype: object + + To override the name of the resulting column, specify ``name``: + + >>> idx.to_series(name='zoo') + animal + Ant Ant + Bear Bear + Cow Cow + Name: zoo, dtype: object + """ + from pandas import Series + + if index is None: + index = self._view() + if name is None: + name = self.name + + return Series(self._values.copy(), index=index, name=name) + + def to_frame( + self, index: bool = True, name: Hashable = lib.no_default + ) -> DataFrame: + """ + Create a DataFrame with a column containing the Index. + + Parameters + ---------- + index : bool, default True + Set the index of the returned DataFrame as the original Index. + + name : object, defaults to index.name + The passed name should substitute for the index name (if it has + one). + + Returns + ------- + DataFrame + DataFrame containing the original Index data. + + See Also + -------- + Index.to_series : Convert an Index to a Series. + Series.to_frame : Convert Series to DataFrame. + + Examples + -------- + >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') + >>> idx.to_frame() + animal + animal + Ant Ant + Bear Bear + Cow Cow + + By default, the original Index is reused. 
To enforce a new Index:
+
+        >>> idx.to_frame(index=False)
+          animal
+        0    Ant
+        1   Bear
+        2    Cow
+
+        To override the name of the resulting column, specify `name`:
+
+        >>> idx.to_frame(index=False, name='zoo')
+            zoo
+        0   Ant
+        1  Bear
+        2   Cow
+        """
+        from pandas import DataFrame
+
+        if name is lib.no_default:
+            name = self._get_level_names()
+        result = DataFrame({name: self}, copy=not using_copy_on_write())
+
+        if index:
+            result.index = self
+        return result
+
+    # --------------------------------------------------------------------
+    # Name-Centric Methods
+
+    @property
+    def name(self) -> Hashable:
+        """
+        Return Index or MultiIndex name.
+
+        Examples
+        --------
+        >>> idx = pd.Index([1, 2, 3], name='x')
+        >>> idx
+        Index([1, 2, 3], dtype='int64', name='x')
+        >>> idx.name
+        'x'
+        """
+        return self._name
+
+    @name.setter
+    def name(self, value: Hashable) -> None:
+        if self._no_setting_name:
+            # Used in MultiIndex.levels to avoid silently ignoring name updates.
+            raise RuntimeError(
+                "Cannot set name on a level of a MultiIndex. Use "
+                "'MultiIndex.set_names' instead."
+            )
+        maybe_extract_name(value, None, type(self))
+        self._name = value
+
+    @final
+    def _validate_names(
+        self, name=None, names=None, deep: bool = False
+    ) -> list[Hashable]:
+        """
+        Handles the quirks of having a singular 'name' parameter for general
+        Index and plural 'names' parameter for MultiIndex.
+        """
+        from copy import deepcopy
+
+        if names is not None and name is not None:
+            raise TypeError("Can only provide one of `names` and `name`")
+        if names is None and name is None:
+            new_names = deepcopy(self.names) if deep else self.names
+        elif names is not None:
+            if not is_list_like(names):
+                raise TypeError("Must pass list-like as `names`.")
+            new_names = names
+        elif not is_list_like(name):
+            new_names = [name]
+        else:
+            new_names = name
+
+        if len(new_names) != len(self.names):
+            raise ValueError(
+                f"Length of new names must be {len(self.names)}, got {len(new_names)}"
+            )
+
+        # All items in 'new_names' need to be hashable
+        validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name")
+
+        return new_names
+
+    def _get_default_index_names(
+        self, names: Hashable | Sequence[Hashable] | None = None, default=None
+    ) -> list[Hashable]:
+        """
+        Get names of index.
+
+        Parameters
+        ----------
+        names : int, str or 1-dimensional list, default None
+            Index names to set.
+        default : str
+            Default name of index.
+
+        Raises
+        ------
+        ValueError
+            if names not str or 1-dimensional list
+        """
+        from pandas.core.indexes.multi import MultiIndex
+
+        if names is not None:
+            if isinstance(names, (int, str)):
+                names = [names]
+
+        if not isinstance(names, list) and names is not None:
+            raise ValueError("Index names must be str or 1-dimensional list")
+
+        if not names:
+            if isinstance(self, MultiIndex):
+                names = com.fill_missing_names(self.names)
+            else:
+                names = [default] if self.name is None else [self.name]
+
+        return names
+
+    def _get_names(self) -> FrozenList:
+        return FrozenList((self.name,))
+
+    def _set_names(self, values, *, level=None) -> None:
+        """
+        Set new names on index. Each name has to be a hashable type.
+
+        Parameters
+        ----------
+        values : str or sequence
+            name(s) to set
+        level : int, level name, or sequence of int/level names (default None)
+            If the index is a MultiIndex (hierarchical), level(s) to set (None
+            for all levels). Otherwise level must be None
+
+        Raises
+        ------
+        TypeError if each name is not hashable.
+ """ + if not is_list_like(values): + raise ValueError("Names must be a list-like") + if len(values) != 1: + raise ValueError(f"Length of new names must be 1, got {len(values)}") + + # GH 20527 + # All items in 'name' need to be hashable: + validate_all_hashable(*values, error_name=f"{type(self).__name__}.name") + + self._name = values[0] + + names = property(fset=_set_names, fget=_get_names) + + @overload + def set_names(self, names, *, level=..., inplace: Literal[False] = ...) -> Self: + ... + + @overload + def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: + ... + + @overload + def set_names(self, names, *, level=..., inplace: bool = ...) -> Self | None: + ... + + def set_names(self, names, *, level=None, inplace: bool = False) -> Self | None: + """ + Set Index or MultiIndex name. + + Able to set new names partially and by level. + + Parameters + ---------- + + names : label or list of label or dict-like for MultiIndex + Name(s) to set. + + .. versionchanged:: 1.3.0 + + level : int, label or list of int or label, optional + If the index is a MultiIndex and names is not dict-like, level(s) to set + (None for all levels). Otherwise level must be None. + + .. versionchanged:: 1.3.0 + + inplace : bool, default False + Modifies the object directly, instead of creating a new Index or + MultiIndex. + + Returns + ------- + Index or None + The same type as the caller or None if ``inplace=True``. + + See Also + -------- + Index.rename : Able to set new names without level. + + Examples + -------- + >>> idx = pd.Index([1, 2, 3, 4]) + >>> idx + Index([1, 2, 3, 4], dtype='int64') + >>> idx.set_names('quarter') + Index([1, 2, 3, 4], dtype='int64', name='quarter') + + >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], + ... [2018, 2019]]) + >>> idx + MultiIndex([('python', 2018), + ('python', 2019), + ( 'cobra', 2018), + ( 'cobra', 2019)], + ) + >>> idx = idx.set_names(['kind', 'year']) + >>> idx.set_names('species', level=0) + MultiIndex([('python', 2018), + ('python', 2019), + ( 'cobra', 2018), + ( 'cobra', 2019)], + names=['species', 'year']) + + When renaming levels with a dict, levels can not be passed. 
+ + >>> idx.set_names({'kind': 'snake'}) + MultiIndex([('python', 2018), + ('python', 2019), + ( 'cobra', 2018), + ( 'cobra', 2019)], + names=['snake', 'year']) + """ + if level is not None and not isinstance(self, ABCMultiIndex): + raise ValueError("Level must be None for non-MultiIndex") + + if level is not None and not is_list_like(level) and is_list_like(names): + raise TypeError("Names must be a string when a single level is provided.") + + if not is_list_like(names) and level is None and self.nlevels > 1: + raise TypeError("Must pass list-like as `names`.") + + if is_dict_like(names) and not isinstance(self, ABCMultiIndex): + raise TypeError("Can only pass dict-like as `names` for MultiIndex.") + + if is_dict_like(names) and level is not None: + raise TypeError("Can not pass level for dictlike `names`.") + + if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None: + # Transform dict to list of new names and corresponding levels + level, names_adjusted = [], [] + for i, name in enumerate(self.names): + if name in names.keys(): + level.append(i) + names_adjusted.append(names[name]) + names = names_adjusted + + if not is_list_like(names): + names = [names] + if level is not None and not is_list_like(level): + level = [level] + + if inplace: + idx = self + else: + idx = self._view() + + idx._set_names(names, level=level) + if not inplace: + return idx + return None + + def rename(self, name, inplace: bool = False): + """ + Alter Index or MultiIndex name. + + Able to set new names without level. Defaults to returning new index. + Length of names must match number of levels in MultiIndex. + + Parameters + ---------- + name : label or list of labels + Name(s) to set. + inplace : bool, default False + Modifies the object directly, instead of creating a new Index or + MultiIndex. + + Returns + ------- + Index or None + The same type as the caller or None if ``inplace=True``. + + See Also + -------- + Index.set_names : Able to set new names partially and by level. + + Examples + -------- + >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score') + >>> idx.rename('grade') + Index(['A', 'C', 'A', 'B'], dtype='object', name='grade') + + >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], + ... [2018, 2019]], + ... names=['kind', 'year']) + >>> idx + MultiIndex([('python', 2018), + ('python', 2019), + ( 'cobra', 2018), + ( 'cobra', 2019)], + names=['kind', 'year']) + >>> idx.rename(['species', 'year']) + MultiIndex([('python', 2018), + ('python', 2019), + ( 'cobra', 2018), + ( 'cobra', 2019)], + names=['species', 'year']) + >>> idx.rename('species') + Traceback (most recent call last): + TypeError: Must pass list-like as `names`. + """ + return self.set_names([name], inplace=inplace) + + # -------------------------------------------------------------------- + # Level-Centric Methods + + @property + def nlevels(self) -> int: + """ + Number of levels. + """ + return 1 + + def _sort_levels_monotonic(self) -> Self: + """ + Compat with MultiIndex. + """ + return self + + @final + def _validate_index_level(self, level) -> None: + """ + Validate index level. + + For single-level Index getting level number is a no-op, but some + verification must be done like in MultiIndex. 
+
+        """
+        if isinstance(level, int):
+            if level < 0 and level != -1:
+                raise IndexError(
+                    "Too many levels: Index has only 1 level, "
+                    f"{level} is not a valid level number"
+                )
+            if level > 0:
+                raise IndexError(
+                    f"Too many levels: Index has only 1 level, not {level + 1}"
+                )
+        elif level != self.name:
+            raise KeyError(
+                f"Requested level ({level}) does not match index name ({self.name})"
+            )
+
+    def _get_level_number(self, level) -> int:
+        self._validate_index_level(level)
+        return 0
+
+    def sortlevel(
+        self,
+        level=None,
+        ascending: bool | list[bool] = True,
+        sort_remaining=None,
+        na_position: NaPosition = "first",
+    ):
+        """
+        For internal compatibility with the Index API.
+
+        Sort the Index. This is for compat with MultiIndex
+
+        Parameters
+        ----------
+        ascending : bool, default True
+            False to sort in descending order
+        na_position : {'first' or 'last'}, default 'first'
+            Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
+            the end.
+
+            .. versionadded:: 2.1.0
+
+        level, sort_remaining are compat parameters
+
+        Returns
+        -------
+        Index
+        """
+        if not isinstance(ascending, (list, bool)):
+            raise TypeError(
+                "ascending must be a single bool value or "
+                "a list of bool values of length 1"
+            )
+
+        if isinstance(ascending, list):
+            if len(ascending) != 1:
+                raise TypeError("ascending must be a list of bool values of length 1")
+            ascending = ascending[0]
+
+        if not isinstance(ascending, bool):
+            raise TypeError("ascending must be a bool value")
+
+        return self.sort_values(
+            return_indexer=True, ascending=ascending, na_position=na_position
+        )
+
+    def _get_level_values(self, level) -> Index:
+        """
+        Return an Index of values for requested level.
+
+        This is primarily useful to get an individual level of values from a
+        MultiIndex, but is provided on Index as well for compatibility.
+
+        Parameters
+        ----------
+        level : int or str
+            It is either the integer position or the name of the level.
+
+        Returns
+        -------
+        Index
+            Calling object, as there is only one level in the Index.
+
+        See Also
+        --------
+        MultiIndex.get_level_values : Get values for a level of a MultiIndex.
+
+        Notes
+        -----
+        For Index, level should be 0, since there are no multiple levels.
+
+        Examples
+        --------
+        >>> idx = pd.Index(list('abc'))
+        >>> idx
+        Index(['a', 'b', 'c'], dtype='object')
+
+        Get level values by supplying `level` as integer:
+
+        >>> idx.get_level_values(0)
+        Index(['a', 'b', 'c'], dtype='object')
+        """
+        self._validate_index_level(level)
+        return self
+
+    get_level_values = _get_level_values
+
+    @final
+    def droplevel(self, level: IndexLabel = 0):
+        """
+        Return index with requested level(s) removed.
+
+        If resulting index has only 1 level left, the result will be
+        of Index type, not MultiIndex. The original index is not modified inplace.
+
+        Parameters
+        ----------
+        level : int, str, or list-like, default 0
+            If a string is given, must be the name of a level
+            If list-like, elements must be names or indexes of levels.
+
+        Returns
+        -------
+        Index or MultiIndex
+
+        Examples
+        --------
+        >>> mi = pd.MultiIndex.from_arrays(
+        ... 
[[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']) + >>> mi + MultiIndex([(1, 3, 5), + (2, 4, 6)], + names=['x', 'y', 'z']) + + >>> mi.droplevel() + MultiIndex([(3, 5), + (4, 6)], + names=['y', 'z']) + + >>> mi.droplevel(2) + MultiIndex([(1, 3), + (2, 4)], + names=['x', 'y']) + + >>> mi.droplevel('z') + MultiIndex([(1, 3), + (2, 4)], + names=['x', 'y']) + + >>> mi.droplevel(['x', 'y']) + Index([5, 6], dtype='int64', name='z') + """ + if not isinstance(level, (tuple, list)): + level = [level] + + levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] + + return self._drop_level_numbers(levnums) + + @final + def _drop_level_numbers(self, levnums: list[int]): + """ + Drop MultiIndex levels by level _number_, not name. + """ + + if not levnums and not isinstance(self, ABCMultiIndex): + return self + if len(levnums) >= self.nlevels: + raise ValueError( + f"Cannot remove {len(levnums)} levels from an index with " + f"{self.nlevels} levels: at least one level must be left." + ) + # The two checks above guarantee that here self is a MultiIndex + self = cast("MultiIndex", self) + + new_levels = list(self.levels) + new_codes = list(self.codes) + new_names = list(self.names) + + for i in levnums: + new_levels.pop(i) + new_codes.pop(i) + new_names.pop(i) + + if len(new_levels) == 1: + lev = new_levels[0] + + if len(lev) == 0: + # If lev is empty, lev.take will fail GH#42055 + if len(new_codes[0]) == 0: + # GH#45230 preserve RangeIndex here + # see test_reset_index_empty_rangeindex + result = lev[:0] + else: + res_values = algos.take(lev._values, new_codes[0], allow_fill=True) + # _constructor instead of type(lev) for RangeIndex compat GH#35230 + result = lev._constructor._simple_new(res_values, name=new_names[0]) + else: + # set nan if needed + mask = new_codes[0] == -1 + result = new_levels[0].take(new_codes[0]) + if mask.any(): + result = result.putmask(mask, np.nan) + + result._name = new_names[0] + + return result + else: + from pandas.core.indexes.multi import MultiIndex + + return MultiIndex( + levels=new_levels, + codes=new_codes, + names=new_names, + verify_integrity=False, + ) + + # -------------------------------------------------------------------- + # Introspection Methods + + @cache_readonly + @final + def _can_hold_na(self) -> bool: + if isinstance(self.dtype, ExtensionDtype): + if isinstance(self.dtype, IntervalDtype): + # FIXME(GH#45720): this is inaccurate for integer-backed + # IntervalArray, but without it other.categories.take raises + # in IntervalArray._cmp_method + return True + return self.dtype._can_hold_na + if self.dtype.kind in "iub": + return False + return True + + @property + def is_monotonic_increasing(self) -> bool: + """ + Return a boolean if the values are equal or increasing. + + Returns + ------- + bool + + See Also + -------- + Index.is_monotonic_decreasing : Check if the values are equal or decreasing. + + Examples + -------- + >>> pd.Index([1, 2, 3]).is_monotonic_increasing + True + >>> pd.Index([1, 2, 2]).is_monotonic_increasing + True + >>> pd.Index([1, 3, 2]).is_monotonic_increasing + False + """ + return self._engine.is_monotonic_increasing + + @property + def is_monotonic_decreasing(self) -> bool: + """ + Return a boolean if the values are equal or decreasing. + + Returns + ------- + bool + + See Also + -------- + Index.is_monotonic_increasing : Check if the values are equal or increasing. 
+ + Examples + -------- + >>> pd.Index([3, 2, 1]).is_monotonic_decreasing + True + >>> pd.Index([3, 2, 2]).is_monotonic_decreasing + True + >>> pd.Index([3, 1, 2]).is_monotonic_decreasing + False + """ + return self._engine.is_monotonic_decreasing + + @final + @property + def _is_strictly_monotonic_increasing(self) -> bool: + """ + Return if the index is strictly monotonic increasing + (only increasing) values. + + Examples + -------- + >>> Index([1, 2, 3])._is_strictly_monotonic_increasing + True + >>> Index([1, 2, 2])._is_strictly_monotonic_increasing + False + >>> Index([1, 3, 2])._is_strictly_monotonic_increasing + False + """ + return self.is_unique and self.is_monotonic_increasing + + @final + @property + def _is_strictly_monotonic_decreasing(self) -> bool: + """ + Return if the index is strictly monotonic decreasing + (only decreasing) values. + + Examples + -------- + >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing + True + >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing + False + >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing + False + """ + return self.is_unique and self.is_monotonic_decreasing + + @cache_readonly + def is_unique(self) -> bool: + """ + Return if the index has unique values. + + Returns + ------- + bool + + See Also + -------- + Index.has_duplicates : Inverse method that checks if it has duplicate values. + + Examples + -------- + >>> idx = pd.Index([1, 5, 7, 7]) + >>> idx.is_unique + False + + >>> idx = pd.Index([1, 5, 7]) + >>> idx.is_unique + True + + >>> idx = pd.Index(["Watermelon", "Orange", "Apple", + ... "Watermelon"]).astype("category") + >>> idx.is_unique + False + + >>> idx = pd.Index(["Orange", "Apple", + ... "Watermelon"]).astype("category") + >>> idx.is_unique + True + """ + return self._engine.is_unique + + @final + @property + def has_duplicates(self) -> bool: + """ + Check if the Index has duplicate values. + + Returns + ------- + bool + Whether or not the Index has duplicate values. + + See Also + -------- + Index.is_unique : Inverse method that checks if it has unique values. + + Examples + -------- + >>> idx = pd.Index([1, 5, 7, 7]) + >>> idx.has_duplicates + True + + >>> idx = pd.Index([1, 5, 7]) + >>> idx.has_duplicates + False + + >>> idx = pd.Index(["Watermelon", "Orange", "Apple", + ... "Watermelon"]).astype("category") + >>> idx.has_duplicates + True + + >>> idx = pd.Index(["Orange", "Apple", + ... "Watermelon"]).astype("category") + >>> idx.has_duplicates + False + """ + return not self.is_unique + + @final + def is_boolean(self) -> bool: + """ + Check if the Index only consists of booleans. + + .. deprecated:: 2.0.0 + Use `pandas.api.types.is_bool_dtype` instead. + + Returns + ------- + bool + Whether or not the Index only consists of booleans. + + See Also + -------- + is_integer : Check if the Index only consists of integers (deprecated). + is_floating : Check if the Index is a floating type (deprecated). + is_numeric : Check if the Index only consists of numeric data (deprecated). + is_object : Check if the Index is of the object dtype (deprecated). + is_categorical : Check if the Index holds categorical data. + is_interval : Check if the Index holds Interval objects (deprecated). 
+
+        Examples
+        --------
+        >>> idx = pd.Index([True, False, True])
+        >>> idx.is_boolean()  # doctest: +SKIP
+        True
+
+        >>> idx = pd.Index(["True", "False", "True"])
+        >>> idx.is_boolean()  # doctest: +SKIP
+        False
+
+        >>> idx = pd.Index([True, False, "True"])
+        >>> idx.is_boolean()  # doctest: +SKIP
+        False
+        """
+        warnings.warn(
+            f"{type(self).__name__}.is_boolean is deprecated. "
+            "Use pandas.api.types.is_bool_dtype instead.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        return self.inferred_type in ["boolean"]
+
+    @final
+    def is_integer(self) -> bool:
+        """
+        Check if the Index only consists of integers.
+
+        .. deprecated:: 2.0.0
+            Use `pandas.api.types.is_integer_dtype` instead.
+
+        Returns
+        -------
+        bool
+            Whether or not the Index only consists of integers.
+
+        See Also
+        --------
+        is_boolean : Check if the Index only consists of booleans (deprecated).
+        is_floating : Check if the Index is a floating type (deprecated).
+        is_numeric : Check if the Index only consists of numeric data (deprecated).
+        is_object : Check if the Index is of the object dtype (deprecated).
+        is_categorical : Check if the Index holds categorical data (deprecated).
+        is_interval : Check if the Index holds Interval objects (deprecated).
+
+        Examples
+        --------
+        >>> idx = pd.Index([1, 2, 3, 4])
+        >>> idx.is_integer()  # doctest: +SKIP
+        True
+
+        >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
+        >>> idx.is_integer()  # doctest: +SKIP
+        False
+
+        >>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
+        >>> idx.is_integer()  # doctest: +SKIP
+        False
+        """
+        warnings.warn(
+            f"{type(self).__name__}.is_integer is deprecated. "
+            "Use pandas.api.types.is_integer_dtype instead.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        return self.inferred_type in ["integer"]
+
+    @final
+    def is_floating(self) -> bool:
+        """
+        Check if the Index is a floating type.
+
+        .. deprecated:: 2.0.0
+            Use `pandas.api.types.is_float_dtype` instead
+
+        The Index may consist of only floats, NaNs, or a mix of floats,
+        integers, or NaNs.
+
+        Returns
+        -------
+        bool
+            Whether or not the Index only consists of floats, NaNs, or
+            a mix of floats, integers, or NaNs.
+
+        See Also
+        --------
+        is_boolean : Check if the Index only consists of booleans (deprecated).
+        is_integer : Check if the Index only consists of integers (deprecated).
+        is_numeric : Check if the Index only consists of numeric data (deprecated).
+        is_object : Check if the Index is of the object dtype (deprecated).
+        is_categorical : Check if the Index holds categorical data (deprecated).
+        is_interval : Check if the Index holds Interval objects (deprecated).
+
+        Examples
+        --------
+        >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
+        >>> idx.is_floating()  # doctest: +SKIP
+        True
+
+        >>> idx = pd.Index([1.0, 2.0, np.nan, 4.0])
+        >>> idx.is_floating()  # doctest: +SKIP
+        True
+
+        >>> idx = pd.Index([1, 2, 3, 4, np.nan])
+        >>> idx.is_floating()  # doctest: +SKIP
+        True
+
+        >>> idx = pd.Index([1, 2, 3, 4])
+        >>> idx.is_floating()  # doctest: +SKIP
+        False
+        """
+        warnings.warn(
+            f"{type(self).__name__}.is_floating is deprecated. "
+            "Use pandas.api.types.is_float_dtype instead.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"]
+
+    @final
+    def is_numeric(self) -> bool:
+        """
+        Check if the Index only consists of numeric data.
+
+        .. deprecated:: 2.0.0
+            Use `pandas.api.types.is_numeric_dtype` instead.
+
+        Returns
+        -------
+        bool
+            Whether or not the Index only consists of numeric data.
+
+        See Also
+        --------
+        is_boolean : Check if the Index only consists of booleans (deprecated).
+        is_integer : Check if the Index only consists of integers (deprecated).
+        is_floating : Check if the Index is a floating type (deprecated).
+        is_object : Check if the Index is of the object dtype (deprecated).
+        is_categorical : Check if the Index holds categorical data (deprecated).
+        is_interval : Check if the Index holds Interval objects (deprecated).
+
+        Examples
+        --------
+        >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
+        >>> idx.is_numeric()  # doctest: +SKIP
+        True
+
+        >>> idx = pd.Index([1, 2, 3, 4.0])
+        >>> idx.is_numeric()  # doctest: +SKIP
+        True
+
+        >>> idx = pd.Index([1, 2, 3, 4])
+        >>> idx.is_numeric()  # doctest: +SKIP
+        True
+
+        >>> idx = pd.Index([1, 2, 3, 4.0, np.nan])
+        >>> idx.is_numeric()  # doctest: +SKIP
+        True
+
+        >>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"])
+        >>> idx.is_numeric()  # doctest: +SKIP
+        False
+        """
+        warnings.warn(
+            f"{type(self).__name__}.is_numeric is deprecated. "
+            "Use pandas.api.types.is_any_real_numeric_dtype instead.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        return self.inferred_type in ["integer", "floating"]
+
+    @final
+    def is_object(self) -> bool:
+        """
+        Check if the Index is of the object dtype.
+
+        .. deprecated:: 2.0.0
+            Use `pandas.api.types.is_object_dtype` instead.
+
+        Returns
+        -------
+        bool
+            Whether or not the Index is of the object dtype.
+
+        See Also
+        --------
+        is_boolean : Check if the Index only consists of booleans (deprecated).
+        is_integer : Check if the Index only consists of integers (deprecated).
+        is_floating : Check if the Index is a floating type (deprecated).
+        is_numeric : Check if the Index only consists of numeric data (deprecated).
+        is_categorical : Check if the Index holds categorical data (deprecated).
+        is_interval : Check if the Index holds Interval objects (deprecated).
+
+        Examples
+        --------
+        >>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
+        >>> idx.is_object()  # doctest: +SKIP
+        True
+
+        >>> idx = pd.Index(["Apple", "Mango", 2.0])
+        >>> idx.is_object()  # doctest: +SKIP
+        True
+
+        >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
+        ...                 "Watermelon"]).astype("category")
+        >>> idx.is_object()  # doctest: +SKIP
+        False
+
+        >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
+        >>> idx.is_object()  # doctest: +SKIP
+        False
+        """
+        warnings.warn(
+            f"{type(self).__name__}.is_object is deprecated. "
+            "Use pandas.api.types.is_object_dtype instead.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        return is_object_dtype(self.dtype)
+
+    @final
+    def is_categorical(self) -> bool:
+        """
+        Check if the Index holds categorical data.
+
+        .. deprecated:: 2.0.0
+            Use `isinstance(index.dtype, pd.CategoricalDtype)` instead.
+
+        Returns
+        -------
+        bool
+            True if the Index is categorical.
+
+        See Also
+        --------
+        CategoricalIndex : Index for categorical data.
+        is_boolean : Check if the Index only consists of booleans (deprecated).
+        is_integer : Check if the Index only consists of integers (deprecated).
+        is_floating : Check if the Index is a floating type (deprecated).
+        is_numeric : Check if the Index only consists of numeric data (deprecated).
+        is_object : Check if the Index is of the object dtype (deprecated).
+        is_interval : Check if the Index holds Interval objects (deprecated).
+
+        Examples
+        --------
+        >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
+        ... 
"Watermelon"]).astype("category") + >>> idx.is_categorical() # doctest: +SKIP + True + + >>> idx = pd.Index([1, 3, 5, 7]) + >>> idx.is_categorical() # doctest: +SKIP + False + + >>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"]) + >>> s + 0 Peter + 1 Victor + 2 Elisabeth + 3 Mar + dtype: object + >>> s.index.is_categorical() # doctest: +SKIP + False + """ + warnings.warn( + f"{type(self).__name__}.is_categorical is deprecated." + "Use pandas.api.types.is_categorical_dtype instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + + return self.inferred_type in ["categorical"] + + @final + def is_interval(self) -> bool: + """ + Check if the Index holds Interval objects. + + .. deprecated:: 2.0.0 + Use `isinstance(index.dtype, pd.IntervalDtype)` instead. + + Returns + ------- + bool + Whether or not the Index holds Interval objects. + + See Also + -------- + IntervalIndex : Index for Interval objects. + is_boolean : Check if the Index only consists of booleans (deprecated). + is_integer : Check if the Index only consists of integers (deprecated). + is_floating : Check if the Index is a floating type (deprecated). + is_numeric : Check if the Index only consists of numeric data (deprecated). + is_object : Check if the Index is of the object dtype. (deprecated). + is_categorical : Check if the Index holds categorical data (deprecated). + + Examples + -------- + >>> idx = pd.Index([pd.Interval(left=0, right=5), + ... pd.Interval(left=5, right=10)]) + >>> idx.is_interval() # doctest: +SKIP + True + + >>> idx = pd.Index([1, 3, 5, 7]) + >>> idx.is_interval() # doctest: +SKIP + False + """ + warnings.warn( + f"{type(self).__name__}.is_interval is deprecated." + "Use pandas.api.types.is_interval_dtype instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.inferred_type in ["interval"] + + @final + def _holds_integer(self) -> bool: + """ + Whether the type is an integer type. + """ + return self.inferred_type in ["integer", "mixed-integer"] + + @final + def holds_integer(self) -> bool: + """ + Whether the type is an integer type. + + .. deprecated:: 2.0.0 + Use `pandas.api.types.infer_dtype` instead + """ + warnings.warn( + f"{type(self).__name__}.holds_integer is deprecated. " + "Use pandas.api.types.infer_dtype instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._holds_integer() + + @cache_readonly + def inferred_type(self) -> str_t: + """ + Return a string of the type inferred from the values. + + Examples + -------- + >>> idx = pd.Index([1, 2, 3]) + >>> idx + Index([1, 2, 3], dtype='int64') + >>> idx.inferred_type + 'integer' + """ + return lib.infer_dtype(self._values, skipna=False) + + @cache_readonly + @final + def _is_all_dates(self) -> bool: + """ + Whether or not the index values only consist of dates. + """ + if needs_i8_conversion(self.dtype): + return True + elif self.dtype != _dtype_obj: + # TODO(ExtensionIndex): 3rd party EA might override? + # Note: this includes IntervalIndex, even when the left/right + # contain datetime-like objects. 
+ return False + elif self._is_multi: + return False + return is_datetime_array(ensure_object(self._values)) + + @final + @cache_readonly + def _is_multi(self) -> bool: + """ + Cached check equivalent to isinstance(self, MultiIndex) + """ + return isinstance(self, ABCMultiIndex) + + # -------------------------------------------------------------------- + # Pickle Methods + + def __reduce__(self): + d = {"data": self._data, "name": self.name} + return _new_Index, (type(self), d), None + + # -------------------------------------------------------------------- + # Null Handling Methods + + @cache_readonly + def _na_value(self): + """The expected NA value to use with this index.""" + dtype = self.dtype + if isinstance(dtype, np.dtype): + if dtype.kind in "mM": + return NaT + return np.nan + return dtype.na_value + + @cache_readonly + def _isnan(self) -> npt.NDArray[np.bool_]: + """ + Return if each value is NaN. + """ + if self._can_hold_na: + return isna(self) + else: + # shouldn't reach to this condition by checking hasnans beforehand + values = np.empty(len(self), dtype=np.bool_) + values.fill(False) + return values + + @cache_readonly + def hasnans(self) -> bool: + """ + Return True if there are any NaNs. + + Enables various performance speedups. + + Returns + ------- + bool + + Examples + -------- + >>> s = pd.Series([1, 2, 3], index=['a', 'b', None]) + >>> s + a 1 + b 2 + None 3 + dtype: int64 + >>> s.index.hasnans + True + """ + if self._can_hold_na: + return bool(self._isnan.any()) + else: + return False + + @final + def isna(self) -> npt.NDArray[np.bool_]: + """ + Detect missing values. + + Return a boolean same-sized object indicating if the values are NA. + NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get + mapped to ``True`` values. + Everything else get mapped to ``False`` values. Characters such as + empty strings `''` or :attr:`numpy.inf` are not considered NA values. + + Returns + ------- + numpy.ndarray[bool] + A boolean array of whether my values are NA. + + See Also + -------- + Index.notna : Boolean inverse of isna. + Index.dropna : Omit entries with missing values. + isna : Top-level isna. + Series.isna : Detect missing values in Series object. + + Examples + -------- + Show which entries in a pandas.Index are NA. The result is an + array. + + >>> idx = pd.Index([5.2, 6.0, np.nan]) + >>> idx + Index([5.2, 6.0, nan], dtype='float64') + >>> idx.isna() + array([False, False, True]) + + Empty strings are not considered NA values. None is considered an NA + value. + + >>> idx = pd.Index(['black', '', 'red', None]) + >>> idx + Index(['black', '', 'red', None], dtype='object') + >>> idx.isna() + array([False, False, False, True]) + + For datetimes, `NaT` (Not a Time) is considered as an NA value. + + >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'), + ... pd.Timestamp(''), None, pd.NaT]) + >>> idx + DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'], + dtype='datetime64[ns]', freq=None) + >>> idx.isna() + array([False, True, True, True]) + """ + return self._isnan + + isnull = isna + + @final + def notna(self) -> npt.NDArray[np.bool_]: + """ + Detect existing (non-missing) values. + + Return a boolean same-sized object indicating if the values are not NA. + Non-missing values get mapped to ``True``. Characters such as empty + strings ``''`` or :attr:`numpy.inf` are not considered NA values. + NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False`` + values. 
+ + Returns + ------- + numpy.ndarray[bool] + Boolean array to indicate which entries are not NA. + + See Also + -------- + Index.notnull : Alias of notna. + Index.isna: Inverse of notna. + notna : Top-level notna. + + Examples + -------- + Show which entries in an Index are not NA. The result is an + array. + + >>> idx = pd.Index([5.2, 6.0, np.nan]) + >>> idx + Index([5.2, 6.0, nan], dtype='float64') + >>> idx.notna() + array([ True, True, False]) + + Empty strings are not considered NA values. None is considered a NA + value. + + >>> idx = pd.Index(['black', '', 'red', None]) + >>> idx + Index(['black', '', 'red', None], dtype='object') + >>> idx.notna() + array([ True, True, True, False]) + """ + return ~self.isna() + + notnull = notna + + def fillna(self, value=None, downcast=lib.no_default): + """ + Fill NA/NaN values with the specified value. + + Parameters + ---------- + value : scalar + Scalar value to use to fill holes (e.g. 0). + This value cannot be a list-likes. + downcast : dict, default is None + A dict of item->dtype of what to downcast if possible, + or the string 'infer' which will try to downcast to an appropriate + equal type (e.g. float64 to int64 if possible). + + .. deprecated:: 2.1.0 + + Returns + ------- + Index + + See Also + -------- + DataFrame.fillna : Fill NaN values of a DataFrame. + Series.fillna : Fill NaN Values of a Series. + + Examples + -------- + >>> idx = pd.Index([np.nan, np.nan, 3]) + >>> idx.fillna(0) + Index([0.0, 0.0, 3.0], dtype='float64') + """ + if not is_scalar(value): + raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}") + if downcast is not lib.no_default: + warnings.warn( + f"The 'downcast' keyword in {type(self).__name__}.fillna is " + "deprecated and will be removed in a future version. " + "It was previously silently ignored.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + downcast = None + + if self.hasnans: + result = self.putmask(self._isnan, value) + if downcast is None: + # no need to care metadata other than name + # because it can't have freq if it has NaTs + # _with_infer needed for test_fillna_categorical + return Index._with_infer(result, name=self.name) + raise NotImplementedError( + f"{type(self).__name__}.fillna does not support 'downcast' " + "argument values other than 'None'." + ) + return self._view() + + def dropna(self, how: AnyAll = "any") -> Self: + """ + Return Index without NA/NaN values. + + Parameters + ---------- + how : {'any', 'all'}, default 'any' + If the Index is a MultiIndex, drop the value when any or all levels + are NaN. + + Returns + ------- + Index + + Examples + -------- + >>> idx = pd.Index([1, np.nan, 3]) + >>> idx.dropna() + Index([1.0, 3.0], dtype='float64') + """ + if how not in ("any", "all"): + raise ValueError(f"invalid how option: {how}") + + if self.hasnans: + res_values = self._values[~self._isnan] + return type(self)._simple_new(res_values, name=self.name) + return self._view() + + # -------------------------------------------------------------------- + # Uniqueness Methods + + def unique(self, level: Hashable | None = None) -> Self: + """ + Return unique values in the index. + + Unique values are returned in order of appearance, this does NOT sort. + + Parameters + ---------- + level : int or hashable, optional + Only return values from specified level (for MultiIndex). + If int, gets the level by integer position, else by level name. + + Returns + ------- + Index + + See Also + -------- + unique : Numpy array of unique values in that column. 
+        Series.unique : Return unique values of Series object.
+
+        Examples
+        --------
+        >>> idx = pd.Index([1, 1, 2, 3, 3])
+        >>> idx.unique()
+        Index([1, 2, 3], dtype='int64')
+        """
+        if level is not None:
+            self._validate_index_level(level)
+
+        if self.is_unique:
+            return self._view()
+
+        result = super().unique()
+        return self._shallow_copy(result)
+
+    def drop_duplicates(self, *, keep: DropKeep = "first") -> Self:
+        """
+        Return Index with duplicate values removed.
+
+        Parameters
+        ----------
+        keep : {'first', 'last', ``False``}, default 'first'
+            - 'first' : Drop duplicates except for the first occurrence.
+            - 'last' : Drop duplicates except for the last occurrence.
+            - ``False`` : Drop all duplicates.
+
+        Returns
+        -------
+        Index
+
+        See Also
+        --------
+        Series.drop_duplicates : Equivalent method on Series.
+        DataFrame.drop_duplicates : Equivalent method on DataFrame.
+        Index.duplicated : Related method on Index, indicating duplicate
+            Index values.
+
+        Examples
+        --------
+        Generate a pandas.Index with duplicate values.
+
+        >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
+
+        The `keep` parameter controls which duplicate values are removed.
+        The value 'first' keeps the first occurrence for each
+        set of duplicated entries. The default value of keep is 'first'.
+
+        >>> idx.drop_duplicates(keep='first')
+        Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')
+
+        The value 'last' keeps the last occurrence for each set of duplicated
+        entries.
+
+        >>> idx.drop_duplicates(keep='last')
+        Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')
+
+        The value ``False`` discards all sets of duplicated entries.
+
+        >>> idx.drop_duplicates(keep=False)
+        Index(['cow', 'beetle', 'hippo'], dtype='object')
+        """
+        if self.is_unique:
+            return self._view()
+
+        return super().drop_duplicates(keep=keep)
+
+    def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]:
+        """
+        Indicate duplicate index values.
+
+        Duplicated values are indicated as ``True`` values in the resulting
+        array. Either all duplicates, all except the first, or all except the
+        last occurrence of duplicates can be indicated.
+
+        Parameters
+        ----------
+        keep : {'first', 'last', False}, default 'first'
+            The value or values in a set of duplicates to mark as missing.
+
+            - 'first' : Mark duplicates as ``True`` except for the first
+              occurrence.
+            - 'last' : Mark duplicates as ``True`` except for the last
+              occurrence.
+            - ``False`` : Mark all duplicates as ``True``.
+
+        Returns
+        -------
+        np.ndarray[bool]
+
+        See Also
+        --------
+        Series.duplicated : Equivalent method on pandas.Series.
+        DataFrame.duplicated : Equivalent method on pandas.DataFrame.
+        Index.drop_duplicates : Remove duplicate values from Index.
+
+        Examples
+        --------
+        By default, for each set of duplicated values, the first occurrence is
+        set to False and all others to True:
+
+        >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])
+        >>> idx.duplicated()
+        array([False, False,  True, False,  True])
+
+        which is equivalent to
+
+        >>> idx.duplicated(keep='first')
+        array([False, False,  True, False,  True])
+
+        By using 'last', the last occurrence of each set of duplicated values
+        is set to False and all others to True:
+
+        >>> idx.duplicated(keep='last')
+        array([ True, False,  True, False, False])
+
+        By setting keep to ``False``, all duplicates are True:
+
+        >>> idx.duplicated(keep=False)
+        array([ True, False,  True, False,  True])
+        """
+        if self.is_unique:
+            # fastpath available bc we are immutable
+            return np.zeros(len(self), dtype=bool)
+        return self._duplicated(keep=keep)
+
+    # --------------------------------------------------------------------
+    # Arithmetic & Logical Methods
+
+    def __iadd__(self, other):
+        # alias for __add__
+        return self + other
+
+    @final
+    def __nonzero__(self) -> NoReturn:
+        raise ValueError(
+            f"The truth value of a {type(self).__name__} is ambiguous. "
+            "Use a.empty, a.bool(), a.item(), a.any() or a.all()."
+        )
+
+    __bool__ = __nonzero__
+
+    # --------------------------------------------------------------------
+    # Set Operation Methods
+
+    def _get_reconciled_name_object(self, other):
+        """
+        If the result of a set operation will be self,
+        return self, unless the name changes, in which
+        case make a shallow copy of self.
+        """
+        name = get_op_result_name(self, other)
+        if self.name is not name:
+            return self.rename(name)
+        return self
+
+    @final
+    def _validate_sort_keyword(self, sort):
+        if sort not in [None, False, True]:
+            raise ValueError(
+                "The 'sort' keyword only takes the values of "
+                f"None, True, or False; {sort} was passed."
+            )
+
+    @final
+    def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]:
+        """
+        With mismatched timezones, cast both to UTC.
+        """
+        # Caller is responsible for checking
+        # `self.dtype != other.dtype`
+        if (
+            isinstance(self, ABCDatetimeIndex)
+            and isinstance(other, ABCDatetimeIndex)
+            and self.tz is not None
+            and other.tz is not None
+        ):
+            # GH#39328, GH#45357
+            left = self.tz_convert("UTC")
+            right = other.tz_convert("UTC")
+            return left, right
+        return self, other
+
+    @final
+    def union(self, other, sort=None):
+        """
+        Form the union of two Index objects.
+
+        If the Index objects are incompatible, both Index objects will be
+        cast to dtype('object') first.
+
+        Parameters
+        ----------
+        other : Index or array-like
+        sort : bool or None, default None
+            Whether to sort the resulting Index.
+
+            * None : Sort the result, except when
+
+              1. `self` and `other` are equal.
+              2. `self` or `other` has length 0.
+              3. Some values in `self` or `other` cannot be compared.
+                 A RuntimeWarning is issued in this case.
+
+            * False : do not sort the result.
+            * True : Sort the result (which may raise TypeError).
+
+        Returns
+        -------
+        Index
+
+        Examples
+        --------
+        Union matching dtypes
+
+        >>> idx1 = pd.Index([1, 2, 3, 4])
+        >>> idx2 = pd.Index([3, 4, 5, 6])
+        >>> idx1.union(idx2)
+        Index([1, 2, 3, 4, 5, 6], dtype='int64')
+
+        Union mismatched dtypes
+
+        >>> idx1 = pd.Index(['a', 'b', 'c', 'd'])
+        >>> idx2 = pd.Index([1, 2, 3, 4])
+        >>> idx1.union(idx2)
+        Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object')
+
+        MultiIndex case
+
+        >>> idx1 = pd.MultiIndex.from_arrays(
+        ...     [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]]
+        ... )
+        >>> idx1
+        MultiIndex([(1,  'Red'),
+                    (1, 'Blue'),
+                    (2,  'Red'),
+                    (2, 'Blue')],
+                   )
+        >>> idx2 = pd.MultiIndex.from_arrays(
+        ...     [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]]
+        ... )
+        >>> idx2
+        MultiIndex([(3,   'Red'),
+                    (3, 'Green'),
+                    (2,   'Red'),
+                    (2, 'Green')],
+                   )
+        >>> idx1.union(idx2)
+        MultiIndex([(1,  'Blue'),
+                    (1,   'Red'),
+                    (2,  'Blue'),
+                    (2, 'Green'),
+                    (2,   'Red'),
+                    (3, 'Green'),
+                    (3,   'Red')],
+                   )
+        >>> idx1.union(idx2, sort=False)
+        MultiIndex([(1,   'Red'),
+                    (1,  'Blue'),
+                    (2,   'Red'),
+                    (2,  'Blue'),
+                    (3,   'Red'),
+                    (3, 'Green'),
+                    (2, 'Green')],
+                   )
+        """
+        self._validate_sort_keyword(sort)
+        self._assert_can_do_setop(other)
+        other, result_name = self._convert_can_do_setop(other)
+
+        if self.dtype != other.dtype:
+            if (
+                isinstance(self, ABCMultiIndex)
+                and not is_object_dtype(_unpack_nested_dtype(other))
+                and len(other) > 0
+            ):
+                raise NotImplementedError(
+                    "Can only union MultiIndex with MultiIndex or Index of tuples, "
+                    "try mi.to_flat_index().union(other) instead."
+                )
+            self, other = self._dti_setop_align_tzs(other, "union")
+
+            dtype = self._find_common_type_compat(other)
+            left = self.astype(dtype, copy=False)
+            right = other.astype(dtype, copy=False)
+            return left.union(right, sort=sort)
+
+        elif not len(other) or self.equals(other):
+            # NB: whether this (and the `if not len(self)` check below) come before
+            # or after the dtype equality check above affects the returned dtype
+            result = self._get_reconciled_name_object(other)
+            if sort is True:
+                return result.sort_values()
+            return result
+
+        elif not len(self):
+            result = other._get_reconciled_name_object(self)
+            if sort is True:
+                return result.sort_values()
+            return result
+
+        result = self._union(other, sort=sort)
+
+        return self._wrap_setop_result(other, result)
+
+    def _union(self, other: Index, sort: bool | None):
+        """
+        Specific union logic should go here. In subclasses, union behavior
+        should be overwritten here rather than in `self.union`.
+
+        Parameters
+        ----------
+        other : Index or array-like
+        sort : bool or None, default False
+            Whether to sort the resulting index.
+
+            * True : sort the result
+            * False : do not sort the result.
+            * None : sort the result, except when `self` and `other` are equal
+              or when the values cannot be compared.
+
+        Returns
+        -------
+        Index
+        """
+        lvals = self._values
+        rvals = other._values
+
+        if (
+            sort in (None, True)
+            and self.is_monotonic_increasing
+            and other.is_monotonic_increasing
+            and not (self.has_duplicates and other.has_duplicates)
+            and self._can_use_libjoin
+        ):
+            # Both are monotonic and at least one is unique, so can use outer join
+            # (actually don't need either unique, but without this restriction
+            #  test_union_same_value_duplicated_in_both fails)
+            try:
+                return self._outer_indexer(other)[0]
+            except (TypeError, IncompatibleFrequency):
+                # incomparable objects; should only be for object dtype
+                value_list = list(lvals)
+
+                # worth making this faster? a very unusual case
+                value_set = set(lvals)
+                value_list.extend([x for x in rvals if x not in value_set])
+                # If objects are unorderable, we must have object dtype.
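+                # The fallback keeps self's values first, then appends the
+                #  values from other that were not already present, preserving
+                #  the input order of both sides.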
+ return np.array(value_list, dtype=object) + + elif not other.is_unique: + # other has duplicates + result_dups = algos.union_with_duplicates(self, other) + return _maybe_try_sort(result_dups, sort) + + # The rest of this method is analogous to Index._intersection_via_get_indexer + + # Self may have duplicates; other already checked as unique + # find indexes of things in "other" that are not in "self" + if self._index_as_unique: + indexer = self.get_indexer(other) + missing = (indexer == -1).nonzero()[0] + else: + missing = algos.unique1d(self.get_indexer_non_unique(other)[1]) + + result: Index | MultiIndex | ArrayLike + if self._is_multi: + # Preserve MultiIndex to avoid losing dtypes + result = self.append(other.take(missing)) + + else: + if len(missing) > 0: + other_diff = rvals.take(missing) + result = concat_compat((lvals, other_diff)) + else: + result = lvals + + if not self.is_monotonic_increasing or not other.is_monotonic_increasing: + # if both are monotonic then result should already be sorted + result = _maybe_try_sort(result, sort) + + return result + + @final + def _wrap_setop_result(self, other: Index, result) -> Index: + name = get_op_result_name(self, other) + if isinstance(result, Index): + if result.name != name: + result = result.rename(name) + else: + result = self._shallow_copy(result, name=name) + return result + + @final + def intersection(self, other, sort: bool = False): + # default sort keyword is different here from other setops intentionally + # done in GH#25063 + """ + Form the intersection of two Index objects. + + This returns a new Index with elements common to the index and `other`. + + Parameters + ---------- + other : Index or array-like + sort : True, False or None, default False + Whether to sort the resulting index. + + * None : sort the result, except when `self` and `other` are equal + or when the values cannot be compared. + * False : do not sort the result. + * True : Sort the result (which may raise TypeError). + + Returns + ------- + Index + + Examples + -------- + >>> idx1 = pd.Index([1, 2, 3, 4]) + >>> idx2 = pd.Index([3, 4, 5, 6]) + >>> idx1.intersection(idx2) + Index([3, 4], dtype='int64') + """ + self._validate_sort_keyword(sort) + self._assert_can_do_setop(other) + other, result_name = self._convert_can_do_setop(other) + + if self.dtype != other.dtype: + self, other = self._dti_setop_align_tzs(other, "intersection") + + if self.equals(other): + if self.has_duplicates: + result = self.unique()._get_reconciled_name_object(other) + else: + result = self._get_reconciled_name_object(other) + if sort is True: + result = result.sort_values() + return result + + if len(self) == 0 or len(other) == 0: + # fastpath; we need to be careful about having commutativity + + if self._is_multi or other._is_multi: + # _convert_can_do_setop ensures that we have both or neither + # We retain self.levels + return self[:0].rename(result_name) + + dtype = self._find_common_type_compat(other) + if self.dtype == dtype: + # Slicing allows us to retain DTI/TDI.freq, RangeIndex + + # Note: self[:0] vs other[:0] affects + # 1) which index's `freq` we get in DTI/TDI cases + # This may be a historical artifact, i.e. no documented + # reason for this choice. + # 2) The `step` we get in RangeIndex cases + if len(self) == 0: + return self[:0].rename(result_name) + else: + return other[:0].rename(result_name) + + return Index([], dtype=dtype, name=result_name) + + elif not self._should_compare(other): + # We can infer that the intersection is empty. 
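+            # An empty result of the appropriate type is built below: slicing
+            #  self[:0] preserves MultiIndex levels, while the non-multi case
+            #  returns a fresh empty Index.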
+ if isinstance(self, ABCMultiIndex): + return self[:0].rename(result_name) + return Index([], name=result_name) + + elif self.dtype != other.dtype: + dtype = self._find_common_type_compat(other) + this = self.astype(dtype, copy=False) + other = other.astype(dtype, copy=False) + return this.intersection(other, sort=sort) + + result = self._intersection(other, sort=sort) + return self._wrap_intersection_result(other, result) + + def _intersection(self, other: Index, sort: bool = False): + """ + intersection specialized to the case with matching dtypes. + """ + if ( + self.is_monotonic_increasing + and other.is_monotonic_increasing + and self._can_use_libjoin + and not isinstance(self, ABCMultiIndex) + ): + try: + res_indexer, indexer, _ = self._inner_indexer(other) + except TypeError: + # non-comparable; should only be for object dtype + pass + else: + # TODO: algos.unique1d should preserve DTA/TDA + if is_numeric_dtype(self): + # This is faster, because Index.unique() checks for uniqueness + # before calculating the unique values. + res = algos.unique1d(res_indexer) + else: + result = self.take(indexer) + res = result.drop_duplicates() + return ensure_wrapped_if_datetimelike(res) + + res_values = self._intersection_via_get_indexer(other, sort=sort) + res_values = _maybe_try_sort(res_values, sort) + return res_values + + def _wrap_intersection_result(self, other, result): + # We will override for MultiIndex to handle empty results + return self._wrap_setop_result(other, result) + + @final + def _intersection_via_get_indexer( + self, other: Index | MultiIndex, sort + ) -> ArrayLike | MultiIndex: + """ + Find the intersection of two Indexes using get_indexer. + + Returns + ------- + np.ndarray or ExtensionArray + The returned array will be unique. + """ + left_unique = self.unique() + right_unique = other.unique() + + # even though we are unique, we need get_indexer_for for IntervalIndex + indexer = left_unique.get_indexer_for(right_unique) + + mask = indexer != -1 + + taker = indexer.take(mask.nonzero()[0]) + if sort is False: + # sort bc we want the elements in the same order they are in self + # unnecessary in the case with sort=None bc we will sort later + taker = np.sort(taker) + + if isinstance(left_unique, ABCMultiIndex): + result = left_unique.take(taker) + else: + result = left_unique.take(taker)._values + return result + + @final + def difference(self, other, sort=None): + """ + Return a new Index with elements of index not in `other`. + + This is the set difference of two Index objects. + + Parameters + ---------- + other : Index or array-like + sort : bool or None, default None + Whether to sort the resulting index. By default, the + values are attempted to be sorted, but any TypeError from + incomparable elements is caught by pandas. + + * None : Attempt to sort the result, but catch any TypeErrors + from comparing incomparable elements. + * False : Do not sort the result. + * True : Sort the result (which may raise TypeError). + + Returns + ------- + Index + + Examples + -------- + >>> idx1 = pd.Index([2, 1, 3, 4]) + >>> idx2 = pd.Index([3, 4, 5, 6]) + >>> idx1.difference(idx2) + Index([1, 2], dtype='int64') + >>> idx1.difference(idx2, sort=False) + Index([2, 1], dtype='int64') + """ + self._validate_sort_keyword(sort) + self._assert_can_do_setop(other) + other, result_name = self._convert_can_do_setop(other) + + # Note: we do NOT call _dti_setop_align_tzs here, as there + # is no requirement that .difference be commutative, so it does + # not cast to object. 
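+        # Fast paths below: equal indexes yield an empty result, while an
+        #  empty or wholly non-comparable `other` returns self (renamed, and
+        #  sorted when sort=True) without computing the set difference.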
+ + if self.equals(other): + # Note: we do not (yet) sort even if sort=None GH#24959 + return self[:0].rename(result_name) + + if len(other) == 0: + # Note: we do not (yet) sort even if sort=None GH#24959 + result = self.rename(result_name) + if sort is True: + return result.sort_values() + return result + + if not self._should_compare(other): + # Nothing matches -> difference is everything + result = self.rename(result_name) + if sort is True: + return result.sort_values() + return result + + result = self._difference(other, sort=sort) + return self._wrap_difference_result(other, result) + + def _difference(self, other, sort): + # overridden by RangeIndex + + this = self.unique() + + indexer = this.get_indexer_for(other) + indexer = indexer.take((indexer != -1).nonzero()[0]) + + label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) + + the_diff: MultiIndex | ArrayLike + if isinstance(this, ABCMultiIndex): + the_diff = this.take(label_diff) + else: + the_diff = this._values.take(label_diff) + the_diff = _maybe_try_sort(the_diff, sort) + + return the_diff + + def _wrap_difference_result(self, other, result): + # We will override for MultiIndex to handle empty results + return self._wrap_setop_result(other, result) + + def symmetric_difference(self, other, result_name=None, sort=None): + """ + Compute the symmetric difference of two Index objects. + + Parameters + ---------- + other : Index or array-like + result_name : str + sort : bool or None, default None + Whether to sort the resulting index. By default, the + values are attempted to be sorted, but any TypeError from + incomparable elements is caught by pandas. + + * None : Attempt to sort the result, but catch any TypeErrors + from comparing incomparable elements. + * False : Do not sort the result. + * True : Sort the result (which may raise TypeError). + + Returns + ------- + Index + + Notes + ----- + ``symmetric_difference`` contains elements that appear in either + ``idx1`` or ``idx2`` but not both. Equivalent to the Index created by + ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates + dropped. 
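+
+        Passing ``result_name`` sets the name of the result; otherwise the
+        name is inferred from the two operands.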
+ + Examples + -------- + >>> idx1 = pd.Index([1, 2, 3, 4]) + >>> idx2 = pd.Index([2, 3, 4, 5]) + >>> idx1.symmetric_difference(idx2) + Index([1, 5], dtype='int64') + """ + self._validate_sort_keyword(sort) + self._assert_can_do_setop(other) + other, result_name_update = self._convert_can_do_setop(other) + if result_name is None: + result_name = result_name_update + + if self.dtype != other.dtype: + self, other = self._dti_setop_align_tzs(other, "symmetric_difference") + + if not self._should_compare(other): + return self.union(other, sort=sort).rename(result_name) + + elif self.dtype != other.dtype: + dtype = self._find_common_type_compat(other) + this = self.astype(dtype, copy=False) + that = other.astype(dtype, copy=False) + return this.symmetric_difference(that, sort=sort).rename(result_name) + + this = self.unique() + other = other.unique() + indexer = this.get_indexer_for(other) + + # {this} minus {other} + common_indexer = indexer.take((indexer != -1).nonzero()[0]) + left_indexer = np.setdiff1d( + np.arange(this.size), common_indexer, assume_unique=True + ) + left_diff = this.take(left_indexer) + + # {other} minus {this} + right_indexer = (indexer == -1).nonzero()[0] + right_diff = other.take(right_indexer) + + res_values = left_diff.append(right_diff) + result = _maybe_try_sort(res_values, sort) + + if not self._is_multi: + return Index(result, name=result_name, dtype=res_values.dtype) + else: + left_diff = cast("MultiIndex", left_diff) + if len(result) == 0: + # result might be an Index, if other was an Index + return left_diff.remove_unused_levels().set_names(result_name) + return result.set_names(result_name) + + @final + def _assert_can_do_setop(self, other) -> bool: + if not is_list_like(other): + raise TypeError("Input must be Index or array-like") + return True + + def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]: + if not isinstance(other, Index): + other = Index(other, name=self.name) + result_name = self.name + else: + result_name = get_op_result_name(self, other) + return other, result_name + + # -------------------------------------------------------------------- + # Indexing Methods + + def get_loc(self, key): + """ + Get integer location, slice or boolean mask for requested label. + + Parameters + ---------- + key : label + + Returns + ------- + int if unique index, slice if monotonic index, else mask + + Examples + -------- + >>> unique_index = pd.Index(list('abc')) + >>> unique_index.get_loc('b') + 1 + + >>> monotonic_index = pd.Index(list('abbc')) + >>> monotonic_index.get_loc('b') + slice(1, 3, None) + + >>> non_monotonic_index = pd.Index(list('abcb')) + >>> non_monotonic_index.get_loc('b') + array([False, True, False, True]) + """ + casted_key = self._maybe_cast_indexer(key) + try: + return self._engine.get_loc(casted_key) + except KeyError as err: + if isinstance(casted_key, slice) or ( + isinstance(casted_key, abc.Iterable) + and any(isinstance(x, slice) for x in casted_key) + ): + raise InvalidIndexError(key) + raise KeyError(key) from err + except TypeError: + # If we have a listlike key, _check_indexing_error will raise + # InvalidIndexError. Otherwise we fall through and re-raise + # the TypeError. + self._check_indexing_error(key) + raise + + _index_shared_docs[ + "get_indexer" + ] = """ + Compute indexer and mask for new index given the current index. + + The indexer should be then used as an input to ndarray.take to align the + current data to the new index. 
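+
+    This requires a unique index; calling it on a non-unique index raises
+    ``InvalidIndexError`` (use ``get_indexer_non_unique`` in that case).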
+
+    Parameters
+    ----------
+    target : %(target_klass)s
+    method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
+        * default: exact matches only.
+        * pad / ffill: find the PREVIOUS index value if no exact match.
+        * backfill / bfill: use NEXT index value if no exact match.
+        * nearest: use the NEAREST index value if no exact match. Tied
+          distances are broken by preferring the larger index value.
+    limit : int, optional
+        Maximum number of consecutive labels in ``target`` to match for
+        inexact matches.
+    tolerance : optional
+        Maximum distance between original and new labels for inexact
+        matches. The values of the index at the matching locations must
+        satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
+
+        Tolerance may be a scalar value, which applies the same tolerance
+        to all values, or list-like, which applies variable tolerance per
+        element. List-like includes list, tuple, array, Series, and must be
+        the same size as the index and its dtype must exactly match the
+        index's type.
+
+    Returns
+    -------
+    np.ndarray[np.intp]
+        Integers from 0 to n - 1 indicating that the index at these
+        positions matches the corresponding target values. Missing values
+        in the target are marked by -1.
+    %(raises_section)s
+    Notes
+    -----
+    Returns -1 for unmatched values, for further explanation see the
+    example below.
+
+    Examples
+    --------
+    >>> index = pd.Index(['c', 'a', 'b'])
+    >>> index.get_indexer(['a', 'b', 'x'])
+    array([ 1,  2, -1])
+
+    Notice that the return value is an array of locations in ``index``
+    and ``x`` is marked by -1, as it is not in ``index``.
+    """
+
+    @Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
+    @final
+    def get_indexer(
+        self,
+        target,
+        method: ReindexMethod | None = None,
+        limit: int | None = None,
+        tolerance=None,
+    ) -> npt.NDArray[np.intp]:
+        method = clean_reindex_fill_method(method)
+        orig_target = target
+        target = self._maybe_cast_listlike_indexer(target)
+
+        self._check_indexing_method(method, limit, tolerance)
+
+        if not self._index_as_unique:
+            raise InvalidIndexError(self._requires_unique_msg)
+
+        if len(target) == 0:
+            return np.array([], dtype=np.intp)
+
+        if not self._should_compare(target) and not self._should_partial_index(target):
+            # IntervalIndex gets special treatment bc numeric scalars can be
+            #  matched to Interval scalars
+            return self._get_indexer_non_comparable(target, method=method, unique=True)
+
+        if isinstance(self.dtype, CategoricalDtype):
+            # _maybe_cast_listlike_indexer ensures target has our dtype
+            #  (could improve perf by doing _should_compare check earlier?)
+            assert self.dtype == target.dtype
+
+            indexer = self._engine.get_indexer(target.codes)
+            if self.hasnans and target.hasnans:
+                # After _maybe_cast_listlike_indexer, target elements which do not
+                # belong to some category are changed to NaNs
+                # Mask to track actual NaN values compared to inserted NaN values
+                # GH#45361
+                target_nans = isna(orig_target)
+                loc = self.get_loc(np.nan)
+                mask = target.isna()
+                indexer[target_nans] = loc
+                indexer[mask & ~target_nans] = -1
+            return indexer
+
+        if isinstance(target.dtype, CategoricalDtype):
+            # potential fastpath
+            #  get an indexer for unique categories then propagate to codes via take_nd
+            # get_indexer instead of _get_indexer needed for MultiIndex cases
+            #  e.g. test_append_different_columns_types
+            categories_indexer = self.get_indexer(target.categories)
+
+            indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1)
+
+            if (not self._is_multi and self.hasnans) and target.hasnans:
+                # Exclude MultiIndex because hasnans raises NotImplementedError
+                #  we should only get here if we are unique, so loc is an integer
+                # GH#41934
+                loc = self.get_loc(np.nan)
+                mask = target.isna()
+                indexer[mask] = loc
+
+            return ensure_platform_int(indexer)
+
+        pself, ptarget = self._maybe_promote(target)
+        if pself is not self or ptarget is not target:
+            return pself.get_indexer(
+                ptarget, method=method, limit=limit, tolerance=tolerance
+            )
+
+        if self.dtype == target.dtype and self.equals(target):
+            # Only call equals if we have same dtype to avoid inference/casting
+            return np.arange(len(target), dtype=np.intp)
+
+        if self.dtype != target.dtype and not self._should_partial_index(target):
+            # _should_partial_index e.g. IntervalIndex with numeric scalars
+            #  that can be matched to Interval scalars.
+            dtype = self._find_common_type_compat(target)
+
+            this = self.astype(dtype, copy=False)
+            target = target.astype(dtype, copy=False)
+            return this._get_indexer(
+                target, method=method, limit=limit, tolerance=tolerance
+            )
+
+        return self._get_indexer(target, method, limit, tolerance)
+
+    def _get_indexer(
+        self,
+        target: Index,
+        method: str_t | None = None,
+        limit: int | None = None,
+        tolerance=None,
+    ) -> npt.NDArray[np.intp]:
+        if tolerance is not None:
+            tolerance = self._convert_tolerance(tolerance, target)
+
+        if method in ["pad", "backfill"]:
+            indexer = self._get_fill_indexer(target, method, limit, tolerance)
+        elif method == "nearest":
+            indexer = self._get_nearest_indexer(target, limit, tolerance)
+        else:
+            if target._is_multi and self._is_multi:
+                engine = self._engine
+                # error: Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]"
+                # has no attribute "_extract_level_codes"
+                tgt_values = engine._extract_level_codes(  # type: ignore[union-attr]
+                    target
+                )
+            else:
+                tgt_values = target._get_engine_target()
+
+            indexer = self._engine.get_indexer(tgt_values)
+
+        return ensure_platform_int(indexer)
+
+    @final
+    def _should_partial_index(self, target: Index) -> bool:
+        """
+        Should we attempt partial-matching indexing?
+        """
+        if isinstance(self.dtype, IntervalDtype):
+            if isinstance(target.dtype, IntervalDtype):
+                return False
+            # See https://github.com/pandas-dev/pandas/issues/47772; the
+            #  commented-out code can be restored (instead of hardcoding
+            #  `return True`) once that issue is fixed
+            # "Index" has no attribute "left"
+            # return self.left._should_compare(target)  # type: ignore[attr-defined]
+            return True
+        return False
+
+    @final
+    def _check_indexing_method(
+        self,
+        method: str_t | None,
+        limit: int | None = None,
+        tolerance=None,
+    ) -> None:
+        """
+        Raise if we have a get_indexer `method` that is not supported or valid.
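+        Raises ``NotImplementedError`` for method/index-type combinations
+        that are not yet supported, and ``ValueError`` when ``tolerance`` or
+        ``limit`` is passed without a fill method.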
+ """ + if method not in [None, "bfill", "backfill", "pad", "ffill", "nearest"]: + # in practice the clean_reindex_fill_method call would raise + # before we get here + raise ValueError("Invalid fill method") # pragma: no cover + + if self._is_multi: + if method == "nearest": + raise NotImplementedError( + "method='nearest' not implemented yet " + "for MultiIndex; see GitHub issue 9365" + ) + if method in ("pad", "backfill"): + if tolerance is not None: + raise NotImplementedError( + "tolerance not implemented yet for MultiIndex" + ) + + if isinstance(self.dtype, (IntervalDtype, CategoricalDtype)): + # GH#37871 for now this is only for IntervalIndex and CategoricalIndex + if method is not None: + raise NotImplementedError( + f"method {method} not yet implemented for {type(self).__name__}" + ) + + if method is None: + if tolerance is not None: + raise ValueError( + "tolerance argument only valid if doing pad, " + "backfill or nearest reindexing" + ) + if limit is not None: + raise ValueError( + "limit argument only valid if doing pad, " + "backfill or nearest reindexing" + ) + + def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray: + # override this method on subclasses + tolerance = np.asarray(tolerance) + if target.size != tolerance.size and tolerance.size > 1: + raise ValueError("list-like tolerance size must match target index size") + elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number): + if tolerance.ndim > 0: + raise ValueError( + f"tolerance argument for {type(self).__name__} with dtype " + f"{self.dtype} must contain numeric elements if it is list type" + ) + + raise ValueError( + f"tolerance argument for {type(self).__name__} with dtype {self.dtype} " + f"must be numeric if it is a scalar: {repr(tolerance)}" + ) + return tolerance + + @final + def _get_fill_indexer( + self, target: Index, method: str_t, limit: int | None = None, tolerance=None + ) -> npt.NDArray[np.intp]: + if self._is_multi: + # TODO: get_indexer_with_fill docstring says values must be _sorted_ + # but that doesn't appear to be enforced + # error: "IndexEngine" has no attribute "get_indexer_with_fill" + engine = self._engine + with warnings.catch_warnings(): + # TODO: We need to fix this. Casting to int64 in cython + warnings.filterwarnings("ignore", category=RuntimeWarning) + return engine.get_indexer_with_fill( # type: ignore[union-attr] + target=target._values, + values=self._values, + method=method, + limit=limit, + ) + + if self.is_monotonic_increasing and target.is_monotonic_increasing: + target_values = target._get_engine_target() + own_values = self._get_engine_target() + if not isinstance(target_values, np.ndarray) or not isinstance( + own_values, np.ndarray + ): + raise NotImplementedError + + if method == "pad": + indexer = libalgos.pad(own_values, target_values, limit=limit) + else: + # i.e. "backfill" + indexer = libalgos.backfill(own_values, target_values, limit=limit) + else: + indexer = self._get_fill_indexer_searchsorted(target, method, limit) + if tolerance is not None and len(self): + indexer = self._filter_indexer_tolerance(target, indexer, tolerance) + return indexer + + @final + def _get_fill_indexer_searchsorted( + self, target: Index, method: str_t, limit: int | None = None + ) -> npt.NDArray[np.intp]: + """ + Fallback pad/backfill get_indexer that works for monotonic decreasing + indexes and non-monotonic targets. 
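+        ``limit`` is rejected here because it is only well-defined when both
+        the index and the target are monotonic.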
+ """ + if limit is not None: + raise ValueError( + f"limit argument for {repr(method)} method only well-defined " + "if index and target are monotonic" + ) + + side: Literal["left", "right"] = "left" if method == "pad" else "right" + + # find exact matches first (this simplifies the algorithm) + indexer = self.get_indexer(target) + nonexact = indexer == -1 + indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side) + if side == "left": + # searchsorted returns "indices into a sorted array such that, + # if the corresponding elements in v were inserted before the + # indices, the order of a would be preserved". + # Thus, we need to subtract 1 to find values to the left. + indexer[nonexact] -= 1 + # This also mapped not found values (values of 0 from + # np.searchsorted) to -1, which conveniently is also our + # sentinel for missing values + else: + # Mark indices to the right of the largest value as not found + indexer[indexer == len(self)] = -1 + return indexer + + @final + def _get_nearest_indexer( + self, target: Index, limit: int | None, tolerance + ) -> npt.NDArray[np.intp]: + """ + Get the indexer for the nearest index labels; requires an index with + values that can be subtracted from each other (e.g., not strings or + tuples). + """ + if not len(self): + return self._get_fill_indexer(target, "pad") + + left_indexer = self.get_indexer(target, "pad", limit=limit) + right_indexer = self.get_indexer(target, "backfill", limit=limit) + + left_distances = self._difference_compat(target, left_indexer) + right_distances = self._difference_compat(target, right_indexer) + + op = operator.lt if self.is_monotonic_increasing else operator.le + indexer = np.where( + # error: Argument 1&2 has incompatible type "Union[ExtensionArray, + # ndarray[Any, Any]]"; expected "Union[SupportsDunderLE, + # SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]" + op(left_distances, right_distances) # type: ignore[arg-type] + | (right_indexer == -1), + left_indexer, + right_indexer, + ) + if tolerance is not None: + indexer = self._filter_indexer_tolerance(target, indexer, tolerance) + return indexer + + @final + def _filter_indexer_tolerance( + self, + target: Index, + indexer: npt.NDArray[np.intp], + tolerance, + ) -> npt.NDArray[np.intp]: + distance = self._difference_compat(target, indexer) + + return np.where(distance <= tolerance, indexer, -1) + + @final + def _difference_compat( + self, target: Index, indexer: npt.NDArray[np.intp] + ) -> ArrayLike: + # Compatibility for PeriodArray, for which __sub__ returns an ndarray[object] + # of DateOffset objects, which do not support __abs__ (and would be slow + # if they did) + + if isinstance(self.dtype, PeriodDtype): + # Note: we only get here with matching dtypes + own_values = cast("PeriodArray", self._data)._ndarray + target_values = cast("PeriodArray", target._data)._ndarray + diff = own_values[indexer] - target_values + else: + # error: Unsupported left operand type for - ("ExtensionArray") + diff = self._values[indexer] - target._values # type: ignore[operator] + return abs(diff) + + # -------------------------------------------------------------------- + # Indexer Conversion Methods + + @final + def _validate_positional_slice(self, key: slice) -> None: + """ + For positional indexing, a slice must have either int or None + for each of start, stop, and step. 
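+        For example, ``obj.iloc[1:5:2]`` passes this validation, while a
+        float bound such as ``obj.iloc[1.5:]`` raises a TypeError.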
+ """ + self._validate_indexer("positional", key.start, "iloc") + self._validate_indexer("positional", key.stop, "iloc") + self._validate_indexer("positional", key.step, "iloc") + + def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]): + """ + Convert a slice indexer. + + By definition, these are labels unless 'iloc' is passed in. + Floats are not allowed as the start, step, or stop of the slice. + + Parameters + ---------- + key : label of the slice bound + kind : {'loc', 'getitem'} + """ + + # potentially cast the bounds to integers + start, stop, step = key.start, key.stop, key.step + + # figure out if this is a positional indexer + is_index_slice = is_valid_positional_slice(key) + + # TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able + # to simplify this. + if lib.is_np_dtype(self.dtype, "f"): + # We always treat __getitem__ slicing as label-based + # translate to locations + if kind == "getitem" and is_index_slice and not start == stop and step != 0: + # exclude step=0 from the warning because it will raise anyway + # start/stop both None e.g. [:] or [::-1] won't change. + # exclude start==stop since it will be empty either way, or + # will be [:] or [::-1] which won't change + warnings.warn( + # GH#49612 + "The behavior of obj[i:j] with a float-dtype index is " + "deprecated. In a future version, this will be treated as " + "positional instead of label-based. For label-based slicing, " + "use obj.loc[i:j] instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.slice_indexer(start, stop, step) + + if kind == "getitem": + # called from the getitem slicers, validate that we are in fact integers + if is_index_slice: + # In this case the _validate_indexer checks below are redundant + return key + elif self.dtype.kind in "iu": + # Note: these checks are redundant if we know is_index_slice + self._validate_indexer("slice", key.start, "getitem") + self._validate_indexer("slice", key.stop, "getitem") + self._validate_indexer("slice", key.step, "getitem") + return key + + # convert the slice to an indexer here + + # special case for interval_dtype bc we do not do partial-indexing + # on integer Intervals when slicing + # TODO: write this in terms of e.g. should_partial_index? + ints_are_positional = self._should_fallback_to_positional or isinstance( + self.dtype, IntervalDtype + ) + is_positional = is_index_slice and ints_are_positional + + # if we are mixed and have integers + if is_positional: + try: + # Validate start & stop + if start is not None: + self.get_loc(start) + if stop is not None: + self.get_loc(stop) + is_positional = False + except KeyError: + pass + + if com.is_null_slice(key): + # It doesn't matter if we are positional or label based + indexer = key + elif is_positional: + if kind == "loc": + # GH#16121, GH#24612, GH#31810 + raise TypeError( + "Slicing a positional slice with .loc is not allowed, " + "Use .loc with labels or .iloc with positions instead.", + ) + indexer = key + else: + indexer = self.slice_indexer(start, stop, step) + + return indexer + + @final + def _raise_invalid_indexer( + self, + form: Literal["slice", "positional"], + key, + reraise: lib.NoDefault | None | Exception = lib.no_default, + ) -> None: + """ + Raise consistent invalid indexer message. 
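+        The message names the index type, the offending key, and the key's
+        type, e.g. "cannot do slice indexing on Index with these indexers
+        [1.5] of type float".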
+ """ + msg = ( + f"cannot do {form} indexing on {type(self).__name__} with these " + f"indexers [{key}] of type {type(key).__name__}" + ) + if reraise is not lib.no_default: + raise TypeError(msg) from reraise + raise TypeError(msg) + + # -------------------------------------------------------------------- + # Reindex Methods + + @final + def _validate_can_reindex(self, indexer: np.ndarray) -> None: + """ + Check if we are allowing reindexing with this particular indexer. + + Parameters + ---------- + indexer : an integer ndarray + + Raises + ------ + ValueError if its a duplicate axis + """ + # trying to reindex on an axis with duplicates + if not self._index_as_unique and len(indexer): + raise ValueError("cannot reindex on an axis with duplicate labels") + + def reindex( + self, + target, + method: ReindexMethod | None = None, + level=None, + limit: int | None = None, + tolerance: float | None = None, + ) -> tuple[Index, npt.NDArray[np.intp] | None]: + """ + Create index with target's values. + + Parameters + ---------- + target : an iterable + method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional + * default: exact matches only. + * pad / ffill: find the PREVIOUS index value if no exact match. + * backfill / bfill: use NEXT index value if no exact match + * nearest: use the NEAREST index value if no exact match. Tied + distances are broken by preferring the larger index value. + level : int, optional + Level of multiindex. + limit : int, optional + Maximum number of consecutive labels in ``target`` to match for + inexact matches. + tolerance : int or float, optional + Maximum distance between original and new labels for inexact + matches. The values of the index at the matching locations must + satisfy the equation ``abs(index[indexer] - target) <= tolerance``. + + Tolerance may be a scalar value, which applies the same tolerance + to all values, or list-like, which applies variable tolerance per + element. List-like includes list, tuple, array, Series, and must be + the same size as the index and its dtype must exactly match the + index's type. + + Returns + ------- + new_index : pd.Index + Resulting index. + indexer : np.ndarray[np.intp] or None + Indices of output values in original index. + + Raises + ------ + TypeError + If ``method`` passed along with ``level``. + ValueError + If non-unique multi-index + ValueError + If non-unique index and ``method`` or ``limit`` passed. + + See Also + -------- + Series.reindex : Conform Series to new index with optional filling logic. + DataFrame.reindex : Conform DataFrame to new index with optional filling logic. + + Examples + -------- + >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) + >>> idx + Index(['car', 'bike', 'train', 'tractor'], dtype='object') + >>> idx.reindex(['car', 'bike']) + (Index(['car', 'bike'], dtype='object'), array([0, 1])) + """ + # GH6552: preserve names when reindexing to non-named target + # (i.e. neither Index nor Series). + preserve_names = not hasattr(target, "name") + + # GH7774: preserve dtype/tz if target is empty and not an Index. + target = ensure_has_len(target) # target may be an iterator + + if not isinstance(target, Index) and len(target) == 0: + if level is not None and self._is_multi: + # "Index" has no attribute "levels"; maybe "nlevels"? 
+ idx = self.levels[level] # type: ignore[attr-defined] + else: + idx = self + target = idx[:0] + else: + target = ensure_index(target) + + if level is not None and ( + isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex) + ): + if method is not None: + raise TypeError("Fill method not supported if level passed") + + # TODO: tests where passing `keep_order=not self._is_multi` + # makes a difference for non-MultiIndex case + target, indexer, _ = self._join_level( + target, level, how="right", keep_order=not self._is_multi + ) + + else: + if self.equals(target): + indexer = None + else: + if self._index_as_unique: + indexer = self.get_indexer( + target, method=method, limit=limit, tolerance=tolerance + ) + elif self._is_multi: + raise ValueError("cannot handle a non-unique multi-index!") + elif not self.is_unique: + # GH#42568 + raise ValueError("cannot reindex on an axis with duplicate labels") + else: + indexer, _ = self.get_indexer_non_unique(target) + + target = self._wrap_reindex_result(target, indexer, preserve_names) + return target, indexer + + def _wrap_reindex_result(self, target, indexer, preserve_names: bool): + target = self._maybe_preserve_names(target, preserve_names) + return target + + def _maybe_preserve_names(self, target: Index, preserve_names: bool): + if preserve_names and target.nlevels == 1 and target.name != self.name: + target = target.copy(deep=False) + target.name = self.name + return target + + @final + def _reindex_non_unique( + self, target: Index + ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]: + """ + Create a new index with target's values (move/add/delete values as + necessary) use with non-unique Index and a possibly non-unique target. + + Parameters + ---------- + target : an iterable + + Returns + ------- + new_index : pd.Index + Resulting index. + indexer : np.ndarray[np.intp] + Indices of output values in original index. 
+ new_indexer : np.ndarray[np.intp] or None + + """ + target = ensure_index(target) + if len(target) == 0: + # GH#13691 + return self[:0], np.array([], dtype=np.intp), None + + indexer, missing = self.get_indexer_non_unique(target) + check = indexer != -1 + new_labels = self.take(indexer[check]) + new_indexer = None + + if len(missing): + length = np.arange(len(indexer), dtype=np.intp) + + missing = ensure_platform_int(missing) + missing_labels = target.take(missing) + missing_indexer = length[~check] + cur_labels = self.take(indexer[check]).values + cur_indexer = length[check] + + # Index constructor below will do inference + new_labels = np.empty((len(indexer),), dtype=object) + new_labels[cur_indexer] = cur_labels + new_labels[missing_indexer] = missing_labels + + # GH#38906 + if not len(self): + new_indexer = np.arange(0, dtype=np.intp) + + # a unique indexer + elif target.is_unique: + # see GH5553, make sure we use the right indexer + new_indexer = np.arange(len(indexer), dtype=np.intp) + new_indexer[cur_indexer] = np.arange(len(cur_labels)) + new_indexer[missing_indexer] = -1 + + # we have a non_unique selector, need to use the original + # indexer here + else: + # need to retake to have the same size as the indexer + indexer[~check] = -1 + + # reset the new indexer to account for the new size + new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp) + new_indexer[~check] = -1 + + if not isinstance(self, ABCMultiIndex): + new_index = Index(new_labels, name=self.name) + else: + new_index = type(self).from_tuples(new_labels, names=self.names) + return new_index, indexer, new_indexer + + # -------------------------------------------------------------------- + # Join Methods + + @overload + def join( + self, + other: Index, + *, + how: JoinHow = ..., + level: Level = ..., + return_indexers: Literal[True], + sort: bool = ..., + ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: + ... + + @overload + def join( + self, + other: Index, + *, + how: JoinHow = ..., + level: Level = ..., + return_indexers: Literal[False] = ..., + sort: bool = ..., + ) -> Index: + ... + + @overload + def join( + self, + other: Index, + *, + how: JoinHow = ..., + level: Level = ..., + return_indexers: bool = ..., + sort: bool = ..., + ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: + ... + + @final + @_maybe_return_indexers + def join( + self, + other: Index, + *, + how: JoinHow = "left", + level: Level | None = None, + return_indexers: bool = False, + sort: bool = False, + ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: + """ + Compute join_index and indexers to conform data structures to the new index. + + Parameters + ---------- + other : Index + how : {'left', 'right', 'inner', 'outer'} + level : int or level name, default None + return_indexers : bool, default False + sort : bool, default False + Sort the join keys lexicographically in the result Index. If False, + the order of the join keys depends on the join type (how keyword). + + Returns + ------- + join_index, (left_indexer, right_indexer) + + Examples + -------- + >>> idx1 = pd.Index([1, 2, 3]) + >>> idx2 = pd.Index([4, 5, 6]) + >>> idx1.join(idx2, how='outer') + Index([1, 2, 3, 4, 5, 6], dtype='int64') + """ + other = ensure_index(other) + + if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): + if (self.tz is None) ^ (other.tz is None): + # Raise instead of casting to object below. 
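+                #  Mixing tz-naive and tz-aware values would make the join
+                #  keys ambiguous, so this raises rather than upcasting.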
+                raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex")
+
+        if not self._is_multi and not other._is_multi:
+            # We have specific handling for MultiIndex below
+            pself, pother = self._maybe_promote(other)
+            if pself is not self or pother is not other:
+                return pself.join(
+                    pother, how=how, level=level, return_indexers=True, sort=sort
+                )
+
+        lindexer: np.ndarray | None
+        rindexer: np.ndarray | None
+
+        # try to figure out the join level
+        # GH3662
+        if level is None and (self._is_multi or other._is_multi):
+            # have the same levels/names so a simple join
+            if self.names == other.names:
+                pass
+            else:
+                return self._join_multi(other, how=how)
+
+        # join on the level
+        if level is not None and (self._is_multi or other._is_multi):
+            return self._join_level(other, level, how=how)
+
+        if len(other) == 0:
+            if how in ("left", "outer"):
+                join_index = self._view()
+                rindexer = np.broadcast_to(np.intp(-1), len(join_index))
+                return join_index, None, rindexer
+            elif how in ("right", "inner", "cross"):
+                join_index = other._view()
+                lindexer = np.array([])
+                return join_index, lindexer, None
+
+        if len(self) == 0:
+            if how in ("right", "outer"):
+                join_index = other._view()
+                lindexer = np.broadcast_to(np.intp(-1), len(join_index))
+                return join_index, lindexer, None
+            elif how in ("left", "inner", "cross"):
+                join_index = self._view()
+                rindexer = np.array([])
+                return join_index, None, rindexer
+
+        if self._join_precedence < other._join_precedence:
+            flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"}
+            how = flip.get(how, how)
+            join_index, lidx, ridx = other.join(
+                self, how=how, level=level, return_indexers=True
+            )
+            lidx, ridx = ridx, lidx
+            return join_index, lidx, ridx
+
+        if self.dtype != other.dtype:
+            dtype = self._find_common_type_compat(other)
+            this = self.astype(dtype, copy=False)
+            other = other.astype(dtype, copy=False)
+            return this.join(other, how=how, return_indexers=True)
+
+        _validate_join_method(how)
+
+        if not self.is_unique and not other.is_unique:
+            return self._join_non_unique(other, how=how)
+        elif not self.is_unique or not other.is_unique:
+            if self.is_monotonic_increasing and other.is_monotonic_increasing:
+                if not isinstance(self.dtype, IntervalDtype):
+                    # otherwise we will fall through to _join_via_get_indexer
+                    # GH#39133
+                    # go through object dtype for ea till engine is supported properly
+                    return self._join_monotonic(other, how=how)
+            else:
+                return self._join_non_unique(other, how=how)
+        elif (
+            # GH48504: exclude MultiIndex to avoid going through MultiIndex._values
+            self.is_monotonic_increasing
+            and other.is_monotonic_increasing
+            and self._can_use_libjoin
+            and not isinstance(self, ABCMultiIndex)
+            and not isinstance(self.dtype, CategoricalDtype)
+        ):
+            # Categorical is monotonic if data are ordered as categories, but join can
+            #  not handle this in case of not lexicographically monotonic GH#38502
+            try:
+                return self._join_monotonic(other, how=how)
+            except TypeError:
+                # object dtype; non-comparable objects
+                pass
+
+        return self._join_via_get_indexer(other, how, sort)
+
+    @final
+    def _join_via_get_indexer(
+        self, other: Index, how: JoinHow, sort: bool
+    ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
+        # Fallback if we do not have any fastpaths available based on
+        #  uniqueness/monotonicity
+
+        # Note: at this point we have checked matching dtypes
+
+        if how == "left":
+            join_index = self
+        elif how == "right":
+            join_index = other
+        elif how == "inner":
+            # TODO: sort=False here for backwards compat. It may
+            #  be better to use the sort parameter passed into join
+            join_index = self.intersection(other, sort=False)
+        elif how == "outer":
+            # TODO: sort=True here for backwards compat. It may
+            #  be better to use the sort parameter passed into join
+            join_index = self.union(other)
+
+        if sort:
+            join_index = join_index.sort_values()
+
+        if join_index is self:
+            lindexer = None
+        else:
+            lindexer = self.get_indexer_for(join_index)
+        if join_index is other:
+            rindexer = None
+        else:
+            rindexer = other.get_indexer_for(join_index)
+        return join_index, lindexer, rindexer
+
+    @final
+    def _join_multi(self, other: Index, how: JoinHow):
+        from pandas.core.indexes.multi import MultiIndex
+        from pandas.core.reshape.merge import restore_dropped_levels_multijoin
+
+        # figure out join names
+        self_names_list = list(com.not_none(*self.names))
+        other_names_list = list(com.not_none(*other.names))
+        self_names_order = self_names_list.index
+        other_names_order = other_names_list.index
+        self_names = set(self_names_list)
+        other_names = set(other_names_list)
+        overlap = self_names & other_names
+
+        # need at least 1 in common
+        if not overlap:
+            raise ValueError("cannot join with no overlapping index names")
+
+        if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
+            # Drop the non-matching levels from left and right respectively
+            ldrop_names = sorted(self_names - overlap, key=self_names_order)
+            rdrop_names = sorted(other_names - overlap, key=other_names_order)
+
+            # if only the order differs
+            if not len(ldrop_names + rdrop_names):
+                self_jnlevels = self
+                other_jnlevels = other.reorder_levels(self.names)
+            else:
+                self_jnlevels = self.droplevel(ldrop_names)
+                other_jnlevels = other.droplevel(rdrop_names)
+
+            # Join left and right
+            # Join on same leveled multi-index frames is supported
+            join_idx, lidx, ridx = self_jnlevels.join(
+                other_jnlevels, how=how, return_indexers=True
+            )
+
+            # Restore the dropped levels
+            # Returned index level order is
+            # common levels, ldrop_names, rdrop_names
+            dropped_names = ldrop_names + rdrop_names
+
+            # error: Argument 5/6 to "restore_dropped_levels_multijoin" has
+            # incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any
+            # ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]"
+            levels, codes, names = restore_dropped_levels_multijoin(
+                self,
+                other,
+                dropped_names,
+                join_idx,
+                lidx,  # type: ignore[arg-type]
+                ridx,  # type: ignore[arg-type]
+            )
+
+            # Re-create the multi-index
+            multi_join_idx = MultiIndex(
+                levels=levels, codes=codes, names=names, verify_integrity=False
+            )
+
+            multi_join_idx = multi_join_idx.remove_unused_levels()
+
+            return multi_join_idx, lidx, ridx
+
+        jl = next(iter(overlap))
+
+        # Case where only one index is multi
+        # make the indices into mi's that match
+        flip_order = False
+        if isinstance(self, MultiIndex):
+            self, other = other, self
+            flip_order = True
+            # flip if join method is right or left
+            flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"}
+            how = flip.get(how, how)
+
+        level = other.names.index(jl)
+        result = self._join_level(other, level, how=how)
+
+        if flip_order:
+            return result[0], result[2], result[1]
+        return result
+
+    @final
+    def _join_non_unique(
+        self, other: Index, how: JoinHow = "left"
+    ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]:
+        from pandas.core.reshape.merge import get_join_indexers
+
+        # We only get here if dtypes match
+        assert self.dtype == other.dtype
+
+        left_idx, right_idx = get_join_indexers(
+            [self._values], [other._values], how=how, sort=True
+        )
+        mask = left_idx == -1
+
+        join_idx = self.take(left_idx)
+        right = other.take(right_idx)
+        join_index = join_idx.putmask(mask, right)
+        return join_index, left_idx, right_idx
+
+    @final
+    def _join_level(
+        self, other: Index, level, how: JoinHow = "left", keep_order: bool = True
+    ) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
+        """
+        The join method *only* affects the level of the resulting
+        MultiIndex. Otherwise it just exactly aligns the Index data to the
+        labels of the level in the MultiIndex.
+
+        If ``keep_order == True``, the order of the data indexed by the
+        MultiIndex will not be changed; otherwise, it will tie out
+        with `other`.
+        """
+        from pandas.core.indexes.multi import MultiIndex
+
+        def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]:
+            """
+            Returns sorter for the inner most level while preserving the
+            order of higher levels.
+
+            Parameters
+            ----------
+            labels : list[np.ndarray]
+                Each ndarray has signed integer dtype, not necessarily identical.
+
+            Returns
+            -------
+            np.ndarray[np.intp]
+            """
+            if labels[0].size == 0:
+                return np.empty(0, dtype=np.intp)
+
+            if len(labels) == 1:
+                return get_group_index_sorter(ensure_platform_int(labels[0]))
+
+            # find indexers of beginning of each set of
+            # same-key labels w.r.t all but last level
+            tic = labels[0][:-1] != labels[0][1:]
+            for lab in labels[1:-1]:
+                tic |= lab[:-1] != lab[1:]
+
+            starts = np.hstack(([True], tic, [True])).nonzero()[0]
+            lab = ensure_int64(labels[-1])
+            return lib.get_level_sorter(lab, ensure_platform_int(starts))
+
+        if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
+            raise TypeError("Join on level between two MultiIndex objects is ambiguous")
+
+        left, right = self, other
+
+        flip_order = not isinstance(self, MultiIndex)
+        if flip_order:
+            left, right = right, left
+            flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"}
+            how = flip.get(how, how)
+
+        assert isinstance(left, MultiIndex)
+
+        level = left._get_level_number(level)
+        old_level = left.levels[level]
+
+        if not right.is_unique:
+            raise NotImplementedError(
+                "Index._join_level on non-unique index is not implemented"
+            )
+
+        new_level, left_lev_indexer, right_lev_indexer = old_level.join(
+            right, how=how, return_indexers=True
+        )
+
+        if left_lev_indexer is None:
+            if keep_order or len(left) == 0:
+                left_indexer = None
+                join_index = left
+            else:  # sort the leaves
+                left_indexer = _get_leaf_sorter(left.codes[: level + 1])
+                join_index = left[left_indexer]
+
+        else:
+            left_lev_indexer = ensure_platform_int(left_lev_indexer)
+            rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level))
+            old_codes = left.codes[level]
+
+            taker = old_codes[old_codes != -1]
+            new_lev_codes = rev_indexer.take(taker)
+
+            new_codes = list(left.codes)
+            new_codes[level] = new_lev_codes
+
+            new_levels = list(left.levels)
+            new_levels[level] = new_level
+
+            if keep_order:  # just drop missing values. o.w.
keep order + left_indexer = np.arange(len(left), dtype=np.intp) + left_indexer = cast(np.ndarray, left_indexer) + mask = new_lev_codes != -1 + if not mask.all(): + new_codes = [lab[mask] for lab in new_codes] + left_indexer = left_indexer[mask] + + else: # tie out the order with other + if level == 0: # outer most level, take the fast route + max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max() + ngroups = 1 + max_new_lev + left_indexer, counts = libalgos.groupsort_indexer( + new_lev_codes, ngroups + ) + + # missing values are placed first; drop them! + left_indexer = left_indexer[counts[0] :] + new_codes = [lab[left_indexer] for lab in new_codes] + + else: # sort the leaves + mask = new_lev_codes != -1 + mask_all = mask.all() + if not mask_all: + new_codes = [lab[mask] for lab in new_codes] + + left_indexer = _get_leaf_sorter(new_codes[: level + 1]) + new_codes = [lab[left_indexer] for lab in new_codes] + + # left_indexers are w.r.t masked frame. + # reverse to original frame! + if not mask_all: + left_indexer = mask.nonzero()[0][left_indexer] + + join_index = MultiIndex( + levels=new_levels, + codes=new_codes, + names=left.names, + verify_integrity=False, + ) + + if right_lev_indexer is not None: + right_indexer = right_lev_indexer.take(join_index.codes[level]) + else: + right_indexer = join_index.codes[level] + + if flip_order: + left_indexer, right_indexer = right_indexer, left_indexer + + left_indexer = ( + None if left_indexer is None else ensure_platform_int(left_indexer) + ) + right_indexer = ( + None if right_indexer is None else ensure_platform_int(right_indexer) + ) + return join_index, left_indexer, right_indexer + + @final + def _join_monotonic( + self, other: Index, how: JoinHow = "left" + ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: + # We only get here with matching dtypes and both monotonic increasing + assert other.dtype == self.dtype + + if self.equals(other): + # This is a convenient place for this check, but its correctness + # does not depend on monotonicity, so it could go earlier + # in the calling method. 
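A quick sketch of the short-circuit described in the comment above: when the two indexes are equal, no indexers are needed, so both come back as ``None`` (hypothetical values):

>>> idx = pd.Index([10, 20, 30])
>>> idx.join(idx, how="left", return_indexers=True)
(Index([10, 20, 30], dtype='int64'), None, None)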
+            ret_index = other if how == "right" else self
+            return ret_index, None, None
+
+        ridx: npt.NDArray[np.intp] | None
+        lidx: npt.NDArray[np.intp] | None
+
+        if self.is_unique and other.is_unique:
+            # We can perform much better than the general case
+            if how == "left":
+                join_index = self
+                lidx = None
+                ridx = self._left_indexer_unique(other)
+            elif how == "right":
+                join_index = other
+                lidx = other._left_indexer_unique(self)
+                ridx = None
+            elif how == "inner":
+                join_array, lidx, ridx = self._inner_indexer(other)
+                join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
+            elif how == "outer":
+                join_array, lidx, ridx = self._outer_indexer(other)
+                join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
+        else:
+            if how == "left":
+                join_array, lidx, ridx = self._left_indexer(other)
+            elif how == "right":
+                join_array, ridx, lidx = other._left_indexer(self)
+            elif how == "inner":
+                join_array, lidx, ridx = self._inner_indexer(other)
+            elif how == "outer":
+                join_array, lidx, ridx = self._outer_indexer(other)
+
+            assert lidx is not None
+            assert ridx is not None
+
+            join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
+
+        lidx = None if lidx is None else ensure_platform_int(lidx)
+        ridx = None if ridx is None else ensure_platform_int(ridx)
+        return join_index, lidx, ridx
+
+    def _wrap_joined_index(
+        self,
+        joined: ArrayLike,
+        other: Self,
+        lidx: npt.NDArray[np.intp],
+        ridx: npt.NDArray[np.intp],
+    ) -> Self:
+        assert other.dtype == self.dtype
+
+        if isinstance(self, ABCMultiIndex):
+            name = self.names if self.names == other.names else None
+            # error: Incompatible return value type (got "MultiIndex",
+            # expected "Self")
+            mask = lidx == -1
+            join_idx = self.take(lidx)
+            right = other.take(ridx)
+            join_index = join_idx.putmask(mask, right)._sort_levels_monotonic()
+            return join_index.set_names(name)  # type: ignore[return-value]
+        else:
+            name = get_op_result_name(self, other)
+            return self._constructor._with_infer(joined, name=name, dtype=self.dtype)
+
+    @cache_readonly
+    def _can_use_libjoin(self) -> bool:
+        """
+        Whether we can use the fastpaths implemented in _libs.join.
+        """
+        if type(self) is Index:
+            # excludes EAs, but includes masked arrays; we get here with
+            # monotonic values only, meaning no NAs
+            return (
+                isinstance(self.dtype, np.dtype)
+                or isinstance(self.values, BaseMaskedArray)
+                or isinstance(self._values, ArrowExtensionArray)
+            )
+        return not isinstance(self.dtype, IntervalDtype)
+
+    # --------------------------------------------------------------------
+    # Uncategorized Methods
+
+    @property
+    def values(self) -> ArrayLike:
+        """
+        Return an array representing the data in the Index.
+
+        .. warning::
+
+           We recommend using :attr:`Index.array` or
+           :meth:`Index.to_numpy`, depending on whether you need
+           a reference to the underlying data or a NumPy array.
+
+        Returns
+        -------
+        array : numpy.ndarray or ExtensionArray
+
+        See Also
+        --------
+        Index.array : Reference to the underlying data.
+        Index.to_numpy : A NumPy array representing the underlying data.
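A small illustration of the recommendation in the warning above (hypothetical values): ``to_numpy`` always hands back a NumPy array, regardless of the backing store:

>>> idx = pd.Index([1, 2, 3])
>>> idx.to_numpy()
array([1, 2, 3])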
+ + Examples + -------- + For :class:`pandas.Index`: + + >>> idx = pd.Index([1, 2, 3]) + >>> idx + Index([1, 2, 3], dtype='int64') + >>> idx.values + array([1, 2, 3]) + + For :class:`pandas.IntervalIndex`: + + >>> idx = pd.interval_range(start=0, end=5) + >>> idx.values + + [(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]] + Length: 5, dtype: interval[int64, right] + """ + if using_copy_on_write(): + data = self._data + if isinstance(data, np.ndarray): + data = data.view() + data.flags.writeable = False + return data + return self._data + + @cache_readonly + @doc(IndexOpsMixin.array) + def array(self) -> ExtensionArray: + array = self._data + if isinstance(array, np.ndarray): + from pandas.core.arrays.numpy_ import NumpyExtensionArray + + array = NumpyExtensionArray(array) + return array + + @property + def _values(self) -> ExtensionArray | np.ndarray: + """ + The best array representation. + + This is an ndarray or ExtensionArray. + + ``_values`` are consistent between ``Series`` and ``Index``. + + It may differ from the public '.values' method. + + index | values | _values | + ----------------- | --------------- | ------------- | + Index | ndarray | ndarray | + CategoricalIndex | Categorical | Categorical | + DatetimeIndex | ndarray[M8ns] | DatetimeArray | + DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray | + PeriodIndex | ndarray[object] | PeriodArray | + IntervalIndex | IntervalArray | IntervalArray | + + See Also + -------- + values : Values + """ + return self._data + + def _get_engine_target(self) -> ArrayLike: + """ + Get the ndarray or ExtensionArray that we can pass to the IndexEngine + constructor. + """ + vals = self._values + if isinstance(vals, StringArray): + # GH#45652 much more performant than ExtensionEngine + return vals._ndarray + if isinstance(vals, ArrowExtensionArray) and self.dtype.kind in "Mm": + import pyarrow as pa + + pa_type = vals._pa_array.type + if pa.types.is_timestamp(pa_type): + vals = vals._to_datetimearray() + return vals._ndarray.view("i8") + elif pa.types.is_duration(pa_type): + vals = vals._to_timedeltaarray() + return vals._ndarray.view("i8") + if ( + type(self) is Index + and isinstance(self._values, ExtensionArray) + and not isinstance(self._values, BaseMaskedArray) + and not ( + isinstance(self._values, ArrowExtensionArray) + and is_numeric_dtype(self.dtype) + # Exclude decimal + and self.dtype.kind != "O" + ) + ): + # TODO(ExtensionIndex): remove special-case, just use self._values + return self._values.astype(object) + return vals + + def _get_join_target(self) -> ArrayLike: + """ + Get the ndarray or ExtensionArray that we can pass to the join + functions. + """ + if isinstance(self._values, BaseMaskedArray): + # This is only used if our array is monotonic, so no NAs present + return self._values._data + elif isinstance(self._values, ArrowExtensionArray): + # This is only used if our array is monotonic, so no missing values + # present + return self._values.to_numpy() + return self._get_engine_target() + + def _from_join_target(self, result: np.ndarray) -> ArrayLike: + """ + Cast the ndarray returned from one of the libjoin.foo_indexer functions + back to type(self)._data. 
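A sketch of the round-trip this helper supports, as I read the code: joins of nullable-integer indexes run on the masked array's NumPy data and the result is cast back to the masked dtype afterwards (hypothetical values):

>>> left = pd.Index([1, 2, 3], dtype="Int64")
>>> right = pd.Index([2, 3, 4], dtype="Int64")
>>> left.join(right, how="inner")
Index([2, 3], dtype='Int64')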
+ """ + if isinstance(self.values, BaseMaskedArray): + return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_)) + elif isinstance(self.values, (ArrowExtensionArray, StringArray)): + return type(self.values)._from_sequence(result) + return result + + @doc(IndexOpsMixin._memory_usage) + def memory_usage(self, deep: bool = False) -> int: + result = self._memory_usage(deep=deep) + + # include our engine hashtable + result += self._engine.sizeof(deep=deep) + return result + + @final + def where(self, cond, other=None) -> Index: + """ + Replace values where the condition is False. + + The replacement is taken from other. + + Parameters + ---------- + cond : bool array-like with the same length as self + Condition to select the values on. + other : scalar, or array-like, default None + Replacement if the condition is False. + + Returns + ------- + pandas.Index + A copy of self with values replaced from other + where the condition is False. + + See Also + -------- + Series.where : Same method for Series. + DataFrame.where : Same method for DataFrame. + + Examples + -------- + >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) + >>> idx + Index(['car', 'bike', 'train', 'tractor'], dtype='object') + >>> idx.where(idx.isin(['car', 'train']), 'other') + Index(['car', 'other', 'train', 'other'], dtype='object') + """ + if isinstance(self, ABCMultiIndex): + raise NotImplementedError( + ".where is not supported for MultiIndex operations" + ) + cond = np.asarray(cond, dtype=bool) + return self.putmask(~cond, other) + + # construction helpers + @final + @classmethod + def _raise_scalar_data_error(cls, data): + # We return the TypeError so that we can raise it from the constructor + # in order to keep mypy happy + raise TypeError( + f"{cls.__name__}(...) must be called with a collection of some " + f"kind, {repr(data) if not isinstance(data, np.generic) else str(data)} " + "was passed" + ) + + def _validate_fill_value(self, value): + """ + Check if the value can be inserted into our array without casting, + and convert it to an appropriate native type if necessary. + + Raises + ------ + TypeError + If the value cannot be inserted into an array of this dtype. + """ + dtype = self.dtype + if isinstance(dtype, np.dtype) and dtype.kind not in "mM": + # return np_can_hold_element(dtype, value) + try: + return np_can_hold_element(dtype, value) + except LossySetitemError as err: + # re-raise as TypeError for consistency + raise TypeError from err + elif not can_hold_element(self._values, value): + raise TypeError + return value + + def _is_memory_usage_qualified(self) -> bool: + """ + Return a boolean if we need a qualified .info display. + """ + return is_object_dtype(self.dtype) + + def __contains__(self, key: Any) -> bool: + """ + Return a boolean indicating whether the provided key is in the index. + + Parameters + ---------- + key : label + The key to check if it is present in the index. + + Returns + ------- + bool + Whether the key search is in the index. + + Raises + ------ + TypeError + If the key is not hashable. + + See Also + -------- + Index.isin : Returns an ndarray of boolean dtype indicating whether the + list-like key is in the index. 
+
+        Examples
+        --------
+        >>> idx = pd.Index([1, 2, 3, 4])
+        >>> idx
+        Index([1, 2, 3, 4], dtype='int64')
+
+        >>> 2 in idx
+        True
+        >>> 6 in idx
+        False
+        """
+        hash(key)
+        try:
+            return key in self._engine
+        except (OverflowError, TypeError, ValueError):
+            return False
+
+    # https://github.com/python/typeshed/issues/2148#issuecomment-520783318
+    # Incompatible types in assignment (expression has type "None", base class
+    # "object" defined the type as "Callable[[object], int]")
+    __hash__: ClassVar[None]  # type: ignore[assignment]
+
+    @final
+    def __setitem__(self, key, value) -> None:
+        raise TypeError("Index does not support mutable operations")
+
+    def __getitem__(self, key):
+        """
+        Override numpy.ndarray's __getitem__ method to work as desired.
+
+        This function adds lists and Series as valid boolean indexers
+        (ndarrays only support ndarray with dtype=bool).
+
+        If resulting ndim != 1, plain ndarray is returned instead of
+        corresponding `Index` subclass.
+
+        """
+        getitem = self._data.__getitem__
+
+        if is_integer(key) or is_float(key):
+            # GH#44051 exclude bool, which would return a 2d ndarray
+            key = com.cast_scalar_indexer(key)
+            return getitem(key)
+
+        if isinstance(key, slice):
+            # This case is separated from the conditional above to avoid the
+            # pessimization of com.is_bool_indexer and ndim checks.
+            return self._getitem_slice(key)
+
+        if com.is_bool_indexer(key):
+            # if we have list[bools, length=1e5] then doing this check+convert
+            # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__
+            # time below from 3.8 ms to 496 µs
+            # if we already have ndarray[bool], the overhead is 1.4 µs or .25%
+            if isinstance(getattr(key, "dtype", None), ExtensionDtype):
+                key = key.to_numpy(dtype=bool, na_value=False)
+            else:
+                key = np.asarray(key, dtype=bool)
+
+        result = getitem(key)
+        # Because we ruled out integer above, we always get an arraylike here
+        if result.ndim > 1:
+            disallow_ndim_indexing(result)
+
+        # NB: Using _constructor._simple_new would break if MultiIndex
+        # didn't override __getitem__
+        return self._constructor._simple_new(result, name=self._name)
+
+    def _getitem_slice(self, slobj: slice) -> Self:
+        """
+        Fastpath for __getitem__ when we know we have a slice.
+        """
+        res = self._data[slobj]
+        result = type(self)._simple_new(res, name=self._name, refs=self._references)
+        if "_engine" in self._cache:
+            reverse = slobj.step is not None and slobj.step < 0
+            result._engine._update_from_sliced(self._engine, reverse=reverse)  # type: ignore[union-attr]  # noqa: E501
+
+        return result
+
+    @final
+    def _can_hold_identifiers_and_holds_name(self, name) -> bool:
+        """
+        Faster check for ``name in self`` when we know `name` is a Python
+        identifier (e.g. in NDFrame.__getattr__, which hits this to support
+        ``.`` key lookup). For indexes that can't hold identifiers (everything
+        but object & categorical) we just return False.
+
+        https://github.com/pandas-dev/pandas/issues/19764
+        """
+        if (
+            is_object_dtype(self.dtype)
+            or is_string_dtype(self.dtype)
+            or isinstance(self.dtype, CategoricalDtype)
+        ):
+            return name in self
+        return False
+
+    def append(self, other: Index | Sequence[Index]) -> Index:
+        """
+        Append a collection of Index objects together.
+ + Parameters + ---------- + other : Index or list/tuple of indices + + Returns + ------- + Index + + Examples + -------- + >>> idx = pd.Index([1, 2, 3]) + >>> idx.append(pd.Index([4])) + Index([1, 2, 3, 4], dtype='int64') + """ + to_concat = [self] + + if isinstance(other, (list, tuple)): + to_concat += list(other) + else: + # error: Argument 1 to "append" of "list" has incompatible type + # "Union[Index, Sequence[Index]]"; expected "Index" + to_concat.append(other) # type: ignore[arg-type] + + for obj in to_concat: + if not isinstance(obj, Index): + raise TypeError("all inputs must be Index") + + names = {obj.name for obj in to_concat} + name = None if len(names) > 1 else self.name + + return self._concat(to_concat, name) + + def _concat(self, to_concat: list[Index], name: Hashable) -> Index: + """ + Concatenate multiple Index objects. + """ + to_concat_vals = [x._values for x in to_concat] + + result = concat_compat(to_concat_vals) + + return Index._with_infer(result, name=name) + + def putmask(self, mask, value) -> Index: + """ + Return a new Index of the values set with the mask. + + Returns + ------- + Index + + See Also + -------- + numpy.ndarray.putmask : Changes elements of an array + based on conditional and input values. + + Examples + -------- + >>> idx1 = pd.Index([1, 2, 3]) + >>> idx2 = pd.Index([5, 6, 7]) + >>> idx1.putmask([True, False, False], idx2) + Index([5, 2, 3], dtype='int64') + """ + mask, noop = validate_putmask(self._values, mask) + if noop: + return self.copy() + + if self.dtype != object and is_valid_na_for_dtype(value, self.dtype): + # e.g. None -> np.nan, see also Block._standardize_fill_value + value = self._na_value + + try: + converted = self._validate_fill_value(value) + except (LossySetitemError, ValueError, TypeError) as err: + if is_object_dtype(self.dtype): # pragma: no cover + raise err + + # See also: Block.coerce_to_target_dtype + dtype = self._find_common_type_compat(value) + return self.astype(dtype).putmask(mask, value) + + values = self._values.copy() + + if isinstance(values, np.ndarray): + converted = setitem_datetimelike_compat(values, mask.sum(), converted) + np.putmask(values, mask, converted) + + else: + # Note: we use the original value here, not converted, as + # _validate_fill_value is not idempotent + values._putmask(mask, value) + + return self._shallow_copy(values) + + def equals(self, other: Any) -> bool: + """ + Determine if two Index object are equal. + + The things that are being compared are: + + * The elements inside the Index object. + * The order of the elements inside the Index object. + + Parameters + ---------- + other : Any + The other object to compare against. + + Returns + ------- + bool + True if "other" is an Index and it has the same elements and order + as the calling index; False otherwise. 
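Stepping back to ``putmask`` above, a sketch of its dtype-fallback path: when the replacement value does not fit the current dtype, the index is cast to a common dtype first (hypothetical values):

>>> idx = pd.Index([1, 2, 3])
>>> idx.putmask([True, False, False], "a")
Index(['a', 2, 3], dtype='object')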
+ + Examples + -------- + >>> idx1 = pd.Index([1, 2, 3]) + >>> idx1 + Index([1, 2, 3], dtype='int64') + >>> idx1.equals(pd.Index([1, 2, 3])) + True + + The elements inside are compared + + >>> idx2 = pd.Index(["1", "2", "3"]) + >>> idx2 + Index(['1', '2', '3'], dtype='object') + + >>> idx1.equals(idx2) + False + + The order is compared + + >>> ascending_idx = pd.Index([1, 2, 3]) + >>> ascending_idx + Index([1, 2, 3], dtype='int64') + >>> descending_idx = pd.Index([3, 2, 1]) + >>> descending_idx + Index([3, 2, 1], dtype='int64') + >>> ascending_idx.equals(descending_idx) + False + + The dtype is *not* compared + + >>> int64_idx = pd.Index([1, 2, 3], dtype='int64') + >>> int64_idx + Index([1, 2, 3], dtype='int64') + >>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64') + >>> uint64_idx + Index([1, 2, 3], dtype='uint64') + >>> int64_idx.equals(uint64_idx) + True + """ + if self.is_(other): + return True + + if not isinstance(other, Index): + return False + + if len(self) != len(other): + # quickly return if the lengths are different + return False + + if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype): + # if other is not object, use other's logic for coercion + return other.equals(self) + + if isinstance(other, ABCMultiIndex): + # d-level MultiIndex can equal d-tuple Index + return other.equals(self) + + if isinstance(self._values, ExtensionArray): + # Dispatch to the ExtensionArray's .equals method. + if not isinstance(other, type(self)): + return False + + earr = cast(ExtensionArray, self._data) + return earr.equals(other._data) + + if isinstance(other.dtype, ExtensionDtype): + # All EA-backed Index subclasses override equals + return other.equals(self) + + return array_equivalent(self._values, other._values) + + @final + def identical(self, other) -> bool: + """ + Similar to equals, but checks that object attributes and types are also equal. + + Returns + ------- + bool + If two Index objects have equal elements and same type True, + otherwise False. + + Examples + -------- + >>> idx1 = pd.Index(['1', '2', '3']) + >>> idx2 = pd.Index(['1', '2', '3']) + >>> idx2.identical(idx1) + True + + >>> idx1 = pd.Index(['1', '2', '3'], name="A") + >>> idx2 = pd.Index(['1', '2', '3'], name="B") + >>> idx2.identical(idx1) + False + """ + return ( + self.equals(other) + and all( + getattr(self, c, None) == getattr(other, c, None) + for c in self._comparables + ) + and type(self) == type(other) + and self.dtype == other.dtype + ) + + @final + def asof(self, label): + """ + Return the label from the index, or, if not present, the previous one. + + Assuming that the index is sorted, return the passed index label if it + is in the index, or return the previous index label if the passed one + is not in the index. + + Parameters + ---------- + label : object + The label up to which the method returns the latest index label. + + Returns + ------- + object + The passed label if it is in the index. The previous label if the + passed label is not in the sorted index or `NaN` if there is no + such label. + + See Also + -------- + Series.asof : Return the latest value in a Series up to the + passed index. + merge_asof : Perform an asof merge (similar to left join but it + matches on nearest key rather than equal key). + Index.get_loc : An `asof` is a thin wrapper around `get_loc` + with method='pad'. + + Examples + -------- + `Index.asof` returns the latest index label up to the passed label. 
+ + >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03']) + >>> idx.asof('2014-01-01') + '2013-12-31' + + If the label is in the index, the method returns the passed label. + + >>> idx.asof('2014-01-02') + '2014-01-02' + + If all of the labels in the index are later than the passed label, + NaN is returned. + + >>> idx.asof('1999-01-02') + nan + + If the index is not sorted, an error is raised. + + >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02', + ... '2014-01-03']) + >>> idx_not_sorted.asof('2013-12-31') + Traceback (most recent call last): + ValueError: index must be monotonic increasing or decreasing + """ + self._searchsorted_monotonic(label) # validate sortedness + try: + loc = self.get_loc(label) + except (KeyError, TypeError): + # KeyError -> No exact match, try for padded + # TypeError -> passed e.g. non-hashable, fall through to get + # the tested exception message + indexer = self.get_indexer([label], method="pad") + if indexer.ndim > 1 or indexer.size > 1: + raise TypeError("asof requires scalar valued input") + loc = indexer.item() + if loc == -1: + return self._na_value + else: + if isinstance(loc, slice): + loc = loc.indices(len(self))[-1] + + return self[loc] + + def asof_locs( + self, where: Index, mask: npt.NDArray[np.bool_] + ) -> npt.NDArray[np.intp]: + """ + Return the locations (indices) of labels in the index. + + As in the :meth:`pandas.Index.asof`, if the label (a particular entry in + ``where``) is not in the index, the latest index label up to the + passed label is chosen and its index returned. + + If all of the labels in the index are later than a label in ``where``, + -1 is returned. + + ``mask`` is used to ignore ``NA`` values in the index during calculation. + + Parameters + ---------- + where : Index + An Index consisting of an array of timestamps. + mask : np.ndarray[bool] + Array of booleans denoting where values in the original + data are not ``NA``. + + Returns + ------- + np.ndarray[np.intp] + An array of locations (indices) of the labels from the index + which correspond to the return values of :meth:`pandas.Index.asof` + for every element in ``where``. + + See Also + -------- + Index.asof : Return the label from the index, or, if not present, the + previous one. + + Examples + -------- + >>> idx = pd.date_range('2023-06-01', periods=3, freq='D') + >>> where = pd.DatetimeIndex(['2023-05-30 00:12:00', '2023-06-01 00:00:00', + ... '2023-06-02 23:59:59']) + >>> mask = np.ones(3, dtype=bool) + >>> idx.asof_locs(where, mask) + array([-1, 0, 1]) + + We can use ``mask`` to ignore certain values in the index during calculation. + + >>> mask[1] = False + >>> idx.asof_locs(where, mask) + array([-1, 0, 0]) + """ + # error: No overload variant of "searchsorted" of "ndarray" matches argument + # types "Union[ExtensionArray, ndarray[Any, Any]]", "str" + # TODO: will be fixed when ExtensionArray.searchsorted() is fixed + locs = self._values[mask].searchsorted( + where._values, side="right" # type: ignore[call-overload] + ) + locs = np.where(locs > 0, locs - 1, 0) + + result = np.arange(len(self), dtype=np.intp)[mask].take(locs) + + first_value = self._values[mask.argmax()] + result[(locs == 0) & (where._values < first_value)] = -1 + + return result + + def sort_values( + self, + return_indexer: bool = False, + ascending: bool = True, + na_position: NaPosition = "last", + key: Callable | None = None, + ): + """ + Return a sorted copy of the index. 
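A small sketch of the ``na_position`` behavior documented below (hypothetical values):

>>> idx = pd.Index([3.0, None, 1.0])
>>> idx.sort_values(na_position="first")
Index([nan, 1.0, 3.0], dtype='float64')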
+ + Return a sorted copy of the index, and optionally return the indices + that sorted the index itself. + + Parameters + ---------- + return_indexer : bool, default False + Should the indices that would sort the index be returned. + ascending : bool, default True + Should the index values be sorted in an ascending order. + na_position : {'first' or 'last'}, default 'last' + Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at + the end. + + .. versionadded:: 1.2.0 + + key : callable, optional + If not None, apply the key function to the index values + before sorting. This is similar to the `key` argument in the + builtin :meth:`sorted` function, with the notable difference that + this `key` function should be *vectorized*. It should expect an + ``Index`` and return an ``Index`` of the same shape. + + Returns + ------- + sorted_index : pandas.Index + Sorted copy of the index. + indexer : numpy.ndarray, optional + The indices that the index itself was sorted by. + + See Also + -------- + Series.sort_values : Sort values of a Series. + DataFrame.sort_values : Sort values in a DataFrame. + + Examples + -------- + >>> idx = pd.Index([10, 100, 1, 1000]) + >>> idx + Index([10, 100, 1, 1000], dtype='int64') + + Sort values in ascending order (default behavior). + + >>> idx.sort_values() + Index([1, 10, 100, 1000], dtype='int64') + + Sort values in descending order, and also get the indices `idx` was + sorted by. + + >>> idx.sort_values(ascending=False, return_indexer=True) + (Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) + """ + # GH 35584. Sort missing values according to na_position kwarg + # ignore na_position for MultiIndex + if not isinstance(self, ABCMultiIndex): + _as = nargsort( + items=self, ascending=ascending, na_position=na_position, key=key + ) + else: + idx = cast(Index, ensure_key_mapped(self, key)) + _as = idx.argsort(na_position=na_position) + if not ascending: + _as = _as[::-1] + + sorted_index = self.take(_as) + + if return_indexer: + return sorted_index, _as + else: + return sorted_index + + @final + def sort(self, *args, **kwargs): + """ + Use sort_values instead. + """ + raise TypeError("cannot sort an Index object in-place, use sort_values instead") + + def shift(self, periods: int = 1, freq=None): + """ + Shift index by desired number of time frequency increments. + + This method is for shifting the values of datetime-like indexes + by a specified time increment a given number of times. + + Parameters + ---------- + periods : int, default 1 + Number of periods (or increments) to shift by, + can be positive or negative. + freq : pandas.DateOffset, pandas.Timedelta or str, optional + Frequency increment to shift by. + If None, the index is shifted by its own `freq` attribute. + Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc. + + Returns + ------- + pandas.Index + Shifted index. + + See Also + -------- + Series.shift : Shift values of Series. + + Notes + ----- + This method is only implemented for datetime-like index classes, + i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex. + + Examples + -------- + Put the first 5 month starts of 2011 into an index. + + >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS') + >>> month_starts + DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01', + '2011-05-01'], + dtype='datetime64[ns]', freq='MS') + + Shift the index by 10 days. 
+
+        >>> month_starts.shift(10, freq='D')
+        DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',
+                       '2011-05-11'],
+                      dtype='datetime64[ns]', freq=None)
+
+        The default value of `freq` is the `freq` attribute of the index,
+        which is 'MS' (month start) in this example.
+
+        >>> month_starts.shift(10)
+        DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',
+                       '2012-03-01'],
+                      dtype='datetime64[ns]', freq='MS')
+        """
+        raise NotImplementedError(
+            f"This method is only implemented for DatetimeIndex, PeriodIndex and "
+            f"TimedeltaIndex; got type {type(self).__name__}"
+        )
+
+    def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
+        """
+        Return the integer indices that would sort the index.
+
+        Parameters
+        ----------
+        *args
+            Passed to `numpy.ndarray.argsort`.
+        **kwargs
+            Passed to `numpy.ndarray.argsort`.
+
+        Returns
+        -------
+        np.ndarray[np.intp]
+            Integer indices that would sort the index if used as
+            an indexer.
+
+        See Also
+        --------
+        numpy.argsort : Similar method for NumPy arrays.
+        Index.sort_values : Return sorted copy of Index.
+
+        Examples
+        --------
+        >>> idx = pd.Index(['b', 'a', 'd', 'c'])
+        >>> idx
+        Index(['b', 'a', 'd', 'c'], dtype='object')
+
+        >>> order = idx.argsort()
+        >>> order
+        array([1, 0, 3, 2])
+
+        >>> idx[order]
+        Index(['a', 'b', 'c', 'd'], dtype='object')
+        """
+        # This works for either ndarray or EA, and is overridden
+        # by RangeIndex, MultiIndex
+        return self._data.argsort(*args, **kwargs)
+
+    def _check_indexing_error(self, key):
+        if not is_scalar(key):
+            # if key is not a scalar, directly raise an error (the code below
+            # would convert to numpy arrays and raise later anyway) - GH29926
+            raise InvalidIndexError(key)
+
+    @cache_readonly
+    def _should_fallback_to_positional(self) -> bool:
+        """
+        Should an integer key be treated as positional?
+        """
+        return self.inferred_type not in {
+            "integer",
+            "mixed-integer",
+            "floating",
+            "complex",
+        }
+
+    _index_shared_docs[
+        "get_indexer_non_unique"
+    ] = """
+        Compute indexer and mask for new index given the current index.
+
+        The indexer should then be used as an input to ndarray.take to align the
+        current data to the new index.
+
+        Parameters
+        ----------
+        target : %(target_klass)s
+
+        Returns
+        -------
+        indexer : np.ndarray[np.intp]
+            Integers from 0 to n - 1 indicating that the index at these
+            positions matches the corresponding target values. Missing values
+            in the target are marked by -1.
+        missing : np.ndarray[np.intp]
+            An indexer into the target of the values not found.
+            These correspond to the -1 in the indexer array.
+
+        Examples
+        --------
+        >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
+        >>> index.get_indexer_non_unique(['b', 'b'])
+        (array([1, 3, 4, 1, 3, 4]), array([], dtype=int64))
+
+        In the example below there are no matched values.
+
+        >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
+        >>> index.get_indexer_non_unique(['q', 'r', 't'])
+        (array([-1, -1, -1]), array([0, 1, 2]))
+
+        For this reason, the returned ``indexer`` contains only integers equal to -1.
+        It demonstrates that there's no match between the index and the ``target``
+        values at these positions. The mask [0, 1, 2] in the return value shows that
+        the first, second, and third elements are missing.
+
+        Notice that the return value is a tuple containing two items. In the example
+        below the first item is an array of locations in ``index``. The second
+        item is a mask showing that the first and third elements are missing.
+ + >>> index = pd.Index(['c', 'b', 'a', 'b', 'b']) + >>> index.get_indexer_non_unique(['f', 'b', 's']) + (array([-1, 1, 3, 4, -1]), array([0, 2])) + """ + + @Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs) + def get_indexer_non_unique( + self, target + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + target = ensure_index(target) + target = self._maybe_cast_listlike_indexer(target) + + if not self._should_compare(target) and not self._should_partial_index(target): + # _should_partial_index e.g. IntervalIndex with numeric scalars + # that can be matched to Interval scalars. + return self._get_indexer_non_comparable(target, method=None, unique=False) + + pself, ptarget = self._maybe_promote(target) + if pself is not self or ptarget is not target: + return pself.get_indexer_non_unique(ptarget) + + if self.dtype != target.dtype: + # TODO: if object, could use infer_dtype to preempt costly + # conversion if still non-comparable? + dtype = self._find_common_type_compat(target) + + this = self.astype(dtype, copy=False) + that = target.astype(dtype, copy=False) + return this.get_indexer_non_unique(that) + + # TODO: get_indexer has fastpaths for both Categorical-self and + # Categorical-target. Can we do something similar here? + + # Note: _maybe_promote ensures we never get here with MultiIndex + # self and non-Multi target + tgt_values = target._get_engine_target() + if self._is_multi and target._is_multi: + engine = self._engine + # Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" has + # no attribute "_extract_level_codes" + tgt_values = engine._extract_level_codes(target) # type: ignore[union-attr] + + indexer, missing = self._engine.get_indexer_non_unique(tgt_values) + return ensure_platform_int(indexer), ensure_platform_int(missing) + + @final + def get_indexer_for(self, target) -> npt.NDArray[np.intp]: + """ + Guaranteed return of an indexer even when non-unique. + + This dispatches to get_indexer or get_indexer_non_unique + as appropriate. + + Returns + ------- + np.ndarray[np.intp] + List of indices. + + Examples + -------- + >>> idx = pd.Index([np.nan, 'var1', np.nan]) + >>> idx.get_indexer_for([np.nan]) + array([0, 2]) + """ + if self._index_as_unique: + return self.get_indexer(target) + indexer, _ = self.get_indexer_non_unique(target) + return indexer + + def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]: + """ + Analogue to get_indexer that raises if any elements are missing. + """ + keyarr = key + if not isinstance(keyarr, Index): + keyarr = com.asarray_tuplesafe(keyarr) + + if self._index_as_unique: + indexer = self.get_indexer_for(keyarr) + keyarr = self.reindex(keyarr)[0] + else: + keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr) + + self._raise_if_missing(keyarr, indexer, axis_name) + + keyarr = self.take(indexer) + if isinstance(key, Index): + # GH 42790 - Preserve name from an Index + keyarr.name = key.name + if lib.is_np_dtype(keyarr.dtype, "mM") or isinstance( + keyarr.dtype, DatetimeTZDtype + ): + # DTI/TDI.take can infer a freq in some cases when we dont want one + if isinstance(key, list) or ( + isinstance(key, type(self)) + # "Index" has no attribute "freq" + and key.freq is None # type: ignore[attr-defined] + ): + keyarr = keyarr._with_freq(None) + + return keyarr, indexer + + def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None: + """ + Check that indexer can be used to return a result. + + e.g. 
at least one element was found, + unless the list of keys was actually empty. + + Parameters + ---------- + key : list-like + Targeted labels (only used to show correct error message). + indexer: array-like of booleans + Indices corresponding to the key, + (with -1 indicating not found). + axis_name : str + + Raises + ------ + KeyError + If at least one key was requested but none was found. + """ + if len(key) == 0: + return + + # Count missing values + missing_mask = indexer < 0 + nmissing = missing_mask.sum() + + if nmissing: + # TODO: remove special-case; this is just to keep exception + # message tests from raising while debugging + use_interval_msg = isinstance(self.dtype, IntervalDtype) or ( + isinstance(self.dtype, CategoricalDtype) + # "Index" has no attribute "categories" [attr-defined] + and isinstance( + self.categories.dtype, IntervalDtype # type: ignore[attr-defined] + ) + ) + + if nmissing == len(indexer): + if use_interval_msg: + key = list(key) + raise KeyError(f"None of [{key}] are in the [{axis_name}]") + + not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique()) + raise KeyError(f"{not_found} not in index") + + @overload + def _get_indexer_non_comparable( + self, target: Index, method, unique: Literal[True] = ... + ) -> npt.NDArray[np.intp]: + ... + + @overload + def _get_indexer_non_comparable( + self, target: Index, method, unique: Literal[False] + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + ... + + @overload + def _get_indexer_non_comparable( + self, target: Index, method, unique: bool = True + ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + ... + + @final + def _get_indexer_non_comparable( + self, target: Index, method, unique: bool = True + ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + """ + Called from get_indexer or get_indexer_non_unique when the target + is of a non-comparable dtype. + + For get_indexer lookups with method=None, get_indexer is an _equality_ + check, so non-comparable dtypes mean we will always have no matches. + + For get_indexer lookups with a method, get_indexer is an _inequality_ + check, so non-comparable dtypes mean we will always raise TypeError. + + Parameters + ---------- + target : Index + method : str or None + unique : bool, default True + * True if called from get_indexer. + * False if called from get_indexer_non_unique. + + Raises + ------ + TypeError + If doing an inequality check, i.e. method is not None. + """ + if method is not None: + other = _unpack_nested_dtype(target) + raise TypeError(f"Cannot compare dtypes {self.dtype} and {other.dtype}") + + no_matches = -1 * np.ones(target.shape, dtype=np.intp) + if unique: + # This is for get_indexer + return no_matches + else: + # This is for get_indexer_non_unique + missing = np.arange(len(target), dtype=np.intp) + return no_matches, missing + + @property + def _index_as_unique(self) -> bool: + """ + Whether we should treat this as unique for the sake of + get_indexer vs get_indexer_non_unique. + + For IntervalIndex compat. + """ + return self.is_unique + + _requires_unique_msg = "Reindexing only valid with uniquely valued Index objects" + + @final + def _maybe_promote(self, other: Index) -> tuple[Index, Index]: + """ + When dealing with an object-dtype Index and a non-object Index, see + if we can upcast the object-dtype one to improve performance. 
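One promotion handled below, sketched with hypothetical values: a non-negative signed-integer target is upcast so it can be looked up in a uint64 index:

>>> left = pd.Index([1, 2, 3], dtype="uint64")
>>> left.get_indexer(pd.Index([2, 3], dtype="int64"))
array([1, 2])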
+ """ + + if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): + if ( + self.tz is not None + and other.tz is not None + and not tz_compare(self.tz, other.tz) + ): + # standardize on UTC + return self.tz_convert("UTC"), other.tz_convert("UTC") + + elif self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex): + try: + return type(other)(self), other + except OutOfBoundsDatetime: + return self, other + elif self.inferred_type == "timedelta" and isinstance(other, ABCTimedeltaIndex): + # TODO: we dont have tests that get here + return type(other)(self), other + + elif self.dtype.kind == "u" and other.dtype.kind == "i": + # GH#41873 + if other.min() >= 0: + # lookup min as it may be cached + # TODO: may need itemsize check if we have non-64-bit Indexes + return self, other.astype(self.dtype) + + elif self._is_multi and not other._is_multi: + try: + # "Type[Index]" has no attribute "from_tuples" + other = type(self).from_tuples(other) # type: ignore[attr-defined] + except (TypeError, ValueError): + # let's instead try with a straight Index + self = Index(self._values) + + if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype): + # Reverse op so we dont need to re-implement on the subclasses + other, self = other._maybe_promote(self) + + return self, other + + @final + def _find_common_type_compat(self, target) -> DtypeObj: + """ + Implementation of find_common_type that adjusts for Index-specific + special cases. + """ + target_dtype, _ = infer_dtype_from(target) + + # special case: if one dtype is uint64 and the other a signed int, return object + # See https://github.com/pandas-dev/pandas/issues/26778 for discussion + # Now it's: + # * float | [u]int -> float + # * uint64 | signed int -> object + # We may change union(float | [u]int) to go to object. + if self.dtype == "uint64" or target_dtype == "uint64": + if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype( + target_dtype + ): + return _dtype_obj + + dtype = find_result_type(self.dtype, target) + dtype = common_dtype_categorical_compat([self, target], dtype) + return dtype + + @final + def _should_compare(self, other: Index) -> bool: + """ + Check if `self == other` can ever have non-False entries. + """ + + # NB: we use inferred_type rather than is_bool_dtype to catch + # object_dtype_of_bool and categorical[object_dtype_of_bool] cases + if ( + other.inferred_type == "boolean" and is_any_real_numeric_dtype(self.dtype) + ) or ( + self.inferred_type == "boolean" and is_any_real_numeric_dtype(other.dtype) + ): + # GH#16877 Treat boolean labels passed to a numeric index as not + # found. Without this fix False and True would be treated as 0 and 1 + # respectively. + return False + + other = _unpack_nested_dtype(other) + dtype = other.dtype + return self._is_comparable_dtype(dtype) or is_object_dtype(dtype) + + def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: + """ + Can we compare values of the given dtype to our own? + """ + if self.dtype.kind == "b": + return dtype.kind == "b" + elif is_numeric_dtype(self.dtype): + return is_numeric_dtype(dtype) + # TODO: this was written assuming we only get here with object-dtype, + # which is no longer correct. Can we specialize for EA? + return True + + @final + def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]: + """ + Group the index labels by a given array of values. + + Parameters + ---------- + values : array + Values used to determine the groups. 
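A doctest-style sketch of the mapping returned (hypothetical labels; the result prints like a dict):

>>> idx = pd.Index(["a", "b", "c"])
>>> idx.groupby(np.array([1, 2, 1]))
{1: Index(['a', 'c'], dtype='object'), 2: Index(['b'], dtype='object')}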
+ + Returns + ------- + dict + {group name -> group labels} + """ + # TODO: if we are a MultiIndex, we can do better + # that converting to tuples + if isinstance(values, ABCMultiIndex): + values = values._values + values = Categorical(values) + result = values._reverse_indexer() + + # map to the label + result = {k: self.take(v) for k, v in result.items()} + + return PrettyDict(result) + + def map(self, mapper, na_action: Literal["ignore"] | None = None): + """ + Map values using an input mapping or function. + + Parameters + ---------- + mapper : function, dict, or Series + Mapping correspondence. + na_action : {None, 'ignore'} + If 'ignore', propagate NA values, without passing them to the + mapping correspondence. + + Returns + ------- + Union[Index, MultiIndex] + The output of the mapping function applied to the index. + If the function returns a tuple with more than one element + a MultiIndex will be returned. + + Examples + -------- + >>> idx = pd.Index([1, 2, 3]) + >>> idx.map({1: 'a', 2: 'b', 3: 'c'}) + Index(['a', 'b', 'c'], dtype='object') + + Using `map` with a function: + + >>> idx = pd.Index([1, 2, 3]) + >>> idx.map('I am a {}'.format) + Index(['I am a 1', 'I am a 2', 'I am a 3'], dtype='object') + + >>> idx = pd.Index(['a', 'b', 'c']) + >>> idx.map(lambda x: x.upper()) + Index(['A', 'B', 'C'], dtype='object') + """ + from pandas.core.indexes.multi import MultiIndex + + new_values = self._map_values(mapper, na_action=na_action) + + # we can return a MultiIndex + if new_values.size and isinstance(new_values[0], tuple): + if isinstance(self, MultiIndex): + names = self.names + elif self.name: + names = [self.name] * len(new_values[0]) + else: + names = None + return MultiIndex.from_tuples(new_values, names=names) + + dtype = None + if not new_values.size: + # empty + dtype = self.dtype + + # e.g. if we are floating and new_values is all ints, then we + # don't want to cast back to floating. But if we are UInt64 + # and new_values is all ints, we want to try. + same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type + if same_dtype: + new_values = maybe_cast_pointwise_result( + new_values, self.dtype, same_dtype=same_dtype + ) + + return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name) + + # TODO: De-duplicate with map, xref GH#32349 + @final + def _transform_index(self, func, *, level=None) -> Index: + """ + Apply function to all values found in index. + + This includes transforming multiindex entries separately. + Only apply function to one level of the MultiIndex if level is specified. + """ + if isinstance(self, ABCMultiIndex): + values = [ + self.get_level_values(i).map(func) + if i == level or level is None + else self.get_level_values(i) + for i in range(self.nlevels) + ] + return type(self).from_arrays(values) + else: + items = [func(x) for x in self] + return Index(items, name=self.name, tupleize_cols=False) + + def isin(self, values, level=None) -> npt.NDArray[np.bool_]: + """ + Return a boolean array where the index values are in `values`. + + Compute boolean array of whether each index value is found in the + passed set of values. The length of the returned boolean array matches + the length of the index. + + Parameters + ---------- + values : set or list-like + Sought values. + level : str or int, optional + Name or position of the index level to use (if the index is a + `MultiIndex`). + + Returns + ------- + np.ndarray[bool] + NumPy array of boolean values. + + See Also + -------- + Series.isin : Same for Series. 
+ DataFrame.isin : Same method for DataFrames. + + Notes + ----- + In the case of `MultiIndex` you must either specify `values` as a + list-like object containing tuples that are the same length as the + number of levels, or specify `level`. Otherwise it will raise a + ``ValueError``. + + If `level` is specified: + + - if it is the name of one *and only one* index level, use that level; + - otherwise it should be a number indicating level position. + + Examples + -------- + >>> idx = pd.Index([1,2,3]) + >>> idx + Index([1, 2, 3], dtype='int64') + + Check whether each index value in a list of values. + + >>> idx.isin([1, 4]) + array([ True, False, False]) + + >>> midx = pd.MultiIndex.from_arrays([[1,2,3], + ... ['red', 'blue', 'green']], + ... names=('number', 'color')) + >>> midx + MultiIndex([(1, 'red'), + (2, 'blue'), + (3, 'green')], + names=['number', 'color']) + + Check whether the strings in the 'color' level of the MultiIndex + are in a list of colors. + + >>> midx.isin(['red', 'orange', 'yellow'], level='color') + array([ True, False, False]) + + To check across the levels of a MultiIndex, pass a list of tuples: + + >>> midx.isin([(1, 'red'), (3, 'red')]) + array([ True, False, False]) + + For a DatetimeIndex, string values in `values` are converted to + Timestamps. + + >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13'] + >>> dti = pd.to_datetime(dates) + >>> dti + DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'], + dtype='datetime64[ns]', freq=None) + + >>> dti.isin(['2000-03-11']) + array([ True, False, False]) + """ + if level is not None: + self._validate_index_level(level) + return algos.isin(self._values, values) + + def _get_string_slice(self, key: str_t): + # this is for partial string indexing, + # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex + raise NotImplementedError + + def slice_indexer( + self, + start: Hashable | None = None, + end: Hashable | None = None, + step: int | None = None, + ) -> slice: + """ + Compute the slice indexer for input labels and step. + + Index needs to be ordered and unique. + + Parameters + ---------- + start : label, default None + If None, defaults to the beginning. + end : label, default None + If None, defaults to the end. + step : int, default None + + Returns + ------- + slice + + Raises + ------ + KeyError : If key does not exist, or key is not unique and index is + not ordered. + + Notes + ----- + This function assumes that the data is sorted, so use at your own peril + + Examples + -------- + This is a method on all index types. For example you can do: + + >>> idx = pd.Index(list('abcd')) + >>> idx.slice_indexer(start='b', end='c') + slice(1, 3, None) + + >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')]) + >>> idx.slice_indexer(start='b', end=('c', 'g')) + slice(1, 3, None) + """ + start_slice, end_slice = self.slice_locs(start, end, step=step) + + # return a slice + if not is_scalar(start_slice): + raise AssertionError("Start slice bound is non-scalar") + if not is_scalar(end_slice): + raise AssertionError("End slice bound is non-scalar") + + return slice(start_slice, end_slice, step) + + def _maybe_cast_indexer(self, key): + """ + If we have a float key and are not a floating index, then try to cast + to an int if equivalent. + """ + return key + + def _maybe_cast_listlike_indexer(self, target) -> Index: + """ + Analogue to maybe_cast_indexer for get_indexer instead of get_loc. 
+ """ + return ensure_index(target) + + @final + def _validate_indexer( + self, + form: Literal["positional", "slice"], + key, + kind: Literal["getitem", "iloc"], + ) -> None: + """ + If we are positional indexer, validate that we have appropriate + typed bounds must be an integer. + """ + if not lib.is_int_or_none(key): + self._raise_invalid_indexer(form, key) + + def _maybe_cast_slice_bound(self, label, side: str_t): + """ + This function should be overloaded in subclasses that allow non-trivial + casting on label-slice bounds, e.g. datetime-like indices allowing + strings containing formatted datetimes. + + Parameters + ---------- + label : object + side : {'left', 'right'} + + Returns + ------- + label : object + + Notes + ----- + Value of `side` parameter should be validated in caller. + """ + + # We are a plain index here (sub-class override this method if they + # wish to have special treatment for floats/ints, e.g. datetimelike Indexes + + if is_numeric_dtype(self.dtype): + return self._maybe_cast_indexer(label) + + # reject them, if index does not contain label + if (is_float(label) or is_integer(label)) and label not in self: + self._raise_invalid_indexer("slice", label) + + return label + + def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"): + if self.is_monotonic_increasing: + return self.searchsorted(label, side=side) + elif self.is_monotonic_decreasing: + # np.searchsorted expects ascending sort order, have to reverse + # everything for it to work (element ordering, search side and + # resulting value). + pos = self[::-1].searchsorted( + label, side="right" if side == "left" else "left" + ) + return len(self) - pos + + raise ValueError("index must be monotonic increasing or decreasing") + + def get_slice_bound(self, label, side: Literal["left", "right"]) -> int: + """ + Calculate slice bound that corresponds to given label. + + Returns leftmost (one-past-the-rightmost if ``side=='right'``) position + of given label. + + Parameters + ---------- + label : object + side : {'left', 'right'} + + Returns + ------- + int + Index of label. + + See Also + -------- + Index.get_loc : Get integer location, slice or boolean mask for requested + label. + + Examples + -------- + >>> idx = pd.RangeIndex(5) + >>> idx.get_slice_bound(3, 'left') + 3 + + >>> idx.get_slice_bound(3, 'right') + 4 + + If ``label`` is non-unique in the index, an error will be raised. + + >>> idx_duplicate = pd.Index(['a', 'b', 'a', 'c', 'd']) + >>> idx_duplicate.get_slice_bound('a', 'left') + Traceback (most recent call last): + KeyError: Cannot get left slice bound for non-unique label: 'a' + """ + + if side not in ("left", "right"): + raise ValueError( + "Invalid value for side kwarg, must be either " + f"'left' or 'right': {side}" + ) + + original_label = label + + # For datetime indices label may be a string that has to be converted + # to datetime boundary according to its resolution. + label = self._maybe_cast_slice_bound(label, side) + + # we need to look up the label + try: + slc = self.get_loc(label) + except KeyError as err: + try: + return self._searchsorted_monotonic(label, side) + except ValueError: + # raise the original KeyError + raise err + + if isinstance(slc, np.ndarray): + # get_loc may return a boolean array, which + # is OK as long as they are representable by a slice. 
+ assert is_bool_dtype(slc.dtype) + slc = lib.maybe_booleans_to_slice(slc.view("u1")) + if isinstance(slc, np.ndarray): + raise KeyError( + f"Cannot get {side} slice bound for non-unique " + f"label: {repr(original_label)}" + ) + + if isinstance(slc, slice): + if side == "left": + return slc.start + else: + return slc.stop + else: + if side == "right": + return slc + 1 + else: + return slc + + def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]: + """ + Compute slice locations for input labels. + + Parameters + ---------- + start : label, default None + If None, defaults to the beginning. + end : label, default None + If None, defaults to the end. + step : int, defaults None + If None, defaults to 1. + + Returns + ------- + tuple[int, int] + + See Also + -------- + Index.get_loc : Get location for a single label. + + Notes + ----- + This method only works if the index is monotonic or unique. + + Examples + -------- + >>> idx = pd.Index(list('abcd')) + >>> idx.slice_locs(start='b', end='c') + (1, 3) + """ + inc = step is None or step >= 0 + + if not inc: + # If it's a reverse slice, temporarily swap bounds. + start, end = end, start + + # GH 16785: If start and end happen to be date strings with UTC offsets + # attempt to parse and check that the offsets are the same + if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)): + try: + ts_start = Timestamp(start) + ts_end = Timestamp(end) + except (ValueError, TypeError): + pass + else: + if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): + raise ValueError("Both dates must have the same UTC offset") + + start_slice = None + if start is not None: + start_slice = self.get_slice_bound(start, "left") + if start_slice is None: + start_slice = 0 + + end_slice = None + if end is not None: + end_slice = self.get_slice_bound(end, "right") + if end_slice is None: + end_slice = len(self) + + if not inc: + # Bounds at this moment are swapped, swap them back and shift by 1. + # + # slice_locs('B', 'A', step=-1): s='B', e='A' + # + # s='A' e='B' + # AFTER SWAP: | | + # v ------------------> V + # ----------------------------------- + # | | |A|A|A|A| | | | | |B|B| | | | | + # ----------------------------------- + # ^ <------------------ ^ + # SHOULD BE: | | + # end=s-1 start=e-1 + # + end_slice, start_slice = start_slice - 1, end_slice - 1 + + # i == -1 triggers ``len(self) + i`` selection that points to the + # last element, not before-the-first one, subtracting len(self) + # compensates that. + if end_slice == -1: + end_slice -= len(self) + if start_slice == -1: + start_slice -= len(self) + + return start_slice, end_slice + + def delete(self, loc) -> Self: + """ + Make new Index with passed location(-s) deleted. + + Parameters + ---------- + loc : int or list of int + Location of item(-s) which will be deleted. + Use a list of locations to delete more than one value at the same time. + + Returns + ------- + Index + Will be same type as self, except for RangeIndex. + + See Also + -------- + numpy.delete : Delete any rows and column from NumPy array (ndarray). 
+ + Examples + -------- + >>> idx = pd.Index(['a', 'b', 'c']) + >>> idx.delete(1) + Index(['a', 'c'], dtype='object') + + >>> idx = pd.Index(['a', 'b', 'c']) + >>> idx.delete([0, 2]) + Index(['b'], dtype='object') + """ + values = self._values + res_values: ArrayLike + if isinstance(values, np.ndarray): + # TODO(__array_function__): special casing will be unnecessary + res_values = np.delete(values, loc) + else: + res_values = values.delete(loc) + + # _constructor so RangeIndex-> Index with an int64 dtype + return self._constructor._simple_new(res_values, name=self.name) + + def insert(self, loc: int, item) -> Index: + """ + Make new Index inserting new item at location. + + Follows Python numpy.insert semantics for negative values. + + Parameters + ---------- + loc : int + item : object + + Returns + ------- + Index + + Examples + -------- + >>> idx = pd.Index(['a', 'b', 'c']) + >>> idx.insert(1, 'x') + Index(['a', 'x', 'b', 'c'], dtype='object') + """ + item = lib.item_from_zerodim(item) + if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object: + item = self._na_value + + arr = self._values + + try: + if isinstance(arr, ExtensionArray): + res_values = arr.insert(loc, item) + return type(self)._simple_new(res_values, name=self.name) + else: + item = self._validate_fill_value(item) + except (TypeError, ValueError, LossySetitemError): + # e.g. trying to insert an integer into a DatetimeIndex + # We cannot keep the same dtype, so cast to the (often object) + # minimal shared dtype before doing the insert. + dtype = self._find_common_type_compat(item) + return self.astype(dtype).insert(loc, item) + + if arr.dtype != object or not isinstance( + item, (tuple, np.datetime64, np.timedelta64) + ): + # with object-dtype we need to worry about numpy incorrectly casting + # dt64/td64 to integer, also about treating tuples as sequences + # special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550 + casted = arr.dtype.type(item) + new_values = np.insert(arr, loc, casted) + + else: + # error: No overload variant of "insert" matches argument types + # "ndarray[Any, Any]", "int", "None" + new_values = np.insert(arr, loc, None) # type: ignore[call-overload] + loc = loc if loc >= 0 else loc - 1 + new_values[loc] = item + + idx = Index._with_infer(new_values, name=self.name) + if ( + using_pyarrow_string_dtype() + and is_string_dtype(idx.dtype) + and new_values.dtype == object + ): + idx = idx.astype(new_values.dtype) + return idx + + def drop( + self, + labels: Index | np.ndarray | Iterable[Hashable], + errors: IgnoreRaise = "raise", + ) -> Index: + """ + Make new Index with passed list of labels deleted. + + Parameters + ---------- + labels : array-like or scalar + errors : {'ignore', 'raise'}, default 'raise' + If 'ignore', suppress error and existing labels are dropped. + + Returns + ------- + Index + Will be same type as self, except for RangeIndex. + + Raises + ------ + KeyError + If not all of the labels are found in the selected axis + + Examples + -------- + >>> idx = pd.Index(['a', 'b', 'c']) + >>> idx.drop(['a']) + Index(['b', 'c'], dtype='object') + """ + if not isinstance(labels, Index): + # avoid materializing e.g. 
RangeIndex + arr_dtype = "object" if self.dtype == "object" else None + labels = com.index_labels_to_array(labels, dtype=arr_dtype) + + indexer = self.get_indexer_for(labels) + mask = indexer == -1 + if mask.any(): + if errors != "ignore": + raise KeyError(f"{labels[mask].tolist()} not found in axis") + indexer = indexer[~mask] + return self.delete(indexer) + + def infer_objects(self, copy: bool = True) -> Index: + """ + If we have an object dtype, try to infer a non-object dtype. + + Parameters + ---------- + copy : bool, default True + Whether to make a copy in cases where no inference occurs. + """ + if self._is_multi: + raise NotImplementedError( + "infer_objects is not implemented for MultiIndex. " + "Use index.to_frame().infer_objects() instead." + ) + if self.dtype != object: + return self.copy() if copy else self + + values = self._values + values = cast("npt.NDArray[np.object_]", values) + res_values = lib.maybe_convert_objects( + values, + convert_non_numeric=True, + ) + if copy and res_values is values: + return self.copy() + result = Index(res_values, name=self.name) + if not copy and res_values is values and self._references is not None: + result._references = self._references + result._references.add_index_reference(result) + return result + + @final + def diff(self, periods: int = 1) -> Index: + """ + Computes the difference between consecutive values in the Index object. + + If periods is greater than 1, computes the difference between values that + are `periods` number of positions apart. + + Parameters + ---------- + periods : int, optional + The number of positions between the current and previous + value to compute the difference with. Default is 1. + + Returns + ------- + Index + A new Index object with the computed differences. + + Examples + -------- + >>> import pandas as pd + >>> idx = pd.Index([10, 20, 30, 40, 50]) + >>> idx.diff() + Index([nan, 10.0, 10.0, 10.0, 10.0], dtype='float64') + + """ + return Index(self.to_series().diff(periods)) + + def round(self, decimals: int = 0): + """ + Round each value in the Index to the given number of decimals. + + Parameters + ---------- + decimals : int, optional + Number of decimal places to round to. If decimals is negative, + it specifies the number of positions to the left of the decimal point. + + Returns + ------- + Index + A new Index with the rounded values. + + Examples + -------- + >>> import pandas as pd + >>> idx = pd.Index([10.1234, 20.5678, 30.9123, 40.4567, 50.7890]) + >>> idx.round(decimals=2) + Index([10.12, 20.57, 30.91, 40.46, 50.79], dtype='float64') + + """ + return self._constructor(self.to_series().round(decimals)) + + # -------------------------------------------------------------------- + # Generated Arithmetic, Comparison, and Unary Methods + + def _cmp_method(self, other, op): + """ + Wrapper used to dispatch comparison operations. + """ + if self.is_(other): + # fastpath + if op in {operator.eq, operator.le, operator.ge}: + arr = np.ones(len(self), dtype=bool) + if self._can_hold_na and not isinstance(self, ABCMultiIndex): + # TODO: should set MultiIndex._can_hold_na = False? 
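+                    # Editor's note (illustrative): for idx = pd.Index([1.0, np.nan]),
+                    # idx == idx must yield array([True, False]) because NaN does not
+                    # compare equal to itself, so NA positions are flipped to False
+                    # in this fastpath.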
+ arr[self.isna()] = False + return arr + elif op is operator.ne: + arr = np.zeros(len(self), dtype=bool) + if self._can_hold_na and not isinstance(self, ABCMultiIndex): + arr[self.isna()] = True + return arr + + if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len( + self + ) != len(other): + raise ValueError("Lengths must match to compare") + + if not isinstance(other, ABCMultiIndex): + other = extract_array(other, extract_numpy=True) + else: + other = np.asarray(other) + + if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray): + # e.g. PeriodArray, Categorical + result = op(self._values, other) + + elif isinstance(self._values, ExtensionArray): + result = op(self._values, other) + + elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex): + # don't pass MultiIndex + result = ops.comp_method_OBJECT_ARRAY(op, self._values, other) + + else: + result = ops.comparison_op(self._values, other, op) + + return result + + @final + def _logical_method(self, other, op): + res_name = ops.get_op_result_name(self, other) + + lvalues = self._values + rvalues = extract_array(other, extract_numpy=True, extract_range=True) + + res_values = ops.logical_op(lvalues, rvalues, op) + return self._construct_result(res_values, name=res_name) + + @final + def _construct_result(self, result, name): + if isinstance(result, tuple): + return ( + Index(result[0], name=name, dtype=result[0].dtype), + Index(result[1], name=name, dtype=result[1].dtype), + ) + return Index(result, name=name, dtype=result.dtype) + + def _arith_method(self, other, op): + if ( + isinstance(other, Index) + and is_object_dtype(other.dtype) + and type(other) is not Index + ): + # We return NotImplemented for object-dtype index *subclasses* so they have + # a chance to implement ops before we unwrap them. + # See https://github.com/pandas-dev/pandas/issues/31109 + return NotImplemented + + return super()._arith_method(other, op) + + @final + def _unary_method(self, op): + result = op(self._values) + return Index(result, name=self.name) + + def __abs__(self) -> Index: + return self._unary_method(operator.abs) + + def __neg__(self) -> Index: + return self._unary_method(operator.neg) + + def __pos__(self) -> Index: + return self._unary_method(operator.pos) + + def __invert__(self) -> Index: + # GH#8875 + return self._unary_method(operator.inv) + + # -------------------------------------------------------------------- + # Reductions + + def any(self, *args, **kwargs): + """ + Return whether any element is Truthy. + + Parameters + ---------- + *args + Required for compatibility with numpy. + **kwargs + Required for compatibility with numpy. + + Returns + ------- + bool or array-like (if axis is specified) + A single element array-like may be converted to bool. + + See Also + -------- + Index.all : Return whether all elements are True. + Series.all : Return whether all elements are True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity + evaluate to True because these are not equal to zero. + + Examples + -------- + >>> index = pd.Index([0, 1, 2]) + >>> index.any() + True + + >>> index = pd.Index([0, 0, 0]) + >>> index.any() + False + """ + nv.validate_any(args, kwargs) + self._maybe_disable_logical_methods("any") + vals = self._values + if not isinstance(vals, np.ndarray): + # i.e. 
EA, call _reduce instead of "any" to get TypeError instead + # of AttributeError + return vals._reduce("any") + return np.any(vals) + + def all(self, *args, **kwargs): + """ + Return whether all elements are Truthy. + + Parameters + ---------- + *args + Required for compatibility with numpy. + **kwargs + Required for compatibility with numpy. + + Returns + ------- + bool or array-like (if axis is specified) + A single element array-like may be converted to bool. + + See Also + -------- + Index.any : Return whether any element in an Index is True. + Series.any : Return whether any element in a Series is True. + Series.all : Return whether all elements in a Series are True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity + evaluate to True because these are not equal to zero. + + Examples + -------- + True, because nonzero integers are considered True. + + >>> pd.Index([1, 2, 3]).all() + True + + False, because ``0`` is considered False. + + >>> pd.Index([0, 1, 2]).all() + False + """ + nv.validate_all(args, kwargs) + self._maybe_disable_logical_methods("all") + vals = self._values + if not isinstance(vals, np.ndarray): + # i.e. EA, call _reduce instead of "all" to get TypeError instead + # of AttributeError + return vals._reduce("all") + return np.all(vals) + + @final + def _maybe_disable_logical_methods(self, opname: str_t) -> None: + """ + raise if this Index subclass does not support any or all. + """ + if ( + isinstance(self, ABCMultiIndex) + # TODO(3.0): PeriodArray and DatetimeArray any/all will raise, + # so checking needs_i8_conversion will be unnecessary + or (needs_i8_conversion(self.dtype) and self.dtype.kind != "m") + ): + # This call will raise + make_invalid_op(opname)(self) + + @Appender(IndexOpsMixin.argmin.__doc__) + def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: + nv.validate_argmin(args, kwargs) + nv.validate_minmax_axis(axis) + + if not self._is_multi and self.hasnans: + # Take advantage of cache + mask = self._isnan + if not skipna or mask.all(): + warnings.warn( + f"The behavior of {type(self).__name__}.argmax/argmin " + "with skipna=False and NAs, or with all-NAs is deprecated. " + "In a future version this will raise ValueError.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return -1 + return super().argmin(skipna=skipna) + + @Appender(IndexOpsMixin.argmax.__doc__) + def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: + nv.validate_argmax(args, kwargs) + nv.validate_minmax_axis(axis) + + if not self._is_multi and self.hasnans: + # Take advantage of cache + mask = self._isnan + if not skipna or mask.all(): + warnings.warn( + f"The behavior of {type(self).__name__}.argmax/argmin " + "with skipna=False and NAs, or with all-NAs is deprecated. " + "In a future version this will raise ValueError.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return -1 + return super().argmax(skipna=skipna) + + def min(self, axis=None, skipna: bool = True, *args, **kwargs): + """ + Return the minimum value of the Index. + + Parameters + ---------- + axis : {None} + Dummy argument for consistency with Series. + skipna : bool, default True + Exclude NA/null values when showing the result. + *args, **kwargs + Additional arguments and keywords for compatibility with NumPy. + + Returns + ------- + scalar + Minimum value. + + See Also + -------- + Index.max : Return the maximum value of the object. + Series.min : Return the minimum value in a Series. 
+ DataFrame.min : Return the minimum values in a DataFrame. + + Examples + -------- + >>> idx = pd.Index([3, 2, 1]) + >>> idx.min() + 1 + + >>> idx = pd.Index(['c', 'b', 'a']) + >>> idx.min() + 'a' + + For a MultiIndex, the minimum is determined lexicographically. + + >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)]) + >>> idx.min() + ('a', 1) + """ + nv.validate_min(args, kwargs) + nv.validate_minmax_axis(axis) + + if not len(self): + return self._na_value + + if len(self) and self.is_monotonic_increasing: + # quick check + first = self[0] + if not isna(first): + return first + + if not self._is_multi and self.hasnans: + # Take advantage of cache + mask = self._isnan + if not skipna or mask.all(): + return self._na_value + + if not self._is_multi and not isinstance(self._values, np.ndarray): + return self._values._reduce(name="min", skipna=skipna) + + return nanops.nanmin(self._values, skipna=skipna) + + def max(self, axis=None, skipna: bool = True, *args, **kwargs): + """ + Return the maximum value of the Index. + + Parameters + ---------- + axis : int, optional + For compatibility with NumPy. Only 0 or None are allowed. + skipna : bool, default True + Exclude NA/null values when showing the result. + *args, **kwargs + Additional arguments and keywords for compatibility with NumPy. + + Returns + ------- + scalar + Maximum value. + + See Also + -------- + Index.min : Return the minimum value in an Index. + Series.max : Return the maximum value in a Series. + DataFrame.max : Return the maximum values in a DataFrame. + + Examples + -------- + >>> idx = pd.Index([3, 2, 1]) + >>> idx.max() + 3 + + >>> idx = pd.Index(['c', 'b', 'a']) + >>> idx.max() + 'c' + + For a MultiIndex, the maximum is determined lexicographically. + + >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)]) + >>> idx.max() + ('b', 2) + """ + + nv.validate_max(args, kwargs) + nv.validate_minmax_axis(axis) + + if not len(self): + return self._na_value + + if len(self) and self.is_monotonic_increasing: + # quick check + last = self[-1] + if not isna(last): + return last + + if not self._is_multi and self.hasnans: + # Take advantage of cache + mask = self._isnan + if not skipna or mask.all(): + return self._na_value + + if not self._is_multi and not isinstance(self._values, np.ndarray): + return self._values._reduce(name="max", skipna=skipna) + + return nanops.nanmax(self._values, skipna=skipna) + + # -------------------------------------------------------------------- + + @final + @property + def shape(self) -> Shape: + """ + Return a tuple of the shape of the underlying data. + + Examples + -------- + >>> idx = pd.Index([1, 2, 3]) + >>> idx + Index([1, 2, 3], dtype='int64') + >>> idx.shape + (3,) + """ + # See GH#27775, GH#27384 for history/reasoning in how this is defined. + return (len(self),) + + +def ensure_index_from_sequences(sequences, names=None) -> Index: + """ + Construct an index from sequences of data. + + A single sequence returns an Index. Many sequences returns a + MultiIndex. 
+
+    Parameters
+    ----------
+    sequences : sequence of sequences
+    names : sequence of str
+
+    Returns
+    -------
+    index : Index or MultiIndex
+
+    Examples
+    --------
+    >>> ensure_index_from_sequences([[1, 2, 3]], names=["name"])
+    Index([1, 2, 3], dtype='int64', name='name')
+
+    >>> ensure_index_from_sequences([["a", "a"], ["a", "b"]], names=["L1", "L2"])
+    MultiIndex([('a', 'a'),
+                ('a', 'b')],
+               names=['L1', 'L2'])
+
+    See Also
+    --------
+    ensure_index
+    """
+    from pandas.core.indexes.multi import MultiIndex
+
+    if len(sequences) == 1:
+        if names is not None:
+            names = names[0]
+        return Index(sequences[0], name=names)
+    else:
+        return MultiIndex.from_arrays(sequences, names=names)
+
+
+def ensure_index(index_like: Axes, copy: bool = False) -> Index:
+    """
+    Ensure that we have an index from some index-like object.
+
+    Parameters
+    ----------
+    index_like : sequence
+        An Index or other sequence
+    copy : bool, default False
+
+    Returns
+    -------
+    index : Index or MultiIndex
+
+    See Also
+    --------
+    ensure_index_from_sequences
+
+    Examples
+    --------
+    >>> ensure_index(['a', 'b'])
+    Index(['a', 'b'], dtype='object')
+
+    >>> ensure_index([('a', 'a'), ('b', 'c')])
+    Index([('a', 'a'), ('b', 'c')], dtype='object')
+
+    >>> ensure_index([['a', 'a'], ['b', 'c']])
+    MultiIndex([('a', 'b'),
+                ('a', 'c')],
+               )
+    """
+    if isinstance(index_like, Index):
+        if copy:
+            index_like = index_like.copy()
+        return index_like
+
+    if isinstance(index_like, ABCSeries):
+        name = index_like.name
+        return Index(index_like, name=name, copy=copy)
+
+    if is_iterator(index_like):
+        index_like = list(index_like)
+
+    if isinstance(index_like, list):
+        if type(index_like) is not list:
+            # must check for exactly list here because of strict type
+            # check in clean_index_list
+            index_like = list(index_like)
+
+        if len(index_like) and lib.is_all_arraylike(index_like):
+            from pandas.core.indexes.multi import MultiIndex
+
+            return MultiIndex.from_arrays(index_like)
+        else:
+            return Index(index_like, copy=copy, tupleize_cols=False)
+    else:
+        return Index(index_like, copy=copy)
+
+
+def ensure_has_len(seq):
+    """
+    If seq is an iterator, put its values into a list.
+    """
+    try:
+        len(seq)
+    except TypeError:
+        return list(seq)
+    else:
+        return seq
+
+
+def trim_front(strings: list[str]) -> list[str]:
+    """
+    Trims leading spaces that are common to all strings.
+
+    Examples
+    --------
+    >>> trim_front([" a", " b"])
+    ['a', 'b']
+
+    >>> trim_front([" a", " "])
+    ['a', '']
+    """
+    if not strings:
+        return strings
+    while all(strings) and all(x[0] == " " for x in strings):
+        strings = [x[1:] for x in strings]
+    return strings
+
+
+def _validate_join_method(method: str) -> None:
+    if method not in ["left", "right", "inner", "outer"]:
+        raise ValueError(f"do not recognize join method {method}")
+
+
+def maybe_extract_name(name, obj, cls) -> Hashable:
+    """
+    If no name is passed, then extract it from data, validating hashability.
+    """
+    if name is None and isinstance(obj, (Index, ABCSeries)):
+        # Note we don't just check for "name" attribute since that would
+        # pick up e.g. dtype.name
+        name = obj.name
+
+    # GH#29069
+    if not is_hashable(name):
+        raise TypeError(f"{cls.__name__}.name must be a hashable type")
+
+    return name
+
+
+def get_unanimous_names(*indexes: Index) -> tuple[Hashable, ...]:
+    """
+    Return common name if all indices agree, otherwise None (level-by-level).
+
+    Parameters
+    ----------
+    indexes : list of Index objects
+
+    Returns
+    -------
+    tuple
+        A tuple representing the unanimous 'names' found.
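+
+    Examples
+    --------
+    Illustrative sketch (editor's addition; outputs assumed from the
+    implementation above):
+
+    >>> get_unanimous_names(pd.Index([1], name="a"), pd.Index([2], name="a"))
+    ('a',)
+    >>> get_unanimous_names(pd.Index([1], name="a"), pd.Index([2], name="b"))
+    (None,)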
+ """ + name_tups = [tuple(i.names) for i in indexes] + name_sets = [{*ns} for ns in zip_longest(*name_tups)] + names = tuple(ns.pop() if len(ns) == 1 else None for ns in name_sets) + return names + + +def _unpack_nested_dtype(other: Index) -> Index: + """ + When checking if our dtype is comparable with another, we need + to unpack CategoricalDtype to look at its categories.dtype. + + Parameters + ---------- + other : Index + + Returns + ------- + Index + """ + dtype = other.dtype + if isinstance(dtype, CategoricalDtype): + # If there is ever a SparseIndex, this could get dispatched + # here too. + return dtype.categories + elif isinstance(dtype, ArrowDtype): + # GH 53617 + import pyarrow as pa + + if pa.types.is_dictionary(dtype.pyarrow_dtype): + other = other.astype(ArrowDtype(dtype.pyarrow_dtype.value_type)) + return other + + +def _maybe_try_sort(result: Index | ArrayLike, sort: bool | None): + if sort is not False: + try: + # error: Incompatible types in assignment (expression has type + # "Union[ExtensionArray, ndarray[Any, Any], Index, Series, + # Tuple[Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series], + # ndarray[Any, Any]]]", variable has type "Union[Index, + # Union[ExtensionArray, ndarray[Any, Any]]]") + result = algos.safe_sort(result) # type: ignore[assignment] + except TypeError as err: + if sort is True: + raise + warnings.warn( + f"{err}, sort order is undefined for incomparable objects.", + RuntimeWarning, + stacklevel=find_stack_level(), + ) + return result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/category.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/category.py new file mode 100644 index 00000000..e189d921 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/category.py @@ -0,0 +1,522 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Literal, + cast, +) + +import numpy as np + +from pandas._libs import index as libindex +from pandas.util._decorators import ( + cache_readonly, + doc, +) + +from pandas.core.dtypes.common import is_scalar +from pandas.core.dtypes.concat import concat_compat +from pandas.core.dtypes.dtypes import CategoricalDtype +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + notna, +) + +from pandas.core.arrays.categorical import ( + Categorical, + contains, +) +from pandas.core.construction import extract_array +from pandas.core.indexes.base import ( + Index, + maybe_extract_name, +) +from pandas.core.indexes.extension import ( + NDArrayBackedExtensionIndex, + inherit_names, +) + +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from collections.abc import Hashable + + from pandas._typing import ( + Dtype, + DtypeObj, + npt, + ) + + +@inherit_names( + [ + "argsort", + "tolist", + "codes", + "categories", + "ordered", + "_reverse_indexer", + "searchsorted", + "min", + "max", + ], + Categorical, +) +@inherit_names( + [ + "rename_categories", + "reorder_categories", + "add_categories", + "remove_categories", + "remove_unused_categories", + "set_categories", + "as_ordered", + "as_unordered", + ], + Categorical, + wrap=True, +) +class CategoricalIndex(NDArrayBackedExtensionIndex): + """ + Index based on an underlying :class:`Categorical`. + + CategoricalIndex, like Categorical, can only take on a limited, + and usually fixed, number of possible values (`categories`). Also, + like Categorical, it might have an order, but numerical operations + (additions, divisions, ...) 
are not possible. + + Parameters + ---------- + data : array-like (1-dimensional) + The values of the categorical. If `categories` are given, values not in + `categories` will be replaced with NaN. + categories : index-like, optional + The categories for the categorical. Items need to be unique. + If the categories are not given here (and also not in `dtype`), they + will be inferred from the `data`. + ordered : bool, optional + Whether or not this categorical is treated as an ordered + categorical. If not given here or in `dtype`, the resulting + categorical will be unordered. + dtype : CategoricalDtype or "category", optional + If :class:`CategoricalDtype`, cannot be used together with + `categories` or `ordered`. + copy : bool, default False + Make a copy of input ndarray. + name : object, optional + Name to be stored in the index. + + Attributes + ---------- + codes + categories + ordered + + Methods + ------- + rename_categories + reorder_categories + add_categories + remove_categories + remove_unused_categories + set_categories + as_ordered + as_unordered + map + + Raises + ------ + ValueError + If the categories do not validate. + TypeError + If an explicit ``ordered=True`` is given but no `categories` and the + `values` are not sortable. + + See Also + -------- + Index : The base pandas Index type. + Categorical : A categorical array. + CategoricalDtype : Type for categorical data. + + Notes + ----- + See the `user guide + `__ + for more. + + Examples + -------- + >>> pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"]) + CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], + categories=['a', 'b', 'c'], ordered=False, dtype='category') + + ``CategoricalIndex`` can also be instantiated from a ``Categorical``: + + >>> c = pd.Categorical(["a", "b", "c", "a", "b", "c"]) + >>> pd.CategoricalIndex(c) + CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], + categories=['a', 'b', 'c'], ordered=False, dtype='category') + + Ordered ``CategoricalIndex`` can have a min and max value. + + >>> ci = pd.CategoricalIndex( + ... ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"] + ... ) + >>> ci + CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], + categories=['c', 'b', 'a'], ordered=True, dtype='category') + >>> ci.min() + 'c' + """ + + _typ = "categoricalindex" + _data_cls = Categorical + + @property + def _can_hold_strings(self): + return self.categories._can_hold_strings + + @cache_readonly + def _should_fallback_to_positional(self) -> bool: + return self.categories._should_fallback_to_positional + + codes: np.ndarray + categories: Index + ordered: bool | None + _data: Categorical + _values: Categorical + + @property + def _engine_type(self) -> type[libindex.IndexEngine]: + # self.codes can have dtype int8, int16, int32 or int64, so we need + # to return the corresponding engine type (libindex.Int8Engine, etc.). 
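+        # Editor's sketch (assumed values): pd.CategoricalIndex(list("abc"))
+        # has only 3 categories, so its codes are stored as int8 and the
+        # lookup below resolves np.int8 -> libindex.Int8Engine.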
+ return { + np.int8: libindex.Int8Engine, + np.int16: libindex.Int16Engine, + np.int32: libindex.Int32Engine, + np.int64: libindex.Int64Engine, + }[self.codes.dtype.type] + + # -------------------------------------------------------------------- + # Constructors + + def __new__( + cls, + data=None, + categories=None, + ordered=None, + dtype: Dtype | None = None, + copy: bool = False, + name: Hashable | None = None, + ) -> CategoricalIndex: + name = maybe_extract_name(name, data, cls) + + if is_scalar(data): + # GH#38944 include None here, which pre-2.0 subbed in [] + cls._raise_scalar_data_error(data) + + data = Categorical( + data, categories=categories, ordered=ordered, dtype=dtype, copy=copy + ) + + return cls._simple_new(data, name=name) + + # -------------------------------------------------------------------- + + def _is_dtype_compat(self, other: Index) -> Categorical: + """ + *this is an internal non-public method* + + provide a comparison between the dtype of self and other (coercing if + needed) + + Parameters + ---------- + other : Index + + Returns + ------- + Categorical + + Raises + ------ + TypeError if the dtypes are not compatible + """ + if isinstance(other.dtype, CategoricalDtype): + cat = extract_array(other) + cat = cast(Categorical, cat) + if not cat._categories_match_up_to_permutation(self._values): + raise TypeError( + "categories must match existing categories when appending" + ) + + elif other._is_multi: + # preempt raising NotImplementedError in isna call + raise TypeError("MultiIndex is not dtype-compatible with CategoricalIndex") + else: + values = other + + cat = Categorical(other, dtype=self.dtype) + other = CategoricalIndex(cat) + if not other.isin(values).all(): + raise TypeError( + "cannot append a non-category item to a CategoricalIndex" + ) + cat = other._values + + if not ((cat == values) | (isna(cat) & isna(values))).all(): + # GH#37667 see test_equals_non_category + raise TypeError( + "categories must match existing categories when appending" + ) + + return cat + + def equals(self, other: object) -> bool: + """ + Determine if two CategoricalIndex objects contain the same elements. + + Returns + ------- + bool + ``True`` if two :class:`pandas.CategoricalIndex` objects have equal + elements, ``False`` otherwise. + + Examples + -------- + >>> ci = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c']) + >>> ci2 = pd.CategoricalIndex(pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])) + >>> ci.equals(ci2) + True + + The order of elements matters. + + >>> ci3 = pd.CategoricalIndex(['c', 'b', 'a', 'a', 'b', 'c']) + >>> ci.equals(ci3) + False + + The orderedness also matters. + + >>> ci4 = ci.as_ordered() + >>> ci.equals(ci4) + False + + The categories matter, but the order of the categories matters only when + ``ordered=True``. + + >>> ci5 = ci.set_categories(['a', 'b', 'c', 'd']) + >>> ci.equals(ci5) + False + + >>> ci6 = ci.set_categories(['b', 'c', 'a']) + >>> ci.equals(ci6) + True + >>> ci_ordered = pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], + ... 
ordered=True) + >>> ci2_ordered = ci_ordered.set_categories(['b', 'c', 'a']) + >>> ci_ordered.equals(ci2_ordered) + False + """ + if self.is_(other): + return True + + if not isinstance(other, Index): + return False + + try: + other = self._is_dtype_compat(other) + except (TypeError, ValueError): + return False + + return self._data.equals(other) + + # -------------------------------------------------------------------- + # Rendering Methods + + @property + def _formatter_func(self): + return self.categories._formatter_func + + def _format_attrs(self): + """ + Return a list of tuples of the (attr,formatted_value) + """ + attrs: list[tuple[str, str | int | bool | None]] + + attrs = [ + ( + "categories", + f"[{', '.join(self._data._repr_categories())}]", + ), + ("ordered", self.ordered), + ] + extra = super()._format_attrs() + return attrs + extra + + def _format_with_header(self, header: list[str], na_rep: str) -> list[str]: + result = [ + pprint_thing(x, escape_chars=("\t", "\r", "\n")) if notna(x) else na_rep + for x in self._values + ] + return header + result + + # -------------------------------------------------------------------- + + @property + def inferred_type(self) -> str: + return "categorical" + + @doc(Index.__contains__) + def __contains__(self, key: Any) -> bool: + # if key is a NaN, check if any NaN is in self. + if is_valid_na_for_dtype(key, self.categories.dtype): + return self.hasnans + + return contains(self, key, container=self._engine) + + def reindex( + self, target, method=None, level=None, limit: int | None = None, tolerance=None + ) -> tuple[Index, npt.NDArray[np.intp] | None]: + """ + Create index with target's values (move/add/delete values as necessary) + + Returns + ------- + new_index : pd.Index + Resulting index + indexer : np.ndarray[np.intp] or None + Indices of output values in original index + + """ + if method is not None: + raise NotImplementedError( + "argument method is not implemented for CategoricalIndex.reindex" + ) + if level is not None: + raise NotImplementedError( + "argument level is not implemented for CategoricalIndex.reindex" + ) + if limit is not None: + raise NotImplementedError( + "argument limit is not implemented for CategoricalIndex.reindex" + ) + return super().reindex(target) + + # -------------------------------------------------------------------- + # Indexing Methods + + def _maybe_cast_indexer(self, key) -> int: + # GH#41933: we have to do this instead of self._data._validate_scalar + # because this will correctly get partial-indexing on Interval categories + try: + return self._data._unbox_scalar(key) + except KeyError: + if is_valid_na_for_dtype(key, self.categories.dtype): + return -1 + raise + + def _maybe_cast_listlike_indexer(self, values) -> CategoricalIndex: + if isinstance(values, CategoricalIndex): + values = values._data + if isinstance(values, Categorical): + # Indexing on codes is more efficient if categories are the same, + # so we can apply some optimizations based on the degree of + # dtype-matching. 
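+            # Editor's note (illustrative): when values is already a
+            # Categorical, re-encoding its codes against our own categories
+            # avoids a per-element lookup of the category values.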
+            cat = self._data._encode_with_my_categories(values)
+            codes = cat._codes
+        else:
+            codes = self.categories.get_indexer(values)
+            codes = codes.astype(self.codes.dtype, copy=False)
+            cat = self._data._from_backing_data(codes)
+        return type(self)._simple_new(cat)
+
+    # --------------------------------------------------------------------
+
+    def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
+        return self.categories._is_comparable_dtype(dtype)
+
+    def map(self, mapper, na_action: Literal["ignore"] | None = None):
+        """
+        Map values using an input mapping or function.
+
+        Maps the values (their categories, not the codes) of the index to new
+        categories. If the mapping correspondence is one-to-one the result is a
+        :class:`~pandas.CategoricalIndex` which has the same order property as
+        the original, otherwise an :class:`~pandas.Index` is returned.
+
+        If a `dict` or :class:`~pandas.Series` is used any unmapped category is
+        mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
+        will be returned.
+
+        Parameters
+        ----------
+        mapper : function, dict, or Series
+            Mapping correspondence.
+        na_action : {None, 'ignore'}, default None
+            If 'ignore', propagate NaN values without passing them to the
+            mapping correspondence.
+
+        Returns
+        -------
+        pandas.CategoricalIndex or pandas.Index
+            Mapped index.
+
+        See Also
+        --------
+        Index.map : Apply a mapping correspondence on an
+            :class:`~pandas.Index`.
+        Series.map : Apply a mapping correspondence on a
+            :class:`~pandas.Series`.
+        Series.apply : Apply more complex functions on a
+            :class:`~pandas.Series`.
+
+        Examples
+        --------
+        >>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
+        >>> idx
+        CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
+                         ordered=False, dtype='category')
+        >>> idx.map(lambda x: x.upper())
+        CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
+                         ordered=False, dtype='category')
+        >>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
+        CategoricalIndex(['first', 'second', 'third'], categories=['first',
+                         'second', 'third'], ordered=False, dtype='category')
+
+        If the mapping is one-to-one the ordering of the categories is
+        preserved:
+
+        >>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
+        >>> idx
+        CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
+                         ordered=True, dtype='category')
+        >>> idx.map({'a': 3, 'b': 2, 'c': 1})
+        CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
+                         dtype='category')
+
+        If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
+
+        >>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
+        Index(['first', 'second', 'first'], dtype='object')
+
+        If a `dict` is used, all unmapped categories are mapped to `NaN` and
+        the result is an :class:`~pandas.Index`:
+
+        >>> idx.map({'a': 'first', 'b': 'second'})
+        Index(['first', 'second', nan], dtype='object')
+        """
+        mapped = self._values.map(mapper, na_action=na_action)
+        return Index(mapped, name=self.name)
+
+    def _concat(self, to_concat: list[Index], name: Hashable) -> Index:
+        # if calling index is category, don't check dtype of others
+        try:
+            cat = Categorical._concat_same_type(
+                [self._is_dtype_compat(c) for c in to_concat]
+            )
+        except TypeError:
+            # not all to_concat elements are among our categories (or NA)
+
+            res = concat_compat([x._values for x in to_concat])
+            return Index(res, name=name)
+        else:
+            return type(self)._simple_new(cat, name=name)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/datetimelike.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/datetimelike.py
new file mode 100644
index 00000000..d5a29233
--- /dev/null
+++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/datetimelike.py @@ -0,0 +1,819 @@ +""" +Base and utility classes for tseries type pandas objects. +""" +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +from typing import ( + TYPE_CHECKING, + Any, + Callable, + cast, + final, +) + +import numpy as np + +from pandas._config import using_copy_on_write + +from pandas._libs import ( + NaT, + Timedelta, + lib, +) +from pandas._libs.tslibs import ( + BaseOffset, + Resolution, + Tick, + parsing, + to_offset, +) +from pandas.compat.numpy import function as nv +from pandas.errors import ( + InvalidIndexError, + NullFrequencyError, +) +from pandas.util._decorators import ( + Appender, + cache_readonly, + doc, +) + +from pandas.core.dtypes.common import ( + is_integer, + is_list_like, +) +from pandas.core.dtypes.concat import concat_compat +from pandas.core.dtypes.dtypes import CategoricalDtype + +from pandas.core.arrays import ( + DatetimeArray, + ExtensionArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin +import pandas.core.common as com +import pandas.core.indexes.base as ibase +from pandas.core.indexes.base import ( + Index, + _index_shared_docs, +) +from pandas.core.indexes.extension import NDArrayBackedExtensionIndex +from pandas.core.indexes.range import RangeIndex +from pandas.core.tools.timedeltas import to_timedelta + +if TYPE_CHECKING: + from collections.abc import Sequence + from datetime import datetime + + from pandas._typing import ( + Axis, + Self, + npt, + ) + + from pandas import CategoricalIndex + +_index_doc_kwargs = dict(ibase._index_doc_kwargs) + + +class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex, ABC): + """ + Common ops mixin to support a unified interface datetimelike Index. + """ + + _can_hold_strings = False + _data: DatetimeArray | TimedeltaArray | PeriodArray + + @doc(DatetimeLikeArrayMixin.mean) + def mean(self, *, skipna: bool = True, axis: int | None = 0): + return self._data.mean(skipna=skipna, axis=axis) + + @property + def freq(self) -> BaseOffset | None: + return self._data.freq + + @freq.setter + def freq(self, value) -> None: + # error: Property "freq" defined in "PeriodArray" is read-only [misc] + self._data.freq = value # type: ignore[misc] + + @property + def asi8(self) -> npt.NDArray[np.int64]: + return self._data.asi8 + + @property + @doc(DatetimeLikeArrayMixin.freqstr) + def freqstr(self) -> str | None: + return self._data.freqstr + + @cache_readonly + @abstractmethod + def _resolution_obj(self) -> Resolution: + ... + + @cache_readonly + @doc(DatetimeLikeArrayMixin.resolution) + def resolution(self) -> str: + return self._data.resolution + + # ------------------------------------------------------------------------ + + @cache_readonly + def hasnans(self) -> bool: + return self._data._hasna + + def equals(self, other: Any) -> bool: + """ + Determines if two Index objects contain the same elements. 
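+
+        Examples
+        --------
+        Editor's sketch (illustrative, not part of the upstream docstring):
+
+        >>> left = pd.DatetimeIndex(["2020-01-01"])
+        >>> left.equals(pd.DatetimeIndex(["2020-01-01"]))
+        True
+        >>> left.equals(pd.DatetimeIndex(["2020-01-01"], tz="UTC"))
+        False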
+ """ + if self.is_(other): + return True + + if not isinstance(other, Index): + return False + elif other.dtype.kind in "iufc": + return False + elif not isinstance(other, type(self)): + should_try = False + inferable = self._data._infer_matches + if other.dtype == object: + should_try = other.inferred_type in inferable + elif isinstance(other.dtype, CategoricalDtype): + other = cast("CategoricalIndex", other) + should_try = other.categories.inferred_type in inferable + + if should_try: + try: + other = type(self)(other) + except (ValueError, TypeError, OverflowError): + # e.g. + # ValueError -> cannot parse str entry, or OutOfBoundsDatetime + # TypeError -> trying to convert IntervalIndex to DatetimeIndex + # OverflowError -> Index([very_large_timedeltas]) + return False + + if self.dtype != other.dtype: + # have different timezone + return False + + return np.array_equal(self.asi8, other.asi8) + + @Appender(Index.__contains__.__doc__) + def __contains__(self, key: Any) -> bool: + hash(key) + try: + self.get_loc(key) + except (KeyError, TypeError, ValueError, InvalidIndexError): + return False + return True + + def _convert_tolerance(self, tolerance, target): + tolerance = np.asarray(to_timedelta(tolerance).to_numpy()) + return super()._convert_tolerance(tolerance, target) + + # -------------------------------------------------------------------- + # Rendering Methods + + def format( + self, + name: bool = False, + formatter: Callable | None = None, + na_rep: str = "NaT", + date_format: str | None = None, + ) -> list[str]: + """ + Render a string representation of the Index. + """ + header = [] + if name: + header.append( + ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) + if self.name is not None + else "" + ) + + if formatter is not None: + return header + list(self.map(formatter)) + + return self._format_with_header(header, na_rep=na_rep, date_format=date_format) + + def _format_with_header( + self, header: list[str], na_rep: str = "NaT", date_format: str | None = None + ) -> list[str]: + # matches base class except for whitespace padding and date_format + return header + list( + self._format_native_types(na_rep=na_rep, date_format=date_format) + ) + + @property + def _formatter_func(self): + return self._data._formatter() + + def _format_attrs(self): + """ + Return a list of tuples of the (attr,formatted_value). + """ + attrs = super()._format_attrs() + for attrib in self._attributes: + # iterating over _attributes prevents us from doing this for PeriodIndex + if attrib == "freq": + freq = self.freqstr + if freq is not None: + freq = repr(freq) # e.g. D -> 'D' + attrs.append(("freq", freq)) + return attrs + + @Appender(Index._summary.__doc__) + def _summary(self, name=None) -> str: + result = super()._summary(name=name) + if self.freq: + result += f"\nFreq: {self.freqstr}" + + return result + + # -------------------------------------------------------------------- + # Indexing Methods + + @final + def _can_partial_date_slice(self, reso: Resolution) -> bool: + # e.g. 
test_getitem_setitem_periodindex + # History of conversation GH#3452, GH#3931, GH#2369, GH#14826 + return reso > self._resolution_obj + # NB: for DTI/PI, not TDI + + def _parsed_string_to_bounds(self, reso: Resolution, parsed): + raise NotImplementedError + + def _parse_with_reso(self, label: str): + # overridden by TimedeltaIndex + try: + if self.freq is None or hasattr(self.freq, "rule_code"): + freq = self.freq + except NotImplementedError: + freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None)) + + freqstr: str | None + if freq is not None and not isinstance(freq, str): + freqstr = freq.rule_code + else: + freqstr = freq + + if isinstance(label, np.str_): + # GH#45580 + label = str(label) + + parsed, reso_str = parsing.parse_datetime_string_with_reso(label, freqstr) + reso = Resolution.from_attrname(reso_str) + return parsed, reso + + def _get_string_slice(self, key: str): + # overridden by TimedeltaIndex + parsed, reso = self._parse_with_reso(key) + try: + return self._partial_date_slice(reso, parsed) + except KeyError as err: + raise KeyError(key) from err + + @final + def _partial_date_slice( + self, + reso: Resolution, + parsed: datetime, + ) -> slice | npt.NDArray[np.intp]: + """ + Parameters + ---------- + reso : Resolution + parsed : datetime + + Returns + ------- + slice or ndarray[intp] + """ + if not self._can_partial_date_slice(reso): + raise ValueError + + t1, t2 = self._parsed_string_to_bounds(reso, parsed) + vals = self._data._ndarray + unbox = self._data._unbox + + if self.is_monotonic_increasing: + if len(self) and ( + (t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1]) + ): + # we are out of range + raise KeyError + + # TODO: does this depend on being monotonic _increasing_? + + # a monotonic (sorted) series can be sliced + left = vals.searchsorted(unbox(t1), side="left") + right = vals.searchsorted(unbox(t2), side="right") + return slice(left, right) + + else: + lhs_mask = vals >= unbox(t1) + rhs_mask = vals <= unbox(t2) + + # try to find the dates + return (lhs_mask & rhs_mask).nonzero()[0] + + def _maybe_cast_slice_bound(self, label, side: str): + """ + If label is a string, cast it to scalar type according to resolution. + + Parameters + ---------- + label : object + side : {'left', 'right'} + + Returns + ------- + label : object + + Notes + ----- + Value of `side` parameter should be validated in caller. + """ + if isinstance(label, str): + try: + parsed, reso = self._parse_with_reso(label) + except ValueError as err: + # DTI -> parsing.DateParseError + # TDI -> 'unit abbreviation w/o a number' + # PI -> string cannot be parsed as datetime-like + self._raise_invalid_indexer("slice", label, err) + + lower, upper = self._parsed_string_to_bounds(reso, parsed) + return lower if side == "left" else upper + elif not isinstance(label, self._data._recognized_scalars): + self._raise_invalid_indexer("slice", label) + + return label + + # -------------------------------------------------------------------- + # Arithmetic Methods + + def shift(self, periods: int = 1, freq=None) -> Self: + """ + Shift index by desired number of time frequency increments. + + This method is for shifting the values of datetime-like indexes + by a specified time increment a given number of times. + + Parameters + ---------- + periods : int, default 1 + Number of periods (or increments) to shift by, + can be positive or negative. + freq : pandas.DateOffset, pandas.Timedelta or string, optional + Frequency increment to shift by. 
+ If None, the index is shifted by its own `freq` attribute. + Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc. + + Returns + ------- + pandas.DatetimeIndex + Shifted index. + + See Also + -------- + Index.shift : Shift values of Index. + PeriodIndex.shift : Shift values of PeriodIndex. + """ + raise NotImplementedError + + # -------------------------------------------------------------------- + + @doc(Index._maybe_cast_listlike_indexer) + def _maybe_cast_listlike_indexer(self, keyarr): + try: + res = self._data._validate_listlike(keyarr, allow_object=True) + except (ValueError, TypeError): + if not isinstance(keyarr, ExtensionArray): + # e.g. we don't want to cast DTA to ndarray[object] + res = com.asarray_tuplesafe(keyarr) + # TODO: com.asarray_tuplesafe shouldn't cast e.g. DatetimeArray + else: + res = keyarr + return Index(res, dtype=res.dtype) + + +class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, ABC): + """ + Mixin class for methods shared by DatetimeIndex and TimedeltaIndex, + but not PeriodIndex + """ + + _data: DatetimeArray | TimedeltaArray + _comparables = ["name", "freq"] + _attributes = ["name", "freq"] + + # Compat for frequency inference, see GH#23789 + _is_monotonic_increasing = Index.is_monotonic_increasing + _is_monotonic_decreasing = Index.is_monotonic_decreasing + _is_unique = Index.is_unique + + _join_precedence = 10 + + @property + def unit(self) -> str: + return self._data.unit + + def as_unit(self, unit: str) -> Self: + """ + Convert to a dtype with the given unit resolution. + + Parameters + ---------- + unit : {'s', 'ms', 'us', 'ns'} + + Returns + ------- + same type as self + + Examples + -------- + For :class:`pandas.DatetimeIndex`: + + >>> idx = pd.DatetimeIndex(['2020-01-02 01:02:03.004005006']) + >>> idx + DatetimeIndex(['2020-01-02 01:02:03.004005006'], + dtype='datetime64[ns]', freq=None) + >>> idx.as_unit('s') + DatetimeIndex(['2020-01-02 01:02:03'], dtype='datetime64[s]', freq=None) + + For :class:`pandas.TimedeltaIndex`: + + >>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns']) + >>> tdelta_idx + TimedeltaIndex(['1 days 00:03:00.000002042'], + dtype='timedelta64[ns]', freq=None) + >>> tdelta_idx.as_unit('s') + TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None) + """ + arr = self._data.as_unit(unit) + return type(self)._simple_new(arr, name=self.name) + + def _with_freq(self, freq): + arr = self._data._with_freq(freq) + return type(self)._simple_new(arr, name=self._name) + + @property + def values(self) -> np.ndarray: + # NB: For Datetime64TZ this is lossy + data = self._data._ndarray + if using_copy_on_write(): + data = data.view() + data.flags.writeable = False + return data + + @doc(DatetimeIndexOpsMixin.shift) + def shift(self, periods: int = 1, freq=None) -> Self: + if freq is not None and freq != self.freq: + if isinstance(freq, str): + freq = to_offset(freq) + offset = periods * freq + return self + offset + + if periods == 0 or len(self) == 0: + # GH#14811 empty case + return self.copy() + + if self.freq is None: + raise NullFrequencyError("Cannot shift with no freq") + + start = self[0] + periods * self.freq + end = self[-1] + periods * self.freq + + # Note: in the DatetimeTZ case, _generate_range will infer the + # appropriate timezone from `start` and `end`, so tz does not need + # to be passed explicitly. 
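+        # Editor's sketch (assumed values): for a daily index
+        # pd.date_range("2020-01-01", periods=3), shift(2) computes
+        # start=2020-01-03 and end=2020-01-05 and regenerates the range,
+        # preserving freq='D'.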
+ result = self._data._generate_range( + start=start, end=end, periods=None, freq=self.freq + ) + return type(self)._simple_new(result, name=self.name) + + @cache_readonly + @doc(DatetimeLikeArrayMixin.inferred_freq) + def inferred_freq(self) -> str | None: + return self._data.inferred_freq + + # -------------------------------------------------------------------- + # Set Operation Methods + + @cache_readonly + def _as_range_index(self) -> RangeIndex: + # Convert our i8 representations to RangeIndex + # Caller is responsible for checking isinstance(self.freq, Tick) + freq = cast(Tick, self.freq) + tick = freq.delta._value + rng = range(self[0]._value, self[-1]._value + tick, tick) + return RangeIndex(rng) + + def _can_range_setop(self, other) -> bool: + return isinstance(self.freq, Tick) and isinstance(other.freq, Tick) + + def _wrap_range_setop(self, other, res_i8) -> Self: + new_freq = None + if not len(res_i8): + # RangeIndex defaults to step=1, which we don't want. + new_freq = self.freq + elif isinstance(res_i8, RangeIndex): + new_freq = to_offset(Timedelta(res_i8.step)) + + # TODO(GH#41493): we cannot just do + # type(self._data)(res_i8.values, dtype=self.dtype, freq=new_freq) + # because test_setops_preserve_freq fails with _validate_frequency raising. + # This raising is incorrect, as 'on_freq' is incorrect. This will + # be fixed by GH#41493 + res_values = res_i8.values.view(self._data._ndarray.dtype) + result = type(self._data)._simple_new( + # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has + # incompatible type "Union[dtype[Any], ExtensionDtype]"; expected + # "Union[dtype[datetime64], DatetimeTZDtype]" + res_values, + dtype=self.dtype, # type: ignore[arg-type] + freq=new_freq, # type: ignore[arg-type] + ) + return cast("Self", self._wrap_setop_result(other, result)) + + def _range_intersect(self, other, sort) -> Self: + # Dispatch to RangeIndex intersection logic. + left = self._as_range_index + right = other._as_range_index + res_i8 = left.intersection(right, sort=sort) + return self._wrap_range_setop(other, res_i8) + + def _range_union(self, other, sort) -> Self: + # Dispatch to RangeIndex union logic. + left = self._as_range_index + right = other._as_range_index + res_i8 = left.union(right, sort=sort) + return self._wrap_range_setop(other, res_i8) + + def _intersection(self, other: Index, sort: bool = False) -> Index: + """ + intersection specialized to the case with matching dtypes and both non-empty. + """ + other = cast("DatetimeTimedeltaMixin", other) + + if self._can_range_setop(other): + return self._range_intersect(other, sort=sort) + + if not self._can_fast_intersect(other): + result = Index._intersection(self, other, sort=sort) + # We need to invalidate the freq because Index._intersection + # uses _shallow_copy on a view of self._data, which will preserve + # self.freq if we're not careful. 
+ # At this point we should have result.dtype == self.dtype + # and type(result) is type(self._data) + result = self._wrap_setop_result(other, result) + return result._with_freq(None)._with_freq("infer") + + else: + return self._fast_intersect(other, sort) + + def _fast_intersect(self, other, sort): + # to make our life easier, "sort" the two ranges + if self[0] <= other[0]: + left, right = self, other + else: + left, right = other, self + + # after sorting, the intersection always starts with the right index + # and ends with the index of which the last elements is smallest + end = min(left[-1], right[-1]) + start = right[0] + + if end < start: + result = self[:0] + else: + lslice = slice(*left.slice_locs(start, end)) + result = left._values[lslice] + + return result + + def _can_fast_intersect(self, other: Self) -> bool: + # Note: we only get here with len(self) > 0 and len(other) > 0 + if self.freq is None: + return False + + elif other.freq != self.freq: + return False + + elif not self.is_monotonic_increasing: + # Because freq is not None, we must then be monotonic decreasing + return False + + # this along with matching freqs ensure that we "line up", + # so intersection will preserve freq + # Note we are assuming away Ticks, as those go through _range_intersect + # GH#42104 + return self.freq.n == 1 + + def _can_fast_union(self, other: Self) -> bool: + # Assumes that type(self) == type(other), as per the annotation + # The ability to fast_union also implies that `freq` should be + # retained on union. + freq = self.freq + + if freq is None or freq != other.freq: + return False + + if not self.is_monotonic_increasing: + # Because freq is not None, we must then be monotonic decreasing + # TODO: do union on the reversed indexes? + return False + + if len(self) == 0 or len(other) == 0: + # only reached via union_many + return True + + # to make our life easier, "sort" the two ranges + if self[0] <= other[0]: + left, right = self, other + else: + left, right = other, self + + right_start = right[0] + left_end = left[-1] + + # Only need to "adjoin", not overlap + return (right_start == left_end + freq) or right_start in left + + def _fast_union(self, other: Self, sort=None) -> Self: + # Caller is responsible for ensuring self and other are non-empty + + # to make our life easier, "sort" the two ranges + if self[0] <= other[0]: + left, right = self, other + elif sort is False: + # TDIs are not in the "correct" order and we don't want + # to sort but want to remove overlaps + left, right = self, other + left_start = left[0] + loc = right.searchsorted(left_start, side="left") + right_chunk = right._values[:loc] + dates = concat_compat((left._values, right_chunk)) + result = type(self)._simple_new(dates, name=self.name) + return result + else: + left, right = other, self + + left_end = left[-1] + right_end = right[-1] + + # concatenate + if left_end < right_end: + loc = right.searchsorted(left_end, side="right") + right_chunk = right._values[loc:] + dates = concat_compat([left._values, right_chunk]) + # The can_fast_union check ensures that the result.freq + # should match self.freq + dates = type(self._data)(dates, freq=self.freq) + result = type(self)._simple_new(dates) + return result + else: + return left + + def _union(self, other, sort): + # We are called by `union`, which is responsible for this validation + assert isinstance(other, type(self)) + assert self.dtype == other.dtype + + if self._can_range_setop(other): + return self._range_union(other, sort=sort) + + if 
self._can_fast_union(other): + result = self._fast_union(other, sort=sort) + # in the case with sort=None, the _can_fast_union check ensures + # that result.freq == self.freq + return result + else: + return super()._union(other, sort)._with_freq("infer") + + # -------------------------------------------------------------------- + # Join Methods + + def _get_join_freq(self, other): + """ + Get the freq to attach to the result of a join operation. + """ + freq = None + if self._can_fast_union(other): + freq = self.freq + return freq + + def _wrap_joined_index( + self, joined, other, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp] + ): + assert other.dtype == self.dtype, (other.dtype, self.dtype) + result = super()._wrap_joined_index(joined, other, lidx, ridx) + result._data._freq = self._get_join_freq(other) + return result + + def _get_engine_target(self) -> np.ndarray: + # engine methods and libjoin methods need dt64/td64 values cast to i8 + return self._data._ndarray.view("i8") + + def _from_join_target(self, result: np.ndarray): + # view e.g. i8 back to M8[ns] + result = result.view(self._data._ndarray.dtype) + return self._data._from_backing_data(result) + + # -------------------------------------------------------------------- + # List-like Methods + + def _get_delete_freq(self, loc: int | slice | Sequence[int]): + """ + Find the `freq` for self.delete(loc). + """ + freq = None + if self.freq is not None: + if is_integer(loc): + if loc in (0, -len(self), -1, len(self) - 1): + freq = self.freq + else: + if is_list_like(loc): + # error: Incompatible types in assignment (expression has + # type "Union[slice, ndarray]", variable has type + # "Union[int, slice, Sequence[int]]") + loc = lib.maybe_indices_to_slice( # type: ignore[assignment] + np.asarray(loc, dtype=np.intp), len(self) + ) + if isinstance(loc, slice) and loc.step in (1, None): + if loc.start in (0, None) or loc.stop in (len(self), None): + freq = self.freq + return freq + + def _get_insert_freq(self, loc: int, item): + """ + Find the `freq` for self.insert(loc, item). + """ + value = self._data._validate_scalar(item) + item = self._data._box_func(value) + + freq = None + if self.freq is not None: + # freq can be preserved on edge cases + if self.size: + if item is NaT: + pass + elif loc in (0, -len(self)) and item + self.freq == self[0]: + freq = self.freq + elif (loc == len(self)) and item - self.freq == self[-1]: + freq = self.freq + else: + # Adding a single item to an empty index may preserve freq + if isinstance(self.freq, Tick): + # all TimedeltaIndex cases go through here; is_on_offset + # would raise TypeError + freq = self.freq + elif self.freq.is_on_offset(item): + freq = self.freq + return freq + + @doc(NDArrayBackedExtensionIndex.delete) + def delete(self, loc) -> Self: + result = super().delete(loc) + result._data._freq = self._get_delete_freq(loc) + return result + + @doc(NDArrayBackedExtensionIndex.insert) + def insert(self, loc: int, item): + result = super().insert(loc, item) + if isinstance(result, type(self)): + # i.e. 
parent class method did not cast + result._data._freq = self._get_insert_freq(loc, item) + return result + + # -------------------------------------------------------------------- + # NDArray-Like Methods + + @Appender(_index_shared_docs["take"] % _index_doc_kwargs) + def take( + self, + indices, + axis: Axis = 0, + allow_fill: bool = True, + fill_value=None, + **kwargs, + ) -> Self: + nv.validate_take((), kwargs) + indices = np.asarray(indices, dtype=np.intp) + + result = NDArrayBackedExtensionIndex.take( + self, indices, axis, allow_fill, fill_value, **kwargs + ) + + maybe_slice = lib.maybe_indices_to_slice(indices, len(self)) + if isinstance(maybe_slice, slice): + freq = self._data._get_getitem_freq(maybe_slice) + result._data._freq = freq + return result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/datetimes.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/datetimes.py new file mode 100644 index 00000000..dcb5f8ca --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/datetimes.py @@ -0,0 +1,1128 @@ +from __future__ import annotations + +import datetime as dt +import operator +from typing import TYPE_CHECKING +import warnings + +import numpy as np +import pytz + +from pandas._libs import ( + NaT, + Period, + Timestamp, + index as libindex, + lib, +) +from pandas._libs.tslibs import ( + Resolution, + periods_per_day, + timezones, + to_offset, +) +from pandas._libs.tslibs.offsets import prefix_mapping +from pandas.util._decorators import ( + cache_readonly, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import is_scalar +from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.missing import is_valid_na_for_dtype + +from pandas.core.arrays.datetimes import ( + DatetimeArray, + tz_to_dtype, +) +import pandas.core.common as com +from pandas.core.indexes.base import ( + Index, + maybe_extract_name, +) +from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin +from pandas.core.indexes.extension import inherit_names +from pandas.core.tools.times import to_time + +if TYPE_CHECKING: + from collections.abc import Hashable + + from pandas._typing import ( + Dtype, + DtypeObj, + Frequency, + IntervalClosedType, + Self, + TimeAmbiguous, + TimeNonexistent, + npt, + ) + + from pandas.core.api import ( + DataFrame, + PeriodIndex, + ) + + +def _new_DatetimeIndex(cls, d): + """ + This is called upon unpickling, rather than the default which doesn't + have arguments and breaks __new__ + """ + if "data" in d and not isinstance(d["data"], DatetimeIndex): + # Avoid need to verify integrity by calling simple_new directly + data = d.pop("data") + if not isinstance(data, DatetimeArray): + # For backward compat with older pickles, we may need to construct + # a DatetimeArray to adapt to the newer _simple_new signature + tz = d.pop("tz") + freq = d.pop("freq") + dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq) + else: + dta = data + for key in ["tz", "freq"]: + # These are already stored in our DatetimeArray; if they are + # also in the pickle and don't match, we have a problem. 
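The freq bookkeeping in _get_delete_freq, _get_insert_freq, and take above can be sketched as follows (a hedged illustration, assuming a recent pandas; exact reprs may vary by version):

import pandas as pd

dti = pd.date_range("2024-01-01", periods=6, freq="D")

print(dti.delete(0).freq)  # trimming an edge element keeps the freq
print(dti.delete(3).freq)  # removing an interior point drops it (None)

nxt = dti[-1] + dti.freq
print(dti.insert(len(dti), nxt).freq)  # appending the next point keeps freq

# Evenly spaced indices are converted to a step-2 slice inside take,
# so the result is expected to carry a doubled freq rather than None.
print(dti.take([0, 2, 4]).freq)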
+ if key in d: + assert d[key] == getattr(dta, key) + d.pop(key) + result = cls._simple_new(dta, **d) + else: + with warnings.catch_warnings(): + # TODO: If we knew what was going in to **d, we might be able to + # go through _simple_new instead + warnings.simplefilter("ignore") + result = cls.__new__(cls, **d) + + return result + + +@inherit_names( + DatetimeArray._field_ops + + [ + method + for method in DatetimeArray._datetimelike_methods + if method not in ("tz_localize", "tz_convert", "strftime") + ], + DatetimeArray, + wrap=True, +) +@inherit_names(["is_normalized"], DatetimeArray, cache=True) +@inherit_names( + [ + "tz", + "tzinfo", + "dtype", + "to_pydatetime", + "_format_native_types", + "date", + "time", + "timetz", + "std", + ] + + DatetimeArray._bool_ops, + DatetimeArray, +) +class DatetimeIndex(DatetimeTimedeltaMixin): + """ + Immutable ndarray-like of datetime64 data. + + Represented internally as int64, and which can be boxed to Timestamp objects + that are subclasses of datetime and carry metadata. + + .. versionchanged:: 2.0.0 + The various numeric date/time attributes (:attr:`~DatetimeIndex.day`, + :attr:`~DatetimeIndex.month`, :attr:`~DatetimeIndex.year` etc.) now have dtype + ``int32``. Previously they had dtype ``int64``. + + Parameters + ---------- + data : array-like (1-dimensional) + Datetime-like data to construct index with. + freq : str or pandas offset object, optional + One of pandas date offset strings or corresponding objects. The string + 'infer' can be passed in order to set the frequency of the index as the + inferred frequency upon creation. + tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str + Set the Timezone of the data. + normalize : bool, default False + Normalize start/end dates to midnight before generating date range. + + .. deprecated:: 2.1.0 + + closed : {'left', 'right'}, optional + Set whether to include `start` and `end` that are on the + boundary. The default includes boundary points on either end. + + .. deprecated:: 2.1.0 + + ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' + When clocks moved backward due to DST, ambiguous times may arise. + For example in Central European Time (UTC+01), when going from 03:00 + DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC + and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter + dictates how ambiguous times should be handled. + + - 'infer' will attempt to infer fall dst-transition hours based on + order + - bool-ndarray where True signifies a DST time, False signifies a + non-DST time (note that this flag is only applicable for ambiguous + times) + - 'NaT' will return NaT where there are ambiguous times + - 'raise' will raise an AmbiguousTimeError if there are ambiguous times. + dayfirst : bool, default False + If True, parse dates in `data` with the day first order. + yearfirst : bool, default False + If True parse dates in `data` with the year first order. + dtype : numpy.dtype or DatetimeTZDtype or str, default None + Note that the only NumPy dtype allowed is `datetime64[ns]`. + copy : bool, default False + Make a copy of input ndarray. + name : label, default None + Name to be stored in the index. 
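The `ambiguous` parameter documented above can be exercised through tz_localize as well; a small sketch using the 2023 Berlin fall-back transition as an assumed example:

import numpy as np
import pandas as pd

# 02:30 occurs twice when Berlin leaves DST on 2023-10-29; the boolean
# array marks which of the repeated wall times should count as DST.
naive = pd.DatetimeIndex(["2023-10-29 02:30", "2023-10-29 02:30"])
localized = naive.tz_localize("Europe/Berlin", ambiguous=np.array([True, False]))
print(localized)  # first stamp at +02:00 (DST), second at +01:00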
+ + Attributes + ---------- + year + month + day + hour + minute + second + microsecond + nanosecond + date + time + timetz + dayofyear + day_of_year + weekofyear + week + dayofweek + day_of_week + weekday + quarter + tz + freq + freqstr + is_month_start + is_month_end + is_quarter_start + is_quarter_end + is_year_start + is_year_end + is_leap_year + inferred_freq + + Methods + ------- + normalize + strftime + snap + tz_convert + tz_localize + round + floor + ceil + to_period + to_pydatetime + to_series + to_frame + month_name + day_name + mean + std + + See Also + -------- + Index : The base pandas Index type. + TimedeltaIndex : Index of timedelta64 data. + PeriodIndex : Index of Period data. + to_datetime : Convert argument to datetime. + date_range : Create a fixed-frequency DatetimeIndex. + + Notes + ----- + To learn more about the frequency strings, please see `this link + `__. + + Examples + -------- + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) + >>> idx + DatetimeIndex(['2020-01-01 10:00:00+00:00', '2020-02-01 11:00:00+00:00'], + dtype='datetime64[ns, UTC]', freq=None) + """ + + _typ = "datetimeindex" + + _data_cls = DatetimeArray + _supports_partial_string_indexing = True + + @property + def _engine_type(self) -> type[libindex.DatetimeEngine]: + return libindex.DatetimeEngine + + _data: DatetimeArray + tz: dt.tzinfo | None + + # -------------------------------------------------------------------- + # methods that dispatch to DatetimeArray and wrap result + + @doc(DatetimeArray.strftime) + def strftime(self, date_format) -> Index: + arr = self._data.strftime(date_format) + return Index(arr, name=self.name, dtype=object) + + @doc(DatetimeArray.tz_convert) + def tz_convert(self, tz) -> Self: + arr = self._data.tz_convert(tz) + return type(self)._simple_new(arr, name=self.name, refs=self._references) + + @doc(DatetimeArray.tz_localize) + def tz_localize( + self, + tz, + ambiguous: TimeAmbiguous = "raise", + nonexistent: TimeNonexistent = "raise", + ) -> Self: + arr = self._data.tz_localize(tz, ambiguous, nonexistent) + return type(self)._simple_new(arr, name=self.name) + + @doc(DatetimeArray.to_period) + def to_period(self, freq=None) -> PeriodIndex: + from pandas.core.indexes.api import PeriodIndex + + arr = self._data.to_period(freq) + return PeriodIndex._simple_new(arr, name=self.name) + + @doc(DatetimeArray.to_julian_date) + def to_julian_date(self) -> Index: + arr = self._data.to_julian_date() + return Index._simple_new(arr, name=self.name) + + @doc(DatetimeArray.isocalendar) + def isocalendar(self) -> DataFrame: + df = self._data.isocalendar() + return df.set_index(self) + + @cache_readonly + def _resolution_obj(self) -> Resolution: + return self._data._resolution_obj + + # -------------------------------------------------------------------- + # Constructors + + def __new__( + cls, + data=None, + freq: Frequency | lib.NoDefault = lib.no_default, + tz=lib.no_default, + normalize: bool | lib.NoDefault = lib.no_default, + closed=lib.no_default, + ambiguous: TimeAmbiguous = "raise", + dayfirst: bool = False, + yearfirst: bool = False, + dtype: Dtype | None = None, + copy: bool = False, + name: Hashable | None = None, + ) -> Self: + if closed is not lib.no_default: + # GH#52628 + warnings.warn( + f"The 'closed' keyword in {cls.__name__} construction is " + "deprecated and will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if normalize is not lib.no_default: + # GH#52628 + warnings.warn( + f"The 
'normalize' keyword in {cls.__name__} construction is " + "deprecated and will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if is_scalar(data): + cls._raise_scalar_data_error(data) + + # - Cases checked above all return/raise before reaching here - # + + name = maybe_extract_name(name, data, cls) + + if ( + isinstance(data, DatetimeArray) + and freq is lib.no_default + and tz is lib.no_default + and dtype is None + ): + # fastpath, similar logic in TimedeltaIndex.__new__; + # Note in this particular case we retain non-nano. + if copy: + data = data.copy() + return cls._simple_new(data, name=name) + + dtarr = DatetimeArray._from_sequence_not_strict( + data, + dtype=dtype, + copy=copy, + tz=tz, + freq=freq, + dayfirst=dayfirst, + yearfirst=yearfirst, + ambiguous=ambiguous, + ) + refs = None + if not copy and isinstance(data, (Index, ABCSeries)): + refs = data._references + + subarr = cls._simple_new(dtarr, name=name, refs=refs) + return subarr + + # -------------------------------------------------------------------- + + @cache_readonly + def _is_dates_only(self) -> bool: + """ + Return a boolean if we are only dates (and don't have a timezone) + + Returns + ------- + bool + """ + + from pandas.io.formats.format import is_dates_only + + delta = getattr(self.freq, "delta", None) + + if delta and delta % dt.timedelta(days=1) != dt.timedelta(days=0): + return False + + # error: Argument 1 to "is_dates_only" has incompatible type + # "Union[ExtensionArray, ndarray]"; expected "Union[ndarray, + # DatetimeArray, Index, DatetimeIndex]" + + return self.tz is None and is_dates_only(self._values) # type: ignore[arg-type] + + def __reduce__(self): + d = {"data": self._data, "name": self.name} + return _new_DatetimeIndex, (type(self), d), None + + def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: + """ + Can we compare values of the given dtype to our own? + """ + if self.tz is not None: + # If we have tz, we can compare to tzaware + return isinstance(dtype, DatetimeTZDtype) + # if we dont have tz, we can only compare to tznaive + return lib.is_np_dtype(dtype, "M") + + # -------------------------------------------------------------------- + # Rendering Methods + + @property + def _formatter_func(self): + from pandas.io.formats.format import get_format_datetime64 + + formatter = get_format_datetime64(is_dates_only_=self._is_dates_only) + return lambda x: f"'{formatter(x)}'" + + # -------------------------------------------------------------------- + # Set Operation Methods + + def _can_range_setop(self, other) -> bool: + # GH 46702: If self or other have non-UTC tzs, DST transitions prevent + # range representation due to no singular step + if ( + self.tz is not None + and not timezones.is_utc(self.tz) + and not timezones.is_fixed_offset(self.tz) + ): + return False + if ( + other.tz is not None + and not timezones.is_utc(other.tz) + and not timezones.is_fixed_offset(other.tz) + ): + return False + return super()._can_range_setop(other) + + # -------------------------------------------------------------------- + + def _get_time_micros(self) -> npt.NDArray[np.int64]: + """ + Return the number of microseconds since midnight. 
+ + Returns + ------- + ndarray[int64_t] + """ + values = self._data._local_timestamps() + + ppd = periods_per_day(self._data._creso) + + frac = values % ppd + if self.unit == "ns": + micros = frac // 1000 + elif self.unit == "us": + micros = frac + elif self.unit == "ms": + micros = frac * 1000 + elif self.unit == "s": + micros = frac * 1_000_000 + else: # pragma: no cover + raise NotImplementedError(self.unit) + + micros[self._isnan] = -1 + return micros + + def snap(self, freq: Frequency = "S") -> DatetimeIndex: + """ + Snap time stamps to nearest occurring frequency. + + Returns + ------- + DatetimeIndex + + Examples + -------- + >>> idx = pd.DatetimeIndex(['2023-01-01', '2023-01-02', + ... '2023-02-01', '2023-02-02']) + >>> idx + DatetimeIndex(['2023-01-01', '2023-01-02', '2023-02-01', '2023-02-02'], + dtype='datetime64[ns]', freq=None) + >>> idx.snap('MS') + DatetimeIndex(['2023-01-01', '2023-01-01', '2023-02-01', '2023-02-01'], + dtype='datetime64[ns]', freq=None) + """ + # Superdumb, punting on any optimizing + freq = to_offset(freq) + + dta = self._data.copy() + + for i, v in enumerate(self): + s = v + if not freq.is_on_offset(s): + t0 = freq.rollback(s) + t1 = freq.rollforward(s) + if abs(s - t0) < abs(t1 - s): + s = t0 + else: + s = t1 + dta[i] = s + + return DatetimeIndex._simple_new(dta, name=self.name) + + # -------------------------------------------------------------------- + # Indexing Methods + + def _parsed_string_to_bounds(self, reso: Resolution, parsed: dt.datetime): + """ + Calculate datetime bounds for parsed time string and its resolution. + + Parameters + ---------- + reso : Resolution + Resolution provided by parsed string. + parsed : datetime + Datetime from parsed string. + + Returns + ------- + lower, upper: pd.Timestamp + """ + per = Period(parsed, freq=reso.attr_abbrev) + start, end = per.start_time, per.end_time + + # GH 24076 + # If an incoming date string contained a UTC offset, need to localize + # the parsed date to this offset first before aligning with the index's + # timezone + start = start.tz_localize(parsed.tzinfo) + end = end.tz_localize(parsed.tzinfo) + + if parsed.tzinfo is not None: + if self.tz is None: + raise ValueError( + "The index must be timezone aware when indexing " + "with a date string with a UTC offset" + ) + # The flipped case with parsed.tz is None and self.tz is not None + # is ruled out bc parsed and reso are produced by _parse_with_reso, + # which localizes parsed. + return start, end + + def _parse_with_reso(self, label: str): + parsed, reso = super()._parse_with_reso(label) + + parsed = Timestamp(parsed) + + if self.tz is not None and parsed.tzinfo is None: + # we special-case timezone-naive strings and timezone-aware + # DatetimeIndex + # https://github.com/pandas-dev/pandas/pull/36148#issuecomment-687883081 + parsed = parsed.tz_localize(self.tz) + + return parsed, reso + + def _disallow_mismatched_indexing(self, key) -> None: + """ + Check for mismatched-tzawareness indexing and re-raise as KeyError. 
+ """ + # we get here with isinstance(key, self._data._recognized_scalars) + try: + # GH#36148 + self._data._assert_tzawareness_compat(key) + except TypeError as err: + raise KeyError(key) from err + + def get_loc(self, key): + """ + Get integer location for requested label + + Returns + ------- + loc : int + """ + self._check_indexing_error(key) + + orig_key = key + if is_valid_na_for_dtype(key, self.dtype): + key = NaT + + if isinstance(key, self._data._recognized_scalars): + # needed to localize naive datetimes + self._disallow_mismatched_indexing(key) + key = Timestamp(key) + + elif isinstance(key, str): + try: + parsed, reso = self._parse_with_reso(key) + except (ValueError, pytz.NonExistentTimeError) as err: + raise KeyError(key) from err + self._disallow_mismatched_indexing(parsed) + + if self._can_partial_date_slice(reso): + try: + return self._partial_date_slice(reso, parsed) + except KeyError as err: + raise KeyError(key) from err + + key = parsed + + elif isinstance(key, dt.timedelta): + # GH#20464 + raise TypeError( + f"Cannot index {type(self).__name__} with {type(key).__name__}" + ) + + elif isinstance(key, dt.time): + return self.indexer_at_time(key) + + else: + # unrecognized type + raise KeyError(key) + + try: + return Index.get_loc(self, key) + except KeyError as err: + raise KeyError(orig_key) from err + + @doc(DatetimeTimedeltaMixin._maybe_cast_slice_bound) + def _maybe_cast_slice_bound(self, label, side: str): + # GH#42855 handle date here instead of get_slice_bound + if isinstance(label, dt.date) and not isinstance(label, dt.datetime): + # Pandas supports slicing with dates, treated as datetimes at midnight. + # https://github.com/pandas-dev/pandas/issues/31501 + label = Timestamp(label).to_pydatetime() + + label = super()._maybe_cast_slice_bound(label, side) + self._data._assert_tzawareness_compat(label) + return Timestamp(label) + + def slice_indexer(self, start=None, end=None, step=None): + """ + Return indexer for specified label slice. + Index.slice_indexer, customized to handle time slicing. + + In addition to functionality provided by Index.slice_indexer, does the + following: + + - if both `start` and `end` are instances of `datetime.time`, it + invokes `indexer_between_time` + - if `start` and `end` are both either string or None perform + value-based selection in non-monotonic cases. + + """ + # For historical reasons DatetimeIndex supports slices between two + # instances of datetime.time as if it were applying a slice mask to + # an array of (self.hour, self.minute, self.seconds, self.microsecond). 
+ if isinstance(start, dt.time) and isinstance(end, dt.time): + if step is not None and step != 1: + raise ValueError("Must have step size of 1 with time slices") + return self.indexer_between_time(start, end) + + if isinstance(start, dt.time) or isinstance(end, dt.time): + raise KeyError("Cannot mix time and non-time slice keys") + + def check_str_or_none(point) -> bool: + return point is not None and not isinstance(point, str) + + # GH#33146 if start and end are combinations of str and None and Index is not + # monotonic, we can not use Index.slice_indexer because it does not honor the + # actual elements, is only searching for start and end + if ( + check_str_or_none(start) + or check_str_or_none(end) + or self.is_monotonic_increasing + ): + return Index.slice_indexer(self, start, end, step) + + mask = np.array(True) + in_index = True + if start is not None: + start_casted = self._maybe_cast_slice_bound(start, "left") + mask = start_casted <= self + in_index &= (start_casted == self).any() + + if end is not None: + end_casted = self._maybe_cast_slice_bound(end, "right") + mask = (self <= end_casted) & mask + in_index &= (end_casted == self).any() + + if not in_index: + raise KeyError( + "Value based partial slicing on non-monotonic DatetimeIndexes " + "with non-existing keys is not allowed.", + ) + indexer = mask.nonzero()[0][::step] + if len(indexer) == len(self): + return slice(None) + else: + return indexer + + # -------------------------------------------------------------------- + + @property + def inferred_type(self) -> str: + # b/c datetime is represented as microseconds since the epoch, make + # sure we can't have ambiguous indexing + return "datetime64" + + def indexer_at_time(self, time, asof: bool = False) -> npt.NDArray[np.intp]: + """ + Return index locations of values at particular time of day. + + Parameters + ---------- + time : datetime.time or str + Time passed in either as object (datetime.time) or as string in + appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p", + "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"). + + Returns + ------- + np.ndarray[np.intp] + + See Also + -------- + indexer_between_time : Get index locations of values between particular + times of day. + DataFrame.at_time : Select values at particular time of day. + + Examples + -------- + >>> idx = pd.DatetimeIndex(["1/1/2020 10:00", "2/1/2020 11:00", + ... "3/1/2020 10:00"]) + >>> idx.indexer_at_time("10:00") + array([0, 2]) + """ + if asof: + raise NotImplementedError("'asof' argument is not supported") + + if isinstance(time, str): + from dateutil.parser import parse + + time = parse(time).time() + + if time.tzinfo: + if self.tz is None: + raise ValueError("Index must be timezone aware.") + time_micros = self.tz_convert(time.tzinfo)._get_time_micros() + else: + time_micros = self._get_time_micros() + micros = _time_to_micros(time) + return (time_micros == micros).nonzero()[0] + + def indexer_between_time( + self, start_time, end_time, include_start: bool = True, include_end: bool = True + ) -> npt.NDArray[np.intp]: + """ + Return index locations of values between particular times of day. + + Parameters + ---------- + start_time, end_time : datetime.time, str + Time passed either as object (datetime.time) or as string in + appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p", + "%H:%M:%S", "%H%M%S", "%I:%M:%S%p","%I%M%S%p"). 
+ include_start : bool, default True + include_end : bool, default True + + Returns + ------- + np.ndarray[np.intp] + + See Also + -------- + indexer_at_time : Get index locations of values at particular time of day. + DataFrame.between_time : Select values between particular times of day. + + Examples + -------- + >>> idx = pd.date_range("2023-01-01", periods=4, freq="H") + >>> idx + DatetimeIndex(['2023-01-01 00:00:00', '2023-01-01 01:00:00', + '2023-01-01 02:00:00', '2023-01-01 03:00:00'], + dtype='datetime64[ns]', freq='H') + >>> idx.indexer_between_time("00:00", "2:00", include_end=False) + array([0, 1]) + """ + start_time = to_time(start_time) + end_time = to_time(end_time) + time_micros = self._get_time_micros() + start_micros = _time_to_micros(start_time) + end_micros = _time_to_micros(end_time) + + if include_start and include_end: + lop = rop = operator.le + elif include_start: + lop = operator.le + rop = operator.lt + elif include_end: + lop = operator.lt + rop = operator.le + else: + lop = rop = operator.lt + + if start_time <= end_time: + join_op = operator.and_ + else: + join_op = operator.or_ + + mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros)) + + return mask.nonzero()[0] + + +def date_range( + start=None, + end=None, + periods=None, + freq=None, + tz=None, + normalize: bool = False, + name: Hashable | None = None, + inclusive: IntervalClosedType = "both", + *, + unit: str | None = None, + **kwargs, +) -> DatetimeIndex: + """ + Return a fixed frequency DatetimeIndex. + + Returns the range of equally spaced time points (where the difference between any + two adjacent points is specified by the given frequency) such that they all + satisfy `start <[=] x <[=] end`, where the first one and the last one are, resp., + the first and last time points in that range that fall on the boundary of ``freq`` + (if given as a frequency string) or that are valid for ``freq`` (if given as a + :class:`pandas.tseries.offsets.DateOffset`). (If exactly one of ``start``, + ``end``, or ``freq`` is *not* specified, this missing parameter can be computed + given ``periods``, the number of timesteps in the range. See the note below.) + + Parameters + ---------- + start : str or datetime-like, optional + Left bound for generating dates. + end : str or datetime-like, optional + Right bound for generating dates. + periods : int, optional + Number of periods to generate. + freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'D' + Frequency strings can have multiples, e.g. '5H'. See + :ref:`here ` for a list of + frequency aliases. + tz : str or tzinfo, optional + Time zone name for returning localized DatetimeIndex, for example + 'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is + timezone-naive unless timezone-aware datetime-likes are passed. + normalize : bool, default False + Normalize start/end dates to midnight before generating date range. + name : str, default None + Name of the resulting DatetimeIndex. + inclusive : {"both", "neither", "left", "right"}, default "both" + Include boundaries; Whether to set each bound as closed or open. + + .. versionadded:: 1.4.0 + unit : str, default None + Specify the desired resolution of the result. + + .. versionadded:: 2.0.0 + **kwargs + For compatibility. Has no effect on the result. + + Returns + ------- + DatetimeIndex + + See Also + -------- + DatetimeIndex : An immutable container for datetimes. + timedelta_range : Return a fixed frequency TimedeltaIndex. 
+ period_range : Return a fixed frequency PeriodIndex. + interval_range : Return a fixed frequency IntervalIndex. + + Notes + ----- + Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, + exactly three must be specified. If ``freq`` is omitted, the resulting + ``DatetimeIndex`` will have ``periods`` linearly spaced elements between + ``start`` and ``end`` (closed on both sides). + + To learn more about the frequency strings, please see `this link + `__. + + Examples + -------- + **Specifying the values** + + The next four examples generate the same `DatetimeIndex`, but vary + the combination of `start`, `end` and `periods`. + + Specify `start` and `end`, with the default daily frequency. + + >>> pd.date_range(start='1/1/2018', end='1/08/2018') + DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', + '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'], + dtype='datetime64[ns]', freq='D') + + Specify timezone-aware `start` and `end`, with the default daily frequency. + + >>> pd.date_range( + ... start=pd.to_datetime("1/1/2018").tz_localize("Europe/Berlin"), + ... end=pd.to_datetime("1/08/2018").tz_localize("Europe/Berlin"), + ... ) + DatetimeIndex(['2018-01-01 00:00:00+01:00', '2018-01-02 00:00:00+01:00', + '2018-01-03 00:00:00+01:00', '2018-01-04 00:00:00+01:00', + '2018-01-05 00:00:00+01:00', '2018-01-06 00:00:00+01:00', + '2018-01-07 00:00:00+01:00', '2018-01-08 00:00:00+01:00'], + dtype='datetime64[ns, Europe/Berlin]', freq='D') + + Specify `start` and `periods`, the number of periods (days). + + >>> pd.date_range(start='1/1/2018', periods=8) + DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', + '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'], + dtype='datetime64[ns]', freq='D') + + Specify `end` and `periods`, the number of periods (days). + + >>> pd.date_range(end='1/1/2018', periods=8) + DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28', + '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'], + dtype='datetime64[ns]', freq='D') + + Specify `start`, `end`, and `periods`; the frequency is generated + automatically (linearly spaced). + + >>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3) + DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00', + '2018-04-27 00:00:00'], + dtype='datetime64[ns]', freq=None) + + **Other Parameters** + + Changed the `freq` (frequency) to ``'M'`` (month end frequency). + + >>> pd.date_range(start='1/1/2018', periods=5, freq='M') + DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30', + '2018-05-31'], + dtype='datetime64[ns]', freq='M') + + Multiples are allowed + + >>> pd.date_range(start='1/1/2018', periods=5, freq='3M') + DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31', + '2019-01-31'], + dtype='datetime64[ns]', freq='3M') + + `freq` can also be specified as an Offset object. + + >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3)) + DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31', + '2019-01-31'], + dtype='datetime64[ns]', freq='3M') + + Specify `tz` to set the timezone. + + >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo') + DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00', + '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00', + '2018-01-05 00:00:00+09:00'], + dtype='datetime64[ns, Asia/Tokyo]', freq='D') + + `inclusive` controls whether to include `start` and `end` that are on the + boundary. 
The default, "both", includes boundary points on either end. + + >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive="both") + DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'], + dtype='datetime64[ns]', freq='D') + + Use ``inclusive='left'`` to exclude `end` if it falls on the boundary. + + >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='left') + DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'], + dtype='datetime64[ns]', freq='D') + + Use ``inclusive='right'`` to exclude `start` if it falls on the boundary, and + similarly ``inclusive='neither'`` will exclude both `start` and `end`. + + >>> pd.date_range(start='2017-01-01', end='2017-01-04', inclusive='right') + DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'], + dtype='datetime64[ns]', freq='D') + + **Specify a unit** + + >>> pd.date_range(start="2017-01-01", periods=10, freq="100AS", unit="s") + DatetimeIndex(['2017-01-01', '2117-01-01', '2217-01-01', '2317-01-01', + '2417-01-01', '2517-01-01', '2617-01-01', '2717-01-01', + '2817-01-01', '2917-01-01'], + dtype='datetime64[s]', freq='100AS-JAN') + """ + if freq is None and com.any_none(periods, start, end): + freq = "D" + + dtarr = DatetimeArray._generate_range( + start=start, + end=end, + periods=periods, + freq=freq, + tz=tz, + normalize=normalize, + inclusive=inclusive, + unit=unit, + **kwargs, + ) + return DatetimeIndex._simple_new(dtarr, name=name) + + +def bdate_range( + start=None, + end=None, + periods: int | None = None, + freq: Frequency | dt.timedelta = "B", + tz=None, + normalize: bool = True, + name: Hashable | None = None, + weekmask=None, + holidays=None, + inclusive: IntervalClosedType = "both", + **kwargs, +) -> DatetimeIndex: + """ + Return a fixed frequency DatetimeIndex with business day as the default. + + Parameters + ---------- + start : str or datetime-like, default None + Left bound for generating dates. + end : str or datetime-like, default None + Right bound for generating dates. + periods : int, default None + Number of periods to generate. + freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'B' + Frequency strings can have multiples, e.g. '5H'. The default is + business daily ('B'). + tz : str or None + Time zone name for returning localized DatetimeIndex, for example + Asia/Beijing. + normalize : bool, default False + Normalize start/end dates to midnight before generating date range. + name : str, default None + Name of the resulting DatetimeIndex. + weekmask : str or None, default None + Weekmask of valid business days, passed to ``numpy.busdaycalendar``, + only used when custom frequency strings are passed. The default + value None is equivalent to 'Mon Tue Wed Thu Fri'. + holidays : list-like or None, default None + Dates to exclude from the set of valid business days, passed to + ``numpy.busdaycalendar``, only used when custom frequency strings + are passed. + inclusive : {"both", "neither", "left", "right"}, default "both" + Include boundaries; Whether to set each bound as closed or open. + + .. versionadded:: 1.4.0 + **kwargs + For compatibility. Has no effect on the result. + + Returns + ------- + DatetimeIndex + + Notes + ----- + Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``, + exactly three must be specified. Specifying ``freq`` is a requirement + for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not + desired. + + To learn more about the frequency strings, please see `this link + `__. 
+ + Examples + -------- + Note how the two weekend days are skipped in the result. + + >>> pd.bdate_range(start='1/1/2018', end='1/08/2018') + DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', + '2018-01-05', '2018-01-08'], + dtype='datetime64[ns]', freq='B') + """ + if freq is None: + msg = "freq must be specified for bdate_range; use date_range instead" + raise TypeError(msg) + + if isinstance(freq, str) and freq.startswith("C"): + try: + weekmask = weekmask or "Mon Tue Wed Thu Fri" + freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask) + except (KeyError, TypeError) as err: + msg = f"invalid custom frequency string: {freq}" + raise ValueError(msg) from err + elif holidays or weekmask: + msg = ( + "a custom frequency string is required when holidays or " + f"weekmask are passed, got frequency {freq}" + ) + raise ValueError(msg) + + return date_range( + start=start, + end=end, + periods=periods, + freq=freq, + tz=tz, + normalize=normalize, + name=name, + inclusive=inclusive, + **kwargs, + ) + + +def _time_to_micros(time_obj: dt.time) -> int: + seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second + return 1_000_000 * seconds + time_obj.microsecond diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/extension.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/extension.py new file mode 100644 index 00000000..61949531 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/extension.py @@ -0,0 +1,172 @@ +""" +Shared methods for Index subclasses backed by ExtensionArray. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Callable, + TypeVar, +) + +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.generic import ABCDataFrame + +from pandas.core.indexes.base import Index + +if TYPE_CHECKING: + import numpy as np + + from pandas._typing import ( + ArrayLike, + npt, + ) + + from pandas.core.arrays import IntervalArray + from pandas.core.arrays._mixins import NDArrayBackedExtensionArray + +_ExtensionIndexT = TypeVar("_ExtensionIndexT", bound="ExtensionIndex") + + +def _inherit_from_data( + name: str, delegate: type, cache: bool = False, wrap: bool = False +): + """ + Make an alias for a method of the underlying ExtensionArray. + + Parameters + ---------- + name : str + Name of an attribute the class should inherit from its EA parent. + delegate : class + cache : bool, default False + Whether to convert wrapped properties into cache_readonly + wrap : bool, default False + Whether to wrap the inherited result in an Index. + + Returns + ------- + attribute, method, property, or cache_readonly + """ + attr = getattr(delegate, name) + + if isinstance(attr, property) or type(attr).__name__ == "getset_descriptor": + # getset_descriptor i.e. 
property defined in cython class + if cache: + + def cached(self): + return getattr(self._data, name) + + cached.__name__ = name + cached.__doc__ = attr.__doc__ + method = cache_readonly(cached) + + else: + + def fget(self): + result = getattr(self._data, name) + if wrap: + if isinstance(result, type(self._data)): + return type(self)._simple_new(result, name=self.name) + elif isinstance(result, ABCDataFrame): + return result.set_index(self) + return Index(result, name=self.name) + return result + + def fset(self, value) -> None: + setattr(self._data, name, value) + + fget.__name__ = name + fget.__doc__ = attr.__doc__ + + method = property(fget, fset) + + elif not callable(attr): + # just a normal attribute, no wrapping + method = attr + + else: + # error: Incompatible redefinition (redefinition with type "Callable[[Any, + # VarArg(Any), KwArg(Any)], Any]", original type "property") + def method(self, *args, **kwargs): # type: ignore[misc] + if "inplace" in kwargs: + raise ValueError(f"cannot use inplace with {type(self).__name__}") + result = attr(self._data, *args, **kwargs) + if wrap: + if isinstance(result, type(self._data)): + return type(self)._simple_new(result, name=self.name) + elif isinstance(result, ABCDataFrame): + return result.set_index(self) + return Index(result, name=self.name) + return result + + # error: "property" has no attribute "__name__" + method.__name__ = name # type: ignore[attr-defined] + method.__doc__ = attr.__doc__ + return method + + +def inherit_names( + names: list[str], delegate: type, cache: bool = False, wrap: bool = False +) -> Callable[[type[_ExtensionIndexT]], type[_ExtensionIndexT]]: + """ + Class decorator to pin attributes from an ExtensionArray to a Index subclass. + + Parameters + ---------- + names : List[str] + delegate : class + cache : bool, default False + wrap : bool, default False + Whether to wrap the inherited result in an Index. + """ + + def wrapper(cls: type[_ExtensionIndexT]) -> type[_ExtensionIndexT]: + for name in names: + meth = _inherit_from_data(name, delegate, cache=cache, wrap=wrap) + setattr(cls, name, meth) + + return cls + + return wrapper + + +class ExtensionIndex(Index): + """ + Index subclass for indexes backed by ExtensionArray. + """ + + # The base class already passes through to _data: + # size, __len__, dtype + + _data: IntervalArray | NDArrayBackedExtensionArray + + # --------------------------------------------------------------------- + + def _validate_fill_value(self, value): + """ + Convert value to be insertable to underlying array. + """ + return self._data._validate_setitem_value(value) + + @cache_readonly + def _isnan(self) -> npt.NDArray[np.bool_]: + # error: Incompatible return value type (got "ExtensionArray", expected + # "ndarray") + return self._data.isna() # type: ignore[return-value] + + +class NDArrayBackedExtensionIndex(ExtensionIndex): + """ + Index subclass for indexes backed by NDArrayBackedExtensionArray. 
+ """ + + _data: NDArrayBackedExtensionArray + + def _get_engine_target(self) -> np.ndarray: + return self._data._ndarray + + def _from_join_target(self, result: np.ndarray) -> ArrayLike: + assert result.dtype == self._data._ndarray.dtype + return self._data._from_backing_data(result) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/frozen.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/frozen.py new file mode 100644 index 00000000..3b8aefdb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/frozen.py @@ -0,0 +1,117 @@ +""" +frozen (immutable) data structures to support MultiIndexing + +These are used for: + +- .names (FrozenList) + +""" +from __future__ import annotations + +from typing import ( + Any, + NoReturn, +) + +from pandas.core.base import PandasObject + +from pandas.io.formats.printing import pprint_thing + + +class FrozenList(PandasObject, list): + """ + Container that doesn't allow setting item *but* + because it's technically hashable, will be used + for lookups, appropriately, etc. + """ + + # Side note: This has to be of type list. Otherwise, + # it messes up PyTables type checks. + + def union(self, other) -> FrozenList: + """ + Returns a FrozenList with other concatenated to the end of self. + + Parameters + ---------- + other : array-like + The array-like whose elements we are concatenating. + + Returns + ------- + FrozenList + The collection difference between self and other. + """ + if isinstance(other, tuple): + other = list(other) + return type(self)(super().__add__(other)) + + def difference(self, other) -> FrozenList: + """ + Returns a FrozenList with elements from other removed from self. + + Parameters + ---------- + other : array-like + The array-like whose elements we are removing self. + + Returns + ------- + FrozenList + The collection difference between self and other. + """ + other = set(other) + temp = [x for x in self if x not in other] + return type(self)(temp) + + # TODO: Consider deprecating these in favor of `union` (xref gh-15506) + # error: Incompatible types in assignment (expression has type + # "Callable[[FrozenList, Any], FrozenList]", base class "list" defined the + # type as overloaded function) + __add__ = __iadd__ = union # type: ignore[assignment] + + def __getitem__(self, n): + if isinstance(n, slice): + return type(self)(super().__getitem__(n)) + return super().__getitem__(n) + + def __radd__(self, other): + if isinstance(other, tuple): + other = list(other) + return type(self)(other + list(self)) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, (tuple, FrozenList)): + other = list(other) + return super().__eq__(other) + + __req__ = __eq__ + + def __mul__(self, other): + return type(self)(super().__mul__(other)) + + __imul__ = __mul__ + + def __reduce__(self): + return type(self), (list(self),) + + # error: Signature of "__hash__" incompatible with supertype "list" + def __hash__(self) -> int: # type: ignore[override] + return hash(tuple(self)) + + def _disabled(self, *args, **kwargs) -> NoReturn: + """ + This method will not function because object is immutable. 
+ """ + raise TypeError(f"'{type(self).__name__}' does not support mutable operations.") + + def __str__(self) -> str: + return pprint_thing(self, quote_strings=True, escape_chars=("\t", "\r", "\n")) + + def __repr__(self) -> str: + return f"{type(self).__name__}({str(self)})" + + __setitem__ = __setslice__ = _disabled # type: ignore[assignment] + __delitem__ = __delslice__ = _disabled + pop = append = extend = _disabled + remove = sort = insert = _disabled # type: ignore[assignment] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/interval.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/interval.py new file mode 100644 index 00000000..e8b3676e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/interval.py @@ -0,0 +1,1154 @@ +""" define the IntervalIndex """ +from __future__ import annotations + +from operator import ( + le, + lt, +) +import textwrap +from typing import ( + TYPE_CHECKING, + Any, + Literal, +) + +import numpy as np + +from pandas._libs import lib +from pandas._libs.interval import ( + Interval, + IntervalMixin, + IntervalTree, +) +from pandas._libs.tslibs import ( + BaseOffset, + Timedelta, + Timestamp, + to_offset, +) +from pandas.errors import InvalidIndexError +from pandas.util._decorators import ( + Appender, + cache_readonly, +) +from pandas.util._exceptions import rewrite_exception + +from pandas.core.dtypes.cast import ( + find_common_type, + infer_dtype_from_scalar, + maybe_box_datetimelike, + maybe_downcast_numeric, + maybe_upcast_numeric_to_64bit, +) +from pandas.core.dtypes.common import ( + ensure_platform_int, + is_float, + is_float_dtype, + is_integer, + is_integer_dtype, + is_list_like, + is_number, + is_object_dtype, + is_scalar, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + IntervalDtype, +) +from pandas.core.dtypes.missing import is_valid_na_for_dtype + +from pandas.core.algorithms import unique +from pandas.core.arrays.interval import ( + IntervalArray, + _interval_shared_docs, +) +import pandas.core.common as com +from pandas.core.indexers import is_valid_positional_slice +import pandas.core.indexes.base as ibase +from pandas.core.indexes.base import ( + Index, + _index_shared_docs, + ensure_index, + maybe_extract_name, +) +from pandas.core.indexes.datetimes import ( + DatetimeIndex, + date_range, +) +from pandas.core.indexes.extension import ( + ExtensionIndex, + inherit_names, +) +from pandas.core.indexes.multi import MultiIndex +from pandas.core.indexes.timedeltas import ( + TimedeltaIndex, + timedelta_range, +) + +if TYPE_CHECKING: + from collections.abc import Hashable + + from pandas._typing import ( + Dtype, + DtypeObj, + IntervalClosedType, + npt, + ) +_index_doc_kwargs = dict(ibase._index_doc_kwargs) + +_index_doc_kwargs.update( + { + "klass": "IntervalIndex", + "qualname": "IntervalIndex", + "target_klass": "IntervalIndex or list of Intervals", + "name": textwrap.dedent( + """\ + name : object, optional + Name to be stored in the index. 
+ """ + ), + } +) + + +def _get_next_label(label): + # see test_slice_locs_with_ints_and_floats_succeeds + dtype = getattr(label, "dtype", type(label)) + if isinstance(label, (Timestamp, Timedelta)): + dtype = "datetime64[ns]" + dtype = pandas_dtype(dtype) + + if lib.is_np_dtype(dtype, "mM") or isinstance(dtype, DatetimeTZDtype): + return label + np.timedelta64(1, "ns") + elif is_integer_dtype(dtype): + return label + 1 + elif is_float_dtype(dtype): + return np.nextafter(label, np.inf) + else: + raise TypeError(f"cannot determine next label for type {repr(type(label))}") + + +def _get_prev_label(label): + # see test_slice_locs_with_ints_and_floats_succeeds + dtype = getattr(label, "dtype", type(label)) + if isinstance(label, (Timestamp, Timedelta)): + dtype = "datetime64[ns]" + dtype = pandas_dtype(dtype) + + if lib.is_np_dtype(dtype, "mM") or isinstance(dtype, DatetimeTZDtype): + return label - np.timedelta64(1, "ns") + elif is_integer_dtype(dtype): + return label - 1 + elif is_float_dtype(dtype): + return np.nextafter(label, -np.inf) + else: + raise TypeError(f"cannot determine next label for type {repr(type(label))}") + + +def _new_IntervalIndex(cls, d): + """ + This is called upon unpickling, rather than the default which doesn't have + arguments and breaks __new__. + """ + return cls.from_arrays(**d) + + +@Appender( + _interval_shared_docs["class"] + % { + "klass": "IntervalIndex", + "summary": "Immutable index of intervals that are closed on the same side.", + "name": _index_doc_kwargs["name"], + "extra_attributes": "is_overlapping\nvalues\n", + "extra_methods": "", + "examples": textwrap.dedent( + """\ + Examples + -------- + A new ``IntervalIndex`` is typically constructed using + :func:`interval_range`: + + >>> pd.interval_range(start=0, end=5) + IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]], + dtype='interval[int64, right]') + + It may also be constructed using one of the constructor + methods: :meth:`IntervalIndex.from_arrays`, + :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`. + + See further examples in the doc strings of ``interval_range`` and the + mentioned constructor methods. 
+ """ + ), + } +) +@inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True) +@inherit_names( + [ + "__array__", + "overlaps", + "contains", + "closed_left", + "closed_right", + "open_left", + "open_right", + "is_empty", + ], + IntervalArray, +) +@inherit_names(["is_non_overlapping_monotonic", "closed"], IntervalArray, cache=True) +class IntervalIndex(ExtensionIndex): + _typ = "intervalindex" + + # annotate properties pinned via inherit_names + closed: IntervalClosedType + is_non_overlapping_monotonic: bool + closed_left: bool + closed_right: bool + open_left: bool + open_right: bool + + _data: IntervalArray + _values: IntervalArray + _can_hold_strings = False + _data_cls = IntervalArray + + # -------------------------------------------------------------------- + # Constructors + + def __new__( + cls, + data, + closed: IntervalClosedType | None = None, + dtype: Dtype | None = None, + copy: bool = False, + name: Hashable | None = None, + verify_integrity: bool = True, + ) -> IntervalIndex: + name = maybe_extract_name(name, data, cls) + + with rewrite_exception("IntervalArray", cls.__name__): + array = IntervalArray( + data, + closed=closed, + copy=copy, + dtype=dtype, + verify_integrity=verify_integrity, + ) + + return cls._simple_new(array, name) + + @classmethod + @Appender( + _interval_shared_docs["from_breaks"] + % { + "klass": "IntervalIndex", + "name": textwrap.dedent( + """ + name : str, optional + Name of the resulting IntervalIndex.""" + ), + "examples": textwrap.dedent( + """\ + Examples + -------- + >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3]) + IntervalIndex([(0, 1], (1, 2], (2, 3]], + dtype='interval[int64, right]') + """ + ), + } + ) + def from_breaks( + cls, + breaks, + closed: IntervalClosedType | None = "right", + name: Hashable | None = None, + copy: bool = False, + dtype: Dtype | None = None, + ) -> IntervalIndex: + with rewrite_exception("IntervalArray", cls.__name__): + array = IntervalArray.from_breaks( + breaks, closed=closed, copy=copy, dtype=dtype + ) + return cls._simple_new(array, name=name) + + @classmethod + @Appender( + _interval_shared_docs["from_arrays"] + % { + "klass": "IntervalIndex", + "name": textwrap.dedent( + """ + name : str, optional + Name of the resulting IntervalIndex.""" + ), + "examples": textwrap.dedent( + """\ + Examples + -------- + >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3]) + IntervalIndex([(0, 1], (1, 2], (2, 3]], + dtype='interval[int64, right]') + """ + ), + } + ) + def from_arrays( + cls, + left, + right, + closed: IntervalClosedType = "right", + name: Hashable | None = None, + copy: bool = False, + dtype: Dtype | None = None, + ) -> IntervalIndex: + with rewrite_exception("IntervalArray", cls.__name__): + array = IntervalArray.from_arrays( + left, right, closed, copy=copy, dtype=dtype + ) + return cls._simple_new(array, name=name) + + @classmethod + @Appender( + _interval_shared_docs["from_tuples"] + % { + "klass": "IntervalIndex", + "name": textwrap.dedent( + """ + name : str, optional + Name of the resulting IntervalIndex.""" + ), + "examples": textwrap.dedent( + """\ + Examples + -------- + >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)]) + IntervalIndex([(0, 1], (1, 2]], + dtype='interval[int64, right]') + """ + ), + } + ) + def from_tuples( + cls, + data, + closed: IntervalClosedType = "right", + name: Hashable | None = None, + copy: bool = False, + dtype: Dtype | None = None, + ) -> IntervalIndex: + with rewrite_exception("IntervalArray", cls.__name__): + arr = IntervalArray.from_tuples(data, 
closed=closed, copy=copy, dtype=dtype) + return cls._simple_new(arr, name=name) + + # -------------------------------------------------------------------- + # error: Return type "IntervalTree" of "_engine" incompatible with return type + # "Union[IndexEngine, ExtensionEngine]" in supertype "Index" + @cache_readonly + def _engine(self) -> IntervalTree: # type: ignore[override] + # IntervalTree does not supports numpy array unless they are 64 bit + left = self._maybe_convert_i8(self.left) + left = maybe_upcast_numeric_to_64bit(left) + right = self._maybe_convert_i8(self.right) + right = maybe_upcast_numeric_to_64bit(right) + return IntervalTree(left, right, closed=self.closed) + + def __contains__(self, key: Any) -> bool: + """ + return a boolean if this key is IN the index + We *only* accept an Interval + + Parameters + ---------- + key : Interval + + Returns + ------- + bool + """ + hash(key) + if not isinstance(key, Interval): + if is_valid_na_for_dtype(key, self.dtype): + return self.hasnans + return False + + try: + self.get_loc(key) + return True + except KeyError: + return False + + def _getitem_slice(self, slobj: slice) -> IntervalIndex: + """ + Fastpath for __getitem__ when we know we have a slice. + """ + res = self._data[slobj] + return type(self)._simple_new(res, name=self._name) + + @cache_readonly + def _multiindex(self) -> MultiIndex: + return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"]) + + def __reduce__(self): + d = { + "left": self.left, + "right": self.right, + "closed": self.closed, + "name": self.name, + } + return _new_IntervalIndex, (type(self), d), None + + @property + def inferred_type(self) -> str: + """Return a string of the type inferred from the values""" + return "interval" + + # Cannot determine type of "memory_usage" + @Appender(Index.memory_usage.__doc__) # type: ignore[has-type] + def memory_usage(self, deep: bool = False) -> int: + # we don't use an explicit engine + # so return the bytes here + return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep) + + # IntervalTree doesn't have a is_monotonic_decreasing, so have to override + # the Index implementation + @cache_readonly + def is_monotonic_decreasing(self) -> bool: + """ + Return True if the IntervalIndex is monotonic decreasing (only equal or + decreasing values), else False + """ + return self[::-1].is_monotonic_increasing + + @cache_readonly + def is_unique(self) -> bool: + """ + Return True if the IntervalIndex contains unique elements, else False. + """ + left = self.left + right = self.right + + if self.isna().sum() > 1: + return False + + if left.is_unique or right.is_unique: + return True + + seen_pairs = set() + check_idx = np.where(left.duplicated(keep=False))[0] + for idx in check_idx: + pair = (left[idx], right[idx]) + if pair in seen_pairs: + return False + seen_pairs.add(pair) + + return True + + @property + def is_overlapping(self) -> bool: + """ + Return True if the IntervalIndex has overlapping intervals, else False. + + Two intervals overlap if they share a common point, including closed + endpoints. Intervals that only have an open endpoint in common do not + overlap. + + Returns + ------- + bool + Boolean indicating if the IntervalIndex has overlapping intervals. + + See Also + -------- + Interval.overlaps : Check whether two Interval objects overlap. + IntervalIndex.overlaps : Check an IntervalIndex elementwise for + overlaps. 
+ + Examples + -------- + >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)]) + >>> index + IntervalIndex([(0, 2], (1, 3], (4, 5]], + dtype='interval[int64, right]') + >>> index.is_overlapping + True + + Intervals that share closed endpoints overlap: + + >>> index = pd.interval_range(0, 3, closed='both') + >>> index + IntervalIndex([[0, 1], [1, 2], [2, 3]], + dtype='interval[int64, both]') + >>> index.is_overlapping + True + + Intervals that only have an open endpoint in common do not overlap: + + >>> index = pd.interval_range(0, 3, closed='left') + >>> index + IntervalIndex([[0, 1), [1, 2), [2, 3)], + dtype='interval[int64, left]') + >>> index.is_overlapping + False + """ + # GH 23309 + return self._engine.is_overlapping + + def _needs_i8_conversion(self, key) -> bool: + """ + Check if a given key needs i8 conversion. Conversion is necessary for + Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An + Interval-like requires conversion if its endpoints are one of the + aforementioned types. + + Assumes that any list-like data has already been cast to an Index. + + Parameters + ---------- + key : scalar or Index-like + The key that should be checked for i8 conversion + + Returns + ------- + bool + """ + key_dtype = getattr(key, "dtype", None) + if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval): + return self._needs_i8_conversion(key.left) + + i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex) + return isinstance(key, i8_types) + + def _maybe_convert_i8(self, key): + """ + Maybe convert a given key to its equivalent i8 value(s). Used as a + preprocessing step prior to IntervalTree queries (self._engine), which + expects numeric data. + + Parameters + ---------- + key : scalar or list-like + The key that should maybe be converted to i8. + + Returns + ------- + scalar or list-like + The original key if no conversion occurred, int if converted scalar, + Index with an int64 dtype if converted list-like. + """ + if is_list_like(key): + key = ensure_index(key) + key = maybe_upcast_numeric_to_64bit(key) + + if not self._needs_i8_conversion(key): + return key + + scalar = is_scalar(key) + key_dtype = getattr(key, "dtype", None) + if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval): + # convert left/right and reconstruct + left = self._maybe_convert_i8(key.left) + right = self._maybe_convert_i8(key.right) + constructor = Interval if scalar else IntervalIndex.from_arrays + # error: "object" not callable + return constructor( + left, right, closed=self.closed + ) # type: ignore[operator] + + if scalar: + # Timestamp/Timedelta + key_dtype, key_i8 = infer_dtype_from_scalar(key) + if lib.is_period(key): + key_i8 = key.ordinal + elif isinstance(key_i8, Timestamp): + key_i8 = key_i8._value + elif isinstance(key_i8, (np.datetime64, np.timedelta64)): + key_i8 = key_i8.view("i8") + else: + # DatetimeIndex/TimedeltaIndex + key_dtype, key_i8 = key.dtype, Index(key.asi8) + if key.hasnans: + # convert NaT from its i8 value to np.nan so it's not viewed + # as a valid value, maybe causing errors (e.g. 
is_overlapping) + key_i8 = key_i8.where(~key._isnan) + + # ensure consistency with IntervalIndex subtype + # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any], + # ExtensionDtype]" has no attribute "subtype" + subtype = self.dtype.subtype # type: ignore[union-attr] + + if subtype != key_dtype: + raise ValueError( + f"Cannot index an IntervalIndex of subtype {subtype} with " + f"values of dtype {key_dtype}" + ) + + return key_i8 + + def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"): + if not self.is_non_overlapping_monotonic: + raise KeyError( + "can only get slices from an IntervalIndex if bounds are " + "non-overlapping and all monotonic increasing or decreasing" + ) + + if isinstance(label, (IntervalMixin, IntervalIndex)): + raise NotImplementedError("Interval objects are not currently supported") + + # GH 20921: "not is_monotonic_increasing" for the second condition + # instead of "is_monotonic_decreasing" to account for single element + # indexes being both increasing and decreasing + if (side == "left" and self.left.is_monotonic_increasing) or ( + side == "right" and not self.left.is_monotonic_increasing + ): + sub_idx = self.right + if self.open_right: + label = _get_next_label(label) + else: + sub_idx = self.left + if self.open_left: + label = _get_prev_label(label) + + return sub_idx._searchsorted_monotonic(label, side) + + # -------------------------------------------------------------------- + # Indexing Methods + + def get_loc(self, key) -> int | slice | np.ndarray: + """ + Get integer location, slice or boolean mask for requested label. + + Parameters + ---------- + key : label + + Returns + ------- + int if unique index, slice if monotonic index, else mask + + Examples + -------- + >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2) + >>> index = pd.IntervalIndex([i1, i2]) + >>> index.get_loc(1) + 0 + + You can also supply a point inside an interval. + + >>> index.get_loc(1.5) + 1 + + If a label is in several intervals, you get the locations of all the + relevant intervals. + + >>> i3 = pd.Interval(0, 2) + >>> overlapping_index = pd.IntervalIndex([i1, i2, i3]) + >>> overlapping_index.get_loc(0.5) + array([ True, False, True]) + + Only exact matches will be returned if an interval is provided. + + >>> index.get_loc(pd.Interval(0, 1)) + 0 + """ + self._check_indexing_error(key) + + if isinstance(key, Interval): + if self.closed != key.closed: + raise KeyError(key) + mask = (self.left == key.left) & (self.right == key.right) + elif is_valid_na_for_dtype(key, self.dtype): + mask = self.isna() + else: + # assume scalar + op_left = le if self.closed_left else lt + op_right = le if self.closed_right else lt + try: + mask = op_left(self.left, key) & op_right(key, self.right) + except TypeError as err: + # scalar is not comparable to II subtype --> invalid label + raise KeyError(key) from err + + matches = mask.sum() + if matches == 0: + raise KeyError(key) + if matches == 1: + return mask.argmax() + + res = lib.maybe_booleans_to_slice(mask.view("u1")) + if isinstance(res, slice) and res.stop is None: + # TODO: DO this in maybe_booleans_to_slice? 
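+            # Descriptive note (editorial): pin the open-ended stop to
+            # len(self) so the returned slice is unambiguous to downstream
+            # positional indexing.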
+ res = slice(res.start, len(self), res.step) + return res + + def _get_indexer( + self, + target: Index, + method: str | None = None, + limit: int | None = None, + tolerance: Any | None = None, + ) -> npt.NDArray[np.intp]: + if isinstance(target, IntervalIndex): + # We only get here with not self.is_overlapping + # -> at most one match per interval in target + # want exact matches -> need both left/right to match, so defer to + # left/right get_indexer, compare elementwise, equality -> match + indexer = self._get_indexer_unique_sides(target) + + elif not is_object_dtype(target.dtype): + # homogeneous scalar index: use IntervalTree + # we should always have self._should_partial_index(target) here + target = self._maybe_convert_i8(target) + indexer = self._engine.get_indexer(target.values) + else: + # heterogeneous scalar index: defer elementwise to get_loc + # we should always have self._should_partial_index(target) here + return self._get_indexer_pointwise(target)[0] + + return ensure_platform_int(indexer) + + @Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs) + def get_indexer_non_unique( + self, target: Index + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + target = ensure_index(target) + + if not self._should_compare(target) and not self._should_partial_index(target): + # e.g. IntervalIndex with different closed or incompatible subtype + # -> no matches + return self._get_indexer_non_comparable(target, None, unique=False) + + elif isinstance(target, IntervalIndex): + if self.left.is_unique and self.right.is_unique: + # fastpath available even if we don't have self._index_as_unique + indexer = self._get_indexer_unique_sides(target) + missing = (indexer == -1).nonzero()[0] + else: + return self._get_indexer_pointwise(target) + + elif is_object_dtype(target.dtype) or not self._should_partial_index(target): + # target might contain intervals: defer elementwise to get_loc + return self._get_indexer_pointwise(target) + + else: + # Note: this case behaves differently from other Index subclasses + # because IntervalIndex does partial-int indexing + target = self._maybe_convert_i8(target) + indexer, missing = self._engine.get_indexer_non_unique(target.values) + + return ensure_platform_int(indexer), ensure_platform_int(missing) + + def _get_indexer_unique_sides(self, target: IntervalIndex) -> npt.NDArray[np.intp]: + """ + _get_indexer specialized to the case where both of our sides are unique. + """ + # Caller is responsible for checking + # `self.left.is_unique and self.right.is_unique` + + left_indexer = self.left.get_indexer(target.left) + right_indexer = self.right.get_indexer(target.right) + indexer = np.where(left_indexer == right_indexer, left_indexer, -1) + return indexer + + def _get_indexer_pointwise( + self, target: Index + ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + """ + pointwise implementation for get_indexer and get_indexer_non_unique. + """ + indexer, missing = [], [] + for i, key in enumerate(target): + try: + locs = self.get_loc(key) + if isinstance(locs, slice): + # Only needed for get_indexer_non_unique + locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp") + elif lib.is_integer(locs): + locs = np.array(locs, ndmin=1) + else: + # otherwise we have ndarray[bool] + locs = np.where(locs)[0] + except KeyError: + missing.append(i) + locs = np.array([-1]) + except InvalidIndexError: + # i.e. non-scalar key e.g. a tuple. 
+ # see test_append_different_columns_types_raises + missing.append(i) + locs = np.array([-1]) + + indexer.append(locs) + + indexer = np.concatenate(indexer) + return ensure_platform_int(indexer), ensure_platform_int(missing) + + @cache_readonly + def _index_as_unique(self) -> bool: + return not self.is_overlapping and self._engine._na_count < 2 + + _requires_unique_msg = ( + "cannot handle overlapping indices; use IntervalIndex.get_indexer_non_unique" + ) + + def _convert_slice_indexer(self, key: slice, kind: Literal["loc", "getitem"]): + if not (key.step is None or key.step == 1): + # GH#31658 if label-based, we require step == 1, + # if positional, we disallow float start/stop + msg = "label-based slicing with step!=1 is not supported for IntervalIndex" + if kind == "loc": + raise ValueError(msg) + if kind == "getitem": + if not is_valid_positional_slice(key): + # i.e. this cannot be interpreted as a positional slice + raise ValueError(msg) + + return super()._convert_slice_indexer(key, kind) + + @cache_readonly + def _should_fallback_to_positional(self) -> bool: + # integer lookups in Series.__getitem__ are unambiguously + # positional in this case + # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any], + # ExtensionDtype]" has no attribute "subtype" + return self.dtype.subtype.kind in "mM" # type: ignore[union-attr] + + def _maybe_cast_slice_bound(self, label, side: str): + return getattr(self, side)._maybe_cast_slice_bound(label, side) + + def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: + if not isinstance(dtype, IntervalDtype): + return False + common_subtype = find_common_type([self.dtype, dtype]) + return not is_object_dtype(common_subtype) + + # -------------------------------------------------------------------- + + @cache_readonly + def left(self) -> Index: + return Index(self._data.left, copy=False) + + @cache_readonly + def right(self) -> Index: + return Index(self._data.right, copy=False) + + @cache_readonly + def mid(self) -> Index: + return Index(self._data.mid, copy=False) + + @property + def length(self) -> Index: + return Index(self._data.length, copy=False) + + # -------------------------------------------------------------------- + # Rendering Methods + # __repr__ associated methods are based on MultiIndex + + def _format_with_header(self, header: list[str], na_rep: str) -> list[str]: + # matches base class except for whitespace padding + return header + list(self._format_native_types(na_rep=na_rep)) + + def _format_native_types( + self, *, na_rep: str = "NaN", quoting=None, **kwargs + ) -> npt.NDArray[np.object_]: + # GH 28210: use base method but with different default na_rep + return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs) + + def _format_data(self, name=None) -> str: + # TODO: integrate with categorical and make generic + # name argument is unused here; just for compat with base / categorical + return f"{self._data._format_data()},{self._format_space()}" + + # -------------------------------------------------------------------- + # Set Operations + + def _intersection(self, other, sort): + """ + intersection specialized to the case with matching dtypes. 
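+
+        Dispatches to a unique-endpoints fastpath when possible; otherwise
+        falls back to a pointwise tuple comparison. The result is sorted
+        when ``sort is None``.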
+        """
+        # For IntervalIndex we also know other.closed == self.closed
+        if self.left.is_unique and self.right.is_unique:
+            taken = self._intersection_unique(other)
+        elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:
+            # Swap other/self if other is unique and self does not have
+            # multiple NaNs
+            taken = other._intersection_unique(self)
+        else:
+            # duplicates
+            taken = self._intersection_non_unique(other)
+
+        if sort is None:
+            taken = taken.sort_values()
+
+        return taken
+
+    def _intersection_unique(self, other: IntervalIndex) -> IntervalIndex:
+        """
+        Used when both ``self.left`` and ``self.right`` are unique, i.e. no
+        endpoint occurs more than once on either side.
+        Return the intersection with another IntervalIndex.
+
+        Parameters
+        ----------
+        other : IntervalIndex
+
+        Returns
+        -------
+        IntervalIndex
+        """
+        # Note: this is much more performant than super()._intersection(other)
+        lindexer = self.left.get_indexer(other.left)
+        rindexer = self.right.get_indexer(other.right)
+
+        match = (lindexer == rindexer) & (lindexer != -1)
+        indexer = lindexer.take(match.nonzero()[0])
+        indexer = unique(indexer)
+
+        return self.take(indexer)
+
+    def _intersection_non_unique(self, other: IntervalIndex) -> IntervalIndex:
+        """
+        Used when the IntervalIndex has duplicated endpoints on at least one
+        side.
+        Return the intersection with another IntervalIndex.
+
+        Parameters
+        ----------
+        other : IntervalIndex
+
+        Returns
+        -------
+        IntervalIndex
+        """
+        # Note: this is about 3.25x faster than super()._intersection(other)
+        # in IntervalIndexMethod.time_intersection_both_duplicate(1000)
+        mask = np.zeros(len(self), dtype=bool)
+
+        if self.hasnans and other.hasnans:
+            first_nan_loc = np.arange(len(self))[self.isna()][0]
+            mask[first_nan_loc] = True
+
+        other_tups = set(zip(other.left, other.right))
+        for i, tup in enumerate(zip(self.left, self.right)):
+            if tup in other_tups:
+                mask[i] = True
+
+        return self[mask]
+
+    # --------------------------------------------------------------------
+
+    def _get_engine_target(self) -> np.ndarray:
+        # Note: we _could_ use libjoin functions by either casting to object
+        # dtype or constructing tuples (faster than constructing Intervals)
+        # but the libjoin fastpaths are no longer fast in these cases.
+        raise NotImplementedError(
+            "IntervalIndex does not use libjoin fastpaths or pass values to "
+            "IndexEngine objects"
+        )
+
+    def _from_join_target(self, result):
+        raise NotImplementedError("IntervalIndex does not use libjoin fastpaths")
+
+    # TODO: arithmetic operations
+
+
+def _is_valid_endpoint(endpoint) -> bool:
+    """
+    Helper for interval_range to check if start/end are valid types.
+    """
+    return any(
+        [
+            is_number(endpoint),
+            isinstance(endpoint, Timestamp),
+            isinstance(endpoint, Timedelta),
+            endpoint is None,
+        ]
+    )
+
+
+def _is_type_compatible(a, b) -> bool:
+    """
+    Helper for interval_range to check type compatibility of start/end/freq.
+    """
+    is_ts_compat = lambda x: isinstance(x, (Timestamp, BaseOffset))
+    is_td_compat = lambda x: isinstance(x, (Timedelta, BaseOffset))
+    return (
+        (is_number(a) and is_number(b))
+        or (is_ts_compat(a) and is_ts_compat(b))
+        or (is_td_compat(a) and is_td_compat(b))
+        or com.any_none(a, b)
+    )
+
+
+def interval_range(
+    start=None,
+    end=None,
+    periods=None,
+    freq=None,
+    name: Hashable | None = None,
+    closed: IntervalClosedType = "right",
+) -> IntervalIndex:
+    """
+    Return a fixed frequency IntervalIndex.
+
+    Parameters
+    ----------
+    start : numeric or datetime-like, default None
+        Left bound for generating intervals.
+    end : numeric or datetime-like, default None
+        Right bound for generating intervals.
+    periods : int, default None
+        Number of periods to generate.
+    freq : numeric, str, Timedelta, datetime.timedelta, or DateOffset, default None
+        The length of each interval. Must be consistent with the type of start
+        and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
+        for numeric and 'D' for datetime-like.
+    name : str, default None
+        Name of the resulting IntervalIndex.
+    closed : {'left', 'right', 'both', 'neither'}, default 'right'
+        Whether the intervals are closed on the left-side, right-side, both
+        or neither.
+
+    Returns
+    -------
+    IntervalIndex
+
+    See Also
+    --------
+    IntervalIndex : An Index of intervals that are all closed on the same side.
+
+    Notes
+    -----
+    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
+    exactly three must be specified. If ``freq`` is omitted, the resulting
+    ``IntervalIndex`` will have ``periods`` linearly spaced elements between
+    ``start`` and ``end``, inclusively.
+
+    To learn more about datetime-like frequency strings, please see `this link
+    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
+
+    Examples
+    --------
+    Numeric ``start`` and ``end`` are supported.
+
+    >>> pd.interval_range(start=0, end=5)
+    IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
+                  dtype='interval[int64, right]')
+
+    Additionally, datetime-like input is also supported.
+
+    >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
+    ...                   end=pd.Timestamp('2017-01-04'))
+    IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
+                   (2017-01-03, 2017-01-04]],
+                  dtype='interval[datetime64[ns], right]')
+
+    The ``freq`` parameter specifies the frequency between the left and right
+    endpoints of the individual intervals within the ``IntervalIndex``. For
+    numeric ``start`` and ``end``, the frequency must also be numeric.
+
+    >>> pd.interval_range(start=0, periods=4, freq=1.5)
+    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
+                  dtype='interval[float64, right]')
+
+    Similarly, for datetime-like ``start`` and ``end``, the frequency must be
+    convertible to a DateOffset.
+
+    >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
+    ...                   periods=3, freq='MS')
+    IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
+                   (2017-03-01, 2017-04-01]],
+                  dtype='interval[datetime64[ns], right]')
+
+    Specify ``start``, ``end``, and ``periods``; the frequency is generated
+    automatically (linearly spaced).
+
+    >>> pd.interval_range(start=0, end=6, periods=4)
+    IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
+                  dtype='interval[float64, right]')
+
+    The ``closed`` parameter specifies which endpoints of the individual
+    intervals within the ``IntervalIndex`` are closed.
+ + >>> pd.interval_range(end=5, periods=4, closed='both') + IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], + dtype='interval[int64, both]') + """ + start = maybe_box_datetimelike(start) + end = maybe_box_datetimelike(end) + endpoint = start if start is not None else end + + if freq is None and com.any_none(periods, start, end): + freq = 1 if is_number(endpoint) else "D" + + if com.count_not_none(start, end, periods, freq) != 3: + raise ValueError( + "Of the four parameters: start, end, periods, and " + "freq, exactly three must be specified" + ) + + if not _is_valid_endpoint(start): + raise ValueError(f"start must be numeric or datetime-like, got {start}") + if not _is_valid_endpoint(end): + raise ValueError(f"end must be numeric or datetime-like, got {end}") + + if is_float(periods): + periods = int(periods) + elif not is_integer(periods) and periods is not None: + raise TypeError(f"periods must be a number, got {periods}") + + if freq is not None and not is_number(freq): + try: + freq = to_offset(freq) + except ValueError as err: + raise ValueError( + f"freq must be numeric or convertible to DateOffset, got {freq}" + ) from err + + # verify type compatibility + if not all( + [ + _is_type_compatible(start, end), + _is_type_compatible(start, freq), + _is_type_compatible(end, freq), + ] + ): + raise TypeError("start, end, freq need to be type compatible") + + # +1 to convert interval count to breaks count (n breaks = n-1 intervals) + if periods is not None: + periods += 1 + + breaks: np.ndarray | TimedeltaIndex | DatetimeIndex + + if is_number(endpoint): + if com.all_not_none(start, end, freq): + # 0.1 ensures we capture end + breaks = np.arange(start, end + (freq * 0.1), freq) + else: + # compute the period/start/end if unspecified (at most one) + if periods is None: + periods = int((end - start) // freq) + 1 + elif start is None: + start = end - (periods - 1) * freq + elif end is None: + end = start + (periods - 1) * freq + + breaks = np.linspace(start, end, periods) + if all(is_integer(x) for x in com.not_none(start, end, freq)): + # np.linspace always produces float output + + # error: Argument 1 to "maybe_downcast_numeric" has incompatible type + # "Union[ndarray[Any, Any], TimedeltaIndex, DatetimeIndex]"; + # expected "ndarray[Any, Any]" [ + breaks = maybe_downcast_numeric( + breaks, # type: ignore[arg-type] + np.dtype("int64"), + ) + else: + # delegate to the appropriate range function + if isinstance(endpoint, Timestamp): + breaks = date_range(start=start, end=end, periods=periods, freq=freq) + else: + breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq) + + return IntervalIndex.from_breaks(breaks, name=name, closed=closed) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/multi.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/multi.py new file mode 100644 index 00000000..bdc9e05a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/multi.py @@ -0,0 +1,4036 @@ +from __future__ import annotations + +from collections.abc import ( + Collection, + Generator, + Hashable, + Iterable, + Sequence, +) +from functools import wraps +from sys import getsizeof +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + cast, +) +import warnings + +import numpy as np + +from pandas._config import get_option + +from pandas._libs import ( + algos as libalgos, + index as libindex, + lib, +) +from pandas._libs.hashtable import duplicated +from pandas._typing import ( + AnyAll, + AnyArrayLike, + Axis, + 
DropKeep, + DtypeObj, + F, + IgnoreRaise, + IndexLabel, + Scalar, + Shape, + npt, +) +from pandas.compat.numpy import function as nv +from pandas.errors import ( + InvalidIndexError, + PerformanceWarning, + UnsortedIndexError, +) +from pandas.util._decorators import ( + Appender, + cache_readonly, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import coerce_indexer_dtype +from pandas.core.dtypes.common import ( + ensure_int64, + ensure_platform_int, + is_hashable, + is_integer, + is_iterator, + is_list_like, + is_object_dtype, + is_scalar, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + ExtensionDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCDatetimeIndex, + ABCSeries, + ABCTimedeltaIndex, +) +from pandas.core.dtypes.inference import is_array_like +from pandas.core.dtypes.missing import ( + array_equivalent, + isna, +) + +import pandas.core.algorithms as algos +from pandas.core.array_algos.putmask import validate_putmask +from pandas.core.arrays import ( + Categorical, + ExtensionArray, +) +from pandas.core.arrays.categorical import ( + factorize_from_iterables, + recode_for_categories, +) +import pandas.core.common as com +from pandas.core.construction import sanitize_array +import pandas.core.indexes.base as ibase +from pandas.core.indexes.base import ( + Index, + _index_shared_docs, + ensure_index, + get_unanimous_names, +) +from pandas.core.indexes.frozen import FrozenList +from pandas.core.ops.invalid import make_invalid_op +from pandas.core.sorting import ( + get_group_index, + lexsort_indexer, +) + +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from pandas import ( + CategoricalIndex, + DataFrame, + Series, + ) + +_index_doc_kwargs = dict(ibase._index_doc_kwargs) +_index_doc_kwargs.update( + {"klass": "MultiIndex", "target_klass": "MultiIndex or list of tuples"} +) + + +class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine): + """ + This class manages a MultiIndex by mapping label combinations to positive + integers. + """ + + _base = libindex.UInt64Engine + + def _codes_to_ints(self, codes): + """ + Transform combination(s) of uint64 in one uint64 (each), in a strictly + monotonic way (i.e. respecting the lexicographic order of integer + combinations): see BaseMultiIndexCodesEngine documentation. + + Parameters + ---------- + codes : 1- or 2-dimensional array of dtype uint64 + Combinations of integers (one per row) + + Returns + ------- + scalar or 1-dimensional array, of dtype uint64 + Integer(s) representing one combination (each). + """ + # Shift the representation of each level by the pre-calculated number + # of bits: + codes <<= self.offsets + + # Now sum and OR are in fact interchangeable. This is a simple + # composition of the (disjunct) significant bits of each level (i.e. + # each column in "codes") in a single positive integer: + if codes.ndim == 1: + # Single key + return np.bitwise_or.reduce(codes) + + # Multiple keys + return np.bitwise_or.reduce(codes, axis=1) + + +class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine): + """ + This class manages those (extreme) cases in which the number of possible + label combinations overflows the 64 bits integers, and uses an ObjectEngine + containing Python integers. 
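+
+    For intuition (an illustrative count, not an exact trace): three levels
+    of sizes 2**30, 2**20 and 2**20 need about 31 + 21 + 21 bits per packed
+    key once the null shift is included, which exceeds 64 bits, so the keys
+    are held as arbitrary-precision Python ints instead of uint64.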
+ """ + + _base = libindex.ObjectEngine + + def _codes_to_ints(self, codes): + """ + Transform combination(s) of uint64 in one Python integer (each), in a + strictly monotonic way (i.e. respecting the lexicographic order of + integer combinations): see BaseMultiIndexCodesEngine documentation. + + Parameters + ---------- + codes : 1- or 2-dimensional array of dtype uint64 + Combinations of integers (one per row) + + Returns + ------- + int, or 1-dimensional array of dtype object + Integer(s) representing one combination (each). + """ + # Shift the representation of each level by the pre-calculated number + # of bits. Since this can overflow uint64, first make sure we are + # working with Python integers: + codes = codes.astype("object") << self.offsets + + # Now sum and OR are in fact interchangeable. This is a simple + # composition of the (disjunct) significant bits of each level (i.e. + # each column in "codes") in a single positive integer (per row): + if codes.ndim == 1: + # Single key + return np.bitwise_or.reduce(codes) + + # Multiple keys + return np.bitwise_or.reduce(codes, axis=1) + + +def names_compat(meth: F) -> F: + """ + A decorator to allow either `name` or `names` keyword but not both. + + This makes it easier to share code with base class. + """ + + @wraps(meth) + def new_meth(self_or_cls, *args, **kwargs): + if "name" in kwargs and "names" in kwargs: + raise TypeError("Can only provide one of `names` and `name`") + if "name" in kwargs: + kwargs["names"] = kwargs.pop("name") + + return meth(self_or_cls, *args, **kwargs) + + return cast(F, new_meth) + + +class MultiIndex(Index): + """ + A multi-level, or hierarchical, index object for pandas objects. + + Parameters + ---------- + levels : sequence of arrays + The unique labels for each level. + codes : sequence of arrays + Integers for each level designating which label at each location. + sortorder : optional int + Level of sortedness (must be lexicographically sorted by that + level). + names : optional sequence of objects + Names for each of the index levels. (name is accepted for compat). + copy : bool, default False + Copy the meta-data. + verify_integrity : bool, default True + Check that the levels/codes are consistent and valid. + + Attributes + ---------- + names + levels + codes + nlevels + levshape + dtypes + + Methods + ------- + from_arrays + from_tuples + from_product + from_frame + set_levels + set_codes + to_frame + to_flat_index + sortlevel + droplevel + swaplevel + reorder_levels + remove_unused_levels + get_level_values + get_indexer + get_loc + get_locs + get_loc_level + drop + + See Also + -------- + MultiIndex.from_arrays : Convert list of arrays to MultiIndex. + MultiIndex.from_product : Create a MultiIndex from the cartesian product + of iterables. + MultiIndex.from_tuples : Convert list of tuples to a MultiIndex. + MultiIndex.from_frame : Make a MultiIndex from a DataFrame. + Index : The base pandas Index type. + + Notes + ----- + See the `user guide + `__ + for more. + + Examples + -------- + A new ``MultiIndex`` is typically constructed using one of the helper + methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product` + and :meth:`MultiIndex.from_tuples`. 
For example (using ``.from_arrays``): + + >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] + >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) + MultiIndex([(1, 'red'), + (1, 'blue'), + (2, 'red'), + (2, 'blue')], + names=['number', 'color']) + + See further examples for how to construct a MultiIndex in the doc strings + of the mentioned helper methods. + """ + + _hidden_attrs = Index._hidden_attrs | frozenset() + + # initialize to zero-length tuples to make everything work + _typ = "multiindex" + _names: list[Hashable | None] = [] + _levels = FrozenList() + _codes = FrozenList() + _comparables = ["names"] + + sortorder: int | None + + # -------------------------------------------------------------------- + # Constructors + + def __new__( + cls, + levels=None, + codes=None, + sortorder=None, + names=None, + dtype=None, + copy: bool = False, + name=None, + verify_integrity: bool = True, + ) -> MultiIndex: + # compat with Index + if name is not None: + names = name + if levels is None or codes is None: + raise TypeError("Must pass both levels and codes") + if len(levels) != len(codes): + raise ValueError("Length of levels and codes must be the same.") + if len(levels) == 0: + raise ValueError("Must pass non-zero number of levels/codes") + + result = object.__new__(cls) + result._cache = {} + + # we've already validated levels and codes, so shortcut here + result._set_levels(levels, copy=copy, validate=False) + result._set_codes(codes, copy=copy, validate=False) + + result._names = [None] * len(levels) + if names is not None: + # handles name validation + result._set_names(names) + + if sortorder is not None: + result.sortorder = int(sortorder) + else: + result.sortorder = sortorder + + if verify_integrity: + new_codes = result._verify_integrity() + result._codes = new_codes + + result._reset_identity() + result._references = None + + return result + + def _validate_codes(self, level: list, code: list): + """ + Reassign code values as -1 if their corresponding levels are NaN. + + Parameters + ---------- + code : list + Code to reassign. + level : list + Level to check for missing values (NaN, NaT, None). + + Returns + ------- + new code where code value = -1 if it corresponds + to a level with missing values (NaN, NaT, None). + """ + null_mask = isna(level) + if np.any(null_mask): + # error: Incompatible types in assignment + # (expression has type "ndarray[Any, dtype[Any]]", + # variable has type "List[Any]") + code = np.where(null_mask[code], -1, code) # type: ignore[assignment] + return code + + def _verify_integrity( + self, + codes: list | None = None, + levels: list | None = None, + levels_to_verify: list[int] | range | None = None, + ): + """ + Parameters + ---------- + codes : optional list + Codes to check for validity. Defaults to current codes. + levels : optional list + Levels to check for validity. Defaults to current levels. + levels_to_validate: optional list + Specifies the levels to verify. + + Raises + ------ + ValueError + If length of levels and codes don't match, if the codes for any + level would exceed level bounds, or there are any duplicate levels. + + Returns + ------- + new codes where code value = -1 if it corresponds to a + NaN level. + """ + # NOTE: Currently does not check, among other things, that cached + # nlevels matches nor that sortorder matches actually sortorder. 
+ codes = codes or self.codes + levels = levels or self.levels + if levels_to_verify is None: + levels_to_verify = range(len(levels)) + + if len(levels) != len(codes): + raise ValueError( + "Length of levels and codes must match. NOTE: " + "this index is in an inconsistent state." + ) + codes_length = len(codes[0]) + for i in levels_to_verify: + level = levels[i] + level_codes = codes[i] + + if len(level_codes) != codes_length: + raise ValueError( + f"Unequal code lengths: {[len(code_) for code_ in codes]}" + ) + if len(level_codes) and level_codes.max() >= len(level): + raise ValueError( + f"On level {i}, code max ({level_codes.max()}) >= length of " + f"level ({len(level)}). NOTE: this index is in an " + "inconsistent state" + ) + if len(level_codes) and level_codes.min() < -1: + raise ValueError(f"On level {i}, code value ({level_codes.min()}) < -1") + if not level.is_unique: + raise ValueError( + f"Level values must be unique: {list(level)} on level {i}" + ) + if self.sortorder is not None: + if self.sortorder > _lexsort_depth(self.codes, self.nlevels): + raise ValueError( + "Value for sortorder must be inferior or equal to actual " + f"lexsort_depth: sortorder {self.sortorder} " + f"with lexsort_depth {_lexsort_depth(self.codes, self.nlevels)}" + ) + + result_codes = [] + for i in range(len(levels)): + if i in levels_to_verify: + result_codes.append(self._validate_codes(levels[i], codes[i])) + else: + result_codes.append(codes[i]) + + new_codes = FrozenList(result_codes) + return new_codes + + @classmethod + def from_arrays( + cls, + arrays, + sortorder: int | None = None, + names: Sequence[Hashable] | Hashable | lib.NoDefault = lib.no_default, + ) -> MultiIndex: + """ + Convert arrays to MultiIndex. + + Parameters + ---------- + arrays : list / sequence of array-likes + Each array-like gives one level's value for each data point. + len(arrays) is the number of levels. + sortorder : int or None + Level of sortedness (must be lexicographically sorted by that + level). + names : list / sequence of str, optional + Names for the levels in the index. + + Returns + ------- + MultiIndex + + See Also + -------- + MultiIndex.from_tuples : Convert list of tuples to MultiIndex. + MultiIndex.from_product : Make a MultiIndex from cartesian product + of iterables. + MultiIndex.from_frame : Make a MultiIndex from a DataFrame. + + Examples + -------- + >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] + >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) + MultiIndex([(1, 'red'), + (1, 'blue'), + (2, 'red'), + (2, 'blue')], + names=['number', 'color']) + """ + error_msg = "Input must be a list / sequence of array-likes." 
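+        # The checks below validate the outer container, then each element,
+        # then that all levels have equal lengths, before the arrays are
+        # factorized into codes/levels.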
+ if not is_list_like(arrays): + raise TypeError(error_msg) + if is_iterator(arrays): + arrays = list(arrays) + + # Check if elements of array are list-like + for array in arrays: + if not is_list_like(array): + raise TypeError(error_msg) + + # Check if lengths of all arrays are equal or not, + # raise ValueError, if not + for i in range(1, len(arrays)): + if len(arrays[i]) != len(arrays[i - 1]): + raise ValueError("all arrays must be same length") + + codes, levels = factorize_from_iterables(arrays) + if names is lib.no_default: + names = [getattr(arr, "name", None) for arr in arrays] + + return cls( + levels=levels, + codes=codes, + sortorder=sortorder, + names=names, + verify_integrity=False, + ) + + @classmethod + @names_compat + def from_tuples( + cls, + tuples: Iterable[tuple[Hashable, ...]], + sortorder: int | None = None, + names: Sequence[Hashable] | Hashable | None = None, + ) -> MultiIndex: + """ + Convert list of tuples to MultiIndex. + + Parameters + ---------- + tuples : list / sequence of tuple-likes + Each tuple is the index of one row/column. + sortorder : int or None + Level of sortedness (must be lexicographically sorted by that + level). + names : list / sequence of str, optional + Names for the levels in the index. + + Returns + ------- + MultiIndex + + See Also + -------- + MultiIndex.from_arrays : Convert list of arrays to MultiIndex. + MultiIndex.from_product : Make a MultiIndex from cartesian product + of iterables. + MultiIndex.from_frame : Make a MultiIndex from a DataFrame. + + Examples + -------- + >>> tuples = [(1, 'red'), (1, 'blue'), + ... (2, 'red'), (2, 'blue')] + >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color')) + MultiIndex([(1, 'red'), + (1, 'blue'), + (2, 'red'), + (2, 'blue')], + names=['number', 'color']) + """ + if not is_list_like(tuples): + raise TypeError("Input must be a list / sequence of tuple-likes.") + if is_iterator(tuples): + tuples = list(tuples) + tuples = cast(Collection[tuple[Hashable, ...]], tuples) + + # handling the empty tuple cases + if len(tuples) and all(isinstance(e, tuple) and not e for e in tuples): + codes = [np.zeros(len(tuples))] + levels = [Index(com.asarray_tuplesafe(tuples, dtype=np.dtype("object")))] + return cls( + levels=levels, + codes=codes, + sortorder=sortorder, + names=names, + verify_integrity=False, + ) + + arrays: list[Sequence[Hashable]] + if len(tuples) == 0: + if names is None: + raise TypeError("Cannot infer number of levels from empty list") + # error: Argument 1 to "len" has incompatible type "Hashable"; + # expected "Sized" + arrays = [[]] * len(names) # type: ignore[arg-type] + elif isinstance(tuples, (np.ndarray, Index)): + if isinstance(tuples, Index): + tuples = np.asarray(tuples._values) + + arrays = list(lib.tuples_to_object_array(tuples).T) + elif isinstance(tuples, list): + arrays = list(lib.to_object_array_tuples(tuples).T) + else: + arrs = zip(*tuples) + arrays = cast(list[Sequence[Hashable]], arrs) + + return cls.from_arrays(arrays, sortorder=sortorder, names=names) + + @classmethod + def from_product( + cls, + iterables: Sequence[Iterable[Hashable]], + sortorder: int | None = None, + names: Sequence[Hashable] | Hashable | lib.NoDefault = lib.no_default, + ) -> MultiIndex: + """ + Make a MultiIndex from the cartesian product of multiple iterables. + + Parameters + ---------- + iterables : list / sequence of iterables + Each iterable has unique labels for each level of the index. + sortorder : int or None + Level of sortedness (must be lexicographically sorted by that + level). 
+ names : list / sequence of str, optional + Names for the levels in the index. + If not explicitly provided, names will be inferred from the + elements of iterables if an element has a name attribute. + + Returns + ------- + MultiIndex + + See Also + -------- + MultiIndex.from_arrays : Convert list of arrays to MultiIndex. + MultiIndex.from_tuples : Convert list of tuples to MultiIndex. + MultiIndex.from_frame : Make a MultiIndex from a DataFrame. + + Examples + -------- + >>> numbers = [0, 1, 2] + >>> colors = ['green', 'purple'] + >>> pd.MultiIndex.from_product([numbers, colors], + ... names=['number', 'color']) + MultiIndex([(0, 'green'), + (0, 'purple'), + (1, 'green'), + (1, 'purple'), + (2, 'green'), + (2, 'purple')], + names=['number', 'color']) + """ + from pandas.core.reshape.util import cartesian_product + + if not is_list_like(iterables): + raise TypeError("Input must be a list / sequence of iterables.") + if is_iterator(iterables): + iterables = list(iterables) + + codes, levels = factorize_from_iterables(iterables) + if names is lib.no_default: + names = [getattr(it, "name", None) for it in iterables] + + # codes are all ndarrays, so cartesian_product is lossless + codes = cartesian_product(codes) + return cls(levels, codes, sortorder=sortorder, names=names) + + @classmethod + def from_frame( + cls, + df: DataFrame, + sortorder: int | None = None, + names: Sequence[Hashable] | Hashable | None = None, + ) -> MultiIndex: + """ + Make a MultiIndex from a DataFrame. + + Parameters + ---------- + df : DataFrame + DataFrame to be converted to MultiIndex. + sortorder : int, optional + Level of sortedness (must be lexicographically sorted by that + level). + names : list-like, optional + If no names are provided, use the column names, or tuple of column + names if the columns is a MultiIndex. If a sequence, overwrite + names with the given sequence. + + Returns + ------- + MultiIndex + The MultiIndex representation of the given DataFrame. + + See Also + -------- + MultiIndex.from_arrays : Convert list of arrays to MultiIndex. + MultiIndex.from_tuples : Convert list of tuples to MultiIndex. + MultiIndex.from_product : Make a MultiIndex from cartesian product + of iterables. + + Examples + -------- + >>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'], + ... ['NJ', 'Temp'], ['NJ', 'Precip']], + ... columns=['a', 'b']) + >>> df + a b + 0 HI Temp + 1 HI Precip + 2 NJ Temp + 3 NJ Precip + + >>> pd.MultiIndex.from_frame(df) + MultiIndex([('HI', 'Temp'), + ('HI', 'Precip'), + ('NJ', 'Temp'), + ('NJ', 'Precip')], + names=['a', 'b']) + + Using explicit names, instead of the column names + + >>> pd.MultiIndex.from_frame(df, names=['state', 'observation']) + MultiIndex([('HI', 'Temp'), + ('HI', 'Precip'), + ('NJ', 'Temp'), + ('NJ', 'Precip')], + names=['state', 'observation']) + """ + if not isinstance(df, ABCDataFrame): + raise TypeError("Input must be a DataFrame") + + column_names, columns = zip(*df.items()) + names = column_names if names is None else names + return cls.from_arrays(columns, sortorder=sortorder, names=names) + + # -------------------------------------------------------------------- + + @cache_readonly + def _values(self) -> np.ndarray: + # We override here, since our parent uses _data, which we don't use. 
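+        # For each level: materialize the level values as an ndarray
+        # (densifying Categorical and boxing EA/datetime-like values to
+        # object), expand with take_nd using that level's codes (-1 maps to
+        # the level's NA value), then zip the columns row-wise into tuples.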
+ values = [] + + for i in range(self.nlevels): + index = self.levels[i] + codes = self.codes[i] + + vals = index + if isinstance(vals.dtype, CategoricalDtype): + vals = cast("CategoricalIndex", vals) + vals = vals._data._internal_get_values() + + if isinstance(vals.dtype, ExtensionDtype) or isinstance( + vals, (ABCDatetimeIndex, ABCTimedeltaIndex) + ): + vals = vals.astype(object) + + vals = np.array(vals, copy=False) + vals = algos.take_nd(vals, codes, fill_value=index._na_value) + values.append(vals) + + arr = lib.fast_zip(values) + return arr + + @property + def values(self) -> np.ndarray: + return self._values + + @property + def array(self): + """ + Raises a ValueError for `MultiIndex` because there's no single + array backing a MultiIndex. + + Raises + ------ + ValueError + """ + raise ValueError( + "MultiIndex has no single backing array. Use " + "'MultiIndex.to_numpy()' to get a NumPy array of tuples." + ) + + @cache_readonly + def dtypes(self) -> Series: + """ + Return the dtypes as a Series for the underlying MultiIndex. + + Examples + -------- + >>> idx = pd.MultiIndex.from_product([(0, 1, 2), ('green', 'purple')], + ... names=['number', 'color']) + >>> idx + MultiIndex([(0, 'green'), + (0, 'purple'), + (1, 'green'), + (1, 'purple'), + (2, 'green'), + (2, 'purple')], + names=['number', 'color']) + >>> idx.dtypes + number int64 + color object + dtype: object + """ + from pandas import Series + + names = com.fill_missing_names([level.name for level in self.levels]) + return Series([level.dtype for level in self.levels], index=Index(names)) + + def __len__(self) -> int: + return len(self.codes[0]) + + @property + def size(self) -> int: + """ + Return the number of elements in the underlying data. + """ + # override Index.size to avoid materializing _values + return len(self) + + # -------------------------------------------------------------------- + # Levels Methods + + @cache_readonly + def levels(self) -> FrozenList: + # Use cache_readonly to ensure that self.get_locs doesn't repeatedly + # create new IndexEngine + # https://github.com/pandas-dev/pandas/issues/31648 + result = [x._rename(name=name) for x, name in zip(self._levels, self._names)] + for level in result: + # disallow midx.levels[0].name = "foo" + level._no_setting_name = True + return FrozenList(result) + + def _set_levels( + self, + levels, + *, + level=None, + copy: bool = False, + validate: bool = True, + verify_integrity: bool = False, + ) -> None: + # This is NOT part of the levels property because it should be + # externally not allowed to set levels. 
User beware if you change + # _levels directly + if validate: + if len(levels) == 0: + raise ValueError("Must set non-zero number of levels.") + if level is None and len(levels) != self.nlevels: + raise ValueError("Length of levels must match number of levels.") + if level is not None and len(levels) != len(level): + raise ValueError("Length of levels must match length of level.") + + if level is None: + new_levels = FrozenList( + ensure_index(lev, copy=copy)._view() for lev in levels + ) + level_numbers = list(range(len(new_levels))) + else: + level_numbers = [self._get_level_number(lev) for lev in level] + new_levels_list = list(self._levels) + for lev_num, lev in zip(level_numbers, levels): + new_levels_list[lev_num] = ensure_index(lev, copy=copy)._view() + new_levels = FrozenList(new_levels_list) + + if verify_integrity: + new_codes = self._verify_integrity( + levels=new_levels, levels_to_verify=level_numbers + ) + self._codes = new_codes + + names = self.names + self._levels = new_levels + if any(names): + self._set_names(names) + + self._reset_cache() + + def set_levels( + self, levels, *, level=None, verify_integrity: bool = True + ) -> MultiIndex: + """ + Set new levels on MultiIndex. Defaults to returning new index. + + Parameters + ---------- + levels : sequence or list of sequence + New level(s) to apply. + level : int, level name, or sequence of int/level names (default None) + Level(s) to set (None for all levels). + verify_integrity : bool, default True + If True, checks that levels and codes are compatible. + + Returns + ------- + MultiIndex + + Examples + -------- + >>> idx = pd.MultiIndex.from_tuples( + ... [ + ... (1, "one"), + ... (1, "two"), + ... (2, "one"), + ... (2, "two"), + ... (3, "one"), + ... (3, "two") + ... ], + ... names=["foo", "bar"] + ... ) + >>> idx + MultiIndex([(1, 'one'), + (1, 'two'), + (2, 'one'), + (2, 'two'), + (3, 'one'), + (3, 'two')], + names=['foo', 'bar']) + + >>> idx.set_levels([['a', 'b', 'c'], [1, 2]]) + MultiIndex([('a', 1), + ('a', 2), + ('b', 1), + ('b', 2), + ('c', 1), + ('c', 2)], + names=['foo', 'bar']) + >>> idx.set_levels(['a', 'b', 'c'], level=0) + MultiIndex([('a', 'one'), + ('a', 'two'), + ('b', 'one'), + ('b', 'two'), + ('c', 'one'), + ('c', 'two')], + names=['foo', 'bar']) + >>> idx.set_levels(['a', 'b'], level='bar') + MultiIndex([(1, 'a'), + (1, 'b'), + (2, 'a'), + (2, 'b'), + (3, 'a'), + (3, 'b')], + names=['foo', 'bar']) + + If any of the levels passed to ``set_levels()`` exceeds the + existing length, all of the values from that argument will + be stored in the MultiIndex levels, though the values will + be truncated in the MultiIndex output. + + >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]) + MultiIndex([('a', 1), + ('a', 2), + ('b', 1), + ('b', 2), + ('c', 1), + ('c', 2)], + names=['foo', 'bar']) + >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels + FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]]) + """ + + if isinstance(levels, Index): + pass + elif is_array_like(levels): + levels = Index(levels) + elif is_list_like(levels): + levels = list(levels) + + level, levels = _require_listlike(level, levels, "Levels") + idx = self._view() + idx._reset_identity() + idx._set_levels( + levels, level=level, validate=True, verify_integrity=verify_integrity + ) + return idx + + @property + def nlevels(self) -> int: + """ + Integer number of levels in this MultiIndex. 
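+
+        This counts the index levels, not the number of entries; use
+        ``len(mi)`` for the latter.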
+ + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']]) + >>> mi + MultiIndex([('a', 'b', 'c')], + ) + >>> mi.nlevels + 3 + """ + return len(self._levels) + + @property + def levshape(self) -> Shape: + """ + A tuple with the length of each level. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']]) + >>> mi + MultiIndex([('a', 'b', 'c')], + ) + >>> mi.levshape + (1, 1, 1) + """ + return tuple(len(x) for x in self.levels) + + # -------------------------------------------------------------------- + # Codes Methods + + @property + def codes(self): + return self._codes + + def _set_codes( + self, + codes, + *, + level=None, + copy: bool = False, + validate: bool = True, + verify_integrity: bool = False, + ) -> None: + if validate: + if level is None and len(codes) != self.nlevels: + raise ValueError("Length of codes must match number of levels") + if level is not None and len(codes) != len(level): + raise ValueError("Length of codes must match length of levels.") + + level_numbers: list[int] | range + if level is None: + new_codes = FrozenList( + _coerce_indexer_frozen(level_codes, lev, copy=copy).view() + for lev, level_codes in zip(self._levels, codes) + ) + level_numbers = range(len(new_codes)) + else: + level_numbers = [self._get_level_number(lev) for lev in level] + new_codes_list = list(self._codes) + for lev_num, level_codes in zip(level_numbers, codes): + lev = self.levels[lev_num] + new_codes_list[lev_num] = _coerce_indexer_frozen( + level_codes, lev, copy=copy + ) + new_codes = FrozenList(new_codes_list) + + if verify_integrity: + new_codes = self._verify_integrity( + codes=new_codes, levels_to_verify=level_numbers + ) + + self._codes = new_codes + + self._reset_cache() + + def set_codes(self, codes, *, level=None, verify_integrity: bool = True): + """ + Set new codes on MultiIndex. Defaults to returning new index. + + Parameters + ---------- + codes : sequence or list of sequence + New codes to apply. + level : int, level name, or sequence of int/level names (default None) + Level(s) to set (None for all levels). + verify_integrity : bool, default True + If True, checks that levels and codes are compatible. + + Returns + ------- + new index (of same type and class...etc) or None + The same type as the caller or None if ``inplace=True``. + + Examples + -------- + >>> idx = pd.MultiIndex.from_tuples( + ... [(1, "one"), (1, "two"), (2, "one"), (2, "two")], names=["foo", "bar"] + ... 
) + >>> idx + MultiIndex([(1, 'one'), + (1, 'two'), + (2, 'one'), + (2, 'two')], + names=['foo', 'bar']) + + >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]]) + MultiIndex([(2, 'one'), + (1, 'one'), + (2, 'two'), + (1, 'two')], + names=['foo', 'bar']) + >>> idx.set_codes([1, 0, 1, 0], level=0) + MultiIndex([(2, 'one'), + (1, 'two'), + (2, 'one'), + (1, 'two')], + names=['foo', 'bar']) + >>> idx.set_codes([0, 0, 1, 1], level='bar') + MultiIndex([(1, 'one'), + (1, 'one'), + (2, 'two'), + (2, 'two')], + names=['foo', 'bar']) + >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1]) + MultiIndex([(2, 'one'), + (1, 'one'), + (2, 'two'), + (1, 'two')], + names=['foo', 'bar']) + """ + + level, codes = _require_listlike(level, codes, "Codes") + idx = self._view() + idx._reset_identity() + idx._set_codes(codes, level=level, verify_integrity=verify_integrity) + return idx + + # -------------------------------------------------------------------- + # Index Internals + + @cache_readonly + def _engine(self): + # Calculate the number of bits needed to represent labels in each + # level, as log2 of their sizes: + # NaN values are shifted to 1 and missing values in other while + # calculating the indexer are shifted to 0 + sizes = np.ceil( + np.log2( + [len(level) + libindex.multiindex_nulls_shift for level in self.levels] + ) + ) + + # Sum bit counts, starting from the _right_.... + lev_bits = np.cumsum(sizes[::-1])[::-1] + + # ... in order to obtain offsets such that sorting the combination of + # shifted codes (one for each level, resulting in a unique integer) is + # equivalent to sorting lexicographically the codes themselves. Notice + # that each level needs to be shifted by the number of bits needed to + # represent the _previous_ ones: + offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64") + + # Check the total number of bits needed for our representation: + if lev_bits[0] > 64: + # The levels would overflow a 64 bit uint - use Python integers: + return MultiIndexPyIntEngine(self.levels, self.codes, offsets) + return MultiIndexUIntEngine(self.levels, self.codes, offsets) + + # Return type "Callable[..., MultiIndex]" of "_constructor" incompatible with return + # type "Type[MultiIndex]" in supertype "Index" + @property + def _constructor(self) -> Callable[..., MultiIndex]: # type: ignore[override] + return type(self).from_tuples + + @doc(Index._shallow_copy) + def _shallow_copy(self, values: np.ndarray, name=lib.no_default) -> MultiIndex: + names = name if name is not lib.no_default else self.names + + return type(self).from_tuples(values, sortorder=None, names=names) + + def _view(self) -> MultiIndex: + result = type(self)( + levels=self.levels, + codes=self.codes, + sortorder=self.sortorder, + names=self.names, + verify_integrity=False, + ) + result._cache = self._cache.copy() + result._cache.pop("levels", None) # GH32669 + return result + + # -------------------------------------------------------------------- + + # error: Signature of "copy" incompatible with supertype "Index" + def copy( # type: ignore[override] + self, + names=None, + deep: bool = False, + name=None, + ): + """ + Make a copy of this object. + + Names, dtype, levels and codes can be passed and will be set on new copy. + + Parameters + ---------- + names : sequence, optional + deep : bool, default False + name : Label + Kept for compatibility with 1-dimensional Index. Should not be used. 
+ + Returns + ------- + MultiIndex + + Notes + ----- + In most cases, there should be no functional difference from using + ``deep``, but if ``deep`` is passed it will attempt to deepcopy. + This could be potentially expensive on large MultiIndex objects. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']]) + >>> mi + MultiIndex([('a', 'b', 'c')], + ) + >>> mi.copy() + MultiIndex([('a', 'b', 'c')], + ) + """ + names = self._validate_names(name=name, names=names, deep=deep) + keep_id = not deep + levels, codes = None, None + + if deep: + from copy import deepcopy + + levels = deepcopy(self.levels) + codes = deepcopy(self.codes) + + levels = levels if levels is not None else self.levels + codes = codes if codes is not None else self.codes + + new_index = type(self)( + levels=levels, + codes=codes, + sortorder=self.sortorder, + names=names, + verify_integrity=False, + ) + new_index._cache = self._cache.copy() + new_index._cache.pop("levels", None) # GH32669 + if keep_id: + new_index._id = self._id + return new_index + + def __array__(self, dtype=None) -> np.ndarray: + """the array interface, return my values""" + return self.values + + def view(self, cls=None): + """this is defined as a copy with the same identity""" + result = self.copy() + result._id = self._id + return result + + @doc(Index.__contains__) + def __contains__(self, key: Any) -> bool: + hash(key) + try: + self.get_loc(key) + return True + except (LookupError, TypeError, ValueError): + return False + + @cache_readonly + def dtype(self) -> np.dtype: + return np.dtype("O") + + def _is_memory_usage_qualified(self) -> bool: + """return a boolean if we need a qualified .info display""" + + def f(level) -> bool: + return "mixed" in level or "string" in level or "unicode" in level + + return any(f(level) for level in self._inferred_type_levels) + + # Cannot determine type of "memory_usage" + @doc(Index.memory_usage) # type: ignore[has-type] + def memory_usage(self, deep: bool = False) -> int: + # we are overwriting our base class to avoid + # computing .values here which could materialize + # a tuple representation unnecessarily + return self._nbytes(deep) + + @cache_readonly + def nbytes(self) -> int: + """return the number of bytes in the underlying data""" + return self._nbytes(False) + + def _nbytes(self, deep: bool = False) -> int: + """ + return the number of bytes in the underlying data + deeply introspect the level data if deep=True + + include the engine hashtable + + *this is in internal routine* + + """ + # for implementations with no useful getsizeof (PyPy) + objsize = 24 + + level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels) + label_nbytes = sum(i.nbytes for i in self.codes) + names_nbytes = sum(getsizeof(i, objsize) for i in self.names) + result = level_nbytes + label_nbytes + names_nbytes + + # include our engine hashtable + result += self._engine.sizeof(deep=deep) + return result + + # -------------------------------------------------------------------- + # Rendering Methods + + def _formatter_func(self, tup): + """ + Formats each item in tup according to its level's formatter function. 
+ """ + formatter_funcs = [level._formatter_func for level in self.levels] + return tuple(func(val) for func, val in zip(formatter_funcs, tup)) + + def _format_native_types( + self, *, na_rep: str = "nan", **kwargs + ) -> npt.NDArray[np.object_]: + new_levels = [] + new_codes = [] + + # go through the levels and format them + for level, level_codes in zip(self.levels, self.codes): + level_strs = level._format_native_types(na_rep=na_rep, **kwargs) + # add nan values, if there are any + mask = level_codes == -1 + if mask.any(): + nan_index = len(level_strs) + # numpy 1.21 deprecated implicit string casting + level_strs = level_strs.astype(str) + level_strs = np.append(level_strs, na_rep) + assert not level_codes.flags.writeable # i.e. copy is needed + level_codes = level_codes.copy() # make writeable + level_codes[mask] = nan_index + new_levels.append(level_strs) + new_codes.append(level_codes) + + if len(new_levels) == 1: + # a single-level multi-index + return Index(new_levels[0].take(new_codes[0]))._format_native_types() + else: + # reconstruct the multi-index + mi = MultiIndex( + levels=new_levels, + codes=new_codes, + names=self.names, + sortorder=self.sortorder, + verify_integrity=False, + ) + return mi._values + + def format( + self, + name: bool | None = None, + formatter: Callable | None = None, + na_rep: str | None = None, + names: bool = False, + space: int = 2, + sparsify=None, + adjoin: bool = True, + ) -> list: + if name is not None: + names = name + + if len(self) == 0: + return [] + + stringified_levels = [] + for lev, level_codes in zip(self.levels, self.codes): + na = na_rep if na_rep is not None else _get_na_rep(lev.dtype) + + if len(lev) > 0: + formatted = lev.take(level_codes).format(formatter=formatter) + + # we have some NA + mask = level_codes == -1 + if mask.any(): + formatted = np.array(formatted, dtype=object) + formatted[mask] = na + formatted = formatted.tolist() + + else: + # weird all NA case + formatted = [ + pprint_thing(na if isna(x) else x, escape_chars=("\t", "\r", "\n")) + for x in algos.take_nd(lev._values, level_codes) + ] + stringified_levels.append(formatted) + + result_levels = [] + for lev, lev_name in zip(stringified_levels, self.names): + level = [] + + if names: + level.append( + pprint_thing(lev_name, escape_chars=("\t", "\r", "\n")) + if lev_name is not None + else "" + ) + + level.extend(np.array(lev, dtype=object)) + result_levels.append(level) + + if sparsify is None: + sparsify = get_option("display.multi_sparse") + + if sparsify: + sentinel: Literal[""] | bool | lib.NoDefault = "" + # GH3547 use value of sparsify as sentinel if it's "Falsey" + assert isinstance(sparsify, bool) or sparsify is lib.no_default + if sparsify in [False, lib.no_default]: + sentinel = sparsify + # little bit of a kludge job for #1217 + result_levels = sparsify_labels( + result_levels, start=int(names), sentinel=sentinel + ) + + if adjoin: + from pandas.io.formats.format import get_adjustment + + adj = get_adjustment() + return adj.adjoin(space, *result_levels).split("\n") + else: + return result_levels + + # -------------------------------------------------------------------- + # Names Methods + + def _get_names(self) -> FrozenList: + return FrozenList(self._names) + + def _set_names(self, names, *, level=None, validate: bool = True): + """ + Set new names on index. Each name has to be a hashable type. 
+ + Parameters + ---------- + values : str or sequence + name(s) to set + level : int, level name, or sequence of int/level names (default None) + If the index is a MultiIndex (hierarchical), level(s) to set (None + for all levels). Otherwise level must be None + validate : bool, default True + validate that the names match level lengths + + Raises + ------ + TypeError if each name is not hashable. + + Notes + ----- + sets names on levels. WARNING: mutates! + + Note that you generally want to set this *after* changing levels, so + that it only acts on copies + """ + # GH 15110 + # Don't allow a single string for names in a MultiIndex + if names is not None and not is_list_like(names): + raise ValueError("Names should be list-like for a MultiIndex") + names = list(names) + + if validate: + if level is not None and len(names) != len(level): + raise ValueError("Length of names must match length of level.") + if level is None and len(names) != self.nlevels: + raise ValueError( + "Length of names must match number of levels in MultiIndex." + ) + + if level is None: + level = range(self.nlevels) + else: + level = [self._get_level_number(lev) for lev in level] + + # set the name + for lev, name in zip(level, names): + if name is not None: + # GH 20527 + # All items in 'names' need to be hashable: + if not is_hashable(name): + raise TypeError( + f"{type(self).__name__}.name must be a hashable type" + ) + self._names[lev] = name + + # If .levels has been accessed, the names in our cache will be stale. + self._reset_cache() + + names = property( + fset=_set_names, + fget=_get_names, + doc=""" + Names of levels in MultiIndex. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays( + ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']) + >>> mi + MultiIndex([(1, 3, 5), + (2, 4, 6)], + names=['x', 'y', 'z']) + >>> mi.names + FrozenList(['x', 'y', 'z']) + """, + ) + + # -------------------------------------------------------------------- + + @cache_readonly + def inferred_type(self) -> str: + return "mixed" + + def _get_level_number(self, level) -> int: + count = self.names.count(level) + if (count > 1) and not is_integer(level): + raise ValueError( + f"The name {level} occurs multiple times, use a level number" + ) + try: + level = self.names.index(level) + except ValueError as err: + if not is_integer(level): + raise KeyError(f"Level {level} not found") from err + if level < 0: + level += self.nlevels + if level < 0: + orig_level = level - self.nlevels + raise IndexError( + f"Too many levels: Index has only {self.nlevels} levels, " + f"{orig_level} is not a valid level number" + ) from err + # Note: levels are zero-based + elif level >= self.nlevels: + raise IndexError( + f"Too many levels: Index has only {self.nlevels} levels, " + f"not {level + 1}" + ) from err + return level + + @cache_readonly + def is_monotonic_increasing(self) -> bool: + """ + Return a boolean if the values are equal or increasing. + """ + if any(-1 in code for code in self.codes): + return False + + if all(level.is_monotonic_increasing for level in self.levels): + # If each level is sorted, we can operate on the codes directly. GH27495 + return libalgos.is_lexsorted( + [x.astype("int64", copy=False) for x in self.codes] + ) + + # reversed() because lexsort() wants the most significant key last. 
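+ # A short sketch of the naming rules above, assuming pandas as ``pd``:
+ # >>> mi = pd.MultiIndex.from_arrays([[1, 1, 2], ['a', 'b', 'a']])
+ # >>> mi.names = ['num', 'letter']  # list-like, one hashable name per level
+ # >>> mi.names = 'num'              # would raise ValueError: not list-like
+ # >>> mi.is_monotonic_increasing    # True via the lexsorted-codes fast path
+ # True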
+ values = [
+ self._get_level_values(i)._values for i in reversed(range(len(self.levels)))
+ ]
+ try:
+ # error: Argument 1 to "lexsort" has incompatible type
+ # "List[Union[ExtensionArray, ndarray[Any, Any]]]";
+ # expected "Union[_SupportsArray[dtype[Any]],
+ # _NestedSequence[_SupportsArray[dtype[Any]]], bool,
+ # int, float, complex, str, bytes, _NestedSequence[Union
+ # [bool, int, float, complex, str, bytes]]]"
+ sort_order = np.lexsort(values) # type: ignore[arg-type]
+ return Index(sort_order).is_monotonic_increasing
+ except TypeError:
+ # we have mixed types and np.lexsort is not happy
+ return Index(self._values).is_monotonic_increasing
+
+ @cache_readonly
+ def is_monotonic_decreasing(self) -> bool:
+ """
+ Return a boolean if the values are equal or decreasing.
+ """
+ # monotonic decreasing if and only if reverse is monotonic increasing
+ return self[::-1].is_monotonic_increasing
+
+ @cache_readonly
+ def _inferred_type_levels(self) -> list[str]:
+ """return a list of the inferred types, one for each level"""
+ return [i.inferred_type for i in self.levels]
+
+ @doc(Index.duplicated)
+ def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]:
+ shape = tuple(len(lev) for lev in self.levels)
+ ids = get_group_index(self.codes, shape, sort=False, xnull=False)
+
+ return duplicated(ids, keep)
+
+ # error: Cannot override final attribute "_duplicated"
+ # (previously declared in base class "IndexOpsMixin")
+ _duplicated = duplicated # type: ignore[misc]
+
+ def fillna(self, value=None, downcast=None):
+ """
+ fillna is not implemented for MultiIndex
+ """
+ raise NotImplementedError("fillna is not defined for MultiIndex")
+
+ @doc(Index.dropna)
+ def dropna(self, how: AnyAll = "any") -> MultiIndex:
+ nans = [level_codes == -1 for level_codes in self.codes]
+ if how == "any":
+ indexer = np.any(nans, axis=0)
+ elif how == "all":
+ indexer = np.all(nans, axis=0)
+ else:
+ raise ValueError(f"invalid how option: {how}")
+
+ new_codes = [level_codes[~indexer] for level_codes in self.codes]
+ return self.set_codes(codes=new_codes)
+
+ def _get_level_values(self, level: int, unique: bool = False) -> Index:
+ """
+ Return vector of label values for requested level,
+ equal to the length of the index
+
+ **this is an internal method**
+
+ Parameters
+ ----------
+ level : int
+ unique : bool, default False
+ if True, drop duplicated values
+
+ Returns
+ -------
+ Index
+ """
+ lev = self.levels[level]
+ level_codes = self.codes[level]
+ name = self._names[level]
+ if unique:
+ level_codes = algos.unique(level_codes)
+ filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value)
+ return lev._shallow_copy(filled, name=name)
+
+ def get_level_values(self, level):
+ """
+ Return vector of label values for requested level.
+
+ Length of returned vector is equal to the length of the index.
+
+ Parameters
+ ----------
+ level : int or str
+ ``level`` is either the integer position of the level in the
+ MultiIndex, or the name of the level.
+
+ Returns
+ -------
+ Index
+ Values is a level of this MultiIndex converted to
+ a single :class:`Index` (or subclass thereof).
+
+ Notes
+ -----
+ If the level contains missing values, the result may be cast to
+ ``float`` with missing values specified as ``NaN``. This is because
+ the level is converted to a regular ``Index``.
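+ # A minimal sketch of ``duplicated`` and ``dropna`` above, assuming pandas
+ # as ``pd`` and NumPy as ``np``:
+ # >>> mi = pd.MultiIndex.from_arrays([[1, 1, np.nan], ['a', 'a', 'b']])
+ # >>> mi.duplicated()        # second (1.0, 'a') repeats the first
+ # array([False,  True, False])
+ # >>> mi.dropna(how='any')   # drops rows holding a -1 level code
+ # MultiIndex([(1.0, 'a'),
+ #             (1.0, 'a')],
+ #            )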
+ + Examples + -------- + Create a MultiIndex: + + >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def'))) + >>> mi.names = ['level_1', 'level_2'] + + Get level values by supplying level as either integer or name: + + >>> mi.get_level_values(0) + Index(['a', 'b', 'c'], dtype='object', name='level_1') + >>> mi.get_level_values('level_2') + Index(['d', 'e', 'f'], dtype='object', name='level_2') + + If a level contains missing values, the return type of the level + may be cast to ``float``. + + >>> pd.MultiIndex.from_arrays([[1, None, 2], [3, 4, 5]]).dtypes + level_0 int64 + level_1 int64 + dtype: object + >>> pd.MultiIndex.from_arrays([[1, None, 2], [3, 4, 5]]).get_level_values(0) + Index([1.0, nan, 2.0], dtype='float64') + """ + level = self._get_level_number(level) + values = self._get_level_values(level) + return values + + @doc(Index.unique) + def unique(self, level=None): + if level is None: + return self.drop_duplicates() + else: + level = self._get_level_number(level) + return self._get_level_values(level=level, unique=True) + + def to_frame( + self, + index: bool = True, + name=lib.no_default, + allow_duplicates: bool = False, + ) -> DataFrame: + """ + Create a DataFrame with the levels of the MultiIndex as columns. + + Column ordering is determined by the DataFrame constructor with data as + a dict. + + Parameters + ---------- + index : bool, default True + Set the index of the returned DataFrame as the original MultiIndex. + + name : list / sequence of str, optional + The passed names should substitute index level names. + + allow_duplicates : bool, optional default False + Allow duplicate column labels to be created. + + .. versionadded:: 1.5.0 + + Returns + ------- + DataFrame + + See Also + -------- + DataFrame : Two-dimensional, size-mutable, potentially heterogeneous + tabular data. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([['a', 'b'], ['c', 'd']]) + >>> mi + MultiIndex([('a', 'c'), + ('b', 'd')], + ) + + >>> df = mi.to_frame() + >>> df + 0 1 + a c a c + b d b d + + >>> df = mi.to_frame(index=False) + >>> df + 0 1 + 0 a c + 1 b d + + >>> df = mi.to_frame(name=['x', 'y']) + >>> df + x y + a c a c + b d b d + """ + from pandas import DataFrame + + if name is not lib.no_default: + if not is_list_like(name): + raise TypeError("'name' must be a list / sequence of column names.") + + if len(name) != len(self.levels): + raise ValueError( + "'name' should have same length as number of levels on index." + ) + idx_names = name + else: + idx_names = self._get_level_names() + + if not allow_duplicates and len(set(idx_names)) != len(idx_names): + raise ValueError( + "Cannot create duplicate column labels if allow_duplicates is False" + ) + + # Guarantee resulting column order - PY36+ dict maintains insertion order + result = DataFrame( + {level: self._get_level_values(level) for level in range(len(self.levels))}, + copy=False, + ) + result.columns = idx_names + + if index: + result.index = self + return result + + # error: Return type "Index" of "to_flat_index" incompatible with return type + # "MultiIndex" in supertype "Index" + def to_flat_index(self) -> Index: # type: ignore[override] + """ + Convert a MultiIndex to an Index of Tuples containing the level values. + + Returns + ------- + pd.Index + Index with the MultiIndex data represented in Tuples. + + See Also + -------- + MultiIndex.from_tuples : Convert flat index back to MultiIndex. + + Notes + ----- + This method will simply return the caller if called by anything other + than a MultiIndex. 
+
+ Examples
+ --------
+ >>> index = pd.MultiIndex.from_product(
+ ... [['foo', 'bar'], ['baz', 'qux']],
+ ... names=['a', 'b'])
+ >>> index.to_flat_index()
+ Index([('foo', 'baz'), ('foo', 'qux'),
+ ('bar', 'baz'), ('bar', 'qux')],
+ dtype='object')
+ """
+ return Index(self._values, tupleize_cols=False)
+
+ def _is_lexsorted(self) -> bool:
+ """
+ Return True if the codes are lexicographically sorted.
+
+ Returns
+ -------
+ bool
+
+ Examples
+ --------
+ In the below examples, the first level of the MultiIndex is sorted because
+ a<b<c, so there is no need to look at the next level.
+
+ >>> pd.MultiIndex.from_arrays([['a', 'b', 'c'],
+ ... ['d', 'e', 'f']])._is_lexsorted()
+ True
+ >>> pd.MultiIndex.from_arrays([['a', 'b', 'c'],
+ ... ['d', 'f', 'e']])._is_lexsorted()
+ True
+
+ In case there is a tie, the lexicographical sorting looks
+ at the next level of the MultiIndex.
+
+ >>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'b', 'c']])._is_lexsorted()
+ True
+ >>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'c', 'b']])._is_lexsorted()
+ False
+ >>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],
+ ... ['aa', 'bb', 'aa', 'bb']])._is_lexsorted()
+ True
+ >>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],
+ ... ['bb', 'aa', 'aa', 'bb']])._is_lexsorted()
+ False
+ """
+ return self._lexsort_depth == self.nlevels
+
+ @cache_readonly
+ def _lexsort_depth(self) -> int:
+ """
+ Compute and return the lexsort_depth, the number of levels of the
+ MultiIndex that are sorted lexically
+
+ Returns
+ -------
+ int
+ """
+ if self.sortorder is not None:
+ return self.sortorder
+ return _lexsort_depth(self.codes, self.nlevels)
+
+ def _sort_levels_monotonic(self, raise_if_incomparable: bool = False) -> MultiIndex:
+ """
+ This is an *internal* function.
+
+ Create a new MultiIndex from the current to monotonically sorted
+ items IN the levels. This does not actually make the entire MultiIndex
+ monotonic, JUST the levels.
+
+ The resulting MultiIndex will have the same outward
+ appearance, meaning the same .values and ordering. It will also
+ be .equals() to the original.
+
+ Returns
+ -------
+ MultiIndex
+
+ Examples
+ --------
+ >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
+ ... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
+ >>> mi
+ MultiIndex([('a', 'bb'),
+ ('a', 'aa'),
+ ('b', 'bb'),
+ ('b', 'aa')],
+ )
+
+ >>> mi.sort_values()
+ MultiIndex([('a', 'aa'),
+ ('a', 'bb'),
+ ('b', 'aa'),
+ ('b', 'bb')],
+ )
+ """
+ if self._is_lexsorted() and self.is_monotonic_increasing:
+ return self
+
+ new_levels = []
+ new_codes = []
+
+ for lev, level_codes in zip(self.levels, self.codes):
+ if not lev.is_monotonic_increasing:
+ try:
+ # indexer to reorder the levels
+ indexer = lev.argsort()
+ except TypeError:
+ if raise_if_incomparable:
+ raise
+ else:
+ lev = lev.take(indexer)
+
+ # indexer to reorder the level codes
+ indexer = ensure_platform_int(indexer)
+ ri = lib.get_reverse_indexer(indexer, len(indexer))
+ level_codes = algos.take_nd(ri, level_codes)
+
+ new_levels.append(lev)
+ new_codes.append(level_codes)
+
+ return MultiIndex(
+ new_levels,
+ new_codes,
+ names=self.names,
+ sortorder=self.sortorder,
+ verify_integrity=False,
+ )
+
+ def remove_unused_levels(self) -> MultiIndex:
+ """
+ Create new MultiIndex from current that removes unused levels.
+
+ Unused level(s) means levels that are not expressed in the
+ labels. The resulting MultiIndex will have the same outward
+ appearance, meaning the same .values and ordering. It will
+ also be .equals() to the original.
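+ # A minimal sketch of the lexsort-depth machinery above, assuming pandas
+ # as ``pd`` (``_lexsort_depth`` is private and shown for illustration only):
+ # >>> mi = pd.MultiIndex.from_arrays([list('ba'), list('xy')])
+ # >>> mi._lexsort_depth                # level-0 codes [1, 0] are unsorted
+ # 0
+ # >>> mi.sort_values()._lexsort_depth  # full depth once sorted
+ # 2
+ # >>> pd.Series([1, 2], index=mi).loc['a':'b']  # raises UnsortedIndexError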
+ + Returns + ------- + MultiIndex + + Examples + -------- + >>> mi = pd.MultiIndex.from_product([range(2), list('ab')]) + >>> mi + MultiIndex([(0, 'a'), + (0, 'b'), + (1, 'a'), + (1, 'b')], + ) + + >>> mi[2:] + MultiIndex([(1, 'a'), + (1, 'b')], + ) + + The 0 from the first level is not represented + and can be removed + + >>> mi2 = mi[2:].remove_unused_levels() + >>> mi2.levels + FrozenList([[1], ['a', 'b']]) + """ + new_levels = [] + new_codes = [] + + changed = False + for lev, level_codes in zip(self.levels, self.codes): + # Since few levels are typically unused, bincount() is more + # efficient than unique() - however it only accepts positive values + # (and drops order): + uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1 + has_na = int(len(uniques) and (uniques[0] == -1)) + + if len(uniques) != len(lev) + has_na: + if lev.isna().any() and len(uniques) == len(lev): + break + # We have unused levels + changed = True + + # Recalculate uniques, now preserving order. + # Can easily be cythonized by exploiting the already existing + # "uniques" and stop parsing "level_codes" when all items + # are found: + uniques = algos.unique(level_codes) + if has_na: + na_idx = np.where(uniques == -1)[0] + # Just ensure that -1 is in first position: + uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]] + + # codes get mapped from uniques to 0:len(uniques) + # -1 (if present) is mapped to last position + code_mapping = np.zeros(len(lev) + has_na) + # ... and reassigned value -1: + code_mapping[uniques] = np.arange(len(uniques)) - has_na + + level_codes = code_mapping[level_codes] + + # new levels are simple + lev = lev.take(uniques[has_na:]) + + new_levels.append(lev) + new_codes.append(level_codes) + + result = self.view() + + if changed: + result._reset_identity() + result._set_levels(new_levels, validate=False) + result._set_codes(new_codes, validate=False) + + return result + + # -------------------------------------------------------------------- + # Pickling Methods + + def __reduce__(self): + """Necessary for making this object picklable""" + d = { + "levels": list(self.levels), + "codes": list(self.codes), + "sortorder": self.sortorder, + "names": list(self.names), + } + return ibase._new_Index, (type(self), d), None + + # -------------------------------------------------------------------- + + def __getitem__(self, key): + if is_scalar(key): + key = com.cast_scalar_indexer(key) + + retval = [] + for lev, level_codes in zip(self.levels, self.codes): + if level_codes[key] == -1: + retval.append(np.nan) + else: + retval.append(lev[level_codes[key]]) + + return tuple(retval) + else: + # in general cannot be sure whether the result will be sorted + sortorder = None + if com.is_bool_indexer(key): + key = np.asarray(key, dtype=bool) + sortorder = self.sortorder + elif isinstance(key, slice): + if key.step is None or key.step > 0: + sortorder = self.sortorder + elif isinstance(key, Index): + key = np.asarray(key) + + new_codes = [level_codes[key] for level_codes in self.codes] + + return MultiIndex( + levels=self.levels, + codes=new_codes, + names=self.names, + sortorder=sortorder, + verify_integrity=False, + ) + + def _getitem_slice(self: MultiIndex, slobj: slice) -> MultiIndex: + """ + Fastpath for __getitem__ when we know we have a slice. 
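+ # A short sketch of the ``__getitem__`` cases above, assuming pandas as ``pd``:
+ # >>> mi = pd.MultiIndex.from_arrays([[1, 2, 3], list('abc')])
+ # >>> mi[0]                    # scalar position -> plain tuple
+ # (1, 'a')
+ # >>> mi[[0, 2]]               # positional list -> new MultiIndex, sortorder dropped
+ # >>> mi[::2]                  # positive-step slice preserves sortorder
+ # >>> mi[[True, False, True]]  # boolean mask also preserves sortorder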
+ """ + sortorder = None + if slobj.step is None or slobj.step > 0: + sortorder = self.sortorder + + new_codes = [level_codes[slobj] for level_codes in self.codes] + + return type(self)( + levels=self.levels, + codes=new_codes, + names=self._names, + sortorder=sortorder, + verify_integrity=False, + ) + + @Appender(_index_shared_docs["take"] % _index_doc_kwargs) + def take( + self: MultiIndex, + indices, + axis: Axis = 0, + allow_fill: bool = True, + fill_value=None, + **kwargs, + ) -> MultiIndex: + nv.validate_take((), kwargs) + indices = ensure_platform_int(indices) + + # only fill if we are passing a non-None fill_value + allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices) + + na_value = -1 + + taken = [lab.take(indices) for lab in self.codes] + if allow_fill: + mask = indices == -1 + if mask.any(): + masked = [] + for new_label in taken: + label_values = new_label + label_values[mask] = na_value + masked.append(np.asarray(label_values)) + taken = masked + + return MultiIndex( + levels=self.levels, codes=taken, names=self.names, verify_integrity=False + ) + + def append(self, other): + """ + Append a collection of Index options together. + + Parameters + ---------- + other : Index or list/tuple of indices + + Returns + ------- + Index + The combined index. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([['a'], ['b']]) + >>> mi + MultiIndex([('a', 'b')], + ) + >>> mi.append(mi) + MultiIndex([('a', 'b'), ('a', 'b')], + ) + """ + if not isinstance(other, (list, tuple)): + other = [other] + + if all( + (isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other + ): + codes = [] + levels = [] + names = [] + for i in range(self.nlevels): + level_values = self.levels[i] + for mi in other: + level_values = level_values.union(mi.levels[i]) + level_codes = [ + recode_for_categories( + mi.codes[i], mi.levels[i], level_values, copy=False + ) + for mi in ([self, *other]) + ] + level_name = self.names[i] + if any(mi.names[i] != level_name for mi in other): + level_name = None + codes.append(np.concatenate(level_codes)) + levels.append(level_values) + names.append(level_name) + return MultiIndex( + codes=codes, levels=levels, names=names, verify_integrity=False + ) + + to_concat = (self._values,) + tuple(k._values for k in other) + new_tuples = np.concatenate(to_concat) + + # if all(isinstance(x, MultiIndex) for x in other): + try: + # We only get here if other contains at least one index with tuples, + # setting names to None automatically + return MultiIndex.from_tuples(new_tuples) + except (TypeError, IndexError): + return Index(new_tuples) + + def argsort( + self, *args, na_position: str = "last", **kwargs + ) -> npt.NDArray[np.intp]: + if len(args) == 0 and len(kwargs) == 0: + # lexsort is significantly faster than self._values.argsort() + target = self._sort_levels_monotonic(raise_if_incomparable=True) + return lexsort_indexer( + # error: Argument 1 to "lexsort_indexer" has incompatible type + # "List[Categorical]"; expected "Union[List[Union[ExtensionArray, + # ndarray[Any, Any]]], List[Series]]" + target._get_codes_for_sorting(), # type: ignore[arg-type] + na_position=na_position, + ) + return self._values.argsort(*args, **kwargs) + + @Appender(_index_shared_docs["repeat"] % _index_doc_kwargs) + def repeat(self, repeats: int, axis=None) -> MultiIndex: + nv.validate_repeat((), {"axis": axis}) + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "int") + repeats = ensure_platform_int(repeats) # type: 
ignore[assignment] + return MultiIndex( + levels=self.levels, + codes=[ + level_codes.view(np.ndarray).astype(np.intp, copy=False).repeat(repeats) + for level_codes in self.codes + ], + names=self.names, + sortorder=self.sortorder, + verify_integrity=False, + ) + + # error: Signature of "drop" incompatible with supertype "Index" + def drop( # type: ignore[override] + self, + codes, + level: Index | np.ndarray | Iterable[Hashable] | None = None, + errors: IgnoreRaise = "raise", + ) -> MultiIndex: + """ + Make a new :class:`pandas.MultiIndex` with the passed list of codes deleted. + + Parameters + ---------- + codes : array-like + Must be a list of tuples when ``level`` is not specified. + level : int or level name, default None + errors : str, default 'raise' + + Returns + ------- + MultiIndex + + Examples + -------- + >>> idx = pd.MultiIndex.from_product([(0, 1, 2), ('green', 'purple')], + ... names=["number", "color"]) + >>> idx + MultiIndex([(0, 'green'), + (0, 'purple'), + (1, 'green'), + (1, 'purple'), + (2, 'green'), + (2, 'purple')], + names=['number', 'color']) + >>> idx.drop([(1, 'green'), (2, 'purple')]) + MultiIndex([(0, 'green'), + (0, 'purple'), + (1, 'purple'), + (2, 'green')], + names=['number', 'color']) + + We can also drop from a specific level. + + >>> idx.drop('green', level='color') + MultiIndex([(0, 'purple'), + (1, 'purple'), + (2, 'purple')], + names=['number', 'color']) + + >>> idx.drop([1, 2], level=0) + MultiIndex([(0, 'green'), + (0, 'purple')], + names=['number', 'color']) + """ + if level is not None: + return self._drop_from_level(codes, level, errors) + + if not isinstance(codes, (np.ndarray, Index)): + try: + codes = com.index_labels_to_array(codes, dtype=np.dtype("object")) + except ValueError: + pass + + inds = [] + for level_codes in codes: + try: + loc = self.get_loc(level_codes) + # get_loc returns either an integer, a slice, or a boolean + # mask + if isinstance(loc, int): + inds.append(loc) + elif isinstance(loc, slice): + step = loc.step if loc.step is not None else 1 + inds.extend(range(loc.start, loc.stop, step)) + elif com.is_bool_indexer(loc): + if self._lexsort_depth == 0: + warnings.warn( + "dropping on a non-lexsorted multi-index " + "without a level parameter may impact performance.", + PerformanceWarning, + stacklevel=find_stack_level(), + ) + loc = loc.nonzero()[0] + inds.extend(loc) + else: + msg = f"unsupported indexer of type {type(loc)}" + raise AssertionError(msg) + except KeyError: + if errors != "ignore": + raise + + return self.delete(inds) + + def _drop_from_level( + self, codes, level, errors: IgnoreRaise = "raise" + ) -> MultiIndex: + codes = com.index_labels_to_array(codes) + i = self._get_level_number(level) + index = self.levels[i] + values = index.get_indexer(codes) + # If nan should be dropped it will equal -1 here. We have to check which values + # are not nan and equal -1, this means they are missing in the index + nan_codes = isna(codes) + values[(np.equal(nan_codes, False)) & (values == -1)] = -2 + if index.shape[0] == self.shape[0]: + values[np.equal(nan_codes, True)] = -2 + + not_found = codes[values == -2] + if len(not_found) != 0 and errors != "ignore": + raise KeyError(f"labels {not_found} not found in level") + mask = ~algos.isin(self.codes[i], values) + + return self[mask] + + def swaplevel(self, i=-2, j=-1) -> MultiIndex: + """ + Swap level i with level j. + + Calling this method does not change the ordering of the values. 
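+ # A minimal sketch of the name-alignment rule in ``append`` above, assuming
+ # pandas as ``pd``:
+ # >>> left = pd.MultiIndex.from_arrays([[1], ['a']], names=['x', 'y'])
+ # >>> right = pd.MultiIndex.from_arrays([[2], ['b']], names=['x', 'z'])
+ # >>> left.append(right).names  # a name survives only where all inputs agree
+ # FrozenList(['x', None])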
+ + Parameters + ---------- + i : int, str, default -2 + First level of index to be swapped. Can pass level name as string. + Type of parameters can be mixed. + j : int, str, default -1 + Second level of index to be swapped. Can pass level name as string. + Type of parameters can be mixed. + + Returns + ------- + MultiIndex + A new MultiIndex. + + See Also + -------- + Series.swaplevel : Swap levels i and j in a MultiIndex. + DataFrame.swaplevel : Swap levels i and j in a MultiIndex on a + particular axis. + + Examples + -------- + >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], + ... codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) + >>> mi + MultiIndex([('a', 'bb'), + ('a', 'aa'), + ('b', 'bb'), + ('b', 'aa')], + ) + >>> mi.swaplevel(0, 1) + MultiIndex([('bb', 'a'), + ('aa', 'a'), + ('bb', 'b'), + ('aa', 'b')], + ) + """ + new_levels = list(self.levels) + new_codes = list(self.codes) + new_names = list(self.names) + + i = self._get_level_number(i) + j = self._get_level_number(j) + + new_levels[i], new_levels[j] = new_levels[j], new_levels[i] + new_codes[i], new_codes[j] = new_codes[j], new_codes[i] + new_names[i], new_names[j] = new_names[j], new_names[i] + + return MultiIndex( + levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False + ) + + def reorder_levels(self, order) -> MultiIndex: + """ + Rearrange levels using input order. May not drop or duplicate levels. + + Parameters + ---------- + order : list of int or list of str + List representing new level order. Reference level by number + (position) or by key (label). + + Returns + ------- + MultiIndex + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4]], names=['x', 'y']) + >>> mi + MultiIndex([(1, 3), + (2, 4)], + names=['x', 'y']) + + >>> mi.reorder_levels(order=[1, 0]) + MultiIndex([(3, 1), + (4, 2)], + names=['y', 'x']) + + >>> mi.reorder_levels(order=['y', 'x']) + MultiIndex([(3, 1), + (4, 2)], + names=['y', 'x']) + """ + order = [self._get_level_number(i) for i in order] + result = self._reorder_ilevels(order) + return result + + def _reorder_ilevels(self, order) -> MultiIndex: + if len(order) != self.nlevels: + raise AssertionError( + f"Length of order must be same as number of levels ({self.nlevels}), " + f"got {len(order)}" + ) + new_levels = [self.levels[i] for i in order] + new_codes = [self.codes[i] for i in order] + new_names = [self.names[i] for i in order] + + return MultiIndex( + levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False + ) + + def _recode_for_new_levels( + self, new_levels, copy: bool = True + ) -> Generator[np.ndarray, None, None]: + if len(new_levels) > self.nlevels: + raise AssertionError( + f"Length of new_levels ({len(new_levels)}) " + f"must be <= self.nlevels ({self.nlevels})" + ) + for i in range(len(new_levels)): + yield recode_for_categories( + self.codes[i], self.levels[i], new_levels[i], copy=copy + ) + + def _get_codes_for_sorting(self) -> list[Categorical]: + """ + we are categorizing our codes by using the + available categories (all, not just observed) + excluding any missing ones (-1); this is in preparation + for sorting, where we need to disambiguate that -1 is not + a valid valid + """ + + def cats(level_codes): + return np.arange( + np.array(level_codes).max() + 1 if len(level_codes) else 0, + dtype=level_codes.dtype, + ) + + return [ + Categorical.from_codes(level_codes, cats(level_codes), True, validate=False) + for level_codes in self.codes + ] + + def sortlevel( + self, + level: IndexLabel = 0, + ascending: bool | 
list[bool] = True, + sort_remaining: bool = True, + na_position: str = "first", + ) -> tuple[MultiIndex, npt.NDArray[np.intp]]: + """ + Sort MultiIndex at the requested level. + + The result will respect the original ordering of the associated + factor at that level. + + Parameters + ---------- + level : list-like, int or str, default 0 + If a string is given, must be a name of the level. + If list-like must be names or ints of levels. + ascending : bool, default True + False to sort in descending order. + Can also be a list to specify a directed ordering. + sort_remaining : sort by the remaining levels after level + na_position : {'first' or 'last'}, default 'first' + Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at + the end. + + .. versionadded:: 2.1.0 + + Returns + ------- + sorted_index : pd.MultiIndex + Resulting index. + indexer : np.ndarray[np.intp] + Indices of output values in original index. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([[0, 0], [2, 1]]) + >>> mi + MultiIndex([(0, 2), + (0, 1)], + ) + + >>> mi.sortlevel() + (MultiIndex([(0, 1), + (0, 2)], + ), array([1, 0])) + + >>> mi.sortlevel(sort_remaining=False) + (MultiIndex([(0, 2), + (0, 1)], + ), array([0, 1])) + + >>> mi.sortlevel(1) + (MultiIndex([(0, 1), + (0, 2)], + ), array([1, 0])) + + >>> mi.sortlevel(1, ascending=False) + (MultiIndex([(0, 2), + (0, 1)], + ), array([0, 1])) + """ + if not is_list_like(level): + level = [level] + # error: Item "Hashable" of "Union[Hashable, Sequence[Hashable]]" has + # no attribute "__iter__" (not iterable) + level = [ + self._get_level_number(lev) for lev in level # type: ignore[union-attr] + ] + sortorder = None + + codes = [self.codes[lev] for lev in level] + # we have a directed ordering via ascending + if isinstance(ascending, list): + if not len(level) == len(ascending): + raise ValueError("level must have same length as ascending") + elif sort_remaining: + codes.extend( + [self.codes[lev] for lev in range(len(self.levels)) if lev not in level] + ) + else: + sortorder = level[0] + + indexer = lexsort_indexer( + codes, orders=ascending, na_position=na_position, codes_given=True + ) + + indexer = ensure_platform_int(indexer) + new_codes = [level_codes.take(indexer) for level_codes in self.codes] + + new_index = MultiIndex( + codes=new_codes, + levels=self.levels, + names=self.names, + sortorder=sortorder, + verify_integrity=False, + ) + + return new_index, indexer + + def _wrap_reindex_result(self, target, indexer, preserve_names: bool): + if not isinstance(target, MultiIndex): + if indexer is None: + target = self + elif (indexer >= 0).all(): + target = self.take(indexer) + else: + try: + target = MultiIndex.from_tuples(target) + except TypeError: + # not all tuples, see test_constructor_dict_multiindex_reindex_flat + return target + + target = self._maybe_preserve_names(target, preserve_names) + return target + + def _maybe_preserve_names(self, target: Index, preserve_names: bool) -> Index: + if ( + preserve_names + and target.nlevels == self.nlevels + and target.names != self.names + ): + target = target.copy(deep=False) + target.names = self.names + return target + + # -------------------------------------------------------------------- + # Indexing Methods + + def _check_indexing_error(self, key) -> None: + if not is_hashable(key) or is_iterator(key): + # We allow tuples if they are hashable, whereas other Index + # subclasses require scalar. + # We have to explicitly exclude generators, as these are hashable. 
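+ # A short sketch of per-level sort directions in ``sortlevel`` above,
+ # assuming pandas as ``pd``:
+ # >>> mi = pd.MultiIndex.from_arrays([[0, 0, 1, 1], [1, 2, 1, 2]])
+ # >>> mi.sortlevel(level=[0, 1], ascending=[True, False])[1]
+ # array([1, 0, 3, 2])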
+ raise InvalidIndexError(key) + + @cache_readonly + def _should_fallback_to_positional(self) -> bool: + """ + Should integer key(s) be treated as positional? + """ + # GH#33355 + return self.levels[0]._should_fallback_to_positional + + def _get_indexer_strict( + self, key, axis_name: str + ) -> tuple[Index, npt.NDArray[np.intp]]: + keyarr = key + if not isinstance(keyarr, Index): + keyarr = com.asarray_tuplesafe(keyarr) + + if len(keyarr) and not isinstance(keyarr[0], tuple): + indexer = self._get_indexer_level_0(keyarr) + + self._raise_if_missing(key, indexer, axis_name) + return self[indexer], indexer + + return super()._get_indexer_strict(key, axis_name) + + def _raise_if_missing(self, key, indexer, axis_name: str) -> None: + keyarr = key + if not isinstance(key, Index): + keyarr = com.asarray_tuplesafe(key) + + if len(keyarr) and not isinstance(keyarr[0], tuple): + # i.e. same condition for special case in MultiIndex._get_indexer_strict + + mask = indexer == -1 + if mask.any(): + check = self.levels[0].get_indexer(keyarr) + cmask = check == -1 + if cmask.any(): + raise KeyError(f"{keyarr[cmask]} not in index") + # We get here when levels still contain values which are not + # actually in Index anymore + raise KeyError(f"{keyarr} not in index") + else: + return super()._raise_if_missing(key, indexer, axis_name) + + def _get_indexer_level_0(self, target) -> npt.NDArray[np.intp]: + """ + Optimized equivalent to `self.get_level_values(0).get_indexer_for(target)`. + """ + lev = self.levels[0] + codes = self._codes[0] + cat = Categorical.from_codes(codes=codes, categories=lev, validate=False) + ci = Index(cat) + return ci.get_indexer_for(target) + + def get_slice_bound( + self, + label: Hashable | Sequence[Hashable], + side: Literal["left", "right"], + ) -> int: + """ + For an ordered MultiIndex, compute slice bound + that corresponds to given label. + + Returns leftmost (one-past-the-rightmost if `side=='right') position + of given label. + + Parameters + ---------- + label : object or tuple of objects + side : {'left', 'right'} + + Returns + ------- + int + Index of label. + + Notes + ----- + This method only works if level 0 index of the MultiIndex is lexsorted. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([list('abbc'), list('gefd')]) + + Get the locations from the leftmost 'b' in the first level + until the end of the multiindex: + + >>> mi.get_slice_bound('b', side="left") + 1 + + Like above, but if you get the locations from the rightmost + 'b' in the first level and 'f' in the second level: + + >>> mi.get_slice_bound(('b','f'), side="right") + 3 + + See Also + -------- + MultiIndex.get_loc : Get location for a label or a tuple of labels. + MultiIndex.get_locs : Get location for a label/slice/list/mask or a + sequence of such. + """ + if not isinstance(label, tuple): + label = (label,) + return self._partial_tup_index(label, side=side) + + # pylint: disable-next=useless-parent-delegation + def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]: + """ + For an ordered MultiIndex, compute the slice locations for input + labels. + + The input labels can be tuples representing partial levels, e.g. for a + MultiIndex with 3 levels, you can pass a single value (corresponding to + the first level), or a 1-, 2-, or 3-tuple. 
+ + Parameters + ---------- + start : label or tuple, default None + If None, defaults to the beginning + end : label or tuple + If None, defaults to the end + step : int or None + Slice step + + Returns + ------- + (start, end) : (int, int) + + Notes + ----- + This method only works if the MultiIndex is properly lexsorted. So, + if only the first 2 levels of a 3-level MultiIndex are lexsorted, + you can only pass two levels to ``.slice_locs``. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')], + ... names=['A', 'B']) + + Get the slice locations from the beginning of 'b' in the first level + until the end of the multiindex: + + >>> mi.slice_locs(start='b') + (1, 4) + + Like above, but stop at the end of 'b' in the first level and 'f' in + the second level: + + >>> mi.slice_locs(start='b', end=('b', 'f')) + (1, 3) + + See Also + -------- + MultiIndex.get_loc : Get location for a label or a tuple of labels. + MultiIndex.get_locs : Get location for a label/slice/list/mask or a + sequence of such. + """ + # This function adds nothing to its parent implementation (the magic + # happens in get_slice_bound method), but it adds meaningful doc. + return super().slice_locs(start, end, step) + + def _partial_tup_index(self, tup: tuple, side: Literal["left", "right"] = "left"): + if len(tup) > self._lexsort_depth: + raise UnsortedIndexError( + f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth " + f"({self._lexsort_depth})" + ) + + n = len(tup) + start, end = 0, len(self) + zipped = zip(tup, self.levels, self.codes) + for k, (lab, lev, level_codes) in enumerate(zipped): + section = level_codes[start:end] + + loc: npt.NDArray[np.intp] | np.intp | int + if lab not in lev and not isna(lab): + # short circuit + try: + loc = algos.searchsorted(lev, lab, side=side) + except TypeError as err: + # non-comparable e.g. test_slice_locs_with_type_mismatch + raise TypeError(f"Level type mismatch: {lab}") from err + if not is_integer(loc): + # non-comparable level, e.g. test_groupby_example + raise TypeError(f"Level type mismatch: {lab}") + if side == "right" and loc >= 0: + loc -= 1 + return start + algos.searchsorted(section, loc, side=side) + + idx = self._get_loc_single_level_index(lev, lab) + if isinstance(idx, slice) and k < n - 1: + # Get start and end value from slice, necessary when a non-integer + # interval is given as input GH#37707 + start = idx.start + end = idx.stop + elif k < n - 1: + # error: Incompatible types in assignment (expression has type + # "Union[ndarray[Any, dtype[signedinteger[Any]]] + end = start + algos.searchsorted( # type: ignore[assignment] + section, idx, side="right" + ) + # error: Incompatible types in assignment (expression has type + # "Union[ndarray[Any, dtype[signedinteger[Any]]] + start = start + algos.searchsorted( # type: ignore[assignment] + section, idx, side="left" + ) + elif isinstance(idx, slice): + idx = idx.start + return start + algos.searchsorted(section, idx, side=side) + else: + return start + algos.searchsorted(section, idx, side=side) + + def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int: + """ + If key is NA value, location of index unify as -1. + + Parameters + ---------- + level_index: Index + key : label + + Returns + ------- + loc : int + If key is NA value, loc is -1 + Else, location of key in index. + + See Also + -------- + Index.get_loc : The get_loc method for (single-level) index. 
+ """ + if is_scalar(key) and isna(key): + # TODO: need is_valid_na_for_dtype(key, level_index.dtype) + return -1 + else: + return level_index.get_loc(key) + + def get_loc(self, key): + """ + Get location for a label or a tuple of labels. + + The location is returned as an integer/slice or boolean + mask. + + Parameters + ---------- + key : label or tuple of labels (one for each level) + + Returns + ------- + int, slice object or boolean mask + If the key is past the lexsort depth, the return may be a + boolean mask array, otherwise it is always a slice or int. + + See Also + -------- + Index.get_loc : The get_loc method for (single-level) index. + MultiIndex.slice_locs : Get slice location given start label(s) and + end label(s). + MultiIndex.get_locs : Get location for a label/slice/list/mask or a + sequence of such. + + Notes + ----- + The key cannot be a slice, list of same-level labels, a boolean mask, + or a sequence of such. If you want to use those, use + :meth:`MultiIndex.get_locs` instead. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')]) + + >>> mi.get_loc('b') + slice(1, 3, None) + + >>> mi.get_loc(('b', 'e')) + 1 + """ + self._check_indexing_error(key) + + def _maybe_to_slice(loc): + """convert integer indexer to boolean mask or slice if possible""" + if not isinstance(loc, np.ndarray) or loc.dtype != np.intp: + return loc + + loc = lib.maybe_indices_to_slice(loc, len(self)) + if isinstance(loc, slice): + return loc + + mask = np.empty(len(self), dtype="bool") + mask.fill(False) + mask[loc] = True + return mask + + if not isinstance(key, tuple): + loc = self._get_level_indexer(key, level=0) + return _maybe_to_slice(loc) + + keylen = len(key) + if self.nlevels < keylen: + raise KeyError( + f"Key length ({keylen}) exceeds index depth ({self.nlevels})" + ) + + if keylen == self.nlevels and self.is_unique: + # TODO: what if we have an IntervalIndex level? + # i.e. do we need _index_as_unique on that level? + try: + return self._engine.get_loc(key) + except KeyError as err: + raise KeyError(key) from err + except TypeError: + # e.g. test_partial_slicing_with_multiindex partial string slicing + loc, _ = self.get_loc_level(key, list(range(self.nlevels))) + return loc + + # -- partial selection or non-unique index + # break the key into 2 parts based on the lexsort_depth of the index; + # the first part returns a continuous slice of the index; the 2nd part + # needs linear search within the slice + i = self._lexsort_depth + lead_key, follow_key = key[:i], key[i:] + + if not lead_key: + start = 0 + stop = len(self) + else: + try: + start, stop = self.slice_locs(lead_key, lead_key) + except TypeError as err: + # e.g. 
test_groupby_example key = ((0, 0, 1, 2), "new_col") + # when self has 5 integer levels + raise KeyError(key) from err + + if start == stop: + raise KeyError(key) + + if not follow_key: + return slice(start, stop) + + warnings.warn( + "indexing past lexsort depth may impact performance.", + PerformanceWarning, + stacklevel=find_stack_level(), + ) + + loc = np.arange(start, stop, dtype=np.intp) + + for i, k in enumerate(follow_key, len(lead_key)): + mask = self.codes[i][loc] == self._get_loc_single_level_index( + self.levels[i], k + ) + if not mask.all(): + loc = loc[mask] + if not len(loc): + raise KeyError(key) + + return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop) + + def get_loc_level(self, key, level: IndexLabel = 0, drop_level: bool = True): + """ + Get location and sliced index for requested label(s)/level(s). + + Parameters + ---------- + key : label or sequence of labels + level : int/level name or list thereof, optional + drop_level : bool, default True + If ``False``, the resulting index will not drop any level. + + Returns + ------- + tuple + A 2-tuple where the elements : + + Element 0: int, slice object or boolean array. + + Element 1: The resulting sliced multiindex/index. If the key + contains all levels, this will be ``None``. + + See Also + -------- + MultiIndex.get_loc : Get location for a label or a tuple of labels. + MultiIndex.get_locs : Get location for a label/slice/list/mask or a + sequence of such. + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')], + ... names=['A', 'B']) + + >>> mi.get_loc_level('b') + (slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B')) + + >>> mi.get_loc_level('e', level='B') + (array([False, True, False]), Index(['b'], dtype='object', name='A')) + + >>> mi.get_loc_level(['b', 'e']) + (1, None) + """ + if not isinstance(level, (list, tuple)): + level = self._get_level_number(level) + else: + level = [self._get_level_number(lev) for lev in level] + + loc, mi = self._get_loc_level(key, level=level) + if not drop_level: + if lib.is_integer(loc): + # Slice index must be an integer or None + mi = self[loc : loc + 1] + else: + mi = self[loc] + return loc, mi + + def _get_loc_level(self, key, level: int | list[int] = 0): + """ + get_loc_level but with `level` known to be positional, not name-based. + """ + + # different name to distinguish from maybe_droplevels + def maybe_mi_droplevels(indexer, levels): + """ + If level does not exist or all levels were dropped, the exception + has to be handled outside. + """ + new_index = self[indexer] + + for i in sorted(levels, reverse=True): + new_index = new_index._drop_level_numbers([i]) + + return new_index + + if isinstance(level, (tuple, list)): + if len(key) != len(level): + raise AssertionError( + "Key for location must have same length as number of levels" + ) + result = None + for lev, k in zip(level, key): + loc, new_index = self._get_loc_level(k, level=lev) + if isinstance(loc, slice): + mask = np.zeros(len(self), dtype=bool) + mask[loc] = True + loc = mask + result = loc if result is None else result & loc + + try: + # FIXME: we should be only dropping levels on which we are + # scalar-indexing + mi = maybe_mi_droplevels(result, level) + except ValueError: + # droplevel failed because we tried to drop all levels, + # i.e. 
len(level) == self.nlevels + mi = self[result] + + return result, mi + + # kludge for #1796 + if isinstance(key, list): + key = tuple(key) + + if isinstance(key, tuple) and level == 0: + try: + # Check if this tuple is a single key in our first level + if key in self.levels[0]: + indexer = self._get_level_indexer(key, level=level) + new_index = maybe_mi_droplevels(indexer, [0]) + return indexer, new_index + except (TypeError, InvalidIndexError): + pass + + if not any(isinstance(k, slice) for k in key): + if len(key) == self.nlevels and self.is_unique: + # Complete key in unique index -> standard get_loc + try: + return (self._engine.get_loc(key), None) + except KeyError as err: + raise KeyError(key) from err + except TypeError: + # e.g. partial string indexing + # test_partial_string_timestamp_multiindex + pass + + # partial selection + indexer = self.get_loc(key) + ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)] + if len(ilevels) == self.nlevels: + if is_integer(indexer): + # we are dropping all levels + return indexer, None + + # TODO: in some cases we still need to drop some levels, + # e.g. test_multiindex_perf_warn + # test_partial_string_timestamp_multiindex + ilevels = [ + i + for i in range(len(key)) + if ( + not isinstance(key[i], str) + or not self.levels[i]._supports_partial_string_indexing + ) + and key[i] != slice(None, None) + ] + if len(ilevels) == self.nlevels: + # TODO: why? + ilevels = [] + return indexer, maybe_mi_droplevels(indexer, ilevels) + + else: + indexer = None + for i, k in enumerate(key): + if not isinstance(k, slice): + loc_level = self._get_level_indexer(k, level=i) + if isinstance(loc_level, slice): + if com.is_null_slice(loc_level) or com.is_full_slice( + loc_level, len(self) + ): + # everything + continue + + # e.g. test_xs_IndexSlice_argument_not_implemented + k_index = np.zeros(len(self), dtype=bool) + k_index[loc_level] = True + + else: + k_index = loc_level + + elif com.is_null_slice(k): + # taking everything, does not affect `indexer` below + continue + + else: + # FIXME: this message can be inaccurate, e.g. + # test_series_varied_multiindex_alignment + raise TypeError(f"Expected label or tuple of labels, got {key}") + + if indexer is None: + indexer = k_index + else: + indexer &= k_index + if indexer is None: + indexer = slice(None, None) + ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)] + return indexer, maybe_mi_droplevels(indexer, ilevels) + else: + indexer = self._get_level_indexer(key, level=level) + if ( + isinstance(key, str) + and self.levels[level]._supports_partial_string_indexing + ): + # check to see if we did an exact lookup vs sliced + check = self.levels[level].get_loc(key) + if not is_integer(check): + # e.g. test_partial_string_timestamp_multiindex + return indexer, self[indexer] + + try: + result_index = maybe_mi_droplevels(indexer, [level]) + except ValueError: + result_index = self[indexer] + + return indexer, result_index + + def _get_level_indexer( + self, key, level: int = 0, indexer: npt.NDArray[np.bool_] | None = None + ): + # `level` kwarg is _always_ positional, never name + # return a boolean array or slice showing where the key is + # in the totality of values + # if the indexer is provided, then use this + + level_index = self.levels[level] + level_codes = self.codes[level] + + def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes): + # Compute a bool indexer to identify the positions to take. 
+ # If we have an existing indexer, we only need to examine the + # subset of positions where the existing indexer is True. + if indexer is not None: + # we only need to look at the subset of codes where the + # existing indexer equals True + codes = codes[indexer] + + if step is None or step == 1: + new_indexer = (codes >= start) & (codes < stop) + else: + r = np.arange(start, stop, step, dtype=codes.dtype) + new_indexer = algos.isin(codes, r) + + if indexer is None: + return new_indexer + + indexer = indexer.copy() + indexer[indexer] = new_indexer + return indexer + + if isinstance(key, slice): + # handle a slice, returning a slice if we can + # otherwise a boolean indexer + step = key.step + is_negative_step = step is not None and step < 0 + + try: + if key.start is not None: + start = level_index.get_loc(key.start) + elif is_negative_step: + start = len(level_index) - 1 + else: + start = 0 + + if key.stop is not None: + stop = level_index.get_loc(key.stop) + elif is_negative_step: + stop = 0 + elif isinstance(start, slice): + stop = len(level_index) + else: + stop = len(level_index) - 1 + except KeyError: + # we have a partial slice (like looking up a partial date + # string) + start = stop = level_index.slice_indexer(key.start, key.stop, key.step) + step = start.step + + if isinstance(start, slice) or isinstance(stop, slice): + # we have a slice for start and/or stop + # a partial date slicer on a DatetimeIndex generates a slice + # note that the stop ALREADY includes the stopped point (if + # it was a string sliced) + start = getattr(start, "start", start) + stop = getattr(stop, "stop", stop) + return convert_indexer(start, stop, step) + + elif level > 0 or self._lexsort_depth == 0 or step is not None: + # need to have like semantics here to right + # searching as when we are using a slice + # so adjust the stop by 1 (so we include stop) + stop = (stop - 1) if is_negative_step else (stop + 1) + return convert_indexer(start, stop, step) + else: + # sorted, so can return slice object -> view + i = algos.searchsorted(level_codes, start, side="left") + j = algos.searchsorted(level_codes, stop, side="right") + return slice(i, j, step) + + else: + idx = self._get_loc_single_level_index(level_index, key) + + if level > 0 or self._lexsort_depth == 0: + # Desired level is not sorted + if isinstance(idx, slice): + # test_get_loc_partial_timestamp_multiindex + locs = (level_codes >= idx.start) & (level_codes < idx.stop) + return locs + + locs = np.array(level_codes == idx, dtype=bool, copy=False) + + if not locs.any(): + # The label is present in self.levels[level] but unused: + raise KeyError(key) + return locs + + if isinstance(idx, slice): + # e.g. test_partial_string_timestamp_multiindex + start = algos.searchsorted(level_codes, idx.start, side="left") + # NB: "left" here bc of slice semantics + end = algos.searchsorted(level_codes, idx.stop, side="left") + else: + start = algos.searchsorted(level_codes, idx, side="left") + end = algos.searchsorted(level_codes, idx, side="right") + + if start == end: + # The label is present in self.levels[level] but unused: + raise KeyError(key) + return slice(start, end) + + def get_locs(self, seq): + """ + Get location for a sequence of labels. + + Parameters + ---------- + seq : label, slice, list, mask or a sequence of such + You should use one of the above for each level. + If a level should not be used, set it to ``slice(None)``. + + Returns + ------- + numpy.ndarray + NumPy array of integers suitable for passing to iloc. 
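+ # A minimal sketch of mixing indexer kinds in ``get_locs`` above, assuming
+ # pandas as ``pd``:
+ # >>> mi = pd.MultiIndex.from_arrays([list('aabb'), [0, 1, 0, 1]])
+ # >>> mi.get_locs([slice('a', 'a'), [1]])  # level-0 slice AND level-1 list
+ # array([1])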
+ + See Also + -------- + MultiIndex.get_loc : Get location for a label or a tuple of labels. + MultiIndex.slice_locs : Get slice location given start label(s) and + end label(s). + + Examples + -------- + >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')]) + + >>> mi.get_locs('b') # doctest: +SKIP + array([1, 2], dtype=int64) + + >>> mi.get_locs([slice(None), ['e', 'f']]) # doctest: +SKIP + array([1, 2], dtype=int64) + + >>> mi.get_locs([[True, False, True], slice('e', 'f')]) # doctest: +SKIP + array([2], dtype=int64) + """ + + # must be lexsorted to at least as many levels + true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s] + if true_slices and true_slices[-1] >= self._lexsort_depth: + raise UnsortedIndexError( + "MultiIndex slicing requires the index to be lexsorted: slicing " + f"on levels {true_slices}, lexsort depth {self._lexsort_depth}" + ) + + if any(x is Ellipsis for x in seq): + raise NotImplementedError( + "MultiIndex does not support indexing with Ellipsis" + ) + + n = len(self) + + def _to_bool_indexer(indexer) -> npt.NDArray[np.bool_]: + if isinstance(indexer, slice): + new_indexer = np.zeros(n, dtype=np.bool_) + new_indexer[indexer] = True + return new_indexer + return indexer + + # a bool indexer for the positions we want to take + indexer: npt.NDArray[np.bool_] | None = None + + for i, k in enumerate(seq): + lvl_indexer: npt.NDArray[np.bool_] | slice | None = None + + if com.is_bool_indexer(k): + if len(k) != n: + raise ValueError( + "cannot index with a boolean indexer that " + "is not the same length as the index" + ) + lvl_indexer = np.asarray(k) + + elif is_list_like(k): + # a collection of labels to include from this level (these are or'd) + + # GH#27591 check if this is a single tuple key in the level + try: + lvl_indexer = self._get_level_indexer(k, level=i, indexer=indexer) + except (InvalidIndexError, TypeError, KeyError) as err: + # InvalidIndexError e.g. non-hashable, fall back to treating + # this as a sequence of labels + # KeyError it can be ambiguous if this is a label or sequence + # of labels + # github.com/pandas-dev/pandas/issues/39424#issuecomment-871626708 + for x in k: + if not is_hashable(x): + # e.g. slice + raise err + # GH 39424: Ignore not founds + # GH 42351: No longer ignore not founds & enforced in 2.0 + # TODO: how to handle IntervalIndex level? 
(no test cases) + item_indexer = self._get_level_indexer( + x, level=i, indexer=indexer + ) + if lvl_indexer is None: + lvl_indexer = _to_bool_indexer(item_indexer) + elif isinstance(item_indexer, slice): + lvl_indexer[item_indexer] = True # type: ignore[index] + else: + lvl_indexer |= item_indexer + + if lvl_indexer is None: + # no matches we are done + # test_loc_getitem_duplicates_multiindex_empty_indexer + return np.array([], dtype=np.intp) + + elif com.is_null_slice(k): + # empty slice + if indexer is None and i == len(seq) - 1: + return np.arange(n, dtype=np.intp) + continue + + else: + # a slice or a single label + lvl_indexer = self._get_level_indexer(k, level=i, indexer=indexer) + + # update indexer + lvl_indexer = _to_bool_indexer(lvl_indexer) + if indexer is None: + indexer = lvl_indexer + else: + indexer &= lvl_indexer + if not np.any(indexer) and np.any(lvl_indexer): + raise KeyError(seq) + + # empty indexer + if indexer is None: + return np.array([], dtype=np.intp) + + pos_indexer = indexer.nonzero()[0] + return self._reorder_indexer(seq, pos_indexer) + + # -------------------------------------------------------------------- + + def _reorder_indexer( + self, + seq: tuple[Scalar | Iterable | AnyArrayLike, ...], + indexer: npt.NDArray[np.intp], + ) -> npt.NDArray[np.intp]: + """ + Reorder an indexer of a MultiIndex (self) so that the labels are in the + same order as given in seq + + Parameters + ---------- + seq : label/slice/list/mask or a sequence of such + indexer: a position indexer of self + + Returns + ------- + indexer : a sorted position indexer of self ordered as seq + """ + + # check if sorting is necessary + need_sort = False + for i, k in enumerate(seq): + if com.is_null_slice(k) or com.is_bool_indexer(k) or is_scalar(k): + pass + elif is_list_like(k): + if len(k) <= 1: # type: ignore[arg-type] + pass + elif self._is_lexsorted(): + # If the index is lexsorted and the list_like label + # in seq are sorted then we do not need to sort + k_codes = self.levels[i].get_indexer(k) + k_codes = k_codes[k_codes >= 0] # Filter absent keys + # True if the given codes are not ordered + need_sort = (k_codes[:-1] > k_codes[1:]).any() + else: + need_sort = True + elif isinstance(k, slice): + if self._is_lexsorted(): + need_sort = k.step is not None and k.step < 0 + else: + need_sort = True + else: + need_sort = True + if need_sort: + break + if not need_sort: + return indexer + + n = len(self) + keys: tuple[np.ndarray, ...] 
= ()
+        # For each level of the sequence in seq, map the level codes with the
+        # order in which they appear in a list-like sequence
+        # This mapping is then used to reorder the indexer
+        for i, k in enumerate(seq):
+            if is_scalar(k):
+                # GH#34603 we want to treat a scalar the same as an all equal list
+                k = [k]
+            if com.is_bool_indexer(k):
+                new_order = np.arange(n)[indexer]
+            elif is_list_like(k):
+                # Generate a map with all level codes as sorted initially
+                if not isinstance(k, (np.ndarray, ExtensionArray, Index, ABCSeries)):
+                    k = sanitize_array(k, None)
+                k = algos.unique(k)
+                key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(
+                    self.levels[i]
+                )
+                # Set order as given in the indexer list
+                level_indexer = self.levels[i].get_indexer(k)
+                level_indexer = level_indexer[level_indexer >= 0]  # Filter absent keys
+                key_order_map[level_indexer] = np.arange(len(level_indexer))
+
+                new_order = key_order_map[self.codes[i][indexer]]
+            elif isinstance(k, slice) and k.step is not None and k.step < 0:
+                # flip order for negative step
+                new_order = np.arange(n)[::-1][indexer]
+            elif isinstance(k, slice) and k.start is None and k.stop is None:
+                # slice(None) should not determine order GH#31330
+                new_order = np.ones((n,), dtype=np.intp)[indexer]
+            else:
+                # For all other cases, use the same order as the level
+                new_order = np.arange(n)[indexer]
+            keys = (new_order,) + keys
+
+        # Find the reordering using lexsort on the keys mapping
+        ind = np.lexsort(keys)
+        return indexer[ind]
+
+    def truncate(self, before=None, after=None) -> MultiIndex:
+        """
+        Slice index between two labels / tuples, return new MultiIndex.
+
+        Parameters
+        ----------
+        before : label or tuple, can be partial. Default None
+            None defaults to start.
+        after : label or tuple, can be partial. Default None
+            None defaults to end.
+
+        Returns
+        -------
+        MultiIndex
+            The truncated MultiIndex.
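+
+        Notes
+        -----
+        ``before`` and ``after`` may be single labels (matched against the
+        first level) or tuples matching a prefix of the levels.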
+
+        Examples
+        --------
+        >>> mi = pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['x', 'y', 'z']])
+        >>> mi
+        MultiIndex([('a', 'x'), ('b', 'y'), ('c', 'z')],
+                   )
+        >>> mi.truncate(before='a', after='b')
+        MultiIndex([('a', 'x'), ('b', 'y')],
+                   )
+        """
+        if after and before and after < before:
+            raise ValueError("after < before")
+
+        i, j = self.levels[0].slice_locs(before, after)
+        left, right = self.slice_locs(before, after)
+
+        new_levels = list(self.levels)
+        new_levels[0] = new_levels[0][i:j]
+
+        new_codes = [level_codes[left:right] for level_codes in self.codes]
+        new_codes[0] = new_codes[0] - i
+
+        return MultiIndex(
+            levels=new_levels,
+            codes=new_codes,
+            names=self._names,
+            verify_integrity=False,
+        )
+
+    def equals(self, other: object) -> bool:
+        """
+        Determines if two MultiIndex objects have the same labeling information
+        (the levels themselves do not necessarily have to be the same).
+
+        See Also
+        --------
+        equal_levels
+        """
+        if self.is_(other):
+            return True
+
+        if not isinstance(other, Index):
+            return False
+
+        if len(self) != len(other):
+            return False
+
+        if not isinstance(other, MultiIndex):
+            # d-level MultiIndex can equal d-tuple Index
+            if not self._should_compare(other):
+                # object Index or Categorical[object] may contain tuples
+                return False
+            return array_equivalent(self._values, other._values)
+
+        if self.nlevels != other.nlevels:
+            return False
+
+        for i in range(self.nlevels):
+            self_codes = self.codes[i]
+            other_codes = other.codes[i]
+            self_mask = self_codes == -1
+            other_mask = other_codes == -1
+            if not np.array_equal(self_mask, other_mask):
+                return False
+            self_codes = self_codes[~self_mask]
+            self_values = self.levels[i]._values.take(self_codes)
+
+            other_codes = other_codes[~other_mask]
+            other_values = other.levels[i]._values.take(other_codes)
+
+            # since we use NaT for both datetime64 and timedelta64, a level may
+            # be typed, say, timedelta64 in self (i.e. it has values other than
+            # NaT) but datetime64 in other (where it is all NaT); these are
+            # equivalent
+            if len(self_values) == 0 and len(other_values) == 0:
+                continue
+
+            if not isinstance(self_values, np.ndarray):
+                # i.e. ExtensionArray
+                if not self_values.equals(other_values):
+                    return False
+            elif not isinstance(other_values, np.ndarray):
+                # i.e.
other is ExtensionArray + if not other_values.equals(self_values): + return False + else: + if not array_equivalent(self_values, other_values): + return False + + return True + + def equal_levels(self, other: MultiIndex) -> bool: + """ + Return True if the levels of both MultiIndex objects are the same + + """ + if self.nlevels != other.nlevels: + return False + + for i in range(self.nlevels): + if not self.levels[i].equals(other.levels[i]): + return False + return True + + # -------------------------------------------------------------------- + # Set Methods + + def _union(self, other, sort) -> MultiIndex: + other, result_names = self._convert_can_do_setop(other) + if other.has_duplicates: + # This is only necessary if other has dupes, + # otherwise difference is faster + result = super()._union(other, sort) + + if isinstance(result, MultiIndex): + return result + return MultiIndex.from_arrays( + zip(*result), sortorder=None, names=result_names + ) + + else: + right_missing = other.difference(self, sort=False) + if len(right_missing): + result = self.append(right_missing) + else: + result = self._get_reconciled_name_object(other) + + if sort is not False: + try: + result = result.sort_values() + except TypeError: + if sort is True: + raise + warnings.warn( + "The values in the array are unorderable. " + "Pass `sort=False` to suppress this warning.", + RuntimeWarning, + stacklevel=find_stack_level(), + ) + return result + + def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: + return is_object_dtype(dtype) + + def _get_reconciled_name_object(self, other) -> MultiIndex: + """ + If the result of a set operation will be self, + return self, unless the names change, in which + case make a shallow copy of self. + """ + names = self._maybe_match_names(other) + if self.names != names: + # error: Cannot determine type of "rename" + return self.rename(names) # type: ignore[has-type] + return self + + def _maybe_match_names(self, other): + """ + Try to find common names to attach to the result of an operation between + a and b. Return a consensus list of names if they match at least partly + or list of None if they have completely different names. + """ + if len(self.names) != len(other.names): + return [None] * len(self.names) + names = [] + for a_name, b_name in zip(self.names, other.names): + if a_name == b_name: + names.append(a_name) + else: + # TODO: what if they both have np.nan for their names? 
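+                # e.g. names ("a", "b") and ("a", "c") reconcile to
+                # ["a", None]; completely different names give [None, None]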
+ names.append(None) + return names + + def _wrap_intersection_result(self, other, result) -> MultiIndex: + _, result_names = self._convert_can_do_setop(other) + return result.set_names(result_names) + + def _wrap_difference_result(self, other, result: MultiIndex) -> MultiIndex: + _, result_names = self._convert_can_do_setop(other) + + if len(result) == 0: + return result.remove_unused_levels().set_names(result_names) + else: + return result.set_names(result_names) + + def _convert_can_do_setop(self, other): + result_names = self.names + + if not isinstance(other, Index): + if len(other) == 0: + return self[:0], self.names + else: + msg = "other must be a MultiIndex or a list of tuples" + try: + other = MultiIndex.from_tuples(other, names=self.names) + except (ValueError, TypeError) as err: + # ValueError raised by tuples_to_object_array if we + # have non-object dtype + raise TypeError(msg) from err + else: + result_names = get_unanimous_names(self, other) + + return other, result_names + + # -------------------------------------------------------------------- + + @doc(Index.astype) + def astype(self, dtype, copy: bool = True): + dtype = pandas_dtype(dtype) + if isinstance(dtype, CategoricalDtype): + msg = "> 1 ndim Categorical are not supported at this time" + raise NotImplementedError(msg) + if not is_object_dtype(dtype): + raise TypeError( + "Setting a MultiIndex dtype to anything other than object " + "is not supported" + ) + if copy is True: + return self._view() + return self + + def _validate_fill_value(self, item): + if isinstance(item, MultiIndex): + # GH#43212 + if item.nlevels != self.nlevels: + raise ValueError("Item must have length equal to number of levels.") + return item._values + elif not isinstance(item, tuple): + # Pad the key with empty strings if lower levels of the key + # aren't specified: + item = (item,) + ("",) * (self.nlevels - 1) + elif len(item) != self.nlevels: + raise ValueError("Item must have length equal to number of levels.") + return item + + def putmask(self, mask, value: MultiIndex) -> MultiIndex: + """ + Return a new MultiIndex of the values set with the mask. 
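+
+        Where ``mask`` is True, entries are taken from ``value``; elsewhere
+        the existing entries of ``self`` are kept.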
+ + Parameters + ---------- + mask : array like + value : MultiIndex + Must either be the same length as self or length one + + Returns + ------- + MultiIndex + """ + mask, noop = validate_putmask(self, mask) + if noop: + return self.copy() + + if len(mask) == len(value): + subset = value[mask].remove_unused_levels() + else: + subset = value.remove_unused_levels() + + new_levels = [] + new_codes = [] + + for i, (value_level, level, level_codes) in enumerate( + zip(subset.levels, self.levels, self.codes) + ): + new_level = level.union(value_level, sort=False) + value_codes = new_level.get_indexer_for(subset.get_level_values(i)) + new_code = ensure_int64(level_codes) + new_code[mask] = value_codes + new_levels.append(new_level) + new_codes.append(new_code) + + return MultiIndex( + levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False + ) + + def insert(self, loc: int, item) -> MultiIndex: + """ + Make new MultiIndex inserting new item at location + + Parameters + ---------- + loc : int + item : tuple + Must be same length as number of levels in the MultiIndex + + Returns + ------- + new_index : Index + """ + item = self._validate_fill_value(item) + + new_levels = [] + new_codes = [] + for k, level, level_codes in zip(item, self.levels, self.codes): + if k not in level: + # have to insert into level + # must insert at end otherwise you have to recompute all the + # other codes + lev_loc = len(level) + level = level.insert(lev_loc, k) + else: + lev_loc = level.get_loc(k) + + new_levels.append(level) + new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc)) + + return MultiIndex( + levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False + ) + + def delete(self, loc) -> MultiIndex: + """ + Make new index with passed location deleted + + Returns + ------- + new_index : MultiIndex + """ + new_codes = [np.delete(level_codes, loc) for level_codes in self.codes] + return MultiIndex( + levels=self.levels, + codes=new_codes, + names=self.names, + verify_integrity=False, + ) + + @doc(Index.isin) + def isin(self, values, level=None) -> npt.NDArray[np.bool_]: + if isinstance(values, Generator): + values = list(values) + + if level is None: + if len(values) == 0: + return np.zeros((len(self),), dtype=np.bool_) + if not isinstance(values, MultiIndex): + values = MultiIndex.from_tuples(values) + return values.unique().get_indexer_for(self) != -1 + else: + num = self._get_level_number(level) + levs = self.get_level_values(num) + + if levs.size == 0: + return np.zeros(len(levs), dtype=np.bool_) + return levs.isin(values) + + # error: Incompatible types in assignment (expression has type overloaded function, + # base class "Index" defined the type as "Callable[[Index, Any, bool], Any]") + rename = Index.set_names # type: ignore[assignment] + + # --------------------------------------------------------------- + # Arithmetic/Numeric Methods - Disabled + + __add__ = make_invalid_op("__add__") + __radd__ = make_invalid_op("__radd__") + __iadd__ = make_invalid_op("__iadd__") + __sub__ = make_invalid_op("__sub__") + __rsub__ = make_invalid_op("__rsub__") + __isub__ = make_invalid_op("__isub__") + __pow__ = make_invalid_op("__pow__") + __rpow__ = make_invalid_op("__rpow__") + __mul__ = make_invalid_op("__mul__") + __rmul__ = make_invalid_op("__rmul__") + __floordiv__ = make_invalid_op("__floordiv__") + __rfloordiv__ = make_invalid_op("__rfloordiv__") + __truediv__ = make_invalid_op("__truediv__") + __rtruediv__ = make_invalid_op("__rtruediv__") + __mod__ = 
make_invalid_op("__mod__") + __rmod__ = make_invalid_op("__rmod__") + __divmod__ = make_invalid_op("__divmod__") + __rdivmod__ = make_invalid_op("__rdivmod__") + # Unary methods disabled + __neg__ = make_invalid_op("__neg__") + __pos__ = make_invalid_op("__pos__") + __abs__ = make_invalid_op("__abs__") + __invert__ = make_invalid_op("__invert__") + + +def _lexsort_depth(codes: list[np.ndarray], nlevels: int) -> int: + """Count depth (up to a maximum of `nlevels`) with which codes are lexsorted.""" + int64_codes = [ensure_int64(level_codes) for level_codes in codes] + for k in range(nlevels, 0, -1): + if libalgos.is_lexsorted(int64_codes[:k]): + return k + return 0 + + +def sparsify_labels(label_list, start: int = 0, sentinel: object = ""): + pivoted = list(zip(*label_list)) + k = len(label_list) + + result = pivoted[: start + 1] + prev = pivoted[start] + + for cur in pivoted[start + 1 :]: + sparse_cur = [] + + for i, (p, t) in enumerate(zip(prev, cur)): + if i == k - 1: + sparse_cur.append(t) + result.append(sparse_cur) + break + + if p == t: + sparse_cur.append(sentinel) + else: + sparse_cur.extend(cur[i:]) + result.append(sparse_cur) + break + + prev = cur + + return list(zip(*result)) + + +def _get_na_rep(dtype: DtypeObj) -> str: + if isinstance(dtype, ExtensionDtype): + return f"{dtype.na_value}" + else: + dtype_type = dtype.type + + return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype_type, "NaN") + + +def maybe_droplevels(index: Index, key) -> Index: + """ + Attempt to drop level or levels from the given index. + + Parameters + ---------- + index: Index + key : scalar or tuple + + Returns + ------- + Index + """ + # drop levels + original_index = index + if isinstance(key, tuple): + # Caller is responsible for ensuring the key is not an entry in the first + # level of the MultiIndex. + for _ in key: + try: + index = index._drop_level_numbers([0]) + except ValueError: + # we have dropped too much, so back out + return original_index + else: + try: + index = index._drop_level_numbers([0]) + except ValueError: + pass + + return index + + +def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray: + """ + Coerce the array-like indexer to the smallest integer dtype that can encode all + of the given categories. + + Parameters + ---------- + array_like : array-like + categories : array-like + copy : bool + + Returns + ------- + np.ndarray + Non-writeable. + """ + array_like = coerce_indexer_dtype(array_like, categories) + if copy: + array_like = array_like.copy() + array_like.flags.writeable = False + return array_like + + +def _require_listlike(level, arr, arrname: str): + """ + Ensure that level is either None or listlike, and arr is list-of-listlike. 
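+
+    For example, ``level=0, arr=["a", "b"]`` is normalized to
+    ``([0], [["a", "b"]])``, while a list-like ``level`` requires ``arr``
+    to already be a list of list-likes.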
+ """ + if level is not None and not is_list_like(level): + if not is_list_like(arr): + raise TypeError(f"{arrname} must be list-like") + if len(arr) > 0 and is_list_like(arr[0]): + raise TypeError(f"{arrname} must be list-like") + level = [level] + arr = [arr] + elif level is None or is_list_like(level): + if not is_list_like(arr) or not is_list_like(arr[0]): + raise TypeError(f"{arrname} must be list of lists-like") + return level, arr diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/period.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/period.py new file mode 100644 index 00000000..d05694c0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/period.py @@ -0,0 +1,535 @@ +from __future__ import annotations + +from datetime import ( + datetime, + timedelta, +) +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs import index as libindex +from pandas._libs.tslibs import ( + BaseOffset, + NaT, + Period, + Resolution, + Tick, +) +from pandas.util._decorators import ( + cache_readonly, + doc, +) + +from pandas.core.dtypes.common import is_integer +from pandas.core.dtypes.dtypes import PeriodDtype +from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.missing import is_valid_na_for_dtype + +from pandas.core.arrays.period import ( + PeriodArray, + period_array, + raise_on_incompatible, + validate_dtype_freq, +) +import pandas.core.common as com +import pandas.core.indexes.base as ibase +from pandas.core.indexes.base import maybe_extract_name +from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin +from pandas.core.indexes.datetimes import ( + DatetimeIndex, + Index, +) +from pandas.core.indexes.extension import inherit_names + +if TYPE_CHECKING: + from collections.abc import Hashable + + from pandas._typing import ( + Dtype, + DtypeObj, + Self, + npt, + ) + + +_index_doc_kwargs = dict(ibase._index_doc_kwargs) +_index_doc_kwargs.update({"target_klass": "PeriodIndex or list of Periods"}) +_shared_doc_kwargs = { + "klass": "PeriodArray", +} + +# --- Period index sketch + + +def _new_PeriodIndex(cls, **d): + # GH13277 for unpickling + values = d.pop("data") + if values.dtype == "int64": + freq = d.pop("freq", None) + dtype = PeriodDtype(freq) + values = PeriodArray(values, dtype=dtype) + return cls._simple_new(values, **d) + else: + return cls(values, **d) + + +@inherit_names( + ["strftime", "start_time", "end_time"] + PeriodArray._field_ops, + PeriodArray, + wrap=True, +) +@inherit_names(["is_leap_year", "_format_native_types"], PeriodArray) +class PeriodIndex(DatetimeIndexOpsMixin): + """ + Immutable ndarray holding ordinal values indicating regular periods in time. + + Index keys are boxed to Period objects which carries the metadata (eg, + frequency information). + + Parameters + ---------- + data : array-like (1d int np.ndarray or PeriodArray), optional + Optional period-like data to construct index with. + copy : bool + Make a copy of input ndarray. + freq : str or period object, optional + One of pandas period strings or corresponding objects. 
+ year : int, array, or Series, default None + month : int, array, or Series, default None + quarter : int, array, or Series, default None + day : int, array, or Series, default None + hour : int, array, or Series, default None + minute : int, array, or Series, default None + second : int, array, or Series, default None + dtype : str or PeriodDtype, default None + + Attributes + ---------- + day + dayofweek + day_of_week + dayofyear + day_of_year + days_in_month + daysinmonth + end_time + freq + freqstr + hour + is_leap_year + minute + month + quarter + qyear + second + start_time + week + weekday + weekofyear + year + + Methods + ------- + asfreq + strftime + to_timestamp + + See Also + -------- + Index : The base pandas Index type. + Period : Represents a period of time. + DatetimeIndex : Index with datetime64 data. + TimedeltaIndex : Index of timedelta64 data. + period_range : Create a fixed-frequency PeriodIndex. + + Examples + -------- + >>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3]) + >>> idx + PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]') + """ + + _typ = "periodindex" + + _data: PeriodArray + freq: BaseOffset + dtype: PeriodDtype + + _data_cls = PeriodArray + _supports_partial_string_indexing = True + + @property + def _engine_type(self) -> type[libindex.PeriodEngine]: + return libindex.PeriodEngine + + @cache_readonly + def _resolution_obj(self) -> Resolution: + # for compat with DatetimeIndex + return self.dtype._resolution_obj + + # -------------------------------------------------------------------- + # methods that dispatch to array and wrap result in Index + # These are defined here instead of via inherit_names for mypy + + @doc( + PeriodArray.asfreq, + other="pandas.arrays.PeriodArray", + other_name="PeriodArray", + **_shared_doc_kwargs, + ) + def asfreq(self, freq=None, how: str = "E") -> Self: + arr = self._data.asfreq(freq, how) + return type(self)._simple_new(arr, name=self.name) + + @doc(PeriodArray.to_timestamp) + def to_timestamp(self, freq=None, how: str = "start") -> DatetimeIndex: + arr = self._data.to_timestamp(freq, how) + return DatetimeIndex._simple_new(arr, name=self.name) + + @property + @doc(PeriodArray.hour.fget) + def hour(self) -> Index: + return Index(self._data.hour, name=self.name) + + @property + @doc(PeriodArray.minute.fget) + def minute(self) -> Index: + return Index(self._data.minute, name=self.name) + + @property + @doc(PeriodArray.second.fget) + def second(self) -> Index: + return Index(self._data.second, name=self.name) + + # ------------------------------------------------------------------------ + # Index Constructors + + def __new__( + cls, + data=None, + ordinal=None, + freq=None, + dtype: Dtype | None = None, + copy: bool = False, + name: Hashable | None = None, + **fields, + ) -> Self: + valid_field_set = { + "year", + "month", + "day", + "quarter", + "hour", + "minute", + "second", + } + + refs = None + if not copy and isinstance(data, (Index, ABCSeries)): + refs = data._references + + if not set(fields).issubset(valid_field_set): + argument = next(iter(set(fields) - valid_field_set)) + raise TypeError(f"__new__() got an unexpected keyword argument {argument}") + + name = maybe_extract_name(name, data, cls) + + if data is None and ordinal is None: + # range-based. 
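+            # e.g. PeriodIndex(year=[2000, 2002], quarter=[1, 3]) constructs
+            # from fields via PeriodArray._generate_range below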
+ if not fields: + # test_pickle_compat_construction + cls._raise_scalar_data_error(None) + + data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields) + # PeriodArray._generate range does validation that fields is + # empty when really using the range-based constructor. + freq = freq2 + + dtype = PeriodDtype(freq) + data = PeriodArray(data, dtype=dtype) + else: + freq = validate_dtype_freq(dtype, freq) + + # PeriodIndex allow PeriodIndex(period_index, freq=different) + # Let's not encourage that kind of behavior in PeriodArray. + + if freq and isinstance(data, cls) and data.freq != freq: + # TODO: We can do some of these with no-copy / coercion? + # e.g. D -> 2D seems to be OK + data = data.asfreq(freq) + + if data is None and ordinal is not None: + # we strangely ignore `ordinal` if data is passed. + ordinal = np.asarray(ordinal, dtype=np.int64) + dtype = PeriodDtype(freq) + data = PeriodArray(ordinal, dtype=dtype) + else: + # don't pass copy here, since we copy later. + data = period_array(data=data, freq=freq) + + if copy: + data = data.copy() + + return cls._simple_new(data, name=name, refs=refs) + + # ------------------------------------------------------------------------ + # Data + + @property + def values(self) -> npt.NDArray[np.object_]: + return np.asarray(self, dtype=object) + + def _maybe_convert_timedelta(self, other) -> int | npt.NDArray[np.int64]: + """ + Convert timedelta-like input to an integer multiple of self.freq + + Parameters + ---------- + other : timedelta, np.timedelta64, DateOffset, int, np.ndarray + + Returns + ------- + converted : int, np.ndarray[int64] + + Raises + ------ + IncompatibleFrequency : if the input cannot be written as a multiple + of self.freq. Note IncompatibleFrequency subclasses ValueError. + """ + if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)): + if isinstance(self.freq, Tick): + # _check_timedeltalike_freq_compat will raise if incompatible + delta = self._data._check_timedeltalike_freq_compat(other) + return delta + elif isinstance(other, BaseOffset): + if other.base == self.freq.base: + return other.n + + raise raise_on_incompatible(self, other) + elif is_integer(other): + assert isinstance(other, int) + return other + + # raise when input doesn't have freq + raise raise_on_incompatible(self, None) + + def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: + """ + Can we compare values of the given dtype to our own? + """ + return self.dtype == dtype + + # ------------------------------------------------------------------------ + # Index Methods + + def asof_locs(self, where: Index, mask: npt.NDArray[np.bool_]) -> np.ndarray: + """ + where : array of timestamps + mask : np.ndarray[bool] + Array of booleans where data is not NA. + """ + if isinstance(where, DatetimeIndex): + where = PeriodIndex(where._values, freq=self.freq) + elif not isinstance(where, PeriodIndex): + raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex") + + return super().asof_locs(where, mask) + + @property + def is_full(self) -> bool: + """ + Returns True if this PeriodIndex is range-like in that all Periods + between start and end are present, in order. 
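+
+        Examples
+        --------
+        A contiguous monthly index is full; one with a gap is not:
+
+        >>> pd.PeriodIndex(["2020-01", "2020-02", "2020-03"], freq="M").is_full
+        True
+        >>> pd.PeriodIndex(["2020-01", "2020-03"], freq="M").is_full
+        False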
+ """ + if len(self) == 0: + return True + if not self.is_monotonic_increasing: + raise ValueError("Index is not monotonic") + values = self.asi8 + return bool(((values[1:] - values[:-1]) < 2).all()) + + @property + def inferred_type(self) -> str: + # b/c data is represented as ints make sure we can't have ambiguous + # indexing + return "period" + + # ------------------------------------------------------------------------ + # Indexing Methods + + def _convert_tolerance(self, tolerance, target): + # Returned tolerance must be in dtype/units so that + # `|self._get_engine_target() - target._engine_target()| <= tolerance` + # is meaningful. Since PeriodIndex returns int64 for engine_target, + # we may need to convert timedelta64 tolerance to int64. + tolerance = super()._convert_tolerance(tolerance, target) + + if self.dtype == target.dtype: + # convert tolerance to i8 + tolerance = self._maybe_convert_timedelta(tolerance) + + return tolerance + + def get_loc(self, key): + """ + Get integer location for requested label. + + Parameters + ---------- + key : Period, NaT, str, or datetime + String or datetime key must be parsable as Period. + + Returns + ------- + loc : int or ndarray[int64] + + Raises + ------ + KeyError + Key is not present in the index. + TypeError + If key is listlike or otherwise not hashable. + """ + orig_key = key + + self._check_indexing_error(key) + + if is_valid_na_for_dtype(key, self.dtype): + key = NaT + + elif isinstance(key, str): + try: + parsed, reso = self._parse_with_reso(key) + except ValueError as err: + # A string with invalid format + raise KeyError(f"Cannot interpret '{key}' as period") from err + + if self._can_partial_date_slice(reso): + try: + return self._partial_date_slice(reso, parsed) + except KeyError as err: + raise KeyError(key) from err + + if reso == self._resolution_obj: + # the reso < self._resolution_obj case goes + # through _get_string_slice + key = self._cast_partial_indexing_scalar(parsed) + else: + raise KeyError(key) + + elif isinstance(key, Period): + self._disallow_mismatched_indexing(key) + + elif isinstance(key, datetime): + key = self._cast_partial_indexing_scalar(key) + + else: + # in particular integer, which Period constructor would cast to string + raise KeyError(key) + + try: + return Index.get_loc(self, key) + except KeyError as err: + raise KeyError(orig_key) from err + + def _disallow_mismatched_indexing(self, key: Period) -> None: + if key._dtype != self.dtype: + raise KeyError(key) + + def _cast_partial_indexing_scalar(self, label: datetime) -> Period: + try: + period = Period(label, freq=self.freq) + except ValueError as err: + # we cannot construct the Period + raise KeyError(label) from err + return period + + @doc(DatetimeIndexOpsMixin._maybe_cast_slice_bound) + def _maybe_cast_slice_bound(self, label, side: str): + if isinstance(label, datetime): + label = self._cast_partial_indexing_scalar(label) + + return super()._maybe_cast_slice_bound(label, side) + + def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime): + iv = Period(parsed, freq=reso.attr_abbrev) + return (iv.asfreq(self.freq, how="start"), iv.asfreq(self.freq, how="end")) + + @doc(DatetimeIndexOpsMixin.shift) + def shift(self, periods: int = 1, freq=None) -> Self: + if freq is not None: + raise TypeError( + f"`freq` argument is not supported for {type(self).__name__}.shift" + ) + return self + periods + + +def period_range( + start=None, + end=None, + periods: int | None = None, + freq=None, + name: Hashable | None = None, +) -> 
PeriodIndex: + """ + Return a fixed frequency PeriodIndex. + + The day (calendar) is the default frequency. + + Parameters + ---------- + start : str, datetime, date, pandas.Timestamp, or period-like, default None + Left bound for generating periods. + end : str, datetime, date, pandas.Timestamp, or period-like, default None + Right bound for generating periods. + periods : int, default None + Number of periods to generate. + freq : str or DateOffset, optional + Frequency alias. By default the freq is taken from `start` or `end` + if those are Period objects. Otherwise, the default is ``"D"`` for + daily frequency. + name : str, default None + Name of the resulting PeriodIndex. + + Returns + ------- + PeriodIndex + + Notes + ----- + Of the three parameters: ``start``, ``end``, and ``periods``, exactly two + must be specified. + + To learn more about the frequency strings, please see `this link + `__. + + Examples + -------- + >>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M') + PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06', + '2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12', + '2018-01'], + dtype='period[M]') + + If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor + endpoints for a ``PeriodIndex`` with frequency matching that of the + ``period_range`` constructor. + + >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'), + ... end=pd.Period('2017Q2', freq='Q'), freq='M') + PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'], + dtype='period[M]') + """ + if com.count_not_none(start, end, periods) != 2: + raise ValueError( + "Of the three parameters: start, end, and periods, " + "exactly two must be specified" + ) + if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)): + freq = "D" + + data, freq = PeriodArray._generate_range(start, end, periods, freq, fields={}) + dtype = PeriodDtype(freq) + data = PeriodArray(data, dtype=dtype) + return PeriodIndex(data, name=name) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/range.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/range.py new file mode 100644 index 00000000..1e8a3851 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/range.py @@ -0,0 +1,1149 @@ +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterator, +) +from datetime import timedelta +import operator +from sys import getsizeof +from typing import ( + TYPE_CHECKING, + Any, + Callable, + cast, +) + +import numpy as np + +from pandas._libs import ( + index as libindex, + lib, +) +from pandas._libs.algos import unique_deltas +from pandas._libs.lib import no_default +from pandas.compat.numpy import function as nv +from pandas.util._decorators import ( + cache_readonly, + doc, +) + +from pandas.core.dtypes.common import ( + ensure_platform_int, + ensure_python_int, + is_float, + is_integer, + is_scalar, + is_signed_integer_dtype, +) +from pandas.core.dtypes.generic import ABCTimedeltaIndex + +from pandas.core import ops +import pandas.core.common as com +from pandas.core.construction import extract_array +import pandas.core.indexes.base as ibase +from pandas.core.indexes.base import ( + Index, + maybe_extract_name, +) +from pandas.core.ops.common import unpack_zerodim_and_defer + +if TYPE_CHECKING: + from pandas._typing import ( + Axis, + Dtype, + NaPosition, + Self, + npt, + ) +_empty_range = range(0) +_dtype_int64 = np.dtype(np.int64) + + +class RangeIndex(Index): + 
""" + Immutable Index implementing a monotonic integer range. + + RangeIndex is a memory-saving special case of an Index limited to representing + monotonic ranges with a 64-bit dtype. Using RangeIndex may in some instances + improve computing speed. + + This is the default index type used + by DataFrame and Series when no explicit index is provided by the user. + + Parameters + ---------- + start : int (default: 0), range, or other RangeIndex instance + If int and "stop" is not given, interpreted as "stop" instead. + stop : int (default: 0) + step : int (default: 1) + dtype : np.int64 + Unused, accepted for homogeneity with other index types. + copy : bool, default False + Unused, accepted for homogeneity with other index types. + name : object, optional + Name to be stored in the index. + + Attributes + ---------- + start + stop + step + + Methods + ------- + from_range + + See Also + -------- + Index : The base pandas Index type. + + Examples + -------- + >>> list(pd.RangeIndex(5)) + [0, 1, 2, 3, 4] + + >>> list(pd.RangeIndex(-2, 4)) + [-2, -1, 0, 1, 2, 3] + + >>> list(pd.RangeIndex(0, 10, 2)) + [0, 2, 4, 6, 8] + + >>> list(pd.RangeIndex(2, -10, -3)) + [2, -1, -4, -7] + + >>> list(pd.RangeIndex(0)) + [] + + >>> list(pd.RangeIndex(1, 0)) + [] + """ + + _typ = "rangeindex" + _dtype_validation_metadata = (is_signed_integer_dtype, "signed integer") + _range: range + _values: np.ndarray + + @property + def _engine_type(self) -> type[libindex.Int64Engine]: + return libindex.Int64Engine + + # -------------------------------------------------------------------- + # Constructors + + def __new__( + cls, + start=None, + stop=None, + step=None, + dtype: Dtype | None = None, + copy: bool = False, + name: Hashable | None = None, + ) -> RangeIndex: + cls._validate_dtype(dtype) + name = maybe_extract_name(name, start, cls) + + # RangeIndex + if isinstance(start, RangeIndex): + return start.copy(name=name) + elif isinstance(start, range): + return cls._simple_new(start, name=name) + + # validate the arguments + if com.all_none(start, stop, step): + raise TypeError("RangeIndex(...) must be called with integers") + + start = ensure_python_int(start) if start is not None else 0 + + if stop is None: + start, stop = 0, start + else: + stop = ensure_python_int(stop) + + step = ensure_python_int(step) if step is not None else 1 + if step == 0: + raise ValueError("Step must not be zero") + + rng = range(start, stop, step) + return cls._simple_new(rng, name=name) + + @classmethod + def from_range(cls, data: range, name=None, dtype: Dtype | None = None) -> Self: + """ + Create :class:`pandas.RangeIndex` from a ``range`` object. + + Returns + ------- + RangeIndex + + Examples + -------- + >>> pd.RangeIndex.from_range(range(5)) + RangeIndex(start=0, stop=5, step=1) + + >>> pd.RangeIndex.from_range(range(2, -10, -3)) + RangeIndex(start=2, stop=-10, step=-3) + """ + if not isinstance(data, range): + raise TypeError( + f"{cls.__name__}(...) 
must be called with object coercible to a " + f"range, {repr(data)} was passed" + ) + cls._validate_dtype(dtype) + return cls._simple_new(data, name=name) + + # error: Argument 1 of "_simple_new" is incompatible with supertype "Index"; + # supertype defines the argument type as + # "Union[ExtensionArray, ndarray[Any, Any]]" [override] + @classmethod + def _simple_new( # type: ignore[override] + cls, values: range, name: Hashable | None = None + ) -> Self: + result = object.__new__(cls) + + assert isinstance(values, range) + + result._range = values + result._name = name + result._cache = {} + result._reset_identity() + result._references = None + return result + + @classmethod + def _validate_dtype(cls, dtype: Dtype | None) -> None: + if dtype is None: + return + + validation_func, expected = cls._dtype_validation_metadata + if not validation_func(dtype): + raise ValueError( + f"Incorrect `dtype` passed: expected {expected}, received {dtype}" + ) + + # -------------------------------------------------------------------- + + # error: Return type "Type[Index]" of "_constructor" incompatible with return + # type "Type[RangeIndex]" in supertype "Index" + @cache_readonly + def _constructor(self) -> type[Index]: # type: ignore[override] + """return the class to use for construction""" + return Index + + # error: Signature of "_data" incompatible with supertype "Index" + @cache_readonly + def _data(self) -> np.ndarray: # type: ignore[override] + """ + An int array that for performance reasons is created only when needed. + + The constructed array is saved in ``_cache``. + """ + return np.arange(self.start, self.stop, self.step, dtype=np.int64) + + def _get_data_as_items(self): + """return a list of tuples of start, stop, step""" + rng = self._range + return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)] + + def __reduce__(self): + d = {"name": self._name} + d.update(dict(self._get_data_as_items())) + return ibase._new_Index, (type(self), d), None + + # -------------------------------------------------------------------- + # Rendering Methods + + def _format_attrs(self): + """ + Return a list of tuples of the (attr, formatted_value) + """ + attrs = self._get_data_as_items() + if self._name is not None: + attrs.append(("name", ibase.default_pprint(self._name))) + return attrs + + def _format_data(self, name=None): + # we are formatting thru the attributes + return None + + def _format_with_header(self, header: list[str], na_rep: str) -> list[str]: + # Equivalent to Index implementation, but faster + if not len(self._range): + return header + first_val_str = str(self._range[0]) + last_val_str = str(self._range[-1]) + max_length = max(len(first_val_str), len(last_val_str)) + + return header + [f"{x:<{max_length}}" for x in self._range] + + # -------------------------------------------------------------------- + + @property + def start(self) -> int: + """ + The value of the `start` parameter (``0`` if this was not supplied). + + Examples + -------- + >>> idx = pd.RangeIndex(5) + >>> idx.start + 0 + + >>> idx = pd.RangeIndex(2, -10, -3) + >>> idx.start + 2 + """ + # GH 25710 + return self._range.start + + @property + def stop(self) -> int: + """ + The value of the `stop` parameter. + + Examples + -------- + >>> idx = pd.RangeIndex(5) + >>> idx.stop + 5 + + >>> idx = pd.RangeIndex(2, -10, -3) + >>> idx.stop + -10 + """ + return self._range.stop + + @property + def step(self) -> int: + """ + The value of the `step` parameter (``1`` if this was not supplied). 
+ + Examples + -------- + >>> idx = pd.RangeIndex(5) + >>> idx.step + 1 + + >>> idx = pd.RangeIndex(2, -10, -3) + >>> idx.step + -3 + + Even if :class:`pandas.RangeIndex` is empty, ``step`` is still ``1`` if + not supplied. + + >>> idx = pd.RangeIndex(1, 0) + >>> idx.step + 1 + """ + # GH 25710 + return self._range.step + + @cache_readonly + def nbytes(self) -> int: + """ + Return the number of bytes in the underlying data. + """ + rng = self._range + return getsizeof(rng) + sum( + getsizeof(getattr(rng, attr_name)) + for attr_name in ["start", "stop", "step"] + ) + + def memory_usage(self, deep: bool = False) -> int: + """ + Memory usage of my values + + Parameters + ---------- + deep : bool + Introspect the data deeply, interrogate + `object` dtypes for system-level memory consumption + + Returns + ------- + bytes used + + Notes + ----- + Memory usage does not include memory consumed by elements that + are not components of the array if deep=False + + See Also + -------- + numpy.ndarray.nbytes + """ + return self.nbytes + + @property + def dtype(self) -> np.dtype: + return _dtype_int64 + + @property + def is_unique(self) -> bool: + """return if the index has unique values""" + return True + + @cache_readonly + def is_monotonic_increasing(self) -> bool: + return self._range.step > 0 or len(self) <= 1 + + @cache_readonly + def is_monotonic_decreasing(self) -> bool: + return self._range.step < 0 or len(self) <= 1 + + def __contains__(self, key: Any) -> bool: + hash(key) + try: + key = ensure_python_int(key) + except TypeError: + return False + return key in self._range + + @property + def inferred_type(self) -> str: + return "integer" + + # -------------------------------------------------------------------- + # Indexing Methods + + @doc(Index.get_loc) + def get_loc(self, key): + if is_integer(key) or (is_float(key) and key.is_integer()): + new_key = int(key) + try: + return self._range.index(new_key) + except ValueError as err: + raise KeyError(key) from err + if isinstance(key, Hashable): + raise KeyError(key) + self._check_indexing_error(key) + raise KeyError(key) + + def _get_indexer( + self, + target: Index, + method: str | None = None, + limit: int | None = None, + tolerance=None, + ) -> npt.NDArray[np.intp]: + if com.any_not_none(method, tolerance, limit): + return super()._get_indexer( + target, method=method, tolerance=tolerance, limit=limit + ) + + if self.step > 0: + start, stop, step = self.start, self.stop, self.step + else: + # GH 28678: work on reversed range for simplicity + reverse = self._range[::-1] + start, stop, step = reverse.start, reverse.stop, reverse.step + + target_array = np.asarray(target) + locs = target_array - start + valid = (locs % step == 0) & (locs >= 0) & (target_array < stop) + locs[~valid] = -1 + locs[valid] = locs[valid] / step + + if step != self.step: + # We reversed this range: transform to original locs + locs[valid] = len(self) - 1 - locs[valid] + return ensure_platform_int(locs) + + @cache_readonly + def _should_fallback_to_positional(self) -> bool: + """ + Should an integer key be treated as positional? 
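+
+        Always False for RangeIndex: the labels are integers themselves, so
+        an integer key is treated as a label rather than a position.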
+ """ + return False + + # -------------------------------------------------------------------- + + def tolist(self) -> list[int]: + return list(self._range) + + @doc(Index.__iter__) + def __iter__(self) -> Iterator[int]: + yield from self._range + + @doc(Index._shallow_copy) + def _shallow_copy(self, values, name: Hashable = no_default): + name = self._name if name is no_default else name + + if values.dtype.kind == "f": + return Index(values, name=name, dtype=np.float64) + # GH 46675 & 43885: If values is equally spaced, return a + # more memory-compact RangeIndex instead of Index with 64-bit dtype + unique_diffs = unique_deltas(values) + if len(unique_diffs) == 1 and unique_diffs[0] != 0: + diff = unique_diffs[0] + new_range = range(values[0], values[-1] + diff, diff) + return type(self)._simple_new(new_range, name=name) + else: + return self._constructor._simple_new(values, name=name) + + def _view(self) -> Self: + result = type(self)._simple_new(self._range, name=self._name) + result._cache = self._cache + return result + + @doc(Index.copy) + def copy(self, name: Hashable | None = None, deep: bool = False) -> Self: + name = self._validate_names(name=name, deep=deep)[0] + new_index = self._rename(name=name) + return new_index + + def _minmax(self, meth: str): + no_steps = len(self) - 1 + if no_steps == -1: + return np.nan + elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0): + return self.start + + return self.start + self.step * no_steps + + def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: + """The minimum value of the RangeIndex""" + nv.validate_minmax_axis(axis) + nv.validate_min(args, kwargs) + return self._minmax("min") + + def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: + """The maximum value of the RangeIndex""" + nv.validate_minmax_axis(axis) + nv.validate_max(args, kwargs) + return self._minmax("max") + + def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: + """ + Returns the indices that would sort the index and its + underlying data. + + Returns + ------- + np.ndarray[np.intp] + + See Also + -------- + numpy.ndarray.argsort + """ + ascending = kwargs.pop("ascending", True) # EA compat + kwargs.pop("kind", None) # e.g. "mergesort" is irrelevant + nv.validate_argsort(args, kwargs) + + if self._range.step > 0: + result = np.arange(len(self), dtype=np.intp) + else: + result = np.arange(len(self) - 1, -1, -1, dtype=np.intp) + + if not ascending: + result = result[::-1] + return result + + def factorize( + self, + sort: bool = False, + use_na_sentinel: bool = True, + ) -> tuple[npt.NDArray[np.intp], RangeIndex]: + codes = np.arange(len(self), dtype=np.intp) + uniques = self + if sort and self.step < 0: + codes = codes[::-1] + uniques = uniques[::-1] + return codes, uniques + + def equals(self, other: object) -> bool: + """ + Determines if two Index objects contain the same elements. 
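+
+        Two ranges describing the same values compare equal, as does an
+        equivalent materialized integer Index:
+
+        >>> pd.RangeIndex(0, 6, 2).equals(pd.RangeIndex(0, 5, 2))
+        True
+        >>> pd.RangeIndex(0, 10, 2).equals(pd.Index([0, 2, 4, 6, 8]))
+        True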
+ """ + if isinstance(other, RangeIndex): + return self._range == other._range + return super().equals(other) + + def sort_values( + self, + return_indexer: bool = False, + ascending: bool = True, + na_position: NaPosition = "last", + key: Callable | None = None, + ): + if key is not None: + return super().sort_values( + return_indexer=return_indexer, + ascending=ascending, + na_position=na_position, + key=key, + ) + else: + sorted_index = self + inverse_indexer = False + if ascending: + if self.step < 0: + sorted_index = self[::-1] + inverse_indexer = True + else: + if self.step > 0: + sorted_index = self[::-1] + inverse_indexer = True + + if return_indexer: + if inverse_indexer: + rng = range(len(self) - 1, -1, -1) + else: + rng = range(len(self)) + return sorted_index, RangeIndex(rng) + else: + return sorted_index + + # -------------------------------------------------------------------- + # Set Operations + + def _intersection(self, other: Index, sort: bool = False): + # caller is responsible for checking self and other are both non-empty + + if not isinstance(other, RangeIndex): + return super()._intersection(other, sort=sort) + + first = self._range[::-1] if self.step < 0 else self._range + second = other._range[::-1] if other.step < 0 else other._range + + # check whether intervals intersect + # deals with in- and decreasing ranges + int_low = max(first.start, second.start) + int_high = min(first.stop, second.stop) + if int_high <= int_low: + return self._simple_new(_empty_range) + + # Method hint: linear Diophantine equation + # solve intersection problem + # performance hint: for identical step sizes, could use + # cheaper alternative + gcd, s, _ = self._extended_gcd(first.step, second.step) + + # check whether element sets intersect + if (first.start - second.start) % gcd: + return self._simple_new(_empty_range) + + # calculate parameters for the RangeIndex describing the + # intersection disregarding the lower bounds + tmp_start = first.start + (second.start - first.start) * first.step // gcd * s + new_step = first.step * second.step // gcd + new_range = range(tmp_start, int_high, new_step) + new_index = self._simple_new(new_range) + + # adjust index to limiting interval + new_start = new_index._min_fitting_element(int_low) + new_range = range(new_start, new_index.stop, new_index.step) + new_index = self._simple_new(new_range) + + if (self.step < 0 and other.step < 0) is not (new_index.step < 0): + new_index = new_index[::-1] + + if sort is None: + new_index = new_index.sort_values() + + return new_index + + def _min_fitting_element(self, lower_limit: int) -> int: + """Returns the smallest element greater than or equal to the limit""" + no_steps = -(-(lower_limit - self.start) // abs(self.step)) + return self.start + abs(self.step) * no_steps + + def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]: + """ + Extended Euclidean algorithms to solve Bezout's identity: + a*x + b*y = gcd(x, y) + Finds one particular solution for x, y: s, t + Returns: gcd, s, t + """ + s, old_s = 0, 1 + t, old_t = 1, 0 + r, old_r = b, a + while r: + quotient = old_r // r + old_r, r = r, old_r - quotient * r + old_s, s = s, old_s - quotient * s + old_t, t = t, old_t - quotient * t + return old_r, old_s, old_t + + def _range_in_self(self, other: range) -> bool: + """Check if other range is contained in self""" + # https://stackoverflow.com/a/32481015 + if not other: + return True + if not self._range: + return False + if len(other) > 1 and other.step % self._range.step: + return False + return 
other.start in self._range and other[-1] in self._range + + def _union(self, other: Index, sort: bool | None): + """ + Form the union of two Index objects and sorts if possible + + Parameters + ---------- + other : Index or array-like + + sort : bool or None, default None + Whether to sort (monotonically increasing) the resulting index. + ``sort=None|True`` returns a ``RangeIndex`` if possible or a sorted + ``Index`` with a int64 dtype if not. + ``sort=False`` can return a ``RangeIndex`` if self is monotonically + increasing and other is fully contained in self. Otherwise, returns + an unsorted ``Index`` with an int64 dtype. + + Returns + ------- + union : Index + """ + if isinstance(other, RangeIndex): + if sort in (None, True) or ( + sort is False and self.step > 0 and self._range_in_self(other._range) + ): + # GH 47557: Can still return a RangeIndex + # if other range in self and sort=False + start_s, step_s = self.start, self.step + end_s = self.start + self.step * (len(self) - 1) + start_o, step_o = other.start, other.step + end_o = other.start + other.step * (len(other) - 1) + if self.step < 0: + start_s, step_s, end_s = end_s, -step_s, start_s + if other.step < 0: + start_o, step_o, end_o = end_o, -step_o, start_o + if len(self) == 1 and len(other) == 1: + step_s = step_o = abs(self.start - other.start) + elif len(self) == 1: + step_s = step_o + elif len(other) == 1: + step_o = step_s + start_r = min(start_s, start_o) + end_r = max(end_s, end_o) + if step_o == step_s: + if ( + (start_s - start_o) % step_s == 0 + and (start_s - end_o) <= step_s + and (start_o - end_s) <= step_s + ): + return type(self)(start_r, end_r + step_s, step_s) + if ( + (step_s % 2 == 0) + and (abs(start_s - start_o) == step_s / 2) + and (abs(end_s - end_o) == step_s / 2) + ): + # e.g. range(0, 10, 2) and range(1, 11, 2) + # but not range(0, 20, 4) and range(1, 21, 4) GH#44019 + return type(self)(start_r, end_r + step_s / 2, step_s / 2) + + elif step_o % step_s == 0: + if ( + (start_o - start_s) % step_s == 0 + and (start_o + step_s >= start_s) + and (end_o - step_s <= end_s) + ): + return type(self)(start_r, end_r + step_s, step_s) + elif step_s % step_o == 0: + if ( + (start_s - start_o) % step_o == 0 + and (start_s + step_o >= start_o) + and (end_s - step_o <= end_o) + ): + return type(self)(start_r, end_r + step_o, step_o) + + return super()._union(other, sort=sort) + + def _difference(self, other, sort=None): + # optimized set operation if we have another RangeIndex + self._validate_sort_keyword(sort) + self._assert_can_do_setop(other) + other, result_name = self._convert_can_do_setop(other) + + if not isinstance(other, RangeIndex): + return super()._difference(other, sort=sort) + + if sort is not False and self.step < 0: + return self[::-1]._difference(other) + + res_name = ops.get_op_result_name(self, other) + + first = self._range[::-1] if self.step < 0 else self._range + overlap = self.intersection(other) + if overlap.step < 0: + overlap = overlap[::-1] + + if len(overlap) == 0: + return self.rename(name=res_name) + if len(overlap) == len(self): + return self[:0].rename(res_name) + + # overlap.step will always be a multiple of self.step (see _intersection) + + if len(overlap) == 1: + if overlap[0] == self[0]: + return self[1:] + + elif overlap[0] == self[-1]: + return self[:-1] + + elif len(self) == 3 and overlap[0] == self[1]: + return self[::2] + + else: + return super()._difference(other, sort=sort) + + elif len(overlap) == 2 and overlap[0] == first[0] and overlap[-1] == first[-1]: + # e.g. 
range(-8, 20, 7) and range(13, -9, -3) + return self[1:-1] + + if overlap.step == first.step: + if overlap[0] == first.start: + # The difference is everything after the intersection + new_rng = range(overlap[-1] + first.step, first.stop, first.step) + elif overlap[-1] == first[-1]: + # The difference is everything before the intersection + new_rng = range(first.start, overlap[0], first.step) + elif overlap._range == first[1:-1]: + # e.g. range(4) and range(1, 3) + step = len(first) - 1 + new_rng = first[::step] + else: + # The difference is not range-like + # e.g. range(1, 10, 1) and range(3, 7, 1) + return super()._difference(other, sort=sort) + + else: + # We must have len(self) > 1, bc we ruled out above + # len(overlap) == 0 and len(overlap) == len(self) + assert len(self) > 1 + + if overlap.step == first.step * 2: + if overlap[0] == first[0] and overlap[-1] in (first[-1], first[-2]): + # e.g. range(1, 10, 1) and range(1, 10, 2) + new_rng = first[1::2] + + elif overlap[0] == first[1] and overlap[-1] in (first[-1], first[-2]): + # e.g. range(1, 10, 1) and range(2, 10, 2) + new_rng = first[::2] + + else: + # We can get here with e.g. range(20) and range(0, 10, 2) + return super()._difference(other, sort=sort) + + else: + # e.g. range(10) and range(0, 10, 3) + return super()._difference(other, sort=sort) + + new_index = type(self)._simple_new(new_rng, name=res_name) + if first is not self._range: + new_index = new_index[::-1] + + return new_index + + def symmetric_difference( + self, other, result_name: Hashable | None = None, sort=None + ): + if not isinstance(other, RangeIndex) or sort is not None: + return super().symmetric_difference(other, result_name, sort) + + left = self.difference(other) + right = other.difference(self) + result = left.union(right) + + if result_name is not None: + result = result.rename(result_name) + return result + + # -------------------------------------------------------------------- + + # error: Return type "Index" of "delete" incompatible with return type + # "RangeIndex" in supertype "Index" + def delete(self, loc) -> Index: # type: ignore[override] + # In some cases we can retain RangeIndex, see also + # DatetimeTimedeltaMixin._get_delete_Freq + if is_integer(loc): + if loc in (0, -len(self)): + return self[1:] + if loc in (-1, len(self) - 1): + return self[:-1] + if len(self) == 3 and loc in (1, -2): + return self[::2] + + elif lib.is_list_like(loc): + slc = lib.maybe_indices_to_slice(np.asarray(loc, dtype=np.intp), len(self)) + + if isinstance(slc, slice): + # defer to RangeIndex._difference, which is optimized to return + # a RangeIndex whenever possible + other = self[slc] + return self.difference(other, sort=False) + + return super().delete(loc) + + def insert(self, loc: int, item) -> Index: + if len(self) and (is_integer(item) or is_float(item)): + # We can retain RangeIndex is inserting at the beginning or end, + # or right in the middle. + rng = self._range + if loc == 0 and item == self[0] - self.step: + new_rng = range(rng.start - rng.step, rng.stop, rng.step) + return type(self)._simple_new(new_rng, name=self._name) + + elif loc == len(self) and item == self[-1] + self.step: + new_rng = range(rng.start, rng.stop + rng.step, rng.step) + return type(self)._simple_new(new_rng, name=self._name) + + elif len(self) == 2 and item == self[0] + self.step / 2: + # e.g. 
inserting 1 into [0, 2] + step = int(self.step / 2) + new_rng = range(self.start, self.stop, step) + return type(self)._simple_new(new_rng, name=self._name) + + return super().insert(loc, item) + + def _concat(self, indexes: list[Index], name: Hashable) -> Index: + """ + Overriding parent method for the case of all RangeIndex instances. + + When all members of "indexes" are of type RangeIndex: result will be + RangeIndex if possible, Index with a int64 dtype otherwise. E.g.: + indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6) + indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Index([0,1,2,4,5], dtype='int64') + """ + if not all(isinstance(x, RangeIndex) for x in indexes): + return super()._concat(indexes, name) + + elif len(indexes) == 1: + return indexes[0] + + rng_indexes = cast(list[RangeIndex], indexes) + + start = step = next_ = None + + # Filter the empty indexes + non_empty_indexes = [obj for obj in rng_indexes if len(obj)] + + for obj in non_empty_indexes: + rng = obj._range + + if start is None: + # This is set by the first non-empty index + start = rng.start + if step is None and len(rng) > 1: + step = rng.step + elif step is None: + # First non-empty index had only one element + if rng.start == start: + values = np.concatenate([x._values for x in rng_indexes]) + result = self._constructor(values) + return result.rename(name) + + step = rng.start - start + + non_consecutive = (step != rng.step and len(rng) > 1) or ( + next_ is not None and rng.start != next_ + ) + if non_consecutive: + result = self._constructor( + np.concatenate([x._values for x in rng_indexes]) + ) + return result.rename(name) + + if step is not None: + next_ = rng[-1] + step + + if non_empty_indexes: + # Get the stop value from "next" or alternatively + # from the last non-empty index + stop = non_empty_indexes[-1].stop if next_ is None else next_ + return RangeIndex(start, stop, step).rename(name) + + # Here all "indexes" had 0 length, i.e. were empty. + # In this case return an empty range index. + return RangeIndex(0, 0).rename(name) + + def __len__(self) -> int: + """ + return the length of the RangeIndex + """ + return len(self._range) + + @property + def size(self) -> int: + return len(self) + + def __getitem__(self, key): + """ + Conserve RangeIndex type for scalar and slice keys. + """ + if isinstance(key, slice): + return self._getitem_slice(key) + elif is_integer(key): + new_key = int(key) + try: + return self._range[new_key] + except IndexError as err: + raise IndexError( + f"index {key} is out of bounds for axis 0 with size {len(self)}" + ) from err + elif is_scalar(key): + raise IndexError( + "only integers, slices (`:`), " + "ellipsis (`...`), numpy.newaxis (`None`) " + "and integer or boolean " + "arrays are valid indices" + ) + return super().__getitem__(key) + + def _getitem_slice(self, slobj: slice) -> Self: + """ + Fastpath for __getitem__ when we know we have a slice. 
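+
+        The slice is applied to the underlying ``range`` object, so the
+        result is another RangeIndex and no values are materialized:
+
+        >>> pd.RangeIndex(10)[::2]
+        RangeIndex(start=0, stop=10, step=2)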
+ """ + res = self._range[slobj] + return type(self)._simple_new(res, name=self._name) + + @unpack_zerodim_and_defer("__floordiv__") + def __floordiv__(self, other): + if is_integer(other) and other != 0: + if len(self) == 0 or self.start % other == 0 and self.step % other == 0: + start = self.start // other + step = self.step // other + stop = start + len(self) * step + new_range = range(start, stop, step or 1) + return self._simple_new(new_range, name=self._name) + if len(self) == 1: + start = self.start // other + new_range = range(start, start + 1, 1) + return self._simple_new(new_range, name=self._name) + + return super().__floordiv__(other) + + # -------------------------------------------------------------------- + # Reductions + + def all(self, *args, **kwargs) -> bool: + return 0 not in self._range + + def any(self, *args, **kwargs) -> bool: + return any(self._range) + + # -------------------------------------------------------------------- + + def _cmp_method(self, other, op): + if isinstance(other, RangeIndex) and self._range == other._range: + # Both are immutable so if ._range attr. are equal, shortcut is possible + return super()._cmp_method(self, op) + return super()._cmp_method(other, op) + + def _arith_method(self, other, op): + """ + Parameters + ---------- + other : Any + op : callable that accepts 2 params + perform the binary op + """ + + if isinstance(other, ABCTimedeltaIndex): + # Defer to TimedeltaIndex implementation + return NotImplemented + elif isinstance(other, (timedelta, np.timedelta64)): + # GH#19333 is_integer evaluated True on timedelta64, + # so we need to catch these explicitly + return super()._arith_method(other, op) + elif lib.is_np_dtype(getattr(other, "dtype", None), "m"): + # Must be an np.ndarray; GH#22390 + return super()._arith_method(other, op) + + if op in [ + operator.pow, + ops.rpow, + operator.mod, + ops.rmod, + operator.floordiv, + ops.rfloordiv, + divmod, + ops.rdivmod, + ]: + return super()._arith_method(other, op) + + step: Callable | None = None + if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]: + step = op + + # TODO: if other is a RangeIndex we may have more efficient options + right = extract_array(other, extract_numpy=True, extract_range=True) + left = self + + try: + # apply if we have an override + if step: + with np.errstate(all="ignore"): + rstep = step(left.step, right) + + # we don't have a representable op + # so return a base index + if not is_integer(rstep) or not rstep: + raise ValueError + + # GH#53255 + else: + rstep = -left.step if op == ops.rsub else left.step + + with np.errstate(all="ignore"): + rstart = op(left.start, right) + rstop = op(left.stop, right) + + res_name = ops.get_op_result_name(self, other) + result = type(self)(rstart, rstop, rstep, name=res_name) + + # for compat with numpy / Index with int64 dtype + # even if we can represent as a RangeIndex, return + # as a float64 Index if we have float-like descriptors + if not all(is_integer(x) for x in [rstart, rstop, rstep]): + result = result.astype("float64") + + return result + + except (ValueError, TypeError, ZeroDivisionError): + # test_arithmetic_explicit_conversions + return super()._arith_method(other, op) + + def take( + self, + indices, + axis: Axis = 0, + allow_fill: bool = True, + fill_value=None, + **kwargs, + ): + if kwargs: + nv.validate_take((), kwargs) + if is_scalar(indices): + raise TypeError("Expected indices to be array-like") + indices = ensure_platform_int(indices) + + # raise an exception if allow_fill is True and 
fill_value is not None + self._maybe_disallow_fill(allow_fill, fill_value, indices) + + if len(indices) == 0: + taken = np.array([], dtype=self.dtype) + else: + ind_max = indices.max() + if ind_max >= len(self): + raise IndexError( + f"index {ind_max} is out of bounds for axis 0 with size {len(self)}" + ) + ind_min = indices.min() + if ind_min < -len(self): + raise IndexError( + f"index {ind_min} is out of bounds for axis 0 with size {len(self)}" + ) + taken = indices.astype(self.dtype, casting="safe") + if ind_min < 0: + taken %= len(self) + if self.step != 1: + taken *= self.step + if self.start != 0: + taken += self.start + + # _constructor so RangeIndex-> Index with an int64 dtype + return self._constructor._simple_new(taken, name=self.name) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/timedeltas.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/timedeltas.py new file mode 100644 index 00000000..cd6a4883 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexes/timedeltas.py @@ -0,0 +1,350 @@ +""" implement the TimedeltaIndex """ +from __future__ import annotations + +from typing import TYPE_CHECKING +import warnings + +from pandas._libs import ( + index as libindex, + lib, +) +from pandas._libs.tslibs import ( + Resolution, + Timedelta, + to_offset, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_scalar, + pandas_dtype, +) +from pandas.core.dtypes.generic import ABCSeries + +from pandas.core.arrays import datetimelike as dtl +from pandas.core.arrays.timedeltas import TimedeltaArray +import pandas.core.common as com +from pandas.core.indexes.base import ( + Index, + maybe_extract_name, +) +from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin +from pandas.core.indexes.extension import inherit_names + +if TYPE_CHECKING: + from pandas._typing import DtypeObj + + +@inherit_names( + ["__neg__", "__pos__", "__abs__", "total_seconds", "round", "floor", "ceil"] + + TimedeltaArray._field_ops, + TimedeltaArray, + wrap=True, +) +@inherit_names( + [ + "components", + "to_pytimedelta", + "sum", + "std", + "median", + "_format_native_types", + ], + TimedeltaArray, +) +class TimedeltaIndex(DatetimeTimedeltaMixin): + """ + Immutable Index of timedelta64 data. + + Represented internally as int64, and scalars returned Timedelta objects. + + Parameters + ---------- + data : array-like (1-dimensional), optional + Optional timedelta-like data to construct index with. + unit : {'D', 'h', 'm', 's', 'ms', 'us', 'ns'}, optional + The unit of ``data``. + freq : str or pandas offset object, optional + One of pandas date offset strings or corresponding objects. The string + ``'infer'`` can be passed in order to set the frequency of the index as + the inferred frequency upon creation. + dtype : numpy.dtype or str, default None + Valid ``numpy`` dtypes are ``timedelta64[ns]``, ``timedelta64[us]``, + ``timedelta64[ms]``, and ``timedelta64[s]``. + copy : bool + Make a copy of input array. + name : object + Name to be stored in the index. + + Attributes + ---------- + days + seconds + microseconds + nanoseconds + components + inferred_freq + + Methods + ------- + to_pytimedelta + to_series + round + floor + ceil + to_frame + mean + + See Also + -------- + Index : The base pandas Index type. + Timedelta : Represents a duration between two dates or times. + DatetimeIndex : Index of datetime64 data. + PeriodIndex : Index of Period data. 
+ timedelta_range : Create a fixed-frequency TimedeltaIndex. + + Notes + ----- + To learn more about the frequency strings, please see `this link + `__. + + Examples + -------- + >>> pd.TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days']) + TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], + dtype='timedelta64[ns]', freq=None) + + >>> pd.TimedeltaIndex([1, 2, 4, 8], unit='D') + TimedeltaIndex(['1 days', '2 days', '4 days', '8 days'], + dtype='timedelta64[ns]', freq=None) + + We can also let pandas infer the frequency when possible. + + >>> pd.TimedeltaIndex(range(5), unit='D', freq='infer') + TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], + dtype='timedelta64[ns]', freq='D') + """ + + _typ = "timedeltaindex" + + _data_cls = TimedeltaArray + + @property + def _engine_type(self) -> type[libindex.TimedeltaEngine]: + return libindex.TimedeltaEngine + + _data: TimedeltaArray + + # Use base class method instead of DatetimeTimedeltaMixin._get_string_slice + _get_string_slice = Index._get_string_slice + + # error: Signature of "_resolution_obj" incompatible with supertype + # "DatetimeIndexOpsMixin" + @property + def _resolution_obj(self) -> Resolution | None: # type: ignore[override] + return self._data._resolution_obj + + # ------------------------------------------------------------------- + # Constructors + + def __new__( + cls, + data=None, + unit=None, + freq=lib.no_default, + closed=lib.no_default, + dtype=None, + copy: bool = False, + name=None, + ): + if closed is not lib.no_default: + # GH#52628 + warnings.warn( + f"The 'closed' keyword in {cls.__name__} construction is " + "deprecated and will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + name = maybe_extract_name(name, data, cls) + + if is_scalar(data): + cls._raise_scalar_data_error(data) + + if unit in {"Y", "y", "M"}: + raise ValueError( + "Units 'M', 'Y', and 'y' are no longer supported, as they do not " + "represent unambiguous timedelta values durations." + ) + if dtype is not None: + dtype = pandas_dtype(dtype) + + if ( + isinstance(data, TimedeltaArray) + and freq is lib.no_default + and (dtype is None or dtype == data.dtype) + ): + if copy: + data = data.copy() + return cls._simple_new(data, name=name) + + if ( + isinstance(data, TimedeltaIndex) + and freq is lib.no_default + and name is None + and (dtype is None or dtype == data.dtype) + ): + if copy: + return data.copy() + else: + return data._view() + + # - Cases checked above all return/raise before reaching here - # + + tdarr = TimedeltaArray._from_sequence_not_strict( + data, freq=freq, unit=unit, dtype=dtype, copy=copy + ) + refs = None + if not copy and isinstance(data, (ABCSeries, Index)): + refs = data._references + + return cls._simple_new(tdarr, name=name, refs=refs) + + # ------------------------------------------------------------------- + + def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: + """ + Can we compare values of the given dtype to our own? 
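+
+        Illustrative sketch (the dtypes below are hypothetical inputs): any
+        ``timedelta64`` dtype is comparable, other dtypes are not.
+
+        >>> import numpy as np
+        >>> tdi = pd.TimedeltaIndex(['1 days'])
+        >>> tdi._is_comparable_dtype(np.dtype('timedelta64[ns]'))
+        True
+        >>> tdi._is_comparable_dtype(np.dtype('int64'))
+        False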
+ """ + return lib.is_np_dtype(dtype, "m") # aka self._data._is_recognized_dtype + + # ------------------------------------------------------------------- + # Indexing Methods + + def get_loc(self, key): + """ + Get integer location for requested label + + Returns + ------- + loc : int, slice, or ndarray[int] + """ + self._check_indexing_error(key) + + try: + key = self._data._validate_scalar(key, unbox=False) + except TypeError as err: + raise KeyError(key) from err + + return Index.get_loc(self, key) + + def _parse_with_reso(self, label: str): + # the "with_reso" is a no-op for TimedeltaIndex + parsed = Timedelta(label) + return parsed, None + + def _parsed_string_to_bounds(self, reso, parsed: Timedelta): + # reso is unused, included to match signature of DTI/PI + lbound = parsed.round(parsed.resolution_string) + rbound = lbound + to_offset(parsed.resolution_string) - Timedelta(1, "ns") + return lbound, rbound + + # ------------------------------------------------------------------- + + @property + def inferred_type(self) -> str: + return "timedelta64" + + +def timedelta_range( + start=None, + end=None, + periods: int | None = None, + freq=None, + name=None, + closed=None, + *, + unit: str | None = None, +) -> TimedeltaIndex: + """ + Return a fixed frequency TimedeltaIndex with day as the default. + + Parameters + ---------- + start : str or timedelta-like, default None + Left bound for generating timedeltas. + end : str or timedelta-like, default None + Right bound for generating timedeltas. + periods : int, default None + Number of periods to generate. + freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'D' + Frequency strings can have multiples, e.g. '5H'. + name : str, default None + Name of the resulting TimedeltaIndex. + closed : str, default None + Make the interval closed with respect to the given frequency to + the 'left', 'right', or both sides (None). + unit : str, default None + Specify the desired resolution of the result. + + .. versionadded:: 2.0.0 + + Returns + ------- + TimedeltaIndex + + Notes + ----- + Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, + exactly three must be specified. If ``freq`` is omitted, the resulting + ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between + ``start`` and ``end`` (closed on both sides). + + To learn more about the frequency strings, please see `this link + `__. + + Examples + -------- + >>> pd.timedelta_range(start='1 day', periods=4) + TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'], + dtype='timedelta64[ns]', freq='D') + + The ``closed`` parameter specifies which endpoint is included. The default + behavior is to include both endpoints. + + >>> pd.timedelta_range(start='1 day', periods=4, closed='right') + TimedeltaIndex(['2 days', '3 days', '4 days'], + dtype='timedelta64[ns]', freq='D') + + The ``freq`` parameter specifies the frequency of the TimedeltaIndex. + Only fixed frequencies can be passed, non-fixed frequencies such as + 'M' (month end) will raise. + + >>> pd.timedelta_range(start='1 day', end='2 days', freq='6H') + TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00', + '1 days 18:00:00', '2 days 00:00:00'], + dtype='timedelta64[ns]', freq='6H') + + Specify ``start``, ``end``, and ``periods``; the frequency is generated + automatically (linearly spaced). 
+ + >>> pd.timedelta_range(start='1 day', end='5 days', periods=4) + TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00', + '5 days 00:00:00'], + dtype='timedelta64[ns]', freq=None) + + **Specify a unit** + + >>> pd.timedelta_range("1 Day", periods=3, freq="100000D", unit="s") + TimedeltaIndex(['1 days 00:00:00', '100001 days 00:00:00', + '200001 days 00:00:00'], + dtype='timedelta64[s]', freq='100000D') + """ + if freq is None and com.any_none(periods, start, end): + freq = "D" + + freq, _ = dtl.maybe_infer_freq(freq) + tdarr = TimedeltaArray._generate_range( + start, end, periods, freq, closed=closed, unit=unit + ) + return TimedeltaIndex._simple_new(tdarr, name=name) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexing.py new file mode 100644 index 00000000..a2871f36 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/indexing.py @@ -0,0 +1,2698 @@ +from __future__ import annotations + +from contextlib import suppress +import sys +from typing import ( + TYPE_CHECKING, + cast, + final, +) +import warnings + +import numpy as np + +from pandas._config import using_copy_on_write + +from pandas._libs.indexing import NDFrameIndexerBase +from pandas._libs.lib import item_from_zerodim +from pandas.compat import PYPY +from pandas.errors import ( + AbstractMethodError, + ChainedAssignmentError, + IndexingError, + InvalidIndexError, + LossySetitemError, + _chained_assignment_msg, +) +from pandas.util._decorators import doc + +from pandas.core.dtypes.cast import ( + can_hold_element, + maybe_promote, +) +from pandas.core.dtypes.common import ( + is_array_like, + is_bool_dtype, + is_hashable, + is_integer, + is_iterator, + is_list_like, + is_numeric_dtype, + is_object_dtype, + is_scalar, + is_sequence, +) +from pandas.core.dtypes.concat import concat_compat +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + infer_fill_value, + is_valid_na_for_dtype, + isna, + na_value_for_dtype, +) + +from pandas.core import algorithms as algos +import pandas.core.common as com +from pandas.core.construction import ( + array as pd_array, + extract_array, +) +from pandas.core.indexers import ( + check_array_indexer, + is_list_like_indexer, + is_scalar_indexer, + length_of_indexer, +) +from pandas.core.indexes.api import ( + Index, + MultiIndex, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Sequence, + ) + + from pandas._typing import ( + Axis, + AxisInt, + Self, + ) + + from pandas import ( + DataFrame, + Series, + ) + +# "null slice" +_NS = slice(None, None) +_one_ellipsis_message = "indexer may only contain one '...' entry" + + +# the public IndexSlicerMaker +class _IndexSlice: + """ + Create an object to more easily perform multi-index slicing. + + See Also + -------- + MultiIndex.remove_unused_levels : New MultiIndex with no unused levels. + + Notes + ----- + See :ref:`Defined Levels ` + for further info on slicing a MultiIndex. + + Examples + -------- + >>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']]) + >>> columns = ['foo', 'bar'] + >>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))), + ... 
index=midx, columns=columns)
+
+    Using the default slice command:
+
+    >>> dfmi.loc[(slice(None), slice('B0', 'B1')), :]
+           foo  bar
+    A0 B0    0    1
+       B1    2    3
+    A1 B0    8    9
+       B1   10   11
+
+    Using the IndexSlice class for a more intuitive command:
+
+    >>> idx = pd.IndexSlice
+    >>> dfmi.loc[idx[:, 'B0':'B1'], :]
+           foo  bar
+    A0 B0    0    1
+       B1    2    3
+    A1 B0    8    9
+       B1   10   11
+    """
+
+    def __getitem__(self, arg):
+        return arg
+
+
+IndexSlice = _IndexSlice()
+
+
+class IndexingMixin:
+    """
+    Mixin for adding .loc/.iloc/.at/.iat to DataFrames and Series.
+    """
+
+    @property
+    def iloc(self) -> _iLocIndexer:
+        """
+        Purely integer-location based indexing for selection by position.
+
+        ``.iloc[]`` is primarily integer position based (from ``0`` to
+        ``length-1`` of the axis), but may also be used with a boolean
+        array.
+
+        Allowed inputs are:
+
+        - An integer, e.g. ``5``.
+        - A list or array of integers, e.g. ``[4, 3, 0]``.
+        - A slice object with ints, e.g. ``1:7``.
+        - A boolean array.
+        - A ``callable`` function with one argument (the calling Series or
+          DataFrame) and that returns valid output for indexing (one of the above).
+          This is useful in method chains, when you don't have a reference to the
+          calling object, but would like to base your selection on some value.
+        - A tuple of row and column indexes. The tuple elements consist of one of the
+          above inputs, e.g. ``(0, 1)``.
+
+        ``.iloc`` will raise ``IndexError`` if a requested indexer is
+        out-of-bounds, except *slice* indexers which allow out-of-bounds
+        indexing (this conforms with python/numpy *slice* semantics).
+
+        See more at :ref:`Selection by Position <indexing.integer>`.
+
+        See Also
+        --------
+        DataFrame.iat : Fast integer location scalar accessor.
+        DataFrame.loc : Purely label-location based indexer for selection by label.
+        Series.iloc : Purely integer-location based indexing for
+            selection by position.
+
+        Examples
+        --------
+        >>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4},
+        ...           {'a': 100, 'b': 200, 'c': 300, 'd': 400},
+        ...           {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }]
+        >>> df = pd.DataFrame(mydict)
+        >>> df
+              a     b     c     d
+        0     1     2     3     4
+        1   100   200   300   400
+        2  1000  2000  3000  4000
+
+        **Indexing just the rows**
+
+        With a scalar integer.
+
+        >>> type(df.iloc[0])
+        <class 'pandas.core.series.Series'>
+        >>> df.iloc[0]
+        a    1
+        b    2
+        c    3
+        d    4
+        Name: 0, dtype: int64
+
+        With a list of integers.
+
+        >>> df.iloc[[0]]
+           a  b  c  d
+        0  1  2  3  4
+        >>> type(df.iloc[[0]])
+        <class 'pandas.core.frame.DataFrame'>
+
+        >>> df.iloc[[0, 1]]
+             a    b    c    d
+        0    1    2    3    4
+        1  100  200  300  400
+
+        With a `slice` object.
+
+        >>> df.iloc[:3]
+              a     b     c     d
+        0     1     2     3     4
+        1   100   200   300   400
+        2  1000  2000  3000  4000
+
+        With a boolean mask the same length as the index.
+
+        >>> df.iloc[[True, False, True]]
+              a     b     c     d
+        0     1     2     3     4
+        2  1000  2000  3000  4000
+
+        With a callable, useful in method chains. The `x` passed
+        to the ``lambda`` is the DataFrame being sliced. This selects
+        the rows whose index label is even.
+
+        >>> df.iloc[lambda x: x.index % 2 == 0]
+              a     b     c     d
+        0     1     2     3     4
+        2  1000  2000  3000  4000
+
+        **Indexing both axes**
+
+        You can mix the indexer types for the index and columns. Use ``:`` to
+        select the entire axis.
+
+        With scalar integers.
+
+        >>> df.iloc[0, 1]
+        2
+
+        With lists of integers.
+
+        >>> df.iloc[[0, 2], [1, 3]]
+              b     d
+        0     2     4
+        2  2000  4000
+
+        With `slice` objects.
+
+        >>> df.iloc[1:3, 0:3]
+              a     b     c
+        1   100   200   300
+        2  1000  2000  3000
+
+        With a boolean array whose length matches the columns.
+
+        >>> df.iloc[:, [True, False, True, False]]
+              a     c
+        0     1     3
+        1   100   300
+        2  1000  3000
+
+        With a callable function that expects the Series or DataFrame.
+ + >>> df.iloc[:, lambda df: [0, 2]] + a c + 0 1 3 + 1 100 300 + 2 1000 3000 + """ + return _iLocIndexer("iloc", self) + + @property + def loc(self) -> _LocIndexer: + """ + Access a group of rows and columns by label(s) or a boolean array. + + ``.loc[]`` is primarily label based, but may also be used with a + boolean array. + + Allowed inputs are: + + - A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is + interpreted as a *label* of the index, and **never** as an + integer position along the index). + - A list or array of labels, e.g. ``['a', 'b', 'c']``. + - A slice object with labels, e.g. ``'a':'f'``. + + .. warning:: Note that contrary to usual python slices, **both** the + start and the stop are included + + - A boolean array of the same length as the axis being sliced, + e.g. ``[True, False, True]``. + - An alignable boolean Series. The index of the key will be aligned before + masking. + - An alignable Index. The Index of the returned selection will be the input. + - A ``callable`` function with one argument (the calling Series or + DataFrame) and that returns valid output for indexing (one of the above) + + See more at :ref:`Selection by Label `. + + Raises + ------ + KeyError + If any items are not found. + IndexingError + If an indexed key is passed and its index is unalignable to the frame index. + + See Also + -------- + DataFrame.at : Access a single value for a row/column label pair. + DataFrame.iloc : Access group of rows and columns by integer position(s). + DataFrame.xs : Returns a cross-section (row(s) or column(s)) from the + Series/DataFrame. + Series.loc : Access group of values using labels. + + Examples + -------- + **Getting values** + + >>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]], + ... index=['cobra', 'viper', 'sidewinder'], + ... columns=['max_speed', 'shield']) + >>> df + max_speed shield + cobra 1 2 + viper 4 5 + sidewinder 7 8 + + Single label. Note this returns the row as a Series. + + >>> df.loc['viper'] + max_speed 4 + shield 5 + Name: viper, dtype: int64 + + List of labels. Note using ``[[]]`` returns a DataFrame. + + >>> df.loc[['viper', 'sidewinder']] + max_speed shield + viper 4 5 + sidewinder 7 8 + + Single label for row and column + + >>> df.loc['cobra', 'shield'] + 2 + + Slice with labels for row and single label for column. As mentioned + above, note that both the start and stop of the slice are included. + + >>> df.loc['cobra':'viper', 'max_speed'] + cobra 1 + viper 4 + Name: max_speed, dtype: int64 + + Boolean list with the same length as the row axis + + >>> df.loc[[False, False, True]] + max_speed shield + sidewinder 7 8 + + Alignable boolean Series: + + >>> df.loc[pd.Series([False, True, False], + ... 
index=['viper', 'sidewinder', 'cobra'])] + max_speed shield + sidewinder 7 8 + + Index (same behavior as ``df.reindex``) + + >>> df.loc[pd.Index(["cobra", "viper"], name="foo")] + max_speed shield + foo + cobra 1 2 + viper 4 5 + + Conditional that returns a boolean Series + + >>> df.loc[df['shield'] > 6] + max_speed shield + sidewinder 7 8 + + Conditional that returns a boolean Series with column labels specified + + >>> df.loc[df['shield'] > 6, ['max_speed']] + max_speed + sidewinder 7 + + Multiple conditional using ``&`` that returns a boolean Series + + >>> df.loc[(df['max_speed'] > 1) & (df['shield'] < 8)] + max_speed shield + viper 4 5 + + Multiple conditional using ``|`` that returns a boolean Series + + >>> df.loc[(df['max_speed'] > 4) | (df['shield'] < 5)] + max_speed shield + cobra 1 2 + sidewinder 7 8 + + Please ensure that each condition is wrapped in parentheses ``()``. + See the :ref:`user guide` + for more details and explanations of Boolean indexing. + + .. note:: + If you find yourself using 3 or more conditionals in ``.loc[]``, + consider using :ref:`advanced indexing`. + + See below for using ``.loc[]`` on MultiIndex DataFrames. + + Callable that returns a boolean Series + + >>> df.loc[lambda df: df['shield'] == 8] + max_speed shield + sidewinder 7 8 + + **Setting values** + + Set value for all items matching the list of labels + + >>> df.loc[['viper', 'sidewinder'], ['shield']] = 50 + >>> df + max_speed shield + cobra 1 2 + viper 4 50 + sidewinder 7 50 + + Set value for an entire row + + >>> df.loc['cobra'] = 10 + >>> df + max_speed shield + cobra 10 10 + viper 4 50 + sidewinder 7 50 + + Set value for an entire column + + >>> df.loc[:, 'max_speed'] = 30 + >>> df + max_speed shield + cobra 30 10 + viper 30 50 + sidewinder 30 50 + + Set value for rows matching callable condition + + >>> df.loc[df['shield'] > 35] = 0 + >>> df + max_speed shield + cobra 30 10 + viper 0 0 + sidewinder 0 0 + + Add value matching location + + >>> df.loc["viper", "shield"] += 5 + >>> df + max_speed shield + cobra 30 10 + viper 0 5 + sidewinder 0 0 + + Setting using a ``Series`` or a ``DataFrame`` sets the values matching the + index labels, not the index positions. + + >>> shuffled_df = df.loc[["viper", "cobra", "sidewinder"]] + >>> df.loc[:] += shuffled_df + >>> df + max_speed shield + cobra 60 20 + viper 0 10 + sidewinder 0 0 + + **Getting values on a DataFrame with an index that has integer labels** + + Another example using integers for the index + + >>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]], + ... index=[7, 8, 9], columns=['max_speed', 'shield']) + >>> df + max_speed shield + 7 1 2 + 8 4 5 + 9 7 8 + + Slice with integer labels for rows. As mentioned above, note that both + the start and stop of the slice are included. + + >>> df.loc[7:9] + max_speed shield + 7 1 2 + 8 4 5 + 9 7 8 + + **Getting values with a MultiIndex** + + A number of examples using a DataFrame with a MultiIndex + + >>> tuples = [ + ... ('cobra', 'mark i'), ('cobra', 'mark ii'), + ... ('sidewinder', 'mark i'), ('sidewinder', 'mark ii'), + ... ('viper', 'mark ii'), ('viper', 'mark iii') + ... ] + >>> index = pd.MultiIndex.from_tuples(tuples) + >>> values = [[12, 2], [0, 4], [10, 20], + ... [1, 4], [7, 1], [16, 36]] + >>> df = pd.DataFrame(values, columns=['max_speed', 'shield'], index=index) + >>> df + max_speed shield + cobra mark i 12 2 + mark ii 0 4 + sidewinder mark i 10 20 + mark ii 1 4 + viper mark ii 7 1 + mark iii 16 36 + + Single label. Note this returns a DataFrame with a single index. 
+ + >>> df.loc['cobra'] + max_speed shield + mark i 12 2 + mark ii 0 4 + + Single index tuple. Note this returns a Series. + + >>> df.loc[('cobra', 'mark ii')] + max_speed 0 + shield 4 + Name: (cobra, mark ii), dtype: int64 + + Single label for row and column. Similar to passing in a tuple, this + returns a Series. + + >>> df.loc['cobra', 'mark i'] + max_speed 12 + shield 2 + Name: (cobra, mark i), dtype: int64 + + Single tuple. Note using ``[[]]`` returns a DataFrame. + + >>> df.loc[[('cobra', 'mark ii')]] + max_speed shield + cobra mark ii 0 4 + + Single tuple for the index with a single label for the column + + >>> df.loc[('cobra', 'mark i'), 'shield'] + 2 + + Slice from index tuple to single label + + >>> df.loc[('cobra', 'mark i'):'viper'] + max_speed shield + cobra mark i 12 2 + mark ii 0 4 + sidewinder mark i 10 20 + mark ii 1 4 + viper mark ii 7 1 + mark iii 16 36 + + Slice from index tuple to index tuple + + >>> df.loc[('cobra', 'mark i'):('viper', 'mark ii')] + max_speed shield + cobra mark i 12 2 + mark ii 0 4 + sidewinder mark i 10 20 + mark ii 1 4 + viper mark ii 7 1 + + Please see the :ref:`user guide` + for more details and explanations of advanced indexing. + """ + return _LocIndexer("loc", self) + + @property + def at(self) -> _AtIndexer: + """ + Access a single value for a row/column label pair. + + Similar to ``loc``, in that both provide label-based lookups. Use + ``at`` if you only need to get or set a single value in a DataFrame + or Series. + + Raises + ------ + KeyError + * If getting a value and 'label' does not exist in a DataFrame or + Series. + ValueError + * If row/column label pair is not a tuple or if any label from + the pair is not a scalar for DataFrame. + * If label is list-like (*excluding* NamedTuple) for Series. + + See Also + -------- + DataFrame.at : Access a single value for a row/column pair by label. + DataFrame.iat : Access a single value for a row/column pair by integer + position. + DataFrame.loc : Access a group of rows and columns by label(s). + DataFrame.iloc : Access a group of rows and columns by integer + position(s). + Series.at : Access a single value by label. + Series.iat : Access a single value by integer position. + Series.loc : Access a group of rows by label(s). + Series.iloc : Access a group of rows by integer position(s). + + Notes + ----- + See :ref:`Fast scalar value getting and setting ` + for more details. + + Examples + -------- + >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]], + ... index=[4, 5, 6], columns=['A', 'B', 'C']) + >>> df + A B C + 4 0 2 3 + 5 0 4 1 + 6 10 20 30 + + Get value at specified row/column pair + + >>> df.at[4, 'B'] + 2 + + Set value at specified row/column pair + + >>> df.at[4, 'B'] = 10 + >>> df.at[4, 'B'] + 10 + + Get value within a Series + + >>> df.loc[5].at['B'] + 4 + """ + return _AtIndexer("at", self) + + @property + def iat(self) -> _iAtIndexer: + """ + Access a single value for a row/column pair by integer position. + + Similar to ``iloc``, in that both provide integer-based lookups. Use + ``iat`` if you only need to get or set a single value in a DataFrame + or Series. + + Raises + ------ + IndexError + When integer position is out of bounds. + + See Also + -------- + DataFrame.at : Access a single value for a row/column label pair. + DataFrame.loc : Access a group of rows and columns by label(s). + DataFrame.iloc : Access a group of rows and columns by integer position(s). + + Examples + -------- + >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]], + ... 
columns=['A', 'B', 'C']) + >>> df + A B C + 0 0 2 3 + 1 0 4 1 + 2 10 20 30 + + Get value at specified row/column pair + + >>> df.iat[1, 2] + 1 + + Set value at specified row/column pair + + >>> df.iat[1, 2] = 10 + >>> df.iat[1, 2] + 10 + + Get value within a series + + >>> df.loc[0].iat[1] + 2 + """ + return _iAtIndexer("iat", self) + + +class _LocationIndexer(NDFrameIndexerBase): + _valid_types: str + axis: AxisInt | None = None + + # sub-classes need to set _takeable + _takeable: bool + + @final + def __call__(self, axis: Axis | None = None) -> Self: + # we need to return a copy of ourselves + new_self = type(self)(self.name, self.obj) + + if axis is not None: + axis_int_none = self.obj._get_axis_number(axis) + else: + axis_int_none = axis + new_self.axis = axis_int_none + return new_self + + def _get_setitem_indexer(self, key): + """ + Convert a potentially-label-based key into a positional indexer. + """ + if self.name == "loc": + # always holds here bc iloc overrides _get_setitem_indexer + self._ensure_listlike_indexer(key) + + if isinstance(key, tuple): + for x in key: + check_dict_or_set_indexers(x) + + if self.axis is not None: + key = _tupleize_axis_indexer(self.ndim, self.axis, key) + + ax = self.obj._get_axis(0) + + if ( + isinstance(ax, MultiIndex) + and self.name != "iloc" + and is_hashable(key) + and not isinstance(key, slice) + ): + with suppress(KeyError, InvalidIndexError): + # TypeError e.g. passed a bool + return ax.get_loc(key) + + if isinstance(key, tuple): + with suppress(IndexingError): + # suppress "Too many indexers" + return self._convert_tuple(key) + + if isinstance(key, range): + # GH#45479 test_loc_setitem_range_key + key = list(key) + + return self._convert_to_indexer(key, axis=0) + + @final + def _maybe_mask_setitem_value(self, indexer, value): + """ + If we have obj.iloc[mask] = series_or_frame and series_or_frame has the + same length as obj, we treat this as obj.iloc[mask] = series_or_frame[mask], + similar to Series.__setitem__. + + Note this is only for loc, not iloc. + """ + + if ( + isinstance(indexer, tuple) + and len(indexer) == 2 + and isinstance(value, (ABCSeries, ABCDataFrame)) + ): + pi, icols = indexer + ndim = value.ndim + if com.is_bool_indexer(pi) and len(value) == len(pi): + newkey = pi.nonzero()[0] + + if is_scalar_indexer(icols, self.ndim - 1) and ndim == 1: + # e.g. test_loc_setitem_boolean_mask_allfalse + # test_loc_setitem_ndframe_values_alignment + value = self.obj.iloc._align_series(indexer, value) + indexer = (newkey, icols) + + elif ( + isinstance(icols, np.ndarray) + and icols.dtype.kind == "i" + and len(icols) == 1 + ): + if ndim == 1: + # We implicitly broadcast, though numpy does not, see + # github.com/pandas-dev/pandas/pull/45501#discussion_r789071825 + # test_loc_setitem_ndframe_values_alignment + value = self.obj.iloc._align_series(indexer, value) + indexer = (newkey, icols) + + elif ndim == 2 and value.shape[1] == 1: + # test_loc_setitem_ndframe_values_alignment + value = self.obj.iloc._align_frame(indexer, value) + indexer = (newkey, icols) + elif com.is_bool_indexer(indexer): + indexer = indexer.nonzero()[0] + + return indexer, value + + @final + def _ensure_listlike_indexer(self, key, axis=None, value=None) -> None: + """ + Ensure that a list-like of column labels are all present by adding them if + they do not already exist. + + Parameters + ---------- + key : list-like of column labels + Target labels. 
+ axis : key axis if known + """ + column_axis = 1 + + # column only exists in 2-dimensional DataFrame + if self.ndim != 2: + return + + orig_key = key + if isinstance(key, tuple) and len(key) > 1: + # key may be a tuple if we are .loc + # if length of key is > 1 set key to column part + key = key[column_axis] + axis = column_axis + + if ( + axis == column_axis + and not isinstance(self.obj.columns, MultiIndex) + and is_list_like_indexer(key) + and not com.is_bool_indexer(key) + and all(is_hashable(k) for k in key) + ): + # GH#38148 + keys = self.obj.columns.union(key, sort=False) + diff = Index(key).difference(self.obj.columns, sort=False) + + if len(diff) and com.is_null_slice(orig_key[0]): + # e.g. if we are doing df.loc[:, ["A", "B"]] = 7 and "B" + # is a new column, add the new columns with dtype=np.void + # so that later when we go through setitem_single_column + # we will use isetitem. Without this, the reindex_axis + # below would create float64 columns in this example, which + # would successfully hold 7, so we would end up with the wrong + # dtype. + indexer = np.arange(len(keys), dtype=np.intp) + indexer[len(self.obj.columns) :] = -1 + new_mgr = self.obj._mgr.reindex_indexer( + keys, indexer=indexer, axis=0, only_slice=True, use_na_proxy=True + ) + self.obj._mgr = new_mgr + return + + self.obj._mgr = self.obj._mgr.reindex_axis(keys, axis=0, only_slice=True) + + @final + def __setitem__(self, key, value) -> None: + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self.obj) <= 2: + warnings.warn( + _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 + ) + + check_dict_or_set_indexers(key) + if isinstance(key, tuple): + key = tuple(list(x) if is_iterator(x) else x for x in key) + key = tuple(com.apply_if_callable(x, self.obj) for x in key) + else: + key = com.apply_if_callable(key, self.obj) + indexer = self._get_setitem_indexer(key) + self._has_valid_setitem_indexer(key) + + iloc = self if self.name == "iloc" else self.obj.iloc + iloc._setitem_with_indexer(indexer, value, self.name) + + def _validate_key(self, key, axis: AxisInt): + """ + Ensure that key is valid for current indexer. + + Parameters + ---------- + key : scalar, slice or list-like + Key requested. + axis : int + Dimension on which the indexing is being made. + + Raises + ------ + TypeError + If the key (or some element of it) has wrong type. + IndexError + If the key (or some element of it) is out of bounds. + KeyError + If the key was not found. + """ + raise AbstractMethodError(self) + + @final + def _expand_ellipsis(self, tup: tuple) -> tuple: + """ + If a tuple key includes an Ellipsis, replace it with an appropriate + number of null slices. + """ + if any(x is Ellipsis for x in tup): + if tup.count(Ellipsis) > 1: + raise IndexingError(_one_ellipsis_message) + + if len(tup) == self.ndim: + # It is unambiguous what axis this Ellipsis is indexing, + # treat as a single null slice. + i = tup.index(Ellipsis) + # FIXME: this assumes only one Ellipsis + new_key = tup[:i] + (_NS,) + tup[i + 1 :] + return new_key + + # TODO: other cases? only one test gets here, and that is covered + # by _validate_key_length + return tup + + @final + def _validate_tuple_indexer(self, key: tuple) -> tuple: + """ + Check the key for valid keys across my indexer. 
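+
+        A sketch of the failure mode (hypothetical frame): passing a label
+        where ``.iloc`` expects a position fails the per-axis validation.
+
+        >>> df = pd.DataFrame({'a': [1]})
+        >>> df.iloc[0, 'a']  # doctest: +SKIP
+        ValueError: Location based indexing can only have [...] types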
+ """ + key = self._validate_key_length(key) + key = self._expand_ellipsis(key) + for i, k in enumerate(key): + try: + self._validate_key(k, i) + except ValueError as err: + raise ValueError( + "Location based indexing can only have " + f"[{self._valid_types}] types" + ) from err + return key + + @final + def _is_nested_tuple_indexer(self, tup: tuple) -> bool: + """ + Returns + ------- + bool + """ + if any(isinstance(ax, MultiIndex) for ax in self.obj.axes): + return any(is_nested_tuple(tup, ax) for ax in self.obj.axes) + return False + + @final + def _convert_tuple(self, key: tuple) -> tuple: + # Note: we assume _tupleize_axis_indexer has been called, if necessary. + self._validate_key_length(key) + keyidx = [self._convert_to_indexer(k, axis=i) for i, k in enumerate(key)] + return tuple(keyidx) + + @final + def _validate_key_length(self, key: tuple) -> tuple: + if len(key) > self.ndim: + if key[0] is Ellipsis: + # e.g. Series.iloc[..., 3] reduces to just Series.iloc[3] + key = key[1:] + if Ellipsis in key: + raise IndexingError(_one_ellipsis_message) + return self._validate_key_length(key) + raise IndexingError("Too many indexers") + return key + + @final + def _getitem_tuple_same_dim(self, tup: tuple): + """ + Index with indexers that should return an object of the same dimension + as self.obj. + + This is only called after a failed call to _getitem_lowerdim. + """ + retval = self.obj + # Selecting columns before rows is signficiantly faster + start_val = (self.ndim - len(tup)) + 1 + for i, key in enumerate(reversed(tup)): + i = self.ndim - i - start_val + if com.is_null_slice(key): + continue + + retval = getattr(retval, self.name)._getitem_axis(key, axis=i) + # We should never have retval.ndim < self.ndim, as that should + # be handled by the _getitem_lowerdim call above. + assert retval.ndim == self.ndim + + if retval is self.obj: + # if all axes were a null slice (`df.loc[:, :]`), ensure we still + # return a new object (https://github.com/pandas-dev/pandas/pull/49469) + retval = retval.copy(deep=False) + + return retval + + @final + def _getitem_lowerdim(self, tup: tuple): + # we can directly get the axis result since the axis is specified + if self.axis is not None: + axis = self.obj._get_axis_number(self.axis) + return self._getitem_axis(tup, axis=axis) + + # we may have a nested tuples indexer here + if self._is_nested_tuple_indexer(tup): + return self._getitem_nested_tuple(tup) + + # we maybe be using a tuple to represent multiple dimensions here + ax0 = self.obj._get_axis(0) + # ...but iloc should handle the tuple as simple integer-location + # instead of checking it as multiindex representation (GH 13797) + if ( + isinstance(ax0, MultiIndex) + and self.name != "iloc" + and not any(isinstance(x, slice) for x in tup) + ): + # Note: in all extant test cases, replacing the slice condition with + # `all(is_hashable(x) or com.is_null_slice(x) for x in tup)` + # is equivalent. + # (see the other place where we call _handle_lowerdim_multi_index_axis0) + with suppress(IndexingError): + return cast(_LocIndexer, self)._handle_lowerdim_multi_index_axis0(tup) + + tup = self._validate_key_length(tup) + + for i, key in enumerate(tup): + if is_label_like(key): + # We don't need to check for tuples here because those are + # caught by the _is_nested_tuple_indexer check above. + section = self._getitem_axis(key, axis=i) + + # We should never have a scalar section here, because + # _getitem_lowerdim is only called after a check for + # is_scalar_access, which that would be. 
+ if section.ndim == self.ndim: + # we're in the middle of slicing through a MultiIndex + # revise the key wrt to `section` by inserting an _NS + new_key = tup[:i] + (_NS,) + tup[i + 1 :] + + else: + # Note: the section.ndim == self.ndim check above + # rules out having DataFrame here, so we dont need to worry + # about transposing. + new_key = tup[:i] + tup[i + 1 :] + + if len(new_key) == 1: + new_key = new_key[0] + + # Slices should return views, but calling iloc/loc with a null + # slice returns a new object. + if com.is_null_slice(new_key): + return section + # This is an elided recursive call to iloc/loc + return getattr(section, self.name)[new_key] + + raise IndexingError("not applicable") + + @final + def _getitem_nested_tuple(self, tup: tuple): + # we have a nested tuple so have at least 1 multi-index level + # we should be able to match up the dimensionality here + + def _contains_slice(x: object) -> bool: + # Check if object is a slice or a tuple containing a slice + if isinstance(x, tuple): + return any(isinstance(v, slice) for v in x) + elif isinstance(x, slice): + return True + return False + + for key in tup: + check_dict_or_set_indexers(key) + + # we have too many indexers for our dim, but have at least 1 + # multi-index dimension, try to see if we have something like + # a tuple passed to a series with a multi-index + if len(tup) > self.ndim: + if self.name != "loc": + # This should never be reached, but let's be explicit about it + raise ValueError("Too many indices") # pragma: no cover + if all( + (is_hashable(x) and not _contains_slice(x)) or com.is_null_slice(x) + for x in tup + ): + # GH#10521 Series should reduce MultiIndex dimensions instead of + # DataFrame, IndexingError is not raised when slice(None,None,None) + # with one row. 
+ with suppress(IndexingError): + return cast(_LocIndexer, self)._handle_lowerdim_multi_index_axis0( + tup + ) + elif isinstance(self.obj, ABCSeries) and any( + isinstance(k, tuple) for k in tup + ): + # GH#35349 Raise if tuple in tuple for series + # Do this after the all-hashable-or-null-slice check so that + # we are only getting non-hashable tuples, in particular ones + # that themselves contain a slice entry + # See test_loc_series_getitem_too_many_dimensions + raise IndexingError("Too many indexers") + + # this is a series with a multi-index specified a tuple of + # selectors + axis = self.axis or 0 + return self._getitem_axis(tup, axis=axis) + + # handle the multi-axis by taking sections and reducing + # this is iterative + obj = self.obj + # GH#41369 Loop in reverse order ensures indexing along columns before rows + # which selects only necessary blocks which avoids dtype conversion if possible + axis = len(tup) - 1 + for key in tup[::-1]: + if com.is_null_slice(key): + axis -= 1 + continue + + obj = getattr(obj, self.name)._getitem_axis(key, axis=axis) + axis -= 1 + + # if we have a scalar, we are done + if is_scalar(obj) or not hasattr(obj, "ndim"): + break + + return obj + + def _convert_to_indexer(self, key, axis: AxisInt): + raise AbstractMethodError(self) + + @final + def __getitem__(self, key): + check_dict_or_set_indexers(key) + if type(key) is tuple: + key = tuple(list(x) if is_iterator(x) else x for x in key) + key = tuple(com.apply_if_callable(x, self.obj) for x in key) + if self._is_scalar_access(key): + return self.obj._get_value(*key, takeable=self._takeable) + return self._getitem_tuple(key) + else: + # we by definition only have the 0th axis + axis = self.axis or 0 + + maybe_callable = com.apply_if_callable(key, self.obj) + return self._getitem_axis(maybe_callable, axis=axis) + + def _is_scalar_access(self, key: tuple): + raise NotImplementedError() + + def _getitem_tuple(self, tup: tuple): + raise AbstractMethodError(self) + + def _getitem_axis(self, key, axis: AxisInt): + raise NotImplementedError() + + def _has_valid_setitem_indexer(self, indexer) -> bool: + raise AbstractMethodError(self) + + @final + def _getbool_axis(self, key, axis: AxisInt): + # caller is responsible for ensuring non-None axis + labels = self.obj._get_axis(axis) + key = check_bool_indexer(labels, key) + inds = key.nonzero()[0] + return self.obj._take_with_is_copy(inds, axis=axis) + + +@doc(IndexingMixin.loc) +class _LocIndexer(_LocationIndexer): + _takeable: bool = False + _valid_types = ( + "labels (MUST BE IN THE INDEX), slices of labels (BOTH " + "endpoints included! 
Can be slices of integers if the " + "index is integers), listlike of labels, boolean" + ) + + # ------------------------------------------------------------------- + # Key Checks + + @doc(_LocationIndexer._validate_key) + def _validate_key(self, key, axis: Axis): + # valid for a collection of labels (we check their presence later) + # slice of labels (where start-end in labels) + # slice of integers (only if in the labels) + # boolean not in slice and with boolean index + ax = self.obj._get_axis(axis) + if isinstance(key, bool) and not ( + is_bool_dtype(ax.dtype) + or ax.dtype.name == "boolean" + or isinstance(ax, MultiIndex) + and is_bool_dtype(ax.get_level_values(0).dtype) + ): + raise KeyError( + f"{key}: boolean label can not be used without a boolean index" + ) + + if isinstance(key, slice) and ( + isinstance(key.start, bool) or isinstance(key.stop, bool) + ): + raise TypeError(f"{key}: boolean values can not be used in a slice") + + def _has_valid_setitem_indexer(self, indexer) -> bool: + return True + + def _is_scalar_access(self, key: tuple) -> bool: + """ + Returns + ------- + bool + """ + # this is a shortcut accessor to both .loc and .iloc + # that provide the equivalent access of .at and .iat + # a) avoid getting things via sections and (to minimize dtype changes) + # b) provide a performant path + if len(key) != self.ndim: + return False + + for i, k in enumerate(key): + if not is_scalar(k): + return False + + ax = self.obj.axes[i] + if isinstance(ax, MultiIndex): + return False + + if isinstance(k, str) and ax._supports_partial_string_indexing: + # partial string indexing, df.loc['2000', 'A'] + # should not be considered scalar + return False + + if not ax._index_as_unique: + return False + + return True + + # ------------------------------------------------------------------- + # MultiIndex Handling + + def _multi_take_opportunity(self, tup: tuple) -> bool: + """ + Check whether there is the possibility to use ``_multi_take``. + + Currently the limit is that all axes being indexed, must be indexed with + list-likes. + + Parameters + ---------- + tup : tuple + Tuple of indexers, one per axis. + + Returns + ------- + bool + Whether the current indexing, + can be passed through `_multi_take`. + """ + if not all(is_list_like_indexer(x) for x in tup): + return False + + # just too complicated + return not any(com.is_bool_indexer(x) for x in tup) + + def _multi_take(self, tup: tuple): + """ + Create the indexers for the passed tuple of keys, and + executes the take operation. This allows the take operation to be + executed all at once, rather than once for each dimension. + Improving efficiency. + + Parameters + ---------- + tup : tuple + Tuple of indexers, one per axis. + + Returns + ------- + values: same type as the object being indexed + """ + # GH 836 + d = { + axis: self._get_listlike_indexer(key, axis) + for (key, axis) in zip(tup, self.obj._AXIS_ORDERS) + } + return self.obj._reindex_with_indexers(d, copy=True, allow_dups=True) + + # ------------------------------------------------------------------- + + def _getitem_iterable(self, key, axis: AxisInt): + """ + Index current object with an iterable collection of keys. + + Parameters + ---------- + key : iterable + Targeted labels. + axis : int + Dimension on which the indexing is being made. + + Raises + ------ + KeyError + If no key was found. Will change in the future to raise if not all + keys were found. + + Returns + ------- + scalar, DataFrame, or Series: indexed value(s). 
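+
+        A small sketch (data is hypothetical): list-of-labels selection
+        reindexes against the targeted axis.
+
+        >>> s = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
+        >>> s.loc[['c', 'a']]
+        c    3
+        a    1
+        dtype: int64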
+ """ + # we assume that not com.is_bool_indexer(key), as that is + # handled before we get here. + self._validate_key(key, axis) + + # A collection of keys + keyarr, indexer = self._get_listlike_indexer(key, axis) + return self.obj._reindex_with_indexers( + {axis: [keyarr, indexer]}, copy=True, allow_dups=True + ) + + def _getitem_tuple(self, tup: tuple): + with suppress(IndexingError): + tup = self._expand_ellipsis(tup) + return self._getitem_lowerdim(tup) + + # no multi-index, so validate all of the indexers + tup = self._validate_tuple_indexer(tup) + + # ugly hack for GH #836 + if self._multi_take_opportunity(tup): + return self._multi_take(tup) + + return self._getitem_tuple_same_dim(tup) + + def _get_label(self, label, axis: AxisInt): + # GH#5567 this will fail if the label is not present in the axis. + return self.obj.xs(label, axis=axis) + + def _handle_lowerdim_multi_index_axis0(self, tup: tuple): + # we have an axis0 multi-index, handle or raise + axis = self.axis or 0 + try: + # fast path for series or for tup devoid of slices + return self._get_label(tup, axis=axis) + + except KeyError as ek: + # raise KeyError if number of indexers match + # else IndexingError will be raised + if self.ndim < len(tup) <= self.obj.index.nlevels: + raise ek + raise IndexingError("No label returned") from ek + + def _getitem_axis(self, key, axis: AxisInt): + key = item_from_zerodim(key) + if is_iterator(key): + key = list(key) + if key is Ellipsis: + key = slice(None) + + labels = self.obj._get_axis(axis) + + if isinstance(key, tuple) and isinstance(labels, MultiIndex): + key = tuple(key) + + if isinstance(key, slice): + self._validate_key(key, axis) + return self._get_slice_axis(key, axis=axis) + elif com.is_bool_indexer(key): + return self._getbool_axis(key, axis=axis) + elif is_list_like_indexer(key): + # an iterable multi-selection + if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)): + if hasattr(key, "ndim") and key.ndim > 1: + raise ValueError("Cannot index with multidimensional key") + + return self._getitem_iterable(key, axis=axis) + + # nested tuple slicing + if is_nested_tuple(key, labels): + locs = labels.get_locs(key) + indexer = [slice(None)] * self.ndim + indexer[axis] = locs + return self.obj.iloc[tuple(indexer)] + + # fall thru to straight lookup + self._validate_key(key, axis) + return self._get_label(key, axis=axis) + + def _get_slice_axis(self, slice_obj: slice, axis: AxisInt): + """ + This is pretty simple as we just have to deal with labels. + """ + # caller is responsible for ensuring non-None axis + obj = self.obj + if not need_slice(slice_obj): + return obj.copy(deep=False) + + labels = obj._get_axis(axis) + indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop, slice_obj.step) + + if isinstance(indexer, slice): + return self.obj._slice(indexer, axis=axis) + else: + # DatetimeIndex overrides Index.slice_indexer and may + # return a DatetimeIndex instead of a slice object. + return self.obj.take(indexer, axis=axis) + + def _convert_to_indexer(self, key, axis: AxisInt): + """ + Convert indexing key into something we can use to do actual fancy + indexing on a ndarray. + + Examples + ix[:5] -> slice(0, 5) + ix[[1,2,3]] -> [1,2,3] + ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz) + + Going by Zen of Python? + 'In the face of ambiguity, refuse the temptation to guess.' + raise AmbiguousIndexError with integer labels? 
+ - No, prefer label-based indexing + """ + labels = self.obj._get_axis(axis) + + if isinstance(key, slice): + return labels._convert_slice_indexer(key, kind="loc") + + if ( + isinstance(key, tuple) + and not isinstance(labels, MultiIndex) + and self.ndim < 2 + and len(key) > 1 + ): + raise IndexingError("Too many indexers") + + # Slices are not valid keys passed in by the user, + # even though they are hashable in Python 3.12 + contains_slice = False + if isinstance(key, tuple): + contains_slice = any(isinstance(v, slice) for v in key) + + if is_scalar(key) or ( + isinstance(labels, MultiIndex) and is_hashable(key) and not contains_slice + ): + # Otherwise get_loc will raise InvalidIndexError + + # if we are a label return me + try: + return labels.get_loc(key) + except LookupError: + if isinstance(key, tuple) and isinstance(labels, MultiIndex): + if len(key) == labels.nlevels: + return {"key": key} + raise + except InvalidIndexError: + # GH35015, using datetime as column indices raises exception + if not isinstance(labels, MultiIndex): + raise + except ValueError: + if not is_integer(key): + raise + return {"key": key} + + if is_nested_tuple(key, labels): + if self.ndim == 1 and any(isinstance(k, tuple) for k in key): + # GH#35349 Raise if tuple in tuple for series + raise IndexingError("Too many indexers") + return labels.get_locs(key) + + elif is_list_like_indexer(key): + if is_iterator(key): + key = list(key) + + if com.is_bool_indexer(key): + key = check_bool_indexer(labels, key) + return key + else: + return self._get_listlike_indexer(key, axis)[1] + else: + try: + return labels.get_loc(key) + except LookupError: + # allow a not found key only if we are a setter + if not is_list_like_indexer(key): + return {"key": key} + raise + + def _get_listlike_indexer(self, key, axis: AxisInt): + """ + Transform a list-like of keys into a new index and an indexer. + + Parameters + ---------- + key : list-like + Targeted labels. + axis: int + Dimension on which the indexing is being made. + + Raises + ------ + KeyError + If at least one key was requested but none was found. + + Returns + ------- + keyarr: Index + New index (coinciding with 'key' if the axis is unique). + values : array-like + Indexer for the return object, -1 denotes keys not found. 
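+
+        Roughly speaking (labels below are hypothetical), for a unique axis
+        this reduces to ``Index.get_indexer`` plus a strict check that every
+        requested label was found.
+
+        >>> df = pd.DataFrame({'x': [1, 2]}, index=['a', 'b'])
+        >>> df.loc._get_listlike_indexer(['b', 'a'], axis=0)  # doctest: +SKIP
+        (Index(['b', 'a'], dtype='object'), array([1, 0]))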
+ """ + ax = self.obj._get_axis(axis) + axis_name = self.obj._get_axis_name(axis) + + keyarr, indexer = ax._get_indexer_strict(key, axis_name) + + return keyarr, indexer + + +@doc(IndexingMixin.iloc) +class _iLocIndexer(_LocationIndexer): + _valid_types = ( + "integer, integer slice (START point is INCLUDED, END " + "point is EXCLUDED), listlike of integers, boolean array" + ) + _takeable = True + + # ------------------------------------------------------------------- + # Key Checks + + def _validate_key(self, key, axis: AxisInt): + if com.is_bool_indexer(key): + if hasattr(key, "index") and isinstance(key.index, Index): + if key.index.inferred_type == "integer": + raise NotImplementedError( + "iLocation based boolean " + "indexing on an integer type " + "is not available" + ) + raise ValueError( + "iLocation based boolean indexing cannot use " + "an indexable as a mask" + ) + return + + if isinstance(key, slice): + return + elif is_integer(key): + self._validate_integer(key, axis) + elif isinstance(key, tuple): + # a tuple should already have been caught by this point + # so don't treat a tuple as a valid indexer + raise IndexingError("Too many indexers") + elif is_list_like_indexer(key): + if isinstance(key, ABCSeries): + arr = key._values + elif is_array_like(key): + arr = key + else: + arr = np.array(key) + len_axis = len(self.obj._get_axis(axis)) + + # check that the key has a numeric dtype + if not is_numeric_dtype(arr.dtype): + raise IndexError(f".iloc requires numeric indexers, got {arr}") + + # check that the key does not exceed the maximum size of the index + if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis): + raise IndexError("positional indexers are out-of-bounds") + else: + raise ValueError(f"Can only index by location with a [{self._valid_types}]") + + def _has_valid_setitem_indexer(self, indexer) -> bool: + """ + Validate that a positional indexer cannot enlarge its target + will raise if needed, does not modify the indexer externally. + + Returns + ------- + bool + """ + if isinstance(indexer, dict): + raise IndexError("iloc cannot enlarge its target object") + + if isinstance(indexer, ABCDataFrame): + raise TypeError( + "DataFrame indexer for .iloc is not supported. " + "Consider using .loc with a DataFrame indexer for automatic alignment.", + ) + + if not isinstance(indexer, tuple): + indexer = _tuplify(self.ndim, indexer) + + for ax, i in zip(self.obj.axes, indexer): + if isinstance(i, slice): + # should check the stop slice? + pass + elif is_list_like_indexer(i): + # should check the elements? + pass + elif is_integer(i): + if i >= len(ax): + raise IndexError("iloc cannot enlarge its target object") + elif isinstance(i, dict): + raise IndexError("iloc cannot enlarge its target object") + + return True + + def _is_scalar_access(self, key: tuple) -> bool: + """ + Returns + ------- + bool + """ + # this is a shortcut accessor to both .loc and .iloc + # that provide the equivalent access of .at and .iat + # a) avoid getting things via sections and (to minimize dtype changes) + # b) provide a performant path + if len(key) != self.ndim: + return False + + return all(is_integer(k) for k in key) + + def _validate_integer(self, key: int | np.integer, axis: AxisInt) -> None: + """ + Check that 'key' is a valid position in the desired axis. + + Parameters + ---------- + key : int + Requested position. + axis : int + Desired axis. + + Raises + ------ + IndexError + If 'key' is not a valid position in axis 'axis'. 
+ """ + len_axis = len(self.obj._get_axis(axis)) + if key >= len_axis or key < -len_axis: + raise IndexError("single positional indexer is out-of-bounds") + + # ------------------------------------------------------------------- + + def _getitem_tuple(self, tup: tuple): + tup = self._validate_tuple_indexer(tup) + with suppress(IndexingError): + return self._getitem_lowerdim(tup) + + return self._getitem_tuple_same_dim(tup) + + def _get_list_axis(self, key, axis: AxisInt): + """ + Return Series values by list or array of integers. + + Parameters + ---------- + key : list-like positional indexer + axis : int + + Returns + ------- + Series object + + Notes + ----- + `axis` can only be zero. + """ + try: + return self.obj._take_with_is_copy(key, axis=axis) + except IndexError as err: + # re-raise with different error message, e.g. test_getitem_ndarray_3d + raise IndexError("positional indexers are out-of-bounds") from err + + def _getitem_axis(self, key, axis: AxisInt): + if key is Ellipsis: + key = slice(None) + elif isinstance(key, ABCDataFrame): + raise IndexError( + "DataFrame indexer is not allowed for .iloc\n" + "Consider using .loc for automatic alignment." + ) + + if isinstance(key, slice): + return self._get_slice_axis(key, axis=axis) + + if is_iterator(key): + key = list(key) + + if isinstance(key, list): + key = np.asarray(key) + + if com.is_bool_indexer(key): + self._validate_key(key, axis) + return self._getbool_axis(key, axis=axis) + + # a list of integers + elif is_list_like_indexer(key): + return self._get_list_axis(key, axis=axis) + + # a single integer + else: + key = item_from_zerodim(key) + if not is_integer(key): + raise TypeError("Cannot index by location index with a non-integer key") + + # validate the location + self._validate_integer(key, axis) + + return self.obj._ixs(key, axis=axis) + + def _get_slice_axis(self, slice_obj: slice, axis: AxisInt): + # caller is responsible for ensuring non-None axis + obj = self.obj + + if not need_slice(slice_obj): + return obj.copy(deep=False) + + labels = obj._get_axis(axis) + labels._validate_positional_slice(slice_obj) + return self.obj._slice(slice_obj, axis=axis) + + def _convert_to_indexer(self, key, axis: AxisInt): + """ + Much simpler as we only have to deal with our valid types. + """ + return key + + def _get_setitem_indexer(self, key): + # GH#32257 Fall through to let numpy do validation + if is_iterator(key): + key = list(key) + + if self.axis is not None: + key = _tupleize_axis_indexer(self.ndim, self.axis, key) + + return key + + # ------------------------------------------------------------------- + + def _setitem_with_indexer(self, indexer, value, name: str = "iloc"): + """ + _setitem_with_indexer is for setting values on a Series/DataFrame + using positional indexers. + + If the relevant keys are not present, the Series/DataFrame may be + expanded. + + This method is currently broken when dealing with non-unique Indexes, + since it goes from positional indexers back to labels when calling + BlockManager methods, see GH#12991, GH#22046, GH#15686. 
+ """ + info_axis = self.obj._info_axis_number + + # maybe partial set + take_split_path = not self.obj._mgr.is_single_block + + if not take_split_path and isinstance(value, ABCDataFrame): + # Avoid cast of values + take_split_path = not value._mgr.is_single_block + + # if there is only one block/type, still have to take split path + # unless the block is one-dimensional or it can hold the value + if not take_split_path and len(self.obj._mgr.arrays) and self.ndim > 1: + # in case of dict, keys are indices + val = list(value.values()) if isinstance(value, dict) else value + arr = self.obj._mgr.arrays[0] + take_split_path = not can_hold_element( + arr, extract_array(val, extract_numpy=True) + ) + + # if we have any multi-indexes that have non-trivial slices + # (not null slices) then we must take the split path, xref + # GH 10360, GH 27841 + if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes): + for i, ax in zip(indexer, self.obj.axes): + if isinstance(ax, MultiIndex) and not ( + is_integer(i) or com.is_null_slice(i) + ): + take_split_path = True + break + + if isinstance(indexer, tuple): + nindexer = [] + for i, idx in enumerate(indexer): + if isinstance(idx, dict): + # reindex the axis to the new value + # and set inplace + key, _ = convert_missing_indexer(idx) + + # if this is the items axes, then take the main missing + # path first + # this correctly sets the dtype and avoids cache issues + # essentially this separates out the block that is needed + # to possibly be modified + if self.ndim > 1 and i == info_axis: + # add the new item, and set the value + # must have all defined axes if we have a scalar + # or a list-like on the non-info axes if we have a + # list-like + if not len(self.obj): + if not is_list_like_indexer(value): + raise ValueError( + "cannot set a frame with no " + "defined index and a scalar" + ) + self.obj[key] = value + return + + # add a new item with the dtype setup + if com.is_null_slice(indexer[0]): + # We are setting an entire column + self.obj[key] = value + return + elif is_array_like(value): + # GH#42099 + arr = extract_array(value, extract_numpy=True) + taker = -1 * np.ones(len(self.obj), dtype=np.intp) + empty_value = algos.take_nd(arr, taker) + if not isinstance(value, ABCSeries): + # if not Series (in which case we need to align), + # we can short-circuit + if ( + isinstance(arr, np.ndarray) + and arr.ndim == 1 + and len(arr) == 1 + ): + # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615 + arr = arr[0, ...] + empty_value[indexer[0]] = arr + self.obj[key] = empty_value + return + + self.obj[key] = empty_value + + else: + # FIXME: GH#42099#issuecomment-864326014 + self.obj[key] = infer_fill_value(value) + + new_indexer = convert_from_missing_indexer_tuple( + indexer, self.obj.axes + ) + self._setitem_with_indexer(new_indexer, value, name) + + return + + # reindex the axis + # make sure to clear the cache because we are + # just replacing the block manager here + # so the object is the same + index = self.obj._get_axis(i) + labels = index.insert(len(index), key) + + # We are expanding the Series/DataFrame values to match + # the length of thenew index `labels`. GH#40096 ensure + # this is valid even if the index has duplicates. 
+                    taker = np.arange(len(index) + 1, dtype=np.intp)
+                    taker[-1] = -1
+                    reindexers = {i: (labels, taker)}
+                    new_obj = self.obj._reindex_with_indexers(
+                        reindexers, allow_dups=True
+                    )
+                    self.obj._mgr = new_obj._mgr
+                    self.obj._maybe_update_cacher(clear=True)
+                    self.obj._is_copy = None
+
+                    nindexer.append(labels.get_loc(key))
+
+                else:
+                    nindexer.append(idx)
+
+            indexer = tuple(nindexer)
+        else:
+            indexer, missing = convert_missing_indexer(indexer)
+
+            if missing:
+                self._setitem_with_indexer_missing(indexer, value)
+                return
+
+        if name == "loc":
+            # must come after setting of missing
+            indexer, value = self._maybe_mask_setitem_value(indexer, value)
+
+        # align and set the values
+        if take_split_path:
+            # We have to operate column-wise
+            self._setitem_with_indexer_split_path(indexer, value, name)
+        else:
+            self._setitem_single_block(indexer, value, name)
+
+    def _setitem_with_indexer_split_path(self, indexer, value, name: str):
+        """
+        Setitem column-wise.
+        """
+        # Above we only set take_split_path to True for 2D cases
+        assert self.ndim == 2
+
+        if not isinstance(indexer, tuple):
+            indexer = _tuplify(self.ndim, indexer)
+        if len(indexer) > self.ndim:
+            raise IndexError("too many indices for array")
+        if isinstance(indexer[0], np.ndarray) and indexer[0].ndim > 2:
+            raise ValueError(r"Cannot set values with ndim > 2")
+
+        if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict):
+            from pandas import Series
+
+            value = self._align_series(indexer, Series(value))
+
+        # Ensure we have something we can iterate over
+        info_axis = indexer[1]
+        ilocs = self._ensure_iterable_column_indexer(info_axis)
+
+        pi = indexer[0]
+        lplane_indexer = length_of_indexer(pi, self.obj.index)
+        # lplane_indexer gives the expected length of obj[indexer[0]]
+
+        # we need an iterable, with a ndim of at least 1
+        # eg. don't pass through np.array(0)
+        if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0:
+            if isinstance(value, ABCDataFrame):
+                self._setitem_with_indexer_frame_value(indexer, value, name)
+
+            elif np.ndim(value) == 2:
+                # TODO: avoid np.ndim call in case it isn't an ndarray, since
+                # that will construct an ndarray, which will be wasteful
+                self._setitem_with_indexer_2d_value(indexer, value)
+
+            elif len(ilocs) == 1 and lplane_indexer == len(value) and not is_scalar(pi):
+                # We are setting multiple rows in a single column.
+                self._setitem_single_column(ilocs[0], value, pi)
+
+            elif len(ilocs) == 1 and 0 != lplane_indexer != len(value):
+                # We are trying to set N values into M entries of a single
+                # column, which is invalid for N != M
+                # Exclude zero-len for e.g. boolean masking that is all-false
+
+                if len(value) == 1 and not is_integer(info_axis):
+                    # This is a case like df.iloc[:3, [1]] = [0]
+                    # where we treat as df.iloc[:3, 1] = 0
+                    return self._setitem_with_indexer((pi, info_axis[0]), value[0])
+
+                raise ValueError(
+                    "Must have equal len keys and value "
+                    "when setting with an iterable"
+                )
+
+            elif lplane_indexer == 0 and len(value) == len(self.obj.index):
+                # We get here in one case via .loc with an all-False mask
+                pass
+
+            elif self._is_scalar_access(indexer) and is_object_dtype(
+                self.obj.dtypes._values[ilocs[0]]
+            ):
+                # We are setting nested data, only possible for object dtype data
+                self._setitem_single_column(indexer[1], value, pi)
+
+            elif len(ilocs) == len(value):
+                # We are setting multiple columns in a single row.
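+                # Editor's note (illustrative): e.g.
+                # df.loc[0, ["a", "b"]] = [1, 2] lands here, pairing each
+                # column location with one element of ``value``.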
+ for loc, v in zip(ilocs, value): + self._setitem_single_column(loc, v, pi) + + elif len(ilocs) == 1 and com.is_null_slice(pi) and len(self.obj) == 0: + # This is a setitem-with-expansion, see + # test_loc_setitem_empty_append_expands_rows_mixed_dtype + # e.g. df = DataFrame(columns=["x", "y"]) + # df["x"] = df["x"].astype(np.int64) + # df.loc[:, "x"] = [1, 2, 3] + self._setitem_single_column(ilocs[0], value, pi) + + else: + raise ValueError( + "Must have equal len keys and value " + "when setting with an iterable" + ) + + else: + # scalar value + for loc in ilocs: + self._setitem_single_column(loc, value, pi) + + def _setitem_with_indexer_2d_value(self, indexer, value): + # We get here with np.ndim(value) == 2, excluding DataFrame, + # which goes through _setitem_with_indexer_frame_value + pi = indexer[0] + + ilocs = self._ensure_iterable_column_indexer(indexer[1]) + + if not is_array_like(value): + # cast lists to array + value = np.array(value, dtype=object) + if len(ilocs) != value.shape[1]: + raise ValueError( + "Must have equal len keys and value when setting with an ndarray" + ) + + for i, loc in enumerate(ilocs): + value_col = value[:, i] + if is_object_dtype(value_col.dtype): + # casting to list so that we do type inference in setitem_single_column + value_col = value_col.tolist() + self._setitem_single_column(loc, value_col, pi) + + def _setitem_with_indexer_frame_value(self, indexer, value: DataFrame, name: str): + ilocs = self._ensure_iterable_column_indexer(indexer[1]) + + sub_indexer = list(indexer) + pi = indexer[0] + + multiindex_indexer = isinstance(self.obj.columns, MultiIndex) + + unique_cols = value.columns.is_unique + + # We do not want to align the value in case of iloc GH#37728 + if name == "iloc": + for i, loc in enumerate(ilocs): + val = value.iloc[:, i] + self._setitem_single_column(loc, val, pi) + + elif not unique_cols and value.columns.equals(self.obj.columns): + # We assume we are already aligned, see + # test_iloc_setitem_frame_duplicate_columns_multiple_blocks + for loc in ilocs: + item = self.obj.columns[loc] + if item in value: + sub_indexer[1] = item + val = self._align_series( + tuple(sub_indexer), + value.iloc[:, loc], + multiindex_indexer, + ) + else: + val = np.nan + + self._setitem_single_column(loc, val, pi) + + elif not unique_cols: + raise ValueError("Setting with non-unique columns is not allowed.") + + else: + for loc in ilocs: + item = self.obj.columns[loc] + if item in value: + sub_indexer[1] = item + val = self._align_series( + tuple(sub_indexer), + value[item], + multiindex_indexer, + using_cow=using_copy_on_write(), + ) + else: + val = np.nan + + self._setitem_single_column(loc, val, pi) + + def _setitem_single_column(self, loc: int, value, plane_indexer) -> None: + """ + + Parameters + ---------- + loc : int + Indexer for column position + plane_indexer : int, slice, listlike[int] + The indexer we use for setitem along axis=0. 
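+        value : scalar or array-like
+            The value(s) being assigned along ``plane_indexer``.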
+ """ + pi = plane_indexer + + is_full_setter = com.is_null_slice(pi) or com.is_full_slice(pi, len(self.obj)) + + is_null_setter = com.is_empty_slice(pi) or is_array_like(pi) and len(pi) == 0 + + if is_null_setter: + # no-op, don't cast dtype later + return + + elif is_full_setter: + try: + self.obj._mgr.column_setitem( + loc, plane_indexer, value, inplace_only=True + ) + except (ValueError, TypeError, LossySetitemError): + # If we're setting an entire column and we can't do it inplace, + # then we can use value's dtype (or inferred dtype) + # instead of object + self.obj.isetitem(loc, value) + else: + # set value into the column (first attempting to operate inplace, then + # falling back to casting if necessary) + self.obj._mgr.column_setitem(loc, plane_indexer, value) + + self.obj._clear_item_cache() + + def _setitem_single_block(self, indexer, value, name: str) -> None: + """ + _setitem_with_indexer for the case when we have a single Block. + """ + from pandas import Series + + info_axis = self.obj._info_axis_number + item_labels = self.obj._get_axis(info_axis) + if isinstance(indexer, tuple): + # if we are setting on the info axis ONLY + # set using those methods to avoid block-splitting + # logic here + if ( + self.ndim == len(indexer) == 2 + and is_integer(indexer[1]) + and com.is_null_slice(indexer[0]) + ): + col = item_labels[indexer[info_axis]] + if len(item_labels.get_indexer_for([col])) == 1: + # e.g. test_loc_setitem_empty_append_expands_rows + loc = item_labels.get_loc(col) + self._setitem_single_column(loc, value, indexer[0]) + return + + indexer = maybe_convert_ix(*indexer) # e.g. test_setitem_frame_align + + if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict): + # TODO(EA): ExtensionBlock.setitem this causes issues with + # setting for extensionarrays that store dicts. Need to decide + # if it's worth supporting that. + value = self._align_series(indexer, Series(value)) + + elif isinstance(value, ABCDataFrame) and name != "iloc": + value = self._align_frame(indexer, value)._values + + # check for chained assignment + self.obj._check_is_chained_assignment_possible() + + # actually do the set + self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value) + self.obj._maybe_update_cacher(clear=True, inplace=True) + + def _setitem_with_indexer_missing(self, indexer, value): + """ + Insert new row(s) or column(s) into the Series or DataFrame. + """ + from pandas import Series + + # reindex the axis to the new value + # and set inplace + if self.ndim == 1: + index = self.obj.index + new_index = index.insert(len(index), indexer) + + # we have a coerced indexer, e.g. a float + # that matches in an int64 Index, so + # we will not create a duplicate index, rather + # index to that element + # e.g. 
0.0 -> 0
+            # GH#12246
+            if index.is_unique:
+                # pass new_index[-1:] instead of [new_index[-1]]
+                # so that we retain dtype
+                new_indexer = index.get_indexer(new_index[-1:])
+                if (new_indexer != -1).any():
+                    # We get only here with loc, so can hard code
+                    return self._setitem_with_indexer(new_indexer, value, "loc")
+
+            # this preserves dtype of the value and of the object
+            if not is_scalar(value):
+                new_dtype = None
+
+            elif is_valid_na_for_dtype(value, self.obj.dtype):
+                if not is_object_dtype(self.obj.dtype):
+                    # Every NA value is suitable for object, no conversion needed
+                    value = na_value_for_dtype(self.obj.dtype, compat=False)
+
+                new_dtype = maybe_promote(self.obj.dtype, value)[0]
+
+            elif isna(value):
+                new_dtype = None
+            elif not self.obj.empty and not is_object_dtype(self.obj.dtype):
+                # We should not cast, if we have object dtype because we can
+                # set timedeltas into object series
+                curr_dtype = self.obj.dtype
+                curr_dtype = getattr(curr_dtype, "numpy_dtype", curr_dtype)
+                new_dtype = maybe_promote(curr_dtype, value)[0]
+            else:
+                new_dtype = None
+
+            new_values = Series([value], dtype=new_dtype)._values
+
+            if len(self.obj._values):
+                # GH#22717 handle casting compatibility that np.concatenate
+                # does incorrectly
+                new_values = concat_compat([self.obj._values, new_values])
+            self.obj._mgr = self.obj._constructor(
+                new_values, index=new_index, name=self.obj.name
+            )._mgr
+            self.obj._maybe_update_cacher(clear=True)
+
+        elif self.ndim == 2:
+            if not len(self.obj.columns):
+                # no columns and scalar
+                raise ValueError("cannot set a frame with no defined columns")
+
+            has_dtype = hasattr(value, "dtype")
+            if isinstance(value, ABCSeries):
+                # append a Series
+                value = value.reindex(index=self.obj.columns, copy=True)
+                value.name = indexer
+            elif isinstance(value, dict):
+                value = Series(
+                    value, index=self.obj.columns, name=indexer, dtype=object
+                )
+            else:
+                # a list-like
+                if is_list_like_indexer(value):
+                    # must have conforming columns
+                    if len(value) != len(self.obj.columns):
+                        raise ValueError("cannot set a row with mismatched columns")
+
+                value = Series(value, index=self.obj.columns, name=indexer)
+
+            if not len(self.obj):
+                # We will ignore the existing dtypes instead of using
+                # internals.concat logic
+                df = value.to_frame().T
+
+                idx = self.obj.index
+                if isinstance(idx, MultiIndex):
+                    name = idx.names
+                else:
+                    name = idx.name
+
+                df.index = Index([indexer], name=name)
+                if not has_dtype:
+                    # i.e. if we already had a Series or ndarray, keep that
+                    # dtype. But if we had a list or dict, then do inference
+                    df = df.infer_objects(copy=False)
+                self.obj._mgr = df._mgr
+            else:
+                self.obj._mgr = self.obj._append(value)._mgr
+            self.obj._maybe_update_cacher(clear=True)
+
+    def _ensure_iterable_column_indexer(self, column_indexer):
+        """
+        Ensure that our column indexer is something that can be iterated over.
+        """
+        ilocs: Sequence[int | np.integer] | np.ndarray
+        if is_integer(column_indexer):
+            ilocs = [column_indexer]
+        elif isinstance(column_indexer, slice):
+            ilocs = np.arange(len(self.obj.columns))[column_indexer]
+        elif (
+            isinstance(column_indexer, np.ndarray) and column_indexer.dtype.kind == "b"
+        ):
+            ilocs = np.arange(len(column_indexer))[column_indexer]
+        else:
+            ilocs = column_indexer
+        return ilocs
+
+    def _align_series(
+        self,
+        indexer,
+        ser: Series,
+        multiindex_indexer: bool = False,
+        using_cow: bool = False,
+    ):
+        """
+        Parameters
+        ----------
+        indexer : tuple, slice, scalar
+            Indexer used to get the locations that will be set to `ser`.
+        ser : pd.Series
+            Values to assign to the locations specified by `indexer`.
+        multiindex_indexer : bool, optional
+            Defaults to False. Should be set to True if `indexer` was from
+            a `pd.MultiIndex`, to avoid unnecessary broadcasting.
+
+        Returns
+        -------
+        `np.array` of `ser` broadcast to the appropriate shape for assignment
+        to the locations selected by `indexer`
+        """
+        if isinstance(indexer, (slice, np.ndarray, list, Index)):
+            indexer = (indexer,)
+
+        if isinstance(indexer, tuple):
+            # flatten np.ndarray indexers
+            def ravel(i):
+                return i.ravel() if isinstance(i, np.ndarray) else i
+
+            indexer = tuple(map(ravel, indexer))
+
+            aligners = [not com.is_null_slice(idx) for idx in indexer]
+            sum_aligners = sum(aligners)
+            single_aligner = sum_aligners == 1
+            is_frame = self.ndim == 2
+            obj = self.obj
+
+            # are we a single alignable value on a non-primary
+            # dim (e.g. panel: 1,2, or frame: 0) ?
+            # hence need to align to a single axis dimension
+            # rather than find all valid dims
+
+            # frame
+            if is_frame:
+                single_aligner = single_aligner and aligners[0]
+
+            # we have a frame, with multiple indexers on both axes; and a
+            # series, so need to broadcast (see GH5206)
+            if sum_aligners == self.ndim and all(is_sequence(_) for _ in indexer):
+                ser_values = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
+
+                # single indexer
+                if len(indexer) > 1 and not multiindex_indexer:
+                    len_indexer = len(indexer[1])
+                    ser_values = (
+                        np.tile(ser_values, len_indexer).reshape(len_indexer, -1).T
+                    )
+
+                return ser_values
+
+            for i, idx in enumerate(indexer):
+                ax = obj.axes[i]
+
+                # multiple aligners (or null slices)
+                if is_sequence(idx) or isinstance(idx, slice):
+                    if single_aligner and com.is_null_slice(idx):
+                        continue
+                    new_ix = ax[idx]
+                    if not is_list_like_indexer(new_ix):
+                        new_ix = Index([new_ix])
+                    else:
+                        new_ix = Index(new_ix)
+                    if ser.index.equals(new_ix):
+                        if using_cow:
+                            return ser
+                        return ser._values.copy()
+
+                    return ser.reindex(new_ix)._values
+
+                # 2 dims
+                elif single_aligner:
+                    # reindex along index
+                    ax = self.obj.axes[1]
+                    if ser.index.equals(ax) or not len(ax):
+                        return ser._values.copy()
+                    return ser.reindex(ax)._values
+
+        elif is_integer(indexer) and self.ndim == 1:
+            if is_object_dtype(self.obj.dtype):
+                return ser
+            ax = self.obj._get_axis(0)
+
+            if ser.index.equals(ax):
+                return ser._values.copy()
+
+            return ser.reindex(ax)._values[indexer]
+
+        elif is_integer(indexer):
+            ax = self.obj._get_axis(1)
+
+            if ser.index.equals(ax):
+                return ser._values.copy()
+
+            return ser.reindex(ax)._values
+
+        raise ValueError("Incompatible indexer with Series")
+
+    def _align_frame(self, indexer, df: DataFrame) -> DataFrame:
+        is_frame = self.ndim == 2
+
+        if isinstance(indexer, tuple):
+            idx, cols = None, None
+            sindexers = []
+            for i, ix in enumerate(indexer):
+                ax = self.obj.axes[i]
+                if is_sequence(ix) or isinstance(ix, slice):
+                    if isinstance(ix, np.ndarray):
+                        ix = ix.ravel()
+                    if idx is None:
+                        idx = ax[ix]
+                    elif cols is None:
+                        cols = ax[ix]
+                    else:
+                        break
+                else:
+                    sindexers.append(i)
+
+            if idx is not None and cols is not None:
+                if df.index.equals(idx) and df.columns.equals(cols):
+                    val = df.copy()
+                else:
+                    val = df.reindex(idx, columns=cols)
+                return val
+
+        elif (isinstance(indexer, slice) or is_list_like_indexer(indexer)) and is_frame:
+            ax = self.obj.index[indexer]
+            if df.index.equals(ax):
+                val = df.copy()
+            else:
+                # we have a multi-index and are trying to align
+                # with a particular, level GH3738
+                if (
+                    isinstance(ax, MultiIndex)
+                    and isinstance(df.index, MultiIndex)
+                    and ax.nlevels != df.index.nlevels
+                ):
+                    raise TypeError(
+                        "cannot align on a multi-index without "
+                        "specifying the join levels"
+                    )
+
+                val = df.reindex(index=ax)
+            return val
+
+        raise ValueError("Incompatible indexer with DataFrame")
+
+
+class _ScalarAccessIndexer(NDFrameIndexerBase):
+    """
+    Access scalars quickly.
+    """
+
+    # sub-classes need to set _takeable
+    _takeable: bool
+
+    def _convert_key(self, key):
+        raise AbstractMethodError(self)
+
+    def __getitem__(self, key):
+        if not isinstance(key, tuple):
+            # we could have a convertible item here (e.g. Timestamp)
+            if not is_list_like_indexer(key):
+                key = (key,)
+            else:
+                raise ValueError("Invalid call for scalar access (getting)!")
+
+        key = self._convert_key(key)
+        return self.obj._get_value(*key, takeable=self._takeable)
+
+    def __setitem__(self, key, value) -> None:
+        if isinstance(key, tuple):
+            key = tuple(com.apply_if_callable(x, self.obj) for x in key)
+        else:
+            # scalar callable may return tuple
+            key = com.apply_if_callable(key, self.obj)
+
+        if not isinstance(key, tuple):
+            key = _tuplify(self.ndim, key)
+        key = list(self._convert_key(key))
+        if len(key) != self.ndim:
+            raise ValueError("Not enough indexers for scalar access (setting)!")
+
+        self.obj._set_value(*key, value=value, takeable=self._takeable)
+
+
+@doc(IndexingMixin.at)
+class _AtIndexer(_ScalarAccessIndexer):
+    _takeable = False
+
+    def _convert_key(self, key):
+        """
+        Require the keys to be the same type as the index. (so we don't
+        fall back)
+        """
+        # GH 26989
+        # For series, unpacking key needs to result in the label.
+        # This is already the case for len(key) == 1; e.g. (1,)
+        if self.ndim == 1 and len(key) > 1:
+            key = (key,)
+
+        return key
+
+    @property
+    def _axes_are_unique(self) -> bool:
+        # Only relevant for self.ndim == 2
+        assert self.ndim == 2
+        return self.obj.index.is_unique and self.obj.columns.is_unique
+
+    def __getitem__(self, key):
+        if self.ndim == 2 and not self._axes_are_unique:
+            # GH#33041 fall back to .loc
+            if not isinstance(key, tuple) or not all(is_scalar(x) for x in key):
+                raise ValueError("Invalid call for scalar access (getting)!")
+            return self.obj.loc[key]
+
+        return super().__getitem__(key)
+
+    def __setitem__(self, key, value) -> None:
+        if self.ndim == 2 and not self._axes_are_unique:
+            # GH#33041 fall back to .loc
+            if not isinstance(key, tuple) or not all(is_scalar(x) for x in key):
+                raise ValueError("Invalid call for scalar access (setting)!")
+
+            self.obj.loc[key] = value
+            return
+
+        return super().__setitem__(key, value)
+
+
+@doc(IndexingMixin.iat)
+class _iAtIndexer(_ScalarAccessIndexer):
+    _takeable = True
+
+    def _convert_key(self, key):
+        """
+        Require integer args. (and convert to label arguments)
+        """
+        for i in key:
+            if not is_integer(i):
+                raise ValueError("iAt based indexing can only have integer indexers")
+        return key
+
+
+def _tuplify(ndim: int, loc: Hashable) -> tuple[Hashable | slice, ...]:
+    """
+    Given an indexer for the first dimension, create an equivalent tuple
+    for indexing over all dimensions.
+
+    Parameters
+    ----------
+    ndim : int
+    loc : object
+
+    Returns
+    -------
+    tuple
+    """
+    _tup: list[Hashable | slice]
+    _tup = [slice(None, None) for _ in range(ndim)]
+    _tup[0] = loc
+    return tuple(_tup)
+
+
+def _tupleize_axis_indexer(ndim: int, axis: AxisInt, key) -> tuple:
+    """
+    If we have an axis, adapt the given key to be axis-independent.
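+
+    For example (editor's illustration), ``_tupleize_axis_indexer(2, 1, [0, 2])``
+    returns ``(slice(None, None, None), [0, 2])``: all of axis 0 is kept and
+    the key is applied along axis 1.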
+ """ + new_key = [slice(None)] * ndim + new_key[axis] = key + return tuple(new_key) + + +def check_bool_indexer(index: Index, key) -> np.ndarray: + """ + Check if key is a valid boolean indexer for an object with such index and + perform reindexing or conversion if needed. + + This function assumes that is_bool_indexer(key) == True. + + Parameters + ---------- + index : Index + Index of the object on which the indexing is done. + key : list-like + Boolean indexer to check. + + Returns + ------- + np.array + Resulting key. + + Raises + ------ + IndexError + If the key does not have the same length as index. + IndexingError + If the index of the key is unalignable to index. + """ + result = key + if isinstance(key, ABCSeries) and not key.index.equals(index): + indexer = result.index.get_indexer_for(index) + if -1 in indexer: + raise IndexingError( + "Unalignable boolean Series provided as " + "indexer (index of the boolean Series and of " + "the indexed object do not match)." + ) + + result = result.take(indexer) + + # fall through for boolean + if not isinstance(result.dtype, ExtensionDtype): + return result.astype(bool)._values + + if is_object_dtype(key): + # key might be object-dtype bool, check_array_indexer needs bool array + result = np.asarray(result, dtype=bool) + elif not is_array_like(result): + # GH 33924 + # key may contain nan elements, check_array_indexer needs bool array + result = pd_array(result, dtype=bool) + return check_array_indexer(index, result) + + +def convert_missing_indexer(indexer): + """ + Reverse convert a missing indexer, which is a dict + return the scalar indexer and a boolean indicating if we converted + """ + if isinstance(indexer, dict): + # a missing key (but not a tuple indexer) + indexer = indexer["key"] + + if isinstance(indexer, bool): + raise KeyError("cannot use a single bool to index into setitem") + return indexer, True + + return indexer, False + + +def convert_from_missing_indexer_tuple(indexer, axes): + """ + Create a filtered indexer that doesn't have any missing indexers. + """ + + def get_indexer(_i, _idx): + return axes[_i].get_loc(_idx["key"]) if isinstance(_idx, dict) else _idx + + return tuple(get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)) + + +def maybe_convert_ix(*args): + """ + We likely want to take the cross-product. + """ + for arg in args: + if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)): + return args + return np.ix_(*args) + + +def is_nested_tuple(tup, labels) -> bool: + """ + Returns + ------- + bool + """ + # check for a compatible nested tuple and multiindexes among the axes + if not isinstance(tup, tuple): + return False + + for k in tup: + if is_list_like(k) or isinstance(k, slice): + return isinstance(labels, MultiIndex) + + return False + + +def is_label_like(key) -> bool: + """ + Returns + ------- + bool + """ + # select a label or row + return ( + not isinstance(key, slice) + and not is_list_like_indexer(key) + and key is not Ellipsis + ) + + +def need_slice(obj: slice) -> bool: + """ + Returns + ------- + bool + """ + return ( + obj.start is not None + or obj.stop is not None + or (obj.step is not None and obj.step != 1) + ) + + +def check_dict_or_set_indexers(key) -> None: + """ + Check if the indexer is or contains a dict or set, which is no longer allowed. + """ + if ( + isinstance(key, set) + or isinstance(key, tuple) + and any(isinstance(x, set) for x in key) + ): + raise TypeError( + "Passing a set as an indexer is not supported. Use a list instead." 
+ ) + + if ( + isinstance(key, dict) + or isinstance(key, tuple) + and any(isinstance(x, dict) for x in key) + ): + raise TypeError( + "Passing a dict as an indexer is not supported. Use a list instead." + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/buffer.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/buffer.py new file mode 100644 index 00000000..b31a2526 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/buffer.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +from typing import Any + +import numpy as np + +from pandas.core.interchange.dataframe_protocol import ( + Buffer, + DlpackDeviceType, +) +from pandas.util.version import Version + +_NUMPY_HAS_DLPACK = Version(np.__version__) >= Version("1.22.0") + + +class PandasBuffer(Buffer): + """ + Data in the buffer is guaranteed to be contiguous in memory. + """ + + def __init__(self, x: np.ndarray, allow_copy: bool = True) -> None: + """ + Handle only regular columns (= numpy arrays) for now. + """ + if not x.strides == (x.dtype.itemsize,): + # The protocol does not support strided buffers, so a copy is + # necessary. If that's not allowed, we need to raise an exception. + if allow_copy: + x = x.copy() + else: + raise RuntimeError( + "Exports cannot be zero-copy in the case " + "of a non-contiguous buffer" + ) + + # Store the numpy array in which the data resides as a private + # attribute, so we can use it to retrieve the public attributes + self._x = x + + @property + def bufsize(self) -> int: + """ + Buffer size in bytes. + """ + return self._x.size * self._x.dtype.itemsize + + @property + def ptr(self) -> int: + """ + Pointer to start of the buffer as an integer. + """ + return self._x.__array_interface__["data"][0] + + def __dlpack__(self) -> Any: + """ + Represent this structure as DLPack interface. + """ + if _NUMPY_HAS_DLPACK: + return self._x.__dlpack__() + raise NotImplementedError("__dlpack__") + + def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]: + """ + Device type and device ID for where the data in the buffer resides. 
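+        Always ``(DlpackDeviceType.CPU, None)`` for this implementation,
+        since the backing NumPy array lives in host memory.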
+ """ + return (DlpackDeviceType.CPU, None) + + def __repr__(self) -> str: + return ( + "PandasBuffer(" + + str( + { + "bufsize": self.bufsize, + "ptr": self.ptr, + "device": self.__dlpack_device__()[0].name, + } + ) + + ")" + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/column.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/column.py new file mode 100644 index 00000000..acfbc5d9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/column.py @@ -0,0 +1,391 @@ +from __future__ import annotations + +from typing import Any + +import numpy as np + +from pandas._libs.lib import infer_dtype +from pandas._libs.tslibs import iNaT +from pandas.errors import NoBufferPresent +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + DatetimeTZDtype, +) + +import pandas as pd +from pandas.api.types import is_string_dtype +from pandas.core.interchange.buffer import PandasBuffer +from pandas.core.interchange.dataframe_protocol import ( + Column, + ColumnBuffers, + ColumnNullType, + DtypeKind, +) +from pandas.core.interchange.utils import ( + ArrowCTypes, + Endianness, + dtype_to_arrow_c_fmt, +) + +_NP_KINDS = { + "i": DtypeKind.INT, + "u": DtypeKind.UINT, + "f": DtypeKind.FLOAT, + "b": DtypeKind.BOOL, + "U": DtypeKind.STRING, + "M": DtypeKind.DATETIME, + "m": DtypeKind.DATETIME, +} + +_NULL_DESCRIPTION = { + DtypeKind.FLOAT: (ColumnNullType.USE_NAN, None), + DtypeKind.DATETIME: (ColumnNullType.USE_SENTINEL, iNaT), + DtypeKind.INT: (ColumnNullType.NON_NULLABLE, None), + DtypeKind.UINT: (ColumnNullType.NON_NULLABLE, None), + DtypeKind.BOOL: (ColumnNullType.NON_NULLABLE, None), + # Null values for categoricals are stored as `-1` sentinel values + # in the category date (e.g., `col.values.codes` is int8 np.ndarray) + DtypeKind.CATEGORICAL: (ColumnNullType.USE_SENTINEL, -1), + # follow Arrow in using 1 as valid value and 0 for missing/null value + DtypeKind.STRING: (ColumnNullType.USE_BYTEMASK, 0), +} + +_NO_VALIDITY_BUFFER = { + ColumnNullType.NON_NULLABLE: "This column is non-nullable", + ColumnNullType.USE_NAN: "This column uses NaN as null", + ColumnNullType.USE_SENTINEL: "This column uses a sentinel value", +} + + +class PandasColumn(Column): + """ + A column object, with only the methods and properties required by the + interchange protocol defined. + A column can contain one or more chunks. Each chunk can contain up to three + buffers - a data buffer, a mask buffer (depending on null representation), + and an offsets buffer (if variable-size binary; e.g., variable-length + strings). + Note: this Column object can only be produced by ``__dataframe__``, so + doesn't need its own version or ``__column__`` protocol. + """ + + def __init__(self, column: pd.Series, allow_copy: bool = True) -> None: + """ + Note: doesn't deal with extension arrays yet, just assume a regular + Series/ndarray for now. + """ + if not isinstance(column, pd.Series): + raise NotImplementedError(f"Columns of type {type(column)} not handled yet") + + # Store the column as a private attribute + self._col = column + self._allow_copy = allow_copy + + def size(self) -> int: + """ + Size of the column, in elements. + """ + return self._col.size + + @property + def offset(self) -> int: + """ + Offset of first element. Always zero. 
+ """ + # TODO: chunks are implemented now, probably this should return something + return 0 + + @cache_readonly + def dtype(self) -> tuple[DtypeKind, int, str, str]: + dtype = self._col.dtype + + if isinstance(dtype, pd.CategoricalDtype): + codes = self._col.values.codes + ( + _, + bitwidth, + c_arrow_dtype_f_str, + _, + ) = self._dtype_from_pandasdtype(codes.dtype) + return ( + DtypeKind.CATEGORICAL, + bitwidth, + c_arrow_dtype_f_str, + Endianness.NATIVE, + ) + elif is_string_dtype(dtype): + if infer_dtype(self._col) == "string": + return ( + DtypeKind.STRING, + 8, + dtype_to_arrow_c_fmt(dtype), + Endianness.NATIVE, + ) + raise NotImplementedError("Non-string object dtypes are not supported yet") + else: + return self._dtype_from_pandasdtype(dtype) + + def _dtype_from_pandasdtype(self, dtype) -> tuple[DtypeKind, int, str, str]: + """ + See `self.dtype` for details. + """ + # Note: 'c' (complex) not handled yet (not in array spec v1). + # 'b', 'B' (bytes), 'S', 'a', (old-style string) 'V' (void) not handled + # datetime and timedelta both map to datetime (is timedelta handled?) + + kind = _NP_KINDS.get(dtype.kind, None) + if kind is None: + # Not a NumPy dtype. Check if it's a categorical maybe + raise ValueError(f"Data type {dtype} not supported by interchange protocol") + if isinstance(dtype, ArrowDtype): + byteorder = dtype.numpy_dtype.byteorder + elif isinstance(dtype, DatetimeTZDtype): + byteorder = dtype.base.byteorder # type: ignore[union-attr] + else: + byteorder = dtype.byteorder + + return kind, dtype.itemsize * 8, dtype_to_arrow_c_fmt(dtype), byteorder + + @property + def describe_categorical(self): + """ + If the dtype is categorical, there are two options: + - There are only values in the data buffer. + - There is a separate non-categorical Column encoding for categorical values. + + Raises TypeError if the dtype is not categorical + + Content of returned dict: + - "is_ordered" : bool, whether the ordering of dictionary indices is + semantically meaningful. + - "is_dictionary" : bool, whether a dictionary-style mapping of + categorical values to other objects exists + - "categories" : Column representing the (implicit) mapping of indices to + category values (e.g. an array of cat1, cat2, ...). + None if not a dictionary-style categorical. + """ + if not self.dtype[0] == DtypeKind.CATEGORICAL: + raise TypeError( + "describe_categorical only works on a column with categorical dtype!" + ) + + return { + "is_ordered": self._col.cat.ordered, + "is_dictionary": True, + "categories": PandasColumn(pd.Series(self._col.cat.categories)), + } + + @property + def describe_null(self): + kind = self.dtype[0] + try: + null, value = _NULL_DESCRIPTION[kind] + except KeyError: + raise NotImplementedError(f"Data type {kind} not yet supported") + + return null, value + + @cache_readonly + def null_count(self) -> int: + """ + Number of null elements. Should always be known. + """ + return self._col.isna().sum().item() + + @property + def metadata(self) -> dict[str, pd.Index]: + """ + Store specific metadata of the column. + """ + return {"pandas.index": self._col.index} + + def num_chunks(self) -> int: + """ + Return the number of chunks the column consists of. + """ + return 1 + + def get_chunks(self, n_chunks: int | None = None): + """ + Return an iterator yielding the chunks. + See `DataFrame.get_chunks` for details on ``n_chunks``. 
+ """ + if n_chunks and n_chunks > 1: + size = len(self._col) + step = size // n_chunks + if size % n_chunks != 0: + step += 1 + for start in range(0, step * n_chunks, step): + yield PandasColumn( + self._col.iloc[start : start + step], self._allow_copy + ) + else: + yield self + + def get_buffers(self) -> ColumnBuffers: + """ + Return a dictionary containing the underlying buffers. + The returned dictionary has the following contents: + - "data": a two-element tuple whose first element is a buffer + containing the data and whose second element is the data + buffer's associated dtype. + - "validity": a two-element tuple whose first element is a buffer + containing mask values indicating missing data and + whose second element is the mask value buffer's + associated dtype. None if the null representation is + not a bit or byte mask. + - "offsets": a two-element tuple whose first element is a buffer + containing the offset values for variable-size binary + data (e.g., variable-length strings) and whose second + element is the offsets buffer's associated dtype. None + if the data buffer does not have an associated offsets + buffer. + """ + buffers: ColumnBuffers = { + "data": self._get_data_buffer(), + "validity": None, + "offsets": None, + } + + try: + buffers["validity"] = self._get_validity_buffer() + except NoBufferPresent: + pass + + try: + buffers["offsets"] = self._get_offsets_buffer() + except NoBufferPresent: + pass + + return buffers + + def _get_data_buffer( + self, + ) -> tuple[PandasBuffer, Any]: # Any is for self.dtype tuple + """ + Return the buffer containing the data and the buffer's associated dtype. + """ + if self.dtype[0] in ( + DtypeKind.INT, + DtypeKind.UINT, + DtypeKind.FLOAT, + DtypeKind.BOOL, + DtypeKind.DATETIME, + ): + # self.dtype[2] is an ArrowCTypes.TIMESTAMP where the tz will make + # it longer than 4 characters + if self.dtype[0] == DtypeKind.DATETIME and len(self.dtype[2]) > 4: + np_arr = self._col.dt.tz_convert(None).to_numpy() + else: + np_arr = self._col.to_numpy() + buffer = PandasBuffer(np_arr, allow_copy=self._allow_copy) + dtype = self.dtype + elif self.dtype[0] == DtypeKind.CATEGORICAL: + codes = self._col.values._codes + buffer = PandasBuffer(codes, allow_copy=self._allow_copy) + dtype = self._dtype_from_pandasdtype(codes.dtype) + elif self.dtype[0] == DtypeKind.STRING: + # Marshal the strings from a NumPy object array into a byte array + buf = self._col.to_numpy() + b = bytearray() + + # TODO: this for-loop is slow; can be implemented in Cython/C/C++ later + for obj in buf: + if isinstance(obj, str): + b.extend(obj.encode(encoding="utf-8")) + + # Convert the byte array to a Pandas "buffer" using + # a NumPy array as the backing store + buffer = PandasBuffer(np.frombuffer(b, dtype="uint8")) + + # Define the dtype for the returned buffer + dtype = ( + DtypeKind.STRING, + 8, + ArrowCTypes.STRING, + Endianness.NATIVE, + ) # note: currently only support native endianness + else: + raise NotImplementedError(f"Data type {self._col.dtype} not handled yet") + + return buffer, dtype + + def _get_validity_buffer(self) -> tuple[PandasBuffer, Any]: + """ + Return the buffer containing the mask values indicating missing data and + the buffer's associated dtype. + Raises NoBufferPresent if null representation is not a bit or byte mask. + """ + null, invalid = self.describe_null + + if self.dtype[0] == DtypeKind.STRING: + # For now, use byte array as the mask. + # TODO: maybe store as bit array to save space?.. 
+ buf = self._col.to_numpy() + + # Determine the encoding for valid values + valid = invalid == 0 + invalid = not valid + + mask = np.zeros(shape=(len(buf),), dtype=np.bool_) + for i, obj in enumerate(buf): + mask[i] = valid if isinstance(obj, str) else invalid + + # Convert the mask array to a Pandas "buffer" using + # a NumPy array as the backing store + buffer = PandasBuffer(mask) + + # Define the dtype of the returned buffer + dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE) + + return buffer, dtype + + try: + msg = f"{_NO_VALIDITY_BUFFER[null]} so does not have a separate mask" + except KeyError: + # TODO: implement for other bit/byte masks? + raise NotImplementedError("See self.describe_null") + + raise NoBufferPresent(msg) + + def _get_offsets_buffer(self) -> tuple[PandasBuffer, Any]: + """ + Return the buffer containing the offset values for variable-size binary + data (e.g., variable-length strings) and the buffer's associated dtype. + Raises NoBufferPresent if the data buffer does not have an associated + offsets buffer. + """ + if self.dtype[0] == DtypeKind.STRING: + # For each string, we need to manually determine the next offset + values = self._col.to_numpy() + ptr = 0 + offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64) + for i, v in enumerate(values): + # For missing values (in this case, `np.nan` values) + # we don't increment the pointer + if isinstance(v, str): + b = v.encode(encoding="utf-8") + ptr += len(b) + + offsets[i + 1] = ptr + + # Convert the offsets to a Pandas "buffer" using + # the NumPy array as the backing store + buffer = PandasBuffer(offsets) + + # Assemble the buffer dtype info + dtype = ( + DtypeKind.INT, + 64, + ArrowCTypes.INT64, + Endianness.NATIVE, + ) # note: currently only support native endianness + else: + raise NoBufferPresent( + "This column has a fixed-length dtype so " + "it does not have an offsets buffer" + ) + + return buffer, dtype diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/dataframe.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/dataframe.py new file mode 100644 index 00000000..0ddceb6b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/dataframe.py @@ -0,0 +1,116 @@ +from __future__ import annotations + +from collections import abc +from typing import TYPE_CHECKING + +from pandas.core.interchange.column import PandasColumn +from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Sequence, + ) + + from pandas import ( + DataFrame, + Index, + ) + + +class PandasDataFrameXchg(DataFrameXchg): + """ + A data frame class, with only the methods required by the interchange + protocol defined. + Instances of this (private) class are returned from + ``pd.DataFrame.__dataframe__`` as objects with the methods and + attributes defined on this class. + """ + + def __init__( + self, df: DataFrame, nan_as_null: bool = False, allow_copy: bool = True + ) -> None: + """ + Constructor - an instance of this (private) class is returned from + `pd.DataFrame.__dataframe__`. + """ + self._df = df + # ``nan_as_null`` is a keyword intended for the consumer to tell the + # producer to overwrite null values in the data with ``NaN`` (or ``NaT``). + # This currently has no effect; once support for nullable extension + # dtypes is added, this value should be propagated to columns. 
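+        # Editor's note (illustrative): consumers normally obtain this
+        # wrapper via ``pd.DataFrame(...).__dataframe__()`` and then walk it
+        # with ``num_columns()`` / ``get_column(i)``.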
+ self._nan_as_null = nan_as_null + self._allow_copy = allow_copy + + def __dataframe__( + self, nan_as_null: bool = False, allow_copy: bool = True + ) -> PandasDataFrameXchg: + return PandasDataFrameXchg(self._df, nan_as_null, allow_copy) + + @property + def metadata(self) -> dict[str, Index]: + # `index` isn't a regular column, and the protocol doesn't support row + # labels - so we export it as Pandas-specific metadata here. + return {"pandas.index": self._df.index} + + def num_columns(self) -> int: + return len(self._df.columns) + + def num_rows(self) -> int: + return len(self._df) + + def num_chunks(self) -> int: + return 1 + + def column_names(self) -> Index: + return self._df.columns + + def get_column(self, i: int) -> PandasColumn: + return PandasColumn(self._df.iloc[:, i], allow_copy=self._allow_copy) + + def get_column_by_name(self, name: str) -> PandasColumn: + return PandasColumn(self._df[name], allow_copy=self._allow_copy) + + def get_columns(self) -> list[PandasColumn]: + return [ + PandasColumn(self._df[name], allow_copy=self._allow_copy) + for name in self._df.columns + ] + + def select_columns(self, indices: Sequence[int]) -> PandasDataFrameXchg: + if not isinstance(indices, abc.Sequence): + raise ValueError("`indices` is not a sequence") + if not isinstance(indices, list): + indices = list(indices) + + return PandasDataFrameXchg( + self._df.iloc[:, indices], self._nan_as_null, self._allow_copy + ) + + def select_columns_by_name(self, names: list[str]) -> PandasDataFrameXchg: # type: ignore[override] # noqa: E501 + if not isinstance(names, abc.Sequence): + raise ValueError("`names` is not a sequence") + if not isinstance(names, list): + names = list(names) + + return PandasDataFrameXchg( + self._df.loc[:, names], self._nan_as_null, self._allow_copy + ) + + def get_chunks(self, n_chunks: int | None = None) -> Iterable[PandasDataFrameXchg]: + """ + Return an iterator yielding the chunks. + """ + if n_chunks and n_chunks > 1: + size = len(self._df) + step = size // n_chunks + if size % n_chunks != 0: + step += 1 + for start in range(0, step * n_chunks, step): + yield PandasDataFrameXchg( + self._df.iloc[start : start + step, :], + self._nan_as_null, + self._allow_copy, + ) + else: + yield self diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/dataframe_protocol.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/dataframe_protocol.py new file mode 100644 index 00000000..95e7b6a2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/dataframe_protocol.py @@ -0,0 +1,465 @@ +""" +A verbatim copy (vendored) of the spec from https://github.com/data-apis/dataframe-api +""" + +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +import enum +from typing import ( + TYPE_CHECKING, + Any, + TypedDict, +) + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Sequence, + ) + + +class DlpackDeviceType(enum.IntEnum): + """Integer enum for device type codes matching DLPack.""" + + CPU = 1 + CUDA = 2 + CPU_PINNED = 3 + OPENCL = 4 + VULKAN = 7 + METAL = 8 + VPI = 9 + ROCM = 10 + + +class DtypeKind(enum.IntEnum): + """ + Integer enum for data types. + + Attributes + ---------- + INT : int + Matches to signed integer data type. + UINT : int + Matches to unsigned integer data type. + FLOAT : int + Matches to floating point data type. + BOOL : int + Matches to boolean data type. + STRING : int + Matches to string data type (UTF-8 encoded). 
+ DATETIME : int + Matches to datetime data type. + CATEGORICAL : int + Matches to categorical data type. + """ + + INT = 0 + UINT = 1 + FLOAT = 2 + BOOL = 20 + STRING = 21 # UTF-8 + DATETIME = 22 + CATEGORICAL = 23 + + +class ColumnNullType(enum.IntEnum): + """ + Integer enum for null type representation. + + Attributes + ---------- + NON_NULLABLE : int + Non-nullable column. + USE_NAN : int + Use explicit float NaN value. + USE_SENTINEL : int + Sentinel value besides NaN/NaT. + USE_BITMASK : int + The bit is set/unset representing a null on a certain position. + USE_BYTEMASK : int + The byte is set/unset representing a null on a certain position. + """ + + NON_NULLABLE = 0 + USE_NAN = 1 + USE_SENTINEL = 2 + USE_BITMASK = 3 + USE_BYTEMASK = 4 + + +class ColumnBuffers(TypedDict): + # first element is a buffer containing the column data; + # second element is the data buffer's associated dtype + data: tuple[Buffer, Any] + + # first element is a buffer containing mask values indicating missing data; + # second element is the mask value buffer's associated dtype. + # None if the null representation is not a bit or byte mask + validity: tuple[Buffer, Any] | None + + # first element is a buffer containing the offset values for + # variable-size binary data (e.g., variable-length strings); + # second element is the offsets buffer's associated dtype. + # None if the data buffer does not have an associated offsets buffer + offsets: tuple[Buffer, Any] | None + + +class CategoricalDescription(TypedDict): + # whether the ordering of dictionary indices is semantically meaningful + is_ordered: bool + # whether a dictionary-style mapping of categorical values to other objects exists + is_dictionary: bool + # Python-level only (e.g. ``{int: str}``). + # None if not a dictionary-style categorical. + categories: Column | None + + +class Buffer(ABC): + """ + Data in the buffer is guaranteed to be contiguous in memory. + + Note that there is no dtype attribute present, a buffer can be thought of + as simply a block of memory. However, if the column that the buffer is + attached to has a dtype that's supported by DLPack and ``__dlpack__`` is + implemented, then that dtype information will be contained in the return + value from ``__dlpack__``. + + This distinction is useful to support both data exchange via DLPack on a + buffer and (b) dtypes like variable-length strings which do not have a + fixed number of bytes per element. + """ + + @property + @abstractmethod + def bufsize(self) -> int: + """ + Buffer size in bytes. + """ + + @property + @abstractmethod + def ptr(self) -> int: + """ + Pointer to start of the buffer as an integer. + """ + + @abstractmethod + def __dlpack__(self): + """ + Produce DLPack capsule (see array API standard). + + Raises: + + - TypeError : if the buffer contains unsupported dtypes. + - NotImplementedError : if DLPack support is not implemented + + Useful to have to connect to array libraries. Support optional because + it's not completely trivial to implement for a Python-only library. + """ + raise NotImplementedError("__dlpack__") + + @abstractmethod + def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]: + """ + Device type and device ID for where the data in the buffer resides. + Uses device type codes matching DLPack. + Note: must be implemented even if ``__dlpack__`` is not. + """ + + +class Column(ABC): + """ + A column object, with only the methods and properties required by the + interchange protocol defined. + + A column can contain one or more chunks. 
Each chunk can contain up to three + buffers - a data buffer, a mask buffer (depending on null representation), + and an offsets buffer (if variable-size binary; e.g., variable-length + strings). + + TBD: Arrow has a separate "null" dtype, and has no separate mask concept. + Instead, it seems to use "children" for both columns with a bit mask, + and for nested dtypes. Unclear whether this is elegant or confusing. + This design requires checking the null representation explicitly. + + The Arrow design requires checking: + 1. the ARROW_FLAG_NULLABLE (for sentinel values) + 2. if a column has two children, combined with one of those children + having a null dtype. + + Making the mask concept explicit seems useful. One null dtype would + not be enough to cover both bit and byte masks, so that would mean + even more checking if we did it the Arrow way. + + TBD: there's also the "chunk" concept here, which is implicit in Arrow as + multiple buffers per array (= column here). Semantically it may make + sense to have both: chunks were meant for example for lazy evaluation + of data which doesn't fit in memory, while multiple buffers per column + could also come from doing a selection operation on a single + contiguous buffer. + + Given these concepts, one would expect chunks to be all of the same + size (say a 10,000 row dataframe could have 10 chunks of 1,000 rows), + while multiple buffers could have data-dependent lengths. Not an issue + in pandas if one column is backed by a single NumPy array, but in + Arrow it seems possible. + Are multiple chunks *and* multiple buffers per column necessary for + the purposes of this interchange protocol, or must producers either + reuse the chunk concept for this or copy the data? + + Note: this Column object can only be produced by ``__dataframe__``, so + doesn't need its own version or ``__column__`` protocol. + """ + + @abstractmethod + def size(self) -> int: + """ + Size of the column, in elements. + + Corresponds to DataFrame.num_rows() if column is a single chunk; + equal to size of this current chunk otherwise. + """ + + @property + @abstractmethod + def offset(self) -> int: + """ + Offset of first element. + + May be > 0 if using chunks; for example for a column with N chunks of + equal size M (only the last chunk may be shorter), + ``offset = n * M``, ``n = 0 .. N-1``. + """ + + @property + @abstractmethod + def dtype(self) -> tuple[DtypeKind, int, str, str]: + """ + Dtype description as a tuple ``(kind, bit-width, format string, endianness)``. + + Bit-width : the number of bits as an integer + Format string : data type description format string in Apache Arrow C + Data Interface format. + Endianness : current only native endianness (``=``) is supported + + Notes: + - Kind specifiers are aligned with DLPack where possible (hence the + jump to 20, leave enough room for future extension) + - Masks must be specified as boolean with either bit width 1 (for bit + masks) or 8 (for byte masks). + - Dtype width in bits was preferred over bytes + - Endianness isn't too useful, but included now in case in the future + we need to support non-native endianness + - Went with Apache Arrow format strings over NumPy format strings + because they're more complete from a dataframe perspective + - Format strings are mostly useful for datetime specification, and + for categoricals. + - For categoricals, the format string describes the type of the + categorical in the data buffer. In case of a separate encoding of + the categorical (e.g. 
an integer to string mapping), this can + be derived from ``self.describe_categorical``. + - Data types not included: complex, Arrow-style null, binary, decimal, + and nested (list, struct, map, union) dtypes. + """ + + @property + @abstractmethod + def describe_categorical(self) -> CategoricalDescription: + """ + If the dtype is categorical, there are two options: + - There are only values in the data buffer. + - There is a separate non-categorical Column encoding for categorical values. + + Raises TypeError if the dtype is not categorical + + Returns the dictionary with description on how to interpret the data buffer: + - "is_ordered" : bool, whether the ordering of dictionary indices is + semantically meaningful. + - "is_dictionary" : bool, whether a mapping of + categorical values to other objects exists + - "categories" : Column representing the (implicit) mapping of indices to + category values (e.g. an array of cat1, cat2, ...). + None if not a dictionary-style categorical. + + TBD: are there any other in-memory representations that are needed? + """ + + @property + @abstractmethod + def describe_null(self) -> tuple[ColumnNullType, Any]: + """ + Return the missing value (or "null") representation the column dtype + uses, as a tuple ``(kind, value)``. + + Value : if kind is "sentinel value", the actual value. If kind is a bit + mask or a byte mask, the value (0 or 1) indicating a missing value. None + otherwise. + """ + + @property + @abstractmethod + def null_count(self) -> int | None: + """ + Number of null elements, if known. + + Note: Arrow uses -1 to indicate "unknown", but None seems cleaner. + """ + + @property + @abstractmethod + def metadata(self) -> dict[str, Any]: + """ + The metadata for the column. See `DataFrame.metadata` for more details. + """ + + @abstractmethod + def num_chunks(self) -> int: + """ + Return the number of chunks the column consists of. + """ + + @abstractmethod + def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]: + """ + Return an iterator yielding the chunks. + + See `DataFrame.get_chunks` for details on ``n_chunks``. + """ + + @abstractmethod + def get_buffers(self) -> ColumnBuffers: + """ + Return a dictionary containing the underlying buffers. + + The returned dictionary has the following contents: + + - "data": a two-element tuple whose first element is a buffer + containing the data and whose second element is the data + buffer's associated dtype. + - "validity": a two-element tuple whose first element is a buffer + containing mask values indicating missing data and + whose second element is the mask value buffer's + associated dtype. None if the null representation is + not a bit or byte mask. + - "offsets": a two-element tuple whose first element is a buffer + containing the offset values for variable-size binary + data (e.g., variable-length strings) and whose second + element is the offsets buffer's associated dtype. None + if the data buffer does not have an associated offsets + buffer. + """ + + +# def get_children(self) -> Iterable[Column]: +# """ +# Children columns underneath the column, each object in this iterator +# must adhere to the column specification. +# """ +# pass + + +class DataFrame(ABC): + """ + A data frame class, with only the methods required by the interchange + protocol defined. + + A "data frame" represents an ordered collection of named columns. + A column's "name" must be a unique string. + Columns may be accessed by name or by position. 
+
+    This could be a public data frame class, or an object with the methods and
+    attributes defined on this DataFrame class could be returned from the
+    ``__dataframe__`` method of a public data frame class in a library adhering
+    to the dataframe interchange protocol specification.
+    """
+
+    version = 0  # version of the protocol
+
+    @abstractmethod
+    def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True):
+        """Construct a new interchange object, potentially changing the parameters."""
+
+    @property
+    @abstractmethod
+    def metadata(self) -> dict[str, Any]:
+        """
+        The metadata for the data frame, as a dictionary with string keys. The
+        contents of `metadata` may be anything; they are meant for a library
+        to store information that it needs to, e.g., roundtrip losslessly or
+        for two implementations to share data that is not (yet) part of the
+        interchange protocol specification. To avoid collisions with other
+        entries, please name the keys with the name of the library
+        followed by a period and the desired name, e.g., ``pandas.indexcol``.
+        """
+
+    @abstractmethod
+    def num_columns(self) -> int:
+        """
+        Return the number of columns in the DataFrame.
+        """
+
+    @abstractmethod
+    def num_rows(self) -> int | None:
+        # TODO: not happy with Optional, but need to flag it may be expensive
+        #       why include it if it may be None - what do we expect consumers
+        #       to do here?
+        """
+        Return the number of rows in the DataFrame, if available.
+        """
+
+    @abstractmethod
+    def num_chunks(self) -> int:
+        """
+        Return the number of chunks the DataFrame consists of.
+        """
+
+    @abstractmethod
+    def column_names(self) -> Iterable[str]:
+        """
+        Return an iterator yielding the column names.
+        """
+
+    @abstractmethod
+    def get_column(self, i: int) -> Column:
+        """
+        Return the column at the indicated position.
+        """
+
+    @abstractmethod
+    def get_column_by_name(self, name: str) -> Column:
+        """
+        Return the column whose name is the indicated name.
+        """
+
+    @abstractmethod
+    def get_columns(self) -> Iterable[Column]:
+        """
+        Return an iterator yielding the columns.
+        """
+
+    @abstractmethod
+    def select_columns(self, indices: Sequence[int]) -> DataFrame:
+        """
+        Create a new DataFrame by selecting a subset of columns by index.
+        """
+
+    @abstractmethod
+    def select_columns_by_name(self, names: Sequence[str]) -> DataFrame:
+        """
+        Create a new DataFrame by selecting a subset of columns by name.
+        """
+
+    @abstractmethod
+    def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]:
+        """
+        Return an iterator yielding the chunks.
+
+        By default (None), yields the chunks that the data is stored as by the
+        producer. If given, ``n_chunks`` must be a multiple of
+        ``self.num_chunks()``, meaning the producer must subdivide each chunk
+        before yielding it.
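+
+        Examples
+        --------
+        A hypothetical sketch (``df`` is any conforming interchange object;
+        the numbers are illustrative, not mandated by the protocol):
+
+        >>> df.num_chunks()  # doctest: +SKIP
+        2
+        >>> # n_chunks=4 is a multiple of num_chunks(), so the producer
+        >>> # subdivides each stored chunk in two before yielding it
+        >>> len(list(df.get_chunks(n_chunks=4)))  # doctest: +SKIP
+        4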
+ """ diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/from_dataframe.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/from_dataframe.py new file mode 100644 index 00000000..d45ae378 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/from_dataframe.py @@ -0,0 +1,523 @@ +from __future__ import annotations + +import ctypes +import re +from typing import Any + +import numpy as np + +from pandas.compat._optional import import_optional_dependency +from pandas.errors import SettingWithCopyError + +import pandas as pd +from pandas.core.interchange.dataframe_protocol import ( + Buffer, + Column, + ColumnNullType, + DataFrame as DataFrameXchg, + DtypeKind, +) +from pandas.core.interchange.utils import ( + ArrowCTypes, + Endianness, +) + +_NP_DTYPES: dict[DtypeKind, dict[int, Any]] = { + DtypeKind.INT: {8: np.int8, 16: np.int16, 32: np.int32, 64: np.int64}, + DtypeKind.UINT: {8: np.uint8, 16: np.uint16, 32: np.uint32, 64: np.uint64}, + DtypeKind.FLOAT: {32: np.float32, 64: np.float64}, + DtypeKind.BOOL: {1: bool, 8: bool}, +} + + +def from_dataframe(df, allow_copy: bool = True) -> pd.DataFrame: + """ + Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol. + + Parameters + ---------- + df : DataFrameXchg + Object supporting the interchange protocol, i.e. `__dataframe__` method. + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). + + Returns + ------- + pd.DataFrame + + Examples + -------- + >>> df_not_necessarily_pandas = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}) + >>> interchange_object = df_not_necessarily_pandas.__dataframe__() + >>> interchange_object.column_names() + Index(['A', 'B'], dtype='object') + >>> df_pandas = (pd.api.interchange.from_dataframe + ... (interchange_object.select_columns_by_name(['A']))) + >>> df_pandas + A + 0 1 + 1 2 + + These methods (``column_names``, ``select_columns_by_name``) should work + for any dataframe library which implements the interchange protocol. + """ + if isinstance(df, pd.DataFrame): + return df + + if not hasattr(df, "__dataframe__"): + raise ValueError("`df` does not support __dataframe__") + + return _from_dataframe( + df.__dataframe__(allow_copy=allow_copy), allow_copy=allow_copy + ) + + +def _from_dataframe(df: DataFrameXchg, allow_copy: bool = True): + """ + Build a ``pd.DataFrame`` from the DataFrame interchange object. + + Parameters + ---------- + df : DataFrameXchg + Object supporting the interchange protocol, i.e. `__dataframe__` method. + allow_copy : bool, default: True + Whether to allow copying the memory to perform the conversion + (if false then zero-copy approach is requested). 
+ + Returns + ------- + pd.DataFrame + """ + pandas_dfs = [] + for chunk in df.get_chunks(): + pandas_df = protocol_df_chunk_to_pandas(chunk) + pandas_dfs.append(pandas_df) + + if not allow_copy and len(pandas_dfs) > 1: + raise RuntimeError( + "To join chunks a copy is required which is forbidden by allow_copy=False" + ) + if not pandas_dfs: + pandas_df = protocol_df_chunk_to_pandas(df) + elif len(pandas_dfs) == 1: + pandas_df = pandas_dfs[0] + else: + pandas_df = pd.concat(pandas_dfs, axis=0, ignore_index=True, copy=False) + + index_obj = df.metadata.get("pandas.index", None) + if index_obj is not None: + pandas_df.index = index_obj + + return pandas_df + + +def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame: + """ + Convert interchange protocol chunk to ``pd.DataFrame``. + + Parameters + ---------- + df : DataFrameXchg + + Returns + ------- + pd.DataFrame + """ + # We need a dict of columns here, with each column being a NumPy array (at + # least for now, deal with non-NumPy dtypes later). + columns: dict[str, Any] = {} + buffers = [] # hold on to buffers, keeps memory alive + for name in df.column_names(): + if not isinstance(name, str): + raise ValueError(f"Column {name} is not a string") + if name in columns: + raise ValueError(f"Column {name} is not unique") + col = df.get_column_by_name(name) + dtype = col.dtype[0] + if dtype in ( + DtypeKind.INT, + DtypeKind.UINT, + DtypeKind.FLOAT, + DtypeKind.BOOL, + ): + columns[name], buf = primitive_column_to_ndarray(col) + elif dtype == DtypeKind.CATEGORICAL: + columns[name], buf = categorical_column_to_series(col) + elif dtype == DtypeKind.STRING: + columns[name], buf = string_column_to_ndarray(col) + elif dtype == DtypeKind.DATETIME: + columns[name], buf = datetime_column_to_ndarray(col) + else: + raise NotImplementedError(f"Data type {dtype} not handled yet") + + buffers.append(buf) + + pandas_df = pd.DataFrame(columns) + pandas_df.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"] = buffers + return pandas_df + + +def primitive_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]: + """ + Convert a column holding one of the primitive dtypes to a NumPy array. + + A primitive type is one of: int, uint, float, bool. + + Parameters + ---------- + col : Column + + Returns + ------- + tuple + Tuple of np.ndarray holding the data and the memory owner object + that keeps the memory alive. + """ + buffers = col.get_buffers() + + data_buff, data_dtype = buffers["data"] + data = buffer_to_ndarray( + data_buff, data_dtype, offset=col.offset, length=col.size() + ) + + data = set_nulls(data, col, buffers["validity"]) + return data, buffers + + +def categorical_column_to_series(col: Column) -> tuple[pd.Series, Any]: + """ + Convert a column holding categorical data to a pandas Series. + + Parameters + ---------- + col : Column + + Returns + ------- + tuple + Tuple of pd.Series holding the data and the memory owner object + that keeps the memory alive. 
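+
+    Examples
+    --------
+    A minimal sketch using pandas' own interchange object (the round trip
+    is illustrative; this helper is internal, not public API):
+
+    >>> df = pd.DataFrame({"c": pd.Categorical(["a", "b", "a"])})
+    >>> col = df.__dataframe__().get_column_by_name("c")
+    >>> series, _owner = categorical_column_to_series(col)
+    >>> list(series)
+    ['a', 'b', 'a']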
+ """ + categorical = col.describe_categorical + + if not categorical["is_dictionary"]: + raise NotImplementedError("Non-dictionary categoricals not supported yet") + + cat_column = categorical["categories"] + if hasattr(cat_column, "_col"): + # Item "Column" of "Optional[Column]" has no attribute "_col" + # Item "None" of "Optional[Column]" has no attribute "_col" + categories = np.array(cat_column._col) # type: ignore[union-attr] + else: + raise NotImplementedError( + "Interchanging categorical columns isn't supported yet, and our " + "fallback of using the `col._col` attribute (a ndarray) failed." + ) + buffers = col.get_buffers() + + codes_buff, codes_dtype = buffers["data"] + codes = buffer_to_ndarray( + codes_buff, codes_dtype, offset=col.offset, length=col.size() + ) + + # Doing module in order to not get ``IndexError`` for + # out-of-bounds sentinel values in `codes` + if len(categories) > 0: + values = categories[codes % len(categories)] + else: + values = codes + + cat = pd.Categorical( + values, categories=categories, ordered=categorical["is_ordered"] + ) + data = pd.Series(cat) + + data = set_nulls(data, col, buffers["validity"]) + return data, buffers + + +def string_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]: + """ + Convert a column holding string data to a NumPy array. + + Parameters + ---------- + col : Column + + Returns + ------- + tuple + Tuple of np.ndarray holding the data and the memory owner object + that keeps the memory alive. + """ + null_kind, sentinel_val = col.describe_null + + if null_kind not in ( + ColumnNullType.NON_NULLABLE, + ColumnNullType.USE_BITMASK, + ColumnNullType.USE_BYTEMASK, + ): + raise NotImplementedError( + f"{null_kind} null kind is not yet supported for string columns." + ) + + buffers = col.get_buffers() + + assert buffers["offsets"], "String buffers must contain offsets" + # Retrieve the data buffer containing the UTF-8 code units + data_buff, _ = buffers["data"] + # We're going to reinterpret the buffer as uint8, so make sure we can do it safely + assert col.dtype[2] in ( + ArrowCTypes.STRING, + ArrowCTypes.LARGE_STRING, + ) # format_str == utf-8 + # Convert the buffers to NumPy arrays. 
In order to go from STRING to + # an equivalent ndarray, we claim that the buffer is uint8 (i.e., a byte array) + data_dtype = ( + DtypeKind.UINT, + 8, + ArrowCTypes.UINT8, + Endianness.NATIVE, + ) + # Specify zero offset as we don't want to chunk the string data + data = buffer_to_ndarray(data_buff, data_dtype, offset=0, length=data_buff.bufsize) + + # Retrieve the offsets buffer containing the index offsets demarcating + # the beginning and the ending of each string + offset_buff, offset_dtype = buffers["offsets"] + # Offsets buffer contains start-stop positions of strings in the data buffer, + # meaning that it has more elements than in the data buffer, do `col.size() + 1` + # here to pass a proper offsets buffer size + offsets = buffer_to_ndarray( + offset_buff, offset_dtype, offset=col.offset, length=col.size() + 1 + ) + + null_pos = None + if null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK): + assert buffers["validity"], "Validity buffers cannot be empty for masks" + valid_buff, valid_dtype = buffers["validity"] + null_pos = buffer_to_ndarray( + valid_buff, valid_dtype, offset=col.offset, length=col.size() + ) + if sentinel_val == 0: + null_pos = ~null_pos + + # Assemble the strings from the code units + str_list: list[None | float | str] = [None] * col.size() + for i in range(col.size()): + # Check for missing values + if null_pos is not None and null_pos[i]: + str_list[i] = np.nan + continue + + # Extract a range of code units + units = data[offsets[i] : offsets[i + 1]] + + # Convert the list of code units to bytes + str_bytes = bytes(units) + + # Create the string + string = str_bytes.decode(encoding="utf-8") + + # Add to our list of strings + str_list[i] = string + + # Convert the string list to a NumPy array + return np.asarray(str_list, dtype="object"), buffers + + +def parse_datetime_format_str(format_str, data) -> pd.Series | np.ndarray: + """Parse datetime `format_str` to interpret the `data`.""" + # timestamp 'ts{unit}:tz' + timestamp_meta = re.match(r"ts([smun]):(.*)", format_str) + if timestamp_meta: + unit, tz = timestamp_meta.group(1), timestamp_meta.group(2) + if unit != "s": + # the format string describes only a first letter of the unit, so + # add one extra letter to convert the unit to numpy-style: + # 'm' -> 'ms', 'u' -> 'us', 'n' -> 'ns' + unit += "s" + data = data.astype(f"datetime64[{unit}]") + if tz != "": + data = pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(tz) + return data + + # date 'td{Days/Ms}' + date_meta = re.match(r"td([Dm])", format_str) + if date_meta: + unit = date_meta.group(1) + if unit == "D": + # NumPy doesn't support DAY unit, so converting days to seconds + # (converting to uint64 to avoid overflow) + data = (data.astype(np.uint64) * (24 * 60 * 60)).astype("datetime64[s]") + elif unit == "m": + data = data.astype("datetime64[ms]") + else: + raise NotImplementedError(f"Date unit is not supported: {unit}") + return data + + raise NotImplementedError(f"DateTime kind is not supported: {format_str}") + + +def datetime_column_to_ndarray(col: Column) -> tuple[np.ndarray | pd.Series, Any]: + """ + Convert a column holding DateTime data to a NumPy array. + + Parameters + ---------- + col : Column + + Returns + ------- + tuple + Tuple of np.ndarray holding the data and the memory owner object + that keeps the memory alive. 
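+
+    Notes
+    -----
+    An illustrative sketch of the format strings this delegates to
+    ``parse_datetime_format_str`` (examples only, not an exhaustive list):
+
+    - ``"tsn:"`` -- nanosecond timestamps, timezone-naive
+    - ``"tsu:Europe/Paris"`` -- microsecond timestamps, localized via UTC
+    - ``"tdD"`` -- days since the UNIX epoch, widened to ``datetime64[s]``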
+ """ + buffers = col.get_buffers() + + _, col_bit_width, format_str, _ = col.dtype + dbuf, _ = buffers["data"] + # Consider dtype being `uint` to get number of units passed since the 01.01.1970 + + data = buffer_to_ndarray( + dbuf, + ( + DtypeKind.INT, + col_bit_width, + getattr(ArrowCTypes, f"INT{col_bit_width}"), + Endianness.NATIVE, + ), + offset=col.offset, + length=col.size(), + ) + + data = parse_datetime_format_str(format_str, data) # type: ignore[assignment] + data = set_nulls(data, col, buffers["validity"]) + return data, buffers + + +def buffer_to_ndarray( + buffer: Buffer, + dtype: tuple[DtypeKind, int, str, str], + *, + length: int, + offset: int = 0, +) -> np.ndarray: + """ + Build a NumPy array from the passed buffer. + + Parameters + ---------- + buffer : Buffer + Buffer to build a NumPy array from. + dtype : tuple + Data type of the buffer conforming protocol dtypes format. + offset : int, default: 0 + Number of elements to offset from the start of the buffer. + length : int, optional + If the buffer is a bit-mask, specifies a number of bits to read + from the buffer. Has no effect otherwise. + + Returns + ------- + np.ndarray + + Notes + ----- + The returned array doesn't own the memory. The caller of this function is + responsible for keeping the memory owner object alive as long as + the returned NumPy array is being used. + """ + kind, bit_width, _, _ = dtype + + column_dtype = _NP_DTYPES.get(kind, {}).get(bit_width, None) + if column_dtype is None: + raise NotImplementedError(f"Conversion for {dtype} is not yet supported.") + + # TODO: No DLPack yet, so need to construct a new ndarray from the data pointer + # and size in the buffer plus the dtype on the column. Use DLPack as NumPy supports + # it since https://github.com/numpy/numpy/pull/19083 + ctypes_type = np.ctypeslib.as_ctypes_type(column_dtype) + + if bit_width == 1: + assert length is not None, "`length` must be specified for a bit-mask buffer." + pa = import_optional_dependency("pyarrow") + arr = pa.BooleanArray.from_buffers( + pa.bool_(), + length, + [None, pa.foreign_buffer(buffer.ptr, length)], + offset=offset, + ) + return np.asarray(arr) + else: + data_pointer = ctypes.cast( + buffer.ptr + (offset * bit_width // 8), ctypes.POINTER(ctypes_type) + ) + if length > 0: + return np.ctypeslib.as_array(data_pointer, shape=(length,)) + return np.array([], dtype=ctypes_type) + + +def set_nulls( + data: np.ndarray | pd.Series, + col: Column, + validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None, + allow_modify_inplace: bool = True, +): + """ + Set null values for the data according to the column null kind. + + Parameters + ---------- + data : np.ndarray or pd.Series + Data to set nulls in. + col : Column + Column object that describes the `data`. + validity : tuple(Buffer, dtype) or None + The return value of ``col.buffers()``. We do not access the ``col.buffers()`` + here to not take the ownership of the memory of buffer objects. + allow_modify_inplace : bool, default: True + Whether to modify the `data` inplace when zero-copy is possible (True) or always + modify a copy of the `data` (False). + + Returns + ------- + np.ndarray or pd.Series + Data with the nulls being set. 
+ """ + null_kind, sentinel_val = col.describe_null + null_pos = None + + if null_kind == ColumnNullType.USE_SENTINEL: + null_pos = pd.Series(data) == sentinel_val + elif null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK): + assert validity, "Expected to have a validity buffer for the mask" + valid_buff, valid_dtype = validity + null_pos = buffer_to_ndarray( + valid_buff, valid_dtype, offset=col.offset, length=col.size() + ) + if sentinel_val == 0: + null_pos = ~null_pos + elif null_kind in (ColumnNullType.NON_NULLABLE, ColumnNullType.USE_NAN): + pass + else: + raise NotImplementedError(f"Null kind {null_kind} is not yet supported.") + + if null_pos is not None and np.any(null_pos): + if not allow_modify_inplace: + data = data.copy() + try: + data[null_pos] = None + except TypeError: + # TypeError happens if the `data` dtype appears to be non-nullable + # in numpy notation (bool, int, uint). If this happens, + # cast the `data` to nullable float dtype. + data = data.astype(float) + data[null_pos] = None + except SettingWithCopyError: + # `SettingWithCopyError` may happen for datetime-like with missing values. + data = data.copy() + data[null_pos] = None + + return data diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/utils.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/utils.py new file mode 100644 index 00000000..4ac06308 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/interchange/utils.py @@ -0,0 +1,146 @@ +""" +Utility functions and objects for implementing the interchange API. +""" + +from __future__ import annotations + +import typing + +import numpy as np + +from pandas._libs import lib + +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + CategoricalDtype, + DatetimeTZDtype, +) + +if typing.TYPE_CHECKING: + from pandas._typing import DtypeObj + + +# Maps str(pyarrow.DataType) = C type format string +# Currently, no pyarrow API for this +PYARROW_CTYPES = { + "null": "n", + "bool": "b", + "uint8": "C", + "uint16": "S", + "uint32": "I", + "uint64": "L", + "int8": "c", + "int16": "S", + "int32": "i", + "int64": "l", + "halffloat": "e", # float16 + "float": "f", # float32 + "double": "g", # float64 + "string": "u", + "binary": "z", + "time32[s]": "tts", + "time32[ms]": "ttm", + "time64[us]": "ttu", + "time64[ns]": "ttn", + "date32[day]": "tdD", + "date64[ms]": "tdm", + "timestamp[s]": "tss:", + "timestamp[ms]": "tsm:", + "timestamp[us]": "tsu:", + "timestamp[ns]": "tsn:", + "duration[s]": "tDs", + "duration[ms]": "tDm", + "duration[us]": "tDu", + "duration[ns]": "tDn", +} + + +class ArrowCTypes: + """ + Enum for Apache Arrow C type format strings. + + The Arrow C data interface: + https://arrow.apache.org/docs/format/CDataInterface.html#data-type-description-format-strings + """ + + NULL = "n" + BOOL = "b" + INT8 = "c" + UINT8 = "C" + INT16 = "s" + UINT16 = "S" + INT32 = "i" + UINT32 = "I" + INT64 = "l" + UINT64 = "L" + FLOAT16 = "e" + FLOAT32 = "f" + FLOAT64 = "g" + STRING = "u" # utf-8 + LARGE_STRING = "U" # utf-8 + DATE32 = "tdD" + DATE64 = "tdm" + # Resoulution: + # - seconds -> 's' + # - milliseconds -> 'm' + # - microseconds -> 'u' + # - nanoseconds -> 'n' + TIMESTAMP = "ts{resolution}:{tz}" + TIME = "tt{resolution}" + + +class Endianness: + """Enum indicating the byte-order of a data-type.""" + + LITTLE = "<" + BIG = ">" + NATIVE = "=" + NA = "|" + + +def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str: + """ + Represent pandas `dtype` as a format string in Apache Arrow C notation. 
+
+    Parameters
+    ----------
+    dtype : np.dtype
+        Datatype of pandas DataFrame to represent.
+
+    Returns
+    -------
+    str
+        Format string in Apache Arrow C notation of the given `dtype`.
+    """
+    if isinstance(dtype, CategoricalDtype):
+        return ArrowCTypes.INT64
+    elif dtype == np.dtype("O"):
+        return ArrowCTypes.STRING
+    elif isinstance(dtype, ArrowDtype):
+        import pyarrow as pa
+
+        pa_type = dtype.pyarrow_dtype
+        if pa.types.is_decimal(pa_type):
+            return f"d:{pa_type.precision},{pa_type.scale}"
+        elif pa.types.is_timestamp(pa_type) and pa_type.tz is not None:
+            return f"ts{pa_type.unit[0]}:{pa_type.tz}"
+        format_str = PYARROW_CTYPES.get(str(pa_type), None)
+        if format_str is not None:
+            return format_str
+
+    format_str = getattr(ArrowCTypes, dtype.name.upper(), None)
+    if format_str is not None:
+        return format_str
+
+    if lib.is_np_dtype(dtype, "M"):
+        # Selecting the first char of resolution string:
+        # dtype.str -> '<M8[ns]' -> 'n'
+        resolution = np.datetime_data(dtype)[0][0]
+        return ArrowCTypes.TIMESTAMP.format(resolution=resolution, tz="")
+
+    elif isinstance(dtype, DatetimeTZDtype):
+        return ArrowCTypes.TIMESTAMP.format(resolution=dtype.unit[0], tz=dtype.tz)
+
+    raise NotImplementedError(
+        f"Conversion of {dtype} to Arrow C format string is not implemented."
+    )
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/__init__.py
new file mode 100644
index 00000000..284f8ef1
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/__init__.py
@@ -0,0 +1,60 @@
+from pandas.core.internals.api import make_block
+from pandas.core.internals.array_manager import (
+    ArrayManager,
+    SingleArrayManager,
+)
+from pandas.core.internals.base import (
+    DataManager,
+    SingleDataManager,
+)
+from pandas.core.internals.blocks import (  # io.pytables, io.packers
+    Block,
+    DatetimeTZBlock,
+    ExtensionBlock,
+)
+from pandas.core.internals.concat import concatenate_managers
+from pandas.core.internals.managers import (
+    BlockManager,
+    SingleBlockManager,
+    create_block_manager_from_blocks,
+)
+
+__all__ = [
+    "Block",
+    "DatetimeTZBlock",
+    "ExtensionBlock",
+    "make_block",
+    "DataManager",
+    "ArrayManager",
+    "BlockManager",
+    "SingleDataManager",
+    "SingleBlockManager",
+    "SingleArrayManager",
+    "concatenate_managers",
+    # this is preserved here for downstream compatibility (GH-33892)
+    "create_block_manager_from_blocks",
]
+
+
+def __getattr__(name: str):
+    import warnings
+
+    from pandas.util._exceptions import find_stack_level
+
+    if name in ["NumericBlock", "ObjectBlock"]:
+        warnings.warn(
+            f"{name} is deprecated and will be removed in a future version. "
+            "Use public APIs instead.",
+            DeprecationWarning,
+            stacklevel=find_stack_level(),
+        )
+        if name == "NumericBlock":
+            from pandas.core.internals.blocks import NumericBlock
+
+            return NumericBlock
+        else:
+            from pandas.core.internals.blocks import ObjectBlock
+
+            return ObjectBlock
+
+    raise AttributeError(f"module 'pandas.core.internals' has no attribute '{name}'")
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/api.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/api.py
new file mode 100644
index 00000000..10e6b76e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/api.py
@@ -0,0 +1,107 @@
+"""
+This is a pseudo-public API for downstream libraries.
We ask that downstream +authors + +1) Try to avoid using internals directly altogether, and failing that, +2) Use only functions exposed here (or in core.internals) + +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs.internals import BlockPlacement + +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + PeriodDtype, +) + +from pandas.core.arrays import DatetimeArray +from pandas.core.construction import extract_array +from pandas.core.internals.blocks import ( + Block, + DatetimeTZBlock, + ExtensionBlock, + check_ndim, + ensure_block_shape, + extract_pandas_array, + get_block_type, + maybe_coerce_values, +) + +if TYPE_CHECKING: + from pandas._typing import Dtype + + +def make_block( + values, placement, klass=None, ndim=None, dtype: Dtype | None = None +) -> Block: + """ + This is a pseudo-public analogue to blocks.new_block. + + We ask that downstream libraries use this rather than any fully-internal + APIs, including but not limited to: + + - core.internals.blocks.make_block + - Block.make_block + - Block.make_block_same_class + - Block.__init__ + """ + if dtype is not None: + dtype = pandas_dtype(dtype) + + values, dtype = extract_pandas_array(values, dtype, ndim) + + if klass is ExtensionBlock and isinstance(values.dtype, PeriodDtype): + # GH-44681 changed PeriodArray to be stored in the 2D + # NDArrayBackedExtensionBlock instead of ExtensionBlock + # -> still allow ExtensionBlock to be passed in this case for back compat + klass = None + + if klass is None: + dtype = dtype or values.dtype + klass = get_block_type(dtype) + + elif klass is DatetimeTZBlock and not isinstance(values.dtype, DatetimeTZDtype): + # pyarrow calls get here + values = DatetimeArray._simple_new( + # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has + # incompatible type "Union[ExtensionDtype, dtype[Any], None]"; + # expected "Union[dtype[datetime64], DatetimeTZDtype]" + values, + dtype=dtype, # type: ignore[arg-type] + ) + + if not isinstance(placement, BlockPlacement): + placement = BlockPlacement(placement) + + ndim = maybe_infer_ndim(values, placement, ndim) + if isinstance(values.dtype, (PeriodDtype, DatetimeTZDtype)): + # GH#41168 ensure we can pass 1D dt64tz values + # More generally, any EA dtype that isn't is_1d_only_ea_dtype + values = extract_array(values, extract_numpy=True) + values = ensure_block_shape(values, ndim) + + check_ndim(values, placement, ndim) + values = maybe_coerce_values(values) + return klass(values, ndim=ndim, placement=placement) + + +def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int: + """ + If `ndim` is not provided, infer it from placement and values. 
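+
+    A hypothetical sketch of the rule (internal helper, not public API):
+
+    >>> import numpy as np
+    >>> from pandas._libs.internals import BlockPlacement
+    >>> maybe_infer_ndim(np.zeros((1, 3)), BlockPlacement(slice(0, 1)), None)
+    2
+    >>> # extension-array values instead infer ndim=2 only when the
+    >>> # placement covers exactly one column, and ndim=1 otherwise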
+ """ + if ndim is None: + # GH#38134 Block constructor now assumes ndim is not None + if not isinstance(values.dtype, np.dtype): + if len(placement) != 1: + ndim = 1 + else: + ndim = 2 + else: + ndim = values.ndim + return ndim diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/array_manager.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/array_manager.py new file mode 100644 index 00000000..14969425 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/array_manager.py @@ -0,0 +1,1331 @@ +""" +Experimental manager based on storing a collection of 1D arrays +""" +from __future__ import annotations + +import itertools +from typing import ( + TYPE_CHECKING, + Callable, + Literal, +) + +import numpy as np + +from pandas._libs import ( + NaT, + lib, +) + +from pandas.core.dtypes.astype import ( + astype_array, + astype_array_safe, +) +from pandas.core.dtypes.cast import ( + ensure_dtype_can_hold_na, + find_common_type, + infer_dtype_from_scalar, + np_find_common_type, +) +from pandas.core.dtypes.common import ( + ensure_platform_int, + is_datetime64_ns_dtype, + is_integer, + is_numeric_dtype, + is_object_dtype, + is_timedelta64_ns_dtype, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + array_equals, + isna, + na_value_for_dtype, +) + +import pandas.core.algorithms as algos +from pandas.core.array_algos.quantile import quantile_compat +from pandas.core.array_algos.take import take_1d +from pandas.core.arrays import ( + DatetimeArray, + ExtensionArray, + NumpyExtensionArray, + TimedeltaArray, +) +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, + sanitize_array, +) +from pandas.core.indexers import ( + maybe_convert_indices, + validate_indices, +) +from pandas.core.indexes.api import ( + Index, + ensure_index, +) +from pandas.core.internals.base import ( + DataManager, + SingleDataManager, + ensure_np_dtype, + interleaved_dtype, +) +from pandas.core.internals.blocks import ( + BlockPlacement, + ensure_block_shape, + external_values, + extract_pandas_array, + maybe_coerce_values, + new_block, + to_native_types, +) +from pandas.core.internals.managers import make_na_array + +if TYPE_CHECKING: + from collections.abc import Hashable + + from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeObj, + QuantileInterpolation, + Self, + npt, + ) + + +class BaseArrayManager(DataManager): + """ + Core internal data structure to implement DataFrame and Series. + + Alternative to the BlockManager, storing a list of 1D arrays instead of + Blocks. 
+ + This is *not* a public API class + + Parameters + ---------- + arrays : Sequence of arrays + axes : Sequence of Index + verify_integrity : bool, default True + + """ + + __slots__ = [ + "_axes", # private attribute, because 'axes' has different order, see below + "arrays", + ] + + arrays: list[np.ndarray | ExtensionArray] + _axes: list[Index] + + def __init__( + self, + arrays: list[np.ndarray | ExtensionArray], + axes: list[Index], + verify_integrity: bool = True, + ) -> None: + raise NotImplementedError + + def make_empty(self, axes=None) -> Self: + """Return an empty ArrayManager with the items axis of len 0 (no columns)""" + if axes is None: + axes = [self.axes[1:], Index([])] + + arrays: list[np.ndarray | ExtensionArray] = [] + return type(self)(arrays, axes) + + @property + def items(self) -> Index: + return self._axes[-1] + + @property + # error: Signature of "axes" incompatible with supertype "DataManager" + def axes(self) -> list[Index]: # type: ignore[override] + # mypy doesn't work to override attribute with property + # see https://github.com/python/mypy/issues/4125 + """Axes is BlockManager-compatible order (columns, rows)""" + return [self._axes[1], self._axes[0]] + + @property + def shape_proper(self) -> tuple[int, ...]: + # this returns (n_rows, n_columns) + return tuple(len(ax) for ax in self._axes) + + @staticmethod + def _normalize_axis(axis: AxisInt) -> int: + # switch axis + axis = 1 if axis == 0 else 0 + return axis + + def set_axis(self, axis: AxisInt, new_labels: Index) -> None: + # Caller is responsible for ensuring we have an Index object. + self._validate_set_axis(axis, new_labels) + axis = self._normalize_axis(axis) + self._axes[axis] = new_labels + + def get_dtypes(self) -> npt.NDArray[np.object_]: + return np.array([arr.dtype for arr in self.arrays], dtype="object") + + def add_references(self, mgr: BaseArrayManager) -> None: + """ + Only implemented on the BlockManager level + """ + return + + def __getstate__(self): + return self.arrays, self._axes + + def __setstate__(self, state) -> None: + self.arrays = state[0] + self._axes = state[1] + + def __repr__(self) -> str: + output = type(self).__name__ + output += f"\nIndex: {self._axes[0]}" + if self.ndim == 2: + output += f"\nColumns: {self._axes[1]}" + output += f"\n{len(self.arrays)} arrays:" + for arr in self.arrays: + output += f"\n{arr.dtype}" + return output + + def apply( + self, + f, + align_keys: list[str] | None = None, + **kwargs, + ) -> Self: + """ + Iterate over the arrays, collect and create a new ArrayManager. + + Parameters + ---------- + f : str or callable + Name of the Array method to apply. 
+ align_keys: List[str] or None, default None + **kwargs + Keywords to pass to `f` + + Returns + ------- + ArrayManager + """ + assert "filter" not in kwargs + + align_keys = align_keys or [] + result_arrays: list[ArrayLike] = [] + # fillna: Series/DataFrame is responsible for making sure value is aligned + + aligned_args = {k: kwargs[k] for k in align_keys} + + if f == "apply": + f = kwargs.pop("func") + + for i, arr in enumerate(self.arrays): + if aligned_args: + for k, obj in aligned_args.items(): + if isinstance(obj, (ABCSeries, ABCDataFrame)): + # The caller is responsible for ensuring that + # obj.axes[-1].equals(self.items) + if obj.ndim == 1: + kwargs[k] = obj.iloc[i] + else: + kwargs[k] = obj.iloc[:, i]._values + else: + # otherwise we have an array-like + kwargs[k] = obj[i] + + if callable(f): + applied = f(arr, **kwargs) + else: + applied = getattr(arr, f)(**kwargs) + + result_arrays.append(applied) + + new_axes = self._axes + return type(self)(result_arrays, new_axes) + + def apply_with_block(self, f, align_keys=None, **kwargs) -> Self: + # switch axis to follow BlockManager logic + swap_axis = True + if f == "interpolate": + swap_axis = False + if swap_axis and "axis" in kwargs and self.ndim == 2: + kwargs["axis"] = 1 if kwargs["axis"] == 0 else 0 + + align_keys = align_keys or [] + aligned_args = {k: kwargs[k] for k in align_keys} + + result_arrays = [] + + for i, arr in enumerate(self.arrays): + if aligned_args: + for k, obj in aligned_args.items(): + if isinstance(obj, (ABCSeries, ABCDataFrame)): + # The caller is responsible for ensuring that + # obj.axes[-1].equals(self.items) + if obj.ndim == 1: + if self.ndim == 2: + kwargs[k] = obj.iloc[slice(i, i + 1)]._values + else: + kwargs[k] = obj.iloc[:]._values + else: + kwargs[k] = obj.iloc[:, [i]]._values + else: + # otherwise we have an ndarray + if obj.ndim == 2: + kwargs[k] = obj[[i]] + + if isinstance(arr.dtype, np.dtype) and not isinstance(arr, np.ndarray): + # i.e. TimedeltaArray, DatetimeArray with tz=None. Need to + # convert for the Block constructors. 
+ arr = np.asarray(arr) + + arr = maybe_coerce_values(arr) + if self.ndim == 2: + arr = ensure_block_shape(arr, 2) + bp = BlockPlacement(slice(0, 1, 1)) + block = new_block(arr, placement=bp, ndim=2) + else: + bp = BlockPlacement(slice(0, len(self), 1)) + block = new_block(arr, placement=bp, ndim=1) + + applied = getattr(block, f)(**kwargs) + if isinstance(applied, list): + applied = applied[0] + arr = applied.values + if self.ndim == 2 and arr.ndim == 2: + # 2D for np.ndarray or DatetimeArray/TimedeltaArray + assert len(arr) == 1 + # error: No overload variant of "__getitem__" of "ExtensionArray" + # matches argument type "Tuple[int, slice]" + arr = arr[0, :] # type: ignore[call-overload] + result_arrays.append(arr) + + return type(self)(result_arrays, self._axes) + + def setitem(self, indexer, value) -> Self: + return self.apply_with_block("setitem", indexer=indexer, value=value) + + def diff(self, n: int) -> Self: + assert self.ndim == 2 # caller ensures + return self.apply(algos.diff, n=n) + + def astype(self, dtype, copy: bool | None = False, errors: str = "raise") -> Self: + if copy is None: + copy = True + + return self.apply(astype_array_safe, dtype=dtype, copy=copy, errors=errors) + + def convert(self, copy: bool | None) -> Self: + if copy is None: + copy = True + + def _convert(arr): + if is_object_dtype(arr.dtype): + # extract NumpyExtensionArray for tests that patch + # NumpyExtensionArray._typ + arr = np.asarray(arr) + result = lib.maybe_convert_objects( + arr, + convert_non_numeric=True, + ) + if result is arr and copy: + return arr.copy() + return result + else: + return arr.copy() if copy else arr + + return self.apply(_convert) + + def to_native_types(self, **kwargs) -> Self: + return self.apply(to_native_types, **kwargs) + + @property + def any_extension_types(self) -> bool: + """Whether any of the blocks in this manager are extension blocks""" + return False # any(block.is_extension for block in self.blocks) + + @property + def is_view(self) -> bool: + """return a boolean if we are a single block and are a view""" + # TODO what is this used for? + return False + + @property + def is_single_block(self) -> bool: + return len(self.arrays) == 1 + + def _get_data_subset(self, predicate: Callable) -> Self: + indices = [i for i, arr in enumerate(self.arrays) if predicate(arr)] + arrays = [self.arrays[i] for i in indices] + # TODO copy? + # Note: using Index.take ensures we can retain e.g. DatetimeIndex.freq, + # see test_describe_datetime_columns + taker = np.array(indices, dtype="intp") + new_cols = self._axes[1].take(taker) + new_axes = [self._axes[0], new_cols] + return type(self)(arrays, new_axes, verify_integrity=False) + + def get_bool_data(self, copy: bool = False) -> Self: + """ + Select columns that are bool-dtype and object-dtype columns that are all-bool. + + Parameters + ---------- + copy : bool, default False + Whether to copy the blocks + """ + return self._get_data_subset(lambda x: x.dtype == np.dtype(bool)) + + def get_numeric_data(self, copy: bool = False) -> Self: + """ + Select columns that have a numeric dtype. 
+ + Parameters + ---------- + copy : bool, default False + Whether to copy the blocks + """ + return self._get_data_subset( + lambda arr: is_numeric_dtype(arr.dtype) + or getattr(arr.dtype, "_is_numeric", False) + ) + + def copy(self, deep: bool | Literal["all"] | None = True) -> Self: + """ + Make deep or shallow copy of ArrayManager + + Parameters + ---------- + deep : bool or string, default True + If False, return shallow copy (do not copy data) + If 'all', copy data and a deep copy of the index + + Returns + ------- + BlockManager + """ + if deep is None: + # ArrayManager does not yet support CoW, so deep=None always means + # deep=True for now + deep = True + + # this preserves the notion of view copying of axes + if deep: + # hit in e.g. tests.io.json.test_pandas + + def copy_func(ax): + return ax.copy(deep=True) if deep == "all" else ax.view() + + new_axes = [copy_func(ax) for ax in self._axes] + else: + new_axes = list(self._axes) + + if deep: + new_arrays = [arr.copy() for arr in self.arrays] + else: + new_arrays = list(self.arrays) + return type(self)(new_arrays, new_axes, verify_integrity=False) + + def reindex_indexer( + self, + new_axis, + indexer, + axis: AxisInt, + fill_value=None, + allow_dups: bool = False, + copy: bool | None = True, + # ignored keywords + only_slice: bool = False, + # ArrayManager specific keywords + use_na_proxy: bool = False, + ) -> Self: + axis = self._normalize_axis(axis) + return self._reindex_indexer( + new_axis, + indexer, + axis, + fill_value, + allow_dups, + copy, + use_na_proxy, + ) + + def _reindex_indexer( + self, + new_axis, + indexer: npt.NDArray[np.intp] | None, + axis: AxisInt, + fill_value=None, + allow_dups: bool = False, + copy: bool | None = True, + use_na_proxy: bool = False, + ) -> Self: + """ + Parameters + ---------- + new_axis : Index + indexer : ndarray[intp] or None + axis : int + fill_value : object, default None + allow_dups : bool, default False + copy : bool, default True + + + pandas-indexer with -1's only. + """ + if copy is None: + # ArrayManager does not yet support CoW, so deep=None always means + # deep=True for now + copy = True + + if indexer is None: + if new_axis is self._axes[axis] and not copy: + return self + + result = self.copy(deep=copy) + result._axes = list(self._axes) + result._axes[axis] = new_axis + return result + + # some axes don't allow reindexing with dups + if not allow_dups: + self._axes[axis]._validate_can_reindex(indexer) + + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + if axis == 1: + new_arrays = [] + for i in indexer: + if i == -1: + arr = self._make_na_array( + fill_value=fill_value, use_na_proxy=use_na_proxy + ) + else: + arr = self.arrays[i] + if copy: + arr = arr.copy() + new_arrays.append(arr) + + else: + validate_indices(indexer, len(self._axes[0])) + indexer = ensure_platform_int(indexer) + mask = indexer == -1 + needs_masking = mask.any() + new_arrays = [ + take_1d( + arr, + indexer, + allow_fill=needs_masking, + fill_value=fill_value, + mask=mask, + # if fill_value is not None else blk.fill_value + ) + for arr in self.arrays + ] + + new_axes = list(self._axes) + new_axes[axis] = new_axis + + return type(self)(new_arrays, new_axes, verify_integrity=False) + + def take( + self, + indexer: npt.NDArray[np.intp], + axis: AxisInt = 1, + verify: bool = True, + ) -> Self: + """ + Take items along any axis. 
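+
+        For example (illustrative): ``mgr.take(np.array([2, 0], dtype=np.intp))``
+        with the default ``axis=1`` returns a new manager holding row 2
+        followed by row 0, with the row Index taken along; it funnels into
+        ``_reindex_indexer`` with ``allow_dups=True``.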
+ """ + assert isinstance(indexer, np.ndarray), type(indexer) + assert indexer.dtype == np.intp, indexer.dtype + + axis = self._normalize_axis(axis) + + if not indexer.ndim == 1: + raise ValueError("indexer should be 1-dimensional") + + n = self.shape_proper[axis] + indexer = maybe_convert_indices(indexer, n, verify=verify) + + new_labels = self._axes[axis].take(indexer) + return self._reindex_indexer( + new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True + ) + + def _make_na_array(self, fill_value=None, use_na_proxy: bool = False): + if use_na_proxy: + assert fill_value is None + return NullArrayProxy(self.shape_proper[0]) + + if fill_value is None: + fill_value = np.nan + + dtype, fill_value = infer_dtype_from_scalar(fill_value) + array_values = make_na_array(dtype, self.shape_proper[:1], fill_value) + return array_values + + def _equal_values(self, other) -> bool: + """ + Used in .equals defined in base class. Only check the column values + assuming shape and indexes have already been checked. + """ + for left, right in zip(self.arrays, other.arrays): + if not array_equals(left, right): + return False + return True + + # TODO + # to_dict + + +class ArrayManager(BaseArrayManager): + @property + def ndim(self) -> Literal[2]: + return 2 + + def __init__( + self, + arrays: list[np.ndarray | ExtensionArray], + axes: list[Index], + verify_integrity: bool = True, + ) -> None: + # Note: we are storing the axes in "_axes" in the (row, columns) order + # which contrasts the order how it is stored in BlockManager + self._axes = axes + self.arrays = arrays + + if verify_integrity: + self._axes = [ensure_index(ax) for ax in axes] + arrays = [extract_pandas_array(x, None, 1)[0] for x in arrays] + self.arrays = [maybe_coerce_values(arr) for arr in arrays] + self._verify_integrity() + + def _verify_integrity(self) -> None: + n_rows, n_columns = self.shape_proper + if not len(self.arrays) == n_columns: + raise ValueError( + "Number of passed arrays must equal the size of the column Index: " + f"{len(self.arrays)} arrays vs {n_columns} columns." + ) + for arr in self.arrays: + if not len(arr) == n_rows: + raise ValueError( + "Passed arrays should have the same length as the rows Index: " + f"{len(arr)} vs {n_rows} rows" + ) + if not isinstance(arr, (np.ndarray, ExtensionArray)): + raise ValueError( + "Passed arrays should be np.ndarray or ExtensionArray instances, " + f"got {type(arr)} instead" + ) + if not arr.ndim == 1: + raise ValueError( + "Passed arrays should be 1-dimensional, got array with " + f"{arr.ndim} dimensions instead." + ) + + # -------------------------------------------------------------------- + # Indexing + + def fast_xs(self, loc: int) -> SingleArrayManager: + """ + Return the array corresponding to `frame.iloc[loc]`. 
+
+        Parameters
+        ----------
+        loc : int
+
+        Returns
+        -------
+        np.ndarray or ExtensionArray
+        """
+        dtype = interleaved_dtype([arr.dtype for arr in self.arrays])
+
+        values = [arr[loc] for arr in self.arrays]
+        if isinstance(dtype, ExtensionDtype):
+            result = dtype.construct_array_type()._from_sequence(values, dtype=dtype)
+        # for datetime64/timedelta64, the np.ndarray constructor cannot handle pd.NaT
+        elif is_datetime64_ns_dtype(dtype):
+            result = DatetimeArray._from_sequence(values, dtype=dtype)._ndarray
+        elif is_timedelta64_ns_dtype(dtype):
+            result = TimedeltaArray._from_sequence(values, dtype=dtype)._ndarray
+        else:
+            result = np.array(values, dtype=dtype)
+        return SingleArrayManager([result], [self._axes[1]])
+
+    def get_slice(self, slobj: slice, axis: AxisInt = 0) -> ArrayManager:
+        axis = self._normalize_axis(axis)
+
+        if axis == 0:
+            arrays = [arr[slobj] for arr in self.arrays]
+        elif axis == 1:
+            arrays = self.arrays[slobj]
+
+        new_axes = list(self._axes)
+        new_axes[axis] = new_axes[axis]._getitem_slice(slobj)
+
+        return type(self)(arrays, new_axes, verify_integrity=False)
+
+    def iget(self, i: int) -> SingleArrayManager:
+        """
+        Return the data as a SingleArrayManager.
+        """
+        values = self.arrays[i]
+        return SingleArrayManager([values], [self._axes[0]])
+
+    def iget_values(self, i: int) -> ArrayLike:
+        """
+        Return the data for column i as the values (ndarray or ExtensionArray).
+        """
+        return self.arrays[i]
+
+    @property
+    def column_arrays(self) -> list[ArrayLike]:
+        """
+        Used in the JSON C code to access column arrays.
+        """
+
+        return [np.asarray(arr) for arr in self.arrays]
+
+    def iset(
+        self,
+        loc: int | slice | np.ndarray,
+        value: ArrayLike,
+        inplace: bool = False,
+        refs=None,
+    ) -> None:
+        """
+        Set new column(s).
+
+        This changes the ArrayManager in-place, but replaces (an) existing
+        column(s), not changing column values in-place.
+
+        Parameters
+        ----------
+        loc : integer, slice or boolean mask
+            Positional location (already bounds checked)
+        value : np.ndarray or ExtensionArray
+        inplace : bool, default False
+            Whether to overwrite the existing array as opposed to replacing it.
+        """
+        # single column -> single integer index
+        if lib.is_integer(loc):
+            # TODO can we avoid needing to unpack this here?
That means converting + # DataFrame into 1D array when loc is an integer + if isinstance(value, np.ndarray) and value.ndim == 2: + assert value.shape[1] == 1 + value = value[:, 0] + + # TODO we receive a datetime/timedelta64 ndarray from DataFrame._iset_item + # but we should avoid that and pass directly the proper array + value = maybe_coerce_values(value) + + assert isinstance(value, (np.ndarray, ExtensionArray)) + assert value.ndim == 1 + assert len(value) == len(self._axes[0]) + self.arrays[loc] = value + return + + # multiple columns -> convert slice or array to integer indices + elif isinstance(loc, slice): + indices: range | np.ndarray = range( + loc.start if loc.start is not None else 0, + loc.stop if loc.stop is not None else self.shape_proper[1], + loc.step if loc.step is not None else 1, + ) + else: + assert isinstance(loc, np.ndarray) + assert loc.dtype == "bool" + indices = np.nonzero(loc)[0] + + assert value.ndim == 2 + assert value.shape[0] == len(self._axes[0]) + + for value_idx, mgr_idx in enumerate(indices): + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[slice, int]" + value_arr = value[:, value_idx] # type: ignore[call-overload] + self.arrays[mgr_idx] = value_arr + return + + def column_setitem( + self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False + ) -> None: + """ + Set values ("setitem") into a single column (not setting the full column). + + This is a method on the ArrayManager level, to avoid creating an + intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`) + """ + if not is_integer(loc): + raise TypeError("The column index should be an integer") + arr = self.arrays[loc] + mgr = SingleArrayManager([arr], [self._axes[0]]) + if inplace_only: + mgr.setitem_inplace(idx, value) + else: + new_mgr = mgr.setitem((idx,), value) + # update existing ArrayManager in-place + self.arrays[loc] = new_mgr.arrays[0] + + def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None: + """ + Insert item at selected position. + + Parameters + ---------- + loc : int + item : hashable + value : np.ndarray or ExtensionArray + """ + # insert to the axis; this could possibly raise a TypeError + new_axis = self.items.insert(loc, item) + + value = extract_array(value, extract_numpy=True) + if value.ndim == 2: + if value.shape[0] == 1: + # error: No overload variant of "__getitem__" of "ExtensionArray" + # matches argument type "Tuple[int, slice]" + value = value[0, :] # type: ignore[call-overload] + else: + raise ValueError( + f"Expected a 1D array, got an array with shape {value.shape}" + ) + value = maybe_coerce_values(value) + + # TODO self.arrays can be empty + # assert len(value) == len(self.arrays[0]) + + # TODO is this copy needed? + arrays = self.arrays.copy() + arrays.insert(loc, value) + + self.arrays = arrays + self._axes[1] = new_axis + + def idelete(self, indexer) -> ArrayManager: + """ + Delete selected locations in-place (new block and array, same BlockManager) + """ + to_keep = np.ones(self.shape[0], dtype=np.bool_) + to_keep[indexer] = False + + self.arrays = [self.arrays[i] for i in np.nonzero(to_keep)[0]] + self._axes = [self._axes[0], self._axes[1][to_keep]] + return self + + # -------------------------------------------------------------------- + # Array-wise Operation + + def grouped_reduce(self, func: Callable) -> Self: + """ + Apply grouped reduction function columnwise, returning a new ArrayManager. 
+ + Parameters + ---------- + func : grouped reduction function + + Returns + ------- + ArrayManager + """ + result_arrays: list[np.ndarray] = [] + result_indices: list[int] = [] + + for i, arr in enumerate(self.arrays): + # grouped_reduce functions all expect 2D arrays + arr = ensure_block_shape(arr, ndim=2) + res = func(arr) + if res.ndim == 2: + # reverse of ensure_block_shape + assert res.shape[0] == 1 + res = res[0] + + result_arrays.append(res) + result_indices.append(i) + + if len(result_arrays) == 0: + nrows = 0 + else: + nrows = result_arrays[0].shape[0] + index = Index(range(nrows)) + + columns = self.items + + # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; + # expected "List[Union[ndarray, ExtensionArray]]" + return type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] + + def reduce(self, func: Callable) -> Self: + """ + Apply reduction function column-wise, returning a single-row ArrayManager. + + Parameters + ---------- + func : reduction function + + Returns + ------- + ArrayManager + """ + result_arrays: list[np.ndarray] = [] + for i, arr in enumerate(self.arrays): + res = func(arr, axis=0) + + # TODO NaT doesn't preserve dtype, so we need to ensure to create + # a timedelta result array if original was timedelta + # what if datetime results in timedelta? (eg std) + dtype = arr.dtype if res is NaT else None + result_arrays.append( + sanitize_array([res], None, dtype=dtype) # type: ignore[arg-type] + ) + + index = Index._simple_new(np.array([None], dtype=object)) # placeholder + columns = self.items + + # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; + # expected "List[Union[ndarray, ExtensionArray]]" + new_mgr = type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] + return new_mgr + + def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager: + """ + Apply array_op blockwise with another (aligned) BlockManager. + """ + # TODO what if `other` is BlockManager ? + left_arrays = self.arrays + right_arrays = other.arrays + result_arrays = [ + array_op(left, right) for left, right in zip(left_arrays, right_arrays) + ] + return type(self)(result_arrays, self._axes) + + def quantile( + self, + *, + qs: Index, # with dtype float64 + transposed: bool = False, + interpolation: QuantileInterpolation = "linear", + ) -> ArrayManager: + arrs = [ensure_block_shape(x, 2) for x in self.arrays] + new_arrs = [ + quantile_compat(x, np.asarray(qs._values), interpolation) for x in arrs + ] + for i, arr in enumerate(new_arrs): + if arr.ndim == 2: + assert arr.shape[0] == 1, arr.shape + new_arrs[i] = arr[0] + + axes = [qs, self._axes[1]] + return type(self)(new_arrs, axes) + + # ---------------------------------------------------------------- + + def unstack(self, unstacker, fill_value) -> ArrayManager: + """ + Return a BlockManager with all blocks unstacked. + + Parameters + ---------- + unstacker : reshape._Unstacker + fill_value : Any + fill_value for newly introduced missing values. 
+
+        Returns
+        -------
+        unstacked : ArrayManager
+        """
+        indexer, _ = unstacker._indexer_and_to_sort
+        if unstacker.mask.all():
+            new_indexer = indexer
+            allow_fill = False
+            new_mask2D = None
+            needs_masking = None
+        else:
+            new_indexer = np.full(unstacker.mask.shape, -1)
+            new_indexer[unstacker.mask] = indexer
+            allow_fill = True
+            # calculating the full mask once and passing it to take_1d is faster
+            # than letting take_1d calculate it in each repeated call
+            new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape)
+            needs_masking = new_mask2D.any(axis=0)
+        new_indexer2D = new_indexer.reshape(*unstacker.full_shape)
+        new_indexer2D = ensure_platform_int(new_indexer2D)
+
+        new_arrays = []
+        for arr in self.arrays:
+            for i in range(unstacker.full_shape[1]):
+                if allow_fill:
+                    # error: Value of type "Optional[Any]" is not indexable  [index]
+                    new_arr = take_1d(
+                        arr,
+                        new_indexer2D[:, i],
+                        allow_fill=needs_masking[i],  # type: ignore[index]
+                        fill_value=fill_value,
+                        mask=new_mask2D[:, i],  # type: ignore[index]
+                    )
+                else:
+                    new_arr = take_1d(arr, new_indexer2D[:, i], allow_fill=False)
+                new_arrays.append(new_arr)
+
+        new_index = unstacker.new_index
+        new_columns = unstacker.get_new_columns(self._axes[1])
+        new_axes = [new_index, new_columns]
+
+        return type(self)(new_arrays, new_axes, verify_integrity=False)
+
+    def as_array(
+        self,
+        dtype=None,
+        copy: bool = False,
+        na_value: object = lib.no_default,
+    ) -> np.ndarray:
+        """
+        Convert the ArrayManager data into a NumPy array.
+
+        Parameters
+        ----------
+        dtype : object, default None
+            Data type of the return array.
+        copy : bool, default False
+            If True then guarantee that a copy is returned. A value of
+            False does not guarantee that the underlying data is not
+            copied.
+        na_value : object, default lib.no_default
+            Value to be used as the missing value sentinel.
+
+        Returns
+        -------
+        arr : ndarray
+        """
+        if len(self.arrays) == 0:
+            empty_arr = np.empty(self.shape, dtype=float)
+            return empty_arr.transpose()
+
+        # We want to copy when na_value is provided to avoid
+        # mutating the original object
+        copy = copy or na_value is not lib.no_default
+
+        if not dtype:
+            dtype = interleaved_dtype([arr.dtype for arr in self.arrays])
+
+        dtype = ensure_np_dtype(dtype)
+
+        result = np.empty(self.shape_proper, dtype=dtype)
+
+        for i, arr in enumerate(self.arrays):
+            arr = arr.astype(dtype, copy=copy)
+            result[:, i] = arr
+
+        if na_value is not lib.no_default:
+            result[isna(result)] = na_value
+
+        return result
+
+    @classmethod
+    def concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self:
+        """
+        Concatenate uniformly-indexed ArrayManagers horizontally.
+        """
+        # concatting along the columns -> combine reindexed arrays in a single manager
+        arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))
+        new_mgr = cls(arrays, [axes[1], axes[0]], verify_integrity=False)
+        return new_mgr
+
+    @classmethod
+    def concat_vertical(cls, mgrs: list[Self], axes: list[Index]) -> Self:
+        """
+        Concatenate uniformly-indexed ArrayManagers vertically.
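+
+        A hypothetical sketch: for two managers with columns ``a, b`` of
+        lengths 2 and 3, the result has columns ``a, b`` of length 5, where
+        result column ``j`` is ``concat_arrays([m1.arrays[j], m2.arrays[j]])``
+        (see ``concat_arrays`` below for how the common dtype is resolved).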
+ """ + # concatting along the rows -> concat the reindexed arrays + # TODO(ArrayManager) doesn't yet preserve the correct dtype + arrays = [ + concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))]) + for j in range(len(mgrs[0].arrays)) + ] + new_mgr = cls(arrays, [axes[1], axes[0]], verify_integrity=False) + return new_mgr + + +class SingleArrayManager(BaseArrayManager, SingleDataManager): + __slots__ = [ + "_axes", # private attribute, because 'axes' has different order, see below + "arrays", + ] + + arrays: list[np.ndarray | ExtensionArray] + _axes: list[Index] + + @property + def ndim(self) -> Literal[1]: + return 1 + + def __init__( + self, + arrays: list[np.ndarray | ExtensionArray], + axes: list[Index], + verify_integrity: bool = True, + ) -> None: + self._axes = axes + self.arrays = arrays + + if verify_integrity: + assert len(axes) == 1 + assert len(arrays) == 1 + self._axes = [ensure_index(ax) for ax in self._axes] + arr = arrays[0] + arr = maybe_coerce_values(arr) + arr = extract_pandas_array(arr, None, 1)[0] + self.arrays = [arr] + self._verify_integrity() + + def _verify_integrity(self) -> None: + (n_rows,) = self.shape + assert len(self.arrays) == 1 + arr = self.arrays[0] + assert len(arr) == n_rows + if not arr.ndim == 1: + raise ValueError( + "Passed array should be 1-dimensional, got array with " + f"{arr.ndim} dimensions instead." + ) + + @staticmethod + def _normalize_axis(axis): + return axis + + def make_empty(self, axes=None) -> SingleArrayManager: + """Return an empty ArrayManager with index/array of length 0""" + if axes is None: + axes = [Index([], dtype=object)] + array: np.ndarray = np.array([], dtype=self.dtype) + return type(self)([array], axes) + + @classmethod + def from_array(cls, array, index) -> SingleArrayManager: + return cls([array], [index]) + + # error: Cannot override writeable attribute with read-only property + @property + def axes(self) -> list[Index]: # type: ignore[override] + return self._axes + + @property + def index(self) -> Index: + return self._axes[0] + + @property + def dtype(self): + return self.array.dtype + + def external_values(self): + """The array that Series.values returns""" + return external_values(self.array) + + def internal_values(self): + """The array that Series._values returns""" + return self.array + + def array_values(self): + """The array that Series.array returns""" + arr = self.array + if isinstance(arr, np.ndarray): + arr = NumpyExtensionArray(arr) + return arr + + @property + def _can_hold_na(self) -> bool: + if isinstance(self.array, np.ndarray): + return self.array.dtype.kind not in "iub" + else: + # ExtensionArray + return self.array._can_hold_na + + @property + def is_single_block(self) -> bool: + return True + + def fast_xs(self, loc: int) -> SingleArrayManager: + raise NotImplementedError("Use series._values[loc] instead") + + def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleArrayManager: + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + new_array = self.array[slobj] + new_index = self.index._getitem_slice(slobj) + return type(self)([new_array], [new_index], verify_integrity=False) + + def get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> SingleArrayManager: + new_array = self.array[indexer] + new_index = self.index[indexer] + return type(self)([new_array], [new_index]) + + # error: Signature of "apply" incompatible with supertype "BaseArrayManager" + def apply(self, func, **kwargs) -> Self: # type: ignore[override] + if callable(func): + 
new_array = func(self.array, **kwargs) + else: + new_array = getattr(self.array, func)(**kwargs) + return type(self)([new_array], self._axes) + + def setitem(self, indexer, value) -> SingleArrayManager: + """ + Set values with indexer. + + For SingleArrayManager, this backs s[indexer] = value + + See `setitem_inplace` for a version that works inplace and doesn't + return a new Manager. + """ + if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim: + raise ValueError(f"Cannot set values with ndim > {self.ndim}") + return self.apply_with_block("setitem", indexer=indexer, value=value) + + def idelete(self, indexer) -> SingleArrayManager: + """ + Delete selected locations in-place (new array, same ArrayManager) + """ + to_keep = np.ones(self.shape[0], dtype=np.bool_) + to_keep[indexer] = False + + self.arrays = [self.arrays[0][to_keep]] + self._axes = [self._axes[0][to_keep]] + return self + + def _get_data_subset(self, predicate: Callable) -> SingleArrayManager: + # used in get_numeric_data / get_bool_data + if predicate(self.array): + return type(self)(self.arrays, self._axes, verify_integrity=False) + else: + return self.make_empty() + + def set_values(self, values: ArrayLike) -> None: + """ + Set (replace) the values of the SingleArrayManager in place. + + Use at your own risk! This does not check if the passed values are + valid for the current SingleArrayManager (length, dtype, etc). + """ + self.arrays[0] = values + + def to_2d_mgr(self, columns: Index) -> ArrayManager: + """ + Manager analogue of Series.to_frame + """ + arrays = [self.arrays[0]] + axes = [self.axes[0], columns] + + return ArrayManager(arrays, axes, verify_integrity=False) + + +class NullArrayProxy: + """ + Proxy object for an all-NA array. + + Only stores the length of the array, and not the dtype. The dtype + will only be known when actually concatenating (after determining the + common dtype, for which this proxy is ignored). + Using this object avoids that the internals/concat.py needs to determine + the proper dtype and array type. + """ + + ndim = 1 + + def __init__(self, n: int) -> None: + self.n = n + + @property + def shape(self) -> tuple[int]: + return (self.n,) + + def to_array(self, dtype: DtypeObj) -> ArrayLike: + """ + Helper function to create the actual all-NA array from the NullArrayProxy + object. + + Parameters + ---------- + arr : NullArrayProxy + dtype : the dtype for the resulting array + + Returns + ------- + np.ndarray or ExtensionArray + """ + if isinstance(dtype, ExtensionDtype): + empty = dtype.construct_array_type()._from_sequence([], dtype=dtype) + indexer = -np.ones(self.n, dtype=np.intp) + return empty.take(indexer, allow_fill=True) + else: + # when introducing missing values, int becomes float, bool becomes object + dtype = ensure_dtype_can_hold_na(dtype) + fill_value = na_value_for_dtype(dtype) + arr = np.empty(self.n, dtype=dtype) + arr.fill(fill_value) + return ensure_wrapped_if_datetimelike(arr) + + +def concat_arrays(to_concat: list) -> ArrayLike: + """ + Alternative for concat_compat but specialized for use in the ArrayManager. + + Differences: only deals with 1D arrays (no axis keyword), assumes + ensure_wrapped_if_datetimelike and does not skip empty arrays to determine + the dtype. + In addition ensures that all NullArrayProxies get replaced with actual + arrays. 
+ + Parameters + ---------- + to_concat : list of arrays + + Returns + ------- + np.ndarray or ExtensionArray + """ + # ignore the all-NA proxies to determine the resulting dtype + to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)] + + dtypes = {x.dtype for x in to_concat_no_proxy} + single_dtype = len(dtypes) == 1 + + if single_dtype: + target_dtype = to_concat_no_proxy[0].dtype + elif all(lib.is_np_dtype(x, "iub") for x in dtypes): + # GH#42092 + target_dtype = np_find_common_type(*dtypes) + else: + target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy]) + + to_concat = [ + arr.to_array(target_dtype) + if isinstance(arr, NullArrayProxy) + else astype_array(arr, target_dtype, copy=False) + for arr in to_concat + ] + + if isinstance(to_concat[0], ExtensionArray): + cls = type(to_concat[0]) + return cls._concat_same_type(to_concat) + + result = np.concatenate(to_concat) + + # TODO decide on exact behaviour (we shouldn't do this only for empty result) + # see https://github.com/pandas-dev/pandas/issues/39817 + if len(result) == 0: + # all empties -> check for bool to not coerce to float + kinds = {obj.dtype.kind for obj in to_concat_no_proxy} + if len(kinds) != 1: + if "b" in kinds: + result = result.astype(object) + return result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/base.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/base.py new file mode 100644 index 00000000..677dd369 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/base.py @@ -0,0 +1,376 @@ +""" +Base class for the internal managers. Both BlockManager and ArrayManager +inherit from this class. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Literal, + cast, + final, +) + +import numpy as np + +from pandas._config import using_copy_on_write + +from pandas._libs import ( + algos as libalgos, + lib, +) +from pandas.errors import AbstractMethodError +from pandas.util._validators import validate_bool_kwarg + +from pandas.core.dtypes.cast import ( + find_common_type, + np_can_hold_element, +) +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, + SparseDtype, +) + +from pandas.core.base import PandasObject +from pandas.core.construction import extract_array +from pandas.core.indexes.api import ( + Index, + default_index, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeObj, + Self, + Shape, + ) + + +class DataManager(PandasObject): + # TODO share more methods/attributes + + axes: list[Index] + + @property + def items(self) -> Index: + raise AbstractMethodError(self) + + @final + def __len__(self) -> int: + return len(self.items) + + @property + def ndim(self) -> int: + return len(self.axes) + + @property + def shape(self) -> Shape: + return tuple(len(ax) for ax in self.axes) + + @final + def _validate_set_axis(self, axis: AxisInt, new_labels: Index) -> None: + # Caller is responsible for ensuring we have an Index object. + old_len = len(self.axes[axis]) + new_len = len(new_labels) + + if axis == 1 and len(self.items) == 0: + # If we are setting the index on a DataFrame with no columns, + # it is OK to change the length. 
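+            # NOTE (editorial, illustrative; not part of the pandas source):
+            # e.g. assigning a fresh 3-element index to an empty DataFrame()
+            # is accepted here, whereas any other length change falls through
+            # to the ValueError below.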
+ pass + + elif new_len != old_len: + raise ValueError( + f"Length mismatch: Expected axis has {old_len} elements, new " + f"values have {new_len} elements" + ) + + def reindex_indexer( + self, + new_axis, + indexer, + axis: AxisInt, + fill_value=None, + allow_dups: bool = False, + copy: bool = True, + only_slice: bool = False, + ) -> Self: + raise AbstractMethodError(self) + + @final + def reindex_axis( + self, + new_index: Index, + axis: AxisInt, + fill_value=None, + only_slice: bool = False, + ) -> Self: + """ + Conform data manager to new index. + """ + new_index, indexer = self.axes[axis].reindex(new_index) + + return self.reindex_indexer( + new_index, + indexer, + axis=axis, + fill_value=fill_value, + copy=False, + only_slice=only_slice, + ) + + def _equal_values(self, other: Self) -> bool: + """ + To be implemented by the subclasses. Only check the column values + assuming shape and indexes have already been checked. + """ + raise AbstractMethodError(self) + + @final + def equals(self, other: object) -> bool: + """ + Implementation for DataFrame.equals + """ + if not isinstance(other, DataManager): + return False + + self_axes, other_axes = self.axes, other.axes + if len(self_axes) != len(other_axes): + return False + if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): + return False + + return self._equal_values(other) + + def apply( + self, + f, + align_keys: list[str] | None = None, + **kwargs, + ) -> Self: + raise AbstractMethodError(self) + + def apply_with_block( + self, + f, + align_keys: list[str] | None = None, + **kwargs, + ) -> Self: + raise AbstractMethodError(self) + + @final + def isna(self, func) -> Self: + return self.apply("apply", func=func) + + @final + def fillna(self, value, limit: int | None, inplace: bool, downcast) -> Self: + if limit is not None: + # Do this validation even if we go through one of the no-op paths + limit = libalgos.validate_limit(None, limit=limit) + + return self.apply_with_block( + "fillna", + value=value, + limit=limit, + inplace=inplace, + downcast=downcast, + using_cow=using_copy_on_write(), + ) + + @final + def where(self, other, cond, align: bool) -> Self: + if align: + align_keys = ["other", "cond"] + else: + align_keys = ["cond"] + other = extract_array(other, extract_numpy=True) + + return self.apply_with_block( + "where", + align_keys=align_keys, + other=other, + cond=cond, + using_cow=using_copy_on_write(), + ) + + @final + def putmask(self, mask, new, align: bool = True) -> Self: + if align: + align_keys = ["new", "mask"] + else: + align_keys = ["mask"] + new = extract_array(new, extract_numpy=True) + + return self.apply_with_block( + "putmask", + align_keys=align_keys, + mask=mask, + new=new, + using_cow=using_copy_on_write(), + ) + + @final + def round(self, decimals: int, using_cow: bool = False) -> Self: + return self.apply_with_block( + "round", + decimals=decimals, + using_cow=using_cow, + ) + + @final + def replace(self, to_replace, value, inplace: bool) -> Self: + inplace = validate_bool_kwarg(inplace, "inplace") + # NDFrame.replace ensures the not-is_list_likes here + assert not lib.is_list_like(to_replace) + assert not lib.is_list_like(value) + return self.apply_with_block( + "replace", + to_replace=to_replace, + value=value, + inplace=inplace, + using_cow=using_copy_on_write(), + ) + + @final + def replace_regex(self, **kwargs) -> Self: + return self.apply_with_block( + "_replace_regex", **kwargs, using_cow=using_copy_on_write() + ) + + @final + def replace_list( + self, + src_list: list[Any], + 
dest_list: list[Any], + inplace: bool = False, + regex: bool = False, + ) -> Self: + """do a list replace""" + inplace = validate_bool_kwarg(inplace, "inplace") + + bm = self.apply_with_block( + "replace_list", + src_list=src_list, + dest_list=dest_list, + inplace=inplace, + regex=regex, + using_cow=using_copy_on_write(), + ) + bm._consolidate_inplace() + return bm + + def interpolate(self, inplace: bool, **kwargs) -> Self: + return self.apply_with_block( + "interpolate", inplace=inplace, **kwargs, using_cow=using_copy_on_write() + ) + + def pad_or_backfill(self, inplace: bool, **kwargs) -> Self: + return self.apply_with_block( + "pad_or_backfill", + inplace=inplace, + **kwargs, + using_cow=using_copy_on_write(), + ) + + def shift(self, periods: int, fill_value) -> Self: + if fill_value is lib.no_default: + fill_value = None + + return self.apply_with_block("shift", periods=periods, fill_value=fill_value) + + # -------------------------------------------------------------------- + # Consolidation: No-ops for all but BlockManager + + def is_consolidated(self) -> bool: + return True + + def consolidate(self) -> Self: + return self + + def _consolidate_inplace(self) -> None: + return + + +class SingleDataManager(DataManager): + @property + def ndim(self) -> Literal[1]: + return 1 + + @final + @property + def array(self) -> ArrayLike: + """ + Quick access to the backing array of the Block or SingleArrayManager. + """ + # error: "SingleDataManager" has no attribute "arrays"; maybe "array" + return self.arrays[0] # type: ignore[attr-defined] + + def setitem_inplace(self, indexer, value) -> None: + """ + Set values with indexer. + + For Single[Block/Array]Manager, this backs s[indexer] = value + + This is an inplace version of `setitem()`, mutating the manager/values + in place, not returning a new Manager (and Block), and thus never changing + the dtype. + """ + arr = self.array + + # EAs will do this validation in their own __setitem__ methods. + if isinstance(arr, np.ndarray): + # Note: checking for ndarray instead of np.dtype means we exclude + # dt64/td64, which do their own validation. + value = np_can_hold_element(arr.dtype, value) + + if isinstance(value, np.ndarray) and value.ndim == 1 and len(value) == 1: + # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615 + value = value[0, ...] + + arr[indexer] = value + + def grouped_reduce(self, func): + arr = self.array + res = func(arr) + index = default_index(len(res)) + + mgr = type(self).from_array(res, index) + return mgr + + @classmethod + def from_array(cls, arr: ArrayLike, index: Index): + raise AbstractMethodError(cls) + + +def interleaved_dtype(dtypes: list[DtypeObj]) -> DtypeObj | None: + """ + Find the common dtype for `blocks`. + + Parameters + ---------- + blocks : List[DtypeObj] + + Returns + ------- + dtype : np.dtype, ExtensionDtype, or None + None is returned when `blocks` is empty. + """ + if not len(dtypes): + return None + + return find_common_type(dtypes) + + +def ensure_np_dtype(dtype: DtypeObj) -> np.dtype: + # TODO: https://github.com/pandas-dev/pandas/issues/22791 + # Give EAs some input on what happens here. Sparse needs this. 
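+    # NOTE (editorial, illustrative; not part of the pandas source): the
+    # mapping below reduces any dtype to a plain numpy dtype usable for a
+    # dense output buffer, roughly:
+    #   ensure_np_dtype(SparseDtype("float64"))  -> np.dtype("float64")
+    #   ensure_np_dtype(CategoricalDtype())      -> np.dtype("object")
+    #   ensure_np_dtype(np.dtype(str))           -> np.dtype("object")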
+ if isinstance(dtype, SparseDtype): + dtype = dtype.subtype + dtype = cast(np.dtype, dtype) + elif isinstance(dtype, ExtensionDtype): + dtype = np.dtype("object") + elif dtype == np.dtype(str): + dtype = np.dtype("object") + return dtype diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/blocks.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/blocks.py new file mode 100644 index 00000000..c0b78e73 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/blocks.py @@ -0,0 +1,2609 @@ +from __future__ import annotations + +from functools import wraps +import re +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + cast, + final, +) +import warnings +import weakref + +import numpy as np + +from pandas._config import using_copy_on_write + +from pandas._libs import ( + NaT, + internals as libinternals, + lib, + writers, +) +from pandas._libs.internals import ( + BlockPlacement, + BlockValuesRefs, +) +from pandas._libs.missing import NA +from pandas._libs.tslibs import IncompatibleFrequency +from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeObj, + F, + FillnaOptions, + IgnoreRaise, + InterpolateOptions, + QuantileInterpolation, + Self, + Shape, + npt, +) +from pandas.errors import AbstractMethodError +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import validate_bool_kwarg + +from pandas.core.dtypes.astype import ( + astype_array_safe, + astype_is_view, +) +from pandas.core.dtypes.cast import ( + LossySetitemError, + can_hold_element, + find_result_type, + maybe_downcast_to_dtype, + np_can_hold_element, +) +from pandas.core.dtypes.common import ( + ensure_platform_int, + is_1d_only_ea_dtype, + is_float_dtype, + is_integer_dtype, + is_list_like, + is_scalar, + is_string_dtype, +) +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + ExtensionDtype, + IntervalDtype, + NumpyEADtype, + PeriodDtype, + SparseDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCNumpyExtensionArray, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + na_value_for_dtype, +) + +from pandas.core import missing +import pandas.core.algorithms as algos +from pandas.core.array_algos.putmask import ( + extract_bool_array, + putmask_inplace, + putmask_without_repeat, + setitem_datetimelike_compat, + validate_putmask, +) +from pandas.core.array_algos.quantile import quantile_compat +from pandas.core.array_algos.replace import ( + compare_or_regex_search, + replace_regex, + should_use_regex, +) +from pandas.core.array_algos.transforms import shift +from pandas.core.arrays import ( + Categorical, + DatetimeArray, + ExtensionArray, + IntervalArray, + NumpyExtensionArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.base import PandasObject +import pandas.core.common as com +from pandas.core.computation import expressions +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, +) +from pandas.core.indexers import check_setitem_lengths + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Sequence, + ) + + from pandas.core.api import Index + from pandas.core.arrays._mixins import NDArrayBackedExtensionArray + +# comparison is faster than is_object_dtype +_dtype_obj = np.dtype("object") + + +def maybe_split(meth: F) -> F: + """ + If we have a multi-column block, split and operate block-wise. 
Otherwise + use the original method. + """ + + @wraps(meth) + def newfunc(self, *args, **kwargs) -> list[Block]: + if self.ndim == 1 or self.shape[0] == 1: + return meth(self, *args, **kwargs) + else: + # Split and operate column-by-column + return self.split_and_operate(meth, *args, **kwargs) + + return cast(F, newfunc) + + +class Block(PandasObject): + """ + Canonical n-dimensional unit of homogeneous dtype contained in a pandas + data structure + + Index-ignorant; let the container take care of that + """ + + values: np.ndarray | ExtensionArray + ndim: int + refs: BlockValuesRefs + __init__: Callable + + __slots__ = () + is_numeric = False + + @final + @cache_readonly + def _validate_ndim(self) -> bool: + """ + We validate dimension for blocks that can hold 2D values, which for now + means numpy dtypes or DatetimeTZDtype. + """ + dtype = self.dtype + return not isinstance(dtype, ExtensionDtype) or isinstance( + dtype, DatetimeTZDtype + ) + + @final + @cache_readonly + def is_object(self) -> bool: + return self.values.dtype == _dtype_obj + + @final + @cache_readonly + def is_extension(self) -> bool: + return not lib.is_np_dtype(self.values.dtype) + + @final + @cache_readonly + def _can_consolidate(self) -> bool: + # We _could_ consolidate for DatetimeTZDtype but don't for now. + return not self.is_extension + + @final + @cache_readonly + def _consolidate_key(self): + return self._can_consolidate, self.dtype.name + + @final + @cache_readonly + def _can_hold_na(self) -> bool: + """ + Can we store NA values in this Block? + """ + dtype = self.dtype + if isinstance(dtype, np.dtype): + return dtype.kind not in "iub" + return dtype._can_hold_na + + @final + @property + def is_bool(self) -> bool: + """ + We can be bool if a) we are bool dtype or b) object dtype with bool objects. + """ + return self.values.dtype == np.dtype(bool) + + @final + def external_values(self): + return external_values(self.values) + + @final + @cache_readonly + def fill_value(self): + # Used in reindex_indexer + return na_value_for_dtype(self.dtype, compat=False) + + @final + def _standardize_fill_value(self, value): + # if we are passed a scalar None, convert it here + if self.dtype != _dtype_obj and is_valid_na_for_dtype(value, self.dtype): + value = self.fill_value + return value + + @property + def mgr_locs(self) -> BlockPlacement: + return self._mgr_locs + + @mgr_locs.setter + def mgr_locs(self, new_mgr_locs: BlockPlacement) -> None: + self._mgr_locs = new_mgr_locs + + @final + def make_block( + self, + values, + placement: BlockPlacement | None = None, + refs: BlockValuesRefs | None = None, + ) -> Block: + """ + Create a new block, with type inference propagate any values that are + not specified + """ + if placement is None: + placement = self._mgr_locs + if self.is_extension: + values = ensure_block_shape(values, ndim=self.ndim) + + return new_block(values, placement=placement, ndim=self.ndim, refs=refs) + + @final + def make_block_same_class( + self, + values, + placement: BlockPlacement | None = None, + refs: BlockValuesRefs | None = None, + ) -> Self: + """Wrap given values in a block of same type as self.""" + # Pre-2.0 we called ensure_wrapped_if_datetimelike because fastparquet + # relied on it, as of 2.0 the caller is responsible for this. 
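+        # NOTE (editorial, illustrative; not part of the pandas source):
+        # unlike ``make_block`` above, which re-infers the Block subclass from
+        # the values' dtype, this keeps ``type(self)``; callers must therefore
+        # pass values whose dtype is compatible with the current subclass.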
+ if placement is None: + placement = self._mgr_locs + + # We assume maybe_coerce_values has already been called + return type(self)(values, placement=placement, ndim=self.ndim, refs=refs) + + @final + def __repr__(self) -> str: + # don't want to print out all of the items here + name = type(self).__name__ + if self.ndim == 1: + result = f"{name}: {len(self)} dtype: {self.dtype}" + else: + shape = " x ".join([str(s) for s in self.shape]) + result = f"{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}" + + return result + + @final + def __len__(self) -> int: + return len(self.values) + + @final + def slice_block_columns(self, slc: slice) -> Self: + """ + Perform __getitem__-like, return result as block. + """ + new_mgr_locs = self._mgr_locs[slc] + + new_values = self._slice(slc) + refs = self.refs + return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs) + + @final + def take_block_columns(self, indices: npt.NDArray[np.intp]) -> Self: + """ + Perform __getitem__-like, return result as block. + + Only supports slices that preserve dimensionality. + """ + # Note: only called from is from internals.concat, and we can verify + # that never happens with 1-column blocks, i.e. never for ExtensionBlock. + + new_mgr_locs = self._mgr_locs[indices] + + new_values = self._slice(indices) + return type(self)(new_values, new_mgr_locs, self.ndim, refs=None) + + @final + def getitem_block_columns( + self, slicer: slice, new_mgr_locs: BlockPlacement, ref_inplace_op: bool = False + ) -> Self: + """ + Perform __getitem__-like, return result as block. + + Only supports slices that preserve dimensionality. + """ + new_values = self._slice(slicer) + refs = self.refs if not ref_inplace_op or self.refs.has_reference() else None + return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs) + + @final + def _can_hold_element(self, element: Any) -> bool: + """require the same dtype as ourselves""" + element = extract_array(element, extract_numpy=True) + return can_hold_element(self.values, element) + + @final + def should_store(self, value: ArrayLike) -> bool: + """ + Should we set self.values[indexer] = value inplace or do we need to cast? 
+
+        Parameters
+        ----------
+        value : np.ndarray or ExtensionArray
+
+        Returns
+        -------
+        bool
+        """
+        return value.dtype == self.dtype
+
+    # ---------------------------------------------------------------------
+    # Apply/Reduce and Helpers
+
+    @final
+    def apply(self, func, **kwargs) -> list[Block]:
+        """
+        apply the function to my values; return a block if we are not
+        one
+        """
+        result = func(self.values, **kwargs)
+
+        result = maybe_coerce_values(result)
+        return self._split_op_result(result)
+
+    @final
+    def reduce(self, func) -> list[Block]:
+        # We will apply the function and reshape the result into a single-row
+        # Block with the same mgr_locs; squeezing will be done at a higher level
+        assert self.ndim == 2
+
+        result = func(self.values)
+
+        if self.values.ndim == 1:
+            res_values = result
+        else:
+            res_values = result.reshape(-1, 1)
+
+        nb = self.make_block(res_values)
+        return [nb]
+
+    @final
+    def _split_op_result(self, result: ArrayLike) -> list[Block]:
+        # See also: split_and_operate
+        if result.ndim > 1 and isinstance(result.dtype, ExtensionDtype):
+            # TODO(EA2D): unnecessary with 2D EAs
+            # if we get a 2D ExtensionArray, we need to split it into 1D pieces
+            nbs = []
+            for i, loc in enumerate(self._mgr_locs):
+                if not is_1d_only_ea_dtype(result.dtype):
+                    vals = result[i : i + 1]
+                else:
+                    vals = result[i]
+
+                bp = BlockPlacement(loc)
+                block = self.make_block(values=vals, placement=bp)
+                nbs.append(block)
+            return nbs
+
+        nb = self.make_block(result)
+
+        return [nb]
+
+    @final
+    def _split(self) -> list[Block]:
+        """
+        Split a block into a list of single-column blocks.
+        """
+        assert self.ndim == 2
+
+        new_blocks = []
+        for i, ref_loc in enumerate(self._mgr_locs):
+            vals = self.values[slice(i, i + 1)]
+
+            bp = BlockPlacement(ref_loc)
+            nb = type(self)(vals, placement=bp, ndim=2, refs=self.refs)
+            new_blocks.append(nb)
+        return new_blocks
+
+    @final
+    def split_and_operate(self, func, *args, **kwargs) -> list[Block]:
+        """
+        Split the block and apply func column-by-column.
+
+        Parameters
+        ----------
+        func : Block method
+        *args
+        **kwargs
+
+        Returns
+        -------
+        List[Block]
+        """
+        assert self.ndim == 2 and self.shape[0] != 1
+
+        res_blocks = []
+        for nb in self._split():
+            rbs = func(nb, *args, **kwargs)
+            res_blocks.extend(rbs)
+        return res_blocks
+
+    # ---------------------------------------------------------------------
+    # Up/Down-casting
+
+    @final
+    def coerce_to_target_dtype(self, other, warn_on_upcast: bool = False) -> Block:
+        """
+        coerce the current block to a dtype compat for other
+        we will return a block, possibly object, and not raise
+
+        we can also safely try to coerce to the same dtype
+        and will receive the same block
+        """
+        new_dtype = find_result_type(self.values.dtype, other)
+
+        # In a future version of pandas, the default will be that
+        # setting `nan` into an integer series won't raise.
+        if (
+            is_scalar(other)
+            and is_integer_dtype(self.values.dtype)
+            and isna(other)
+            and other is not NaT
+        ):
+            warn_on_upcast = False
+        elif (
+            isinstance(other, np.ndarray)
+            and other.ndim == 1
+            and is_integer_dtype(self.values.dtype)
+            and is_float_dtype(other.dtype)
+            and lib.has_only_ints_or_nan(other)
+        ):
+            warn_on_upcast = False
+
+        if warn_on_upcast:
+            warnings.warn(
+                f"Setting an item of incompatible dtype is deprecated "
+                "and will raise an error in a future version of pandas. 
" + f"Value '{other}' has dtype incompatible with {self.values.dtype}, " + "please explicitly cast to a compatible dtype first.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if self.values.dtype == new_dtype: + raise AssertionError( + f"Did not expect new dtype {new_dtype} to equal self.dtype " + f"{self.values.dtype}. Please report a bug at " + "https://github.com/pandas-dev/pandas/issues." + ) + return self.astype(new_dtype, copy=False) + + @final + def _maybe_downcast( + self, blocks: list[Block], downcast=None, using_cow: bool = False + ) -> list[Block]: + if downcast is False: + return blocks + + if self.dtype == _dtype_obj: + # TODO: does it matter that self.dtype might not match blocks[i].dtype? + # GH#44241 We downcast regardless of the argument; + # respecting 'downcast=None' may be worthwhile at some point, + # but ATM it breaks too much existing code. + # split and convert the blocks + + return extend_blocks( + [blk.convert(using_cow=using_cow, copy=not using_cow) for blk in blocks] + ) + + if downcast is None: + return blocks + + return extend_blocks([b._downcast_2d(downcast, using_cow) for b in blocks]) + + @final + @maybe_split + def _downcast_2d(self, dtype, using_cow: bool = False) -> list[Block]: + """ + downcast specialized to 2D case post-validation. + + Refactored to allow use of maybe_split. + """ + new_values = maybe_downcast_to_dtype(self.values, dtype=dtype) + new_values = maybe_coerce_values(new_values) + refs = self.refs if new_values is self.values else None + return [self.make_block(new_values, refs=refs)] + + @final + def convert( + self, + *, + copy: bool = True, + using_cow: bool = False, + ) -> list[Block]: + """ + Attempt to coerce any object types to better types. Return a copy + of the block (if copy = True). + """ + if not self.is_object: + if not copy and using_cow: + return [self.copy(deep=False)] + return [self.copy()] if copy else [self] + + if self.ndim != 1 and self.shape[0] != 1: + blocks = self.split_and_operate( + Block.convert, copy=copy, using_cow=using_cow + ) + if all(blk.dtype.kind == "O" for blk in blocks): + # Avoid fragmenting the block if convert is a no-op + if using_cow: + return [self.copy(deep=False)] + return [self.copy()] if copy else [self] + return blocks + + values = self.values + if values.ndim == 2: + # the check above ensures we only get here with values.shape[0] == 1, + # avoid doing .ravel as that might make a copy + values = values[0] + + res_values = lib.maybe_convert_objects( + values, # type: ignore[arg-type] + convert_non_numeric=True, + ) + refs = None + if copy and res_values is values: + res_values = values.copy() + elif res_values is values: + refs = self.refs + + res_values = ensure_block_shape(res_values, self.ndim) + res_values = maybe_coerce_values(res_values) + return [self.make_block(res_values, refs=refs)] + + # --------------------------------------------------------------------- + # Array-Like Methods + + @final + @cache_readonly + def dtype(self) -> DtypeObj: + return self.values.dtype + + @final + def astype( + self, + dtype: DtypeObj, + copy: bool = False, + errors: IgnoreRaise = "raise", + using_cow: bool = False, + ) -> Block: + """ + Coerce to the new dtype. + + Parameters + ---------- + dtype : np.dtype or ExtensionDtype + copy : bool, default False + copy if indicated + errors : str, {'raise', 'ignore'}, default 'raise' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. 
On error return original object + using_cow: bool, default False + Signaling if copy on write copy logic is used. + + Returns + ------- + Block + """ + values = self.values + + new_values = astype_array_safe(values, dtype, copy=copy, errors=errors) + + new_values = maybe_coerce_values(new_values) + + refs = None + if (using_cow or not copy) and astype_is_view(values.dtype, new_values.dtype): + refs = self.refs + + newb = self.make_block(new_values, refs=refs) + if newb.shape != self.shape: + raise TypeError( + f"cannot set astype for copy = [{copy}] for dtype " + f"({self.dtype.name} [{self.shape}]) to different shape " + f"({newb.dtype.name} [{newb.shape}])" + ) + return newb + + @final + def to_native_types(self, na_rep: str = "nan", quoting=None, **kwargs) -> Block: + """convert to our native types format""" + result = to_native_types(self.values, na_rep=na_rep, quoting=quoting, **kwargs) + return self.make_block(result) + + @final + def copy(self, deep: bool = True) -> Self: + """copy constructor""" + values = self.values + refs: BlockValuesRefs | None + if deep: + values = values.copy() + refs = None + else: + refs = self.refs + return type(self)(values, placement=self._mgr_locs, ndim=self.ndim, refs=refs) + + # --------------------------------------------------------------------- + # Copy-on-Write Helpers + + @final + def _maybe_copy(self, using_cow: bool, inplace: bool) -> Self: + if using_cow and inplace: + deep = self.refs.has_reference() + blk = self.copy(deep=deep) + else: + blk = self if inplace else self.copy() + return blk + + @final + def _get_refs_and_copy(self, using_cow: bool, inplace: bool): + refs = None + copy = not inplace + if inplace: + if using_cow and self.refs.has_reference(): + copy = True + else: + refs = self.refs + return copy, refs + + # --------------------------------------------------------------------- + # Replace + + @final + def replace( + self, + to_replace, + value, + inplace: bool = False, + # mask may be pre-computed if we're called from replace_list + mask: npt.NDArray[np.bool_] | None = None, + using_cow: bool = False, + ) -> list[Block]: + """ + replace the to_replace value with value, possible to create new + blocks here this is just a call to putmask. + """ + + # Note: the checks we do in NDFrame.replace ensure we never get + # here with listlike to_replace or value, as those cases + # go through replace_list + values = self.values + + if isinstance(values, Categorical): + # TODO: avoid special-casing + # GH49404 + blk = self._maybe_copy(using_cow, inplace) + values = cast(Categorical, blk.values) + values._replace(to_replace=to_replace, value=value, inplace=True) + return [blk] + + if not self._can_hold_element(to_replace): + # We cannot hold `to_replace`, so we know immediately that + # replacing it is a no-op. + # Note: If to_replace were a list, NDFrame.replace would call + # replace_list instead of replace. + if using_cow: + return [self.copy(deep=False)] + else: + return [self] if inplace else [self.copy()] + + if mask is None: + mask = missing.mask_missing(values, to_replace) + if not mask.any(): + # Note: we get here with test_replace_extension_other incorrectly + # bc _can_hold_element is incorrect. + if using_cow: + return [self.copy(deep=False)] + else: + return [self] if inplace else [self.copy()] + + elif self._can_hold_element(value): + # TODO(CoW): Maybe split here as well into columns where mask has True + # and rest? 
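+            # NOTE (editorial, illustrative; not part of the pandas source):
+            # this is the in-dtype fast path, e.g. df.replace(1.5, 2.0) on a
+            # float64 column: ``mask`` marks the cells equal to ``to_replace``
+            # and ``putmask_inplace`` writes ``value`` into them without
+            # changing the block's dtype.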
+ blk = self._maybe_copy(using_cow, inplace) + putmask_inplace(blk.values, mask, value) + if not (self.is_object and value is None): + # if the user *explicitly* gave None, we keep None, otherwise + # may downcast to NaN + blocks = blk.convert(copy=False, using_cow=using_cow) + else: + blocks = [blk] + return blocks + + elif self.ndim == 1 or self.shape[0] == 1: + if value is None or value is NA: + blk = self.astype(np.dtype(object)) + else: + blk = self.coerce_to_target_dtype(value) + return blk.replace( + to_replace=to_replace, + value=value, + inplace=True, + mask=mask, + ) + + else: + # split so that we only upcast where necessary + blocks = [] + for i, nb in enumerate(self._split()): + blocks.extend( + type(self).replace( + nb, + to_replace=to_replace, + value=value, + inplace=True, + mask=mask[i : i + 1], + using_cow=using_cow, + ) + ) + return blocks + + @final + def _replace_regex( + self, + to_replace, + value, + inplace: bool = False, + mask=None, + using_cow: bool = False, + ) -> list[Block]: + """ + Replace elements by the given value. + + Parameters + ---------- + to_replace : object or pattern + Scalar to replace or regular expression to match. + value : object + Replacement object. + inplace : bool, default False + Perform inplace modification. + mask : array-like of bool, optional + True indicate corresponding element is ignored. + using_cow: bool, default False + Specifying if copy on write is enabled. + + Returns + ------- + List[Block] + """ + if not self._can_hold_element(to_replace): + # i.e. only if self.is_object is True, but could in principle include a + # String ExtensionBlock + if using_cow: + return [self.copy(deep=False)] + return [self] if inplace else [self.copy()] + + rx = re.compile(to_replace) + + block = self._maybe_copy(using_cow, inplace) + + replace_regex(block.values, rx, value, mask) + + return block.convert(copy=False, using_cow=using_cow) + + @final + def replace_list( + self, + src_list: Iterable[Any], + dest_list: Sequence[Any], + inplace: bool = False, + regex: bool = False, + using_cow: bool = False, + ) -> list[Block]: + """ + See BlockManager.replace_list docstring. 
+ """ + values = self.values + + if isinstance(values, Categorical): + # TODO: avoid special-casing + # GH49404 + blk = self._maybe_copy(using_cow, inplace) + values = cast(Categorical, blk.values) + values._replace(to_replace=src_list, value=dest_list, inplace=True) + return [blk] + + # Exclude anything that we know we won't contain + pairs = [ + (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x) + ] + if not len(pairs): + if using_cow: + return [self.copy(deep=False)] + # shortcut, nothing to replace + return [self] if inplace else [self.copy()] + + src_len = len(pairs) - 1 + + if is_string_dtype(values.dtype): + # Calculate the mask once, prior to the call of comp + # in order to avoid repeating the same computations + na_mask = ~isna(values) + masks: Iterable[npt.NDArray[np.bool_]] = ( + extract_bool_array( + cast( + ArrayLike, + compare_or_regex_search( + values, s[0], regex=regex, mask=na_mask + ), + ) + ) + for s in pairs + ) + else: + # GH#38086 faster if we know we dont need to check for regex + masks = (missing.mask_missing(values, s[0]) for s in pairs) + # Materialize if inplace = True, since the masks can change + # as we replace + if inplace: + masks = list(masks) + + if using_cow: + # Don't set up refs here, otherwise we will think that we have + # references when we check again later + rb = [self] + else: + rb = [self if inplace else self.copy()] + + for i, ((src, dest), mask) in enumerate(zip(pairs, masks)): + convert = i == src_len # only convert once at the end + new_rb: list[Block] = [] + + # GH-39338: _replace_coerce can split a block into + # single-column blocks, so track the index so we know + # where to index into the mask + for blk_num, blk in enumerate(rb): + if len(rb) == 1: + m = mask + else: + mib = mask + assert not isinstance(mib, bool) + m = mib[blk_num : blk_num + 1] + + # error: Argument "mask" to "_replace_coerce" of "Block" has + # incompatible type "Union[ExtensionArray, ndarray[Any, Any], bool]"; + # expected "ndarray[Any, dtype[bool_]]" + result = blk._replace_coerce( + to_replace=src, + value=dest, + mask=m, + inplace=inplace, + regex=regex, + using_cow=using_cow, + ) + + if using_cow and i != src_len: + # This is ugly, but we have to get rid of intermediate refs + # that did not go out of scope yet, otherwise we will trigger + # many unnecessary copies + for b in result: + ref = weakref.ref(b) + b.refs.referenced_blocks.pop( + b.refs.referenced_blocks.index(ref) + ) + + if convert and blk.is_object and not all(x is None for x in dest_list): + # GH#44498 avoid unwanted cast-back + result = extend_blocks( + [ + b.convert(copy=True and not using_cow, using_cow=using_cow) + for b in result + ] + ) + new_rb.extend(result) + rb = new_rb + return rb + + @final + def _replace_coerce( + self, + to_replace, + value, + mask: npt.NDArray[np.bool_], + inplace: bool = True, + regex: bool = False, + using_cow: bool = False, + ) -> list[Block]: + """ + Replace value corresponding to the given boolean array with another + value. + + Parameters + ---------- + to_replace : object or pattern + Scalar to replace or regular expression to match. + value : object + Replacement object. + mask : np.ndarray[bool] + True indicate corresponding element is ignored. + inplace : bool, default True + Perform inplace modification. + regex : bool, default False + If true, perform regular expression substitution. 
+ + Returns + ------- + List[Block] + """ + if should_use_regex(regex, to_replace): + return self._replace_regex( + to_replace, + value, + inplace=inplace, + mask=mask, + ) + else: + if value is None: + # gh-45601, gh-45836, gh-46634 + if mask.any(): + has_ref = self.refs.has_reference() + nb = self.astype(np.dtype(object), copy=False, using_cow=using_cow) + if (nb is self or using_cow) and not inplace: + nb = nb.copy() + elif inplace and has_ref and nb.refs.has_reference() and using_cow: + # no copy in astype and we had refs before + nb = nb.copy() + putmask_inplace(nb.values, mask, value) + return [nb] + if using_cow: + return [self] + return [self] if inplace else [self.copy()] + return self.replace( + to_replace=to_replace, + value=value, + inplace=inplace, + mask=mask, + using_cow=using_cow, + ) + + # --------------------------------------------------------------------- + # 2D Methods - Shared by NumpyBlock and NDArrayBackedExtensionBlock + # but not ExtensionBlock + + def _maybe_squeeze_arg(self, arg: np.ndarray) -> np.ndarray: + """ + For compatibility with 1D-only ExtensionArrays. + """ + return arg + + def _unwrap_setitem_indexer(self, indexer): + """ + For compatibility with 1D-only ExtensionArrays. + """ + return indexer + + # NB: this cannot be made cache_readonly because in mgr.set_values we pin + # new .values that can have different shape GH#42631 + @property + def shape(self) -> Shape: + return self.values.shape + + def iget(self, i: int | tuple[int, int] | tuple[slice, int]) -> np.ndarray: + # In the case where we have a tuple[slice, int], the slice will always + # be slice(None) + # Note: only reached with self.ndim == 2 + # Invalid index type "Union[int, Tuple[int, int], Tuple[slice, int]]" + # for "Union[ndarray[Any, Any], ExtensionArray]"; expected type + # "Union[int, integer[Any]]" + return self.values[i] # type: ignore[index] + + def _slice( + self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp] + ) -> ArrayLike: + """return a slice of my values""" + + return self.values[slicer] + + def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None: + """ + Modify block values in-place with new item value. + + If copy=True, first copy the underlying values in place before modifying + (for Copy-on-Write). + + Notes + ----- + `set_inplace` never creates a new array or new Block, whereas `setitem` + _may_ create a new array and always creates a new Block. + + Caller is responsible for checking values.dtype == self.dtype. + """ + if copy: + self.values = self.values.copy() + self.values[locs] = values + + @final + def take_nd( + self, + indexer: npt.NDArray[np.intp], + axis: AxisInt, + new_mgr_locs: BlockPlacement | None = None, + fill_value=lib.no_default, + ) -> Block: + """ + Take values according to indexer and return them as a block. + """ + values = self.values + + if fill_value is lib.no_default: + fill_value = self.fill_value + allow_fill = False + else: + allow_fill = True + + # Note: algos.take_nd has upcast logic similar to coerce_to_target_dtype + new_values = algos.take_nd( + values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value + ) + + # Called from three places in managers, all of which satisfy + # these assertions + if isinstance(self, ExtensionBlock): + # NB: in this case, the 'axis' kwarg will be ignored in the + # algos.take_nd call above. 
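+            # NOTE (editorial, illustrative; not part of the pandas source):
+            # for any Block, -1 entries in ``indexer`` are only filled when an
+            # explicit fill_value is passed, e.g. a hypothetical
+            #   blk.take_nd(np.array([0, -1], dtype=np.intp), axis=1,
+            #               fill_value=np.nan)
+            # fills the -1 slot with NaN and may upcast, in which case
+            # ``make_block`` below re-infers the block type.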
+ assert not (self.ndim == 1 and new_mgr_locs is None) + assert not (axis == 0 and new_mgr_locs is None) + + if new_mgr_locs is None: + new_mgr_locs = self._mgr_locs + + if new_values.dtype != self.dtype: + return self.make_block(new_values, new_mgr_locs) + else: + return self.make_block_same_class(new_values, new_mgr_locs) + + def _unstack( + self, + unstacker, + fill_value, + new_placement: npt.NDArray[np.intp], + needs_masking: npt.NDArray[np.bool_], + ): + """ + Return a list of unstacked blocks of self + + Parameters + ---------- + unstacker : reshape._Unstacker + fill_value : int + Only used in ExtensionBlock._unstack + new_placement : np.ndarray[np.intp] + allow_fill : bool + needs_masking : np.ndarray[bool] + + Returns + ------- + blocks : list of Block + New blocks of unstacked values. + mask : array-like of bool + The mask of columns of `blocks` we should keep. + """ + new_values, mask = unstacker.get_new_values( + self.values.T, fill_value=fill_value + ) + + mask = mask.any(0) + # TODO: in all tests we have mask.all(); can we rely on that? + + # Note: these next two lines ensure that + # mask.sum() == sum(len(nb.mgr_locs) for nb in blocks) + # which the calling function needs in order to pass verify_integrity=False + # to the BlockManager constructor + new_values = new_values.T[mask] + new_placement = new_placement[mask] + + bp = BlockPlacement(new_placement) + blocks = [new_block_2d(new_values, placement=bp)] + return blocks, mask + + # --------------------------------------------------------------------- + + def setitem(self, indexer, value, using_cow: bool = False) -> Block: + """ + Attempt self.values[indexer] = value, possibly creating a new array. + + Parameters + ---------- + indexer : tuple, list-like, array-like, slice, int + The subset of self.values to set + value : object + The value being set + using_cow: bool, default False + Signaling if CoW is used. + + Returns + ------- + Block + + Notes + ----- + `indexer` is a direct slice/positional indexer. `value` must + be a compatible shape. + """ + + value = self._standardize_fill_value(value) + + values = cast(np.ndarray, self.values) + if self.ndim == 2: + values = values.T + + # length checking + check_setitem_lengths(indexer, value, values) + + if self.dtype != _dtype_obj: + # GH48933: extract_array would convert a pd.Series value to np.ndarray + value = extract_array(value, extract_numpy=True) + try: + casted = np_can_hold_element(values.dtype, value) + except LossySetitemError: + # current dtype cannot store value, coerce to common dtype + nb = self.coerce_to_target_dtype(value, warn_on_upcast=True) + return nb.setitem(indexer, value) + else: + if self.dtype == _dtype_obj: + # TODO: avoid having to construct values[indexer] + vi = values[indexer] + if lib.is_list_like(vi): + # checking lib.is_scalar here fails on + # test_iloc_setitem_custom_object + casted = setitem_datetimelike_compat(values, len(vi), casted) + + self = self._maybe_copy(using_cow, inplace=True) + values = cast(np.ndarray, self.values.T) + if isinstance(casted, np.ndarray) and casted.ndim == 1 and len(casted) == 1: + # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615 + casted = casted[0, ...] + values[indexer] = casted + return self + + def putmask(self, mask, new, using_cow: bool = False) -> list[Block]: + """ + putmask the data to the block; it is possible that we may create a + new dtype of block + + Return the resulting block(s). 
+ + Parameters + ---------- + mask : np.ndarray[bool], SparseArray[bool], or BooleanArray + new : a ndarray/object + using_cow: bool, default False + + Returns + ------- + List[Block] + """ + orig_mask = mask + values = cast(np.ndarray, self.values) + mask, noop = validate_putmask(values.T, mask) + assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) + + if new is lib.no_default: + new = self.fill_value + + new = self._standardize_fill_value(new) + new = extract_array(new, extract_numpy=True) + + if noop: + if using_cow: + return [self.copy(deep=False)] + return [self] + + try: + casted = np_can_hold_element(values.dtype, new) + + self = self._maybe_copy(using_cow, inplace=True) + values = cast(np.ndarray, self.values) + + putmask_without_repeat(values.T, mask, casted) + return [self] + except LossySetitemError: + if self.ndim == 1 or self.shape[0] == 1: + # no need to split columns + + if not is_list_like(new): + # using just new[indexer] can't save us the need to cast + return self.coerce_to_target_dtype( + new, warn_on_upcast=True + ).putmask(mask, new) + else: + indexer = mask.nonzero()[0] + nb = self.setitem(indexer, new[indexer], using_cow=using_cow) + return [nb] + + else: + is_array = isinstance(new, np.ndarray) + + res_blocks = [] + nbs = self._split() + for i, nb in enumerate(nbs): + n = new + if is_array: + # we have a different value per-column + n = new[:, i : i + 1] + + submask = orig_mask[:, i : i + 1] + rbs = nb.putmask(submask, n, using_cow=using_cow) + res_blocks.extend(rbs) + return res_blocks + + def where( + self, other, cond, _downcast: str | bool = "infer", using_cow: bool = False + ) -> list[Block]: + """ + evaluate the block; return result block(s) from the result + + Parameters + ---------- + other : a ndarray/object + cond : np.ndarray[bool], SparseArray[bool], or BooleanArray + _downcast : str or None, default "infer" + Private because we only specify it when calling from fillna. + + Returns + ------- + List[Block] + """ + assert cond.ndim == self.ndim + assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) + + transpose = self.ndim == 2 + + cond = extract_bool_array(cond) + + # EABlocks override where + values = cast(np.ndarray, self.values) + orig_other = other + if transpose: + values = values.T + + icond, noop = validate_putmask(values, ~cond) + if noop: + # GH-39595: Always return a copy; short-circuit up/downcasting + if using_cow: + return [self.copy(deep=False)] + return [self.copy()] + + if other is lib.no_default: + other = self.fill_value + + other = self._standardize_fill_value(other) + + try: + # try/except here is equivalent to a self._can_hold_element check, + # but this gets us back 'casted' which we will re-use below; + # without using 'casted', expressions.where may do unwanted upcasts. + casted = np_can_hold_element(values.dtype, other) + except (ValueError, TypeError, LossySetitemError): + # we cannot coerce, return a compat dtype + + if self.ndim == 1 or self.shape[0] == 1: + # no need to split columns + + block = self.coerce_to_target_dtype(other) + blocks = block.where(orig_other, cond, using_cow=using_cow) + return self._maybe_downcast( + blocks, downcast=_downcast, using_cow=using_cow + ) + + else: + # since _maybe_downcast would split blocks anyway, we + # can avoid some potential upcast/downcast by splitting + # on the front end. 
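+                # NOTE (editorial, illustrative; not part of the pandas
+                # source): e.g. a 2D int64 block asked to hold NaN in only one
+                # column: splitting first upcasts just that single-column
+                # block to float64 and leaves the other columns' dtype intact.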
+ is_array = isinstance(other, (np.ndarray, ExtensionArray)) + + res_blocks = [] + nbs = self._split() + for i, nb in enumerate(nbs): + oth = other + if is_array: + # we have a different value per-column + oth = other[:, i : i + 1] + + submask = cond[:, i : i + 1] + rbs = nb.where( + oth, submask, _downcast=_downcast, using_cow=using_cow + ) + res_blocks.extend(rbs) + return res_blocks + + else: + other = casted + alt = setitem_datetimelike_compat(values, icond.sum(), other) + if alt is not other: + if is_list_like(other) and len(other) < len(values): + # call np.where with other to get the appropriate ValueError + np.where(~icond, values, other) + raise NotImplementedError( + "This should not be reached; call to np.where above is " + "expected to raise ValueError. Please report a bug at " + "github.com/pandas-dev/pandas" + ) + result = values.copy() + np.putmask(result, icond, alt) + else: + # By the time we get here, we should have all Series/Index + # args extracted to ndarray + if ( + is_list_like(other) + and not isinstance(other, np.ndarray) + and len(other) == self.shape[-1] + ): + # If we don't do this broadcasting here, then expressions.where + # will broadcast a 1D other to be row-like instead of + # column-like. + other = np.array(other).reshape(values.shape) + # If lengths don't match (or len(other)==1), we will raise + # inside expressions.where, see test_series_where + + # Note: expressions.where may upcast. + result = expressions.where(~icond, values, other) + # The np_can_hold_element check _should_ ensure that we always + # have result.dtype == self.dtype here. + + if transpose: + result = result.T + + return [self.make_block(result)] + + def fillna( + self, + value, + limit: int | None = None, + inplace: bool = False, + downcast=None, + using_cow: bool = False, + ) -> list[Block]: + """ + fillna on the block with the value. If we fail, then convert to + block to hold objects instead and try again + """ + # Caller is responsible for validating limit; if int it is strictly positive + inplace = validate_bool_kwarg(inplace, "inplace") + + if not self._can_hold_na: + # can short-circuit the isna call + noop = True + else: + mask = isna(self.values) + mask, noop = validate_putmask(self.values, mask) + + if noop: + # we can't process the value, but nothing to do + if inplace: + if using_cow: + return [self.copy(deep=False)] + # Arbitrarily imposing the convention that we ignore downcast + # on no-op when inplace=True + return [self] + else: + # GH#45423 consistent downcasting on no-ops. + nb = self.copy(deep=not using_cow) + nbs = nb._maybe_downcast([nb], downcast=downcast, using_cow=using_cow) + return nbs + + if limit is not None: + mask[mask.cumsum(self.ndim - 1) > limit] = False + + if inplace: + nbs = self.putmask(mask.T, value, using_cow=using_cow) + else: + # without _downcast, we would break + # test_fillna_dtype_conversion_equiv_replace + nbs = self.where(value, ~mask.T, _downcast=False) + + # Note: blk._maybe_downcast vs self._maybe_downcast(nbs) + # makes a difference bc blk may have object dtype, which has + # different behavior in _maybe_downcast. 
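+        # NOTE (editorial, illustrative; not part of the pandas source):
+        # downcasting per result block lets an object-dtype block run its
+        # unconditional ``convert`` while, say, a float64 sibling block simply
+        # honours the ``downcast`` argument.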
+ return extend_blocks( + [ + blk._maybe_downcast([blk], downcast=downcast, using_cow=using_cow) + for blk in nbs + ] + ) + + def pad_or_backfill( + self, + *, + method: FillnaOptions, + axis: AxisInt = 0, + inplace: bool = False, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + downcast: Literal["infer"] | None = None, + using_cow: bool = False, + ) -> list[Block]: + if not self._can_hold_na: + # If there are no NAs, then interpolate is a no-op + if using_cow: + return [self.copy(deep=False)] + return [self] if inplace else [self.copy()] + + copy, refs = self._get_refs_and_copy(using_cow, inplace) + + # Dispatch to the NumpyExtensionArray method. + # We know self.array_values is a NumpyExtensionArray bc EABlock overrides + vals = cast(NumpyExtensionArray, self.array_values) + if axis == 1: + vals = vals.T + new_values = vals._pad_or_backfill( + method=method, + limit=limit, + limit_area=limit_area, + copy=copy, + ) + if axis == 1: + new_values = new_values.T + + data = extract_array(new_values, extract_numpy=True) + + nb = self.make_block_same_class(data, refs=refs) + return nb._maybe_downcast([nb], downcast, using_cow) + + @final + def interpolate( + self, + *, + method: InterpolateOptions, + index: Index, + inplace: bool = False, + limit: int | None = None, + limit_direction: Literal["forward", "backward", "both"] = "forward", + limit_area: Literal["inside", "outside"] | None = None, + downcast: Literal["infer"] | None = None, + using_cow: bool = False, + **kwargs, + ) -> list[Block]: + inplace = validate_bool_kwarg(inplace, "inplace") + # error: Non-overlapping equality check [...] + if method == "asfreq": # type: ignore[comparison-overlap] + # clean_fill_method used to allow this + missing.clean_fill_method(method) + + if not self._can_hold_na: + # If there are no NAs, then interpolate is a no-op + if using_cow: + return [self.copy(deep=False)] + return [self] if inplace else [self.copy()] + + # TODO(3.0): this case will not be reachable once GH#53638 is enforced + if self.dtype == _dtype_obj: + # only deal with floats + # bc we already checked that can_hold_na, we don't have int dtype here + # test_interp_basic checks that we make a copy here + if using_cow: + return [self.copy(deep=False)] + return [self] if inplace else [self.copy()] + + copy, refs = self._get_refs_and_copy(using_cow, inplace) + + # Dispatch to the EA method. + new_values = self.array_values.interpolate( + method=method, + axis=self.ndim - 1, + index=index, + limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + copy=copy, + **kwargs, + ) + data = extract_array(new_values, extract_numpy=True) + + nb = self.make_block_same_class(data, refs=refs) + return nb._maybe_downcast([nb], downcast, using_cow) + + @final + def diff(self, n: int) -> list[Block]: + """return block for the diff of the values""" + # only reached with ndim == 2 + # TODO(EA2D): transpose will be unnecessary with 2D EAs + new_values = algos.diff(self.values.T, n, axis=0).T + return [self.make_block(values=new_values)] + + def shift(self, periods: int, fill_value: Any = None) -> list[Block]: + """shift the block by periods, possibly upcast""" + # convert integer to float if necessary. need to do a lot more than + # that, handle boolean etc also + axis = self.ndim - 1 + + # Note: periods is never 0 here, as that is handled at the top of + # NDFrame.shift. If that ever changes, we can do a check for periods=0 + # and possibly avoid coercing. 
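+        # NOTE (editorial, illustrative; not part of the pandas source): the
+        # LossySetitemError path below is what upcasts e.g. an int64 block to
+        # float64 when shifting with the default (NaN) fill_value:
+        #   pd.Series([1, 2, 3]).shift(1)  ->  [NaN, 1.0, 2.0], dtype float64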
+
+        if not lib.is_scalar(fill_value) and self.dtype != _dtype_obj:
+            # with object dtype there is nothing to promote, and the user can
+            # pass pretty much any weird fill_value they like
+            # see test_shift_object_non_scalar_fill
+            raise ValueError("fill_value must be a scalar")
+
+        fill_value = self._standardize_fill_value(fill_value)
+
+        try:
+            # error: Argument 1 to "np_can_hold_element" has incompatible type
+            # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]"
+            casted = np_can_hold_element(
+                self.dtype, fill_value  # type: ignore[arg-type]
+            )
+        except LossySetitemError:
+            nb = self.coerce_to_target_dtype(fill_value)
+            return nb.shift(periods, fill_value=fill_value)
+
+        else:
+            values = cast(np.ndarray, self.values)
+            new_values = shift(values, periods, axis, casted)
+            return [self.make_block_same_class(new_values)]
+
+    @final
+    def quantile(
+        self,
+        qs: Index,  # with dtype float64
+        interpolation: QuantileInterpolation = "linear",
+    ) -> Block:
+        """
+        compute the quantiles of the block's values
+
+        Parameters
+        ----------
+        qs : Index
+            The quantiles to be computed in float64.
+        interpolation : str, default 'linear'
+            Type of interpolation.
+
+        Returns
+        -------
+        Block
+        """
+        # We should always have ndim == 2 because Series dispatches to DataFrame
+        assert self.ndim == 2
+        assert is_list_like(qs)  # caller is responsible for this
+
+        result = quantile_compat(self.values, np.asarray(qs._values), interpolation)
+        # ensure_block_shape needed for cases where we start with EA and result
+        # is ndarray, e.g. IntegerArray, SparseArray
+        result = ensure_block_shape(result, ndim=2)
+        return new_block_2d(result, placement=self._mgr_locs)
+
+    @final
+    def round(self, decimals: int, using_cow: bool = False) -> Self:
+        """
+        Rounds the values.
+        If the block is not of an integer or float dtype, nothing happens.
+        This is consistent with DataFrame.round behavior.
+        (Note: Series.round would raise)
+
+        Parameters
+        ----------
+        decimals: int,
+            Number of decimal places to round to.
+            Caller is responsible for validating this
+        using_cow: bool,
+            Whether Copy on Write is enabled right now
+        """
+        if not self.is_numeric or self.is_bool:
+            return self.copy(deep=not using_cow)
+        refs = None
+        # TODO: round only defined on BaseMaskedArray
+        # Series also does this, so would need to fix both places
+        # error: Item "ExtensionArray" of "Union[ndarray[Any, Any], ExtensionArray]"
+        # has no attribute "round"
+        values = self.values.round(decimals)  # type: ignore[union-attr]
+        if values is self.values:
+            refs = self.refs
+            if not using_cow:
+                # Normally would need to do this before, but
+                # numpy only returns same array when round operation
+                # is no-op
+                # https://github.com/numpy/numpy/blob/486878b37fc7439a3b2b87747f50db9b62fea8eb/numpy/core/src/multiarray/calculation.c#L625-L636
+                values = values.copy()
+        return self.make_block_same_class(values, refs=refs)
+
+    # ---------------------------------------------------------------------
+    # Abstract Methods Overridden By EABackedBlock and NumpyBlock
+
+    def delete(self, loc) -> list[Block]:
+        """Deletes the locs from the block.
+
+        We split the block to avoid copying the underlying data. We create new
+        blocks for every connected segment of the initial block that is not deleted.
+        The new blocks point to the initial array.
+ """ + if not is_list_like(loc): + loc = [loc] + + if self.ndim == 1: + values = cast(np.ndarray, self.values) + values = np.delete(values, loc) + mgr_locs = self._mgr_locs.delete(loc) + return [type(self)(values, placement=mgr_locs, ndim=self.ndim)] + + if np.max(loc) >= self.values.shape[0]: + raise IndexError + + # Add one out-of-bounds indexer as maximum to collect + # all columns after our last indexer if any + loc = np.concatenate([loc, [self.values.shape[0]]]) + mgr_locs_arr = self._mgr_locs.as_array + new_blocks: list[Block] = [] + + previous_loc = -1 + # TODO(CoW): This is tricky, if parent block goes out of scope + # all split blocks are referencing each other even though they + # don't share data + refs = self.refs if self.refs.has_reference() else None + for idx in loc: + if idx == previous_loc + 1: + # There is no column between current and last idx + pass + else: + # No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[slice, slice]" + values = self.values[previous_loc + 1 : idx, :] # type: ignore[call-overload] # noqa: E501 + locs = mgr_locs_arr[previous_loc + 1 : idx] + nb = type(self)( + values, placement=BlockPlacement(locs), ndim=self.ndim, refs=refs + ) + new_blocks.append(nb) + + previous_loc = idx + + return new_blocks + + @property + def is_view(self) -> bool: + """return a boolean if I am possibly a view""" + raise AbstractMethodError(self) + + @property + def array_values(self) -> ExtensionArray: + """ + The array that Series.array returns. Always an ExtensionArray. + """ + raise AbstractMethodError(self) + + def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: + """ + return an internal format, currently just the ndarray + this is often overridden to handle to_dense like operations + """ + raise AbstractMethodError(self) + + +class EABackedBlock(Block): + """ + Mixin for Block subclasses backed by ExtensionArray. + """ + + values: ExtensionArray + + @final + def shift(self, periods: int, fill_value: Any = None) -> list[Block]: + """ + Shift the block by `periods`. + + Dispatches to underlying ExtensionArray and re-boxes in an + ExtensionBlock. + """ + # Transpose since EA.shift is always along axis=0, while we want to shift + # along rows. + new_values = self.values.T.shift(periods=periods, fill_value=fill_value).T + return [self.make_block_same_class(new_values)] + + @final + def setitem(self, indexer, value, using_cow: bool = False): + """ + Attempt self.values[indexer] = value, possibly creating a new array. + + This differs from Block.setitem by not allowing setitem to change + the dtype of the Block. + + Parameters + ---------- + indexer : tuple, list-like, array-like, slice, int + The subset of self.values to set + value : object + The value being set + using_cow: bool, default False + Signaling if CoW is used. + + Returns + ------- + Block + + Notes + ----- + `indexer` is a direct slice/positional indexer. `value` must + be a compatible shape. 
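+
+        For generic extension arrays, an incompatible ``value`` raises from
+        the underlying array; Interval and NDArray-backed blocks instead
+        coerce to a compatible dtype and retry (see the except branch below).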
+ """ + orig_indexer = indexer + orig_value = value + + indexer = self._unwrap_setitem_indexer(indexer) + value = self._maybe_squeeze_arg(value) + + values = self.values + if values.ndim == 2: + # TODO(GH#45419): string[pyarrow] tests break if we transpose + # unconditionally + values = values.T + check_setitem_lengths(indexer, value, values) + + try: + values[indexer] = value + except (ValueError, TypeError) as err: + _catch_deprecated_value_error(err) + + if isinstance(self.dtype, IntervalDtype): + # see TestSetitemFloatIntervalWithIntIntervalValues + nb = self.coerce_to_target_dtype(orig_value, warn_on_upcast=True) + return nb.setitem(orig_indexer, orig_value) + + elif isinstance(self, NDArrayBackedExtensionBlock): + nb = self.coerce_to_target_dtype(orig_value, warn_on_upcast=True) + return nb.setitem(orig_indexer, orig_value) + + else: + raise + + else: + return self + + @final + def where( + self, other, cond, _downcast: str | bool = "infer", using_cow: bool = False + ) -> list[Block]: + # _downcast private bc we only specify it when calling from fillna + arr = self.values.T + + cond = extract_bool_array(cond) + + orig_other = other + orig_cond = cond + other = self._maybe_squeeze_arg(other) + cond = self._maybe_squeeze_arg(cond) + + if other is lib.no_default: + other = self.fill_value + + icond, noop = validate_putmask(arr, ~cond) + if noop: + # GH#44181, GH#45135 + # Avoid a) raising for Interval/PeriodDtype and b) unnecessary object upcast + if using_cow: + return [self.copy(deep=False)] + return [self.copy()] + + try: + res_values = arr._where(cond, other).T + except (ValueError, TypeError) as err: + _catch_deprecated_value_error(err) + + if self.ndim == 1 or self.shape[0] == 1: + if isinstance(self.dtype, IntervalDtype): + # TestSetitemFloatIntervalWithIntIntervalValues + blk = self.coerce_to_target_dtype(orig_other) + nbs = blk.where(orig_other, orig_cond, using_cow=using_cow) + return self._maybe_downcast( + nbs, downcast=_downcast, using_cow=using_cow + ) + + elif isinstance(self, NDArrayBackedExtensionBlock): + # NB: not (yet) the same as + # isinstance(values, NDArrayBackedExtensionArray) + blk = self.coerce_to_target_dtype(orig_other) + nbs = blk.where(orig_other, orig_cond, using_cow=using_cow) + return self._maybe_downcast( + nbs, downcast=_downcast, using_cow=using_cow + ) + + else: + raise + + else: + # Same pattern we use in Block.putmask + is_array = isinstance(orig_other, (np.ndarray, ExtensionArray)) + + res_blocks = [] + nbs = self._split() + for i, nb in enumerate(nbs): + n = orig_other + if is_array: + # we have a different value per-column + n = orig_other[:, i : i + 1] + + submask = orig_cond[:, i : i + 1] + rbs = nb.where(n, submask, using_cow=using_cow) + res_blocks.extend(rbs) + return res_blocks + + nb = self.make_block_same_class(res_values) + return [nb] + + @final + def putmask(self, mask, new, using_cow: bool = False) -> list[Block]: + """ + See Block.putmask.__doc__ + """ + mask = extract_bool_array(mask) + if new is lib.no_default: + new = self.fill_value + + orig_new = new + orig_mask = mask + new = self._maybe_squeeze_arg(new) + mask = self._maybe_squeeze_arg(mask) + + if not mask.any(): + if using_cow: + return [self.copy(deep=False)] + return [self] + + self = self._maybe_copy(using_cow, inplace=True) + values = self.values + if values.ndim == 2: + values = values.T + + try: + # Caller is responsible for ensuring matching lengths + values._putmask(mask, new) + except (TypeError, ValueError) as err: + _catch_deprecated_value_error(err) + + if 
self.ndim == 1 or self.shape[0] == 1: + if isinstance(self.dtype, IntervalDtype): + # Discussion about what we want to support in the general + # case GH#39584 + blk = self.coerce_to_target_dtype(orig_new, warn_on_upcast=True) + return blk.putmask(orig_mask, orig_new) + + elif isinstance(self, NDArrayBackedExtensionBlock): + # NB: not (yet) the same as + # isinstance(values, NDArrayBackedExtensionArray) + blk = self.coerce_to_target_dtype(orig_new, warn_on_upcast=True) + return blk.putmask(orig_mask, orig_new) + + else: + raise + + else: + # Same pattern we use in Block.putmask + is_array = isinstance(orig_new, (np.ndarray, ExtensionArray)) + + res_blocks = [] + nbs = self._split() + for i, nb in enumerate(nbs): + n = orig_new + if is_array: + # we have a different value per-column + n = orig_new[:, i : i + 1] + + submask = orig_mask[:, i : i + 1] + rbs = nb.putmask(submask, n) + res_blocks.extend(rbs) + return res_blocks + + return [self] + + @final + def delete(self, loc) -> list[Block]: + # This will be unnecessary if/when __array_function__ is implemented + if self.ndim == 1: + values = self.values.delete(loc) + mgr_locs = self._mgr_locs.delete(loc) + return [type(self)(values, placement=mgr_locs, ndim=self.ndim)] + elif self.values.ndim == 1: + # We get here through to_stata + return [] + return super().delete(loc) + + @final + @cache_readonly + def array_values(self) -> ExtensionArray: + return self.values + + @final + def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: + """ + return object dtype as boxed values, such as Timestamps/Timedelta + """ + values: ArrayLike = self.values + if dtype == _dtype_obj: + values = values.astype(object) + # TODO(EA2D): reshape not needed with 2D EAs + return np.asarray(values).reshape(self.shape) + + @final + def pad_or_backfill( + self, + *, + method: FillnaOptions, + axis: AxisInt = 0, + inplace: bool = False, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + downcast: Literal["infer"] | None = None, + using_cow: bool = False, + ) -> list[Block]: + values = self.values + copy, refs = self._get_refs_and_copy(using_cow, inplace) + + if values.ndim == 2 and axis == 1: + # NDArrayBackedExtensionArray.fillna assumes axis=0 + new_values = values.T._pad_or_backfill(method=method, limit=limit).T + else: + new_values = values._pad_or_backfill(method=method, limit=limit) + return [self.make_block_same_class(new_values)] + + +class ExtensionBlock(libinternals.Block, EABackedBlock): + """ + Block for holding extension types. + + Notes + ----- + This holds all 3rd-party extension array types. It's also the immediate + parent class for our internal extension types' blocks. + + ExtensionArrays are limited to 1-D. 
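+    As a consequence, a DataFrame with N extension-dtype columns is backed by
+    N one-column ExtensionBlocks rather than one consolidated 2-D block.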
+ """ + + values: ExtensionArray + + def fillna( + self, + value, + limit: int | None = None, + inplace: bool = False, + downcast=None, + using_cow: bool = False, + ) -> list[Block]: + if isinstance(self.dtype, IntervalDtype): + # Block.fillna handles coercion (test_fillna_interval) + return super().fillna( + value=value, + limit=limit, + inplace=inplace, + downcast=downcast, + using_cow=using_cow, + ) + if using_cow and self._can_hold_na and not self.values._hasna: + refs = self.refs + new_values = self.values + else: + copy, refs = self._get_refs_and_copy(using_cow, inplace) + + try: + new_values = self.values.fillna( + value=value, method=None, limit=limit, copy=copy + ) + except TypeError: + # 3rd party EA that has not implemented copy keyword yet + refs = None + new_values = self.values.fillna(value=value, method=None, limit=limit) + # issue the warning *after* retrying, in case the TypeError + # was caused by an invalid fill_value + warnings.warn( + # GH#53278 + "ExtensionArray.fillna added a 'copy' keyword in pandas " + "2.1.0. In a future version, ExtensionArray subclasses will " + "need to implement this keyword or an exception will be " + "raised. In the interim, the keyword is ignored by " + f"{type(self.values).__name__}.", + DeprecationWarning, + stacklevel=find_stack_level(), + ) + + nb = self.make_block_same_class(new_values, refs=refs) + return nb._maybe_downcast([nb], downcast, using_cow=using_cow) + + @cache_readonly + def shape(self) -> Shape: + # TODO(EA2D): override unnecessary with 2D EAs + if self.ndim == 1: + return (len(self.values),) + return len(self._mgr_locs), len(self.values) + + def iget(self, i: int | tuple[int, int] | tuple[slice, int]): + # In the case where we have a tuple[slice, int], the slice will always + # be slice(None) + # We _could_ make the annotation more specific, but mypy would + # complain about override mismatch: + # Literal[0] | tuple[Literal[0], int] | tuple[slice, int] + + # Note: only reached with self.ndim == 2 + + if isinstance(i, tuple): + # TODO(EA2D): unnecessary with 2D EAs + col, loc = i + if not com.is_null_slice(col) and col != 0: + raise IndexError(f"{self} only contains one item") + if isinstance(col, slice): + # the is_null_slice check above assures that col is slice(None) + # so what we want is a view on all our columns and row loc + if loc < 0: + loc += len(self.values) + # Note: loc:loc+1 vs [[loc]] makes a difference when called + # from fast_xs because we want to get a view back. + return self.values[loc : loc + 1] + return self.values[loc] + else: + if i != 0: + raise IndexError(f"{self} only contains one item") + return self.values + + def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None: + # When an ndarray, we should have locs.tolist() == [0] + # When a BlockPlacement we should have list(locs) == [0] + if copy: + self.values = self.values.copy() + self.values[:] = values + + def _maybe_squeeze_arg(self, arg): + """ + If necessary, squeeze a (N, 1) ndarray to (N,) + """ + # e.g. if we are passed a 2D mask for putmask + if ( + isinstance(arg, (np.ndarray, ExtensionArray)) + and arg.ndim == self.values.ndim + 1 + ): + # TODO(EA2D): unnecessary with 2D EAs + assert arg.shape[1] == 1 + # error: No overload variant of "__getitem__" of "ExtensionArray" + # matches argument type "Tuple[slice, int]" + arg = arg[:, 0] # type: ignore[call-overload] + elif isinstance(arg, ABCDataFrame): + # 2022-01-06 only reached for setitem + # TODO: should we avoid getting here with DataFrame? 
+            assert arg.shape[1] == 1
+            arg = arg._ixs(0, axis=1)._values
+
+        return arg
+
+    def _unwrap_setitem_indexer(self, indexer):
+        """
+        Adapt a 2D-indexer to our 1D values.
+
+        This is intended for 'setitem', not 'iget' or '_slice'.
+        """
+        # TODO: ATM this doesn't work for iget/_slice, can we change that?
+
+        if isinstance(indexer, tuple) and len(indexer) == 2:
+            # TODO(EA2D): not needed with 2D EAs
+            # Should never have length > 2. Caller is responsible for checking.
+            # Length 1 is reached via setitem_single_block and setitem_single_column
+            #  each of which pass indexer=(pi,)
+            if all(isinstance(x, np.ndarray) and x.ndim == 2 for x in indexer):
+                # GH#44703 went through indexing.maybe_convert_ix
+                first, second = indexer
+                if not (
+                    second.size == 1 and (second == 0).all() and first.shape[1] == 1
+                ):
+                    raise NotImplementedError(
+                        "This should not be reached. Please report a bug at "
+                        "github.com/pandas-dev/pandas/"
+                    )
+                indexer = first[:, 0]
+
+            elif lib.is_integer(indexer[1]) and indexer[1] == 0:
+                # reached via setitem_single_block passing the whole indexer
+                indexer = indexer[0]
+
+            elif com.is_null_slice(indexer[1]):
+                indexer = indexer[0]
+
+            elif is_list_like(indexer[1]) and indexer[1][0] == 0:
+                indexer = indexer[0]
+
+            else:
+                raise NotImplementedError(
+                    "This should not be reached. Please report a bug at "
+                    "github.com/pandas-dev/pandas/"
+                )
+        return indexer
+
+    @property
+    def is_view(self) -> bool:
+        """Extension arrays are never treated as views."""
+        return False
+
+    @cache_readonly
+    def is_numeric(self):
+        return self.values.dtype._is_numeric
+
+    def _slice(
+        self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp]
+    ) -> ExtensionArray:
+        """
+        Return a slice of my values.
+
+        Parameters
+        ----------
+        slicer : slice, ndarray[int], or ndarray[bool]
+            Valid (non-reducing) indexer for self.values.
+
+        Returns
+        -------
+        ExtensionArray
+        """
+        # Notes: ndarray[bool] is only reachable via get_rows_with_mask, which
+        #  is only for Series, i.e. self.ndim == 1.
+
+        # return same dims as we currently have
+        if self.ndim == 2:
+            # reached via getitem_block via _slice_take_blocks_ax0
+            # TODO(EA2D): won't be necessary with 2D EAs
+
+            if not isinstance(slicer, slice):
+                raise AssertionError(
+                    "invalid slicing for a 1-ndim ExtensionArray", slicer
+                )
+            # GH#32959 only full-slicers along fake-dim0 are valid
+            # TODO(EA2D): won't be necessary with 2D EAs
+            # range(1) instead of self._mgr_locs to avoid exception on [::-1]
+            #  see test_iloc_getitem_slice_negative_step_ea_block
+            new_locs = range(1)[slicer]
+            if not len(new_locs):
+                raise AssertionError(
+                    "invalid slicing for a 1-ndim ExtensionArray", slicer
+                )
+            slicer = slice(None)
+
+        return self.values[slicer]
+
+    @final
+    def slice_block_rows(self, slicer: slice) -> Self:
+        """
+        Perform a __getitem__-like operation specialized to slicing along the index.
+        """
+        # GH#42787 in principle this is equivalent to values[..., slicer], but we don't
+        # require subclasses of ExtensionArray to support that form (for now).
+        new_values = self.values[slicer]
+        return type(self)(new_values, self._mgr_locs, ndim=self.ndim, refs=self.refs)
+
+    def _unstack(
+        self,
+        unstacker,
+        fill_value,
+        new_placement: npt.NDArray[np.intp],
+        needs_masking: npt.NDArray[np.bool_],
+    ):
+        # ExtensionArray-safe unstack.
+        # We override Block._unstack, which unstacks directly on the
+        # values of the array. For EA-backed blocks, this would require
+        # converting to a 2-D ndarray of objects.
+ # Instead, we unstack an ndarray of integer positions, followed by + # a `take` on the actual values. + + # Caller is responsible for ensuring self.shape[-1] == len(unstacker.index) + new_values, mask = unstacker.arange_result + + # Note: these next two lines ensure that + # mask.sum() == sum(len(nb.mgr_locs) for nb in blocks) + # which the calling function needs in order to pass verify_integrity=False + # to the BlockManager constructor + new_values = new_values.T[mask] + new_placement = new_placement[mask] + + # needs_masking[i] calculated once in BlockManager.unstack tells + # us if there are any -1s in the relevant indices. When False, + # that allows us to go through a faster path in 'take', among + # other things avoiding e.g. Categorical._validate_scalar. + blocks = [ + # TODO: could cast to object depending on fill_value? + type(self)( + self.values.take( + indices, allow_fill=needs_masking[i], fill_value=fill_value + ), + BlockPlacement(place), + ndim=2, + ) + for i, (indices, place) in enumerate(zip(new_values, new_placement)) + ] + return blocks, mask + + +class NumpyBlock(libinternals.NumpyBlock, Block): + values: np.ndarray + __slots__ = () + + @property + def is_view(self) -> bool: + """return a boolean if I am possibly a view""" + return self.values.base is not None + + @property + def array_values(self) -> ExtensionArray: + return NumpyExtensionArray(self.values) + + def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: + if dtype == _dtype_obj: + return self.values.astype(_dtype_obj) + return self.values + + @cache_readonly + def is_numeric(self) -> bool: # type: ignore[override] + dtype = self.values.dtype + kind = dtype.kind + + return kind in "fciub" + + +class NumericBlock(NumpyBlock): + # this Block type is kept for backwards-compatibility + # TODO(3.0): delete and remove deprecation in __init__.py. + __slots__ = () + + +class ObjectBlock(NumpyBlock): + # this Block type is kept for backwards-compatibility + # TODO(3.0): delete and remove deprecation in __init__.py. + __slots__ = () + + +class NDArrayBackedExtensionBlock(libinternals.NDArrayBackedBlock, EABackedBlock): + """ + Block backed by an NDArrayBackedExtensionArray + """ + + values: NDArrayBackedExtensionArray + + @property + def is_view(self) -> bool: + """return a boolean if I am possibly a view""" + # check the ndarray values of the DatetimeIndex values + return self.values._ndarray.base is not None + + +def _catch_deprecated_value_error(err: Exception) -> None: + """ + We catch ValueError for now, but only a specific one raised by DatetimeArray + which will no longer be raised in version 2.0. + """ + if isinstance(err, ValueError): + if isinstance(err, IncompatibleFrequency): + pass + elif "'value.closed' is" in str(err): + # IntervalDtype mismatched 'closed' + pass + + +class DatetimeLikeBlock(NDArrayBackedExtensionBlock): + """Block for datetime64[ns], timedelta64[ns].""" + + __slots__ = () + is_numeric = False + values: DatetimeArray | TimedeltaArray + + +class DatetimeTZBlock(DatetimeLikeBlock): + """implement a datetime64 block with a tz attribute""" + + values: DatetimeArray + + __slots__ = () + + +# ----------------------------------------------------------------- +# Constructor Helpers + + +def maybe_coerce_values(values: ArrayLike) -> ArrayLike: + """ + Input validation for values passed to __init__. Ensure that + any datetime64/timedelta64 dtypes are in nanoseconds. Ensure + that we do not have string dtypes. 
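+    For example, a datetime64[ns] ndarray is wrapped into a DatetimeArray, a
+    fixed-width string ndarray is cast to object dtype, and any ``freq`` on a
+    DatetimeArray/TimedeltaArray is dropped.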
+ + Parameters + ---------- + values : np.ndarray or ExtensionArray + + Returns + ------- + values : np.ndarray or ExtensionArray + """ + # Caller is responsible for ensuring NumpyExtensionArray is already extracted. + + if isinstance(values, np.ndarray): + values = ensure_wrapped_if_datetimelike(values) + + if issubclass(values.dtype.type, str): + values = np.array(values, dtype=object) + + if isinstance(values, (DatetimeArray, TimedeltaArray)) and values.freq is not None: + # freq is only stored in DatetimeIndex/TimedeltaIndex, not in Series/DataFrame + values = values._with_freq(None) + + return values + + +def get_block_type(dtype: DtypeObj) -> type[Block]: + """ + Find the appropriate Block subclass to use for the given values and dtype. + + Parameters + ---------- + dtype : numpy or pandas dtype + + Returns + ------- + cls : class, subclass of Block + """ + if isinstance(dtype, DatetimeTZDtype): + return DatetimeTZBlock + elif isinstance(dtype, PeriodDtype): + return NDArrayBackedExtensionBlock + elif isinstance(dtype, ExtensionDtype): + # Note: need to be sure NumpyExtensionArray is unwrapped before we get here + return ExtensionBlock + + # We use kind checks because it is much more performant + # than is_foo_dtype + kind = dtype.kind + if kind in "Mm": + return DatetimeLikeBlock + + return NumpyBlock + + +def new_block_2d( + values: ArrayLike, placement: BlockPlacement, refs: BlockValuesRefs | None = None +): + # new_block specialized to case with + # ndim=2 + # isinstance(placement, BlockPlacement) + # check_ndim/ensure_block_shape already checked + klass = get_block_type(values.dtype) + + values = maybe_coerce_values(values) + return klass(values, ndim=2, placement=placement, refs=refs) + + +def new_block( + values, + placement: BlockPlacement, + *, + ndim: int, + refs: BlockValuesRefs | None = None, +) -> Block: + # caller is responsible for ensuring: + # - values is NOT a NumpyExtensionArray + # - check_ndim/ensure_block_shape already checked + # - maybe_coerce_values already called/unnecessary + klass = get_block_type(values.dtype) + return klass(values, ndim=ndim, placement=placement, refs=refs) + + +def check_ndim(values, placement: BlockPlacement, ndim: int) -> None: + """ + ndim inference and validation. + + Validates that values.ndim and ndim are consistent. + Validates that len(values) and len(placement) are consistent. + + Parameters + ---------- + values : array-like + placement : BlockPlacement + ndim : int + + Raises + ------ + ValueError : the number of dimensions do not match + """ + + if values.ndim > ndim: + # Check for both np.ndarray and ExtensionArray + raise ValueError( + "Wrong number of dimensions. " + f"values.ndim > ndim [{values.ndim} > {ndim}]" + ) + + if not is_1d_only_ea_dtype(values.dtype): + # TODO(EA2D): special case not needed with 2D EAs + if values.ndim != ndim: + raise ValueError( + "Wrong number of dimensions. " + f"values.ndim != ndim [{values.ndim} != {ndim}]" + ) + if len(placement) != len(values): + raise ValueError( + f"Wrong number of items passed {len(values)}, " + f"placement implies {len(placement)}" + ) + elif ndim == 2 and len(placement) != 1: + # TODO(EA2D): special case unnecessary with 2D EAs + raise ValueError("need to split") + + +def extract_pandas_array( + values: ArrayLike, dtype: DtypeObj | None, ndim: int +) -> tuple[ArrayLike, DtypeObj | None]: + """ + Ensure that we don't allow NumpyExtensionArray / NumpyEADtype in internals. + """ + # For now, blocks should be backed by ndarrays when possible. 
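+    # For example, a NumpyExtensionArray wrapping np.array([1, 2, 3]) is
+    #  unwrapped to the plain ndarray, and NumpyEADtype("int64") is replaced
+    #  by np.dtype("int64").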
+    if isinstance(values, ABCNumpyExtensionArray):
+        values = values.to_numpy()
+        if ndim and ndim > 1:
+            # TODO(EA2D): special case not needed with 2D EAs
+            values = np.atleast_2d(values)
+
+    if isinstance(dtype, NumpyEADtype):
+        dtype = dtype.numpy_dtype
+
+    return values, dtype
+
+
+# -----------------------------------------------------------------
+
+
+def extend_blocks(result, blocks=None) -> list[Block]:
+    """return a new extended blocks, given the result"""
+    if blocks is None:
+        blocks = []
+    if isinstance(result, list):
+        for r in result:
+            if isinstance(r, list):
+                blocks.extend(r)
+            else:
+                blocks.append(r)
+    else:
+        assert isinstance(result, Block), type(result)
+        blocks.append(result)
+    return blocks
+
+
+def ensure_block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike:
+    """
+    Reshape if possible to have values.ndim == ndim.
+    """
+
+    if values.ndim < ndim:
+        if not is_1d_only_ea_dtype(values.dtype):
+            # TODO(EA2D): https://github.com/pandas-dev/pandas/issues/23023
+            # block.shape is incorrect for "2D" ExtensionArrays
+            # We can't, and don't need to, reshape.
+            values = cast("np.ndarray | DatetimeArray | TimedeltaArray", values)
+            values = values.reshape(1, -1)
+
+    return values
+
+
+def to_native_types(
+    values: ArrayLike,
+    *,
+    na_rep: str = "nan",
+    quoting=None,
+    float_format=None,
+    decimal: str = ".",
+    **kwargs,
+) -> npt.NDArray[np.object_]:
+    """convert to our native types format"""
+    if isinstance(values, Categorical) and values.categories.dtype.kind in "Mm":
+        # GH#40754 Convert categorical datetimes to datetime array
+        values = algos.take_nd(
+            values.categories._values,
+            ensure_platform_int(values._codes),
+            fill_value=na_rep,
+        )
+
+    values = ensure_wrapped_if_datetimelike(values)
+
+    if isinstance(values, (DatetimeArray, TimedeltaArray)):
+        if values.ndim == 1:
+            result = values._format_native_types(na_rep=na_rep, **kwargs)
+            result = result.astype(object, copy=False)
+            return result
+
+        # GH#21734 Process every column separately, they might have different formats
+        results_converted = []
+        for i in range(len(values)):
+            result = values[i, :]._format_native_types(na_rep=na_rep, **kwargs)
+            results_converted.append(result.astype(object, copy=False))
+        return np.vstack(results_converted)
+
+    elif values.dtype.kind == "f" and not isinstance(values.dtype, SparseDtype):
+        # see GH#13418: no special formatting is desired at the
+        # output (important for appropriate 'quoting' behaviour),
+        # so do not pass it through the FloatArrayFormatter
+        if float_format is None and decimal == ".":
+            mask = isna(values)
+
+            if not quoting:
+                values = values.astype(str)
+            else:
+                values = np.array(values, dtype="object")
+
+            values[mask] = na_rep
+            values = values.astype(object, copy=False)
+            return values
+
+        from pandas.io.formats.format import FloatArrayFormatter
+
+        formatter = FloatArrayFormatter(
+            values,
+            na_rep=na_rep,
+            float_format=float_format,
+            decimal=decimal,
+            quoting=quoting,
+            fixed_width=False,
+        )
+        res = formatter.get_result_as_array()
+        res = res.astype(object, copy=False)
+        return res
+
+    elif isinstance(values, ExtensionArray):
+        mask = isna(values)
+
+        new_values = np.asarray(values.astype(object))
+        new_values[mask] = na_rep
+        return new_values
+
+    else:
+        mask = isna(values)
+        itemsize = writers.word_len(na_rep)
+
+        if values.dtype != _dtype_obj and not quoting and itemsize:
+            values = values.astype(str)
+            if values.dtype.itemsize / np.dtype("U1").itemsize < itemsize:
+                # enlarge for the na_rep
+                values = values.astype(f"<U{itemsize}")
+        else:
+            values = np.array(values, dtype="object")
+
+        values[mask] = na_rep
+        values = values.astype(object, copy=False)
+        return values
+
+
+def external_values(values: ArrayLike) -> ArrayLike:
+    """
+    The array that Series.values returns (public attribute).
+
+    This has some historical constraints, and is overridden in block
+    subclasses to return the correct array (e.g. period returns
+    object ndarray and datetimetz a datetime64[ns] ndarray instead of
+    proper extension array).
+    """
+    if isinstance(values, (PeriodArray, IntervalArray)):
+        return values.astype(object)
+    elif isinstance(values, (DatetimeArray, TimedeltaArray)):
+        # NB: for datetime64tz this is different from np.asarray(values), since
+        #  that returns an object-dtype ndarray of Timestamps.
+        # Avoid raising in .astype in casting from dt64tz to dt64
+        values = values._ndarray
+
+    if isinstance(values, np.ndarray) and using_copy_on_write():
+        values = values.view()
+        values.flags.writeable = False
+
+    # TODO(CoW) we should also mark our ExtensionArrays as read-only
+
+    return values
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/concat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/concat.py
new file mode 100644
index 00000000..b2d463a8
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/concat.py
@@ -0,0 +1,598 @@
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    cast,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs import (
+    NaT,
+    algos as libalgos,
+    internals as libinternals,
+    lib,
+)
+from pandas._libs.missing import NA
+from pandas.util._decorators import cache_readonly
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.cast import (
+    ensure_dtype_can_hold_na,
+    find_common_type,
+)
+from pandas.core.dtypes.common import (
+    is_1d_only_ea_dtype,
+    is_scalar,
+    needs_i8_conversion,
+)
+from pandas.core.dtypes.concat import concat_compat
+from pandas.core.dtypes.dtypes import (
+    ExtensionDtype,
+    SparseDtype,
+)
+from pandas.core.dtypes.missing import (
+    is_valid_na_for_dtype,
+    isna,
+    isna_all,
+)
+
+from pandas.core.construction import ensure_wrapped_if_datetimelike
+from pandas.core.internals.array_manager import ArrayManager
+from pandas.core.internals.blocks import (
+    ensure_block_shape,
+    new_block_2d,
+)
+from pandas.core.internals.managers import (
+    BlockManager,
+    make_na_array,
+)
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+
+    from pandas._typing import (
+        ArrayLike,
+        AxisInt,
+        DtypeObj,
+        Manager2D,
+        Shape,
+    )
+
+    from pandas import Index
+    from pandas.core.internals.blocks import (
+        Block,
+        BlockPlacement,
+    )
+
+
+def _concatenate_array_managers(
+    mgrs: list[ArrayManager], axes: list[Index], concat_axis: AxisInt
+) -> Manager2D:
+    """
+    Concatenate array managers into one.
+
+    Parameters
+    ----------
+    mgrs : list of ArrayManager
+    axes : list of Index
+    concat_axis : int
+
+    Returns
+    -------
+    ArrayManager
+    """
+    if concat_axis == 1:
+        return mgrs[0].concat_vertical(mgrs, axes)
+    else:
+        # concatting along the columns -> combine reindexed arrays in a single manager
+        assert concat_axis == 0
+        return mgrs[0].concat_horizontal(mgrs, axes)
+
+
+def concatenate_managers(
+    mgrs_indexers, axes: list[Index], concat_axis: AxisInt, copy: bool
+) -> Manager2D:
+    """
+    Concatenate block managers into one.
+ + Parameters + ---------- + mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples + axes : list of Index + concat_axis : int + copy : bool + + Returns + ------- + BlockManager + """ + + needs_copy = copy and concat_axis == 0 + + # TODO(ArrayManager) this assumes that all managers are of the same type + if isinstance(mgrs_indexers[0][0], ArrayManager): + mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy) + # error: Argument 1 to "_concatenate_array_managers" has incompatible + # type "List[BlockManager]"; expected "List[Union[ArrayManager, + # SingleArrayManager, BlockManager, SingleBlockManager]]" + return _concatenate_array_managers( + mgrs, axes, concat_axis # type: ignore[arg-type] + ) + + # Assertions disabled for performance + # for tup in mgrs_indexers: + # # caller is responsible for ensuring this + # indexers = tup[1] + # assert concat_axis not in indexers + + if concat_axis == 0: + mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy) + return mgrs[0].concat_horizontal(mgrs, axes) + + if len(mgrs_indexers) > 0 and mgrs_indexers[0][0].nblocks > 0: + first_dtype = mgrs_indexers[0][0].blocks[0].dtype + if first_dtype in [np.float64, np.float32]: + # TODO: support more dtypes here. This will be simpler once + # JoinUnit.is_na behavior is deprecated. + if ( + all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in mgrs_indexers) + and len(mgrs_indexers) > 1 + ): + # Fastpath! + # Length restriction is just to avoid having to worry about 'copy' + shape = tuple(len(x) for x in axes) + nb = _concat_homogeneous_fastpath(mgrs_indexers, shape, first_dtype) + return BlockManager((nb,), axes) + + mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy) + + if len(mgrs) == 1: + mgr = mgrs[0] + out = mgr.copy(deep=False) + out.axes = axes + return out + + concat_plan = _get_combined_plan(mgrs) + + blocks = [] + values: ArrayLike + + for placement, join_units in concat_plan: + unit = join_units[0] + blk = unit.block + + if _is_uniform_join_units(join_units): + vals = [ju.block.values for ju in join_units] + + if not blk.is_extension: + # _is_uniform_join_units ensures a single dtype, so + # we can use np.concatenate, which is more performant + # than concat_compat + # error: Argument 1 to "concatenate" has incompatible type + # "List[Union[ndarray[Any, Any], ExtensionArray]]"; + # expected "Union[_SupportsArray[dtype[Any]], + # _NestedSequence[_SupportsArray[dtype[Any]]]]" + values = np.concatenate(vals, axis=1) # type: ignore[arg-type] + elif is_1d_only_ea_dtype(blk.dtype): + # TODO(EA2D): special-casing not needed with 2D EAs + values = concat_compat(vals, axis=0, ea_compat_axis=True) + values = ensure_block_shape(values, ndim=2) + else: + values = concat_compat(vals, axis=1) + + values = ensure_wrapped_if_datetimelike(values) + + fastpath = blk.values.dtype == values.dtype + else: + values = _concatenate_join_units(join_units, copy=copy) + fastpath = False + + if fastpath: + b = blk.make_block_same_class(values, placement=placement) + else: + b = new_block_2d(values, placement=placement) + + blocks.append(b) + + return BlockManager(tuple(blocks), axes) + + +def _maybe_reindex_columns_na_proxy( + axes: list[Index], + mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]], + needs_copy: bool, +) -> list[BlockManager]: + """ + Reindex along columns so that all of the BlockManagers being concatenated + have matching columns. 
+
+    Columns added in this reindexing have dtype=np.void, indicating they
+    should be ignored when choosing a column's final dtype.
+    """
+    new_mgrs = []
+
+    for mgr, indexers in mgrs_indexers:
+        # For axis=0 (i.e. columns) we use_na_proxy and only_slice, so this
+        #  is a cheap reindexing.
+        for i, indexer in indexers.items():
+            mgr = mgr.reindex_indexer(
+                axes[i],
+                indexers[i],
+                axis=i,
+                copy=False,
+                only_slice=True,  # only relevant for i==0
+                allow_dups=True,
+                use_na_proxy=True,  # only relevant for i==0
+            )
+        if needs_copy and not indexers:
+            mgr = mgr.copy()
+
+        new_mgrs.append(mgr)
+    return new_mgrs
+
+
+def _is_homogeneous_mgr(mgr: BlockManager, first_dtype: DtypeObj) -> bool:
+    """
+    Check if this Manager can be treated as a single ndarray.
+    """
+    if mgr.nblocks != 1:
+        return False
+    blk = mgr.blocks[0]
+    if not (blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1):
+        return False
+
+    return blk.dtype == first_dtype
+
+
+def _concat_homogeneous_fastpath(
+    mgrs_indexers, shape: Shape, first_dtype: np.dtype
+) -> Block:
+    """
+    With single-block managers with homogeneous dtypes (that can already hold nan),
+    we avoid the generic JoinUnit machinery and instead write the values
+    directly into a single preallocated array, using a typed take when
+    reindexing is needed.
+    """
+    # assumes
+    #  all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in mgrs_indexers)
+
+    if all(not indexers for _, indexers in mgrs_indexers):
+        # https://github.com/pandas-dev/pandas/pull/52685#issuecomment-1523287739
+        arrs = [mgr.blocks[0].values.T for mgr, _ in mgrs_indexers]
+        arr = np.concatenate(arrs).T
+        bp = libinternals.BlockPlacement(slice(shape[0]))
+        nb = new_block_2d(arr, bp)
+        return nb
+
+    arr = np.empty(shape, dtype=first_dtype)
+
+    if first_dtype == np.float64:
+        take_func = libalgos.take_2d_axis0_float64_float64
+    else:
+        take_func = libalgos.take_2d_axis0_float32_float32
+
+    start = 0
+    for mgr, indexers in mgrs_indexers:
+        mgr_len = mgr.shape[1]
+        end = start + mgr_len
+
+        if 0 in indexers:
+            take_func(
+                mgr.blocks[0].values,
+                indexers[0],
+                arr[:, start:end],
+            )
+        else:
+            # No reindexing necessary, we can copy values directly
+            arr[:, start:end] = mgr.blocks[0].values
+
+        start += mgr_len
+
+    bp = libinternals.BlockPlacement(slice(shape[0]))
+    nb = new_block_2d(arr, bp)
+    return nb
+
+
+def _get_combined_plan(
+    mgrs: list[BlockManager],
+) -> list[tuple[BlockPlacement, list[JoinUnit]]]:
+    plan = []
+
+    max_len = mgrs[0].shape[0]
+
+    blknos_list = [mgr.blknos for mgr in mgrs]
+    pairs = libinternals.get_concat_blkno_indexers(blknos_list)
+    for ind, (blknos, bp) in enumerate(pairs):
+        # assert bp.is_slice_like
+        # assert len(bp) > 0
+
+        units_for_bp = []
+        for k, mgr in enumerate(mgrs):
+            blkno = blknos[k]
+
+            nb = _get_block_for_concat_plan(mgr, bp, blkno, max_len=max_len)
+            unit = JoinUnit(nb)
+            units_for_bp.append(unit)
+
+        plan.append((bp, units_for_bp))
+
+    return plan
+
+
+def _get_block_for_concat_plan(
+    mgr: BlockManager, bp: BlockPlacement, blkno: int, *, max_len: int
+) -> Block:
+    blk = mgr.blocks[blkno]
+    # Assertions disabled for performance:
+    #  assert bp.is_slice_like
+    #  assert blkno != -1
+    #  assert (mgr.blknos[bp] == blkno).all()
+
+    if len(bp) == len(blk.mgr_locs) and (
+        blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1
+    ):
+        nb = blk
+    else:
+        ax0_blk_indexer = mgr.blklocs[bp.indexer]
+
+        slc = lib.maybe_indices_to_slice(ax0_blk_indexer, max_len)
+        # TODO: in all extant test cases 2023-04-08 we have a slice here.
+        #  Will this always be the case?
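+        # A slice result lets slice_block_columns below return a cheap view
+        #  (for ndarray-backed blocks), while an ndarray of positions falls
+        #  back to take_block_columns, which copies.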
+        if isinstance(slc, slice):
+            nb = blk.slice_block_columns(slc)
+        else:
+            nb = blk.take_block_columns(slc)
+
+    # assert nb.shape == (len(bp), mgr.shape[1])
+    return nb
+
+
+class JoinUnit:
+    def __init__(self, block: Block) -> None:
+        self.block = block
+
+    def __repr__(self) -> str:
+        return f"{type(self).__name__}({repr(self.block)})"
+
+    def _is_valid_na_for(self, dtype: DtypeObj) -> bool:
+        """
+        Check that we are all-NA of a type/dtype that is compatible with this dtype.
+        Augments `self.is_na` with an additional check of the type of NA values.
+        """
+        if not self.is_na:
+            return False
+
+        blk = self.block
+        if blk.dtype.kind == "V":
+            return True
+
+        if blk.dtype == object:
+            values = blk.values
+            return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K"))
+
+        na_value = blk.fill_value
+        if na_value is NaT and blk.dtype != dtype:
+            # e.g. we are dt64 and other is td64
+            # fill_values match but we should not cast blk.values to dtype
+            # TODO: this will need updating if we ever have non-nano dt64/td64
+            return False
+
+        if na_value is NA and needs_i8_conversion(dtype):
+            # FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat
+            # e.g. blk.dtype == "Int64" and dtype is td64, we don't want
+            # to consider these as matching
+            return False
+
+        # TODO: better to use can_hold_element?
+        return is_valid_na_for_dtype(na_value, dtype)
+
+    @cache_readonly
+    def is_na(self) -> bool:
+        blk = self.block
+        if blk.dtype.kind == "V":
+            return True
+
+        if not blk._can_hold_na:
+            return False
+
+        values = blk.values
+        if values.size == 0:
+            # GH#39122 this case will return False once deprecation is enforced
+            return True
+
+        if isinstance(values.dtype, SparseDtype):
+            return False
+
+        if values.ndim == 1:
+            # TODO(EA2D): no need for special case with 2D EAs
+            val = values[0]
+            if not is_scalar(val) or not isna(val):
+                # ideally isna_all would do this short-circuiting
+                return False
+            return isna_all(values)
+        else:
+            val = values[0][0]
+            if not is_scalar(val) or not isna(val):
+                # ideally isna_all would do this short-circuiting
+                return False
+            return all(isna_all(row) for row in values)
+
+    @cache_readonly
+    def is_na_after_size_and_isna_all_deprecation(self) -> bool:
+        """
+        Will self.is_na be True after values.size == 0 deprecation and isna_all
+        deprecation are enforced?
+        """
+        blk = self.block
+        if blk.dtype.kind == "V":
+            return True
+        return False
+
+    def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:
+        values: ArrayLike
+
+        if upcasted_na is None and self.block.dtype.kind != "V":
+            # No upcasting is necessary
+            return self.block.values
+        else:
+            fill_value = upcasted_na
+
+            if self._is_valid_na_for(empty_dtype):
+                # note: always holds when self.block.dtype.kind == "V"
+                blk_dtype = self.block.dtype
+
+                if blk_dtype == np.dtype("object"):
+                    # we want to avoid filling with np.nan if we are
+                    # using None; we already know that we are all
+                    # nulls
+                    values = cast(np.ndarray, self.block.values)
+                    if values.size and values[0, 0] is None:
+                        fill_value = None
+
+                return make_na_array(empty_dtype, self.block.shape, fill_value)
+
+            return self.block.values
+
+
+def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike:
+    """
+    Concatenate values from several join units along axis=1.
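+
+    Join units that are all-NA (including np.void placeholders created by
+    reindexing) are materialized as arrays filled with ``upcasted_na`` in the
+    common dtype before concatenation.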
+ """ + empty_dtype, empty_dtype_future = _get_empty_dtype(join_units) + + has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units) + upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks) + + to_concat = [ + ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na) + for ju in join_units + ] + + if any(is_1d_only_ea_dtype(t.dtype) for t in to_concat): + # TODO(EA2D): special case not needed if all EAs used HybridBlocks + + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[int, slice]" + to_concat = [ + t + if is_1d_only_ea_dtype(t.dtype) + else t[0, :] # type: ignore[call-overload] + for t in to_concat + ] + concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True) + concat_values = ensure_block_shape(concat_values, 2) + + else: + concat_values = concat_compat(to_concat, axis=1) + + if empty_dtype != empty_dtype_future: + if empty_dtype == concat_values.dtype: + # GH#39122, GH#40893 + warnings.warn( + "The behavior of DataFrame concatenation with empty or all-NA " + "entries is deprecated. In a future version, this will no longer " + "exclude empty or all-NA columns when determining the result dtypes. " + "To retain the old behavior, exclude the relevant entries before " + "the concat operation.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return concat_values + + +def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool): + """ + Find the NA value to go with this dtype. + """ + if isinstance(dtype, ExtensionDtype): + return dtype.na_value + elif dtype.kind in "mM": + return dtype.type("NaT") + elif dtype.kind in "fc": + return dtype.type("NaN") + elif dtype.kind == "b": + # different from missing.na_value_for_dtype + return None + elif dtype.kind in "iu": + if not has_none_blocks: + # different from missing.na_value_for_dtype + return None + return np.nan + elif dtype.kind == "O": + return np.nan + raise NotImplementedError + + +def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> tuple[DtypeObj, DtypeObj]: + """ + Return dtype and N/A values to use when concatenating specified units. + + Returned N/A value may be None which means there was no casting involved. + + Returns + ------- + dtype + """ + if lib.dtypes_all_equal([ju.block.dtype for ju in join_units]): + empty_dtype = join_units[0].block.dtype + return empty_dtype, empty_dtype + + has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units) + + dtypes = [unit.block.dtype for unit in join_units if not unit.is_na] + if not len(dtypes): + dtypes = [ + unit.block.dtype for unit in join_units if unit.block.dtype.kind != "V" + ] + + dtype = find_common_type(dtypes) + if has_none_blocks: + dtype = ensure_dtype_can_hold_na(dtype) + + dtype_future = dtype + if len(dtypes) != len(join_units): + dtypes_future = [ + unit.block.dtype + for unit in join_units + if not unit.is_na_after_size_and_isna_all_deprecation + ] + if not len(dtypes_future): + dtypes_future = [ + unit.block.dtype for unit in join_units if unit.block.dtype.kind != "V" + ] + + if len(dtypes) != len(dtypes_future): + dtype_future = find_common_type(dtypes_future) + if has_none_blocks: + dtype_future = ensure_dtype_can_hold_na(dtype_future) + + return dtype, dtype_future + + +def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool: + """ + Check if the join units consist of blocks of uniform type that can + be concatenated using Block.concat_same_type instead of the generic + _concatenate_join_units (which uses `concat_compat`). 
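+    For example, two float64 blocks qualify, while Int64 (masked) combined
+    with int64 (ndarray-backed) does not, since the block types differ.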
+ + """ + first = join_units[0].block + if first.dtype.kind == "V": + return False + return ( + # exclude cases where a) ju.block is None or b) we have e.g. Int64+int64 + all(type(ju.block) is type(first) for ju in join_units) + and + # e.g. DatetimeLikeBlock can be dt64 or td64, but these are not uniform + all( + ju.block.dtype == first.dtype + # GH#42092 we only want the dtype_equal check for non-numeric blocks + # (for now, may change but that would need a deprecation) + or ju.block.dtype.kind in "iub" + for ju in join_units + ) + and + # no blocks that would get missing values (can lead to type upcasts) + # unless we're an extension dtype. + all(not ju.is_na or ju.block.is_extension for ju in join_units) + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/construction.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/construction.py new file mode 100644 index 00000000..8bb6c6b5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/construction.py @@ -0,0 +1,1070 @@ +""" +Functions for preparing various inputs passed to the DataFrame or Series +constructors before passing them to a BlockManager. +""" +from __future__ import annotations + +from collections import abc +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np +from numpy import ma + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib + +from pandas.core.dtypes.astype import astype_is_view +from pandas.core.dtypes.cast import ( + construct_1d_arraylike_from_scalar, + dict_compat, + maybe_cast_to_datetime, + maybe_convert_platform, + maybe_infer_to_datetimelike, +) +from pandas.core.dtypes.common import ( + is_1d_only_ea_dtype, + is_integer_dtype, + is_list_like, + is_named_tuple, + is_object_dtype, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) + +from pandas.core import ( + algorithms, + common as com, +) +from pandas.core.arrays import ExtensionArray +from pandas.core.arrays.string_ import StringDtype +from pandas.core.construction import ( + array as pd_array, + ensure_wrapped_if_datetimelike, + extract_array, + range_to_ndarray, + sanitize_array, +) +from pandas.core.indexes.api import ( + DatetimeIndex, + Index, + TimedeltaIndex, + default_index, + ensure_index, + get_objs_combined_axis, + union_indexes, +) +from pandas.core.internals.array_manager import ( + ArrayManager, + SingleArrayManager, +) +from pandas.core.internals.blocks import ( + BlockPlacement, + ensure_block_shape, + new_block, + new_block_2d, +) +from pandas.core.internals.managers import ( + BlockManager, + SingleBlockManager, + create_block_manager_from_blocks, + create_block_manager_from_column_arrays, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + DtypeObj, + Manager, + npt, + ) +# --------------------------------------------------------------------- +# BlockManager Interface + + +def arrays_to_mgr( + arrays, + columns: Index, + index, + *, + dtype: DtypeObj | None = None, + verify_integrity: bool = True, + typ: str | None = None, + consolidate: bool = True, +) -> Manager: + """ + Segregate Series based on type and coerce into matrices. + + Needs to handle a lot of exceptional cases. 
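+    Returns a BlockManager when typ="block" and an ArrayManager when
+    typ="array".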
+ """ + if verify_integrity: + # figure out the index, if necessary + if index is None: + index = _extract_index(arrays) + else: + index = ensure_index(index) + + # don't force copy because getting jammed in an ndarray anyway + arrays, refs = _homogenize(arrays, index, dtype) + # _homogenize ensures + # - all(len(x) == len(index) for x in arrays) + # - all(x.ndim == 1 for x in arrays) + # - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays) + # - all(type(x) is not NumpyExtensionArray for x in arrays) + + else: + index = ensure_index(index) + arrays = [extract_array(x, extract_numpy=True) for x in arrays] + # with _from_arrays, the passed arrays should never be Series objects + refs = [None] * len(arrays) + + # Reached via DataFrame._from_arrays; we do minimal validation here + for arr in arrays: + if ( + not isinstance(arr, (np.ndarray, ExtensionArray)) + or arr.ndim != 1 + or len(arr) != len(index) + ): + raise ValueError( + "Arrays must be 1-dimensional np.ndarray or ExtensionArray " + "with length matching len(index)" + ) + + columns = ensure_index(columns) + if len(columns) != len(arrays): + raise ValueError("len(arrays) must match len(columns)") + + # from BlockManager perspective + axes = [columns, index] + + if typ == "block": + return create_block_manager_from_column_arrays( + arrays, axes, consolidate=consolidate, refs=refs + ) + elif typ == "array": + return ArrayManager(arrays, [index, columns]) + else: + raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'") + + +def rec_array_to_mgr( + data: np.rec.recarray | np.ndarray, + index, + columns, + dtype: DtypeObj | None, + copy: bool, + typ: str, +) -> Manager: + """ + Extract from a masked rec array and create the manager. + """ + # essentially process a record array then fill it + fdata = ma.getdata(data) + if index is None: + index = default_index(len(fdata)) + else: + index = ensure_index(index) + + if columns is not None: + columns = ensure_index(columns) + arrays, arr_columns = to_arrays(fdata, columns) + + # create the manager + + arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, len(index)) + if columns is None: + columns = arr_columns + + mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ) + + if copy: + mgr = mgr.copy() + return mgr + + +def mgr_to_mgr(mgr, typ: str, copy: bool = True): + """ + Convert to specific type of Manager. Does not copy if the type is already + correct. Does not guarantee a copy otherwise. `copy` keyword only controls + whether conversion from Block->ArrayManager copies the 1D arrays. 
+    """
+    new_mgr: Manager
+
+    if typ == "block":
+        if isinstance(mgr, BlockManager):
+            new_mgr = mgr
+        else:
+            if mgr.ndim == 2:
+                new_mgr = arrays_to_mgr(
+                    mgr.arrays, mgr.axes[0], mgr.axes[1], typ="block"
+                )
+            else:
+                new_mgr = SingleBlockManager.from_array(mgr.arrays[0], mgr.index)
+    elif typ == "array":
+        if isinstance(mgr, ArrayManager):
+            new_mgr = mgr
+        else:
+            if mgr.ndim == 2:
+                arrays = [mgr.iget_values(i) for i in range(len(mgr.axes[0]))]
+                if copy:
+                    arrays = [arr.copy() for arr in arrays]
+                new_mgr = ArrayManager(arrays, [mgr.axes[1], mgr.axes[0]])
+            else:
+                array = mgr.internal_values()
+                if copy:
+                    array = array.copy()
+                new_mgr = SingleArrayManager([array], [mgr.index])
+    else:
+        raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
+    return new_mgr
+
+
+# ---------------------------------------------------------------------
+# DataFrame Constructor Interface
+
+
+def ndarray_to_mgr(
+    values, index, columns, dtype: DtypeObj | None, copy: bool, typ: str
+) -> Manager:
+    # used in DataFrame.__init__
+    # input must be a ndarray, list, Series, Index, ExtensionArray
+
+    if isinstance(values, ABCSeries):
+        if columns is None:
+            if values.name is not None:
+                columns = Index([values.name])
+        if index is None:
+            index = values.index
+        else:
+            values = values.reindex(index)
+
+        # zero len case (GH #2234)
+        if not len(values) and columns is not None and len(columns):
+            values = np.empty((0, 1), dtype=object)
+
+    # if the array preparation does a copy -> avoid this for ArrayManager,
+    # since the copy is done on conversion to 1D arrays
+    copy_on_sanitize = False if typ == "array" else copy
+
+    vdtype = getattr(values, "dtype", None)
+    refs = None
+    if is_1d_only_ea_dtype(vdtype) or is_1d_only_ea_dtype(dtype):
+        # GH#19157
+
+        if isinstance(values, (np.ndarray, ExtensionArray)) and values.ndim > 1:
+            # GH#12513 an EA dtype passed with a 2D array, split into
+            #  multiple EAs that view the values
+            # error: No overload variant of "__getitem__" of "ExtensionArray"
+            # matches argument type "Tuple[slice, int]"
+            values = [
+                values[:, n]  # type: ignore[call-overload]
+                for n in range(values.shape[1])
+            ]
+        else:
+            values = [values]
+
+        if columns is None:
+            columns = Index(range(len(values)))
+        else:
+            columns = ensure_index(columns)
+
+        return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ)
+
+    elif isinstance(vdtype, ExtensionDtype):
+        # i.e.
Datetime64TZ, PeriodDtype; cases with is_1d_only_ea_dtype(vdtype) + # are already caught above + values = extract_array(values, extract_numpy=True) + if copy: + values = values.copy() + if values.ndim == 1: + values = values.reshape(-1, 1) + + elif isinstance(values, (ABCSeries, Index)): + if not copy_on_sanitize and ( + dtype is None or astype_is_view(values.dtype, dtype) + ): + refs = values._references + + if copy_on_sanitize: + values = values._values.copy() + else: + values = values._values + + values = _ensure_2d(values) + + elif isinstance(values, (np.ndarray, ExtensionArray)): + # drop subclass info + _copy = ( + copy_on_sanitize + if (dtype is None or astype_is_view(values.dtype, dtype)) + else False + ) + values = np.array(values, copy=_copy) + values = _ensure_2d(values) + + else: + # by definition an array here + # the dtypes will be coerced to a single dtype + values = _prep_ndarraylike(values, copy=copy_on_sanitize) + + if dtype is not None and values.dtype != dtype: + # GH#40110 see similar check inside sanitize_array + values = sanitize_array( + values, + None, + dtype=dtype, + copy=copy_on_sanitize, + allow_2d=True, + ) + + # _prep_ndarraylike ensures that values.ndim == 2 at this point + index, columns = _get_axes( + values.shape[0], values.shape[1], index=index, columns=columns + ) + + _check_values_indices_shape_match(values, index, columns) + + if typ == "array": + if issubclass(values.dtype.type, str): + values = np.array(values, dtype=object) + + if dtype is None and is_object_dtype(values.dtype): + arrays = [ + ensure_wrapped_if_datetimelike( + maybe_infer_to_datetimelike(values[:, i]) + ) + for i in range(values.shape[1]) + ] + else: + if lib.is_np_dtype(values.dtype, "mM"): + values = ensure_wrapped_if_datetimelike(values) + arrays = [values[:, i] for i in range(values.shape[1])] + + if copy: + arrays = [arr.copy() for arr in arrays] + + return ArrayManager(arrays, [index, columns], verify_integrity=False) + + values = values.T + + # if we don't have a dtype specified, then try to convert objects + # on the entire block; this is to convert if we have datetimelike's + # embedded in an object type + if dtype is None and is_object_dtype(values.dtype): + obj_columns = list(values) + maybe_datetime = [maybe_infer_to_datetimelike(x) for x in obj_columns] + # don't convert (and copy) the objects if no type inference occurs + if any(x is not y for x, y in zip(obj_columns, maybe_datetime)): + dvals_list = [ensure_block_shape(dval, 2) for dval in maybe_datetime] + block_values = [ + new_block_2d(dvals_list[n], placement=BlockPlacement(n)) + for n in range(len(dvals_list)) + ] + else: + bp = BlockPlacement(slice(len(columns))) + nb = new_block_2d(values, placement=bp, refs=refs) + block_values = [nb] + elif dtype is None and values.dtype.kind == "U" and using_pyarrow_string_dtype(): + dtype = StringDtype(storage="pyarrow_numpy") + + obj_columns = list(values) + block_values = [ + new_block( + dtype.construct_array_type()._from_sequence(data, dtype=dtype), + BlockPlacement(slice(i, i + 1)), + ndim=2, + ) + for i, data in enumerate(obj_columns) + ] + + else: + bp = BlockPlacement(slice(len(columns))) + nb = new_block_2d(values, placement=bp, refs=refs) + block_values = [nb] + + if len(columns) == 0: + # TODO: check len(values) == 0? 
+ block_values = [] + + return create_block_manager_from_blocks( + block_values, [columns, index], verify_integrity=False + ) + + +def _check_values_indices_shape_match( + values: np.ndarray, index: Index, columns: Index +) -> None: + """ + Check that the shape implied by our axes matches the actual shape of the + data. + """ + if values.shape[1] != len(columns) or values.shape[0] != len(index): + # Could let this raise in Block constructor, but we get a more + # helpful exception message this way. + if values.shape[0] == 0 < len(index): + raise ValueError("Empty data passed with indices specified.") + + passed = values.shape + implied = (len(index), len(columns)) + raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}") + + +def dict_to_mgr( + data: dict, + index, + columns, + *, + dtype: DtypeObj | None = None, + typ: str = "block", + copy: bool = True, +) -> Manager: + """ + Segregate Series based on type and coerce into matrices. + Needs to handle a lot of exceptional cases. + + Used in DataFrame.__init__ + """ + arrays: Sequence[Any] | Series + + if columns is not None: + from pandas.core.series import Series + + arrays = Series(data, index=columns, dtype=object) + missing = arrays.isna() + if index is None: + # GH10856 + # raise ValueError if only scalars in dict + index = _extract_index(arrays[~missing]) + else: + index = ensure_index(index) + + # no obvious "empty" int column + if missing.any() and not is_integer_dtype(dtype): + nan_dtype: DtypeObj + + if dtype is not None: + # calling sanitize_array ensures we don't mix-and-match + # NA dtypes + midxs = missing.values.nonzero()[0] + for i in midxs: + arr = sanitize_array(arrays.iat[i], index, dtype=dtype) + arrays.iat[i] = arr + else: + # GH#1783 + nan_dtype = np.dtype("object") + val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) + nmissing = missing.sum() + if copy: + rhs = [val] * nmissing + else: + # GH#45369 + rhs = [val.copy() for _ in range(nmissing)] + arrays.loc[missing] = rhs + + arrays = list(arrays) + columns = ensure_index(columns) + + else: + keys = list(data.keys()) + columns = Index(keys) if keys else default_index(0) + arrays = [com.maybe_iterable_to_list(data[k]) for k in keys] + + if copy: + if typ == "block": + # We only need to copy arrays that will not get consolidated, i.e. + # only EA arrays + arrays = [ + x.copy() + if isinstance(x, ExtensionArray) + else x.copy(deep=True) + if ( + isinstance(x, Index) + or isinstance(x, ABCSeries) + and is_1d_only_ea_dtype(x.dtype) + ) + else x + for x in arrays + ] + else: + # dtype check to exclude e.g. range objects, scalars + arrays = [x.copy() if hasattr(x, "dtype") else x for x in arrays] + + return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy) + + +def nested_data_to_arrays( + data: Sequence, + columns: Index | None, + index: Index | None, + dtype: DtypeObj | None, +) -> tuple[list[ArrayLike], Index, Index]: + """ + Convert a single sequence of arrays to multiple arrays. 
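+
+    For example (illustrative): ``[[1, 2], [3, 4]]`` with ``columns=None``
+    becomes two columns ``[1, 3]`` and ``[2, 4]`` with default RangeIndex
+    axes.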
+ """ + # By the time we get here we have already checked treat_as_nested(data) + + if is_named_tuple(data[0]) and columns is None: + columns = ensure_index(data[0]._fields) + + arrays, columns = to_arrays(data, columns, dtype=dtype) + columns = ensure_index(columns) + + if index is None: + if isinstance(data[0], ABCSeries): + index = _get_names_from_index(data) + else: + index = default_index(len(data)) + + return arrays, columns, index + + +def treat_as_nested(data) -> bool: + """ + Check if we should use nested_data_to_arrays. + """ + return ( + len(data) > 0 + and is_list_like(data[0]) + and getattr(data[0], "ndim", 1) == 1 + and not (isinstance(data, ExtensionArray) and data.ndim == 2) + ) + + +# --------------------------------------------------------------------- + + +def _prep_ndarraylike(values, copy: bool = True) -> np.ndarray: + # values is specifically _not_ ndarray, EA, Index, or Series + # We only get here with `not treat_as_nested(values)` + + if len(values) == 0: + # TODO: check for length-zero range, in which case return int64 dtype? + # TODO: re-use anything in try_cast? + return np.empty((0, 0), dtype=object) + elif isinstance(values, range): + arr = range_to_ndarray(values) + return arr[..., np.newaxis] + + def convert(v): + if not is_list_like(v) or isinstance(v, ABCDataFrame): + return v + + v = extract_array(v, extract_numpy=True) + res = maybe_convert_platform(v) + # We don't do maybe_infer_to_datetimelike here bc we will end up doing + # it column-by-column in ndarray_to_mgr + return res + + # we could have a 1-dim or 2-dim list here + # this is equiv of np.asarray, but does object conversion + # and platform dtype preservation + # does not convert e.g. [1, "a", True] to ["1", "a", "True"] like + # np.asarray would + if is_list_like(values[0]): + values = np.array([convert(v) for v in values]) + elif isinstance(values[0], np.ndarray) and values[0].ndim == 0: + # GH#21861 see test_constructor_list_of_lists + values = np.array([convert(v) for v in values]) + else: + values = convert(values) + + return _ensure_2d(values) + + +def _ensure_2d(values: np.ndarray) -> np.ndarray: + """ + Reshape 1D values, raise on anything else other than 2D. + """ + if values.ndim == 1: + values = values.reshape((values.shape[0], 1)) + elif values.ndim != 2: + raise ValueError(f"Must pass 2-d input. shape={values.shape}") + return values + + +def _homogenize( + data, index: Index, dtype: DtypeObj | None +) -> tuple[list[ArrayLike], list[Any]]: + oindex = None + homogenized = [] + # if the original array-like in `data` is a Series, keep track of this Series' refs + refs: list[Any] = [] + + for val in data: + if isinstance(val, (ABCSeries, Index)): + if dtype is not None: + val = val.astype(dtype, copy=False) + if isinstance(val, ABCSeries) and val.index is not index: + # Forces alignment. 
No need to copy data since we + # are putting it into an ndarray later + val = val.reindex(index, copy=False) + refs.append(val._references) + val = val._values + else: + if isinstance(val, dict): + # GH#41785 this _should_ be equivalent to (but faster than) + # val = Series(val, index=index)._values + if oindex is None: + oindex = index.astype("O") + + if isinstance(index, (DatetimeIndex, TimedeltaIndex)): + # see test_constructor_dict_datetime64_index + val = dict_compat(val) + else: + # see test_constructor_subclass_dict + val = dict(val) + val = lib.fast_multiget(val, oindex._values, default=np.nan) + + val = sanitize_array(val, index, dtype=dtype, copy=False) + com.require_length_match(val, index) + refs.append(None) + + homogenized.append(val) + + return homogenized, refs + + +def _extract_index(data) -> Index: + """ + Try to infer an Index from the passed data, raise ValueError on failure. + """ + index: Index + if len(data) == 0: + return default_index(0) + + raw_lengths = [] + indexes: list[list[Hashable] | Index] = [] + + have_raw_arrays = False + have_series = False + have_dicts = False + + for val in data: + if isinstance(val, ABCSeries): + have_series = True + indexes.append(val.index) + elif isinstance(val, dict): + have_dicts = True + indexes.append(list(val.keys())) + elif is_list_like(val) and getattr(val, "ndim", 1) == 1: + have_raw_arrays = True + raw_lengths.append(len(val)) + elif isinstance(val, np.ndarray) and val.ndim > 1: + raise ValueError("Per-column arrays must each be 1-dimensional") + + if not indexes and not raw_lengths: + raise ValueError("If using all scalar values, you must pass an index") + + if have_series: + index = union_indexes(indexes) + elif have_dicts: + index = union_indexes(indexes, sort=False) + + if have_raw_arrays: + lengths = list(set(raw_lengths)) + if len(lengths) > 1: + raise ValueError("All arrays must be of the same length") + + if have_dicts: + raise ValueError( + "Mixing dicts with non-Series may lead to ambiguous ordering." + ) + + if have_series: + if lengths[0] != len(index): + msg = ( + f"array length {lengths[0]} does not match index " + f"length {len(index)}" + ) + raise ValueError(msg) + else: + index = default_index(lengths[0]) + + return ensure_index(index) + + +def reorder_arrays( + arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int +) -> tuple[list[ArrayLike], Index]: + """ + Pre-emptively (cheaply) reindex arrays with new columns. 
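+
+    Examples
+    --------
+    Illustrative sketch (values hypothetical): a requested column that is
+    missing from ``arr_columns`` is backfilled with an all-NaN object array.
+
+    >>> import numpy as np
+    >>> arrays, cols = reorder_arrays(
+    ...     [np.array([1, 2])], Index(["a"]), Index(["b", "a"]), length=2
+    ... )
+    >>> list(cols)
+    ['b', 'a']
+    >>> arrays[0]
+    array([nan, nan], dtype=object)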
+ """ + # reorder according to the columns + if columns is not None: + if not columns.equals(arr_columns): + # if they are equal, there is nothing to do + new_arrays: list[ArrayLike] = [] + indexer = arr_columns.get_indexer(columns) + for i, k in enumerate(indexer): + if k == -1: + # by convention default is all-NaN object dtype + arr = np.empty(length, dtype=object) + arr.fill(np.nan) + else: + arr = arrays[k] + new_arrays.append(arr) + + arrays = new_arrays + arr_columns = columns + + return arrays, arr_columns + + +def _get_names_from_index(data) -> Index: + has_some_name = any(getattr(s, "name", None) is not None for s in data) + if not has_some_name: + return default_index(len(data)) + + index: list[Hashable] = list(range(len(data))) + count = 0 + for i, s in enumerate(data): + n = getattr(s, "name", None) + if n is not None: + index[i] = n + else: + index[i] = f"Unnamed {count}" + count += 1 + + return Index(index) + + +def _get_axes( + N: int, K: int, index: Index | None, columns: Index | None +) -> tuple[Index, Index]: + # helper to create the axes as indexes + # return axes or defaults + + if index is None: + index = default_index(N) + else: + index = ensure_index(index) + + if columns is None: + columns = default_index(K) + else: + columns = ensure_index(columns) + return index, columns + + +def dataclasses_to_dicts(data): + """ + Converts a list of dataclass instances to a list of dictionaries. + + Parameters + ---------- + data : List[Type[dataclass]] + + Returns + -------- + list_dict : List[dict] + + Examples + -------- + >>> from dataclasses import dataclass + >>> @dataclass + ... class Point: + ... x: int + ... y: int + + >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)]) + [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}] + + """ + from dataclasses import asdict + + return list(map(asdict, data)) + + +# --------------------------------------------------------------------- +# Conversion of Inputs to Arrays + + +def to_arrays( + data, columns: Index | None, dtype: DtypeObj | None = None +) -> tuple[list[ArrayLike], Index]: + """ + Return list of arrays, columns. + + Returns + ------- + list[ArrayLike] + These will become columns in a DataFrame. + Index + This will become frame.columns. + + Notes + ----- + Ensures that len(result_arrays) == len(result_index). + """ + + if not len(data): + if isinstance(data, np.ndarray): + if data.dtype.names is not None: + # i.e. numpy structured array + columns = ensure_index(data.dtype.names) + arrays = [data[name] for name in columns] + + if len(data) == 0: + # GH#42456 the indexing above results in list of 2D ndarrays + # TODO: is that an issue with numpy? + for i, arr in enumerate(arrays): + if arr.ndim == 2: + arrays[i] = arr[:, 0] + + return arrays, columns + return [], ensure_index([]) + + elif isinstance(data, np.ndarray) and data.dtype.names is not None: + # e.g. 
recarray
+        columns = Index(list(data.dtype.names))
+        arrays = [data[k] for k in columns]
+        return arrays, columns
+
+    if isinstance(data[0], (list, tuple)):
+        arr = _list_to_arrays(data)
+    elif isinstance(data[0], abc.Mapping):
+        arr, columns = _list_of_dict_to_arrays(data, columns)
+    elif isinstance(data[0], ABCSeries):
+        arr, columns = _list_of_series_to_arrays(data, columns)
+    else:
+        # last ditch effort
+        data = [tuple(x) for x in data]
+        arr = _list_to_arrays(data)
+
+    content, columns = _finalize_columns_and_data(arr, columns, dtype)
+    return content, columns
+
+
+def _list_to_arrays(data: list[tuple | list]) -> np.ndarray:
+    # Returned np.ndarray has ndim = 2
+    # Note: we already check len(data) > 0 before getting here
+    if isinstance(data[0], tuple):
+        content = lib.to_object_array_tuples(data)
+    else:
+        # list of lists
+        content = lib.to_object_array(data)
+    return content
+
+
+def _list_of_series_to_arrays(
+    data: list,
+    columns: Index | None,
+) -> tuple[np.ndarray, Index]:
+    # returned np.ndarray has ndim == 2
+
+    if columns is None:
+        # We know pass_data is non-empty because data[0] is a Series
+        pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]
+        columns = get_objs_combined_axis(pass_data, sort=False)
+
+    indexer_cache: dict[int, np.ndarray] = {}
+
+    aligned_values = []
+    for s in data:
+        index = getattr(s, "index", None)
+        if index is None:
+            index = default_index(len(s))
+
+        if id(index) in indexer_cache:
+            indexer = indexer_cache[id(index)]
+        else:
+            indexer = indexer_cache[id(index)] = index.get_indexer(columns)
+
+        values = extract_array(s, extract_numpy=True)
+        aligned_values.append(algorithms.take_nd(values, indexer))
+
+    content = np.vstack(aligned_values)
+    return content, columns
+
+
+def _list_of_dict_to_arrays(
+    data: list[dict],
+    columns: Index | None,
+) -> tuple[np.ndarray, Index]:
+    """
+    Convert a list of dicts to numpy arrays.
+
+    If `columns` is not passed, column names are inferred from the records:
+    - for OrderedDict and dicts, the column names match
+      the key insertion-order from the first record to the last.
+    - for other kinds of dict-likes, the keys are lexically sorted.
+
+    Parameters
+    ----------
+    data : iterable
+        collection of records (OrderedDict, dict)
+    columns : Index or None
+
+    Returns
+    -------
+    content : np.ndarray[object, ndim=2]
+    columns : Index
+    """
+    if columns is None:
+        gen = (list(x.keys()) for x in data)
+        sort = not any(isinstance(d, dict) for d in data)
+        pre_cols = lib.fast_unique_multiple_list_gen(gen, sort=sort)
+        columns = ensure_index(pre_cols)
+
+    # ensure that they are of the base dict class and not of derived
+    # classes
+    data = [d if type(d) is dict else dict(d) for d in data]
+
+    content = lib.dicts_to_array(data, list(columns))
+    return content, columns
+
+
+def _finalize_columns_and_data(
+    content: np.ndarray,  # ndim == 2
+    columns: Index | None,
+    dtype: DtypeObj | None,
+) -> tuple[list[ArrayLike], Index]:
+    """
+    Ensure we have valid columns, cast object dtypes if possible.
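+
+    Examples
+    --------
+    Illustrative sketch (values hypothetical): a 2D object ndarray of rows
+    comes in, a list of per-column 1D arrays with inferred dtypes comes out.
+
+    >>> import numpy as np
+    >>> content = np.array([[1, 2.5], [3, 4.5]], dtype=object)
+    >>> contents, cols = _finalize_columns_and_data(content, None, None)
+    >>> [c.dtype for c in contents]
+    [dtype('int64'), dtype('float64')]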
+    """
+    contents = list(content.T)
+
+    try:
+        columns = _validate_or_indexify_columns(contents, columns)
+    except AssertionError as err:
+        # GH#26429 do not raise user-facing AssertionError
+        raise ValueError(err) from err
+
+    if len(contents) and contents[0].dtype == np.object_:
+        contents = convert_object_array(contents, dtype=dtype)
+
+    return contents, columns
+
+
+def _validate_or_indexify_columns(
+    content: list[np.ndarray], columns: Index | None
+) -> Index:
+    """
+    If columns is None, use positional integers as column names; otherwise,
+    validate that ``columns`` has a valid length.
+
+    Parameters
+    ----------
+    content : list of np.ndarrays
+    columns : Index or None
+
+    Returns
+    -------
+    Index
+        The validated columns; a default positional index if ``columns``
+        was None.
+
+    Raises
+    ------
+    1. AssertionError when ``columns`` is not a list of lists and the length
+       of ``columns`` does not equal the length of ``content``.
+    2. ValueError when ``columns`` is a list of lists, but the lengths of its
+       sub-lists differ.
+    3. ValueError when ``columns`` is a list of lists, but the length of each
+       sub-list does not equal the length of ``content``.
+    """
+    if columns is None:
+        columns = default_index(len(content))
+    else:
+        # Add mask for data which is composed of list of lists
+        is_mi_list = isinstance(columns, list) and all(
+            isinstance(col, list) for col in columns
+        )
+
+        if not is_mi_list and len(columns) != len(content):  # pragma: no cover
+            # caller's responsibility to check for this...
+            raise AssertionError(
+                f"{len(columns)} columns passed, passed data had "
+                f"{len(content)} columns"
+            )
+        if is_mi_list:
+            # check if nested list column, length of each sub-list should be equal
+            if len({len(col) for col in columns}) > 1:
+                raise ValueError(
+                    "Length of columns passed for MultiIndex columns is different"
+                )
+
+            # if columns is not empty and length of sublist is not equal to content
+            if columns and len(columns[0]) != len(content):
+                raise ValueError(
+                    f"{len(columns[0])} columns passed, passed data had "
+                    f"{len(content)} columns"
+                )
+    return columns
+
+
+def convert_object_array(
+    content: list[npt.NDArray[np.object_]],
+    dtype: DtypeObj | None,
+    dtype_backend: str = "numpy",
+    coerce_float: bool = False,
+) -> list[ArrayLike]:
+    """
+    Internal function to convert object array.
+
+    Parameters
+    ----------
+    content: List[np.ndarray]
+    dtype: np.dtype or ExtensionDtype
+    dtype_backend: Controls if nullable/pyarrow dtypes are returned.
+    coerce_float: Cast floats that are integers to int.
+
+    Returns
+    -------
+    List[ArrayLike]
+    """
+    # provide soft conversion of object dtypes
+
+    def convert(arr):
+        if dtype != np.dtype("O"):
+            arr = lib.maybe_convert_objects(
+                arr,
+                try_float=coerce_float,
+                convert_to_nullable_dtype=dtype_backend != "numpy",
+            )
+            # Notes on cases that get here 2023-02-15
+            # 1) we DO get here when arr is all Timestamps and dtype=None
+            # 2) disabling this doesn't break the world, so this must be
+            #    getting caught at a higher level
+            # 3) passing convert_non_numeric to maybe_convert_objects gets
+            #    this right
+            # 4) convert_non_numeric?
+
+        if dtype is None:
+            if arr.dtype == np.dtype("O"):
+                # i.e.
maybe_convert_objects didn't convert + arr = maybe_infer_to_datetimelike(arr) + if dtype_backend != "numpy" and arr.dtype == np.dtype("O"): + arr = StringDtype().construct_array_type()._from_sequence(arr) + elif dtype_backend != "numpy" and isinstance(arr, np.ndarray): + if arr.dtype.kind in "iufb": + arr = pd_array(arr, copy=False) + + elif isinstance(dtype, ExtensionDtype): + # TODO: test(s) that get here + # TODO: try to de-duplicate this convert function with + # core.construction functions + cls = dtype.construct_array_type() + arr = cls._from_sequence(arr, dtype=dtype, copy=False) + elif dtype.kind in "mM": + # This restriction is harmless bc these are the only cases + # where maybe_cast_to_datetime is not a no-op. + # Here we know: + # 1) dtype.kind in "mM" and + # 2) arr is either object or numeric dtype + arr = maybe_cast_to_datetime(arr, dtype) + + return arr + + arrays = [convert(arr) for arr in content] + + return arrays diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/managers.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/managers.py new file mode 100644 index 00000000..4a6d3c33 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/managers.py @@ -0,0 +1,2321 @@ +from __future__ import annotations + +from collections.abc import ( + Hashable, + Sequence, +) +import itertools +from typing import ( + TYPE_CHECKING, + Callable, + Literal, + cast, +) +import warnings +import weakref + +import numpy as np + +from pandas._config import using_copy_on_write + +from pandas._libs import ( + internals as libinternals, + lib, +) +from pandas._libs.internals import ( + BlockPlacement, + BlockValuesRefs, +) +from pandas.errors import PerformanceWarning +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import infer_dtype_from_scalar +from pandas.core.dtypes.common import ( + ensure_platform_int, + is_1d_only_ea_dtype, + is_list_like, +) +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + ExtensionDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + array_equals, + isna, +) + +import pandas.core.algorithms as algos +from pandas.core.arrays import DatetimeArray +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, +) +from pandas.core.indexers import maybe_convert_indices +from pandas.core.indexes.api import ( + Index, + ensure_index, +) +from pandas.core.internals.base import ( + DataManager, + SingleDataManager, + ensure_np_dtype, + interleaved_dtype, +) +from pandas.core.internals.blocks import ( + Block, + NumpyBlock, + ensure_block_shape, + extend_blocks, + get_block_type, + maybe_coerce_values, + new_block, + new_block_2d, +) +from pandas.core.internals.ops import ( + blockwise_all, + operate_blockwise, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeObj, + QuantileInterpolation, + Self, + Shape, + npt, + ) + + +class BaseBlockManager(DataManager): + """ + Core internal data structure to implement DataFrame, Series, etc. + + Manage a bunch of labeled 2D mixed-type ndarrays. 
Essentially it's a + lightweight blocked set of labeled data to be manipulated by the DataFrame + public API class + + Attributes + ---------- + shape + ndim + axes + values + items + + Methods + ------- + set_axis(axis, new_labels) + copy(deep=True) + + get_dtypes + + apply(func, axes, block_filter_fn) + + get_bool_data + get_numeric_data + + get_slice(slice_like, axis) + get(label) + iget(loc) + + take(indexer, axis) + reindex_axis(new_labels, axis) + reindex_indexer(new_labels, indexer, axis) + + delete(label) + insert(loc, label, value) + set(label, value) + + Parameters + ---------- + blocks: Sequence of Block + axes: Sequence of Index + verify_integrity: bool, default True + + Notes + ----- + This is *not* a public API class + """ + + __slots__ = () + + _blknos: npt.NDArray[np.intp] + _blklocs: npt.NDArray[np.intp] + blocks: tuple[Block, ...] + axes: list[Index] + + @property + def ndim(self) -> int: + raise NotImplementedError + + _known_consolidated: bool + _is_consolidated: bool + + def __init__(self, blocks, axes, verify_integrity: bool = True) -> None: + raise NotImplementedError + + @classmethod + def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self: + raise NotImplementedError + + @property + def blknos(self) -> npt.NDArray[np.intp]: + """ + Suppose we want to find the array corresponding to our i'th column. + + blknos[i] identifies the block from self.blocks that contains this column. + + blklocs[i] identifies the column of interest within + self.blocks[self.blknos[i]] + """ + if self._blknos is None: + # Note: these can be altered by other BlockManager methods. + self._rebuild_blknos_and_blklocs() + + return self._blknos + + @property + def blklocs(self) -> npt.NDArray[np.intp]: + """ + See blknos.__doc__ + """ + if self._blklocs is None: + # Note: these can be altered by other BlockManager methods. + self._rebuild_blknos_and_blklocs() + + return self._blklocs + + def make_empty(self, axes=None) -> Self: + """return an empty BlockManager with the items axis of len 0""" + if axes is None: + axes = [Index([])] + self.axes[1:] + + # preserve dtype if possible + if self.ndim == 1: + assert isinstance(self, SingleBlockManager) # for mypy + blk = self.blocks[0] + arr = blk.values[:0] + bp = BlockPlacement(slice(0, 0)) + nb = blk.make_block_same_class(arr, placement=bp) + blocks = [nb] + else: + blocks = [] + return type(self).from_blocks(blocks, axes) + + def __nonzero__(self) -> bool: + return True + + # Python3 compat + __bool__ = __nonzero__ + + def _normalize_axis(self, axis: AxisInt) -> int: + # switch axis to follow BlockManager logic + if self.ndim == 2: + axis = 1 if axis == 0 else 0 + return axis + + def set_axis(self, axis: AxisInt, new_labels: Index) -> None: + # Caller is responsible for ensuring we have an Index object. + self._validate_set_axis(axis, new_labels) + self.axes[axis] = new_labels + + @property + def is_single_block(self) -> bool: + # Assumes we are 2D; overridden by SingleBlockManager + return len(self.blocks) == 1 + + @property + def items(self) -> Index: + return self.axes[0] + + def _has_no_reference(self, i: int) -> bool: + """ + Check for column `i` if it has references. + (whether it references another array or is itself being referenced) + Returns True if the column has no references. + """ + blkno = self.blknos[i] + return self._has_no_reference_block(blkno) + + def _has_no_reference_block(self, blkno: int) -> bool: + """ + Check for block `i` if it has references. 
+ (whether it references another array or is itself being referenced) + Returns True if the block has no references. + """ + return not self.blocks[blkno].refs.has_reference() + + def add_references(self, mgr: BaseBlockManager) -> None: + """ + Adds the references from one manager to another. We assume that both + managers have the same block structure. + """ + if len(self.blocks) != len(mgr.blocks): + # If block structure changes, then we made a copy + return + for i, blk in enumerate(self.blocks): + blk.refs = mgr.blocks[i].refs + # Argument 1 to "add_reference" of "BlockValuesRefs" has incompatible type + # "Block"; expected "SharedBlock" + blk.refs.add_reference(blk) # type: ignore[arg-type] + + def references_same_values(self, mgr: BaseBlockManager, blkno: int) -> bool: + """ + Checks if two blocks from two different block managers reference the + same underlying values. + """ + ref = weakref.ref(self.blocks[blkno]) + return ref in mgr.blocks[blkno].refs.referenced_blocks + + def get_dtypes(self) -> npt.NDArray[np.object_]: + dtypes = np.array([blk.dtype for blk in self.blocks], dtype=object) + return dtypes.take(self.blknos) + + @property + def arrays(self) -> list[ArrayLike]: + """ + Quick access to the backing arrays of the Blocks. + + Only for compatibility with ArrayManager for testing convenience. + Not to be used in actual code, and return value is not the same as the + ArrayManager method (list of 1D arrays vs iterator of 2D ndarrays / 1D EAs). + + Warning! The returned arrays don't handle Copy-on-Write, so this should + be used with caution (only in read-mode). + """ + return [blk.values for blk in self.blocks] + + def __repr__(self) -> str: + output = type(self).__name__ + for i, ax in enumerate(self.axes): + if i == 0: + output += f"\nItems: {ax}" + else: + output += f"\nAxis {i}: {ax}" + + for block in self.blocks: + output += f"\n{block}" + return output + + def apply( + self, + f, + align_keys: list[str] | None = None, + **kwargs, + ) -> Self: + """ + Iterate over the blocks, collect and create a new BlockManager. + + Parameters + ---------- + f : str or callable + Name of the Block method to apply. + align_keys: List[str] or None, default None + **kwargs + Keywords to pass to `f` + + Returns + ------- + BlockManager + """ + assert "filter" not in kwargs + + align_keys = align_keys or [] + result_blocks: list[Block] = [] + # fillna: Series/DataFrame is responsible for making sure value is aligned + + aligned_args = {k: kwargs[k] for k in align_keys} + + for b in self.blocks: + if aligned_args: + for k, obj in aligned_args.items(): + if isinstance(obj, (ABCSeries, ABCDataFrame)): + # The caller is responsible for ensuring that + # obj.axes[-1].equals(self.items) + if obj.ndim == 1: + kwargs[k] = obj.iloc[b.mgr_locs.indexer]._values + else: + kwargs[k] = obj.iloc[:, b.mgr_locs.indexer]._values + else: + # otherwise we have an ndarray + kwargs[k] = obj[b.mgr_locs.indexer] + + if callable(f): + applied = b.apply(f, **kwargs) + else: + applied = getattr(b, f)(**kwargs) + result_blocks = extend_blocks(applied, result_blocks) + + out = type(self).from_blocks(result_blocks, self.axes) + return out + + # Alias so we can share code with ArrayManager + apply_with_block = apply + + def setitem(self, indexer, value) -> Self: + """ + Set values with indexer. 
+ + For SingleBlockManager, this backs s[indexer] = value + """ + if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim: + raise ValueError(f"Cannot set values with ndim > {self.ndim}") + + if using_copy_on_write() and not self._has_no_reference(0): + # this method is only called if there is a single block -> hardcoded 0 + # Split blocks to only copy the columns we want to modify + if self.ndim == 2 and isinstance(indexer, tuple): + blk_loc = self.blklocs[indexer[1]] + if is_list_like(blk_loc) and blk_loc.ndim == 2: + blk_loc = np.squeeze(blk_loc, axis=0) + elif not is_list_like(blk_loc): + # Keep dimension and copy data later + blk_loc = [blk_loc] # type: ignore[assignment] + if len(blk_loc) == 0: + return self.copy(deep=False) + + values = self.blocks[0].values + if values.ndim == 2: + values = values[blk_loc] + # "T" has no attribute "_iset_split_block" + self._iset_split_block( # type: ignore[attr-defined] + 0, blk_loc, values + ) + # first block equals values + self.blocks[0].setitem((indexer[0], np.arange(len(blk_loc))), value) + return self + # No need to split if we either set all columns or on a single block + # manager + self = self.copy() + + return self.apply("setitem", indexer=indexer, value=value) + + def diff(self, n: int) -> Self: + # only reached with self.ndim == 2 + return self.apply("diff", n=n) + + def astype(self, dtype, copy: bool | None = False, errors: str = "raise") -> Self: + if copy is None: + if using_copy_on_write(): + copy = False + else: + copy = True + elif using_copy_on_write(): + copy = False + + return self.apply( + "astype", + dtype=dtype, + copy=copy, + errors=errors, + using_cow=using_copy_on_write(), + ) + + def convert(self, copy: bool | None) -> Self: + if copy is None: + if using_copy_on_write(): + copy = False + else: + copy = True + elif using_copy_on_write(): + copy = False + + return self.apply("convert", copy=copy, using_cow=using_copy_on_write()) + + def to_native_types(self, **kwargs) -> Self: + """ + Convert values to native types (strings / python objects) that are used + in formatting (repr / csv). + """ + return self.apply("to_native_types", **kwargs) + + @property + def any_extension_types(self) -> bool: + """Whether any of the blocks in this manager are extension blocks""" + return any(block.is_extension for block in self.blocks) + + @property + def is_view(self) -> bool: + """return a boolean if we are a single block and are a view""" + if len(self.blocks) == 1: + return self.blocks[0].is_view + + # It is technically possible to figure out which blocks are views + # e.g. [ b.values.base is not None for b in self.blocks ] + # but then we have the case of possibly some blocks being a view + # and some blocks not. setting in theory is possible on the non-view + # blocks w/o causing a SettingWithCopy raise/warn. But this is a bit + # complicated + + return False + + def _get_data_subset(self, predicate: Callable) -> Self: + blocks = [blk for blk in self.blocks if predicate(blk.values)] + return self._combine(blocks, copy=False) + + def get_bool_data(self, copy: bool = False) -> Self: + """ + Select blocks that are bool-dtype and columns from object-dtype blocks + that are all-bool. 
+ + Parameters + ---------- + copy : bool, default False + Whether to copy the blocks + """ + + new_blocks = [] + + for blk in self.blocks: + if blk.dtype == bool: + new_blocks.append(blk) + + elif blk.is_object: + nbs = blk._split() + new_blocks.extend(nb for nb in nbs if nb.is_bool) + + return self._combine(new_blocks, copy) + + def get_numeric_data(self, copy: bool = False) -> Self: + """ + Parameters + ---------- + copy : bool, default False + Whether to copy the blocks + """ + numeric_blocks = [blk for blk in self.blocks if blk.is_numeric] + if len(numeric_blocks) == len(self.blocks): + # Avoid somewhat expensive _combine + if copy: + return self.copy(deep=True) + return self + return self._combine(numeric_blocks, copy) + + def _combine( + self, blocks: list[Block], copy: bool = True, index: Index | None = None + ) -> Self: + """return a new manager with the blocks""" + if len(blocks) == 0: + if self.ndim == 2: + # retain our own Index dtype + if index is not None: + axes = [self.items[:0], index] + else: + axes = [self.items[:0]] + self.axes[1:] + return self.make_empty(axes) + return self.make_empty() + + # FIXME: optimization potential + indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks])) + inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0]) + + new_blocks: list[Block] = [] + # TODO(CoW) we could optimize here if we know that the passed blocks + # are fully "owned" (eg created from an operation, not coming from + # an existing manager) + for b in blocks: + nb = b.copy(deep=copy) + nb.mgr_locs = BlockPlacement(inv_indexer[nb.mgr_locs.indexer]) + new_blocks.append(nb) + + axes = list(self.axes) + if index is not None: + axes[-1] = index + axes[0] = self.items.take(indexer) + + return type(self).from_blocks(new_blocks, axes) + + @property + def nblocks(self) -> int: + return len(self.blocks) + + def copy(self, deep: bool | None | Literal["all"] = True) -> Self: + """ + Make deep or shallow copy of BlockManager + + Parameters + ---------- + deep : bool, string or None, default True + If False or None, return a shallow copy (do not copy data) + If 'all', copy data and a deep copy of the index + + Returns + ------- + BlockManager + """ + if deep is None: + if using_copy_on_write(): + # use shallow copy + deep = False + else: + # preserve deep copy for BlockManager with copy=None + deep = True + + # this preserves the notion of view copying of axes + if deep: + # hit in e.g. 
tests.io.json.test_pandas + + def copy_func(ax): + return ax.copy(deep=True) if deep == "all" else ax.view() + + new_axes = [copy_func(ax) for ax in self.axes] + else: + if using_copy_on_write(): + new_axes = [ax.view() for ax in self.axes] + else: + new_axes = list(self.axes) + + res = self.apply("copy", deep=deep) + res.axes = new_axes + + if self.ndim > 1: + # Avoid needing to re-compute these + blknos = self._blknos + if blknos is not None: + res._blknos = blknos.copy() + res._blklocs = self._blklocs.copy() + + if deep: + res._consolidate_inplace() + return res + + def consolidate(self) -> Self: + """ + Join together blocks having same dtype + + Returns + ------- + y : BlockManager + """ + if self.is_consolidated(): + return self + + bm = type(self)(self.blocks, self.axes, verify_integrity=False) + bm._is_consolidated = False + bm._consolidate_inplace() + return bm + + def reindex_indexer( + self, + new_axis: Index, + indexer: npt.NDArray[np.intp] | None, + axis: AxisInt, + fill_value=None, + allow_dups: bool = False, + copy: bool | None = True, + only_slice: bool = False, + *, + use_na_proxy: bool = False, + ) -> Self: + """ + Parameters + ---------- + new_axis : Index + indexer : ndarray[intp] or None + axis : int + fill_value : object, default None + allow_dups : bool, default False + copy : bool or None, default True + If None, regard as False to get shallow copy. + only_slice : bool, default False + Whether to take views, not copies, along columns. + use_na_proxy : bool, default False + Whether to use a np.void ndarray for newly introduced columns. + + pandas-indexer with -1's only. + """ + if copy is None: + if using_copy_on_write(): + # use shallow copy + copy = False + else: + # preserve deep copy for BlockManager with copy=None + copy = True + + if indexer is None: + if new_axis is self.axes[axis] and not copy: + return self + + result = self.copy(deep=copy) + result.axes = list(self.axes) + result.axes[axis] = new_axis + return result + + # Should be intp, but in some cases we get int64 on 32bit builds + assert isinstance(indexer, np.ndarray) + + # some axes don't allow reindexing with dups + if not allow_dups: + self.axes[axis]._validate_can_reindex(indexer) + + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + if axis == 0: + new_blocks = self._slice_take_blocks_ax0( + indexer, + fill_value=fill_value, + only_slice=only_slice, + use_na_proxy=use_na_proxy, + ) + else: + new_blocks = [ + blk.take_nd( + indexer, + axis=1, + fill_value=( + fill_value if fill_value is not None else blk.fill_value + ), + ) + for blk in self.blocks + ] + + new_axes = list(self.axes) + new_axes[axis] = new_axis + + new_mgr = type(self).from_blocks(new_blocks, new_axes) + if axis == 1: + # We can avoid the need to rebuild these + new_mgr._blknos = self.blknos.copy() + new_mgr._blklocs = self.blklocs.copy() + return new_mgr + + def _slice_take_blocks_ax0( + self, + slice_or_indexer: slice | np.ndarray, + fill_value=lib.no_default, + only_slice: bool = False, + *, + use_na_proxy: bool = False, + ref_inplace_op: bool = False, + ) -> list[Block]: + """ + Slice/take blocks along axis=0. + + Overloaded for SingleBlock + + Parameters + ---------- + slice_or_indexer : slice or np.ndarray[int64] + fill_value : scalar, default lib.no_default + only_slice : bool, default False + If True, we always return views on existing arrays, never copies. + This is used when called from ops.blockwise.operate_blockwise. 
use_na_proxy : bool, default False
+            Whether to use a np.void ndarray for newly introduced columns.
+        ref_inplace_op : bool, default False
+            Don't track refs if True because we operate inplace.
+
+        Returns
+        -------
+        new_blocks : list of Block
+        """
+        allow_fill = fill_value is not lib.no_default
+
+        sl_type, slobj, sllen = _preprocess_slice_or_indexer(
+            slice_or_indexer, self.shape[0], allow_fill=allow_fill
+        )
+
+        if self.is_single_block:
+            blk = self.blocks[0]
+
+            if sl_type == "slice":
+                # GH#32959 EABlock would fail since we can't make 0-width
+                # TODO(EA2D): special casing unnecessary with 2D EAs
+                if sllen == 0:
+                    return []
+                bp = BlockPlacement(slice(0, sllen))
+                return [blk.getitem_block_columns(slobj, new_mgr_locs=bp)]
+            elif not allow_fill or self.ndim == 1:
+                if allow_fill and fill_value is None:
+                    fill_value = blk.fill_value
+
+                if not allow_fill and only_slice:
+                    # GH#33597 slice instead of take, so we get
+                    # views instead of copies
+                    blocks = [
+                        blk.getitem_block_columns(
+                            slice(ml, ml + 1),
+                            new_mgr_locs=BlockPlacement(i),
+                            ref_inplace_op=ref_inplace_op,
+                        )
+                        for i, ml in enumerate(slobj)
+                    ]
+                    return blocks
+                else:
+                    bp = BlockPlacement(slice(0, sllen))
+                    return [
+                        blk.take_nd(
+                            slobj,
+                            axis=0,
+                            new_mgr_locs=bp,
+                            fill_value=fill_value,
+                        )
+                    ]
+
+        if sl_type == "slice":
+            blknos = self.blknos[slobj]
+            blklocs = self.blklocs[slobj]
+        else:
+            blknos = algos.take_nd(
+                self.blknos, slobj, fill_value=-1, allow_fill=allow_fill
+            )
+            blklocs = algos.take_nd(
+                self.blklocs, slobj, fill_value=-1, allow_fill=allow_fill
+            )
+
+        # When filling blknos, make sure blknos is updated before appending to
+        # blocks list, that way new blkno is exactly len(blocks).
+        blocks = []
+        group = not only_slice
+        for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=group):
+            if blkno == -1:
+                # If we've got here, fill_value was not lib.no_default
+
+                blocks.append(
+                    self._make_na_block(
+                        placement=mgr_locs,
+                        fill_value=fill_value,
+                        use_na_proxy=use_na_proxy,
+                    )
+                )
+            else:
+                blk = self.blocks[blkno]
+
+                # Otherwise, slicing along items axis is necessary.
+                if not blk._can_consolidate and not blk._validate_ndim:
+                    # i.e. we don't go through here for DatetimeTZBlock
+                    # A non-consolidatable block, it's easy, because there's
+                    # only one item and each mgr loc is a copy of that single
+                    # item.
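+                    # Note (a hedged gloss on the line below): we copy deeply
+                    # unless the caller asked for views (only_slice) or
+                    # Copy-on-Write is tracking references for us; in those
+                    # two cases a shallow, memory-sharing copy is safe.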
+ deep = not (only_slice or using_copy_on_write()) + for mgr_loc in mgr_locs: + newblk = blk.copy(deep=deep) + newblk.mgr_locs = BlockPlacement(slice(mgr_loc, mgr_loc + 1)) + blocks.append(newblk) + + else: + # GH#32779 to avoid the performance penalty of copying, + # we may try to only slice + taker = blklocs[mgr_locs.indexer] + max_len = max(len(mgr_locs), taker.max() + 1) + if only_slice or using_copy_on_write(): + taker = lib.maybe_indices_to_slice(taker, max_len) + + if isinstance(taker, slice): + nb = blk.getitem_block_columns(taker, new_mgr_locs=mgr_locs) + blocks.append(nb) + elif only_slice: + # GH#33597 slice instead of take, so we get + # views instead of copies + for i, ml in zip(taker, mgr_locs): + slc = slice(i, i + 1) + bp = BlockPlacement(ml) + nb = blk.getitem_block_columns(slc, new_mgr_locs=bp) + # We have np.shares_memory(nb.values, blk.values) + blocks.append(nb) + else: + nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs) + blocks.append(nb) + + return blocks + + def _make_na_block( + self, placement: BlockPlacement, fill_value=None, use_na_proxy: bool = False + ) -> Block: + # Note: we only get here with self.ndim == 2 + + if use_na_proxy: + assert fill_value is None + shape = (len(placement), self.shape[1]) + vals = np.empty(shape, dtype=np.void) + nb = NumpyBlock(vals, placement, ndim=2) + return nb + + if fill_value is None: + fill_value = np.nan + + shape = (len(placement), self.shape[1]) + + dtype, fill_value = infer_dtype_from_scalar(fill_value) + block_values = make_na_array(dtype, shape, fill_value) + return new_block_2d(block_values, placement=placement) + + def take( + self, + indexer: npt.NDArray[np.intp], + axis: AxisInt = 1, + verify: bool = True, + ) -> Self: + """ + Take items along any axis. + + indexer : np.ndarray[np.intp] + axis : int, default 1 + verify : bool, default True + Check that all entries are between 0 and len(self) - 1, inclusive. + Pass verify=False if this check has been done by the caller. + + Returns + ------- + BlockManager + """ + # Caller is responsible for ensuring indexer annotation is accurate + + n = self.shape[axis] + indexer = maybe_convert_indices(indexer, n, verify=verify) + + new_labels = self.axes[axis].take(indexer) + return self.reindex_indexer( + new_axis=new_labels, + indexer=indexer, + axis=axis, + allow_dups=True, + copy=None, + ) + + +class BlockManager(libinternals.BlockManager, BaseBlockManager): + """ + BaseBlockManager that holds 2D blocks. + """ + + ndim = 2 + + # ---------------------------------------------------------------- + # Constructors + + def __init__( + self, + blocks: Sequence[Block], + axes: Sequence[Index], + verify_integrity: bool = True, + ) -> None: + if verify_integrity: + # Assertion disabled for performance + # assert all(isinstance(x, Index) for x in axes) + + for block in blocks: + if self.ndim != block.ndim: + raise AssertionError( + f"Number of Block dimensions ({block.ndim}) must equal " + f"number of axes ({self.ndim})" + ) + # As of 2.0, the caller is responsible for ensuring that + # DatetimeTZBlock with block.ndim == 2 has block.values.ndim ==2; + # previously there was a special check for fastparquet compat. 
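+
+            # A hedged construction sketch (illustrative only; make_block is
+            # the semi-public helper in pandas.core.internals.api):
+            #
+            #   blk = make_block(np.zeros((1, 3)), placement=[0])
+            #   mgr = BlockManager([blk], [Index(["a"]), Index(range(3))])
+            #
+            # _verify_integrity() below then checks every block's shape
+            # against these axes.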
+
+            self._verify_integrity()
+
+    def _verify_integrity(self) -> None:
+        mgr_shape = self.shape
+        tot_items = sum(len(x.mgr_locs) for x in self.blocks)
+        for block in self.blocks:
+            if block.shape[1:] != mgr_shape[1:]:
+                raise_construction_error(tot_items, block.shape[1:], self.axes)
+        if len(self.items) != tot_items:
+            raise AssertionError(
+                "Number of manager items must equal union of "
+                f"block items\n# manager items: {len(self.items)}, # "
+                f"tot_items: {tot_items}"
+            )
+
+    @classmethod
+    def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self:
+        """
+        Constructor for BlockManager and SingleBlockManager with same signature.
+        """
+        return cls(blocks, axes, verify_integrity=False)
+
+    # ----------------------------------------------------------------
+    # Indexing
+
+    def fast_xs(self, loc: int) -> SingleBlockManager:
+        """
+        Return the array corresponding to `frame.iloc[loc]`.
+
+        Parameters
+        ----------
+        loc : int
+
+        Returns
+        -------
+        np.ndarray or ExtensionArray
+        """
+        if len(self.blocks) == 1:
+            # TODO: this could be wrong if blk.mgr_locs is not slice(None)-like;
+            #  is this ruled out in the general case?
+            result = self.blocks[0].iget((slice(None), loc))
+            # in the case of a single block, the new block is a view
+            bp = BlockPlacement(slice(0, len(result)))
+            block = new_block(
+                result,
+                placement=bp,
+                ndim=1,
+                refs=self.blocks[0].refs,
+            )
+            return SingleBlockManager(block, self.axes[0])
+
+        dtype = interleaved_dtype([blk.dtype for blk in self.blocks])
+
+        n = len(self)
+
+        if isinstance(dtype, ExtensionDtype):
+            result = np.empty(n, dtype=object)
+        else:
+            result = np.empty(n, dtype=dtype)
+            result = ensure_wrapped_if_datetimelike(result)
+
+        for blk in self.blocks:
+            # Such assignment may incorrectly coerce NaT to None
+            # result[blk.mgr_locs] = blk._slice((slice(None), loc))
+            for i, rl in enumerate(blk.mgr_locs):
+                result[rl] = blk.iget((i, loc))
+
+        if isinstance(dtype, ExtensionDtype):
+            cls = dtype.construct_array_type()
+            result = cls._from_sequence(result, dtype=dtype)
+
+        bp = BlockPlacement(slice(0, len(result)))
+        block = new_block(result, placement=bp, ndim=1)
+        return SingleBlockManager(block, self.axes[0])
+
+    def iget(self, i: int, track_ref: bool = True) -> SingleBlockManager:
+        """
+        Return the data as a SingleBlockManager.
+        """
+        block = self.blocks[self.blknos[i]]
+        values = block.iget(self.blklocs[i])
+
+        # shortcut for selecting a single dim from a 2-dim BM
+        bp = BlockPlacement(slice(0, len(values)))
+        nb = type(block)(
+            values, placement=bp, ndim=1, refs=block.refs if track_ref else None
+        )
+        return SingleBlockManager(nb, self.axes[1])
+
+    def iget_values(self, i: int) -> ArrayLike:
+        """
+        Return the data for column i as the values (ndarray or ExtensionArray).
+
+        Warning! The returned array is a view but doesn't handle Copy-on-Write,
+        so this should be used with caution.
+        """
+        # TODO(CoW) making the arrays read-only might make this safer to use?
+        block = self.blocks[self.blknos[i]]
+        values = block.iget(self.blklocs[i])
+        return values
+
+    @property
+    def column_arrays(self) -> list[np.ndarray]:
+        """
+        Used in the JSON C code to access column arrays.
+        This optimizes compared to using `iget_values` by converting each
+        block's values only once, instead of once per column.
+
+        Warning! This doesn't handle Copy-on-Write, so should be used with
+        caution (current use case of consuming this in the JSON code is fine).
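+
+        Illustrative sketch (values hypothetical): for a frame like
+        ``DataFrame({"a": [1, 2]})`` this would yield ``[array([1, 2])]``,
+        one 1D ndarray per column.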
+ """ + # This is an optimized equivalent to + # result = [self.iget_values(i) for i in range(len(self.items))] + result: list[np.ndarray | None] = [None] * len(self.items) + + for blk in self.blocks: + mgr_locs = blk._mgr_locs + values = blk.array_values._values_for_json() + if values.ndim == 1: + # TODO(EA2D): special casing not needed with 2D EAs + result[mgr_locs[0]] = values + + else: + for i, loc in enumerate(mgr_locs): + result[loc] = values[i] + + # error: Incompatible return value type (got "List[None]", + # expected "List[ndarray[Any, Any]]") + return result # type: ignore[return-value] + + def iset( + self, + loc: int | slice | np.ndarray, + value: ArrayLike, + inplace: bool = False, + refs: BlockValuesRefs | None = None, + ): + """ + Set new item in-place. Does not consolidate. Adds new Block if not + contained in the current set of items + """ + + # FIXME: refactor, clearly separate broadcasting & zip-like assignment + # can prob also fix the various if tests for sparse/categorical + if self._blklocs is None and self.ndim > 1: + self._rebuild_blknos_and_blklocs() + + # Note: we exclude DTA/TDA here + value_is_extension_type = is_1d_only_ea_dtype(value.dtype) + if not value_is_extension_type: + if value.ndim == 2: + value = value.T + else: + value = ensure_block_shape(value, ndim=2) + + if value.shape[1:] != self.shape[1:]: + raise AssertionError( + "Shape of new values must be compatible with manager shape" + ) + + if lib.is_integer(loc): + # We have 6 tests where loc is _not_ an int. + # In this case, get_blkno_placements will yield only one tuple, + # containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1))) + + # Check if we can use _iset_single fastpath + loc = cast(int, loc) + blkno = self.blknos[loc] + blk = self.blocks[blkno] + if len(blk._mgr_locs) == 1: # TODO: fastest way to check this? + return self._iset_single( + loc, + value, + inplace=inplace, + blkno=blkno, + blk=blk, + refs=refs, + ) + + # error: Incompatible types in assignment (expression has type + # "List[Union[int, slice, ndarray]]", variable has type "Union[int, + # slice, ndarray]") + loc = [loc] # type: ignore[assignment] + + # categorical/sparse/datetimetz + if value_is_extension_type: + + def value_getitem(placement): + return value + + else: + + def value_getitem(placement): + return value[placement.indexer] + + # Accessing public blknos ensures the public versions are initialized + blknos = self.blknos[loc] + blklocs = self.blklocs[loc].copy() + + unfit_mgr_locs = [] + unfit_val_locs = [] + removed_blknos = [] + for blkno_l, val_locs in libinternals.get_blkno_placements(blknos, group=True): + blk = self.blocks[blkno_l] + blk_locs = blklocs[val_locs.indexer] + if inplace and blk.should_store(value): + # Updating inplace -> check if we need to do Copy-on-Write + if using_copy_on_write() and not self._has_no_reference_block(blkno_l): + self._iset_split_block( + blkno_l, blk_locs, value_getitem(val_locs), refs=refs + ) + else: + blk.set_inplace(blk_locs, value_getitem(val_locs)) + continue + else: + unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) + unfit_val_locs.append(val_locs) + + # If all block items are unfit, schedule the block for removal. 
+                    if len(val_locs) == len(blk.mgr_locs):
+                        removed_blknos.append(blkno_l)
+                        continue
+                    else:
+                        # Defer setting the new values to enable consolidation
+                        self._iset_split_block(blkno_l, blk_locs, refs=refs)
+
+        if len(removed_blknos):
+            # Remove blocks & update blknos accordingly
+            is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
+            is_deleted[removed_blknos] = True
+
+            new_blknos = np.empty(self.nblocks, dtype=np.intp)
+            new_blknos.fill(-1)
+            new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos))
+            self._blknos = new_blknos[self._blknos]
+            self.blocks = tuple(
+                blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
+            )
+
+        if unfit_val_locs:
+            unfit_idxr = np.concatenate(unfit_mgr_locs)
+            unfit_count = len(unfit_idxr)
+
+            new_blocks: list[Block] = []
+            # TODO(CoW) is this always correct to assume that the new_blocks
+            # are not referencing anything else?
+            if value_is_extension_type:
+                # This code (ab-)uses the fact that EA blocks contain only
+                # one item.
+                # TODO(EA2D): special casing unnecessary with 2D EAs
+                new_blocks.extend(
+                    new_block_2d(
+                        values=value,
+                        placement=BlockPlacement(slice(mgr_loc, mgr_loc + 1)),
+                        refs=refs,
+                    )
+                    for mgr_loc in unfit_idxr
+                )
+
+                self._blknos[unfit_idxr] = np.arange(unfit_count) + len(self.blocks)
+                self._blklocs[unfit_idxr] = 0
+
+            else:
+                # unfit_val_locs contains BlockPlacement objects
+                unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
+
+                new_blocks.append(
+                    new_block_2d(
+                        values=value_getitem(unfit_val_items),
+                        placement=BlockPlacement(unfit_idxr),
+                        refs=refs,
+                    )
+                )
+
+                self._blknos[unfit_idxr] = len(self.blocks)
+                self._blklocs[unfit_idxr] = np.arange(unfit_count)
+
+            self.blocks += tuple(new_blocks)
+
+            # Newly created block's dtype may already be present.
+            self._known_consolidated = False
+
+    def _iset_split_block(
+        self,
+        blkno_l: int,
+        blk_locs: np.ndarray | list[int],
+        value: ArrayLike | None = None,
+        refs: BlockValuesRefs | None = None,
+    ) -> None:
+        """Removes columns from a block by splitting the block.
+
+        Avoids copying the whole block through slicing and updates the manager
+        after determining the new block structure. Optionally adds a new block,
+        otherwise this has to be done by the caller.
+
+        Parameters
+        ----------
+        blkno_l: The block number to operate on, relevant for updating the manager
+        blk_locs: The locations of our block that should be deleted.
+        value: The value to set as a replacement.
+        refs: The reference tracking object of the value to set.
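+
+        Notes
+        -----
+        Illustrative sketch: for a block holding columns ``[0, 1, 2]``,
+        ``_iset_split_block(blkno, [1], new_vals)`` conceptually leaves the
+        replacement block for column 1 plus the two surviving single-column
+        slices, with ``_blknos`` and ``_blklocs`` updated to the new layout.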
+ """ + blk = self.blocks[blkno_l] + + if self._blklocs is None: + self._rebuild_blknos_and_blklocs() + + nbs_tup = tuple(blk.delete(blk_locs)) + if value is not None: + locs = blk.mgr_locs.as_array[blk_locs] + first_nb = new_block_2d(value, BlockPlacement(locs), refs=refs) + else: + first_nb = nbs_tup[0] + nbs_tup = tuple(nbs_tup[1:]) + + nr_blocks = len(self.blocks) + blocks_tup = ( + self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1 :] + nbs_tup + ) + self.blocks = blocks_tup + + if not nbs_tup and value is not None: + # No need to update anything if split did not happen + return + + self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb)) + + for i, nb in enumerate(nbs_tup): + self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb)) + self._blknos[nb.mgr_locs.indexer] = i + nr_blocks + + def _iset_single( + self, + loc: int, + value: ArrayLike, + inplace: bool, + blkno: int, + blk: Block, + refs: BlockValuesRefs | None = None, + ) -> None: + """ + Fastpath for iset when we are only setting a single position and + the Block currently in that position is itself single-column. + + In this case we can swap out the entire Block and blklocs and blknos + are unaffected. + """ + # Caller is responsible for verifying value.shape + + if inplace and blk.should_store(value): + copy = False + if using_copy_on_write() and not self._has_no_reference_block(blkno): + # perform Copy-on-Write and clear the reference + copy = True + iloc = self.blklocs[loc] + blk.set_inplace(slice(iloc, iloc + 1), value, copy=copy) + return + + nb = new_block_2d(value, placement=blk._mgr_locs, refs=refs) + old_blocks = self.blocks + new_blocks = old_blocks[:blkno] + (nb,) + old_blocks[blkno + 1 :] + self.blocks = new_blocks + return + + def column_setitem( + self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False + ) -> None: + """ + Set values ("setitem") into a single column (not setting the full column). + + This is a method on the BlockManager level, to avoid creating an + intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`) + """ + if using_copy_on_write() and not self._has_no_reference(loc): + blkno = self.blknos[loc] + # Split blocks to only copy the column we want to modify + blk_loc = self.blklocs[loc] + # Copy our values + values = self.blocks[blkno].values + if values.ndim == 1: + values = values.copy() + else: + # Use [blk_loc] as indexer to keep ndim=2, this already results in a + # copy + values = values[[blk_loc]] + self._iset_split_block(blkno, [blk_loc], values) + + # this manager is only created temporarily to mutate the values in place + # so don't track references, otherwise the `setitem` would perform CoW again + col_mgr = self.iget(loc, track_ref=False) + if inplace_only: + col_mgr.setitem_inplace(idx, value) + else: + new_mgr = col_mgr.setitem((idx,), value) + self.iset(loc, new_mgr._block.values, inplace=True) + + def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None: + """ + Insert item at selected position. + + Parameters + ---------- + loc : int + item : hashable + value : np.ndarray or ExtensionArray + refs : The reference tracking object of the value to set. 
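+
+        Notes
+        -----
+        Illustrative sketch: ``DataFrame.insert`` funnels into this method;
+        e.g. ``df.insert(0, "new", arr)`` ends up appending one new
+        single-column block instead of consolidating, which is why many
+        repeated inserts trigger the fragmentation ``PerformanceWarning``
+        emitted below.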
+ """ + # insert to the axis; this could possibly raise a TypeError + new_axis = self.items.insert(loc, item) + + if value.ndim == 2: + value = value.T + if len(value) > 1: + raise ValueError( + f"Expected a 1D array, got an array with shape {value.T.shape}" + ) + else: + value = ensure_block_shape(value, ndim=self.ndim) + + bp = BlockPlacement(slice(loc, loc + 1)) + # TODO(CoW) do we always "own" the passed `value`? + block = new_block_2d(values=value, placement=bp, refs=refs) + + if not len(self.blocks): + # Fastpath + self._blklocs = np.array([0], dtype=np.intp) + self._blknos = np.array([0], dtype=np.intp) + else: + self._insert_update_mgr_locs(loc) + self._insert_update_blklocs_and_blknos(loc) + + self.axes[0] = new_axis + self.blocks += (block,) + + self._known_consolidated = False + + if sum(not block.is_extension for block in self.blocks) > 100: + warnings.warn( + "DataFrame is highly fragmented. This is usually the result " + "of calling `frame.insert` many times, which has poor performance. " + "Consider joining all columns at once using pd.concat(axis=1) " + "instead. To get a de-fragmented frame, use `newframe = frame.copy()`", + PerformanceWarning, + stacklevel=find_stack_level(), + ) + + def _insert_update_mgr_locs(self, loc) -> None: + """ + When inserting a new Block at location 'loc', we increment + all of the mgr_locs of blocks above that by one. + """ + for blkno, count in _fast_count_smallints(self.blknos[loc:]): + # .620 this way, .326 of which is in increment_above + blk = self.blocks[blkno] + blk._mgr_locs = blk._mgr_locs.increment_above(loc) + + def _insert_update_blklocs_and_blknos(self, loc) -> None: + """ + When inserting a new Block at location 'loc', we update our + _blklocs and _blknos. + """ + + # Accessing public blklocs ensures the public versions are initialized + if loc == self.blklocs.shape[0]: + # np.append is a lot faster, let's use it if we can. + self._blklocs = np.append(self._blklocs, 0) + self._blknos = np.append(self._blknos, len(self.blocks)) + elif loc == 0: + # np.append is a lot faster, let's use it if we can. + self._blklocs = np.append(self._blklocs[::-1], 0)[::-1] + self._blknos = np.append(self._blknos[::-1], len(self.blocks))[::-1] + else: + new_blklocs, new_blknos = libinternals.update_blklocs_and_blknos( + self.blklocs, self.blknos, loc, len(self.blocks) + ) + self._blklocs = new_blklocs + self._blknos = new_blknos + + def idelete(self, indexer) -> BlockManager: + """ + Delete selected locations, returning a new BlockManager. + """ + is_deleted = np.zeros(self.shape[0], dtype=np.bool_) + is_deleted[indexer] = True + taker = (~is_deleted).nonzero()[0] + + nbs = self._slice_take_blocks_ax0(taker, only_slice=True, ref_inplace_op=True) + new_columns = self.items[~is_deleted] + axes = [new_columns, self.axes[1]] + return type(self)(tuple(nbs), axes, verify_integrity=False) + + # ---------------------------------------------------------------- + # Block-wise Operation + + def grouped_reduce(self, func: Callable) -> Self: + """ + Apply grouped reduction function blockwise, returning a new BlockManager. + + Parameters + ---------- + func : grouped reduction function + + Returns + ------- + BlockManager + """ + result_blocks: list[Block] = [] + + for blk in self.blocks: + if blk.is_object: + # split on object-dtype blocks bc some columns may raise + # while others do not. 
+ for sb in blk._split(): + applied = sb.apply(func) + result_blocks = extend_blocks(applied, result_blocks) + else: + applied = blk.apply(func) + result_blocks = extend_blocks(applied, result_blocks) + + if len(result_blocks) == 0: + nrows = 0 + else: + nrows = result_blocks[0].values.shape[-1] + index = Index(range(nrows)) + + return type(self).from_blocks(result_blocks, [self.axes[0], index]) + + def reduce(self, func: Callable) -> Self: + """ + Apply reduction function blockwise, returning a single-row BlockManager. + + Parameters + ---------- + func : reduction function + + Returns + ------- + BlockManager + """ + # If 2D, we assume that we're operating column-wise + assert self.ndim == 2 + + res_blocks: list[Block] = [] + for blk in self.blocks: + nbs = blk.reduce(func) + res_blocks.extend(nbs) + + index = Index([None]) # placeholder + new_mgr = type(self).from_blocks(res_blocks, [self.items, index]) + return new_mgr + + def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager: + """ + Apply array_op blockwise with another (aligned) BlockManager. + """ + return operate_blockwise(self, other, array_op) + + def _equal_values(self: BlockManager, other: BlockManager) -> bool: + """ + Used in .equals defined in base class. Only check the column values + assuming shape and indexes have already been checked. + """ + return blockwise_all(self, other, array_equals) + + def quantile( + self, + *, + qs: Index, # with dtype float 64 + interpolation: QuantileInterpolation = "linear", + ) -> Self: + """ + Iterate over blocks applying quantile reduction. + This routine is intended for reduction type operations and + will do inference on the generated blocks. + + Parameters + ---------- + interpolation : type of interpolation, default 'linear' + qs : list of the quantiles to be computed + + Returns + ------- + BlockManager + """ + # Series dispatches to DataFrame for quantile, which allows us to + # simplify some of the code here and in the blocks + assert self.ndim >= 2 + assert is_list_like(qs) # caller is responsible for this + + new_axes = list(self.axes) + new_axes[1] = Index(qs, dtype=np.float64) + + blocks = [ + blk.quantile(qs=qs, interpolation=interpolation) for blk in self.blocks + ] + + return type(self)(blocks, new_axes) + + # ---------------------------------------------------------------- + + def unstack(self, unstacker, fill_value) -> BlockManager: + """ + Return a BlockManager with all blocks unstacked. + + Parameters + ---------- + unstacker : reshape._Unstacker + fill_value : Any + fill_value for newly introduced missing values. 
+
+        Returns
+        -------
+        unstacked : BlockManager
+        """
+        new_columns = unstacker.get_new_columns(self.items)
+        new_index = unstacker.new_index
+
+        allow_fill = not unstacker.mask_all
+        if allow_fill:
+            # calculating the full mask once and passing it to Block._unstack is
+            # faster than recalculating it in each repeated call
+            new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape)
+            needs_masking = new_mask2D.any(axis=0)
+        else:
+            needs_masking = np.zeros(unstacker.full_shape[1], dtype=bool)
+
+        new_blocks: list[Block] = []
+        columns_mask: list[np.ndarray] = []
+
+        if len(self.items) == 0:
+            factor = 1
+        else:
+            fac = len(new_columns) / len(self.items)
+            assert fac == int(fac)
+            factor = int(fac)
+
+        for blk in self.blocks:
+            mgr_locs = blk.mgr_locs
+            new_placement = mgr_locs.tile_for_unstack(factor)
+
+            blocks, mask = blk._unstack(
+                unstacker,
+                fill_value,
+                new_placement=new_placement,
+                needs_masking=needs_masking,
+            )
+
+            new_blocks.extend(blocks)
+            columns_mask.extend(mask)
+
+            # Block._unstack should ensure this holds:
+            assert mask.sum() == sum(len(nb._mgr_locs) for nb in blocks)
+            # In turn this ensures that in the BlockManager call below
+            # we have len(new_columns) == sum(x.shape[0] for x in new_blocks)
+            # which suffices to allow us to pass verify_integrity=False
+
+        new_columns = new_columns[columns_mask]
+
+        bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False)
+        return bm
+
+    def to_dict(self, copy: bool = True) -> dict[str, Self]:
+        """
+        Return a dict of str(dtype) -> BlockManager
+
+        Parameters
+        ----------
+        copy : bool, default True
+
+        Returns
+        -------
+        values : a dict of str(dtype) -> BlockManager
+        """
+
+        bd: dict[str, list[Block]] = {}
+        for b in self.blocks:
+            bd.setdefault(str(b.dtype), []).append(b)
+
+        # TODO(EA2D): the combine will be unnecessary with 2D EAs
+        return {dtype: self._combine(blocks, copy=copy) for dtype, blocks in bd.items()}
+
+    def as_array(
+        self,
+        dtype: np.dtype | None = None,
+        copy: bool = False,
+        na_value: object = lib.no_default,
+    ) -> np.ndarray:
+        """
+        Convert the BlockManager data into a NumPy array.
+
+        Parameters
+        ----------
+        dtype : np.dtype or None, default None
+            Data type of the return array.
+        copy : bool, default False
+            If True then guarantee that a copy is returned. A value of
+            False does not guarantee that the underlying data is not
+            copied.
+        na_value : object, default lib.no_default
+            Value to be used as the missing value sentinel.
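+            When provided (and the data is not already float with NaN as the
+            sentinel), the result is always a copy, so the original blocks
+            are not mutated.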
+
+        Returns
+        -------
+        arr : ndarray
+        """
+        passed_nan = lib.is_float(na_value) and isna(na_value)
+
+        # TODO(CoW) handle case where resulting array is a view
+        if len(self.blocks) == 0:
+            arr = np.empty(self.shape, dtype=float)
+            return arr.transpose()
+
+        if self.is_single_block:
+            blk = self.blocks[0]
+
+            if na_value is not lib.no_default:
+                # We want to copy when na_value is provided to avoid
+                # mutating the original object
+                if lib.is_np_dtype(blk.dtype, "f") and passed_nan:
+                    # We are already numpy-float and na_value=np.nan
+                    pass
+                else:
+                    copy = True
+
+            if blk.is_extension:
+                # Avoid implicit conversion of extension blocks to object
+
+                # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no
+                # attribute "to_numpy"
+                arr = blk.values.to_numpy(  # type: ignore[union-attr]
+                    dtype=dtype,
+                    na_value=na_value,
+                    copy=copy,
+                ).reshape(blk.shape)
+            else:
+                arr = np.array(blk.values, dtype=dtype, copy=copy)
+
+            if using_copy_on_write() and not copy:
+                arr = arr.view()
+                arr.flags.writeable = False
+        else:
+            arr = self._interleave(dtype=dtype, na_value=na_value)
+            # The underlying data was copied within _interleave, so no need
+            # to further copy if copy=True or setting na_value
+
+        if na_value is lib.no_default:
+            pass
+        elif arr.dtype.kind == "f" and passed_nan:
+            pass
+        else:
+            arr[isna(arr)] = na_value
+
+        return arr.transpose()
+
+    def _interleave(
+        self,
+        dtype: np.dtype | None = None,
+        na_value: object = lib.no_default,
+    ) -> np.ndarray:
+        """
+        Return ndarray from blocks with specified item order;
+        items must be contained in the blocks.
+        """
+        if not dtype:
+            # Incompatible types in assignment (expression has type
+            # "Optional[Union[dtype[Any], ExtensionDtype]]", variable has
+            # type "Optional[dtype[Any]]")
+            dtype = interleaved_dtype(  # type: ignore[assignment]
+                [blk.dtype for blk in self.blocks]
+            )
+
+        # error: Argument 1 to "ensure_np_dtype" has incompatible type
+        # "Optional[dtype[Any]]"; expected "Union[dtype[Any], ExtensionDtype]"
+        dtype = ensure_np_dtype(dtype)  # type: ignore[arg-type]
+        result = np.empty(self.shape, dtype=dtype)
+
+        itemmask = np.zeros(self.shape[0])
+
+        if dtype == np.dtype("object") and na_value is lib.no_default:
+            # much more performant than using to_numpy below
+            for blk in self.blocks:
+                rl = blk.mgr_locs
+                arr = blk.get_values(dtype)
+                result[rl.indexer] = arr
+                itemmask[rl.indexer] = 1
+            return result
+
+        for blk in self.blocks:
+            rl = blk.mgr_locs
+            if blk.is_extension:
+                # Avoid implicit conversion of extension blocks to object
+
+                # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no
+                # attribute "to_numpy"
+                arr = blk.values.to_numpy(  # type: ignore[union-attr]
+                    dtype=dtype,
+                    na_value=na_value,
+                )
+            else:
+                arr = blk.get_values(dtype)
+            result[rl.indexer] = arr
+            itemmask[rl.indexer] = 1
+
+        if not itemmask.all():
+            raise AssertionError("Some items were not contained in blocks")
+
+        return result
+
+    # ----------------------------------------------------------------
+    # Consolidation
+
+    def is_consolidated(self) -> bool:
+        """
+        Return True if the blocks are consolidated, i.e. no two
+        consolidatable blocks share a dtype.
+        """
+        if not self._known_consolidated:
+            self._consolidate_check()
+        return self._is_consolidated
+
+    def _consolidate_check(self) -> None:
+        if len(self.blocks) == 1:
+            # fastpath
+            self._is_consolidated = True
+            self._known_consolidated = True
+            return
+        dtypes = [blk.dtype for blk in self.blocks if blk._can_consolidate]
+        self._is_consolidated = len(dtypes) == len(set(dtypes))
+        self._known_consolidated = True
+
+    def 
_consolidate_inplace(self) -> None:
+        # In general, _consolidate_inplace should only be called via
+        # DataFrame._consolidate_inplace, otherwise we will fail to invalidate
+        # the DataFrame's _item_cache. The exception is for newly-created
+        # BlockManager objects not yet attached to a DataFrame.
+        if not self.is_consolidated():
+            self.blocks = _consolidate(self.blocks)
+            self._is_consolidated = True
+            self._known_consolidated = True
+            self._rebuild_blknos_and_blklocs()
+
+    # ----------------------------------------------------------------
+    # Concatenation
+
+    @classmethod
+    def concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self:
+        """
+        Concatenate uniformly-indexed BlockManagers horizontally.
+        """
+        offset = 0
+        blocks: list[Block] = []
+        for mgr in mgrs:
+            for blk in mgr.blocks:
+                # We need to slice the block here (slice_block_columns),
+                # otherwise we would be altering blk.mgr_locs in place, which
+                # would render it invalid. This is only relevant in the
+                # copy=False case.
+                nb = blk.slice_block_columns(slice(None))
+                nb._mgr_locs = nb._mgr_locs.add(offset)
+                blocks.append(nb)
+
+            offset += len(mgr.items)
+
+        new_mgr = cls(tuple(blocks), axes)
+        return new_mgr
+
+    @classmethod
+    def concat_vertical(cls, mgrs: list[Self], axes: list[Index]) -> Self:
+        """
+        Concatenate uniformly-indexed BlockManagers vertically.
+        """
+        raise NotImplementedError("This logic lives (for now) in internals.concat")
+
+
+class SingleBlockManager(BaseBlockManager, SingleDataManager):
+    """manage a single block with a single axis (the data of a Series)"""
+
+    @property
+    def ndim(self) -> Literal[1]:
+        return 1
+
+    _is_consolidated = True
+    _known_consolidated = True
+    __slots__ = ()
+    is_single_block = True
+
+    def __init__(
+        self,
+        block: Block,
+        axis: Index,
+        verify_integrity: bool = False,
+    ) -> None:
+        # Assertions disabled for performance
+        # assert isinstance(block, Block), type(block)
+        # assert isinstance(axis, Index), type(axis)
+
+        self.axes = [axis]
+        self.blocks = (block,)
+
+    @classmethod
+    def from_blocks(
+        cls,
+        blocks: list[Block],
+        axes: list[Index],
+    ) -> Self:
+        """
+        Constructor for BlockManager and SingleBlockManager with same signature.
+        """
+        assert len(blocks) == 1
+        assert len(axes) == 1
+        return cls(blocks[0], axes[0], verify_integrity=False)
+
+    @classmethod
+    def from_array(
+        cls, array: ArrayLike, index: Index, refs: BlockValuesRefs | None = None
+    ) -> SingleBlockManager:
+        """
+        Constructor for the case where we have an array that is not yet a Block.
+        """
+        array = maybe_coerce_values(array)
+        bp = BlockPlacement(slice(0, len(index)))
+        block = new_block(array, placement=bp, ndim=1, refs=refs)
+        return cls(block, index)
+
+    def to_2d_mgr(self, columns: Index) -> BlockManager:
+        """
+        Manager analogue of Series.to_frame
+        """
+        blk = self.blocks[0]
+        arr = ensure_block_shape(blk.values, ndim=2)
+        bp = BlockPlacement(0)
+        new_blk = type(blk)(arr, placement=bp, ndim=2, refs=blk.refs)
+        axes = [columns, self.axes[0]]
+        return BlockManager([new_blk], axes=axes, verify_integrity=False)
+
+    def _has_no_reference(self, i: int = 0) -> bool:
+        """
+        Check whether column `i` has references
+        (whether it references another array or is itself being referenced).
+        Returns True if the column has no references.
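+        For a SingleBlockManager there is only one block, so the ``i``
+        argument is ignored.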
+ """ + return not self.blocks[0].refs.has_reference() + + def __getstate__(self): + block_values = [b.values for b in self.blocks] + block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] + axes_array = list(self.axes) + + extra_state = { + "0.14.1": { + "axes": axes_array, + "blocks": [ + {"values": b.values, "mgr_locs": b.mgr_locs.indexer} + for b in self.blocks + ], + } + } + + # First three elements of the state are to maintain forward + # compatibility with 0.13.1. + return axes_array, block_values, block_items, extra_state + + def __setstate__(self, state): + def unpickle_block(values, mgr_locs, ndim: int) -> Block: + # TODO(EA2D): ndim would be unnecessary with 2D EAs + # older pickles may store e.g. DatetimeIndex instead of DatetimeArray + values = extract_array(values, extract_numpy=True) + if not isinstance(mgr_locs, BlockPlacement): + mgr_locs = BlockPlacement(mgr_locs) + + values = maybe_coerce_values(values) + return new_block(values, placement=mgr_locs, ndim=ndim) + + if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]: + state = state[3]["0.14.1"] + self.axes = [ensure_index(ax) for ax in state["axes"]] + ndim = len(self.axes) + self.blocks = tuple( + unpickle_block(b["values"], b["mgr_locs"], ndim=ndim) + for b in state["blocks"] + ) + else: + raise NotImplementedError("pre-0.14.1 pickles are no longer supported") + + self._post_setstate() + + def _post_setstate(self) -> None: + pass + + @cache_readonly + def _block(self) -> Block: + return self.blocks[0] + + @property + def _blknos(self): + """compat with BlockManager""" + return None + + @property + def _blklocs(self): + """compat with BlockManager""" + return None + + def get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> Self: + # similar to get_slice, but not restricted to slice indexer + blk = self._block + if using_copy_on_write() and len(indexer) > 0 and indexer.all(): + return type(self)(blk.copy(deep=False), self.index) + array = blk.values[indexer] + + bp = BlockPlacement(slice(0, len(array))) + # TODO(CoW) in theory only need to track reference if new_array is a view + block = type(blk)(array, placement=bp, ndim=1, refs=blk.refs) + + new_idx = self.index[indexer] + return type(self)(block, new_idx) + + def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleBlockManager: + # Assertion disabled for performance + # assert isinstance(slobj, slice), type(slobj) + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + blk = self._block + array = blk.values[slobj] + bp = BlockPlacement(slice(0, len(array))) + # TODO this method is only used in groupby SeriesSplitter at the moment, + # so passing refs is not yet covered by the tests + block = type(blk)(array, placement=bp, ndim=1, refs=blk.refs) + new_index = self.index._getitem_slice(slobj) + return type(self)(block, new_index) + + @property + def index(self) -> Index: + return self.axes[0] + + @property + def dtype(self) -> DtypeObj: + return self._block.dtype + + def get_dtypes(self) -> npt.NDArray[np.object_]: + return np.array([self._block.dtype], dtype=object) + + def external_values(self): + """The array that Series.values returns""" + return self._block.external_values() + + def internal_values(self): + """The array that Series._values returns""" + return self._block.values + + def array_values(self): + """The array that Series.array returns""" + return self._block.array_values + + def get_numeric_data(self, copy: bool = False) -> Self: + if self._block.is_numeric: + return 
self.copy(deep=copy) + return self.make_empty() + + @property + def _can_hold_na(self) -> bool: + return self._block._can_hold_na + + def setitem_inplace(self, indexer, value) -> None: + """ + Set values with indexer. + + For Single[Block/Array]Manager, this backs s[indexer] = value + + This is an inplace version of `setitem()`, mutating the manager/values + in place, not returning a new Manager (and Block), and thus never changing + the dtype. + """ + if using_copy_on_write() and not self._has_no_reference(0): + self.blocks = (self._block.copy(),) + self._cache.clear() + + super().setitem_inplace(indexer, value) + + def idelete(self, indexer) -> SingleBlockManager: + """ + Delete single location from SingleBlockManager. + + Ensures that self.blocks doesn't become empty. + """ + nb = self._block.delete(indexer)[0] + self.blocks = (nb,) + self.axes[0] = self.axes[0].delete(indexer) + self._cache.clear() + return self + + def fast_xs(self, loc): + """ + fast path for getting a cross-section + return a view of the data + """ + raise NotImplementedError("Use series._values[loc] instead") + + def set_values(self, values: ArrayLike) -> None: + """ + Set the values of the single block in place. + + Use at your own risk! This does not check if the passed values are + valid for the current Block/SingleBlockManager (length, dtype, etc). + """ + # TODO(CoW) do we need to handle copy on write here? Currently this is + # only used for FrameColumnApply.series_generator (what if apply is + # mutating inplace?) + self.blocks[0].values = values + self.blocks[0]._mgr_locs = BlockPlacement(slice(len(values))) + + def _equal_values(self, other: Self) -> bool: + """ + Used in .equals defined in base class. Only check the column values + assuming shape and indexes have already been checked. 
+ """ + # For SingleBlockManager (i.e.Series) + if other.ndim != 1: + return False + left = self.blocks[0].values + right = other.blocks[0].values + return array_equals(left, right) + + +# -------------------------------------------------------------------- +# Constructor Helpers + + +def create_block_manager_from_blocks( + blocks: list[Block], + axes: list[Index], + consolidate: bool = True, + verify_integrity: bool = True, +) -> BlockManager: + # If verify_integrity=False, then caller is responsible for checking + # all(x.shape[-1] == len(axes[1]) for x in blocks) + # sum(x.shape[0] for x in blocks) == len(axes[0]) + # set(x for blk in blocks for x in blk.mgr_locs) == set(range(len(axes[0]))) + # all(blk.ndim == 2 for blk in blocks) + # This allows us to safely pass verify_integrity=False + + try: + mgr = BlockManager(blocks, axes, verify_integrity=verify_integrity) + + except ValueError as err: + arrays = [blk.values for blk in blocks] + tot_items = sum(arr.shape[0] for arr in arrays) + raise_construction_error(tot_items, arrays[0].shape[1:], axes, err) + + if consolidate: + mgr._consolidate_inplace() + return mgr + + +def create_block_manager_from_column_arrays( + arrays: list[ArrayLike], + axes: list[Index], + consolidate: bool, + refs: list, +) -> BlockManager: + # Assertions disabled for performance (caller is responsible for verifying) + # assert isinstance(axes, list) + # assert all(isinstance(x, Index) for x in axes) + # assert all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays) + # assert all(type(x) is not NumpyExtensionArray for x in arrays) + # assert all(x.ndim == 1 for x in arrays) + # assert all(len(x) == len(axes[1]) for x in arrays) + # assert len(arrays) == len(axes[0]) + # These last three are sufficient to allow us to safely pass + # verify_integrity=False below. + + try: + blocks = _form_blocks(arrays, consolidate, refs) + mgr = BlockManager(blocks, axes, verify_integrity=False) + except ValueError as e: + raise_construction_error(len(arrays), arrays[0].shape, axes, e) + if consolidate: + mgr._consolidate_inplace() + return mgr + + +def raise_construction_error( + tot_items: int, + block_shape: Shape, + axes: list[Index], + e: ValueError | None = None, +): + """raise a helpful message about our construction""" + passed = tuple(map(int, [tot_items] + list(block_shape))) + # Correcting the user facing error message during dataframe construction + if len(passed) <= 2: + passed = passed[::-1] + + implied = tuple(len(ax) for ax in axes) + # Correcting the user facing error message during dataframe construction + if len(implied) <= 2: + implied = implied[::-1] + + # We return the exception object instead of raising it so that we + # can raise it in the caller; mypy plays better with that + if passed == implied and e is not None: + raise e + if block_shape[0] == 0: + raise ValueError("Empty data passed with indices specified.") + raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}") + + +# ----------------------------------------------------------------------- + + +def _grouping_func(tup: tuple[int, ArrayLike]) -> tuple[int, DtypeObj]: + dtype = tup[1].dtype + + if is_1d_only_ea_dtype(dtype): + # We know these won't be consolidated, so don't need to group these. 
+ # This avoids expensive comparisons of CategoricalDtype objects + sep = id(dtype) + else: + sep = 0 + + return sep, dtype + + +def _form_blocks(arrays: list[ArrayLike], consolidate: bool, refs: list) -> list[Block]: + tuples = list(enumerate(arrays)) + + if not consolidate: + return _tuples_to_blocks_no_consolidate(tuples, refs) + + # when consolidating, we can ignore refs (either stacking always copies, + # or the EA is already copied in the calling dict_to_mgr) + # TODO(CoW) check if this is also valid for rec_array_to_mgr + + # group by dtype + grouper = itertools.groupby(tuples, _grouping_func) + + nbs: list[Block] = [] + for (_, dtype), tup_block in grouper: + block_type = get_block_type(dtype) + + if isinstance(dtype, np.dtype): + is_dtlike = dtype.kind in "mM" + + if issubclass(dtype.type, (str, bytes)): + dtype = np.dtype(object) + + values, placement = _stack_arrays(list(tup_block), dtype) + if is_dtlike: + values = ensure_wrapped_if_datetimelike(values) + blk = block_type(values, placement=BlockPlacement(placement), ndim=2) + nbs.append(blk) + + elif is_1d_only_ea_dtype(dtype): + dtype_blocks = [ + block_type(x[1], placement=BlockPlacement(x[0]), ndim=2) + for x in tup_block + ] + nbs.extend(dtype_blocks) + + else: + dtype_blocks = [ + block_type( + ensure_block_shape(x[1], 2), placement=BlockPlacement(x[0]), ndim=2 + ) + for x in tup_block + ] + nbs.extend(dtype_blocks) + return nbs + + +def _tuples_to_blocks_no_consolidate(tuples, refs) -> list[Block]: + # tuples produced within _form_blocks are of the form (placement, array) + return [ + new_block_2d( + ensure_block_shape(arr, ndim=2), placement=BlockPlacement(i), refs=ref + ) + for ((i, arr), ref) in zip(tuples, refs) + ] + + +def _stack_arrays(tuples, dtype: np.dtype): + placement, arrays = zip(*tuples) + + first = arrays[0] + shape = (len(arrays),) + first.shape + + stacked = np.empty(shape, dtype=dtype) + for i, arr in enumerate(arrays): + stacked[i] = arr + + return stacked, placement + + +def _consolidate(blocks: tuple[Block, ...]) -> tuple[Block, ...]: + """ + Merge blocks having same dtype, exclude non-consolidating blocks + """ + # sort by _can_consolidate, dtype + gkey = lambda x: x._consolidate_key + grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) + + new_blocks: list[Block] = [] + for (_can_consolidate, dtype), group_blocks in grouper: + merged_blocks, _ = _merge_blocks( + list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate + ) + new_blocks = extend_blocks(merged_blocks, new_blocks) + return tuple(new_blocks) + + +def _merge_blocks( + blocks: list[Block], dtype: DtypeObj, can_consolidate: bool +) -> tuple[list[Block], bool]: + if len(blocks) == 1: + return blocks, False + + if can_consolidate: + # TODO: optimization potential in case all mgrs contain slices and + # combination of those slices is a slice, too. 
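+        # Merge strategy: stack the values of all same-dtype blocks, then
+        # argsort the combined mgr_locs so that the rows of the merged block
+        # line up with their original column positions.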
+        new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
+
+        new_values: ArrayLike
+
+        if isinstance(blocks[0].dtype, np.dtype):
+            # error: List comprehension has incompatible type List[Union[ndarray,
+            # ExtensionArray]]; expected List[Union[complex, generic,
+            # Sequence[Union[int, float, complex, str, bytes, generic]],
+            # Sequence[Sequence[Any]], SupportsArray]]
+            new_values = np.vstack([b.values for b in blocks])  # type: ignore[misc]
+        else:
+            bvals = [blk.values for blk in blocks]
+            bvals2 = cast(Sequence[NDArrayBackedExtensionArray], bvals)
+            new_values = bvals2[0]._concat_same_type(bvals2, axis=0)
+
+        argsort = np.argsort(new_mgr_locs)
+        new_values = new_values[argsort]
+        new_mgr_locs = new_mgr_locs[argsort]
+
+        bp = BlockPlacement(new_mgr_locs)
+        return [new_block_2d(new_values, placement=bp)], True
+
+    # can't consolidate --> no merge
+    return blocks, False
+
+
+def _fast_count_smallints(arr: npt.NDArray[np.intp]):
+    """
+    Faster version of ``zip(*np.unique(arr, return_counts=True))`` for
+    sequences of small non-negative ints.
+    """
+    counts = np.bincount(arr)
+    nz = counts.nonzero()[0]
+    # Note: list(zip(...)) outperforms list(np.c_[nz, counts[nz]]) here,
+    #  in one benchmark by a factor of 11
+    return zip(nz, counts[nz])
+
+
+def _preprocess_slice_or_indexer(
+    slice_or_indexer: slice | np.ndarray, length: int, allow_fill: bool
+):
+    if isinstance(slice_or_indexer, slice):
+        return (
+            "slice",
+            slice_or_indexer,
+            libinternals.slice_len(slice_or_indexer, length),
+        )
+    else:
+        if (
+            not isinstance(slice_or_indexer, np.ndarray)
+            or slice_or_indexer.dtype.kind != "i"
+        ):
+            dtype = getattr(slice_or_indexer, "dtype", None)
+            raise TypeError(type(slice_or_indexer), dtype)
+
+        indexer = ensure_platform_int(slice_or_indexer)
+        if not allow_fill:
+            indexer = maybe_convert_indices(indexer, length)
+        return "fancy", indexer, len(indexer)
+
+
+def make_na_array(dtype: DtypeObj, shape: Shape, fill_value) -> ArrayLike:
+    if isinstance(dtype, DatetimeTZDtype):
+        # NB: exclude e.g. 
pyarrow[dt64tz] dtypes + i8values = np.full(shape, fill_value._value) + return DatetimeArray(i8values, dtype=dtype) + + elif is_1d_only_ea_dtype(dtype): + dtype = cast(ExtensionDtype, dtype) + cls = dtype.construct_array_type() + + missing_arr = cls._from_sequence([], dtype=dtype) + ncols, nrows = shape + assert ncols == 1, ncols + empty_arr = -1 * np.ones((nrows,), dtype=np.intp) + return missing_arr.take(empty_arr, allow_fill=True, fill_value=fill_value) + elif isinstance(dtype, ExtensionDtype): + # TODO: no tests get here, a handful would if we disabled + # the dt64tz special-case above (which is faster) + cls = dtype.construct_array_type() + missing_arr = cls._empty(shape=shape, dtype=dtype) + missing_arr[:] = fill_value + return missing_arr + else: + # NB: we should never get here with dtype integer or bool; + # if we did, the missing_arr.fill would cast to gibberish + missing_arr = np.empty(shape, dtype=dtype) + missing_arr.fill(fill_value) + + if dtype.kind in "mM": + missing_arr = ensure_wrapped_if_datetimelike(missing_arr) + return missing_arr diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/ops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/ops.py new file mode 100644 index 00000000..cf9466c0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/internals/ops.py @@ -0,0 +1,154 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + NamedTuple, +) + +from pandas.core.dtypes.common import is_1d_only_ea_dtype + +if TYPE_CHECKING: + from collections.abc import Iterator + + from pandas._libs.internals import BlockPlacement + from pandas._typing import ArrayLike + + from pandas.core.internals.blocks import Block + from pandas.core.internals.managers import BlockManager + + +class BlockPairInfo(NamedTuple): + lvals: ArrayLike + rvals: ArrayLike + locs: BlockPlacement + left_ea: bool + right_ea: bool + rblk: Block + + +def _iter_block_pairs( + left: BlockManager, right: BlockManager +) -> Iterator[BlockPairInfo]: + # At this point we have already checked the parent DataFrames for + # assert rframe._indexed_same(lframe) + + for blk in left.blocks: + locs = blk.mgr_locs + blk_vals = blk.values + + left_ea = blk_vals.ndim == 1 + + rblks = right._slice_take_blocks_ax0(locs.indexer, only_slice=True) + + # Assertions are disabled for performance, but should hold: + # if left_ea: + # assert len(locs) == 1, locs + # assert len(rblks) == 1, rblks + # assert rblks[0].shape[0] == 1, rblks[0].shape + + for rblk in rblks: + right_ea = rblk.values.ndim == 1 + + lvals, rvals = _get_same_shape_values(blk, rblk, left_ea, right_ea) + info = BlockPairInfo(lvals, rvals, locs, left_ea, right_ea, rblk) + yield info + + +def operate_blockwise( + left: BlockManager, right: BlockManager, array_op +) -> BlockManager: + # At this point we have already checked the parent DataFrames for + # assert rframe._indexed_same(lframe) + + res_blks: list[Block] = [] + for lvals, rvals, locs, left_ea, right_ea, rblk in _iter_block_pairs(left, right): + res_values = array_op(lvals, rvals) + if ( + left_ea + and not right_ea + and hasattr(res_values, "reshape") + and not is_1d_only_ea_dtype(res_values.dtype) + ): + res_values = res_values.reshape(1, -1) + nbs = rblk._split_op_result(res_values) + + # Assertions are disabled for performance, but should hold: + # if right_ea or left_ea: + # assert len(nbs) == 1 + # else: + # assert res_values.shape == lvals.shape, (res_values.shape, lvals.shape) + + _reset_block_mgr_locs(nbs, locs) + + 
res_blks.extend(nbs) + + # Assertions are disabled for performance, but should hold: + # slocs = {y for nb in res_blks for y in nb.mgr_locs.as_array} + # nlocs = sum(len(nb.mgr_locs.as_array) for nb in res_blks) + # assert nlocs == len(left.items), (nlocs, len(left.items)) + # assert len(slocs) == nlocs, (len(slocs), nlocs) + # assert slocs == set(range(nlocs)), slocs + + new_mgr = type(right)(tuple(res_blks), axes=right.axes, verify_integrity=False) + return new_mgr + + +def _reset_block_mgr_locs(nbs: list[Block], locs) -> None: + """ + Reset mgr_locs to correspond to our original DataFrame. + """ + for nb in nbs: + nblocs = locs[nb.mgr_locs.indexer] + nb.mgr_locs = nblocs + # Assertions are disabled for performance, but should hold: + # assert len(nblocs) == nb.shape[0], (len(nblocs), nb.shape) + # assert all(x in locs.as_array for x in nb.mgr_locs.as_array) + + +def _get_same_shape_values( + lblk: Block, rblk: Block, left_ea: bool, right_ea: bool +) -> tuple[ArrayLike, ArrayLike]: + """ + Slice lblk.values to align with rblk. Squeeze if we have EAs. + """ + lvals = lblk.values + rvals = rblk.values + + # Require that the indexing into lvals be slice-like + assert rblk.mgr_locs.is_slice_like, rblk.mgr_locs + + # TODO(EA2D): with 2D EAs only this first clause would be needed + if not (left_ea or right_ea): + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[Union[ndarray, slice], slice]" + lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[call-overload] + assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape) + elif left_ea and right_ea: + assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape) + elif right_ea: + # lvals are 2D, rvals are 1D + + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[Union[ndarray, slice], slice]" + lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[call-overload] + assert lvals.shape[0] == 1, lvals.shape + lvals = lvals[0, :] + else: + # lvals are 1D, rvals are 2D + assert rvals.shape[0] == 1, rvals.shape + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[int, slice]" + rvals = rvals[0, :] # type: ignore[call-overload] + + return lvals, rvals + + +def blockwise_all(left: BlockManager, right: BlockManager, op) -> bool: + """ + Blockwise `all` reduction. + """ + for info in _iter_block_pairs(left, right): + res = op(info.lvals, info.rvals) + if not res: + return False + return True diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/describe.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/describe.py new file mode 100644 index 00000000..5bb6bebd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/describe.py @@ -0,0 +1,414 @@ +""" +Module responsible for execution of NDFrame.describe() method. + +Method NDFrame.describe() delegates actual execution to function describe_ndframe(). 
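+
+Which describer runs depends on the dimensionality of the input: a
+SeriesDescriber for 1-D objects and a DataFrameDescriber for 2-D ones.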
+""" +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +from typing import ( + TYPE_CHECKING, + Callable, + cast, +) + +import numpy as np + +from pandas._libs.tslibs import Timestamp +from pandas._typing import ( + DtypeObj, + NDFrameT, + npt, +) +from pandas.util._validators import validate_percentile + +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_numeric_dtype, +) +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + DatetimeTZDtype, + ExtensionDtype, +) + +from pandas.core.arrays.floating import Float64Dtype +from pandas.core.reshape.concat import concat + +from pandas.io.formats.format import format_percentiles + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Sequence, + ) + + from pandas import ( + DataFrame, + Series, + ) + + +def describe_ndframe( + *, + obj: NDFrameT, + include: str | Sequence[str] | None, + exclude: str | Sequence[str] | None, + percentiles: Sequence[float] | np.ndarray | None, +) -> NDFrameT: + """Describe series or dataframe. + + Called from pandas.core.generic.NDFrame.describe() + + Parameters + ---------- + obj: DataFrame or Series + Either dataframe or series to be described. + include : 'all', list-like of dtypes or None (default), optional + A white list of data types to include in the result. Ignored for ``Series``. + exclude : list-like of dtypes or None (default), optional, + A black list of data types to omit from the result. Ignored for ``Series``. + percentiles : list-like of numbers, optional + The percentiles to include in the output. All should fall between 0 and 1. + The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and + 75th percentiles. + + Returns + ------- + Dataframe or series description. + """ + percentiles = _refine_percentiles(percentiles) + + describer: NDFrameDescriberAbstract + if obj.ndim == 1: + describer = SeriesDescriber( + obj=cast("Series", obj), + ) + else: + describer = DataFrameDescriber( + obj=cast("DataFrame", obj), + include=include, + exclude=exclude, + ) + + result = describer.describe(percentiles=percentiles) + return cast(NDFrameT, result) + + +class NDFrameDescriberAbstract(ABC): + """Abstract class for describing dataframe or series. + + Parameters + ---------- + obj : Series or DataFrame + Object to be described. + """ + + def __init__(self, obj: DataFrame | Series) -> None: + self.obj = obj + + @abstractmethod + def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame | Series: + """Do describe either series or dataframe. + + Parameters + ---------- + percentiles : list-like of numbers + The percentiles to include in the output. + """ + + +class SeriesDescriber(NDFrameDescriberAbstract): + """Class responsible for creating series description.""" + + obj: Series + + def describe(self, percentiles: Sequence[float] | np.ndarray) -> Series: + describe_func = select_describe_func( + self.obj, + ) + return describe_func(self.obj, percentiles) + + +class DataFrameDescriber(NDFrameDescriberAbstract): + """Class responsible for creating dataobj description. + + Parameters + ---------- + obj : DataFrame + DataFrame to be described. + include : 'all', list-like of dtypes or None + A white list of data types to include in the result. + exclude : list-like of dtypes or None + A black list of data types to omit from the result. 
+ """ + + def __init__( + self, + obj: DataFrame, + *, + include: str | Sequence[str] | None, + exclude: str | Sequence[str] | None, + ) -> None: + self.include = include + self.exclude = exclude + + if obj.ndim == 2 and obj.columns.size == 0: + raise ValueError("Cannot describe a DataFrame without columns") + + super().__init__(obj) + + def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame: + data = self._select_data() + + ldesc: list[Series] = [] + for _, series in data.items(): + describe_func = select_describe_func(series) + ldesc.append(describe_func(series, percentiles)) + + col_names = reorder_columns(ldesc) + d = concat( + [x.reindex(col_names, copy=False) for x in ldesc], + axis=1, + sort=False, + ) + d.columns = data.columns.copy() + return d + + def _select_data(self) -> DataFrame: + """Select columns to be described.""" + if (self.include is None) and (self.exclude is None): + # when some numerics are found, keep only numerics + default_include: list[npt.DTypeLike] = [np.number, "datetime"] + data = self.obj.select_dtypes(include=default_include) + if len(data.columns) == 0: + data = self.obj + elif self.include == "all": + if self.exclude is not None: + msg = "exclude must be None when include is 'all'" + raise ValueError(msg) + data = self.obj + else: + data = self.obj.select_dtypes( + include=self.include, + exclude=self.exclude, + ) + return data # pyright: ignore[reportGeneralTypeIssues] + + +def reorder_columns(ldesc: Sequence[Series]) -> list[Hashable]: + """Set a convenient order for rows for display.""" + names: list[Hashable] = [] + seen_names: set[Hashable] = set() + ldesc_indexes = sorted((x.index for x in ldesc), key=len) + for idxnames in ldesc_indexes: + for name in idxnames: + if name not in seen_names: + seen_names.add(name) + names.append(name) + return names + + +def describe_numeric_1d(series: Series, percentiles: Sequence[float]) -> Series: + """Describe series containing numerical data. + + Parameters + ---------- + series : Series + Series to be described. + percentiles : list-like of numbers + The percentiles to include in the output. + """ + from pandas import Series + + formatted_percentiles = format_percentiles(percentiles) + + stat_index = ["count", "mean", "std", "min"] + formatted_percentiles + ["max"] + d = ( + [series.count(), series.mean(), series.std(), series.min()] + + series.quantile(percentiles).tolist() + + [series.max()] + ) + # GH#48340 - always return float on non-complex numeric data + dtype: DtypeObj | None + if isinstance(series.dtype, ExtensionDtype): + if isinstance(series.dtype, ArrowDtype): + if series.dtype.kind == "m": + # GH53001: describe timedeltas with object dtype + dtype = None + else: + import pyarrow as pa + + dtype = ArrowDtype(pa.float64()) + else: + dtype = Float64Dtype() + elif series.dtype.kind in "iufb": + # i.e. numeric but exclude complex dtype + dtype = np.dtype("float") + else: + dtype = None + return Series(d, index=stat_index, name=series.name, dtype=dtype) + + +def describe_categorical_1d( + data: Series, + percentiles_ignored: Sequence[float], +) -> Series: + """Describe series containing categorical data. + + Parameters + ---------- + data : Series + Series to be described. + percentiles_ignored : list-like of numbers + Ignored, but in place to unify interface. 
+ """ + names = ["count", "unique", "top", "freq"] + objcounts = data.value_counts() + count_unique = len(objcounts[objcounts != 0]) + if count_unique > 0: + top, freq = objcounts.index[0], objcounts.iloc[0] + dtype = None + else: + # If the DataFrame is empty, set 'top' and 'freq' to None + # to maintain output shape consistency + top, freq = np.nan, np.nan + dtype = "object" + + result = [data.count(), count_unique, top, freq] + + from pandas import Series + + return Series(result, index=names, name=data.name, dtype=dtype) + + +def describe_timestamp_as_categorical_1d( + data: Series, + percentiles_ignored: Sequence[float], +) -> Series: + """Describe series containing timestamp data treated as categorical. + + Parameters + ---------- + data : Series + Series to be described. + percentiles_ignored : list-like of numbers + Ignored, but in place to unify interface. + """ + names = ["count", "unique"] + objcounts = data.value_counts() + count_unique = len(objcounts[objcounts != 0]) + result = [data.count(), count_unique] + dtype = None + if count_unique > 0: + top, freq = objcounts.index[0], objcounts.iloc[0] + tz = data.dt.tz + asint = data.dropna().values.view("i8") + top = Timestamp(top) + if top.tzinfo is not None and tz is not None: + # Don't tz_localize(None) if key is already tz-aware + top = top.tz_convert(tz) + else: + top = top.tz_localize(tz) + names += ["top", "freq", "first", "last"] + result += [ + top, + freq, + Timestamp(asint.min(), tz=tz), + Timestamp(asint.max(), tz=tz), + ] + + # If the DataFrame is empty, set 'top' and 'freq' to None + # to maintain output shape consistency + else: + names += ["top", "freq"] + result += [np.nan, np.nan] + dtype = "object" + + from pandas import Series + + return Series(result, index=names, name=data.name, dtype=dtype) + + +def describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series: + """Describe series containing datetime64 dtype. + + Parameters + ---------- + data : Series + Series to be described. + percentiles : list-like of numbers + The percentiles to include in the output. + """ + # GH-30164 + from pandas import Series + + formatted_percentiles = format_percentiles(percentiles) + + stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"] + d = ( + [data.count(), data.mean(), data.min()] + + data.quantile(percentiles).tolist() + + [data.max()] + ) + return Series(d, index=stat_index, name=data.name) + + +def select_describe_func( + data: Series, +) -> Callable: + """Select proper function for describing series based on data type. + + Parameters + ---------- + data : Series + Series to be described. + """ + if is_bool_dtype(data.dtype): + return describe_categorical_1d + elif is_numeric_dtype(data): + return describe_numeric_1d + elif data.dtype.kind == "M" or isinstance(data.dtype, DatetimeTZDtype): + return describe_timestamp_1d + elif data.dtype.kind == "m": + return describe_numeric_1d + else: + return describe_categorical_1d + + +def _refine_percentiles( + percentiles: Sequence[float] | np.ndarray | None, +) -> npt.NDArray[np.float64]: + """ + Ensure that percentiles are unique and sorted. + + Parameters + ---------- + percentiles : list-like of numbers, optional + The percentiles to include in the output. 
+ """ + if percentiles is None: + return np.array([0.25, 0.5, 0.75]) + + # explicit conversion of `percentiles` to list + percentiles = list(percentiles) + + # get them all to be in [0, 1] + validate_percentile(percentiles) + + # median should always be included + if 0.5 not in percentiles: + percentiles.append(0.5) + + percentiles = np.asarray(percentiles) + + # sort and check for duplicates + unique_pcts = np.unique(percentiles) + assert percentiles is not None + if len(unique_pcts) < len(percentiles): + raise ValueError("percentiles cannot contain duplicates") + + return unique_pcts diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/selectn.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/selectn.py new file mode 100644 index 00000000..894791cb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/selectn.py @@ -0,0 +1,265 @@ +""" +Implementation of nlargest and nsmallest. +""" + +from __future__ import annotations + +from collections.abc import ( + Hashable, + Sequence, +) +from typing import ( + TYPE_CHECKING, + cast, + final, +) + +import numpy as np + +from pandas._libs import algos as libalgos + +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_complex_dtype, + is_integer_dtype, + is_list_like, + is_numeric_dtype, + needs_i8_conversion, +) +from pandas.core.dtypes.dtypes import BaseMaskedDtype + +if TYPE_CHECKING: + from pandas._typing import ( + DtypeObj, + IndexLabel, + ) + + from pandas import ( + DataFrame, + Series, + ) + + +class SelectN: + def __init__(self, obj, n: int, keep: str) -> None: + self.obj = obj + self.n = n + self.keep = keep + + if self.keep not in ("first", "last", "all"): + raise ValueError('keep must be either "first", "last" or "all"') + + def compute(self, method: str) -> DataFrame | Series: + raise NotImplementedError + + @final + def nlargest(self): + return self.compute("nlargest") + + @final + def nsmallest(self): + return self.compute("nsmallest") + + @final + @staticmethod + def is_valid_dtype_n_method(dtype: DtypeObj) -> bool: + """ + Helper function to determine if dtype is valid for + nsmallest/nlargest methods + """ + if is_numeric_dtype(dtype): + return not is_complex_dtype(dtype) + return needs_i8_conversion(dtype) + + +class SelectNSeries(SelectN): + """ + Implement n largest/smallest for Series + + Parameters + ---------- + obj : Series + n : int + keep : {'first', 'last'}, default 'first' + + Returns + ------- + nordered : Series + """ + + def compute(self, method: str) -> Series: + from pandas.core.reshape.concat import concat + + n = self.n + dtype = self.obj.dtype + if not self.is_valid_dtype_n_method(dtype): + raise TypeError(f"Cannot use method '{method}' with dtype {dtype}") + + if n <= 0: + return self.obj[[]] + + dropped = self.obj.dropna() + nan_index = self.obj.drop(dropped.index) + + # slow method + if n >= len(self.obj): + ascending = method == "nsmallest" + return self.obj.sort_values(ascending=ascending).head(n) + + # fast method + new_dtype = dropped.dtype + + # Similar to algorithms._ensure_data + arr = dropped._values + if needs_i8_conversion(arr.dtype): + arr = arr.view("i8") + elif isinstance(arr.dtype, BaseMaskedDtype): + arr = arr._data + else: + arr = np.asarray(arr) + if arr.dtype.kind == "b": + arr = arr.view(np.uint8) + + if method == "nlargest": + arr = -arr + if is_integer_dtype(new_dtype): + # GH 21426: ensure reverse ordering at boundaries + arr -= 1 + + elif is_bool_dtype(new_dtype): + # GH 26154: ensure False is smaller than True + arr = 1 - 
(-arr) + + if self.keep == "last": + arr = arr[::-1] + + nbase = n + narr = len(arr) + n = min(n, narr) + + # arr passed into kth_smallest must be contiguous. We copy + # here because kth_smallest will modify its input + kth_val = libalgos.kth_smallest(arr.copy(order="C"), n - 1) + (ns,) = np.nonzero(arr <= kth_val) + inds = ns[arr[ns].argsort(kind="mergesort")] + + if self.keep != "all": + inds = inds[:n] + findex = nbase + else: + if len(inds) < nbase <= len(nan_index) + len(inds): + findex = len(nan_index) + len(inds) + else: + findex = len(inds) + + if self.keep == "last": + # reverse indices + inds = narr - 1 - inds + + return concat([dropped.iloc[inds], nan_index]).iloc[:findex] + + +class SelectNFrame(SelectN): + """ + Implement n largest/smallest for DataFrame + + Parameters + ---------- + obj : DataFrame + n : int + keep : {'first', 'last'}, default 'first' + columns : list or str + + Returns + ------- + nordered : DataFrame + """ + + def __init__(self, obj: DataFrame, n: int, keep: str, columns: IndexLabel) -> None: + super().__init__(obj, n, keep) + if not is_list_like(columns) or isinstance(columns, tuple): + columns = [columns] + + columns = cast(Sequence[Hashable], columns) + columns = list(columns) + self.columns = columns + + def compute(self, method: str) -> DataFrame: + from pandas.core.api import Index + + n = self.n + frame = self.obj + columns = self.columns + + for column in columns: + dtype = frame[column].dtype + if not self.is_valid_dtype_n_method(dtype): + raise TypeError( + f"Column {repr(column)} has dtype {dtype}, " + f"cannot use method {repr(method)} with this dtype" + ) + + def get_indexer(current_indexer, other_indexer): + """ + Helper function to concat `current_indexer` and `other_indexer` + depending on `method` + """ + if method == "nsmallest": + return current_indexer.append(other_indexer) + else: + return other_indexer.append(current_indexer) + + # Below we save and reset the index in case index contains duplicates + original_index = frame.index + cur_frame = frame = frame.reset_index(drop=True) + cur_n = n + indexer = Index([], dtype=np.int64) + + for i, column in enumerate(columns): + # For each column we apply method to cur_frame[column]. + # If it's the last column or if we have the number of + # results desired we are done. + # Otherwise there are duplicates of the largest/smallest + # value and we need to look at the rest of the columns + # to determine which of the rows with the largest/smallest + # value in the column to keep. + series = cur_frame[column] + is_last_column = len(columns) - 1 == i + values = getattr(series, method)( + cur_n, keep=self.keep if is_last_column else "all" + ) + + if is_last_column or len(values) <= cur_n: + indexer = get_indexer(indexer, values.index) + break + + # Now find all values which are equal to + # the (nsmallest: largest)/(nlargest: smallest) + # from our series. + border_value = values == values[values.index[-1]] + + # Some of these values are among the top-n + # some aren't. + unsafe_values = values[border_value] + + # These values are definitely among the top-n + safe_values = values[~border_value] + indexer = get_indexer(indexer, safe_values.index) + + # Go on and separate the unsafe_values on the remaining + # columns. + cur_frame = cur_frame.loc[unsafe_values.index] + cur_n = n - len(indexer) + + frame = frame.take(indexer) + + # Restore the index on frame + frame.index = original_index.take(indexer) + + # If there is only one column, the frame is already sorted. 
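+        # (ordered by the final nlargest/nsmallest call above). With several
+        # columns we re-sort below; the stable mergesort keeps tied rows in
+        # the order established by the per-column selection.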
+ if len(columns) == 1: + return frame + + ascending = method == "nsmallest" + + return frame.sort_values(columns, ascending=ascending, kind="mergesort") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/to_dict.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/to_dict.py new file mode 100644 index 00000000..e89f641e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/methods/to_dict.py @@ -0,0 +1,211 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Literal, +) +import warnings + +import numpy as np + +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import maybe_box_native +from pandas.core.dtypes.dtypes import ExtensionDtype + +from pandas.core import common as com + +if TYPE_CHECKING: + from pandas import DataFrame + + +def to_dict( + df: DataFrame, + orient: Literal[ + "dict", "list", "series", "split", "tight", "records", "index" + ] = "dict", + into: type[dict] = dict, + index: bool = True, +) -> dict | list[dict]: + """ + Convert the DataFrame to a dictionary. + + The type of the key-value pairs can be customized with the parameters + (see below). + + Parameters + ---------- + orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} + Determines the type of the values of the dictionary. + + - 'dict' (default) : dict like {column -> {index -> value}} + - 'list' : dict like {column -> [values]} + - 'series' : dict like {column -> Series(values)} + - 'split' : dict like + {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} + - 'tight' : dict like + {'index' -> [index], 'columns' -> [columns], 'data' -> [values], + 'index_names' -> [index.names], 'column_names' -> [column.names]} + - 'records' : list like + [{column -> value}, ... , {column -> value}] + - 'index' : dict like {index -> {column -> value}} + + .. versionadded:: 1.4.0 + 'tight' as an allowed value for the ``orient`` argument + + into : class, default dict + The collections.abc.Mapping subclass used for all Mappings + in the return value. Can be the actual class or an empty + instance of the mapping type you want. If you want a + collections.defaultdict, you must pass it initialized. + + index : bool, default True + Whether to include the index item (and index_names item if `orient` + is 'tight') in the returned dictionary. Can only be ``False`` + when `orient` is 'split' or 'tight'. + + .. versionadded:: 2.0.0 + + Returns + ------- + dict, list or collections.abc.Mapping + Return a collections.abc.Mapping object representing the DataFrame. + The resulting transformation depends on the `orient` parameter. 
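+
+    Examples
+    --------
+    A minimal sketch (assuming the usual ``import pandas as pd``):
+
+    >>> df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
+    >>> to_dict(df, orient="list")
+    {'a': [1, 2], 'b': [3.0, 4.0]}
+    >>> to_dict(df, orient="records")
+    [{'a': 1, 'b': 3.0}, {'a': 2, 'b': 4.0}]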
+ """ + if not df.columns.is_unique: + warnings.warn( + "DataFrame columns are not unique, some columns will be omitted.", + UserWarning, + stacklevel=find_stack_level(), + ) + # GH16122 + into_c = com.standardize_mapping(into) + + # error: Incompatible types in assignment (expression has type "str", + # variable has type "Literal['dict', 'list', 'series', 'split', 'tight', + # 'records', 'index']") + orient = orient.lower() # type: ignore[assignment] + + if not index and orient not in ["split", "tight"]: + raise ValueError( + "'index=False' is only valid when 'orient' is 'split' or 'tight'" + ) + + if orient == "series": + # GH46470 Return quickly if orient series to avoid creating dtype objects + return into_c((k, v) for k, v in df.items()) + + box_native_indices = [ + i + for i, col_dtype in enumerate(df.dtypes.values) + if col_dtype == np.dtype(object) or isinstance(col_dtype, ExtensionDtype) + ] + are_all_object_dtype_cols = len(box_native_indices) == len(df.dtypes) + + if orient == "dict": + return into_c((k, v.to_dict(into)) for k, v in df.items()) + + elif orient == "list": + object_dtype_indices_as_set = set(box_native_indices) + return into_c( + ( + k, + list(map(maybe_box_native, v.tolist())) + if i in object_dtype_indices_as_set + else v.tolist(), + ) + for i, (k, v) in enumerate(df.items()) + ) + + elif orient == "split": + data = df._create_data_for_split_and_tight_to_dict( + are_all_object_dtype_cols, box_native_indices + ) + + return into_c( + ((("index", df.index.tolist()),) if index else ()) + + ( + ("columns", df.columns.tolist()), + ("data", data), + ) + ) + + elif orient == "tight": + data = df._create_data_for_split_and_tight_to_dict( + are_all_object_dtype_cols, box_native_indices + ) + + return into_c( + ((("index", df.index.tolist()),) if index else ()) + + ( + ("columns", df.columns.tolist()), + ( + "data", + [ + list(map(maybe_box_native, t)) + for t in df.itertuples(index=False, name=None) + ], + ), + ) + + ((("index_names", list(df.index.names)),) if index else ()) + + (("column_names", list(df.columns.names)),) + ) + + elif orient == "records": + columns = df.columns.tolist() + if are_all_object_dtype_cols: + rows = ( + dict(zip(columns, row)) for row in df.itertuples(index=False, name=None) + ) + return [ + into_c((k, maybe_box_native(v)) for k, v in row.items()) for row in rows + ] + else: + data = [ + into_c(zip(columns, t)) for t in df.itertuples(index=False, name=None) + ] + if box_native_indices: + object_dtype_indices_as_set = set(box_native_indices) + object_dtype_cols = { + col + for i, col in enumerate(df.columns) + if i in object_dtype_indices_as_set + } + for row in data: + for col in object_dtype_cols: + row[col] = maybe_box_native(row[col]) + return data + + elif orient == "index": + if not df.index.is_unique: + raise ValueError("DataFrame index must be unique for orient='index'.") + columns = df.columns.tolist() + if are_all_object_dtype_cols: + return into_c( + (t[0], dict(zip(df.columns, map(maybe_box_native, t[1:])))) + for t in df.itertuples(name=None) + ) + elif box_native_indices: + object_dtype_indices_as_set = set(box_native_indices) + is_object_dtype_by_index = [ + i in object_dtype_indices_as_set for i in range(len(df.columns)) + ] + return into_c( + ( + t[0], + { + columns[i]: maybe_box_native(v) + if is_object_dtype_by_index[i] + else v + for i, v in enumerate(t[1:]) + }, + ) + for t in df.itertuples(name=None) + ) + else: + return into_c( + (t[0], dict(zip(df.columns, t[1:]))) for t in df.itertuples(name=None) + ) + + else: + raise 
ValueError(f"orient '{orient}' not understood") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/missing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/missing.py new file mode 100644 index 00000000..58b0e290 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/missing.py @@ -0,0 +1,1056 @@ +""" +Routines for filling missing data. +""" +from __future__ import annotations + +from functools import ( + partial, + wraps, +) +from typing import ( + TYPE_CHECKING, + Any, + Literal, + cast, +) + +import numpy as np + +from pandas._libs import ( + NaT, + algos, + lib, +) +from pandas._typing import ( + ArrayLike, + AxisInt, + F, + ReindexMethod, + npt, +) +from pandas.compat._optional import import_optional_dependency + +from pandas.core.dtypes.cast import infer_dtype_from +from pandas.core.dtypes.common import ( + is_array_like, + is_numeric_dtype, + is_numeric_v_string_like, + is_object_dtype, + needs_i8_conversion, +) +from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + na_value_for_dtype, +) + +if TYPE_CHECKING: + from pandas import Index + + +def check_value_size(value, mask: npt.NDArray[np.bool_], length: int): + """ + Validate the size of the values passed to ExtensionArray.fillna. + """ + if is_array_like(value): + if len(value) != length: + raise ValueError( + f"Length of 'value' does not match. Got ({len(value)}) " + f" expected {length}" + ) + value = value[mask] + + return value + + +def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]: + """ + Return a masking array of same size/shape as arr + with entries equaling any member of values_to_mask set to True + + Parameters + ---------- + arr : ArrayLike + values_to_mask: list, tuple, or scalar + + Returns + ------- + np.ndarray[bool] + """ + # When called from Block.replace/replace_list, values_to_mask is a scalar + # known to be holdable by arr. 
+ # When called from Series._single_replace, values_to_mask is tuple or list + dtype, values_to_mask = infer_dtype_from(values_to_mask) + + if isinstance(dtype, np.dtype): + values_to_mask = np.array(values_to_mask, dtype=dtype) + else: + cls = dtype.construct_array_type() + if not lib.is_list_like(values_to_mask): + values_to_mask = [values_to_mask] + values_to_mask = cls._from_sequence(values_to_mask, dtype=dtype, copy=False) + + potential_na = False + if is_object_dtype(arr.dtype): + # pre-compute mask to avoid comparison to NA + potential_na = True + arr_mask = ~isna(arr) + + na_mask = isna(values_to_mask) + nonna = values_to_mask[~na_mask] + + # GH 21977 + mask = np.zeros(arr.shape, dtype=bool) + for x in nonna: + if is_numeric_v_string_like(arr, x): + # GH#29553 prevent numpy deprecation warnings + pass + else: + if potential_na: + new_mask = np.zeros(arr.shape, dtype=np.bool_) + new_mask[arr_mask] = arr[arr_mask] == x + else: + new_mask = arr == x + + if not isinstance(new_mask, np.ndarray): + # usually BooleanArray + new_mask = new_mask.to_numpy(dtype=bool, na_value=False) + mask |= new_mask + + if na_mask.any(): + mask |= isna(arr) + + return mask + + +def clean_fill_method(method: str, allow_nearest: bool = False): + if isinstance(method, str): + method = method.lower() + if method == "ffill": + method = "pad" + elif method == "bfill": + method = "backfill" + + valid_methods = ["pad", "backfill"] + expecting = "pad (ffill) or backfill (bfill)" + if allow_nearest: + valid_methods.append("nearest") + expecting = "pad (ffill), backfill (bfill) or nearest" + if method not in valid_methods: + raise ValueError(f"Invalid fill method. Expecting {expecting}. Got {method}") + return method + + +# interpolation methods that dispatch to np.interp + +NP_METHODS = ["linear", "time", "index", "values"] + +# interpolation methods that dispatch to _interpolate_scipy_wrapper + +SP_METHODS = [ + "nearest", + "zero", + "slinear", + "quadratic", + "cubic", + "barycentric", + "krogh", + "spline", + "polynomial", + "from_derivatives", + "piecewise_polynomial", + "pchip", + "akima", + "cubicspline", +] + + +def clean_interp_method(method: str, index: Index, **kwargs) -> str: + order = kwargs.get("order") + + if method in ("spline", "polynomial") and order is None: + raise ValueError("You must specify the order of the spline or polynomial.") + + valid = NP_METHODS + SP_METHODS + if method not in valid: + raise ValueError(f"method must be one of {valid}. Got '{method}' instead.") + + if method in ("krogh", "piecewise_polynomial", "pchip"): + if not index.is_monotonic_increasing: + raise ValueError( + f"{method} interpolation requires that the index be monotonic." + ) + + return method + + +def find_valid_index(how: str, is_valid: npt.NDArray[np.bool_]) -> int | None: + """ + Retrieves the positional index of the first valid value. + + Parameters + ---------- + how : {'first', 'last'} + Use this parameter to change between the first or last valid index. + is_valid: np.ndarray + Mask to find na_values. 
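+        True marks a valid (non-NA) entry; a 2D mask is first reduced along
+        axis 1 with ``any``.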
+ + Returns + ------- + int or None + """ + assert how in ["first", "last"] + + if len(is_valid) == 0: # early stop + return None + + if is_valid.ndim == 2: + is_valid = is_valid.any(axis=1) # reduce axis 1 + + if how == "first": + idxpos = is_valid[::].argmax() + + elif how == "last": + idxpos = len(is_valid) - 1 - is_valid[::-1].argmax() + + chk_notna = is_valid[idxpos] + + if not chk_notna: + return None + # Incompatible return value type (got "signedinteger[Any]", + # expected "Optional[int]") + return idxpos # type: ignore[return-value] + + +def validate_limit_direction( + limit_direction: str, +) -> Literal["forward", "backward", "both"]: + valid_limit_directions = ["forward", "backward", "both"] + limit_direction = limit_direction.lower() + if limit_direction not in valid_limit_directions: + raise ValueError( + "Invalid limit_direction: expecting one of " + f"{valid_limit_directions}, got '{limit_direction}'." + ) + # error: Incompatible return value type (got "str", expected + # "Literal['forward', 'backward', 'both']") + return limit_direction # type: ignore[return-value] + + +def validate_limit_area(limit_area: str | None) -> Literal["inside", "outside"] | None: + if limit_area is not None: + valid_limit_areas = ["inside", "outside"] + limit_area = limit_area.lower() + if limit_area not in valid_limit_areas: + raise ValueError( + f"Invalid limit_area: expecting one of {valid_limit_areas}, got " + f"{limit_area}." + ) + # error: Incompatible return value type (got "Optional[str]", expected + # "Optional[Literal['inside', 'outside']]") + return limit_area # type: ignore[return-value] + + +def infer_limit_direction(limit_direction, method): + # Set `limit_direction` depending on `method` + if limit_direction is None: + if method in ("backfill", "bfill"): + limit_direction = "backward" + else: + limit_direction = "forward" + else: + if method in ("pad", "ffill") and limit_direction != "forward": + raise ValueError( + f"`limit_direction` must be 'forward' for method `{method}`" + ) + if method in ("backfill", "bfill") and limit_direction != "backward": + raise ValueError( + f"`limit_direction` must be 'backward' for method `{method}`" + ) + return limit_direction + + +def get_interp_index(method, index: Index) -> Index: + # create/use the index + if method == "linear": + # prior default + from pandas import Index + + index = Index(np.arange(len(index))) + else: + methods = {"index", "values", "nearest", "time"} + is_numeric_or_datetime = ( + is_numeric_dtype(index.dtype) + or isinstance(index.dtype, DatetimeTZDtype) + or lib.is_np_dtype(index.dtype, "mM") + ) + if method not in methods and not is_numeric_or_datetime: + raise ValueError( + "Index column must be numeric or datetime type when " + f"using {method} method other than linear. " + "Try setting a numeric or datetime index column before " + "interpolating." + ) + + if isna(index).any(): + raise NotImplementedError( + "Interpolation with NaNs in the index " + "has not been implemented. Try filling " + "those NaNs before interpolating." + ) + return index + + +def interpolate_2d_inplace( + data: np.ndarray, # floating dtype + index: Index, + axis: AxisInt, + method: str = "linear", + limit: int | None = None, + limit_direction: str = "forward", + limit_area: str | None = None, + fill_value: Any | None = None, + **kwargs, +) -> None: + """ + Column-wise application of _interpolate_1d. + + Notes + ----- + Alters 'data' in-place. 
+ + The signature does differ from _interpolate_1d because it only + includes what is needed for Block.interpolate. + """ + # validate the interp method + clean_interp_method(method, index, **kwargs) + + if is_valid_na_for_dtype(fill_value, data.dtype): + fill_value = na_value_for_dtype(data.dtype, compat=False) + + if method == "time": + if not needs_i8_conversion(index.dtype): + raise ValueError( + "time-weighted interpolation only works " + "on Series or DataFrames with a " + "DatetimeIndex" + ) + method = "values" + + limit_direction = validate_limit_direction(limit_direction) + limit_area_validated = validate_limit_area(limit_area) + + # default limit is unlimited GH #16282 + limit = algos.validate_limit(nobs=None, limit=limit) + + indices = _index_to_interp_indices(index, method) + + def func(yvalues: np.ndarray) -> None: + # process 1-d slices in the axis direction + + _interpolate_1d( + indices=indices, + yvalues=yvalues, + method=method, + limit=limit, + limit_direction=limit_direction, + limit_area=limit_area_validated, + fill_value=fill_value, + bounds_error=False, + **kwargs, + ) + + # error: Argument 1 to "apply_along_axis" has incompatible type + # "Callable[[ndarray[Any, Any]], None]"; expected "Callable[..., + # Union[_SupportsArray[dtype[]], Sequence[_SupportsArray + # [dtype[]]], Sequence[Sequence[_SupportsArray[dtype[]]]], + # Sequence[Sequence[Sequence[_SupportsArray[dtype[]]]]], + # Sequence[Sequence[Sequence[Sequence[_SupportsArray[dtype[]]]]]]]]" + np.apply_along_axis(func, axis, data) # type: ignore[arg-type] + + +def _index_to_interp_indices(index: Index, method: str) -> np.ndarray: + """ + Convert Index to ndarray of indices to pass to NumPy/SciPy. + """ + xarr = index._values + if needs_i8_conversion(xarr.dtype): + # GH#1646 for dt64tz + xarr = xarr.view("i8") + + if method == "linear": + inds = xarr + inds = cast(np.ndarray, inds) + else: + inds = np.asarray(xarr) + + if method in ("values", "index"): + if inds.dtype == np.object_: + inds = lib.maybe_convert_objects(inds) + + return inds + + +def _interpolate_1d( + indices: np.ndarray, + yvalues: np.ndarray, + method: str = "linear", + limit: int | None = None, + limit_direction: str = "forward", + limit_area: Literal["inside", "outside"] | None = None, + fill_value: Any | None = None, + bounds_error: bool = False, + order: int | None = None, + **kwargs, +) -> None: + """ + Logic for the 1-d interpolation. The input + indices and yvalues will each be 1-d arrays of the same length. + + Bounds_error is currently hardcoded to False since non-scipy ones don't + take it as an argument. + + Notes + ----- + Fills 'yvalues' in-place. + """ + + invalid = isna(yvalues) + valid = ~invalid + + if not valid.any(): + return + + if valid.all(): + return + + # These are sets of index pointers to invalid values... i.e. {0, 1, etc... + all_nans = set(np.flatnonzero(invalid)) + + first_valid_index = find_valid_index(how="first", is_valid=valid) + if first_valid_index is None: # no nan found in start + first_valid_index = 0 + start_nans = set(range(first_valid_index)) + + last_valid_index = find_valid_index(how="last", is_valid=valid) + if last_valid_index is None: # no nan found in end + last_valid_index = len(yvalues) + end_nans = set(range(1 + last_valid_index, len(valid))) + + # Like the sets above, preserve_nans contains indices of invalid values, + # but in this case, it is the final set of indices that need to be + # preserved as NaN after the interpolation. 
+ + # For example if limit_direction='forward' then preserve_nans will + # contain indices of NaNs at the beginning of the series, and NaNs that + # are more than 'limit' away from the prior non-NaN. + + # set preserve_nans based on direction using _interp_limit + preserve_nans: list | set + if limit_direction == "forward": + preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0)) + elif limit_direction == "backward": + preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit)) + else: + # both directions... just use _interp_limit + preserve_nans = set(_interp_limit(invalid, limit, limit)) + + # if limit_area is set, add either mid or outside indices + # to preserve_nans GH #16284 + if limit_area == "inside": + # preserve NaNs on the outside + preserve_nans |= start_nans | end_nans + elif limit_area == "outside": + # preserve NaNs on the inside + mid_nans = all_nans - start_nans - end_nans + preserve_nans |= mid_nans + + # sort preserve_nans and convert to list + preserve_nans = sorted(preserve_nans) + + is_datetimelike = yvalues.dtype.kind in "mM" + + if is_datetimelike: + yvalues = yvalues.view("i8") + + if method in NP_METHODS: + # np.interp requires sorted X values, #21037 + + indexer = np.argsort(indices[valid]) + yvalues[invalid] = np.interp( + indices[invalid], indices[valid][indexer], yvalues[valid][indexer] + ) + else: + yvalues[invalid] = _interpolate_scipy_wrapper( + indices[valid], + yvalues[valid], + indices[invalid], + method=method, + fill_value=fill_value, + bounds_error=bounds_error, + order=order, + **kwargs, + ) + + if is_datetimelike: + yvalues[preserve_nans] = NaT.value + else: + yvalues[preserve_nans] = np.nan + return + + +def _interpolate_scipy_wrapper( + x: np.ndarray, + y: np.ndarray, + new_x: np.ndarray, + method: str, + fill_value=None, + bounds_error: bool = False, + order=None, + **kwargs, +): + """ + Passed off to scipy.interpolate.interp1d; ``method`` is scipy's ``kind``. + Returns an array interpolated at new_x. Add any new methods to + the list in clean_interp_method. + """ + extra = f"{method} interpolation requires SciPy." + import_optional_dependency("scipy", extra=extra) + from scipy import interpolate + + new_x = np.asarray(new_x) + + # ignores some kwargs that could be passed along.
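+ # Dispatch note: the methods in ``alt_methods`` below map to standalone + # SciPy interpolation routines, while the methods in ``interp1d_methods`` + # are handed to scipy.interpolate.interp1d as its ``kind`` argument + # ("polynomial" passes the user-supplied ``order`` as the kind instead).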
+ alt_methods = { + "barycentric": interpolate.barycentric_interpolate, + "krogh": interpolate.krogh_interpolate, + "from_derivatives": _from_derivatives, + "piecewise_polynomial": _from_derivatives, + "cubicspline": _cubicspline_interpolate, + "akima": _akima_interpolate, + "pchip": interpolate.pchip_interpolate, + } + + interp1d_methods = [ + "nearest", + "zero", + "slinear", + "quadratic", + "cubic", + "polynomial", + ] + if method in interp1d_methods: + if method == "polynomial": + kind = order + else: + kind = method + terp = interpolate.interp1d( + x, y, kind=kind, fill_value=fill_value, bounds_error=bounds_error + ) + new_y = terp(new_x) + elif method == "spline": + # GH #10633, #24014 + if isna(order) or (order <= 0): + raise ValueError( + f"order needs to be specified and greater than 0; got order: {order}" + ) + terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs) + new_y = terp(new_x) + else: + # GH 7295: need to be able to write for some reason + # in some circumstances: check all three + if not x.flags.writeable: + x = x.copy() + if not y.flags.writeable: + y = y.copy() + if not new_x.flags.writeable: + new_x = new_x.copy() + terp = alt_methods[method] + new_y = terp(x, y, new_x, **kwargs) + return new_y + + +def _from_derivatives( + xi: np.ndarray, + yi: np.ndarray, + x: np.ndarray, + order=None, + der: int | list[int] | None = 0, + extrapolate: bool = False, +): + """ + Convenience function for interpolate.BPoly.from_derivatives. + + Construct a piecewise polynomial in the Bernstein basis, compatible + with the specified values and derivatives at breakpoints. + + Parameters + ---------- + xi : array-like + sorted 1D array of x-coordinates + yi : array-like or list of array-likes + yi[i][j] is the j-th derivative known at xi[i] + order: None or int or array-like of ints. Default: None. + Specifies the degree of local polynomials. If not None, some + derivatives are ignored. + der : int or list + How many derivatives to extract; None for all potentially nonzero + derivatives (that is a number equal to the number of points), or a + list of derivatives to extract. This number includes the function + value as 0th derivative. + extrapolate : bool, optional + Whether to extrapolate to out-of-bounds points based on first and last + intervals, or to return NaNs. Default: False. + + See Also + -------- + scipy.interpolate.BPoly.from_derivatives + + Returns + ------- + y : scalar or array-like + The result, of length R or length M or M by R. + """ + from scipy import interpolate + + # return the method for compat with scipy version & backwards compat + method = interpolate.BPoly.from_derivatives + m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate) + + return m(x) + + +def _akima_interpolate( + xi: np.ndarray, + yi: np.ndarray, + x: np.ndarray, + der: int | list[int] | None = 0, + axis: AxisInt = 0, +): + """ + Convenience function for akima interpolation. + xi and yi are arrays of values used to approximate some function f, + with ``yi = f(xi)``. + + See `Akima1DInterpolator` for details. + + Parameters + ---------- + xi : np.ndarray + A sorted list of x-coordinates, of length N. + yi : np.ndarray + A 1-D array of real values. `yi`'s length along the interpolation + axis must be equal to the length of `xi`. If N-D array, use axis + parameter to select correct axis. + x : np.ndarray + Of length M.
+ der : int, optional + How many derivatives to extract; None for all potentially + nonzero derivatives (that is a number equal to the number + of points), or a list of derivatives to extract. This number + includes the function value as 0th derivative. + axis : int, optional + Axis in the yi array corresponding to the x-coordinate values. + + See Also + -------- + scipy.interpolate.Akima1DInterpolator + + Returns + ------- + y : scalar or array-like + The result, of length R or length M or M by R. + + """ + from scipy import interpolate + + P = interpolate.Akima1DInterpolator(xi, yi, axis=axis) + + return P(x, nu=der) + + +def _cubicspline_interpolate( + xi: np.ndarray, + yi: np.ndarray, + x: np.ndarray, + axis: AxisInt = 0, + bc_type: str | tuple[Any, Any] = "not-a-knot", + extrapolate=None, +): + """ + Convenience function for cubic spline data interpolator. + + See `scipy.interpolate.CubicSpline` for details. + + Parameters + ---------- + xi : np.ndarray, shape (n,) + 1-d array containing values of the independent variable. + Values must be real, finite and in strictly increasing order. + yi : np.ndarray + Array containing values of the dependent variable. It can have + arbitrary number of dimensions, but the length along ``axis`` + (see below) must match the length of ``x``. Values must be finite. + x : np.ndarray, shape (m,) + axis : int, optional + Axis along which `y` is assumed to be varying. Meaning that for + ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``. + Default is 0. + bc_type : string or 2-tuple, optional + Boundary condition type. Two additional equations, given by the + boundary conditions, are required to determine all coefficients of + polynomials on each segment [2]_. + If `bc_type` is a string, then the specified condition will be applied + at both ends of a spline. Available conditions are: + * 'not-a-knot' (default): The first and second segment at a curve end + are the same polynomial. It is a good default when there is no + information on boundary conditions. + * 'periodic': The interpolated function is assumed to be periodic + of period ``x[-1] - x[0]``. The first and last value of `y` must be + identical: ``y[0] == y[-1]``. This boundary condition will result in + ``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``. + * 'clamped': The first derivative at the curve ends is zero. Assuming + a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition. + * 'natural': The second derivative at the curve ends is zero. Assuming + a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition. + If `bc_type` is a 2-tuple, the first and the second value will be + applied at the curve start and end respectively. The tuple values can + be one of the previously mentioned strings (except 'periodic') or a + tuple `(order, deriv_values)` allowing to specify arbitrary + derivatives at curve ends: + * `order`: the derivative order, 1 or 2. + * `deriv_value`: array-like containing derivative values, shape must + be the same as `y`, excluding ``axis`` dimension. For example, if + `y` is 1D, then `deriv_value` must be a scalar. If `y` is 3D with + the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D + and have the shape (n0, n1). + extrapolate : {bool, 'periodic', None}, optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. If 'periodic', + periodic extrapolation is used.
If None (default), ``extrapolate`` is + set to 'periodic' for ``bc_type='periodic'`` and to True otherwise. + + See Also + -------- + scipy.interpolate.CubicHermiteSpline + + Returns + ------- + y : scalar or array-like + The result, of shape (m,) + + References + ---------- + .. [1] `Cubic Spline Interpolation + <https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_ + on Wikiversity. + .. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978. + """ + from scipy import interpolate + + P = interpolate.CubicSpline( + xi, yi, axis=axis, bc_type=bc_type, extrapolate=extrapolate + ) + + return P(x) + + +def _interpolate_with_limit_area( + values: np.ndarray, + method: Literal["pad", "backfill"], + limit: int | None, + limit_area: Literal["inside", "outside"], +) -> None: + """ + Apply interpolation and limit_area logic to values along a to-be-specified axis. + + Parameters + ---------- + values: np.ndarray + Input array. + method: str + Interpolation method. Could be "bfill" or "pad" + limit: int, optional + Index limit on interpolation. + limit_area: {'inside', 'outside'} + Limit area for interpolation. + + Notes + ----- + Modifies values in-place. + """ + + invalid = isna(values) + is_valid = ~invalid + + if not invalid.all(): + first = find_valid_index(how="first", is_valid=is_valid) + if first is None: + first = 0 + last = find_valid_index(how="last", is_valid=is_valid) + if last is None: + last = len(values) + + pad_or_backfill_inplace( + values, + method=method, + limit=limit, + ) + + if limit_area == "inside": + invalid[first : last + 1] = False + elif limit_area == "outside": + invalid[:first] = invalid[last + 1 :] = False + else: + raise ValueError("limit_area should be 'inside' or 'outside'") + + values[invalid] = np.nan + + +def pad_or_backfill_inplace( + values: np.ndarray, + method: Literal["pad", "backfill"] = "pad", + axis: AxisInt = 0, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, +) -> None: + """ + Perform the actual interpolation of values; values will be made 2-D if + needed and filled in-place. + + Parameters + ---------- + values: np.ndarray + Input array. + method: str, default "pad" + Interpolation method. Could be "bfill" or "pad" + axis: 0 or 1 + Interpolation axis + limit: int, optional + Index limit on interpolation. + limit_area: str, optional + Limit area for interpolation. Can be "inside" or "outside" + + Notes + ----- + Modifies values in-place.
+ """ + if limit_area is not None: + np.apply_along_axis( + # error: Argument 1 to "apply_along_axis" has incompatible type + # "partial[None]"; expected + # "Callable[..., Union[_SupportsArray[dtype[]], + # Sequence[_SupportsArray[dtype[]]], + # Sequence[Sequence[_SupportsArray[dtype[]]]], + # Sequence[Sequence[Sequence[_SupportsArray[dtype[]]]]], + # Sequence[Sequence[Sequence[Sequence[_ + # SupportsArray[dtype[]]]]]]]]" + partial( # type: ignore[arg-type] + _interpolate_with_limit_area, + method=method, + limit=limit, + limit_area=limit_area, + ), + axis, + values, + ) + return + + transf = (lambda x: x) if axis == 0 else (lambda x: x.T) + + # reshape a 1 dim if needed + if values.ndim == 1: + if axis != 0: # pragma: no cover + raise AssertionError("cannot interpolate on a ndim == 1 with axis != 0") + values = values.reshape(tuple((1,) + values.shape)) + + method = clean_fill_method(method) + tvalues = transf(values) + + func = get_fill_func(method, ndim=2) + # _pad_2d and _backfill_2d both modify tvalues inplace + func(tvalues, limit=limit) + return + + +def _fillna_prep( + values, mask: npt.NDArray[np.bool_] | None = None +) -> npt.NDArray[np.bool_]: + # boilerplate for _pad_1d, _backfill_1d, _pad_2d, _backfill_2d + + if mask is None: + mask = isna(values) + + mask = mask.view(np.uint8) + return mask + + +def _datetimelike_compat(func: F) -> F: + """ + Wrapper to handle datetime64 and timedelta64 dtypes. + """ + + @wraps(func) + def new_func(values, limit: int | None = None, mask=None): + if needs_i8_conversion(values.dtype): + if mask is None: + # This needs to occur before casting to int64 + mask = isna(values) + + result, mask = func(values.view("i8"), limit=limit, mask=mask) + return result.view(values.dtype), mask + + return func(values, limit=limit, mask=mask) + + return cast(F, new_func) + + +@_datetimelike_compat +def _pad_1d( + values: np.ndarray, + limit: int | None = None, + mask: npt.NDArray[np.bool_] | None = None, +) -> tuple[np.ndarray, npt.NDArray[np.bool_]]: + mask = _fillna_prep(values, mask) + algos.pad_inplace(values, mask, limit=limit) + return values, mask + + +@_datetimelike_compat +def _backfill_1d( + values: np.ndarray, + limit: int | None = None, + mask: npt.NDArray[np.bool_] | None = None, +) -> tuple[np.ndarray, npt.NDArray[np.bool_]]: + mask = _fillna_prep(values, mask) + algos.backfill_inplace(values, mask, limit=limit) + return values, mask + + +@_datetimelike_compat +def _pad_2d( + values: np.ndarray, + limit: int | None = None, + mask: npt.NDArray[np.bool_] | None = None, +): + mask = _fillna_prep(values, mask) + + if values.size: + algos.pad_2d_inplace(values, mask, limit=limit) + else: + # for test coverage + pass + return values, mask + + +@_datetimelike_compat +def _backfill_2d( + values, limit: int | None = None, mask: npt.NDArray[np.bool_] | None = None +): + mask = _fillna_prep(values, mask) + + if values.size: + algos.backfill_2d_inplace(values, mask, limit=limit) + else: + # for test coverage + pass + return values, mask + + +_fill_methods = {"pad": _pad_1d, "backfill": _backfill_1d} + + +def get_fill_func(method, ndim: int = 1): + method = clean_fill_method(method) + if ndim == 1: + return _fill_methods[method] + return {"pad": _pad_2d, "backfill": _backfill_2d}[method] + + +def clean_reindex_fill_method(method) -> ReindexMethod | None: + if method is None: + return None + return clean_fill_method(method, allow_nearest=True) + + +def _interp_limit( + invalid: npt.NDArray[np.bool_], fw_limit: int | None, bw_limit: int | None +): + """ + Get 
indexers of values that won't be filled + because they exceed the limits. + + Parameters + ---------- + invalid : np.ndarray[bool] + fw_limit : int or None + forward limit to index + bw_limit : int or None + backward limit to index + + Returns + ------- + set of indexers + + Notes + ----- + This is equivalent to the more readable, but slower + + .. code-block:: python + + def _interp_limit(invalid, fw_limit, bw_limit): + for x in np.where(invalid)[0]: + if invalid[max(0, x - fw_limit):x + bw_limit + 1].all(): + yield x + """ + # handle forward first; the backward direction is the same except + # 1. operate on the reversed array + # 2. subtract the returned indices from N - 1 + N = len(invalid) + f_idx = set() + b_idx = set() + + def inner(invalid, limit: int): + limit = min(limit, N) + windowed = _rolling_window(invalid, limit + 1).all(1) + idx = set(np.where(windowed)[0] + limit) | set( + np.where((~invalid[: limit + 1]).cumsum() == 0)[0] + ) + return idx + + if fw_limit is not None: + if fw_limit == 0: + f_idx = set(np.where(invalid)[0]) + else: + f_idx = inner(invalid, fw_limit) + + if bw_limit is not None: + if bw_limit == 0: + # then we don't even need to care about backwards + # just use forwards + return f_idx + else: + b_idx_inv = list(inner(invalid[::-1], bw_limit)) + b_idx = set(N - 1 - np.asarray(b_idx_inv)) + if fw_limit == 0: + return b_idx + + return f_idx & b_idx + + +def _rolling_window(a: npt.NDArray[np.bool_], window: int) -> npt.NDArray[np.bool_]: + """ + [True, True, False, True, False], 2 -> + + [ + [True, True], + [True, False], + [False, True], + [True, False], + ] + """ + # https://stackoverflow.com/a/6811241 + shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) + strides = a.strides + (a.strides[-1],) + return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/nanops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/nanops.py new file mode 100644 index 00000000..e60c42a2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/nanops.py @@ -0,0 +1,1740 @@ +from __future__ import annotations + +import functools +import itertools +from typing import ( + Any, + Callable, + cast, +) +import warnings + +import numpy as np + +from pandas._config import get_option + +from pandas._libs import ( + NaT, + NaTType, + iNaT, + lib, +) +from pandas._typing import ( + ArrayLike, + AxisInt, + CorrelationMethod, + Dtype, + DtypeObj, + F, + Scalar, + Shape, + npt, +) +from pandas.compat._optional import import_optional_dependency +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_complex, + is_float, + is_float_dtype, + is_integer, + is_numeric_dtype, + is_object_dtype, + needs_i8_conversion, + pandas_dtype, +) +from pandas.core.dtypes.missing import ( + isna, + na_value_for_dtype, + notna, +) + +bn = import_optional_dependency("bottleneck", errors="warn") +_BOTTLENECK_INSTALLED = bn is not None +_USE_BOTTLENECK = False + + +def set_use_bottleneck(v: bool = True) -> None: + # set/unset to use bottleneck + global _USE_BOTTLENECK + if _BOTTLENECK_INSTALLED: + _USE_BOTTLENECK = v + + +set_use_bottleneck(get_option("compute.use_bottleneck")) + + +class disallow: + def __init__(self, *dtypes: Dtype) -> None: + super().__init__() + self.dtypes = tuple(pandas_dtype(dtype).type for dtype in dtypes) + + def check(self, obj) -> bool: + return hasattr(obj, "dtype") and issubclass(obj.dtype.type, self.dtypes) + + def __call__(self, f: F) -> F: + 
@functools.wraps(f) + def _f(*args, **kwargs): + obj_iter = itertools.chain(args, kwargs.values()) + if any(self.check(obj) for obj in obj_iter): + f_name = f.__name__.replace("nan", "") + raise TypeError( + f"reduction operation '{f_name}' not allowed for this dtype" + ) + try: + return f(*args, **kwargs) + except ValueError as e: + # we want to transform an object array + # ValueError message to the more typical TypeError + # e.g. this is normally a disallowed function on + # object arrays that contain strings + if is_object_dtype(args[0]): + raise TypeError(e) from e + raise + + return cast(F, _f) + + +class bottleneck_switch: + def __init__(self, name=None, **kwargs) -> None: + self.name = name + self.kwargs = kwargs + + def __call__(self, alt: F) -> F: + bn_name = self.name or alt.__name__ + + try: + bn_func = getattr(bn, bn_name) + except (AttributeError, NameError): # pragma: no cover + bn_func = None + + @functools.wraps(alt) + def f( + values: np.ndarray, + *, + axis: AxisInt | None = None, + skipna: bool = True, + **kwds, + ): + if len(self.kwargs) > 0: + for k, v in self.kwargs.items(): + if k not in kwds: + kwds[k] = v + + if values.size == 0 and kwds.get("min_count") is None: + # We are empty, returning NA for our type + # Only applies for the default `min_count` of None + # since that affects how empty arrays are handled. + # TODO(GH-18976) update all the nanops methods to + # correctly handle empty inputs and remove this check. + # It *may* just be `var` + return _na_for_min_count(values, axis) + + if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name): + if kwds.get("mask", None) is None: + # `mask` is not recognised by bottleneck, would raise + # TypeError if called + kwds.pop("mask", None) + result = bn_func(values, axis=axis, **kwds) + + # prefer to treat inf/-inf as NA, but must compute the func + # twice :( + if _has_infs(result): + result = alt(values, axis=axis, skipna=skipna, **kwds) + else: + result = alt(values, axis=axis, skipna=skipna, **kwds) + else: + result = alt(values, axis=axis, skipna=skipna, **kwds) + + return result + + return cast(F, f) + + +def _bn_ok_dtype(dtype: DtypeObj, name: str) -> bool: + # Bottleneck chokes on datetime64, PeriodDtype (or any EA) + if dtype != object and not needs_i8_conversion(dtype): + # GH 42878 + # Bottleneck uses naive summation leading to O(n) loss of precision + # unlike numpy which implements pairwise summation, which has O(log(n)) loss + # crossref: https://github.com/pydata/bottleneck/issues/379 + + # GH 15507 + # bottleneck does not properly upcast during the sum + # so can overflow + + # GH 9422 + # further we also want to preserve NaN when all elements + # are NaN, unlike bottleneck/numpy which consider this + # to be 0 + return name not in ["nansum", "nanprod", "nanmean"] + return False + + +def _has_infs(result) -> bool: + if isinstance(result, np.ndarray): + if result.dtype in ("f8", "f4"): + # Note: outside of a nanops-specific test, we always have + # result.ndim == 1, so there is no risk of this ravel making a copy.
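+ # (lib.has_infs is a C-level pandas helper that makes a single pass over + # the float data and reports whether any +/-inf is present.)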
+ return lib.has_infs(result.ravel("K")) + try: + return np.isinf(result).any() + except (TypeError, NotImplementedError): + # if it doesn't support infs, then it can't have infs + return False + + +def _get_fill_value( + dtype: DtypeObj, fill_value: Scalar | None = None, fill_value_typ=None +): + """return the correct fill value for the dtype of the values""" + if fill_value is not None: + return fill_value + if _na_ok_dtype(dtype): + if fill_value_typ is None: + return np.nan + else: + if fill_value_typ == "+inf": + return np.inf + else: + return -np.inf + else: + if fill_value_typ == "+inf": + # need the max int here + return lib.i8max + else: + return iNaT + + +def _maybe_get_mask( + values: np.ndarray, skipna: bool, mask: npt.NDArray[np.bool_] | None +) -> npt.NDArray[np.bool_] | None: + """ + Compute a mask if and only if necessary. + + This function will compute a mask iff it is necessary. Otherwise, + return the provided mask (potentially None) when a mask does not need to be + computed. + + A mask is never necessary if the values array is of boolean or integer + dtypes, as these are incapable of storing NaNs. If passing a NaN-capable + dtype that is interpretable as either boolean or integer data (eg, + timedelta64), a mask must be provided. + + If the skipna parameter is False, a new mask will not be computed. + + The mask is computed using isna(). + + Parameters + ---------- + values : ndarray + input array to potentially compute mask for + skipna : bool + boolean for whether NaNs should be skipped + mask : Optional[ndarray] + nan-mask if known + + Returns + ------- + Optional[np.ndarray[bool]] + """ + if mask is None: + if values.dtype.kind in "biu": + # Boolean data cannot contain nulls, so signal via mask being None + return None + + if skipna or values.dtype.kind in "mM": + mask = isna(values) + + return mask + + +def _get_values( + values: np.ndarray, + skipna: bool, + fill_value: Any = None, + fill_value_typ: str | None = None, + mask: npt.NDArray[np.bool_] | None = None, +) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None]: + """ + Utility to get the values view and mask. + + If both mask and fill_value/fill_value_typ are not None and skipna is True, + the values array will be copied. + + For input arrays of boolean or integer dtypes, copies will only occur if a + precomputed mask, a fill_value/fill_value_typ, and skipna=True are + provided. + + Parameters + ---------- + values : ndarray + input array to potentially compute mask for + skipna : bool + boolean for whether NaNs should be skipped + fill_value : Any + value to fill NaNs with + fill_value_typ : str + Set to '+inf' or '-inf' to handle dtype-specific infinities + mask : Optional[np.ndarray[bool]] + nan-mask if known + + Returns + ------- + values : ndarray + Potential copy of input value array + mask : Optional[ndarray[bool]] + Mask for values, if deemed necessary to compute + """ + # _get_values is only called from within nanops, and in all cases + # with scalar fill_value.
This guarantee is important for the + # np.where call below + + mask = _maybe_get_mask(values, skipna, mask) + + dtype = values.dtype + + datetimelike = False + if values.dtype.kind in "mM": + # changing timedelta64/datetime64 to int64 needs to happen after + # finding `mask` above + values = np.asarray(values.view("i8")) + datetimelike = True + + if skipna and (mask is not None): + # get our fill value (in case we need to provide an alternative + # dtype for it) + fill_value = _get_fill_value( + dtype, fill_value=fill_value, fill_value_typ=fill_value_typ + ) + + if fill_value is not None: + if mask.any(): + if datetimelike or _na_ok_dtype(dtype): + values = values.copy() + np.putmask(values, mask, fill_value) + else: + # np.where will promote if needed + values = np.where(~mask, values, fill_value) + + return values, mask + + +def _get_dtype_max(dtype: np.dtype) -> np.dtype: + # return a platform independent precision dtype + dtype_max = dtype + if dtype.kind in "bi": + dtype_max = np.dtype(np.int64) + elif dtype.kind == "u": + dtype_max = np.dtype(np.uint64) + elif dtype.kind == "f": + dtype_max = np.dtype(np.float64) + return dtype_max + + +def _na_ok_dtype(dtype: DtypeObj) -> bool: + if needs_i8_conversion(dtype): + return False + return not issubclass(dtype.type, np.integer) + + +def _wrap_results(result, dtype: np.dtype, fill_value=None): + """wrap our results if needed""" + if result is NaT: + pass + + elif dtype.kind == "M": + if fill_value is None: + # GH#24293 + fill_value = iNaT + if not isinstance(result, np.ndarray): + assert not isna(fill_value), "Expected non-null fill_value" + if result == fill_value: + result = np.nan + + if isna(result): + result = np.datetime64("NaT", "ns").astype(dtype) + else: + result = np.int64(result).view(dtype) + # retain original unit + result = result.astype(dtype, copy=False) + else: + # If we have float dtype, taking a view will give the wrong result + result = result.astype(dtype) + elif dtype.kind == "m": + if not isinstance(result, np.ndarray): + if result == fill_value or np.isnan(result): + result = np.timedelta64("NaT").astype(dtype) + + elif np.fabs(result) > lib.i8max: + # raise if we have a timedelta64[ns] which is too large + raise ValueError("overflow in timedelta operation") + else: + # return a timedelta64 with the original unit + result = np.int64(result).astype(dtype, copy=False) + + else: + result = result.astype("m8[ns]").view(dtype) + + return result + + +def _datetimelike_compat(func: F) -> F: + """ + If we have datetime64 or timedelta64 values, ensure we have a correct + mask before calling the wrapped function, then cast back afterwards. + """ + + @functools.wraps(func) + def new_func( + values: np.ndarray, + *, + axis: AxisInt | None = None, + skipna: bool = True, + mask: npt.NDArray[np.bool_] | None = None, + **kwargs, + ): + orig_values = values + + datetimelike = values.dtype.kind in "mM" + if datetimelike and mask is None: + mask = isna(values) + + result = func(values, axis=axis, skipna=skipna, mask=mask, **kwargs) + + if datetimelike: + result = _wrap_results(result, orig_values.dtype, fill_value=iNaT) + if not skipna: + assert mask is not None # checked above + result = _mask_datetimelike_result(result, axis, mask, orig_values) + + return result + + return cast(F, new_func) + + +def _na_for_min_count(values: np.ndarray, axis: AxisInt | None) -> Scalar | np.ndarray: + """ + Return the missing value for `values`. 
+ + Parameters + ---------- + values : ndarray + axis : int or None + axis for the reduction, required if values.ndim > 1. + + Returns + ------- + result : scalar or ndarray + For 1-D values, returns a scalar of the correct missing type. + For 2-D values, returns a 1-D array where each element is missing. + """ + # we either return np.nan or pd.NaT + if values.dtype.kind in "iufcb": + values = values.astype("float64") + fill_value = na_value_for_dtype(values.dtype) + + if values.ndim == 1: + return fill_value + elif axis is None: + return fill_value + else: + result_shape = values.shape[:axis] + values.shape[axis + 1 :] + + return np.full(result_shape, fill_value, dtype=values.dtype) + + +def maybe_operate_rowwise(func: F) -> F: + """ + NumPy operations on C-contiguous ndarrays with axis=1 can be + very slow if axis 1 >> axis 0. + Operate row-by-row and concatenate the results. + """ + + @functools.wraps(func) + def newfunc(values: np.ndarray, *, axis: AxisInt | None = None, **kwargs): + if ( + axis == 1 + and values.ndim == 2 + and values.flags["C_CONTIGUOUS"] + # only takes this path for wide arrays (long dataframes), for threshold see + # https://github.com/pandas-dev/pandas/pull/43311#issuecomment-974891737 + and (values.shape[1] / 1000) > values.shape[0] + and values.dtype != object + and values.dtype != bool + ): + arrs = list(values) + if kwargs.get("mask") is not None: + mask = kwargs.pop("mask") + results = [ + func(arrs[i], mask=mask[i], **kwargs) for i in range(len(arrs)) + ] + else: + results = [func(x, **kwargs) for x in arrs] + return np.array(results) + + return func(values, axis=axis, **kwargs) + + return cast(F, newfunc) + + +def nanany( + values: np.ndarray, + *, + axis: AxisInt | None = None, + skipna: bool = True, + mask: npt.NDArray[np.bool_] | None = None, +) -> bool: + """ + Check if any elements along an axis evaluate to True. + + Parameters + ---------- + values : ndarray + axis : int, optional + skipna : bool, default True + mask : ndarray[bool], optional + nan-mask if known + + Returns + ------- + result : bool + + Examples + -------- + >>> from pandas.core import nanops + >>> s = pd.Series([1, 2]) + >>> nanops.nanany(s.values) + True + + >>> from pandas.core import nanops + >>> s = pd.Series([np.nan]) + >>> nanops.nanany(s.values) + False + """ + if values.dtype.kind in "iub" and mask is None: + # GH#26032 fastpath + # error: Incompatible return value type (got "Union[bool_, ndarray]", + # expected "bool") + return values.any(axis) # type: ignore[return-value] + + if values.dtype.kind == "M": + # GH#34479 + warnings.warn( + "'any' with datetime64 dtypes is deprecated and will raise in a " + "future version. Use (obj != pd.Timestamp(0)).any() instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + values, _ = _get_values(values, skipna, fill_value=False, mask=mask) + + # For object type, any won't necessarily return + # boolean values (numpy/numpy#4352) + if values.dtype == object: + values = values.astype(bool) + + # error: Incompatible return value type (got "Union[bool_, ndarray]", expected + # "bool") + return values.any(axis) # type: ignore[return-value] + + +def nanall( + values: np.ndarray, + *, + axis: AxisInt | None = None, + skipna: bool = True, + mask: npt.NDArray[np.bool_] | None = None, +) -> bool: + """ + Check if all elements along an axis evaluate to True. 
+ + Parameters + ---------- + values : ndarray + axis : int, optional + skipna : bool, default True + mask : ndarray[bool], optional + nan-mask if known + + Returns + ------- + result : bool + + Examples + -------- + >>> from pandas.core import nanops + >>> s = pd.Series([1, 2, np.nan]) + >>> nanops.nanall(s.values) + True + + >>> from pandas.core import nanops + >>> s = pd.Series([1, 0]) + >>> nanops.nanall(s.values) + False + """ + if values.dtype.kind in "iub" and mask is None: + # GH#26032 fastpath + # error: Incompatible return value type (got "Union[bool_, ndarray]", + # expected "bool") + return values.all(axis) # type: ignore[return-value] + + if values.dtype.kind == "M": + # GH#34479 + warnings.warn( + "'all' with datetime64 dtypes is deprecated and will raise in a " + "future version. Use (obj != pd.Timestamp(0)).all() instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + values, _ = _get_values(values, skipna, fill_value=True, mask=mask) + + # For object type, all won't necessarily return + # boolean values (numpy/numpy#4352) + if values.dtype == object: + values = values.astype(bool) + + # error: Incompatible return value type (got "Union[bool_, ndarray]", expected + # "bool") + return values.all(axis) # type: ignore[return-value] + + +@disallow("M8") +@_datetimelike_compat +@maybe_operate_rowwise +def nansum( + values: np.ndarray, + *, + axis: AxisInt | None = None, + skipna: bool = True, + min_count: int = 0, + mask: npt.NDArray[np.bool_] | None = None, +) -> float: + """ + Sum the elements along an axis ignoring NaNs + + Parameters + ---------- + values : ndarray[dtype] + axis : int, optional + skipna : bool, default True + min_count: int, default 0 + mask : ndarray[bool], optional + nan-mask if known + + Returns + ------- + result : dtype + + Examples + -------- + >>> from pandas.core import nanops + >>> s = pd.Series([1, 2, np.nan]) + >>> nanops.nansum(s.values) + 3.0 + """ + dtype = values.dtype + values, mask = _get_values(values, skipna, fill_value=0, mask=mask) + dtype_sum = _get_dtype_max(dtype) + if dtype.kind == "f": + dtype_sum = dtype + elif dtype.kind == "m": + dtype_sum = np.dtype(np.float64) + + the_sum = values.sum(axis, dtype=dtype_sum) + the_sum = _maybe_null_out(the_sum, axis, mask, values.shape, min_count=min_count) + + return the_sum + + +def _mask_datetimelike_result( + result: np.ndarray | np.datetime64 | np.timedelta64, + axis: AxisInt | None, + mask: npt.NDArray[np.bool_], + orig_values: np.ndarray, +) -> np.ndarray | np.datetime64 | np.timedelta64 | NaTType: + if isinstance(result, np.ndarray): + # we need to apply the mask + result = result.astype("i8").view(orig_values.dtype) + axis_mask = mask.any(axis=axis) + # error: Unsupported target for indexed assignment ("Union[ndarray[Any, Any], + # datetime64, timedelta64]") + result[axis_mask] = iNaT # type: ignore[index] + else: + if mask.any(): + return np.int64(iNaT).view(orig_values.dtype) + return result + + +@bottleneck_switch() +@_datetimelike_compat +def nanmean( + values: np.ndarray, + *, + axis: AxisInt | None = None, + skipna: bool = True, + mask: npt.NDArray[np.bool_] | None = None, +) -> float: + """ + Compute the mean of the element along an axis ignoring NaNs + + Parameters + ---------- + values : ndarray + axis : int, optional + skipna : bool, default True + mask : ndarray[bool], optional + nan-mask if known + + Returns + ------- + float + Unless input is a float array, in which case use the same + precision as the input array. 
+ + Examples + -------- + >>> from pandas.core import nanops + >>> s = pd.Series([1, 2, np.nan]) + >>> nanops.nanmean(s.values) + 1.5 + """ + dtype = values.dtype + values, mask = _get_values(values, skipna, fill_value=0, mask=mask) + dtype_sum = _get_dtype_max(dtype) + dtype_count = np.dtype(np.float64) + + # not using needs_i8_conversion because that includes period + if dtype.kind in "mM": + dtype_sum = np.dtype(np.float64) + elif dtype.kind in "iu": + dtype_sum = np.dtype(np.float64) + elif dtype.kind == "f": + dtype_sum = dtype + dtype_count = dtype + + count = _get_counts(values.shape, mask, axis, dtype=dtype_count) + the_sum = values.sum(axis, dtype=dtype_sum) + the_sum = _ensure_numeric(the_sum) + + if axis is not None and getattr(the_sum, "ndim", False): + count = cast(np.ndarray, count) + with np.errstate(all="ignore"): + # suppress division by zero warnings + the_mean = the_sum / count + ct_mask = count == 0 + if ct_mask.any(): + the_mean[ct_mask] = np.nan + else: + the_mean = the_sum / count if count > 0 else np.nan + + return the_mean + + +@bottleneck_switch() +def nanmedian(values, *, axis: AxisInt | None = None, skipna: bool = True, mask=None): + """ + Parameters + ---------- + values : ndarray + axis : int, optional + skipna : bool, default True + mask : ndarray[bool], optional + nan-mask if known + + Returns + ------- + result : float + Unless input is a float array, in which case use the same + precision as the input array. + + Examples + -------- + >>> from pandas.core import nanops + >>> s = pd.Series([1, np.nan, 2, 2]) + >>> nanops.nanmedian(s.values) + 2.0 + """ + + def get_median(x, _mask=None): + if _mask is None: + _mask = notna(x) + else: + _mask = ~_mask + if not skipna and not _mask.all(): + return np.nan + with warnings.catch_warnings(): + # Suppress RuntimeWarning about All-NaN slice + warnings.filterwarnings( + "ignore", "All-NaN slice encountered", RuntimeWarning + ) + res = np.nanmedian(x[_mask]) + return res + + dtype = values.dtype + values, mask = _get_values(values, skipna, mask=mask, fill_value=0) + if values.dtype.kind != "f": + if values.dtype == object: + # GH#34671 avoid casting strings to numeric + inferred = lib.infer_dtype(values) + if inferred in ["string", "mixed"]: + raise TypeError(f"Cannot convert {values} to numeric") + try: + values = values.astype("f8") + except ValueError as err: + # e.g. 
"could not convert string to float: 'a'" + raise TypeError(str(err)) from err + if mask is not None: + values[mask] = np.nan + + notempty = values.size + + # an array from a frame + if values.ndim > 1 and axis is not None: + # there's a non-empty array to apply over otherwise numpy raises + if notempty: + if not skipna: + res = np.apply_along_axis(get_median, axis, values) + + else: + # fastpath for the skipna case + with warnings.catch_warnings(): + # Suppress RuntimeWarning about All-NaN slice + warnings.filterwarnings( + "ignore", "All-NaN slice encountered", RuntimeWarning + ) + if (values.shape[1] == 1 and axis == 0) or ( + values.shape[0] == 1 and axis == 1 + ): + # GH52788: fastpath when squeezable, nanmedian for 2D array slow + res = np.nanmedian(np.squeeze(values), keepdims=True) + else: + res = np.nanmedian(values, axis=axis) + + else: + # must return the correct shape, but median is not defined for the + # empty set so return nans of shape "everything but the passed axis" + # since "axis" is where the reduction would occur if we had a nonempty + # array + res = _get_empty_reduction_result(values.shape, axis) + + else: + # otherwise return a scalar value + res = get_median(values, mask) if notempty else np.nan + return _wrap_results(res, dtype) + + +def _get_empty_reduction_result( + shape: Shape, + axis: AxisInt, +) -> np.ndarray: + """ + The result from a reduction on an empty ndarray. + + Parameters + ---------- + shape : Tuple[int, ...] + axis : int + + Returns + ------- + np.ndarray + """ + shp = np.array(shape) + dims = np.arange(len(shape)) + ret = np.empty(shp[dims != axis], dtype=np.float64) + ret.fill(np.nan) + return ret + + +def _get_counts_nanvar( + values_shape: Shape, + mask: npt.NDArray[np.bool_] | None, + axis: AxisInt | None, + ddof: int, + dtype: np.dtype = np.dtype(np.float64), +) -> tuple[float | np.ndarray, float | np.ndarray]: + """ + Get the count of non-null values along an axis, accounting + for degrees of freedom. + + Parameters + ---------- + values_shape : Tuple[int, ...] + shape tuple from values ndarray, used if mask is None + mask : Optional[ndarray[bool]] + locations in values that should be considered missing + axis : Optional[int] + axis to count along + ddof : int + degrees of freedom + dtype : type, optional + type to use for count + + Returns + ------- + count : int, np.nan or np.ndarray + d : int, np.nan or np.ndarray + """ + count = _get_counts(values_shape, mask, axis, dtype=dtype) + d = count - dtype.type(ddof) + + # always return NaN, never inf + if is_float(count): + if count <= ddof: + # error: Incompatible types in assignment (expression has type + # "float", variable has type "Union[floating[Any], ndarray[Any, + # dtype[floating[Any]]]]") + count = np.nan # type: ignore[assignment] + d = np.nan + else: + # count is not narrowed by is_float check + count = cast(np.ndarray, count) + mask = count <= ddof + if mask.any(): + np.putmask(d, mask, np.nan) + np.putmask(count, mask, np.nan) + return count, d + + +@bottleneck_switch(ddof=1) +def nanstd( + values, + *, + axis: AxisInt | None = None, + skipna: bool = True, + ddof: int = 1, + mask=None, +): + """ + Compute the standard deviation along given axis while ignoring NaNs + + Parameters + ---------- + values : ndarray + axis : int, optional + skipna : bool, default True + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations is N - ddof, + where N represents the number of elements. 
+ mask : ndarray[bool], optional + nan-mask if known + + Returns + ------- + result : float + Unless input is a float array, in which case use the same + precision as the input array. + + Examples + -------- + >>> from pandas.core import nanops + >>> s = pd.Series([1, np.nan, 2, 3]) + >>> nanops.nanstd(s.values) + 1.0 + """ + if values.dtype == "M8[ns]": + values = values.view("m8[ns]") + + orig_dtype = values.dtype + values, mask = _get_values(values, skipna, mask=mask) + + result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)) + return _wrap_results(result, orig_dtype) + + +@disallow("M8", "m8") +@bottleneck_switch(ddof=1) +def nanvar( + values: np.ndarray, + *, + axis: AxisInt | None = None, + skipna: bool = True, + ddof: int = 1, + mask=None, +): + """ + Compute the variance along given axis while ignoring NaNs + + Parameters + ---------- + values : ndarray + axis : int, optional + skipna : bool, default True + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations is N - ddof, + where N represents the number of elements. + mask : ndarray[bool], optional + nan-mask if known + + Returns + ------- + result : float + Unless input is a float array, in which case use the same + precision as the input array. + + Examples + -------- + >>> from pandas.core import nanops + >>> s = pd.Series([1, np.nan, 2, 3]) + >>> nanops.nanvar(s.values) + 1.0 + """ + dtype = values.dtype + mask = _maybe_get_mask(values, skipna, mask) + if dtype.kind in "iu": + values = values.astype("f8") + if mask is not None: + values[mask] = np.nan + + if values.dtype.kind == "f": + count, d = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype) + else: + count, d = _get_counts_nanvar(values.shape, mask, axis, ddof) + + if skipna and mask is not None: + values = values.copy() + np.putmask(values, mask, 0) + + # xref GH10242 + # Compute variance via two-pass algorithm, which is stable against + # cancellation errors and relatively accurate for small numbers of + # observations. + # + # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count + if axis is not None: + avg = np.expand_dims(avg, axis) + sqr = _ensure_numeric((avg - values) ** 2) + if mask is not None: + np.putmask(sqr, mask, 0) + result = sqr.sum(axis=axis, dtype=np.float64) / d + + # Return variance as np.float64 (the datatype used in the accumulator), + # unless we were dealing with a float array, in which case use the same + # precision as the original values array. + if dtype.kind == "f": + result = result.astype(dtype, copy=False) + return result + + +@disallow("M8", "m8") +def nansem( + values: np.ndarray, + *, + axis: AxisInt | None = None, + skipna: bool = True, + ddof: int = 1, + mask: npt.NDArray[np.bool_] | None = None, +) -> float: + """ + Compute the standard error in the mean along given axis while ignoring NaNs + + Parameters + ---------- + values : ndarray + axis : int, optional + skipna : bool, default True + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations is N - ddof, + where N represents the number of elements. + mask : ndarray[bool], optional + nan-mask if known + + Returns + ------- + result : float64 + Unless input is a float array, in which case use the same + precision as the input array. 
+ + Examples + -------- + >>> from pandas.core import nanops + >>> s = pd.Series([1, np.nan, 2, 3]) + >>> nanops.nansem(s.values) + 0.5773502691896258 + """ + # This checks if non-numeric-like data is passed with numeric_only=False + # and raises a TypeError otherwise + nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask) + + mask = _maybe_get_mask(values, skipna, mask) + if values.dtype.kind != "f": + values = values.astype("f8") + + if not skipna and mask is not None and mask.any(): + return np.nan + + count, _ = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype) + var = nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask) + + return np.sqrt(var) / np.sqrt(count) + + +def _nanminmax(meth, fill_value_typ): + @bottleneck_switch(name=f"nan{meth}") + @_datetimelike_compat + def reduction( + values: np.ndarray, + *, + axis: AxisInt | None = None, + skipna: bool = True, + mask: npt.NDArray[np.bool_] | None = None, + ): + if values.size == 0: + return _na_for_min_count(values, axis) + + values, mask = _get_values( + values, skipna, fill_value_typ=fill_value_typ, mask=mask + ) + result = getattr(values, meth)(axis) + result = _maybe_null_out(result, axis, mask, values.shape) + return result + + return reduction + + +nanmin = _nanminmax("min", fill_value_typ="+inf") +nanmax = _nanminmax("max", fill_value_typ="-inf") + + +def nanargmax( + values: np.ndarray, + *, + axis: AxisInt | None = None, + skipna: bool = True, + mask: npt.NDArray[np.bool_] | None = None, +) -> int | np.ndarray: + """ + Parameters + ---------- + values : ndarray + axis : int, optional + skipna : bool, default True + mask : ndarray[bool], optional + nan-mask if known + + Returns + ------- + result : int or ndarray[int] + The index/indices of max value in specified axis or -1 in the NA case + + Examples + -------- + >>> from pandas.core import nanops + >>> arr = np.array([1, 2, 3, np.nan, 4]) + >>> nanops.nanargmax(arr) + 4 + + >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3) + >>> arr[2:, 2] = np.nan + >>> arr + array([[ 0., 1., 2.], + [ 3., 4., 5.], + [ 6., 7., nan], + [ 9., 10., nan]]) + >>> nanops.nanargmax(arr, axis=1) + array([2, 2, 1, 1]) + """ + values, mask = _get_values(values, True, fill_value_typ="-inf", mask=mask) + # error: Need type annotation for 'result' + result = values.argmax(axis) # type: ignore[var-annotated] + result = _maybe_arg_null_out(result, axis, mask, skipna) + return result + + +def nanargmin( + values: np.ndarray, + *, + axis: AxisInt | None = None, + skipna: bool = True, + mask: npt.NDArray[np.bool_] | None = None, +) -> int | np.ndarray: + """ + Parameters + ---------- + values : ndarray + axis : int, optional + skipna : bool, default True + mask : ndarray[bool], optional + nan-mask if known + + Returns + ------- + result : int or ndarray[int] + The index/indices of min value in specified axis or -1 in the NA case + + Examples + -------- + >>> from pandas.core import nanops + >>> arr = np.array([1, 2, 3, np.nan, 4]) + >>> nanops.nanargmin(arr) + 0 + + >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3) + >>> arr[2:, 0] = np.nan + >>> arr + array([[ 0., 1., 2.], + [ 3., 4., 5.], + [nan, 7., 8.], + [nan, 10., 11.]]) + >>> nanops.nanargmin(arr, axis=1) + array([0, 0, 1, 1]) + """ + values, mask = _get_values(values, True, fill_value_typ="+inf", mask=mask) + # error: Need type annotation for 'result' + result = values.argmin(axis) # type: ignore[var-annotated] + result = _maybe_arg_null_out(result, axis, mask, skipna) + return result + + 
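+# Illustrative note: as _maybe_arg_null_out further below shows, nanargmax and +# nanargmin signal an all-NA reduction with the sentinel -1 rather than raising, +# e.g. nanargmax(np.array([np.nan, np.nan])) returns -1.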
+@disallow("M8", "m8") +@maybe_operate_rowwise +def nanskew( + values: np.ndarray, + *, + axis: AxisInt | None = None, + skipna: bool = True, + mask: npt.NDArray[np.bool_] | None = None, +) -> float: + """ + Compute the sample skewness. + + The statistic computed here is the adjusted Fisher-Pearson standardized + moment coefficient G1. The algorithm computes this coefficient directly + from the second and third central moment. + + Parameters + ---------- + values : ndarray + axis : int, optional + skipna : bool, default True + mask : ndarray[bool], optional + nan-mask if known + + Returns + ------- + result : float64 + Unless input is a float array, in which case use the same + precision as the input array. + + Examples + -------- + >>> from pandas.core import nanops + >>> s = pd.Series([1, np.nan, 1, 2]) + >>> nanops.nanskew(s.values) + 1.7320508075688787 + """ + mask = _maybe_get_mask(values, skipna, mask) + if values.dtype.kind != "f": + values = values.astype("f8") + count = _get_counts(values.shape, mask, axis) + else: + count = _get_counts(values.shape, mask, axis, dtype=values.dtype) + + if skipna and mask is not None: + values = values.copy() + np.putmask(values, mask, 0) + elif not skipna and mask is not None and mask.any(): + return np.nan + + with np.errstate(invalid="ignore", divide="ignore"): + mean = values.sum(axis, dtype=np.float64) / count + if axis is not None: + mean = np.expand_dims(mean, axis) + + adjusted = values - mean + if skipna and mask is not None: + np.putmask(adjusted, mask, 0) + adjusted2 = adjusted**2 + adjusted3 = adjusted2 * adjusted + m2 = adjusted2.sum(axis, dtype=np.float64) + m3 = adjusted3.sum(axis, dtype=np.float64) + + # floating point error + # + # #18044 in _libs/windows.pyx calc_skew follow this behavior + # to fix the fperr to treat m2 <1e-14 as zero + m2 = _zero_out_fperr(m2) + m3 = _zero_out_fperr(m3) + + with np.errstate(invalid="ignore", divide="ignore"): + result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2**1.5) + + dtype = values.dtype + if dtype.kind == "f": + result = result.astype(dtype, copy=False) + + if isinstance(result, np.ndarray): + result = np.where(m2 == 0, 0, result) + result[count < 3] = np.nan + else: + result = dtype.type(0) if m2 == 0 else result + if count < 3: + return np.nan + + return result + + +@disallow("M8", "m8") +@maybe_operate_rowwise +def nankurt( + values: np.ndarray, + *, + axis: AxisInt | None = None, + skipna: bool = True, + mask: npt.NDArray[np.bool_] | None = None, +) -> float: + """ + Compute the sample excess kurtosis + + The statistic computed here is the adjusted Fisher-Pearson standardized + moment coefficient G2, computed directly from the second and fourth + central moment. + + Parameters + ---------- + values : ndarray + axis : int, optional + skipna : bool, default True + mask : ndarray[bool], optional + nan-mask if known + + Returns + ------- + result : float64 + Unless input is a float array, in which case use the same + precision as the input array. 
+ + Examples + -------- + >>> from pandas.core import nanops + >>> s = pd.Series([1, np.nan, 1, 3, 2]) + >>> nanops.nankurt(s.values) + -1.2892561983471076 + """ + mask = _maybe_get_mask(values, skipna, mask) + if values.dtype.kind != "f": + values = values.astype("f8") + count = _get_counts(values.shape, mask, axis) + else: + count = _get_counts(values.shape, mask, axis, dtype=values.dtype) + + if skipna and mask is not None: + values = values.copy() + np.putmask(values, mask, 0) + elif not skipna and mask is not None and mask.any(): + return np.nan + + with np.errstate(invalid="ignore", divide="ignore"): + mean = values.sum(axis, dtype=np.float64) / count + if axis is not None: + mean = np.expand_dims(mean, axis) + + adjusted = values - mean + if skipna and mask is not None: + np.putmask(adjusted, mask, 0) + adjusted2 = adjusted**2 + adjusted4 = adjusted2**2 + m2 = adjusted2.sum(axis, dtype=np.float64) + m4 = adjusted4.sum(axis, dtype=np.float64) + + with np.errstate(invalid="ignore", divide="ignore"): + adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3)) + numerator = count * (count + 1) * (count - 1) * m4 + denominator = (count - 2) * (count - 3) * m2**2 + + # floating point error + # + # #18044 in _libs/windows.pyx calc_kurt follow this behavior + # to fix the fperr to treat denom <1e-14 as zero + numerator = _zero_out_fperr(numerator) + denominator = _zero_out_fperr(denominator) + + if not isinstance(denominator, np.ndarray): + # if ``denom`` is a scalar, check these corner cases first before + # doing division + if count < 4: + return np.nan + if denominator == 0: + return values.dtype.type(0) + + with np.errstate(invalid="ignore", divide="ignore"): + result = numerator / denominator - adj + + dtype = values.dtype + if dtype.kind == "f": + result = result.astype(dtype, copy=False) + + if isinstance(result, np.ndarray): + result = np.where(denominator == 0, 0, result) + result[count < 4] = np.nan + + return result + + +@disallow("M8", "m8") +@maybe_operate_rowwise +def nanprod( + values: np.ndarray, + *, + axis: AxisInt | None = None, + skipna: bool = True, + min_count: int = 0, + mask: npt.NDArray[np.bool_] | None = None, +) -> float: + """ + Parameters + ---------- + values : ndarray[dtype] + axis : int, optional + skipna : bool, default True + min_count: int, default 0 + mask : ndarray[bool], optional + nan-mask if known + + Returns + ------- + Dtype + The product of all elements on a given axis. 
( NaNs are treated as 1) + + Examples + -------- + >>> from pandas.core import nanops + >>> s = pd.Series([1, 2, 3, np.nan]) + >>> nanops.nanprod(s.values) + 6.0 + """ + mask = _maybe_get_mask(values, skipna, mask) + + if skipna and mask is not None: + values = values.copy() + values[mask] = 1 + result = values.prod(axis) + # error: Incompatible return value type (got "Union[ndarray, float]", expected + # "float") + return _maybe_null_out( # type: ignore[return-value] + result, axis, mask, values.shape, min_count=min_count + ) + + +def _maybe_arg_null_out( + result: np.ndarray, + axis: AxisInt | None, + mask: npt.NDArray[np.bool_] | None, + skipna: bool, +) -> np.ndarray | int: + # helper function for nanargmin/nanargmax + if mask is None: + return result + + if axis is None or not getattr(result, "ndim", False): + if skipna: + if mask.all(): + return -1 + else: + if mask.any(): + return -1 + else: + if skipna: + na_mask = mask.all(axis) + else: + na_mask = mask.any(axis) + if na_mask.any(): + result[na_mask] = -1 + return result + + +def _get_counts( + values_shape: Shape, + mask: npt.NDArray[np.bool_] | None, + axis: AxisInt | None, + dtype: np.dtype[np.floating] = np.dtype(np.float64), +) -> np.floating | npt.NDArray[np.floating]: + """ + Get the count of non-null values along an axis + + Parameters + ---------- + values_shape : tuple of int + shape tuple from values ndarray, used if mask is None + mask : Optional[ndarray[bool]] + locations in values that should be considered missing + axis : Optional[int] + axis to count along + dtype : type, optional + type to use for count + + Returns + ------- + count : scalar or array + """ + if axis is None: + if mask is not None: + n = mask.size - mask.sum() + else: + n = np.prod(values_shape) + return dtype.type(n) + + if mask is not None: + count = mask.shape[axis] - mask.sum(axis) + else: + count = values_shape[axis] + + if is_integer(count): + return dtype.type(count) + return count.astype(dtype, copy=False) + + +def _maybe_null_out( + result: np.ndarray | float | NaTType, + axis: AxisInt | None, + mask: npt.NDArray[np.bool_] | None, + shape: tuple[int, ...], + min_count: int = 1, +) -> np.ndarray | float | NaTType: + """ + Returns + ------- + Dtype + The product of all elements on a given axis. 
( NaNs are treated as 1) + """ + if mask is None and min_count == 0: + # nothing to check; short-circuit + return result + + if axis is not None and isinstance(result, np.ndarray): + if mask is not None: + null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0 + else: + # we have no nulls, kept mask=None in _maybe_get_mask + below_count = shape[axis] - min_count < 0 + new_shape = shape[:axis] + shape[axis + 1 :] + null_mask = np.broadcast_to(below_count, new_shape) + + if np.any(null_mask): + if is_numeric_dtype(result): + if np.iscomplexobj(result): + result = result.astype("c16") + elif not is_float_dtype(result): + result = result.astype("f8", copy=False) + result[null_mask] = np.nan + else: + # GH12941, use None to auto cast null + result[null_mask] = None + elif result is not NaT: + if check_below_min_count(shape, mask, min_count): + result_dtype = getattr(result, "dtype", None) + if is_float_dtype(result_dtype): + # error: Item "None" of "Optional[Any]" has no attribute "type" + result = result_dtype.type("nan") # type: ignore[union-attr] + else: + result = np.nan + + return result + + +def check_below_min_count( + shape: tuple[int, ...], mask: npt.NDArray[np.bool_] | None, min_count: int +) -> bool: + """ + Check for the `min_count` keyword. Returns True if below `min_count` (when + missing value should be returned from the reduction). + + Parameters + ---------- + shape : tuple + The shape of the values (`values.shape`). + mask : ndarray[bool] or None + Boolean numpy array (typically of same shape as `shape`) or None. + min_count : int + Keyword passed through from sum/prod call. + + Returns + ------- + bool + """ + if min_count > 0: + if mask is None: + # no missing values, only check size + non_nulls = np.prod(shape) + else: + non_nulls = mask.size - mask.sum() + if non_nulls < min_count: + return True + return False + + +def _zero_out_fperr(arg): + # #18044 reference this behavior to fix rolling skew/kurt issue + if isinstance(arg, np.ndarray): + return np.where(np.abs(arg) < 1e-14, 0, arg) + else: + return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg + + +@disallow("M8", "m8") +def nancorr( + a: np.ndarray, + b: np.ndarray, + *, + method: CorrelationMethod = "pearson", + min_periods: int | None = None, +) -> float: + """ + a, b: ndarrays + """ + if len(a) != len(b): + raise AssertionError("Operands to nancorr must have same size") + + if min_periods is None: + min_periods = 1 + + valid = notna(a) & notna(b) + if not valid.all(): + a = a[valid] + b = b[valid] + + if len(a) < min_periods: + return np.nan + + a = _ensure_numeric(a) + b = _ensure_numeric(b) + + f = get_corr_func(method) + return f(a, b) + + +def get_corr_func( + method: CorrelationMethod, +) -> Callable[[np.ndarray, np.ndarray], float]: + if method == "kendall": + from scipy.stats import kendalltau + + def func(a, b): + return kendalltau(a, b)[0] + + return func + elif method == "spearman": + from scipy.stats import spearmanr + + def func(a, b): + return spearmanr(a, b)[0] + + return func + elif method == "pearson": + + def func(a, b): + return np.corrcoef(a, b)[0, 1] + + return func + elif callable(method): + return method + + raise ValueError( + f"Unknown method '{method}', expected one of " + "'kendall', 'spearman', 'pearson', or callable" + ) + + +@disallow("M8", "m8") +def nancov( + a: np.ndarray, + b: np.ndarray, + *, + min_periods: int | None = None, + ddof: int | None = 1, +) -> float: + if len(a) != len(b): + raise AssertionError("Operands to nancov must have same size") + + if min_periods is 
None: + min_periods = 1 + + valid = notna(a) & notna(b) + if not valid.all(): + a = a[valid] + b = b[valid] + + if len(a) < min_periods: + return np.nan + + a = _ensure_numeric(a) + b = _ensure_numeric(b) + + return np.cov(a, b, ddof=ddof)[0, 1] + + +def _ensure_numeric(x): + if isinstance(x, np.ndarray): + if x.dtype.kind in "biu": + x = x.astype(np.float64) + elif x.dtype == object: + inferred = lib.infer_dtype(x) + if inferred in ["string", "mixed"]: + # GH#44008, GH#36703 avoid casting e.g. strings to numeric + raise TypeError(f"Could not convert {x} to numeric") + try: + x = x.astype(np.complex128) + except (TypeError, ValueError): + try: + x = x.astype(np.float64) + except ValueError as err: + # GH#29941 we get here with object arrays containing strs + raise TypeError(f"Could not convert {x} to numeric") from err + else: + if not np.any(np.imag(x)): + x = x.real + elif not (is_float(x) or is_integer(x) or is_complex(x)): + if isinstance(x, str): + # GH#44008, GH#36703 avoid casting e.g. strings to numeric + raise TypeError(f"Could not convert string '{x}' to numeric") + try: + x = float(x) + except (TypeError, ValueError): + # e.g. "1+1j" or "foo" + try: + x = complex(x) + except ValueError as err: + # e.g. "foo" + raise TypeError(f"Could not convert {x} to numeric") from err + return x + + +def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike: + """ + Cumulative function with skipna support. + + Parameters + ---------- + values : np.ndarray or ExtensionArray + accum_func : {np.cumprod, np.maximum.accumulate, np.cumsum, np.minimum.accumulate} + skipna : bool + + Returns + ------- + np.ndarray or ExtensionArray + """ + mask_a, mask_b = { + np.cumprod: (1.0, np.nan), + np.maximum.accumulate: (-np.inf, np.nan), + np.cumsum: (0.0, np.nan), + np.minimum.accumulate: (np.inf, np.nan), + }[accum_func] + + # This should go through ea interface + assert values.dtype.kind not in "mM" + + # We will be applying this function to block values + if skipna and not issubclass(values.dtype.type, (np.integer, np.bool_)): + vals = values.copy() + mask = isna(vals) + vals[mask] = mask_a + result = accum_func(vals, axis=0) + result[mask] = mask_b + else: + result = accum_func(values, axis=0) + + return result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/__init__.py new file mode 100644 index 00000000..ae889a7f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/__init__.py @@ -0,0 +1,93 @@ +""" +Arithmetic operations for PandasObjects + +This is not a public API. 
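+It re-exports the array ops, the reversed ("r"-prefixed) operators, and the
+Kleene logical ops used internally by Series and DataFrame binary operations;
+see ``__all__`` below.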
+""" +from __future__ import annotations + +from pandas.core.ops.array_ops import ( + arithmetic_op, + comp_method_OBJECT_ARRAY, + comparison_op, + fill_binop, + get_array_op, + logical_op, + maybe_prepare_scalar_for_op, +) +from pandas.core.ops.common import ( + get_op_result_name, + unpack_zerodim_and_defer, +) +from pandas.core.ops.docstrings import make_flex_doc +from pandas.core.ops.invalid import invalid_comparison +from pandas.core.ops.mask_ops import ( + kleene_and, + kleene_or, + kleene_xor, +) +from pandas.core.roperator import ( + radd, + rand_, + rdiv, + rdivmod, + rfloordiv, + rmod, + rmul, + ror_, + rpow, + rsub, + rtruediv, + rxor, +) + +# ----------------------------------------------------------------------------- +# constants +ARITHMETIC_BINOPS: set[str] = { + "add", + "sub", + "mul", + "pow", + "mod", + "floordiv", + "truediv", + "divmod", + "radd", + "rsub", + "rmul", + "rpow", + "rmod", + "rfloordiv", + "rtruediv", + "rdivmod", +} + + +__all__ = [ + "ARITHMETIC_BINOPS", + "arithmetic_op", + "comparison_op", + "comp_method_OBJECT_ARRAY", + "invalid_comparison", + "fill_binop", + "kleene_and", + "kleene_or", + "kleene_xor", + "logical_op", + "make_flex_doc", + "radd", + "rand_", + "rdiv", + "rdivmod", + "rfloordiv", + "rmod", + "rmul", + "ror_", + "rpow", + "rsub", + "rtruediv", + "rxor", + "unpack_zerodim_and_defer", + "get_op_result_name", + "maybe_prepare_scalar_for_op", + "get_array_op", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/array_ops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/array_ops.py new file mode 100644 index 00000000..b39930da --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/array_ops.py @@ -0,0 +1,600 @@ +""" +Functions for arithmetic and comparison operations on NumPy arrays and +ExtensionArrays. +""" +from __future__ import annotations + +import datetime +from functools import partial +import operator +from typing import ( + TYPE_CHECKING, + Any, +) +import warnings + +import numpy as np + +from pandas._libs import ( + NaT, + Timedelta, + Timestamp, + lib, + ops as libops, +) +from pandas._libs.tslibs import ( + BaseOffset, + get_supported_reso, + get_unit_from_dtype, + is_supported_unit, + is_unitless, + npy_unit_to_abbrev, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import ( + construct_1d_object_array_from_listlike, + find_common_type, +) +from pandas.core.dtypes.common import ( + ensure_object, + is_bool_dtype, + is_list_like, + is_numeric_v_string_like, + is_object_dtype, + is_scalar, +) +from pandas.core.dtypes.generic import ( + ABCExtensionArray, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + isna, + notna, +) + +from pandas.core import roperator +from pandas.core.computation import expressions +from pandas.core.construction import ensure_wrapped_if_datetimelike +from pandas.core.ops import missing +from pandas.core.ops.dispatch import should_extension_dispatch +from pandas.core.ops.invalid import invalid_comparison + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + Shape, + ) + +# ----------------------------------------------------------------------------- +# Masking NA values and fallbacks for operations numpy does not support + + +def fill_binop(left, right, fill_value): + """ + If a non-None fill_value is given, replace null entries in left and right + with this value, but only in positions where _one_ of left/right is null, + not both. 
+
+    Parameters
+    ----------
+    left : array-like
+    right : array-like
+    fill_value : object
+
+    Returns
+    -------
+    left : array-like
+    right : array-like
+
+    Notes
+    -----
+    Makes copies if fill_value is not None and NAs are present.
+    """
+    if fill_value is not None:
+        left_mask = isna(left)
+        right_mask = isna(right)
+
+        # one but not both
+        mask = left_mask ^ right_mask
+
+        if left_mask.any():
+            # Avoid making a copy if we can
+            left = left.copy()
+            left[left_mask & mask] = fill_value
+
+        if right_mask.any():
+            # Avoid making a copy if we can
+            right = right.copy()
+            right[right_mask & mask] = fill_value
+
+    return left, right
+
+
+def comp_method_OBJECT_ARRAY(op, x, y):
+    if isinstance(y, list):
+        # e.g. test_tuple_categories
+        y = construct_1d_object_array_from_listlike(y)
+
+    if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):
+        if not is_object_dtype(y.dtype):
+            y = y.astype(np.object_)
+
+        if isinstance(y, (ABCSeries, ABCIndex)):
+            y = y._values
+
+        if x.shape != y.shape:
+            raise ValueError("Shapes must match", x.shape, y.shape)
+        result = libops.vec_compare(x.ravel(), y.ravel(), op)
+    else:
+        result = libops.scalar_compare(x.ravel(), y, op)
+    return result.reshape(x.shape)
+
+
+def _masked_arith_op(x: np.ndarray, y, op):
+    """
+    If the given arithmetic operation fails, attempt it again on
+    only the non-null elements of the input array(s).
+
+    Parameters
+    ----------
+    x : np.ndarray
+    y : np.ndarray, Series, Index
+    op : binary operator
+    """
+    # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes
+    # the logic valid for both Series and DataFrame ops.
+    xrav = x.ravel()
+
+    if isinstance(y, np.ndarray):
+        dtype = find_common_type([x.dtype, y.dtype])
+        result = np.empty(x.size, dtype=dtype)
+
+        if len(x) != len(y):
+            raise ValueError(x.shape, y.shape)
+        ymask = notna(y)
+
+        # NB: ravel() is only safe since y is ndarray; for e.g. PeriodIndex
+        # we would get int64 dtype, see GH#19956
+        yrav = y.ravel()
+        mask = notna(xrav) & ymask.ravel()
+
+        # See GH#5284, GH#5035, GH#19448 for historical reference
+        if mask.any():
+            result[mask] = op(xrav[mask], yrav[mask])
+
+    else:
+        if not is_scalar(y):
+            raise TypeError(
+                f"Cannot broadcast np.ndarray with operand of type { type(y) }"
+            )
+
+        # mask is only meaningful for x
+        result = np.empty(x.size, dtype=x.dtype)
+        mask = notna(xrav)
+
+        # 1 ** np.nan is 1. So we have to unmask those.
+        if op is pow:
+            mask = np.where(x == 1, False, mask)
+        elif op is roperator.rpow:
+            mask = np.where(y == 1, False, mask)
+
+        if mask.any():
+            result[mask] = op(xrav[mask], y)
+
+    np.putmask(result, ~mask, np.nan)
+    result = result.reshape(x.shape)  # 2D compat
+    return result
+
+
+def _na_arithmetic_op(left: np.ndarray, right, op, is_cmp: bool = False):
+    """
+    Return the result of evaluating op on the passed in values.
+
+    If native types are not compatible, try coercion to object dtype.
+
+    Parameters
+    ----------
+    left : np.ndarray
+    right : np.ndarray or scalar
+        Excludes DataFrame, Series, Index, ExtensionArray.
+    is_cmp : bool, default False
+        If this is a comparison operation.
+ + Returns + ------- + array-like + + Raises + ------ + TypeError : invalid operation + """ + if isinstance(right, str): + # can never use numexpr + func = op + else: + func = partial(expressions.evaluate, op) + + try: + result = func(left, right) + except TypeError: + if not is_cmp and ( + left.dtype == object or getattr(right, "dtype", None) == object + ): + # For object dtype, fallback to a masked operation (only operating + # on the non-missing values) + # Don't do this for comparisons, as that will handle complex numbers + # incorrectly, see GH#32047 + result = _masked_arith_op(left, right, op) + else: + raise + + if is_cmp and (is_scalar(result) or result is NotImplemented): + # numpy returned a scalar instead of operating element-wise + # e.g. numeric array vs str + # TODO: can remove this after dropping some future numpy version? + return invalid_comparison(left, right, op) + + return missing.dispatch_fill_zeros(op, left, right, result) + + +def arithmetic_op(left: ArrayLike, right: Any, op): + """ + Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ... + + Note: the caller is responsible for ensuring that numpy warnings are + suppressed (with np.errstate(all="ignore")) if needed. + + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + Cannot be a DataFrame or Index. Series is *not* excluded. + op : {operator.add, operator.sub, ...} + Or one of the reversed variants from roperator. + + Returns + ------- + ndarray or ExtensionArray + Or a 2-tuple of these in the case of divmod or rdivmod. + """ + # NB: We assume that extract_array and ensure_wrapped_if_datetimelike + # have already been called on `left` and `right`, + # and `maybe_prepare_scalar_for_op` has already been called on `right` + # We need to special-case datetime64/timedelta64 dtypes (e.g. because numpy + # casts integer dtypes to timedelta64 when operating with timedelta64 - GH#22390) + + if ( + should_extension_dispatch(left, right) + or isinstance(right, (Timedelta, BaseOffset, Timestamp)) + or right is NaT + ): + # Timedelta/Timestamp and other custom scalars are included in the check + # because numexpr will fail on it, see GH#31457 + res_values = op(left, right) + else: + # TODO we should handle EAs consistently and move this check before the if/else + # (https://github.com/pandas-dev/pandas/issues/41165) + # error: Argument 2 to "_bool_arith_check" has incompatible type + # "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]" + _bool_arith_check(op, left, right) # type: ignore[arg-type] + + # error: Argument 1 to "_na_arithmetic_op" has incompatible type + # "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]" + res_values = _na_arithmetic_op(left, right, op) # type: ignore[arg-type] + + return res_values + + +def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike: + """ + Evaluate a comparison operation `=`, `!=`, `>=`, `>`, `<=`, or `<`. + + Note: the caller is responsible for ensuring that numpy warnings are + suppressed (with np.errstate(all="ignore")) if needed. + + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + Cannot be a DataFrame, Series, or Index. 
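+        The caller is expected to have unpacked these already (the function
+        assumes ``extract_array`` has been called on both operands).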
+ op : {operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le} + + Returns + ------- + ndarray or ExtensionArray + """ + # NB: We assume extract_array has already been called on left and right + lvalues = ensure_wrapped_if_datetimelike(left) + rvalues = ensure_wrapped_if_datetimelike(right) + + rvalues = lib.item_from_zerodim(rvalues) + if isinstance(rvalues, list): + # We don't catch tuple here bc we may be comparing e.g. MultiIndex + # to a tuple that represents a single entry, see test_compare_tuple_strs + rvalues = np.asarray(rvalues) + + if isinstance(rvalues, (np.ndarray, ABCExtensionArray)): + # TODO: make this treatment consistent across ops and classes. + # We are not catching all listlikes here (e.g. frozenset, tuple) + # The ambiguous case is object-dtype. See GH#27803 + if len(lvalues) != len(rvalues): + raise ValueError( + "Lengths must match to compare", lvalues.shape, rvalues.shape + ) + + if should_extension_dispatch(lvalues, rvalues) or ( + (isinstance(rvalues, (Timedelta, BaseOffset, Timestamp)) or right is NaT) + and lvalues.dtype != object + ): + # Call the method on lvalues + res_values = op(lvalues, rvalues) + + elif is_scalar(rvalues) and isna(rvalues): # TODO: but not pd.NA? + # numpy does not like comparisons vs None + if op is operator.ne: + res_values = np.ones(lvalues.shape, dtype=bool) + else: + res_values = np.zeros(lvalues.shape, dtype=bool) + + elif is_numeric_v_string_like(lvalues, rvalues): + # GH#36377 going through the numexpr path would incorrectly raise + return invalid_comparison(lvalues, rvalues, op) + + elif lvalues.dtype == object or isinstance(rvalues, str): + res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues) + + else: + res_values = _na_arithmetic_op(lvalues, rvalues, op, is_cmp=True) + + return res_values + + +def na_logical_op(x: np.ndarray, y, op): + try: + # For exposition, write: + # yarr = isinstance(y, np.ndarray) + # yint = is_integer(y) or (yarr and y.dtype.kind == "i") + # ybool = is_bool(y) or (yarr and y.dtype.kind == "b") + # xint = x.dtype.kind == "i" + # xbool = x.dtype.kind == "b" + # Then Cases where this goes through without raising include: + # (xint or xbool) and (yint or bool) + result = op(x, y) + except TypeError: + if isinstance(y, np.ndarray): + # bool-bool dtype operations should be OK, should not get here + assert not (x.dtype.kind == "b" and y.dtype.kind == "b") + x = ensure_object(x) + y = ensure_object(y) + result = libops.vec_binop(x.ravel(), y.ravel(), op) + else: + # let null fall thru + assert lib.is_scalar(y) + if not isna(y): + y = bool(y) + try: + result = libops.scalar_binop(x, y, op) + except ( + TypeError, + ValueError, + AttributeError, + OverflowError, + NotImplementedError, + ) as err: + typ = type(y).__name__ + raise TypeError( + f"Cannot perform '{op.__name__}' with a dtyped [{x.dtype}] array " + f"and scalar of type [{typ}]" + ) from err + + return result.reshape(x.shape) + + +def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike: + """ + Evaluate a logical operation `|`, `&`, or `^`. + + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + Cannot be a DataFrame, Series, or Index. + op : {operator.and_, operator.or_, operator.xor} + Or one of the reversed variants from roperator. 
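+        For integer-dtype operands these act as bitwise ops and keep integer
+        dtype; otherwise the result is cast to bool (see ``fill_bool`` below).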
+ + Returns + ------- + ndarray or ExtensionArray + """ + + def fill_bool(x, left=None): + # if `left` is specifically not-boolean, we do not cast to bool + if x.dtype.kind in "cfO": + # dtypes that can hold NA + mask = isna(x) + if mask.any(): + x = x.astype(object) + x[mask] = False + + if left is None or left.dtype.kind == "b": + x = x.astype(bool) + return x + + right = lib.item_from_zerodim(right) + if is_list_like(right) and not hasattr(right, "dtype"): + # e.g. list, tuple + warnings.warn( + "Logical ops (and, or, xor) between Pandas objects and dtype-less " + "sequences (e.g. list, tuple) are deprecated and will raise in a " + "future version. Wrap the object in a Series, Index, or np.array " + "before operating instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + right = construct_1d_object_array_from_listlike(right) + + # NB: We assume extract_array has already been called on left and right + lvalues = ensure_wrapped_if_datetimelike(left) + rvalues = right + + if should_extension_dispatch(lvalues, rvalues): + # Call the method on lvalues + res_values = op(lvalues, rvalues) + + else: + if isinstance(rvalues, np.ndarray): + is_other_int_dtype = rvalues.dtype.kind in "iu" + if not is_other_int_dtype: + rvalues = fill_bool(rvalues, lvalues) + + else: + # i.e. scalar + is_other_int_dtype = lib.is_integer(rvalues) + + res_values = na_logical_op(lvalues, rvalues, op) + + # For int vs int `^`, `|`, `&` are bitwise operators and return + # integer dtypes. Otherwise these are boolean ops + if not (left.dtype.kind in "iu" and is_other_int_dtype): + res_values = fill_bool(res_values) + + return res_values + + +def get_array_op(op): + """ + Return a binary array operation corresponding to the given operator op. + + Parameters + ---------- + op : function + Binary operator from operator or roperator module. + + Returns + ------- + functools.partial + """ + if isinstance(op, partial): + # We get here via dispatch_to_series in DataFrame case + # e.g. test_rolling_consistency_var_debiasing_factors + return op + + op_name = op.__name__.strip("_").lstrip("r") + if op_name == "arith_op": + # Reached via DataFrame._combine_frame i.e. flex methods + # e.g. test_df_add_flex_filled_mixed_dtypes + return op + + if op_name in {"eq", "ne", "lt", "le", "gt", "ge"}: + return partial(comparison_op, op=op) + elif op_name in {"and", "or", "xor", "rand", "ror", "rxor"}: + return partial(logical_op, op=op) + elif op_name in { + "add", + "sub", + "mul", + "truediv", + "floordiv", + "mod", + "divmod", + "pow", + }: + return partial(arithmetic_op, op=op) + else: + raise NotImplementedError(op_name) + + +def maybe_prepare_scalar_for_op(obj, shape: Shape): + """ + Cast non-pandas objects to pandas types to unify behavior of arithmetic + and comparison operations. + + Parameters + ---------- + obj: object + shape : tuple[int] + + Returns + ------- + out : object + + Notes + ----- + Be careful to call this *after* determining the `name` attribute to be + attached to the result of the arithmetic operation. 
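+
+    Examples
+    --------
+    A plain ``datetime.timedelta`` is cast up to ``Timedelta`` so that
+    arithmetic against numeric dtypes is handled by pandas:
+
+    >>> import datetime
+    >>> maybe_prepare_scalar_for_op(datetime.timedelta(days=1), (3,))
+    Timedelta('1 days 00:00:00')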
+ """ + if type(obj) is datetime.timedelta: + # GH#22390 cast up to Timedelta to rely on Timedelta + # implementation; otherwise operation against numeric-dtype + # raises TypeError + return Timedelta(obj) + elif type(obj) is datetime.datetime: + # cast up to Timestamp to rely on Timestamp implementation, see Timedelta above + return Timestamp(obj) + elif isinstance(obj, np.datetime64): + # GH#28080 numpy casts integer-dtype to datetime64 when doing + # array[int] + datetime64, which we do not allow + if isna(obj): + from pandas.core.arrays import DatetimeArray + + # Avoid possible ambiguities with pd.NaT + # GH 52295 + if is_unitless(obj.dtype): + obj = obj.astype("datetime64[ns]") + elif not is_supported_unit(get_unit_from_dtype(obj.dtype)): + unit = get_unit_from_dtype(obj.dtype) + closest_unit = npy_unit_to_abbrev(get_supported_reso(unit)) + obj = obj.astype(f"datetime64[{closest_unit}]") + right = np.broadcast_to(obj, shape) + return DatetimeArray(right) + + return Timestamp(obj) + + elif isinstance(obj, np.timedelta64): + if isna(obj): + from pandas.core.arrays import TimedeltaArray + + # wrapping timedelta64("NaT") in Timedelta returns NaT, + # which would incorrectly be treated as a datetime-NaT, so + # we broadcast and wrap in a TimedeltaArray + # GH 52295 + if is_unitless(obj.dtype): + obj = obj.astype("timedelta64[ns]") + elif not is_supported_unit(get_unit_from_dtype(obj.dtype)): + unit = get_unit_from_dtype(obj.dtype) + closest_unit = npy_unit_to_abbrev(get_supported_reso(unit)) + obj = obj.astype(f"timedelta64[{closest_unit}]") + right = np.broadcast_to(obj, shape) + return TimedeltaArray(right) + + # In particular non-nanosecond timedelta64 needs to be cast to + # nanoseconds, or else we get undesired behavior like + # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D') + return Timedelta(obj) + + return obj + + +_BOOL_OP_NOT_ALLOWED = { + operator.truediv, + roperator.rtruediv, + operator.floordiv, + roperator.rfloordiv, + operator.pow, + roperator.rpow, +} + + +def _bool_arith_check(op, a: np.ndarray, b): + """ + In contrast to numpy, pandas raises an error for certain operations + with booleans. + """ + if op in _BOOL_OP_NOT_ALLOWED: + if a.dtype.kind == "b" and (is_bool_dtype(b) or lib.is_bool(b)): + op_name = op.__name__.strip("_").lstrip("r") + raise NotImplementedError( + f"operator '{op_name}' not implemented for bool dtypes" + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/common.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/common.py new file mode 100644 index 00000000..559977ba --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/common.py @@ -0,0 +1,146 @@ +""" +Boilerplate functions used in defining binary operations. +""" +from __future__ import annotations + +from functools import wraps +from typing import ( + TYPE_CHECKING, + Callable, +) + +from pandas._libs.lib import item_from_zerodim +from pandas._libs.missing import is_matching_na + +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from pandas._typing import F + + +def unpack_zerodim_and_defer(name: str) -> Callable[[F], F]: + """ + Boilerplate for pandas conventions in arithmetic and comparison methods. 
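+    Ensures zero-dimensional ndarrays are unpacked and that the decorated
+    method returns ``NotImplemented`` against higher-priority ("senior")
+    pandas classes.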
+ + Parameters + ---------- + name : str + + Returns + ------- + decorator + """ + + def wrapper(method: F) -> F: + return _unpack_zerodim_and_defer(method, name) + + return wrapper + + +def _unpack_zerodim_and_defer(method, name: str): + """ + Boilerplate for pandas conventions in arithmetic and comparison methods. + + Ensure method returns NotImplemented when operating against "senior" + classes. Ensure zero-dimensional ndarrays are always unpacked. + + Parameters + ---------- + method : binary method + name : str + + Returns + ------- + method + """ + stripped_name = name.removeprefix("__").removesuffix("__") + is_cmp = stripped_name in {"eq", "ne", "lt", "le", "gt", "ge"} + + @wraps(method) + def new_method(self, other): + if is_cmp and isinstance(self, ABCIndex) and isinstance(other, ABCSeries): + # For comparison ops, Index does *not* defer to Series + pass + else: + prio = getattr(other, "__pandas_priority__", None) + if prio is not None: + if prio > self.__pandas_priority__: + # e.g. other is DataFrame while self is Index/Series/EA + return NotImplemented + + other = item_from_zerodim(other) + + return method(self, other) + + return new_method + + +def get_op_result_name(left, right): + """ + Find the appropriate name to pin to an operation result. This result + should always be either an Index or a Series. + + Parameters + ---------- + left : {Series, Index} + right : object + + Returns + ------- + name : object + Usually a string + """ + if isinstance(right, (ABCSeries, ABCIndex)): + name = _maybe_match_name(left, right) + else: + name = left.name + return name + + +def _maybe_match_name(a, b): + """ + Try to find a name to attach to the result of an operation between + a and b. If only one of these has a `name` attribute, return that + name. Otherwise return a consensus name if they match or None if + they have different names. + + Parameters + ---------- + a : object + b : object + + Returns + ------- + name : str or None + + See Also + -------- + pandas.core.common.consensus_name_attr + """ + a_has = hasattr(a, "name") + b_has = hasattr(b, "name") + if a_has and b_has: + try: + if a.name == b.name: + return a.name + elif is_matching_na(a.name, b.name): + # e.g. both are np.nan + return a.name + else: + return None + except TypeError: + # pd.NA + if is_matching_na(a.name, b.name): + return a.name + return None + except ValueError: + # e.g. np.int64(1) vs (np.int64(1), np.int64(2)) + return None + elif a_has: + return a.name + elif b_has: + return b.name + return None diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/dispatch.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/dispatch.py new file mode 100644 index 00000000..a939fdd3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/dispatch.py @@ -0,0 +1,30 @@ +""" +Functions for defining unary operations. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +from pandas.core.dtypes.generic import ABCExtensionArray + +if TYPE_CHECKING: + from pandas._typing import ArrayLike + + +def should_extension_dispatch(left: ArrayLike, right: Any) -> bool: + """ + Identify cases where Series operation should dispatch to ExtensionArray method. 
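+    True when either operand is an ExtensionArray, in which case the
+    operation is deferred to the array's own dunder method.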
+ + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + + Returns + ------- + bool + """ + return isinstance(left, ABCExtensionArray) or isinstance(right, ABCExtensionArray) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/docstrings.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/docstrings.py new file mode 100644 index 00000000..bd2e5325 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/docstrings.py @@ -0,0 +1,772 @@ +""" +Templating for ops docstrings +""" +from __future__ import annotations + + +def make_flex_doc(op_name: str, typ: str) -> str: + """ + Make the appropriate substitutions for the given operation and class-typ + into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring + to attach to a generated method. + + Parameters + ---------- + op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...} + typ : str {series, 'dataframe']} + + Returns + ------- + doc : str + """ + op_name = op_name.replace("__", "") + op_desc = _op_descriptions[op_name] + + op_desc_op = op_desc["op"] + assert op_desc_op is not None # for mypy + if op_name.startswith("r"): + equiv = f"other {op_desc_op} {typ}" + elif op_name == "divmod": + equiv = f"{op_name}({typ}, other)" + else: + equiv = f"{typ} {op_desc_op} other" + + if typ == "series": + base_doc = _flex_doc_SERIES + if op_desc["reverse"]: + base_doc += _see_also_reverse_SERIES.format( + reverse=op_desc["reverse"], see_also_desc=op_desc["see_also_desc"] + ) + doc_no_examples = base_doc.format( + desc=op_desc["desc"], + op_name=op_name, + equiv=equiv, + series_returns=op_desc["series_returns"], + ) + ser_example = op_desc["series_examples"] + if ser_example: + doc = doc_no_examples + ser_example + else: + doc = doc_no_examples + elif typ == "dataframe": + if op_name in ["eq", "ne", "le", "lt", "ge", "gt"]: + base_doc = _flex_comp_doc_FRAME + doc = _flex_comp_doc_FRAME.format( + op_name=op_name, + desc=op_desc["desc"], + ) + else: + base_doc = _flex_doc_FRAME + doc = base_doc.format( + desc=op_desc["desc"], + op_name=op_name, + equiv=equiv, + reverse=op_desc["reverse"], + ) + else: + raise AssertionError("Invalid typ argument.") + return doc + + +_common_examples_algebra_SERIES = """ +Examples +-------- +>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) +>>> a +a 1.0 +b 1.0 +c 1.0 +d NaN +dtype: float64 +>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) +>>> b +a 1.0 +b NaN +d 1.0 +e NaN +dtype: float64""" + +_common_examples_comparison_SERIES = """ +Examples +-------- +>>> a = pd.Series([1, 1, 1, np.nan, 1], index=['a', 'b', 'c', 'd', 'e']) +>>> a +a 1.0 +b 1.0 +c 1.0 +d NaN +e 1.0 +dtype: float64 +>>> b = pd.Series([0, 1, 2, np.nan, 1], index=['a', 'b', 'c', 'd', 'f']) +>>> b +a 0.0 +b 1.0 +c 2.0 +d NaN +f 1.0 +dtype: float64""" + +_add_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.add(b, fill_value=0) +a 2.0 +b 1.0 +c 1.0 +d 1.0 +e NaN +dtype: float64 +""" +) + +_sub_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.subtract(b, fill_value=0) +a 0.0 +b 1.0 +c 1.0 +d -1.0 +e NaN +dtype: float64 +""" +) + +_mul_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.multiply(b, fill_value=0) +a 1.0 +b 0.0 +c 0.0 +d 0.0 +e NaN +dtype: float64 +""" +) + +_div_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.divide(b, fill_value=0) +a 1.0 +b inf +c inf +d 0.0 +e NaN +dtype: float64 +""" +) + +_floordiv_example_SERIES = ( + 
_common_examples_algebra_SERIES + + """ +>>> a.floordiv(b, fill_value=0) +a 1.0 +b inf +c inf +d 0.0 +e NaN +dtype: float64 +""" +) + +_divmod_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.divmod(b, fill_value=0) +(a 1.0 + b inf + c inf + d 0.0 + e NaN + dtype: float64, + a 0.0 + b NaN + c NaN + d 0.0 + e NaN + dtype: float64) +""" +) + +_mod_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.mod(b, fill_value=0) +a 0.0 +b NaN +c NaN +d 0.0 +e NaN +dtype: float64 +""" +) +_pow_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.pow(b, fill_value=0) +a 1.0 +b 1.0 +c 1.0 +d 0.0 +e NaN +dtype: float64 +""" +) + +_ne_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.ne(b, fill_value=0) +a False +b True +c True +d True +e True +dtype: bool +""" +) + +_eq_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.eq(b, fill_value=0) +a True +b False +c False +d False +e False +dtype: bool +""" +) + +_lt_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.lt(b, fill_value=0) +a False +b False +c True +d False +e False +f True +dtype: bool +""" +) + +_le_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.le(b, fill_value=0) +a False +b True +c True +d False +e False +f True +dtype: bool +""" +) + +_gt_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.gt(b, fill_value=0) +a True +b False +c False +d False +e True +f False +dtype: bool +""" +) + +_ge_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.ge(b, fill_value=0) +a True +b True +c False +d False +e True +f False +dtype: bool +""" +) + +_returns_series = """Series\n The result of the operation.""" + +_returns_tuple = """2-Tuple of Series\n The result of the operation.""" + +_op_descriptions: dict[str, dict[str, str | None]] = { + # Arithmetic Operators + "add": { + "op": "+", + "desc": "Addition", + "reverse": "radd", + "series_examples": _add_example_SERIES, + "series_returns": _returns_series, + }, + "sub": { + "op": "-", + "desc": "Subtraction", + "reverse": "rsub", + "series_examples": _sub_example_SERIES, + "series_returns": _returns_series, + }, + "mul": { + "op": "*", + "desc": "Multiplication", + "reverse": "rmul", + "series_examples": _mul_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "mod": { + "op": "%", + "desc": "Modulo", + "reverse": "rmod", + "series_examples": _mod_example_SERIES, + "series_returns": _returns_series, + }, + "pow": { + "op": "**", + "desc": "Exponential power", + "reverse": "rpow", + "series_examples": _pow_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "truediv": { + "op": "/", + "desc": "Floating division", + "reverse": "rtruediv", + "series_examples": _div_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "floordiv": { + "op": "//", + "desc": "Integer division", + "reverse": "rfloordiv", + "series_examples": _floordiv_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "divmod": { + "op": "divmod", + "desc": "Integer division and modulo", + "reverse": "rdivmod", + "series_examples": _divmod_example_SERIES, + "series_returns": _returns_tuple, + "df_examples": None, + }, + # Comparison Operators + "eq": { + "op": "==", + "desc": "Equal to", + "reverse": None, + "series_examples": _eq_example_SERIES, + "series_returns": _returns_series, + }, + "ne": { + "op": "!=", + "desc": "Not equal to", + "reverse": None, + 
"series_examples": _ne_example_SERIES, + "series_returns": _returns_series, + }, + "lt": { + "op": "<", + "desc": "Less than", + "reverse": None, + "series_examples": _lt_example_SERIES, + "series_returns": _returns_series, + }, + "le": { + "op": "<=", + "desc": "Less than or equal to", + "reverse": None, + "series_examples": _le_example_SERIES, + "series_returns": _returns_series, + }, + "gt": { + "op": ">", + "desc": "Greater than", + "reverse": None, + "series_examples": _gt_example_SERIES, + "series_returns": _returns_series, + }, + "ge": { + "op": ">=", + "desc": "Greater than or equal to", + "reverse": None, + "series_examples": _ge_example_SERIES, + "series_returns": _returns_series, + }, +} + +_py_num_ref = """see + `Python documentation + `_ + for more details""" +_op_names = list(_op_descriptions.keys()) +for key in _op_names: + reverse_op = _op_descriptions[key]["reverse"] + if reverse_op is not None: + _op_descriptions[reverse_op] = _op_descriptions[key].copy() + _op_descriptions[reverse_op]["reverse"] = key + _op_descriptions[key][ + "see_also_desc" + ] = f"Reverse of the {_op_descriptions[key]['desc']} operator, {_py_num_ref}" + _op_descriptions[reverse_op][ + "see_also_desc" + ] = f"Element-wise {_op_descriptions[key]['desc']}, {_py_num_ref}" + +_flex_doc_SERIES = """ +Return {desc} of series and other, element-wise (binary operator `{op_name}`). + +Equivalent to ``{equiv}``, but with support to substitute a fill_value for +missing data in either one of the inputs. + +Parameters +---------- +other : Series or scalar value +level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level. +fill_value : None or float value, default None (NaN) + Fill existing missing (NaN) values, and any new element needed for + successful Series alignment, with this value before computation. + If data in both corresponding Series locations is missing + the result of filling (at that location) will be missing. +axis : {{0 or 'index'}} + Unused. Parameter needed for compatibility with DataFrame. + +Returns +------- +{series_returns} +""" + +_see_also_reverse_SERIES = """ +See Also +-------- +Series.{reverse} : {see_also_desc}. +""" + +_flex_doc_FRAME = """ +Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). + +Equivalent to ``{equiv}``, but with support to substitute a fill_value +for missing data in one of the inputs. With reverse version, `{reverse}`. + +Among flexible wrappers (`add`, `sub`, `mul`, `div`, `floordiv`, `mod`, `pow`) to +arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`. + +Parameters +---------- +other : scalar, sequence, Series, dict or DataFrame + Any single or multiple element data structure, or list-like object. +axis : {{0 or 'index', 1 or 'columns'}} + Whether to compare by the index (0 or 'index') or columns. + (1 or 'columns'). For Series input, axis to match Series index on. +level : int or label + Broadcast across a level, matching Index values on the + passed MultiIndex level. +fill_value : float or None, default None + Fill existing missing (NaN) values, and any new element needed for + successful DataFrame alignment, with this value before computation. + If data in both corresponding DataFrame locations is missing + the result will be missing. + +Returns +------- +DataFrame + Result of the arithmetic operation. + +See Also +-------- +DataFrame.add : Add DataFrames. +DataFrame.sub : Subtract DataFrames. +DataFrame.mul : Multiply DataFrames. +DataFrame.div : Divide DataFrames (float division). 
+DataFrame.truediv : Divide DataFrames (float division). +DataFrame.floordiv : Divide DataFrames (integer division). +DataFrame.mod : Calculate modulo (remainder after division). +DataFrame.pow : Calculate exponential power. + +Notes +----- +Mismatched indices will be unioned together. + +Examples +-------- +>>> df = pd.DataFrame({{'angles': [0, 3, 4], +... 'degrees': [360, 180, 360]}}, +... index=['circle', 'triangle', 'rectangle']) +>>> df + angles degrees +circle 0 360 +triangle 3 180 +rectangle 4 360 + +Add a scalar with operator version which return the same +results. + +>>> df + 1 + angles degrees +circle 1 361 +triangle 4 181 +rectangle 5 361 + +>>> df.add(1) + angles degrees +circle 1 361 +triangle 4 181 +rectangle 5 361 + +Divide by constant with reverse version. + +>>> df.div(10) + angles degrees +circle 0.0 36.0 +triangle 0.3 18.0 +rectangle 0.4 36.0 + +>>> df.rdiv(10) + angles degrees +circle inf 0.027778 +triangle 3.333333 0.055556 +rectangle 2.500000 0.027778 + +Subtract a list and Series by axis with operator version. + +>>> df - [1, 2] + angles degrees +circle -1 358 +triangle 2 178 +rectangle 3 358 + +>>> df.sub([1, 2], axis='columns') + angles degrees +circle -1 358 +triangle 2 178 +rectangle 3 358 + +>>> df.sub(pd.Series([1, 1, 1], index=['circle', 'triangle', 'rectangle']), +... axis='index') + angles degrees +circle -1 359 +triangle 2 179 +rectangle 3 359 + +Multiply a dictionary by axis. + +>>> df.mul({{'angles': 0, 'degrees': 2}}) + angles degrees +circle 0 720 +triangle 0 360 +rectangle 0 720 + +>>> df.mul({{'circle': 0, 'triangle': 2, 'rectangle': 3}}, axis='index') + angles degrees +circle 0 0 +triangle 6 360 +rectangle 12 1080 + +Multiply a DataFrame of different shape with operator version. + +>>> other = pd.DataFrame({{'angles': [0, 3, 4]}}, +... index=['circle', 'triangle', 'rectangle']) +>>> other + angles +circle 0 +triangle 3 +rectangle 4 + +>>> df * other + angles degrees +circle 0 NaN +triangle 9 NaN +rectangle 16 NaN + +>>> df.mul(other, fill_value=0) + angles degrees +circle 0 0.0 +triangle 9 0.0 +rectangle 16 0.0 + +Divide by a MultiIndex by level. + +>>> df_multindex = pd.DataFrame({{'angles': [0, 3, 4, 4, 5, 6], +... 'degrees': [360, 180, 360, 360, 540, 720]}}, +... index=[['A', 'A', 'A', 'B', 'B', 'B'], +... ['circle', 'triangle', 'rectangle', +... 'square', 'pentagon', 'hexagon']]) +>>> df_multindex + angles degrees +A circle 0 360 + triangle 3 180 + rectangle 4 360 +B square 4 360 + pentagon 5 540 + hexagon 6 720 + +>>> df.div(df_multindex, level=1, fill_value=0) + angles degrees +A circle NaN 1.0 + triangle 1.0 1.0 + rectangle 1.0 1.0 +B square 0.0 0.0 + pentagon 0.0 0.0 + hexagon 0.0 0.0 +""" + +_flex_comp_doc_FRAME = """ +Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). + +Among flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison +operators. + +Equivalent to `==`, `!=`, `<=`, `<`, `>=`, `>` with support to choose axis +(rows or columns) and level for comparison. + +Parameters +---------- +other : scalar, sequence, Series, or DataFrame + Any single or multiple element data structure, or list-like object. +axis : {{0 or 'index', 1 or 'columns'}}, default 'columns' + Whether to compare by the index (0 or 'index') or columns + (1 or 'columns'). +level : int or label + Broadcast across a level, matching Index values on the passed + MultiIndex level. + +Returns +------- +DataFrame of bool + Result of the comparison. + +See Also +-------- +DataFrame.eq : Compare DataFrames for equality elementwise. 
+DataFrame.ne : Compare DataFrames for inequality elementwise. +DataFrame.le : Compare DataFrames for less than inequality + or equality elementwise. +DataFrame.lt : Compare DataFrames for strictly less than + inequality elementwise. +DataFrame.ge : Compare DataFrames for greater than inequality + or equality elementwise. +DataFrame.gt : Compare DataFrames for strictly greater than + inequality elementwise. + +Notes +----- +Mismatched indices will be unioned together. +`NaN` values are considered different (i.e. `NaN` != `NaN`). + +Examples +-------- +>>> df = pd.DataFrame({{'cost': [250, 150, 100], +... 'revenue': [100, 250, 300]}}, +... index=['A', 'B', 'C']) +>>> df + cost revenue +A 250 100 +B 150 250 +C 100 300 + +Comparison with a scalar, using either the operator or method: + +>>> df == 100 + cost revenue +A False True +B False False +C True False + +>>> df.eq(100) + cost revenue +A False True +B False False +C True False + +When `other` is a :class:`Series`, the columns of a DataFrame are aligned +with the index of `other` and broadcast: + +>>> df != pd.Series([100, 250], index=["cost", "revenue"]) + cost revenue +A True True +B True False +C False True + +Use the method to control the broadcast axis: + +>>> df.ne(pd.Series([100, 300], index=["A", "D"]), axis='index') + cost revenue +A True False +B True True +C True True +D True True + +When comparing to an arbitrary sequence, the number of columns must +match the number elements in `other`: + +>>> df == [250, 100] + cost revenue +A True True +B False False +C False False + +Use the method to control the axis: + +>>> df.eq([250, 250, 100], axis='index') + cost revenue +A True False +B False True +C True False + +Compare to a DataFrame of different shape. + +>>> other = pd.DataFrame({{'revenue': [300, 250, 100, 150]}}, +... index=['A', 'B', 'C', 'D']) +>>> other + revenue +A 300 +B 250 +C 100 +D 150 + +>>> df.gt(other) + cost revenue +A False False +B False False +C False True +D False False + +Compare to a MultiIndex by level. + +>>> df_multindex = pd.DataFrame({{'cost': [250, 150, 100, 150, 300, 220], +... 'revenue': [100, 250, 300, 200, 175, 225]}}, +... index=[['Q1', 'Q1', 'Q1', 'Q2', 'Q2', 'Q2'], +... ['A', 'B', 'C', 'A', 'B', 'C']]) +>>> df_multindex + cost revenue +Q1 A 250 100 + B 150 250 + C 100 300 +Q2 A 150 200 + B 300 175 + C 220 225 + +>>> df.le(df_multindex, level=1) + cost revenue +Q1 A True True + B True True + C True True +Q2 A False True + B True False + C True False +""" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/invalid.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/invalid.py new file mode 100644 index 00000000..e5ae6d35 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/invalid.py @@ -0,0 +1,62 @@ +""" +Templates for invalid operations. 
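+
+``invalid_comparison`` reproduces the Python 3 behavior for mismatched types
+(all-False for ``==``, all-True for ``!=``, TypeError otherwise), and
+``make_invalid_op`` builds a method that always raises TypeError.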
+""" +from __future__ import annotations + +import operator +from typing import TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import npt + + +def invalid_comparison(left, right, op) -> npt.NDArray[np.bool_]: + """ + If a comparison has mismatched types and is not necessarily meaningful, + follow python3 conventions by: + + - returning all-False for equality + - returning all-True for inequality + - raising TypeError otherwise + + Parameters + ---------- + left : array-like + right : scalar, array-like + op : operator.{eq, ne, lt, le, gt} + + Raises + ------ + TypeError : on inequality comparisons + """ + if op is operator.eq: + res_values = np.zeros(left.shape, dtype=bool) + elif op is operator.ne: + res_values = np.ones(left.shape, dtype=bool) + else: + typ = type(right).__name__ + raise TypeError(f"Invalid comparison between dtype={left.dtype} and {typ}") + return res_values + + +def make_invalid_op(name: str): + """ + Return a binary method that always raises a TypeError. + + Parameters + ---------- + name : str + + Returns + ------- + invalid_op : function + """ + + def invalid_op(self, other=None): + typ = type(self).__name__ + raise TypeError(f"cannot perform {name} with this index type: {typ}") + + invalid_op.__name__ = name + return invalid_op diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/mask_ops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/mask_ops.py new file mode 100644 index 00000000..adc1f63c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/mask_ops.py @@ -0,0 +1,189 @@ +""" +Ops for masked arrays. +""" +from __future__ import annotations + +import numpy as np + +from pandas._libs import ( + lib, + missing as libmissing, +) + + +def kleene_or( + left: bool | np.ndarray | libmissing.NAType, + right: bool | np.ndarray | libmissing.NAType, + left_mask: np.ndarray | None, + right_mask: np.ndarray | None, +): + """ + Boolean ``or`` using Kleene logic. + + Values are NA where we have ``NA | NA`` or ``NA | False``. + ``NA | True`` is considered True. + + Parameters + ---------- + left, right : ndarray, NA, or bool + The values of the array. + left_mask, right_mask : ndarray, optional + The masks. Only one of these may be None, which implies that + the associated `left` or `right` value is a scalar. + + Returns + ------- + result, mask: ndarray[bool] + The result of the logical or, and the new mask. + """ + # To reduce the number of cases, we ensure that `left` & `left_mask` + # always come from an array, not a scalar. 
This is safe, since
+    # A | B == B | A
+    if left_mask is None:
+        return kleene_or(right, left, right_mask, left_mask)
+
+    if not isinstance(left, np.ndarray):
+        raise TypeError("Either `left` or `right` need to be a np.ndarray.")
+
+    raise_for_nan(right, method="or")
+
+    if right is libmissing.NA:
+        result = left.copy()
+    else:
+        result = left | right
+
+    if right_mask is not None:
+        # output is unknown for the pairs (False, NA), (NA, False), (NA, NA)
+        left_false = ~(left | left_mask)
+        right_false = ~(right | right_mask)
+        mask = (
+            (left_false & right_mask)
+            | (right_false & left_mask)
+            | (left_mask & right_mask)
+        )
+    else:
+        if right is True:
+            mask = np.zeros_like(left_mask)
+        elif right is libmissing.NA:
+            mask = (~left & ~left_mask) | left_mask
+        else:
+            # False
+            mask = left_mask.copy()
+
+    return result, mask
+
+
+def kleene_xor(
+    left: bool | np.ndarray | libmissing.NAType,
+    right: bool | np.ndarray | libmissing.NAType,
+    left_mask: np.ndarray | None,
+    right_mask: np.ndarray | None,
+):
+    """
+    Boolean ``xor`` using Kleene logic.
+
+    This is the same as ``or``, with the following adjustments:
+
+    * True, True -> False
+    * True, NA -> NA
+
+    Parameters
+    ----------
+    left, right : ndarray, NA, or bool
+        The values of the array.
+    left_mask, right_mask : ndarray, optional
+        The masks. Only one of these may be None, which implies that
+        the associated `left` or `right` value is a scalar.
+
+    Returns
+    -------
+    result, mask: ndarray[bool]
+        The result of the logical xor, and the new mask.
+    """
+    # To reduce the number of cases, we ensure that `left` & `left_mask`
+    # always come from an array, not a scalar. This is safe, since
+    # A ^ B == B ^ A
+    if left_mask is None:
+        return kleene_xor(right, left, right_mask, left_mask)
+
+    if not isinstance(left, np.ndarray):
+        raise TypeError("Either `left` or `right` need to be a np.ndarray.")
+
+    raise_for_nan(right, method="xor")
+    if right is libmissing.NA:
+        result = np.zeros_like(left)
+    else:
+        result = left ^ right
+
+    if right_mask is None:
+        if right is libmissing.NA:
+            mask = np.ones_like(left_mask)
+        else:
+            mask = left_mask.copy()
+    else:
+        mask = left_mask | right_mask
+
+    return result, mask
+
+
+def kleene_and(
+    left: bool | libmissing.NAType | np.ndarray,
+    right: bool | libmissing.NAType | np.ndarray,
+    left_mask: np.ndarray | None,
+    right_mask: np.ndarray | None,
+):
+    """
+    Boolean ``and`` using Kleene logic.
+
+    Values are ``NA`` for ``NA & NA`` or ``True & NA``.
+
+    Parameters
+    ----------
+    left, right : ndarray, NA, or bool
+        The values of the array.
+    left_mask, right_mask : ndarray, optional
+        The masks. Only one of these may be None, which implies that
+        the associated `left` or `right` value is a scalar.
+
+    Returns
+    -------
+    result, mask: ndarray[bool]
+        The result of the logical and, and the new mask.
+    """
+    # To reduce the number of cases, we ensure that `left` & `left_mask`
+    # always come from an array, not a scalar. 
This is safe, since + # A & B == B & A + if left_mask is None: + return kleene_and(right, left, right_mask, left_mask) + + if not isinstance(left, np.ndarray): + raise TypeError("Either `left` or `right` need to be a np.ndarray.") + raise_for_nan(right, method="and") + + if right is libmissing.NA: + result = np.zeros_like(left) + else: + result = left & right + + if right_mask is None: + # Scalar `right` + if right is libmissing.NA: + mask = (left & ~left_mask) | left_mask + + else: + mask = left_mask.copy() + if right is False: + # unmask everything + mask[:] = False + else: + # unmask where either left or right is False + left_false = ~(left | left_mask) + right_false = ~(right | right_mask) + mask = (left_mask & ~right_false) | (right_mask & ~left_false) + + return result, mask + + +def raise_for_nan(value, method: str) -> None: + if lib.is_float(value) and np.isnan(value): + raise ValueError(f"Cannot perform logical '{method}' with floating NaN") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/missing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/missing.py new file mode 100644 index 00000000..fc685935 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/ops/missing.py @@ -0,0 +1,176 @@ +""" +Missing data handling for arithmetic operations. + +In particular, pandas conventions regarding division by zero differ +from numpy in the following ways: + 1) np.array([-1, 0, 1], dtype=dtype1) // np.array([0, 0, 0], dtype=dtype2) + gives [nan, nan, nan] for most dtype combinations, and [0, 0, 0] for + the remaining pairs + (the remaining being dtype1==dtype2==intN and dtype==dtype2==uintN). + + pandas convention is to return [-inf, nan, inf] for all dtype + combinations. + + Note: the numpy behavior described here is py3-specific. + + 2) np.array([-1, 0, 1], dtype=dtype1) % np.array([0, 0, 0], dtype=dtype2) + gives precisely the same results as the // operation. + + pandas convention is to return [nan, nan, nan] for all dtype + combinations. + + 3) divmod behavior consistent with 1) and 2). +""" +from __future__ import annotations + +import operator + +import numpy as np + +from pandas.core import roperator + + +def _fill_zeros(result: np.ndarray, x, y): + """ + If this is a reversed op, then flip x,y + + If we have an integer value (or array in y) + and we have 0's, fill them with np.nan, + return the result. + + Mask the nan's from x. + """ + if result.dtype.kind == "f": + return result + + is_variable_type = hasattr(y, "dtype") + is_scalar_type = not isinstance(y, np.ndarray) + + if not is_variable_type and not is_scalar_type: + # e.g. test_series_ops_name_retention with mod we get here with list/tuple + return result + + if is_scalar_type: + y = np.array(y) + + if y.dtype.kind in "iu": + ymask = y == 0 + if ymask.any(): + # GH#7325, mask and nans must be broadcastable + mask = ymask & ~np.isnan(result) + + # GH#9308 doing ravel on result and mask can improve putmask perf, + # but can also make unwanted copies. + result = result.astype("float64", copy=False) + + np.putmask(result, mask, np.nan) + + return result + + +def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray: + """ + Set results of 0 // 0 to np.nan, regardless of the dtypes + of the numerator or the denominator. + + Parameters + ---------- + x : ndarray + y : ndarray + result : ndarray + + Returns + ------- + ndarray + The filled result. 
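+        ``0 // 0`` positions become NaN; ``x // 0`` for nonzero ``x`` becomes
+        ``+inf`` or ``-inf`` according to the signs of ``x`` and ``y``
+        (a negative-zero denominator counts as negative).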
+ + Examples + -------- + >>> x = np.array([1, 0, -1], dtype=np.int64) + >>> x + array([ 1, 0, -1]) + >>> y = 0 # int 0; numpy behavior is different with float + >>> result = x // y + >>> result # raw numpy result does not fill division by zero + array([0, 0, 0]) + >>> mask_zero_div_zero(x, y, result) + array([ inf, nan, -inf]) + """ + + if not hasattr(y, "dtype"): + # e.g. scalar, tuple + y = np.array(y) + if not hasattr(x, "dtype"): + # e.g scalar, tuple + x = np.array(x) + + zmask = y == 0 + + if zmask.any(): + # Flip sign if necessary for -0.0 + zneg_mask = zmask & np.signbit(y) + zpos_mask = zmask & ~zneg_mask + + x_lt0 = x < 0 + x_gt0 = x > 0 + nan_mask = zmask & (x == 0) + neginf_mask = (zpos_mask & x_lt0) | (zneg_mask & x_gt0) + posinf_mask = (zpos_mask & x_gt0) | (zneg_mask & x_lt0) + + if nan_mask.any() or neginf_mask.any() or posinf_mask.any(): + # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN + result = result.astype("float64", copy=False) + + result[nan_mask] = np.nan + result[posinf_mask] = np.inf + result[neginf_mask] = -np.inf + + return result + + +def dispatch_fill_zeros(op, left, right, result): + """ + Call _fill_zeros with the appropriate fill value depending on the operation, + with special logic for divmod and rdivmod. + + Parameters + ---------- + op : function (operator.add, operator.div, ...) + left : object (np.ndarray for non-reversed ops) + We have excluded ExtensionArrays here + right : object (np.ndarray for reversed ops) + We have excluded ExtensionArrays here + result : ndarray + + Returns + ------- + result : np.ndarray + + Notes + ----- + For divmod and rdivmod, the `result` parameter and returned `result` + is a 2-tuple of ndarray objects. + """ + if op is divmod: + result = ( + mask_zero_div_zero(left, right, result[0]), + _fill_zeros(result[1], left, right), + ) + elif op is roperator.rdivmod: + result = ( + mask_zero_div_zero(right, left, result[0]), + _fill_zeros(result[1], right, left), + ) + elif op is operator.floordiv: + # Note: no need to do this for truediv; in py3 numpy behaves the way + # we want. + result = mask_zero_div_zero(left, right, result) + elif op is roperator.rfloordiv: + # Note: no need to do this for rtruediv; in py3 numpy behaves the way + # we want. 
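+        # Reflected op: the incoming `left` is the right-hand operand, so the
+        # numerator/denominator are passed to the masker swapped.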
+ result = mask_zero_div_zero(right, left, result) + elif op is operator.mod: + result = _fill_zeros(result, left, right) + elif op is roperator.rmod: + result = _fill_zeros(result, right, left) + return result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/resample.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/resample.py new file mode 100644 index 00000000..b75005ff --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/resample.py @@ -0,0 +1,2758 @@ +from __future__ import annotations + +import copy +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Callable, + Literal, + cast, + final, + no_type_check, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas._libs.tslibs import ( + BaseOffset, + IncompatibleFrequency, + NaT, + Period, + Timedelta, + Timestamp, + to_offset, +) +from pandas._typing import NDFrameT +from pandas.compat.numpy import function as nv +from pandas.errors import AbstractMethodError +from pandas.util._decorators import ( + Appender, + Substitution, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) + +import pandas.core.algorithms as algos +from pandas.core.apply import ( + ResamplerWindowApply, + warn_alias_replacement, +) +from pandas.core.base import ( + PandasObject, + SelectionMixin, +) +import pandas.core.common as com +from pandas.core.generic import ( + NDFrame, + _shared_docs, +) +from pandas.core.groupby.generic import SeriesGroupBy +from pandas.core.groupby.groupby import ( + BaseGroupBy, + GroupBy, + _pipe_template, + get_groupby, +) +from pandas.core.groupby.grouper import Grouper +from pandas.core.groupby.ops import BinGrouper +from pandas.core.indexes.api import MultiIndex +from pandas.core.indexes.datetimes import ( + DatetimeIndex, + date_range, +) +from pandas.core.indexes.period import ( + PeriodIndex, + period_range, +) +from pandas.core.indexes.timedeltas import ( + TimedeltaIndex, + timedelta_range, +) + +from pandas.tseries.frequencies import ( + is_subperiod, + is_superperiod, +) +from pandas.tseries.offsets import ( + Day, + Tick, +) + +if TYPE_CHECKING: + from collections.abc import Hashable + + from pandas._typing import ( + AnyArrayLike, + Axis, + AxisInt, + Frequency, + IndexLabel, + InterpolateOptions, + T, + TimedeltaConvertibleTypes, + TimeGrouperOrigin, + TimestampConvertibleTypes, + npt, + ) + + from pandas import ( + DataFrame, + Index, + Series, + ) + +_shared_docs_kwargs: dict[str, str] = {} + + +class Resampler(BaseGroupBy, PandasObject): + """ + Class for resampling datetimelike data, a groupby-like operation. + See aggregate, transform, and apply functions on this object. + + It's easiest to use obj.resample(...) to use Resampler. + + Parameters + ---------- + obj : Series or DataFrame + groupby : TimeGrouper + axis : int, default 0 + kind : str or None + 'period', 'timestamp' to override default index treatment + + Returns + ------- + a Resampler of the appropriate type + + Notes + ----- + After resampling, see aggregate, apply, and transform functions. 
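+
+    Examples
+    --------
+    A minimal sketch with illustrative data:
+
+    >>> ser = pd.Series(
+    ...     range(3), index=pd.date_range("2023-01-01", periods=3, freq="D")
+    ... )
+    >>> ser.resample("2D").sum()
+    2023-01-01    1
+    2023-01-03    2
+    Freq: 2D, dtype: int64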
+ """ + + grouper: BinGrouper + _timegrouper: TimeGrouper + binner: DatetimeIndex | TimedeltaIndex | PeriodIndex # depends on subclass + exclusions: frozenset[Hashable] = frozenset() # for SelectionMixin compat + _internal_names_set = set({"obj", "ax", "_indexer"}) + + # to the groupby descriptor + _attributes = [ + "freq", + "axis", + "closed", + "label", + "convention", + "kind", + "origin", + "offset", + ] + + def __init__( + self, + obj: NDFrame, + timegrouper: TimeGrouper, + axis: Axis = 0, + kind=None, + *, + gpr_index: Index, + group_keys: bool = False, + selection=None, + ) -> None: + self._timegrouper = timegrouper + self.keys = None + self.sort = True + self.axis = obj._get_axis_number(axis) + self.kind = kind + self.group_keys = group_keys + self.as_index = True + + self.obj, self.ax, self._indexer = self._timegrouper._set_grouper( + self._convert_obj(obj), sort=True, gpr_index=gpr_index + ) + self.binner, self.grouper = self._get_binner() + self._selection = selection + if self._timegrouper.key is not None: + self.exclusions = frozenset([self._timegrouper.key]) + else: + self.exclusions = frozenset() + + def __str__(self) -> str: + """ + Provide a nice str repr of our rolling object. + """ + attrs = ( + f"{k}={getattr(self._timegrouper, k)}" + for k in self._attributes + if getattr(self._timegrouper, k, None) is not None + ) + return f"{type(self).__name__} [{', '.join(attrs)}]" + + def __getattr__(self, attr: str): + if attr in self._internal_names_set: + return object.__getattribute__(self, attr) + if attr in self._attributes: + return getattr(self._timegrouper, attr) + if attr in self.obj: + return self[attr] + + return object.__getattribute__(self, attr) + + @property + def _from_selection(self) -> bool: + """ + Is the resampling from a DataFrame column or MultiIndex level. + """ + # upsampling and PeriodIndex resampling do not work + # with selection, this state used to catch and raise an error + return self._timegrouper is not None and ( + self._timegrouper.key is not None or self._timegrouper.level is not None + ) + + def _convert_obj(self, obj: NDFrameT) -> NDFrameT: + """ + Provide any conversions for the object in order to correctly handle. + + Parameters + ---------- + obj : Series or DataFrame + + Returns + ------- + Series or DataFrame + """ + return obj._consolidate() + + def _get_binner_for_time(self): + raise AbstractMethodError(self) + + @final + def _get_binner(self): + """ + Create the BinGrouper, assume that self.set_grouper(obj) + has already been called. + """ + binner, bins, binlabels = self._get_binner_for_time() + assert len(bins) == len(binlabels) + bin_grouper = BinGrouper(bins, binlabels, indexer=self._indexer) + return binner, bin_grouper + + @Substitution( + klass="Resampler", + examples=""" + >>> df = pd.DataFrame({'A': [1, 2, 3, 4]}, + ... index=pd.date_range('2012-08-02', periods=4)) + >>> df + A + 2012-08-02 1 + 2012-08-03 2 + 2012-08-04 3 + 2012-08-05 4 + + To get the difference between each 2-day period's maximum and minimum + value in one pass, you can do + + >>> df.resample('2D').pipe(lambda x: x.max() - x.min()) + A + 2012-08-02 1 + 2012-08-04 1""", + ) + @Appender(_pipe_template) + def pipe( + self, + func: Callable[..., T] | tuple[Callable[..., T], str], + *args, + **kwargs, + ) -> T: + return super().pipe(func, *args, **kwargs) + + _agg_see_also_doc = dedent( + """ + See Also + -------- + DataFrame.groupby.aggregate : Aggregate using callable, string, dict, + or list of string/callables. 
+ DataFrame.resample.transform : Transforms the Series on each group + based on the given function. + DataFrame.aggregate: Aggregate using one or more + operations over the specified axis. + """ + ) + + _agg_examples_doc = dedent( + """ + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4, 5], + ... index=pd.date_range('20130101', periods=5, freq='s')) + >>> s + 2013-01-01 00:00:00 1 + 2013-01-01 00:00:01 2 + 2013-01-01 00:00:02 3 + 2013-01-01 00:00:03 4 + 2013-01-01 00:00:04 5 + Freq: S, dtype: int64 + + >>> r = s.resample('2s') + + >>> r.agg("sum") + 2013-01-01 00:00:00 3 + 2013-01-01 00:00:02 7 + 2013-01-01 00:00:04 5 + Freq: 2S, dtype: int64 + + >>> r.agg(['sum', 'mean', 'max']) + sum mean max + 2013-01-01 00:00:00 3 1.5 2 + 2013-01-01 00:00:02 7 3.5 4 + 2013-01-01 00:00:04 5 5.0 5 + + >>> r.agg({'result': lambda x: x.mean() / x.std(), + ... 'total': "sum"}) + result total + 2013-01-01 00:00:00 2.121320 3 + 2013-01-01 00:00:02 4.949747 7 + 2013-01-01 00:00:04 NaN 5 + + >>> r.agg(average="mean", total="sum") + average total + 2013-01-01 00:00:00 1.5 3 + 2013-01-01 00:00:02 3.5 7 + 2013-01-01 00:00:04 5.0 5 + """ + ) + + @doc( + _shared_docs["aggregate"], + see_also=_agg_see_also_doc, + examples=_agg_examples_doc, + klass="DataFrame", + axis="", + ) + def aggregate(self, func=None, *args, **kwargs): + result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg() + if result is None: + how = func + result = self._groupby_and_aggregate(how, *args, **kwargs) + + return result + + agg = aggregate + apply = aggregate + + def transform(self, arg, *args, **kwargs): + """ + Call function producing a like-indexed Series on each group. + + Return a Series with the transformed values. + + Parameters + ---------- + arg : function + To apply to each group. Should return a Series with the same index. + + Returns + ------- + Series + + Examples + -------- + >>> s = pd.Series([1, 2], + ... index=pd.date_range('20180101', + ... periods=2, + ... freq='1h')) + >>> s + 2018-01-01 00:00:00 1 + 2018-01-01 01:00:00 2 + Freq: H, dtype: int64 + + >>> resampled = s.resample('15min') + >>> resampled.transform(lambda x: (x - x.mean()) / x.std()) + 2018-01-01 00:00:00 NaN + 2018-01-01 01:00:00 NaN + Freq: H, dtype: float64 + """ + return self._selected_obj.groupby(self._timegrouper).transform( + arg, *args, **kwargs + ) + + def _downsample(self, f, **kwargs): + raise AbstractMethodError(self) + + def _upsample(self, f, limit: int | None = None, fill_value=None): + raise AbstractMethodError(self) + + def _gotitem(self, key, ndim: int, subset=None): + """ + Sub-classes to define. Return a sliced object. + + Parameters + ---------- + key : string / list of selections + ndim : {1, 2} + requested ndim of result + subset : object, default None + subset to act on + """ + grouper = self.grouper + if subset is None: + subset = self.obj + if key is not None: + subset = subset[key] + else: + # reached via Apply.agg_dict_like with selection=None and ndim=1 + assert subset.ndim == 1 + if ndim == 1: + assert subset.ndim == 1 + + grouped = get_groupby( + subset, by=None, grouper=grouper, axis=self.axis, group_keys=self.group_keys + ) + return grouped + + def _groupby_and_aggregate(self, how, *args, **kwargs): + """ + Re-evaluate the obj with a groupby aggregation. 
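+
+        Falls back to ``grouped.apply`` when ``how`` turns out not to be a
+        reducing aggregation; see the exception handling below.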
+ """ + grouper = self.grouper + + # Excludes `on` column when provided + obj = self._obj_with_exclusions + + grouped = get_groupby( + obj, by=None, grouper=grouper, axis=self.axis, group_keys=self.group_keys + ) + + try: + if callable(how): + # TODO: test_resample_apply_with_additional_args fails if we go + # through the non-lambda path, not clear that it should. + func = lambda x: how(x, *args, **kwargs) + result = grouped.aggregate(func) + else: + result = grouped.aggregate(how, *args, **kwargs) + except (AttributeError, KeyError): + # we have a non-reducing function; try to evaluate + # alternatively we want to evaluate only a column of the input + + # test_apply_to_one_column_of_df the function being applied references + # a DataFrame column, but aggregate_item_by_item operates column-wise + # on Series, raising AttributeError or KeyError + # (depending on whether the column lookup uses getattr/__getitem__) + result = grouped.apply(how, *args, **kwargs) + + except ValueError as err: + if "Must produce aggregated value" in str(err): + # raised in _aggregate_named + # see test_apply_without_aggregation, test_apply_with_mutated_index + pass + else: + raise + + # we have a non-reducing function + # try to evaluate + result = grouped.apply(how, *args, **kwargs) + + return self._wrap_result(result) + + def _get_resampler_for_grouping(self, groupby: GroupBy, key): + """ + Return the correct class for resampling with groupby. + """ + return self._resampler_for_grouping(groupby=groupby, key=key, parent=self) + + def _wrap_result(self, result): + """ + Potentially wrap any results. + """ + # GH 47705 + obj = self.obj + if ( + isinstance(result, ABCDataFrame) + and len(result) == 0 + and not isinstance(result.index, PeriodIndex) + ): + result = result.set_index( + _asfreq_compat(obj.index[:0], freq=self.freq), append=True + ) + + if isinstance(result, ABCSeries) and self._selection is not None: + result.name = self._selection + + if isinstance(result, ABCSeries) and result.empty: + # When index is all NaT, result is empty but index is not + result.index = _asfreq_compat(obj.index[:0], freq=self.freq) + result.name = getattr(obj, "name", None) + + return result + + def ffill(self, limit: int | None = None): + """ + Forward fill the values. + + Parameters + ---------- + limit : int, optional + Limit of how many values to fill. + + Returns + ------- + An upsampled Series. + + See Also + -------- + Series.fillna: Fill NA/NaN values using the specified method. + DataFrame.fillna: Fill NA/NaN values using the specified method. + + Examples + -------- + Here we only create a ``Series``. + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... 
['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + + Example for ``ffill`` with downsampling (we have fewer dates after resampling): + + >>> ser.resample('MS').ffill() + 2023-01-01 1 + 2023-02-01 3 + Freq: MS, dtype: int64 + + Example for ``ffill`` with upsampling (fill the new dates with + the previous value): + + >>> ser.resample('W').ffill() + 2023-01-01 1 + 2023-01-08 1 + 2023-01-15 2 + 2023-01-22 2 + 2023-01-29 2 + 2023-02-05 3 + 2023-02-12 3 + 2023-02-19 4 + Freq: W-SUN, dtype: int64 + + With upsampling and limiting (only fill the first new date with the + previous value): + + >>> ser.resample('W').ffill(limit=1) + 2023-01-01 1.0 + 2023-01-08 1.0 + 2023-01-15 2.0 + 2023-01-22 2.0 + 2023-01-29 NaN + 2023-02-05 3.0 + 2023-02-12 NaN + 2023-02-19 4.0 + Freq: W-SUN, dtype: float64 + """ + return self._upsample("ffill", limit=limit) + + def nearest(self, limit: int | None = None): + """ + Resample by using the nearest value. + + When resampling data, missing values may appear (e.g., when the + resampling frequency is higher than the original frequency). + The `nearest` method will replace ``NaN`` values that appeared in + the resampled data with the value from the nearest member of the + sequence, based on the index value. + Missing values that existed in the original data will not be modified. + If `limit` is given, fill only this many values in each direction for + each of the original values. + + Parameters + ---------- + limit : int, optional + Limit of how many values to fill. + + Returns + ------- + Series or DataFrame + An upsampled Series or DataFrame with ``NaN`` values filled with + their nearest value. + + See Also + -------- + backfill : Backward fill the new missing values in the resampled data. + pad : Forward fill ``NaN`` values. + + Examples + -------- + >>> s = pd.Series([1, 2], + ... index=pd.date_range('20180101', + ... periods=2, + ... freq='1h')) + >>> s + 2018-01-01 00:00:00 1 + 2018-01-01 01:00:00 2 + Freq: H, dtype: int64 + + >>> s.resample('15min').nearest() + 2018-01-01 00:00:00 1 + 2018-01-01 00:15:00 1 + 2018-01-01 00:30:00 2 + 2018-01-01 00:45:00 2 + 2018-01-01 01:00:00 2 + Freq: 15T, dtype: int64 + + Limit the number of upsampled values imputed by the nearest: + + >>> s.resample('15min').nearest(limit=1) + 2018-01-01 00:00:00 1.0 + 2018-01-01 00:15:00 1.0 + 2018-01-01 00:30:00 NaN + 2018-01-01 00:45:00 2.0 + 2018-01-01 01:00:00 2.0 + Freq: 15T, dtype: float64 + """ + return self._upsample("nearest", limit=limit) + + def bfill(self, limit: int | None = None): + """ + Backward fill the new missing values in the resampled data. + + In statistics, imputation is the process of replacing missing data with + substituted values [1]_. When resampling data, missing values may + appear (e.g., when the resampling frequency is higher than the original + frequency). The backward fill will replace NaN values that appeared in + the resampled data with the next value in the original sequence. + Missing values that existed in the original data will not be modified. + + Parameters + ---------- + limit : int, optional + Limit of how many values to fill. + + Returns + ------- + Series, DataFrame + An upsampled Series or DataFrame with backward filled NaN values. + + See Also + -------- + bfill : Alias of backfill. + fillna : Fill NaN values using the specified method, which can be + 'backfill'. + nearest : Fill NaN values with nearest neighbor starting from center. 
+ ffill : Forward fill NaN values. + Series.fillna : Fill NaN values in the Series using the + specified method, which can be 'backfill'. + DataFrame.fillna : Fill NaN values in the DataFrame using the + specified method, which can be 'backfill'. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics) + + Examples + -------- + Resampling a Series: + + >>> s = pd.Series([1, 2, 3], + ... index=pd.date_range('20180101', periods=3, freq='h')) + >>> s + 2018-01-01 00:00:00 1 + 2018-01-01 01:00:00 2 + 2018-01-01 02:00:00 3 + Freq: H, dtype: int64 + + >>> s.resample('30min').bfill() + 2018-01-01 00:00:00 1 + 2018-01-01 00:30:00 2 + 2018-01-01 01:00:00 2 + 2018-01-01 01:30:00 3 + 2018-01-01 02:00:00 3 + Freq: 30T, dtype: int64 + + >>> s.resample('15min').bfill(limit=2) + 2018-01-01 00:00:00 1.0 + 2018-01-01 00:15:00 NaN + 2018-01-01 00:30:00 2.0 + 2018-01-01 00:45:00 2.0 + 2018-01-01 01:00:00 2.0 + 2018-01-01 01:15:00 NaN + 2018-01-01 01:30:00 3.0 + 2018-01-01 01:45:00 3.0 + 2018-01-01 02:00:00 3.0 + Freq: 15T, dtype: float64 + + Resampling a DataFrame that has missing values: + + >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]}, + ... index=pd.date_range('20180101', periods=3, + ... freq='h')) + >>> df + a b + 2018-01-01 00:00:00 2.0 1 + 2018-01-01 01:00:00 NaN 3 + 2018-01-01 02:00:00 6.0 5 + + >>> df.resample('30min').bfill() + a b + 2018-01-01 00:00:00 2.0 1 + 2018-01-01 00:30:00 NaN 3 + 2018-01-01 01:00:00 NaN 3 + 2018-01-01 01:30:00 6.0 5 + 2018-01-01 02:00:00 6.0 5 + + >>> df.resample('15min').bfill(limit=2) + a b + 2018-01-01 00:00:00 2.0 1.0 + 2018-01-01 00:15:00 NaN NaN + 2018-01-01 00:30:00 NaN 3.0 + 2018-01-01 00:45:00 NaN 3.0 + 2018-01-01 01:00:00 NaN 3.0 + 2018-01-01 01:15:00 NaN NaN + 2018-01-01 01:30:00 6.0 5.0 + 2018-01-01 01:45:00 6.0 5.0 + 2018-01-01 02:00:00 6.0 5.0 + """ + return self._upsample("bfill", limit=limit) + + def fillna(self, method, limit: int | None = None): + """ + Fill missing values introduced by upsampling. + + In statistics, imputation is the process of replacing missing data with + substituted values [1]_. When resampling data, missing values may + appear (e.g., when the resampling frequency is higher than the original + frequency). + + Missing values that existed in the original data will + not be modified. + + Parameters + ---------- + method : {'pad', 'backfill', 'ffill', 'bfill', 'nearest'} + Method to use for filling holes in resampled data + + * 'pad' or 'ffill': use previous valid observation to fill gap + (forward fill). + * 'backfill' or 'bfill': use next valid observation to fill gap. + * 'nearest': use nearest valid observation to fill gap. + + limit : int, optional + Limit of how many consecutive missing values to fill. + + Returns + ------- + Series or DataFrame + An upsampled Series or DataFrame with missing values filled. + + See Also + -------- + bfill : Backward fill NaN values in the resampled data. + ffill : Forward fill NaN values in the resampled data. + nearest : Fill NaN values in the resampled data + with nearest neighbor starting from center. + interpolate : Fill NaN values using interpolation. + Series.fillna : Fill NaN values in the Series using the + specified method, which can be 'bfill' and 'ffill'. + DataFrame.fillna : Fill NaN values in the DataFrame using the + specified method, which can be 'bfill' and 'ffill'. + + References + ---------- + .. 
[1] https://en.wikipedia.org/wiki/Imputation_(statistics) + + Examples + -------- + Resampling a Series: + + >>> s = pd.Series([1, 2, 3], + ... index=pd.date_range('20180101', periods=3, freq='h')) + >>> s + 2018-01-01 00:00:00 1 + 2018-01-01 01:00:00 2 + 2018-01-01 02:00:00 3 + Freq: H, dtype: int64 + + Without filling the missing values you get: + + >>> s.resample("30min").asfreq() + 2018-01-01 00:00:00 1.0 + 2018-01-01 00:30:00 NaN + 2018-01-01 01:00:00 2.0 + 2018-01-01 01:30:00 NaN + 2018-01-01 02:00:00 3.0 + Freq: 30T, dtype: float64 + + >>> s.resample('30min').fillna("backfill") + 2018-01-01 00:00:00 1 + 2018-01-01 00:30:00 2 + 2018-01-01 01:00:00 2 + 2018-01-01 01:30:00 3 + 2018-01-01 02:00:00 3 + Freq: 30T, dtype: int64 + + >>> s.resample('15min').fillna("backfill", limit=2) + 2018-01-01 00:00:00 1.0 + 2018-01-01 00:15:00 NaN + 2018-01-01 00:30:00 2.0 + 2018-01-01 00:45:00 2.0 + 2018-01-01 01:00:00 2.0 + 2018-01-01 01:15:00 NaN + 2018-01-01 01:30:00 3.0 + 2018-01-01 01:45:00 3.0 + 2018-01-01 02:00:00 3.0 + Freq: 15T, dtype: float64 + + >>> s.resample('30min').fillna("pad") + 2018-01-01 00:00:00 1 + 2018-01-01 00:30:00 1 + 2018-01-01 01:00:00 2 + 2018-01-01 01:30:00 2 + 2018-01-01 02:00:00 3 + Freq: 30T, dtype: int64 + + >>> s.resample('30min').fillna("nearest") + 2018-01-01 00:00:00 1 + 2018-01-01 00:30:00 2 + 2018-01-01 01:00:00 2 + 2018-01-01 01:30:00 3 + 2018-01-01 02:00:00 3 + Freq: 30T, dtype: int64 + + Missing values present before the upsampling are not affected. + + >>> sm = pd.Series([1, None, 3], + ... index=pd.date_range('20180101', periods=3, freq='h')) + >>> sm + 2018-01-01 00:00:00 1.0 + 2018-01-01 01:00:00 NaN + 2018-01-01 02:00:00 3.0 + Freq: H, dtype: float64 + + >>> sm.resample('30min').fillna('backfill') + 2018-01-01 00:00:00 1.0 + 2018-01-01 00:30:00 NaN + 2018-01-01 01:00:00 NaN + 2018-01-01 01:30:00 3.0 + 2018-01-01 02:00:00 3.0 + Freq: 30T, dtype: float64 + + >>> sm.resample('30min').fillna('pad') + 2018-01-01 00:00:00 1.0 + 2018-01-01 00:30:00 1.0 + 2018-01-01 01:00:00 NaN + 2018-01-01 01:30:00 NaN + 2018-01-01 02:00:00 3.0 + Freq: 30T, dtype: float64 + + >>> sm.resample('30min').fillna('nearest') + 2018-01-01 00:00:00 1.0 + 2018-01-01 00:30:00 NaN + 2018-01-01 01:00:00 NaN + 2018-01-01 01:30:00 3.0 + 2018-01-01 02:00:00 3.0 + Freq: 30T, dtype: float64 + + DataFrame resampling is done column-wise. All the same options are + available. + + >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]}, + ... index=pd.date_range('20180101', periods=3, + ... freq='h')) + >>> df + a b + 2018-01-01 00:00:00 2.0 1 + 2018-01-01 01:00:00 NaN 3 + 2018-01-01 02:00:00 6.0 5 + + >>> df.resample('30min').fillna("bfill") + a b + 2018-01-01 00:00:00 2.0 1 + 2018-01-01 00:30:00 NaN 3 + 2018-01-01 01:00:00 NaN 3 + 2018-01-01 01:30:00 6.0 5 + 2018-01-01 02:00:00 6.0 5 + """ + warnings.warn( + f"{type(self).__name__}.fillna is deprecated and will be removed " + "in a future version. Use obj.ffill(), obj.bfill(), " + "or obj.nearest() instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._upsample(method, limit=limit) + + def interpolate( + self, + method: InterpolateOptions = "linear", + *, + axis: Axis = 0, + limit: int | None = None, + inplace: bool = False, + limit_direction: Literal["forward", "backward", "both"] = "forward", + limit_area=None, + downcast=lib.no_default, + **kwargs, + ): + """ + Interpolate values between target timestamps according to different methods. 
+
+        The original index is first reindexed to target timestamps
+        (see :meth:`core.resample.Resampler.asfreq`),
+        then the interpolation of ``NaN`` values via
+        :meth:`DataFrame.interpolate` happens.
+
+        Parameters
+        ----------
+        method : str, default 'linear'
+            Interpolation technique to use. One of:
+
+            * 'linear': Ignore the index and treat the values as equally
+              spaced. This is the only method supported on MultiIndexes.
+            * 'time': Works on daily and higher resolution data to interpolate
+              given length of interval.
+            * 'index', 'values': use the actual numerical values of the index.
+            * 'pad': Fill in NaNs using existing values.
+            * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
+              'barycentric', 'polynomial': Passed to
+              `scipy.interpolate.interp1d`, whereas 'spline' is passed to
+              `scipy.interpolate.UnivariateSpline`. These methods use the
+              numerical values of the index. Both 'polynomial' and 'spline'
+              require that you also specify an `order` (int), e.g.
+              ``df.interpolate(method='polynomial', order=5)``. Note that the
+              `slinear` method in Pandas refers to the SciPy first order
+              `spline` instead of the Pandas first order `spline`.
+            * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima',
+              'cubicspline': Wrappers around the SciPy interpolation methods
+              of similar names. See `Notes`.
+            * 'from_derivatives': Refers to
+              `scipy.interpolate.BPoly.from_derivatives`.
+
+        axis : {{0 or 'index', 1 or 'columns', None}}, default None
+            Axis to interpolate along. For `Series` this parameter is unused
+            and defaults to 0.
+        limit : int, optional
+            Maximum number of consecutive NaNs to fill. Must be greater than
+            0.
+        inplace : bool, default False
+            Update the data in place if possible.
+        limit_direction : {{'forward', 'backward', 'both'}}, optional
+            Consecutive NaNs will be filled in this direction.
+
+            If limit is specified:
+
+            * If 'method' is 'pad' or 'ffill', 'limit_direction' must be
+              'forward'.
+            * If 'method' is 'backfill' or 'bfill', 'limit_direction' must be
+              'backward'.
+
+            If 'limit' is not specified:
+
+            * If 'method' is 'backfill' or 'bfill', the default is 'backward'
+            * else the default is 'forward'
+
+            Raises ValueError if `limit_direction` is 'forward' or 'both' and
+            method is 'backfill' or 'bfill'.
+            Raises ValueError if `limit_direction` is 'backward' or 'both' and
+            method is 'pad' or 'ffill'.
+
+        limit_area : {{`None`, 'inside', 'outside'}}, default None
+            If limit is specified, consecutive NaNs will be filled with this
+            restriction.
+
+            * ``None``: No fill restriction.
+            * 'inside': Only fill NaNs surrounded by valid values
+              (interpolate).
+            * 'outside': Only fill NaNs outside valid values (extrapolate).
+
+        downcast : optional, 'infer' or None, defaults to None
+            Downcast dtypes if possible.
+
+            .. deprecated:: 2.1.0
+
+        ``**kwargs`` : optional
+            Keyword arguments to pass on to the interpolating function.
+
+        Returns
+        -------
+        DataFrame or Series
+            Interpolated values at the specified freq.
+
+        See Also
+        --------
+        core.resample.Resampler.asfreq: Return the values at the new freq,
+            essentially a reindex.
+        DataFrame.interpolate: Fill NaN values using an interpolation method.
+
+        Notes
+        -----
+        For high-frequency or non-equidistant time series with timestamps,
+        the reindexing followed by interpolation may lead to information
+        loss, as shown in the last example.
+
+        Examples
+        --------
+
+        >>> import datetime as dt
+        >>> timesteps = [
+        ...     dt.datetime(2023, 3, 1, 7, 0, 0),
+        ...     dt.datetime(2023, 3, 1, 7, 0, 1),
+        ...     dt.datetime(2023, 3, 1, 7, 0, 2),
+        ...     dt.datetime(2023, 3, 1, 7, 0, 3),
+        ...     dt.datetime(2023, 3, 1, 7, 0, 4)]
+        >>> series = pd.Series(data=[1, -1, 2, 1, 3], index=timesteps)
+        >>> series
+        2023-03-01 07:00:00    1
+        2023-03-01 07:00:01   -1
+        2023-03-01 07:00:02    2
+        2023-03-01 07:00:03    1
+        2023-03-01 07:00:04    3
+        dtype: int64
+
+        Downsample the series to 0.5Hz by providing the period time of 2s.
+
+        >>> series.resample("2s").interpolate("linear")
+        2023-03-01 07:00:00    1
+        2023-03-01 07:00:02    2
+        2023-03-01 07:00:04    3
+        Freq: 2S, dtype: int64
+
+        Upsample the series to 2Hz by providing the period time of 500ms.
+
+        >>> series.resample("500ms").interpolate("linear")
+        2023-03-01 07:00:00.000    1.0
+        2023-03-01 07:00:00.500    0.0
+        2023-03-01 07:00:01.000   -1.0
+        2023-03-01 07:00:01.500    0.5
+        2023-03-01 07:00:02.000    2.0
+        2023-03-01 07:00:02.500    1.5
+        2023-03-01 07:00:03.000    1.0
+        2023-03-01 07:00:03.500    2.0
+        2023-03-01 07:00:04.000    3.0
+        Freq: 500L, dtype: float64
+
+        Internal reindexing with ``asfreq()`` prior to interpolation leads to
+        an interpolated timeseries on the basis of the reindexed timestamps
+        (the anchors). Since not all datapoints from the original series
+        become anchors, it can lead to misleading interpolation results, as
+        in the following example:
+
+        >>> series.resample("400ms").interpolate("linear")
+        2023-03-01 07:00:00.000    1.0
+        2023-03-01 07:00:00.400    1.2
+        2023-03-01 07:00:00.800    1.4
+        2023-03-01 07:00:01.200    1.6
+        2023-03-01 07:00:01.600    1.8
+        2023-03-01 07:00:02.000    2.0
+        2023-03-01 07:00:02.400    2.2
+        2023-03-01 07:00:02.800    2.4
+        2023-03-01 07:00:03.200    2.6
+        2023-03-01 07:00:03.600    2.8
+        2023-03-01 07:00:04.000    3.0
+        Freq: 400L, dtype: float64
+
+        Note that the series erroneously increases between two anchors
+        ``07:00:00`` and ``07:00:02``.
+        """
+        assert downcast is lib.no_default  # just checking coverage
+        result = self._upsample("asfreq")
+        return result.interpolate(
+            method=method,
+            axis=axis,
+            limit=limit,
+            inplace=inplace,
+            limit_direction=limit_direction,
+            limit_area=limit_area,
+            downcast=downcast,
+            **kwargs,
+        )
+
+    def asfreq(self, fill_value=None):
+        """
+        Return the values at the new freq, essentially a reindex.
+
+        Parameters
+        ----------
+        fill_value : scalar, optional
+            Value to use for missing values, applied during upsampling (note
+            this does not fill NaNs that already were present).
+
+        Returns
+        -------
+        DataFrame or Series
+            Values at the specified freq.
+
+        See Also
+        --------
+        Series.asfreq: Convert TimeSeries to specified frequency.
+        DataFrame.asfreq: Convert TimeSeries to specified frequency.
+
+        Examples
+        --------
+
+        >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
+        ...     ['2023-01-01', '2023-01-31', '2023-02-01', '2023-02-28']))
+        >>> ser
+        2023-01-01    1
+        2023-01-31    2
+        2023-02-01    3
+        2023-02-28    4
+        dtype: int64
+        >>> ser.resample('MS').asfreq()
+        2023-01-01    1
+        2023-02-01    3
+        Freq: MS, dtype: int64
+        """
+        return self._upsample("asfreq", fill_value=fill_value)
+
+    def sum(
+        self,
+        numeric_only: bool = False,
+        min_count: int = 0,
+        *args,
+        **kwargs,
+    ):
+        """
+        Compute sum of group values.
+
+        Parameters
+        ----------
+        numeric_only : bool, default False
+            Include only float, int, boolean columns.
+
+            .. versionchanged:: 2.0.0
+
+                numeric_only no longer accepts ``None``.
+
+        min_count : int, default 0
+            The required number of valid values to perform the operation. If
+            fewer than ``min_count`` non-NA values are present the result will
+            be NA.
+
+        Returns
+        -------
+        Series or DataFrame
+            Computed sum of values within each group.
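+
+        Notes
+        -----
+        ``min_count`` turns sparse bins into NA; for instance, with the
+        series in the example below, ``ser.resample('MS').sum(min_count=3)``
+        would return NA for both months, as each bin holds only two values.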
+ + Examples + -------- + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').sum() + 2023-01-01 3 + 2023-02-01 7 + Freq: MS, dtype: int64 + """ + maybe_warn_args_and_kwargs(type(self), "sum", args, kwargs) + nv.validate_resampler_func("sum", args, kwargs) + return self._downsample("sum", numeric_only=numeric_only, min_count=min_count) + + def prod( + self, + numeric_only: bool = False, + min_count: int = 0, + *args, + **kwargs, + ): + """ + Compute prod of group values. + + Parameters + ---------- + numeric_only : bool, default False + Include only float, int, boolean columns. + + .. versionchanged:: 2.0.0 + + numeric_only no longer accepts ``None``. + + min_count : int, default 0 + The required number of valid values to perform the operation. If fewer + than ``min_count`` non-NA values are present the result will be NA. + + Returns + ------- + Series or DataFrame + Computed prod of values within each group. + + Examples + -------- + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').prod() + 2023-01-01 2 + 2023-02-01 12 + Freq: MS, dtype: int64 + """ + maybe_warn_args_and_kwargs(type(self), "prod", args, kwargs) + nv.validate_resampler_func("prod", args, kwargs) + return self._downsample("prod", numeric_only=numeric_only, min_count=min_count) + + def min( + self, + numeric_only: bool = False, + min_count: int = 0, + *args, + **kwargs, + ): + """ + Compute min value of group. + + Returns + ------- + Series or DataFrame + + Examples + -------- + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').min() + 2023-01-01 1 + 2023-02-01 3 + Freq: MS, dtype: int64 + """ + + maybe_warn_args_and_kwargs(type(self), "min", args, kwargs) + nv.validate_resampler_func("min", args, kwargs) + return self._downsample("min", numeric_only=numeric_only, min_count=min_count) + + def max( + self, + numeric_only: bool = False, + min_count: int = 0, + *args, + **kwargs, + ): + """ + Compute max value of group. + + Returns + ------- + Series or DataFrame + + Examples + -------- + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... 
['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').max() + 2023-01-01 2 + 2023-02-01 4 + Freq: MS, dtype: int64 + """ + maybe_warn_args_and_kwargs(type(self), "max", args, kwargs) + nv.validate_resampler_func("max", args, kwargs) + return self._downsample("max", numeric_only=numeric_only, min_count=min_count) + + @doc(GroupBy.first) + def first( + self, + numeric_only: bool = False, + min_count: int = 0, + *args, + **kwargs, + ): + maybe_warn_args_and_kwargs(type(self), "first", args, kwargs) + nv.validate_resampler_func("first", args, kwargs) + return self._downsample("first", numeric_only=numeric_only, min_count=min_count) + + @doc(GroupBy.last) + def last( + self, + numeric_only: bool = False, + min_count: int = 0, + *args, + **kwargs, + ): + maybe_warn_args_and_kwargs(type(self), "last", args, kwargs) + nv.validate_resampler_func("last", args, kwargs) + return self._downsample("last", numeric_only=numeric_only, min_count=min_count) + + @doc(GroupBy.median) + def median(self, numeric_only: bool = False, *args, **kwargs): + maybe_warn_args_and_kwargs(type(self), "median", args, kwargs) + nv.validate_resampler_func("median", args, kwargs) + return self._downsample("median", numeric_only=numeric_only) + + def mean( + self, + numeric_only: bool = False, + *args, + **kwargs, + ): + """ + Compute mean of groups, excluding missing values. + + Parameters + ---------- + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. + + Returns + ------- + DataFrame or Series + Mean of values within each group. + + Examples + -------- + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').mean() + 2023-01-01 1.5 + 2023-02-01 3.5 + Freq: MS, dtype: float64 + """ + maybe_warn_args_and_kwargs(type(self), "mean", args, kwargs) + nv.validate_resampler_func("mean", args, kwargs) + return self._downsample("mean", numeric_only=numeric_only) + + def std( + self, + ddof: int = 1, + numeric_only: bool = False, + *args, + **kwargs, + ): + """ + Compute standard deviation of groups, excluding missing values. + + Parameters + ---------- + ddof : int, default 1 + Degrees of freedom. + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. + + Returns + ------- + DataFrame or Series + Standard deviation of values within each group. + + Examples + -------- + + >>> ser = pd.Series([1, 3, 2, 4, 3, 8], + ... index=pd.DatetimeIndex(['2023-01-01', + ... '2023-01-10', + ... '2023-01-15', + ... '2023-02-01', + ... '2023-02-10', + ... '2023-02-15'])) + >>> ser.resample('MS').std() + 2023-01-01 1.000000 + 2023-02-01 2.645751 + Freq: MS, dtype: float64 + """ + maybe_warn_args_and_kwargs(type(self), "std", args, kwargs) + nv.validate_resampler_func("std", args, kwargs) + return self._downsample("std", ddof=ddof, numeric_only=numeric_only) + + def var( + self, + ddof: int = 1, + numeric_only: bool = False, + *args, + **kwargs, + ): + """ + Compute variance of groups, excluding missing values. + + Parameters + ---------- + ddof : int, default 1 + Degrees of freedom. 
+ + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. + + Returns + ------- + DataFrame or Series + Variance of values within each group. + + Examples + -------- + + >>> ser = pd.Series([1, 3, 2, 4, 3, 8], + ... index=pd.DatetimeIndex(['2023-01-01', + ... '2023-01-10', + ... '2023-01-15', + ... '2023-02-01', + ... '2023-02-10', + ... '2023-02-15'])) + >>> ser.resample('MS').var() + 2023-01-01 1.0 + 2023-02-01 7.0 + Freq: MS, dtype: float64 + + >>> ser.resample('MS').var(ddof=0) + 2023-01-01 0.666667 + 2023-02-01 4.666667 + Freq: MS, dtype: float64 + """ + maybe_warn_args_and_kwargs(type(self), "var", args, kwargs) + nv.validate_resampler_func("var", args, kwargs) + return self._downsample("var", ddof=ddof, numeric_only=numeric_only) + + @doc(GroupBy.sem) + def sem( + self, + ddof: int = 1, + numeric_only: bool = False, + *args, + **kwargs, + ): + maybe_warn_args_and_kwargs(type(self), "sem", args, kwargs) + nv.validate_resampler_func("sem", args, kwargs) + return self._downsample("sem", ddof=ddof, numeric_only=numeric_only) + + @doc(GroupBy.ohlc) + def ohlc( + self, + *args, + **kwargs, + ): + maybe_warn_args_and_kwargs(type(self), "ohlc", args, kwargs) + nv.validate_resampler_func("ohlc", args, kwargs) + + ax = self.ax + obj = self._obj_with_exclusions + if len(ax) == 0: + # GH#42902 + obj = obj.copy() + obj.index = _asfreq_compat(obj.index, self.freq) + if obj.ndim == 1: + obj = obj.to_frame() + obj = obj.reindex(["open", "high", "low", "close"], axis=1) + else: + mi = MultiIndex.from_product( + [obj.columns, ["open", "high", "low", "close"]] + ) + obj = obj.reindex(mi, axis=1) + return obj + + return self._downsample("ohlc") + + @doc(SeriesGroupBy.nunique) + def nunique( + self, + *args, + **kwargs, + ): + maybe_warn_args_and_kwargs(type(self), "nunique", args, kwargs) + nv.validate_resampler_func("nunique", args, kwargs) + return self._downsample("nunique") + + @doc(GroupBy.size) + def size(self): + result = self._downsample("size") + + # If the result is a non-empty DataFrame we stack to get a Series + # GH 46826 + if isinstance(result, ABCDataFrame) and not result.empty: + result = result.stack(future_stack=True) + + if not len(self.ax): + from pandas import Series + + if self._selected_obj.ndim == 1: + name = self._selected_obj.name + else: + name = None + result = Series([], index=result.index, dtype="int64", name=name) + return result + + @doc(GroupBy.count) + def count(self): + result = self._downsample("count") + if not len(self.ax): + if self._selected_obj.ndim == 1: + result = type(self._selected_obj)( + [], index=result.index, dtype="int64", name=self._selected_obj.name + ) + else: + from pandas import DataFrame + + result = DataFrame( + [], index=result.index, columns=result.columns, dtype="int64" + ) + + return result + + def quantile(self, q: float | AnyArrayLike = 0.5, **kwargs): + """ + Return value at the given quantile. + + Parameters + ---------- + q : float or array-like, default 0.5 (50% quantile) + + Returns + ------- + DataFrame or Series + Quantile of values within each group. + + See Also + -------- + Series.quantile + Return a series, where the index is q and the values are the quantiles. + DataFrame.quantile + Return a DataFrame, where the columns are the columns of self, + and the values are the quantiles. 
+ DataFrameGroupBy.quantile + Return a DataFrame, where the columns are groupby columns, + and the values are its quantiles. + + Examples + -------- + + >>> ser = pd.Series([1, 3, 2, 4, 3, 8], + ... index=pd.DatetimeIndex(['2023-01-01', + ... '2023-01-10', + ... '2023-01-15', + ... '2023-02-01', + ... '2023-02-10', + ... '2023-02-15'])) + >>> ser.resample('MS').quantile() + 2023-01-01 2.0 + 2023-02-01 4.0 + Freq: MS, dtype: float64 + + >>> ser.resample('MS').quantile(.25) + 2023-01-01 1.5 + 2023-02-01 3.5 + Freq: MS, dtype: float64 + """ + return self._downsample("quantile", q=q, **kwargs) + + +class _GroupByMixin(PandasObject, SelectionMixin): + """ + Provide the groupby facilities. + """ + + _attributes: list[str] # in practice the same as Resampler._attributes + _selection: IndexLabel | None = None + _groupby: GroupBy + _timegrouper: TimeGrouper + + def __init__( + self, + *, + parent: Resampler, + groupby: GroupBy, + key=None, + selection: IndexLabel | None = None, + ) -> None: + # reached via ._gotitem and _get_resampler_for_grouping + + assert isinstance(groupby, GroupBy), type(groupby) + + # parent is always a Resampler, sometimes a _GroupByMixin + assert isinstance(parent, Resampler), type(parent) + + # initialize our GroupByMixin object with + # the resampler attributes + for attr in self._attributes: + setattr(self, attr, getattr(parent, attr)) + self._selection = selection + + self.binner = parent.binner + self.key = key + + self._groupby = groupby + self._timegrouper = copy.copy(parent._timegrouper) + + self.ax = parent.ax + self.obj = parent.obj + + @no_type_check + def _apply(self, f, *args, **kwargs): + """ + Dispatch to _upsample; we are stripping all of the _upsample kwargs and + performing the original function call on the grouped object. + """ + + def func(x): + x = self._resampler_cls(x, timegrouper=self._timegrouper, gpr_index=self.ax) + + if isinstance(f, str): + return getattr(x, f)(**kwargs) + + return x.apply(f, *args, **kwargs) + + result = self._groupby.apply(func) + return self._wrap_result(result) + + _upsample = _apply + _downsample = _apply + _groupby_and_aggregate = _apply + + @final + def _gotitem(self, key, ndim, subset=None): + """ + Sub-classes to define. Return a sliced object. + + Parameters + ---------- + key : string / list of selections + ndim : {1, 2} + requested ndim of result + subset : object, default None + subset to act on + """ + # create a new object to prevent aliasing + if subset is None: + subset = self.obj + if key is not None: + subset = subset[key] + else: + # reached via Apply.agg_dict_like with selection=None, ndim=1 + assert subset.ndim == 1 + + # Try to select from a DataFrame, falling back to a Series + try: + if isinstance(key, list) and self.key not in key and self.key is not None: + key.append(self.key) + groupby = self._groupby[key] + except IndexError: + groupby = self._groupby + + selection = self._infer_selection(key, subset) + + new_rs = type(self)( + groupby=groupby, + parent=cast(Resampler, self), + selection=selection, + ) + return new_rs + + +class DatetimeIndexResampler(Resampler): + @property + def _resampler_for_grouping(self): + return DatetimeIndexResamplerGroupby + + def _get_binner_for_time(self): + # this is how we are actually creating the bins + if self.kind == "period": + return self._timegrouper._get_time_period_bins(self.ax) + return self._timegrouper._get_time_bins(self.ax) + + def _downsample(self, how, **kwargs): + """ + Downsample the cython defined function. 
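+
+        ``how`` may be a plain callable or the name of a cython-mapped
+        aggregation; an empty axis short-circuits to a frequency reset on a
+        copy of the object.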
+ + Parameters + ---------- + how : string / cython mapped function + **kwargs : kw args passed to how function + """ + orig_how = how + how = com.get_cython_func(how) or how + if orig_how != how: + warn_alias_replacement(self, orig_how, how) + ax = self.ax + + # Excludes `on` column when provided + obj = self._obj_with_exclusions + + if not len(ax): + # reset to the new freq + obj = obj.copy() + obj.index = obj.index._with_freq(self.freq) + assert obj.index.freq == self.freq, (obj.index.freq, self.freq) + return obj + + # do we have a regular frequency + + # error: Item "None" of "Optional[Any]" has no attribute "binlabels" + if ( + (ax.freq is not None or ax.inferred_freq is not None) + and len(self.grouper.binlabels) > len(ax) + and how is None + ): + # let's do an asfreq + return self.asfreq() + + # we are downsampling + # we want to call the actual grouper method here + if self.axis == 0: + result = obj.groupby(self.grouper).aggregate(how, **kwargs) + else: + # test_resample_axis1 + result = obj.T.groupby(self.grouper).aggregate(how, **kwargs).T + + return self._wrap_result(result) + + def _adjust_binner_for_upsample(self, binner): + """ + Adjust our binner when upsampling. + + The range of a new index should not be outside specified range + """ + if self.closed == "right": + binner = binner[1:] + else: + binner = binner[:-1] + return binner + + def _upsample(self, method, limit: int | None = None, fill_value=None): + """ + Parameters + ---------- + method : string {'backfill', 'bfill', 'pad', + 'ffill', 'asfreq'} method for upsampling + limit : int, default None + Maximum size gap to fill when reindexing + fill_value : scalar, default None + Value to use for missing values + + See Also + -------- + .fillna: Fill NA/NaN values using the specified method. + + """ + if self.axis: + raise AssertionError("axis must be 0") + if self._from_selection: + raise ValueError( + "Upsampling from level= or on= selection " + "is not supported, use .set_index(...) " + "to explicitly set index to datetime-like" + ) + + ax = self.ax + obj = self._selected_obj + binner = self.binner + res_index = self._adjust_binner_for_upsample(binner) + + # if we have the same frequency as our axis, then we are equal sampling + if ( + limit is None + and to_offset(ax.inferred_freq) == self.freq + and len(obj) == len(res_index) + ): + result = obj.copy() + result.index = res_index + else: + if method == "asfreq": + method = None + result = obj.reindex( + res_index, method=method, limit=limit, fill_value=fill_value + ) + + return self._wrap_result(result) + + def _wrap_result(self, result): + result = super()._wrap_result(result) + + # we may have a different kind that we were asked originally + # convert if needed + if self.kind == "period" and not isinstance(result.index, PeriodIndex): + if isinstance(result.index, MultiIndex): + # GH 24103 - e.g. 
groupby resample + if not isinstance(result.index.levels[-1], PeriodIndex): + new_level = result.index.levels[-1].to_period(self.freq) + result.index = result.index.set_levels(new_level, level=-1) + else: + result.index = result.index.to_period(self.freq) + return result + + +class DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler): + """ + Provides a resample of a groupby implementation + """ + + @property + def _resampler_cls(self): + return DatetimeIndexResampler + + +class PeriodIndexResampler(DatetimeIndexResampler): + @property + def _resampler_for_grouping(self): + return PeriodIndexResamplerGroupby + + def _get_binner_for_time(self): + if self.kind == "timestamp": + return super()._get_binner_for_time() + return self._timegrouper._get_period_bins(self.ax) + + def _convert_obj(self, obj: NDFrameT) -> NDFrameT: + obj = super()._convert_obj(obj) + + if self._from_selection: + # see GH 14008, GH 12871 + msg = ( + "Resampling from level= or on= selection " + "with a PeriodIndex is not currently supported, " + "use .set_index(...) to explicitly set index" + ) + raise NotImplementedError(msg) + + # convert to timestamp + if self.kind == "timestamp": + obj = obj.to_timestamp(how=self.convention) + + return obj + + def _downsample(self, how, **kwargs): + """ + Downsample the cython defined function. + + Parameters + ---------- + how : string / cython mapped function + **kwargs : kw args passed to how function + """ + # we may need to actually resample as if we are timestamps + if self.kind == "timestamp": + return super()._downsample(how, **kwargs) + + orig_how = how + how = com.get_cython_func(how) or how + if orig_how != how: + warn_alias_replacement(self, orig_how, how) + ax = self.ax + + if is_subperiod(ax.freq, self.freq): + # Downsampling + return self._groupby_and_aggregate(how, **kwargs) + elif is_superperiod(ax.freq, self.freq): + if how == "ohlc": + # GH #13083 + # upsampling to subperiods is handled as an asfreq, which works + # for pure aggregating/reducing methods + # OHLC reduces along the time dimension, but creates multiple + # values for each period -> handle by _groupby_and_aggregate() + return self._groupby_and_aggregate(how) + return self.asfreq() + elif ax.freq == self.freq: + return self.asfreq() + + raise IncompatibleFrequency( + f"Frequency {ax.freq} cannot be resampled to {self.freq}, " + "as they are not sub or super periods" + ) + + def _upsample(self, method, limit: int | None = None, fill_value=None): + """ + Parameters + ---------- + method : {'backfill', 'bfill', 'pad', 'ffill'} + Method for upsampling. + limit : int, default None + Maximum size gap to fill when reindexing. + fill_value : scalar, default None + Value to use for missing values. + + See Also + -------- + .fillna: Fill NA/NaN values using the specified method. + + """ + # we may need to actually resample as if we are timestamps + if self.kind == "timestamp": + return super()._upsample(method, limit=limit, fill_value=fill_value) + + ax = self.ax + obj = self.obj + new_index = self.binner + + # Start vs. end of period + memb = ax.asfreq(self.freq, how=self.convention) + + # Get the fill indexer + if method == "asfreq": + method = None + indexer = memb.get_indexer(new_index, method=method, limit=limit) + new_obj = _take_new_index( + obj, + indexer, + new_index, + axis=self.axis, + ) + return self._wrap_result(new_obj) + + +class PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler): + """ + Provides a resample of a groupby implementation. 
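+
+    The actual binning is delegated to :class:`PeriodIndexResampler` via
+    ``_resampler_cls``.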
+ """ + + @property + def _resampler_cls(self): + return PeriodIndexResampler + + +class TimedeltaIndexResampler(DatetimeIndexResampler): + @property + def _resampler_for_grouping(self): + return TimedeltaIndexResamplerGroupby + + def _get_binner_for_time(self): + return self._timegrouper._get_time_delta_bins(self.ax) + + def _adjust_binner_for_upsample(self, binner): + """ + Adjust our binner when upsampling. + + The range of a new index is allowed to be greater than original range + so we don't need to change the length of a binner, GH 13022 + """ + return binner + + +class TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler): + """ + Provides a resample of a groupby implementation. + """ + + @property + def _resampler_cls(self): + return TimedeltaIndexResampler + + +def get_resampler(obj: Series | DataFrame, kind=None, **kwds) -> Resampler: + """ + Create a TimeGrouper and return our resampler. + """ + tg = TimeGrouper(**kwds) + return tg._get_resampler(obj, kind=kind) + + +get_resampler.__doc__ = Resampler.__doc__ + + +def get_resampler_for_grouping( + groupby: GroupBy, + rule, + how=None, + fill_method=None, + limit: int | None = None, + kind=None, + on=None, + **kwargs, +) -> Resampler: + """ + Return our appropriate resampler when grouping as well. + """ + # .resample uses 'on' similar to how .groupby uses 'key' + tg = TimeGrouper(freq=rule, key=on, **kwargs) + resampler = tg._get_resampler(groupby.obj, kind=kind) + return resampler._get_resampler_for_grouping(groupby=groupby, key=tg.key) + + +class TimeGrouper(Grouper): + """ + Custom groupby class for time-interval grouping. + + Parameters + ---------- + freq : pandas date offset or offset alias for identifying bin edges + closed : closed end of interval; 'left' or 'right' + label : interval boundary to use for labeling; 'left' or 'right' + convention : {'start', 'end', 'e', 's'} + If axis is PeriodIndex + """ + + _attributes = Grouper._attributes + ( + "closed", + "label", + "how", + "kind", + "convention", + "origin", + "offset", + ) + + origin: TimeGrouperOrigin + + def __init__( + self, + freq: Frequency = "Min", + closed: Literal["left", "right"] | None = None, + label: Literal["left", "right"] | None = None, + how: str = "mean", + axis: Axis = 0, + fill_method=None, + limit: int | None = None, + kind: str | None = None, + convention: Literal["start", "end", "e", "s"] | None = None, + origin: Literal["epoch", "start", "start_day", "end", "end_day"] + | TimestampConvertibleTypes = "start_day", + offset: TimedeltaConvertibleTypes | None = None, + group_keys: bool = False, + **kwargs, + ) -> None: + # Check for correctness of the keyword arguments which would + # otherwise silently use the default if misspelled + if label not in {None, "left", "right"}: + raise ValueError(f"Unsupported value {label} for `label`") + if closed not in {None, "left", "right"}: + raise ValueError(f"Unsupported value {closed} for `closed`") + if convention not in {None, "start", "end", "e", "s"}: + raise ValueError(f"Unsupported value {convention} for `convention`") + + freq = to_offset(freq) + + end_types = {"M", "A", "Q", "BM", "BA", "BQ", "W"} + rule = freq.rule_code + if rule in end_types or ("-" in rule and rule[: rule.find("-")] in end_types): + if closed is None: + closed = "right" + if label is None: + label = "right" + else: + # The backward resample sets ``closed`` to ``'right'`` by default + # since the last value should be considered as the edge point for + # the last bin. 
When origin in "end" or "end_day", the value for a + # specific ``Timestamp`` index stands for the resample result from + # the current ``Timestamp`` minus ``freq`` to the current + # ``Timestamp`` with a right close. + if origin in ["end", "end_day"]: + if closed is None: + closed = "right" + if label is None: + label = "right" + else: + if closed is None: + closed = "left" + if label is None: + label = "left" + + self.closed = closed + self.label = label + self.kind = kind + self.convention = convention if convention is not None else "e" + self.how = how + self.fill_method = fill_method + self.limit = limit + self.group_keys = group_keys + + if origin in ("epoch", "start", "start_day", "end", "end_day"): + # error: Incompatible types in assignment (expression has type "Union[Union[ + # Timestamp, datetime, datetime64, signedinteger[_64Bit], float, str], + # Literal['epoch', 'start', 'start_day', 'end', 'end_day']]", variable has + # type "Union[Timestamp, Literal['epoch', 'start', 'start_day', 'end', + # 'end_day']]") + self.origin = origin # type: ignore[assignment] + else: + try: + self.origin = Timestamp(origin) + except (ValueError, TypeError) as err: + raise ValueError( + "'origin' should be equal to 'epoch', 'start', 'start_day', " + "'end', 'end_day' or " + f"should be a Timestamp convertible type. Got '{origin}' instead." + ) from err + + try: + self.offset = Timedelta(offset) if offset is not None else None + except (ValueError, TypeError) as err: + raise ValueError( + "'offset' should be a Timedelta convertible type. " + f"Got '{offset}' instead." + ) from err + + # always sort time groupers + kwargs["sort"] = True + + super().__init__(freq=freq, axis=axis, **kwargs) + + def _get_resampler(self, obj: NDFrame, kind=None) -> Resampler: + """ + Return my resampler or raise if we have an invalid axis. 
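+
+        The concrete resampler is picked from the (possibly converted) axis
+        type: ``DatetimeIndex``, ``PeriodIndex`` (or ``kind="period"``) and
+        ``TimedeltaIndex`` each map to their own subclass.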
+ + Parameters + ---------- + obj : Series or DataFrame + kind : string, optional + 'period','timestamp','timedelta' are valid + + Returns + ------- + Resampler + + Raises + ------ + TypeError if incompatible axis + + """ + _, ax, indexer = self._set_grouper(obj, gpr_index=None) + + if isinstance(ax, DatetimeIndex): + return DatetimeIndexResampler( + obj, + timegrouper=self, + kind=kind, + axis=self.axis, + group_keys=self.group_keys, + gpr_index=ax, + ) + elif isinstance(ax, PeriodIndex) or kind == "period": + return PeriodIndexResampler( + obj, + timegrouper=self, + kind=kind, + axis=self.axis, + group_keys=self.group_keys, + gpr_index=ax, + ) + elif isinstance(ax, TimedeltaIndex): + return TimedeltaIndexResampler( + obj, + timegrouper=self, + axis=self.axis, + group_keys=self.group_keys, + gpr_index=ax, + ) + + raise TypeError( + "Only valid with DatetimeIndex, " + "TimedeltaIndex or PeriodIndex, " + f"but got an instance of '{type(ax).__name__}'" + ) + + def _get_grouper( + self, obj: NDFrameT, validate: bool = True + ) -> tuple[BinGrouper, NDFrameT]: + # create the resampler and return our binner + r = self._get_resampler(obj) + return r.grouper, cast(NDFrameT, r.obj) + + def _get_time_bins(self, ax: DatetimeIndex): + if not isinstance(ax, DatetimeIndex): + raise TypeError( + "axis must be a DatetimeIndex, but got " + f"an instance of {type(ax).__name__}" + ) + + if len(ax) == 0: + binner = labels = DatetimeIndex( + data=[], freq=self.freq, name=ax.name, dtype=ax.dtype + ) + return binner, [], labels + + first, last = _get_timestamp_range_edges( + ax.min(), + ax.max(), + self.freq, + unit=ax.unit, + closed=self.closed, + origin=self.origin, + offset=self.offset, + ) + # GH #12037 + # use first/last directly instead of call replace() on them + # because replace() will swallow the nanosecond part + # thus last bin maybe slightly before the end if the end contains + # nanosecond part and lead to `Values falls after last bin` error + # GH 25758: If DST lands at midnight (e.g. 
'America/Havana'), user feedback + # has noted that ambiguous=True provides the most sensible result + binner = labels = date_range( + freq=self.freq, + start=first, + end=last, + tz=ax.tz, + name=ax.name, + ambiguous=True, + nonexistent="shift_forward", + unit=ax.unit, + ) + + ax_values = ax.asi8 + binner, bin_edges = self._adjust_bin_edges(binner, ax_values) + + # general version, knowing nothing about relative frequencies + bins = lib.generate_bins_dt64( + ax_values, bin_edges, self.closed, hasnans=ax.hasnans + ) + + if self.closed == "right": + labels = binner + if self.label == "right": + labels = labels[1:] + elif self.label == "right": + labels = labels[1:] + + if ax.hasnans: + binner = binner.insert(0, NaT) + labels = labels.insert(0, NaT) + + # if we end up with more labels than bins + # adjust the labels + # GH4076 + if len(bins) < len(labels): + labels = labels[: len(bins)] + + return binner, bins, labels + + def _adjust_bin_edges( + self, binner: DatetimeIndex, ax_values: npt.NDArray[np.int64] + ) -> tuple[DatetimeIndex, npt.NDArray[np.int64]]: + # Some hacks for > daily data, see #1471, #1458, #1483 + + if self.freq != "D" and is_superperiod(self.freq, "D"): + if self.closed == "right": + # GH 21459, GH 9119: Adjust the bins relative to the wall time + edges_dti = binner.tz_localize(None) + edges_dti = ( + edges_dti + + Timedelta(days=1, unit=edges_dti.unit).as_unit(edges_dti.unit) + - Timedelta(1, unit=edges_dti.unit).as_unit(edges_dti.unit) + ) + bin_edges = edges_dti.tz_localize(binner.tz).asi8 + else: + bin_edges = binner.asi8 + + # intraday values on last day + if bin_edges[-2] > ax_values.max(): + bin_edges = bin_edges[:-1] + binner = binner[:-1] + else: + bin_edges = binner.asi8 + return binner, bin_edges + + def _get_time_delta_bins(self, ax: TimedeltaIndex): + if not isinstance(ax, TimedeltaIndex): + raise TypeError( + "axis must be a TimedeltaIndex, but got " + f"an instance of {type(ax).__name__}" + ) + + if not isinstance(self.freq, Tick): + # GH#51896 + raise ValueError( + "Resampling on a TimedeltaIndex requires fixed-duration `freq`, " + f"e.g. 
'24H' or '3D', not {self.freq}"
+            )
+
+        if not len(ax):
+            binner = labels = TimedeltaIndex(data=[], freq=self.freq, name=ax.name)
+            return binner, [], labels
+
+        start, end = ax.min(), ax.max()
+
+        if self.closed == "right":
+            end += self.freq
+
+        labels = binner = timedelta_range(
+            start=start, end=end, freq=self.freq, name=ax.name
+        )
+
+        end_stamps = labels
+        if self.closed == "left":
+            end_stamps += self.freq
+
+        bins = ax.searchsorted(end_stamps, side=self.closed)
+
+        if self.offset:
+            # GH 10530 & 31809
+            labels += self.offset
+
+        return binner, bins, labels
+
+    def _get_time_period_bins(self, ax: DatetimeIndex):
+        if not isinstance(ax, DatetimeIndex):
+            raise TypeError(
+                "axis must be a DatetimeIndex, but got "
+                f"an instance of {type(ax).__name__}"
+            )
+
+        freq = self.freq
+
+        if len(ax) == 0:
+            binner = labels = PeriodIndex(
+                data=[], freq=freq, name=ax.name, dtype=ax.dtype
+            )
+            return binner, [], labels
+
+        labels = binner = period_range(start=ax[0], end=ax[-1], freq=freq, name=ax.name)
+
+        end_stamps = (labels + freq).asfreq(freq, "s").to_timestamp()
+        if ax.tz:
+            end_stamps = end_stamps.tz_localize(ax.tz)
+        bins = ax.searchsorted(end_stamps, side="left")
+
+        return binner, bins, labels
+
+    def _get_period_bins(self, ax: PeriodIndex):
+        if not isinstance(ax, PeriodIndex):
+            raise TypeError(
+                "axis must be a PeriodIndex, but got "
+                f"an instance of {type(ax).__name__}"
+            )
+
+        memb = ax.asfreq(self.freq, how=self.convention)
+
+        # NaT handling as in pandas._lib.lib.generate_bins_dt64()
+        nat_count = 0
+        if memb.hasnans:
+            # error: Incompatible types in assignment (expression has type
+            # "bool_", variable has type "int")  [assignment]
+            nat_count = np.sum(memb._isnan)  # type: ignore[assignment]
+            memb = memb[~memb._isnan]
+
+        if not len(memb):
+            # index contains no valid (non-NaT) values
+            bins = np.array([], dtype=np.int64)
+            binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name)
+            if len(ax) > 0:
+                # index is all NaT
+                binner, bins, labels = _insert_nat_bin(binner, bins, labels, len(ax))
+            return binner, bins, labels
+
+        freq_mult = self.freq.n
+
+        start = ax.min().asfreq(self.freq, how=self.convention)
+        end = ax.max().asfreq(self.freq, how="end")
+        bin_shift = 0
+
+        if isinstance(self.freq, Tick):
+            # GH 23882 & 31809: get adjusted bin edge labels with 'origin'
+            # and 'offset' support. This call only makes sense if the freq is a
+            # Tick since offset and origin are only used in those cases.
+            # Not doing this check could create an extra empty bin.
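+            #
+            # Illustrative sketch (values invented for exposition): for a
+            # minutely PeriodIndex resampled with freq='5min' and
+            # offset=Timedelta('2min'), the adjusted edges land on minutes
+            # 2, 7, 12, ... and `bin_shift` below records how far `start`
+            # sits past the adjusted left edge, so the bin-edge range can be
+            # shifted back into alignment with the labels.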
+ p_start, end = _get_period_range_edges( + start, + end, + self.freq, + closed=self.closed, + origin=self.origin, + offset=self.offset, + ) + + # Get offset for bin edge (not label edge) adjustment + start_offset = Period(start, self.freq) - Period(p_start, self.freq) + # error: Item "Period" of "Union[Period, Any]" has no attribute "n" + bin_shift = start_offset.n % freq_mult # type: ignore[union-attr] + start = p_start + + labels = binner = period_range( + start=start, end=end, freq=self.freq, name=ax.name + ) + + i8 = memb.asi8 + + # when upsampling to subperiods, we need to generate enough bins + expected_bins_count = len(binner) * freq_mult + i8_extend = expected_bins_count - (i8[-1] - i8[0]) + rng = np.arange(i8[0], i8[-1] + i8_extend, freq_mult) + rng += freq_mult + # adjust bin edge indexes to account for base + rng -= bin_shift + + # Wrap in PeriodArray for PeriodArray.searchsorted + prng = type(memb._data)(rng, dtype=memb.dtype) + bins = memb.searchsorted(prng, side="left") + + if nat_count > 0: + binner, bins, labels = _insert_nat_bin(binner, bins, labels, nat_count) + + return binner, bins, labels + + +def _take_new_index( + obj: NDFrameT, indexer: npt.NDArray[np.intp], new_index: Index, axis: AxisInt = 0 +) -> NDFrameT: + if isinstance(obj, ABCSeries): + new_values = algos.take_nd(obj._values, indexer) + # error: Incompatible return value type (got "Series", expected "NDFrameT") + return obj._constructor( # type: ignore[return-value] + new_values, index=new_index, name=obj.name + ) + elif isinstance(obj, ABCDataFrame): + if axis == 1: + raise NotImplementedError("axis 1 is not supported") + new_mgr = obj._mgr.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1) + return obj._constructor_from_mgr(new_mgr, axes=new_mgr.axes) + else: + raise ValueError("'obj' should be either a Series or a DataFrame") + + +def _get_timestamp_range_edges( + first: Timestamp, + last: Timestamp, + freq: BaseOffset, + unit: str, + closed: Literal["right", "left"] = "left", + origin: TimeGrouperOrigin = "start_day", + offset: Timedelta | None = None, +) -> tuple[Timestamp, Timestamp]: + """ + Adjust the `first` Timestamp to the preceding Timestamp that resides on + the provided offset. Adjust the `last` Timestamp to the following + Timestamp that resides on the provided offset. Input Timestamps that + already reside on the offset will be adjusted depending on the type of + offset and the `closed` parameter. + + Parameters + ---------- + first : pd.Timestamp + The beginning Timestamp of the range to be adjusted. + last : pd.Timestamp + The ending Timestamp of the range to be adjusted. + freq : pd.DateOffset + The dateoffset to which the Timestamps will be adjusted. + closed : {'right', 'left'}, default "left" + Which side of bin interval is closed. + origin : {'epoch', 'start', 'start_day'} or Timestamp, default 'start_day' + The timestamp on which to adjust the grouping. The timezone of origin must + match the timezone of the index. + If a timestamp is not used, these values are also supported: + + - 'epoch': `origin` is 1970-01-01 + - 'start': `origin` is the first value of the timeseries + - 'start_day': `origin` is the first day at midnight of the timeseries + offset : pd.Timedelta, default is None + An offset timedelta added to the origin. + + Returns + ------- + A tuple of length 2, containing the adjusted pd.Timestamp objects. 
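+
+    Examples
+    --------
+    A minimal, illustrative example (the values are hypothetical) of the
+    default ``origin='start_day'`` anchoring with a daily frequency:
+
+    >>> _get_timestamp_range_edges(
+    ...     pd.Timestamp("2000-10-01 23:30:00"),
+    ...     pd.Timestamp("2000-10-02 00:30:00"),
+    ...     pd.offsets.Day(),
+    ...     unit="ns",
+    ... )
+    (Timestamp('2000-10-01 00:00:00'), Timestamp('2000-10-03 00:00:00'))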
+ """ + if isinstance(freq, Tick): + index_tz = first.tz + if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None): + raise ValueError("The origin must have the same timezone as the index.") + if origin == "epoch": + # set the epoch based on the timezone to have similar bins results when + # resampling on the same kind of indexes on different timezones + origin = Timestamp("1970-01-01", tz=index_tz) + + if isinstance(freq, Day): + # _adjust_dates_anchored assumes 'D' means 24H, but first/last + # might contain a DST transition (23H, 24H, or 25H). + # So "pretend" the dates are naive when adjusting the endpoints + first = first.tz_localize(None) + last = last.tz_localize(None) + if isinstance(origin, Timestamp): + origin = origin.tz_localize(None) + + first, last = _adjust_dates_anchored( + first, last, freq, closed=closed, origin=origin, offset=offset, unit=unit + ) + if isinstance(freq, Day): + first = first.tz_localize(index_tz) + last = last.tz_localize(index_tz) + else: + first = first.normalize() + last = last.normalize() + + if closed == "left": + first = Timestamp(freq.rollback(first)) + else: + first = Timestamp(first - freq) + + last = Timestamp(last + freq) + + return first, last + + +def _get_period_range_edges( + first: Period, + last: Period, + freq: BaseOffset, + closed: Literal["right", "left"] = "left", + origin: TimeGrouperOrigin = "start_day", + offset: Timedelta | None = None, +) -> tuple[Period, Period]: + """ + Adjust the provided `first` and `last` Periods to the respective Period of + the given offset that encompasses them. + + Parameters + ---------- + first : pd.Period + The beginning Period of the range to be adjusted. + last : pd.Period + The ending Period of the range to be adjusted. + freq : pd.DateOffset + The freq to which the Periods will be adjusted. + closed : {'right', 'left'}, default "left" + Which side of bin interval is closed. + origin : {'epoch', 'start', 'start_day'}, Timestamp, default 'start_day' + The timestamp on which to adjust the grouping. The timezone of origin must + match the timezone of the index. + + If a timestamp is not used, these values are also supported: + + - 'epoch': `origin` is 1970-01-01 + - 'start': `origin` is the first value of the timeseries + - 'start_day': `origin` is the first day at midnight of the timeseries + offset : pd.Timedelta, default is None + An offset timedelta added to the origin. + + Returns + ------- + A tuple of length 2, containing the adjusted pd.Period objects. 
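+
+    Notes
+    -----
+    Both periods are converted to timestamps, adjusted via
+    ``_get_timestamp_range_edges``, and then converted back, so the same
+    ``origin``/``offset`` anchoring rules apply to period edges as well.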
+    """
+    if not all(isinstance(obj, Period) for obj in [first, last]):
+        raise TypeError("'first' and 'last' must be instances of type Period")
+
+    # GH 23882
+    first_ts = first.to_timestamp()
+    last_ts = last.to_timestamp()
+    adjust_first = not freq.is_on_offset(first_ts)
+    adjust_last = freq.is_on_offset(last_ts)
+
+    first_ts, last_ts = _get_timestamp_range_edges(
+        first_ts, last_ts, freq, unit="ns", closed=closed, origin=origin, offset=offset
+    )
+
+    first = (first_ts + int(adjust_first) * freq).to_period(freq)
+    last = (last_ts - int(adjust_last) * freq).to_period(freq)
+    return first, last
+
+
+def _insert_nat_bin(
+    binner: PeriodIndex, bins: np.ndarray, labels: PeriodIndex, nat_count: int
+) -> tuple[PeriodIndex, np.ndarray, PeriodIndex]:
+    # NaT handling as in pandas._lib.lib.generate_bins_dt64()
+    # shift bins by the number of NaT
+    assert nat_count > 0
+    bins += nat_count
+    bins = np.insert(bins, 0, nat_count)
+
+    # Incompatible types in assignment (expression has type "Index", variable
+    # has type "PeriodIndex")
+    binner = binner.insert(0, NaT)  # type: ignore[assignment]
+    # Incompatible types in assignment (expression has type "Index", variable
+    # has type "PeriodIndex")
+    labels = labels.insert(0, NaT)  # type: ignore[assignment]
+    return binner, bins, labels
+
+
+def _adjust_dates_anchored(
+    first: Timestamp,
+    last: Timestamp,
+    freq: Tick,
+    closed: Literal["right", "left"] = "right",
+    origin: TimeGrouperOrigin = "start_day",
+    offset: Timedelta | None = None,
+    unit: str = "ns",
+) -> tuple[Timestamp, Timestamp]:
+    # First and last offsets should be calculated from the start day to fix an
+    # error caused by resampling across multiple days when a one-day period is
+    # not a multiple of the frequency. See GH 8683
+    # To handle frequencies that are not multiples of, nor evenly divide into,
+    # a day, we allow a fixed origin timestamp to be defined. See GH 31809
+    first = first.as_unit(unit)
+    last = last.as_unit(unit)
+    if offset is not None:
+        offset = offset.as_unit(unit)
+
+    freq_value = Timedelta(freq).as_unit(unit)._value
+
+    origin_timestamp = 0  # origin == "epoch"
+    if origin == "start_day":
+        origin_timestamp = first.normalize()._value
+    elif origin == "start":
+        origin_timestamp = first._value
+    elif isinstance(origin, Timestamp):
+        origin_timestamp = origin.as_unit(unit)._value
+    elif origin in ["end", "end_day"]:
+        origin_last = last if origin == "end" else last.ceil("D")
+        sub_freq_times = (origin_last._value - first._value) // freq_value
+        if closed == "left":
+            sub_freq_times += 1
+        first = origin_last - sub_freq_times * freq
+        origin_timestamp = first._value
+    origin_timestamp += offset._value if offset else 0
+
+    # GH 10117 & GH 19375. If first and last contain timezone information,
+    # perform the calculation in UTC in order to avoid localizing on an
+    # ambiguous or nonexistent time.
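+    # For example (illustrative): around a DST fall-back transition the same
+    # wall-clock hour occurs twice, so the modular arithmetic below is done
+    # on UTC integer values and only converted back to the original zones at
+    # the end.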
+ first_tzinfo = first.tzinfo + last_tzinfo = last.tzinfo + if first_tzinfo is not None: + first = first.tz_convert("UTC") + if last_tzinfo is not None: + last = last.tz_convert("UTC") + + foffset = (first._value - origin_timestamp) % freq_value + loffset = (last._value - origin_timestamp) % freq_value + + if closed == "right": + if foffset > 0: + # roll back + fresult_int = first._value - foffset + else: + fresult_int = first._value - freq_value + + if loffset > 0: + # roll forward + lresult_int = last._value + (freq_value - loffset) + else: + # already the end of the road + lresult_int = last._value + else: # closed == 'left' + if foffset > 0: + fresult_int = first._value - foffset + else: + # start of the road + fresult_int = first._value + + if loffset > 0: + # roll forward + lresult_int = last._value + (freq_value - loffset) + else: + lresult_int = last._value + freq_value + fresult = Timestamp(fresult_int, unit=unit) + lresult = Timestamp(lresult_int, unit=unit) + if first_tzinfo is not None: + fresult = fresult.tz_localize("UTC").tz_convert(first_tzinfo) + if last_tzinfo is not None: + lresult = lresult.tz_localize("UTC").tz_convert(last_tzinfo) + return fresult, lresult + + +def asfreq( + obj: NDFrameT, + freq, + method=None, + how=None, + normalize: bool = False, + fill_value=None, +) -> NDFrameT: + """ + Utility frequency conversion method for Series/DataFrame. + + See :meth:`pandas.NDFrame.asfreq` for full documentation. + """ + if isinstance(obj.index, PeriodIndex): + if method is not None: + raise NotImplementedError("'method' argument is not supported") + + if how is None: + how = "E" + + new_obj = obj.copy() + new_obj.index = obj.index.asfreq(freq, how=how) + + elif len(obj.index) == 0: + new_obj = obj.copy() + + new_obj.index = _asfreq_compat(obj.index, freq) + else: + dti = date_range(obj.index.min(), obj.index.max(), freq=freq) + dti.name = obj.index.name + new_obj = obj.reindex(dti, method=method, fill_value=fill_value) + if normalize: + new_obj.index = new_obj.index.normalize() + + return new_obj + + +def _asfreq_compat(index: DatetimeIndex | PeriodIndex | TimedeltaIndex, freq): + """ + Helper to mimic asfreq on (empty) DatetimeIndex and TimedeltaIndex. + + Parameters + ---------- + index : PeriodIndex, DatetimeIndex, or TimedeltaIndex + freq : DateOffset + + Returns + ------- + same type as index + """ + if len(index) != 0: + # This should never be reached, always checked by the caller + raise ValueError( + "Can only set arbitrary freq for empty DatetimeIndex or TimedeltaIndex" + ) + new_index: Index + if isinstance(index, PeriodIndex): + new_index = index.asfreq(freq=freq) + elif isinstance(index, DatetimeIndex): + new_index = DatetimeIndex([], dtype=index.dtype, freq=freq, name=index.name) + elif isinstance(index, TimedeltaIndex): + new_index = TimedeltaIndex([], dtype=index.dtype, freq=freq, name=index.name) + else: # pragma: no cover + raise TypeError(type(index)) + return new_index + + +def maybe_warn_args_and_kwargs(cls, kernel: str, args, kwargs) -> None: + """ + Warn for deprecation of args and kwargs in resample functions. + + Parameters + ---------- + cls : type + Class to warn about. + kernel : str + Operation name. + args : tuple or None + args passed by user. Will be None if and only if kernel does not have args. + kwargs : dict or None + kwargs passed by user. Will be None if and only if kernel does not have kwargs. 
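+
+    Notes
+    -----
+    At most one ``FutureWarning`` is emitted per call, naming whichever of
+    ``args``/``kwargs`` is non-empty (or both).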
+ """ + warn_args = args is not None and len(args) > 0 + warn_kwargs = kwargs is not None and len(kwargs) > 0 + if warn_args and warn_kwargs: + msg = "args and kwargs" + elif warn_args: + msg = "args" + elif warn_kwargs: + msg = "kwargs" + else: + return + warnings.warn( + f"Passing additional {msg} to {cls.__name__}.{kernel} has " + "no impact on the result and is deprecated. This will " + "raise a TypeError in a future version of pandas.", + category=FutureWarning, + stacklevel=find_stack_level(), + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/api.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/api.py new file mode 100644 index 00000000..b1884c49 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/api.py @@ -0,0 +1,41 @@ +from pandas.core.reshape.concat import concat +from pandas.core.reshape.encoding import ( + from_dummies, + get_dummies, +) +from pandas.core.reshape.melt import ( + lreshape, + melt, + wide_to_long, +) +from pandas.core.reshape.merge import ( + merge, + merge_asof, + merge_ordered, +) +from pandas.core.reshape.pivot import ( + crosstab, + pivot, + pivot_table, +) +from pandas.core.reshape.tile import ( + cut, + qcut, +) + +__all__ = [ + "concat", + "crosstab", + "cut", + "from_dummies", + "get_dummies", + "lreshape", + "melt", + "merge", + "merge_asof", + "merge_ordered", + "pivot", + "pivot_table", + "qcut", + "wide_to_long", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/concat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/concat.py new file mode 100644 index 00000000..ffa71999 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/concat.py @@ -0,0 +1,888 @@ +""" +Concat routines. 
+""" +from __future__ import annotations + +from collections import abc +from typing import ( + TYPE_CHECKING, + Callable, + Literal, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._config import using_copy_on_write + +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_bool, + is_iterator, +) +from pandas.core.dtypes.concat import concat_compat +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.missing import isna + +from pandas.core.arrays.categorical import ( + factorize_from_iterable, + factorize_from_iterables, +) +import pandas.core.common as com +from pandas.core.indexes.api import ( + Index, + MultiIndex, + all_indexes_same, + default_index, + ensure_index, + get_objs_combined_axis, + get_unanimous_names, +) +from pandas.core.internals import concatenate_managers + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterable, + Mapping, + ) + + from pandas._typing import ( + Axis, + AxisInt, + HashableT, + ) + + from pandas import ( + DataFrame, + Series, + ) + +# --------------------------------------------------------------------- +# Concatenate DataFrame objects + + +@overload +def concat( + objs: Iterable[DataFrame] | Mapping[HashableT, DataFrame], + *, + axis: Literal[0, "index"] = ..., + join: str = ..., + ignore_index: bool = ..., + keys=..., + levels=..., + names: list[HashableT] | None = ..., + verify_integrity: bool = ..., + sort: bool = ..., + copy: bool | None = ..., +) -> DataFrame: + ... + + +@overload +def concat( + objs: Iterable[Series] | Mapping[HashableT, Series], + *, + axis: Literal[0, "index"] = ..., + join: str = ..., + ignore_index: bool = ..., + keys=..., + levels=..., + names: list[HashableT] | None = ..., + verify_integrity: bool = ..., + sort: bool = ..., + copy: bool | None = ..., +) -> Series: + ... + + +@overload +def concat( + objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], + *, + axis: Literal[0, "index"] = ..., + join: str = ..., + ignore_index: bool = ..., + keys=..., + levels=..., + names: list[HashableT] | None = ..., + verify_integrity: bool = ..., + sort: bool = ..., + copy: bool | None = ..., +) -> DataFrame | Series: + ... + + +@overload +def concat( + objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], + *, + axis: Literal[1, "columns"], + join: str = ..., + ignore_index: bool = ..., + keys=..., + levels=..., + names: list[HashableT] | None = ..., + verify_integrity: bool = ..., + sort: bool = ..., + copy: bool | None = ..., +) -> DataFrame: + ... + + +@overload +def concat( + objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], + *, + axis: Axis = ..., + join: str = ..., + ignore_index: bool = ..., + keys=..., + levels=..., + names: list[HashableT] | None = ..., + verify_integrity: bool = ..., + sort: bool = ..., + copy: bool | None = ..., +) -> DataFrame | Series: + ... + + +def concat( + objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], + *, + axis: Axis = 0, + join: str = "outer", + ignore_index: bool = False, + keys=None, + levels=None, + names: list[HashableT] | None = None, + verify_integrity: bool = False, + sort: bool = False, + copy: bool | None = None, +) -> DataFrame | Series: + """ + Concatenate pandas objects along a particular axis. + + Allows optional set logic along the other axes. 
+ + Can also add a layer of hierarchical indexing on the concatenation axis, + which may be useful if the labels are the same (or overlapping) on + the passed axis number. + + Parameters + ---------- + objs : a sequence or mapping of Series or DataFrame objects + If a mapping is passed, the sorted keys will be used as the `keys` + argument, unless it is passed, in which case the values will be + selected (see below). Any None objects will be dropped silently unless + they are all None in which case a ValueError will be raised. + axis : {0/'index', 1/'columns'}, default 0 + The axis to concatenate along. + join : {'inner', 'outer'}, default 'outer' + How to handle indexes on other axis (or axes). + ignore_index : bool, default False + If True, do not use the index values along the concatenation axis. The + resulting axis will be labeled 0, ..., n - 1. This is useful if you are + concatenating objects where the concatenation axis does not have + meaningful indexing information. Note the index values on the other + axes are still respected in the join. + keys : sequence, default None + If multiple levels passed, should contain tuples. Construct + hierarchical index using the passed keys as the outermost level. + levels : list of sequences, default None + Specific levels (unique values) to use for constructing a + MultiIndex. Otherwise they will be inferred from the keys. + names : list, default None + Names for the levels in the resulting hierarchical index. + verify_integrity : bool, default False + Check whether the new concatenated axis contains duplicates. This can + be very expensive relative to the actual data concatenation. + sort : bool, default False + Sort non-concatenation axis if it is not already aligned. + + copy : bool, default True + If False, do not copy data unnecessarily. + + Returns + ------- + object, type of objs + When concatenating all ``Series`` along the index (axis=0), a + ``Series`` is returned. When ``objs`` contains at least one + ``DataFrame``, a ``DataFrame`` is returned. When concatenating along + the columns (axis=1), a ``DataFrame`` is returned. + + See Also + -------- + DataFrame.join : Join DataFrames using indexes. + DataFrame.merge : Merge DataFrames by indexes or columns. + + Notes + ----- + The keys, levels, and names arguments are all optional. + + A walkthrough of how this method fits in with other tools for combining + pandas objects can be found `here + `__. + + It is not recommended to build DataFrames by adding single rows in a + for loop. Build a list of rows and make a DataFrame in a single concat. + + Examples + -------- + Combine two ``Series``. + + >>> s1 = pd.Series(['a', 'b']) + >>> s2 = pd.Series(['c', 'd']) + >>> pd.concat([s1, s2]) + 0 a + 1 b + 0 c + 1 d + dtype: object + + Clear the existing index and reset it in the result + by setting the ``ignore_index`` option to ``True``. + + >>> pd.concat([s1, s2], ignore_index=True) + 0 a + 1 b + 2 c + 3 d + dtype: object + + Add a hierarchical index at the outermost level of + the data with the ``keys`` option. + + >>> pd.concat([s1, s2], keys=['s1', 's2']) + s1 0 a + 1 b + s2 0 c + 1 d + dtype: object + + Label the index keys you create with the ``names`` option. + + >>> pd.concat([s1, s2], keys=['s1', 's2'], + ... names=['Series name', 'Row ID']) + Series name Row ID + s1 0 a + 1 b + s2 0 c + 1 d + dtype: object + + Combine two ``DataFrame`` objects with identical columns. + + >>> df1 = pd.DataFrame([['a', 1], ['b', 2]], + ... 
columns=['letter', 'number']) + >>> df1 + letter number + 0 a 1 + 1 b 2 + >>> df2 = pd.DataFrame([['c', 3], ['d', 4]], + ... columns=['letter', 'number']) + >>> df2 + letter number + 0 c 3 + 1 d 4 + >>> pd.concat([df1, df2]) + letter number + 0 a 1 + 1 b 2 + 0 c 3 + 1 d 4 + + Combine ``DataFrame`` objects with overlapping columns + and return everything. Columns outside the intersection will + be filled with ``NaN`` values. + + >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']], + ... columns=['letter', 'number', 'animal']) + >>> df3 + letter number animal + 0 c 3 cat + 1 d 4 dog + >>> pd.concat([df1, df3], sort=False) + letter number animal + 0 a 1 NaN + 1 b 2 NaN + 0 c 3 cat + 1 d 4 dog + + Combine ``DataFrame`` objects with overlapping columns + and return only those that are shared by passing ``inner`` to + the ``join`` keyword argument. + + >>> pd.concat([df1, df3], join="inner") + letter number + 0 a 1 + 1 b 2 + 0 c 3 + 1 d 4 + + Combine ``DataFrame`` objects horizontally along the x axis by + passing in ``axis=1``. + + >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']], + ... columns=['animal', 'name']) + >>> pd.concat([df1, df4], axis=1) + letter number animal name + 0 a 1 bird polly + 1 b 2 monkey george + + Prevent the result from including duplicate index values with the + ``verify_integrity`` option. + + >>> df5 = pd.DataFrame([1], index=['a']) + >>> df5 + 0 + a 1 + >>> df6 = pd.DataFrame([2], index=['a']) + >>> df6 + 0 + a 2 + >>> pd.concat([df5, df6], verify_integrity=True) + Traceback (most recent call last): + ... + ValueError: Indexes have overlapping values: ['a'] + + Append a single row to the end of a ``DataFrame`` object. + + >>> df7 = pd.DataFrame({'a': 1, 'b': 2}, index=[0]) + >>> df7 + a b + 0 1 2 + >>> new_row = pd.Series({'a': 3, 'b': 4}) + >>> new_row + a 3 + b 4 + dtype: int64 + >>> pd.concat([df7, new_row.to_frame().T], ignore_index=True) + a b + 0 1 2 + 1 3 4 + """ + if copy is None: + if using_copy_on_write(): + copy = False + else: + copy = True + elif copy and using_copy_on_write(): + copy = False + + op = _Concatenator( + objs, + axis=axis, + ignore_index=ignore_index, + join=join, + keys=keys, + levels=levels, + names=names, + verify_integrity=verify_integrity, + copy=copy, + sort=sort, + ) + + return op.get_result() + + +class _Concatenator: + """ + Orchestrates a concatenation operation for BlockManagers + """ + + sort: bool + + def __init__( + self, + objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], + axis: Axis = 0, + join: str = "outer", + keys=None, + levels=None, + names: list[HashableT] | None = None, + ignore_index: bool = False, + verify_integrity: bool = False, + copy: bool = True, + sort: bool = False, + ) -> None: + if isinstance(objs, (ABCSeries, ABCDataFrame, str)): + raise TypeError( + "first argument must be an iterable of pandas " + f'objects, you passed an object of type "{type(objs).__name__}"' + ) + + if join == "outer": + self.intersect = False + elif join == "inner": + self.intersect = True + else: # pragma: no cover + raise ValueError( + "Only can inner (intersect) or outer (union) join the other axis" + ) + + if not is_bool(sort): + raise ValueError( + f"The 'sort' keyword only accepts boolean values; {sort} was passed." 
+ ) + # Incompatible types in assignment (expression has type "Union[bool, bool_]", + # variable has type "bool") + self.sort = sort # type: ignore[assignment] + + self.ignore_index = ignore_index + self.verify_integrity = verify_integrity + self.copy = copy + + objs, keys = self._clean_keys_and_objs(objs, keys) + + # figure out what our result ndim is going to be + ndims = self._get_ndims(objs) + sample, objs = self._get_sample_object(objs, ndims, keys, names, levels) + + # Standardize axis parameter to int + if sample.ndim == 1: + from pandas import DataFrame + + axis = DataFrame._get_axis_number(axis) + self._is_frame = False + self._is_series = True + else: + axis = sample._get_axis_number(axis) + self._is_frame = True + self._is_series = False + + # Need to flip BlockManager axis in the DataFrame special case + axis = sample._get_block_manager_axis(axis) + + # if we have mixed ndims, then convert to highest ndim + # creating column numbers as needed + if len(ndims) > 1: + objs, sample = self._sanitize_mixed_ndim(objs, sample, ignore_index, axis) + + self.objs = objs + + # note: this is the BlockManager axis (since DataFrame is transposed) + self.bm_axis = axis + self.axis = 1 - self.bm_axis if self._is_frame else 0 + self.keys = keys + self.names = names or getattr(keys, "names", None) + self.levels = levels + + def _get_ndims(self, objs: list[Series | DataFrame]) -> set[int]: + # figure out what our result ndim is going to be + ndims = set() + for obj in objs: + if not isinstance(obj, (ABCSeries, ABCDataFrame)): + msg = ( + f"cannot concatenate object of type '{type(obj)}'; " + "only Series and DataFrame objs are valid" + ) + raise TypeError(msg) + + ndims.add(obj.ndim) + return ndims + + def _clean_keys_and_objs( + self, + objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], + keys, + ) -> tuple[list[Series | DataFrame], Index | None]: + if isinstance(objs, abc.Mapping): + if keys is None: + keys = list(objs.keys()) + objs_list = [objs[k] for k in keys] + else: + objs_list = list(objs) + + if len(objs_list) == 0: + raise ValueError("No objects to concatenate") + + if keys is None: + objs_list = list(com.not_none(*objs_list)) + else: + # GH#1649 + clean_keys = [] + clean_objs = [] + if is_iterator(keys): + keys = list(keys) + if len(keys) != len(objs_list): + # GH#43485 + warnings.warn( + "The behavior of pd.concat with len(keys) != len(objs) is " + "deprecated. In a future version this will raise instead of " + "truncating to the smaller of the two sequences", + FutureWarning, + stacklevel=find_stack_level(), + ) + for k, v in zip(keys, objs_list): + if v is None: + continue + clean_keys.append(k) + clean_objs.append(v) + objs_list = clean_objs + + if isinstance(keys, MultiIndex): + # TODO: retain levels? 
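+                # Rebuild the MultiIndex from the surviving tuples so the keys
+                # stay aligned with the objs kept above (None entries and
+                # their keys were dropped together).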
+ keys = type(keys).from_tuples(clean_keys, names=keys.names) + else: + name = getattr(keys, "name", None) + keys = Index(clean_keys, name=name, dtype=getattr(keys, "dtype", None)) + + if len(objs_list) == 0: + raise ValueError("All objects passed were None") + + return objs_list, keys + + def _get_sample_object( + self, + objs: list[Series | DataFrame], + ndims: set[int], + keys, + names, + levels, + ) -> tuple[Series | DataFrame, list[Series | DataFrame]]: + # get the sample + # want the highest ndim that we have, and must be non-empty + # unless all objs are empty + sample: Series | DataFrame | None = None + if len(ndims) > 1: + max_ndim = max(ndims) + for obj in objs: + if obj.ndim == max_ndim and np.sum(obj.shape): + sample = obj + break + + else: + # filter out the empties if we have not multi-index possibilities + # note to keep empty Series as it affect to result columns / name + non_empties = [obj for obj in objs if sum(obj.shape) > 0 or obj.ndim == 1] + + if len(non_empties) and ( + keys is None and names is None and levels is None and not self.intersect + ): + objs = non_empties + sample = objs[0] + + if sample is None: + sample = objs[0] + return sample, objs + + def _sanitize_mixed_ndim( + self, + objs: list[Series | DataFrame], + sample: Series | DataFrame, + ignore_index: bool, + axis: AxisInt, + ) -> tuple[list[Series | DataFrame], Series | DataFrame]: + # if we have mixed ndims, then convert to highest ndim + # creating column numbers as needed + + new_objs = [] + + current_column = 0 + max_ndim = sample.ndim + for obj in objs: + ndim = obj.ndim + if ndim == max_ndim: + pass + + elif ndim != max_ndim - 1: + raise ValueError( + "cannot concatenate unaligned mixed dimensional NDFrame objects" + ) + + else: + name = getattr(obj, "name", None) + if ignore_index or name is None: + name = current_column + current_column += 1 + + # doing a row-wise concatenation so need everything + # to line up + if self._is_frame and axis == 1: + name = 0 + + obj = sample._constructor({name: obj}, copy=False) + + new_objs.append(obj) + + return new_objs, sample + + def get_result(self): + cons: Callable[..., DataFrame | Series] + sample: DataFrame | Series + + # series only + if self._is_series: + sample = cast("Series", self.objs[0]) + + # stack blocks + if self.bm_axis == 0: + name = com.consensus_name_attr(self.objs) + cons = sample._constructor + + arrs = [ser._values for ser in self.objs] + + res = concat_compat(arrs, axis=0) + + new_index: Index + if self.ignore_index: + # We can avoid surprisingly-expensive _get_concat_axis + new_index = default_index(len(res)) + else: + new_index = self.new_axes[0] + + mgr = type(sample._mgr).from_array(res, index=new_index) + + result = sample._constructor_from_mgr(mgr, axes=mgr.axes) + result._name = name + return result.__finalize__(self, method="concat") + + # combine as columns in a frame + else: + data = dict(zip(range(len(self.objs)), self.objs)) + + # GH28330 Preserves subclassed objects through concat + cons = sample._constructor_expanddim + + index, columns = self.new_axes + df = cons(data, index=index, copy=self.copy) + df.columns = columns + return df.__finalize__(self, method="concat") + + # combine block managers + else: + sample = cast("DataFrame", self.objs[0]) + + mgrs_indexers = [] + for obj in self.objs: + indexers = {} + for ax, new_labels in enumerate(self.new_axes): + # ::-1 to convert BlockManager ax to DataFrame ax + if ax == self.bm_axis: + # Suppress reindexing on concat axis + continue + + # 1-ax to convert BlockManager axis 
to DataFrame axis + obj_labels = obj.axes[1 - ax] + if not new_labels.equals(obj_labels): + indexers[ax] = obj_labels.get_indexer(new_labels) + + mgrs_indexers.append((obj._mgr, indexers)) + + new_data = concatenate_managers( + mgrs_indexers, self.new_axes, concat_axis=self.bm_axis, copy=self.copy + ) + if not self.copy and not using_copy_on_write(): + new_data._consolidate_inplace() + + out = sample._constructor_from_mgr(new_data, axes=new_data.axes) + return out.__finalize__(self, method="concat") + + def _get_result_dim(self) -> int: + if self._is_series and self.bm_axis == 1: + return 2 + else: + return self.objs[0].ndim + + @cache_readonly + def new_axes(self) -> list[Index]: + ndim = self._get_result_dim() + return [ + self._get_concat_axis if i == self.bm_axis else self._get_comb_axis(i) + for i in range(ndim) + ] + + def _get_comb_axis(self, i: AxisInt) -> Index: + data_axis = self.objs[0]._get_block_manager_axis(i) + return get_objs_combined_axis( + self.objs, + axis=data_axis, + intersect=self.intersect, + sort=self.sort, + copy=self.copy, + ) + + @cache_readonly + def _get_concat_axis(self) -> Index: + """ + Return index to be used along concatenation axis. + """ + if self._is_series: + if self.bm_axis == 0: + indexes = [x.index for x in self.objs] + elif self.ignore_index: + idx = default_index(len(self.objs)) + return idx + elif self.keys is None: + names: list[Hashable] = [None] * len(self.objs) + num = 0 + has_names = False + for i, x in enumerate(self.objs): + if x.ndim != 1: + raise TypeError( + f"Cannot concatenate type 'Series' with " + f"object of type '{type(x).__name__}'" + ) + if x.name is not None: + names[i] = x.name + has_names = True + else: + names[i] = num + num += 1 + if has_names: + return Index(names) + else: + return default_index(len(self.objs)) + else: + return ensure_index(self.keys).set_names(self.names) + else: + indexes = [x.axes[self.axis] for x in self.objs] + + if self.ignore_index: + idx = default_index(sum(len(i) for i in indexes)) + return idx + + if self.keys is None: + if self.levels is not None: + raise ValueError("levels supported only when keys is not None") + concat_axis = _concat_indexes(indexes) + else: + concat_axis = _make_concat_multiindex( + indexes, self.keys, self.levels, self.names + ) + + self._maybe_check_integrity(concat_axis) + + return concat_axis + + def _maybe_check_integrity(self, concat_index: Index): + if self.verify_integrity: + if not concat_index.is_unique: + overlap = concat_index[concat_index.duplicated()].unique() + raise ValueError(f"Indexes have overlapping values: {overlap}") + + +def _concat_indexes(indexes) -> Index: + return indexes[0].append(indexes[1:]) + + +def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiIndex: + if (levels is None and isinstance(keys[0], tuple)) or ( + levels is not None and len(levels) > 1 + ): + zipped = list(zip(*keys)) + if names is None: + names = [None] * len(zipped) + + if levels is None: + _, levels = factorize_from_iterables(zipped) + else: + levels = [ensure_index(x) for x in levels] + else: + zipped = [keys] + if names is None: + names = [None] + + if levels is None: + levels = [ensure_index(keys).unique()] + else: + levels = [ensure_index(x) for x in levels] + + for level in levels: + if not level.is_unique: + raise ValueError(f"Level values not unique: {level.tolist()}") + + if not all_indexes_same(indexes) or not all(level.is_unique for level in levels): + codes_list = [] + + # things are potentially different sizes, so compute the exact codes + # 
for each level and pass those to MultiIndex.from_arrays + + for hlevel, level in zip(zipped, levels): + to_concat = [] + if isinstance(hlevel, Index) and hlevel.equals(level): + lens = [len(idx) for idx in indexes] + codes_list.append(np.repeat(np.arange(len(hlevel)), lens)) + else: + for key, index in zip(hlevel, indexes): + # Find matching codes, include matching nan values as equal. + mask = (isna(level) & isna(key)) | (level == key) + if not mask.any(): + raise ValueError(f"Key {key} not in level {level}") + i = np.nonzero(mask)[0][0] + + to_concat.append(np.repeat(i, len(index))) + codes_list.append(np.concatenate(to_concat)) + + concat_index = _concat_indexes(indexes) + + # these go at the end + if isinstance(concat_index, MultiIndex): + levels.extend(concat_index.levels) + codes_list.extend(concat_index.codes) + else: + codes, categories = factorize_from_iterable(concat_index) + levels.append(categories) + codes_list.append(codes) + + if len(names) == len(levels): + names = list(names) + else: + # make sure that all of the passed indices have the same nlevels + if not len({idx.nlevels for idx in indexes}) == 1: + raise AssertionError( + "Cannot concat indices that do not have the same number of levels" + ) + + # also copies + names = list(names) + list(get_unanimous_names(*indexes)) + + return MultiIndex( + levels=levels, codes=codes_list, names=names, verify_integrity=False + ) + + new_index = indexes[0] + n = len(new_index) + kpieces = len(indexes) + + # also copies + new_names = list(names) + new_levels = list(levels) + + # construct codes + new_codes = [] + + # do something a bit more speedy + + for hlevel, level in zip(zipped, levels): + hlevel = ensure_index(hlevel) + mapped = level.get_indexer(hlevel) + + mask = mapped == -1 + if mask.any(): + raise ValueError(f"Values not found in passed level: {hlevel[mask]!s}") + + new_codes.append(np.repeat(mapped, n)) + + if isinstance(new_index, MultiIndex): + new_levels.extend(new_index.levels) + new_codes.extend([np.tile(lab, kpieces) for lab in new_index.codes]) + else: + new_levels.append(new_index.unique()) + single_codes = new_index.unique().get_indexer(new_index) + new_codes.append(np.tile(single_codes, kpieces)) + + if len(new_names) < len(new_levels): + new_names.extend(new_index.names) + + return MultiIndex( + levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/encoding.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/encoding.py new file mode 100644 index 00000000..9ebce3a7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/encoding.py @@ -0,0 +1,546 @@ +from __future__ import annotations + +from collections import defaultdict +from collections.abc import ( + Hashable, + Iterable, +) +import itertools +from typing import ( + TYPE_CHECKING, + cast, +) + +import numpy as np + +from pandas._libs.sparse import IntIndex + +from pandas.core.dtypes.common import ( + is_integer_dtype, + is_list_like, + is_object_dtype, + pandas_dtype, +) + +from pandas.core.arrays import SparseArray +from pandas.core.arrays.categorical import factorize_from_iterable +from pandas.core.frame import DataFrame +from pandas.core.indexes.api import ( + Index, + default_index, +) +from pandas.core.series import Series + +if TYPE_CHECKING: + from pandas._typing import NpDtype + + +def get_dummies( + data, + prefix=None, + prefix_sep: str | Iterable[str] | dict[str, str] = "_", + dummy_na: bool = False, + columns=None, + 
sparse: bool = False, + drop_first: bool = False, + dtype: NpDtype | None = None, +) -> DataFrame: + """ + Convert categorical variable into dummy/indicator variables. + + Each variable is converted in as many 0/1 variables as there are different + values. Columns in the output are each named after a value; if the input is + a DataFrame, the name of the original variable is prepended to the value. + + Parameters + ---------- + data : array-like, Series, or DataFrame + Data of which to get dummy indicators. + prefix : str, list of str, or dict of str, default None + String to append DataFrame column names. + Pass a list with length equal to the number of columns + when calling get_dummies on a DataFrame. Alternatively, `prefix` + can be a dictionary mapping column names to prefixes. + prefix_sep : str, default '_' + If appending prefix, separator/delimiter to use. Or pass a + list or dictionary as with `prefix`. + dummy_na : bool, default False + Add a column to indicate NaNs, if False NaNs are ignored. + columns : list-like, default None + Column names in the DataFrame to be encoded. + If `columns` is None then all the columns with + `object`, `string`, or `category` dtype will be converted. + sparse : bool, default False + Whether the dummy-encoded columns should be backed by + a :class:`SparseArray` (True) or a regular NumPy array (False). + drop_first : bool, default False + Whether to get k-1 dummies out of k categorical levels by removing the + first level. + dtype : dtype, default bool + Data type for new columns. Only a single dtype is allowed. + + Returns + ------- + DataFrame + Dummy-coded data. If `data` contains other columns than the + dummy-coded one(s), these will be prepended, unaltered, to the result. + + See Also + -------- + Series.str.get_dummies : Convert Series of strings to dummy codes. + :func:`~pandas.from_dummies` : Convert dummy codes to categorical ``DataFrame``. + + Notes + ----- + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + >>> s = pd.Series(list('abca')) + + >>> pd.get_dummies(s) + a b c + 0 True False False + 1 False True False + 2 False False True + 3 True False False + + >>> s1 = ['a', 'b', np.nan] + + >>> pd.get_dummies(s1) + a b + 0 True False + 1 False True + 2 False False + + >>> pd.get_dummies(s1, dummy_na=True) + a b NaN + 0 True False False + 1 False True False + 2 False False True + + >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], + ... 
'C': [1, 2, 3]}) + + >>> pd.get_dummies(df, prefix=['col1', 'col2']) + C col1_a col1_b col2_a col2_b col2_c + 0 1 True False False True False + 1 2 False True True False False + 2 3 True False False False True + + >>> pd.get_dummies(pd.Series(list('abcaa'))) + a b c + 0 True False False + 1 False True False + 2 False False True + 3 True False False + 4 True False False + + >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True) + b c + 0 False False + 1 True False + 2 False True + 3 False False + 4 False False + + >>> pd.get_dummies(pd.Series(list('abc')), dtype=float) + a b c + 0 1.0 0.0 0.0 + 1 0.0 1.0 0.0 + 2 0.0 0.0 1.0 + """ + from pandas.core.reshape.concat import concat + + dtypes_to_encode = ["object", "string", "category"] + + if isinstance(data, DataFrame): + # determine columns being encoded + if columns is None: + data_to_encode = data.select_dtypes(include=dtypes_to_encode) + elif not is_list_like(columns): + raise TypeError("Input must be a list-like for parameter `columns`") + else: + data_to_encode = data[columns] + + # validate prefixes and separator to avoid silently dropping cols + def check_len(item, name: str): + if is_list_like(item): + if not len(item) == data_to_encode.shape[1]: + len_msg = ( + f"Length of '{name}' ({len(item)}) did not match the " + "length of the columns being encoded " + f"({data_to_encode.shape[1]})." + ) + raise ValueError(len_msg) + + check_len(prefix, "prefix") + check_len(prefix_sep, "prefix_sep") + + if isinstance(prefix, str): + prefix = itertools.cycle([prefix]) + if isinstance(prefix, dict): + prefix = [prefix[col] for col in data_to_encode.columns] + + if prefix is None: + prefix = data_to_encode.columns + + # validate separators + if isinstance(prefix_sep, str): + prefix_sep = itertools.cycle([prefix_sep]) + elif isinstance(prefix_sep, dict): + prefix_sep = [prefix_sep[col] for col in data_to_encode.columns] + + with_dummies: list[DataFrame] + if data_to_encode.shape == data.shape: + # Encoding the entire df, do not prepend any dropped columns + with_dummies = [] + elif columns is not None: + # Encoding only cols specified in columns. Get all cols not in + # columns to prepend to result. + with_dummies = [data.drop(columns, axis=1)] + else: + # Encoding only object and category dtype columns. Get remaining + # columns to prepend to result. 
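+            # e.g. (hypothetical) a frame with columns ['salary', 'dept'] and
+            # columns=None encodes only 'dept'; 'salary' is prepended unchanged.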
+ with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)] + + for col, pre, sep in zip(data_to_encode.items(), prefix, prefix_sep): + # col is (column_name, column), use just column data here + dummy = _get_dummies_1d( + col[1], + prefix=pre, + prefix_sep=sep, + dummy_na=dummy_na, + sparse=sparse, + drop_first=drop_first, + dtype=dtype, + ) + with_dummies.append(dummy) + result = concat(with_dummies, axis=1) + else: + result = _get_dummies_1d( + data, + prefix, + prefix_sep, + dummy_na, + sparse=sparse, + drop_first=drop_first, + dtype=dtype, + ) + return result + + +def _get_dummies_1d( + data, + prefix, + prefix_sep: str | Iterable[str] | dict[str, str] = "_", + dummy_na: bool = False, + sparse: bool = False, + drop_first: bool = False, + dtype: NpDtype | None = None, +) -> DataFrame: + from pandas.core.reshape.concat import concat + + # Series avoids inconsistent NaN handling + codes, levels = factorize_from_iterable(Series(data, copy=False)) + + if dtype is None: + dtype = np.dtype(bool) + _dtype = pandas_dtype(dtype) + + if is_object_dtype(_dtype): + raise ValueError("dtype=object is not a valid dtype for get_dummies") + + def get_empty_frame(data) -> DataFrame: + index: Index | np.ndarray + if isinstance(data, Series): + index = data.index + else: + index = default_index(len(data)) + return DataFrame(index=index) + + # if all NaN + if not dummy_na and len(levels) == 0: + return get_empty_frame(data) + + codes = codes.copy() + if dummy_na: + codes[codes == -1] = len(levels) + levels = levels.insert(len(levels), np.nan) + + # if dummy_na, we just fake a nan level. drop_first will drop it again + if drop_first and len(levels) == 1: + return get_empty_frame(data) + + number_of_cols = len(levels) + + if prefix is None: + dummy_cols = levels + else: + dummy_cols = Index([f"{prefix}{prefix_sep}{level}" for level in levels]) + + index: Index | None + if isinstance(data, Series): + index = data.index + else: + index = None + + if sparse: + fill_value: bool | float + if is_integer_dtype(dtype): + fill_value = 0 + elif dtype == np.dtype(bool): + fill_value = False + else: + fill_value = 0.0 + + sparse_series = [] + N = len(data) + sp_indices: list[list] = [[] for _ in range(len(dummy_cols))] + mask = codes != -1 + codes = codes[mask] + n_idx = np.arange(N)[mask] + + for ndx, code in zip(n_idx, codes): + sp_indices[code].append(ndx) + + if drop_first: + # remove first categorical level to avoid perfect collinearity + # GH12042 + sp_indices = sp_indices[1:] + dummy_cols = dummy_cols[1:] + for col, ixs in zip(dummy_cols, sp_indices): + sarr = SparseArray( + np.ones(len(ixs), dtype=dtype), + sparse_index=IntIndex(N, ixs), + fill_value=fill_value, + dtype=dtype, + ) + sparse_series.append(Series(data=sarr, index=index, name=col, copy=False)) + + return concat(sparse_series, axis=1, copy=False) + + else: + # take on axis=1 + transpose to ensure ndarray layout is column-major + eye_dtype: NpDtype + if isinstance(_dtype, np.dtype): + eye_dtype = _dtype + else: + eye_dtype = np.bool_ + dummy_mat = np.eye(number_of_cols, dtype=eye_dtype).take(codes, axis=1).T + + if not dummy_na: + # reset NaN GH4446 + dummy_mat[codes == -1] = 0 + + if drop_first: + # remove first GH12042 + dummy_mat = dummy_mat[:, 1:] + dummy_cols = dummy_cols[1:] + return DataFrame(dummy_mat, index=index, columns=dummy_cols, dtype=_dtype) + + +def from_dummies( + data: DataFrame, + sep: None | str = None, + default_category: None | Hashable | dict[str, Hashable] = None, +) -> DataFrame: + """ + Create a categorical ``DataFrame`` 
from a ``DataFrame`` of dummy variables.
+
+    Inverts the operation performed by :func:`~pandas.get_dummies`.
+
+    .. versionadded:: 1.5.0
+
+    Parameters
+    ----------
+    data : DataFrame
+        Data which contains dummy-coded variables in the form of integer columns
+        of 1's and 0's.
+    sep : str, default None
+        Separator used in the column names of the dummy categories, i.e. the
+        character separating the category names from the prefixes.
+        For example, if your column names are 'prefix_A' and 'prefix_B',
+        you can strip the underscore by specifying sep='_'.
+    default_category : None, Hashable or dict of Hashables, default None
+        The default category is the implied category when a value has none of the
+        listed categories specified with a one, i.e. if all dummies in a row are
+        zero. Can be a single value for all variables or a dict mapping each
+        variable prefix to its default category.
+
+    Returns
+    -------
+    DataFrame
+        Categorical data decoded from the dummy input data.
+
+    Raises
+    ------
+    ValueError
+        * When the input ``DataFrame`` ``data`` contains NA values.
+        * When the input ``DataFrame`` ``data`` contains column names with separators
+          that do not match the separator specified with ``sep``.
+        * When a ``dict`` passed to ``default_category`` does not include an implied
+          category for each prefix.
+        * When a value in ``data`` has more than one category assigned to it.
+        * When ``default_category=None`` and a value in ``data`` has no category
+          assigned to it.
+    TypeError
+        * When the input ``data`` is not of type ``DataFrame``.
+        * When the input ``DataFrame`` ``data`` contains non-dummy data.
+        * When the passed ``sep`` is of a wrong data type.
+        * When the passed ``default_category`` is of a wrong data type.
+
+    See Also
+    --------
+    :func:`~pandas.get_dummies` : Convert ``Series`` or ``DataFrame`` to dummy codes.
+    :class:`~pandas.Categorical` : Represent a categorical variable in classic
+        R / S-plus fashion.
+
+    Notes
+    -----
+    The columns of the passed dummy data should only include 1's and 0's,
+    or boolean values.
+
+    Examples
+    --------
+    >>> df = pd.DataFrame({"a": [1, 0, 0, 1], "b": [0, 1, 0, 0],
+    ...                    "c": [0, 0, 1, 0]})
+
+    >>> df
+       a  b  c
+    0  1  0  0
+    1  0  1  0
+    2  0  0  1
+    3  1  0  0
+
+    >>> pd.from_dummies(df)
+    0    a
+    1    b
+    2    c
+    3    a
+
+    >>> df = pd.DataFrame({"col1_a": [1, 0, 1], "col1_b": [0, 1, 0],
+    ...                    "col2_a": [0, 1, 0], "col2_b": [1, 0, 0],
+    ...                    "col2_c": [0, 0, 1]})
+
+    >>> df
+       col1_a  col1_b  col2_a  col2_b  col2_c
+    0       1       0       0       1       0
+    1       0       1       1       0       0
+    2       1       0       0       0       1
+
+    >>> pd.from_dummies(df, sep="_")
+      col1 col2
+    0    a    b
+    1    b    a
+    2    a    c
+
+    >>> df = pd.DataFrame({"col1_a": [1, 0, 0], "col1_b": [0, 1, 0],
+    ...                    "col2_a": [0, 1, 0], "col2_b": [1, 0, 0],
+    ...                    
"col2_c": [0, 0, 0]}) + + >>> df + col1_a col1_b col2_a col2_b col2_c + 0 1 0 0 1 0 + 1 0 1 1 0 0 + 2 0 0 0 0 0 + + >>> pd.from_dummies(df, sep="_", default_category={"col1": "d", "col2": "e"}) + col1 col2 + 0 a b + 1 b a + 2 d e + """ + from pandas.core.reshape.concat import concat + + if not isinstance(data, DataFrame): + raise TypeError( + "Expected 'data' to be a 'DataFrame'; " + f"Received 'data' of type: {type(data).__name__}" + ) + + col_isna_mask = cast(Series, data.isna().any()) + + if col_isna_mask.any(): + raise ValueError( + "Dummy DataFrame contains NA value in column: " + f"'{col_isna_mask.idxmax()}'" + ) + + # index data with a list of all columns that are dummies + try: + data_to_decode = data.astype("boolean", copy=False) + except TypeError: + raise TypeError("Passed DataFrame contains non-dummy data") + + # collect prefixes and get lists to slice data for each prefix + variables_slice = defaultdict(list) + if sep is None: + variables_slice[""] = list(data.columns) + elif isinstance(sep, str): + for col in data_to_decode.columns: + prefix = col.split(sep)[0] + if len(prefix) == len(col): + raise ValueError(f"Separator not specified for column: {col}") + variables_slice[prefix].append(col) + else: + raise TypeError( + "Expected 'sep' to be of type 'str' or 'None'; " + f"Received 'sep' of type: {type(sep).__name__}" + ) + + if default_category is not None: + if isinstance(default_category, dict): + if not len(default_category) == len(variables_slice): + len_msg = ( + f"Length of 'default_category' ({len(default_category)}) " + f"did not match the length of the columns being encoded " + f"({len(variables_slice)})" + ) + raise ValueError(len_msg) + elif isinstance(default_category, Hashable): + default_category = dict( + zip(variables_slice, [default_category] * len(variables_slice)) + ) + else: + raise TypeError( + "Expected 'default_category' to be of type " + "'None', 'Hashable', or 'dict'; " + "Received 'default_category' of type: " + f"{type(default_category).__name__}" + ) + + cat_data = {} + for prefix, prefix_slice in variables_slice.items(): + if sep is None: + cats = prefix_slice.copy() + else: + cats = [col[len(prefix + sep) :] for col in prefix_slice] + assigned = data_to_decode.loc[:, prefix_slice].sum(axis=1) + if any(assigned > 1): + raise ValueError( + "Dummy DataFrame contains multi-assignment(s); " + f"First instance in row: {assigned.idxmax()}" + ) + if any(assigned == 0): + if isinstance(default_category, dict): + cats.append(default_category[prefix]) + else: + raise ValueError( + "Dummy DataFrame contains unassigned value(s); " + f"First instance in row: {assigned.idxmin()}" + ) + data_slice = concat( + (data_to_decode.loc[:, prefix_slice], assigned == 0), axis=1 + ) + else: + data_slice = data_to_decode.loc[:, prefix_slice] + cats_array = data._constructor_sliced(cats, dtype=data.columns.dtype) + # get indices of True entries along axis=1 + true_values = data_slice.idxmax(axis=1) + indexer = data_slice.columns.get_indexer_for(true_values) + cat_data[prefix] = cats_array.take(indexer).set_axis(data.index) + + result = DataFrame(cat_data) + if sep is not None: + result.columns = result.columns.astype(data.columns.dtype) + return result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/melt.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/melt.py new file mode 100644 index 00000000..74e6a6a2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/melt.py @@ -0,0 +1,533 @@ +from __future__ import 
annotations + +import re +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.util._decorators import Appender + +from pandas.core.dtypes.common import is_list_like +from pandas.core.dtypes.concat import concat_compat +from pandas.core.dtypes.missing import notna + +import pandas.core.algorithms as algos +from pandas.core.arrays import Categorical +import pandas.core.common as com +from pandas.core.indexes.api import ( + Index, + MultiIndex, +) +from pandas.core.reshape.concat import concat +from pandas.core.reshape.util import tile_compat +from pandas.core.shared_docs import _shared_docs +from pandas.core.tools.numeric import to_numeric + +if TYPE_CHECKING: + from collections.abc import Hashable + + from pandas._typing import AnyArrayLike + + from pandas import DataFrame + + +@Appender(_shared_docs["melt"] % {"caller": "pd.melt(df, ", "other": "DataFrame.melt"}) +def melt( + frame: DataFrame, + id_vars=None, + value_vars=None, + var_name=None, + value_name: Hashable = "value", + col_level=None, + ignore_index: bool = True, +) -> DataFrame: + # If multiindex, gather names of columns on all level for checking presence + # of `id_vars` and `value_vars` + if isinstance(frame.columns, MultiIndex): + cols = [x for c in frame.columns for x in c] + else: + cols = list(frame.columns) + + if value_name in frame.columns: + raise ValueError( + f"value_name ({value_name}) cannot match an element in " + "the DataFrame columns." + ) + + if id_vars is not None: + if not is_list_like(id_vars): + id_vars = [id_vars] + elif isinstance(frame.columns, MultiIndex) and not isinstance(id_vars, list): + raise ValueError( + "id_vars must be a list of tuples when columns are a MultiIndex" + ) + else: + # Check that `id_vars` are in frame + id_vars = list(id_vars) + missing = Index(com.flatten(id_vars)).difference(cols) + if not missing.empty: + raise KeyError( + "The following 'id_vars' are not present " + f"in the DataFrame: {list(missing)}" + ) + else: + id_vars = [] + + if value_vars is not None: + if not is_list_like(value_vars): + value_vars = [value_vars] + elif isinstance(frame.columns, MultiIndex) and not isinstance(value_vars, list): + raise ValueError( + "value_vars must be a list of tuples when columns are a MultiIndex" + ) + else: + value_vars = list(value_vars) + # Check that `value_vars` are in frame + missing = Index(com.flatten(value_vars)).difference(cols) + if not missing.empty: + raise KeyError( + "The following 'value_vars' are not present in " + f"the DataFrame: {list(missing)}" + ) + if col_level is not None: + idx = frame.columns.get_level_values(col_level).get_indexer( + id_vars + value_vars + ) + else: + idx = algos.unique(frame.columns.get_indexer_for(id_vars + value_vars)) + frame = frame.iloc[:, idx] + else: + frame = frame.copy() + + if col_level is not None: # allow list or other? + # frame is a copy + frame.columns = frame.columns.get_level_values(col_level) + + if var_name is None: + if isinstance(frame.columns, MultiIndex): + if len(frame.columns.names) == len(set(frame.columns.names)): + var_name = frame.columns.names + else: + var_name = [f"variable_{i}" for i in range(len(frame.columns.names))] + else: + var_name = [ + frame.columns.name if frame.columns.name is not None else "variable" + ] + if isinstance(var_name, str): + var_name = [var_name] + + N, K = frame.shape + K -= len(id_vars) + + mdata: dict[Hashable, AnyArrayLike] = {} + for col in id_vars: + id_data = frame.pop(col) + if not isinstance(id_data.dtype, np.dtype): + # i.e. 
ExtensionDtype + if K > 0: + mdata[col] = concat([id_data] * K, ignore_index=True) + else: + # We can't concat empty list. (GH 46044) + mdata[col] = type(id_data)([], name=id_data.name, dtype=id_data.dtype) + else: + mdata[col] = np.tile(id_data._values, K) + + mcolumns = id_vars + var_name + [value_name] + + if frame.shape[1] > 0: + mdata[value_name] = concat( + [frame.iloc[:, i] for i in range(frame.shape[1])] + ).values + else: + mdata[value_name] = frame._values.ravel("F") + for i, col in enumerate(var_name): + mdata[col] = frame.columns._get_level_values(i).repeat(N) + + result = frame._constructor(mdata, columns=mcolumns) + + if not ignore_index: + result.index = tile_compat(frame.index, K) + + return result + + +def lreshape(data: DataFrame, groups, dropna: bool = True) -> DataFrame: + """ + Reshape wide-format data to long. Generalized inverse of DataFrame.pivot. + + Accepts a dictionary, ``groups``, in which each key is a new column name + and each value is a list of old column names that will be "melted" under + the new column name as part of the reshape. + + Parameters + ---------- + data : DataFrame + The wide-format DataFrame. + groups : dict + {new_name : list_of_columns}. + dropna : bool, default True + Do not include columns whose entries are all NaN. + + Returns + ------- + DataFrame + Reshaped DataFrame. + + See Also + -------- + melt : Unpivot a DataFrame from wide to long format, optionally leaving + identifiers set. + pivot : Create a spreadsheet-style pivot table as a DataFrame. + DataFrame.pivot : Pivot without aggregation that can handle + non-numeric data. + DataFrame.pivot_table : Generalization of pivot that can handle + duplicate values for one index/column pair. + DataFrame.unstack : Pivot based on the index values instead of a + column. + wide_to_long : Wide panel to long format. Less flexible but more + user-friendly than melt. + + Examples + -------- + >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], + ... 'team': ['Red Sox', 'Yankees'], + ... 'year1': [2007, 2007], 'year2': [2008, 2008]}) + >>> data + hr1 hr2 team year1 year2 + 0 514 545 Red Sox 2007 2008 + 1 573 526 Yankees 2007 2008 + + >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) + team year hr + 0 Red Sox 2007 514 + 1 Yankees 2007 573 + 2 Red Sox 2008 545 + 3 Yankees 2008 526 + """ + if isinstance(groups, dict): + keys = list(groups.keys()) + values = list(groups.values()) + else: + keys, values = zip(*groups) + + all_cols = list(set.union(*(set(x) for x in values))) + id_cols = list(data.columns.difference(all_cols)) + + K = len(values[0]) + + for seq in values: + if len(seq) != K: + raise ValueError("All column lists must be same length") + + mdata = {} + pivot_cols = [] + + for target, names in zip(keys, values): + to_concat = [data[col]._values for col in names] + + mdata[target] = concat_compat(to_concat) + pivot_cols.append(target) + + for col in id_cols: + mdata[col] = np.tile(data[col]._values, K) + + if dropna: + mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool) + for c in pivot_cols: + mask &= notna(mdata[c]) + if not mask.all(): + mdata = {k: v[mask] for k, v in mdata.items()} + + return data._constructor(mdata, columns=id_cols + pivot_cols) + + +def wide_to_long( + df: DataFrame, stubnames, i, j, sep: str = "", suffix: str = r"\d+" +) -> DataFrame: + r""" + Unpivot a DataFrame from wide to long format. + + Less flexible but more user-friendly than melt. 
+
+    With stubnames ['A', 'B'], this function expects to find one or more
+    groups of columns with format
+    A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,...
+    You specify what you want to call this suffix in the resulting long format
+    with `j` (for example `j='year'`).
+
+    Each row of these wide variables is assumed to be uniquely identified by
+    `i` (can be a single column name or a list of column names).
+
+    All remaining variables in the data frame are left intact.
+
+    Parameters
+    ----------
+    df : DataFrame
+        The wide-format DataFrame.
+    stubnames : str or list-like
+        The stub name(s). The wide format variables are assumed to
+        start with the stub names.
+    i : str or list-like
+        Column(s) to use as id variable(s).
+    j : str
+        The name of the sub-observation variable. What you wish to name your
+        suffix in the long format.
+    sep : str, default ""
+        A character indicating the separation of the variable names
+        in the wide format, to be stripped from the names in the long format.
+        For example, if your column names are A-suffix1, A-suffix2, you
+        can strip the hyphen by specifying `sep='-'`.
+    suffix : str, default '\\d+'
+        A regular expression capturing the wanted suffixes. '\\d+' captures
+        numeric suffixes. Suffixes with no numbers could be specified with the
+        negated character class '\\D+'. You can also further disambiguate
+        suffixes, for example, if your wide variables are of the form A-one,
+        B-two, ..., and you have an unrelated column A-rating, you can ignore
+        the last one by specifying `suffix='(one|two)'`. When all suffixes are
+        numeric, they are cast to int64/float64.
+
+    Returns
+    -------
+    DataFrame
+        A DataFrame that contains each stub name as a variable, with new index
+        (i, j).
+
+    See Also
+    --------
+    melt : Unpivot a DataFrame from wide to long format, optionally leaving
+        identifiers set.
+    pivot : Create a spreadsheet-style pivot table as a DataFrame.
+    DataFrame.pivot : Pivot without aggregation that can handle
+        non-numeric data.
+    DataFrame.pivot_table : Generalization of pivot that can handle
+        duplicate values for one index/column pair.
+    DataFrame.unstack : Pivot based on the index values instead of a
+        column.
+
+    Notes
+    -----
+    All extra variables are left untouched. This simply uses
+    `pandas.melt` under the hood, but is hard-coded to "do the right thing"
+    in a typical case.
+
+    Examples
+    --------
+    >>> np.random.seed(123)
+    >>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
+    ...                    "A1980" : {0 : "d", 1 : "e", 2 : "f"},
+    ...                    "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
+    ...                    "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
+    ...                    "X"     : dict(zip(range(3), np.random.randn(3)))
+    ...                    })
+    >>> df["id"] = df.index
+    >>> df
+      A1970 A1980  B1970  B1980         X  id
+    0     a     d    2.5    3.2 -1.085631   0
+    1     b     e    1.2    1.3  0.997345   1
+    2     c     f    0.7    0.1  0.282978   2
+    >>> pd.wide_to_long(df, ["A", "B"], i="id", j="year")
+    ... # doctest: +NORMALIZE_WHITESPACE
+                    X  A    B
+    id year
+    0  1970 -1.085631  a  2.5
+    1  1970  0.997345  b  1.2
+    2  1970  0.282978  c  0.7
+    0  1980 -1.085631  d  3.2
+    1  1980  0.997345  e  1.3
+    2  1980  0.282978  f  0.1
+
+    With multiple id columns
+
+    >>> df = pd.DataFrame({
+    ...     'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
+    ...     'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
+    ...     'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
+    ...     'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
+    ...
}) + >>> df + famid birth ht1 ht2 + 0 1 1 2.8 3.4 + 1 1 2 2.9 3.8 + 2 1 3 2.2 2.9 + 3 2 1 2.0 3.2 + 4 2 2 1.8 2.8 + 5 2 3 1.9 2.4 + 6 3 1 2.2 3.3 + 7 3 2 2.3 3.4 + 8 3 3 2.1 2.9 + >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age') + >>> l + ... # doctest: +NORMALIZE_WHITESPACE + ht + famid birth age + 1 1 1 2.8 + 2 3.4 + 2 1 2.9 + 2 3.8 + 3 1 2.2 + 2 2.9 + 2 1 1 2.0 + 2 3.2 + 2 1 1.8 + 2 2.8 + 3 1 1.9 + 2 2.4 + 3 1 1 2.2 + 2 3.3 + 2 1 2.3 + 2 3.4 + 3 1 2.1 + 2 2.9 + + Going from long back to wide just takes some creative use of `unstack` + + >>> w = l.unstack() + >>> w.columns = w.columns.map('{0[0]}{0[1]}'.format) + >>> w.reset_index() + famid birth ht1 ht2 + 0 1 1 2.8 3.4 + 1 1 2 2.9 3.8 + 2 1 3 2.2 2.9 + 3 2 1 2.0 3.2 + 4 2 2 1.8 2.8 + 5 2 3 1.9 2.4 + 6 3 1 2.2 3.3 + 7 3 2 2.3 3.4 + 8 3 3 2.1 2.9 + + Less wieldy column names are also handled + + >>> np.random.seed(0) + >>> df = pd.DataFrame({'A(weekly)-2010': np.random.rand(3), + ... 'A(weekly)-2011': np.random.rand(3), + ... 'B(weekly)-2010': np.random.rand(3), + ... 'B(weekly)-2011': np.random.rand(3), + ... 'X' : np.random.randint(3, size=3)}) + >>> df['id'] = df.index + >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS + A(weekly)-2010 A(weekly)-2011 B(weekly)-2010 B(weekly)-2011 X id + 0 0.548814 0.544883 0.437587 0.383442 0 0 + 1 0.715189 0.423655 0.891773 0.791725 1 1 + 2 0.602763 0.645894 0.963663 0.528895 1 2 + + >>> pd.wide_to_long(df, ['A(weekly)', 'B(weekly)'], i='id', + ... j='year', sep='-') + ... # doctest: +NORMALIZE_WHITESPACE + X A(weekly) B(weekly) + id year + 0 2010 0 0.548814 0.437587 + 1 2010 1 0.715189 0.891773 + 2 2010 1 0.602763 0.963663 + 0 2011 0 0.544883 0.383442 + 1 2011 1 0.423655 0.791725 + 2 2011 1 0.645894 0.528895 + + If we have many columns, we could also use a regex to find our + stubnames and pass that list on to wide_to_long + + >>> stubnames = sorted( + ... set([match[0] for match in df.columns.str.findall( + ... r'[A-B]\(.*\)').values if match != []]) + ... ) + >>> list(stubnames) + ['A(weekly)', 'B(weekly)'] + + All of the above examples have integers as suffixes. It is possible to + have non-integers as suffixes. + + >>> df = pd.DataFrame({ + ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], + ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], + ... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], + ... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] + ... }) + >>> df + famid birth ht_one ht_two + 0 1 1 2.8 3.4 + 1 1 2 2.9 3.8 + 2 1 3 2.2 2.9 + 3 2 1 2.0 3.2 + 4 2 2 1.8 2.8 + 5 2 3 1.9 2.4 + 6 3 1 2.2 3.3 + 7 3 2 2.3 3.4 + 8 3 3 2.1 2.9 + + >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age', + ... sep='_', suffix=r'\w+') + >>> l + ... 
# doctest: +NORMALIZE_WHITESPACE + ht + famid birth age + 1 1 one 2.8 + two 3.4 + 2 one 2.9 + two 3.8 + 3 one 2.2 + two 2.9 + 2 1 one 2.0 + two 3.2 + 2 one 1.8 + two 2.8 + 3 one 1.9 + two 2.4 + 3 1 one 2.2 + two 3.3 + 2 one 2.3 + two 3.4 + 3 one 2.1 + two 2.9 + """ + + def get_var_names(df, stub: str, sep: str, suffix: str) -> list[str]: + regex = rf"^{re.escape(stub)}{re.escape(sep)}{suffix}$" + pattern = re.compile(regex) + return [col for col in df.columns if pattern.match(col)] + + def melt_stub(df, stub: str, i, j, value_vars, sep: str): + newdf = melt( + df, + id_vars=i, + value_vars=value_vars, + value_name=stub.rstrip(sep), + var_name=j, + ) + newdf[j] = Categorical(newdf[j]) + newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "", regex=True) + + # GH17627 Cast numerics suffixes to int/float + newdf[j] = to_numeric(newdf[j], errors="ignore") + + return newdf.set_index(i + [j]) + + if not is_list_like(stubnames): + stubnames = [stubnames] + else: + stubnames = list(stubnames) + + if any(col in stubnames for col in df.columns): + raise ValueError("stubname can't be identical to a column name") + + if not is_list_like(i): + i = [i] + else: + i = list(i) + + if df[i].duplicated().any(): + raise ValueError("the id variables need to uniquely identify each row") + + value_vars = [get_var_names(df, stub, sep, suffix) for stub in stubnames] + + value_vars_flattened = [e for sublist in value_vars for e in sublist] + id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened)) + + _melted = [melt_stub(df, s, i, j, v, sep) for s, v in zip(stubnames, value_vars)] + melted = _melted[0].join(_melted[1:], how="outer") + + if len(i) == 1: + new = df[id_vars].set_index(i).join(melted) + return new + + new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j]) + + return new diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/merge.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/merge.py new file mode 100644 index 00000000..0b343a1f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/merge.py @@ -0,0 +1,2702 @@ +""" +SQL-style merge routines +""" +from __future__ import annotations + +from collections.abc import ( + Hashable, + Sequence, +) +import datetime +from functools import partial +import string +from typing import ( + TYPE_CHECKING, + Literal, + cast, + final, +) +import uuid +import warnings + +import numpy as np + +from pandas._libs import ( + Timedelta, + hashtable as libhashtable, + join as libjoin, + lib, +) +from pandas._libs.lib import is_range_indexer +from pandas._typing import ( + AnyArrayLike, + ArrayLike, + IndexLabel, + JoinHow, + MergeHow, + Shape, + Suffixes, + npt, +) +from pandas.errors import MergeError +from pandas.util._decorators import ( + Appender, + Substitution, + cache_readonly, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.cast import find_common_type +from pandas.core.dtypes.common import ( + ensure_int64, + ensure_object, + is_bool, + is_bool_dtype, + is_extension_array_dtype, + is_float_dtype, + is_integer, + is_integer_dtype, + is_list_like, + is_number, + is_numeric_dtype, + is_object_dtype, + is_string_dtype, + needs_i8_conversion, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + isna, + na_value_for_dtype, +) + +from pandas import 
( + ArrowDtype, + Categorical, + Index, + MultiIndex, + Series, +) +import pandas.core.algorithms as algos +from pandas.core.arrays import ( + ArrowExtensionArray, + BaseMaskedArray, + ExtensionArray, +) +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray +from pandas.core.arrays.string_ import StringDtype +import pandas.core.common as com +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, +) +from pandas.core.frame import _merge_doc +from pandas.core.indexes.api import default_index +from pandas.core.sorting import is_int64_overflow_possible + +if TYPE_CHECKING: + from pandas import DataFrame + from pandas.core import groupby + from pandas.core.arrays import DatetimeArray + +_factorizers = { + np.int64: libhashtable.Int64Factorizer, + np.longlong: libhashtable.Int64Factorizer, + np.int32: libhashtable.Int32Factorizer, + np.int16: libhashtable.Int16Factorizer, + np.int8: libhashtable.Int8Factorizer, + np.uint64: libhashtable.UInt64Factorizer, + np.uint32: libhashtable.UInt32Factorizer, + np.uint16: libhashtable.UInt16Factorizer, + np.uint8: libhashtable.UInt8Factorizer, + np.bool_: libhashtable.UInt8Factorizer, + np.float64: libhashtable.Float64Factorizer, + np.float32: libhashtable.Float32Factorizer, + np.complex64: libhashtable.Complex64Factorizer, + np.complex128: libhashtable.Complex128Factorizer, + np.object_: libhashtable.ObjectFactorizer, +} + +# See https://github.com/pandas-dev/pandas/issues/52451 +if np.intc is not np.int32: + _factorizers[np.intc] = libhashtable.Int64Factorizer + +_known = (np.ndarray, ExtensionArray, Index, ABCSeries) + + +@Substitution("\nleft : DataFrame or named Series") +@Appender(_merge_doc, indents=0) +def merge( + left: DataFrame | Series, + right: DataFrame | Series, + how: MergeHow = "inner", + on: IndexLabel | None = None, + left_on: IndexLabel | None = None, + right_on: IndexLabel | None = None, + left_index: bool = False, + right_index: bool = False, + sort: bool = False, + suffixes: Suffixes = ("_x", "_y"), + copy: bool | None = None, + indicator: str | bool = False, + validate: str | None = None, +) -> DataFrame: + left_df = _validate_operand(left) + right_df = _validate_operand(right) + if how == "cross": + return _cross_merge( + left_df, + right_df, + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + sort=sort, + suffixes=suffixes, + indicator=indicator, + validate=validate, + copy=copy, + ) + else: + op = _MergeOperation( + left_df, + right_df, + how=how, + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + sort=sort, + suffixes=suffixes, + indicator=indicator, + validate=validate, + ) + return op.get_result(copy=copy) + + +def _cross_merge( + left: DataFrame, + right: DataFrame, + on: IndexLabel | None = None, + left_on: IndexLabel | None = None, + right_on: IndexLabel | None = None, + left_index: bool = False, + right_index: bool = False, + sort: bool = False, + suffixes: Suffixes = ("_x", "_y"), + copy: bool | None = None, + indicator: str | bool = False, + validate: str | None = None, +) -> DataFrame: + """ + See merge.__doc__ with how='cross' + """ + + if ( + left_index + or right_index + or right_on is not None + or left_on is not None + or on is not None + ): + raise MergeError( + "Can not pass on, right_on, left_on or set right_index=True or " + "left_index=True" + ) + + cross_col = f"_cross_{uuid.uuid4()}" + left = left.assign(**{cross_col: 1}) + right = right.assign(**{cross_col: 
1}) + + left_on = right_on = [cross_col] + + res = merge( + left, + right, + how="inner", + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + sort=sort, + suffixes=suffixes, + indicator=indicator, + validate=validate, + copy=copy, + ) + del res[cross_col] + return res + + +def _groupby_and_merge(by, left: DataFrame, right: DataFrame, merge_pieces): + """ + groupby & merge; we are always performing a left-by type operation + + Parameters + ---------- + by: field to group + left: DataFrame + right: DataFrame + merge_pieces: function for merging + """ + pieces = [] + if not isinstance(by, (list, tuple)): + by = [by] + + lby = left.groupby(by, sort=False) + rby: groupby.DataFrameGroupBy | None = None + + # if we can groupby the rhs + # then we can get vastly better perf + if all(item in right.columns for item in by): + rby = right.groupby(by, sort=False) + + for key, lhs in lby.grouper.get_iterator(lby._selected_obj, axis=lby.axis): + if rby is None: + rhs = right + else: + try: + rhs = right.take(rby.indices[key]) + except KeyError: + # key doesn't exist in left + lcols = lhs.columns.tolist() + cols = lcols + [r for r in right.columns if r not in set(lcols)] + merged = lhs.reindex(columns=cols) + merged.index = range(len(merged)) + pieces.append(merged) + continue + + merged = merge_pieces(lhs, rhs) + + # make sure join keys are in the merged + # TODO, should merge_pieces do this? + merged[by] = key + + pieces.append(merged) + + # preserve the original order + # if we have a missing piece this can be reset + from pandas.core.reshape.concat import concat + + result = concat(pieces, ignore_index=True) + result = result.reindex(columns=pieces[0].columns, copy=False) + return result, lby + + +def merge_ordered( + left: DataFrame, + right: DataFrame, + on: IndexLabel | None = None, + left_on: IndexLabel | None = None, + right_on: IndexLabel | None = None, + left_by=None, + right_by=None, + fill_method: str | None = None, + suffixes: Suffixes = ("_x", "_y"), + how: JoinHow = "outer", +) -> DataFrame: + """ + Perform a merge for ordered data with optional filling/interpolation. + + Designed for ordered data like time series data. Optionally + perform group-wise merge (see examples). + + Parameters + ---------- + left : DataFrame or named Series + right : DataFrame or named Series + on : label or list + Field names to join on. Must be found in both DataFrames. + left_on : label or list, or array-like + Field names to join on in left DataFrame. Can be a vector or list of + vectors of the length of the DataFrame to use a particular vector as + the join key instead of columns. + right_on : label or list, or array-like + Field names to join on in right DataFrame or vector/list of vectors per + left_on docs. + left_by : column name or list of column names + Group left DataFrame by group columns and merge piece by piece with + right DataFrame. Must be None if either left or right are a Series. + right_by : column name or list of column names + Group right DataFrame by group columns and merge piece by piece with + left DataFrame. Must be None if either left or right are a Series. + fill_method : {'ffill', None}, default None + Interpolation method for data. + suffixes : list-like, default is ("_x", "_y") + A length-2 sequence where each element is optionally a string + indicating the suffix to add to overlapping column names in + `left` and `right` respectively. 
Pass a value of `None` instead + of a string to indicate that the column name from `left` or + `right` should be left as-is, with no suffix. At least one of the + values must not be None. + + how : {'left', 'right', 'outer', 'inner'}, default 'outer' + * left: use only keys from left frame (SQL: left outer join) + * right: use only keys from right frame (SQL: right outer join) + * outer: use union of keys from both frames (SQL: full outer join) + * inner: use intersection of keys from both frames (SQL: inner join). + + Returns + ------- + DataFrame + The merged DataFrame output type will be the same as + 'left', if it is a subclass of DataFrame. + + See Also + -------- + merge : Merge with a database-style join. + merge_asof : Merge on nearest keys. + + Examples + -------- + >>> from pandas import merge_ordered + >>> df1 = pd.DataFrame( + ... { + ... "key": ["a", "c", "e", "a", "c", "e"], + ... "lvalue": [1, 2, 3, 1, 2, 3], + ... "group": ["a", "a", "a", "b", "b", "b"] + ... } + ... ) + >>> df1 + key lvalue group + 0 a 1 a + 1 c 2 a + 2 e 3 a + 3 a 1 b + 4 c 2 b + 5 e 3 b + + >>> df2 = pd.DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]}) + >>> df2 + key rvalue + 0 b 1 + 1 c 2 + 2 d 3 + + >>> merge_ordered(df1, df2, fill_method="ffill", left_by="group") + key lvalue group rvalue + 0 a 1 a NaN + 1 b 1 a 1.0 + 2 c 2 a 2.0 + 3 d 2 a 3.0 + 4 e 3 a 3.0 + 5 a 1 b NaN + 6 b 1 b 1.0 + 7 c 2 b 2.0 + 8 d 2 b 3.0 + 9 e 3 b 3.0 + """ + + def _merger(x, y) -> DataFrame: + # perform the ordered merge operation + op = _OrderedMerge( + x, + y, + on=on, + left_on=left_on, + right_on=right_on, + suffixes=suffixes, + fill_method=fill_method, + how=how, + ) + return op.get_result() + + if left_by is not None and right_by is not None: + raise ValueError("Can only group either left or right frames") + if left_by is not None: + if isinstance(left_by, str): + left_by = [left_by] + check = set(left_by).difference(left.columns) + if len(check) != 0: + raise KeyError(f"{check} not found in left columns") + result, _ = _groupby_and_merge(left_by, left, right, lambda x, y: _merger(x, y)) + elif right_by is not None: + if isinstance(right_by, str): + right_by = [right_by] + check = set(right_by).difference(right.columns) + if len(check) != 0: + raise KeyError(f"{check} not found in right columns") + result, _ = _groupby_and_merge( + right_by, right, left, lambda x, y: _merger(y, x) + ) + else: + result = _merger(left, right) + return result + + +def merge_asof( + left: DataFrame | Series, + right: DataFrame | Series, + on: IndexLabel | None = None, + left_on: IndexLabel | None = None, + right_on: IndexLabel | None = None, + left_index: bool = False, + right_index: bool = False, + by=None, + left_by=None, + right_by=None, + suffixes: Suffixes = ("_x", "_y"), + tolerance: int | Timedelta | None = None, + allow_exact_matches: bool = True, + direction: str = "backward", +) -> DataFrame: + """ + Perform a merge by key distance. + + This is similar to a left-join except that we match on nearest + key rather than equal keys. Both DataFrames must be sorted by the key. + + For each row in the left DataFrame: + + - A "backward" search selects the last row in the right DataFrame whose + 'on' key is less than or equal to the left's key. + + - A "forward" search selects the first row in the right DataFrame whose + 'on' key is greater than or equal to the left's key. + + - A "nearest" search selects the row in the right DataFrame whose 'on' + key is closest in absolute distance to the left's key. 
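+
+    For example, given a left 'on' key of 5 and right 'on' keys [3, 6]: a
+    "backward" search matches 3, a "forward" search matches 6, and a
+    "nearest" search matches 6, since abs(5 - 6) < abs(5 - 3).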
+ + Optionally match on equivalent keys with 'by' before searching with 'on'. + + Parameters + ---------- + left : DataFrame or named Series + right : DataFrame or named Series + on : label + Field name to join on. Must be found in both DataFrames. + The data MUST be ordered. Furthermore this must be a numeric column, + such as datetimelike, integer, or float. On or left_on/right_on + must be given. + left_on : label + Field name to join on in left DataFrame. + right_on : label + Field name to join on in right DataFrame. + left_index : bool + Use the index of the left DataFrame as the join key. + right_index : bool + Use the index of the right DataFrame as the join key. + by : column name or list of column names + Match on these columns before performing merge operation. + left_by : column name + Field names to match on in the left DataFrame. + right_by : column name + Field names to match on in the right DataFrame. + suffixes : 2-length sequence (tuple, list, ...) + Suffix to apply to overlapping column names in the left and right + side, respectively. + tolerance : int or Timedelta, optional, default None + Select asof tolerance within this range; must be compatible + with the merge index. + allow_exact_matches : bool, default True + + - If True, allow matching with the same 'on' value + (i.e. less-than-or-equal-to / greater-than-or-equal-to) + - If False, don't match the same 'on' value + (i.e., strictly less-than / strictly greater-than). + + direction : 'backward' (default), 'forward', or 'nearest' + Whether to search for prior, subsequent, or closest matches. + + Returns + ------- + DataFrame + + See Also + -------- + merge : Merge with a database-style join. + merge_ordered : Merge with optional filling/interpolation. + + Examples + -------- + >>> left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + >>> left + a left_val + 0 1 a + 1 5 b + 2 10 c + + >>> right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) + >>> right + a right_val + 0 1 1 + 1 2 2 + 2 3 3 + 3 6 6 + 4 7 7 + + >>> pd.merge_asof(left, right, on="a") + a left_val right_val + 0 1 a 1 + 1 5 b 3 + 2 10 c 7 + + >>> pd.merge_asof(left, right, on="a", allow_exact_matches=False) + a left_val right_val + 0 1 a NaN + 1 5 b 3.0 + 2 10 c 7.0 + + >>> pd.merge_asof(left, right, on="a", direction="forward") + a left_val right_val + 0 1 a 1.0 + 1 5 b 6.0 + 2 10 c NaN + + >>> pd.merge_asof(left, right, on="a", direction="nearest") + a left_val right_val + 0 1 a 1 + 1 5 b 6 + 2 10 c 7 + + We can use indexed DataFrames as well. + + >>> left = pd.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10]) + >>> left + left_val + 1 a + 5 b + 10 c + + >>> right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7]) + >>> right + right_val + 1 1 + 2 2 + 3 3 + 6 6 + 7 7 + + >>> pd.merge_asof(left, right, left_index=True, right_index=True) + left_val right_val + 1 a 1 + 5 b 3 + 10 c 7 + + Here is a real-world times-series example + + >>> quotes = pd.DataFrame( + ... { + ... "time": [ + ... pd.Timestamp("2016-05-25 13:30:00.023"), + ... pd.Timestamp("2016-05-25 13:30:00.023"), + ... pd.Timestamp("2016-05-25 13:30:00.030"), + ... pd.Timestamp("2016-05-25 13:30:00.041"), + ... pd.Timestamp("2016-05-25 13:30:00.048"), + ... pd.Timestamp("2016-05-25 13:30:00.049"), + ... pd.Timestamp("2016-05-25 13:30:00.072"), + ... pd.Timestamp("2016-05-25 13:30:00.075") + ... ], + ... "ticker": [ + ... "GOOG", + ... "MSFT", + ... "MSFT", + ... "MSFT", + ... "GOOG", + ... "AAPL", + ... "GOOG", + ... "MSFT" + ... 
], + ... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01], + ... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03] + ... } + ... ) + >>> quotes + time ticker bid ask + 0 2016-05-25 13:30:00.023 GOOG 720.50 720.93 + 1 2016-05-25 13:30:00.023 MSFT 51.95 51.96 + 2 2016-05-25 13:30:00.030 MSFT 51.97 51.98 + 3 2016-05-25 13:30:00.041 MSFT 51.99 52.00 + 4 2016-05-25 13:30:00.048 GOOG 720.50 720.93 + 5 2016-05-25 13:30:00.049 AAPL 97.99 98.01 + 6 2016-05-25 13:30:00.072 GOOG 720.50 720.88 + 7 2016-05-25 13:30:00.075 MSFT 52.01 52.03 + + >>> trades = pd.DataFrame( + ... { + ... "time": [ + ... pd.Timestamp("2016-05-25 13:30:00.023"), + ... pd.Timestamp("2016-05-25 13:30:00.038"), + ... pd.Timestamp("2016-05-25 13:30:00.048"), + ... pd.Timestamp("2016-05-25 13:30:00.048"), + ... pd.Timestamp("2016-05-25 13:30:00.048") + ... ], + ... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], + ... "price": [51.95, 51.95, 720.77, 720.92, 98.0], + ... "quantity": [75, 155, 100, 100, 100] + ... } + ... ) + >>> trades + time ticker price quantity + 0 2016-05-25 13:30:00.023 MSFT 51.95 75 + 1 2016-05-25 13:30:00.038 MSFT 51.95 155 + 2 2016-05-25 13:30:00.048 GOOG 720.77 100 + 3 2016-05-25 13:30:00.048 GOOG 720.92 100 + 4 2016-05-25 13:30:00.048 AAPL 98.00 100 + + By default we are taking the asof of the quotes + + >>> pd.merge_asof(trades, quotes, on="time", by="ticker") + time ticker price quantity bid ask + 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 + 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 + 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 + 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 + 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN + + We only asof within 2ms between the quote time and the trade time + + >>> pd.merge_asof( + ... trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms") + ... ) + time ticker price quantity bid ask + 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 + 1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN + 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 + 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 + 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN + + We only asof within 10ms between the quote time and the trade time + and we exclude exact matches on time. However *prior* data will + propagate forward + + >>> pd.merge_asof( + ... trades, + ... quotes, + ... on="time", + ... by="ticker", + ... tolerance=pd.Timedelta("10ms"), + ... allow_exact_matches=False + ... ) + time ticker price quantity bid ask + 0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN + 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 + 2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN + 3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN + 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN + """ + op = _AsOfMerge( + left, + right, + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + by=by, + left_by=left_by, + right_by=right_by, + suffixes=suffixes, + how="asof", + tolerance=tolerance, + allow_exact_matches=allow_exact_matches, + direction=direction, + ) + return op.get_result() + + +# TODO: transformations?? 
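+# ---------------------------------------------------------------------------
+# Editor's note: a minimal usage sketch of the public entry points above. It
+# is illustrative only and not part of the pandas source; `_example_usage` is
+# a hypothetical name, and the data mirrors the merge_asof docstring.
+def _example_usage() -> None:
+    import pandas as pd
+
+    left = pd.DataFrame({"k": [1, 5, 10], "left_val": ["a", "b", "c"]})
+    right = pd.DataFrame({"k": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
+
+    # Inner equi-join keeps only k == 1; validate="1:1" asserts that the key
+    # is unique on both sides (see _validate_validate_kwd below).
+    print(pd.merge(left, right, on="k", how="inner", validate="1:1"))
+
+    # Backward asof join matches each left key to the last right key <= it:
+    # 1 -> 1, 5 -> 3, 10 -> 7.
+    print(pd.merge_asof(left, right, on="k"))
+# ---------------------------------------------------------------------------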
+# TODO: only copy DataFrames when modification necessary +class _MergeOperation: + """ + Perform a database (SQL) merge operation between two DataFrame or Series + objects using either columns as keys or their row indexes + """ + + _merge_type = "merge" + how: MergeHow | Literal["asof"] + on: IndexLabel | None + # left_on/right_on may be None when passed, but in validate_specification + # get replaced with non-None. + left_on: Sequence[Hashable | AnyArrayLike] + right_on: Sequence[Hashable | AnyArrayLike] + left_index: bool + right_index: bool + sort: bool + suffixes: Suffixes + copy: bool + indicator: str | bool + validate: str | None + join_names: list[Hashable] + right_join_keys: list[ArrayLike] + left_join_keys: list[ArrayLike] + + def __init__( + self, + left: DataFrame | Series, + right: DataFrame | Series, + how: MergeHow | Literal["asof"] = "inner", + on: IndexLabel | None = None, + left_on: IndexLabel | None = None, + right_on: IndexLabel | None = None, + left_index: bool = False, + right_index: bool = False, + sort: bool = True, + suffixes: Suffixes = ("_x", "_y"), + indicator: str | bool = False, + validate: str | None = None, + ) -> None: + _left = _validate_operand(left) + _right = _validate_operand(right) + self.left = self.orig_left = _left + self.right = self.orig_right = _right + self.how = how + + self.on = com.maybe_make_list(on) + + self.suffixes = suffixes + self.sort = sort + + self.left_index = left_index + self.right_index = right_index + + self.indicator = indicator + + if not is_bool(left_index): + raise ValueError( + f"left_index parameter must be of type bool, not {type(left_index)}" + ) + if not is_bool(right_index): + raise ValueError( + f"right_index parameter must be of type bool, not {type(right_index)}" + ) + + # GH 40993: raise when merging between different levels; enforced in 2.0 + if _left.columns.nlevels != _right.columns.nlevels: + msg = ( + "Not allowed to merge between different levels. " + f"({_left.columns.nlevels} levels on the left, " + f"{_right.columns.nlevels} on the right)" + ) + raise MergeError(msg) + + self.left_on, self.right_on = self._validate_left_right_on(left_on, right_on) + + ( + self.left_join_keys, + self.right_join_keys, + self.join_names, + left_drop, + right_drop, + ) = self._get_merge_keys() + + if left_drop: + self.left = self.left._drop_labels_or_levels(left_drop) + + if right_drop: + self.right = self.right._drop_labels_or_levels(right_drop) + + self._maybe_require_matching_dtypes(self.left_join_keys, self.right_join_keys) + self._validate_tolerance(self.left_join_keys) + + # validate the merge keys dtypes. We may need to coerce + # to avoid incompatible dtypes + self._maybe_coerce_merge_keys() + + # If argument passed to validate, + # check if columns specified as unique + # are in fact unique. + if validate is not None: + self._validate_validate_kwd(validate) + + def _maybe_require_matching_dtypes( + self, left_join_keys: list[ArrayLike], right_join_keys: list[ArrayLike] + ) -> None: + # Overridden by AsOfMerge + pass + + def _validate_tolerance(self, left_join_keys: list[ArrayLike]) -> None: + # Overridden by AsOfMerge + pass + + @final + def _reindex_and_concat( + self, + join_index: Index, + left_indexer: npt.NDArray[np.intp] | None, + right_indexer: npt.NDArray[np.intp] | None, + copy: bool | None, + ) -> DataFrame: + """ + reindex along index and concat along columns. 
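+        Overlapping column labels receive the configured ``self.suffixes``
+        before the two sides are concatenated along ``axis=1``.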
+ """ + # Take views so we do not alter the originals + left = self.left[:] + right = self.right[:] + + llabels, rlabels = _items_overlap_with_suffix( + self.left._info_axis, self.right._info_axis, self.suffixes + ) + + if left_indexer is not None and not is_range_indexer(left_indexer, len(left)): + # Pinning the index here (and in the right code just below) is not + # necessary, but makes the `.take` more performant if we have e.g. + # a MultiIndex for left.index. + lmgr = left._mgr.reindex_indexer( + join_index, + left_indexer, + axis=1, + copy=False, + only_slice=True, + allow_dups=True, + use_na_proxy=True, + ) + left = left._constructor_from_mgr(lmgr, axes=lmgr.axes) + left.index = join_index + + if right_indexer is not None and not is_range_indexer( + right_indexer, len(right) + ): + rmgr = right._mgr.reindex_indexer( + join_index, + right_indexer, + axis=1, + copy=False, + only_slice=True, + allow_dups=True, + use_na_proxy=True, + ) + right = right._constructor_from_mgr(rmgr, axes=rmgr.axes) + right.index = join_index + + from pandas import concat + + left.columns = llabels + right.columns = rlabels + result = concat([left, right], axis=1, copy=copy) + return result + + def get_result(self, copy: bool | None = True) -> DataFrame: + if self.indicator: + self.left, self.right = self._indicator_pre_merge(self.left, self.right) + + join_index, left_indexer, right_indexer = self._get_join_info() + + result = self._reindex_and_concat( + join_index, left_indexer, right_indexer, copy=copy + ) + result = result.__finalize__(self, method=self._merge_type) + + if self.indicator: + result = self._indicator_post_merge(result) + + self._maybe_add_join_keys(result, left_indexer, right_indexer) + + self._maybe_restore_index_levels(result) + + return result.__finalize__(self, method="merge") + + @final + @cache_readonly + def _indicator_name(self) -> str | None: + if isinstance(self.indicator, str): + return self.indicator + elif isinstance(self.indicator, bool): + return "_merge" if self.indicator else None + else: + raise ValueError( + "indicator option can only accept boolean or string arguments" + ) + + @final + def _indicator_pre_merge( + self, left: DataFrame, right: DataFrame + ) -> tuple[DataFrame, DataFrame]: + columns = left.columns.union(right.columns) + + for i in ["_left_indicator", "_right_indicator"]: + if i in columns: + raise ValueError( + "Cannot use `indicator=True` option when " + f"data contains a column named {i}" + ) + if self._indicator_name in columns: + raise ValueError( + "Cannot use name of an existing column for indicator column" + ) + + left = left.copy() + right = right.copy() + + left["_left_indicator"] = 1 + left["_left_indicator"] = left["_left_indicator"].astype("int8") + + right["_right_indicator"] = 2 + right["_right_indicator"] = right["_right_indicator"].astype("int8") + + return left, right + + @final + def _indicator_post_merge(self, result: DataFrame) -> DataFrame: + result["_left_indicator"] = result["_left_indicator"].fillna(0) + result["_right_indicator"] = result["_right_indicator"].fillna(0) + + result[self._indicator_name] = Categorical( + (result["_left_indicator"] + result["_right_indicator"]), + categories=[1, 2, 3], + ) + result[self._indicator_name] = result[ + self._indicator_name + ].cat.rename_categories(["left_only", "right_only", "both"]) + + result = result.drop(labels=["_left_indicator", "_right_indicator"], axis=1) + return result + + @final + def _maybe_restore_index_levels(self, result: DataFrame) -> None: + """ + Restore index levels 
specified as `on` parameters + + Here we check for cases where `self.left_on` and `self.right_on` pairs + each reference an index level in their respective DataFrames. The + joined columns corresponding to these pairs are then restored to the + index of `result`. + + **Note:** This method has side effects. It modifies `result` in-place + + Parameters + ---------- + result: DataFrame + merge result + + Returns + ------- + None + """ + names_to_restore = [] + for name, left_key, right_key in zip( + self.join_names, self.left_on, self.right_on + ): + if ( + # Argument 1 to "_is_level_reference" of "NDFrame" has incompatible + # type "Union[Hashable, ExtensionArray, Index, Series]"; expected + # "Hashable" + self.orig_left._is_level_reference(left_key) # type: ignore[arg-type] + # Argument 1 to "_is_level_reference" of "NDFrame" has incompatible + # type "Union[Hashable, ExtensionArray, Index, Series]"; expected + # "Hashable" + and self.orig_right._is_level_reference( + right_key # type: ignore[arg-type] + ) + and left_key == right_key + and name not in result.index.names + ): + names_to_restore.append(name) + + if names_to_restore: + result.set_index(names_to_restore, inplace=True) + + @final + def _maybe_add_join_keys( + self, + result: DataFrame, + left_indexer: npt.NDArray[np.intp] | None, + right_indexer: npt.NDArray[np.intp] | None, + ) -> None: + left_has_missing = None + right_has_missing = None + + assert all(isinstance(x, _known) for x in self.left_join_keys) + + keys = zip(self.join_names, self.left_on, self.right_on) + for i, (name, lname, rname) in enumerate(keys): + if not _should_fill(lname, rname): + continue + + take_left, take_right = None, None + + if name in result: + if left_indexer is not None and right_indexer is not None: + if name in self.left: + if left_has_missing is None: + left_has_missing = (left_indexer == -1).any() + + if left_has_missing: + take_right = self.right_join_keys[i] + + if result[name].dtype != self.left[name].dtype: + take_left = self.left[name]._values + + elif name in self.right: + if right_has_missing is None: + right_has_missing = (right_indexer == -1).any() + + if right_has_missing: + take_left = self.left_join_keys[i] + + if result[name].dtype != self.right[name].dtype: + take_right = self.right[name]._values + + elif left_indexer is not None: + take_left = self.left_join_keys[i] + take_right = self.right_join_keys[i] + + if take_left is not None or take_right is not None: + if take_left is None: + lvals = result[name]._values + else: + # TODO: can we pin down take_left's type earlier? + take_left = extract_array(take_left, extract_numpy=True) + lfill = na_value_for_dtype(take_left.dtype) + lvals = algos.take_nd(take_left, left_indexer, fill_value=lfill) + + if take_right is None: + rvals = result[name]._values + else: + # TODO: can we pin down take_right's type earlier? 
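+                    # Mirror of the take_left branch above: materialize the
+                    # underlying array, pick an NA fill value that matches its
+                    # dtype, and realign it with take_nd using the right
+                    # indexer (rows missing on the right become the fill).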
+ taker = extract_array(take_right, extract_numpy=True) + rfill = na_value_for_dtype(taker.dtype) + rvals = algos.take_nd(taker, right_indexer, fill_value=rfill) + + # if we have an all missing left_indexer + # make sure to just use the right values or vice-versa + mask_left = left_indexer == -1 + # error: Item "bool" of "Union[Any, bool]" has no attribute "all" + if mask_left.all(): # type: ignore[union-attr] + key_col = Index(rvals) + result_dtype = rvals.dtype + elif right_indexer is not None and (right_indexer == -1).all(): + key_col = Index(lvals) + result_dtype = lvals.dtype + else: + key_col = Index(lvals).where(~mask_left, rvals) + result_dtype = find_common_type([lvals.dtype, rvals.dtype]) + if ( + lvals.dtype.kind == "M" + and rvals.dtype.kind == "M" + and result_dtype.kind == "O" + ): + # TODO(non-nano) Workaround for common_type not dealing + # with different resolutions + result_dtype = key_col.dtype + + if result._is_label_reference(name): + result[name] = result._constructor_sliced( + key_col, dtype=result_dtype, index=result.index + ) + elif result._is_level_reference(name): + if isinstance(result.index, MultiIndex): + key_col.name = name + idx_list = [ + result.index.get_level_values(level_name) + if level_name != name + else key_col + for level_name in result.index.names + ] + + result.set_index(idx_list, inplace=True) + else: + result.index = Index(key_col, name=name) + else: + result.insert(i, name or f"key_{i}", key_col) + + def _get_join_indexers(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + """return the join indexers""" + return get_join_indexers( + self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how + ) + + @final + def _get_join_info( + self, + ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: + # make mypy happy + assert self.how != "cross" + left_ax = self.left.index + right_ax = self.right.index + + if self.left_index and self.right_index and self.how != "asof": + join_index, left_indexer, right_indexer = left_ax.join( + right_ax, how=self.how, return_indexers=True, sort=self.sort + ) + + elif self.right_index and self.how == "left": + join_index, left_indexer, right_indexer = _left_join_on_index( + left_ax, right_ax, self.left_join_keys, sort=self.sort + ) + + elif self.left_index and self.how == "right": + join_index, right_indexer, left_indexer = _left_join_on_index( + right_ax, left_ax, self.right_join_keys, sort=self.sort + ) + else: + (left_indexer, right_indexer) = self._get_join_indexers() + + if self.right_index: + if len(self.left) > 0: + join_index = self._create_join_index( + left_ax, + right_ax, + left_indexer, + how="right", + ) + else: + join_index = right_ax.take(right_indexer) + elif self.left_index: + if self.how == "asof": + # GH#33463 asof should always behave like a left merge + join_index = self._create_join_index( + left_ax, + right_ax, + left_indexer, + how="left", + ) + + elif len(self.right) > 0: + join_index = self._create_join_index( + right_ax, + left_ax, + right_indexer, + how="left", + ) + else: + join_index = left_ax.take(left_indexer) + else: + join_index = default_index(len(left_indexer)) + + return join_index, left_indexer, right_indexer + + @final + def _create_join_index( + self, + index: Index, + other_index: Index, + indexer: npt.NDArray[np.intp], + how: JoinHow = "left", + ) -> Index: + """ + Create a join index by rearranging one index to match another + + Parameters + ---------- + index : Index being rearranged + other_index : Index used to supply values not 
found in index + indexer : np.ndarray[np.intp] how to rearrange index + how : str + Replacement is only necessary if indexer based on other_index. + + Returns + ------- + Index + """ + if self.how in (how, "outer") and not isinstance(other_index, MultiIndex): + # if final index requires values in other_index but not target + # index, indexer may hold missing (-1) values, causing Index.take + # to take the final value in target index. So, we set the last + # element to be the desired fill value. We do not use allow_fill + # and fill_value because it throws a ValueError on integer indices + mask = indexer == -1 + if np.any(mask): + fill_value = na_value_for_dtype(index.dtype, compat=False) + index = index.append(Index([fill_value])) + return index.take(indexer) + + @final + def _get_merge_keys( + self, + ) -> tuple[ + list[ArrayLike], + list[ArrayLike], + list[Hashable], + list[Hashable], + list[Hashable], + ]: + """ + Returns + ------- + left_keys, right_keys, join_names, left_drop, right_drop + """ + left_keys: list[ArrayLike] = [] + right_keys: list[ArrayLike] = [] + join_names: list[Hashable] = [] + right_drop: list[Hashable] = [] + left_drop: list[Hashable] = [] + + left, right = self.left, self.right + + is_lkey = lambda x: isinstance(x, _known) and len(x) == len(left) + is_rkey = lambda x: isinstance(x, _known) and len(x) == len(right) + + # Note that pd.merge_asof() has separate 'on' and 'by' parameters. A + # user could, for example, request 'left_index' and 'left_by'. In a + # regular pd.merge(), users cannot specify both 'left_index' and + # 'left_on'. (Instead, users have a MultiIndex). That means the + # self.left_on in this function is always empty in a pd.merge(), but + # a pd.merge_asof(left_index=True, left_by=...) will result in a + # self.left_on array with a None in the middle of it. This requires + # a work-around as designated in the code below. + # See _validate_left_right_on() for where this happens. + + # ugh, spaghetti re #733 + if _any(self.left_on) and _any(self.right_on): + for lk, rk in zip(self.left_on, self.right_on): + lk = extract_array(lk, extract_numpy=True) + rk = extract_array(rk, extract_numpy=True) + if is_lkey(lk): + lk = cast(ArrayLike, lk) + left_keys.append(lk) + if is_rkey(rk): + rk = cast(ArrayLike, rk) + right_keys.append(rk) + join_names.append(None) # what to do? + else: + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + rk = cast(Hashable, rk) + if rk is not None: + right_keys.append(right._get_label_or_level_values(rk)) + join_names.append(rk) + else: + # work-around for merge_asof(right_index=True) + right_keys.append(right.index._values) + join_names.append(right.index.name) + else: + if not is_rkey(rk): + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + rk = cast(Hashable, rk) + if rk is not None: + right_keys.append(right._get_label_or_level_values(rk)) + else: + # work-around for merge_asof(right_index=True) + right_keys.append(right.index._values) + if lk is not None and lk == rk: # FIXME: what about other NAs? 
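+                        # Both sides join on the same label, so keep a single
+                        # key column in the result: normally schedule the
+                        # right copy for dropping, but drop the left one when
+                        # the left frame is empty (see the upcast note below).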
+ # avoid key upcast in corner case (length-0) + lk = cast(Hashable, lk) + if len(left) > 0: + right_drop.append(rk) + else: + left_drop.append(lk) + else: + rk = cast(ArrayLike, rk) + right_keys.append(rk) + if lk is not None: + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + lk = cast(Hashable, lk) + left_keys.append(left._get_label_or_level_values(lk)) + join_names.append(lk) + else: + # work-around for merge_asof(left_index=True) + left_keys.append(left.index._values) + join_names.append(left.index.name) + elif _any(self.left_on): + for k in self.left_on: + if is_lkey(k): + k = extract_array(k, extract_numpy=True) + k = cast(ArrayLike, k) + left_keys.append(k) + join_names.append(None) + else: + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + k = cast(Hashable, k) + left_keys.append(left._get_label_or_level_values(k)) + join_names.append(k) + if isinstance(self.right.index, MultiIndex): + right_keys = [ + lev._values.take(lev_codes) + for lev, lev_codes in zip( + self.right.index.levels, self.right.index.codes + ) + ] + else: + right_keys = [self.right.index._values] + elif _any(self.right_on): + for k in self.right_on: + k = extract_array(k, extract_numpy=True) + if is_rkey(k): + k = cast(ArrayLike, k) + right_keys.append(k) + join_names.append(None) + else: + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + k = cast(Hashable, k) + right_keys.append(right._get_label_or_level_values(k)) + join_names.append(k) + if isinstance(self.left.index, MultiIndex): + left_keys = [ + lev._values.take(lev_codes) + for lev, lev_codes in zip( + self.left.index.levels, self.left.index.codes + ) + ] + else: + left_keys = [self.left.index._values] + + return left_keys, right_keys, join_names, left_drop, right_drop + + @final + def _maybe_coerce_merge_keys(self) -> None: + # we have valid merges but we may have to further + # coerce these if they are originally incompatible types + # + # for example if these are categorical, but are not dtype_equal + # or if we have object and integer dtypes + + for lk, rk, name in zip( + self.left_join_keys, self.right_join_keys, self.join_names + ): + if (len(lk) and not len(rk)) or (not len(lk) and len(rk)): + continue + + lk = extract_array(lk, extract_numpy=True) + rk = extract_array(rk, extract_numpy=True) + + lk_is_cat = isinstance(lk.dtype, CategoricalDtype) + rk_is_cat = isinstance(rk.dtype, CategoricalDtype) + lk_is_object = is_object_dtype(lk.dtype) + rk_is_object = is_object_dtype(rk.dtype) + + # if either left or right is a categorical + # then the must match exactly in categories & ordered + if lk_is_cat and rk_is_cat: + lk = cast(Categorical, lk) + rk = cast(Categorical, rk) + if lk._categories_match_up_to_permutation(rk): + continue + + elif lk_is_cat or rk_is_cat: + pass + + elif lk.dtype == rk.dtype: + continue + + msg = ( + f"You are trying to merge on {lk.dtype} and {rk.dtype} columns " + f"for key '{name}'. If you wish to proceed you should use pd.concat" + ) + + # if we are numeric, then allow differing + # kinds to proceed, eg. 
int64 and int8, int and float + # further if we are object, but we infer to + # the same, then proceed + if is_numeric_dtype(lk.dtype) and is_numeric_dtype(rk.dtype): + if lk.dtype.kind == rk.dtype.kind: + continue + + if is_extension_array_dtype(lk.dtype) and not is_extension_array_dtype( + rk.dtype + ): + ct = find_common_type([lk.dtype, rk.dtype]) + if is_extension_array_dtype(ct): + rk = ct.construct_array_type()._from_sequence(rk) # type: ignore[union-attr] # noqa: E501 + else: + rk = rk.astype(ct) # type: ignore[arg-type] + elif is_extension_array_dtype(rk.dtype): + ct = find_common_type([lk.dtype, rk.dtype]) + if is_extension_array_dtype(ct): + lk = ct.construct_array_type()._from_sequence(lk) # type: ignore[union-attr] # noqa: E501 + else: + lk = lk.astype(ct) # type: ignore[arg-type] + + # check whether ints and floats + if is_integer_dtype(rk.dtype) and is_float_dtype(lk.dtype): + # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int + with np.errstate(invalid="ignore"): + # error: Argument 1 to "astype" of "ndarray" has incompatible + # type "Union[ExtensionDtype, Any, dtype[Any]]"; expected + # "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]" + casted = lk.astype(rk.dtype) # type: ignore[arg-type] + + mask = ~np.isnan(lk) + match = lk == casted + if not match[mask].all(): + warnings.warn( + "You are merging on int and float " + "columns where the float values " + "are not equal to their int representation.", + UserWarning, + stacklevel=find_stack_level(), + ) + continue + + if is_float_dtype(rk.dtype) and is_integer_dtype(lk.dtype): + # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int + with np.errstate(invalid="ignore"): + # error: Argument 1 to "astype" of "ndarray" has incompatible + # type "Union[ExtensionDtype, Any, dtype[Any]]"; expected + # "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]" + casted = rk.astype(lk.dtype) # type: ignore[arg-type] + + mask = ~np.isnan(rk) + match = rk == casted + if not match[mask].all(): + warnings.warn( + "You are merging on int and float " + "columns where the float values " + "are not equal to their int representation.", + UserWarning, + stacklevel=find_stack_level(), + ) + continue + + # let's infer and see if we are ok + if lib.infer_dtype(lk, skipna=False) == lib.infer_dtype( + rk, skipna=False + ): + continue + + # Check if we are trying to merge on obviously + # incompatible dtypes GH 9780, GH 15800 + + # bool values are coerced to object + elif (lk_is_object and is_bool_dtype(rk.dtype)) or ( + is_bool_dtype(lk.dtype) and rk_is_object + ): + pass + + # object values are allowed to be merged + elif (lk_is_object and is_numeric_dtype(rk.dtype)) or ( + is_numeric_dtype(lk.dtype) and rk_is_object + ): + inferred_left = lib.infer_dtype(lk, skipna=False) + inferred_right = lib.infer_dtype(rk, skipna=False) + bool_types = ["integer", "mixed-integer", "boolean", "empty"] + string_types = ["string", "unicode", "mixed", "bytes", "empty"] + + # inferred bool + if inferred_left in bool_types and inferred_right in bool_types: + pass + + # unless we are merging non-string-like with string-like + elif ( + inferred_left in string_types and inferred_right not in string_types + ) or ( + inferred_right in string_types and inferred_left not in string_types + ): + raise ValueError(msg) + + # datetimelikes must match exactly + elif needs_i8_conversion(lk.dtype) and not needs_i8_conversion(rk.dtype): + raise ValueError(msg) + elif not needs_i8_conversion(lk.dtype) and needs_i8_conversion(rk.dtype): + raise 
ValueError(msg) + elif isinstance(lk.dtype, DatetimeTZDtype) and not isinstance( + rk.dtype, DatetimeTZDtype + ): + raise ValueError(msg) + elif not isinstance(lk.dtype, DatetimeTZDtype) and isinstance( + rk.dtype, DatetimeTZDtype + ): + raise ValueError(msg) + elif ( + isinstance(lk.dtype, DatetimeTZDtype) + and isinstance(rk.dtype, DatetimeTZDtype) + ) or (lk.dtype.kind == "M" and rk.dtype.kind == "M"): + # allows datetime with different resolutions + continue + + elif lk_is_object and rk_is_object: + continue + + # Houston, we have a problem! + # let's coerce to object if the dtypes aren't + # categorical, otherwise coerce to the category + # dtype. If we coerced categories to object, + # then we would lose type information on some + # columns, and end up trying to merge + # incompatible dtypes. See GH 16900. + if name in self.left.columns: + typ = cast(Categorical, lk).categories.dtype if lk_is_cat else object + self.left = self.left.copy() + self.left[name] = self.left[name].astype(typ) + if name in self.right.columns: + typ = cast(Categorical, rk).categories.dtype if rk_is_cat else object + self.right = self.right.copy() + self.right[name] = self.right[name].astype(typ) + + def _validate_left_right_on(self, left_on, right_on): + left_on = com.maybe_make_list(left_on) + right_on = com.maybe_make_list(right_on) + + # Hm, any way to make this logic less complicated?? + if self.on is None and left_on is None and right_on is None: + if self.left_index and self.right_index: + left_on, right_on = (), () + elif self.left_index: + raise MergeError("Must pass right_on or right_index=True") + elif self.right_index: + raise MergeError("Must pass left_on or left_index=True") + else: + # use the common columns + left_cols = self.left.columns + right_cols = self.right.columns + common_cols = left_cols.intersection(right_cols) + if len(common_cols) == 0: + raise MergeError( + "No common columns to perform merge on. " + f"Merge options: left_on={left_on}, " + f"right_on={right_on}, " + f"left_index={self.left_index}, " + f"right_index={self.right_index}" + ) + if ( + not left_cols.join(common_cols, how="inner").is_unique + or not right_cols.join(common_cols, how="inner").is_unique + ): + raise MergeError(f"Data columns not unique: {repr(common_cols)}") + left_on = right_on = common_cols + elif self.on is not None: + if left_on is not None or right_on is not None: + raise MergeError( + 'Can only pass argument "on" OR "left_on" ' + 'and "right_on", not a combination of both.' + ) + if self.left_index or self.right_index: + raise MergeError( + 'Can only pass argument "on" OR "left_index" ' + 'and "right_index", not a combination of both.' + ) + left_on = right_on = self.on + elif left_on is not None: + if self.left_index: + raise MergeError( + 'Can only pass argument "left_on" OR "left_index" not both.' + ) + if not self.right_index and right_on is None: + raise MergeError('Must pass "right_on" OR "right_index".') + n = len(left_on) + if self.right_index: + if len(left_on) != self.right.index.nlevels: + raise ValueError( + "len(left_on) must equal the number " + 'of levels in the index of "right"' + ) + right_on = [None] * n + elif right_on is not None: + if self.right_index: + raise MergeError( + 'Can only pass argument "right_on" OR "right_index" not both.' 
+ ) + if not self.left_index and left_on is None: + raise MergeError('Must pass "left_on" OR "left_index".') + n = len(right_on) + if self.left_index: + if len(right_on) != self.left.index.nlevels: + raise ValueError( + "len(right_on) must equal the number " + 'of levels in the index of "left"' + ) + left_on = [None] * n + if len(right_on) != len(left_on): + raise ValueError("len(right_on) must equal len(left_on)") + + return left_on, right_on + + @final + def _validate_validate_kwd(self, validate: str) -> None: + # Check uniqueness of each + if self.left_index: + left_unique = self.orig_left.index.is_unique + else: + left_unique = MultiIndex.from_arrays(self.left_join_keys).is_unique + + if self.right_index: + right_unique = self.orig_right.index.is_unique + else: + right_unique = MultiIndex.from_arrays(self.right_join_keys).is_unique + + # Check data integrity + if validate in ["one_to_one", "1:1"]: + if not left_unique and not right_unique: + raise MergeError( + "Merge keys are not unique in either left " + "or right dataset; not a one-to-one merge" + ) + if not left_unique: + raise MergeError( + "Merge keys are not unique in left dataset; not a one-to-one merge" + ) + if not right_unique: + raise MergeError( + "Merge keys are not unique in right dataset; not a one-to-one merge" + ) + + elif validate in ["one_to_many", "1:m"]: + if not left_unique: + raise MergeError( + "Merge keys are not unique in left dataset; not a one-to-many merge" + ) + + elif validate in ["many_to_one", "m:1"]: + if not right_unique: + raise MergeError( + "Merge keys are not unique in right dataset; " + "not a many-to-one merge" + ) + + elif validate in ["many_to_many", "m:m"]: + pass + + else: + raise ValueError( + f'"{validate}" is not a valid argument. ' + "Valid arguments are:\n" + '- "1:1"\n' + '- "1:m"\n' + '- "m:1"\n' + '- "m:m"\n' + '- "one_to_one"\n' + '- "one_to_many"\n' + '- "many_to_one"\n' + '- "many_to_many"' + ) + + +def get_join_indexers( + left_keys: list[ArrayLike], + right_keys: list[ArrayLike], + sort: bool = False, + how: MergeHow | Literal["asof"] = "inner", +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + """ + + Parameters + ---------- + left_keys : list[ndarray, ExtensionArray, Index, Series] + right_keys : list[ndarray, ExtensionArray, Index, Series] + sort : bool, default False + how : {'inner', 'outer', 'left', 'right'}, default 'inner' + + Returns + ------- + np.ndarray[np.intp] + Indexer into the left_keys. + np.ndarray[np.intp] + Indexer into the right_keys. + """ + assert len(left_keys) == len( + right_keys + ), "left_keys and right_keys must be the same length" + + # fast-path for empty left/right + left_n = len(left_keys[0]) + right_n = len(right_keys[0]) + if left_n == 0: + if how in ["left", "inner", "cross"]: + return _get_empty_indexer() + elif not sort and how in ["right", "outer"]: + return _get_no_sort_one_missing_indexer(right_n, True) + elif right_n == 0: + if how in ["right", "inner", "cross"]: + return _get_empty_indexer() + elif not sort and how in ["left", "outer"]: + return _get_no_sort_one_missing_indexer(left_n, False) + + # get left & right join labels and num. of levels at each location + mapped = ( + _factorize_keys(left_keys[n], right_keys[n], sort=sort, how=how) + for n in range(len(left_keys)) + ) + zipped = zip(*mapped) + llab, rlab, shape = (list(x) for x in zipped) + + # get flat i8 keys from label lists + lkey, rkey = _get_join_keys(llab, rlab, tuple(shape), sort) + + # factorize keys to a dense i8 space + # `count` is the num. 
of unique keys + # set(lkey) | set(rkey) == range(count) + + lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort, how=how) + # preserve left frame order if how == 'left' and sort == False + kwargs = {} + if how in ("left", "right"): + kwargs["sort"] = sort + join_func = { + "inner": libjoin.inner_join, + "left": libjoin.left_outer_join, + "right": lambda x, y, count, **kwargs: libjoin.left_outer_join( + y, x, count, **kwargs + )[::-1], + "outer": libjoin.full_outer_join, + }[how] + + # error: Cannot call function of unknown type + return join_func(lkey, rkey, count, **kwargs) # type: ignore[operator] + + +def restore_dropped_levels_multijoin( + left: MultiIndex, + right: MultiIndex, + dropped_level_names, + join_index: Index, + lindexer: npt.NDArray[np.intp], + rindexer: npt.NDArray[np.intp], +) -> tuple[list[Index], npt.NDArray[np.intp], list[Hashable]]: + """ + *this is an internal non-public method* + + Returns the levels, labels and names of a multi-index to multi-index join. + Depending on the type of join, this method restores the appropriate + dropped levels of the joined multi-index. + The method relies on lindexer, rindexer which hold the index positions of + left and right, where a join was feasible + + Parameters + ---------- + left : MultiIndex + left index + right : MultiIndex + right index + dropped_level_names : str array + list of non-common level names + join_index : Index + the index of the join between the + common levels of left and right + lindexer : np.ndarray[np.intp] + left indexer + rindexer : np.ndarray[np.intp] + right indexer + + Returns + ------- + levels : list of Index + levels of combined multiindexes + labels : np.ndarray[np.intp] + labels of combined multiindexes + names : List[Hashable] + names of combined multiindex levels + + """ + + def _convert_to_multiindex(index: Index) -> MultiIndex: + if isinstance(index, MultiIndex): + return index + else: + return MultiIndex.from_arrays([index._values], names=[index.name]) + + # For multi-multi joins with one overlapping level, + # the returned index if of type Index + # Assure that join_index is of type MultiIndex + # so that dropped levels can be appended + join_index = _convert_to_multiindex(join_index) + + join_levels = join_index.levels + join_codes = join_index.codes + join_names = join_index.names + + # Iterate through the levels that must be restored + for dropped_level_name in dropped_level_names: + if dropped_level_name in left.names: + idx = left + indexer = lindexer + else: + idx = right + indexer = rindexer + + # The index of the level name to be restored + name_idx = idx.names.index(dropped_level_name) + + restore_levels = idx.levels[name_idx] + # Inject -1 in the codes list where a join was not possible + # IOW indexer[i]=-1 + codes = idx.codes[name_idx] + if indexer is None: + restore_codes = codes + else: + restore_codes = algos.take_nd(codes, indexer, fill_value=-1) + + # error: Cannot determine type of "__add__" + join_levels = join_levels + [restore_levels] # type: ignore[has-type] + join_codes = join_codes + [restore_codes] + join_names = join_names + [dropped_level_name] + + return join_levels, join_codes, join_names + + +class _OrderedMerge(_MergeOperation): + _merge_type = "ordered_merge" + + def __init__( + self, + left: DataFrame | Series, + right: DataFrame | Series, + on: IndexLabel | None = None, + left_on: IndexLabel | None = None, + right_on: IndexLabel | None = None, + left_index: bool = False, + right_index: bool = False, + suffixes: Suffixes = ("_x", "_y"), + fill_method: 
str | None = None, + how: JoinHow | Literal["asof"] = "outer", + ) -> None: + self.fill_method = fill_method + _MergeOperation.__init__( + self, + left, + right, + on=on, + left_on=left_on, + left_index=left_index, + right_index=right_index, + right_on=right_on, + how=how, + suffixes=suffixes, + sort=True, # factorize sorts + ) + + def get_result(self, copy: bool | None = True) -> DataFrame: + join_index, left_indexer, right_indexer = self._get_join_info() + + left_join_indexer: npt.NDArray[np.intp] | None + right_join_indexer: npt.NDArray[np.intp] | None + + if self.fill_method == "ffill": + if left_indexer is None: + raise TypeError("left_indexer cannot be None") + left_indexer = cast("npt.NDArray[np.intp]", left_indexer) + right_indexer = cast("npt.NDArray[np.intp]", right_indexer) + left_join_indexer = libjoin.ffill_indexer(left_indexer) + right_join_indexer = libjoin.ffill_indexer(right_indexer) + else: + left_join_indexer = left_indexer + right_join_indexer = right_indexer + + result = self._reindex_and_concat( + join_index, left_join_indexer, right_join_indexer, copy=copy + ) + self._maybe_add_join_keys(result, left_indexer, right_indexer) + + return result + + +def _asof_by_function(direction: str): + name = f"asof_join_{direction}_on_X_by_Y" + return getattr(libjoin, name, None) + + +class _AsOfMerge(_OrderedMerge): + _merge_type = "asof_merge" + + def __init__( + self, + left: DataFrame | Series, + right: DataFrame | Series, + on: IndexLabel | None = None, + left_on: IndexLabel | None = None, + right_on: IndexLabel | None = None, + left_index: bool = False, + right_index: bool = False, + by=None, + left_by=None, + right_by=None, + suffixes: Suffixes = ("_x", "_y"), + how: Literal["asof"] = "asof", + tolerance=None, + allow_exact_matches: bool = True, + direction: str = "backward", + ) -> None: + self.by = by + self.left_by = left_by + self.right_by = right_by + self.tolerance = tolerance + self.allow_exact_matches = allow_exact_matches + self.direction = direction + + # check 'direction' is valid + if self.direction not in ["backward", "forward", "nearest"]: + raise MergeError(f"direction invalid: {self.direction}") + + # validate allow_exact_matches + if not is_bool(self.allow_exact_matches): + msg = ( + "allow_exact_matches must be boolean, " + f"passed {self.allow_exact_matches}" + ) + raise MergeError(msg) + + _OrderedMerge.__init__( + self, + left, + right, + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + how=how, + suffixes=suffixes, + fill_method=None, + ) + + def _validate_left_right_on(self, left_on, right_on): + left_on, right_on = super()._validate_left_right_on(left_on, right_on) + + # we only allow on to be a single item for on + if len(left_on) != 1 and not self.left_index: + raise MergeError("can only asof on a key for left") + + if len(right_on) != 1 and not self.right_index: + raise MergeError("can only asof on a key for right") + + if self.left_index and isinstance(self.left.index, MultiIndex): + raise MergeError("left can only have one index") + + if self.right_index and isinstance(self.right.index, MultiIndex): + raise MergeError("right can only have one index") + + # set 'by' columns + if self.by is not None: + if self.left_by is not None or self.right_by is not None: + raise MergeError("Can only pass by OR left_by and right_by") + self.left_by = self.right_by = self.by + if self.left_by is None and self.right_by is not None: + raise MergeError("missing left_by") + if self.left_by is not None and 
self.right_by is None: + raise MergeError("missing right_by") + + # GH#29130 Check that merge keys do not have dtype object + if not self.left_index: + left_on_0 = left_on[0] + if isinstance(left_on_0, _known): + lo_dtype = left_on_0.dtype + else: + lo_dtype = ( + self.left._get_label_or_level_values(left_on_0).dtype + if left_on_0 in self.left.columns + else self.left.index.get_level_values(left_on_0) + ) + else: + lo_dtype = self.left.index.dtype + + if not self.right_index: + right_on_0 = right_on[0] + if isinstance(right_on_0, _known): + ro_dtype = right_on_0.dtype + else: + ro_dtype = ( + self.right._get_label_or_level_values(right_on_0).dtype + if right_on_0 in self.right.columns + else self.right.index.get_level_values(right_on_0) + ) + else: + ro_dtype = self.right.index.dtype + + if is_object_dtype(lo_dtype) or is_object_dtype(ro_dtype): + raise MergeError( + f"Incompatible merge dtype, {repr(ro_dtype)} and " + f"{repr(lo_dtype)}, both sides must have numeric dtype" + ) + + # add 'by' to our key-list so we can have it in the + # output as a key + if self.left_by is not None: + if not is_list_like(self.left_by): + self.left_by = [self.left_by] + if not is_list_like(self.right_by): + self.right_by = [self.right_by] + + if len(self.left_by) != len(self.right_by): + raise MergeError("left_by and right_by must be the same length") + + left_on = self.left_by + list(left_on) + right_on = self.right_by + list(right_on) + + return left_on, right_on + + def _maybe_require_matching_dtypes( + self, left_join_keys: list[ArrayLike], right_join_keys: list[ArrayLike] + ) -> None: + # TODO: why do we do this for AsOfMerge but not the others? + + def _check_dtype_match(left: ArrayLike, right: ArrayLike, i: int): + if left.dtype != right.dtype: + if isinstance(left.dtype, CategoricalDtype) and isinstance( + right.dtype, CategoricalDtype + ): + # The generic error message is confusing for categoricals. + # + # In this function, the join keys include both the original + # ones of the merge_asof() call, and also the keys passed + # to its by= argument. Unordered but equal categories + # are not supported for the former, but will fail + # later with a ValueError, so we don't *need* to check + # for them here. 
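+                    # Illustrative example (hypothetical values): by-keys of
+                    # dtype Categorical(categories=["a", "b"]) on the left and
+                    # Categorical(categories=["a", "c"]) on the right compare
+                    # unequal, so the category-specific message below is used.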
+ msg = ( + f"incompatible merge keys [{i}] {repr(left.dtype)} and " + f"{repr(right.dtype)}, both sides category, but not equal ones" + ) + else: + msg = ( + f"incompatible merge keys [{i}] {repr(left.dtype)} and " + f"{repr(right.dtype)}, must be the same type" + ) + raise MergeError(msg) + + # validate index types are the same + for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)): + _check_dtype_match(lk, rk, i) + + if self.left_index: + lt = self.left.index._values + else: + lt = left_join_keys[-1] + + if self.right_index: + rt = self.right.index._values + else: + rt = right_join_keys[-1] + + _check_dtype_match(lt, rt, 0) + + def _validate_tolerance(self, left_join_keys: list[ArrayLike]) -> None: + # validate tolerance; datetime.timedelta or Timedelta if we have a DTI + if self.tolerance is not None: + if self.left_index: + lt = self.left.index._values + else: + lt = left_join_keys[-1] + + msg = ( + f"incompatible tolerance {self.tolerance}, must be compat " + f"with type {repr(lt.dtype)}" + ) + + if needs_i8_conversion(lt.dtype): + if not isinstance(self.tolerance, datetime.timedelta): + raise MergeError(msg) + if self.tolerance < Timedelta(0): + raise MergeError("tolerance must be positive") + + elif is_integer_dtype(lt.dtype): + if not is_integer(self.tolerance): + raise MergeError(msg) + if self.tolerance < 0: + raise MergeError("tolerance must be positive") + + elif is_float_dtype(lt.dtype): + if not is_number(self.tolerance): + raise MergeError(msg) + # error: Unsupported operand types for > ("int" and "Number") + if self.tolerance < 0: # type: ignore[operator] + raise MergeError("tolerance must be positive") + + else: + raise MergeError("key must be integer, timestamp or float") + + def _convert_values_for_libjoin( + self, values: AnyArrayLike, side: str + ) -> np.ndarray: + # we require sortedness and non-null values in the join keys + if not Index(values).is_monotonic_increasing: + if isna(values).any(): + raise ValueError(f"Merge keys contain null values on {side} side") + raise ValueError(f"{side} keys must be sorted") + + if isinstance(values, ArrowExtensionArray): + values = values._maybe_convert_datelike_array() + + if needs_i8_conversion(values.dtype): + values = values.view("i8") + + elif isinstance(values, BaseMaskedArray): + # we've verified above that no nulls exist + values = values._data + elif isinstance(values, ExtensionArray): + values = values.to_numpy() + + # error: Incompatible return value type (got "Union[ExtensionArray, + # Any, ndarray[Any, Any], ndarray[Any, dtype[Any]], Index, Series]", + # expected "ndarray[Any, Any]") + return values # type: ignore[return-value] + + def _get_join_indexers(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + """return the join indexers""" + + def flip(xs: list[ArrayLike]) -> np.ndarray: + """unlike np.transpose, this returns an array of tuples""" + + def injection(obj: ArrayLike): + if not isinstance(obj.dtype, ExtensionDtype): + # ndarray + return obj + obj = extract_array(obj) + if isinstance(obj, NDArrayBackedExtensionArray): + # fastpath for e.g. dt64tz, categorical + return obj._ndarray + # FIXME: returning obj._values_for_argsort() here doesn't + # break in any existing test cases, but i (@jbrockmendel) + # am pretty sure it should! + # e.g. + # arr = pd.array([0, pd.NA, 255], dtype="UInt8") + # will have values_for_argsort (before GH#45434) + # np.array([0, 255, 255], dtype=np.uint8) + # and the non-injectivity should make a difference somehow + # shouldn't it? 
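+                # fall back to materializing the ExtensionArray as an ndarray
+                # (object dtype for e.g. masked arrays) so it can be zipped
+                # into the structured array built below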
+ return np.asarray(obj) + + xs = [injection(x) for x in xs] + labels = list(string.ascii_lowercase[: len(xs)]) + dtypes = [x.dtype for x in xs] + labeled_dtypes = list(zip(labels, dtypes)) + return np.array(list(zip(*xs)), labeled_dtypes) + + # values to compare + left_values = ( + self.left.index._values if self.left_index else self.left_join_keys[-1] + ) + right_values = ( + self.right.index._values if self.right_index else self.right_join_keys[-1] + ) + + # _maybe_require_matching_dtypes already checked for dtype matching + assert left_values.dtype == right_values.dtype + + tolerance = self.tolerance + if tolerance is not None: + # TODO: can we reuse a tolerance-conversion function from + # e.g. TimedeltaIndex? + if needs_i8_conversion(left_values.dtype): + tolerance = Timedelta(tolerance) + # TODO: we have no test cases with PeriodDtype here; probably + # need to adjust tolerance for that case. + if left_values.dtype.kind in "mM": + # Make sure the i8 representation for tolerance + # matches that for left_values/right_values. + lvs = ensure_wrapped_if_datetimelike(left_values) + tolerance = tolerance.as_unit(lvs.unit) + + tolerance = tolerance._value + + # initial type conversion as needed + left_values = self._convert_values_for_libjoin(left_values, "left") + right_values = self._convert_values_for_libjoin(right_values, "right") + + # a "by" parameter requires special handling + if self.left_by is not None: + # remove 'on' parameter from values if one existed + if self.left_index and self.right_index: + left_by_values = self.left_join_keys + right_by_values = self.right_join_keys + else: + left_by_values = self.left_join_keys[0:-1] + right_by_values = self.right_join_keys[0:-1] + + # get tuple representation of values if more than one + if len(left_by_values) == 1: + lbv = left_by_values[0] + rbv = right_by_values[0] + + # TODO: conversions for EAs that can be no-copy. + lbv = np.asarray(lbv) + rbv = np.asarray(rbv) + if needs_i8_conversion(lbv.dtype): + lbv = lbv.view("i8") + if needs_i8_conversion(rbv.dtype): + rbv = rbv.view("i8") + else: + # We get here with non-ndarrays in test_merge_by_col_tz_aware + # and test_merge_groupby_multiple_column_with_categorical_column + lbv = flip(left_by_values) + rbv = flip(right_by_values) + lbv = ensure_object(lbv) + rbv = ensure_object(rbv) + + # error: Incompatible types in assignment (expression has type + # "Union[ndarray[Any, dtype[Any]], ndarray[Any, dtype[object_]]]", + # variable has type "List[Union[Union[ExtensionArray, + # ndarray[Any, Any]], Index, Series]]") + right_by_values = rbv # type: ignore[assignment] + # error: Incompatible types in assignment (expression has type + # "Union[ndarray[Any, dtype[Any]], ndarray[Any, dtype[object_]]]", + # variable has type "List[Union[Union[ExtensionArray, + # ndarray[Any, Any]], Index, Series]]") + left_by_values = lbv # type: ignore[assignment] + + # choose appropriate function by type + func = _asof_by_function(self.direction) + return func( + left_values, + right_values, + left_by_values, + right_by_values, + self.allow_exact_matches, + tolerance, + ) + else: + # choose appropriate function by type + func = _asof_by_function(self.direction) + return func( + left_values, + right_values, + None, + None, + self.allow_exact_matches, + tolerance, + False, + ) + + +def _get_multiindex_indexer( + join_keys: list[ArrayLike], index: MultiIndex, sort: bool +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + # left & right join labels and num. 
of levels at each location + mapped = ( + _factorize_keys(index.levels[n]._values, join_keys[n], sort=sort) + for n in range(index.nlevels) + ) + zipped = zip(*mapped) + rcodes, lcodes, shape = (list(x) for x in zipped) + if sort: + rcodes = list(map(np.take, rcodes, index.codes)) + else: + i8copy = lambda a: a.astype("i8", subok=False, copy=True) + rcodes = list(map(i8copy, index.codes)) + + # fix right labels if there were any nulls + for i, join_key in enumerate(join_keys): + mask = index.codes[i] == -1 + if mask.any(): + # check if there already was any nulls at this location + # if there was, it is factorized to `shape[i] - 1` + a = join_key[lcodes[i] == shape[i] - 1] + if a.size == 0 or not a[0] != a[0]: + shape[i] += 1 + + rcodes[i][mask] = shape[i] - 1 + + # get flat i8 join keys + lkey, rkey = _get_join_keys(lcodes, rcodes, tuple(shape), sort) + return lkey, rkey + + +def _get_empty_indexer() -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + """Return empty join indexers.""" + return ( + np.array([], dtype=np.intp), + np.array([], dtype=np.intp), + ) + + +def _get_no_sort_one_missing_indexer( + n: int, left_missing: bool +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + """ + Return join indexers where all of one side is selected without sorting + and none of the other side is selected. + + Parameters + ---------- + n : int + Length of indexers to create. + left_missing : bool + If True, the left indexer will contain only -1's. + If False, the right indexer will contain only -1's. + + Returns + ------- + np.ndarray[np.intp] + Left indexer + np.ndarray[np.intp] + Right indexer + """ + idx = np.arange(n, dtype=np.intp) + idx_missing = np.full(shape=n, fill_value=-1, dtype=np.intp) + if left_missing: + return idx_missing, idx + return idx, idx_missing + + +def _left_join_on_index( + left_ax: Index, right_ax: Index, join_keys: list[ArrayLike], sort: bool = False +) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp]]: + if isinstance(right_ax, MultiIndex): + lkey, rkey = _get_multiindex_indexer(join_keys, right_ax, sort=sort) + else: + # error: Incompatible types in assignment (expression has type + # "Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series]", + # variable has type "ndarray[Any, dtype[signedinteger[Any]]]") + lkey = join_keys[0] # type: ignore[assignment] + # error: Incompatible types in assignment (expression has type "Index", + # variable has type "ndarray[Any, dtype[signedinteger[Any]]]") + rkey = right_ax._values # type: ignore[assignment] + + left_key, right_key, count = _factorize_keys(lkey, rkey, sort=sort) + left_indexer, right_indexer = libjoin.left_outer_join( + left_key, right_key, count, sort=sort + ) + + if sort or len(left_ax) != len(left_indexer): + # if asked to sort or there are 1-to-many matches + join_index = left_ax.take(left_indexer) + return join_index, left_indexer, right_indexer + + # left frame preserves order & length of its index + return left_ax, None, right_indexer + + +def _factorize_keys( + lk: ArrayLike, + rk: ArrayLike, + sort: bool = True, + how: MergeHow | Literal["asof"] = "inner", +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]: + """ + Encode left and right keys as enumerated types. + + This is used to get the join indexers to be used when merging DataFrames. + + Parameters + ---------- + lk : ndarray, ExtensionArray + Left key. + rk : ndarray, ExtensionArray + Right key. 
+ sort : bool, defaults to True + If True, the encoding is done such that the unique elements in the + keys are sorted. + how : {'left', 'right', 'outer', 'inner'}, default 'inner' + Type of merge. + + Returns + ------- + np.ndarray[np.intp] + Left (resp. right if called with `key='right'`) labels, as enumerated type. + np.ndarray[np.intp] + Right (resp. left if called with `key='right'`) labels, as enumerated type. + int + Number of unique elements in union of left and right labels. + + See Also + -------- + merge : Merge DataFrame or named Series objects + with a database-style join. + algorithms.factorize : Encode the object as an enumerated type + or categorical variable. + + Examples + -------- + >>> lk = np.array(["a", "c", "b"]) + >>> rk = np.array(["a", "c"]) + + Here, the unique values are `'a', 'b', 'c'`. With the default + `sort=True`, the encoding will be `{0: 'a', 1: 'b', 2: 'c'}`: + + >>> pd.core.reshape.merge._factorize_keys(lk, rk) + (array([0, 2, 1]), array([0, 2]), 3) + + With the `sort=False`, the encoding will correspond to the order + in which the unique elements first appear: `{0: 'a', 1: 'c', 2: 'b'}`: + + >>> pd.core.reshape.merge._factorize_keys(lk, rk, sort=False) + (array([0, 1, 2]), array([0, 1]), 3) + """ + # TODO: if either is a RangeIndex, we can likely factorize more efficiently? + + if ( + isinstance(lk.dtype, DatetimeTZDtype) and isinstance(rk.dtype, DatetimeTZDtype) + ) or (lib.is_np_dtype(lk.dtype, "M") and lib.is_np_dtype(rk.dtype, "M")): + # Extract the ndarray (UTC-localized) values + # Note: we dont need the dtypes to match, as these can still be compared + lk, rk = cast("DatetimeArray", lk)._ensure_matching_resos(rk) + lk = cast("DatetimeArray", lk)._ndarray + rk = cast("DatetimeArray", rk)._ndarray + + elif ( + isinstance(lk.dtype, CategoricalDtype) + and isinstance(rk.dtype, CategoricalDtype) + and lk.dtype == rk.dtype + ): + assert isinstance(lk, Categorical) + assert isinstance(rk, Categorical) + # Cast rk to encoding so we can compare codes with lk + + rk = lk._encode_with_my_categories(rk) + + lk = ensure_int64(lk.codes) + rk = ensure_int64(rk.codes) + + elif isinstance(lk, ExtensionArray) and lk.dtype == rk.dtype: + if (isinstance(lk.dtype, ArrowDtype) and is_string_dtype(lk.dtype)) or ( + isinstance(lk.dtype, StringDtype) + and lk.dtype.storage in ["pyarrow", "pyarrow_numpy"] + ): + import pyarrow as pa + import pyarrow.compute as pc + + len_lk = len(lk) + lk = lk._pa_array # type: ignore[attr-defined] + rk = rk._pa_array # type: ignore[union-attr] + dc = ( + pa.chunked_array(lk.chunks + rk.chunks) # type: ignore[union-attr] + .combine_chunks() + .dictionary_encode() + ) + length = len(dc.dictionary) + + llab, rlab, count = ( + pc.fill_null(dc.indices[slice(len_lk)], length) + .to_numpy() + .astype(np.intp, copy=False), + pc.fill_null(dc.indices[slice(len_lk, None)], length) + .to_numpy() + .astype(np.intp, copy=False), + len(dc.dictionary), + ) + if dc.null_count > 0: + count += 1 + if how == "right": + return rlab, llab, count + return llab, rlab, count + + if not isinstance(lk, BaseMaskedArray) and not ( + # exclude arrow dtypes that would get cast to object + isinstance(lk.dtype, ArrowDtype) + and ( + is_numeric_dtype(lk.dtype.numpy_dtype) + or is_string_dtype(lk.dtype) + and not sort + ) + ): + lk, _ = lk._values_for_factorize() + + # error: Item "ndarray" of "Union[Any, ndarray]" has no attribute + # "_values_for_factorize" + rk, _ = rk._values_for_factorize() # type: ignore[union-attr] + + if needs_i8_conversion(lk.dtype) and lk.dtype 
== rk.dtype: + # GH#23917 TODO: Needs tests for non-matching dtypes + # GH#23917 TODO: needs tests for case where lk is integer-dtype + # and rk is datetime-dtype + lk = np.asarray(lk, dtype=np.int64) + rk = np.asarray(rk, dtype=np.int64) + + klass, lk, rk = _convert_arrays_and_get_rizer_klass(lk, rk) + + rizer = klass(max(len(lk), len(rk))) + + if isinstance(lk, BaseMaskedArray): + assert isinstance(rk, BaseMaskedArray) + llab = rizer.factorize(lk._data, mask=lk._mask) + rlab = rizer.factorize(rk._data, mask=rk._mask) + elif isinstance(lk, ArrowExtensionArray): + assert isinstance(rk, ArrowExtensionArray) + # we can only get here with numeric dtypes + # TODO: Remove when we have a Factorizer for Arrow + llab = rizer.factorize( + lk.to_numpy(na_value=1, dtype=lk.dtype.numpy_dtype), mask=lk.isna() + ) + rlab = rizer.factorize( + rk.to_numpy(na_value=1, dtype=lk.dtype.numpy_dtype), mask=rk.isna() + ) + else: + # Argument 1 to "factorize" of "ObjectFactorizer" has incompatible type + # "Union[ndarray[Any, dtype[signedinteger[_64Bit]]], + # ndarray[Any, dtype[object_]]]"; expected "ndarray[Any, dtype[object_]]" + llab = rizer.factorize(lk) # type: ignore[arg-type] + rlab = rizer.factorize(rk) # type: ignore[arg-type] + assert llab.dtype == np.dtype(np.intp), llab.dtype + assert rlab.dtype == np.dtype(np.intp), rlab.dtype + + count = rizer.get_count() + + if sort: + uniques = rizer.uniques.to_array() + llab, rlab = _sort_labels(uniques, llab, rlab) + + # NA group + lmask = llab == -1 + lany = lmask.any() + rmask = rlab == -1 + rany = rmask.any() + + if lany or rany: + if lany: + np.putmask(llab, lmask, count) + if rany: + np.putmask(rlab, rmask, count) + count += 1 + + if how == "right": + return rlab, llab, count + return llab, rlab, count + + +def _convert_arrays_and_get_rizer_klass( + lk: ArrayLike, rk: ArrayLike +) -> tuple[type[libhashtable.Factorizer], ArrayLike, ArrayLike]: + klass: type[libhashtable.Factorizer] + if is_numeric_dtype(lk.dtype): + if lk.dtype != rk.dtype: + dtype = find_common_type([lk.dtype, rk.dtype]) + if isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + if not isinstance(lk, ExtensionArray): + lk = cls._from_sequence(lk, dtype=dtype, copy=False) + else: + lk = lk.astype(dtype) + + if not isinstance(rk, ExtensionArray): + rk = cls._from_sequence(rk, dtype=dtype, copy=False) + else: + rk = rk.astype(dtype) + else: + lk = lk.astype(dtype) + rk = rk.astype(dtype) + if isinstance(lk, BaseMaskedArray): + # Invalid index type "type" for "Dict[Type[object], Type[Factorizer]]"; + # expected type "Type[object]" + klass = _factorizers[lk.dtype.type] # type: ignore[index] + elif isinstance(lk.dtype, ArrowDtype): + klass = _factorizers[lk.dtype.numpy_dtype.type] + else: + klass = _factorizers[lk.dtype.type] + + else: + klass = libhashtable.ObjectFactorizer + lk = ensure_object(lk) + rk = ensure_object(rk) + return klass, lk, rk + + +def _sort_labels( + uniques: np.ndarray, left: npt.NDArray[np.intp], right: npt.NDArray[np.intp] +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + llength = len(left) + labels = np.concatenate([left, right]) + + _, new_labels = algos.safe_sort(uniques, labels, use_na_sentinel=True) + new_left, new_right = new_labels[:llength], new_labels[llength:] + + return new_left, new_right + + +def _get_join_keys( + llab: list[npt.NDArray[np.int64 | np.intp]], + rlab: list[npt.NDArray[np.int64 | np.intp]], + shape: Shape, + sort: bool, +) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: + # how many levels can be done 
without overflow + nlev = next( + lev + for lev in range(len(shape), 0, -1) + if not is_int64_overflow_possible(shape[:lev]) + ) + + # get keys for the first `nlev` levels + stride = np.prod(shape[1:nlev], dtype="i8") + lkey = stride * llab[0].astype("i8", subok=False, copy=False) + rkey = stride * rlab[0].astype("i8", subok=False, copy=False) + + for i in range(1, nlev): + with np.errstate(divide="ignore"): + stride //= shape[i] + lkey += llab[i] * stride + rkey += rlab[i] * stride + + if nlev == len(shape): # all done! + return lkey, rkey + + # densify current keys to avoid overflow + lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort) + + llab = [lkey] + llab[nlev:] + rlab = [rkey] + rlab[nlev:] + shape = (count,) + shape[nlev:] + + return _get_join_keys(llab, rlab, shape, sort) + + +def _should_fill(lname, rname) -> bool: + if not isinstance(lname, str) or not isinstance(rname, str): + return True + return lname == rname + + +def _any(x) -> bool: + return x is not None and com.any_not_none(*x) + + +def _validate_operand(obj: DataFrame | Series) -> DataFrame: + if isinstance(obj, ABCDataFrame): + return obj + elif isinstance(obj, ABCSeries): + if obj.name is None: + raise ValueError("Cannot merge a Series without a name") + return obj.to_frame() + else: + raise TypeError( + f"Can only merge Series or DataFrame objects, a {type(obj)} was passed" + ) + + +def _items_overlap_with_suffix( + left: Index, right: Index, suffixes: Suffixes +) -> tuple[Index, Index]: + """ + Suffixes type validation. + + If two indices overlap, add suffixes to overlapping entries. + + If corresponding suffix is empty, the entry is simply converted to string. + + """ + if not is_list_like(suffixes, allow_sets=False) or isinstance(suffixes, dict): + raise TypeError( + f"Passing 'suffixes' as a {type(suffixes)}, is not supported. " + "Provide 'suffixes' as a tuple instead." + ) + + to_rename = left.intersection(right) + if len(to_rename) == 0: + return left, right + + lsuffix, rsuffix = suffixes + + if not lsuffix and not rsuffix: + raise ValueError(f"columns overlap but no suffix specified: {to_rename}") + + def renamer(x, suffix: str | None): + """ + Rename the left and right indices. + + If there is overlap, and suffix is not None, add + suffix, otherwise, leave it as-is. 
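+        For example, an overlapping label "col" with suffix "_x" is returned
+        as "col_x", while non-overlapping labels are returned unchanged.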
+ + Parameters + ---------- + x : original column name + suffix : str or None + + Returns + ------- + x : renamed column name + """ + if x in to_rename and suffix is not None: + return f"{x}{suffix}" + return x + + lrenamer = partial(renamer, suffix=lsuffix) + rrenamer = partial(renamer, suffix=rsuffix) + + llabels = left._transform_index(lrenamer) + rlabels = right._transform_index(rrenamer) + + dups = [] + if not llabels.is_unique: + # Only warn when duplicates are caused because of suffixes, already duplicated + # columns in origin should not warn + dups = llabels[(llabels.duplicated()) & (~left.duplicated())].tolist() + if not rlabels.is_unique: + dups.extend(rlabels[(rlabels.duplicated()) & (~right.duplicated())].tolist()) + if dups: + raise MergeError( + f"Passing 'suffixes' which cause duplicate columns {set(dups)} is " + f"not allowed.", + ) + + return llabels, rlabels diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/pivot.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/pivot.py new file mode 100644 index 00000000..71e3ea5b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/pivot.py @@ -0,0 +1,881 @@ +from __future__ import annotations + +from collections.abc import ( + Hashable, + Sequence, +) +from typing import ( + TYPE_CHECKING, + Callable, + cast, +) + +import numpy as np + +from pandas._libs import lib +from pandas.util._decorators import ( + Appender, + Substitution, +) + +from pandas.core.dtypes.cast import maybe_downcast_to_dtype +from pandas.core.dtypes.common import ( + is_list_like, + is_nested_list_like, + is_scalar, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) + +import pandas.core.common as com +from pandas.core.frame import _shared_docs +from pandas.core.groupby import Grouper +from pandas.core.indexes.api import ( + Index, + MultiIndex, + get_objs_combined_axis, +) +from pandas.core.reshape.concat import concat +from pandas.core.reshape.util import cartesian_product +from pandas.core.series import Series + +if TYPE_CHECKING: + from pandas._typing import ( + AggFuncType, + AggFuncTypeBase, + AggFuncTypeDict, + IndexLabel, + ) + + from pandas import DataFrame + + +# Note: We need to make sure `frame` is imported before `pivot`, otherwise +# _shared_docs['pivot_table'] will not yet exist. 
TODO: Fix this dependency +@Substitution("\ndata : DataFrame") +@Appender(_shared_docs["pivot_table"], indents=1) +def pivot_table( + data: DataFrame, + values=None, + index=None, + columns=None, + aggfunc: AggFuncType = "mean", + fill_value=None, + margins: bool = False, + dropna: bool = True, + margins_name: Hashable = "All", + observed: bool = False, + sort: bool = True, +) -> DataFrame: + index = _convert_by(index) + columns = _convert_by(columns) + + if isinstance(aggfunc, list): + pieces: list[DataFrame] = [] + keys = [] + for func in aggfunc: + _table = __internal_pivot_table( + data, + values=values, + index=index, + columns=columns, + fill_value=fill_value, + aggfunc=func, + margins=margins, + dropna=dropna, + margins_name=margins_name, + observed=observed, + sort=sort, + ) + pieces.append(_table) + keys.append(getattr(func, "__name__", func)) + + table = concat(pieces, keys=keys, axis=1) + return table.__finalize__(data, method="pivot_table") + + table = __internal_pivot_table( + data, + values, + index, + columns, + aggfunc, + fill_value, + margins, + dropna, + margins_name, + observed, + sort, + ) + return table.__finalize__(data, method="pivot_table") + + +def __internal_pivot_table( + data: DataFrame, + values, + index, + columns, + aggfunc: AggFuncTypeBase | AggFuncTypeDict, + fill_value, + margins: bool, + dropna: bool, + margins_name: Hashable, + observed: bool, + sort: bool, +) -> DataFrame: + """ + Helper of :func:`pandas.pivot_table` for any non-list ``aggfunc``. + """ + keys = index + columns + + values_passed = values is not None + if values_passed: + if is_list_like(values): + values_multi = True + values = list(values) + else: + values_multi = False + values = [values] + + # GH14938 Make sure value labels are in data + for i in values: + if i not in data: + raise KeyError(i) + + to_filter = [] + for x in keys + values: + if isinstance(x, Grouper): + x = x.key + try: + if x in data: + to_filter.append(x) + except TypeError: + pass + if len(to_filter) < len(data.columns): + data = data[to_filter] + + else: + values = data.columns + for key in keys: + try: + values = values.drop(key) + except (TypeError, ValueError, KeyError): + pass + values = list(values) + + grouped = data.groupby(keys, observed=observed, sort=sort, dropna=dropna) + agged = grouped.agg(aggfunc) + + if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns): + agged = agged.dropna(how="all") + + table = agged + + # GH17038, this check should only happen if index is defined (not None) + if table.index.nlevels > 1 and index: + # Related GH #17123 + # If index_names are integers, determine whether the integers refer + # to the level position or name. 
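+        # build `to_unstack`: unstack by level position when a level name is
+        # absent or collides with an index name, otherwise unstack by name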
+ index_names = agged.index.names[: len(index)] + to_unstack = [] + for i in range(len(index), len(keys)): + name = agged.index.names[i] + if name is None or name in index_names: + to_unstack.append(i) + else: + to_unstack.append(name) + table = agged.unstack(to_unstack, fill_value=fill_value) + + if not dropna: + if isinstance(table.index, MultiIndex): + m = MultiIndex.from_arrays( + cartesian_product(table.index.levels), names=table.index.names + ) + table = table.reindex(m, axis=0, fill_value=fill_value) + + if isinstance(table.columns, MultiIndex): + m = MultiIndex.from_arrays( + cartesian_product(table.columns.levels), names=table.columns.names + ) + table = table.reindex(m, axis=1, fill_value=fill_value) + + if sort is True and isinstance(table, ABCDataFrame): + table = table.sort_index(axis=1) + + if fill_value is not None: + table = table.fillna(fill_value) + if aggfunc is len and not observed and lib.is_integer(fill_value): + # TODO: can we avoid this? this used to be handled by + # downcast="infer" in fillna + table = table.astype(np.int64) + + if margins: + if dropna: + data = data[data.notna().all(axis=1)] + table = _add_margins( + table, + data, + values, + rows=index, + cols=columns, + aggfunc=aggfunc, + observed=dropna, + margins_name=margins_name, + fill_value=fill_value, + ) + + # discard the top level + if values_passed and not values_multi and table.columns.nlevels > 1: + table.columns = table.columns.droplevel(0) + if len(index) == 0 and len(columns) > 0: + table = table.T + + # GH 15193 Make sure empty columns are removed if dropna=True + if isinstance(table, ABCDataFrame) and dropna: + table = table.dropna(how="all", axis=1) + + return table + + +def _add_margins( + table: DataFrame | Series, + data: DataFrame, + values, + rows, + cols, + aggfunc, + observed: bool, + margins_name: Hashable = "All", + fill_value=None, +): + if not isinstance(margins_name, str): + raise ValueError("margins_name argument must be a string") + + msg = f'Conflicting name "{margins_name}" in margins' + for level in table.index.names: + if margins_name in table.index.get_level_values(level): + raise ValueError(msg) + + grand_margin = _compute_grand_margin(data, values, aggfunc, margins_name) + + if table.ndim == 2: + # i.e. DataFrame + for level in table.columns.names[1:]: + if margins_name in table.columns.get_level_values(level): + raise ValueError(msg) + + key: str | tuple[str, ...] + if len(rows) > 1: + key = (margins_name,) + ("",) * (len(rows) - 1) + else: + key = margins_name + + if not values and isinstance(table, ABCSeries): + # If there are no values and the table is a series, then there is only + # one column in the data. Compute grand margin and return it. 
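+        # the margin is appended as a single extra row labelled with `key`
+        # (the margins_name, padded with empty strings for a MultiIndex)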
+ return table._append(table._constructor({key: grand_margin[margins_name]})) + + elif values: + marginal_result_set = _generate_marginal_results( + table, data, values, rows, cols, aggfunc, observed, margins_name + ) + if not isinstance(marginal_result_set, tuple): + return marginal_result_set + result, margin_keys, row_margin = marginal_result_set + else: + # no values, and table is a DataFrame + assert isinstance(table, ABCDataFrame) + marginal_result_set = _generate_marginal_results_without_values( + table, data, rows, cols, aggfunc, observed, margins_name + ) + if not isinstance(marginal_result_set, tuple): + return marginal_result_set + result, margin_keys, row_margin = marginal_result_set + + row_margin = row_margin.reindex(result.columns, fill_value=fill_value) + # populate grand margin + for k in margin_keys: + if isinstance(k, str): + row_margin[k] = grand_margin[k] + else: + row_margin[k] = grand_margin[k[0]] + + from pandas import DataFrame + + margin_dummy = DataFrame(row_margin, columns=Index([key])).T + + row_names = result.index.names + # check the result column and leave floats + for dtype in set(result.dtypes): + if isinstance(dtype, ExtensionDtype): + # Can hold NA already + continue + + cols = result.select_dtypes([dtype]).columns + margin_dummy[cols] = margin_dummy[cols].apply( + maybe_downcast_to_dtype, args=(dtype,) + ) + result = result._append(margin_dummy) + result.index.names = row_names + + return result + + +def _compute_grand_margin( + data: DataFrame, values, aggfunc, margins_name: Hashable = "All" +): + if values: + grand_margin = {} + for k, v in data[values].items(): + try: + if isinstance(aggfunc, str): + grand_margin[k] = getattr(v, aggfunc)() + elif isinstance(aggfunc, dict): + if isinstance(aggfunc[k], str): + grand_margin[k] = getattr(v, aggfunc[k])() + else: + grand_margin[k] = aggfunc[k](v) + else: + grand_margin[k] = aggfunc(v) + except TypeError: + pass + return grand_margin + else: + return {margins_name: aggfunc(data.index)} + + +def _generate_marginal_results( + table, + data: DataFrame, + values, + rows, + cols, + aggfunc, + observed: bool, + margins_name: Hashable = "All", +): + margin_keys: list | Index + if len(cols) > 0: + # need to "interleave" the margins + table_pieces = [] + margin_keys = [] + + def _all_key(key): + return (key, margins_name) + ("",) * (len(cols) - 1) + + if len(rows) > 0: + margin = data[rows + values].groupby(rows, observed=observed).agg(aggfunc) + cat_axis = 1 + + for key, piece in table.T.groupby(level=0, observed=observed): + piece = piece.T + all_key = _all_key(key) + + # we are going to mutate this, so need to copy! 
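+                # the per-group margin computed above is then attached to the
+                # copied piece as a new `all_key` column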
+ piece = piece.copy() + piece[all_key] = margin[key] + + table_pieces.append(piece) + margin_keys.append(all_key) + else: + from pandas import DataFrame + + cat_axis = 0 + for key, piece in table.groupby(level=0, observed=observed): + if len(cols) > 1: + all_key = _all_key(key) + else: + all_key = margins_name + table_pieces.append(piece) + # GH31016 this is to calculate margin for each group, and assign + # corresponded key as index + transformed_piece = DataFrame(piece.apply(aggfunc)).T + if isinstance(piece.index, MultiIndex): + # We are adding an empty level + transformed_piece.index = MultiIndex.from_tuples( + [all_key], names=piece.index.names + [None] + ) + else: + transformed_piece.index = Index([all_key], name=piece.index.name) + + # append piece for margin into table_piece + table_pieces.append(transformed_piece) + margin_keys.append(all_key) + + if not table_pieces: + # GH 49240 + return table + else: + result = concat(table_pieces, axis=cat_axis) + + if len(rows) == 0: + return result + else: + result = table + margin_keys = table.columns + + if len(cols) > 0: + row_margin = data[cols + values].groupby(cols, observed=observed).agg(aggfunc) + row_margin = row_margin.stack(future_stack=True) + + # slight hack + new_order = [len(cols)] + list(range(len(cols))) + row_margin.index = row_margin.index.reorder_levels(new_order) + else: + row_margin = data._constructor_sliced(np.nan, index=result.columns) + + return result, margin_keys, row_margin + + +def _generate_marginal_results_without_values( + table: DataFrame, + data: DataFrame, + rows, + cols, + aggfunc, + observed: bool, + margins_name: Hashable = "All", +): + margin_keys: list | Index + if len(cols) > 0: + # need to "interleave" the margins + margin_keys = [] + + def _all_key(): + if len(cols) == 1: + return margins_name + return (margins_name,) + ("",) * (len(cols) - 1) + + if len(rows) > 0: + margin = data[rows].groupby(rows, observed=observed).apply(aggfunc) + all_key = _all_key() + table[all_key] = margin + result = table + margin_keys.append(all_key) + + else: + margin = data.groupby(level=0, axis=0, observed=observed).apply(aggfunc) + all_key = _all_key() + table[all_key] = margin + result = table + margin_keys.append(all_key) + return result + else: + result = table + margin_keys = table.columns + + if len(cols): + row_margin = data[cols].groupby(cols, observed=observed).apply(aggfunc) + else: + row_margin = Series(np.nan, index=result.columns) + + return result, margin_keys, row_margin + + +def _convert_by(by): + if by is None: + by = [] + elif ( + is_scalar(by) + or isinstance(by, (np.ndarray, Index, ABCSeries, Grouper)) + or callable(by) + ): + by = [by] + else: + by = list(by) + return by + + +@Substitution("\ndata : DataFrame") +@Appender(_shared_docs["pivot"], indents=1) +def pivot( + data: DataFrame, + *, + columns: IndexLabel, + index: IndexLabel | lib.NoDefault = lib.no_default, + values: IndexLabel | lib.NoDefault = lib.no_default, +) -> DataFrame: + columns_listlike = com.convert_to_list_like(columns) + + # If columns is None we will create a MultiIndex level with None as name + # which might cause duplicated names because None is the default for + # level names + data = data.copy(deep=False) + data.index = data.index.copy() + data.index.names = [ + name if name is not None else lib.no_default for name in data.index.names + ] + + indexed: DataFrame | Series + if values is lib.no_default: + if index is not lib.no_default: + cols = com.convert_to_list_like(index) + else: + cols = [] + + append = index is 
lib.no_default + # error: Unsupported operand types for + ("List[Any]" and "ExtensionArray") + # error: Unsupported left operand type for + ("ExtensionArray") + indexed = data.set_index( + cols + columns_listlike, append=append # type: ignore[operator] + ) + else: + if index is lib.no_default: + if isinstance(data.index, MultiIndex): + # GH 23955 + index_list = [ + data.index.get_level_values(i) for i in range(data.index.nlevels) + ] + else: + index_list = [ + data._constructor_sliced(data.index, name=data.index.name) + ] + else: + index_list = [data[idx] for idx in com.convert_to_list_like(index)] + + data_columns = [data[col] for col in columns_listlike] + index_list.extend(data_columns) + multiindex = MultiIndex.from_arrays(index_list) + + if is_list_like(values) and not isinstance(values, tuple): + # Exclude tuple because it is seen as a single column name + values = cast(Sequence[Hashable], values) + indexed = data._constructor( + data[values]._values, index=multiindex, columns=values + ) + else: + indexed = data._constructor_sliced(data[values]._values, index=multiindex) + # error: Argument 1 to "unstack" of "DataFrame" has incompatible type "Union + # [List[Any], ExtensionArray, ndarray[Any, Any], Index, Series]"; expected + # "Hashable" + result = indexed.unstack(columns_listlike) # type: ignore[arg-type] + result.index.names = [ + name if name is not lib.no_default else None for name in result.index.names + ] + + return result + + +def crosstab( + index, + columns, + values=None, + rownames=None, + colnames=None, + aggfunc=None, + margins: bool = False, + margins_name: Hashable = "All", + dropna: bool = True, + normalize: bool = False, +) -> DataFrame: + """ + Compute a simple cross tabulation of two (or more) factors. + + By default, computes a frequency table of the factors unless an + array of values and an aggregation function are passed. + + Parameters + ---------- + index : array-like, Series, or list of arrays/Series + Values to group by in the rows. + columns : array-like, Series, or list of arrays/Series + Values to group by in the columns. + values : array-like, optional + Array of values to aggregate according to the factors. + Requires `aggfunc` be specified. + rownames : sequence, default None + If passed, must match number of row arrays passed. + colnames : sequence, default None + If passed, must match number of column arrays passed. + aggfunc : function, optional + If specified, requires `values` be specified as well. + margins : bool, default False + Add row/column margins (subtotals). + margins_name : str, default 'All' + Name of the row/column that will contain the totals + when margins is True. + dropna : bool, default True + Do not include columns whose entries are all NaN. + normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False + Normalize by dividing all values by the sum of values. + + - If passed 'all' or `True`, will normalize over all values. + - If passed 'index' will normalize over each row. + - If passed 'columns' will normalize over each column. + - If margins is `True`, will also normalize margin values. + + Returns + ------- + DataFrame + Cross tabulation of the data. + + See Also + -------- + DataFrame.pivot : Reshape data based on column values. + pivot_table : Create a pivot table as a DataFrame. + + Notes + ----- + Any Series passed will have their name attributes used unless row or column + names for the cross-tabulation are specified. 
+ + Any input passed containing Categorical data will have **all** of its + categories included in the cross-tabulation, even if the actual data does + not contain any instances of a particular category. + + In the event that there aren't overlapping indexes an empty DataFrame will + be returned. + + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar", + ... "bar", "bar", "foo", "foo", "foo"], dtype=object) + >>> b = np.array(["one", "one", "one", "two", "one", "one", + ... "one", "two", "two", "two", "one"], dtype=object) + >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny", + ... "shiny", "dull", "shiny", "shiny", "shiny"], + ... dtype=object) + >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c']) + b one two + c dull shiny dull shiny + a + bar 1 2 1 0 + foo 2 2 1 2 + + Here 'c' and 'f' are not represented in the data and will not be + shown in the output because dropna is True by default. Set + dropna=False to preserve categories with no data. + + >>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c']) + >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f']) + >>> pd.crosstab(foo, bar) + col_0 d e + row_0 + a 1 0 + b 0 1 + >>> pd.crosstab(foo, bar, dropna=False) + col_0 d e f + row_0 + a 1 0 0 + b 0 1 0 + c 0 0 0 + """ + if values is None and aggfunc is not None: + raise ValueError("aggfunc cannot be used without values.") + + if values is not None and aggfunc is None: + raise ValueError("values cannot be used without an aggfunc.") + + if not is_nested_list_like(index): + index = [index] + if not is_nested_list_like(columns): + columns = [columns] + + common_idx = None + pass_objs = [x for x in index + columns if isinstance(x, (ABCSeries, ABCDataFrame))] + if pass_objs: + common_idx = get_objs_combined_axis(pass_objs, intersect=True, sort=False) + + rownames = _get_names(index, rownames, prefix="row") + colnames = _get_names(columns, colnames, prefix="col") + + # duplicate names mapped to unique names for pivot op + ( + rownames_mapper, + unique_rownames, + colnames_mapper, + unique_colnames, + ) = _build_names_mapper(rownames, colnames) + + from pandas import DataFrame + + data = { + **dict(zip(unique_rownames, index)), + **dict(zip(unique_colnames, columns)), + } + df = DataFrame(data, index=common_idx) + + if values is None: + df["__dummy__"] = 0 + kwargs = {"aggfunc": len, "fill_value": 0} + else: + df["__dummy__"] = values + kwargs = {"aggfunc": aggfunc} + + # error: Argument 7 to "pivot_table" of "DataFrame" has incompatible type + # "**Dict[str, object]"; expected "Union[...]" + table = df.pivot_table( + "__dummy__", + index=unique_rownames, + columns=unique_colnames, + margins=margins, + margins_name=margins_name, + dropna=dropna, + **kwargs, # type: ignore[arg-type] + ) + + # Post-process + if normalize is not False: + table = _normalize( + table, normalize=normalize, margins=margins, margins_name=margins_name + ) + + table = table.rename_axis(index=rownames_mapper, axis=0) + table = table.rename_axis(columns=colnames_mapper, axis=1) + + return table + + +def _normalize( + table: DataFrame, normalize, margins: bool, margins_name: Hashable = "All" +) -> DataFrame: + if not isinstance(normalize, (bool, str)): + axis_subs = {0: "index", 1: "columns"} + try: + normalize = axis_subs[normalize] + except KeyError as err: + raise ValueError("Not a valid normalize argument") from err + + if margins is False: + # Actual Normalizations + normalizers: dict[bool 
| str, Callable] = { + "all": lambda x: x / x.sum(axis=1).sum(axis=0), + "columns": lambda x: x / x.sum(), + "index": lambda x: x.div(x.sum(axis=1), axis=0), + } + + normalizers[True] = normalizers["all"] + + try: + f = normalizers[normalize] + except KeyError as err: + raise ValueError("Not a valid normalize argument") from err + + table = f(table) + table = table.fillna(0) + + elif margins is True: + # keep index and column of pivoted table + table_index = table.index + table_columns = table.columns + last_ind_or_col = table.iloc[-1, :].name + + # check if margin name is not in (for MI cases) and not equal to last + # index/column and save the column and index margin + if (margins_name not in last_ind_or_col) & (margins_name != last_ind_or_col): + raise ValueError(f"{margins_name} not in pivoted DataFrame") + column_margin = table.iloc[:-1, -1] + index_margin = table.iloc[-1, :-1] + + # keep the core table + table = table.iloc[:-1, :-1] + + # Normalize core + table = _normalize(table, normalize=normalize, margins=False) + + # Fix Margins + if normalize == "columns": + column_margin = column_margin / column_margin.sum() + table = concat([table, column_margin], axis=1) + table = table.fillna(0) + table.columns = table_columns + + elif normalize == "index": + index_margin = index_margin / index_margin.sum() + table = table._append(index_margin) + table = table.fillna(0) + table.index = table_index + + elif normalize == "all" or normalize is True: + column_margin = column_margin / column_margin.sum() + index_margin = index_margin / index_margin.sum() + index_margin.loc[margins_name] = 1 + table = concat([table, column_margin], axis=1) + table = table._append(index_margin) + + table = table.fillna(0) + table.index = table_index + table.columns = table_columns + + else: + raise ValueError("Not a valid normalize argument") + + else: + raise ValueError("Not a valid margins argument") + + return table + + +def _get_names(arrs, names, prefix: str = "row"): + if names is None: + names = [] + for i, arr in enumerate(arrs): + if isinstance(arr, ABCSeries) and arr.name is not None: + names.append(arr.name) + else: + names.append(f"{prefix}_{i}") + else: + if len(names) != len(arrs): + raise AssertionError("arrays and names must have the same length") + if not isinstance(names, list): + names = list(names) + + return names + + +def _build_names_mapper( + rownames: list[str], colnames: list[str] +) -> tuple[dict[str, str], list[str], dict[str, str], list[str]]: + """ + Given the names of a DataFrame's rows and columns, returns a set of unique row + and column names and mappers that convert to original names. + + A row or column name is replaced if it is duplicate among the rows of the inputs, + among the columns of the inputs or between the rows and the columns. 
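+
+    For example (editor's illustration): with ``rownames=['a', 'b']`` and
+    ``colnames=['b']``, the name ``'b'`` is shared, so the expected result is
+    ``({'row_1': 'b'}, ['a', 'row_1'], {'col_0': 'b'}, ['col_0'])``.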
+
+    Parameters
+    ----------
+    rownames: list[str]
+    colnames: list[str]
+
+    Returns
+    -------
+    Tuple(Dict[str, str], List[str], Dict[str, str], List[str])
+
+    rownames_mapper: dict[str, str]
+        a dictionary with new row names as keys and original rownames as values
+    unique_rownames: list[str]
+        a list of rownames with duplicate names replaced by dummy names
+    colnames_mapper: dict[str, str]
+        a dictionary with new column names as keys and original column names as values
+    unique_colnames: list[str]
+        a list of column names with duplicate names replaced by dummy names
+
+    """
+
+    def get_duplicates(names):
+        # record every name the first time it is seen; a later occurrence is
+        # a genuine duplicate (previously ``seen`` was never updated, so every
+        # name was flagged as a duplicate)
+        seen: set = set()
+        dups: set = set()
+        for name in names:
+            if name in seen:
+                dups.add(name)
+            seen.add(name)
+        return dups
+
+    shared_names = set(rownames).intersection(set(colnames))
+    dup_names = get_duplicates(rownames) | get_duplicates(colnames) | shared_names
+
+    rownames_mapper = {
+        f"row_{i}": name for i, name in enumerate(rownames) if name in dup_names
+    }
+    unique_rownames = [
+        f"row_{i}" if name in dup_names else name for i, name in enumerate(rownames)
+    ]
+
+    colnames_mapper = {
+        f"col_{i}": name for i, name in enumerate(colnames) if name in dup_names
+    }
+    unique_colnames = [
+        f"col_{i}" if name in dup_names else name for i, name in enumerate(colnames)
+    ]
+
+    return rownames_mapper, unique_rownames, colnames_mapper, unique_colnames
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/reshape.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/reshape.py
new file mode 100644
index 00000000..bf7c7a1e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/reshape.py
@@ -0,0 +1,989 @@
+from __future__ import annotations
+
+import itertools
+from typing import (
+    TYPE_CHECKING,
+    cast,
+)
+import warnings
+
+import numpy as np
+
+import pandas._libs.reshape as libreshape
+from pandas.errors import PerformanceWarning
+from pandas.util._decorators import cache_readonly
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.cast import (
+    find_common_type,
+    maybe_promote,
+)
+from pandas.core.dtypes.common import (
+    ensure_platform_int,
+    is_1d_only_ea_dtype,
+    is_integer,
+    needs_i8_conversion,
+)
+from pandas.core.dtypes.dtypes import ExtensionDtype
+from pandas.core.dtypes.missing import notna
+
+import pandas.core.algorithms as algos
+from pandas.core.algorithms import (
+    factorize,
+    unique,
+)
+from pandas.core.arrays.categorical import factorize_from_iterable
+from pandas.core.construction import ensure_wrapped_if_datetimelike
+from pandas.core.frame import DataFrame
+from pandas.core.indexes.api import (
+    Index,
+    MultiIndex,
+    RangeIndex,
+)
+from pandas.core.reshape.concat import concat
+from pandas.core.series import Series
+from pandas.core.sorting import (
+    compress_group_index,
+    decons_obs_group_ids,
+    get_compressed_ids,
+    get_group_index,
+    get_group_index_sorter,
+)
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        ArrayLike,
+        Level,
+        npt,
+    )
+
+    from pandas.core.arrays import ExtensionArray
+    from pandas.core.indexes.frozen import FrozenList
+
+
+class _Unstacker:
+    """
+    Helper class to unstack data / pivot with multi-level index
+
+    Parameters
+    ----------
+    index : MultiIndex
+    level : int or str, default last level
+        Level to "unstack". Accepts a name for the level.
+    fill_value : scalar, optional
+        Default value to fill in missing values if subgroups do not have the
+        same set of labels.
+        By default, missing values will be replaced with
+        the default fill value for that data type, NaN for float, NaT for
+        datetimelike, etc. For integer types, by default data will be
+        converted to float and missing values will be set to NaN.
+    constructor : object
+        Pandas ``DataFrame`` or subclass used to create unstacked
+        response. If None, DataFrame will be used.
+
+    Examples
+    --------
+    >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
+    ...                                    ('two', 'a'), ('two', 'b')])
+    >>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
+    >>> s
+    one  a    1
+         b    2
+    two  a    3
+         b    4
+    dtype: int64
+
+    >>> s.unstack(level=-1)
+         a  b
+    one  1  2
+    two  3  4
+
+    >>> s.unstack(level=0)
+       one  two
+    a    1    3
+    b    2    4
+
+    Returns
+    -------
+    unstacked : DataFrame
+    """
+
+    def __init__(
+        self, index: MultiIndex, level: Level, constructor, sort: bool = True
+    ) -> None:
+        self.constructor = constructor
+        self.sort = sort
+
+        self.index = index.remove_unused_levels()
+
+        self.level = self.index._get_level_number(level)
+
+        # when index includes `nan`, need to lift levels/strides by 1
+        self.lift = 1 if -1 in self.index.codes[self.level] else 0
+
+        # Note: the "pop" below alters these in-place.
+        self.new_index_levels = list(self.index.levels)
+        self.new_index_names = list(self.index.names)
+
+        self.removed_name = self.new_index_names.pop(self.level)
+        self.removed_level = self.new_index_levels.pop(self.level)
+        self.removed_level_full = index.levels[self.level]
+        if not self.sort:
+            unique_codes = unique(self.index.codes[self.level])
+            self.removed_level = self.removed_level.take(unique_codes)
+            self.removed_level_full = self.removed_level_full.take(unique_codes)
+
+        # Bug fix GH 20601
+        # If the data frame is too big, the number of unique index combinations
+        # will cause int32 overflow on Windows environments.
+        # We want to check and raise a warning before this happens.
+        num_rows = np.max([index_level.size for index_level in self.new_index_levels])
+        num_columns = self.removed_level.size
+
+        # GH20601: This forces an overflow if the number of cells is too high.
+        num_cells = num_rows * num_columns
+
+        # GH 26314: Previous ValueError raised was too restrictive for many users.
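+        # (Editor's illustration) e.g. 50_000 unique rows times 50_000 unique
+        # columns gives 2_500_000_000 cells, which exceeds
+        # np.iinfo(np.int32).max (about 2.1e9) and triggers the warning below.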
+ if num_cells > np.iinfo(np.int32).max: + warnings.warn( + f"The following operation may generate {num_cells} cells " + f"in the resulting pandas object.", + PerformanceWarning, + stacklevel=find_stack_level(), + ) + + self._make_selectors() + + @cache_readonly + def _indexer_and_to_sort( + self, + ) -> tuple[ + npt.NDArray[np.intp], + list[np.ndarray], # each has _some_ signed integer dtype + ]: + v = self.level + + codes = list(self.index.codes) + levs = list(self.index.levels) + to_sort = codes[:v] + codes[v + 1 :] + [codes[v]] + sizes = tuple(len(x) for x in levs[:v] + levs[v + 1 :] + [levs[v]]) + + comp_index, obs_ids = get_compressed_ids(to_sort, sizes) + ngroups = len(obs_ids) + + indexer = get_group_index_sorter(comp_index, ngroups) + return indexer, to_sort + + @cache_readonly + def sorted_labels(self) -> list[np.ndarray]: + indexer, to_sort = self._indexer_and_to_sort + if self.sort: + return [line.take(indexer) for line in to_sort] + return to_sort + + def _make_sorted_values(self, values: np.ndarray) -> np.ndarray: + if self.sort: + indexer, _ = self._indexer_and_to_sort + + sorted_values = algos.take_nd(values, indexer, axis=0) + return sorted_values + return values + + def _make_selectors(self): + new_levels = self.new_index_levels + + # make the mask + remaining_labels = self.sorted_labels[:-1] + level_sizes = tuple(len(x) for x in new_levels) + + comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes) + ngroups = len(obs_ids) + + comp_index = ensure_platform_int(comp_index) + stride = self.index.levshape[self.level] + self.lift + self.full_shape = ngroups, stride + + selector = self.sorted_labels[-1] + stride * comp_index + self.lift + mask = np.zeros(np.prod(self.full_shape), dtype=bool) + mask.put(selector, True) + + if mask.sum() < len(self.index): + raise ValueError("Index contains duplicate entries, cannot reshape") + + self.group_index = comp_index + self.mask = mask + if self.sort: + self.compressor = comp_index.searchsorted(np.arange(ngroups)) + else: + self.compressor = np.sort(np.unique(comp_index, return_index=True)[1]) + + @cache_readonly + def mask_all(self) -> bool: + return bool(self.mask.all()) + + @cache_readonly + def arange_result(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.bool_]]: + # We cache this for re-use in ExtensionBlock._unstack + dummy_arr = np.arange(len(self.index), dtype=np.intp) + new_values, mask = self.get_new_values(dummy_arr, fill_value=-1) + return new_values, mask.any(0) + # TODO: in all tests we have mask.any(0).all(); can we rely on that? 
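+
+    # (Editor's sketch) Worked example of _make_selectors above: for index
+    # codes [[0, 0, 1], [0, 1, 0]] with level=-1, the remaining label
+    # [0, 0, 1] compresses to group_index [0, 0, 1] (ngroups=2), stride == 2,
+    # so selector == [0, 1, 2] and mask == [True, True, True, False]; the one
+    # missing (group, level) cell is what fill_value fills in get_new_values.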
+ + def get_result(self, values, value_columns, fill_value) -> DataFrame: + if values.ndim == 1: + values = values[:, np.newaxis] + + if value_columns is None and values.shape[1] != 1: # pragma: no cover + raise ValueError("must pass column labels for multi-column data") + + values, _ = self.get_new_values(values, fill_value) + columns = self.get_new_columns(value_columns) + index = self.new_index + + return self.constructor( + values, index=index, columns=columns, dtype=values.dtype + ) + + def get_new_values(self, values, fill_value=None): + if values.ndim == 1: + values = values[:, np.newaxis] + + sorted_values = self._make_sorted_values(values) + + # place the values + length, width = self.full_shape + stride = values.shape[1] + result_width = width * stride + result_shape = (length, result_width) + mask = self.mask + mask_all = self.mask_all + + # we can simply reshape if we don't have a mask + if mask_all and len(values): + # TODO: Under what circumstances can we rely on sorted_values + # matching values? When that holds, we can slice instead + # of take (in particular for EAs) + new_values = ( + sorted_values.reshape(length, width, stride) + .swapaxes(1, 2) + .reshape(result_shape) + ) + new_mask = np.ones(result_shape, dtype=bool) + return new_values, new_mask + + dtype = values.dtype + + # if our mask is all True, then we can use our existing dtype + if mask_all: + dtype = values.dtype + new_values = np.empty(result_shape, dtype=dtype) + else: + if isinstance(dtype, ExtensionDtype): + # GH#41875 + # We are assuming that fill_value can be held by this dtype, + # unlike the non-EA case that promotes. + cls = dtype.construct_array_type() + new_values = cls._empty(result_shape, dtype=dtype) + new_values[:] = fill_value + else: + dtype, fill_value = maybe_promote(dtype, fill_value) + new_values = np.empty(result_shape, dtype=dtype) + new_values.fill(fill_value) + + name = dtype.name + new_mask = np.zeros(result_shape, dtype=bool) + + # we need to convert to a basic dtype + # and possibly coerce an input to our output dtype + # e.g. 
ints -> floats + if needs_i8_conversion(values.dtype): + sorted_values = sorted_values.view("i8") + new_values = new_values.view("i8") + else: + sorted_values = sorted_values.astype(name, copy=False) + + # fill in our values & mask + libreshape.unstack( + sorted_values, + mask.view("u1"), + stride, + length, + width, + new_values, + new_mask.view("u1"), + ) + + # reconstruct dtype if needed + if needs_i8_conversion(values.dtype): + # view as datetime64 so we can wrap in DatetimeArray and use + # DTA's view method + new_values = new_values.view("M8[ns]") + new_values = ensure_wrapped_if_datetimelike(new_values) + new_values = new_values.view(values.dtype) + + return new_values, new_mask + + def get_new_columns(self, value_columns: Index | None): + if value_columns is None: + if self.lift == 0: + return self.removed_level._rename(name=self.removed_name) + + lev = self.removed_level.insert(0, item=self.removed_level._na_value) + return lev.rename(self.removed_name) + + stride = len(self.removed_level) + self.lift + width = len(value_columns) + propagator = np.repeat(np.arange(width), stride) + + new_levels: FrozenList | list[Index] + + if isinstance(value_columns, MultiIndex): + # error: Cannot determine type of "__add__" [has-type] + new_levels = value_columns.levels + ( # type: ignore[has-type] + self.removed_level_full, + ) + new_names = value_columns.names + (self.removed_name,) + + new_codes = [lab.take(propagator) for lab in value_columns.codes] + else: + new_levels = [ + value_columns, + self.removed_level_full, + ] + new_names = [value_columns.name, self.removed_name] + new_codes = [propagator] + + repeater = self._repeater + + # The entire level is then just a repetition of the single chunk: + new_codes.append(np.tile(repeater, width)) + return MultiIndex( + levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False + ) + + @cache_readonly + def _repeater(self) -> np.ndarray: + # The two indices differ only if the unstacked level had unused items: + if len(self.removed_level_full) != len(self.removed_level): + # In this case, we remap the new codes to the original level: + repeater = self.removed_level_full.get_indexer(self.removed_level) + if self.lift: + repeater = np.insert(repeater, 0, -1) + else: + # Otherwise, we just use each level item exactly once: + stride = len(self.removed_level) + self.lift + repeater = np.arange(stride) - self.lift + + return repeater + + @cache_readonly + def new_index(self) -> MultiIndex: + # Does not depend on values or value_columns + result_codes = [lab.take(self.compressor) for lab in self.sorted_labels[:-1]] + + # construct the new index + if len(self.new_index_levels) == 1: + level, level_codes = self.new_index_levels[0], result_codes[0] + if (level_codes == -1).any(): + level = level.insert(len(level), level._na_value) + return level.take(level_codes).rename(self.new_index_names[0]) + + return MultiIndex( + levels=self.new_index_levels, + codes=result_codes, + names=self.new_index_names, + verify_integrity=False, + ) + + +def _unstack_multiple( + data: Series | DataFrame, clocs, fill_value=None, sort: bool = True +): + if len(clocs) == 0: + return data + + # NOTE: This doesn't deal with hierarchical columns yet + + index = data.index + index = cast(MultiIndex, index) # caller is responsible for checking + + # GH 19966 Make sure if MultiIndexed index has tuple name, they will be + # recognised as a whole + if clocs in index.names: + clocs = [clocs] + clocs = [index._get_level_number(i) for i in clocs] + + rlocs = [i for i in 
range(index.nlevels) if i not in clocs] + + clevels = [index.levels[i] for i in clocs] + ccodes = [index.codes[i] for i in clocs] + cnames = [index.names[i] for i in clocs] + rlevels = [index.levels[i] for i in rlocs] + rcodes = [index.codes[i] for i in rlocs] + rnames = [index.names[i] for i in rlocs] + + shape = tuple(len(x) for x in clevels) + group_index = get_group_index(ccodes, shape, sort=False, xnull=False) + + comp_ids, obs_ids = compress_group_index(group_index, sort=False) + recons_codes = decons_obs_group_ids(comp_ids, obs_ids, shape, ccodes, xnull=False) + + if not rlocs: + # Everything is in clocs, so the dummy df has a regular index + dummy_index = Index(obs_ids, name="__placeholder__") + else: + dummy_index = MultiIndex( + levels=rlevels + [obs_ids], + codes=rcodes + [comp_ids], + names=rnames + ["__placeholder__"], + verify_integrity=False, + ) + + if isinstance(data, Series): + dummy = data.copy() + dummy.index = dummy_index + + unstacked = dummy.unstack("__placeholder__", fill_value=fill_value, sort=sort) + new_levels = clevels + new_names = cnames + new_codes = recons_codes + else: + if isinstance(data.columns, MultiIndex): + result = data + while clocs: + val = clocs.pop(0) + result = result.unstack(val, fill_value=fill_value, sort=sort) + clocs = [v if v < val else v - 1 for v in clocs] + + return result + + # GH#42579 deep=False to avoid consolidating + dummy_df = data.copy(deep=False) + dummy_df.index = dummy_index + + unstacked = dummy_df.unstack( + "__placeholder__", fill_value=fill_value, sort=sort + ) + if isinstance(unstacked, Series): + unstcols = unstacked.index + else: + unstcols = unstacked.columns + assert isinstance(unstcols, MultiIndex) # for mypy + new_levels = [unstcols.levels[0]] + clevels + new_names = [data.columns.name] + cnames + + new_codes = [unstcols.codes[0]] + new_codes.extend(rec.take(unstcols.codes[-1]) for rec in recons_codes) + + new_columns = MultiIndex( + levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False + ) + + if isinstance(unstacked, Series): + unstacked.index = new_columns + else: + unstacked.columns = new_columns + + return unstacked + + +def unstack(obj: Series | DataFrame, level, fill_value=None, sort: bool = True): + if isinstance(level, (tuple, list)): + if len(level) != 1: + # _unstack_multiple only handles MultiIndexes, + # and isn't needed for a single level + return _unstack_multiple(obj, level, fill_value=fill_value, sort=sort) + else: + level = level[0] + + if not is_integer(level) and not level == "__placeholder__": + # check if level is valid in case of regular index + obj.index._get_level_number(level) + + if isinstance(obj, DataFrame): + if isinstance(obj.index, MultiIndex): + return _unstack_frame(obj, level, fill_value=fill_value, sort=sort) + else: + return obj.T.stack(future_stack=True) + elif not isinstance(obj.index, MultiIndex): + # GH 36113 + # Give nicer error messages when unstack a Series whose + # Index is not a MultiIndex. 
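+        # (Editor's illustration) e.g. pd.Series([1, 2]).unstack() ends up
+        # here: a flat RangeIndex has no second level to pivot into columns.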
+
+        raise ValueError(
+            f"index must be a MultiIndex to unstack, {type(obj.index)} was passed"
+        )
+    else:
+        if is_1d_only_ea_dtype(obj.dtype):
+            return _unstack_extension_series(obj, level, fill_value, sort=sort)
+        unstacker = _Unstacker(
+            obj.index, level=level, constructor=obj._constructor_expanddim, sort=sort
+        )
+        return unstacker.get_result(
+            obj._values, value_columns=None, fill_value=fill_value
+        )
+
+
+def _unstack_frame(
+    obj: DataFrame, level, fill_value=None, sort: bool = True
+) -> DataFrame:
+    assert isinstance(obj.index, MultiIndex)  # checked by caller
+    unstacker = _Unstacker(
+        obj.index, level=level, constructor=obj._constructor, sort=sort
+    )
+
+    if not obj._can_fast_transpose:
+        mgr = obj._mgr.unstack(unstacker, fill_value=fill_value)
+        return obj._constructor_from_mgr(mgr, axes=mgr.axes)
+    else:
+        return unstacker.get_result(
+            obj._values, value_columns=obj.columns, fill_value=fill_value
+        )
+
+
+def _unstack_extension_series(
+    series: Series, level, fill_value, sort: bool
+) -> DataFrame:
+    """
+    Unstack an ExtensionArray-backed Series.
+
+    The ExtensionDtype is preserved.
+
+    Parameters
+    ----------
+    series : Series
+        A Series with an ExtensionArray for values
+    level : Any
+        The level name or number.
+    fill_value : Any
+        The user-level (not physical storage) fill value to use for
+        missing values introduced by the reshape. Passed to
+        ``series.values.take``.
+    sort : bool
+        Whether to sort the resulting MultiIndex levels
+
+    Returns
+    -------
+    DataFrame
+        Each column of the DataFrame will have the same dtype as
+        the input Series.
+    """
+    # Defer to the logic in ExtensionBlock._unstack
+    df = series.to_frame()
+    result = df.unstack(level=level, fill_value=fill_value, sort=sort)
+
+    # equiv: result.droplevel(level=0, axis=1)
+    # but this avoids an extra copy
+    result.columns = result.columns.droplevel(0)
+    return result
+
+
+def stack(frame: DataFrame, level=-1, dropna: bool = True, sort: bool = True):
+    """
+    Convert DataFrame to Series with multi-level Index. Columns become the
+    second level of the resulting hierarchical index.
+
+    Returns
+    -------
+    stacked : Series or DataFrame
+    """
+
+    def stack_factorize(index):
+        if index.is_unique:
+            return index, np.arange(len(index))
+        codes, categories = factorize_from_iterable(index)
+        return categories, codes
+
+    N, K = frame.shape
+
+    # Will also convert negative level numbers and check if out of bounds.
+    level_num = frame.columns._get_level_number(level)
+
+    if isinstance(frame.columns, MultiIndex):
+        return _stack_multi_columns(
+            frame, level_num=level_num, dropna=dropna, sort=sort
+        )
+    elif isinstance(frame.index, MultiIndex):
+        new_levels = list(frame.index.levels)
+        new_codes = [lab.repeat(K) for lab in frame.index.codes]
+
+        clev, clab = stack_factorize(frame.columns)
+        new_levels.append(clev)
+        new_codes.append(np.tile(clab, N).ravel())
+
+        new_names = list(frame.index.names)
+        new_names.append(frame.columns.name)
+        new_index = MultiIndex(
+            levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
+        )
+    else:
+        levels, (ilab, clab) = zip(*map(stack_factorize, (frame.index, frame.columns)))
+        codes = ilab.repeat(K), np.tile(clab, N).ravel()
+        new_index = MultiIndex(
+            levels=levels,
+            codes=codes,
+            names=[frame.index.name, frame.columns.name],
+            verify_integrity=False,
+        )
+
+    new_values: ArrayLike
+    if not frame.empty and frame._is_homogeneous_type:
+        # For homogeneous EAs, frame._values will coerce to object. So
+        # we concatenate instead.
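+        # (Editor's illustration) e.g. stacking two Int64 columns concatenates
+        # their IntegerArrays and then reorders the result with
+        # _reorder_for_extension_array_stack, preserving the Int64 dtype.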
+ dtypes = list(frame.dtypes._values) + dtype = dtypes[0] + + if isinstance(dtype, ExtensionDtype): + arr = dtype.construct_array_type() + new_values = arr._concat_same_type( + [col._values for _, col in frame.items()] + ) + new_values = _reorder_for_extension_array_stack(new_values, N, K) + else: + # homogeneous, non-EA + new_values = frame._values.ravel() + + else: + # non-homogeneous + new_values = frame._values.ravel() + + if dropna: + mask = notna(new_values) + new_values = new_values[mask] + new_index = new_index[mask] + + return frame._constructor_sliced(new_values, index=new_index) + + +def stack_multiple(frame: DataFrame, level, dropna: bool = True, sort: bool = True): + # If all passed levels match up to column names, no + # ambiguity about what to do + if all(lev in frame.columns.names for lev in level): + result = frame + for lev in level: + result = stack(result, lev, dropna=dropna, sort=sort) + + # Otherwise, level numbers may change as each successive level is stacked + elif all(isinstance(lev, int) for lev in level): + # As each stack is done, the level numbers decrease, so we need + # to account for that when level is a sequence of ints + result = frame + # _get_level_number() checks level numbers are in range and converts + # negative numbers to positive + level = [frame.columns._get_level_number(lev) for lev in level] + + while level: + lev = level.pop(0) + result = stack(result, lev, dropna=dropna, sort=sort) + # Decrement all level numbers greater than current, as these + # have now shifted down by one + level = [v if v <= lev else v - 1 for v in level] + + else: + raise ValueError( + "level should contain all level names or all level " + "numbers, not a mixture of the two." + ) + + return result + + +def _stack_multi_column_index(columns: MultiIndex) -> MultiIndex: + """Creates a MultiIndex from the first N-1 levels of this MultiIndex.""" + if len(columns.levels) <= 2: + return columns.levels[0]._rename(name=columns.names[0]) + + levs = [ + [lev[c] if c >= 0 else None for c in codes] + for lev, codes in zip(columns.levels[:-1], columns.codes[:-1]) + ] + + # Remove duplicate tuples in the MultiIndex. + tuples = zip(*levs) + unique_tuples = (key for key, _ in itertools.groupby(tuples)) + new_levs = zip(*unique_tuples) + + # The dtype of each level must be explicitly set to avoid inferring the wrong type. + # See GH-36991. + return MultiIndex.from_arrays( + [ + # Not all indices can accept None values. + Index(new_lev, dtype=lev.dtype) if None not in new_lev else new_lev + for new_lev, lev in zip(new_levs, columns.levels) + ], + names=columns.names[:-1], + ) + + +def _stack_multi_columns( + frame: DataFrame, level_num: int = -1, dropna: bool = True, sort: bool = True +) -> DataFrame: + def _convert_level_number(level_num: int, columns: Index): + """ + Logic for converting the level number to something we can safely pass + to swaplevel. + + If `level_num` matches a column name return the name from + position `level_num`, otherwise return `level_num`. 
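+
+        For example (editor's illustration): if ``columns.names == ['x', 0]``,
+        then ``level_num=0`` collides with the name ``0`` at another position,
+        so the name ``'x'`` at position 0 is returned instead of the number.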
+ """ + if level_num in columns.names: + return columns.names[level_num] + + return level_num + + this = frame.copy(deep=False) + mi_cols = this.columns # cast(MultiIndex, this.columns) + assert isinstance(mi_cols, MultiIndex) # caller is responsible + + # this makes life much simpler + if level_num != mi_cols.nlevels - 1: + # roll levels to put selected level at end + roll_columns = mi_cols + for i in range(level_num, mi_cols.nlevels - 1): + # Need to check if the ints conflict with level names + lev1 = _convert_level_number(i, roll_columns) + lev2 = _convert_level_number(i + 1, roll_columns) + roll_columns = roll_columns.swaplevel(lev1, lev2) + this.columns = mi_cols = roll_columns + + if not mi_cols._is_lexsorted() and sort: + # Workaround the edge case where 0 is one of the column names, + # which interferes with trying to sort based on the first + # level + level_to_sort = _convert_level_number(0, mi_cols) + this = this.sort_index(level=level_to_sort, axis=1) + mi_cols = this.columns + + mi_cols = cast(MultiIndex, mi_cols) + new_columns = _stack_multi_column_index(mi_cols) + + # time to ravel the values + new_data = {} + level_vals = mi_cols.levels[-1] + level_codes = unique(mi_cols.codes[-1]) + if sort: + level_codes = np.sort(level_codes) + level_vals_nan = level_vals.insert(len(level_vals), None) + + level_vals_used = np.take(level_vals_nan, level_codes) + levsize = len(level_codes) + drop_cols = [] + for key in new_columns: + try: + loc = this.columns.get_loc(key) + except KeyError: + drop_cols.append(key) + continue + + # can make more efficient? + # we almost always return a slice + # but if unsorted can get a boolean + # indexer + if not isinstance(loc, slice): + slice_len = len(loc) + else: + slice_len = loc.stop - loc.start + + if slice_len != levsize: + chunk = this.loc[:, this.columns[loc]] + chunk.columns = level_vals_nan.take(chunk.columns.codes[-1]) + value_slice = chunk.reindex(columns=level_vals_used).values + else: + subset = this.iloc[:, loc] + dtype = find_common_type(subset.dtypes.tolist()) + if isinstance(dtype, ExtensionDtype): + # TODO(EA2D): won't need special case, can go through .values + # paths below (might change to ._values) + value_slice = dtype.construct_array_type()._concat_same_type( + [x._values.astype(dtype, copy=False) for _, x in subset.items()] + ) + N, K = subset.shape + idx = np.arange(N * K).reshape(K, N).T.ravel() + value_slice = value_slice.take(idx) + else: + value_slice = subset.values + + if value_slice.ndim > 1: + # i.e. not extension + value_slice = value_slice.ravel() + + new_data[key] = value_slice + + if len(drop_cols) > 0: + new_columns = new_columns.difference(drop_cols) + + N = len(this) + + if isinstance(this.index, MultiIndex): + new_levels = list(this.index.levels) + new_names = list(this.index.names) + new_codes = [lab.repeat(levsize) for lab in this.index.codes] + else: + old_codes, old_levels = factorize_from_iterable(this.index) + new_levels = [old_levels] + new_codes = [old_codes.repeat(levsize)] + new_names = [this.index.name] # something better? 
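+    # (Editor's sketch) The repeat/tile pattern here is the usual way to build
+    # stacked MultiIndex codes: with N == 2 rows and levsize == 3, row codes
+    # [0, 1] become [0, 0, 0, 1, 1, 1] via repeat, while the three column
+    # codes cycle as [0, 1, 2, 0, 1, 2] via np.tile below.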
+
+    new_levels.append(level_vals)
+    new_codes.append(np.tile(level_codes, N))
+    new_names.append(frame.columns.names[level_num])
+
+    new_index = MultiIndex(
+        levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
+    )
+
+    result = frame._constructor(new_data, index=new_index, columns=new_columns)
+
+    if frame.columns.nlevels > 1:
+        desired_columns = frame.columns._drop_level_numbers([level_num]).unique()
+        if not result.columns.equals(desired_columns):
+            result = result[desired_columns]
+
+    # more efficient way to go about this? can do the whole masking biz but
+    # will only save a small amount of time...
+    if dropna:
+        result = result.dropna(axis=0, how="all")
+
+    return result
+
+
+def _reorder_for_extension_array_stack(
+    arr: ExtensionArray, n_rows: int, n_columns: int
+) -> ExtensionArray:
+    """
+    Re-orders the values when stacking multiple extension-arrays.
+
+    The indirect stacking method used for EAs requires a followup
+    take to get the order correct.
+
+    Parameters
+    ----------
+    arr : ExtensionArray
+    n_rows, n_columns : int
+        The number of rows and columns in the original DataFrame.
+
+    Returns
+    -------
+    taken : ExtensionArray
+        The original `arr` with elements re-ordered appropriately
+
+    Examples
+    --------
+    >>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f'])
+    >>> _reorder_for_extension_array_stack(arr, 2, 3)
+    array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1')
+
+    >>> _reorder_for_extension_array_stack(arr, 3, 2)
+    array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')
+    """
+    # final take to get the order correct.
+    # idx is an indexer like
+    # [c0r0, c1r0, c2r0, ...,
+    #  c0r1, c1r1, c2r1, ...]
+    idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel()
+    return arr.take(idx)
+
+
+def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame:
+    if frame.columns.nunique() != len(frame.columns):
+        raise ValueError("Columns with duplicate values are not supported in stack")
+
+    # If we need to drop `level` from columns, it needs to be in descending order
+    drop_levnums = sorted(level, reverse=True)
+    stack_cols = frame.columns._drop_level_numbers(
+        [k for k in range(frame.columns.nlevels) if k not in level][::-1]
+    )
+    if len(level) > 1:
+        # Arrange columns in the order we want to take them, e.g.
level=[2, 0, 1] + sorter = np.argsort(level) + ordered_stack_cols = stack_cols._reorder_ilevels(sorter) + else: + ordered_stack_cols = stack_cols + + stack_cols_unique = stack_cols.unique() + ordered_stack_cols_unique = ordered_stack_cols.unique() + + # Grab data for each unique index to be stacked + buf = [] + for idx in stack_cols_unique: + if len(frame.columns) == 1: + data = frame.copy() + else: + # Take the data from frame corresponding to this idx value + if len(level) == 1: + idx = (idx,) + gen = iter(idx) + column_indexer = tuple( + next(gen) if k in level else slice(None) + for k in range(frame.columns.nlevels) + ) + data = frame.loc[:, column_indexer] + + if len(level) < frame.columns.nlevels: + data.columns = data.columns._drop_level_numbers(drop_levnums) + elif stack_cols.nlevels == 1: + if data.ndim == 1: + data.name = 0 + else: + data.columns = RangeIndex(len(data.columns)) + buf.append(data) + + result: Series | DataFrame + if len(buf) > 0 and not frame.empty: + result = concat(buf) + ratio = len(result) // len(frame) + else: + # input is empty + if len(level) < frame.columns.nlevels: + # concat column order may be different from dropping the levels + new_columns = frame.columns._drop_level_numbers(drop_levnums).unique() + else: + new_columns = [0] + result = DataFrame(columns=new_columns, dtype=frame._values.dtype) + ratio = 0 + + if len(level) < frame.columns.nlevels: + # concat column order may be different from dropping the levels + desired_columns = frame.columns._drop_level_numbers(drop_levnums).unique() + if not result.columns.equals(desired_columns): + result = result[desired_columns] + + # Construct the correct MultiIndex by combining the frame's index and + # stacked columns. + index_levels: list | FrozenList + if isinstance(frame.index, MultiIndex): + index_levels = frame.index.levels + index_codes = list(np.tile(frame.index.codes, (1, ratio))) + else: + index_levels = [frame.index.unique()] + codes = factorize(frame.index)[0] + index_codes = list(np.tile(codes, (1, ratio))) + if isinstance(stack_cols, MultiIndex): + column_levels = ordered_stack_cols.levels + column_codes = ordered_stack_cols.drop_duplicates().codes + else: + column_levels = [ordered_stack_cols.unique()] + column_codes = [factorize(ordered_stack_cols_unique, use_na_sentinel=False)[0]] + column_codes = [np.repeat(codes, len(frame)) for codes in column_codes] + result.index = MultiIndex( + levels=index_levels + column_levels, + codes=index_codes + column_codes, + names=frame.index.names + list(ordered_stack_cols.names), + verify_integrity=False, + ) + + # sort result, but faster than calling sort_index since we know the order we need + len_df = len(frame) + n_uniques = len(ordered_stack_cols_unique) + indexer = np.arange(n_uniques) + idxs = np.tile(len_df * indexer, len_df) + np.repeat(np.arange(len_df), n_uniques) + result = result.take(idxs) + + # Reshape/rename if needed and dropna + if result.ndim == 2 and frame.columns.nlevels == len(level): + if len(result.columns) == 0: + result = Series(index=result.index) + else: + result = result.iloc[:, 0] + if result.ndim == 1: + result.name = None + + return result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/tile.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/tile.py new file mode 100644 index 00000000..43eea7c6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/tile.py @@ -0,0 +1,657 @@ +""" +Quantilization functions and related stuff +""" +from __future__ import annotations + +from 
typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, +) + +import numpy as np + +from pandas._libs import ( + Timedelta, + Timestamp, + lib, +) +from pandas._libs.lib import infer_dtype + +from pandas.core.dtypes.common import ( + DT64NS_DTYPE, + ensure_platform_int, + is_bool_dtype, + is_integer, + is_list_like, + is_numeric_dtype, + is_scalar, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, +) +from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.missing import isna + +from pandas import ( + Categorical, + Index, + IntervalIndex, + to_datetime, + to_timedelta, +) +from pandas.core import nanops +import pandas.core.algorithms as algos + +if TYPE_CHECKING: + from pandas._typing import ( + DtypeObj, + IntervalLeftRight, + ) + + +def cut( + x, + bins, + right: bool = True, + labels=None, + retbins: bool = False, + precision: int = 3, + include_lowest: bool = False, + duplicates: str = "raise", + ordered: bool = True, +): + """ + Bin values into discrete intervals. + + Use `cut` when you need to segment and sort data values into bins. This + function is also useful for going from a continuous variable to a + categorical variable. For example, `cut` could convert ages to groups of + age ranges. Supports binning into an equal number of bins, or a + pre-specified array of bins. + + Parameters + ---------- + x : array-like + The input array to be binned. Must be 1-dimensional. + bins : int, sequence of scalars, or IntervalIndex + The criteria to bin by. + + * int : Defines the number of equal-width bins in the range of `x`. The + range of `x` is extended by .1% on each side to include the minimum + and maximum values of `x`. + * sequence of scalars : Defines the bin edges allowing for non-uniform + width. No extension of the range of `x` is done. + * IntervalIndex : Defines the exact bins to be used. Note that + IntervalIndex for `bins` must be non-overlapping. + + right : bool, default True + Indicates whether `bins` includes the rightmost edge or not. If + ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` + indicate (1,2], (2,3], (3,4]. This argument is ignored when + `bins` is an IntervalIndex. + labels : array or False, default None + Specifies the labels for the returned bins. Must be the same length as + the resulting bins. If False, returns only integer indicators of the + bins. This affects the type of the output container (see below). + This argument is ignored when `bins` is an IntervalIndex. If True, + raises an error. When `ordered=False`, labels must be provided. + retbins : bool, default False + Whether to return the bins or not. Useful when bins is provided + as a scalar. + precision : int, default 3 + The precision at which to store and display the bins labels. + include_lowest : bool, default False + Whether the first interval should be left-inclusive or not. + duplicates : {default 'raise', 'drop'}, optional + If bin edges are not unique, raise ValueError or drop non-uniques. + ordered : bool, default True + Whether the labels are ordered or not. Applies to returned types + Categorical and Series (with Categorical dtype). If True, + the resulting categorical will be ordered. If False, the resulting + categorical will be unordered (labels must be provided). + + Returns + ------- + out : Categorical, Series, or ndarray + An array-like object representing the respective bin for each value + of `x`. The type depends on the value of `labels`. 
+
+        * None (default) : returns a Series for Series `x` or a
+          Categorical for all other inputs. The values stored within
+          are Interval dtype.
+
+        * sequence of scalars : returns a Series for Series `x` or a
+          Categorical for all other inputs. The values stored within
+          are whatever the type in the sequence is.
+
+        * False : returns an ndarray of integers.
+
+    bins : numpy.ndarray or IntervalIndex.
+        The computed or specified bins. Only returned when `retbins=True`.
+        For scalar or sequence `bins`, this is an ndarray with the computed
+        bins. If `duplicates='drop'` is set, `bins` will drop non-unique bins.
+        For an IntervalIndex `bins`, this is equal to `bins`.
+
+    See Also
+    --------
+    qcut : Discretize variable into equal-sized buckets based on rank
+        or based on sample quantiles.
+    Categorical : Array type for storing data that come from a
+        fixed set of values.
+    Series : One-dimensional array with axis labels (including time series).
+    IntervalIndex : Immutable Index implementing an ordered, sliceable set.
+
+    Notes
+    -----
+    Any NA values will be NA in the result. Out of bounds values will be NA in
+    the resulting Series or Categorical object.
+
+    Reference :ref:`the user guide ` for more examples.
+
+    Examples
+    --------
+    Discretize into three equal-sized bins.
+
+    >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)
+    ... # doctest: +ELLIPSIS
+    [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
+    Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ...
+
+    >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)
+    ... # doctest: +ELLIPSIS
+    ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
+    Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ...
+    array([0.994, 3.   , 5.   , 7.   ]))
+
+    Discover the same bins, but assign them specific labels. Notice that
+    the returned Categorical's categories are `labels` and are ordered.
+
+    >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),
+    ...        3, labels=["bad", "medium", "good"])
+    ['bad', 'good', 'medium', 'medium', 'good', 'bad']
+    Categories (3, object): ['bad' < 'medium' < 'good']
+
+    ``ordered=False`` will result in unordered categories when labels are passed.
+    This parameter can be used to allow non-unique labels:
+
+    >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3,
+    ...        labels=["B", "A", "B"], ordered=False)
+    ['B', 'B', 'A', 'A', 'B', 'B']
+    Categories (2, object): ['A', 'B']
+
+    ``labels=False`` implies you just want the bins back.
+
+    >>> pd.cut([0, 1, 1, 2], bins=4, labels=False)
+    array([0, 1, 1, 3])
+
+    Passing a Series as an input returns a Series with categorical dtype:
+
+    >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
+    ...               index=['a', 'b', 'c', 'd', 'e'])
+    >>> pd.cut(s, 3)
+    ... # doctest: +ELLIPSIS
+    a    (1.992, 4.667]
+    b    (1.992, 4.667]
+    c    (4.667, 7.333]
+    d     (7.333, 10.0]
+    e     (7.333, 10.0]
+    dtype: category
+    Categories (3, interval[float64, right]): [(1.992, 4.667] < (4.667, ...
+
+    Passing a Series as input can also return a Series with the mapped
+    values; this maps values numerically to intervals based on the bins.
+
+    >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
+    ...               index=['a', 'b', 'c', 'd', 'e'])
+    >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)
+    ... # doctest: +ELLIPSIS
+    (a    1.0
+     b    2.0
+     c    3.0
+     d    4.0
+     e    NaN
+     dtype: float64,
+     array([ 0,  2,  4,  6,  8, 10]))
+
+    Use the `duplicates='drop'` option when `bins` is not unique:
+
+    >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,
+    ...        right=False, duplicates='drop')
+    ...
# doctest: +ELLIPSIS + (a 1.0 + b 2.0 + c 3.0 + d 3.0 + e NaN + dtype: float64, + array([ 0, 2, 4, 6, 10])) + + Passing an IntervalIndex for `bins` results in those categories exactly. + Notice that values not covered by the IntervalIndex are set to NaN. 0 + is to the left of the first bin (which is closed on the right), and 1.5 + falls between two bins. + + >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) + >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins) + [NaN, (0.0, 1.0], NaN, (2.0, 3.0], (4.0, 5.0]] + Categories (3, interval[int64, right]): [(0, 1] < (2, 3] < (4, 5]] + """ + # NOTE: this binning code is changed a bit from histogram for var(x) == 0 + + original = x + x = _preprocess_for_cut(x) + x, dtype = _coerce_to_type(x) + + if not np.iterable(bins): + if is_scalar(bins) and bins < 1: + raise ValueError("`bins` should be a positive integer.") + + sz = x.size + + if sz == 0: + raise ValueError("Cannot cut empty array") + + rng = (nanops.nanmin(x), nanops.nanmax(x)) + mn, mx = (mi + 0.0 for mi in rng) + + if np.isinf(mn) or np.isinf(mx): + # GH 24314 + raise ValueError( + "cannot specify integer `bins` when input data contains infinity" + ) + if mn == mx: # adjust end points before binning + mn -= 0.001 * abs(mn) if mn != 0 else 0.001 + mx += 0.001 * abs(mx) if mx != 0 else 0.001 + bins = np.linspace(mn, mx, bins + 1, endpoint=True) + else: # adjust end points after binning + bins = np.linspace(mn, mx, bins + 1, endpoint=True) + adj = (mx - mn) * 0.001 # 0.1% of the range + if right: + bins[0] -= adj + else: + bins[-1] += adj + + elif isinstance(bins, IntervalIndex): + if bins.is_overlapping: + raise ValueError("Overlapping IntervalIndex is not accepted.") + + else: + if isinstance(getattr(bins, "dtype", None), DatetimeTZDtype): + bins = np.asarray(bins, dtype=DT64NS_DTYPE) + else: + bins = np.asarray(bins) + bins = _convert_bin_to_numeric_type(bins, dtype) + + # GH 26045: cast to float64 to avoid an overflow + if (np.diff(bins.astype("float64")) < 0).any(): + raise ValueError("bins must increase monotonically.") + + fac, bins = _bins_to_cuts( + x, + bins, + right=right, + labels=labels, + precision=precision, + include_lowest=include_lowest, + dtype=dtype, + duplicates=duplicates, + ordered=ordered, + ) + + return _postprocess_for_cut(fac, bins, retbins, dtype, original) + + +def qcut( + x, + q, + labels=None, + retbins: bool = False, + precision: int = 3, + duplicates: str = "raise", +): + """ + Quantile-based discretization function. + + Discretize variable into equal-sized buckets based on rank or based + on sample quantiles. For example 1000 values for 10 quantiles would + produce a Categorical object indicating quantile membership for each data point. + + Parameters + ---------- + x : 1d ndarray or Series + q : int or list-like of float + Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately + array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles. + labels : array or False, default None + Used as labels for the resulting bins. Must be of the same length as + the resulting bins. If False, return only integer indicators of the + bins. If True, raises an error. + retbins : bool, optional + Whether to return the (bins, labels) or not. Can be useful if bins + is given as a scalar. + precision : int, optional + The precision at which to store and display the bins labels. + duplicates : {default 'raise', 'drop'}, optional + If bin edges are not unique, raise ValueError or drop non-uniques. 
+ + Returns + ------- + out : Categorical or Series or array of integers if labels is False + The return type (Categorical or Series) depends on the input: a Series + of type category if input is a Series else Categorical. Bins are + represented as categories when categorical data is returned. + bins : ndarray of floats + Returned only if `retbins` is True. + + Notes + ----- + Out of bounds values will be NA in the resulting Categorical object + + Examples + -------- + >>> pd.qcut(range(5), 4) + ... # doctest: +ELLIPSIS + [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]] + Categories (4, interval[float64, right]): [(-0.001, 1.0] < (1.0, 2.0] ... + + >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"]) + ... # doctest: +SKIP + [good, good, medium, bad, bad] + Categories (3, object): [good < medium < bad] + + >>> pd.qcut(range(5), 4, labels=False) + array([0, 0, 1, 2, 3]) + """ + original = x + x = _preprocess_for_cut(x) + x, dtype = _coerce_to_type(x) + + quantiles = np.linspace(0, 1, q + 1) if is_integer(q) else q + + x_np = np.asarray(x) + x_np = x_np[~np.isnan(x_np)] + bins = np.quantile(x_np, quantiles) + + fac, bins = _bins_to_cuts( + x, + bins, + labels=labels, + precision=precision, + include_lowest=True, + dtype=dtype, + duplicates=duplicates, + ) + + return _postprocess_for_cut(fac, bins, retbins, dtype, original) + + +def _bins_to_cuts( + x, + bins: np.ndarray, + right: bool = True, + labels=None, + precision: int = 3, + include_lowest: bool = False, + dtype: DtypeObj | None = None, + duplicates: str = "raise", + ordered: bool = True, +): + if not ordered and labels is None: + raise ValueError("'labels' must be provided if 'ordered = False'") + + if duplicates not in ["raise", "drop"]: + raise ValueError( + "invalid value for 'duplicates' parameter, valid options are: raise, drop" + ) + + if isinstance(bins, IntervalIndex): + # we have a fast-path here + ids = bins.get_indexer(x) + cat_dtype = CategoricalDtype(bins, ordered=True) + result = Categorical.from_codes(ids, dtype=cat_dtype, validate=False) + return result, bins + + unique_bins = algos.unique(bins) + if len(unique_bins) < len(bins) and len(bins) != 2: + if duplicates == "raise": + raise ValueError( + f"Bin edges must be unique: {repr(bins)}.\n" + f"You can drop duplicate edges by setting the 'duplicates' kwarg" + ) + bins = unique_bins + + side: Literal["left", "right"] = "left" if right else "right" + ids = ensure_platform_int(bins.searchsorted(x, side=side)) + + if include_lowest: + ids[np.asarray(x) == bins[0]] = 1 + + na_mask = isna(x) | (ids == len(bins)) | (ids == 0) + has_nas = na_mask.any() + + if labels is not False: + if not (labels is None or is_list_like(labels)): + raise ValueError( + "Bin labels must either be False, None or passed in as a " + "list-like argument" + ) + + if labels is None: + labels = _format_labels( + bins, precision, right=right, include_lowest=include_lowest, dtype=dtype + ) + elif ordered and len(set(labels)) != len(labels): + raise ValueError( + "labels must be unique if ordered=True; pass ordered=False " + "for duplicate labels" + ) + else: + if len(labels) != len(bins) - 1: + raise ValueError( + "Bin labels must be one fewer than the number of bin edges" + ) + + if not isinstance(getattr(labels, "dtype", None), CategoricalDtype): + labels = Categorical( + labels, + categories=labels if len(set(labels)) == len(labels) else None, + ordered=ordered, + ) + # TODO: handle mismatch between categorical label order and pandas.cut order. 
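+        # (Editor's illustration) ids of 0 or len(bins) mark out-of-bounds
+        # values: for bins [0, 1, 2] with right=True, searchsorted gives
+        # ids == 0 for x == -5 and ids == 3 for x == 10, so both land in
+        # na_mask and come out as NA.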
+ np.putmask(ids, na_mask, 0) + result = algos.take_nd(labels, ids - 1) + + else: + result = ids - 1 + if has_nas: + result = result.astype(np.float64) + np.putmask(result, na_mask, np.nan) + + return result, bins + + +def _coerce_to_type(x): + """ + if the passed data is of datetime/timedelta, bool or nullable int type, + this method converts it to numeric so that cut or qcut method can + handle it + """ + dtype: DtypeObj | None = None + + if isinstance(x.dtype, DatetimeTZDtype): + dtype = x.dtype + elif lib.is_np_dtype(x.dtype, "M"): + x = to_datetime(x).astype("datetime64[ns]", copy=False) + dtype = np.dtype("datetime64[ns]") + elif lib.is_np_dtype(x.dtype, "m"): + x = to_timedelta(x) + dtype = np.dtype("timedelta64[ns]") + elif is_bool_dtype(x.dtype): + # GH 20303 + x = x.astype(np.int64) + # To support cut and qcut for IntegerArray we convert to float dtype. + # Will properly support in the future. + # https://github.com/pandas-dev/pandas/pull/31290 + # https://github.com/pandas-dev/pandas/issues/31389 + elif isinstance(x.dtype, ExtensionDtype) and is_numeric_dtype(x.dtype): + x = x.to_numpy(dtype=np.float64, na_value=np.nan) + + if dtype is not None: + # GH 19768: force NaT to NaN during integer conversion + x = np.where(x.notna(), x.view(np.int64), np.nan) + + return x, dtype + + +def _convert_bin_to_numeric_type(bins, dtype: DtypeObj | None): + """ + if the passed bin is of datetime/timedelta type, + this method converts it to integer + + Parameters + ---------- + bins : list-like of bins + dtype : dtype of data + + Raises + ------ + ValueError if bins are not of a compat dtype to dtype + """ + bins_dtype = infer_dtype(bins, skipna=False) + if lib.is_np_dtype(dtype, "m"): + if bins_dtype in ["timedelta", "timedelta64"]: + bins = to_timedelta(bins).view(np.int64) + else: + raise ValueError("bins must be of timedelta64 dtype") + elif lib.is_np_dtype(dtype, "M") or isinstance(dtype, DatetimeTZDtype): + if bins_dtype in ["datetime", "datetime64"]: + bins = to_datetime(bins) + if lib.is_np_dtype(bins.dtype, "M"): + # As of 2.0, to_datetime may give non-nano, so we need to convert + # here until the rest of this file recognizes non-nano + bins = bins.astype("datetime64[ns]", copy=False) + bins = bins.view(np.int64) + else: + raise ValueError("bins must be of datetime64 dtype") + + return bins + + +def _convert_bin_to_datelike_type(bins, dtype: DtypeObj | None): + """ + Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is + datelike + + Parameters + ---------- + bins : list-like of bins + dtype : dtype of data + + Returns + ------- + bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is + datelike + """ + if isinstance(dtype, DatetimeTZDtype): + bins = to_datetime(bins.astype(np.int64), utc=True).tz_convert(dtype.tz) + elif lib.is_np_dtype(dtype, "mM"): + bins = Index(bins.astype(np.int64), dtype=dtype) + return bins + + +def _format_labels( + bins, + precision: int, + right: bool = True, + include_lowest: bool = False, + dtype: DtypeObj | None = None, +): + """based on the dtype, return our labels""" + closed: IntervalLeftRight = "right" if right else "left" + + formatter: Callable[[Any], Timestamp] | Callable[[Any], Timedelta] + + if isinstance(dtype, DatetimeTZDtype): + formatter = lambda x: Timestamp(x, tz=dtype.tz) + adjust = lambda x: x - Timedelta("1ns") + elif lib.is_np_dtype(dtype, "M"): + formatter = Timestamp + adjust = lambda x: x - Timedelta("1ns") + elif lib.is_np_dtype(dtype, "m"): + formatter = Timedelta + adjust = lambda x: x - 
Timedelta("1ns")
+    else:
+        precision = _infer_precision(precision, bins)
+        formatter = lambda x: _round_frac(x, precision)
+        adjust = lambda x: x - 10 ** (-precision)
+
+    breaks = [formatter(b) for b in bins]
+    if right and include_lowest:
+        # adjust lhs of first interval by precision to account for being right closed
+        breaks[0] = adjust(breaks[0])
+
+    return IntervalIndex.from_breaks(breaks, closed=closed)
+
+
+def _preprocess_for_cut(x):
+    """
+    handles preprocessing for cut where we convert passed
+    input to array, strip the index information and store it
+    separately
+    """
+    # Check that the passed array is a Pandas or Numpy object
+    # We don't want to strip away a Pandas data-type here (e.g. datetimetz)
+    ndim = getattr(x, "ndim", None)
+    if ndim is None:
+        x = np.asarray(x)
+    if x.ndim != 1:
+        raise ValueError("Input array must be 1 dimensional")
+
+    return x
+
+
+def _postprocess_for_cut(fac, bins, retbins: bool, dtype: DtypeObj | None, original):
+    """
+    handles post processing for the cut method where
+    we combine the index information if the originally passed
+    datatype was a series
+    """
+    if isinstance(original, ABCSeries):
+        fac = original._constructor(fac, index=original.index, name=original.name)
+
+    if not retbins:
+        return fac
+
+    bins = _convert_bin_to_datelike_type(bins, dtype)
+
+    return fac, bins
+
+
+def _round_frac(x, precision: int):
+    """
+    Round the fractional part of the given number
+    """
+    if not np.isfinite(x) or x == 0:
+        return x
+    else:
+        frac, whole = np.modf(x)
+        if whole == 0:
+            digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
+        else:
+            digits = precision
+        return np.around(x, digits)
+
+
+def _infer_precision(base_precision: int, bins) -> int:
+    """
+    Infer an appropriate precision for _round_frac
+    """
+    for precision in range(base_precision, 20):
+        levels = np.asarray([_round_frac(b, precision) for b in bins])
+        if algos.unique(levels).size == bins.size:
+            return precision
+    return base_precision  # default
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/util.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/util.py
new file mode 100644
index 00000000..bcd51e09
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/reshape/util.py
@@ -0,0 +1,85 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from pandas.core.dtypes.common import is_list_like
+
+if TYPE_CHECKING:
+    from pandas._typing import NumpyIndexT
+
+
+def cartesian_product(X) -> list[np.ndarray]:
+    """
+    Numpy version of itertools.product.
+    Sometimes faster (for large inputs)...
+
+    Parameters
+    ----------
+    X : list-like of list-likes
+
+    Returns
+    -------
+    product : list of ndarrays
+
+    Examples
+    --------
+    >>> cartesian_product([list('ABC'), [1, 2]])
+    [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='<U1'), array([1, 2, 1, 2, 1, 2])]
+    """
+    msg = "Input must be a list-like of list-likes"
+    if not is_list_like(X):
+        raise TypeError(msg)
+    for x in X:
+        if not is_list_like(x):
+            raise TypeError(msg)
+
+    if len(X) == 0:
+        return []
+
+    lenX = np.fromiter((len(x) for x in X), dtype=np.intp)
+    cumprodX = np.cumprod(lenX)
+
+    if np.any(cumprodX < 0):
+        raise ValueError("Product space too large to allocate arrays!")
+
+    a = np.roll(cumprodX, 1)
+    a[0] = 1
+
+    if cumprodX[-1] != 0:
+        b = cumprodX[-1] / cumprodX
+    else:
+        # if any factor is empty, the cartesian product is empty
+        b = np.zeros_like(cumprodX)
+
+    # each factor is repeated b[i] times and then tiled a[i] times, so that
+    # every combination of the factors appears exactly once
+    return [
+        tile_compat(np.repeat(x, b[i]), np.prod(a[i]))
+        for i, x in enumerate(X)
+    ]
+
+
+def tile_compat(arr: NumpyIndexT, num: int) -> NumpyIndexT:
+    """
+    Index compat for np.tile.
+
+    Notes
+    -----
+    Does not support multi-dimensional `num`.
+    """
+    if isinstance(arr, np.ndarray):
+        return np.tile(arr, num)
+
+    # Otherwise we have an Index
+    taker = np.tile(np.arange(len(arr)), num)
+    return arr.take(taker)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/roperator.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/roperator.py
new file mode 100644
index 00000000..2f320f4e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/roperator.py
@@ -0,0 +1,62 @@
+"""
+Reversed Operations not available in the stdlib operator module.
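+For example (editor's illustration), ``rsub(2, 10)`` computes ``10 - 2 == 8``:
+the operands arrive in reflected order, matching ``(2).__rsub__(10)``.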
+Defining these instead of using lambdas allows us to reference them by name.
+"""
+from __future__ import annotations
+
+import operator
+
+
+def radd(left, right):
+    return right + left
+
+
+def rsub(left, right):
+    return right - left
+
+
+def rmul(left, right):
+    return right * left
+
+
+def rdiv(left, right):
+    return right / left
+
+
+def rtruediv(left, right):
+    return right / left
+
+
+def rfloordiv(left, right):
+    return right // left
+
+
+def rmod(left, right):
+    # check if right is a string as % is the string
+    # formatting operation; this is a TypeError
+    # otherwise perform the op
+    if isinstance(right, str):
+        typ = type(left).__name__
+        raise TypeError(f"{typ} cannot perform the operation mod")
+
+    return right % left
+
+
+def rdivmod(left, right):
+    return divmod(right, left)
+
+
+def rpow(left, right):
+    return right**left
+
+
+def rand_(left, right):
+    return operator.and_(right, left)
+
+
+def ror_(left, right):
+    return operator.or_(right, left)
+
+
+def rxor(left, right):
+    return operator.xor(right, left)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/sample.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/sample.py
new file mode 100644
index 00000000..eebbed35
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/sample.py
@@ -0,0 +1,154 @@
+"""
+Module containing utilities for NDFrame.sample() and .GroupBy.sample()
+"""
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from pandas._libs import lib
+
+from pandas.core.dtypes.generic import (
+    ABCDataFrame,
+    ABCSeries,
+)
+
+if TYPE_CHECKING:
+    from pandas._typing import AxisInt
+
+    from pandas.core.generic import NDFrame
+
+
+def preprocess_weights(obj: NDFrame, weights, axis: AxisInt) -> np.ndarray:
+    """
+    Process and validate the `weights` argument to `NDFrame.sample` and
+    `.GroupBy.sample`.
+
+    Returns `weights` as an ndarray[np.float64], validated except for normalizing
+    weights (because that must be done groupwise in groupby sampling).
+    """
+    # If a series, align with frame
+    if isinstance(weights, ABCSeries):
+        weights = weights.reindex(obj.axes[axis])
+
+    # Strings acceptable if a dataframe and axis = 0
+    if isinstance(weights, str):
+        if isinstance(obj, ABCDataFrame):
+            if axis == 0:
+                try:
+                    weights = obj[weights]
+                except KeyError as err:
+                    raise KeyError(
+                        "String passed to weights not a valid column"
+                    ) from err
+            else:
+                raise ValueError(
+                    "Strings can only be passed to "
+                    "weights when sampling from rows on "
+                    "a DataFrame"
+                )
+        else:
+            raise ValueError(
+                "Strings cannot be passed as weights when sampling from a Series."
+            )
+
+    if isinstance(obj, ABCSeries):
+        func = obj._constructor
+    else:
+        func = obj._constructor_sliced
+
+    weights = func(weights, dtype="float64")._values
+
+    if len(weights) != obj.shape[axis]:
+        raise ValueError("Weights and axis to be sampled must be of same length")
+
+    if lib.has_infs(weights):
+        raise ValueError("weight vector may not include `inf` values")
+
+    if (weights < 0).any():
+        raise ValueError("weight vector may not include negative values")
+
+    missing = np.isnan(weights)
+    if missing.any():
+        # Don't modify weights in place
+        weights = weights.copy()
+        weights[missing] = 0
+    return weights
+
+
+def process_sampling_size(
+    n: int | None, frac: float | None, replace: bool
+) -> int | None:
+    """
+    Process and validate the `n` and `frac` arguments to `NDFrame.sample` and
+    `.GroupBy.sample`.
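+
+    For example (editor's illustration), ``process_sampling_size(None, None,
+    False)`` falls back to ``n=1``, while ``frac=2`` with ``replace=False``
+    raises a ValueError.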
+ + Returns None if `frac` should be used (variable sampling sizes), otherwise returns + the constant sampling size. + """ + # If no frac or n, default to n=1. + if n is None and frac is None: + n = 1 + elif n is not None and frac is not None: + raise ValueError("Please enter a value for `frac` OR `n`, not both") + elif n is not None: + if n < 0: + raise ValueError( + "A negative number of rows requested. Please provide `n` >= 0." + ) + if n % 1 != 0: + raise ValueError("Only integers accepted as `n` values") + else: + assert frac is not None # for mypy + if frac > 1 and not replace: + raise ValueError( + "Replace has to be set to `True` when " + "upsampling the population `frac` > 1." + ) + if frac < 0: + raise ValueError( + "A negative number of rows requested. Please provide `frac` >= 0." + ) + + return n + + +def sample( + obj_len: int, + size: int, + replace: bool, + weights: np.ndarray | None, + random_state: np.random.RandomState | np.random.Generator, +) -> np.ndarray: + """ + Randomly sample `size` indices in `np.arange(obj_len)` + + Parameters + ---------- + obj_len : int + The length of the indices being considered + size : int + The number of values to choose + replace : bool + Allow or disallow sampling of the same row more than once. + weights : np.ndarray[np.float64] or None + If None, equal probability weighting, otherwise weights according + to the vector normalized + random_state: np.random.RandomState or np.random.Generator + State used for the random sampling + + Returns + ------- + np.ndarray[np.intp] + """ + if weights is not None: + weight_sum = weights.sum() + if weight_sum != 0: + weights = weights / weight_sum + else: + raise ValueError("Invalid weights: weights sum to zero") + + return random_state.choice(obj_len, size=size, replace=replace, p=weights).astype( + np.intp, copy=False + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/series.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/series.py new file mode 100644 index 00000000..623fb333 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/series.py @@ -0,0 +1,6307 @@ +""" +Data structure for 1-dimensional cross-sectional and time series data +""" +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterable, + Mapping, + Sequence, +) +import operator +import sys +from textwrap import dedent +from typing import ( + IO, + TYPE_CHECKING, + Any, + Callable, + Literal, + cast, + overload, +) +import warnings +import weakref + +import numpy as np + +from pandas._config import ( + get_option, + using_copy_on_write, +) + +from pandas._libs import ( + lib, + properties, + reshape, +) +from pandas._libs.lib import is_range_indexer +from pandas.compat import PYPY +from pandas.compat._constants import REF_COUNT +from pandas.compat._optional import import_optional_dependency +from pandas.compat.numpy import function as nv +from pandas.errors import ( + ChainedAssignmentError, + InvalidIndexError, + _chained_assignment_method_msg, + _chained_assignment_msg, +) +from pandas.util._decorators import ( + Appender, + Substitution, + doc, +) +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import ( + validate_ascending, + validate_bool_kwarg, + validate_percentile, +) + +from pandas.core.dtypes.astype import astype_is_view +from pandas.core.dtypes.cast import ( + LossySetitemError, + convert_dtypes, + maybe_box_native, + maybe_cast_pointwise_result, +) +from pandas.core.dtypes.common import ( + is_dict_like, + is_integer, + 
is_iterator, + is_list_like, + is_object_dtype, + is_scalar, + pandas_dtype, + validate_all_hashable, +) +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + ExtensionDtype, +) +from pandas.core.dtypes.generic import ABCDataFrame +from pandas.core.dtypes.inference import is_hashable +from pandas.core.dtypes.missing import ( + isna, + na_value_for_dtype, + notna, + remove_na_arraylike, +) + +from pandas.core import ( + algorithms, + base, + common as com, + missing, + nanops, + ops, + roperator, +) +from pandas.core.accessor import CachedAccessor +from pandas.core.apply import SeriesApply +from pandas.core.arrays import ExtensionArray +from pandas.core.arrays.categorical import CategoricalAccessor +from pandas.core.arrays.sparse import SparseAccessor +from pandas.core.construction import ( + extract_array, + sanitize_array, +) +from pandas.core.generic import ( + NDFrame, + make_doc, +) +from pandas.core.indexers import ( + disallow_ndim_indexing, + unpack_1tuple, +) +from pandas.core.indexes.accessors import CombinedDatetimelikeProperties +from pandas.core.indexes.api import ( + DatetimeIndex, + Index, + MultiIndex, + PeriodIndex, + default_index, + ensure_index, +) +import pandas.core.indexes.base as ibase +from pandas.core.indexes.multi import maybe_droplevels +from pandas.core.indexing import ( + check_bool_indexer, + check_dict_or_set_indexers, +) +from pandas.core.internals import ( + SingleArrayManager, + SingleBlockManager, +) +from pandas.core.methods import selectn +from pandas.core.shared_docs import _shared_docs +from pandas.core.sorting import ( + ensure_key_mapped, + nargsort, +) +from pandas.core.strings.accessor import StringMethods +from pandas.core.tools.datetimes import to_datetime + +import pandas.io.formats.format as fmt +from pandas.io.formats.info import ( + INFO_DOCSTRING, + SeriesInfo, + series_sub_kwargs, +) +import pandas.plotting + +if TYPE_CHECKING: + from pandas._libs.internals import BlockValuesRefs + from pandas._typing import ( + AggFuncType, + AnyAll, + AnyArrayLike, + ArrayLike, + Axis, + AxisInt, + CorrelationMethod, + DropKeep, + Dtype, + DtypeBackend, + DtypeObj, + FilePath, + IgnoreRaise, + IndexKeyFunc, + IndexLabel, + Level, + NaPosition, + NumpySorter, + NumpyValueArrayLike, + QuantileInterpolation, + ReindexMethod, + Renamer, + Scalar, + Self, + SingleManager, + SortKind, + StorageOptions, + Suffixes, + ValueKeyFunc, + WriteBuffer, + npt, + ) + + from pandas.core.frame import DataFrame + from pandas.core.groupby.generic import SeriesGroupBy + +__all__ = ["Series"] + +_shared_doc_kwargs = { + "axes": "index", + "klass": "Series", + "axes_single_arg": "{0 or 'index'}", + "axis": """axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame.""", + "inplace": """inplace : bool, default False + If True, performs operation inplace and returns None.""", + "unique": "np.ndarray", + "duplicated": "Series", + "optional_by": "", + "optional_reindex": """ +index : array-like, optional + New labels for the index. Preferably an Index object to avoid + duplicating data. +axis : int or str, optional + Unused.""", +} + + +def _coerce_method(converter): + """ + Install the scalar coercion methods. + """ + + def wrapper(self): + if len(self) == 1: + warnings.warn( + f"Calling {converter.__name__} on a single element Series is " + "deprecated and will raise a TypeError in the future. 
" + f"Use {converter.__name__}(ser.iloc[0]) instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + return converter(self.iloc[0]) + raise TypeError(f"cannot convert the series to {converter}") + + wrapper.__name__ = f"__{converter.__name__}__" + return wrapper + + +# ---------------------------------------------------------------------- +# Series class + + +# error: Cannot override final attribute "ndim" (previously declared in base +# class "NDFrame") +# error: Cannot override final attribute "size" (previously declared in base +# class "NDFrame") +# definition in base class "NDFrame" +class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc] + """ + One-dimensional ndarray with axis labels (including time series). + + Labels need not be unique but must be a hashable type. The object + supports both integer- and label-based indexing and provides a host of + methods for performing operations involving the index. Statistical + methods from ndarray have been overridden to automatically exclude + missing data (currently represented as NaN). + + Operations between Series (+, -, /, \\*, \\*\\*) align values based on their + associated index values-- they need not be the same length. The result + index will be the sorted union of the two indexes. + + Parameters + ---------- + data : array-like, Iterable, dict, or scalar value + Contains data stored in Series. If data is a dict, argument order is + maintained. + index : array-like or Index (1d) + Values must be hashable and have the same length as `data`. + Non-unique index values are allowed. Will default to + RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like + and index is None, then the keys in the data are used as the index. If the + index is not None, the resulting Series is reindexed with the index values. + dtype : str, numpy.dtype, or ExtensionDtype, optional + Data type for the output Series. If not specified, this will be + inferred from `data`. + See the :ref:`user guide ` for more usages. + name : Hashable, default None + The name to give to the Series. + copy : bool, default False + Copy input data. Only affects Series or 1d ndarray input. See examples. + + Notes + ----- + Please reference the :ref:`User Guide ` for more information. + + Examples + -------- + Constructing Series from a dictionary with an Index specified + + >>> d = {'a': 1, 'b': 2, 'c': 3} + >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) + >>> ser + a 1 + b 2 + c 3 + dtype: int64 + + The keys of the dictionary match with the Index values, hence the Index + values have no effect. + + >>> d = {'a': 1, 'b': 2, 'c': 3} + >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) + >>> ser + x NaN + y NaN + z NaN + dtype: float64 + + Note that the Index is first build with the keys from the dictionary. + After this the Series is reindexed with the given Index values, hence we + get all NaN as a result. + + Constructing Series from a list with `copy=False`. + + >>> r = [1, 2] + >>> ser = pd.Series(r, copy=False) + >>> ser.iloc[0] = 999 + >>> r + [1, 2] + >>> ser + 0 999 + 1 2 + dtype: int64 + + Due to input data type the Series has a `copy` of + the original data even though `copy=False`, so + the data is unchanged. + + Constructing Series from a 1d ndarray with `copy=False`. + + >>> r = np.array([1, 2]) + >>> ser = pd.Series(r, copy=False) + >>> ser.iloc[0] = 999 + >>> r + array([999, 2]) + >>> ser + 0 999 + 1 2 + dtype: int64 + + Due to input data type the Series has a `view` on + the original data, so + the data is changed as well. 
+ """ + + _typ = "series" + _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) + + _name: Hashable + _metadata: list[str] = ["_name"] + _internal_names_set = {"index", "name"} | NDFrame._internal_names_set + _accessors = {"dt", "cat", "str", "sparse"} + _hidden_attrs = ( + base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) + ) + + # similar to __array_priority__, positions Series after DataFrame + # but before Index and ExtensionArray. Should NOT be overridden by subclasses. + __pandas_priority__ = 3000 + + # Override cache_readonly bc Series is mutable + # error: Incompatible types in assignment (expression has type "property", + # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") + hasnans = property( # type: ignore[assignment] + # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" + base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] + doc=base.IndexOpsMixin.hasnans.__doc__, + ) + _mgr: SingleManager + + # ---------------------------------------------------------------------- + # Constructors + + def __init__( + self, + data=None, + index=None, + dtype: Dtype | None = None, + name=None, + copy: bool | None = None, + fastpath: bool = False, + ) -> None: + if ( + isinstance(data, (SingleBlockManager, SingleArrayManager)) + and index is None + and dtype is None + and (copy is False or copy is None) + ): + if using_copy_on_write(): + data = data.copy(deep=False) + # GH#33357 called with just the SingleBlockManager + NDFrame.__init__(self, data) + if fastpath: + # e.g. from _box_col_values, skip validation of name + object.__setattr__(self, "_name", name) + else: + self.name = name + return + + if isinstance(data, (ExtensionArray, np.ndarray)): + if copy is not False and using_copy_on_write(): + if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): + data = data.copy() + if copy is None: + copy = False + + # we are called internally, so short-circuit + if fastpath: + # data is a ndarray, index is defined + if not isinstance(data, (SingleBlockManager, SingleArrayManager)): + manager = get_option("mode.data_manager") + if manager == "block": + data = SingleBlockManager.from_array(data, index) + elif manager == "array": + data = SingleArrayManager.from_array(data, index) + elif using_copy_on_write() and not copy: + data = data.copy(deep=False) + if copy: + data = data.copy() + # skips validation of the name + object.__setattr__(self, "_name", name) + NDFrame.__init__(self, data) + return + + if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: + data = data.copy(deep=False) + + name = ibase.maybe_extract_name(name, data, type(self)) + + if index is not None: + index = ensure_index(index) + + if dtype is not None: + dtype = self._validate_dtype(dtype) + + if data is None: + index = index if index is not None else default_index(0) + if len(index) or dtype is not None: + data = na_value_for_dtype(pandas_dtype(dtype), compat=False) + else: + data = [] + + if isinstance(data, MultiIndex): + raise NotImplementedError( + "initializing a Series from a MultiIndex is not supported" + ) + + refs = None + if isinstance(data, Index): + if dtype is not None: + data = data.astype(dtype, copy=False) + + if using_copy_on_write(): + refs = data._references + data = data._values + else: + # GH#24096 we need to ensure the index remains immutable + data = data._values.copy() + copy = False + + elif isinstance(data, np.ndarray): + if len(data.dtype): + # GH#13296 we are dealing with a compound 
dtype, which + # should be treated as 2D + raise ValueError( + "Cannot construct a Series from an ndarray with " + "compound dtype. Use DataFrame instead." + ) + elif isinstance(data, Series): + if index is None: + index = data.index + data = data._mgr.copy(deep=False) + else: + data = data.reindex(index, copy=copy) + copy = False + data = data._mgr + elif is_dict_like(data): + data, index = self._init_dict(data, index, dtype) + dtype = None + copy = False + elif isinstance(data, (SingleBlockManager, SingleArrayManager)): + if index is None: + index = data.index + elif not data.index.equals(index) or copy: + # GH#19275 SingleBlockManager input should only be called + # internally + raise AssertionError( + "Cannot pass both SingleBlockManager " + "`data` argument and a different " + "`index` argument. `copy` must be False." + ) + + elif isinstance(data, ExtensionArray): + pass + else: + data = com.maybe_iterable_to_list(data) + if is_list_like(data) and not len(data) and dtype is None: + # GH 29405: Pre-2.0, this defaulted to float. + dtype = np.dtype(object) + + if index is None: + if not is_list_like(data): + data = [data] + index = default_index(len(data)) + elif is_list_like(data): + com.require_length_match(data, index) + + # create/copy the manager + if isinstance(data, (SingleBlockManager, SingleArrayManager)): + if dtype is not None: + data = data.astype(dtype=dtype, errors="ignore", copy=copy) + elif copy: + data = data.copy() + else: + data = sanitize_array(data, index, dtype, copy) + + manager = get_option("mode.data_manager") + if manager == "block": + data = SingleBlockManager.from_array(data, index, refs=refs) + elif manager == "array": + data = SingleArrayManager.from_array(data, index) + + NDFrame.__init__(self, data) + self.name = name + self._set_axis(0, index) + + def _init_dict( + self, data, index: Index | None = None, dtype: DtypeObj | None = None + ): + """ + Derive the "_mgr" and "index" attributes of a new Series from a + dictionary input. + + Parameters + ---------- + data : dict or dict-like + Data used to populate the new Series. + index : Index or None, default None + Index for the new Series: if None, use dict keys. + dtype : np.dtype, ExtensionDtype, or None, default None + The dtype for the new Series: if None, infer from data. + + Returns + ------- + _data : BlockManager for the new Series + index : index for the new Series + """ + keys: Index | tuple + + # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] + # raises KeyError), so we iterate the entire dict, and align + if data: + # GH:34717, issue was using zip to extract key and values from data. + # using generators in effects the performance. + # Below is the new way of extracting the keys and values + + keys = tuple(data.keys()) + values = list(data.values()) # Generating list of values- faster way + elif index is not None: + # fastpath for Series(data=None). Just use broadcasting a scalar + # instead of reindexing. 
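+            # When the index is non-empty, a single NA value matched to the
+            # requested dtype is broadcast across it by the Series
+            # constructor below, which is cheaper than constructing a Series
+            # and then realigning it.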
+ if len(index) or dtype is not None: + values = na_value_for_dtype(pandas_dtype(dtype), compat=False) + else: + values = [] + keys = index + else: + keys, values = default_index(0), [] + + # Input is now list-like, so rely on "standard" construction: + s = Series(values, index=keys, dtype=dtype) + + # Now we just make sure the order is respected, if any + if data and index is not None: + s = s.reindex(index, copy=False) + return s._mgr, s.index + + # ---------------------------------------------------------------------- + + @property + def _constructor(self) -> Callable[..., Series]: + return Series + + def _constructor_from_mgr(self, mgr, axes): + if self._constructor is Series: + # we are pandas.Series (or a subclass that doesn't override _constructor) + ser = Series._from_mgr(mgr, axes=axes) + ser._name = None # caller is responsible for setting real name + return ser + else: + assert axes is mgr.axes + return self._constructor(mgr) + + @property + def _constructor_expanddim(self) -> Callable[..., DataFrame]: + """ + Used when a manipulation result has one higher dimension as the + original, such as Series.to_frame() + """ + from pandas.core.frame import DataFrame + + return DataFrame + + def _expanddim_from_mgr(self, mgr, axes) -> DataFrame: + # https://github.com/pandas-dev/pandas/pull/52132#issuecomment-1481491828 + # This is a short-term implementation that will be replaced + # with self._constructor_expanddim._constructor_from_mgr(...) + # once downstream packages (geopandas) have had a chance to implement + # their own overrides. + # error: "Callable[..., DataFrame]" has no attribute "_from_mgr" [attr-defined] + from pandas import DataFrame + + return DataFrame._from_mgr(mgr, axes=mgr.axes) + + def _constructor_expanddim_from_mgr(self, mgr, axes): + from pandas.core.frame import DataFrame + + if self._constructor_expanddim is DataFrame: + return self._expanddim_from_mgr(mgr, axes) + assert axes is mgr.axes + return self._constructor_expanddim(mgr) + + # types + @property + def _can_hold_na(self) -> bool: + return self._mgr._can_hold_na + + # ndarray compatibility + @property + def dtype(self) -> DtypeObj: + """ + Return the dtype object of the underlying data. + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.dtype + dtype('int64') + """ + return self._mgr.dtype + + @property + def dtypes(self) -> DtypeObj: + """ + Return the dtype object of the underlying data. + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.dtypes + dtype('int64') + """ + # DataFrame compatibility + return self.dtype + + @property + def name(self) -> Hashable: + """ + Return the name of the Series. + + The name of a Series becomes its index or column name if it is used + to form a DataFrame. It is also used whenever displaying the Series + using the interpreter. + + Returns + ------- + label (hashable object) + The name of the Series, also the column name if part of a DataFrame. + + See Also + -------- + Series.rename : Sets the Series name when given a scalar input. + Index.name : Corresponding Index property. + + Examples + -------- + The Series name can be set initially when calling the constructor. + + >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') + >>> s + 0 1 + 1 2 + 2 3 + Name: Numbers, dtype: int64 + >>> s.name = "Integers" + >>> s + 0 1 + 1 2 + 2 3 + Name: Integers, dtype: int64 + + The name of a Series within a DataFrame is its column name. + + >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], + ... 
columns=["Odd Numbers", "Even Numbers"]) + >>> df + Odd Numbers Even Numbers + 0 1 2 + 1 3 4 + 2 5 6 + >>> df["Even Numbers"].name + 'Even Numbers' + """ + return self._name + + @name.setter + def name(self, value: Hashable) -> None: + validate_all_hashable(value, error_name=f"{type(self).__name__}.name") + object.__setattr__(self, "_name", value) + + @property + def values(self): + """ + Return Series as ndarray or ndarray-like depending on the dtype. + + .. warning:: + + We recommend using :attr:`Series.array` or + :meth:`Series.to_numpy`, depending on whether you need + a reference to the underlying data or a NumPy array. + + Returns + ------- + numpy.ndarray or ndarray-like + + See Also + -------- + Series.array : Reference to the underlying data. + Series.to_numpy : A NumPy array representing the underlying data. + + Examples + -------- + >>> pd.Series([1, 2, 3]).values + array([1, 2, 3]) + + >>> pd.Series(list('aabc')).values + array(['a', 'a', 'b', 'c'], dtype=object) + + >>> pd.Series(list('aabc')).astype('category').values + ['a', 'a', 'b', 'c'] + Categories (3, object): ['a', 'b', 'c'] + + Timezone aware datetime data is converted to UTC: + + >>> pd.Series(pd.date_range('20130101', periods=3, + ... tz='US/Eastern')).values + array(['2013-01-01T05:00:00.000000000', + '2013-01-02T05:00:00.000000000', + '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') + """ + return self._mgr.external_values() + + @property + def _values(self): + """ + Return the internal repr of this data (defined by Block.interval_values). + This are the values as stored in the Block (ndarray or ExtensionArray + depending on the Block class), with datetime64[ns] and timedelta64[ns] + wrapped in ExtensionArrays to match Index._values behavior. + + Differs from the public ``.values`` for certain data types, because of + historical backwards compatibility of the public attribute (e.g. period + returns object ndarray and datetimetz a datetime64[ns] ndarray for + ``.values`` while it returns an ExtensionArray for ``._values`` in those + cases). + + Differs from ``.array`` in that this still returns the numpy array if + the Block is backed by a numpy array (except for datetime64 and + timedelta64 dtypes), while ``.array`` ensures to always return an + ExtensionArray. + + Overview: + + dtype | values | _values | array | + ----------- | ------------- | ------------- | --------------------- | + Numeric | ndarray | ndarray | NumpyExtensionArray | + Category | Categorical | Categorical | Categorical | + dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | + dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | + td64[ns] | ndarray[m8ns] | TimedeltaArray| TimedeltaArray | + Period | ndarray[obj] | PeriodArray | PeriodArray | + Nullable | EA | EA | EA | + + """ + return self._mgr.internal_values() + + @property + def _references(self) -> BlockValuesRefs | None: + if isinstance(self._mgr, SingleArrayManager): + return None + return self._mgr._block.refs + + # error: Decorated property not supported + @Appender(base.IndexOpsMixin.array.__doc__) # type: ignore[misc] + @property + def array(self) -> ExtensionArray: + return self._mgr.array_values() + + # ops + def ravel(self, order: str = "C") -> ArrayLike: + """ + Return the flattened underlying data as an ndarray or ExtensionArray. + + Returns + ------- + numpy.ndarray or ExtensionArray + Flattened data of the Series. + + See Also + -------- + numpy.ndarray.ravel : Return a flattened array. 
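+
+        Notes
+        -----
+        When copy-on-write is enabled and the underlying data is a NumPy
+        array, the returned array is marked read-only, since it may share
+        memory with the Series.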
+ + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.ravel() + array([1, 2, 3]) + """ + arr = self._values.ravel(order=order) + if isinstance(arr, np.ndarray) and using_copy_on_write(): + arr.flags.writeable = False + return arr + + def __len__(self) -> int: + """ + Return the length of the Series. + """ + return len(self._mgr) + + def view(self, dtype: Dtype | None = None) -> Series: + """ + Create a new view of the Series. + + This function will return a new Series with a view of the same + underlying values in memory, optionally reinterpreted with a new data + type. The new data type must preserve the same size in bytes as to not + cause index misalignment. + + Parameters + ---------- + dtype : data type + Data type object or one of their string representations. + + Returns + ------- + Series + A new Series object as a view of the same data in memory. + + See Also + -------- + numpy.ndarray.view : Equivalent numpy function to create a new view of + the same data in memory. + + Notes + ----- + Series are instantiated with ``dtype=float64`` by default. While + ``numpy.ndarray.view()`` will return a view with the same data type as + the original array, ``Series.view()`` (without specified dtype) + will try using ``float64`` and may fail if the original data type size + in bytes is not the same. + + Examples + -------- + >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') + >>> s + 0 -2 + 1 -1 + 2 0 + 3 1 + 4 2 + dtype: int8 + + The 8 bit signed integer representation of `-1` is `0b11111111`, but + the same bytes represent 255 if read as an 8 bit unsigned integer: + + >>> us = s.view('uint8') + >>> us + 0 254 + 1 255 + 2 0 + 3 1 + 4 2 + dtype: uint8 + + The views share the same underlying values: + + >>> us[0] = 128 + >>> s + 0 -128 + 1 -1 + 2 0 + 3 1 + 4 2 + dtype: int8 + """ + # self.array instead of self._values so we piggyback on NumpyExtensionArray + # implementation + res_values = self.array.view(dtype) + res_ser = self._constructor(res_values, index=self.index, copy=False) + if isinstance(res_ser._mgr, SingleBlockManager): + blk = res_ser._mgr._block + blk.refs = cast("BlockValuesRefs", self._references) + blk.refs.add_reference(blk) # type: ignore[arg-type] + return res_ser.__finalize__(self, method="view") + + # ---------------------------------------------------------------------- + # NDArray Compat + def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: + """ + Return the values as a NumPy array. + + Users should not call this directly. Rather, it is invoked by + :func:`numpy.array` and :func:`numpy.asarray`. + + Parameters + ---------- + dtype : str or numpy.dtype, optional + The dtype to use for the resulting NumPy array. By default, + the dtype is inferred from the data. + + Returns + ------- + numpy.ndarray + The values in the series converted to a :class:`numpy.ndarray` + with the specified `dtype`. + + See Also + -------- + array : Create a new array from data. + Series.array : Zero-copy view to the array backing the Series. + Series.to_numpy : Series method for similar behavior. 
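+
+        Notes
+        -----
+        When the requested ``dtype`` can be satisfied without copying and
+        copy-on-write is enabled, the returned array is a read-only view of
+        the Series values.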
+ + Examples + -------- + >>> ser = pd.Series([1, 2, 3]) + >>> np.asarray(ser) + array([1, 2, 3]) + + For timezone-aware data, the timezones may be retained with + ``dtype='object'`` + + >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) + >>> np.asarray(tzser, dtype="object") + array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), + Timestamp('2000-01-02 00:00:00+0100', tz='CET')], + dtype=object) + + Or the values may be localized to UTC and the tzinfo discarded with + ``dtype='datetime64[ns]'`` + + >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS + array(['1999-12-31T23:00:00.000000000', ...], + dtype='datetime64[ns]') + """ + values = self._values + arr = np.asarray(values, dtype=dtype) + if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): + arr = arr.view() + arr.flags.writeable = False + return arr + + # ---------------------------------------------------------------------- + + def __column_consortium_standard__(self, *, api_version: str | None = None) -> Any: + """ + Provide entry point to the Consortium DataFrame Standard API. + + This is developed and maintained outside of pandas. + Please report any issues to https://github.com/data-apis/dataframe-api-compat. + """ + dataframe_api_compat = import_optional_dependency("dataframe_api_compat") + return ( + dataframe_api_compat.pandas_standard.convert_to_standard_compliant_column( + self, api_version=api_version + ) + ) + + # ---------------------------------------------------------------------- + # Unary Methods + + # coercion + __float__ = _coerce_method(float) + __int__ = _coerce_method(int) + + # ---------------------------------------------------------------------- + + # indexers + @property + def axes(self) -> list[Index]: + """ + Return a list of the row axis labels. + """ + return [self.index] + + # ---------------------------------------------------------------------- + # Indexing Methods + + def _ixs(self, i: int, axis: AxisInt = 0) -> Any: + """ + Return the i-th value or values in the Series by location. + + Parameters + ---------- + i : int + + Returns + ------- + scalar (int) or Series (slice, sequence) + """ + return self._values[i] + + def _slice(self, slobj: slice, axis: AxisInt = 0) -> Series: + # axis kwarg is retained for compat with NDFrame method + # _slice is *always* positional + mgr = self._mgr.get_slice(slobj, axis=axis) + out = self._constructor(mgr, fastpath=True) + return out.__finalize__(self) + + def __getitem__(self, key): + check_dict_or_set_indexers(key) + key = com.apply_if_callable(key, self) + + if key is Ellipsis: + return self + + key_is_scalar = is_scalar(key) + if isinstance(key, (list, tuple)): + key = unpack_1tuple(key) + + if is_integer(key) and self.index._should_fallback_to_positional: + warnings.warn( + # GH#50617 + "Series.__getitem__ treating keys as positions is deprecated. " + "In a future version, integer keys will always be treated " + "as labels (consistent with DataFrame behavior). 
To access " + "a value by position, use `ser.iloc[pos]`", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._values[key] + + elif key_is_scalar: + return self._get_value(key) + + # Convert generator to list before going through hashable part + # (We will iterate through the generator there to check for slices) + if is_iterator(key): + key = list(key) + + if is_hashable(key) and not isinstance(key, slice): + # Otherwise index.get_value will raise InvalidIndexError + try: + # For labels that don't resolve as scalars like tuples and frozensets + result = self._get_value(key) + + return result + + except (KeyError, TypeError, InvalidIndexError): + # InvalidIndexError for e.g. generator + # see test_series_getitem_corner_generator + if isinstance(key, tuple) and isinstance(self.index, MultiIndex): + # We still have the corner case where a tuple is a key + # in the first level of our MultiIndex + return self._get_values_tuple(key) + + if isinstance(key, slice): + # Do slice check before somewhat-costly is_bool_indexer + return self._getitem_slice(key) + + if com.is_bool_indexer(key): + key = check_bool_indexer(self.index, key) + key = np.asarray(key, dtype=bool) + return self._get_rows_with_mask(key) + + return self._get_with(key) + + def _get_with(self, key): + # other: fancy integer or otherwise + if isinstance(key, ABCDataFrame): + raise TypeError( + "Indexing a Series with DataFrame is not " + "supported, use the appropriate DataFrame column" + ) + elif isinstance(key, tuple): + return self._get_values_tuple(key) + + elif not is_list_like(key): + # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 + return self.loc[key] + + if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): + key = list(key) + + key_type = lib.infer_dtype(key, skipna=False) + + # Note: The key_type == "boolean" case should be caught by the + # com.is_bool_indexer check in __getitem__ + if key_type == "integer": + # We need to decide whether to treat this as a positional indexer + # (i.e. self.iloc) or label-based (i.e. self.loc) + if not self.index._should_fallback_to_positional: + return self.loc[key] + else: + warnings.warn( + # GH#50617 + "Series.__getitem__ treating keys as positions is deprecated. " + "In a future version, integer keys will always be treated " + "as labels (consistent with DataFrame behavior). To access " + "a value by position, use `ser.iloc[pos]`", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.iloc[key] + + # handle the dup indexing case GH#4246 + return self.loc[key] + + def _get_values_tuple(self, key: tuple): + # mpl hackaround + if com.any_none(*key): + # mpl compat if we look up e.g. 
ser[:, np.newaxis]; + # see tests.series.timeseries.test_mpl_compat_hack + # the asarray is needed to avoid returning a 2D DatetimeArray + result = np.asarray(self._values[key]) + disallow_ndim_indexing(result) + return result + + if not isinstance(self.index, MultiIndex): + raise KeyError("key of type tuple not found and not a MultiIndex") + + # If key is contained, would have returned by now + indexer, new_index = self.index.get_loc_level(key) + new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) + if using_copy_on_write() and isinstance(indexer, slice): + new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] + return new_ser.__finalize__(self) + + def _get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> Series: + new_mgr = self._mgr.get_rows_with_mask(indexer) + return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(self) + + def _get_value(self, label, takeable: bool = False): + """ + Quickly retrieve single value at passed index label. + + Parameters + ---------- + label : object + takeable : interpret the index as indexers, default False + + Returns + ------- + scalar value + """ + if takeable: + return self._values[label] + + # Similar to Index.get_value, but we do not fall back to positional + loc = self.index.get_loc(label) + + if is_integer(loc): + return self._values[loc] + + if isinstance(self.index, MultiIndex): + mi = self.index + new_values = self._values[loc] + if len(new_values) == 1 and mi.nlevels == 1: + # If more than one level left, we can not return a scalar + return new_values[0] + + new_index = mi[loc] + new_index = maybe_droplevels(new_index, label) + new_ser = self._constructor( + new_values, index=new_index, name=self.name, copy=False + ) + if using_copy_on_write() and isinstance(loc, slice): + new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] + return new_ser.__finalize__(self) + + else: + return self.iloc[loc] + + def __setitem__(self, key, value) -> None: + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= 3: + warnings.warn( + _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 + ) + + check_dict_or_set_indexers(key) + key = com.apply_if_callable(key, self) + cacher_needs_updating = self._check_is_chained_assignment_possible() + + if key is Ellipsis: + key = slice(None) + + if isinstance(key, slice): + indexer = self.index._convert_slice_indexer(key, kind="getitem") + return self._set_values(indexer, value) + + try: + self._set_with_engine(key, value) + except KeyError: + # We have a scalar (or for MultiIndex or object-dtype, scalar-like) + # key that is not present in self.index. + if is_integer(key): + if not self.index._should_fallback_to_positional: + # GH#33469 + self.loc[key] = value + else: + # positional setter + # can't use _mgr.setitem_inplace yet bc could have *both* + # KeyError and then ValueError, xref GH#45070 + warnings.warn( + # GH#50617 + "Series.__setitem__ treating keys as positions is deprecated. " + "In a future version, integer keys will always be treated " + "as labels (consistent with DataFrame behavior). 
To set " + "a value by position, use `ser.iloc[pos] = value`", + FutureWarning, + stacklevel=find_stack_level(), + ) + self._set_values(key, value) + else: + # GH#12862 adding a new key to the Series + self.loc[key] = value + + except (TypeError, ValueError, LossySetitemError): + # The key was OK, but we cannot set the value losslessly + indexer = self.index.get_loc(key) + self._set_values(indexer, value) + + except InvalidIndexError as err: + if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): + # cases with MultiIndex don't get here bc they raise KeyError + # e.g. test_basic_getitem_setitem_corner + raise KeyError( + "key of type tuple not found and not a MultiIndex" + ) from err + + if com.is_bool_indexer(key): + key = check_bool_indexer(self.index, key) + key = np.asarray(key, dtype=bool) + + if ( + is_list_like(value) + and len(value) != len(self) + and not isinstance(value, Series) + and not is_object_dtype(self.dtype) + ): + # Series will be reindexed to have matching length inside + # _where call below + # GH#44265 + indexer = key.nonzero()[0] + self._set_values(indexer, value) + return + + # otherwise with listlike other we interpret series[mask] = other + # as series[mask] = other[mask] + try: + self._where(~key, value, inplace=True) + except InvalidIndexError: + # test_where_dups + self.iloc[key] = value + return + + else: + self._set_with(key, value) + + if cacher_needs_updating: + self._maybe_update_cacher(inplace=True) + + def _set_with_engine(self, key, value) -> None: + loc = self.index.get_loc(key) + + # this is equivalent to self._values[key] = value + self._mgr.setitem_inplace(loc, value) + + def _set_with(self, key, value) -> None: + # We got here via exception-handling off of InvalidIndexError, so + # key should always be listlike at this point. + assert not isinstance(key, tuple) + + if is_iterator(key): + # Without this, the call to infer_dtype will consume the generator + key = list(key) + + if not self.index._should_fallback_to_positional: + # Regardless of the key type, we're treating it as labels + self._set_labels(key, value) + + else: + # Note: key_type == "boolean" should not occur because that + # should be caught by the is_bool_indexer check in __setitem__ + key_type = lib.infer_dtype(key, skipna=False) + + if key_type == "integer": + warnings.warn( + # GH#50617 + "Series.__setitem__ treating keys as positions is deprecated. " + "In a future version, integer keys will always be treated " + "as labels (consistent with DataFrame behavior). To set " + "a value by position, use `ser.iloc[pos] = value`", + FutureWarning, + stacklevel=find_stack_level(), + ) + self._set_values(key, value) + else: + self._set_labels(key, value) + + def _set_labels(self, key, value) -> None: + key = com.asarray_tuplesafe(key) + indexer: np.ndarray = self.index.get_indexer(key) + mask = indexer == -1 + if mask.any(): + raise KeyError(f"{key[mask]} not in index") + self._set_values(indexer, value) + + def _set_values(self, key, value) -> None: + if isinstance(key, (Index, Series)): + key = key._values + + self._mgr = self._mgr.setitem(indexer=key, value=value) + self._maybe_update_cacher() + + def _set_value(self, label, value, takeable: bool = False) -> None: + """ + Quickly set single value at passed label. + + If label is not contained, a new object is created with the label + placed at the end of the result index. + + Parameters + ---------- + label : object + Partial indexing with MultiIndex not allowed. + value : object + Scalar value. 
+ takeable : interpret the index as indexers, default False + """ + if not takeable: + try: + loc = self.index.get_loc(label) + except KeyError: + # set using a non-recursive method + self.loc[label] = value + return + else: + loc = label + + self._set_values(loc, value) + + # ---------------------------------------------------------------------- + # Lookup Caching + + @property + def _is_cached(self) -> bool: + """Return boolean indicating if self is cached or not.""" + return getattr(self, "_cacher", None) is not None + + def _get_cacher(self): + """return my cacher or None""" + cacher = getattr(self, "_cacher", None) + if cacher is not None: + cacher = cacher[1]() + return cacher + + def _reset_cacher(self) -> None: + """ + Reset the cacher. + """ + if hasattr(self, "_cacher"): + del self._cacher + + def _set_as_cached(self, item, cacher) -> None: + """ + Set the _cacher attribute on the calling object with a weakref to + cacher. + """ + if using_copy_on_write(): + return + self._cacher = (item, weakref.ref(cacher)) + + def _clear_item_cache(self) -> None: + # no-op for Series + pass + + def _check_is_chained_assignment_possible(self) -> bool: + """ + See NDFrame._check_is_chained_assignment_possible.__doc__ + """ + if self._is_view and self._is_cached: + ref = self._get_cacher() + if ref is not None and ref._is_mixed_type: + self._check_setitem_copy(t="referent", force=True) + return True + return super()._check_is_chained_assignment_possible() + + def _maybe_update_cacher( + self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False + ) -> None: + """ + See NDFrame._maybe_update_cacher.__doc__ + """ + # for CoW, we never want to update the parent DataFrame cache + # if the Series changed, but don't keep track of any cacher + if using_copy_on_write(): + return + cacher = getattr(self, "_cacher", None) + if cacher is not None: + ref: DataFrame = cacher[1]() + + # we are trying to reference a dead referent, hence + # a copy + if ref is None: + del self._cacher + elif len(self) == len(ref) and self.name in ref.columns: + # GH#42530 self.name must be in ref.columns + # to ensure column still in dataframe + # otherwise, either self or ref has swapped in new arrays + ref._maybe_cache_changed(cacher[0], self, inplace=inplace) + else: + # GH#33675 we have swapped in a new array, so parent + # reference to self is now invalid + ref._item_cache.pop(cacher[0], None) + + super()._maybe_update_cacher( + clear=clear, verify_is_copy=verify_is_copy, inplace=inplace + ) + + # ---------------------------------------------------------------------- + # Unsorted + + def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: + """ + Repeat elements of a Series. + + Returns a new Series where each element of the current Series + is repeated consecutively a given number of times. + + Parameters + ---------- + repeats : int or array of ints + The number of repetitions for each element. This should be a + non-negative integer. Repeating 0 times will return an empty + Series. + axis : None + Unused. Parameter needed for compatibility with DataFrame. + + Returns + ------- + Series + Newly created Series with repeated elements. + + See Also + -------- + Index.repeat : Equivalent function for Index. + numpy.repeat : Similar method for :class:`numpy.ndarray`. 
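+
+        Notes
+        -----
+        The index labels are repeated along with the values, so the result
+        generally contains duplicate index entries, as the examples below
+        show.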
+ + Examples + -------- + >>> s = pd.Series(['a', 'b', 'c']) + >>> s + 0 a + 1 b + 2 c + dtype: object + >>> s.repeat(2) + 0 a + 0 a + 1 b + 1 b + 2 c + 2 c + dtype: object + >>> s.repeat([1, 2, 3]) + 0 a + 1 b + 1 b + 2 c + 2 c + 2 c + dtype: object + """ + nv.validate_repeat((), {"axis": axis}) + new_index = self.index.repeat(repeats) + new_values = self._values.repeat(repeats) + return self._constructor(new_values, index=new_index, copy=False).__finalize__( + self, method="repeat" + ) + + @overload + def reset_index( + self, + level: IndexLabel = ..., + *, + drop: Literal[False] = ..., + name: Level = ..., + inplace: Literal[False] = ..., + allow_duplicates: bool = ..., + ) -> DataFrame: + ... + + @overload + def reset_index( + self, + level: IndexLabel = ..., + *, + drop: Literal[True], + name: Level = ..., + inplace: Literal[False] = ..., + allow_duplicates: bool = ..., + ) -> Series: + ... + + @overload + def reset_index( + self, + level: IndexLabel = ..., + *, + drop: bool = ..., + name: Level = ..., + inplace: Literal[True], + allow_duplicates: bool = ..., + ) -> None: + ... + + def reset_index( + self, + level: IndexLabel | None = None, + *, + drop: bool = False, + name: Level = lib.no_default, + inplace: bool = False, + allow_duplicates: bool = False, + ) -> DataFrame | Series | None: + """ + Generate a new DataFrame or Series with the index reset. + + This is useful when the index needs to be treated as a column, or + when the index is meaningless and needs to be reset to the default + before another operation. + + Parameters + ---------- + level : int, str, tuple, or list, default optional + For a Series with a MultiIndex, only remove the specified levels + from the index. Removes all levels by default. + drop : bool, default False + Just reset the index, without inserting it as a column in + the new DataFrame. + name : object, optional + The name to use for the column containing the original Series + values. Uses ``self.name`` by default. This argument is ignored + when `drop` is True. + inplace : bool, default False + Modify the Series in place (do not create a new object). + allow_duplicates : bool, default False + Allow duplicate column labels to be created. + + .. versionadded:: 1.5.0 + + Returns + ------- + Series or DataFrame or None + When `drop` is False (the default), a DataFrame is returned. + The newly created columns will come first in the DataFrame, + followed by the original Series values. + When `drop` is True, a `Series` is returned. + In either case, if ``inplace=True``, no value is returned. + + See Also + -------- + DataFrame.reset_index: Analogous function for DataFrame. + + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4], name='foo', + ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) + + Generate a DataFrame with default index. + + >>> s.reset_index() + idx foo + 0 a 1 + 1 b 2 + 2 c 3 + 3 d 4 + + To specify the name of the new column use `name`. + + >>> s.reset_index(name='values') + idx values + 0 a 1 + 1 b 2 + 2 c 3 + 3 d 4 + + To generate a new Series with the default set `drop` to True. + + >>> s.reset_index(drop=True) + 0 1 + 1 2 + 2 3 + 3 4 + Name: foo, dtype: int64 + + The `level` parameter is interesting for Series with a multi-level + index. + + >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), + ... np.array(['one', 'two', 'one', 'two'])] + >>> s2 = pd.Series( + ... range(4), name='foo', + ... index=pd.MultiIndex.from_arrays(arrays, + ... names=['a', 'b'])) + + To remove a specific level from the Index, use `level`. 
+ + >>> s2.reset_index(level='a') + a foo + b + one bar 0 + two bar 1 + one baz 2 + two baz 3 + + If `level` is not set, all levels are removed from the Index. + + >>> s2.reset_index() + a b foo + 0 bar one 0 + 1 bar two 1 + 2 baz one 2 + 3 baz two 3 + """ + inplace = validate_bool_kwarg(inplace, "inplace") + if drop: + new_index = default_index(len(self)) + if level is not None: + level_list: Sequence[Hashable] + if not isinstance(level, (tuple, list)): + level_list = [level] + else: + level_list = level + level_list = [self.index._get_level_number(lev) for lev in level_list] + if len(level_list) < self.index.nlevels: + new_index = self.index.droplevel(level_list) + + if inplace: + self.index = new_index + elif using_copy_on_write(): + new_ser = self.copy(deep=False) + new_ser.index = new_index + return new_ser.__finalize__(self, method="reset_index") + else: + return self._constructor( + self._values.copy(), index=new_index, copy=False, dtype=self.dtype + ).__finalize__(self, method="reset_index") + elif inplace: + raise TypeError( + "Cannot reset_index inplace on a Series to create a DataFrame" + ) + else: + if name is lib.no_default: + # For backwards compatibility, keep columns as [0] instead of + # [None] when self.name is None + if self.name is None: + name = 0 + else: + name = self.name + + df = self.to_frame(name) + return df.reset_index( + level=level, drop=drop, allow_duplicates=allow_duplicates + ) + return None + + # ---------------------------------------------------------------------- + # Rendering Methods + + def __repr__(self) -> str: + """ + Return a string representation for a particular Series. + """ + # pylint: disable=invalid-repr-returned + repr_params = fmt.get_series_repr_params() + return self.to_string(**repr_params) + + @overload + def to_string( + self, + buf: None = ..., + na_rep: str = ..., + float_format: str | None = ..., + header: bool = ..., + index: bool = ..., + length: bool = ..., + dtype=..., + name=..., + max_rows: int | None = ..., + min_rows: int | None = ..., + ) -> str: + ... + + @overload + def to_string( + self, + buf: FilePath | WriteBuffer[str], + na_rep: str = ..., + float_format: str | None = ..., + header: bool = ..., + index: bool = ..., + length: bool = ..., + dtype=..., + name=..., + max_rows: int | None = ..., + min_rows: int | None = ..., + ) -> None: + ... + + def to_string( + self, + buf: FilePath | WriteBuffer[str] | None = None, + na_rep: str = "NaN", + float_format: str | None = None, + header: bool = True, + index: bool = True, + length: bool = False, + dtype: bool = False, + name: bool = False, + max_rows: int | None = None, + min_rows: int | None = None, + ) -> str | None: + """ + Render a string representation of the Series. + + Parameters + ---------- + buf : StringIO-like, optional + Buffer to write to. + na_rep : str, optional + String representation of NaN to use, default 'NaN'. + float_format : one-parameter function, optional + Formatter function to apply to columns' elements if they are + floats, default None. + header : bool, default True + Add the Series header (index name). + index : bool, optional + Add index (row) labels, default True. + length : bool, default False + Add the Series length. + dtype : bool, default False + Add the Series dtype. + name : bool, default False + Add the Series name if not None. + max_rows : int, optional + Maximum number of rows to show before truncating. If None, show + all. 
+ min_rows : int, optional + The number of rows to display in a truncated repr (when number + of rows is above `max_rows`). + + Returns + ------- + str or None + String representation of Series if ``buf=None``, otherwise None. + + Examples + -------- + >>> ser = pd.Series([1, 2, 3]).to_string() + >>> ser + '0 1\\n1 2\\n2 3' + """ + formatter = fmt.SeriesFormatter( + self, + name=name, + length=length, + header=header, + index=index, + dtype=dtype, + na_rep=na_rep, + float_format=float_format, + min_rows=min_rows, + max_rows=max_rows, + ) + result = formatter.to_string() + + # catch contract violations + if not isinstance(result, str): + raise AssertionError( + "result must be of type str, type " + f"of result is {repr(type(result).__name__)}" + ) + + if buf is None: + return result + else: + if hasattr(buf, "write"): + buf.write(result) + else: + with open(buf, "w", encoding="utf-8") as f: + f.write(result) + return None + + @doc( + klass=_shared_doc_kwargs["klass"], + storage_options=_shared_docs["storage_options"], + examples=dedent( + """Examples + -------- + >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") + >>> print(s.to_markdown()) + | | animal | + |---:|:---------| + | 0 | elk | + | 1 | pig | + | 2 | dog | + | 3 | quetzal | + + Output markdown with a tabulate option. + + >>> print(s.to_markdown(tablefmt="grid")) + +----+----------+ + | | animal | + +====+==========+ + | 0 | elk | + +----+----------+ + | 1 | pig | + +----+----------+ + | 2 | dog | + +----+----------+ + | 3 | quetzal | + +----+----------+""" + ), + ) + def to_markdown( + self, + buf: IO[str] | None = None, + mode: str = "wt", + index: bool = True, + storage_options: StorageOptions | None = None, + **kwargs, + ) -> str | None: + """ + Print {klass} in Markdown-friendly format. + + Parameters + ---------- + buf : str, Path or StringIO-like, optional, default None + Buffer to write to. If None, the output is returned as a string. + mode : str, optional + Mode in which file is opened, "wt" by default. + index : bool, optional, default True + Add index (row) labels. + + {storage_options} + + .. versionadded:: 1.2.0 + + **kwargs + These parameters will be passed to `tabulate \ + `_. + + Returns + ------- + str + {klass} in Markdown-friendly format. + + Notes + ----- + Requires the `tabulate `_ package. + + {examples} + """ + return self.to_frame().to_markdown( + buf, mode, index, storage_options=storage_options, **kwargs + ) + + # ---------------------------------------------------------------------- + + def items(self) -> Iterable[tuple[Hashable, Any]]: + """ + Lazily iterate over (index, value) tuples. + + This method returns an iterable tuple (index, value). This is + convenient if you want to create a lazy iterator. + + Returns + ------- + iterable + Iterable of tuples containing the (index, value) pairs from a + Series. + + See Also + -------- + DataFrame.items : Iterate over (column name, Series) pairs. + DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. + + Examples + -------- + >>> s = pd.Series(['A', 'B', 'C']) + >>> for index, value in s.items(): + ... print(f"Index : {index}, Value : {value}") + Index : 0, Value : A + Index : 1, Value : B + Index : 2, Value : C + """ + return zip(iter(self.index), iter(self)) + + # ---------------------------------------------------------------------- + # Misc public methods + + def keys(self) -> Index: + """ + Return alias for index. + + Returns + ------- + Index + Index of the Series. 
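+
+        Notes
+        -----
+        ``Series.keys()`` simply returns ``Series.index``; it exists so a
+        Series can be used where a dict-like object is expected.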
+ + Examples + -------- + >>> s = pd.Series([1, 2, 3], index=[0, 1, 2]) + >>> s.keys() + Index([0, 1, 2], dtype='int64') + """ + return self.index + + def to_dict(self, into: type[dict] = dict) -> dict: + """ + Convert Series to {label -> value} dict or dict-like object. + + Parameters + ---------- + into : class, default dict + The collections.abc.Mapping subclass to use as the return + object. Can be the actual class or an empty + instance of the mapping type you want. If you want a + collections.defaultdict, you must pass it initialized. + + Returns + ------- + collections.abc.Mapping + Key-value representation of Series. + + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4]) + >>> s.to_dict() + {0: 1, 1: 2, 2: 3, 3: 4} + >>> from collections import OrderedDict, defaultdict + >>> s.to_dict(OrderedDict) + OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) + >>> dd = defaultdict(list) + >>> s.to_dict(dd) + defaultdict(, {0: 1, 1: 2, 2: 3, 3: 4}) + """ + # GH16122 + into_c = com.standardize_mapping(into) + + if is_object_dtype(self.dtype) or isinstance(self.dtype, ExtensionDtype): + return into_c((k, maybe_box_native(v)) for k, v in self.items()) + else: + # Not an object dtype => all types will be the same so let the default + # indexer return native python type + return into_c(self.items()) + + def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: + """ + Convert Series to DataFrame. + + Parameters + ---------- + name : object, optional + The passed name should substitute for the series name (if it has + one). + + Returns + ------- + DataFrame + DataFrame representation of Series. + + Examples + -------- + >>> s = pd.Series(["a", "b", "c"], + ... name="vals") + >>> s.to_frame() + vals + 0 a + 1 b + 2 c + """ + columns: Index + if name is lib.no_default: + name = self.name + if name is None: + # default to [0], same as we would get with DataFrame(self) + columns = default_index(1) + else: + columns = Index([name]) + else: + columns = Index([name]) + + mgr = self._mgr.to_2d_mgr(columns) + df = self._constructor_expanddim_from_mgr(mgr, axes=mgr.axes) + return df.__finalize__(self, method="to_frame") + + def _set_name( + self, name, inplace: bool = False, deep: bool | None = None + ) -> Series: + """ + Set the Series name. + + Parameters + ---------- + name : str + inplace : bool + Whether to modify `self` directly or return a copy. + deep : bool|None, default None + Whether to do a deep copy, a shallow copy, or Copy on Write(None) + """ + inplace = validate_bool_kwarg(inplace, "inplace") + ser = self if inplace else self.copy(deep and not using_copy_on_write()) + ser.name = name + return ser + + @Appender( + dedent( + """ + Examples + -------- + >>> ser = pd.Series([390., 350., 30., 20.], + ... index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], + ... name="Max Speed") + >>> ser + Falcon 390.0 + Falcon 350.0 + Parrot 30.0 + Parrot 20.0 + Name: Max Speed, dtype: float64 + >>> ser.groupby(["a", "b", "a", "b"]).mean() + a 210.0 + b 185.0 + Name: Max Speed, dtype: float64 + >>> ser.groupby(level=0).mean() + Falcon 370.0 + Parrot 25.0 + Name: Max Speed, dtype: float64 + >>> ser.groupby(ser > 100).mean() + Max Speed + False 25.0 + True 370.0 + Name: Max Speed, dtype: float64 + + **Grouping by Indexes** + + We can groupby different levels of a hierarchical index + using the `level` parameter: + + >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], + ... 
['Captive', 'Wild', 'Captive', 'Wild']] + >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) + >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") + >>> ser + Animal Type + Falcon Captive 390.0 + Wild 350.0 + Parrot Captive 30.0 + Wild 20.0 + Name: Max Speed, dtype: float64 + >>> ser.groupby(level=0).mean() + Animal + Falcon 370.0 + Parrot 25.0 + Name: Max Speed, dtype: float64 + >>> ser.groupby(level="Type").mean() + Type + Captive 210.0 + Wild 185.0 + Name: Max Speed, dtype: float64 + + We can also choose to include `NA` in group keys or not by defining + `dropna` parameter, the default setting is `True`. + + >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) + >>> ser.groupby(level=0).sum() + a 3 + b 3 + dtype: int64 + + >>> ser.groupby(level=0, dropna=False).sum() + a 3 + b 3 + NaN 3 + dtype: int64 + + >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] + >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") + >>> ser.groupby(["a", "b", "a", np.nan]).mean() + a 210.0 + b 350.0 + Name: Max Speed, dtype: float64 + + >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() + a 210.0 + b 350.0 + NaN 20.0 + Name: Max Speed, dtype: float64 + """ + ) + ) + @Appender(_shared_docs["groupby"] % _shared_doc_kwargs) + def groupby( + self, + by=None, + axis: Axis = 0, + level: IndexLabel | None = None, + as_index: bool = True, + sort: bool = True, + group_keys: bool = True, + observed: bool | lib.NoDefault = lib.no_default, + dropna: bool = True, + ) -> SeriesGroupBy: + from pandas.core.groupby.generic import SeriesGroupBy + + if level is None and by is None: + raise TypeError("You have to supply one of 'by' and 'level'") + if not as_index: + raise TypeError("as_index=False only valid with DataFrame") + axis = self._get_axis_number(axis) + + return SeriesGroupBy( + obj=self, + keys=by, + axis=axis, + level=level, + as_index=as_index, + sort=sort, + group_keys=group_keys, + observed=observed, + dropna=dropna, + ) + + # ---------------------------------------------------------------------- + # Statistics, overridden ndarray methods + + # TODO: integrate bottleneck + def count(self): + """ + Return number of non-NA/null observations in the Series. + + Returns + ------- + int or Series (if level specified) + Number of non-null values in the Series. + + See Also + -------- + DataFrame.count : Count non-NA cells for each column or row. + + Examples + -------- + >>> s = pd.Series([0.0, 1.0, np.nan]) + >>> s.count() + 2 + """ + return notna(self._values).sum().astype("int64") + + def mode(self, dropna: bool = True) -> Series: + """ + Return the mode(s) of the Series. + + The mode is the value that appears most often. There can be multiple modes. + + Always returns Series even if only one value is returned. + + Parameters + ---------- + dropna : bool, default True + Don't consider counts of NaN/NaT. + + Returns + ------- + Series + Modes of the Series in sorted order. 
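+
+        Notes
+        -----
+        With ``dropna=False``, missing values are counted like any other
+        value and can themselves appear among the modes, as the examples
+        below show.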
+
+        Examples
+        --------
+        >>> s = pd.Series([2, 4, 2, 2, 4, None])
+        >>> s.mode()
+        0    2.0
+        dtype: float64
+
+        More than one mode:
+
+        >>> s = pd.Series([2, 4, 8, 2, 4, None])
+        >>> s.mode()
+        0    2.0
+        1    4.0
+        dtype: float64
+
+        With and without considering null value:
+
+        >>> s = pd.Series([2, 4, None, None, 4, None])
+        >>> s.mode(dropna=False)
+        0   NaN
+        dtype: float64
+        >>> s = pd.Series([2, 4, None, None, 4, None])
+        >>> s.mode()
+        0    4.0
+        dtype: float64
+        """
+        # TODO: Add option for bins like value_counts()
+        values = self._values
+        if isinstance(values, np.ndarray):
+            res_values = algorithms.mode(values, dropna=dropna)
+        else:
+            res_values = values._mode(dropna=dropna)
+
+        # Ensure index is type stable (should always use int index)
+        return self._constructor(
+            res_values,
+            index=range(len(res_values)),
+            name=self.name,
+            copy=False,
+            dtype=self.dtype,
+        ).__finalize__(self, method="mode")
+
+    def unique(self) -> ArrayLike:  # pylint: disable=useless-parent-delegation
+        """
+        Return unique values of Series object.
+
+        Uniques are returned in order of appearance. Hash table-based unique,
+        therefore does NOT sort.
+
+        Returns
+        -------
+        ndarray or ExtensionArray
+            The unique values returned as a NumPy array. See Notes.
+
+        See Also
+        --------
+        Series.drop_duplicates : Return Series with duplicate values removed.
+        unique : Top-level unique method for any 1-d array-like object.
+        Index.unique : Return Index with unique values from an Index object.
+
+        Notes
+        -----
+        Returns the unique values as a NumPy array. In case of an
+        extension-array backed Series, a new
+        :class:`~api.extensions.ExtensionArray` of that type with just
+        the unique values is returned. This includes
+
+            * Categorical
+            * Period
+            * Datetime with Timezone
+            * Datetime without Timezone
+            * Timedelta
+            * Interval
+            * Sparse
+            * IntegerNA
+
+        See Examples section.
+
+        Examples
+        --------
+        >>> pd.Series([2, 1, 3, 3], name='A').unique()
+        array([2, 1, 3])
+
+        >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
+        <DatetimeArray>
+        ['2016-01-01 00:00:00']
+        Length: 1, dtype: datetime64[ns]
+
+        >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern')
+        ...            for _ in range(3)]).unique()
+        <DatetimeArray>
+        ['2016-01-01 00:00:00-05:00']
+        Length: 1, dtype: datetime64[ns, US/Eastern]
+
+        A Categorical will return categories in the order of
+        appearance and with the same dtype.
+
+        >>> pd.Series(pd.Categorical(list('baabc'))).unique()
+        ['b', 'a', 'c']
+        Categories (3, object): ['a', 'b', 'c']
+        >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'),
+        ...                          ordered=True)).unique()
+        ['b', 'a', 'c']
+        Categories (3, object): ['a' < 'b' < 'c']
+        """
+        return super().unique()
+
+    @overload
+    def drop_duplicates(
+        self,
+        *,
+        keep: DropKeep = ...,
+        inplace: Literal[False] = ...,
+        ignore_index: bool = ...,
+    ) -> Series:
+        ...
+
+    @overload
+    def drop_duplicates(
+        self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ...
+    ) -> None:
+        ...
+
+    @overload
+    def drop_duplicates(
+        self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ...
+    ) -> Series | None:
+        ...
+
+    def drop_duplicates(
+        self,
+        *,
+        keep: DropKeep = "first",
+        inplace: bool = False,
+        ignore_index: bool = False,
+    ) -> Series | None:
+        """
+        Return Series with duplicate values removed.
+
+        Parameters
+        ----------
+        keep : {'first', 'last', ``False``}, default 'first'
+            Method to handle dropping duplicates:
+
+            - 'first' : Drop duplicates except for the first occurrence.
+ - 'last' : Drop duplicates except for the last occurrence. + - ``False`` : Drop all duplicates. + + inplace : bool, default ``False`` + If ``True``, performs operation inplace and returns None. + + ignore_index : bool, default ``False`` + If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. + + .. versionadded:: 2.0.0 + + Returns + ------- + Series or None + Series with duplicates dropped or None if ``inplace=True``. + + See Also + -------- + Index.drop_duplicates : Equivalent method on Index. + DataFrame.drop_duplicates : Equivalent method on DataFrame. + Series.duplicated : Related method on Series, indicating duplicate + Series values. + Series.unique : Return unique values as an array. + + Examples + -------- + Generate a Series with duplicated entries. + + >>> s = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama', 'hippo'], + ... name='animal') + >>> s + 0 llama + 1 cow + 2 llama + 3 beetle + 4 llama + 5 hippo + Name: animal, dtype: object + + With the 'keep' parameter, the selection behaviour of duplicated values + can be changed. The value 'first' keeps the first occurrence for each + set of duplicated entries. The default value of keep is 'first'. + + >>> s.drop_duplicates() + 0 llama + 1 cow + 3 beetle + 5 hippo + Name: animal, dtype: object + + The value 'last' for parameter 'keep' keeps the last occurrence for + each set of duplicated entries. + + >>> s.drop_duplicates(keep='last') + 1 cow + 3 beetle + 4 llama + 5 hippo + Name: animal, dtype: object + + The value ``False`` for parameter 'keep' discards all sets of + duplicated entries. + + >>> s.drop_duplicates(keep=False) + 1 cow + 3 beetle + 5 hippo + Name: animal, dtype: object + """ + inplace = validate_bool_kwarg(inplace, "inplace") + result = super().drop_duplicates(keep=keep) + + if ignore_index: + result.index = default_index(len(result)) + + if inplace: + self._update_inplace(result) + return None + else: + return result + + def duplicated(self, keep: DropKeep = "first") -> Series: + """ + Indicate duplicate Series values. + + Duplicated values are indicated as ``True`` values in the resulting + Series. Either all duplicates, all except the first or all except the + last occurrence of duplicates can be indicated. + + Parameters + ---------- + keep : {'first', 'last', False}, default 'first' + Method to handle dropping duplicates: + + - 'first' : Mark duplicates as ``True`` except for the first + occurrence. + - 'last' : Mark duplicates as ``True`` except for the last + occurrence. + - ``False`` : Mark all duplicates as ``True``. + + Returns + ------- + Series[bool] + Series indicating whether each value has occurred in the + preceding values. + + See Also + -------- + Index.duplicated : Equivalent method on pandas.Index. + DataFrame.duplicated : Equivalent method on pandas.DataFrame. + Series.drop_duplicates : Remove duplicate values from Series. 
+ + Examples + -------- + By default, for each set of duplicated values, the first occurrence is + set on False and all others on True: + + >>> animals = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama']) + >>> animals.duplicated() + 0 False + 1 False + 2 True + 3 False + 4 True + dtype: bool + + which is equivalent to + + >>> animals.duplicated(keep='first') + 0 False + 1 False + 2 True + 3 False + 4 True + dtype: bool + + By using 'last', the last occurrence of each set of duplicated values + is set on False and all others on True: + + >>> animals.duplicated(keep='last') + 0 True + 1 False + 2 True + 3 False + 4 False + dtype: bool + + By setting keep on ``False``, all duplicates are True: + + >>> animals.duplicated(keep=False) + 0 True + 1 False + 2 True + 3 False + 4 True + dtype: bool + """ + res = self._duplicated(keep=keep) + result = self._constructor(res, index=self.index, copy=False) + return result.__finalize__(self, method="duplicated") + + def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: + """ + Return the row label of the minimum value. + + If multiple values equal the minimum, the first row label with that + value is returned. + + Parameters + ---------- + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + skipna : bool, default True + Exclude NA/null values. If the entire Series is NA, the result + will be NA. + *args, **kwargs + Additional arguments and keywords have no effect but might be + accepted for compatibility with NumPy. + + Returns + ------- + Index + Label of the minimum value. + + Raises + ------ + ValueError + If the Series is empty. + + See Also + -------- + numpy.argmin : Return indices of the minimum values + along the given axis. + DataFrame.idxmin : Return index of first occurrence of minimum + over requested axis. + Series.idxmax : Return index *label* of the first occurrence + of maximum of values. + + Notes + ----- + This method is the Series version of ``ndarray.argmin``. This method + returns the label of the minimum, while ``ndarray.argmin`` returns + the position. To get the position, use ``series.values.argmin()``. + + Examples + -------- + >>> s = pd.Series(data=[1, None, 4, 1], + ... index=['A', 'B', 'C', 'D']) + >>> s + A 1.0 + B NaN + C 4.0 + D 1.0 + dtype: float64 + + >>> s.idxmin() + 'A' + + If `skipna` is False and there is an NA value in the data, + the function returns ``nan``. + + >>> s.idxmin(skipna=False) + nan + """ + axis = self._get_axis_number(axis) + with warnings.catch_warnings(): + # TODO(3.0): this catching/filtering can be removed + # ignore warning produced by argmin since we will issue a different + # warning for idxmin + warnings.simplefilter("ignore") + i = self.argmin(axis, skipna, *args, **kwargs) + + if i == -1: + # GH#43587 give correct NA value for Index. + warnings.warn( + f"The behavior of {type(self).__name__}.idxmin with all-NA " + "values, or any-NA and skipna=False, is deprecated. In a future " + "version this will raise ValueError", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.index._na_value + return self.index[i] + + def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: + """ + Return the row label of the maximum value. + + If multiple values equal the maximum, the first row label with that + value is returned. + + Parameters + ---------- + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + skipna : bool, default True + Exclude NA/null values. 
If the entire Series is NA, the result + will be NA. + *args, **kwargs + Additional arguments and keywords have no effect but might be + accepted for compatibility with NumPy. + + Returns + ------- + Index + Label of the maximum value. + + Raises + ------ + ValueError + If the Series is empty. + + See Also + -------- + numpy.argmax : Return indices of the maximum values + along the given axis. + DataFrame.idxmax : Return index of first occurrence of maximum + over requested axis. + Series.idxmin : Return index *label* of the first occurrence + of minimum of values. + + Notes + ----- + This method is the Series version of ``ndarray.argmax``. This method + returns the label of the maximum, while ``ndarray.argmax`` returns + the position. To get the position, use ``series.values.argmax()``. + + Examples + -------- + >>> s = pd.Series(data=[1, None, 4, 3, 4], + ... index=['A', 'B', 'C', 'D', 'E']) + >>> s + A 1.0 + B NaN + C 4.0 + D 3.0 + E 4.0 + dtype: float64 + + >>> s.idxmax() + 'C' + + If `skipna` is False and there is an NA value in the data, + the function returns ``nan``. + + >>> s.idxmax(skipna=False) + nan + """ + axis = self._get_axis_number(axis) + with warnings.catch_warnings(): + # TODO(3.0): this catching/filtering can be removed + # ignore warning produced by argmax since we will issue a different + # warning for argmax + warnings.simplefilter("ignore") + i = self.argmax(axis, skipna, *args, **kwargs) + + if i == -1: + # GH#43587 give correct NA value for Index. + warnings.warn( + f"The behavior of {type(self).__name__}.idxmax with all-NA " + "values, or any-NA and skipna=False, is deprecated. In a future " + "version this will raise ValueError", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.index._na_value + return self.index[i] + + def round(self, decimals: int = 0, *args, **kwargs) -> Series: + """ + Round each value in a Series to the given number of decimals. + + Parameters + ---------- + decimals : int, default 0 + Number of decimal places to round to. If decimals is negative, + it specifies the number of positions to the left of the decimal point. + *args, **kwargs + Additional arguments and keywords have no effect but might be + accepted for compatibility with NumPy. + + Returns + ------- + Series + Rounded values of the Series. + + See Also + -------- + numpy.around : Round values of an np.array. + DataFrame.round : Round values of a DataFrame. + + Examples + -------- + >>> s = pd.Series([0.1, 1.3, 2.7]) + >>> s.round() + 0 0.0 + 1 1.0 + 2 3.0 + dtype: float64 + """ + nv.validate_round(args, kwargs) + result = self._values.round(decimals) + result = self._constructor(result, index=self.index, copy=False).__finalize__( + self, method="round" + ) + + return result + + @overload + def quantile( + self, q: float = ..., interpolation: QuantileInterpolation = ... + ) -> float: + ... + + @overload + def quantile( + self, + q: Sequence[float] | AnyArrayLike, + interpolation: QuantileInterpolation = ..., + ) -> Series: + ... + + @overload + def quantile( + self, + q: float | Sequence[float] | AnyArrayLike = ..., + interpolation: QuantileInterpolation = ..., + ) -> float | Series: + ... + + def quantile( + self, + q: float | Sequence[float] | AnyArrayLike = 0.5, + interpolation: QuantileInterpolation = "linear", + ) -> float | Series: + """ + Return value at the given quantile. + + Parameters + ---------- + q : float or array-like, default 0.5 (50% quantile) + The quantile(s) to compute, which can lie in range: 0 <= q <= 1. 
+        interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
+            This optional parameter specifies the interpolation method to use,
+            when the desired quantile lies between two data points `i` and `j`:
+
+                * linear: `i + (j - i) * fraction`, where `fraction` is the
+                  fractional part of the index surrounded by `i` and `j`.
+                * lower: `i`.
+                * higher: `j`.
+                * nearest: `i` or `j` whichever is nearest.
+                * midpoint: (`i` + `j`) / 2.
+
+        Returns
+        -------
+        float or Series
+            If ``q`` is an array, a Series will be returned where the
+            index is ``q`` and the values are the quantiles, otherwise
+            a float will be returned.
+
+        See Also
+        --------
+        core.window.Rolling.quantile : Calculate the rolling quantile.
+        numpy.percentile : Returns the q-th percentile(s) of the array elements.
+
+        Examples
+        --------
+        >>> s = pd.Series([1, 2, 3, 4])
+        >>> s.quantile(.5)
+        2.5
+        >>> s.quantile([.25, .5, .75])
+        0.25    1.75
+        0.50    2.50
+        0.75    3.25
+        dtype: float64
+        """
+        validate_percentile(q)
+
+        # We dispatch to DataFrame so that core.internals only has to worry
+        # about 2D cases.
+        df = self.to_frame()
+
+        result = df.quantile(q=q, interpolation=interpolation, numeric_only=False)
+        if result.ndim == 2:
+            result = result.iloc[:, 0]
+
+        if is_list_like(q):
+            result.name = self.name
+            idx = Index(q, dtype=np.float64)
+            return self._constructor(result, index=idx, name=self.name)
+        else:
+            # scalar
+            return result.iloc[0]
+
+    def corr(
+        self,
+        other: Series,
+        method: CorrelationMethod = "pearson",
+        min_periods: int | None = None,
+    ) -> float:
+        """
+        Compute correlation with `other` Series, excluding missing values.
+
+        The two `Series` objects are not required to be the same length and will be
+        aligned internally before the correlation function is applied.
+
+        Parameters
+        ----------
+        other : Series
+            Series with which to compute the correlation.
+        method : {'pearson', 'kendall', 'spearman'} or callable
+            Method used to compute correlation:
+
+            - pearson : Standard correlation coefficient
+            - kendall : Kendall Tau correlation coefficient
+            - spearman : Spearman rank correlation
+            - callable: Callable with input two 1d ndarrays and returning a float.
+
+            .. warning::
+                Note that the returned matrix from corr will have 1 along the
+                diagonals and will be symmetric regardless of the callable's
+                behavior.
+        min_periods : int, optional
+            Minimum number of observations needed to have a valid result.
+
+        Returns
+        -------
+        float
+            Correlation with other.
+
+        See Also
+        --------
+        DataFrame.corr : Compute pairwise correlation between columns.
+        DataFrame.corrwith : Compute pairwise correlation with another
+            DataFrame or Series.
+
+        Notes
+        -----
+        Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations.
+
+        * `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_
+        * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_
+        * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_
+
+        Automatic data alignment: as with all pandas operations, automatic data alignment is performed for this method.
+        ``corr()`` automatically considers values with matching indices.
+
+        Examples
+        --------
+        >>> def histogram_intersection(a, b):
+        ...     v = np.minimum(a, b).sum().round(decimals=1)
+        ...     return v
+        >>> s1 = pd.Series([.2, .0, .6, .2])
+        >>> s2 = pd.Series([.3, .6, .0, .1])
+        >>> s1.corr(s2, method=histogram_intersection)
+        0.3
+
+        Pandas auto-aligns the values with matching indices
+
+        >>> s1 = pd.Series([1, 2, 3], index=[0, 1, 2])
+        >>> s2 = pd.Series([1, 2, 3], index=[2, 1, 0])
+        >>> s1.corr(s2)
+        -1.0
+        """  # noqa: E501
+        this, other = self.align(other, join="inner", copy=False)
+        if len(this) == 0:
+            return np.nan
+
+        this_values = this.to_numpy(dtype=float, na_value=np.nan, copy=False)
+        other_values = other.to_numpy(dtype=float, na_value=np.nan, copy=False)
+
+        if method in ["pearson", "spearman", "kendall"] or callable(method):
+            return nanops.nancorr(
+                this_values, other_values, method=method, min_periods=min_periods
+            )
+
+        raise ValueError(
+            "method must be either 'pearson', "
+            "'spearman', 'kendall', or a callable, "
+            f"'{method}' was supplied"
+        )
+
+    def cov(
+        self,
+        other: Series,
+        min_periods: int | None = None,
+        ddof: int | None = 1,
+    ) -> float:
+        """
+        Compute covariance with Series, excluding missing values.
+
+        The two `Series` objects are not required to be the same length and
+        will be aligned internally before the covariance is calculated.
+
+        Parameters
+        ----------
+        other : Series
+            Series with which to compute the covariance.
+        min_periods : int, optional
+            Minimum number of observations needed to have a valid result.
+        ddof : int, default 1
+            Delta degrees of freedom. The divisor used in calculations
+            is ``N - ddof``, where ``N`` represents the number of elements.
+
+        Returns
+        -------
+        float
+            Covariance between Series and other normalized by N-1
+            (unbiased estimator).
+
+        See Also
+        --------
+        DataFrame.cov : Compute pairwise covariance of columns.
+
+        Examples
+        --------
+        >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
+        >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
+        >>> s1.cov(s2)
+        -0.01685762652715874
+        """
+        this, other = self.align(other, join="inner", copy=False)
+        if len(this) == 0:
+            return np.nan
+        this_values = this.to_numpy(dtype=float, na_value=np.nan, copy=False)
+        other_values = other.to_numpy(dtype=float, na_value=np.nan, copy=False)
+        return nanops.nancov(
+            this_values, other_values, min_periods=min_periods, ddof=ddof
+        )
+
+    @doc(
+        klass="Series",
+        extra_params="",
+        other_klass="DataFrame",
+        examples=dedent(
+            """
+        Difference with previous row
+
+        >>> s = pd.Series([1, 1, 2, 3, 5, 8])
+        >>> s.diff()
+        0    NaN
+        1    0.0
+        2    1.0
+        3    1.0
+        4    2.0
+        5    3.0
+        dtype: float64
+
+        Difference with 3rd previous row
+
+        >>> s.diff(periods=3)
+        0    NaN
+        1    NaN
+        2    NaN
+        3    2.0
+        4    4.0
+        5    6.0
+        dtype: float64
+
+        Difference with following row
+
+        >>> s.diff(periods=-1)
+        0    0.0
+        1   -1.0
+        2   -1.0
+        3   -2.0
+        4   -3.0
+        5    NaN
+        dtype: float64
+
+        Overflow in input dtype
+
+        >>> s = pd.Series([1, 0], dtype=np.uint8)
+        >>> s.diff()
+        0      NaN
+        1    255.0
+        dtype: float64"""
+        ),
+    )
+    def diff(self, periods: int = 1) -> Series:
+        """
+        First discrete difference of element.
+
+        Calculates the difference of a {klass} element compared with another
+        element in the {klass} (default is element in previous row).
+
+        Parameters
+        ----------
+        periods : int, default 1
+            Periods to shift for calculating difference, accepts negative
+            values.
+        {extra_params}
+        Returns
+        -------
+        {klass}
+            First differences of the Series.
+
+        See Also
+        --------
+        {klass}.pct_change: Percent change over given number of periods.
+        {klass}.shift: Shift index by desired number of periods with an
+            optional time freq.
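+        {klass}.cumsum: Return cumulative sum over requested axis.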
+        {other_klass}.diff: First discrete difference of object.
+
+        Notes
+        -----
+        For boolean dtypes, this uses :meth:`operator.xor` rather than
+        :meth:`operator.sub`.
+        The result is calculated according to the current dtype in {klass},
+        however the dtype of the result is always float64.
+
+        Examples
+        --------
+        {examples}
+        """
+        result = algorithms.diff(self._values, periods)
+        return self._constructor(result, index=self.index, copy=False).__finalize__(
+            self, method="diff"
+        )
+
+    def autocorr(self, lag: int = 1) -> float:
+        """
+        Compute the lag-N autocorrelation.
+
+        This method computes the Pearson correlation between
+        the Series and its shifted self.
+
+        Parameters
+        ----------
+        lag : int, default 1
+            Number of lags to apply before performing autocorrelation.
+
+        Returns
+        -------
+        float
+            The Pearson correlation between self and self.shift(lag).
+
+        See Also
+        --------
+        Series.corr : Compute the correlation between two Series.
+        Series.shift : Shift index by desired number of periods.
+        DataFrame.corr : Compute pairwise correlation of columns.
+        DataFrame.corrwith : Compute pairwise correlation between rows or
+            columns of two DataFrame objects.
+
+        Notes
+        -----
+        If the Pearson correlation is not well defined, 'NaN' is returned.
+
+        Examples
+        --------
+        >>> s = pd.Series([0.25, 0.5, 0.2, -0.05])
+        >>> s.autocorr()  # doctest: +ELLIPSIS
+        0.10355...
+        >>> s.autocorr(lag=2)  # doctest: +ELLIPSIS
+        -0.99999...
+
+        If the Pearson correlation is not well defined, then 'NaN' is returned.
+
+        >>> s = pd.Series([1, 0, 0, 0])
+        >>> s.autocorr()
+        nan
+        """
+        return self.corr(cast(Series, self.shift(lag)))
+
+    def dot(self, other: AnyArrayLike) -> Series | np.ndarray:
+        """
+        Compute the dot product between the Series and the columns of other.
+
+        This method computes the dot product between the Series and another
+        one, or the Series and each column of a DataFrame, or the Series and
+        each column of an array.
+
+        It can also be called using `self @ other`.
+
+        Parameters
+        ----------
+        other : Series, DataFrame or array-like
+            The other object to compute the dot product with its columns.
+
+        Returns
+        -------
+        scalar, Series or numpy.ndarray
+            Return the dot product of the Series and other if other is a
+            Series, the Series of the dot product of the Series and each
+            column of other if other is a DataFrame, or the numpy.ndarray of
+            the dot product of the Series and each column if other is a
+            numpy array.
+
+        See Also
+        --------
+        DataFrame.dot: Compute the matrix product with the DataFrame.
+        Series.mul: Multiplication of series and other, element-wise.
+
+        Notes
+        -----
+        The Series and other have to share the same index if other is a Series
+        or a DataFrame.
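+        If the two indexes are not equal as sets, no partial alignment is
+        attempted; for example, ``pd.Series([1, 2]).dot(pd.Series([1, 2, 3]))``
+        raises ``ValueError: matrices are not aligned``.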
+ + Examples + -------- + >>> s = pd.Series([0, 1, 2, 3]) + >>> other = pd.Series([-1, 2, -3, 4]) + >>> s.dot(other) + 8 + >>> s @ other + 8 + >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) + >>> s.dot(df) + 0 24 + 1 14 + dtype: int64 + >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) + >>> s.dot(arr) + array([24, 14]) + """ + if isinstance(other, (Series, ABCDataFrame)): + common = self.index.union(other.index) + if len(common) > len(self.index) or len(common) > len(other.index): + raise ValueError("matrices are not aligned") + + left = self.reindex(index=common, copy=False) + right = other.reindex(index=common, copy=False) + lvals = left.values + rvals = right.values + else: + lvals = self.values + rvals = np.asarray(other) + if lvals.shape[0] != rvals.shape[0]: + raise Exception( + f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" + ) + + if isinstance(other, ABCDataFrame): + return self._constructor( + np.dot(lvals, rvals), index=other.columns, copy=False + ).__finalize__(self, method="dot") + elif isinstance(other, Series): + return np.dot(lvals, rvals) + elif isinstance(rvals, np.ndarray): + return np.dot(lvals, rvals) + else: # pragma: no cover + raise TypeError(f"unsupported type: {type(other)}") + + def __matmul__(self, other): + """ + Matrix multiplication using binary `@` operator. + """ + return self.dot(other) + + def __rmatmul__(self, other): + """ + Matrix multiplication using binary `@` operator. + """ + return self.dot(np.transpose(other)) + + @doc(base.IndexOpsMixin.searchsorted, klass="Series") + # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" + def searchsorted( # type: ignore[override] + self, + value: NumpyValueArrayLike | ExtensionArray, + side: Literal["left", "right"] = "left", + sorter: NumpySorter | None = None, + ) -> npt.NDArray[np.intp] | np.intp: + return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) + + # ------------------------------------------------------------------- + # Combination + + def _append( + self, to_append, ignore_index: bool = False, verify_integrity: bool = False + ): + from pandas.core.reshape.concat import concat + + if isinstance(to_append, (list, tuple)): + to_concat = [self] + to_concat.extend(to_append) + else: + to_concat = [self, to_append] + if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): + msg = "to_append should be a Series or list/tuple of Series, got DataFrame" + raise TypeError(msg) + return concat( + to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity + ) + + @doc( + _shared_docs["compare"], + dedent( + """ + Returns + ------- + Series or DataFrame + If axis is 0 or 'index' the result will be a Series. + The resulting index will be a MultiIndex with 'self' and 'other' + stacked alternately at the inner level. + + If axis is 1 or 'columns' the result will be a DataFrame. + It will have two columns namely 'self' and 'other'. + + See Also + -------- + DataFrame.compare : Compare with another DataFrame and show differences. + + Notes + ----- + Matching NaNs will not appear as a difference. 
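+
+    The ``result_names`` argument can be used to relabel the two sides;
+    for example, ``s1.compare(s2, result_names=("left", "right"))`` uses
+    ``"left"`` and ``"right"`` in place of the default ``"self"`` and
+    ``"other"``.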
+ + Examples + -------- + >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) + >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) + + Align the differences on columns + + >>> s1.compare(s2) + self other + 1 b a + 3 d b + + Stack the differences on indices + + >>> s1.compare(s2, align_axis=0) + 1 self b + other a + 3 self d + other b + dtype: object + + Keep all original rows + + >>> s1.compare(s2, keep_shape=True) + self other + 0 NaN NaN + 1 b a + 2 NaN NaN + 3 d b + 4 NaN NaN + + Keep all original rows and also all original values + + >>> s1.compare(s2, keep_shape=True, keep_equal=True) + self other + 0 a a + 1 b a + 2 c c + 3 d b + 4 e e + """ + ), + klass=_shared_doc_kwargs["klass"], + ) + def compare( + self, + other: Series, + align_axis: Axis = 1, + keep_shape: bool = False, + keep_equal: bool = False, + result_names: Suffixes = ("self", "other"), + ) -> DataFrame | Series: + return super().compare( + other=other, + align_axis=align_axis, + keep_shape=keep_shape, + keep_equal=keep_equal, + result_names=result_names, + ) + + def combine( + self, + other: Series | Hashable, + func: Callable[[Hashable, Hashable], Hashable], + fill_value: Hashable | None = None, + ) -> Series: + """ + Combine the Series with a Series or scalar according to `func`. + + Combine the Series and `other` using `func` to perform elementwise + selection for combined Series. + `fill_value` is assumed when value is missing at some index + from one of the two objects being combined. + + Parameters + ---------- + other : Series or scalar + The value(s) to be combined with the `Series`. + func : function + Function that takes two scalars as inputs and returns an element. + fill_value : scalar, optional + The value to assume when an index is missing from + one Series or the other. The default specifies to use the + appropriate NaN value for the underlying dtype of the Series. + + Returns + ------- + Series + The result of combining the Series with the other object. + + See Also + -------- + Series.combine_first : Combine Series values, choosing the calling + Series' values first. + + Examples + -------- + Consider 2 Datasets ``s1`` and ``s2`` containing + highest clocked speeds of different birds. + + >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) + >>> s1 + falcon 330.0 + eagle 160.0 + dtype: float64 + >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) + >>> s2 + falcon 345.0 + eagle 200.0 + duck 30.0 + dtype: float64 + + Now, to combine the two datasets and view the highest speeds + of the birds across the two datasets + + >>> s1.combine(s2, max) + duck NaN + eagle 200.0 + falcon 345.0 + dtype: float64 + + In the previous example, the resulting value for duck is missing, + because the maximum of a NaN and a float is a NaN. + So, in the example, we set ``fill_value=0``, + so the maximum value returned will be the value from some dataset. 
+ + >>> s1.combine(s2, max, fill_value=0) + duck 30.0 + eagle 200.0 + falcon 345.0 + dtype: float64 + """ + if fill_value is None: + fill_value = na_value_for_dtype(self.dtype, compat=False) + + if isinstance(other, Series): + # If other is a Series, result is based on union of Series, + # so do this element by element + new_index = self.index.union(other.index) + new_name = ops.get_op_result_name(self, other) + new_values = np.empty(len(new_index), dtype=object) + with np.errstate(all="ignore"): + for i, idx in enumerate(new_index): + lv = self.get(idx, fill_value) + rv = other.get(idx, fill_value) + new_values[i] = func(lv, rv) + else: + # Assume that other is a scalar, so apply the function for + # each element in the Series + new_index = self.index + new_values = np.empty(len(new_index), dtype=object) + with np.errstate(all="ignore"): + new_values[:] = [func(lv, other) for lv in self._values] + new_name = self.name + + # try_float=False is to match agg_series + npvalues = lib.maybe_convert_objects(new_values, try_float=False) + res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) + return self._constructor(res_values, index=new_index, name=new_name, copy=False) + + def combine_first(self, other) -> Series: + """ + Update null elements with value in the same location in 'other'. + + Combine two Series objects by filling null values in one Series with + non-null values from the other Series. Result index will be the union + of the two indexes. + + Parameters + ---------- + other : Series + The value(s) to be used for filling null values. + + Returns + ------- + Series + The result of combining the provided Series with the other object. + + See Also + -------- + Series.combine : Perform element-wise operation on two Series + using a given function. + + Examples + -------- + >>> s1 = pd.Series([1, np.nan]) + >>> s2 = pd.Series([3, 4, 5]) + >>> s1.combine_first(s2) + 0 1.0 + 1 4.0 + 2 5.0 + dtype: float64 + + Null values still persist if the location of that null value + does not exist in `other` + + >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) + >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) + >>> s1.combine_first(s2) + duck 30.0 + eagle 160.0 + falcon NaN + dtype: float64 + """ + from pandas.core.reshape.concat import concat + + new_index = self.index.union(other.index) + + this = self + # identify the index subset to keep for each series + keep_other = other.index.difference(this.index[notna(this)]) + keep_this = this.index.difference(keep_other) + + this = this.reindex(keep_this, copy=False) + other = other.reindex(keep_other, copy=False) + + if this.dtype.kind == "M" and other.dtype.kind != "M": + other = to_datetime(other) + combined = concat([this, other]) + combined = combined.reindex(new_index, copy=False) + return combined.__finalize__(self, method="combine_first") + + def update(self, other: Series | Sequence | Mapping) -> None: + """ + Modify Series in place using values from passed Series. + + Uses non-NA values from passed Series to make updates. Aligns + on index. 
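+        If ``other`` is not a Series, it is first coerced with
+        ``Series(other)``, so dicts, lists and other sequences are accepted
+        as well (see the examples below).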
+ + Parameters + ---------- + other : Series, or object coercible into Series + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s.update(pd.Series([4, 5, 6])) + >>> s + 0 4 + 1 5 + 2 6 + dtype: int64 + + >>> s = pd.Series(['a', 'b', 'c']) + >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) + >>> s + 0 d + 1 b + 2 e + dtype: object + + >>> s = pd.Series([1, 2, 3]) + >>> s.update(pd.Series([4, 5, 6, 7, 8])) + >>> s + 0 4 + 1 5 + 2 6 + dtype: int64 + + If ``other`` contains NaNs the corresponding values are not updated + in the original Series. + + >>> s = pd.Series([1, 2, 3]) + >>> s.update(pd.Series([4, np.nan, 6])) + >>> s + 0 4 + 1 2 + 2 6 + dtype: int64 + + ``other`` can also be a non-Series object type + that is coercible into a Series + + >>> s = pd.Series([1, 2, 3]) + >>> s.update([4, np.nan, 6]) + >>> s + 0 4 + 1 2 + 2 6 + dtype: int64 + + >>> s = pd.Series([1, 2, 3]) + >>> s.update({1: 9}) + >>> s + 0 1 + 1 9 + 2 3 + dtype: int64 + """ + if not PYPY and using_copy_on_write(): + if sys.getrefcount(self) <= REF_COUNT: + warnings.warn( + _chained_assignment_method_msg, + ChainedAssignmentError, + stacklevel=2, + ) + + if not isinstance(other, Series): + other = Series(other) + + other = other.reindex_like(self) + mask = notna(other) + + self._mgr = self._mgr.putmask(mask=mask, new=other) + self._maybe_update_cacher() + + # ---------------------------------------------------------------------- + # Reindexing, sorting + + @overload + def sort_values( + self, + *, + axis: Axis = ..., + ascending: bool | Sequence[bool] = ..., + inplace: Literal[False] = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + ignore_index: bool = ..., + key: ValueKeyFunc = ..., + ) -> Series: + ... + + @overload + def sort_values( + self, + *, + axis: Axis = ..., + ascending: bool | Sequence[bool] = ..., + inplace: Literal[True], + kind: SortKind = ..., + na_position: NaPosition = ..., + ignore_index: bool = ..., + key: ValueKeyFunc = ..., + ) -> None: + ... + + @overload + def sort_values( + self, + *, + axis: Axis = ..., + ascending: bool | Sequence[bool] = ..., + inplace: bool = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + ignore_index: bool = ..., + key: ValueKeyFunc = ..., + ) -> Series | None: + ... + + def sort_values( + self, + *, + axis: Axis = 0, + ascending: bool | Sequence[bool] = True, + inplace: bool = False, + kind: SortKind = "quicksort", + na_position: NaPosition = "last", + ignore_index: bool = False, + key: ValueKeyFunc | None = None, + ) -> Series | None: + """ + Sort by the values. + + Sort a Series in ascending or descending order by some + criterion. + + Parameters + ---------- + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + ascending : bool or list of bools, default True + If True, sort values in ascending order, otherwise descending. + inplace : bool, default False + If True, perform operation in-place. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' + Choice of sorting algorithm. See also :func:`numpy.sort` for more + information. 'mergesort' and 'stable' are the only stable algorithms. + na_position : {'first' or 'last'}, default 'last' + Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at + the end. + ignore_index : bool, default False + If True, the resulting axis will be labeled 0, 1, …, n - 1. + key : callable, optional + If not None, apply the key function to the series values + before sorting. 
This is similar to the `key` argument in the + builtin :meth:`sorted` function, with the notable difference that + this `key` function should be *vectorized*. It should expect a + ``Series`` and return an array-like. + + Returns + ------- + Series or None + Series ordered by values or None if ``inplace=True``. + + See Also + -------- + Series.sort_index : Sort by the Series indices. + DataFrame.sort_values : Sort DataFrame by the values along either axis. + DataFrame.sort_index : Sort DataFrame by indices. + + Examples + -------- + >>> s = pd.Series([np.nan, 1, 3, 10, 5]) + >>> s + 0 NaN + 1 1.0 + 2 3.0 + 3 10.0 + 4 5.0 + dtype: float64 + + Sort values ascending order (default behaviour) + + >>> s.sort_values(ascending=True) + 1 1.0 + 2 3.0 + 4 5.0 + 3 10.0 + 0 NaN + dtype: float64 + + Sort values descending order + + >>> s.sort_values(ascending=False) + 3 10.0 + 4 5.0 + 2 3.0 + 1 1.0 + 0 NaN + dtype: float64 + + Sort values putting NAs first + + >>> s.sort_values(na_position='first') + 0 NaN + 1 1.0 + 2 3.0 + 4 5.0 + 3 10.0 + dtype: float64 + + Sort a series of strings + + >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) + >>> s + 0 z + 1 b + 2 d + 3 a + 4 c + dtype: object + + >>> s.sort_values() + 3 a + 1 b + 4 c + 2 d + 0 z + dtype: object + + Sort using a key function. Your `key` function will be + given the ``Series`` of values and should return an array-like. + + >>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) + >>> s.sort_values() + 1 B + 3 D + 0 a + 2 c + 4 e + dtype: object + >>> s.sort_values(key=lambda x: x.str.lower()) + 0 a + 1 B + 2 c + 3 D + 4 e + dtype: object + + NumPy ufuncs work well here. For example, we can + sort by the ``sin`` of the value + + >>> s = pd.Series([-4, -2, 0, 2, 4]) + >>> s.sort_values(key=np.sin) + 1 -2 + 4 4 + 2 0 + 0 -4 + 3 2 + dtype: int64 + + More complicated user-defined functions can be used, + as long as they expect a Series and return an array-like + + >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) + 0 -4 + 3 2 + 4 4 + 1 -2 + 2 0 + dtype: int64 + """ + inplace = validate_bool_kwarg(inplace, "inplace") + # Validate the axis parameter + self._get_axis_number(axis) + + # GH 5856/5853 + if inplace and self._is_cached: + raise ValueError( + "This Series is a view of some other array, to " + "sort in-place you must create a copy" + ) + + if is_list_like(ascending): + ascending = cast(Sequence[bool], ascending) + if len(ascending) != 1: + raise ValueError( + f"Length of ascending ({len(ascending)}) must be 1 for Series" + ) + ascending = ascending[0] + + ascending = validate_ascending(ascending) + + if na_position not in ["first", "last"]: + raise ValueError(f"invalid na_position: {na_position}") + + # GH 35922. 
Make sorting stable by leveraging nargsort + if key: + values_to_sort = cast(Series, ensure_key_mapped(self, key))._values + else: + values_to_sort = self._values + sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) + + if is_range_indexer(sorted_index, len(sorted_index)): + if inplace: + return self._update_inplace(self) + return self.copy(deep=None) + + result = self._constructor( + self._values[sorted_index], index=self.index[sorted_index], copy=False + ) + + if ignore_index: + result.index = default_index(len(sorted_index)) + + if not inplace: + return result.__finalize__(self, method="sort_values") + self._update_inplace(result) + return None + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool | Sequence[bool] = ..., + inplace: Literal[True], + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool = ..., + ignore_index: bool = ..., + key: IndexKeyFunc = ..., + ) -> None: + ... + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool | Sequence[bool] = ..., + inplace: Literal[False] = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool = ..., + ignore_index: bool = ..., + key: IndexKeyFunc = ..., + ) -> Series: + ... + + @overload + def sort_index( + self, + *, + axis: Axis = ..., + level: IndexLabel = ..., + ascending: bool | Sequence[bool] = ..., + inplace: bool = ..., + kind: SortKind = ..., + na_position: NaPosition = ..., + sort_remaining: bool = ..., + ignore_index: bool = ..., + key: IndexKeyFunc = ..., + ) -> Series | None: + ... + + def sort_index( + self, + *, + axis: Axis = 0, + level: IndexLabel | None = None, + ascending: bool | Sequence[bool] = True, + inplace: bool = False, + kind: SortKind = "quicksort", + na_position: NaPosition = "last", + sort_remaining: bool = True, + ignore_index: bool = False, + key: IndexKeyFunc | None = None, + ) -> Series | None: + """ + Sort Series by index labels. + + Returns a new Series sorted by label if `inplace` argument is + ``False``, otherwise updates the original series and returns None. + + Parameters + ---------- + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + level : int, optional + If not None, sort on values in specified index level(s). + ascending : bool or list-like of bools, default True + Sort ascending vs. descending. When the index is a MultiIndex the + sort direction can be controlled for each level individually. + inplace : bool, default False + If True, perform operation in-place. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' + Choice of sorting algorithm. See also :func:`numpy.sort` for more + information. 'mergesort' and 'stable' are the only stable algorithms. For + DataFrames, this option is only applied when sorting on a single + column or label. + na_position : {'first', 'last'}, default 'last' + If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. + Not implemented for MultiIndex. + sort_remaining : bool, default True + If True and sorting by level and index is multilevel, sort by other + levels too (in order) after sorting by specified level. + ignore_index : bool, default False + If True, the resulting axis will be labeled 0, 1, …, n - 1. + key : callable, optional + If not None, apply the key function to the index values + before sorting. 
This is similar to the `key` argument in the + builtin :meth:`sorted` function, with the notable difference that + this `key` function should be *vectorized*. It should expect an + ``Index`` and return an ``Index`` of the same shape. + + Returns + ------- + Series or None + The original Series sorted by the labels or None if ``inplace=True``. + + See Also + -------- + DataFrame.sort_index: Sort DataFrame by the index. + DataFrame.sort_values: Sort DataFrame by the value. + Series.sort_values : Sort Series by the value. + + Examples + -------- + >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) + >>> s.sort_index() + 1 c + 2 b + 3 a + 4 d + dtype: object + + Sort Descending + + >>> s.sort_index(ascending=False) + 4 d + 3 a + 2 b + 1 c + dtype: object + + By default NaNs are put at the end, but use `na_position` to place + them at the beginning + + >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) + >>> s.sort_index(na_position='first') + NaN d + 1.0 c + 2.0 b + 3.0 a + dtype: object + + Specify index level to sort + + >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', + ... 'baz', 'baz', 'bar', 'bar']), + ... np.array(['two', 'one', 'two', 'one', + ... 'two', 'one', 'two', 'one'])] + >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) + >>> s.sort_index(level=1) + bar one 8 + baz one 6 + foo one 4 + qux one 2 + bar two 7 + baz two 5 + foo two 3 + qux two 1 + dtype: int64 + + Does not sort by remaining levels when sorting by levels + + >>> s.sort_index(level=1, sort_remaining=False) + qux one 2 + foo one 4 + baz one 6 + bar one 8 + qux two 1 + foo two 3 + baz two 5 + bar two 7 + dtype: int64 + + Apply a key function before sorting + + >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) + >>> s.sort_index(key=lambda x : x.str.lower()) + A 1 + b 2 + C 3 + d 4 + dtype: int64 + """ + + return super().sort_index( + axis=axis, + level=level, + ascending=ascending, + inplace=inplace, + kind=kind, + na_position=na_position, + sort_remaining=sort_remaining, + ignore_index=ignore_index, + key=key, + ) + + def argsort( + self, + axis: Axis = 0, + kind: SortKind = "quicksort", + order: None = None, + ) -> Series: + """ + Return the integer indices that would sort the Series values. + + Override ndarray.argsort. Argsorts the value, omitting NA/null values, + and places the result in the same locations as the non-NA values. + + Parameters + ---------- + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' + Choice of sorting algorithm. See :func:`numpy.sort` for more + information. 'mergesort' and 'stable' are the only stable algorithms. + order : None + Has no effect but is accepted for compatibility with numpy. + + Returns + ------- + Series[np.intp] + Positions of values within the sort order with -1 indicating + nan values. + + See Also + -------- + numpy.ndarray.argsort : Returns the indices that would sort this array. + + Examples + -------- + >>> s = pd.Series([3, 2, 1]) + >>> s.argsort() + 0 2 + 1 1 + 2 0 + dtype: int64 + """ + if axis != -1: + # GH#54257 We allow -1 here so that np.argsort(series) works + self._get_axis_number(axis) + + values = self._values + mask = isna(values) + + if mask.any(): + warnings.warn( + "The behavior of Series.argsort in the presence of NA values is " + "deprecated. 
In a future version, NA values will be ordered " + "last instead of set to -1.", + FutureWarning, + stacklevel=find_stack_level(), + ) + result = np.full(len(self), -1, dtype=np.intp) + notmask = ~mask + result[notmask] = np.argsort(values[notmask], kind=kind) + else: + result = np.argsort(values, kind=kind) + + res = self._constructor( + result, index=self.index, name=self.name, dtype=np.intp, copy=False + ) + return res.__finalize__(self, method="argsort") + + def nlargest( + self, n: int = 5, keep: Literal["first", "last", "all"] = "first" + ) -> Series: + """ + Return the largest `n` elements. + + Parameters + ---------- + n : int, default 5 + Return this many descending sorted values. + keep : {'first', 'last', 'all'}, default 'first' + When there are duplicate values that cannot all fit in a + Series of `n` elements: + + - ``first`` : return the first `n` occurrences in order + of appearance. + - ``last`` : return the last `n` occurrences in reverse + order of appearance. + - ``all`` : keep all occurrences. This can result in a Series of + size larger than `n`. + + Returns + ------- + Series + The `n` largest values in the Series, sorted in decreasing order. + + See Also + -------- + Series.nsmallest: Get the `n` smallest elements. + Series.sort_values: Sort Series by values. + Series.head: Return the first `n` rows. + + Notes + ----- + Faster than ``.sort_values(ascending=False).head(n)`` for small `n` + relative to the size of the ``Series`` object. + + Examples + -------- + >>> countries_population = {"Italy": 59000000, "France": 65000000, + ... "Malta": 434000, "Maldives": 434000, + ... "Brunei": 434000, "Iceland": 337000, + ... "Nauru": 11300, "Tuvalu": 11300, + ... "Anguilla": 11300, "Montserrat": 5200} + >>> s = pd.Series(countries_population) + >>> s + Italy 59000000 + France 65000000 + Malta 434000 + Maldives 434000 + Brunei 434000 + Iceland 337000 + Nauru 11300 + Tuvalu 11300 + Anguilla 11300 + Montserrat 5200 + dtype: int64 + + The `n` largest elements where ``n=5`` by default. + + >>> s.nlargest() + France 65000000 + Italy 59000000 + Malta 434000 + Maldives 434000 + Brunei 434000 + dtype: int64 + + The `n` largest elements where ``n=3``. Default `keep` value is 'first' + so Malta will be kept. + + >>> s.nlargest(3) + France 65000000 + Italy 59000000 + Malta 434000 + dtype: int64 + + The `n` largest elements where ``n=3`` and keeping the last duplicates. + Brunei will be kept since it is the last with value 434000 based on + the index order. + + >>> s.nlargest(3, keep='last') + France 65000000 + Italy 59000000 + Brunei 434000 + dtype: int64 + + The `n` largest elements where ``n=3`` with all duplicates kept. Note + that the returned Series has five elements due to the three duplicates. + + >>> s.nlargest(3, keep='all') + France 65000000 + Italy 59000000 + Malta 434000 + Maldives 434000 + Brunei 434000 + dtype: int64 + """ + return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() + + def nsmallest( + self, n: int = 5, keep: Literal["first", "last", "all"] = "first" + ) -> Series: + """ + Return the smallest `n` elements. + + Parameters + ---------- + n : int, default 5 + Return this many ascending sorted values. + keep : {'first', 'last', 'all'}, default 'first' + When there are duplicate values that cannot all fit in a + Series of `n` elements: + + - ``first`` : return the first `n` occurrences in order + of appearance. + - ``last`` : return the last `n` occurrences in reverse + order of appearance. + - ``all`` : keep all occurrences. 
This can result in a Series of + size larger than `n`. + + Returns + ------- + Series + The `n` smallest values in the Series, sorted in increasing order. + + See Also + -------- + Series.nlargest: Get the `n` largest elements. + Series.sort_values: Sort Series by values. + Series.head: Return the first `n` rows. + + Notes + ----- + Faster than ``.sort_values().head(n)`` for small `n` relative to + the size of the ``Series`` object. + + Examples + -------- + >>> countries_population = {"Italy": 59000000, "France": 65000000, + ... "Brunei": 434000, "Malta": 434000, + ... "Maldives": 434000, "Iceland": 337000, + ... "Nauru": 11300, "Tuvalu": 11300, + ... "Anguilla": 11300, "Montserrat": 5200} + >>> s = pd.Series(countries_population) + >>> s + Italy 59000000 + France 65000000 + Brunei 434000 + Malta 434000 + Maldives 434000 + Iceland 337000 + Nauru 11300 + Tuvalu 11300 + Anguilla 11300 + Montserrat 5200 + dtype: int64 + + The `n` smallest elements where ``n=5`` by default. + + >>> s.nsmallest() + Montserrat 5200 + Nauru 11300 + Tuvalu 11300 + Anguilla 11300 + Iceland 337000 + dtype: int64 + + The `n` smallest elements where ``n=3``. Default `keep` value is + 'first' so Nauru and Tuvalu will be kept. + + >>> s.nsmallest(3) + Montserrat 5200 + Nauru 11300 + Tuvalu 11300 + dtype: int64 + + The `n` smallest elements where ``n=3`` and keeping the last + duplicates. Anguilla and Tuvalu will be kept since they are the last + with value 11300 based on the index order. + + >>> s.nsmallest(3, keep='last') + Montserrat 5200 + Anguilla 11300 + Tuvalu 11300 + dtype: int64 + + The `n` smallest elements where ``n=3`` with all duplicates kept. Note + that the returned Series has four elements due to the three duplicates. + + >>> s.nsmallest(3, keep='all') + Montserrat 5200 + Nauru 11300 + Tuvalu 11300 + Anguilla 11300 + dtype: int64 + """ + return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() + + @doc( + klass=_shared_doc_kwargs["klass"], + extra_params=dedent( + """copy : bool, default True + Whether to copy underlying data.""" + ), + examples=dedent( + """\ + Examples + -------- + >>> s = pd.Series( + ... ["A", "B", "A", "C"], + ... index=[ + ... ["Final exam", "Final exam", "Coursework", "Coursework"], + ... ["History", "Geography", "History", "Geography"], + ... ["January", "February", "March", "April"], + ... ], + ... ) + >>> s + Final exam History January A + Geography February B + Coursework History March A + Geography April C + dtype: object + + In the following example, we will swap the levels of the indices. + Here, we will swap the levels column-wise, but levels can be swapped row-wise + in a similar manner. Note that column-wise is the default behaviour. + By not supplying any arguments for i and j, we swap the last and second to + last indices. + + >>> s.swaplevel() + Final exam January History A + February Geography B + Coursework March History A + April Geography C + dtype: object + + By supplying one argument, we can choose which index to swap the last + index with. We can for example swap the first index with the last one as + follows. + + >>> s.swaplevel(0) + January History Final exam A + February Geography Final exam B + March History Coursework A + April Geography Coursework C + dtype: object + + We can also define explicitly which indices we want to swap by supplying values + for both i and j. Here, we for example swap the first and second indices. 
+
+            >>> s.swaplevel(0, 1)
+            History    Final exam  January      A
+            Geography  Final exam  February     B
+            History    Coursework  March        A
+            Geography  Coursework  April        C
+            dtype: object"""
+        ),
+    )
+    def swaplevel(
+        self, i: Level = -2, j: Level = -1, copy: bool | None = None
+    ) -> Series:
+        """
+        Swap levels i and j in a :class:`MultiIndex`.
+
+        Default is to swap the two innermost levels of the index.
+
+        Parameters
+        ----------
+        i, j : int or str
+            Levels of the indices to be swapped. Can pass level name as string.
+        {extra_params}
+
+        Returns
+        -------
+        {klass}
+            {klass} with levels swapped in MultiIndex.
+
+        {examples}
+        """
+        assert isinstance(self.index, MultiIndex)
+        result = self.copy(deep=copy and not using_copy_on_write())
+        result.index = self.index.swaplevel(i, j)
+        return result
+
+    def reorder_levels(self, order: Sequence[Level]) -> Series:
+        """
+        Rearrange index levels using input order.
+
+        May not drop or duplicate levels.
+
+        Parameters
+        ----------
+        order : list of int representing new level order
+            Reference level by number or key.
+
+        Returns
+        -------
+        type of caller (new object)
+
+        Examples
+        --------
+        >>> arrays = [np.array(["dog", "dog", "cat", "cat", "bird", "bird"]),
+        ...           np.array(["white", "black", "white", "black", "white", "black"])]
+        >>> s = pd.Series([1, 2, 3, 3, 5, 2], index=arrays)
+        >>> s
+        dog   white    1
+              black    2
+        cat   white    3
+              black    3
+        bird  white    5
+              black    2
+        dtype: int64
+        >>> s.reorder_levels([1, 0])
+        white  dog     1
+        black  dog     2
+        white  cat     3
+        black  cat     3
+        white  bird    5
+        black  bird    2
+        dtype: int64
+        """
+        if not isinstance(self.index, MultiIndex):  # pragma: no cover
+            raise Exception("Can only reorder levels on a hierarchical axis.")
+
+        result = self.copy(deep=None)
+        assert isinstance(result.index, MultiIndex)
+        result.index = result.index.reorder_levels(order)
+        return result
+
+    def explode(self, ignore_index: bool = False) -> Series:
+        """
+        Transform each element of a list-like to a row.
+
+        Parameters
+        ----------
+        ignore_index : bool, default False
+            If True, the resulting index will be labeled 0, 1, …, n - 1.
+
+        Returns
+        -------
+        Series
+            Exploded lists to rows; index will be duplicated for these rows.
+
+        See Also
+        --------
+        Series.str.split : Split string values on specified separator.
+        Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
+            to produce DataFrame.
+        DataFrame.melt : Unpivot a DataFrame from wide format to long format.
+        DataFrame.explode : Explode a DataFrame from list-like
+            columns to long format.
+
+        Notes
+        -----
+        This routine will explode list-likes including lists, tuples, sets,
+        Series, and np.ndarray. The result dtype of the subset rows will
+        be object. Scalars will be returned unchanged, and empty list-likes will
+        result in a np.nan for that row. In addition, the ordering of elements in
+        the output will be non-deterministic when exploding sets.
+
+        Reference :ref:`the user guide <reshaping.explode>` for more examples.
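+
+        Unless ``ignore_index=True`` is passed, the original index labels are
+        repeated for each exploded element, so the resulting index is
+        generally not unique.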
+
+        Examples
+        --------
+        >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]])
+        >>> s
+        0    [1, 2, 3]
+        1          foo
+        2           []
+        3       [3, 4]
+        dtype: object
+
+        >>> s.explode()
+        0      1
+        0      2
+        0      3
+        1    foo
+        2    NaN
+        3      3
+        3      4
+        dtype: object
+        """
+        if isinstance(self.dtype, ArrowDtype) and self.dtype.type == list:
+            values, counts = self._values._explode()
+        elif len(self) and is_object_dtype(self.dtype):
+            values, counts = reshape.explode(np.asarray(self._values))
+        else:
+            result = self.copy()
+            return result.reset_index(drop=True) if ignore_index else result
+
+        if ignore_index:
+            index = default_index(len(values))
+        else:
+            index = self.index.repeat(counts)
+
+        return self._constructor(values, index=index, name=self.name, copy=False)
+
+    def unstack(
+        self,
+        level: IndexLabel = -1,
+        fill_value: Hashable | None = None,
+        sort: bool = True,
+    ) -> DataFrame:
+        """
+        Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.
+
+        Parameters
+        ----------
+        level : int, str, or list of these, default last level
+            Level(s) to unstack, can pass level name.
+        fill_value : scalar value, default None
+            Value to use when replacing NaN values.
+        sort : bool, default True
+            Sort the level(s) in the resulting MultiIndex columns.
+
+        Returns
+        -------
+        DataFrame
+            Unstacked Series.
+
+        Notes
+        -----
+        Reference :ref:`the user guide <reshaping.stacking>` for more examples.
+
+        Examples
+        --------
+        >>> s = pd.Series([1, 2, 3, 4],
+        ...               index=pd.MultiIndex.from_product([['one', 'two'],
+        ...                                                 ['a', 'b']]))
+        >>> s
+        one  a    1
+             b    2
+        two  a    3
+             b    4
+        dtype: int64
+
+        >>> s.unstack(level=-1)
+             a  b
+        one  1  2
+        two  3  4
+
+        >>> s.unstack(level=0)
+           one  two
+        a    1    3
+        b    2    4
+        """
+        from pandas.core.reshape.reshape import unstack
+
+        return unstack(self, level, fill_value, sort)
+
+    # ----------------------------------------------------------------------
+    # function application
+
+    def map(
+        self,
+        arg: Callable | Mapping | Series,
+        na_action: Literal["ignore"] | None = None,
+    ) -> Series:
+        """
+        Map values of Series according to an input mapping or function.
+
+        Used for substituting each value in a Series with another value,
+        that may be derived from a function, a ``dict`` or
+        a :class:`Series`.
+
+        Parameters
+        ----------
+        arg : function, collections.abc.Mapping subclass or Series
+            Mapping correspondence.
+        na_action : {None, 'ignore'}, default None
+            If 'ignore', propagate NaN values, without passing them to the
+            mapping correspondence.
+
+        Returns
+        -------
+        Series
+            Same index as caller.
+
+        See Also
+        --------
+        Series.apply : For applying more complex functions on a Series.
+        Series.replace: Replace values given in `to_replace` with `value`.
+        DataFrame.apply : Apply a function row-/column-wise.
+        DataFrame.map : Apply a function elementwise on a whole DataFrame.
+
+        Notes
+        -----
+        When ``arg`` is a dictionary, values in Series that are not in the
+        dictionary (as keys) are converted to ``NaN``. However, if the
+        dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
+        provides a method for default values), then this default is used
+        rather than ``NaN``.
+
+        Examples
+        --------
+        >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
+        >>> s
+        0       cat
+        1       dog
+        2       NaN
+        3    rabbit
+        dtype: object
+
+        ``map`` accepts a ``dict`` or a ``Series``. Values that are not found
+        in the ``dict`` are converted to ``NaN``, unless the dict has a default
+        value (e.g.
``defaultdict``): + + >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) + 0 kitten + 1 puppy + 2 NaN + 3 NaN + dtype: object + + It also accepts a function: + + >>> s.map('I am a {}'.format) + 0 I am a cat + 1 I am a dog + 2 I am a nan + 3 I am a rabbit + dtype: object + + To avoid applying the function to missing values (and keep them as + ``NaN``) ``na_action='ignore'`` can be used: + + >>> s.map('I am a {}'.format, na_action='ignore') + 0 I am a cat + 1 I am a dog + 2 NaN + 3 I am a rabbit + dtype: object + """ + new_values = self._map_values(arg, na_action=na_action) + return self._constructor(new_values, index=self.index, copy=False).__finalize__( + self, method="map" + ) + + def _gotitem(self, key, ndim, subset=None) -> Self: + """ + Sub-classes to define. Return a sliced object. + + Parameters + ---------- + key : string / list of selections + ndim : {1, 2} + Requested ndim of result. + subset : object, default None + Subset to act on. + """ + return self + + _agg_see_also_doc = dedent( + """ + See Also + -------- + Series.apply : Invoke function on a Series. + Series.transform : Transform function producing a Series with like indexes. + """ + ) + + _agg_examples_doc = dedent( + """ + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4]) + >>> s + 0 1 + 1 2 + 2 3 + 3 4 + dtype: int64 + + >>> s.agg('min') + 1 + + >>> s.agg(['min', 'max']) + min 1 + max 4 + dtype: int64 + """ + ) + + @doc( + _shared_docs["aggregate"], + klass=_shared_doc_kwargs["klass"], + axis=_shared_doc_kwargs["axis"], + see_also=_agg_see_also_doc, + examples=_agg_examples_doc, + ) + def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): + # Validate the axis parameter + self._get_axis_number(axis) + + # if func is None, will switch to user-provided "named aggregation" kwargs + if func is None: + func = dict(kwargs.items()) + + op = SeriesApply(self, func, args=args, kwargs=kwargs) + result = op.agg() + return result + + agg = aggregate + + @doc( + _shared_docs["transform"], + klass=_shared_doc_kwargs["klass"], + axis=_shared_doc_kwargs["axis"], + ) + def transform( + self, func: AggFuncType, axis: Axis = 0, *args, **kwargs + ) -> DataFrame | Series: + # Validate axis argument + self._get_axis_number(axis) + ser = self.copy(deep=False) if using_copy_on_write() else self + result = SeriesApply(ser, func=func, args=args, kwargs=kwargs).transform() + return result + + def apply( + self, + func: AggFuncType, + convert_dtype: bool | lib.NoDefault = lib.no_default, + args: tuple[Any, ...] = (), + *, + by_row: Literal[False, "compat"] = "compat", + **kwargs, + ) -> DataFrame | Series: + """ + Invoke function on values of Series. + + Can be ufunc (a NumPy function that applies to the entire Series) + or a Python function that only works on single values. + + Parameters + ---------- + func : function + Python function or NumPy ufunc to apply. + convert_dtype : bool, default True + Try to find better dtype for elementwise function results. If + False, leave as dtype=object. Note that the dtype is always + preserved for some extension array dtypes, such as Categorical. + + .. deprecated:: 2.1.0 + ``convert_dtype`` has been deprecated. Do ``ser.astype(object).apply()`` + instead if you want ``convert_dtype=False``. + args : tuple + Positional arguments passed to func after the series value. + by_row : False or "compat", default "compat" + If ``"compat"`` and func is a callable, func will be passed each element of + the Series, like ``Series.map``. 
If func is a list or dict of + callables, will first try to translate each func into pandas methods. If + that doesn't work, will try call to apply again with ``by_row="compat"`` + and if that fails, will call apply again with ``by_row=False`` + (backward compatible). + If False, the func will be passed the whole Series at once. + + ``by_row`` has no effect when ``func`` is a string. + + .. versionadded:: 2.1.0 + **kwargs + Additional keyword arguments passed to func. + + Returns + ------- + Series or DataFrame + If func returns a Series object the result will be a DataFrame. + + See Also + -------- + Series.map: For element-wise operations. + Series.agg: Only perform aggregating type operations. + Series.transform: Only perform transforming type operations. + + Notes + ----- + Functions that mutate the passed object can produce unexpected + behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` + for more details. + + Examples + -------- + Create a series with typical summer temperatures for each city. + + >>> s = pd.Series([20, 21, 12], + ... index=['London', 'New York', 'Helsinki']) + >>> s + London 20 + New York 21 + Helsinki 12 + dtype: int64 + + Square the values by defining a function and passing it as an + argument to ``apply()``. + + >>> def square(x): + ... return x ** 2 + >>> s.apply(square) + London 400 + New York 441 + Helsinki 144 + dtype: int64 + + Square the values by passing an anonymous function as an + argument to ``apply()``. + + >>> s.apply(lambda x: x ** 2) + London 400 + New York 441 + Helsinki 144 + dtype: int64 + + Define a custom function that needs additional positional + arguments and pass these additional arguments using the + ``args`` keyword. + + >>> def subtract_custom_value(x, custom_value): + ... return x - custom_value + + >>> s.apply(subtract_custom_value, args=(5,)) + London 15 + New York 16 + Helsinki 7 + dtype: int64 + + Define a custom function that takes keyword arguments + and pass these arguments to ``apply``. + + >>> def add_custom_values(x, **kwargs): + ... for month in kwargs: + ... x += kwargs[month] + ... return x + + >>> s.apply(add_custom_values, june=30, july=20, august=25) + London 95 + New York 96 + Helsinki 87 + dtype: int64 + + Use a function from the Numpy library. + + >>> s.apply(np.log) + London 2.995732 + New York 3.044522 + Helsinki 2.484907 + dtype: float64 + """ + return SeriesApply( + self, + func, + convert_dtype=convert_dtype, + by_row=by_row, + args=args, + kwargs=kwargs, + ).apply() + + def _reindex_indexer( + self, + new_index: Index | None, + indexer: npt.NDArray[np.intp] | None, + copy: bool | None, + ) -> Series: + # Note: new_index is None iff indexer is None + # if not None, indexer is np.intp + if indexer is None and ( + new_index is None or new_index.names == self.index.names + ): + if using_copy_on_write(): + return self.copy(deep=copy) + if copy or copy is None: + return self.copy(deep=copy) + return self + + new_values = algorithms.take_nd( + self._values, indexer, allow_fill=True, fill_value=None + ) + return self._constructor(new_values, index=new_index, copy=False) + + def _needs_reindex_multi(self, axes, method, level) -> bool: + """ + Check if we do need a multi reindex; this is for compat with + higher dims. + """ + return False + + @overload + def rename( + self, + index: Renamer | Hashable | None = ..., + *, + axis: Axis | None = ..., + copy: bool = ..., + inplace: Literal[True], + level: Level | None = ..., + errors: IgnoreRaise = ..., + ) -> None: + ... 
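+    # Illustrative sketch of the ``by_row`` switch documented in
+    # ``apply`` above: the default ``by_row="compat"`` feeds the callable
+    # one element at a time, while ``by_row=False`` hands it the whole
+    # Series, so a vectorized callable runs once over all values:
+    #
+    #   >>> s = pd.Series([1, 2, 3])
+    #   >>> s.apply(lambda x: x + 1, by_row=False)  # ``x`` is the Series
+    #   0    2
+    #   1    3
+    #   2    4
+    #   dtype: int64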
+ + @overload + def rename( + self, + index: Renamer | Hashable | None = ..., + *, + axis: Axis | None = ..., + copy: bool = ..., + inplace: Literal[False] = ..., + level: Level | None = ..., + errors: IgnoreRaise = ..., + ) -> Series: + ... + + @overload + def rename( + self, + index: Renamer | Hashable | None = ..., + *, + axis: Axis | None = ..., + copy: bool = ..., + inplace: bool = ..., + level: Level | None = ..., + errors: IgnoreRaise = ..., + ) -> Series | None: + ... + + def rename( + self, + index: Renamer | Hashable | None = None, + *, + axis: Axis | None = None, + copy: bool | None = None, + inplace: bool = False, + level: Level | None = None, + errors: IgnoreRaise = "ignore", + ) -> Series | None: + """ + Alter Series index labels or name. + + Function / dict values must be unique (1-to-1). Labels not contained in + a dict / Series will be left as-is. Extra labels listed don't throw an + error. + + Alternatively, change ``Series.name`` with a scalar value. + + See the :ref:`user guide ` for more. + + Parameters + ---------- + index : scalar, hashable sequence, dict-like or function optional + Functions or dict-like are transformations to apply to + the index. + Scalar or hashable sequence-like will alter the ``Series.name`` + attribute. + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + copy : bool, default True + Also copy underlying data. + inplace : bool, default False + Whether to return a new Series. If True the value of copy is ignored. + level : int or level name, default None + In case of MultiIndex, only rename labels in the specified level. + errors : {'ignore', 'raise'}, default 'ignore' + If 'raise', raise `KeyError` when a `dict-like mapper` or + `index` contains labels that are not present in the index being transformed. + If 'ignore', existing keys will be renamed and extra keys will be ignored. + + Returns + ------- + Series or None + Series with index labels or name altered or None if ``inplace=True``. + + See Also + -------- + DataFrame.rename : Corresponding DataFrame method. + Series.rename_axis : Set the name of the axis. + + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s + 0 1 + 1 2 + 2 3 + dtype: int64 + >>> s.rename("my_name") # scalar, changes Series.name + 0 1 + 1 2 + 2 3 + Name: my_name, dtype: int64 + >>> s.rename(lambda x: x ** 2) # function, changes labels + 0 1 + 1 2 + 4 3 + dtype: int64 + >>> s.rename({1: 3, 2: 5}) # mapping, changes labels + 0 1 + 3 2 + 5 3 + dtype: int64 + """ + if axis is not None: + # Make sure we raise if an invalid 'axis' is passed. 
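+            # For a Series the only valid values are 0 and "index";
+            # anything else makes _get_axis_number raise a ValueError.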
+ axis = self._get_axis_number(axis) + + if callable(index) or is_dict_like(index): + # error: Argument 1 to "_rename" of "NDFrame" has incompatible + # type "Union[Union[Mapping[Any, Hashable], Callable[[Any], + # Hashable]], Hashable, None]"; expected "Union[Mapping[Any, + # Hashable], Callable[[Any], Hashable], None]" + return super()._rename( + index, # type: ignore[arg-type] + copy=copy, + inplace=inplace, + level=level, + errors=errors, + ) + else: + return self._set_name(index, inplace=inplace, deep=copy) + + @Appender( + """ + Examples + -------- + >>> s = pd.Series([1, 2, 3]) + >>> s + 0 1 + 1 2 + 2 3 + dtype: int64 + + >>> s.set_axis(['a', 'b', 'c'], axis=0) + a 1 + b 2 + c 3 + dtype: int64 + """ + ) + @Substitution( + klass=_shared_doc_kwargs["klass"], + axes_single_arg=_shared_doc_kwargs["axes_single_arg"], + extended_summary_sub="", + axis_description_sub="", + see_also_sub="", + ) + @Appender(NDFrame.set_axis.__doc__) + def set_axis( + self, + labels, + *, + axis: Axis = 0, + copy: bool | None = None, + ) -> Series: + return super().set_axis(labels, axis=axis, copy=copy) + + # error: Cannot determine type of 'reindex' + @doc( + NDFrame.reindex, # type: ignore[has-type] + klass=_shared_doc_kwargs["klass"], + optional_reindex=_shared_doc_kwargs["optional_reindex"], + ) + def reindex( # type: ignore[override] + self, + index=None, + *, + axis: Axis | None = None, + method: ReindexMethod | None = None, + copy: bool | None = None, + level: Level | None = None, + fill_value: Scalar | None = None, + limit: int | None = None, + tolerance=None, + ) -> Series: + return super().reindex( + index=index, + method=method, + copy=copy, + level=level, + fill_value=fill_value, + limit=limit, + tolerance=tolerance, + ) + + @doc(NDFrame.rename_axis) + def rename_axis( # type: ignore[override] + self, + mapper: IndexLabel | lib.NoDefault = lib.no_default, + *, + index=lib.no_default, + axis: Axis = 0, + copy: bool = True, + inplace: bool = False, + ) -> Self | None: + return super().rename_axis( + mapper=mapper, + index=index, + axis=axis, + copy=copy, + inplace=inplace, + ) + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level | None = ..., + inplace: Literal[True], + errors: IgnoreRaise = ..., + ) -> None: + ... + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level | None = ..., + inplace: Literal[False] = ..., + errors: IgnoreRaise = ..., + ) -> Series: + ... + + @overload + def drop( + self, + labels: IndexLabel = ..., + *, + axis: Axis = ..., + index: IndexLabel = ..., + columns: IndexLabel = ..., + level: Level | None = ..., + inplace: bool = ..., + errors: IgnoreRaise = ..., + ) -> Series | None: + ... + + def drop( + self, + labels: IndexLabel | None = None, + *, + axis: Axis = 0, + index: IndexLabel | None = None, + columns: IndexLabel | None = None, + level: Level | None = None, + inplace: bool = False, + errors: IgnoreRaise = "raise", + ) -> Series | None: + """ + Return Series with specified index labels removed. + + Remove elements of a Series based on specifying the index labels. + When using a multi-index, labels on different levels can be removed + by specifying the level. + + Parameters + ---------- + labels : single label or list-like + Index labels to drop. + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. 
+ index : single label or list-like + Redundant for application on Series, but 'index' can be used instead + of 'labels'. + columns : single label or list-like + No change is made to the Series; use 'index' or 'labels' instead. + level : int or level name, optional + For MultiIndex, level for which the labels will be removed. + inplace : bool, default False + If True, do operation inplace and return None. + errors : {'ignore', 'raise'}, default 'raise' + If 'ignore', suppress error and only existing labels are dropped. + + Returns + ------- + Series or None + Series with specified index labels removed or None if ``inplace=True``. + + Raises + ------ + KeyError + If none of the labels are found in the index. + + See Also + -------- + Series.reindex : Return only specified index labels of Series. + Series.dropna : Return series without null values. + Series.drop_duplicates : Return Series with duplicate values removed. + DataFrame.drop : Drop specified labels from rows or columns. + + Examples + -------- + >>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C']) + >>> s + A 0 + B 1 + C 2 + dtype: int64 + + Drop labels B en C + + >>> s.drop(labels=['B', 'C']) + A 0 + dtype: int64 + + Drop 2nd level label in MultiIndex Series + + >>> midx = pd.MultiIndex(levels=[['llama', 'cow', 'falcon'], + ... ['speed', 'weight', 'length']], + ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], + ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) + >>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], + ... index=midx) + >>> s + llama speed 45.0 + weight 200.0 + length 1.2 + cow speed 30.0 + weight 250.0 + length 1.5 + falcon speed 320.0 + weight 1.0 + length 0.3 + dtype: float64 + + >>> s.drop(labels='weight', level=1) + llama speed 45.0 + length 1.2 + cow speed 30.0 + length 1.5 + falcon speed 320.0 + length 0.3 + dtype: float64 + """ + return super().drop( + labels=labels, + axis=axis, + index=index, + columns=columns, + level=level, + inplace=inplace, + errors=errors, + ) + + def pop(self, item: Hashable) -> Any: + """ + Return item and drops from series. Raise KeyError if not found. + + Parameters + ---------- + item : label + Index of the element that needs to be removed. + + Returns + ------- + Value that is popped from series. + + Examples + -------- + >>> ser = pd.Series([1,2,3]) + + >>> ser.pop(0) + 1 + + >>> ser + 1 2 + 2 3 + dtype: int64 + """ + return super().pop(item=item) + + @doc(INFO_DOCSTRING, **series_sub_kwargs) + def info( + self, + verbose: bool | None = None, + buf: IO[str] | None = None, + max_cols: int | None = None, + memory_usage: bool | str | None = None, + show_counts: bool = True, + ) -> None: + return SeriesInfo(self, memory_usage).render( + buf=buf, + max_cols=max_cols, + verbose=verbose, + show_counts=show_counts, + ) + + def _replace_single(self, to_replace, method: str, inplace: bool, limit): + """ + Replaces values in a Series using the fill method specified when no + replacement value is given in the replace method + """ + + result = self if inplace else self.copy() + + values = result._values + mask = missing.mask_missing(values, to_replace) + + if isinstance(values, ExtensionArray): + # dispatch to the EA's _pad_mask_inplace method + values._fill_mask_inplace(method, limit, mask) + else: + fill_f = missing.get_fill_func(method) + fill_f(values, limit=limit, mask=mask) + + if inplace: + return + return result + + def memory_usage(self, index: bool = True, deep: bool = False) -> int: + """ + Return the memory usage of the Series. 
+ + The memory usage can optionally include the contribution of + the index and of elements of `object` dtype. + + Parameters + ---------- + index : bool, default True + Specifies whether to include the memory usage of the Series index. + deep : bool, default False + If True, introspect the data deeply by interrogating + `object` dtypes for system-level memory consumption, and include + it in the returned value. + + Returns + ------- + int + Bytes of memory consumed. + + See Also + -------- + numpy.ndarray.nbytes : Total bytes consumed by the elements of the + array. + DataFrame.memory_usage : Bytes consumed by a DataFrame. + + Examples + -------- + >>> s = pd.Series(range(3)) + >>> s.memory_usage() + 152 + + Not including the index gives the size of the rest of the data, which + is necessarily smaller: + + >>> s.memory_usage(index=False) + 24 + + The memory footprint of `object` values is ignored by default: + + >>> s = pd.Series(["a", "b"]) + >>> s.values + array(['a', 'b'], dtype=object) + >>> s.memory_usage() + 144 + >>> s.memory_usage(deep=True) + 244 + """ + v = self._memory_usage(deep=deep) + if index: + v += self.index.memory_usage(deep=deep) + return v + + def isin(self, values) -> Series: + """ + Whether elements in Series are contained in `values`. + + Return a boolean Series showing whether each element in the Series + matches an element in the passed sequence of `values` exactly. + + Parameters + ---------- + values : set or list-like + The sequence of values to test. Passing in a single string will + raise a ``TypeError``. Instead, turn a single string into a + list of one element. + + Returns + ------- + Series + Series of booleans indicating if each element is in values. + + Raises + ------ + TypeError + * If `values` is a string + + See Also + -------- + DataFrame.isin : Equivalent method on DataFrame. + + Examples + -------- + >>> s = pd.Series(['llama', 'cow', 'llama', 'beetle', 'llama', + ... 'hippo'], name='animal') + >>> s.isin(['cow', 'llama']) + 0 True + 1 True + 2 True + 3 False + 4 True + 5 False + Name: animal, dtype: bool + + To invert the boolean values, use the ``~`` operator: + + >>> ~s.isin(['cow', 'llama']) + 0 False + 1 False + 2 False + 3 True + 4 False + 5 True + Name: animal, dtype: bool + + Passing a single string as ``s.isin('llama')`` will raise an error. Use + a list of one element instead: + + >>> s.isin(['llama']) + 0 True + 1 False + 2 True + 3 False + 4 True + 5 False + Name: animal, dtype: bool + + Strings and integers are distinct and are therefore not comparable: + + >>> pd.Series([1]).isin(['1']) + 0 False + dtype: bool + >>> pd.Series([1.1]).isin(['1.1']) + 0 False + dtype: bool + """ + result = algorithms.isin(self._values, values) + return self._constructor(result, index=self.index, copy=False).__finalize__( + self, method="isin" + ) + + def between( + self, + left, + right, + inclusive: Literal["both", "neither", "left", "right"] = "both", + ) -> Series: + """ + Return boolean Series equivalent to left <= series <= right. + + This function returns a boolean vector containing `True` wherever the + corresponding Series element is between the boundary values `left` and + `right`. NA values are treated as `False`. + + Parameters + ---------- + left : scalar or list-like + Left boundary. + right : scalar or list-like + Right boundary. + inclusive : {"both", "neither", "left", "right"} + Include boundaries. Whether to set each bound as closed or open. + + .. 
versionchanged:: 1.3.0 + + Returns + ------- + Series + Series representing whether each element is between left and + right (inclusive). + + See Also + -------- + Series.gt : Greater than of series and other. + Series.lt : Less than of series and other. + + Notes + ----- + This function is equivalent to ``(left <= ser) & (ser <= right)`` + + Examples + -------- + >>> s = pd.Series([2, 0, 4, 8, np.nan]) + + Boundary values are included by default: + + >>> s.between(1, 4) + 0 True + 1 False + 2 True + 3 False + 4 False + dtype: bool + + With `inclusive` set to ``"neither"`` boundary values are excluded: + + >>> s.between(1, 4, inclusive="neither") + 0 True + 1 False + 2 False + 3 False + 4 False + dtype: bool + + `left` and `right` can be any scalar value: + + >>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve']) + >>> s.between('Anna', 'Daniel') + 0 False + 1 True + 2 True + 3 False + dtype: bool + """ + if inclusive == "both": + lmask = self >= left + rmask = self <= right + elif inclusive == "left": + lmask = self >= left + rmask = self < right + elif inclusive == "right": + lmask = self > left + rmask = self <= right + elif inclusive == "neither": + lmask = self > left + rmask = self < right + else: + raise ValueError( + "Inclusive has to be either string of 'both'," + "'left', 'right', or 'neither'." + ) + + return lmask & rmask + + # ---------------------------------------------------------------------- + # Convert to types that support pd.NA + + def _convert_dtypes( + self, + infer_objects: bool = True, + convert_string: bool = True, + convert_integer: bool = True, + convert_boolean: bool = True, + convert_floating: bool = True, + dtype_backend: DtypeBackend = "numpy_nullable", + ) -> Series: + input_series = self + if infer_objects: + input_series = input_series.infer_objects() + if is_object_dtype(input_series.dtype): + input_series = input_series.copy(deep=None) + + if convert_string or convert_integer or convert_boolean or convert_floating: + inferred_dtype = convert_dtypes( + input_series._values, + convert_string, + convert_integer, + convert_boolean, + convert_floating, + infer_objects, + dtype_backend, + ) + result = input_series.astype(inferred_dtype) + else: + result = input_series.copy(deep=None) + return result + + # error: Cannot determine type of 'isna' + @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type] + def isna(self) -> Series: + return NDFrame.isna(self) + + # error: Cannot determine type of 'isna' + @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type] + def isnull(self) -> Series: + """ + Series.isnull is an alias for Series.isna. + """ + return super().isnull() + + # error: Cannot determine type of 'notna' + @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type] + def notna(self) -> Series: + return super().notna() + + # error: Cannot determine type of 'notna' + @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) # type: ignore[has-type] + def notnull(self) -> Series: + """ + Series.notnull is an alias for Series.notna. + """ + return super().notnull() + + @overload + def dropna( + self, + *, + axis: Axis = ..., + inplace: Literal[False] = ..., + how: AnyAll | None = ..., + ignore_index: bool = ..., + ) -> Series: + ... + + @overload + def dropna( + self, + *, + axis: Axis = ..., + inplace: Literal[True], + how: AnyAll | None = ..., + ignore_index: bool = ..., + ) -> None: + ... 
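+
+    # A minimal sketch of what ``_convert_dtypes`` above enables via the
+    # public ``convert_dtypes`` API (illustrative; the resulting dtype
+    # depends on the data):
+    #
+    #   >>> pd.Series([1, 2, None]).convert_dtypes()
+    #   0       1
+    #   1       2
+    #   2    <NA>
+    #   dtype: Int64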
+ + def dropna( + self, + *, + axis: Axis = 0, + inplace: bool = False, + how: AnyAll | None = None, + ignore_index: bool = False, + ) -> Series | None: + """ + Return a new Series with missing values removed. + + See the :ref:`User Guide ` for more on which values are + considered missing, and how to work with missing data. + + Parameters + ---------- + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + inplace : bool, default False + If True, do operation inplace and return None. + how : str, optional + Not in use. Kept for compatibility. + ignore_index : bool, default ``False`` + If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. + + .. versionadded:: 2.0.0 + + Returns + ------- + Series or None + Series with NA entries dropped from it or None if ``inplace=True``. + + See Also + -------- + Series.isna: Indicate missing values. + Series.notna : Indicate existing (non-missing) values. + Series.fillna : Replace missing values. + DataFrame.dropna : Drop rows or columns which contain NA values. + Index.dropna : Drop missing indices. + + Examples + -------- + >>> ser = pd.Series([1., 2., np.nan]) + >>> ser + 0 1.0 + 1 2.0 + 2 NaN + dtype: float64 + + Drop NA values from a Series. + + >>> ser.dropna() + 0 1.0 + 1 2.0 + dtype: float64 + + Empty strings are not considered NA values. ``None`` is considered an + NA value. + + >>> ser = pd.Series([np.nan, 2, pd.NaT, '', None, 'I stay']) + >>> ser + 0 NaN + 1 2 + 2 NaT + 3 + 4 None + 5 I stay + dtype: object + >>> ser.dropna() + 1 2 + 3 + 5 I stay + dtype: object + """ + inplace = validate_bool_kwarg(inplace, "inplace") + ignore_index = validate_bool_kwarg(ignore_index, "ignore_index") + # Validate the axis parameter + self._get_axis_number(axis or 0) + + if self._can_hold_na: + result = remove_na_arraylike(self) + else: + if not inplace: + result = self.copy(deep=None) + else: + result = self + + if ignore_index: + result.index = default_index(len(result)) + + if inplace: + return self._update_inplace(result) + else: + return result + + # ---------------------------------------------------------------------- + # Time series-oriented methods + + def to_timestamp( + self, + freq=None, + how: Literal["s", "e", "start", "end"] = "start", + copy: bool | None = None, + ) -> Series: + """ + Cast to DatetimeIndex of Timestamps, at *beginning* of period. + + Parameters + ---------- + freq : str, default frequency of PeriodIndex + Desired frequency. + how : {'s', 'e', 'start', 'end'} + Convention for converting period to timestamp; start of period + vs. end. + copy : bool, default True + Whether or not to return a copy. 
+ + Returns + ------- + Series with DatetimeIndex + + Examples + -------- + >>> idx = pd.PeriodIndex(['2023', '2024', '2025'], freq='Y') + >>> s1 = pd.Series([1, 2, 3], index=idx) + >>> s1 + 2023 1 + 2024 2 + 2025 3 + Freq: A-DEC, dtype: int64 + + The resulting frequency of the Timestamps is `YearBegin` + + >>> s1 = s1.to_timestamp() + >>> s1 + 2023-01-01 1 + 2024-01-01 2 + 2025-01-01 3 + Freq: AS-JAN, dtype: int64 + + Using `freq` which is the offset that the Timestamps will have + + >>> s2 = pd.Series([1, 2, 3], index=idx) + >>> s2 = s2.to_timestamp(freq='M') + >>> s2 + 2023-01-31 1 + 2024-01-31 2 + 2025-01-31 3 + Freq: A-JAN, dtype: int64 + """ + if not isinstance(self.index, PeriodIndex): + raise TypeError(f"unsupported Type {type(self.index).__name__}") + + new_obj = self.copy(deep=copy and not using_copy_on_write()) + new_index = self.index.to_timestamp(freq=freq, how=how) + setattr(new_obj, "index", new_index) + return new_obj + + def to_period(self, freq: str | None = None, copy: bool | None = None) -> Series: + """ + Convert Series from DatetimeIndex to PeriodIndex. + + Parameters + ---------- + freq : str, default None + Frequency associated with the PeriodIndex. + copy : bool, default True + Whether or not to return a copy. + + Returns + ------- + Series + Series with index converted to PeriodIndex. + + Examples + -------- + >>> idx = pd.DatetimeIndex(['2023', '2024', '2025']) + >>> s = pd.Series([1, 2, 3], index=idx) + >>> s = s.to_period() + >>> s + 2023 1 + 2024 2 + 2025 3 + Freq: A-DEC, dtype: int64 + + Viewing the index + + >>> s.index + PeriodIndex(['2023', '2024', '2025'], dtype='period[A-DEC]') + """ + if not isinstance(self.index, DatetimeIndex): + raise TypeError(f"unsupported Type {type(self.index).__name__}") + + new_obj = self.copy(deep=copy and not using_copy_on_write()) + new_index = self.index.to_period(freq=freq) + setattr(new_obj, "index", new_index) + return new_obj + + # ---------------------------------------------------------------------- + # Add index + _AXIS_ORDERS: list[Literal["index", "columns"]] = ["index"] + _AXIS_LEN = len(_AXIS_ORDERS) + _info_axis_number: Literal[0] = 0 + _info_axis_name: Literal["index"] = "index" + + index = properties.AxisProperty( + axis=0, + doc=""" + The index (axis labels) of the Series. + + The index of a Series is used to label and identify each element of the + underlying data. The index can be thought of as an immutable ordered set + (technically a multi-set, as it may contain duplicate labels), and is + used to index and align data in pandas. + + Returns + ------- + Index + The index labels of the Series. + + See Also + -------- + Series.reindex : Conform Series to new index. + Series.set_index : Set Series as DataFrame index. + Index : The base pandas index type. + + Notes + ----- + For more information on pandas indexing, see the `indexing user guide + `__. 
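+
+    A replacement index must match the Series length; assigning one of a
+    different length raises a ``ValueError``.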
+ + Examples + -------- + To create a Series with a custom index and view the index labels: + + >>> cities = ['Kolkata', 'Chicago', 'Toronto', 'Lisbon'] + >>> populations = [14.85, 2.71, 2.93, 0.51] + >>> city_series = pd.Series(populations, index=cities) + >>> city_series.index + Index(['Kolkata', 'Chicago', 'Toronto', 'Lisbon'], dtype='object') + + To change the index labels of an existing Series: + + >>> city_series.index = ['KOL', 'CHI', 'TOR', 'LIS'] + >>> city_series.index + Index(['KOL', 'CHI', 'TOR', 'LIS'], dtype='object') + """, + ) + + # ---------------------------------------------------------------------- + # Accessor Methods + # ---------------------------------------------------------------------- + str = CachedAccessor("str", StringMethods) + dt = CachedAccessor("dt", CombinedDatetimelikeProperties) + cat = CachedAccessor("cat", CategoricalAccessor) + plot = CachedAccessor("plot", pandas.plotting.PlotAccessor) + sparse = CachedAccessor("sparse", SparseAccessor) + + # ---------------------------------------------------------------------- + # Add plotting methods to Series + hist = pandas.plotting.hist_series + + # ---------------------------------------------------------------------- + # Template-Based Arithmetic/Comparison Methods + + def _cmp_method(self, other, op): + res_name = ops.get_op_result_name(self, other) + + if isinstance(other, Series) and not self._indexed_same(other): + raise ValueError("Can only compare identically-labeled Series objects") + + lvalues = self._values + rvalues = extract_array(other, extract_numpy=True, extract_range=True) + + res_values = ops.comparison_op(lvalues, rvalues, op) + + return self._construct_result(res_values, name=res_name) + + def _logical_method(self, other, op): + res_name = ops.get_op_result_name(self, other) + self, other = self._align_for_op(other, align_asobject=True) + + lvalues = self._values + rvalues = extract_array(other, extract_numpy=True, extract_range=True) + + res_values = ops.logical_op(lvalues, rvalues, op) + return self._construct_result(res_values, name=res_name) + + def _arith_method(self, other, op): + self, other = self._align_for_op(other) + return base.IndexOpsMixin._arith_method(self, other, op) + + def _align_for_op(self, right, align_asobject: bool = False): + """align lhs and rhs Series""" + # TODO: Different from DataFrame._align_for_op, list, tuple and ndarray + # are not coerced here + # because Series has inconsistencies described in GH#13637 + left = self + + if isinstance(right, Series): + # avoid repeated alignment + if not left.index.equals(right.index): + if align_asobject: + if left.dtype not in (object, np.bool_) or right.dtype not in ( + object, + np.bool_, + ): + warnings.warn( + "Operation between non boolean Series with different " + "indexes will no longer return a boolean result in " + "a future version. Cast both Series to object type " + "to maintain the prior behavior.", + FutureWarning, + stacklevel=find_stack_level(), + ) + # to keep original value's dtype for bool ops + left = left.astype(object) + right = right.astype(object) + + left, right = left.align(right, copy=False) + + return left, right + + def _binop(self, other: Series, func, level=None, fill_value=None) -> Series: + """ + Perform generic binary operation with optional fill value. + + Parameters + ---------- + other : Series + func : binary operator + fill_value : float or object + Value to substitute for NA/null values. 
If both Series are NA in a + location, the result will be NA regardless of the passed fill value. + level : int or level name, default None + Broadcast across a level, matching Index values on the + passed MultiIndex level. + + Returns + ------- + Series + """ + this = self + + if not self.index.equals(other.index): + this, other = self.align(other, level=level, join="outer", copy=False) + + this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) + + with np.errstate(all="ignore"): + result = func(this_vals, other_vals) + + name = ops.get_op_result_name(self, other) + out = this._construct_result(result, name) + return cast(Series, out) + + def _construct_result( + self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable + ) -> Series | tuple[Series, Series]: + """ + Construct an appropriately-labelled Series from the result of an op. + + Parameters + ---------- + result : ndarray or ExtensionArray + name : Label + + Returns + ------- + Series + In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. + """ + if isinstance(result, tuple): + # produced by divmod or rdivmod + + res1 = self._construct_result(result[0], name=name) + res2 = self._construct_result(result[1], name=name) + + # GH#33427 assertions to keep mypy happy + assert isinstance(res1, Series) + assert isinstance(res2, Series) + return (res1, res2) + + # TODO: result should always be ArrayLike, but this fails for some + # JSONArray tests + dtype = getattr(result, "dtype", None) + out = self._constructor(result, index=self.index, dtype=dtype, copy=False) + out = out.__finalize__(self) + + # Set the result's name after __finalize__ is called because __finalize__ + # would set it back to self.name + out.name = name + return out + + def _flex_method(self, other, op, *, level=None, fill_value=None, axis: Axis = 0): + if axis is not None: + self._get_axis_number(axis) + + res_name = ops.get_op_result_name(self, other) + + if isinstance(other, Series): + return self._binop(other, op, level=level, fill_value=fill_value) + elif isinstance(other, (np.ndarray, list, tuple)): + if len(other) != len(self): + raise ValueError("Lengths must be equal") + other = self._constructor(other, self.index, copy=False) + result = self._binop(other, op, level=level, fill_value=fill_value) + result._name = res_name + return result + else: + if fill_value is not None: + self = self.fillna(fill_value) + + return op(self, other) + + @Appender(ops.make_flex_doc("eq", "series")) + def eq(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, operator.eq, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("ne", "series")) + def ne(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, operator.ne, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("le", "series")) + def le(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, operator.le, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("lt", "series")) + def lt(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, operator.lt, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("ge", "series")) + def ge(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, operator.ge, level=level, fill_value=fill_value, axis=axis + ) + + 
@Appender(ops.make_flex_doc("gt", "series")) + def gt(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, operator.gt, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("add", "series")) + def add(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, operator.add, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("radd", "series")) + def radd(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, roperator.radd, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("sub", "series")) + def sub(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, operator.sub, level=level, fill_value=fill_value, axis=axis + ) + + subtract = sub + + @Appender(ops.make_flex_doc("rsub", "series")) + def rsub(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, roperator.rsub, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("mul", "series")) + def mul( + self, + other, + level: Level | None = None, + fill_value: float | None = None, + axis: Axis = 0, + ): + return self._flex_method( + other, operator.mul, level=level, fill_value=fill_value, axis=axis + ) + + multiply = mul + + @Appender(ops.make_flex_doc("rmul", "series")) + def rmul(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, roperator.rmul, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("truediv", "series")) + def truediv(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, operator.truediv, level=level, fill_value=fill_value, axis=axis + ) + + div = truediv + divide = truediv + + @Appender(ops.make_flex_doc("rtruediv", "series")) + def rtruediv(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, roperator.rtruediv, level=level, fill_value=fill_value, axis=axis + ) + + rdiv = rtruediv + + @Appender(ops.make_flex_doc("floordiv", "series")) + def floordiv(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, operator.floordiv, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("rfloordiv", "series")) + def rfloordiv(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, roperator.rfloordiv, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("mod", "series")) + def mod(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, operator.mod, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("rmod", "series")) + def rmod(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, roperator.rmod, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("pow", "series")) + def pow(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, operator.pow, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("rpow", "series")) + def rpow(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, roperator.rpow, level=level, fill_value=fill_value, axis=axis + ) + + 
@Appender(ops.make_flex_doc("divmod", "series")) + def divmod(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, divmod, level=level, fill_value=fill_value, axis=axis + ) + + @Appender(ops.make_flex_doc("rdivmod", "series")) + def rdivmod(self, other, level=None, fill_value=None, axis: Axis = 0): + return self._flex_method( + other, roperator.rdivmod, level=level, fill_value=fill_value, axis=axis + ) + + # ---------------------------------------------------------------------- + # Reductions + + def _reduce( + self, + op, + # error: Variable "pandas.core.series.Series.str" is not valid as a type + name: str, # type: ignore[valid-type] + *, + axis: Axis = 0, + skipna: bool = True, + numeric_only: bool = False, + filter_type=None, + **kwds, + ): + """ + Perform a reduction operation. + + If we have an ndarray as a value, then simply perform the operation, + otherwise delegate to the object. + """ + delegate = self._values + + if axis is not None: + self._get_axis_number(axis) + + if isinstance(delegate, ExtensionArray): + # dispatch to ExtensionArray interface + return delegate._reduce(name, skipna=skipna, **kwds) + + else: + # dispatch to numpy arrays + if numeric_only and self.dtype.kind not in "iufcb": + # i.e. not is_numeric_dtype(self.dtype) + kwd_name = "numeric_only" + if name in ["any", "all"]: + kwd_name = "bool_only" + # GH#47500 - change to TypeError to match other methods + raise TypeError( + f"Series.{name} does not allow {kwd_name}={numeric_only} " + "with non-numeric dtypes." + ) + return op(delegate, skipna=skipna, **kwds) + + @Appender(make_doc("any", ndim=1)) + # error: Signature of "any" incompatible with supertype "NDFrame" + def any( # type: ignore[override] + self, + *, + axis: Axis = 0, + bool_only: bool = False, + skipna: bool = True, + **kwargs, + ) -> bool: + nv.validate_logical_func((), kwargs, fname="any") + validate_bool_kwarg(skipna, "skipna", none_allowed=False) + return self._reduce( + nanops.nanany, + name="any", + axis=axis, + numeric_only=bool_only, + skipna=skipna, + filter_type="bool", + ) + + @Appender(make_doc("all", ndim=1)) + def all( + self, + axis: Axis = 0, + bool_only: bool = False, + skipna: bool = True, + **kwargs, + ) -> bool: + nv.validate_logical_func((), kwargs, fname="all") + validate_bool_kwarg(skipna, "skipna", none_allowed=False) + return self._reduce( + nanops.nanall, + name="all", + axis=axis, + numeric_only=bool_only, + skipna=skipna, + filter_type="bool", + ) + + @doc(make_doc("min", ndim=1)) + def min( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.min(self, axis, skipna, numeric_only, **kwargs) + + @doc(make_doc("max", ndim=1)) + def max( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.max(self, axis, skipna, numeric_only, **kwargs) + + @doc(make_doc("sum", ndim=1)) + def sum( + self, + axis: Axis | None = None, + skipna: bool = True, + numeric_only: bool = False, + min_count: int = 0, + **kwargs, + ): + return NDFrame.sum(self, axis, skipna, numeric_only, min_count, **kwargs) + + @doc(make_doc("prod", ndim=1)) + def prod( + self, + axis: Axis | None = None, + skipna: bool = True, + numeric_only: bool = False, + min_count: int = 0, + **kwargs, + ): + return NDFrame.prod(self, axis, skipna, numeric_only, min_count, **kwargs) + + @doc(make_doc("mean", ndim=1)) + def mean( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool 
= False, + **kwargs, + ): + return NDFrame.mean(self, axis, skipna, numeric_only, **kwargs) + + @doc(make_doc("median", ndim=1)) + def median( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.median(self, axis, skipna, numeric_only, **kwargs) + + @doc(make_doc("sem", ndim=1)) + def sem( + self, + axis: Axis | None = None, + skipna: bool = True, + ddof: int = 1, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.sem(self, axis, skipna, ddof, numeric_only, **kwargs) + + @doc(make_doc("var", ndim=1)) + def var( + self, + axis: Axis | None = None, + skipna: bool = True, + ddof: int = 1, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.var(self, axis, skipna, ddof, numeric_only, **kwargs) + + @doc(make_doc("std", ndim=1)) + def std( + self, + axis: Axis | None = None, + skipna: bool = True, + ddof: int = 1, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.std(self, axis, skipna, ddof, numeric_only, **kwargs) + + @doc(make_doc("skew", ndim=1)) + def skew( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.skew(self, axis, skipna, numeric_only, **kwargs) + + @doc(make_doc("kurt", ndim=1)) + def kurt( + self, + axis: Axis | None = 0, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ): + return NDFrame.kurt(self, axis, skipna, numeric_only, **kwargs) + + kurtosis = kurt + product = prod + + @doc(make_doc("cummin", ndim=1)) + def cummin(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cummin(self, axis, skipna, *args, **kwargs) + + @doc(make_doc("cummax", ndim=1)) + def cummax(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cummax(self, axis, skipna, *args, **kwargs) + + @doc(make_doc("cumsum", ndim=1)) + def cumsum(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) + + @doc(make_doc("cumprod", 1)) + def cumprod(self, axis: Axis | None = None, skipna: bool = True, *args, **kwargs): + return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/shared_docs.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/shared_docs.py new file mode 100644 index 00000000..ba793b9c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/shared_docs.py @@ -0,0 +1,929 @@ +from __future__ import annotations + +_shared_docs: dict[str, str] = {} + +_shared_docs[ + "aggregate" +] = """ +Aggregate using one or more operations over the specified axis. + +Parameters +---------- +func : function, str, list or dict + Function to use for aggregating the data. If a function, must either + work when passed a {klass} or when passed to {klass}.apply. + + Accepted combinations are: + + - function + - string function name + - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` + - dict of axis labels -> functions, function names or list of such. +{axis} +*args + Positional arguments to pass to `func`. +**kwargs + Keyword arguments to pass to `func`. + +Returns +------- +scalar, Series or DataFrame + + The return can be: + + * scalar : when Series.agg is called with single function + * Series : when DataFrame.agg is called with a single function + * DataFrame : when DataFrame.agg is called with several functions + + Return scalar, Series or DataFrame. 
+{see_also} +Notes +----- +The aggregation operations are always performed over an axis, either the +index (default) or the column axis. This behavior is different from +`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`, +`var`), where the default is to compute the aggregation of the flattened +array, e.g., ``numpy.mean(arr_2d)`` as opposed to +``numpy.mean(arr_2d, axis=0)``. + +`agg` is an alias for `aggregate`. Use the alias. + +Functions that mutate the passed object can produce unexpected +behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` +for more details. + +A passed user-defined-function will be passed a Series for evaluation. +{examples}""" + +_shared_docs[ + "compare" +] = """ +Compare to another {klass} and show the differences. + +Parameters +---------- +other : {klass} + Object to compare with. + +align_axis : {{0 or 'index', 1 or 'columns'}}, default 1 + Determine which axis to align the comparison on. + + * 0, or 'index' : Resulting differences are stacked vertically + with rows drawn alternately from self and other. + * 1, or 'columns' : Resulting differences are aligned horizontally + with columns drawn alternately from self and other. + +keep_shape : bool, default False + If true, all rows and columns are kept. + Otherwise, only the ones with different values are kept. + +keep_equal : bool, default False + If true, the result keeps values that are equal. + Otherwise, equal values are shown as NaNs. + +result_names : tuple, default ('self', 'other') + Set the dataframes names in the comparison. + + .. versionadded:: 1.5.0 +""" + +_shared_docs[ + "groupby" +] = """ +Group %(klass)s using a mapper or by a Series of columns. + +A groupby operation involves some combination of splitting the +object, applying a function, and combining the results. This can be +used to group large amounts of data and compute operations on these +groups. + +Parameters +---------- +by : mapping, function, label, pd.Grouper or list of such + Used to determine the groups for the groupby. + If ``by`` is a function, it's called on each value of the object's + index. If a dict or Series is passed, the Series or dict VALUES + will be used to determine the groups (the Series' values are first + aligned; see ``.align()`` method). If a list or ndarray of length + equal to the selected axis is passed (see the `groupby user guide + `_), + the values are used as-is to determine the groups. A label or list + of labels may be passed to group by the columns in ``self``. + Notice that a tuple is interpreted as a (single) key. +axis : {0 or 'index', 1 or 'columns'}, default 0 + Split along rows (0) or columns (1). For `Series` this parameter + is unused and defaults to 0. + + .. deprecated:: 2.1.0 + + Will be removed and behave like axis=0 in a future version. + For ``axis=1``, do ``frame.T.groupby(...)`` instead. + +level : int, level name, or sequence of such, default None + If the axis is a MultiIndex (hierarchical), group by a particular + level or levels. Do not specify both ``by`` and ``level``. +as_index : bool, default True + Return object with group labels as the + index. Only relevant for DataFrame input. as_index=False is + effectively "SQL-style" grouped output. This argument has no effect + on filtrations (see the `filtrations in the user guide + `_), + such as ``head()``, ``tail()``, ``nth()`` and in transformations + (see the `transformations in the user guide + `_). +sort : bool, default True + Sort group keys. Get better performance by turning this off. 
+ Note this does not influence the order of observations within each + group. Groupby preserves the order of rows within each group. If False, + the groups will appear in the same order as they did in the original DataFrame. + This argument has no effect on filtrations (see the `filtrations in the user guide + `_), + such as ``head()``, ``tail()``, ``nth()`` and in transformations + (see the `transformations in the user guide + `_). + + .. versionchanged:: 2.0.0 + + Specifying ``sort=False`` with an ordered categorical grouper will no + longer sort the values. + +group_keys : bool, default True + When calling apply and the ``by`` argument produces a like-indexed + (i.e. :ref:`a transform `) result, add group keys to + index to identify pieces. By default group keys are not included + when the result's index (and column) labels match the inputs, and + are included otherwise. + + .. versionchanged:: 1.5.0 + + Warns that ``group_keys`` will no longer be ignored when the + result from ``apply`` is a like-indexed Series or DataFrame. + Specify ``group_keys`` explicitly to include the group keys or + not. + + .. versionchanged:: 2.0.0 + + ``group_keys`` now defaults to ``True``. + +observed : bool, default False + This only applies if any of the groupers are Categoricals. + If True: only show observed values for categorical groupers. + If False: show all values for categorical groupers. + + .. deprecated:: 2.1.0 + + The default value will change to True in a future version of pandas. + +dropna : bool, default True + If True, and if group keys contain NA values, NA values together + with row/column will be dropped. + If False, NA values will also be treated as the key in groups. + +Returns +------- +pandas.api.typing.%(klass)sGroupBy + Returns a groupby object that contains information about the groups. + +See Also +-------- +resample : Convenience method for frequency conversion and resampling + of time series. + +Notes +----- +See the `user guide +`__ for more +detailed usage and examples, including splitting an object into groups, +iterating through groups, selecting a group, aggregation, and more. +""" + +_shared_docs[ + "melt" +] = """ +Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. + +This function is useful to massage a DataFrame into a format where one +or more columns are identifier variables (`id_vars`), while all other +columns, considered measured variables (`value_vars`), are "unpivoted" to +the row axis, leaving just two non-identifier columns, 'variable' and +'value'. + +Parameters +---------- +id_vars : tuple, list, or ndarray, optional + Column(s) to use as identifier variables. +value_vars : tuple, list, or ndarray, optional + Column(s) to unpivot. If not specified, uses all columns that + are not set as `id_vars`. +var_name : scalar + Name to use for the 'variable' column. If None it uses + ``frame.columns.name`` or 'variable'. +value_name : scalar, default 'value' + Name to use for the 'value' column. +col_level : int or str, optional + If columns are a MultiIndex then use this level to melt. +ignore_index : bool, default True + If True, original index is ignored. If False, the original index is retained. + Index labels will be repeated as necessary. + +Returns +------- +DataFrame + Unpivoted DataFrame. + +See Also +-------- +%(other)s : Identical method. +pivot_table : Create a spreadsheet-style pivot table as a DataFrame. +DataFrame.pivot : Return reshaped DataFrame organized + by given index / column values. 
+DataFrame.explode : Explode a DataFrame from list-like + columns to long format. + +Notes +----- +Reference :ref:`the user guide ` for more examples. + +Examples +-------- +>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, +... 'B': {0: 1, 1: 3, 2: 5}, +... 'C': {0: 2, 1: 4, 2: 6}}) +>>> df + A B C +0 a 1 2 +1 b 3 4 +2 c 5 6 + +>>> %(caller)sid_vars=['A'], value_vars=['B']) + A variable value +0 a B 1 +1 b B 3 +2 c B 5 + +>>> %(caller)sid_vars=['A'], value_vars=['B', 'C']) + A variable value +0 a B 1 +1 b B 3 +2 c B 5 +3 a C 2 +4 b C 4 +5 c C 6 + +The names of 'variable' and 'value' columns can be customized: + +>>> %(caller)sid_vars=['A'], value_vars=['B'], +... var_name='myVarname', value_name='myValname') + A myVarname myValname +0 a B 1 +1 b B 3 +2 c B 5 + +Original index values can be kept around: + +>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'], ignore_index=False) + A variable value +0 a B 1 +1 b B 3 +2 c B 5 +0 a C 2 +1 b C 4 +2 c C 6 + +If you have multi-index columns: + +>>> df.columns = [list('ABC'), list('DEF')] +>>> df + A B C + D E F +0 a 1 2 +1 b 3 4 +2 c 5 6 + +>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B']) + A variable value +0 a B 1 +1 b B 3 +2 c B 5 + +>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')]) + (A, D) variable_0 variable_1 value +0 a B E 1 +1 b B E 3 +2 c B E 5 +""" + +_shared_docs[ + "transform" +] = """ +Call ``func`` on self producing a {klass} with the same axis shape as self. + +Parameters +---------- +func : function, str, list-like or dict-like + Function to use for transforming the data. If a function, must either + work when passed a {klass} or when passed to {klass}.apply. If func + is both list-like and dict-like, dict-like behavior takes precedence. + + Accepted combinations are: + + - function + - string function name + - list-like of functions and/or function names, e.g. ``[np.exp, 'sqrt']`` + - dict-like of axis labels -> functions, function names or list-like of such. +{axis} +*args + Positional arguments to pass to `func`. +**kwargs + Keyword arguments to pass to `func`. + +Returns +------- +{klass} + A {klass} that must have the same length as self. + +Raises +------ +ValueError : If the returned {klass} has a different length than self. + +See Also +-------- +{klass}.agg : Only perform aggregating type operations. +{klass}.apply : Invoke function on a {klass}. + +Notes +----- +Functions that mutate the passed object can produce unexpected +behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` +for more details. + +Examples +-------- +>>> df = pd.DataFrame({{'A': range(3), 'B': range(1, 4)}}) +>>> df + A B +0 0 1 +1 1 2 +2 2 3 +>>> df.transform(lambda x: x + 1) + A B +0 1 2 +1 2 3 +2 3 4 + +Even though the resulting {klass} must have the same length as the +input {klass}, it is possible to provide several input functions: + +>>> s = pd.Series(range(3)) +>>> s +0 0 +1 1 +2 2 +dtype: int64 +>>> s.transform([np.sqrt, np.exp]) + sqrt exp +0 0.000000 1.000000 +1 1.000000 2.718282 +2 1.414214 7.389056 + +You can call transform on a GroupBy object: + +>>> df = pd.DataFrame({{ +... "Date": [ +... "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05", +... "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05"], +... "Data": [5, 8, 6, 1, 50, 100, 60, 120], +... 
}}) +>>> df + Date Data +0 2015-05-08 5 +1 2015-05-07 8 +2 2015-05-06 6 +3 2015-05-05 1 +4 2015-05-08 50 +5 2015-05-07 100 +6 2015-05-06 60 +7 2015-05-05 120 +>>> df.groupby('Date')['Data'].transform('sum') +0 55 +1 108 +2 66 +3 121 +4 55 +5 108 +6 66 +7 121 +Name: Data, dtype: int64 + +>>> df = pd.DataFrame({{ +... "c": [1, 1, 1, 2, 2, 2, 2], +... "type": ["m", "n", "o", "m", "m", "n", "n"] +... }}) +>>> df + c type +0 1 m +1 1 n +2 1 o +3 2 m +4 2 m +5 2 n +6 2 n +>>> df['size'] = df.groupby('c')['type'].transform(len) +>>> df + c type size +0 1 m 3 +1 1 n 3 +2 1 o 3 +3 2 m 4 +4 2 m 4 +5 2 n 4 +6 2 n 4 +""" + +_shared_docs[ + "storage_options" +] = """storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc. For HTTP(S) URLs the key-value pairs + are forwarded to ``urllib.request.Request`` as header options. For other + URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are + forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more + details, and for more examples on storage options refer `here + `_.""" + +_shared_docs[ + "compression_options" +] = """compression : str or dict, default 'infer' + For on-the-fly compression of the output data. If 'infer' and '%s' is + path-like, then detect compression from the following extensions: '.gz', + '.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2' + (otherwise no compression). + Set to ``None`` for no compression. + Can also be a dict with key ``'method'`` set + to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and + other key-value pairs are forwarded to + ``zipfile.ZipFile``, ``gzip.GzipFile``, + ``bz2.BZ2File``, ``zstandard.ZstdCompressor``, ``lzma.LZMAFile`` or + ``tarfile.TarFile``, respectively. + As an example, the following could be passed for faster compression and to create + a reproducible gzip archive: + ``compression={'method': 'gzip', 'compresslevel': 1, 'mtime': 1}``. + + .. versionadded:: 1.5.0 + Added support for `.tar` files.""" + +_shared_docs[ + "decompression_options" +] = """compression : str or dict, default 'infer' + For on-the-fly decompression of on-disk data. If 'infer' and '%s' is + path-like, then detect compression from the following extensions: '.gz', + '.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2' + (otherwise no compression). + If using 'zip' or 'tar', the ZIP file must contain only one data file to be read in. + Set to ``None`` for no decompression. + Can also be a dict with key ``'method'`` set + to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and + other key-value pairs are forwarded to + ``zipfile.ZipFile``, ``gzip.GzipFile``, + ``bz2.BZ2File``, ``zstandard.ZstdDecompressor``, ``lzma.LZMAFile`` or + ``tarfile.TarFile``, respectively. + As an example, the following could be passed for Zstandard decompression using a + custom compression dictionary: + ``compression={'method': 'zstd', 'dict_data': my_compression_dict}``. + + .. versionadded:: 1.5.0 + Added support for `.tar` files.""" + +_shared_docs[ + "replace" +] = """ + Replace values given in `to_replace` with `value`. + + Values of the {klass} are replaced with other values dynamically. + This differs from updating with ``.loc`` or ``.iloc``, which require + you to specify a location to update with some value. 
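+
+    For instance, a minimal sketch of the difference: ``replace`` matches by
+    value anywhere in the object, while ``.loc`` assigns into an explicitly
+    named location.
+
+    >>> df = pd.DataFrame({{'A': [0, 1], 'B': [0, 2]}})
+    >>> df.replace(0, 5)  # value-based: matches in every column
+       A  B
+    0  5  5
+    1  1  2
+    >>> df.loc[df['A'] == 0, 'A'] = 5  # location-based: only column 'A'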
+
+    Parameters
+    ----------
+    to_replace : str, regex, list, dict, Series, int, float, or None
+        How to find the values that will be replaced.
+
+        * numeric, str or regex:
+
+            - numeric: numeric values equal to `to_replace` will be
+              replaced with `value`
+            - str: string exactly matching `to_replace` will be replaced
+              with `value`
+            - regex: regexes matching `to_replace` will be replaced with
+              `value`
+
+        * list of str, regex, or numeric:
+
+            - First, if `to_replace` and `value` are both lists, they
+              **must** be the same length.
+            - Second, if ``regex=True`` then all of the strings in **both**
+              lists will be interpreted as regexes, otherwise they will match
+              directly. This doesn't matter much for `value` since there
+              are only a few possible substitution regexes you can use.
+            - str, regex and numeric rules apply as above.
+
+        * dict:
+
+            - Dicts can be used to specify different replacement values
+              for different existing values. For example,
+              ``{{'a': 'b', 'y': 'z'}}`` replaces the value 'a' with 'b' and
+              'y' with 'z'. To use a dict in this way, the optional `value`
+              parameter should not be given.
+            - For a DataFrame a dict can specify that different values
+              should be replaced in different columns. For example,
+              ``{{'a': 1, 'b': 'z'}}`` looks for the value 1 in column 'a'
+              and the value 'z' in column 'b' and replaces these values
+              with whatever is specified in `value`. The `value` parameter
+              should not be ``None`` in this case. You can treat this as a
+              special case of passing two lists except that you are
+              specifying the column to search in.
+            - For a DataFrame nested dictionaries, e.g.,
+              ``{{'a': {{'b': np.nan}}}}``, are read as follows: look in column
+              'a' for the value 'b' and replace it with NaN. The optional `value`
+              parameter should not be specified to use a nested dict in this
+              way. You can nest regular expressions as well. Note that
+              column names (the top-level dictionary keys in a nested
+              dictionary) **cannot** be regular expressions.
+
+        * None:
+
+            - This means that the `regex` argument must be a string,
+              compiled regular expression, or list, dict, ndarray or
+              Series of such elements. If `value` is also ``None`` then
+              this **must** be a nested dictionary or Series.
+
+        See the examples section for examples of each of these.
+    value : scalar, dict, list, str, regex, default None
+        Value to replace any values matching `to_replace` with.
+        For a DataFrame a dict of values can be used to specify which
+        value to use for each column (columns not in the dict will not be
+        filled). Regular expressions, strings and lists or dicts of such
+        objects are also allowed.
+    {inplace}
+    limit : int, default None
+        Maximum size gap to forward or backward fill.
+
+        .. deprecated:: 2.1.0
+    regex : bool or same types as `to_replace`, default False
+        Whether to interpret `to_replace` and/or `value` as regular
+        expressions. If this is ``True`` then `to_replace` *must* be a
+        string. Alternatively, this could be a regular expression or a
+        list, dict, or array of regular expressions in which case
+        `to_replace` must be ``None``.
+    method : {{'pad', 'ffill', 'bfill'}}
+        The method to use for replacement when `to_replace` is a
+        scalar, list or tuple and `value` is ``None``.
+
+        .. deprecated:: 2.1.0
+
+    Returns
+    -------
+    {klass}
+        Object after replacement.
+
+    Raises
+    ------
+    AssertionError
+        * If `regex` is not a ``bool`` and `to_replace` is not
+          ``None``.
+ + TypeError + * If `to_replace` is not a scalar, array-like, ``dict``, or ``None`` + * If `to_replace` is a ``dict`` and `value` is not a ``list``, + ``dict``, ``ndarray``, or ``Series`` + * If `to_replace` is ``None`` and `regex` is not compilable + into a regular expression or is a list, dict, ndarray, or + Series. + * When replacing multiple ``bool`` or ``datetime64`` objects and + the arguments to `to_replace` does not match the type of the + value being replaced + + ValueError + * If a ``list`` or an ``ndarray`` is passed to `to_replace` and + `value` but they are not the same length. + + See Also + -------- + Series.fillna : Fill NA values. + DataFrame.fillna : Fill NA values. + Series.where : Replace values based on boolean condition. + DataFrame.where : Replace values based on boolean condition. + DataFrame.map: Apply a function to a Dataframe elementwise. + Series.map: Map values of Series according to an input mapping or function. + Series.str.replace : Simple string replacement. + + Notes + ----- + * Regex substitution is performed under the hood with ``re.sub``. The + rules for substitution for ``re.sub`` are the same. + * Regular expressions will only substitute on strings, meaning you + cannot provide, for example, a regular expression matching floating + point numbers and expect the columns in your frame that have a + numeric dtype to be matched. However, if those floating point + numbers *are* strings, then you can do this. + * This method has *a lot* of options. You are encouraged to experiment + and play with this method to gain intuition about how it works. + * When dict is used as the `to_replace` value, it is like + key(s) in the dict are the to_replace part and + value(s) in the dict are the value parameter. + + Examples + -------- + + **Scalar `to_replace` and `value`** + + >>> s = pd.Series([1, 2, 3, 4, 5]) + >>> s.replace(1, 5) + 0 5 + 1 2 + 2 3 + 3 4 + 4 5 + dtype: int64 + + >>> df = pd.DataFrame({{'A': [0, 1, 2, 3, 4], + ... 'B': [5, 6, 7, 8, 9], + ... 'C': ['a', 'b', 'c', 'd', 'e']}}) + >>> df.replace(0, 5) + A B C + 0 5 5 a + 1 1 6 b + 2 2 7 c + 3 3 8 d + 4 4 9 e + + **List-like `to_replace`** + + >>> df.replace([0, 1, 2, 3], 4) + A B C + 0 4 5 a + 1 4 6 b + 2 4 7 c + 3 4 8 d + 4 4 9 e + + >>> df.replace([0, 1, 2, 3], [4, 3, 2, 1]) + A B C + 0 4 5 a + 1 3 6 b + 2 2 7 c + 3 1 8 d + 4 4 9 e + + >>> s.replace([1, 2], method='bfill') + 0 3 + 1 3 + 2 3 + 3 4 + 4 5 + dtype: int64 + + **dict-like `to_replace`** + + >>> df.replace({{0: 10, 1: 100}}) + A B C + 0 10 5 a + 1 100 6 b + 2 2 7 c + 3 3 8 d + 4 4 9 e + + >>> df.replace({{'A': 0, 'B': 5}}, 100) + A B C + 0 100 100 a + 1 1 6 b + 2 2 7 c + 3 3 8 d + 4 4 9 e + + >>> df.replace({{'A': {{0: 100, 4: 400}}}}) + A B C + 0 100 5 a + 1 1 6 b + 2 2 7 c + 3 3 8 d + 4 400 9 e + + **Regular expression `to_replace`** + + >>> df = pd.DataFrame({{'A': ['bat', 'foo', 'bait'], + ... 
'B': ['abc', 'bar', 'xyz']}}) + >>> df.replace(to_replace=r'^ba.$', value='new', regex=True) + A B + 0 new abc + 1 foo new + 2 bait xyz + + >>> df.replace({{'A': r'^ba.$'}}, {{'A': 'new'}}, regex=True) + A B + 0 new abc + 1 foo bar + 2 bait xyz + + >>> df.replace(regex=r'^ba.$', value='new') + A B + 0 new abc + 1 foo new + 2 bait xyz + + >>> df.replace(regex={{r'^ba.$': 'new', 'foo': 'xyz'}}) + A B + 0 new abc + 1 xyz new + 2 bait xyz + + >>> df.replace(regex=[r'^ba.$', 'foo'], value='new') + A B + 0 new abc + 1 new new + 2 bait xyz + + Compare the behavior of ``s.replace({{'a': None}})`` and + ``s.replace('a', None)`` to understand the peculiarities + of the `to_replace` parameter: + + >>> s = pd.Series([10, 'a', 'a', 'b', 'a']) + + When one uses a dict as the `to_replace` value, it is like the + value(s) in the dict are equal to the `value` parameter. + ``s.replace({{'a': None}})`` is equivalent to + ``s.replace(to_replace={{'a': None}}, value=None, method=None)``: + + >>> s.replace({{'a': None}}) + 0 10 + 1 None + 2 None + 3 b + 4 None + dtype: object + + When ``value`` is not explicitly passed and `to_replace` is a scalar, list + or tuple, `replace` uses the method parameter (default 'pad') to do the + replacement. So this is why the 'a' values are being replaced by 10 + in rows 1 and 2 and 'b' in row 4 in this case. + + >>> s.replace('a') + 0 10 + 1 10 + 2 10 + 3 b + 4 b + dtype: object + + .. deprecated:: 2.1.0 + The 'method' parameter and padding behavior are deprecated. + + On the other hand, if ``None`` is explicitly passed for ``value``, it will + be respected: + + >>> s.replace('a', None) + 0 10 + 1 None + 2 None + 3 b + 4 None + dtype: object + + .. versionchanged:: 1.4.0 + Previously the explicit ``None`` was silently ignored. +""" + +_shared_docs[ + "idxmin" +] = """ + Return index of first occurrence of minimum over requested axis. + + NA/null values are excluded. + + Parameters + ---------- + axis : {{0 or 'index', 1 or 'columns'}}, default 0 + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + numeric_only : bool, default {numeric_only_default} + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + Returns + ------- + Series + Indexes of minima along the specified axis. + + Raises + ------ + ValueError + * If the row/column is empty + + See Also + -------- + Series.idxmin : Return index of the minimum element. + + Notes + ----- + This method is the DataFrame version of ``ndarray.argmin``. + + Examples + -------- + Consider a dataset containing food consumption in Argentina. + + >>> df = pd.DataFrame({{'consumption': [10.51, 103.11, 55.48], + ... 'co2_emissions': [37.2, 19.66, 1712]}}, + ... index=['Pork', 'Wheat Products', 'Beef']) + + >>> df + consumption co2_emissions + Pork 10.51 37.20 + Wheat Products 103.11 19.66 + Beef 55.48 1712.00 + + By default, it returns the index for the minimum value in each column. + + >>> df.idxmin() + consumption Pork + co2_emissions Wheat Products + dtype: object + + To return the index for the minimum value in each row, use ``axis="columns"``. + + >>> df.idxmin(axis="columns") + Pork consumption + Wheat Products co2_emissions + Beef consumption + dtype: object +""" + +_shared_docs[ + "idxmax" +] = """ + Return index of first occurrence of maximum over requested axis. + + NA/null values are excluded. 
+ + Parameters + ---------- + axis : {{0 or 'index', 1 or 'columns'}}, default 0 + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + numeric_only : bool, default {numeric_only_default} + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + Returns + ------- + Series + Indexes of maxima along the specified axis. + + Raises + ------ + ValueError + * If the row/column is empty + + See Also + -------- + Series.idxmax : Return index of the maximum element. + + Notes + ----- + This method is the DataFrame version of ``ndarray.argmax``. + + Examples + -------- + Consider a dataset containing food consumption in Argentina. + + >>> df = pd.DataFrame({{'consumption': [10.51, 103.11, 55.48], + ... 'co2_emissions': [37.2, 19.66, 1712]}}, + ... index=['Pork', 'Wheat Products', 'Beef']) + + >>> df + consumption co2_emissions + Pork 10.51 37.20 + Wheat Products 103.11 19.66 + Beef 55.48 1712.00 + + By default, it returns the index for the maximum value in each column. + + >>> df.idxmax() + consumption Wheat Products + co2_emissions Beef + dtype: object + + To return the index for the maximum value in each row, use ``axis="columns"``. + + >>> df.idxmax(axis="columns") + Pork co2_emissions + Wheat Products consumption + Beef co2_emissions + dtype: object +""" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/sorting.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/sorting.py new file mode 100644 index 00000000..e6b54de9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/sorting.py @@ -0,0 +1,792 @@ +""" miscellaneous sorting / groupby utilities """ +from __future__ import annotations + +from collections import defaultdict +from typing import ( + TYPE_CHECKING, + Callable, + DefaultDict, + cast, +) + +import numpy as np + +from pandas._libs import ( + algos, + hashtable, + lib, +) +from pandas._libs.hashtable import unique_label_indices + +from pandas.core.dtypes.common import ( + ensure_int64, + ensure_platform_int, +) +from pandas.core.dtypes.generic import ( + ABCMultiIndex, + ABCRangeIndex, +) +from pandas.core.dtypes.missing import isna + +from pandas.core.construction import extract_array + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterable, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + AxisInt, + IndexKeyFunc, + Level, + NaPosition, + Shape, + SortKind, + npt, + ) + + from pandas import ( + MultiIndex, + Series, + ) + from pandas.core.arrays import ExtensionArray + from pandas.core.indexes.base import Index + + +def get_indexer_indexer( + target: Index, + level: Level | list[Level] | None, + ascending: list[bool] | bool, + kind: SortKind, + na_position: NaPosition, + sort_remaining: bool, + key: IndexKeyFunc, +) -> npt.NDArray[np.intp] | None: + """ + Helper method that return the indexer according to input parameters for + the sort_index method of DataFrame and Series. + + Parameters + ---------- + target : Index + level : int or level name or list of ints or list of level names + ascending : bool or list of bools, default True + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'} + na_position : {'first', 'last'} + sort_remaining : bool + key : callable, optional + + Returns + ------- + Optional[ndarray[intp]] + The indexer for the new index. 
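+
+    A rough sketch of the contract (this is an internal helper, so the exact
+    signature may differ between pandas versions):
+
+    >>> from pandas import Index
+    >>> from pandas.core.sorting import get_indexer_indexer
+    >>> get_indexer_indexer(Index([2, 0, 1]), level=None, ascending=True,
+    ...                     kind="quicksort", na_position="last",
+    ...                     sort_remaining=True, key=None)
+    array([1, 2, 0])
+    >>> # an already-monotonic index short-circuits and returns None
+    >>> get_indexer_indexer(Index([0, 1, 2]), level=None, ascending=True,
+    ...                     kind="quicksort", na_position="last",
+    ...                     sort_remaining=True, key=None)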
+ """ + + # error: Incompatible types in assignment (expression has type + # "Union[ExtensionArray, ndarray[Any, Any], Index, Series]", variable has + # type "Index") + target = ensure_key_mapped(target, key, levels=level) # type:ignore[assignment] + target = target._sort_levels_monotonic() + + if level is not None: + _, indexer = target.sortlevel( + level, + ascending=ascending, + sort_remaining=sort_remaining, + na_position=na_position, + ) + elif isinstance(target, ABCMultiIndex): + indexer = lexsort_indexer( + target.codes, orders=ascending, na_position=na_position, codes_given=True + ) + else: + # Check monotonic-ness before sort an index (GH 11080) + if (ascending and target.is_monotonic_increasing) or ( + not ascending and target.is_monotonic_decreasing + ): + return None + + # ascending can only be a Sequence for MultiIndex + indexer = nargsort( + target, + kind=kind, + ascending=cast(bool, ascending), + na_position=na_position, + ) + return indexer + + +def get_group_index( + labels, shape: Shape, sort: bool, xnull: bool +) -> npt.NDArray[np.int64]: + """ + For the particular label_list, gets the offsets into the hypothetical list + representing the totally ordered cartesian product of all possible label + combinations, *as long as* this space fits within int64 bounds; + otherwise, though group indices identify unique combinations of + labels, they cannot be deconstructed. + - If `sort`, rank of returned ids preserve lexical ranks of labels. + i.e. returned id's can be used to do lexical sort on labels; + - If `xnull` nulls (-1 labels) are passed through. + + Parameters + ---------- + labels : sequence of arrays + Integers identifying levels at each location + shape : tuple[int, ...] + Number of unique levels at each location + sort : bool + If the ranks of returned ids should match lexical ranks of labels + xnull : bool + If true nulls are excluded. i.e. -1 values in the labels are + passed through. + + Returns + ------- + An array of type int64 where two elements are equal if their corresponding + labels are equal at all location. + + Notes + ----- + The length of `labels` and `shape` must be identical. + """ + + def _int64_cut_off(shape) -> int: + acc = 1 + for i, mul in enumerate(shape): + acc *= int(mul) + if not acc < lib.i8max: + return i + return len(shape) + + def maybe_lift(lab, size: int) -> tuple[np.ndarray, int]: + # promote nan values (assigned -1 label in lab array) + # so that all output values are non-negative + return (lab + 1, size + 1) if (lab == -1).any() else (lab, size) + + labels = [ensure_int64(x) for x in labels] + lshape = list(shape) + if not xnull: + for i, (lab, size) in enumerate(zip(labels, shape)): + labels[i], lshape[i] = maybe_lift(lab, size) + + labels = list(labels) + + # Iteratively process all the labels in chunks sized so less + # than lib.i8max unique int ids will be required for each chunk + while True: + # how many levels can be done without overflow: + nlev = _int64_cut_off(lshape) + + # compute flat ids for the first `nlev` levels + stride = np.prod(lshape[1:nlev], dtype="i8") + out = stride * labels[0].astype("i8", subok=False, copy=False) + + for i in range(1, nlev): + if lshape[i] == 0: + stride = np.int64(0) + else: + stride //= lshape[i] + out += labels[i] * stride + + if xnull: # exclude nulls + mask = labels[0] == -1 + for lab in labels[1:nlev]: + mask |= lab == -1 + out[mask] = -1 + + if nlev == len(lshape): # all levels done! 
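+            # out now holds the flat cartesian-product ids; conceptually this
+            # is np.ravel_multi_index: with level sizes (2, 3), the label
+            # pair (i, j) maps to the flat id i * 3 + j.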
+ break + + # compress what has been done so far in order to avoid overflow + # to retain lexical ranks, obs_ids should be sorted + comp_ids, obs_ids = compress_group_index(out, sort=sort) + + labels = [comp_ids] + labels[nlev:] + lshape = [len(obs_ids)] + lshape[nlev:] + + return out + + +def get_compressed_ids( + labels, sizes: Shape +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]: + """ + Group_index is offsets into cartesian product of all possible labels. This + space can be huge, so this function compresses it, by computing offsets + (comp_ids) into the list of unique labels (obs_group_ids). + + Parameters + ---------- + labels : list of label arrays + sizes : tuple[int] of size of the levels + + Returns + ------- + np.ndarray[np.intp] + comp_ids + np.ndarray[np.int64] + obs_group_ids + """ + ids = get_group_index(labels, sizes, sort=True, xnull=False) + return compress_group_index(ids, sort=True) + + +def is_int64_overflow_possible(shape: Shape) -> bool: + the_prod = 1 + for x in shape: + the_prod *= int(x) + + return the_prod >= lib.i8max + + +def _decons_group_index( + comp_labels: npt.NDArray[np.intp], shape: Shape +) -> list[npt.NDArray[np.intp]]: + # reconstruct labels + if is_int64_overflow_possible(shape): + # at some point group indices are factorized, + # and may not be deconstructed here! wrong path! + raise ValueError("cannot deconstruct factorized group indices!") + + label_list = [] + factor = 1 + y = np.array(0) + x = comp_labels + for i in reversed(range(len(shape))): + labels = (x - y) % (factor * shape[i]) // factor + np.putmask(labels, comp_labels < 0, -1) + label_list.append(labels) + y = labels * factor + factor *= shape[i] + return label_list[::-1] + + +def decons_obs_group_ids( + comp_ids: npt.NDArray[np.intp], + obs_ids: npt.NDArray[np.intp], + shape: Shape, + labels: Sequence[npt.NDArray[np.signedinteger]], + xnull: bool, +) -> list[npt.NDArray[np.intp]]: + """ + Reconstruct labels from observed group ids. + + Parameters + ---------- + comp_ids : np.ndarray[np.intp] + obs_ids: np.ndarray[np.intp] + shape : tuple[int] + labels : Sequence[np.ndarray[np.signedinteger]] + xnull : bool + If nulls are excluded; i.e. -1 labels are passed through. + """ + if not xnull: + lift = np.fromiter(((a == -1).any() for a in labels), dtype=np.intp) + arr_shape = np.asarray(shape, dtype=np.intp) + lift + shape = tuple(arr_shape) + + if not is_int64_overflow_possible(shape): + # obs ids are deconstructable! take the fast route! + out = _decons_group_index(obs_ids, shape) + return out if xnull or not lift.any() else [x - y for x, y in zip(out, lift)] + + indexer = unique_label_indices(comp_ids) + return [lab[indexer].astype(np.intp, subok=False, copy=True) for lab in labels] + + +def indexer_from_factorized( + labels, shape: Shape, compress: bool = True +) -> npt.NDArray[np.intp]: + ids = get_group_index(labels, shape, sort=True, xnull=False) + + if not compress: + ngroups = (ids.size and ids.max()) + 1 + else: + ids, obs = compress_group_index(ids, sort=True) + ngroups = len(obs) + + return get_group_index_sorter(ids, ngroups) + + +def lexsort_indexer( + keys: list[ArrayLike] | list[Series], + orders=None, + na_position: str = "last", + key: Callable | None = None, + codes_given: bool = False, +) -> npt.NDArray[np.intp]: + """ + Performs lexical sorting on a set of keys + + Parameters + ---------- + keys : list[ArrayLike] | list[Series] + Sequence of ndarrays to be sorted by the indexer + list[Series] is only if key is not None. 
+ orders : bool or list of booleans, optional + Determines the sorting order for each element in keys. If a list, + it must be the same length as keys. This determines whether the + corresponding element in keys should be sorted in ascending + (True) or descending (False) order. if bool, applied to all + elements as above. if None, defaults to True. + na_position : {'first', 'last'}, default 'last' + Determines placement of NA elements in the sorted list ("last" or "first") + key : Callable, optional + Callable key function applied to every element in keys before sorting + codes_given: bool, False + Avoid categorical materialization if codes are already provided. + + Returns + ------- + np.ndarray[np.intp] + """ + from pandas.core.arrays import Categorical + + labels = [] + shape = [] + if isinstance(orders, bool): + orders = [orders] * len(keys) + elif orders is None: + orders = [True] * len(keys) + + # error: Incompatible types in assignment (expression has type + # "List[Union[ExtensionArray, ndarray[Any, Any], Index, Series]]", variable + # has type "Union[List[Union[ExtensionArray, ndarray[Any, Any]]], List[Series]]") + keys = [ensure_key_mapped(k, key) for k in keys] # type: ignore[assignment] + + for k, order in zip(keys, orders): + if na_position not in ["last", "first"]: + raise ValueError(f"invalid na_position: {na_position}") + + if codes_given: + mask = k == -1 + codes = k.copy() + # error: Item "ExtensionArray" of "Series | ExtensionArray | + # ndarray[Any, Any]" has no attribute "max" + n = codes.max() + 1 if len(codes) else 0 # type: ignore[union-attr] + + else: + cat = Categorical(k, ordered=True) + n = len(cat.categories) + codes = cat.codes.copy() + mask = cat.codes == -1 + + if order: # ascending + if na_position == "last": + # error: Argument 1 to "where" has incompatible type "Union[Any, + # ExtensionArray, ndarray[Any, Any]]"; expected + # "Union[_SupportsArray[dtype[Any]], + # _NestedSequence[_SupportsArray[dtype[Any]]], bool, int, float, + # complex, str, bytes, _NestedSequence[Union[bool, int, float, + # complex, str, bytes]]]" + codes = np.where(mask, n, codes) # type: ignore[arg-type] + else: # not order means descending + if na_position == "last": + # error: Unsupported operand types for - ("int" and "ExtensionArray") + # error: Argument 1 to "where" has incompatible type "Union[Any, + # ExtensionArray, ndarray[Any, Any]]"; expected + # "Union[_SupportsArray[dtype[Any]], + # _NestedSequence[_SupportsArray[dtype[Any]]], bool, int, float, + # complex, str, bytes, _NestedSequence[Union[bool, int, float, + # complex, str, bytes]]]" + codes = np.where(mask, n, n - codes - 1) # type: ignore[arg-type] + elif na_position == "first": + # error: Unsupported operand types for - ("int" and "ExtensionArray") + # error: Argument 1 to "where" has incompatible type "Union[Any, + # ExtensionArray, ndarray[Any, Any]]"; expected + # "Union[_SupportsArray[dtype[Any]], + # _NestedSequence[_SupportsArray[dtype[Any]]], bool, int, float, + # complex, str, bytes, _NestedSequence[Union[bool, int, float, + # complex, str, bytes]]]" + codes = np.where(mask, -1, n - codes) # type: ignore[arg-type] + + shape.append(n + 1) + labels.append(codes) + + return indexer_from_factorized(labels, tuple(shape)) + + +def nargsort( + items: ArrayLike | Index | Series, + kind: SortKind = "quicksort", + ascending: bool = True, + na_position: str = "last", + key: Callable | None = None, + mask: npt.NDArray[np.bool_] | None = None, +) -> npt.NDArray[np.intp]: + """ + Intended to be a drop-in replacement for 
np.argsort which handles NaNs. + + Adds ascending, na_position, and key parameters. + + (GH #6399, #5231, #27237) + + Parameters + ---------- + items : np.ndarray, ExtensionArray, Index, or Series + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' + ascending : bool, default True + na_position : {'first', 'last'}, default 'last' + key : Optional[Callable], default None + mask : Optional[np.ndarray[bool]], default None + Passed when called by ExtensionArray.argsort. + + Returns + ------- + np.ndarray[np.intp] + """ + + if key is not None: + # see TestDataFrameSortKey, TestRangeIndex::test_sort_values_key + items = ensure_key_mapped(items, key) + return nargsort( + items, + kind=kind, + ascending=ascending, + na_position=na_position, + key=None, + mask=mask, + ) + + if isinstance(items, ABCRangeIndex): + return items.argsort(ascending=ascending) + elif not isinstance(items, ABCMultiIndex): + items = extract_array(items) + else: + raise TypeError( + "nargsort does not support MultiIndex. Use index.sort_values instead." + ) + + if mask is None: + mask = np.asarray(isna(items)) + + if not isinstance(items, np.ndarray): + # i.e. ExtensionArray + return items.argsort( + ascending=ascending, + kind=kind, + na_position=na_position, + ) + + idx = np.arange(len(items)) + non_nans = items[~mask] + non_nan_idx = idx[~mask] + + nan_idx = np.nonzero(mask)[0] + if not ascending: + non_nans = non_nans[::-1] + non_nan_idx = non_nan_idx[::-1] + indexer = non_nan_idx[non_nans.argsort(kind=kind)] + if not ascending: + indexer = indexer[::-1] + # Finally, place the NaNs at the end or the beginning according to + # na_position + if na_position == "last": + indexer = np.concatenate([indexer, nan_idx]) + elif na_position == "first": + indexer = np.concatenate([nan_idx, indexer]) + else: + raise ValueError(f"invalid na_position: {na_position}") + return ensure_platform_int(indexer) + + +def nargminmax(values: ExtensionArray, method: str, axis: AxisInt = 0): + """ + Implementation of np.argmin/argmax but for ExtensionArray and which + handles missing values. + + Parameters + ---------- + values : ExtensionArray + method : {"argmax", "argmin"} + axis : int, default 0 + + Returns + ------- + int + """ + assert method in {"argmax", "argmin"} + func = np.argmax if method == "argmax" else np.argmin + + mask = np.asarray(isna(values)) + arr_values = values._values_for_argsort() + + if arr_values.ndim > 1: + if mask.any(): + if axis == 1: + zipped = zip(arr_values, mask) + else: + zipped = zip(arr_values.T, mask.T) + return np.array([_nanargminmax(v, m, func) for v, m in zipped]) + return func(arr_values, axis=axis) + + return _nanargminmax(arr_values, mask, func) + + +def _nanargminmax(values: np.ndarray, mask: npt.NDArray[np.bool_], func) -> int: + """ + See nanargminmax.__doc__. + """ + idx = np.arange(values.shape[0]) + non_nans = values[~mask] + non_nan_idx = idx[~mask] + + return non_nan_idx[func(non_nans)] + + +def _ensure_key_mapped_multiindex( + index: MultiIndex, key: Callable, level=None +) -> MultiIndex: + """ + Returns a new MultiIndex in which key has been applied + to all levels specified in level (or all levels if level + is None). Used for key sorting for MultiIndex. + + Parameters + ---------- + index : MultiIndex + Index to which to apply the key function on the + specified levels. + key : Callable + Function that takes an Index and returns an Index of + the same shape. This key is applied to each level + separately. 
The name of the level can be used to + distinguish different levels for application. + level : list-like, int or str, default None + Level or list of levels to apply the key function to. + If None, key function is applied to all levels. Other + levels are left unchanged. + + Returns + ------- + labels : MultiIndex + Resulting MultiIndex with modified levels. + """ + + if level is not None: + if isinstance(level, (str, int)): + sort_levels = [level] + else: + sort_levels = level + + sort_levels = [index._get_level_number(lev) for lev in sort_levels] + else: + sort_levels = list(range(index.nlevels)) # satisfies mypy + + mapped = [ + ensure_key_mapped(index._get_level_values(level), key) + if level in sort_levels + else index._get_level_values(level) + for level in range(index.nlevels) + ] + + return type(index).from_arrays(mapped) + + +def ensure_key_mapped( + values: ArrayLike | Index | Series, key: Callable | None, levels=None +) -> ArrayLike | Index | Series: + """ + Applies a callable key function to the values function and checks + that the resulting value has the same shape. Can be called on Index + subclasses, Series, DataFrames, or ndarrays. + + Parameters + ---------- + values : Series, DataFrame, Index subclass, or ndarray + key : Optional[Callable], key to be called on the values array + levels : Optional[List], if values is a MultiIndex, list of levels to + apply the key to. + """ + from pandas.core.indexes.api import Index + + if not key: + return values + + if isinstance(values, ABCMultiIndex): + return _ensure_key_mapped_multiindex(values, key, level=levels) + + result = key(values.copy()) + if len(result) != len(values): + raise ValueError( + "User-provided `key` function must not change the shape of the array." + ) + + try: + if isinstance( + values, Index + ): # convert to a new Index subclass, not necessarily the same + result = Index(result) + else: + # try to revert to original type otherwise + type_of_values = type(values) + # error: Too many arguments for "ExtensionArray" + result = type_of_values(result) # type: ignore[call-arg] + except TypeError: + raise TypeError( + f"User-provided `key` function returned an invalid type {type(result)} \ + which could not be converted to {type(values)}." + ) + + return result + + +def get_flattened_list( + comp_ids: npt.NDArray[np.intp], + ngroups: int, + levels: Iterable[Index], + labels: Iterable[np.ndarray], +) -> list[tuple]: + """Map compressed group id -> key tuple.""" + comp_ids = comp_ids.astype(np.int64, copy=False) + arrays: DefaultDict[int, list[int]] = defaultdict(list) + for labs, level in zip(labels, levels): + table = hashtable.Int64HashTable(ngroups) + table.map_keys_to_values(comp_ids, labs.astype(np.int64, copy=False)) + for i in range(ngroups): + arrays[i].append(level[table.get_item(i)]) + return [tuple(array) for array in arrays.values()] + + +def get_indexer_dict( + label_list: list[np.ndarray], keys: list[Index] +) -> dict[Hashable, npt.NDArray[np.intp]]: + """ + Returns + ------- + dict: + Labels mapped to indexers. 
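+
+    A small sketch (internal helper; the exact import path and output
+    formatting may vary by pandas version):
+
+    >>> import numpy as np
+    >>> from pandas import Index
+    >>> from pandas.core.sorting import get_indexer_dict
+    >>> codes = [np.array([0, 1, 0, 1], dtype=np.intp)]
+    >>> get_indexer_dict(codes, [Index(['a', 'b'])])
+    {'a': array([0, 2]), 'b': array([1, 3])}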
+ """ + shape = tuple(len(x) for x in keys) + + group_index = get_group_index(label_list, shape, sort=True, xnull=True) + if np.all(group_index == -1): + # Short-circuit, lib.indices_fast will return the same + return {} + ngroups = ( + ((group_index.size and group_index.max()) + 1) + if is_int64_overflow_possible(shape) + else np.prod(shape, dtype="i8") + ) + + sorter = get_group_index_sorter(group_index, ngroups) + + sorted_labels = [lab.take(sorter) for lab in label_list] + group_index = group_index.take(sorter) + + return lib.indices_fast(sorter, group_index, keys, sorted_labels) + + +# ---------------------------------------------------------------------- +# sorting levels...cleverly? + + +def get_group_index_sorter( + group_index: npt.NDArray[np.intp], ngroups: int | None = None +) -> npt.NDArray[np.intp]: + """ + algos.groupsort_indexer implements `counting sort` and it is at least + O(ngroups), where + ngroups = prod(shape) + shape = map(len, keys) + that is, linear in the number of combinations (cartesian product) of unique + values of groupby keys. This can be huge when doing multi-key groupby. + np.argsort(kind='mergesort') is O(count x log(count)) where count is the + length of the data-frame; + Both algorithms are `stable` sort and that is necessary for correctness of + groupby operations. e.g. consider: + df.groupby(key)[col].transform('first') + + Parameters + ---------- + group_index : np.ndarray[np.intp] + signed integer dtype + ngroups : int or None, default None + + Returns + ------- + np.ndarray[np.intp] + """ + if ngroups is None: + ngroups = 1 + group_index.max() + count = len(group_index) + alpha = 0.0 # taking complexities literally; there may be + beta = 1.0 # some room for fine-tuning these parameters + do_groupsort = count > 0 and ((alpha + beta * ngroups) < (count * np.log(count))) + if do_groupsort: + sorter, _ = algos.groupsort_indexer( + ensure_platform_int(group_index), + ngroups, + ) + # sorter _should_ already be intp, but mypy is not yet able to verify + else: + sorter = group_index.argsort(kind="mergesort") + return ensure_platform_int(sorter) + + +def compress_group_index( + group_index: npt.NDArray[np.int64], sort: bool = True +) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: + """ + Group_index is offsets into cartesian product of all possible labels. This + space can be huge, so this function compresses it, by computing offsets + (comp_ids) into the list of unique labels (obs_group_ids). 
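+
+    A rough sketch of the compression (internal helper):
+
+    >>> import numpy as np
+    >>> from pandas.core.sorting import compress_group_index
+    >>> comp_ids, obs_group_ids = compress_group_index(np.array([5, 0, 5, 0]))
+    >>> comp_ids
+    array([1, 0, 1, 0])
+    >>> obs_group_ids
+    array([0, 5])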
+ """ + if len(group_index) and np.all(group_index[1:] >= group_index[:-1]): + # GH 53806: fast path for sorted group_index + unique_mask = np.concatenate( + [group_index[:1] > -1, group_index[1:] != group_index[:-1]] + ) + comp_ids = unique_mask.cumsum() + comp_ids -= 1 + obs_group_ids = group_index[unique_mask] + else: + size_hint = len(group_index) + table = hashtable.Int64HashTable(size_hint) + + group_index = ensure_int64(group_index) + + # note, group labels come out ascending (ie, 1,2,3 etc) + comp_ids, obs_group_ids = table.get_labels_groupby(group_index) + + if sort and len(obs_group_ids) > 0: + obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids) + + return ensure_int64(comp_ids), ensure_int64(obs_group_ids) + + +def _reorder_by_uniques( + uniques: npt.NDArray[np.int64], labels: npt.NDArray[np.intp] +) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.intp]]: + """ + Parameters + ---------- + uniques : np.ndarray[np.int64] + labels : np.ndarray[np.intp] + + Returns + ------- + np.ndarray[np.int64] + np.ndarray[np.intp] + """ + # sorter is index where elements ought to go + sorter = uniques.argsort() + + # reverse_indexer is where elements came from + reverse_indexer = np.empty(len(sorter), dtype=np.intp) + reverse_indexer.put(sorter, np.arange(len(sorter))) + + mask = labels < 0 + + # move labels to right locations (ie, unsort ascending labels) + labels = reverse_indexer.take(labels) + np.putmask(labels, mask, -1) + + # sort observed ids + uniques = uniques.take(sorter) + + return uniques, labels diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/sparse/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/sparse/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/sparse/api.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/sparse/api.py new file mode 100644 index 00000000..6650a5c4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/sparse/api.py @@ -0,0 +1,5 @@ +from pandas.core.dtypes.dtypes import SparseDtype + +from pandas.core.arrays.sparse import SparseArray + +__all__ = ["SparseArray", "SparseDtype"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/__init__.py new file mode 100644 index 00000000..d4ce75f7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/__init__.py @@ -0,0 +1,28 @@ +""" +Implementation of pandas.Series.str and its interface. + +* strings.accessor.StringMethods : Accessor for Series.str +* strings.base.BaseStringArrayMethods: Mixin ABC for EAs to implement str methods + +Most methods on the StringMethods accessor follow the pattern: + + 1. extract the array from the series (or index) + 2. Call that array's implementation of the string method + 3. Wrap the result (in a Series, index, or DataFrame) + +Pandas extension arrays implementing string methods should inherit from +pandas.core.strings.base.BaseStringArrayMethods. This is an ABC defining +the various string methods. To avoid namespace clashes and pollution, +these are prefixed with `_str_`. So ``Series.str.upper()`` calls +``Series.array._str_upper()``. The interface isn't currently public +to other string extension arrays. +""" +# Pandas current implementation is in ObjectStringArrayMixin. This is designed +# to work on object-dtype ndarrays. 
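+#
+# For example (illustrative):
+#
+#   >>> import pandas as pd
+#   >>> s = pd.Series(["a", "b"])
+#   >>> s.str.upper()  # dispatches to s.array._str_upper()
+#   0    A
+#   1    B
+#   dtype: object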
+# +# BaseStringArrayMethods +# - ObjectStringArrayMixin +# - StringArray +# - NumpyExtensionArray +# - Categorical +# - ArrowStringArray diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/accessor.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/accessor.py new file mode 100644 index 00000000..e2a3b937 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/accessor.py @@ -0,0 +1,3519 @@ +from __future__ import annotations + +import codecs +from functools import wraps +import re +from typing import ( + TYPE_CHECKING, + Callable, + Literal, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas._typing import ( + AlignJoin, + DtypeObj, + F, + Scalar, + npt, +) +from pandas.util._decorators import Appender +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + ensure_object, + is_bool_dtype, + is_integer, + is_list_like, + is_object_dtype, + is_re, +) +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + CategoricalDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCMultiIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import isna + +from pandas.core.base import NoNewAttributesMixin +from pandas.core.construction import extract_array + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + ) + + from pandas import ( + DataFrame, + Index, + Series, + ) + +_shared_docs: dict[str, str] = {} +_cpython_optimized_encoders = ( + "utf-8", + "utf8", + "latin-1", + "latin1", + "iso-8859-1", + "mbcs", + "ascii", +) +_cpython_optimized_decoders = _cpython_optimized_encoders + ("utf-16", "utf-32") + + +def forbid_nonstring_types( + forbidden: list[str] | None, name: str | None = None +) -> Callable[[F], F]: + """ + Decorator to forbid specific types for a method of StringMethods. + + For calling `.str.{method}` on a Series or Index, it is necessary to first + initialize the :class:`StringMethods` object, and then call the method. + However, different methods allow different input types, and so this can not + be checked during :meth:`StringMethods.__init__`, but must be done on a + per-method basis. This decorator exists to facilitate this process, and + make it explicit which (inferred) types are disallowed by the method. + + :meth:`StringMethods.__init__` allows the *union* of types its different + methods allow (after skipping NaNs; see :meth:`StringMethods._validate`), + namely: ['string', 'empty', 'bytes', 'mixed', 'mixed-integer']. + + The default string types ['string', 'empty'] are allowed for all methods. + For the additional types ['bytes', 'mixed', 'mixed-integer'], each method + then needs to forbid the types it is not intended for. + + Parameters + ---------- + forbidden : list-of-str or None + List of forbidden non-string types, may be one or more of + `['bytes', 'mixed', 'mixed-integer']`. + name : str, default None + Name of the method to use in the error message. By default, this is + None, in which case the name from the method being wrapped will be + copied. However, for working with further wrappers (like _pat_wrapper + and _noarg_wrapper), it is necessary to specify the name. + + Returns + ------- + func : wrapper + The method to which the decorator is applied, with an added check that + enforces the inferred type to not be in the list of forbidden types. + + Raises + ------ + TypeError + If the inferred type of the underlying data is in `forbidden`. 
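+
+    For instance (illustrative), ``upper`` forbids bytes data:
+
+    >>> import pandas as pd
+    >>> pd.Series([b"a", b"b"]).str.upper()
+    Traceback (most recent call last):
+    ...
+    TypeError: Cannot use .str.upper with values of inferred dtype 'bytes'.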
+ """ + # deal with None + forbidden = [] if forbidden is None else forbidden + + allowed_types = {"string", "empty", "bytes", "mixed", "mixed-integer"} - set( + forbidden + ) + + def _forbid_nonstring_types(func: F) -> F: + func_name = func.__name__ if name is None else name + + @wraps(func) + def wrapper(self, *args, **kwargs): + if self._inferred_dtype not in allowed_types: + msg = ( + f"Cannot use .str.{func_name} with values of " + f"inferred dtype '{self._inferred_dtype}'." + ) + raise TypeError(msg) + return func(self, *args, **kwargs) + + wrapper.__name__ = func_name + return cast(F, wrapper) + + return _forbid_nonstring_types + + +def _map_and_wrap(name: str | None, docstring: str | None): + @forbid_nonstring_types(["bytes"], name=name) + def wrapper(self): + result = getattr(self._data.array, f"_str_{name}")() + return self._wrap_result( + result, returns_string=name not in ("isnumeric", "isdecimal") + ) + + wrapper.__doc__ = docstring + return wrapper + + +class StringMethods(NoNewAttributesMixin): + """ + Vectorized string functions for Series and Index. + + NAs stay NA unless handled otherwise by a particular method. + Patterned after Python's string methods, with some inspiration from + R's stringr package. + + Examples + -------- + >>> s = pd.Series(["A_Str_Series"]) + >>> s + 0 A_Str_Series + dtype: object + + >>> s.str.split("_") + 0 [A, Str, Series] + dtype: object + + >>> s.str.replace("_", "") + 0 AStrSeries + dtype: object + """ + + # Note: see the docstring in pandas.core.strings.__init__ + # for an explanation of the implementation. + # TODO: Dispatch all the methods + # Currently the following are not dispatched to the array + # * cat + # * extractall + + def __init__(self, data) -> None: + from pandas.core.arrays.string_ import StringDtype + + self._inferred_dtype = self._validate(data) + self._is_categorical = isinstance(data.dtype, CategoricalDtype) + self._is_string = isinstance(data.dtype, StringDtype) + self._data = data + + self._index = self._name = None + if isinstance(data, ABCSeries): + self._index = data.index + self._name = data.name + + # ._values.categories works for both Series/Index + self._parent = data._values.categories if self._is_categorical else data + # save orig to blow up categoricals to the right type + self._orig = data + self._freeze() + + @staticmethod + def _validate(data): + """ + Auxiliary function for StringMethods, infers and checks dtype of data. + + This is a "first line of defence" at the creation of the StringMethods- + object, and just checks that the dtype is in the + *union* of the allowed types over all string methods below; this + restriction is then refined on a per-method basis using the decorator + @forbid_nonstring_types (more info in the corresponding docstring). 
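+
+        For instance (illustrative), non-string data is rejected as soon as
+        the accessor is created:
+
+        >>> import pandas as pd
+        >>> pd.Series([1, 2]).str
+        Traceback (most recent call last):
+        ...
+        AttributeError: Can only use .str accessor with string values!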
+ + This really should exclude all series/index with any non-string values, + but that isn't practical for performance reasons until we have a str + dtype (GH 9343 / 13877) + + Parameters + ---------- + data : The content of the Series + + Returns + ------- + dtype : inferred dtype of data + """ + if isinstance(data, ABCMultiIndex): + raise AttributeError( + "Can only use .str accessor with Index, not MultiIndex" + ) + + # see _libs/lib.pyx for list of inferred types + allowed_types = ["string", "empty", "bytes", "mixed", "mixed-integer"] + + data = extract_array(data) + + values = getattr(data, "categories", data) # categorical / normal + + inferred_dtype = lib.infer_dtype(values, skipna=True) + + if inferred_dtype not in allowed_types: + raise AttributeError("Can only use .str accessor with string values!") + return inferred_dtype + + def __getitem__(self, key): + result = self._data.array._str_getitem(key) + return self._wrap_result(result) + + def __iter__(self) -> Iterator: + raise TypeError(f"'{type(self).__name__}' object is not iterable") + + def _wrap_result( + self, + result, + name=None, + expand: bool | None = None, + fill_value=np.nan, + returns_string: bool = True, + returns_bool: bool = False, + dtype=None, + ): + from pandas import ( + Index, + MultiIndex, + ) + + if not hasattr(result, "ndim") or not hasattr(result, "dtype"): + if isinstance(result, ABCDataFrame): + result = result.__finalize__(self._orig, name="str") + return result + assert result.ndim < 3 + + # We can be wrapping a string / object / categorical result, in which + # case we'll want to return the same dtype as the input. + # Or we can be wrapping a numeric output, in which case we don't want + # to return a StringArray. + # Ideally the array method returns the right array type. 
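+        # For example (illustrative): a string-dtype Series calling
+        # .str.upper() should come back as string dtype, while .str.len()
+        # should come back numeric rather than being cast to string.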
+ if expand is None: + # infer from ndim if expand is not specified + expand = result.ndim != 1 + elif expand is True and not isinstance(self._orig, ABCIndex): + # required when expand=True is explicitly specified + # not needed when inferred + if isinstance(result.dtype, ArrowDtype): + import pyarrow as pa + + from pandas.compat import pa_version_under11p0 + + from pandas.core.arrays.arrow.array import ArrowExtensionArray + + value_lengths = pa.compute.list_value_length(result._pa_array) + max_len = pa.compute.max(value_lengths).as_py() + min_len = pa.compute.min(value_lengths).as_py() + if result._hasna: + # ArrowExtensionArray.fillna doesn't work for list scalars + result = ArrowExtensionArray( + result._pa_array.fill_null([None] * max_len) + ) + if min_len < max_len: + # append nulls to each scalar list element up to max_len + if not pa_version_under11p0: + result = ArrowExtensionArray( + pa.compute.list_slice( + result._pa_array, + start=0, + stop=max_len, + return_fixed_size_list=True, + ) + ) + else: + all_null = np.full(max_len, fill_value=None, dtype=object) + values = result.to_numpy() + new_values = [] + for row in values: + if len(row) < max_len: + nulls = all_null[: max_len - len(row)] + row = np.append(row, nulls) + new_values.append(row) + pa_type = result._pa_array.type + result = ArrowExtensionArray(pa.array(new_values, type=pa_type)) + if name is not None: + labels = name + else: + labels = range(max_len) + result = ( + pa.compute.list_flatten(result._pa_array) + .to_numpy() + .reshape(len(result), max_len) + ) + result = { + label: ArrowExtensionArray(pa.array(res)) + for label, res in zip(labels, result.T) + } + elif is_object_dtype(result): + + def cons_row(x): + if is_list_like(x): + return x + else: + return [x] + + result = [cons_row(x) for x in result] + if result and not self._is_string: + # propagate nan values to match longest sequence (GH 18450) + max_len = max(len(x) for x in result) + result = [ + x * max_len if len(x) == 0 or x[0] is np.nan else x + for x in result + ] + + if not isinstance(expand, bool): + raise ValueError("expand must be True or False") + + if expand is False: + # if expand is False, result should have the same name + # as the original otherwise specified + if name is None: + name = getattr(result, "name", None) + if name is None: + # do not use logical or, _orig may be a DataFrame + # which has "name" column + name = self._orig.name + + # Wait until we are sure result is a Series or Index before + # checking attributes (GH 12180) + if isinstance(self._orig, ABCIndex): + # if result is a boolean np.array, return the np.array + # instead of wrapping it into a boolean Index (GH 8875) + if is_bool_dtype(result): + return result + + if expand: + result = list(result) + out = MultiIndex.from_tuples(result, names=name) + if out.nlevels == 1: + # We had all tuples of length-one, which are + # better represented as a regular Index. + out = out.get_level_values(0) + return out + else: + return Index(result, name=name, dtype=dtype) + else: + index = self._orig.index + # This is a mess. 
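+            # (Roughly: for string-dtype input, keep bool results as-is and
+            # keep string results in the original string dtype; otherwise
+            # fall back to whatever dtype the array method produced.)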
+ _dtype: DtypeObj | str | None = dtype + vdtype = getattr(result, "dtype", None) + if self._is_string: + if is_bool_dtype(vdtype): + _dtype = result.dtype + elif returns_string: + _dtype = self._orig.dtype + else: + _dtype = vdtype + elif vdtype is not None: + _dtype = vdtype + + if expand: + cons = self._orig._constructor_expanddim + result = cons(result, columns=name, index=index, dtype=_dtype) + else: + # Must be a Series + cons = self._orig._constructor + result = cons(result, name=name, index=index, dtype=_dtype) + result = result.__finalize__(self._orig, method="str") + if name is not None and result.ndim == 1: + # __finalize__ might copy over the original name, but we may + # want the new name (e.g. str.extract). + result.name = name + return result + + def _get_series_list(self, others): + """ + Auxiliary function for :meth:`str.cat`. Turn potentially mixed input + into a list of Series (elements without an index must match the length + of the calling Series/Index). + + Parameters + ---------- + others : Series, DataFrame, np.ndarray, list-like or list-like of + Objects that are either Series, Index or np.ndarray (1-dim). + + Returns + ------- + list of Series + Others transformed into list of Series. + """ + from pandas import ( + DataFrame, + Series, + ) + + # self._orig is either Series or Index + idx = self._orig if isinstance(self._orig, ABCIndex) else self._orig.index + + # Generally speaking, all objects without an index inherit the index + # `idx` of the calling Series/Index - i.e. must have matching length. + # Objects with an index (i.e. Series/Index/DataFrame) keep their own. + if isinstance(others, ABCSeries): + return [others] + elif isinstance(others, ABCIndex): + return [Series(others, index=idx, dtype=others.dtype)] + elif isinstance(others, ABCDataFrame): + return [others[x] for x in others] + elif isinstance(others, np.ndarray) and others.ndim == 2: + others = DataFrame(others, index=idx) + return [others[x] for x in others] + elif is_list_like(others, allow_sets=False): + try: + others = list(others) # ensure iterators do not get read twice etc + except TypeError: + # e.g. ser.str, raise below + pass + else: + # in case of list-like `others`, all elements must be + # either Series/Index/np.ndarray (1-dim)... + if all( + isinstance(x, (ABCSeries, ABCIndex)) + or (isinstance(x, np.ndarray) and x.ndim == 1) + for x in others + ): + los: list[Series] = [] + while others: # iterate through list and append each element + los = los + self._get_series_list(others.pop(0)) + return los + # ... or just strings + elif all(not is_list_like(x) for x in others): + return [Series(others, index=idx)] + raise TypeError( + "others must be Series, Index, DataFrame, np.ndarray " + "or list-like (either containing only strings or " + "containing only objects of type Series/Index/" + "np.ndarray[1-dim])" + ) + + @forbid_nonstring_types(["bytes", "mixed", "mixed-integer"]) + def cat( + self, + others=None, + sep: str | None = None, + na_rep=None, + join: AlignJoin = "left", + ) -> str | Series | Index: + """ + Concatenate strings in the Series/Index with given separator. + + If `others` is specified, this function concatenates the Series/Index + and elements of `others` element-wise. + If `others` is not passed, then all values in the Series/Index are + concatenated into a single string with a given `sep`. 
+ + Parameters + ---------- + others : Series, Index, DataFrame, np.ndarray or list-like + Series, Index, DataFrame, np.ndarray (one- or two-dimensional) and + other list-likes of strings must have the same length as the + calling Series/Index, with the exception of indexed objects (i.e. + Series/Index/DataFrame) if `join` is not None. + + If others is a list-like that contains a combination of Series, + Index or np.ndarray (1-dim), then all elements will be unpacked and + must satisfy the above criteria individually. + + If others is None, the method returns the concatenation of all + strings in the calling Series/Index. + sep : str, default '' + The separator between the different elements/columns. By default + the empty string `''` is used. + na_rep : str or None, default None + Representation that is inserted for all missing values: + + - If `na_rep` is None, and `others` is None, missing values in the + Series/Index are omitted from the result. + - If `na_rep` is None, and `others` is not None, a row containing a + missing value in any of the columns (before concatenation) will + have a missing value in the result. + join : {'left', 'right', 'outer', 'inner'}, default 'left' + Determines the join-style between the calling Series/Index and any + Series/Index/DataFrame in `others` (objects without an index need + to match the length of the calling Series/Index). To disable + alignment, use `.values` on any Series/Index/DataFrame in `others`. + + Returns + ------- + str, Series or Index + If `others` is None, `str` is returned, otherwise a `Series/Index` + (same type as caller) of objects is returned. + + See Also + -------- + split : Split each string in the Series/Index. + join : Join lists contained as elements in the Series/Index. + + Examples + -------- + When not passing `others`, all values are concatenated into a single + string: + + >>> s = pd.Series(['a', 'b', np.nan, 'd']) + >>> s.str.cat(sep=' ') + 'a b d' + + By default, NA values in the Series are ignored. Using `na_rep`, they + can be given a representation: + + >>> s.str.cat(sep=' ', na_rep='?') + 'a b ? d' + + If `others` is specified, corresponding values are concatenated with + the separator. Result will be a Series of strings. + + >>> s.str.cat(['A', 'B', 'C', 'D'], sep=',') + 0 a,A + 1 b,B + 2 NaN + 3 d,D + dtype: object + + Missing values will remain missing in the result, but can again be + represented using `na_rep` + + >>> s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-') + 0 a,A + 1 b,B + 2 -,C + 3 d,D + dtype: object + + If `sep` is not specified, the values are concatenated without + separation. + + >>> s.str.cat(['A', 'B', 'C', 'D'], na_rep='-') + 0 aA + 1 bB + 2 -C + 3 dD + dtype: object + + Series with different indexes can be aligned before concatenation. The + `join`-keyword works as in other methods. + + >>> t = pd.Series(['d', 'a', 'e', 'c'], index=[3, 0, 4, 2]) + >>> s.str.cat(t, join='left', na_rep='-') + 0 aa + 1 b- + 2 -c + 3 dd + dtype: object + >>> + >>> s.str.cat(t, join='outer', na_rep='-') + 0 aa + 1 b- + 2 -c + 3 dd + 4 -e + dtype: object + >>> + >>> s.str.cat(t, join='inner', na_rep='-') + 0 aa + 2 -c + 3 dd + dtype: object + >>> + >>> s.str.cat(t, join='right', na_rep='-') + 3 dd + 0 aa + 4 -e + 2 -c + dtype: object + + For more examples, see :ref:`here `. 
+ """ + # TODO: dispatch + from pandas import ( + Index, + Series, + concat, + ) + + if isinstance(others, str): + raise ValueError("Did you mean to supply a `sep` keyword?") + if sep is None: + sep = "" + + if isinstance(self._orig, ABCIndex): + data = Series(self._orig, index=self._orig, dtype=self._orig.dtype) + else: # Series + data = self._orig + + # concatenate Series/Index with itself if no "others" + if others is None: + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "Series") + data = ensure_object(data) # type: ignore[assignment] + na_mask = isna(data) + if na_rep is None and na_mask.any(): + return sep.join(data[~na_mask]) + elif na_rep is not None and na_mask.any(): + return sep.join(np.where(na_mask, na_rep, data)) + else: + return sep.join(data) + + try: + # turn anything in "others" into lists of Series + others = self._get_series_list(others) + except ValueError as err: # do not catch TypeError raised by _get_series_list + raise ValueError( + "If `others` contains arrays or lists (or other " + "list-likes without an index), these must all be " + "of the same length as the calling Series/Index." + ) from err + + # align if required + if any(not data.index.equals(x.index) for x in others): + # Need to add keys for uniqueness in case of duplicate columns + others = concat( + others, + axis=1, + join=(join if join == "inner" else "outer"), + keys=range(len(others)), + sort=False, + copy=False, + ) + data, others = data.align(others, join=join) + others = [others[x] for x in others] # again list of Series + + all_cols = [ensure_object(x) for x in [data] + others] + na_masks = np.array([isna(x) for x in all_cols]) + union_mask = np.logical_or.reduce(na_masks, axis=0) + + if na_rep is None and union_mask.any(): + # no na_rep means NaNs for all rows where any column has a NaN + # only necessary if there are actually any NaNs + result = np.empty(len(data), dtype=object) + np.putmask(result, union_mask, np.nan) + + not_masked = ~union_mask + result[not_masked] = cat_safe([x[not_masked] for x in all_cols], sep) + elif na_rep is not None and union_mask.any(): + # fill NaNs with na_rep in case there are actually any NaNs + all_cols = [ + np.where(nm, na_rep, col) for nm, col in zip(na_masks, all_cols) + ] + result = cat_safe(all_cols, sep) + else: + # no NaNs - can just concatenate + result = cat_safe(all_cols, sep) + + out: Index | Series + if isinstance(self._orig, ABCIndex): + # add dtype for case that result is all-NA + + out = Index(result, dtype=object, name=self._orig.name) + else: # Series + if isinstance(self._orig.dtype, CategoricalDtype): + # We need to infer the new categories. + dtype = None + else: + dtype = self._orig.dtype + res_ser = Series( + result, dtype=dtype, index=data.index, name=self._orig.name, copy=False + ) + out = res_ser.__finalize__(self._orig, method="str_cat") + return out + + _shared_docs[ + "str_split" + ] = r""" + Split strings around given separator/delimiter. + + Splits the string in the Series/Index from the %(side)s, + at the specified delimiter string. + + Parameters + ---------- + pat : str%(pat_regex)s, optional + %(pat_description)s. + If not specified, split on whitespace. + n : int, default -1 (all) + Limit number of splits in output. + ``None``, 0 and -1 will be interpreted as return all splits. + expand : bool, default False + Expand the split strings into separate columns. + + - If ``True``, return DataFrame/MultiIndex expanding dimensionality. 
+ - If ``False``, return Series/Index, containing lists of strings. + %(regex_argument)s + Returns + ------- + Series, Index, DataFrame or MultiIndex + Type matches caller unless ``expand=True`` (see Notes). + %(raises_split)s + See Also + -------- + Series.str.split : Split strings around given separator/delimiter. + Series.str.rsplit : Splits string around given separator/delimiter, + starting from the right. + Series.str.join : Join lists contained as elements in the Series/Index + with passed delimiter. + str.split : Standard library version for split. + str.rsplit : Standard library version for rsplit. + + Notes + ----- + The handling of the `n` keyword depends on the number of found splits: + + - If found splits > `n`, make first `n` splits only + - If found splits <= `n`, make all splits + - If for a certain row the number of found splits < `n`, + append `None` for padding up to `n` if ``expand=True`` + + If using ``expand=True``, Series and Index callers return DataFrame and + MultiIndex objects, respectively. + %(regex_pat_note)s + Examples + -------- + >>> s = pd.Series( + ... [ + ... "this is a regular sentence", + ... "https://docs.python.org/3/tutorial/index.html", + ... np.nan + ... ] + ... ) + >>> s + 0 this is a regular sentence + 1 https://docs.python.org/3/tutorial/index.html + 2 NaN + dtype: object + + In the default setting, the string is split by whitespace. + + >>> s.str.split() + 0 [this, is, a, regular, sentence] + 1 [https://docs.python.org/3/tutorial/index.html] + 2 NaN + dtype: object + + Without the `n` parameter, the outputs of `rsplit` and `split` + are identical. + + >>> s.str.rsplit() + 0 [this, is, a, regular, sentence] + 1 [https://docs.python.org/3/tutorial/index.html] + 2 NaN + dtype: object + + The `n` parameter can be used to limit the number of splits on the + delimiter. The outputs of `split` and `rsplit` are different. + + >>> s.str.split(n=2) + 0 [this, is, a regular sentence] + 1 [https://docs.python.org/3/tutorial/index.html] + 2 NaN + dtype: object + + >>> s.str.rsplit(n=2) + 0 [this is a, regular, sentence] + 1 [https://docs.python.org/3/tutorial/index.html] + 2 NaN + dtype: object + + The `pat` parameter can be used to split by other characters. + + >>> s.str.split(pat="/") + 0 [this is a regular sentence] + 1 [https:, , docs.python.org, 3, tutorial, index... + 2 NaN + dtype: object + + When using ``expand=True``, the split elements will expand out into + separate columns. If NaN is present, it is propagated throughout + the columns during the split. + + >>> s.str.split(expand=True) + 0 1 2 3 4 + 0 this is a regular sentence + 1 https://docs.python.org/3/tutorial/index.html None None None None + 2 NaN NaN NaN NaN NaN + + For slightly more complex use cases like splitting the html document name + from a url, a combination of parameter settings can be used. + + >>> s.str.rsplit("/", n=1, expand=True) + 0 1 + 0 this is a regular sentence None + 1 https://docs.python.org/3/tutorial index.html + 2 NaN NaN + %(regex_examples)s""" + + @Appender( + _shared_docs["str_split"] + % { + "side": "beginning", + "pat_regex": " or compiled regex", + "pat_description": "String or regular expression to split on", + "regex_argument": """ + regex : bool, default None + Determines if the passed-in pattern is a regular expression: + + - If ``True``, assumes the passed-in pattern is a regular expression + - If ``False``, treats the pattern as a literal string. + - If ``None`` and `pat` length is 1, treats `pat` as a literal string. 
+ - If ``None`` and `pat` length is not 1, treats `pat` as a regular expression. + - Cannot be set to False if `pat` is a compiled regex + + .. versionadded:: 1.4.0 + """, + "raises_split": """ + Raises + ------ + ValueError + * if `regex` is False and `pat` is a compiled regex + """, + "regex_pat_note": """ + Use of `regex =False` with a `pat` as a compiled regex will raise an error. + """, + "method": "split", + "regex_examples": r""" + Remember to escape special characters when explicitly using regular expressions. + + >>> s = pd.Series(["foo and bar plus baz"]) + >>> s.str.split(r"and|plus", expand=True) + 0 1 2 + 0 foo bar baz + + Regular expressions can be used to handle urls or file names. + When `pat` is a string and ``regex=None`` (the default), the given `pat` is compiled + as a regex only if ``len(pat) != 1``. + + >>> s = pd.Series(['foojpgbar.jpg']) + >>> s.str.split(r".", expand=True) + 0 1 + 0 foojpgbar jpg + + >>> s.str.split(r"\.jpg", expand=True) + 0 1 + 0 foojpgbar + + When ``regex=True``, `pat` is interpreted as a regex + + >>> s.str.split(r"\.jpg", regex=True, expand=True) + 0 1 + 0 foojpgbar + + A compiled regex can be passed as `pat` + + >>> import re + >>> s.str.split(re.compile(r"\.jpg"), expand=True) + 0 1 + 0 foojpgbar + + When ``regex=False``, `pat` is interpreted as the string itself + + >>> s.str.split(r"\.jpg", regex=False, expand=True) + 0 + 0 foojpgbar.jpg + """, + } + ) + @forbid_nonstring_types(["bytes"]) + def split( + self, + pat: str | re.Pattern | None = None, + *, + n=-1, + expand: bool = False, + regex: bool | None = None, + ): + if regex is False and is_re(pat): + raise ValueError( + "Cannot use a compiled regex as replacement pattern with regex=False" + ) + if is_re(pat): + regex = True + result = self._data.array._str_split(pat, n, expand, regex) + return self._wrap_result(result, returns_string=expand, expand=expand) + + @Appender( + _shared_docs["str_split"] + % { + "side": "end", + "pat_regex": "", + "pat_description": "String to split on", + "regex_argument": "", + "raises_split": "", + "regex_pat_note": "", + "method": "rsplit", + "regex_examples": "", + } + ) + @forbid_nonstring_types(["bytes"]) + def rsplit(self, pat=None, *, n=-1, expand: bool = False): + result = self._data.array._str_rsplit(pat, n=n) + return self._wrap_result(result, expand=expand, returns_string=expand) + + _shared_docs[ + "str_partition" + ] = """ + Split the string at the %(side)s occurrence of `sep`. + + This method splits the string at the %(side)s occurrence of `sep`, + and returns 3 elements containing the part before the separator, + the separator itself, and the part after the separator. + If the separator is not found, return %(return)s. + + Parameters + ---------- + sep : str, default whitespace + String to split on. + expand : bool, default True + If True, return DataFrame/MultiIndex expanding dimensionality. + If False, return Series/Index. + + Returns + ------- + DataFrame/MultiIndex or Series/Index of objects + + See Also + -------- + %(also)s + Series.str.split : Split strings around given separators. + str.partition : Standard library version. 
+ + Examples + -------- + + >>> s = pd.Series(['Linda van der Berg', 'George Pitt-Rivers']) + >>> s + 0 Linda van der Berg + 1 George Pitt-Rivers + dtype: object + + >>> s.str.partition() + 0 1 2 + 0 Linda van der Berg + 1 George Pitt-Rivers + + To partition by the last space instead of the first one: + + >>> s.str.rpartition() + 0 1 2 + 0 Linda van der Berg + 1 George Pitt-Rivers + + To partition by something different than a space: + + >>> s.str.partition('-') + 0 1 2 + 0 Linda van der Berg + 1 George Pitt - Rivers + + To return a Series containing tuples instead of a DataFrame: + + >>> s.str.partition('-', expand=False) + 0 (Linda van der Berg, , ) + 1 (George Pitt, -, Rivers) + dtype: object + + Also available on indices: + + >>> idx = pd.Index(['X 123', 'Y 999']) + >>> idx + Index(['X 123', 'Y 999'], dtype='object') + + Which will create a MultiIndex: + + >>> idx.str.partition() + MultiIndex([('X', ' ', '123'), + ('Y', ' ', '999')], + ) + + Or an index with tuples with ``expand=False``: + + >>> idx.str.partition(expand=False) + Index([('X', ' ', '123'), ('Y', ' ', '999')], dtype='object') + """ + + @Appender( + _shared_docs["str_partition"] + % { + "side": "first", + "return": "3 elements containing the string itself, followed by two " + "empty strings", + "also": "rpartition : Split the string at the last occurrence of `sep`.", + } + ) + @forbid_nonstring_types(["bytes"]) + def partition(self, sep: str = " ", expand: bool = True): + result = self._data.array._str_partition(sep, expand) + return self._wrap_result(result, expand=expand, returns_string=expand) + + @Appender( + _shared_docs["str_partition"] + % { + "side": "last", + "return": "3 elements containing two empty strings, followed by the " + "string itself", + "also": "partition : Split the string at the first occurrence of `sep`.", + } + ) + @forbid_nonstring_types(["bytes"]) + def rpartition(self, sep: str = " ", expand: bool = True): + result = self._data.array._str_rpartition(sep, expand) + return self._wrap_result(result, expand=expand, returns_string=expand) + + def get(self, i): + """ + Extract element from each component at specified position or with specified key. + + Extract element from lists, tuples, dict, or strings in each element in the + Series/Index. + + Parameters + ---------- + i : int or hashable dict label + Position or key of element to extract. + + Returns + ------- + Series or Index + + Examples + -------- + >>> s = pd.Series(["String", + ... (1, 2, 3), + ... ["a", "b", "c"], + ... 123, + ... -456, + ... {1: "Hello", "2": "World"}]) + >>> s + 0 String + 1 (1, 2, 3) + 2 [a, b, c] + 3 123 + 4 -456 + 5 {1: 'Hello', '2': 'World'} + dtype: object + + >>> s.str.get(1) + 0 t + 1 2 + 2 b + 3 NaN + 4 NaN + 5 Hello + dtype: object + + >>> s.str.get(-1) + 0 g + 1 3 + 2 c + 3 NaN + 4 NaN + 5 None + dtype: object + + Return element with given key + + >>> s = pd.Series([{"name": "Hello", "value": "World"}, + ... {"name": "Goodbye", "value": "Planet"}]) + >>> s.str.get('name') + 0 Hello + 1 Goodbye + dtype: object + """ + result = self._data.array._str_get(i) + return self._wrap_result(result) + + @forbid_nonstring_types(["bytes"]) + def join(self, sep: str): + """ + Join lists contained as elements in the Series/Index with passed delimiter. + + If the elements of a Series are lists themselves, join the content of these + lists using the delimiter passed to the function. + This function is an equivalent to :meth:`str.join`. + + Parameters + ---------- + sep : str + Delimiter to use between list entries. 
+ + Returns + ------- + Series/Index: object + The list entries concatenated by intervening occurrences of the + delimiter. + + Raises + ------ + AttributeError + If the supplied Series contains neither strings nor lists. + + See Also + -------- + str.join : Standard library version of this method. + Series.str.split : Split strings around given separator/delimiter. + + Notes + ----- + If any of the list items is not a string object, the result of the join + will be `NaN`. + + Examples + -------- + Example with a list that contains non-string elements. + + >>> s = pd.Series([['lion', 'elephant', 'zebra'], + ... [1.1, 2.2, 3.3], + ... ['cat', np.nan, 'dog'], + ... ['cow', 4.5, 'goat'], + ... ['duck', ['swan', 'fish'], 'guppy']]) + >>> s + 0 [lion, elephant, zebra] + 1 [1.1, 2.2, 3.3] + 2 [cat, nan, dog] + 3 [cow, 4.5, goat] + 4 [duck, [swan, fish], guppy] + dtype: object + + Join all lists using a '-'. The lists containing object(s) of types other + than str will produce a NaN. + + >>> s.str.join('-') + 0 lion-elephant-zebra + 1 NaN + 2 NaN + 3 NaN + 4 NaN + dtype: object + """ + result = self._data.array._str_join(sep) + return self._wrap_result(result) + + @forbid_nonstring_types(["bytes"]) + def contains( + self, pat, case: bool = True, flags: int = 0, na=None, regex: bool = True + ): + r""" + Test if pattern or regex is contained within a string of a Series or Index. + + Return boolean Series or Index based on whether a given pattern or regex is + contained within a string of a Series or Index. + + Parameters + ---------- + pat : str + Character sequence or regular expression. + case : bool, default True + If True, case sensitive. + flags : int, default 0 (no flags) + Flags to pass through to the re module, e.g. re.IGNORECASE. + na : scalar, optional + Fill value for missing values. The default depends on dtype of the + array. For object-dtype, ``numpy.nan`` is used. For ``StringDtype``, + ``pandas.NA`` is used. + regex : bool, default True + If True, assumes the pat is a regular expression. + + If False, treats the pat as a literal string. + + Returns + ------- + Series or Index of boolean values + A Series or Index of boolean values indicating whether the + given pattern is contained within the string of each element + of the Series or Index. + + See Also + -------- + match : Analogous, but stricter, relying on re.match instead of re.search. + Series.str.startswith : Test if the start of each string element matches a + pattern. + Series.str.endswith : Same as startswith, but tests the end of string. + + Examples + -------- + Returning a Series of booleans using only a literal pattern. + + >>> s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.nan]) + >>> s1.str.contains('og', regex=False) + 0 False + 1 True + 2 False + 3 False + 4 NaN + dtype: object + + Returning an Index of booleans using only a literal pattern. + + >>> ind = pd.Index(['Mouse', 'dog', 'house and parrot', '23.0', np.nan]) + >>> ind.str.contains('23', regex=False) + Index([False, False, False, True, nan], dtype='object') + + Specifying case sensitivity using `case`. + + >>> s1.str.contains('oG', case=True, regex=True) + 0 False + 1 False + 2 False + 3 False + 4 NaN + dtype: object + + Specifying `na` to be `False` instead of `NaN` replaces NaN values + with `False`. If Series or Index does not contain NaN values + the resultant dtype will be `bool`, otherwise, an `object` dtype. 
+ + >>> s1.str.contains('og', na=False, regex=True) + 0 False + 1 True + 2 False + 3 False + 4 False + dtype: bool + + Returning 'house' or 'dog' when either expression occurs in a string. + + >>> s1.str.contains('house|dog', regex=True) + 0 False + 1 True + 2 True + 3 False + 4 NaN + dtype: object + + Ignoring case sensitivity using `flags` with regex. + + >>> import re + >>> s1.str.contains('PARROT', flags=re.IGNORECASE, regex=True) + 0 False + 1 False + 2 True + 3 False + 4 NaN + dtype: object + + Returning any digit using regular expression. + + >>> s1.str.contains('\\d', regex=True) + 0 False + 1 False + 2 False + 3 True + 4 NaN + dtype: object + + Ensure `pat` is a not a literal pattern when `regex` is set to True. + Note in the following example one might expect only `s2[1]` and `s2[3]` to + return `True`. However, '.0' as a regex matches any character + followed by a 0. + + >>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35']) + >>> s2.str.contains('.0', regex=True) + 0 True + 1 True + 2 False + 3 True + 4 False + dtype: bool + """ + if regex and re.compile(pat).groups: + warnings.warn( + "This pattern is interpreted as a regular expression, and has " + "match groups. To actually get the groups, use str.extract.", + UserWarning, + stacklevel=find_stack_level(), + ) + + result = self._data.array._str_contains(pat, case, flags, na, regex) + return self._wrap_result(result, fill_value=na, returns_string=False) + + @forbid_nonstring_types(["bytes"]) + def match(self, pat, case: bool = True, flags: int = 0, na=None): + """ + Determine if each string starts with a match of a regular expression. + + Parameters + ---------- + pat : str + Character sequence or regular expression. + case : bool, default True + If True, case sensitive. + flags : int, default 0 (no flags) + Regex module flags, e.g. re.IGNORECASE. + na : scalar, optional + Fill value for missing values. The default depends on dtype of the + array. For object-dtype, ``numpy.nan`` is used. For ``StringDtype``, + ``pandas.NA`` is used. + + Returns + ------- + Series/Index/array of boolean values + + See Also + -------- + fullmatch : Stricter matching that requires the entire string to match. + contains : Analogous, but less strict, relying on re.search instead of + re.match. + extract : Extract matched groups. + + Examples + -------- + >>> ser = pd.Series(["horse", "eagle", "donkey"]) + >>> ser.str.match("e") + 0 False + 1 True + 2 False + dtype: bool + """ + result = self._data.array._str_match(pat, case=case, flags=flags, na=na) + return self._wrap_result(result, fill_value=na, returns_string=False) + + @forbid_nonstring_types(["bytes"]) + def fullmatch(self, pat, case: bool = True, flags: int = 0, na=None): + """ + Determine if each string entirely matches a regular expression. + + Parameters + ---------- + pat : str + Character sequence or regular expression. + case : bool, default True + If True, case sensitive. + flags : int, default 0 (no flags) + Regex module flags, e.g. re.IGNORECASE. + na : scalar, optional + Fill value for missing values. The default depends on dtype of the + array. For object-dtype, ``numpy.nan`` is used. For ``StringDtype``, + ``pandas.NA`` is used. + + Returns + ------- + Series/Index/array of boolean values + + See Also + -------- + match : Similar, but also returns `True` when only a *prefix* of the string + matches the regular expression. + extract : Extract matched groups. 
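+
+        Notes
+        -----
+        ``fullmatch`` succeeds only if the pattern matches the *entire*
+        string, so it behaves like an implicitly anchored :meth:`match`.
+        A brief sketch of the difference:
+
+        >>> ser = pd.Series(["cat", "cats"])
+        >>> ser.str.match("cat")
+        0    True
+        1    True
+        dtype: bool
+        >>> ser.str.fullmatch("cat")
+        0     True
+        1    False
+        dtype: bool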
+ + Examples + -------- + >>> ser = pd.Series(["cat", "duck", "dove"]) + >>> ser.str.fullmatch(r'd.+') + 0 False + 1 True + 2 True + dtype: bool + """ + result = self._data.array._str_fullmatch(pat, case=case, flags=flags, na=na) + return self._wrap_result(result, fill_value=na, returns_string=False) + + @forbid_nonstring_types(["bytes"]) + def replace( + self, + pat: str | re.Pattern, + repl: str | Callable, + n: int = -1, + case: bool | None = None, + flags: int = 0, + regex: bool = False, + ): + r""" + Replace each occurrence of pattern/regex in the Series/Index. + + Equivalent to :meth:`str.replace` or :func:`re.sub`, depending on + the regex value. + + Parameters + ---------- + pat : str or compiled regex + String can be a character sequence or regular expression. + repl : str or callable + Replacement string or a callable. The callable is passed the regex + match object and must return a replacement string to be used. + See :func:`re.sub`. + n : int, default -1 (all) + Number of replacements to make from start. + case : bool, default None + Determines if replace is case sensitive: + + - If True, case sensitive (the default if `pat` is a string) + - Set to False for case insensitive + - Cannot be set if `pat` is a compiled regex. + + flags : int, default 0 (no flags) + Regex module flags, e.g. re.IGNORECASE. Cannot be set if `pat` is a compiled + regex. + regex : bool, default False + Determines if the passed-in pattern is a regular expression: + + - If True, assumes the passed-in pattern is a regular expression. + - If False, treats the pattern as a literal string + - Cannot be set to False if `pat` is a compiled regex or `repl` is + a callable. + + Returns + ------- + Series or Index of object + A copy of the object with all matching occurrences of `pat` replaced by + `repl`. + + Raises + ------ + ValueError + * if `regex` is False and `repl` is a callable or `pat` is a compiled + regex + * if `pat` is a compiled regex and `case` or `flags` is set + + Notes + ----- + When `pat` is a compiled regex, all flags should be included in the + compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled + regex will raise an error. + + Examples + -------- + When `pat` is a string and `regex` is True, the given `pat` + is compiled as a regex. When `repl` is a string, it replaces matching + regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are + left as is: + + >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True) + 0 bao + 1 baz + 2 NaN + dtype: object + + When `pat` is a string and `regex` is False, every `pat` is replaced with + `repl` as with :meth:`str.replace`: + + >>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False) + 0 bao + 1 fuz + 2 NaN + dtype: object + + When `repl` is a callable, it is called on every `pat` using + :func:`re.sub`. The callable should expect one positional argument + (a regex object) and return a string. 
+
+        To get the idea:
+
+        >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr, regex=True)
+        0    <re.Match object; span=(0, 1), match='f'>oo
+        1    <re.Match object; span=(0, 1), match='f'>uz
+        2                                            NaN
+        dtype: object
+
+        Reverse every lowercase alphabetic word:
+
+        >>> repl = lambda m: m.group(0)[::-1]
+        >>> ser = pd.Series(['foo 123', 'bar baz', np.nan])
+        >>> ser.str.replace(r'[a-z]+', repl, regex=True)
+        0    oof 123
+        1    rab zab
+        2        NaN
+        dtype: object
+
+        Using regex groups (extract second group and swap case):
+
+        >>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
+        >>> repl = lambda m: m.group('two').swapcase()
+        >>> ser = pd.Series(['One Two Three', 'Foo Bar Baz'])
+        >>> ser.str.replace(pat, repl, regex=True)
+        0    tWO
+        1    bAR
+        dtype: object
+
+        Using a compiled regex with flags
+
+        >>> import re
+        >>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)
+        >>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar', regex=True)
+        0    foo
+        1    bar
+        2    NaN
+        dtype: object
+        """
+        # Check whether repl is valid (GH 13438, GH 15055)
+        if not (isinstance(repl, str) or callable(repl)):
+            raise TypeError("repl must be a string or callable")
+
+        is_compiled_re = is_re(pat)
+        if regex or regex is None:
+            if is_compiled_re and (case is not None or flags != 0):
+                raise ValueError(
+                    "case and flags cannot be set when pat is a compiled regex"
+                )
+
+        elif is_compiled_re:
+            raise ValueError(
+                "Cannot use a compiled regex as replacement pattern with regex=False"
+            )
+        elif callable(repl):
+            raise ValueError("Cannot use a callable replacement when regex=False")
+
+        if case is None:
+            case = True
+
+        result = self._data.array._str_replace(
+            pat, repl, n=n, case=case, flags=flags, regex=regex
+        )
+        return self._wrap_result(result)
+
+    @forbid_nonstring_types(["bytes"])
+    def repeat(self, repeats):
+        """
+        Duplicate each string in the Series or Index.
+
+        Parameters
+        ----------
+        repeats : int or sequence of int
+            Same value for all (int) or different value per (sequence).
+
+        Returns
+        -------
+        Series or pandas.Index
+            Series or Index of repeated string objects specified by
+            input parameter repeats.
+
+        Examples
+        --------
+        >>> s = pd.Series(['a', 'b', 'c'])
+        >>> s
+        0    a
+        1    b
+        2    c
+        dtype: object
+
+        Single int repeats string in Series
+
+        >>> s.str.repeat(repeats=2)
+        0    aa
+        1    bb
+        2    cc
+        dtype: object
+
+        Sequence of int repeats corresponding string in Series
+
+        >>> s.str.repeat(repeats=[1, 2, 3])
+        0      a
+        1     bb
+        2    ccc
+        dtype: object
+        """
+        result = self._data.array._str_repeat(repeats)
+        return self._wrap_result(result)
+
+    @forbid_nonstring_types(["bytes"])
+    def pad(
+        self,
+        width: int,
+        side: Literal["left", "right", "both"] = "left",
+        fillchar: str = " ",
+    ):
+        """
+        Pad strings in the Series/Index up to width.
+
+        Parameters
+        ----------
+        width : int
+            Minimum width of resulting string; additional characters will be filled
+            with character defined in `fillchar`.
+        side : {'left', 'right', 'both'}, default 'left'
+            Side from which to fill resulting string.
+        fillchar : str, default ' '
+            Additional character for filling, default is whitespace.
+
+        Returns
+        -------
+        Series or Index of object
+            Returns Series or Index with minimum number of char in object.
+
+        See Also
+        --------
+        Series.str.rjust : Fills the left side of strings with an arbitrary
+            character. Equivalent to ``Series.str.pad(side='left')``.
+        Series.str.ljust : Fills the right side of strings with an arbitrary
+            character. Equivalent to ``Series.str.pad(side='right')``.
+        Series.str.center : Fills both sides of strings with an arbitrary
+            character. Equivalent to ``Series.str.pad(side='both')``.
+ Series.str.zfill : Pad strings in the Series/Index by prepending '0' + character. Equivalent to ``Series.str.pad(side='left', fillchar='0')``. + + Examples + -------- + >>> s = pd.Series(["caribou", "tiger"]) + >>> s + 0 caribou + 1 tiger + dtype: object + + >>> s.str.pad(width=10) + 0 caribou + 1 tiger + dtype: object + + >>> s.str.pad(width=10, side='right', fillchar='-') + 0 caribou--- + 1 tiger----- + dtype: object + + >>> s.str.pad(width=10, side='both', fillchar='-') + 0 -caribou-- + 1 --tiger--- + dtype: object + """ + if not isinstance(fillchar, str): + msg = f"fillchar must be a character, not {type(fillchar).__name__}" + raise TypeError(msg) + + if len(fillchar) != 1: + raise TypeError("fillchar must be a character, not str") + + if not is_integer(width): + msg = f"width must be of integer type, not {type(width).__name__}" + raise TypeError(msg) + + result = self._data.array._str_pad(width, side=side, fillchar=fillchar) + return self._wrap_result(result) + + _shared_docs[ + "str_pad" + ] = """ + Pad %(side)s side of strings in the Series/Index. + + Equivalent to :meth:`str.%(method)s`. + + Parameters + ---------- + width : int + Minimum width of resulting string; additional characters will be filled + with ``fillchar``. + fillchar : str + Additional character for filling, default is whitespace. + + Returns + ------- + Series/Index of objects. + + Examples + -------- + For Series.str.center: + + >>> ser = pd.Series(['dog', 'bird', 'mouse']) + >>> ser.str.center(8, fillchar='.') + 0 ..dog... + 1 ..bird.. + 2 .mouse.. + dtype: object + + For Series.str.ljust: + + >>> ser = pd.Series(['dog', 'bird', 'mouse']) + >>> ser.str.ljust(8, fillchar='.') + 0 dog..... + 1 bird.... + 2 mouse... + dtype: object + + For Series.str.rjust: + + >>> ser = pd.Series(['dog', 'bird', 'mouse']) + >>> ser.str.rjust(8, fillchar='.') + 0 .....dog + 1 ....bird + 2 ...mouse + dtype: object + """ + + @Appender(_shared_docs["str_pad"] % {"side": "left and right", "method": "center"}) + @forbid_nonstring_types(["bytes"]) + def center(self, width: int, fillchar: str = " "): + return self.pad(width, side="both", fillchar=fillchar) + + @Appender(_shared_docs["str_pad"] % {"side": "right", "method": "ljust"}) + @forbid_nonstring_types(["bytes"]) + def ljust(self, width: int, fillchar: str = " "): + return self.pad(width, side="right", fillchar=fillchar) + + @Appender(_shared_docs["str_pad"] % {"side": "left", "method": "rjust"}) + @forbid_nonstring_types(["bytes"]) + def rjust(self, width: int, fillchar: str = " "): + return self.pad(width, side="left", fillchar=fillchar) + + @forbid_nonstring_types(["bytes"]) + def zfill(self, width: int): + """ + Pad strings in the Series/Index by prepending '0' characters. + + Strings in the Series/Index are padded with '0' characters on the + left of the string to reach a total string length `width`. Strings + in the Series/Index with length greater or equal to `width` are + unchanged. + + Parameters + ---------- + width : int + Minimum length of resulting string; strings with length less + than `width` be prepended with '0' characters. + + Returns + ------- + Series/Index of objects. + + See Also + -------- + Series.str.rjust : Fills the left side of strings with an arbitrary + character. + Series.str.ljust : Fills the right side of strings with an arbitrary + character. + Series.str.pad : Fills the specified sides of strings with an arbitrary + character. + Series.str.center : Fills both sides of strings with an arbitrary + character. 
+ + Notes + ----- + Differs from :meth:`str.zfill` which has special handling + for '+'/'-' in the string. + + Examples + -------- + >>> s = pd.Series(['-1', '1', '1000', 10, np.nan]) + >>> s + 0 -1 + 1 1 + 2 1000 + 3 10 + 4 NaN + dtype: object + + Note that ``10`` and ``NaN`` are not strings, therefore they are + converted to ``NaN``. The minus sign in ``'-1'`` is treated as a + special character and the zero is added to the right of it + (:meth:`str.zfill` would have moved it to the left). ``1000`` + remains unchanged as it is longer than `width`. + + >>> s.str.zfill(3) + 0 -01 + 1 001 + 2 1000 + 3 NaN + 4 NaN + dtype: object + """ + if not is_integer(width): + msg = f"width must be of integer type, not {type(width).__name__}" + raise TypeError(msg) + f = lambda x: x.zfill(width) + result = self._data.array._str_map(f) + return self._wrap_result(result) + + def slice(self, start=None, stop=None, step=None): + """ + Slice substrings from each element in the Series or Index. + + Parameters + ---------- + start : int, optional + Start position for slice operation. + stop : int, optional + Stop position for slice operation. + step : int, optional + Step size for slice operation. + + Returns + ------- + Series or Index of object + Series or Index from sliced substring from original string object. + + See Also + -------- + Series.str.slice_replace : Replace a slice with a string. + Series.str.get : Return element at position. + Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i` + being the position. + + Examples + -------- + >>> s = pd.Series(["koala", "dog", "chameleon"]) + >>> s + 0 koala + 1 dog + 2 chameleon + dtype: object + + >>> s.str.slice(start=1) + 0 oala + 1 og + 2 hameleon + dtype: object + + >>> s.str.slice(start=-1) + 0 a + 1 g + 2 n + dtype: object + + >>> s.str.slice(stop=2) + 0 ko + 1 do + 2 ch + dtype: object + + >>> s.str.slice(step=2) + 0 kaa + 1 dg + 2 caeen + dtype: object + + >>> s.str.slice(start=0, stop=5, step=3) + 0 kl + 1 d + 2 cm + dtype: object + + Equivalent behaviour to: + + >>> s.str[0:5:3] + 0 kl + 1 d + 2 cm + dtype: object + """ + result = self._data.array._str_slice(start, stop, step) + return self._wrap_result(result) + + @forbid_nonstring_types(["bytes"]) + def slice_replace(self, start=None, stop=None, repl=None): + """ + Replace a positional slice of a string with another value. + + Parameters + ---------- + start : int, optional + Left index position to use for the slice. If not specified (None), + the slice is unbounded on the left, i.e. slice from the start + of the string. + stop : int, optional + Right index position to use for the slice. If not specified (None), + the slice is unbounded on the right, i.e. slice until the + end of the string. + repl : str, optional + String for replacement. If not specified (None), the sliced region + is replaced with an empty string. + + Returns + ------- + Series or Index + Same type as the original object. + + See Also + -------- + Series.str.slice : Just slicing without replacement. + + Examples + -------- + >>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde']) + >>> s + 0 a + 1 ab + 2 abc + 3 abdc + 4 abcde + dtype: object + + Specify just `start`, meaning replace `start` until the end of the + string with `repl`. + + >>> s.str.slice_replace(1, repl='X') + 0 aX + 1 aX + 2 aX + 3 aX + 4 aX + dtype: object + + Specify just `stop`, meaning the start of the string to `stop` is replaced + with `repl`, and the rest of the string is included. 
+ + >>> s.str.slice_replace(stop=2, repl='X') + 0 X + 1 X + 2 Xc + 3 Xdc + 4 Xcde + dtype: object + + Specify `start` and `stop`, meaning the slice from `start` to `stop` is + replaced with `repl`. Everything before or after `start` and `stop` is + included as is. + + >>> s.str.slice_replace(start=1, stop=3, repl='X') + 0 aX + 1 aX + 2 aX + 3 aXc + 4 aXde + dtype: object + """ + result = self._data.array._str_slice_replace(start, stop, repl) + return self._wrap_result(result) + + def decode(self, encoding, errors: str = "strict"): + """ + Decode character string in the Series/Index using indicated encoding. + + Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in + python3. + + Parameters + ---------- + encoding : str + errors : str, optional + + Returns + ------- + Series or Index + + Examples + -------- + For Series: + + >>> ser = pd.Series([b'cow', b'123', b'()']) + >>> ser.str.decode('ascii') + 0 cow + 1 123 + 2 () + dtype: object + """ + # TODO: Add a similar _bytes interface. + if encoding in _cpython_optimized_decoders: + # CPython optimized implementation + f = lambda x: x.decode(encoding, errors) + else: + decoder = codecs.getdecoder(encoding) + f = lambda x: decoder(x, errors)[0] + arr = self._data.array + # assert isinstance(arr, (StringArray,)) + result = arr._str_map(f) + return self._wrap_result(result) + + @forbid_nonstring_types(["bytes"]) + def encode(self, encoding, errors: str = "strict"): + """ + Encode character string in the Series/Index using indicated encoding. + + Equivalent to :meth:`str.encode`. + + Parameters + ---------- + encoding : str + errors : str, optional + + Returns + ------- + Series/Index of objects + + Examples + -------- + >>> ser = pd.Series(['cow', '123', '()']) + >>> ser.str.encode(encoding='ascii') + 0 b'cow' + 1 b'123' + 2 b'()' + dtype: object + """ + result = self._data.array._str_encode(encoding, errors) + return self._wrap_result(result, returns_string=False) + + _shared_docs[ + "str_strip" + ] = r""" + Remove %(position)s characters. + + Strip whitespaces (including newlines) or a set of specified characters + from each string in the Series/Index from %(side)s. + Replaces any non-strings in Series with NaNs. + Equivalent to :meth:`str.%(method)s`. + + Parameters + ---------- + to_strip : str or None, default None + Specifying the set of characters to be removed. + All combinations of this set of characters will be stripped. + If None then whitespaces are removed. + + Returns + ------- + Series or Index of object + + See Also + -------- + Series.str.strip : Remove leading and trailing characters in Series/Index. + Series.str.lstrip : Remove leading characters in Series/Index. + Series.str.rstrip : Remove trailing characters in Series/Index. + + Examples + -------- + >>> s = pd.Series(['1. Ant. ', '2. Bee!\n', '3. Cat?\t', np.nan, 10, True]) + >>> s + 0 1. Ant. + 1 2. Bee!\n + 2 3. Cat?\t + 3 NaN + 4 10 + 5 True + dtype: object + + >>> s.str.strip() + 0 1. Ant. + 1 2. Bee! + 2 3. Cat? + 3 NaN + 4 NaN + 5 NaN + dtype: object + + >>> s.str.lstrip('123.') + 0 Ant. + 1 Bee!\n + 2 Cat?\t + 3 NaN + 4 NaN + 5 NaN + dtype: object + + >>> s.str.rstrip('.!? \n\t') + 0 1. Ant + 1 2. Bee + 2 3. Cat + 3 NaN + 4 NaN + 5 NaN + dtype: object + + >>> s.str.strip('123.!? 
\n\t') + 0 Ant + 1 Bee + 2 Cat + 3 NaN + 4 NaN + 5 NaN + dtype: object + """ + + @Appender( + _shared_docs["str_strip"] + % { + "side": "left and right sides", + "method": "strip", + "position": "leading and trailing", + } + ) + @forbid_nonstring_types(["bytes"]) + def strip(self, to_strip=None): + result = self._data.array._str_strip(to_strip) + return self._wrap_result(result) + + @Appender( + _shared_docs["str_strip"] + % {"side": "left side", "method": "lstrip", "position": "leading"} + ) + @forbid_nonstring_types(["bytes"]) + def lstrip(self, to_strip=None): + result = self._data.array._str_lstrip(to_strip) + return self._wrap_result(result) + + @Appender( + _shared_docs["str_strip"] + % {"side": "right side", "method": "rstrip", "position": "trailing"} + ) + @forbid_nonstring_types(["bytes"]) + def rstrip(self, to_strip=None): + result = self._data.array._str_rstrip(to_strip) + return self._wrap_result(result) + + _shared_docs[ + "str_removefix" + ] = r""" + Remove a %(side)s from an object series. + + If the %(side)s is not present, the original string will be returned. + + Parameters + ---------- + %(side)s : str + Remove the %(side)s of the string. + + Returns + ------- + Series/Index: object + The Series or Index with given %(side)s removed. + + See Also + -------- + Series.str.remove%(other_side)s : Remove a %(other_side)s from an object series. + + Examples + -------- + >>> s = pd.Series(["str_foo", "str_bar", "no_prefix"]) + >>> s + 0 str_foo + 1 str_bar + 2 no_prefix + dtype: object + >>> s.str.removeprefix("str_") + 0 foo + 1 bar + 2 no_prefix + dtype: object + + >>> s = pd.Series(["foo_str", "bar_str", "no_suffix"]) + >>> s + 0 foo_str + 1 bar_str + 2 no_suffix + dtype: object + >>> s.str.removesuffix("_str") + 0 foo + 1 bar + 2 no_suffix + dtype: object + """ + + @Appender( + _shared_docs["str_removefix"] % {"side": "prefix", "other_side": "suffix"} + ) + @forbid_nonstring_types(["bytes"]) + def removeprefix(self, prefix: str): + result = self._data.array._str_removeprefix(prefix) + return self._wrap_result(result) + + @Appender( + _shared_docs["str_removefix"] % {"side": "suffix", "other_side": "prefix"} + ) + @forbid_nonstring_types(["bytes"]) + def removesuffix(self, suffix: str): + result = self._data.array._str_removesuffix(suffix) + return self._wrap_result(result) + + @forbid_nonstring_types(["bytes"]) + def wrap(self, width: int, **kwargs): + r""" + Wrap strings in Series/Index at specified line width. + + This method has the same keyword parameters and defaults as + :class:`textwrap.TextWrapper`. + + Parameters + ---------- + width : int + Maximum line width. + expand_tabs : bool, optional + If True, tab characters will be expanded to spaces (default: True). + replace_whitespace : bool, optional + If True, each whitespace character (as defined by string.whitespace) + remaining after tab expansion will be replaced by a single space + (default: True). + drop_whitespace : bool, optional + If True, whitespace that, after wrapping, happens to end up at the + beginning or end of a line is dropped (default: True). + break_long_words : bool, optional + If True, then words longer than width will be broken in order to ensure + that no lines are longer than width. If it is false, long words will + not be broken, and some lines may be longer than width (default: True). + break_on_hyphens : bool, optional + If True, wrapping will occur preferably on whitespace and right after + hyphens in compound words, as it is customary in English. 
If false,
+            only whitespaces will be considered as potentially good places for
+            line breaks, but you need to set break_long_words to false if you
+            want truly insecable words (default: True).
+
+        Returns
+        -------
+        Series or Index
+
+        Notes
+        -----
+        Internally, this method uses a :class:`textwrap.TextWrapper` instance with
+        default settings. To achieve behavior matching R's stringr library str_wrap
+        function, use the arguments:
+
+        - expand_tabs = False
+        - replace_whitespace = True
+        - drop_whitespace = True
+        - break_long_words = False
+        - break_on_hyphens = False
+
+        Examples
+        --------
+        >>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
+        >>> s.str.wrap(12)
+        0             line to be\nwrapped
+        1    another line\nto be\nwrapped
+        dtype: object
+        """
+        result = self._data.array._str_wrap(width, **kwargs)
+        return self._wrap_result(result)
+
+    @forbid_nonstring_types(["bytes"])
+    def get_dummies(self, sep: str = "|"):
+        """
+        Return DataFrame of dummy/indicator variables for Series.
+
+        Each string in Series is split by sep and returned as a DataFrame
+        of dummy/indicator variables.
+
+        Parameters
+        ----------
+        sep : str, default "|"
+            String to split on.
+
+        Returns
+        -------
+        DataFrame
+            Dummy variables corresponding to values of the Series.
+
+        See Also
+        --------
+        get_dummies : Convert categorical variable into dummy/indicator
+            variables.
+
+        Examples
+        --------
+        >>> pd.Series(['a|b', 'a', 'a|c']).str.get_dummies()
+           a  b  c
+        0  1  1  0
+        1  1  0  0
+        2  1  0  1
+
+        >>> pd.Series(['a|b', np.nan, 'a|c']).str.get_dummies()
+           a  b  c
+        0  1  1  0
+        1  0  0  0
+        2  1  0  1
+        """
+        # we need to cast to Series of strings as only that has all
+        # methods available for making the dummies...
+        result, name = self._data.array._str_get_dummies(sep)
+        return self._wrap_result(
+            result,
+            name=name,
+            expand=True,
+            returns_string=False,
+        )
+
+    @forbid_nonstring_types(["bytes"])
+    def translate(self, table):
+        """
+        Map all characters in the string through the given mapping table.
+
+        Equivalent to standard :meth:`str.translate`.
+
+        Parameters
+        ----------
+        table : dict
+            Table is a mapping of Unicode ordinals to Unicode ordinals, strings, or
+            None. Unmapped characters are left untouched.
+            Characters mapped to None are deleted. :meth:`str.maketrans` is a
+            helper function for making translation tables.
+
+        Returns
+        -------
+        Series or Index
+
+        Examples
+        --------
+        >>> ser = pd.Series(["El niño", "Françoise"])
+        >>> mytable = str.maketrans({'ñ': 'n', 'ç': 'c'})
+        >>> ser.str.translate(mytable)
+        0      El nino
+        1    Francoise
+        dtype: object
+        """
+        result = self._data.array._str_translate(table)
+        dtype = object if self._data.dtype == "object" else None
+        return self._wrap_result(result, dtype=dtype)
+
+    @forbid_nonstring_types(["bytes"])
+    def count(self, pat, flags: int = 0):
+        r"""
+        Count occurrences of pattern in each string of the Series/Index.
+
+        This function is used to count the number of times a particular regex
+        pattern is repeated in each of the string elements of the
+        :class:`~pandas.Series`.
+
+        Parameters
+        ----------
+        pat : str
+            Valid regular expression.
+        flags : int, default 0, meaning no flags
+            Flags for the `re` module. For a complete list, see the
+            :mod:`re` module documentation.
+
+        Returns
+        -------
+        Series or Index
+            Same type as the calling object containing the integer counts.
+
+        See Also
+        --------
+        re : Standard library module for regular expressions.
+ str.count : Standard library version, without regular expression support. + + Notes + ----- + Some characters need to be escaped when passing in `pat`. + eg. ``'$'`` has a special meaning in regex and must be escaped when + finding this literal character. + + Examples + -------- + >>> s = pd.Series(['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat']) + >>> s.str.count('a') + 0 0.0 + 1 0.0 + 2 2.0 + 3 2.0 + 4 NaN + 5 0.0 + 6 1.0 + dtype: float64 + + Escape ``'$'`` to find the literal dollar sign. + + >>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat']) + >>> s.str.count('\\$') + 0 1 + 1 0 + 2 1 + 3 2 + 4 2 + 5 0 + dtype: int64 + + This is also available on Index + + >>> pd.Index(['A', 'A', 'Aaba', 'cat']).str.count('a') + Index([0, 0, 2, 1], dtype='int64') + """ + result = self._data.array._str_count(pat, flags) + return self._wrap_result(result, returns_string=False) + + @forbid_nonstring_types(["bytes"]) + def startswith( + self, pat: str | tuple[str, ...], na: Scalar | None = None + ) -> Series | Index: + """ + Test if the start of each string element matches a pattern. + + Equivalent to :meth:`str.startswith`. + + Parameters + ---------- + pat : str or tuple[str, ...] + Character sequence or tuple of strings. Regular expressions are not + accepted. + na : object, default NaN + Object shown if element tested is not a string. The default depends + on dtype of the array. For object-dtype, ``numpy.nan`` is used. + For ``StringDtype``, ``pandas.NA`` is used. + + Returns + ------- + Series or Index of bool + A Series of booleans indicating whether the given pattern matches + the start of each string element. + + See Also + -------- + str.startswith : Python standard library string method. + Series.str.endswith : Same as startswith, but tests the end of string. + Series.str.contains : Tests if string element contains a pattern. + + Examples + -------- + >>> s = pd.Series(['bat', 'Bear', 'cat', np.nan]) + >>> s + 0 bat + 1 Bear + 2 cat + 3 NaN + dtype: object + + >>> s.str.startswith('b') + 0 True + 1 False + 2 False + 3 NaN + dtype: object + + >>> s.str.startswith(('b', 'B')) + 0 True + 1 True + 2 False + 3 NaN + dtype: object + + Specifying `na` to be `False` instead of `NaN`. + + >>> s.str.startswith('b', na=False) + 0 True + 1 False + 2 False + 3 False + dtype: bool + """ + if not isinstance(pat, (str, tuple)): + msg = f"expected a string or tuple, not {type(pat).__name__}" + raise TypeError(msg) + result = self._data.array._str_startswith(pat, na=na) + return self._wrap_result(result, returns_string=False) + + @forbid_nonstring_types(["bytes"]) + def endswith( + self, pat: str | tuple[str, ...], na: Scalar | None = None + ) -> Series | Index: + """ + Test if the end of each string element matches a pattern. + + Equivalent to :meth:`str.endswith`. + + Parameters + ---------- + pat : str or tuple[str, ...] + Character sequence or tuple of strings. Regular expressions are not + accepted. + na : object, default NaN + Object shown if element tested is not a string. The default depends + on dtype of the array. For object-dtype, ``numpy.nan`` is used. + For ``StringDtype``, ``pandas.NA`` is used. + + Returns + ------- + Series or Index of bool + A Series of booleans indicating whether the given pattern matches + the end of each string element. + + See Also + -------- + str.endswith : Python standard library string method. + Series.str.startswith : Same as endswith, but tests the start of string. + Series.str.contains : Tests if string element contains a pattern. 
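+
+        Notes
+        -----
+        `pat` is matched literally. To test the end of each string against a
+        regular expression instead, one option is :meth:`Series.str.contains`
+        with an anchored pattern; a quick sketch:
+
+        >>> pd.Series(['bat', 'bear']).str.contains(r'[td]$', regex=True)
+        0     True
+        1    False
+        dtype: bool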
+ + Examples + -------- + >>> s = pd.Series(['bat', 'bear', 'caT', np.nan]) + >>> s + 0 bat + 1 bear + 2 caT + 3 NaN + dtype: object + + >>> s.str.endswith('t') + 0 True + 1 False + 2 False + 3 NaN + dtype: object + + >>> s.str.endswith(('t', 'T')) + 0 True + 1 False + 2 True + 3 NaN + dtype: object + + Specifying `na` to be `False` instead of `NaN`. + + >>> s.str.endswith('t', na=False) + 0 True + 1 False + 2 False + 3 False + dtype: bool + """ + if not isinstance(pat, (str, tuple)): + msg = f"expected a string or tuple, not {type(pat).__name__}" + raise TypeError(msg) + result = self._data.array._str_endswith(pat, na=na) + return self._wrap_result(result, returns_string=False) + + @forbid_nonstring_types(["bytes"]) + def findall(self, pat, flags: int = 0): + """ + Find all occurrences of pattern or regular expression in the Series/Index. + + Equivalent to applying :func:`re.findall` to all the elements in the + Series/Index. + + Parameters + ---------- + pat : str + Pattern or regular expression. + flags : int, default 0 + Flags from ``re`` module, e.g. `re.IGNORECASE` (default is 0, which + means no flags). + + Returns + ------- + Series/Index of lists of strings + All non-overlapping matches of pattern or regular expression in each + string of this Series/Index. + + See Also + -------- + count : Count occurrences of pattern or regular expression in each string + of the Series/Index. + extractall : For each string in the Series, extract groups from all matches + of regular expression and return a DataFrame with one row for each + match and one column for each group. + re.findall : The equivalent ``re`` function to all non-overlapping matches + of pattern or regular expression in string, as a list of strings. + + Examples + -------- + >>> s = pd.Series(['Lion', 'Monkey', 'Rabbit']) + + The search for the pattern 'Monkey' returns one match: + + >>> s.str.findall('Monkey') + 0 [] + 1 [Monkey] + 2 [] + dtype: object + + On the other hand, the search for the pattern 'MONKEY' doesn't return any + match: + + >>> s.str.findall('MONKEY') + 0 [] + 1 [] + 2 [] + dtype: object + + Flags can be added to the pattern or regular expression. For instance, + to find the pattern 'MONKEY' ignoring the case: + + >>> import re + >>> s.str.findall('MONKEY', flags=re.IGNORECASE) + 0 [] + 1 [Monkey] + 2 [] + dtype: object + + When the pattern matches more than one string in the Series, all matches + are returned: + + >>> s.str.findall('on') + 0 [on] + 1 [on] + 2 [] + dtype: object + + Regular expressions are supported too. For instance, the search for all the + strings ending with the word 'on' is shown next: + + >>> s.str.findall('on$') + 0 [on] + 1 [] + 2 [] + dtype: object + + If the pattern is found more than once in the same string, then a list of + multiple strings is returned: + + >>> s.str.findall('b') + 0 [] + 1 [] + 2 [b, b] + dtype: object + """ + result = self._data.array._str_findall(pat, flags) + return self._wrap_result(result, returns_string=False) + + @forbid_nonstring_types(["bytes"]) + def extract( + self, pat: str, flags: int = 0, expand: bool = True + ) -> DataFrame | Series | Index: + r""" + Extract capture groups in the regex `pat` as columns in a DataFrame. + + For each subject string in the Series, extract groups from the + first match of regular expression `pat`. + + Parameters + ---------- + pat : str + Regular expression pattern with capturing groups. + flags : int, default 0 (no flags) + Flags from the ``re`` module, e.g. 
``re.IGNORECASE``, that
+            modify regular expression matching for things like case,
+            spaces, etc. For more details, see :mod:`re`.
+        expand : bool, default True
+            If True, return DataFrame with one column per capture group.
+            If False, return a Series/Index if there is one capture group
+            or DataFrame if there are multiple capture groups.
+
+        Returns
+        -------
+        DataFrame or Series or Index
+            A DataFrame with one row for each subject string, and one
+            column for each group. Any capture group names in regular
+            expression pat will be used for column names; otherwise
+            capture group numbers will be used. The dtype of each result
+            column is always object, even when no match is found. If
+            ``expand=False`` and pat has only one capture group, then
+            return a Series (if subject is a Series) or Index (if subject
+            is an Index).
+
+        See Also
+        --------
+        extractall : Returns all matches (not just the first match).
+
+        Examples
+        --------
+        A pattern with two groups will return a DataFrame with two columns.
+        Non-matches will be NaN.
+
+        >>> s = pd.Series(['a1', 'b2', 'c3'])
+        >>> s.str.extract(r'([ab])(\d)')
+             0    1
+        0    a    1
+        1    b    2
+        2  NaN  NaN
+
+        A pattern may contain optional groups.
+
+        >>> s.str.extract(r'([ab])?(\d)')
+             0  1
+        0    a  1
+        1    b  2
+        2  NaN  3
+
+        Named groups will become column names in the result.
+
+        >>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)')
+          letter digit
+        0      a     1
+        1      b     2
+        2    NaN   NaN
+
+        A pattern with one group will return a DataFrame with one column
+        if expand=True.
+
+        >>> s.str.extract(r'[ab](\d)', expand=True)
+             0
+        0    1
+        1    2
+        2  NaN
+
+        A pattern with one group will return a Series if expand=False.
+
+        >>> s.str.extract(r'[ab](\d)', expand=False)
+        0      1
+        1      2
+        2    NaN
+        dtype: object
+        """
+        from pandas import DataFrame
+
+        if not isinstance(expand, bool):
+            raise ValueError("expand must be True or False")
+
+        regex = re.compile(pat, flags=flags)
+        if regex.groups == 0:
+            raise ValueError("pattern contains no capture groups")
+
+        if not expand and regex.groups > 1 and isinstance(self._data, ABCIndex):
+            raise ValueError("only one regex group is supported with Index")
+
+        obj = self._data
+        result_dtype = _result_dtype(obj)
+
+        returns_df = regex.groups > 1 or expand
+
+        if returns_df:
+            name = None
+            columns = _get_group_names(regex)
+
+            if obj.array.size == 0:
+                result = DataFrame(columns=columns, dtype=result_dtype)
+
+            else:
+                result_list = self._data.array._str_extract(
+                    pat, flags=flags, expand=returns_df
+                )
+
+                result_index: Index | None
+                if isinstance(obj, ABCSeries):
+                    result_index = obj.index
+                else:
+                    result_index = None
+
+                result = DataFrame(
+                    result_list, columns=columns, index=result_index, dtype=result_dtype
+                )
+
+        else:
+            name = _get_single_group_name(regex)
+            result = self._data.array._str_extract(pat, flags=flags, expand=returns_df)
+        return self._wrap_result(result, name=name)
+
+    @forbid_nonstring_types(["bytes"])
+    def extractall(self, pat, flags: int = 0) -> DataFrame:
+        r"""
+        Extract capture groups in the regex `pat` as columns in DataFrame.
+
+        For each subject string in the Series, extract groups from all
+        matches of regular expression pat. When each subject string in the
+        Series has exactly one match, extractall(pat).xs(0, level='match')
+        is the same as extract(pat).
+
+        Parameters
+        ----------
+        pat : str
+            Regular expression pattern with capturing groups.
+        flags : int, default 0 (no flags)
+            A ``re`` module flag, for example ``re.IGNORECASE``. These allow
+            you to modify regular expression matching for things like case,
+            spaces, etc.
+            Multiple flags can be combined with the bitwise OR operator,
+            for example ``re.IGNORECASE | re.MULTILINE``.
+
+        Returns
+        -------
+        DataFrame
+            A ``DataFrame`` with one row for each match, and one column for each
+            group. Its rows have a ``MultiIndex`` with first levels that come from
+            the subject ``Series``. The last level is named 'match' and indexes the
+            matches in each item of the ``Series``. Any capture group names in
+            regular expression pat will be used for column names; otherwise capture
+            group numbers will be used.
+
+        See Also
+        --------
+        extract : Returns first match only (not all matches).
+
+        Examples
+        --------
+        A pattern with one group will return a DataFrame with one column.
+        Indices with no matches will not appear in the result.
+
+        >>> s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
+        >>> s.str.extractall(r"[ab](\d)")
+                 0
+          match
+        A 0      1
+          1      2
+        B 0      1
+
+        Capture group names are used for column names of the result.
+
+        >>> s.str.extractall(r"[ab](?P<digit>\d)")
+                digit
+          match
+        A 0         1
+          1         2
+        B 0         1
+
+        A pattern with two groups will return a DataFrame with two columns.
+
+        >>> s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
+                letter digit
+          match
+        A 0          a     1
+          1          a     2
+        B 0          b     1
+
+        Optional groups that do not match are NaN in the result.
+
+        >>> s.str.extractall(r"(?P<letter>[ab])?(?P<digit>\d)")
+                letter digit
+          match
+        A 0          a     1
+          1          a     2
+        B 0          b     1
+        C 0        NaN     1
+        """
+        # TODO: dispatch
+        return str_extractall(self._orig, pat, flags)
+
+    _shared_docs[
+        "find"
+    ] = """
+    Return %(side)s indexes in each string in the Series/Index.
+
+    Each of the returned indexes corresponds to the position where the
+    substring is fully contained between [start:end]. Return -1 on
+    failure. Equivalent to standard :meth:`str.%(method)s`.
+
+    Parameters
+    ----------
+    sub : str
+        Substring being searched.
+    start : int
+        Left edge index.
+    end : int
+        Right edge index.
+
+    Returns
+    -------
+    Series or Index of int.
+
+    See Also
+    --------
+    %(also)s
+
+    Examples
+    --------
+    For Series.str.find:
+
+    >>> ser = pd.Series(["cow_", "duck_", "do_ve"])
+    >>> ser.str.find("_")
+    0    3
+    1    4
+    2    2
+    dtype: int64
+
+    For Series.str.rfind:
+
+    >>> ser = pd.Series(["_cow_", "duck_", "do_v_e"])
+    >>> ser.str.rfind("_")
+    0    4
+    1    4
+    2    4
+    dtype: int64
+    """
+
+    @Appender(
+        _shared_docs["find"]
+        % {
+            "side": "lowest",
+            "method": "find",
+            "also": "rfind : Return highest indexes in each string.",
+        }
+    )
+    @forbid_nonstring_types(["bytes"])
+    def find(self, sub, start: int = 0, end=None):
+        if not isinstance(sub, str):
+            msg = f"expected a string object, not {type(sub).__name__}"
+            raise TypeError(msg)
+
+        result = self._data.array._str_find(sub, start, end)
+        return self._wrap_result(result, returns_string=False)
+
+    @Appender(
+        _shared_docs["find"]
+        % {
+            "side": "highest",
+            "method": "rfind",
+            "also": "find : Return lowest indexes in each string.",
+        }
+    )
+    @forbid_nonstring_types(["bytes"])
+    def rfind(self, sub, start: int = 0, end=None):
+        if not isinstance(sub, str):
+            msg = f"expected a string object, not {type(sub).__name__}"
+            raise TypeError(msg)
+
+        result = self._data.array._str_rfind(sub, start=start, end=end)
+        return self._wrap_result(result, returns_string=False)
+
+    @forbid_nonstring_types(["bytes"])
+    def normalize(self, form):
+        """
+        Return the Unicode normal form for the strings in the Series/Index.
+
+        For more information on the forms, see
+        :func:`unicodedata.normalize`.
+
+        Parameters
+        ----------
+        form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
+            Unicode form.
+ + Returns + ------- + Series/Index of objects + + Examples + -------- + >>> ser = pd.Series(['ñ']) + >>> ser.str.normalize('NFC') == ser.str.normalize('NFD') + 0 False + dtype: bool + """ + result = self._data.array._str_normalize(form) + return self._wrap_result(result) + + _shared_docs[ + "index" + ] = """ + Return %(side)s indexes in each string in Series/Index. + + Each of the returned indexes corresponds to the position where the + substring is fully contained between [start:end]. This is the same + as ``str.%(similar)s`` except instead of returning -1, it raises a + ValueError when the substring is not found. Equivalent to standard + ``str.%(method)s``. + + Parameters + ---------- + sub : str + Substring being searched. + start : int + Left edge index. + end : int + Right edge index. + + Returns + ------- + Series or Index of object + + See Also + -------- + %(also)s + + Examples + -------- + For Series.str.index: + + >>> ser = pd.Series(["horse", "eagle", "donkey"]) + >>> ser.str.index("e") + 0 4 + 1 0 + 2 4 + dtype: int64 + + For Series.str.rindex: + + >>> ser = pd.Series(["Deer", "eagle", "Sheep"]) + >>> ser.str.rindex("e") + 0 2 + 1 4 + 2 3 + dtype: int64 + """ + + @Appender( + _shared_docs["index"] + % { + "side": "lowest", + "similar": "find", + "method": "index", + "also": "rindex : Return highest indexes in each strings.", + } + ) + @forbid_nonstring_types(["bytes"]) + def index(self, sub, start: int = 0, end=None): + if not isinstance(sub, str): + msg = f"expected a string object, not {type(sub).__name__}" + raise TypeError(msg) + + result = self._data.array._str_index(sub, start=start, end=end) + return self._wrap_result(result, returns_string=False) + + @Appender( + _shared_docs["index"] + % { + "side": "highest", + "similar": "rfind", + "method": "rindex", + "also": "index : Return lowest indexes in each strings.", + } + ) + @forbid_nonstring_types(["bytes"]) + def rindex(self, sub, start: int = 0, end=None): + if not isinstance(sub, str): + msg = f"expected a string object, not {type(sub).__name__}" + raise TypeError(msg) + + result = self._data.array._str_rindex(sub, start=start, end=end) + return self._wrap_result(result, returns_string=False) + + def len(self): + """ + Compute the length of each element in the Series/Index. + + The element may be a sequence (such as a string, tuple or list) or a collection + (such as a dictionary). + + Returns + ------- + Series or Index of int + A Series or Index of integer values indicating the length of each + element in the Series or Index. + + See Also + -------- + str.len : Python built-in function returning the length of an object. + Series.size : Returns the length of the Series. + + Examples + -------- + Returns the length (number of characters) in a string. Returns the + number of entries for dictionaries, lists or tuples. + + >>> s = pd.Series(['dog', + ... '', + ... 5, + ... {'foo' : 'bar'}, + ... [2, 3, 5, 7], + ... ('one', 'two', 'three')]) + >>> s + 0 dog + 1 + 2 5 + 3 {'foo': 'bar'} + 4 [2, 3, 5, 7] + 5 (one, two, three) + dtype: object + >>> s.str.len() + 0 3.0 + 1 0.0 + 2 NaN + 3 1.0 + 4 4.0 + 5 3.0 + dtype: float64 + """ + result = self._data.array._str_len() + return self._wrap_result(result, returns_string=False) + + _shared_docs[ + "casemethods" + ] = """ + Convert strings in the Series/Index to %(type)s. + %(version)s + Equivalent to :meth:`str.%(method)s`. + + Returns + ------- + Series or Index of object + + See Also + -------- + Series.str.lower : Converts all characters to lowercase. 
+ Series.str.upper : Converts all characters to uppercase. + Series.str.title : Converts first character of each word to uppercase and + remaining to lowercase. + Series.str.capitalize : Converts first character to uppercase and + remaining to lowercase. + Series.str.swapcase : Converts uppercase to lowercase and lowercase to + uppercase. + Series.str.casefold: Removes all case distinctions in the string. + + Examples + -------- + >>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']) + >>> s + 0 lower + 1 CAPITALS + 2 this is a sentence + 3 SwApCaSe + dtype: object + + >>> s.str.lower() + 0 lower + 1 capitals + 2 this is a sentence + 3 swapcase + dtype: object + + >>> s.str.upper() + 0 LOWER + 1 CAPITALS + 2 THIS IS A SENTENCE + 3 SWAPCASE + dtype: object + + >>> s.str.title() + 0 Lower + 1 Capitals + 2 This Is A Sentence + 3 Swapcase + dtype: object + + >>> s.str.capitalize() + 0 Lower + 1 Capitals + 2 This is a sentence + 3 Swapcase + dtype: object + + >>> s.str.swapcase() + 0 LOWER + 1 capitals + 2 THIS IS A SENTENCE + 3 sWaPcAsE + dtype: object + """ + # Types: + # cases: + # upper, lower, title, capitalize, swapcase, casefold + # boolean: + # isalpha, isnumeric isalnum isdigit isdecimal isspace islower isupper istitle + # _doc_args holds dict of strings to use in substituting casemethod docs + _doc_args: dict[str, dict[str, str]] = {} + _doc_args["lower"] = {"type": "lowercase", "method": "lower", "version": ""} + _doc_args["upper"] = {"type": "uppercase", "method": "upper", "version": ""} + _doc_args["title"] = {"type": "titlecase", "method": "title", "version": ""} + _doc_args["capitalize"] = { + "type": "be capitalized", + "method": "capitalize", + "version": "", + } + _doc_args["swapcase"] = { + "type": "be swapcased", + "method": "swapcase", + "version": "", + } + _doc_args["casefold"] = { + "type": "be casefolded", + "method": "casefold", + "version": "", + } + + @Appender(_shared_docs["casemethods"] % _doc_args["lower"]) + @forbid_nonstring_types(["bytes"]) + def lower(self): + result = self._data.array._str_lower() + return self._wrap_result(result) + + @Appender(_shared_docs["casemethods"] % _doc_args["upper"]) + @forbid_nonstring_types(["bytes"]) + def upper(self): + result = self._data.array._str_upper() + return self._wrap_result(result) + + @Appender(_shared_docs["casemethods"] % _doc_args["title"]) + @forbid_nonstring_types(["bytes"]) + def title(self): + result = self._data.array._str_title() + return self._wrap_result(result) + + @Appender(_shared_docs["casemethods"] % _doc_args["capitalize"]) + @forbid_nonstring_types(["bytes"]) + def capitalize(self): + result = self._data.array._str_capitalize() + return self._wrap_result(result) + + @Appender(_shared_docs["casemethods"] % _doc_args["swapcase"]) + @forbid_nonstring_types(["bytes"]) + def swapcase(self): + result = self._data.array._str_swapcase() + return self._wrap_result(result) + + @Appender(_shared_docs["casemethods"] % _doc_args["casefold"]) + @forbid_nonstring_types(["bytes"]) + def casefold(self): + result = self._data.array._str_casefold() + return self._wrap_result(result) + + _shared_docs[ + "ismethods" + ] = """ + Check whether all characters in each string are %(type)s. + + This is equivalent to running the Python string method + :meth:`str.%(method)s` for each element of the Series/Index. If a string + has zero characters, ``False`` is returned for that check. 
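+
+    This mirrors Python's built-in ``str`` predicates, which return
+    ``False`` for the empty string, as a quick plain-Python check shows:
+
+    >>> ''.isalpha()
+    False
+    >>> 'abc'.isalpha()
+    True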
+
+    Returns
+    -------
+    Series or Index of bool
+        Series or Index of boolean values with the same length as the original
+        Series/Index.
+
+    See Also
+    --------
+    Series.str.isalpha : Check whether all characters are alphabetic.
+    Series.str.isnumeric : Check whether all characters are numeric.
+    Series.str.isalnum : Check whether all characters are alphanumeric.
+    Series.str.isdigit : Check whether all characters are digits.
+    Series.str.isdecimal : Check whether all characters are decimal.
+    Series.str.isspace : Check whether all characters are whitespace.
+    Series.str.islower : Check whether all characters are lowercase.
+    Series.str.isupper : Check whether all characters are uppercase.
+    Series.str.istitle : Check whether all characters are titlecase.
+
+    Examples
+    --------
+    **Checks for Alphabetic and Numeric Characters**
+
+    >>> s1 = pd.Series(['one', 'one1', '1', ''])
+
+    >>> s1.str.isalpha()
+    0     True
+    1    False
+    2    False
+    3    False
+    dtype: bool
+
+    >>> s1.str.isnumeric()
+    0    False
+    1    False
+    2     True
+    3    False
+    dtype: bool
+
+    >>> s1.str.isalnum()
+    0     True
+    1     True
+    2     True
+    3    False
+    dtype: bool
+
+    Note that checks against characters mixed with any additional punctuation
+    or whitespace will evaluate to ``False`` for an alphanumeric check.
+
+    >>> s2 = pd.Series(['A B', '1.5', '3,000'])
+    >>> s2.str.isalnum()
+    0    False
+    1    False
+    2    False
+    dtype: bool
+
+    **More Detailed Checks for Numeric Characters**
+
+    There are several different but overlapping sets of numeric characters that
+    can be checked for.
+
+    >>> s3 = pd.Series(['23', '³', '⅕', ''])
+
+    The ``s3.str.isdecimal`` method checks for characters used to form numbers
+    in base 10.
+
+    >>> s3.str.isdecimal()
+    0     True
+    1    False
+    2    False
+    3    False
+    dtype: bool
+
+    The ``s3.str.isdigit`` method is the same as ``s3.str.isdecimal`` but also
+    includes special digits, like superscripted and subscripted digits in
+    unicode.
+
+    >>> s3.str.isdigit()
+    0     True
+    1     True
+    2    False
+    3    False
+    dtype: bool
+
+    The ``s3.str.isnumeric`` method is the same as ``s3.str.isdigit`` but also
+    includes other characters that can represent quantities such as unicode
+    fractions.
+
+    >>> s3.str.isnumeric()
+    0     True
+    1     True
+    2     True
+    3    False
+    dtype: bool
+
+    **Checks for Whitespace**
+
+    >>> s4 = pd.Series([' ', '\\t\\r\\n ', ''])
+    >>> s4.str.isspace()
+    0     True
+    1     True
+    2    False
+    dtype: bool
+
+    **Checks for Character Case**
+
+    >>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
+
+    >>> s5.str.islower()
+    0     True
+    1    False
+    2    False
+    3    False
+    dtype: bool
+
+    >>> s5.str.isupper()
+    0    False
+    1    False
+    2     True
+    3    False
+    dtype: bool
+
+    The ``s5.str.istitle`` method checks for whether all words are in title
+    case (whether only the first letter of each word is capitalized). Words are
+    assumed to be any sequence of non-numeric characters separated by
+    whitespace characters.
+ + >>> s5.str.istitle() + 0 False + 1 True + 2 False + 3 False + dtype: bool + """ + _doc_args["isalnum"] = {"type": "alphanumeric", "method": "isalnum"} + _doc_args["isalpha"] = {"type": "alphabetic", "method": "isalpha"} + _doc_args["isdigit"] = {"type": "digits", "method": "isdigit"} + _doc_args["isspace"] = {"type": "whitespace", "method": "isspace"} + _doc_args["islower"] = {"type": "lowercase", "method": "islower"} + _doc_args["isupper"] = {"type": "uppercase", "method": "isupper"} + _doc_args["istitle"] = {"type": "titlecase", "method": "istitle"} + _doc_args["isnumeric"] = {"type": "numeric", "method": "isnumeric"} + _doc_args["isdecimal"] = {"type": "decimal", "method": "isdecimal"} + # force _noarg_wrapper return type with dtype=np.dtype(bool) (GH 29624) + + isalnum = _map_and_wrap( + "isalnum", docstring=_shared_docs["ismethods"] % _doc_args["isalnum"] + ) + isalpha = _map_and_wrap( + "isalpha", docstring=_shared_docs["ismethods"] % _doc_args["isalpha"] + ) + isdigit = _map_and_wrap( + "isdigit", docstring=_shared_docs["ismethods"] % _doc_args["isdigit"] + ) + isspace = _map_and_wrap( + "isspace", docstring=_shared_docs["ismethods"] % _doc_args["isspace"] + ) + islower = _map_and_wrap( + "islower", docstring=_shared_docs["ismethods"] % _doc_args["islower"] + ) + isupper = _map_and_wrap( + "isupper", docstring=_shared_docs["ismethods"] % _doc_args["isupper"] + ) + istitle = _map_and_wrap( + "istitle", docstring=_shared_docs["ismethods"] % _doc_args["istitle"] + ) + isnumeric = _map_and_wrap( + "isnumeric", docstring=_shared_docs["ismethods"] % _doc_args["isnumeric"] + ) + isdecimal = _map_and_wrap( + "isdecimal", docstring=_shared_docs["ismethods"] % _doc_args["isdecimal"] + ) + + +def cat_safe(list_of_columns: list[npt.NDArray[np.object_]], sep: str): + """ + Auxiliary function for :meth:`str.cat`. + + Same signature as cat_core, but handles TypeErrors in concatenation, which + happen if the arrays in list_of columns have the wrong dtypes or content. + + Parameters + ---------- + list_of_columns : list of numpy arrays + List of arrays to be concatenated with sep; + these arrays may not contain NaNs! + sep : string + The separator string for concatenating the columns. + + Returns + ------- + nd.array + The concatenation of list_of_columns with sep. + """ + try: + result = cat_core(list_of_columns, sep) + except TypeError: + # if there are any non-string values (wrong dtype or hidden behind + # object dtype), np.sum will fail; catch and return with better message + for column in list_of_columns: + dtype = lib.infer_dtype(column, skipna=True) + if dtype not in ["string", "empty"]: + raise TypeError( + "Concatenation requires list-likes containing only " + "strings (or missing values). Offending values found in " + f"column {dtype}" + ) from None + return result + + +def cat_core(list_of_columns: list, sep: str): + """ + Auxiliary function for :meth:`str.cat` + + Parameters + ---------- + list_of_columns : list of numpy arrays + List of arrays to be concatenated with sep; + these arrays may not contain NaNs! + sep : string + The separator string for concatenating the columns. + + Returns + ------- + nd.array + The concatenation of list_of_columns with sep. 
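+
+    Examples
+    --------
+    A sketch of the separator interleaving performed below, with toy
+    object-dtype inputs (illustrative only):
+
+    >>> import numpy as np
+    >>> cols = [np.array(["a", "b"], dtype=object),
+    ...         np.array(["1", "2"], dtype=object)]
+    >>> cat_core(cols, "-")
+    array(['a-1', 'b-2'], dtype=object)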
+ """ + if sep == "": + # no need to interleave sep if it is empty + arr_of_cols = np.asarray(list_of_columns, dtype=object) + return np.sum(arr_of_cols, axis=0) + list_with_sep = [sep] * (2 * len(list_of_columns) - 1) + list_with_sep[::2] = list_of_columns + arr_with_sep = np.asarray(list_with_sep, dtype=object) + return np.sum(arr_with_sep, axis=0) + + +def _result_dtype(arr): + # workaround #27953 + # ideally we just pass `dtype=arr.dtype` unconditionally, but this fails + # when the list of values is empty. + from pandas.core.arrays.string_ import StringDtype + + if isinstance(arr.dtype, (ArrowDtype, StringDtype)): + return arr.dtype + return object + + +def _get_single_group_name(regex: re.Pattern) -> Hashable: + if regex.groupindex: + return next(iter(regex.groupindex)) + else: + return None + + +def _get_group_names(regex: re.Pattern) -> list[Hashable]: + """ + Get named groups from compiled regex. + + Unnamed groups are numbered. + + Parameters + ---------- + regex : compiled regex + + Returns + ------- + list of column labels + """ + names = {v: k for k, v in regex.groupindex.items()} + return [names.get(1 + i, i) for i in range(regex.groups)] + + +def str_extractall(arr, pat, flags: int = 0) -> DataFrame: + regex = re.compile(pat, flags=flags) + # the regex must contain capture groups. + if regex.groups == 0: + raise ValueError("pattern contains no capture groups") + + if isinstance(arr, ABCIndex): + arr = arr.to_series().reset_index(drop=True) + + columns = _get_group_names(regex) + match_list = [] + index_list = [] + is_mi = arr.index.nlevels > 1 + + for subject_key, subject in arr.items(): + if isinstance(subject, str): + if not is_mi: + subject_key = (subject_key,) + + for match_i, match_tuple in enumerate(regex.findall(subject)): + if isinstance(match_tuple, str): + match_tuple = (match_tuple,) + na_tuple = [np.nan if group == "" else group for group in match_tuple] + match_list.append(na_tuple) + result_key = tuple(subject_key + (match_i,)) + index_list.append(result_key) + + from pandas import MultiIndex + + index = MultiIndex.from_tuples(index_list, names=arr.index.names + ["match"]) + dtype = _result_dtype(arr) + + result = arr._constructor_expanddim( + match_list, index=index, columns=columns, dtype=dtype + ) + return result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/base.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/base.py new file mode 100644 index 00000000..96b03526 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/base.py @@ -0,0 +1,262 @@ +from __future__ import annotations + +import abc +from typing import ( + TYPE_CHECKING, + Callable, + Literal, +) + +import numpy as np + +if TYPE_CHECKING: + from collections.abc import Sequence + import re + + from pandas._typing import Scalar + + from pandas import Series + + +class BaseStringArrayMethods(abc.ABC): + """ + Base class for extension arrays implementing string methods. + + This is where our ExtensionArrays can override the implementation of + Series.str.. We don't expect this to work with + 3rd-party extension arrays. + + * User calls Series.str. + * pandas extracts the extension array from the Series + * pandas calls ``extension_array._str_(*args, **kwargs)`` + * pandas wraps the result, to return to the user. + + See :ref:`Series.str` for the docstring of each method. 
+ """ + + def _str_getitem(self, key): + if isinstance(key, slice): + return self._str_slice(start=key.start, stop=key.stop, step=key.step) + else: + return self._str_get(key) + + @abc.abstractmethod + def _str_count(self, pat, flags: int = 0): + pass + + @abc.abstractmethod + def _str_pad( + self, + width: int, + side: Literal["left", "right", "both"] = "left", + fillchar: str = " ", + ): + pass + + @abc.abstractmethod + def _str_contains( + self, pat, case: bool = True, flags: int = 0, na=None, regex: bool = True + ): + pass + + @abc.abstractmethod + def _str_startswith(self, pat, na=None): + pass + + @abc.abstractmethod + def _str_endswith(self, pat, na=None): + pass + + @abc.abstractmethod + def _str_replace( + self, + pat: str | re.Pattern, + repl: str | Callable, + n: int = -1, + case: bool = True, + flags: int = 0, + regex: bool = True, + ): + pass + + @abc.abstractmethod + def _str_repeat(self, repeats: int | Sequence[int]): + pass + + @abc.abstractmethod + def _str_match( + self, pat: str, case: bool = True, flags: int = 0, na: Scalar = np.nan + ): + pass + + @abc.abstractmethod + def _str_fullmatch( + self, + pat: str | re.Pattern, + case: bool = True, + flags: int = 0, + na: Scalar = np.nan, + ): + pass + + @abc.abstractmethod + def _str_encode(self, encoding, errors: str = "strict"): + pass + + @abc.abstractmethod + def _str_find(self, sub, start: int = 0, end=None): + pass + + @abc.abstractmethod + def _str_rfind(self, sub, start: int = 0, end=None): + pass + + @abc.abstractmethod + def _str_findall(self, pat, flags: int = 0): + pass + + @abc.abstractmethod + def _str_get(self, i): + pass + + @abc.abstractmethod + def _str_index(self, sub, start: int = 0, end=None): + pass + + @abc.abstractmethod + def _str_rindex(self, sub, start: int = 0, end=None): + pass + + @abc.abstractmethod + def _str_join(self, sep: str): + pass + + @abc.abstractmethod + def _str_partition(self, sep: str, expand): + pass + + @abc.abstractmethod + def _str_rpartition(self, sep: str, expand): + pass + + @abc.abstractmethod + def _str_len(self): + pass + + @abc.abstractmethod + def _str_slice(self, start=None, stop=None, step=None): + pass + + @abc.abstractmethod + def _str_slice_replace(self, start=None, stop=None, repl=None): + pass + + @abc.abstractmethod + def _str_translate(self, table): + pass + + @abc.abstractmethod + def _str_wrap(self, width: int, **kwargs): + pass + + @abc.abstractmethod + def _str_get_dummies(self, sep: str = "|"): + pass + + @abc.abstractmethod + def _str_isalnum(self): + pass + + @abc.abstractmethod + def _str_isalpha(self): + pass + + @abc.abstractmethod + def _str_isdecimal(self): + pass + + @abc.abstractmethod + def _str_isdigit(self): + pass + + @abc.abstractmethod + def _str_islower(self): + pass + + @abc.abstractmethod + def _str_isnumeric(self): + pass + + @abc.abstractmethod + def _str_isspace(self): + pass + + @abc.abstractmethod + def _str_istitle(self): + pass + + @abc.abstractmethod + def _str_isupper(self): + pass + + @abc.abstractmethod + def _str_capitalize(self): + pass + + @abc.abstractmethod + def _str_casefold(self): + pass + + @abc.abstractmethod + def _str_title(self): + pass + + @abc.abstractmethod + def _str_swapcase(self): + pass + + @abc.abstractmethod + def _str_lower(self): + pass + + @abc.abstractmethod + def _str_upper(self): + pass + + @abc.abstractmethod + def _str_normalize(self, form): + pass + + @abc.abstractmethod + def _str_strip(self, to_strip=None): + pass + + @abc.abstractmethod + def _str_lstrip(self, to_strip=None): + pass + + 
@abc.abstractmethod + def _str_rstrip(self, to_strip=None): + pass + + @abc.abstractmethod + def _str_removeprefix(self, prefix: str) -> Series: + pass + + @abc.abstractmethod + def _str_removesuffix(self, suffix: str) -> Series: + pass + + @abc.abstractmethod + def _str_split( + self, pat=None, n=-1, expand: bool = False, regex: bool | None = None + ): + pass + + @abc.abstractmethod + def _str_rsplit(self, pat=None, n=-1): + pass + + @abc.abstractmethod + def _str_extract(self, pat: str, flags: int = 0, expand: bool = True): + pass diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/object_array.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/object_array.py new file mode 100644 index 00000000..6993ae32 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/strings/object_array.py @@ -0,0 +1,497 @@ +from __future__ import annotations + +import functools +import re +import textwrap +from typing import ( + TYPE_CHECKING, + Callable, + Literal, + cast, +) +import unicodedata + +import numpy as np + +from pandas._libs import lib +import pandas._libs.missing as libmissing +import pandas._libs.ops as libops + +from pandas.core.dtypes.missing import isna + +from pandas.core.strings.base import BaseStringArrayMethods + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + NpDtype, + Scalar, + ) + + from pandas import Series + + +class ObjectStringArrayMixin(BaseStringArrayMethods): + """ + String Methods operating on object-dtype ndarrays. + """ + + _str_na_value = np.nan + + def __len__(self) -> int: + # For typing, _str_map relies on the object being sized. + raise NotImplementedError + + def _str_map( + self, f, na_value=None, dtype: NpDtype | None = None, convert: bool = True + ): + """ + Map a callable over valid elements of the array. + + Parameters + ---------- + f : Callable + A function to call on each non-NA element. + na_value : Scalar, optional + The value to set for NA values. Might also be used for the + fill value if the callable `f` raises an exception. + This defaults to ``self._str_na_value`` which is ``np.nan`` + for object-dtype and Categorical and ``pd.NA`` for StringArray. + dtype : Dtype, optional + The dtype of the result array. + convert : bool, default True + Whether to call `maybe_convert_objects` on the resulting ndarray + """ + if dtype is None: + dtype = np.dtype("object") + if na_value is None: + na_value = self._str_na_value + + if not len(self): + return np.array([], dtype=dtype) + + arr = np.asarray(self, dtype=object) + mask = isna(arr) + map_convert = convert and not np.all(mask) + try: + result = lib.map_infer_mask(arr, f, mask.view(np.uint8), map_convert) + except (TypeError, AttributeError) as err: + # Reraise the exception if callable `f` got wrong number of args. + # The user may want to be warned by this, instead of getting NaN + p_err = ( + r"((takes)|(missing)) (?(2)from \d+ to )?\d+ " + r"(?(3)required )positional arguments?" + ) + + if len(err.args) >= 1 and re.search(p_err, err.args[0]): + # FIXME: this should be totally avoidable + raise err + + def g(x): + # This type of fallback behavior can be removed once + # we remove object-dtype .str accessor. 
+ try: + return f(x) + except (TypeError, AttributeError): + return na_value + + return self._str_map(g, na_value=na_value, dtype=dtype) + if not isinstance(result, np.ndarray): + return result + if na_value is not np.nan: + np.putmask(result, mask, na_value) + if convert and result.dtype == object: + result = lib.maybe_convert_objects(result) + return result + + def _str_count(self, pat, flags: int = 0): + regex = re.compile(pat, flags=flags) + f = lambda x: len(regex.findall(x)) + return self._str_map(f, dtype="int64") + + def _str_pad( + self, + width: int, + side: Literal["left", "right", "both"] = "left", + fillchar: str = " ", + ): + if side == "left": + f = lambda x: x.rjust(width, fillchar) + elif side == "right": + f = lambda x: x.ljust(width, fillchar) + elif side == "both": + f = lambda x: x.center(width, fillchar) + else: # pragma: no cover + raise ValueError("Invalid side") + return self._str_map(f) + + def _str_contains( + self, pat, case: bool = True, flags: int = 0, na=np.nan, regex: bool = True + ): + if regex: + if not case: + flags |= re.IGNORECASE + + pat = re.compile(pat, flags=flags) + + f = lambda x: pat.search(x) is not None + else: + if case: + f = lambda x: pat in x + else: + upper_pat = pat.upper() + f = lambda x: upper_pat in x.upper() + return self._str_map(f, na, dtype=np.dtype("bool")) + + def _str_startswith(self, pat, na=None): + f = lambda x: x.startswith(pat) + return self._str_map(f, na_value=na, dtype=np.dtype(bool)) + + def _str_endswith(self, pat, na=None): + f = lambda x: x.endswith(pat) + return self._str_map(f, na_value=na, dtype=np.dtype(bool)) + + def _str_replace( + self, + pat: str | re.Pattern, + repl: str | Callable, + n: int = -1, + case: bool = True, + flags: int = 0, + regex: bool = True, + ): + if case is False: + # add case flag, if provided + flags |= re.IGNORECASE + + if regex or flags or callable(repl): + if not isinstance(pat, re.Pattern): + if regex is False: + pat = re.escape(pat) + pat = re.compile(pat, flags=flags) + + n = n if n >= 0 else 0 + f = lambda x: pat.sub(repl=repl, string=x, count=n) + else: + f = lambda x: x.replace(pat, repl, n) + + return self._str_map(f, dtype=str) + + def _str_repeat(self, repeats: int | Sequence[int]): + if lib.is_integer(repeats): + rint = cast(int, repeats) + + def scalar_rep(x): + try: + return bytes.__mul__(x, rint) + except TypeError: + return str.__mul__(x, rint) + + return self._str_map(scalar_rep, dtype=str) + else: + from pandas.core.arrays.string_ import BaseStringArray + + def rep(x, r): + if x is libmissing.NA: + return x + try: + return bytes.__mul__(x, r) + except TypeError: + return str.__mul__(x, r) + + result = libops.vec_binop( + np.asarray(self), + np.asarray(repeats, dtype=object), + rep, + ) + if isinstance(self, BaseStringArray): + # Not going through map, so we have to do this here. 
+            result = type(self)._from_sequence(result)
+        return result
+
+    def _str_match(
+        self, pat: str, case: bool = True, flags: int = 0, na: Scalar | None = None
+    ):
+        if not case:
+            flags |= re.IGNORECASE
+
+        regex = re.compile(pat, flags=flags)
+
+        f = lambda x: regex.match(x) is not None
+        return self._str_map(f, na_value=na, dtype=np.dtype(bool))
+
+    def _str_fullmatch(
+        self,
+        pat: str | re.Pattern,
+        case: bool = True,
+        flags: int = 0,
+        na: Scalar | None = None,
+    ):
+        if not case:
+            flags |= re.IGNORECASE
+
+        regex = re.compile(pat, flags=flags)
+
+        f = lambda x: regex.fullmatch(x) is not None
+        return self._str_map(f, na_value=na, dtype=np.dtype(bool))
+
+    def _str_encode(self, encoding, errors: str = "strict"):
+        f = lambda x: x.encode(encoding, errors=errors)
+        return self._str_map(f, dtype=object)
+
+    def _str_find(self, sub, start: int = 0, end=None):
+        return self._str_find_(sub, start, end, side="left")
+
+    def _str_rfind(self, sub, start: int = 0, end=None):
+        return self._str_find_(sub, start, end, side="right")
+
+    def _str_find_(self, sub, start, end, side):
+        if side == "left":
+            method = "find"
+        elif side == "right":
+            method = "rfind"
+        else:  # pragma: no cover
+            raise ValueError("Invalid side")
+
+        if end is None:
+            f = lambda x: getattr(x, method)(sub, start)
+        else:
+            f = lambda x: getattr(x, method)(sub, start, end)
+        return self._str_map(f, dtype="int64")
+
+    def _str_findall(self, pat, flags: int = 0):
+        regex = re.compile(pat, flags=flags)
+        return self._str_map(regex.findall, dtype="object")
+
+    def _str_get(self, i):
+        def f(x):
+            if isinstance(x, dict):
+                return x.get(i)
+            elif len(x) > i >= -len(x):
+                return x[i]
+            return self._str_na_value
+
+        return self._str_map(f)
+
+    def _str_index(self, sub, start: int = 0, end=None):
+        if end:
+            f = lambda x: x.index(sub, start, end)
+        else:
+            f = lambda x: x.index(sub, start)
+        return self._str_map(f, dtype="int64")
+
+    def _str_rindex(self, sub, start: int = 0, end=None):
+        if end:
+            f = lambda x: x.rindex(sub, start, end)
+        else:
+            f = lambda x: x.rindex(sub, start)
+        return self._str_map(f, dtype="int64")
+
+    def _str_join(self, sep: str):
+        return self._str_map(sep.join)
+
+    def _str_partition(self, sep: str, expand):
+        result = self._str_map(lambda x: x.partition(sep), dtype="object")
+        return result
+
+    def _str_rpartition(self, sep: str, expand):
+        return self._str_map(lambda x: x.rpartition(sep), dtype="object")
+
+    def _str_len(self):
+        return self._str_map(len, dtype="int64")
+
+    def _str_slice(self, start=None, stop=None, step=None):
+        obj = slice(start, stop, step)
+        return self._str_map(lambda x: x[obj])
+
+    def _str_slice_replace(self, start=None, stop=None, repl=None):
+        if repl is None:
+            repl = ""
+
+        def f(x):
+            if x[start:stop] == "":
+                local_stop = start
+            else:
+                local_stop = stop
+            y = ""
+            if start is not None:
+                y += x[:start]
+            y += repl
+            if stop is not None:
+                y += x[local_stop:]
+            return y
+
+        return self._str_map(f)
+
+    def _str_split(
+        self,
+        pat: str | re.Pattern | None = None,
+        n=-1,
+        expand: bool = False,
+        regex: bool | None = None,
+    ):
+        if pat is None:
+            if n is None or n == 0:
+                n = -1
+            f = lambda x: x.split(pat, n)
+        else:
+            new_pat: str | re.Pattern
+            if regex is True or isinstance(pat, re.Pattern):
+                new_pat = re.compile(pat)
+            elif regex is False:
+                new_pat = pat
+            # regex is None so link to old behavior #43563
+            else:
+                if len(pat) == 1:
+                    new_pat = pat
+                else:
+                    new_pat = re.compile(pat)
+
+            if isinstance(new_pat, re.Pattern):
+                if n is None or n == -1:
n = 0 + f = lambda x: new_pat.split(x, maxsplit=n) + else: + if n is None or n == 0: + n = -1 + f = lambda x: x.split(pat, n) + return self._str_map(f, dtype=object) + + def _str_rsplit(self, pat=None, n=-1): + if n is None or n == 0: + n = -1 + f = lambda x: x.rsplit(pat, n) + return self._str_map(f, dtype="object") + + def _str_translate(self, table): + return self._str_map(lambda x: x.translate(table)) + + def _str_wrap(self, width: int, **kwargs): + kwargs["width"] = width + tw = textwrap.TextWrapper(**kwargs) + return self._str_map(lambda s: "\n".join(tw.wrap(s))) + + def _str_get_dummies(self, sep: str = "|"): + from pandas import Series + + arr = Series(self).fillna("") + try: + arr = sep + arr + sep + except (TypeError, NotImplementedError): + arr = sep + arr.astype(str) + sep + + tags: set[str] = set() + for ts in Series(arr, copy=False).str.split(sep): + tags.update(ts) + tags2 = sorted(tags - {""}) + + dummies = np.empty((len(arr), len(tags2)), dtype=np.int64) + + def _isin(test_elements: str, element: str) -> bool: + return element in test_elements + + for i, t in enumerate(tags2): + pat = sep + t + sep + dummies[:, i] = lib.map_infer( + arr.to_numpy(), functools.partial(_isin, element=pat) + ) + return dummies, tags2 + + def _str_upper(self): + return self._str_map(lambda x: x.upper()) + + def _str_isalnum(self): + return self._str_map(str.isalnum, dtype="bool") + + def _str_isalpha(self): + return self._str_map(str.isalpha, dtype="bool") + + def _str_isdecimal(self): + return self._str_map(str.isdecimal, dtype="bool") + + def _str_isdigit(self): + return self._str_map(str.isdigit, dtype="bool") + + def _str_islower(self): + return self._str_map(str.islower, dtype="bool") + + def _str_isnumeric(self): + return self._str_map(str.isnumeric, dtype="bool") + + def _str_isspace(self): + return self._str_map(str.isspace, dtype="bool") + + def _str_istitle(self): + return self._str_map(str.istitle, dtype="bool") + + def _str_isupper(self): + return self._str_map(str.isupper, dtype="bool") + + def _str_capitalize(self): + return self._str_map(str.capitalize) + + def _str_casefold(self): + return self._str_map(str.casefold) + + def _str_title(self): + return self._str_map(str.title) + + def _str_swapcase(self): + return self._str_map(str.swapcase) + + def _str_lower(self): + return self._str_map(str.lower) + + def _str_normalize(self, form): + f = lambda x: unicodedata.normalize(form, x) + return self._str_map(f) + + def _str_strip(self, to_strip=None): + return self._str_map(lambda x: x.strip(to_strip)) + + def _str_lstrip(self, to_strip=None): + return self._str_map(lambda x: x.lstrip(to_strip)) + + def _str_rstrip(self, to_strip=None): + return self._str_map(lambda x: x.rstrip(to_strip)) + + def _str_removeprefix(self, prefix: str) -> Series: + # outstanding question on whether to use native methods for users on Python 3.9+ + # https://github.com/pandas-dev/pandas/pull/39226#issuecomment-836719770, + # in which case we could do return self._str_map(str.removeprefix) + + def removeprefix(text: str) -> str: + if text.startswith(prefix): + return text[len(prefix) :] + return text + + return self._str_map(removeprefix) + + def _str_removesuffix(self, suffix: str) -> Series: + return self._str_map(lambda x: x.removesuffix(suffix)) + + def _str_extract(self, pat: str, flags: int = 0, expand: bool = True): + regex = re.compile(pat, flags=flags) + na_value = self._str_na_value + + if not expand: + + def g(x): + m = regex.search(x) + return m.groups()[0] if m else na_value + + return 
self._str_map(g, convert=False) + + empty_row = [na_value] * regex.groups + + def f(x): + if not isinstance(x, str): + return empty_row + m = regex.search(x) + if m: + return [na_value if item is None else item for item in m.groups()] + else: + return empty_row + + return [f(val) for val in np.asarray(self)] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/datetimes.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/datetimes.py new file mode 100644 index 00000000..303136b7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/datetimes.py @@ -0,0 +1,1332 @@ +from __future__ import annotations + +from collections import abc +from datetime import date +from functools import partial +from itertools import islice +from typing import ( + TYPE_CHECKING, + Callable, + TypedDict, + Union, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + tslib, +) +from pandas._libs.tslibs import ( + OutOfBoundsDatetime, + Timedelta, + Timestamp, + astype_overflowsafe, + get_unit_from_dtype, + iNaT, + is_supported_unit, + nat_strings, + parsing, + timezones as libtimezones, +) +from pandas._libs.tslibs.conversion import precision_from_unit +from pandas._libs.tslibs.parsing import ( + DateParseError, + guess_datetime_format, +) +from pandas._libs.tslibs.strptime import array_strptime +from pandas._typing import ( + AnyArrayLike, + ArrayLike, + DateTimeErrorChoices, + npt, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + ensure_object, + is_float, + is_integer, + is_integer_dtype, + is_list_like, + is_numeric_dtype, +) +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + DatetimeTZDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.missing import notna + +from pandas.arrays import ( + DatetimeArray, + IntegerArray, + NumpyExtensionArray, +) +from pandas.core import algorithms +from pandas.core.algorithms import unique +from pandas.core.arrays import ArrowExtensionArray +from pandas.core.arrays.base import ExtensionArray +from pandas.core.arrays.datetimes import ( + maybe_convert_dtype, + objects_to_datetime64ns, + tz_to_dtype, +) +from pandas.core.construction import extract_array +from pandas.core.indexes.base import Index +from pandas.core.indexes.datetimes import DatetimeIndex + +if TYPE_CHECKING: + from collections.abc import Hashable + + from pandas._libs.tslibs.nattype import NaTType + from pandas._libs.tslibs.timedeltas import UnitChoices + + from pandas import ( + DataFrame, + Series, + ) + +# --------------------------------------------------------------------- +# types used in annotations + +ArrayConvertible = Union[list, tuple, AnyArrayLike] +Scalar = Union[float, str] +DatetimeScalar = Union[Scalar, date, np.datetime64] + +DatetimeScalarOrArrayConvertible = Union[DatetimeScalar, ArrayConvertible] + +DatetimeDictArg = Union[list[Scalar], tuple[Scalar, ...], AnyArrayLike] + + +class YearMonthDayDict(TypedDict, total=True): + year: DatetimeDictArg + month: DatetimeDictArg + day: DatetimeDictArg + + +class FulldatetimeDict(YearMonthDayDict, total=False): + hour: DatetimeDictArg + hours: DatetimeDictArg + minute: DatetimeDictArg + minutes: DatetimeDictArg + second: DatetimeDictArg + seconds: 
DatetimeDictArg
+    ms: DatetimeDictArg
+    us: DatetimeDictArg
+    ns: DatetimeDictArg
+
+
+DictConvertible = Union[FulldatetimeDict, "DataFrame"]
+start_caching_at = 50
+
+
+# ---------------------------------------------------------------------
+
+
+def _guess_datetime_format_for_array(arr, dayfirst: bool | None = False) -> str | None:
+    # Try to guess the format based on the first non-NaN element, return None if can't
+    if (first_non_null := tslib.first_non_null(arr)) != -1:
+        if type(first_non_nan_element := arr[first_non_null]) is str:
+            # GH#32264 np.str_ object
+            guessed_format = guess_datetime_format(
+                first_non_nan_element, dayfirst=dayfirst
+            )
+            if guessed_format is not None:
+                return guessed_format
+            # If there are multiple non-null elements, warn about
+            # how parsing might not be consistent
+            if tslib.first_non_null(arr[first_non_null + 1 :]) != -1:
+                warnings.warn(
+                    "Could not infer format, so each element will be parsed "
+                    "individually, falling back to `dateutil`. To ensure parsing is "
+                    "consistent and as-expected, please specify a format.",
+                    UserWarning,
+                    stacklevel=find_stack_level(),
+                )
+    return None
+
+
+def should_cache(
+    arg: ArrayConvertible, unique_share: float = 0.7, check_count: int | None = None
+) -> bool:
+    """
+    Decides whether to do caching.
+
+    If the percent of unique elements among `check_count` elements is less
+    than `unique_share * 100`, then we can do caching.
+
+    Parameters
+    ----------
+    arg: listlike, tuple, 1-d array, Series
+    unique_share: float, default=0.7, optional
+        0 < unique_share < 1
+    check_count: int, optional
+        0 <= check_count <= len(arg)
+
+    Returns
+    -------
+    do_caching: bool
+
+    Notes
+    -----
+    By default for a sequence of less than 50 items in size, we don't do
+    caching; for the number of elements less than 5000, we take ten percent of
+    all elements to check for a uniqueness share; if the sequence size is more
+    than 5000, then we check only the first 500 elements.
+    All constants were chosen empirically.
+    """
+    do_caching = True
+
+    # default realization
+    if check_count is None:
+        # in this case, the gain from caching is negligible
+        if len(arg) <= start_caching_at:
+            return False
+
+        if len(arg) <= 5000:
+            check_count = len(arg) // 10
+        else:
+            check_count = 500
+    else:
+        assert (
+            0 <= check_count <= len(arg)
+        ), "check_count must be in next bounds: [0; len(arg)]"
+        if check_count == 0:
+            return False
+
+    assert 0 < unique_share < 1, "unique_share must be in next bounds: (0; 1)"
+
+    try:
+        # We can't cache if the items are not hashable.
+        unique_elements = set(islice(arg, check_count))
+    except TypeError:
+        return False
+    if len(unique_elements) > check_count * unique_share:
+        do_caching = False
+    return do_caching
+
+
+def _maybe_cache(
+    arg: ArrayConvertible,
+    format: str | None,
+    cache: bool,
+    convert_listlike: Callable,
+) -> Series:
+    """
+    Create a cache of unique dates from an array of dates
+
+    Parameters
+    ----------
+    arg : listlike, tuple, 1-d array, Series
+    format : string
+        Strftime format to parse time
+    cache : bool
+        True attempts to create a cache of converted values
+    convert_listlike : function
+        Conversion function to apply on dates
+
+    Returns
+    -------
+    cache_array : Series
+        Cache of converted, unique dates.
Can be empty + """ + from pandas import Series + + cache_array = Series(dtype=object) + + if cache: + # Perform a quicker unique check + if not should_cache(arg): + return cache_array + + if not isinstance(arg, (np.ndarray, ExtensionArray, Index, ABCSeries)): + arg = np.array(arg) + + unique_dates = unique(arg) + if len(unique_dates) < len(arg): + cache_dates = convert_listlike(unique_dates, format) + # GH#45319 + try: + cache_array = Series(cache_dates, index=unique_dates, copy=False) + except OutOfBoundsDatetime: + return cache_array + # GH#39882 and GH#35888 in case of None and NaT we get duplicates + if not cache_array.index.is_unique: + cache_array = cache_array[~cache_array.index.duplicated()] + return cache_array + + +def _box_as_indexlike( + dt_array: ArrayLike, utc: bool = False, name: Hashable | None = None +) -> Index: + """ + Properly boxes the ndarray of datetimes to DatetimeIndex + if it is possible or to generic Index instead + + Parameters + ---------- + dt_array: 1-d array + Array of datetimes to be wrapped in an Index. + utc : bool + Whether to convert/localize timestamps to UTC. + name : string, default None + Name for a resulting index + + Returns + ------- + result : datetime of converted dates + - DatetimeIndex if convertible to sole datetime64 type + - general Index otherwise + """ + + if lib.is_np_dtype(dt_array.dtype, "M"): + tz = "utc" if utc else None + return DatetimeIndex(dt_array, tz=tz, name=name) + return Index(dt_array, name=name, dtype=dt_array.dtype) + + +def _convert_and_box_cache( + arg: DatetimeScalarOrArrayConvertible, + cache_array: Series, + name: Hashable | None = None, +) -> Index: + """ + Convert array of dates with a cache and wrap the result in an Index. + + Parameters + ---------- + arg : integer, float, string, datetime, list, tuple, 1-d array, Series + cache_array : Series + Cache of converted, unique dates + name : string, default None + Name for a DatetimeIndex + + Returns + ------- + result : Index-like of converted dates + """ + from pandas import Series + + result = Series(arg, dtype=cache_array.index.dtype).map(cache_array) + return _box_as_indexlike(result._values, utc=False, name=name) + + +def _return_parsed_timezone_results( + result: np.ndarray, timezones, utc: bool, name: str +) -> Index: + """ + Return results from array_strptime if a %z or %Z directive was passed. + + Parameters + ---------- + result : ndarray[int64] + int64 date representations of the dates + timezones : ndarray + pytz timezone objects + utc : bool + Whether to convert/localize timestamps to UTC. + name : string, default None + Name for a DatetimeIndex + + Returns + ------- + tz_result : Index-like of parsed dates with timezone + """ + tz_results = np.empty(len(result), dtype=object) + non_na_timezones = set() + for zone in unique(timezones): + mask = timezones == zone + dta = DatetimeArray(result[mask]).tz_localize(zone) + if utc: + if dta.tzinfo is None: + dta = dta.tz_localize("utc") + else: + dta = dta.tz_convert("utc") + else: + if not dta.isna().all(): + non_na_timezones.add(zone) + tz_results[mask] = dta + if len(non_na_timezones) > 1: + warnings.warn( + "In a future version of pandas, parsing datetimes with mixed time " + "zones will raise an error unless `utc=True`. Please specify `utc=True` " + "to opt in to the new behaviour and silence this warning. 
" + "To create a `Series` with mixed offsets and `object` dtype, " + "please use `apply` and `datetime.datetime.strptime`", + FutureWarning, + stacklevel=find_stack_level(), + ) + return Index(tz_results, name=name) + + +def _convert_listlike_datetimes( + arg, + format: str | None, + name: Hashable | None = None, + utc: bool = False, + unit: str | None = None, + errors: DateTimeErrorChoices = "raise", + dayfirst: bool | None = None, + yearfirst: bool | None = None, + exact: bool = True, +): + """ + Helper function for to_datetime. Performs the conversions of 1D listlike + of dates + + Parameters + ---------- + arg : list, tuple, ndarray, Series, Index + date to be parsed + name : object + None or string for the Index name + utc : bool + Whether to convert/localize timestamps to UTC. + unit : str + None or string of the frequency of the passed data + errors : str + error handing behaviors from to_datetime, 'raise', 'coerce', 'ignore' + dayfirst : bool + dayfirst parsing behavior from to_datetime + yearfirst : bool + yearfirst parsing behavior from to_datetime + exact : bool, default True + exact format matching behavior from to_datetime + + Returns + ------- + Index-like of parsed dates + """ + if isinstance(arg, (list, tuple)): + arg = np.array(arg, dtype="O") + elif isinstance(arg, NumpyExtensionArray): + arg = np.array(arg) + + arg_dtype = getattr(arg, "dtype", None) + # these are shortcutable + tz = "utc" if utc else None + if isinstance(arg_dtype, DatetimeTZDtype): + if not isinstance(arg, (DatetimeArray, DatetimeIndex)): + return DatetimeIndex(arg, tz=tz, name=name) + if utc: + arg = arg.tz_convert(None).tz_localize("utc") + return arg + + elif isinstance(arg_dtype, ArrowDtype) and arg_dtype.type is Timestamp: + # TODO: Combine with above if DTI/DTA supports Arrow timestamps + if utc: + # pyarrow uses UTC, not lowercase utc + if isinstance(arg, Index): + arg_array = cast(ArrowExtensionArray, arg.array) + if arg_dtype.pyarrow_dtype.tz is not None: + arg_array = arg_array._dt_tz_convert("UTC") + else: + arg_array = arg_array._dt_tz_localize("UTC") + arg = Index(arg_array) + else: + # ArrowExtensionArray + if arg_dtype.pyarrow_dtype.tz is not None: + arg = arg._dt_tz_convert("UTC") + else: + arg = arg._dt_tz_localize("UTC") + return arg + + elif lib.is_np_dtype(arg_dtype, "M"): + if not is_supported_unit(get_unit_from_dtype(arg_dtype)): + # We go to closest supported reso, i.e. 
"s" + arg = astype_overflowsafe( + # TODO: looks like we incorrectly raise with errors=="ignore" + np.asarray(arg), + np.dtype("M8[s]"), + is_coerce=errors == "coerce", + ) + + if not isinstance(arg, (DatetimeArray, DatetimeIndex)): + return DatetimeIndex(arg, tz=tz, name=name) + elif utc: + # DatetimeArray, DatetimeIndex + return arg.tz_localize("utc") + + return arg + + elif unit is not None: + if format is not None: + raise ValueError("cannot specify both format and unit") + return _to_datetime_with_unit(arg, unit, name, utc, errors) + elif getattr(arg, "ndim", 1) > 1: + raise TypeError( + "arg must be a string, datetime, list, tuple, 1-d array, or Series" + ) + + # warn if passing timedelta64, raise for PeriodDtype + # NB: this must come after unit transformation + try: + arg, _ = maybe_convert_dtype(arg, copy=False, tz=libtimezones.maybe_get_tz(tz)) + except TypeError: + if errors == "coerce": + npvalues = np.array(["NaT"], dtype="datetime64[ns]").repeat(len(arg)) + return DatetimeIndex(npvalues, name=name) + elif errors == "ignore": + idx = Index(arg, name=name) + return idx + raise + + arg = ensure_object(arg) + + if format is None: + format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst) + + # `format` could be inferred, or user didn't ask for mixed-format parsing. + if format is not None and format != "mixed": + return _array_strptime_with_fallback(arg, name, utc, format, exact, errors) + + result, tz_parsed = objects_to_datetime64ns( + arg, + dayfirst=dayfirst, + yearfirst=yearfirst, + utc=utc, + errors=errors, + allow_object=True, + ) + + if tz_parsed is not None: + # We can take a shortcut since the datetime64 numpy array + # is in UTC + dta = DatetimeArray(result, dtype=tz_to_dtype(tz_parsed)) + return DatetimeIndex._simple_new(dta, name=name) + + return _box_as_indexlike(result, utc=utc, name=name) + + +def _array_strptime_with_fallback( + arg, + name, + utc: bool, + fmt: str, + exact: bool, + errors: str, +) -> Index: + """ + Call array_strptime, with fallback behavior depending on 'errors'. + """ + result, timezones = array_strptime(arg, fmt, exact=exact, errors=errors, utc=utc) + if any(tz is not None for tz in timezones): + return _return_parsed_timezone_results(result, timezones, utc, name) + + return _box_as_indexlike(result, utc=utc, name=name) + + +def _to_datetime_with_unit(arg, unit, name, utc: bool, errors: str) -> Index: + """ + to_datetime specalized to the case where a 'unit' is passed. 
+ """ + arg = extract_array(arg, extract_numpy=True) + + # GH#30050 pass an ndarray to tslib.array_with_unit_to_datetime + # because it expects an ndarray argument + if isinstance(arg, IntegerArray): + arr = arg.astype(f"datetime64[{unit}]") + tz_parsed = None + else: + arg = np.asarray(arg) + + if arg.dtype.kind in "iu": + # Note we can't do "f" here because that could induce unwanted + # rounding GH#14156, GH#20445 + arr = arg.astype(f"datetime64[{unit}]", copy=False) + try: + arr = astype_overflowsafe(arr, np.dtype("M8[ns]"), copy=False) + except OutOfBoundsDatetime: + if errors == "raise": + raise + arg = arg.astype(object) + return _to_datetime_with_unit(arg, unit, name, utc, errors) + tz_parsed = None + + elif arg.dtype.kind == "f": + mult, _ = precision_from_unit(unit) + + mask = np.isnan(arg) | (arg == iNaT) + fvalues = (arg * mult).astype("f8", copy=False) + fvalues[mask] = 0 + + if (fvalues < Timestamp.min._value).any() or ( + fvalues > Timestamp.max._value + ).any(): + if errors != "raise": + arg = arg.astype(object) + return _to_datetime_with_unit(arg, unit, name, utc, errors) + raise OutOfBoundsDatetime(f"cannot convert input with unit '{unit}'") + + arr = fvalues.astype("M8[ns]", copy=False) + arr[mask] = np.datetime64("NaT", "ns") + + tz_parsed = None + else: + arg = arg.astype(object, copy=False) + arr, tz_parsed = tslib.array_with_unit_to_datetime(arg, unit, errors=errors) + + if errors == "ignore": + # Index constructor _may_ infer to DatetimeIndex + result = Index._with_infer(arr, name=name) + else: + result = DatetimeIndex(arr, name=name) + + if not isinstance(result, DatetimeIndex): + return result + + # GH#23758: We may still need to localize the result with tz + # GH#25546: Apply tz_parsed first (from arg), then tz (from caller) + # result will be naive but in UTC + result = result.tz_localize("UTC").tz_convert(tz_parsed) + + if utc: + if result.tz is None: + result = result.tz_localize("utc") + else: + result = result.tz_convert("utc") + return result + + +def _adjust_to_origin(arg, origin, unit): + """ + Helper function for to_datetime. 
+ Adjust input argument to the specified origin + + Parameters + ---------- + arg : list, tuple, ndarray, Series, Index + date to be adjusted + origin : 'julian' or Timestamp + origin offset for the arg + unit : str + passed unit from to_datetime, must be 'D' + + Returns + ------- + ndarray or scalar of adjusted date(s) + """ + if origin == "julian": + original = arg + j0 = Timestamp(0).to_julian_date() + if unit != "D": + raise ValueError("unit must be 'D' for origin='julian'") + try: + arg = arg - j0 + except TypeError as err: + raise ValueError( + "incompatible 'arg' type for given 'origin'='julian'" + ) from err + + # preemptively check this for a nice range + j_max = Timestamp.max.to_julian_date() - j0 + j_min = Timestamp.min.to_julian_date() - j0 + if np.any(arg > j_max) or np.any(arg < j_min): + raise OutOfBoundsDatetime( + f"{original} is Out of Bounds for origin='julian'" + ) + else: + # arg must be numeric + if not ( + (is_integer(arg) or is_float(arg)) or is_numeric_dtype(np.asarray(arg)) + ): + raise ValueError( + f"'{arg}' is not compatible with origin='{origin}'; " + "it must be numeric with a unit specified" + ) + + # we are going to offset back to unix / epoch time + try: + offset = Timestamp(origin, unit=unit) + except OutOfBoundsDatetime as err: + raise OutOfBoundsDatetime(f"origin {origin} is Out of Bounds") from err + except ValueError as err: + raise ValueError( + f"origin {origin} cannot be converted to a Timestamp" + ) from err + + if offset.tz is not None: + raise ValueError(f"origin offset {offset} must be tz-naive") + td_offset = offset - Timestamp(0) + + # convert the offset to the unit of the arg + # this should be lossless in terms of precision + ioffset = td_offset // Timedelta(1, unit=unit) + + # scalars & ndarray-like can handle the addition + if is_list_like(arg) and not isinstance(arg, (ABCSeries, Index, np.ndarray)): + arg = np.asarray(arg) + arg = arg + ioffset + return arg + + +@overload +def to_datetime( + arg: DatetimeScalar, + errors: DateTimeErrorChoices = ..., + dayfirst: bool = ..., + yearfirst: bool = ..., + utc: bool = ..., + format: str | None = ..., + exact: bool = ..., + unit: str | None = ..., + infer_datetime_format: bool = ..., + origin=..., + cache: bool = ..., +) -> Timestamp: + ... + + +@overload +def to_datetime( + arg: Series | DictConvertible, + errors: DateTimeErrorChoices = ..., + dayfirst: bool = ..., + yearfirst: bool = ..., + utc: bool = ..., + format: str | None = ..., + exact: bool = ..., + unit: str | None = ..., + infer_datetime_format: bool = ..., + origin=..., + cache: bool = ..., +) -> Series: + ... + + +@overload +def to_datetime( + arg: list | tuple | Index | ArrayLike, + errors: DateTimeErrorChoices = ..., + dayfirst: bool = ..., + yearfirst: bool = ..., + utc: bool = ..., + format: str | None = ..., + exact: bool = ..., + unit: str | None = ..., + infer_datetime_format: bool = ..., + origin=..., + cache: bool = ..., +) -> DatetimeIndex: + ... + + +def to_datetime( + arg: DatetimeScalarOrArrayConvertible | DictConvertible, + errors: DateTimeErrorChoices = "raise", + dayfirst: bool = False, + yearfirst: bool = False, + utc: bool = False, + format: str | None = None, + exact: bool | lib.NoDefault = lib.no_default, + unit: str | None = None, + infer_datetime_format: lib.NoDefault | bool = lib.no_default, + origin: str = "unix", + cache: bool = True, +) -> DatetimeIndex | Series | DatetimeScalar | NaTType | None: + """ + Convert argument to datetime. 
+
+    This function converts a scalar, array-like, :class:`Series` or
+    :class:`DataFrame`/dict-like to a pandas datetime object.
+
+    Parameters
+    ----------
+    arg : int, float, str, datetime, list, tuple, 1-d array, Series, DataFrame/dict-like
+        The object to convert to a datetime. If a :class:`DataFrame` is provided, the
+        method expects minimally the following columns: :const:`"year"`,
+        :const:`"month"`, :const:`"day"`. The column "year"
+        must be specified in 4-digit format.
+    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
+        - If :const:`'raise'`, then invalid parsing will raise an exception.
+        - If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT`.
+        - If :const:`'ignore'`, then invalid parsing will return the input.
+    dayfirst : bool, default False
+        Specify a date parse order if `arg` is str or is list-like.
+        If :const:`True`, parses dates with the day first, e.g. :const:`"10/11/12"`
+        is parsed as :const:`2012-11-10`.
+
+        .. warning::
+
+            ``dayfirst=True`` is not strict, but will prefer to parse
+            with day first.
+
+    yearfirst : bool, default False
+        Specify a date parse order if `arg` is str or is list-like.
+
+        - If :const:`True` parses dates with the year first, e.g.
+          :const:`"10/11/12"` is parsed as :const:`2010-11-12`.
+        - If both `dayfirst` and `yearfirst` are :const:`True`, `yearfirst`
+          takes precedence (same as :mod:`dateutil`).
+
+        .. warning::
+
+            ``yearfirst=True`` is not strict, but will prefer to parse
+            with year first.
+
+    utc : bool, default False
+        Control timezone-related parsing, localization and conversion.
+
+        - If :const:`True`, the function *always* returns a timezone-aware
+          UTC-localized :class:`Timestamp`, :class:`Series` or
+          :class:`DatetimeIndex`. To do this, timezone-naive inputs are
+          *localized* as UTC, while timezone-aware inputs are *converted* to UTC.
+
+        - If :const:`False` (default), inputs will not be coerced to UTC.
+          Timezone-naive inputs will remain naive, while timezone-aware ones
+          will keep their time offsets. Limitations exist for mixed
+          offsets (typically, daylight savings), see :ref:`Examples
+          <to_datetime_tz_examples>` section for details.
+
+        .. warning::
+
+            In a future version of pandas, parsing datetimes with mixed time
+            zones will raise an error unless `utc=True`.
+            Please specify `utc=True` to opt in to the new behaviour
+            and silence this warning. To create a `Series` with mixed offsets and
+            `object` dtype, please use `apply` and `datetime.datetime.strptime`.
+
+        See also: pandas general documentation about `timezone conversion and
+        localization
+        <https://pandas.pydata.org/docs/user_guide/timeseries.html#time-zone-handling>`_.
+
+    format : str, default None
+        The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See
+        `strftime documentation
+        <https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior>`_
+        for more information on choices, though
+        note that :const:`"%f"` will parse all the way up to nanoseconds.
+        You can also pass:
+
+        - "ISO8601", to parse any `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_
+          time string (not necessarily in exactly the same format);
+        - "mixed", to infer the format for each element individually. This is risky,
+          and you should probably use it along with `dayfirst`.
+
+        .. note::
+
+            If a :class:`DataFrame` is passed, then `format` has no effect.
+
+    exact : bool, default True
+        Control how `format` is used:
+
+        - If :const:`True`, require an exact `format` match.
+        - If :const:`False`, allow the `format` to match anywhere in the target
+          string.
+
+        Cannot be used alongside ``format='ISO8601'`` or ``format='mixed'``.
+    unit : str, default 'ns'
+        The unit of the arg (D,s,ms,us,ns) denotes the unit, which is an
+        integer or float number.
interpreted as this number of units,
+        counted from the `origin`. For example, with ``unit='ms'`` and
+        ``origin='unix'``, this would calculate the number of milliseconds
+        from the Unix epoch start.
+    infer_datetime_format : bool, default False
+        If :const:`True` and no `format` is given, attempt to infer the format
+        of the datetime strings based on the first non-NaN element,
+        and if it can be inferred, switch to a faster method of parsing them.
+        In some cases this can increase the parsing speed by ~5-10x.
+
+        .. deprecated:: 2.0.0
+            A strict version of this argument is now the default, passing it has
+            no effect.
+
+    origin : scalar, default 'unix'
+        Define the reference date. Numeric values are parsed as the number
+        of units (defined by `unit`) since this reference date.
+
+        - If :const:`'unix'` (POSIX time), origin is set to 1970-01-01.
+        - If :const:`'julian'`, unit must be :const:`'D'`, and origin is set to
+          the beginning of the Julian Calendar. Julian day number :const:`0` is
+          assigned to the day starting at noon on January 1, 4713 BC.
+        - If Timestamp convertible (Timestamp, dt.datetime, np.datetime64 or date
+          string), origin is set to the Timestamp identified by origin.
+        - If a float or integer, origin is the millisecond difference
+          relative to 1970-01-01.
+    cache : bool, default True
+        If :const:`True`, use a cache of unique, converted dates to apply the
+        datetime conversion. May produce significant speed-up when parsing
+        duplicate date strings, especially ones with timezone offsets. The cache
+        is only used when there are at least 50 values. The presence of
+        out-of-bounds values will render the cache unusable and may slow down
+        parsing.
+
+    Returns
+    -------
+    datetime
+        If parsing succeeded.
+        Return type depends on input (types in parentheses correspond to the
+        fallback in case of unsuccessful timezone or out-of-range timestamp
+        parsing):
+
+        - scalar: :class:`Timestamp` (or :class:`datetime.datetime`)
+        - array-like: :class:`DatetimeIndex` (or :class:`Series` with
+          :class:`object` dtype containing :class:`datetime.datetime`)
+        - Series: :class:`Series` of :class:`datetime64` dtype (or
+          :class:`Series` of :class:`object` dtype containing
+          :class:`datetime.datetime`)
+        - DataFrame: :class:`Series` of :class:`datetime64` dtype (or
+          :class:`Series` of :class:`object` dtype containing
+          :class:`datetime.datetime`)
+
+    Raises
+    ------
+    ParserError
+        When parsing a date from string fails.
+    ValueError
+        When another datetime conversion error happens. For example when one
+        of 'year', 'month', 'day' columns is missing in a :class:`DataFrame`, or
+        when a timezone-aware :class:`datetime.datetime` is found in an array-like
+        of mixed time offsets, and ``utc=False``.
+
+    See Also
+    --------
+    DataFrame.astype : Cast argument to a specified dtype.
+    to_timedelta : Convert argument to timedelta.
+    convert_dtypes : Convert dtypes.
+
+    Notes
+    -----
+
+    Many input types are supported, and lead to different output types:
+
+    - **scalars** can be int, float, str, datetime object (from stdlib :mod:`datetime`
+      module or :mod:`numpy`). They are converted to :class:`Timestamp` when
+      possible, otherwise they are converted to :class:`datetime.datetime`.
+      None/NaN/null scalars are converted to :const:`NaT`.
+
+    - **array-like** can contain int, float, str, datetime objects. They are
+      converted to :class:`DatetimeIndex` when possible, otherwise they are
+      converted to :class:`Index` with :class:`object` dtype, containing
+      :class:`datetime.datetime`. None/NaN/null entries are converted to
+      :const:`NaT` in both cases.
+
+    - **Series** are converted to :class:`Series` with :class:`datetime64`
+      dtype when possible, otherwise they are converted to :class:`Series` with
+      :class:`object` dtype, containing :class:`datetime.datetime`. None/NaN/null
+      entries are converted to :const:`NaT` in both cases.
+
+    - **DataFrame/dict-like** are converted to :class:`Series` with
+      :class:`datetime64` dtype. For each row a datetime is created from assembling
+      the various dataframe columns. Column keys can be common abbreviations
+      like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns'] or
+      plurals of the same.
+
+    The following causes are responsible for :class:`datetime.datetime` objects
+    being returned (possibly inside an :class:`Index` or a :class:`Series` with
+    :class:`object` dtype) instead of a proper pandas designated type
+    (:class:`Timestamp`, :class:`DatetimeIndex` or :class:`Series`
+    with :class:`datetime64` dtype):
+
+    - when any input element is before :const:`Timestamp.min` or after
+      :const:`Timestamp.max`, see `timestamp limitations
+      <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-timestamp-limits>`_.
+
+    - when ``utc=False`` (default) and the input is an array-like or
+      :class:`Series` containing mixed naive/aware datetime, or aware with mixed
+      time offsets. Note that this happens in the (quite frequent) situation when
+      the timezone has a daylight savings policy. In that case you may wish to
+      use ``utc=True``.
+
+    Examples
+    --------
+
+    **Handling various input formats**
+
+    Assembling a datetime from multiple columns of a :class:`DataFrame`. The keys
+    can be common abbreviations like ['year', 'month', 'day', 'minute', 'second',
+    'ms', 'us', 'ns'] or plurals of the same.
+
+    >>> df = pd.DataFrame({'year': [2015, 2016],
+    ...                    'month': [2, 3],
+    ...                    'day': [4, 5]})
+    >>> pd.to_datetime(df)
+    0   2015-02-04
+    1   2016-03-05
+    dtype: datetime64[ns]
+
+    Using a unix epoch time
+
+    >>> pd.to_datetime(1490195805, unit='s')
+    Timestamp('2017-03-22 15:16:45')
+    >>> pd.to_datetime(1490195805433502912, unit='ns')
+    Timestamp('2017-03-22 15:16:45.433502912')
+
+    .. warning:: For float arg, precision rounding might happen. To prevent
+        unexpected behavior use a fixed-width exact type.
+
+    Using a non-unix epoch origin
+
+    >>> pd.to_datetime([1, 2, 3], unit='D',
+    ...                origin=pd.Timestamp('1960-01-01'))
+    DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'],
+                  dtype='datetime64[ns]', freq=None)
+
+    **Differences with strptime behavior**
+
+    :const:`"%f"` will parse all the way up to nanoseconds.
+
+    >>> pd.to_datetime('2018-10-26 12:00:00.0000000011',
+    ...                format='%Y-%m-%d %H:%M:%S.%f')
+    Timestamp('2018-10-26 12:00:00.000000001')
+
+    **Non-convertible date/times**
+
+    If a date does not meet the `timestamp limitations
+    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-timestamp-limits>`_,
+    passing ``errors='ignore'``
+    will return the original input instead of raising any exception.
+
+    Passing ``errors='coerce'`` will force an out-of-bounds date to :const:`NaT`,
+    in addition to forcing non-dates (or non-parseable dates) to :const:`NaT`.
+
+    >>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
+    '13000101'
+    >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
+    NaT
+
+    ..
_to_datetime_tz_examples: + + **Timezones and time offsets** + + The default behaviour (``utc=False``) is as follows: + + - Timezone-naive inputs are converted to timezone-naive :class:`DatetimeIndex`: + + >>> pd.to_datetime(['2018-10-26 12:00:00', '2018-10-26 13:00:15']) + DatetimeIndex(['2018-10-26 12:00:00', '2018-10-26 13:00:15'], + dtype='datetime64[ns]', freq=None) + + - Timezone-aware inputs *with constant time offset* are converted to + timezone-aware :class:`DatetimeIndex`: + + >>> pd.to_datetime(['2018-10-26 12:00 -0500', '2018-10-26 13:00 -0500']) + DatetimeIndex(['2018-10-26 12:00:00-05:00', '2018-10-26 13:00:00-05:00'], + dtype='datetime64[ns, UTC-05:00]', freq=None) + + - However, timezone-aware inputs *with mixed time offsets* (for example + issued from a timezone with daylight savings, such as Europe/Paris) + are **not successfully converted** to a :class:`DatetimeIndex`. + Parsing datetimes with mixed time zones will show a warning unless + `utc=True`. If you specify `utc=False` the warning below will be shown + and a simple :class:`Index` containing :class:`datetime.datetime` + objects will be returned: + + >>> pd.to_datetime(['2020-10-25 02:00 +0200', + ... '2020-10-25 04:00 +0100']) # doctest: +SKIP + FutureWarning: In a future version of pandas, parsing datetimes with mixed + time zones will raise an error unless `utc=True`. Please specify `utc=True` + to opt in to the new behaviour and silence this warning. To create a `Series` + with mixed offsets and `object` dtype, please use `apply` and + `datetime.datetime.strptime`. + Index([2020-10-25 02:00:00+02:00, 2020-10-25 04:00:00+01:00], + dtype='object') + + - A mix of timezone-aware and timezone-naive inputs is also converted to + a simple :class:`Index` containing :class:`datetime.datetime` objects: + + >>> from datetime import datetime + >>> pd.to_datetime(["2020-01-01 01:00:00-01:00", + ... datetime(2020, 1, 1, 3, 0)]) # doctest: +SKIP + FutureWarning: In a future version of pandas, parsing datetimes with mixed + time zones will raise an error unless `utc=True`. Please specify `utc=True` + to opt in to the new behaviour and silence this warning. To create a `Series` + with mixed offsets and `object` dtype, please use `apply` and + `datetime.datetime.strptime`. + Index([2020-01-01 01:00:00-01:00, 2020-01-01 03:00:00], dtype='object') + + | + + Setting ``utc=True`` solves most of the above issues: + + - Timezone-naive inputs are *localized* as UTC + + >>> pd.to_datetime(['2018-10-26 12:00', '2018-10-26 13:00'], utc=True) + DatetimeIndex(['2018-10-26 12:00:00+00:00', '2018-10-26 13:00:00+00:00'], + dtype='datetime64[ns, UTC]', freq=None) + + - Timezone-aware inputs are *converted* to UTC (the output represents the + exact same datetime, but viewed from the UTC time offset `+00:00`). + + >>> pd.to_datetime(['2018-10-26 12:00 -0530', '2018-10-26 12:00 -0500'], + ... 
utc=True) + DatetimeIndex(['2018-10-26 17:30:00+00:00', '2018-10-26 17:00:00+00:00'], + dtype='datetime64[ns, UTC]', freq=None) + + - Inputs can contain both string or datetime, the above + rules still apply + + >>> pd.to_datetime(['2018-10-26 12:00', datetime(2020, 1, 1, 18)], utc=True) + DatetimeIndex(['2018-10-26 12:00:00+00:00', '2020-01-01 18:00:00+00:00'], + dtype='datetime64[ns, UTC]', freq=None) + """ + if exact is not lib.no_default and format in {"mixed", "ISO8601"}: + raise ValueError("Cannot use 'exact' when 'format' is 'mixed' or 'ISO8601'") + if infer_datetime_format is not lib.no_default: + warnings.warn( + "The argument 'infer_datetime_format' is deprecated and will " + "be removed in a future version. " + "A strict version of it is now the default, see " + "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. " + "You can safely remove this argument.", + stacklevel=find_stack_level(), + ) + if arg is None: + return None + + if origin != "unix": + arg = _adjust_to_origin(arg, origin, unit) + + convert_listlike = partial( + _convert_listlike_datetimes, + utc=utc, + unit=unit, + dayfirst=dayfirst, + yearfirst=yearfirst, + errors=errors, + exact=exact, + ) + # pylint: disable-next=used-before-assignment + result: Timestamp | NaTType | Series | Index + + if isinstance(arg, Timestamp): + result = arg + if utc: + if arg.tz is not None: + result = arg.tz_convert("utc") + else: + result = arg.tz_localize("utc") + elif isinstance(arg, ABCSeries): + cache_array = _maybe_cache(arg, format, cache, convert_listlike) + if not cache_array.empty: + result = arg.map(cache_array) + else: + values = convert_listlike(arg._values, format) + result = arg._constructor(values, index=arg.index, name=arg.name) + elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)): + result = _assemble_from_unit_mappings(arg, errors, utc) + elif isinstance(arg, Index): + cache_array = _maybe_cache(arg, format, cache, convert_listlike) + if not cache_array.empty: + result = _convert_and_box_cache(arg, cache_array, name=arg.name) + else: + result = convert_listlike(arg, format, name=arg.name) + elif is_list_like(arg): + try: + # error: Argument 1 to "_maybe_cache" has incompatible type + # "Union[float, str, datetime, List[Any], Tuple[Any, ...], ExtensionArray, + # ndarray[Any, Any], Series]"; expected "Union[List[Any], Tuple[Any, ...], + # Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series], Series]" + argc = cast( + Union[list, tuple, ExtensionArray, np.ndarray, "Series", Index], arg + ) + cache_array = _maybe_cache(argc, format, cache, convert_listlike) + except OutOfBoundsDatetime: + # caching attempts to create a DatetimeIndex, which may raise + # an OOB. If that's the desired behavior, then just reraise... + if errors == "raise": + raise + # ... otherwise, continue without the cache. + from pandas import Series + + cache_array = Series([], dtype=object) # just an empty array + if not cache_array.empty: + result = _convert_and_box_cache(argc, cache_array) + else: + result = convert_listlike(argc, format) + else: + result = convert_listlike(np.array([arg]), format)[0] + if isinstance(arg, bool) and isinstance(result, np.bool_): + result = bool(result) # TODO: avoid this kludge. 
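+        # Scalar inputs are handled by wrapping them in a 1-element array,
+        # converting via convert_listlike, and unwrapping the single result; a
+        # Python bool scalar would otherwise come back as np.bool_, hence the
+        # cast back to bool above.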
+ + # error: Incompatible return value type (got "Union[Timestamp, NaTType, + # Series, Index]", expected "Union[DatetimeIndex, Series, float, str, + # NaTType, None]") + return result # type: ignore[return-value] + + +# mappings for assembling units +_unit_map = { + "year": "year", + "years": "year", + "month": "month", + "months": "month", + "day": "day", + "days": "day", + "hour": "h", + "hours": "h", + "minute": "m", + "minutes": "m", + "second": "s", + "seconds": "s", + "ms": "ms", + "millisecond": "ms", + "milliseconds": "ms", + "us": "us", + "microsecond": "us", + "microseconds": "us", + "ns": "ns", + "nanosecond": "ns", + "nanoseconds": "ns", +} + + +def _assemble_from_unit_mappings(arg, errors: DateTimeErrorChoices, utc: bool): + """ + assemble the unit specified fields from the arg (DataFrame) + Return a Series for actual parsing + + Parameters + ---------- + arg : DataFrame + errors : {'ignore', 'raise', 'coerce'}, default 'raise' + + - If :const:`'raise'`, then invalid parsing will raise an exception + - If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT` + - If :const:`'ignore'`, then invalid parsing will return the input + utc : bool + Whether to convert/localize timestamps to UTC. + + Returns + ------- + Series + """ + from pandas import ( + DataFrame, + to_numeric, + to_timedelta, + ) + + arg = DataFrame(arg) + if not arg.columns.is_unique: + raise ValueError("cannot assemble with duplicate keys") + + # replace passed unit with _unit_map + def f(value): + if value in _unit_map: + return _unit_map[value] + + # m is case significant + if value.lower() in _unit_map: + return _unit_map[value.lower()] + + return value + + unit = {k: f(k) for k in arg.keys()} + unit_rev = {v: k for k, v in unit.items()} + + # we require at least Ymd + required = ["year", "month", "day"] + req = sorted(set(required) - set(unit_rev.keys())) + if len(req): + _required = ",".join(req) + raise ValueError( + "to assemble mappings requires at least that " + f"[year, month, day] be specified: [{_required}] is missing" + ) + + # keys we don't recognize + excess = sorted(set(unit_rev.keys()) - set(_unit_map.values())) + if len(excess): + _excess = ",".join(excess) + raise ValueError( + f"extra keys have been passed to the datetime assemblage: [{_excess}]" + ) + + def coerce(values): + # we allow coercion to if errors allows + values = to_numeric(values, errors=errors) + + # prevent overflow in case of int8 or int16 + if is_integer_dtype(values): + values = values.astype("int64", copy=False) + return values + + values = ( + coerce(arg[unit_rev["year"]]) * 10000 + + coerce(arg[unit_rev["month"]]) * 100 + + coerce(arg[unit_rev["day"]]) + ) + try: + values = to_datetime(values, format="%Y%m%d", errors=errors, utc=utc) + except (TypeError, ValueError) as err: + raise ValueError(f"cannot assemble the datetimes: {err}") from err + + units: list[UnitChoices] = ["h", "m", "s", "ms", "us", "ns"] + for u in units: + value = unit_rev.get(u) + if value is not None and value in arg: + try: + values += to_timedelta(coerce(arg[value]), unit=u, errors=errors) + except (TypeError, ValueError) as err: + raise ValueError( + f"cannot assemble the datetimes [{value}]: {err}" + ) from err + return values + + +def _attempt_YYYYMMDD(arg: npt.NDArray[np.object_], errors: str) -> np.ndarray | None: + """ + try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like, + arg is a passed in as an object dtype, but could really be ints/strings + with nan-like/or floats (e.g. 
with nan) + + Parameters + ---------- + arg : np.ndarray[object] + errors : {'raise','ignore','coerce'} + """ + + def calc(carg): + # calculate the actual result + carg = carg.astype(object, copy=False) + parsed = parsing.try_parse_year_month_day( + carg / 10000, carg / 100 % 100, carg % 100 + ) + return tslib.array_to_datetime(parsed, errors=errors)[0] + + def calc_with_mask(carg, mask): + result = np.empty(carg.shape, dtype="M8[ns]") + iresult = result.view("i8") + iresult[~mask] = iNaT + + masked_result = calc(carg[mask].astype(np.float64).astype(np.int64)) + result[mask] = masked_result.astype("M8[ns]") + return result + + # try intlike / strings that are ints + try: + return calc(arg.astype(np.int64)) + except (ValueError, OverflowError, TypeError): + pass + + # a float with actual np.nan + try: + carg = arg.astype(np.float64) + return calc_with_mask(carg, notna(carg)) + except (ValueError, OverflowError, TypeError): + pass + + # string with NaN-like + try: + mask = ~algorithms.isin(arg, list(nat_strings)) + return calc_with_mask(arg, mask) + except (ValueError, OverflowError, TypeError): + pass + + return None + + +__all__ = [ + "DateParseError", + "should_cache", + "to_datetime", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/numeric.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/numeric.py new file mode 100644 index 00000000..f1b14cdc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/numeric.py @@ -0,0 +1,319 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Literal, +) + +import numpy as np + +from pandas._libs import lib +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.cast import maybe_downcast_numeric +from pandas.core.dtypes.common import ( + ensure_object, + is_bool_dtype, + is_decimal, + is_integer_dtype, + is_number, + is_numeric_dtype, + is_scalar, + is_string_dtype, + needs_i8_conversion, +) +from pandas.core.dtypes.dtypes import ArrowDtype +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCSeries, +) + +from pandas.core.arrays import BaseMaskedArray +from pandas.core.arrays.string_ import StringDtype + +if TYPE_CHECKING: + from pandas._typing import ( + DateTimeErrorChoices, + DtypeBackend, + npt, + ) + + +def to_numeric( + arg, + errors: DateTimeErrorChoices = "raise", + downcast: Literal["integer", "signed", "unsigned", "float"] | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +): + """ + Convert argument to a numeric type. + + The default return dtype is `float64` or `int64` + depending on the data supplied. Use the `downcast` parameter + to obtain other dtypes. + + Please note that precision loss may occur if really large numbers + are passed in. Due to the internal limitations of `ndarray`, if + numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min) + or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are + passed in, it is very likely they will be converted to float so that + they can be stored in an `ndarray`. These warnings apply similarly to + `Series` since it internally leverages `ndarray`. + + Parameters + ---------- + arg : scalar, list, tuple, 1-d array, or Series + Argument to be converted. + errors : {'ignore', 'raise', 'coerce'}, default 'raise' + - If 'raise', then invalid parsing will raise an exception. + - If 'coerce', then invalid parsing will be set as NaN. + - If 'ignore', then invalid parsing will return the input. 
+ downcast : str, default None + Can be 'integer', 'signed', 'unsigned', or 'float'. + If not None, and if the data has been successfully cast to a + numerical dtype (or if the data was numeric to begin with), + downcast that resulting data to the smallest numerical dtype + possible according to the following rules: + + - 'integer' or 'signed': smallest signed int dtype (min.: np.int8) + - 'unsigned': smallest unsigned int dtype (min.: np.uint8) + - 'float': smallest float dtype (min.: np.float32) + + As this behaviour is separate from the core conversion to + numeric values, any errors raised during the downcasting + will be surfaced regardless of the value of the 'errors' input. + + In addition, downcasting will only occur if the size + of the resulting data's dtype is strictly larger than + the dtype it is to be cast to, so if none of the dtypes + checked satisfy that specification, no downcasting will be + performed on the data. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + Returns + ------- + ret + Numeric if parsing succeeded. + Return type depends on input. Series if Series, otherwise ndarray. + + See Also + -------- + DataFrame.astype : Cast argument to a specified dtype. + to_datetime : Convert argument to datetime. + to_timedelta : Convert argument to timedelta. + numpy.ndarray.astype : Cast a numpy array to a specified type. + DataFrame.convert_dtypes : Convert dtypes. + + Examples + -------- + Take separate series and convert to numeric, coercing when told to + + >>> s = pd.Series(['1.0', '2', -3]) + >>> pd.to_numeric(s) + 0 1.0 + 1 2.0 + 2 -3.0 + dtype: float64 + >>> pd.to_numeric(s, downcast='float') + 0 1.0 + 1 2.0 + 2 -3.0 + dtype: float32 + >>> pd.to_numeric(s, downcast='signed') + 0 1 + 1 2 + 2 -3 + dtype: int8 + >>> s = pd.Series(['apple', '1.0', '2', -3]) + >>> pd.to_numeric(s, errors='ignore') + 0 apple + 1 1.0 + 2 2 + 3 -3 + dtype: object + >>> pd.to_numeric(s, errors='coerce') + 0 NaN + 1 1.0 + 2 2.0 + 3 -3.0 + dtype: float64 + + Downcasting of nullable integer and floating dtypes is supported: + + >>> s = pd.Series([1, 2, 3], dtype="Int64") + >>> pd.to_numeric(s, downcast="integer") + 0 1 + 1 2 + 2 3 + dtype: Int8 + >>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64") + >>> pd.to_numeric(s, downcast="float") + 0 1.0 + 1 2.1 + 2 3.0 + dtype: Float32 + """ + if downcast not in (None, "integer", "signed", "unsigned", "float"): + raise ValueError("invalid downcasting method provided") + + if errors not in ("ignore", "raise", "coerce"): + raise ValueError("invalid error value specified") + + check_dtype_backend(dtype_backend) + + is_series = False + is_index = False + is_scalars = False + + if isinstance(arg, ABCSeries): + is_series = True + values = arg.values + elif isinstance(arg, ABCIndex): + is_index = True + if needs_i8_conversion(arg.dtype): + values = arg.view("i8") + else: + values = arg.values + elif isinstance(arg, (list, tuple)): + values = np.array(arg, dtype="O") + elif is_scalar(arg): + if is_decimal(arg): + return float(arg) + if is_number(arg): + return arg + is_scalars = True + values = np.array([arg], dtype="O") + elif getattr(arg, "ndim", 1) > 1: + raise TypeError("arg must be a list, tuple, 1-d array, or 
Series") + else: + values = arg + + orig_values = values + + # GH33013: for IntegerArray & FloatingArray extract non-null values for casting + # save mask to reconstruct the full array after casting + mask: npt.NDArray[np.bool_] | None = None + if isinstance(values, BaseMaskedArray): + mask = values._mask + values = values._data[~mask] + + values_dtype = getattr(values, "dtype", None) + if isinstance(values_dtype, ArrowDtype): + mask = values.isna() + values = values.dropna().to_numpy() + new_mask: np.ndarray | None = None + if is_numeric_dtype(values_dtype): + pass + elif lib.is_np_dtype(values_dtype, "mM"): + values = values.view(np.int64) + else: + values = ensure_object(values) + coerce_numeric = errors not in ("ignore", "raise") + try: + values, new_mask = lib.maybe_convert_numeric( # type: ignore[call-overload] # noqa: E501 + values, + set(), + coerce_numeric=coerce_numeric, + convert_to_masked_nullable=dtype_backend is not lib.no_default + or isinstance(values_dtype, StringDtype) + and not values_dtype.storage == "pyarrow_numpy", + ) + except (ValueError, TypeError): + if errors == "raise": + raise + values = orig_values + + if new_mask is not None: + # Remove unnecessary values, is expected later anyway and enables + # downcasting + values = values[~new_mask] + elif ( + dtype_backend is not lib.no_default + and new_mask is None + or isinstance(values_dtype, StringDtype) + and not values_dtype.storage == "pyarrow_numpy" + ): + new_mask = np.zeros(values.shape, dtype=np.bool_) + + # attempt downcast only if the data has been successfully converted + # to a numerical dtype and if a downcast method has been specified + if downcast is not None and is_numeric_dtype(values.dtype): + typecodes: str | None = None + + if downcast in ("integer", "signed"): + typecodes = np.typecodes["Integer"] + elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0): + typecodes = np.typecodes["UnsignedInteger"] + elif downcast == "float": + typecodes = np.typecodes["Float"] + + # pandas support goes only to np.float32, + # as float dtypes smaller than that are + # extremely rare and not well supported + float_32_char = np.dtype(np.float32).char + float_32_ind = typecodes.index(float_32_char) + typecodes = typecodes[float_32_ind:] + + if typecodes is not None: + # from smallest to largest + for typecode in typecodes: + dtype = np.dtype(typecode) + if dtype.itemsize <= values.dtype.itemsize: + values = maybe_downcast_numeric(values, dtype) + + # successful conversion + if values.dtype == dtype: + break + + # GH33013: for IntegerArray, BooleanArray & FloatingArray need to reconstruct + # masked array + if (mask is not None or new_mask is not None) and not is_string_dtype(values.dtype): + if mask is None or (new_mask is not None and new_mask.shape == mask.shape): + # GH 52588 + mask = new_mask + else: + mask = mask.copy() + assert isinstance(mask, np.ndarray) + data = np.zeros(mask.shape, dtype=values.dtype) + data[~mask] = values + + from pandas.core.arrays import ( + ArrowExtensionArray, + BooleanArray, + FloatingArray, + IntegerArray, + ) + + klass: type[IntegerArray] | type[BooleanArray] | type[FloatingArray] + if is_integer_dtype(data.dtype): + klass = IntegerArray + elif is_bool_dtype(data.dtype): + klass = BooleanArray + else: + klass = FloatingArray + values = klass(data, mask) + + if dtype_backend == "pyarrow" or isinstance(values_dtype, ArrowDtype): + values = ArrowExtensionArray(values.__arrow_array__()) + + if is_series: + return arg._constructor(values, index=arg.index, 
name=arg.name)
+    elif is_index:
+        # because we want to coerce to numeric if possible,
+        # do not use _shallow_copy
+        from pandas import Index
+
+        return Index(values, name=arg.name)
+    elif is_scalars:
+        return values[0]
+    else:
+        return values
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/timedeltas.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/timedeltas.py
new file mode 100644
index 00000000..3f2f832c
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/timedeltas.py
@@ -0,0 +1,283 @@
+"""
+timedelta support tools
+"""
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    overload,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs import lib
+from pandas._libs.tslibs import (
+    NaT,
+    NaTType,
+)
+from pandas._libs.tslibs.timedeltas import (
+    Timedelta,
+    parse_timedelta_unit,
+)
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.common import is_list_like
+from pandas.core.dtypes.dtypes import ArrowDtype
+from pandas.core.dtypes.generic import (
+    ABCIndex,
+    ABCSeries,
+)
+
+from pandas.core.arrays.timedeltas import sequence_to_td64ns
+
+if TYPE_CHECKING:
+    from collections.abc import Hashable
+    from datetime import timedelta
+
+    from pandas._libs.tslibs.timedeltas import UnitChoices
+    from pandas._typing import (
+        ArrayLike,
+        DateTimeErrorChoices,
+    )
+
+    from pandas import (
+        Index,
+        Series,
+        TimedeltaIndex,
+    )
+
+
+@overload
+def to_timedelta(
+    arg: str | float | timedelta,
+    unit: UnitChoices | None = ...,
+    errors: DateTimeErrorChoices = ...,
+) -> Timedelta:
+    ...
+
+
+@overload
+def to_timedelta(
+    arg: Series,
+    unit: UnitChoices | None = ...,
+    errors: DateTimeErrorChoices = ...,
+) -> Series:
+    ...
+
+
+@overload
+def to_timedelta(
+    arg: list | tuple | range | ArrayLike | Index,
+    unit: UnitChoices | None = ...,
+    errors: DateTimeErrorChoices = ...,
+) -> TimedeltaIndex:
+    ...
+
+
+def to_timedelta(
+    arg: str
+    | int
+    | float
+    | timedelta
+    | list
+    | tuple
+    | range
+    | ArrayLike
+    | Index
+    | Series,
+    unit: UnitChoices | None = None,
+    errors: DateTimeErrorChoices = "raise",
+) -> Timedelta | TimedeltaIndex | Series:
+    """
+    Convert argument to timedelta.
+
+    Timedeltas are absolute differences in times, expressed in different
+    units (e.g. days, hours, minutes, seconds). This method converts
+    an argument from a recognized timedelta format / value into
+    a Timedelta type.
+
+    Parameters
+    ----------
+    arg : str, timedelta, list-like or Series
+        The data to be converted to timedelta.
+
+        .. versionchanged:: 2.0
+            Strings with units 'M', 'Y' and 'y' do not represent
+            unambiguous timedelta values and will raise an exception.
+
+    unit : str, optional
+        Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``.
+
+        Possible values:
+
+        * 'W'
+        * 'D' / 'days' / 'day'
+        * 'hours' / 'hour' / 'hr' / 'h'
+        * 'm' / 'minute' / 'min' / 'minutes' / 'T'
+        * 'S' / 'seconds' / 'sec' / 'second'
+        * 'ms' / 'milliseconds' / 'millisecond' / 'milli' / 'millis' / 'L'
+        * 'us' / 'microseconds' / 'microsecond' / 'micro' / 'micros' / 'U'
+        * 'ns' / 'nanoseconds' / 'nano' / 'nanos' / 'nanosecond' / 'N'
+
+        Must not be specified when `arg` contains strings and ``errors="raise"``.
+
+        .. deprecated:: 2.1.0
+            Units 'T' and 'L' are deprecated and will be removed in a future version.
+
+    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
+        - If 'raise', then invalid parsing will raise an exception.
+        - If 'coerce', then invalid parsing will be set as NaT.
+        - If 'ignore', then invalid parsing will return the input.
+
+    Returns
+    -------
+    timedelta
+        If parsing succeeded.
+        Return type depends on input:
+
+        - list-like: TimedeltaIndex of timedelta64 dtype
+        - Series: Series of timedelta64 dtype
+        - scalar: Timedelta
+
+    See Also
+    --------
+    DataFrame.astype : Cast argument to a specified dtype.
+    to_datetime : Convert argument to datetime.
+    convert_dtypes : Convert dtypes.
+
+    Notes
+    -----
+    If the precision is higher than nanoseconds, the precision of the duration is
+    truncated to nanoseconds for string inputs.
+
+    Examples
+    --------
+    Parsing a single string to a Timedelta:
+
+    >>> pd.to_timedelta('1 days 06:05:01.00003')
+    Timedelta('1 days 06:05:01.000030')
+    >>> pd.to_timedelta('15.5us')
+    Timedelta('0 days 00:00:00.000015500')
+
+    Parsing a list or array of strings:
+
+    >>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan'])
+    TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT],
+                   dtype='timedelta64[ns]', freq=None)
+
+    Converting numbers by specifying the `unit` keyword argument:
+
+    >>> pd.to_timedelta(np.arange(5), unit='s')
+    TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02',
+                    '0 days 00:00:03', '0 days 00:00:04'],
+                   dtype='timedelta64[ns]', freq=None)
+    >>> pd.to_timedelta(np.arange(5), unit='d')
+    TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
+                   dtype='timedelta64[ns]', freq=None)
+    """
+    if unit in {"T", "t", "L", "l"}:
+        warnings.warn(
+            f"Unit '{unit}' is deprecated and will be removed in a future version.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+
+    if unit is not None:
+        unit = parse_timedelta_unit(unit)
+
+    if errors not in ("ignore", "raise", "coerce"):
+        raise ValueError("errors must be one of 'ignore', 'raise', or 'coerce'.")
+
+    if unit in {"Y", "y", "M"}:
+        raise ValueError(
+            "Units 'M', 'Y', and 'y' are no longer supported, as they do not "
+            "represent unambiguous timedelta durations."
+        )
+
+    if arg is None:
+        return arg
+    elif isinstance(arg, ABCSeries):
+        values = _convert_listlike(arg._values, unit=unit, errors=errors)
+        return arg._constructor(values, index=arg.index, name=arg.name)
+    elif isinstance(arg, ABCIndex):
+        return _convert_listlike(arg, unit=unit, errors=errors, name=arg.name)
+    elif isinstance(arg, np.ndarray) and arg.ndim == 0:
+        # extract array scalar and process below
+        # error: Incompatible types in assignment (expression has type "object",
+        # variable has type "Union[str, int, float, timedelta, List[Any],
+        # Tuple[Any, ...], Union[Union[ExtensionArray, ndarray[Any, Any]], Index,
+        # Series]]")  [assignment]
+        arg = lib.item_from_zerodim(arg)  # type: ignore[assignment]
+    elif is_list_like(arg) and getattr(arg, "ndim", 1) == 1:
+        return _convert_listlike(arg, unit=unit, errors=errors)
+    elif getattr(arg, "ndim", 1) > 1:
+        raise TypeError(
+            "arg must be a string, timedelta, list, tuple, 1-d array, or Series"
+        )
+
+    if isinstance(arg, str) and unit is not None:
+        raise ValueError("unit must not be specified if the input is/contains a str")

+    # ...so it must be a scalar value. Return scalar.
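+    # (a str such as "1 days 06:05:01.00003", a datetime.timedelta, or a
+    # number interpreted through `unit`)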
+ return _coerce_scalar_to_timedelta_type(arg, unit=unit, errors=errors) + + +def _coerce_scalar_to_timedelta_type( + r, unit: UnitChoices | None = "ns", errors: DateTimeErrorChoices = "raise" +): + """Convert string 'r' to a timedelta object.""" + result: Timedelta | NaTType + + try: + result = Timedelta(r, unit) + except ValueError: + if errors == "raise": + raise + if errors == "ignore": + return r + + # coerce + result = NaT + + return result + + +def _convert_listlike( + arg, + unit: UnitChoices | None = None, + errors: DateTimeErrorChoices = "raise", + name: Hashable | None = None, +): + """Convert a list of objects to a timedelta index object.""" + arg_dtype = getattr(arg, "dtype", None) + if isinstance(arg, (list, tuple)) or arg_dtype is None: + # This is needed only to ensure that in the case where we end up + # returning arg (errors == "ignore"), and where the input is a + # generator, we return a useful list-like instead of a + # used-up generator + if not hasattr(arg, "__array__"): + arg = list(arg) + arg = np.array(arg, dtype=object) + elif isinstance(arg_dtype, ArrowDtype) and arg_dtype.kind == "m": + return arg + + try: + td64arr = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0] + except ValueError: + if errors == "ignore": + return arg + else: + # This else-block accounts for the cases when errors='raise' + # and errors='coerce'. If errors == 'raise', these errors + # should be raised. If errors == 'coerce', we shouldn't + # expect any errors to be raised, since all parsing errors + # cause coercion to pd.NaT. However, if an error / bug is + # introduced that causes an Exception to be raised, we would + # like to surface it. + raise + + from pandas import TimedeltaIndex + + value = TimedeltaIndex(td64arr, unit="ns", name=name) + return value diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/times.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/times.py new file mode 100644 index 00000000..1b3a3ae1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/tools/times.py @@ -0,0 +1,157 @@ +from __future__ import annotations + +from datetime import ( + datetime, + time, +) +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs.lib import is_list_like + +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import notna + +if TYPE_CHECKING: + from pandas._typing import DateTimeErrorChoices + + +def to_time( + arg, + format: str | None = None, + infer_time_format: bool = False, + errors: DateTimeErrorChoices = "raise", +): + """ + Parse time strings to time objects using fixed strptime formats ("%H:%M", + "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", + "%I%M%S%p") + + Use infer_time_format if all the strings are in the same format to speed + up conversion. + + Parameters + ---------- + arg : string in time format, datetime.time, list, tuple, 1-d array, Series + format : str, default None + Format used to convert arg into a time object. If None, fixed formats + are used. + infer_time_format: bool, default False + Infer the time format based on the first non-NaN element. If all + strings are in the same format, this will speed up conversion. 
+ errors : {'ignore', 'raise', 'coerce'}, default 'raise' + - If 'raise', then invalid parsing will raise an exception + - If 'coerce', then invalid parsing will be set as None + - If 'ignore', then invalid parsing will return the input + + Returns + ------- + datetime.time + """ + + def _convert_listlike(arg, format): + if isinstance(arg, (list, tuple)): + arg = np.array(arg, dtype="O") + + elif getattr(arg, "ndim", 1) > 1: + raise TypeError( + "arg must be a string, datetime, list, tuple, 1-d array, or Series" + ) + + arg = np.asarray(arg, dtype="O") + + if infer_time_format and format is None: + format = _guess_time_format_for_array(arg) + + times: list[time | None] = [] + if format is not None: + for element in arg: + try: + times.append(datetime.strptime(element, format).time()) + except (ValueError, TypeError) as err: + if errors == "raise": + msg = ( + f"Cannot convert {element} to a time with given " + f"format {format}" + ) + raise ValueError(msg) from err + if errors == "ignore": + return arg + else: + times.append(None) + else: + formats = _time_formats[:] + format_found = False + for element in arg: + time_object = None + try: + time_object = time.fromisoformat(element) + except (ValueError, TypeError): + for time_format in formats: + try: + time_object = datetime.strptime(element, time_format).time() + if not format_found: + # Put the found format in front + fmt = formats.pop(formats.index(time_format)) + formats.insert(0, fmt) + format_found = True + break + except (ValueError, TypeError): + continue + + if time_object is not None: + times.append(time_object) + elif errors == "raise": + raise ValueError(f"Cannot convert arg {arg} to a time") + elif errors == "ignore": + return arg + else: + times.append(None) + + return times + + if arg is None: + return arg + elif isinstance(arg, time): + return arg + elif isinstance(arg, ABCSeries): + values = _convert_listlike(arg._values, format) + return arg._constructor(values, index=arg.index, name=arg.name) + elif isinstance(arg, ABCIndex): + return _convert_listlike(arg, format) + elif is_list_like(arg): + return _convert_listlike(arg, format) + + return _convert_listlike(np.array([arg]), format)[0] + + +# Fixed time formats for time parsing +_time_formats = [ + "%H:%M", + "%H%M", + "%I:%M%p", + "%I%M%p", + "%H:%M:%S", + "%H%M%S", + "%I:%M:%S%p", + "%I%M%S%p", +] + + +def _guess_time_format_for_array(arr): + # Try to guess the format based on the first non-NaN element + non_nan_elements = notna(arr).nonzero()[0] + if len(non_nan_elements): + element = arr[non_nan_elements[0]] + for time_format in _time_formats: + try: + datetime.strptime(element, time_format) + return time_format + except ValueError: + pass + + return None diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/util/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/util/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/util/hashing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/util/hashing.py new file mode 100644 index 00000000..4933de32 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/util/hashing.py @@ -0,0 +1,339 @@ +""" +data hash pandas / numpy objects +""" +from __future__ import annotations + +import itertools +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs.hashing import hash_object_array + +from pandas.core.dtypes.common import is_list_like +from pandas.core.dtypes.dtypes import CategoricalDtype +from 
pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCExtensionArray, + ABCIndex, + ABCMultiIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterable, + Iterator, + ) + + from pandas._typing import ( + ArrayLike, + npt, + ) + + from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + ) + + +# 16 byte long hashing key +_default_hash_key = "0123456789123456" + + +def combine_hash_arrays( + arrays: Iterator[np.ndarray], num_items: int +) -> npt.NDArray[np.uint64]: + """ + Parameters + ---------- + arrays : Iterator[np.ndarray] + num_items : int + + Returns + ------- + np.ndarray[uint64] + + Should be the same as CPython's tupleobject.c + """ + try: + first = next(arrays) + except StopIteration: + return np.array([], dtype=np.uint64) + + arrays = itertools.chain([first], arrays) + + mult = np.uint64(1000003) + out = np.zeros_like(first) + np.uint64(0x345678) + last_i = 0 + for i, a in enumerate(arrays): + inverse_i = num_items - i + out ^= a + out *= mult + mult += np.uint64(82520 + inverse_i + inverse_i) + last_i = i + assert last_i + 1 == num_items, "Fed in wrong num_items" + out += np.uint64(97531) + return out + + +def hash_pandas_object( + obj: Index | DataFrame | Series, + index: bool = True, + encoding: str = "utf8", + hash_key: str | None = _default_hash_key, + categorize: bool = True, +) -> Series: + """ + Return a data hash of the Index/Series/DataFrame. + + Parameters + ---------- + obj : Index, Series, or DataFrame + index : bool, default True + Include the index in the hash (if Series/DataFrame). + encoding : str, default 'utf8' + Encoding for data & key when strings. + hash_key : str, default _default_hash_key + Hash_key for string key to encode. + categorize : bool, default True + Whether to first categorize object arrays before hashing. This is more + efficient when the array contains duplicate values. 
+ + Returns + ------- + Series of uint64, same length as the object + + Examples + -------- + >>> pd.util.hash_pandas_object(pd.Series([1, 2, 3])) + 0 14639053686158035780 + 1 3869563279212530728 + 2 393322362522515241 + dtype: uint64 + """ + from pandas import Series + + if hash_key is None: + hash_key = _default_hash_key + + if isinstance(obj, ABCMultiIndex): + return Series(hash_tuples(obj, encoding, hash_key), dtype="uint64", copy=False) + + elif isinstance(obj, ABCIndex): + h = hash_array(obj._values, encoding, hash_key, categorize).astype( + "uint64", copy=False + ) + ser = Series(h, index=obj, dtype="uint64", copy=False) + + elif isinstance(obj, ABCSeries): + h = hash_array(obj._values, encoding, hash_key, categorize).astype( + "uint64", copy=False + ) + if index: + index_iter = ( + hash_pandas_object( + obj.index, + index=False, + encoding=encoding, + hash_key=hash_key, + categorize=categorize, + )._values + for _ in [None] + ) + arrays = itertools.chain([h], index_iter) + h = combine_hash_arrays(arrays, 2) + + ser = Series(h, index=obj.index, dtype="uint64", copy=False) + + elif isinstance(obj, ABCDataFrame): + hashes = ( + hash_array(series._values, encoding, hash_key, categorize) + for _, series in obj.items() + ) + num_items = len(obj.columns) + if index: + index_hash_generator = ( + hash_pandas_object( + obj.index, + index=False, + encoding=encoding, + hash_key=hash_key, + categorize=categorize, + )._values + for _ in [None] + ) + num_items += 1 + + # keep `hashes` specifically a generator to keep mypy happy + _hashes = itertools.chain(hashes, index_hash_generator) + hashes = (x for x in _hashes) + h = combine_hash_arrays(hashes, num_items) + + ser = Series(h, index=obj.index, dtype="uint64", copy=False) + else: + raise TypeError(f"Unexpected type for hashing {type(obj)}") + + return ser + + +def hash_tuples( + vals: MultiIndex | Iterable[tuple[Hashable, ...]], + encoding: str = "utf8", + hash_key: str = _default_hash_key, +) -> npt.NDArray[np.uint64]: + """ + Hash an MultiIndex / listlike-of-tuples efficiently. + + Parameters + ---------- + vals : MultiIndex or listlike-of-tuples + encoding : str, default 'utf8' + hash_key : str, default _default_hash_key + + Returns + ------- + ndarray[np.uint64] of hashed values + """ + if not is_list_like(vals): + raise TypeError("must be convertible to a list-of-tuples") + + from pandas import ( + Categorical, + MultiIndex, + ) + + if not isinstance(vals, ABCMultiIndex): + mi = MultiIndex.from_tuples(vals) + else: + mi = vals + + # create a list-of-Categoricals + cat_vals = [ + Categorical._simple_new( + mi.codes[level], + CategoricalDtype(categories=mi.levels[level], ordered=False), + ) + for level in range(mi.nlevels) + ] + + # hash the list-of-ndarrays + hashes = ( + cat._hash_pandas_object(encoding=encoding, hash_key=hash_key, categorize=False) + for cat in cat_vals + ) + h = combine_hash_arrays(hashes, len(cat_vals)) + + return h + + +def hash_array( + vals: ArrayLike, + encoding: str = "utf8", + hash_key: str = _default_hash_key, + categorize: bool = True, +) -> npt.NDArray[np.uint64]: + """ + Given a 1d array, return an array of deterministic integers. + + Parameters + ---------- + vals : ndarray or ExtensionArray + encoding : str, default 'utf8' + Encoding for data & key when strings. + hash_key : str, default _default_hash_key + Hash_key for string key to encode. + categorize : bool, default True + Whether to first categorize object arrays before hashing. This is more + efficient when the array contains duplicate values. 
+ + Returns + ------- + ndarray[np.uint64, ndim=1] + Hashed values, same length as the vals. + + Examples + -------- + >>> pd.util.hash_array(np.array([1, 2, 3])) + array([ 6238072747940578789, 15839785061582574730, 2185194620014831856], + dtype=uint64) + """ + if not hasattr(vals, "dtype"): + raise TypeError("must pass a ndarray-like") + + if isinstance(vals, ABCExtensionArray): + return vals._hash_pandas_object( + encoding=encoding, hash_key=hash_key, categorize=categorize + ) + + if not isinstance(vals, np.ndarray): + # GH#42003 + raise TypeError( + "hash_array requires np.ndarray or ExtensionArray, not " + f"{type(vals).__name__}. Use hash_pandas_object instead." + ) + + return _hash_ndarray(vals, encoding, hash_key, categorize) + + +def _hash_ndarray( + vals: np.ndarray, + encoding: str = "utf8", + hash_key: str = _default_hash_key, + categorize: bool = True, +) -> npt.NDArray[np.uint64]: + """ + See hash_array.__doc__. + """ + dtype = vals.dtype + + # _hash_ndarray only takes 64-bit values, so handle 128-bit by parts + if np.issubdtype(dtype, np.complex128): + hash_real = _hash_ndarray(vals.real, encoding, hash_key, categorize) + hash_imag = _hash_ndarray(vals.imag, encoding, hash_key, categorize) + return hash_real + 23 * hash_imag + + # First, turn whatever array this is into unsigned 64-bit ints, if we can + # manage it. + if dtype == bool: + vals = vals.astype("u8") + elif issubclass(dtype.type, (np.datetime64, np.timedelta64)): + vals = vals.view("i8").astype("u8", copy=False) + elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8: + vals = vals.view(f"u{vals.dtype.itemsize}").astype("u8") + else: + # With repeated values, its MUCH faster to categorize object dtypes, + # then hash and rename categories. We allow skipping the categorization + # when the values are known/likely to be unique. 
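+        # Categorizing first means each distinct object is hashed only once:
+        # factorize() maps the values to integer codes plus an array of unique
+        # categories, the categories are hashed, and the per-row hashes are
+        # then recovered from the codes (see the Categorical branch below).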
+ if categorize: + from pandas import ( + Categorical, + Index, + factorize, + ) + + codes, categories = factorize(vals, sort=False) + dtype = CategoricalDtype(categories=Index(categories), ordered=False) + cat = Categorical._simple_new(codes, dtype) + return cat._hash_pandas_object( + encoding=encoding, hash_key=hash_key, categorize=False + ) + + try: + vals = hash_object_array(vals, hash_key, encoding) + except TypeError: + # we have mixed types + vals = hash_object_array( + vals.astype(str).astype(object), hash_key, encoding + ) + + # Then, redistribute these 64-bit ints within the space of 64-bit ints + vals ^= vals >> 30 + vals *= np.uint64(0xBF58476D1CE4E5B9) + vals ^= vals >> 27 + vals *= np.uint64(0x94D049BB133111EB) + vals ^= vals >> 31 + return vals diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/util/numba_.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/util/numba_.py new file mode 100644 index 00000000..b8d48917 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/util/numba_.py @@ -0,0 +1,89 @@ +"""Common utilities for Numba operations""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Callable, +) + +from pandas.compat._optional import import_optional_dependency +from pandas.errors import NumbaUtilError + +GLOBAL_USE_NUMBA: bool = False + + +def maybe_use_numba(engine: str | None) -> bool: + """Signal whether to use numba routines.""" + return engine == "numba" or (engine is None and GLOBAL_USE_NUMBA) + + +def set_use_numba(enable: bool = False) -> None: + global GLOBAL_USE_NUMBA + if enable: + import_optional_dependency("numba") + GLOBAL_USE_NUMBA = enable + + +def get_jit_arguments( + engine_kwargs: dict[str, bool] | None = None, kwargs: dict | None = None +) -> dict[str, bool]: + """ + Return arguments to pass to numba.JIT, falling back on pandas default JIT settings. + + Parameters + ---------- + engine_kwargs : dict, default None + user passed keyword arguments for numba.JIT + kwargs : dict, default None + user passed keyword arguments to pass into the JITed function + + Returns + ------- + dict[str, bool] + nopython, nogil, parallel + + Raises + ------ + NumbaUtilError + """ + if engine_kwargs is None: + engine_kwargs = {} + + nopython = engine_kwargs.get("nopython", True) + if kwargs and nopython: + raise NumbaUtilError( + "numba does not support kwargs with nopython=True: " + "https://github.com/numba/numba/issues/2916" + ) + nogil = engine_kwargs.get("nogil", False) + parallel = engine_kwargs.get("parallel", False) + return {"nopython": nopython, "nogil": nogil, "parallel": parallel} + + +def jit_user_function(func: Callable) -> Callable: + """ + If user function is not jitted already, mark the user's function + as jitable. 
+ + Parameters + ---------- + func : function + user defined function + + Returns + ------- + function + Numba JITed function, or function marked as JITable by numba + """ + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + if numba.extending.is_jitted(func): + # Don't jit a user passed jitted function + numba_func = func + else: + numba_func = numba.extending.register_jitable(func) + + return numba_func diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/__init__.py new file mode 100644 index 00000000..857e12e5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/__init__.py @@ -0,0 +1,23 @@ +from pandas.core.window.ewm import ( + ExponentialMovingWindow, + ExponentialMovingWindowGroupby, +) +from pandas.core.window.expanding import ( + Expanding, + ExpandingGroupby, +) +from pandas.core.window.rolling import ( + Rolling, + RollingGroupby, + Window, +) + +__all__ = [ + "Expanding", + "ExpandingGroupby", + "ExponentialMovingWindow", + "ExponentialMovingWindowGroupby", + "Rolling", + "RollingGroupby", + "Window", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/common.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/common.py new file mode 100644 index 00000000..fc8eddca --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/common.py @@ -0,0 +1,169 @@ +"""Common utility functions for rolling operations""" +from __future__ import annotations + +from collections import defaultdict +from typing import cast + +import numpy as np + +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) + +from pandas.core.indexes.api import MultiIndex + + +def flex_binary_moment(arg1, arg2, f, pairwise: bool = False): + if isinstance(arg1, ABCSeries) and isinstance(arg2, ABCSeries): + X, Y = prep_binary(arg1, arg2) + return f(X, Y) + + elif isinstance(arg1, ABCDataFrame): + from pandas import DataFrame + + def dataframe_from_int_dict(data, frame_template) -> DataFrame: + result = DataFrame(data, index=frame_template.index) + if len(result.columns) > 0: + result.columns = frame_template.columns[result.columns] + else: + result.columns = frame_template.columns.copy() + return result + + results = {} + if isinstance(arg2, ABCDataFrame): + if pairwise is False: + if arg1 is arg2: + # special case in order to handle duplicate column names + for i in range(len(arg1.columns)): + results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i]) + return dataframe_from_int_dict(results, arg1) + else: + if not arg1.columns.is_unique: + raise ValueError("'arg1' columns are not unique") + if not arg2.columns.is_unique: + raise ValueError("'arg2' columns are not unique") + X, Y = arg1.align(arg2, join="outer") + X, Y = prep_binary(X, Y) + res_columns = arg1.columns.union(arg2.columns) + for col in res_columns: + if col in X and col in Y: + results[col] = f(X[col], Y[col]) + return DataFrame(results, index=X.index, columns=res_columns) + elif pairwise is True: + results = defaultdict(dict) + for i in range(len(arg1.columns)): + for j in range(len(arg2.columns)): + if j < i and arg2 is arg1: + # Symmetric case + results[i][j] = results[j][i] + else: + results[i][j] = f( + *prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]) + ) + + from pandas import concat + + result_index = arg1.index.union(arg2.index) + if len(result_index): + # construct result frame + result = concat( + [ + concat( + [results[i][j] for j in 
range(len(arg2.columns))], + ignore_index=True, + ) + for i in range(len(arg1.columns)) + ], + ignore_index=True, + axis=1, + ) + result.columns = arg1.columns + + # set the index and reorder + if arg2.columns.nlevels > 1: + # mypy needs to know columns is a MultiIndex, Index doesn't + # have levels attribute + arg2.columns = cast(MultiIndex, arg2.columns) + # GH 21157: Equivalent to MultiIndex.from_product( + # [result_index], , + # ) + # A normal MultiIndex.from_product will produce too many + # combinations. + result_level = np.tile( + result_index, len(result) // len(result_index) + ) + arg2_levels = ( + np.repeat( + arg2.columns.get_level_values(i), + len(result) // len(arg2.columns), + ) + for i in range(arg2.columns.nlevels) + ) + result_names = list(arg2.columns.names) + [result_index.name] + result.index = MultiIndex.from_arrays( + [*arg2_levels, result_level], names=result_names + ) + # GH 34440 + num_levels = len(result.index.levels) + new_order = [num_levels - 1] + list(range(num_levels - 1)) + result = result.reorder_levels(new_order).sort_index() + else: + result.index = MultiIndex.from_product( + [range(len(arg2.columns)), range(len(result_index))] + ) + result = result.swaplevel(1, 0).sort_index() + result.index = MultiIndex.from_product( + [result_index] + [arg2.columns] + ) + else: + # empty result + result = DataFrame( + index=MultiIndex( + levels=[arg1.index, arg2.columns], codes=[[], []] + ), + columns=arg2.columns, + dtype="float64", + ) + + # reset our index names to arg1 names + # reset our column names to arg2 names + # careful not to mutate the original names + result.columns = result.columns.set_names(arg1.columns.names) + result.index = result.index.set_names( + result_index.names + arg2.columns.names + ) + + return result + else: + results = { + i: f(*prep_binary(arg1.iloc[:, i], arg2)) + for i in range(len(arg1.columns)) + } + return dataframe_from_int_dict(results, arg1) + + else: + return flex_binary_moment(arg2, arg1, f) + + +def zsqrt(x): + with np.errstate(all="ignore"): + result = np.sqrt(x) + mask = x < 0 + + if isinstance(x, ABCDataFrame): + if mask._values.any(): + result[mask] = 0 + else: + if mask.any(): + result[mask] = 0 + + return result + + +def prep_binary(arg1, arg2): + # mask out values, this also makes a common index... + X = arg1 + 0 * arg2 + Y = arg2 + 0 * arg1 + + return X, Y diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/doc.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/doc.py new file mode 100644 index 00000000..2a5cbc04 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/doc.py @@ -0,0 +1,116 @@ +"""Any shareable docstring components for rolling/expanding/ewm""" +from __future__ import annotations + +from textwrap import dedent + +from pandas.core.shared_docs import _shared_docs + +_shared_docs = dict(**_shared_docs) + + +def create_section_header(header: str) -> str: + """Create numpydoc section header""" + return f"{header}\n{'-' * len(header)}\n" + + +template_header = "\nCalculate the {window_method} {aggregation_description}.\n\n" + +template_returns = dedent( + """ + Series or DataFrame + Return type is the same as the original object with ``np.float64`` dtype.\n + """ +).replace("\n", "", 1) + +template_see_also = dedent( + """ + pandas.Series.{window_method} : Calling {window_method} with Series data. + pandas.DataFrame.{window_method} : Calling {window_method} with DataFrames. + pandas.Series.{agg_method} : Aggregating {agg_method} for Series. 
+ pandas.DataFrame.{agg_method} : Aggregating {agg_method} for DataFrame.\n + """ +).replace("\n", "", 1) + +kwargs_numeric_only = dedent( + """ + numeric_only : bool, default False + Include only float, int, boolean columns. + + .. versionadded:: 1.5.0\n + """ +).replace("\n", "", 1) + +kwargs_scipy = dedent( + """ + **kwargs + Keyword arguments to configure the ``SciPy`` weighted window type.\n + """ +).replace("\n", "", 1) + +window_apply_parameters = dedent( + """ + func : function + Must produce a single value from an ndarray input if ``raw=True`` + or a single value from a Series if ``raw=False``. Can also accept a + Numba JIT function with ``engine='numba'`` specified. + + raw : bool, default False + * ``False`` : passes each row or column as a Series to the + function. + * ``True`` : the passed function will receive ndarray + objects instead. + If you are just applying a NumPy reduction function this will + achieve much better performance. + + engine : str, default None + * ``'cython'`` : Runs rolling apply through C-extensions from cython. + * ``'numba'`` : Runs rolling apply through JIT compiled code from numba. + Only available when ``raw`` is set to ``True``. + * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` + + engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be + applied to both the ``func`` and the ``apply`` rolling aggregation. + + args : tuple, default None + Positional arguments to be passed into func. + + kwargs : dict, default None + Keyword arguments to be passed into func.\n + """ +).replace("\n", "", 1) + +numba_notes = ( + "See :ref:`window.numba_engine` and :ref:`enhancingperf.numba` for " + "extended documentation and performance considerations for the Numba engine.\n\n" +) + + +def window_agg_numba_parameters(version: str = "1.3") -> str: + return ( + dedent( + """ + engine : str, default None + * ``'cython'`` : Runs the operation through C-extensions from cython. + * ``'numba'`` : Runs the operation through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` + + .. versionadded:: {version}.0 + + engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` + + .. 
versionadded:: {version}.0\n + """ + ) + .replace("\n", "", 1) + .replace("{version}", version) + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/ewm.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/ewm.py new file mode 100644 index 00000000..775f3cd4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/ewm.py @@ -0,0 +1,1085 @@ +from __future__ import annotations + +import datetime +from functools import partial +from textwrap import dedent +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs.tslibs import Timedelta +import pandas._libs.window.aggregations as window_aggregations +from pandas.util._decorators import doc + +from pandas.core.dtypes.common import ( + is_datetime64_ns_dtype, + is_numeric_dtype, +) +from pandas.core.dtypes.missing import isna + +from pandas.core import common +from pandas.core.indexers.objects import ( + BaseIndexer, + ExponentialMovingWindowIndexer, + GroupbyIndexer, +) +from pandas.core.util.numba_ import ( + get_jit_arguments, + maybe_use_numba, +) +from pandas.core.window.common import zsqrt +from pandas.core.window.doc import ( + _shared_docs, + create_section_header, + kwargs_numeric_only, + numba_notes, + template_header, + template_returns, + template_see_also, + window_agg_numba_parameters, +) +from pandas.core.window.numba_ import ( + generate_numba_ewm_func, + generate_numba_ewm_table_func, +) +from pandas.core.window.online import ( + EWMMeanState, + generate_online_numba_ewma_func, +) +from pandas.core.window.rolling import ( + BaseWindow, + BaseWindowGroupby, +) + +if TYPE_CHECKING: + from pandas._typing import ( + Axis, + TimedeltaConvertibleTypes, + ) + + from pandas import ( + DataFrame, + Series, + ) + from pandas.core.generic import NDFrame + + +def get_center_of_mass( + comass: float | None, + span: float | None, + halflife: float | None, + alpha: float | None, +) -> float: + valid_count = common.count_not_none(comass, span, halflife, alpha) + if valid_count > 1: + raise ValueError("comass, span, halflife, and alpha are mutually exclusive") + + # Convert to center of mass; domain checks ensure 0 < alpha <= 1 + if comass is not None: + if comass < 0: + raise ValueError("comass must satisfy: comass >= 0") + elif span is not None: + if span < 1: + raise ValueError("span must satisfy: span >= 1") + comass = (span - 1) / 2 + elif halflife is not None: + if halflife <= 0: + raise ValueError("halflife must satisfy: halflife > 0") + decay = 1 - np.exp(np.log(0.5) / halflife) + comass = 1 / decay - 1 + elif alpha is not None: + if alpha <= 0 or alpha > 1: + raise ValueError("alpha must satisfy: 0 < alpha <= 1") + comass = (1 - alpha) / alpha + else: + raise ValueError("Must pass one of comass, span, halflife, or alpha") + + return float(comass) + + +def _calculate_deltas( + times: np.ndarray | NDFrame, + halflife: float | TimedeltaConvertibleTypes | None, +) -> np.ndarray: + """ + Return the diff of the times divided by the half-life. These values are used in + the calculation of the ewm mean. + + Parameters + ---------- + times : np.ndarray, Series + Times corresponding to the observations. Must be monotonically increasing + and ``datetime64[ns]`` dtype. + halflife : float, str, timedelta, optional + Half-life specifying the decay + + Returns + ------- + np.ndarray + Diff of the times divided by the half-life + """ + _times = np.asarray(times.view(np.int64), dtype=np.float64) + # TODO: generalize to non-nano? 
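+    # Both quantities are reduced to integer nanoseconds below, so the
+    # returned deltas are dimensionless ratios: times spaced exactly one
+    # half-life apart yield deltas of 1.0. The ewm kernels raise the
+    # per-step decay factor to this power.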
+ _halflife = float(Timedelta(halflife).as_unit("ns")._value) + return np.diff(_times) / _halflife + + +class ExponentialMovingWindow(BaseWindow): + r""" + Provide exponentially weighted (EW) calculations. + + Exactly one of ``com``, ``span``, ``halflife``, or ``alpha`` must be + provided if ``times`` is not provided. If ``times`` is provided, + ``halflife`` and one of ``com``, ``span`` or ``alpha`` may be provided. + + Parameters + ---------- + com : float, optional + Specify decay in terms of center of mass + + :math:`\alpha = 1 / (1 + com)`, for :math:`com \geq 0`. + + span : float, optional + Specify decay in terms of span + + :math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`. + + halflife : float, str, timedelta, optional + Specify decay in terms of half-life + + :math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for + :math:`halflife > 0`. + + If ``times`` is specified, a timedelta convertible unit over which an + observation decays to half its value. Only applicable to ``mean()``, + and halflife value will not apply to the other functions. + + alpha : float, optional + Specify smoothing factor :math:`\alpha` directly + + :math:`0 < \alpha \leq 1`. + + min_periods : int, default 0 + Minimum number of observations in window required to have a value; + otherwise, result is ``np.nan``. + + adjust : bool, default True + Divide by decaying adjustment factor in beginning periods to account + for imbalance in relative weightings (viewing EWMA as a moving average). + + - When ``adjust=True`` (default), the EW function is calculated using weights + :math:`w_i = (1 - \alpha)^i`. For example, the EW moving average of the series + [:math:`x_0, x_1, ..., x_t`] would be: + + .. math:: + y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ... + (1 - + \alpha)^t x_0}{1 + (1 - \alpha) + (1 - \alpha)^2 + ... + (1 - \alpha)^t} + + - When ``adjust=False``, the exponentially weighted function is calculated + recursively: + + .. math:: + \begin{split} + y_0 &= x_0\\ + y_t &= (1 - \alpha) y_{t-1} + \alpha x_t, + \end{split} + ignore_na : bool, default False + Ignore missing values when calculating weights. + + - When ``ignore_na=False`` (default), weights are based on absolute positions. + For example, the weights of :math:`x_0` and :math:`x_2` used in calculating + the final weighted average of [:math:`x_0`, None, :math:`x_2`] are + :math:`(1-\alpha)^2` and :math:`1` if ``adjust=True``, and + :math:`(1-\alpha)^2` and :math:`\alpha` if ``adjust=False``. + + - When ``ignore_na=True``, weights are based + on relative positions. For example, the weights of :math:`x_0` and :math:`x_2` + used in calculating the final weighted average of + [:math:`x_0`, None, :math:`x_2`] are :math:`1-\alpha` and :math:`1` if + ``adjust=True``, and :math:`1-\alpha` and :math:`\alpha` if ``adjust=False``. + + axis : {0, 1}, default 0 + If ``0`` or ``'index'``, calculate across the rows. + + If ``1`` or ``'columns'``, calculate across the columns. + + For `Series` this parameter is unused and defaults to 0. + + times : np.ndarray, Series, default None + + Only applicable to ``mean()``. + + Times corresponding to the observations. Must be monotonically increasing and + ``datetime64[ns]`` dtype. + + If 1-D array like, a sequence with the same shape as the observations. + + method : str {'single', 'table'}, default 'single' + .. versionadded:: 1.4.0 + + Execute the rolling operation per single column or row (``'single'``) + or over the entire object (``'table'``). 
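+
+        With ``'table'``, the whole object is passed to a single JIT-compiled
+        kernel that keeps per-column state, rather than running one kernel
+        per column.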
+ + This argument is only implemented when specifying ``engine='numba'`` + in the method call. + + Only applicable to ``mean()`` + + Returns + ------- + pandas.api.typing.ExponentialMovingWindow + + See Also + -------- + rolling : Provides rolling window calculations. + expanding : Provides expanding transformations. + + Notes + ----- + See :ref:`Windowing Operations ` + for further usage details and examples. + + Examples + -------- + >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) + >>> df + B + 0 0.0 + 1 1.0 + 2 2.0 + 3 NaN + 4 4.0 + + >>> df.ewm(com=0.5).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.670213 + >>> df.ewm(alpha=2 / 3).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.670213 + + **adjust** + + >>> df.ewm(com=0.5, adjust=True).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.670213 + >>> df.ewm(com=0.5, adjust=False).mean() + B + 0 0.000000 + 1 0.666667 + 2 1.555556 + 3 1.555556 + 4 3.650794 + + **ignore_na** + + >>> df.ewm(com=0.5, ignore_na=True).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.225000 + >>> df.ewm(com=0.5, ignore_na=False).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.670213 + + **times** + + Exponentially weighted mean with weights calculated with a timedelta ``halflife`` + relative to ``times``. + + >>> times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17'] + >>> df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean() + B + 0 0.000000 + 1 0.585786 + 2 1.523889 + 3 1.523889 + 4 3.233686 + """ + + _attributes = [ + "com", + "span", + "halflife", + "alpha", + "min_periods", + "adjust", + "ignore_na", + "axis", + "times", + "method", + ] + + def __init__( + self, + obj: NDFrame, + com: float | None = None, + span: float | None = None, + halflife: float | TimedeltaConvertibleTypes | None = None, + alpha: float | None = None, + min_periods: int | None = 0, + adjust: bool = True, + ignore_na: bool = False, + axis: Axis = 0, + times: np.ndarray | NDFrame | None = None, + method: str = "single", + *, + selection=None, + ) -> None: + super().__init__( + obj=obj, + min_periods=1 if min_periods is None else max(int(min_periods), 1), + on=None, + center=False, + closed=None, + method=method, + axis=axis, + selection=selection, + ) + self.com = com + self.span = span + self.halflife = halflife + self.alpha = alpha + self.adjust = adjust + self.ignore_na = ignore_na + self.times = times + if self.times is not None: + if not self.adjust: + raise NotImplementedError("times is not supported with adjust=False.") + if not is_datetime64_ns_dtype(self.times): + raise ValueError("times must be datetime64[ns] dtype.") + if len(self.times) != len(obj): + raise ValueError("times must be the same length as the object.") + if not isinstance(self.halflife, (str, datetime.timedelta, np.timedelta64)): + raise ValueError("halflife must be a timedelta convertible object") + if isna(self.times).any(): + raise ValueError("Cannot convert NaT values to integer") + self._deltas = _calculate_deltas(self.times, self.halflife) + # Halflife is no longer applicable when calculating COM + # But allow COM to still be calculated if the user passes other decay args + if common.count_not_none(self.com, self.span, self.alpha) > 0: + self._com = get_center_of_mass(self.com, self.span, None, self.alpha) + else: + self._com = 1.0 + else: + if self.halflife is not None and isinstance( + self.halflife, (str, datetime.timedelta, np.timedelta64) + ): + raise ValueError( 
+ "halflife can only be a timedelta convertible argument if " + "times is not None." + ) + # Without times, points are equally spaced + self._deltas = np.ones( + max(self.obj.shape[self.axis] - 1, 0), dtype=np.float64 + ) + self._com = get_center_of_mass( + # error: Argument 3 to "get_center_of_mass" has incompatible type + # "Union[float, Any, None, timedelta64, signedinteger[_64Bit]]"; + # expected "Optional[float]" + self.com, + self.span, + self.halflife, # type: ignore[arg-type] + self.alpha, + ) + + def _check_window_bounds( + self, start: np.ndarray, end: np.ndarray, num_vals: int + ) -> None: + # emw algorithms are iterative with each point + # ExponentialMovingWindowIndexer "bounds" are the entire window + pass + + def _get_window_indexer(self) -> BaseIndexer: + """ + Return an indexer class that will compute the window start and end bounds + """ + return ExponentialMovingWindowIndexer() + + def online( + self, engine: str = "numba", engine_kwargs=None + ) -> OnlineExponentialMovingWindow: + """ + Return an ``OnlineExponentialMovingWindow`` object to calculate + exponentially moving window aggregations in an online method. + + .. versionadded:: 1.3.0 + + Parameters + ---------- + engine: str, default ``'numba'`` + Execution engine to calculate online aggregations. + Applies to all supported aggregation methods. + + engine_kwargs : dict, default None + Applies to all supported aggregation methods. + + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be + applied to the function + + Returns + ------- + OnlineExponentialMovingWindow + """ + return OnlineExponentialMovingWindow( + obj=self.obj, + com=self.com, + span=self.span, + halflife=self.halflife, + alpha=self.alpha, + min_periods=self.min_periods, + adjust=self.adjust, + ignore_na=self.ignore_na, + axis=self.axis, + times=self.times, + engine=engine, + engine_kwargs=engine_kwargs, + selection=self._selection, + ) + + @doc( + _shared_docs["aggregate"], + see_also=dedent( + """ + See Also + -------- + pandas.DataFrame.rolling.aggregate + """ + ), + examples=dedent( + """ + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + >>> df + A B C + 0 1 4 7 + 1 2 5 8 + 2 3 6 9 + + >>> df.ewm(alpha=0.5).mean() + A B C + 0 1.000000 4.000000 7.000000 + 1 1.666667 4.666667 7.666667 + 2 2.428571 5.428571 8.428571 + """ + ), + klass="Series/Dataframe", + axis="", + ) + def aggregate(self, func, *args, **kwargs): + return super().aggregate(func, *args, **kwargs) + + agg = aggregate + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4]) + >>> ser.ewm(alpha=.2).mean() + 0 1.000000 + 1 1.555556 + 2 2.147541 + 3 2.775068 + dtype: float64 + """ + ), + window_method="ewm", + aggregation_description="(exponential weighted moment) mean", + agg_method="mean", + ) + def mean( + self, + numeric_only: bool = False, + engine=None, + engine_kwargs=None, + ): + if maybe_use_numba(engine): + if self.method == "single": + func = generate_numba_ewm_func + else: + func 
= generate_numba_ewm_table_func + ewm_func = func( + **get_jit_arguments(engine_kwargs), + com=self._com, + adjust=self.adjust, + ignore_na=self.ignore_na, + deltas=tuple(self._deltas), + normalize=True, + ) + return self._apply(ewm_func, name="mean") + elif engine in ("cython", None): + if engine_kwargs is not None: + raise ValueError("cython engine does not accept engine_kwargs") + + deltas = None if self.times is None else self._deltas + window_func = partial( + window_aggregations.ewm, + com=self._com, + adjust=self.adjust, + ignore_na=self.ignore_na, + deltas=deltas, + normalize=True, + ) + return self._apply(window_func, name="mean", numeric_only=numeric_only) + else: + raise ValueError("engine must be either 'numba' or 'cython'") + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4]) + >>> ser.ewm(alpha=.2).sum() + 0 1.000 + 1 2.800 + 2 5.240 + 3 8.192 + dtype: float64 + """ + ), + window_method="ewm", + aggregation_description="(exponential weighted moment) sum", + agg_method="sum", + ) + def sum( + self, + numeric_only: bool = False, + engine=None, + engine_kwargs=None, + ): + if not self.adjust: + raise NotImplementedError("sum is not implemented with adjust=False") + if maybe_use_numba(engine): + if self.method == "single": + func = generate_numba_ewm_func + else: + func = generate_numba_ewm_table_func + ewm_func = func( + **get_jit_arguments(engine_kwargs), + com=self._com, + adjust=self.adjust, + ignore_na=self.ignore_na, + deltas=tuple(self._deltas), + normalize=False, + ) + return self._apply(ewm_func, name="sum") + elif engine in ("cython", None): + if engine_kwargs is not None: + raise ValueError("cython engine does not accept engine_kwargs") + + deltas = None if self.times is None else self._deltas + window_func = partial( + window_aggregations.ewm, + com=self._com, + adjust=self.adjust, + ignore_na=self.ignore_na, + deltas=deltas, + normalize=False, + ) + return self._apply(window_func, name="sum", numeric_only=numeric_only) + else: + raise ValueError("engine must be either 'numba' or 'cython'") + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """\ + bias : bool, default False + Use a standard estimation bias correction. 
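+
+            ``std`` is computed as the square root of ``var`` with the same
+            ``bias`` setting, so the correction is applied to the variance
+            before the root is taken.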
+ """ + ), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4]) + >>> ser.ewm(alpha=.2).std() + 0 NaN + 1 0.707107 + 2 0.995893 + 3 1.277320 + dtype: float64 + """ + ), + window_method="ewm", + aggregation_description="(exponential weighted moment) standard deviation", + agg_method="std", + ) + def std(self, bias: bool = False, numeric_only: bool = False): + if ( + numeric_only + and self._selected_obj.ndim == 1 + and not is_numeric_dtype(self._selected_obj.dtype) + ): + # Raise directly so error message says std instead of var + raise NotImplementedError( + f"{type(self).__name__}.std does not implement numeric_only" + ) + return zsqrt(self.var(bias=bias, numeric_only=numeric_only)) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """\ + bias : bool, default False + Use a standard estimation bias correction. + """ + ), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4]) + >>> ser.ewm(alpha=.2).var() + 0 NaN + 1 0.500000 + 2 0.991803 + 3 1.631547 + dtype: float64 + """ + ), + window_method="ewm", + aggregation_description="(exponential weighted moment) variance", + agg_method="var", + ) + def var(self, bias: bool = False, numeric_only: bool = False): + window_func = window_aggregations.ewmcov + wfunc = partial( + window_func, + com=self._com, + adjust=self.adjust, + ignore_na=self.ignore_na, + bias=bias, + ) + + def var_func(values, begin, end, min_periods): + return wfunc(values, begin, end, min_periods, values) + + return self._apply(var_func, name="var", numeric_only=numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """\ + other : Series or DataFrame , optional + If not supplied then will default to self and produce pairwise + output. + pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndex DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. + bias : bool, default False + Use a standard estimation bias correction. 
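+
+            With ``bias=False`` the weighted variance is rescaled to undo the
+            downward bias of the weighted estimator, analogous to the
+            ``N - 1`` correction of an ordinary sample variance.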
+ """ + ), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser1 = pd.Series([1, 2, 3, 4]) + >>> ser2 = pd.Series([10, 11, 13, 16]) + >>> ser1.ewm(alpha=.2).cov(ser2) + 0 NaN + 1 0.500000 + 2 1.524590 + 3 3.408836 + dtype: float64 + """ + ), + window_method="ewm", + aggregation_description="(exponential weighted moment) sample covariance", + agg_method="cov", + ) + def cov( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + bias: bool = False, + numeric_only: bool = False, + ): + from pandas import Series + + self._validate_numeric_only("cov", numeric_only) + + def cov_func(x, y): + x_array = self._prep_values(x) + y_array = self._prep_values(y) + window_indexer = self._get_window_indexer() + min_periods = ( + self.min_periods + if self.min_periods is not None + else window_indexer.window_size + ) + start, end = window_indexer.get_window_bounds( + num_values=len(x_array), + min_periods=min_periods, + center=self.center, + closed=self.closed, + step=self.step, + ) + result = window_aggregations.ewmcov( + x_array, + start, + end, + # error: Argument 4 to "ewmcov" has incompatible type + # "Optional[int]"; expected "int" + self.min_periods, # type: ignore[arg-type] + y_array, + self._com, + self.adjust, + self.ignore_na, + bias, + ) + return Series(result, index=x.index, name=x.name, copy=False) + + return self._apply_pairwise( + self._selected_obj, other, pairwise, cov_func, numeric_only + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """\ + other : Series or DataFrame, optional + If not supplied then will default to self and produce pairwise + output. + pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndex DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. 
+ """ + ), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser1 = pd.Series([1, 2, 3, 4]) + >>> ser2 = pd.Series([10, 11, 13, 16]) + >>> ser1.ewm(alpha=.2).corr(ser2) + 0 NaN + 1 1.000000 + 2 0.982821 + 3 0.977802 + dtype: float64 + """ + ), + window_method="ewm", + aggregation_description="(exponential weighted moment) sample correlation", + agg_method="corr", + ) + def corr( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + numeric_only: bool = False, + ): + from pandas import Series + + self._validate_numeric_only("corr", numeric_only) + + def cov_func(x, y): + x_array = self._prep_values(x) + y_array = self._prep_values(y) + window_indexer = self._get_window_indexer() + min_periods = ( + self.min_periods + if self.min_periods is not None + else window_indexer.window_size + ) + start, end = window_indexer.get_window_bounds( + num_values=len(x_array), + min_periods=min_periods, + center=self.center, + closed=self.closed, + step=self.step, + ) + + def _cov(X, Y): + return window_aggregations.ewmcov( + X, + start, + end, + min_periods, + Y, + self._com, + self.adjust, + self.ignore_na, + True, + ) + + with np.errstate(all="ignore"): + cov = _cov(x_array, y_array) + x_var = _cov(x_array, x_array) + y_var = _cov(y_array, y_array) + result = cov / zsqrt(x_var * y_var) + return Series(result, index=x.index, name=x.name, copy=False) + + return self._apply_pairwise( + self._selected_obj, other, pairwise, cov_func, numeric_only + ) + + +class ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow): + """ + Provide an exponential moving window groupby implementation. + """ + + _attributes = ExponentialMovingWindow._attributes + BaseWindowGroupby._attributes + + def __init__(self, obj, *args, _grouper=None, **kwargs) -> None: + super().__init__(obj, *args, _grouper=_grouper, **kwargs) + + if not obj.empty and self.times is not None: + # sort the times and recalculate the deltas according to the groups + groupby_order = np.concatenate(list(self._grouper.indices.values())) + self._deltas = _calculate_deltas( + self.times.take(groupby_order), + self.halflife, + ) + + def _get_window_indexer(self) -> GroupbyIndexer: + """ + Return an indexer class that will compute the window start and end bounds + + Returns + ------- + GroupbyIndexer + """ + window_indexer = GroupbyIndexer( + groupby_indices=self._grouper.indices, + window_indexer=ExponentialMovingWindowIndexer, + ) + return window_indexer + + +class OnlineExponentialMovingWindow(ExponentialMovingWindow): + def __init__( + self, + obj: NDFrame, + com: float | None = None, + span: float | None = None, + halflife: float | TimedeltaConvertibleTypes | None = None, + alpha: float | None = None, + min_periods: int | None = 0, + adjust: bool = True, + ignore_na: bool = False, + axis: Axis = 0, + times: np.ndarray | NDFrame | None = None, + engine: str = "numba", + engine_kwargs: dict[str, bool] | None = None, + *, + selection=None, + ) -> None: + if times is not None: + raise NotImplementedError( + "times is not implemented with online operations." 
+ ) + super().__init__( + obj=obj, + com=com, + span=span, + halflife=halflife, + alpha=alpha, + min_periods=min_periods, + adjust=adjust, + ignore_na=ignore_na, + axis=axis, + times=times, + selection=selection, + ) + self._mean = EWMMeanState( + self._com, self.adjust, self.ignore_na, self.axis, obj.shape + ) + if maybe_use_numba(engine): + self.engine = engine + self.engine_kwargs = engine_kwargs + else: + raise ValueError("'numba' is the only supported engine") + + def reset(self) -> None: + """ + Reset the state captured by `update` calls. + """ + self._mean.reset() + + def aggregate(self, func, *args, **kwargs): + raise NotImplementedError("aggregate is not implemented.") + + def std(self, bias: bool = False, *args, **kwargs): + raise NotImplementedError("std is not implemented.") + + def corr( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + numeric_only: bool = False, + ): + raise NotImplementedError("corr is not implemented.") + + def cov( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + bias: bool = False, + numeric_only: bool = False, + ): + raise NotImplementedError("cov is not implemented.") + + def var(self, bias: bool = False, numeric_only: bool = False): + raise NotImplementedError("var is not implemented.") + + def mean(self, *args, update=None, update_times=None, **kwargs): + """ + Calculate an online exponentially weighted mean. + + Parameters + ---------- + update: DataFrame or Series, default None + New values to continue calculating the + exponentially weighted mean from the last values and weights. + Values should be float64 dtype. + + ``update`` needs to be ``None`` the first time the + exponentially weighted mean is calculated. + + update_times: Series or 1-D np.ndarray, default None + New times to continue calculating the + exponentially weighted mean from the last values and weights. + If ``None``, values are assumed to be evenly spaced + in time. + This feature is currently unsupported. 
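+            Passing anything other than ``None`` currently raises
+            ``NotImplementedError``.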
+ + Returns + ------- + DataFrame or Series + + Examples + -------- + >>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)}) + >>> online_ewm = df.head(2).ewm(0.5).online() + >>> online_ewm.mean() + a b + 0 0.00 5.00 + 1 0.75 5.75 + >>> online_ewm.mean(update=df.tail(3)) + a b + 2 1.615385 6.615385 + 3 2.550000 7.550000 + 4 3.520661 8.520661 + >>> online_ewm.reset() + >>> online_ewm.mean() + a b + 0 0.00 5.00 + 1 0.75 5.75 + """ + result_kwargs = {} + is_frame = self._selected_obj.ndim == 2 + if update_times is not None: + raise NotImplementedError("update_times is not implemented.") + update_deltas = np.ones( + max(self._selected_obj.shape[self.axis - 1] - 1, 0), dtype=np.float64 + ) + if update is not None: + if self._mean.last_ewm is None: + raise ValueError( + "Must call mean with update=None first before passing update" + ) + result_from = 1 + result_kwargs["index"] = update.index + if is_frame: + last_value = self._mean.last_ewm[np.newaxis, :] + result_kwargs["columns"] = update.columns + else: + last_value = self._mean.last_ewm + result_kwargs["name"] = update.name + np_array = np.concatenate((last_value, update.to_numpy())) + else: + result_from = 0 + result_kwargs["index"] = self._selected_obj.index + if is_frame: + result_kwargs["columns"] = self._selected_obj.columns + else: + result_kwargs["name"] = self._selected_obj.name + np_array = self._selected_obj.astype(np.float64).to_numpy() + ewma_func = generate_online_numba_ewma_func( + **get_jit_arguments(self.engine_kwargs) + ) + result = self._mean.run_ewm( + np_array if is_frame else np_array[:, np.newaxis], + update_deltas, + self.min_periods, + ewma_func, + ) + if not is_frame: + result = result.squeeze() + result = result[result_from:] + result = self._selected_obj._constructor(result, **result_kwargs) + return result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/expanding.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/expanding.py new file mode 100644 index 00000000..aac10596 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/expanding.py @@ -0,0 +1,964 @@ +from __future__ import annotations + +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, +) + +from pandas.util._decorators import ( + deprecate_kwarg, + doc, +) + +from pandas.core.indexers.objects import ( + BaseIndexer, + ExpandingIndexer, + GroupbyIndexer, +) +from pandas.core.window.doc import ( + _shared_docs, + create_section_header, + kwargs_numeric_only, + numba_notes, + template_header, + template_returns, + template_see_also, + window_agg_numba_parameters, + window_apply_parameters, +) +from pandas.core.window.rolling import ( + BaseWindowGroupby, + RollingAndExpandingMixin, +) + +if TYPE_CHECKING: + from pandas._typing import ( + Axis, + QuantileInterpolation, + WindowingRankType, + ) + + from pandas import ( + DataFrame, + Series, + ) + from pandas.core.generic import NDFrame + + +class Expanding(RollingAndExpandingMixin): + """ + Provide expanding window calculations. + + Parameters + ---------- + min_periods : int, default 1 + Minimum number of observations in window required to have a value; + otherwise, result is ``np.nan``. + + axis : int or str, default 0 + If ``0`` or ``'index'``, roll across the rows. + + If ``1`` or ``'columns'``, roll across the columns. + + For `Series` this parameter is unused and defaults to 0. 
+ + method : str {'single', 'table'}, default 'single' + Execute the rolling operation per single column or row (``'single'``) + or over the entire object (``'table'``). + + This argument is only implemented when specifying ``engine='numba'`` + in the method call. + + .. versionadded:: 1.3.0 + + Returns + ------- + pandas.api.typing.Expanding + + See Also + -------- + rolling : Provides rolling window calculations. + ewm : Provides exponential weighted functions. + + Notes + ----- + See :ref:`Windowing Operations ` for further usage details + and examples. + + Examples + -------- + >>> df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]}) + >>> df + B + 0 0.0 + 1 1.0 + 2 2.0 + 3 NaN + 4 4.0 + + **min_periods** + + Expanding sum with 1 vs 3 observations needed to calculate a value. + + >>> df.expanding(1).sum() + B + 0 0.0 + 1 1.0 + 2 3.0 + 3 3.0 + 4 7.0 + >>> df.expanding(3).sum() + B + 0 NaN + 1 NaN + 2 3.0 + 3 3.0 + 4 7.0 + """ + + _attributes: list[str] = ["min_periods", "axis", "method"] + + def __init__( + self, + obj: NDFrame, + min_periods: int = 1, + axis: Axis = 0, + method: str = "single", + selection=None, + ) -> None: + super().__init__( + obj=obj, + min_periods=min_periods, + axis=axis, + method=method, + selection=selection, + ) + + def _get_window_indexer(self) -> BaseIndexer: + """ + Return an indexer class that will compute the window start and end bounds + """ + return ExpandingIndexer() + + @doc( + _shared_docs["aggregate"], + see_also=dedent( + """ + See Also + -------- + pandas.DataFrame.aggregate : Similar DataFrame method. + pandas.Series.aggregate : Similar Series method. + """ + ), + examples=dedent( + """ + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + >>> df + A B C + 0 1 4 7 + 1 2 5 8 + 2 3 6 9 + + >>> df.ewm(alpha=0.5).mean() + A B C + 0 1.000000 4.000000 7.000000 + 1 1.666667 4.666667 7.666667 + 2 2.428571 5.428571 8.428571 + """ + ), + klass="Series/Dataframe", + axis="", + ) + def aggregate(self, func, *args, **kwargs): + return super().aggregate(func, *args, **kwargs) + + agg = aggregate + + @doc( + template_header, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) + >>> ser.expanding().count() + a 1.0 + b 2.0 + c 3.0 + d 4.0 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="count of non NaN observations", + agg_method="count", + ) + def count(self, numeric_only: bool = False): + return super().count(numeric_only=numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + window_apply_parameters, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) + >>> ser.expanding().apply(lambda s: s.max() - 2 * s.min()) + a -1.0 + b 0.0 + c 1.0 + d 2.0 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="custom aggregation function", + agg_method="apply", + ) + def apply( + self, + func: Callable[..., Any], + raw: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + args: tuple[Any, ...] 
| None = None, + kwargs: dict[str, Any] | None = None, + ): + return super().apply( + func, + raw=raw, + engine=engine, + engine_kwargs=engine_kwargs, + args=args, + kwargs=kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) + >>> ser.expanding().sum() + a 1.0 + b 3.0 + c 6.0 + d 10.0 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="sum", + agg_method="sum", + ) + def sum( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().sum( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([3, 2, 1, 4], index=['a', 'b', 'c', 'd']) + >>> ser.expanding().max() + a 3.0 + b 3.0 + c 3.0 + d 4.0 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="maximum", + agg_method="max", + ) + def max( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().max( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([2, 3, 4, 1], index=['a', 'b', 'c', 'd']) + >>> ser.expanding().min() + a 2.0 + b 2.0 + c 2.0 + d 1.0 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="minimum", + agg_method="min", + ) + def min( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().min( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) + >>> ser.expanding().mean() + a 1.0 + b 1.5 + c 2.0 + d 2.5 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="mean", + agg_method="mean", + ) + def mean( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().mean( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + 
template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) + >>> ser.expanding().median() + a 1.0 + b 1.5 + c 2.0 + d 2.5 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="median", + agg_method="median", + ) + def median( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().median( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements.\n + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + window_agg_numba_parameters("1.4"), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "numpy.std : Equivalent method for NumPy array.\n", + template_see_also, + create_section_header("Notes"), + dedent( + """ + The default ``ddof`` of 1 used in :meth:`Series.std` is different + than the default ``ddof`` of 0 in :func:`numpy.std`. + + A minimum of one period is required for the rolling calculation.\n + """ + ).replace("\n", "", 1), + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5]) + + >>> s.expanding(3).std() + 0 NaN + 1 NaN + 2 0.577350 + 3 0.957427 + 4 0.894427 + 5 0.836660 + 6 0.786796 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="expanding", + aggregation_description="standard deviation", + agg_method="std", + ) + def std( + self, + ddof: int = 1, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().std( + ddof=ddof, + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements.\n + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + window_agg_numba_parameters("1.4"), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "numpy.var : Equivalent method for NumPy array.\n", + template_see_also, + create_section_header("Notes"), + dedent( + """ + The default ``ddof`` of 1 used in :meth:`Series.var` is different + than the default ``ddof`` of 0 in :func:`numpy.var`. 
+ + A minimum of one period is required for the rolling calculation.\n + """ + ).replace("\n", "", 1), + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5]) + + >>> s.expanding(3).var() + 0 NaN + 1 NaN + 2 0.333333 + 3 0.916667 + 4 0.800000 + 5 0.700000 + 6 0.619048 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="expanding", + aggregation_description="variance", + agg_method="var", + ) + def var( + self, + ddof: int = 1, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().var( + ddof=ddof, + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements.\n + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + "A minimum of one period is required for the calculation.\n\n", + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([0, 1, 2, 3]) + + >>> s.expanding().sem() + 0 NaN + 1 0.707107 + 2 0.707107 + 3 0.745356 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="expanding", + aggregation_description="standard error of mean", + agg_method="sem", + ) + def sem(self, ddof: int = 1, numeric_only: bool = False): + return super().sem(ddof=ddof, numeric_only=numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "scipy.stats.skew : Third moment of a probability density.\n", + template_see_also, + create_section_header("Notes"), + "A minimum of three periods is required for the rolling calculation.\n\n", + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([-1, 0, 2, -1, 2], index=['a', 'b', 'c', 'd', 'e']) + >>> ser.expanding().skew() + a NaN + b NaN + c 0.935220 + d 1.414214 + e 0.315356 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="unbiased skewness", + agg_method="skew", + ) + def skew(self, numeric_only: bool = False): + return super().skew(numeric_only=numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "scipy.stats.kurtosis : Reference SciPy method.\n", + template_see_also, + create_section_header("Notes"), + "A minimum of four periods is required for the calculation.\n\n", + create_section_header("Examples"), + dedent( + """ + The example below will show a rolling calculation with a window size of + four matching the equivalent function call using `scipy.stats`. 
+ + >>> arr = [1, 2, 3, 4, 999] + >>> import scipy.stats + >>> print(f"{{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}}") + -1.200000 + >>> print(f"{{scipy.stats.kurtosis(arr, bias=False):.6f}}") + 4.999874 + >>> s = pd.Series(arr) + >>> s.expanding(4).kurt() + 0 NaN + 1 NaN + 2 NaN + 3 -1.200000 + 4 4.999874 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="expanding", + aggregation_description="Fisher's definition of kurtosis without bias", + agg_method="kurt", + ) + def kurt(self, numeric_only: bool = False): + return super().kurt(numeric_only=numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + quantile : float + Quantile to compute. 0 <= quantile <= 1. + + .. deprecated:: 2.1.0 + This will be renamed to 'q' in a future version. + interpolation : {{'linear', 'lower', 'higher', 'midpoint', 'nearest'}} + This optional parameter specifies the interpolation method to use, + when the desired quantile lies between two data points `i` and `j`: + + * linear: `i + (j - i) * fraction`, where `fraction` is the + fractional part of the index surrounded by `i` and `j`. + * lower: `i`. + * higher: `j`. + * nearest: `i` or `j` whichever is nearest. + * midpoint: (`i` + `j`) / 2. + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4, 5, 6], index=['a', 'b', 'c', 'd', 'e', 'f']) + >>> ser.expanding(min_periods=4).quantile(.25) + a NaN + b NaN + c NaN + d 1.75 + e 2.00 + f 2.25 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="quantile", + agg_method="quantile", + ) + @deprecate_kwarg(old_arg_name="quantile", new_arg_name="q") + def quantile( + self, + q: float, + interpolation: QuantileInterpolation = "linear", + numeric_only: bool = False, + ): + return super().quantile( + q=q, + interpolation=interpolation, + numeric_only=numeric_only, + ) + + @doc( + template_header, + ".. versionadded:: 1.4.0 \n\n", + create_section_header("Parameters"), + dedent( + """ + method : {{'average', 'min', 'max'}}, default 'average' + How to rank the group of records that have the same value (i.e. ties): + + * average: average rank of the group + * min: lowest rank in the group + * max: highest rank in the group + + ascending : bool, default True + Whether or not the elements should be ranked in ascending order. + pct : bool, default False + Whether or not to display the returned rankings in percentile + form. 
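+
+            Percentages are computed relative to the number of observations
+            seen so far in the expanding window.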
+ """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([1, 4, 2, 3, 5, 3]) + >>> s.expanding().rank() + 0 1.0 + 1 2.0 + 2 2.0 + 3 3.0 + 4 5.0 + 5 3.5 + dtype: float64 + + >>> s.expanding().rank(method="max") + 0 1.0 + 1 2.0 + 2 2.0 + 3 3.0 + 4 5.0 + 5 4.0 + dtype: float64 + + >>> s.expanding().rank(method="min") + 0 1.0 + 1 2.0 + 2 2.0 + 3 3.0 + 4 5.0 + 5 3.0 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="expanding", + aggregation_description="rank", + agg_method="rank", + ) + def rank( + self, + method: WindowingRankType = "average", + ascending: bool = True, + pct: bool = False, + numeric_only: bool = False, + ): + return super().rank( + method=method, + ascending=ascending, + pct=pct, + numeric_only=numeric_only, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + other : Series or DataFrame, optional + If not supplied then will default to self and produce pairwise + output. + pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndexed DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) + >>> ser2 = pd.Series([10, 11, 13, 16], index=['a', 'b', 'c', 'd']) + >>> ser1.expanding().cov(ser2) + a NaN + b 0.500000 + c 1.500000 + d 3.333333 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="sample covariance", + agg_method="cov", + ) + def cov( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + ddof: int = 1, + numeric_only: bool = False, + ): + return super().cov( + other=other, + pairwise=pairwise, + ddof=ddof, + numeric_only=numeric_only, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + other : Series or DataFrame, optional + If not supplied then will default to self and produce pairwise + output. + pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndexed DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + dedent( + """ + cov : Similar method to calculate covariance. + numpy.corrcoef : NumPy Pearson's correlation calculation. + """ + ).replace("\n", "", 1), + template_see_also, + create_section_header("Notes"), + dedent( + """ + This function uses Pearson's definition of correlation + (https://en.wikipedia.org/wiki/Pearson_correlation_coefficient). 
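+
+    At each step, the expanding correlation is the expanding covariance of
+    the two series divided by the product of their expanding standard
+    deviations, taken over the observations seen so far.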
+ + When `other` is not specified, the output will be self correlation (e.g. + all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise` + set to `True`. + + Function will return ``NaN`` for correlations of equal valued sequences; + this is the result of a 0/0 division error. + + When `pairwise` is set to `False`, only matching columns between `self` and + `other` will be used. + + When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame + with the original index on the first level, and the `other` DataFrame + columns on the second level. + + In the case of missing elements, only complete pairwise observations + will be used.\n + """ + ), + create_section_header("Examples"), + dedent( + """\ + >>> ser1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) + >>> ser2 = pd.Series([10, 11, 13, 16], index=['a', 'b', 'c', 'd']) + >>> ser1.expanding().corr(ser2) + a NaN + b 1.000000 + c 0.981981 + d 0.975900 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="correlation", + agg_method="corr", + ) + def corr( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + ddof: int = 1, + numeric_only: bool = False, + ): + return super().corr( + other=other, + pairwise=pairwise, + ddof=ddof, + numeric_only=numeric_only, + ) + + +class ExpandingGroupby(BaseWindowGroupby, Expanding): + """ + Provide a expanding groupby implementation. + """ + + _attributes = Expanding._attributes + BaseWindowGroupby._attributes + + def _get_window_indexer(self) -> GroupbyIndexer: + """ + Return an indexer class that will compute the window start and end bounds + + Returns + ------- + GroupbyIndexer + """ + window_indexer = GroupbyIndexer( + groupby_indices=self._grouper.indices, + window_indexer=ExpandingIndexer, + ) + return window_indexer diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/numba_.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/numba_.py new file mode 100644 index 00000000..9357945e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/numba_.py @@ -0,0 +1,351 @@ +from __future__ import annotations + +import functools +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) + +import numpy as np + +from pandas.compat._optional import import_optional_dependency + +from pandas.core.util.numba_ import jit_user_function + +if TYPE_CHECKING: + from pandas._typing import Scalar + + +@functools.cache +def generate_numba_apply_func( + func: Callable[..., Scalar], + nopython: bool, + nogil: bool, + parallel: bool, +): + """ + Generate a numba jitted apply function specified by values from engine_kwargs. + + 1. jit the user's function + 2. Return a rolling apply function with the jitted function inline + + Configurations specified in engine_kwargs apply to both the user's + function _AND_ the rolling apply function. 
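+
+    The generator is wrapped in ``functools.cache``, so repeated calls with
+    the same function object and jit options reuse the previously compiled
+    kernel instead of recompiling it.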
+ + Parameters + ---------- + func : function + function to be applied to each window and will be JITed + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + numba_func = jit_user_function(func) + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def roll_apply( + values: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + minimum_periods: int, + *args: Any, + ) -> np.ndarray: + result = np.empty(len(begin)) + for i in numba.prange(len(result)): + start = begin[i] + stop = end[i] + window = values[start:stop] + count_nan = np.sum(np.isnan(window)) + if len(window) - count_nan >= minimum_periods: + result[i] = numba_func(window, *args) + else: + result[i] = np.nan + return result + + return roll_apply + + +@functools.cache +def generate_numba_ewm_func( + nopython: bool, + nogil: bool, + parallel: bool, + com: float, + adjust: bool, + ignore_na: bool, + deltas: tuple, + normalize: bool, +): + """ + Generate a numba jitted ewm mean or sum function specified by values + from engine_kwargs. + + Parameters + ---------- + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + com : float + adjust : bool + ignore_na : bool + deltas : tuple + normalize : bool + + Returns + ------- + Numba function + """ + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def ewm( + values: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + minimum_periods: int, + ) -> np.ndarray: + result = np.empty(len(values)) + alpha = 1.0 / (1.0 + com) + old_wt_factor = 1.0 - alpha + new_wt = 1.0 if adjust else alpha + + for i in numba.prange(len(begin)): + start = begin[i] + stop = end[i] + window = values[start:stop] + sub_result = np.empty(len(window)) + + weighted = window[0] + nobs = int(not np.isnan(weighted)) + sub_result[0] = weighted if nobs >= minimum_periods else np.nan + old_wt = 1.0 + + for j in range(1, len(window)): + cur = window[j] + is_observation = not np.isnan(cur) + nobs += is_observation + if not np.isnan(weighted): + if is_observation or not ignore_na: + if normalize: + # note that len(deltas) = len(vals) - 1 and deltas[i] + # is to be used in conjunction with vals[i+1] + old_wt *= old_wt_factor ** deltas[start + j - 1] + else: + weighted = old_wt_factor * weighted + if is_observation: + if normalize: + # avoid numerical errors on constant series + if weighted != cur: + weighted = old_wt * weighted + new_wt * cur + if normalize: + weighted = weighted / (old_wt + new_wt) + if adjust: + old_wt += new_wt + else: + old_wt = 1.0 + else: + weighted += cur + elif is_observation: + weighted = cur + + sub_result[j] = weighted if nobs >= minimum_periods else np.nan + + result[start:stop] = sub_result + + return result + + return ewm + + +@functools.cache +def generate_numba_table_func( + func: Callable[..., np.ndarray], + nopython: bool, + nogil: bool, + parallel: bool, +): + """ + Generate a numba jitted function to apply window calculations table-wise. + + Func will be passed a M window size x N number of columns array, and + must return a 1 x N number of columns array. 
Func is intended to operate + row-wise, but the result will be transposed for axis=1. + + 1. jit the user's function + 2. Return a rolling apply function with the jitted function inline + + Parameters + ---------- + func : function + function to be applied to each window and will be JITed + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + numba_func = jit_user_function(func) + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def roll_table( + values: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + minimum_periods: int, + *args: Any, + ): + result = np.empty((len(begin), values.shape[1])) + min_periods_mask = np.empty(result.shape) + for i in numba.prange(len(result)): + start = begin[i] + stop = end[i] + window = values[start:stop] + count_nan = np.sum(np.isnan(window), axis=0) + sub_result = numba_func(window, *args) + nan_mask = len(window) - count_nan >= minimum_periods + min_periods_mask[i, :] = nan_mask + result[i, :] = sub_result + result = np.where(min_periods_mask, result, np.nan) + return result + + return roll_table + + +# This function will no longer be needed once numba supports +# axis for all np.nan* agg functions +# https://github.com/numba/numba/issues/1269 +@functools.cache +def generate_manual_numpy_nan_agg_with_axis(nan_func): + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=True, nogil=True, parallel=True) + def nan_agg_with_axis(table): + result = np.empty(table.shape[1]) + for i in numba.prange(table.shape[1]): + partition = table[:, i] + result[i] = nan_func(partition) + return result + + return nan_agg_with_axis + + +@functools.cache +def generate_numba_ewm_table_func( + nopython: bool, + nogil: bool, + parallel: bool, + com: float, + adjust: bool, + ignore_na: bool, + deltas: tuple, + normalize: bool, +): + """ + Generate a numba jitted ewm mean or sum function applied table wise specified + by values from engine_kwargs. 
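+
+    As in the non-table variant, the smoothing factor is derived from the
+    center of mass as ``alpha = 1 / (1 + com)``; ``normalize=True`` computes
+    an exponentially weighted mean, while ``normalize=False`` computes a
+    weighted sum.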
+ + Parameters + ---------- + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + com : float + adjust : bool + ignore_na : bool + deltas : tuple + normalize: bool + + Returns + ------- + Numba function + """ + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def ewm_table( + values: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + minimum_periods: int, + ) -> np.ndarray: + alpha = 1.0 / (1.0 + com) + old_wt_factor = 1.0 - alpha + new_wt = 1.0 if adjust else alpha + old_wt = np.ones(values.shape[1]) + + result = np.empty(values.shape) + weighted = values[0].copy() + nobs = (~np.isnan(weighted)).astype(np.int64) + result[0] = np.where(nobs >= minimum_periods, weighted, np.nan) + for i in range(1, len(values)): + cur = values[i] + is_observations = ~np.isnan(cur) + nobs += is_observations.astype(np.int64) + for j in numba.prange(len(cur)): + if not np.isnan(weighted[j]): + if is_observations[j] or not ignore_na: + if normalize: + # note that len(deltas) = len(vals) - 1 and deltas[i] + # is to be used in conjunction with vals[i+1] + old_wt[j] *= old_wt_factor ** deltas[i - 1] + else: + weighted[j] = old_wt_factor * weighted[j] + if is_observations[j]: + if normalize: + # avoid numerical errors on constant series + if weighted[j] != cur[j]: + weighted[j] = ( + old_wt[j] * weighted[j] + new_wt * cur[j] + ) + if normalize: + weighted[j] = weighted[j] / (old_wt[j] + new_wt) + if adjust: + old_wt[j] += new_wt + else: + old_wt[j] = 1.0 + else: + weighted[j] += cur[j] + elif is_observations[j]: + weighted[j] = cur[j] + + result[i] = np.where(nobs >= minimum_periods, weighted, np.nan) + + return result + + return ewm_table diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/online.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/online.py new file mode 100644 index 00000000..f9e3122b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/online.py @@ -0,0 +1,118 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.compat._optional import import_optional_dependency + + +def generate_online_numba_ewma_func( + nopython: bool, + nogil: bool, + parallel: bool, +): + """ + Generate a numba jitted groupby ewma function specified by values + from engine_kwargs. + + Parameters + ---------- + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def online_ewma( + values: np.ndarray, + deltas: np.ndarray, + minimum_periods: int, + old_wt_factor: float, + new_wt: float, + old_wt: np.ndarray, + adjust: bool, + ignore_na: bool, + ): + """ + Compute online exponentially weighted mean per column over 2D values. + + Takes the first observation as is, then computes the subsequent + exponentially weighted mean accounting minimum periods. 
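+
+        Returns the computed means together with the updated ``old_wt``
+        weights, so that :class:`EWMMeanState` can resume from the last
+        observation on a subsequent call.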
+ """ + result = np.empty(values.shape) + weighted_avg = values[0] + nobs = (~np.isnan(weighted_avg)).astype(np.int64) + result[0] = np.where(nobs >= minimum_periods, weighted_avg, np.nan) + + for i in range(1, len(values)): + cur = values[i] + is_observations = ~np.isnan(cur) + nobs += is_observations.astype(np.int64) + for j in numba.prange(len(cur)): + if not np.isnan(weighted_avg[j]): + if is_observations[j] or not ignore_na: + # note that len(deltas) = len(vals) - 1 and deltas[i] is to be + # used in conjunction with vals[i+1] + old_wt[j] *= old_wt_factor ** deltas[j - 1] + if is_observations[j]: + # avoid numerical errors on constant series + if weighted_avg[j] != cur[j]: + weighted_avg[j] = ( + (old_wt[j] * weighted_avg[j]) + (new_wt * cur[j]) + ) / (old_wt[j] + new_wt) + if adjust: + old_wt[j] += new_wt + else: + old_wt[j] = 1.0 + elif is_observations[j]: + weighted_avg[j] = cur[j] + + result[i] = np.where(nobs >= minimum_periods, weighted_avg, np.nan) + + return result, old_wt + + return online_ewma + + +class EWMMeanState: + def __init__(self, com, adjust, ignore_na, axis, shape) -> None: + alpha = 1.0 / (1.0 + com) + self.axis = axis + self.shape = shape + self.adjust = adjust + self.ignore_na = ignore_na + self.new_wt = 1.0 if adjust else alpha + self.old_wt_factor = 1.0 - alpha + self.old_wt = np.ones(self.shape[self.axis - 1]) + self.last_ewm = None + + def run_ewm(self, weighted_avg, deltas, min_periods, ewm_func): + result, old_wt = ewm_func( + weighted_avg, + deltas, + min_periods, + self.old_wt_factor, + self.new_wt, + self.old_wt, + self.adjust, + self.ignore_na, + ) + self.old_wt = old_wt + self.last_ewm = result[-1] + return result + + def reset(self) -> None: + self.old_wt = np.ones(self.shape[self.axis - 1]) + self.last_ewm = None diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/rolling.py b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/rolling.py new file mode 100644 index 00000000..ddd6caaa --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/core/window/rolling.py @@ -0,0 +1,2921 @@ +""" +Provide a generic structure to support window functions, +similar to how we have a Groupby object. 
+""" +from __future__ import annotations + +import copy +from datetime import timedelta +from functools import partial +import inspect +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + cast, +) + +import numpy as np + +from pandas._libs.tslibs import ( + BaseOffset, + Timedelta, + to_offset, +) +import pandas._libs.window.aggregations as window_aggregations +from pandas.compat._optional import import_optional_dependency +from pandas.errors import DataError +from pandas.util._decorators import ( + deprecate_kwarg, + doc, +) + +from pandas.core.dtypes.common import ( + ensure_float64, + is_bool, + is_integer, + is_numeric_dtype, + needs_i8_conversion, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.missing import notna + +from pandas.core._numba import executor +from pandas.core.algorithms import factorize +from pandas.core.apply import ResamplerWindowApply +from pandas.core.arrays import ExtensionArray +from pandas.core.base import SelectionMixin +import pandas.core.common as com +from pandas.core.indexers.objects import ( + BaseIndexer, + FixedWindowIndexer, + GroupbyIndexer, + VariableWindowIndexer, +) +from pandas.core.indexes.api import ( + DatetimeIndex, + Index, + MultiIndex, + PeriodIndex, + TimedeltaIndex, +) +from pandas.core.reshape.concat import concat +from pandas.core.util.numba_ import ( + get_jit_arguments, + maybe_use_numba, +) +from pandas.core.window.common import ( + flex_binary_moment, + zsqrt, +) +from pandas.core.window.doc import ( + _shared_docs, + create_section_header, + kwargs_numeric_only, + kwargs_scipy, + numba_notes, + template_header, + template_returns, + template_see_also, + window_agg_numba_parameters, + window_apply_parameters, +) +from pandas.core.window.numba_ import ( + generate_manual_numpy_nan_agg_with_axis, + generate_numba_apply_func, + generate_numba_table_func, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + Sized, + ) + + from pandas._typing import ( + ArrayLike, + Axis, + NDFrameT, + QuantileInterpolation, + WindowingRankType, + ) + + from pandas import ( + DataFrame, + Series, + ) + from pandas.core.generic import NDFrame + from pandas.core.groupby.ops import BaseGrouper + +from pandas.core.arrays.datetimelike import dtype_to_unit + + +class BaseWindow(SelectionMixin): + """Provides utilities for performing windowing operations.""" + + _attributes: list[str] = [] + exclusions: frozenset[Hashable] = frozenset() + _on: Index + + def __init__( + self, + obj: NDFrame, + window=None, + min_periods: int | None = None, + center: bool | None = False, + win_type: str | None = None, + axis: Axis = 0, + on: str | Index | None = None, + closed: str | None = None, + step: int | None = None, + method: str = "single", + *, + selection=None, + ) -> None: + self.obj = obj + self.on = on + self.closed = closed + self.step = step + self.window = window + self.min_periods = min_periods + self.center = center + self.win_type = win_type + self.axis = obj._get_axis_number(axis) if axis is not None else None + self.method = method + self._win_freq_i8: int | None = None + if self.on is None: + if self.axis == 0: + self._on = self.obj.index + else: + # i.e. 
self.axis == 1 + self._on = self.obj.columns + elif isinstance(self.on, Index): + self._on = self.on + elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns: + self._on = Index(self.obj[self.on]) + else: + raise ValueError( + f"invalid on specified as {self.on}, " + "must be a column (of DataFrame), an Index or None" + ) + + self._selection = selection + self._validate() + + def _validate(self) -> None: + if self.center is not None and not is_bool(self.center): + raise ValueError("center must be a boolean") + if self.min_periods is not None: + if not is_integer(self.min_periods): + raise ValueError("min_periods must be an integer") + if self.min_periods < 0: + raise ValueError("min_periods must be >= 0") + if is_integer(self.window) and self.min_periods > self.window: + raise ValueError( + f"min_periods {self.min_periods} must be <= window {self.window}" + ) + if self.closed is not None and self.closed not in [ + "right", + "both", + "left", + "neither", + ]: + raise ValueError("closed must be 'right', 'left', 'both' or 'neither'") + if not isinstance(self.obj, (ABCSeries, ABCDataFrame)): + raise TypeError(f"invalid type: {type(self)}") + if isinstance(self.window, BaseIndexer): + # Validate that the passed BaseIndexer subclass has + # a get_window_bounds with the correct signature. + get_window_bounds_signature = inspect.signature( + self.window.get_window_bounds + ).parameters.keys() + expected_signature = inspect.signature( + BaseIndexer().get_window_bounds + ).parameters.keys() + if get_window_bounds_signature != expected_signature: + raise ValueError( + f"{type(self.window).__name__} does not implement " + f"the correct signature for get_window_bounds" + ) + if self.method not in ["table", "single"]: + raise ValueError("method must be 'table' or 'single") + if self.step is not None: + if not is_integer(self.step): + raise ValueError("step must be an integer") + if self.step < 0: + raise ValueError("step must be >= 0") + + def _check_window_bounds( + self, start: np.ndarray, end: np.ndarray, num_vals: int + ) -> None: + if len(start) != len(end): + raise ValueError( + f"start ({len(start)}) and end ({len(end)}) bounds must be the " + f"same length" + ) + if len(start) != (num_vals + (self.step or 1) - 1) // (self.step or 1): + raise ValueError( + f"start and end bounds ({len(start)}) must be the same length " + f"as the object ({num_vals}) divided by the step ({self.step}) " + f"if given and rounded up" + ) + + def _slice_axis_for_step(self, index: Index, result: Sized | None = None) -> Index: + """ + Slices the index for a given result and the preset step. + """ + return ( + index + if result is None or len(result) == len(index) + else index[:: self.step] + ) + + def _validate_numeric_only(self, name: str, numeric_only: bool) -> None: + """ + Validate numeric_only argument, raising if invalid for the input. + + Parameters + ---------- + name : str + Name of the operator (kernel). + numeric_only : bool + Value passed by user. + """ + if ( + self._selected_obj.ndim == 1 + and numeric_only + and not is_numeric_dtype(self._selected_obj.dtype) + ): + raise NotImplementedError( + f"{type(self).__name__}.{name} does not implement numeric_only" + ) + + def _make_numeric_only(self, obj: NDFrameT) -> NDFrameT: + """Subset DataFrame to numeric columns. + + Parameters + ---------- + obj : DataFrame + + Returns + ------- + obj subset to numeric-only columns. 
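+
+        Notes
+        -----
+        Timedelta columns are excluded along with the other non-numeric
+        dtypes, since the window aggregation kernels operate on float64
+        values.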
+ """ + result = obj.select_dtypes(include=["number"], exclude=["timedelta"]) + return result + + def _create_data(self, obj: NDFrameT, numeric_only: bool = False) -> NDFrameT: + """ + Split data into blocks & return conformed data. + """ + # filter out the on from the object + if self.on is not None and not isinstance(self.on, Index) and obj.ndim == 2: + obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False) + if obj.ndim > 1 and (numeric_only or self.axis == 1): + # GH: 20649 in case of mixed dtype and axis=1 we have to convert everything + # to float to calculate the complete row at once. We exclude all non-numeric + # dtypes. + obj = self._make_numeric_only(obj) + if self.axis == 1: + obj = obj.astype("float64", copy=False) + obj._mgr = obj._mgr.consolidate() + return obj + + def _gotitem(self, key, ndim, subset=None): + """ + Sub-classes to define. Return a sliced object. + + Parameters + ---------- + key : str / list of selections + ndim : {1, 2} + requested ndim of result + subset : object, default None + subset to act on + """ + # create a new object to prevent aliasing + if subset is None: + subset = self.obj + + # we need to make a shallow copy of ourselves + # with the same groupby + kwargs = {attr: getattr(self, attr) for attr in self._attributes} + + selection = self._infer_selection(key, subset) + new_win = type(self)(subset, selection=selection, **kwargs) + return new_win + + def __getattr__(self, attr: str): + if attr in self._internal_names_set: + return object.__getattribute__(self, attr) + if attr in self.obj: + return self[attr] + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{attr}'" + ) + + def _dir_additions(self): + return self.obj._dir_additions() + + def __repr__(self) -> str: + """ + Provide a nice str repr of our rolling object. 
+ """ + attrs_list = ( + f"{attr_name}={getattr(self, attr_name)}" + for attr_name in self._attributes + if getattr(self, attr_name, None) is not None and attr_name[0] != "_" + ) + attrs = ",".join(attrs_list) + return f"{type(self).__name__} [{attrs}]" + + def __iter__(self) -> Iterator: + obj = self._selected_obj.set_axis(self._on) + obj = self._create_data(obj) + indexer = self._get_window_indexer() + + start, end = indexer.get_window_bounds( + num_values=len(obj), + min_periods=self.min_periods, + center=self.center, + closed=self.closed, + step=self.step, + ) + self._check_window_bounds(start, end, len(obj)) + + for s, e in zip(start, end): + result = obj.iloc[slice(s, e)] + yield result + + def _prep_values(self, values: ArrayLike) -> np.ndarray: + """Convert input to numpy arrays for Cython routines""" + if needs_i8_conversion(values.dtype): + raise NotImplementedError( + f"ops for {type(self).__name__} for this " + f"dtype {values.dtype} are not implemented" + ) + # GH #12373 : rolling functions error on float32 data + # make sure the data is coerced to float64 + try: + if isinstance(values, ExtensionArray): + values = values.to_numpy(np.float64, na_value=np.nan) + else: + values = ensure_float64(values) + except (ValueError, TypeError) as err: + raise TypeError(f"cannot handle this type -> {values.dtype}") from err + + # Convert inf to nan for C funcs + inf = np.isinf(values) + if inf.any(): + values = np.where(inf, np.nan, values) + + return values + + def _insert_on_column(self, result: DataFrame, obj: DataFrame) -> None: + # if we have an 'on' column we want to put it back into + # the results in the same location + from pandas import Series + + if self.on is not None and not self._on.equals(obj.index): + name = self._on.name + extra_col = Series(self._on, index=self.obj.index, name=name, copy=False) + if name in result.columns: + # TODO: sure we want to overwrite results? + result[name] = extra_col + elif name in result.index.names: + pass + elif name in self._selected_obj.columns: + # insert in the same location as we had in _selected_obj + old_cols = self._selected_obj.columns + new_cols = result.columns + old_loc = old_cols.get_loc(name) + overlap = new_cols.intersection(old_cols[:old_loc]) + new_loc = len(overlap) + result.insert(new_loc, name, extra_col) + else: + # insert at the end + result[name] = extra_col + + @property + def _index_array(self): + # TODO: why do we get here with e.g. MultiIndex? 
+ if needs_i8_conversion(self._on.dtype): + idx = cast("PeriodIndex | DatetimeIndex | TimedeltaIndex", self._on) + return idx.asi8 + return None + + def _resolve_output(self, out: DataFrame, obj: DataFrame) -> DataFrame: + """Validate and finalize result.""" + if out.shape[1] == 0 and obj.shape[1] > 0: + raise DataError("No numeric types to aggregate") + if out.shape[1] == 0: + return obj.astype("float64") + + self._insert_on_column(out, obj) + return out + + def _get_window_indexer(self) -> BaseIndexer: + """ + Return an indexer class that will compute the window start and end bounds + """ + if isinstance(self.window, BaseIndexer): + return self.window + if self._win_freq_i8 is not None: + return VariableWindowIndexer( + index_array=self._index_array, + window_size=self._win_freq_i8, + center=self.center, + ) + return FixedWindowIndexer(window_size=self.window) + + def _apply_series( + self, homogeneous_func: Callable[..., ArrayLike], name: str | None = None + ) -> Series: + """ + Series version of _apply_blockwise + """ + obj = self._create_data(self._selected_obj) + + if name == "count": + # GH 12541: Special case for count where we support date-like types + obj = notna(obj).astype(int) + try: + values = self._prep_values(obj._values) + except (TypeError, NotImplementedError) as err: + raise DataError("No numeric types to aggregate") from err + + result = homogeneous_func(values) + index = self._slice_axis_for_step(obj.index, result) + return obj._constructor(result, index=index, name=obj.name) + + def _apply_blockwise( + self, + homogeneous_func: Callable[..., ArrayLike], + name: str, + numeric_only: bool = False, + ) -> DataFrame | Series: + """ + Apply the given function to the DataFrame broken down into homogeneous + sub-frames. + """ + self._validate_numeric_only(name, numeric_only) + if self._selected_obj.ndim == 1: + return self._apply_series(homogeneous_func, name) + + obj = self._create_data(self._selected_obj, numeric_only) + if name == "count": + # GH 12541: Special case for count where we support date-like types + obj = notna(obj).astype(int) + obj._mgr = obj._mgr.consolidate() + + if self.axis == 1: + obj = obj.T + + taker = [] + res_values = [] + for i, arr in enumerate(obj._iter_column_arrays()): + # GH#42736 operate column-wise instead of block-wise + # As of 2.0, hfunc will raise for nuisance columns + try: + arr = self._prep_values(arr) + except (TypeError, NotImplementedError) as err: + raise DataError( + f"Cannot aggregate non-numeric type: {arr.dtype}" + ) from err + res = homogeneous_func(arr) + res_values.append(res) + taker.append(i) + + index = self._slice_axis_for_step( + obj.index, res_values[0] if len(res_values) > 0 else None + ) + df = type(obj)._from_arrays( + res_values, + index=index, + columns=obj.columns.take(taker), + verify_integrity=False, + ) + + if self.axis == 1: + df = df.T + + return self._resolve_output(df, obj) + + def _apply_tablewise( + self, + homogeneous_func: Callable[..., ArrayLike], + name: str | None = None, + numeric_only: bool = False, + ) -> DataFrame | Series: + """ + Apply the given function to the DataFrame across the entire object + """ + if self._selected_obj.ndim == 1: + raise ValueError("method='table' not applicable for Series objects.") + obj = self._create_data(self._selected_obj, numeric_only) + values = self._prep_values(obj.to_numpy()) + values = values.T if self.axis == 1 else values + result = homogeneous_func(values) + result = result.T if self.axis == 1 else result + index = self._slice_axis_for_step(obj.index, 
result) + columns = ( + obj.columns + if result.shape[1] == len(obj.columns) + else obj.columns[:: self.step] + ) + out = obj._constructor(result, index=index, columns=columns) + + return self._resolve_output(out, obj) + + def _apply_pairwise( + self, + target: DataFrame | Series, + other: DataFrame | Series | None, + pairwise: bool | None, + func: Callable[[DataFrame | Series, DataFrame | Series], DataFrame | Series], + numeric_only: bool, + ) -> DataFrame | Series: + """ + Apply the given pairwise function given 2 pandas objects (DataFrame/Series) + """ + target = self._create_data(target, numeric_only) + if other is None: + other = target + # only default unset + pairwise = True if pairwise is None else pairwise + elif not isinstance(other, (ABCDataFrame, ABCSeries)): + raise ValueError("other must be a DataFrame or Series") + elif other.ndim == 2 and numeric_only: + other = self._make_numeric_only(other) + + return flex_binary_moment(target, other, func, pairwise=bool(pairwise)) + + def _apply( + self, + func: Callable[..., Any], + name: str, + numeric_only: bool = False, + numba_args: tuple[Any, ...] = (), + **kwargs, + ): + """ + Rolling statistical measure using supplied function. + + Designed to be used with passed-in Cython array-based functions. + + Parameters + ---------- + func : callable function to apply + name : str, + numba_args : tuple + args to be passed when func is a numba func + **kwargs + additional arguments for rolling function and window function + + Returns + ------- + y : type of input + """ + window_indexer = self._get_window_indexer() + min_periods = ( + self.min_periods + if self.min_periods is not None + else window_indexer.window_size + ) + + def homogeneous_func(values: np.ndarray): + # calculation function + + if values.size == 0: + return values.copy() + + def calc(x): + start, end = window_indexer.get_window_bounds( + num_values=len(x), + min_periods=min_periods, + center=self.center, + closed=self.closed, + step=self.step, + ) + self._check_window_bounds(start, end, len(x)) + + return func(x, start, end, min_periods, *numba_args) + + with np.errstate(all="ignore"): + result = calc(values) + + return result + + if self.method == "single": + return self._apply_blockwise(homogeneous_func, name, numeric_only) + else: + return self._apply_tablewise(homogeneous_func, name, numeric_only) + + def _numba_apply( + self, + func: Callable[..., Any], + engine_kwargs: dict[str, bool] | None = None, + **func_kwargs, + ): + window_indexer = self._get_window_indexer() + min_periods = ( + self.min_periods + if self.min_periods is not None + else window_indexer.window_size + ) + obj = self._create_data(self._selected_obj) + if self.axis == 1: + obj = obj.T + values = self._prep_values(obj.to_numpy()) + if values.ndim == 1: + values = values.reshape(-1, 1) + start, end = window_indexer.get_window_bounds( + num_values=len(values), + min_periods=min_periods, + center=self.center, + closed=self.closed, + step=self.step, + ) + self._check_window_bounds(start, end, len(values)) + # For now, map everything to float to match the Cython impl + # even though it is wrong + # TODO: Could preserve correct dtypes in future + # xref #53214 + dtype_mapping = executor.float_dtype_mapping + aggregator = executor.generate_shared_aggregator( + func, + dtype_mapping, + is_grouped_kernel=False, + **get_jit_arguments(engine_kwargs), + ) + result = aggregator( + values.T, start=start, end=end, min_periods=min_periods, **func_kwargs + ).T + result = result.T if self.axis == 1 else result + index 
= self._slice_axis_for_step(obj.index, result) + if obj.ndim == 1: + result = result.squeeze() + out = obj._constructor(result, index=index, name=obj.name) + return out + else: + columns = self._slice_axis_for_step(obj.columns, result.T) + out = obj._constructor(result, index=index, columns=columns) + return self._resolve_output(out, obj) + + def aggregate(self, func, *args, **kwargs): + result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg() + if result is None: + return self.apply(func, raw=False, args=args, kwargs=kwargs) + return result + + agg = aggregate + + +class BaseWindowGroupby(BaseWindow): + """ + Provide the groupby windowing facilities. + """ + + _grouper: BaseGrouper + _as_index: bool + _attributes: list[str] = ["_grouper"] + + def __init__( + self, + obj: DataFrame | Series, + *args, + _grouper: BaseGrouper, + _as_index: bool = True, + **kwargs, + ) -> None: + from pandas.core.groupby.ops import BaseGrouper + + if not isinstance(_grouper, BaseGrouper): + raise ValueError("Must pass a BaseGrouper object.") + self._grouper = _grouper + self._as_index = _as_index + # GH 32262: It's convention to keep the grouping column in + # groupby., but unexpected to users in + # groupby.rolling. + obj = obj.drop(columns=self._grouper.names, errors="ignore") + # GH 15354 + if kwargs.get("step") is not None: + raise NotImplementedError("step not implemented for groupby") + super().__init__(obj, *args, **kwargs) + + def _apply( + self, + func: Callable[..., Any], + name: str, + numeric_only: bool = False, + numba_args: tuple[Any, ...] = (), + **kwargs, + ) -> DataFrame | Series: + result = super()._apply( + func, + name, + numeric_only, + numba_args, + **kwargs, + ) + # Reconstruct the resulting MultiIndex + # 1st set of levels = group by labels + # 2nd set of levels = original DataFrame/Series index + grouped_object_index = self.obj.index + grouped_index_name = [*grouped_object_index.names] + groupby_keys = copy.copy(self._grouper.names) + result_index_names = groupby_keys + grouped_index_name + + drop_columns = [ + key + for key in self._grouper.names + if key not in self.obj.index.names or key is None + ] + + if len(drop_columns) != len(groupby_keys): + # Our result will have still kept the column in the result + result = result.drop(columns=drop_columns, errors="ignore") + + codes = self._grouper.codes + levels = copy.copy(self._grouper.levels) + + group_indices = self._grouper.indices.values() + if group_indices: + indexer = np.concatenate(list(group_indices)) + else: + indexer = np.array([], dtype=np.intp) + codes = [c.take(indexer) for c in codes] + + # if the index of the original dataframe needs to be preserved, append + # this index (but reordered) to the codes/levels from the groupby + if grouped_object_index is not None: + idx = grouped_object_index.take(indexer) + if not isinstance(idx, MultiIndex): + idx = MultiIndex.from_arrays([idx]) + codes.extend(list(idx.codes)) + levels.extend(list(idx.levels)) + + result_index = MultiIndex( + levels, codes, names=result_index_names, verify_integrity=False + ) + + result.index = result_index + if not self._as_index: + result = result.reset_index(level=list(range(len(groupby_keys)))) + return result + + def _apply_pairwise( + self, + target: DataFrame | Series, + other: DataFrame | Series | None, + pairwise: bool | None, + func: Callable[[DataFrame | Series, DataFrame | Series], DataFrame | Series], + numeric_only: bool, + ) -> DataFrame | Series: + """ + Apply the given pairwise function given 2 pandas objects 
(DataFrame/Series) + """ + # Manually drop the grouping column first + target = target.drop(columns=self._grouper.names, errors="ignore") + result = super()._apply_pairwise(target, other, pairwise, func, numeric_only) + # 1) Determine the levels + codes of the groupby levels + if other is not None and not all( + len(group) == len(other) for group in self._grouper.indices.values() + ): + # GH 42915 + # len(other) != len(any group), so must reindex (expand) the result + # from flex_binary_moment to a "transform"-like result + # per groupby combination + old_result_len = len(result) + result = concat( + [ + result.take(gb_indices).reindex(result.index) + for gb_indices in self._grouper.indices.values() + ] + ) + + gb_pairs = ( + com.maybe_make_list(pair) for pair in self._grouper.indices.keys() + ) + groupby_codes = [] + groupby_levels = [] + # e.g. [[1, 2], [4, 5]] as [[1, 4], [2, 5]] + for gb_level_pair in map(list, zip(*gb_pairs)): + labels = np.repeat(np.array(gb_level_pair), old_result_len) + codes, levels = factorize(labels) + groupby_codes.append(codes) + groupby_levels.append(levels) + else: + # pairwise=True or len(other) == len(each group), so repeat + # the groupby labels by the number of columns in the original object + groupby_codes = self._grouper.codes + # error: Incompatible types in assignment (expression has type + # "List[Index]", variable has type "List[Union[ndarray, Index]]") + groupby_levels = self._grouper.levels # type: ignore[assignment] + + group_indices = self._grouper.indices.values() + if group_indices: + indexer = np.concatenate(list(group_indices)) + else: + indexer = np.array([], dtype=np.intp) + + if target.ndim == 1: + repeat_by = 1 + else: + repeat_by = len(target.columns) + groupby_codes = [ + np.repeat(c.take(indexer), repeat_by) for c in groupby_codes + ] + # 2) Determine the levels + codes of the result from super()._apply_pairwise + if isinstance(result.index, MultiIndex): + result_codes = list(result.index.codes) + result_levels = list(result.index.levels) + result_names = list(result.index.names) + else: + idx_codes, idx_levels = factorize(result.index) + result_codes = [idx_codes] + result_levels = [idx_levels] + result_names = [result.index.name] + + # 3) Create the resulting index by combining 1) + 2) + result_codes = groupby_codes + result_codes + result_levels = groupby_levels + result_levels + result_names = self._grouper.names + result_names + + result_index = MultiIndex( + result_levels, result_codes, names=result_names, verify_integrity=False + ) + result.index = result_index + return result + + def _create_data(self, obj: NDFrameT, numeric_only: bool = False) -> NDFrameT: + """ + Split data into blocks & return conformed data. + """ + # Ensure the object we're rolling over is monotonically sorted relative + # to the groups + # GH 36197 + if not obj.empty: + groupby_order = np.concatenate(list(self._grouper.indices.values())).astype( + np.int64 + ) + obj = obj.take(groupby_order) + return super()._create_data(obj, numeric_only) + + def _gotitem(self, key, ndim, subset=None): + # we are setting the index on the actual object + # here so our index is carried through to the selected obj + # when we do the splitting for the groupby + if self.on is not None: + # GH 43355 + subset = self.obj.set_index(self._on) + return super()._gotitem(key, ndim, subset=subset) + + +class Window(BaseWindow): + """ + Provide rolling window calculations. 
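+
+    A ``Window`` (or ``Rolling``) object is normally obtained by calling
+    :meth:`DataFrame.rolling`/:meth:`Series.rolling` rather than being
+    instantiated directly.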
+ + Parameters + ---------- + window : int, timedelta, str, offset, or BaseIndexer subclass + Size of the moving window. + + If an integer, the fixed number of observations used for + each window. + + If a timedelta, str, or offset, the time period of each window. Each + window will be a variable sized based on the observations included in + the time-period. This is only valid for datetimelike indexes. + To learn more about the offsets & frequency strings, please see `this link + `__. + + If a BaseIndexer subclass, the window boundaries + based on the defined ``get_window_bounds`` method. Additional rolling + keyword arguments, namely ``min_periods``, ``center``, ``closed`` and + ``step`` will be passed to ``get_window_bounds``. + + min_periods : int, default None + Minimum number of observations in window required to have a value; + otherwise, result is ``np.nan``. + + For a window that is specified by an offset, ``min_periods`` will default to 1. + + For a window that is specified by an integer, ``min_periods`` will default + to the size of the window. + + center : bool, default False + If False, set the window labels as the right edge of the window index. + + If True, set the window labels as the center of the window index. + + win_type : str, default None + If ``None``, all points are evenly weighted. + + If a string, it must be a valid `scipy.signal window function + `__. + + Certain Scipy window types require additional parameters to be passed + in the aggregation function. The additional parameters must match + the keywords specified in the Scipy window type method signature. + + on : str, optional + For a DataFrame, a column label or Index level on which + to calculate the rolling window, rather than the DataFrame's index. + + Provided integer column is ignored and excluded from result since + an integer index is not used to calculate the rolling window. + + axis : int or str, default 0 + If ``0`` or ``'index'``, roll across the rows. + + If ``1`` or ``'columns'``, roll across the columns. + + For `Series` this parameter is unused and defaults to 0. + + closed : str, default None + If ``'right'``, the first point in the window is excluded from calculations. + + If ``'left'``, the last point in the window is excluded from calculations. + + If ``'both'``, the no points in the window are excluded from calculations. + + If ``'neither'``, the first and last points in the window are excluded + from calculations. + + Default ``None`` (``'right'``). + + .. versionchanged:: 1.2.0 + + The closed parameter with fixed windows is now supported. + + step : int, default None + + .. versionadded:: 1.5.0 + + Evaluate the window at every ``step`` result, equivalent to slicing as + ``[::step]``. ``window`` must be an integer. Using a step argument other + than None or 1 will produce a result with a different shape than the input. + + method : str {'single', 'table'}, default 'single' + + .. versionadded:: 1.3.0 + + Execute the rolling operation per single column or row (``'single'``) + or over the entire object (``'table'``). + + This argument is only implemented when specifying ``engine='numba'`` + in the method call. + + Returns + ------- + pandas.api.typing.Window or pandas.api.typing.Rolling + An instance of Window is returned if ``win_type`` is passed. Otherwise, + an instance of Rolling is returned. + + See Also + -------- + expanding : Provides expanding transformations. + ewm : Provides exponential weighted functions. 
+ + Notes + ----- + See :ref:`Windowing Operations ` for further usage details + and examples. + + Examples + -------- + >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) + >>> df + B + 0 0.0 + 1 1.0 + 2 2.0 + 3 NaN + 4 4.0 + + **window** + + Rolling sum with a window length of 2 observations. + + >>> df.rolling(2).sum() + B + 0 NaN + 1 1.0 + 2 3.0 + 3 NaN + 4 NaN + + Rolling sum with a window span of 2 seconds. + + >>> df_time = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}, + ... index=[pd.Timestamp('20130101 09:00:00'), + ... pd.Timestamp('20130101 09:00:02'), + ... pd.Timestamp('20130101 09:00:03'), + ... pd.Timestamp('20130101 09:00:05'), + ... pd.Timestamp('20130101 09:00:06')]) + + >>> df_time + B + 2013-01-01 09:00:00 0.0 + 2013-01-01 09:00:02 1.0 + 2013-01-01 09:00:03 2.0 + 2013-01-01 09:00:05 NaN + 2013-01-01 09:00:06 4.0 + + >>> df_time.rolling('2s').sum() + B + 2013-01-01 09:00:00 0.0 + 2013-01-01 09:00:02 1.0 + 2013-01-01 09:00:03 3.0 + 2013-01-01 09:00:05 NaN + 2013-01-01 09:00:06 4.0 + + Rolling sum with forward looking windows with 2 observations. + + >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2) + >>> df.rolling(window=indexer, min_periods=1).sum() + B + 0 1.0 + 1 3.0 + 2 2.0 + 3 4.0 + 4 4.0 + + **min_periods** + + Rolling sum with a window length of 2 observations, but only needs a minimum of 1 + observation to calculate a value. + + >>> df.rolling(2, min_periods=1).sum() + B + 0 0.0 + 1 1.0 + 2 3.0 + 3 2.0 + 4 4.0 + + **center** + + Rolling sum with the result assigned to the center of the window index. + + >>> df.rolling(3, min_periods=1, center=True).sum() + B + 0 1.0 + 1 3.0 + 2 3.0 + 3 6.0 + 4 4.0 + + >>> df.rolling(3, min_periods=1, center=False).sum() + B + 0 0.0 + 1 1.0 + 2 3.0 + 3 3.0 + 4 6.0 + + **step** + + Rolling sum with a window length of 2 observations, minimum of 1 observation to + calculate a value, and a step of 2. + + >>> df.rolling(2, min_periods=1, step=2).sum() + B + 0 0.0 + 2 3.0 + 4 4.0 + + **win_type** + + Rolling sum with a window length of 2, using the Scipy ``'gaussian'`` + window type. ``std`` is required in the aggregation function. + + >>> df.rolling(2, win_type='gaussian').sum(std=3) + B + 0 NaN + 1 0.986207 + 2 2.958621 + 3 NaN + 4 NaN + + **on** + + Rolling sum with a window length of 2 days. + + >>> df = pd.DataFrame({ + ... 'A': [pd.to_datetime('2020-01-01'), + ... pd.to_datetime('2020-01-01'), + ... pd.to_datetime('2020-01-02'),], + ... 'B': [1, 2, 3], }, + ... index=pd.date_range('2020', periods=3)) + + >>> df + A B + 2020-01-01 2020-01-01 1 + 2020-01-02 2020-01-01 2 + 2020-01-03 2020-01-02 3 + + >>> df.rolling('2D', on='A').sum() + A B + 2020-01-01 2020-01-01 1.0 + 2020-01-02 2020-01-01 3.0 + 2020-01-03 2020-01-02 6.0 + """ + + _attributes = [ + "window", + "min_periods", + "center", + "win_type", + "axis", + "on", + "closed", + "step", + "method", + ] + + def _validate(self): + super()._validate() + + if not isinstance(self.win_type, str): + raise ValueError(f"Invalid win_type {self.win_type}") + signal = import_optional_dependency( + "scipy.signal.windows", extra="Scipy is required to generate window weight." + ) + self._scipy_weight_generator = getattr(signal, self.win_type, None) + if self._scipy_weight_generator is None: + raise ValueError(f"Invalid win_type {self.win_type}") + + if isinstance(self.window, BaseIndexer): + raise NotImplementedError( + "BaseIndexer subclasses not implemented with win_types." 
+ ) + if not is_integer(self.window) or self.window < 0: + raise ValueError("window must be an integer 0 or greater") + + if self.method != "single": + raise NotImplementedError("'single' is the only supported method type.") + + def _center_window(self, result: np.ndarray, offset: int) -> np.ndarray: + """ + Center the result in the window for weighted rolling aggregations. + """ + if offset > 0: + lead_indexer = [slice(offset, None)] + result = np.copy(result[tuple(lead_indexer)]) + return result + + def _apply( + self, + func: Callable[[np.ndarray, int, int], np.ndarray], + name: str, + numeric_only: bool = False, + numba_args: tuple[Any, ...] = (), + **kwargs, + ): + """ + Rolling with weights statistical measure using supplied function. + + Designed to be used with passed-in Cython array-based functions. + + Parameters + ---------- + func : callable function to apply + name : str, + numeric_only : bool, default False + Whether to only operate on bool, int, and float columns + numba_args : tuple + unused + **kwargs + additional arguments for scipy windows if necessary + + Returns + ------- + y : type of input + """ + # "None" not callable [misc] + window = self._scipy_weight_generator( # type: ignore[misc] + self.window, **kwargs + ) + offset = (len(window) - 1) // 2 if self.center else 0 + + def homogeneous_func(values: np.ndarray): + # calculation function + + if values.size == 0: + return values.copy() + + def calc(x): + additional_nans = np.array([np.nan] * offset) + x = np.concatenate((x, additional_nans)) + return func( + x, + window, + self.min_periods if self.min_periods is not None else len(window), + ) + + with np.errstate(all="ignore"): + # Our weighted aggregations return memoryviews + result = np.asarray(calc(values)) + + if self.center: + result = self._center_window(result, offset) + + return result + + return self._apply_blockwise(homogeneous_func, name, numeric_only)[:: self.step] + + @doc( + _shared_docs["aggregate"], + see_also=dedent( + """ + See Also + -------- + pandas.DataFrame.aggregate : Similar DataFrame method. + pandas.Series.aggregate : Similar Series method. + """ + ), + examples=dedent( + """ + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + >>> df + A B C + 0 1 4 7 + 1 2 5 8 + 2 3 6 9 + + >>> df.rolling(2, win_type="boxcar").agg("mean") + A B C + 0 NaN NaN NaN + 1 1.5 4.5 7.5 + 2 2.5 5.5 8.5 + """ + ), + klass="Series/DataFrame", + axis="", + ) + def aggregate(self, func, *args, **kwargs): + result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg() + if result is None: + # these must apply directly + result = func(self) + + return result + + agg = aggregate + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + kwargs_scipy, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([0, 1, 5, 2, 8]) + + To get an instance of :class:`~pandas.core.window.rolling.Window` we need + to pass the parameter `win_type`. + + >>> type(ser.rolling(2, win_type='gaussian')) + + + In order to use the `SciPy` Gaussian window we need to provide the parameters + `M` and `std`. The parameter `M` corresponds to 2 in our example. 
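+        (SciPy's ``gaussian`` window takes the number of points ``M`` and a
+        standard deviation ``std``.)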
+ We pass the second parameter `std` as a parameter of the following method + (`sum` in this case): + + >>> ser.rolling(2, win_type='gaussian').sum(std=3) + 0 NaN + 1 0.986207 + 2 5.917243 + 3 6.903450 + 4 9.862071 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="weighted window sum", + agg_method="sum", + ) + def sum(self, numeric_only: bool = False, **kwargs): + window_func = window_aggregations.roll_weighted_sum + # error: Argument 1 to "_apply" of "Window" has incompatible type + # "Callable[[ndarray, ndarray, int], ndarray]"; expected + # "Callable[[ndarray, int, int], ndarray]" + return self._apply( + window_func, # type: ignore[arg-type] + name="sum", + numeric_only=numeric_only, + **kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + kwargs_scipy, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([0, 1, 5, 2, 8]) + + To get an instance of :class:`~pandas.core.window.rolling.Window` we need + to pass the parameter `win_type`. + + >>> type(ser.rolling(2, win_type='gaussian')) + + + In order to use the `SciPy` Gaussian window we need to provide the parameters + `M` and `std`. The parameter `M` corresponds to 2 in our example. + We pass the second parameter `std` as a parameter of the following method: + + >>> ser.rolling(2, win_type='gaussian').mean(std=3) + 0 NaN + 1 0.5 + 2 3.0 + 3 3.5 + 4 5.0 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="weighted window mean", + agg_method="mean", + ) + def mean(self, numeric_only: bool = False, **kwargs): + window_func = window_aggregations.roll_weighted_mean + # error: Argument 1 to "_apply" of "Window" has incompatible type + # "Callable[[ndarray, ndarray, int], ndarray]"; expected + # "Callable[[ndarray, int, int], ndarray]" + return self._apply( + window_func, # type: ignore[arg-type] + name="mean", + numeric_only=numeric_only, + **kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + kwargs_scipy, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([0, 1, 5, 2, 8]) + + To get an instance of :class:`~pandas.core.window.rolling.Window` we need + to pass the parameter `win_type`. + + >>> type(ser.rolling(2, win_type='gaussian')) + + + In order to use the `SciPy` Gaussian window we need to provide the parameters + `M` and `std`. The parameter `M` corresponds to 2 in our example. 
+ We pass the second parameter `std` as a parameter of the following method: + + >>> ser.rolling(2, win_type='gaussian').var(std=3) + 0 NaN + 1 0.5 + 2 8.0 + 3 4.5 + 4 18.0 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="weighted window variance", + agg_method="var", + ) + def var(self, ddof: int = 1, numeric_only: bool = False, **kwargs): + window_func = partial(window_aggregations.roll_weighted_var, ddof=ddof) + kwargs.pop("name", None) + return self._apply(window_func, name="var", numeric_only=numeric_only, **kwargs) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + kwargs_scipy, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([0, 1, 5, 2, 8]) + + To get an instance of :class:`~pandas.core.window.rolling.Window` we need + to pass the parameter `win_type`. + + >>> type(ser.rolling(2, win_type='gaussian')) + + + In order to use the `SciPy` Gaussian window we need to provide the parameters + `M` and `std`. The parameter `M` corresponds to 2 in our example. + We pass the second parameter `std` as a parameter of the following method: + + >>> ser.rolling(2, win_type='gaussian').std(std=3) + 0 NaN + 1 0.707107 + 2 2.828427 + 3 2.121320 + 4 4.242641 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="weighted window standard deviation", + agg_method="std", + ) + def std(self, ddof: int = 1, numeric_only: bool = False, **kwargs): + return zsqrt( + self.var(ddof=ddof, name="std", numeric_only=numeric_only, **kwargs) + ) + + +class RollingAndExpandingMixin(BaseWindow): + def count(self, numeric_only: bool = False): + window_func = window_aggregations.roll_sum + return self._apply(window_func, name="count", numeric_only=numeric_only) + + def apply( + self, + func: Callable[..., Any], + raw: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + args: tuple[Any, ...] | None = None, + kwargs: dict[str, Any] | None = None, + ): + if args is None: + args = () + if kwargs is None: + kwargs = {} + + if not is_bool(raw): + raise ValueError("raw parameter must be `True` or `False`") + + numba_args: tuple[Any, ...] 
= () + if maybe_use_numba(engine): + if raw is False: + raise ValueError("raw must be `True` when using the numba engine") + numba_args = args + if self.method == "single": + apply_func = generate_numba_apply_func( + func, **get_jit_arguments(engine_kwargs, kwargs) + ) + else: + apply_func = generate_numba_table_func( + func, **get_jit_arguments(engine_kwargs, kwargs) + ) + elif engine in ("cython", None): + if engine_kwargs is not None: + raise ValueError("cython engine does not accept engine_kwargs") + apply_func = self._generate_cython_apply_func(args, kwargs, raw, func) + else: + raise ValueError("engine must be either 'numba' or 'cython'") + + return self._apply( + apply_func, + name="apply", + numba_args=numba_args, + ) + + def _generate_cython_apply_func( + self, + args: tuple[Any, ...], + kwargs: dict[str, Any], + raw: bool | np.bool_, + function: Callable[..., Any], + ) -> Callable[[np.ndarray, np.ndarray, np.ndarray, int], np.ndarray]: + from pandas import Series + + window_func = partial( + window_aggregations.roll_apply, + args=args, + kwargs=kwargs, + raw=raw, + function=function, + ) + + def apply_func(values, begin, end, min_periods, raw=raw): + if not raw: + # GH 45912 + values = Series(values, index=self._on, copy=False) + return window_func(values, begin, end, min_periods) + + return apply_func + + def sum( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + if self.method == "table": + func = generate_manual_numpy_nan_agg_with_axis(np.nansum) + return self.apply( + func, + raw=True, + engine=engine, + engine_kwargs=engine_kwargs, + ) + else: + from pandas.core._numba.kernels import sliding_sum + + return self._numba_apply(sliding_sum, engine_kwargs) + window_func = window_aggregations.roll_sum + return self._apply(window_func, name="sum", numeric_only=numeric_only) + + def max( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + if self.method == "table": + func = generate_manual_numpy_nan_agg_with_axis(np.nanmax) + return self.apply( + func, + raw=True, + engine=engine, + engine_kwargs=engine_kwargs, + ) + else: + from pandas.core._numba.kernels import sliding_min_max + + return self._numba_apply(sliding_min_max, engine_kwargs, is_max=True) + window_func = window_aggregations.roll_max + return self._apply(window_func, name="max", numeric_only=numeric_only) + + def min( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + if self.method == "table": + func = generate_manual_numpy_nan_agg_with_axis(np.nanmin) + return self.apply( + func, + raw=True, + engine=engine, + engine_kwargs=engine_kwargs, + ) + else: + from pandas.core._numba.kernels import sliding_min_max + + return self._numba_apply(sliding_min_max, engine_kwargs, is_max=False) + window_func = window_aggregations.roll_min + return self._apply(window_func, name="min", numeric_only=numeric_only) + + def mean( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + if self.method == "table": + func = generate_manual_numpy_nan_agg_with_axis(np.nanmean) + return self.apply( + func, + raw=True, + engine=engine, + engine_kwargs=engine_kwargs, 
+ ) + else: + from pandas.core._numba.kernels import sliding_mean + + return self._numba_apply(sliding_mean, engine_kwargs) + window_func = window_aggregations.roll_mean + return self._apply(window_func, name="mean", numeric_only=numeric_only) + + def median( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + if self.method == "table": + func = generate_manual_numpy_nan_agg_with_axis(np.nanmedian) + else: + func = np.nanmedian + + return self.apply( + func, + raw=True, + engine=engine, + engine_kwargs=engine_kwargs, + ) + window_func = window_aggregations.roll_median_c + return self._apply(window_func, name="median", numeric_only=numeric_only) + + def std( + self, + ddof: int = 1, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + if self.method == "table": + raise NotImplementedError("std not supported with method='table'") + from pandas.core._numba.kernels import sliding_var + + return zsqrt(self._numba_apply(sliding_var, engine_kwargs, ddof=ddof)) + window_func = window_aggregations.roll_var + + def zsqrt_func(values, begin, end, min_periods): + return zsqrt(window_func(values, begin, end, min_periods, ddof=ddof)) + + return self._apply( + zsqrt_func, + name="std", + numeric_only=numeric_only, + ) + + def var( + self, + ddof: int = 1, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + if self.method == "table": + raise NotImplementedError("var not supported with method='table'") + from pandas.core._numba.kernels import sliding_var + + return self._numba_apply(sliding_var, engine_kwargs, ddof=ddof) + window_func = partial(window_aggregations.roll_var, ddof=ddof) + return self._apply( + window_func, + name="var", + numeric_only=numeric_only, + ) + + def skew(self, numeric_only: bool = False): + window_func = window_aggregations.roll_skew + return self._apply( + window_func, + name="skew", + numeric_only=numeric_only, + ) + + def sem(self, ddof: int = 1, numeric_only: bool = False): + # Raise here so error message says sem instead of std + self._validate_numeric_only("sem", numeric_only) + return self.std(numeric_only=numeric_only) / ( + self.count(numeric_only=numeric_only) - ddof + ).pow(0.5) + + def kurt(self, numeric_only: bool = False): + window_func = window_aggregations.roll_kurt + return self._apply( + window_func, + name="kurt", + numeric_only=numeric_only, + ) + + def quantile( + self, + q: float, + interpolation: QuantileInterpolation = "linear", + numeric_only: bool = False, + ): + if q == 1.0: + window_func = window_aggregations.roll_max + elif q == 0.0: + window_func = window_aggregations.roll_min + else: + window_func = partial( + window_aggregations.roll_quantile, + quantile=q, + interpolation=interpolation, + ) + + return self._apply(window_func, name="quantile", numeric_only=numeric_only) + + def rank( + self, + method: WindowingRankType = "average", + ascending: bool = True, + pct: bool = False, + numeric_only: bool = False, + ): + window_func = partial( + window_aggregations.roll_rank, + method=method, + ascending=ascending, + percentile=pct, + ) + + return self._apply(window_func, name="rank", numeric_only=numeric_only) + + def cov( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + 
ddof: int = 1, + numeric_only: bool = False, + ): + if self.step is not None: + raise NotImplementedError("step not implemented for cov") + self._validate_numeric_only("cov", numeric_only) + + from pandas import Series + + def cov_func(x, y): + x_array = self._prep_values(x) + y_array = self._prep_values(y) + window_indexer = self._get_window_indexer() + min_periods = ( + self.min_periods + if self.min_periods is not None + else window_indexer.window_size + ) + start, end = window_indexer.get_window_bounds( + num_values=len(x_array), + min_periods=min_periods, + center=self.center, + closed=self.closed, + step=self.step, + ) + self._check_window_bounds(start, end, len(x_array)) + + with np.errstate(all="ignore"): + mean_x_y = window_aggregations.roll_mean( + x_array * y_array, start, end, min_periods + ) + mean_x = window_aggregations.roll_mean(x_array, start, end, min_periods) + mean_y = window_aggregations.roll_mean(y_array, start, end, min_periods) + count_x_y = window_aggregations.roll_sum( + notna(x_array + y_array).astype(np.float64), start, end, 0 + ) + result = (mean_x_y - mean_x * mean_y) * (count_x_y / (count_x_y - ddof)) + return Series(result, index=x.index, name=x.name, copy=False) + + return self._apply_pairwise( + self._selected_obj, other, pairwise, cov_func, numeric_only + ) + + def corr( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + ddof: int = 1, + numeric_only: bool = False, + ): + if self.step is not None: + raise NotImplementedError("step not implemented for corr") + self._validate_numeric_only("corr", numeric_only) + + from pandas import Series + + def corr_func(x, y): + x_array = self._prep_values(x) + y_array = self._prep_values(y) + window_indexer = self._get_window_indexer() + min_periods = ( + self.min_periods + if self.min_periods is not None + else window_indexer.window_size + ) + start, end = window_indexer.get_window_bounds( + num_values=len(x_array), + min_periods=min_periods, + center=self.center, + closed=self.closed, + step=self.step, + ) + self._check_window_bounds(start, end, len(x_array)) + + with np.errstate(all="ignore"): + mean_x_y = window_aggregations.roll_mean( + x_array * y_array, start, end, min_periods + ) + mean_x = window_aggregations.roll_mean(x_array, start, end, min_periods) + mean_y = window_aggregations.roll_mean(y_array, start, end, min_periods) + count_x_y = window_aggregations.roll_sum( + notna(x_array + y_array).astype(np.float64), start, end, 0 + ) + x_var = window_aggregations.roll_var( + x_array, start, end, min_periods, ddof + ) + y_var = window_aggregations.roll_var( + y_array, start, end, min_periods, ddof + ) + numerator = (mean_x_y - mean_x * mean_y) * ( + count_x_y / (count_x_y - ddof) + ) + denominator = (x_var * y_var) ** 0.5 + result = numerator / denominator + return Series(result, index=x.index, name=x.name, copy=False) + + return self._apply_pairwise( + self._selected_obj, other, pairwise, corr_func, numeric_only + ) + + +class Rolling(RollingAndExpandingMixin): + _attributes: list[str] = [ + "window", + "min_periods", + "center", + "win_type", + "axis", + "on", + "closed", + "step", + "method", + ] + + def _validate(self): + super()._validate() + + # we allow rolling on a datetimelike index + if ( + self.obj.empty + or isinstance(self._on, (DatetimeIndex, TimedeltaIndex, PeriodIndex)) + ) and isinstance(self.window, (str, BaseOffset, timedelta)): + self._validate_datetimelike_monotonic() + + # this will raise ValueError on non-fixed freqs + try: + freq = to_offset(self.window) 
+ except (TypeError, ValueError) as err: + raise ValueError( + f"passed window {self.window} is not " + "compatible with a datetimelike index" + ) from err + if isinstance(self._on, PeriodIndex): + # error: Incompatible types in assignment (expression has type + # "float", variable has type "Optional[int]") + self._win_freq_i8 = freq.nanos / ( # type: ignore[assignment] + self._on.freq.nanos / self._on.freq.n + ) + else: + try: + unit = dtype_to_unit(self._on.dtype) # type: ignore[arg-type] + except TypeError: + # if not a datetime dtype, eg for empty dataframes + unit = "ns" + self._win_freq_i8 = Timedelta(freq.nanos).as_unit(unit)._value + + # min_periods must be an integer + if self.min_periods is None: + self.min_periods = 1 + + if self.step is not None: + raise NotImplementedError( + "step is not supported with frequency windows" + ) + + elif isinstance(self.window, BaseIndexer): + # Passed BaseIndexer subclass should handle all other rolling kwargs + pass + elif not is_integer(self.window) or self.window < 0: + raise ValueError("window must be an integer 0 or greater") + + def _validate_datetimelike_monotonic(self) -> None: + """ + Validate self._on is monotonic (increasing or decreasing) and has + no NaT values for frequency windows. + """ + if self._on.hasnans: + self._raise_monotonic_error("values must not have NaT") + if not (self._on.is_monotonic_increasing or self._on.is_monotonic_decreasing): + self._raise_monotonic_error("values must be monotonic") + + def _raise_monotonic_error(self, msg: str): + on = self.on + if on is None: + if self.axis == 0: + on = "index" + else: + on = "column" + raise ValueError(f"{on} {msg}") + + @doc( + _shared_docs["aggregate"], + see_also=dedent( + """ + See Also + -------- + pandas.Series.rolling : Calling object with Series data. + pandas.DataFrame.rolling : Calling object with DataFrame data. 
+ """ + ), + examples=dedent( + """ + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + >>> df + A B C + 0 1 4 7 + 1 2 5 8 + 2 3 6 9 + + >>> df.rolling(2).sum() + A B C + 0 NaN NaN NaN + 1 3.0 9.0 15.0 + 2 5.0 11.0 17.0 + + >>> df.rolling(2).agg({"A": "sum", "B": "min"}) + A B + 0 NaN NaN + 1 3.0 4.0 + 2 5.0 5.0 + """ + ), + klass="Series/Dataframe", + axis="", + ) + def aggregate(self, func, *args, **kwargs): + return super().aggregate(func, *args, **kwargs) + + agg = aggregate + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([2, 3, np.nan, 10]) + >>> s.rolling(2).count() + 0 NaN + 1 2.0 + 2 1.0 + 3 1.0 + dtype: float64 + >>> s.rolling(3).count() + 0 NaN + 1 NaN + 2 2.0 + 3 2.0 + dtype: float64 + >>> s.rolling(4).count() + 0 NaN + 1 NaN + 2 NaN + 3 3.0 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="count of non NaN observations", + agg_method="count", + ) + def count(self, numeric_only: bool = False): + return super().count(numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + window_apply_parameters, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 6, 5, 4]) + >>> ser.rolling(2).apply(lambda s: s.sum() - s.min()) + 0 NaN + 1 6.0 + 2 6.0 + 3 5.0 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="custom aggregation function", + agg_method="apply", + ) + def apply( + self, + func: Callable[..., Any], + raw: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + args: tuple[Any, ...] | None = None, + kwargs: dict[str, Any] | None = None, + ): + return super().apply( + func, + raw=raw, + engine=engine, + engine_kwargs=engine_kwargs, + args=args, + kwargs=kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([1, 2, 3, 4, 5]) + >>> s + 0 1 + 1 2 + 2 3 + 3 4 + 4 5 + dtype: int64 + + >>> s.rolling(3).sum() + 0 NaN + 1 NaN + 2 6.0 + 3 9.0 + 4 12.0 + dtype: float64 + + >>> s.rolling(3, center=True).sum() + 0 NaN + 1 6.0 + 2 9.0 + 3 12.0 + 4 NaN + dtype: float64 + + For DataFrame, each sum is computed column-wise. 
+ + >>> df = pd.DataFrame({{"A": s, "B": s ** 2}}) + >>> df + A B + 0 1 1 + 1 2 4 + 2 3 9 + 3 4 16 + 4 5 25 + + >>> df.rolling(3).sum() + A B + 0 NaN NaN + 1 NaN NaN + 2 6.0 14.0 + 3 9.0 29.0 + 4 12.0 50.0 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="sum", + agg_method="sum", + ) + def sum( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().sum( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4]) + >>> ser.rolling(2).max() + 0 NaN + 1 2.0 + 2 3.0 + 3 4.0 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="maximum", + agg_method="max", + ) + def max( + self, + numeric_only: bool = False, + *args, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + **kwargs, + ): + return super().max( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """ + Performing a rolling minimum with a window size of 3. + + >>> s = pd.Series([4, 3, 5, 2, 6]) + >>> s.rolling(3).min() + 0 NaN + 1 NaN + 2 3.0 + 3 2.0 + 4 2.0 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="minimum", + agg_method="min", + ) + def min( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().min( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """ + The below examples will show rolling mean calculations with window sizes of + two and three, respectively. 
+ + >>> s = pd.Series([1, 2, 3, 4]) + >>> s.rolling(2).mean() + 0 NaN + 1 1.5 + 2 2.5 + 3 3.5 + dtype: float64 + + >>> s.rolling(3).mean() + 0 NaN + 1 NaN + 2 2.0 + 3 3.0 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="mean", + agg_method="mean", + ) + def mean( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().mean( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """ + Compute the rolling median of a series with a window size of 3. + + >>> s = pd.Series([0, 1, 2, 3, 4]) + >>> s.rolling(3).median() + 0 NaN + 1 NaN + 2 1.0 + 3 2.0 + 4 3.0 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="median", + agg_method="median", + ) + def median( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().median( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + window_agg_numba_parameters("1.4"), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "numpy.std : Equivalent method for NumPy array.\n", + template_see_also, + create_section_header("Notes"), + dedent( + """ + The default ``ddof`` of 1 used in :meth:`Series.std` is different + than the default ``ddof`` of 0 in :func:`numpy.std`. + + A minimum of one period is required for the rolling calculation.\n + """ + ).replace("\n", "", 1), + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5]) + >>> s.rolling(3).std() + 0 NaN + 1 NaN + 2 0.577350 + 3 1.000000 + 4 1.000000 + 5 1.154701 + 6 0.000000 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="standard deviation", + agg_method="std", + ) + def std( + self, + ddof: int = 1, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().std( + ddof=ddof, + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. 
+ """ + ).replace("\n", "", 1), + kwargs_numeric_only, + window_agg_numba_parameters("1.4"), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "numpy.var : Equivalent method for NumPy array.\n", + template_see_also, + create_section_header("Notes"), + dedent( + """ + The default ``ddof`` of 1 used in :meth:`Series.var` is different + than the default ``ddof`` of 0 in :func:`numpy.var`. + + A minimum of one period is required for the rolling calculation.\n + """ + ).replace("\n", "", 1), + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5]) + >>> s.rolling(3).var() + 0 NaN + 1 NaN + 2 0.333333 + 3 1.000000 + 4 1.000000 + 5 1.333333 + 6 0.000000 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="variance", + agg_method="var", + ) + def var( + self, + ddof: int = 1, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().var( + ddof=ddof, + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "scipy.stats.skew : Third moment of a probability density.\n", + template_see_also, + create_section_header("Notes"), + dedent( + """ + A minimum of three periods is required for the rolling calculation.\n + """ + ), + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 5, 2, 7, 12, 6]) + >>> ser.rolling(3).skew().round(6) + 0 NaN + 1 NaN + 2 1.293343 + 3 -0.585583 + 4 0.000000 + 5 1.545393 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="unbiased skewness", + agg_method="skew", + ) + def skew(self, numeric_only: bool = False): + return super().skew(numeric_only=numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. 
+ """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + "A minimum of one period is required for the calculation.\n\n", + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([0, 1, 2, 3]) + >>> s.rolling(2, min_periods=1).sem() + 0 NaN + 1 0.707107 + 2 0.707107 + 3 0.707107 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="standard error of mean", + agg_method="sem", + ) + def sem(self, ddof: int = 1, numeric_only: bool = False): + # Raise here so error message says sem instead of std + self._validate_numeric_only("sem", numeric_only) + return self.std(numeric_only=numeric_only) / ( + self.count(numeric_only) - ddof + ).pow(0.5) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "scipy.stats.kurtosis : Reference SciPy method.\n", + template_see_also, + create_section_header("Notes"), + "A minimum of four periods is required for the calculation.\n\n", + create_section_header("Examples"), + dedent( + """ + The example below will show a rolling calculation with a window size of + four matching the equivalent function call using `scipy.stats`. + + >>> arr = [1, 2, 3, 4, 999] + >>> import scipy.stats + >>> print(f"{{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}}") + -1.200000 + >>> print(f"{{scipy.stats.kurtosis(arr[1:], bias=False):.6f}}") + 3.999946 + >>> s = pd.Series(arr) + >>> s.rolling(4).kurt() + 0 NaN + 1 NaN + 2 NaN + 3 -1.200000 + 4 3.999946 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="Fisher's definition of kurtosis without bias", + agg_method="kurt", + ) + def kurt(self, numeric_only: bool = False): + return super().kurt(numeric_only=numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + quantile : float + Quantile to compute. 0 <= quantile <= 1. + + .. deprecated:: 2.1.0 + This will be renamed to 'q' in a future version. + interpolation : {{'linear', 'lower', 'higher', 'midpoint', 'nearest'}} + This optional parameter specifies the interpolation method to use, + when the desired quantile lies between two data points `i` and `j`: + + * linear: `i + (j - i) * fraction`, where `fraction` is the + fractional part of the index surrounded by `i` and `j`. + * lower: `i`. + * higher: `j`. + * nearest: `i` or `j` whichever is nearest. + * midpoint: (`i` + `j`) / 2. 
+ """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([1, 2, 3, 4]) + >>> s.rolling(2).quantile(.4, interpolation='lower') + 0 NaN + 1 1.0 + 2 2.0 + 3 3.0 + dtype: float64 + + >>> s.rolling(2).quantile(.4, interpolation='midpoint') + 0 NaN + 1 1.5 + 2 2.5 + 3 3.5 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="quantile", + agg_method="quantile", + ) + @deprecate_kwarg(old_arg_name="quantile", new_arg_name="q") + def quantile( + self, + q: float, + interpolation: QuantileInterpolation = "linear", + numeric_only: bool = False, + ): + return super().quantile( + q=q, + interpolation=interpolation, + numeric_only=numeric_only, + ) + + @doc( + template_header, + ".. versionadded:: 1.4.0 \n\n", + create_section_header("Parameters"), + dedent( + """ + method : {{'average', 'min', 'max'}}, default 'average' + How to rank the group of records that have the same value (i.e. ties): + + * average: average rank of the group + * min: lowest rank in the group + * max: highest rank in the group + + ascending : bool, default True + Whether or not the elements should be ranked in ascending order. + pct : bool, default False + Whether or not to display the returned rankings in percentile + form. + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([1, 4, 2, 3, 5, 3]) + >>> s.rolling(3).rank() + 0 NaN + 1 NaN + 2 2.0 + 3 2.0 + 4 3.0 + 5 1.5 + dtype: float64 + + >>> s.rolling(3).rank(method="max") + 0 NaN + 1 NaN + 2 2.0 + 3 2.0 + 4 3.0 + 5 2.0 + dtype: float64 + + >>> s.rolling(3).rank(method="min") + 0 NaN + 1 NaN + 2 2.0 + 3 2.0 + 4 3.0 + 5 1.0 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="rank", + agg_method="rank", + ) + def rank( + self, + method: WindowingRankType = "average", + ascending: bool = True, + pct: bool = False, + numeric_only: bool = False, + ): + return super().rank( + method=method, + ascending=ascending, + pct=pct, + numeric_only=numeric_only, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + other : Series or DataFrame, optional + If not supplied then will default to self and produce pairwise + output. + pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndexed DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. 
+ """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser1 = pd.Series([1, 2, 3, 4]) + >>> ser2 = pd.Series([1, 4, 5, 8]) + >>> ser1.rolling(2).cov(ser2) + 0 NaN + 1 1.5 + 2 0.5 + 3 1.5 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="sample covariance", + agg_method="cov", + ) + def cov( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + ddof: int = 1, + numeric_only: bool = False, + ): + return super().cov( + other=other, + pairwise=pairwise, + ddof=ddof, + numeric_only=numeric_only, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + other : Series or DataFrame, optional + If not supplied then will default to self and produce pairwise + output. + pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndexed DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + dedent( + """ + cov : Similar method to calculate covariance. + numpy.corrcoef : NumPy Pearson's correlation calculation. + """ + ).replace("\n", "", 1), + template_see_also, + create_section_header("Notes"), + dedent( + """ + This function uses Pearson's definition of correlation + (https://en.wikipedia.org/wiki/Pearson_correlation_coefficient). + + When `other` is not specified, the output will be self correlation (e.g. + all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise` + set to `True`. + + Function will return ``NaN`` for correlations of equal valued sequences; + this is the result of a 0/0 division error. + + When `pairwise` is set to `False`, only matching columns between `self` and + `other` will be used. + + When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame + with the original index on the first level, and the `other` DataFrame + columns on the second level. + + In the case of missing elements, only complete pairwise observations + will be used.\n + """ + ).replace("\n", "", 1), + create_section_header("Examples"), + dedent( + """ + The below example shows a rolling calculation with a window size of + four matching the equivalent function call using :meth:`numpy.corrcoef`. + + >>> v1 = [3, 3, 3, 5, 8] + >>> v2 = [3, 4, 4, 4, 8] + >>> # numpy returns a 2X2 array, the correlation coefficient + >>> # is the number at entry [0][1] + >>> print(f"{{np.corrcoef(v1[:-1], v2[:-1])[0][1]:.6f}}") + 0.333333 + >>> print(f"{{np.corrcoef(v1[1:], v2[1:])[0][1]:.6f}}") + 0.916949 + >>> s1 = pd.Series(v1) + >>> s2 = pd.Series(v2) + >>> s1.rolling(4).corr(s2) + 0 NaN + 1 NaN + 2 NaN + 3 0.333333 + 4 0.916949 + dtype: float64 + + The below example shows a similar rolling calculation on a + DataFrame using the pairwise option. 
+ + >>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\ + [46., 31.], [50., 36.]]) + >>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7)) + [[1. 0.6263001] + [0.6263001 1. ]] + >>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7)) + [[1. 0.5553681] + [0.5553681 1. ]] + >>> df = pd.DataFrame(matrix, columns=['X','Y']) + >>> df + X Y + 0 51.0 35.0 + 1 49.0 30.0 + 2 47.0 32.0 + 3 46.0 31.0 + 4 50.0 36.0 + >>> df.rolling(4).corr(pairwise=True) + X Y + 0 X NaN NaN + Y NaN NaN + 1 X NaN NaN + Y NaN NaN + 2 X NaN NaN + Y NaN NaN + 3 X 1.000000 0.626300 + Y 0.626300 1.000000 + 4 X 1.000000 0.555368 + Y 0.555368 1.000000 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="correlation", + agg_method="corr", + ) + def corr( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + ddof: int = 1, + numeric_only: bool = False, + ): + return super().corr( + other=other, + pairwise=pairwise, + ddof=ddof, + numeric_only=numeric_only, + ) + + +Rolling.__doc__ = Window.__doc__ + + +class RollingGroupby(BaseWindowGroupby, Rolling): + """ + Provide a rolling groupby implementation. + """ + + _attributes = Rolling._attributes + BaseWindowGroupby._attributes + + def _get_window_indexer(self) -> GroupbyIndexer: + """ + Return an indexer class that will compute the window start and end bounds + + Returns + ------- + GroupbyIndexer + """ + rolling_indexer: type[BaseIndexer] + indexer_kwargs: dict[str, Any] | None = None + index_array = self._index_array + if isinstance(self.window, BaseIndexer): + rolling_indexer = type(self.window) + indexer_kwargs = self.window.__dict__.copy() + assert isinstance(indexer_kwargs, dict) # for mypy + # We'll be using the index of each group later + indexer_kwargs.pop("index_array", None) + window = self.window + elif self._win_freq_i8 is not None: + rolling_indexer = VariableWindowIndexer + # error: Incompatible types in assignment (expression has type + # "int", variable has type "BaseIndexer") + window = self._win_freq_i8 # type: ignore[assignment] + else: + rolling_indexer = FixedWindowIndexer + window = self.window + window_indexer = GroupbyIndexer( + index_array=index_array, + window_size=window, + groupby_indices=self._grouper.indices, + window_indexer=rolling_indexer, + indexer_kwargs=indexer_kwargs, + ) + return window_indexer + + def _validate_datetimelike_monotonic(self): + """ + Validate that each group in self._on is monotonic + """ + # GH 46061 + if self._on.hasnans: + self._raise_monotonic_error("values must not have NaT") + for group_indices in self._grouper.indices.values(): + group_on = self._on.take(group_indices) + if not ( + group_on.is_monotonic_increasing or group_on.is_monotonic_decreasing + ): + on = "index" if self.on is None else self.on + raise ValueError( + f"Each group within {on} must be monotonic. " + f"Sort the values in {on} first." 
+ ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/errors/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/errors/__init__.py new file mode 100644 index 00000000..09a612ec --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/errors/__init__.py @@ -0,0 +1,803 @@ +""" +Expose public exceptions & warnings +""" +from __future__ import annotations + +import ctypes + +from pandas._config.config import OptionError + +from pandas._libs.tslibs import ( + OutOfBoundsDatetime, + OutOfBoundsTimedelta, +) + +from pandas.util.version import InvalidVersion + + +class IntCastingNaNError(ValueError): + """ + Exception raised when converting (``astype``) an array with NaN to an integer type. + + Examples + -------- + >>> pd.DataFrame(np.array([[1, np.nan], [2, 3]]), dtype="i8") + Traceback (most recent call last): + IntCastingNaNError: Cannot convert non-finite values (NA or inf) to integer + """ + + +class NullFrequencyError(ValueError): + """ + Exception raised when a ``freq`` cannot be null. + + Particularly ``DatetimeIndex.shift``, ``TimedeltaIndex.shift``, + ``PeriodIndex.shift``. + + Examples + -------- + >>> df = pd.DatetimeIndex(["2011-01-01 10:00", "2011-01-01"], freq=None) + >>> df.shift(2) + Traceback (most recent call last): + NullFrequencyError: Cannot shift with no freq + """ + + +class PerformanceWarning(Warning): + """ + Warning raised when there is a possible performance impact. + + Examples + -------- + >>> df = pd.DataFrame({"jim": [0, 0, 1, 1], + ... "joe": ["x", "x", "z", "y"], + ... "jolie": [1, 2, 3, 4]}) + >>> df = df.set_index(["jim", "joe"]) + >>> df + jolie + jim joe + 0 x 1 + x 2 + 1 z 3 + y 4 + >>> df.loc[(1, 'z')] # doctest: +SKIP + # PerformanceWarning: indexing past lexsort depth may impact performance. + df.loc[(1, 'z')] + jolie + jim joe + 1 z 3 + """ + + +class UnsupportedFunctionCall(ValueError): + """ + Exception raised when attempting to call a unsupported numpy function. + + For example, ``np.cumsum(groupby_object)``. + + Examples + -------- + >>> df = pd.DataFrame({"A": [0, 0, 1, 1], + ... "B": ["x", "x", "z", "y"], + ... "C": [1, 2, 3, 4]} + ... ) + >>> np.cumsum(df.groupby(["A"])) + Traceback (most recent call last): + UnsupportedFunctionCall: numpy operations are not valid with groupby. + Use .groupby(...).cumsum() instead + """ + + +class UnsortedIndexError(KeyError): + """ + Error raised when slicing a MultiIndex which has not been lexsorted. + + Subclass of `KeyError`. + + Examples + -------- + >>> df = pd.DataFrame({"cat": [0, 0, 1, 1], + ... "color": ["white", "white", "brown", "black"], + ... "lives": [4, 4, 3, 7]}, + ... ) + >>> df = df.set_index(["cat", "color"]) + >>> df + lives + cat color + 0 white 4 + white 4 + 1 brown 3 + black 7 + >>> df.loc[(0, "black"):(1, "white")] + Traceback (most recent call last): + UnsortedIndexError: 'Key length (2) was greater + than MultiIndex lexsort depth (1)' + """ + + +class ParserError(ValueError): + """ + Exception that is raised by an error encountered in parsing file contents. + + This is a generic error raised for errors encountered when functions like + `read_csv` or `read_html` are parsing contents of a file. + + See Also + -------- + read_csv : Read CSV (comma-separated) file into a DataFrame. + read_html : Read HTML table into a DataFrame. + + Examples + -------- + >>> data = '''a,b,c + ... cat,foo,bar + ... 
dog,foo,"baz''' + >>> from io import StringIO + >>> pd.read_csv(StringIO(data), skipfooter=1, engine='python') + Traceback (most recent call last): + ParserError: ',' expected after '"'. Error could possibly be due + to parsing errors in the skipped footer rows + """ + + +class DtypeWarning(Warning): + """ + Warning raised when reading different dtypes in a column from a file. + + Raised for a dtype incompatibility. This can happen whenever `read_csv` + or `read_table` encounter non-uniform dtypes in a column(s) of a given + CSV file. + + See Also + -------- + read_csv : Read CSV (comma-separated) file into a DataFrame. + read_table : Read general delimited file into a DataFrame. + + Notes + ----- + This warning is issued when dealing with larger files because the dtype + checking happens per chunk read. + + Despite the warning, the CSV file is read with mixed types in a single + column which will be an object type. See the examples below to better + understand this issue. + + Examples + -------- + This example creates and reads a large CSV file with a column that contains + `int` and `str`. + + >>> df = pd.DataFrame({'a': (['1'] * 100000 + ['X'] * 100000 + + ... ['1'] * 100000), + ... 'b': ['b'] * 300000}) # doctest: +SKIP + >>> df.to_csv('test.csv', index=False) # doctest: +SKIP + >>> df2 = pd.read_csv('test.csv') # doctest: +SKIP + ... # DtypeWarning: Columns (0) have mixed types + + Important to notice that ``df2`` will contain both `str` and `int` for the + same input, '1'. + + >>> df2.iloc[262140, 0] # doctest: +SKIP + '1' + >>> type(df2.iloc[262140, 0]) # doctest: +SKIP + + >>> df2.iloc[262150, 0] # doctest: +SKIP + 1 + >>> type(df2.iloc[262150, 0]) # doctest: +SKIP + + + One way to solve this issue is using the `dtype` parameter in the + `read_csv` and `read_table` functions to explicit the conversion: + + >>> df2 = pd.read_csv('test.csv', sep=',', dtype={'a': str}) # doctest: +SKIP + + No warning was issued. + """ + + +class EmptyDataError(ValueError): + """ + Exception raised in ``pd.read_csv`` when empty data or header is encountered. + + Examples + -------- + >>> from io import StringIO + >>> empty = StringIO() + >>> pd.read_csv(empty) + Traceback (most recent call last): + EmptyDataError: No columns to parse from file + """ + + +class ParserWarning(Warning): + """ + Warning raised when reading a file that doesn't use the default 'c' parser. + + Raised by `pd.read_csv` and `pd.read_table` when it is necessary to change + parsers, generally from the default 'c' parser to 'python'. + + It happens due to a lack of support or functionality for parsing a + particular attribute of a CSV file with the requested engine. + + Currently, 'c' unsupported options include the following parameters: + + 1. `sep` other than a single character (e.g. regex separators) + 2. `skipfooter` higher than 0 + 3. `sep=None` with `delim_whitespace=False` + + The warning can be avoided by adding `engine='python'` as a parameter in + `pd.read_csv` and `pd.read_table` methods. + + See Also + -------- + pd.read_csv : Read CSV (comma-separated) file into DataFrame. + pd.read_table : Read general delimited file into DataFrame. + + Examples + -------- + Using a `sep` in `pd.read_csv` other than a single character: + + >>> import io + >>> csv = '''a;b;c + ... 1;1,8 + ... 1;2,1''' + >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]') # doctest: +SKIP + ... # ParserWarning: Falling back to the 'python' engine... 
+
+    Adding `engine='python'` to `pd.read_csv` removes the Warning:
+
+    >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]', engine='python')
+    """
+
+
+class MergeError(ValueError):
+    """
+    Exception raised when merging data.
+
+    Subclass of ``ValueError``.
+
+    Examples
+    --------
+    >>> left = pd.DataFrame({"a": ["a", "b", "b", "d"],
+    ...                      "b": ["cat", "dog", "weasel", "horse"]},
+    ...                     index=range(4))
+    >>> right = pd.DataFrame({"a": ["a", "b", "c", "d"],
+    ...                       "c": ["meow", "bark", "chirp", "nay"]},
+    ...                      index=range(4)).set_index("a")
+    >>> left.join(right, on="a", validate="one_to_one",)
+    Traceback (most recent call last):
+    MergeError: Merge keys are not unique in left dataset; not a one-to-one merge
+    """
+
+
+class AbstractMethodError(NotImplementedError):
+    """
+    Raise this error instead of NotImplementedError for abstract methods.
+
+    Examples
+    --------
+    >>> class Foo:
+    ...     @classmethod
+    ...     def classmethod(cls):
+    ...         raise pd.errors.AbstractMethodError(cls, methodtype="classmethod")
+    ...     def method(self):
+    ...         raise pd.errors.AbstractMethodError(self)
+    >>> test = Foo.classmethod()
+    Traceback (most recent call last):
+    AbstractMethodError: This classmethod must be defined in the concrete class Foo
+
+    >>> test2 = Foo().method()
+    Traceback (most recent call last):
+    AbstractMethodError: This method must be defined in the concrete class Foo
+    """
+
+    def __init__(self, class_instance, methodtype: str = "method") -> None:
+        types = {"method", "classmethod", "staticmethod", "property"}
+        if methodtype not in types:
+            raise ValueError(
+                f"methodtype must be one of {types}, got {methodtype} instead."
+            )
+        self.methodtype = methodtype
+        self.class_instance = class_instance
+
+    def __str__(self) -> str:
+        if self.methodtype == "classmethod":
+            name = self.class_instance.__name__
+        else:
+            name = type(self.class_instance).__name__
+        return f"This {self.methodtype} must be defined in the concrete class {name}"
+
+
+class NumbaUtilError(Exception):
+    """
+    Error raised for unsupported Numba engine routines.
+
+    Examples
+    --------
+    >>> df = pd.DataFrame({"key": ["a", "a", "b", "b"], "data": [1, 2, 3, 4]},
+    ...                   columns=["key", "data"])
+    >>> def incorrect_function(x):
+    ...     return sum(x) * 2.7
+    >>> df.groupby("key").agg(incorrect_function, engine="numba")
+    Traceback (most recent call last):
+    NumbaUtilError: The first 2 arguments to incorrect_function
+    must be ['values', 'index']
+    """
+
+
+class DuplicateLabelError(ValueError):
+    """
+    Error raised when an operation would introduce duplicate labels.
+
+    .. versionadded:: 1.2.0
+
+    Examples
+    --------
+    >>> s = pd.Series([0, 1, 2], index=['a', 'b', 'c']).set_flags(
+    ...     allows_duplicate_labels=False
+    ... )
+    >>> s.reindex(['a', 'a', 'b'])
+    Traceback (most recent call last):
+        ...
+    DuplicateLabelError: Index has duplicates.
+          positions
+    label
+    a        [0, 1]
+    """
+
+
+class InvalidIndexError(Exception):
+    """
+    Exception raised when attempting to use an invalid index key.
+
+    Examples
+    --------
+    >>> idx = pd.MultiIndex.from_product([["x", "y"], [0, 1]])
+    >>> df = pd.DataFrame([[1, 1, 2, 2],
+    ...                   [3, 3, 4, 4]], columns=idx)
+    >>> df
+       x     y
+       0  1  0  1
+    0  1  1  2  2
+    1  3  3  4  4
+    >>> df[:, 0]
+    Traceback (most recent call last):
+    InvalidIndexError: (slice(None, None, None), 0)
+    """
+
+
+class DataError(Exception):
+    """
+    Exception raised when performing an operation on non-numerical data.
+
+    For example, calling ``ohlc`` on a non-numerical column or a function
+    on a rolling window.
+
+    Examples
+    --------
+    >>> ser = pd.Series(['a', 'b', 'c'])
+    >>> ser.rolling(2).sum()
+    Traceback (most recent call last):
+    DataError: No numeric types to aggregate
+    """
+
+
+class SpecificationError(Exception):
+    """
+    Exception raised by ``agg`` when the functions are ill-specified.
+
+    The exception is raised in two scenarios.
+
+    The first is calling ``agg`` on a
+    DataFrame or Series using a nested renamer (dict-of-dict).
+
+    The second is calling ``agg`` on a DataFrame with duplicated function
+    names without assigning column names.
+
+    Examples
+    --------
+    >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2],
+    ...                    'B': range(5),
+    ...                    'C': range(5)})
+    >>> df.groupby('A').B.agg({'foo': 'count'})  # doctest: +SKIP
+    ... # SpecificationError: nested renamer is not supported
+
+    >>> df.groupby('A').agg({'B': {'foo': ['sum', 'max']}})  # doctest: +SKIP
+    ... # SpecificationError: nested renamer is not supported
+
+    >>> df.groupby('A').agg(['min', 'min'])  # doctest: +SKIP
+    ... # SpecificationError: Function names must be unique if there is
+    ... # no new column names assigned
+    """
+
+
+class SettingWithCopyError(ValueError):
+    """
+    Exception raised when trying to set on a copied slice from a ``DataFrame``.
+
+    The ``mode.chained_assignment`` option needs to be set to 'raise'. This can
+    happen unintentionally when using chained indexing.
+
+    For more information on evaluation order,
+    see :ref:`the user guide<indexing.evaluation_order>`.
+
+    For more information on view vs. copy,
+    see :ref:`the user guide<indexing.view_versus_copy>`.
+
+    Examples
+    --------
+    >>> pd.options.mode.chained_assignment = 'raise'
+    >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A'])
+    >>> df.loc[0:3]['A'] = 'a'  # doctest: +SKIP
+    ... # SettingWithCopyError: A value is trying to be set on a copy of a...
+    """
+
+
+class SettingWithCopyWarning(Warning):
+    """
+    Warning raised when trying to set on a copied slice from a ``DataFrame``.
+
+    The ``mode.chained_assignment`` option needs to be set to 'warn'.
+    'Warn' is the default option. This can happen unintentionally when
+    using chained indexing.
+
+    For more information on evaluation order,
+    see :ref:`the user guide<indexing.evaluation_order>`.
+
+    For more information on view vs. copy,
+    see :ref:`the user guide<indexing.view_versus_copy>`.
+
+    Examples
+    --------
+    >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A'])
+    >>> df.loc[0:3]['A'] = 'a'  # doctest: +SKIP
+    ... # SettingWithCopyWarning: A value is trying to be set on a copy of a...
+    """
+
+
+class ChainedAssignmentError(Warning):
+    """
+    Warning raised when trying to set using chained assignment.
+
+    When the ``mode.copy_on_write`` option is enabled, chained assignment can
+    never work. In such a situation, we are always setting into a temporary
+    object that is the result of an indexing operation (getitem), which under
+    Copy-on-Write always behaves as a copy. Thus, assigning through a chain
+    can never update the original Series or DataFrame.
+
+    For more information on view vs. copy,
+    see :ref:`the user guide<indexing.view_versus_copy>`.
+
+    Examples
+    --------
+    >>> pd.options.mode.copy_on_write = True
+    >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A'])
+    >>> df["A"][0:3] = 10  # doctest: +SKIP
+    ... # ChainedAssignmentError: ...
+ >>> pd.options.mode.copy_on_write = False + """ + + +_chained_assignment_msg = ( + "A value is trying to be set on a copy of a DataFrame or Series " + "through chained assignment.\n" + "When using the Copy-on-Write mode, such chained assignment never works " + "to update the original DataFrame or Series, because the intermediate " + "object on which we are setting values always behaves as a copy.\n\n" + "Try using '.loc[row_indexer, col_indexer] = value' instead, to perform " + "the assignment in a single step.\n\n" + "See the caveats in the documentation: " + "https://pandas.pydata.org/pandas-docs/stable/user_guide/" + "indexing.html#returning-a-view-versus-a-copy" +) + + +_chained_assignment_method_msg = ( + "A value is trying to be set on a copy of a DataFrame or Series " + "through chained assignment using an inplace method.\n" + "When using the Copy-on-Write mode, such inplace method never works " + "to update the original DataFrame or Series, because the intermediate " + "object on which we are setting values always behaves as a copy.\n\n" + "For example, when doing 'df[col].method(value, inplace=True)', try " + "using 'df.method({col: value}, inplace=True)' instead, to perform " + "the operation inplace on the original object.\n\n" +) + + +class NumExprClobberingError(NameError): + """ + Exception raised when trying to use a built-in numexpr name as a variable name. + + ``eval`` or ``query`` will throw the error if the engine is set + to 'numexpr'. 'numexpr' is the default engine value for these methods if the + numexpr package is installed. + + Examples + -------- + >>> df = pd.DataFrame({'abs': [1, 1, 1]}) + >>> df.query("abs > 2") # doctest: +SKIP + ... # NumExprClobberingError: Variables in expression "(abs) > (2)" overlap... + >>> sin, a = 1, 2 + >>> pd.eval("sin + a", engine='numexpr') # doctest: +SKIP + ... # NumExprClobberingError: Variables in expression "(sin) + (a)" overlap... + """ + + +class UndefinedVariableError(NameError): + """ + Exception raised by ``query`` or ``eval`` when using an undefined variable name. + + It will also specify whether the undefined variable is local or not. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 1, 1]}) + >>> df.query("A > x") # doctest: +SKIP + ... # UndefinedVariableError: name 'x' is not defined + >>> df.query("A > @y") # doctest: +SKIP + ... # UndefinedVariableError: local variable 'y' is not defined + >>> pd.eval('x + 1') # doctest: +SKIP + ... # UndefinedVariableError: name 'x' is not defined + """ + + def __init__(self, name: str, is_local: bool | None = None) -> None: + base_msg = f"{repr(name)} is not defined" + if is_local: + msg = f"local variable {base_msg}" + else: + msg = f"name {base_msg}" + super().__init__(msg) + + +class IndexingError(Exception): + """ + Exception is raised when trying to index and there is a mismatch in dimensions. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 1, 1]}) + >>> df.loc[..., ..., 'A'] # doctest: +SKIP + ... # IndexingError: indexer may only contain one '...' entry + >>> df = pd.DataFrame({'A': [1, 1, 1]}) + >>> df.loc[1, ..., ...] # doctest: +SKIP + ... # IndexingError: Too many indexers + >>> df[pd.Series([True], dtype=bool)] # doctest: +SKIP + ... # IndexingError: Unalignable boolean Series provided as indexer... + >>> s = pd.Series(range(2), + ... index = pd.MultiIndex.from_product([["a", "b"], ["c"]])) + >>> s.loc["a", "c", "d"] # doctest: +SKIP + ... 
# IndexingError: Too many indexers
+    """
+
+
+class PyperclipException(RuntimeError):
+    """
+    Exception raised when clipboard functionality is unsupported.
+
+    Raised by ``to_clipboard()`` and ``read_clipboard()``.
+    """
+
+
+class PyperclipWindowsException(PyperclipException):
+    """
+    Exception raised when clipboard functionality is unsupported by Windows.
+
+    Access to the clipboard handle is denied because another
+    window process is accessing it.
+    """
+
+    def __init__(self, message: str) -> None:
+        # attr only exists on Windows, so typing fails on other platforms
+        message += f" ({ctypes.WinError()})"  # type: ignore[attr-defined]
+        super().__init__(message)
+
+
+class CSSWarning(UserWarning):
+    """
+    Warning raised when converting CSS styling fails.
+
+    This can be due to the styling not having an equivalent value or because the
+    styling isn't properly formatted.
+
+    Examples
+    --------
+    >>> df = pd.DataFrame({'A': [1, 1, 1]})
+    >>> df.style.applymap(
+    ...     lambda x: 'background-color: blueGreenRed;'
+    ... ).to_excel('styled.xlsx')  # doctest: +SKIP
+    CSSWarning: Unhandled color format: 'blueGreenRed'
+    >>> df.style.applymap(
+    ...     lambda x: 'border: 1px solid red red;'
+    ... ).to_excel('styled.xlsx')  # doctest: +SKIP
+    CSSWarning: Too many tokens provided to "border" (expected 1-3)
+    """
+
+
+class PossibleDataLossError(Exception):
+    """
+    Exception raised when trying to open an HDFStore file that is already open.
+
+    Examples
+    --------
+    >>> store = pd.HDFStore('my-store', 'a')  # doctest: +SKIP
+    >>> store.open("w")  # doctest: +SKIP
+    ... # PossibleDataLossError: Re-opening the file [my-store] with mode [a]...
+    """
+
+
+class ClosedFileError(Exception):
+    """
+    Exception raised when trying to perform an operation on a closed HDFStore file.
+
+    Examples
+    --------
+    >>> store = pd.HDFStore('my-store', 'a')  # doctest: +SKIP
+    >>> store.close()  # doctest: +SKIP
+    >>> store.keys()  # doctest: +SKIP
+    ... # ClosedFileError: my-store file is not open!
+    """
+
+
+class IncompatibilityWarning(Warning):
+    """
+    Warning raised when trying to use where criteria on an incompatible HDF5 file.
+    """
+
+
+class AttributeConflictWarning(Warning):
+    """
+    Warning raised when index attributes conflict when using HDFStore.
+
+    Occurs when attempting to append an index with a different name or a
+    different frequency than the existing index on an HDFStore.
+
+    Examples
+    --------
+    >>> idx1 = pd.Index(['a', 'b'], name='name1')
+    >>> df1 = pd.DataFrame([[1, 2], [3, 4]], index=idx1)
+    >>> df1.to_hdf('file', 'data', 'w', append=True)  # doctest: +SKIP
+    >>> idx2 = pd.Index(['c', 'd'], name='name2')
+    >>> df2 = pd.DataFrame([[5, 6], [7, 8]], index=idx2)
+    >>> df2.to_hdf('file', 'data', 'a', append=True)  # doctest: +SKIP
+    AttributeConflictWarning: the [index_name] attribute of the existing index is
+    [name1] which conflicts with the new [name2]...
+    """
+
+
+class DatabaseError(OSError):
+    """
+    Error raised when executing SQL with bad syntax or SQL that throws an error.
+
+    Examples
+    --------
+    >>> from sqlite3 import connect
+    >>> conn = connect(':memory:')
+    >>> pd.read_sql('select * test', conn)  # doctest: +SKIP
+    ... # DatabaseError: Execution failed on sql 'test': near "test": syntax error
+    """
+
+
+class PossiblePrecisionLoss(Warning):
+    """
+    Warning raised by to_stata on a column with a value outside the int64 range.
+
+    When a column value is outside this range, the column is
+    converted to a float64 dtype.
+
+    Examples
+    --------
+    >>> df = pd.DataFrame({"s": pd.Series([1, 2**53], dtype=np.int64)})
+    >>> df.to_stata('test')  # doctest: +SKIP
+    ... # PossiblePrecisionLoss: Column converted from int64 to float64...
+    """
+
+
+class ValueLabelTypeMismatch(Warning):
+    """
+    Warning raised by to_stata on a category column that contains non-string values.
+
+    Examples
+    --------
+    >>> df = pd.DataFrame({"categories": pd.Series(["a", 2], dtype="category")})
+    >>> df.to_stata('test')  # doctest: +SKIP
+    ... # ValueLabelTypeMismatch: Stata value labels (pandas categories) must be str...
+    """
+
+
+class InvalidColumnName(Warning):
+    """
+    Warning raised by to_stata when a column contains an invalid Stata name.
+
+    Because the column name is an invalid Stata variable name, it needs to be
+    converted.
+
+    Examples
+    --------
+    >>> df = pd.DataFrame({"0categories": pd.Series([2, 2])})
+    >>> df.to_stata('test')  # doctest: +SKIP
+    ... # InvalidColumnName: Not all pandas column names were valid Stata variable...
+    """
+
+
+class CategoricalConversionWarning(Warning):
+    """
+    Warning raised when reading a partially labeled Stata file using an iterator.
+
+    Examples
+    --------
+    >>> from pandas.io.stata import StataReader
+    >>> with StataReader('dta_file', chunksize=2) as reader:  # doctest: +SKIP
+    ...     for i, block in enumerate(reader):
+    ...         print(i, block)
+    ... # CategoricalConversionWarning: One or more series with value labels...
+    """
+
+
+class LossySetitemError(Exception):
+    """
+    Raised when a ``__setitem__`` on an np.ndarray would be lossy.
+
+    Notes
+    -----
+    This is an internal error.
+    """
+
+
+class NoBufferPresent(Exception):
+    """
+    Exception raised in _get_data_buffer to signal that the requested buffer is not present.
+    """
+
+
+class InvalidComparison(Exception):
+    """
+    Exception raised by _validate_comparison_value to indicate an invalid comparison.
+
+    Notes
+    -----
+    This is an internal error.
+ """ + + +__all__ = [ + "AbstractMethodError", + "AttributeConflictWarning", + "CategoricalConversionWarning", + "ClosedFileError", + "CSSWarning", + "DatabaseError", + "DataError", + "DtypeWarning", + "DuplicateLabelError", + "EmptyDataError", + "IncompatibilityWarning", + "IntCastingNaNError", + "InvalidColumnName", + "InvalidComparison", + "InvalidIndexError", + "InvalidVersion", + "IndexingError", + "LossySetitemError", + "MergeError", + "NoBufferPresent", + "NullFrequencyError", + "NumbaUtilError", + "NumExprClobberingError", + "OptionError", + "OutOfBoundsDatetime", + "OutOfBoundsTimedelta", + "ParserError", + "ParserWarning", + "PerformanceWarning", + "PossibleDataLossError", + "PossiblePrecisionLoss", + "PyperclipException", + "PyperclipWindowsException", + "SettingWithCopyError", + "SettingWithCopyWarning", + "SpecificationError", + "UndefinedVariableError", + "UnsortedIndexError", + "UnsupportedFunctionCall", + "ValueLabelTypeMismatch", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/__init__.py new file mode 100644 index 00000000..c804b81c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/__init__.py @@ -0,0 +1,13 @@ +# ruff: noqa: TCH004 +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + # import modules that have public classes/functions + from pandas.io import ( + formats, + json, + stata, + ) + + # mark only those modules as public + __all__ = ["formats", "json", "stata"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/_util.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/_util.py new file mode 100644 index 00000000..3b2ae5da --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/_util.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from typing import Callable + +from pandas.compat._optional import import_optional_dependency + +import pandas as pd + + +def _arrow_dtype_mapping() -> dict: + pa = import_optional_dependency("pyarrow") + return { + pa.int8(): pd.Int8Dtype(), + pa.int16(): pd.Int16Dtype(), + pa.int32(): pd.Int32Dtype(), + pa.int64(): pd.Int64Dtype(), + pa.uint8(): pd.UInt8Dtype(), + pa.uint16(): pd.UInt16Dtype(), + pa.uint32(): pd.UInt32Dtype(), + pa.uint64(): pd.UInt64Dtype(), + pa.bool_(): pd.BooleanDtype(), + pa.string(): pd.StringDtype(), + pa.float32(): pd.Float32Dtype(), + pa.float64(): pd.Float64Dtype(), + } + + +def arrow_string_types_mapper() -> Callable: + pa = import_optional_dependency("pyarrow") + + return { + pa.string(): pd.StringDtype(storage="pyarrow_numpy"), + pa.large_string(): pd.StringDtype(storage="pyarrow_numpy"), + }.get diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/api.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/api.py new file mode 100644 index 00000000..4e8b34a6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/api.py @@ -0,0 +1,65 @@ +""" +Data IO api +""" + +from pandas.io.clipboards import read_clipboard +from pandas.io.excel import ( + ExcelFile, + ExcelWriter, + read_excel, +) +from pandas.io.feather_format import read_feather +from pandas.io.gbq import read_gbq +from pandas.io.html import read_html +from pandas.io.json import read_json +from pandas.io.orc import read_orc +from pandas.io.parquet import read_parquet +from pandas.io.parsers import ( + read_csv, + read_fwf, + read_table, +) +from pandas.io.pickle import ( + read_pickle, + to_pickle, +) +from pandas.io.pytables import ( + HDFStore, + read_hdf, +) +from pandas.io.sas import read_sas 
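+# Note: all of these readers are also re-exported at the top level of the
+# package, so typical usage is, e.g.:
+#   import pandas as pd
+#   df = pd.read_csv("data.csv")  # "data.csv" is a hypothetical file name
+# rather than importing from pandas.io.api directly.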
+from pandas.io.spss import read_spss +from pandas.io.sql import ( + read_sql, + read_sql_query, + read_sql_table, +) +from pandas.io.stata import read_stata +from pandas.io.xml import read_xml + +__all__ = [ + "ExcelFile", + "ExcelWriter", + "HDFStore", + "read_clipboard", + "read_csv", + "read_excel", + "read_feather", + "read_fwf", + "read_gbq", + "read_hdf", + "read_html", + "read_json", + "read_orc", + "read_parquet", + "read_pickle", + "read_sas", + "read_spss", + "read_sql", + "read_sql_query", + "read_sql_table", + "read_stata", + "read_table", + "read_xml", + "to_pickle", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/clipboard/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/clipboard/__init__.py new file mode 100644 index 00000000..c07f51d8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/clipboard/__init__.py @@ -0,0 +1,678 @@ +""" +Pyperclip + +A cross-platform clipboard module for Python, +with copy & paste functions for plain text. +By Al Sweigart al@inventwithpython.com +BSD License + +Usage: + import pyperclip + pyperclip.copy('The text to be copied to the clipboard.') + spam = pyperclip.paste() + + if not pyperclip.is_available(): + print("Copy functionality unavailable!") + +On Windows, no additional modules are needed. +On Mac, the pyobjc module is used, falling back to the pbcopy and pbpaste cli + commands. (These commands should come with OS X.). +On Linux, install xclip or xsel via package manager. For example, in Debian: + sudo apt-get install xclip + sudo apt-get install xsel + +Otherwise on Linux, you will need the PyQt5 modules installed. + +This module does not work with PyGObject yet. + +Cygwin is currently not supported. + +Security Note: This module runs programs with these names: + - which + - where + - pbcopy + - pbpaste + - xclip + - xsel + - klipper + - qdbus +A malicious user could rename or add programs with these names, tricking +Pyperclip into running them with whatever permissions the Python process has. + +""" + +__version__ = "1.7.0" + + +import contextlib +import ctypes +from ctypes import ( + c_size_t, + c_wchar, + c_wchar_p, + get_errno, + sizeof, +) +import os +import platform +from shutil import which +import subprocess +import time +import warnings + +from pandas.errors import ( + PyperclipException, + PyperclipWindowsException, +) +from pandas.util._exceptions import find_stack_level + +# `import PyQt4` sys.exit()s if DISPLAY is not in the environment. +# Thus, we need to detect the presence of $DISPLAY manually +# and not load PyQt4 if it is absent. +HAS_DISPLAY = os.getenv("DISPLAY") + +EXCEPT_MSG = """ + Pyperclip could not find a copy/paste mechanism for your system. + For more information, please visit + https://pyperclip.readthedocs.io/en/latest/#not-implemented-error + """ + +ENCODING = "utf-8" + +# The "which" unix command finds where a command is. +if platform.system() == "Windows": + WHICH_CMD = "where" +else: + WHICH_CMD = "which" + + +def _executable_exists(name): + return ( + subprocess.call( + [WHICH_CMD, name], stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + == 0 + ) + + +def _stringifyText(text) -> str: + acceptedTypes = (str, int, float, bool) + if not isinstance(text, acceptedTypes): + raise PyperclipException( + f"only str, int, float, and bool values " + f"can be copied to the clipboard, not {type(text).__name__}" + ) + return str(text) + + +def init_osx_pbcopy_clipboard(): + def copy_osx_pbcopy(text): + text = _stringifyText(text) # Converts non-str values to str. 
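+        # The encoded text is piped to the stdin of the macOS `pbcopy`
+        # utility. An illustrative round trip, assuming this backend is the
+        # one selected for the platform:
+        #   copy_osx_pbcopy("hello")
+        #   paste_osx_pbcopy()  # -> "hello"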
+ with subprocess.Popen( + ["pbcopy", "w"], stdin=subprocess.PIPE, close_fds=True + ) as p: + p.communicate(input=text.encode(ENCODING)) + + def paste_osx_pbcopy(): + with subprocess.Popen( + ["pbpaste", "r"], stdout=subprocess.PIPE, close_fds=True + ) as p: + stdout = p.communicate()[0] + return stdout.decode(ENCODING) + + return copy_osx_pbcopy, paste_osx_pbcopy + + +def init_osx_pyobjc_clipboard(): + def copy_osx_pyobjc(text): + """Copy string argument to clipboard""" + text = _stringifyText(text) # Converts non-str values to str. + newStr = Foundation.NSString.stringWithString_(text).nsstring() + newData = newStr.dataUsingEncoding_(Foundation.NSUTF8StringEncoding) + board = AppKit.NSPasteboard.generalPasteboard() + board.declareTypes_owner_([AppKit.NSStringPboardType], None) + board.setData_forType_(newData, AppKit.NSStringPboardType) + + def paste_osx_pyobjc(): + """Returns contents of clipboard""" + board = AppKit.NSPasteboard.generalPasteboard() + content = board.stringForType_(AppKit.NSStringPboardType) + return content + + return copy_osx_pyobjc, paste_osx_pyobjc + + +def init_qt_clipboard(): + global QApplication + # $DISPLAY should exist + + # Try to import from qtpy, but if that fails try PyQt5 then PyQt4 + try: + from qtpy.QtWidgets import QApplication + except ImportError: + try: + from PyQt5.QtWidgets import QApplication + except ImportError: + from PyQt4.QtGui import QApplication + + app = QApplication.instance() + if app is None: + app = QApplication([]) + + def copy_qt(text): + text = _stringifyText(text) # Converts non-str values to str. + cb = app.clipboard() + cb.setText(text) + + def paste_qt() -> str: + cb = app.clipboard() + return str(cb.text()) + + return copy_qt, paste_qt + + +def init_xclip_clipboard(): + DEFAULT_SELECTION = "c" + PRIMARY_SELECTION = "p" + + def copy_xclip(text, primary=False): + text = _stringifyText(text) # Converts non-str values to str. + selection = DEFAULT_SELECTION + if primary: + selection = PRIMARY_SELECTION + with subprocess.Popen( + ["xclip", "-selection", selection], stdin=subprocess.PIPE, close_fds=True + ) as p: + p.communicate(input=text.encode(ENCODING)) + + def paste_xclip(primary=False): + selection = DEFAULT_SELECTION + if primary: + selection = PRIMARY_SELECTION + with subprocess.Popen( + ["xclip", "-selection", selection, "-o"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True, + ) as p: + stdout = p.communicate()[0] + # Intentionally ignore extraneous output on stderr when clipboard is empty + return stdout.decode(ENCODING) + + return copy_xclip, paste_xclip + + +def init_xsel_clipboard(): + DEFAULT_SELECTION = "-b" + PRIMARY_SELECTION = "-p" + + def copy_xsel(text, primary=False): + text = _stringifyText(text) # Converts non-str values to str. + selection_flag = DEFAULT_SELECTION + if primary: + selection_flag = PRIMARY_SELECTION + with subprocess.Popen( + ["xsel", selection_flag, "-i"], stdin=subprocess.PIPE, close_fds=True + ) as p: + p.communicate(input=text.encode(ENCODING)) + + def paste_xsel(primary=False): + selection_flag = DEFAULT_SELECTION + if primary: + selection_flag = PRIMARY_SELECTION + with subprocess.Popen( + ["xsel", selection_flag, "-o"], stdout=subprocess.PIPE, close_fds=True + ) as p: + stdout = p.communicate()[0] + return stdout.decode(ENCODING) + + return copy_xsel, paste_xsel + + +def init_klipper_clipboard(): + def copy_klipper(text): + text = _stringifyText(text) # Converts non-str values to str. 
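+        # Copying goes through KDE's Klipper daemon via a D-Bus method call;
+        # the subprocess below is roughly equivalent to this shell command
+        # (illustrative):
+        #   qdbus org.kde.klipper /klipper setClipboardContents "some text"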
+ with subprocess.Popen( + [ + "qdbus", + "org.kde.klipper", + "/klipper", + "setClipboardContents", + text.encode(ENCODING), + ], + stdin=subprocess.PIPE, + close_fds=True, + ) as p: + p.communicate(input=None) + + def paste_klipper(): + with subprocess.Popen( + ["qdbus", "org.kde.klipper", "/klipper", "getClipboardContents"], + stdout=subprocess.PIPE, + close_fds=True, + ) as p: + stdout = p.communicate()[0] + + # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874 + # TODO: https://github.com/asweigart/pyperclip/issues/43 + clipboardContents = stdout.decode(ENCODING) + # even if blank, Klipper will append a newline at the end + assert len(clipboardContents) > 0 + # make sure that newline is there + assert clipboardContents.endswith("\n") + if clipboardContents.endswith("\n"): + clipboardContents = clipboardContents[:-1] + return clipboardContents + + return copy_klipper, paste_klipper + + +def init_dev_clipboard_clipboard(): + def copy_dev_clipboard(text): + text = _stringifyText(text) # Converts non-str values to str. + if text == "": + warnings.warn( + "Pyperclip cannot copy a blank string to the clipboard on Cygwin. " + "This is effectively a no-op.", + stacklevel=find_stack_level(), + ) + if "\r" in text: + warnings.warn( + "Pyperclip cannot handle \\r characters on Cygwin.", + stacklevel=find_stack_level(), + ) + + with open("/dev/clipboard", "w", encoding="utf-8") as fd: + fd.write(text) + + def paste_dev_clipboard() -> str: + with open("/dev/clipboard", encoding="utf-8") as fd: + content = fd.read() + return content + + return copy_dev_clipboard, paste_dev_clipboard + + +def init_no_clipboard(): + class ClipboardUnavailable: + def __call__(self, *args, **kwargs): + raise PyperclipException(EXCEPT_MSG) + + def __bool__(self) -> bool: + return False + + return ClipboardUnavailable(), ClipboardUnavailable() + + +# Windows-related clipboard functions: +class CheckedCall: + def __init__(self, f) -> None: + super().__setattr__("f", f) + + def __call__(self, *args): + ret = self.f(*args) + if not ret and get_errno(): + raise PyperclipWindowsException("Error calling " + self.f.__name__) + return ret + + def __setattr__(self, key, value): + setattr(self.f, key, value) + + +def init_windows_clipboard(): + global HGLOBAL, LPVOID, DWORD, LPCSTR, INT + global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE + from ctypes.wintypes import ( + BOOL, + DWORD, + HANDLE, + HGLOBAL, + HINSTANCE, + HMENU, + HWND, + INT, + LPCSTR, + LPVOID, + UINT, + ) + + windll = ctypes.windll + msvcrt = ctypes.CDLL("msvcrt") + + safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA) + safeCreateWindowExA.argtypes = [ + DWORD, + LPCSTR, + LPCSTR, + DWORD, + INT, + INT, + INT, + INT, + HWND, + HMENU, + HINSTANCE, + LPVOID, + ] + safeCreateWindowExA.restype = HWND + + safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow) + safeDestroyWindow.argtypes = [HWND] + safeDestroyWindow.restype = BOOL + + OpenClipboard = windll.user32.OpenClipboard + OpenClipboard.argtypes = [HWND] + OpenClipboard.restype = BOOL + + safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard) + safeCloseClipboard.argtypes = [] + safeCloseClipboard.restype = BOOL + + safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard) + safeEmptyClipboard.argtypes = [] + safeEmptyClipboard.restype = BOOL + + safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData) + safeGetClipboardData.argtypes = [UINT] + safeGetClipboardData.restype = HANDLE + + safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData) + 
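`CheckedCall` above is a thin guard around a ctypes function pointer: a falsy return value combined with a set errno is promoted to an exception, and attribute writes (`argtypes`, `restype`) are forwarded to the wrapped C function. The same guard compressed into a decorator, as a Windows-only sketch (`checked` is an illustrative name; like the original, it assumes failures are reported via errno):

import ctypes
import platform

def checked(f):
    # Promote C-style error reporting (falsy return + errno) to an exception.
    def wrapper(*args):
        ret = f(*args)
        if not ret and ctypes.get_errno():
            raise OSError(f"Error calling {f.__name__}")
        return ret
    return wrapper

if platform.system() == "Windows":  # ctypes.windll only exists on Windows
    from ctypes import wintypes

    OpenClipboard = ctypes.windll.user32.OpenClipboard
    OpenClipboard.argtypes = [wintypes.HWND]  # declare the C signature up front
    OpenClipboard.restype = wintypes.BOOL
    checked(OpenClipboard)(None)  # raises OSError instead of failing silently
    ctypes.windll.user32.CloseClipboard()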
safeSetClipboardData.argtypes = [UINT, HANDLE] + safeSetClipboardData.restype = HANDLE + + safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc) + safeGlobalAlloc.argtypes = [UINT, c_size_t] + safeGlobalAlloc.restype = HGLOBAL + + safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock) + safeGlobalLock.argtypes = [HGLOBAL] + safeGlobalLock.restype = LPVOID + + safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock) + safeGlobalUnlock.argtypes = [HGLOBAL] + safeGlobalUnlock.restype = BOOL + + wcslen = CheckedCall(msvcrt.wcslen) + wcslen.argtypes = [c_wchar_p] + wcslen.restype = UINT + + GMEM_MOVEABLE = 0x0002 + CF_UNICODETEXT = 13 + + @contextlib.contextmanager + def window(): + """ + Context that provides a valid Windows hwnd. + """ + # we really just need the hwnd, so setting "STATIC" + # as predefined lpClass is just fine. + hwnd = safeCreateWindowExA( + 0, b"STATIC", None, 0, 0, 0, 0, 0, None, None, None, None + ) + try: + yield hwnd + finally: + safeDestroyWindow(hwnd) + + @contextlib.contextmanager + def clipboard(hwnd): + """ + Context manager that opens the clipboard and prevents + other applications from modifying the clipboard content. + """ + # We may not get the clipboard handle immediately because + # some other application is accessing it (?) + # We try for at least 500ms to get the clipboard. + t = time.time() + 0.5 + success = False + while time.time() < t: + success = OpenClipboard(hwnd) + if success: + break + time.sleep(0.01) + if not success: + raise PyperclipWindowsException("Error calling OpenClipboard") + + try: + yield + finally: + safeCloseClipboard() + + def copy_windows(text): + # This function is heavily based on + # http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard + + text = _stringifyText(text) # Converts non-str values to str. + + with window() as hwnd: + # http://msdn.com/ms649048 + # If an application calls OpenClipboard with hwnd set to NULL, + # EmptyClipboard sets the clipboard owner to NULL; + # this causes SetClipboardData to fail. + # => We need a valid hwnd to copy something. + with clipboard(hwnd): + safeEmptyClipboard() + + if text: + # http://msdn.com/ms649051 + # If the hMem parameter identifies a memory object, + # the object must have been allocated using the + # function with the GMEM_MOVEABLE flag. + count = wcslen(text) + 1 + handle = safeGlobalAlloc(GMEM_MOVEABLE, count * sizeof(c_wchar)) + locked_handle = safeGlobalLock(handle) + + ctypes.memmove( + c_wchar_p(locked_handle), + c_wchar_p(text), + count * sizeof(c_wchar), + ) + + safeGlobalUnlock(handle) + safeSetClipboardData(CF_UNICODETEXT, handle) + + def paste_windows(): + with clipboard(None): + handle = safeGetClipboardData(CF_UNICODETEXT) + if not handle: + # GetClipboardData may return NULL with errno == NO_ERROR + # if the clipboard is empty. + # (Also, it may return a handle to an empty buffer, + # but technically that's not empty) + return "" + return c_wchar_p(handle).value + + return copy_windows, paste_windows + + +def init_wsl_clipboard(): + def copy_wsl(text): + text = _stringifyText(text) # Converts non-str values to str. + with subprocess.Popen(["clip.exe"], stdin=subprocess.PIPE, close_fds=True) as p: + p.communicate(input=text.encode(ENCODING)) + + def paste_wsl(): + with subprocess.Popen( + ["powershell.exe", "-command", "Get-Clipboard"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True, + ) as p: + stdout = p.communicate()[0] + # WSL appends "\r\n" to the contents. 
+ return stdout[:-2].decode(ENCODING) + + return copy_wsl, paste_wsl + + +# Automatic detection of clipboard mechanisms +# and importing is done in determine_clipboard(): +def determine_clipboard(): + """ + Determine the OS/platform and set the copy() and paste() functions + accordingly. + """ + global Foundation, AppKit, qtpy, PyQt4, PyQt5 + + # Setup for the CYGWIN platform: + if ( + "cygwin" in platform.system().lower() + ): # Cygwin has a variety of values returned by platform.system(), + # such as 'CYGWIN_NT-6.1' + # FIXME(pyperclip#55): pyperclip currently does not support Cygwin, + # see https://github.com/asweigart/pyperclip/issues/55 + if os.path.exists("/dev/clipboard"): + warnings.warn( + "Pyperclip's support for Cygwin is not perfect, " + "see https://github.com/asweigart/pyperclip/issues/55", + stacklevel=find_stack_level(), + ) + return init_dev_clipboard_clipboard() + + # Setup for the WINDOWS platform: + elif os.name == "nt" or platform.system() == "Windows": + return init_windows_clipboard() + + if platform.system() == "Linux": + if which("wslconfig.exe"): + return init_wsl_clipboard() + + # Setup for the macOS platform: + if os.name == "mac" or platform.system() == "Darwin": + try: + import AppKit + import Foundation # check if pyobjc is installed + except ImportError: + return init_osx_pbcopy_clipboard() + else: + return init_osx_pyobjc_clipboard() + + # Setup for the LINUX platform: + if HAS_DISPLAY: + if _executable_exists("xsel"): + return init_xsel_clipboard() + if _executable_exists("xclip"): + return init_xclip_clipboard() + if _executable_exists("klipper") and _executable_exists("qdbus"): + return init_klipper_clipboard() + + try: + # qtpy is a small abstraction layer that lets you write applications + # using a single api call to either PyQt or PySide. + # https://pypi.python.org/project/QtPy + import qtpy # check if qtpy is installed + except ImportError: + # If qtpy isn't installed, fall back on importing PyQt4. + try: + import PyQt5 # check if PyQt5 is installed + except ImportError: + try: + import PyQt4 # check if PyQt4 is installed + except ImportError: + pass # We want to fail fast for all non-ImportError exceptions. + else: + return init_qt_clipboard() + else: + return init_qt_clipboard() + else: + return init_qt_clipboard() + + return init_no_clipboard() + + +def set_clipboard(clipboard): + """ + Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how + the copy() and paste() functions interact with the operating system to + implement the copy/paste feature. 
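`determine_clipboard` above is a straight platform cascade: Cygwin, then Windows, then WSL-on-Linux, then macOS, then X11 tools under $DISPLAY, then Qt, with a raising stub as the last resort. A condensed restatement that only reports which branch would win (standard library only; the label strings are illustrative, and the real cascade also requires qdbus alongside klipper):

import os
import platform
from shutil import which

def pick_clipboard_backend() -> str:
    if "cygwin" in platform.system().lower():
        return "dev-clipboard" if os.path.exists("/dev/clipboard") else "none"
    if os.name == "nt" or platform.system() == "Windows":
        return "windows"
    if platform.system() == "Linux" and which("wslconfig.exe"):
        return "wsl"
    if platform.system() == "Darwin":
        return "pyobjc-or-pbcopy"
    if os.getenv("DISPLAY"):
        for tool in ("xsel", "xclip", "klipper"):
            if which(tool):
                return tool
    return "qt-or-none"

print(pick_clipboard_backend())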
The clipboard parameter must be one of: + - pbcopy + - pyobjc (default on macOS) + - qt + - xclip + - xsel + - klipper + - windows (default on Windows) + - no (this is what is set when no clipboard mechanism can be found) + """ + global copy, paste + + clipboard_types = { + "pbcopy": init_osx_pbcopy_clipboard, + "pyobjc": init_osx_pyobjc_clipboard, + "qt": init_qt_clipboard, # TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5' + "xclip": init_xclip_clipboard, + "xsel": init_xsel_clipboard, + "klipper": init_klipper_clipboard, + "windows": init_windows_clipboard, + "no": init_no_clipboard, + } + + if clipboard not in clipboard_types: + allowed_clipboard_types = [repr(_) for _ in clipboard_types] + raise ValueError( + f"Argument must be one of {', '.join(allowed_clipboard_types)}" + ) + + # Sets pyperclip's copy() and paste() functions: + copy, paste = clipboard_types[clipboard]() + + +def lazy_load_stub_copy(text): + """ + A stub function for copy(), which will load the real copy() function when + called so that the real copy() function is used for later calls. + + This allows users to import pyperclip without having determine_clipboard() + automatically run, which will automatically select a clipboard mechanism. + This could be a problem if it selects, say, the memory-heavy PyQt4 module + but the user was just going to immediately call set_clipboard() to use a + different clipboard mechanism. + + The lazy loading this stub function implements gives the user a chance to + call set_clipboard() to pick another clipboard mechanism. Or, if the user + simply calls copy() or paste() without calling set_clipboard() first, + will fall back on whatever clipboard mechanism that determine_clipboard() + automatically chooses. + """ + global copy, paste + copy, paste = determine_clipboard() + return copy(text) + + +def lazy_load_stub_paste(): + """ + A stub function for paste(), which will load the real paste() function when + called so that the real paste() function is used for later calls. + + This allows users to import pyperclip without having determine_clipboard() + automatically run, which will automatically select a clipboard mechanism. + This could be a problem if it selects, say, the memory-heavy PyQt4 module + but the user was just going to immediately call set_clipboard() to use a + different clipboard mechanism. + + The lazy loading this stub function implements gives the user a chance to + call set_clipboard() to pick another clipboard mechanism. Or, if the user + simply calls copy() or paste() without calling set_clipboard() first, + will fall back on whatever clipboard mechanism that determine_clipboard() + automatically chooses. + """ + global copy, paste + copy, paste = determine_clipboard() + return paste() + + +def is_available() -> bool: + return copy != lazy_load_stub_copy and paste != lazy_load_stub_paste + + +# Initially, copy() and paste() are set to lazy loading wrappers which will +# set `copy` and `paste` to real functions the first time they're used, unless +# set_clipboard() or determine_clipboard() is called first. 
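The lazy stubs described in the comments above self-replace on first call, so merely importing the module never pays for backend detection. The trick in isolation, with a toy `_detect_backend` standing in for `determine_clipboard()` (illustrative sketch, not the pandas code):

def _detect_backend():
    # Stand-in for determine_clipboard(): pretend detection is expensive.
    def real_copy(text):
        print(f"copied {text!r}")
    def real_paste():
        return "pasted"
    return real_copy, real_paste

def copy(text):
    global copy, paste
    copy, paste = _detect_backend()  # rebind the module-level names...
    return copy(text)                # ...then delegate to the real function

def paste():
    global copy, paste
    copy, paste = _detect_backend()
    return paste()

copy("x")   # detection happens here, exactly once
paste()     # already rebound; no second detection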
+copy, paste = lazy_load_stub_copy, lazy_load_stub_paste
+
+
+__all__ = ["copy", "paste", "set_clipboard", "determine_clipboard"]
+
+# pandas aliases
+clipboard_get = paste
+clipboard_set = copy
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/clipboards.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/clipboards.py
new file mode 100644
index 00000000..a15e3732
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/clipboards.py
@@ -0,0 +1,197 @@
+""" io on the clipboard """
+from __future__ import annotations
+
+from io import StringIO
+from typing import TYPE_CHECKING
+import warnings
+
+from pandas._libs import lib
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import check_dtype_backend
+
+from pandas.core.dtypes.generic import ABCDataFrame
+
+from pandas import (
+    get_option,
+    option_context,
+)
+
+if TYPE_CHECKING:
+    from pandas._typing import DtypeBackend
+
+
+def read_clipboard(
+    sep: str = r"\s+",
+    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+    **kwargs,
+):  # pragma: no cover
+    r"""
+    Read text from clipboard and pass to :func:`~pandas.read_csv`.
+
+    Parses clipboard contents similar to how CSV files are parsed
+    using :func:`~pandas.read_csv`.
+
+    Parameters
+    ----------
+    sep : str, default '\\s+'
+        A string or regex delimiter. The default of ``'\\s+'`` denotes
+        one or more whitespace characters.
+
+    dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+        Back-end data type applied to the resultant :class:`DataFrame`
+        (still experimental). Behaviour is as follows:
+
+        * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+          (default).
+        * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+          DataFrame.
+
+        .. versionadded:: 2.0
+
+    **kwargs
+        See :func:`~pandas.read_csv` for the full argument list.
+
+    Returns
+    -------
+    DataFrame
+        A parsed :class:`~pandas.DataFrame` object.
+
+    See Also
+    --------
+    DataFrame.to_clipboard : Copy object to the system clipboard.
+    read_csv : Read a comma-separated values (csv) file into DataFrame.
+    read_fwf : Read a table of fixed-width formatted lines into DataFrame.
+
+    Examples
+    --------
+    >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
+    >>> df.to_clipboard()  # doctest: +SKIP
+    >>> pd.read_clipboard()  # doctest: +SKIP
+         A  B  C
+    0    1  2  3
+    1    4  5  6
+    """
+    encoding = kwargs.pop("encoding", "utf-8")
+
+    # only utf-8 is valid for passed value because that's what clipboard
+    # supports
+    if encoding is not None and encoding.lower().replace("-", "") != "utf8":
+        raise NotImplementedError("reading from clipboard only supports utf-8 encoding")
+
+    check_dtype_backend(dtype_backend)
+
+    from pandas.io.clipboard import clipboard_get
+    from pandas.io.parsers import read_csv
+
+    text = clipboard_get()
+
+    # Try to decode (if needed, as "text" might already be a string here).
+    try:
+        text = text.decode(kwargs.get("encoding") or get_option("display.encoding"))
+    except AttributeError:
+        pass
+
+    # Excel copies into clipboard with \t separation
+    # inspect no more than the first 10 lines, if they
+    # all contain an equal number (>0) of tabs, infer
+    # that this came from excel and set 'sep' accordingly
+    lines = text[:10000].split("\n")[:-1][:10]
+
+    # Need to remove leading white space, since read_csv
+    # accepts:
+    #    a  b
+    # 0  1  2
+    # 1  3  4
+
+    counts = {x.lstrip(" ").count("\t") for x in lines}
+    if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
+        sep = "\t"
+        # check the number of leading tabs in the first line
+        # to account for index columns
+        index_length = len(lines[0]) - len(lines[0].lstrip(" \t"))
+        if index_length != 0:
+            kwargs.setdefault("index_col", list(range(index_length)))
+
+    # Edge case where sep is specified to be None, return to default
+    if sep is None and kwargs.get("delim_whitespace") is None:
+        sep = r"\s+"
+
+    # Regex separator currently only works with python engine.
+    # Default to python if separator is multi-character (regex)
+    if len(sep) > 1 and kwargs.get("engine") is None:
+        kwargs["engine"] = "python"
+    elif len(sep) > 1 and kwargs.get("engine") == "c":
+        warnings.warn(
+            "read_clipboard with regex separator does not work properly with c engine.",
+            stacklevel=find_stack_level(),
+        )
+
+    return read_csv(StringIO(text), sep=sep, dtype_backend=dtype_backend, **kwargs)
+
+
+def to_clipboard(
+    obj, excel: bool | None = True, sep: str | None = None, **kwargs
+) -> None:  # pragma: no cover
+    """
+    Attempt to write text representation of object to the system clipboard.
+    The clipboard can then be pasted into Excel, for example.
+
+    Parameters
+    ----------
+    obj : the object to write to the clipboard
+    excel : bool, defaults to True
+        if True, use the provided separator, writing in a csv
+        format for allowing easy pasting into excel.
+ if False, write a string representation of the object + to the clipboard + sep : optional, defaults to tab + other keywords are passed to to_csv + + Notes + ----- + Requirements for your platform + - Linux: xclip, or xsel (with PyQt4 modules) + - Windows: + - OS X: + """ + encoding = kwargs.pop("encoding", "utf-8") + + # testing if an invalid encoding is passed to clipboard + if encoding is not None and encoding.lower().replace("-", "") != "utf8": + raise ValueError("clipboard only supports utf-8 encoding") + + from pandas.io.clipboard import clipboard_set + + if excel is None: + excel = True + + if excel: + try: + if sep is None: + sep = "\t" + buf = StringIO() + + # clipboard_set (pyperclip) expects unicode + obj.to_csv(buf, sep=sep, encoding="utf-8", **kwargs) + text = buf.getvalue() + + clipboard_set(text) + return + except TypeError: + warnings.warn( + "to_clipboard in excel mode requires a single character separator.", + stacklevel=find_stack_level(), + ) + elif sep is not None: + warnings.warn( + "to_clipboard with excel=False ignores the sep argument.", + stacklevel=find_stack_level(), + ) + + if isinstance(obj, ABCDataFrame): + # str(df) has various unhelpful defaults, like truncation + with option_context("display.max_colwidth", None): + objstr = obj.to_string(**kwargs) + else: + objstr = str(obj) + clipboard_set(objstr) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/common.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/common.py new file mode 100644 index 00000000..6be6f3f4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/common.py @@ -0,0 +1,1257 @@ +"""Common IO api utilities""" +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +import codecs +from collections import defaultdict +from collections.abc import ( + Hashable, + Mapping, + Sequence, +) +import dataclasses +import functools +import gzip +from io import ( + BufferedIOBase, + BytesIO, + RawIOBase, + StringIO, + TextIOBase, + TextIOWrapper, +) +import mmap +import os +from pathlib import Path +import re +import tarfile +from typing import ( + IO, + Any, + AnyStr, + DefaultDict, + Generic, + Literal, + TypeVar, + cast, + overload, +) +from urllib.parse import ( + urljoin, + urlparse as parse_url, + uses_netloc, + uses_params, + uses_relative, +) +import warnings +import zipfile + +from pandas._typing import ( + BaseBuffer, + CompressionDict, + CompressionOptions, + FilePath, + ReadBuffer, + ReadCsvBuffer, + StorageOptions, + WriteBuffer, +) +from pandas.compat import ( + get_bz2_file, + get_lzma_file, +) +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_bool, + is_file_like, + is_integer, + is_list_like, +) + +from pandas.core.indexes.api import MultiIndex +from pandas.core.shared_docs import _shared_docs + +_VALID_URLS = set(uses_relative + uses_netloc + uses_params) +_VALID_URLS.discard("") +_RFC_3986_PATTERN = re.compile(r"^[A-Za-z][A-Za-z0-9+\-+.]*://") + +BaseBufferT = TypeVar("BaseBufferT", bound=BaseBuffer) + + +@dataclasses.dataclass +class IOArgs: + """ + Return value of io/common.py:_get_filepath_or_buffer. 
+ """ + + filepath_or_buffer: str | BaseBuffer + encoding: str + mode: str + compression: CompressionDict + should_close: bool = False + + +@dataclasses.dataclass +class IOHandles(Generic[AnyStr]): + """ + Return value of io/common.py:get_handle + + Can be used as a context manager. + + This is used to easily close created buffers and to handle corner cases when + TextIOWrapper is inserted. + + handle: The file handle to be used. + created_handles: All file handles that are created by get_handle + is_wrapped: Whether a TextIOWrapper needs to be detached. + """ + + # handle might not implement the IO-interface + handle: IO[AnyStr] + compression: CompressionDict + created_handles: list[IO[bytes] | IO[str]] = dataclasses.field(default_factory=list) + is_wrapped: bool = False + + def close(self) -> None: + """ + Close all created buffers. + + Note: If a TextIOWrapper was inserted, it is flushed and detached to + avoid closing the potentially user-created buffer. + """ + if self.is_wrapped: + assert isinstance(self.handle, TextIOWrapper) + self.handle.flush() + self.handle.detach() + self.created_handles.remove(self.handle) + for handle in self.created_handles: + handle.close() + self.created_handles = [] + self.is_wrapped = False + + def __enter__(self) -> IOHandles[AnyStr]: + return self + + def __exit__(self, *args: Any) -> None: + self.close() + + +def is_url(url: object) -> bool: + """ + Check to see if a URL has a valid protocol. + + Parameters + ---------- + url : str or unicode + + Returns + ------- + isurl : bool + If `url` has a valid protocol return True otherwise False. + """ + if not isinstance(url, str): + return False + return parse_url(url).scheme in _VALID_URLS + + +@overload +def _expand_user(filepath_or_buffer: str) -> str: + ... + + +@overload +def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT: + ... + + +def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT: + """ + Return the argument with an initial component of ~ or ~user + replaced by that user's home directory. + + Parameters + ---------- + filepath_or_buffer : object to be converted if possible + + Returns + ------- + expanded_filepath_or_buffer : an expanded filepath or the + input if not expandable + """ + if isinstance(filepath_or_buffer, str): + return os.path.expanduser(filepath_or_buffer) + return filepath_or_buffer + + +def validate_header_arg(header: object) -> None: + if header is None: + return + if is_integer(header): + header = cast(int, header) + if header < 0: + # GH 27779 + raise ValueError( + "Passing negative integer to header is invalid. " + "For no header, use header=None instead" + ) + return + if is_list_like(header, allow_sets=False): + header = cast(Sequence, header) + if not all(map(is_integer, header)): + raise ValueError("header must be integer or list of integers") + if any(i < 0 for i in header): + raise ValueError("cannot specify multi-index header with negative integers") + return + if is_bool(header): + raise TypeError( + "Passing a bool to header is invalid. Use header=None for no header or " + "header=int or list-like of ints to specify " + "the row(s) making up the column names" + ) + # GH 16338 + raise ValueError("header must be integer or list of integers") + + +@overload +def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) -> str: + ... + + +@overload +def stringify_path( + filepath_or_buffer: BaseBufferT, convert_file_like: bool = ... +) -> BaseBufferT: + ... 
+ + +def stringify_path( + filepath_or_buffer: FilePath | BaseBufferT, + convert_file_like: bool = False, +) -> str | BaseBufferT: + """ + Attempt to convert a path-like object to a string. + + Parameters + ---------- + filepath_or_buffer : object to be converted + + Returns + ------- + str_filepath_or_buffer : maybe a string version of the object + + Notes + ----- + Objects supporting the fspath protocol are coerced + according to its __fspath__ method. + + Any other object is passed through unchanged, which includes bytes, + strings, buffers, or anything else that's not even path-like. + """ + if not convert_file_like and is_file_like(filepath_or_buffer): + # GH 38125: some fsspec objects implement os.PathLike but have already opened a + # file. This prevents opening the file a second time. infer_compression calls + # this function with convert_file_like=True to infer the compression. + return cast(BaseBufferT, filepath_or_buffer) + + if isinstance(filepath_or_buffer, os.PathLike): + filepath_or_buffer = filepath_or_buffer.__fspath__() + return _expand_user(filepath_or_buffer) + + +def urlopen(*args, **kwargs): + """ + Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of + the stdlib. + """ + import urllib.request + + return urllib.request.urlopen(*args, **kwargs) + + +def is_fsspec_url(url: FilePath | BaseBuffer) -> bool: + """ + Returns true if the given URL looks like + something fsspec can handle + """ + return ( + isinstance(url, str) + and bool(_RFC_3986_PATTERN.match(url)) + and not url.startswith(("http://", "https://")) + ) + + +@doc( + storage_options=_shared_docs["storage_options"], + compression_options=_shared_docs["compression_options"] % "filepath_or_buffer", +) +def _get_filepath_or_buffer( + filepath_or_buffer: FilePath | BaseBuffer, + encoding: str = "utf-8", + compression: CompressionOptions | None = None, + mode: str = "r", + storage_options: StorageOptions | None = None, +) -> IOArgs: + """ + If the filepath_or_buffer is a url, translate and return the buffer. + Otherwise passthrough. + + Parameters + ---------- + filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path), + or buffer + {compression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + encoding : the encoding to use to decode bytes, default is 'utf-8' + mode : str, optional + + {storage_options} + + .. versionadded:: 1.2.0 + + ..versionchange:: 1.2.0 + + Returns the dataclass IOArgs. + """ + filepath_or_buffer = stringify_path(filepath_or_buffer) + + # handle compression dict + compression_method, compression = get_compression_method(compression) + compression_method = infer_compression(filepath_or_buffer, compression_method) + + # GH21227 internal compression is not used for non-binary handles. 
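The `stringify_path` helper above normalizes everything path-like down to an expanded string while letting open handles pass through untouched, which is what lets the readers accept `Path`, `str`, and buffers interchangeably. A trimmed sketch of that normalization (drops the `convert_file_like` wrinkle; `stringify_path_sketch` is an illustrative name):

import os
from io import StringIO
from pathlib import Path

def stringify_path_sketch(obj):
    # Path-like objects are coerced via the os.fspath protocol; strings get
    # "~" expanded; anything else (open files, buffers) passes through as-is.
    if isinstance(obj, os.PathLike):
        obj = os.fspath(obj)
    if isinstance(obj, str):
        return os.path.expanduser(obj)
    return obj

print(stringify_path_sketch(Path("~/data.csv")))  # e.g. /home/user/data.csv
buf = StringIO("a,b\n")
assert stringify_path_sketch(buf) is buf          # buffers pass through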
+ if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode: + warnings.warn( + "compression has no effect when passing a non-binary object as input.", + RuntimeWarning, + stacklevel=find_stack_level(), + ) + compression_method = None + + compression = dict(compression, method=compression_method) + + # bz2 and xz do not write the byte order mark for utf-16 and utf-32 + # print a warning when writing such files + if ( + "w" in mode + and compression_method in ["bz2", "xz"] + and encoding in ["utf-16", "utf-32"] + ): + warnings.warn( + f"{compression} will not write the byte order mark for {encoding}", + UnicodeWarning, + stacklevel=find_stack_level(), + ) + + # Use binary mode when converting path-like objects to file-like objects (fsspec) + # except when text mode is explicitly requested. The original mode is returned if + # fsspec is not used. + fsspec_mode = mode + if "t" not in fsspec_mode and "b" not in fsspec_mode: + fsspec_mode += "b" + + if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer): + # TODO: fsspec can also handle HTTP via requests, but leaving this + # unchanged. using fsspec appears to break the ability to infer if the + # server responded with gzipped data + storage_options = storage_options or {} + + # waiting until now for importing to match intended lazy logic of + # urlopen function defined elsewhere in this module + import urllib.request + + # assuming storage_options is to be interpreted as headers + req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options) + with urlopen(req_info) as req: + content_encoding = req.headers.get("Content-Encoding", None) + if content_encoding == "gzip": + # Override compression based on Content-Encoding header + compression = {"method": "gzip"} + reader = BytesIO(req.read()) + return IOArgs( + filepath_or_buffer=reader, + encoding=encoding, + compression=compression, + should_close=True, + mode=fsspec_mode, + ) + + if is_fsspec_url(filepath_or_buffer): + assert isinstance( + filepath_or_buffer, str + ) # just to appease mypy for this branch + # two special-case s3-like protocols; these have special meaning in Hadoop, + # but are equivalent to just "s3" from fsspec's point of view + # cc #11071 + if filepath_or_buffer.startswith("s3a://"): + filepath_or_buffer = filepath_or_buffer.replace("s3a://", "s3://") + if filepath_or_buffer.startswith("s3n://"): + filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://") + fsspec = import_optional_dependency("fsspec") + + # If botocore is installed we fallback to reading with anon=True + # to allow reads from public buckets + err_types_to_retry_with_anon: list[Any] = [] + try: + import_optional_dependency("botocore") + from botocore.exceptions import ( + ClientError, + NoCredentialsError, + ) + + err_types_to_retry_with_anon = [ + ClientError, + NoCredentialsError, + PermissionError, + ] + except ImportError: + pass + + try: + file_obj = fsspec.open( + filepath_or_buffer, mode=fsspec_mode, **(storage_options or {}) + ).open() + # GH 34626 Reads from Public Buckets without Credentials needs anon=True + except tuple(err_types_to_retry_with_anon): + if storage_options is None: + storage_options = {"anon": True} + else: + # don't mutate user input. 
+ storage_options = dict(storage_options) + storage_options["anon"] = True + file_obj = fsspec.open( + filepath_or_buffer, mode=fsspec_mode, **(storage_options or {}) + ).open() + + return IOArgs( + filepath_or_buffer=file_obj, + encoding=encoding, + compression=compression, + should_close=True, + mode=fsspec_mode, + ) + elif storage_options: + raise ValueError( + "storage_options passed with file object or non-fsspec file path" + ) + + if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)): + return IOArgs( + filepath_or_buffer=_expand_user(filepath_or_buffer), + encoding=encoding, + compression=compression, + should_close=False, + mode=mode, + ) + + # is_file_like requires (read | write) & __iter__ but __iter__ is only + # needed for read_csv(engine=python) + if not ( + hasattr(filepath_or_buffer, "read") or hasattr(filepath_or_buffer, "write") + ): + msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}" + raise ValueError(msg) + + return IOArgs( + filepath_or_buffer=filepath_or_buffer, + encoding=encoding, + compression=compression, + should_close=False, + mode=mode, + ) + + +def file_path_to_url(path: str) -> str: + """ + converts an absolute native path to a FILE URL. + + Parameters + ---------- + path : a path in native format + + Returns + ------- + a valid FILE URL + """ + # lazify expensive import (~30ms) + from urllib.request import pathname2url + + return urljoin("file:", pathname2url(path)) + + +extension_to_compression = { + ".tar": "tar", + ".tar.gz": "tar", + ".tar.bz2": "tar", + ".tar.xz": "tar", + ".gz": "gzip", + ".bz2": "bz2", + ".zip": "zip", + ".xz": "xz", + ".zst": "zstd", +} +_supported_compressions = set(extension_to_compression.values()) + + +def get_compression_method( + compression: CompressionOptions, +) -> tuple[str | None, CompressionDict]: + """ + Simplifies a compression argument to a compression method string and + a mapping containing additional arguments. + + Parameters + ---------- + compression : str or mapping + If string, specifies the compression method. If mapping, value at key + 'method' specifies compression method. + + Returns + ------- + tuple of ({compression method}, Optional[str] + {compression arguments}, Dict[str, Any]) + + Raises + ------ + ValueError on mapping missing 'method' key + """ + compression_method: str | None + if isinstance(compression, Mapping): + compression_args = dict(compression) + try: + compression_method = compression_args.pop("method") + except KeyError as err: + raise ValueError("If mapping, compression must have key 'method'") from err + else: + compression_args = {} + compression_method = compression + return compression_method, compression_args + + +@doc(compression_options=_shared_docs["compression_options"] % "filepath_or_buffer") +def infer_compression( + filepath_or_buffer: FilePath | BaseBuffer, compression: str | None +) -> str | None: + """ + Get the compression method for filepath_or_buffer. If compression='infer', + the inferred compression method is returned. Otherwise, the input + compression method is returned unchanged, unless it's invalid, in which + case an error is raised. + + Parameters + ---------- + filepath_or_buffer : str or file handle + File path or object. + {compression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + Returns + ------- + string or None + + Raises + ------ + ValueError on invalid compression specified. + """ + if compression is None: + return None + + # Infer compression + if compression == "infer": + # Convert all path types (e.g. 
pathlib.Path) to strings + filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True) + if not isinstance(filepath_or_buffer, str): + # Cannot infer compression of a buffer, assume no compression + return None + + # Infer compression from the filename/URL extension + for extension, compression in extension_to_compression.items(): + if filepath_or_buffer.lower().endswith(extension): + return compression + return None + + # Compression has been specified. Check that it's valid + if compression in _supported_compressions: + return compression + + valid = ["infer", None] + sorted(_supported_compressions) + msg = ( + f"Unrecognized compression type: {compression}\n" + f"Valid compression types are {valid}" + ) + raise ValueError(msg) + + +def check_parent_directory(path: Path | str) -> None: + """ + Check if parent directory of a file exists, raise OSError if it does not + + Parameters + ---------- + path: Path or str + Path to check parent directory of + """ + parent = Path(path).parent + if not parent.is_dir(): + raise OSError(rf"Cannot save file into a non-existent directory: '{parent}'") + + +@overload +def get_handle( + path_or_buf: FilePath | BaseBuffer, + mode: str, + *, + encoding: str | None = ..., + compression: CompressionOptions = ..., + memory_map: bool = ..., + is_text: Literal[False], + errors: str | None = ..., + storage_options: StorageOptions = ..., +) -> IOHandles[bytes]: + ... + + +@overload +def get_handle( + path_or_buf: FilePath | BaseBuffer, + mode: str, + *, + encoding: str | None = ..., + compression: CompressionOptions = ..., + memory_map: bool = ..., + is_text: Literal[True] = ..., + errors: str | None = ..., + storage_options: StorageOptions = ..., +) -> IOHandles[str]: + ... + + +@overload +def get_handle( + path_or_buf: FilePath | BaseBuffer, + mode: str, + *, + encoding: str | None = ..., + compression: CompressionOptions = ..., + memory_map: bool = ..., + is_text: bool = ..., + errors: str | None = ..., + storage_options: StorageOptions = ..., +) -> IOHandles[str] | IOHandles[bytes]: + ... + + +@doc(compression_options=_shared_docs["compression_options"] % "path_or_buf") +def get_handle( + path_or_buf: FilePath | BaseBuffer, + mode: str, + *, + encoding: str | None = None, + compression: CompressionOptions | None = None, + memory_map: bool = False, + is_text: bool = True, + errors: str | None = None, + storage_options: StorageOptions | None = None, +) -> IOHandles[str] | IOHandles[bytes]: + """ + Get file handle for given path/buffer and mode. + + Parameters + ---------- + path_or_buf : str or file handle + File path or object. + mode : str + Mode to open path_or_buf with. + encoding : str or None + Encoding to use. + {compression_options} + + May be a dict with key 'method' as compression mode + and other keys as compression options if compression + mode is 'zip'. + + Passing compression options as keys in dict is + supported for compression modes 'gzip', 'bz2', 'zstd' and 'zip'. + + .. versionchanged:: 1.4.0 Zstandard support. + + memory_map : bool, default False + See parsers._parser_params for more information. Only used by read_csv. + is_text : bool, default True + Whether the type of the content passed to the file/buffer is string or + bytes. This is not the same as `"b" not in mode`. If a string content is + passed to a binary file/buffer, a wrapper is inserted. + errors : str, default 'strict' + Specifies how encoding and decoding errors are to be handled. + See the errors argument for :func:`open` for a full list + of options. 
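The inference above is purely suffix-driven via `extension_to_compression`. A self-contained restatement that sorts suffixes longest-first instead of relying on dict ordering, so `.tar.gz` still beats `.gz` (illustrative only, not the pandas function):

from __future__ import annotations

extension_to_compression = {
    ".tar": "tar", ".tar.gz": "tar", ".tar.bz2": "tar", ".tar.xz": "tar",
    ".gz": "gzip", ".bz2": "bz2", ".zip": "zip", ".xz": "xz", ".zst": "zstd",
}

def infer_compression_sketch(path: str) -> str | None:
    # Longest suffix wins, so archives like bundle.tar.gz map to "tar",
    # not "gzip".
    for ext in sorted(extension_to_compression, key=len, reverse=True):
        if path.lower().endswith(ext):
            return extension_to_compression[ext]
    return None

assert infer_compression_sketch("frame.csv.gz") == "gzip"
assert infer_compression_sketch("bundle.tar.gz") == "tar"
assert infer_compression_sketch("frame.csv") is None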
+ storage_options: StorageOptions = None + Passed to _get_filepath_or_buffer + + .. versionchanged:: 1.2.0 + + Returns the dataclass IOHandles + """ + # Windows does not default to utf-8. Set to utf-8 for a consistent behavior + encoding = encoding or "utf-8" + + errors = errors or "strict" + + # read_csv does not know whether the buffer is opened in binary/text mode + if _is_binary_mode(path_or_buf, mode) and "b" not in mode: + mode += "b" + + # validate encoding and errors + codecs.lookup(encoding) + if isinstance(errors, str): + codecs.lookup_error(errors) + + # open URLs + ioargs = _get_filepath_or_buffer( + path_or_buf, + encoding=encoding, + compression=compression, + mode=mode, + storage_options=storage_options, + ) + + handle = ioargs.filepath_or_buffer + handles: list[BaseBuffer] + + # memory mapping needs to be the first step + # only used for read_csv + handle, memory_map, handles = _maybe_memory_map(handle, memory_map) + + is_path = isinstance(handle, str) + compression_args = dict(ioargs.compression) + compression = compression_args.pop("method") + + # Only for write methods + if "r" not in mode and is_path: + check_parent_directory(str(handle)) + + if compression: + if compression != "zstd": + # compression libraries do not like an explicit text-mode + ioargs.mode = ioargs.mode.replace("t", "") + elif compression == "zstd" and "b" not in ioargs.mode: + # python-zstandard defaults to text mode, but we always expect + # compression libraries to use binary mode. + ioargs.mode += "b" + + # GZ Compression + if compression == "gzip": + if isinstance(handle, str): + # error: Incompatible types in assignment (expression has type + # "GzipFile", variable has type "Union[str, BaseBuffer]") + handle = gzip.GzipFile( # type: ignore[assignment] + filename=handle, + mode=ioargs.mode, + **compression_args, + ) + else: + handle = gzip.GzipFile( + # No overload variant of "GzipFile" matches argument types + # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" + fileobj=handle, # type: ignore[call-overload] + mode=ioargs.mode, + **compression_args, + ) + + # BZ Compression + elif compression == "bz2": + # Overload of "BZ2File" to handle pickle protocol 5 + # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" + handle = get_bz2_file()( # type: ignore[call-overload] + handle, + mode=ioargs.mode, + **compression_args, + ) + + # ZIP Compression + elif compression == "zip": + # error: Argument 1 to "_BytesZipFile" has incompatible type + # "Union[str, BaseBuffer]"; expected "Union[Union[str, PathLike[str]], + # ReadBuffer[bytes], WriteBuffer[bytes]]" + handle = _BytesZipFile( + handle, ioargs.mode, **compression_args # type: ignore[arg-type] + ) + if handle.buffer.mode == "r": + handles.append(handle) + zip_names = handle.buffer.namelist() + if len(zip_names) == 1: + handle = handle.buffer.open(zip_names.pop()) + elif not zip_names: + raise ValueError(f"Zero files found in ZIP file {path_or_buf}") + else: + raise ValueError( + "Multiple files found in ZIP file. 
" + f"Only one file per ZIP: {zip_names}" + ) + + # TAR Encoding + elif compression == "tar": + compression_args.setdefault("mode", ioargs.mode) + if isinstance(handle, str): + handle = _BytesTarFile(name=handle, **compression_args) + else: + # error: Argument "fileobj" to "_BytesTarFile" has incompatible + # type "BaseBuffer"; expected "Union[ReadBuffer[bytes], + # WriteBuffer[bytes], None]" + handle = _BytesTarFile( + fileobj=handle, **compression_args # type: ignore[arg-type] + ) + assert isinstance(handle, _BytesTarFile) + if "r" in handle.buffer.mode: + handles.append(handle) + files = handle.buffer.getnames() + if len(files) == 1: + file = handle.buffer.extractfile(files[0]) + assert file is not None + handle = file + elif not files: + raise ValueError(f"Zero files found in TAR archive {path_or_buf}") + else: + raise ValueError( + "Multiple files found in TAR archive. " + f"Only one file per TAR archive: {files}" + ) + + # XZ Compression + elif compression == "xz": + # error: Argument 1 to "LZMAFile" has incompatible type "Union[str, + # BaseBuffer]"; expected "Optional[Union[Union[str, bytes, PathLike[str], + # PathLike[bytes]], IO[bytes]], None]" + handle = get_lzma_file()( + handle, ioargs.mode, **compression_args # type: ignore[arg-type] + ) + + # Zstd Compression + elif compression == "zstd": + zstd = import_optional_dependency("zstandard") + if "r" in ioargs.mode: + open_args = {"dctx": zstd.ZstdDecompressor(**compression_args)} + else: + open_args = {"cctx": zstd.ZstdCompressor(**compression_args)} + handle = zstd.open( + handle, + mode=ioargs.mode, + **open_args, + ) + + # Unrecognized Compression + else: + msg = f"Unrecognized compression type: {compression}" + raise ValueError(msg) + + assert not isinstance(handle, str) + handles.append(handle) + + elif isinstance(handle, str): + # Check whether the filename is to be opened in binary mode. + # Binary mode does not support 'encoding' and 'newline'. 
+ if ioargs.encoding and "b" not in ioargs.mode: + # Encoding + handle = open( + handle, + ioargs.mode, + encoding=ioargs.encoding, + errors=errors, + newline="", + ) + else: + # Binary mode + handle = open(handle, ioargs.mode) + handles.append(handle) + + # Convert BytesIO or file objects passed with an encoding + is_wrapped = False + if not is_text and ioargs.mode == "rb" and isinstance(handle, TextIOBase): + # not added to handles as it does not open/buffer resources + handle = _BytesIOWrapper( + handle, + encoding=ioargs.encoding, + ) + elif is_text and ( + compression or memory_map or _is_binary_mode(handle, ioargs.mode) + ): + if ( + not hasattr(handle, "readable") + or not hasattr(handle, "writable") + or not hasattr(handle, "seekable") + ): + handle = _IOWrapper(handle) + # error: Argument 1 to "TextIOWrapper" has incompatible type + # "_IOWrapper"; expected "IO[bytes]" + handle = TextIOWrapper( + handle, # type: ignore[arg-type] + encoding=ioargs.encoding, + errors=errors, + newline="", + ) + handles.append(handle) + # only marked as wrapped when the caller provided a handle + is_wrapped = not ( + isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close + ) + + if "r" in ioargs.mode and not hasattr(handle, "read"): + raise TypeError( + "Expected file path name or file-like object, " + f"got {type(ioargs.filepath_or_buffer)} type" + ) + + handles.reverse() # close the most recently added buffer first + if ioargs.should_close: + assert not isinstance(ioargs.filepath_or_buffer, str) + handles.append(ioargs.filepath_or_buffer) + + return IOHandles( + # error: Argument "handle" to "IOHandles" has incompatible type + # "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes], + # typing.IO[Any]]"; expected "pandas._typing.IO[Any]" + handle=handle, # type: ignore[arg-type] + # error: Argument "created_handles" to "IOHandles" has incompatible type + # "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]" + created_handles=handles, # type: ignore[arg-type] + is_wrapped=is_wrapped, + compression=ioargs.compression, + ) + + +# error: Definition of "__enter__" in base class "IOBase" is incompatible +# with definition in base class "BinaryIO" +class _BufferedWriter(BytesIO, ABC): # type: ignore[misc] + """ + Some objects do not support multiple .write() calls (TarFile and ZipFile). + This wrapper writes to the underlying buffer on close. + """ + + buffer = BytesIO() + + @abstractmethod + def write_to_buffer(self) -> None: + ... 
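As `_BufferedWriter`'s docstring above notes, `ZipFile`/`TarFile` cannot absorb a stream of small writes as one archive member, so everything is staged in a `BytesIO` and emitted in a single shot when the handle closes. The idea reduced to a zip-only toy (hypothetical file and member names; not the pandas class):

import io
import zipfile

class ZipMemberWriter(io.BytesIO):
    # Stage all writes in memory; flush them as one archive member on close.
    def __init__(self, zip_path: str, member_name: str) -> None:
        super().__init__()
        self._zip = zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED)
        self._name = member_name

    def close(self) -> None:
        if not self.closed:
            self._zip.writestr(self._name, self.getvalue())
            self._zip.close()
        super().close()

with ZipMemberWriter("example.zip", "data.csv") as fh:  # hypothetical paths
    fh.write(b"a,b\n")
    fh.write(b"1,2\n")  # many small writes, one resulting member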
+
+    def close(self) -> None:
+        if self.closed:
+            # already closed
+            return
+        if self.getbuffer().nbytes:
+            # write to buffer
+            self.seek(0)
+            with self.buffer:
+                self.write_to_buffer()
+        else:
+            self.buffer.close()
+        super().close()
+
+
+class _BytesTarFile(_BufferedWriter):
+    def __init__(
+        self,
+        name: str | None = None,
+        mode: Literal["r", "a", "w", "x"] = "r",
+        fileobj: ReadBuffer[bytes] | WriteBuffer[bytes] | None = None,
+        archive_name: str | None = None,
+        **kwargs,
+    ) -> None:
+        super().__init__()
+        self.archive_name = archive_name
+        self.name = name
+        # error: Incompatible types in assignment (expression has type "TarFile",
+        # base class "_BufferedWriter" defined the type as "BytesIO")
+        self.buffer: tarfile.TarFile = tarfile.TarFile.open(  # type: ignore[assignment]
+            name=name,
+            mode=self.extend_mode(mode),
+            fileobj=fileobj,
+            **kwargs,
+        )
+
+    def extend_mode(self, mode: str) -> str:
+        mode = mode.replace("b", "")
+        if mode != "w":
+            return mode
+        if self.name is not None:
+            suffix = Path(self.name).suffix
+            if suffix in (".gz", ".xz", ".bz2"):
+                mode = f"{mode}:{suffix[1:]}"
+        return mode
+
+    def infer_filename(self) -> str | None:
+        """
+        If an explicit archive_name is not given, we still want the file inside the zip
+        file not to be named something.tar, because that causes confusion (GH39465).
+        """
+        if self.name is None:
+            return None
+
+        filename = Path(self.name)
+        if filename.suffix == ".tar":
+            return filename.with_suffix("").name
+        elif filename.suffix in (".tar.gz", ".tar.bz2", ".tar.xz"):
+            return filename.with_suffix("").with_suffix("").name
+        return filename.name
+
+    def write_to_buffer(self) -> None:
+        # TarFile needs a non-empty string
+        archive_name = self.archive_name or self.infer_filename() or "tar"
+        tarinfo = tarfile.TarInfo(name=archive_name)
+        tarinfo.size = len(self.getvalue())
+        self.buffer.addfile(tarinfo, self)
+
+
+class _BytesZipFile(_BufferedWriter):
+    def __init__(
+        self,
+        file: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],
+        mode: str,
+        archive_name: str | None = None,
+        **kwargs,
+    ) -> None:
+        super().__init__()
+        mode = mode.replace("b", "")
+        self.archive_name = archive_name
+
+        kwargs.setdefault("compression", zipfile.ZIP_DEFLATED)
+        # error: Incompatible types in assignment (expression has type "ZipFile",
+        # base class "_BufferedWriter" defined the type as "BytesIO")
+        self.buffer: zipfile.ZipFile = zipfile.ZipFile(  # type: ignore[assignment]
+            file, mode, **kwargs
+        )
+
+    def infer_filename(self) -> str | None:
+        """
+        If an explicit archive_name is not given, we still want the file inside the zip
+        file not to be named something.zip, because that causes confusion (GH39465).
+        """
+        if isinstance(self.buffer.filename, (os.PathLike, str)):
+            filename = Path(self.buffer.filename)
+            if filename.suffix == ".zip":
+                return filename.with_suffix("").name
+            return filename.name
+        return None
+
+    def write_to_buffer(self) -> None:
+        # ZipFile needs a non-empty string
+        archive_name = self.archive_name or self.infer_filename() or "zip"
+        self.buffer.writestr(archive_name, self.getvalue())
+
+
+class _IOWrapper:
+    # TextIOWrapper is overly strict: it requires that the buffer has seekable,
+    # readable, and writable. If we have a read-only buffer, we shouldn't need
+    # writable and vice versa. Some buffers are seek/read/writ-able but they do
+    # not have the "-able" methods, e.g., tempfile.SpooledTemporaryFile.
+    # If a buffer does not have the above "-able" methods, we simply assume they
+    # are seek/read/writ-able.
+    def __init__(self, buffer: BaseBuffer) -> None:
+        self.buffer = buffer
+
+    def __getattr__(self, name: str):
+        return getattr(self.buffer, name)
+
+    def readable(self) -> bool:
+        if hasattr(self.buffer, "readable"):
+            return self.buffer.readable()
+        return True
+
+    def seekable(self) -> bool:
+        if hasattr(self.buffer, "seekable"):
+            return self.buffer.seekable()
+        return True
+
+    def writable(self) -> bool:
+        if hasattr(self.buffer, "writable"):
+            return self.buffer.writable()
+        return True
+
+
+class _BytesIOWrapper:
+    # Wrapper that wraps a StringIO buffer and reads bytes from it
+    # Created for compat with pyarrow read_csv
+    def __init__(self, buffer: StringIO | TextIOBase, encoding: str = "utf-8") -> None:
+        self.buffer = buffer
+        self.encoding = encoding
+        # Because a character can be represented by more than 1 byte,
+        # it is possible that reading will produce more bytes than n
+        # We store the extra bytes in this overflow variable, and append the
+        # overflow to the front of the bytestring the next time reading is performed
+        self.overflow = b""
+
+    def __getattr__(self, attr: str):
+        return getattr(self.buffer, attr)
+
+    def read(self, n: int | None = -1) -> bytes:
+        assert self.buffer is not None
+        bytestring = self.buffer.read(n).encode(self.encoding)
+        # When n=-1/n greater than remaining bytes: Read entire file/rest of file
+        combined_bytestring = self.overflow + bytestring
+        if n is None or n < 0 or n >= len(combined_bytestring):
+            self.overflow = b""
+            return combined_bytestring
+        else:
+            to_return = combined_bytestring[:n]
+            self.overflow = combined_bytestring[n:]
+            return to_return
+
+
+def _maybe_memory_map(
+    handle: str | BaseBuffer, memory_map: bool
+) -> tuple[str | BaseBuffer, bool, list[BaseBuffer]]:
+    """Try to memory map file/buffer."""
+    handles: list[BaseBuffer] = []
+    memory_map &= hasattr(handle, "fileno") or isinstance(handle, str)
+    if not memory_map:
+        return handle, memory_map, handles
+
+    # mmap used by only read_csv
+    handle = cast(ReadCsvBuffer, handle)
+
+    # need to open the file first
+    if isinstance(handle, str):
+        handle = open(handle, "rb")
+        handles.append(handle)
+
+    try:
+        # open mmap and adds *-able
+        # error: Argument 1 to "_IOWrapper" has incompatible type "mmap";
+        # expected "BaseBuffer"
+        wrapped = _IOWrapper(
+            mmap.mmap(
+                handle.fileno(), 0, access=mmap.ACCESS_READ  # type: ignore[arg-type]
+            )
+        )
+    finally:
+        for handle in reversed(handles):
+            # error: "BaseBuffer" has no attribute "close"
+            handle.close()  # type: ignore[attr-defined]
+
+    return wrapped, memory_map, [wrapped]
+
+
+def file_exists(filepath_or_buffer: FilePath | BaseBuffer) -> bool:
+    """Test whether file exists."""
+    exists = False
+    filepath_or_buffer = stringify_path(filepath_or_buffer)
+    if not isinstance(filepath_or_buffer, str):
+        return exists
+    try:
+        exists = os.path.exists(filepath_or_buffer)
+        # gh-5874: if the filepath is too long will raise here
+    except (TypeError, ValueError):
+        pass
+    return exists
+
+
+def _is_binary_mode(handle: FilePath | BaseBuffer, mode: str) -> bool:
+    """Whether the handle is opened in binary mode"""
+    # specified by user
+    if "t" in mode or "b" in mode:
+        return "b" in mode
+
+    # exceptions
+    text_classes = (
+        # classes that expect string but have 'b' in mode
+        codecs.StreamWriter,
+        codecs.StreamReader,
+        codecs.StreamReaderWriter,
+    )
+    if issubclass(type(handle), text_classes):
+        return False
+
+    return isinstance(handle, _get_binary_io_classes()) or "b" in getattr(
+        handle, "mode", mode
+    )
+
+
+@functools.lru_cache
+def _get_binary_io_classes() -> tuple[type, ...]:
+    """IO classes that expect bytes"""
+    binary_classes: tuple[type, ...] = (BufferedIOBase, RawIOBase)
+
+    # python-zstandard doesn't use any of the builtin base classes; instead we
+    # have to use the `zstd.ZstdDecompressionReader` class for isinstance checks.
+    # Unfortunately `zstd.ZstdDecompressionReader` isn't exposed by python-zstandard
+    # so we have to get it from a `zstd.ZstdDecompressor` instance.
+    # See also https://github.com/indygreg/python-zstandard/pull/165.
+    zstd = import_optional_dependency("zstandard", errors="ignore")
+    if zstd is not None:
+        with zstd.ZstdDecompressor().stream_reader(b"") as reader:
+            binary_classes += (type(reader),)
+
+    return binary_classes
+
+
+def is_potential_multi_index(
+    columns: Sequence[Hashable] | MultiIndex,
+    index_col: bool | Sequence[int] | None = None,
+) -> bool:
+    """
+    Check whether or not the `columns` parameter
+    could be converted into a MultiIndex.
+
+    Parameters
+    ----------
+    columns : array-like
+        Object which may or may not be convertible into a MultiIndex
+    index_col : None, bool or list, optional
+        Column or columns to use as the (possibly hierarchical) index
+
+    Returns
+    -------
+    bool : Whether or not columns could become a MultiIndex
+    """
+    if index_col is None or isinstance(index_col, bool):
+        index_col = []
+
+    return bool(
+        len(columns)
+        and not isinstance(columns, MultiIndex)
+        and all(isinstance(c, tuple) for c in columns if c not in list(index_col))
+    )
+
+
+def dedup_names(
+    names: Sequence[Hashable], is_potential_multiindex: bool
+) -> Sequence[Hashable]:
+    """
+    Rename column names if duplicates exist.
+
+    Currently the renaming is done by appending a period and an autonumeric,
+    but a custom pattern may be supported in the future.
+ + Examples + -------- + >>> dedup_names(["x", "y", "x", "x"], is_potential_multiindex=False) + ['x', 'y', 'x.1', 'x.2'] + """ + names = list(names) # so we can index + counts: DefaultDict[Hashable, int] = defaultdict(int) + + for i, col in enumerate(names): + cur_count = counts[col] + + while cur_count > 0: + counts[col] = cur_count + 1 + + if is_potential_multiindex: + # for mypy + assert isinstance(col, tuple) + col = col[:-1] + (f"{col[-1]}.{cur_count}",) + else: + col = f"{col}.{cur_count}" + cur_count = counts[col] + + names[i] = col + counts[col] = cur_count + 1 + + return names diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/__init__.py new file mode 100644 index 00000000..275cbf01 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/__init__.py @@ -0,0 +1,19 @@ +from pandas.io.excel._base import ( + ExcelFile, + ExcelWriter, + read_excel, +) +from pandas.io.excel._odswriter import ODSWriter as _ODSWriter +from pandas.io.excel._openpyxl import OpenpyxlWriter as _OpenpyxlWriter +from pandas.io.excel._util import register_writer +from pandas.io.excel._xlsxwriter import XlsxWriter as _XlsxWriter + +__all__ = ["read_excel", "ExcelWriter", "ExcelFile"] + + +register_writer(_OpenpyxlWriter) + +register_writer(_XlsxWriter) + + +register_writer(_ODSWriter) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_base.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_base.py new file mode 100644 index 00000000..9ffbfb9f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_base.py @@ -0,0 +1,1672 @@ +from __future__ import annotations + +import abc +from collections.abc import ( + Hashable, + Iterable, + Mapping, + Sequence, +) +import datetime +from functools import partial +from io import BytesIO +import os +from textwrap import fill +from typing import ( + IO, + TYPE_CHECKING, + Any, + Callable, + Generic, + Literal, + TypeVar, + Union, + cast, + overload, +) +import warnings +import zipfile + +from pandas._config import config + +from pandas._libs import lib +from pandas._libs.parsers import STR_NA_VALUES +from pandas.compat._optional import ( + get_version, + import_optional_dependency, +) +from pandas.errors import EmptyDataError +from pandas.util._decorators import ( + Appender, + doc, +) +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.common import ( + is_bool, + is_float, + is_integer, + is_list_like, +) + +from pandas.core.frame import DataFrame +from pandas.core.shared_docs import _shared_docs +from pandas.util.version import Version + +from pandas.io.common import ( + IOHandles, + get_handle, + stringify_path, + validate_header_arg, +) +from pandas.io.excel._util import ( + fill_mi_header, + get_default_engine, + get_writer, + maybe_convert_usecols, + pop_header_name, +) +from pandas.io.parsers import TextParser +from pandas.io.parsers.readers import validate_integer + +if TYPE_CHECKING: + from types import TracebackType + + from pandas._typing import ( + DtypeArg, + DtypeBackend, + ExcelWriterIfSheetExists, + FilePath, + IntStrT, + ReadBuffer, + Self, + StorageOptions, + WriteExcelBuffer, + ) +_read_excel_doc = ( + """ +Read an Excel file into a pandas DataFrame. + +Supports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions +read from a local filesystem or URL. 
Supports an option to read +a single sheet or a list of sheets. + +Parameters +---------- +io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object + Any valid string path is acceptable. The string could be a URL. Valid + URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: ``file://localhost/path/to/table.xlsx``. + + If you want to pass in a path object, pandas accepts any ``os.PathLike``. + + By file-like object, we refer to objects with a ``read()`` method, + such as a file handle (e.g. via builtin ``open`` function) + or ``StringIO``. + + .. deprecated:: 2.1.0 + Passing byte strings is deprecated. To read from a + byte string, wrap it in a ``BytesIO`` object. +sheet_name : str, int, list, or None, default 0 + Strings are used for sheet names. Integers are used in zero-indexed + sheet positions (chart sheets do not count as a sheet position). + Lists of strings/integers are used to request multiple sheets. + Specify None to get all worksheets. + + Available cases: + + * Defaults to ``0``: 1st sheet as a `DataFrame` + * ``1``: 2nd sheet as a `DataFrame` + * ``"Sheet1"``: Load sheet with name "Sheet1" + * ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5" + as a dict of `DataFrame` + * None: All worksheets. + +header : int, list of int, default 0 + Row (0-indexed) to use for the column labels of the parsed + DataFrame. If a list of integers is passed those row positions will + be combined into a ``MultiIndex``. Use None if there is no header. +names : array-like, default None + List of column names to use. If file contains no header row, + then you should explicitly pass header=None. +index_col : int, str, list of int, default None + Column (0-indexed) to use as the row labels of the DataFrame. + Pass None if there is no such column. If a list is passed, + those columns will be combined into a ``MultiIndex``. If a + subset of data is selected with ``usecols``, index_col + is based on the subset. + + Missing values will be forward filled to allow roundtripping with + ``to_excel`` for ``merged_cells=True``. To avoid forward filling the + missing values use ``set_index`` after reading the data instead of + ``index_col``. +usecols : str, list-like, or callable, default None + * If None, then parse all columns. + * If str, then indicates comma separated list of Excel column letters + and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of + both sides. + * If list of int, then indicates list of column numbers to be parsed + (0-indexed). + * If list of string, then indicates list of column names to be parsed. + * If callable, then evaluate each column name against it and parse the + column if the callable returns ``True``. + + Returns a subset of the columns according to behavior above. +dtype : Type name or dict of column -> type, default None + Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32}} + Use `object` to preserve data as stored in Excel and not interpret dtype. + If converters are specified, they will be applied INSTEAD + of dtype conversion. +engine : str, default None + If io is not a buffer or path, this must be set to identify io. + Supported engines: "xlrd", "openpyxl", "odf", "pyxlsb". + Engine compatibility : + + - "xlrd" supports old-style Excel files (.xls). + - "openpyxl" supports newer Excel file formats. + - "odf" supports OpenDocument file formats (.odf, .ods, .odt). + - "pyxlsb" supports Binary Excel files. + + .. 
versionchanged:: 1.2.0
+       The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_
+       now only supports old-style ``.xls`` files.
+       When ``engine=None``, the following logic will be
+       used to determine the engine:
+
+       - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
+         then `odf <https://pypi.org/project/odfpy/>`_ will be used.
+       - Otherwise if ``path_or_buffer`` is an xls format,
+         ``xlrd`` will be used.
+       - Otherwise if ``path_or_buffer`` is in xlsb format,
+         ``pyxlsb`` will be used.
+
+       .. versionadded:: 1.3.0
+       - Otherwise ``openpyxl`` will be used.
+
+       .. versionchanged:: 1.3.0
+
+converters : dict, default None
+    Dict of functions for converting values in certain columns. Keys can
+    either be integers or column labels, values are functions that take one
+    input argument, the Excel cell content, and return the transformed
+    content.
+true_values : list, default None
+    Values to consider as True.
+false_values : list, default None
+    Values to consider as False.
+skiprows : list-like, int, or callable, optional
+    Line numbers to skip (0-indexed) or number of lines to skip (int) at the
+    start of the file. If callable, the callable function will be evaluated
+    against the row indices, returning True if the row should be skipped and
+    False otherwise. An example of a valid callable argument would be ``lambda
+    x: x in [0, 2]``.
+nrows : int, default None
+    Number of rows to parse.
+na_values : scalar, str, list-like, or dict, default None
+    Additional strings to recognize as NA/NaN. If dict passed, specific
+    per-column NA values. By default the following values are interpreted
+    as NaN: '"""
+    + fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent="    ")
+    + """'.
+keep_default_na : bool, default True
+    Whether or not to include the default NaN values when parsing the data.
+    Depending on whether `na_values` is passed in, the behavior is as follows:
+
+    * If `keep_default_na` is True, and `na_values` are specified, `na_values`
+      is appended to the default NaN values used for parsing.
+    * If `keep_default_na` is True, and `na_values` are not specified, only
+      the default NaN values are used for parsing.
+    * If `keep_default_na` is False, and `na_values` are specified, only
+      the NaN values specified in `na_values` are used for parsing.
+    * If `keep_default_na` is False, and `na_values` are not specified, no
+      strings will be parsed as NaN.
+
+    Note that if `na_filter` is passed in as False, the `keep_default_na` and
+    `na_values` parameters will be ignored.
+na_filter : bool, default True
+    Detect missing value markers (empty strings and the value of na_values). In
+    data without any NAs, passing na_filter=False can improve the performance
+    of reading a large file.
+verbose : bool, default False
+    Indicate number of NA values placed in non-numeric columns.
+parse_dates : bool, list-like, or dict, default False
+    The behavior is as follows:
+
+    * bool. If True -> try parsing the index.
+    * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
+      each as a separate date column.
+    * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
+      a single date column.
+    * dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
+      result 'foo'
+
+    If a column or index contains an unparsable date, the entire column or
+    index will be returned unaltered as an object data type. If you don't want to
+    parse some cells as date just change their type in Excel to "Text".
+    For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.
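Following the advice above, a non-standard date column can be read as text and converted afterwards; a small sketch (file name and column name invented):

```python
import pandas as pd

# Keep the column as text so Excel's display format can't interfere.
df = pd.read_excel("report.xlsx", dtype={"when": str})

# Convert explicitly; rows that don't match the format become NaT.
df["when"] = pd.to_datetime(df["when"], format="%d.%m.%Y", errors="coerce")
```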
+ + Note: A fast-path exists for iso8601-formatted dates. +date_parser : function, optional + Function to use for converting a sequence of string columns to an array of + datetime instances. The default uses ``dateutil.parser.parser`` to do the + conversion. Pandas will try to call `date_parser` in three different ways, + advancing to the next if an exception occurs: 1) Pass one or more arrays + (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the + string values from the columns defined by `parse_dates` into a single array + and pass that; and 3) call `date_parser` once for each row using one or + more strings (corresponding to the columns defined by `parse_dates`) as + arguments. + + .. deprecated:: 2.0.0 + Use ``date_format`` instead, or read in as ``object`` and then apply + :func:`to_datetime` as-needed. +date_format : str or dict of column -> format, default ``None`` + If used in conjunction with ``parse_dates``, will parse dates according to this + format. For anything more complex, + please read in as ``object`` and then apply :func:`to_datetime` as-needed. + + .. versionadded:: 2.0.0 +thousands : str, default None + Thousands separator for parsing string columns to numeric. Note that + this parameter is only necessary for columns stored as TEXT in Excel, + any numeric columns will automatically be parsed, regardless of display + format. +decimal : str, default '.' + Character to recognize as decimal point for parsing string columns to numeric. + Note that this parameter is only necessary for columns stored as TEXT in Excel, + any numeric columns will automatically be parsed, regardless of display + format.(e.g. use ',' for European data). + + .. versionadded:: 1.4.0 + +comment : str, default None + Comments out remainder of line. Pass a character or characters to this + argument to indicate comments in the input file. Any data between the + comment string and the end of the current line is ignored. +skipfooter : int, default 0 + Rows at the end to skip (0-indexed). +{storage_options} + + .. versionadded:: 1.2.0 + +dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + +engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. + +Returns +------- +DataFrame or dict of DataFrames + DataFrame from the passed in Excel file. See notes in sheet_name + argument for more information on when a dict of DataFrames is returned. + +See Also +-------- +DataFrame.to_excel : Write DataFrame to an Excel file. +DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file. +read_csv : Read a comma-separated values (csv) file into DataFrame. +read_fwf : Read a table of fixed-width formatted lines into DataFrame. + +Notes +----- +For specific information on the methods used for each Excel engine, refer to the pandas +:ref:`user guide ` + +Examples +-------- +The file can be read using the file name as string or an open file object: + +>>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP + Name Value +0 string1 1 +1 string2 2 +2 #Comment 3 + +>>> pd.read_excel(open('tmp.xlsx', 'rb'), +... 
sheet_name='Sheet3') # doctest: +SKIP + Unnamed: 0 Name Value +0 0 string1 1 +1 1 string2 2 +2 2 #Comment 3 + +Index and header can be specified via the `index_col` and `header` arguments + +>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP + 0 1 2 +0 NaN Name Value +1 0.0 string1 1 +2 1.0 string2 2 +3 2.0 #Comment 3 + +Column types are inferred but can be explicitly specified + +>>> pd.read_excel('tmp.xlsx', index_col=0, +... dtype={{'Name': str, 'Value': float}}) # doctest: +SKIP + Name Value +0 string1 1.0 +1 string2 2.0 +2 #Comment 3.0 + +True, False, and NA values, and thousands separators have defaults, +but can be explicitly specified, too. Supply the values you would like +as strings or lists of strings! + +>>> pd.read_excel('tmp.xlsx', index_col=0, +... na_values=['string1', 'string2']) # doctest: +SKIP + Name Value +0 NaN 1 +1 NaN 2 +2 #Comment 3 + +Comment lines in the excel input file can be skipped using the `comment` kwarg + +>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP + Name Value +0 string1 1.0 +1 string2 2.0 +2 None NaN +""" +) + + +@overload +def read_excel( + io, + # sheet name is str or int -> DataFrame + sheet_name: str | int = ..., + *, + header: int | Sequence[int] | None = ..., + names: list[str] | None = ..., + index_col: int | Sequence[int] | None = ..., + usecols: int + | str + | Sequence[int] + | Sequence[str] + | Callable[[str], bool] + | None = ..., + dtype: DtypeArg | None = ..., + engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = ..., + converters: dict[str, Callable] | dict[int, Callable] | None = ..., + true_values: Iterable[Hashable] | None = ..., + false_values: Iterable[Hashable] | None = ..., + skiprows: Sequence[int] | int | Callable[[int], object] | None = ..., + nrows: int | None = ..., + na_values=..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool = ..., + parse_dates: list | dict | bool = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: dict[Hashable, str] | str | None = ..., + thousands: str | None = ..., + decimal: str = ..., + comment: str | None = ..., + skipfooter: int = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame: + ... + + +@overload +def read_excel( + io, + # sheet name is list or None -> dict[IntStrT, DataFrame] + sheet_name: list[IntStrT] | None, + *, + header: int | Sequence[int] | None = ..., + names: list[str] | None = ..., + index_col: int | Sequence[int] | None = ..., + usecols: int + | str + | Sequence[int] + | Sequence[str] + | Callable[[str], bool] + | None = ..., + dtype: DtypeArg | None = ..., + engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = ..., + converters: dict[str, Callable] | dict[int, Callable] | None = ..., + true_values: Iterable[Hashable] | None = ..., + false_values: Iterable[Hashable] | None = ..., + skiprows: Sequence[int] | int | Callable[[int], object] | None = ..., + nrows: int | None = ..., + na_values=..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool = ..., + parse_dates: list | dict | bool = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: dict[Hashable, str] | str | None = ..., + thousands: str | None = ..., + decimal: str = ..., + comment: str | None = ..., + skipfooter: int = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> dict[IntStrT, DataFrame]: + ... 
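The two overloads above encode the `sheet_name`-dependent return type: a `str` or `int` yields a single `DataFrame`, while a list or `None` yields a dict keyed by sheet. A minimal, self-contained sketch of the same typing pattern, independent of pandas:

```python
from __future__ import annotations

from typing import overload


@overload
def fetch(key: str) -> int: ...
@overload
def fetch(key: list[str]) -> dict[str, int]: ...


def fetch(key: str | list[str]) -> int | dict[str, int]:
    # One runtime implementation; the overloads above only guide type
    # checkers, just as the read_excel overloads map sheet_name to the
    # return type.
    if isinstance(key, str):
        return len(key)
    return {k: len(k) for k in key}


single = fetch("abc")        # checkers infer int
many = fetch(["a", "bb"])    # checkers infer dict[str, int]
```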
+ + +@doc(storage_options=_shared_docs["storage_options"]) +@Appender(_read_excel_doc) +def read_excel( + io, + sheet_name: str | int | list[IntStrT] | None = 0, + *, + header: int | Sequence[int] | None = 0, + names: list[str] | None = None, + index_col: int | Sequence[int] | None = None, + usecols: int + | str + | Sequence[int] + | Sequence[str] + | Callable[[str], bool] + | None = None, + dtype: DtypeArg | None = None, + engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = None, + converters: dict[str, Callable] | dict[int, Callable] | None = None, + true_values: Iterable[Hashable] | None = None, + false_values: Iterable[Hashable] | None = None, + skiprows: Sequence[int] | int | Callable[[int], object] | None = None, + nrows: int | None = None, + na_values=None, + keep_default_na: bool = True, + na_filter: bool = True, + verbose: bool = False, + parse_dates: list | dict | bool = False, + date_parser: Callable | lib.NoDefault = lib.no_default, + date_format: dict[Hashable, str] | str | None = None, + thousands: str | None = None, + decimal: str = ".", + comment: str | None = None, + skipfooter: int = 0, + storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + engine_kwargs: dict | None = None, +) -> DataFrame | dict[IntStrT, DataFrame]: + check_dtype_backend(dtype_backend) + should_close = False + if engine_kwargs is None: + engine_kwargs = {} + + if not isinstance(io, ExcelFile): + should_close = True + io = ExcelFile( + io, + storage_options=storage_options, + engine=engine, + engine_kwargs=engine_kwargs, + ) + elif engine and engine != io.engine: + raise ValueError( + "Engine should not be specified when passing " + "an ExcelFile - ExcelFile already has the engine set" + ) + + try: + data = io.parse( + sheet_name=sheet_name, + header=header, + names=names, + index_col=index_col, + usecols=usecols, + dtype=dtype, + converters=converters, + true_values=true_values, + false_values=false_values, + skiprows=skiprows, + nrows=nrows, + na_values=na_values, + keep_default_na=keep_default_na, + na_filter=na_filter, + verbose=verbose, + parse_dates=parse_dates, + date_parser=date_parser, + date_format=date_format, + thousands=thousands, + decimal=decimal, + comment=comment, + skipfooter=skipfooter, + dtype_backend=dtype_backend, + ) + finally: + # make sure to close opened file handles + if should_close: + io.close() + return data + + +_WorkbookT = TypeVar("_WorkbookT") + + +class BaseExcelReader(Generic[_WorkbookT], metaclass=abc.ABCMeta): + book: _WorkbookT + + def __init__( + self, + filepath_or_buffer, + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + if engine_kwargs is None: + engine_kwargs = {} + + # First argument can also be bytes, so create a buffer + if isinstance(filepath_or_buffer, bytes): + filepath_or_buffer = BytesIO(filepath_or_buffer) + + self.handles = IOHandles( + handle=filepath_or_buffer, compression={"method": None} + ) + if not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)): + self.handles = get_handle( + filepath_or_buffer, "rb", storage_options=storage_options, is_text=False + ) + + if isinstance(self.handles.handle, self._workbook_class): + self.book = self.handles.handle + elif hasattr(self.handles.handle, "read"): + # N.B. 
xlrd.Book has a read attribute too + self.handles.handle.seek(0) + try: + self.book = self.load_workbook(self.handles.handle, engine_kwargs) + except Exception: + self.close() + raise + else: + raise ValueError( + "Must explicitly set engine if not passing in buffer or path for io." + ) + + @property + @abc.abstractmethod + def _workbook_class(self) -> type[_WorkbookT]: + pass + + @abc.abstractmethod + def load_workbook(self, filepath_or_buffer, engine_kwargs) -> _WorkbookT: + pass + + def close(self) -> None: + if hasattr(self, "book"): + if hasattr(self.book, "close"): + # pyxlsb: opens a TemporaryFile + # openpyxl: https://stackoverflow.com/questions/31416842/ + # openpyxl-does-not-close-excel-workbook-in-read-only-mode + self.book.close() + elif hasattr(self.book, "release_resources"): + # xlrd + # https://github.com/python-excel/xlrd/blob/2.0.1/xlrd/book.py#L548 + self.book.release_resources() + self.handles.close() + + @property + @abc.abstractmethod + def sheet_names(self) -> list[str]: + pass + + @abc.abstractmethod + def get_sheet_by_name(self, name: str): + pass + + @abc.abstractmethod + def get_sheet_by_index(self, index: int): + pass + + @abc.abstractmethod + def get_sheet_data(self, sheet, rows: int | None = None): + pass + + def raise_if_bad_sheet_by_index(self, index: int) -> None: + n_sheets = len(self.sheet_names) + if index >= n_sheets: + raise ValueError( + f"Worksheet index {index} is invalid, {n_sheets} worksheets found" + ) + + def raise_if_bad_sheet_by_name(self, name: str) -> None: + if name not in self.sheet_names: + raise ValueError(f"Worksheet named '{name}' not found") + + def _check_skiprows_func( + self, + skiprows: Callable, + rows_to_use: int, + ) -> int: + """ + Determine how many file rows are required to obtain `nrows` data + rows when `skiprows` is a function. + + Parameters + ---------- + skiprows : function + The function passed to read_excel by the user. + rows_to_use : int + The number of rows that will be needed for the header and + the data. + + Returns + ------- + int + """ + i = 0 + rows_used_so_far = 0 + while rows_used_so_far < rows_to_use: + if not skiprows(i): + rows_used_so_far += 1 + i += 1 + return i + + def _calc_rows( + self, + header: int | Sequence[int] | None, + index_col: int | Sequence[int] | None, + skiprows: Sequence[int] | int | Callable[[int], object] | None, + nrows: int | None, + ) -> int | None: + """ + If nrows specified, find the number of rows needed from the + file, otherwise return None. + + + Parameters + ---------- + header : int, list of int, or None + See read_excel docstring. + index_col : int, list of int, or None + See read_excel docstring. + skiprows : list-like, int, callable, or None + See read_excel docstring. + nrows : int or None + See read_excel docstring. 
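`_check_skiprows_func` above simply walks file rows until enough unskipped rows have been seen. A standalone mirror of that loop, with an invented skip rule:

```python
def rows_needed(skip, rows_to_use: int) -> int:
    # Count file rows until `rows_to_use` rows survive the skip callable.
    i = used = 0
    while used < rows_to_use:
        if not skip(i):
            used += 1
        i += 1
    return i


# Skip every third file row (rows 2, 5, 8, ...): 4 kept rows need 5 file rows.
print(rows_needed(lambda x: x % 3 == 2, 4))  # 5
```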
+ + Returns + ------- + int or None + """ + if nrows is None: + return None + if header is None: + header_rows = 1 + elif is_integer(header): + header = cast(int, header) + header_rows = 1 + header + else: + header = cast(Sequence, header) + header_rows = 1 + header[-1] + # If there is a MultiIndex header and an index then there is also + # a row containing just the index name(s) + if is_list_like(header) and index_col is not None: + header = cast(Sequence, header) + if len(header) > 1: + header_rows += 1 + if skiprows is None: + return header_rows + nrows + if is_integer(skiprows): + skiprows = cast(int, skiprows) + return header_rows + nrows + skiprows + if is_list_like(skiprows): + + def f(skiprows: Sequence, x: int) -> bool: + return x in skiprows + + skiprows = cast(Sequence, skiprows) + return self._check_skiprows_func(partial(f, skiprows), header_rows + nrows) + if callable(skiprows): + return self._check_skiprows_func( + skiprows, + header_rows + nrows, + ) + # else unexpected skiprows type: read_excel will not optimize + # the number of rows read from file + return None + + def parse( + self, + sheet_name: str | int | list[int] | list[str] | None = 0, + header: int | Sequence[int] | None = 0, + names=None, + index_col: int | Sequence[int] | None = None, + usecols=None, + dtype: DtypeArg | None = None, + true_values: Iterable[Hashable] | None = None, + false_values: Iterable[Hashable] | None = None, + skiprows: Sequence[int] | int | Callable[[int], object] | None = None, + nrows: int | None = None, + na_values=None, + verbose: bool = False, + parse_dates: list | dict | bool = False, + date_parser: Callable | lib.NoDefault = lib.no_default, + date_format: dict[Hashable, str] | str | None = None, + thousands: str | None = None, + decimal: str = ".", + comment: str | None = None, + skipfooter: int = 0, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + **kwds, + ): + validate_header_arg(header) + validate_integer("nrows", nrows) + + ret_dict = False + + # Keep sheetname to maintain backwards compatibility. + sheets: list[int] | list[str] + if isinstance(sheet_name, list): + sheets = sheet_name + ret_dict = True + elif sheet_name is None: + sheets = self.sheet_names + ret_dict = True + elif isinstance(sheet_name, str): + sheets = [sheet_name] + else: + sheets = [sheet_name] + + # handle same-type duplicates. 
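To make the arithmetic in `_calc_rows` concrete: with a two-row header, an index column, and `nrows=10`, the file-row budget works out as below (a worked example, not pandas code):

```python
header = [0, 1]   # two header rows
nrows = 10

header_rows = 1 + header[-1]  # header occupies file rows 0..1 -> 2 rows
# A MultiIndex header plus an index column implies one extra row that
# carries only the index name(s).
header_rows += 1

print(header_rows + nrows)  # 13 file rows must be read
```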
+ sheets = cast(Union[list[int], list[str]], list(dict.fromkeys(sheets).keys())) + + output = {} + + last_sheetname = None + for asheetname in sheets: + last_sheetname = asheetname + if verbose: + print(f"Reading sheet {asheetname}") + + if isinstance(asheetname, str): + sheet = self.get_sheet_by_name(asheetname) + else: # assume an integer if not a string + sheet = self.get_sheet_by_index(asheetname) + + file_rows_needed = self._calc_rows(header, index_col, skiprows, nrows) + data = self.get_sheet_data(sheet, file_rows_needed) + if hasattr(sheet, "close"): + # pyxlsb opens two TemporaryFiles + sheet.close() + usecols = maybe_convert_usecols(usecols) + + if not data: + output[asheetname] = DataFrame() + continue + + is_list_header = False + is_len_one_list_header = False + if is_list_like(header): + assert isinstance(header, Sequence) + is_list_header = True + if len(header) == 1: + is_len_one_list_header = True + + if is_len_one_list_header: + header = cast(Sequence[int], header)[0] + + # forward fill and pull out names for MultiIndex column + header_names = None + if header is not None and is_list_like(header): + assert isinstance(header, Sequence) + + header_names = [] + control_row = [True] * len(data[0]) + + for row in header: + if is_integer(skiprows): + assert isinstance(skiprows, int) + row += skiprows + + if row > len(data) - 1: + raise ValueError( + f"header index {row} exceeds maximum index " + f"{len(data) - 1} of data.", + ) + + data[row], control_row = fill_mi_header(data[row], control_row) + + if index_col is not None: + header_name, _ = pop_header_name(data[row], index_col) + header_names.append(header_name) + + # If there is a MultiIndex header and an index then there is also + # a row containing just the index name(s) + has_index_names = False + if is_list_header and not is_len_one_list_header and index_col is not None: + index_col_list: Sequence[int] + if isinstance(index_col, int): + index_col_list = [index_col] + else: + assert isinstance(index_col, Sequence) + index_col_list = index_col + + # We have to handle mi without names. If any of the entries in the data + # columns are not empty, this is a regular row + assert isinstance(header, Sequence) + if len(header) < len(data): + potential_index_names = data[len(header)] + potential_data = [ + x + for i, x in enumerate(potential_index_names) + if not control_row[i] and i not in index_col_list + ] + has_index_names = all(x == "" or x is None for x in potential_data) + + if is_list_like(index_col): + # Forward fill values for MultiIndex index. + if header is None: + offset = 0 + elif isinstance(header, int): + offset = 1 + header + else: + offset = 1 + max(header) + + # GH34673: if MultiIndex names present and not defined in the header, + # offset needs to be incremented so that forward filling starts + # from the first MI value instead of the name + if has_index_names: + offset += 1 + + # Check if we have an empty dataset + # before trying to collect data. 
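The offset computed above marks where the data rows begin; from there, merged MultiIndex index cells arrive as blanks and are forward filled, as in the loop that follows. A standalone mirror on an invented column:

```python
# Blanks inherit the last seen value, mirroring the loop below.
col = ["North", "", "", "South", ""]
last = None
for i, v in enumerate(col):
    if v == "" or v is None:
        col[i] = last
    else:
        last = v
print(col)  # ['North', 'North', 'North', 'South', 'South']
```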
+ if offset < len(data): + assert isinstance(index_col, Sequence) + + for col in index_col: + last = data[offset][col] + + for row in range(offset + 1, len(data)): + if data[row][col] == "" or data[row][col] is None: + data[row][col] = last + else: + last = data[row][col] + + # GH 12292 : error when read one empty column from excel file + try: + parser = TextParser( + data, + names=names, + header=header, + index_col=index_col, + has_index_names=has_index_names, + dtype=dtype, + true_values=true_values, + false_values=false_values, + skiprows=skiprows, + nrows=nrows, + na_values=na_values, + skip_blank_lines=False, # GH 39808 + parse_dates=parse_dates, + date_parser=date_parser, + date_format=date_format, + thousands=thousands, + decimal=decimal, + comment=comment, + skipfooter=skipfooter, + usecols=usecols, + dtype_backend=dtype_backend, + **kwds, + ) + + output[asheetname] = parser.read(nrows=nrows) + + if header_names: + output[asheetname].columns = output[asheetname].columns.set_names( + header_names + ) + + except EmptyDataError: + # No Data, return an empty DataFrame + output[asheetname] = DataFrame() + + except Exception as err: + err.args = (f"{err.args[0]} (sheet: {asheetname})", *err.args[1:]) + raise err + + if last_sheetname is None: + raise ValueError("Sheet name is an empty list") + + if ret_dict: + return output + else: + return output[last_sheetname] + + +@doc(storage_options=_shared_docs["storage_options"]) +class ExcelWriter(Generic[_WorkbookT], metaclass=abc.ABCMeta): + """ + Class for writing DataFrame objects into excel sheets. + + Default is to use: + + * `xlsxwriter `__ for xlsx files if xlsxwriter + is installed otherwise `openpyxl `__ + * `odswriter `__ for ods files + + See ``DataFrame.to_excel`` for typical usage. + + The writer should be used as a context manager. Otherwise, call `close()` to save + and close any opened file handles. + + Parameters + ---------- + path : str or typing.BinaryIO + Path to xls or xlsx or ods file. + engine : str (optional) + Engine to use for writing. If None, defaults to + ``io.excel..writer``. NOTE: can only be passed as a keyword + argument. + date_format : str, default None + Format string for dates written into Excel files (e.g. 'YYYY-MM-DD'). + datetime_format : str, default None + Format string for datetime objects written into Excel files. + (e.g. 'YYYY-MM-DD HH:MM:SS'). + mode : {{'w', 'a'}}, default 'w' + File mode to use (write or append). Append does not work with fsspec URLs. + {storage_options} + + .. versionadded:: 1.2.0 + + if_sheet_exists : {{'error', 'new', 'replace', 'overlay'}}, default 'error' + How to behave when trying to write to a sheet that already + exists (append mode only). + + * error: raise a ValueError. + * new: Create a new sheet, with a name determined by the engine. + * replace: Delete the contents of the sheet before writing to it. + * overlay: Write contents to the existing sheet without first removing, + but possibly over top of, the existing contents. + + .. versionadded:: 1.3.0 + + .. versionchanged:: 1.4.0 + + Added ``overlay`` option + + engine_kwargs : dict, optional + Keyword arguments to be passed into the engine. These will be passed to + the following functions of the respective engines: + + * xlsxwriter: ``xlsxwriter.Workbook(file, **engine_kwargs)`` + * openpyxl (write mode): ``openpyxl.Workbook(**engine_kwargs)`` + * openpyxl (append mode): ``openpyxl.load_workbook(file, **engine_kwargs)`` + * odswriter: ``odf.opendocument.OpenDocumentSpreadsheet(**engine_kwargs)`` + + .. 
versionadded:: 1.3.0 + + Notes + ----- + For compatibility with CSV writers, ExcelWriter serializes lists + and dicts to strings before writing. + + Examples + -------- + Default usage: + + >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP + >>> with pd.ExcelWriter("path_to_file.xlsx") as writer: + ... df.to_excel(writer) # doctest: +SKIP + + To write to separate sheets in a single file: + + >>> df1 = pd.DataFrame([["AAA", "BBB"]], columns=["Spam", "Egg"]) # doctest: +SKIP + >>> df2 = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP + >>> with pd.ExcelWriter("path_to_file.xlsx") as writer: + ... df1.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP + ... df2.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP + + You can set the date format or datetime format: + + >>> from datetime import date, datetime # doctest: +SKIP + >>> df = pd.DataFrame( + ... [ + ... [date(2014, 1, 31), date(1999, 9, 24)], + ... [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)], + ... ], + ... index=["Date", "Datetime"], + ... columns=["X", "Y"], + ... ) # doctest: +SKIP + >>> with pd.ExcelWriter( + ... "path_to_file.xlsx", + ... date_format="YYYY-MM-DD", + ... datetime_format="YYYY-MM-DD HH:MM:SS" + ... ) as writer: + ... df.to_excel(writer) # doctest: +SKIP + + You can also append to an existing Excel file: + + >>> with pd.ExcelWriter("path_to_file.xlsx", mode="a", engine="openpyxl") as writer: + ... df.to_excel(writer, sheet_name="Sheet3") # doctest: +SKIP + + Here, the `if_sheet_exists` parameter can be set to replace a sheet if it + already exists: + + >>> with ExcelWriter( + ... "path_to_file.xlsx", + ... mode="a", + ... engine="openpyxl", + ... if_sheet_exists="replace", + ... ) as writer: + ... df.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP + + You can also write multiple DataFrames to a single sheet. Note that the + ``if_sheet_exists`` parameter needs to be set to ``overlay``: + + >>> with ExcelWriter("path_to_file.xlsx", + ... mode="a", + ... engine="openpyxl", + ... if_sheet_exists="overlay", + ... ) as writer: + ... df1.to_excel(writer, sheet_name="Sheet1") + ... df2.to_excel(writer, sheet_name="Sheet1", startcol=3) # doctest: +SKIP + + You can store Excel file in RAM: + + >>> import io + >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) + >>> buffer = io.BytesIO() + >>> with pd.ExcelWriter(buffer) as writer: + ... df.to_excel(writer) + + You can pack Excel file into zip archive: + + >>> import zipfile # doctest: +SKIP + >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP + >>> with zipfile.ZipFile("path_to_file.zip", "w") as zf: + ... with zf.open("filename.xlsx", "w") as buffer: + ... with pd.ExcelWriter(buffer) as writer: + ... df.to_excel(writer) # doctest: +SKIP + + You can specify additional arguments to the underlying engine: + + >>> with pd.ExcelWriter( + ... "path_to_file.xlsx", + ... engine="xlsxwriter", + ... engine_kwargs={{"options": {{"nan_inf_to_errors": True}}}} + ... ) as writer: + ... df.to_excel(writer) # doctest: +SKIP + + In append mode, ``engine_kwargs`` are passed through to + openpyxl's ``load_workbook``: + + >>> with pd.ExcelWriter( + ... "path_to_file.xlsx", + ... engine="openpyxl", + ... mode="a", + ... engine_kwargs={{"keep_vba": True}} + ... ) as writer: + ... df.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP + """ + + # Defining an ExcelWriter implementation (see abstract methods for more...) 
+
+    # - Mandatory
+    #   - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
+    #     --> called to write additional DataFrames to disk
+    #   - ``_supported_extensions`` (tuple of supported extensions), used to
+    #     check that engine supports the given extension.
+    #   - ``_engine`` - string that gives the engine name. Necessary to
+    #     instantiate class directly and bypass ``ExcelWriterMeta`` engine
+    #     lookup.
+    #   - ``save(self)`` --> called to save file to disk
+    # - Mostly mandatory (i.e. should at least exist)
+    #   - book, cur_sheet, path
+
+    # - Optional:
+    #   - ``__init__(self, path, engine=None, **kwargs)`` --> always called
+    #     with path as first argument.
+
+    # You also need to register the class with ``register_writer()``.
+    # Technically, ExcelWriter implementations don't need to subclass
+    # ExcelWriter.
+
+    _engine: str
+    _supported_extensions: tuple[str, ...]
+
+    def __new__(
+        cls,
+        path: FilePath | WriteExcelBuffer | ExcelWriter,
+        engine: str | None = None,
+        date_format: str | None = None,
+        datetime_format: str | None = None,
+        mode: str = "w",
+        storage_options: StorageOptions | None = None,
+        if_sheet_exists: ExcelWriterIfSheetExists | None = None,
+        engine_kwargs: dict | None = None,
+    ) -> Self:
+        # only switch class if generic(ExcelWriter)
+        if cls is ExcelWriter:
+            if engine is None or (isinstance(engine, str) and engine == "auto"):
+                if isinstance(path, str):
+                    ext = os.path.splitext(path)[-1][1:]
+                else:
+                    ext = "xlsx"
+
+                try:
+                    engine = config.get_option(f"io.excel.{ext}.writer", silent=True)
+                    if engine == "auto":
+                        engine = get_default_engine(ext, mode="writer")
+                except KeyError as err:
+                    raise ValueError(f"No engine for filetype: '{ext}'") from err
+
+            # for mypy
+            assert engine is not None
+            # error: Incompatible types in assignment (expression has type
+            # "type[ExcelWriter[Any]]", variable has type "type[Self]")
+            cls = get_writer(engine)  # type: ignore[assignment]
+
+        return object.__new__(cls)
+
+    # declare external properties you can count on
+    _path = None
+
+    @property
+    def supported_extensions(self) -> tuple[str, ...]:
+        """Extensions that writer engine supports."""
+        return self._supported_extensions
+
+    @property
+    def engine(self) -> str:
+        """Name of engine."""
+        return self._engine
+
+    @property
+    @abc.abstractmethod
+    def sheets(self) -> dict[str, Any]:
+        """Mapping of sheet names to sheet objects."""
+
+    @property
+    @abc.abstractmethod
+    def book(self) -> _WorkbookT:
+        """
+        Book instance. Class type will depend on the engine used.
+
+        This attribute can be used to access engine-specific features.
+        """
+
+    @abc.abstractmethod
+    def _write_cells(
+        self,
+        cells,
+        sheet_name: str | None = None,
+        startrow: int = 0,
+        startcol: int = 0,
+        freeze_panes: tuple[int, int] | None = None,
+    ) -> None:
+        """
+        Write given formatted cells into an Excel sheet
+
+        Parameters
+        ----------
+        cells : generator
+            cell of formatted data to save to Excel sheet
+        sheet_name : str, default None
+            Name of Excel sheet, if None, then use self.cur_sheet
+        startrow : upper left cell row to dump data frame
+        startcol : upper left cell column to dump data frame
+        freeze_panes: int tuple of length 2
+            contains the bottom-most row and right-most column to freeze
+        """
+
+    @abc.abstractmethod
+    def _save(self) -> None:
+        """
+        Save workbook to disk.
+        """
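The checklist in the comment block above (engine name, supported extensions, `_write_cells`, `_save`, then `register_writer`) can be made concrete with a skeletal engine. This is a hypothetical sketch against this vendored version's internals, not a useful writer; it accepts cells and saves nothing:

```python
from typing import Any

from pandas.io.excel import ExcelWriter
from pandas.io.excel._util import register_writer  # internal, used above


class NullWriter(ExcelWriter):
    """Hypothetical engine: consumes formatted cells, writes nothing."""

    _engine = "null"
    _supported_extensions = (".null",)

    @property
    def book(self) -> None:
        return None

    @property
    def sheets(self) -> dict[str, Any]:
        return {}

    def _write_cells(
        self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None
    ) -> None:
        for _cell in cells:  # drain the generator of formatted cells
            pass

    def _save(self) -> None:
        pass


register_writer(NullWriter)
# Usable only with an explicit engine, since no "io.excel.null.writer"
# config option exists:
#   with pd.ExcelWriter("out.null", engine="null") as writer: ...
```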
+ """ + + def __init__( + self, + path: FilePath | WriteExcelBuffer | ExcelWriter, + engine: str | None = None, + date_format: str | None = None, + datetime_format: str | None = None, + mode: str = "w", + storage_options: StorageOptions | None = None, + if_sheet_exists: ExcelWriterIfSheetExists | None = None, + engine_kwargs: dict[str, Any] | None = None, + ) -> None: + # validate that this engine can handle the extension + if isinstance(path, str): + ext = os.path.splitext(path)[-1] + self.check_extension(ext) + + # use mode to open the file + if "b" not in mode: + mode += "b" + # use "a" for the user to append data to excel but internally use "r+" to let + # the excel backend first read the existing file and then write any data to it + mode = mode.replace("a", "r+") + + if if_sheet_exists not in (None, "error", "new", "replace", "overlay"): + raise ValueError( + f"'{if_sheet_exists}' is not valid for if_sheet_exists. " + "Valid options are 'error', 'new', 'replace' and 'overlay'." + ) + if if_sheet_exists and "r+" not in mode: + raise ValueError("if_sheet_exists is only valid in append mode (mode='a')") + if if_sheet_exists is None: + if_sheet_exists = "error" + self._if_sheet_exists = if_sheet_exists + + # cast ExcelWriter to avoid adding 'if self._handles is not None' + self._handles = IOHandles( + cast(IO[bytes], path), compression={"compression": None} + ) + if not isinstance(path, ExcelWriter): + self._handles = get_handle( + path, mode, storage_options=storage_options, is_text=False + ) + self._cur_sheet = None + + if date_format is None: + self._date_format = "YYYY-MM-DD" + else: + self._date_format = date_format + if datetime_format is None: + self._datetime_format = "YYYY-MM-DD HH:MM:SS" + else: + self._datetime_format = datetime_format + + self._mode = mode + + @property + def date_format(self) -> str: + """ + Format string for dates written into Excel files (e.g. 'YYYY-MM-DD'). + """ + return self._date_format + + @property + def datetime_format(self) -> str: + """ + Format string for dates written into Excel files (e.g. 'YYYY-MM-DD'). + """ + return self._datetime_format + + @property + def if_sheet_exists(self) -> str: + """ + How to behave when writing to a sheet that already exists in append mode. + """ + return self._if_sheet_exists + + def __fspath__(self) -> str: + return getattr(self._handles.handle, "name", "") + + def _get_sheet_name(self, sheet_name: str | None) -> str: + if sheet_name is None: + sheet_name = self._cur_sheet + if sheet_name is None: # pragma: no cover + raise ValueError("Must pass explicit sheet_name or set _cur_sheet property") + return sheet_name + + def _value_with_fmt( + self, val + ) -> tuple[ + int | float | bool | str | datetime.datetime | datetime.date, str | None + ]: + """ + Convert numpy types to Python types for the Excel writers. 
+ + Parameters + ---------- + val : object + Value to be written into cells + + Returns + ------- + Tuple with the first element being the converted value and the second + being an optional format + """ + fmt = None + + if is_integer(val): + val = int(val) + elif is_float(val): + val = float(val) + elif is_bool(val): + val = bool(val) + elif isinstance(val, datetime.datetime): + fmt = self._datetime_format + elif isinstance(val, datetime.date): + fmt = self._date_format + elif isinstance(val, datetime.timedelta): + val = val.total_seconds() / 86400 + fmt = "0" + else: + val = str(val) + + return val, fmt + + @classmethod + def check_extension(cls, ext: str) -> Literal[True]: + """ + checks that path's extension against the Writer's supported + extensions. If it isn't supported, raises UnsupportedFiletypeError. + """ + if ext.startswith("."): + ext = ext[1:] + if not any(ext in extension for extension in cls._supported_extensions): + raise ValueError(f"Invalid extension for engine '{cls.engine}': '{ext}'") + return True + + # Allow use as a contextmanager + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.close() + + def close(self) -> None: + """synonym for save, to make it more file-like""" + self._save() + self._handles.close() + + +XLS_SIGNATURES = ( + b"\x09\x00\x04\x00\x07\x00\x10\x00", # BIFF2 + b"\x09\x02\x06\x00\x00\x00\x10\x00", # BIFF3 + b"\x09\x04\x06\x00\x00\x00\x10\x00", # BIFF4 + b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", # Compound File Binary +) +ZIP_SIGNATURE = b"PK\x03\x04" +PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,))) + + +@doc(storage_options=_shared_docs["storage_options"]) +def inspect_excel_format( + content_or_path: FilePath | ReadBuffer[bytes], + storage_options: StorageOptions | None = None, +) -> str | None: + """ + Inspect the path or content of an excel file and get its format. + + Adopted from xlrd: https://github.com/python-excel/xlrd. + + Parameters + ---------- + content_or_path : str or file-like object + Path to file or content of file to inspect. May be a URL. + {storage_options} + + Returns + ------- + str or None + Format of file if it can be determined. + + Raises + ------ + ValueError + If resulting stream is empty. + BadZipFile + If resulting stream does not have an XLS signature and is not a valid zipfile. + """ + if isinstance(content_or_path, bytes): + content_or_path = BytesIO(content_or_path) + + with get_handle( + content_or_path, "rb", storage_options=storage_options, is_text=False + ) as handle: + stream = handle.handle + stream.seek(0) + buf = stream.read(PEEK_SIZE) + if buf is None: + raise ValueError("stream is empty") + assert isinstance(buf, bytes) + peek = buf + stream.seek(0) + + if any(peek.startswith(sig) for sig in XLS_SIGNATURES): + return "xls" + elif not peek.startswith(ZIP_SIGNATURE): + return None + + with zipfile.ZipFile(stream) as zf: + # Workaround for some third party files that use forward slashes and + # lower case names. + component_names = [ + name.replace("\\", "/").lower() for name in zf.namelist() + ] + + if "xl/workbook.xml" in component_names: + return "xlsx" + if "xl/workbook.bin" in component_names: + return "xlsb" + if "content.xml" in component_names: + return "ods" + return "zip" + + +class ExcelFile: + """ + Class for parsing tabular Excel sheets into DataFrame objects. + + See read_excel for more documentation. 
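`inspect_excel_format` above decides by magic bytes: a BIFF/CFB signature means legacy xls, a PK zip header means one of the zip-based formats, told apart by a well-known member name. A standalone sniffer in the same spirit (file path invented):

```python
from __future__ import annotations

import zipfile

XLS_SIGS = (
    b"\x09\x00\x04\x00\x07\x00\x10\x00",  # BIFF2
    b"\x09\x02\x06\x00\x00\x00\x10\x00",  # BIFF3
    b"\x09\x04\x06\x00\x00\x00\x10\x00",  # BIFF4
    b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1",  # Compound File Binary (.xls)
)


def sniff(path: str) -> str | None:
    with open(path, "rb") as f:
        peek = f.read(8)  # the longest signature above is 8 bytes
    if any(peek.startswith(sig) for sig in XLS_SIGS):
        return "xls"
    if not peek.startswith(b"PK\x03\x04"):
        return None  # neither xls nor a zip container
    with zipfile.ZipFile(path) as zf:
        names = {n.replace("\\", "/").lower() for n in zf.namelist()}
    if "xl/workbook.xml" in names:
        return "xlsx"
    if "xl/workbook.bin" in names:
        return "xlsb"
    if "content.xml" in names:
        return "ods"
    return "zip"


print(sniff("tmp.xlsx"))  # e.g. "xlsx"
```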
+ + Parameters + ---------- + path_or_buffer : str, bytes, path object (pathlib.Path or py._path.local.LocalPath), + A file-like object, xlrd workbook or openpyxl workbook. + If a string or path object, expected to be a path to a + .xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file. + engine : str, default None + If io is not a buffer or path, this must be set to identify io. + Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb`` + Engine compatibility : + + - ``xlrd`` supports old-style Excel files (.xls). + - ``openpyxl`` supports newer Excel file formats. + - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt). + - ``pyxlsb`` supports Binary Excel files. + + .. versionchanged:: 1.2.0 + + The engine `xlrd `_ + now only supports old-style ``.xls`` files. + When ``engine=None``, the following logic will be + used to determine the engine: + + - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt), + then `odf `_ will be used. + - Otherwise if ``path_or_buffer`` is an xls format, + ``xlrd`` will be used. + - Otherwise if ``path_or_buffer`` is in xlsb format, + `pyxlsb `_ will be used. + + .. versionadded:: 1.3.0 + + - Otherwise if `openpyxl `_ is installed, + then ``openpyxl`` will be used. + - Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised. + + .. warning:: + + Please do not report issues when using ``xlrd`` to read ``.xlsx`` files. + This is not supported, switch to using ``openpyxl`` instead. + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. + + Examples + -------- + >>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP + >>> with pd.ExcelFile("myfile.xls") as xls: # doctest: +SKIP + ... df1 = pd.read_excel(xls, "Sheet1") # doctest: +SKIP + """ + + from pandas.io.excel._odfreader import ODFReader + from pandas.io.excel._openpyxl import OpenpyxlReader + from pandas.io.excel._pyxlsb import PyxlsbReader + from pandas.io.excel._xlrd import XlrdReader + + _engines: Mapping[str, Any] = { + "xlrd": XlrdReader, + "openpyxl": OpenpyxlReader, + "odf": ODFReader, + "pyxlsb": PyxlsbReader, + } + + def __init__( + self, + path_or_buffer, + engine: str | None = None, + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + if engine_kwargs is None: + engine_kwargs = {} + + if engine is not None and engine not in self._engines: + raise ValueError(f"Unknown engine: {engine}") + + # First argument can also be bytes, so create a buffer + if isinstance(path_or_buffer, bytes): + path_or_buffer = BytesIO(path_or_buffer) + warnings.warn( + "Passing bytes to 'read_excel' is deprecated and " + "will be removed in a future version. To read from a " + "byte string, wrap it in a `BytesIO` object.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + # Could be a str, ExcelFile, Book, etc. + self.io = path_or_buffer + # Always a string + self._io = stringify_path(path_or_buffer) + + # Determine xlrd version if installed + if import_optional_dependency("xlrd", errors="ignore") is None: + xlrd_version = None + else: + import xlrd + + xlrd_version = Version(get_version(xlrd)) + + if engine is None: + # Only determine ext if it is needed + ext: str | None + if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book): + ext = "xls" + else: + ext = inspect_excel_format( + content_or_path=path_or_buffer, storage_options=storage_options + ) + if ext is None: + raise ValueError( + "Excel file format cannot be determined, you must specify " + "an engine manually." 
+ ) + + engine = config.get_option(f"io.excel.{ext}.reader", silent=True) + if engine == "auto": + engine = get_default_engine(ext, mode="reader") + + assert engine is not None + self.engine = engine + self.storage_options = storage_options + + self._reader = self._engines[engine]( + self._io, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + + def __fspath__(self): + return self._io + + def parse( + self, + sheet_name: str | int | list[int] | list[str] | None = 0, + header: int | Sequence[int] | None = 0, + names=None, + index_col: int | Sequence[int] | None = None, + usecols=None, + converters=None, + true_values: Iterable[Hashable] | None = None, + false_values: Iterable[Hashable] | None = None, + skiprows: Sequence[int] | int | Callable[[int], object] | None = None, + nrows: int | None = None, + na_values=None, + parse_dates: list | dict | bool = False, + date_parser: Callable | lib.NoDefault = lib.no_default, + date_format: str | dict[Hashable, str] | None = None, + thousands: str | None = None, + comment: str | None = None, + skipfooter: int = 0, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + **kwds, + ) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]: + """ + Parse specified sheet(s) into a DataFrame. + + Equivalent to read_excel(ExcelFile, ...) See the read_excel + docstring for more info on accepted parameters. + + Returns + ------- + DataFrame or dict of DataFrames + DataFrame from the passed in Excel file. + + Examples + -------- + >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) + >>> df.to_excel('myfile.xlsx') # doctest: +SKIP + >>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP + >>> file.parse() # doctest: +SKIP + """ + return self._reader.parse( + sheet_name=sheet_name, + header=header, + names=names, + index_col=index_col, + usecols=usecols, + converters=converters, + true_values=true_values, + false_values=false_values, + skiprows=skiprows, + nrows=nrows, + na_values=na_values, + parse_dates=parse_dates, + date_parser=date_parser, + date_format=date_format, + thousands=thousands, + comment=comment, + skipfooter=skipfooter, + dtype_backend=dtype_backend, + **kwds, + ) + + @property + def book(self): + return self._reader.book + + @property + def sheet_names(self): + return self._reader.sheet_names + + def close(self) -> None: + """close io if necessary""" + self._reader.close() + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.close() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_odfreader.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_odfreader.py new file mode 100644 index 00000000..8016dbba --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_odfreader.py @@ -0,0 +1,259 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + cast, +) + +import numpy as np + +from pandas._typing import ( + FilePath, + ReadBuffer, + Scalar, + StorageOptions, +) +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc + +import pandas as pd +from pandas.core.shared_docs import _shared_docs + +from pandas.io.excel._base import BaseExcelReader + +if TYPE_CHECKING: + from odf.opendocument import OpenDocument + + from pandas._libs.tslibs.nattype import NaTType + + +@doc(storage_options=_shared_docs["storage_options"]) +class 
ODFReader(BaseExcelReader["OpenDocument"]): + def __init__( + self, + filepath_or_buffer: FilePath | ReadBuffer[bytes], + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + """ + Read tables out of OpenDocument formatted files. + + Parameters + ---------- + filepath_or_buffer : str, path to be parsed or + an open readable stream. + {storage_options} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. + """ + import_optional_dependency("odf") + super().__init__( + filepath_or_buffer, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + + @property + def _workbook_class(self) -> type[OpenDocument]: + from odf.opendocument import OpenDocument + + return OpenDocument + + def load_workbook( + self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs + ) -> OpenDocument: + from odf.opendocument import load + + return load(filepath_or_buffer, **engine_kwargs) + + @property + def empty_value(self) -> str: + """Property for compat with other readers.""" + return "" + + @property + def sheet_names(self) -> list[str]: + """Return a list of sheet names present in the document""" + from odf.table import Table + + tables = self.book.getElementsByType(Table) + return [t.getAttribute("name") for t in tables] + + def get_sheet_by_index(self, index: int): + from odf.table import Table + + self.raise_if_bad_sheet_by_index(index) + tables = self.book.getElementsByType(Table) + return tables[index] + + def get_sheet_by_name(self, name: str): + from odf.table import Table + + self.raise_if_bad_sheet_by_name(name) + tables = self.book.getElementsByType(Table) + + for table in tables: + if table.getAttribute("name") == name: + return table + + self.close() + raise ValueError(f"sheet {name} not found") + + def get_sheet_data( + self, sheet, file_rows_needed: int | None = None + ) -> list[list[Scalar | NaTType]]: + """ + Parse an ODF Table into a list of lists + """ + from odf.table import ( + CoveredTableCell, + TableCell, + TableRow, + ) + + covered_cell_name = CoveredTableCell().qname + table_cell_name = TableCell().qname + cell_names = {covered_cell_name, table_cell_name} + + sheet_rows = sheet.getElementsByType(TableRow) + empty_rows = 0 + max_row_len = 0 + + table: list[list[Scalar | NaTType]] = [] + + for sheet_row in sheet_rows: + sheet_cells = [ + x + for x in sheet_row.childNodes + if hasattr(x, "qname") and x.qname in cell_names + ] + empty_cells = 0 + table_row: list[Scalar | NaTType] = [] + + for sheet_cell in sheet_cells: + if sheet_cell.qname == table_cell_name: + value = self._get_cell_value(sheet_cell) + else: + value = self.empty_value + + column_repeat = self._get_column_repeat(sheet_cell) + + # Queue up empty values, writing only if content succeeds them + if value == self.empty_value: + empty_cells += column_repeat + else: + table_row.extend([self.empty_value] * empty_cells) + empty_cells = 0 + table_row.extend([value] * column_repeat) + + if max_row_len < len(table_row): + max_row_len = len(table_row) + + row_repeat = self._get_row_repeat(sheet_row) + if self._is_empty_row(sheet_row): + empty_rows += row_repeat + else: + # add blank rows to our table + table.extend([[self.empty_value]] * empty_rows) + empty_rows = 0 + table.extend(table_row for _ in range(row_repeat)) + if file_rows_needed is not None and len(table) >= file_rows_needed: + break + + # Make our table square + for row in table: + if len(row) < max_row_len: + row.extend([self.empty_value] * (max_row_len - len(row))) + 
+ return table + + def _get_row_repeat(self, row) -> int: + """ + Return number of times this row was repeated + Repeating an empty row appeared to be a common way + of representing sparse rows in the table. + """ + from odf.namespaces import TABLENS + + return int(row.attributes.get((TABLENS, "number-rows-repeated"), 1)) + + def _get_column_repeat(self, cell) -> int: + from odf.namespaces import TABLENS + + return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1)) + + def _is_empty_row(self, row) -> bool: + """ + Helper function to find empty rows + """ + for column in row.childNodes: + if len(column.childNodes) > 0: + return False + + return True + + def _get_cell_value(self, cell) -> Scalar | NaTType: + from odf.namespaces import OFFICENS + + if str(cell) == "#N/A": + return np.nan + + cell_type = cell.attributes.get((OFFICENS, "value-type")) + if cell_type == "boolean": + if str(cell) == "TRUE": + return True + return False + if cell_type is None: + return self.empty_value + elif cell_type == "float": + # GH5394 + cell_value = float(cell.attributes.get((OFFICENS, "value"))) + val = int(cell_value) + if val == cell_value: + return val + return cell_value + elif cell_type == "percentage": + cell_value = cell.attributes.get((OFFICENS, "value")) + return float(cell_value) + elif cell_type == "string": + return self._get_cell_string_value(cell) + elif cell_type == "currency": + cell_value = cell.attributes.get((OFFICENS, "value")) + return float(cell_value) + elif cell_type == "date": + cell_value = cell.attributes.get((OFFICENS, "date-value")) + return pd.Timestamp(cell_value) + elif cell_type == "time": + stamp = pd.Timestamp(str(cell)) + # cast needed here because Scalar doesn't include datetime.time + return cast(Scalar, stamp.time()) + else: + self.close() + raise ValueError(f"Unrecognized type {cell_type}") + + def _get_cell_string_value(self, cell) -> str: + """ + Find and decode OpenDocument text:s tags that represent + a run length encoded sequence of space characters. 
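The "float" branch above (GH5394) collapses integral floats to int so that whole numbers read from ODS cells round-trip as integers. In isolation:

```python
def coerce_odf_float(raw):
    # Mirror of the "float" value-type branch above.
    value = float(raw)
    as_int = int(value)
    return as_int if as_int == value else value


print(coerce_odf_float("3.0"), coerce_odf_float("3.25"))  # 3 3.25
```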
+ """ + from odf.element import Element + from odf.namespaces import TEXTNS + from odf.text import S + + text_s = S().qname + + value = [] + + for fragment in cell.childNodes: + if isinstance(fragment, Element): + if fragment.qname == text_s: + spaces = int(fragment.attributes.get((TEXTNS, "c"), 1)) + value.append(" " * spaces) + else: + # recursive impl needed in case of nested fragments + # with multiple spaces + # https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704 + value.append(self._get_cell_string_value(fragment)) + else: + value.append(str(fragment).strip("\n")) + return "".join(value) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_odswriter.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_odswriter.py new file mode 100644 index 00000000..0bc335a9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_odswriter.py @@ -0,0 +1,347 @@ +from __future__ import annotations + +from collections import defaultdict +import datetime +from typing import ( + TYPE_CHECKING, + Any, + DefaultDict, + cast, + overload, +) + +from pandas._libs import json + +from pandas.io.excel._base import ExcelWriter +from pandas.io.excel._util import ( + combine_kwargs, + validate_freeze_panes, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ExcelWriterIfSheetExists, + FilePath, + StorageOptions, + WriteExcelBuffer, + ) + + from pandas.io.formats.excel import ExcelCell + + +class ODSWriter(ExcelWriter): + _engine = "odf" + _supported_extensions = (".ods",) + + def __init__( + self, + path: FilePath | WriteExcelBuffer | ExcelWriter, + engine: str | None = None, + date_format: str | None = None, + datetime_format=None, + mode: str = "w", + storage_options: StorageOptions | None = None, + if_sheet_exists: ExcelWriterIfSheetExists | None = None, + engine_kwargs: dict[str, Any] | None = None, + **kwargs, + ) -> None: + from odf.opendocument import OpenDocumentSpreadsheet + + if mode == "a": + raise ValueError("Append mode is not supported with odf!") + + engine_kwargs = combine_kwargs(engine_kwargs, kwargs) + self._book = OpenDocumentSpreadsheet(**engine_kwargs) + + super().__init__( + path, + mode=mode, + storage_options=storage_options, + if_sheet_exists=if_sheet_exists, + engine_kwargs=engine_kwargs, + ) + + self._style_dict: dict[str, str] = {} + + @property + def book(self): + """ + Book instance of class odf.opendocument.OpenDocumentSpreadsheet. + + This attribute can be used to access engine-specific features. + """ + return self._book + + @property + def sheets(self) -> dict[str, Any]: + """Mapping of sheet names to sheet objects.""" + from odf.table import Table + + result = { + sheet.getAttribute("name"): sheet + for sheet in self.book.getElementsByType(Table) + } + return result + + def _save(self) -> None: + """ + Save workbook to disk. 
+ """ + for sheet in self.sheets.values(): + self.book.spreadsheet.addElement(sheet) + self.book.save(self._handles.handle) + + def _write_cells( + self, + cells: list[ExcelCell], + sheet_name: str | None = None, + startrow: int = 0, + startcol: int = 0, + freeze_panes: tuple[int, int] | None = None, + ) -> None: + """ + Write the frame cells using odf + """ + from odf.table import ( + Table, + TableCell, + TableRow, + ) + from odf.text import P + + sheet_name = self._get_sheet_name(sheet_name) + assert sheet_name is not None + + if sheet_name in self.sheets: + wks = self.sheets[sheet_name] + else: + wks = Table(name=sheet_name) + self.book.spreadsheet.addElement(wks) + + if validate_freeze_panes(freeze_panes): + freeze_panes = cast(tuple[int, int], freeze_panes) + self._create_freeze_panes(sheet_name, freeze_panes) + + for _ in range(startrow): + wks.addElement(TableRow()) + + rows: DefaultDict = defaultdict(TableRow) + col_count: DefaultDict = defaultdict(int) + + for cell in sorted(cells, key=lambda cell: (cell.row, cell.col)): + # only add empty cells if the row is still empty + if not col_count[cell.row]: + for _ in range(startcol): + rows[cell.row].addElement(TableCell()) + + # fill with empty cells if needed + for _ in range(cell.col - col_count[cell.row]): + rows[cell.row].addElement(TableCell()) + col_count[cell.row] += 1 + + pvalue, tc = self._make_table_cell(cell) + rows[cell.row].addElement(tc) + col_count[cell.row] += 1 + p = P(text=pvalue) + tc.addElement(p) + + # add all rows to the sheet + if len(rows) > 0: + for row_nr in range(max(rows.keys()) + 1): + wks.addElement(rows[row_nr]) + + def _make_table_cell_attributes(self, cell) -> dict[str, int | str]: + """Convert cell attributes to OpenDocument attributes + + Parameters + ---------- + cell : ExcelCell + Spreadsheet cell data + + Returns + ------- + attributes : Dict[str, Union[int, str]] + Dictionary with attributes and attribute values + """ + attributes: dict[str, int | str] = {} + style_name = self._process_style(cell.style) + if style_name is not None: + attributes["stylename"] = style_name + if cell.mergestart is not None and cell.mergeend is not None: + attributes["numberrowsspanned"] = max(1, cell.mergestart) + attributes["numbercolumnsspanned"] = cell.mergeend + return attributes + + def _make_table_cell(self, cell) -> tuple[object, Any]: + """Convert cell data to an OpenDocument spreadsheet cell + + Parameters + ---------- + cell : ExcelCell + Spreadsheet cell data + + Returns + ------- + pvalue, cell : Tuple[str, TableCell] + Display value, Cell value + """ + from odf.table import TableCell + + attributes = self._make_table_cell_attributes(cell) + val, fmt = self._value_with_fmt(cell.val) + pvalue = value = val + if isinstance(val, bool): + value = str(val).lower() + pvalue = str(val).upper() + if isinstance(val, datetime.datetime): + # Fast formatting + value = val.isoformat() + # Slow but locale-dependent + pvalue = val.strftime("%c") + return ( + pvalue, + TableCell(valuetype="date", datevalue=value, attributes=attributes), + ) + elif isinstance(val, datetime.date): + # Fast formatting + value = f"{val.year}-{val.month:02d}-{val.day:02d}" + # Slow but locale-dependent + pvalue = val.strftime("%x") + return ( + pvalue, + TableCell(valuetype="date", datevalue=value, attributes=attributes), + ) + else: + class_to_cell_type = { + str: "string", + int: "float", + float: "float", + bool: "boolean", + } + return ( + pvalue, + TableCell( + valuetype=class_to_cell_type[type(val)], + value=value, + 
attributes=attributes, + ), + ) + + @overload + def _process_style(self, style: dict[str, Any]) -> str: + ... + + @overload + def _process_style(self, style: None) -> None: + ... + + def _process_style(self, style: dict[str, Any] | None) -> str | None: + """Convert a style dictionary to a OpenDocument style sheet + + Parameters + ---------- + style : Dict + Style dictionary + + Returns + ------- + style_key : str + Unique style key for later reference in sheet + """ + from odf.style import ( + ParagraphProperties, + Style, + TableCellProperties, + TextProperties, + ) + + if style is None: + return None + style_key = json.ujson_dumps(style) + if style_key in self._style_dict: + return self._style_dict[style_key] + name = f"pd{len(self._style_dict)+1}" + self._style_dict[style_key] = name + odf_style = Style(name=name, family="table-cell") + if "font" in style: + font = style["font"] + if font.get("bold", False): + odf_style.addElement(TextProperties(fontweight="bold")) + if "borders" in style: + borders = style["borders"] + for side, thickness in borders.items(): + thickness_translation = {"thin": "0.75pt solid #000000"} + odf_style.addElement( + TableCellProperties( + attributes={f"border{side}": thickness_translation[thickness]} + ) + ) + if "alignment" in style: + alignment = style["alignment"] + horizontal = alignment.get("horizontal") + if horizontal: + odf_style.addElement(ParagraphProperties(textalign=horizontal)) + vertical = alignment.get("vertical") + if vertical: + odf_style.addElement(TableCellProperties(verticalalign=vertical)) + self.book.styles.addElement(odf_style) + return name + + def _create_freeze_panes( + self, sheet_name: str, freeze_panes: tuple[int, int] + ) -> None: + """ + Create freeze panes in the sheet. + + Parameters + ---------- + sheet_name : str + Name of the spreadsheet + freeze_panes : tuple of (int, int) + Freeze pane location x and y + """ + from odf.config import ( + ConfigItem, + ConfigItemMapEntry, + ConfigItemMapIndexed, + ConfigItemMapNamed, + ConfigItemSet, + ) + + config_item_set = ConfigItemSet(name="ooo:view-settings") + self.book.settings.addElement(config_item_set) + + config_item_map_indexed = ConfigItemMapIndexed(name="Views") + config_item_set.addElement(config_item_map_indexed) + + config_item_map_entry = ConfigItemMapEntry() + config_item_map_indexed.addElement(config_item_map_entry) + + config_item_map_named = ConfigItemMapNamed(name="Tables") + config_item_map_entry.addElement(config_item_map_named) + + config_item_map_entry = ConfigItemMapEntry(name=sheet_name) + config_item_map_named.addElement(config_item_map_entry) + + config_item_map_entry.addElement( + ConfigItem(name="HorizontalSplitMode", type="short", text="2") + ) + config_item_map_entry.addElement( + ConfigItem(name="VerticalSplitMode", type="short", text="2") + ) + config_item_map_entry.addElement( + ConfigItem( + name="HorizontalSplitPosition", type="int", text=str(freeze_panes[0]) + ) + ) + config_item_map_entry.addElement( + ConfigItem( + name="VerticalSplitPosition", type="int", text=str(freeze_panes[1]) + ) + ) + config_item_map_entry.addElement( + ConfigItem(name="PositionRight", type="int", text=str(freeze_panes[0])) + ) + config_item_map_entry.addElement( + ConfigItem(name="PositionBottom", type="int", text=str(freeze_panes[1])) + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_openpyxl.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_openpyxl.py new file mode 100644 index 00000000..ca7e84f7 --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_openpyxl.py @@ -0,0 +1,640 @@ +from __future__ import annotations + +import mmap +from typing import ( + TYPE_CHECKING, + Any, + cast, +) + +import numpy as np + +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc + +from pandas.core.shared_docs import _shared_docs + +from pandas.io.excel._base import ( + BaseExcelReader, + ExcelWriter, +) +from pandas.io.excel._util import ( + combine_kwargs, + validate_freeze_panes, +) + +if TYPE_CHECKING: + from openpyxl import Workbook + from openpyxl.descriptors.serialisable import Serialisable + + from pandas._typing import ( + ExcelWriterIfSheetExists, + FilePath, + ReadBuffer, + Scalar, + StorageOptions, + WriteExcelBuffer, + ) + + +class OpenpyxlWriter(ExcelWriter): + _engine = "openpyxl" + _supported_extensions = (".xlsx", ".xlsm") + + def __init__( + self, + path: FilePath | WriteExcelBuffer | ExcelWriter, + engine: str | None = None, + date_format: str | None = None, + datetime_format: str | None = None, + mode: str = "w", + storage_options: StorageOptions | None = None, + if_sheet_exists: ExcelWriterIfSheetExists | None = None, + engine_kwargs: dict[str, Any] | None = None, + **kwargs, + ) -> None: + # Use the openpyxl module as the Excel writer. + from openpyxl.workbook import Workbook + + engine_kwargs = combine_kwargs(engine_kwargs, kwargs) + + super().__init__( + path, + mode=mode, + storage_options=storage_options, + if_sheet_exists=if_sheet_exists, + engine_kwargs=engine_kwargs, + ) + + # ExcelWriter replaced "a" by "r+" to allow us to first read the excel file from + # the file and later write to it + if "r+" in self._mode: # Load from existing workbook + from openpyxl import load_workbook + + try: + self._book = load_workbook(self._handles.handle, **engine_kwargs) + except TypeError: + self._handles.handle.close() + raise + self._handles.handle.seek(0) + else: + # Create workbook object with default optimized_write=True. + try: + self._book = Workbook(**engine_kwargs) + except TypeError: + self._handles.handle.close() + raise + + if self.book.worksheets: + self.book.remove(self.book.worksheets[0]) + + @property + def book(self) -> Workbook: + """ + Book instance of class openpyxl.workbook.Workbook. + + This attribute can be used to access engine-specific features. + """ + return self._book + + @property + def sheets(self) -> dict[str, Any]: + """Mapping of sheet names to sheet objects.""" + result = {name: self.book[name] for name in self.book.sheetnames} + return result + + def _save(self) -> None: + """ + Save workbook to disk. + """ + self.book.save(self._handles.handle) + if "r+" in self._mode and not isinstance(self._handles.handle, mmap.mmap): + # truncate file to the written content + self._handles.handle.truncate() + + @classmethod + def _convert_to_style_kwargs(cls, style_dict: dict) -> dict[str, Serialisable]: + """ + Convert a style_dict to a set of kwargs suitable for initializing + or updating-on-copy an openpyxl v2 style object. + + Parameters + ---------- + style_dict : dict + A dict with zero or more of the following keys (or their synonyms). + 'font' + 'fill' + 'border' ('borders') + 'alignment' + 'number_format' + 'protection' + + Returns + ------- + style_kwargs : dict + A dict with the same, normalized keys as ``style_dict`` but each + value has been replaced with a native openpyxl style object of the + appropriate class. 
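+
+        For example, ``{"font": {"bold": True}}`` is converted to
+        ``{"font": Font(bold=True)}`` via ``_convert_to_font``, and the
+        synonym key ``"borders"`` is first normalized to ``"border"``.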
+ """ + _style_key_map = {"borders": "border"} + + style_kwargs: dict[str, Serialisable] = {} + for k, v in style_dict.items(): + k = _style_key_map.get(k, k) + _conv_to_x = getattr(cls, f"_convert_to_{k}", lambda x: None) + new_v = _conv_to_x(v) + if new_v: + style_kwargs[k] = new_v + + return style_kwargs + + @classmethod + def _convert_to_color(cls, color_spec): + """ + Convert ``color_spec`` to an openpyxl v2 Color object. + + Parameters + ---------- + color_spec : str, dict + A 32-bit ARGB hex string, or a dict with zero or more of the + following keys. + 'rgb' + 'indexed' + 'auto' + 'theme' + 'tint' + 'index' + 'type' + + Returns + ------- + color : openpyxl.styles.Color + """ + from openpyxl.styles import Color + + if isinstance(color_spec, str): + return Color(color_spec) + else: + return Color(**color_spec) + + @classmethod + def _convert_to_font(cls, font_dict): + """ + Convert ``font_dict`` to an openpyxl v2 Font object. + + Parameters + ---------- + font_dict : dict + A dict with zero or more of the following keys (or their synonyms). + 'name' + 'size' ('sz') + 'bold' ('b') + 'italic' ('i') + 'underline' ('u') + 'strikethrough' ('strike') + 'color' + 'vertAlign' ('vertalign') + 'charset' + 'scheme' + 'family' + 'outline' + 'shadow' + 'condense' + + Returns + ------- + font : openpyxl.styles.Font + """ + from openpyxl.styles import Font + + _font_key_map = { + "sz": "size", + "b": "bold", + "i": "italic", + "u": "underline", + "strike": "strikethrough", + "vertalign": "vertAlign", + } + + font_kwargs = {} + for k, v in font_dict.items(): + k = _font_key_map.get(k, k) + if k == "color": + v = cls._convert_to_color(v) + font_kwargs[k] = v + + return Font(**font_kwargs) + + @classmethod + def _convert_to_stop(cls, stop_seq): + """ + Convert ``stop_seq`` to a list of openpyxl v2 Color objects, + suitable for initializing the ``GradientFill`` ``stop`` parameter. + + Parameters + ---------- + stop_seq : iterable + An iterable that yields objects suitable for consumption by + ``_convert_to_color``. + + Returns + ------- + stop : list of openpyxl.styles.Color + """ + return map(cls._convert_to_color, stop_seq) + + @classmethod + def _convert_to_fill(cls, fill_dict: dict[str, Any]): + """ + Convert ``fill_dict`` to an openpyxl v2 Fill object. + + Parameters + ---------- + fill_dict : dict + A dict with one or more of the following keys (or their synonyms), + 'fill_type' ('patternType', 'patterntype') + 'start_color' ('fgColor', 'fgcolor') + 'end_color' ('bgColor', 'bgcolor') + or one or more of the following keys (or their synonyms). 
+ 'type' ('fill_type') + 'degree' + 'left' + 'right' + 'top' + 'bottom' + 'stop' + + Returns + ------- + fill : openpyxl.styles.Fill + """ + from openpyxl.styles import ( + GradientFill, + PatternFill, + ) + + _pattern_fill_key_map = { + "patternType": "fill_type", + "patterntype": "fill_type", + "fgColor": "start_color", + "fgcolor": "start_color", + "bgColor": "end_color", + "bgcolor": "end_color", + } + + _gradient_fill_key_map = {"fill_type": "type"} + + pfill_kwargs = {} + gfill_kwargs = {} + for k, v in fill_dict.items(): + pk = _pattern_fill_key_map.get(k) + gk = _gradient_fill_key_map.get(k) + if pk in ["start_color", "end_color"]: + v = cls._convert_to_color(v) + if gk == "stop": + v = cls._convert_to_stop(v) + if pk: + pfill_kwargs[pk] = v + elif gk: + gfill_kwargs[gk] = v + else: + pfill_kwargs[k] = v + gfill_kwargs[k] = v + + try: + return PatternFill(**pfill_kwargs) + except TypeError: + return GradientFill(**gfill_kwargs) + + @classmethod + def _convert_to_side(cls, side_spec): + """ + Convert ``side_spec`` to an openpyxl v2 Side object. + + Parameters + ---------- + side_spec : str, dict + A string specifying the border style, or a dict with zero or more + of the following keys (or their synonyms). + 'style' ('border_style') + 'color' + + Returns + ------- + side : openpyxl.styles.Side + """ + from openpyxl.styles import Side + + _side_key_map = {"border_style": "style"} + + if isinstance(side_spec, str): + return Side(style=side_spec) + + side_kwargs = {} + for k, v in side_spec.items(): + k = _side_key_map.get(k, k) + if k == "color": + v = cls._convert_to_color(v) + side_kwargs[k] = v + + return Side(**side_kwargs) + + @classmethod + def _convert_to_border(cls, border_dict): + """ + Convert ``border_dict`` to an openpyxl v2 Border object. + + Parameters + ---------- + border_dict : dict + A dict with zero or more of the following keys (or their synonyms). + 'left' + 'right' + 'top' + 'bottom' + 'diagonal' + 'diagonal_direction' + 'vertical' + 'horizontal' + 'diagonalUp' ('diagonalup') + 'diagonalDown' ('diagonaldown') + 'outline' + + Returns + ------- + border : openpyxl.styles.Border + """ + from openpyxl.styles import Border + + _border_key_map = {"diagonalup": "diagonalUp", "diagonaldown": "diagonalDown"} + + border_kwargs = {} + for k, v in border_dict.items(): + k = _border_key_map.get(k, k) + if k == "color": + v = cls._convert_to_color(v) + if k in ["left", "right", "top", "bottom", "diagonal"]: + v = cls._convert_to_side(v) + border_kwargs[k] = v + + return Border(**border_kwargs) + + @classmethod + def _convert_to_alignment(cls, alignment_dict): + """ + Convert ``alignment_dict`` to an openpyxl v2 Alignment object. + + Parameters + ---------- + alignment_dict : dict + A dict with zero or more of the following keys (or their synonyms). + 'horizontal' + 'vertical' + 'text_rotation' + 'wrap_text' + 'shrink_to_fit' + 'indent' + Returns + ------- + alignment : openpyxl.styles.Alignment + """ + from openpyxl.styles import Alignment + + return Alignment(**alignment_dict) + + @classmethod + def _convert_to_number_format(cls, number_format_dict): + """ + Convert ``number_format_dict`` to an openpyxl v2.1.0 number format + initializer. + + Parameters + ---------- + number_format_dict : dict + A dict with zero or more of the following keys. 
+ 'format_code' : str + + Returns + ------- + number_format : str + """ + return number_format_dict["format_code"] + + @classmethod + def _convert_to_protection(cls, protection_dict): + """ + Convert ``protection_dict`` to an openpyxl v2 Protection object. + + Parameters + ---------- + protection_dict : dict + A dict with zero or more of the following keys. + 'locked' + 'hidden' + + Returns + ------- + """ + from openpyxl.styles import Protection + + return Protection(**protection_dict) + + def _write_cells( + self, + cells, + sheet_name: str | None = None, + startrow: int = 0, + startcol: int = 0, + freeze_panes: tuple[int, int] | None = None, + ) -> None: + # Write the frame cells using openpyxl. + sheet_name = self._get_sheet_name(sheet_name) + + _style_cache: dict[str, dict[str, Serialisable]] = {} + + if sheet_name in self.sheets and self._if_sheet_exists != "new": + if "r+" in self._mode: + if self._if_sheet_exists == "replace": + old_wks = self.sheets[sheet_name] + target_index = self.book.index(old_wks) + del self.book[sheet_name] + wks = self.book.create_sheet(sheet_name, target_index) + elif self._if_sheet_exists == "error": + raise ValueError( + f"Sheet '{sheet_name}' already exists and " + f"if_sheet_exists is set to 'error'." + ) + elif self._if_sheet_exists == "overlay": + wks = self.sheets[sheet_name] + else: + raise ValueError( + f"'{self._if_sheet_exists}' is not valid for if_sheet_exists. " + "Valid options are 'error', 'new', 'replace' and 'overlay'." + ) + else: + wks = self.sheets[sheet_name] + else: + wks = self.book.create_sheet() + wks.title = sheet_name + + if validate_freeze_panes(freeze_panes): + freeze_panes = cast(tuple[int, int], freeze_panes) + wks.freeze_panes = wks.cell( + row=freeze_panes[0] + 1, column=freeze_panes[1] + 1 + ) + + for cell in cells: + xcell = wks.cell( + row=startrow + cell.row + 1, column=startcol + cell.col + 1 + ) + xcell.value, fmt = self._value_with_fmt(cell.val) + if fmt: + xcell.number_format = fmt + + style_kwargs: dict[str, Serialisable] | None = {} + if cell.style: + key = str(cell.style) + style_kwargs = _style_cache.get(key) + if style_kwargs is None: + style_kwargs = self._convert_to_style_kwargs(cell.style) + _style_cache[key] = style_kwargs + + if style_kwargs: + for k, v in style_kwargs.items(): + setattr(xcell, k, v) + + if cell.mergestart is not None and cell.mergeend is not None: + wks.merge_cells( + start_row=startrow + cell.row + 1, + start_column=startcol + cell.col + 1, + end_column=startcol + cell.mergeend + 1, + end_row=startrow + cell.mergestart + 1, + ) + + # When cells are merged only the top-left cell is preserved + # The behaviour of the other cells in a merged range is + # undefined + if style_kwargs: + first_row = startrow + cell.row + 1 + last_row = startrow + cell.mergestart + 1 + first_col = startcol + cell.col + 1 + last_col = startcol + cell.mergeend + 1 + + for row in range(first_row, last_row + 1): + for col in range(first_col, last_col + 1): + if row == first_row and col == first_col: + # Ignore first cell. It is already handled. + continue + xcell = wks.cell(column=col, row=row) + for k, v in style_kwargs.items(): + setattr(xcell, k, v) + + +class OpenpyxlReader(BaseExcelReader["Workbook"]): + @doc(storage_options=_shared_docs["storage_options"]) + def __init__( + self, + filepath_or_buffer: FilePath | ReadBuffer[bytes], + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + """ + Reader using openpyxl engine. 
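+
+        Workbooks are opened with ``read_only=True``, ``data_only=True`` and
+        ``keep_links=False`` (see ``load_workbook`` below), so formula cells
+        are read as their cached values.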
+ + Parameters + ---------- + filepath_or_buffer : str, path object or Workbook + Object to be parsed. + {storage_options} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. + """ + import_optional_dependency("openpyxl") + super().__init__( + filepath_or_buffer, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + + @property + def _workbook_class(self) -> type[Workbook]: + from openpyxl import Workbook + + return Workbook + + def load_workbook( + self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs + ) -> Workbook: + from openpyxl import load_workbook + + return load_workbook( + filepath_or_buffer, + read_only=True, + data_only=True, + keep_links=False, + **engine_kwargs, + ) + + @property + def sheet_names(self) -> list[str]: + return [sheet.title for sheet in self.book.worksheets] + + def get_sheet_by_name(self, name: str): + self.raise_if_bad_sheet_by_name(name) + return self.book[name] + + def get_sheet_by_index(self, index: int): + self.raise_if_bad_sheet_by_index(index) + return self.book.worksheets[index] + + def _convert_cell(self, cell) -> Scalar: + from openpyxl.cell.cell import ( + TYPE_ERROR, + TYPE_NUMERIC, + ) + + if cell.value is None: + return "" # compat with xlrd + elif cell.data_type == TYPE_ERROR: + return np.nan + elif cell.data_type == TYPE_NUMERIC: + val = int(cell.value) + if val == cell.value: + return val + return float(cell.value) + + return cell.value + + def get_sheet_data( + self, sheet, file_rows_needed: int | None = None + ) -> list[list[Scalar]]: + if self.book.read_only: + sheet.reset_dimensions() + + data: list[list[Scalar]] = [] + last_row_with_data = -1 + for row_number, row in enumerate(sheet.rows): + converted_row = [self._convert_cell(cell) for cell in row] + while converted_row and converted_row[-1] == "": + # trim trailing empty elements + converted_row.pop() + if converted_row: + last_row_with_data = row_number + data.append(converted_row) + if file_rows_needed is not None and len(data) >= file_rows_needed: + break + + # Trim trailing empty rows + data = data[: last_row_with_data + 1] + + if len(data) > 0: + # extend rows to max width + max_width = max(len(data_row) for data_row in data) + if min(len(data_row) for data_row in data) < max_width: + empty_cell: list[Scalar] = [""] + data = [ + data_row + (max_width - len(data_row)) * empty_cell + for data_row in data + ] + + return data diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_pyxlsb.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_pyxlsb.py new file mode 100644 index 00000000..a6e42616 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_pyxlsb.py @@ -0,0 +1,127 @@ +# pyright: reportMissingImports=false +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc + +from pandas.core.shared_docs import _shared_docs + +from pandas.io.excel._base import BaseExcelReader + +if TYPE_CHECKING: + from pyxlsb import Workbook + + from pandas._typing import ( + FilePath, + ReadBuffer, + Scalar, + StorageOptions, + ) + + +class PyxlsbReader(BaseExcelReader["Workbook"]): + @doc(storage_options=_shared_docs["storage_options"]) + def __init__( + self, + filepath_or_buffer: FilePath | ReadBuffer[bytes], + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + """ + Reader using pyxlsb engine. 
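+
+        Note that the xlsb format stores dates as floats, so datetimes
+        cannot be distinguished from plain numbers when reading (see
+        ``_convert_cell`` below).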
+ + Parameters + ---------- + filepath_or_buffer : str, path object, or Workbook + Object to be parsed. + {storage_options} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. + """ + import_optional_dependency("pyxlsb") + # This will call load_workbook on the filepath or buffer + # And set the result to the book-attribute + super().__init__( + filepath_or_buffer, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + + @property + def _workbook_class(self) -> type[Workbook]: + from pyxlsb import Workbook + + return Workbook + + def load_workbook( + self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs + ) -> Workbook: + from pyxlsb import open_workbook + + # TODO: hack in buffer capability + # This might need some modifications to the Pyxlsb library + # Actual work for opening it is in xlsbpackage.py, line 20-ish + + return open_workbook(filepath_or_buffer, **engine_kwargs) + + @property + def sheet_names(self) -> list[str]: + return self.book.sheets + + def get_sheet_by_name(self, name: str): + self.raise_if_bad_sheet_by_name(name) + return self.book.get_sheet(name) + + def get_sheet_by_index(self, index: int): + self.raise_if_bad_sheet_by_index(index) + # pyxlsb sheets are indexed from 1 onwards + # There's a fix for this in the source, but the pypi package doesn't have it + return self.book.get_sheet(index + 1) + + def _convert_cell(self, cell) -> Scalar: + # TODO: there is no way to distinguish between floats and datetimes in pyxlsb + # This means that there is no way to read datetime types from an xlsb file yet + if cell.v is None: + return "" # Prevents non-named columns from not showing up as Unnamed: i + if isinstance(cell.v, float): + val = int(cell.v) + if val == cell.v: + return val + else: + return float(cell.v) + + return cell.v + + def get_sheet_data( + self, + sheet, + file_rows_needed: int | None = None, + ) -> list[list[Scalar]]: + data: list[list[Scalar]] = [] + previous_row_number = -1 + # When sparse=True the rows can have different lengths and empty rows are + # not returned. The cells are namedtuples of row, col, value (r, c, v). 
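+        # For example, if data exists only at row indices 0 and 3, two empty
+        # lists are inserted for the skipped rows 1-2 below, so that list
+        # positions continue to line up with sheet row numbers.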
+ for row in sheet.rows(sparse=True): + row_number = row[0].r + converted_row = [self._convert_cell(cell) for cell in row] + while converted_row and converted_row[-1] == "": + # trim trailing empty elements + converted_row.pop() + if converted_row: + data.extend([[]] * (row_number - previous_row_number - 1)) + data.append(converted_row) + previous_row_number = row_number + if file_rows_needed is not None and len(data) >= file_rows_needed: + break + if data: + # extend rows to max_width + max_width = max(len(data_row) for data_row in data) + if min(len(data_row) for data_row in data) < max_width: + empty_cell: list[Scalar] = [""] + data = [ + data_row + (max_width - len(data_row)) * empty_cell + for data_row in data + ] + return data diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_util.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_util.py new file mode 100644 index 00000000..f7a1fcb8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_util.py @@ -0,0 +1,334 @@ +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterable, + MutableMapping, + Sequence, +) +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + TypeVar, + overload, +) + +from pandas.compat._optional import import_optional_dependency + +from pandas.core.dtypes.common import ( + is_integer, + is_list_like, +) + +if TYPE_CHECKING: + from pandas.io.excel._base import ExcelWriter + + ExcelWriter_t = type[ExcelWriter] + usecols_func = TypeVar("usecols_func", bound=Callable[[Hashable], object]) + +_writers: MutableMapping[str, ExcelWriter_t] = {} + + +def register_writer(klass: ExcelWriter_t) -> None: + """ + Add engine to the excel writer registry.io.excel. + + You must use this method to integrate with ``to_excel``. + + Parameters + ---------- + klass : ExcelWriter + """ + if not callable(klass): + raise ValueError("Can only register callables as engines") + engine_name = klass._engine + _writers[engine_name] = klass + + +def get_default_engine(ext: str, mode: Literal["reader", "writer"] = "reader") -> str: + """ + Return the default reader/writer for the given extension. + + Parameters + ---------- + ext : str + The excel file extension for which to get the default engine. + mode : str {'reader', 'writer'} + Whether to get the default engine for reading or writing. + Either 'reader' or 'writer' + + Returns + ------- + str + The default engine for the extension. + """ + _default_readers = { + "xlsx": "openpyxl", + "xlsm": "openpyxl", + "xlsb": "pyxlsb", + "xls": "xlrd", + "ods": "odf", + } + _default_writers = { + "xlsx": "openpyxl", + "xlsm": "openpyxl", + "xlsb": "pyxlsb", + "ods": "odf", + } + assert mode in ["reader", "writer"] + if mode == "writer": + # Prefer xlsxwriter over openpyxl if installed + xlsxwriter = import_optional_dependency("xlsxwriter", errors="warn") + if xlsxwriter: + _default_writers["xlsx"] = "xlsxwriter" + return _default_writers[ext] + else: + return _default_readers[ext] + + +def get_writer(engine_name: str) -> ExcelWriter_t: + try: + return _writers[engine_name] + except KeyError as err: + raise ValueError(f"No Excel writer '{engine_name}'") from err + + +def _excel2num(x: str) -> int: + """ + Convert Excel column name like 'AB' to 0-based column index. + + Parameters + ---------- + x : str + The Excel column name to convert to a 0-based column index. + + Returns + ------- + num : int + The column index corresponding to the name. 
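+        For example, ``"A"`` maps to ``0`` and ``"AB"`` maps to ``27``.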
+ + Raises + ------ + ValueError + Part of the Excel column name was invalid. + """ + index = 0 + + for c in x.upper().strip(): + cp = ord(c) + + if cp < ord("A") or cp > ord("Z"): + raise ValueError(f"Invalid column name: {x}") + + index = index * 26 + cp - ord("A") + 1 + + return index - 1 + + +def _range2cols(areas: str) -> list[int]: + """ + Convert comma separated list of column names and ranges to indices. + + Parameters + ---------- + areas : str + A string containing a sequence of column ranges (or areas). + + Returns + ------- + cols : list + A list of 0-based column indices. + + Examples + -------- + >>> _range2cols('A:E') + [0, 1, 2, 3, 4] + >>> _range2cols('A,C,Z:AB') + [0, 2, 25, 26, 27] + """ + cols: list[int] = [] + + for rng in areas.split(","): + if ":" in rng: + rngs = rng.split(":") + cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1)) + else: + cols.append(_excel2num(rng)) + + return cols + + +@overload +def maybe_convert_usecols(usecols: str | list[int]) -> list[int]: + ... + + +@overload +def maybe_convert_usecols(usecols: list[str]) -> list[str]: + ... + + +@overload +def maybe_convert_usecols(usecols: usecols_func) -> usecols_func: + ... + + +@overload +def maybe_convert_usecols(usecols: None) -> None: + ... + + +def maybe_convert_usecols( + usecols: str | list[int] | list[str] | usecols_func | None, +) -> None | list[int] | list[str] | usecols_func: + """ + Convert `usecols` into a compatible format for parsing in `parsers.py`. + + Parameters + ---------- + usecols : object + The use-columns object to potentially convert. + + Returns + ------- + converted : object + The compatible format of `usecols`. + """ + if usecols is None: + return usecols + + if is_integer(usecols): + raise ValueError( + "Passing an integer for `usecols` is no longer supported. " + "Please pass in a list of int from 0 to `usecols` inclusive instead." + ) + + if isinstance(usecols, str): + return _range2cols(usecols) + + return usecols + + +@overload +def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]: + ... + + +@overload +def validate_freeze_panes(freeze_panes: None) -> Literal[False]: + ... + + +def validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool: + if freeze_panes is not None: + if len(freeze_panes) == 2 and all( + isinstance(item, int) for item in freeze_panes + ): + return True + + raise ValueError( + "freeze_panes must be of form (row, column) " + "where row and column are integers" + ) + + # freeze_panes wasn't specified, return False so it won't be applied + # to output sheet + return False + + +def fill_mi_header( + row: list[Hashable], control_row: list[bool] +) -> tuple[list[Hashable], list[bool]]: + """ + Forward fill blank entries in row but only inside the same parent index. + + Used for creating headers in Multiindex. + + Parameters + ---------- + row : list + List of items in a single row. + control_row : list of bool + Helps to determine if particular column is in same parent index as the + previous value. Used to stop propagation of empty cells between + different indexes. 
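+        For example, filling ``['a', '', 'b', '']`` with an all-``True``
+        control row yields ``['a', 'a', 'b', 'b']``.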
+ + Returns + ------- + Returns changed row and control_row + """ + last = row[0] + for i in range(1, len(row)): + if not control_row[i]: + last = row[i] + + if row[i] == "" or row[i] is None: + row[i] = last + else: + control_row[i] = False + last = row[i] + + return row, control_row + + +def pop_header_name( + row: list[Hashable], index_col: int | Sequence[int] +) -> tuple[Hashable | None, list[Hashable]]: + """ + Pop the header name for MultiIndex parsing. + + Parameters + ---------- + row : list + The data row to parse for the header name. + index_col : int, list + The index columns for our data. Assumed to be non-null. + + Returns + ------- + header_name : str + The extracted header name. + trimmed_row : list + The original data row with the header name removed. + """ + # Pop out header name and fill w/blank. + if is_list_like(index_col): + assert isinstance(index_col, Iterable) + i = max(index_col) + else: + assert not isinstance(index_col, Iterable) + i = index_col + + header_name = row[i] + header_name = None if header_name == "" else header_name + + return header_name, row[:i] + [""] + row[i + 1 :] + + +def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict: + """ + Used to combine two sources of kwargs for the backend engine. + + Use of kwargs is deprecated, this function is solely for use in 1.3 and should + be removed in 1.4/2.0. Also _base.ExcelWriter.__new__ ensures either engine_kwargs + or kwargs must be None or empty respectively. + + Parameters + ---------- + engine_kwargs: dict + kwargs to be passed through to the engine. + kwargs: dict + kwargs to be psased through to the engine (deprecated) + + Returns + ------- + engine_kwargs combined with kwargs + """ + if engine_kwargs is None: + result = {} + else: + result = engine_kwargs.copy() + result.update(kwargs) + return result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_xlrd.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_xlrd.py new file mode 100644 index 00000000..c68a0ab5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_xlrd.py @@ -0,0 +1,140 @@ +from __future__ import annotations + +from datetime import time +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc + +from pandas.core.shared_docs import _shared_docs + +from pandas.io.excel._base import BaseExcelReader + +if TYPE_CHECKING: + from xlrd import Book + + from pandas._typing import ( + Scalar, + StorageOptions, + ) + + +class XlrdReader(BaseExcelReader["Book"]): + @doc(storage_options=_shared_docs["storage_options"]) + def __init__( + self, + filepath_or_buffer, + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + """ + Reader using xlrd engine. + + Parameters + ---------- + filepath_or_buffer : str, path object or Workbook + Object to be parsed. + {storage_options} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. 
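+
+        Note: xlrd >= 2.0 only reads the legacy ``.xls`` format, hence the
+        import error message below.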
+ """ + err_msg = "Install xlrd >= 2.0.1 for xls Excel support" + import_optional_dependency("xlrd", extra=err_msg) + super().__init__( + filepath_or_buffer, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + + @property + def _workbook_class(self) -> type[Book]: + from xlrd import Book + + return Book + + def load_workbook(self, filepath_or_buffer, engine_kwargs) -> Book: + from xlrd import open_workbook + + if hasattr(filepath_or_buffer, "read"): + data = filepath_or_buffer.read() + return open_workbook(file_contents=data, **engine_kwargs) + else: + return open_workbook(filepath_or_buffer, **engine_kwargs) + + @property + def sheet_names(self): + return self.book.sheet_names() + + def get_sheet_by_name(self, name): + self.raise_if_bad_sheet_by_name(name) + return self.book.sheet_by_name(name) + + def get_sheet_by_index(self, index): + self.raise_if_bad_sheet_by_index(index) + return self.book.sheet_by_index(index) + + def get_sheet_data( + self, sheet, file_rows_needed: int | None = None + ) -> list[list[Scalar]]: + from xlrd import ( + XL_CELL_BOOLEAN, + XL_CELL_DATE, + XL_CELL_ERROR, + XL_CELL_NUMBER, + xldate, + ) + + epoch1904 = self.book.datemode + + def _parse_cell(cell_contents, cell_typ): + """ + converts the contents of the cell into a pandas appropriate object + """ + if cell_typ == XL_CELL_DATE: + # Use the newer xlrd datetime handling. + try: + cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904) + except OverflowError: + return cell_contents + + # Excel doesn't distinguish between dates and time, + # so we treat dates on the epoch as times only. + # Also, Excel supports 1900 and 1904 epochs. + year = (cell_contents.timetuple())[0:3] + if (not epoch1904 and year == (1899, 12, 31)) or ( + epoch1904 and year == (1904, 1, 1) + ): + cell_contents = time( + cell_contents.hour, + cell_contents.minute, + cell_contents.second, + cell_contents.microsecond, + ) + + elif cell_typ == XL_CELL_ERROR: + cell_contents = np.nan + elif cell_typ == XL_CELL_BOOLEAN: + cell_contents = bool(cell_contents) + elif cell_typ == XL_CELL_NUMBER: + # GH5394 - Excel 'numbers' are always floats + # it's a minimal perf hit and less surprising + val = int(cell_contents) + if val == cell_contents: + cell_contents = val + return cell_contents + + data = [] + + nrows = sheet.nrows + if file_rows_needed is not None: + nrows = min(nrows, file_rows_needed) + for i in range(nrows): + row = [ + _parse_cell(value, typ) + for value, typ in zip(sheet.row_values(i), sheet.row_types(i)) + ] + data.append(row) + + return data diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_xlsxwriter.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_xlsxwriter.py new file mode 100644 index 00000000..afa988a5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/excel/_xlsxwriter.py @@ -0,0 +1,285 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +from pandas._libs import json + +from pandas.io.excel._base import ExcelWriter +from pandas.io.excel._util import ( + combine_kwargs, + validate_freeze_panes, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ExcelWriterIfSheetExists, + FilePath, + StorageOptions, + WriteExcelBuffer, + ) + + +class _XlsxStyler: + # Map from openpyxl-oriented styles to flatter xlsxwriter representation + # Ordering necessary for both determinism and because some are keyed by + # prefixes of others. 
+ STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = { + "font": [ + (("name",), "font_name"), + (("sz",), "font_size"), + (("size",), "font_size"), + (("color", "rgb"), "font_color"), + (("color",), "font_color"), + (("b",), "bold"), + (("bold",), "bold"), + (("i",), "italic"), + (("italic",), "italic"), + (("u",), "underline"), + (("underline",), "underline"), + (("strike",), "font_strikeout"), + (("vertAlign",), "font_script"), + (("vertalign",), "font_script"), + ], + "number_format": [(("format_code",), "num_format"), ((), "num_format")], + "protection": [(("locked",), "locked"), (("hidden",), "hidden")], + "alignment": [ + (("horizontal",), "align"), + (("vertical",), "valign"), + (("text_rotation",), "rotation"), + (("wrap_text",), "text_wrap"), + (("indent",), "indent"), + (("shrink_to_fit",), "shrink"), + ], + "fill": [ + (("patternType",), "pattern"), + (("patterntype",), "pattern"), + (("fill_type",), "pattern"), + (("start_color", "rgb"), "fg_color"), + (("fgColor", "rgb"), "fg_color"), + (("fgcolor", "rgb"), "fg_color"), + (("start_color",), "fg_color"), + (("fgColor",), "fg_color"), + (("fgcolor",), "fg_color"), + (("end_color", "rgb"), "bg_color"), + (("bgColor", "rgb"), "bg_color"), + (("bgcolor", "rgb"), "bg_color"), + (("end_color",), "bg_color"), + (("bgColor",), "bg_color"), + (("bgcolor",), "bg_color"), + ], + "border": [ + (("color", "rgb"), "border_color"), + (("color",), "border_color"), + (("style",), "border"), + (("top", "color", "rgb"), "top_color"), + (("top", "color"), "top_color"), + (("top", "style"), "top"), + (("top",), "top"), + (("right", "color", "rgb"), "right_color"), + (("right", "color"), "right_color"), + (("right", "style"), "right"), + (("right",), "right"), + (("bottom", "color", "rgb"), "bottom_color"), + (("bottom", "color"), "bottom_color"), + (("bottom", "style"), "bottom"), + (("bottom",), "bottom"), + (("left", "color", "rgb"), "left_color"), + (("left", "color"), "left_color"), + (("left", "style"), "left"), + (("left",), "left"), + ], + } + + @classmethod + def convert(cls, style_dict, num_format_str=None): + """ + converts a style_dict to an xlsxwriter format dict + + Parameters + ---------- + style_dict : style dictionary to convert + num_format_str : optional number format string + """ + # Create a XlsxWriter format object. 
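+        # For example, {"font": {"bold": True}, "alignment": {"horizontal":
+        # "center"}} converts to {"bold": True, "align": "center"}.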
+ props = {} + + if num_format_str is not None: + props["num_format"] = num_format_str + + if style_dict is None: + return props + + if "borders" in style_dict: + style_dict = style_dict.copy() + style_dict["border"] = style_dict.pop("borders") + + for style_group_key, style_group in style_dict.items(): + for src, dst in cls.STYLE_MAPPING.get(style_group_key, []): + # src is a sequence of keys into a nested dict + # dst is a flat key + if dst in props: + continue + v = style_group + for k in src: + try: + v = v[k] + except (KeyError, TypeError): + break + else: + props[dst] = v + + if isinstance(props.get("pattern"), str): + # TODO: support other fill patterns + props["pattern"] = 0 if props["pattern"] == "none" else 1 + + for k in ["border", "top", "right", "bottom", "left"]: + if isinstance(props.get(k), str): + try: + props[k] = [ + "none", + "thin", + "medium", + "dashed", + "dotted", + "thick", + "double", + "hair", + "mediumDashed", + "dashDot", + "mediumDashDot", + "dashDotDot", + "mediumDashDotDot", + "slantDashDot", + ].index(props[k]) + except ValueError: + props[k] = 2 + + if isinstance(props.get("font_script"), str): + props["font_script"] = ["baseline", "superscript", "subscript"].index( + props["font_script"] + ) + + if isinstance(props.get("underline"), str): + props["underline"] = { + "none": 0, + "single": 1, + "double": 2, + "singleAccounting": 33, + "doubleAccounting": 34, + }[props["underline"]] + + # GH 30107 - xlsxwriter uses different name + if props.get("valign") == "center": + props["valign"] = "vcenter" + + return props + + +class XlsxWriter(ExcelWriter): + _engine = "xlsxwriter" + _supported_extensions = (".xlsx",) + + def __init__( + self, + path: FilePath | WriteExcelBuffer | ExcelWriter, + engine: str | None = None, + date_format: str | None = None, + datetime_format: str | None = None, + mode: str = "w", + storage_options: StorageOptions | None = None, + if_sheet_exists: ExcelWriterIfSheetExists | None = None, + engine_kwargs: dict[str, Any] | None = None, + **kwargs, + ) -> None: + # Use the xlsxwriter module as the Excel writer. + from xlsxwriter import Workbook + + engine_kwargs = combine_kwargs(engine_kwargs, kwargs) + + if mode == "a": + raise ValueError("Append mode is not supported with xlsxwriter!") + + super().__init__( + path, + engine=engine, + date_format=date_format, + datetime_format=datetime_format, + mode=mode, + storage_options=storage_options, + if_sheet_exists=if_sheet_exists, + engine_kwargs=engine_kwargs, + ) + + try: + self._book = Workbook(self._handles.handle, **engine_kwargs) + except TypeError: + self._handles.handle.close() + raise + + @property + def book(self): + """ + Book instance of class xlsxwriter.Workbook. + + This attribute can be used to access engine-specific features. + """ + return self._book + + @property + def sheets(self) -> dict[str, Any]: + result = self.book.sheetnames + return result + + def _save(self) -> None: + """ + Save workbook to disk. + """ + self.book.close() + + def _write_cells( + self, + cells, + sheet_name: str | None = None, + startrow: int = 0, + startcol: int = 0, + freeze_panes: tuple[int, int] | None = None, + ) -> None: + # Write the frame cells using xlsxwriter. 
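+        # Formats are cached below, keyed by the serialized style plus the
+        # number format, since xlsxwriter expects Format objects created via
+        # add_format rather than raw style dicts.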
+ sheet_name = self._get_sheet_name(sheet_name) + + wks = self.book.get_worksheet_by_name(sheet_name) + if wks is None: + wks = self.book.add_worksheet(sheet_name) + + style_dict = {"null": None} + + if validate_freeze_panes(freeze_panes): + wks.freeze_panes(*(freeze_panes)) + + for cell in cells: + val, fmt = self._value_with_fmt(cell.val) + + stylekey = json.ujson_dumps(cell.style) + if fmt: + stylekey += fmt + + if stylekey in style_dict: + style = style_dict[stylekey] + else: + style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt)) + style_dict[stylekey] = style + + if cell.mergestart is not None and cell.mergeend is not None: + wks.merge_range( + startrow + cell.row, + startcol + cell.col, + startrow + cell.mergestart, + startcol + cell.mergeend, + val, + style, + ) + else: + wks.write(startrow + cell.row, startcol + cell.col, val, style) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/feather_format.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/feather_format.py new file mode 100644 index 00000000..b018b572 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/feather_format.py @@ -0,0 +1,148 @@ +""" feather-format compat """ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc +from pandas.util._validators import check_dtype_backend + +import pandas as pd +from pandas.core.api import DataFrame +from pandas.core.shared_docs import _shared_docs + +from pandas.io._util import arrow_string_types_mapper +from pandas.io.common import get_handle + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Sequence, + ) + + from pandas._typing import ( + DtypeBackend, + FilePath, + ReadBuffer, + StorageOptions, + WriteBuffer, + ) + + +@doc(storage_options=_shared_docs["storage_options"]) +def to_feather( + df: DataFrame, + path: FilePath | WriteBuffer[bytes], + storage_options: StorageOptions | None = None, + **kwargs: Any, +) -> None: + """ + Write a DataFrame to the binary Feather format. + + Parameters + ---------- + df : DataFrame + path : str, path object, or file-like object + {storage_options} + + .. versionadded:: 1.2.0 + + **kwargs : + Additional keywords passed to `pyarrow.feather.write_feather`. + + """ + import_optional_dependency("pyarrow") + from pyarrow import feather + + if not isinstance(df, DataFrame): + raise ValueError("feather only support IO with DataFrames") + + with get_handle( + path, "wb", storage_options=storage_options, is_text=False + ) as handles: + feather.write_feather(df, handles.handle, **kwargs) + + +@doc(storage_options=_shared_docs["storage_options"]) +def read_feather( + path: FilePath | ReadBuffer[bytes], + columns: Sequence[Hashable] | None = None, + use_threads: bool = True, + storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame: + """ + Load a feather-format object from the file path. + + Parameters + ---------- + path : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``read()`` function. The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: ``file://localhost/path/to/table.feather``. 
+ columns : sequence, default None + If not provided, all columns are read. + use_threads : bool, default True + Whether to parallelize reading using multiple threads. + {storage_options} + + .. versionadded:: 1.2.0 + + dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + Returns + ------- + type of object stored in file + + Examples + -------- + >>> df = pd.read_feather("path/to/file.feather") # doctest: +SKIP + """ + import_optional_dependency("pyarrow") + from pyarrow import feather + + # import utils to register the pyarrow extension types + import pandas.core.arrays.arrow.extension_types # pyright: ignore[reportUnusedImport] # noqa: F401,E501 + + check_dtype_backend(dtype_backend) + + with get_handle( + path, "rb", storage_options=storage_options, is_text=False + ) as handles: + if dtype_backend is lib.no_default and not using_pyarrow_string_dtype(): + return feather.read_feather( + handles.handle, columns=columns, use_threads=bool(use_threads) + ) + + pa_table = feather.read_table( + handles.handle, columns=columns, use_threads=bool(use_threads) + ) + + if dtype_backend == "numpy_nullable": + from pandas.io._util import _arrow_dtype_mapping + + return pa_table.to_pandas(types_mapper=_arrow_dtype_mapping().get) + + elif dtype_backend == "pyarrow": + return pa_table.to_pandas(types_mapper=pd.ArrowDtype) + + elif using_pyarrow_string_dtype(): + return pa_table.to_pandas(types_mapper=arrow_string_types_mapper()) + else: + raise NotImplementedError diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/__init__.py new file mode 100644 index 00000000..5e56b1bc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/__init__.py @@ -0,0 +1,9 @@ +# ruff: noqa: TCH004 +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + # import modules that have public classes/functions + from pandas.io.formats import style + + # and mark only those modules as public + __all__ = ["style"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/_color_data.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/_color_data.py new file mode 100644 index 00000000..2e7cb7f2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/_color_data.py @@ -0,0 +1,157 @@ +# GH37967: Enable the use of CSS named colors, as defined in +# matplotlib.colors.CSS4_COLORS, when exporting to Excel. +# This data has been copied here, instead of being imported from matplotlib, +# not to have ``to_excel`` methods require matplotlib. 
+# source: matplotlib._color_data (3.3.3) +from __future__ import annotations + +CSS4_COLORS = { + "aliceblue": "F0F8FF", + "antiquewhite": "FAEBD7", + "aqua": "00FFFF", + "aquamarine": "7FFFD4", + "azure": "F0FFFF", + "beige": "F5F5DC", + "bisque": "FFE4C4", + "black": "000000", + "blanchedalmond": "FFEBCD", + "blue": "0000FF", + "blueviolet": "8A2BE2", + "brown": "A52A2A", + "burlywood": "DEB887", + "cadetblue": "5F9EA0", + "chartreuse": "7FFF00", + "chocolate": "D2691E", + "coral": "FF7F50", + "cornflowerblue": "6495ED", + "cornsilk": "FFF8DC", + "crimson": "DC143C", + "cyan": "00FFFF", + "darkblue": "00008B", + "darkcyan": "008B8B", + "darkgoldenrod": "B8860B", + "darkgray": "A9A9A9", + "darkgreen": "006400", + "darkgrey": "A9A9A9", + "darkkhaki": "BDB76B", + "darkmagenta": "8B008B", + "darkolivegreen": "556B2F", + "darkorange": "FF8C00", + "darkorchid": "9932CC", + "darkred": "8B0000", + "darksalmon": "E9967A", + "darkseagreen": "8FBC8F", + "darkslateblue": "483D8B", + "darkslategray": "2F4F4F", + "darkslategrey": "2F4F4F", + "darkturquoise": "00CED1", + "darkviolet": "9400D3", + "deeppink": "FF1493", + "deepskyblue": "00BFFF", + "dimgray": "696969", + "dimgrey": "696969", + "dodgerblue": "1E90FF", + "firebrick": "B22222", + "floralwhite": "FFFAF0", + "forestgreen": "228B22", + "fuchsia": "FF00FF", + "gainsboro": "DCDCDC", + "ghostwhite": "F8F8FF", + "gold": "FFD700", + "goldenrod": "DAA520", + "gray": "808080", + "green": "008000", + "greenyellow": "ADFF2F", + "grey": "808080", + "honeydew": "F0FFF0", + "hotpink": "FF69B4", + "indianred": "CD5C5C", + "indigo": "4B0082", + "ivory": "FFFFF0", + "khaki": "F0E68C", + "lavender": "E6E6FA", + "lavenderblush": "FFF0F5", + "lawngreen": "7CFC00", + "lemonchiffon": "FFFACD", + "lightblue": "ADD8E6", + "lightcoral": "F08080", + "lightcyan": "E0FFFF", + "lightgoldenrodyellow": "FAFAD2", + "lightgray": "D3D3D3", + "lightgreen": "90EE90", + "lightgrey": "D3D3D3", + "lightpink": "FFB6C1", + "lightsalmon": "FFA07A", + "lightseagreen": "20B2AA", + "lightskyblue": "87CEFA", + "lightslategray": "778899", + "lightslategrey": "778899", + "lightsteelblue": "B0C4DE", + "lightyellow": "FFFFE0", + "lime": "00FF00", + "limegreen": "32CD32", + "linen": "FAF0E6", + "magenta": "FF00FF", + "maroon": "800000", + "mediumaquamarine": "66CDAA", + "mediumblue": "0000CD", + "mediumorchid": "BA55D3", + "mediumpurple": "9370DB", + "mediumseagreen": "3CB371", + "mediumslateblue": "7B68EE", + "mediumspringgreen": "00FA9A", + "mediumturquoise": "48D1CC", + "mediumvioletred": "C71585", + "midnightblue": "191970", + "mintcream": "F5FFFA", + "mistyrose": "FFE4E1", + "moccasin": "FFE4B5", + "navajowhite": "FFDEAD", + "navy": "000080", + "oldlace": "FDF5E6", + "olive": "808000", + "olivedrab": "6B8E23", + "orange": "FFA500", + "orangered": "FF4500", + "orchid": "DA70D6", + "palegoldenrod": "EEE8AA", + "palegreen": "98FB98", + "paleturquoise": "AFEEEE", + "palevioletred": "DB7093", + "papayawhip": "FFEFD5", + "peachpuff": "FFDAB9", + "peru": "CD853F", + "pink": "FFC0CB", + "plum": "DDA0DD", + "powderblue": "B0E0E6", + "purple": "800080", + "rebeccapurple": "663399", + "red": "FF0000", + "rosybrown": "BC8F8F", + "royalblue": "4169E1", + "saddlebrown": "8B4513", + "salmon": "FA8072", + "sandybrown": "F4A460", + "seagreen": "2E8B57", + "seashell": "FFF5EE", + "sienna": "A0522D", + "silver": "C0C0C0", + "skyblue": "87CEEB", + "slateblue": "6A5ACD", + "slategray": "708090", + "slategrey": "708090", + "snow": "FFFAFA", + "springgreen": "00FF7F", + "steelblue": "4682B4", + "tan": 
"D2B48C", + "teal": "008080", + "thistle": "D8BFD8", + "tomato": "FF6347", + "turquoise": "40E0D0", + "violet": "EE82EE", + "wheat": "F5DEB3", + "white": "FFFFFF", + "whitesmoke": "F5F5F5", + "yellow": "FFFF00", + "yellowgreen": "9ACD32", +} diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/console.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/console.py new file mode 100644 index 00000000..2a6cbe07 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/console.py @@ -0,0 +1,94 @@ +""" +Internal module for console introspection +""" +from __future__ import annotations + +from shutil import get_terminal_size + + +def get_console_size() -> tuple[int | None, int | None]: + """ + Return console size as tuple = (width, height). + + Returns (None,None) in non-interactive session. + """ + from pandas import get_option + + display_width = get_option("display.width") + display_height = get_option("display.max_rows") + + # Consider + # interactive shell terminal, can detect term size + # interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term + # size non-interactive script, should disregard term size + + # in addition + # width,height have default values, but setting to 'None' signals + # should use Auto-Detection, But only in interactive shell-terminal. + # Simple. yeah. + + if in_interactive_session(): + if in_ipython_frontend(): + # sane defaults for interactive non-shell terminal + # match default for width,height in config_init + from pandas._config.config import get_default_val + + terminal_width = get_default_val("display.width") + terminal_height = get_default_val("display.max_rows") + else: + # pure terminal + terminal_width, terminal_height = get_terminal_size() + else: + terminal_width, terminal_height = None, None + + # Note if the User sets width/Height to None (auto-detection) + # and we're in a script (non-inter), this will return (None,None) + # caller needs to deal. + return display_width or terminal_width, display_height or terminal_height + + +# ---------------------------------------------------------------------- +# Detect our environment + + +def in_interactive_session() -> bool: + """ + Check if we're running in an interactive shell. + + Returns + ------- + bool + True if running under python/ipython interactive shell. + """ + from pandas import get_option + + def check_main(): + try: + import __main__ as main + except ModuleNotFoundError: + return get_option("mode.sim_interactive") + return not hasattr(main, "__file__") or get_option("mode.sim_interactive") + + try: + # error: Name '__IPYTHON__' is not defined + return __IPYTHON__ or check_main() # type: ignore[name-defined] + except NameError: + return check_main() + + +def in_ipython_frontend() -> bool: + """ + Check if we're inside an IPython zmq frontend. + + Returns + ------- + bool + """ + try: + # error: Name 'get_ipython' is not defined + ip = get_ipython() # type: ignore[name-defined] + return "zmq" in str(type(ip)).lower() + except NameError: + pass + + return False diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/css.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/css.py new file mode 100644 index 00000000..ccce60c0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/css.py @@ -0,0 +1,421 @@ +""" +Utilities for interpreting CSS from Stylers for formatting non-HTML outputs. 
+""" +from __future__ import annotations + +import re +from typing import ( + TYPE_CHECKING, + Callable, +) +import warnings + +from pandas.errors import CSSWarning +from pandas.util._exceptions import find_stack_level + +if TYPE_CHECKING: + from collections.abc import ( + Generator, + Iterable, + Iterator, + ) + + +def _side_expander(prop_fmt: str) -> Callable: + """ + Wrapper to expand shorthand property into top, right, bottom, left properties + + Parameters + ---------- + side : str + The border side to expand into properties + + Returns + ------- + function: Return to call when a 'border(-{side}): {value}' string is encountered + """ + + def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]: + """ + Expand shorthand property into side-specific property (top, right, bottom, left) + + Parameters + ---------- + prop (str): CSS property name + value (str): String token for property + + Yields + ------ + Tuple (str, str): Expanded property, value + """ + tokens = value.split() + try: + mapping = self.SIDE_SHORTHANDS[len(tokens)] + except KeyError: + warnings.warn( + f'Could not expand "{prop}: {value}"', + CSSWarning, + stacklevel=find_stack_level(), + ) + return + for key, idx in zip(self.SIDES, mapping): + yield prop_fmt.format(key), tokens[idx] + + return expand + + +def _border_expander(side: str = "") -> Callable: + """ + Wrapper to expand 'border' property into border color, style, and width properties + + Parameters + ---------- + side : str + The border side to expand into properties + + Returns + ------- + function: Return to call when a 'border(-{side}): {value}' string is encountered + """ + if side != "": + side = f"-{side}" + + def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]: + """ + Expand border into color, style, and width tuples + + Parameters + ---------- + prop : str + CSS property name passed to styler + value : str + Value passed to styler for property + + Yields + ------ + Tuple (str, str): Expanded property, value + """ + tokens = value.split() + if len(tokens) == 0 or len(tokens) > 3: + warnings.warn( + f'Too many tokens provided to "{prop}" (expected 1-3)', + CSSWarning, + stacklevel=find_stack_level(), + ) + + # TODO: Can we use current color as initial value to comply with CSS standards? + border_declarations = { + f"border{side}-color": "black", + f"border{side}-style": "none", + f"border{side}-width": "medium", + } + for token in tokens: + if token.lower() in self.BORDER_STYLES: + border_declarations[f"border{side}-style"] = token + elif any(ratio in token.lower() for ratio in self.BORDER_WIDTH_RATIOS): + border_declarations[f"border{side}-width"] = token + else: + border_declarations[f"border{side}-color"] = token + # TODO: Warn user if item entered more than once (e.g. "border: red green") + + # Per CSS, "border" will reset previous "border-*" definitions + yield from self.atomize(border_declarations.items()) + + return expand + + +class CSSResolver: + """ + A callable for parsing and resolving CSS to atomic properties. 
+ """ + + UNIT_RATIOS = { + "pt": ("pt", 1), + "em": ("em", 1), + "rem": ("pt", 12), + "ex": ("em", 0.5), + # 'ch': + "px": ("pt", 0.75), + "pc": ("pt", 12), + "in": ("pt", 72), + "cm": ("in", 1 / 2.54), + "mm": ("in", 1 / 25.4), + "q": ("mm", 0.25), + "!!default": ("em", 0), + } + + FONT_SIZE_RATIOS = UNIT_RATIOS.copy() + FONT_SIZE_RATIOS.update( + { + "%": ("em", 0.01), + "xx-small": ("rem", 0.5), + "x-small": ("rem", 0.625), + "small": ("rem", 0.8), + "medium": ("rem", 1), + "large": ("rem", 1.125), + "x-large": ("rem", 1.5), + "xx-large": ("rem", 2), + "smaller": ("em", 1 / 1.2), + "larger": ("em", 1.2), + "!!default": ("em", 1), + } + ) + + MARGIN_RATIOS = UNIT_RATIOS.copy() + MARGIN_RATIOS.update({"none": ("pt", 0)}) + + BORDER_WIDTH_RATIOS = UNIT_RATIOS.copy() + BORDER_WIDTH_RATIOS.update( + { + "none": ("pt", 0), + "thick": ("px", 4), + "medium": ("px", 2), + "thin": ("px", 1), + # Default: medium only if solid + } + ) + + BORDER_STYLES = [ + "none", + "hidden", + "dotted", + "dashed", + "solid", + "double", + "groove", + "ridge", + "inset", + "outset", + "mediumdashdot", + "dashdotdot", + "hair", + "mediumdashdotdot", + "dashdot", + "slantdashdot", + "mediumdashed", + ] + + SIDE_SHORTHANDS = { + 1: [0, 0, 0, 0], + 2: [0, 1, 0, 1], + 3: [0, 1, 2, 1], + 4: [0, 1, 2, 3], + } + + SIDES = ("top", "right", "bottom", "left") + + CSS_EXPANSIONS = { + **{ + (f"border-{prop}" if prop else "border"): _border_expander(prop) + for prop in ["", "top", "right", "bottom", "left"] + }, + **{ + f"border-{prop}": _side_expander(f"border-{{:s}}-{prop}") + for prop in ["color", "style", "width"] + }, + "margin": _side_expander("margin-{:s}"), + "padding": _side_expander("padding-{:s}"), + } + + def __call__( + self, + declarations: str | Iterable[tuple[str, str]], + inherited: dict[str, str] | None = None, + ) -> dict[str, str]: + """ + The given declarations to atomic properties. + + Parameters + ---------- + declarations_str : str | Iterable[tuple[str, str]] + A CSS string or set of CSS declaration tuples + e.g. "font-weight: bold; background: blue" or + {("font-weight", "bold"), ("background", "blue")} + inherited : dict, optional + Atomic properties indicating the inherited style context in which + declarations_str is to be resolved. ``inherited`` should already + be resolved, i.e. valid output of this method. + + Returns + ------- + dict + Atomic CSS 2.2 properties. + + Examples + -------- + >>> resolve = CSSResolver() + >>> inherited = {'font-family': 'serif', 'font-weight': 'bold'} + >>> out = resolve(''' + ... border-color: BLUE RED; + ... font-size: 1em; + ... font-size: 2em; + ... font-weight: normal; + ... font-weight: inherit; + ... ''', inherited) + >>> sorted(out.items()) # doctest: +NORMALIZE_WHITESPACE + [('border-bottom-color', 'blue'), + ('border-left-color', 'red'), + ('border-right-color', 'red'), + ('border-top-color', 'blue'), + ('font-family', 'serif'), + ('font-size', '24pt'), + ('font-weight', 'bold')] + """ + if isinstance(declarations, str): + declarations = self.parse(declarations) + props = dict(self.atomize(declarations)) + if inherited is None: + inherited = {} + + props = self._update_initial(props, inherited) + props = self._update_font_size(props, inherited) + return self._update_other_units(props) + + def _update_initial( + self, + props: dict[str, str], + inherited: dict[str, str], + ) -> dict[str, str]: + # 1. 
resolve inherited, initial + for prop, val in inherited.items(): + if prop not in props: + props[prop] = val + + new_props = props.copy() + for prop, val in props.items(): + if val == "inherit": + val = inherited.get(prop, "initial") + + if val in ("initial", None): + # we do not define a complete initial stylesheet + del new_props[prop] + else: + new_props[prop] = val + return new_props + + def _update_font_size( + self, + props: dict[str, str], + inherited: dict[str, str], + ) -> dict[str, str]: + # 2. resolve relative font size + if props.get("font-size"): + props["font-size"] = self.size_to_pt( + props["font-size"], + self._get_font_size(inherited), + conversions=self.FONT_SIZE_RATIOS, + ) + return props + + def _get_font_size(self, props: dict[str, str]) -> float | None: + if props.get("font-size"): + font_size_string = props["font-size"] + return self._get_float_font_size_from_pt(font_size_string) + return None + + def _get_float_font_size_from_pt(self, font_size_string: str) -> float: + assert font_size_string.endswith("pt") + return float(font_size_string.rstrip("pt")) + + def _update_other_units(self, props: dict[str, str]) -> dict[str, str]: + font_size = self._get_font_size(props) + # 3. TODO: resolve other font-relative units + for side in self.SIDES: + prop = f"border-{side}-width" + if prop in props: + props[prop] = self.size_to_pt( + props[prop], + em_pt=font_size, + conversions=self.BORDER_WIDTH_RATIOS, + ) + + for prop in [f"margin-{side}", f"padding-{side}"]: + if prop in props: + # TODO: support % + props[prop] = self.size_to_pt( + props[prop], + em_pt=font_size, + conversions=self.MARGIN_RATIOS, + ) + return props + + def size_to_pt(self, in_val, em_pt=None, conversions=UNIT_RATIOS) -> str: + def _error(): + warnings.warn( + f"Unhandled size: {repr(in_val)}", + CSSWarning, + stacklevel=find_stack_level(), + ) + return self.size_to_pt("1!!default", conversions=conversions) + + match = re.match(r"^(\S*?)([a-zA-Z%!].*)", in_val) + if match is None: + return _error() + + val, unit = match.groups() + if val == "": + # hack for 'large' etc. + val = 1 + else: + try: + val = float(val) + except ValueError: + return _error() + + while unit != "pt": + if unit == "em": + if em_pt is None: + unit = "rem" + else: + val *= em_pt + unit = "pt" + continue + + try: + unit, mul = conversions[unit] + except KeyError: + return _error() + val *= mul + + val = round(val, 5) + if int(val) == val: + size_fmt = f"{int(val):d}pt" + else: + size_fmt = f"{val:f}pt" + return size_fmt + + def atomize(self, declarations: Iterable) -> Generator[tuple[str, str], None, None]: + for prop, value in declarations: + prop = prop.lower() + value = value.lower() + if prop in self.CSS_EXPANSIONS: + expand = self.CSS_EXPANSIONS[prop] + yield from expand(self, prop, value) + else: + yield prop, value + + def parse(self, declarations_str: str) -> Iterator[tuple[str, str]]: + """ + Generates (prop, value) pairs from declarations. 
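+
+        Editorial example (not upstream text)::
+
+            >>> list(CSSResolver().parse("color: red; width: 10px"))
+            [('color', 'red'), ('width', '10px')]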
+ + In a future version may generate parsed tokens from tinycss/tinycss2 + + Parameters + ---------- + declarations_str : str + """ + for decl in declarations_str.split(";"): + if not decl.strip(): + continue + prop, sep, val = decl.partition(":") + prop = prop.strip().lower() + # TODO: don't lowercase case sensitive parts of values (strings) + val = val.strip().lower() + if sep: + yield prop, val + else: + warnings.warn( + f"Ill-formatted attribute: expected a colon in {repr(decl)}", + CSSWarning, + stacklevel=find_stack_level(), + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/csvs.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/csvs.py new file mode 100644 index 00000000..8d0edd88 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/csvs.py @@ -0,0 +1,326 @@ +""" +Module for formatting output data into CSV files. +""" + +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterable, + Iterator, + Sequence, +) +import csv as csvlib +import os +from typing import ( + TYPE_CHECKING, + Any, + cast, +) + +import numpy as np + +from pandas._libs import writers as libwriters +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.generic import ( + ABCDatetimeIndex, + ABCIndex, + ABCMultiIndex, + ABCPeriodIndex, +) +from pandas.core.dtypes.missing import notna + +from pandas.core.indexes.api import Index + +from pandas.io.common import get_handle + +if TYPE_CHECKING: + from pandas._typing import ( + CompressionOptions, + FilePath, + FloatFormatType, + IndexLabel, + StorageOptions, + WriteBuffer, + ) + + from pandas.io.formats.format import DataFrameFormatter + + +_DEFAULT_CHUNKSIZE_CELLS = 100_000 + + +class CSVFormatter: + cols: np.ndarray + + def __init__( + self, + formatter: DataFrameFormatter, + path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] = "", + sep: str = ",", + cols: Sequence[Hashable] | None = None, + index_label: IndexLabel | None = None, + mode: str = "w", + encoding: str | None = None, + errors: str = "strict", + compression: CompressionOptions = "infer", + quoting: int | None = None, + lineterminator: str | None = "\n", + chunksize: int | None = None, + quotechar: str | None = '"', + date_format: str | None = None, + doublequote: bool = True, + escapechar: str | None = None, + storage_options: StorageOptions | None = None, + ) -> None: + self.fmt = formatter + + self.obj = self.fmt.frame + + self.filepath_or_buffer = path_or_buf + self.encoding = encoding + self.compression: CompressionOptions = compression + self.mode = mode + self.storage_options = storage_options + + self.sep = sep + self.index_label = self._initialize_index_label(index_label) + self.errors = errors + self.quoting = quoting or csvlib.QUOTE_MINIMAL + self.quotechar = self._initialize_quotechar(quotechar) + self.doublequote = doublequote + self.escapechar = escapechar + self.lineterminator = lineterminator or os.linesep + self.date_format = date_format + self.cols = self._initialize_columns(cols) + self.chunksize = self._initialize_chunksize(chunksize) + + @property + def na_rep(self) -> str: + return self.fmt.na_rep + + @property + def float_format(self) -> FloatFormatType | None: + return self.fmt.float_format + + @property + def decimal(self) -> str: + return self.fmt.decimal + + @property + def header(self) -> bool | list[str]: + return self.fmt.header + + @property + def index(self) -> bool: + return self.fmt.index + + def _initialize_index_label(self, index_label: IndexLabel | 
None) -> IndexLabel: + if index_label is not False: + if index_label is None: + return self._get_index_label_from_obj() + elif not isinstance(index_label, (list, tuple, np.ndarray, ABCIndex)): + # given a string for a DF with Index + return [index_label] + return index_label + + def _get_index_label_from_obj(self) -> Sequence[Hashable]: + if isinstance(self.obj.index, ABCMultiIndex): + return self._get_index_label_multiindex() + else: + return self._get_index_label_flat() + + def _get_index_label_multiindex(self) -> Sequence[Hashable]: + return [name or "" for name in self.obj.index.names] + + def _get_index_label_flat(self) -> Sequence[Hashable]: + index_label = self.obj.index.name + return [""] if index_label is None else [index_label] + + def _initialize_quotechar(self, quotechar: str | None) -> str | None: + if self.quoting != csvlib.QUOTE_NONE: + # prevents crash in _csv + return quotechar + return None + + @property + def has_mi_columns(self) -> bool: + return bool(isinstance(self.obj.columns, ABCMultiIndex)) + + def _initialize_columns(self, cols: Iterable[Hashable] | None) -> np.ndarray: + # validate mi options + if self.has_mi_columns: + if cols is not None: + msg = "cannot specify cols with a MultiIndex on the columns" + raise TypeError(msg) + + if cols is not None: + if isinstance(cols, ABCIndex): + cols = cols._format_native_types(**self._number_format) + else: + cols = list(cols) + self.obj = self.obj.loc[:, cols] + + # update columns to include possible multiplicity of dupes + # and make sure cols is just a list of labels + new_cols = self.obj.columns + return new_cols._format_native_types(**self._number_format) + + def _initialize_chunksize(self, chunksize: int | None) -> int: + if chunksize is None: + return (_DEFAULT_CHUNKSIZE_CELLS // (len(self.cols) or 1)) or 1 + return int(chunksize) + + @property + def _number_format(self) -> dict[str, Any]: + """Dictionary used for storing number formatting settings.""" + return { + "na_rep": self.na_rep, + "float_format": self.float_format, + "date_format": self.date_format, + "quoting": self.quoting, + "decimal": self.decimal, + } + + @cache_readonly + def data_index(self) -> Index: + data_index = self.obj.index + if ( + isinstance(data_index, (ABCDatetimeIndex, ABCPeriodIndex)) + and self.date_format is not None + ): + data_index = Index( + [x.strftime(self.date_format) if notna(x) else "" for x in data_index] + ) + elif isinstance(data_index, ABCMultiIndex): + data_index = data_index.remove_unused_levels() + return data_index + + @property + def nlevels(self) -> int: + if self.index: + return getattr(self.data_index, "nlevels", 1) + else: + return 0 + + @property + def _has_aliases(self) -> bool: + return isinstance(self.header, (tuple, list, np.ndarray, ABCIndex)) + + @property + def _need_to_save_header(self) -> bool: + return bool(self._has_aliases or self.header) + + @property + def write_cols(self) -> Sequence[Hashable]: + if self._has_aliases: + assert not isinstance(self.header, bool) + if len(self.header) != len(self.cols): + raise ValueError( + f"Writing {len(self.cols)} cols but got {len(self.header)} aliases" + ) + return self.header + else: + # self.cols is an ndarray derived from Index._format_native_types, + # so its entries are strings, i.e. 
hashable + return cast(Sequence[Hashable], self.cols) + + @property + def encoded_labels(self) -> list[Hashable]: + encoded_labels: list[Hashable] = [] + + if self.index and self.index_label: + assert isinstance(self.index_label, Sequence) + encoded_labels = list(self.index_label) + + if not self.has_mi_columns or self._has_aliases: + encoded_labels += list(self.write_cols) + + return encoded_labels + + def save(self) -> None: + """ + Create the writer & save. + """ + # apply compression and byte/text conversion + with get_handle( + self.filepath_or_buffer, + self.mode, + encoding=self.encoding, + errors=self.errors, + compression=self.compression, + storage_options=self.storage_options, + ) as handles: + # Note: self.encoding is irrelevant here + self.writer = csvlib.writer( + handles.handle, + lineterminator=self.lineterminator, + delimiter=self.sep, + quoting=self.quoting, + doublequote=self.doublequote, + escapechar=self.escapechar, + quotechar=self.quotechar, + ) + + self._save() + + def _save(self) -> None: + if self._need_to_save_header: + self._save_header() + self._save_body() + + def _save_header(self) -> None: + if not self.has_mi_columns or self._has_aliases: + self.writer.writerow(self.encoded_labels) + else: + for row in self._generate_multiindex_header_rows(): + self.writer.writerow(row) + + def _generate_multiindex_header_rows(self) -> Iterator[list[Hashable]]: + columns = self.obj.columns + for i in range(columns.nlevels): + # we need at least 1 index column to write our col names + col_line = [] + if self.index: + # name is the first column + col_line.append(columns.names[i]) + + if isinstance(self.index_label, list) and len(self.index_label) > 1: + col_line.extend([""] * (len(self.index_label) - 1)) + + col_line.extend(columns._get_level_values(i)) + yield col_line + + # Write out the index line if it's not empty. + # Otherwise, we will print out an extraneous + # blank line between the mi and the data rows. + if self.encoded_labels and set(self.encoded_labels) != {""}: + yield self.encoded_labels + [""] * len(columns) + + def _save_body(self) -> None: + nrows = len(self.data_index) + chunks = (nrows // self.chunksize) + 1 + for i in range(chunks): + start_i = i * self.chunksize + end_i = min(start_i + self.chunksize, nrows) + if start_i >= end_i: + break + self._save_chunk(start_i, end_i) + + def _save_chunk(self, start_i: int, end_i: int) -> None: + # create the data for a chunk + slicer = slice(start_i, end_i) + df = self.obj.iloc[slicer] + + res = df._mgr.to_native_types(**self._number_format) + data = [res.iget_values(i) for i in range(len(res.items))] + + ix = self.data_index[slicer]._format_native_types(**self._number_format) + libwriters.write_csv_rows( + data, + ix, + self.nlevels, + self.cols, + self.writer, + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/excel.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/excel.py new file mode 100644 index 00000000..9970d465 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/excel.py @@ -0,0 +1,965 @@ +""" +Utilities for conversion to writer-agnostic Excel representation. 
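+
+Editorial example (not upstream text), a minimal sketch of the pipeline
+defined below: :class:`ExcelFormatter` renders a DataFrame as a stream of
+``ExcelCell`` objects that any writer engine can consume::
+
+    >>> import pandas as pd
+    >>> from pandas.io.formats.excel import ExcelFormatter
+    >>> cells = ExcelFormatter(pd.DataFrame({"a": [1]})).get_formatted_cells()
+    >>> [(c.row, c.col, c.val) for c in cells]
+    [(0, 1, 'a'), (1, 0, 0), (1, 1, 1)]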
+""" +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterable, + Mapping, + Sequence, +) +import functools +import itertools +import re +from typing import ( + TYPE_CHECKING, + Any, + Callable, + cast, +) +import warnings + +import numpy as np + +from pandas._libs.lib import is_list_like +from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes import missing +from pandas.core.dtypes.common import ( + is_float, + is_scalar, +) + +from pandas import ( + DataFrame, + Index, + MultiIndex, + PeriodIndex, +) +import pandas.core.common as com +from pandas.core.shared_docs import _shared_docs + +from pandas.io.formats._color_data import CSS4_COLORS +from pandas.io.formats.css import ( + CSSResolver, + CSSWarning, +) +from pandas.io.formats.format import get_level_lengths +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from pandas._typing import ( + FilePath, + IndexLabel, + StorageOptions, + WriteExcelBuffer, + ) + + from pandas import ExcelWriter + + +class ExcelCell: + __fields__ = ("row", "col", "val", "style", "mergestart", "mergeend") + __slots__ = __fields__ + + def __init__( + self, + row: int, + col: int, + val, + style=None, + mergestart: int | None = None, + mergeend: int | None = None, + ) -> None: + self.row = row + self.col = col + self.val = val + self.style = style + self.mergestart = mergestart + self.mergeend = mergeend + + +class CssExcelCell(ExcelCell): + def __init__( + self, + row: int, + col: int, + val, + style: dict | None, + css_styles: dict[tuple[int, int], list[tuple[str, Any]]] | None, + css_row: int, + css_col: int, + css_converter: Callable | None, + **kwargs, + ) -> None: + if css_styles and css_converter: + # Use dict to get only one (case-insensitive) declaration per property + declaration_dict = { + prop.lower(): val for prop, val in css_styles[css_row, css_col] + } + # Convert to frozenset for order-invariant caching + unique_declarations = frozenset(declaration_dict.items()) + style = css_converter(unique_declarations) + + super().__init__(row=row, col=col, val=val, style=style, **kwargs) + + +class CSSToExcelConverter: + """ + A callable for converting CSS declarations to ExcelWriter styles + + Supports parts of CSS 2.2, with minimal CSS 3.0 support (e.g. text-shadow), + focusing on font styling, backgrounds, borders and alignment. + + Operates by first computing CSS styles in a fairly generic + way (see :meth:`compute_css`) then determining Excel style + properties from CSS properties (see :meth:`build_xlstyle`). + + Parameters + ---------- + inherited : str, optional + CSS declarations understood to be the containing scope for the + CSS processed by :meth:`__call__`. 
+ """ + + NAMED_COLORS = CSS4_COLORS + + VERTICAL_MAP = { + "top": "top", + "text-top": "top", + "middle": "center", + "baseline": "bottom", + "bottom": "bottom", + "text-bottom": "bottom", + # OpenXML also has 'justify', 'distributed' + } + + BOLD_MAP = { + "bold": True, + "bolder": True, + "600": True, + "700": True, + "800": True, + "900": True, + "normal": False, + "lighter": False, + "100": False, + "200": False, + "300": False, + "400": False, + "500": False, + } + + ITALIC_MAP = { + "normal": False, + "italic": True, + "oblique": True, + } + + FAMILY_MAP = { + "serif": 1, # roman + "sans-serif": 2, # swiss + "cursive": 4, # script + "fantasy": 5, # decorative + } + + BORDER_STYLE_MAP = { + style.lower(): style + for style in [ + "dashed", + "mediumDashDot", + "dashDotDot", + "hair", + "dotted", + "mediumDashDotDot", + "double", + "dashDot", + "slantDashDot", + "mediumDashed", + ] + } + + # NB: Most of the methods here could be classmethods, as only __init__ + # and __call__ make use of instance attributes. We leave them as + # instancemethods so that users can easily experiment with extensions + # without monkey-patching. + inherited: dict[str, str] | None + + def __init__(self, inherited: str | None = None) -> None: + if inherited is not None: + self.inherited = self.compute_css(inherited) + else: + self.inherited = None + # We should avoid cache on the __call__ method. + # Otherwise once the method __call__ has been called + # garbage collection no longer deletes the instance. + self._call_cached = functools.cache(self._call_uncached) + + compute_css = CSSResolver() + + def __call__( + self, declarations: str | frozenset[tuple[str, str]] + ) -> dict[str, dict[str, str]]: + """ + Convert CSS declarations to ExcelWriter style. + + Parameters + ---------- + declarations : str | frozenset[tuple[str, str]] + CSS string or set of CSS declaration tuples. + e.g. "font-weight: bold; background: blue" or + {("font-weight", "bold"), ("background", "blue")} + + Returns + ------- + xlstyle : dict + A style as interpreted by ExcelWriter when found in + ExcelCell.style. 
+ """ + return self._call_cached(declarations) + + def _call_uncached( + self, declarations: str | frozenset[tuple[str, str]] + ) -> dict[str, dict[str, str]]: + properties = self.compute_css(declarations, self.inherited) + return self.build_xlstyle(properties) + + def build_xlstyle(self, props: Mapping[str, str]) -> dict[str, dict[str, str]]: + out = { + "alignment": self.build_alignment(props), + "border": self.build_border(props), + "fill": self.build_fill(props), + "font": self.build_font(props), + "number_format": self.build_number_format(props), + } + + # TODO: handle cell width and height: needs support in pandas.io.excel + + def remove_none(d: dict[str, str | None]) -> None: + """Remove key where value is None, through nested dicts""" + for k, v in list(d.items()): + if v is None: + del d[k] + elif isinstance(v, dict): + remove_none(v) + if not v: + del d[k] + + remove_none(out) + return out + + def build_alignment(self, props: Mapping[str, str]) -> dict[str, bool | str | None]: + # TODO: text-indent, padding-left -> alignment.indent + return { + "horizontal": props.get("text-align"), + "vertical": self._get_vertical_alignment(props), + "wrap_text": self._get_is_wrap_text(props), + } + + def _get_vertical_alignment(self, props: Mapping[str, str]) -> str | None: + vertical_align = props.get("vertical-align") + if vertical_align: + return self.VERTICAL_MAP.get(vertical_align) + return None + + def _get_is_wrap_text(self, props: Mapping[str, str]) -> bool | None: + if props.get("white-space") is None: + return None + return bool(props["white-space"] not in ("nowrap", "pre", "pre-line")) + + def build_border( + self, props: Mapping[str, str] + ) -> dict[str, dict[str, str | None]]: + return { + side: { + "style": self._border_style( + props.get(f"border-{side}-style"), + props.get(f"border-{side}-width"), + self.color_to_excel(props.get(f"border-{side}-color")), + ), + "color": self.color_to_excel(props.get(f"border-{side}-color")), + } + for side in ["top", "right", "bottom", "left"] + } + + def _border_style(self, style: str | None, width: str | None, color: str | None): + # convert styles and widths to openxml, one of: + # 'dashDot' + # 'dashDotDot' + # 'dashed' + # 'dotted' + # 'double' + # 'hair' + # 'medium' + # 'mediumDashDot' + # 'mediumDashDotDot' + # 'mediumDashed' + # 'slantDashDot' + # 'thick' + # 'thin' + if width is None and style is None and color is None: + # Return None will remove "border" from style dictionary + return None + + if width is None and style is None: + # Return "none" will keep "border" in style dictionary + return "none" + + if style in ("none", "hidden"): + return "none" + + width_name = self._get_width_name(width) + if width_name is None: + return "none" + + if style in (None, "groove", "ridge", "inset", "outset", "solid"): + # not handled + return width_name + + if style == "double": + return "double" + if style == "dotted": + if width_name in ("hair", "thin"): + return "dotted" + return "mediumDashDotDot" + if style == "dashed": + if width_name in ("hair", "thin"): + return "dashed" + return "mediumDashed" + elif style in self.BORDER_STYLE_MAP: + # Excel-specific styles + return self.BORDER_STYLE_MAP[style] + else: + warnings.warn( + f"Unhandled border style format: {repr(style)}", + CSSWarning, + stacklevel=find_stack_level(), + ) + return "none" + + def _get_width_name(self, width_input: str | None) -> str | None: + width = self._width_to_float(width_input) + if width < 1e-5: + return None + elif width < 1.3: + return "thin" + elif width < 2.8: + 
return "medium" + return "thick" + + def _width_to_float(self, width: str | None) -> float: + if width is None: + width = "2pt" + return self._pt_to_float(width) + + def _pt_to_float(self, pt_string: str) -> float: + assert pt_string.endswith("pt") + return float(pt_string.rstrip("pt")) + + def build_fill(self, props: Mapping[str, str]): + # TODO: perhaps allow for special properties + # -excel-pattern-bgcolor and -excel-pattern-type + fill_color = props.get("background-color") + if fill_color not in (None, "transparent", "none"): + return {"fgColor": self.color_to_excel(fill_color), "patternType": "solid"} + + def build_number_format(self, props: Mapping[str, str]) -> dict[str, str | None]: + fc = props.get("number-format") + fc = fc.replace("§", ";") if isinstance(fc, str) else fc + return {"format_code": fc} + + def build_font( + self, props: Mapping[str, str] + ) -> dict[str, bool | float | str | None]: + font_names = self._get_font_names(props) + decoration = self._get_decoration(props) + return { + "name": font_names[0] if font_names else None, + "family": self._select_font_family(font_names), + "size": self._get_font_size(props), + "bold": self._get_is_bold(props), + "italic": self._get_is_italic(props), + "underline": ("single" if "underline" in decoration else None), + "strike": ("line-through" in decoration) or None, + "color": self.color_to_excel(props.get("color")), + # shadow if nonzero digit before shadow color + "shadow": self._get_shadow(props), + } + + def _get_is_bold(self, props: Mapping[str, str]) -> bool | None: + weight = props.get("font-weight") + if weight: + return self.BOLD_MAP.get(weight) + return None + + def _get_is_italic(self, props: Mapping[str, str]) -> bool | None: + font_style = props.get("font-style") + if font_style: + return self.ITALIC_MAP.get(font_style) + return None + + def _get_decoration(self, props: Mapping[str, str]) -> Sequence[str]: + decoration = props.get("text-decoration") + if decoration is not None: + return decoration.split() + else: + return () + + def _get_underline(self, decoration: Sequence[str]) -> str | None: + if "underline" in decoration: + return "single" + return None + + def _get_shadow(self, props: Mapping[str, str]) -> bool | None: + if "text-shadow" in props: + return bool(re.search("^[^#(]*[1-9]", props["text-shadow"])) + return None + + def _get_font_names(self, props: Mapping[str, str]) -> Sequence[str]: + font_names_tmp = re.findall( + r"""(?x) + ( + "(?:[^"]|\\")+" + | + '(?:[^']|\\')+' + | + [^'",]+ + )(?=,|\s*$) + """, + props.get("font-family", ""), + ) + + font_names = [] + for name in font_names_tmp: + if name[:1] == '"': + name = name[1:-1].replace('\\"', '"') + elif name[:1] == "'": + name = name[1:-1].replace("\\'", "'") + else: + name = name.strip() + if name: + font_names.append(name) + return font_names + + def _get_font_size(self, props: Mapping[str, str]) -> float | None: + size = props.get("font-size") + if size is None: + return size + return self._pt_to_float(size) + + def _select_font_family(self, font_names: Sequence[str]) -> int | None: + family = None + for name in font_names: + family = self.FAMILY_MAP.get(name) + if family: + break + + return family + + def color_to_excel(self, val: str | None) -> str | None: + if val is None: + return None + + if self._is_hex_color(val): + return self._convert_hex_to_excel(val) + + try: + return self.NAMED_COLORS[val] + except KeyError: + warnings.warn( + f"Unhandled color format: {repr(val)}", + CSSWarning, + stacklevel=find_stack_level(), + ) + return None + + 
def _is_hex_color(self, color_string: str) -> bool: + return bool(color_string.startswith("#")) + + def _convert_hex_to_excel(self, color_string: str) -> str: + code = color_string.lstrip("#") + if self._is_shorthand_color(color_string): + return (code[0] * 2 + code[1] * 2 + code[2] * 2).upper() + else: + return code.upper() + + def _is_shorthand_color(self, color_string: str) -> bool: + """Check if color code is shorthand. + + #FFF is a shorthand as opposed to full #FFFFFF. + """ + code = color_string.lstrip("#") + if len(code) == 3: + return True + elif len(code) == 6: + return False + else: + raise ValueError(f"Unexpected color {color_string}") + + +class ExcelFormatter: + """ + Class for formatting a DataFrame to a list of ExcelCells, + + Parameters + ---------- + df : DataFrame or Styler + na_rep: na representation + float_format : str, default None + Format string for floating point numbers + cols : sequence, optional + Columns to write + header : bool or sequence of str, default True + Write out column names. If a list of string is given it is + assumed to be aliases for the column names + index : bool, default True + output row names (index) + index_label : str or sequence, default None + Column label for index column(s) if desired. If None is given, and + `header` and `index` are True, then the index names are used. A + sequence should be given if the DataFrame uses MultiIndex. + merge_cells : bool, default False + Format MultiIndex and Hierarchical Rows as merged cells. + inf_rep : str, default `'inf'` + representation for np.inf values (which aren't representable in Excel) + A `'-'` sign will be added in front of -inf. + style_converter : callable, optional + This translates Styler styles (CSS) into ExcelWriter styles. + Defaults to ``CSSToExcelConverter()``. + It should have signature css_declarations string -> excel style. + This is only called for body cells. 
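+
+    Editorial example (not upstream text; writing requires an Excel engine
+    such as openpyxl)::
+
+        >>> import pandas as pd
+        >>> df = pd.DataFrame({"a": [1, 2]})
+        >>> ExcelFormatter(df, na_rep="-").write("out.xlsx")  # doctest: +SKIP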
+ """ + + max_rows = 2**20 + max_cols = 2**14 + + def __init__( + self, + df, + na_rep: str = "", + float_format: str | None = None, + cols: Sequence[Hashable] | None = None, + header: Sequence[Hashable] | bool = True, + index: bool = True, + index_label: IndexLabel | None = None, + merge_cells: bool = False, + inf_rep: str = "inf", + style_converter: Callable | None = None, + ) -> None: + self.rowcounter = 0 + self.na_rep = na_rep + if not isinstance(df, DataFrame): + self.styler = df + self.styler._compute() # calculate applied styles + df = df.data + if style_converter is None: + style_converter = CSSToExcelConverter() + self.style_converter: Callable | None = style_converter + else: + self.styler = None + self.style_converter = None + self.df = df + if cols is not None: + # all missing, raise + if not len(Index(cols).intersection(df.columns)): + raise KeyError("passes columns are not ALL present dataframe") + + if len(Index(cols).intersection(df.columns)) != len(set(cols)): + # Deprecated in GH#17295, enforced in 1.0.0 + raise KeyError("Not all names specified in 'columns' are found") + + self.df = df.reindex(columns=cols) + + self.columns = self.df.columns + self.float_format = float_format + self.index = index + self.index_label = index_label + self.header = header + self.merge_cells = merge_cells + self.inf_rep = inf_rep + + @property + def header_style(self) -> dict[str, dict[str, str | bool]]: + return { + "font": {"bold": True}, + "borders": { + "top": "thin", + "right": "thin", + "bottom": "thin", + "left": "thin", + }, + "alignment": {"horizontal": "center", "vertical": "top"}, + } + + def _format_value(self, val): + if is_scalar(val) and missing.isna(val): + val = self.na_rep + elif is_float(val): + if missing.isposinf_scalar(val): + val = self.inf_rep + elif missing.isneginf_scalar(val): + val = f"-{self.inf_rep}" + elif self.float_format is not None: + val = float(self.float_format % val) + if getattr(val, "tzinfo", None) is not None: + raise ValueError( + "Excel does not support datetimes with " + "timezones. Please ensure that datetimes " + "are timezone unaware before writing to Excel." + ) + return val + + def _format_header_mi(self) -> Iterable[ExcelCell]: + if self.columns.nlevels > 1: + if not self.index: + raise NotImplementedError( + "Writing to Excel with MultiIndex columns and no " + "index ('index'=False) is not yet implemented." + ) + + if not (self._has_aliases or self.header): + return + + columns = self.columns + level_strs = columns.format( + sparsify=self.merge_cells, adjoin=False, names=False + ) + level_lengths = get_level_lengths(level_strs) + coloffset = 0 + lnum = 0 + + if self.index and isinstance(self.df.index, MultiIndex): + coloffset = len(self.df.index[0]) - 1 + + if self.merge_cells: + # Format multi-index as a merged cells. 
+ for lnum, name in enumerate(columns.names): + yield ExcelCell( + row=lnum, + col=coloffset, + val=name, + style=self.header_style, + ) + + for lnum, (spans, levels, level_codes) in enumerate( + zip(level_lengths, columns.levels, columns.codes) + ): + values = levels.take(level_codes) + for i, span_val in spans.items(): + mergestart, mergeend = None, None + if span_val > 1: + mergestart, mergeend = lnum, coloffset + i + span_val + yield CssExcelCell( + row=lnum, + col=coloffset + i + 1, + val=values[i], + style=self.header_style, + css_styles=getattr(self.styler, "ctx_columns", None), + css_row=lnum, + css_col=i, + css_converter=self.style_converter, + mergestart=mergestart, + mergeend=mergeend, + ) + else: + # Format in legacy format with dots to indicate levels. + for i, values in enumerate(zip(*level_strs)): + v = ".".join(map(pprint_thing, values)) + yield CssExcelCell( + row=lnum, + col=coloffset + i + 1, + val=v, + style=self.header_style, + css_styles=getattr(self.styler, "ctx_columns", None), + css_row=lnum, + css_col=i, + css_converter=self.style_converter, + ) + + self.rowcounter = lnum + + def _format_header_regular(self) -> Iterable[ExcelCell]: + if self._has_aliases or self.header: + coloffset = 0 + + if self.index: + coloffset = 1 + if isinstance(self.df.index, MultiIndex): + coloffset = len(self.df.index.names) + + colnames = self.columns + if self._has_aliases: + self.header = cast(Sequence, self.header) + if len(self.header) != len(self.columns): + raise ValueError( + f"Writing {len(self.columns)} cols " + f"but got {len(self.header)} aliases" + ) + colnames = self.header + + for colindex, colname in enumerate(colnames): + yield CssExcelCell( + row=self.rowcounter, + col=colindex + coloffset, + val=colname, + style=self.header_style, + css_styles=getattr(self.styler, "ctx_columns", None), + css_row=0, + css_col=colindex, + css_converter=self.style_converter, + ) + + def _format_header(self) -> Iterable[ExcelCell]: + gen: Iterable[ExcelCell] + + if isinstance(self.columns, MultiIndex): + gen = self._format_header_mi() + else: + gen = self._format_header_regular() + + gen2: Iterable[ExcelCell] = () + + if self.df.index.names: + row = [x if x is not None else "" for x in self.df.index.names] + [ + "" + ] * len(self.columns) + if functools.reduce(lambda x, y: x and y, (x != "" for x in row)): + gen2 = ( + ExcelCell(self.rowcounter, colindex, val, self.header_style) + for colindex, val in enumerate(row) + ) + self.rowcounter += 1 + return itertools.chain(gen, gen2) + + def _format_body(self) -> Iterable[ExcelCell]: + if isinstance(self.df.index, MultiIndex): + return self._format_hierarchical_rows() + else: + return self._format_regular_rows() + + def _format_regular_rows(self) -> Iterable[ExcelCell]: + if self._has_aliases or self.header: + self.rowcounter += 1 + + # output index and index_label? 
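+        # Editorial note (not upstream code): when the index is written, its
+        # label goes on the header row (rowcounter - 1) and the index values
+        # fill column 0 of the body rows below.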
+ if self.index: + # check aliases + # if list only take first as this is not a MultiIndex + if self.index_label and isinstance( + self.index_label, (list, tuple, np.ndarray, Index) + ): + index_label = self.index_label[0] + # if string good to go + elif self.index_label and isinstance(self.index_label, str): + index_label = self.index_label + else: + index_label = self.df.index.names[0] + + if isinstance(self.columns, MultiIndex): + self.rowcounter += 1 + + if index_label and self.header is not False: + yield ExcelCell(self.rowcounter - 1, 0, index_label, self.header_style) + + # write index_values + index_values = self.df.index + if isinstance(self.df.index, PeriodIndex): + index_values = self.df.index.to_timestamp() + + for idx, idxval in enumerate(index_values): + yield CssExcelCell( + row=self.rowcounter + idx, + col=0, + val=idxval, + style=self.header_style, + css_styles=getattr(self.styler, "ctx_index", None), + css_row=idx, + css_col=0, + css_converter=self.style_converter, + ) + coloffset = 1 + else: + coloffset = 0 + + yield from self._generate_body(coloffset) + + def _format_hierarchical_rows(self) -> Iterable[ExcelCell]: + if self._has_aliases or self.header: + self.rowcounter += 1 + + gcolidx = 0 + + if self.index: + index_labels = self.df.index.names + # check for aliases + if self.index_label and isinstance( + self.index_label, (list, tuple, np.ndarray, Index) + ): + index_labels = self.index_label + + # MultiIndex columns require an extra row + # with index names (blank if None) for + # unambiguous round-trip, unless not merging, + # in which case the names all go on one row Issue #11328 + if isinstance(self.columns, MultiIndex) and self.merge_cells: + self.rowcounter += 1 + + # if index labels are not empty go ahead and dump + if com.any_not_none(*index_labels) and self.header is not False: + for cidx, name in enumerate(index_labels): + yield ExcelCell(self.rowcounter - 1, cidx, name, self.header_style) + + if self.merge_cells: + # Format hierarchical rows as merged cells. + level_strs = self.df.index.format( + sparsify=True, adjoin=False, names=False + ) + level_lengths = get_level_lengths(level_strs) + + for spans, levels, level_codes in zip( + level_lengths, self.df.index.levels, self.df.index.codes + ): + values = levels.take( + level_codes, + allow_fill=levels._can_hold_na, + fill_value=levels._na_value, + ) + + for i, span_val in spans.items(): + mergestart, mergeend = None, None + if span_val > 1: + mergestart = self.rowcounter + i + span_val - 1 + mergeend = gcolidx + yield CssExcelCell( + row=self.rowcounter + i, + col=gcolidx, + val=values[i], + style=self.header_style, + css_styles=getattr(self.styler, "ctx_index", None), + css_row=i, + css_col=gcolidx, + css_converter=self.style_converter, + mergestart=mergestart, + mergeend=mergeend, + ) + gcolidx += 1 + + else: + # Format hierarchical rows with non-merged values. 
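+            # Editorial note (not upstream code): in this branch every level
+            # value gets its own cell, so repeated MultiIndex values like
+            # [("a", 1), ("a", 2)] write "a" on both rows instead of merging.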
+ for indexcolvals in zip(*self.df.index): + for idx, indexcolval in enumerate(indexcolvals): + yield CssExcelCell( + row=self.rowcounter + idx, + col=gcolidx, + val=indexcolval, + style=self.header_style, + css_styles=getattr(self.styler, "ctx_index", None), + css_row=idx, + css_col=gcolidx, + css_converter=self.style_converter, + ) + gcolidx += 1 + + yield from self._generate_body(gcolidx) + + @property + def _has_aliases(self) -> bool: + """Whether the aliases for column names are present.""" + return is_list_like(self.header) + + def _generate_body(self, coloffset: int) -> Iterable[ExcelCell]: + # Write the body of the frame data series by series. + for colidx in range(len(self.columns)): + series = self.df.iloc[:, colidx] + for i, val in enumerate(series): + yield CssExcelCell( + row=self.rowcounter + i, + col=colidx + coloffset, + val=val, + style=None, + css_styles=getattr(self.styler, "ctx", None), + css_row=i, + css_col=colidx, + css_converter=self.style_converter, + ) + + def get_formatted_cells(self) -> Iterable[ExcelCell]: + for cell in itertools.chain(self._format_header(), self._format_body()): + cell.val = self._format_value(cell.val) + yield cell + + @doc(storage_options=_shared_docs["storage_options"]) + def write( + self, + writer: FilePath | WriteExcelBuffer | ExcelWriter, + sheet_name: str = "Sheet1", + startrow: int = 0, + startcol: int = 0, + freeze_panes: tuple[int, int] | None = None, + engine: str | None = None, + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + """ + writer : path-like, file-like, or ExcelWriter object + File path or existing ExcelWriter + sheet_name : str, default 'Sheet1' + Name of sheet which will contain DataFrame + startrow : + upper left cell row to dump data frame + startcol : + upper left cell column to dump data frame + freeze_panes : tuple of integer (length 2), default None + Specifies the one-based bottommost row and rightmost column that + is to be frozen + engine : string, default None + write engine to use if writer is a path - you can also set this + via the options ``io.excel.xlsx.writer``, + or ``io.excel.xlsm.writer``. + + {storage_options} + + .. versionadded:: 1.2.0 + engine_kwargs: dict, optional + Arbitrary keyword arguments passed to excel engine. + """ + from pandas.io.excel import ExcelWriter + + num_rows, num_cols = self.df.shape + if num_rows > self.max_rows or num_cols > self.max_cols: + raise ValueError( + f"This sheet is too large! 
Your sheet size is: {num_rows}, {num_cols} " + f"Max sheet size is: {self.max_rows}, {self.max_cols}" + ) + + if engine_kwargs is None: + engine_kwargs = {} + + formatted_cells = self.get_formatted_cells() + if isinstance(writer, ExcelWriter): + need_save = False + else: + # error: Cannot instantiate abstract class 'ExcelWriter' with abstract + # attributes 'engine', 'save', 'supported_extensions' and 'write_cells' + writer = ExcelWriter( # type: ignore[abstract] + writer, + engine=engine, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + need_save = True + + try: + writer._write_cells( + formatted_cells, + sheet_name, + startrow=startrow, + startcol=startcol, + freeze_panes=freeze_panes, + ) + finally: + # make sure to close opened file handles + if need_save: + writer.close() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/format.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/format.py new file mode 100644 index 00000000..2297f794 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/format.py @@ -0,0 +1,2241 @@ +""" +Internal module for formatting output data in csv, html, xml, +and latex files. This module also applies to display formatting. +""" +from __future__ import annotations + +from collections.abc import ( + Generator, + Hashable, + Iterable, + Mapping, + Sequence, +) +from contextlib import contextmanager +from csv import ( + QUOTE_NONE, + QUOTE_NONNUMERIC, +) +from decimal import Decimal +from functools import partial +from io import StringIO +import math +import re +from shutil import get_terminal_size +from typing import ( + IO, + TYPE_CHECKING, + Any, + Callable, + Final, + cast, +) +from unicodedata import east_asian_width + +import numpy as np + +from pandas._config.config import ( + get_option, + set_option, +) + +from pandas._libs import lib +from pandas._libs.missing import NA +from pandas._libs.tslibs import ( + NaT, + Timedelta, + Timestamp, + get_unit_from_dtype, + iNaT, + periods_per_day, +) +from pandas._libs.tslibs.nattype import NaTType + +from pandas.core.dtypes.common import ( + is_complex_dtype, + is_float, + is_integer, + is_list_like, + is_numeric_dtype, + is_scalar, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, +) +from pandas.core.dtypes.missing import ( + isna, + notna, +) + +from pandas.core.arrays import ( + Categorical, + DatetimeArray, + TimedeltaArray, +) +from pandas.core.arrays.string_ import StringDtype +from pandas.core.base import PandasObject +import pandas.core.common as com +from pandas.core.construction import extract_array +from pandas.core.indexes.api import ( + Index, + MultiIndex, + PeriodIndex, + ensure_index, +) +from pandas.core.indexes.datetimes import DatetimeIndex +from pandas.core.indexes.timedeltas import TimedeltaIndex +from pandas.core.reshape.concat import concat + +from pandas.io.common import ( + check_parent_directory, + stringify_path, +) +from pandas.io.formats import printing + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + Axes, + ColspaceArgType, + ColspaceType, + CompressionOptions, + FilePath, + FloatFormatType, + FormattersType, + IndexLabel, + StorageOptions, + WriteBuffer, + ) + + from pandas import ( + DataFrame, + Series, + ) + + +common_docstring: Final = """ + Parameters + ---------- + buf : str, Path or StringIO-like, optional, default None + Buffer to write to. If None, the output is returned as a string. 
+ columns : array-like, optional, default None + The subset of columns to write. Writes all columns by default. + col_space : %(col_space_type)s, optional + %(col_space)s. + header : %(header_type)s, optional + %(header)s. + index : bool, optional, default True + Whether to print index (row) labels. + na_rep : str, optional, default 'NaN' + String representation of ``NaN`` to use. + formatters : list, tuple or dict of one-param. functions, optional + Formatter functions to apply to columns' elements by position or + name. + The result of each function must be a unicode string. + List/tuple must be of length equal to the number of columns. + float_format : one-parameter function, optional, default None + Formatter function to apply to columns' elements if they are + floats. This function must return a unicode string and will be + applied only to the non-``NaN`` elements, with ``NaN`` being + handled by ``na_rep``. + + .. versionchanged:: 1.2.0 + + sparsify : bool, optional, default True + Set to False for a DataFrame with a hierarchical index to print + every multiindex key at each row. + index_names : bool, optional, default True + Prints the names of the indexes. + justify : str, default None + How to justify the column labels. If None uses the option from + the print configuration (controlled by set_option), 'right' out + of the box. Valid values are + + * left + * right + * center + * justify + * justify-all + * start + * end + * inherit + * match-parent + * initial + * unset. + max_rows : int, optional + Maximum number of rows to display in the console. + max_cols : int, optional + Maximum number of columns to display in the console. + show_dimensions : bool, default False + Display DataFrame dimensions (number of rows by number of columns). + decimal : str, default '.' + Character recognized as decimal separator, e.g. ',' in Europe. + """ + +_VALID_JUSTIFY_PARAMETERS = ( + "left", + "right", + "center", + "justify", + "justify-all", + "start", + "end", + "inherit", + "match-parent", + "initial", + "unset", +) + +return_docstring: Final = """ + Returns + ------- + str or None + If buf is None, returns the result as a string. Otherwise returns + None. 
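+
+    Editorial example (not upstream text)::
+
+        >>> import pandas as pd
+        >>> isinstance(pd.DataFrame({"a": [1]}).to_string(), str)
+        True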
+ """ + + +class CategoricalFormatter: + def __init__( + self, + categorical: Categorical, + buf: IO[str] | None = None, + length: bool = True, + na_rep: str = "NaN", + footer: bool = True, + ) -> None: + self.categorical = categorical + self.buf = buf if buf is not None else StringIO("") + self.na_rep = na_rep + self.length = length + self.footer = footer + self.quoting = QUOTE_NONNUMERIC + + def _get_footer(self) -> str: + footer = "" + + if self.length: + if footer: + footer += ", " + footer += f"Length: {len(self.categorical)}" + + level_info = self.categorical._repr_categories_info() + + # Levels are added in a newline + if footer: + footer += "\n" + footer += level_info + + return str(footer) + + def _get_formatted_values(self) -> list[str]: + return format_array( + self.categorical._internal_get_values(), + None, + float_format=None, + na_rep=self.na_rep, + quoting=self.quoting, + ) + + def to_string(self) -> str: + categorical = self.categorical + + if len(categorical) == 0: + if self.footer: + return self._get_footer() + else: + return "" + + fmt_values = self._get_formatted_values() + + fmt_values = [i.strip() for i in fmt_values] + values = ", ".join(fmt_values) + result = ["[" + values + "]"] + if self.footer: + footer = self._get_footer() + if footer: + result.append(footer) + + return str("\n".join(result)) + + +class SeriesFormatter: + def __init__( + self, + series: Series, + buf: IO[str] | None = None, + length: bool | str = True, + header: bool = True, + index: bool = True, + na_rep: str = "NaN", + name: bool = False, + float_format: str | None = None, + dtype: bool = True, + max_rows: int | None = None, + min_rows: int | None = None, + ) -> None: + self.series = series + self.buf = buf if buf is not None else StringIO() + self.name = name + self.na_rep = na_rep + self.header = header + self.length = length + self.index = index + self.max_rows = max_rows + self.min_rows = min_rows + + if float_format is None: + float_format = get_option("display.float_format") + self.float_format = float_format + self.dtype = dtype + self.adj = get_adjustment() + + self._chk_truncate() + + def _chk_truncate(self) -> None: + self.tr_row_num: int | None + + min_rows = self.min_rows + max_rows = self.max_rows + # truncation determined by max_rows, actual truncated number of rows + # used below by min_rows + is_truncated_vertically = max_rows and (len(self.series) > max_rows) + series = self.series + if is_truncated_vertically: + max_rows = cast(int, max_rows) + if min_rows: + # if min_rows is set (not None or 0), set max_rows to minimum + # of both + max_rows = min(min_rows, max_rows) + if max_rows == 1: + row_num = max_rows + series = series.iloc[:max_rows] + else: + row_num = max_rows // 2 + series = concat((series.iloc[:row_num], series.iloc[-row_num:])) + self.tr_row_num = row_num + else: + self.tr_row_num = None + self.tr_series = series + self.is_truncated_vertically = is_truncated_vertically + + def _get_footer(self) -> str: + name = self.series.name + footer = "" + + if getattr(self.series.index, "freq", None) is not None: + assert isinstance( + self.series.index, (DatetimeIndex, PeriodIndex, TimedeltaIndex) + ) + footer += f"Freq: {self.series.index.freqstr}" + + if self.name is not False and name is not None: + if footer: + footer += ", " + + series_name = printing.pprint_thing(name, escape_chars=("\t", "\r", "\n")) + footer += f"Name: {series_name}" + + if self.length is True or ( + self.length == "truncate" and self.is_truncated_vertically + ): + if footer: + footer += ", " + 
footer += f"Length: {len(self.series)}" + + if self.dtype is not False and self.dtype is not None: + dtype_name = getattr(self.tr_series.dtype, "name", None) + if dtype_name: + if footer: + footer += ", " + footer += f"dtype: {printing.pprint_thing(dtype_name)}" + + # level infos are added to the end and in a new line, like it is done + # for Categoricals + if isinstance(self.tr_series.dtype, CategoricalDtype): + level_info = self.tr_series._values._repr_categories_info() + if footer: + footer += "\n" + footer += level_info + + return str(footer) + + def _get_formatted_index(self) -> tuple[list[str], bool]: + index = self.tr_series.index + + if isinstance(index, MultiIndex): + have_header = any(name for name in index.names) + fmt_index = index.format(names=True) + else: + have_header = index.name is not None + fmt_index = index.format(name=True) + return fmt_index, have_header + + def _get_formatted_values(self) -> list[str]: + return format_array( + self.tr_series._values, + None, + float_format=self.float_format, + na_rep=self.na_rep, + leading_space=self.index, + ) + + def to_string(self) -> str: + series = self.tr_series + footer = self._get_footer() + + if len(series) == 0: + return f"{type(self.series).__name__}([], {footer})" + + fmt_index, have_header = self._get_formatted_index() + fmt_values = self._get_formatted_values() + + if self.is_truncated_vertically: + n_header_rows = 0 + row_num = self.tr_row_num + row_num = cast(int, row_num) + width = self.adj.len(fmt_values[row_num - 1]) + if width > 3: + dot_str = "..." + else: + dot_str = ".." + # Series uses mode=center because it has single value columns + # DataFrame uses mode=left + dot_str = self.adj.justify([dot_str], width, mode="center")[0] + fmt_values.insert(row_num + n_header_rows, dot_str) + fmt_index.insert(row_num + 1, "") + + if self.index: + result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values]) + else: + result = self.adj.adjoin(3, fmt_values) + + if self.header and have_header: + result = fmt_index[0] + "\n" + result + + if footer: + result += "\n" + footer + + return str("".join(result)) + + +class TextAdjustment: + def __init__(self) -> None: + self.encoding = get_option("display.encoding") + + def len(self, text: str) -> int: + return len(text) + + def justify(self, texts: Any, max_len: int, mode: str = "right") -> list[str]: + return printing.justify(texts, max_len, mode=mode) + + def adjoin(self, space: int, *lists, **kwargs) -> str: + return printing.adjoin( + space, *lists, strlen=self.len, justfunc=self.justify, **kwargs + ) + + +class EastAsianTextAdjustment(TextAdjustment): + def __init__(self) -> None: + super().__init__() + if get_option("display.unicode.ambiguous_as_wide"): + self.ambiguous_width = 2 + else: + self.ambiguous_width = 1 + + # Definition of East Asian Width + # https://unicode.org/reports/tr11/ + # Ambiguous width can be changed by option + self._EAW_MAP = {"Na": 1, "N": 1, "W": 2, "F": 2, "H": 1} + + def len(self, text: str) -> int: + """ + Calculate display width considering unicode East Asian Width + """ + if not isinstance(text, str): + return len(text) + + return sum( + self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width) for c in text + ) + + def justify( + self, texts: Iterable[str], max_len: int, mode: str = "right" + ) -> list[str]: + # re-calculate padding space per str considering East Asian Width + def _get_pad(t): + return max_len - self.len(t) + len(t) + + if mode == "left": + return [x.ljust(_get_pad(x)) for x in texts] + elif mode == "center": + return 
[x.center(_get_pad(x)) for x in texts] + else: + return [x.rjust(_get_pad(x)) for x in texts] + + +def get_adjustment() -> TextAdjustment: + use_east_asian_width = get_option("display.unicode.east_asian_width") + if use_east_asian_width: + return EastAsianTextAdjustment() + else: + return TextAdjustment() + + +def get_dataframe_repr_params() -> dict[str, Any]: + """Get the parameters used to repr(dataFrame) calls using DataFrame.to_string. + + Supplying these parameters to DataFrame.to_string is equivalent to calling + ``repr(DataFrame)``. This is useful if you want to adjust the repr output. + + .. versionadded:: 1.4.0 + + Example + ------- + >>> import pandas as pd + >>> + >>> df = pd.DataFrame([[1, 2], [3, 4]]) + >>> repr_params = pd.io.formats.format.get_dataframe_repr_params() + >>> repr(df) == df.to_string(**repr_params) + True + """ + from pandas.io.formats import console + + if get_option("display.expand_frame_repr"): + line_width, _ = console.get_console_size() + else: + line_width = None + return { + "max_rows": get_option("display.max_rows"), + "min_rows": get_option("display.min_rows"), + "max_cols": get_option("display.max_columns"), + "max_colwidth": get_option("display.max_colwidth"), + "show_dimensions": get_option("display.show_dimensions"), + "line_width": line_width, + } + + +def get_series_repr_params() -> dict[str, Any]: + """Get the parameters used to repr(Series) calls using Series.to_string. + + Supplying these parameters to Series.to_string is equivalent to calling + ``repr(series)``. This is useful if you want to adjust the series repr output. + + .. versionadded:: 1.4.0 + + Example + ------- + >>> import pandas as pd + >>> + >>> ser = pd.Series([1, 2, 3, 4]) + >>> repr_params = pd.io.formats.format.get_series_repr_params() + >>> repr(ser) == ser.to_string(**repr_params) + True + """ + width, height = get_terminal_size() + max_rows = ( + height + if get_option("display.max_rows") == 0 + else get_option("display.max_rows") + ) + min_rows = ( + height + if get_option("display.max_rows") == 0 + else get_option("display.min_rows") + ) + + return { + "name": True, + "dtype": True, + "min_rows": min_rows, + "max_rows": max_rows, + "length": get_option("display.show_dimensions"), + } + + +class DataFrameFormatter: + """Class for processing dataframe formatting options and data.""" + + __doc__ = __doc__ if __doc__ else "" + __doc__ += common_docstring + return_docstring + + def __init__( + self, + frame: DataFrame, + columns: Axes | None = None, + col_space: ColspaceArgType | None = None, + header: bool | list[str] = True, + index: bool = True, + na_rep: str = "NaN", + formatters: FormattersType | None = None, + justify: str | None = None, + float_format: FloatFormatType | None = None, + sparsify: bool | None = None, + index_names: bool = True, + max_rows: int | None = None, + min_rows: int | None = None, + max_cols: int | None = None, + show_dimensions: bool | str = False, + decimal: str = ".", + bold_rows: bool = False, + escape: bool = True, + ) -> None: + self.frame = frame + self.columns = self._initialize_columns(columns) + self.col_space = self._initialize_colspace(col_space) + self.header = header + self.index = index + self.na_rep = na_rep + self.formatters = self._initialize_formatters(formatters) + self.justify = self._initialize_justify(justify) + self.float_format = float_format + self.sparsify = self._initialize_sparsify(sparsify) + self.show_index_names = index_names + self.decimal = decimal + self.bold_rows = bold_rows + self.escape = escape + 
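# --- Editorial sketch (not part of the vendored pandas file) ----------------
# get_adjustment() above picks the width calculator used throughout this
# module (it is also stored as ``self.adj`` below): with the
# display.unicode.east_asian_width option enabled, wide characters count as
# two display columns. A minimal illustration, assuming this vendored
# module layout:
#
#   import pandas as pd
#   from pandas.io.formats.format import get_adjustment
#   with pd.option_context("display.unicode.east_asian_width", True):
#       adj = get_adjustment()
#       assert adj.len("パンダ") == 6  # three full-width chars, 2 cells each
#       assert adj.len("panda") == 5  # ASCII stays 1 cell per char
# -----------------------------------------------------------------------------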
self.max_rows = max_rows + self.min_rows = min_rows + self.max_cols = max_cols + self.show_dimensions = show_dimensions + + self.max_cols_fitted = self._calc_max_cols_fitted() + self.max_rows_fitted = self._calc_max_rows_fitted() + + self.tr_frame = self.frame + self.truncate() + self.adj = get_adjustment() + + def get_strcols(self) -> list[list[str]]: + """ + Render a DataFrame to a list of columns (as lists of strings). + """ + strcols = self._get_strcols_without_index() + + if self.index: + str_index = self._get_formatted_index(self.tr_frame) + strcols.insert(0, str_index) + + return strcols + + @property + def should_show_dimensions(self) -> bool: + return self.show_dimensions is True or ( + self.show_dimensions == "truncate" and self.is_truncated + ) + + @property + def is_truncated(self) -> bool: + return bool(self.is_truncated_horizontally or self.is_truncated_vertically) + + @property + def is_truncated_horizontally(self) -> bool: + return bool(self.max_cols_fitted and (len(self.columns) > self.max_cols_fitted)) + + @property + def is_truncated_vertically(self) -> bool: + return bool(self.max_rows_fitted and (len(self.frame) > self.max_rows_fitted)) + + @property + def dimensions_info(self) -> str: + return f"\n\n[{len(self.frame)} rows x {len(self.frame.columns)} columns]" + + @property + def has_index_names(self) -> bool: + return _has_names(self.frame.index) + + @property + def has_column_names(self) -> bool: + return _has_names(self.frame.columns) + + @property + def show_row_idx_names(self) -> bool: + return all((self.has_index_names, self.index, self.show_index_names)) + + @property + def show_col_idx_names(self) -> bool: + return all((self.has_column_names, self.show_index_names, self.header)) + + @property + def max_rows_displayed(self) -> int: + return min(self.max_rows or len(self.frame), len(self.frame)) + + def _initialize_sparsify(self, sparsify: bool | None) -> bool: + if sparsify is None: + return get_option("display.multi_sparse") + return sparsify + + def _initialize_formatters( + self, formatters: FormattersType | None + ) -> FormattersType: + if formatters is None: + return {} + elif len(self.frame.columns) == len(formatters) or isinstance(formatters, dict): + return formatters + else: + raise ValueError( + f"Formatters length({len(formatters)}) should match " + f"DataFrame number of columns({len(self.frame.columns)})" + ) + + def _initialize_justify(self, justify: str | None) -> str: + if justify is None: + return get_option("display.colheader_justify") + else: + return justify + + def _initialize_columns(self, columns: Axes | None) -> Index: + if columns is not None: + cols = ensure_index(columns) + self.frame = self.frame[cols] + return cols + else: + return self.frame.columns + + def _initialize_colspace(self, col_space: ColspaceArgType | None) -> ColspaceType: + result: ColspaceType + + if col_space is None: + result = {} + elif isinstance(col_space, (int, str)): + result = {"": col_space} + result.update({column: col_space for column in self.frame.columns}) + elif isinstance(col_space, Mapping): + for column in col_space.keys(): + if column not in self.frame.columns and column != "": + raise ValueError( + f"Col_space is defined for an unknown column: {column}" + ) + result = col_space + else: + if len(self.frame.columns) != len(col_space): + raise ValueError( + f"Col_space length({len(col_space)}) should match " + f"DataFrame number of columns({len(self.frame.columns)})" + ) + result = dict(zip(self.frame.columns, col_space)) + return result + + def 
_calc_max_cols_fitted(self) -> int | None: + """Number of columns fitting the screen.""" + if not self._is_in_terminal(): + return self.max_cols + + width, _ = get_terminal_size() + if self._is_screen_narrow(width): + return width + else: + return self.max_cols + + def _calc_max_rows_fitted(self) -> int | None: + """Number of rows with data fitting the screen.""" + max_rows: int | None + + if self._is_in_terminal(): + _, height = get_terminal_size() + if self.max_rows == 0: + # rows available to fill with actual data + return height - self._get_number_of_auxiliary_rows() + + if self._is_screen_short(height): + max_rows = height + else: + max_rows = self.max_rows + else: + max_rows = self.max_rows + + return self._adjust_max_rows(max_rows) + + def _adjust_max_rows(self, max_rows: int | None) -> int | None: + """Adjust max_rows using display logic. + + See description here: + https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options + + GH #37359 + """ + if max_rows: + if (len(self.frame) > max_rows) and self.min_rows: + # if truncated, set max_rows showed to min_rows + max_rows = min(self.min_rows, max_rows) + return max_rows + + def _is_in_terminal(self) -> bool: + """Check if the output is to be shown in terminal.""" + return bool(self.max_cols == 0 or self.max_rows == 0) + + def _is_screen_narrow(self, max_width) -> bool: + return bool(self.max_cols == 0 and len(self.frame.columns) > max_width) + + def _is_screen_short(self, max_height) -> bool: + return bool(self.max_rows == 0 and len(self.frame) > max_height) + + def _get_number_of_auxiliary_rows(self) -> int: + """Get number of rows occupied by prompt, dots and dimension info.""" + dot_row = 1 + prompt_row = 1 + num_rows = dot_row + prompt_row + + if self.show_dimensions: + num_rows += len(self.dimensions_info.splitlines()) + + if self.header: + num_rows += 1 + + return num_rows + + def truncate(self) -> None: + """ + Check whether the frame should be truncated. If so, slice the frame up. + """ + if self.is_truncated_horizontally: + self._truncate_horizontally() + + if self.is_truncated_vertically: + self._truncate_vertically() + + def _truncate_horizontally(self) -> None: + """Remove columns, which are not to be displayed and adjust formatters. + + Attributes affected: + - tr_frame + - formatters + - tr_col_num + """ + assert self.max_cols_fitted is not None + col_num = self.max_cols_fitted // 2 + if col_num >= 1: + left = self.tr_frame.iloc[:, :col_num] + right = self.tr_frame.iloc[:, -col_num:] + self.tr_frame = concat((left, right), axis=1) + + # truncate formatter + if isinstance(self.formatters, (list, tuple)): + self.formatters = [ + *self.formatters[:col_num], + *self.formatters[-col_num:], + ] + else: + col_num = cast(int, self.max_cols) + self.tr_frame = self.tr_frame.iloc[:, :col_num] + self.tr_col_num = col_num + + def _truncate_vertically(self) -> None: + """Remove rows, which are not to be displayed. 
+ + Attributes affected: + - tr_frame + - tr_row_num + """ + assert self.max_rows_fitted is not None + row_num = self.max_rows_fitted // 2 + if row_num >= 1: + head = self.tr_frame.iloc[:row_num, :] + tail = self.tr_frame.iloc[-row_num:, :] + self.tr_frame = concat((head, tail)) + else: + row_num = cast(int, self.max_rows) + self.tr_frame = self.tr_frame.iloc[:row_num, :] + self.tr_row_num = row_num + + def _get_strcols_without_index(self) -> list[list[str]]: + strcols: list[list[str]] = [] + + if not is_list_like(self.header) and not self.header: + for i, c in enumerate(self.tr_frame): + fmt_values = self.format_col(i) + fmt_values = _make_fixed_width( + strings=fmt_values, + justify=self.justify, + minimum=int(self.col_space.get(c, 0)), + adj=self.adj, + ) + strcols.append(fmt_values) + return strcols + + if is_list_like(self.header): + # cast here since can't be bool if is_list_like + self.header = cast(list[str], self.header) + if len(self.header) != len(self.columns): + raise ValueError( + f"Writing {len(self.columns)} cols " + f"but got {len(self.header)} aliases" + ) + str_columns = [[label] for label in self.header] + else: + str_columns = self._get_formatted_column_labels(self.tr_frame) + + if self.show_row_idx_names: + for x in str_columns: + x.append("") + + for i, c in enumerate(self.tr_frame): + cheader = str_columns[i] + header_colwidth = max( + int(self.col_space.get(c, 0)), *(self.adj.len(x) for x in cheader) + ) + fmt_values = self.format_col(i) + fmt_values = _make_fixed_width( + fmt_values, self.justify, minimum=header_colwidth, adj=self.adj + ) + + max_len = max(*(self.adj.len(x) for x in fmt_values), header_colwidth) + cheader = self.adj.justify(cheader, max_len, mode=self.justify) + strcols.append(cheader + fmt_values) + + return strcols + + def format_col(self, i: int) -> list[str]: + frame = self.tr_frame + formatter = self._get_formatter(i) + return format_array( + frame.iloc[:, i]._values, + formatter, + float_format=self.float_format, + na_rep=self.na_rep, + space=self.col_space.get(frame.columns[i]), + decimal=self.decimal, + leading_space=self.index, + ) + + def _get_formatter(self, i: str | int) -> Callable | None: + if isinstance(self.formatters, (list, tuple)): + if is_integer(i): + i = cast(int, i) + return self.formatters[i] + else: + return None + else: + if is_integer(i) and i not in self.columns: + i = self.columns[i] + return self.formatters.get(i, None) + + def _get_formatted_column_labels(self, frame: DataFrame) -> list[list[str]]: + from pandas.core.indexes.multi import sparsify_labels + + columns = frame.columns + + if isinstance(columns, MultiIndex): + fmt_columns = columns.format(sparsify=False, adjoin=False) + fmt_columns = list(zip(*fmt_columns)) + dtypes = self.frame.dtypes._values + + # if we have a Float level, they don't use leading space at all + restrict_formatting = any(level.is_floating for level in columns.levels) + need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes))) + + def space_format(x, y): + if ( + y not in self.formatters + and need_leadsp[x] + and not restrict_formatting + ): + return " " + y + return y + + str_columns = list( + zip(*([space_format(x, y) for y in x] for x in fmt_columns)) + ) + if self.sparsify and len(str_columns): + str_columns = sparsify_labels(str_columns) + + str_columns = [list(x) for x in zip(*str_columns)] + else: + fmt_columns = columns.format() + dtypes = self.frame.dtypes + need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes))) + str_columns = [ + [" " + x if not 
self._get_formatter(i) and need_leadsp[x] else x] + for i, x in enumerate(fmt_columns) + ] + # self.str_columns = str_columns + return str_columns + + def _get_formatted_index(self, frame: DataFrame) -> list[str]: + # Note: this is only used by to_string() and to_latex(), not by + # to_html(). so safe to cast col_space here. + col_space = {k: cast(int, v) for k, v in self.col_space.items()} + index = frame.index + columns = frame.columns + fmt = self._get_formatter("__index__") + + if isinstance(index, MultiIndex): + fmt_index = index.format( + sparsify=self.sparsify, + adjoin=False, + names=self.show_row_idx_names, + formatter=fmt, + ) + else: + fmt_index = [index.format(name=self.show_row_idx_names, formatter=fmt)] + + fmt_index = [ + tuple( + _make_fixed_width( + list(x), justify="left", minimum=col_space.get("", 0), adj=self.adj + ) + ) + for x in fmt_index + ] + + adjoined = self.adj.adjoin(1, *fmt_index).split("\n") + + # empty space for columns + if self.show_col_idx_names: + col_header = [str(x) for x in self._get_column_name_list()] + else: + col_header = [""] * columns.nlevels + + if self.header: + return col_header + adjoined + else: + return adjoined + + def _get_column_name_list(self) -> list[Hashable]: + names: list[Hashable] = [] + columns = self.frame.columns + if isinstance(columns, MultiIndex): + names.extend("" if name is None else name for name in columns.names) + else: + names.append("" if columns.name is None else columns.name) + return names + + +class DataFrameRenderer: + """Class for creating dataframe output in multiple formats. + + Called in pandas.core.generic.NDFrame: + - to_csv + - to_latex + + Called in pandas.core.frame.DataFrame: + - to_html + - to_string + + Parameters + ---------- + fmt : DataFrameFormatter + Formatter with the formatting options. + """ + + def __init__(self, fmt: DataFrameFormatter) -> None: + self.fmt = fmt + + def to_html( + self, + buf: FilePath | WriteBuffer[str] | None = None, + encoding: str | None = None, + classes: str | list | tuple | None = None, + notebook: bool = False, + border: int | bool | None = None, + table_id: str | None = None, + render_links: bool = False, + ) -> str | None: + """ + Render a DataFrame to a html table. + + Parameters + ---------- + buf : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``write()`` function. If None, the result is + returned as a string. + encoding : str, default “utf-8” + Set character encoding. + classes : str or list-like + classes to include in the `class` attribute of the opening + ```` tag, in addition to the default "dataframe". + notebook : {True, False}, optional, default False + Whether the generated HTML is for IPython Notebook. + border : int + A ``border=border`` attribute is included in the opening + ``
<table>`` tag. Default ``pd.options.display.html.border``. + table_id : str, optional + A css id is included in the opening ``<table>
` tag if specified. + render_links : bool, default False + Convert URLs to HTML links. + """ + from pandas.io.formats.html import ( + HTMLFormatter, + NotebookFormatter, + ) + + Klass = NotebookFormatter if notebook else HTMLFormatter + + html_formatter = Klass( + self.fmt, + classes=classes, + border=border, + table_id=table_id, + render_links=render_links, + ) + string = html_formatter.to_string() + return save_to_buffer(string, buf=buf, encoding=encoding) + + def to_string( + self, + buf: FilePath | WriteBuffer[str] | None = None, + encoding: str | None = None, + line_width: int | None = None, + ) -> str | None: + """ + Render a DataFrame to a console-friendly tabular output. + + Parameters + ---------- + buf : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``write()`` function. If None, the result is + returned as a string. + encoding: str, default “utf-8” + Set character encoding. + line_width : int, optional + Width to wrap a line in characters. + """ + from pandas.io.formats.string import StringFormatter + + string_formatter = StringFormatter(self.fmt, line_width=line_width) + string = string_formatter.to_string() + return save_to_buffer(string, buf=buf, encoding=encoding) + + def to_csv( + self, + path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, + encoding: str | None = None, + sep: str = ",", + columns: Sequence[Hashable] | None = None, + index_label: IndexLabel | None = None, + mode: str = "w", + compression: CompressionOptions = "infer", + quoting: int | None = None, + quotechar: str = '"', + lineterminator: str | None = None, + chunksize: int | None = None, + date_format: str | None = None, + doublequote: bool = True, + escapechar: str | None = None, + errors: str = "strict", + storage_options: StorageOptions | None = None, + ) -> str | None: + """ + Render dataframe as comma-separated file. + """ + from pandas.io.formats.csvs import CSVFormatter + + if path_or_buf is None: + created_buffer = True + path_or_buf = StringIO() + else: + created_buffer = False + + csv_formatter = CSVFormatter( + path_or_buf=path_or_buf, + lineterminator=lineterminator, + sep=sep, + encoding=encoding, + errors=errors, + compression=compression, + quoting=quoting, + cols=columns, + index_label=index_label, + mode=mode, + chunksize=chunksize, + quotechar=quotechar, + date_format=date_format, + doublequote=doublequote, + escapechar=escapechar, + storage_options=storage_options, + formatter=self.fmt, + ) + csv_formatter.save() + + if created_buffer: + assert isinstance(path_or_buf, StringIO) + content = path_or_buf.getvalue() + path_or_buf.close() + return content + + return None + + +def save_to_buffer( + string: str, + buf: FilePath | WriteBuffer[str] | None = None, + encoding: str | None = None, +) -> str | None: + """ + Perform serialization. Write to buf or return as string if buf is None. + """ + with get_buffer(buf, encoding=encoding) as f: + f.write(string) + if buf is None: + # error: "WriteBuffer[str]" has no attribute "getvalue" + return f.getvalue() # type: ignore[attr-defined] + return None + + +@contextmanager +def get_buffer( + buf: FilePath | WriteBuffer[str] | None, encoding: str | None = None +) -> Generator[WriteBuffer[str], None, None] | Generator[StringIO, None, None]: + """ + Context manager to open, yield and close buffer for filenames or Path-like + objects, otherwise yield buf unchanged. 
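Examples
--------
An illustrative doctest (editorial addition, not from the upstream
docstring): with ``buf=None`` a ``StringIO`` is created and yielded, and
it is left open for the caller to read back.

>>> with get_buffer(None) as f:
...     _ = f.write("abc")
>>> f.getvalue()
'abc'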
+ """ + if buf is not None: + buf = stringify_path(buf) + else: + buf = StringIO() + + if encoding is None: + encoding = "utf-8" + elif not isinstance(buf, str): + raise ValueError("buf is not a file name and encoding is specified.") + + if hasattr(buf, "write"): + # Incompatible types in "yield" (actual type "Union[str, WriteBuffer[str], + # StringIO]", expected type "Union[WriteBuffer[str], StringIO]") + yield buf # type: ignore[misc] + elif isinstance(buf, str): + check_parent_directory(str(buf)) + with open(buf, "w", encoding=encoding, newline="") as f: + # GH#30034 open instead of codecs.open prevents a file leak + # if we have an invalid encoding argument. + # newline="" is needed to roundtrip correctly on + # windows test_to_latex_filename + yield f + else: + raise TypeError("buf is not a file name and it has no write method") + + +# ---------------------------------------------------------------------- +# Array formatters + + +def format_array( + values: Any, + formatter: Callable | None, + float_format: FloatFormatType | None = None, + na_rep: str = "NaN", + digits: int | None = None, + space: str | int | None = None, + justify: str = "right", + decimal: str = ".", + leading_space: bool | None = True, + quoting: int | None = None, + fallback_formatter: Callable | None = None, +) -> list[str]: + """ + Format an array for printing. + + Parameters + ---------- + values + formatter + float_format + na_rep + digits + space + justify + decimal + leading_space : bool, optional, default True + Whether the array should be formatted with a leading space. + When an array as a column of a Series or DataFrame, we do want + the leading space to pad between columns. + + When formatting an Index subclass + (e.g. IntervalIndex._format_native_types), we don't want the + leading space since it should be left-aligned. 
+ fallback_formatter + + Returns + ------- + List[str] + """ + fmt_klass: type[GenericArrayFormatter] + if lib.is_np_dtype(values.dtype, "M"): + fmt_klass = Datetime64Formatter + elif isinstance(values.dtype, DatetimeTZDtype): + fmt_klass = Datetime64TZFormatter + elif lib.is_np_dtype(values.dtype, "m"): + fmt_klass = Timedelta64Formatter + elif isinstance(values.dtype, ExtensionDtype): + fmt_klass = ExtensionArrayFormatter + elif lib.is_np_dtype(values.dtype, "fc"): + fmt_klass = FloatArrayFormatter + elif lib.is_np_dtype(values.dtype, "iu"): + fmt_klass = IntArrayFormatter + else: + fmt_klass = GenericArrayFormatter + + if space is None: + space = 12 + + if float_format is None: + float_format = get_option("display.float_format") + + if digits is None: + digits = get_option("display.precision") + + fmt_obj = fmt_klass( + values, + digits=digits, + na_rep=na_rep, + float_format=float_format, + formatter=formatter, + space=space, + justify=justify, + decimal=decimal, + leading_space=leading_space, + quoting=quoting, + fallback_formatter=fallback_formatter, + ) + + return fmt_obj.get_result() + + +class GenericArrayFormatter: + def __init__( + self, + values: Any, + digits: int = 7, + formatter: Callable | None = None, + na_rep: str = "NaN", + space: str | int = 12, + float_format: FloatFormatType | None = None, + justify: str = "right", + decimal: str = ".", + quoting: int | None = None, + fixed_width: bool = True, + leading_space: bool | None = True, + fallback_formatter: Callable | None = None, + ) -> None: + self.values = values + self.digits = digits + self.na_rep = na_rep + self.space = space + self.formatter = formatter + self.float_format = float_format + self.justify = justify + self.decimal = decimal + self.quoting = quoting + self.fixed_width = fixed_width + self.leading_space = leading_space + self.fallback_formatter = fallback_formatter + + def get_result(self) -> list[str]: + fmt_values = self._format_strings() + return _make_fixed_width(fmt_values, self.justify) + + def _format_strings(self) -> list[str]: + if self.float_format is None: + float_format = get_option("display.float_format") + if float_format is None: + precision = get_option("display.precision") + float_format = lambda x: _trim_zeros_single_float( + f"{x: .{precision:d}f}" + ) + else: + float_format = self.float_format + + if self.formatter is not None: + formatter = self.formatter + elif self.fallback_formatter is not None: + formatter = self.fallback_formatter + else: + quote_strings = self.quoting is not None and self.quoting != QUOTE_NONE + formatter = partial( + printing.pprint_thing, + escape_chars=("\t", "\r", "\n"), + quote_strings=quote_strings, + ) + + def _format(x): + if self.na_rep is not None and is_scalar(x) and isna(x): + try: + # try block for np.isnat specifically + # determine na_rep if x is None or NaT-like + if x is None: + return "None" + elif x is NA: + return str(NA) + elif x is NaT or np.isnat(x): + return "NaT" + except (TypeError, ValueError): + # np.isnat only handles datetime or timedelta objects + pass + return self.na_rep + elif isinstance(x, PandasObject): + return str(x) + elif isinstance(x, StringDtype): + return repr(x) + else: + # object dtype + return str(formatter(x)) + + vals = extract_array(self.values, extract_numpy=True) + if not isinstance(vals, np.ndarray): + raise TypeError( + "ExtensionArray formatting should use ExtensionArrayFormatter" + ) + inferred = lib.map_infer(vals, is_float) + is_float_type = ( + inferred + # vals may have 2 or more dimensions + & 
np.all(notna(vals), axis=tuple(range(1, len(vals.shape)))) + ) + leading_space = self.leading_space + if leading_space is None: + leading_space = is_float_type.any() + + fmt_values = [] + for i, v in enumerate(vals): + if (not is_float_type[i] or self.formatter is not None) and leading_space: + fmt_values.append(f" {_format(v)}") + elif is_float_type[i]: + fmt_values.append(float_format(v)) + else: + if leading_space is False: + # False specifically, so that the default is + # to include a space if we get here. + tpl = "{v}" + else: + tpl = " {v}" + fmt_values.append(tpl.format(v=_format(v))) + + return fmt_values + + +class FloatArrayFormatter(GenericArrayFormatter): + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + # float_format is expected to be a string + # formatter should be used to pass a function + if self.float_format is not None and self.formatter is None: + # GH21625, GH22270 + self.fixed_width = False + if callable(self.float_format): + self.formatter = self.float_format + self.float_format = None + + def _value_formatter( + self, + float_format: FloatFormatType | None = None, + threshold: float | None = None, + ) -> Callable: + """Returns a function to be applied on each value to format it""" + # the float_format parameter supersedes self.float_format + if float_format is None: + float_format = self.float_format + + # we are going to compose different functions, to first convert to + # a string, then replace the decimal symbol, and finally chop according + # to the threshold + + # when there is no float_format, we use str instead of '%g' + # because str(0.0) = '0.0' while '%g' % 0.0 = '0' + if float_format: + + def base_formatter(v): + assert float_format is not None # for mypy + # error: "str" not callable + # error: Unexpected keyword argument "value" for "__call__" of + # "EngFormatter" + return ( + float_format(value=v) # type: ignore[operator,call-arg] + if notna(v) + else self.na_rep + ) + + else: + + def base_formatter(v): + return str(v) if notna(v) else self.na_rep + + if self.decimal != ".": + + def decimal_formatter(v): + return base_formatter(v).replace(".", self.decimal, 1) + + else: + decimal_formatter = base_formatter + + if threshold is None: + return decimal_formatter + + def formatter(value): + if notna(value): + if abs(value) > threshold: + return decimal_formatter(value) + else: + return decimal_formatter(0.0) + else: + return self.na_rep + + return formatter + + def get_result_as_array(self) -> np.ndarray: + """ + Returns the float values converted into strings using + the parameters given at initialisation, as a numpy array + """ + + def format_with_na_rep(values: ArrayLike, formatter: Callable, na_rep: str): + mask = isna(values) + formatted = np.array( + [ + formatter(val) if not m else na_rep + for val, m in zip(values.ravel(), mask.ravel()) + ] + ).reshape(values.shape) + return formatted + + def format_complex_with_na_rep( + values: ArrayLike, formatter: Callable, na_rep: str + ): + real_values = np.real(values).ravel() # type: ignore[arg-type] + imag_values = np.imag(values).ravel() # type: ignore[arg-type] + real_mask, imag_mask = isna(real_values), isna(imag_values) + formatted_lst = [] + for val, real_val, imag_val, re_isna, im_isna in zip( + values.ravel(), + real_values, + imag_values, + real_mask, + imag_mask, + ): + if not re_isna and not im_isna: + formatted_lst.append(formatter(val)) + elif not re_isna: # xxx+nanj + formatted_lst.append(f"{formatter(real_val)}+{na_rep}j") + elif not im_isna: # 
nan[+/-]xxxj + # The imaginary part may either start with a "-" or a space + imag_formatted = formatter(imag_val).strip() + if imag_formatted.startswith("-"): + formatted_lst.append(f"{na_rep}{imag_formatted}j") + else: + formatted_lst.append(f"{na_rep}+{imag_formatted}j") + else: # nan+nanj + formatted_lst.append(f"{na_rep}+{na_rep}j") + return np.array(formatted_lst).reshape(values.shape) + + if self.formatter is not None: + return format_with_na_rep(self.values, self.formatter, self.na_rep) + + if self.fixed_width: + threshold = get_option("display.chop_threshold") + else: + threshold = None + + # if we have a fixed_width, we'll need to try different float_format + def format_values_with(float_format): + formatter = self._value_formatter(float_format, threshold) + + # default formatter leaves a space to the left when formatting + # floats, must be consistent for left-justifying NaNs (GH #25061) + na_rep = " " + self.na_rep if self.justify == "left" else self.na_rep + + # different formatting strategies for complex and non-complex data + # need to distinguish complex and float NaNs (GH #53762) + values = self.values + is_complex = is_complex_dtype(values) + + # separate the wheat from the chaff + if is_complex: + values = format_complex_with_na_rep(values, formatter, na_rep) + else: + values = format_with_na_rep(values, formatter, na_rep) + + if self.fixed_width: + if is_complex: + result = _trim_zeros_complex(values, self.decimal) + else: + result = _trim_zeros_float(values, self.decimal) + return np.asarray(result, dtype="object") + + return values + + # There is a special default string when we are fixed-width + # The default is otherwise to use str instead of a formatting string + float_format: FloatFormatType | None + if self.float_format is None: + if self.fixed_width: + if self.leading_space is True: + fmt_str = "{value: .{digits:d}f}" + else: + fmt_str = "{value:.{digits:d}f}" + float_format = partial(fmt_str.format, digits=self.digits) + else: + float_format = self.float_format + else: + float_format = lambda value: self.float_format % value + + formatted_values = format_values_with(float_format) + + if not self.fixed_width: + return formatted_values + + # we need do convert to engineering format if some values are too small + # and would appear as 0, or if some values are too big and take too + # much space + + if len(formatted_values) > 0: + maxlen = max(len(x) for x in formatted_values) + too_long = maxlen > self.digits + 6 + else: + too_long = False + + abs_vals = np.abs(self.values) + # this is pretty arbitrary for now + # large values: more that 8 characters including decimal symbol + # and first digit, hence > 1e6 + has_large_values = (abs_vals > 1e6).any() + has_small_values = ((abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)).any() + + if has_small_values or (too_long and has_large_values): + if self.leading_space is True: + fmt_str = "{value: .{digits:d}e}" + else: + fmt_str = "{value:.{digits:d}e}" + float_format = partial(fmt_str.format, digits=self.digits) + formatted_values = format_values_with(float_format) + + return formatted_values + + def _format_strings(self) -> list[str]: + return list(self.get_result_as_array()) + + +class IntArrayFormatter(GenericArrayFormatter): + def _format_strings(self) -> list[str]: + if self.leading_space is False: + formatter_str = lambda x: f"{x:d}".format(x=x) + else: + formatter_str = lambda x: f"{x: d}".format(x=x) + formatter = self.formatter or formatter_str + fmt_values = [formatter(x) for x in self.values] + return 
fmt_values + + +class Datetime64Formatter(GenericArrayFormatter): + def __init__( + self, + values: np.ndarray | Series | DatetimeIndex | DatetimeArray, + nat_rep: str = "NaT", + date_format: None = None, + **kwargs, + ) -> None: + super().__init__(values, **kwargs) + self.nat_rep = nat_rep + self.date_format = date_format + + def _format_strings(self) -> list[str]: + """we by definition have DO NOT have a TZ""" + values = self.values + + if not isinstance(values, DatetimeIndex): + values = DatetimeIndex(values) + + if self.formatter is not None and callable(self.formatter): + return [self.formatter(x) for x in values] + + fmt_values = values._data._format_native_types( + na_rep=self.nat_rep, date_format=self.date_format + ) + return fmt_values.tolist() + + +class ExtensionArrayFormatter(GenericArrayFormatter): + def _format_strings(self) -> list[str]: + values = extract_array(self.values, extract_numpy=True) + + formatter = self.formatter + fallback_formatter = None + if formatter is None: + fallback_formatter = values._formatter(boxed=True) + + if isinstance(values, Categorical): + # Categorical is special for now, so that we can preserve tzinfo + array = values._internal_get_values() + else: + array = np.asarray(values) + + fmt_values = format_array( + array, + formatter, + float_format=self.float_format, + na_rep=self.na_rep, + digits=self.digits, + space=self.space, + justify=self.justify, + decimal=self.decimal, + leading_space=self.leading_space, + quoting=self.quoting, + fallback_formatter=fallback_formatter, + ) + return fmt_values + + +def format_percentiles( + percentiles: (np.ndarray | Sequence[float]), +) -> list[str]: + """ + Outputs rounded and formatted percentiles. + + Parameters + ---------- + percentiles : list-like, containing floats from interval [0,1] + + Returns + ------- + formatted : list of strings + + Notes + ----- + Rounding precision is chosen so that: (1) if any two elements of + ``percentiles`` differ, they remain different after rounding + (2) no entry is *rounded* to 0% or 100%. + Any non-integer is always rounded to at least 1 decimal place. + + Examples + -------- + Keeps all entries different after rounding: + + >>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999]) + ['1.999%', '2.001%', '50%', '66.667%', '99.99%'] + + No element is rounded to 0% or 100% (unless already equal to it). 
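For instance (an illustrative doctest added editorially, not part of the
upstream docstring), 0.1% stays distinct from 0%:

>>> format_percentiles([0.001, 0.25])
['0.1%', '25%']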
+ Duplicates are allowed: + + >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999]) + ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%'] + """ + percentiles = np.asarray(percentiles) + + # It checks for np.nan as well + if ( + not is_numeric_dtype(percentiles) + or not np.all(percentiles >= 0) + or not np.all(percentiles <= 1) + ): + raise ValueError("percentiles should all be in the interval [0,1]") + + percentiles = 100 * percentiles + percentiles_round_type = percentiles.round().astype(int) + + int_idx = np.isclose(percentiles_round_type, percentiles) + + if np.all(int_idx): + out = percentiles_round_type.astype(str) + return [i + "%" for i in out] + + unique_pcts = np.unique(percentiles) + to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None + to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None + + # Least precision that keeps percentiles unique after rounding + prec = -np.floor( + np.log10(np.min(np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end))) + ).astype(int) + prec = max(1, prec) + out = np.empty_like(percentiles, dtype=object) + out[int_idx] = percentiles[int_idx].round().astype(int).astype(str) + + out[~int_idx] = percentiles[~int_idx].round(prec).astype(str) + return [i + "%" for i in out] + + +def is_dates_only(values: np.ndarray | DatetimeArray | Index | DatetimeIndex) -> bool: + # return a boolean if we are only dates (and don't have a timezone) + if not isinstance(values, Index): + values = values.ravel() + + if not isinstance(values, (DatetimeArray, DatetimeIndex)): + values = DatetimeIndex(values) + + if values.tz is not None: + return False + + values_int = values.asi8 + consider_values = values_int != iNaT + # error: Argument 1 to "py_get_unit_from_dtype" has incompatible type + # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]" + reso = get_unit_from_dtype(values.dtype) # type: ignore[arg-type] + ppd = periods_per_day(reso) + + # TODO: can we reuse is_date_array_normalized? would need a skipna kwd + even_days = np.logical_and(consider_values, values_int % ppd != 0).sum() == 0 + if even_days: + return True + return False + + +def _format_datetime64(x: NaTType | Timestamp, nat_rep: str = "NaT") -> str: + if x is NaT: + return nat_rep + + # Timestamp.__str__ falls back to datetime.datetime.__str__ = isoformat(sep=' ') + # so it already uses string formatting rather than strftime (faster). 
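# --- Editorial note (not part of the vendored pandas file) ------------------
# is_dates_only() above keys the date-only fast path on a modulo test: every
# non-NaT value must be an exact multiple of one day in the array's
# resolution (values_int % periods_per_day(reso) == 0). A rough sketch of
# the idea, assuming nanosecond resolution:
#
#   import numpy as np
#   ns_per_day = 86_400 * 10**9
#   vals = np.array(["2024-01-01", "2024-01-02"], dtype="datetime64[ns]")
#   assert (vals.view("i8") % ns_per_day == 0).all()  # midnights only
# -----------------------------------------------------------------------------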
+ return str(x) + + +def _format_datetime64_dateonly( + x: NaTType | Timestamp, + nat_rep: str = "NaT", + date_format: str | None = None, +) -> str: + if isinstance(x, NaTType): + return nat_rep + + if date_format: + return x.strftime(date_format) + else: + # Timestamp._date_repr relies on string formatting (faster than strftime) + return x._date_repr + + +def get_format_datetime64( + is_dates_only_: bool, nat_rep: str = "NaT", date_format: str | None = None +) -> Callable: + """Return a formatter callable taking a datetime64 as input and providing + a string as output""" + + if is_dates_only_: + return lambda x: _format_datetime64_dateonly( + x, nat_rep=nat_rep, date_format=date_format + ) + else: + return lambda x: _format_datetime64(x, nat_rep=nat_rep) + + +def get_format_datetime64_from_values( + values: np.ndarray | DatetimeArray | DatetimeIndex, date_format: str | None +) -> str | None: + """given values and a date_format, return a string format""" + if isinstance(values, np.ndarray) and values.ndim > 1: + # We don't actually care about the order of values, and DatetimeIndex + # only accepts 1D values + values = values.ravel() + + ido = is_dates_only(values) + if ido: + # Only dates and no timezone: provide a default format + return date_format or "%Y-%m-%d" + return date_format + + +class Datetime64TZFormatter(Datetime64Formatter): + def _format_strings(self) -> list[str]: + """we by definition have a TZ""" + ido = is_dates_only(self.values) + values = self.values.astype(object) + formatter = self.formatter or get_format_datetime64( + ido, date_format=self.date_format + ) + fmt_values = [formatter(x) for x in values] + + return fmt_values + + +class Timedelta64Formatter(GenericArrayFormatter): + def __init__( + self, + values: np.ndarray | TimedeltaIndex, + nat_rep: str = "NaT", + box: bool = False, + **kwargs, + ) -> None: + super().__init__(values, **kwargs) + self.nat_rep = nat_rep + self.box = box + + def _format_strings(self) -> list[str]: + formatter = self.formatter or get_format_timedelta64( + self.values, nat_rep=self.nat_rep, box=self.box + ) + return [formatter(x) for x in self.values] + + +def get_format_timedelta64( + values: np.ndarray | TimedeltaIndex | TimedeltaArray, + nat_rep: str | float = "NaT", + box: bool = False, +) -> Callable: + """ + Return a formatter function for a range of timedeltas. 
+ These will all have the same format argument + + If box, then show the return in quotes + """ + values_int = values.view(np.int64) + + consider_values = values_int != iNaT + + one_day_nanos = 86400 * 10**9 + # error: Unsupported operand types for % ("ExtensionArray" and "int") + not_midnight = values_int % one_day_nanos != 0 # type: ignore[operator] + # error: Argument 1 to "__call__" of "ufunc" has incompatible type + # "Union[Any, ExtensionArray, ndarray]"; expected + # "Union[Union[int, float, complex, str, bytes, generic], + # Sequence[Union[int, float, complex, str, bytes, generic]], + # Sequence[Sequence[Any]], _SupportsArray]" + both = np.logical_and(consider_values, not_midnight) # type: ignore[arg-type] + even_days = both.sum() == 0 + + if even_days: + format = None + else: + format = "long" + + def _formatter(x): + if x is None or (is_scalar(x) and isna(x)): + return nat_rep + + if not isinstance(x, Timedelta): + x = Timedelta(x) + + # Timedelta._repr_base uses string formatting (faster than strftime) + result = x._repr_base(format=format) + if box: + result = f"'{result}'" + return result + + return _formatter + + +def _make_fixed_width( + strings: list[str], + justify: str = "right", + minimum: int | None = None, + adj: TextAdjustment | None = None, +) -> list[str]: + if len(strings) == 0 or justify == "all": + return strings + + if adj is None: + adjustment = get_adjustment() + else: + adjustment = adj + + max_len = max(adjustment.len(x) for x in strings) + + if minimum is not None: + max_len = max(minimum, max_len) + + conf_max = get_option("display.max_colwidth") + if conf_max is not None and max_len > conf_max: + max_len = conf_max + + def just(x: str) -> str: + if conf_max is not None: + if (conf_max > 3) & (adjustment.len(x) > max_len): + x = x[: max_len - 3] + "..." + return x + + strings = [just(x) for x in strings] + result = adjustment.justify(strings, max_len, mode=justify) + return result + + +def _trim_zeros_complex(str_complexes: np.ndarray, decimal: str = ".") -> list[str]: + """ + Separates the real and imaginary parts from the complex number, and + executes the _trim_zeros_float method on each of those. + """ + real_part, imag_part = [], [] + for x in str_complexes: + # Complex numbers are represented as "(-)xxx(+/-)xxxj" + # The split will give [{"", "-"}, "xxx", "+/-", "xxx", "j", ""] + # Therefore, the imaginary part is the 4th and 3rd last elements, + # and the real part is everything before the imaginary part + trimmed = re.split(r"([j+-])", x) + real_part.append("".join(trimmed[:-4])) + imag_part.append("".join(trimmed[-4:-2])) + + # We want to align the lengths of the real and imaginary parts of each complex + # number, as well as the lengths the real (resp. complex) parts of all numbers + # in the array + n = len(str_complexes) + padded_parts = _trim_zeros_float(real_part + imag_part, decimal) + if len(padded_parts) == 0: + return [] + padded_length = max(len(part) for part in padded_parts) - 1 + padded = [ + real_pt # real part, possibly NaN + + imag_pt[0] # +/- + + f"{imag_pt[1:]:>{padded_length}}" # complex part (no sign), possibly nan + + "j" + for real_pt, imag_pt in zip(padded_parts[:n], padded_parts[n:]) + ] + return padded + + +def _trim_zeros_single_float(str_float: str) -> str: + """ + Trims trailing zeros after a decimal point, + leaving just one if necessary. 
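Illustrative doctests (editorial addition, not from the upstream
docstring):

>>> _trim_zeros_single_float(" 1.500")
' 1.5'
>>> _trim_zeros_single_float(" 1.000")
' 1.0'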
+ """ + str_float = str_float.rstrip("0") + if str_float.endswith("."): + str_float += "0" + + return str_float + + +def _trim_zeros_float( + str_floats: np.ndarray | list[str], decimal: str = "." +) -> list[str]: + """ + Trims the maximum number of trailing zeros equally from + all numbers containing decimals, leaving just one if + necessary. + """ + trimmed = str_floats + number_regex = re.compile(rf"^\s*[\+-]?[0-9]+\{decimal}[0-9]*$") + + def is_number_with_decimal(x) -> bool: + return re.match(number_regex, x) is not None + + def should_trim(values: np.ndarray | list[str]) -> bool: + """ + Determine if an array of strings should be trimmed. + + Returns True if all numbers containing decimals (defined by the + above regular expression) within the array end in a zero, otherwise + returns False. + """ + numbers = [x for x in values if is_number_with_decimal(x)] + return len(numbers) > 0 and all(x.endswith("0") for x in numbers) + + while should_trim(trimmed): + trimmed = [x[:-1] if is_number_with_decimal(x) else x for x in trimmed] + + # leave one 0 after the decimal points if need be. + result = [ + x + "0" if is_number_with_decimal(x) and x.endswith(decimal) else x + for x in trimmed + ] + return result + + +def _has_names(index: Index) -> bool: + if isinstance(index, MultiIndex): + return com.any_not_none(*index.names) + else: + return index.name is not None + + +class EngFormatter: + """ + Formats float values according to engineering format. + + Based on matplotlib.ticker.EngFormatter + """ + + # The SI engineering prefixes + ENG_PREFIXES = { + -24: "y", + -21: "z", + -18: "a", + -15: "f", + -12: "p", + -9: "n", + -6: "u", + -3: "m", + 0: "", + 3: "k", + 6: "M", + 9: "G", + 12: "T", + 15: "P", + 18: "E", + 21: "Z", + 24: "Y", + } + + def __init__( + self, accuracy: int | None = None, use_eng_prefix: bool = False + ) -> None: + self.accuracy = accuracy + self.use_eng_prefix = use_eng_prefix + + def __call__(self, num: float) -> str: + """ + Formats a number in engineering notation, appending a letter + representing the power of 1000 of the original number. 
Some examples: + >>> format_eng = EngFormatter(accuracy=0, use_eng_prefix=True) + >>> format_eng(0) + ' 0' + >>> format_eng = EngFormatter(accuracy=1, use_eng_prefix=True) + >>> format_eng(1_000_000) + ' 1.0M' + >>> format_eng = EngFormatter(accuracy=2, use_eng_prefix=False) + >>> format_eng("-1e-6") + '-1.00E-06' + + @param num: the value to represent + @type num: either a numeric value or a string that can be converted to + a numeric value (as per decimal.Decimal constructor) + + @return: engineering formatted string + """ + dnum = Decimal(str(num)) + + if Decimal.is_nan(dnum): + return "NaN" + + if Decimal.is_infinite(dnum): + return "inf" + + sign = 1 + + if dnum < 0: # pragma: no cover + sign = -1 + dnum = -dnum + + if dnum != 0: + pow10 = Decimal(int(math.floor(dnum.log10() / 3) * 3)) + else: + pow10 = Decimal(0) + + pow10 = pow10.min(max(self.ENG_PREFIXES.keys())) + pow10 = pow10.max(min(self.ENG_PREFIXES.keys())) + int_pow10 = int(pow10) + + if self.use_eng_prefix: + prefix = self.ENG_PREFIXES[int_pow10] + elif int_pow10 < 0: + prefix = f"E-{-int_pow10:02d}" + else: + prefix = f"E+{int_pow10:02d}" + + mant = sign * dnum / (10**pow10) + + if self.accuracy is None: # pragma: no cover + format_str = "{mant: g}{prefix}" + else: + format_str = f"{{mant: .{self.accuracy:d}f}}{{prefix}}" + + formatted = format_str.format(mant=mant, prefix=prefix) + + return formatted + + +def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> None: + """ + Format float representation in DataFrame with SI notation. + + Parameters + ---------- + accuracy : int, default 3 + Number of decimal digits after the floating point. + use_eng_prefix : bool, default False + Whether to represent a value with SI prefixes. + + Returns + ------- + None + + Examples + -------- + >>> df = pd.DataFrame([1e-9, 1e-3, 1, 1e3, 1e6]) + >>> df + 0 + 0 1.000000e-09 + 1 1.000000e-03 + 2 1.000000e+00 + 3 1.000000e+03 + 4 1.000000e+06 + + >>> pd.set_eng_float_format(accuracy=1) + >>> df + 0 + 0 1.0E-09 + 1 1.0E-03 + 2 1.0E+00 + 3 1.0E+03 + 4 1.0E+06 + + >>> pd.set_eng_float_format(use_eng_prefix=True) + >>> df + 0 + 0 1.000n + 1 1.000m + 2 1.000 + 3 1.000k + 4 1.000M + + >>> pd.set_eng_float_format(accuracy=1, use_eng_prefix=True) + >>> df + 0 + 0 1.0n + 1 1.0m + 2 1.0 + 3 1.0k + 4 1.0M + + >>> pd.set_option("display.float_format", None) # unset option + """ + set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix)) + + +def get_level_lengths( + levels: Any, sentinel: bool | object | str = "" +) -> list[dict[int, int]]: + """ + For each index in each level the function returns lengths of indexes. + + Parameters + ---------- + levels : list of lists + List of values on for level. + sentinel : string, optional + Value which states that no new index starts on there. + + Returns + ------- + Returns list of maps. For each level returns map of indexes (key is index + in row and value is length of index). + """ + if len(levels) == 0: + return [] + + control = [True] * len(levels[0]) + + result = [] + for level in levels: + last_index = 0 + + lengths = {} + for i, key in enumerate(level): + if control[i] and key == sentinel: + pass + else: + control[i] = False + lengths[last_index] = i - last_index + last_index = i + + lengths[last_index] = len(level) - last_index + + result.append(lengths) + + return result + + +def buffer_put_lines(buf: WriteBuffer[str], lines: list[str]) -> None: + """ + Appends lines to a buffer. 
+ + Parameters + ---------- + buf + The buffer to write to + lines + The lines to append. + """ + if any(isinstance(x, str) for x in lines): + lines = [str(x) for x in lines] + buf.write("\n".join(lines)) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/html.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/html.py new file mode 100644 index 00000000..ce59985b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/html.py @@ -0,0 +1,644 @@ +""" +Module for formatting output data in HTML. +""" +from __future__ import annotations + +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Final, + cast, +) + +from pandas._config import get_option + +from pandas._libs import lib + +from pandas import ( + MultiIndex, + option_context, +) + +from pandas.io.common import is_url +from pandas.io.formats.format import ( + DataFrameFormatter, + get_level_lengths, +) +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterable, + Mapping, + ) + + +class HTMLFormatter: + """ + Internal class for formatting output data in html. + This class is intended for shared functionality between + DataFrame.to_html() and DataFrame._repr_html_(). + Any logic in common with other output formatting methods + should ideally be inherited from classes in format.py + and this class responsible for only producing html markup. + """ + + indent_delta: Final = 2 + + def __init__( + self, + formatter: DataFrameFormatter, + classes: str | list[str] | tuple[str, ...] | None = None, + border: int | bool | None = None, + table_id: str | None = None, + render_links: bool = False, + ) -> None: + self.fmt = formatter + self.classes = classes + + self.frame = self.fmt.frame + self.columns = self.fmt.tr_frame.columns + self.elements: list[str] = [] + self.bold_rows = self.fmt.bold_rows + self.escape = self.fmt.escape + self.show_dimensions = self.fmt.show_dimensions + if border is None or border is True: + border = cast(int, get_option("display.html.border")) + elif not border: + border = None + + self.border = border + self.table_id = table_id + self.render_links = render_links + + self.col_space = {} + is_multi_index = isinstance(self.columns, MultiIndex) + for column, value in self.fmt.col_space.items(): + col_space_value = f"{value}px" if isinstance(value, int) else value + self.col_space[column] = col_space_value + # GH 53885: Handling case where column is index + # Flatten the data in the multi index and add in the map + if is_multi_index and isinstance(column, tuple): + for column_index in column: + self.col_space[str(column_index)] = col_space_value + + def to_string(self) -> str: + lines = self.render() + if any(isinstance(x, str) for x in lines): + lines = [str(x) for x in lines] + return "\n".join(lines) + + def render(self) -> list[str]: + self._write_table() + + if self.should_show_dimensions: + by = chr(215) # × # noqa: RUF003 + self.write( + f"

<p>{len(self.frame)} rows {by} {len(self.frame.columns)} columns</p>

" + ) + + return self.elements + + @property + def should_show_dimensions(self) -> bool: + return self.fmt.should_show_dimensions + + @property + def show_row_idx_names(self) -> bool: + return self.fmt.show_row_idx_names + + @property + def show_col_idx_names(self) -> bool: + return self.fmt.show_col_idx_names + + @property + def row_levels(self) -> int: + if self.fmt.index: + # showing (row) index + return self.frame.index.nlevels + elif self.show_col_idx_names: + # see gh-22579 + # Column misalignment also occurs for + # a standard index when the columns index is named. + # If the row index is not displayed a column of + # blank cells need to be included before the DataFrame values. + return 1 + # not showing (row) index + return 0 + + def _get_columns_formatted_values(self) -> Iterable: + return self.columns + + @property + def is_truncated(self) -> bool: + return self.fmt.is_truncated + + @property + def ncols(self) -> int: + return len(self.fmt.tr_frame.columns) + + def write(self, s: Any, indent: int = 0) -> None: + rs = pprint_thing(s) + self.elements.append(" " * indent + rs) + + def write_th( + self, s: Any, header: bool = False, indent: int = 0, tags: str | None = None + ) -> None: + """ + Method for writing a formatted . This will + cause min-width to be set if there is one. + indent : int, default 0 + The indentation level of the cell. + tags : str, default None + Tags to include in the cell. + + Returns + ------- + A written ", indent) + else: + self.write(f'', indent) + indent += indent_delta + + for i, s in enumerate(line): + val_tag = tags.get(i, None) + if header or (self.bold_rows and i < nindex_levels): + self.write_th(s, indent=indent, header=header, tags=val_tag) + else: + self.write_td(s, indent, tags=val_tag) + + indent -= indent_delta + self.write("", indent) + + def _write_table(self, indent: int = 0) -> None: + _classes = ["dataframe"] # Default class. + use_mathjax = get_option("display.html.use_mathjax") + if not use_mathjax: + _classes.append("tex2jax_ignore") + if self.classes is not None: + if isinstance(self.classes, str): + self.classes = self.classes.split() + if not isinstance(self.classes, (list, tuple)): + raise TypeError( + "classes must be a string, list, " + f"or tuple, not {type(self.classes)}" + ) + _classes.extend(self.classes) + + if self.table_id is None: + id_section = "" + else: + id_section = f' id="{self.table_id}"' + + if self.border is None: + border_attr = "" + else: + border_attr = f' border="{self.border}"' + + self.write( + f'', + indent, + ) + + if self.fmt.header or self.show_row_idx_names: + self._write_header(indent + self.indent_delta) + + self._write_body(indent + self.indent_delta) + + self.write("
<th> cell. + + If col_space is set on the formatter then that is used for + the value of min-width. + + Parameters + ---------- + s : object + The data to be written inside the cell. + header : bool, default False + Set to True if the <th> is for use inside <thead>. This will + cause min-width to be set if there is one. + indent : int, default 0 + The indentation level of the cell. + tags : str, default None + Tags to include in the cell. + + Returns + ------- + A written <th>
cell. + """ + col_space = self.col_space.get(s, None) + + if header and col_space is not None: + tags = tags or "" + tags += f'style="min-width: {col_space};"' + + self._write_cell(s, kind="th", indent=indent, tags=tags) + + def write_td(self, s: Any, indent: int = 0, tags: str | None = None) -> None: + self._write_cell(s, kind="td", indent=indent, tags=tags) + + def _write_cell( + self, s: Any, kind: str = "td", indent: int = 0, tags: str | None = None + ) -> None: + if tags is not None: + start_tag = f"<{kind} {tags}>" + else: + start_tag = f"<{kind}>" + + if self.escape: + # escape & first to prevent double escaping of & + esc = {"&": r"&", "<": r"<", ">": r">"} + else: + esc = {} + + rs = pprint_thing(s, escape_chars=esc).strip() + + if self.render_links and is_url(rs): + rs_unescaped = pprint_thing(s, escape_chars={}).strip() + start_tag += f'' + end_a = "" + else: + end_a = "" + + self.write(f"{start_tag}{rs}{end_a}", indent) + + def write_tr( + self, + line: Iterable, + indent: int = 0, + indent_delta: int = 0, + header: bool = False, + align: str | None = None, + tags: dict[int, str] | None = None, + nindex_levels: int = 0, + ) -> None: + if tags is None: + tags = {} + + if align is None: + self.write("
", indent) + + def _write_col_header(self, indent: int) -> None: + row: list[Hashable] + is_truncated_horizontally = self.fmt.is_truncated_horizontally + if isinstance(self.columns, MultiIndex): + template = 'colspan="{span:d}" halign="left"' + + sentinel: lib.NoDefault | bool + if self.fmt.sparsify: + # GH3547 + sentinel = lib.no_default + else: + sentinel = False + levels = self.columns.format(sparsify=sentinel, adjoin=False, names=False) + level_lengths = get_level_lengths(levels, sentinel) + inner_lvl = len(level_lengths) - 1 + for lnum, (records, values) in enumerate(zip(level_lengths, levels)): + if is_truncated_horizontally: + # modify the header lines + ins_col = self.fmt.tr_col_num + if self.fmt.sparsify: + recs_new = {} + # Increment tags after ... col. + for tag, span in list(records.items()): + if tag >= ins_col: + recs_new[tag + 1] = span + elif tag + span > ins_col: + recs_new[tag] = span + 1 + if lnum == inner_lvl: + values = ( + values[:ins_col] + ("...",) + values[ins_col:] + ) + else: + # sparse col headers do not receive a ... + values = ( + values[:ins_col] + + (values[ins_col - 1],) + + values[ins_col:] + ) + else: + recs_new[tag] = span + # if ins_col lies between tags, all col headers + # get ... + if tag + span == ins_col: + recs_new[ins_col] = 1 + values = values[:ins_col] + ("...",) + values[ins_col:] + records = recs_new + inner_lvl = len(level_lengths) - 1 + if lnum == inner_lvl: + records[ins_col] = 1 + else: + recs_new = {} + for tag, span in list(records.items()): + if tag >= ins_col: + recs_new[tag + 1] = span + else: + recs_new[tag] = span + recs_new[ins_col] = 1 + records = recs_new + values = values[:ins_col] + ["..."] + values[ins_col:] + + # see gh-22579 + # Column Offset Bug with to_html(index=False) with + # MultiIndex Columns and Index. + # Initially fill row with blank cells before column names. + # TODO: Refactor to remove code duplication with code + # block below for standard columns index. + row = [""] * (self.row_levels - 1) + if self.fmt.index or self.show_col_idx_names: + # see gh-22747 + # If to_html(index_names=False) do not show columns + # index names. + # TODO: Refactor to use _get_column_name_list from + # DataFrameFormatter class and create a + # _get_formatted_column_labels function for code + # parity with DataFrameFormatter class. + if self.fmt.show_index_names: + name = self.columns.names[lnum] + row.append(pprint_thing(name or "")) + else: + row.append("") + + tags = {} + j = len(row) + for i, v in enumerate(values): + if i in records: + if records[i] > 1: + tags[j] = template.format(span=records[i]) + else: + continue + j += 1 + row.append(v) + self.write_tr(row, indent, self.indent_delta, tags=tags, header=True) + else: + # see gh-22579 + # Column misalignment also occurs for + # a standard index when the columns index is named. + # Initially fill row with blank cells before column names. + # TODO: Refactor to remove code duplication with code block + # above for columns MultiIndex. + row = [""] * (self.row_levels - 1) + if self.fmt.index or self.show_col_idx_names: + # see gh-22747 + # If to_html(index_names=False) do not show columns + # index names. + # TODO: Refactor to use _get_column_name_list from + # DataFrameFormatter class. 
+
+                if self.fmt.show_index_names:
+                    row.append(self.columns.name or "")
+                else:
+                    row.append("")
+            row.extend(self._get_columns_formatted_values())
+            align = self.fmt.justify
+
+            if is_truncated_horizontally:
+                ins_col = self.row_levels + self.fmt.tr_col_num
+                row.insert(ins_col, "...")
+
+            self.write_tr(row, indent, self.indent_delta, header=True, align=align)
+
+    def _write_row_header(self, indent: int) -> None:
+        is_truncated_horizontally = self.fmt.is_truncated_horizontally
+        row = [x if x is not None else "" for x in self.frame.index.names] + [""] * (
+            self.ncols + (1 if is_truncated_horizontally else 0)
+        )
+        self.write_tr(row, indent, self.indent_delta, header=True)
+
+    def _write_header(self, indent: int) -> None:
+        self.write("<thead>", indent)
+
+        if self.fmt.header:
+            self._write_col_header(indent + self.indent_delta)
+
+        if self.show_row_idx_names:
+            self._write_row_header(indent + self.indent_delta)
+
+        self.write("</thead>", indent)
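+    # Added illustration (not from the diff): _write_header wraps the column
+    # header rows in a single <thead> element; a quick check through the public
+    # API, as a sketch:
+    #
+    #   import pandas as pd
+    #   html = pd.DataFrame({"A": [1]}).to_html()
+    #   assert html.count("<thead>") == 1 and html.count("</thead>") == 1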
+    table_attributes : str
+        Items that show up in the opening ``<table>`` tag
+        in addition to automatic (by default) id.
+    cell_ids : bool, default True
+        If True, each cell will have an ``id`` attribute in their HTML tag.
+        The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
+        where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
+        number and ``<num_col>`` is the column number.
+    na_rep : str, optional
+        Representation for missing values.
+        If ``na_rep`` is None, no special formatting is applied, and falls back to
+        ``pandas.options.styler.format.na_rep``.
+
+    uuid_len : int, default 5
+        If ``uuid`` is not specified, the length of the ``uuid`` to randomly generate
+        expressed in hex characters, in range [0, 32].
+
+        .. versionadded:: 1.2.0
+
+    decimal : str, optional
+        Character used as decimal separator for floats, complex and integers. If not
+        given uses ``pandas.options.styler.format.decimal``.
+
+        .. versionadded:: 1.3.0
+
+    thousands : str, optional, default None
+        Character used as thousands separator for floats, complex and integers. If not
+        given uses ``pandas.options.styler.format.thousands``.
+
+        .. versionadded:: 1.3.0
+
+    escape : str, optional
+        Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``
+        in cell display string with HTML-safe sequences.
+        Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,
+        ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with
+        LaTeX-safe sequences. Use 'latex-math' to replace the characters
+        the same way as in 'latex' mode, except for math substrings,
+        which either are surrounded by two characters ``$`` or start with
+        the character ``\(`` and end with ``\)``.
+        If not given uses ``pandas.options.styler.format.escape``.
+
+        .. versionadded:: 1.3.0
+    formatter : str, callable, dict, optional
+        Object to define how values are displayed. See ``Styler.format``. If not given
+        uses ``pandas.options.styler.format.formatter``.
+
+        .. versionadded:: 1.4.0
+
+    Attributes
+    ----------
+    env : Jinja2 jinja2.Environment
+    template_html : Jinja2 Template
+    template_html_table : Jinja2 Template
+    template_html_style : Jinja2 Template
+    template_latex : Jinja2 Template
+    loader : Jinja2 Loader
+
+    See Also
+    --------
+    DataFrame.style : Return a Styler object containing methods for building
+        a styled HTML representation for the DataFrame.
+
+    Notes
+    -----
+    Most styling will be done by passing style functions into
+    ``Styler.apply`` or ``Styler.map``. Style functions should
+    return values with strings containing CSS ``'attr: value'`` that will
+    be applied to the indicated cells.
+
+    If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
+    to automatically render itself. Otherwise call Styler.to_html to get
+    the generated HTML.
+
+    CSS classes are attached to the generated HTML
+
+    * Index and Column names include ``index_name`` and ``level<k>``
+      where `k` is its level in a MultiIndex
+    * Index label cells include
+
+      * ``row_heading``
+      * ``row<n>`` where `n` is the numeric position of the row
+      * ``level<k>`` where `k` is the level in a MultiIndex
+
+    * Column label cells include
+
+      * ``col_heading``
+      * ``col<n>`` where `n` is the numeric position of the column
+      * ``level<k>`` where `k` is the level in a MultiIndex
+
+    * Blank cells include ``blank``
+    * Data cells include ``data``
+    * Trimmed cells include ``col_trim`` or ``row_trim``.
+
+    Any, or all, of these classes can be renamed by using the ``css_class_names``
+    argument in ``Styler.set_table_styles``, giving a value such as
+    *{"row": "MY_ROW_CLASS", "col_trim": "", "row_trim": ""}*.
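+
+    As an added illustration (a sketch only; the ``T_``-prefixed ``id`` varies
+    with the generated uuid), the classes listed above can be seen directly in
+    the rendered output:
+
+    >>> html = pd.DataFrame({"A": [1]}).style.to_html()  # doctest: +SKIP
+    >>> 'class="data row0 col0"' in html  # doctest: +SKIP
+    True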
+
+    Examples
+    --------
+    >>> df = pd.DataFrame([[1.0, 2.0, 3.0], [4, 5, 6]], index=['a', 'b'],
+    ...                   columns=['A', 'B', 'C'])
+    >>> pd.io.formats.style.Styler(df, precision=2,
+    ...                            caption="My table")  # doctest: +SKIP
+
+    Please see:
+    `Table Visualization <../../user_guide/style.ipynb>`_ for more examples.
+    """
+
+    def __init__(
+        self,
+        data: DataFrame | Series,
+        precision: int | None = None,
+        table_styles: CSSStyles | None = None,
+        uuid: str | None = None,
+        caption: str | tuple | list | None = None,
+        table_attributes: str | None = None,
+        cell_ids: bool = True,
+        na_rep: str | None = None,
+        uuid_len: int = 5,
+        decimal: str | None = None,
+        thousands: str | None = None,
+        escape: str | None = None,
+        formatter: ExtFormatter | None = None,
+    ) -> None:
+        super().__init__(
+            data=data,
+            uuid=uuid,
+            uuid_len=uuid_len,
+            table_styles=table_styles,
+            table_attributes=table_attributes,
+            caption=caption,
+            cell_ids=cell_ids,
+            precision=precision,
+        )
+
+        # validate ordered args
+        thousands = thousands or get_option("styler.format.thousands")
+        decimal = decimal or get_option("styler.format.decimal")
+        na_rep = na_rep or get_option("styler.format.na_rep")
+        escape = escape or get_option("styler.format.escape")
+        formatter = formatter or get_option("styler.format.formatter")
+        # precision is handled by superclass as default for performance
+
+        self.format(
+            formatter=formatter,
+            precision=precision,
+            na_rep=na_rep,
+            escape=escape,
+            decimal=decimal,
+            thousands=thousands,
+        )
+
+    def concat(self, other: Styler) -> Styler:
+        """
+        Append another Styler to combine the output into a single table.
+
+        .. versionadded:: 1.5.0
+
+        Parameters
+        ----------
+        other : Styler
+            The other Styler object which has already been styled and formatted. The
+            data for this Styler must have the same columns as the original, and the
+            number of index levels must also be the same to render correctly.
+
+        Returns
+        -------
+        Styler
+
+        Notes
+        -----
+        The purpose of this method is to extend existing styled dataframes with other
+        metrics that may be useful but may not conform to the original's structure.
+        For example adding a sub total row, or displaying metrics such as means,
+        variance or counts.
+
+        Styles that are applied using the ``apply``, ``map``, ``apply_index``
+        and ``map_index``, and formatting applied with ``format`` and
+        ``format_index`` will be preserved.
+
+        .. warning::
+            Only the output methods ``to_html``, ``to_string`` and ``to_latex``
+            currently work with concatenated Stylers.
+
+            Other output methods, including ``to_excel``, **do not** work with
+            concatenated Stylers.
+
+        The following should be noted:
+
+        - ``table_styles``, ``table_attributes``, ``caption`` and ``uuid`` are all
+          inherited from the original Styler and not ``other``.
+        - hidden columns and hidden index levels will be inherited from the
+          original Styler
+        - ``css`` will be inherited from the original Styler, and the value of
+          keys ``data``, ``row_heading`` and ``row`` will be prepended with
+          ``foot0_``. If more concats are chained, their styles will be prepended
+          with ``foot1_``, ``foot2_``, etc., and if a concatenated style has
+          another concatenated style, the second style will be prepended with
+          ``foot{parent}_foot{child}_``.
+
+        A common use case is to concatenate user defined functions with
+        ``DataFrame.agg`` or with described statistics via ``DataFrame.describe``.
+        See examples.
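+
+        As a minimal sketch of the validation described above (mirroring the
+        checks in the implementation that follows), passing a plain DataFrame
+        rather than a Styler raises immediately:
+
+        >>> df.style.concat(df.agg(["sum"]))  # doctest: +SKIP
+        Traceback (most recent call last):
+        ...
+        TypeError: `other` must be of type `Styler`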
+
+        Examples
+        --------
+        A common use case is adding totals rows, or otherwise, via methods calculated
+        in ``DataFrame.agg``.
+
+        >>> df = pd.DataFrame([[4, 6], [1, 9], [3, 4], [5, 5], [9, 6]],
+        ...                   columns=["Mike", "Jim"],
+        ...                   index=["Mon", "Tue", "Wed", "Thurs", "Fri"])
+        >>> styler = df.style.concat(df.agg(["sum"]).style)  # doctest: +SKIP
+
+        .. figure:: ../../_static/style/footer_simple.png
+
+        Since the concatenated object is a Styler the existing functionality can be
+        used to conditionally format it as well as the original.
+
+        >>> descriptors = df.agg(["sum", "mean", lambda s: s.dtype])
+        >>> descriptors.index = ["Total", "Average", "dtype"]
+        >>> other = (descriptors.style
+        ...          .highlight_max(axis=1, subset=(["Total", "Average"], slice(None)))
+        ...          .format(subset=("Average", slice(None)), precision=2, decimal=",")
+        ...          .map(lambda v: "font-weight: bold;"))
+        >>> styler = (df.style
+        ...           .highlight_max(color="salmon")
+        ...           .set_table_styles([{"selector": ".foot_row0",
+        ...                               "props": "border-top: 1px solid black;"}]))
+        >>> styler.concat(other)  # doctest: +SKIP
+
+        .. figure:: ../../_static/style/footer_extended.png
+
+        When ``other`` has fewer index levels than the original Styler it is possible
+        to extend the index in ``other``, with placeholder levels.
+
+        >>> df = pd.DataFrame([[1], [2]],
+        ...                   index=pd.MultiIndex.from_product([[0], [1, 2]]))
+        >>> descriptors = df.agg(["sum"])
+        >>> descriptors.index = pd.MultiIndex.from_product([[""], descriptors.index])
+        >>> df.style.concat(descriptors.style)  # doctest: +SKIP
+        """
+        if not isinstance(other, Styler):
+            raise TypeError("`other` must be of type `Styler`")
+        if not self.data.columns.equals(other.data.columns):
+            raise ValueError("`other.data` must have same columns as `Styler.data`")
+        if not self.data.index.nlevels == other.data.index.nlevels:
+            raise ValueError(
+                "number of index levels must be same in `other` "
+                "as in `Styler`. See documentation for suggestions."
+            )
+        self.concatenated.append(other)
+        return self
+
+    def _repr_html_(self) -> str | None:
+        """
+        Hooks into Jupyter notebook rich display system, which calls _repr_html_ by
+        default if an object is returned at the end of a cell.
+        """
+        if get_option("styler.render.repr") == "html":
+            return self.to_html()
+        return None
+
+    def _repr_latex_(self) -> str | None:
+        if get_option("styler.render.repr") == "latex":
+            return self.to_latex()
+        return None
+
+    def set_tooltips(
+        self,
+        ttips: DataFrame,
+        props: CSSProperties | None = None,
+        css_class: str | None = None,
+    ) -> Styler:
+        """
+        Set the DataFrame of strings on ``Styler`` generating ``:hover`` tooltips.
+
+        These string based tooltips are only applicable to ``<td>`` HTML elements,
+        and cannot be used for column or index headers.
+
+        .. versionadded:: 1.3.0
+
+        Parameters
+        ----------
+        ttips : DataFrame
+            DataFrame containing strings that will be translated to tooltips, mapped
+            by identical column and index values that must exist on the underlying
+            Styler data. None, NaN values, and empty strings will be ignored and
+            not affect the rendered HTML.
+        props : list-like or str, optional
+            List of (attr, value) tuples or a valid CSS string. If ``None`` adopts
+            the internal default values described in notes.
+        css_class : str, optional
+            Name of the tooltip class used in CSS, should conform to HTML standards.
+            Only useful if integrating tooltips with external CSS. If ``None`` uses the
+            internal default value 'pd-t'.
+
+        Returns
+        -------
+        Styler
+
+        Notes
+        -----
+        Tooltips are created by adding ``<span class="pd-t"></span>`` to each data cell
+        and then manipulating the table level CSS to attach pseudo hover and pseudo
+        after selectors to produce the required results.
+
+        The default properties for the tooltip CSS class are:
+
+        - visibility: hidden
+        - position: absolute
+        - z-index: 1
+        - background-color: black
+        - color: white
+        - transform: translate(-20px, -20px)
+
+        The property 'visibility: hidden;' is a key prerequisite to the hover
+        functionality, and should always be included in any manual properties
+        specification, using the ``props`` argument.
+
+        Tooltips are not designed to be efficient, and can add large amounts of
+        additional HTML for larger tables, since they also require that ``cell_ids``
+        is forced to `True`.
+
+        Examples
+        --------
+        Basic application
+
+        >>> df = pd.DataFrame(data=[[0, 1], [2, 3]])
+        >>> ttips = pd.DataFrame(
+        ...     data=[["Min", ""], [np.nan, "Max"]], columns=df.columns, index=df.index
+        ... )
+        >>> s = df.style.set_tooltips(ttips).to_html()
+
+        Optionally controlling the tooltip visual display
+
+        >>> df.style.set_tooltips(ttips, css_class='tt-add', props=[
+        ...     ('visibility', 'hidden'),
+        ...     ('position', 'absolute'),
+        ...     ('z-index', 1)])  # doctest: +SKIP
+        >>> df.style.set_tooltips(ttips, css_class='tt-add',
+        ...     props='visibility:hidden; position:absolute; z-index:1;')
+        ...     # doctest: +SKIP
+        """
+        if not self.cell_ids:
+            # tooltips not optimised for individual cell check. requires reasonable
+            # redesign and more extensive code for a feature that might be rarely used.
+            raise NotImplementedError(
+                "Tooltips can only render when 'cell_ids' is True."
+            )
+        if not ttips.index.is_unique or not ttips.columns.is_unique:
+            raise KeyError(
+                "Tooltips render only if `ttips` has unique index and columns."
+ ) + if self.tooltips is None: # create a default instance if necessary + self.tooltips = Tooltips() + self.tooltips.tt_data = ttips + if props: + self.tooltips.class_properties = props + if css_class: + self.tooltips.class_name = css_class + + return self + + @doc( + NDFrame.to_excel, + klass="Styler", + storage_options=_shared_docs["storage_options"], + storage_options_versionadded="1.5.0", + ) + def to_excel( + self, + excel_writer: FilePath | WriteExcelBuffer | ExcelWriter, + sheet_name: str = "Sheet1", + na_rep: str = "", + float_format: str | None = None, + columns: Sequence[Hashable] | None = None, + header: Sequence[Hashable] | bool = True, + index: bool = True, + index_label: IndexLabel | None = None, + startrow: int = 0, + startcol: int = 0, + engine: str | None = None, + merge_cells: bool = True, + encoding: str | None = None, + inf_rep: str = "inf", + verbose: bool = True, + freeze_panes: tuple[int, int] | None = None, + storage_options: StorageOptions | None = None, + ) -> None: + from pandas.io.formats.excel import ExcelFormatter + + formatter = ExcelFormatter( + self, + na_rep=na_rep, + cols=columns, + header=header, + float_format=float_format, + index=index, + index_label=index_label, + merge_cells=merge_cells, + inf_rep=inf_rep, + ) + formatter.write( + excel_writer, + sheet_name=sheet_name, + startrow=startrow, + startcol=startcol, + freeze_panes=freeze_panes, + engine=engine, + storage_options=storage_options, + ) + + @overload + def to_latex( + self, + buf: FilePath | WriteBuffer[str], + *, + column_format: str | None = ..., + position: str | None = ..., + position_float: str | None = ..., + hrules: bool | None = ..., + clines: str | None = ..., + label: str | None = ..., + caption: str | tuple | None = ..., + sparse_index: bool | None = ..., + sparse_columns: bool | None = ..., + multirow_align: str | None = ..., + multicol_align: str | None = ..., + siunitx: bool = ..., + environment: str | None = ..., + encoding: str | None = ..., + convert_css: bool = ..., + ) -> None: + ... + + @overload + def to_latex( + self, + buf: None = ..., + *, + column_format: str | None = ..., + position: str | None = ..., + position_float: str | None = ..., + hrules: bool | None = ..., + clines: str | None = ..., + label: str | None = ..., + caption: str | tuple | None = ..., + sparse_index: bool | None = ..., + sparse_columns: bool | None = ..., + multirow_align: str | None = ..., + multicol_align: str | None = ..., + siunitx: bool = ..., + environment: str | None = ..., + encoding: str | None = ..., + convert_css: bool = ..., + ) -> str: + ... + + def to_latex( + self, + buf: FilePath | WriteBuffer[str] | None = None, + *, + column_format: str | None = None, + position: str | None = None, + position_float: str | None = None, + hrules: bool | None = None, + clines: str | None = None, + label: str | None = None, + caption: str | tuple | None = None, + sparse_index: bool | None = None, + sparse_columns: bool | None = None, + multirow_align: str | None = None, + multicol_align: str | None = None, + siunitx: bool = False, + environment: str | None = None, + encoding: str | None = None, + convert_css: bool = False, + ) -> str | None: + r""" + Write Styler to a file, buffer or string in LaTeX format. + + .. versionadded:: 1.3.0 + + Parameters + ---------- + buf : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``write()`` function. If None, the result is + returned as a string. 
+        column_format : str, optional
+            The LaTeX column specification placed in location:
+
+            \\begin{tabular}{<column_format>}
+
+            Defaults to 'l' for index and
+            non-numeric data columns, and, for numeric data columns,
+            to 'r' by default, or 'S' if ``siunitx`` is ``True``.
+        position : str, optional
+            The LaTeX positional argument (e.g. 'h!') for tables, placed in location:
+
+            ``\\begin{table}[<position>]``.
+        position_float : {"centering", "raggedleft", "raggedright"}, optional
+            The LaTeX float command placed in location:
+
+            \\begin{table}[<position>]
+
+            \\<position_float>
+
+            Cannot be used if ``environment`` is "longtable".
+        hrules : bool
+            Set to `True` to add \\toprule, \\midrule and \\bottomrule from the
+            {booktabs} LaTeX package.
+            Defaults to ``pandas.options.styler.latex.hrules``, which is `False`.
+
+            .. versionchanged:: 1.4.0
+        clines : str, optional
+            Use to control adding \\cline commands for the index labels separation.
+            Possible values are:
+
+            - `None`: no cline commands are added (default).
+            - `"all;data"`: a cline is added for every index value extending the
+              width of the table, including data entries.
+            - `"all;index"`: as above with lines extending only the width of the
+              index entries.
+            - `"skip-last;data"`: a cline is added for each index value except the
+              last level (which is never sparsified), extending the width of the
+              table.
+            - `"skip-last;index"`: as above with lines extending only the width of the
+              index entries.
+
+            .. versionadded:: 1.4.0
+        label : str, optional
+            The LaTeX label included as: \\label{<label>}.
+            This is used with \\ref{<label>} in the main .tex file.
+        caption : str, tuple, optional
+            If string, the LaTeX table caption included as: \\caption{<caption>}.
+            If tuple, i.e ("full caption", "short caption"), the caption included
+            as: \\caption[<short caption>]{<full caption>}.
+        sparse_index : bool, optional
+            Whether to sparsify the display of a hierarchical index. Setting to False
+            will display each explicit level element in a hierarchical key for each row.
+            Defaults to ``pandas.options.styler.sparse.index``, which is `True`.
+        sparse_columns : bool, optional
+            Whether to sparsify the display of a hierarchical index. Setting to False
+            will display each explicit level element in a hierarchical key for each
+            column. Defaults to ``pandas.options.styler.sparse.columns``, which
+            is `True`.
+        multirow_align : {"c", "t", "b", "naive"}, optional
+            If sparsifying hierarchical MultiIndexes whether to align text centrally,
+            at the top or bottom using the multirow package. If not given defaults to
+            ``pandas.options.styler.latex.multirow_align``, which is `"c"`.
+            If "naive" is given renders without multirow.
+
+            .. versionchanged:: 1.4.0
+        multicol_align : {"r", "c", "l", "naive-l", "naive-r"}, optional
+            If sparsifying hierarchical MultiIndex columns whether to align text at
+            the left, centrally, or at the right. If not given defaults to
+            ``pandas.options.styler.latex.multicol_align``, which is "r".
+            If a naive option is given renders without multicol.
+            Pipe decorators can also be added to non-naive values to draw vertical
+            rules, e.g. "\|r" will draw a rule on the left side of right aligned merged
+            cells.
+
+            .. versionchanged:: 1.4.0
+        siunitx : bool, default False
+            Set to ``True`` to structure LaTeX compatible with the {siunitx} package.
+        environment : str, optional
+            If given, the environment that will replace 'table' in ``\\begin{table}``.
+            If 'longtable' is specified then a more suitable template is
+            rendered. If not given defaults to
+            ``pandas.options.styler.latex.environment``, which is `None`.
+
+            .. versionadded:: 1.4.0
+        encoding : str, optional
+            Character encoding setting. Defaults
+            to ``pandas.options.styler.render.encoding``, which is "utf-8".
+        convert_css : bool, default False
+            Convert simple cell-styles from CSS to LaTeX format. Any CSS not found in
+            conversion table is dropped. A style can be forced by adding option
+            `--latex`. See notes.
+
+        Returns
+        -------
+        str or None
+            If `buf` is None, returns the result as a string. Otherwise returns `None`.
+
+        See Also
+        --------
+        Styler.format: Format the text display value of cells.
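+
+        For orientation, a minimal round trip using only arguments documented
+        above (a sketch; the exact numeric formatting follows Styler defaults):
+
+        >>> df = pd.DataFrame({"A": [1.0, 2.0]})
+        >>> print(df.style.format(precision=1).to_latex(hrules=True))
+        ...  # doctest: +SKIP
+        \begin{tabular}{lr}
+        \toprule
+         & A \\
+        \midrule
+        0 & 1.0 \\
+        1 & 2.0 \\
+        \bottomrule
+        \end{tabular}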
+
+        Notes
+        -----
+        **Latex Packages**
+
+        For the following features we recommend the following LaTeX inclusions:
+
+        ===================== ==========================================================
+        Feature               Inclusion
+        ===================== ==========================================================
+        sparse columns        none: included within default {tabular} environment
+        sparse rows           \\usepackage{multirow}
+        hrules                \\usepackage{booktabs}
+        colors                \\usepackage[table]{xcolor}
+        siunitx               \\usepackage{siunitx}
+        bold (with siunitx)   | \\usepackage{etoolbox}
+                              | \\robustify\\bfseries
+                              | \\sisetup{detect-all = true}  *(within {document})*
+        italic (with siunitx) | \\usepackage{etoolbox}
+                              | \\robustify\\itshape
+                              | \\sisetup{detect-all = true}  *(within {document})*
+        environment           \\usepackage{longtable} if arg is "longtable"
+                              | or any other relevant environment package
+        hyperlinks            \\usepackage{hyperref}
+        ===================== ==========================================================
+
+        **Cell Styles**
+
+        LaTeX styling can only be rendered if the accompanying styling functions have
+        been constructed with appropriate LaTeX commands. All styling
+        functionality is built around the concept of a CSS ``(<attribute>, <value>)``
+        pair (see `Table Visualization <../../user_guide/style.ipynb>`_), and this
+        should be replaced by a LaTeX
+        ``(<command>, <options>)`` approach. Each cell will be styled individually
+        using nested LaTeX commands with their accompanied options.
+
+        For example the following code will highlight and bold a cell in HTML-CSS:
+
+        >>> df = pd.DataFrame([[1, 2], [3, 4]])
+        >>> s = df.style.highlight_max(axis=None,
+        ...                            props='background-color:red; font-weight:bold;')
+        >>> s.to_html()  # doctest: +SKIP
+
+        The equivalent using LaTeX only commands is the following:
+
+        >>> s = df.style.highlight_max(axis=None,
+        ...                            props='cellcolor:{red}; bfseries: ;')
+        >>> s.to_latex()  # doctest: +SKIP
+
+        Internally these structured LaTeX ``(<command>, <options>)`` pairs
+        are translated to the
+        ``display_value`` with the default structure:
+        ``\<command><options> <display_value>``.
+        Where there are multiple commands the latter is nested recursively, so that
+        the above example highlighted cell is rendered as
+        ``\cellcolor{red} \bfseries 4``.
+
+        Occasionally this format does not suit the applied command, or
+        combination of LaTeX packages that is in use, so additional flags can be
+        added to the ``<options>``, within the tuple, to result in different
+        positions of required braces (the **default** being the same as ``--nowrap``):
+
+        =================================== ============================================
+        Tuple Format                        Output Structure
+        =================================== ============================================
+        (<command>,<options>)               \\<command><options> <display_value>
+        (<command>,<options> ``--nowrap``)  \\<command><options> <display_value>
+        (<command>,<options> ``--rwrap``)   \\<command><options>{<display_value>}
+        (<command>,<options> ``--wrap``)    {\\<command><options> <display_value>}
+        (<command>,<options> ``--lwrap``)   {\\<command><options>} <display_value>
+        (<command>,<options> ``--dwrap``)   {\\<command><options>}{<display_value>}
+        =================================== ============================================
+
+        For example the `textbf` command for font-weight
+        should always be used with `--rwrap` so ``('textbf', '--rwrap')`` will render a
+        working cell, wrapped with braces, as ``\textbf{<display_value>}``.
+
+        A more comprehensive example is as follows:
+
+        >>> df = pd.DataFrame([[1, 2.2, "dogs"], [3, 4.4, "cats"], [2, 6.6, "cows"]],
+        ...                   index=["ix1", "ix2", "ix3"],
+        ...                   columns=["Integers", "Floats", "Strings"])
+        >>> s = df.style.highlight_max(
+        ...     props='cellcolor:[HTML]{FFFF00}; color:{red};'
+        ...           'textit:--rwrap; textbf:--rwrap;'
+        ... )
+        >>> s.to_latex()  # doctest: +SKIP
+
+        .. figure:: ../../_static/style/latex_1.png
+
+        **Table Styles**
+
+        Internally Styler uses its ``table_styles`` object to parse the
+        ``column_format``, ``position``, ``position_float``, and ``label``
+        input arguments. These arguments are added to table styles in the format:
+
+        .. code-block:: python
+
+            set_table_styles([
+                {"selector": "column_format", "props": f":{column_format};"},
+                {"selector": "position", "props": f":{position};"},
+                {"selector": "position_float", "props": f":{position_float};"},
+                {"selector": "label", "props": f":{{{label.replace(':', '§')}}};"}
+            ], overwrite=False)
+
+        Exception is made for the ``hrules`` argument which, in fact, controls all three
+        commands: ``toprule``, ``bottomrule`` and ``midrule`` simultaneously. Instead of
+        setting ``hrules`` to ``True``, it is also possible to set each
+        individual rule definition, by manually setting the ``table_styles``,
+        for example below we set a regular ``toprule``, set an ``hline`` for
+        ``bottomrule`` and exclude the ``midrule``:
+
+        .. code-block:: python
+
+            set_table_styles([
+                {'selector': 'toprule', 'props': ':toprule;'},
+                {'selector': 'bottomrule', 'props': ':hline;'},
+            ], overwrite=False)
+
+        If other ``commands`` are added to table styles they will be detected, and
+        positioned immediately above the '\\begin{tabular}' command. For example to
+        add odd and even row coloring, from the {colortbl} package, in format
+        ``\rowcolors{1}{pink}{red}``, use:
+
+        .. code-block:: python
+
+            set_table_styles([
+                {'selector': 'rowcolors', 'props': ':{1}{pink}{red};'}
+            ], overwrite=False)
+
+        A more comprehensive example using these arguments is as follows:
+
+        >>> df.columns = pd.MultiIndex.from_tuples([
+        ...     ("Numeric", "Integers"),
+        ...     ("Numeric", "Floats"),
+        ...     ("Non-Numeric", "Strings")
+        ... ])
+        >>> df.index = pd.MultiIndex.from_tuples([
+        ...     ("L0", "ix1"), ("L0", "ix2"), ("L1", "ix3")
+        ... ])
+        >>> s = df.style.highlight_max(
+        ...     props='cellcolor:[HTML]{FFFF00}; color:{red}; itshape:; bfseries:;'
+        ... )
+        >>> s.to_latex(
+        ...     column_format="rrrrr", position="h", position_float="centering",
+        ...     hrules=True, label="table:5", caption="Styled LaTeX Table",
+        ...     multirow_align="t", multicol_align="r"
+        ... )  # doctest: +SKIP
+
+        .. figure:: ../../_static/style/latex_2.png
+
+        **Formatting**
+
+        To format values :meth:`Styler.format` should be used prior to calling
+        `Styler.to_latex`, as well as other methods such as :meth:`Styler.hide`
+        for example:
+
+        >>> s.clear()
+        >>> s.table_styles = []
+        >>> s.caption = None
+        >>> s.format({
+        ...     ("Numeric", "Integers"): '\${}',
+        ...     ("Numeric", "Floats"): '{:.3f}',
+        ...     ("Non-Numeric", "Strings"): str.upper
+        ... })  # doctest: +SKIP
+                        Numeric      Non-Numeric
+                  Integers  Floats     Strings
+        L0    ix1       $1   2.200        DOGS
+              ix2       $3   4.400        CATS
+        L1    ix3       $2   6.600        COWS
+
+        >>> s.to_latex()  # doctest: +SKIP
+        \begin{tabular}{llrrl}
+        {} & {} & \multicolumn{2}{r}{Numeric} & {Non-Numeric} \\
+        {} & {} & {Integers} & {Floats} & {Strings} \\
+        \multirow[c]{2}{*}{L0} & ix1 & \$1 & 2.200 & DOGS \\
+         & ix2 & \$3 & 4.400 & CATS \\
+        L1 & ix3 & \$2 & 6.600 & COWS \\
+        \end{tabular}
+
+        **CSS Conversion**
+
+        This method can convert a Styler constructed with HTML-CSS to LaTeX using
+        the following limited conversions.
+ + ================== ==================== ============= ========================== + CSS Attribute CSS value LaTeX Command LaTeX Options + ================== ==================== ============= ========================== + font-weight | bold | bfseries + | bolder | bfseries + font-style | italic | itshape + | oblique | slshape + background-color | red cellcolor | {red}--lwrap + | #fe01ea | [HTML]{FE01EA}--lwrap + | #f0e | [HTML]{FF00EE}--lwrap + | rgb(128,255,0) | [rgb]{0.5,1,0}--lwrap + | rgba(128,0,0,0.5) | [rgb]{0.5,0,0}--lwrap + | rgb(25%,255,50%) | [rgb]{0.25,1,0.5}--lwrap + color | red color | {red} + | #fe01ea | [HTML]{FE01EA} + | #f0e | [HTML]{FF00EE} + | rgb(128,255,0) | [rgb]{0.5,1,0} + | rgba(128,0,0,0.5) | [rgb]{0.5,0,0} + | rgb(25%,255,50%) | [rgb]{0.25,1,0.5} + ================== ==================== ============= ========================== + + It is also possible to add user-defined LaTeX only styles to a HTML-CSS Styler + using the ``--latex`` flag, and to add LaTeX parsing options that the + converter will detect within a CSS-comment. + + >>> df = pd.DataFrame([[1]]) + >>> df.style.set_properties( + ... **{"font-weight": "bold /* --dwrap */", "Huge": "--latex--rwrap"} + ... ).to_latex(convert_css=True) # doctest: +SKIP + \begin{tabular}{lr} + {} & {0} \\ + 0 & {\bfseries}{\Huge{1}} \\ + \end{tabular} + + Examples + -------- + Below we give a complete step by step example adding some advanced features + and noting some common gotchas. + + First we create the DataFrame and Styler as usual, including MultiIndex rows + and columns, which allow for more advanced formatting options: + + >>> cidx = pd.MultiIndex.from_arrays([ + ... ["Equity", "Equity", "Equity", "Equity", + ... "Stats", "Stats", "Stats", "Stats", "Rating"], + ... ["Energy", "Energy", "Consumer", "Consumer", "", "", "", "", ""], + ... ["BP", "Shell", "H&M", "Unilever", + ... "Std Dev", "Variance", "52w High", "52w Low", ""] + ... ]) + >>> iidx = pd.MultiIndex.from_arrays([ + ... ["Equity", "Equity", "Equity", "Equity"], + ... ["Energy", "Energy", "Consumer", "Consumer"], + ... ["BP", "Shell", "H&M", "Unilever"] + ... ]) + >>> styler = pd.DataFrame([ + ... [1, 0.8, 0.66, 0.72, 32.1678, 32.1678**2, 335.12, 240.89, "Buy"], + ... [0.8, 1.0, 0.69, 0.79, 1.876, 1.876**2, 14.12, 19.78, "Hold"], + ... [0.66, 0.69, 1.0, 0.86, 7, 7**2, 210.9, 140.6, "Buy"], + ... [0.72, 0.79, 0.86, 1.0, 213.76, 213.76**2, 2807, 3678, "Sell"], + ... ], columns=cidx, index=iidx).style + + Second we will format the display and, since our table is quite wide, will + hide the repeated level-0 of the index: + + >>> (styler.format(subset="Equity", precision=2) + ... .format(subset="Stats", precision=1, thousands=",") + ... .format(subset="Rating", formatter=str.upper) + ... .format_index(escape="latex", axis=1) + ... .format_index(escape="latex", axis=0) + ... .hide(level=0, axis=0)) # doctest: +SKIP + + Note that one of the string entries of the index and column headers is "H&M". + Without applying the `escape="latex"` option to the `format_index` method the + resultant LaTeX will fail to render, and the error returned is quite + difficult to debug. Using the appropriate escape the "&" is converted to "\\&". + + Thirdly we will apply some (CSS-HTML) styles to our object. We will use a + builtin method and also define our own method to highlight the stock + recommendation: + + >>> def rating_color(v): + ... if v == "Buy": color = "#33ff85" + ... elif v == "Sell": color = "#ff5933" + ... else: color = "#ffdd33" + ... 
return f"color: {color}; font-weight: bold;" + >>> (styler.background_gradient(cmap="inferno", subset="Equity", vmin=0, vmax=1) + ... .map(rating_color, subset="Rating")) # doctest: +SKIP + + All the above styles will work with HTML (see below) and LaTeX upon conversion: + + .. figure:: ../../_static/style/latex_stocks_html.png + + However, we finally want to add one LaTeX only style + (from the {graphicx} package), that is not easy to convert from CSS and + pandas does not support it. Notice the `--latex` flag used here, + as well as `--rwrap` to ensure this is formatted correctly and + not ignored upon conversion. + + >>> styler.map_index( + ... lambda v: "rotatebox:{45}--rwrap--latex;", level=2, axis=1 + ... ) # doctest: +SKIP + + Finally we render our LaTeX adding in other options as required: + + >>> styler.to_latex( + ... caption="Selected stock correlation and simple statistics.", + ... clines="skip-last;data", + ... convert_css=True, + ... position_float="centering", + ... multicol_align="|c|", + ... hrules=True, + ... ) # doctest: +SKIP + \begin{table} + \centering + \caption{Selected stock correlation and simple statistics.} + \begin{tabular}{llrrrrrrrrl} + \toprule + & & \multicolumn{4}{|c|}{Equity} & \multicolumn{4}{|c|}{Stats} & Rating \\ + & & \multicolumn{2}{|c|}{Energy} & \multicolumn{2}{|c|}{Consumer} & + \multicolumn{4}{|c|}{} & \\ + & & \rotatebox{45}{BP} & \rotatebox{45}{Shell} & \rotatebox{45}{H\&M} & + \rotatebox{45}{Unilever} & \rotatebox{45}{Std Dev} & \rotatebox{45}{Variance} & + \rotatebox{45}{52w High} & \rotatebox{45}{52w Low} & \rotatebox{45}{} \\ + \midrule + \multirow[c]{2}{*}{Energy} & BP & {\cellcolor[HTML]{FCFFA4}} + \color[HTML]{000000} 1.00 & {\cellcolor[HTML]{FCA50A}} \color[HTML]{000000} + 0.80 & {\cellcolor[HTML]{EB6628}} \color[HTML]{F1F1F1} 0.66 & + {\cellcolor[HTML]{F68013}} \color[HTML]{F1F1F1} 0.72 & 32.2 & 1,034.8 & 335.1 + & 240.9 & \color[HTML]{33FF85} \bfseries BUY \\ + & Shell & {\cellcolor[HTML]{FCA50A}} \color[HTML]{000000} 0.80 & + {\cellcolor[HTML]{FCFFA4}} \color[HTML]{000000} 1.00 & + {\cellcolor[HTML]{F1731D}} \color[HTML]{F1F1F1} 0.69 & + {\cellcolor[HTML]{FCA108}} \color[HTML]{000000} 0.79 & 1.9 & 3.5 & 14.1 & + 19.8 & \color[HTML]{FFDD33} \bfseries HOLD \\ + \cline{1-11} + \multirow[c]{2}{*}{Consumer} & H\&M & {\cellcolor[HTML]{EB6628}} + \color[HTML]{F1F1F1} 0.66 & {\cellcolor[HTML]{F1731D}} \color[HTML]{F1F1F1} + 0.69 & {\cellcolor[HTML]{FCFFA4}} \color[HTML]{000000} 1.00 & + {\cellcolor[HTML]{FAC42A}} \color[HTML]{000000} 0.86 & 7.0 & 49.0 & 210.9 & + 140.6 & \color[HTML]{33FF85} \bfseries BUY \\ + & Unilever & {\cellcolor[HTML]{F68013}} \color[HTML]{F1F1F1} 0.72 & + {\cellcolor[HTML]{FCA108}} \color[HTML]{000000} 0.79 & + {\cellcolor[HTML]{FAC42A}} \color[HTML]{000000} 0.86 & + {\cellcolor[HTML]{FCFFA4}} \color[HTML]{000000} 1.00 & 213.8 & 45,693.3 & + 2,807.0 & 3,678.0 & \color[HTML]{FF5933} \bfseries SELL \\ + \cline{1-11} + \bottomrule + \end{tabular} + \end{table} + + .. 
figure:: ../../_static/style/latex_stocks.png + """ + obj = self._copy(deepcopy=True) # manipulate table_styles on obj, not self + + table_selectors = ( + [style["selector"] for style in self.table_styles] + if self.table_styles is not None + else [] + ) + + if column_format is not None: + # add more recent setting to table_styles + obj.set_table_styles( + [{"selector": "column_format", "props": f":{column_format}"}], + overwrite=False, + ) + elif "column_format" in table_selectors: + pass # adopt what has been previously set in table_styles + else: + # create a default: set float, complex, int cols to 'r' ('S'), index to 'l' + _original_columns = self.data.columns + self.data.columns = RangeIndex(stop=len(self.data.columns)) + numeric_cols = self.data._get_numeric_data().columns.to_list() + self.data.columns = _original_columns + column_format = "" + for level in range(self.index.nlevels): + column_format += "" if self.hide_index_[level] else "l" + for ci, _ in enumerate(self.data.columns): + if ci not in self.hidden_columns: + column_format += ( + ("r" if not siunitx else "S") if ci in numeric_cols else "l" + ) + obj.set_table_styles( + [{"selector": "column_format", "props": f":{column_format}"}], + overwrite=False, + ) + + if position: + obj.set_table_styles( + [{"selector": "position", "props": f":{position}"}], + overwrite=False, + ) + + if position_float: + if environment == "longtable": + raise ValueError( + "`position_float` cannot be used in 'longtable' `environment`" + ) + if position_float not in ["raggedright", "raggedleft", "centering"]: + raise ValueError( + f"`position_float` should be one of " + f"'raggedright', 'raggedleft', 'centering', " + f"got: '{position_float}'" + ) + obj.set_table_styles( + [{"selector": "position_float", "props": f":{position_float}"}], + overwrite=False, + ) + + hrules = get_option("styler.latex.hrules") if hrules is None else hrules + if hrules: + obj.set_table_styles( + [ + {"selector": "toprule", "props": ":toprule"}, + {"selector": "midrule", "props": ":midrule"}, + {"selector": "bottomrule", "props": ":bottomrule"}, + ], + overwrite=False, + ) + + if label: + obj.set_table_styles( + [{"selector": "label", "props": f":{{{label.replace(':', '§')}}}"}], + overwrite=False, + ) + + if caption: + obj.set_caption(caption) + + if sparse_index is None: + sparse_index = get_option("styler.sparse.index") + if sparse_columns is None: + sparse_columns = get_option("styler.sparse.columns") + environment = environment or get_option("styler.latex.environment") + multicol_align = multicol_align or get_option("styler.latex.multicol_align") + multirow_align = multirow_align or get_option("styler.latex.multirow_align") + latex = obj._render_latex( + sparse_index=sparse_index, + sparse_columns=sparse_columns, + multirow_align=multirow_align, + multicol_align=multicol_align, + environment=environment, + convert_css=convert_css, + siunitx=siunitx, + clines=clines, + ) + + encoding = ( + (encoding or get_option("styler.render.encoding")) + if isinstance(buf, str) # i.e. 
+            # i.e. a filepath
+            else encoding
+        )
+        return save_to_buffer(latex, buf=buf, encoding=encoding)
+
+    @overload
+    def to_html(
+        self,
+        buf: FilePath | WriteBuffer[str],
+        *,
+        table_uuid: str | None = ...,
+        table_attributes: str | None = ...,
+        sparse_index: bool | None = ...,
+        sparse_columns: bool | None = ...,
+        bold_headers: bool = ...,
+        caption: str | None = ...,
+        max_rows: int | None = ...,
+        max_columns: int | None = ...,
+        encoding: str | None = ...,
+        doctype_html: bool = ...,
+        exclude_styles: bool = ...,
+        **kwargs,
+    ) -> None:
+        ...
+
+    @overload
+    def to_html(
+        self,
+        buf: None = ...,
+        *,
+        table_uuid: str | None = ...,
+        table_attributes: str | None = ...,
+        sparse_index: bool | None = ...,
+        sparse_columns: bool | None = ...,
+        bold_headers: bool = ...,
+        caption: str | None = ...,
+        max_rows: int | None = ...,
+        max_columns: int | None = ...,
+        encoding: str | None = ...,
+        doctype_html: bool = ...,
+        exclude_styles: bool = ...,
+        **kwargs,
+    ) -> str:
+        ...
+
+    @Substitution(buf=buffering_args, encoding=encoding_args)
+    def to_html(
+        self,
+        buf: FilePath | WriteBuffer[str] | None = None,
+        *,
+        table_uuid: str | None = None,
+        table_attributes: str | None = None,
+        sparse_index: bool | None = None,
+        sparse_columns: bool | None = None,
+        bold_headers: bool = False,
+        caption: str | None = None,
+        max_rows: int | None = None,
+        max_columns: int | None = None,
+        encoding: str | None = None,
+        doctype_html: bool = False,
+        exclude_styles: bool = False,
+        **kwargs,
+    ) -> str | None:
+        """
+        Write Styler to a file, buffer or string in HTML-CSS format.
+
+        .. versionadded:: 1.3.0
+
+        Parameters
+        ----------
+        %(buf)s
+        table_uuid : str, optional
+            Id attribute assigned to the <table> HTML element in the format:
+
+            ``<table id="T_<table_uuid>" ..>``
+
+            If not given uses Styler's initially assigned value.
+        table_attributes : str, optional
+            Attributes to assign within the `<table>` HTML element in the format:
+
+            ``<table .. <table_attributes> >``
+
+            If not given defaults to Styler's preexisting value.
+        sparse_index : bool, optional
+            Whether to sparsify the display of a hierarchical index. Setting to False
+            will display each explicit level element in a hierarchical key for each row.
+            Defaults to ``pandas.options.styler.sparse.index`` value.
+
+            .. versionadded:: 1.4.0
+        sparse_columns : bool, optional
+            Whether to sparsify the display of a hierarchical index. Setting to False
+            will display each explicit level element in a hierarchical key for each
+            column. Defaults to ``pandas.options.styler.sparse.columns`` value.
+
+            .. versionadded:: 1.4.0
+        bold_headers : bool, optional
+            Adds "font-weight: bold;" as a CSS property to table style header cells.
+
+            .. versionadded:: 1.4.0
+        caption : str, optional
+            Set, or overwrite, the caption on Styler before rendering.
+
+            .. versionadded:: 1.4.0
+        max_rows : int, optional
+            The maximum number of rows that will be rendered. Defaults to
+            ``pandas.options.styler.render.max_rows/max_columns``.
+
+            .. versionadded:: 1.4.0
+        max_columns : int, optional
+            The maximum number of columns that will be rendered. Defaults to
+            ``pandas.options.styler.render.max_columns``, which is None.
+
+            Rows and columns may be reduced if the number of total elements is
+            large. This value is set to ``pandas.options.styler.render.max_elements``,
+            which is 262144 (18 bit browser rendering).
+
+            .. versionadded:: 1.4.0
+        %(encoding)s
+        doctype_html : bool, default False
+            Whether to output a fully structured HTML file including all
+            HTML elements, or just the core ``<table>`` HTML element.
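+
+        A short usage sketch (added for illustration; it uses only parameters
+        documented above, and the generated ``id`` depends on the random uuid):
+
+        >>> html = df.style.to_html(table_attributes='class="pure-table"')
+        ...  # doctest: +SKIP
+        >>> html.startswith('<style')  # doctest: +SKIP
+        True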
+ + + + + + + ... + """ + obj = self._copy(deepcopy=True) # manipulate table_styles on obj, not self + + if table_uuid: + obj.set_uuid(table_uuid) + + if table_attributes: + obj.set_table_attributes(table_attributes) + + if sparse_index is None: + sparse_index = get_option("styler.sparse.index") + if sparse_columns is None: + sparse_columns = get_option("styler.sparse.columns") + + if bold_headers: + obj.set_table_styles( + [{"selector": "th", "props": "font-weight: bold;"}], overwrite=False + ) + + if caption is not None: + obj.set_caption(caption) + + # Build HTML string.. + html = obj._render_html( + sparse_index=sparse_index, + sparse_columns=sparse_columns, + max_rows=max_rows, + max_cols=max_columns, + exclude_styles=exclude_styles, + encoding=encoding or get_option("styler.render.encoding"), + doctype_html=doctype_html, + **kwargs, + ) + + return save_to_buffer( + html, buf=buf, encoding=(encoding if buf is not None else None) + ) + + @overload + def to_string( + self, + buf: FilePath | WriteBuffer[str], + *, + encoding: str | None = ..., + sparse_index: bool | None = ..., + sparse_columns: bool | None = ..., + max_rows: int | None = ..., + max_columns: int | None = ..., + delimiter: str = ..., + ) -> None: + ... + + @overload + def to_string( + self, + buf: None = ..., + *, + encoding: str | None = ..., + sparse_index: bool | None = ..., + sparse_columns: bool | None = ..., + max_rows: int | None = ..., + max_columns: int | None = ..., + delimiter: str = ..., + ) -> str: + ... + + @Substitution(buf=buffering_args, encoding=encoding_args) + def to_string( + self, + buf: FilePath | WriteBuffer[str] | None = None, + *, + encoding: str | None = None, + sparse_index: bool | None = None, + sparse_columns: bool | None = None, + max_rows: int | None = None, + max_columns: int | None = None, + delimiter: str = " ", + ) -> str | None: + """ + Write Styler to a file, buffer or string in text format. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + %(buf)s + %(encoding)s + sparse_index : bool, optional + Whether to sparsify the display of a hierarchical index. Setting to False + will display each explicit level element in a hierarchical key for each row. + Defaults to ``pandas.options.styler.sparse.index`` value. + sparse_columns : bool, optional + Whether to sparsify the display of a hierarchical index. Setting to False + will display each explicit level element in a hierarchical key for each + column. Defaults to ``pandas.options.styler.sparse.columns`` value. + max_rows : int, optional + The maximum number of rows that will be rendered. Defaults to + ``pandas.options.styler.render.max_rows``, which is None. + max_columns : int, optional + The maximum number of columns that will be rendered. Defaults to + ``pandas.options.styler.render.max_columns``, which is None. + + Rows and columns may be reduced if the number of total elements is + large. This value is set to ``pandas.options.styler.render.max_elements``, + which is 262144 (18 bit browser rendering). + delimiter : str, default single space + The separator between data elements. + + Returns + ------- + str or None + If `buf` is None, returns the result as a string. Otherwise returns `None`. 
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
+        >>> df.style.to_string()
+        ' A B\\n0 1 3\\n1 2 4\\n'
+        """
+        obj = self._copy(deepcopy=True)
+
+        if sparse_index is None:
+            sparse_index = get_option("styler.sparse.index")
+        if sparse_columns is None:
+            sparse_columns = get_option("styler.sparse.columns")
+
+        text = obj._render_string(
+            sparse_columns=sparse_columns,
+            sparse_index=sparse_index,
+            max_rows=max_rows,
+            max_cols=max_columns,
+            delimiter=delimiter,
+        )
+        return save_to_buffer(
+            text, buf=buf, encoding=(encoding if buf is not None else None)
+        )
+
+    def set_td_classes(self, classes: DataFrame) -> Styler:
+        """
+        Set the ``class`` attribute of ``<td>`` HTML elements.
+
+        Parameters
+        ----------
+        classes : DataFrame
+            DataFrame containing strings that will be translated to CSS classes,
+            mapped by identical column and index key values that must exist on the
+            underlying Styler data. None, NaN values, and empty strings will
+            be ignored and not affect the rendered HTML.
+
+        Returns
+        -------
+        Styler
+
+        See Also
+        --------
+        Styler.set_table_styles: Set the table styles included within the
+            ``<style>`` HTML element.
+
+        Examples
+        --------
+        Form of the output with new additional css classes,
+
+        >>> from pandas.io.formats.style import Styler
+        >>> df = pd.DataFrame([[1]])
+        >>> css = pd.DataFrame([["other-class"]])
+        >>> s = Styler(df, uuid="_", cell_ids=False).set_td_classes(css)
+        >>> s.hide(axis=0).to_html()  # doctest: +SKIP
+        '<style type="text/css"></style>'
+        '<table id="T__">'
+        '  <thead>'
+        '    <tr><th class="col_heading level0 col0" >0</th></tr>'
+        '  </thead>'
+        '  <tbody>'
+        '    <tr><td class="data row0 col0 other-class" >1</td></tr>'
+        '  </tbody>'
+        '</table>'
+        """
+        if not classes.index.is_unique or not classes.columns.is_unique:
+            raise KeyError(
+                "Classes render only if `classes` has unique index and columns."
+            )
+        classes = classes.reindex_like(self.data)
+
+        for r, row_tup in enumerate(classes.itertuples()):
+            for c, value in enumerate(row_tup[1:]):
+                if not (pd.isna(value) or value == ""):
+                    self.cell_context[(r, c)] = str(value)
+
+        return self
+
+    def _update_ctx(self, attrs: DataFrame) -> None:
+        """
+        Update the state of the ``Styler`` for data cells.
+
+        Collects a mapping of {index_label: [('<property>', '<value>'), ..]}.
+
+        Parameters
+        ----------
+        attrs : DataFrame
+            should contain strings of '<property>: <value>;<prop2>: <val2>'
+            Whitespace shouldn't matter and the final trailing ';' shouldn't
+            matter.
+        """
+        if not self.index.is_unique or not self.columns.is_unique:
+            raise KeyError(
+                "`Styler.apply` and `.map` are not compatible "
+                "with non-unique index or columns."
+            )
+
+        for cn in attrs.columns:
+            j = self.columns.get_loc(cn)
+            ser = attrs[cn]
+            for rn, c in ser.items():
+                if not c or pd.isna(c):
+                    continue
+                css_list = maybe_convert_css_to_tuples(c)
+                i = self.index.get_loc(rn)
+                self.ctx[(i, j)].extend(css_list)
+
+    def _update_ctx_header(self, attrs: DataFrame, axis: AxisInt) -> None:
+        """
+        Update the state of the ``Styler`` for header cells.
+
+        Collects a mapping of {index_label: [('<property>', '<value>'), ..]}.
+
+        Parameters
+        ----------
+        attrs : Series
+            Should contain strings of '<property>: <value>;<prop2>: <val2>', and an
+            integer index.
+            Whitespace shouldn't matter and the final trailing ';' shouldn't
+            matter.
+        axis : int
+            Identifies whether the ctx object being updated is the index or columns
+        """
+        for j in attrs.columns:
+            ser = attrs[j]
+            for i, c in ser.items():
+                if not c:
+                    continue
+                css_list = maybe_convert_css_to_tuples(c)
+                if axis == 0:
+                    self.ctx_index[(i, j)].extend(css_list)
+                else:
+                    self.ctx_columns[(j, i)].extend(css_list)
+
+    def _copy(self, deepcopy: bool = False) -> Styler:
+        """
+        Copies a Styler, allowing for deepcopy or shallow copy
+
+        Copying a Styler aims to recreate a new Styler object which contains the same
+        data and styles as the original.
+ + Data dependent attributes [copied and NOT exported]: + - formatting (._display_funcs) + - hidden index values or column values (.hidden_rows, .hidden_columns) + - tooltips + - cell_context (cell css classes) + - ctx (cell css styles) + - caption + - concatenated stylers + + Non-data dependent attributes [copied and exported]: + - css + - hidden index state and hidden columns state (.hide_index_, .hide_columns_) + - table_attributes + - table_styles + - applied styles (_todo) + + """ + # GH 40675, 52728 + styler = type(self)( + self.data, # populates attributes 'data', 'columns', 'index' as shallow + ) + shallow = [ # simple string or boolean immutables + "hide_index_", + "hide_columns_", + "hide_column_names", + "hide_index_names", + "table_attributes", + "cell_ids", + "caption", + "uuid", + "uuid_len", + "template_latex", # also copy templates if these have been customised + "template_html_style", + "template_html_table", + "template_html", + ] + deep = [ # nested lists or dicts + "css", + "concatenated", + "_display_funcs", + "_display_funcs_index", + "_display_funcs_columns", + "hidden_rows", + "hidden_columns", + "ctx", + "ctx_index", + "ctx_columns", + "cell_context", + "_todo", + "table_styles", + "tooltips", + ] + + for attr in shallow: + setattr(styler, attr, getattr(self, attr)) + + for attr in deep: + val = getattr(self, attr) + setattr(styler, attr, copy.deepcopy(val) if deepcopy else val) + + return styler + + def __copy__(self) -> Styler: + return self._copy(deepcopy=False) + + def __deepcopy__(self, memo) -> Styler: + return self._copy(deepcopy=True) + + def clear(self) -> None: + """ + Reset the ``Styler``, removing any previously applied styles. + + Returns None. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, np.nan]}) + + After any added style: + + >>> df.style.highlight_null(color='yellow') # doctest: +SKIP + + Remove it with: + + >>> df.style.clear() # doctest: +SKIP + + Please see: + `Table Visualization <../../user_guide/style.ipynb>`_ for more examples. 
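+
+        As a sketch of the observable effect (``_todo`` is the internal queue of
+        pending styles used throughout this module):
+
+        >>> styler = df.style.highlight_null(color='yellow')  # doctest: +SKIP
+        >>> len(styler._todo)  # doctest: +SKIP
+        1
+        >>> styler.clear()  # doctest: +SKIP
+        >>> len(styler._todo)  # doctest: +SKIP
+        0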
+ """ + # create default GH 40675 + clean_copy = Styler(self.data, uuid=self.uuid) + clean_attrs = [a for a in clean_copy.__dict__ if not callable(a)] + self_attrs = [a for a in self.__dict__ if not callable(a)] # maybe more attrs + for attr in clean_attrs: + setattr(self, attr, getattr(clean_copy, attr)) + for attr in set(self_attrs).difference(clean_attrs): + delattr(self, attr) + + def _apply( + self, + func: Callable, + axis: Axis | None = 0, + subset: Subset | None = None, + **kwargs, + ) -> Styler: + subset = slice(None) if subset is None else subset + subset = non_reducing_slice(subset) + data = self.data.loc[subset] + if data.empty: + result = DataFrame() + elif axis is None: + result = func(data, **kwargs) + if not isinstance(result, DataFrame): + if not isinstance(result, np.ndarray): + raise TypeError( + f"Function {repr(func)} must return a DataFrame or ndarray " + f"when passed to `Styler.apply` with axis=None" + ) + if data.shape != result.shape: + raise ValueError( + f"Function {repr(func)} returned ndarray with wrong shape.\n" + f"Result has shape: {result.shape}\n" + f"Expected shape: {data.shape}" + ) + result = DataFrame(result, index=data.index, columns=data.columns) + else: + axis = self.data._get_axis_number(axis) + if axis == 0: + result = data.apply(func, axis=0, **kwargs) + else: + result = data.T.apply(func, axis=0, **kwargs).T # see GH 42005 + + if isinstance(result, Series): + raise ValueError( + f"Function {repr(func)} resulted in the apply method collapsing to a " + f"Series.\nUsually, this is the result of the function returning a " + f"single value, instead of list-like." + ) + msg = ( + f"Function {repr(func)} created invalid {{0}} labels.\nUsually, this is " + f"the result of the function returning a " + f"{'Series' if axis is not None else 'DataFrame'} which contains invalid " + f"labels, or returning an incorrectly shaped, list-like object which " + f"cannot be mapped to labels, possibly due to applying the function along " + f"the wrong axis.\n" + f"Result {{0}} has shape: {{1}}\n" + f"Expected {{0}} shape: {{2}}" + ) + if not all(result.index.isin(data.index)): + raise ValueError(msg.format("index", result.index.shape, data.index.shape)) + if not all(result.columns.isin(data.columns)): + raise ValueError( + msg.format("columns", result.columns.shape, data.columns.shape) + ) + self._update_ctx(result) + return self + + @Substitution(subset=subset_args) + def apply( + self, + func: Callable, + axis: Axis | None = 0, + subset: Subset | None = None, + **kwargs, + ) -> Styler: + """ + Apply a CSS-styling function column-wise, row-wise, or table-wise. + + Updates the HTML representation with the result. + + Parameters + ---------- + func : function + ``func`` should take a Series if ``axis`` in [0,1] and return a list-like + object of same length, or a Series, not necessarily of same length, with + valid index labels considering ``subset``. + ``func`` should take a DataFrame if ``axis`` is ``None`` and return either + an ndarray with the same shape or a DataFrame, not necessarily of the same + shape, with valid index and columns labels considering ``subset``. + + .. versionchanged:: 1.3.0 + + .. versionchanged:: 1.4.0 + + axis : {0 or 'index', 1 or 'columns', None}, default 0 + Apply to each column (``axis=0`` or ``'index'``), to each row + (``axis=1`` or ``'columns'``), or to the entire DataFrame at once + with ``axis=None``. + %(subset)s + **kwargs : dict + Pass along to ``func``. 
+ + Returns + ------- + Styler + + See Also + -------- + Styler.map_index: Apply a CSS-styling function to headers elementwise. + Styler.apply_index: Apply a CSS-styling function to headers level-wise. + Styler.map: Apply a CSS-styling function elementwise. + + Notes + ----- + The elements of the output of ``func`` should be CSS styles as strings, in the + format 'attribute: value; attribute2: value2; ...' or, + if nothing is to be applied to that element, an empty string or ``None``. + + This is similar to ``DataFrame.apply``, except that ``axis=None`` + applies the function to the entire DataFrame at once, + rather than column-wise or row-wise. + + Examples + -------- + >>> def highlight_max(x, color): + ... return np.where(x == np.nanmax(x.to_numpy()), f"color: {color};", None) + >>> df = pd.DataFrame(np.random.randn(5, 2), columns=["A", "B"]) + >>> df.style.apply(highlight_max, color='red') # doctest: +SKIP + >>> df.style.apply(highlight_max, color='blue', axis=1) # doctest: +SKIP + >>> df.style.apply(highlight_max, color='green', axis=None) # doctest: +SKIP + + Using ``subset`` to restrict application to a single column or multiple columns + + >>> df.style.apply(highlight_max, color='red', subset="A") + ... # doctest: +SKIP + >>> df.style.apply(highlight_max, color='red', subset=["A", "B"]) + ... # doctest: +SKIP + + Using a 2d input to ``subset`` to select rows in addition to columns + + >>> df.style.apply(highlight_max, color='red', subset=([0, 1, 2], slice(None))) + ... # doctest: +SKIP + >>> df.style.apply(highlight_max, color='red', subset=(slice(0, 5, 2), "A")) + ... # doctest: +SKIP + + Using a function which returns a Series / DataFrame of unequal length but + containing valid index labels + + >>> df = pd.DataFrame([[1, 2], [3, 4], [4, 6]], index=["A1", "A2", "Total"]) + >>> total_style = pd.Series("font-weight: bold;", index=["Total"]) + >>> df.style.apply(lambda s: total_style) # doctest: +SKIP + + See `Table Visualization <../../user_guide/style.ipynb>`_ user guide for + more details. + """ + self._todo.append( + (lambda instance: getattr(instance, "_apply"), (func, axis, subset), kwargs) + ) + return self + + def _apply_index( + self, + func: Callable, + axis: Axis = 0, + level: Level | list[Level] | None = None, + method: str = "apply", + **kwargs, + ) -> Styler: + axis = self.data._get_axis_number(axis) + obj = self.index if axis == 0 else self.columns + + levels_ = refactor_levels(level, obj) + data = DataFrame(obj.to_list()).loc[:, levels_] + + if method == "apply": + result = data.apply(func, axis=0, **kwargs) + elif method == "map": + result = data.map(func, **kwargs) + + self._update_ctx_header(result, axis) + return self + + @doc( + this="apply", + wise="level-wise", + alt="map", + altwise="elementwise", + func="take a Series and return a string array of the same length", + input_note="the index as a Series, if an Index, or a level of a MultiIndex", + output_note="an identically sized array of CSS styles as strings", + var="s", + ret='np.where(s == "B", "background-color: yellow;", "")', + ret2='["background-color: yellow;" if "x" in v else "" for v in s]', + ) + def apply_index( + self, + func: Callable, + axis: AxisInt | str = 0, + level: Level | list[Level] | None = None, + **kwargs, + ) -> Styler: + """ + Apply a CSS-styling function to the index or column headers, {wise}. + + Updates the HTML representation with the result. + + .. versionadded:: 1.4.0 + + .. versionadded:: 2.1.0 + Styler.applymap_index was deprecated and renamed to Styler.map_index. 
+ + Parameters + ---------- + func : function + ``func`` should {func}. + axis : {{0, 1, "index", "columns"}} + The headers over which to apply the function. + level : int, str, list, optional + If index is MultiIndex the level(s) over which to apply the function. + **kwargs : dict + Pass along to ``func``. + + Returns + ------- + Styler + + See Also + -------- + Styler.{alt}_index: Apply a CSS-styling function to headers {altwise}. + Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise. + Styler.map: Apply a CSS-styling function elementwise. + + Notes + ----- + Each input to ``func`` will be {input_note}. The output of ``func`` should be + {output_note}, in the format 'attribute: value; attribute2: value2; ...' + or, if nothing is to be applied to that element, an empty string or ``None``. + + Examples + -------- + Basic usage to conditionally highlight values in the index. + + >>> df = pd.DataFrame([[1,2], [3,4]], index=["A", "B"]) + >>> def color_b(s): + ... return {ret} + >>> df.style.{this}_index(color_b) # doctest: +SKIP + + .. figure:: ../../_static/style/appmaphead1.png + + Selectively applying to specific levels of MultiIndex columns. + + >>> midx = pd.MultiIndex.from_product([['ix', 'jy'], [0, 1], ['x3', 'z4']]) + >>> df = pd.DataFrame([np.arange(8)], columns=midx) + >>> def highlight_x({var}): + ... return {ret2} + >>> df.style.{this}_index(highlight_x, axis="columns", level=[0, 2]) + ... # doctest: +SKIP + + .. figure:: ../../_static/style/appmaphead2.png + """ + self._todo.append( + ( + lambda instance: getattr(instance, "_apply_index"), + (func, axis, level, "apply"), + kwargs, + ) + ) + return self + + @doc( + apply_index, + this="map", + wise="elementwise", + alt="apply", + altwise="level-wise", + func="take a scalar and return a string", + input_note="an index value, if an Index, or a level value of a MultiIndex", + output_note="CSS styles as a string", + var="v", + ret='"background-color: yellow;" if v == "B" else None', + ret2='"background-color: yellow;" if "x" in v else None', + ) + def map_index( + self, + func: Callable, + axis: AxisInt | str = 0, + level: Level | list[Level] | None = None, + **kwargs, + ) -> Styler: + self._todo.append( + ( + lambda instance: getattr(instance, "_apply_index"), + (func, axis, level, "map"), + kwargs, + ) + ) + return self + + def applymap_index( + self, + func: Callable, + axis: AxisInt | str = 0, + level: Level | list[Level] | None = None, + **kwargs, + ) -> Styler: + """ + Apply a CSS-styling function to the index or column headers, elementwise. + + .. deprecated:: 2.1.0 + + Styler.applymap_index has been deprecated. Use Styler.map_index instead. + + Parameters + ---------- + func : function + ``func`` should take a scalar and return a string. + axis : {{0, 1, "index", "columns"}} + The headers over which to apply the function. + level : int, str, list, optional + If index is MultiIndex the level(s) over which to apply the function. + **kwargs : dict + Pass along to ``func``. + + Returns + ------- + Styler + """ + warnings.warn( + "Styler.applymap_index has been deprecated. Use Styler.map_index instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.map_index(func, axis, level, **kwargs) + + def _map(self, func: Callable, subset: Subset | None = None, **kwargs) -> Styler: + func = partial(func, **kwargs) # map doesn't take kwargs? 
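+        # (Added note) partial() pre-binds the keyword arguments so the styling
+        # function can be handed to DataFrame.map as a one-argument callable,
+        # e.g. partial(color_negative, color="red") from the map() docstring
+        # below.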
+        if subset is None:
+            subset = IndexSlice[:]
+        subset = non_reducing_slice(subset)
+        result = self.data.loc[subset].map(func)
+        self._update_ctx(result)
+        return self
+
+    @Substitution(subset=subset_args)
+    def map(self, func: Callable, subset: Subset | None = None, **kwargs) -> Styler:
+        """
+        Apply a CSS-styling function elementwise.
+
+        Updates the HTML representation with the result.
+
+        Parameters
+        ----------
+        func : function
+            ``func`` should take a scalar and return a string.
+        %(subset)s
+        **kwargs : dict
+            Pass along to ``func``.
+
+        Returns
+        -------
+        Styler
+
+        See Also
+        --------
+        Styler.map_index: Apply a CSS-styling function to headers elementwise.
+        Styler.apply_index: Apply a CSS-styling function to headers level-wise.
+        Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise.
+
+        Notes
+        -----
+        The elements of the output of ``func`` should be CSS styles as strings, in the
+        format 'attribute: value; attribute2: value2; ...' or,
+        if nothing is to be applied to that element, an empty string or ``None``.
+
+        Examples
+        --------
+        >>> def color_negative(v, color):
+        ...     return f"color: {color};" if v < 0 else None
+        >>> df = pd.DataFrame(np.random.randn(5, 2), columns=["A", "B"])
+        >>> df.style.map(color_negative, color='red')  # doctest: +SKIP
+
+        Using ``subset`` to restrict application to a single column or multiple columns
+
+        >>> df.style.map(color_negative, color='red', subset="A")
+        ...  # doctest: +SKIP
+        >>> df.style.map(color_negative, color='red', subset=["A", "B"])
+        ...  # doctest: +SKIP
+
+        Using a 2d input to ``subset`` to select rows in addition to columns
+
+        >>> df.style.map(color_negative, color='red',
+        ...              subset=([0, 1, 2], slice(None)))  # doctest: +SKIP
+        >>> df.style.map(color_negative, color='red', subset=(slice(0, 5, 2), "A"))
+        ...  # doctest: +SKIP
+
+        See `Table Visualization <../../user_guide/style.ipynb>`_ user guide for
+        more details.
+        """
+        self._todo.append(
+            (lambda instance: getattr(instance, "_map"), (func, subset), kwargs)
+        )
+        return self
+
+    @Substitution(subset=subset_args)
+    def applymap(
+        self, func: Callable, subset: Subset | None = None, **kwargs
+    ) -> Styler:
+        """
+        Apply a CSS-styling function elementwise.
+
+        .. deprecated:: 2.1.0
+
+           Styler.applymap has been deprecated. Use Styler.map instead.
+
+        Parameters
+        ----------
+        func : function
+            ``func`` should take a scalar and return a string.
+        %(subset)s
+        **kwargs : dict
+            Pass along to ``func``.
+
+        Returns
+        -------
+        Styler
+        """
+        warnings.warn(
+            "Styler.applymap has been deprecated. Use Styler.map instead.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+        return self.map(func, subset, **kwargs)
+
+    def set_table_attributes(self, attributes: str) -> Styler:
+        """
+        Set the table attributes added to the ``<table>`` HTML element.
+
+        These are items in addition to automatic (by default) ``id`` attribute.
+
+        Parameters
+        ----------
+        attributes : str
+
+        Returns
+        -------
+        Styler
+
+        See Also
+        --------
+        Styler.set_table_styles: Set the table styles included within the ``<style>``
+            HTML element.
+        """
+        self.table_attributes = attributes
+        return self
+
+    def _translate_body(self, idx_lengths: dict, max_rows: int, max_cols: int):
+        """
+        Build each <tr> within table <body> as a list.
+
+        Also add elements to the cellstyle_map for more efficient grouped elements in
+        <style></style> block
+
+        Parameters
+        ----------
+        sparsify_index : bool
+            Whether index_headers section will add rowspan attributes (>1) to elements.
+
+        Returns
+        -------
+        body : list
+            The associated HTML elements needed for template rendering.
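+
+        For example (an editorial illustration, not part of the original
+        docstring): the ``max_rows``/``max_cols`` arguments are ultimately driven
+        by display options, so trimming can be previewed with ``option_context``:
+
+        >>> import numpy as np
+        >>> import pandas as pd
+        >>> df = pd.DataFrame(np.arange(100).reshape(10, 10))
+        >>> with pd.option_context("styler.render.max_elements", 6):
+        ...     html = df.style.to_html()   # renders a trimmed table with "..." cells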
+ """ + rlabels = self.data.index.tolist() + if not isinstance(self.data.index, MultiIndex): + rlabels = [[x] for x in rlabels] + + body: list = [] + visible_row_count: int = 0 + for r, row_tup in [ + z for z in enumerate(self.data.itertuples()) if z[0] not in self.hidden_rows + ]: + visible_row_count += 1 + if self._check_trim( + visible_row_count, + max_rows, + body, + "row", + ): + break + + body_row = self._generate_body_row( + (r, row_tup, rlabels), max_cols, idx_lengths + ) + body.append(body_row) + return body + + def _check_trim( + self, + count: int, + max: int, + obj: list, + element: str, + css: str | None = None, + value: str = "...", + ) -> bool: + """ + Indicates whether to break render loops and append a trimming indicator + + Parameters + ---------- + count : int + The loop count of previous visible items. + max : int + The allowable rendered items in the loop. + obj : list + The current render collection of the rendered items. + element : str + The type of element to append in the case a trimming indicator is needed. + css : str, optional + The css to add to the trimming indicator element. + value : str, optional + The value of the elements display if necessary. + + Returns + ------- + result : bool + Whether a trimming element was required and appended. + """ + if count > max: + if element == "row": + obj.append(self._generate_trimmed_row(max)) + else: + obj.append(_element(element, css, value, True, attributes="")) + return True + return False + + def _generate_trimmed_row(self, max_cols: int) -> list: + """ + When a render has too many rows we generate a trimming row containing "..." + + Parameters + ---------- + max_cols : int + Number of permissible columns + + Returns + ------- + list of elements + """ + index_headers = [ + _element( + "th", + ( + f"{self.css['row_heading']} {self.css['level']}{c} " + f"{self.css['row_trim']}" + ), + "...", + not self.hide_index_[c], + attributes="", + ) + for c in range(self.data.index.nlevels) + ] + + data: list = [] + visible_col_count: int = 0 + for c, _ in enumerate(self.columns): + data_element_visible = c not in self.hidden_columns + if data_element_visible: + visible_col_count += 1 + if self._check_trim( + visible_col_count, + max_cols, + data, + "td", + f"{self.css['data']} {self.css['row_trim']} {self.css['col_trim']}", + ): + break + + data.append( + _element( + "td", + f"{self.css['data']} {self.css['col']}{c} {self.css['row_trim']}", + "...", + data_element_visible, + attributes="", + ) + ) + + return index_headers + data + + def _generate_body_row( + self, + iter: tuple, + max_cols: int, + idx_lengths: dict, + ): + """ + Generate a regular row for the body section of appropriate format. + + +--------------------------------------------+---------------------------+ + | index_header_0 ... index_header_n | data_by_column ... | + +--------------------------------------------+---------------------------+ + + Parameters + ---------- + iter : tuple + Iterable from outer scope: row number, row data tuple, row index labels. + max_cols : int + Number of permissible columns. 
+ idx_lengths : dict + A map of the sparsification structure of the index + + Returns + ------- + list of elements + """ + r, row_tup, rlabels = iter + + index_headers = [] + for c, value in enumerate(rlabels[r]): + header_element_visible = ( + _is_visible(r, c, idx_lengths) and not self.hide_index_[c] + ) + header_element = _element( + "th", + ( + f"{self.css['row_heading']} {self.css['level']}{c} " + f"{self.css['row']}{r}" + ), + value, + header_element_visible, + display_value=self._display_funcs_index[(r, c)](value), + attributes=( + f'rowspan="{idx_lengths.get((c, r), 0)}"' + if idx_lengths.get((c, r), 0) > 1 + else "" + ), + ) + + if self.cell_ids: + header_element[ + "id" + ] = f"{self.css['level']}{c}_{self.css['row']}{r}" # id is given + if ( + header_element_visible + and (r, c) in self.ctx_index + and self.ctx_index[r, c] + ): + # always add id if a style is specified + header_element["id"] = f"{self.css['level']}{c}_{self.css['row']}{r}" + self.cellstyle_map_index[tuple(self.ctx_index[r, c])].append( + f"{self.css['level']}{c}_{self.css['row']}{r}" + ) + + index_headers.append(header_element) + + data: list = [] + visible_col_count: int = 0 + for c, value in enumerate(row_tup[1:]): + data_element_visible = ( + c not in self.hidden_columns and r not in self.hidden_rows + ) + if data_element_visible: + visible_col_count += 1 + if self._check_trim( + visible_col_count, + max_cols, + data, + "td", + f"{self.css['data']} {self.css['row']}{r} {self.css['col_trim']}", + ): + break + + # add custom classes from cell context + cls = "" + if (r, c) in self.cell_context: + cls = " " + self.cell_context[r, c] + + data_element = _element( + "td", + ( + f"{self.css['data']} {self.css['row']}{r} " + f"{self.css['col']}{c}{cls}" + ), + value, + data_element_visible, + attributes="", + display_value=self._display_funcs[(r, c)](value), + ) + + if self.cell_ids: + data_element["id"] = f"{self.css['row']}{r}_{self.css['col']}{c}" + if data_element_visible and (r, c) in self.ctx and self.ctx[r, c]: + # always add id if needed due to specified style + data_element["id"] = f"{self.css['row']}{r}_{self.css['col']}{c}" + self.cellstyle_map[tuple(self.ctx[r, c])].append( + f"{self.css['row']}{r}_{self.css['col']}{c}" + ) + + data.append(data_element) + + return index_headers + data + + def _translate_latex(self, d: dict, clines: str | None) -> None: + r""" + Post-process the default render dict for the LaTeX template format. + + Processing items included are: + - Remove hidden columns from the non-headers part of the body. + - Place cellstyles directly in td cells rather than use cellstyle_map. + - Remove hidden indexes or reinsert missing th elements if part of multiindex + or multirow sparsification (so that \multirow and \multicol work correctly). + """ + index_levels = self.index.nlevels + visible_index_level_n = index_levels - sum(self.hide_index_) + d["head"] = [ + [ + {**col, "cellstyle": self.ctx_columns[r, c - visible_index_level_n]} + for c, col in enumerate(row) + if col["is_visible"] + ] + for r, row in enumerate(d["head"]) + ] + + def _concatenated_visible_rows(obj, n, row_indices): + """ + Extract all visible row indices recursively from concatenated stylers. 
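+
+            For example (an editorial illustration, not part of the original
+            docstring): concatenated stylers are produced by the public
+            ``Styler.concat``, which appends one renderer to another:
+
+            >>> upper = pd.DataFrame({"x": [1, 2]}).style
+            >>> total = pd.DataFrame({"x": [3]}, index=["Total"]).style
+            >>> combined = upper.concat(total)   # visible rows are walked recursively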
+ """ + row_indices.extend( + [r + n for r in range(len(obj.index)) if r not in obj.hidden_rows] + ) + n += len(obj.index) + for concatenated in obj.concatenated: + n = _concatenated_visible_rows(concatenated, n, row_indices) + return n + + def concatenated_visible_rows(obj): + row_indices: list[int] = [] + _concatenated_visible_rows(obj, 0, row_indices) + # TODO try to consolidate the concat visible rows + # methods to a single function / recursion for simplicity + return row_indices + + body = [] + for r, row in zip(concatenated_visible_rows(self), d["body"]): + # note: cannot enumerate d["body"] because rows were dropped if hidden + # during _translate_body so must zip to acquire the true r-index associated + # with the ctx obj which contains the cell styles. + if all(self.hide_index_): + row_body_headers = [] + else: + row_body_headers = [ + { + **col, + "display_value": col["display_value"] + if col["is_visible"] + else "", + "cellstyle": self.ctx_index[r, c], + } + for c, col in enumerate(row[:index_levels]) + if (col["type"] == "th" and not self.hide_index_[c]) + ] + + row_body_cells = [ + {**col, "cellstyle": self.ctx[r, c]} + for c, col in enumerate(row[index_levels:]) + if (col["is_visible"] and col["type"] == "td") + ] + + body.append(row_body_headers + row_body_cells) + d["body"] = body + + # clines are determined from info on index_lengths and hidden_rows and input + # to a dict defining which row clines should be added in the template. + if clines not in [ + None, + "all;data", + "all;index", + "skip-last;data", + "skip-last;index", + ]: + raise ValueError( + f"`clines` value of {clines} is invalid. Should either be None or one " + f"of 'all;data', 'all;index', 'skip-last;data', 'skip-last;index'." + ) + if clines is not None: + data_len = len(row_body_cells) if "data" in clines and d["body"] else 0 + + d["clines"] = defaultdict(list) + visible_row_indexes: list[int] = [ + r for r in range(len(self.data.index)) if r not in self.hidden_rows + ] + visible_index_levels: list[int] = [ + i for i in range(index_levels) if not self.hide_index_[i] + ] + for rn, r in enumerate(visible_row_indexes): + for lvln, lvl in enumerate(visible_index_levels): + if lvl == index_levels - 1 and "skip-last" in clines: + continue + idx_len = d["index_lengths"].get((lvl, r), None) + if idx_len is not None: # i.e. not a sparsified entry + d["clines"][rn + idx_len].append( + f"\\cline{{{lvln+1}-{len(visible_index_levels)+data_len}}}" + ) + + def format( + self, + formatter: ExtFormatter | None = None, + subset: Subset | None = None, + na_rep: str | None = None, + precision: int | None = None, + decimal: str = ".", + thousands: str | None = None, + escape: str | None = None, + hyperlinks: str | None = None, + ) -> StylerRenderer: + r""" + Format the text display value of cells. + + Parameters + ---------- + formatter : str, callable, dict or None + Object to define how values are displayed. See notes. + subset : label, array-like, IndexSlice, optional + A valid 2d input to `DataFrame.loc[]`, or, in the case of a 1d input + or single key, to `DataFrame.loc[:, ]` where the columns are + prioritised, to limit ``data`` to *before* applying the function. + na_rep : str, optional + Representation for missing values. + If ``na_rep`` is None, no special formatting is applied. + precision : int, optional + Floating point precision to use for display purposes, if not determined by + the specified ``formatter``. + + .. versionadded:: 1.3.0 + + decimal : str, default "." 
+            Character used as decimal separator for floats, complex and integers.
+
+            .. versionadded:: 1.3.0
+
+        thousands : str, optional, default None
+            Character used as thousands separator for floats, complex and integers.
+
+            .. versionadded:: 1.3.0
+
+        escape : str, optional
+            Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``
+            in cell display string with HTML-safe sequences.
+            Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,
+            ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with
+            LaTeX-safe sequences.
+            Use 'latex-math' to replace the characters the same way as in 'latex' mode,
+            except for math substrings, which are either surrounded by two ``$``
+            characters or start with ``\(`` and end with ``\)``.
+            Escaping is done before ``formatter``.
+
+            .. versionadded:: 1.3.0
+
+        hyperlinks : {"html", "latex"}, optional
+            Convert string patterns containing https://, http://, ftp:// or www. to
+            HTML <a> tags as clickable URL hyperlinks if "html", or LaTeX \href
+            commands if "latex".
+
+            .. versionadded:: 1.4.0
+
+        Returns
+        -------
+        Styler
+
+        See Also
+        --------
+        Styler.format_index: Format the text display value of index labels.
+
+        Notes
+        -----
+        This method assigns a formatting function, ``formatter``, to each cell in the
+        DataFrame. If ``formatter`` is ``None``, then the default formatter is used.
+        If a callable, then that function should take a data value as input and return
+        a displayable representation, such as a string. If ``formatter`` is
+        given as a string, it is assumed to be a valid Python format specification
+        and is wrapped to a callable as ``string.format(x)``. If a ``dict`` is given,
+        keys should correspond to column names, and values should be strings or
+        callables, as above.
+
+        The default formatter currently expresses floats and complex numbers with the
+        pandas display precision unless using the ``precision`` argument here. The
+        default formatter does not adjust the representation of missing values unless
+        the ``na_rep`` argument is used.
+
+        The ``subset`` argument defines which region to apply the formatting function
+        to. If the ``formatter`` argument is given in dict form but does not include
+        all columns within the subset, then these columns will have the default
+        formatter applied. Any columns in the formatter dict excluded from the subset
+        will be ignored.
+
+        When using a ``formatter`` string the dtypes must be compatible, otherwise a
+        `ValueError` will be raised.
+
+        When instantiating a Styler, default formatting can be applied by setting the
+        ``pandas.options``:
+
+        - ``styler.format.formatter``: default None.
+        - ``styler.format.na_rep``: default None.
+        - ``styler.format.precision``: default 6.
+        - ``styler.format.decimal``: default ".".
+        - ``styler.format.thousands``: default None.
+        - ``styler.format.escape``: default None.
+
+        .. warning::
+           `Styler.format` is ignored when using the output format `Styler.to_excel`,
+           since Excel and Python have inherently different formatting structures.
+           However, it is possible to use the `number-format` pseudo CSS attribute
+           to force Excel permissible formatting. See examples.
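+
+        For instance (an editorial illustration, not part of the original
+        docstring), the option-based defaults listed above can be set temporarily:
+
+        >>> with pd.option_context("styler.format.precision", 2,
+        ...                        "styler.format.thousands", ","):
+        ...     html = pd.DataFrame({"x": [1234.5678]}).style.to_html()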
+
+        Examples
+        --------
+        Using ``na_rep`` and ``precision`` with the default ``formatter``
+
+        >>> df = pd.DataFrame([[np.nan, 1.0, 'A'], [2.0, np.nan, 3.0]])
+        >>> df.style.format(na_rep='MISS', precision=3)  # doctest: +SKIP
+                0       1       2
+        0    MISS   1.000       A
+        1   2.000    MISS   3.000
+
+        Using a ``formatter`` specification on consistent column dtypes
+
+        >>> df.style.format('{:.2f}', na_rep='MISS', subset=[0, 1])  # doctest: +SKIP
+                0      1          2
+        0    MISS   1.00          A
+        1    2.00   MISS   3.000000
+
+        Using the default ``formatter`` for unspecified columns
+
+        >>> df.style.format({0: '{:.2f}', 1: '£ {:.1f}'}, na_rep='MISS', precision=1)
+        ...  # doctest: +SKIP
+                0      1     2
+        0    MISS  £ 1.0     A
+        1    2.00   MISS   3.0
+
+        Multiple ``na_rep`` or ``precision`` specifications under the default
+        ``formatter``.
+
+        >>> (df.style.format(na_rep='MISS', precision=1, subset=[0])
+        ...     .format(na_rep='PASS', precision=2, subset=[1, 2]))  # doctest: +SKIP
+                0      1      2
+        0    MISS   1.00      A
+        1     2.0   PASS   3.00
+
+        Using a callable ``formatter`` function.
+
+        >>> func = lambda s: 'STRING' if isinstance(s, str) else 'FLOAT'
+        >>> df.style.format({0: '{:.1f}', 2: func}, precision=4, na_rep='MISS')
+        ...  # doctest: +SKIP
+                0        1        2
+        0    MISS   1.0000   STRING
+        1     2.0     MISS    FLOAT
+
+        Using a ``formatter`` with HTML ``escape`` and ``na_rep``.
+
+        >>> df = pd.DataFrame([['<div></div>', '"A&B"', None]])
+        >>> s = df.style.format(
+        ...     '<a href="a.com/{0}">{0}</a>', escape="html", na_rep="NA"
+        ...     )
+        >>> s.to_html()  # doctest: +SKIP
+        ...
+        <td .. ><a href="a.com/&lt;div&gt;&lt;/div&gt;">&lt;div&gt;&lt;/div&gt;</a></td>
+        <td .. ><a href="a.com/&#34;A&amp;B&#34;">&#34;A&amp;B&#34;</a></td>
+        <td .. >NA</td>
+ + + ... + + Using a ``formatter`` with ``escape`` in 'latex' mode. + + >>> df = pd.DataFrame([["123"], ["~ ^"], ["$%#"]]) + >>> df.style.format("\\textbf{{{}}}", escape="latex").to_latex() + ... # doctest: +SKIP + \begin{tabular}{ll} + & 0 \\ + 0 & \textbf{123} \\ + 1 & \textbf{\textasciitilde \space \textasciicircum } \\ + 2 & \textbf{\$\%\#} \\ + \end{tabular} + + Applying ``escape`` in 'latex-math' mode. In the example below + we enter math mode using the character ``$``. + + >>> df = pd.DataFrame([[r"$\sum_{i=1}^{10} a_i$ a~b $\alpha \ + ... = \frac{\beta}{\zeta^2}$"], ["%#^ $ \$x^2 $"]]) + >>> df.style.format(escape="latex-math").to_latex() + ... # doctest: +SKIP + \begin{tabular}{ll} + & 0 \\ + 0 & $\sum_{i=1}^{10} a_i$ a\textasciitilde b $\alpha = \frac{\beta}{\zeta^2}$ \\ + 1 & \%\#\textasciicircum \space $ \$x^2 $ \\ + \end{tabular} + + We can use the character ``\(`` to enter math mode and the character ``\)`` + to close math mode. + + >>> df = pd.DataFrame([[r"\(\sum_{i=1}^{10} a_i\) a~b \(\alpha \ + ... = \frac{\beta}{\zeta^2}\)"], ["%#^ \( \$x^2 \)"]]) + >>> df.style.format(escape="latex-math").to_latex() + ... # doctest: +SKIP + \begin{tabular}{ll} + & 0 \\ + 0 & \(\sum_{i=1}^{10} a_i\) a\textasciitilde b \(\alpha + = \frac{\beta}{\zeta^2}\) \\ + 1 & \%\#\textasciicircum \space \( \$x^2 \) \\ + \end{tabular} + + If we have in one DataFrame cell a combination of both shorthands + for math formulas, the shorthand with the sign ``$`` will be applied. + + >>> df = pd.DataFrame([[r"\( x^2 \) $x^2$"], \ + ... [r"$\frac{\beta}{\zeta}$ \(\frac{\beta}{\zeta}\)"]]) + >>> df.style.format(escape="latex-math").to_latex() + ... # doctest: +SKIP + \begin{tabular}{ll} + & 0 \\ + 0 & \textbackslash ( x\textasciicircum 2 \textbackslash ) $x^2$ \\ + 1 & $\frac{\beta}{\zeta}$ \textbackslash (\textbackslash + frac\{\textbackslash beta\}\{\textbackslash zeta\}\textbackslash ) \\ + \end{tabular} + + Pandas defines a `number-format` pseudo CSS attribute instead of the `.format` + method to create `to_excel` permissible formatting. Note that semi-colons are + CSS protected characters but used as separators in Excel's format string. + Replace semi-colons with the section separator character (ASCII-245) when + defining the formatting here. + + >>> df = pd.DataFrame({"A": [1, 0, -1]}) + >>> pseudo_css = "number-format: 0§[Red](0)§-§@;" + >>> filename = "formatted_file.xlsx" + >>> df.style.map(lambda v: pseudo_css).to_excel(filename) # doctest: +SKIP + + .. 
figure:: ../../_static/style/format_excel_css.png + """ + if all( + ( + formatter is None, + subset is None, + precision is None, + decimal == ".", + thousands is None, + na_rep is None, + escape is None, + hyperlinks is None, + ) + ): + self._display_funcs.clear() + return self # clear the formatter / revert to default and avoid looping + + subset = slice(None) if subset is None else subset + subset = non_reducing_slice(subset) + data = self.data.loc[subset] + + if not isinstance(formatter, dict): + formatter = {col: formatter for col in data.columns} + + cis = self.columns.get_indexer_for(data.columns) + ris = self.index.get_indexer_for(data.index) + for ci in cis: + format_func = _maybe_wrap_formatter( + formatter.get(self.columns[ci]), + na_rep=na_rep, + precision=precision, + decimal=decimal, + thousands=thousands, + escape=escape, + hyperlinks=hyperlinks, + ) + for ri in ris: + self._display_funcs[(ri, ci)] = format_func + + return self + + def format_index( + self, + formatter: ExtFormatter | None = None, + axis: Axis = 0, + level: Level | list[Level] | None = None, + na_rep: str | None = None, + precision: int | None = None, + decimal: str = ".", + thousands: str | None = None, + escape: str | None = None, + hyperlinks: str | None = None, + ) -> StylerRenderer: + r""" + Format the text display value of index labels or column headers. + + .. versionadded:: 1.4.0 + + Parameters + ---------- + formatter : str, callable, dict or None + Object to define how values are displayed. See notes. + axis : {0, "index", 1, "columns"} + Whether to apply the formatter to the index or column headers. + level : int, str, list + The level(s) over which to apply the generic formatter. + na_rep : str, optional + Representation for missing values. + If ``na_rep`` is None, no special formatting is applied. + precision : int, optional + Floating point precision to use for display purposes, if not determined by + the specified ``formatter``. + decimal : str, default "." + Character used as decimal separator for floats, complex and integers. + thousands : str, optional, default None + Character used as thousands separator for floats, complex and integers. + escape : str, optional + Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` + in cell display string with HTML-safe sequences. + Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``, + ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with + LaTeX-safe sequences. + Escaping is done before ``formatter``. + hyperlinks : {"html", "latex"}, optional + Convert string patterns containing https://, http://, ftp:// or www. to + HTML tags as clickable URL hyperlinks if "html", or LaTeX \href + commands if "latex". + + Returns + ------- + Styler + + See Also + -------- + Styler.format: Format the text display value of data cells. + + Notes + ----- + This method assigns a formatting function, ``formatter``, to each level label + in the DataFrame's index or column headers. If ``formatter`` is ``None``, + then the default formatter is used. + If a callable then that function should take a label value as input and return + a displayable representation, such as a string. If ``formatter`` is + given as a string this is assumed to be a valid Python format specification + and is wrapped to a callable as ``string.format(x)``. If a ``dict`` is given, + keys should correspond to MultiIndex level numbers or names, and values should + be string or callable, as above. 
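+
+        For example (an editorial illustration, not part of the original
+        docstring), a dict ``formatter`` may mix level numbers and level names:
+
+        >>> midx = pd.MultiIndex.from_arrays(
+        ...     [["a", "b"], [1.25, 2.5]], names=["lab", "num"])
+        >>> styler = pd.DataFrame({"col": [1, 2]}, index=midx).style
+        >>> styler = styler.format_index({"lab": str.upper, "num": "{:.1f}"}, axis=0)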
+ + The default formatter currently expresses floats and complex numbers with the + pandas display precision unless using the ``precision`` argument here. The + default formatter does not adjust the representation of missing values unless + the ``na_rep`` argument is used. + + The ``level`` argument defines which levels of a MultiIndex to apply the + method to. If the ``formatter`` argument is given in dict form but does + not include all levels within the level argument then these unspecified levels + will have the default formatter applied. Any levels in the formatter dict + specifically excluded from the level argument will be ignored. + + When using a ``formatter`` string the dtypes must be compatible, otherwise a + `ValueError` will be raised. + + .. warning:: + `Styler.format_index` is ignored when using the output format + `Styler.to_excel`, since Excel and Python have inherrently different + formatting structures. + However, it is possible to use the `number-format` pseudo CSS attribute + to force Excel permissible formatting. See documentation for `Styler.format`. + + Examples + -------- + Using ``na_rep`` and ``precision`` with the default ``formatter`` + + >>> df = pd.DataFrame([[1, 2, 3]], columns=[2.0, np.nan, 4.0]) + >>> df.style.format_index(axis=1, na_rep='MISS', precision=3) # doctest: +SKIP + 2.000 MISS 4.000 + 0 1 2 3 + + Using a ``formatter`` specification on consistent dtypes in a level + + >>> df.style.format_index('{:.2f}', axis=1, na_rep='MISS') # doctest: +SKIP + 2.00 MISS 4.00 + 0 1 2 3 + + Using the default ``formatter`` for unspecified levels + + >>> df = pd.DataFrame([[1, 2, 3]], + ... columns=pd.MultiIndex.from_arrays([["a", "a", "b"],[2, np.nan, 4]])) + >>> df.style.format_index({0: lambda v: v.upper()}, axis=1, precision=1) + ... # doctest: +SKIP + A B + 2.0 nan 4.0 + 0 1 2 3 + + Using a callable ``formatter`` function. + + >>> func = lambda s: 'STRING' if isinstance(s, str) else 'FLOAT' + >>> df.style.format_index(func, axis=1, na_rep='MISS') + ... # doctest: +SKIP + STRING STRING + FLOAT MISS FLOAT + 0 1 2 3 + + Using a ``formatter`` with HTML ``escape`` and ``na_rep``. + + >>> df = pd.DataFrame([[1, 2, 3]], columns=['"A"', 'A&B', None]) + >>> s = df.style.format_index('$ {0}', axis=1, escape="html", na_rep="NA") + ... # doctest: +SKIP + + + or element. + """ + if "display_value" not in kwargs: + kwargs["display_value"] = value + return { + "type": html_element, + "value": value, + "class": html_class, + "is_visible": is_visible, + **kwargs, + } + + +def _get_trimming_maximums( + rn, + cn, + max_elements, + max_rows=None, + max_cols=None, + scaling_factor: float = 0.8, +) -> tuple[int, int]: + """ + Recursively reduce the number of rows and columns to satisfy max elements. + + Parameters + ---------- + rn, cn : int + The number of input rows / columns + max_elements : int + The number of allowable elements + max_rows, max_cols : int, optional + Directly specify an initial maximum rows or columns before compression. + scaling_factor : float + Factor at which to reduce the number of rows / columns to fit. 
+ + Returns + ------- + rn, cn : tuple + New rn and cn values that satisfy the max_elements constraint + """ + + def scale_down(rn, cn): + if cn >= rn: + return rn, int(cn * scaling_factor) + else: + return int(rn * scaling_factor), cn + + if max_rows: + rn = max_rows if rn > max_rows else rn + if max_cols: + cn = max_cols if cn > max_cols else cn + + while rn * cn > max_elements: + rn, cn = scale_down(rn, cn) + + return rn, cn + + +def _get_level_lengths( + index: Index, + sparsify: bool, + max_index: int, + hidden_elements: Sequence[int] | None = None, +): + """ + Given an index, find the level length for each element. + + Parameters + ---------- + index : Index + Index or columns to determine lengths of each element + sparsify : bool + Whether to hide or show each distinct element in a MultiIndex + max_index : int + The maximum number of elements to analyse along the index due to trimming + hidden_elements : sequence of int + Index positions of elements hidden from display in the index affecting + length + + Returns + ------- + Dict : + Result is a dictionary of (level, initial_position): span + """ + if isinstance(index, MultiIndex): + levels = index.format(sparsify=lib.no_default, adjoin=False) + else: + levels = index.format() + + if hidden_elements is None: + hidden_elements = [] + + lengths = {} + if not isinstance(index, MultiIndex): + for i, value in enumerate(levels): + if i not in hidden_elements: + lengths[(0, i)] = 1 + return lengths + + for i, lvl in enumerate(levels): + visible_row_count = 0 # used to break loop due to display trimming + for j, row in enumerate(lvl): + if visible_row_count > max_index: + break + if not sparsify: + # then lengths will always equal 1 since no aggregation. + if j not in hidden_elements: + lengths[(i, j)] = 1 + visible_row_count += 1 + elif (row is not lib.no_default) and (j not in hidden_elements): + # this element has not been sparsified so must be the start of section + last_label = j + lengths[(i, last_label)] = 1 + visible_row_count += 1 + elif row is not lib.no_default: + # even if the above is hidden, keep track of it in case length > 1 and + # later elements are visible + last_label = j + lengths[(i, last_label)] = 0 + elif j not in hidden_elements: + # then element must be part of sparsified section and is visible + visible_row_count += 1 + if visible_row_count > max_index: + break # do not add a length since the render trim limit reached + if lengths[(i, last_label)] == 0: + # if previous iteration was first-of-section but hidden then offset + last_label = j + lengths[(i, last_label)] = 1 + else: + # else add to previous iteration + lengths[(i, last_label)] += 1 + + non_zero_lengths = { + element: length for element, length in lengths.items() if length >= 1 + } + + return non_zero_lengths + + +def _is_visible(idx_row, idx_col, lengths) -> bool: + """ + Index -> {(idx_row, idx_col): bool}). 
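+
+    For example (an editorial illustration, not part of the original docstring),
+    using ``_get_level_lengths`` from above on a sparsified MultiIndex:
+
+    >>> midx = MultiIndex.from_product([["a"], [0, 1]])
+    >>> lengths = _get_level_lengths(midx, sparsify=True, max_index=10)
+    >>> lengths
+    {(0, 0): 2, (1, 0): 1, (1, 1): 1}
+    >>> _is_visible(0, 0, lengths), _is_visible(1, 0, lengths)
+    (True, False)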
+    """
+    return (idx_col, idx_row) in lengths
+
+
+def format_table_styles(styles: CSSStyles) -> CSSStyles:
+    """
+    looks for multiple CSS selectors and separates them:
+    [{'selector': 'td, th', 'props': 'a:v;'}]
+        ---> [{'selector': 'td', 'props': 'a:v;'},
+              {'selector': 'th', 'props': 'a:v;'}]
+    """
+    return [
+        {"selector": selector, "props": css_dict["props"]}
+        for css_dict in styles
+        for selector in css_dict["selector"].split(",")
+    ]
+
+
+def _default_formatter(x: Any, precision: int, thousands: bool = False) -> Any:
+    """
+    Format the display of a value
+
+    Parameters
+    ----------
+    x : Any
+        Input variable to be formatted
+    precision : Int
+        Floating point precision used if ``x`` is float or complex.
+    thousands : bool, default False
+        Whether to group digits with thousands separated with ",".
+
+    Returns
+    -------
+    value : Any
+        Matches input type, or string if input is float or complex or int with sep.
+    """
+    if is_float(x) or is_complex(x):
+        return f"{x:,.{precision}f}" if thousands else f"{x:.{precision}f}"
+    elif is_integer(x):
+        return f"{x:,}" if thousands else str(x)
+    return x
+
+
+def _wrap_decimal_thousands(
+    formatter: Callable, decimal: str, thousands: str | None
+) -> Callable:
+    """
+    Takes a string formatting function and wraps logic to deal with thousands and
+    decimal parameters, in the case that they are non-standard and that the input
+    is a (float, complex, int).
+    """
+
+    def wrapper(x):
+        if is_float(x) or is_integer(x) or is_complex(x):
+            if decimal != "." and thousands is not None and thousands != ",":
+                return (
+                    formatter(x)
+                    .replace(",", "§_§-")  # rare string to avoid "," <-> "." clash.
+                    .replace(".", decimal)
+                    .replace("§_§-", thousands)
+                )
+            elif decimal != "." and (thousands is None or thousands == ","):
+                return formatter(x).replace(".", decimal)
+            elif decimal == "." and thousands is not None and thousands != ",":
+                return formatter(x).replace(",", thousands)
+        return formatter(x)
+
+    return wrapper
+
+
+def _str_escape(x, escape):
+    """if escaping: only use on str, else return input"""
+    if isinstance(x, str):
+        if escape == "html":
+            return escape_html(x)
+        elif escape == "latex":
+            return _escape_latex(x)
+        elif escape == "latex-math":
+            return _escape_latex_math(x)
+        else:
+            raise ValueError(
+                f"`escape` only permitted in {{'html', 'latex', 'latex-math'}}, \
+got {escape}"
+            )
+    return x
+
+
+def _render_href(x, format):
+    """uses regex to detect a common URL pattern and converts to href tag in format."""
+    if isinstance(x, str):
+        if format == "html":
+            href = '<a href="{0}" target="_blank">{0}</a>'
+        elif format == "latex":
+            href = r"\href{{{0}}}{{{0}}}"
+        else:
+            raise ValueError("``hyperlinks`` format can only be 'html' or 'latex'")
+        pat = r"((http|ftp)s?:\/\/|www.)[\w/\-?=%.:@]+\.[\w/\-&?=%.,':;~!@#$*()\[\]]+"
+        return re.sub(pat, lambda m: href.format(m.group(0)), x)
+    return x
+
+
+def _maybe_wrap_formatter(
+    formatter: BaseFormatter | None = None,
+    na_rep: str | None = None,
+    precision: int | None = None,
+    decimal: str = ".",
+    thousands: str | None = None,
+    escape: str | None = None,
+    hyperlinks: str | None = None,
+) -> Callable:
+    """
+    Allows formatters to be expressed as str, callable or None, where None returns
+    a default formatting function. Wraps with na_rep, and precision where they are
+    available.
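+
+    For example (an editorial illustration, not part of the original docstring):
+
+    >>> fmt = _maybe_wrap_formatter("{:,.1f}", na_rep="MISS", thousands=".")
+    >>> fmt(1234.5)
+    '1.234.5'
+    >>> fmt(float("nan"))
+    'MISS'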
+ """ + # Get initial func from input string, input callable, or from default factory + if isinstance(formatter, str): + func_0 = lambda x: formatter.format(x) + elif callable(formatter): + func_0 = formatter + elif formatter is None: + precision = ( + get_option("styler.format.precision") if precision is None else precision + ) + func_0 = partial( + _default_formatter, precision=precision, thousands=(thousands is not None) + ) + else: + raise TypeError(f"'formatter' expected str or callable, got {type(formatter)}") + + # Replace chars if escaping + if escape is not None: + func_1 = lambda x: func_0(_str_escape(x, escape=escape)) + else: + func_1 = func_0 + + # Replace decimals and thousands if non-standard inputs detected + if decimal != "." or (thousands is not None and thousands != ","): + func_2 = _wrap_decimal_thousands(func_1, decimal=decimal, thousands=thousands) + else: + func_2 = func_1 + + # Render links + if hyperlinks is not None: + func_3 = lambda x: func_2(_render_href(x, format=hyperlinks)) + else: + func_3 = func_2 + + # Replace missing values if na_rep + if na_rep is None: + return func_3 + else: + return lambda x: na_rep if (isna(x) is True) else func_3(x) + + +def non_reducing_slice(slice_: Subset): + """ + Ensure that a slice doesn't reduce to a Series or Scalar. + + Any user-passed `subset` should have this called on it + to make sure we're always working with DataFrames. + """ + # default to column slice, like DataFrame + # ['A', 'B'] -> IndexSlices[:, ['A', 'B']] + kinds = (ABCSeries, np.ndarray, Index, list, str) + if isinstance(slice_, kinds): + slice_ = IndexSlice[:, slice_] + + def pred(part) -> bool: + """ + Returns + ------- + bool + True if slice does *not* reduce, + False if `part` is a tuple. + """ + # true when slice does *not* reduce, False when part is a tuple, + # i.e. MultiIndex slice + if isinstance(part, tuple): + # GH#39421 check for sub-slice: + return any((isinstance(s, slice) or is_list_like(s)) for s in part) + else: + return isinstance(part, slice) or is_list_like(part) + + if not is_list_like(slice_): + if not isinstance(slice_, slice): + # a 1-d slice, like df.loc[1] + slice_ = [[slice_]] + else: + # slice(a, b, c) + slice_ = [slice_] # to tuplize later + else: + # error: Item "slice" of "Union[slice, Sequence[Any]]" has no attribute + # "__iter__" (not iterable) -> is specifically list_like in conditional + slice_ = [p if pred(p) else [p] for p in slice_] # type: ignore[union-attr] + return tuple(slice_) + + +def maybe_convert_css_to_tuples(style: CSSProperties) -> CSSList: + """ + Convert css-string to sequence of tuples format if needed. + 'color:red; border:1px solid black;' -> [('color', 'red'), + ('border','1px solid red')] + """ + if isinstance(style, str): + s = style.split(";") + try: + return [ + (x.split(":")[0].strip(), x.split(":")[1].strip()) + for x in s + if x.strip() != "" + ] + except IndexError: + raise ValueError( + "Styles supplied as string must follow CSS rule formats, " + f"for example 'attr: val;'. '{style}' was given." + ) + return style + + +def refactor_levels( + level: Level | list[Level] | None, + obj: Index, +) -> list[int]: + """ + Returns a consistent levels arg for use in ``hide_index`` or ``hide_columns``. + + Parameters + ---------- + level : int, str, list + Original ``level`` arg supplied to above methods. 
+ obj: + Either ``self.index`` or ``self.columns`` + + Returns + ------- + list : refactored arg with a list of levels to hide + """ + if level is None: + levels_: list[int] = list(range(obj.nlevels)) + elif isinstance(level, int): + levels_ = [level] + elif isinstance(level, str): + levels_ = [obj._get_level_number(level)] + elif isinstance(level, list): + levels_ = [ + obj._get_level_number(lev) if not isinstance(lev, int) else lev + for lev in level + ] + else: + raise ValueError("`level` must be of type `int`, `str` or list of such") + return levels_ + + +class Tooltips: + """ + An extension to ``Styler`` that allows for and manipulates tooltips on hover + of ``" in result + result = styler.to_html() + assert "" not in result + + +def test_block_names(tpl_style, tpl_table): + # catch accidental removal of a block + expected_style = { + "before_style", + "style", + "table_styles", + "before_cellstyle", + "cellstyle", + } + expected_table = { + "before_table", + "table", + "caption", + "thead", + "tbody", + "after_table", + "before_head_rows", + "head_tr", + "after_head_rows", + "before_rows", + "tr", + "after_rows", + } + result1 = set(tpl_style.blocks) + assert result1 == expected_style + + result2 = set(tpl_table.blocks) + assert result2 == expected_table + + +def test_from_custom_template_table(tmpdir): + p = tmpdir.mkdir("tpl").join("myhtml_table.tpl") + p.write( + dedent( + """\ + {% extends "html_table.tpl" %} + {% block table %} +

<h1>{{custom_title}}</h1>

+ {{ super() }} + {% endblock table %}""" + ) + ) + result = Styler.from_custom_template(str(tmpdir.join("tpl")), "myhtml_table.tpl") + assert issubclass(result, Styler) + assert result.env is not Styler.env + assert result.template_html_table is not Styler.template_html_table + styler = result(DataFrame({"A": [1, 2]})) + assert "

<h1>My Title</h1>

\n\n\n + {{ super() }} + {% endblock style %}""" + ) + ) + result = Styler.from_custom_template( + str(tmpdir.join("tpl")), html_style="myhtml_style.tpl" + ) + assert issubclass(result, Styler) + assert result.env is not Styler.env + assert result.template_html_style is not Styler.template_html_style + styler = result(DataFrame({"A": [1, 2]})) + assert '\n\nfull cap" in styler.to_html() + + +@pytest.mark.parametrize("index", [False, True]) +@pytest.mark.parametrize("columns", [False, True]) +@pytest.mark.parametrize("index_name", [True, False]) +def test_sticky_basic(styler, index, columns, index_name): + if index_name: + styler.index.name = "some text" + if index: + styler.set_sticky(axis=0) + if columns: + styler.set_sticky(axis=1) + + left_css = ( + "#T_ {0} {{\n position: sticky;\n background-color: inherit;\n" + " left: 0px;\n z-index: {1};\n}}" + ) + top_css = ( + "#T_ {0} {{\n position: sticky;\n background-color: inherit;\n" + " top: {1}px;\n z-index: {2};\n{3}}}" + ) + + res = styler.set_uuid("").to_html() + + # test index stickys over thead and tbody + assert (left_css.format("thead tr th:nth-child(1)", "3 !important") in res) is index + assert (left_css.format("tbody tr th:nth-child(1)", "1") in res) is index + + # test column stickys including if name row + assert ( + top_css.format("thead tr:nth-child(1) th", "0", "2", " height: 25px;\n") in res + ) is (columns and index_name) + assert ( + top_css.format("thead tr:nth-child(2) th", "25", "2", " height: 25px;\n") + in res + ) is (columns and index_name) + assert (top_css.format("thead tr:nth-child(1) th", "0", "2", "") in res) is ( + columns and not index_name + ) + + +@pytest.mark.parametrize("index", [False, True]) +@pytest.mark.parametrize("columns", [False, True]) +def test_sticky_mi(styler_mi, index, columns): + if index: + styler_mi.set_sticky(axis=0) + if columns: + styler_mi.set_sticky(axis=1) + + left_css = ( + "#T_ {0} {{\n position: sticky;\n background-color: inherit;\n" + " left: {1}px;\n min-width: 75px;\n max-width: 75px;\n z-index: {2};\n}}" + ) + top_css = ( + "#T_ {0} {{\n position: sticky;\n background-color: inherit;\n" + " top: {1}px;\n height: 25px;\n z-index: {2};\n}}" + ) + + res = styler_mi.set_uuid("").to_html() + + # test the index stickys for thead and tbody over both levels + assert ( + left_css.format("thead tr th:nth-child(1)", "0", "3 !important") in res + ) is index + assert (left_css.format("tbody tr th.level0", "0", "1") in res) is index + assert ( + left_css.format("thead tr th:nth-child(2)", "75", "3 !important") in res + ) is index + assert (left_css.format("tbody tr th.level1", "75", "1") in res) is index + + # test the column stickys for each level row + assert (top_css.format("thead tr:nth-child(1) th", "0", "2") in res) is columns + assert (top_css.format("thead tr:nth-child(2) th", "25", "2") in res) is columns + + +@pytest.mark.parametrize("index", [False, True]) +@pytest.mark.parametrize("columns", [False, True]) +@pytest.mark.parametrize("levels", [[1], ["one"], "one"]) +def test_sticky_levels(styler_mi, index, columns, levels): + styler_mi.index.names, styler_mi.columns.names = ["zero", "one"], ["zero", "one"] + if index: + styler_mi.set_sticky(axis=0, levels=levels) + if columns: + styler_mi.set_sticky(axis=1, levels=levels) + + left_css = ( + "#T_ {0} {{\n position: sticky;\n background-color: inherit;\n" + " left: {1}px;\n min-width: 75px;\n max-width: 75px;\n z-index: {2};\n}}" + ) + top_css = ( + "#T_ {0} {{\n position: sticky;\n background-color: inherit;\n" + " top: {1}px;\n 
height: 25px;\n z-index: {2};\n}}" + ) + + res = styler_mi.set_uuid("").to_html() + + # test no sticking of level0 + assert "#T_ thead tr th:nth-child(1)" not in res + assert "#T_ tbody tr th.level0" not in res + assert "#T_ thead tr:nth-child(1) th" not in res + + # test sticking level1 + assert ( + left_css.format("thead tr th:nth-child(2)", "0", "3 !important") in res + ) is index + assert (left_css.format("tbody tr th.level1", "0", "1") in res) is index + assert (top_css.format("thead tr:nth-child(2) th", "0", "2") in res) is columns + + +def test_sticky_raises(styler): + with pytest.raises(ValueError, match="No axis named bad for object type DataFrame"): + styler.set_sticky(axis="bad") + + +@pytest.mark.parametrize( + "sparse_index, sparse_columns", + [(True, True), (True, False), (False, True), (False, False)], +) +def test_sparse_options(sparse_index, sparse_columns): + cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")]) + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=ridx, columns=cidx) + styler = df.style + + default_html = styler.to_html() # defaults under pd.options to (True , True) + + with option_context( + "styler.sparse.index", sparse_index, "styler.sparse.columns", sparse_columns + ): + html1 = styler.to_html() + assert (html1 == default_html) is (sparse_index and sparse_columns) + html2 = styler.to_html(sparse_index=sparse_index, sparse_columns=sparse_columns) + assert html1 == html2 + + +@pytest.mark.parametrize("index", [True, False]) +@pytest.mark.parametrize("columns", [True, False]) +def test_map_header_cell_ids(styler, index, columns): + # GH 41893 + func = lambda v: "attr: val;" + styler.uuid, styler.cell_ids = "", False + if index: + styler.map_index(func, axis="index") + if columns: + styler.map_index(func, axis="columns") + + result = styler.to_html() + + # test no data cell ids + assert '' in result + assert '' in result + + # test index header ids where needed and css styles + assert ( + '' in result + ) is index + assert ( + '' in result + ) is index + assert ("#T__level0_row0, #T__level0_row1 {\n attr: val;\n}" in result) is index + + # test column header ids where needed and css styles + assert ( + '' in result + ) is columns + assert ("#T__level0_col0 {\n attr: val;\n}" in result) is columns + + +@pytest.mark.parametrize("rows", [True, False]) +@pytest.mark.parametrize("cols", [True, False]) +def test_maximums(styler_mi, rows, cols): + result = styler_mi.to_html( + max_rows=2 if rows else None, + max_columns=2 if cols else None, + ) + + assert ">5" in result # [[0,1], [4,5]] always visible + assert (">8" in result) is not rows # first trimmed vertical element + assert (">2" in result) is not cols # first trimmed horizontal element + + +def test_replaced_css_class_names(): + css = { + "row_heading": "ROWHEAD", + # "col_heading": "COLHEAD", + "index_name": "IDXNAME", + # "col": "COL", + "row": "ROW", + # "col_trim": "COLTRIM", + "row_trim": "ROWTRIM", + "level": "LEVEL", + "data": "DATA", + "blank": "BLANK", + } + midx = MultiIndex.from_product([["a", "b"], ["c", "d"]]) + styler_mi = Styler( + DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=midx), + uuid_len=0, + ).set_table_styles(css_class_names=css) + styler_mi.index.names = ["n1", "n2"] + styler_mi.hide(styler_mi.index[1:], axis=0) + styler_mi.hide(styler_mi.columns[1:], axis=1) + styler_mi.map_index(lambda v: "color: red;", axis=0) + styler_mi.map_index(lambda v: "color: green;", axis=1) + 
styler_mi.map(lambda v: "color: blue;") + expected = dedent( + """\ + +
<div></div>"A&B"NA$ "A"$ A&BNA + ... + + Using a ``formatter`` with LaTeX ``escape``. + + >>> df = pd.DataFrame([[1, 2, 3]], columns=["123", "~", "$%#"]) + >>> df.style.format_index("\\textbf{{{}}}", escape="latex", axis=1).to_latex() + ... # doctest: +SKIP + \begin{tabular}{lrrr} + {} & {\textbf{123}} & {\textbf{\textasciitilde }} & {\textbf{\$\%\#}} \\ + 0 & 1 & 2 & 3 \\ + \end{tabular} + """ + axis = self.data._get_axis_number(axis) + if axis == 0: + display_funcs_, obj = self._display_funcs_index, self.index + else: + display_funcs_, obj = self._display_funcs_columns, self.columns + levels_ = refactor_levels(level, obj) + + if all( + ( + formatter is None, + level is None, + precision is None, + decimal == ".", + thousands is None, + na_rep is None, + escape is None, + hyperlinks is None, + ) + ): + display_funcs_.clear() + return self # clear the formatter / revert to default and avoid looping + + if not isinstance(formatter, dict): + formatter = {level: formatter for level in levels_} + else: + formatter = { + obj._get_level_number(level): formatter_ + for level, formatter_ in formatter.items() + } + + for lvl in levels_: + format_func = _maybe_wrap_formatter( + formatter.get(lvl), + na_rep=na_rep, + precision=precision, + decimal=decimal, + thousands=thousands, + escape=escape, + hyperlinks=hyperlinks, + ) + + for idx in [(i, lvl) if axis == 0 else (lvl, i) for i in range(len(obj))]: + display_funcs_[idx] = format_func + + return self + + def relabel_index( + self, + labels: Sequence | Index, + axis: Axis = 0, + level: Level | list[Level] | None = None, + ) -> StylerRenderer: + r""" + Relabel the index, or column header, keys to display a set of specified values. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + labels : list-like or Index + New labels to display. Must have same length as the underlying values not + hidden. + axis : {"index", 0, "columns", 1} + Apply to the index or columns. + level : int, str, list, optional + The level(s) over which to apply the new labels. If `None` will apply + to all levels of an Index or MultiIndex which are not hidden. + + Returns + ------- + Styler + + See Also + -------- + Styler.format_index: Format the text display value of index or column headers. + Styler.hide: Hide the index, column headers, or specified data from display. + + Notes + ----- + As part of Styler, this method allows the display of an index to be + completely user-specified without affecting the underlying DataFrame data, + index, or column headers. This means that the flexibility of indexing is + maintained whilst the final display is customisable. + + Since Styler is designed to be progressively constructed with method chaining, + this method is adapted to react to the **currently specified hidden elements**. + This is useful because it means one does not have to specify all the new + labels if the majority of an index, or column headers, have already been hidden. + The following produce equivalent display (note the length of ``labels`` in + each case). + + .. code-block:: python + + # relabel first, then hide + df = pd.DataFrame({"col": ["a", "b", "c"]}) + df.style.relabel_index(["A", "B", "C"]).hide([0,1]) + # hide first, then relabel + df = pd.DataFrame({"col": ["a", "b", "c"]}) + df.style.hide([0,1]).relabel_index(["C"]) + + This method should be used, rather than :meth:`Styler.format_index`, in one of + the following cases (see examples): + + - A specified set of labels are required which are not a function of the + underlying index keys. 
+ - The function of the underlying index keys requires a counter variable, + such as those available upon enumeration. + + Examples + -------- + Basic use + + >>> df = pd.DataFrame({"col": ["a", "b", "c"]}) + >>> df.style.relabel_index(["A", "B", "C"]) # doctest: +SKIP + col + A a + B b + C c + + Chaining with pre-hidden elements + + >>> df.style.hide([0,1]).relabel_index(["C"]) # doctest: +SKIP + col + C c + + Using a MultiIndex + + >>> midx = pd.MultiIndex.from_product([[0, 1], [0, 1], [0, 1]]) + >>> df = pd.DataFrame({"col": list(range(8))}, index=midx) + >>> styler = df.style # doctest: +SKIP + col + 0 0 0 0 + 1 1 + 1 0 2 + 1 3 + 1 0 0 4 + 1 5 + 1 0 6 + 1 7 + >>> styler.hide((midx.get_level_values(0)==0)|(midx.get_level_values(1)==0)) + ... # doctest: +SKIP + >>> styler.hide(level=[0,1]) # doctest: +SKIP + >>> styler.relabel_index(["binary6", "binary7"]) # doctest: +SKIP + col + binary6 6 + binary7 7 + + We can also achieve the above by indexing first and then re-labeling + + >>> styler = df.loc[[(1,1,0), (1,1,1)]].style + >>> styler.hide(level=[0,1]).relabel_index(["binary6", "binary7"]) + ... # doctest: +SKIP + col + binary6 6 + binary7 7 + + Defining a formatting function which uses an enumeration counter. Also note + that the value of the index key is passed in the case of string labels so it + can also be inserted into the label, using curly brackets (or double curly + brackets if the string if pre-formatted), + + >>> df = pd.DataFrame({"samples": np.random.rand(10)}) + >>> styler = df.loc[np.random.randint(0,10,3)].style + >>> styler.relabel_index([f"sample{i+1} ({{}})" for i in range(3)]) + ... # doctest: +SKIP + samples + sample1 (5) 0.315811 + sample2 (0) 0.495941 + sample3 (2) 0.067946 + """ + axis = self.data._get_axis_number(axis) + if axis == 0: + display_funcs_, obj = self._display_funcs_index, self.index + hidden_labels, hidden_lvls = self.hidden_rows, self.hide_index_ + else: + display_funcs_, obj = self._display_funcs_columns, self.columns + hidden_labels, hidden_lvls = self.hidden_columns, self.hide_columns_ + visible_len = len(obj) - len(set(hidden_labels)) + if len(labels) != visible_len: + raise ValueError( + "``labels`` must be of length equal to the number of " + f"visible labels along ``axis`` ({visible_len})." + ) + + if level is None: + level = [i for i in range(obj.nlevels) if not hidden_lvls[i]] + levels_ = refactor_levels(level, obj) + + def alias_(x, value): + if isinstance(value, str): + return value.format(x) + return value + + for ai, i in enumerate([i for i in range(len(obj)) if i not in hidden_labels]): + if len(levels_) == 1: + idx = (i, levels_[0]) if axis == 0 else (levels_[0], i) + display_funcs_[idx] = partial(alias_, value=labels[ai]) + else: + for aj, lvl in enumerate(levels_): + idx = (i, lvl) if axis == 0 else (lvl, i) + display_funcs_[idx] = partial(alias_, value=labels[ai][aj]) + + return self + + +def _element( + html_element: str, + html_class: str | None, + value: Any, + is_visible: bool, + **kwargs, +) -> dict: + """ + Template to return container with information for a `` cells in the HTML result. + + Parameters + ---------- + css_name: str, default "pd-t" + Name of the CSS class that controls visualisation of tooltips. + css_props: list-like, default; see Notes + List of (attr, value) tuples defining properties of the CSS class. + tooltips: DataFrame, default empty + DataFrame of strings aligned with underlying Styler data for tooltip + display. 
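+
+    For example (an editorial illustration, not part of the original docstring),
+    this class is normally driven through the public ``Styler.set_tooltips``:
+
+    >>> df = pd.DataFrame({"A": [1, 2]})
+    >>> ttips = pd.DataFrame({"A": ["first row", ""]})  # "" means no tooltip
+    >>> html = df.style.set_tooltips(ttips).to_html()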
+ + Notes + ----- + The default properties for the tooltip CSS class are: + + - visibility: hidden + - position: absolute + - z-index: 1 + - background-color: black + - color: white + - transform: translate(-20px, -20px) + + Hidden visibility is a key prerequisite to the hover functionality, and should + always be included in any manual properties specification. + """ + + def __init__( + self, + css_props: CSSProperties = [ + ("visibility", "hidden"), + ("position", "absolute"), + ("z-index", 1), + ("background-color", "black"), + ("color", "white"), + ("transform", "translate(-20px, -20px)"), + ], + css_name: str = "pd-t", + tooltips: DataFrame = DataFrame(), + ) -> None: + self.class_name = css_name + self.class_properties = css_props + self.tt_data = tooltips + self.table_styles: CSSStyles = [] + + @property + def _class_styles(self): + """ + Combine the ``_Tooltips`` CSS class name and CSS properties to the format + required to extend the underlying ``Styler`` `table_styles` to allow + tooltips to render in HTML. + + Returns + ------- + styles : List + """ + return [ + { + "selector": f".{self.class_name}", + "props": maybe_convert_css_to_tuples(self.class_properties), + } + ] + + def _pseudo_css(self, uuid: str, name: str, row: int, col: int, text: str): + """ + For every table data-cell that has a valid tooltip (not None, NaN or + empty string) must create two pseudo CSS entries for the specific + element id which are added to overall table styles: + an on hover visibility change and a content change + dependent upon the user's chosen display string. + + For example: + [{"selector": "T__row1_col1:hover .pd-t", + "props": [("visibility", "visible")]}, + {"selector": "T__row1_col1 .pd-t::after", + "props": [("content", "Some Valid Text String")]}] + + Parameters + ---------- + uuid: str + The uuid of the Styler instance + name: str + The css-name of the class used for styling tooltips + row : int + The row index of the specified tooltip string data + col : int + The col index of the specified tooltip string data + text : str + The textual content of the tooltip to be displayed in HTML. + + Returns + ------- + pseudo_css : List + """ + selector_id = "#T_" + uuid + "_row" + str(row) + "_col" + str(col) + return [ + { + "selector": selector_id + f":hover .{name}", + "props": [("visibility", "visible")], + }, + { + "selector": selector_id + f" .{name}::after", + "props": [("content", f'"{text}"')], + }, + ] + + def _translate(self, styler: StylerRenderer, d: dict): + """ + Mutate the render dictionary to allow for tooltips: + + - Add ```` HTML element to each data cells ``display_value``. Ignores + headers. + - Add table level CSS styles to control pseudo classes. + + Parameters + ---------- + styler_data : DataFrame + Underlying ``Styler`` DataFrame used for reindexing. + uuid : str + The underlying ``Styler`` uuid for CSS id. 
+ d : dict + The dictionary prior to final render + + Returns + ------- + render_dict : Dict + """ + self.tt_data = self.tt_data.reindex_like(styler.data) + if self.tt_data.empty: + return d + + name = self.class_name + mask = (self.tt_data.isna()) | (self.tt_data.eq("")) # empty string = no ttip + self.table_styles = [ + style + for sublist in [ + self._pseudo_css(styler.uuid, name, i, j, str(self.tt_data.iloc[i, j])) + for i in range(len(self.tt_data.index)) + for j in range(len(self.tt_data.columns)) + if not ( + mask.iloc[i, j] + or i in styler.hidden_rows + or j in styler.hidden_columns + ) + ] + for style in sublist + ] + + if self.table_styles: + # add span class to every cell only if at least 1 non-empty tooltip + for row in d["body"]: + for item in row: + if item["type"] == "td": + item["display_value"] = ( + str(item["display_value"]) + + f'' + ) + d["table_styles"].extend(self._class_styles) + d["table_styles"].extend(self.table_styles) + + return d + + +def _parse_latex_table_wrapping(table_styles: CSSStyles, caption: str | None) -> bool: + """ + Indicate whether LaTeX {tabular} should be wrapped with a {table} environment. + + Parses the `table_styles` and detects any selectors which must be included outside + of {tabular}, i.e. indicating that wrapping must occur, and therefore return True, + or if a caption exists and requires similar. + """ + IGNORED_WRAPPERS = ["toprule", "midrule", "bottomrule", "column_format"] + # ignored selectors are included with {tabular} so do not need wrapping + return ( + table_styles is not None + and any(d["selector"] not in IGNORED_WRAPPERS for d in table_styles) + ) or caption is not None + + +def _parse_latex_table_styles(table_styles: CSSStyles, selector: str) -> str | None: + """ + Return the first 'props' 'value' from ``tables_styles`` identified by ``selector``. + + Examples + -------- + >>> table_styles = [{'selector': 'foo', 'props': [('attr','value')]}, + ... {'selector': 'bar', 'props': [('attr', 'overwritten')]}, + ... {'selector': 'bar', 'props': [('a1', 'baz'), ('a2', 'ignore')]}] + >>> _parse_latex_table_styles(table_styles, selector='bar') + 'baz' + + Notes + ----- + The replacement of "§" with ":" is to avoid the CSS problem where ":" has structural + significance and cannot be used in LaTeX labels, but is often required by them. + """ + for style in table_styles[::-1]: # in reverse for most recently applied style + if style["selector"] == selector: + return str(style["props"][0][1]).replace("§", ":") + return None + + +def _parse_latex_cell_styles( + latex_styles: CSSList, display_value: str, convert_css: bool = False +) -> str: + r""" + Mutate the ``display_value`` string including LaTeX commands from ``latex_styles``. + + This method builds a recursive latex chain of commands based on the + CSSList input, nested around ``display_value``. + + If a CSS style is given as ('', '') this is translated to + '\{display_value}', and this value is treated as the + display value for the next iteration. 
+ + The most recent style forms the inner component, for example for styles: + `[('c1', 'o1'), ('c2', 'o2')]` this returns: `\c1o1{\c2o2{display_value}}` + + Sometimes latex commands have to be wrapped with curly braces in different ways: + We create some parsing flags to identify the different behaviours: + + - `--rwrap` : `\{}` + - `--wrap` : `{\ }` + - `--nowrap` : `\ ` + - `--lwrap` : `{\} ` + - `--dwrap` : `{\}{}` + + For example for styles: + `[('c1', 'o1--wrap'), ('c2', 'o2')]` this returns: `{\c1o1 \c2o2{display_value}} + """ + if convert_css: + latex_styles = _parse_latex_css_conversion(latex_styles) + for command, options in latex_styles[::-1]: # in reverse for most recent style + formatter = { + "--wrap": f"{{\\{command}--to_parse {display_value}}}", + "--nowrap": f"\\{command}--to_parse {display_value}", + "--lwrap": f"{{\\{command}--to_parse}} {display_value}", + "--rwrap": f"\\{command}--to_parse{{{display_value}}}", + "--dwrap": f"{{\\{command}--to_parse}}{{{display_value}}}", + } + display_value = f"\\{command}{options} {display_value}" + for arg in ["--nowrap", "--wrap", "--lwrap", "--rwrap", "--dwrap"]: + if arg in str(options): + display_value = formatter[arg].replace( + "--to_parse", _parse_latex_options_strip(value=options, arg=arg) + ) + break # only ever one purposeful entry + return display_value + + +def _parse_latex_header_span( + cell: dict[str, Any], + multirow_align: str, + multicol_align: str, + wrap: bool = False, + convert_css: bool = False, +) -> str: + r""" + Refactor the cell `display_value` if a 'colspan' or 'rowspan' attribute is present. + + 'rowspan' and 'colspan' do not occur simultaneouly. If they are detected then + the `display_value` is altered to a LaTeX `multirow` or `multicol` command + respectively, with the appropriate cell-span. + + ``wrap`` is used to enclose the `display_value` in braces which is needed for + column headers using an siunitx package. + + Requires the package {multirow}, whereas multicol support is usually built in + to the {tabular} environment. + + Examples + -------- + >>> cell = {'cellstyle': '', 'display_value':'text', 'attributes': 'colspan="3"'} + >>> _parse_latex_header_span(cell, 't', 'c') + '\\multicolumn{3}{c}{text}' + """ + display_val = _parse_latex_cell_styles( + cell["cellstyle"], cell["display_value"], convert_css + ) + if "attributes" in cell: + attrs = cell["attributes"] + if 'colspan="' in attrs: + colspan = attrs[attrs.find('colspan="') + 9 :] # len('colspan="') = 9 + colspan = int(colspan[: colspan.find('"')]) + if "naive-l" == multicol_align: + out = f"{{{display_val}}}" if wrap else f"{display_val}" + blanks = " & {}" if wrap else " &" + return out + blanks * (colspan - 1) + elif "naive-r" == multicol_align: + out = f"{{{display_val}}}" if wrap else f"{display_val}" + blanks = "{} & " if wrap else "& " + return blanks * (colspan - 1) + out + return f"\\multicolumn{{{colspan}}}{{{multicol_align}}}{{{display_val}}}" + elif 'rowspan="' in attrs: + if multirow_align == "naive": + return display_val + rowspan = attrs[attrs.find('rowspan="') + 9 :] + rowspan = int(rowspan[: rowspan.find('"')]) + return f"\\multirow[{multirow_align}]{{{rowspan}}}{{*}}{{{display_val}}}" + if wrap: + return f"{{{display_val}}}" + else: + return display_val + + +def _parse_latex_options_strip(value: str | float, arg: str) -> str: + """ + Strip a css_value which may have latex wrapping arguments, css comment identifiers, + and whitespaces, to a valid string for latex options parsing. 
+ + For example: 'red /* --wrap */ ' --> 'red' + """ + return str(value).replace(arg, "").replace("/*", "").replace("*/", "").strip() + + +def _parse_latex_css_conversion(styles: CSSList) -> CSSList: + """ + Convert CSS (attribute,value) pairs to equivalent LaTeX (command,options) pairs. + + Ignore conversion if tagged with `--latex` option, skipped if no conversion found. + """ + + def font_weight(value, arg): + if value in ("bold", "bolder"): + return "bfseries", f"{arg}" + return None + + def font_style(value, arg): + if value == "italic": + return "itshape", f"{arg}" + if value == "oblique": + return "slshape", f"{arg}" + return None + + def color(value, user_arg, command, comm_arg): + """ + CSS colors have 5 formats to process: + + - 6 digit hex code: "#ff23ee" --> [HTML]{FF23EE} + - 3 digit hex code: "#f0e" --> [HTML]{FF00EE} + - rgba: rgba(128, 255, 0, 0.5) --> [rgb]{0.502, 1.000, 0.000} + - rgb: rgb(128, 255, 0,) --> [rbg]{0.502, 1.000, 0.000} + - string: red --> {red} + + Additionally rgb or rgba can be expressed in % which is also parsed. + """ + arg = user_arg if user_arg != "" else comm_arg + + if value[0] == "#" and len(value) == 7: # color is hex code + return command, f"[HTML]{{{value[1:].upper()}}}{arg}" + if value[0] == "#" and len(value) == 4: # color is short hex code + val = f"{value[1].upper()*2}{value[2].upper()*2}{value[3].upper()*2}" + return command, f"[HTML]{{{val}}}{arg}" + elif value[:3] == "rgb": # color is rgb or rgba + r = re.findall("(?<=\\()[0-9\\s%]+(?=,)", value)[0].strip() + r = float(r[:-1]) / 100 if "%" in r else int(r) / 255 + g = re.findall("(?<=,)[0-9\\s%]+(?=,)", value)[0].strip() + g = float(g[:-1]) / 100 if "%" in g else int(g) / 255 + if value[3] == "a": # color is rgba + b = re.findall("(?<=,)[0-9\\s%]+(?=,)", value)[1].strip() + else: # color is rgb + b = re.findall("(?<=,)[0-9\\s%]+(?=\\))", value)[0].strip() + b = float(b[:-1]) / 100 if "%" in b else int(b) / 255 + return command, f"[rgb]{{{r:.3f}, {g:.3f}, {b:.3f}}}{arg}" + else: + return command, f"{{{value}}}{arg}" # color is likely string-named + + CONVERTED_ATTRIBUTES: dict[str, Callable] = { + "font-weight": font_weight, + "background-color": partial(color, command="cellcolor", comm_arg="--lwrap"), + "color": partial(color, command="color", comm_arg=""), + "font-style": font_style, + } + + latex_styles: CSSList = [] + for attribute, value in styles: + if isinstance(value, str) and "--latex" in value: + # return the style without conversion but drop '--latex' + latex_styles.append((attribute, value.replace("--latex", ""))) + if attribute in CONVERTED_ATTRIBUTES: + arg = "" + for x in ["--wrap", "--nowrap", "--lwrap", "--dwrap", "--rwrap"]: + if x in str(value): + arg, value = x, _parse_latex_options_strip(value, x) + break + latex_style = CONVERTED_ATTRIBUTES[attribute](value, arg) + if latex_style is not None: + latex_styles.extend([latex_style]) + return latex_styles + + +def _escape_latex(s): + r""" + Replace the characters ``&``, ``%``, ``$``, ``#``, ``_``, ``{``, ``}``, + ``~``, ``^``, and ``\`` in the string with LaTeX-safe sequences. + + Use this if you need to display text that might contain such characters in LaTeX. 
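+
+    For example, ``"50% of $100 & more"`` becomes
+    ``"50\% of \$100 \& more"``.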
+ + Parameters + ---------- + s : str + Input to be escaped + + Return + ------ + str : + Escaped string + """ + return ( + s.replace("\\", "ab2§=§8yz") # rare string for final conversion: avoid \\ clash + .replace("ab2§=§8yz ", "ab2§=§8yz\\space ") # since \backslash gobbles spaces + .replace("&", "\\&") + .replace("%", "\\%") + .replace("$", "\\$") + .replace("#", "\\#") + .replace("_", "\\_") + .replace("{", "\\{") + .replace("}", "\\}") + .replace("~ ", "~\\space ") # since \textasciitilde gobbles spaces + .replace("~", "\\textasciitilde ") + .replace("^ ", "^\\space ") # since \textasciicircum gobbles spaces + .replace("^", "\\textasciicircum ") + .replace("ab2§=§8yz", "\\textbackslash ") + ) + + +def _math_mode_with_dollar(s): + r""" + All characters in LaTeX math mode are preserved. + + The substrings in LaTeX math mode, which start with + the character ``$`` and end with ``$``, are preserved + without escaping. Otherwise regular LaTeX escaping applies. + + Parameters + ---------- + s : str + Input to be escaped + + Return + ------ + str : + Escaped string + """ + s = s.replace(r"\$", r"rt8§=§7wz") + pattern = re.compile(r"\$.*?\$") + pos = 0 + ps = pattern.search(s, pos) + res = [] + while ps: + res.append(_escape_latex(s[pos : ps.span()[0]])) + res.append(ps.group()) + pos = ps.span()[1] + ps = pattern.search(s, pos) + + res.append(_escape_latex(s[pos : len(s)])) + return "".join(res).replace(r"rt8§=§7wz", r"\$") + + +def _math_mode_with_parentheses(s): + r""" + All characters in LaTeX math mode are preserved. + + The substrings in LaTeX math mode, which start with + the character ``\(`` and end with ``\)``, are preserved + without escaping. Otherwise regular LaTeX escaping applies. + + Parameters + ---------- + s : str + Input to be escaped + + Return + ------ + str : + Escaped string + """ + s = s.replace(r"\(", r"LEFT§=§6yzLEFT").replace(r"\)", r"RIGHTab5§=§RIGHT") + res = [] + for item in re.split(r"LEFT§=§6yz|ab5§=§RIGHT", s): + if item.startswith("LEFT") and item.endswith("RIGHT"): + res.append(item.replace("LEFT", r"\(").replace("RIGHT", r"\)")) + elif "LEFT" in item and "RIGHT" in item: + res.append( + _escape_latex(item).replace("LEFT", r"\(").replace("RIGHT", r"\)") + ) + else: + res.append( + _escape_latex(item) + .replace("LEFT", r"\textbackslash (") + .replace("RIGHT", r"\textbackslash )") + ) + return "".join(res) + + +def _escape_latex_math(s): + r""" + All characters in LaTeX math mode are preserved. + + The substrings in LaTeX math mode, which either are surrounded + by two characters ``$`` or start with the character ``\(`` and end with ``\)``, + are preserved without escaping. Otherwise regular LaTeX escaping applies. 
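+
+    For example, ``"the value of $x^2$ is 100%"`` becomes
+    ``"the value of $x^2$ is 100\%"``; the dollar-delimited span is kept
+    verbatim while the surrounding text is escaped.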
+ + Parameters + ---------- + s : str + Input to be escaped + + Return + ------ + str : + Escaped string + """ + s = s.replace(r"\$", r"rt8§=§7wz") + ps_d = re.compile(r"\$.*?\$").search(s, 0) + ps_p = re.compile(r"\(.*?\)").search(s, 0) + mode = [] + if ps_d: + mode.append(ps_d.span()[0]) + if ps_p: + mode.append(ps_p.span()[0]) + if len(mode) == 0: + return _escape_latex(s.replace(r"rt8§=§7wz", r"\$")) + if s[mode[0]] == r"$": + return _math_mode_with_dollar(s.replace(r"rt8§=§7wz", r"\$")) + if s[mode[0] - 1 : mode[0] + 1] == r"\(": + return _math_mode_with_parentheses(s.replace(r"rt8§=§7wz", r"\$")) + else: + return _escape_latex(s.replace(r"rt8§=§7wz", r"\$")) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/html.tpl b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/html.tpl new file mode 100644 index 00000000..8c63be3a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/html.tpl @@ -0,0 +1,16 @@ +{# Update the html_style/table_structure.html documentation too #} +{% if doctype_html %} + + + + +{% if not exclude_styles %}{% include html_style_tpl %}{% endif %} + + +{% include html_table_tpl %} + + +{% elif not doctype_html %} +{% if not exclude_styles %}{% include html_style_tpl %}{% endif %} +{% include html_table_tpl %} +{% endif %} diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/html_style.tpl b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/html_style.tpl new file mode 100644 index 00000000..5c3fcd97 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/html_style.tpl @@ -0,0 +1,26 @@ +{%- block before_style -%}{%- endblock before_style -%} +{% block style %} + +{% endblock style %} diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/html_table.tpl b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/html_table.tpl new file mode 100644 index 00000000..17118d2b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/html_table.tpl @@ -0,0 +1,63 @@ +{% block before_table %}{% endblock before_table %} +{% block table %} +{% if exclude_styles %} + +{% else %} +
+{% endif %}
+{% block caption %}
+{% if caption and caption is string %}
+  <caption>{{caption}}</caption>
+{% elif caption and caption is sequence %}
+  <caption>{{caption[0]}}</caption>
+{% endif %}
+{% endblock caption %}
+{% block thead %}
+  <thead>
+{% block before_head_rows %}{% endblock %}
+{% for r in head %}
+{% block head_tr scoped %}
+    <tr>
+{% if exclude_styles %}
+{% for c in r %}
+{% if c.is_visible != False %}
+      <{{c.type}} {{c.attributes}}>{{c.display_value}}</{{c.type}}>
+{% endif %}
+{% endfor %}
+{% else %}
+{% for c in r %}
+{% if c.is_visible != False %}
+      <{{c.type}} {%- if c.id is defined %} id="T_{{uuid}}_{{c.id}}" {%- endif %} class="{{c.class}}" {{c.attributes}}>{{c.display_value}}</{{c.type}}>
+{% endif %}
+{% endfor %}
+{% endif %}
+    </tr>
+{% endblock head_tr %}
+{% endfor %}
+{% block after_head_rows %}{% endblock %}
+  </thead>
+{% endblock thead %}
+{% block tbody %}
+  <tbody>
+{% block before_rows %}{% endblock before_rows %}
+{% for r in body %}
+{% block tr scoped %}
+    <tr>
+{% if exclude_styles %}
+{% for c in r %}{% if c.is_visible != False %}
+      <{{c.type}} {{c.attributes}}>{{c.display_value}}</{{c.type}}>
+{% endif %}{% endfor %}
+{% else %}
+{% for c in r %}{% if c.is_visible != False %}
+      <{{c.type}} {%- if c.id is defined %} id="T_{{uuid}}_{{c.id}}" {%- endif %} class="{{c.class}}" {{c.attributes}}>{{c.display_value}}</{{c.type}}>
+{% endif %}{% endfor %}
+{% endif %}
+    </tr>
+{% endblock tr %}
+{% endfor %}
+{% block after_rows %}{% endblock after_rows %}
+  </tbody>
+{% endblock tbody %}
+</table>
+{% endblock table %} +{% block after_table %}{% endblock after_table %} diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/latex.tpl b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/latex.tpl new file mode 100644 index 00000000..ae341bbc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/latex.tpl @@ -0,0 +1,5 @@ +{% if environment == "longtable" %} +{% include "latex_longtable.tpl" %} +{% else %} +{% include "latex_table.tpl" %} +{% endif %} diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/latex_longtable.tpl b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/latex_longtable.tpl new file mode 100644 index 00000000..b97843ee --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/latex_longtable.tpl @@ -0,0 +1,82 @@ +\begin{longtable} +{%- set position = parse_table(table_styles, 'position') %} +{%- if position is not none %} +[{{position}}] +{%- endif %} +{%- set column_format = parse_table(table_styles, 'column_format') %} +{% raw %}{{% endraw %}{{column_format}}{% raw %}}{% endraw %} + +{% for style in table_styles %} +{% if style['selector'] not in ['position', 'position_float', 'caption', 'toprule', 'midrule', 'bottomrule', 'column_format', 'label'] %} +\{{style['selector']}}{{parse_table(table_styles, style['selector'])}} +{% endif %} +{% endfor %} +{% if caption and caption is string %} +\caption{% raw %}{{% endraw %}{{caption}}{% raw %}}{% endraw %} +{%- set label = parse_table(table_styles, 'label') %} +{%- if label is not none %} + \label{{label}} +{%- endif %} \\ +{% elif caption and caption is sequence %} +\caption[{{caption[1]}}]{% raw %}{{% endraw %}{{caption[0]}}{% raw %}}{% endraw %} +{%- set label = parse_table(table_styles, 'label') %} +{%- if label is not none %} + \label{{label}} +{%- endif %} \\ +{% else %} +{%- set label = parse_table(table_styles, 'label') %} +{%- if label is not none %} +\label{{label}} \\ +{% endif %} +{% endif %} +{% set toprule = parse_table(table_styles, 'toprule') %} +{% if toprule is not none %} +\{{toprule}} +{% endif %} +{% for row in head %} +{% for c in row %}{%- if not loop.first %} & {% endif %}{{parse_header(c, multirow_align, multicol_align, siunitx)}}{% endfor %} \\ +{% endfor %} +{% set midrule = parse_table(table_styles, 'midrule') %} +{% if midrule is not none %} +\{{midrule}} +{% endif %} +\endfirsthead +{% if caption and caption is string %} +\caption[]{% raw %}{{% endraw %}{{caption}}{% raw %}}{% endraw %} \\ +{% elif caption and caption is sequence %} +\caption[]{% raw %}{{% endraw %}{{caption[0]}}{% raw %}}{% endraw %} \\ +{% endif %} +{% if toprule is not none %} +\{{toprule}} +{% endif %} +{% for row in head %} +{% for c in row %}{%- if not loop.first %} & {% endif %}{{parse_header(c, multirow_align, multicol_align, siunitx)}}{% endfor %} \\ +{% endfor %} +{% if midrule is not none %} +\{{midrule}} +{% endif %} +\endhead +{% if midrule is not none %} +\{{midrule}} +{% endif %} +\multicolumn{% raw %}{{% endraw %}{{body[0]|length}}{% raw %}}{% endraw %}{r}{Continued on next page} \\ +{% if midrule is not none %} +\{{midrule}} +{% endif %} +\endfoot +{% set bottomrule = parse_table(table_styles, 'bottomrule') %} +{% if bottomrule is not none %} +\{{bottomrule}} +{% endif %} +\endlastfoot +{% for row in body %} +{% for c in row %}{% if not loop.first %} & {% endif %} + {%- if c.type == 'th' %}{{parse_header(c, multirow_align, multicol_align)}}{% else 
%}{{parse_cell(c.cellstyle, c.display_value, convert_css)}}{% endif %} +{%- endfor %} \\ +{% if clines and clines[loop.index] | length > 0 %} + {%- for cline in clines[loop.index] %}{% if not loop.first %} {% endif %}{{ cline }}{% endfor %} + +{% endif %} +{% endfor %} +\end{longtable} +{% raw %}{% endraw %} diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/latex_table.tpl b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/latex_table.tpl new file mode 100644 index 00000000..7858cb4c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/latex_table.tpl @@ -0,0 +1,57 @@ +{% if environment or parse_wrap(table_styles, caption) %} +\begin{% raw %}{{% endraw %}{{environment if environment else "table"}}{% raw %}}{% endraw %} +{%- set position = parse_table(table_styles, 'position') %} +{%- if position is not none %} +[{{position}}] +{%- endif %} + +{% set position_float = parse_table(table_styles, 'position_float') %} +{% if position_float is not none%} +\{{position_float}} +{% endif %} +{% if caption and caption is string %} +\caption{% raw %}{{% endraw %}{{caption}}{% raw %}}{% endraw %} + +{% elif caption and caption is sequence %} +\caption[{{caption[1]}}]{% raw %}{{% endraw %}{{caption[0]}}{% raw %}}{% endraw %} + +{% endif %} +{% for style in table_styles %} +{% if style['selector'] not in ['position', 'position_float', 'caption', 'toprule', 'midrule', 'bottomrule', 'column_format'] %} +\{{style['selector']}}{{parse_table(table_styles, style['selector'])}} +{% endif %} +{% endfor %} +{% endif %} +\begin{tabular} +{%- set column_format = parse_table(table_styles, 'column_format') %} +{% raw %}{{% endraw %}{{column_format}}{% raw %}}{% endraw %} + +{% set toprule = parse_table(table_styles, 'toprule') %} +{% if toprule is not none %} +\{{toprule}} +{% endif %} +{% for row in head %} +{% for c in row %}{%- if not loop.first %} & {% endif %}{{parse_header(c, multirow_align, multicol_align, siunitx, convert_css)}}{% endfor %} \\ +{% endfor %} +{% set midrule = parse_table(table_styles, 'midrule') %} +{% if midrule is not none %} +\{{midrule}} +{% endif %} +{% for row in body %} +{% for c in row %}{% if not loop.first %} & {% endif %} + {%- if c.type == 'th' %}{{parse_header(c, multirow_align, multicol_align, False, convert_css)}}{% else %}{{parse_cell(c.cellstyle, c.display_value, convert_css)}}{% endif %} +{%- endfor %} \\ +{% if clines and clines[loop.index] | length > 0 %} + {%- for cline in clines[loop.index] %}{% if not loop.first %} {% endif %}{{ cline }}{% endfor %} + +{% endif %} +{% endfor %} +{% set bottomrule = parse_table(table_styles, 'bottomrule') %} +{% if bottomrule is not none %} +\{{bottomrule}} +{% endif %} +\end{tabular} +{% if environment or parse_wrap(table_styles, caption) %} +\end{% raw %}{{% endraw %}{{environment if environment else "table"}}{% raw %}}{% endraw %} + +{% endif %} diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/string.tpl b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/string.tpl new file mode 100644 index 00000000..06aeb2b4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/templates/string.tpl @@ -0,0 +1,12 @@ +{% for r in head %} +{% for c in r %}{% if c["is_visible"] %} +{{ c["display_value"] }}{% if not loop.last %}{{ delimiter }}{% endif %} +{% endif %}{% endfor %} + +{% endfor %} +{% for r in body %} +{% for c in r %}{% if c["is_visible"] %} +{{ c["display_value"] }}{% if not loop.last %}{{ 
delimiter }}{% endif %} +{% endif %}{% endfor %} + +{% endfor %} diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/xml.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/xml.py new file mode 100644 index 00000000..76b93875 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/formats/xml.py @@ -0,0 +1,536 @@ +""" +:mod:`pandas.io.formats.xml` is a module for formatting data in XML. +""" +from __future__ import annotations + +import codecs +import io +from typing import ( + TYPE_CHECKING, + Any, +) + +from pandas.errors import AbstractMethodError +from pandas.util._decorators import doc + +from pandas.core.dtypes.common import is_list_like +from pandas.core.dtypes.missing import isna + +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import get_handle +from pandas.io.xml import ( + get_data_from_filepath, + preprocess_data, +) + +if TYPE_CHECKING: + from pandas._typing import ( + CompressionOptions, + FilePath, + ReadBuffer, + StorageOptions, + WriteBuffer, + ) + + from pandas import DataFrame + + +@doc( + storage_options=_shared_docs["storage_options"], + compression_options=_shared_docs["compression_options"] % "path_or_buffer", +) +class BaseXMLFormatter: + """ + Subclass for formatting data in XML. + + Parameters + ---------- + path_or_buffer : str or file-like + This can be either a string of raw XML, a valid URL, + file or file-like object. + + index : bool + Whether to include index in xml document. + + row_name : str + Name for root of xml document. Default is 'data'. + + root_name : str + Name for row elements of xml document. Default is 'row'. + + na_rep : str + Missing data representation. + + attrs_cols : list + List of columns to write as attributes in row element. + + elem_cols : list + List of columns to write as children in row element. + + namespaces : dict + The namespaces to define in XML document as dicts with key + being namespace and value the URI. + + prefix : str + The prefix for each element in XML document including root. + + encoding : str + Encoding of xml object or document. + + xml_declaration : bool + Whether to include xml declaration at top line item in xml. + + pretty_print : bool + Whether to write xml document with line breaks and indentation. + + stylesheet : str or file-like + A URL, file, file-like object, or a raw string containing XSLT. + + {compression_options} + + .. versionchanged:: 1.4.0 Zstandard support. 
+ + {storage_options} + + See also + -------- + pandas.io.formats.xml.EtreeXMLFormatter + pandas.io.formats.xml.LxmlXMLFormatter + + """ + + def __init__( + self, + frame: DataFrame, + path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, + index: bool = True, + root_name: str | None = "data", + row_name: str | None = "row", + na_rep: str | None = None, + attr_cols: list[str] | None = None, + elem_cols: list[str] | None = None, + namespaces: dict[str | None, str] | None = None, + prefix: str | None = None, + encoding: str = "utf-8", + xml_declaration: bool | None = True, + pretty_print: bool | None = True, + stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None, + compression: CompressionOptions = "infer", + storage_options: StorageOptions | None = None, + ) -> None: + self.frame = frame + self.path_or_buffer = path_or_buffer + self.index = index + self.root_name = root_name + self.row_name = row_name + self.na_rep = na_rep + self.attr_cols = attr_cols + self.elem_cols = elem_cols + self.namespaces = namespaces + self.prefix = prefix + self.encoding = encoding + self.xml_declaration = xml_declaration + self.pretty_print = pretty_print + self.stylesheet = stylesheet + self.compression: CompressionOptions = compression + self.storage_options = storage_options + + self.orig_cols = self.frame.columns.tolist() + self.frame_dicts = self.process_dataframe() + + self.validate_columns() + self.validate_encoding() + self.prefix_uri = self.get_prefix_uri() + self.handle_indexes() + + def build_tree(self) -> bytes: + """ + Build tree from data. + + This method initializes the root and builds attributes and elements + with optional namespaces. + """ + raise AbstractMethodError(self) + + def validate_columns(self) -> None: + """ + Validate elems_cols and attrs_cols. + + This method will check if columns is list-like. + + Raises + ------ + ValueError + * If value is not a list and less then length of nodes. + """ + if self.attr_cols and not is_list_like(self.attr_cols): + raise TypeError( + f"{type(self.attr_cols).__name__} is not a valid type for attr_cols" + ) + + if self.elem_cols and not is_list_like(self.elem_cols): + raise TypeError( + f"{type(self.elem_cols).__name__} is not a valid type for elem_cols" + ) + + def validate_encoding(self) -> None: + """ + Validate encoding. + + This method will check if encoding is among listed under codecs. + + Raises + ------ + LookupError + * If encoding is not available in codecs. + """ + + codecs.lookup(self.encoding) + + def process_dataframe(self) -> dict[int | str, dict[str, Any]]: + """ + Adjust Data Frame to fit xml output. + + This method will adjust underlying data frame for xml output, + including optionally replacing missing values and including indexes. + """ + + df = self.frame + + if self.index: + df = df.reset_index() + + if self.na_rep is not None: + df = df.fillna(self.na_rep) + + return df.to_dict(orient="index") + + def handle_indexes(self) -> None: + """ + Handle indexes. + + This method will add indexes into attr_cols or elem_cols. + """ + + if not self.index: + return + + first_key = next(iter(self.frame_dicts)) + indexes: list[str] = [ + x for x in self.frame_dicts[first_key].keys() if x not in self.orig_cols + ] + + if self.attr_cols: + self.attr_cols = indexes + self.attr_cols + + if self.elem_cols: + self.elem_cols = indexes + self.elem_cols + + def get_prefix_uri(self) -> str: + """ + Get uri of namespace prefix. + + This method retrieves corresponding URI to prefix in namespaces. 
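+
+        For example (with illustrative values), ``namespaces={"doc": "http://example.com"}``
+        and ``prefix="doc"`` yield the Clark-notation prefix ``"{http://example.com}"``,
+        which the formatter prepends to the root and row element names.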
+ + Raises + ------ + KeyError + *If prefix is not included in namespace dict. + """ + + raise AbstractMethodError(self) + + def other_namespaces(self) -> dict: + """ + Define other namespaces. + + This method will build dictionary of namespaces attributes + for root element, conditionally with optional namespaces and + prefix. + """ + + nmsp_dict: dict[str, str] = {} + if self.namespaces: + nmsp_dict = { + f"xmlns{p if p=='' else f':{p}'}": n + for p, n in self.namespaces.items() + if n != self.prefix_uri[1:-1] + } + + return nmsp_dict + + def build_attribs(self, d: dict[str, Any], elem_row: Any) -> Any: + """ + Create attributes of row. + + This method adds attributes using attr_cols to row element and + works with tuples for multindex or hierarchical columns. + """ + + if not self.attr_cols: + return elem_row + + for col in self.attr_cols: + attr_name = self._get_flat_col_name(col) + try: + if not isna(d[col]): + elem_row.attrib[attr_name] = str(d[col]) + except KeyError: + raise KeyError(f"no valid column, {col}") + return elem_row + + def _get_flat_col_name(self, col: str | tuple) -> str: + flat_col = col + if isinstance(col, tuple): + flat_col = ( + "".join([str(c) for c in col]).strip() + if "" in col + else "_".join([str(c) for c in col]).strip() + ) + return f"{self.prefix_uri}{flat_col}" + + def build_elems(self, d: dict[str, Any], elem_row: Any) -> None: + """ + Create child elements of row. + + This method adds child elements using elem_cols to row element and + works with tuples for multindex or hierarchical columns. + """ + + raise AbstractMethodError(self) + + def _build_elems(self, sub_element_cls, d: dict[str, Any], elem_row: Any) -> None: + if not self.elem_cols: + return + + for col in self.elem_cols: + elem_name = self._get_flat_col_name(col) + try: + val = None if isna(d[col]) or d[col] == "" else str(d[col]) + sub_element_cls(elem_row, elem_name).text = val + except KeyError: + raise KeyError(f"no valid column, {col}") + + def write_output(self) -> str | None: + xml_doc = self.build_tree() + + if self.path_or_buffer is not None: + with get_handle( + self.path_or_buffer, + "wb", + compression=self.compression, + storage_options=self.storage_options, + is_text=False, + ) as handles: + handles.handle.write(xml_doc) + return None + + else: + return xml_doc.decode(self.encoding).rstrip() + + +class EtreeXMLFormatter(BaseXMLFormatter): + """ + Class for formatting data in xml using Python standard library + modules: `xml.etree.ElementTree` and `xml.dom.minidom`. + """ + + def build_tree(self) -> bytes: + from xml.etree.ElementTree import ( + Element, + SubElement, + tostring, + ) + + self.root = Element( + f"{self.prefix_uri}{self.root_name}", attrib=self.other_namespaces() + ) + + for d in self.frame_dicts.values(): + elem_row = SubElement(self.root, f"{self.prefix_uri}{self.row_name}") + + if not self.attr_cols and not self.elem_cols: + self.elem_cols = list(d.keys()) + self.build_elems(d, elem_row) + + else: + elem_row = self.build_attribs(d, elem_row) + self.build_elems(d, elem_row) + + self.out_xml = tostring( + self.root, + method="xml", + encoding=self.encoding, + xml_declaration=self.xml_declaration, + ) + + if self.pretty_print: + self.out_xml = self.prettify_tree() + + if self.stylesheet is not None: + raise ValueError( + "To use stylesheet, you need lxml installed and selected as parser." 
+ ) + + return self.out_xml + + def get_prefix_uri(self) -> str: + from xml.etree.ElementTree import register_namespace + + uri = "" + if self.namespaces: + for p, n in self.namespaces.items(): + if isinstance(p, str) and isinstance(n, str): + register_namespace(p, n) + if self.prefix: + try: + uri = f"{{{self.namespaces[self.prefix]}}}" + except KeyError: + raise KeyError(f"{self.prefix} is not included in namespaces") + elif "" in self.namespaces: + uri = f'{{{self.namespaces[""]}}}' + else: + uri = "" + + return uri + + def build_elems(self, d: dict[str, Any], elem_row: Any) -> None: + from xml.etree.ElementTree import SubElement + + self._build_elems(SubElement, d, elem_row) + + def prettify_tree(self) -> bytes: + """ + Output tree for pretty print format. + + This method will pretty print xml with line breaks and indentation. + """ + + from xml.dom.minidom import parseString + + dom = parseString(self.out_xml) + + return dom.toprettyxml(indent=" ", encoding=self.encoding) + + +class LxmlXMLFormatter(BaseXMLFormatter): + """ + Class for formatting data in xml using Python standard library + modules: `xml.etree.ElementTree` and `xml.dom.minidom`. + """ + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self.convert_empty_str_key() + + def build_tree(self) -> bytes: + """ + Build tree from data. + + This method initializes the root and builds attributes and elements + with optional namespaces. + """ + from lxml.etree import ( + Element, + SubElement, + tostring, + ) + + self.root = Element(f"{self.prefix_uri}{self.root_name}", nsmap=self.namespaces) + + for d in self.frame_dicts.values(): + elem_row = SubElement(self.root, f"{self.prefix_uri}{self.row_name}") + + if not self.attr_cols and not self.elem_cols: + self.elem_cols = list(d.keys()) + self.build_elems(d, elem_row) + + else: + elem_row = self.build_attribs(d, elem_row) + self.build_elems(d, elem_row) + + self.out_xml = tostring( + self.root, + pretty_print=self.pretty_print, + method="xml", + encoding=self.encoding, + xml_declaration=self.xml_declaration, + ) + + if self.stylesheet is not None: + self.out_xml = self.transform_doc() + + return self.out_xml + + def convert_empty_str_key(self) -> None: + """ + Replace zero-length string in `namespaces`. + + This method will replace '' with None to align to `lxml` + requirement that empty string prefixes are not allowed. + """ + + if self.namespaces and "" in self.namespaces.keys(): + self.namespaces[None] = self.namespaces.pop("", "default") + + def get_prefix_uri(self) -> str: + uri = "" + if self.namespaces: + if self.prefix: + try: + uri = f"{{{self.namespaces[self.prefix]}}}" + except KeyError: + raise KeyError(f"{self.prefix} is not included in namespaces") + elif "" in self.namespaces: + uri = f'{{{self.namespaces[""]}}}' + else: + uri = "" + + return uri + + def build_elems(self, d: dict[str, Any], elem_row: Any) -> None: + from lxml.etree import SubElement + + self._build_elems(SubElement, d, elem_row) + + def transform_doc(self) -> bytes: + """ + Parse stylesheet from file or buffer and run it. + + This method will parse stylesheet object into tree for parsing + conditionally by its specific object type, then transforms + original tree with XSLT script. 
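+
+        A rough standalone equivalent using lxml directly (hypothetical file
+        names, for illustration only)::
+
+            from lxml.etree import XSLT, parse
+
+            xsl_doc = parse("style.xsl")  # the XSLT stylesheet
+            transformer = XSLT(xsl_doc)
+            new_doc = transformer(parse("data.xml"))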
+ """ + from lxml.etree import ( + XSLT, + XMLParser, + fromstring, + parse, + ) + + style_doc = self.stylesheet + assert style_doc is not None # is ensured by caller + + handle_data = get_data_from_filepath( + filepath_or_buffer=style_doc, + encoding=self.encoding, + compression=self.compression, + storage_options=self.storage_options, + ) + + with preprocess_data(handle_data) as xml_data: + curr_parser = XMLParser(encoding=self.encoding) + + if isinstance(xml_data, io.StringIO): + xsl_doc = fromstring( + xml_data.getvalue().encode(self.encoding), parser=curr_parser + ) + else: + xsl_doc = parse(xml_data, parser=curr_parser) + + transformer = XSLT(xsl_doc) + new_doc = transformer(self.root) + + return bytes(new_doc) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/gbq.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/gbq.py new file mode 100644 index 00000000..ee71f5af --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/gbq.py @@ -0,0 +1,235 @@ +""" Google BigQuery support """ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +from pandas.compat._optional import import_optional_dependency + +if TYPE_CHECKING: + import google.auth + + from pandas import DataFrame + + +def _try_import(): + # since pandas is a dependency of pandas-gbq + # we need to import on first use + msg = ( + "pandas-gbq is required to load data from Google BigQuery. " + "See the docs: https://pandas-gbq.readthedocs.io." + ) + pandas_gbq = import_optional_dependency("pandas_gbq", extra=msg) + return pandas_gbq + + +def read_gbq( + query: str, + project_id: str | None = None, + index_col: str | None = None, + col_order: list[str] | None = None, + reauth: bool = False, + auth_local_webserver: bool = True, + dialect: str | None = None, + location: str | None = None, + configuration: dict[str, Any] | None = None, + credentials: google.auth.credentials.Credentials | None = None, + use_bqstorage_api: bool | None = None, + max_results: int | None = None, + progress_bar_type: str | None = None, +) -> DataFrame: + """ + Load data from Google BigQuery. + + This function requires the `pandas-gbq package + `__. + + See the `How to authenticate with Google BigQuery + `__ + guide for authentication instructions. + + Parameters + ---------- + query : str + SQL-Like Query to return data values. + project_id : str, optional + Google BigQuery Account project ID. Optional when available from + the environment. + index_col : str, optional + Name of result column to use for index in results DataFrame. + col_order : list(str), optional + List of BigQuery column names in the desired order for results + DataFrame. + reauth : bool, default False + Force Google BigQuery to re-authenticate the user. This is useful + if multiple accounts are used. + auth_local_webserver : bool, default True + Use the `local webserver flow`_ instead of the `console flow`_ + when getting user credentials. + + .. _local webserver flow: + https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server + .. _console flow: + https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console + + *New in version 0.2.0 of pandas-gbq*. + + .. versionchanged:: 1.5.0 + Default value is changed to ``True``. Google has deprecated the + ``auth_local_webserver = False`` `"out of band" (copy-paste) + flow + `_. 
+ dialect : str, default 'legacy' + Note: The default value is changing to 'standard' in a future version. + + SQL syntax dialect to use. Value can be one of: + + ``'legacy'`` + Use BigQuery's legacy SQL dialect. For more information see + `BigQuery Legacy SQL Reference + `__. + ``'standard'`` + Use BigQuery's standard SQL, which is + compliant with the SQL 2011 standard. For more information + see `BigQuery Standard SQL Reference + `__. + location : str, optional + Location where the query job should run. See the `BigQuery locations + documentation + `__ for a + list of available locations. The location must match that of any + datasets used in the query. + + *New in version 0.5.0 of pandas-gbq*. + configuration : dict, optional + Query config parameters for job processing. + For example: + + configuration = {'query': {'useQueryCache': False}} + + For more information see `BigQuery REST API Reference + `__. + credentials : google.auth.credentials.Credentials, optional + Credentials for accessing Google APIs. Use this parameter to override + default credentials, such as to use Compute Engine + :class:`google.auth.compute_engine.Credentials` or Service Account + :class:`google.oauth2.service_account.Credentials` directly. + + *New in version 0.8.0 of pandas-gbq*. + use_bqstorage_api : bool, default False + Use the `BigQuery Storage API + `__ to + download query results quickly, but at an increased cost. To use this + API, first `enable it in the Cloud Console + `__. + You must also have the `bigquery.readsessions.create + `__ + permission on the project you are billing queries to. + + This feature requires version 0.10.0 or later of the ``pandas-gbq`` + package. It also requires the ``google-cloud-bigquery-storage`` and + ``fastavro`` packages. + + max_results : int, optional + If set, limit the maximum number of rows to fetch from the query + results. + + progress_bar_type : Optional, str + If set, use the `tqdm `__ library to + display a progress bar while the data downloads. Install the + ``tqdm`` package to use this feature. + + Possible values of ``progress_bar_type`` include: + + ``None`` + No progress bar. + ``'tqdm'`` + Use the :func:`tqdm.tqdm` function to print a progress bar + to :data:`sys.stderr`. + ``'tqdm_notebook'`` + Use the :func:`tqdm.tqdm_notebook` function to display a + progress bar as a Jupyter notebook widget. + ``'tqdm_gui'`` + Use the :func:`tqdm.tqdm_gui` function to display a + progress bar as a graphical dialog box. + + Returns + ------- + df: DataFrame + DataFrame representing results of query. + + See Also + -------- + pandas_gbq.read_gbq : This function in the pandas-gbq library. + DataFrame.to_gbq : Write a DataFrame to Google BigQuery. + + Examples + -------- + Example taken from `Google BigQuery documentation + `_ + + >>> sql = "SELECT name FROM table_name WHERE state = 'TX' LIMIT 100;" + >>> df = pd.read_gbq(sql, dialect="standard") # doctest: +SKIP + >>> project_id = "your-project-id" # doctest: +SKIP + >>> df = pd.read_gbq(sql, + ... project_id=project_id, + ... dialect="standard" + ... ) # doctest: +SKIP + """ + pandas_gbq = _try_import() + + kwargs: dict[str, str | bool | int | None] = {} + + # START: new kwargs. Don't populate unless explicitly set. 
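+    # Passing them unconditionally could raise TypeError on older pandas-gbq
+    # versions that do not recognise these keyword arguments.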
+ if use_bqstorage_api is not None: + kwargs["use_bqstorage_api"] = use_bqstorage_api + if max_results is not None: + kwargs["max_results"] = max_results + + kwargs["progress_bar_type"] = progress_bar_type + # END: new kwargs + + return pandas_gbq.read_gbq( + query, + project_id=project_id, + index_col=index_col, + col_order=col_order, + reauth=reauth, + auth_local_webserver=auth_local_webserver, + dialect=dialect, + location=location, + configuration=configuration, + credentials=credentials, + **kwargs, + ) + + +def to_gbq( + dataframe: DataFrame, + destination_table: str, + project_id: str | None = None, + chunksize: int | None = None, + reauth: bool = False, + if_exists: str = "fail", + auth_local_webserver: bool = True, + table_schema: list[dict[str, str]] | None = None, + location: str | None = None, + progress_bar: bool = True, + credentials: google.auth.credentials.Credentials | None = None, +) -> None: + pandas_gbq = _try_import() + pandas_gbq.to_gbq( + dataframe, + destination_table, + project_id=project_id, + chunksize=chunksize, + reauth=reauth, + if_exists=if_exists, + auth_local_webserver=auth_local_webserver, + table_schema=table_schema, + location=location, + progress_bar=progress_bar, + credentials=credentials, + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/html.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/html.py new file mode 100644 index 00000000..10701be4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/html.py @@ -0,0 +1,1264 @@ +""" +:mod:`pandas.io.html` is a module containing functionality for dealing with +HTML IO. + +""" + +from __future__ import annotations + +from collections import abc +import numbers +import re +from re import Pattern +from typing import ( + TYPE_CHECKING, + Literal, + cast, +) +import warnings + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency +from pandas.errors import ( + AbstractMethodError, + EmptyDataError, +) +from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.common import is_list_like + +from pandas import isna +from pandas.core.indexes.base import Index +from pandas.core.indexes.multi import MultiIndex +from pandas.core.series import Series +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import ( + file_exists, + get_handle, + is_file_like, + is_fsspec_url, + is_url, + stringify_path, + validate_header_arg, +) +from pandas.io.formats.printing import pprint_thing +from pandas.io.parsers import TextParser + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Sequence, + ) + + from pandas._typing import ( + BaseBuffer, + DtypeBackend, + FilePath, + ReadBuffer, + StorageOptions, + ) + + from pandas import DataFrame + +############# +# READ HTML # +############# +_RE_WHITESPACE = re.compile(r"[\r\n]+|\s{2,}") + + +def _remove_whitespace(s: str, regex: Pattern = _RE_WHITESPACE) -> str: + """ + Replace extra whitespace inside of a string with a single space. + + Parameters + ---------- + s : str or unicode + The string from which to remove extra whitespace. + regex : re.Pattern + The regular expression to use to remove extra whitespace. + + Returns + ------- + subd : str or unicode + `s` with all extra whitespace replaced with a single space. 
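+
+    Examples
+    --------
+    >>> _remove_whitespace("  hello   world  ")
+    'hello world'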
+ """ + return regex.sub(" ", s.strip()) + + +def _get_skiprows(skiprows: int | Sequence[int] | slice | None) -> int | Sequence[int]: + """ + Get an iterator given an integer, slice or container. + + Parameters + ---------- + skiprows : int, slice, container + The iterator to use to skip rows; can also be a slice. + + Raises + ------ + TypeError + * If `skiprows` is not a slice, integer, or Container + + Returns + ------- + it : iterable + A proper iterator to use to skip rows of a DataFrame. + """ + if isinstance(skiprows, slice): + start, step = skiprows.start or 0, skiprows.step or 1 + return list(range(start, skiprows.stop, step)) + elif isinstance(skiprows, numbers.Integral) or is_list_like(skiprows): + return cast("int | Sequence[int]", skiprows) + elif skiprows is None: + return 0 + raise TypeError(f"{type(skiprows).__name__} is not a valid type for skipping rows") + + +def _read( + obj: FilePath | BaseBuffer, + encoding: str | None, + storage_options: StorageOptions | None, +) -> str | bytes: + """ + Try to read from a url, file or string. + + Parameters + ---------- + obj : str, unicode, path object, or file-like object + + Returns + ------- + raw_text : str + """ + text: str | bytes + if ( + is_url(obj) + or hasattr(obj, "read") + or (isinstance(obj, str) and file_exists(obj)) + ): + with get_handle( + obj, "r", encoding=encoding, storage_options=storage_options + ) as handles: + text = handles.handle.read() + elif isinstance(obj, (str, bytes)): + text = obj + else: + raise TypeError(f"Cannot read object of type '{type(obj).__name__}'") + return text + + +class _HtmlFrameParser: + """ + Base class for parsers that parse HTML into DataFrames. + + Parameters + ---------- + io : str or file-like + This can be either a string of raw HTML, a valid URL using the HTTP, + FTP, or FILE protocols or a file-like object. + + match : str or regex + The text to match in the document. + + attrs : dict + List of HTML element attributes to match. + + encoding : str + Encoding to be used by parser + + displayed_only : bool + Whether or not items with "display:none" should be ignored + + extract_links : {None, "all", "header", "body", "footer"} + Table elements in the specified section(s) with tags will have their + href extracted. + + .. versionadded:: 1.5.0 + + Attributes + ---------- + io : str or file-like + raw HTML, URL, or file-like object + + match : regex + The text to match in the raw HTML + + attrs : dict-like + A dictionary of valid table attributes to use to search for table + elements. + + encoding : str + Encoding to be used by parser + + displayed_only : bool + Whether or not items with "display:none" should be ignored + + extract_links : {None, "all", "header", "body", "footer"} + Table elements in the specified section(s) with tags will have their + href extracted. + + .. versionadded:: 1.5.0 + + Notes + ----- + To subclass this class effectively you must override the following methods: + * :func:`_build_doc` + * :func:`_attr_getter` + * :func:`_href_getter` + * :func:`_text_getter` + * :func:`_parse_td` + * :func:`_parse_thead_tr` + * :func:`_parse_tbody_tr` + * :func:`_parse_tfoot_tr` + * :func:`_parse_tables` + * :func:`_equals_tag` + See each method's respective documentation for details on their + functionality. 
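+
+    Subclasses are driven through :meth:`parse_tables`, which yields one
+    parsed ``(header, body, footer)`` triple per table matched in the
+    document.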
+ """ + + def __init__( + self, + io: FilePath | ReadBuffer[str] | ReadBuffer[bytes], + match: str | Pattern, + attrs: dict[str, str] | None, + encoding: str, + displayed_only: bool, + extract_links: Literal[None, "header", "footer", "body", "all"], + storage_options: StorageOptions = None, + ) -> None: + self.io = io + self.match = match + self.attrs = attrs + self.encoding = encoding + self.displayed_only = displayed_only + self.extract_links = extract_links + self.storage_options = storage_options + + def parse_tables(self): + """ + Parse and return all tables from the DOM. + + Returns + ------- + list of parsed (header, body, footer) tuples from tables. + """ + tables = self._parse_tables(self._build_doc(), self.match, self.attrs) + return (self._parse_thead_tbody_tfoot(table) for table in tables) + + def _attr_getter(self, obj, attr): + """ + Return the attribute value of an individual DOM node. + + Parameters + ---------- + obj : node-like + A DOM node. + + attr : str or unicode + The attribute, such as "colspan" + + Returns + ------- + str or unicode + The attribute value. + """ + # Both lxml and BeautifulSoup have the same implementation: + return obj.get(attr) + + def _href_getter(self, obj): + """ + Return a href if the DOM node contains a child or None. + + Parameters + ---------- + obj : node-like + A DOM node. + + Returns + ------- + href : str or unicode + The href from the child of the DOM node. + """ + raise AbstractMethodError(self) + + def _text_getter(self, obj): + """ + Return the text of an individual DOM node. + + Parameters + ---------- + obj : node-like + A DOM node. + + Returns + ------- + text : str or unicode + The text from an individual DOM node. + """ + raise AbstractMethodError(self) + + def _parse_td(self, obj): + """ + Return the td elements from a row element. + + Parameters + ---------- + obj : node-like + A DOM node. + + Returns + ------- + list of node-like + These are the elements of each row, i.e., the columns. + """ + raise AbstractMethodError(self) + + def _parse_thead_tr(self, table): + """ + Return the list of thead row elements from the parsed table element. + + Parameters + ---------- + table : a table element that contains zero or more thead elements. + + Returns + ------- + list of node-like + These are the row elements of a table. + """ + raise AbstractMethodError(self) + + def _parse_tbody_tr(self, table): + """ + Return the list of tbody row elements from the parsed table element. + + HTML5 table bodies consist of either 0 or more elements (which + only contain elements) or 0 or more elements. This method + checks for both structures. + + Parameters + ---------- + table : a table element that contains row elements. + + Returns + ------- + list of node-like + These are the row elements of a table. + """ + raise AbstractMethodError(self) + + def _parse_tfoot_tr(self, table): + """ + Return the list of tfoot row elements from the parsed table element. + + Parameters + ---------- + table : a table element that contains row elements. + + Returns + ------- + list of node-like + These are the row elements of a table. + """ + raise AbstractMethodError(self) + + def _parse_tables(self, document, match, attrs): + """ + Return all tables from the parsed DOM. + + Parameters + ---------- + document : the DOM from which to parse the table element. + + match : str or regular expression + The text to search for in the DOM tree. + + attrs : dict + A dictionary of table attributes that can be used to disambiguate + multiple tables on a page. 
+
+        Raises
+        ------
+        ValueError : `match` does not match any text in the document.
+
+        Returns
+        -------
+        list of node-like
+            HTML <table> 
elements to be parsed into raw data. + """ + raise AbstractMethodError(self) + + def _equals_tag(self, obj, tag): + """ + Return whether an individual DOM node matches a tag + + Parameters + ---------- + obj : node-like + A DOM node. + + tag : str + Tag name to be checked for equality. + + Returns + ------- + boolean + Whether `obj`'s tag name is `tag` + """ + raise AbstractMethodError(self) + + def _build_doc(self): + """ + Return a tree-like object that can be used to iterate over the DOM. + + Returns + ------- + node-like + The DOM from which to parse the table element. + """ + raise AbstractMethodError(self) + + def _parse_thead_tbody_tfoot(self, table_html): + """ + Given a table, return parsed header, body, and foot. + + Parameters + ---------- + table_html : node-like + + Returns + ------- + tuple of (header, body, footer), each a list of list-of-text rows. + + Notes + ----- + Header and body are lists-of-lists. Top level list is a list of + rows. Each row is a list of str text. + + Logic: Use , , elements to identify + header, body, and footer, otherwise: + - Put all rows into body + - Move rows from top of body to header only if + all elements inside row are . Move the top all- or + while body_rows and row_is_all_th(body_rows[0]): + header_rows.append(body_rows.pop(0)) + + header = self._expand_colspan_rowspan(header_rows, section="header") + body = self._expand_colspan_rowspan(body_rows, section="body") + footer = self._expand_colspan_rowspan(footer_rows, section="footer") + + return header, body, footer + + def _expand_colspan_rowspan( + self, rows, section: Literal["header", "footer", "body"] + ): + """ + Given a list of s, return a list of text rows. + + Parameters + ---------- + rows : list of node-like + List of s + section : the section that the rows belong to (header, body or footer). + + Returns + ------- + list of list + Each returned row is a list of str text, or tuple (text, link) + if extract_links is not None. + + Notes + ----- + Any cell with ``rowspan`` or ``colspan`` will have its contents copied + to subsequent cells. + """ + all_texts = [] # list of rows, each a list of str + text: str | tuple + remainder: list[ + tuple[int, str | tuple, int] + ] = [] # list of (index, text, nrows) + + for tr in rows: + texts = [] # the output for this row + next_remainder = [] + + index = 0 + tds = self._parse_td(tr) + for td in tds: + # Append texts from previous rows with rowspan>1 that come + # before this or (see _parse_thead_tr). + return row.xpath("./td|./th") + + def _parse_tables(self, document, match, kwargs): + pattern = match.pattern + + # 1. check all descendants for the given pattern and only search tables + # GH 49929 + xpath_expr = f"//table[.//text()[re:test(., {repr(pattern)})]]" + + # if any table attributes were given build an xpath expression to + # search for them + if kwargs: + xpath_expr += _build_xpath_expr(kwargs) + + tables = document.xpath(xpath_expr, namespaces=_re_namespace) + + tables = self._handle_hidden_tables(tables, "attrib") + if self.displayed_only: + for table in tables: + # lxml utilizes XPATH 1.0 which does not have regex + # support. 
As a result, we find all elements with a style + # attribute and iterate them to check for display:none + for elem in table.xpath(".//style"): + elem.drop_tree() + for elem in table.xpath(".//*[@style]"): + if "display:none" in elem.attrib.get("style", "").replace(" ", ""): + elem.drop_tree() + if not tables: + raise ValueError(f"No tables found matching regex {repr(pattern)}") + return tables + + def _equals_tag(self, obj, tag): + return obj.tag == tag + + def _build_doc(self): + """ + Raises + ------ + ValueError + * If a URL that lxml cannot parse is passed. + + Exception + * Any other ``Exception`` thrown. For example, trying to parse a + URL that is syntactically correct on a machine with no internet + connection will fail. + + See Also + -------- + pandas.io.html._HtmlFrameParser._build_doc + """ + from lxml.etree import XMLSyntaxError + from lxml.html import ( + HTMLParser, + fromstring, + parse, + ) + + parser = HTMLParser(recover=True, encoding=self.encoding) + + try: + if is_url(self.io): + with get_handle( + self.io, "r", storage_options=self.storage_options + ) as f: + r = parse(f.handle, parser=parser) + else: + # try to parse the input in the simplest way + r = parse(self.io, parser=parser) + try: + r = r.getroot() + except AttributeError: + pass + except (UnicodeDecodeError, OSError) as e: + # if the input is a blob of html goop + if not is_url(self.io): + r = fromstring(self.io, parser=parser) + + try: + r = r.getroot() + except AttributeError: + pass + else: + raise e + else: + if not hasattr(r, "text_content"): + raise XMLSyntaxError("no text parsed from document", 0, 0, 0) + + for br in r.xpath("*//br"): + br.tail = "\n" + (br.tail or "") + + return r + + def _parse_thead_tr(self, table): + rows = [] + + for thead in table.xpath(".//thead"): + rows.extend(thead.xpath("./tr")) + + # HACK: lxml does not clean up the clearly-erroneous + # . (Missing ). Add + # the and _pretend_ it's a ; _parse_td() will find its + # children as though it's a . + # + # Better solution would be to use html5lib. + elements_at_root = thead.xpath("./td|./th") + if elements_at_root: + rows.append(thead) + + return rows + + def _parse_tbody_tr(self, table): + from_tbody = table.xpath(".//tbody//tr") + from_root = table.xpath("./tr") + # HTML spec: at most one of these lists has content + return from_tbody + from_root + + def _parse_tfoot_tr(self, table): + return table.xpath(".//tfoot//tr") + + +def _expand_elements(body) -> None: + data = [len(elem) for elem in body] + lens = Series(data) + lens_max = lens.max() + not_max = lens[lens != lens_max] + + empty = [""] + for ind, length in not_max.items(): + body[ind] += empty * (lens_max - length) + + +def _data_to_frame(**kwargs): + head, body, foot = kwargs.pop("data") + header = kwargs.pop("header") + kwargs["skiprows"] = _get_skiprows(kwargs["skiprows"]) + if head: + body = head + body + + # Infer header when there is a or top ' in styler.to_html() + + +def test_rowspan_w3(): + # GH 38533 + df = DataFrame(data=[[1, 2]], index=[["l0", "l0"], ["l1a", "l1b"]]) + styler = Styler(df, uuid="_", cell_ids=False) + assert '' in styler.to_html() + + +def test_styles(styler): + styler.set_uuid("abc") + styler.set_table_styles([{"selector": "td", "props": "color: red;"}]) + result = styler.to_html(doctype_html=True) + expected = dedent( + """\ + + + + + + + +
+ - Move rows from bottom of body to footer only if + all elements inside row are + """ + header_rows = self._parse_thead_tr(table_html) + body_rows = self._parse_tbody_tr(table_html) + footer_rows = self._parse_tfoot_tr(table_html) + + def row_is_all_th(row): + return all(self._equals_tag(t, "th") for t in self._parse_td(row)) + + if not header_rows: + # The table has no
rows from + # body_rows to header_rows. (This is a common case because many + # tables in the wild have no
+ while remainder and remainder[0][0] <= index: + prev_i, prev_text, prev_rowspan = remainder.pop(0) + texts.append(prev_text) + if prev_rowspan > 1: + next_remainder.append((prev_i, prev_text, prev_rowspan - 1)) + index += 1 + + # Append the text from this , colspan times + text = _remove_whitespace(self._text_getter(td)) + if self.extract_links in ("all", section): + href = self._href_getter(td) + text = (text, href) + rowspan = int(self._attr_getter(td, "rowspan") or 1) + colspan = int(self._attr_getter(td, "colspan") or 1) + + for _ in range(colspan): + texts.append(text) + if rowspan > 1: + next_remainder.append((index, text, rowspan - 1)) + index += 1 + + # Append texts from previous rows at the final position + for prev_i, prev_text, prev_rowspan in remainder: + texts.append(prev_text) + if prev_rowspan > 1: + next_remainder.append((prev_i, prev_text, prev_rowspan - 1)) + + all_texts.append(texts) + remainder = next_remainder + + # Append rows that only appear because the previous row had non-1 + # rowspan + while remainder: + next_remainder = [] + texts = [] + for prev_i, prev_text, prev_rowspan in remainder: + texts.append(prev_text) + if prev_rowspan > 1: + next_remainder.append((prev_i, prev_text, prev_rowspan - 1)) + all_texts.append(texts) + remainder = next_remainder + + return all_texts + + def _handle_hidden_tables(self, tbl_list, attr_name: str): + """ + Return list of tables, potentially removing hidden elements + + Parameters + ---------- + tbl_list : list of node-like + Type of list elements will vary depending upon parser used + attr_name : str + Name of the accessor for retrieving HTML attributes + + Returns + ------- + list of node-like + Return type matches `tbl_list` + """ + if not self.displayed_only: + return tbl_list + + return [ + x + for x in tbl_list + if "display:none" + not in getattr(x, attr_name).get("style", "").replace(" ", "") + ] + + +class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser): + """ + HTML to DataFrame parser that uses BeautifulSoup under the hood. + + See Also + -------- + pandas.io.html._HtmlFrameParser + pandas.io.html._LxmlFrameParser + + Notes + ----- + Documentation strings for this class are in the base class + :class:`pandas.io.html._HtmlFrameParser`. 
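+
+    A hedged usage sketch (invented input, not part of the original
+    docstring): forcing this parser via the public API and letting it
+    promote a leading all-``<th>`` row to the header::
+
+        from io import StringIO
+        import pandas as pd
+
+        html = StringIO(
+            "<table><tr><th>x</th><th>y</th></tr>"
+            "<tr><td>1</td><td>2</td></tr></table>"
+        )
+        df = pd.read_html(html, flavor="bs4")[0]  # requires bs4 + html5lib
+        assert list(df.columns) == ["x", "y"]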
+ """ + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + from bs4 import SoupStrainer + + self._strainer = SoupStrainer("table") + + def _parse_tables(self, document, match, attrs): + element_name = self._strainer.name + tables = document.find_all(element_name, attrs=attrs) + if not tables: + raise ValueError("No tables found") + + result = [] + unique_tables = set() + tables = self._handle_hidden_tables(tables, "attrs") + + for table in tables: + if self.displayed_only: + for elem in table.find_all("style"): + elem.decompose() + + for elem in table.find_all(style=re.compile(r"display:\s*none")): + elem.decompose() + + if table not in unique_tables and table.find(string=match) is not None: + result.append(table) + unique_tables.add(table) + if not result: + raise ValueError(f"No tables found matching pattern {repr(match.pattern)}") + return result + + def _href_getter(self, obj) -> str | None: + a = obj.find("a", href=True) + return None if not a else a["href"] + + def _text_getter(self, obj): + return obj.text + + def _equals_tag(self, obj, tag): + return obj.name == tag + + def _parse_td(self, row): + return row.find_all(("td", "th"), recursive=False) + + def _parse_thead_tr(self, table): + return table.select("thead tr") + + def _parse_tbody_tr(self, table): + from_tbody = table.select("tbody tr") + from_root = table.find_all("tr", recursive=False) + # HTML spec: at most one of these lists has content + return from_tbody + from_root + + def _parse_tfoot_tr(self, table): + return table.select("tfoot tr") + + def _setup_build_doc(self): + raw_text = _read(self.io, self.encoding, self.storage_options) + if not raw_text: + raise ValueError(f"No text parsed from document: {self.io}") + return raw_text + + def _build_doc(self): + from bs4 import BeautifulSoup + + bdoc = self._setup_build_doc() + if isinstance(bdoc, bytes) and self.encoding is not None: + udoc = bdoc.decode(self.encoding) + from_encoding = None + else: + udoc = bdoc + from_encoding = self.encoding + + soup = BeautifulSoup(udoc, features="html5lib", from_encoding=from_encoding) + + for br in soup.find_all("br"): + br.replace_with("\n" + br.text) + + return soup + + +def _build_xpath_expr(attrs) -> str: + """ + Build an xpath expression to simulate bs4's ability to pass in kwargs to + search for attributes when using the lxml parser. + + Parameters + ---------- + attrs : dict + A dict of HTML attributes. These are NOT checked for validity. + + Returns + ------- + expr : unicode + An XPath expression that checks for the given HTML attributes. + """ + # give class attribute as class_ because class is a python keyword + if "class_" in attrs: + attrs["class"] = attrs.pop("class_") + + s = " and ".join([f"@{k}={repr(v)}" for k, v in attrs.items()]) + return f"[{s}]" + + +_re_namespace = {"re": "http://exslt.org/regular-expressions"} + + +class _LxmlFrameParser(_HtmlFrameParser): + """ + HTML to DataFrame parser that uses lxml under the hood. + + Warning + ------- + This parser can only handle HTTP, FTP, and FILE urls. + + See Also + -------- + _HtmlFrameParser + _BeautifulSoupLxmlFrameParser + + Notes + ----- + Documentation strings for this class are in the base class + :class:`_HtmlFrameParser`. + """ + + def _href_getter(self, obj) -> str | None: + href = obj.xpath(".//a/@href") + return None if not href else href[0] + + def _text_getter(self, obj): + return obj.text_content() + + def _parse_td(self, row): + # Look for direct children only: the "row" element here may be a + #
+        # <thead> or <tfoot> (see _parse_thead_tr).
+        return row.xpath("./td|./th")
+
+    # Infer header when there is a <thead> or top <th>
-only rows + if header is None: + if len(head) == 1: + header = 0 + else: + # ignore all-empty-text rows + header = [i for i, row in enumerate(head) if any(text for text in row)] + + if foot: + body += foot + + # fill out elements of body that are "ragged" + _expand_elements(body) + with TextParser(body, header=header, **kwargs) as tp: + return tp.read() + + +_valid_parsers = { + "lxml": _LxmlFrameParser, + None: _LxmlFrameParser, + "html5lib": _BeautifulSoupHtml5LibFrameParser, + "bs4": _BeautifulSoupHtml5LibFrameParser, +} + + +def _parser_dispatch(flavor: str | None) -> type[_HtmlFrameParser]: + """ + Choose the parser based on the input flavor. + + Parameters + ---------- + flavor : str + The type of parser to use. This must be a valid backend. + + Returns + ------- + cls : _HtmlFrameParser subclass + The parser class based on the requested input flavor. + + Raises + ------ + ValueError + * If `flavor` is not a valid backend. + ImportError + * If you do not have the requested `flavor` + """ + valid_parsers = list(_valid_parsers.keys()) + if flavor not in valid_parsers: + raise ValueError( + f"{repr(flavor)} is not a valid flavor, valid flavors are {valid_parsers}" + ) + + if flavor in ("bs4", "html5lib"): + import_optional_dependency("html5lib") + import_optional_dependency("bs4") + else: + import_optional_dependency("lxml.etree") + return _valid_parsers[flavor] + + +def _print_as_set(s) -> str: + arg = ", ".join([pprint_thing(el) for el in s]) + return f"{{{arg}}}" + + +def _validate_flavor(flavor): + if flavor is None: + flavor = "lxml", "bs4" + elif isinstance(flavor, str): + flavor = (flavor,) + elif isinstance(flavor, abc.Iterable): + if not all(isinstance(flav, str) for flav in flavor): + raise TypeError( + f"Object of type {repr(type(flavor).__name__)} " + f"is not an iterable of strings" + ) + else: + msg = repr(flavor) if isinstance(flavor, str) else str(flavor) + msg += " is not a valid flavor" + raise ValueError(msg) + + flavor = tuple(flavor) + valid_flavors = set(_valid_parsers) + flavor_set = set(flavor) + + if not flavor_set & valid_flavors: + raise ValueError( + f"{_print_as_set(flavor_set)} is not a valid set of flavors, valid " + f"flavors are {_print_as_set(valid_flavors)}" + ) + return flavor + + +def _parse( + flavor, + io, + match, + attrs, + encoding, + displayed_only, + extract_links, + storage_options, + **kwargs, +): + flavor = _validate_flavor(flavor) + compiled_match = re.compile(match) # you can pass a compiled regex here + + retained = None + for flav in flavor: + parser = _parser_dispatch(flav) + p = parser( + io, + compiled_match, + attrs, + encoding, + displayed_only, + extract_links, + storage_options, + ) + + try: + tables = p.parse_tables() + except ValueError as caught: + # if `io` is an io-like object, check if it's seekable + # and try to rewind it before trying the next parser + if hasattr(io, "seekable") and io.seekable(): + io.seek(0) + elif hasattr(io, "seekable") and not io.seekable(): + # if we couldn't rewind it, let the user know + raise ValueError( + f"The flavor {flav} failed to parse your input. " + "Since you passed a non-rewindable file " + "object, we can't rewind it to try " + "another parser. Try read_html() with a different flavor." 
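+                    # a non-seekable stream cannot be rewound, so parsing
+                    # stops here instead of silently retrying the next
+                    # flavor with a half-consumed input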
+ ) from caught + + retained = caught + else: + break + else: + assert retained is not None # for mypy + raise retained + + ret = [] + for table in tables: + try: + df = _data_to_frame(data=table, **kwargs) + # Cast MultiIndex header to an Index of tuples when extracting header + # links and replace nan with None (therefore can't use mi.to_flat_index()). + # This maintains consistency of selection (e.g. df.columns.str[1]) + if extract_links in ("all", "header") and isinstance( + df.columns, MultiIndex + ): + df.columns = Index( + ((col[0], None if isna(col[1]) else col[1]) for col in df.columns), + tupleize_cols=False, + ) + + ret.append(df) + except EmptyDataError: # empty table + continue + return ret + + +@doc(storage_options=_shared_docs["storage_options"]) +def read_html( + io: FilePath | ReadBuffer[str], + *, + match: str | Pattern = ".+", + flavor: str | None = None, + header: int | Sequence[int] | None = None, + index_col: int | Sequence[int] | None = None, + skiprows: int | Sequence[int] | slice | None = None, + attrs: dict[str, str] | None = None, + parse_dates: bool = False, + thousands: str | None = ",", + encoding: str | None = None, + decimal: str = ".", + converters: dict | None = None, + na_values: Iterable[object] | None = None, + keep_default_na: bool = True, + displayed_only: bool = True, + extract_links: Literal[None, "header", "footer", "body", "all"] = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + storage_options: StorageOptions = None, +) -> list[DataFrame]: + r""" + Read HTML tables into a ``list`` of ``DataFrame`` objects. + + Parameters + ---------- + io : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``read()`` function. + The string can represent a URL or the HTML itself. Note that + lxml only accepts the http, ftp and file url protocols. If you have a + URL that starts with ``'https'`` you might try removing the ``'s'``. + + .. deprecated:: 2.1.0 + Passing html literal strings is deprecated. + Wrap literal string/bytes input in ``io.StringIO``/``io.BytesIO`` instead. + + match : str or compiled regular expression, optional + The set of tables containing text matching this regex or string will be + returned. Unless the HTML is extremely simple you will probably need to + pass a non-empty string here. Defaults to '.+' (match any non-empty + string). The default value will return all tables contained on a page. + This value is converted to a regular expression so that there is + consistent behavior between Beautiful Soup and lxml. + + flavor : str, optional + The parsing engine to use. 'bs4' and 'html5lib' are synonymous with + each other, they are both there for backwards compatibility. The + default of ``None`` tries to use ``lxml`` to parse and if that fails it + falls back on ``bs4`` + ``html5lib``. + + header : int or list-like, optional + The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to + make the columns headers. + + index_col : int or list-like, optional + The column (or list of columns) to use to create the index. + + skiprows : int, list-like or slice, optional + Number of rows to skip after parsing the column integer. 0-based. If a + sequence of integers or a slice is given, will skip the rows indexed by + that sequence. Note that a single element sequence means 'skip the nth + row' whereas an integer means 'skip n rows'. 
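+
+        A hedged sketch of these semantics (invented input)::
+
+            from io import StringIO
+            import pandas as pd
+
+            html = StringIO(
+                "<table><tr><th>a</th></tr>"
+                "<tr><td>1</td></tr><tr><td>2</td></tr></table>"
+            )
+            # skiprows=1 skips one row; skiprows=[1] skips only the
+            # second (0-based) row of the parsed table.
+            dfs = pd.read_html(html, skiprows=[1])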
+ + attrs : dict, optional + This is a dictionary of attributes that you can pass to use to identify + the table in the HTML. These are not checked for validity before being + passed to lxml or Beautiful Soup. However, these attributes must be + valid HTML table attributes to work correctly. For example, :: + + attrs = {{'id': 'table'}} + + is a valid attribute dictionary because the 'id' HTML tag attribute is + a valid HTML attribute for *any* HTML tag as per `this document + `__. :: + + attrs = {{'asdf': 'table'}} + + is *not* a valid attribute dictionary because 'asdf' is not a valid + HTML attribute even if it is a valid XML attribute. Valid HTML 4.01 + table attributes can be found `here + `__. A + working draft of the HTML 5 spec can be found `here + `__. It contains the + latest information on table attributes for the modern web. + + parse_dates : bool, optional + See :func:`~read_csv` for more details. + + thousands : str, optional + Separator to use to parse thousands. Defaults to ``','``. + + encoding : str, optional + The encoding used to decode the web page. Defaults to ``None``.``None`` + preserves the previous encoding behavior, which depends on the + underlying parser library (e.g., the parser library will try to use + the encoding provided by the document). + + decimal : str, default '.' + Character to recognize as decimal point (e.g. use ',' for European + data). + + converters : dict, default None + Dict of functions for converting values in certain columns. Keys can + either be integers or column labels, values are functions that take one + input argument, the cell (not column) content, and return the + transformed content. + + na_values : iterable, default None + Custom NA values. + + keep_default_na : bool, default True + If na_values are specified and keep_default_na is False the default NaN + values are overridden, otherwise they're appended to. + + displayed_only : bool, default True + Whether elements with "display: none" should be parsed. + + extract_links : {{None, "all", "header", "body", "footer"}} + Table elements in the specified section(s) with tags will have their + href extracted. + + .. versionadded:: 1.5.0 + + dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + {storage_options} + + .. versionadded:: 2.1.0 + + Returns + ------- + dfs + A list of DataFrames. + + See Also + -------- + read_csv : Read a comma-separated values (csv) file into DataFrame. + + Notes + ----- + Before using this function you should read the :ref:`gotchas about the + HTML parsing libraries `. + + Expect to do some cleanup after you call this function. For example, you + might need to manually assign column names if the column names are + converted to NaN when you pass the `header=0` argument. We try to assume as + little as possible about the structure of the table and push the + idiosyncrasies of the HTML contained in the table to the user. 
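+
+    As a hedged illustration of ``extract_links`` (invented input)::
+
+        from io import StringIO
+        import pandas as pd
+
+        html = StringIO(
+            '<table><tr><th>site</th></tr><tr>'
+            '<td><a href="https://pandas.pydata.org">pandas</a></td>'
+            '</tr></table>'
+        )
+        df = pd.read_html(html, extract_links="body")[0]
+        # body cells become (text, href) tuples:
+        # df.iloc[0, 0] == ("pandas", "https://pandas.pydata.org")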
+ + This function searches for ```` elements and only for ```` + and ```` or ```` argument, it is used to construct + the header, otherwise the function attempts to find the header within + the body (by putting rows with only ``' + assert expected in s.to_html() + + # only the value should be escaped before passing to the formatter + s = Styler(df, uuid_len=0).format("&{0}&", escape=escape) + expected = f'' + assert expected in s.to_html() + + # also test format_index() + styler = Styler(DataFrame(columns=[chars]), uuid_len=0) + styler.format_index("&{0}&", escape=None, axis=1) + assert styler._translate(True, True)["head"][0][1]["display_value"] == f"&{chars}&" + styler.format_index("&{0}&", escape=escape, axis=1) + assert styler._translate(True, True)["head"][0][1]["display_value"] == f"&{exp}&" + + +@pytest.mark.parametrize( + "chars, expected", + [ + ( + r"$ \$&%#_{}~^\ $ &%#_{}~^\ $", + "".join( + [ + r"$ \$&%#_{}~^\ $ ", + r"\&\%\#\_\{\}\textasciitilde \textasciicircum ", + r"\textbackslash \space \$", + ] + ), + ), + ( + r"\( &%#_{}~^\ \) &%#_{}~^\ \(", + "".join( + [ + r"\( &%#_{}~^\ \) ", + r"\&\%\#\_\{\}\textasciitilde \textasciicircum ", + r"\textbackslash \space \textbackslash (", + ] + ), + ), + ( + r"$\&%#_{}^\$", + r"\$\textbackslash \&\%\#\_\{\}\textasciicircum \textbackslash \$", + ), + ( + r"$ \frac{1}{2} $ \( \frac{1}{2} \)", + "".join( + [ + r"$ \frac{1}{2} $", + r" \textbackslash ( \textbackslash frac\{1\}\{2\} \textbackslash )", + ] + ), + ), + ], +) +def test_format_escape_latex_math(chars, expected): + # GH 51903 + # latex-math escape works for each DataFrame cell separately. If we have + # a combination of dollar signs and brackets, the dollar sign would apply. + df = DataFrame([[chars]]) + s = df.style.format("{0}", escape="latex-math") + assert s._translate(True, True)["body"][0][1]["display_value"] == expected + + +def test_format_escape_na_rep(): + # tests the na_rep is not escaped + df = DataFrame([['<>&"', None]]) + s = Styler(df, uuid_len=0).format("X&{0}>X", escape="html", na_rep="&") + ex = '' + expected2 = '' + assert ex in s.to_html() + assert expected2 in s.to_html() + + # also test for format_index() + df = DataFrame(columns=['<>&"', None]) + styler = Styler(df, uuid_len=0) + styler.format_index("X&{0}>X", escape="html", na_rep="&", axis=1) + ctx = styler._translate(True, True) + assert ctx["head"][0][1]["display_value"] == "X&<>&">X" + assert ctx["head"][0][2]["display_value"] == "&" + + +def test_format_escape_floats(styler): + # test given formatter for number format is not impacted by escape + s = styler.format("{:.1f}", escape="html") + for expected in [">0.0<", ">1.0<", ">-1.2<", ">-0.6<"]: + assert expected in s.to_html() + # tests precision of floats is not impacted by escape + s = styler.format(precision=1, escape="html") + for expected in [">0<", ">1<", ">-1.2<", ">-0.6<"]: + assert expected in s.to_html() + + +@pytest.mark.parametrize("formatter", [5, True, [2.0]]) +@pytest.mark.parametrize("func", ["format", "format_index"]) +def test_format_raises(styler, formatter, func): + with pytest.raises(TypeError, match="expected str or callable"): + getattr(styler, func)(formatter) + + +@pytest.mark.parametrize( + "precision, expected", + [ + (1, ["1.0", "2.0", "3.2", "4.6"]), + (2, ["1.00", "2.01", "3.21", "4.57"]), + (3, ["1.000", "2.009", "3.212", "4.566"]), + ], +) +def test_format_with_precision(precision, expected): + # Issue #13257 + df = DataFrame([[1.0, 2.0090, 3.2121, 4.566]], columns=[1.0, 2.0090, 3.2121, 4.566]) + styler = Styler(df) + 
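+    # precision only affects the rendered text, not the data: with
+    # precision=2, the value 3.2121 displays as "3.21" while
+    # df.iloc[0, 2] remains 3.2121.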
styler.format(precision=precision) + styler.format_index(precision=precision, axis=1) + + ctx = styler._translate(True, True) + for col, exp in enumerate(expected): + assert ctx["body"][0][col + 1]["display_value"] == exp # format test + assert ctx["head"][0][col + 1]["display_value"] == exp # format_index test + + +@pytest.mark.parametrize("axis", [0, 1]) +@pytest.mark.parametrize( + "level, expected", + [ + (0, ["X", "X", "_", "_"]), # level int + ("zero", ["X", "X", "_", "_"]), # level name + (1, ["_", "_", "X", "X"]), # other level int + ("one", ["_", "_", "X", "X"]), # other level name + ([0, 1], ["X", "X", "X", "X"]), # both levels + ([0, "zero"], ["X", "X", "_", "_"]), # level int and name simultaneous + ([0, "one"], ["X", "X", "X", "X"]), # both levels as int and name + (["one", "zero"], ["X", "X", "X", "X"]), # both level names, reversed + ], +) +def test_format_index_level(axis, level, expected): + midx = MultiIndex.from_arrays([["_", "_"], ["_", "_"]], names=["zero", "one"]) + df = DataFrame([[1, 2], [3, 4]]) + if axis == 0: + df.index = midx + else: + df.columns = midx + + styler = df.style.format_index(lambda v: "X", level=level, axis=axis) + ctx = styler._translate(True, True) + + if axis == 0: # compare index + result = [ctx["body"][s][0]["display_value"] for s in range(2)] + result += [ctx["body"][s][1]["display_value"] for s in range(2)] + else: # compare columns + result = [ctx["head"][0][s + 1]["display_value"] for s in range(2)] + result += [ctx["head"][1][s + 1]["display_value"] for s in range(2)] + + assert expected == result + + +def test_format_subset(): + df = DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"]) + ctx = df.style.format( + {"a": "{:0.1f}", "b": "{0:.2%}"}, subset=IndexSlice[0, :] + )._translate(True, True) + expected = "0.1" + raw_11 = "1.123400" + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][1][1]["display_value"] == raw_11 + assert ctx["body"][0][2]["display_value"] == "12.34%" + + ctx = df.style.format("{:0.1f}", subset=IndexSlice[0, :])._translate(True, True) + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][1][1]["display_value"] == raw_11 + + ctx = df.style.format("{:0.1f}", subset=IndexSlice["a"])._translate(True, True) + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][0][2]["display_value"] == "0.123400" + + ctx = df.style.format("{:0.1f}", subset=IndexSlice[0, "a"])._translate(True, True) + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][1][1]["display_value"] == raw_11 + + ctx = df.style.format("{:0.1f}", subset=IndexSlice[[0, 1], ["a"]])._translate( + True, True + ) + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][1][1]["display_value"] == "1.1" + assert ctx["body"][0][2]["display_value"] == "0.123400" + assert ctx["body"][1][2]["display_value"] == raw_11 + + +@pytest.mark.parametrize("formatter", [None, "{:,.1f}"]) +@pytest.mark.parametrize("decimal", [".", "*"]) +@pytest.mark.parametrize("precision", [None, 2]) +@pytest.mark.parametrize("func, col", [("format", 1), ("format_index", 0)]) +def test_format_thousands(formatter, decimal, precision, func, col): + styler = DataFrame([[1000000.123456789]], index=[1000000.123456789]).style + result = getattr(styler, func)( # testing float + thousands="_", formatter=formatter, decimal=decimal, precision=precision + )._translate(True, True) + assert "1_000_000" in result["body"][0][col]["display_value"] + + styler = DataFrame([[1000000]], 
index=[1000000]).style + result = getattr(styler, func)( # testing int + thousands="_", formatter=formatter, decimal=decimal, precision=precision + )._translate(True, True) + assert "1_000_000" in result["body"][0][col]["display_value"] + + styler = DataFrame([[1 + 1000000.123456789j]], index=[1 + 1000000.123456789j]).style + result = getattr(styler, func)( # testing complex + thousands="_", formatter=formatter, decimal=decimal, precision=precision + )._translate(True, True) + assert "1_000_000" in result["body"][0][col]["display_value"] + + +@pytest.mark.parametrize("formatter", [None, "{:,.4f}"]) +@pytest.mark.parametrize("thousands", [None, ",", "*"]) +@pytest.mark.parametrize("precision", [None, 4]) +@pytest.mark.parametrize("func, col", [("format", 1), ("format_index", 0)]) +def test_format_decimal(formatter, thousands, precision, func, col): + styler = DataFrame([[1000000.123456789]], index=[1000000.123456789]).style + result = getattr(styler, func)( # testing float + decimal="_", formatter=formatter, thousands=thousands, precision=precision + )._translate(True, True) + assert "000_123" in result["body"][0][col]["display_value"] + + styler = DataFrame([[1 + 1000000.123456789j]], index=[1 + 1000000.123456789j]).style + result = getattr(styler, func)( # testing complex + decimal="_", formatter=formatter, thousands=thousands, precision=precision + )._translate(True, True) + assert "000_123" in result["body"][0][col]["display_value"] + + +def test_str_escape_error(): + msg = "`escape` only permitted in {'html', 'latex', 'latex-math'}, got " + with pytest.raises(ValueError, match=msg): + _str_escape("text", "bad_escape") + + with pytest.raises(ValueError, match=msg): + _str_escape("text", []) + + _str_escape(2.00, "bad_escape") # OK since dtype is float + + +def test_long_int_formatting(): + df = DataFrame(data=[[1234567890123456789]], columns=["test"]) + styler = df.style + ctx = styler._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "1234567890123456789" + + styler = df.style.format(thousands="_") + ctx = styler._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "1_234_567_890_123_456_789" + + +def test_format_options(): + df = DataFrame({"int": [2000, 1], "float": [1.009, None], "str": ["&<", "&~"]}) + ctx = df.style._translate(True, True) + + # test option: na_rep + assert ctx["body"][1][2]["display_value"] == "nan" + with option_context("styler.format.na_rep", "MISSING"): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][1][2]["display_value"] == "MISSING" + + # test option: decimal and precision + assert ctx["body"][0][2]["display_value"] == "1.009000" + with option_context("styler.format.decimal", "_"): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][0][2]["display_value"] == "1_009000" + with option_context("styler.format.precision", 2): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][0][2]["display_value"] == "1.01" + + # test option: thousands + assert ctx["body"][0][1]["display_value"] == "2000" + with option_context("styler.format.thousands", "_"): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][0][1]["display_value"] == "2_000" + + # test option: escape + assert ctx["body"][0][3]["display_value"] == "&<" + assert ctx["body"][1][3]["display_value"] == "&~" + with option_context("styler.format.escape", "html"): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][0][3]["display_value"] == "&<" + with 
option_context("styler.format.escape", "latex"): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][1][3]["display_value"] == "\\&\\textasciitilde " + with option_context("styler.format.escape", "latex-math"): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][1][3]["display_value"] == "\\&\\textasciitilde " + + # test option: formatter + with option_context("styler.format.formatter", {"int": "{:,.2f}"}): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][0][1]["display_value"] == "2,000.00" + + +def test_precision_zero(df): + styler = Styler(df, precision=0) + ctx = styler._translate(True, True) + assert ctx["body"][0][2]["display_value"] == "-1" + assert ctx["body"][1][2]["display_value"] == "-1" + + +@pytest.mark.parametrize( + "formatter, exp", + [ + (lambda x: f"{x:.3f}", "9.000"), + ("{:.2f}", "9.00"), + ({0: "{:.1f}"}, "9.0"), + (None, "9"), + ], +) +def test_formatter_options_validator(formatter, exp): + df = DataFrame([[9]]) + with option_context("styler.format.formatter", formatter): + assert f" {exp} " in df.style.to_latex() + + +def test_formatter_options_raises(): + msg = "Value must be an instance of" + with pytest.raises(ValueError, match=msg): + with option_context("styler.format.formatter", ["bad", "type"]): + DataFrame().style.to_latex() + + +def test_1level_multiindex(): + # GH 43383 + midx = MultiIndex.from_product([[1, 2]], names=[""]) + df = DataFrame(-1, index=midx, columns=[0, 1]) + ctx = df.style._translate(True, True) + assert ctx["body"][0][0]["display_value"] == "1" + assert ctx["body"][0][0]["is_visible"] is True + assert ctx["body"][1][0]["display_value"] == "2" + assert ctx["body"][1][0]["is_visible"] is True + + +def test_boolean_format(): + # gh 46384: booleans do not collapse to integer representation on display + df = DataFrame([[True, False]]) + ctx = df.style._translate(True, True) + assert ctx["body"][0][1]["display_value"] is True + assert ctx["body"][0][2]["display_value"] is False + + +@pytest.mark.parametrize( + "hide, labels", + [ + (False, [1, 2]), + (True, [1, 2, 3, 4]), + ], +) +def test_relabel_raise_length(styler_multi, hide, labels): + if hide: + styler_multi.hide(axis=0, subset=[("X", "x"), ("Y", "y")]) + with pytest.raises(ValueError, match="``labels`` must be of length equal"): + styler_multi.relabel_index(labels=labels) + + +def test_relabel_index(styler_multi): + labels = [(1, 2), (3, 4)] + styler_multi.hide(axis=0, subset=[("X", "x"), ("Y", "y")]) + styler_multi.relabel_index(labels=labels) + ctx = styler_multi._translate(True, True) + assert {"value": "X", "display_value": 1}.items() <= ctx["body"][0][0].items() + assert {"value": "y", "display_value": 2}.items() <= ctx["body"][0][1].items() + assert {"value": "Y", "display_value": 3}.items() <= ctx["body"][1][0].items() + assert {"value": "x", "display_value": 4}.items() <= ctx["body"][1][1].items() + + +def test_relabel_columns(styler_multi): + labels = [(1, 2), (3, 4)] + styler_multi.hide(axis=1, subset=[("A", "a"), ("B", "b")]) + styler_multi.relabel_index(axis=1, labels=labels) + ctx = styler_multi._translate(True, True) + assert {"value": "A", "display_value": 1}.items() <= ctx["head"][0][3].items() + assert {"value": "B", "display_value": 3}.items() <= ctx["head"][0][4].items() + assert {"value": "b", "display_value": 2}.items() <= ctx["head"][1][3].items() + assert {"value": "a", "display_value": 4}.items() <= ctx["head"][1][4].items() + + +def test_relabel_roundtrip(styler): + 
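+    # relabel_index with the identity template "{}" formats each old label
+    # with itself, so the displayed values below equal the underlying
+    # index values -- a display-level no-op.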
styler.relabel_index(["{}", "{}"]) + ctx = styler._translate(True, True) + assert {"value": "x", "display_value": "x"}.items() <= ctx["body"][0][0].items() + assert {"value": "y", "display_value": "y"}.items() <= ctx["body"][1][0].items() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_highlight.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_highlight.py new file mode 100644 index 00000000..3d597190 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_highlight.py @@ -0,0 +1,218 @@ +import numpy as np +import pytest + +from pandas import ( + NA, + DataFrame, + IndexSlice, +) + +pytest.importorskip("jinja2") + +from pandas.io.formats.style import Styler + + +@pytest.fixture(params=[(None, "float64"), (NA, "Int64")]) +def df(request): + # GH 45804 + return DataFrame( + {"A": [0, np.nan, 10], "B": [1, request.param[0], 2]}, dtype=request.param[1] + ) + + +@pytest.fixture +def styler(df): + return Styler(df, uuid_len=0) + + +def test_highlight_null(styler): + result = styler.highlight_null()._compute().ctx + expected = { + (1, 0): [("background-color", "red")], + (1, 1): [("background-color", "red")], + } + assert result == expected + + +def test_highlight_null_subset(styler): + # GH 31345 + result = ( + styler.highlight_null(color="red", subset=["A"]) + .highlight_null(color="green", subset=["B"]) + ._compute() + .ctx + ) + expected = { + (1, 0): [("background-color", "red")], + (1, 1): [("background-color", "green")], + } + assert result == expected + + +@pytest.mark.parametrize("f", ["highlight_min", "highlight_max"]) +def test_highlight_minmax_basic(df, f): + expected = { + (0, 1): [("background-color", "red")], + # ignores NaN row, + (2, 0): [("background-color", "red")], + } + if f == "highlight_min": + df = -df + result = getattr(df.style, f)(axis=1, color="red")._compute().ctx + assert result == expected + + +@pytest.mark.parametrize("f", ["highlight_min", "highlight_max"]) +@pytest.mark.parametrize( + "kwargs", + [ + {"axis": None, "color": "red"}, # test axis + {"axis": 0, "subset": ["A"], "color": "red"}, # test subset and ignores NaN + {"axis": None, "props": "background-color: red"}, # test props + ], +) +def test_highlight_minmax_ext(df, f, kwargs): + expected = {(2, 0): [("background-color", "red")]} + if f == "highlight_min": + df = -df + result = getattr(df.style, f)(**kwargs)._compute().ctx + assert result == expected + + +@pytest.mark.parametrize("f", ["highlight_min", "highlight_max"]) +@pytest.mark.parametrize("axis", [None, 0, 1]) +def test_highlight_minmax_nulls(f, axis): + # GH 42750 + expected = { + (1, 0): [("background-color", "yellow")], + (1, 1): [("background-color", "yellow")], + } + if axis == 1: + expected.update({(2, 1): [("background-color", "yellow")]}) + + if f == "highlight_max": + df = DataFrame({"a": [NA, 1, None], "b": [np.nan, 1, -1]}) + else: + df = DataFrame({"a": [NA, -1, None], "b": [np.nan, -1, 1]}) + + result = getattr(df.style, f)(axis=axis)._compute().ctx + assert result == expected + + +@pytest.mark.parametrize( + "kwargs", + [ + {"left": 0, "right": 1}, # test basic range + {"left": 0, "right": 1, "props": "background-color: yellow"}, # test props + {"left": -100, "right": 100, "subset": IndexSlice[[0, 1], :]}, # test subset + {"left": 0, "subset": IndexSlice[[0, 1], :]}, # test no right + {"right": 1}, # test no left + {"left": [0, 0, 11], "axis": 0}, # test left as sequence + {"left": DataFrame({"A": [0, 0, 11], "B": [1, 1, 11]}), 
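+            # a DataFrame-valued bound must match the data's shape when
+            # axis=None; mismatches raise (see test_highlight_between_raises)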
"axis": None}, # axis + {"left": 0, "right": [0, 1], "axis": 1}, # test sequence right + ], +) +def test_highlight_between(styler, kwargs): + expected = { + (0, 0): [("background-color", "yellow")], + (0, 1): [("background-color", "yellow")], + } + result = styler.highlight_between(**kwargs)._compute().ctx + assert result == expected + + +@pytest.mark.parametrize( + "arg, map, axis", + [ + ("left", [1, 2], 0), # 0 axis has 3 elements not 2 + ("left", [1, 2, 3], 1), # 1 axis has 2 elements not 3 + ("left", np.array([[1, 2], [1, 2]]), None), # df is (2,3) not (2,2) + ("right", [1, 2], 0), # same tests as above for 'right' not 'left' + ("right", [1, 2, 3], 1), # .. + ("right", np.array([[1, 2], [1, 2]]), None), # .. + ], +) +def test_highlight_between_raises(arg, styler, map, axis): + msg = f"supplied '{arg}' is not correct shape" + with pytest.raises(ValueError, match=msg): + styler.highlight_between(**{arg: map, "axis": axis})._compute() + + +def test_highlight_between_raises2(styler): + msg = "values can be 'both', 'left', 'right', or 'neither'" + with pytest.raises(ValueError, match=msg): + styler.highlight_between(inclusive="badstring")._compute() + + with pytest.raises(ValueError, match=msg): + styler.highlight_between(inclusive=1)._compute() + + +@pytest.mark.parametrize( + "inclusive, expected", + [ + ( + "both", + { + (0, 0): [("background-color", "yellow")], + (0, 1): [("background-color", "yellow")], + }, + ), + ("neither", {}), + ("left", {(0, 0): [("background-color", "yellow")]}), + ("right", {(0, 1): [("background-color", "yellow")]}), + ], +) +def test_highlight_between_inclusive(styler, inclusive, expected): + kwargs = {"left": 0, "right": 1, "subset": IndexSlice[[0, 1], :]} + result = styler.highlight_between(**kwargs, inclusive=inclusive)._compute() + assert result.ctx == expected + + +@pytest.mark.parametrize( + "kwargs", + [ + {"q_left": 0.5, "q_right": 1, "axis": 0}, # base case + {"q_left": 0.5, "q_right": 1, "axis": None}, # test axis + {"q_left": 0, "q_right": 1, "subset": IndexSlice[2, :]}, # test subset + {"q_left": 0.5, "axis": 0}, # test no high + {"q_right": 1, "subset": IndexSlice[2, :], "axis": 1}, # test no low + {"q_left": 0.5, "axis": 0, "props": "background-color: yellow"}, # tst prop + ], +) +def test_highlight_quantile(styler, kwargs): + expected = { + (2, 0): [("background-color", "yellow")], + (2, 1): [("background-color", "yellow")], + } + result = styler.highlight_quantile(**kwargs)._compute().ctx + assert result == expected + + +@pytest.mark.parametrize( + "f,kwargs", + [ + ("highlight_min", {"axis": 1, "subset": IndexSlice[1, :]}), + ("highlight_max", {"axis": 0, "subset": [0]}), + ("highlight_quantile", {"axis": None, "q_left": 0.6, "q_right": 0.8}), + ("highlight_between", {"subset": [0]}), + ], +) +@pytest.mark.parametrize( + "df", + [ + DataFrame([[0, 10], [20, 30]], dtype=int), + DataFrame([[0, 10], [20, 30]], dtype=float), + DataFrame([[0, 10], [20, 30]], dtype="datetime64[ns]"), + DataFrame([[0, 10], [20, 30]], dtype=str), + DataFrame([[0, 10], [20, 30]], dtype="timedelta64[ns]"), + ], +) +def test_all_highlight_dtypes(f, kwargs, df): + if f == "highlight_quantile" and isinstance(df.iloc[0, 0], (str)): + return None # quantile incompatible with str + if f == "highlight_between": + kwargs["left"] = df.iloc[1, 0] # set the range low for testing + + expected = {(1, 0): [("background-color", "yellow")]} + result = getattr(df.style, f)(**kwargs)._compute().ctx + assert result == expected diff --git 
a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_html.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_html.py new file mode 100644 index 00000000..1e345eb8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_html.py @@ -0,0 +1,1009 @@ +from textwrap import ( + dedent, + indent, +) + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, + option_context, +) + +jinja2 = pytest.importorskip("jinja2") +from pandas.io.formats.style import Styler + + +@pytest.fixture +def env(): + loader = jinja2.PackageLoader("pandas", "io/formats/templates") + env = jinja2.Environment(loader=loader, trim_blocks=True) + return env + + +@pytest.fixture +def styler(): + return Styler(DataFrame([[2.61], [2.69]], index=["a", "b"], columns=["A"])) + + +@pytest.fixture +def styler_mi(): + midx = MultiIndex.from_product([["a", "b"], ["c", "d"]]) + return Styler(DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=midx)) + + +@pytest.fixture +def tpl_style(env): + return env.get_template("html_style.tpl") + + +@pytest.fixture +def tpl_table(env): + return env.get_template("html_table.tpl") + + +def test_html_template_extends_options(): + # make sure if templates are edited tests are updated as are setup fixtures + # to understand the dependency + with open("pandas/io/formats/templates/html.tpl", encoding="utf-8") as file: + result = file.read() + assert "{% include html_style_tpl %}" in result + assert "{% include html_table_tpl %}" in result + + +def test_exclude_styles(styler): + result = styler.to_html(exclude_styles=True, doctype_html=True) + expected = dedent( + """\ + + + + + + +
+    and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>``
+    element in the table. ``<td>`` stands for "table data". This function
+    attempts to properly handle ``colspan`` and ``rowspan`` attributes.
+    If the function has a ``<thead>`` argument, it is used to construct
+    the header, otherwise the function attempts to find the header within
+    the body (by putting rows with only ``<th>
`` elements into the header). + + Similar to :func:`~read_csv` the `header` argument is applied + **after** `skiprows` is applied. + + This function will *always* return a list of :class:`DataFrame` *or* + it will fail, e.g., it will *not* return an empty list. + + Examples + -------- + See the :ref:`read_html documentation in the IO section of the docs + ` for some examples of reading in HTML tables. + """ + # Type check here. We don't want to parse only to fail because of an + # invalid value of an integer skiprows. + if isinstance(skiprows, numbers.Integral) and skiprows < 0: + raise ValueError( + "cannot skip rows starting from the end of the " + "data (you passed a negative value)" + ) + if extract_links not in [None, "header", "footer", "body", "all"]: + raise ValueError( + "`extract_links` must be one of " + '{None, "header", "footer", "body", "all"}, got ' + f'"{extract_links}"' + ) + + validate_header_arg(header) + check_dtype_backend(dtype_backend) + + io = stringify_path(io) + + if isinstance(io, str) and not any( + [ + is_file_like(io), + file_exists(io), + is_url(io), + is_fsspec_url(io), + ] + ): + warnings.warn( + "Passing literal html to 'read_html' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + return _parse( + flavor=flavor, + io=io, + match=match, + header=header, + index_col=index_col, + skiprows=skiprows, + parse_dates=parse_dates, + thousands=thousands, + attrs=attrs, + encoding=encoding, + decimal=decimal, + converters=converters, + na_values=na_values, + keep_default_na=keep_default_na, + displayed_only=displayed_only, + extract_links=extract_links, + dtype_backend=dtype_backend, + storage_options=storage_options, + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/__init__.py new file mode 100644 index 00000000..ff19cf6e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/__init__.py @@ -0,0 +1,15 @@ +from pandas.io.json._json import ( + read_json, + to_json, + ujson_dumps as dumps, + ujson_loads as loads, +) +from pandas.io.json._table_schema import build_table_schema + +__all__ = [ + "dumps", + "loads", + "read_json", + "to_json", + "build_table_schema", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/_json.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/_json.py new file mode 100644 index 00000000..58979a29 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/_json.py @@ -0,0 +1,1465 @@ +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +from collections import abc +from io import StringIO +from itertools import islice +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Generic, + Literal, + TypeVar, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas._libs.json import ( + ujson_dumps, + ujson_loads, +) +from pandas._libs.tslibs import iNaT +from pandas.compat._optional import import_optional_dependency +from pandas.errors import AbstractMethodError +from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.common import ensure_str +from pandas.core.dtypes.dtypes import PeriodDtype +from pandas.core.dtypes.generic import ABCIndex + +from pandas import ( + 
ArrowDtype, + DataFrame, + MultiIndex, + Series, + isna, + notna, + to_datetime, +) +from pandas.core.reshape.concat import concat +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import ( + IOHandles, + dedup_names, + extension_to_compression, + file_exists, + get_handle, + is_fsspec_url, + is_potential_multi_index, + is_url, + stringify_path, +) +from pandas.io.json._normalize import convert_to_line_delimits +from pandas.io.json._table_schema import ( + build_table_schema, + parse_table_schema, +) +from pandas.io.parsers.readers import validate_integer + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Mapping, + ) + from types import TracebackType + + from pandas._typing import ( + CompressionOptions, + DtypeArg, + DtypeBackend, + FilePath, + IndexLabel, + JSONEngine, + JSONSerializable, + ReadBuffer, + StorageOptions, + WriteBuffer, + ) + + from pandas.core.generic import NDFrame + +FrameSeriesStrT = TypeVar("FrameSeriesStrT", bound=Literal["frame", "series"]) + + +# interface to/from +@overload +def to_json( + path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes], + obj: NDFrame, + orient: str | None = ..., + date_format: str = ..., + double_precision: int = ..., + force_ascii: bool = ..., + date_unit: str = ..., + default_handler: Callable[[Any], JSONSerializable] | None = ..., + lines: bool = ..., + compression: CompressionOptions = ..., + index: bool | None = ..., + indent: int = ..., + storage_options: StorageOptions = ..., + mode: Literal["a", "w"] = ..., +) -> None: + ... + + +@overload +def to_json( + path_or_buf: None, + obj: NDFrame, + orient: str | None = ..., + date_format: str = ..., + double_precision: int = ..., + force_ascii: bool = ..., + date_unit: str = ..., + default_handler: Callable[[Any], JSONSerializable] | None = ..., + lines: bool = ..., + compression: CompressionOptions = ..., + index: bool | None = ..., + indent: int = ..., + storage_options: StorageOptions = ..., + mode: Literal["a", "w"] = ..., +) -> str: + ... + + +def to_json( + path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] | None, + obj: NDFrame, + orient: str | None = None, + date_format: str = "epoch", + double_precision: int = 10, + force_ascii: bool = True, + date_unit: str = "ms", + default_handler: Callable[[Any], JSONSerializable] | None = None, + lines: bool = False, + compression: CompressionOptions = "infer", + index: bool | None = None, + indent: int = 0, + storage_options: StorageOptions | None = None, + mode: Literal["a", "w"] = "w", +) -> str | None: + if orient in ["records", "values"] and index is True: + raise ValueError( + "'index=True' is only valid when 'orient' is 'split', 'table', " + "'index', or 'columns'." + ) + elif orient in ["index", "columns"] and index is False: + raise ValueError( + "'index=False' is only valid when 'orient' is 'split', 'table', " + "'records', or 'values'." + ) + elif index is None: + # will be ignored for orient='records' and 'values' + index = True + + if lines and orient != "records": + raise ValueError("'lines' keyword only valid when 'orient' is records") + + if mode not in ["a", "w"]: + msg = ( + f"mode={mode} is not a valid option." + "Only 'w' and 'a' are currently supported." 
+ ) + raise ValueError(msg) + + if mode == "a" and (not lines or orient != "records"): + msg = ( + "mode='a' (append) is only supported when" + "lines is True and orient is 'records'" + ) + raise ValueError(msg) + + if orient == "table" and isinstance(obj, Series): + obj = obj.to_frame(name=obj.name or "values") + + writer: type[Writer] + if orient == "table" and isinstance(obj, DataFrame): + writer = JSONTableWriter + elif isinstance(obj, Series): + writer = SeriesWriter + elif isinstance(obj, DataFrame): + writer = FrameWriter + else: + raise NotImplementedError("'obj' should be a Series or a DataFrame") + + s = writer( + obj, + orient=orient, + date_format=date_format, + double_precision=double_precision, + ensure_ascii=force_ascii, + date_unit=date_unit, + default_handler=default_handler, + index=index, + indent=indent, + ).write() + + if lines: + s = convert_to_line_delimits(s) + + if path_or_buf is not None: + # apply compression and byte/text conversion + with get_handle( + path_or_buf, mode, compression=compression, storage_options=storage_options + ) as handles: + handles.handle.write(s) + else: + return s + return None + + +class Writer(ABC): + _default_orient: str + + def __init__( + self, + obj: NDFrame, + orient: str | None, + date_format: str, + double_precision: int, + ensure_ascii: bool, + date_unit: str, + index: bool, + default_handler: Callable[[Any], JSONSerializable] | None = None, + indent: int = 0, + ) -> None: + self.obj = obj + + if orient is None: + orient = self._default_orient + + self.orient = orient + self.date_format = date_format + self.double_precision = double_precision + self.ensure_ascii = ensure_ascii + self.date_unit = date_unit + self.default_handler = default_handler + self.index = index + self.indent = indent + + self.is_copy = None + self._format_axes() + + def _format_axes(self): + raise AbstractMethodError(self) + + def write(self) -> str: + iso_dates = self.date_format == "iso" + return ujson_dumps( + self.obj_to_write, + orient=self.orient, + double_precision=self.double_precision, + ensure_ascii=self.ensure_ascii, + date_unit=self.date_unit, + iso_dates=iso_dates, + default_handler=self.default_handler, + indent=self.indent, + ) + + @property + @abstractmethod + def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: + """Object to write in JSON format.""" + + +class SeriesWriter(Writer): + _default_orient = "index" + + @property + def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: + if not self.index and self.orient == "split": + return {"name": self.obj.name, "data": self.obj.values} + else: + return self.obj + + def _format_axes(self): + if not self.obj.index.is_unique and self.orient == "index": + raise ValueError(f"Series index must be unique for orient='{self.orient}'") + + +class FrameWriter(Writer): + _default_orient = "columns" + + @property + def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: + if not self.index and self.orient == "split": + obj_to_write = self.obj.to_dict(orient="split") + del obj_to_write["index"] + else: + obj_to_write = self.obj + return obj_to_write + + def _format_axes(self): + """ + Try to format axes if they are datelike. + """ + if not self.obj.index.is_unique and self.orient in ("index", "columns"): + raise ValueError( + f"DataFrame index must be unique for orient='{self.orient}'." + ) + if not self.obj.columns.is_unique and self.orient in ( + "index", + "columns", + "records", + ): + raise ValueError( + f"DataFrame columns must be unique for orient='{self.orient}'." 
+ ) + + +class JSONTableWriter(FrameWriter): + _default_orient = "records" + + def __init__( + self, + obj, + orient: str | None, + date_format: str, + double_precision: int, + ensure_ascii: bool, + date_unit: str, + index: bool, + default_handler: Callable[[Any], JSONSerializable] | None = None, + indent: int = 0, + ) -> None: + """ + Adds a `schema` attribute with the Table Schema, resets + the index (can't do in caller, because the schema inference needs + to know what the index is, forces orient to records, and forces + date_format to 'iso'. + """ + super().__init__( + obj, + orient, + date_format, + double_precision, + ensure_ascii, + date_unit, + index, + default_handler=default_handler, + indent=indent, + ) + + if date_format != "iso": + msg = ( + "Trying to write with `orient='table'` and " + f"`date_format='{date_format}'`. Table Schema requires dates " + "to be formatted with `date_format='iso'`" + ) + raise ValueError(msg) + + self.schema = build_table_schema(obj, index=self.index) + + # NotImplemented on a column MultiIndex + if obj.ndim == 2 and isinstance(obj.columns, MultiIndex): + raise NotImplementedError( + "orient='table' is not supported for MultiIndex columns" + ) + + # TODO: Do this timedelta properly in objToJSON.c See GH #15137 + if ( + (obj.ndim == 1) + and (obj.name in set(obj.index.names)) + or len(obj.columns.intersection(obj.index.names)) + ): + msg = "Overlapping names between the index and columns" + raise ValueError(msg) + + obj = obj.copy() + timedeltas = obj.select_dtypes(include=["timedelta"]).columns + if len(timedeltas): + obj[timedeltas] = obj[timedeltas].map(lambda x: x.isoformat()) + # Convert PeriodIndex to datetimes before serializing + if isinstance(obj.index.dtype, PeriodDtype): + obj.index = obj.index.to_timestamp() + + # exclude index from obj if index=False + if not self.index: + self.obj = obj.reset_index(drop=True) + else: + self.obj = obj.reset_index(drop=False) + self.date_format = "iso" + self.orient = "records" + self.index = index + + @property + def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: + return {"schema": self.schema, "data": self.obj} + + +@overload +def read_json( + path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], + *, + orient: str | None = ..., + typ: Literal["frame"] = ..., + dtype: DtypeArg | None = ..., + convert_axes: bool | None = ..., + convert_dates: bool | list[str] = ..., + keep_default_dates: bool = ..., + precise_float: bool = ..., + date_unit: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + lines: bool = ..., + chunksize: int, + compression: CompressionOptions = ..., + nrows: int | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + engine: JSONEngine = ..., +) -> JsonReader[Literal["frame"]]: + ... + + +@overload +def read_json( + path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], + *, + orient: str | None = ..., + typ: Literal["series"], + dtype: DtypeArg | None = ..., + convert_axes: bool | None = ..., + convert_dates: bool | list[str] = ..., + keep_default_dates: bool = ..., + precise_float: bool = ..., + date_unit: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + lines: bool = ..., + chunksize: int, + compression: CompressionOptions = ..., + nrows: int | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + engine: JSONEngine = ..., +) -> JsonReader[Literal["series"]]: + ... 
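+
+
+# A hedged sketch of the orient="table" path implemented by JSONTableWriter
+# above (invented data; behaviour follows the checks in its __init__):
+#
+#     import pandas as pd
+#     df = pd.DataFrame({"a": [1, 2]})
+#     s = df.to_json(orient="table")  # embeds a Table Schema
+#     assert '"schema"' in s and '"primaryKey"' in s
+#     df.to_json(orient="table", date_format="epoch")  # raises ValueError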
+ + +@overload +def read_json( + path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], + *, + orient: str | None = ..., + typ: Literal["series"], + dtype: DtypeArg | None = ..., + convert_axes: bool | None = ..., + convert_dates: bool | list[str] = ..., + keep_default_dates: bool = ..., + precise_float: bool = ..., + date_unit: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + lines: bool = ..., + chunksize: None = ..., + compression: CompressionOptions = ..., + nrows: int | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + engine: JSONEngine = ..., +) -> Series: + ... + + +@overload +def read_json( + path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], + *, + orient: str | None = ..., + typ: Literal["frame"] = ..., + dtype: DtypeArg | None = ..., + convert_axes: bool | None = ..., + convert_dates: bool | list[str] = ..., + keep_default_dates: bool = ..., + precise_float: bool = ..., + date_unit: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + lines: bool = ..., + chunksize: None = ..., + compression: CompressionOptions = ..., + nrows: int | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + engine: JSONEngine = ..., +) -> DataFrame: + ... + + +@doc( + storage_options=_shared_docs["storage_options"], + decompression_options=_shared_docs["decompression_options"] % "path_or_buf", +) +def read_json( + path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], + *, + orient: str | None = None, + typ: Literal["frame", "series"] = "frame", + dtype: DtypeArg | None = None, + convert_axes: bool | None = None, + convert_dates: bool | list[str] = True, + keep_default_dates: bool = True, + precise_float: bool = False, + date_unit: str | None = None, + encoding: str | None = None, + encoding_errors: str | None = "strict", + lines: bool = False, + chunksize: int | None = None, + compression: CompressionOptions = "infer", + nrows: int | None = None, + storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + engine: JSONEngine = "ujson", +) -> DataFrame | Series | JsonReader: + """ + Convert a JSON string to pandas object. + + Parameters + ---------- + path_or_buf : a valid JSON str, path object or file-like object + Any valid string path is acceptable. The string could be a URL. Valid + URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: + ``file://localhost/path/to/table.json``. + + If you want to pass in a path object, pandas accepts any + ``os.PathLike``. + + By file-like object, we refer to objects with a ``read()`` method, + such as a file handle (e.g. via builtin ``open`` function) + or ``StringIO``. + + .. deprecated:: 2.1.0 + Passing json literal strings is deprecated. + + orient : str, optional + Indication of expected JSON string format. + Compatible JSON strings can be produced by ``to_json()`` with a + corresponding orient value. + The set of possible orients is: + + - ``'split'`` : dict like + ``{{index -> [index], columns -> [columns], data -> [values]}}`` + - ``'records'`` : list like + ``[{{column -> value}}, ... 
, {{column -> value}}]`` + - ``'index'`` : dict like ``{{index -> {{column -> value}}}}`` + - ``'columns'`` : dict like ``{{column -> {{index -> value}}}}`` + - ``'values'`` : just the values array + - ``'table'`` : dict like ``{{'schema': {{schema}}, 'data': {{data}}}}`` + + The allowed and default values depend on the value + of the `typ` parameter. + + * when ``typ == 'series'``, + + - allowed orients are ``{{'split','records','index'}}`` + - default is ``'index'`` + - The Series index must be unique for orient ``'index'``. + + * when ``typ == 'frame'``, + + - allowed orients are ``{{'split','records','index', + 'columns','values', 'table'}}`` + - default is ``'columns'`` + - The DataFrame index must be unique for orients ``'index'`` and + ``'columns'``. + - The DataFrame columns must be unique for orients ``'index'``, + ``'columns'``, and ``'records'``. + + typ : {{'frame', 'series'}}, default 'frame' + The type of object to recover. + + dtype : bool or dict, default None + If True, infer dtypes; if a dict of column to dtype, then use those; + if False, then don't infer dtypes at all, applies only to the data. + + For all ``orient`` values except ``'table'``, default is True. + + convert_axes : bool, default None + Try to convert the axes to the proper dtypes. + + For all ``orient`` values except ``'table'``, default is True. + + convert_dates : bool or list of str, default True + If True then default datelike columns may be converted (depending on + keep_default_dates). + If False, no dates will be converted. + If a list of column names, then those columns will be converted and + default datelike columns may also be converted (depending on + keep_default_dates). + + keep_default_dates : bool, default True + If parsing dates (convert_dates is not False), then try to parse the + default datelike columns. + A column label is datelike if + + * it ends with ``'_at'``, + + * it ends with ``'_time'``, + + * it begins with ``'timestamp'``, + + * it is ``'modified'``, or + + * it is ``'date'``. + + precise_float : bool, default False + Set to enable usage of higher precision (strtod) function when + decoding string to double values. Default (False) is to use fast but + less precise builtin functionality. + + date_unit : str, default None + The timestamp unit to detect if converting dates. The default behaviour + is to try and detect the correct precision, but if this is not desired + then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds, + milliseconds, microseconds or nanoseconds respectively. + + encoding : str, default is 'utf-8' + The encoding to use to decode py3 bytes. + + encoding_errors : str, optional, default "strict" + How encoding errors are treated. `List of possible values + `_ . + + .. versionadded:: 1.3.0 + + lines : bool, default False + Read the file as a json object per line. + + chunksize : int, optional + Return JsonReader object for iteration. + See the `line-delimited json docs + `_ + for more information on ``chunksize``. + This can only be passed if `lines=True`. + If this is None, the file will be read into memory all at once. + + .. versionchanged:: 1.2 + + ``JsonReader`` is a context manager. + + {decompression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + nrows : int, optional + The number of lines from the line-delimited jsonfile that has to be read. + This can only be passed if `lines=True`. + If this is None, all the rows will be returned. + + {storage_options} + + .. 
versionadded:: 1.2.0 + + dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + engine : {{"ujson", "pyarrow"}}, default "ujson" + Parser engine to use. The ``"pyarrow"`` engine is only available when + ``lines=True``. + + .. versionadded:: 2.0 + + Returns + ------- + Series, DataFrame, or pandas.api.typing.JsonReader + A JsonReader is returned when ``chunksize`` is not ``0`` or ``None``. + Otherwise, the type returned depends on the value of ``typ``. + + See Also + -------- + DataFrame.to_json : Convert a DataFrame to a JSON string. + Series.to_json : Convert a Series to a JSON string. + json_normalize : Normalize semi-structured JSON data into a flat table. + + Notes + ----- + Specific to ``orient='table'``, if a :class:`DataFrame` with a literal + :class:`Index` name of `index` gets written with :func:`to_json`, the + subsequent read operation will incorrectly set the :class:`Index` name to + ``None``. This is because `index` is also used by :func:`DataFrame.to_json` + to denote a missing :class:`Index` name, and the subsequent + :func:`read_json` operation cannot distinguish between the two. The same + limitation is encountered with a :class:`MultiIndex` and any names + beginning with ``'level_'``. + + Examples + -------- + >>> from io import StringIO + >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']], + ... index=['row 1', 'row 2'], + ... columns=['col 1', 'col 2']) + + Encoding/decoding a Dataframe using ``'split'`` formatted JSON: + + >>> df.to_json(orient='split') + '\ +{{\ +"columns":["col 1","col 2"],\ +"index":["row 1","row 2"],\ +"data":[["a","b"],["c","d"]]\ +}}\ +' + >>> pd.read_json(StringIO(_), orient='split') + col 1 col 2 + row 1 a b + row 2 c d + + Encoding/decoding a Dataframe using ``'index'`` formatted JSON: + + >>> df.to_json(orient='index') + '{{"row 1":{{"col 1":"a","col 2":"b"}},"row 2":{{"col 1":"c","col 2":"d"}}}}' + + >>> pd.read_json(StringIO(_), orient='index') + col 1 col 2 + row 1 a b + row 2 c d + + Encoding/decoding a Dataframe using ``'records'`` formatted JSON. + Note that index labels are not preserved with this encoding. 
+ + >>> df.to_json(orient='records') + '[{{"col 1":"a","col 2":"b"}},{{"col 1":"c","col 2":"d"}}]' + >>> pd.read_json(StringIO(_), orient='records') + col 1 col 2 + 0 a b + 1 c d + + Encoding with Table Schema + + >>> df.to_json(orient='table') + '\ +{{"schema":{{"fields":[\ +{{"name":"index","type":"string"}},\ +{{"name":"col 1","type":"string"}},\ +{{"name":"col 2","type":"string"}}],\ +"primaryKey":["index"],\ +"pandas_version":"1.4.0"}},\ +"data":[\ +{{"index":"row 1","col 1":"a","col 2":"b"}},\ +{{"index":"row 2","col 1":"c","col 2":"d"}}]\ +}}\ +' + """ + if orient == "table" and dtype: + raise ValueError("cannot pass both dtype and orient='table'") + if orient == "table" and convert_axes: + raise ValueError("cannot pass both convert_axes and orient='table'") + + check_dtype_backend(dtype_backend) + + if dtype is None and orient != "table": + # error: Incompatible types in assignment (expression has type "bool", variable + # has type "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], + # Type[int], Type[complex], Type[bool], Type[object], Dict[Hashable, + # Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float], + # Type[int], Type[complex], Type[bool], Type[object]]], None]") + dtype = True # type: ignore[assignment] + if convert_axes is None and orient != "table": + convert_axes = True + + json_reader = JsonReader( + path_or_buf, + orient=orient, + typ=typ, + dtype=dtype, + convert_axes=convert_axes, + convert_dates=convert_dates, + keep_default_dates=keep_default_dates, + precise_float=precise_float, + date_unit=date_unit, + encoding=encoding, + lines=lines, + chunksize=chunksize, + compression=compression, + nrows=nrows, + storage_options=storage_options, + encoding_errors=encoding_errors, + dtype_backend=dtype_backend, + engine=engine, + ) + + if chunksize: + return json_reader + else: + return json_reader.read() + + +class JsonReader(abc.Iterator, Generic[FrameSeriesStrT]): + """ + JsonReader provides an interface for reading in a JSON file. + + If initialized with ``lines=True`` and ``chunksize``, can be iterated over + ``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the + whole document. + """ + + def __init__( + self, + filepath_or_buffer, + orient, + typ: FrameSeriesStrT, + dtype, + convert_axes: bool | None, + convert_dates, + keep_default_dates: bool, + precise_float: bool, + date_unit, + encoding, + lines: bool, + chunksize: int | None, + compression: CompressionOptions, + nrows: int | None, + storage_options: StorageOptions | None = None, + encoding_errors: str | None = "strict", + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + engine: JSONEngine = "ujson", + ) -> None: + self.orient = orient + self.typ = typ + self.dtype = dtype + self.convert_axes = convert_axes + self.convert_dates = convert_dates + self.keep_default_dates = keep_default_dates + self.precise_float = precise_float + self.date_unit = date_unit + self.encoding = encoding + self.engine = engine + self.compression = compression + self.storage_options = storage_options + self.lines = lines + self.chunksize = chunksize + self.nrows_seen = 0 + self.nrows = nrows + self.encoding_errors = encoding_errors + self.handles: IOHandles[str] | None = None + self.dtype_backend = dtype_backend + + if self.engine not in {"pyarrow", "ujson"}: + raise ValueError( + f"The engine type {self.engine} is currently not supported." 
+ ) + if self.chunksize is not None: + self.chunksize = validate_integer("chunksize", self.chunksize, 1) + if not self.lines: + raise ValueError("chunksize can only be passed if lines=True") + if self.engine == "pyarrow": + raise ValueError( + "currently pyarrow engine doesn't support chunksize parameter" + ) + if self.nrows is not None: + self.nrows = validate_integer("nrows", self.nrows, 0) + if not self.lines: + raise ValueError("nrows can only be passed if lines=True") + if ( + isinstance(filepath_or_buffer, str) + and not self.lines + and "\n" in filepath_or_buffer + ): + warnings.warn( + "Passing literal json to 'read_json' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if self.engine == "pyarrow": + if not self.lines: + raise ValueError( + "currently pyarrow engine only supports " + "the line-delimited JSON format" + ) + self.data = filepath_or_buffer + elif self.engine == "ujson": + data = self._get_data_from_filepath(filepath_or_buffer) + self.data = self._preprocess_data(data) + + def _preprocess_data(self, data): + """ + At this point, the data either has a `read` attribute (e.g. a file + object or a StringIO) or is a string that is a JSON document. + + If self.chunksize, we prepare the data for the `__next__` method. + Otherwise, we read it into memory for the `read` method. + """ + if hasattr(data, "read") and not (self.chunksize or self.nrows): + with self: + data = data.read() + if not hasattr(data, "read") and (self.chunksize or self.nrows): + data = StringIO(data) + + return data + + def _get_data_from_filepath(self, filepath_or_buffer): + """ + The function read_json accepts three input types: + 1. filepath (string-like) + 2. file-like object (e.g. open file object, StringIO) + 3. JSON string + + This method turns (1) into (2) to simplify the rest of the processing. + It returns input types (2) and (3) unchanged. + + It raises FileNotFoundError if the input is a string ending in + one of .json, .json.gz, .json.bz2, etc. but no such file exists. + """ + # if it is a string but the file does not exist, it might be a JSON string + filepath_or_buffer = stringify_path(filepath_or_buffer) + if ( + not isinstance(filepath_or_buffer, str) + or is_url(filepath_or_buffer) + or is_fsspec_url(filepath_or_buffer) + or file_exists(filepath_or_buffer) + ): + self.handles = get_handle( + filepath_or_buffer, + "r", + encoding=self.encoding, + compression=self.compression, + storage_options=self.storage_options, + errors=self.encoding_errors, + ) + filepath_or_buffer = self.handles.handle + elif ( + isinstance(filepath_or_buffer, str) + and filepath_or_buffer.lower().endswith( + (".json",) + tuple(f".json{c}" for c in extension_to_compression) + ) + and not file_exists(filepath_or_buffer) + ): + raise FileNotFoundError(f"File {filepath_or_buffer} does not exist") + else: + warnings.warn( + "Passing literal json to 'read_json' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return filepath_or_buffer + + def _combine_lines(self, lines) -> str: + """ + Combines a list of JSON objects into one JSON object. + """ + return ( + f'[{",".join([line for line in (line.strip() for line in lines) if line])}]' + ) + + @overload + def read(self: JsonReader[Literal["frame"]]) -> DataFrame: + ... 
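+    # (Editorial note, not upstream pandas source) The ``Literal``-keyed
+    # overloads above and below let static type checkers narrow the return
+    # type of ``read`` based on the ``typ`` argument originally passed to
+    # ``read_json``. A minimal sketch of the effect, illustrative only:
+    #
+    #     from io import StringIO
+    #     import pandas as pd
+    #
+    #     ser = pd.read_json(StringIO('{"a": 1}'), typ="series")  # -> Series
+    #     df = pd.read_json(StringIO('[{"a": 1}]'))               # -> DataFrame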
+ + @overload + def read(self: JsonReader[Literal["series"]]) -> Series: + ... + + @overload + def read(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series: + ... + + def read(self) -> DataFrame | Series: + """ + Read the whole JSON input into a pandas object. + """ + obj: DataFrame | Series + with self: + if self.engine == "pyarrow": + pyarrow_json = import_optional_dependency("pyarrow.json") + pa_table = pyarrow_json.read_json(self.data) + + mapping: type[ArrowDtype] | None | Callable + if self.dtype_backend == "pyarrow": + mapping = ArrowDtype + elif self.dtype_backend == "numpy_nullable": + from pandas.io._util import _arrow_dtype_mapping + + mapping = _arrow_dtype_mapping().get + else: + mapping = None + + return pa_table.to_pandas(types_mapper=mapping) + elif self.engine == "ujson": + if self.lines: + if self.chunksize: + obj = concat(self) + elif self.nrows: + lines = list(islice(self.data, self.nrows)) + lines_json = self._combine_lines(lines) + obj = self._get_object_parser(lines_json) + else: + data = ensure_str(self.data) + data_lines = data.split("\n") + obj = self._get_object_parser(self._combine_lines(data_lines)) + else: + obj = self._get_object_parser(self.data) + if self.dtype_backend is not lib.no_default: + return obj.convert_dtypes( + infer_objects=False, dtype_backend=self.dtype_backend + ) + else: + return obj + + def _get_object_parser(self, json) -> DataFrame | Series: + """ + Parses a json document into a pandas object. + """ + typ = self.typ + dtype = self.dtype + kwargs = { + "orient": self.orient, + "dtype": self.dtype, + "convert_axes": self.convert_axes, + "convert_dates": self.convert_dates, + "keep_default_dates": self.keep_default_dates, + "precise_float": self.precise_float, + "date_unit": self.date_unit, + "dtype_backend": self.dtype_backend, + } + obj = None + if typ == "frame": + obj = FrameParser(json, **kwargs).parse() + + if typ == "series" or obj is None: + if not isinstance(dtype, bool): + kwargs["dtype"] = dtype + obj = SeriesParser(json, **kwargs).parse() + + return obj + + def close(self) -> None: + """ + If we opened a stream earlier, in _get_data_from_filepath, we should + close it. + + If an open stream or file was passed, we leave it open. + """ + if self.handles is not None: + self.handles.close() + + def __iter__(self: JsonReader[FrameSeriesStrT]) -> JsonReader[FrameSeriesStrT]: + return self + + @overload + def __next__(self: JsonReader[Literal["frame"]]) -> DataFrame: + ... + + @overload + def __next__(self: JsonReader[Literal["series"]]) -> Series: + ... + + @overload + def __next__(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series: + ... + + def __next__(self) -> DataFrame | Series: + if self.nrows and self.nrows_seen >= self.nrows: + self.close() + raise StopIteration + + lines = list(islice(self.data, self.chunksize)) + if not lines: + self.close() + raise StopIteration + + try: + lines_json = self._combine_lines(lines) + obj = self._get_object_parser(lines_json) + + # Make sure that the returned objects have the right index. 
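+                # (Editorial note) Each parsed chunk arrives with a fresh
+                # default index starting at 0; reassigning it from nrows_seen
+                # keeps row labels consecutive across chunks, so concatenating
+                # the pieces reproduces the labels of a single full read.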
+ obj.index = range(self.nrows_seen, self.nrows_seen + len(obj)) + self.nrows_seen += len(obj) + except Exception as ex: + self.close() + raise ex + + if self.dtype_backend is not lib.no_default: + return obj.convert_dtypes( + infer_objects=False, dtype_backend=self.dtype_backend + ) + else: + return obj + + def __enter__(self) -> JsonReader[FrameSeriesStrT]: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.close() + + +class Parser: + _split_keys: tuple[str, ...] + _default_orient: str + + _STAMP_UNITS = ("s", "ms", "us", "ns") + _MIN_STAMPS = { + "s": 31536000, + "ms": 31536000000, + "us": 31536000000000, + "ns": 31536000000000000, + } + + def __init__( + self, + json, + orient, + dtype: DtypeArg | None = None, + convert_axes: bool = True, + convert_dates: bool | list[str] = True, + keep_default_dates: bool = False, + precise_float: bool = False, + date_unit=None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + ) -> None: + self.json = json + + if orient is None: + orient = self._default_orient + + self.orient = orient + + self.dtype = dtype + + if date_unit is not None: + date_unit = date_unit.lower() + if date_unit not in self._STAMP_UNITS: + raise ValueError(f"date_unit must be one of {self._STAMP_UNITS}") + self.min_stamp = self._MIN_STAMPS[date_unit] + else: + self.min_stamp = self._MIN_STAMPS["s"] + + self.precise_float = precise_float + self.convert_axes = convert_axes + self.convert_dates = convert_dates + self.date_unit = date_unit + self.keep_default_dates = keep_default_dates + self.obj: DataFrame | Series | None = None + self.dtype_backend = dtype_backend + + def check_keys_split(self, decoded) -> None: + """ + Checks that dict has only the appropriate keys for orient='split'. + """ + bad_keys = set(decoded.keys()).difference(set(self._split_keys)) + if bad_keys: + bad_keys_joined = ", ".join(bad_keys) + raise ValueError(f"JSON data had unexpected key(s): {bad_keys_joined}") + + def parse(self): + self._parse() + + if self.obj is None: + return None + if self.convert_axes: + self._convert_axes() + self._try_convert_types() + return self.obj + + def _parse(self): + raise AbstractMethodError(self) + + def _convert_axes(self) -> None: + """ + Try to convert axes. + """ + obj = self.obj + assert obj is not None # for mypy + for axis_name in obj._AXIS_ORDERS: + new_axis, result = self._try_convert_data( + name=axis_name, + data=obj._get_axis(axis_name), + use_dtypes=False, + convert_dates=True, + ) + if result: + setattr(self.obj, axis_name, new_axis) + + def _try_convert_types(self): + raise AbstractMethodError(self) + + def _try_convert_data( + self, + name: Hashable, + data, + use_dtypes: bool = True, + convert_dates: bool | list[str] = True, + ): + """ + Try to parse a ndarray like into a column by inferring dtype. 
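+
+        (Editorial note, not upstream text) Roughly, the cascade below applies
+        a forced ``dtype`` if one was supplied, then optionally tries dates,
+        then attempts object -> float64 and exact float/object -> int64
+        coercions. An illustrative doctest, assuming Series-like input:
+
+        >>> import pandas as pd
+        >>> s = pd.Series(["1", "2"], dtype=object)
+        >>> s.astype("float64").astype("int64").tolist()
+        [1, 2]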
+ """ + # don't try to coerce, unless a force conversion + if use_dtypes: + if not self.dtype: + if all(notna(data)): + return data, False + return data.fillna(np.nan), True + + elif self.dtype is True: + pass + else: + # dtype to force + dtype = ( + self.dtype.get(name) if isinstance(self.dtype, dict) else self.dtype + ) + if dtype is not None: + try: + return data.astype(dtype), True + except (TypeError, ValueError): + return data, False + + if convert_dates: + new_data, result = self._try_convert_to_date(data) + if result: + return new_data, True + + if self.dtype_backend is not lib.no_default and not isinstance(data, ABCIndex): + # Fall through for conversion later on + return data, True + elif data.dtype == "object": + # try float + try: + data = data.astype("float64") + except (TypeError, ValueError): + pass + + if data.dtype.kind == "f": + if data.dtype != "float64": + # coerce floats to 64 + try: + data = data.astype("float64") + except (TypeError, ValueError): + pass + + # don't coerce 0-len data + if len(data) and data.dtype in ("float", "object"): + # coerce ints if we can + try: + new_data = data.astype("int64") + if (new_data == data).all(): + data = new_data + except (TypeError, ValueError, OverflowError): + pass + + # coerce ints to 64 + if data.dtype == "int": + # coerce floats to 64 + try: + data = data.astype("int64") + except (TypeError, ValueError): + pass + + # if we have an index, we want to preserve dtypes + if name == "index" and len(data): + if self.orient == "split": + return data, False + + return data, True + + def _try_convert_to_date(self, data): + """ + Try to parse a ndarray like into a date column. + + Try to coerce object in epoch/iso formats and integer/float in epoch + formats. Return a boolean if parsing was successful. 
+ """ + # no conversion on empty + if not len(data): + return data, False + + new_data = data + if new_data.dtype == "object": + try: + new_data = data.astype("int64") + except OverflowError: + return data, False + except (TypeError, ValueError): + pass + + # ignore numbers that are out of range + if issubclass(new_data.dtype.type, np.number): + in_range = ( + isna(new_data._values) + | (new_data > self.min_stamp) + | (new_data._values == iNaT) + ) + if not in_range.all(): + return data, False + + date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS + for date_unit in date_units: + try: + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + ".*parsing datetimes with mixed time " + "zones will raise an error", + category=FutureWarning, + ) + new_data = to_datetime(new_data, errors="raise", unit=date_unit) + except (ValueError, OverflowError, TypeError): + continue + return new_data, True + return data, False + + def _try_convert_dates(self): + raise AbstractMethodError(self) + + +class SeriesParser(Parser): + _default_orient = "index" + _split_keys = ("name", "index", "data") + + def _parse(self) -> None: + data = ujson_loads(self.json, precise_float=self.precise_float) + + if self.orient == "split": + decoded = {str(k): v for k, v in data.items()} + self.check_keys_split(decoded) + self.obj = Series(**decoded) + else: + self.obj = Series(data) + + def _try_convert_types(self) -> None: + if self.obj is None: + return + obj, result = self._try_convert_data( + "data", self.obj, convert_dates=self.convert_dates + ) + if result: + self.obj = obj + + +class FrameParser(Parser): + _default_orient = "columns" + _split_keys = ("columns", "index", "data") + + def _parse(self) -> None: + json = self.json + orient = self.orient + + if orient == "columns": + self.obj = DataFrame( + ujson_loads(json, precise_float=self.precise_float), dtype=None + ) + elif orient == "split": + decoded = { + str(k): v + for k, v in ujson_loads(json, precise_float=self.precise_float).items() + } + self.check_keys_split(decoded) + orig_names = [ + (tuple(col) if isinstance(col, list) else col) + for col in decoded["columns"] + ] + decoded["columns"] = dedup_names( + orig_names, + is_potential_multi_index(orig_names, None), + ) + self.obj = DataFrame(dtype=None, **decoded) + elif orient == "index": + self.obj = DataFrame.from_dict( + ujson_loads(json, precise_float=self.precise_float), + dtype=None, + orient="index", + ) + elif orient == "table": + self.obj = parse_table_schema(json, precise_float=self.precise_float) + else: + self.obj = DataFrame( + ujson_loads(json, precise_float=self.precise_float), dtype=None + ) + + def _process_converter(self, f, filt=None) -> None: + """ + Take a conversion function and possibly recreate the frame. 
+ """ + if filt is None: + filt = lambda col, c: True + + obj = self.obj + assert obj is not None # for mypy + + needs_new_obj = False + new_obj = {} + for i, (col, c) in enumerate(obj.items()): + if filt(col, c): + new_data, result = f(col, c) + if result: + c = new_data + needs_new_obj = True + new_obj[i] = c + + if needs_new_obj: + # possibly handle dup columns + new_frame = DataFrame(new_obj, index=obj.index) + new_frame.columns = obj.columns + self.obj = new_frame + + def _try_convert_types(self) -> None: + if self.obj is None: + return + if self.convert_dates: + self._try_convert_dates() + + self._process_converter( + lambda col, c: self._try_convert_data(col, c, convert_dates=False) + ) + + def _try_convert_dates(self) -> None: + if self.obj is None: + return + + # our columns to parse + convert_dates_list_bool = self.convert_dates + if isinstance(convert_dates_list_bool, bool): + convert_dates_list_bool = [] + convert_dates = set(convert_dates_list_bool) + + def is_ok(col) -> bool: + """ + Return if this col is ok to try for a date parse. + """ + if not isinstance(col, str): + return False + + col_lower = col.lower() + if ( + col_lower.endswith(("_at", "_time")) + or col_lower == "modified" + or col_lower == "date" + or col_lower == "datetime" + or col_lower.startswith("timestamp") + ): + return True + return False + + self._process_converter( + lambda col, c: self._try_convert_to_date(c), + lambda col, c: ( + (self.keep_default_dates and is_ok(col)) or col in convert_dates + ), + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/_normalize.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/_normalize.py new file mode 100644 index 00000000..b1e2210f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/_normalize.py @@ -0,0 +1,544 @@ +# --------------------------------------------------------------------- +# JSON normalization routines +from __future__ import annotations + +from collections import ( + abc, + defaultdict, +) +import copy +from typing import ( + TYPE_CHECKING, + Any, + DefaultDict, +) + +import numpy as np + +from pandas._libs.writers import convert_json_to_lines + +import pandas as pd +from pandas import DataFrame + +if TYPE_CHECKING: + from collections.abc import Iterable + + from pandas._typing import ( + IgnoreRaise, + Scalar, + ) + + +def convert_to_line_delimits(s: str) -> str: + """ + Helper function that converts JSON lists to line delimited JSON. + """ + # Determine we have a JSON list to turn to lines otherwise just return the + # json object, only lists can + if not s[0] == "[" and s[-1] == "]": + return s + s = s[1:-1] + + return convert_json_to_lines(s) + + +def nested_to_record( + ds, + prefix: str = "", + sep: str = ".", + level: int = 0, + max_level: int | None = None, +): + """ + A simplified json_normalize + + Converts a nested dict into a flat dict ("record"), unlike json_normalize, + it does not attempt to extract a subset of the data. + + Parameters + ---------- + ds : dict or list of dicts + prefix: the prefix, optional, default: "" + sep : str, default '.' + Nested records will generate names separated by sep, + e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar + level: int, optional, default: 0 + The number of levels in the json string. + + max_level: int, optional, default: None + The max depth to normalize. + + Returns + ------- + d - dict or list of dicts, matching `ds` + + Examples + -------- + >>> nested_to_record( + ... 
dict(flat1=1, dict1=dict(c=1, d=2), nested=dict(e=dict(c=1, d=2), d=2)) + ... ) + {\ +'flat1': 1, \ +'dict1.c': 1, \ +'dict1.d': 2, \ +'nested.e.c': 1, \ +'nested.e.d': 2, \ +'nested.d': 2\ +} + """ + singleton = False + if isinstance(ds, dict): + ds = [ds] + singleton = True + new_ds = [] + for d in ds: + new_d = copy.deepcopy(d) + for k, v in d.items(): + # each key gets renamed with prefix + if not isinstance(k, str): + k = str(k) + if level == 0: + newkey = k + else: + newkey = prefix + sep + k + + # flatten if type is dict and + # current dict level < maximum level provided and + # only dicts gets recurse-flattened + # only at level>1 do we rename the rest of the keys + if not isinstance(v, dict) or ( + max_level is not None and level >= max_level + ): + if level != 0: # so we skip copying for top level, common case + v = new_d.pop(k) + new_d[newkey] = v + continue + + v = new_d.pop(k) + new_d.update(nested_to_record(v, newkey, sep, level + 1, max_level)) + new_ds.append(new_d) + + if singleton: + return new_ds[0] + return new_ds + + +def _normalise_json( + data: Any, + key_string: str, + normalized_dict: dict[str, Any], + separator: str, +) -> dict[str, Any]: + """ + Main recursive function + Designed for the most basic use case of pd.json_normalize(data) + intended as a performance improvement, see #15621 + + Parameters + ---------- + data : Any + Type dependent on types contained within nested Json + key_string : str + New key (with separator(s) in) for data + normalized_dict : dict + The new normalized/flattened Json dict + separator : str, default '.' + Nested records will generate names separated by sep, + e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar + """ + if isinstance(data, dict): + for key, value in data.items(): + new_key = f"{key_string}{separator}{key}" + + if not key_string: + new_key = new_key.removeprefix(separator) + + _normalise_json( + data=value, + key_string=new_key, + normalized_dict=normalized_dict, + separator=separator, + ) + else: + normalized_dict[key_string] = data + return normalized_dict + + +def _normalise_json_ordered(data: dict[str, Any], separator: str) -> dict[str, Any]: + """ + Order the top level keys and then recursively go to depth + + Parameters + ---------- + data : dict or list of dicts + separator : str, default '.' + Nested records will generate names separated by sep, + e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar + + Returns + ------- + dict or list of dicts, matching `normalised_json_object` + """ + top_dict_ = {k: v for k, v in data.items() if not isinstance(v, dict)} + nested_dict_ = _normalise_json( + data={k: v for k, v in data.items() if isinstance(v, dict)}, + key_string="", + normalized_dict={}, + separator=separator, + ) + return {**top_dict_, **nested_dict_} + + +def _simple_json_normalize( + ds: dict | list[dict], + sep: str = ".", +) -> dict | list[dict] | Any: + """ + A optimized basic json_normalize + + Converts a nested dict into a flat dict ("record"), unlike + json_normalize and nested_to_record it doesn't do anything clever. + But for the most basic use cases it enhances performance. + E.g. pd.json_normalize(data) + + Parameters + ---------- + ds : dict or list of dicts + sep : str, default '.' + Nested records will generate names separated by sep, + e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar + + Returns + ------- + frame : DataFrame + d - dict or list of dicts, matching `normalised_json_object` + + Examples + -------- + >>> _simple_json_normalize( + ... { + ... "flat1": 1, + ... 
"dict1": {"c": 1, "d": 2}, + ... "nested": {"e": {"c": 1, "d": 2}, "d": 2}, + ... } + ... ) + {\ +'flat1': 1, \ +'dict1.c': 1, \ +'dict1.d': 2, \ +'nested.e.c': 1, \ +'nested.e.d': 2, \ +'nested.d': 2\ +} + + """ + normalised_json_object = {} + # expect a dictionary, as most jsons are. However, lists are perfectly valid + if isinstance(ds, dict): + normalised_json_object = _normalise_json_ordered(data=ds, separator=sep) + elif isinstance(ds, list): + normalised_json_list = [_simple_json_normalize(row, sep=sep) for row in ds] + return normalised_json_list + return normalised_json_object + + +def json_normalize( + data: dict | list[dict], + record_path: str | list | None = None, + meta: str | list[str | list[str]] | None = None, + meta_prefix: str | None = None, + record_prefix: str | None = None, + errors: IgnoreRaise = "raise", + sep: str = ".", + max_level: int | None = None, +) -> DataFrame: + """ + Normalize semi-structured JSON data into a flat table. + + Parameters + ---------- + data : dict or list of dicts + Unserialized JSON objects. + record_path : str or list of str, default None + Path in each object to list of records. If not passed, data will be + assumed to be an array of records. + meta : list of paths (str or list of str), default None + Fields to use as metadata for each record in resulting table. + meta_prefix : str, default None + If True, prefix records with dotted (?) path, e.g. foo.bar.field if + meta is ['foo', 'bar']. + record_prefix : str, default None + If True, prefix records with dotted (?) path, e.g. foo.bar.field if + path to records is ['foo', 'bar']. + errors : {'raise', 'ignore'}, default 'raise' + Configures error handling. + + * 'ignore' : will ignore KeyError if keys listed in meta are not + always present. + * 'raise' : will raise KeyError if keys listed in meta are not + always present. + sep : str, default '.' + Nested records will generate names separated by sep. + e.g., for sep='.', {'foo': {'bar': 0}} -> foo.bar. + max_level : int, default None + Max number of levels(depth of dict) to normalize. + if None, normalizes all levels. + + Returns + ------- + frame : DataFrame + Normalize semi-structured JSON data into a flat table. + + Examples + -------- + >>> data = [ + ... {"id": 1, "name": {"first": "Coleen", "last": "Volk"}}, + ... {"name": {"given": "Mark", "family": "Regner"}}, + ... {"id": 2, "name": "Faye Raker"}, + ... ] + >>> pd.json_normalize(data) + id name.first name.last name.given name.family name + 0 1.0 Coleen Volk NaN NaN NaN + 1 NaN NaN NaN Mark Regner NaN + 2 2.0 NaN NaN NaN NaN Faye Raker + + >>> data = [ + ... { + ... "id": 1, + ... "name": "Cole Volk", + ... "fitness": {"height": 130, "weight": 60}, + ... }, + ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}}, + ... { + ... "id": 2, + ... "name": "Faye Raker", + ... "fitness": {"height": 130, "weight": 60}, + ... }, + ... ] + >>> pd.json_normalize(data, max_level=0) + id name fitness + 0 1.0 Cole Volk {'height': 130, 'weight': 60} + 1 NaN Mark Reg {'height': 130, 'weight': 60} + 2 2.0 Faye Raker {'height': 130, 'weight': 60} + + Normalizes nested data up to level 1. + + >>> data = [ + ... { + ... "id": 1, + ... "name": "Cole Volk", + ... "fitness": {"height": 130, "weight": 60}, + ... }, + ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}}, + ... { + ... "id": 2, + ... "name": "Faye Raker", + ... "fitness": {"height": 130, "weight": 60}, + ... }, + ... 
] + >>> pd.json_normalize(data, max_level=1) + id name fitness.height fitness.weight + 0 1.0 Cole Volk 130 60 + 1 NaN Mark Reg 130 60 + 2 2.0 Faye Raker 130 60 + + >>> data = [ + ... { + ... "state": "Florida", + ... "shortname": "FL", + ... "info": {"governor": "Rick Scott"}, + ... "counties": [ + ... {"name": "Dade", "population": 12345}, + ... {"name": "Broward", "population": 40000}, + ... {"name": "Palm Beach", "population": 60000}, + ... ], + ... }, + ... { + ... "state": "Ohio", + ... "shortname": "OH", + ... "info": {"governor": "John Kasich"}, + ... "counties": [ + ... {"name": "Summit", "population": 1234}, + ... {"name": "Cuyahoga", "population": 1337}, + ... ], + ... }, + ... ] + >>> result = pd.json_normalize( + ... data, "counties", ["state", "shortname", ["info", "governor"]] + ... ) + >>> result + name population state shortname info.governor + 0 Dade 12345 Florida FL Rick Scott + 1 Broward 40000 Florida FL Rick Scott + 2 Palm Beach 60000 Florida FL Rick Scott + 3 Summit 1234 Ohio OH John Kasich + 4 Cuyahoga 1337 Ohio OH John Kasich + + >>> data = {"A": [1, 2]} + >>> pd.json_normalize(data, "A", record_prefix="Prefix.") + Prefix.0 + 0 1 + 1 2 + + Returns normalized data with columns prefixed with the given string. + """ + + def _pull_field( + js: dict[str, Any], spec: list | str, extract_record: bool = False + ) -> Scalar | Iterable: + """Internal function to pull field""" + result = js + try: + if isinstance(spec, list): + for field in spec: + if result is None: + raise KeyError(field) + result = result[field] + else: + result = result[spec] + except KeyError as e: + if extract_record: + raise KeyError( + f"Key {e} not found. If specifying a record_path, all elements of " + f"data should have the path." + ) from e + if errors == "ignore": + return np.nan + else: + raise KeyError( + f"Key {e} not found. To replace missing values of {e} with " + f"np.nan, pass in errors='ignore'" + ) from e + + return result + + def _pull_records(js: dict[str, Any], spec: list | str) -> list: + """ + Internal function to pull field for records, and similar to + _pull_field, but require to return list. And will raise error + if has non iterable value. + """ + result = _pull_field(js, spec, extract_record=True) + + # GH 31507 GH 30145, GH 26284 if result is not list, raise TypeError if not + # null, otherwise return an empty list + if not isinstance(result, list): + if pd.isnull(result): + result = [] + else: + raise TypeError( + f"{js} has non list value {result} for path {spec}. " + "Must be list or null." 
+ ) + return result + + if isinstance(data, list) and not data: + return DataFrame() + elif isinstance(data, dict): + # A bit of a hackjob + data = [data] + elif isinstance(data, abc.Iterable) and not isinstance(data, str): + # GH35923 Fix pd.json_normalize to not skip the first element of a + # generator input + data = list(data) + else: + raise NotImplementedError + + # check to see if a simple recursive function is possible to + # improve performance (see #15621) but only for cases such + # as pd.Dataframe(data) or pd.Dataframe(data, sep) + if ( + record_path is None + and meta is None + and meta_prefix is None + and record_prefix is None + and max_level is None + ): + return DataFrame(_simple_json_normalize(data, sep=sep)) + + if record_path is None: + if any([isinstance(x, dict) for x in y.values()] for y in data): + # naive normalization, this is idempotent for flat records + # and potentially will inflate the data considerably for + # deeply nested structures: + # {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@} + # + # TODO: handle record value which are lists, at least error + # reasonably + data = nested_to_record(data, sep=sep, max_level=max_level) + return DataFrame(data) + elif not isinstance(record_path, list): + record_path = [record_path] + + if meta is None: + meta = [] + elif not isinstance(meta, list): + meta = [meta] + + _meta = [m if isinstance(m, list) else [m] for m in meta] + + # Disastrously inefficient for now + records: list = [] + lengths = [] + + meta_vals: DefaultDict = defaultdict(list) + meta_keys = [sep.join(val) for val in _meta] + + def _recursive_extract(data, path, seen_meta, level: int = 0) -> None: + if isinstance(data, dict): + data = [data] + if len(path) > 1: + for obj in data: + for val, key in zip(_meta, meta_keys): + if level + 1 == len(val): + seen_meta[key] = _pull_field(obj, val[-1]) + + _recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1) + else: + for obj in data: + recs = _pull_records(obj, path[0]) + recs = [ + nested_to_record(r, sep=sep, max_level=max_level) + if isinstance(r, dict) + else r + for r in recs + ] + + # For repeating the metadata later + lengths.append(len(recs)) + for val, key in zip(_meta, meta_keys): + if level + 1 > len(val): + meta_val = seen_meta[key] + else: + meta_val = _pull_field(obj, val[level:]) + meta_vals[key].append(meta_val) + records.extend(recs) + + _recursive_extract(data, record_path, {}, level=0) + + result = DataFrame(records) + + if record_prefix is not None: + result = result.rename(columns=lambda x: f"{record_prefix}{x}") + + # Data types, a problem + for k, v in meta_vals.items(): + if meta_prefix is not None: + k = meta_prefix + k + + if k in result: + raise ValueError( + f"Conflicting metadata name {k}, need distinguishing prefix " + ) + # GH 37782 + + values = np.array(v, dtype=object) + + if values.ndim > 1: + # GH 37782 + values = np.empty((len(v),), dtype=object) + for i, v in enumerate(v): + values[i] = v + + result[k] = values.repeat(lengths) + return result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/_table_schema.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/_table_schema.py new file mode 100644 index 00000000..3f2291ba --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/json/_table_schema.py @@ -0,0 +1,382 @@ +""" +Table Schema builders + +https://specs.frictionlessdata.io/table-schema/ +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + cast, +) +import warnings + +from 
pandas._libs import lib +from pandas._libs.json import ujson_loads +from pandas._libs.tslibs import timezones +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.base import _registry as registry +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_integer_dtype, + is_numeric_dtype, + is_string_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + PeriodDtype, +) + +from pandas import DataFrame +import pandas.core.common as com + +if TYPE_CHECKING: + from pandas._typing import ( + DtypeObj, + JSONSerializable, + ) + + from pandas import Series + from pandas.core.indexes.multi import MultiIndex + + +TABLE_SCHEMA_VERSION = "1.4.0" + + +def as_json_table_type(x: DtypeObj) -> str: + """ + Convert a NumPy / pandas type to its corresponding json_table. + + Parameters + ---------- + x : np.dtype or ExtensionDtype + + Returns + ------- + str + the Table Schema data types + + Notes + ----- + This table shows the relationship between NumPy / pandas dtypes, + and Table Schema dtypes. + + ============== ================= + Pandas type Table Schema type + ============== ================= + int64 integer + float64 number + bool boolean + datetime64[ns] datetime + timedelta64[ns] duration + object str + categorical any + =============== ================= + """ + if is_integer_dtype(x): + return "integer" + elif is_bool_dtype(x): + return "boolean" + elif is_numeric_dtype(x): + return "number" + elif lib.is_np_dtype(x, "M") or isinstance(x, (DatetimeTZDtype, PeriodDtype)): + return "datetime" + elif lib.is_np_dtype(x, "m"): + return "duration" + elif isinstance(x, ExtensionDtype): + return "any" + elif is_string_dtype(x): + return "string" + else: + return "any" + + +def set_default_names(data): + """Sets index names to 'index' for regular, or 'level_x' for Multi""" + if com.all_not_none(*data.index.names): + nms = data.index.names + if len(nms) == 1 and data.index.name == "index": + warnings.warn( + "Index name of 'index' is not round-trippable.", + stacklevel=find_stack_level(), + ) + elif len(nms) > 1 and any(x.startswith("level_") for x in nms): + warnings.warn( + "Index names beginning with 'level_' are not round-trippable.", + stacklevel=find_stack_level(), + ) + return data + + data = data.copy() + if data.index.nlevels > 1: + data.index.names = com.fill_missing_names(data.index.names) + else: + data.index.name = data.index.name or "index" + return data + + +def convert_pandas_type_to_json_field(arr) -> dict[str, JSONSerializable]: + dtype = arr.dtype + name: JSONSerializable + if arr.name is None: + name = "values" + else: + name = arr.name + field: dict[str, JSONSerializable] = { + "name": name, + "type": as_json_table_type(dtype), + } + + if isinstance(dtype, CategoricalDtype): + cats = dtype.categories + ordered = dtype.ordered + + field["constraints"] = {"enum": list(cats)} + field["ordered"] = ordered + elif isinstance(dtype, PeriodDtype): + field["freq"] = dtype.freq.freqstr + elif isinstance(dtype, DatetimeTZDtype): + if timezones.is_utc(dtype.tz): + # timezone.utc has no "zone" attr + field["tz"] = "UTC" + else: + # error: "tzinfo" has no attribute "zone" + field["tz"] = dtype.tz.zone # type: ignore[attr-defined] + elif isinstance(dtype, ExtensionDtype): + field["extDtype"] = dtype.name + return field + + +def convert_json_field_to_pandas_type(field) -> str | CategoricalDtype: + """ + Converts a JSON field descriptor into its corresponding NumPy / pandas type + + Parameters + ---------- + field + 
A JSON field descriptor + + Returns + ------- + dtype + + Raises + ------ + ValueError + If the type of the provided field is unknown or currently unsupported + + Examples + -------- + >>> convert_json_field_to_pandas_type({"name": "an_int", "type": "integer"}) + 'int64' + + >>> convert_json_field_to_pandas_type( + ... { + ... "name": "a_categorical", + ... "type": "any", + ... "constraints": {"enum": ["a", "b", "c"]}, + ... "ordered": True, + ... } + ... ) + CategoricalDtype(categories=['a', 'b', 'c'], ordered=True, categories_dtype=object) + + >>> convert_json_field_to_pandas_type({"name": "a_datetime", "type": "datetime"}) + 'datetime64[ns]' + + >>> convert_json_field_to_pandas_type( + ... {"name": "a_datetime_with_tz", "type": "datetime", "tz": "US/Central"} + ... ) + 'datetime64[ns, US/Central]' + """ + typ = field["type"] + if typ == "string": + return "object" + elif typ == "integer": + return field.get("extDtype", "int64") + elif typ == "number": + return field.get("extDtype", "float64") + elif typ == "boolean": + return field.get("extDtype", "bool") + elif typ == "duration": + return "timedelta64" + elif typ == "datetime": + if field.get("tz"): + return f"datetime64[ns, {field['tz']}]" + elif field.get("freq"): + # GH#47747 using datetime over period to minimize the change surface + return f"period[{field['freq']}]" + else: + return "datetime64[ns]" + elif typ == "any": + if "constraints" in field and "ordered" in field: + return CategoricalDtype( + categories=field["constraints"]["enum"], ordered=field["ordered"] + ) + elif "extDtype" in field: + return registry.find(field["extDtype"]) + else: + return "object" + + raise ValueError(f"Unsupported or invalid field type: {typ}") + + +def build_table_schema( + data: DataFrame | Series, + index: bool = True, + primary_key: bool | None = None, + version: bool = True, +) -> dict[str, JSONSerializable]: + """ + Create a Table schema from ``data``. + + Parameters + ---------- + data : Series, DataFrame + index : bool, default True + Whether to include ``data.index`` in the schema. + primary_key : bool or None, default True + Column names to designate as the primary key. + The default `None` will set `'primaryKey'` to the index + level or levels if the index is unique. + version : bool, default True + Whether to include a field `pandas_version` with the version + of pandas that last revised the table schema. This version + can be different from the installed pandas version. + + Returns + ------- + dict + + Notes + ----- + See `Table Schema + `__ for + conversion types. + Timedeltas as converted to ISO8601 duration format with + 9 decimal places after the seconds field for nanosecond precision. + + Categoricals are converted to the `any` dtype, and use the `enum` field + constraint to list the allowed values. The `ordered` attribute is included + in an `ordered` field. + + Examples + -------- + >>> from pandas.io.json._table_schema import build_table_schema + >>> df = pd.DataFrame( + ... {'A': [1, 2, 3], + ... 'B': ['a', 'b', 'c'], + ... 'C': pd.date_range('2016-01-01', freq='d', periods=3), + ... 
}, index=pd.Index(range(3), name='idx')) + >>> build_table_schema(df) + {'fields': \ +[{'name': 'idx', 'type': 'integer'}, \ +{'name': 'A', 'type': 'integer'}, \ +{'name': 'B', 'type': 'string'}, \ +{'name': 'C', 'type': 'datetime'}], \ +'primaryKey': ['idx'], \ +'pandas_version': '1.4.0'} + """ + if index is True: + data = set_default_names(data) + + schema: dict[str, Any] = {} + fields = [] + + if index: + if data.index.nlevels > 1: + data.index = cast("MultiIndex", data.index) + for level, name in zip(data.index.levels, data.index.names): + new_field = convert_pandas_type_to_json_field(level) + new_field["name"] = name + fields.append(new_field) + else: + fields.append(convert_pandas_type_to_json_field(data.index)) + + if data.ndim > 1: + for column, s in data.items(): + fields.append(convert_pandas_type_to_json_field(s)) + else: + fields.append(convert_pandas_type_to_json_field(data)) + + schema["fields"] = fields + if index and data.index.is_unique and primary_key is None: + if data.index.nlevels == 1: + schema["primaryKey"] = [data.index.name] + else: + schema["primaryKey"] = data.index.names + elif primary_key is not None: + schema["primaryKey"] = primary_key + + if version: + schema["pandas_version"] = TABLE_SCHEMA_VERSION + return schema + + +def parse_table_schema(json, precise_float: bool) -> DataFrame: + """ + Builds a DataFrame from a given schema + + Parameters + ---------- + json : + A JSON table schema + precise_float : bool + Flag controlling precision when decoding string to double values, as + dictated by ``read_json`` + + Returns + ------- + df : DataFrame + + Raises + ------ + NotImplementedError + If the JSON table schema contains either timezone or timedelta data + + Notes + ----- + Because :func:`DataFrame.to_json` uses the string 'index' to denote a + name-less :class:`Index`, this function sets the name of the returned + :class:`DataFrame` to ``None`` when said string is encountered with a + normal :class:`Index`. For a :class:`MultiIndex`, the same limitation + applies to any strings beginning with 'level_'. Therefore, an + :class:`Index` name of 'index' and :class:`MultiIndex` names starting + with 'level_' are not supported. + + See Also + -------- + build_table_schema : Inverse function. 
+ pandas.read_json + """ + table = ujson_loads(json, precise_float=precise_float) + col_order = [field["name"] for field in table["schema"]["fields"]] + df = DataFrame(table["data"], columns=col_order)[col_order] + + dtypes = { + field["name"]: convert_json_field_to_pandas_type(field) + for field in table["schema"]["fields"] + } + + # No ISO constructor for Timedelta as of yet, so need to raise + if "timedelta64" in dtypes.values(): + raise NotImplementedError( + 'table="orient" can not yet read ISO-formatted Timedelta data' + ) + + df = df.astype(dtypes) + + if "primaryKey" in table["schema"]: + df = df.set_index(table["schema"]["primaryKey"]) + if len(df.index.names) == 1: + if df.index.name == "index": + df.index.name = None + else: + df.index.names = [ + None if x.startswith("level_") else x for x in df.index.names + ] + + return df diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/orc.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/orc.py new file mode 100644 index 00000000..774f9d79 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/orc.py @@ -0,0 +1,264 @@ +""" orc compat """ +from __future__ import annotations + +import io +from types import ModuleType +from typing import ( + TYPE_CHECKING, + Any, + Literal, +) + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib +from pandas.compat import pa_version_under8p0 +from pandas.compat._optional import import_optional_dependency +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.common import is_unsigned_integer_dtype +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + IntervalDtype, + PeriodDtype, +) + +import pandas as pd +from pandas.core.indexes.api import default_index + +from pandas.io._util import arrow_string_types_mapper +from pandas.io.common import ( + get_handle, + is_fsspec_url, +) + +if TYPE_CHECKING: + import fsspec + import pyarrow.fs + + from pandas._typing import ( + DtypeBackend, + FilePath, + ReadBuffer, + WriteBuffer, + ) + + from pandas.core.frame import DataFrame + + +def read_orc( + path: FilePath | ReadBuffer[bytes], + columns: list[str] | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + filesystem: pyarrow.fs.FileSystem | fsspec.spec.AbstractFileSystem | None = None, + **kwargs: Any, +) -> DataFrame: + """ + Load an ORC object from the file path, returning a DataFrame. + + Parameters + ---------- + path : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``read()`` function. The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: + ``file://localhost/path/to/table.orc``. + columns : list, default None + If not None, only these columns will be read from the file. + Output always follows the ordering of the file and not the columns list. + This mirrors the original behaviour of + :external+pyarrow:py:meth:`pyarrow.orc.ORCFile.read`. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. 
versionadded:: 2.0 + + filesystem : fsspec or pyarrow filesystem, default None + Filesystem object to use when reading the parquet file. + + .. versionadded:: 2.1.0 + + **kwargs + Any additional kwargs are passed to pyarrow. + + Returns + ------- + DataFrame + + Notes + ----- + Before using this function you should read the :ref:`user guide about ORC ` + and :ref:`install optional dependencies `. + + If ``path`` is a URI scheme pointing to a local or remote file (e.g. "s3://"), + a ``pyarrow.fs`` filesystem will be attempted to read the file. You can also pass a + pyarrow or fsspec filesystem object into the filesystem keyword to override this + behavior. + + Examples + -------- + >>> result = pd.read_orc("example_pa.orc") # doctest: +SKIP + """ + # we require a newer version of pyarrow than we support for parquet + + orc = import_optional_dependency("pyarrow.orc") + + check_dtype_backend(dtype_backend) + + with get_handle(path, "rb", is_text=False) as handles: + source = handles.handle + if is_fsspec_url(path) and filesystem is None: + pa = import_optional_dependency("pyarrow") + pa_fs = import_optional_dependency("pyarrow.fs") + try: + filesystem, source = pa_fs.FileSystem.from_uri(path) + except (TypeError, pa.ArrowInvalid): + pass + + pa_table = orc.read_table( + source=source, columns=columns, filesystem=filesystem, **kwargs + ) + if dtype_backend is not lib.no_default: + if dtype_backend == "pyarrow": + df = pa_table.to_pandas(types_mapper=pd.ArrowDtype) + else: + from pandas.io._util import _arrow_dtype_mapping + + mapping = _arrow_dtype_mapping() + df = pa_table.to_pandas(types_mapper=mapping.get) + return df + else: + if using_pyarrow_string_dtype(): + types_mapper = arrow_string_types_mapper() + else: + types_mapper = None + return pa_table.to_pandas(types_mapper=types_mapper) + + +def to_orc( + df: DataFrame, + path: FilePath | WriteBuffer[bytes] | None = None, + *, + engine: Literal["pyarrow"] = "pyarrow", + index: bool | None = None, + engine_kwargs: dict[str, Any] | None = None, +) -> bytes | None: + """ + Write a DataFrame to the ORC format. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + df : DataFrame + The dataframe to be written to ORC. Raises NotImplementedError + if dtype of one or more columns is category, unsigned integers, + intervals, periods or sparse. + path : str, file-like object or None, default None + If a string, it will be used as Root Directory path + when writing a partitioned dataset. By file-like object, + we refer to objects with a write() method, such as a file handle + (e.g. via builtin open function). If path is None, + a bytes object is returned. + engine : str, default 'pyarrow' + ORC library to use. Pyarrow must be >= 7.0.0. + index : bool, optional + If ``True``, include the dataframe's index(es) in the file output. If + ``False``, they will not be written to the file. + If ``None``, similar to ``infer`` the dataframe's index(es) + will be saved. However, instead of being saved as values, + the RangeIndex will be stored as a range in the metadata so it + doesn't require much space and is faster. Other indexes will + be included as columns in the file output. + engine_kwargs : dict[str, Any] or None, default None + Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. + + Returns + ------- + bytes if no path argument is provided else None + + Raises + ------ + NotImplementedError + Dtype of one or more columns is category, unsigned integers, interval, + period or sparse. + ValueError + engine is not pyarrow. 
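+
+    (Editorial note, not upstream text) A minimal in-memory round trip,
+    assuming pyarrow is installed; illustrative only:
+
+    >>> import io
+    >>> df = pd.DataFrame({"a": [1, 2, 3]})
+    >>> data = df.to_orc()  # bytes, since no path was given  # doctest: +SKIP
+    >>> pd.read_orc(io.BytesIO(data))  # doctest: +SKIP
+       a
+    0  1
+    1  2
+    2  3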
+ + Notes + ----- + * Before using this function you should read the + :ref:`user guide about ORC ` and + :ref:`install optional dependencies `. + * This function requires `pyarrow `_ + library. + * For supported dtypes please refer to `supported ORC features in Arrow + `__. + * Currently timezones in datetime columns are not preserved when a + dataframe is converted into ORC files. + """ + if index is None: + index = df.index.names[0] is not None + if engine_kwargs is None: + engine_kwargs = {} + + # validate index + # -------------- + + # validate that we have only a default index + # raise on anything else as we don't serialize the index + + if not df.index.equals(default_index(len(df))): + raise ValueError( + "orc does not support serializing a non-default index for the index; " + "you can .reset_index() to make the index into column(s)" + ) + + if df.index.name is not None: + raise ValueError("orc does not serialize index meta-data on a default index") + + # If unsupported dtypes are found raise NotImplementedError + # In Pyarrow 8.0.0 this check will no longer be needed + if pa_version_under8p0: + for dtype in df.dtypes: + if isinstance( + dtype, (IntervalDtype, CategoricalDtype, PeriodDtype) + ) or is_unsigned_integer_dtype(dtype): + raise NotImplementedError( + "The dtype of one or more columns is not supported yet." + ) + + if engine != "pyarrow": + raise ValueError("engine must be 'pyarrow'") + engine = import_optional_dependency(engine, min_version="7.0.0") + pa = import_optional_dependency("pyarrow") + orc = import_optional_dependency("pyarrow.orc") + + was_none = path is None + if was_none: + path = io.BytesIO() + assert path is not None # For mypy + with get_handle(path, "wb", is_text=False) as handles: + assert isinstance(engine, ModuleType) # For mypy + try: + orc.write_table( + engine.Table.from_pandas(df, preserve_index=index), + handles.handle, + **engine_kwargs, + ) + except (TypeError, pa.ArrowNotImplementedError) as e: + raise NotImplementedError( + "The dtype of one or more columns is not supported yet." 
+ ) from e + + if was_none: + assert isinstance(path, io.BytesIO) # For mypy + return path.getvalue() + return None diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/parquet.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/parquet.py new file mode 100644 index 00000000..f51b98a9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/parquet.py @@ -0,0 +1,679 @@ +""" parquet compat """ +from __future__ import annotations + +import io +import json +import os +from typing import ( + TYPE_CHECKING, + Any, + Literal, +) +import warnings +from warnings import catch_warnings + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency +from pandas.errors import AbstractMethodError +from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +import pandas as pd +from pandas import ( + DataFrame, + get_option, +) +from pandas.core.shared_docs import _shared_docs + +from pandas.io._util import arrow_string_types_mapper +from pandas.io.common import ( + IOHandles, + get_handle, + is_fsspec_url, + is_url, + stringify_path, +) + +if TYPE_CHECKING: + from pandas._typing import ( + DtypeBackend, + FilePath, + ReadBuffer, + StorageOptions, + WriteBuffer, + ) + + +def get_engine(engine: str) -> BaseImpl: + """return our implementation""" + if engine == "auto": + engine = get_option("io.parquet.engine") + + if engine == "auto": + # try engines in this order + engine_classes = [PyArrowImpl, FastParquetImpl] + + error_msgs = "" + for engine_class in engine_classes: + try: + return engine_class() + except ImportError as err: + error_msgs += "\n - " + str(err) + + raise ImportError( + "Unable to find a usable engine; " + "tried using: 'pyarrow', 'fastparquet'.\n" + "A suitable version of " + "pyarrow or fastparquet is required for parquet " + "support.\n" + "Trying to import the above resulted in these errors:" + f"{error_msgs}" + ) + + if engine == "pyarrow": + return PyArrowImpl() + elif engine == "fastparquet": + return FastParquetImpl() + + raise ValueError("engine must be one of 'pyarrow', 'fastparquet'") + + +def _get_path_or_handle( + path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], + fs: Any, + storage_options: StorageOptions | None = None, + mode: str = "rb", + is_dir: bool = False, +) -> tuple[ + FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any +]: + """File handling for PyArrow.""" + path_or_handle = stringify_path(path) + if fs is not None: + pa_fs = import_optional_dependency("pyarrow.fs", errors="ignore") + fsspec = import_optional_dependency("fsspec", errors="ignore") + if pa_fs is not None and isinstance(fs, pa_fs.FileSystem): + if storage_options: + raise NotImplementedError( + "storage_options not supported with a pyarrow FileSystem." 
+ ) + elif fsspec is not None and isinstance(fs, fsspec.spec.AbstractFileSystem): + pass + else: + raise ValueError( + f"filesystem must be a pyarrow or fsspec FileSystem, " + f"not a {type(fs).__name__}" + ) + if is_fsspec_url(path_or_handle) and fs is None: + if storage_options is None: + pa = import_optional_dependency("pyarrow") + pa_fs = import_optional_dependency("pyarrow.fs") + + try: + fs, path_or_handle = pa_fs.FileSystem.from_uri(path) + except (TypeError, pa.ArrowInvalid): + pass + if fs is None: + fsspec = import_optional_dependency("fsspec") + fs, path_or_handle = fsspec.core.url_to_fs( + path_or_handle, **(storage_options or {}) + ) + elif storage_options and (not is_url(path_or_handle) or mode != "rb"): + # can't write to a remote url + # without making use of fsspec at the moment + raise ValueError("storage_options passed with buffer, or non-supported URL") + + handles = None + if ( + not fs + and not is_dir + and isinstance(path_or_handle, str) + and not os.path.isdir(path_or_handle) + ): + # use get_handle only when we are very certain that it is not a directory + # fsspec resources can also point to directories + # this branch is used for example when reading from non-fsspec URLs + handles = get_handle( + path_or_handle, mode, is_text=False, storage_options=storage_options + ) + fs = None + path_or_handle = handles.handle + return path_or_handle, handles, fs + + +class BaseImpl: + @staticmethod + def validate_dataframe(df: DataFrame) -> None: + if not isinstance(df, DataFrame): + raise ValueError("to_parquet only supports IO with DataFrames") + + def write(self, df: DataFrame, path, compression, **kwargs): + raise AbstractMethodError(self) + + def read(self, path, columns=None, **kwargs) -> DataFrame: + raise AbstractMethodError(self) + + +class PyArrowImpl(BaseImpl): + def __init__(self) -> None: + import_optional_dependency( + "pyarrow", extra="pyarrow is required for parquet support." 
+ ) + import pyarrow.parquet + + # import utils to register the pyarrow extension types + import pandas.core.arrays.arrow.extension_types # pyright: ignore[reportUnusedImport] # noqa: F401,E501 + + self.api = pyarrow + + def write( + self, + df: DataFrame, + path: FilePath | WriteBuffer[bytes], + compression: str | None = "snappy", + index: bool | None = None, + storage_options: StorageOptions | None = None, + partition_cols: list[str] | None = None, + filesystem=None, + **kwargs, + ) -> None: + self.validate_dataframe(df) + + from_pandas_kwargs: dict[str, Any] = {"schema": kwargs.pop("schema", None)} + if index is not None: + from_pandas_kwargs["preserve_index"] = index + + table = self.api.Table.from_pandas(df, **from_pandas_kwargs) + + if df.attrs: + df_metadata = {"PANDAS_ATTRS": json.dumps(df.attrs)} + existing_metadata = table.schema.metadata + merged_metadata = {**existing_metadata, **df_metadata} + table = table.replace_schema_metadata(merged_metadata) + + path_or_handle, handles, filesystem = _get_path_or_handle( + path, + filesystem, + storage_options=storage_options, + mode="wb", + is_dir=partition_cols is not None, + ) + if ( + isinstance(path_or_handle, io.BufferedWriter) + and hasattr(path_or_handle, "name") + and isinstance(path_or_handle.name, (str, bytes)) + ): + path_or_handle = path_or_handle.name + if isinstance(path_or_handle, bytes): + path_or_handle = path_or_handle.decode() + + try: + if partition_cols is not None: + # writes to multiple files under the given path + self.api.parquet.write_to_dataset( + table, + path_or_handle, + compression=compression, + partition_cols=partition_cols, + filesystem=filesystem, + **kwargs, + ) + else: + # write to single output file + self.api.parquet.write_table( + table, + path_or_handle, + compression=compression, + filesystem=filesystem, + **kwargs, + ) + finally: + if handles is not None: + handles.close() + + def read( + self, + path, + columns=None, + filters=None, + use_nullable_dtypes: bool = False, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + storage_options: StorageOptions | None = None, + filesystem=None, + **kwargs, + ) -> DataFrame: + kwargs["use_pandas_metadata"] = True + + to_pandas_kwargs = {} + if dtype_backend == "numpy_nullable": + from pandas.io._util import _arrow_dtype_mapping + + mapping = _arrow_dtype_mapping() + to_pandas_kwargs["types_mapper"] = mapping.get + elif dtype_backend == "pyarrow": + to_pandas_kwargs["types_mapper"] = pd.ArrowDtype # type: ignore[assignment] # noqa: E501 + elif using_pyarrow_string_dtype(): + to_pandas_kwargs["types_mapper"] = arrow_string_types_mapper() + + manager = get_option("mode.data_manager") + if manager == "array": + to_pandas_kwargs["split_blocks"] = True # type: ignore[assignment] + + path_or_handle, handles, filesystem = _get_path_or_handle( + path, + filesystem, + storage_options=storage_options, + mode="rb", + ) + try: + pa_table = self.api.parquet.read_table( + path_or_handle, + columns=columns, + filesystem=filesystem, + filters=filters, + **kwargs, + ) + result = pa_table.to_pandas(**to_pandas_kwargs) + + if manager == "array": + result = result._as_manager("array", copy=False) + + if pa_table.schema.metadata: + if b"PANDAS_ATTRS" in pa_table.schema.metadata: + df_metadata = pa_table.schema.metadata[b"PANDAS_ATTRS"] + result.attrs = json.loads(df_metadata) + return result + finally: + if handles is not None: + handles.close() + + +class FastParquetImpl(BaseImpl): + def __init__(self) -> None: + # since pandas is a dependency of fastparquet + # 
we need to import on first use + fastparquet = import_optional_dependency( + "fastparquet", extra="fastparquet is required for parquet support." + ) + self.api = fastparquet + + def write( + self, + df: DataFrame, + path, + compression: Literal["snappy", "gzip", "brotli"] | None = "snappy", + index=None, + partition_cols=None, + storage_options: StorageOptions | None = None, + filesystem=None, + **kwargs, + ) -> None: + self.validate_dataframe(df) + + if "partition_on" in kwargs and partition_cols is not None: + raise ValueError( + "Cannot use both partition_on and " + "partition_cols. Use partition_cols for partitioning data" + ) + if "partition_on" in kwargs: + partition_cols = kwargs.pop("partition_on") + + if partition_cols is not None: + kwargs["file_scheme"] = "hive" + + if filesystem is not None: + raise NotImplementedError( + "filesystem is not implemented for the fastparquet engine." + ) + + # cannot use get_handle as write() does not accept file buffers + path = stringify_path(path) + if is_fsspec_url(path): + fsspec = import_optional_dependency("fsspec") + + # if filesystem is provided by fsspec, file must be opened in 'wb' mode. + kwargs["open_with"] = lambda path, _: fsspec.open( + path, "wb", **(storage_options or {}) + ).open() + elif storage_options: + raise ValueError( + "storage_options passed with file object or non-fsspec file path" + ) + + with catch_warnings(record=True): + self.api.write( + path, + df, + compression=compression, + write_index=index, + partition_on=partition_cols, + **kwargs, + ) + + def read( + self, + path, + columns=None, + filters=None, + storage_options: StorageOptions | None = None, + filesystem=None, + **kwargs, + ) -> DataFrame: + parquet_kwargs: dict[str, Any] = {} + use_nullable_dtypes = kwargs.pop("use_nullable_dtypes", False) + dtype_backend = kwargs.pop("dtype_backend", lib.no_default) + # We are disabling nullable dtypes for fastparquet pending discussion + parquet_kwargs["pandas_nulls"] = False + if use_nullable_dtypes: + raise ValueError( + "The 'use_nullable_dtypes' argument is not supported for the " + "fastparquet engine" + ) + if dtype_backend is not lib.no_default: + raise ValueError( + "The 'dtype_backend' argument is not supported for the " + "fastparquet engine" + ) + if filesystem is not None: + raise NotImplementedError( + "filesystem is not implemented for the fastparquet engine." 
+ ) + path = stringify_path(path) + handles = None + if is_fsspec_url(path): + fsspec = import_optional_dependency("fsspec") + + parquet_kwargs["fs"] = fsspec.open(path, "rb", **(storage_options or {})).fs + elif isinstance(path, str) and not os.path.isdir(path): + # use get_handle only when we are very certain that it is not a directory + # fsspec resources can also point to directories + # this branch is used for example when reading from non-fsspec URLs + handles = get_handle( + path, "rb", is_text=False, storage_options=storage_options + ) + path = handles.handle + + try: + parquet_file = self.api.ParquetFile(path, **parquet_kwargs) + return parquet_file.to_pandas(columns=columns, filters=filters, **kwargs) + finally: + if handles is not None: + handles.close() + + +@doc(storage_options=_shared_docs["storage_options"]) +def to_parquet( + df: DataFrame, + path: FilePath | WriteBuffer[bytes] | None = None, + engine: str = "auto", + compression: str | None = "snappy", + index: bool | None = None, + storage_options: StorageOptions | None = None, + partition_cols: list[str] | None = None, + filesystem: Any = None, + **kwargs, +) -> bytes | None: + """ + Write a DataFrame to the parquet format. + + Parameters + ---------- + df : DataFrame + path : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``write()`` function. If None, the result is + returned as bytes. If a string, it will be used as Root Directory path + when writing a partitioned dataset. The engine fastparquet does not + accept file-like objects. + + .. versionchanged:: 1.2.0 + + engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' + Parquet library to use. If 'auto', then the option + ``io.parquet.engine`` is used. The default ``io.parquet.engine`` + behavior is to try 'pyarrow', falling back to 'fastparquet' if + 'pyarrow' is unavailable. + + When using the ``'pyarrow'`` engine and no storage options are provided + and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec`` + (e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first. + Use the filesystem keyword with an instantiated fsspec filesystem + if you wish to use its implementation. + compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}}, + default 'snappy'. Name of the compression to use. Use ``None`` + for no compression. + index : bool, default None + If ``True``, include the dataframe's index(es) in the file output. If + ``False``, they will not be written to the file. + If ``None``, similar to ``True`` the dataframe's index(es) + will be saved. However, instead of being saved as values, + the RangeIndex will be stored as a range in the metadata so it + doesn't require much space and is faster. Other indexes will + be included as columns in the file output. + partition_cols : str or list, optional, default None + Column names by which to partition the dataset. + Columns are partitioned in the order they are given. + Must be None if path is not a string. + {storage_options} + + .. versionadded:: 1.2.0 + + filesystem : fsspec or pyarrow filesystem, default None + Filesystem object to use when reading the parquet file. Only implemented + for ``engine="pyarrow"``. + + .. 
versionadded:: 2.1.0 + + kwargs + Additional keyword arguments passed to the engine + + Returns + ------- + bytes if no path argument is provided else None + """ + if isinstance(partition_cols, str): + partition_cols = [partition_cols] + impl = get_engine(engine) + + path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path + + impl.write( + df, + path_or_buf, + compression=compression, + index=index, + partition_cols=partition_cols, + storage_options=storage_options, + filesystem=filesystem, + **kwargs, + ) + + if path is None: + assert isinstance(path_or_buf, io.BytesIO) + return path_or_buf.getvalue() + else: + return None + + +@doc(storage_options=_shared_docs["storage_options"]) +def read_parquet( + path: FilePath | ReadBuffer[bytes], + engine: str = "auto", + columns: list[str] | None = None, + storage_options: StorageOptions | None = None, + use_nullable_dtypes: bool | lib.NoDefault = lib.no_default, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + filesystem: Any = None, + filters: list[tuple] | list[list[tuple]] | None = None, + **kwargs, +) -> DataFrame: + """ + Load a parquet object from the file path, returning a DataFrame. + + Parameters + ---------- + path : str, path object or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``read()`` function. + The string could be a URL. Valid URL schemes include http, ftp, s3, + gs, and file. For file URLs, a host is expected. A local file could be: + ``file://localhost/path/to/table.parquet``. + A file URL can also be a path to a directory that contains multiple + partitioned parquet files. Both pyarrow and fastparquet support + paths to directories as well as file URLs. A directory path could be: + ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``. + engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' + Parquet library to use. If 'auto', then the option + ``io.parquet.engine`` is used. The default ``io.parquet.engine`` + behavior is to try 'pyarrow', falling back to 'fastparquet' if + 'pyarrow' is unavailable. + + When using the ``'pyarrow'`` engine and no storage options are provided + and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec`` + (e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first. + Use the filesystem keyword with an instantiated fsspec filesystem + if you wish to use its implementation. + columns : list, default=None + If not None, only these columns will be read from the file. + {storage_options} + + .. versionadded:: 1.3.0 + + use_nullable_dtypes : bool, default False + If True, use dtypes that use ``pd.NA`` as missing value indicator + for the resulting DataFrame. (only applicable for the ``pyarrow`` + engine) + As new dtypes are added that support ``pd.NA`` in the future, the + output with this option will change to use those dtypes. + Note: this is an experimental option, and behaviour (e.g. additional + support dtypes) may change without notice. + + .. deprecated:: 2.0 + + dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. 
versionadded:: 2.0 + + filesystem : fsspec or pyarrow filesystem, default None + Filesystem object to use when reading the parquet file. Only implemented + for ``engine="pyarrow"``. + + .. versionadded:: 2.1.0 + + filters : List[Tuple] or List[List[Tuple]], default None + To filter out data. + Filter syntax: [[(column, op, val), ...],...] + where op is [==, =, >, >=, <, <=, !=, in, not in] + The innermost tuples are transposed into a set of filters applied + through an `AND` operation. + The outer list combines these sets of filters through an `OR` + operation. + A single list of tuples can also be used, meaning that no `OR` + operation between set of filters is to be conducted. + + Using this argument will NOT result in row-wise filtering of the final + partitions unless ``engine="pyarrow"`` is also specified. For + other engines, filtering is only performed at the partition level, that is, + to prevent the loading of some row-groups and/or files. + + .. versionadded:: 2.1.0 + + **kwargs + Any additional kwargs are passed to the engine. + + Returns + ------- + DataFrame + + See Also + -------- + DataFrame.to_parquet : Create a parquet object that serializes a DataFrame. + + Examples + -------- + >>> original_df = pd.DataFrame( + ... {{"foo": range(5), "bar": range(5, 10)}} + ... ) + >>> original_df + foo bar + 0 0 5 + 1 1 6 + 2 2 7 + 3 3 8 + 4 4 9 + >>> df_parquet_bytes = original_df.to_parquet() + >>> from io import BytesIO + >>> restored_df = pd.read_parquet(BytesIO(df_parquet_bytes)) + >>> restored_df + foo bar + 0 0 5 + 1 1 6 + 2 2 7 + 3 3 8 + 4 4 9 + >>> restored_df.equals(original_df) + True + >>> restored_bar = pd.read_parquet(BytesIO(df_parquet_bytes), columns=["bar"]) + >>> restored_bar + bar + 0 5 + 1 6 + 2 7 + 3 8 + 4 9 + >>> restored_bar.equals(original_df[['bar']]) + True + + The function uses `kwargs` that are passed directly to the engine. + In the following example, we use the `filters` argument of the pyarrow + engine to filter the rows of the DataFrame. + + Since `pyarrow` is the default engine, we can omit the `engine` argument. + Note that the `filters` argument is implemented by the `pyarrow` engine, + which can benefit from multithreading and also potentially be more + economical in terms of memory. + + >>> sel = [("foo", ">", 2)] + >>> restored_part = pd.read_parquet(BytesIO(df_parquet_bytes), filters=sel) + >>> restored_part + foo bar + 0 3 8 + 1 4 9 + """ + + impl = get_engine(engine) + + if use_nullable_dtypes is not lib.no_default: + msg = ( + "The argument 'use_nullable_dtypes' is deprecated and will be removed " + "in a future version." + ) + if use_nullable_dtypes is True: + msg += ( + "Use dtype_backend='numpy_nullable' instead of use_nullable_dtype=True." 
+ ) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + else: + use_nullable_dtypes = False + check_dtype_backend(dtype_backend) + + return impl.read( + path, + columns=columns, + filters=filters, + storage_options=storage_options, + use_nullable_dtypes=use_nullable_dtypes, + dtype_backend=dtype_backend, + filesystem=filesystem, + **kwargs, + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/__init__.py new file mode 100644 index 00000000..ff11968d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/__init__.py @@ -0,0 +1,9 @@ +from pandas.io.parsers.readers import ( + TextFileReader, + TextParser, + read_csv, + read_fwf, + read_table, +) + +__all__ = ["TextFileReader", "TextParser", "read_csv", "read_fwf", "read_table"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/arrow_parser_wrapper.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/arrow_parser_wrapper.py new file mode 100644 index 00000000..71bfb00a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/arrow_parser_wrapper.py @@ -0,0 +1,227 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency + +from pandas.core.dtypes.inference import is_integer + +import pandas as pd +from pandas import DataFrame + +from pandas.io._util import ( + _arrow_dtype_mapping, + arrow_string_types_mapper, +) +from pandas.io.parsers.base_parser import ParserBase + +if TYPE_CHECKING: + from pandas._typing import ReadBuffer + + +class ArrowParserWrapper(ParserBase): + """ + Wrapper for the pyarrow engine for read_csv() + """ + + def __init__(self, src: ReadBuffer[bytes], **kwds) -> None: + super().__init__(kwds) + self.kwds = kwds + self.src = src + + self._parse_kwds() + + def _parse_kwds(self): + """ + Validates keywords before passing to pyarrow. + """ + encoding: str | None = self.kwds.get("encoding") + self.encoding = "utf-8" if encoding is None else encoding + + na_values = self.kwds["na_values"] + if isinstance(na_values, dict): + raise ValueError( + "The pyarrow engine doesn't support passing a dict for na_values" + ) + self.na_values = list(self.kwds["na_values"]) + + def _get_pyarrow_options(self) -> None: + """ + Rename some arguments to pass to pyarrow + """ + mapping = { + "usecols": "include_columns", + "na_values": "null_values", + "escapechar": "escape_char", + "skip_blank_lines": "ignore_empty_lines", + "decimal": "decimal_point", + } + for pandas_name, pyarrow_name in mapping.items(): + if pandas_name in self.kwds and self.kwds.get(pandas_name) is not None: + self.kwds[pyarrow_name] = self.kwds.pop(pandas_name) + + # Date format handling + # If we get a string, we need to convert it into a list for pyarrow + # If we get a dict, we want to parse those separately + date_format = self.date_format + if isinstance(date_format, str): + date_format = [date_format] + else: + # In case of dict, we don't want to propagate through, so + # just set to pyarrow default of None + + # Ideally, in future we disable pyarrow dtype inference (read in as string) + # to prevent misreads. 
+ date_format = None + self.kwds["timestamp_parsers"] = date_format + + self.parse_options = { + option_name: option_value + for option_name, option_value in self.kwds.items() + if option_value is not None + and option_name + in ("delimiter", "quote_char", "escape_char", "ignore_empty_lines") + } + self.convert_options = { + option_name: option_value + for option_name, option_value in self.kwds.items() + if option_value is not None + and option_name + in ( + "include_columns", + "null_values", + "true_values", + "false_values", + "decimal_point", + "timestamp_parsers", + ) + } + self.convert_options["strings_can_be_null"] = "" in self.kwds["null_values"] + self.read_options = { + "autogenerate_column_names": self.header is None, + "skip_rows": self.header + if self.header is not None + else self.kwds["skiprows"], + "encoding": self.encoding, + } + + def _finalize_pandas_output(self, frame: DataFrame) -> DataFrame: + """ + Processes data read in based on kwargs. + + Parameters + ---------- + frame: DataFrame + The DataFrame to process. + + Returns + ------- + DataFrame + The processed DataFrame. + """ + num_cols = len(frame.columns) + multi_index_named = True + if self.header is None: + if self.names is None: + if self.header is None: + self.names = range(num_cols) + if len(self.names) != num_cols: + # usecols is passed through to pyarrow, we only handle index col here + # The only way self.names is not the same length as number of cols is + # if we have int index_col. We should just pad the names(they will get + # removed anyways) to expected length then. + self.names = list(range(num_cols - len(self.names))) + self.names + multi_index_named = False + frame.columns = self.names + # we only need the frame not the names + _, frame = self._do_date_conversions(frame.columns, frame) + if self.index_col is not None: + index_to_set = self.index_col.copy() + for i, item in enumerate(self.index_col): + if is_integer(item): + index_to_set[i] = frame.columns[item] + # String case + elif item not in frame.columns: + raise ValueError(f"Index {item} invalid") + + # Process dtype for index_col and drop from dtypes + if self.dtype is not None: + key, new_dtype = ( + (item, self.dtype.get(item)) + if self.dtype.get(item) is not None + else (frame.columns[item], self.dtype.get(frame.columns[item])) + ) + if new_dtype is not None: + frame[key] = frame[key].astype(new_dtype) + del self.dtype[key] + + frame.set_index(index_to_set, drop=True, inplace=True) + # Clear names if headerless and no name given + if self.header is None and not multi_index_named: + frame.index.names = [None] * len(frame.index.names) + + if self.dtype is not None: + # Ignore non-existent columns from dtype mapping + # like other parsers do + if isinstance(self.dtype, dict): + self.dtype = {k: v for k, v in self.dtype.items() if k in frame.columns} + try: + frame = frame.astype(self.dtype) + except TypeError as e: + # GH#44901 reraise to keep api consistent + raise ValueError(e) + return frame + + def read(self) -> DataFrame: + """ + Reads the contents of a CSV file into a DataFrame and + processes it according to the kwargs passed in the + constructor. + + Returns + ------- + DataFrame + The DataFrame created from the CSV file. 
+ """ + pa = import_optional_dependency("pyarrow") + pyarrow_csv = import_optional_dependency("pyarrow.csv") + self._get_pyarrow_options() + + table = pyarrow_csv.read_csv( + self.src, + read_options=pyarrow_csv.ReadOptions(**self.read_options), + parse_options=pyarrow_csv.ParseOptions(**self.parse_options), + convert_options=pyarrow_csv.ConvertOptions(**self.convert_options), + ) + + dtype_backend = self.kwds["dtype_backend"] + + # Convert all pa.null() cols -> float64 (non nullable) + # else Int64 (nullable case, see below) + if dtype_backend is lib.no_default: + new_schema = table.schema + new_type = pa.float64() + for i, arrow_type in enumerate(table.schema.types): + if pa.types.is_null(arrow_type): + new_schema = new_schema.set( + i, new_schema.field(i).with_type(new_type) + ) + + table = table.cast(new_schema) + + if dtype_backend == "pyarrow": + frame = table.to_pandas(types_mapper=pd.ArrowDtype) + elif dtype_backend == "numpy_nullable": + # Modify the default mapping to also + # map null to Int64 (to match other engines) + dtype_mapping = _arrow_dtype_mapping() + dtype_mapping[pa.null()] = pd.Int64Dtype() + frame = table.to_pandas(types_mapper=dtype_mapping.get) + elif using_pyarrow_string_dtype(): + frame = table.to_pandas(types_mapper=arrow_string_types_mapper()) + else: + frame = table.to_pandas() + return self._finalize_pandas_output(frame) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/base_parser.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/base_parser.py new file mode 100644 index 00000000..6b1daa96 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/base_parser.py @@ -0,0 +1,1426 @@ +from __future__ import annotations + +from collections import defaultdict +from copy import copy +import csv +import datetime +from enum import Enum +import itertools +from typing import ( + TYPE_CHECKING, + Any, + Callable, + cast, + final, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + parsers, +) +import pandas._libs.ops as libops +from pandas._libs.parsers import STR_NA_VALUES +from pandas._libs.tslibs import parsing +from pandas.compat._optional import import_optional_dependency +from pandas.errors import ( + ParserError, + ParserWarning, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.astype import astype_array +from pandas.core.dtypes.common import ( + ensure_object, + is_bool_dtype, + is_dict_like, + is_extension_array_dtype, + is_float_dtype, + is_integer, + is_integer_dtype, + is_list_like, + is_object_dtype, + is_scalar, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + ExtensionDtype, +) +from pandas.core.dtypes.missing import isna + +from pandas import ( + ArrowDtype, + DataFrame, + DatetimeIndex, + StringDtype, + concat, +) +from pandas.core import algorithms +from pandas.core.arrays import ( + ArrowExtensionArray, + BooleanArray, + Categorical, + ExtensionArray, + FloatingArray, + IntegerArray, +) +from pandas.core.arrays.boolean import BooleanDtype +from pandas.core.indexes.api import ( + Index, + MultiIndex, + default_index, + ensure_index_from_sequences, +) +from pandas.core.series import Series +from pandas.core.tools import datetimes as tools + +from pandas.io.common import is_potential_multi_index + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterable, + Mapping, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + DtypeArg, + DtypeObj, + Scalar, + ) 
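+
+# An illustrative sketch of the ``parse_dates`` shapes the parsers below
+# accept (column names here are placeholders, not a fixed API);
+# _validate_parse_dates_arg at the bottom of this module rejects anything
+# that is not a bool, list, or dict:
+#
+#   parse_dates=True                        # try to parse the index
+#   parse_dates=["start", 2]                # parse each listed column
+#   parse_dates=[["year", "month", "day"]]  # combine columns into one date
+#   parse_dates={"when": ["date", "time"]}  # combine and name the result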
+ + +class ParserBase: + class BadLineHandleMethod(Enum): + ERROR = 0 + WARN = 1 + SKIP = 2 + + _implicit_index: bool + _first_chunk: bool + keep_default_na: bool + dayfirst: bool + cache_dates: bool + keep_date_col: bool + usecols_dtype: str | None + + def __init__(self, kwds) -> None: + self._implicit_index = False + + self.names = kwds.get("names") + self.orig_names: Sequence[Hashable] | None = None + + self.index_col = kwds.get("index_col", None) + self.unnamed_cols: set = set() + self.index_names: Sequence[Hashable] | None = None + self.col_names: Sequence[Hashable] | None = None + + self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False)) + self._parse_date_cols: Iterable = [] + self.date_parser = kwds.pop("date_parser", lib.no_default) + self.date_format = kwds.pop("date_format", None) + self.dayfirst = kwds.pop("dayfirst", False) + self.keep_date_col = kwds.pop("keep_date_col", False) + + self.na_values = kwds.get("na_values") + self.na_fvalues = kwds.get("na_fvalues") + self.na_filter = kwds.get("na_filter", False) + self.keep_default_na = kwds.get("keep_default_na", True) + + self.dtype = copy(kwds.get("dtype", None)) + self.converters = kwds.get("converters") + self.dtype_backend = kwds.get("dtype_backend") + + self.true_values = kwds.get("true_values") + self.false_values = kwds.get("false_values") + self.cache_dates = kwds.pop("cache_dates", True) + + self._date_conv = _make_date_converter( + date_parser=self.date_parser, + date_format=self.date_format, + dayfirst=self.dayfirst, + cache_dates=self.cache_dates, + ) + + # validate header options for mi + self.header = kwds.get("header") + if is_list_like(self.header, allow_sets=False): + if kwds.get("usecols"): + raise ValueError( + "cannot specify usecols when specifying a multi-index header" + ) + if kwds.get("names"): + raise ValueError( + "cannot specify names when specifying a multi-index header" + ) + + # validate index_col that only contains integers + if self.index_col is not None: + # In this case we can pin down index_col as list[int] + if is_integer(self.index_col): + self.index_col = [self.index_col] + elif not ( + is_list_like(self.index_col, allow_sets=False) + and all(map(is_integer, self.index_col)) + ): + raise ValueError( + "index_col must only contain row numbers " + "when specifying a multi-index header" + ) + else: + self.index_col = list(self.index_col) + + self._name_processed = False + + self._first_chunk = True + + self.usecols, self.usecols_dtype = self._validate_usecols_arg(kwds["usecols"]) + + # Fallback to error to pass a sketchy test(test_override_set_noconvert_columns) + # Normally, this arg would get pre-processed earlier on + self.on_bad_lines = kwds.get("on_bad_lines", self.BadLineHandleMethod.ERROR) + + def _validate_parse_dates_presence(self, columns: Sequence[Hashable]) -> Iterable: + """ + Check if parse_dates are in columns. + + If user has provided names for parse_dates, check if those columns + are available. + + Parameters + ---------- + columns : list + List of names of the dataframe. + + Returns + ------- + The names of the columns which will get parsed later if a dict or list + is given as specification. + + Raises + ------ + ValueError + If column to parse_date is not in dataframe. 
+ + """ + cols_needed: Iterable + if is_dict_like(self.parse_dates): + cols_needed = itertools.chain(*self.parse_dates.values()) + elif is_list_like(self.parse_dates): + # a column in parse_dates could be represented + # ColReference = Union[int, str] + # DateGroups = List[ColReference] + # ParseDates = Union[DateGroups, List[DateGroups], + # Dict[ColReference, DateGroups]] + cols_needed = itertools.chain.from_iterable( + col if is_list_like(col) and not isinstance(col, tuple) else [col] + for col in self.parse_dates + ) + else: + cols_needed = [] + + cols_needed = list(cols_needed) + + # get only columns that are references using names (str), not by index + missing_cols = ", ".join( + sorted( + { + col + for col in cols_needed + if isinstance(col, str) and col not in columns + } + ) + ) + if missing_cols: + raise ValueError( + f"Missing column provided to 'parse_dates': '{missing_cols}'" + ) + # Convert positions to actual column names + return [ + col if (isinstance(col, str) or col in columns) else columns[col] + for col in cols_needed + ] + + def close(self) -> None: + pass + + @final + @property + def _has_complex_date_col(self) -> bool: + return isinstance(self.parse_dates, dict) or ( + isinstance(self.parse_dates, list) + and len(self.parse_dates) > 0 + and isinstance(self.parse_dates[0], list) + ) + + @final + def _should_parse_dates(self, i: int) -> bool: + if lib.is_bool(self.parse_dates): + return bool(self.parse_dates) + else: + if self.index_names is not None: + name = self.index_names[i] + else: + name = None + j = i if self.index_col is None else self.index_col[i] + + return (j in self.parse_dates) or ( + name is not None and name in self.parse_dates + ) + + @final + def _extract_multi_indexer_columns( + self, + header, + index_names: Sequence[Hashable] | None, + passed_names: bool = False, + ) -> tuple[ + Sequence[Hashable], Sequence[Hashable] | None, Sequence[Hashable] | None, bool + ]: + """ + Extract and return the names, index_names, col_names if the column + names are a MultiIndex. + + Parameters + ---------- + header: list of lists + The header rows + index_names: list, optional + The names of the future index + passed_names: bool, default False + A flag specifying if names where passed + + """ + if len(header) < 2: + return header[0], index_names, None, passed_names + + # the names are the tuples of the header that are not the index cols + # 0 is the name of the index, assuming index_col is a list of column + # numbers + ic = self.index_col + if ic is None: + ic = [] + + if not isinstance(ic, (list, tuple, np.ndarray)): + ic = [ic] + sic = set(ic) + + # clean the index_names + index_names = header.pop(-1) + index_names, _, _ = self._clean_index_names(index_names, self.index_col) + + # extract the columns + field_count = len(header[0]) + + # check if header lengths are equal + if not all(len(header_iter) == field_count for header_iter in header[1:]): + raise ParserError("Header rows must have an equal number of columns.") + + def extract(r): + return tuple(r[i] for i in range(field_count) if i not in sic) + + columns = list(zip(*(extract(r) for r in header))) + names = columns.copy() + for single_ic in sorted(ic): + names.insert(single_ic, single_ic) + + # Clean the column names (if we have an index_col). 
+ if len(ic): + col_names = [ + r[ic[0]] + if ((r[ic[0]] is not None) and r[ic[0]] not in self.unnamed_cols) + else None + for r in header + ] + else: + col_names = [None] * len(header) + + passed_names = True + + return names, index_names, col_names, passed_names + + @final + def _maybe_make_multi_index_columns( + self, + columns: Sequence[Hashable], + col_names: Sequence[Hashable] | None = None, + ) -> Sequence[Hashable] | MultiIndex: + # possibly create a column mi here + if is_potential_multi_index(columns): + list_columns = cast(list[tuple], columns) + return MultiIndex.from_tuples(list_columns, names=col_names) + return columns + + @final + def _make_index( + self, data, alldata, columns, indexnamerow: list[Scalar] | None = None + ) -> tuple[Index | None, Sequence[Hashable] | MultiIndex]: + index: Index | None + if not is_index_col(self.index_col) or not self.index_col: + index = None + + elif not self._has_complex_date_col: + simple_index = self._get_simple_index(alldata, columns) + index = self._agg_index(simple_index) + elif self._has_complex_date_col: + if not self._name_processed: + (self.index_names, _, self.index_col) = self._clean_index_names( + list(columns), self.index_col + ) + self._name_processed = True + date_index = self._get_complex_date_index(data, columns) + index = self._agg_index(date_index, try_parse_dates=False) + + # add names for the index + if indexnamerow: + coffset = len(indexnamerow) - len(columns) + assert index is not None + index = index.set_names(indexnamerow[:coffset]) + + # maybe create a mi on the columns + columns = self._maybe_make_multi_index_columns(columns, self.col_names) + + return index, columns + + @final + def _get_simple_index(self, data, columns): + def ix(col): + if not isinstance(col, str): + return col + raise ValueError(f"Index {col} invalid") + + to_remove = [] + index = [] + for idx in self.index_col: + i = ix(idx) + to_remove.append(i) + index.append(data[i]) + + # remove index items from content and columns, don't pop in + # loop + for i in sorted(to_remove, reverse=True): + data.pop(i) + if not self._implicit_index: + columns.pop(i) + + return index + + @final + def _get_complex_date_index(self, data, col_names): + def _get_name(icol): + if isinstance(icol, str): + return icol + + if col_names is None: + raise ValueError(f"Must supply column order to use {icol!s} as index") + + for i, c in enumerate(col_names): + if i == icol: + return c + + to_remove = [] + index = [] + for idx in self.index_col: + name = _get_name(idx) + to_remove.append(name) + index.append(data[name]) + + # remove index items from content and columns, don't pop in + # loop + for c in sorted(to_remove, reverse=True): + data.pop(c) + col_names.remove(c) + + return index + + @final + def _clean_mapping(self, mapping): + """converts col numbers to names""" + if not isinstance(mapping, dict): + return mapping + clean = {} + # for mypy + assert self.orig_names is not None + + for col, v in mapping.items(): + if isinstance(col, int) and col not in self.orig_names: + col = self.orig_names[col] + clean[col] = v + if isinstance(mapping, defaultdict): + remaining_cols = set(self.orig_names) - set(clean.keys()) + clean.update({col: mapping[col] for col in remaining_cols}) + return clean + + @final + def _agg_index(self, index, try_parse_dates: bool = True) -> Index: + arrays = [] + converters = self._clean_mapping(self.converters) + + for i, arr in enumerate(index): + if try_parse_dates and self._should_parse_dates(i): + arr = self._date_conv( + arr, + 
col=self.index_names[i] if self.index_names is not None else None, + ) + + if self.na_filter: + col_na_values = self.na_values + col_na_fvalues = self.na_fvalues + else: + col_na_values = set() + col_na_fvalues = set() + + if isinstance(self.na_values, dict): + assert self.index_names is not None + col_name = self.index_names[i] + if col_name is not None: + col_na_values, col_na_fvalues = _get_na_values( + col_name, self.na_values, self.na_fvalues, self.keep_default_na + ) + + clean_dtypes = self._clean_mapping(self.dtype) + + cast_type = None + index_converter = False + if self.index_names is not None: + if isinstance(clean_dtypes, dict): + cast_type = clean_dtypes.get(self.index_names[i], None) + + if isinstance(converters, dict): + index_converter = converters.get(self.index_names[i]) is not None + + try_num_bool = not ( + cast_type and is_string_dtype(cast_type) or index_converter + ) + + arr, _ = self._infer_types( + arr, col_na_values | col_na_fvalues, cast_type is None, try_num_bool + ) + arrays.append(arr) + + names = self.index_names + index = ensure_index_from_sequences(arrays, names) + + return index + + @final + def _convert_to_ndarrays( + self, + dct: Mapping, + na_values, + na_fvalues, + verbose: bool = False, + converters=None, + dtypes=None, + ): + result = {} + for c, values in dct.items(): + conv_f = None if converters is None else converters.get(c, None) + if isinstance(dtypes, dict): + cast_type = dtypes.get(c, None) + else: + # single dtype or None + cast_type = dtypes + + if self.na_filter: + col_na_values, col_na_fvalues = _get_na_values( + c, na_values, na_fvalues, self.keep_default_na + ) + else: + col_na_values, col_na_fvalues = set(), set() + + if c in self._parse_date_cols: + # GH#26203 Do not convert columns which get converted to dates + # but replace nans to ensure to_datetime works + mask = algorithms.isin(values, set(col_na_values) | col_na_fvalues) + np.putmask(values, mask, np.nan) + result[c] = values + continue + + if conv_f is not None: + # conv_f applied to data before inference + if cast_type is not None: + warnings.warn( + ( + "Both a converter and dtype were specified " + f"for column {c} - only the converter will be used." 
+ ), + ParserWarning, + stacklevel=find_stack_level(), + ) + + try: + values = lib.map_infer(values, conv_f) + except ValueError: + mask = algorithms.isin(values, list(na_values)).view(np.uint8) + values = lib.map_infer_mask(values, conv_f, mask) + + cvals, na_count = self._infer_types( + values, + set(col_na_values) | col_na_fvalues, + cast_type is None, + try_num_bool=False, + ) + else: + is_ea = is_extension_array_dtype(cast_type) + is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type) + # skip inference if specified dtype is object + # or casting to an EA + try_num_bool = not (cast_type and is_str_or_ea_dtype) + + # general type inference and conversion + cvals, na_count = self._infer_types( + values, + set(col_na_values) | col_na_fvalues, + cast_type is None, + try_num_bool, + ) + + # type specified in dtype param or cast_type is an EA + if cast_type is not None: + cast_type = pandas_dtype(cast_type) + if cast_type and (cvals.dtype != cast_type or is_ea): + if not is_ea and na_count > 0: + if is_bool_dtype(cast_type): + raise ValueError(f"Bool column has NA values in column {c}") + cvals = self._cast_types(cvals, cast_type, c) + + result[c] = cvals + if verbose and na_count: + print(f"Filled {na_count} NA values in column {c!s}") + return result + + @final + def _set_noconvert_dtype_columns( + self, col_indices: list[int], names: Sequence[Hashable] + ) -> set[int]: + """ + Set the columns that should not undergo dtype conversions. + + Currently, any column that is involved with date parsing will not + undergo such conversions. If usecols is specified, the positions of the columns + not to cast is relative to the usecols not to all columns. + + Parameters + ---------- + col_indices: The indices specifying order and positions of the columns + names: The column names which order is corresponding with the order + of col_indices + + Returns + ------- + A set of integers containing the positions of the columns not to convert. + """ + usecols: list[int] | list[str] | None + noconvert_columns = set() + if self.usecols_dtype == "integer": + # A set of integers will be converted to a list in + # the correct order every single time. + usecols = sorted(self.usecols) + elif callable(self.usecols) or self.usecols_dtype not in ("empty", None): + # The names attribute should have the correct columns + # in the proper order for indexing with parse_dates. + usecols = col_indices + else: + # Usecols is empty. 
+ usecols = None + + def _set(x) -> int: + if usecols is not None and is_integer(x): + x = usecols[x] + + if not is_integer(x): + x = col_indices[names.index(x)] + + return x + + if isinstance(self.parse_dates, list): + for val in self.parse_dates: + if isinstance(val, list): + for k in val: + noconvert_columns.add(_set(k)) + else: + noconvert_columns.add(_set(val)) + + elif isinstance(self.parse_dates, dict): + for val in self.parse_dates.values(): + if isinstance(val, list): + for k in val: + noconvert_columns.add(_set(k)) + else: + noconvert_columns.add(_set(val)) + + elif self.parse_dates: + if isinstance(self.index_col, list): + for k in self.index_col: + noconvert_columns.add(_set(k)) + elif self.index_col is not None: + noconvert_columns.add(_set(self.index_col)) + + return noconvert_columns + + @final + def _infer_types( + self, values, na_values, no_dtype_specified, try_num_bool: bool = True + ) -> tuple[ArrayLike, int]: + """ + Infer types of values, possibly casting + + Parameters + ---------- + values : ndarray + na_values : set + no_dtype_specified: Specifies if we want to cast explicitly + try_num_bool : bool, default try + try to cast values to numeric (first preference) or boolean + + Returns + ------- + converted : ndarray or ExtensionArray + na_count : int + """ + na_count = 0 + if issubclass(values.dtype.type, (np.number, np.bool_)): + # If our array has numeric dtype, we don't have to check for strings in isin + na_values = np.array([val for val in na_values if not isinstance(val, str)]) + mask = algorithms.isin(values, na_values) + na_count = mask.astype("uint8", copy=False).sum() + if na_count > 0: + if is_integer_dtype(values): + values = values.astype(np.float64) + np.putmask(values, mask, np.nan) + return values, na_count + + dtype_backend = self.dtype_backend + non_default_dtype_backend = ( + no_dtype_specified and dtype_backend is not lib.no_default + ) + result: ArrayLike + + if try_num_bool and is_object_dtype(values.dtype): + # exclude e.g DatetimeIndex here + try: + result, result_mask = lib.maybe_convert_numeric( + values, + na_values, + False, + convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type] # noqa: E501 + ) + except (ValueError, TypeError): + # e.g. 
encountering datetime string gets ValueError + # TypeError can be raised in floatify + na_count = parsers.sanitize_objects(values, na_values) + result = values + else: + if non_default_dtype_backend: + if result_mask is None: + result_mask = np.zeros(result.shape, dtype=np.bool_) + + if result_mask.all(): + result = IntegerArray( + np.ones(result_mask.shape, dtype=np.int64), result_mask + ) + elif is_integer_dtype(result): + result = IntegerArray(result, result_mask) + elif is_bool_dtype(result): + result = BooleanArray(result, result_mask) + elif is_float_dtype(result): + result = FloatingArray(result, result_mask) + + na_count = result_mask.sum() + else: + na_count = isna(result).sum() + else: + result = values + if values.dtype == np.object_: + na_count = parsers.sanitize_objects(values, na_values) + + if result.dtype == np.object_ and try_num_bool: + result, bool_mask = libops.maybe_convert_bool( + np.asarray(values), + true_values=self.true_values, + false_values=self.false_values, + convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type] # noqa: E501 + ) + if result.dtype == np.bool_ and non_default_dtype_backend: + if bool_mask is None: + bool_mask = np.zeros(result.shape, dtype=np.bool_) + result = BooleanArray(result, bool_mask) + elif result.dtype == np.object_ and non_default_dtype_backend: + # read_excel sends array of datetime objects + if not lib.is_datetime_array(result, skipna=True): + result = StringDtype().construct_array_type()._from_sequence(values) + + if dtype_backend == "pyarrow": + pa = import_optional_dependency("pyarrow") + if isinstance(result, np.ndarray): + result = ArrowExtensionArray(pa.array(result, from_pandas=True)) + else: + # ExtensionArray + result = ArrowExtensionArray( + pa.array(result.to_numpy(), from_pandas=True) + ) + + return result, na_count + + @final + def _cast_types(self, values: ArrayLike, cast_type: DtypeObj, column) -> ArrayLike: + """ + Cast values to specified type + + Parameters + ---------- + values : ndarray or ExtensionArray + cast_type : np.dtype or ExtensionDtype + dtype to cast values to + column : string + column name - used only for error reporting + + Returns + ------- + converted : ndarray or ExtensionArray + """ + if isinstance(cast_type, CategoricalDtype): + known_cats = cast_type.categories is not None + + if not is_object_dtype(values.dtype) and not known_cats: + # TODO: this is for consistency with + # c-parser which parses all categories + # as strings + values = lib.ensure_string_array( + values, skipna=False, convert_na_value=False + ) + + cats = Index(values).unique().dropna() + values = Categorical._from_inferred_categories( + cats, cats.get_indexer(values), cast_type, true_values=self.true_values + ) + + # use the EA's implementation of casting + elif isinstance(cast_type, ExtensionDtype): + array_type = cast_type.construct_array_type() + try: + if isinstance(cast_type, BooleanDtype): + # error: Unexpected keyword argument "true_values" for + # "_from_sequence_of_strings" of "ExtensionArray" + return array_type._from_sequence_of_strings( # type: ignore[call-arg] # noqa: E501 + values, + dtype=cast_type, + true_values=self.true_values, + false_values=self.false_values, + ) + else: + return array_type._from_sequence_of_strings(values, dtype=cast_type) + except NotImplementedError as err: + raise NotImplementedError( + f"Extension Array: {array_type} must implement " + "_from_sequence_of_strings in order to be used in parser methods" + ) from err + + elif isinstance(values, ExtensionArray): + 
values = values.astype(cast_type, copy=False) + elif issubclass(cast_type.type, str): + # TODO: why skipna=True here and False above? some tests depend + # on it here, but nothing fails if we change it above + # (as no tests get there as of 2022-12-06) + values = lib.ensure_string_array( + values, skipna=True, convert_na_value=False + ) + else: + try: + values = astype_array(values, cast_type, copy=True) + except ValueError as err: + raise ValueError( + f"Unable to convert column {column} to type {cast_type}" + ) from err + return values + + @overload + def _do_date_conversions( + self, + names: Index, + data: DataFrame, + ) -> tuple[Sequence[Hashable] | Index, DataFrame]: + ... + + @overload + def _do_date_conversions( + self, + names: Sequence[Hashable], + data: Mapping[Hashable, ArrayLike], + ) -> tuple[Sequence[Hashable], Mapping[Hashable, ArrayLike]]: + ... + + @final + def _do_date_conversions( + self, + names: Sequence[Hashable] | Index, + data: Mapping[Hashable, ArrayLike] | DataFrame, + ) -> tuple[Sequence[Hashable] | Index, Mapping[Hashable, ArrayLike] | DataFrame]: + # returns data, columns + + if self.parse_dates is not None: + data, names = _process_date_conversion( + data, + self._date_conv, + self.parse_dates, + self.index_col, + self.index_names, + names, + keep_date_col=self.keep_date_col, + dtype_backend=self.dtype_backend, + ) + + return names, data + + @final + def _check_data_length( + self, + columns: Sequence[Hashable], + data: Sequence[ArrayLike], + ) -> None: + """Checks if length of data is equal to length of column names. + + One set of trailing commas is allowed. self.index_col not False + results in a ParserError previously when lengths do not match. + + Parameters + ---------- + columns: list of column names + data: list of array-likes containing the data column-wise. + """ + if not self.index_col and len(columns) != len(data) and columns: + empty_str = is_object_dtype(data[-1]) and data[-1] == "" + # error: No overload variant of "__ror__" of "ndarray" matches + # argument type "ExtensionArray" + empty_str_or_na = empty_str | isna(data[-1]) # type: ignore[operator] + if len(columns) == len(data) - 1 and np.all(empty_str_or_na): + return + warnings.warn( + "Length of header or names does not match length of data. This leads " + "to a loss of data with index_col=False.", + ParserWarning, + stacklevel=find_stack_level(), + ) + + @overload + def _evaluate_usecols( + self, + usecols: set[int] | Callable[[Hashable], object], + names: Sequence[Hashable], + ) -> set[int]: + ... + + @overload + def _evaluate_usecols( + self, usecols: set[str], names: Sequence[Hashable] + ) -> set[str]: + ... + + @final + def _evaluate_usecols( + self, + usecols: Callable[[Hashable], object] | set[str] | set[int], + names: Sequence[Hashable], + ) -> set[str] | set[int]: + """ + Check whether or not the 'usecols' parameter + is a callable. If so, enumerates the 'names' + parameter and returns a set of indices for + each entry in 'names' that evaluates to True. + If not a callable, returns 'usecols'. + """ + if callable(usecols): + return {i for i, name in enumerate(names) if usecols(name)} + return usecols + + @final + def _validate_usecols_names(self, usecols, names: Sequence): + """ + Validates that all usecols are present in a given + list of names. If not, raise a ValueError that + shows what usecols are missing. + + Parameters + ---------- + usecols : iterable of usecols + The columns to validate are present in names. + names : iterable of names + The column names to check against. 
+ + Returns + ------- + usecols : iterable of usecols + The `usecols` parameter if the validation succeeds. + + Raises + ------ + ValueError : Columns were missing. Error message will list them. + """ + missing = [c for c in usecols if c not in names] + if len(missing) > 0: + raise ValueError( + f"Usecols do not match columns, columns expected but not found: " + f"{missing}" + ) + + return usecols + + @final + def _validate_usecols_arg(self, usecols): + """ + Validate the 'usecols' parameter. + + Checks whether or not the 'usecols' parameter contains all integers + (column selection by index), strings (column by name) or is a callable. + Raises a ValueError if that is not the case. + + Parameters + ---------- + usecols : list-like, callable, or None + List of columns to use when parsing or a callable that can be used + to filter a list of table columns. + + Returns + ------- + usecols_tuple : tuple + A tuple of (verified_usecols, usecols_dtype). + + 'verified_usecols' is either a set if an array-like is passed in or + 'usecols' if a callable or None is passed in. + + 'usecols_dtype` is the inferred dtype of 'usecols' if an array-like + is passed in or None if a callable or None is passed in. + """ + msg = ( + "'usecols' must either be list-like of all strings, all unicode, " + "all integers or a callable." + ) + if usecols is not None: + if callable(usecols): + return usecols, None + + if not is_list_like(usecols): + # see gh-20529 + # + # Ensure it is iterable container but not string. + raise ValueError(msg) + + usecols_dtype = lib.infer_dtype(usecols, skipna=False) + + if usecols_dtype not in ("empty", "integer", "string"): + raise ValueError(msg) + + usecols = set(usecols) + + return usecols, usecols_dtype + return usecols, None + + @final + def _clean_index_names(self, columns, index_col) -> tuple[list | None, list, list]: + if not is_index_col(index_col): + return None, columns, index_col + + columns = list(columns) + + # In case of no rows and multiindex columns we have to set index_names to + # list of Nones GH#38292 + if not columns: + return [None] * len(index_col), columns, index_col + + cp_cols = list(columns) + index_names: list[str | int | None] = [] + + # don't mutate + index_col = list(index_col) + + for i, c in enumerate(index_col): + if isinstance(c, str): + index_names.append(c) + for j, name in enumerate(cp_cols): + if name == c: + index_col[i] = j + columns.remove(name) + break + else: + name = cp_cols[c] + columns.remove(name) + index_names.append(name) + + # Only clean index names that were placeholders. + for i, name in enumerate(index_names): + if isinstance(name, str) and name in self.unnamed_cols: + index_names[i] = None + + return index_names, columns, index_col + + @final + def _get_empty_meta(self, columns, dtype: DtypeArg | None = None): + columns = list(columns) + + index_col = self.index_col + index_names = self.index_names + + # Convert `dtype` to a defaultdict of some kind. + # This will enable us to write `dtype[col_name]` + # without worrying about KeyError issues later on. + dtype_dict: defaultdict[Hashable, Any] + if not is_dict_like(dtype): + # if dtype == None, default will be object. + default_dtype = dtype or object + dtype_dict = defaultdict(lambda: default_dtype) + else: + dtype = cast(dict, dtype) + dtype_dict = defaultdict( + lambda: object, + {columns[k] if is_integer(k) else k: v for k, v in dtype.items()}, + ) + + # Even though we have no data, the "index" of the empty DataFrame + # could for example still be an empty MultiIndex. 
Thus, we need to + # check whether we have any index columns specified, via either: + # + # 1) index_col (column indices) + # 2) index_names (column names) + # + # Both must be non-null to ensure a successful construction. Otherwise, + # we have to create a generic empty Index. + index: Index + if (index_col is None or index_col is False) or index_names is None: + index = default_index(0) + else: + data = [Series([], dtype=dtype_dict[name]) for name in index_names] + index = ensure_index_from_sequences(data, names=index_names) + index_col.sort() + + for i, n in enumerate(index_col): + columns.pop(n - i) + + col_dict = { + col_name: Series([], dtype=dtype_dict[col_name]) for col_name in columns + } + + return index, columns, col_dict + + +def _make_date_converter( + date_parser=lib.no_default, + dayfirst: bool = False, + cache_dates: bool = True, + date_format: dict[Hashable, str] | str | None = None, +): + if date_parser is not lib.no_default: + warnings.warn( + "The argument 'date_parser' is deprecated and will " + "be removed in a future version. " + "Please use 'date_format' instead, or read your data in as 'object' dtype " + "and then call 'to_datetime'.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if date_parser is not lib.no_default and date_format is not None: + raise TypeError("Cannot use both 'date_parser' and 'date_format'") + + def unpack_if_single_element(arg): + # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615 + if isinstance(arg, np.ndarray) and arg.ndim == 1 and len(arg) == 1: + return arg[0] + return arg + + def converter(*date_cols, col: Hashable): + if len(date_cols) == 1 and date_cols[0].dtype.kind in "Mm": + return date_cols[0] + + if date_parser is lib.no_default: + strs = parsing.concat_date_cols(date_cols) + date_fmt = ( + date_format.get(col) if isinstance(date_format, dict) else date_format + ) + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + ".*parsing datetimes with mixed time zones will raise an error", + category=FutureWarning, + ) + result = tools.to_datetime( + ensure_object(strs), + format=date_fmt, + utc=False, + dayfirst=dayfirst, + errors="ignore", + cache=cache_dates, + ) + if isinstance(result, DatetimeIndex): + arr = result.to_numpy() + arr.flags.writeable = True + return arr + return result._values + else: + try: + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + ".*parsing datetimes with mixed time zones " + "will raise an error", + category=FutureWarning, + ) + result = tools.to_datetime( + date_parser( + *(unpack_if_single_element(arg) for arg in date_cols) + ), + errors="ignore", + cache=cache_dates, + ) + if isinstance(result, datetime.datetime): + raise Exception("scalar parser") + return result + except Exception: + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + ".*parsing datetimes with mixed time zones " + "will raise an error", + category=FutureWarning, + ) + return tools.to_datetime( + parsing.try_parse_dates( + parsing.concat_date_cols(date_cols), + parser=date_parser, + ), + errors="ignore", + ) + + return converter + + +parser_defaults = { + "delimiter": None, + "escapechar": None, + "quotechar": '"', + "quoting": csv.QUOTE_MINIMAL, + "doublequote": True, + "skipinitialspace": False, + "lineterminator": None, + "header": "infer", + "index_col": None, + "names": None, + "skiprows": None, + "skipfooter": 0, + "nrows": None, + "na_values": None, + "keep_default_na": True, + "true_values": None, + "false_values": None, + 
"converters": None, + "dtype": None, + "cache_dates": True, + "thousands": None, + "comment": None, + "decimal": ".", + # 'engine': 'c', + "parse_dates": False, + "keep_date_col": False, + "dayfirst": False, + "date_parser": lib.no_default, + "date_format": None, + "usecols": None, + # 'iterator': False, + "chunksize": None, + "verbose": False, + "encoding": None, + "compression": None, + "skip_blank_lines": True, + "encoding_errors": "strict", + "on_bad_lines": ParserBase.BadLineHandleMethod.ERROR, + "dtype_backend": lib.no_default, +} + + +def _process_date_conversion( + data_dict, + converter: Callable, + parse_spec, + index_col, + index_names, + columns, + keep_date_col: bool = False, + dtype_backend=lib.no_default, +): + def _isindex(colspec): + return (isinstance(index_col, list) and colspec in index_col) or ( + isinstance(index_names, list) and colspec in index_names + ) + + new_cols = [] + new_data = {} + + orig_names = columns + columns = list(columns) + + date_cols = set() + + if parse_spec is None or isinstance(parse_spec, bool): + return data_dict, columns + + if isinstance(parse_spec, list): + # list of column lists + for colspec in parse_spec: + if is_scalar(colspec) or isinstance(colspec, tuple): + if isinstance(colspec, int) and colspec not in data_dict: + colspec = orig_names[colspec] + if _isindex(colspec): + continue + elif dtype_backend == "pyarrow": + import pyarrow as pa + + dtype = data_dict[colspec].dtype + if isinstance(dtype, ArrowDtype) and ( + pa.types.is_timestamp(dtype.pyarrow_dtype) + or pa.types.is_date(dtype.pyarrow_dtype) + ): + continue + + # Pyarrow engine returns Series which we need to convert to + # numpy array before converter, its a no-op for other parsers + data_dict[colspec] = converter( + np.asarray(data_dict[colspec]), col=colspec + ) + else: + new_name, col, old_names = _try_convert_dates( + converter, colspec, data_dict, orig_names + ) + if new_name in data_dict: + raise ValueError(f"New date column already in dict {new_name}") + new_data[new_name] = col + new_cols.append(new_name) + date_cols.update(old_names) + + elif isinstance(parse_spec, dict): + # dict of new name to column list + for new_name, colspec in parse_spec.items(): + if new_name in data_dict: + raise ValueError(f"Date column {new_name} already in dict") + + _, col, old_names = _try_convert_dates( + converter, + colspec, + data_dict, + orig_names, + target_name=new_name, + ) + + new_data[new_name] = col + + # If original column can be converted to date we keep the converted values + # This can only happen if values are from single column + if len(colspec) == 1: + new_data[colspec[0]] = col + + new_cols.append(new_name) + date_cols.update(old_names) + + if isinstance(data_dict, DataFrame): + data_dict = concat([DataFrame(new_data), data_dict], axis=1, copy=False) + else: + data_dict.update(new_data) + new_cols.extend(columns) + + if not keep_date_col: + for c in list(date_cols): + data_dict.pop(c) + new_cols.remove(c) + + return data_dict, new_cols + + +def _try_convert_dates( + parser: Callable, colspec, data_dict, columns, target_name: str | None = None +): + colset = set(columns) + colnames = [] + + for c in colspec: + if c in colset: + colnames.append(c) + elif isinstance(c, int) and c not in columns: + colnames.append(columns[c]) + else: + colnames.append(c) + + new_name: tuple | str + if all(isinstance(x, tuple) for x in colnames): + new_name = tuple(map("_".join, zip(*colnames))) + else: + new_name = "_".join([str(x) for x in colnames]) + to_parse = 
[np.asarray(data_dict[c]) for c in colnames if c in data_dict] + + new_col = parser(*to_parse, col=new_name if target_name is None else target_name) + return new_name, new_col, colnames + + +def _get_na_values(col, na_values, na_fvalues, keep_default_na: bool): + """ + Get the NaN values for a given column. + + Parameters + ---------- + col : str + The name of the column. + na_values : array-like, dict + The object listing the NaN values as strings. + na_fvalues : array-like, dict + The object listing the NaN values as floats. + keep_default_na : bool + If `na_values` is a dict, and the column is not mapped in the + dictionary, whether to return the default NaN values or the empty set. + + Returns + ------- + nan_tuple : A length-two tuple composed of + + 1) na_values : the string NaN values for that column. + 2) na_fvalues : the float NaN values for that column. + """ + if isinstance(na_values, dict): + if col in na_values: + return na_values[col], na_fvalues[col] + else: + if keep_default_na: + return STR_NA_VALUES, set() + + return set(), set() + else: + return na_values, na_fvalues + + +def _validate_parse_dates_arg(parse_dates): + """ + Check whether or not the 'parse_dates' parameter + is a non-boolean scalar. Raises a ValueError if + that is the case. + """ + msg = ( + "Only booleans, lists, and dictionaries are accepted " + "for the 'parse_dates' parameter" + ) + + if not ( + parse_dates is None + or lib.is_bool(parse_dates) + or isinstance(parse_dates, (list, dict)) + ): + raise TypeError(msg) + + return parse_dates + + +def is_index_col(col) -> bool: + return col is not None and col is not False diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/c_parser_wrapper.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/c_parser_wrapper.py new file mode 100644 index 00000000..0cd788c5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/c_parser_wrapper.py @@ -0,0 +1,410 @@ +from __future__ import annotations + +from collections import defaultdict +from typing import TYPE_CHECKING +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + parsers, +) +from pandas.compat._optional import import_optional_dependency +from pandas.errors import DtypeWarning +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.concat import ( + concat_compat, + union_categoricals, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +from pandas.core.indexes.api import ensure_index_from_sequences + +from pandas.io.common import ( + dedup_names, + is_potential_multi_index, +) +from pandas.io.parsers.base_parser import ( + ParserBase, + ParserError, + is_index_col, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Mapping, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + DtypeArg, + DtypeObj, + ReadCsvBuffer, + ) + + from pandas import ( + Index, + MultiIndex, + ) + + +class CParserWrapper(ParserBase): + low_memory: bool + _reader: parsers.TextReader + + def __init__(self, src: ReadCsvBuffer[str], **kwds) -> None: + super().__init__(kwds) + self.kwds = kwds + kwds = kwds.copy() + + self.low_memory = kwds.pop("low_memory", False) + + # #2442 + # error: Cannot determine type of 'index_col' + kwds["allow_leading_cols"] = ( + self.index_col is not False # type: ignore[has-type] + ) + + # GH20529, validate usecol arg before TextReader + kwds["usecols"] = self.usecols + + # Have to pass int, would break tests using TextReader 
directly otherwise :( + kwds["on_bad_lines"] = self.on_bad_lines.value + + for key in ( + "storage_options", + "encoding", + "memory_map", + "compression", + ): + kwds.pop(key, None) + + kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None)) + if "dtype_backend" not in kwds or kwds["dtype_backend"] is lib.no_default: + kwds["dtype_backend"] = "numpy" + if kwds["dtype_backend"] == "pyarrow": + # Fail here loudly instead of in cython after reading + import_optional_dependency("pyarrow") + self._reader = parsers.TextReader(src, **kwds) + + self.unnamed_cols = self._reader.unnamed_cols + + # error: Cannot determine type of 'names' + passed_names = self.names is None # type: ignore[has-type] + + if self._reader.header is None: + self.names = None + else: + # error: Cannot determine type of 'names' + # error: Cannot determine type of 'index_names' + ( + self.names, # type: ignore[has-type] + self.index_names, + self.col_names, + passed_names, + ) = self._extract_multi_indexer_columns( + self._reader.header, + self.index_names, # type: ignore[has-type] + passed_names, + ) + + # error: Cannot determine type of 'names' + if self.names is None: # type: ignore[has-type] + self.names = list(range(self._reader.table_width)) + + # gh-9755 + # + # need to set orig_names here first + # so that proper indexing can be done + # with _set_noconvert_columns + # + # once names has been filtered, we will + # then set orig_names again to names + # error: Cannot determine type of 'names' + self.orig_names = self.names[:] # type: ignore[has-type] + + if self.usecols: + usecols = self._evaluate_usecols(self.usecols, self.orig_names) + + # GH 14671 + # assert for mypy, orig_names is List or None, None would error in issubset + assert self.orig_names is not None + if self.usecols_dtype == "string" and not set(usecols).issubset( + self.orig_names + ): + self._validate_usecols_names(usecols, self.orig_names) + + # error: Cannot determine type of 'names' + if len(self.names) > len(usecols): # type: ignore[has-type] + # error: Cannot determine type of 'names' + self.names = [ # type: ignore[has-type] + n + # error: Cannot determine type of 'names' + for i, n in enumerate(self.names) # type: ignore[has-type] + if (i in usecols or n in usecols) + ] + + # error: Cannot determine type of 'names' + if len(self.names) < len(usecols): # type: ignore[has-type] + # error: Cannot determine type of 'names' + self._validate_usecols_names( + usecols, + self.names, # type: ignore[has-type] + ) + + # error: Cannot determine type of 'names' + self._validate_parse_dates_presence(self.names) # type: ignore[has-type] + self._set_noconvert_columns() + + # error: Cannot determine type of 'names' + self.orig_names = self.names # type: ignore[has-type] + + if not self._has_complex_date_col: + # error: Cannot determine type of 'index_col' + if self._reader.leading_cols == 0 and is_index_col( + self.index_col # type: ignore[has-type] + ): + self._name_processed = True + ( + index_names, + # error: Cannot determine type of 'names' + self.names, # type: ignore[has-type] + self.index_col, + ) = self._clean_index_names( + # error: Cannot determine type of 'names' + self.names, # type: ignore[has-type] + # error: Cannot determine type of 'index_col' + self.index_col, # type: ignore[has-type] + ) + + if self.index_names is None: + self.index_names = index_names + + if self._reader.header is None and not passed_names: + assert self.index_names is not None + self.index_names = [None] * len(self.index_names) + + self._implicit_index = 
self._reader.leading_cols > 0 + + def close(self) -> None: + # close handles opened by C parser + try: + self._reader.close() + except ValueError: + pass + + def _set_noconvert_columns(self) -> None: + """ + Set the columns that should not undergo dtype conversions. + + Currently, any column that is involved with date parsing will not + undergo such conversions. + """ + assert self.orig_names is not None + # error: Cannot determine type of 'names' + + # much faster than using orig_names.index(x) xref GH#44106 + names_dict = {x: i for i, x in enumerate(self.orig_names)} + col_indices = [names_dict[x] for x in self.names] # type: ignore[has-type] + # error: Cannot determine type of 'names' + noconvert_columns = self._set_noconvert_dtype_columns( + col_indices, + self.names, # type: ignore[has-type] + ) + for col in noconvert_columns: + self._reader.set_noconvert(col) + + def read( + self, + nrows: int | None = None, + ) -> tuple[ + Index | MultiIndex | None, + Sequence[Hashable] | MultiIndex, + Mapping[Hashable, ArrayLike], + ]: + index: Index | MultiIndex | None + column_names: Sequence[Hashable] | MultiIndex + try: + if self.low_memory: + chunks = self._reader.read_low_memory(nrows) + # destructive to chunks + data = _concatenate_chunks(chunks) + + else: + data = self._reader.read(nrows) + except StopIteration: + if self._first_chunk: + self._first_chunk = False + names = dedup_names( + self.orig_names, + is_potential_multi_index(self.orig_names, self.index_col), + ) + index, columns, col_dict = self._get_empty_meta( + names, + dtype=self.dtype, + ) + columns = self._maybe_make_multi_index_columns(columns, self.col_names) + + if self.usecols is not None: + columns = self._filter_usecols(columns) + + col_dict = {k: v for k, v in col_dict.items() if k in columns} + + return index, columns, col_dict + + else: + self.close() + raise + + # Done with first read, next time raise StopIteration + self._first_chunk = False + + # error: Cannot determine type of 'names' + names = self.names # type: ignore[has-type] + + if self._reader.leading_cols: + if self._has_complex_date_col: + raise NotImplementedError("file structure not yet supported") + + # implicit index, no index names + arrays = [] + + if self.index_col and self._reader.leading_cols != len(self.index_col): + raise ParserError( + "Could not construct index. Requested to use " + f"{len(self.index_col)} number of columns, but " + f"{self._reader.leading_cols} left to parse." + ) + + for i in range(self._reader.leading_cols): + if self.index_col is None: + values = data.pop(i) + else: + values = data.pop(self.index_col[i]) + + values = self._maybe_parse_dates(values, i, try_parse_dates=True) + arrays.append(values) + + index = ensure_index_from_sequences(arrays) + + if self.usecols is not None: + names = self._filter_usecols(names) + + names = dedup_names(names, is_potential_multi_index(names, self.index_col)) + + # rename dict keys + data_tups = sorted(data.items()) + data = {k: v for k, (i, v) in zip(names, data_tups)} + + column_names, date_data = self._do_date_conversions(names, data) + + # maybe create a mi on the columns + column_names = self._maybe_make_multi_index_columns( + column_names, self.col_names + ) + + else: + # rename dict keys + data_tups = sorted(data.items()) + + # ugh, mutation + + # assert for mypy, orig_names is List or None, None would error in list(...) 
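+ # dedup_names below mangles any duplicated labels, e.g.
+ # ["a", "a", "b"] -> ["a", "a.1", "b"], unless the names
+ # could form a MultiIndex of tuples.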
+ assert self.orig_names is not None + names = list(self.orig_names) + names = dedup_names(names, is_potential_multi_index(names, self.index_col)) + + if self.usecols is not None: + names = self._filter_usecols(names) + + # columns as list + alldata = [x[1] for x in data_tups] + if self.usecols is None: + self._check_data_length(names, alldata) + + data = {k: v for k, (i, v) in zip(names, data_tups)} + + names, date_data = self._do_date_conversions(names, data) + index, column_names = self._make_index(date_data, alldata, names) + + return index, column_names, date_data + + def _filter_usecols(self, names: Sequence[Hashable]) -> Sequence[Hashable]: + # hackish + usecols = self._evaluate_usecols(self.usecols, names) + if usecols is not None and len(names) != len(usecols): + names = [ + name for i, name in enumerate(names) if i in usecols or name in usecols + ] + return names + + def _maybe_parse_dates(self, values, index: int, try_parse_dates: bool = True): + if try_parse_dates and self._should_parse_dates(index): + values = self._date_conv( + values, + col=self.index_names[index] if self.index_names is not None else None, + ) + return values + + +def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict: + """ + Concatenate chunks of data read with low_memory=True. + + The tricky part is handling Categoricals, where different chunks + may have different inferred categories. + """ + names = list(chunks[0].keys()) + warning_columns = [] + + result: dict = {} + for name in names: + arrs = [chunk.pop(name) for chunk in chunks] + # Check each arr for consistent types. + dtypes = {a.dtype for a in arrs} + non_cat_dtypes = {x for x in dtypes if not isinstance(x, CategoricalDtype)} + + dtype = dtypes.pop() + if isinstance(dtype, CategoricalDtype): + result[name] = union_categoricals(arrs, sort_categories=False) + else: + result[name] = concat_compat(arrs) + if len(non_cat_dtypes) > 1 and result[name].dtype == np.dtype(object): + warning_columns.append(str(name)) + + if warning_columns: + warning_names = ",".join(warning_columns) + warning_message = " ".join( + [ + f"Columns ({warning_names}) have mixed types. " + f"Specify dtype option on import or set low_memory=False." + ] + ) + warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level()) + return result + + +def ensure_dtype_objs( + dtype: DtypeArg | dict[Hashable, DtypeArg] | None +) -> DtypeObj | dict[Hashable, DtypeObj] | None: + """ + Ensure we have either None, a dtype object, or a dictionary mapping to + dtype objects. 
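+
+ For example, "float64" is converted to ``dtype('float64')`` and a
+ mapping such as ``{"a": "int64"}`` to ``{"a": dtype('int64')}``; for a
+ ``defaultdict``, the default factory's result is passed through
+ ``pandas_dtype`` as well.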
+ """ + if isinstance(dtype, defaultdict): + # "None" not callable [misc] + default_dtype = pandas_dtype(dtype.default_factory()) # type: ignore[misc] + dtype_converted: defaultdict = defaultdict(lambda: default_dtype) + for key in dtype.keys(): + dtype_converted[key] = pandas_dtype(dtype[key]) + return dtype_converted + elif isinstance(dtype, dict): + return {k: pandas_dtype(dtype[k]) for k in dtype} + elif dtype is not None: + return pandas_dtype(dtype) + return dtype diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/python_parser.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/python_parser.py new file mode 100644 index 00000000..6846ea2b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/python_parser.py @@ -0,0 +1,1382 @@ +from __future__ import annotations + +from collections import ( + abc, + defaultdict, +) +from collections.abc import ( + Hashable, + Iterator, + Mapping, + Sequence, +) +import csv +from io import StringIO +import re +import sys +from typing import ( + IO, + TYPE_CHECKING, + DefaultDict, + Literal, + cast, +) + +import numpy as np + +from pandas._libs import lib +from pandas.errors import ( + EmptyDataError, + ParserError, +) +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_integer, + is_numeric_dtype, +) +from pandas.core.dtypes.inference import is_dict_like + +from pandas.io.common import ( + dedup_names, + is_potential_multi_index, +) +from pandas.io.parsers.base_parser import ( + ParserBase, + parser_defaults, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + ReadCsvBuffer, + Scalar, + ) + + from pandas import ( + Index, + MultiIndex, + ) + +# BOM character (byte order mark) +# This exists at the beginning of a file to indicate endianness +# of a file (stream). Unfortunately, this marker screws up parsing, +# so we need to remove it if we see it. +_BOM = "\ufeff" + + +class PythonParser(ParserBase): + _no_thousands_columns: set[int] + + def __init__(self, f: ReadCsvBuffer[str] | list, **kwds) -> None: + """ + Workhorse function for processing nested list into DataFrame + """ + super().__init__(kwds) + + self.data: Iterator[str] | None = None + self.buf: list = [] + self.pos = 0 + self.line_pos = 0 + + self.skiprows = kwds["skiprows"] + + if callable(self.skiprows): + self.skipfunc = self.skiprows + else: + self.skipfunc = lambda x: x in self.skiprows + + self.skipfooter = _validate_skipfooter_arg(kwds["skipfooter"]) + self.delimiter = kwds["delimiter"] + + self.quotechar = kwds["quotechar"] + if isinstance(self.quotechar, str): + self.quotechar = str(self.quotechar) + + self.escapechar = kwds["escapechar"] + self.doublequote = kwds["doublequote"] + self.skipinitialspace = kwds["skipinitialspace"] + self.lineterminator = kwds["lineterminator"] + self.quoting = kwds["quoting"] + self.skip_blank_lines = kwds["skip_blank_lines"] + + self.has_index_names = False + if "has_index_names" in kwds: + self.has_index_names = kwds["has_index_names"] + + self.verbose = kwds["verbose"] + + self.thousands = kwds["thousands"] + self.decimal = kwds["decimal"] + + self.comment = kwds["comment"] + + # Set self.data to something that can read lines. + if isinstance(f, list): + # read_excel: f is a list + self.data = cast(Iterator[str], f) + else: + assert hasattr(f, "readline") + self.data = self._make_reader(f) + + # Get columns in two steps: infer from data, then + # infer column indices from self.usecols if it is specified. 
+ self._col_indices: list[int] | None = None + columns: list[list[Scalar | None]] + ( + columns, + self.num_original_columns, + self.unnamed_cols, + ) = self._infer_columns() + + # Now self.columns has the set of columns that we will process. + # The original set is stored in self.original_columns. + # error: Cannot determine type of 'index_names' + ( + self.columns, + self.index_names, + self.col_names, + _, + ) = self._extract_multi_indexer_columns( + columns, + self.index_names, # type: ignore[has-type] + ) + + # get popped off for index + self.orig_names: list[Hashable] = list(self.columns) + + # needs to be cleaned/refactored + # multiple date column thing turning into a real spaghetti factory + + if not self._has_complex_date_col: + (index_names, self.orig_names, self.columns) = self._get_index_name() + self._name_processed = True + if self.index_names is None: + self.index_names = index_names + + if self._col_indices is None: + self._col_indices = list(range(len(self.columns))) + + self._parse_date_cols = self._validate_parse_dates_presence(self.columns) + self._no_thousands_columns = self._set_no_thousand_columns() + + if len(self.decimal) != 1: + raise ValueError("Only length-1 decimal markers supported") + + @cache_readonly + def num(self) -> re.Pattern: + decimal = re.escape(self.decimal) + if self.thousands is None: + regex = rf"^[\-\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\-?[0-9]+)?$" + else: + thousands = re.escape(self.thousands) + regex = ( + rf"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?" + rf"([0-9]?(E|e)\-?[0-9]+)?$" + ) + return re.compile(regex) + + def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]): + sep = self.delimiter + + if sep is None or len(sep) == 1: + if self.lineterminator: + raise ValueError( + "Custom line terminators not supported in python parser (yet)" + ) + + class MyDialect(csv.Dialect): + delimiter = self.delimiter + quotechar = self.quotechar + escapechar = self.escapechar + doublequote = self.doublequote + skipinitialspace = self.skipinitialspace + quoting = self.quoting + lineterminator = "\n" + + dia = MyDialect + + if sep is not None: + dia.delimiter = sep + else: + # attempt to sniff the delimiter from the first valid line, + # i.e. 
no comment line and not in skiprows + line = f.readline() + lines = self._check_comments([[line]])[0] + while self.skipfunc(self.pos) or not lines: + self.pos += 1 + line = f.readline() + lines = self._check_comments([[line]])[0] + lines_str = cast(list[str], lines) + + # since `line` was a string, lines will be a list containing + # only a single string + line = lines_str[0] + + self.pos += 1 + self.line_pos += 1 + sniffed = csv.Sniffer().sniff(line) + dia.delimiter = sniffed.delimiter + + # Note: encoding is irrelevant here + line_rdr = csv.reader(StringIO(line), dialect=dia) + self.buf.extend(list(line_rdr)) + + # Note: encoding is irrelevant here + reader = csv.reader(f, dialect=dia, strict=True) + + else: + + def _read(): + line = f.readline() + pat = re.compile(sep) + + yield pat.split(line.strip()) + + for line in f: + yield pat.split(line.strip()) + + reader = _read() + + return reader + + def read( + self, rows: int | None = None + ) -> tuple[ + Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike] + ]: + try: + content = self._get_lines(rows) + except StopIteration: + if self._first_chunk: + content = [] + else: + self.close() + raise + + # done with first read, next time raise StopIteration + self._first_chunk = False + + columns: Sequence[Hashable] = list(self.orig_names) + if not len(content): # pragma: no cover + # DataFrame with the right metadata, even though it's length 0 + # error: Cannot determine type of 'index_col' + names = dedup_names( + self.orig_names, + is_potential_multi_index( + self.orig_names, + self.index_col, # type: ignore[has-type] + ), + ) + index, columns, col_dict = self._get_empty_meta( + names, + self.dtype, + ) + conv_columns = self._maybe_make_multi_index_columns(columns, self.col_names) + return index, conv_columns, col_dict + + # handle new style for names in index + count_empty_content_vals = count_empty_vals(content[0]) + indexnamerow = None + if self.has_index_names and count_empty_content_vals == len(columns): + indexnamerow = content[0] + content = content[1:] + + alldata = self._rows_to_cols(content) + data, columns = self._exclude_implicit_index(alldata) + + conv_data = self._convert_data(data) + columns, conv_data = self._do_date_conversions(columns, conv_data) + + index, result_columns = self._make_index( + conv_data, alldata, columns, indexnamerow + ) + + return index, result_columns, conv_data + + def _exclude_implicit_index( + self, + alldata: list[np.ndarray], + ) -> tuple[Mapping[Hashable, np.ndarray], Sequence[Hashable]]: + # error: Cannot determine type of 'index_col' + names = dedup_names( + self.orig_names, + is_potential_multi_index( + self.orig_names, + self.index_col, # type: ignore[has-type] + ), + ) + + offset = 0 + if self._implicit_index: + # error: Cannot determine type of 'index_col' + offset = len(self.index_col) # type: ignore[has-type] + + len_alldata = len(alldata) + self._check_data_length(names, alldata) + + return { + name: alldata[i + offset] for i, name in enumerate(names) if i < len_alldata + }, names + + # legacy + def get_chunk( + self, size: int | None = None + ) -> tuple[ + Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike] + ]: + if size is None: + # error: "PythonParser" has no attribute "chunksize" + size = self.chunksize # type: ignore[attr-defined] + return self.read(rows=size) + + def _convert_data( + self, + data: Mapping[Hashable, np.ndarray], + ) -> Mapping[Hashable, ArrayLike]: + # apply converters + clean_conv = self._clean_mapping(self.converters) + 
clean_dtypes = self._clean_mapping(self.dtype) + + # Apply NA values. + clean_na_values = {} + clean_na_fvalues = {} + + if isinstance(self.na_values, dict): + for col in self.na_values: + na_value = self.na_values[col] + na_fvalue = self.na_fvalues[col] + + if isinstance(col, int) and col not in self.orig_names: + col = self.orig_names[col] + + clean_na_values[col] = na_value + clean_na_fvalues[col] = na_fvalue + else: + clean_na_values = self.na_values + clean_na_fvalues = self.na_fvalues + + return self._convert_to_ndarrays( + data, + clean_na_values, + clean_na_fvalues, + self.verbose, + clean_conv, + clean_dtypes, + ) + + @cache_readonly + def _have_mi_columns(self) -> bool: + if self.header is None: + return False + + header = self.header + if isinstance(header, (list, tuple, np.ndarray)): + return len(header) > 1 + else: + return False + + def _infer_columns( + self, + ) -> tuple[list[list[Scalar | None]], int, set[Scalar | None]]: + names = self.names + num_original_columns = 0 + clear_buffer = True + unnamed_cols: set[Scalar | None] = set() + + if self.header is not None: + header = self.header + have_mi_columns = self._have_mi_columns + + if isinstance(header, (list, tuple, np.ndarray)): + # we have a mi columns, so read an extra line + if have_mi_columns: + header = list(header) + [header[-1] + 1] + else: + header = [header] + + columns: list[list[Scalar | None]] = [] + for level, hr in enumerate(header): + try: + line = self._buffered_line() + + while self.line_pos <= hr: + line = self._next_line() + + except StopIteration as err: + if 0 < self.line_pos <= hr and ( + not have_mi_columns or hr != header[-1] + ): + # If no rows we want to raise a different message and if + # we have mi columns, the last line is not part of the header + joi = list(map(str, header[:-1] if have_mi_columns else header)) + msg = f"[{','.join(joi)}], len of {len(joi)}, " + raise ValueError( + f"Passed header={msg}" + f"but only {self.line_pos} lines in file" + ) from err + + # We have an empty file, so check + # if columns are provided. 
That will + # serve as the 'line' for parsing + if have_mi_columns and hr > 0: + if clear_buffer: + self._clear_buffer() + columns.append([None] * len(columns[-1])) + return columns, num_original_columns, unnamed_cols + + if not self.names: + raise EmptyDataError("No columns to parse from file") from err + + line = self.names[:] + + this_columns: list[Scalar | None] = [] + this_unnamed_cols = [] + + for i, c in enumerate(line): + if c == "": + if have_mi_columns: + col_name = f"Unnamed: {i}_level_{level}" + else: + col_name = f"Unnamed: {i}" + + this_unnamed_cols.append(i) + this_columns.append(col_name) + else: + this_columns.append(c) + + if not have_mi_columns: + counts: DefaultDict = defaultdict(int) + # Ensure that regular columns are used before unnamed ones + # to keep given names and mangle unnamed columns + col_loop_order = [ + i + for i in range(len(this_columns)) + if i not in this_unnamed_cols + ] + this_unnamed_cols + + # TODO: Use pandas.io.common.dedup_names instead (see #50371) + for i in col_loop_order: + col = this_columns[i] + old_col = col + cur_count = counts[col] + + if cur_count > 0: + while cur_count > 0: + counts[old_col] = cur_count + 1 + col = f"{old_col}.{cur_count}" + if col in this_columns: + cur_count += 1 + else: + cur_count = counts[col] + + if ( + self.dtype is not None + and is_dict_like(self.dtype) + and self.dtype.get(old_col) is not None + and self.dtype.get(col) is None + ): + self.dtype.update({col: self.dtype.get(old_col)}) + this_columns[i] = col + counts[col] = cur_count + 1 + elif have_mi_columns: + # if we have grabbed an extra line, but its not in our + # format so save in the buffer, and create an blank extra + # line for the rest of the parsing code + if hr == header[-1]: + lc = len(this_columns) + # error: Cannot determine type of 'index_col' + sic = self.index_col # type: ignore[has-type] + ic = len(sic) if sic is not None else 0 + unnamed_count = len(this_unnamed_cols) + + # if wrong number of blanks or no index, not our format + if (lc != unnamed_count and lc - ic > unnamed_count) or ic == 0: + clear_buffer = False + this_columns = [None] * lc + self.buf = [self.buf[-1]] + + columns.append(this_columns) + unnamed_cols.update({this_columns[i] for i in this_unnamed_cols}) + + if len(columns) == 1: + num_original_columns = len(this_columns) + + if clear_buffer: + self._clear_buffer() + + first_line: list[Scalar] | None + if names is not None: + # Read first row after header to check if data are longer + try: + first_line = self._next_line() + except StopIteration: + first_line = None + + len_first_data_row = 0 if first_line is None else len(first_line) + + if len(names) > len(columns[0]) and len(names) > len_first_data_row: + raise ValueError( + "Number of passed names did not match " + "number of header fields in the file" + ) + if len(columns) > 1: + raise TypeError("Cannot pass names with multi-index columns") + + if self.usecols is not None: + # Set _use_cols. We don't store columns because they are + # overwritten. 
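+ # _handle_usecols is called here only for its side effect of
+ # setting self._col_indices; the filtered columns it returns
+ # are discarded.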
+ self._handle_usecols(columns, names, num_original_columns) + else: + num_original_columns = len(names) + if self._col_indices is not None and len(names) != len( + self._col_indices + ): + columns = [[names[i] for i in sorted(self._col_indices)]] + else: + columns = [names] + else: + columns = self._handle_usecols( + columns, columns[0], num_original_columns + ) + else: + ncols = len(self._header_line) + num_original_columns = ncols + + if not names: + columns = [list(range(ncols))] + columns = self._handle_usecols(columns, columns[0], ncols) + elif self.usecols is None or len(names) >= ncols: + columns = self._handle_usecols([names], names, ncols) + num_original_columns = len(names) + elif not callable(self.usecols) and len(names) != len(self.usecols): + raise ValueError( + "Number of passed names did not match number of " + "header fields in the file" + ) + else: + # Ignore output but set used columns. + columns = [names] + self._handle_usecols(columns, columns[0], ncols) + + return columns, num_original_columns, unnamed_cols + + @cache_readonly + def _header_line(self): + # Store line for reuse in _get_index_name + if self.header is not None: + return None + + try: + line = self._buffered_line() + except StopIteration as err: + if not self.names: + raise EmptyDataError("No columns to parse from file") from err + + line = self.names[:] + return line + + def _handle_usecols( + self, + columns: list[list[Scalar | None]], + usecols_key: list[Scalar | None], + num_original_columns: int, + ) -> list[list[Scalar | None]]: + """ + Sets self._col_indices + + usecols_key is used if there are string usecols. + """ + col_indices: set[int] | list[int] + if self.usecols is not None: + if callable(self.usecols): + col_indices = self._evaluate_usecols(self.usecols, usecols_key) + elif any(isinstance(u, str) for u in self.usecols): + if len(columns) > 1: + raise ValueError( + "If using multiple headers, usecols must be integers." + ) + col_indices = [] + + for col in self.usecols: + if isinstance(col, str): + try: + col_indices.append(usecols_key.index(col)) + except ValueError: + self._validate_usecols_names(self.usecols, usecols_key) + else: + col_indices.append(col) + else: + missing_usecols = [ + col for col in self.usecols if col >= num_original_columns + ] + if missing_usecols: + raise ParserError( + "Defining usecols without of bounds indices is not allowed. " + f"{missing_usecols} are out of bounds.", + ) + col_indices = self.usecols + + columns = [ + [n for i, n in enumerate(column) if i in col_indices] + for column in columns + ] + self._col_indices = sorted(col_indices) + return columns + + def _buffered_line(self) -> list[Scalar]: + """ + Return a line from buffer, filling buffer if required. + """ + if len(self.buf) > 0: + return self.buf[0] + else: + return self._next_line() + + def _check_for_bom(self, first_row: list[Scalar]) -> list[Scalar]: + """ + Checks whether the file begins with the BOM character. + If it does, remove it. In addition, if there is quoting + in the field subsequent to the BOM, remove it as well + because it technically takes place at the beginning of + the name, not the middle of it. + """ + # first_row will be a list, so we need to check + # that that list is not empty before proceeding. + if not first_row: + return first_row + + # The first element of this row is the one that could have the + # BOM that we want to remove. Check that the first element is a + # string before proceeding. 
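+ # e.g. a first field of '\ufeff"name"' comes out as 'name': the BOM
+ # is dropped and the quoting that follows it is unwrapped.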
+ if not isinstance(first_row[0], str): + return first_row + + # Check that the string is not empty, as that would + # obviously not have a BOM at the start of it. + if not first_row[0]: + return first_row + + # Since the string is non-empty, check that it does + # in fact begin with a BOM. + first_elt = first_row[0][0] + if first_elt != _BOM: + return first_row + + first_row_bom = first_row[0] + new_row: str + + if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar: + start = 2 + quote = first_row_bom[1] + end = first_row_bom[2:].index(quote) + 2 + + # Extract the data between the quotation marks + new_row = first_row_bom[start:end] + + # Extract any remaining data after the second + # quotation mark. + if len(first_row_bom) > end + 1: + new_row += first_row_bom[end + 1 :] + + else: + # No quotation so just remove BOM from first element + new_row = first_row_bom[1:] + + new_row_list: list[Scalar] = [new_row] + return new_row_list + first_row[1:] + + def _is_line_empty(self, line: list[Scalar]) -> bool: + """ + Check if a line is empty or not. + + Parameters + ---------- + line : str, array-like + The line of data to check. + + Returns + ------- + boolean : Whether or not the line is empty. + """ + return not line or all(not x for x in line) + + def _next_line(self) -> list[Scalar]: + if isinstance(self.data, list): + while self.skipfunc(self.pos): + if self.pos >= len(self.data): + break + self.pos += 1 + + while True: + try: + line = self._check_comments([self.data[self.pos]])[0] + self.pos += 1 + # either uncommented or blank to begin with + if not self.skip_blank_lines and ( + self._is_line_empty(self.data[self.pos - 1]) or line + ): + break + if self.skip_blank_lines: + ret = self._remove_empty_lines([line]) + if ret: + line = ret[0] + break + except IndexError: + raise StopIteration + else: + while self.skipfunc(self.pos): + self.pos += 1 + # assert for mypy, data is Iterator[str] or None, would error in next + assert self.data is not None + next(self.data) + + while True: + orig_line = self._next_iter_line(row_num=self.pos + 1) + self.pos += 1 + + if orig_line is not None: + line = self._check_comments([orig_line])[0] + + if self.skip_blank_lines: + ret = self._remove_empty_lines([line]) + + if ret: + line = ret[0] + break + elif self._is_line_empty(orig_line) or line: + break + + # This was the first line of the file, + # which could contain the BOM at the + # beginning of it. + if self.pos == 1: + line = self._check_for_bom(line) + + self.line_pos += 1 + self.buf.append(line) + return line + + def _alert_malformed(self, msg: str, row_num: int) -> None: + """ + Alert a user about a malformed row, depending on value of + `self.on_bad_lines` enum. + + If `self.on_bad_lines` is ERROR, the alert will be `ParserError`. + If `self.on_bad_lines` is WARN, the alert will be printed out. + + Parameters + ---------- + msg: str + The error message to display. + row_num: int + The row number where the parsing error occurred. + Because this row number is displayed, we 1-index, + even though we 0-index internally. + """ + if self.on_bad_lines == self.BadLineHandleMethod.ERROR: + raise ParserError(msg) + if self.on_bad_lines == self.BadLineHandleMethod.WARN: + base = f"Skipping line {row_num}: " + sys.stderr.write(base + msg + "\n") + + def _next_iter_line(self, row_num: int) -> list[Scalar] | None: + """ + Wrapper around iterating through `self.data` (CSV source). 
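+
+ Returns the parsed line as a list of scalars, or None when the line
+ was malformed and ``self.on_bad_lines`` permits skipping it.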
+ + When a CSV error is raised, we check for specific + error messages that allow us to customize the + error message displayed to the user. + + Parameters + ---------- + row_num: int + The row number of the line being parsed. + """ + try: + # assert for mypy, data is Iterator[str] or None, would error in next + assert self.data is not None + line = next(self.data) + # for mypy + assert isinstance(line, list) + return line + except csv.Error as e: + if self.on_bad_lines in ( + self.BadLineHandleMethod.ERROR, + self.BadLineHandleMethod.WARN, + ): + msg = str(e) + + if "NULL byte" in msg or "line contains NUL" in msg: + msg = ( + "NULL byte detected. This byte " + "cannot be processed in Python's " + "native csv library at the moment, " + "so please pass in engine='c' instead" + ) + + if self.skipfooter > 0: + reason = ( + "Error could possibly be due to " + "parsing errors in the skipped footer rows " + "(the skipfooter keyword is only applied " + "after Python's csv library has parsed " + "all rows)." + ) + msg += ". " + reason + + self._alert_malformed(msg, row_num) + return None + + def _check_comments(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + if self.comment is None: + return lines + ret = [] + for line in lines: + rl = [] + for x in line: + if ( + not isinstance(x, str) + or self.comment not in x + or x in self.na_values + ): + rl.append(x) + else: + x = x[: x.find(self.comment)] + if len(x) > 0: + rl.append(x) + break + ret.append(rl) + return ret + + def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + """ + Iterate through the lines and remove any that are + either empty or contain only one whitespace value + + Parameters + ---------- + lines : list of list of Scalars + The array of lines that we are to filter. + + Returns + ------- + filtered_lines : list of list of Scalars + The same array of lines with the "empty" ones removed. + """ + # Remove empty lines and lines with only one whitespace value + ret = [ + line + for line in lines + if ( + len(line) > 1 + or len(line) == 1 + and (not isinstance(line[0], str) or line[0].strip()) + ) + ] + return ret + + def _check_thousands(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + if self.thousands is None: + return lines + + return self._search_replace_num_columns( + lines=lines, search=self.thousands, replace="" + ) + + def _search_replace_num_columns( + self, lines: list[list[Scalar]], search: str, replace: str + ) -> list[list[Scalar]]: + ret = [] + for line in lines: + rl = [] + for i, x in enumerate(line): + if ( + not isinstance(x, str) + or search not in x + or i in self._no_thousands_columns + or not self.num.search(x.strip()) + ): + rl.append(x) + else: + rl.append(x.replace(search, replace)) + ret.append(rl) + return ret + + def _check_decimal(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + if self.decimal == parser_defaults["decimal"]: + return lines + + return self._search_replace_num_columns( + lines=lines, search=self.decimal, replace="." + ) + + def _clear_buffer(self) -> None: + self.buf = [] + + def _get_index_name( + self, + ) -> tuple[Sequence[Hashable] | None, list[Hashable], list[Hashable]]: + """ + Try several cases to get lines: + + 0) There are headers on row 0 and row 1 and their + total summed lengths equals the length of the next line. + Treat row 0 as columns and row 1 as indices + 1) Look for implicit index: there are more columns + on row 1 than row 0. If this is true, assume that row + 1 lists index columns and row 0 lists normal columns. 
+ 2) Get index from the columns if it was listed. + """ + columns: Sequence[Hashable] = self.orig_names + orig_names = list(columns) + columns = list(columns) + + line: list[Scalar] | None + if self._header_line is not None: + line = self._header_line + else: + try: + line = self._next_line() + except StopIteration: + line = None + + next_line: list[Scalar] | None + try: + next_line = self._next_line() + except StopIteration: + next_line = None + + # implicitly index_col=0 b/c 1 fewer column names + implicit_first_cols = 0 + if line is not None: + # leave it 0, #2442 + # Case 1 + # error: Cannot determine type of 'index_col' + index_col = self.index_col # type: ignore[has-type] + if index_col is not False: + implicit_first_cols = len(line) - self.num_original_columns + + # Case 0 + if ( + next_line is not None + and self.header is not None + and index_col is not False + ): + if len(next_line) == len(line) + self.num_original_columns: + # column and index names on diff rows + self.index_col = list(range(len(line))) + self.buf = self.buf[1:] + + for c in reversed(line): + columns.insert(0, c) + + # Update list of original names to include all indices. + orig_names = list(columns) + self.num_original_columns = len(columns) + return line, orig_names, columns + + if implicit_first_cols > 0: + # Case 1 + self._implicit_index = True + if self.index_col is None: + self.index_col = list(range(implicit_first_cols)) + + index_name = None + + else: + # Case 2 + (index_name, _, self.index_col) = self._clean_index_names( + columns, self.index_col + ) + + return index_name, orig_names, columns + + def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]: + col_len = self.num_original_columns + + if self._implicit_index: + col_len += len(self.index_col) + + max_len = max(len(row) for row in content) + + # Check that there are no rows with too many + # elements in their row (rows with too few + # elements are padded with NaN). + # error: Non-overlapping identity check (left operand type: "List[int]", + # right operand type: "Literal[False]") + if ( + max_len > col_len + and self.index_col is not False # type: ignore[comparison-overlap] + and self.usecols is None + ): + footers = self.skipfooter if self.skipfooter else 0 + bad_lines = [] + + iter_content = enumerate(content) + content_len = len(content) + content = [] + + for i, _content in iter_content: + actual_len = len(_content) + + if actual_len > col_len: + if callable(self.on_bad_lines): + new_l = self.on_bad_lines(_content) + if new_l is not None: + content.append(new_l) + elif self.on_bad_lines in ( + self.BadLineHandleMethod.ERROR, + self.BadLineHandleMethod.WARN, + ): + row_num = self.pos - (content_len - i + footers) + bad_lines.append((row_num, actual_len)) + + if self.on_bad_lines == self.BadLineHandleMethod.ERROR: + break + else: + content.append(_content) + + for row_num, actual_len in bad_lines: + msg = ( + f"Expected {col_len} fields in line {row_num + 1}, saw " + f"{actual_len}" + ) + if ( + self.delimiter + and len(self.delimiter) > 1 + and self.quoting != csv.QUOTE_NONE + ): + # see gh-13374 + reason = ( + "Error could possibly be due to quotes being " + "ignored when a multi-char delimiter is used." + ) + msg += ". 
" + reason + + self._alert_malformed(msg, row_num + 1) + + # see gh-13320 + zipped_content = list(lib.to_object_array(content, min_width=col_len).T) + + if self.usecols: + assert self._col_indices is not None + col_indices = self._col_indices + + if self._implicit_index: + zipped_content = [ + a + for i, a in enumerate(zipped_content) + if ( + i < len(self.index_col) + or i - len(self.index_col) in col_indices + ) + ] + else: + zipped_content = [ + a for i, a in enumerate(zipped_content) if i in col_indices + ] + return zipped_content + + def _get_lines(self, rows: int | None = None) -> list[list[Scalar]]: + lines = self.buf + new_rows = None + + # already fetched some number + if rows is not None: + # we already have the lines in the buffer + if len(self.buf) >= rows: + new_rows, self.buf = self.buf[:rows], self.buf[rows:] + + # need some lines + else: + rows -= len(self.buf) + + if new_rows is None: + if isinstance(self.data, list): + if self.pos > len(self.data): + raise StopIteration + if rows is None: + new_rows = self.data[self.pos :] + new_pos = len(self.data) + else: + new_rows = self.data[self.pos : self.pos + rows] + new_pos = self.pos + rows + + new_rows = self._remove_skipped_rows(new_rows) + lines.extend(new_rows) + self.pos = new_pos + + else: + new_rows = [] + try: + if rows is not None: + rows_to_skip = 0 + if self.skiprows is not None and self.pos is not None: + # Only read additional rows if pos is in skiprows + rows_to_skip = len( + set(self.skiprows) - set(range(self.pos)) + ) + + for _ in range(rows + rows_to_skip): + # assert for mypy, data is Iterator[str] or None, would + # error in next + assert self.data is not None + new_rows.append(next(self.data)) + + len_new_rows = len(new_rows) + new_rows = self._remove_skipped_rows(new_rows) + lines.extend(new_rows) + else: + rows = 0 + + while True: + new_row = self._next_iter_line(row_num=self.pos + rows + 1) + rows += 1 + + if new_row is not None: + new_rows.append(new_row) + len_new_rows = len(new_rows) + + except StopIteration: + len_new_rows = len(new_rows) + new_rows = self._remove_skipped_rows(new_rows) + lines.extend(new_rows) + if len(lines) == 0: + raise + self.pos += len_new_rows + + self.buf = [] + else: + lines = new_rows + + if self.skipfooter: + lines = lines[: -self.skipfooter] + + lines = self._check_comments(lines) + if self.skip_blank_lines: + lines = self._remove_empty_lines(lines) + lines = self._check_thousands(lines) + return self._check_decimal(lines) + + def _remove_skipped_rows(self, new_rows: list[list[Scalar]]) -> list[list[Scalar]]: + if self.skiprows: + return [ + row for i, row in enumerate(new_rows) if not self.skipfunc(i + self.pos) + ] + return new_rows + + def _set_no_thousand_columns(self) -> set[int]: + no_thousands_columns: set[int] = set() + if self.columns and self.parse_dates: + assert self._col_indices is not None + no_thousands_columns = self._set_noconvert_dtype_columns( + self._col_indices, self.columns + ) + if self.columns and self.dtype: + assert self._col_indices is not None + for i, col in zip(self._col_indices, self.columns): + if not isinstance(self.dtype, dict) and not is_numeric_dtype( + self.dtype + ): + no_thousands_columns.add(i) + if ( + isinstance(self.dtype, dict) + and col in self.dtype + and ( + not is_numeric_dtype(self.dtype[col]) + or is_bool_dtype(self.dtype[col]) + ) + ): + no_thousands_columns.add(i) + return no_thousands_columns + + +class FixedWidthReader(abc.Iterator): + """ + A reader of fixed-width lines. 
+ """ + + def __init__( + self, + f: IO[str] | ReadCsvBuffer[str], + colspecs: list[tuple[int, int]] | Literal["infer"], + delimiter: str | None, + comment: str | None, + skiprows: set[int] | None = None, + infer_nrows: int = 100, + ) -> None: + self.f = f + self.buffer: Iterator | None = None + self.delimiter = "\r\n" + delimiter if delimiter else "\n\r\t " + self.comment = comment + if colspecs == "infer": + self.colspecs = self.detect_colspecs( + infer_nrows=infer_nrows, skiprows=skiprows + ) + else: + self.colspecs = colspecs + + if not isinstance(self.colspecs, (tuple, list)): + raise TypeError( + "column specifications must be a list or tuple, " + f"input was a {type(colspecs).__name__}" + ) + + for colspec in self.colspecs: + if not ( + isinstance(colspec, (tuple, list)) + and len(colspec) == 2 + and isinstance(colspec[0], (int, np.integer, type(None))) + and isinstance(colspec[1], (int, np.integer, type(None))) + ): + raise TypeError( + "Each column specification must be " + "2 element tuple or list of integers" + ) + + def get_rows(self, infer_nrows: int, skiprows: set[int] | None = None) -> list[str]: + """ + Read rows from self.f, skipping as specified. + + We distinguish buffer_rows (the first <= infer_nrows + lines) from the rows returned to detect_colspecs + because it's simpler to leave the other locations + with skiprows logic alone than to modify them to + deal with the fact we skipped some rows here as + well. + + Parameters + ---------- + infer_nrows : int + Number of rows to read from self.f, not counting + rows that are skipped. + skiprows: set, optional + Indices of rows to skip. + + Returns + ------- + detect_rows : list of str + A list containing the rows to read. + + """ + if skiprows is None: + skiprows = set() + buffer_rows = [] + detect_rows = [] + for i, row in enumerate(self.f): + if i not in skiprows: + detect_rows.append(row) + buffer_rows.append(row) + if len(detect_rows) >= infer_nrows: + break + self.buffer = iter(buffer_rows) + return detect_rows + + def detect_colspecs( + self, infer_nrows: int = 100, skiprows: set[int] | None = None + ) -> list[tuple[int, int]]: + # Regex escape the delimiters + delimiters = "".join([rf"\{x}" for x in self.delimiter]) + pattern = re.compile(f"([^{delimiters}]+)") + rows = self.get_rows(infer_nrows, skiprows) + if not rows: + raise EmptyDataError("No rows from which to infer column width") + max_len = max(map(len, rows)) + mask = np.zeros(max_len + 1, dtype=int) + if self.comment is not None: + rows = [row.partition(self.comment)[0] for row in rows] + for row in rows: + for m in pattern.finditer(row): + mask[m.start() : m.end()] = 1 + shifted = np.roll(mask, 1) + shifted[0] = 0 + edges = np.where((mask ^ shifted) == 1)[0] + edge_pairs = list(zip(edges[::2], edges[1::2])) + return edge_pairs + + def __next__(self) -> list[str]: + # Argument 1 to "next" has incompatible type "Union[IO[str], + # ReadCsvBuffer[str]]"; expected "SupportsNext[str]" + if self.buffer is not None: + try: + line = next(self.buffer) + except StopIteration: + self.buffer = None + line = next(self.f) # type: ignore[arg-type] + else: + line = next(self.f) # type: ignore[arg-type] + # Note: 'colspecs' is a sequence of half-open intervals. + return [line[from_:to].strip(self.delimiter) for (from_, to) in self.colspecs] + + +class FixedWidthFieldParser(PythonParser): + """ + Specialization that Converts fixed-width fields into DataFrames. + See PythonParser for details. 
+ """ + + def __init__(self, f: ReadCsvBuffer[str], **kwds) -> None: + # Support iterators, convert to a list. + self.colspecs = kwds.pop("colspecs") + self.infer_nrows = kwds.pop("infer_nrows") + PythonParser.__init__(self, f, **kwds) + + def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> FixedWidthReader: + return FixedWidthReader( + f, + self.colspecs, + self.delimiter, + self.comment, + self.skiprows, + self.infer_nrows, + ) + + def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + """ + Returns the list of lines without the empty ones. With fixed-width + fields, empty lines become arrays of empty strings. + + See PythonParser._remove_empty_lines. + """ + return [ + line + for line in lines + if any(not isinstance(e, str) or e.strip() for e in line) + ] + + +def count_empty_vals(vals) -> int: + return sum(1 for v in vals if v == "" or v is None) + + +def _validate_skipfooter_arg(skipfooter: int) -> int: + """ + Validate the 'skipfooter' parameter. + + Checks whether 'skipfooter' is a non-negative integer. + Raises a ValueError if that is not the case. + + Parameters + ---------- + skipfooter : non-negative integer + The number of rows to skip at the end of the file. + + Returns + ------- + validated_skipfooter : non-negative integer + The original input if the validation succeeds. + + Raises + ------ + ValueError : 'skipfooter' was not a non-negative integer. + """ + if not is_integer(skipfooter): + raise ValueError("skipfooter must be an integer") + + if skipfooter < 0: + raise ValueError("skipfooter cannot be negative") + + # Incompatible return value type (got "Union[int, integer[Any]]", expected "int") + return skipfooter # type: ignore[return-value] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/readers.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/readers.py new file mode 100644 index 00000000..7fad2b77 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/parsers/readers.py @@ -0,0 +1,2171 @@ +""" +Module contains tools for processing files into DataFrames or other objects + +GH#48849 provides a convenient way of deprecating keyword arguments +""" +from __future__ import annotations + +from collections import abc +import csv +import sys +from textwrap import fill +from typing import ( + IO, + TYPE_CHECKING, + Any, + Callable, + Literal, + NamedTuple, + TypedDict, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas._libs.parsers import STR_NA_VALUES +from pandas.errors import ( + AbstractMethodError, + ParserWarning, +) +from pandas.util._decorators import Appender +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.common import ( + is_file_like, + is_float, + is_integer, + is_list_like, +) + +from pandas.core.frame import DataFrame +from pandas.core.indexes.api import RangeIndex +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import ( + IOHandles, + get_handle, + stringify_path, + validate_header_arg, +) +from pandas.io.parsers.arrow_parser_wrapper import ArrowParserWrapper +from pandas.io.parsers.base_parser import ( + ParserBase, + is_index_col, + parser_defaults, +) +from pandas.io.parsers.c_parser_wrapper import CParserWrapper +from pandas.io.parsers.python_parser import ( + FixedWidthFieldParser, + PythonParser, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Mapping, + Sequence, + ) + from types import 
TracebackType + + from pandas._typing import ( + CompressionOptions, + CSVEngine, + DtypeArg, + DtypeBackend, + FilePath, + HashableT, + IndexLabel, + ReadCsvBuffer, + StorageOptions, + ) +_doc_read_csv_and_table = ( + r""" +{summary} + +Also supports optionally iterating or breaking of the file +into chunks. + +Additional help can be found in the online docs for +`IO Tools `_. + +Parameters +---------- +filepath_or_buffer : str, path object or file-like object + Any valid string path is acceptable. The string could be a URL. Valid + URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is + expected. A local file could be: file://localhost/path/to/table.csv. + + If you want to pass in a path object, pandas accepts any ``os.PathLike``. + + By file-like object, we refer to objects with a ``read()`` method, such as + a file handle (e.g. via builtin ``open`` function) or ``StringIO``. +sep : str, default {_default_sep} + Character or regex pattern to treat as the delimiter. If ``sep=None``, the + C engine cannot automatically detect + the separator, but the Python parsing engine can, meaning the latter will + be used and automatically detect the separator from only the first valid + row of the file by Python's builtin sniffer tool, ``csv.Sniffer``. + In addition, separators longer than 1 character and different from + ``'\s+'`` will be interpreted as regular expressions and will also force + the use of the Python parsing engine. Note that regex delimiters are prone + to ignoring quoted data. Regex example: ``'\r\t'``. +delimiter : str, optional + Alias for ``sep``. +header : int, Sequence of int, 'infer' or None, default 'infer' + Row number(s) containing column labels and marking the start of the + data (zero-indexed). Default behavior is to infer the column names: if no ``names`` + are passed the behavior is identical to ``header=0`` and column + names are inferred from the first line of the file, if column + names are passed explicitly to ``names`` then the behavior is identical to + ``header=None``. Explicitly pass ``header=0`` to be able to + replace existing names. The header can be a list of integers that + specify row locations for a :class:`~pandas.MultiIndex` on the columns + e.g. ``[0, 1, 3]``. Intervening rows that are not specified will be + skipped (e.g. 2 in this example is skipped). Note that this + parameter ignores commented lines and empty lines if + ``skip_blank_lines=True``, so ``header=0`` denotes the first line of + data rather than the first line of the file. +names : Sequence of Hashable, optional + Sequence of column labels to apply. If the file contains a header row, + then you should explicitly pass ``header=0`` to override the column names. + Duplicates in this list are not allowed. +index_col : Hashable, Sequence of Hashable or False, optional + Column(s) to use as row label(s), denoted either by column labels or column + indices. If a sequence of labels or indices is given, :class:`~pandas.MultiIndex` + will be formed for the row labels. + + Note: ``index_col=False`` can be used to force pandas to *not* use the first + column as the index, e.g., when you have a malformed file with delimiters at + the end of each line. +usecols : list of Hashable or Callable, optional + Subset of columns to select, denoted either by column labels or column indices. + If list-like, all elements must either + be positional (i.e. 
integer indices into the document columns) or strings + that correspond to column names provided either by the user in ``names`` or + inferred from the document header row(s). If ``names`` are given, the document + header row(s) are not taken into account. For example, a valid list-like + ``usecols`` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``. + Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. + To instantiate a :class:`~pandas.DataFrame` from ``data`` with element order + preserved use ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` + for columns in ``['foo', 'bar']`` order or + ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]`` + for ``['bar', 'foo']`` order. + + If callable, the callable function will be evaluated against the column + names, returning names where the callable function evaluates to ``True``. An + example of a valid callable argument would be ``lambda x: x.upper() in + ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster + parsing time and lower memory usage. +dtype : dtype or dict of {{Hashable : dtype}}, optional + Data type(s) to apply to either the whole dataset or individual columns. + E.g., ``{{'a': np.float64, 'b': np.int32, 'c': 'Int64'}}`` + Use ``str`` or ``object`` together with suitable ``na_values`` settings + to preserve and not interpret ``dtype``. + If ``converters`` are specified, they will be applied INSTEAD + of ``dtype`` conversion. + + .. versionadded:: 1.5.0 + + Support for ``defaultdict`` was added. Specify a ``defaultdict`` as input where + the default determines the ``dtype`` of the columns which are not explicitly + listed. +engine : {{'c', 'python', 'pyarrow'}}, optional + Parser engine to use. The C and pyarrow engines are faster, while the python engine + is currently more feature-complete. Multithreading is currently only supported by + the pyarrow engine. + + .. versionadded:: 1.4.0 + + The 'pyarrow' engine was added as an *experimental* engine, and some features + are unsupported, or may not work correctly, with this engine. +converters : dict of {{Hashable : Callable}}, optional + Functions for converting values in specified columns. Keys can either + be column labels or column indices. +true_values : list, optional + Values to consider as ``True`` in addition to case-insensitive variants of 'True'. +false_values : list, optional + Values to consider as ``False`` in addition to case-insensitive variants of 'False'. +skipinitialspace : bool, default False + Skip spaces after delimiter. +skiprows : int, list of int or Callable, optional + Line numbers to skip (0-indexed) or number of lines to skip (``int``) + at the start of the file. + + If callable, the callable function will be evaluated against the row + indices, returning ``True`` if the row should be skipped and ``False`` otherwise. + An example of a valid callable argument would be ``lambda x: x in [0, 2]``. +skipfooter : int, default 0 + Number of lines at bottom of file to skip (Unsupported with ``engine='c'``). +nrows : int, optional + Number of rows of file to read. Useful for reading pieces of large files. +na_values : Hashable, Iterable of Hashable or dict of {{Hashable : Iterable}}, optional + Additional strings to recognize as ``NA``/``NaN``. If ``dict`` passed, specific + per-column ``NA`` values. By default the following values are interpreted as + ``NaN``: " """ + + fill('", "'.join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ") + + """ ". 
+ +keep_default_na : bool, default True + Whether or not to include the default ``NaN`` values when parsing the data. + Depending on whether ``na_values`` is passed in, the behavior is as follows: + + * If ``keep_default_na`` is ``True``, and ``na_values`` are specified, ``na_values`` + is appended to the default ``NaN`` values used for parsing. + * If ``keep_default_na`` is ``True``, and ``na_values`` are not specified, only + the default ``NaN`` values are used for parsing. + * If ``keep_default_na`` is ``False``, and ``na_values`` are specified, only + the ``NaN`` values specified ``na_values`` are used for parsing. + * If ``keep_default_na`` is ``False``, and ``na_values`` are not specified, no + strings will be parsed as ``NaN``. + + Note that if ``na_filter`` is passed in as ``False``, the ``keep_default_na`` and + ``na_values`` parameters will be ignored. +na_filter : bool, default True + Detect missing value markers (empty strings and the value of ``na_values``). In + data without any ``NA`` values, passing ``na_filter=False`` can improve the + performance of reading a large file. +verbose : bool, default False + Indicate number of ``NA`` values placed in non-numeric columns. +skip_blank_lines : bool, default True + If ``True``, skip over blank lines rather than interpreting as ``NaN`` values. +parse_dates : bool, list of Hashable, list of lists or dict of {{Hashable : list}}, \ +default False + The behavior is as follows: + + * ``bool``. If ``True`` -> try parsing the index. + * ``list`` of ``int`` or names. e.g. If ``[1, 2, 3]`` -> try parsing columns 1, 2, 3 + each as a separate date column. + * ``list`` of ``list``. e.g. If ``[[1, 3]]`` -> combine columns 1 and 3 and parse + as a single date column. + * ``dict``, e.g. ``{{'foo' : [1, 3]}}`` -> parse columns 1, 3 as date and call + result 'foo' + + If a column or index cannot be represented as an array of ``datetime``, + say because of an unparsable value or a mixture of timezones, the column + or index will be returned unaltered as an ``object`` data type. For + non-standard ``datetime`` parsing, use :func:`~pandas.to_datetime` after + :func:`~pandas.read_csv`. + + Note: A fast-path exists for iso8601-formatted dates. +infer_datetime_format : bool, default False + If ``True`` and ``parse_dates`` is enabled, pandas will attempt to infer the + format of the ``datetime`` strings in the columns, and if it can be inferred, + switch to a faster method of parsing them. In some cases this can increase + the parsing speed by 5-10x. + + .. deprecated:: 2.0.0 + A strict version of this argument is now the default, passing it has no effect. + +keep_date_col : bool, default False + If ``True`` and ``parse_dates`` specifies combining multiple columns then + keep the original columns. +date_parser : Callable, optional + Function to use for converting a sequence of string columns to an array of + ``datetime`` instances. The default uses ``dateutil.parser.parser`` to do the + conversion. pandas will try to call ``date_parser`` in three different ways, + advancing to the next if an exception occurs: 1) Pass one or more arrays + (as defined by ``parse_dates``) as arguments; 2) concatenate (row-wise) the + string values from the columns defined by ``parse_dates`` into a single array + and pass that; and 3) call ``date_parser`` once for each row using one or + more strings (corresponding to the columns defined by ``parse_dates``) as + arguments. + + .. 
deprecated:: 2.0.0 + Use ``date_format`` instead, or read in as ``object`` and then apply + :func:`~pandas.to_datetime` as-needed. +date_format : str or dict of column -> format, optional + Format to use for parsing dates when used in conjunction with ``parse_dates``. + For anything more complex, please read in as ``object`` and then apply + :func:`~pandas.to_datetime` as-needed. + + .. versionadded:: 2.0.0 +dayfirst : bool, default False + DD/MM format dates, international and European format. +cache_dates : bool, default True + If ``True``, use a cache of unique, converted dates to apply the ``datetime`` + conversion. May produce significant speed-up when parsing duplicate + date strings, especially ones with timezone offsets. + +iterator : bool, default False + Return ``TextFileReader`` object for iteration or getting chunks with + ``get_chunk()``. + + .. versionchanged:: 1.2 + + ``TextFileReader`` is a context manager. +chunksize : int, optional + Number of lines to read from the file per chunk. Passing a value will cause the + function to return a ``TextFileReader`` object for iteration. + See the `IO Tools docs + `_ + for more information on ``iterator`` and ``chunksize``. + + .. versionchanged:: 1.2 + + ``TextFileReader`` is a context manager. +{decompression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + +thousands : str (length 1), optional + Character acting as the thousands separator in numerical values. +decimal : str (length 1), default '.' + Character to recognize as decimal point (e.g., use ',' for European data). +lineterminator : str (length 1), optional + Character used to denote a line break. Only valid with C parser. +quotechar : str (length 1), optional + Character used to denote the start and end of a quoted item. Quoted + items can include the ``delimiter`` and it will be ignored. +quoting : {{0 or csv.QUOTE_MINIMAL, 1 or csv.QUOTE_ALL, 2 or csv.QUOTE_NONNUMERIC, \ +3 or csv.QUOTE_NONE}}, default csv.QUOTE_MINIMAL + Control field quoting behavior per ``csv.QUOTE_*`` constants. Default is + ``csv.QUOTE_MINIMAL`` (i.e., 0) which implies that only fields containing special + characters are quoted (e.g., characters defined in ``quotechar``, ``delimiter``, + or ``lineterminator``. +doublequote : bool, default True + When ``quotechar`` is specified and ``quoting`` is not ``QUOTE_NONE``, indicate + whether or not to interpret two consecutive ``quotechar`` elements INSIDE a + field as a single ``quotechar`` element. +escapechar : str (length 1), optional + Character used to escape other characters. +comment : str (length 1), optional + Character indicating that the remainder of line should not be parsed. + If found at the beginning + of a line, the line will be ignored altogether. This parameter must be a + single character. Like empty lines (as long as ``skip_blank_lines=True``), + fully commented lines are ignored by the parameter ``header`` but not by + ``skiprows``. For example, if ``comment='#'``, parsing + ``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in ``'a,b,c'`` being + treated as the header. +encoding : str, optional, default 'utf-8' + Encoding to use for UTF when reading/writing (ex. ``'utf-8'``). `List of Python + standard encodings + `_ . + + .. versionchanged:: 1.2 + + When ``encoding`` is ``None``, ``errors='replace'`` is passed to + ``open()``. Otherwise, ``errors='strict'`` is passed to ``open()``. + This behavior was previously only the case for ``engine='python'``. + + .. 
versionchanged:: 1.3.0 + + ``encoding_errors`` is a new argument. ``encoding`` has no longer an + influence on how encoding errors are handled. + +encoding_errors : str, optional, default 'strict' + How encoding errors are treated. `List of possible values + `_ . + + .. versionadded:: 1.3.0 + +dialect : str or csv.Dialect, optional + If provided, this parameter will override values (default or not) for the + following parameters: ``delimiter``, ``doublequote``, ``escapechar``, + ``skipinitialspace``, ``quotechar``, and ``quoting``. If it is necessary to + override values, a ``ParserWarning`` will be issued. See ``csv.Dialect`` + documentation for more details. +on_bad_lines : {{'error', 'warn', 'skip'}} or Callable, default 'error' + Specifies what to do upon encountering a bad line (a line with too many fields). + Allowed values are : + + - ``'error'``, raise an Exception when a bad line is encountered. + - ``'warn'``, raise a warning when a bad line is encountered and skip that line. + - ``'skip'``, skip bad lines without raising or warning when they are encountered. + + .. versionadded:: 1.3.0 + + .. versionadded:: 1.4.0 + + - Callable, function with signature + ``(bad_line: list[str]) -> list[str] | None`` that will process a single + bad line. ``bad_line`` is a list of strings split by the ``sep``. + If the function returns ``None``, the bad line will be ignored. + If the function returns a new ``list`` of strings with more elements than + expected, a ``ParserWarning`` will be emitted while dropping extra elements. + Only supported when ``engine='python'`` + +delim_whitespace : bool, default False + Specifies whether or not whitespace (e.g. ``' '`` or ``'\\t'``) will be + used as the ``sep`` delimiter. Equivalent to setting ``sep='\\s+'``. If this option + is set to ``True``, nothing should be passed in for the ``delimiter`` + parameter. +low_memory : bool, default True + Internally process the file in chunks, resulting in lower memory use + while parsing, but possibly mixed type inference. To ensure no mixed + types either set ``False``, or specify the type with the ``dtype`` parameter. + Note that the entire file is read into a single :class:`~pandas.DataFrame` + regardless, use the ``chunksize`` or ``iterator`` parameter to return the data in + chunks. (Only valid with C parser). +memory_map : bool, default False + If a filepath is provided for ``filepath_or_buffer``, map the file object + directly onto memory and access the data directly from there. Using this + option can improve performance because there is no longer any I/O overhead. +float_precision : {{'high', 'legacy', 'round_trip'}}, optional + Specifies which converter the C engine should use for floating-point + values. The options are ``None`` or ``'high'`` for the ordinary converter, + ``'legacy'`` for the original lower precision pandas converter, and + ``'round_trip'`` for the round-trip converter. + + .. versionchanged:: 1.2 + +{storage_options} + + .. versionadded:: 1.2 + +dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + +Returns +------- +DataFrame or TextFileReader + A comma-separated values (csv) file is returned as two-dimensional + data structure with labeled axes. 
+ +See Also +-------- +DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file. +{see_also_func_name} : {see_also_func_summary} +read_fwf : Read a table of fixed-width formatted lines into DataFrame. + +Examples +-------- +>>> pd.{func_name}('data.csv') # doctest: +SKIP +""" +) + + +class _C_Parser_Defaults(TypedDict): + delim_whitespace: Literal[False] + na_filter: Literal[True] + low_memory: Literal[True] + memory_map: Literal[False] + float_precision: None + + +_c_parser_defaults: _C_Parser_Defaults = { + "delim_whitespace": False, + "na_filter": True, + "low_memory": True, + "memory_map": False, + "float_precision": None, +} + + +class _Fwf_Defaults(TypedDict): + colspecs: Literal["infer"] + infer_nrows: Literal[100] + widths: None + + +_fwf_defaults: _Fwf_Defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None} +_c_unsupported = {"skipfooter"} +_python_unsupported = {"low_memory", "float_precision"} +_pyarrow_unsupported = { + "skipfooter", + "float_precision", + "chunksize", + "comment", + "nrows", + "thousands", + "memory_map", + "dialect", + "on_bad_lines", + "delim_whitespace", + "quoting", + "lineterminator", + "converters", + "iterator", + "dayfirst", + "verbose", + "skipinitialspace", + "low_memory", +} + + +class _DeprecationConfig(NamedTuple): + default_value: Any + msg: str | None + + +@overload +def validate_integer(name: str, val: None, min_val: int = ...) -> None: + ... + + +@overload +def validate_integer(name: str, val: float, min_val: int = ...) -> int: + ... + + +@overload +def validate_integer(name: str, val: int | None, min_val: int = ...) -> int | None: + ... + + +def validate_integer( + name: str, val: int | float | None, min_val: int = 0 +) -> int | None: + """ + Checks whether the 'name' parameter for parsing is either + an integer OR float that can SAFELY be cast to an integer + without losing accuracy. Raises a ValueError if that is + not the case. + + Parameters + ---------- + name : str + Parameter name (used for error reporting) + val : int or float + The value to check + min_val : int + Minimum allowed value (val < min_val will result in a ValueError) + """ + if val is None: + return val + + msg = f"'{name:s}' must be an integer >={min_val:d}" + if is_float(val): + if int(val) != val: + raise ValueError(msg) + val = int(val) + elif not (is_integer(val) and val >= min_val): + raise ValueError(msg) + + return int(val) + + +def _validate_names(names: Sequence[Hashable] | None) -> None: + """ + Raise ValueError if the `names` parameter contains duplicates or has an + invalid data type. + + Parameters + ---------- + names : array-like or None + An array containing a list of the names used for the output DataFrame. + + Raises + ------ + ValueError + If names are not unique or are not ordered (e.g. set). 
+ """ + if names is not None: + if len(names) != len(set(names)): + raise ValueError("Duplicate names are not allowed.") + if not ( + is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView) + ): + raise ValueError("Names should be an ordered collection.") + + +def _read( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], kwds +) -> DataFrame | TextFileReader: + """Generic reader of line files.""" + # if we pass a date_parser and parse_dates=False, we should not parse the + # dates GH#44366 + if kwds.get("parse_dates", None) is None: + if ( + kwds.get("date_parser", lib.no_default) is lib.no_default + and kwds.get("date_format", None) is None + ): + kwds["parse_dates"] = False + else: + kwds["parse_dates"] = True + + # Extract some of the arguments (pass chunksize on). + iterator = kwds.get("iterator", False) + chunksize = kwds.get("chunksize", None) + if kwds.get("engine") == "pyarrow": + if iterator: + raise ValueError( + "The 'iterator' option is not supported with the 'pyarrow' engine" + ) + + if chunksize is not None: + raise ValueError( + "The 'chunksize' option is not supported with the 'pyarrow' engine" + ) + else: + chunksize = validate_integer("chunksize", chunksize, 1) + + nrows = kwds.get("nrows", None) + + # Check for duplicates in names. + _validate_names(kwds.get("names", None)) + + # Create the parser. + parser = TextFileReader(filepath_or_buffer, **kwds) + + if chunksize or iterator: + return parser + + with parser: + return parser.read(nrows) + + +# iterator=True -> TextFileReader +@overload +def read_csv( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] | None = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: Literal[True], + chunksize: int | None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: Literal["high", "legacy"] | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> TextFileReader: + ... 
+ + +# chunksize=int -> TextFileReader +@overload +def read_csv( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] | None = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: bool = ..., + chunksize: int, + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: Literal["high", "legacy"] | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> TextFileReader: + ... 
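This is the overload a type checker selects when a concrete chunksize is supplied; at runtime _read validates it against a minimum of 1 and hands back the TextFileReader itself instead of calling read(). A short usage sketch:

import pandas as pd
from io import StringIO

csv_buf = StringIO("a,b\n1,2\n3,4\n5,6\n7,8\n")

# chunksize switches read_csv to lazy mode: the TextFileReader is a
# context manager (since 1.2) that yields DataFrames of at most two rows.
with pd.read_csv(csv_buf, chunksize=2) as reader:
    for chunk in reader:
        print(chunk.shape)  # (2, 2), then (2, 2)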
+ + +# default case -> DataFrame +@overload +def read_csv( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] | None = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: Literal[False] = ..., + chunksize: None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: Literal["high", "legacy"] | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame: + ... 
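When iterator stays False and chunksize stays None, this default overload applies and the call eagerly produces a DataFrame. A sketch combining the dtype and na_values options documented above (the column names and the "missing" token are illustrative):

import pandas as pd
from io import StringIO

df = pd.read_csv(
    StringIO("id,score\n1,3.5\n2,missing\n"),
    dtype={"id": "Int64"},   # nullable integer dtype for 'id'
    na_values=["missing"],   # extend the default NA token set
)
print(df["score"].isna().sum())  # 1: 'missing' was parsed as NaN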
+ + +# Unions -> DataFrame | TextFileReader +@overload +def read_csv( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] | None = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: bool = ..., + chunksize: int | None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: Literal["high", "legacy"] | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame | TextFileReader: + ... 
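This catch-all overload covers call sites where iterator or chunksize is only known at runtime, in which case static analysis reports the DataFrame | TextFileReader union. A sketch with a hypothetical load helper:

import pandas as pd
from io import StringIO

def load(buf, stream: bool):
    # 'stream' is a plain bool, not a literal, so a type checker resolves
    # this call against the DataFrame | TextFileReader overload above.
    return pd.read_csv(buf, iterator=stream)

result = load(StringIO("a,b\n1,2\n"), stream=False)
print(type(result).__name__)  # DataFrame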
+ + +@Appender( + _doc_read_csv_and_table.format( + func_name="read_csv", + summary="Read a comma-separated values (csv) file into DataFrame.", + see_also_func_name="read_table", + see_also_func_summary="Read general delimited file into DataFrame.", + _default_sep="','", + storage_options=_shared_docs["storage_options"], + decompression_options=_shared_docs["decompression_options"] + % "filepath_or_buffer", + ) +) +def read_csv( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = lib.no_default, + delimiter: str | None | lib.NoDefault = None, + # Column and Index Locations and Names + header: int | Sequence[int] | None | Literal["infer"] = "infer", + names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default, + index_col: IndexLabel | Literal[False] | None = None, + usecols: list[HashableT] | Callable[[Hashable], bool] | None = None, + # General Parsing Configuration + dtype: DtypeArg | None = None, + engine: CSVEngine | None = None, + converters: Mapping[Hashable, Callable] | None = None, + true_values: list | None = None, + false_values: list | None = None, + skipinitialspace: bool = False, + skiprows: list[int] | int | Callable[[Hashable], bool] | None = None, + skipfooter: int = 0, + nrows: int | None = None, + # NA and Missing Data Handling + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, + keep_default_na: bool = True, + na_filter: bool = True, + verbose: bool = False, + skip_blank_lines: bool = True, + # Datetime Handling + parse_dates: bool | Sequence[Hashable] | None = None, + infer_datetime_format: bool | lib.NoDefault = lib.no_default, + keep_date_col: bool = False, + date_parser: Callable | lib.NoDefault = lib.no_default, + date_format: str | None = None, + dayfirst: bool = False, + cache_dates: bool = True, + # Iteration + iterator: bool = False, + chunksize: int | None = None, + # Quoting, Compression, and File Format + compression: CompressionOptions = "infer", + thousands: str | None = None, + decimal: str = ".", + lineterminator: str | None = None, + quotechar: str = '"', + quoting: int = csv.QUOTE_MINIMAL, + doublequote: bool = True, + escapechar: str | None = None, + comment: str | None = None, + encoding: str | None = None, + encoding_errors: str | None = "strict", + dialect: str | csv.Dialect | None = None, + # Error Handling + on_bad_lines: str = "error", + # Internal + delim_whitespace: bool = False, + low_memory: bool = _c_parser_defaults["low_memory"], + memory_map: bool = False, + float_precision: Literal["high", "legacy"] | None = None, + storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame | TextFileReader: + if infer_datetime_format is not lib.no_default: + warnings.warn( + "The argument 'infer_datetime_format' is deprecated and will " + "be removed in a future version. " + "A strict version of it is now the default, see " + "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. 
" + "You can safely remove this argument.", + FutureWarning, + stacklevel=find_stack_level(), + ) + # locals() should never be modified + kwds = locals().copy() + del kwds["filepath_or_buffer"] + del kwds["sep"] + + kwds_defaults = _refine_defaults_read( + dialect, + delimiter, + delim_whitespace, + engine, + sep, + on_bad_lines, + names, + defaults={"delimiter": ","}, + dtype_backend=dtype_backend, + ) + kwds.update(kwds_defaults) + + return _read(filepath_or_buffer, kwds) + + +# iterator=True -> TextFileReader +@overload +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: Literal[True], + chunksize: int | None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: str | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> TextFileReader: + ... 
+ + +# chunksize=int -> TextFileReader +@overload +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: bool = ..., + chunksize: int, + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: str | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> TextFileReader: + ... 
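Besides plain iteration, the TextFileReader produced by these overloads exposes get_chunk() for pulling an explicit number of rows on demand. A minimal sketch:

import pandas as pd
from io import StringIO

with pd.read_table(
    StringIO("a\tb\n1\t2\n3\t4\n5\t6\n"), iterator=True
) as reader:
    head = reader.get_chunk(2)    # first two data rows
    tail = reader.get_chunk(1)    # the next row
    print(len(head), len(tail))   # 2 1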
+ + +# default -> DataFrame +@overload +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: Literal[False] = ..., + chunksize: None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: str | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame: + ... 
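As the _clean_options logic further down shows, pandas falls back from the C engine to the Python engine for features the C parser lacks (sep=None sniffing, skipfooter, regex separators), emitting a ParserWarning unless the engine was requested explicitly. A sketch of the sniffing path with the engine named up front to avoid that warning:

import pandas as pd
from io import StringIO

# sep=None asks csv.Sniffer (python engine only) to detect the delimiter
# from the first valid row, as described in the docstring above.
df = pd.read_csv(StringIO("a;b\n1;2\n3;4\n"), sep=None, engine="python")
print(df.columns.tolist())  # ['a', 'b']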
+ + +# Unions -> DataFrame | TextFileReader +@overload +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: list[HashableT] | Callable[[Hashable], bool] | None = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: bool = ..., + chunksize: int | None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: str | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame | TextFileReader: + ... + + +@Appender( + _doc_read_csv_and_table.format( + func_name="read_table", + summary="Read general delimited file into DataFrame.", + see_also_func_name="read_csv", + see_also_func_summary=( + "Read a comma-separated values (csv) file into DataFrame." 
+ ), + _default_sep=r"'\\t' (tab-stop)", + storage_options=_shared_docs["storage_options"], + decompression_options=_shared_docs["decompression_options"] + % "filepath_or_buffer", + ) +) +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = lib.no_default, + delimiter: str | None | lib.NoDefault = None, + # Column and Index Locations and Names + header: int | Sequence[int] | None | Literal["infer"] = "infer", + names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default, + index_col: IndexLabel | Literal[False] | None = None, + usecols: list[HashableT] | Callable[[Hashable], bool] | None = None, + # General Parsing Configuration + dtype: DtypeArg | None = None, + engine: CSVEngine | None = None, + converters: Mapping[Hashable, Callable] | None = None, + true_values: list | None = None, + false_values: list | None = None, + skipinitialspace: bool = False, + skiprows: list[int] | int | Callable[[Hashable], bool] | None = None, + skipfooter: int = 0, + nrows: int | None = None, + # NA and Missing Data Handling + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, + keep_default_na: bool = True, + na_filter: bool = True, + verbose: bool = False, + skip_blank_lines: bool = True, + # Datetime Handling + parse_dates: bool | Sequence[Hashable] = False, + infer_datetime_format: bool | lib.NoDefault = lib.no_default, + keep_date_col: bool = False, + date_parser: Callable | lib.NoDefault = lib.no_default, + date_format: str | None = None, + dayfirst: bool = False, + cache_dates: bool = True, + # Iteration + iterator: bool = False, + chunksize: int | None = None, + # Quoting, Compression, and File Format + compression: CompressionOptions = "infer", + thousands: str | None = None, + decimal: str = ".", + lineterminator: str | None = None, + quotechar: str = '"', + quoting: int = csv.QUOTE_MINIMAL, + doublequote: bool = True, + escapechar: str | None = None, + comment: str | None = None, + encoding: str | None = None, + encoding_errors: str | None = "strict", + dialect: str | csv.Dialect | None = None, + # Error Handling + on_bad_lines: str = "error", + # Internal + delim_whitespace: bool = False, + low_memory: bool = _c_parser_defaults["low_memory"], + memory_map: bool = False, + float_precision: str | None = None, + storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame | TextFileReader: + if infer_datetime_format is not lib.no_default: + warnings.warn( + "The argument 'infer_datetime_format' is deprecated and will " + "be removed in a future version. " + "A strict version of it is now the default, see " + "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. 
" + "You can safely remove this argument.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + # locals() should never be modified + kwds = locals().copy() + del kwds["filepath_or_buffer"] + del kwds["sep"] + + kwds_defaults = _refine_defaults_read( + dialect, + delimiter, + delim_whitespace, + engine, + sep, + on_bad_lines, + names, + defaults={"delimiter": "\t"}, + dtype_backend=dtype_backend, + ) + kwds.update(kwds_defaults) + + return _read(filepath_or_buffer, kwds) + + +def read_fwf( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + colspecs: Sequence[tuple[int, int]] | str | None = "infer", + widths: Sequence[int] | None = None, + infer_nrows: int = 100, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + **kwds, +) -> DataFrame | TextFileReader: + r""" + Read a table of fixed-width formatted lines into DataFrame. + + Also supports optionally iterating or breaking of the file + into chunks. + + Additional help can be found in the `online docs for IO Tools + `_. + + Parameters + ---------- + filepath_or_buffer : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a text ``read()`` function.The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: + ``file://localhost/path/to/table.csv``. + colspecs : list of tuple (int, int) or 'infer'. optional + A list of tuples giving the extents of the fixed-width + fields of each line as half-open intervals (i.e., [from, to[ ). + String value 'infer' can be used to instruct the parser to try + detecting the column specifications from the first 100 rows of + the data which are not being skipped via skiprows (default='infer'). + widths : list of int, optional + A list of field widths which can be used instead of 'colspecs' if + the intervals are contiguous. + infer_nrows : int, default 100 + The number of rows to consider when letting the parser determine the + `colspecs`. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + **kwds : optional + Optional keyword arguments can be passed to ``TextFileReader``. + + Returns + ------- + DataFrame or TextFileReader + A comma-separated values (csv) file is returned as two-dimensional + data structure with labeled axes. + + See Also + -------- + DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file. + read_csv : Read a comma-separated values (csv) file into DataFrame. + + Examples + -------- + >>> pd.read_fwf('data.csv') # doctest: +SKIP + """ + # Check input arguments. + if colspecs is None and widths is None: + raise ValueError("Must specify either colspecs or widths") + if colspecs not in (None, "infer") and widths is not None: + raise ValueError("You must specify only one of 'widths' and 'colspecs'") + + # Compute 'colspecs' from 'widths', if specified. 
+ if widths is not None: + colspecs, col = [], 0 + for w in widths: + colspecs.append((col, col + w)) + col += w + + # for mypy + assert colspecs is not None + + # GH#40830 + # Ensure length of `colspecs` matches length of `names` + names = kwds.get("names") + if names is not None: + if len(names) != len(colspecs) and colspecs != "infer": + # need to check len(index_col) as it might contain + # unnamed indices, in which case it's name is not required + len_index = 0 + if kwds.get("index_col") is not None: + index_col: Any = kwds.get("index_col") + if index_col is not False: + if not is_list_like(index_col): + len_index = 1 + else: + len_index = len(index_col) + if kwds.get("usecols") is None and len(names) + len_index != len(colspecs): + # If usecols is used colspec may be longer than names + raise ValueError("Length of colspecs must match length of names") + + kwds["colspecs"] = colspecs + kwds["infer_nrows"] = infer_nrows + kwds["engine"] = "python-fwf" + + check_dtype_backend(dtype_backend) + kwds["dtype_backend"] = dtype_backend + return _read(filepath_or_buffer, kwds) + + +class TextFileReader(abc.Iterator): + """ + + Passed dialect overrides any of the related parser options + + """ + + def __init__( + self, + f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list, + engine: CSVEngine | None = None, + **kwds, + ) -> None: + if engine is not None: + engine_specified = True + else: + engine = "python" + engine_specified = False + self.engine = engine + self._engine_specified = kwds.get("engine_specified", engine_specified) + + _validate_skipfooter(kwds) + + dialect = _extract_dialect(kwds) + if dialect is not None: + if engine == "pyarrow": + raise ValueError( + "The 'dialect' option is not supported with the 'pyarrow' engine" + ) + kwds = _merge_with_dialect_properties(dialect, kwds) + + if kwds.get("header", "infer") == "infer": + kwds["header"] = 0 if kwds.get("names") is None else None + + self.orig_options = kwds + + # miscellanea + self._currow = 0 + + options = self._get_options_with_defaults(engine) + options["storage_options"] = kwds.get("storage_options", None) + + self.chunksize = options.pop("chunksize", None) + self.nrows = options.pop("nrows", None) + + self._check_file_or_buffer(f, engine) + self.options, self.engine = self._clean_options(options, engine) + + if "has_index_names" in kwds: + self.options["has_index_names"] = kwds["has_index_names"] + + self.handles: IOHandles | None = None + self._engine = self._make_engine(f, self.engine) + + def close(self) -> None: + if self.handles is not None: + self.handles.close() + self._engine.close() + + def _get_options_with_defaults(self, engine: CSVEngine) -> dict[str, Any]: + kwds = self.orig_options + + options = {} + default: object | None + + for argname, default in parser_defaults.items(): + value = kwds.get(argname, default) + + # see gh-12935 + if ( + engine == "pyarrow" + and argname in _pyarrow_unsupported + and value != default + and value != getattr(value, "value", default) + ): + raise ValueError( + f"The {repr(argname)} option is not supported with the " + f"'pyarrow' engine" + ) + options[argname] = value + + for argname, default in _c_parser_defaults.items(): + if argname in kwds: + value = kwds[argname] + + if engine != "c" and value != default: + # TODO: Refactor this logic, its pretty convoluted + if "python" in engine and argname not in _python_unsupported: + pass + elif "pyarrow" in engine and argname not in _pyarrow_unsupported: + pass + else: + raise ValueError( + f"The {repr(argname)} option is 
not supported with the " + f"{repr(engine)} engine" + ) + else: + value = default + options[argname] = value + + if engine == "python-fwf": + for argname, default in _fwf_defaults.items(): + options[argname] = kwds.get(argname, default) + + return options + + def _check_file_or_buffer(self, f, engine: CSVEngine) -> None: + # see gh-16530 + if is_file_like(f) and engine != "c" and not hasattr(f, "__iter__"): + # The C engine doesn't need the file-like to have the "__iter__" + # attribute. However, the Python engine needs "__iter__(...)" + # when iterating through such an object, meaning it + # needs to have that attribute + raise ValueError( + "The 'python' engine cannot iterate through this file buffer." + ) + + def _clean_options( + self, options: dict[str, Any], engine: CSVEngine + ) -> tuple[dict[str, Any], CSVEngine]: + result = options.copy() + + fallback_reason = None + + # C engine not supported yet + if engine == "c": + if options["skipfooter"] > 0: + fallback_reason = "the 'c' engine does not support skipfooter" + engine = "python" + + sep = options["delimiter"] + delim_whitespace = options["delim_whitespace"] + + if sep is None and not delim_whitespace: + if engine in ("c", "pyarrow"): + fallback_reason = ( + f"the '{engine}' engine does not support " + "sep=None with delim_whitespace=False" + ) + engine = "python" + elif sep is not None and len(sep) > 1: + if engine == "c" and sep == r"\s+": + result["delim_whitespace"] = True + del result["delimiter"] + elif engine not in ("python", "python-fwf"): + # wait until regex engine integrated + fallback_reason = ( + f"the '{engine}' engine does not support " + "regex separators (separators > 1 char and " + r"different from '\s+' are interpreted as regex)" + ) + engine = "python" + elif delim_whitespace: + if "python" in engine: + result["delimiter"] = r"\s+" + elif sep is not None: + encodeable = True + encoding = sys.getfilesystemencoding() or "utf-8" + try: + if len(sep.encode(encoding)) > 1: + encodeable = False + except UnicodeDecodeError: + encodeable = False + if not encodeable and engine not in ("python", "python-fwf"): + fallback_reason = ( + f"the separator encoded in {encoding} " + f"is > 1 char long, and the '{engine}' engine " + "does not support such separators" + ) + engine = "python" + + quotechar = options["quotechar"] + if quotechar is not None and isinstance(quotechar, (str, bytes)): + if ( + len(quotechar) == 1 + and ord(quotechar) > 127 + and engine not in ("python", "python-fwf") + ): + fallback_reason = ( + "ord(quotechar) > 127, meaning the " + "quotechar is larger than one byte, " + f"and the '{engine}' engine does not support such quotechars" + ) + engine = "python" + + if fallback_reason and self._engine_specified: + raise ValueError(fallback_reason) + + if engine == "c": + for arg in _c_unsupported: + del result[arg] + + if "python" in engine: + for arg in _python_unsupported: + if fallback_reason and result[arg] != _c_parser_defaults.get(arg): + raise ValueError( + "Falling back to the 'python' engine because " + f"{fallback_reason}, but this causes {repr(arg)} to be " + "ignored as it is not supported by the 'python' engine." + ) + del result[arg] + + if fallback_reason: + warnings.warn( + ( + "Falling back to the 'python' engine because " + f"{fallback_reason}; you can avoid this warning by specifying " + "engine='python'." 
+ ), + ParserWarning, + stacklevel=find_stack_level(), + ) + + index_col = options["index_col"] + names = options["names"] + converters = options["converters"] + na_values = options["na_values"] + skiprows = options["skiprows"] + + validate_header_arg(options["header"]) + + if index_col is True: + raise ValueError("The value of index_col couldn't be 'True'") + if is_index_col(index_col): + if not isinstance(index_col, (list, tuple, np.ndarray)): + index_col = [index_col] + result["index_col"] = index_col + + names = list(names) if names is not None else names + + # type conversion-related + if converters is not None: + if not isinstance(converters, dict): + raise TypeError( + "Type converters must be a dict or subclass, " + f"input was a {type(converters).__name__}" + ) + else: + converters = {} + + # Converting values to NA + keep_default_na = options["keep_default_na"] + na_values, na_fvalues = _clean_na_values(na_values, keep_default_na) + + # handle skiprows; this is internally handled by the + # c-engine, so only need for python and pyarrow parsers + if engine == "pyarrow": + if not is_integer(skiprows) and skiprows is not None: + # pyarrow expects skiprows to be passed as an integer + raise ValueError( + "skiprows argument must be an integer when using " + "engine='pyarrow'" + ) + else: + if is_integer(skiprows): + skiprows = list(range(skiprows)) + if skiprows is None: + skiprows = set() + elif not callable(skiprows): + skiprows = set(skiprows) + + # put stuff back + result["names"] = names + result["converters"] = converters + result["na_values"] = na_values + result["na_fvalues"] = na_fvalues + result["skiprows"] = skiprows + + return result, engine + + def __next__(self) -> DataFrame: + try: + return self.get_chunk() + except StopIteration: + self.close() + raise + + def _make_engine( + self, + f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list | IO, + engine: CSVEngine = "c", + ) -> ParserBase: + mapping: dict[str, type[ParserBase]] = { + "c": CParserWrapper, + "python": PythonParser, + "pyarrow": ArrowParserWrapper, + "python-fwf": FixedWidthFieldParser, + } + if engine not in mapping: + raise ValueError( + f"Unknown engine: {engine} (valid options are {mapping.keys()})" + ) + if not isinstance(f, list): + # open file here + is_text = True + mode = "r" + if engine == "pyarrow": + is_text = False + mode = "rb" + elif ( + engine == "c" + and self.options.get("encoding", "utf-8") == "utf-8" + and isinstance(stringify_path(f), str) + ): + # c engine can decode utf-8 bytes, adding TextIOWrapper makes + # the c-engine especially for memory_map=True far slower + is_text = False + if "b" not in mode: + mode += "b" + self.handles = get_handle( + f, + mode, + encoding=self.options.get("encoding", None), + compression=self.options.get("compression", None), + memory_map=self.options.get("memory_map", False), + is_text=is_text, + errors=self.options.get("encoding_errors", "strict"), + storage_options=self.options.get("storage_options", None), + ) + assert self.handles is not None + f = self.handles.handle + + elif engine != "python": + msg = f"Invalid file path or buffer object type: {type(f)}" + raise ValueError(msg) + + try: + return mapping[engine](f, **self.options) + except Exception: + if self.handles is not None: + self.handles.close() + raise + + def _failover_to_python(self) -> None: + raise AbstractMethodError(self) + + def read(self, nrows: int | None = None) -> DataFrame: + if self.engine == "pyarrow": + try: + # error: "ParserBase" has no attribute "read" + df = 
self._engine.read() # type: ignore[attr-defined] + except Exception: + self.close() + raise + else: + nrows = validate_integer("nrows", nrows) + try: + # error: "ParserBase" has no attribute "read" + ( + index, + columns, + col_dict, + ) = self._engine.read( # type: ignore[attr-defined] + nrows + ) + except Exception: + self.close() + raise + + if index is None: + if col_dict: + # Any column is actually fine: + new_rows = len(next(iter(col_dict.values()))) + index = RangeIndex(self._currow, self._currow + new_rows) + else: + new_rows = 0 + else: + new_rows = len(index) + + df = DataFrame(col_dict, columns=columns, index=index) + + self._currow += new_rows + return df + + def get_chunk(self, size: int | None = None) -> DataFrame: + if size is None: + size = self.chunksize + if self.nrows is not None: + if self._currow >= self.nrows: + raise StopIteration + size = min(size, self.nrows - self._currow) + return self.read(nrows=size) + + def __enter__(self) -> TextFileReader: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.close() + + +def TextParser(*args, **kwds) -> TextFileReader: + """ + Converts lists of lists/tuples into DataFrames with proper type inference + and optional (e.g. string to datetime) conversion. Also enables iterating + lazily over chunks of large files + + Parameters + ---------- + data : file-like object or list + delimiter : separator character to use + dialect : str or csv.Dialect instance, optional + Ignored if delimiter is longer than 1 character + names : sequence, default + header : int, default 0 + Row to use to parse column labels. Defaults to the first row. Prior + rows will be discarded + index_col : int or list, optional + Column or columns to use as the (possibly hierarchical) index + has_index_names: bool, default False + True if the cols defined in index_col have an index name and are + not in the header. + na_values : scalar, str, list-like, or dict, optional + Additional strings to recognize as NA/NaN. + keep_default_na : bool, default True + thousands : str, optional + Thousands separator + comment : str, optional + Comment out remainder of line + parse_dates : bool, default False + keep_date_col : bool, default False + date_parser : function, optional + + .. deprecated:: 2.0.0 + date_format : str or dict of column -> format, default ``None`` + + .. versionadded:: 2.0.0 + skiprows : list of integers + Row numbers to skip + skipfooter : int + Number of line at bottom of file to skip + converters : dict, optional + Dict of functions for converting values in certain columns. Keys can + either be integers or column labels, values are functions that take one + input argument, the cell (not column) content, and return the + transformed content. + encoding : str, optional + Encoding to use for UTF when reading/writing (ex. 'utf-8') + float_precision : str, optional + Specifies which converter the C engine should use for floating-point + values. The options are `None` or `high` for the ordinary converter, + `legacy` for the original lower precision pandas converter, and + `round_trip` for the round-trip converter. + + .. 
versionchanged:: 1.2 + """ + kwds["engine"] = "python" + return TextFileReader(*args, **kwds) + + +def _clean_na_values(na_values, keep_default_na: bool = True): + na_fvalues: set | dict + if na_values is None: + if keep_default_na: + na_values = STR_NA_VALUES + else: + na_values = set() + na_fvalues = set() + elif isinstance(na_values, dict): + old_na_values = na_values.copy() + na_values = {} # Prevent aliasing. + + # Convert the values in the na_values dictionary + # into array-likes for further use. This is also + # where we append the default NaN values, provided + # that `keep_default_na=True`. + for k, v in old_na_values.items(): + if not is_list_like(v): + v = [v] + + if keep_default_na: + v = set(v) | STR_NA_VALUES + + na_values[k] = v + na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()} + else: + if not is_list_like(na_values): + na_values = [na_values] + na_values = _stringify_na_values(na_values) + if keep_default_na: + na_values = na_values | STR_NA_VALUES + + na_fvalues = _floatify_na_values(na_values) + + return na_values, na_fvalues + + +def _floatify_na_values(na_values): + # create float versions of the na_values + result = set() + for v in na_values: + try: + v = float(v) + if not np.isnan(v): + result.add(v) + except (TypeError, ValueError, OverflowError): + pass + return result + + +def _stringify_na_values(na_values): + """return a stringified and numeric for these values""" + result: list[str | float] = [] + for x in na_values: + result.append(str(x)) + result.append(x) + try: + v = float(x) + + # we are like 999 here + if v == int(v): + v = int(v) + result.append(f"{v}.0") + result.append(str(v)) + + result.append(v) + except (TypeError, ValueError, OverflowError): + pass + try: + result.append(int(x)) + except (TypeError, ValueError, OverflowError): + pass + return set(result) + + +def _refine_defaults_read( + dialect: str | csv.Dialect | None, + delimiter: str | None | lib.NoDefault, + delim_whitespace: bool, + engine: CSVEngine | None, + sep: str | None | lib.NoDefault, + on_bad_lines: str | Callable, + names: Sequence[Hashable] | None | lib.NoDefault, + defaults: dict[str, Any], + dtype_backend: DtypeBackend | lib.NoDefault, +): + """Validate/refine default values of input parameters of read_csv, read_table. + + Parameters + ---------- + dialect : str or csv.Dialect + If provided, this parameter will override values (default or not) for the + following parameters: `delimiter`, `doublequote`, `escapechar`, + `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to + override values, a ParserWarning will be issued. See csv.Dialect + documentation for more details. + delimiter : str or object + Alias for sep. + delim_whitespace : bool + Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be + used as the sep. Equivalent to setting ``sep='\\s+'``. If this option + is set to True, nothing should be passed in for the ``delimiter`` + parameter. + engine : {{'c', 'python'}} + Parser engine to use. The C engine is faster while the python engine is + currently more feature-complete. + sep : str or object + A delimiter provided by the user (str) or a sentinel value, i.e. + pandas._libs.lib.no_default. + on_bad_lines : str, callable + An option for handling bad lines or a sentinel value(None). + names : array-like, optional + List of column names to use. If the file contains a header row, + then you should explicitly pass ``header=0`` to override the column names. + Duplicates in this list are not allowed. 
+    defaults: dict
+        Default values of input parameters.
+
+    Returns
+    -------
+    kwds : dict
+        Input parameters with correct values.
+
+    Raises
+    ------
+    ValueError :
+        If a delimiter was specified with ``sep`` (or ``delimiter``) and
+        ``delim_whitespace=True``.
+    """
+    # fix types for sep, delimiter to Union(str, Any)
+    delim_default = defaults["delimiter"]
+    kwds: dict[str, Any] = {}
+    # gh-23761
+    #
+    # When a dialect is passed, it overrides any of the overlapping
+    # parameters passed in directly. We don't want to warn if the
+    # default parameters were passed in (since it probably means
+    # that the user didn't pass them in explicitly in the first place).
+    #
+    # "delimiter" is the annoying corner case because we alias it to
+    # "sep" before doing comparison to the dialect values later on.
+    # Thus, we need a flag to indicate that we need to "override"
+    # the comparison to dialect values by checking if default values
+    # for BOTH "delimiter" and "sep" were provided.
+    if dialect is not None:
+        kwds["sep_override"] = delimiter is None and (
+            sep is lib.no_default or sep == delim_default
+        )
+
+    if delimiter and (sep is not lib.no_default):
+        raise ValueError("Specified a sep and a delimiter; you can only specify one.")
+
+    kwds["names"] = None if names is lib.no_default else names
+
+    # Alias sep -> delimiter.
+    if delimiter is None:
+        delimiter = sep
+
+    if delim_whitespace and (delimiter is not lib.no_default):
+        raise ValueError(
+            "Specified a delimiter with both sep and "
+            "delim_whitespace=True; you can only specify one."
+        )
+
+    if delimiter == "\n":
+        raise ValueError(
+            r"Specified \n as separator or delimiter. This forces the python engine "
+            "which does not accept a line terminator. Hence it is not allowed to use "
+            "the line terminator as separator.",
+        )
+
+    if delimiter is lib.no_default:
+        # assign default separator value
+        kwds["delimiter"] = delim_default
+    else:
+        kwds["delimiter"] = delimiter
+
+    if engine is not None:
+        kwds["engine_specified"] = True
+    else:
+        kwds["engine"] = "c"
+        kwds["engine_specified"] = False
+
+    if on_bad_lines == "error":
+        kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.ERROR
+    elif on_bad_lines == "warn":
+        kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.WARN
+    elif on_bad_lines == "skip":
+        kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.SKIP
+    elif callable(on_bad_lines):
+        if engine != "python":
+            raise ValueError(
+                "on_bad_lines can only be a callable function if engine='python'"
+            )
+        kwds["on_bad_lines"] = on_bad_lines
+    else:
+        raise ValueError(f"Argument {on_bad_lines} is invalid for on_bad_lines")
+
+    check_dtype_backend(dtype_backend)
+
+    kwds["dtype_backend"] = dtype_backend
+
+    return kwds
+
+
+def _extract_dialect(kwds: dict[str, Any]) -> csv.Dialect | None:
+    """
+    Extract concrete csv dialect instance.
+
+    Returns
+    -------
+    csv.Dialect or None
+    """
+    if kwds.get("dialect") is None:
+        return None
+
+    dialect = kwds["dialect"]
+    if dialect in csv.list_dialects():
+        dialect = csv.get_dialect(dialect)
+
+    _validate_dialect(dialect)
+
+    return dialect
+
+
+MANDATORY_DIALECT_ATTRS = (
+    "delimiter",
+    "doublequote",
+    "escapechar",
+    "skipinitialspace",
+    "quotechar",
+    "quoting",
+)
+
+
+def _validate_dialect(dialect: csv.Dialect) -> None:
+    """
+    Validate csv dialect instance.
+
+    Raises
+    ------
+    ValueError
+        If incorrect dialect is provided.
+ """ + for param in MANDATORY_DIALECT_ATTRS: + if not hasattr(dialect, param): + raise ValueError(f"Invalid dialect {dialect} provided") + + +def _merge_with_dialect_properties( + dialect: csv.Dialect, + defaults: dict[str, Any], +) -> dict[str, Any]: + """ + Merge default kwargs in TextFileReader with dialect parameters. + + Parameters + ---------- + dialect : csv.Dialect + Concrete csv dialect. See csv.Dialect documentation for more details. + defaults : dict + Keyword arguments passed to TextFileReader. + + Returns + ------- + kwds : dict + Updated keyword arguments, merged with dialect parameters. + """ + kwds = defaults.copy() + + for param in MANDATORY_DIALECT_ATTRS: + dialect_val = getattr(dialect, param) + + parser_default = parser_defaults[param] + provided = kwds.get(param, parser_default) + + # Messages for conflicting values between the dialect + # instance and the actual parameters provided. + conflict_msgs = [] + + # Don't warn if the default parameter was passed in, + # even if it conflicts with the dialect (gh-23761). + if provided not in (parser_default, dialect_val): + msg = ( + f"Conflicting values for '{param}': '{provided}' was " + f"provided, but the dialect specifies '{dialect_val}'. " + "Using the dialect-specified value." + ) + + # Annoying corner case for not warning about + # conflicts between dialect and delimiter parameter. + # Refer to the outer "_read_" function for more info. + if not (param == "delimiter" and kwds.pop("sep_override", False)): + conflict_msgs.append(msg) + + if conflict_msgs: + warnings.warn( + "\n\n".join(conflict_msgs), ParserWarning, stacklevel=find_stack_level() + ) + kwds[param] = dialect_val + return kwds + + +def _validate_skipfooter(kwds: dict[str, Any]) -> None: + """ + Check whether skipfooter is compatible with other kwargs in TextFileReader. + + Parameters + ---------- + kwds : dict + Keyword arguments passed to TextFileReader. + + Raises + ------ + ValueError + If skipfooter is not compatible with other parameters. + """ + if kwds.get("skipfooter"): + if kwds.get("iterator") or kwds.get("chunksize"): + raise ValueError("'skipfooter' not supported for iteration") + if kwds.get("nrows"): + raise ValueError("'skipfooter' not supported with 'nrows'") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/pickle.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/pickle.py new file mode 100644 index 00000000..de9f1168 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/pickle.py @@ -0,0 +1,214 @@ +""" pickle compat """ +from __future__ import annotations + +import pickle +from typing import ( + TYPE_CHECKING, + Any, +) +import warnings + +from pandas.compat import pickle_compat as pc +from pandas.util._decorators import doc + +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import get_handle + +if TYPE_CHECKING: + from pandas._typing import ( + CompressionOptions, + FilePath, + ReadPickleBuffer, + StorageOptions, + WriteBuffer, + ) + + from pandas import ( + DataFrame, + Series, + ) + + +@doc( + storage_options=_shared_docs["storage_options"], + compression_options=_shared_docs["compression_options"] % "filepath_or_buffer", +) +def to_pickle( + obj: Any, + filepath_or_buffer: FilePath | WriteBuffer[bytes], + compression: CompressionOptions = "infer", + protocol: int = pickle.HIGHEST_PROTOCOL, + storage_options: StorageOptions | None = None, +) -> None: + """ + Pickle (serialize) object to file. + + Parameters + ---------- + obj : any object + Any python object. 
+    filepath_or_buffer : str, path object, or file-like object
+        String, path object (implementing ``os.PathLike[str]``), or file-like
+        object implementing a binary ``write()`` function.
+        Also accepts URL. URL has to be of S3 or GCS.
+    {compression_options}
+
+        .. versionchanged:: 1.4.0 Zstandard support.
+
+    protocol : int
+        Int which indicates which protocol should be used by the pickler,
+        default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible
+        values for this parameter depend on the version of Python. For Python
+        2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value.
+        For Python >= 3.4, 4 is a valid value. A negative value for the
+        protocol parameter is equivalent to setting its value to
+        HIGHEST_PROTOCOL.
+
+    {storage_options}
+
+        .. versionadded:: 1.2.0
+
+    .. [1] https://docs.python.org/3/library/pickle.html
+
+    See Also
+    --------
+    read_pickle : Load pickled pandas object (or any object) from file.
+    DataFrame.to_hdf : Write DataFrame to an HDF5 file.
+    DataFrame.to_sql : Write DataFrame to a SQL database.
+    DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
+
+    Examples
+    --------
+    >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}})  # doctest: +SKIP
+    >>> original_df  # doctest: +SKIP
+       foo  bar
+    0    0    5
+    1    1    6
+    2    2    7
+    3    3    8
+    4    4    9
+    >>> pd.to_pickle(original_df, "./dummy.pkl")  # doctest: +SKIP
+
+    >>> unpickled_df = pd.read_pickle("./dummy.pkl")  # doctest: +SKIP
+    >>> unpickled_df  # doctest: +SKIP
+       foo  bar
+    0    0    5
+    1    1    6
+    2    2    7
+    3    3    8
+    4    4    9
+    """  # noqa: E501
+    if protocol < 0:
+        protocol = pickle.HIGHEST_PROTOCOL
+
+    with get_handle(
+        filepath_or_buffer,
+        "wb",
+        compression=compression,
+        is_text=False,
+        storage_options=storage_options,
+    ) as handles:
+        # letting pickle write directly to the buffer is more memory-efficient
+        pickle.dump(obj, handles.handle, protocol=protocol)
+
+
+@doc(
+    storage_options=_shared_docs["storage_options"],
+    decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer",
+)
+def read_pickle(
+    filepath_or_buffer: FilePath | ReadPickleBuffer,
+    compression: CompressionOptions = "infer",
+    storage_options: StorageOptions | None = None,
+) -> DataFrame | Series:
+    """
+    Load pickled pandas object (or any object) from file.
+
+    .. warning::
+
+       Loading pickled data received from untrusted sources can be
+       unsafe. See `here <https://docs.python.org/3/library/pickle.html>`__.
+
+    Parameters
+    ----------
+    filepath_or_buffer : str, path object, or file-like object
+        String, path object (implementing ``os.PathLike[str]``), or file-like
+        object implementing a binary ``readlines()`` function.
+        Also accepts URL. URL is not limited to S3 and GCS.
+
+    {decompression_options}
+
+        .. versionchanged:: 1.4.0 Zstandard support.
+
+    {storage_options}
+
+        .. versionadded:: 1.2.0
+
+    Returns
+    -------
+    same type as object stored in file
+
+    See Also
+    --------
+    DataFrame.to_pickle : Pickle (serialize) DataFrame object to file.
+    Series.to_pickle : Pickle (serialize) Series object to file.
+    read_hdf : Read HDF5 file into a DataFrame.
+    read_sql : Read SQL query or database table into a DataFrame.
+    read_parquet : Load a parquet object, returning a DataFrame.
+
+    Notes
+    -----
+    read_pickle is only guaranteed to be backwards compatible to pandas 0.20.3
+    provided the object was serialized with to_pickle.
+
+    Examples
+    --------
+    >>> original_df = pd.DataFrame(
+    ...     {{"foo": range(5), "bar": range(5, 10)}}
+    ... )  # doctest: +SKIP
+    >>> original_df  # doctest: +SKIP
+       foo  bar
+    0    0    5
+    1    1    6
+    2    2    7
+    3    3    8
+    4    4    9
+    >>> pd.to_pickle(original_df, "./dummy.pkl")  # doctest: +SKIP
+
+    >>> unpickled_df = pd.read_pickle("./dummy.pkl")  # doctest: +SKIP
+    >>> unpickled_df  # doctest: +SKIP
+       foo  bar
+    0    0    5
+    1    1    6
+    2    2    7
+    3    3    8
+    4    4    9
+    """
+    excs_to_catch = (AttributeError, ImportError, ModuleNotFoundError, TypeError)
+    with get_handle(
+        filepath_or_buffer,
+        "rb",
+        compression=compression,
+        is_text=False,
+        storage_options=storage_options,
+    ) as handles:
+        # 1) try standard library Pickle
+        # 2) try pickle_compat (older pandas version) to handle subclass changes
+        # 3) try pickle_compat with latin-1 encoding upon a UnicodeDecodeError
+
+        try:
+            # TypeError for Cython complaints about object.__new__ vs Tick.__new__
+            try:
+                with warnings.catch_warnings(record=True):
+                    # We want to silence any warnings about, e.g. moved modules.
+                    warnings.simplefilter("ignore", Warning)
+                    return pickle.load(handles.handle)
+            except excs_to_catch:
+                # e.g.
+                #  "No module named 'pandas.core.sparse.series'"
+                #  "Can't get attribute '__nat_unpickle' on <module 'pandas._libs.tslib"
+                return pc.load(handles.handle, encoding=None)
+        except UnicodeDecodeError:
+            # e.g. can occur for files written in py27; see GH#28645 and GH#31988
+            return pc.load(handles.handle, encoding="latin-1")
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/pytables.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/pytables.py
new file mode 100644
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/pytables.py
+"""
+High level interface to PyTables for reading and writing pandas data
+structures to disk
+"""
+# encoding
+_default_encoding = "UTF-8"
+
+
+def _ensure_decoded(s):
+    """if we have bytes, decode them to unicode"""
+    if isinstance(s, np.bytes_):
+        s = s.decode("UTF-8")
+    return s
+
+
+def _ensure_encoding(encoding: str | None) -> str:
+    # set the encoding if we need
+    if encoding is None:
+        encoding = _default_encoding
+
+    return encoding
+
+
+def _ensure_str(name):
+    """
+    Ensure that an index / column name is a str (python 3); otherwise they
+    may be np.string dtype. Non-string dtypes are passed through unchanged.
+
+    https://github.com/pandas-dev/pandas/issues/13492
+    """
+    if isinstance(name, str):
+        name = str(name)
+    return name
+
+
+Term = PyTablesExpr
+
+
+def _ensure_term(where, scope_level: int):
+    """
+    Ensure that the where is a Term or a list of Term.
+
+    This makes sure that we are capturing the scope of variables that are
+    passed; create the terms here with a frame_level=2 (we are 2 levels down)
+    """
+    # only consider list/tuple here as an ndarray is automatically a coordinate
+    # list
+    level = scope_level + 1
+    if isinstance(where, (list, tuple)):
+        where = [
+            Term(term, scope_level=level + 1) if maybe_expression(term) else term
+            for term in where
+            if term is not None
+        ]
+    elif maybe_expression(where):
+        where = Term(where, scope_level=level)
+    return where if where is None or len(where) else None
+
+
+incompatibility_doc: Final = """
+where criteria is being ignored as this version [%s] is too old (or
+not-defined), read the file in and write it out to a new file to upgrade (with
+the copy_to method)
+"""
+
+attribute_conflict_doc: Final = """
+the [%s] attribute of the existing index is [%s] which conflicts with the new
+[%s], resetting the attribute to None
+"""
+
+performance_doc: Final = """
+your performance may suffer as PyTables will pickle object types that it cannot
+map directly to c-types [inferred_type->%s,key->%s] [items->%s]
+"""
+
+# formats
+_FORMAT_MAP = {"f": "fixed", "fixed": "fixed", "t": "table", "table": "table"}
+
+# axes map
+_AXES_MAP = {DataFrame: [0]}
+
+# register our configuration options
+dropna_doc: Final = """
+: boolean
+    drop ALL nan rows when appending to a table
+"""
+format_doc: Final = """
+: format
+    default format writing format, if None, then
+    put will default to 'fixed' and append will default to 'table'
+"""
+
+with config.config_prefix("io.hdf"):
+    config.register_option("dropna_table", False, dropna_doc, validator=config.is_bool)
+    config.register_option(
+        "default_format",
+        None,
+        format_doc,
+        validator=config.is_one_of_factory(["fixed", "table", None]),
+    )
+
+# oh the troubles to reduce import time
+_table_mod = None
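+# --- Editor's note: illustrative usage sketch, not part of the pandas
+# source. The two ``io.hdf`` options registered above are reachable through
+# the public pandas config API; a minimal sketch, assuming only that pandas
+# is importable:
+#
+#   import pandas as pd
+#   pd.set_option("io.hdf.default_format", "table")  # default for put()/append()
+#   pd.set_option("io.hdf.dropna_table", True)       # drop all-NaN rows on append
+#   assert pd.get_option("io.hdf.default_format") == "table"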
+_table_file_open_policy_is_strict = False + + +def _tables(): + global _table_mod + global _table_file_open_policy_is_strict + if _table_mod is None: + import tables + + _table_mod = tables + + # set the file open policy + # return the file open policy; this changes as of pytables 3.1 + # depending on the HDF5 version + with suppress(AttributeError): + _table_file_open_policy_is_strict = ( + tables.file._FILE_OPEN_POLICY == "strict" + ) + + return _table_mod + + +# interface to/from ### + + +def to_hdf( + path_or_buf: FilePath | HDFStore, + key: str, + value: DataFrame | Series, + mode: str = "a", + complevel: int | None = None, + complib: str | None = None, + append: bool = False, + format: str | None = None, + index: bool = True, + min_itemsize: int | dict[str, int] | None = None, + nan_rep=None, + dropna: bool | None = None, + data_columns: Literal[True] | list[str] | None = None, + errors: str = "strict", + encoding: str = "UTF-8", +) -> None: + """store this object, close it if we opened it""" + if append: + f = lambda store: store.append( + key, + value, + format=format, + index=index, + min_itemsize=min_itemsize, + nan_rep=nan_rep, + dropna=dropna, + data_columns=data_columns, + errors=errors, + encoding=encoding, + ) + else: + # NB: dropna is not passed to `put` + f = lambda store: store.put( + key, + value, + format=format, + index=index, + min_itemsize=min_itemsize, + nan_rep=nan_rep, + data_columns=data_columns, + errors=errors, + encoding=encoding, + dropna=dropna, + ) + + path_or_buf = stringify_path(path_or_buf) + if isinstance(path_or_buf, str): + with HDFStore( + path_or_buf, mode=mode, complevel=complevel, complib=complib + ) as store: + f(store) + else: + f(path_or_buf) + + +def read_hdf( + path_or_buf: FilePath | HDFStore, + key=None, + mode: str = "r", + errors: str = "strict", + where: str | list | None = None, + start: int | None = None, + stop: int | None = None, + columns: list[str] | None = None, + iterator: bool = False, + chunksize: int | None = None, + **kwargs, +): + """ + Read from the store, close it if we opened it. + + Retrieve pandas object stored in file, optionally based on where + criteria. + + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + + Parameters + ---------- + path_or_buf : str, path object, pandas.HDFStore + Any valid string path is acceptable. Only supports the local file system, + remote URLs and file-like objects are not supported. + + If you want to pass in a path object, pandas accepts any + ``os.PathLike``. + + Alternatively, pandas accepts an open :class:`pandas.HDFStore` object. + + key : object, optional + The group identifier in the store. Can be omitted if the HDF file + contains a single pandas object. + mode : {'r', 'r+', 'a'}, default 'r' + Mode to use when opening the file. Ignored if path_or_buf is a + :class:`pandas.HDFStore`. Default is 'r'. + errors : str, default 'strict' + Specifies how encoding and decoding errors are to be handled. + See the errors argument for :func:`open` for a full list + of options. + where : list, optional + A list of Term (or convertible) objects. + start : int, optional + Row number to start selection. + stop : int, optional + Row number to stop selection. + columns : list, optional + A list of columns names to return. 
+ iterator : bool, optional + Return an iterator object. + chunksize : int, optional + Number of rows to include in an iteration when using an iterator. + **kwargs + Additional keyword arguments passed to HDFStore. + + Returns + ------- + object + The selected object. Return type depends on the object stored. + + See Also + -------- + DataFrame.to_hdf : Write a HDF file from a DataFrame. + HDFStore : Low-level access to HDF files. + + Examples + -------- + >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z']) # doctest: +SKIP + >>> df.to_hdf('./store.h5', 'data') # doctest: +SKIP + >>> reread = pd.read_hdf('./store.h5') # doctest: +SKIP + """ + if mode not in ["r", "r+", "a"]: + raise ValueError( + f"mode {mode} is not allowed while performing a read. " + f"Allowed modes are r, r+ and a." + ) + # grab the scope + if where is not None: + where = _ensure_term(where, scope_level=1) + + if isinstance(path_or_buf, HDFStore): + if not path_or_buf.is_open: + raise OSError("The HDFStore must be open for reading.") + + store = path_or_buf + auto_close = False + else: + path_or_buf = stringify_path(path_or_buf) + if not isinstance(path_or_buf, str): + raise NotImplementedError( + "Support for generic buffers has not been implemented." + ) + try: + exists = os.path.exists(path_or_buf) + + # if filepath is too long + except (TypeError, ValueError): + exists = False + + if not exists: + raise FileNotFoundError(f"File {path_or_buf} does not exist") + + store = HDFStore(path_or_buf, mode=mode, errors=errors, **kwargs) + # can't auto open/close if we are using an iterator + # so delegate to the iterator + auto_close = True + + try: + if key is None: + groups = store.groups() + if len(groups) == 0: + raise ValueError( + "Dataset(s) incompatible with Pandas data types, " + "not table, or no datasets found in HDF5 file." + ) + candidate_only_group = groups[0] + + # For the HDF file to have only one dataset, all other groups + # should then be metadata groups for that candidate group. (This + # assumes that the groups() method enumerates parent groups + # before their children.) + for group_to_check in groups[1:]: + if not _is_metadata_of(group_to_check, candidate_only_group): + raise ValueError( + "key must be provided when HDF5 " + "file contains multiple datasets." + ) + key = candidate_only_group._v_pathname + return store.select( + key, + where=where, + start=start, + stop=stop, + columns=columns, + iterator=iterator, + chunksize=chunksize, + auto_close=auto_close, + ) + except (ValueError, TypeError, LookupError): + if not isinstance(path_or_buf, HDFStore): + # if there is an error, close the store if we opened it. + with suppress(AttributeError): + store.close() + + raise + + +def _is_metadata_of(group: Node, parent_group: Node) -> bool: + """Check if a given group is a metadata group for a given parent_group.""" + if group._v_depth <= parent_group._v_depth: + return False + + current = group + while current._v_depth > 1: + parent = current._v_parent + if parent == parent_group and current._v_name == "meta": + return True + current = current._v_parent + return False + + +class HDFStore: + """ + Dict-like IO interface for storing pandas objects in PyTables. + + Either Fixed or Table format. + + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. 
+ + Parameters + ---------- + path : str + File path to HDF5 file. + mode : {'a', 'w', 'r', 'r+'}, default 'a' + + ``'r'`` + Read-only; no data can be modified. + ``'w'`` + Write; a new file is created (an existing file with the same + name would be deleted). + ``'a'`` + Append; an existing file is opened for reading and writing, + and if the file does not exist it is created. + ``'r+'`` + It is similar to ``'a'``, but the file must already exist. + complevel : int, 0-9, default None + Specifies a compression level for data. + A value of 0 or None disables compression. + complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib' + Specifies the compression library to be used. + These additional compressors for Blosc are supported + (default if no compressor specified: 'blosc:blosclz'): + {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', + 'blosc:zlib', 'blosc:zstd'}. + Specifying a compression library which is not available issues + a ValueError. + fletcher32 : bool, default False + If applying compression use the fletcher32 checksum. + **kwargs + These parameters will be passed to the PyTables open_file method. + + Examples + -------- + >>> bar = pd.DataFrame(np.random.randn(10, 4)) + >>> store = pd.HDFStore('test.h5') + >>> store['foo'] = bar # write to HDF5 + >>> bar = store['foo'] # retrieve + >>> store.close() + + **Create or load HDF5 file in-memory** + + When passing the `driver` option to the PyTables open_file method through + **kwargs, the HDF5 file is loaded or created in-memory and will only be + written when closed: + + >>> bar = pd.DataFrame(np.random.randn(10, 4)) + >>> store = pd.HDFStore('test.h5', driver='H5FD_CORE') + >>> store['foo'] = bar + >>> store.close() # only now, data is written to disk + """ + + _handle: File | None + _mode: str + + def __init__( + self, + path, + mode: str = "a", + complevel: int | None = None, + complib=None, + fletcher32: bool = False, + **kwargs, + ) -> None: + if "format" in kwargs: + raise ValueError("format is not a defined argument for HDFStore") + + tables = import_optional_dependency("tables") + + if complib is not None and complib not in tables.filters.all_complibs: + raise ValueError( + f"complib only supports {tables.filters.all_complibs} compression." 
+            )
+
+        if complib is None and complevel is not None:
+            complib = tables.filters.default_complib
+
+        self._path = stringify_path(path)
+        if mode is None:
+            mode = "a"
+        self._mode = mode
+        self._handle = None
+        self._complevel = complevel if complevel else 0
+        self._complib = complib
+        self._fletcher32 = fletcher32
+        self._filters = None
+        self.open(mode=mode, **kwargs)
+
+    def __fspath__(self) -> str:
+        return self._path
+
+    @property
+    def root(self):
+        """return the root node"""
+        self._check_if_open()
+        assert self._handle is not None  # for mypy
+        return self._handle.root
+
+    @property
+    def filename(self) -> str:
+        return self._path
+
+    def __getitem__(self, key: str):
+        return self.get(key)
+
+    def __setitem__(self, key: str, value) -> None:
+        self.put(key, value)
+
+    def __delitem__(self, key: str) -> None:
+        return self.remove(key)
+
+    def __getattr__(self, name: str):
+        """allow attribute access to get stores"""
+        try:
+            return self.get(name)
+        except (KeyError, ClosedFileError):
+            pass
+        raise AttributeError(
+            f"'{type(self).__name__}' object has no attribute '{name}'"
+        )
+
+    def __contains__(self, key: str) -> bool:
+        """
+        check for existence of this key
+        can match the exact pathname or the pathname w/o the leading '/'
+        """
+        node = self.get_node(key)
+        if node is not None:
+            name = node._v_pathname
+            if key in (name, name[1:]):
+                return True
+        return False
+
+    def __len__(self) -> int:
+        return len(self.groups())
+
+    def __repr__(self) -> str:
+        pstr = pprint_thing(self._path)
+        return f"{type(self)}\nFile path: {pstr}\n"
+
+    def __enter__(self) -> Self:
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> None:
+        self.close()
+
+    def keys(self, include: str = "pandas") -> list[str]:
+        """
+        Return a list of keys corresponding to objects stored in HDFStore.
+
+        Parameters
+        ----------
+
+        include : str, default 'pandas'
+            When include equals 'pandas' return pandas objects.
+            When include equals 'native' return native HDF5 Table objects.
+
+        Returns
+        -------
+        list
+            List of ABSOLUTE path-names (e.g. have the leading '/').
+
+        Raises
+        ------
+        raises ValueError if include has an illegal value
+
+        Examples
+        --------
+        >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+        >>> store = pd.HDFStore("store.h5", 'w')  # doctest: +SKIP
+        >>> store.put('data', df)  # doctest: +SKIP
+        >>> store.get('data')  # doctest: +SKIP
+        >>> print(store.keys())  # doctest: +SKIP
+        ['/data']
+        >>> store.close()  # doctest: +SKIP
+        """
+        if include == "pandas":
+            return [n._v_pathname for n in self.groups()]
+
+        elif include == "native":
+            assert self._handle is not None  # mypy
+            return [
+                n._v_pathname for n in self._handle.walk_nodes("/", classname="Table")
+            ]
+        raise ValueError(
+            f"`include` should be either 'pandas' or 'native' but is '{include}'"
+        )
+
+    def __iter__(self) -> Iterator[str]:
+        return iter(self.keys())
+
+    def items(self) -> Iterator[tuple[str, list]]:
+        """
+        iterate on key->group
+        """
+        for g in self.groups():
+            yield g._v_pathname, g
+
+    def open(self, mode: str = "a", **kwargs) -> None:
+        """
+        Open the file in the specified mode
+
+        Parameters
+        ----------
+        mode : {'a', 'w', 'r', 'r+'}, default 'a'
+            See HDFStore docstring or tables.open_file for info about modes
+        **kwargs
+            These parameters will be passed to the PyTables open_file method.
+ """ + tables = _tables() + + if self._mode != mode: + # if we are changing a write mode to read, ok + if self._mode in ["a", "w"] and mode in ["r", "r+"]: + pass + elif mode in ["w"]: + # this would truncate, raise here + if self.is_open: + raise PossibleDataLossError( + f"Re-opening the file [{self._path}] with mode [{self._mode}] " + "will delete the current file!" + ) + + self._mode = mode + + # close and reopen the handle + if self.is_open: + self.close() + + if self._complevel and self._complevel > 0: + self._filters = _tables().Filters( + self._complevel, self._complib, fletcher32=self._fletcher32 + ) + + if _table_file_open_policy_is_strict and self.is_open: + msg = ( + "Cannot open HDF5 file, which is already opened, " + "even in read-only mode." + ) + raise ValueError(msg) + + self._handle = tables.open_file(self._path, self._mode, **kwargs) + + def close(self) -> None: + """ + Close the PyTables file handle + """ + if self._handle is not None: + self._handle.close() + self._handle = None + + @property + def is_open(self) -> bool: + """ + return a boolean indicating whether the file is open + """ + if self._handle is None: + return False + return bool(self._handle.isopen) + + def flush(self, fsync: bool = False) -> None: + """ + Force all buffered modifications to be written to disk. + + Parameters + ---------- + fsync : bool (default False) + call ``os.fsync()`` on the file handle to force writing to disk. + + Notes + ----- + Without ``fsync=True``, flushing may not guarantee that the OS writes + to disk. With fsync, the operation will block until the OS claims the + file has been written; however, other caching layers may still + interfere. + """ + if self._handle is not None: + self._handle.flush() + if fsync: + with suppress(OSError): + os.fsync(self._handle.fileno()) + + def get(self, key: str): + """ + Retrieve pandas object stored in file. + + Parameters + ---------- + key : str + + Returns + ------- + object + Same type as object stored in file. + + Examples + -------- + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B']) + >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP + >>> store.put('data', df) # doctest: +SKIP + >>> store.get('data') # doctest: +SKIP + >>> store.close() # doctest: +SKIP + """ + with patch_pickle(): + # GH#31167 Without this patch, pickle doesn't know how to unpickle + # old DateOffset objects now that they are cdef classes. + group = self.get_node(key) + if group is None: + raise KeyError(f"No object named {key} in the file") + return self._read_group(group) + + def select( + self, + key: str, + where=None, + start=None, + stop=None, + columns=None, + iterator: bool = False, + chunksize: int | None = None, + auto_close: bool = False, + ): + """ + Retrieve pandas object stored in file, optionally based on where criteria. + + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + + Parameters + ---------- + key : str + Object being retrieved from file. + where : list or None + List of Term (or convertible) objects, optional. + start : int or None + Row number to start selection. + stop : int, default None + Row number to stop selection. + columns : list or None + A list of columns that if not None, will limit the return columns. + iterator : bool or False + Returns an iterator. 
+ chunksize : int or None + Number or rows to include in iteration, return an iterator. + auto_close : bool or False + Should automatically close the store when finished. + + Returns + ------- + object + Retrieved object from file. + + Examples + -------- + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B']) + >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP + >>> store.put('data', df) # doctest: +SKIP + >>> store.get('data') # doctest: +SKIP + >>> print(store.keys()) # doctest: +SKIP + ['/data1', '/data2'] + >>> store.select('/data1') # doctest: +SKIP + A B + 0 1 2 + 1 3 4 + >>> store.select('/data1', where='columns == A') # doctest: +SKIP + A + 0 1 + 1 3 + >>> store.close() # doctest: +SKIP + """ + group = self.get_node(key) + if group is None: + raise KeyError(f"No object named {key} in the file") + + # create the storer and axes + where = _ensure_term(where, scope_level=1) + s = self._create_storer(group) + s.infer_axes() + + # function to call on iteration + def func(_start, _stop, _where): + return s.read(start=_start, stop=_stop, where=_where, columns=columns) + + # create the iterator + it = TableIterator( + self, + s, + func, + where=where, + nrows=s.nrows, + start=start, + stop=stop, + iterator=iterator, + chunksize=chunksize, + auto_close=auto_close, + ) + + return it.get_result() + + def select_as_coordinates( + self, + key: str, + where=None, + start: int | None = None, + stop: int | None = None, + ): + """ + return the selection as an Index + + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + + + Parameters + ---------- + key : str + where : list of Term (or convertible) objects, optional + start : integer (defaults to None), row number to start selection + stop : integer (defaults to None), row number to stop selection + """ + where = _ensure_term(where, scope_level=1) + tbl = self.get_storer(key) + if not isinstance(tbl, Table): + raise TypeError("can only read_coordinates with a table") + return tbl.read_coordinates(where=where, start=start, stop=stop) + + def select_column( + self, + key: str, + column: str, + start: int | None = None, + stop: int | None = None, + ): + """ + return a single column from the table. This is generally only useful to + select an indexable + + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + + Parameters + ---------- + key : str + column : str + The column of interest. 
+ start : int or None, default None + stop : int or None, default None + + Raises + ------ + raises KeyError if the column is not found (or key is not a valid + store) + raises ValueError if the column can not be extracted individually (it + is part of a data block) + + """ + tbl = self.get_storer(key) + if not isinstance(tbl, Table): + raise TypeError("can only read_column with a table") + return tbl.read_column(column=column, start=start, stop=stop) + + def select_as_multiple( + self, + keys, + where=None, + selector=None, + columns=None, + start=None, + stop=None, + iterator: bool = False, + chunksize: int | None = None, + auto_close: bool = False, + ): + """ + Retrieve pandas objects from multiple tables. + + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + + Parameters + ---------- + keys : a list of the tables + selector : the table to apply the where criteria (defaults to keys[0] + if not supplied) + columns : the columns I want back + start : integer (defaults to None), row number to start selection + stop : integer (defaults to None), row number to stop selection + iterator : bool, return an iterator, default False + chunksize : nrows to include in iteration, return an iterator + auto_close : bool, default False + Should automatically close the store when finished. + + Raises + ------ + raises KeyError if keys or selector is not found or keys is empty + raises TypeError if keys is not a list or tuple + raises ValueError if the tables are not ALL THE SAME DIMENSIONS + """ + # default to single select + where = _ensure_term(where, scope_level=1) + if isinstance(keys, (list, tuple)) and len(keys) == 1: + keys = keys[0] + if isinstance(keys, str): + return self.select( + key=keys, + where=where, + columns=columns, + start=start, + stop=stop, + iterator=iterator, + chunksize=chunksize, + auto_close=auto_close, + ) + + if not isinstance(keys, (list, tuple)): + raise TypeError("keys must be a list/tuple") + + if not len(keys): + raise ValueError("keys must have a non-zero length") + + if selector is None: + selector = keys[0] + + # collect the tables + tbls = [self.get_storer(k) for k in keys] + s = self.get_storer(selector) + + # validate rows + nrows = None + for t, k in itertools.chain([(s, selector)], zip(tbls, keys)): + if t is None: + raise KeyError(f"Invalid table [{k}]") + if not t.is_table: + raise TypeError( + f"object [{t.pathname}] is not a table, and cannot be used in all " + "select as multiple" + ) + + if nrows is None: + nrows = t.nrows + elif t.nrows != nrows: + raise ValueError("all tables must have exactly the same nrows!") + + # The isinstance checks here are redundant with the check above, + # but necessary for mypy; see GH#29757 + _tbls = [x for x in tbls if isinstance(x, Table)] + + # axis is the concentration axes + axis = {t.non_index_axes[0][0] for t in _tbls}.pop() + + def func(_start, _stop, _where): + # retrieve the objs, _where is always passed as a set of + # coordinates here + objs = [ + t.read(where=_where, columns=columns, start=_start, stop=_stop) + for t in tbls + ] + + # concat and return + return concat(objs, axis=axis, verify_integrity=False)._consolidate() + + # create the iterator + it = TableIterator( + self, + s, + func, + where=where, + nrows=nrows, + start=start, + stop=stop, + iterator=iterator, + 
chunksize=chunksize,
+            auto_close=auto_close,
+        )
+
+        return it.get_result(coordinates=True)
+
+    def put(
+        self,
+        key: str,
+        value: DataFrame | Series,
+        format=None,
+        index: bool = True,
+        append: bool = False,
+        complib=None,
+        complevel: int | None = None,
+        min_itemsize: int | dict[str, int] | None = None,
+        nan_rep=None,
+        data_columns: Literal[True] | list[str] | None = None,
+        encoding=None,
+        errors: str = "strict",
+        track_times: bool = True,
+        dropna: bool = False,
+    ) -> None:
+        """
+        Store object in HDFStore.
+
+        Parameters
+        ----------
+        key : str
+        value : {Series, DataFrame}
+        format : 'fixed(f)|table(t)', default is 'fixed'
+            Format to use when storing object in HDFStore. Value can be one of:
+
+            ``'fixed'``
+                Fixed format. Fast writing/reading. Not-appendable, nor searchable.
+            ``'table'``
+                Table format. Write as a PyTables Table structure which may perform
+                worse but allow more flexible operations like searching / selecting
+                subsets of the data.
+        index : bool, default True
+            Write DataFrame index as a column.
+        append : bool, default False
+            This will force Table format, append the input data to the existing.
+        data_columns : list of columns or True, default None
+            List of columns to create as data columns, or True to use all columns.
+            See `here
+            <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
+        encoding : str, default None
+            Provide an encoding for strings.
+        track_times : bool, default True
+            Parameter is propagated to 'create_table' method of 'PyTables'.
+            If set to False it enables having the same h5 files (same hashes)
+            independent of creation time.
+        dropna : bool, default False, optional
+            Remove missing values.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
+        >>> store = pd.HDFStore("store.h5", 'w')  # doctest: +SKIP
+        >>> store.put('data', df)  # doctest: +SKIP
+        """
+        if format is None:
+            format = get_option("io.hdf.default_format") or "fixed"
+        format = self._validate_format(format)
+        self._write_to_group(
+            key,
+            value,
+            format=format,
+            index=index,
+            append=append,
+            complib=complib,
+            complevel=complevel,
+            min_itemsize=min_itemsize,
+            nan_rep=nan_rep,
+            data_columns=data_columns,
+            encoding=encoding,
+            errors=errors,
+            track_times=track_times,
+            dropna=dropna,
+        )
+
+    def remove(self, key: str, where=None, start=None, stop=None) -> None:
+        """
+        Remove pandas object partially by specifying the where condition
+
+        Parameters
+        ----------
+        key : str
+            Node to remove or delete rows from
+        where : list of Term (or convertible) objects, optional
+        start : integer (defaults to None), row number to start selection
+        stop : integer (defaults to None), row number to stop selection
+
+        Returns
+        -------
+        number of rows removed (or None if not a Table)
+
+        Raises
+        ------
+        raises KeyError if key is not a valid store
+
+        """
+        where = _ensure_term(where, scope_level=1)
+        try:
+            s = self.get_storer(key)
+        except KeyError:
+            # the key is not a valid store, re-raising KeyError
+            raise
+        except AssertionError:
+            # surface any assertion errors for e.g. debugging
+            raise
+        except Exception as err:
+            # In tests we get here with ClosedFileError, TypeError, and
+            # _table_mod.NoSuchNodeError. TODO: Catch only these?
+
+            if where is not None:
+                raise ValueError(
+                    "trying to remove a node with a non-None where clause!"
+                ) from err
+
+            # we are actually trying to remove a node (with children)
+            node = self.get_node(key)
+            if node is not None:
+                node._f_remove(recursive=True)
+                return None
+
+        # remove the node
+        if com.all_none(where, start, stop):
+            s.group._f_remove(recursive=True)
+
+        # delete from the table
+        else:
+            if not s.is_table:
+                raise ValueError(
+                    "can only remove with where on objects written as tables"
+                )
+            return s.delete(where=where, start=start, stop=stop)
+
+    def append(
+        self,
+        key: str,
+        value: DataFrame | Series,
+        format=None,
+        axes=None,
+        index: bool | list[str] = True,
+        append: bool = True,
+        complib=None,
+        complevel: int | None = None,
+        columns=None,
+        min_itemsize: int | dict[str, int] | None = None,
+        nan_rep=None,
+        chunksize: int | None = None,
+        expectedrows=None,
+        dropna: bool | None = None,
+        data_columns: Literal[True] | list[str] | None = None,
+        encoding=None,
+        errors: str = "strict",
+    ) -> None:
+        """
+        Append to Table in file.
+
+        Node must already exist and be Table format.
+
+        Parameters
+        ----------
+        key : str
+        value : {Series, DataFrame}
+        format : 'table' is the default
+            Format to use when storing object in HDFStore. Value can be one of:
+
+            ``'table'``
+                Table format. Write as a PyTables Table structure which may perform
+                worse but allow more flexible operations like searching / selecting
+                subsets of the data.
+        index : bool, default True
+            Write DataFrame index as a column.
+        append : bool, default True
+            Append the input data to the existing.
+        data_columns : list of columns, or True, default None
+            List of columns to create as indexed data columns for on-disk
+            queries, or True to use all columns. By default only the axes
+            of the object are indexed. See `here
+            <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
+        min_itemsize : dict of columns that specify minimum str sizes
+        nan_rep : str to use as str nan representation
+        chunksize : size to chunk the writing
+        expectedrows : expected TOTAL row size of this table
+        encoding : default None, provide an encoding for str
+        dropna : bool, default False, optional
+            Do not write an all-NaN row to the store; settable
+            by the option 'io.hdf.dropna_table'.
+ + Notes + ----- + Does *not* check if data being appended overlaps with existing + data in the table, so be careful + + Examples + -------- + >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B']) + >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP + >>> store.put('data', df1, format='table') # doctest: +SKIP + >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B']) + >>> store.append('data', df2) # doctest: +SKIP + >>> store.close() # doctest: +SKIP + A B + 0 1 2 + 1 3 4 + 0 5 6 + 1 7 8 + """ + if columns is not None: + raise TypeError( + "columns is not a supported keyword in append, try data_columns" + ) + + if dropna is None: + dropna = get_option("io.hdf.dropna_table") + if format is None: + format = get_option("io.hdf.default_format") or "table" + format = self._validate_format(format) + self._write_to_group( + key, + value, + format=format, + axes=axes, + index=index, + append=append, + complib=complib, + complevel=complevel, + min_itemsize=min_itemsize, + nan_rep=nan_rep, + chunksize=chunksize, + expectedrows=expectedrows, + dropna=dropna, + data_columns=data_columns, + encoding=encoding, + errors=errors, + ) + + def append_to_multiple( + self, + d: dict, + value, + selector, + data_columns=None, + axes=None, + dropna: bool = False, + **kwargs, + ) -> None: + """ + Append to multiple tables + + Parameters + ---------- + d : a dict of table_name to table_columns, None is acceptable as the + values of one node (this will get all the remaining columns) + value : a pandas object + selector : a string that designates the indexable table; all of its + columns will be designed as data_columns, unless data_columns is + passed, in which case these are used + data_columns : list of columns to create as data columns, or True to + use all columns + dropna : if evaluates to True, drop rows from all tables if any single + row in each table has all NaN. Default False. 
+ + Notes + ----- + axes parameter is currently not accepted + + """ + if axes is not None: + raise TypeError( + "axes is currently not accepted as a parameter to append_to_multiple; " + "you can create the tables independently instead" + ) + + if not isinstance(d, dict): + raise ValueError( + "append_to_multiple must have a dictionary specified as the " + "way to split the value" + ) + + if selector not in d: + raise ValueError( + "append_to_multiple requires a selector that is in passed dict" + ) + + # figure out the splitting axis (the non_index_axis) + axis = next(iter(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))) + + # figure out how to split the value + remain_key = None + remain_values: list = [] + for k, v in d.items(): + if v is None: + if remain_key is not None: + raise ValueError( + "append_to_multiple can only have one value in d that is None" + ) + remain_key = k + else: + remain_values.extend(v) + if remain_key is not None: + ordered = value.axes[axis] + ordd = ordered.difference(Index(remain_values)) + ordd = sorted(ordered.get_indexer(ordd)) + d[remain_key] = ordered.take(ordd) + + # data_columns + if data_columns is None: + data_columns = d[selector] + + # ensure rows are synchronized across the tables + if dropna: + idxs = (value[cols].dropna(how="all").index for cols in d.values()) + valid_index = next(idxs) + for index in idxs: + valid_index = valid_index.intersection(index) + value = value.loc[valid_index] + + min_itemsize = kwargs.pop("min_itemsize", None) + + # append + for k, v in d.items(): + dc = data_columns if k == selector else None + + # compute the val + val = value.reindex(v, axis=axis) + + filtered = ( + {key: value for (key, value) in min_itemsize.items() if key in v} + if min_itemsize is not None + else None + ) + self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs) + + def create_table_index( + self, + key: str, + columns=None, + optlevel: int | None = None, + kind: str | None = None, + ) -> None: + """ + Create a pytables index on the table. + + Parameters + ---------- + key : str + columns : None, bool, or listlike[str] + Indicate which columns to create an index on. + + * False : Do not create any indexes. + * True : Create indexes on all columns. + * None : Create indexes on all columns. + * listlike : Create indexes on the given columns. + + optlevel : int or None, default None + Optimization level, if None, pytables defaults to 6. + kind : str or None, default None + Kind of index, if None, pytables defaults to "medium". + + Raises + ------ + TypeError: raises if the node is not a table + """ + # version requirements + _tables() + s = self.get_storer(key) + if s is None: + return + + if not isinstance(s, Table): + raise TypeError("cannot create table index on a Fixed format store") + s.create_index(columns=columns, optlevel=optlevel, kind=kind) + + def groups(self) -> list: + """ + Return a list of all the top-level nodes. + + Each node returned is not a pandas storage object. + + Returns + ------- + list + List of objects. 
+ + Examples + -------- + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B']) + >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP + >>> store.put('data', df) # doctest: +SKIP + >>> print(store.groups()) # doctest: +SKIP + >>> store.close() # doctest: +SKIP + [/data (Group) '' + children := ['axis0' (Array), 'axis1' (Array), 'block0_values' (Array), + 'block0_items' (Array)]] + """ + _tables() + self._check_if_open() + assert self._handle is not None # for mypy + assert _table_mod is not None # for mypy + return [ + g + for g in self._handle.walk_groups() + if ( + not isinstance(g, _table_mod.link.Link) + and ( + getattr(g._v_attrs, "pandas_type", None) + or getattr(g, "table", None) + or (isinstance(g, _table_mod.table.Table) and g._v_name != "table") + ) + ) + ] + + def walk(self, where: str = "/") -> Iterator[tuple[str, list[str], list[str]]]: + """ + Walk the pytables group hierarchy for pandas objects. + + This generator will yield the group path, subgroups and pandas object + names for each group. + + Any non-pandas PyTables objects that are not a group will be ignored. + + The `where` group itself is listed first (preorder), then each of its + child groups (following an alphanumerical order) is also traversed, + following the same procedure. + + Parameters + ---------- + where : str, default "/" + Group where to start walking. + + Yields + ------ + path : str + Full path to a group (without trailing '/'). + groups : list + Names (strings) of the groups contained in `path`. + leaves : list + Names (strings) of the pandas objects contained in `path`. + + Examples + -------- + >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B']) + >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP + >>> store.put('data', df1, format='table') # doctest: +SKIP + >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['A', 'B']) + >>> store.append('data', df2) # doctest: +SKIP + >>> store.close() # doctest: +SKIP + >>> for group in store.walk(): # doctest: +SKIP + ... 
print(group) # doctest: +SKIP + >>> store.close() # doctest: +SKIP + """ + _tables() + self._check_if_open() + assert self._handle is not None # for mypy + assert _table_mod is not None # for mypy + + for g in self._handle.walk_groups(where): + if getattr(g._v_attrs, "pandas_type", None) is not None: + continue + + groups = [] + leaves = [] + for child in g._v_children.values(): + pandas_type = getattr(child._v_attrs, "pandas_type", None) + if pandas_type is None: + if isinstance(child, _table_mod.group.Group): + groups.append(child._v_name) + else: + leaves.append(child._v_name) + + yield (g._v_pathname.rstrip("/"), groups, leaves) + + def get_node(self, key: str) -> Node | None: + """return the node with the key or None if it does not exist""" + self._check_if_open() + if not key.startswith("/"): + key = "/" + key + + assert self._handle is not None + assert _table_mod is not None # for mypy + try: + node = self._handle.get_node(self.root, key) + except _table_mod.exceptions.NoSuchNodeError: + return None + + assert isinstance(node, _table_mod.Node), type(node) + return node + + def get_storer(self, key: str) -> GenericFixed | Table: + """return the storer object for a key, raise if not in the file""" + group = self.get_node(key) + if group is None: + raise KeyError(f"No object named {key} in the file") + + s = self._create_storer(group) + s.infer_axes() + return s + + def copy( + self, + file, + mode: str = "w", + propindexes: bool = True, + keys=None, + complib=None, + complevel: int | None = None, + fletcher32: bool = False, + overwrite: bool = True, + ) -> HDFStore: + """ + Copy the existing store to a new file, updating in place. + + Parameters + ---------- + propindexes : bool, default True + Restore indexes in copied file. + keys : list, optional + List of keys to include in the copy (defaults to all). + overwrite : bool, default True + Whether to overwrite (remove and replace) existing nodes in the new store. + mode, complib, complevel, fletcher32 same as in HDFStore.__init__ + + Returns + ------- + open file handle of the new store + """ + new_store = HDFStore( + file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32 + ) + if keys is None: + keys = list(self.keys()) + if not isinstance(keys, (tuple, list)): + keys = [keys] + for k in keys: + s = self.get_storer(k) + if s is not None: + if k in new_store: + if overwrite: + new_store.remove(k) + + data = self.select(k) + if isinstance(s, Table): + index: bool | list[str] = False + if propindexes: + index = [a.name for a in s.axes if a.is_indexed] + new_store.append( + k, + data, + index=index, + data_columns=getattr(s, "data_columns", None), + encoding=s.encoding, + ) + else: + new_store.put(k, data, encoding=s.encoding) + + return new_store + + def info(self) -> str: + """ + Print detailed information on the store. 
+ + Returns + ------- + str + + Examples + -------- + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B']) + >>> store = pd.HDFStore("store.h5", 'w') # doctest: +SKIP + >>> store.put('data', df) # doctest: +SKIP + >>> print(store.info()) # doctest: +SKIP + >>> store.close() # doctest: +SKIP + + File path: store.h5 + /data frame (shape->[2,2]) + """ + path = pprint_thing(self._path) + output = f"{type(self)}\nFile path: {path}\n" + + if self.is_open: + lkeys = sorted(self.keys()) + if len(lkeys): + keys = [] + values = [] + + for k in lkeys: + try: + s = self.get_storer(k) + if s is not None: + keys.append(pprint_thing(s.pathname or k)) + values.append(pprint_thing(s or "invalid_HDFStore node")) + except AssertionError: + # surface any assertion errors for e.g. debugging + raise + except Exception as detail: + keys.append(k) + dstr = pprint_thing(detail) + values.append(f"[invalid_HDFStore node: {dstr}]") + + output += adjoin(12, keys, values) + else: + output += "Empty" + else: + output += "File is CLOSED" + + return output + + # ------------------------------------------------------------------------ + # private methods + + def _check_if_open(self): + if not self.is_open: + raise ClosedFileError(f"{self._path} file is not open!") + + def _validate_format(self, format: str) -> str: + """validate / deprecate formats""" + # validate + try: + format = _FORMAT_MAP[format.lower()] + except KeyError as err: + raise TypeError(f"invalid HDFStore format specified [{format}]") from err + + return format + + def _create_storer( + self, + group, + format=None, + value: DataFrame | Series | None = None, + encoding: str = "UTF-8", + errors: str = "strict", + ) -> GenericFixed | Table: + """return a suitable class to operate""" + cls: type[GenericFixed] | type[Table] + + if value is not None and not isinstance(value, (Series, DataFrame)): + raise TypeError("value must be None, Series, or DataFrame") + + pt = _ensure_decoded(getattr(group._v_attrs, "pandas_type", None)) + tt = _ensure_decoded(getattr(group._v_attrs, "table_type", None)) + + # infer the pt from the passed value + if pt is None: + if value is None: + _tables() + assert _table_mod is not None # for mypy + if getattr(group, "table", None) or isinstance( + group, _table_mod.table.Table + ): + pt = "frame_table" + tt = "generic_table" + else: + raise TypeError( + "cannot create a storer if the object is not existing " + "nor a value are passed" + ) + else: + if isinstance(value, Series): + pt = "series" + else: + pt = "frame" + + # we are actually a table + if format == "table": + pt += "_table" + + # a storer node + if "table" not in pt: + _STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed} + try: + cls = _STORER_MAP[pt] + except KeyError as err: + raise TypeError( + f"cannot properly create the storer for: [_STORER_MAP] [group->" + f"{group},value->{type(value)},format->{format}" + ) from err + return cls(self, group, encoding=encoding, errors=errors) + + # existing node (and must be a table) + if tt is None: + # if we are a writer, determine the tt + if value is not None: + if pt == "series_table": + index = getattr(value, "index", None) + if index is not None: + if index.nlevels == 1: + tt = "appendable_series" + elif index.nlevels > 1: + tt = "appendable_multiseries" + elif pt == "frame_table": + index = getattr(value, "index", None) + if index is not None: + if index.nlevels == 1: + tt = "appendable_frame" + elif index.nlevels > 1: + tt = "appendable_multiframe" + + _TABLE_MAP = { + "generic_table": GenericTable, + 
"appendable_series": AppendableSeriesTable, + "appendable_multiseries": AppendableMultiSeriesTable, + "appendable_frame": AppendableFrameTable, + "appendable_multiframe": AppendableMultiFrameTable, + "worm": WORMTable, + } + try: + cls = _TABLE_MAP[tt] + except KeyError as err: + raise TypeError( + f"cannot properly create the storer for: [_TABLE_MAP] [group->" + f"{group},value->{type(value)},format->{format}" + ) from err + + return cls(self, group, encoding=encoding, errors=errors) + + def _write_to_group( + self, + key: str, + value: DataFrame | Series, + format, + axes=None, + index: bool | list[str] = True, + append: bool = False, + complib=None, + complevel: int | None = None, + fletcher32=None, + min_itemsize: int | dict[str, int] | None = None, + chunksize: int | None = None, + expectedrows=None, + dropna: bool = False, + nan_rep=None, + data_columns=None, + encoding=None, + errors: str = "strict", + track_times: bool = True, + ) -> None: + # we don't want to store a table node at all if our object is 0-len + # as there are not dtypes + if getattr(value, "empty", None) and (format == "table" or append): + return + + group = self._identify_group(key, append) + + s = self._create_storer(group, format, value, encoding=encoding, errors=errors) + if append: + # raise if we are trying to append to a Fixed format, + # or a table that exists (and we are putting) + if not s.is_table or (s.is_table and format == "fixed" and s.is_exists): + raise ValueError("Can only append to Tables") + if not s.is_exists: + s.set_object_info() + else: + s.set_object_info() + + if not s.is_table and complib: + raise ValueError("Compression not supported on Fixed format stores") + + # write the object + s.write( + obj=value, + axes=axes, + append=append, + complib=complib, + complevel=complevel, + fletcher32=fletcher32, + min_itemsize=min_itemsize, + chunksize=chunksize, + expectedrows=expectedrows, + dropna=dropna, + nan_rep=nan_rep, + data_columns=data_columns, + track_times=track_times, + ) + + if isinstance(s, Table) and index: + s.create_index(columns=index) + + def _read_group(self, group: Node): + s = self._create_storer(group) + s.infer_axes() + return s.read() + + def _identify_group(self, key: str, append: bool) -> Node: + """Identify HDF5 group based on key, delete/create group if needed.""" + group = self.get_node(key) + + # we make this assertion for mypy; the get_node call will already + # have raised if this is incorrect + assert self._handle is not None + + # remove the node if we are not appending + if group is not None and not append: + self._handle.remove_node(group, recursive=True) + group = None + + if group is None: + group = self._create_nodes_and_group(key) + + return group + + def _create_nodes_and_group(self, key: str) -> Node: + """Create nodes from key and return group name.""" + # assertion for mypy + assert self._handle is not None + + paths = key.split("/") + # recursively create the groups + path = "/" + for p in paths: + if not len(p): + continue + new_path = path + if not path.endswith("/"): + new_path += "/" + new_path += p + group = self.get_node(new_path) + if group is None: + group = self._handle.create_group(path, p) + path = new_path + return group + + +class TableIterator: + """ + Define the iteration interface on a table + + Parameters + ---------- + store : HDFStore + s : the referred storer + func : the function to execute the query + where : the where of the query + nrows : the rows to iterate on + start : the passed start value (default is None) + stop : the 
passed stop value (default is None) + iterator : bool, default False + Whether to use the default iterator. + chunksize : the passed chunking value (default is 100000) + auto_close : bool, default False + Whether to automatically close the store at the end of iteration. + """ + + chunksize: int | None + store: HDFStore + s: GenericFixed | Table + + def __init__( + self, + store: HDFStore, + s: GenericFixed | Table, + func, + where, + nrows, + start=None, + stop=None, + iterator: bool = False, + chunksize: int | None = None, + auto_close: bool = False, + ) -> None: + self.store = store + self.s = s + self.func = func + self.where = where + + # set start/stop if they are not set if we are a table + if self.s.is_table: + if nrows is None: + nrows = 0 + if start is None: + start = 0 + if stop is None: + stop = nrows + stop = min(nrows, stop) + + self.nrows = nrows + self.start = start + self.stop = stop + + self.coordinates = None + if iterator or chunksize is not None: + if chunksize is None: + chunksize = 100000 + self.chunksize = int(chunksize) + else: + self.chunksize = None + + self.auto_close = auto_close + + def __iter__(self) -> Iterator: + # iterate + current = self.start + if self.coordinates is None: + raise ValueError("Cannot iterate until get_result is called.") + while current < self.stop: + stop = min(current + self.chunksize, self.stop) + value = self.func(None, None, self.coordinates[current:stop]) + current = stop + if value is None or not len(value): + continue + + yield value + + self.close() + + def close(self) -> None: + if self.auto_close: + self.store.close() + + def get_result(self, coordinates: bool = False): + # return the actual iterator + if self.chunksize is not None: + if not isinstance(self.s, Table): + raise TypeError("can only use an iterator or chunksize on a table") + + self.coordinates = self.s.read_coordinates(where=self.where) + + return self + + # if specified read via coordinates (necessary for multiple selections + if coordinates: + if not isinstance(self.s, Table): + raise TypeError("can only read_coordinates on a table") + where = self.s.read_coordinates( + where=self.where, start=self.start, stop=self.stop + ) + else: + where = self.where + + # directly return the result + results = self.func(self.start, self.stop, where) + self.close() + return results + + +class IndexCol: + """ + an index column description class + + Parameters + ---------- + axis : axis which I reference + values : the ndarray like converted values + kind : a string description of this type + typ : the pytables type + pos : the position in the pytables + + """ + + is_an_indexable: bool = True + is_data_indexable: bool = True + _info_fields = ["freq", "tz", "index_name"] + + def __init__( + self, + name: str, + values=None, + kind=None, + typ=None, + cname: str | None = None, + axis=None, + pos=None, + freq=None, + tz=None, + index_name=None, + ordered=None, + table=None, + meta=None, + metadata=None, + ) -> None: + if not isinstance(name, str): + raise ValueError("`name` must be a str.") + + self.values = values + self.kind = kind + self.typ = typ + self.name = name + self.cname = cname or name + self.axis = axis + self.pos = pos + self.freq = freq + self.tz = tz + self.index_name = index_name + self.ordered = ordered + self.table = table + self.meta = meta + self.metadata = metadata + + if pos is not None: + self.set_pos(pos) + + # These are ensured as long as the passed arguments match the + # constructor annotations. 
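+        # (e.g. a caller constructing IndexCol(name="index", axis=0, pos=0,
+        # kind="integer", typ=_tables().Int64Col(pos=0)) satisfies these;
+        # the argument values here are illustrative only)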
+ assert isinstance(self.name, str) + assert isinstance(self.cname, str) + + @property + def itemsize(self) -> int: + # Assumes self.typ has already been initialized + return self.typ.itemsize + + @property + def kind_attr(self) -> str: + return f"{self.name}_kind" + + def set_pos(self, pos: int) -> None: + """set the position of this column in the Table""" + self.pos = pos + if pos is not None and self.typ is not None: + self.typ._v_pos = pos + + def __repr__(self) -> str: + temp = tuple( + map(pprint_thing, (self.name, self.cname, self.axis, self.pos, self.kind)) + ) + return ",".join( + [ + f"{key}->{value}" + for key, value in zip(["name", "cname", "axis", "pos", "kind"], temp) + ] + ) + + def __eq__(self, other: Any) -> bool: + """compare 2 col items""" + return all( + getattr(self, a, None) == getattr(other, a, None) + for a in ["name", "cname", "axis", "pos"] + ) + + def __ne__(self, other) -> bool: + return not self.__eq__(other) + + @property + def is_indexed(self) -> bool: + """return whether I am an indexed column""" + if not hasattr(self.table, "cols"): + # e.g. if infer hasn't been called yet, self.table will be None. + return False + return getattr(self.table.cols, self.cname).is_indexed + + def convert( + self, values: np.ndarray, nan_rep, encoding: str, errors: str + ) -> tuple[np.ndarray, np.ndarray] | tuple[Index, Index]: + """ + Convert the data from this selection to the appropriate pandas type. + """ + assert isinstance(values, np.ndarray), type(values) + + # values is a recarray + if values.dtype.fields is not None: + # Copy, otherwise values will be a view + # preventing the original recarry from being free'ed + values = values[self.cname].copy() + + val_kind = _ensure_decoded(self.kind) + values = _maybe_convert(values, val_kind, encoding, errors) + + kwargs = {} + kwargs["name"] = _ensure_decoded(self.index_name) + + if self.freq is not None: + kwargs["freq"] = _ensure_decoded(self.freq) + + factory: type[Index] | type[DatetimeIndex] = Index + if lib.is_np_dtype(values.dtype, "M") or isinstance( + values.dtype, DatetimeTZDtype + ): + factory = DatetimeIndex + elif values.dtype == "i8" and "freq" in kwargs: + # PeriodIndex data is stored as i8 + # error: Incompatible types in assignment (expression has type + # "Callable[[Any, KwArg(Any)], PeriodIndex]", variable has type + # "Union[Type[Index], Type[DatetimeIndex]]") + factory = lambda x, **kwds: PeriodIndex( # type: ignore[assignment] + ordinal=x, **kwds + ) + + # making an Index instance could throw a number of different errors + try: + new_pd_index = factory(values, **kwargs) + except ValueError: + # if the output freq is different that what we recorded, + # it should be None (see also 'doc example part 2') + if "freq" in kwargs: + kwargs["freq"] = None + new_pd_index = factory(values, **kwargs) + final_pd_index = _set_tz(new_pd_index, self.tz) + return final_pd_index, final_pd_index + + def take_data(self): + """return the values""" + return self.values + + @property + def attrs(self): + return self.table._v_attrs + + @property + def description(self): + return self.table.description + + @property + def col(self): + """return my current col description""" + return getattr(self.description, self.cname, None) + + @property + def cvalues(self): + """return my cython values""" + return self.values + + def __iter__(self) -> Iterator: + return iter(self.values) + + def maybe_set_size(self, min_itemsize=None) -> None: + """ + maybe set a string col itemsize: + min_itemsize can be an integer or a dict with this columns 
name + with an integer size + """ + if _ensure_decoded(self.kind) == "string": + if isinstance(min_itemsize, dict): + min_itemsize = min_itemsize.get(self.name) + + if min_itemsize is not None and self.typ.itemsize < min_itemsize: + self.typ = _tables().StringCol(itemsize=min_itemsize, pos=self.pos) + + def validate_names(self) -> None: + pass + + def validate_and_set(self, handler: AppendableTable, append: bool) -> None: + self.table = handler.table + self.validate_col() + self.validate_attr(append) + self.validate_metadata(handler) + self.write_metadata(handler) + self.set_attr() + + def validate_col(self, itemsize=None): + """validate this column: return the compared against itemsize""" + # validate this column for string truncation (or reset to the max size) + if _ensure_decoded(self.kind) == "string": + c = self.col + if c is not None: + if itemsize is None: + itemsize = self.itemsize + if c.itemsize < itemsize: + raise ValueError( + f"Trying to store a string with len [{itemsize}] in " + f"[{self.cname}] column but\nthis column has a limit of " + f"[{c.itemsize}]!\nConsider using min_itemsize to " + "preset the sizes on these columns" + ) + return c.itemsize + + return None + + def validate_attr(self, append: bool) -> None: + # check for backwards incompatibility + if append: + existing_kind = getattr(self.attrs, self.kind_attr, None) + if existing_kind is not None and existing_kind != self.kind: + raise TypeError( + f"incompatible kind in col [{existing_kind} - {self.kind}]" + ) + + def update_info(self, info) -> None: + """ + set/update the info for this indexable with the key/value + if there is a conflict raise/warn as needed + """ + for key in self._info_fields: + value = getattr(self, key, None) + idx = info.setdefault(self.name, {}) + + existing_value = idx.get(key) + if key in idx and value is not None and existing_value != value: + # frequency/name just warn + if key in ["freq", "index_name"]: + ws = attribute_conflict_doc % (key, existing_value, value) + warnings.warn( + ws, AttributeConflictWarning, stacklevel=find_stack_level() + ) + + # reset + idx[key] = None + setattr(self, key, None) + + else: + raise ValueError( + f"invalid info for [{self.name}] for [{key}], " + f"existing_value [{existing_value}] conflicts with " + f"new value [{value}]" + ) + elif value is not None or existing_value is not None: + idx[key] = value + + def set_info(self, info) -> None: + """set my state from the passed info""" + idx = info.get(self.name) + if idx is not None: + self.__dict__.update(idx) + + def set_attr(self) -> None: + """set the kind for this column""" + setattr(self.attrs, self.kind_attr, self.kind) + + def validate_metadata(self, handler: AppendableTable) -> None: + """validate that kind=category does not change the categories""" + if self.meta == "category": + new_metadata = self.metadata + cur_metadata = handler.read_metadata(self.cname) + if ( + new_metadata is not None + and cur_metadata is not None + and not array_equivalent( + new_metadata, cur_metadata, strict_nan=True, dtype_equal=True + ) + ): + raise ValueError( + "cannot append a categorical with " + "different categories to the existing" + ) + + def write_metadata(self, handler: AppendableTable) -> None: + """set the meta data""" + if self.metadata is not None: + handler.write_metadata(self.cname, self.metadata) + + +class GenericIndexCol(IndexCol): + """an index which is not represented in the data of the table""" + + @property + def is_indexed(self) -> bool: + return False + + def convert( + self, values: 
np.ndarray, nan_rep, encoding: str, errors: str + ) -> tuple[Index, Index]: + """ + Convert the data from this selection to the appropriate pandas type. + + Parameters + ---------- + values : np.ndarray + nan_rep : str + encoding : str + errors : str + """ + assert isinstance(values, np.ndarray), type(values) + + index = RangeIndex(len(values)) + return index, index + + def set_attr(self) -> None: + pass + + +class DataCol(IndexCol): + """ + a data holding column, by definition this is not indexable + + Parameters + ---------- + data : the actual data + cname : the column name in the table to hold the data (typically + values) + meta : a string description of the metadata + metadata : the actual metadata + """ + + is_an_indexable = False + is_data_indexable = False + _info_fields = ["tz", "ordered"] + + def __init__( + self, + name: str, + values=None, + kind=None, + typ=None, + cname: str | None = None, + pos=None, + tz=None, + ordered=None, + table=None, + meta=None, + metadata=None, + dtype: DtypeArg | None = None, + data=None, + ) -> None: + super().__init__( + name=name, + values=values, + kind=kind, + typ=typ, + pos=pos, + cname=cname, + tz=tz, + ordered=ordered, + table=table, + meta=meta, + metadata=metadata, + ) + self.dtype = dtype + self.data = data + + @property + def dtype_attr(self) -> str: + return f"{self.name}_dtype" + + @property + def meta_attr(self) -> str: + return f"{self.name}_meta" + + def __repr__(self) -> str: + temp = tuple( + map( + pprint_thing, (self.name, self.cname, self.dtype, self.kind, self.shape) + ) + ) + return ",".join( + [ + f"{key}->{value}" + for key, value in zip(["name", "cname", "dtype", "kind", "shape"], temp) + ] + ) + + def __eq__(self, other: Any) -> bool: + """compare 2 col items""" + return all( + getattr(self, a, None) == getattr(other, a, None) + for a in ["name", "cname", "dtype", "pos"] + ) + + def set_data(self, data: ArrayLike) -> None: + assert data is not None + assert self.dtype is None + + data, dtype_name = _get_data_and_dtype_name(data) + + self.data = data + self.dtype = dtype_name + self.kind = _dtype_to_kind(dtype_name) + + def take_data(self): + """return the data""" + return self.data + + @classmethod + def _get_atom(cls, values: ArrayLike) -> Col: + """ + Get an appropriately typed and shaped pytables.Col object for values. 
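+        For 1-D input the shape is presented as (1, values.size) so the
+        atom matches the 2-D row/column layout pytables stores.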
+ """ + dtype = values.dtype + # error: Item "ExtensionDtype" of "Union[ExtensionDtype, dtype[Any]]" has no + # attribute "itemsize" + itemsize = dtype.itemsize # type: ignore[union-attr] + + shape = values.shape + if values.ndim == 1: + # EA, use block shape pretending it is 2D + # TODO(EA2D): not necessary with 2D EAs + shape = (1, values.size) + + if isinstance(values, Categorical): + codes = values.codes + atom = cls.get_atom_data(shape, kind=codes.dtype.name) + elif lib.is_np_dtype(dtype, "M") or isinstance(dtype, DatetimeTZDtype): + atom = cls.get_atom_datetime64(shape) + elif lib.is_np_dtype(dtype, "m"): + atom = cls.get_atom_timedelta64(shape) + elif is_complex_dtype(dtype): + atom = _tables().ComplexCol(itemsize=itemsize, shape=shape[0]) + elif is_string_dtype(dtype): + atom = cls.get_atom_string(shape, itemsize) + else: + atom = cls.get_atom_data(shape, kind=dtype.name) + + return atom + + @classmethod + def get_atom_string(cls, shape, itemsize): + return _tables().StringCol(itemsize=itemsize, shape=shape[0]) + + @classmethod + def get_atom_coltype(cls, kind: str) -> type[Col]: + """return the PyTables column class for this column""" + if kind.startswith("uint"): + k4 = kind[4:] + col_name = f"UInt{k4}Col" + elif kind.startswith("period"): + # we store as integer + col_name = "Int64Col" + else: + kcap = kind.capitalize() + col_name = f"{kcap}Col" + + return getattr(_tables(), col_name) + + @classmethod + def get_atom_data(cls, shape, kind: str) -> Col: + return cls.get_atom_coltype(kind=kind)(shape=shape[0]) + + @classmethod + def get_atom_datetime64(cls, shape): + return _tables().Int64Col(shape=shape[0]) + + @classmethod + def get_atom_timedelta64(cls, shape): + return _tables().Int64Col(shape=shape[0]) + + @property + def shape(self): + return getattr(self.data, "shape", None) + + @property + def cvalues(self): + """return my cython values""" + return self.data + + def validate_attr(self, append) -> None: + """validate that we have the same order as the existing & same dtype""" + if append: + existing_fields = getattr(self.attrs, self.kind_attr, None) + if existing_fields is not None and existing_fields != list(self.values): + raise ValueError("appended items do not match existing items in table!") + + existing_dtype = getattr(self.attrs, self.dtype_attr, None) + if existing_dtype is not None and existing_dtype != self.dtype: + raise ValueError( + "appended items dtype do not match existing items dtype in table!" + ) + + def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str): + """ + Convert the data from this selection to the appropriate pandas type. 
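+        Reverses the on-disk encodings: datetime64/timedelta64 integer
+        views, date ordinals, categoricals (rebuilt from their stored
+        metadata) and encoded string arrays are converted back to their
+        pandas equivalents.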
+ + Parameters + ---------- + values : np.ndarray + nan_rep : + encoding : str + errors : str + + Returns + ------- + index : listlike to become an Index + data : ndarraylike to become a column + """ + assert isinstance(values, np.ndarray), type(values) + + # values is a recarray + if values.dtype.fields is not None: + values = values[self.cname] + + assert self.typ is not None + if self.dtype is None: + # Note: in tests we never have timedelta64 or datetime64, + # so the _get_data_and_dtype_name may be unnecessary + converted, dtype_name = _get_data_and_dtype_name(values) + kind = _dtype_to_kind(dtype_name) + else: + converted = values + dtype_name = self.dtype + kind = self.kind + + assert isinstance(converted, np.ndarray) # for mypy + + # use the meta if needed + meta = _ensure_decoded(self.meta) + metadata = self.metadata + ordered = self.ordered + tz = self.tz + + assert dtype_name is not None + # convert to the correct dtype + dtype = _ensure_decoded(dtype_name) + + # reverse converts + if dtype == "datetime64": + # recreate with tz if indicated + converted = _set_tz(converted, tz, coerce=True) + + elif dtype == "timedelta64": + converted = np.asarray(converted, dtype="m8[ns]") + elif dtype == "date": + try: + converted = np.asarray( + [date.fromordinal(v) for v in converted], dtype=object + ) + except ValueError: + converted = np.asarray( + [date.fromtimestamp(v) for v in converted], dtype=object + ) + + elif meta == "category": + # we have a categorical + categories = metadata + codes = converted.ravel() + + # if we have stored a NaN in the categories + # then strip it; in theory we could have BOTH + # -1s in the codes and nulls :< + if categories is None: + # Handle case of NaN-only categorical columns in which case + # the categories are an empty array; when this is stored, + # pytables cannot write a zero-len array, so on readback + # the categories would be None and `read_hdf()` would fail. + categories = Index([], dtype=np.float64) + else: + mask = isna(categories) + if mask.any(): + categories = categories[~mask] + codes[codes != -1] -= mask.astype(int).cumsum()._values + + converted = Categorical.from_codes( + codes, categories=categories, ordered=ordered, validate=False + ) + + else: + try: + converted = converted.astype(dtype, copy=False) + except TypeError: + converted = converted.astype("O", copy=False) + + # convert nans / decode + if _ensure_decoded(kind) == "string": + converted = _unconvert_string_array( + converted, nan_rep=nan_rep, encoding=encoding, errors=errors + ) + + return self.values, converted + + def set_attr(self) -> None: + """set the data for this column""" + setattr(self.attrs, self.kind_attr, self.values) + setattr(self.attrs, self.meta_attr, self.meta) + assert self.dtype is not None + setattr(self.attrs, self.dtype_attr, self.dtype) + + +class DataIndexableCol(DataCol): + """represent a data column that can be indexed""" + + is_data_indexable = True + + def validate_names(self) -> None: + if not is_string_dtype(Index(self.values).dtype): + # TODO: should the message here be more specifically non-str? 
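+            # a data-indexable column's labels are used as pytables column
+            # names, so non-string labels cannot be stored faithfully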
+ raise ValueError("cannot have non-object label DataIndexableCol") + + @classmethod + def get_atom_string(cls, shape, itemsize): + return _tables().StringCol(itemsize=itemsize) + + @classmethod + def get_atom_data(cls, shape, kind: str) -> Col: + return cls.get_atom_coltype(kind=kind)() + + @classmethod + def get_atom_datetime64(cls, shape): + return _tables().Int64Col() + + @classmethod + def get_atom_timedelta64(cls, shape): + return _tables().Int64Col() + + +class GenericDataIndexableCol(DataIndexableCol): + """represent a generic pytables data column""" + + +class Fixed: + """ + represent an object in my store + facilitate read/write of various types of objects + this is an abstract base class + + Parameters + ---------- + parent : HDFStore + group : Node + The group node where the table resides. + """ + + pandas_kind: str + format_type: str = "fixed" # GH#30962 needed by dask + obj_type: type[DataFrame | Series] + ndim: int + parent: HDFStore + is_table: bool = False + + def __init__( + self, + parent: HDFStore, + group: Node, + encoding: str | None = "UTF-8", + errors: str = "strict", + ) -> None: + assert isinstance(parent, HDFStore), type(parent) + assert _table_mod is not None # needed for mypy + assert isinstance(group, _table_mod.Node), type(group) + self.parent = parent + self.group = group + self.encoding = _ensure_encoding(encoding) + self.errors = errors + + @property + def is_old_version(self) -> bool: + return self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1 + + @property + def version(self) -> tuple[int, int, int]: + """compute and set our version""" + version = _ensure_decoded(getattr(self.group._v_attrs, "pandas_version", None)) + try: + version = tuple(int(x) for x in version.split(".")) + if len(version) == 2: + version = version + (0,) + except AttributeError: + version = (0, 0, 0) + return version + + @property + def pandas_type(self): + return _ensure_decoded(getattr(self.group._v_attrs, "pandas_type", None)) + + def __repr__(self) -> str: + """return a pretty representation of myself""" + self.infer_axes() + s = self.shape + if s is not None: + if isinstance(s, (list, tuple)): + jshape = ",".join([pprint_thing(x) for x in s]) + s = f"[{jshape}]" + return f"{self.pandas_type:12.12} (shape->{s})" + return self.pandas_type + + def set_object_info(self) -> None: + """set my pandas type & version""" + self.attrs.pandas_type = str(self.pandas_kind) + self.attrs.pandas_version = str(_version) + + def copy(self) -> Fixed: + new_self = copy.copy(self) + return new_self + + @property + def shape(self): + return self.nrows + + @property + def pathname(self): + return self.group._v_pathname + + @property + def _handle(self): + return self.parent._handle + + @property + def _filters(self): + return self.parent._filters + + @property + def _complevel(self) -> int: + return self.parent._complevel + + @property + def _fletcher32(self) -> bool: + return self.parent._fletcher32 + + @property + def attrs(self): + return self.group._v_attrs + + def set_attrs(self) -> None: + """set our object attributes""" + + def get_attrs(self) -> None: + """get our object attributes""" + + @property + def storable(self): + """return my storable""" + return self.group + + @property + def is_exists(self) -> bool: + return False + + @property + def nrows(self): + return getattr(self.storable, "nrows", None) + + def validate(self, other) -> Literal[True] | None: + """validate against an existing storable""" + if other is None: + return None + return True + + def 
validate_version(self, where=None) -> None: + """are we trying to operate on an old version?""" + + def infer_axes(self) -> bool: + """ + infer the axes of my storer + return a boolean indicating if we have a valid storer or not + """ + s = self.storable + if s is None: + return False + self.get_attrs() + return True + + def read( + self, + where=None, + columns=None, + start: int | None = None, + stop: int | None = None, + ): + raise NotImplementedError( + "cannot read on an abstract storer: subclasses should implement" + ) + + def write(self, **kwargs): + raise NotImplementedError( + "cannot write on an abstract storer: subclasses should implement" + ) + + def delete( + self, where=None, start: int | None = None, stop: int | None = None + ) -> None: + """ + support fully deleting the node in its entirety (only) - where + specification must be None + """ + if com.all_none(where, start, stop): + self._handle.remove_node(self.group, recursive=True) + return None + + raise TypeError("cannot delete on an abstract storer") + + +class GenericFixed(Fixed): + """a generified fixed version""" + + _index_type_map = {DatetimeIndex: "datetime", PeriodIndex: "period"} + _reverse_index_map = {v: k for k, v in _index_type_map.items()} + attributes: list[str] = [] + + # indexer helpers + def _class_to_alias(self, cls) -> str: + return self._index_type_map.get(cls, "") + + def _alias_to_class(self, alias): + if isinstance(alias, type): # pragma: no cover + # compat: for a short period of time master stored types + return alias + return self._reverse_index_map.get(alias, Index) + + def _get_index_factory(self, attrs): + index_class = self._alias_to_class( + _ensure_decoded(getattr(attrs, "index_class", "")) + ) + + factory: Callable + + if index_class == DatetimeIndex: + + def f(values, freq=None, tz=None): + # data are already in UTC, localize and convert if tz present + dta = DatetimeArray._simple_new(values.values, freq=freq) + result = DatetimeIndex._simple_new(dta, name=None) + if tz is not None: + result = result.tz_localize("UTC").tz_convert(tz) + return result + + factory = f + elif index_class == PeriodIndex: + + def f(values, freq=None, tz=None): + dtype = PeriodDtype(freq) + parr = PeriodArray._simple_new(values, dtype=dtype) + return PeriodIndex._simple_new(parr, name=None) + + factory = f + else: + factory = index_class + + kwargs = {} + if "freq" in attrs: + kwargs["freq"] = attrs["freq"] + if index_class is Index: + # DTI/PI would be gotten by _alias_to_class + factory = TimedeltaIndex + + if "tz" in attrs: + if isinstance(attrs["tz"], bytes): + # created by python2 + kwargs["tz"] = attrs["tz"].decode("utf-8") + else: + # created by python3 + kwargs["tz"] = attrs["tz"] + assert index_class is DatetimeIndex # just checking + + return factory, kwargs + + def validate_read(self, columns, where) -> None: + """ + raise if any keywords are passed which are not-None + """ + if columns is not None: + raise TypeError( + "cannot pass a column specification when reading " + "a Fixed format store. this store must be selected in its entirety" + ) + if where is not None: + raise TypeError( + "cannot pass a where specification when reading " + "from a Fixed format store. 
this store must be selected in its entirety" + ) + + @property + def is_exists(self) -> bool: + return True + + def set_attrs(self) -> None: + """set our object attributes""" + self.attrs.encoding = self.encoding + self.attrs.errors = self.errors + + def get_attrs(self) -> None: + """retrieve our attributes""" + self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None)) + self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict")) + for n in self.attributes: + setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None))) + + # error: Signature of "write" incompatible with supertype "Fixed" + def write(self, obj, **kwargs) -> None: # type: ignore[override] + self.set_attrs() + + def read_array(self, key: str, start: int | None = None, stop: int | None = None): + """read an array for the specified node (off of group""" + import tables + + node = getattr(self.group, key) + attrs = node._v_attrs + + transposed = getattr(attrs, "transposed", False) + + if isinstance(node, tables.VLArray): + ret = node[0][start:stop] + else: + dtype = _ensure_decoded(getattr(attrs, "value_type", None)) + shape = getattr(attrs, "shape", None) + + if shape is not None: + # length 0 axis + ret = np.empty(shape, dtype=dtype) + else: + ret = node[start:stop] + + if dtype == "datetime64": + # reconstruct a timezone if indicated + tz = getattr(attrs, "tz", None) + ret = _set_tz(ret, tz, coerce=True) + + elif dtype == "timedelta64": + ret = np.asarray(ret, dtype="m8[ns]") + + if transposed: + return ret.T + else: + return ret + + def read_index( + self, key: str, start: int | None = None, stop: int | None = None + ) -> Index: + variety = _ensure_decoded(getattr(self.attrs, f"{key}_variety")) + + if variety == "multi": + return self.read_multi_index(key, start=start, stop=stop) + elif variety == "regular": + node = getattr(self.group, key) + index = self.read_index_node(node, start=start, stop=stop) + return index + else: # pragma: no cover + raise TypeError(f"unrecognized index variety: {variety}") + + def write_index(self, key: str, index: Index) -> None: + if isinstance(index, MultiIndex): + setattr(self.attrs, f"{key}_variety", "multi") + self.write_multi_index(key, index) + else: + setattr(self.attrs, f"{key}_variety", "regular") + converted = _convert_index("index", index, self.encoding, self.errors) + + self.write_array(key, converted.values) + + node = getattr(self.group, key) + node._v_attrs.kind = converted.kind + node._v_attrs.name = index.name + + if isinstance(index, (DatetimeIndex, PeriodIndex)): + node._v_attrs.index_class = self._class_to_alias(type(index)) + + if isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): + node._v_attrs.freq = index.freq + + if isinstance(index, DatetimeIndex) and index.tz is not None: + node._v_attrs.tz = _get_tz(index.tz) + + def write_multi_index(self, key: str, index: MultiIndex) -> None: + setattr(self.attrs, f"{key}_nlevels", index.nlevels) + + for i, (lev, level_codes, name) in enumerate( + zip(index.levels, index.codes, index.names) + ): + # write the level + if isinstance(lev.dtype, ExtensionDtype): + raise NotImplementedError( + "Saving a MultiIndex with an extension dtype is not supported." 
+ ) + level_key = f"{key}_level{i}" + conv_level = _convert_index(level_key, lev, self.encoding, self.errors) + self.write_array(level_key, conv_level.values) + node = getattr(self.group, level_key) + node._v_attrs.kind = conv_level.kind + node._v_attrs.name = name + + # write the name + setattr(node._v_attrs, f"{key}_name{name}", name) + + # write the labels + label_key = f"{key}_label{i}" + self.write_array(label_key, level_codes) + + def read_multi_index( + self, key: str, start: int | None = None, stop: int | None = None + ) -> MultiIndex: + nlevels = getattr(self.attrs, f"{key}_nlevels") + + levels = [] + codes = [] + names: list[Hashable] = [] + for i in range(nlevels): + level_key = f"{key}_level{i}" + node = getattr(self.group, level_key) + lev = self.read_index_node(node, start=start, stop=stop) + levels.append(lev) + names.append(lev.name) + + label_key = f"{key}_label{i}" + level_codes = self.read_array(label_key, start=start, stop=stop) + codes.append(level_codes) + + return MultiIndex( + levels=levels, codes=codes, names=names, verify_integrity=True + ) + + def read_index_node( + self, node: Node, start: int | None = None, stop: int | None = None + ) -> Index: + data = node[start:stop] + # If the index was an empty array write_array_empty() will + # have written a sentinel. Here we replace it with the original. + if "shape" in node._v_attrs and np.prod(node._v_attrs.shape) == 0: + data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type) + kind = _ensure_decoded(node._v_attrs.kind) + name = None + + if "name" in node._v_attrs: + name = _ensure_str(node._v_attrs.name) + name = _ensure_decoded(name) + + attrs = node._v_attrs + factory, kwargs = self._get_index_factory(attrs) + + if kind in ("date", "object"): + index = factory( + _unconvert_index( + data, kind, encoding=self.encoding, errors=self.errors + ), + dtype=object, + **kwargs, + ) + else: + index = factory( + _unconvert_index( + data, kind, encoding=self.encoding, errors=self.errors + ), + **kwargs, + ) + + index.name = name + + return index + + def write_array_empty(self, key: str, value: ArrayLike) -> None: + """write a 0-len array""" + # ugly hack for length 0 axes + arr = np.empty((1,) * value.ndim) + self._handle.create_array(self.group, key, arr) + node = getattr(self.group, key) + node._v_attrs.value_type = str(value.dtype) + node._v_attrs.shape = value.shape + + def write_array( + self, key: str, obj: AnyArrayLike, items: Index | None = None + ) -> None: + # TODO: we only have a few tests that get here, the only EA + # that gets passed is DatetimeArray, and we never have + # both self._filters and EA + + value = extract_array(obj, extract_numpy=True) + + if key in self.group: + self._handle.remove_node(self.group, key) + + # Transform needed to interface with pytables row/col notation + empty_array = value.size == 0 + transposed = False + + if isinstance(value.dtype, CategoricalDtype): + raise NotImplementedError( + "Cannot store a category dtype in a HDF5 dataset that uses format=" + '"fixed". Use format="table".' + ) + if not empty_array: + if hasattr(value, "T"): + # ExtensionArrays (1d) may not have transpose. 
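+                # 2-D values are written transposed; the `transposed` flag
+                # stored on the node below lets read_array undo this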
+ value = value.T + transposed = True + + atom = None + if self._filters is not None: + with suppress(ValueError): + # get the atom for this datatype + atom = _tables().Atom.from_dtype(value.dtype) + + if atom is not None: + # We only get here if self._filters is non-None and + # the Atom.from_dtype call succeeded + + # create an empty chunked array and fill it from value + if not empty_array: + ca = self._handle.create_carray( + self.group, key, atom, value.shape, filters=self._filters + ) + ca[:] = value + + else: + self.write_array_empty(key, value) + + elif value.dtype.type == np.object_: + # infer the type, warn if we have a non-string type here (for + # performance) + inferred_type = lib.infer_dtype(value, skipna=False) + if empty_array: + pass + elif inferred_type == "string": + pass + else: + ws = performance_doc % (inferred_type, key, items) + warnings.warn(ws, PerformanceWarning, stacklevel=find_stack_level()) + + vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom()) + vlarr.append(value) + + elif lib.is_np_dtype(value.dtype, "M"): + self._handle.create_array(self.group, key, value.view("i8")) + getattr(self.group, key)._v_attrs.value_type = "datetime64" + elif isinstance(value.dtype, DatetimeTZDtype): + # store as UTC + # with a zone + + # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no + # attribute "asi8" + self._handle.create_array( + self.group, key, value.asi8 # type: ignore[union-attr] + ) + + node = getattr(self.group, key) + # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no + # attribute "tz" + node._v_attrs.tz = _get_tz(value.tz) # type: ignore[union-attr] + node._v_attrs.value_type = "datetime64" + elif lib.is_np_dtype(value.dtype, "m"): + self._handle.create_array(self.group, key, value.view("i8")) + getattr(self.group, key)._v_attrs.value_type = "timedelta64" + elif empty_array: + self.write_array_empty(key, value) + else: + self._handle.create_array(self.group, key, value) + + getattr(self.group, key)._v_attrs.transposed = transposed + + +class SeriesFixed(GenericFixed): + pandas_kind = "series" + attributes = ["name"] + + name: Hashable + + @property + def shape(self): + try: + return (len(self.group.values),) + except (TypeError, AttributeError): + return None + + def read( + self, + where=None, + columns=None, + start: int | None = None, + stop: int | None = None, + ) -> Series: + self.validate_read(columns, where) + index = self.read_index("index", start=start, stop=stop) + values = self.read_array("values", start=start, stop=stop) + result = Series(values, index=index, name=self.name, copy=False) + if using_pyarrow_string_dtype() and is_string_array(values, skipna=True): + result = result.astype("string[pyarrow_numpy]") + return result + + # error: Signature of "write" incompatible with supertype "Fixed" + def write(self, obj, **kwargs) -> None: # type: ignore[override] + super().write(obj, **kwargs) + self.write_index("index", obj.index) + self.write_array("values", obj) + self.attrs.name = obj.name + + +class BlockManagerFixed(GenericFixed): + attributes = ["ndim", "nblocks"] + + nblocks: int + + @property + def shape(self) -> Shape | None: + try: + ndim = self.ndim + + # items + items = 0 + for i in range(self.nblocks): + node = getattr(self.group, f"block{i}_items") + shape = getattr(node, "shape", None) + if shape is not None: + items += shape[0] + + # data shape + node = self.group.block0_values + shape = getattr(node, "shape", None) + if shape is not None: + shape = list(shape[0 : (ndim - 
1)]) + else: + shape = [] + + shape.append(items) + + return shape + except AttributeError: + return None + + def read( + self, + where=None, + columns=None, + start: int | None = None, + stop: int | None = None, + ) -> DataFrame: + # start, stop applied to rows, so 0th axis only + self.validate_read(columns, where) + select_axis = self.obj_type()._get_block_manager_axis(0) + + axes = [] + for i in range(self.ndim): + _start, _stop = (start, stop) if i == select_axis else (None, None) + ax = self.read_index(f"axis{i}", start=_start, stop=_stop) + axes.append(ax) + + items = axes[0] + dfs = [] + + for i in range(self.nblocks): + blk_items = self.read_index(f"block{i}_items") + values = self.read_array(f"block{i}_values", start=_start, stop=_stop) + + columns = items[items.get_indexer(blk_items)] + df = DataFrame(values.T, columns=columns, index=axes[1], copy=False) + if using_pyarrow_string_dtype() and is_string_array(values, skipna=True): + df = df.astype("string[pyarrow_numpy]") + dfs.append(df) + + if len(dfs) > 0: + out = concat(dfs, axis=1, copy=True) + out = out.reindex(columns=items, copy=False) + return out + + return DataFrame(columns=axes[0], index=axes[1]) + + # error: Signature of "write" incompatible with supertype "Fixed" + def write(self, obj, **kwargs) -> None: # type: ignore[override] + super().write(obj, **kwargs) + + # TODO(ArrayManager) HDFStore relies on accessing the blocks + if isinstance(obj._mgr, ArrayManager): + obj = obj._as_manager("block") + + data = obj._mgr + if not data.is_consolidated(): + data = data.consolidate() + + self.attrs.ndim = data.ndim + for i, ax in enumerate(data.axes): + if i == 0 and (not ax.is_unique): + raise ValueError("Columns index has to be unique for fixed format") + self.write_index(f"axis{i}", ax) + + # Supporting mixed-type DataFrame objects...nontrivial + self.attrs.nblocks = len(data.blocks) + for i, blk in enumerate(data.blocks): + # I have no idea why, but writing values before items fixed #2299 + blk_items = data.items.take(blk.mgr_locs) + self.write_array(f"block{i}_values", blk.values, items=blk_items) + self.write_index(f"block{i}_items", blk_items) + + +class FrameFixed(BlockManagerFixed): + pandas_kind = "frame" + obj_type = DataFrame + + +class Table(Fixed): + """ + represent a table: + facilitate read/write of various types of tables + + Attrs in Table Node + ------------------- + These are attributes that are store in the main table node, they are + necessary to recreate these tables when read back in. 
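+    They are written by set_attrs and restored by get_attrs.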
+ + index_axes : a list of tuples of the (original indexing axis and + index column) + non_index_axes: a list of tuples of the (original index axis and + columns on a non-indexing axis) + values_axes : a list of the columns which comprise the data of this + table + data_columns : a list of the columns that we are allowing indexing + (these become single columns in values_axes) + nan_rep : the string to use for nan representations for string + objects + levels : the names of levels + metadata : the names of the metadata columns + """ + + pandas_kind = "wide_table" + format_type: str = "table" # GH#30962 needed by dask + table_type: str + levels: int | list[Hashable] = 1 + is_table = True + + metadata: list + + def __init__( + self, + parent: HDFStore, + group: Node, + encoding: str | None = None, + errors: str = "strict", + index_axes: list[IndexCol] | None = None, + non_index_axes: list[tuple[AxisInt, Any]] | None = None, + values_axes: list[DataCol] | None = None, + data_columns: list | None = None, + info: dict | None = None, + nan_rep=None, + ) -> None: + super().__init__(parent, group, encoding=encoding, errors=errors) + self.index_axes = index_axes or [] + self.non_index_axes = non_index_axes or [] + self.values_axes = values_axes or [] + self.data_columns = data_columns or [] + self.info = info or {} + self.nan_rep = nan_rep + + @property + def table_type_short(self) -> str: + return self.table_type.split("_")[0] + + def __repr__(self) -> str: + """return a pretty representation of myself""" + self.infer_axes() + jdc = ",".join(self.data_columns) if len(self.data_columns) else "" + dc = f",dc->[{jdc}]" + + ver = "" + if self.is_old_version: + jver = ".".join([str(x) for x in self.version]) + ver = f"[{jver}]" + + jindex_axes = ",".join([a.name for a in self.index_axes]) + return ( + f"{self.pandas_type:12.12}{ver} " + f"(typ->{self.table_type_short},nrows->{self.nrows}," + f"ncols->{self.ncols},indexers->[{jindex_axes}]{dc})" + ) + + def __getitem__(self, c: str): + """return the axis for c""" + for a in self.axes: + if c == a.name: + return a + return None + + def validate(self, other) -> None: + """validate against an existing table""" + if other is None: + return + + if other.table_type != self.table_type: + raise TypeError( + "incompatible table_type with existing " + f"[{other.table_type} - {self.table_type}]" + ) + + for c in ["index_axes", "non_index_axes", "values_axes"]: + sv = getattr(self, c, None) + ov = getattr(other, c, None) + if sv != ov: + # show the error for the specific axes + # Argument 1 to "enumerate" has incompatible type + # "Optional[Any]"; expected "Iterable[Any]" [arg-type] + for i, sax in enumerate(sv): # type: ignore[arg-type] + # Value of type "Optional[Any]" is not indexable [index] + oax = ov[i] # type: ignore[index] + if sax != oax: + raise ValueError( + f"invalid combination of [{c}] on appending data " + f"[{sax}] vs current table [{oax}]" + ) + + # should never get here + raise Exception( + f"invalid combination of [{c}] on appending data [{sv}] vs " + f"current table [{ov}]" + ) + + @property + def is_multi_index(self) -> bool: + """the levels attribute is 1 or a list in the case of a multi-index""" + return isinstance(self.levels, list) + + def validate_multiindex( + self, obj: DataFrame | Series + ) -> tuple[DataFrame, list[Hashable]]: + """ + validate that we can store the multi-index; reset and return the + new object + """ + levels = com.fill_missing_names(obj.index.names) + try: + reset_obj = obj.reset_index() + except ValueError as err: + 
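+            # reset_index raises ValueError when an index level name
+            # collides with an existing column label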
raise ValueError( + "duplicate names/columns in the multi-index when storing as a table" + ) from err + assert isinstance(reset_obj, DataFrame) # for mypy + return reset_obj, levels + + @property + def nrows_expected(self) -> int: + """based on our axes, compute the expected nrows""" + return np.prod([i.cvalues.shape[0] for i in self.index_axes]) + + @property + def is_exists(self) -> bool: + """has this table been created""" + return "table" in self.group + + @property + def storable(self): + return getattr(self.group, "table", None) + + @property + def table(self): + """return the table group (this is my storable)""" + return self.storable + + @property + def dtype(self): + return self.table.dtype + + @property + def description(self): + return self.table.description + + @property + def axes(self) -> itertools.chain[IndexCol]: + return itertools.chain(self.index_axes, self.values_axes) + + @property + def ncols(self) -> int: + """the number of total columns in the values axes""" + return sum(len(a.values) for a in self.values_axes) + + @property + def is_transposed(self) -> bool: + return False + + @property + def data_orientation(self) -> tuple[int, ...]: + """return a tuple of my permutated axes, non_indexable at the front""" + return tuple( + itertools.chain( + [int(a[0]) for a in self.non_index_axes], + [int(a.axis) for a in self.index_axes], + ) + ) + + def queryables(self) -> dict[str, Any]: + """return a dict of the kinds allowable columns for this object""" + # mypy doesn't recognize DataFrame._AXIS_NAMES, so we re-write it here + axis_names = {0: "index", 1: "columns"} + + # compute the values_axes queryables + d1 = [(a.cname, a) for a in self.index_axes] + d2 = [(axis_names[axis], None) for axis, values in self.non_index_axes] + d3 = [ + (v.cname, v) for v in self.values_axes if v.name in set(self.data_columns) + ] + + return dict(d1 + d2 + d3) + + def index_cols(self): + """return a list of my index cols""" + # Note: each `i.cname` below is assured to be a str. + return [(i.axis, i.cname) for i in self.index_axes] + + def values_cols(self) -> list[str]: + """return a list of my values cols""" + return [i.cname for i in self.values_axes] + + def _get_metadata_path(self, key: str) -> str: + """return the metadata pathname for this key""" + group = self.group._v_pathname + return f"{group}/meta/{key}/meta" + + def write_metadata(self, key: str, values: np.ndarray) -> None: + """ + Write out a metadata array to the key as a fixed-format Series. 
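+        The Series is written under <group>/meta/<key>/meta (see
+        _get_metadata_path) and read back by read_metadata.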
+ + Parameters + ---------- + key : str + values : ndarray + """ + self.parent.put( + self._get_metadata_path(key), + Series(values, copy=False), + format="table", + encoding=self.encoding, + errors=self.errors, + nan_rep=self.nan_rep, + ) + + def read_metadata(self, key: str): + """return the meta data array for this key""" + if getattr(getattr(self.group, "meta", None), key, None) is not None: + return self.parent.select(self._get_metadata_path(key)) + return None + + def set_attrs(self) -> None: + """set our table type & indexables""" + self.attrs.table_type = str(self.table_type) + self.attrs.index_cols = self.index_cols() + self.attrs.values_cols = self.values_cols() + self.attrs.non_index_axes = self.non_index_axes + self.attrs.data_columns = self.data_columns + self.attrs.nan_rep = self.nan_rep + self.attrs.encoding = self.encoding + self.attrs.errors = self.errors + self.attrs.levels = self.levels + self.attrs.info = self.info + + def get_attrs(self) -> None: + """retrieve our attributes""" + self.non_index_axes = getattr(self.attrs, "non_index_axes", None) or [] + self.data_columns = getattr(self.attrs, "data_columns", None) or [] + self.info = getattr(self.attrs, "info", None) or {} + self.nan_rep = getattr(self.attrs, "nan_rep", None) + self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None)) + self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict")) + self.levels: list[Hashable] = getattr(self.attrs, "levels", None) or [] + self.index_axes = [a for a in self.indexables if a.is_an_indexable] + self.values_axes = [a for a in self.indexables if not a.is_an_indexable] + + def validate_version(self, where=None) -> None: + """are we trying to operate on an old version?""" + if where is not None: + if self.is_old_version: + ws = incompatibility_doc % ".".join([str(x) for x in self.version]) + warnings.warn( + ws, + IncompatibilityWarning, + stacklevel=find_stack_level(), + ) + + def validate_min_itemsize(self, min_itemsize) -> None: + """ + validate the min_itemsize doesn't contain items that are not in the + axes this needs data_columns to be defined + """ + if min_itemsize is None: + return + if not isinstance(min_itemsize, dict): + return + + q = self.queryables() + for k in min_itemsize: + # ok, apply generally + if k == "values": + continue + if k not in q: + raise ValueError( + f"min_itemsize has the key [{k}] which is not an axis or " + "data_column" + ) + + @cache_readonly + def indexables(self): + """create/cache the indexables if they don't exist""" + _indexables = [] + + desc = self.description + table_attrs = self.table.attrs + + # Note: each of the `name` kwargs below are str, ensured + # by the definition in index_cols. + # index columns + for i, (axis, name) in enumerate(self.attrs.index_cols): + atom = getattr(desc, name) + md = self.read_metadata(name) + meta = "category" if md is not None else None + + kind_attr = f"{name}_kind" + kind = getattr(table_attrs, kind_attr, None) + + index_col = IndexCol( + name=name, + axis=axis, + pos=i, + kind=kind, + typ=atom, + table=self.table, + meta=meta, + metadata=md, + ) + _indexables.append(index_col) + + # values columns + dc = set(self.data_columns) + base_pos = len(_indexables) + + def f(i, c): + assert isinstance(c, str) + klass = DataCol + if c in dc: + klass = DataIndexableCol + + atom = getattr(desc, c) + adj_name = _maybe_adjust_name(c, self.version) + + # TODO: why kind_attr here? 
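+            # DataCol.set_attr stores self.values (the column labels) under
+            # the `{name}_kind` attribute, so reading it back here recovers
+            # the labels for this values block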
+ values = getattr(table_attrs, f"{adj_name}_kind", None) + dtype = getattr(table_attrs, f"{adj_name}_dtype", None) + # Argument 1 to "_dtype_to_kind" has incompatible type + # "Optional[Any]"; expected "str" [arg-type] + kind = _dtype_to_kind(dtype) # type: ignore[arg-type] + + md = self.read_metadata(c) + # TODO: figure out why these two versions of `meta` dont always match. + # meta = "category" if md is not None else None + meta = getattr(table_attrs, f"{adj_name}_meta", None) + + obj = klass( + name=adj_name, + cname=c, + values=values, + kind=kind, + pos=base_pos + i, + typ=atom, + table=self.table, + meta=meta, + metadata=md, + dtype=dtype, + ) + return obj + + # Note: the definition of `values_cols` ensures that each + # `c` below is a str. + _indexables.extend([f(i, c) for i, c in enumerate(self.attrs.values_cols)]) + + return _indexables + + def create_index( + self, columns=None, optlevel=None, kind: str | None = None + ) -> None: + """ + Create a pytables index on the specified columns. + + Parameters + ---------- + columns : None, bool, or listlike[str] + Indicate which columns to create an index on. + + * False : Do not create any indexes. + * True : Create indexes on all columns. + * None : Create indexes on all columns. + * listlike : Create indexes on the given columns. + + optlevel : int or None, default None + Optimization level, if None, pytables defaults to 6. + kind : str or None, default None + Kind of index, if None, pytables defaults to "medium". + + Raises + ------ + TypeError if trying to create an index on a complex-type column. + + Notes + ----- + Cannot index Time64Col or ComplexCol. + Pytables must be >= 3.0. + """ + if not self.infer_axes(): + return + if columns is False: + return + + # index all indexables and data_columns + if columns is None or columns is True: + columns = [a.cname for a in self.axes if a.is_data_indexable] + if not isinstance(columns, (tuple, list)): + columns = [columns] + + kw = {} + if optlevel is not None: + kw["optlevel"] = optlevel + if kind is not None: + kw["kind"] = kind + + table = self.table + for c in columns: + v = getattr(table.cols, c, None) + if v is not None: + # remove the index if the kind/optlevel have changed + if v.is_indexed: + index = v.index + cur_optlevel = index.optlevel + cur_kind = index.kind + + if kind is not None and cur_kind != kind: + v.remove_index() + else: + kw["kind"] = cur_kind + + if optlevel is not None and cur_optlevel != optlevel: + v.remove_index() + else: + kw["optlevel"] = cur_optlevel + + # create the index + if not v.is_indexed: + if v.type.startswith("complex"): + raise TypeError( + "Columns containing complex values can be stored but " + "cannot be indexed when using table format. Either use " + "fixed format, set index=False, or do not include " + "the columns containing complex values to " + "data_columns when initializing the table." + ) + v.create_index(**kw) + elif c in self.non_index_axes[0][1]: + # GH 28156 + raise AttributeError( + f"column {c} is not a data_column.\n" + f"In order to read column {c} you must reload the dataframe \n" + f"into HDFStore and include {c} with the data_columns argument." + ) + + def _read_axes( + self, where, start: int | None = None, stop: int | None = None + ) -> list[tuple[np.ndarray, np.ndarray] | tuple[Index, Index]]: + """ + Create the axes sniffed from the table. + + Parameters + ---------- + where : ??? 
+ start : int or None, default None + stop : int or None, default None + + Returns + ------- + List[Tuple[index_values, column_values]] + """ + # create the selection + selection = Selection(self, where=where, start=start, stop=stop) + values = selection.select() + + results = [] + # convert the data + for a in self.axes: + a.set_info(self.info) + res = a.convert( + values, + nan_rep=self.nan_rep, + encoding=self.encoding, + errors=self.errors, + ) + results.append(res) + + return results + + @classmethod + def get_object(cls, obj, transposed: bool): + """return the data for this obj""" + return obj + + def validate_data_columns(self, data_columns, min_itemsize, non_index_axes): + """ + take the input data_columns and min_itemize and create a data + columns spec + """ + if not len(non_index_axes): + return [] + + axis, axis_labels = non_index_axes[0] + info = self.info.get(axis, {}) + if info.get("type") == "MultiIndex" and data_columns: + raise ValueError( + f"cannot use a multi-index on axis [{axis}] with " + f"data_columns {data_columns}" + ) + + # evaluate the passed data_columns, True == use all columns + # take only valid axis labels + if data_columns is True: + data_columns = list(axis_labels) + elif data_columns is None: + data_columns = [] + + # if min_itemsize is a dict, add the keys (exclude 'values') + if isinstance(min_itemsize, dict): + existing_data_columns = set(data_columns) + data_columns = list(data_columns) # ensure we do not modify + data_columns.extend( + [ + k + for k in min_itemsize.keys() + if k != "values" and k not in existing_data_columns + ] + ) + + # return valid columns in the order of our axis + return [c for c in data_columns if c in axis_labels] + + def _create_axes( + self, + axes, + obj: DataFrame, + validate: bool = True, + nan_rep=None, + data_columns=None, + min_itemsize=None, + ): + """ + Create and return the axes. + + Parameters + ---------- + axes: list or None + The names or numbers of the axes to create. + obj : DataFrame + The object to create axes on. + validate: bool, default True + Whether to validate the obj against an existing object already written. + nan_rep : + A value to use for string column nan_rep. + data_columns : List[str], True, or None, default None + Specify the columns that we want to create to allow indexing on. + + * True : Use all available columns. + * None : Use no columns. + * List[str] : Use the specified columns. + + min_itemsize: Dict[str, int] or None, default None + The min itemsize for a column in bytes. + """ + if not isinstance(obj, DataFrame): + group = self.group._v_name + raise TypeError( + f"cannot properly create the storer for: [group->{group}," + f"value->{type(obj)}]" + ) + + # set the default axes if needed + if axes is None: + axes = [0] + + # map axes to numbers + axes = [obj._get_axis_number(a) for a in axes] + + # do we have an existing table (if so, use its axes & data_columns) + if self.infer_axes(): + table_exists = True + axes = [a.axis for a in self.index_axes] + data_columns = list(self.data_columns) + nan_rep = self.nan_rep + # TODO: do we always have validate=True here? 
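+            # when appending to an existing table, its recorded axes,
+            # data_columns and nan_rep take precedence over the arguments
+            # passed in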
+ else: + table_exists = False + + new_info = self.info + + assert self.ndim == 2 # with next check, we must have len(axes) == 1 + # currently support on ndim-1 axes + if len(axes) != self.ndim - 1: + raise ValueError( + "currently only support ndim-1 indexers in an AppendableTable" + ) + + # create according to the new data + new_non_index_axes: list = [] + + # nan_representation + if nan_rep is None: + nan_rep = "nan" + + # We construct the non-index-axis first, since that alters new_info + idx = next(x for x in [0, 1] if x not in axes) + + a = obj.axes[idx] + # we might be able to change the axes on the appending data if necessary + append_axis = list(a) + if table_exists: + indexer = len(new_non_index_axes) # i.e. 0 + exist_axis = self.non_index_axes[indexer][1] + if not array_equivalent( + np.array(append_axis), + np.array(exist_axis), + strict_nan=True, + dtype_equal=True, + ): + # ahah! -> reindex + if array_equivalent( + np.array(sorted(append_axis)), + np.array(sorted(exist_axis)), + strict_nan=True, + dtype_equal=True, + ): + append_axis = exist_axis + + # the non_index_axes info + info = new_info.setdefault(idx, {}) + info["names"] = list(a.names) + info["type"] = type(a).__name__ + + new_non_index_axes.append((idx, append_axis)) + + # Now we can construct our new index axis + idx = axes[0] + a = obj.axes[idx] + axis_name = obj._get_axis_name(idx) + new_index = _convert_index(axis_name, a, self.encoding, self.errors) + new_index.axis = idx + + # Because we are always 2D, there is only one new_index, so + # we know it will have pos=0 + new_index.set_pos(0) + new_index.update_info(new_info) + new_index.maybe_set_size(min_itemsize) # check for column conflicts + + new_index_axes = [new_index] + j = len(new_index_axes) # i.e. 1 + assert j == 1 + + # reindex by our non_index_axes & compute data_columns + assert len(new_non_index_axes) == 1 + for a in new_non_index_axes: + obj = _reindex_axis(obj, a[0], a[1]) + + transposed = new_index.axis == 1 + + # figure out data_columns and get out blocks + data_columns = self.validate_data_columns( + data_columns, min_itemsize, new_non_index_axes + ) + + frame = self.get_object(obj, transposed)._consolidate() + + blocks, blk_items = self._get_blocks_and_items( + frame, table_exists, new_non_index_axes, self.values_axes, data_columns + ) + + # add my values + vaxes = [] + for i, (blk, b_items) in enumerate(zip(blocks, blk_items)): + # shape of the data column are the indexable axes + klass = DataCol + name = None + + # we have a data_column + if data_columns and len(b_items) == 1 and b_items[0] in data_columns: + klass = DataIndexableCol + name = b_items[0] + if not (name is None or isinstance(name, str)): + # TODO: should the message here be more specifically non-str? 
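+                    # the label becomes a PyTables column name, which must be
+                    # a str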
+                    raise ValueError("cannot have non-object label DataIndexableCol")
+
+            # make sure that we match up the existing columns
+            # if we have an existing table
+            existing_col: DataCol | None
+
+            if table_exists and validate:
+                try:
+                    existing_col = self.values_axes[i]
+                except (IndexError, KeyError) as err:
+                    raise ValueError(
+                        f"Incompatible appended table [{blocks}] "
+                        f"with existing table [{self.values_axes}]"
+                    ) from err
+            else:
+                existing_col = None
+
+            new_name = name or f"values_block_{i}"
+            data_converted = _maybe_convert_for_string_atom(
+                new_name,
+                blk.values,
+                existing_col=existing_col,
+                min_itemsize=min_itemsize,
+                nan_rep=nan_rep,
+                encoding=self.encoding,
+                errors=self.errors,
+                columns=b_items,
+            )
+            adj_name = _maybe_adjust_name(new_name, self.version)
+
+            typ = klass._get_atom(data_converted)
+            kind = _dtype_to_kind(data_converted.dtype.name)
+            tz = None
+            if getattr(data_converted, "tz", None) is not None:
+                tz = _get_tz(data_converted.tz)
+
+            meta = metadata = ordered = None
+            if isinstance(data_converted.dtype, CategoricalDtype):
+                ordered = data_converted.ordered
+                meta = "category"
+                metadata = np.array(data_converted.categories, copy=False).ravel()
+
+            data, dtype_name = _get_data_and_dtype_name(data_converted)
+
+            col = klass(
+                name=adj_name,
+                cname=new_name,
+                values=list(b_items),
+                typ=typ,
+                pos=j,
+                kind=kind,
+                tz=tz,
+                ordered=ordered,
+                meta=meta,
+                metadata=metadata,
+                dtype=dtype_name,
+                data=data,
+            )
+            col.update_info(new_info)
+
+            vaxes.append(col)
+
+            j += 1
+
+        dcs = [col.name for col in vaxes if col.is_data_indexable]
+
+        new_table = type(self)(
+            parent=self.parent,
+            group=self.group,
+            encoding=self.encoding,
+            errors=self.errors,
+            index_axes=new_index_axes,
+            non_index_axes=new_non_index_axes,
+            values_axes=vaxes,
+            data_columns=dcs,
+            info=new_info,
+            nan_rep=nan_rep,
+        )
+        if hasattr(self, "levels"):
+            # TODO: get this into constructor, only for appropriate subclass
+            new_table.levels = self.levels
+
+        new_table.validate_min_itemsize(min_itemsize)
+
+        if validate and table_exists:
+            new_table.validate(self)
+
+        return new_table
+
+    @staticmethod
+    def _get_blocks_and_items(
+        frame: DataFrame,
+        table_exists: bool,
+        new_non_index_axes,
+        values_axes,
+        data_columns,
+    ):
+        # Helper to clarify non-state-altering parts of _create_axes
+
+        # TODO(ArrayManager) HDFStore relies on accessing the blocks
+        if isinstance(frame._mgr, ArrayManager):
+            frame = frame._as_manager("block")
+
+        def get_blk_items(mgr):
+            return [mgr.items.take(blk.mgr_locs) for blk in mgr.blocks]
+
+        mgr = frame._mgr
+        mgr = cast(BlockManager, mgr)
+        blocks: list[Block] = list(mgr.blocks)
+        blk_items: list[Index] = get_blk_items(mgr)
+
+        if len(data_columns):
+            # TODO: prove that we only get here with axis == 1?
+            #  It is the case in all extant tests, but NOT the case
+            #  outside this `if len(data_columns)` check.
+
+            axis, axis_labels = new_non_index_axes[0]
+            new_labels = Index(axis_labels).difference(Index(data_columns))
+            mgr = frame.reindex(new_labels, axis=axis)._mgr
+            mgr = cast(BlockManager, mgr)
+
+            blocks = list(mgr.blocks)
+            blk_items = get_blk_items(mgr)
+            for c in data_columns:
+                # This reindex would raise ValueError if we had a duplicate
+                # index, so we can infer that (as long as axis==1) we
+                # get a single column back, so a single block.
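+                # each data_column therefore ends up in its own single-column
+                # block, which is what makes it individually queryable later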
+                mgr = frame.reindex([c], axis=axis)._mgr
+                mgr = cast(BlockManager, mgr)
+                blocks.extend(mgr.blocks)
+                blk_items.extend(get_blk_items(mgr))
+
+        # reorder the blocks in the same order as the existing table if we can
+        if table_exists:
+            by_items = {
+                tuple(b_items.tolist()): (b, b_items)
+                for b, b_items in zip(blocks, blk_items)
+            }
+            new_blocks: list[Block] = []
+            new_blk_items = []
+            for ea in values_axes:
+                items = tuple(ea.values)
+                try:
+                    b, b_items = by_items.pop(items)
+                    new_blocks.append(b)
+                    new_blk_items.append(b_items)
+                except (IndexError, KeyError) as err:
+                    jitems = ",".join([pprint_thing(item) for item in items])
+                    raise ValueError(
+                        f"cannot match existing table structure for [{jitems}] "
+                        "on appending data"
+                    ) from err
+            blocks = new_blocks
+            blk_items = new_blk_items
+
+        return blocks, blk_items
+
+    def process_axes(self, obj, selection: Selection, columns=None) -> DataFrame:
+        """process axes filters"""
+        # make a copy to avoid side effects
+        if columns is not None:
+            columns = list(columns)
+
+        # make sure to include levels if we have them
+        if columns is not None and self.is_multi_index:
+            assert isinstance(self.levels, list)  # assured by is_multi_index
+            for n in self.levels:
+                if n not in columns:
+                    columns.insert(0, n)
+
+        # reorder by any non_index_axes & limit to the select columns
+        for axis, labels in self.non_index_axes:
+            obj = _reindex_axis(obj, axis, labels, columns)
+
+        def process_filter(field, filt, op):
+            for axis_name in obj._AXIS_ORDERS:
+                axis_number = obj._get_axis_number(axis_name)
+                axis_values = obj._get_axis(axis_name)
+                assert axis_number is not None
+
+                # see if the field is the name of an axis
+                if field == axis_name:
+                    # if we have a multi-index, then need to include
+                    # the levels
+                    if self.is_multi_index:
+                        filt = filt.union(Index(self.levels))
+
+                    takers = op(axis_values, filt)
+                    return obj.loc(axis=axis_number)[takers]
+
+                # this might be the name of a field in an axis
+                elif field in axis_values:
+                    # we need to filter on this dimension
+                    values = ensure_index(getattr(obj, field).values)
+                    filt = ensure_index(filt)
+
+                    # hack until we support reversed dim flags
+                    if isinstance(obj, DataFrame):
+                        axis_number = 1 - axis_number
+
+                    takers = op(values, filt)
+                    return obj.loc(axis=axis_number)[takers]
+
+            raise ValueError(f"cannot find the field [{field}] for filtering!")
+
+        # apply the selection filters (but keep in the same order)
+        if selection.filter is not None:
+            for field, op, filt in selection.filter.format():
+                obj = process_filter(field, filt, op)
+
+        return obj
+
+    def create_description(
+        self,
+        complib,
+        complevel: int | None,
+        fletcher32: bool,
+        expectedrows: int | None,
+    ) -> dict[str, Any]:
+        """create the description of the table from the axes & values"""
+        # use the provided expectedrows if it was passed, else estimate
+        if expectedrows is None:
+            expectedrows = max(self.nrows_expected, 10000)
+
+        d = {"name": "table", "expectedrows": expectedrows}
+
+        # description from the axes & values
+        d["description"] = {a.cname: a.typ for a in self.axes}
+
+        if complib:
+            if complevel is None:
+                complevel = self._complevel or 9
+            filters = _tables().Filters(
+                complevel=complevel,
+                complib=complib,
+                fletcher32=fletcher32 or self._fletcher32,
+            )
+            d["filters"] = filters
+        elif self._filters is not None:
+            d["filters"] = self._filters
+
+        return d
+
+    def read_coordinates(
+        self, where=None, start: int | None = None, stop: int | None = None
+    ):
+        """
+        select coordinates (row numbers) from a table; return the
+        coordinates object
+        """
+        # validate the version
+        self.validate_version(where)
+
+        # infer the data kind
+        if not self.infer_axes():
+            return False
+
+        # create the selection
+        selection = Selection(self, where=where, start=start, stop=stop)
+        coords = selection.select_coords()
+        if selection.filter is not None:
+            for field, op, filt in selection.filter.format():
+                data = self.read_column(
+                    field, start=coords.min(), stop=coords.max() + 1
+                )
+                coords = coords[op(data.iloc[coords - coords.min()], filt).values]
+
+        return Index(coords)
+
+    def read_column(
+        self,
+        column: str,
+        where=None,
+        start: int | None = None,
+        stop: int | None = None,
+    ):
+        """
+        return a single column from the table; generally only indexables
+        are interesting
+        """
+        # validate the version
+        self.validate_version()
+
+        # infer the data kind
+        if not self.infer_axes():
+            return False
+
+        if where is not None:
+            raise TypeError("read_column does not currently accept a where clause")
+
+        # find the axes
+        for a in self.axes:
+            if column == a.name:
+                if not a.is_data_indexable:
+                    raise ValueError(
+                        f"column [{column}] cannot be extracted individually; "
+                        "it is not data indexable"
+                    )
+
+                # column must be an indexable or a data column
+                c = getattr(self.table.cols, column)
+                a.set_info(self.info)
+                col_values = a.convert(
+                    c[start:stop],
+                    nan_rep=self.nan_rep,
+                    encoding=self.encoding,
+                    errors=self.errors,
+                )
+                return Series(_set_tz(col_values[1], a.tz), name=column, copy=False)
+
+        raise KeyError(f"column [{column}] not found in the table")
+
+
+class WORMTable(Table):
+    """
+    a write-once read-many table: this format DOES NOT ALLOW appending to a
+    table; writing is a one-time operation, and the data are stored in a
+    format that allows for searching the data on disk
+    """
+
+    table_type = "worm"
+
+    def read(
+        self,
+        where=None,
+        columns=None,
+        start: int | None = None,
+        stop: int | None = None,
+    ):
+        """
+        read the indices and the indexing array, calculate offset rows and return
+        """
+        raise NotImplementedError("WORMTable needs to implement read")
+
+    def write(self, **kwargs) -> None:
+        """
+        write in a format that we can search later on (but cannot append
+        to): write out the indices and the values using _write_array
+        (e.g.
a CArray) create an indexing table so that we can search + """ + raise NotImplementedError("WORMTable needs to implement write") + + +class AppendableTable(Table): + """support the new appendable table formats""" + + table_type = "appendable" + + # error: Signature of "write" incompatible with supertype "Fixed" + def write( # type: ignore[override] + self, + obj, + axes=None, + append: bool = False, + complib=None, + complevel=None, + fletcher32=None, + min_itemsize=None, + chunksize: int | None = None, + expectedrows=None, + dropna: bool = False, + nan_rep=None, + data_columns=None, + track_times: bool = True, + ) -> None: + if not append and self.is_exists: + self._handle.remove_node(self.group, "table") + + # create the axes + table = self._create_axes( + axes=axes, + obj=obj, + validate=append, + min_itemsize=min_itemsize, + nan_rep=nan_rep, + data_columns=data_columns, + ) + + for a in table.axes: + a.validate_names() + + if not table.is_exists: + # create the table + options = table.create_description( + complib=complib, + complevel=complevel, + fletcher32=fletcher32, + expectedrows=expectedrows, + ) + + # set the table attributes + table.set_attrs() + + options["track_times"] = track_times + + # create the table + table._handle.create_table(table.group, **options) + + # update my info + table.attrs.info = table.info + + # validate the axes and set the kinds + for a in table.axes: + a.validate_and_set(table, append) + + # add the rows + table.write_data(chunksize, dropna=dropna) + + def write_data(self, chunksize: int | None, dropna: bool = False) -> None: + """ + we form the data into a 2-d including indexes,values,mask write chunk-by-chunk + """ + names = self.dtype.names + nrows = self.nrows_expected + + # if dropna==True, then drop ALL nan rows + masks = [] + if dropna: + for a in self.values_axes: + # figure the mask: only do if we can successfully process this + # column, otherwise ignore the mask + mask = isna(a.data).all(axis=0) + if isinstance(mask, np.ndarray): + masks.append(mask.astype("u1", copy=False)) + + # consolidate masks + if len(masks): + mask = masks[0] + for m in masks[1:]: + mask = mask & m + mask = mask.ravel() + else: + mask = None + + # broadcast the indexes if needed + indexes = [a.cvalues for a in self.index_axes] + nindexes = len(indexes) + assert nindexes == 1, nindexes # ensures we dont need to broadcast + + # transpose the values so first dimension is last + # reshape the values if needed + values = [a.take_data() for a in self.values_axes] + values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1)) for v in values] + bvalues = [] + for i, v in enumerate(values): + new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape + bvalues.append(v.reshape(new_shape)) + + # write the chunks + if chunksize is None: + chunksize = 100000 + + rows = np.empty(min(chunksize, nrows), dtype=self.dtype) + chunks = nrows // chunksize + 1 + for i in range(chunks): + start_i = i * chunksize + end_i = min((i + 1) * chunksize, nrows) + if start_i >= end_i: + break + + self.write_data_chunk( + rows, + indexes=[a[start_i:end_i] for a in indexes], + mask=mask[start_i:end_i] if mask is not None else None, + values=[v[start_i:end_i] for v in bvalues], + ) + + def write_data_chunk( + self, + rows: np.ndarray, + indexes: list[np.ndarray], + mask: npt.NDArray[np.bool_] | None, + values: list[np.ndarray], + ) -> None: + """ + Parameters + ---------- + rows : an empty memory space where we are putting the chunk + indexes : an array of the indexes + mask : an array of the masks 
+ values : an array of the values + """ + # 0 len + for v in values: + if not np.prod(v.shape): + return + + nrows = indexes[0].shape[0] + if nrows != len(rows): + rows = np.empty(nrows, dtype=self.dtype) + names = self.dtype.names + nindexes = len(indexes) + + # indexes + for i, idx in enumerate(indexes): + rows[names[i]] = idx + + # values + for i, v in enumerate(values): + rows[names[i + nindexes]] = v + + # mask + if mask is not None: + m = ~mask.ravel().astype(bool, copy=False) + if not m.all(): + rows = rows[m] + + if len(rows): + self.table.append(rows) + self.table.flush() + + def delete(self, where=None, start: int | None = None, stop: int | None = None): + # delete all rows (and return the nrows) + if where is None or not len(where): + if start is None and stop is None: + nrows = self.nrows + self._handle.remove_node(self.group, recursive=True) + else: + # pytables<3.0 would remove a single row with stop=None + if stop is None: + stop = self.nrows + nrows = self.table.remove_rows(start=start, stop=stop) + self.table.flush() + return nrows + + # infer the data kind + if not self.infer_axes(): + return None + + # create the selection + table = self.table + selection = Selection(self, where, start=start, stop=stop) + values = selection.select_coords() + + # delete the rows in reverse order + sorted_series = Series(values, copy=False).sort_values() + ln = len(sorted_series) + + if ln: + # construct groups of consecutive rows + diff = sorted_series.diff() + groups = list(diff[diff > 1].index) + + # 1 group + if not len(groups): + groups = [0] + + # final element + if groups[-1] != ln: + groups.append(ln) + + # initial element + if groups[0] != 0: + groups.insert(0, 0) + + # we must remove in reverse order! + pg = groups.pop() + for g in reversed(groups): + rows = sorted_series.take(range(g, pg)) + table.remove_rows( + start=rows[rows.index[0]], stop=rows[rows.index[-1]] + 1 + ) + pg = g + + self.table.flush() + + # return the number of rows removed + return ln + + +class AppendableFrameTable(AppendableTable): + """support the new appendable table formats""" + + pandas_kind = "frame_table" + table_type = "appendable_frame" + ndim = 2 + obj_type: type[DataFrame | Series] = DataFrame + + @property + def is_transposed(self) -> bool: + return self.index_axes[0].axis == 1 + + @classmethod + def get_object(cls, obj, transposed: bool): + """these are written transposed""" + if transposed: + obj = obj.T + return obj + + def read( + self, + where=None, + columns=None, + start: int | None = None, + stop: int | None = None, + ): + # validate the version + self.validate_version(where) + + # infer the data kind + if not self.infer_axes(): + return None + + result = self._read_axes(where=where, start=start, stop=stop) + + info = ( + self.info.get(self.non_index_axes[0][0], {}) + if len(self.non_index_axes) + else {} + ) + + inds = [i for i, ax in enumerate(self.axes) if ax is self.index_axes[0]] + assert len(inds) == 1 + ind = inds[0] + + index = result[ind][0] + + frames = [] + for i, a in enumerate(self.axes): + if a not in self.values_axes: + continue + index_vals, cvalues = result[i] + + # we could have a multi-index constructor here + # ensure_index doesn't recognized our list-of-tuples here + if info.get("type") != "MultiIndex": + cols = Index(index_vals) + else: + cols = MultiIndex.from_tuples(index_vals) + + names = info.get("names") + if names is not None: + cols.set_names(names, inplace=True) + + if self.is_transposed: + values = cvalues + index_ = cols + cols_ = Index(index, 
name=getattr(index, "name", None)) + else: + values = cvalues.T + index_ = Index(index, name=getattr(index, "name", None)) + cols_ = cols + + # if we have a DataIndexableCol, its shape will only be 1 dim + if values.ndim == 1 and isinstance(values, np.ndarray): + values = values.reshape((1, values.shape[0])) + + if isinstance(values, np.ndarray): + df = DataFrame(values.T, columns=cols_, index=index_, copy=False) + elif isinstance(values, Index): + df = DataFrame(values, columns=cols_, index=index_) + else: + # Categorical + df = DataFrame._from_arrays([values], columns=cols_, index=index_) + if not (using_pyarrow_string_dtype() and values.dtype.kind == "O"): + assert (df.dtypes == values.dtype).all(), (df.dtypes, values.dtype) + if using_pyarrow_string_dtype() and is_string_array( + values, # type: ignore[arg-type] + skipna=True, + ): + df = df.astype("string[pyarrow_numpy]") + frames.append(df) + + if len(frames) == 1: + df = frames[0] + else: + df = concat(frames, axis=1) + + selection = Selection(self, where=where, start=start, stop=stop) + # apply the selection filters & axis orderings + df = self.process_axes(df, selection=selection, columns=columns) + + return df + + +class AppendableSeriesTable(AppendableFrameTable): + """support the new appendable table formats""" + + pandas_kind = "series_table" + table_type = "appendable_series" + ndim = 2 + obj_type = Series + + @property + def is_transposed(self) -> bool: + return False + + @classmethod + def get_object(cls, obj, transposed: bool): + return obj + + def write(self, obj, data_columns=None, **kwargs): + """we are going to write this as a frame table""" + if not isinstance(obj, DataFrame): + name = obj.name or "values" + obj = obj.to_frame(name) + return super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs) + + def read( + self, + where=None, + columns=None, + start: int | None = None, + stop: int | None = None, + ) -> Series: + is_multi_index = self.is_multi_index + if columns is not None and is_multi_index: + assert isinstance(self.levels, list) # needed for mypy + for n in self.levels: + if n not in columns: + columns.insert(0, n) + s = super().read(where=where, columns=columns, start=start, stop=stop) + if is_multi_index: + s.set_index(self.levels, inplace=True) + + s = s.iloc[:, 0] + + # remove the default name + if s.name == "values": + s.name = None + return s + + +class AppendableMultiSeriesTable(AppendableSeriesTable): + """support the new appendable table formats""" + + pandas_kind = "series_table" + table_type = "appendable_multiseries" + + def write(self, obj, **kwargs): + """we are going to write this as a frame table""" + name = obj.name or "values" + newobj, self.levels = self.validate_multiindex(obj) + assert isinstance(self.levels, list) # for mypy + cols = list(self.levels) + cols.append(name) + newobj.columns = Index(cols) + return super().write(obj=newobj, **kwargs) + + +class GenericTable(AppendableFrameTable): + """a table that read/writes the generic pytables table format""" + + pandas_kind = "frame_table" + table_type = "generic_table" + ndim = 2 + obj_type = DataFrame + levels: list[Hashable] + + @property + def pandas_type(self) -> str: + return self.pandas_kind + + @property + def storable(self): + return getattr(self.group, "table", None) or self.group + + def get_attrs(self) -> None: + """retrieve our attributes""" + self.non_index_axes = [] + self.nan_rep = None + self.levels = [] + + self.index_axes = [a for a in self.indexables if a.is_an_indexable] + self.values_axes = [a for a in 
self.indexables if not a.is_an_indexable]
+        self.data_columns = [a.name for a in self.values_axes]
+
+    @cache_readonly
+    def indexables(self):
+        """create the indexables from the table description"""
+        d = self.description
+
+        # TODO: can we get a typ for this?  AFAICT it is the only place
+        #  where we aren't passing one
+        # the index column is just a simple index
+        md = self.read_metadata("index")
+        meta = "category" if md is not None else None
+        index_col = GenericIndexCol(
+            name="index", axis=0, table=self.table, meta=meta, metadata=md
+        )
+
+        _indexables: list[GenericIndexCol | GenericDataIndexableCol] = [index_col]
+
+        for i, n in enumerate(d._v_names):
+            assert isinstance(n, str)
+
+            atom = getattr(d, n)
+            md = self.read_metadata(n)
+            meta = "category" if md is not None else None
+            dc = GenericDataIndexableCol(
+                name=n,
+                pos=i,
+                values=[n],
+                typ=atom,
+                table=self.table,
+                meta=meta,
+                metadata=md,
+            )
+            _indexables.append(dc)
+
+        return _indexables
+
+    def write(self, **kwargs):
+        raise NotImplementedError("cannot write on a generic table")
+
+
+class AppendableMultiFrameTable(AppendableFrameTable):
+    """a frame with a multi-index"""
+
+    table_type = "appendable_multiframe"
+    obj_type = DataFrame
+    ndim = 2
+    _re_levels = re.compile(r"^level_\d+$")
+
+    @property
+    def table_type_short(self) -> str:
+        return "appendable_multi"
+
+    def write(self, obj, data_columns=None, **kwargs):
+        if data_columns is None:
+            data_columns = []
+        elif data_columns is True:
+            data_columns = obj.columns.tolist()
+        obj, self.levels = self.validate_multiindex(obj)
+        assert isinstance(self.levels, list)  # for mypy
+        for n in self.levels:
+            if n not in data_columns:
+                data_columns.insert(0, n)
+        return super().write(obj=obj, data_columns=data_columns, **kwargs)
+
+    def read(
+        self,
+        where=None,
+        columns=None,
+        start: int | None = None,
+        stop: int | None = None,
+    ):
+        df = super().read(where=where, columns=columns, start=start, stop=stop)
+        df = df.set_index(self.levels)
+
+        # remove names for 'level_%d'
+        df.index = df.index.set_names(
+            [None if self._re_levels.search(name) else name for name in df.index.names]
+        )
+
+        return df
+
+
+def _reindex_axis(
+    obj: DataFrame, axis: AxisInt, labels: Index, other=None
+) -> DataFrame:
+    ax = obj._get_axis(axis)
+    labels = ensure_index(labels)
+
+    # try not to reindex even if other is provided
+    # if it equals our current index
+    if other is not None:
+        other = ensure_index(other)
+    if (other is None or labels.equals(other)) and labels.equals(ax):
+        return obj
+
+    labels = ensure_index(labels.unique())
+    if other is not None:
+        labels = ensure_index(other.unique()).intersection(labels, sort=False)
+    if not labels.equals(ax):
+        slicer: list[slice | Index] = [slice(None, None)] * obj.ndim
+        slicer[axis] = labels
+        obj = obj.loc[tuple(slicer)]
+    return obj
+
+
+# tz to/from coercion
+
+
+def _get_tz(tz: tzinfo) -> str | tzinfo:
+    """for a tz-aware type, return an encoded zone"""
+    zone = timezones.get_timezone(tz)
+    return zone
+
+
+@overload
+def _set_tz(
+    values: np.ndarray | Index, tz: str | tzinfo, coerce: bool = False
+) -> DatetimeIndex:
+    ...
+
+
+@overload
+def _set_tz(values: np.ndarray | Index, tz: None, coerce: bool = False) -> np.ndarray:
+    ...
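+
+
+# A minimal usage sketch (the i8 nanosecond values below are hypothetical):
+# with a zone, the result is a tz-aware DatetimeIndex; with tz=None and
+# coerce=True it is an M8[ns] ndarray:
+#
+#   stamps = np.array([0, 86_400_000_000_000])
+#   _set_tz(stamps, "UTC")               # DatetimeIndex, tz="UTC"
+#   _set_tz(stamps, None, coerce=True)   # ndarray of dtype M8[ns]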
+ + +def _set_tz( + values: np.ndarray | Index, tz: str | tzinfo | None, coerce: bool = False +) -> np.ndarray | DatetimeIndex: + """ + coerce the values to a DatetimeIndex if tz is set + preserve the input shape if possible + + Parameters + ---------- + values : ndarray or Index + tz : str or tzinfo + coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray + """ + if isinstance(values, DatetimeIndex): + # If values is tzaware, the tz gets dropped in the values.ravel() + # call below (which returns an ndarray). So we are only non-lossy + # if `tz` matches `values.tz`. + assert values.tz is None or values.tz == tz + + if tz is not None: + if isinstance(values, DatetimeIndex): + name = values.name + values = values.asi8 + else: + name = None + values = values.ravel() + + tz = _ensure_decoded(tz) + values = DatetimeIndex(values, name=name) + values = values.tz_localize("UTC").tz_convert(tz) + elif coerce: + values = np.asarray(values, dtype="M8[ns]") + + # error: Incompatible return value type (got "Union[ndarray, Index]", + # expected "Union[ndarray, DatetimeIndex]") + return values # type: ignore[return-value] + + +def _convert_index(name: str, index: Index, encoding: str, errors: str) -> IndexCol: + assert isinstance(name, str) + + index_name = index.name + # error: Argument 1 to "_get_data_and_dtype_name" has incompatible type "Index"; + # expected "Union[ExtensionArray, ndarray]" + converted, dtype_name = _get_data_and_dtype_name(index) # type: ignore[arg-type] + kind = _dtype_to_kind(dtype_name) + atom = DataIndexableCol._get_atom(converted) + + if ( + lib.is_np_dtype(index.dtype, "iu") + or needs_i8_conversion(index.dtype) + or is_bool_dtype(index.dtype) + ): + # Includes Index, RangeIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, + # in which case "kind" is "integer", "integer", "datetime64", + # "timedelta64", and "integer", respectively. 
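+        # freq/tz are carried along as IndexCol attributes so that
+        # DatetimeIndex/PeriodIndex metadata survives the round trip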
+ return IndexCol( + name, + values=converted, + kind=kind, + typ=atom, + freq=getattr(index, "freq", None), + tz=getattr(index, "tz", None), + index_name=index_name, + ) + + if isinstance(index, MultiIndex): + raise TypeError("MultiIndex not supported here!") + + inferred_type = lib.infer_dtype(index, skipna=False) + # we won't get inferred_type of "datetime64" or "timedelta64" as these + # would go through the DatetimeIndex/TimedeltaIndex paths above + + values = np.asarray(index) + + if inferred_type == "date": + converted = np.asarray([v.toordinal() for v in values], dtype=np.int32) + return IndexCol( + name, converted, "date", _tables().Time32Col(), index_name=index_name + ) + elif inferred_type == "string": + converted = _convert_string_array(values, encoding, errors) + itemsize = converted.dtype.itemsize + return IndexCol( + name, + converted, + "string", + _tables().StringCol(itemsize), + index_name=index_name, + ) + + elif inferred_type in ["integer", "floating"]: + return IndexCol( + name, values=converted, kind=kind, typ=atom, index_name=index_name + ) + else: + assert isinstance(converted, np.ndarray) and converted.dtype == object + assert kind == "object", kind + atom = _tables().ObjectAtom() + return IndexCol(name, converted, kind, atom, index_name=index_name) + + +def _unconvert_index(data, kind: str, encoding: str, errors: str) -> np.ndarray | Index: + index: Index | np.ndarray + + if kind == "datetime64": + index = DatetimeIndex(data) + elif kind == "timedelta64": + index = TimedeltaIndex(data) + elif kind == "date": + try: + index = np.asarray([date.fromordinal(v) for v in data], dtype=object) + except ValueError: + index = np.asarray([date.fromtimestamp(v) for v in data], dtype=object) + elif kind in ("integer", "float", "bool"): + index = np.asarray(data) + elif kind in ("string"): + index = _unconvert_string_array( + data, nan_rep=None, encoding=encoding, errors=errors + ) + elif kind == "object": + index = np.asarray(data[0]) + else: # pragma: no cover + raise ValueError(f"unrecognized index type {kind}") + return index + + +def _maybe_convert_for_string_atom( + name: str, + bvalues: ArrayLike, + existing_col, + min_itemsize, + nan_rep, + encoding, + errors, + columns: list[str], +): + if bvalues.dtype != object: + return bvalues + + bvalues = cast(np.ndarray, bvalues) + + dtype_name = bvalues.dtype.name + inferred_type = lib.infer_dtype(bvalues, skipna=False) + + if inferred_type == "date": + raise TypeError("[date] is not implemented as a table column") + if inferred_type == "datetime": + # after GH#8260 + # this only would be hit for a multi-timezone dtype which is an error + raise TypeError( + "too many timezones in this block, create separate data columns" + ) + + if not (inferred_type == "string" or dtype_name == "object"): + return bvalues + + mask = isna(bvalues) + data = bvalues.copy() + data[mask] = nan_rep + + # see if we have a valid string type + inferred_type = lib.infer_dtype(data, skipna=False) + if inferred_type != "string": + # we cannot serialize this data, so report an exception on a column + # by column basis + + # expected behaviour: + # search block for a non-string object column by column + for i in range(data.shape[0]): + col = data[i] + inferred_type = lib.infer_dtype(col, skipna=False) + if inferred_type != "string": + error_column_label = columns[i] if len(columns) > i else f"No.{i}" + raise TypeError( + f"Cannot serialize the column [{error_column_label}]\n" + f"because its data contents are not [string] but " + f"[{inferred_type}] object 
dtype"
+                )
+
+    # itemsize is the maximum length of a string (along any dimension)
+
+    data_converted = _convert_string_array(data, encoding, errors).reshape(data.shape)
+    itemsize = data_converted.itemsize
+
+    # specified min_itemsize?
+    if isinstance(min_itemsize, dict):
+        min_itemsize = int(min_itemsize.get(name) or min_itemsize.get("values") or 0)
+    itemsize = max(min_itemsize or 0, itemsize)
+
+    # check for column in the values conflicts
+    if existing_col is not None:
+        eci = existing_col.validate_col(itemsize)
+        if eci is not None and eci > itemsize:
+            itemsize = eci
+
+    data_converted = data_converted.astype(f"|S{itemsize}", copy=False)
+    return data_converted
+
+
+def _convert_string_array(data: np.ndarray, encoding: str, errors: str) -> np.ndarray:
+    """
+    Take a string-like that is object dtype and coerce to a fixed size string type.
+
+    Parameters
+    ----------
+    data : np.ndarray[object]
+    encoding : str
+    errors : str
+        Handler for encoding errors.
+
+    Returns
+    -------
+    np.ndarray[fixed-length-string]
+    """
+    # encode if needed
+    if len(data):
+        data = (
+            Series(data.ravel(), copy=False)
+            .str.encode(encoding, errors)
+            ._values.reshape(data.shape)
+        )
+
+    # create the sized dtype
+    ensured = ensure_object(data.ravel())
+    itemsize = max(1, libwriters.max_len_string_array(ensured))
+
+    data = np.asarray(data, dtype=f"S{itemsize}")
+    return data
+
+
+def _unconvert_string_array(
+    data: np.ndarray, nan_rep, encoding: str, errors: str
+) -> np.ndarray:
+    """
+    Inverse of _convert_string_array.
+
+    Parameters
+    ----------
+    data : np.ndarray[fixed-length-string]
+    nan_rep : the storage repr of NaN
+    encoding : str
+    errors : str
+        Handler for encoding errors.
+
+    Returns
+    -------
+    np.ndarray[object]
+        Decoded data.
+    """
+    shape = data.shape
+    data = np.asarray(data.ravel(), dtype=object)
+
+    if len(data):
+        itemsize = libwriters.max_len_string_array(ensure_object(data))
+        dtype = f"U{itemsize}"
+
+        if isinstance(data[0], bytes):
+            data = Series(data, copy=False).str.decode(encoding, errors=errors)._values
+        else:
+            data = data.astype(dtype, copy=False).astype(object, copy=False)
+
+    if nan_rep is None:
+        nan_rep = "nan"
+
+    libwriters.string_array_replace_from_nan_rep(data, nan_rep)
+    return data.reshape(shape)
+
+
+def _maybe_convert(values: np.ndarray, val_kind: str, encoding: str, errors: str):
+    assert isinstance(val_kind, str), type(val_kind)
+    if _need_convert(val_kind):
+        conv = _get_converter(val_kind, encoding, errors)
+        values = conv(values)
+    return values
+
+
+def _get_converter(kind: str, encoding: str, errors: str):
+    if kind == "datetime64":
+        return lambda x: np.asarray(x, dtype="M8[ns]")
+    elif kind == "string":
+        return lambda x: _unconvert_string_array(
+            x, nan_rep=None, encoding=encoding, errors=errors
+        )
+    else:  # pragma: no cover
+        raise ValueError(f"invalid kind {kind}")
+
+
+def _need_convert(kind: str) -> bool:
+    if kind in ("datetime64", "string"):
+        return True
+    return False
+
+
+def _maybe_adjust_name(name: str, version: Sequence[int]) -> str:
+    """
+    Prior to 0.10.1, values blocks were named like ``values_0`` rather than
+    ``values_block_0``; adjust the given name if necessary.
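+    For example, ``values_block_0`` is rewritten to ``values_0`` when the
+    target file was written by one of those legacy versions.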
+ + Parameters + ---------- + name : str + version : Tuple[int, int, int] + + Returns + ------- + str + """ + if isinstance(version, str) or len(version) < 3: + raise ValueError("Version is incorrect, expected sequence of 3 integers.") + + if version[0] == 0 and version[1] <= 10 and version[2] == 0: + m = re.search(r"values_block_(\d+)", name) + if m: + grp = m.groups()[0] + name = f"values_{grp}" + return name + + +def _dtype_to_kind(dtype_str: str) -> str: + """ + Find the "kind" string describing the given dtype name. + """ + dtype_str = _ensure_decoded(dtype_str) + + if dtype_str.startswith(("string", "bytes")): + kind = "string" + elif dtype_str.startswith("float"): + kind = "float" + elif dtype_str.startswith("complex"): + kind = "complex" + elif dtype_str.startswith(("int", "uint")): + kind = "integer" + elif dtype_str.startswith("datetime64"): + kind = "datetime64" + elif dtype_str.startswith("timedelta"): + kind = "timedelta64" + elif dtype_str.startswith("bool"): + kind = "bool" + elif dtype_str.startswith("category"): + kind = "category" + elif dtype_str.startswith("period"): + # We store the `freq` attr so we can restore from integers + kind = "integer" + elif dtype_str == "object": + kind = "object" + else: + raise ValueError(f"cannot interpret dtype of [{dtype_str}]") + + return kind + + +def _get_data_and_dtype_name(data: ArrayLike): + """ + Convert the passed data into a storable form and a dtype string. + """ + if isinstance(data, Categorical): + data = data.codes + + # For datetime64tz we need to drop the TZ in tests TODO: why? + dtype_name = data.dtype.name.split("[")[0] + + if data.dtype.kind in "mM": + data = np.asarray(data.view("i8")) + # TODO: we used to reshape for the dt64tz case, but no longer + # doing that doesn't seem to break anything. why? + + elif isinstance(data, PeriodIndex): + data = data.asi8 + + data = np.asarray(data) + return data, dtype_name + + +class Selection: + """ + Carries out a selection operation on a tables.Table object. 
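+
+    A passed ``where`` is compiled into a PyTables/numexpr condition that is
+    evaluated on disk; any part that cannot be pushed down is kept as a
+    ``filter`` and applied after the rows have been read.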
+ + Parameters + ---------- + table : a Table object + where : list of Terms (or convertible to) + start, stop: indices to start and/or stop selection + + """ + + def __init__( + self, + table: Table, + where=None, + start: int | None = None, + stop: int | None = None, + ) -> None: + self.table = table + self.where = where + self.start = start + self.stop = stop + self.condition = None + self.filter = None + self.terms = None + self.coordinates = None + + if is_list_like(where): + # see if we have a passed coordinate like + with suppress(ValueError): + inferred = lib.infer_dtype(where, skipna=False) + if inferred in ("integer", "boolean"): + where = np.asarray(where) + if where.dtype == np.bool_: + start, stop = self.start, self.stop + if start is None: + start = 0 + if stop is None: + stop = self.table.nrows + self.coordinates = np.arange(start, stop)[where] + elif issubclass(where.dtype.type, np.integer): + if (self.start is not None and (where < self.start).any()) or ( + self.stop is not None and (where >= self.stop).any() + ): + raise ValueError( + "where must have index locations >= start and < stop" + ) + self.coordinates = where + + if self.coordinates is None: + self.terms = self.generate(where) + + # create the numexpr & the filter + if self.terms is not None: + self.condition, self.filter = self.terms.evaluate() + + def generate(self, where): + """where can be a : dict,list,tuple,string""" + if where is None: + return None + + q = self.table.queryables() + try: + return PyTablesExpr(where, queryables=q, encoding=self.table.encoding) + except NameError as err: + # raise a nice message, suggesting that the user should use + # data_columns + qkeys = ",".join(q.keys()) + msg = dedent( + f"""\ + The passed where expression: {where} + contains an invalid variable reference + all of the variable references must be a reference to + an axis (e.g. 
'index' or 'columns'), or a data_column + The currently defined references are: {qkeys} + """ + ) + raise ValueError(msg) from err + + def select(self): + """ + generate the selection + """ + if self.condition is not None: + return self.table.table.read_where( + self.condition.format(), start=self.start, stop=self.stop + ) + elif self.coordinates is not None: + return self.table.table.read_coordinates(self.coordinates) + return self.table.table.read(start=self.start, stop=self.stop) + + def select_coords(self): + """ + generate the selection + """ + start, stop = self.start, self.stop + nrows = self.table.nrows + if start is None: + start = 0 + elif start < 0: + start += nrows + if stop is None: + stop = nrows + elif stop < 0: + stop += nrows + + if self.condition is not None: + return self.table.table.get_where_list( + self.condition.format(), start=start, stop=stop, sort=True + ) + elif self.coordinates is not None: + return self.coordinates + + return np.arange(start, stop) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/__init__.py new file mode 100644 index 00000000..31773074 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/__init__.py @@ -0,0 +1,3 @@ +from pandas.io.sas.sasreader import read_sas + +__all__ = ["read_sas"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sas7bdat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sas7bdat.py new file mode 100644 index 00000000..f1fb21db --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sas7bdat.py @@ -0,0 +1,752 @@ +""" +Read SAS7BDAT files + +Based on code written by Jared Hobbs: + https://bitbucket.org/jaredhobbs/sas7bdat + +See also: + https://github.com/BioStatMatt/sas7bdat + +Partial documentation of the file format: + https://cran.r-project.org/package=sas7bdat/vignettes/sas7bdat.pdf + +Reference for binary data compression: + http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm +""" +from __future__ import annotations + +from collections import abc +from datetime import ( + datetime, + timedelta, +) +import sys +from typing import ( + TYPE_CHECKING, + cast, +) + +import numpy as np + +from pandas._libs.byteswap import ( + read_double_with_byteswap, + read_float_with_byteswap, + read_uint16_with_byteswap, + read_uint32_with_byteswap, + read_uint64_with_byteswap, +) +from pandas._libs.sas import ( + Parser, + get_subheader_index, +) +from pandas.errors import ( + EmptyDataError, + OutOfBoundsDatetime, +) + +import pandas as pd +from pandas import ( + DataFrame, + isna, +) + +from pandas.io.common import get_handle +import pandas.io.sas.sas_constants as const +from pandas.io.sas.sasreader import ReaderBase + +if TYPE_CHECKING: + from pandas._typing import ( + CompressionOptions, + FilePath, + ReadBuffer, + ) + + +def _parse_datetime(sas_datetime: float, unit: str): + if isna(sas_datetime): + return pd.NaT + + if unit == "s": + return datetime(1960, 1, 1) + timedelta(seconds=sas_datetime) + + elif unit == "d": + return datetime(1960, 1, 1) + timedelta(days=sas_datetime) + + else: + raise ValueError("unit must be 'd' or 's'") + + +def _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series: + """ + Convert to Timestamp if possible, otherwise to datetime.datetime. + SAS float64 lacks precision for more than ms resolution so the fit + to datetime.datetime is ok. 
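+
+    SAS stores these as counts of seconds or days from the 1960-01-01 epoch,
+    hence the ``origin`` passed to ``to_datetime`` below.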
+
+    Parameters
+    ----------
+    sas_datetimes : {Series, Sequence[float]}
+        Dates or datetimes in SAS
+    unit : str
+        "d" if the floats represent dates, "s" for datetimes
+
+    Returns
+    -------
+    Series
+        Series of datetime64 dtype or datetime.datetime.
+    """
+    try:
+        return pd.to_datetime(sas_datetimes, unit=unit, origin="1960-01-01")
+    except OutOfBoundsDatetime:
+        s_series = sas_datetimes.apply(_parse_datetime, unit=unit)
+        s_series = cast(pd.Series, s_series)
+        return s_series
+
+
+class _Column:
+    col_id: int
+    name: str | bytes
+    label: str | bytes
+    format: str | bytes
+    ctype: bytes
+    length: int
+
+    def __init__(
+        self,
+        col_id: int,
+        # These can be bytes when convert_header_text is False
+        name: str | bytes,
+        label: str | bytes,
+        format: str | bytes,
+        ctype: bytes,
+        length: int,
+    ) -> None:
+        self.col_id = col_id
+        self.name = name
+        self.label = label
+        self.format = format
+        self.ctype = ctype
+        self.length = length
+
+
+# SAS7BDAT represents a SAS data file in SAS7BDAT format.
+class SAS7BDATReader(ReaderBase, abc.Iterator):
+    """
+    Read SAS files in SAS7BDAT format.
+
+    Parameters
+    ----------
+    path_or_buf : path name or buffer
+        Name of SAS file or file-like object pointing to SAS file
+        contents.
+    index : column identifier, defaults to None
+        Column to use as index.
+    convert_dates : bool, defaults to True
+        Attempt to convert dates to Pandas datetime values. Note that
+        some rarely used SAS date formats may be unsupported.
+    blank_missing : bool, defaults to True
+        Convert empty strings to missing values (SAS uses blanks to
+        indicate missing character variables).
+    chunksize : int, defaults to None
+        If given, return a SAS7BDATReader object for iteration that
+        yields chunks with the given number of rows.
+    encoding : str, 'infer', defaults to None
+        String encoding according to the Python standard encodings;
+        encoding='infer' tries to detect the encoding from the file header,
+        encoding=None will leave the data in binary format.
+    convert_text : bool, defaults to True
+        If False, text variables are left as raw bytes.
+    convert_header_text : bool, defaults to True
+        If False, header text, including column names, is left as raw
+        bytes.
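+
+    Examples
+    --------
+    A minimal sketch (the file name is hypothetical); ``pd.read_sas`` is the
+    usual entry point, but the reader can also be used directly:
+
+    >>> rdr = SAS7BDATReader("data.sas7bdat", chunksize=10_000)  # doctest: +SKIP
+    >>> chunk = rdr.read()  # doctest: +SKIP
+    >>> rdr.close()  # doctest: +SKIP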
+ """ + + _int_length: int + _cached_page: bytes | None + + def __init__( + self, + path_or_buf: FilePath | ReadBuffer[bytes], + index=None, + convert_dates: bool = True, + blank_missing: bool = True, + chunksize: int | None = None, + encoding: str | None = None, + convert_text: bool = True, + convert_header_text: bool = True, + compression: CompressionOptions = "infer", + ) -> None: + self.index = index + self.convert_dates = convert_dates + self.blank_missing = blank_missing + self.chunksize = chunksize + self.encoding = encoding + self.convert_text = convert_text + self.convert_header_text = convert_header_text + + self.default_encoding = "latin-1" + self.compression = b"" + self.column_names_raw: list[bytes] = [] + self.column_names: list[str | bytes] = [] + self.column_formats: list[str | bytes] = [] + self.columns: list[_Column] = [] + + self._current_page_data_subheader_pointers: list[tuple[int, int]] = [] + self._cached_page = None + self._column_data_lengths: list[int] = [] + self._column_data_offsets: list[int] = [] + self._column_types: list[bytes] = [] + + self._current_row_in_file_index = 0 + self._current_row_on_page_index = 0 + self._current_row_in_file_index = 0 + + self.handles = get_handle( + path_or_buf, "rb", is_text=False, compression=compression + ) + + self._path_or_buf = self.handles.handle + + # Same order as const.SASIndex + self._subheader_processors = [ + self._process_rowsize_subheader, + self._process_columnsize_subheader, + self._process_subheader_counts, + self._process_columntext_subheader, + self._process_columnname_subheader, + self._process_columnattributes_subheader, + self._process_format_subheader, + self._process_columnlist_subheader, + None, # Data + ] + + try: + self._get_properties() + self._parse_metadata() + except Exception: + self.close() + raise + + def column_data_lengths(self) -> np.ndarray: + """Return a numpy int64 array of the column data lengths""" + return np.asarray(self._column_data_lengths, dtype=np.int64) + + def column_data_offsets(self) -> np.ndarray: + """Return a numpy int64 array of the column offsets""" + return np.asarray(self._column_data_offsets, dtype=np.int64) + + def column_types(self) -> np.ndarray: + """ + Returns a numpy character array of the column types: + s (string) or d (double) + """ + return np.asarray(self._column_types, dtype=np.dtype("S1")) + + def close(self) -> None: + self.handles.close() + + def _get_properties(self) -> None: + # Check magic number + self._path_or_buf.seek(0) + self._cached_page = self._path_or_buf.read(288) + if self._cached_page[0 : len(const.magic)] != const.magic: + raise ValueError("magic number mismatch (not a SAS file?)") + + # Get alignment information + buf = self._read_bytes(const.align_1_offset, const.align_1_length) + if buf == const.u64_byte_checker_value: + self.U64 = True + self._int_length = 8 + self._page_bit_offset = const.page_bit_offset_x64 + self._subheader_pointer_length = const.subheader_pointer_length_x64 + else: + self.U64 = False + self._page_bit_offset = const.page_bit_offset_x86 + self._subheader_pointer_length = const.subheader_pointer_length_x86 + self._int_length = 4 + buf = self._read_bytes(const.align_2_offset, const.align_2_length) + if buf == const.align_1_checker_value: + align1 = const.align_2_value + else: + align1 = 0 + + # Get endianness information + buf = self._read_bytes(const.endianness_offset, const.endianness_length) + if buf == b"\x01": + self.byte_order = "<" + self.need_byteswap = sys.byteorder == "big" + else: + self.byte_order = ">" + 
self.need_byteswap = sys.byteorder == "little" + + # Get encoding information + buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0] + if buf in const.encoding_names: + self.inferred_encoding = const.encoding_names[buf] + if self.encoding == "infer": + self.encoding = self.inferred_encoding + else: + self.inferred_encoding = f"unknown (code={buf})" + + # Timestamp is epoch 01/01/1960 + epoch = datetime(1960, 1, 1) + x = self._read_float( + const.date_created_offset + align1, const.date_created_length + ) + self.date_created = epoch + pd.to_timedelta(x, unit="s") + x = self._read_float( + const.date_modified_offset + align1, const.date_modified_length + ) + self.date_modified = epoch + pd.to_timedelta(x, unit="s") + + self.header_length = self._read_uint( + const.header_size_offset + align1, const.header_size_length + ) + + # Read the rest of the header into cached_page. + buf = self._path_or_buf.read(self.header_length - 288) + self._cached_page += buf + # error: Argument 1 to "len" has incompatible type "Optional[bytes]"; + # expected "Sized" + if len(self._cached_page) != self.header_length: # type: ignore[arg-type] + raise ValueError("The SAS7BDAT file appears to be truncated.") + + self._page_length = self._read_uint( + const.page_size_offset + align1, const.page_size_length + ) + + def __next__(self) -> DataFrame: + da = self.read(nrows=self.chunksize or 1) + if da.empty: + self.close() + raise StopIteration + return da + + # Read a single float of the given width (4 or 8). + def _read_float(self, offset: int, width: int): + assert self._cached_page is not None + if width == 4: + return read_float_with_byteswap( + self._cached_page, offset, self.need_byteswap + ) + elif width == 8: + return read_double_with_byteswap( + self._cached_page, offset, self.need_byteswap + ) + else: + self.close() + raise ValueError("invalid float width") + + # Read a single unsigned integer of the given width (1, 2, 4 or 8). 
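+    # Widths 2, 4 and 8 go through the pandas._libs.byteswap helpers, so
+    # files whose endianness differs from the host's are decoded correctly;
+    # a single byte never needs swapping.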
+ def _read_uint(self, offset: int, width: int) -> int: + assert self._cached_page is not None + if width == 1: + return self._read_bytes(offset, 1)[0] + elif width == 2: + return read_uint16_with_byteswap( + self._cached_page, offset, self.need_byteswap + ) + elif width == 4: + return read_uint32_with_byteswap( + self._cached_page, offset, self.need_byteswap + ) + elif width == 8: + return read_uint64_with_byteswap( + self._cached_page, offset, self.need_byteswap + ) + else: + self.close() + raise ValueError("invalid int width") + + def _read_bytes(self, offset: int, length: int): + assert self._cached_page is not None + if offset + length > len(self._cached_page): + self.close() + raise ValueError("The cached page is too small.") + return self._cached_page[offset : offset + length] + + def _read_and_convert_header_text(self, offset: int, length: int) -> str | bytes: + return self._convert_header_text( + self._read_bytes(offset, length).rstrip(b"\x00 ") + ) + + def _parse_metadata(self) -> None: + done = False + while not done: + self._cached_page = self._path_or_buf.read(self._page_length) + if len(self._cached_page) <= 0: + break + if len(self._cached_page) != self._page_length: + raise ValueError("Failed to read a meta data page from the SAS file.") + done = self._process_page_meta() + + def _process_page_meta(self) -> bool: + self._read_page_header() + pt = const.page_meta_types + [const.page_amd_type, const.page_mix_type] + if self._current_page_type in pt: + self._process_page_metadata() + is_data_page = self._current_page_type == const.page_data_type + is_mix_page = self._current_page_type == const.page_mix_type + return bool( + is_data_page + or is_mix_page + or self._current_page_data_subheader_pointers != [] + ) + + def _read_page_header(self) -> None: + bit_offset = self._page_bit_offset + tx = const.page_type_offset + bit_offset + self._current_page_type = ( + self._read_uint(tx, const.page_type_length) & const.page_type_mask2 + ) + tx = const.block_count_offset + bit_offset + self._current_page_block_count = self._read_uint(tx, const.block_count_length) + tx = const.subheader_count_offset + bit_offset + self._current_page_subheaders_count = self._read_uint( + tx, const.subheader_count_length + ) + + def _process_page_metadata(self) -> None: + bit_offset = self._page_bit_offset + + for i in range(self._current_page_subheaders_count): + offset = const.subheader_pointers_offset + bit_offset + total_offset = offset + self._subheader_pointer_length * i + + subheader_offset = self._read_uint(total_offset, self._int_length) + total_offset += self._int_length + + subheader_length = self._read_uint(total_offset, self._int_length) + total_offset += self._int_length + + subheader_compression = self._read_uint(total_offset, 1) + total_offset += 1 + + subheader_type = self._read_uint(total_offset, 1) + + if ( + subheader_length == 0 + or subheader_compression == const.truncated_subheader_id + ): + continue + + subheader_signature = self._read_bytes(subheader_offset, self._int_length) + subheader_index = get_subheader_index(subheader_signature) + subheader_processor = self._subheader_processors[subheader_index] + + if subheader_processor is None: + f1 = subheader_compression in (const.compressed_subheader_id, 0) + f2 = subheader_type == const.compressed_subheader_type + if self.compression and f1 and f2: + self._current_page_data_subheader_pointers.append( + (subheader_offset, subheader_length) + ) + else: + self.close() + raise ValueError( + f"Unknown subheader signature 
{subheader_signature}" + ) + else: + subheader_processor(subheader_offset, subheader_length) + + def _process_rowsize_subheader(self, offset: int, length: int) -> None: + int_len = self._int_length + lcs_offset = offset + lcp_offset = offset + if self.U64: + lcs_offset += 682 + lcp_offset += 706 + else: + lcs_offset += 354 + lcp_offset += 378 + + self.row_length = self._read_uint( + offset + const.row_length_offset_multiplier * int_len, + int_len, + ) + self.row_count = self._read_uint( + offset + const.row_count_offset_multiplier * int_len, + int_len, + ) + self.col_count_p1 = self._read_uint( + offset + const.col_count_p1_multiplier * int_len, int_len + ) + self.col_count_p2 = self._read_uint( + offset + const.col_count_p2_multiplier * int_len, int_len + ) + mx = const.row_count_on_mix_page_offset_multiplier * int_len + self._mix_page_row_count = self._read_uint(offset + mx, int_len) + self._lcs = self._read_uint(lcs_offset, 2) + self._lcp = self._read_uint(lcp_offset, 2) + + def _process_columnsize_subheader(self, offset: int, length: int) -> None: + int_len = self._int_length + offset += int_len + self.column_count = self._read_uint(offset, int_len) + if self.col_count_p1 + self.col_count_p2 != self.column_count: + print( + f"Warning: column count mismatch ({self.col_count_p1} + " + f"{self.col_count_p2} != {self.column_count})\n" + ) + + # Unknown purpose + def _process_subheader_counts(self, offset: int, length: int) -> None: + pass + + def _process_columntext_subheader(self, offset: int, length: int) -> None: + offset += self._int_length + text_block_size = self._read_uint(offset, const.text_block_size_length) + + buf = self._read_bytes(offset, text_block_size) + cname_raw = buf[0:text_block_size].rstrip(b"\x00 ") + self.column_names_raw.append(cname_raw) + + if len(self.column_names_raw) == 1: + compression_literal = b"" + for cl in const.compression_literals: + if cl in cname_raw: + compression_literal = cl + self.compression = compression_literal + offset -= self._int_length + + offset1 = offset + 16 + if self.U64: + offset1 += 4 + + buf = self._read_bytes(offset1, self._lcp) + compression_literal = buf.rstrip(b"\x00") + if compression_literal == b"": + self._lcs = 0 + offset1 = offset + 32 + if self.U64: + offset1 += 4 + buf = self._read_bytes(offset1, self._lcp) + self.creator_proc = buf[0 : self._lcp] + elif compression_literal == const.rle_compression: + offset1 = offset + 40 + if self.U64: + offset1 += 4 + buf = self._read_bytes(offset1, self._lcp) + self.creator_proc = buf[0 : self._lcp] + elif self._lcs > 0: + self._lcp = 0 + offset1 = offset + 16 + if self.U64: + offset1 += 4 + buf = self._read_bytes(offset1, self._lcs) + self.creator_proc = buf[0 : self._lcp] + if hasattr(self, "creator_proc"): + self.creator_proc = self._convert_header_text(self.creator_proc) + + def _process_columnname_subheader(self, offset: int, length: int) -> None: + int_len = self._int_length + offset += int_len + column_name_pointers_count = (length - 2 * int_len - 12) // 8 + for i in range(column_name_pointers_count): + text_subheader = ( + offset + + const.column_name_pointer_length * (i + 1) + + const.column_name_text_subheader_offset + ) + col_name_offset = ( + offset + + const.column_name_pointer_length * (i + 1) + + const.column_name_offset_offset + ) + col_name_length = ( + offset + + const.column_name_pointer_length * (i + 1) + + const.column_name_length_offset + ) + + idx = self._read_uint( + text_subheader, const.column_name_text_subheader_length + ) + col_offset = self._read_uint( + 
col_name_offset, const.column_name_offset_length + ) + col_len = self._read_uint(col_name_length, const.column_name_length_length) + + name_raw = self.column_names_raw[idx] + cname = name_raw[col_offset : col_offset + col_len] + self.column_names.append(self._convert_header_text(cname)) + + def _process_columnattributes_subheader(self, offset: int, length: int) -> None: + int_len = self._int_length + column_attributes_vectors_count = (length - 2 * int_len - 12) // (int_len + 8) + for i in range(column_attributes_vectors_count): + col_data_offset = ( + offset + int_len + const.column_data_offset_offset + i * (int_len + 8) + ) + col_data_len = ( + offset + + 2 * int_len + + const.column_data_length_offset + + i * (int_len + 8) + ) + col_types = ( + offset + 2 * int_len + const.column_type_offset + i * (int_len + 8) + ) + + x = self._read_uint(col_data_offset, int_len) + self._column_data_offsets.append(x) + + x = self._read_uint(col_data_len, const.column_data_length_length) + self._column_data_lengths.append(x) + + x = self._read_uint(col_types, const.column_type_length) + self._column_types.append(b"d" if x == 1 else b"s") + + def _process_columnlist_subheader(self, offset: int, length: int) -> None: + # unknown purpose + pass + + def _process_format_subheader(self, offset: int, length: int) -> None: + int_len = self._int_length + text_subheader_format = ( + offset + const.column_format_text_subheader_index_offset + 3 * int_len + ) + col_format_offset = offset + const.column_format_offset_offset + 3 * int_len + col_format_len = offset + const.column_format_length_offset + 3 * int_len + text_subheader_label = ( + offset + const.column_label_text_subheader_index_offset + 3 * int_len + ) + col_label_offset = offset + const.column_label_offset_offset + 3 * int_len + col_label_len = offset + const.column_label_length_offset + 3 * int_len + + x = self._read_uint( + text_subheader_format, const.column_format_text_subheader_index_length + ) + format_idx = min(x, len(self.column_names_raw) - 1) + + format_start = self._read_uint( + col_format_offset, const.column_format_offset_length + ) + format_len = self._read_uint(col_format_len, const.column_format_length_length) + + label_idx = self._read_uint( + text_subheader_label, const.column_label_text_subheader_index_length + ) + label_idx = min(label_idx, len(self.column_names_raw) - 1) + + label_start = self._read_uint( + col_label_offset, const.column_label_offset_length + ) + label_len = self._read_uint(col_label_len, const.column_label_length_length) + + label_names = self.column_names_raw[label_idx] + column_label = self._convert_header_text( + label_names[label_start : label_start + label_len] + ) + format_names = self.column_names_raw[format_idx] + column_format = self._convert_header_text( + format_names[format_start : format_start + format_len] + ) + current_column_number = len(self.columns) + + col = _Column( + current_column_number, + self.column_names[current_column_number], + column_label, + column_format, + self._column_types[current_column_number], + self._column_data_lengths[current_column_number], + ) + + self.column_formats.append(column_format) + self.columns.append(col) + + def read(self, nrows: int | None = None) -> DataFrame: + if (nrows is None) and (self.chunksize is not None): + nrows = self.chunksize + elif nrows is None: + nrows = self.row_count + + if len(self._column_types) == 0: + self.close() + raise EmptyDataError("No columns to parse from file") + + if nrows > 0 and self._current_row_in_file_index >= self.row_count: + 
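+            # every row has already been handed out; an empty frame signals
+            # exhaustion to __next__/iteration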
return DataFrame() + + nrows = min(nrows, self.row_count - self._current_row_in_file_index) + + nd = self._column_types.count(b"d") + ns = self._column_types.count(b"s") + + self._string_chunk = np.empty((ns, nrows), dtype=object) + self._byte_chunk = np.zeros((nd, 8 * nrows), dtype=np.uint8) + + self._current_row_in_chunk_index = 0 + p = Parser(self) + p.read(nrows) + + rslt = self._chunk_to_dataframe() + if self.index is not None: + rslt = rslt.set_index(self.index) + + return rslt + + def _read_next_page(self): + self._current_page_data_subheader_pointers = [] + self._cached_page = self._path_or_buf.read(self._page_length) + if len(self._cached_page) <= 0: + return True + elif len(self._cached_page) != self._page_length: + self.close() + msg = ( + "failed to read complete page from file (read " + f"{len(self._cached_page):d} of {self._page_length:d} bytes)" + ) + raise ValueError(msg) + + self._read_page_header() + if self._current_page_type in const.page_meta_types: + self._process_page_metadata() + + if self._current_page_type not in const.page_meta_types + [ + const.page_data_type, + const.page_mix_type, + ]: + return self._read_next_page() + + return False + + def _chunk_to_dataframe(self) -> DataFrame: + n = self._current_row_in_chunk_index + m = self._current_row_in_file_index + ix = range(m - n, m) + rslt = {} + + js, jb = 0, 0 + for j in range(self.column_count): + name = self.column_names[j] + + if self._column_types[j] == b"d": + col_arr = self._byte_chunk[jb, :].view(dtype=self.byte_order + "d") + rslt[name] = pd.Series(col_arr, dtype=np.float64, index=ix) + if self.convert_dates: + if self.column_formats[j] in const.sas_date_formats: + rslt[name] = _convert_datetimes(rslt[name], "d") + elif self.column_formats[j] in const.sas_datetime_formats: + rslt[name] = _convert_datetimes(rslt[name], "s") + jb += 1 + elif self._column_types[j] == b"s": + rslt[name] = pd.Series(self._string_chunk[js, :], index=ix) + if self.convert_text and (self.encoding is not None): + rslt[name] = self._decode_string(rslt[name].str) + js += 1 + else: + self.close() + raise ValueError(f"unknown column type {repr(self._column_types[j])}") + + df = DataFrame(rslt, columns=self.column_names, index=ix, copy=False) + return df + + def _decode_string(self, b): + return b.decode(self.encoding or self.default_encoding) + + def _convert_header_text(self, b: bytes) -> str | bytes: + if self.convert_header_text: + return self._decode_string(b) + else: + return b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sas_constants.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sas_constants.py new file mode 100644 index 00000000..62c17bd0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sas_constants.py @@ -0,0 +1,310 @@ +from __future__ import annotations + +from typing import Final + +magic: Final = ( + b"\x00\x00\x00\x00\x00\x00\x00\x00" + b"\x00\x00\x00\x00\xc2\xea\x81\x60" + b"\xb3\x14\x11\xcf\xbd\x92\x08\x00" + b"\x09\xc7\x31\x8c\x18\x1f\x10\x11" +) + +align_1_checker_value: Final = b"3" +align_1_offset: Final = 32 +align_1_length: Final = 1 +align_1_value: Final = 4 +u64_byte_checker_value: Final = b"3" +align_2_offset: Final = 35 +align_2_length: Final = 1 +align_2_value: Final = 4 +endianness_offset: Final = 37 +endianness_length: Final = 1 +platform_offset: Final = 39 +platform_length: Final = 1 +encoding_offset: Final = 70 +encoding_length: Final = 1 +dataset_offset: Final = 92 +dataset_length: Final = 64 +file_type_offset: Final = 156 +file_type_length: Final 
= 8 +date_created_offset: Final = 164 +date_created_length: Final = 8 +date_modified_offset: Final = 172 +date_modified_length: Final = 8 +header_size_offset: Final = 196 +header_size_length: Final = 4 +page_size_offset: Final = 200 +page_size_length: Final = 4 +page_count_offset: Final = 204 +page_count_length: Final = 4 +sas_release_offset: Final = 216 +sas_release_length: Final = 8 +sas_server_type_offset: Final = 224 +sas_server_type_length: Final = 16 +os_version_number_offset: Final = 240 +os_version_number_length: Final = 16 +os_maker_offset: Final = 256 +os_maker_length: Final = 16 +os_name_offset: Final = 272 +os_name_length: Final = 16 +page_bit_offset_x86: Final = 16 +page_bit_offset_x64: Final = 32 +subheader_pointer_length_x86: Final = 12 +subheader_pointer_length_x64: Final = 24 +page_type_offset: Final = 0 +page_type_length: Final = 2 +block_count_offset: Final = 2 +block_count_length: Final = 2 +subheader_count_offset: Final = 4 +subheader_count_length: Final = 2 +page_type_mask: Final = 0x0F00 +# Keep "page_comp_type" bits +page_type_mask2: Final = 0xF000 | page_type_mask +page_meta_type: Final = 0x0000 +page_data_type: Final = 0x0100 +page_mix_type: Final = 0x0200 +page_amd_type: Final = 0x0400 +page_meta2_type: Final = 0x4000 +page_comp_type: Final = 0x9000 +page_meta_types: Final = [page_meta_type, page_meta2_type] +subheader_pointers_offset: Final = 8 +truncated_subheader_id: Final = 1 +compressed_subheader_id: Final = 4 +compressed_subheader_type: Final = 1 +text_block_size_length: Final = 2 +row_length_offset_multiplier: Final = 5 +row_count_offset_multiplier: Final = 6 +col_count_p1_multiplier: Final = 9 +col_count_p2_multiplier: Final = 10 +row_count_on_mix_page_offset_multiplier: Final = 15 +column_name_pointer_length: Final = 8 +column_name_text_subheader_offset: Final = 0 +column_name_text_subheader_length: Final = 2 +column_name_offset_offset: Final = 2 +column_name_offset_length: Final = 2 +column_name_length_offset: Final = 4 +column_name_length_length: Final = 2 +column_data_offset_offset: Final = 8 +column_data_length_offset: Final = 8 +column_data_length_length: Final = 4 +column_type_offset: Final = 14 +column_type_length: Final = 1 +column_format_text_subheader_index_offset: Final = 22 +column_format_text_subheader_index_length: Final = 2 +column_format_offset_offset: Final = 24 +column_format_offset_length: Final = 2 +column_format_length_offset: Final = 26 +column_format_length_length: Final = 2 +column_label_text_subheader_index_offset: Final = 28 +column_label_text_subheader_index_length: Final = 2 +column_label_offset_offset: Final = 30 +column_label_offset_length: Final = 2 +column_label_length_offset: Final = 32 +column_label_length_length: Final = 2 +rle_compression: Final = b"SASYZCRL" +rdc_compression: Final = b"SASYZCR2" + +compression_literals: Final = [rle_compression, rdc_compression] + +# Incomplete list of encodings, using SAS nomenclature: +# https://support.sas.com/documentation/onlinedoc/dfdmstudio/2.6/dmpdmsug/Content/dfU_Encodings_SAS.html +# corresponding to the Python documentation of standard encodings +# https://docs.python.org/3/library/codecs.html#standard-encodings +encoding_names: Final = { + 20: "utf-8", + 29: "latin1", + 30: "latin2", + 31: "latin3", + 32: "latin4", + 33: "cyrillic", + 34: "arabic", + 35: "greek", + 36: "hebrew", + 37: "latin5", + 38: "latin6", + 39: "cp874", + 40: "latin9", + 41: "cp437", + 42: "cp850", + 43: "cp852", + 44: "cp857", + 45: "cp858", + 46: "cp862", + 47: "cp864", + 48: "cp865", + 49: "cp866", 
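# Editor's note: descriptive comment, not part of pandas. The integer keys
# in this table are the value of the single byte read from
# ``encoding_offset`` (70) in the sas7bdat header; the reader looks that
# byte up here to choose a Python codec, and codes absent from the table
# are treated as an unknown encoding.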
+ 50: "cp869", + 51: "cp874", + # 52: "", # not found + # 53: "", # not found + # 54: "", # not found + 55: "cp720", + 56: "cp737", + 57: "cp775", + 58: "cp860", + 59: "cp863", + 60: "cp1250", + 61: "cp1251", + 62: "cp1252", + 63: "cp1253", + 64: "cp1254", + 65: "cp1255", + 66: "cp1256", + 67: "cp1257", + 68: "cp1258", + 118: "cp950", + # 119: "", # not found + 123: "big5", + 125: "gb2312", + 126: "cp936", + 134: "euc_jp", + 136: "cp932", + 138: "shift_jis", + 140: "euc-kr", + 141: "cp949", + 227: "latin8", + # 228: "", # not found + # 229: "" # not found +} + + +class SASIndex: + row_size_index: Final = 0 + column_size_index: Final = 1 + subheader_counts_index: Final = 2 + column_text_index: Final = 3 + column_name_index: Final = 4 + column_attributes_index: Final = 5 + format_and_label_index: Final = 6 + column_list_index: Final = 7 + data_subheader_index: Final = 8 + + +subheader_signature_to_index: Final = { + b"\xF7\xF7\xF7\xF7": SASIndex.row_size_index, + b"\x00\x00\x00\x00\xF7\xF7\xF7\xF7": SASIndex.row_size_index, + b"\xF7\xF7\xF7\xF7\x00\x00\x00\x00": SASIndex.row_size_index, + b"\xF7\xF7\xF7\xF7\xFF\xFF\xFB\xFE": SASIndex.row_size_index, + b"\xF6\xF6\xF6\xF6": SASIndex.column_size_index, + b"\x00\x00\x00\x00\xF6\xF6\xF6\xF6": SASIndex.column_size_index, + b"\xF6\xF6\xF6\xF6\x00\x00\x00\x00": SASIndex.column_size_index, + b"\xF6\xF6\xF6\xF6\xFF\xFF\xFB\xFE": SASIndex.column_size_index, + b"\x00\xFC\xFF\xFF": SASIndex.subheader_counts_index, + b"\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index, + b"\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.subheader_counts_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index, + b"\xFD\xFF\xFF\xFF": SASIndex.column_text_index, + b"\xFF\xFF\xFF\xFD": SASIndex.column_text_index, + b"\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_text_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD": SASIndex.column_text_index, + b"\xFF\xFF\xFF\xFF": SASIndex.column_name_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_name_index, + b"\xFC\xFF\xFF\xFF": SASIndex.column_attributes_index, + b"\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index, + b"\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_attributes_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index, + b"\xFE\xFB\xFF\xFF": SASIndex.format_and_label_index, + b"\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index, + b"\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.format_and_label_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index, + b"\xFE\xFF\xFF\xFF": SASIndex.column_list_index, + b"\xFF\xFF\xFF\xFE": SASIndex.column_list_index, + b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_list_index, + b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE": SASIndex.column_list_index, +} + + +# List of frequently used SAS date and datetime formats +# http://support.sas.com/documentation/cdl/en/etsug/60372/HTML/default/viewer.htm#etsug_intervals_sect009.htm +# https://github.com/epam/parso/blob/master/src/main/java/com/epam/parso/impl/SasFileConstants.java +sas_date_formats: Final = ( + "DATE", + "DAY", + "DDMMYY", + "DOWNAME", + "JULDAY", + "JULIAN", + "MMDDYY", + "MMYY", + "MMYYC", + "MMYYD", + "MMYYP", + "MMYYS", + "MMYYN", + "MONNAME", + "MONTH", + "MONYY", + "QTR", + "QTRR", + "NENGO", + "WEEKDATE", + "WEEKDATX", + "WEEKDAY", + "WEEKV", + "WORDDATE", + "WORDDATX", + "YEAR", + "YYMM", + "YYMMC", + "YYMMD", + "YYMMP", + "YYMMS", + "YYMMN", + "YYMON", + "YYMMDD", + "YYQ", + "YYQC", + "YYQD", + "YYQP", + "YYQS", + "YYQN", + "YYQR", + "YYQRC", + 
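# Editor's note: descriptive comment, not part of pandas. These names are
# compared against each column's format string when a chunk is converted to
# a DataFrame: a match in this tuple is decoded as a SAS date (days since
# 1960-01-01), while a match in ``sas_datetime_formats`` below is decoded
# as a SAS datetime (seconds since 1960-01-01).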
"YYQRD", + "YYQRP", + "YYQRS", + "YYQRN", + "YYMMDDP", + "YYMMDDC", + "E8601DA", + "YYMMDDN", + "MMDDYYC", + "MMDDYYS", + "MMDDYYD", + "YYMMDDS", + "B8601DA", + "DDMMYYN", + "YYMMDDD", + "DDMMYYB", + "DDMMYYP", + "MMDDYYP", + "YYMMDDB", + "MMDDYYN", + "DDMMYYC", + "DDMMYYD", + "DDMMYYS", + "MINGUO", +) + +sas_datetime_formats: Final = ( + "DATETIME", + "DTWKDATX", + "B8601DN", + "B8601DT", + "B8601DX", + "B8601DZ", + "B8601LX", + "E8601DN", + "E8601DT", + "E8601DX", + "E8601DZ", + "E8601LX", + "DATEAMPM", + "DTDATE", + "DTMONYY", + "DTMONYY", + "DTWKDATX", + "DTYEAR", + "TOD", + "MDYAMPM", +) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sas_xport.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sas_xport.py new file mode 100644 index 00000000..e68f4789 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sas_xport.py @@ -0,0 +1,508 @@ +""" +Read a SAS XPort format file into a Pandas DataFrame. + +Based on code from Jack Cushman (github.com/jcushman/xport). + +The file format is defined here: + +https://support.sas.com/content/dam/SAS/support/en/technical-papers/record-layout-of-a-sas-version-5-or-6-data-set-in-sas-transport-xport-format.pdf +""" +from __future__ import annotations + +from collections import abc +from datetime import datetime +import struct +from typing import TYPE_CHECKING +import warnings + +import numpy as np + +from pandas.util._decorators import Appender +from pandas.util._exceptions import find_stack_level + +import pandas as pd + +from pandas.io.common import get_handle +from pandas.io.sas.sasreader import ReaderBase + +if TYPE_CHECKING: + from pandas._typing import ( + CompressionOptions, + DatetimeNaTType, + FilePath, + ReadBuffer, + ) +_correct_line1 = ( + "HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!" + "000000000000000000000000000000 " +) +_correct_header1 = ( + "HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!000000000000000001600000000" +) +_correct_header2 = ( + "HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!" + "000000000000000000000000000000 " +) +_correct_obs_header = ( + "HEADER RECORD*******OBS HEADER RECORD!!!!!!!" + "000000000000000000000000000000 " +) +_fieldkeys = [ + "ntype", + "nhfun", + "field_length", + "nvar0", + "name", + "label", + "nform", + "nfl", + "num_decimals", + "nfj", + "nfill", + "niform", + "nifl", + "nifd", + "npos", + "_", +] + + +_base_params_doc = """\ +Parameters +---------- +filepath_or_buffer : str or file-like object + Path to SAS file or object implementing binary read method.""" + +_params2_doc = """\ +index : identifier of index column + Identifier of column that should be used as index of the DataFrame. +encoding : str + Encoding for text data. +chunksize : int + Read file `chunksize` lines at a time, returns iterator.""" + +_format_params_doc = """\ +format : str + File format, only `xport` is currently supported.""" + +_iterator_doc = """\ +iterator : bool, default False + Return XportReader object for reading file incrementally.""" + + +_read_sas_doc = f"""Read a SAS file into a DataFrame. + +{_base_params_doc} +{_format_params_doc} +{_params2_doc} +{_iterator_doc} + +Returns +------- +DataFrame or XportReader + +Examples +-------- +Read a SAS Xport file: + +>>> df = pd.read_sas('filename.XPT') + +Read a Xport file in 10,000 line chunks: + +>>> itr = pd.read_sas('filename.XPT', chunksize=10000) +>>> for chunk in itr: +>>> do_something(chunk) + +""" + +_xport_reader_doc = f"""\ +Class for reading SAS Xport files. 
+ +{_base_params_doc} +{_params2_doc} + +Attributes +---------- +member_info : list + Contains information about the file +fields : list + Contains information about the variables in the file +""" + +_read_method_doc = """\ +Read observations from SAS Xport file, returning as data frame. + +Parameters +---------- +nrows : int + Number of rows to read from data file; if None, read whole + file. + +Returns +------- +A DataFrame. +""" + + +def _parse_date(datestr: str) -> DatetimeNaTType: + """Given a date in xport format, return Python date.""" + try: + # e.g. "16FEB11:10:07:55" + return datetime.strptime(datestr, "%d%b%y:%H:%M:%S") + except ValueError: + return pd.NaT + + +def _split_line(s: str, parts): + """ + Parameters + ---------- + s: str + Fixed-length string to split + parts: list of (name, length) pairs + Used to break up string, name '_' will be filtered from output. + + Returns + ------- + Dict of name:contents of string at given location. + """ + out = {} + start = 0 + for name, length in parts: + out[name] = s[start : start + length].strip() + start += length + del out["_"] + return out + + +def _handle_truncated_float_vec(vec, nbytes): + # This feature is not well documented, but some SAS XPORT files + # have 2-7 byte "truncated" floats. To read these truncated + # floats, pad them with zeros on the right to make 8 byte floats. + # + # References: + # https://github.com/jcushman/xport/pull/3 + # The R "foreign" library + + if nbytes != 8: + vec1 = np.zeros(len(vec), np.dtype("S8")) + dtype = np.dtype(f"S{nbytes},S{8 - nbytes}") + vec2 = vec1.view(dtype=dtype) + vec2["f0"] = vec + return vec2 + + return vec + + +def _parse_float_vec(vec): + """ + Parse a vector of float values representing IBM 8 byte floats into + native 8 byte floats. + """ + dtype = np.dtype(">u4,>u4") + vec1 = vec.view(dtype=dtype) + xport1 = vec1["f0"] + xport2 = vec1["f1"] + + # Start by setting first half of ieee number to first half of IBM + # number sans exponent + ieee1 = xport1 & 0x00FFFFFF + + # The fraction bit to the left of the binary point in the ieee + # format was set and the number was shifted 0, 1, 2, or 3 + # places. This will tell us how to adjust the ibm exponent to be a + # power of 2 ieee exponent and how to shift the fraction bits to + # restore the correct magnitude. + shift = np.zeros(len(vec), dtype=np.uint8) + shift[np.where(xport1 & 0x00200000)] = 1 + shift[np.where(xport1 & 0x00400000)] = 2 + shift[np.where(xport1 & 0x00800000)] = 3 + + # shift the ieee number down the correct number of places then + # set the second half of the ieee number to be the second half + # of the ibm number shifted appropriately, ored with the bits + # from the first half that would have been shifted in if we + # could shift a double. All we are worried about are the low + # order 3 bits of the first half since we're only shifting by + # 1, 2, or 3. + ieee1 >>= shift + ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift))) + + # clear the 1 bit to the left of the binary point + ieee1 &= 0xFFEFFFFF + + # set the exponent of the ieee number to be the actual exponent + # plus the shift count + 1023. Or this into the first half of the + # ieee number. The ibm exponent is excess 64 but is adjusted by 65 + # since during conversion to ibm format the exponent is + # incremented by 1 and the fraction bits left 4 positions to the + # right of the radix point. 
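# Editor's note, a worked example of the scheme described above (sketch,
# not part of pandas): IBM 0x4110000000000000 encodes +1.0 -- the exponent
# byte 0x41 is excess-64 for 16**1 and the 56-bit fraction is 1/16, so
# 16 * (1/16) = 1.0 -- and the statements below rebuild it as IEEE
# 0x3FF0000000000000.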
(had to add >> 24 because C treats & + # 0x7f as 0x7f000000 and Python doesn't) + ieee1 |= ((((((xport1 >> 24) & 0x7F) - 65) << 2) + shift + 1023) << 20) | ( + xport1 & 0x80000000 + ) + + ieee = np.empty((len(ieee1),), dtype=">u4,>u4") + ieee["f0"] = ieee1 + ieee["f1"] = ieee2 + ieee = ieee.view(dtype=">f8") + ieee = ieee.astype("f8") + + return ieee + + +class XportReader(ReaderBase, abc.Iterator): + __doc__ = _xport_reader_doc + + def __init__( + self, + filepath_or_buffer: FilePath | ReadBuffer[bytes], + index=None, + encoding: str | None = "ISO-8859-1", + chunksize: int | None = None, + compression: CompressionOptions = "infer", + ) -> None: + self._encoding = encoding + self._lines_read = 0 + self._index = index + self._chunksize = chunksize + + self.handles = get_handle( + filepath_or_buffer, + "rb", + encoding=encoding, + is_text=False, + compression=compression, + ) + self.filepath_or_buffer = self.handles.handle + + try: + self._read_header() + except Exception: + self.close() + raise + + def close(self) -> None: + self.handles.close() + + def _get_row(self): + return self.filepath_or_buffer.read(80).decode() + + def _read_header(self): + self.filepath_or_buffer.seek(0) + + # read file header + line1 = self._get_row() + if line1 != _correct_line1: + if "**COMPRESSED**" in line1: + # this was created with the PROC CPORT method and can't be read + # https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/movefile/p1bm6aqp3fw4uin1hucwh718f6kp.htm + raise ValueError( + "Header record indicates a CPORT file, which is not readable." + ) + raise ValueError("Header record is not an XPORT file.") + + line2 = self._get_row() + fif = [["prefix", 24], ["version", 8], ["OS", 8], ["_", 24], ["created", 16]] + file_info = _split_line(line2, fif) + if file_info["prefix"] != "SAS SAS SASLIB": + raise ValueError("Header record has invalid prefix.") + file_info["created"] = _parse_date(file_info["created"]) + self.file_info = file_info + + line3 = self._get_row() + file_info["modified"] = _parse_date(line3[:16]) + + # read member header + header1 = self._get_row() + header2 = self._get_row() + headflag1 = header1.startswith(_correct_header1) + headflag2 = header2 == _correct_header2 + if not (headflag1 and headflag2): + raise ValueError("Member header not found") + # usually 140, could be 135 + fieldnamelength = int(header1[-5:-2]) + + # member info + mem = [ + ["prefix", 8], + ["set_name", 8], + ["sasdata", 8], + ["version", 8], + ["OS", 8], + ["_", 24], + ["created", 16], + ] + member_info = _split_line(self._get_row(), mem) + mem = [["modified", 16], ["_", 16], ["label", 40], ["type", 8]] + member_info.update(_split_line(self._get_row(), mem)) + member_info["modified"] = _parse_date(member_info["modified"]) + member_info["created"] = _parse_date(member_info["created"]) + self.member_info = member_info + + # read field names + types = {1: "numeric", 2: "char"} + fieldcount = int(self._get_row()[54:58]) + datalength = fieldnamelength * fieldcount + # round up to nearest 80 + if datalength % 80: + datalength += 80 - datalength % 80 + fielddata = self.filepath_or_buffer.read(datalength) + fields = [] + obs_length = 0 + while len(fielddata) >= fieldnamelength: + # pull data for one field + fieldbytes, fielddata = ( + fielddata[:fieldnamelength], + fielddata[fieldnamelength:], + ) + + # rest at end gets ignored, so if field is short, pad out + # to match struct pattern below + fieldbytes = fieldbytes.ljust(140) + + fieldstruct = struct.unpack(">hhhh8s40s8shhh2s8shhl52s", fieldbytes) + field = 
dict(zip(_fieldkeys, fieldstruct)) + del field["_"] + field["ntype"] = types[field["ntype"]] + fl = field["field_length"] + if field["ntype"] == "numeric" and ((fl < 2) or (fl > 8)): + msg = f"Floating field width {fl} is not between 2 and 8." + raise TypeError(msg) + + for k, v in field.items(): + try: + field[k] = v.strip() + except AttributeError: + pass + + obs_length += field["field_length"] + fields += [field] + + header = self._get_row() + if not header == _correct_obs_header: + raise ValueError("Observation header not found.") + + self.fields = fields + self.record_length = obs_length + self.record_start = self.filepath_or_buffer.tell() + + self.nobs = self._record_count() + self.columns = [x["name"].decode() for x in self.fields] + + # Setup the dtype. + dtypel = [ + ("s" + str(i), "S" + str(field["field_length"])) + for i, field in enumerate(self.fields) + ] + dtype = np.dtype(dtypel) + self._dtype = dtype + + def __next__(self) -> pd.DataFrame: + return self.read(nrows=self._chunksize or 1) + + def _record_count(self) -> int: + """ + Get number of records in file. + + This is maybe suboptimal because we have to seek to the end of + the file. + + Side effect: returns file position to record_start. + """ + self.filepath_or_buffer.seek(0, 2) + total_records_length = self.filepath_or_buffer.tell() - self.record_start + + if total_records_length % 80 != 0: + warnings.warn( + "xport file may be corrupted.", + stacklevel=find_stack_level(), + ) + + if self.record_length > 80: + self.filepath_or_buffer.seek(self.record_start) + return total_records_length // self.record_length + + self.filepath_or_buffer.seek(-80, 2) + last_card_bytes = self.filepath_or_buffer.read(80) + last_card = np.frombuffer(last_card_bytes, dtype=np.uint64) + + # 8 byte blank + ix = np.flatnonzero(last_card == 2314885530818453536) + + if len(ix) == 0: + tail_pad = 0 + else: + tail_pad = 8 * len(ix) + + self.filepath_or_buffer.seek(self.record_start) + + return (total_records_length - tail_pad) // self.record_length + + def get_chunk(self, size: int | None = None) -> pd.DataFrame: + """ + Reads lines from Xport file and returns as dataframe + + Parameters + ---------- + size : int, defaults to None + Number of lines to read. If None, reads whole file. 
+ + Returns + ------- + DataFrame + """ + if size is None: + size = self._chunksize + return self.read(nrows=size) + + def _missing_double(self, vec): + v = vec.view(dtype="u1,u1,u2,u4") + miss = (v["f1"] == 0) & (v["f2"] == 0) & (v["f3"] == 0) + miss1 = ( + ((v["f0"] >= 0x41) & (v["f0"] <= 0x5A)) + | (v["f0"] == 0x5F) + | (v["f0"] == 0x2E) + ) + miss &= miss1 + return miss + + @Appender(_read_method_doc) + def read(self, nrows: int | None = None) -> pd.DataFrame: + if nrows is None: + nrows = self.nobs + + read_lines = min(nrows, self.nobs - self._lines_read) + read_len = read_lines * self.record_length + if read_len <= 0: + self.close() + raise StopIteration + raw = self.filepath_or_buffer.read(read_len) + data = np.frombuffer(raw, dtype=self._dtype, count=read_lines) + + df_data = {} + for j, x in enumerate(self.columns): + vec = data["s" + str(j)] + ntype = self.fields[j]["ntype"] + if ntype == "numeric": + vec = _handle_truncated_float_vec(vec, self.fields[j]["field_length"]) + miss = self._missing_double(vec) + v = _parse_float_vec(vec) + v[miss] = np.nan + elif self.fields[j]["ntype"] == "char": + v = [y.rstrip() for y in vec] + + if self._encoding is not None: + v = [y.decode(self._encoding) for y in v] + + df_data.update({x: v}) + df = pd.DataFrame(df_data) + + if self._index is None: + df.index = pd.Index(range(self._lines_read, self._lines_read + read_lines)) + else: + df = df.set_index(self._index) + + self._lines_read += read_lines + + return df diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sasreader.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sasreader.py new file mode 100644 index 00000000..7fdfd214 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/sas/sasreader.py @@ -0,0 +1,180 @@ +""" +Read SAS sas7bdat or xport files. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Protocol, + overload, +) + +from pandas.util._decorators import doc + +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import stringify_path + +if TYPE_CHECKING: + from collections.abc import Hashable + from types import TracebackType + + from pandas._typing import ( + CompressionOptions, + FilePath, + ReadBuffer, + ) + + from pandas import DataFrame + + +class ReaderBase(Protocol): + """ + Protocol for XportReader and SAS7BDATReader classes. + """ + + def read(self, nrows: int | None = None) -> DataFrame: + ... + + def close(self) -> None: + ... + + def __enter__(self) -> ReaderBase: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.close() + + +@overload +def read_sas( + filepath_or_buffer: FilePath | ReadBuffer[bytes], + *, + format: str | None = ..., + index: Hashable | None = ..., + encoding: str | None = ..., + chunksize: int = ..., + iterator: bool = ..., + compression: CompressionOptions = ..., +) -> ReaderBase: + ... + + +@overload +def read_sas( + filepath_or_buffer: FilePath | ReadBuffer[bytes], + *, + format: str | None = ..., + index: Hashable | None = ..., + encoding: str | None = ..., + chunksize: None = ..., + iterator: bool = ..., + compression: CompressionOptions = ..., +) -> DataFrame | ReaderBase: + ... 
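# Editor's note: a minimal usage sketch, not part of pandas; the file names
# and the ``process`` helper are hypothetical. The file extension selects
# the reader when ``format`` is omitted, and passing ``chunksize`` (or
# ``iterator=True``) returns a ReaderBase that can be used as a context
# manager and iterated:
#
# >>> df = pd.read_sas("example.sas7bdat")  # doctest: +SKIP
# >>> with pd.read_sas("example.xpt", chunksize=10_000) as rdr:  # doctest: +SKIP
# ...     for chunk in rdr:
# ...         process(chunk)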
+ + +@doc(decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer") +def read_sas( + filepath_or_buffer: FilePath | ReadBuffer[bytes], + *, + format: str | None = None, + index: Hashable | None = None, + encoding: str | None = None, + chunksize: int | None = None, + iterator: bool = False, + compression: CompressionOptions = "infer", +) -> DataFrame | ReaderBase: + """ + Read SAS files stored as either XPORT or SAS7BDAT format files. + + Parameters + ---------- + filepath_or_buffer : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``read()`` function. The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: + ``file://localhost/path/to/table.sas7bdat``. + format : str {{'xport', 'sas7bdat'}} or None + If None, file format is inferred from file extension. If 'xport' or + 'sas7bdat', uses the corresponding format. + index : identifier of index column, defaults to None + Identifier of column that should be used as index of the DataFrame. + encoding : str, default is None + Encoding for text data. If None, text data are stored as raw bytes. + chunksize : int + Read file `chunksize` lines at a time, returns iterator. + + .. versionchanged:: 1.2 + + ``TextFileReader`` is a context manager. + iterator : bool, defaults to False + If True, returns an iterator for reading the file incrementally. + + .. versionchanged:: 1.2 + + ``TextFileReader`` is a context manager. + {decompression_options} + + Returns + ------- + DataFrame if iterator=False and chunksize=None, else SAS7BDATReader + or XportReader + + Examples + -------- + >>> df = pd.read_sas("sas_data.sas7bdat") # doctest: +SKIP + """ + if format is None: + buffer_error_msg = ( + "If this is a buffer object rather " + "than a string name, you must specify a format string" + ) + filepath_or_buffer = stringify_path(filepath_or_buffer) + if not isinstance(filepath_or_buffer, str): + raise ValueError(buffer_error_msg) + fname = filepath_or_buffer.lower() + if ".xpt" in fname: + format = "xport" + elif ".sas7bdat" in fname: + format = "sas7bdat" + else: + raise ValueError( + f"unable to infer format of SAS file from filename: {repr(fname)}" + ) + + reader: ReaderBase + if format.lower() == "xport": + from pandas.io.sas.sas_xport import XportReader + + reader = XportReader( + filepath_or_buffer, + index=index, + encoding=encoding, + chunksize=chunksize, + compression=compression, + ) + elif format.lower() == "sas7bdat": + from pandas.io.sas.sas7bdat import SAS7BDATReader + + reader = SAS7BDATReader( + filepath_or_buffer, + index=index, + encoding=encoding, + chunksize=chunksize, + compression=compression, + ) + else: + raise ValueError("unknown SAS format") + + if iterator or chunksize: + return reader + + with reader: + return reader.read() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/spss.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/spss.py new file mode 100644 index 00000000..58487c6c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/spss.py @@ -0,0 +1,71 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.inference import is_list_like + +from pandas.io.common import stringify_path + +if TYPE_CHECKING: + from 
collections.abc import Sequence + from pathlib import Path + + from pandas._typing import DtypeBackend + + from pandas import DataFrame + + +def read_spss( + path: str | Path, + usecols: Sequence[str] | None = None, + convert_categoricals: bool = True, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame: + """ + Load an SPSS file from the file path, returning a DataFrame. + + Parameters + ---------- + path : str or Path + File path. + usecols : list-like, optional + Return a subset of the columns. If None, return all columns. + convert_categoricals : bool, default is True + Convert categorical columns into pd.Categorical. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + Returns + ------- + DataFrame + + Examples + -------- + >>> df = pd.read_spss("spss_data.sav") # doctest: +SKIP + """ + pyreadstat = import_optional_dependency("pyreadstat") + check_dtype_backend(dtype_backend) + + if usecols is not None: + if not is_list_like(usecols): + raise TypeError("usecols must be list-like.") + usecols = list(usecols) # pyreadstat requires a list + + df, _ = pyreadstat.read_sav( + stringify_path(path), usecols=usecols, apply_value_formats=convert_categoricals + ) + if dtype_backend is not lib.no_default: + df = df.convert_dtypes(dtype_backend=dtype_backend) + return df diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/sql.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/sql.py new file mode 100644 index 00000000..c1d68d71 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/sql.py @@ -0,0 +1,2515 @@ +""" +Collection of query wrappers / abstractions to both facilitate data +retrieval and to reduce dependency on DB-specific API. 
+""" + +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +from contextlib import ( + ExitStack, + contextmanager, +) +from datetime import ( + date, + datetime, + time, +) +from functools import partial +import re +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency +from pandas.errors import ( + AbstractMethodError, + DatabaseError, +) +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.common import ( + is_dict_like, + is_list_like, +) +from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.missing import isna + +from pandas import get_option +from pandas.core.api import ( + DataFrame, + Series, +) +from pandas.core.arrays import ArrowExtensionArray +from pandas.core.base import PandasObject +import pandas.core.common as com +from pandas.core.internals.construction import convert_object_array +from pandas.core.tools.datetimes import to_datetime + +if TYPE_CHECKING: + from collections.abc import ( + Iterator, + Mapping, + ) + + from sqlalchemy import Table + from sqlalchemy.sql.expression import ( + Select, + TextClause, + ) + + from pandas._typing import ( + DateTimeErrorChoices, + DtypeArg, + DtypeBackend, + IndexLabel, + Self, + ) + + from pandas import Index + +# ----------------------------------------------------------------------------- +# -- Helper functions + + +def _process_parse_dates_argument(parse_dates): + """Process parse_dates argument for read_sql functions""" + # handle non-list entries for parse_dates gracefully + if parse_dates is True or parse_dates is None or parse_dates is False: + parse_dates = [] + + elif not hasattr(parse_dates, "__iter__"): + parse_dates = [parse_dates] + return parse_dates + + +def _handle_date_column( + col, utc: bool = False, format: str | dict[str, Any] | None = None +): + if isinstance(format, dict): + # GH35185 Allow custom error values in parse_dates argument of + # read_sql like functions. + # Format can take on custom to_datetime argument values such as + # {"errors": "coerce"} or {"dayfirst": True} + error: DateTimeErrorChoices = format.pop("errors", None) or "ignore" + return to_datetime(col, errors=error, **format) + else: + # Allow passing of formatting string for integers + # GH17855 + if format is None and ( + issubclass(col.dtype.type, np.floating) + or issubclass(col.dtype.type, np.integer) + ): + format = "s" + if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]: + return to_datetime(col, errors="coerce", unit=format, utc=utc) + elif isinstance(col.dtype, DatetimeTZDtype): + # coerce to UTC timezone + # GH11216 + return to_datetime(col, utc=True) + else: + return to_datetime(col, errors="coerce", format=format, utc=utc) + + +def _parse_date_columns(data_frame, parse_dates): + """ + Force non-datetime columns to be read as such. + Supports both string formatted and integer timestamp columns. 
+ """ + parse_dates = _process_parse_dates_argument(parse_dates) + + # we want to coerce datetime64_tz dtypes for now to UTC + # we could in theory do a 'nice' conversion from a FixedOffset tz + # GH11216 + for i, (col_name, df_col) in enumerate(data_frame.items()): + if isinstance(df_col.dtype, DatetimeTZDtype) or col_name in parse_dates: + try: + fmt = parse_dates[col_name] + except TypeError: + fmt = None + data_frame.isetitem(i, _handle_date_column(df_col, format=fmt)) + + return data_frame + + +def _convert_arrays_to_dataframe( + data, + columns, + coerce_float: bool = True, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", +) -> DataFrame: + content = lib.to_object_array_tuples(data) + arrays = convert_object_array( + list(content.T), + dtype=None, + coerce_float=coerce_float, + dtype_backend=dtype_backend, + ) + if dtype_backend == "pyarrow": + pa = import_optional_dependency("pyarrow") + arrays = [ + ArrowExtensionArray(pa.array(arr, from_pandas=True)) for arr in arrays + ] + if arrays: + df = DataFrame(dict(zip(list(range(len(columns))), arrays))) + df.columns = columns + return df + else: + return DataFrame(columns=columns) + + +def _wrap_result( + data, + columns, + index_col=None, + coerce_float: bool = True, + parse_dates=None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", +): + """Wrap result set of query in a DataFrame.""" + frame = _convert_arrays_to_dataframe(data, columns, coerce_float, dtype_backend) + + if dtype: + frame = frame.astype(dtype) + + frame = _parse_date_columns(frame, parse_dates) + + if index_col is not None: + frame = frame.set_index(index_col) + + return frame + + +def execute(sql, con, params=None): + """ + Execute the given SQL query using the provided connection object. + + Parameters + ---------- + sql : string + SQL query to be executed. + con : SQLAlchemy connection or sqlite3 connection + If a DBAPI2 object, only sqlite3 is supported. + params : list or tuple, optional, default: None + List of parameters to pass to execute method. + + Returns + ------- + Results Iterable + """ + warnings.warn( + "`pandas.io.sql.execute` is deprecated and " + "will be removed in the future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) # GH50185 + sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore") + + if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Engine)): + raise TypeError("pandas.io.sql.execute requires a connection") # GH50185 + with pandasSQL_builder(con, need_transaction=True) as pandas_sql: + return pandas_sql.execute(sql, params) + + +# ----------------------------------------------------------------------------- +# -- Read and write to DataFrames + + +@overload +def read_sql_table( + table_name: str, + con, + schema=..., + index_col: str | list[str] | None = ..., + coerce_float=..., + parse_dates: list[str] | dict[str, str] | None = ..., + columns: list[str] | None = ..., + chunksize: None = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame: + ... + + +@overload +def read_sql_table( + table_name: str, + con, + schema=..., + index_col: str | list[str] | None = ..., + coerce_float=..., + parse_dates: list[str] | dict[str, str] | None = ..., + columns: list[str] | None = ..., + chunksize: int = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> Iterator[DataFrame]: + ... 
+ + +def read_sql_table( + table_name: str, + con, + schema: str | None = None, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + parse_dates: list[str] | dict[str, str] | None = None, + columns: list[str] | None = None, + chunksize: int | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame | Iterator[DataFrame]: + """ + Read SQL database table into a DataFrame. + + Given a table name and a SQLAlchemy connectable, returns a DataFrame. + This function does not support DBAPI connections. + + Parameters + ---------- + table_name : str + Name of SQL table in database. + con : SQLAlchemy connectable or str + A database URI could be provided as str. + SQLite DBAPI connection mode not supported. + schema : str, default None + Name of SQL schema in database to query (if database flavor + supports this). Uses default schema if None (default). + index_col : str or list of str, optional, default: None + Column(s) to set as index(MultiIndex). + coerce_float : bool, default True + Attempts to convert values of non-string, non-numeric objects (like + decimal.Decimal) to floating point. Can result in loss of Precision. + parse_dates : list or dict, default None + - List of column names to parse as dates. + - Dict of ``{column_name: format string}`` where format string is + strftime compatible in case of parsing string times or is one of + (D, s, ns, ms, us) in case of parsing integer timestamps. + - Dict of ``{column_name: arg dict}``, where the arg dict corresponds + to the keyword arguments of :func:`pandas.to_datetime` + Especially useful with databases without native Datetime support, + such as SQLite. + columns : list, default None + List of column names to select from SQL table. + chunksize : int, default None + If specified, returns an iterator where `chunksize` is the number of + rows to include in each chunk. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + Returns + ------- + DataFrame or Iterator[DataFrame] + A SQL table is returned as two-dimensional data structure with labeled + axes. + + See Also + -------- + read_sql_query : Read SQL query into a DataFrame. + read_sql : Read SQL query or database table into a DataFrame. + + Notes + ----- + Any datetime values with time zone information will be converted to UTC. 
+ + Examples + -------- + >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP + """ + + check_dtype_backend(dtype_backend) + if dtype_backend is lib.no_default: + dtype_backend = "numpy" # type: ignore[assignment] + assert dtype_backend is not lib.no_default + + with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql: + if not pandas_sql.has_table(table_name): + raise ValueError(f"Table {table_name} not found") + + table = pandas_sql.read_table( + table_name, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + columns=columns, + chunksize=chunksize, + dtype_backend=dtype_backend, + ) + + if table is not None: + return table + else: + raise ValueError(f"Table {table_name} not found", con) + + +@overload +def read_sql_query( + sql, + con, + index_col: str | list[str] | None = ..., + coerce_float=..., + params: list[Any] | Mapping[str, Any] | None = ..., + parse_dates: list[str] | dict[str, str] | None = ..., + chunksize: None = ..., + dtype: DtypeArg | None = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame: + ... + + +@overload +def read_sql_query( + sql, + con, + index_col: str | list[str] | None = ..., + coerce_float=..., + params: list[Any] | Mapping[str, Any] | None = ..., + parse_dates: list[str] | dict[str, str] | None = ..., + chunksize: int = ..., + dtype: DtypeArg | None = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> Iterator[DataFrame]: + ... + + +def read_sql_query( + sql, + con, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + params: list[Any] | Mapping[str, Any] | None = None, + parse_dates: list[str] | dict[str, str] | None = None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame | Iterator[DataFrame]: + """ + Read SQL query into a DataFrame. + + Returns a DataFrame corresponding to the result set of the query + string. Optionally provide an `index_col` parameter to use one of the + columns as the index, otherwise default integer index will be used. + + Parameters + ---------- + sql : str SQL query or SQLAlchemy Selectable (select or text object) + SQL query to be executed. + con : SQLAlchemy connectable, str, or sqlite3 connection + Using SQLAlchemy makes it possible to use any DB supported by that + library. If a DBAPI2 object, only sqlite3 is supported. + index_col : str or list of str, optional, default: None + Column(s) to set as index(MultiIndex). + coerce_float : bool, default True + Attempts to convert values of non-string, non-numeric objects (like + decimal.Decimal) to floating point. Useful for SQL result sets. + params : list, tuple or mapping, optional, default: None + List of parameters to pass to execute method. The syntax used + to pass parameters is database driver dependent. Check your + database driver documentation for which of the five syntax styles, + described in PEP 249's paramstyle, is supported. + Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}. + parse_dates : list or dict, default: None + - List of column names to parse as dates. + - Dict of ``{column_name: format string}`` where format string is + strftime compatible in case of parsing string times, or is one of + (D, s, ns, ms, us) in case of parsing integer timestamps. 
+ - Dict of ``{column_name: arg dict}``, where the arg dict corresponds + to the keyword arguments of :func:`pandas.to_datetime` + Especially useful with databases without native Datetime support, + such as SQLite. + chunksize : int, default None + If specified, return an iterator where `chunksize` is the number of + rows to include in each chunk. + dtype : Type name or dict of columns + Data type for data or columns. E.g. np.float64 or + {'a': np.float64, 'b': np.int32, 'c': 'Int64'}. + + .. versionadded:: 1.3.0 + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + Returns + ------- + DataFrame or Iterator[DataFrame] + + See Also + -------- + read_sql_table : Read SQL database table into a DataFrame. + read_sql : Read SQL query or database table into a DataFrame. + + Notes + ----- + Any datetime values with time zone information parsed via the `parse_dates` + parameter will be converted to UTC. + + Examples + -------- + >>> from sqlalchemy import create_engine # doctest: +SKIP + >>> engine = create_engine("sqlite:///database.db") # doctest: +SKIP + >>> with engine.connect() as conn, conn.begin(): # doctest: +SKIP + ... data = pd.read_sql_table("data", conn) # doctest: +SKIP + """ + + check_dtype_backend(dtype_backend) + if dtype_backend is lib.no_default: + dtype_backend = "numpy" # type: ignore[assignment] + assert dtype_backend is not lib.no_default + + with pandasSQL_builder(con) as pandas_sql: + return pandas_sql.read_query( + sql, + index_col=index_col, + params=params, + coerce_float=coerce_float, + parse_dates=parse_dates, + chunksize=chunksize, + dtype=dtype, + dtype_backend=dtype_backend, + ) + + +@overload +def read_sql( + sql, + con, + index_col: str | list[str] | None = ..., + coerce_float=..., + params=..., + parse_dates=..., + columns: list[str] = ..., + chunksize: None = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + dtype: DtypeArg | None = None, +) -> DataFrame: + ... + + +@overload +def read_sql( + sql, + con, + index_col: str | list[str] | None = ..., + coerce_float=..., + params=..., + parse_dates=..., + columns: list[str] = ..., + chunksize: int = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + dtype: DtypeArg | None = None, +) -> Iterator[DataFrame]: + ... + + +def read_sql( + sql, + con, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + params=None, + parse_dates=None, + columns: list[str] | None = None, + chunksize: int | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + dtype: DtypeArg | None = None, +) -> DataFrame | Iterator[DataFrame]: + """ + Read SQL query or database table into a DataFrame. + + This function is a convenience wrapper around ``read_sql_table`` and + ``read_sql_query`` (for backward compatibility). It will delegate + to the specific function depending on the provided input. A SQL query + will be routed to ``read_sql_query``, while a database table name will + be routed to ``read_sql_table``. Note that the delegated function might + have more specific notes about their functionality not listed here. + + Parameters + ---------- + sql : str or SQLAlchemy Selectable (select or text object) + SQL query to be executed or a table name. 
+ con : SQLAlchemy connectable, str, or sqlite3 connection + Using SQLAlchemy makes it possible to use any DB supported by that + library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible + for engine disposal and connection closure for the SQLAlchemy connectable; str + connections are closed automatically. See + `here `_. + index_col : str or list of str, optional, default: None + Column(s) to set as index(MultiIndex). + coerce_float : bool, default True + Attempts to convert values of non-string, non-numeric objects (like + decimal.Decimal) to floating point, useful for SQL result sets. + params : list, tuple or dict, optional, default: None + List of parameters to pass to execute method. The syntax used + to pass parameters is database driver dependent. Check your + database driver documentation for which of the five syntax styles, + described in PEP 249's paramstyle, is supported. + Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}. + parse_dates : list or dict, default: None + - List of column names to parse as dates. + - Dict of ``{column_name: format string}`` where format string is + strftime compatible in case of parsing string times, or is one of + (D, s, ns, ms, us) in case of parsing integer timestamps. + - Dict of ``{column_name: arg dict}``, where the arg dict corresponds + to the keyword arguments of :func:`pandas.to_datetime` + Especially useful with databases without native Datetime support, + such as SQLite. + columns : list, default: None + List of column names to select from SQL table (only used when reading + a table). + chunksize : int, default None + If specified, return an iterator where `chunksize` is the + number of rows to include in each chunk. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + dtype : Type name or dict of columns + Data type for data or columns. E.g. np.float64 or + {'a': np.float64, 'b': np.int32, 'c': 'Int64'}. + The argument is ignored if a table is passed instead of a query. + + .. versionadded:: 2.0.0 + + Returns + ------- + DataFrame or Iterator[DataFrame] + + See Also + -------- + read_sql_table : Read SQL database table into a DataFrame. + read_sql_query : Read SQL query into a DataFrame. + + Examples + -------- + Read data from SQL via either a SQL query or a SQL tablename. + When using a SQLite database only SQL queries are accepted, + providing only the SQL tablename will result in an error. + + >>> from sqlite3 import connect + >>> conn = connect(':memory:') + >>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']], + ... columns=['int_column', 'date_column']) + >>> df.to_sql(name='test_data', con=conn) + 2 + + >>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn) + int_column date_column + 0 0 10/11/12 + 1 1 12/11/10 + + >>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP + + Apply date parsing to columns through the ``parse_dates`` argument + The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns. + Custom argument values for applying ``pd.to_datetime`` on a column are specified + via a dictionary format: + + >>> pd.read_sql('SELECT int_column, date_column FROM test_data', + ... conn, + ... 
parse_dates={"date_column": {"format": "%d/%m/%y"}}) + int_column date_column + 0 0 2012-11-10 + 1 1 2010-11-12 + """ + + check_dtype_backend(dtype_backend) + if dtype_backend is lib.no_default: + dtype_backend = "numpy" # type: ignore[assignment] + assert dtype_backend is not lib.no_default + + with pandasSQL_builder(con) as pandas_sql: + if isinstance(pandas_sql, SQLiteDatabase): + return pandas_sql.read_query( + sql, + index_col=index_col, + params=params, + coerce_float=coerce_float, + parse_dates=parse_dates, + chunksize=chunksize, + dtype_backend=dtype_backend, + dtype=dtype, + ) + + try: + _is_table_name = pandas_sql.has_table(sql) + except Exception: + # using generic exception to catch errors from sql drivers (GH24988) + _is_table_name = False + + if _is_table_name: + return pandas_sql.read_table( + sql, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + columns=columns, + chunksize=chunksize, + dtype_backend=dtype_backend, + ) + else: + return pandas_sql.read_query( + sql, + index_col=index_col, + params=params, + coerce_float=coerce_float, + parse_dates=parse_dates, + chunksize=chunksize, + dtype_backend=dtype_backend, + dtype=dtype, + ) + + +def to_sql( + frame, + name: str, + con, + schema: str | None = None, + if_exists: Literal["fail", "replace", "append"] = "fail", + index: bool = True, + index_label: IndexLabel | None = None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + method: Literal["multi"] | Callable | None = None, + engine: str = "auto", + **engine_kwargs, +) -> int | None: + """ + Write records stored in a DataFrame to a SQL database. + + Parameters + ---------- + frame : DataFrame, Series + name : str + Name of SQL table. + con : SQLAlchemy connectable(engine/connection) or database string URI + or sqlite3 DBAPI2 connection + Using SQLAlchemy makes it possible to use any DB supported by that + library. + If a DBAPI2 object, only sqlite3 is supported. + schema : str, optional + Name of SQL schema in database to write to (if database flavor + supports this). If None, use default schema (default). + if_exists : {'fail', 'replace', 'append'}, default 'fail' + - fail: If table exists, do nothing. + - replace: If table exists, drop it, recreate it, and insert data. + - append: If table exists, insert data. Create if does not exist. + index : bool, default True + Write DataFrame index as a column. + index_label : str or sequence, optional + Column label for index column(s). If None is given (default) and + `index` is True, then the index names are used. + A sequence should be given if the DataFrame uses MultiIndex. + chunksize : int, optional + Specify the number of rows in each batch to be written at a time. + By default, all rows will be written at once. + dtype : dict or scalar, optional + Specifying the datatype for columns. If a dictionary is used, the + keys should be the column names and the values should be the + SQLAlchemy types or strings for the sqlite3 fallback mode. If a + scalar is provided, it will be applied to all columns. + method : {None, 'multi', callable}, optional + Controls the SQL insertion clause used: + + - None : Uses standard SQL ``INSERT`` clause (one per row). + - ``'multi'``: Pass multiple values in a single ``INSERT`` clause. + - callable with signature ``(pd_table, conn, keys, data_iter) -> int | None``. + + Details and a sample callable implementation can be found in the + section :ref:`insert method `. + engine : {'auto', 'sqlalchemy'}, default 'auto' + SQL engine library to use. 
If 'auto', then the option + ``io.sql.engine`` is used. The default ``io.sql.engine`` + behavior is 'sqlalchemy' + + .. versionadded:: 1.3.0 + + **engine_kwargs + Any additional kwargs are passed to the engine. + + Returns + ------- + None or int + Number of rows affected by to_sql. None is returned if the callable + passed into ``method`` does not return an integer number of rows. + + .. versionadded:: 1.4.0 + + Notes + ----- + The returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor`` + or SQLAlchemy connectable. The returned value may not reflect the exact number of written + rows as stipulated in the + `sqlite3 `__ or + `SQLAlchemy `__ + """ # noqa: E501 + if if_exists not in ("fail", "replace", "append"): + raise ValueError(f"'{if_exists}' is not valid for if_exists") + + if isinstance(frame, Series): + frame = frame.to_frame() + elif not isinstance(frame, DataFrame): + raise NotImplementedError( + "'frame' argument should be either a Series or a DataFrame" + ) + + with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql: + return pandas_sql.to_sql( + frame, + name, + if_exists=if_exists, + index=index, + index_label=index_label, + schema=schema, + chunksize=chunksize, + dtype=dtype, + method=method, + engine=engine, + **engine_kwargs, + ) + + +def has_table(table_name: str, con, schema: str | None = None) -> bool: + """ + Check if DataBase has named table. + + Parameters + ---------- + table_name: string + Name of SQL table. + con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection + Using SQLAlchemy makes it possible to use any DB supported by that + library. + If a DBAPI2 object, only sqlite3 is supported. + schema : string, default None + Name of SQL schema in database to write to (if database flavor supports + this). If None, use default schema (default). + + Returns + ------- + boolean + """ + with pandasSQL_builder(con, schema=schema) as pandas_sql: + return pandas_sql.has_table(table_name) + + +table_exists = has_table + + +def pandasSQL_builder( + con, + schema: str | None = None, + need_transaction: bool = False, +) -> PandasSQL: + """ + Convenience function to return the correct PandasSQL subclass based on the + provided parameters. Also creates a sqlalchemy connection and transaction + if necessary. + """ + import sqlite3 + + if isinstance(con, sqlite3.Connection) or con is None: + return SQLiteDatabase(con) + + sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore") + + if isinstance(con, str) and sqlalchemy is None: + raise ImportError("Using URI string without sqlalchemy installed.") + + if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Connectable)): + return SQLDatabase(con, schema, need_transaction) + + warnings.warn( + "pandas only supports SQLAlchemy connectable (engine/connection) or " + "database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 " + "objects are not tested. Please consider using SQLAlchemy.", + UserWarning, + stacklevel=find_stack_level(), + ) + return SQLiteDatabase(con) + + +class SQLTable(PandasObject): + """ + For mapping Pandas tables to SQL tables. + Uses fact that table is reflected by SQLAlchemy to + do better type conversions. + Also holds various flags needed to avoid having to + pass them between functions all the time. 
+ """ + + # TODO: support for multiIndex + + def __init__( + self, + name: str, + pandas_sql_engine, + frame=None, + index: bool | str | list[str] | None = True, + if_exists: Literal["fail", "replace", "append"] = "fail", + prefix: str = "pandas", + index_label=None, + schema=None, + keys=None, + dtype: DtypeArg | None = None, + ) -> None: + self.name = name + self.pd_sql = pandas_sql_engine + self.prefix = prefix + self.frame = frame + self.index = self._index_name(index, index_label) + self.schema = schema + self.if_exists = if_exists + self.keys = keys + self.dtype = dtype + + if frame is not None: + # We want to initialize based on a dataframe + self.table = self._create_table_setup() + else: + # no data provided, read-only mode + self.table = self.pd_sql.get_table(self.name, self.schema) + + if self.table is None: + raise ValueError(f"Could not init table '{name}'") + + if not len(self.name): + raise ValueError("Empty table name specified") + + def exists(self): + return self.pd_sql.has_table(self.name, self.schema) + + def sql_schema(self) -> str: + from sqlalchemy.schema import CreateTable + + return str(CreateTable(self.table).compile(self.pd_sql.con)) + + def _execute_create(self) -> None: + # Inserting table into database, add to MetaData object + self.table = self.table.to_metadata(self.pd_sql.meta) + with self.pd_sql.run_transaction(): + self.table.create(bind=self.pd_sql.con) + + def create(self) -> None: + if self.exists(): + if self.if_exists == "fail": + raise ValueError(f"Table '{self.name}' already exists.") + if self.if_exists == "replace": + self.pd_sql.drop_table(self.name, self.schema) + self._execute_create() + elif self.if_exists == "append": + pass + else: + raise ValueError(f"'{self.if_exists}' is not valid for if_exists") + else: + self._execute_create() + + def _execute_insert(self, conn, keys: list[str], data_iter) -> int: + """ + Execute SQL statement inserting data + + Parameters + ---------- + conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection + keys : list of str + Column names + data_iter : generator of list + Each item contains a list of values to be inserted + """ + data = [dict(zip(keys, row)) for row in data_iter] + result = conn.execute(self.table.insert(), data) + return result.rowcount + + def _execute_insert_multi(self, conn, keys: list[str], data_iter) -> int: + """ + Alternative to _execute_insert for DBs support multivalue INSERT. + + Note: multi-value insert is usually faster for analytics DBs + and tables containing a few columns + but performance degrades quickly with increase of columns. 
+ """ + + from sqlalchemy import insert + + data = [dict(zip(keys, row)) for row in data_iter] + stmt = insert(self.table).values(data) + result = conn.execute(stmt) + return result.rowcount + + def insert_data(self) -> tuple[list[str], list[np.ndarray]]: + if self.index is not None: + temp = self.frame.copy() + temp.index.names = self.index + try: + temp.reset_index(inplace=True) + except ValueError as err: + raise ValueError(f"duplicate name in index/columns: {err}") from err + else: + temp = self.frame + + column_names = list(map(str, temp.columns)) + ncols = len(column_names) + # this just pre-allocates the list: None's will be replaced with ndarrays + # error: List item 0 has incompatible type "None"; expected "ndarray" + data_list: list[np.ndarray] = [None] * ncols # type: ignore[list-item] + + for i, (_, ser) in enumerate(temp.items()): + if ser.dtype.kind == "M": + if isinstance(ser._values, ArrowExtensionArray): + import pyarrow as pa + + if pa.types.is_date(ser.dtype.pyarrow_dtype): + # GH#53854 to_pydatetime not supported for pyarrow date dtypes + d = ser._values.to_numpy(dtype=object) + else: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=FutureWarning) + # GH#52459 to_pydatetime will return Index[object] + d = np.asarray(ser.dt.to_pydatetime(), dtype=object) + else: + d = ser._values.to_pydatetime() + elif ser.dtype.kind == "m": + vals = ser._values + if isinstance(vals, ArrowExtensionArray): + vals = vals.to_numpy(dtype=np.dtype("m8[ns]")) + # store as integers, see GH#6921, GH#7076 + d = vals.view("i8").astype(object) + else: + d = ser._values.astype(object) + + assert isinstance(d, np.ndarray), type(d) + + if ser._can_hold_na: + # Note: this will miss timedeltas since they are converted to int + mask = isna(d) + d[mask] = None + + data_list[i] = d + + return column_names, data_list + + def insert( + self, + chunksize: int | None = None, + method: Literal["multi"] | Callable | None = None, + ) -> int | None: + # set insert method + if method is None: + exec_insert = self._execute_insert + elif method == "multi": + exec_insert = self._execute_insert_multi + elif callable(method): + exec_insert = partial(method, self) + else: + raise ValueError(f"Invalid parameter `method`: {method}") + + keys, data_list = self.insert_data() + + nrows = len(self.frame) + + if nrows == 0: + return 0 + + if chunksize is None: + chunksize = nrows + elif chunksize == 0: + raise ValueError("chunksize argument should be non-zero") + + chunks = (nrows // chunksize) + 1 + total_inserted = None + with self.pd_sql.run_transaction() as conn: + for i in range(chunks): + start_i = i * chunksize + end_i = min((i + 1) * chunksize, nrows) + if start_i >= end_i: + break + + chunk_iter = zip(*(arr[start_i:end_i] for arr in data_list)) + num_inserted = exec_insert(conn, keys, chunk_iter) + # GH 46891 + if num_inserted is not None: + if total_inserted is None: + total_inserted = num_inserted + else: + total_inserted += num_inserted + return total_inserted + + def _query_iterator( + self, + result, + exit_stack: ExitStack, + chunksize: int | None, + columns, + coerce_float: bool = True, + parse_dates=None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ): + """Return generator through chunked result set.""" + has_read_data = False + with exit_stack: + while True: + data = result.fetchmany(chunksize) + if not data: + if not has_read_data: + yield DataFrame.from_records( + [], columns=columns, coerce_float=coerce_float + ) + break + + has_read_data = True + self.frame = 
_convert_arrays_to_dataframe( + data, columns, coerce_float, dtype_backend + ) + + self._harmonize_columns( + parse_dates=parse_dates, dtype_backend=dtype_backend + ) + + if self.index is not None: + self.frame.set_index(self.index, inplace=True) + + yield self.frame + + def read( + self, + exit_stack: ExitStack, + coerce_float: bool = True, + parse_dates=None, + columns=None, + chunksize: int | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> DataFrame | Iterator[DataFrame]: + from sqlalchemy import select + + if columns is not None and len(columns) > 0: + cols = [self.table.c[n] for n in columns] + if self.index is not None: + for idx in self.index[::-1]: + cols.insert(0, self.table.c[idx]) + sql_select = select(*cols) + else: + sql_select = select(self.table) + result = self.pd_sql.execute(sql_select) + column_names = result.keys() + + if chunksize is not None: + return self._query_iterator( + result, + exit_stack, + chunksize, + column_names, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype_backend=dtype_backend, + ) + else: + data = result.fetchall() + self.frame = _convert_arrays_to_dataframe( + data, column_names, coerce_float, dtype_backend + ) + + self._harmonize_columns( + parse_dates=parse_dates, dtype_backend=dtype_backend + ) + + if self.index is not None: + self.frame.set_index(self.index, inplace=True) + + return self.frame + + def _index_name(self, index, index_label): + # for writing: index=True to include index in sql table + if index is True: + nlevels = self.frame.index.nlevels + # if index_label is specified, set this as index name(s) + if index_label is not None: + if not isinstance(index_label, list): + index_label = [index_label] + if len(index_label) != nlevels: + raise ValueError( + "Length of 'index_label' should match number of " + f"levels, which is {nlevels}" + ) + return index_label + # return the used column labels for the index columns + if ( + nlevels == 1 + and "index" not in self.frame.columns + and self.frame.index.name is None + ): + return ["index"] + else: + return com.fill_missing_names(self.frame.index.names) + + # for reading: index=(list of) string to specify column to set as index + elif isinstance(index, str): + return [index] + elif isinstance(index, list): + return index + else: + return None + + def _get_column_names_and_types(self, dtype_mapper): + column_names_and_types = [] + if self.index is not None: + for i, idx_label in enumerate(self.index): + idx_type = dtype_mapper(self.frame.index._get_level_values(i)) + column_names_and_types.append((str(idx_label), idx_type, True)) + + column_names_and_types += [ + (str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False) + for i in range(len(self.frame.columns)) + ] + + return column_names_and_types + + def _create_table_setup(self): + from sqlalchemy import ( + Column, + PrimaryKeyConstraint, + Table, + ) + from sqlalchemy.schema import MetaData + + column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type) + + columns: list[Any] = [ + Column(name, typ, index=is_index) + for name, typ, is_index in column_names_and_types + ] + + if self.keys is not None: + if not is_list_like(self.keys): + keys = [self.keys] + else: + keys = self.keys + pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk") + columns.append(pkc) + + schema = self.schema or self.pd_sql.meta.schema + + # At this point, attach to new metadata, only attach to self.meta + # once table is created. 
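+        # (Using a throwaway MetaData here means a failed CREATE cannot leave
+        # a half-registered table behind on self.pd_sql.meta;
+        # _execute_create() re-attaches the finished table via
+        # Table.to_metadata().)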
+ meta = MetaData() + return Table(self.name, meta, *columns, schema=schema) + + def _harmonize_columns( + self, + parse_dates=None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> None: + """ + Make the DataFrame's column types align with the SQL table + column types. + Need to work around limited NA value support. Floats are always + fine, ints must always be floats if there are Null values. + Booleans are hard because converting bool column with None replaces + all Nones with false. Therefore only convert bool if there are no + NA values. + Datetimes should already be converted to np.datetime64 if supported, + but here we also force conversion if required. + """ + parse_dates = _process_parse_dates_argument(parse_dates) + + for sql_col in self.table.columns: + col_name = sql_col.name + try: + df_col = self.frame[col_name] + + # Handle date parsing upfront; don't try to convert columns + # twice + if col_name in parse_dates: + try: + fmt = parse_dates[col_name] + except TypeError: + fmt = None + self.frame[col_name] = _handle_date_column(df_col, format=fmt) + continue + + # the type the dataframe column should have + col_type = self._get_dtype(sql_col.type) + + if ( + col_type is datetime + or col_type is date + or col_type is DatetimeTZDtype + ): + # Convert tz-aware Datetime SQL columns to UTC + utc = col_type is DatetimeTZDtype + self.frame[col_name] = _handle_date_column(df_col, utc=utc) + elif dtype_backend == "numpy" and col_type is float: + # floats support NA, can always convert! + self.frame[col_name] = df_col.astype(col_type, copy=False) + + elif dtype_backend == "numpy" and len(df_col) == df_col.count(): + # No NA values, can convert ints and bools + if col_type is np.dtype("int64") or col_type is bool: + self.frame[col_name] = df_col.astype(col_type, copy=False) + except KeyError: + pass # this column not in results + + def _sqlalchemy_type(self, col: Index | Series): + dtype: DtypeArg = self.dtype or {} + if is_dict_like(dtype): + dtype = cast(dict, dtype) + if col.name in dtype: + return dtype[col.name] + + # Infer type of column, while ignoring missing values. + # Needed for inserting typed data containing NULLs, GH 8778. + col_type = lib.infer_dtype(col, skipna=True) + + from sqlalchemy.types import ( + TIMESTAMP, + BigInteger, + Boolean, + Date, + DateTime, + Float, + Integer, + SmallInteger, + Text, + Time, + ) + + if col_type in ("datetime64", "datetime"): + # GH 9086: TIMESTAMP is the suggested type if the column contains + # timezone information + try: + # error: Item "Index" of "Union[Index, Series]" has no attribute "dt" + if col.dt.tz is not None: # type: ignore[union-attr] + return TIMESTAMP(timezone=True) + except AttributeError: + # The column is actually a DatetimeIndex + # GH 26761 or an Index with date-like data e.g. 
9999-01-01 + if getattr(col, "tz", None) is not None: + return TIMESTAMP(timezone=True) + return DateTime + if col_type == "timedelta64": + warnings.warn( + "the 'timedelta' type is not supported, and will be " + "written as integer values (ns frequency) to the database.", + UserWarning, + stacklevel=find_stack_level(), + ) + return BigInteger + elif col_type == "floating": + if col.dtype == "float32": + return Float(precision=23) + else: + return Float(precision=53) + elif col_type == "integer": + # GH35076 Map pandas integer to optimal SQLAlchemy integer type + if col.dtype.name.lower() in ("int8", "uint8", "int16"): + return SmallInteger + elif col.dtype.name.lower() in ("uint16", "int32"): + return Integer + elif col.dtype.name.lower() == "uint64": + raise ValueError("Unsigned 64 bit integer datatype is not supported") + else: + return BigInteger + elif col_type == "boolean": + return Boolean + elif col_type == "date": + return Date + elif col_type == "time": + return Time + elif col_type == "complex": + raise ValueError("Complex datatypes not supported") + + return Text + + def _get_dtype(self, sqltype): + from sqlalchemy.types import ( + TIMESTAMP, + Boolean, + Date, + DateTime, + Float, + Integer, + ) + + if isinstance(sqltype, Float): + return float + elif isinstance(sqltype, Integer): + # TODO: Refine integer size. + return np.dtype("int64") + elif isinstance(sqltype, TIMESTAMP): + # we have a timezone capable type + if not sqltype.timezone: + return datetime + return DatetimeTZDtype + elif isinstance(sqltype, DateTime): + # Caution: np.datetime64 is also a subclass of np.number. + return datetime + elif isinstance(sqltype, Date): + return date + elif isinstance(sqltype, Boolean): + return bool + return object + + +class PandasSQL(PandasObject, ABC): + """ + Subclasses Should define read_query and to_sql. 
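+
+    A minimal sketch of how the two concrete subclasses (``SQLDatabase`` for
+    SQLAlchemy connectables, ``SQLiteDatabase`` for the DBAPI2 fallback) are
+    reached (illustrative; ``con`` is any supported connection object):
+
+    >>> with pandasSQL_builder(con) as pandas_sql:  # doctest: +SKIP
+    ...     pandas_sql.read_query("SELECT 1 AS x")  # doctest: +SKIP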
+ """ + + def __enter__(self) -> Self: + return self + + def __exit__(self, *args) -> None: + pass + + def read_table( + self, + table_name: str, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + parse_dates=None, + columns=None, + schema: str | None = None, + chunksize: int | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> DataFrame | Iterator[DataFrame]: + raise NotImplementedError + + @abstractmethod + def read_query( + self, + sql: str, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + parse_dates=None, + params=None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> DataFrame | Iterator[DataFrame]: + pass + + @abstractmethod + def to_sql( + self, + frame, + name: str, + if_exists: Literal["fail", "replace", "append"] = "fail", + index: bool = True, + index_label=None, + schema=None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + method: Literal["multi"] | Callable | None = None, + engine: str = "auto", + **engine_kwargs, + ) -> int | None: + pass + + @abstractmethod + def execute(self, sql: str | Select | TextClause, params=None): + pass + + @abstractmethod + def has_table(self, name: str, schema: str | None = None) -> bool: + pass + + @abstractmethod + def _create_sql_schema( + self, + frame: DataFrame, + table_name: str, + keys: list[str] | None = None, + dtype: DtypeArg | None = None, + schema: str | None = None, + ): + pass + + +class BaseEngine: + def insert_records( + self, + table: SQLTable, + con, + frame, + name: str, + index: bool | str | list[str] | None = True, + schema=None, + chunksize: int | None = None, + method=None, + **engine_kwargs, + ) -> int | None: + """ + Inserts data into already-prepared table + """ + raise AbstractMethodError(self) + + +class SQLAlchemyEngine(BaseEngine): + def __init__(self) -> None: + import_optional_dependency( + "sqlalchemy", extra="sqlalchemy is required for SQL support." + ) + + def insert_records( + self, + table: SQLTable, + con, + frame, + name: str, + index: bool | str | list[str] | None = True, + schema=None, + chunksize: int | None = None, + method=None, + **engine_kwargs, + ) -> int | None: + from sqlalchemy import exc + + try: + return table.insert(chunksize=chunksize, method=method) + except exc.StatementError as err: + # GH34431 + # https://stackoverflow.com/a/67358288/6067848 + msg = r"""(\(1054, "Unknown column 'inf(e0)?' 
in 'field list'"\))(?# + )|inf can not be used with MySQL""" + err_text = str(err.orig) + if re.search(msg, err_text): + raise ValueError("inf cannot be used with MySQL") from err + raise err + + +def get_engine(engine: str) -> BaseEngine: + """return our implementation""" + if engine == "auto": + engine = get_option("io.sql.engine") + + if engine == "auto": + # try engines in this order + engine_classes = [SQLAlchemyEngine] + + error_msgs = "" + for engine_class in engine_classes: + try: + return engine_class() + except ImportError as err: + error_msgs += "\n - " + str(err) + + raise ImportError( + "Unable to find a usable engine; " + "tried using: 'sqlalchemy'.\n" + "A suitable version of " + "sqlalchemy is required for sql I/O " + "support.\n" + "Trying to import the above resulted in these errors:" + f"{error_msgs}" + ) + + if engine == "sqlalchemy": + return SQLAlchemyEngine() + + raise ValueError("engine must be one of 'auto', 'sqlalchemy'") + + +class SQLDatabase(PandasSQL): + """ + This class enables conversion between DataFrame and SQL databases + using SQLAlchemy to handle DataBase abstraction. + + Parameters + ---------- + con : SQLAlchemy Connectable or URI string. + Connectable to connect with the database. Using SQLAlchemy makes it + possible to use any DB supported by that library. + schema : string, default None + Name of SQL schema in database to write to (if database flavor + supports this). If None, use default schema (default). + need_transaction : bool, default False + If True, SQLDatabase will create a transaction. + + """ + + def __init__( + self, con, schema: str | None = None, need_transaction: bool = False + ) -> None: + from sqlalchemy import create_engine + from sqlalchemy.engine import Engine + from sqlalchemy.schema import MetaData + + # self.exit_stack cleans up the Engine and Connection and commits the + # transaction if any of those objects was created below. + # Cleanup happens either in self.__exit__ or at the end of the iterator + # returned by read_sql when chunksize is not None. + self.exit_stack = ExitStack() + if isinstance(con, str): + con = create_engine(con) + self.exit_stack.callback(con.dispose) + if isinstance(con, Engine): + con = self.exit_stack.enter_context(con.connect()) + if need_transaction and not con.in_transaction(): + self.exit_stack.enter_context(con.begin()) + self.con = con + self.meta = MetaData(schema=schema) + self.returns_generator = False + + def __exit__(self, *args) -> None: + if not self.returns_generator: + self.exit_stack.close() + + @contextmanager + def run_transaction(self): + if not self.con.in_transaction(): + with self.con.begin(): + yield self.con + else: + yield self.con + + def execute(self, sql: str | Select | TextClause, params=None): + """Simple passthrough to SQLAlchemy connectable""" + args = [] if params is None else [params] + if isinstance(sql, str): + return self.con.exec_driver_sql(sql, *args) + return self.con.execute(sql, *args) + + def read_table( + self, + table_name: str, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + parse_dates=None, + columns=None, + schema: str | None = None, + chunksize: int | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> DataFrame | Iterator[DataFrame]: + """ + Read SQL database table into a DataFrame. + + Parameters + ---------- + table_name : str + Name of SQL table in database. + index_col : string, optional, default: None + Column to set as index. 
+ coerce_float : bool, default True + Attempts to convert values of non-string, non-numeric objects + (like decimal.Decimal) to floating point. This can result in + loss of precision. + parse_dates : list or dict, default: None + - List of column names to parse as dates. + - Dict of ``{column_name: format string}`` where format string is + strftime compatible in case of parsing string times, or is one of + (D, s, ns, ms, us) in case of parsing integer timestamps. + - Dict of ``{column_name: arg}``, where the arg corresponds + to the keyword arguments of :func:`pandas.to_datetime`. + Especially useful with databases without native Datetime support, + such as SQLite. + columns : list, default: None + List of column names to select from SQL table. + schema : string, default None + Name of SQL schema in database to query (if database flavor + supports this). If specified, this overwrites the default + schema of the SQL database object. + chunksize : int, default None + If specified, return an iterator where `chunksize` is the number + of rows to include in each chunk. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + Returns + ------- + DataFrame + + See Also + -------- + pandas.read_sql_table + SQLDatabase.read_query + + """ + self.meta.reflect(bind=self.con, only=[table_name], views=True) + table = SQLTable(table_name, self, index=index_col, schema=schema) + if chunksize is not None: + self.returns_generator = True + return table.read( + self.exit_stack, + coerce_float=coerce_float, + parse_dates=parse_dates, + columns=columns, + chunksize=chunksize, + dtype_backend=dtype_backend, + ) + + @staticmethod + def _query_iterator( + result, + exit_stack: ExitStack, + chunksize: int, + columns, + index_col=None, + coerce_float: bool = True, + parse_dates=None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ): + """Return generator through chunked result set""" + has_read_data = False + with exit_stack: + while True: + data = result.fetchmany(chunksize) + if not data: + if not has_read_data: + yield _wrap_result( + [], + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype=dtype, + dtype_backend=dtype_backend, + ) + break + + has_read_data = True + yield _wrap_result( + data, + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype=dtype, + dtype_backend=dtype_backend, + ) + + def read_query( + self, + sql: str, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + parse_dates=None, + params=None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> DataFrame | Iterator[DataFrame]: + """ + Read SQL query into a DataFrame. + + Parameters + ---------- + sql : str + SQL query to be executed. + index_col : string, optional, default: None + Column name to use as index for the returned DataFrame object. + coerce_float : bool, default True + Attempt to convert values of non-string, non-numeric objects (like + decimal.Decimal) to floating point, useful for SQL result sets. 
+ params : list, tuple or dict, optional, default: None + List of parameters to pass to execute method. The syntax used + to pass parameters is database driver dependent. Check your + database driver documentation for which of the five syntax styles, + described in PEP 249's paramstyle, is supported. + Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'} + parse_dates : list or dict, default: None + - List of column names to parse as dates. + - Dict of ``{column_name: format string}`` where format string is + strftime compatible in case of parsing string times, or is one of + (D, s, ns, ms, us) in case of parsing integer timestamps. + - Dict of ``{column_name: arg dict}``, where the arg dict + corresponds to the keyword arguments of + :func:`pandas.to_datetime` Especially useful with databases + without native Datetime support, such as SQLite. + chunksize : int, default None + If specified, return an iterator where `chunksize` is the number + of rows to include in each chunk. + dtype : Type name or dict of columns + Data type for data or columns. E.g. np.float64 or + {'a': np.float64, 'b': np.int32, 'c': 'Int64'} + + .. versionadded:: 1.3.0 + + Returns + ------- + DataFrame + + See Also + -------- + read_sql_table : Read SQL database table into a DataFrame. + read_sql + + """ + result = self.execute(sql, params) + columns = result.keys() + + if chunksize is not None: + self.returns_generator = True + return self._query_iterator( + result, + self.exit_stack, + chunksize, + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype=dtype, + dtype_backend=dtype_backend, + ) + else: + data = result.fetchall() + frame = _wrap_result( + data, + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype=dtype, + dtype_backend=dtype_backend, + ) + return frame + + read_sql = read_query + + def prep_table( + self, + frame, + name: str, + if_exists: Literal["fail", "replace", "append"] = "fail", + index: bool | str | list[str] | None = True, + index_label=None, + schema=None, + dtype: DtypeArg | None = None, + ) -> SQLTable: + """ + Prepares table in the database for data insertion. Creates it if needed, etc. + """ + if dtype: + if not is_dict_like(dtype): + # error: Value expression in dictionary comprehension has incompatible + # type "Union[ExtensionDtype, str, dtype[Any], Type[object], + # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]], + # Type[str], Type[float], Type[int], Type[complex], Type[bool], + # Type[object]]]]"; expected type "Union[ExtensionDtype, str, + # dtype[Any], Type[object]]" + dtype = {col_name: dtype for col_name in frame} # type: ignore[misc] + else: + dtype = cast(dict, dtype) + + from sqlalchemy.types import TypeEngine + + for col, my_type in dtype.items(): + if isinstance(my_type, type) and issubclass(my_type, TypeEngine): + pass + elif isinstance(my_type, TypeEngine): + pass + else: + raise ValueError(f"The type of {col} is not a SQLAlchemy type") + + table = SQLTable( + name, + self, + frame=frame, + index=index, + if_exists=if_exists, + index_label=index_label, + schema=schema, + dtype=dtype, + ) + table.create() + return table + + def check_case_sensitive( + self, + name: str, + schema: str | None, + ) -> None: + """ + Checks table name for issues with case-sensitivity. + Method is called after data is inserted. 
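+
+        For example (illustrative), ``to_sql(..., name="TestTable")`` on a
+        backend that folds unquoted identifiers to lower case may create
+        ``testtable`` instead; that is the situation the warning below
+        reports.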
+ """ + if not name.isdigit() and not name.islower(): + # check for potentially case sensitivity issues (GH7815) + # Only check when name is not a number and name is not lower case + from sqlalchemy import inspect as sqlalchemy_inspect + + insp = sqlalchemy_inspect(self.con) + table_names = insp.get_table_names(schema=schema or self.meta.schema) + if name not in table_names: + msg = ( + f"The provided table name '{name}' is not found exactly as " + "such in the database after writing the table, possibly " + "due to case sensitivity issues. Consider using lower " + "case table names." + ) + warnings.warn( + msg, + UserWarning, + stacklevel=find_stack_level(), + ) + + def to_sql( + self, + frame, + name: str, + if_exists: Literal["fail", "replace", "append"] = "fail", + index: bool = True, + index_label=None, + schema: str | None = None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + method: Literal["multi"] | Callable | None = None, + engine: str = "auto", + **engine_kwargs, + ) -> int | None: + """ + Write records stored in a DataFrame to a SQL database. + + Parameters + ---------- + frame : DataFrame + name : string + Name of SQL table. + if_exists : {'fail', 'replace', 'append'}, default 'fail' + - fail: If table exists, do nothing. + - replace: If table exists, drop it, recreate it, and insert data. + - append: If table exists, insert data. Create if does not exist. + index : boolean, default True + Write DataFrame index as a column. + index_label : string or sequence, default None + Column label for index column(s). If None is given (default) and + `index` is True, then the index names are used. + A sequence should be given if the DataFrame uses MultiIndex. + schema : string, default None + Name of SQL schema in database to write to (if database flavor + supports this). If specified, this overwrites the default + schema of the SQLDatabase object. + chunksize : int, default None + If not None, then rows will be written in batches of this size at a + time. If None, all rows will be written at once. + dtype : single type or dict of column name to SQL type, default None + Optional specifying the datatype for columns. The SQL type should + be a SQLAlchemy type. If all columns are of the same type, one + single value can be used. + method : {None', 'multi', callable}, default None + Controls the SQL insertion clause used: + + * None : Uses standard SQL ``INSERT`` clause (one per row). + * 'multi': Pass multiple values in a single ``INSERT`` clause. + * callable with signature ``(pd_table, conn, keys, data_iter)``. + + Details and a sample callable implementation can be found in the + section :ref:`insert method `. + engine : {'auto', 'sqlalchemy'}, default 'auto' + SQL engine library to use. If 'auto', then the option + ``io.sql.engine`` is used. The default ``io.sql.engine`` + behavior is 'sqlalchemy' + + .. versionadded:: 1.3.0 + + **engine_kwargs + Any additional kwargs are passed to the engine. 
+ """ + sql_engine = get_engine(engine) + + table = self.prep_table( + frame=frame, + name=name, + if_exists=if_exists, + index=index, + index_label=index_label, + schema=schema, + dtype=dtype, + ) + + total_inserted = sql_engine.insert_records( + table=table, + con=self.con, + frame=frame, + name=name, + index=index, + schema=schema, + chunksize=chunksize, + method=method, + **engine_kwargs, + ) + + self.check_case_sensitive(name=name, schema=schema) + return total_inserted + + @property + def tables(self): + return self.meta.tables + + def has_table(self, name: str, schema: str | None = None) -> bool: + from sqlalchemy import inspect as sqlalchemy_inspect + + insp = sqlalchemy_inspect(self.con) + return insp.has_table(name, schema or self.meta.schema) + + def get_table(self, table_name: str, schema: str | None = None) -> Table: + from sqlalchemy import ( + Numeric, + Table, + ) + + schema = schema or self.meta.schema + tbl = Table(table_name, self.meta, autoload_with=self.con, schema=schema) + for column in tbl.columns: + if isinstance(column.type, Numeric): + column.type.asdecimal = False + return tbl + + def drop_table(self, table_name: str, schema: str | None = None) -> None: + schema = schema or self.meta.schema + if self.has_table(table_name, schema): + self.meta.reflect( + bind=self.con, only=[table_name], schema=schema, views=True + ) + with self.run_transaction(): + self.get_table(table_name, schema).drop(bind=self.con) + self.meta.clear() + + def _create_sql_schema( + self, + frame: DataFrame, + table_name: str, + keys: list[str] | None = None, + dtype: DtypeArg | None = None, + schema: str | None = None, + ): + table = SQLTable( + table_name, + self, + frame=frame, + index=False, + keys=keys, + dtype=dtype, + schema=schema, + ) + return str(table.sql_schema()) + + +# ---- SQL without SQLAlchemy --- +# sqlite-specific sql strings and handler class +# dictionary used for readability purposes +_SQL_TYPES = { + "string": "TEXT", + "floating": "REAL", + "integer": "INTEGER", + "datetime": "TIMESTAMP", + "date": "DATE", + "time": "TIME", + "boolean": "INTEGER", +} + + +def _get_unicode_name(name: object): + try: + uname = str(name).encode("utf-8", "strict").decode("utf-8") + except UnicodeError as err: + raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err + return uname + + +def _get_valid_sqlite_name(name: object): + # See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\ + # -for-sqlite-table-column-names-in-python + # Ensure the string can be encoded as UTF-8. + # Ensure the string does not include any NUL characters. + # Replace all " with "". + # Wrap the entire thing in double quotes. + + uname = _get_unicode_name(name) + if not len(uname): + raise ValueError("Empty table or column name specified") + + nul_index = uname.find("\x00") + if nul_index >= 0: + raise ValueError("SQLite identifier cannot contain NULs") + return '"' + uname.replace('"', '""') + '"' + + +class SQLiteTable(SQLTable): + """ + Patch the SQLTable for fallback support. + Instead of a table variable just use the Create Table statement. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self._register_date_adapters() + + def _register_date_adapters(self) -> None: + # GH 8341 + # register an adapter callable for datetime.time object + import sqlite3 + + # this will transform time(12,34,56,789) into '12:34:56.000789' + # (this is what sqlalchemy does) + def _adapt_time(t) -> str: + # This is faster than strftime + return f"{t.hour:02d}:{t.minute:02d}:{t.second:02d}.{t.microsecond:06d}" + + # Also register adapters for date/datetime and co + # xref https://docs.python.org/3.12/library/sqlite3.html#adapter-and-converter-recipes + # Python 3.12+ doesn't auto-register adapters for us anymore + + adapt_date_iso = lambda val: val.isoformat() + adapt_datetime_iso = lambda val: val.isoformat(" ") + + sqlite3.register_adapter(time, _adapt_time) + + sqlite3.register_adapter(date, adapt_date_iso) + sqlite3.register_adapter(datetime, adapt_datetime_iso) + + convert_date = lambda val: date.fromisoformat(val.decode()) + convert_timestamp = lambda val: datetime.fromisoformat(val.decode()) + + sqlite3.register_converter("date", convert_date) + sqlite3.register_converter("timestamp", convert_timestamp) + + def sql_schema(self) -> str: + return str(";\n".join(self.table)) + + def _execute_create(self) -> None: + with self.pd_sql.run_transaction() as conn: + for stmt in self.table: + conn.execute(stmt) + + def insert_statement(self, *, num_rows: int) -> str: + names = list(map(str, self.frame.columns)) + wld = "?" # wildcard char + escape = _get_valid_sqlite_name + + if self.index is not None: + for idx in self.index[::-1]: + names.insert(0, idx) + + bracketed_names = [escape(column) for column in names] + col_names = ",".join(bracketed_names) + + row_wildcards = ",".join([wld] * len(names)) + wildcards = ",".join([f"({row_wildcards})" for _ in range(num_rows)]) + insert_statement = ( + f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}" + ) + return insert_statement + + def _execute_insert(self, conn, keys, data_iter) -> int: + data_list = list(data_iter) + conn.executemany(self.insert_statement(num_rows=1), data_list) + return conn.rowcount + + def _execute_insert_multi(self, conn, keys, data_iter) -> int: + data_list = list(data_iter) + flattened_data = [x for row in data_list for x in row] + conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data) + return conn.rowcount + + def _create_table_setup(self): + """ + Return a list of SQL statements that creates a table reflecting the + structure of a DataFrame. The first entry will be a CREATE TABLE + statement while the rest will be CREATE INDEX statements. + """ + column_names_and_types = self._get_column_names_and_types(self._sql_type_name) + escape = _get_valid_sqlite_name + + create_tbl_stmts = [ + escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types + ] + + if self.keys is not None and len(self.keys): + if not is_list_like(self.keys): + keys = [self.keys] + else: + keys = self.keys + cnames_br = ", ".join([escape(c) for c in keys]) + create_tbl_stmts.append( + f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})" + ) + if self.schema: + schema_name = self.schema + "." 
+ else: + schema_name = "" + create_stmts = [ + "CREATE TABLE " + + schema_name + + escape(self.name) + + " (\n" + + ",\n ".join(create_tbl_stmts) + + "\n)" + ] + + ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index] + if len(ix_cols): + cnames = "_".join(ix_cols) + cnames_br = ",".join([escape(c) for c in ix_cols]) + create_stmts.append( + "CREATE INDEX " + + escape("ix_" + self.name + "_" + cnames) + + "ON " + + escape(self.name) + + " (" + + cnames_br + + ")" + ) + + return create_stmts + + def _sql_type_name(self, col): + dtype: DtypeArg = self.dtype or {} + if is_dict_like(dtype): + dtype = cast(dict, dtype) + if col.name in dtype: + return dtype[col.name] + + # Infer type of column, while ignoring missing values. + # Needed for inserting typed data containing NULLs, GH 8778. + col_type = lib.infer_dtype(col, skipna=True) + + if col_type == "timedelta64": + warnings.warn( + "the 'timedelta' type is not supported, and will be " + "written as integer values (ns frequency) to the database.", + UserWarning, + stacklevel=find_stack_level(), + ) + col_type = "integer" + + elif col_type == "datetime64": + col_type = "datetime" + + elif col_type == "empty": + col_type = "string" + + elif col_type == "complex": + raise ValueError("Complex datatypes not supported") + + if col_type not in _SQL_TYPES: + col_type = "string" + + return _SQL_TYPES[col_type] + + +class SQLiteDatabase(PandasSQL): + """ + Version of SQLDatabase to support SQLite connections (fallback without + SQLAlchemy). This should only be used internally. + + Parameters + ---------- + con : sqlite connection object + + """ + + def __init__(self, con) -> None: + self.con = con + + @contextmanager + def run_transaction(self): + cur = self.con.cursor() + try: + yield cur + self.con.commit() + except Exception: + self.con.rollback() + raise + finally: + cur.close() + + def execute(self, sql: str | Select | TextClause, params=None): + if not isinstance(sql, str): + raise TypeError("Query must be a string unless using sqlalchemy.") + args = [] if params is None else [params] + cur = self.con.cursor() + try: + cur.execute(sql, *args) + return cur + except Exception as exc: + try: + self.con.rollback() + except Exception as inner_exc: # pragma: no cover + ex = DatabaseError( + f"Execution failed on sql: {sql}\n{exc}\nunable to rollback" + ) + raise ex from inner_exc + + ex = DatabaseError(f"Execution failed on sql '{sql}': {exc}") + raise ex from exc + + @staticmethod + def _query_iterator( + cursor, + chunksize: int, + columns, + index_col=None, + coerce_float: bool = True, + parse_dates=None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ): + """Return generator through chunked result set""" + has_read_data = False + while True: + data = cursor.fetchmany(chunksize) + if type(data) == tuple: + data = list(data) + if not data: + cursor.close() + if not has_read_data: + result = DataFrame.from_records( + [], columns=columns, coerce_float=coerce_float + ) + if dtype: + result = result.astype(dtype) + yield result + break + + has_read_data = True + yield _wrap_result( + data, + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype=dtype, + dtype_backend=dtype_backend, + ) + + def read_query( + self, + sql, + index_col=None, + coerce_float: bool = True, + parse_dates=None, + params=None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> DataFrame 
| Iterator[DataFrame]: + cursor = self.execute(sql, params) + columns = [col_desc[0] for col_desc in cursor.description] + + if chunksize is not None: + return self._query_iterator( + cursor, + chunksize, + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype=dtype, + dtype_backend=dtype_backend, + ) + else: + data = self._fetchall_as_list(cursor) + cursor.close() + + frame = _wrap_result( + data, + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype=dtype, + dtype_backend=dtype_backend, + ) + return frame + + def _fetchall_as_list(self, cur): + result = cur.fetchall() + if not isinstance(result, list): + result = list(result) + return result + + def to_sql( + self, + frame, + name: str, + if_exists: str = "fail", + index: bool = True, + index_label=None, + schema=None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + method: Literal["multi"] | Callable | None = None, + engine: str = "auto", + **engine_kwargs, + ) -> int | None: + """ + Write records stored in a DataFrame to a SQL database. + + Parameters + ---------- + frame: DataFrame + name: string + Name of SQL table. + if_exists: {'fail', 'replace', 'append'}, default 'fail' + fail: If table exists, do nothing. + replace: If table exists, drop it, recreate it, and insert data. + append: If table exists, insert data. Create if it does not exist. + index : bool, default True + Write DataFrame index as a column + index_label : string or sequence, default None + Column label for index column(s). If None is given (default) and + `index` is True, then the index names are used. + A sequence should be given if the DataFrame uses MultiIndex. + schema : string, default None + Ignored parameter included for compatibility with SQLAlchemy + version of ``to_sql``. + chunksize : int, default None + If not None, then rows will be written in batches of this + size at a time. If None, all rows will be written at once. + dtype : single type or dict of column name to SQL type, default None + Optional specifying the datatype for columns. The SQL type should + be a string. If all columns are of the same type, one single value + can be used. + method : {None, 'multi', callable}, default None + Controls the SQL insertion clause used: + + * None : Uses standard SQL ``INSERT`` clause (one per row). + * 'multi': Pass multiple values in a single ``INSERT`` clause. + * callable with signature ``(pd_table, conn, keys, data_iter)``. + + Details and a sample callable implementation can be found in the + section :ref:`insert method `. + """ + if dtype: + if not is_dict_like(dtype): + # error: Value expression in dictionary comprehension has incompatible + # type "Union[ExtensionDtype, str, dtype[Any], Type[object], + # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]], + # Type[str], Type[float], Type[int], Type[complex], Type[bool], + # Type[object]]]]"; expected type "Union[ExtensionDtype, str, + # dtype[Any], Type[object]]" + dtype = {col_name: dtype for col_name in frame} # type: ignore[misc] + else: + dtype = cast(dict, dtype) + + for col, my_type in dtype.items(): + if not isinstance(my_type, str): + raise ValueError(f"{col} ({my_type}) not a string") + + table = SQLiteTable( + name, + self, + frame=frame, + index=index, + if_exists=if_exists, + index_label=index_label, + dtype=dtype, + ) + table.create() + return table.insert(chunksize, method) + + def has_table(self, name: str, schema: str | None = None) -> bool: + wld = "?" 
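+        # "?" is the qmark paramstyle the sqlite3 driver expects; the table
+        # name is bound as a parameter below (execute(query, [name])) rather
+        # than interpolated into the SQL, so quoting issues are avoided.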
+ query = f""" + SELECT + name + FROM + sqlite_master + WHERE + type IN ('table', 'view') + AND name={wld}; + """ + + return len(self.execute(query, [name]).fetchall()) > 0 + + def get_table(self, table_name: str, schema: str | None = None) -> None: + return None # not supported in fallback mode + + def drop_table(self, name: str, schema: str | None = None) -> None: + drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}" + self.execute(drop_sql) + + def _create_sql_schema( + self, + frame, + table_name: str, + keys=None, + dtype: DtypeArg | None = None, + schema: str | None = None, + ): + table = SQLiteTable( + table_name, + self, + frame=frame, + index=False, + keys=keys, + dtype=dtype, + schema=schema, + ) + return str(table.sql_schema()) + + +def get_schema( + frame, + name: str, + keys=None, + con=None, + dtype: DtypeArg | None = None, + schema: str | None = None, +) -> str: + """ + Get the SQL db table schema for the given frame. + + Parameters + ---------- + frame : DataFrame + name : str + name of SQL table + keys : string or sequence, default: None + columns to use a primary key + con: an open SQL database connection object or a SQLAlchemy connectable + Using SQLAlchemy makes it possible to use any DB supported by that + library, default: None + If a DBAPI2 object, only sqlite3 is supported. + dtype : dict of column name to SQL type, default None + Optional specifying the datatype for columns. The SQL type should + be a SQLAlchemy type, or a string for sqlite3 fallback connection. + schema: str, default: None + Optional specifying the schema to be used in creating the table. + + .. versionadded:: 1.2.0 + """ + with pandasSQL_builder(con=con) as pandas_sql: + return pandas_sql._create_sql_schema( + frame, name, keys=keys, dtype=dtype, schema=schema + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/stata.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/stata.py new file mode 100644 index 00000000..0a02da09 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/stata.py @@ -0,0 +1,3799 @@ +""" +Module contains tools for processing Stata files into DataFrames + +The StataReader below was originally written by Joe Presbrey as part of PyDTA. +It has been extended and improved by Skipper Seabold from the Statsmodels +project who also developed the StataWriter and was finally added to pandas in +a once again improved version. 
+ +You can find more information on http://presbrey.mit.edu/PyDTA and +https://www.statsmodels.org/devel/ +""" +from __future__ import annotations + +from collections import abc +from datetime import ( + datetime, + timedelta, +) +from io import BytesIO +import os +import struct +import sys +from typing import ( + IO, + TYPE_CHECKING, + Any, + AnyStr, + Callable, + Final, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas._libs.lib import infer_dtype +from pandas._libs.writers import max_len_string_array +from pandas.errors import ( + CategoricalConversionWarning, + InvalidColumnName, + PossiblePrecisionLoss, + ValueLabelTypeMismatch, +) +from pandas.util._decorators import ( + Appender, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + ensure_object, + is_numeric_dtype, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +from pandas import ( + Categorical, + DatetimeIndex, + NaT, + Timestamp, + isna, + to_datetime, + to_timedelta, +) +from pandas.core.arrays.boolean import BooleanDtype +from pandas.core.arrays.integer import IntegerDtype +from pandas.core.frame import DataFrame +from pandas.core.indexes.base import Index +from pandas.core.series import Series +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import get_handle + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Sequence, + ) + from types import TracebackType + from typing import Literal + + from pandas._typing import ( + CompressionOptions, + FilePath, + ReadBuffer, + StorageOptions, + WriteBuffer, + ) + +_version_error = ( + "Version of given Stata file is {version}. pandas supports importing " + "versions 105, 108, 111 (Stata 7SE), 113 (Stata 8/9), " + "114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), 118 (Stata 14/15/16)," + "and 119 (Stata 15/16, over 32,767 variables)." +) + +_statafile_processing_params1 = """\ +convert_dates : bool, default True + Convert date variables to DataFrame time values. +convert_categoricals : bool, default True + Read value labels and convert columns to Categorical/Factor variables.""" + +_statafile_processing_params2 = """\ +index_col : str, optional + Column to set as index. +convert_missing : bool, default False + Flag indicating whether to convert missing values to their Stata + representations. If False, missing values are replaced with nan. + If True, columns containing missing values are returned with + object data types and missing values are represented by + StataMissingValue objects. +preserve_dtypes : bool, default True + Preserve Stata datatypes. If False, numeric data are upcast to pandas + default types for foreign data (float64 or int64). +columns : list or None + Columns to retain. Columns will be returned in the given order. None + returns all columns. +order_categoricals : bool, default True + Flag indicating whether converted categorical data are ordered.""" + +_chunksize_params = """\ +chunksize : int, default None + Return StataReader object for iterations, returns chunks with + given number of lines.""" + +_iterator_params = """\ +iterator : bool, default False + Return StataReader object.""" + +_reader_notes = """\ +Notes +----- +Categorical variables read through an iterator may not have the same +categories and dtype. 
This occurs when a variable stored in a DTA +file is associated to an incomplete set of value labels that only +label a strict subset of the values.""" + +_read_stata_doc = f""" +Read Stata file into DataFrame. + +Parameters +---------- +filepath_or_buffer : str, path object or file-like object + Any valid string path is acceptable. The string could be a URL. Valid + URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: ``file://localhost/path/to/table.dta``. + + If you want to pass in a path object, pandas accepts any ``os.PathLike``. + + By file-like object, we refer to objects with a ``read()`` method, + such as a file handle (e.g. via builtin ``open`` function) + or ``StringIO``. +{_statafile_processing_params1} +{_statafile_processing_params2} +{_chunksize_params} +{_iterator_params} +{_shared_docs["decompression_options"] % "filepath_or_buffer"} +{_shared_docs["storage_options"]} + +Returns +------- +DataFrame or pandas.api.typing.StataReader + +See Also +-------- +io.stata.StataReader : Low-level reader for Stata data files. +DataFrame.to_stata: Export Stata data files. + +{_reader_notes} + +Examples +-------- + +Creating a dummy stata for this example + +>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', 'parrot'], +... 'speed': [350, 18, 361, 15]}}) # doctest: +SKIP +>>> df.to_stata('animals.dta') # doctest: +SKIP + +Read a Stata dta file: + +>>> df = pd.read_stata('animals.dta') # doctest: +SKIP + +Read a Stata dta file in 10,000 line chunks: + +>>> values = np.random.randint(0, 10, size=(20_000, 1), dtype="uint8") # doctest: +SKIP +>>> df = pd.DataFrame(values, columns=["i"]) # doctest: +SKIP +>>> df.to_stata('filename.dta') # doctest: +SKIP + +>>> with pd.read_stata('filename.dta', chunksize=10000) as itr: # doctest: +SKIP +>>> for chunk in itr: +... # Operate on a single chunk, e.g., chunk.mean() +... pass # doctest: +SKIP +""" + +_read_method_doc = f"""\ +Reads observations from Stata file, converting them into a dataframe + +Parameters +---------- +nrows : int + Number of lines to read from data file, if None read whole file. +{_statafile_processing_params1} +{_statafile_processing_params2} + +Returns +------- +DataFrame +""" + +_stata_reader_doc = f"""\ +Class for reading Stata dta files. + +Parameters +---------- +path_or_buf : path (string), buffer or path object + string, path object (pathlib.Path or py._path.local.LocalPath) or object + implementing a binary read() functions. +{_statafile_processing_params1} +{_statafile_processing_params2} +{_chunksize_params} +{_shared_docs["decompression_options"]} +{_shared_docs["storage_options"]} + +{_reader_notes} +""" + + +_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"] + + +stata_epoch: Final = datetime(1960, 1, 1) + + +# TODO: Add typing. As of January 2020 it is not possible to type this function since +# mypy doesn't understand that a Series and an int can be combined using mathematical +# operations. (+, -). +def _stata_elapsed_date_to_datetime_vec(dates, fmt) -> Series: + """ + Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime + + Parameters + ---------- + dates : Series + The Stata Internal Format date to convert to datetime according to fmt + fmt : str + The format to convert to. 
Can be, tc, td, tw, tm, tq, th, ty + Returns + + Returns + ------- + converted : Series + The converted dates + + Examples + -------- + >>> dates = pd.Series([52]) + >>> _stata_elapsed_date_to_datetime_vec(dates , "%tw") + 0 1961-01-01 + dtype: datetime64[ns] + + Notes + ----- + datetime/c - tc + milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day + datetime/C - tC - NOT IMPLEMENTED + milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds + date - td + days since 01jan1960 (01jan1960 = 0) + weekly date - tw + weeks since 1960w1 + This assumes 52 weeks in a year, then adds 7 * remainder of the weeks. + The datetime value is the start of the week in terms of days in the + year, not ISO calendar weeks. + monthly date - tm + months since 1960m1 + quarterly date - tq + quarters since 1960q1 + half-yearly date - th + half-years since 1960h1 yearly + date - ty + years since 0000 + """ + MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year + MAX_DAY_DELTA = (Timestamp.max - datetime(1960, 1, 1)).days + MIN_DAY_DELTA = (Timestamp.min - datetime(1960, 1, 1)).days + MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000 + MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000 + + def convert_year_month_safe(year, month) -> Series: + """ + Convert year and month to datetimes, using pandas vectorized versions + when the date range falls within the range supported by pandas. + Otherwise it falls back to a slower but more robust method + using datetime. + """ + if year.max() < MAX_YEAR and year.min() > MIN_YEAR: + return to_datetime(100 * year + month, format="%Y%m") + else: + index = getattr(year, "index", None) + return Series([datetime(y, m, 1) for y, m in zip(year, month)], index=index) + + def convert_year_days_safe(year, days) -> Series: + """ + Converts year (e.g. 1999) and days since the start of the year to a + datetime or datetime64 Series + """ + if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR: + return to_datetime(year, format="%Y") + to_timedelta(days, unit="d") + else: + index = getattr(year, "index", None) + value = [ + datetime(y, 1, 1) + timedelta(days=int(d)) for y, d in zip(year, days) + ] + return Series(value, index=index) + + def convert_delta_safe(base, deltas, unit) -> Series: + """ + Convert base dates and deltas to datetimes, using pandas vectorized + versions if the deltas satisfy restrictions required to be expressed + as dates in pandas. + """ + index = getattr(deltas, "index", None) + if unit == "d": + if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA: + values = [base + timedelta(days=int(d)) for d in deltas] + return Series(values, index=index) + elif unit == "ms": + if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA: + values = [ + base + timedelta(microseconds=(int(d) * 1000)) for d in deltas + ] + return Series(values, index=index) + else: + raise ValueError("format not understood") + base = to_datetime(base) + deltas = to_timedelta(deltas, unit=unit) + return base + deltas + + # TODO(non-nano): If/when pandas supports more than datetime64[ns], this + # should be improved to use correct range, e.g. 
datetime[Y] for yearly + bad_locs = np.isnan(dates) + has_bad_values = False + if bad_locs.any(): + has_bad_values = True + # reset cache to avoid SettingWithCopy checks (we own the DataFrame and the + # `dates` Series is used to overwrite itself in the DataFramae) + dates._reset_cacher() + dates[bad_locs] = 1.0 # Replace with NaT + dates = dates.astype(np.int64) + + if fmt.startswith(("%tc", "tc")): # Delta ms relative to base + base = stata_epoch + ms = dates + conv_dates = convert_delta_safe(base, ms, "ms") + elif fmt.startswith(("%tC", "tC")): + warnings.warn( + "Encountered %tC format. Leaving in Stata Internal Format.", + stacklevel=find_stack_level(), + ) + conv_dates = Series(dates, dtype=object) + if has_bad_values: + conv_dates[bad_locs] = NaT + return conv_dates + # Delta days relative to base + elif fmt.startswith(("%td", "td", "%d", "d")): + base = stata_epoch + days = dates + conv_dates = convert_delta_safe(base, days, "d") + # does not count leap days - 7 days is a week. + # 52nd week may have more than 7 days + elif fmt.startswith(("%tw", "tw")): + year = stata_epoch.year + dates // 52 + days = (dates % 52) * 7 + conv_dates = convert_year_days_safe(year, days) + elif fmt.startswith(("%tm", "tm")): # Delta months relative to base + year = stata_epoch.year + dates // 12 + month = (dates % 12) + 1 + conv_dates = convert_year_month_safe(year, month) + elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base + year = stata_epoch.year + dates // 4 + quarter_month = (dates % 4) * 3 + 1 + conv_dates = convert_year_month_safe(year, quarter_month) + elif fmt.startswith(("%th", "th")): # Delta half-years relative to base + year = stata_epoch.year + dates // 2 + month = (dates % 2) * 6 + 1 + conv_dates = convert_year_month_safe(year, month) + elif fmt.startswith(("%ty", "ty")): # Years -- not delta + year = dates + first_month = np.ones_like(dates) + conv_dates = convert_year_month_safe(year, first_month) + else: + raise ValueError(f"Date fmt {fmt} not understood") + + if has_bad_values: # Restore NaT for bad values + conv_dates[bad_locs] = NaT + + return conv_dates + + +def _datetime_to_stata_elapsed_vec(dates: Series, fmt: str) -> Series: + """ + Convert from datetime to SIF. https://www.stata.com/help.cgi?datetime + + Parameters + ---------- + dates : Series + Series or array containing datetime or datetime64[ns] to + convert to the Stata Internal Format given by fmt + fmt : str + The format to convert to. 
+        Can be tc, td, tw, tm, tq, th, ty
+    """
+    index = dates.index
+    NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
+    US_PER_DAY = NS_PER_DAY / 1000
+
+    def parse_dates_safe(
+        dates: Series, delta: bool = False, year: bool = False, days: bool = False
+    ):
+        d = {}
+        if lib.is_np_dtype(dates.dtype, "M"):
+            if delta:
+                time_delta = dates - Timestamp(stata_epoch).as_unit("ns")
+                d["delta"] = time_delta._values.view(np.int64) // 1000  # microseconds
+            if days or year:
+                date_index = DatetimeIndex(dates)
+                d["year"] = date_index._data.year
+                d["month"] = date_index._data.month
+            if days:
+                days_in_ns = dates.view(np.int64) - to_datetime(
+                    d["year"], format="%Y"
+                ).view(np.int64)
+                d["days"] = days_in_ns // NS_PER_DAY
+
+        elif infer_dtype(dates, skipna=False) == "datetime":
+            if delta:
+                delta = dates._values - stata_epoch
+
+                def f(x: timedelta) -> float:
+                    return US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
+
+                v = np.vectorize(f)
+                d["delta"] = v(delta)
+            if year:
+                year_month = dates.apply(lambda x: 100 * x.year + x.month)
+                d["year"] = year_month._values // 100
+                d["month"] = year_month._values - d["year"] * 100
+            if days:
+
+                def g(x: datetime) -> int:
+                    return (x - datetime(x.year, 1, 1)).days
+
+                v = np.vectorize(g)
+                d["days"] = v(dates)
+        else:
+            raise ValueError(
+                "Columns containing dates must contain either "
+                "datetime64, datetime or null values."
+            )
+
+        return DataFrame(d, index=index)
+
+    bad_loc = isna(dates)
+    index = dates.index
+    if bad_loc.any():
+        dates = Series(dates)
+        if lib.is_np_dtype(dates.dtype, "M"):
+            dates[bad_loc] = to_datetime(stata_epoch)
+        else:
+            dates[bad_loc] = stata_epoch
+
+    if fmt in ["%tc", "tc"]:
+        d = parse_dates_safe(dates, delta=True)
+        conv_dates = d.delta / 1000
+    elif fmt in ["%tC", "tC"]:
+        warnings.warn(
+            "Stata Internal Format tC not supported.",
+            stacklevel=find_stack_level(),
+        )
+        conv_dates = dates
+    elif fmt in ["%td", "td"]:
+        d = parse_dates_safe(dates, delta=True)
+        conv_dates = d.delta // US_PER_DAY
+    elif fmt in ["%tw", "tw"]:
+        d = parse_dates_safe(dates, year=True, days=True)
+        conv_dates = 52 * (d.year - stata_epoch.year) + d.days // 7
+    elif fmt in ["%tm", "tm"]:
+        d = parse_dates_safe(dates, year=True)
+        conv_dates = 12 * (d.year - stata_epoch.year) + d.month - 1
+    elif fmt in ["%tq", "tq"]:
+        d = parse_dates_safe(dates, year=True)
+        conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
+    elif fmt in ["%th", "th"]:
+        d = parse_dates_safe(dates, year=True)
+        conv_dates = 2 * (d.year - stata_epoch.year) + (d.month > 6).astype(int)
+    elif fmt in ["%ty", "ty"]:
+        d = parse_dates_safe(dates, year=True)
+        conv_dates = d.year
+    else:
+        raise ValueError(f"Format {fmt} is not a known Stata date format")
+
+    conv_dates = Series(conv_dates, dtype=np.float64)
+    missing_value = struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
+    conv_dates[bad_loc] = missing_value
+
+    return Series(conv_dates, index=index)
+
+
+def _cast_to_stata_types(data: DataFrame) -> DataFrame:
+    """
+    Checks the dtypes of the columns of a pandas DataFrame for
+    compatibility with the data types and ranges supported by Stata, and
+    converts if necessary.
+
+    Parameters
+    ----------
+    data : DataFrame
+        The DataFrame to check and convert
+
+    Notes
+    -----
+    Numeric columns in Stata must be one of int8, int16, int32, float32 or
+    float64, with some additional value restrictions. int8 and int16 columns
+    are checked for violations of the value restrictions and upcast if needed.
+    int64 data is not usable in Stata, and so it is downcast to int32 whenever
+    the values are in the int32 range, and converted to float64 when they are
+    larger than this range. If the int64 values are outside of the range of
+    those perfectly representable as float64 values, a warning is raised.
+
+    bool columns are cast to int8. uint columns are converted to int of the
+    same size if there is no loss in precision, otherwise they are upcast to a
+    larger type. uint64 is currently not supported since it is converted to
+    object in a DataFrame.
+    """
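+    # `ws` holds at most one pending precision-loss warning, emitted once the
+    # column loop finishes; each row of `conversion_data` maps a dtype with no
+    # Stata equivalent to a narrow target (used when the values fit) and a
+    # wide fallback.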
+    ws = ""
+    # original, if small, if large
+    conversion_data: tuple[
+        tuple[type, type, type],
+        tuple[type, type, type],
+        tuple[type, type, type],
+        tuple[type, type, type],
+        tuple[type, type, type],
+    ] = (
+        (np.bool_, np.int8, np.int8),
+        (np.uint8, np.int8, np.int16),
+        (np.uint16, np.int16, np.int32),
+        (np.uint32, np.int32, np.int64),
+        (np.uint64, np.int64, np.float64),
+    )
+
+    float32_max = struct.unpack("<f", b"\xff\xff\xff\x7e")[0]
+    float64_max = struct.unpack("<d", b"\xff\xff\xff\xff\xff\xff\xdf\x7f")[0]
+
+    for col in data:
+        # Cast from unsupported types to supported types
+        is_nullable_int = (
+            isinstance(data[col].dtype, ExtensionDtype)
+            and data[col].dtype.kind in "iub"
+        )
+        # We need to find orig_missing before altering data below
+        orig_missing = data[col].isna()
+        if is_nullable_int:
+            fv = False if data[col].dtype.kind == "b" else 0
+            # Replace with NumPy-compatible column
+            data[col] = data[col].fillna(fv).astype(data[col].dtype.numpy_dtype)
+        elif isinstance(data[col].dtype, ExtensionDtype):
+            if getattr(data[col].dtype, "numpy_dtype", None) is not None:
+                data[col] = data[col].astype(data[col].dtype.numpy_dtype)
+            elif is_string_dtype(data[col].dtype):
+                data[col] = data[col].astype("object")
+
+        dtype = data[col].dtype
+        empty_df = data.shape[0] == 0
+        for c_data in conversion_data:
+            if dtype == c_data[0]:
+                if empty_df or data[col].max() <= np.iinfo(c_data[1]).max:
+                    dtype = c_data[1]
+                else:
+                    dtype = c_data[2]
+                if c_data[2] == np.int64:  # Warn if necessary
+                    if data[col].max() >= 2**53:
+                        ws = precision_loss_doc.format("uint64", "float64")
+
+        data[col] = data[col].astype(dtype)
+
+        # Check values and upcast if necessary
+
+        if dtype == np.int8 and not empty_df:
+            if data[col].max() > 100 or data[col].min() < -127:
+                data[col] = data[col].astype(np.int16)
+        elif dtype == np.int16 and not empty_df:
+            if data[col].max() > 32740 or data[col].min() < -32767:
+                data[col] = data[col].astype(np.int32)
+        elif dtype == np.int64:
+            if empty_df or (
+                data[col].max() <= 2147483620 and data[col].min() >= -2147483647
+            ):
+                data[col] = data[col].astype(np.int32)
+            else:
+                data[col] = data[col].astype(np.float64)
+                if data[col].max() >= 2**53 or data[col].min() <= -(2**53):
+                    ws = precision_loss_doc.format("int64", "float64")
+        elif dtype in (np.float32, np.float64):
+            if np.isinf(data[col]).any():
+                raise ValueError(
+                    f"Column {col} contains infinity or -infinity, "
+                    "which is outside the range supported by Stata."
+                )
+            value = data[col].max()
+            if dtype == np.float32 and value > float32_max:
+                data[col] = data[col].astype(np.float64)
+            elif dtype == np.float64:
+                if value > float64_max:
+                    raise ValueError(
+                        f"Column {col} has a maximum value ({value}) outside the range "
+                        f"supported by Stata ({float64_max})"
+                    )
+        if is_nullable_int:
+            if orig_missing.any():
+                # Replace missing by Stata sentinel value
+                sentinel = StataMissingValue.BASE_MISSING_VALUES[data[col].dtype.name]
+                data.loc[orig_missing, col] = sentinel
+    if ws:
+        warnings.warn(
+            ws,
+            PossiblePrecisionLoss,
+            stacklevel=find_stack_level(),
+        )
+
+    return data
+
+
+class StataValueLabel:
+    """
+    Parse a categorical column and prepare formatted output
+
+    Parameters
+    ----------
+    catarray : Series
+        Categorical Series to encode
+    encoding : {"latin-1", "utf-8"}
+        Encoding to use for value labels.
+ """ + + def __init__( + self, catarray: Series, encoding: Literal["latin-1", "utf-8"] = "latin-1" + ) -> None: + if encoding not in ("latin-1", "utf-8"): + raise ValueError("Only latin-1 and utf-8 are supported.") + self.labname = catarray.name + self._encoding = encoding + categories = catarray.cat.categories + self.value_labels: list[tuple[float, str]] = list( + zip(np.arange(len(categories)), categories) + ) + self.value_labels.sort(key=lambda x: x[0]) + + self._prepare_value_labels() + + def _prepare_value_labels(self): + """Encode value labels.""" + + self.text_len = 0 + self.txt: list[bytes] = [] + self.n = 0 + # Offsets (length of categories), converted to int32 + self.off = np.array([], dtype=np.int32) + # Values, converted to int32 + self.val = np.array([], dtype=np.int32) + self.len = 0 + + # Compute lengths and setup lists of offsets and labels + offsets: list[int] = [] + values: list[float] = [] + for vl in self.value_labels: + category: str | bytes = vl[1] + if not isinstance(category, str): + category = str(category) + warnings.warn( + value_label_mismatch_doc.format(self.labname), + ValueLabelTypeMismatch, + stacklevel=find_stack_level(), + ) + category = category.encode(self._encoding) + offsets.append(self.text_len) + self.text_len += len(category) + 1 # +1 for the padding + values.append(vl[0]) + self.txt.append(category) + self.n += 1 + + if self.text_len > 32000: + raise ValueError( + "Stata value labels for a single variable must " + "have a combined length less than 32,000 characters." + ) + + # Ensure int32 + self.off = np.array(offsets, dtype=np.int32) + self.val = np.array(values, dtype=np.int32) + + # Total length + self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len + + def generate_value_label(self, byteorder: str) -> bytes: + """ + Generate the binary representation of the value labels. + + Parameters + ---------- + byteorder : str + Byte order of the output + + Returns + ------- + value_label : bytes + Bytes containing the formatted value label + """ + encoding = self._encoding + bio = BytesIO() + null_byte = b"\x00" + + # len + bio.write(struct.pack(byteorder + "i", self.len)) + + # labname + labname = str(self.labname)[:32].encode(encoding) + lab_len = 32 if encoding not in ("utf-8", "utf8") else 128 + labname = _pad_bytes(labname, lab_len + 1) + bio.write(labname) + + # padding - 3 bytes + for i in range(3): + bio.write(struct.pack("c", null_byte)) + + # value_label_table + # n - int32 + bio.write(struct.pack(byteorder + "i", self.n)) + + # textlen - int32 + bio.write(struct.pack(byteorder + "i", self.text_len)) + + # off - int32 array (n elements) + for offset in self.off: + bio.write(struct.pack(byteorder + "i", offset)) + + # val - int32 array (n elements) + for value in self.val: + bio.write(struct.pack(byteorder + "i", value)) + + # txt - Text labels, null terminated + for text in self.txt: + bio.write(text + null_byte) + + return bio.getvalue() + + +class StataNonCatValueLabel(StataValueLabel): + """ + Prepare formatted version of value labels + + Parameters + ---------- + labname : str + Value label name + value_labels: Dictionary + Mapping of values to labels + encoding : {"latin-1", "utf-8"} + Encoding to use for value labels. 
+ """ + + def __init__( + self, + labname: str, + value_labels: dict[float, str], + encoding: Literal["latin-1", "utf-8"] = "latin-1", + ) -> None: + if encoding not in ("latin-1", "utf-8"): + raise ValueError("Only latin-1 and utf-8 are supported.") + + self.labname = labname + self._encoding = encoding + self.value_labels: list[tuple[float, str]] = sorted( + value_labels.items(), key=lambda x: x[0] + ) + self._prepare_value_labels() + + +class StataMissingValue: + """ + An observation's missing value. + + Parameters + ---------- + value : {int, float} + The Stata missing value code + + Notes + ----- + More information: + + Integer missing values make the code '.', '.a', ..., '.z' to the ranges + 101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ... + 2147483647 (for int32). Missing values for floating point data types are + more complex but the pattern is simple to discern from the following table. + + np.float32 missing values (float in Stata) + 0000007f . + 0008007f .a + 0010007f .b + ... + 00c0007f .x + 00c8007f .y + 00d0007f .z + + np.float64 missing values (double in Stata) + 000000000000e07f . + 000000000001e07f .a + 000000000002e07f .b + ... + 000000000018e07f .x + 000000000019e07f .y + 00000000001ae07f .z + """ + + # Construct a dictionary of missing values + MISSING_VALUES: dict[float, str] = {} + bases: Final = (101, 32741, 2147483621) + for b in bases: + # Conversion to long to avoid hash issues on 32 bit platforms #8968 + MISSING_VALUES[b] = "." + for i in range(1, 27): + MISSING_VALUES[i + b] = "." + chr(96 + i) + + float32_base: bytes = b"\x00\x00\x00\x7f" + increment_32: int = struct.unpack(" 0: + MISSING_VALUES[key] += chr(96 + i) + int_value = struct.unpack(" 0: + MISSING_VALUES[key] += chr(96 + i) + int_value = struct.unpack("q", struct.pack(" None: + self._value = value + # Conversion to int to avoid hash issues on 32 bit platforms #8968 + value = int(value) if value < 2147483648 else float(value) + self._str = self.MISSING_VALUES[value] + + @property + def string(self) -> str: + """ + The Stata representation of the missing value: '.', '.a'..'.z' + + Returns + ------- + str + The representation of the missing value. + """ + return self._str + + @property + def value(self) -> float: + """ + The binary representation of the missing value. + + Returns + ------- + {int, float} + The binary representation of the missing value. + """ + return self._value + + def __str__(self) -> str: + return self.string + + def __repr__(self) -> str: + return f"{type(self)}({self})" + + def __eq__(self, other: Any) -> bool: + return ( + isinstance(other, type(self)) + and self.string == other.string + and self.value == other.value + ) + + @classmethod + def get_base_missing_value(cls, dtype: np.dtype) -> float: + if dtype.type is np.int8: + value = cls.BASE_MISSING_VALUES["int8"] + elif dtype.type is np.int16: + value = cls.BASE_MISSING_VALUES["int16"] + elif dtype.type is np.int32: + value = cls.BASE_MISSING_VALUES["int32"] + elif dtype.type is np.float32: + value = cls.BASE_MISSING_VALUES["float32"] + elif dtype.type is np.float64: + value = cls.BASE_MISSING_VALUES["float64"] + else: + raise ValueError("Unsupported dtype") + return value + + +class StataParser: + def __init__(self) -> None: + # type code. + # -------------------- + # str1 1 = 0x01 + # str2 2 = 0x02 + # ... 
+ # str244 244 = 0xf4 + # byte 251 = 0xfb (sic) + # int 252 = 0xfc + # long 253 = 0xfd + # float 254 = 0xfe + # double 255 = 0xff + # -------------------- + # NOTE: the byte type seems to be reserved for categorical variables + # with a label, but the underlying variable is -127 to 100 + # we're going to drop the label and cast to int + self.DTYPE_MAP = dict( + [(i, np.dtype(f"S{i}")) for i in range(1, 245)] + + [ + (251, np.dtype(np.int8)), + (252, np.dtype(np.int16)), + (253, np.dtype(np.int32)), + (254, np.dtype(np.float32)), + (255, np.dtype(np.float64)), + ] + ) + self.DTYPE_MAP_XML: dict[int, np.dtype] = { + 32768: np.dtype(np.uint8), # Keys to GSO + 65526: np.dtype(np.float64), + 65527: np.dtype(np.float32), + 65528: np.dtype(np.int32), + 65529: np.dtype(np.int16), + 65530: np.dtype(np.int8), + } + self.TYPE_MAP = list(tuple(range(251)) + tuple("bhlfd")) + self.TYPE_MAP_XML = { + # Not really a Q, unclear how to handle byteswap + 32768: "Q", + 65526: "d", + 65527: "f", + 65528: "l", + 65529: "h", + 65530: "b", + } + # NOTE: technically, some of these are wrong. there are more numbers + # that can be represented. it's the 27 ABOVE and BELOW the max listed + # numeric data type in [U] 12.2.2 of the 11.2 manual + float32_min = b"\xff\xff\xff\xfe" + float32_max = b"\xff\xff\xff\x7e" + float64_min = b"\xff\xff\xff\xff\xff\xff\xef\xff" + float64_max = b"\xff\xff\xff\xff\xff\xff\xdf\x7f" + self.VALID_RANGE = { + "b": (-127, 100), + "h": (-32767, 32740), + "l": (-2147483647, 2147483620), + "f": ( + np.float32(struct.unpack(" None: + super().__init__() + self._col_sizes: list[int] = [] + + # Arguments to the reader (can be temporarily overridden in + # calls to read). + self._convert_dates = convert_dates + self._convert_categoricals = convert_categoricals + self._index_col = index_col + self._convert_missing = convert_missing + self._preserve_dtypes = preserve_dtypes + self._columns = columns + self._order_categoricals = order_categoricals + self._original_path_or_buf = path_or_buf + self._compression = compression + self._storage_options = storage_options + self._encoding = "" + self._chunksize = chunksize + self._using_iterator = False + self._entered = False + if self._chunksize is None: + self._chunksize = 1 + elif not isinstance(chunksize, int) or chunksize <= 0: + raise ValueError("chunksize must be a positive integer when set.") + + # State variables for the file + self._close_file: Callable[[], None] | None = None + self._has_string_data = False + self._missing_values = False + self._can_read_value_labels = False + self._column_selector_set = False + self._value_labels_read = False + self._data_read = False + self._dtype: np.dtype | None = None + self._lines_read = 0 + + self._native_byteorder = _set_endianness(sys.byteorder) + + def _ensure_open(self) -> None: + """ + Ensure the file has been opened and its header data read. + """ + if not hasattr(self, "_path_or_buf"): + self._open_file() + + def _open_file(self) -> None: + """ + Open the file (with compression options, etc.), and read header information. + """ + if not self._entered: + warnings.warn( + "StataReader is being used without using a context manager. 
" + "Using StataReader as a context manager is the only supported method.", + ResourceWarning, + stacklevel=find_stack_level(), + ) + handles = get_handle( + self._original_path_or_buf, + "rb", + storage_options=self._storage_options, + is_text=False, + compression=self._compression, + ) + if hasattr(handles.handle, "seekable") and handles.handle.seekable(): + # If the handle is directly seekable, use it without an extra copy. + self._path_or_buf = handles.handle + self._close_file = handles.close + else: + # Copy to memory, and ensure no encoding. + with handles: + self._path_or_buf = BytesIO(handles.handle.read()) + self._close_file = self._path_or_buf.close + + self._read_header() + self._setup_dtype() + + def __enter__(self) -> StataReader: + """enter context manager""" + self._entered = True + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + if self._close_file: + self._close_file() + + def close(self) -> None: + """Close the handle if its open. + + .. deprecated: 2.0.0 + + The close method is not part of the public API. + The only supported way to use StataReader is to use it as a context manager. + """ + warnings.warn( + "The StataReader.close() method is not part of the public API and " + "will be removed in a future version without notice. " + "Using StataReader as a context manager is the only supported method.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if self._close_file: + self._close_file() + + def _set_encoding(self) -> None: + """ + Set string encoding which depends on file version + """ + if self._format_version < 118: + self._encoding = "latin-1" + else: + self._encoding = "utf-8" + + def _read_int8(self) -> int: + return struct.unpack("b", self._path_or_buf.read(1))[0] + + def _read_uint8(self) -> int: + return struct.unpack("B", self._path_or_buf.read(1))[0] + + def _read_uint16(self) -> int: + return struct.unpack(f"{self._byteorder}H", self._path_or_buf.read(2))[0] + + def _read_uint32(self) -> int: + return struct.unpack(f"{self._byteorder}I", self._path_or_buf.read(4))[0] + + def _read_uint64(self) -> int: + return struct.unpack(f"{self._byteorder}Q", self._path_or_buf.read(8))[0] + + def _read_int16(self) -> int: + return struct.unpack(f"{self._byteorder}h", self._path_or_buf.read(2))[0] + + def _read_int32(self) -> int: + return struct.unpack(f"{self._byteorder}i", self._path_or_buf.read(4))[0] + + def _read_int64(self) -> int: + return struct.unpack(f"{self._byteorder}q", self._path_or_buf.read(8))[0] + + def _read_char8(self) -> bytes: + return struct.unpack("c", self._path_or_buf.read(1))[0] + + def _read_int16_count(self, count: int) -> tuple[int, ...]: + return struct.unpack( + f"{self._byteorder}{'h' * count}", + self._path_or_buf.read(2 * count), + ) + + def _read_header(self) -> None: + first_char = self._read_char8() + if first_char == b"<": + self._read_new_header() + else: + self._read_old_header(first_char) + + self._has_string_data = len([x for x in self._typlist if type(x) is int]) > 0 + + # calculate size of a data record + self._col_sizes = [self._calcsize(typ) for typ in self._typlist] + + def _read_new_header(self) -> None: + # The first part of the header is common to 117 - 119. + self._path_or_buf.read(27) # stata_dta>
+ self._format_version = int(self._path_or_buf.read(3)) + if self._format_version not in [117, 118, 119]: + raise ValueError(_version_error.format(version=self._format_version)) + self._set_encoding() + self._path_or_buf.read(21) # + self._byteorder = ">" if self._path_or_buf.read(3) == b"MSF" else "<" + self._path_or_buf.read(15) # + self._nvar = ( + self._read_uint16() if self._format_version <= 118 else self._read_uint32() + ) + self._path_or_buf.read(7) # + + self._nobs = self._get_nobs() + self._path_or_buf.read(11) # + self._time_stamp = self._get_time_stamp() + self._path_or_buf.read(26) #
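+        # The <map> section that follows is a table of int64 file offsets,
+        # one per section of the file. Each offset read below has the length
+        # of that section's opening tag added to it (e.g. +16 for
+        # "<variable_types>", +10 for "<varnames>") so the stored position
+        # points directly at the section's payload.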
+ self._path_or_buf.read(8) # 0x0000000000000000 + self._path_or_buf.read(8) # position of + + self._seek_vartypes = self._read_int64() + 16 + self._seek_varnames = self._read_int64() + 10 + self._seek_sortlist = self._read_int64() + 10 + self._seek_formats = self._read_int64() + 9 + self._seek_value_label_names = self._read_int64() + 19 + + # Requires version-specific treatment + self._seek_variable_labels = self._get_seek_variable_labels() + + self._path_or_buf.read(8) # + self._data_location = self._read_int64() + 6 + self._seek_strls = self._read_int64() + 7 + self._seek_value_labels = self._read_int64() + 14 + + self._typlist, self._dtyplist = self._get_dtypes(self._seek_vartypes) + + self._path_or_buf.seek(self._seek_varnames) + self._varlist = self._get_varlist() + + self._path_or_buf.seek(self._seek_sortlist) + self._srtlist = self._read_int16_count(self._nvar + 1)[:-1] + + self._path_or_buf.seek(self._seek_formats) + self._fmtlist = self._get_fmtlist() + + self._path_or_buf.seek(self._seek_value_label_names) + self._lbllist = self._get_lbllist() + + self._path_or_buf.seek(self._seek_variable_labels) + self._variable_labels = self._get_variable_labels() + + # Get data type information, works for versions 117-119. + def _get_dtypes( + self, seek_vartypes: int + ) -> tuple[list[int | str], list[str | np.dtype]]: + self._path_or_buf.seek(seek_vartypes) + raw_typlist = [self._read_uint16() for _ in range(self._nvar)] + + def f(typ: int) -> int | str: + if typ <= 2045: + return typ + try: + return self.TYPE_MAP_XML[typ] + except KeyError as err: + raise ValueError(f"cannot convert stata types [{typ}]") from err + + typlist = [f(x) for x in raw_typlist] + + def g(typ: int) -> str | np.dtype: + if typ <= 2045: + return str(typ) + try: + return self.DTYPE_MAP_XML[typ] + except KeyError as err: + raise ValueError(f"cannot convert stata dtype [{typ}]") from err + + dtyplist = [g(x) for x in raw_typlist] + + return typlist, dtyplist + + def _get_varlist(self) -> list[str]: + # 33 in order formats, 129 in formats 118 and 119 + b = 33 if self._format_version < 118 else 129 + return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)] + + # Returns the format list + def _get_fmtlist(self) -> list[str]: + if self._format_version >= 118: + b = 57 + elif self._format_version > 113: + b = 49 + elif self._format_version > 104: + b = 12 + else: + b = 7 + + return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)] + + # Returns the label list + def _get_lbllist(self) -> list[str]: + if self._format_version >= 118: + b = 129 + elif self._format_version > 108: + b = 33 + else: + b = 9 + return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)] + + def _get_variable_labels(self) -> list[str]: + if self._format_version >= 118: + vlblist = [ + self._decode(self._path_or_buf.read(321)) for _ in range(self._nvar) + ] + elif self._format_version > 105: + vlblist = [ + self._decode(self._path_or_buf.read(81)) for _ in range(self._nvar) + ] + else: + vlblist = [ + self._decode(self._path_or_buf.read(32)) for _ in range(self._nvar) + ] + return vlblist + + def _get_nobs(self) -> int: + if self._format_version >= 118: + return self._read_uint64() + else: + return self._read_uint32() + + def _get_data_label(self) -> str: + if self._format_version >= 118: + strlen = self._read_uint16() + return self._decode(self._path_or_buf.read(strlen)) + elif self._format_version == 117: + strlen = self._read_int8() + return self._decode(self._path_or_buf.read(strlen)) + elif 
self._format_version > 105: + return self._decode(self._path_or_buf.read(81)) + else: + return self._decode(self._path_or_buf.read(32)) + + def _get_time_stamp(self) -> str: + if self._format_version >= 118: + strlen = self._read_int8() + return self._path_or_buf.read(strlen).decode("utf-8") + elif self._format_version == 117: + strlen = self._read_int8() + return self._decode(self._path_or_buf.read(strlen)) + elif self._format_version > 104: + return self._decode(self._path_or_buf.read(18)) + else: + raise ValueError() + + def _get_seek_variable_labels(self) -> int: + if self._format_version == 117: + self._path_or_buf.read(8) # , throw away + # Stata 117 data files do not follow the described format. This is + # a work around that uses the previous label, 33 bytes for each + # variable, 20 for the closing tag and 17 for the opening tag + return self._seek_value_label_names + (33 * self._nvar) + 20 + 17 + elif self._format_version >= 118: + return self._read_int64() + 17 + else: + raise ValueError() + + def _read_old_header(self, first_char: bytes) -> None: + self._format_version = int(first_char[0]) + if self._format_version not in [104, 105, 108, 111, 113, 114, 115]: + raise ValueError(_version_error.format(version=self._format_version)) + self._set_encoding() + self._byteorder = ">" if self._read_int8() == 0x1 else "<" + self._filetype = self._read_int8() + self._path_or_buf.read(1) # unused + + self._nvar = self._read_uint16() + self._nobs = self._get_nobs() + + self._data_label = self._get_data_label() + + self._time_stamp = self._get_time_stamp() + + # descriptors + if self._format_version > 108: + typlist = [int(c) for c in self._path_or_buf.read(self._nvar)] + else: + buf = self._path_or_buf.read(self._nvar) + typlistb = np.frombuffer(buf, dtype=np.uint8) + typlist = [] + for tp in typlistb: + if tp in self.OLD_TYPE_MAPPING: + typlist.append(self.OLD_TYPE_MAPPING[tp]) + else: + typlist.append(tp - 127) # bytes + + try: + self._typlist = [self.TYPE_MAP[typ] for typ in typlist] + except ValueError as err: + invalid_types = ",".join([str(x) for x in typlist]) + raise ValueError(f"cannot convert stata types [{invalid_types}]") from err + try: + self._dtyplist = [self.DTYPE_MAP[typ] for typ in typlist] + except ValueError as err: + invalid_dtypes = ",".join([str(x) for x in typlist]) + raise ValueError(f"cannot convert stata dtypes [{invalid_dtypes}]") from err + + if self._format_version > 108: + self._varlist = [ + self._decode(self._path_or_buf.read(33)) for _ in range(self._nvar) + ] + else: + self._varlist = [ + self._decode(self._path_or_buf.read(9)) for _ in range(self._nvar) + ] + self._srtlist = self._read_int16_count(self._nvar + 1)[:-1] + + self._fmtlist = self._get_fmtlist() + + self._lbllist = self._get_lbllist() + + self._variable_labels = self._get_variable_labels() + + # ignore expansion fields (Format 105 and later) + # When reading, read five bytes; the last four bytes now tell you + # the size of the next read, which you discard. You then continue + # like this until you read 5 bytes of zeros. 
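+        # Only formats newer than 108 use the 5-byte probe described above (a
+        # type byte plus an int32 length); formats 105-108 store an int16
+        # length, so each probe there is 3 bytes.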
+ + if self._format_version > 104: + while True: + data_type = self._read_int8() + if self._format_version > 108: + data_len = self._read_int32() + else: + data_len = self._read_int16() + if data_type == 0: + break + self._path_or_buf.read(data_len) + + # necessary data to continue parsing + self._data_location = self._path_or_buf.tell() + + def _setup_dtype(self) -> np.dtype: + """Map between numpy and state dtypes""" + if self._dtype is not None: + return self._dtype + + dtypes = [] # Convert struct data types to numpy data type + for i, typ in enumerate(self._typlist): + if typ in self.NUMPY_TYPE_MAP: + typ = cast(str, typ) # only strs in NUMPY_TYPE_MAP + dtypes.append((f"s{i}", f"{self._byteorder}{self.NUMPY_TYPE_MAP[typ]}")) + else: + dtypes.append((f"s{i}", f"S{typ}")) + self._dtype = np.dtype(dtypes) + + return self._dtype + + def _calcsize(self, fmt: int | str) -> int: + if isinstance(fmt, int): + return fmt + return struct.calcsize(self._byteorder + fmt) + + def _decode(self, s: bytes) -> str: + # have bytes not strings, so must decode + s = s.partition(b"\0")[0] + try: + return s.decode(self._encoding) + except UnicodeDecodeError: + # GH 25960, fallback to handle incorrect format produced when 117 + # files are converted to 118 files in Stata + encoding = self._encoding + msg = f""" +One or more strings in the dta file could not be decoded using {encoding}, and +so the fallback encoding of latin-1 is being used. This can happen when a file +has been incorrectly encoded by Stata or some other software. You should verify +the string values returned are correct.""" + warnings.warn( + msg, + UnicodeWarning, + stacklevel=find_stack_level(), + ) + return s.decode("latin-1") + + def _read_value_labels(self) -> None: + self._ensure_open() + if self._value_labels_read: + # Don't read twice + return + if self._format_version <= 108: + # Value labels are not supported in version 108 and earlier. 
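+            # Nothing to parse for these early formats: mark the labels as
+            # read and record an empty mapping so later lookups still work.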
+ self._value_labels_read = True + self._value_label_dict: dict[str, dict[float, str]] = {} + return + + if self._format_version >= 117: + self._path_or_buf.seek(self._seek_value_labels) + else: + assert self._dtype is not None + offset = self._nobs * self._dtype.itemsize + self._path_or_buf.seek(self._data_location + offset) + + self._value_labels_read = True + self._value_label_dict = {} + + while True: + if self._format_version >= 117: + if self._path_or_buf.read(5) == b" + break # end of value label table + + slength = self._path_or_buf.read(4) + if not slength: + break # end of value label table (format < 117) + if self._format_version <= 117: + labname = self._decode(self._path_or_buf.read(33)) + else: + labname = self._decode(self._path_or_buf.read(129)) + self._path_or_buf.read(3) # padding + + n = self._read_uint32() + txtlen = self._read_uint32() + off = np.frombuffer( + self._path_or_buf.read(4 * n), dtype=f"{self._byteorder}i4", count=n + ) + val = np.frombuffer( + self._path_or_buf.read(4 * n), dtype=f"{self._byteorder}i4", count=n + ) + ii = np.argsort(off) + off = off[ii] + val = val[ii] + txt = self._path_or_buf.read(txtlen) + self._value_label_dict[labname] = {} + for i in range(n): + end = off[i + 1] if i < n - 1 else txtlen + self._value_label_dict[labname][val[i]] = self._decode( + txt[off[i] : end] + ) + if self._format_version >= 117: + self._path_or_buf.read(6) # + self._value_labels_read = True + + def _read_strls(self) -> None: + self._path_or_buf.seek(self._seek_strls) + # Wrap v_o in a string to allow uint64 values as keys on 32bit OS + self.GSO = {"0": ""} + while True: + if self._path_or_buf.read(3) != b"GSO": + break + + if self._format_version == 117: + v_o = self._read_uint64() + else: + buf = self._path_or_buf.read(12) + # Only tested on little endian file on little endian machine. + v_size = 2 if self._format_version == 118 else 3 + if self._byteorder == "<": + buf = buf[0:v_size] + buf[4 : (12 - v_size)] + else: + # This path may not be correct, impossible to test + buf = buf[0:v_size] + buf[(4 + v_size) :] + v_o = struct.unpack("Q", buf)[0] + typ = self._read_uint8() + length = self._read_uint32() + va = self._path_or_buf.read(length) + if typ == 130: + decoded_va = va[0:-1].decode(self._encoding) + else: + # Stata says typ 129 can be binary, so use str + decoded_va = str(va) + # Wrap v_o in a string to allow uint64 values as keys on 32bit OS + self.GSO[str(v_o)] = decoded_va + + def __next__(self) -> DataFrame: + self._using_iterator = True + return self.read(nrows=self._chunksize) + + def get_chunk(self, size: int | None = None) -> DataFrame: + """ + Reads lines from Stata file and returns as dataframe + + Parameters + ---------- + size : int, defaults to None + Number of lines to read. If None, reads whole file. 
+ + Returns + ------- + DataFrame + """ + if size is None: + size = self._chunksize + return self.read(nrows=size) + + @Appender(_read_method_doc) + def read( + self, + nrows: int | None = None, + convert_dates: bool | None = None, + convert_categoricals: bool | None = None, + index_col: str | None = None, + convert_missing: bool | None = None, + preserve_dtypes: bool | None = None, + columns: Sequence[str] | None = None, + order_categoricals: bool | None = None, + ) -> DataFrame: + self._ensure_open() + + # Handle options + if convert_dates is None: + convert_dates = self._convert_dates + if convert_categoricals is None: + convert_categoricals = self._convert_categoricals + if convert_missing is None: + convert_missing = self._convert_missing + if preserve_dtypes is None: + preserve_dtypes = self._preserve_dtypes + if columns is None: + columns = self._columns + if order_categoricals is None: + order_categoricals = self._order_categoricals + if index_col is None: + index_col = self._index_col + if nrows is None: + nrows = self._nobs + + # Handle empty file or chunk. If reading incrementally raise + # StopIteration. If reading the whole thing return an empty + # data frame. + if (self._nobs == 0) and nrows == 0: + self._can_read_value_labels = True + self._data_read = True + data = DataFrame(columns=self._varlist) + # Apply dtypes correctly + for i, col in enumerate(data.columns): + dt = self._dtyplist[i] + if isinstance(dt, np.dtype): + if dt.char != "S": + data[col] = data[col].astype(dt) + if columns is not None: + data = self._do_select_columns(data, columns) + return data + + if (self._format_version >= 117) and (not self._value_labels_read): + self._can_read_value_labels = True + self._read_strls() + + # Read data + assert self._dtype is not None + dtype = self._dtype + max_read_len = (self._nobs - self._lines_read) * dtype.itemsize + read_len = nrows * dtype.itemsize + read_len = min(read_len, max_read_len) + if read_len <= 0: + # Iterator has finished, should never be here unless + # we are reading the file incrementally + if convert_categoricals: + self._read_value_labels() + raise StopIteration + offset = self._lines_read * dtype.itemsize + self._path_or_buf.seek(self._data_location + offset) + read_lines = min(nrows, self._nobs - self._lines_read) + raw_data = np.frombuffer( + self._path_or_buf.read(read_len), dtype=dtype, count=read_lines + ) + + self._lines_read += read_lines + if self._lines_read == self._nobs: + self._can_read_value_labels = True + self._data_read = True + # if necessary, swap the byte order to native here + if self._byteorder != self._native_byteorder: + raw_data = raw_data.byteswap().view(raw_data.dtype.newbyteorder()) + + if convert_categoricals: + self._read_value_labels() + + if len(raw_data) == 0: + data = DataFrame(columns=self._varlist) + else: + data = DataFrame.from_records(raw_data) + data.columns = Index(self._varlist) + + # If index is not specified, use actual row number rather than + # restarting at 0 for each chunk. 
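+        # e.g. with chunksize=1000 the second chunk is indexed 1000..1999.
+        # Illustrative sketch of chunked reading where this matters
+        # ("big_file.dta" is a hypothetical path):
+        #
+        #     with StataReader("big_file.dta", chunksize=1000) as rdr:
+        #         for chunk in rdr:
+        #             ...  # chunk.index continues where the last chunk ended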
+ if index_col is None: + rng = range(self._lines_read - read_lines, self._lines_read) + data.index = Index(rng) # set attr instead of set_index to avoid copy + + if columns is not None: + data = self._do_select_columns(data, columns) + + # Decode strings + for col, typ in zip(data, self._typlist): + if type(typ) is int: + data[col] = data[col].apply(self._decode) + + data = self._insert_strls(data) + + cols_ = np.where([dtyp is not None for dtyp in self._dtyplist])[0] + # Convert columns (if needed) to match input type + ix = data.index + requires_type_conversion = False + data_formatted = [] + for i in cols_: + if self._dtyplist[i] is not None: + col = data.columns[i] + dtype = data[col].dtype + if dtype != np.dtype(object) and dtype != self._dtyplist[i]: + requires_type_conversion = True + data_formatted.append( + (col, Series(data[col], ix, self._dtyplist[i])) + ) + else: + data_formatted.append((col, data[col])) + if requires_type_conversion: + data = DataFrame.from_dict(dict(data_formatted)) + del data_formatted + + data = self._do_convert_missing(data, convert_missing) + + if convert_dates: + + def any_startswith(x: str) -> bool: + return any(x.startswith(fmt) for fmt in _date_formats) + + cols = np.where([any_startswith(x) for x in self._fmtlist])[0] + for i in cols: + col = data.columns[i] + data[col] = _stata_elapsed_date_to_datetime_vec( + data[col], self._fmtlist[i] + ) + + if convert_categoricals and self._format_version > 108: + data = self._do_convert_categoricals( + data, self._value_label_dict, self._lbllist, order_categoricals + ) + + if not preserve_dtypes: + retyped_data = [] + convert = False + for col in data: + dtype = data[col].dtype + if dtype in (np.dtype(np.float16), np.dtype(np.float32)): + dtype = np.dtype(np.float64) + convert = True + elif dtype in ( + np.dtype(np.int8), + np.dtype(np.int16), + np.dtype(np.int32), + ): + dtype = np.dtype(np.int64) + convert = True + retyped_data.append((col, data[col].astype(dtype))) + if convert: + data = DataFrame.from_dict(dict(retyped_data)) + + if index_col is not None: + data = data.set_index(data.pop(index_col)) + + return data + + def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFrame: + # Check for missing values, and replace if found + replacements = {} + for i, colname in enumerate(data): + fmt = self._typlist[i] + if fmt not in self.VALID_RANGE: + continue + + fmt = cast(str, fmt) # only strs in VALID_RANGE + nmin, nmax = self.VALID_RANGE[fmt] + series = data[colname] + + # appreciably faster to do this with ndarray instead of Series + svals = series._values + missing = (svals < nmin) | (svals > nmax) + + if not missing.any(): + continue + + if convert_missing: # Replacement follows Stata notation + missing_loc = np.nonzero(np.asarray(missing))[0] + umissing, umissing_loc = np.unique(series[missing], return_inverse=True) + replacement = Series(series, dtype=object) + for j, um in enumerate(umissing): + missing_value = StataMissingValue(um) + + loc = missing_loc[umissing_loc == j] + replacement.iloc[loc] = missing_value + else: # All replacements are identical + dtype = series.dtype + if dtype not in (np.float32, np.float64): + dtype = np.float64 + replacement = Series(series, dtype=dtype) + if not replacement._values.flags["WRITEABLE"]: + # only relevant for ArrayManager; construction + # path for BlockManager ensures writeability + replacement = replacement.copy() + # Note: operating on ._values is much faster than directly + # TODO: can we fix that? 
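+                # Assigning through ._values writes into the backing ndarray
+                # directly, skipping Series.__setitem__ and its alignment and
+                # validation overhead.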
+ replacement._values[missing] = np.nan + replacements[colname] = replacement + + if replacements: + for col, value in replacements.items(): + data[col] = value + return data + + def _insert_strls(self, data: DataFrame) -> DataFrame: + if not hasattr(self, "GSO") or len(self.GSO) == 0: + return data + for i, typ in enumerate(self._typlist): + if typ != "Q": + continue + # Wrap v_o in a string to allow uint64 values as keys on 32bit OS + data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]] + return data + + def _do_select_columns(self, data: DataFrame, columns: Sequence[str]) -> DataFrame: + if not self._column_selector_set: + column_set = set(columns) + if len(column_set) != len(columns): + raise ValueError("columns contains duplicate entries") + unmatched = column_set.difference(data.columns) + if unmatched: + joined = ", ".join(list(unmatched)) + raise ValueError( + "The following columns were not " + f"found in the Stata data set: {joined}" + ) + # Copy information for retained columns for later processing + dtyplist = [] + typlist = [] + fmtlist = [] + lbllist = [] + for col in columns: + i = data.columns.get_loc(col) + dtyplist.append(self._dtyplist[i]) + typlist.append(self._typlist[i]) + fmtlist.append(self._fmtlist[i]) + lbllist.append(self._lbllist[i]) + + self._dtyplist = dtyplist + self._typlist = typlist + self._fmtlist = fmtlist + self._lbllist = lbllist + self._column_selector_set = True + + return data[columns] + + def _do_convert_categoricals( + self, + data: DataFrame, + value_label_dict: dict[str, dict[float, str]], + lbllist: Sequence[str], + order_categoricals: bool, + ) -> DataFrame: + """ + Converts categorical columns to Categorical type. + """ + value_labels = list(value_label_dict.keys()) + cat_converted_data = [] + for col, label in zip(data, lbllist): + if label in value_labels: + # Explicit call with ordered=True + vl = value_label_dict[label] + keys = np.array(list(vl.keys())) + column = data[col] + key_matches = column.isin(keys) + if self._using_iterator and key_matches.all(): + initial_categories: np.ndarray | None = keys + # If all categories are in the keys and we are iterating, + # use the same keys for all chunks. If some are missing + # value labels, then we will fall back to the categories + # varying across chunks. + else: + if self._using_iterator: + # warn is using an iterator + warnings.warn( + categorical_conversion_warning, + CategoricalConversionWarning, + stacklevel=find_stack_level(), + ) + initial_categories = None + cat_data = Categorical( + column, categories=initial_categories, ordered=order_categoricals + ) + if initial_categories is None: + # If None here, then we need to match the cats in the Categorical + categories = [] + for category in cat_data.categories: + if category in vl: + categories.append(vl[category]) + else: + categories.append(category) + else: + # If all cats are matched, we can use the values + categories = list(vl.values()) + try: + # Try to catch duplicate categories + # TODO: if we get a non-copying rename_categories, use that + cat_data = cat_data.rename_categories(categories) + except ValueError as err: + vc = Series(categories, copy=False).value_counts() + repeated_cats = list(vc.index[vc > 1]) + repeats = "-" * 80 + "\n" + "\n".join(repeated_cats) + # GH 25772 + msg = f""" +Value labels for column {col} are not unique. These cannot be converted to +pandas categoricals. 
+ +Either read the file with `convert_categoricals` set to False or use the +low level interface in `StataReader` to separately read the values and the +value_labels. + +The repeated labels are: +{repeats} +""" + raise ValueError(msg) from err + # TODO: is the next line needed above in the data(...) method? + cat_series = Series(cat_data, index=data.index, copy=False) + cat_converted_data.append((col, cat_series)) + else: + cat_converted_data.append((col, data[col])) + data = DataFrame(dict(cat_converted_data), copy=False) + return data + + @property + def data_label(self) -> str: + """ + Return data label of Stata file. + + Examples + -------- + >>> df = pd.DataFrame([(1,)], columns=["variable"]) + >>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21) + >>> data_label = "This is a data file." + >>> path = "/My_path/filename.dta" + >>> df.to_stata(path, time_stamp=time_stamp, # doctest: +SKIP + ... data_label=data_label, # doctest: +SKIP + ... version=None) # doctest: +SKIP + >>> with pd.io.stata.StataReader(path) as reader: # doctest: +SKIP + ... print(reader.data_label) # doctest: +SKIP + This is a data file. + """ + self._ensure_open() + return self._data_label + + @property + def time_stamp(self) -> str: + """ + Return time stamp of Stata file. + """ + self._ensure_open() + return self._time_stamp + + def variable_labels(self) -> dict[str, str]: + """ + Return a dict associating each variable name with corresponding label. + + Returns + ------- + dict + + Examples + -------- + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["col_1", "col_2"]) + >>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21) + >>> path = "/My_path/filename.dta" + >>> variable_labels = {"col_1": "This is an example"} + >>> df.to_stata(path, time_stamp=time_stamp, # doctest: +SKIP + ... variable_labels=variable_labels, version=None) # doctest: +SKIP + >>> with pd.io.stata.StataReader(path) as reader: # doctest: +SKIP + ... print(reader.variable_labels()) # doctest: +SKIP + {'index': '', 'col_1': 'This is an example', 'col_2': ''} + >>> pd.read_stata(path) # doctest: +SKIP + index col_1 col_2 + 0 0 1 2 + 1 1 3 4 + """ + self._ensure_open() + return dict(zip(self._varlist, self._variable_labels)) + + def value_labels(self) -> dict[str, dict[float, str]]: + """ + Return a nested dict associating each variable name to its value and label. + + Returns + ------- + dict + + Examples + -------- + >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["col_1", "col_2"]) + >>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21) + >>> path = "/My_path/filename.dta" + >>> value_labels = {"col_1": {3: "x"}} + >>> df.to_stata(path, time_stamp=time_stamp, # doctest: +SKIP + ... value_labels=value_labels, version=None) # doctest: +SKIP + >>> with pd.io.stata.StataReader(path) as reader: # doctest: +SKIP + ... 
print(reader.value_labels()) # doctest: +SKIP + {'col_1': {3: 'x'}} + >>> pd.read_stata(path) # doctest: +SKIP + index col_1 col_2 + 0 0 1 2 + 1 1 x 4 + """ + if not self._value_labels_read: + self._read_value_labels() + + return self._value_label_dict + + +@Appender(_read_stata_doc) +def read_stata( + filepath_or_buffer: FilePath | ReadBuffer[bytes], + *, + convert_dates: bool = True, + convert_categoricals: bool = True, + index_col: str | None = None, + convert_missing: bool = False, + preserve_dtypes: bool = True, + columns: Sequence[str] | None = None, + order_categoricals: bool = True, + chunksize: int | None = None, + iterator: bool = False, + compression: CompressionOptions = "infer", + storage_options: StorageOptions | None = None, +) -> DataFrame | StataReader: + reader = StataReader( + filepath_or_buffer, + convert_dates=convert_dates, + convert_categoricals=convert_categoricals, + index_col=index_col, + convert_missing=convert_missing, + preserve_dtypes=preserve_dtypes, + columns=columns, + order_categoricals=order_categoricals, + chunksize=chunksize, + storage_options=storage_options, + compression=compression, + ) + + if iterator or chunksize: + return reader + + with reader: + return reader.read() + + +def _set_endianness(endianness: str) -> str: + if endianness.lower() in ["<", "little"]: + return "<" + elif endianness.lower() in [">", "big"]: + return ">" + else: # pragma : no cover + raise ValueError(f"Endianness {endianness} not understood") + + +def _pad_bytes(name: AnyStr, length: int) -> AnyStr: + """ + Take a char string and pads it with null bytes until it's length chars. + """ + if isinstance(name, bytes): + return name + b"\x00" * (length - len(name)) + return name + "\x00" * (length - len(name)) + + +def _convert_datetime_to_stata_type(fmt: str) -> np.dtype: + """ + Convert from one of the stata date formats to a type in TYPE_MAP. + """ + if fmt in [ + "tc", + "%tc", + "td", + "%td", + "tw", + "%tw", + "tm", + "%tm", + "tq", + "%tq", + "th", + "%th", + "ty", + "%ty", + ]: + return np.dtype(np.float64) # Stata expects doubles for SIFs + else: + raise NotImplementedError(f"Format {fmt} not implemented") + + +def _maybe_convert_to_int_keys(convert_dates: dict, varlist: list[Hashable]) -> dict: + new_dict = {} + for key in convert_dates: + if not convert_dates[key].startswith("%"): # make sure proper fmts + convert_dates[key] = "%" + convert_dates[key] + if key in varlist: + new_dict.update({varlist.index(key): convert_dates[key]}) + else: + if not isinstance(key, int): + raise ValueError("convert_dates key must be a column or an integer") + new_dict.update({key: convert_dates[key]}) + return new_dict + + +def _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int: + """ + Convert dtype types to stata types. Returns the byte of the given ordinal. + See TYPE_MAP and comments for an explanation. This is also explained in + the dta spec. + 1 - 244 are strings of this length + Pandas Stata + 251 - for int8 byte + 252 - for int16 int + 253 - for int32 long + 254 - for float32 float + 255 - for double double + + If there are dates to convert, then dtype will already have the correct + type inserted. + """ + # TODO: expand to handle datetime to integer conversion + if dtype.type is np.object_: # try to coerce it to the biggest string + # not memory efficient, what else could we + # do? 
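+        # max_len_string_array scans every element of the object column for
+        # the longest string; that width (clamped to at least 1) becomes the
+        # str# width written for the column.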
+ itemsize = max_len_string_array(ensure_object(column._values)) + return max(itemsize, 1) + elif dtype.type is np.float64: + return 255 + elif dtype.type is np.float32: + return 254 + elif dtype.type is np.int32: + return 253 + elif dtype.type is np.int16: + return 252 + elif dtype.type is np.int8: + return 251 + else: # pragma : no cover + raise NotImplementedError(f"Data type {dtype} not supported.") + + +def _dtype_to_default_stata_fmt( + dtype, column: Series, dta_version: int = 114, force_strl: bool = False +) -> str: + """ + Map numpy dtype to stata's default format for this type. Not terribly + important since users can change this in Stata. Semantics are + + object -> "%DDs" where DD is the length of the string. If not a string, + raise ValueError + float64 -> "%10.0g" + float32 -> "%9.0g" + int64 -> "%9.0g" + int32 -> "%12.0g" + int16 -> "%8.0g" + int8 -> "%8.0g" + strl -> "%9s" + """ + # TODO: Refactor to combine type with format + # TODO: expand this to handle a default datetime format? + if dta_version < 117: + max_str_len = 244 + else: + max_str_len = 2045 + if force_strl: + return "%9s" + if dtype.type is np.object_: + itemsize = max_len_string_array(ensure_object(column._values)) + if itemsize > max_str_len: + if dta_version >= 117: + return "%9s" + else: + raise ValueError(excessive_string_length_error.format(column.name)) + return "%" + str(max(itemsize, 1)) + "s" + elif dtype == np.float64: + return "%10.0g" + elif dtype == np.float32: + return "%9.0g" + elif dtype == np.int32: + return "%12.0g" + elif dtype in (np.int8, np.int16): + return "%8.0g" + else: # pragma : no cover + raise NotImplementedError(f"Data type {dtype} not supported.") + + +@doc( + storage_options=_shared_docs["storage_options"], + compression_options=_shared_docs["compression_options"] % "fname", +) +class StataWriter(StataParser): + """ + A class for writing Stata binary dta files + + Parameters + ---------- + fname : path (string), buffer or path object + string, path object (pathlib.Path or py._path.local.LocalPath) or + object implementing a binary write() functions. If using a buffer + then the buffer will not be automatically closed after the file + is written. + data : DataFrame + Input to save + convert_dates : dict + Dictionary mapping columns containing datetime types to stata internal + format to use when writing the dates. Options are 'tc', 'td', 'tm', + 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. + Datetime columns that do not have a conversion type specified will be + converted to 'tc'. Raises NotImplementedError if a datetime column has + timezone information + write_index : bool + Write the index to Stata dataset. + byteorder : str + Can be ">", "<", "little", or "big". default is `sys.byteorder` + time_stamp : datetime + A datetime to use as file creation date. Default is the current time + data_label : str + A label for the data set. Must be 80 characters or smaller. + variable_labels : dict + Dictionary containing columns as keys and variable labels as values. + Each label must be 80 characters or smaller. + {compression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + {storage_options} + + .. versionadded:: 1.2.0 + + value_labels : dict of dicts + Dictionary containing columns as keys and dictionaries of column value + to labels as values. The combined length of all labels for a single + variable must be 32,000 characters or smaller. + + .. 
versionadded:: 1.4.0 + + Returns + ------- + writer : StataWriter instance + The StataWriter instance has a write_file method, which will + write the file to the given `fname`. + + Raises + ------ + NotImplementedError + * If datetimes contain timezone information + ValueError + * Columns listed in convert_dates are neither datetime64[ns] + or datetime + * Column dtype is not representable in Stata + * Column listed in convert_dates is not in DataFrame + * Categorical label contains more than 32,000 characters + + Examples + -------- + >>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b']) + >>> writer = StataWriter('./data_file.dta', data) + >>> writer.write_file() + + Directly write a zip file + >>> compression = {{"method": "zip", "archive_name": "data_file.dta"}} + >>> writer = StataWriter('./data_file.zip', data, compression=compression) + >>> writer.write_file() + + Save a DataFrame with dates + >>> from datetime import datetime + >>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date']) + >>> writer = StataWriter('./date_data_file.dta', data, {{'date' : 'tw'}}) + >>> writer.write_file() + """ + + _max_string_length = 244 + _encoding: Literal["latin-1", "utf-8"] = "latin-1" + + def __init__( + self, + fname: FilePath | WriteBuffer[bytes], + data: DataFrame, + convert_dates: dict[Hashable, str] | None = None, + write_index: bool = True, + byteorder: str | None = None, + time_stamp: datetime | None = None, + data_label: str | None = None, + variable_labels: dict[Hashable, str] | None = None, + compression: CompressionOptions = "infer", + storage_options: StorageOptions | None = None, + *, + value_labels: dict[Hashable, dict[float, str]] | None = None, + ) -> None: + super().__init__() + self.data = data + self._convert_dates = {} if convert_dates is None else convert_dates + self._write_index = write_index + self._time_stamp = time_stamp + self._data_label = data_label + self._variable_labels = variable_labels + self._non_cat_value_labels = value_labels + self._value_labels: list[StataValueLabel] = [] + self._has_value_labels = np.array([], dtype=bool) + self._compression = compression + self._output_file: IO[bytes] | None = None + self._converted_names: dict[Hashable, str] = {} + # attach nobs, nvars, data, varlist, typlist + self._prepare_pandas(data) + self.storage_options = storage_options + + if byteorder is None: + byteorder = sys.byteorder + self._byteorder = _set_endianness(byteorder) + self._fname = fname + self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8} + + def _write(self, to_write: str) -> None: + """ + Helper to call encode before writing to file for Python 3 compat. + """ + self.handles.handle.write(to_write.encode(self._encoding)) + + def _write_bytes(self, value: bytes) -> None: + """ + Helper to assert file is open before writing. + """ + self.handles.handle.write(value) + + def _prepare_non_cat_value_labels( + self, data: DataFrame + ) -> list[StataNonCatValueLabel]: + """ + Check for value labels provided for non-categorical columns. Value + labels + """ + non_cat_value_labels: list[StataNonCatValueLabel] = [] + if self._non_cat_value_labels is None: + return non_cat_value_labels + + for labname, labels in self._non_cat_value_labels.items(): + if labname in self._converted_names: + colname = self._converted_names[labname] + elif labname in data.columns: + colname = str(labname) + else: + raise KeyError( + f"Can't create value labels for {labname}, it wasn't " + "found in the dataset." 
+ ) + + if not is_numeric_dtype(data[colname].dtype): + # Labels should not be passed explicitly for categorical + # columns that will be converted to int + raise ValueError( + f"Can't create value labels for {labname}, value labels " + "can only be applied to numeric columns." + ) + svl = StataNonCatValueLabel(colname, labels, self._encoding) + non_cat_value_labels.append(svl) + return non_cat_value_labels + + def _prepare_categoricals(self, data: DataFrame) -> DataFrame: + """ + Check for categorical columns, retain categorical information for + Stata file and convert categorical data to int + """ + is_cat = [isinstance(data[col].dtype, CategoricalDtype) for col in data] + if not any(is_cat): + return data + + self._has_value_labels |= np.array(is_cat) + + get_base_missing_value = StataMissingValue.get_base_missing_value + data_formatted = [] + for col, col_is_cat in zip(data, is_cat): + if col_is_cat: + svl = StataValueLabel(data[col], encoding=self._encoding) + self._value_labels.append(svl) + dtype = data[col].cat.codes.dtype + if dtype == np.int64: + raise ValueError( + "It is not possible to export " + "int64-based categorical data to Stata." + ) + values = data[col].cat.codes._values.copy() + + # Upcast if needed so that correct missing values can be set + if values.max() >= get_base_missing_value(dtype): + if dtype == np.int8: + dtype = np.dtype(np.int16) + elif dtype == np.int16: + dtype = np.dtype(np.int32) + else: + dtype = np.dtype(np.float64) + values = np.array(values, dtype=dtype) + + # Replace missing values with Stata missing value for type + values[values == -1] = get_base_missing_value(dtype) + data_formatted.append((col, values)) + else: + data_formatted.append((col, data[col])) + return DataFrame.from_dict(dict(data_formatted)) + + def _replace_nans(self, data: DataFrame) -> DataFrame: + # return data + """ + Checks floating point data columns for nans, and replaces these with + the generic Stata for missing value (.) + """ + for c in data: + dtype = data[c].dtype + if dtype in (np.float32, np.float64): + if dtype == np.float32: + replacement = self.MISSING_VALUES["f"] + else: + replacement = self.MISSING_VALUES["d"] + data[c] = data[c].fillna(replacement) + + return data + + def _update_strl_names(self) -> None: + """No-op, forward compatibility""" + + def _validate_variable_name(self, name: str) -> str: + """ + Validate variable names for Stata export. + + Parameters + ---------- + name : str + Variable name + + Returns + ------- + str + The validated name with invalid characters replaced with + underscores. + + Notes + ----- + Stata 114 and 117 support ascii characters in a-z, A-Z, 0-9 + and _. + """ + for c in name: + if ( + (c < "A" or c > "Z") + and (c < "a" or c > "z") + and (c < "0" or c > "9") + and c != "_" + ): + name = name.replace(c, "_") + return name + + def _check_column_names(self, data: DataFrame) -> DataFrame: + """ + Checks column names to ensure that they are valid Stata column names. 
+ This includes checks for: + * Non-string names + * Stata keywords + * Variables that start with numbers + * Variables with names that are too long + + When an illegal variable name is detected, it is converted, and if + dates are exported, the variable name is propagated to the date + conversion dictionary + """ + converted_names: dict[Hashable, str] = {} + columns = list(data.columns) + original_columns = columns[:] + + duplicate_var_id = 0 + for j, name in enumerate(columns): + orig_name = name + if not isinstance(name, str): + name = str(name) + + name = self._validate_variable_name(name) + + # Variable name must not be a reserved word + if name in self.RESERVED_WORDS: + name = "_" + name + + # Variable name may not start with a number + if "0" <= name[0] <= "9": + name = "_" + name + + name = name[: min(len(name), 32)] + + if not name == orig_name: + # check for duplicates + while columns.count(name) > 0: + # prepend ascending number to avoid duplicates + name = "_" + str(duplicate_var_id) + name + name = name[: min(len(name), 32)] + duplicate_var_id += 1 + converted_names[orig_name] = name + + columns[j] = name + + data.columns = Index(columns) + + # Check date conversion, and fix key if needed + if self._convert_dates: + for c, o in zip(columns, original_columns): + if c != o: + self._convert_dates[c] = self._convert_dates[o] + del self._convert_dates[o] + + if converted_names: + conversion_warning = [] + for orig_name, name in converted_names.items(): + msg = f"{orig_name} -> {name}" + conversion_warning.append(msg) + + ws = invalid_name_doc.format("\n ".join(conversion_warning)) + warnings.warn( + ws, + InvalidColumnName, + stacklevel=find_stack_level(), + ) + + self._converted_names = converted_names + self._update_strl_names() + + return data + + def _set_formats_and_types(self, dtypes: Series) -> None: + self.fmtlist: list[str] = [] + self.typlist: list[int] = [] + for col, dtype in dtypes.items(): + self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, self.data[col])) + self.typlist.append(_dtype_to_stata_type(dtype, self.data[col])) + + def _prepare_pandas(self, data: DataFrame) -> None: + # NOTE: we might need a different API / class for pandas objects so + # we can set different semantics - handle this with a PR to pandas.io + + data = data.copy() + + if self._write_index: + temp = data.reset_index() + if isinstance(temp, DataFrame): + data = temp + + # Ensure column names are strings + data = self._check_column_names(data) + + # Check columns for compatibility with stata, upcast if necessary + # Raise if outside the supported range + data = _cast_to_stata_types(data) + + # Replace NaNs with Stata missing values + data = self._replace_nans(data) + + # Set all columns to initially unlabelled + self._has_value_labels = np.repeat(False, data.shape[1]) + + # Create value labels for non-categorical data + non_cat_value_labels = self._prepare_non_cat_value_labels(data) + + non_cat_columns = [svl.labname for svl in non_cat_value_labels] + has_non_cat_val_labels = data.columns.isin(non_cat_columns) + self._has_value_labels |= has_non_cat_val_labels + self._value_labels.extend(non_cat_value_labels) + + # Convert categoricals to int data, and strip labels + data = self._prepare_categoricals(data) + + self.nobs, self.nvar = data.shape + self.data = data + self.varlist = data.columns.tolist() + + dtypes = data.dtypes + + # Ensure all date columns are converted + for col in data: + if col in self._convert_dates: + continue + if lib.is_np_dtype(data[col].dtype, "M"): + 
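The effect of _check_column_names and _validate_variable_name is visible from the public API: illegal names are rewritten and an InvalidColumnName warning is emitted. A small sketch with made-up column names:

import io
import pandas as pd

df = pd.DataFrame({"1st": [1], "a col": [2.0]})
buf = io.BytesIO()
df.to_stata(buf, write_index=False)  # warns: 1st -> _1st, a col -> a_col
buf.seek(0)
print(pd.read_stata(buf).columns.tolist())  # ['_1st', 'a_col']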
self._convert_dates[col] = "tc" + + self._convert_dates = _maybe_convert_to_int_keys( + self._convert_dates, self.varlist + ) + for key in self._convert_dates: + new_type = _convert_datetime_to_stata_type(self._convert_dates[key]) + dtypes.iloc[key] = np.dtype(new_type) + + # Verify object arrays are strings and encode to bytes + self._encode_strings() + + self._set_formats_and_types(dtypes) + + # set the given format for the datetime cols + if self._convert_dates is not None: + for key in self._convert_dates: + if isinstance(key, int): + self.fmtlist[key] = self._convert_dates[key] + + def _encode_strings(self) -> None: + """ + Encode strings in dta-specific encoding + + Do not encode columns marked for date conversion or for strL + conversion. The strL converter independently handles conversion and + also accepts empty string arrays. + """ + convert_dates = self._convert_dates + # _convert_strl is not available in dta 114 + convert_strl = getattr(self, "_convert_strl", []) + for i, col in enumerate(self.data): + # Skip columns marked for date conversion or strl conversion + if i in convert_dates or col in convert_strl: + continue + column = self.data[col] + dtype = column.dtype + if dtype.type is np.object_: + inferred_dtype = infer_dtype(column, skipna=True) + if not ((inferred_dtype == "string") or len(column) == 0): + col = column.name + raise ValueError( + f"""\ +Column `{col}` cannot be exported.\n\nOnly string-like object arrays +containing all strings or a mix of strings and None can be exported. +Object arrays containing only null values are prohibited. Other object +types cannot be exported and must first be converted to one of the +supported types.""" + ) + encoded = self.data[col].str.encode(self._encoding) + # If larger than _max_string_length do nothing + if ( + max_len_string_array(ensure_object(encoded._values)) + <= self._max_string_length + ): + self.data[col] = encoded + + def write_file(self) -> None: + """ + Export DataFrame object to Stata dta format. + + Examples + -------- + >>> df = pd.DataFrame({"fully_labelled": [1, 2, 3, 3, 1], + ... "partially_labelled": [1.0, 2.0, np.nan, 9.0, np.nan], + ... "Y": [7, 7, 9, 8, 10], + ... "Z": pd.Categorical(["j", "k", "l", "k", "j"]), + ... }) + >>> path = "/My_path/filename.dta" + >>> labels = {"fully_labelled": {1: "one", 2: "two", 3: "three"}, + ... "partially_labelled": {1.0: "one", 2.0: "two"}, + ... } + >>> writer = pd.io.stata.StataWriter(path, + ... df, + ... value_labels=labels) # doctest: +SKIP + >>> writer.write_file() # doctest: +SKIP + >>> df = pd.read_stata(path) # doctest: +SKIP + >>> df # doctest: +SKIP + index fully_labelled partially_labeled Y Z + 0 0 one one 7 j + 1 1 two two 7 k + 2 2 three NaN 9 l + 3 3 three 9.0 8 k + 4 4 one NaN 10 j + """ + with get_handle( + self._fname, + "wb", + compression=self._compression, + is_text=False, + storage_options=self.storage_options, + ) as self.handles: + if self.handles.compression["method"] is not None: + # ZipFile creates a file (with the same name) for each write call. + # Write it first into a buffer and then write the buffer to the ZipFile. 
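The date-defaulting loop at the top of this hunk is why a datetime64 column needs no explicit convert_dates entry: it falls back to Stata's millisecond 'tc' format. A minimal sketch of that default, again through an illustrative in-memory buffer:

import io
from datetime import datetime
import pandas as pd

df = pd.DataFrame({"date": [datetime(2020, 1, 1), datetime(2021, 6, 30)]})
buf = io.BytesIO()
df.to_stata(buf, write_index=False)  # no convert_dates: 'tc' is assumed
buf.seek(0)
print(pd.read_stata(buf)["date"])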
+ self._output_file, self.handles.handle = self.handles.handle, BytesIO() + self.handles.created_handles.append(self.handles.handle) + + try: + self._write_header( + data_label=self._data_label, time_stamp=self._time_stamp + ) + self._write_map() + self._write_variable_types() + self._write_varnames() + self._write_sortlist() + self._write_formats() + self._write_value_label_names() + self._write_variable_labels() + self._write_expansion_fields() + self._write_characteristics() + records = self._prepare_data() + self._write_data(records) + self._write_strls() + self._write_value_labels() + self._write_file_close_tag() + self._write_map() + self._close() + except Exception as exc: + self.handles.close() + if isinstance(self._fname, (str, os.PathLike)) and os.path.isfile( + self._fname + ): + try: + os.unlink(self._fname) + except OSError: + warnings.warn( + f"This save was not successful but {self._fname} could not " + "be deleted. This file is not valid.", + ResourceWarning, + stacklevel=find_stack_level(), + ) + raise exc + + def _close(self) -> None: + """ + Close the file if it was created by the writer. + + If a buffer or file-like object was passed in, for example a GzipFile, + then leave this file open for the caller to close. + """ + # write compression + if self._output_file is not None: + assert isinstance(self.handles.handle, BytesIO) + bio, self.handles.handle = self.handles.handle, self._output_file + self.handles.handle.write(bio.getvalue()) + + def _write_map(self) -> None: + """No-op, future compatibility""" + + def _write_file_close_tag(self) -> None: + """No-op, future compatibility""" + + def _write_characteristics(self) -> None: + """No-op, future compatibility""" + + def _write_strls(self) -> None: + """No-op, future compatibility""" + + def _write_expansion_fields(self) -> None: + """Write 5 zeros for expansion fields""" + self._write(_pad_bytes("", 5)) + + def _write_value_labels(self) -> None: + for vl in self._value_labels: + self._write_bytes(vl.generate_value_label(self._byteorder)) + + def _write_header( + self, + data_label: str | None = None, + time_stamp: datetime | None = None, + ) -> None: + byteorder = self._byteorder + # ds_format - just use 114 + self._write_bytes(struct.pack("b", 114)) + # byteorder + self._write(byteorder == ">" and "\x01" or "\x02") + # filetype + self._write("\x01") + # unused + self._write("\x00") + # number of vars, 2 bytes + self._write_bytes(struct.pack(byteorder + "h", self.nvar)[:2]) + # number of obs, 4 bytes + self._write_bytes(struct.pack(byteorder + "i", self.nobs)[:4]) + # data label 81 bytes, char, null terminated + if data_label is None: + self._write_bytes(self._null_terminate_bytes(_pad_bytes("", 80))) + else: + self._write_bytes( + self._null_terminate_bytes(_pad_bytes(data_label[:80], 80)) + ) + # time stamp, 18 bytes, char, null terminated + # format dd Mon yyyy hh:mm + if time_stamp is None: + time_stamp = datetime.now() + elif not isinstance(time_stamp, datetime): + raise ValueError("time_stamp should be datetime type") + # GH #13856 + # Avoid locale-specific month conversion + months = [ + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec", + ] + month_lookup = {i + 1: month for i, month in enumerate(months)} + ts = ( + time_stamp.strftime("%d ") + + month_lookup[time_stamp.month] + + time_stamp.strftime(" %Y %H:%M") + ) + self._write_bytes(self._null_terminate_bytes(ts)) + + def _write_variable_types(self) -> None: + for typ in self.typlist: + 
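The 114 header written by _write_header above is all fixed offsets (format byte, byteorder flag, filetype, one unused byte, then nvar as 2 bytes and nobs as 4 bytes), so it can be unpacked back out with struct. A sketch against a freshly written file; the frame contents are illustrative:

import io
import struct
import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0], "b": [1, 2]})
buf = io.BytesIO()
df.to_stata(buf, write_index=False, version=114)
raw = buf.getvalue()

byteorder = "<" if raw[1] == 2 else ">"              # \x02 = LSF, \x01 = MSF
nvar = struct.unpack(byteorder + "h", raw[4:6])[0]
nobs = struct.unpack(byteorder + "i", raw[6:10])[0]
print(raw[0], nvar, nobs)                            # 114 2 2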
self._write_bytes(struct.pack("B", typ)) + + def _write_varnames(self) -> None: + # varlist names are checked by _check_column_names + # varlist, requires null terminated + for name in self.varlist: + name = self._null_terminate_str(name) + name = _pad_bytes(name[:32], 33) + self._write(name) + + def _write_sortlist(self) -> None: + # srtlist, 2*(nvar+1), int array, encoded by byteorder + srtlist = _pad_bytes("", 2 * (self.nvar + 1)) + self._write(srtlist) + + def _write_formats(self) -> None: + # fmtlist, 49*nvar, char array + for fmt in self.fmtlist: + self._write(_pad_bytes(fmt, 49)) + + def _write_value_label_names(self) -> None: + # lbllist, 33*nvar, char array + for i in range(self.nvar): + # Use variable name when categorical + if self._has_value_labels[i]: + name = self.varlist[i] + name = self._null_terminate_str(name) + name = _pad_bytes(name[:32], 33) + self._write(name) + else: # Default is empty label + self._write(_pad_bytes("", 33)) + + def _write_variable_labels(self) -> None: + # Missing labels are 80 blank characters plus null termination + blank = _pad_bytes("", 81) + + if self._variable_labels is None: + for i in range(self.nvar): + self._write(blank) + return + + for col in self.data: + if col in self._variable_labels: + label = self._variable_labels[col] + if len(label) > 80: + raise ValueError("Variable labels must be 80 characters or fewer") + is_latin1 = all(ord(c) < 256 for c in label) + if not is_latin1: + raise ValueError( + "Variable labels must contain only characters that " + "can be encoded in Latin-1" + ) + self._write(_pad_bytes(label, 81)) + else: + self._write(blank) + + def _convert_strls(self, data: DataFrame) -> DataFrame: + """No-op, future compatibility""" + return data + + def _prepare_data(self) -> np.rec.recarray: + data = self.data + typlist = self.typlist + convert_dates = self._convert_dates + # 1. Convert dates + if self._convert_dates is not None: + for i, col in enumerate(data): + if i in convert_dates: + data[col] = _datetime_to_stata_elapsed_vec( + data[col], self.fmtlist[i] + ) + # 2. Convert strls + data = self._convert_strls(data) + + # 3. Convert bad string data to '' and pad to correct length + dtypes = {} + native_byteorder = self._byteorder == _set_endianness(sys.byteorder) + for i, col in enumerate(data): + typ = typlist[i] + if typ <= self._max_string_length: + data[col] = data[col].fillna("").apply(_pad_bytes, args=(typ,)) + stype = f"S{typ}" + dtypes[col] = stype + data[col] = data[col].astype(stype) + else: + dtype = data[col].dtype + if not native_byteorder: + dtype = dtype.newbyteorder(self._byteorder) + dtypes[col] = dtype + + return data.to_records(index=False, column_dtypes=dtypes) + + def _write_data(self, records: np.rec.recarray) -> None: + self._write_bytes(records.tobytes()) + + @staticmethod + def _null_terminate_str(s: str) -> str: + s += "\x00" + return s + + def _null_terminate_bytes(self, s: str) -> bytes: + return self._null_terminate_str(s).encode(self._encoding) + + +def _dtype_to_stata_type_117(dtype: np.dtype, column: Series, force_strl: bool) -> int: + """ + Converts dtype types to stata types. Returns the byte of the given ordinal. + See TYPE_MAP and comments for an explanation. This is also explained in + the dta spec. 
+    1 - 2045 are strings of this length
+                Pandas    Stata
+    32768 - for object    strL
+    65526 - for float64   double
+    65527 - for float32   float
+    65528 - for int32     long
+    65529 - for int16     int
+    65530 - for int8      byte
+
+    If there are dates to convert, then dtype will already have the correct
+    type inserted.
+    """
+    # TODO: expand to handle datetime to integer conversion
+    if force_strl:
+        return 32768
+    if dtype.type is np.object_:  # try to coerce it to the biggest string
+        # not memory efficient, what else could we
+        # do?
+        itemsize = max_len_string_array(ensure_object(column._values))
+        itemsize = max(itemsize, 1)
+        if itemsize <= 2045:
+            return itemsize
+        return 32768
+    elif dtype.type is np.float64:
+        return 65526
+    elif dtype.type is np.float32:
+        return 65527
+    elif dtype.type is np.int32:
+        return 65528
+    elif dtype.type is np.int16:
+        return 65529
+    elif dtype.type is np.int8:
+        return 65530
+    else:  # pragma : no cover
+        raise NotImplementedError(f"Data type {dtype} not supported.")
+
+
+def _pad_bytes_new(name: str | bytes, length: int) -> bytes:
+    """
+    Takes a bytes instance and pads it with null bytes until it is `length`
+    characters long.
+    """
+    if isinstance(name, str):
+        name = bytes(name, "utf-8")
+    return name + b"\x00" * (length - len(name))
+
+
+class StataStrLWriter:
+    """
+    Converter for Stata StrLs
+
+    Stata StrLs map 8 byte values to strings which are stored using a
+    dictionary-like format where strings are keyed to two values.
+
+    Parameters
+    ----------
+    df : DataFrame
+        DataFrame to convert
+    columns : Sequence[str]
+        List of column names to convert to StrL
+    version : int, optional
+        dta version. Currently supports 117, 118 and 119
+    byteorder : str, optional
+        Can be ">", "<", "little", or "big". default is `sys.byteorder`
+
+    Notes
+    -----
+    Supports creation of the StrL block of a dta file for dta versions
+    117, 118 and 119. These differ in how the GSO is stored. 118 and
+    119 store the GSO lookup value as a uint32 and a uint64, while 117
+    uses two uint32s. 118 and 119 also encode all strings as unicode,
+    which is required by the format. 117 uses 'latin-1', a fixed-width
+    encoding that extends the 7-bit ascii table with an additional 128
+    characters.
+    """
+
+    def __init__(
+        self,
+        df: DataFrame,
+        columns: Sequence[str],
+        version: int = 117,
+        byteorder: str | None = None,
+    ) -> None:
+        if version not in (117, 118, 119):
+            raise ValueError("Only dta versions 117, 118 and 119 supported")
+        self._dta_ver = version
+
+        self.df = df
+        self.columns = columns
+        self._gso_table = {"": (0, 0)}
+        if byteorder is None:
+            byteorder = sys.byteorder
+        self._byteorder = _set_endianness(byteorder)
+
+        gso_v_type = "I"  # uint32
+        gso_o_type = "Q"  # uint64
+        self._encoding = "utf-8"
+        if version == 117:
+            o_size = 4
+            gso_o_type = "I"  # 117 used uint32
+            self._encoding = "latin-1"
+        elif version == 118:
+            o_size = 6
+        else:  # version == 119
+            o_size = 5
+        self._o_offset = 2 ** (8 * (8 - o_size))
+        self._gso_o_type = gso_o_type
+        self._gso_v_type = gso_v_type
+
+    def _convert_key(self, key: tuple[int, int]) -> int:
+        v, o = key
+        return v + self._o_offset * o
+
+    def generate_table(self) -> tuple[dict[str, tuple[int, int]], DataFrame]:
+        """
+        Generates the GSO lookup table for the DataFrame
+
+        Returns
+        -------
+        gso_table : dict
+            Ordered dictionary using the strings found as keys
+            and their lookup position (v, o) as values
+        gso_df : DataFrame
+            DataFrame where strl columns have been converted to
+            (v, o) values
+
+        Notes
+        -----
+        Modifies the DataFrame in-place.
+ + The DataFrame returned encodes the (v,o) values as uint64s. The + encoding depends on the dta version, and can be expressed as + + enc = v + o * 2 ** (o_size * 8) + + so that v is stored in the lower bits and o is in the upper + bits. o_size is + + * 117: 4 + * 118: 6 + * 119: 5 + """ + gso_table = self._gso_table + gso_df = self.df + columns = list(gso_df.columns) + selected = gso_df[self.columns] + col_index = [(col, columns.index(col)) for col in self.columns] + keys = np.empty(selected.shape, dtype=np.uint64) + for o, (idx, row) in enumerate(selected.iterrows()): + for j, (col, v) in enumerate(col_index): + val = row[col] + # Allow columns with mixed str and None (GH 23633) + val = "" if val is None else val + key = gso_table.get(val, None) + if key is None: + # Stata prefers human numbers + key = (v + 1, o + 1) + gso_table[val] = key + keys[o, j] = self._convert_key(key) + for i, col in enumerate(self.columns): + gso_df[col] = keys[:, i] + + return gso_table, gso_df + + def generate_blob(self, gso_table: dict[str, tuple[int, int]]) -> bytes: + """ + Generates the binary blob of GSOs that is written to the dta file. + + Parameters + ---------- + gso_table : dict + Ordered dictionary (str, vo) + + Returns + ------- + gso : bytes + Binary content of dta file to be placed between strl tags + + Notes + ----- + Output format depends on dta version. 117 uses two uint32s to + express v and o while 118+ uses a uint32 for v and a uint64 for o. + """ + # Format information + # Length includes null term + # 117 + # GSOvvvvooootllllxxxxxxxxxxxxxxx...x + # 3 u4 u4 u1 u4 string + null term + # + # 118, 119 + # GSOvvvvooooooootllllxxxxxxxxxxxxxxx...x + # 3 u4 u8 u1 u4 string + null term + + bio = BytesIO() + gso = bytes("GSO", "ascii") + gso_type = struct.pack(self._byteorder + "B", 130) + null = struct.pack(self._byteorder + "B", 0) + v_type = self._byteorder + self._gso_v_type + o_type = self._byteorder + self._gso_o_type + len_type = self._byteorder + "I" + for strl, vo in gso_table.items(): + if vo == (0, 0): + continue + v, o = vo + + # GSO + bio.write(gso) + + # vvvv + bio.write(struct.pack(v_type, v)) + + # oooo / oooooooo + bio.write(struct.pack(o_type, o)) + + # t + bio.write(gso_type) + + # llll + utf8_string = bytes(strl, "utf-8") + bio.write(struct.pack(len_type, len(utf8_string) + 1)) + + # xxx...xxx + bio.write(utf8_string) + bio.write(null) + + return bio.getvalue() + + +class StataWriter117(StataWriter): + """ + A class for writing Stata binary dta files in Stata 13 format (117) + + Parameters + ---------- + fname : path (string), buffer or path object + string, path object (pathlib.Path or py._path.local.LocalPath) or + object implementing a binary write() functions. If using a buffer + then the buffer will not be automatically closed after the file + is written. + data : DataFrame + Input to save + convert_dates : dict + Dictionary mapping columns containing datetime types to stata internal + format to use when writing the dates. Options are 'tc', 'td', 'tm', + 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. + Datetime columns that do not have a conversion type specified will be + converted to 'tc'. Raises NotImplementedError if a datetime column has + timezone information + write_index : bool + Write the index to Stata dataset. + byteorder : str + Can be ">", "<", "little", or "big". default is `sys.byteorder` + time_stamp : datetime + A datetime to use as file creation date. Default is the current time + data_label : str + A label for the data set. 
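Both the key packing done by _convert_key and the per-entry layout emitted by generate_blob can be worked through by hand. The values below are illustrative and use the 117 variant (o_size 4, uint32 for both v and o):

import struct

# (v, o) -> integer key: o is shifted past the bits reserved for v.
o_size = 4                            # 4 in format 117, 6 in 118, 5 in 119
o_offset = 2 ** (8 * (8 - o_size))    # 2**32 for 117
v, o = 1, 1                           # 1-based variable/observation indices
print(hex(v + o_offset * o))          # 0x100000001

# One GSO entry as generate_blob writes it for format 117.
utf8 = "A long string".encode("utf-8")
entry = (
    b"GSO"
    + struct.pack("<I", v)              # vvvv
    + struct.pack("<I", o)              # oooo ("<Q" in formats 118/119)
    + struct.pack("<B", 130)            # t: 130 marks a NUL-terminated string
    + struct.pack("<I", len(utf8) + 1)  # llll, length includes the NUL
    + utf8
    + b"\x00"
)
print(entry)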
Must be 80 characters or smaller. + variable_labels : dict + Dictionary containing columns as keys and variable labels as values. + Each label must be 80 characters or smaller. + convert_strl : list + List of columns names to convert to Stata StrL format. Columns with + more than 2045 characters are automatically written as StrL. + Smaller columns can be converted by including the column name. Using + StrLs can reduce output file size when strings are longer than 8 + characters, and either frequently repeated or sparse. + {compression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + value_labels : dict of dicts + Dictionary containing columns as keys and dictionaries of column value + to labels as values. The combined length of all labels for a single + variable must be 32,000 characters or smaller. + + .. versionadded:: 1.4.0 + + Returns + ------- + writer : StataWriter117 instance + The StataWriter117 instance has a write_file method, which will + write the file to the given `fname`. + + Raises + ------ + NotImplementedError + * If datetimes contain timezone information + ValueError + * Columns listed in convert_dates are neither datetime64[ns] + or datetime + * Column dtype is not representable in Stata + * Column listed in convert_dates is not in DataFrame + * Categorical label contains more than 32,000 characters + + Examples + -------- + >>> data = pd.DataFrame([[1.0, 1, 'a']], columns=['a', 'b', 'c']) + >>> writer = pd.io.stata.StataWriter117('./data_file.dta', data) + >>> writer.write_file() + + Directly write a zip file + >>> compression = {"method": "zip", "archive_name": "data_file.dta"} + >>> writer = pd.io.stata.StataWriter117( + ... './data_file.zip', data, compression=compression + ... ) + >>> writer.write_file() + + Or with long strings stored in strl format + >>> data = pd.DataFrame([['A relatively long string'], [''], ['']], + ... columns=['strls']) + >>> writer = pd.io.stata.StataWriter117( + ... 
'./data_file_with_long_strings.dta', data, convert_strl=['strls'])
+    >>> writer.write_file()
+    """
+
+    _max_string_length = 2045
+    _dta_version = 117
+
+    def __init__(
+        self,
+        fname: FilePath | WriteBuffer[bytes],
+        data: DataFrame,
+        convert_dates: dict[Hashable, str] | None = None,
+        write_index: bool = True,
+        byteorder: str | None = None,
+        time_stamp: datetime | None = None,
+        data_label: str | None = None,
+        variable_labels: dict[Hashable, str] | None = None,
+        convert_strl: Sequence[Hashable] | None = None,
+        compression: CompressionOptions = "infer",
+        storage_options: StorageOptions | None = None,
+        *,
+        value_labels: dict[Hashable, dict[float, str]] | None = None,
+    ) -> None:
+        # Copy to new list since convert_strl might be modified later
+        self._convert_strl: list[Hashable] = []
+        if convert_strl is not None:
+            self._convert_strl.extend(convert_strl)
+
+        super().__init__(
+            fname,
+            data,
+            convert_dates,
+            write_index,
+            byteorder=byteorder,
+            time_stamp=time_stamp,
+            data_label=data_label,
+            variable_labels=variable_labels,
+            value_labels=value_labels,
+            compression=compression,
+            storage_options=storage_options,
+        )
+        self._map: dict[str, int] = {}
+        self._strl_blob = b""
+
+    @staticmethod
+    def _tag(val: str | bytes, tag: str) -> bytes:
+        """Surround val with <tag></tag>"""
+        if isinstance(val, str):
+            val = bytes(val, "utf-8")
+        return bytes("<" + tag + ">", "utf-8") + val + bytes("</" + tag + ">", "utf-8")
+
+    def _update_map(self, tag: str) -> None:
+        """Update map location for tag with file position"""
+        assert self.handles.handle is not None
+        self._map[tag] = self.handles.handle.tell()
+
+    def _write_header(
+        self,
+        data_label: str | None = None,
+        time_stamp: datetime | None = None,
+    ) -> None:
+        """Write the file header"""
+        byteorder = self._byteorder
+        self._write_bytes(bytes("<stata_dta>", "utf-8"))
+        bio = BytesIO()
+        # ds_format - 117
+        bio.write(self._tag(bytes(str(self._dta_version), "utf-8"), "release"))
+        # byteorder
+        bio.write(self._tag(byteorder == ">" and "MSF" or "LSF", "byteorder"))
+        # number of vars, 2 bytes in 117 and 118, 4 bytes in 119
+        nvar_type = "H" if self._dta_version <= 118 else "I"
+        bio.write(self._tag(struct.pack(byteorder + nvar_type, self.nvar), "K"))
+        # 117 uses 4 bytes, 118 uses 8
+        nobs_size = "I" if self._dta_version == 117 else "Q"
+        bio.write(self._tag(struct.pack(byteorder + nobs_size, self.nobs), "N"))
+        # data label 81 bytes, char, null terminated
+        label = data_label[:80] if data_label is not None else ""
+        encoded_label = label.encode(self._encoding)
+        label_size = "B" if self._dta_version == 117 else "H"
+        label_len = struct.pack(byteorder + label_size, len(encoded_label))
+        encoded_label = label_len + encoded_label
+        bio.write(self._tag(encoded_label, "label"))
+        # time stamp, 18 bytes, char, null terminated
+        # format dd Mon yyyy hh:mm
+        if time_stamp is None:
+            time_stamp = datetime.now()
+        elif not isinstance(time_stamp, datetime):
+            raise ValueError("time_stamp should be datetime type")
+        # Avoid locale-specific month conversion
+        months = [
+            "Jan",
+            "Feb",
+            "Mar",
+            "Apr",
+            "May",
+            "Jun",
+            "Jul",
+            "Aug",
+            "Sep",
+            "Oct",
+            "Nov",
+            "Dec",
+        ]
+        month_lookup = {i + 1: month for i, month in enumerate(months)}
+        ts = (
+            time_stamp.strftime("%d ")
+            + month_lookup[time_stamp.month]
+            + time_stamp.strftime(" %Y %H:%M")
+        )
+        # '\x11' added due to inspection of Stata file
+        stata_ts = b"\x11" + bytes(ts, "utf-8")
+        bio.write(self._tag(stata_ts, "timestamp"))
+        self._write_bytes(self._tag(bio.getvalue(), "header"))
+
+    def _write_map(self) ->
None: + """ + Called twice during file write. The first populates the values in + the map with 0s. The second call writes the final map locations when + all blocks have been written. + """ + if not self._map: + self._map = { + "stata_data": 0, + "map": self.handles.handle.tell(), + "variable_types": 0, + "varnames": 0, + "sortlist": 0, + "formats": 0, + "value_label_names": 0, + "variable_labels": 0, + "characteristics": 0, + "data": 0, + "strls": 0, + "value_labels": 0, + "stata_data_close": 0, + "end-of-file": 0, + } + # Move to start of map + self.handles.handle.seek(self._map["map"]) + bio = BytesIO() + for val in self._map.values(): + bio.write(struct.pack(self._byteorder + "Q", val)) + self._write_bytes(self._tag(bio.getvalue(), "map")) + + def _write_variable_types(self) -> None: + self._update_map("variable_types") + bio = BytesIO() + for typ in self.typlist: + bio.write(struct.pack(self._byteorder + "H", typ)) + self._write_bytes(self._tag(bio.getvalue(), "variable_types")) + + def _write_varnames(self) -> None: + self._update_map("varnames") + bio = BytesIO() + # 118 scales by 4 to accommodate utf-8 data worst case encoding + vn_len = 32 if self._dta_version == 117 else 128 + for name in self.varlist: + name = self._null_terminate_str(name) + name = _pad_bytes_new(name[:32].encode(self._encoding), vn_len + 1) + bio.write(name) + self._write_bytes(self._tag(bio.getvalue(), "varnames")) + + def _write_sortlist(self) -> None: + self._update_map("sortlist") + sort_size = 2 if self._dta_version < 119 else 4 + self._write_bytes(self._tag(b"\x00" * sort_size * (self.nvar + 1), "sortlist")) + + def _write_formats(self) -> None: + self._update_map("formats") + bio = BytesIO() + fmt_len = 49 if self._dta_version == 117 else 57 + for fmt in self.fmtlist: + bio.write(_pad_bytes_new(fmt.encode(self._encoding), fmt_len)) + self._write_bytes(self._tag(bio.getvalue(), "formats")) + + def _write_value_label_names(self) -> None: + self._update_map("value_label_names") + bio = BytesIO() + # 118 scales by 4 to accommodate utf-8 data worst case encoding + vl_len = 32 if self._dta_version == 117 else 128 + for i in range(self.nvar): + # Use variable name when categorical + name = "" # default name + if self._has_value_labels[i]: + name = self.varlist[i] + name = self._null_terminate_str(name) + encoded_name = _pad_bytes_new(name[:32].encode(self._encoding), vl_len + 1) + bio.write(encoded_name) + self._write_bytes(self._tag(bio.getvalue(), "value_label_names")) + + def _write_variable_labels(self) -> None: + # Missing labels are 80 blank characters plus null termination + self._update_map("variable_labels") + bio = BytesIO() + # 118 scales by 4 to accommodate utf-8 data worst case encoding + vl_len = 80 if self._dta_version == 117 else 320 + blank = _pad_bytes_new("", vl_len + 1) + + if self._variable_labels is None: + for _ in range(self.nvar): + bio.write(blank) + self._write_bytes(self._tag(bio.getvalue(), "variable_labels")) + return + + for col in self.data: + if col in self._variable_labels: + label = self._variable_labels[col] + if len(label) > 80: + raise ValueError("Variable labels must be 80 characters or fewer") + try: + encoded = label.encode(self._encoding) + except UnicodeEncodeError as err: + raise ValueError( + "Variable labels must contain only characters that " + f"can be encoded in {self._encoding}" + ) from err + + bio.write(_pad_bytes_new(encoded, vl_len + 1)) + else: + bio.write(blank) + self._write_bytes(self._tag(bio.getvalue(), "variable_labels")) + + def 
_write_characteristics(self) -> None:
+        self._update_map("characteristics")
+        self._write_bytes(self._tag(b"", "characteristics"))
+
+    def _write_data(self, records) -> None:
+        self._update_map("data")
+        self._write_bytes(b"<data>")
+        self._write_bytes(records.tobytes())
+        self._write_bytes(b"</data>")
+
+    def _write_strls(self) -> None:
+        self._update_map("strls")
+        self._write_bytes(self._tag(self._strl_blob, "strls"))
+
+    def _write_expansion_fields(self) -> None:
+        """No-op in dta 117+"""
+
+    def _write_value_labels(self) -> None:
+        self._update_map("value_labels")
+        bio = BytesIO()
+        for vl in self._value_labels:
+            lab = vl.generate_value_label(self._byteorder)
+            lab = self._tag(lab, "lbl")
+            bio.write(lab)
+        self._write_bytes(self._tag(bio.getvalue(), "value_labels"))
+
+    def _write_file_close_tag(self) -> None:
+        self._update_map("stata_data_close")
+        self._write_bytes(bytes("</stata_dta>", "utf-8"))
+        self._update_map("end-of-file")
+
+    def _update_strl_names(self) -> None:
+        """
+        Update column names for conversion to strl if they might have been
+        changed to comply with Stata naming rules
+        """
+        # Update convert_strl if names changed
+        for orig, new in self._converted_names.items():
+            if orig in self._convert_strl:
+                idx = self._convert_strl.index(orig)
+                self._convert_strl[idx] = new
+
+    def _convert_strls(self, data: DataFrame) -> DataFrame:
+        """
+        Convert columns to StrLs if either very large or in the
+        convert_strl variable
+        """
+        convert_cols = [
+            col
+            for i, col in enumerate(data)
+            if self.typlist[i] == 32768 or col in self._convert_strl
+        ]
+
+        if convert_cols:
+            ssw = StataStrLWriter(data, convert_cols, version=self._dta_version)
+            tab, new_data = ssw.generate_table()
+            data = new_data
+            self._strl_blob = ssw.generate_blob(tab)
+        return data
+
+    def _set_formats_and_types(self, dtypes: Series) -> None:
+        self.typlist = []
+        self.fmtlist = []
+        for col, dtype in dtypes.items():
+            force_strl = col in self._convert_strl
+            fmt = _dtype_to_default_stata_fmt(
+                dtype,
+                self.data[col],
+                dta_version=self._dta_version,
+                force_strl=force_strl,
+            )
+            self.fmtlist.append(fmt)
+            self.typlist.append(
+                _dtype_to_stata_type_117(dtype, self.data[col], force_strl)
+            )
+
+
+class StataWriterUTF8(StataWriter117):
+    """
+    Stata binary dta file writing in Stata 15 (118) and 16 (119) formats
+
+    DTA 118 and 119 format files support unicode string data (both fixed
+    and strL). Unicode is also supported in value labels, variable
+    labels and the dataset label. Format 119 is automatically used if the
+    file contains more than 32,767 variables.
+
+    Parameters
+    ----------
+    fname : path (string), buffer or path object
+        string, path object (pathlib.Path or py._path.local.LocalPath) or
+        object implementing a binary write() function. If using a buffer
+        then the buffer will not be automatically closed after the file
+        is written.
+    data : DataFrame
+        Input to save
+    convert_dates : dict, default None
+        Dictionary mapping columns containing datetime types to stata internal
+        format to use when writing the dates. Options are 'tc', 'td', 'tm',
+        'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
+        Datetime columns that do not have a conversion type specified will be
+        converted to 'tc'. Raises NotImplementedError if a datetime column has
+        timezone information
+    write_index : bool, default True
+        Write the index to Stata dataset.
+    byteorder : str, default None
+        Can be ">", "<", "little", or "big".
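A version-117 file really is a sequence of tagged sections bracketed by <stata_dta> and </stata_dta>, which can be confirmed by scanning a freshly written file. A small sketch (frame contents illustrative):

import io
import pandas as pd

df = pd.DataFrame({"a": [1.0], "b": [1]})
buf = io.BytesIO()
df.to_stata(buf, write_index=False, version=117)
raw = buf.getvalue()
for tag in (b"<stata_dta>", b"<header>", b"<map>", b"<data>", b"</stata_dta>"):
    print(tag.decode(), raw.find(tag))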
default is `sys.byteorder` + time_stamp : datetime, default None + A datetime to use as file creation date. Default is the current time + data_label : str, default None + A label for the data set. Must be 80 characters or smaller. + variable_labels : dict, default None + Dictionary containing columns as keys and variable labels as values. + Each label must be 80 characters or smaller. + convert_strl : list, default None + List of columns names to convert to Stata StrL format. Columns with + more than 2045 characters are automatically written as StrL. + Smaller columns can be converted by including the column name. Using + StrLs can reduce output file size when strings are longer than 8 + characters, and either frequently repeated or sparse. + version : int, default None + The dta version to use. By default, uses the size of data to determine + the version. 118 is used if data.shape[1] <= 32767, and 119 is used + for storing larger DataFrames. + {compression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + value_labels : dict of dicts + Dictionary containing columns as keys and dictionaries of column value + to labels as values. The combined length of all labels for a single + variable must be 32,000 characters or smaller. + + .. versionadded:: 1.4.0 + + Returns + ------- + StataWriterUTF8 + The instance has a write_file method, which will write the file to the + given `fname`. + + Raises + ------ + NotImplementedError + * If datetimes contain timezone information + ValueError + * Columns listed in convert_dates are neither datetime64[ns] + or datetime + * Column dtype is not representable in Stata + * Column listed in convert_dates is not in DataFrame + * Categorical label contains more than 32,000 characters + + Examples + -------- + Using Unicode data and column names + + >>> from pandas.io.stata import StataWriterUTF8 + >>> data = pd.DataFrame([[1.0, 1, 'ᴬ']], columns=['a', 'β', 'ĉ']) + >>> writer = StataWriterUTF8('./data_file.dta', data) + >>> writer.write_file() + + Directly write a zip file + >>> compression = {"method": "zip", "archive_name": "data_file.dta"} + >>> writer = StataWriterUTF8('./data_file.zip', data, compression=compression) + >>> writer.write_file() + + Or with long strings stored in strl format + + >>> data = pd.DataFrame([['ᴀ relatively long ŝtring'], [''], ['']], + ... columns=['strls']) + >>> writer = StataWriterUTF8('./data_file_with_long_strings.dta', data, + ... 
convert_strl=['strls']) + >>> writer.write_file() + """ + + _encoding: Literal["utf-8"] = "utf-8" + + def __init__( + self, + fname: FilePath | WriteBuffer[bytes], + data: DataFrame, + convert_dates: dict[Hashable, str] | None = None, + write_index: bool = True, + byteorder: str | None = None, + time_stamp: datetime | None = None, + data_label: str | None = None, + variable_labels: dict[Hashable, str] | None = None, + convert_strl: Sequence[Hashable] | None = None, + version: int | None = None, + compression: CompressionOptions = "infer", + storage_options: StorageOptions | None = None, + *, + value_labels: dict[Hashable, dict[float, str]] | None = None, + ) -> None: + if version is None: + version = 118 if data.shape[1] <= 32767 else 119 + elif version not in (118, 119): + raise ValueError("version must be either 118 or 119.") + elif version == 118 and data.shape[1] > 32767: + raise ValueError( + "You must use version 119 for data sets containing more than" + "32,767 variables" + ) + + super().__init__( + fname, + data, + convert_dates=convert_dates, + write_index=write_index, + byteorder=byteorder, + time_stamp=time_stamp, + data_label=data_label, + variable_labels=variable_labels, + value_labels=value_labels, + convert_strl=convert_strl, + compression=compression, + storage_options=storage_options, + ) + # Override version set in StataWriter117 init + self._dta_version = version + + def _validate_variable_name(self, name: str) -> str: + """ + Validate variable names for Stata export. + + Parameters + ---------- + name : str + Variable name + + Returns + ------- + str + The validated name with invalid characters replaced with + underscores. + + Notes + ----- + Stata 118+ support most unicode characters. The only limitation is in + the ascii range where the characters supported are a-z, A-Z, 0-9 and _. + """ + # High code points appear to be acceptable + for c in name: + if ( + ( + ord(c) < 128 + and (c < "A" or c > "Z") + and (c < "a" or c > "z") + and (c < "0" or c > "9") + and c != "_" + ) + or 128 <= ord(c) < 192 + or c in {"×", "÷"} # noqa: RUF001 + ): + name = name.replace(c, "_") + + return name diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/io/xml.py b/dbdpy-env/lib/python3.9/site-packages/pandas/io/xml.py new file mode 100644 index 00000000..918fe4d2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/io/xml.py @@ -0,0 +1,1149 @@ +""" +:mod:``pandas.io.xml`` is a module for reading XML. 
+""" + +from __future__ import annotations + +import io +from os import PathLike +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) +import warnings + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency +from pandas.errors import ( + AbstractMethodError, + ParserError, +) +from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.common import is_list_like + +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import ( + file_exists, + get_handle, + infer_compression, + is_file_like, + is_fsspec_url, + is_url, + stringify_path, +) +from pandas.io.parsers import TextParser + +if TYPE_CHECKING: + from collections.abc import Sequence + from xml.etree.ElementTree import Element + + from lxml import etree + + from pandas._typing import ( + CompressionOptions, + ConvertersArg, + DtypeArg, + DtypeBackend, + FilePath, + ParseDatesArg, + ReadBuffer, + StorageOptions, + XMLParsers, + ) + + from pandas import DataFrame + + +@doc( + storage_options=_shared_docs["storage_options"], + decompression_options=_shared_docs["decompression_options"] % "path_or_buffer", +) +class _XMLFrameParser: + """ + Internal subclass to parse XML into DataFrames. + + Parameters + ---------- + path_or_buffer : a valid JSON ``str``, path object or file-like object + Any valid string path is acceptable. The string could be a URL. Valid + URL schemes include http, ftp, s3, and file. + + xpath : str or regex + The ``XPath`` expression to parse required set of nodes for + migration to :class:`~pandas.DataFrame`. ``etree`` supports limited ``XPath``. + + namespaces : dict + The namespaces defined in XML document (``xmlns:namespace='URI'``) + as dicts with key being namespace and value the URI. + + elems_only : bool + Parse only the child elements at the specified ``xpath``. + + attrs_only : bool + Parse only the attributes at the specified ``xpath``. + + names : list + Column names for :class:`~pandas.DataFrame`of parsed XML data. + + dtype : dict + Data type for data or columns. E.g. {{'a': np.float64, + 'b': np.int32, 'c': 'Int64'}} + + .. versionadded:: 1.5.0 + + converters : dict, optional + Dict of functions for converting values in certain columns. Keys can + either be integers or column labels. + + .. versionadded:: 1.5.0 + + parse_dates : bool or list of int or names or list of lists or dict + Converts either index or select columns to datetimes + + .. versionadded:: 1.5.0 + + encoding : str + Encoding of xml object or document. + + stylesheet : str or file-like + URL, file, file-like object, or a raw string containing XSLT, + ``etree`` does not support XSLT but retained for consistency. + + iterparse : dict, optional + Dict with row element as key and list of descendant elements + and/or attributes as value to be retrieved in iterparsing of + XML document. + + .. versionadded:: 1.5.0 + + {decompression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + {storage_options} + + See also + -------- + pandas.io.xml._EtreeFrameParser + pandas.io.xml._LxmlFrameParser + + Notes + ----- + To subclass this class effectively you must override the following methods:` + * :func:`parse_data` + * :func:`_parse_nodes` + * :func:`_iterparse_nodes` + * :func:`_parse_doc` + * :func:`_validate_names` + * :func:`_validate_path` + + + See each method's respective documentation for details on their + functionality. 
+ """ + + def __init__( + self, + path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str], + xpath: str, + namespaces: dict[str, str] | None, + elems_only: bool, + attrs_only: bool, + names: Sequence[str] | None, + dtype: DtypeArg | None, + converters: ConvertersArg | None, + parse_dates: ParseDatesArg | None, + encoding: str | None, + stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None, + iterparse: dict[str, list[str]] | None, + compression: CompressionOptions, + storage_options: StorageOptions, + ) -> None: + self.path_or_buffer = path_or_buffer + self.xpath = xpath + self.namespaces = namespaces + self.elems_only = elems_only + self.attrs_only = attrs_only + self.names = names + self.dtype = dtype + self.converters = converters + self.parse_dates = parse_dates + self.encoding = encoding + self.stylesheet = stylesheet + self.iterparse = iterparse + self.is_style = None + self.compression: CompressionOptions = compression + self.storage_options = storage_options + + def parse_data(self) -> list[dict[str, str | None]]: + """ + Parse xml data. + + This method will call the other internal methods to + validate ``xpath``, names, parse and return specific nodes. + """ + + raise AbstractMethodError(self) + + def _parse_nodes(self, elems: list[Any]) -> list[dict[str, str | None]]: + """ + Parse xml nodes. + + This method will parse the children and attributes of elements + in ``xpath``, conditionally for only elements, only attributes + or both while optionally renaming node names. + + Raises + ------ + ValueError + * If only elements and only attributes are specified. + + Notes + ----- + Namespace URIs will be removed from return node values. Also, + elements with missing children or attributes compared to siblings + will have optional keys filled with None values. + """ + + dicts: list[dict[str, str | None]] + + if self.elems_only and self.attrs_only: + raise ValueError("Either element or attributes can be parsed not both.") + if self.elems_only: + if self.names: + dicts = [ + { + **( + {el.tag: el.text} + if el.text and not el.text.isspace() + else {} + ), + **{ + nm: ch.text if ch.text else None + for nm, ch in zip(self.names, el.findall("*")) + }, + } + for el in elems + ] + else: + dicts = [ + {ch.tag: ch.text if ch.text else None for ch in el.findall("*")} + for el in elems + ] + + elif self.attrs_only: + dicts = [ + {k: v if v else None for k, v in el.attrib.items()} for el in elems + ] + + elif self.names: + dicts = [ + { + **el.attrib, + **({el.tag: el.text} if el.text and not el.text.isspace() else {}), + **{ + nm: ch.text if ch.text else None + for nm, ch in zip(self.names, el.findall("*")) + }, + } + for el in elems + ] + + else: + dicts = [ + { + **el.attrib, + **({el.tag: el.text} if el.text and not el.text.isspace() else {}), + **{ch.tag: ch.text if ch.text else None for ch in el.findall("*")}, + } + for el in elems + ] + + dicts = [ + {k.split("}")[1] if "}" in k else k: v for k, v in d.items()} for d in dicts + ] + + keys = list(dict.fromkeys([k for d in dicts for k in d.keys()])) + dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts] + + if self.names: + dicts = [dict(zip(self.names, d.values())) for d in dicts] + + return dicts + + def _iterparse_nodes(self, iterparse: Callable) -> list[dict[str, str | None]]: + """ + Iterparse xml nodes. 
+
+        This method reads decompressed XML files from local disk for elements
+        and underlying descendants using iterparse, a method to iterate through
+        an XML tree without holding the entire tree in memory.
+
+        Raises
+        ------
+        TypeError
+            * If ``iterparse`` is not a dict or its dict value is not list-like.
+        ParserError
+            * If ``path_or_buffer`` is not a physical file on disk or file-like object.
+            * If no data is returned from selected items in ``iterparse``.
+
+        Notes
+        -----
+        Namespace URIs will be removed from return node values. Also,
+        elements with missing children or attributes in submitted list
+        will have optional keys filled with None values.
+        """
+
+        dicts: list[dict[str, str | None]] = []
+        row: dict[str, str | None] | None = None
+
+        if not isinstance(self.iterparse, dict):
+            raise TypeError(
+                f"{type(self.iterparse).__name__} is not a valid type for iterparse"
+            )
+
+        row_node = next(iter(self.iterparse.keys())) if self.iterparse else ""
+        if not is_list_like(self.iterparse[row_node]):
+            raise TypeError(
+                f"{type(self.iterparse[row_node])} is not a valid type "
+                "for value in iterparse"
+            )
+
+        if (not hasattr(self.path_or_buffer, "read")) and (
+            not isinstance(self.path_or_buffer, (str, PathLike))
+            or is_url(self.path_or_buffer)
+            or is_fsspec_url(self.path_or_buffer)
+            or (
+                isinstance(self.path_or_buffer, str)
+                and self.path_or_buffer.startswith(("<?xml", "<"))
+            )
+            or infer_compression(self.path_or_buffer, "infer") is not None
+        ):
+            raise ParserError(
+                "iterparse is designed for large XML files that are fully extracted on "
+                "local disk and not as compressed files or online sources."
+            )
+
+        iterparse_repeats = len(self.iterparse[row_node]) != len(
+            set(self.iterparse[row_node])
+        )
+
+        for event, elem in iterparse(self.path_or_buffer, events=("start", "end")):
+            curr_elem = elem.tag.split("}")[1] if "}" in elem.tag else elem.tag
+
+            if event == "start":
+                if curr_elem == row_node:
+                    row = {}
+
+            if row is not None:
+                if self.names and iterparse_repeats:
+                    for col, nm in zip(self.iterparse[row_node], self.names):
+                        if curr_elem == col:
+                            elem_val = elem.text if elem.text else None
+                            if elem_val not in row.values() and nm not in row:
+                                row[nm] = elem_val
+
+                        if col in elem.attrib:
+                            if elem.attrib[col] not in row.values() and nm not in row:
+                                row[nm] = elem.attrib[col]
+                else:
+                    for col in self.iterparse[row_node]:
+                        if curr_elem == col:
+                            row[col] = elem.text if elem.text else None
+                        if col in elem.attrib:
+                            row[col] = elem.attrib[col]
+
+            if event == "end":
+                if curr_elem == row_node and row is not None:
+                    dicts.append(row)
+                    row = None
+
+                    elem.clear()
+                    if hasattr(elem, "getprevious"):
+                        while (
+                            elem.getprevious() is not None
+                            and elem.getparent() is not None
+                        ):
+                            del elem.getparent()[0]
+
+        if dicts == []:
+            raise ParserError("No result from selected items in iterparse.")
+
+        keys = list(dict.fromkeys([k for d in dicts for k in d.keys()]))
+        dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts]
+
+        if self.names:
+            dicts = [dict(zip(self.names, d.values())) for d in dicts]
+
+        return dicts
+
+    def _validate_path(self) -> list[Any]:
+        """
+        Validate ``xpath``.
+
+        This method checks for syntax, evaluation, or empty nodes return.
+
+        Raises
+        ------
+        SyntaxError
+            * If xpath is not supported or issues with namespaces.
+
+        ValueError
+            * If xpath does not return any nodes.
+        """
+
+        raise AbstractMethodError(self)
+
+    def _validate_names(self) -> None:
+        """
+        Validate names.
+
+        This method will check if names is a list-like and aligns
+        with length of parse nodes.
+
+        Raises
+        ------
+        ValueError
+            * If value is not a list and less than length of nodes.
+        """
+        raise AbstractMethodError(self)
+
+    def _parse_doc(
+        self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str]
+    ) -> Element | etree._Element:
+        """
+        Build tree from path_or_buffer.
+
+        This method will parse XML object into tree
+        either from string/bytes or file location.
+        """
+        raise AbstractMethodError(self)
+
+
+class _EtreeFrameParser(_XMLFrameParser):
+    """
+    Internal class to parse XML into DataFrames with the Python
+    standard library XML module: `xml.etree.ElementTree`.
+    """
+
+    def parse_data(self) -> list[dict[str, str | None]]:
+        from xml.etree.ElementTree import iterparse
+
+        if self.stylesheet is not None:
+            raise ValueError(
+                "To use stylesheet, you need lxml installed and selected as parser."
+            )
+
+        if self.iterparse is None:
+            self.xml_doc = self._parse_doc(self.path_or_buffer)
+            elems = self._validate_path()
+
+        self._validate_names()
+
+        xml_dicts: list[dict[str, str | None]] = (
+            self._parse_nodes(elems)
+            if self.iterparse is None
+            else self._iterparse_nodes(iterparse)
+        )
+
+        return xml_dicts
+
+    def _validate_path(self) -> list[Any]:
+        """
+        Notes
+        -----
+        ``etree`` supports limited ``XPath``. If the user attempts a more
+        complex expression, a syntax error will be raised.
+        """
+
+        msg = (
+            "xpath does not return any nodes or attributes. "
+            "Be sure to specify in `xpath` the parent nodes of "
+            "children and attributes to parse. "
+            "If document uses namespaces denoted with "
+            "xmlns, be sure to define namespaces and "
+            "use them in xpath."
+ ) + try: + elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces) + children = [ch for el in elems for ch in el.findall("*")] + attrs = {k: v for el in elems for k, v in el.attrib.items()} + + if elems is None: + raise ValueError(msg) + + if elems is not None: + if self.elems_only and children == []: + raise ValueError(msg) + if self.attrs_only and attrs == {}: + raise ValueError(msg) + if children == [] and attrs == {}: + raise ValueError(msg) + + except (KeyError, SyntaxError): + raise SyntaxError( + "You have used an incorrect or unsupported XPath " + "expression for etree library or you used an " + "undeclared namespace prefix." + ) + + return elems + + def _validate_names(self) -> None: + children: list[Any] + + if self.names: + if self.iterparse: + children = self.iterparse[next(iter(self.iterparse))] + else: + parent = self.xml_doc.find(self.xpath, namespaces=self.namespaces) + children = parent.findall("*") if parent is not None else [] + + if is_list_like(self.names): + if len(self.names) < len(children): + raise ValueError( + "names does not match length of child elements in xpath." + ) + else: + raise TypeError( + f"{type(self.names).__name__} is not a valid type for names" + ) + + def _parse_doc( + self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str] + ) -> Element: + from xml.etree.ElementTree import ( + XMLParser, + parse, + ) + + handle_data = get_data_from_filepath( + filepath_or_buffer=raw_doc, + encoding=self.encoding, + compression=self.compression, + storage_options=self.storage_options, + ) + + with preprocess_data(handle_data) as xml_data: + curr_parser = XMLParser(encoding=self.encoding) + document = parse(xml_data, parser=curr_parser) + + return document.getroot() + + +class _LxmlFrameParser(_XMLFrameParser): + """ + Internal class to parse XML into :class:`~pandas.DataFrame` with third-party + full-featured XML library, ``lxml``, that supports + ``XPath`` 1.0 and XSLT 1.0. + """ + + def parse_data(self) -> list[dict[str, str | None]]: + """ + Parse xml data. + + This method will call the other internal methods to + validate ``xpath``, names, optionally parse and run XSLT, + and parse original or transformed XML and return specific nodes. + """ + from lxml.etree import iterparse + + if self.iterparse is None: + self.xml_doc = self._parse_doc(self.path_or_buffer) + + if self.stylesheet: + self.xsl_doc = self._parse_doc(self.stylesheet) + self.xml_doc = self._transform_doc() + + elems = self._validate_path() + + self._validate_names() + + xml_dicts: list[dict[str, str | None]] = ( + self._parse_nodes(elems) + if self.iterparse is None + else self._iterparse_nodes(iterparse) + ) + + return xml_dicts + + def _validate_path(self) -> list[Any]: + msg = ( + "xpath does not return any nodes or attributes. " + "Be sure to specify in `xpath` the parent nodes of " + "children and attributes to parse. " + "If document uses namespaces denoted with " + "xmlns, be sure to define namespaces and " + "use them in xpath." 
+ ) + + elems = self.xml_doc.xpath(self.xpath, namespaces=self.namespaces) + children = [ch for el in elems for ch in el.xpath("*")] + attrs = {k: v for el in elems for k, v in el.attrib.items()} + + if elems == []: + raise ValueError(msg) + + if elems != []: + if self.elems_only and children == []: + raise ValueError(msg) + if self.attrs_only and attrs == {}: + raise ValueError(msg) + if children == [] and attrs == {}: + raise ValueError(msg) + + return elems + + def _validate_names(self) -> None: + children: list[Any] + + if self.names: + if self.iterparse: + children = self.iterparse[next(iter(self.iterparse))] + else: + children = self.xml_doc.xpath( + self.xpath + "[1]/*", namespaces=self.namespaces + ) + + if is_list_like(self.names): + if len(self.names) < len(children): + raise ValueError( + "names does not match length of child elements in xpath." + ) + else: + raise TypeError( + f"{type(self.names).__name__} is not a valid type for names" + ) + + def _parse_doc( + self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str] + ) -> etree._Element: + from lxml.etree import ( + XMLParser, + fromstring, + parse, + ) + + handle_data = get_data_from_filepath( + filepath_or_buffer=raw_doc, + encoding=self.encoding, + compression=self.compression, + storage_options=self.storage_options, + ) + + with preprocess_data(handle_data) as xml_data: + curr_parser = XMLParser(encoding=self.encoding) + + if isinstance(xml_data, io.StringIO): + if self.encoding is None: + raise TypeError( + "Can not pass encoding None when input is StringIO." + ) + + document = fromstring( + xml_data.getvalue().encode(self.encoding), parser=curr_parser + ) + else: + document = parse(xml_data, parser=curr_parser) + + return document + + def _transform_doc(self) -> etree._XSLTResultTree: + """ + Transform original tree using stylesheet. + + This method will transform original xml using XSLT script into + am ideally flatter xml document for easier parsing and migration + to Data Frame. + """ + from lxml.etree import XSLT + + transformer = XSLT(self.xsl_doc) + new_doc = transformer(self.xml_doc) + + return new_doc + + +def get_data_from_filepath( + filepath_or_buffer: FilePath | bytes | ReadBuffer[bytes] | ReadBuffer[str], + encoding: str | None, + compression: CompressionOptions, + storage_options: StorageOptions, +) -> str | bytes | ReadBuffer[bytes] | ReadBuffer[str]: + """ + Extract raw XML data. + + The method accepts three input types: + 1. filepath (string-like) + 2. file-like object (e.g. open file object, StringIO) + 3. XML string or bytes + + This method turns (1) into (2) to simplify the rest of the processing. + It returns input types (2) and (3) unchanged. + """ + if not isinstance(filepath_or_buffer, bytes): + filepath_or_buffer = stringify_path(filepath_or_buffer) + + if ( + isinstance(filepath_or_buffer, str) + and not filepath_or_buffer.startswith((" io.StringIO | io.BytesIO: + """ + Convert extracted raw data. + + This method will return underlying data of extracted XML content. + The data either has a `read` attribute (e.g. a file object or a + StringIO/BytesIO) or is a string or bytes that is an XML document. + """ + + if isinstance(data, str): + data = io.StringIO(data) + + elif isinstance(data, bytes): + data = io.BytesIO(data) + + return data + + +def _data_to_frame(data, **kwargs) -> DataFrame: + """ + Convert parsed data to Data Frame. 
+ + This method will bind xml dictionary data of keys and values + into named columns of Data Frame using the built-in TextParser + class that build Data Frame and infers specific dtypes. + """ + + tags = next(iter(data)) + nodes = [list(d.values()) for d in data] + + try: + with TextParser(nodes, names=tags, **kwargs) as tp: + return tp.read() + except ParserError: + raise ParserError( + "XML document may be too complex for import. " + "Try to flatten document and use distinct " + "element and attribute names." + ) + + +def _parse( + path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str], + xpath: str, + namespaces: dict[str, str] | None, + elems_only: bool, + attrs_only: bool, + names: Sequence[str] | None, + dtype: DtypeArg | None, + converters: ConvertersArg | None, + parse_dates: ParseDatesArg | None, + encoding: str | None, + parser: XMLParsers, + stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None, + iterparse: dict[str, list[str]] | None, + compression: CompressionOptions, + storage_options: StorageOptions, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + **kwargs, +) -> DataFrame: + """ + Call internal parsers. + + This method will conditionally call internal parsers: + LxmlFrameParser and/or EtreeParser. + + Raises + ------ + ImportError + * If lxml is not installed if selected as parser. + + ValueError + * If parser is not lxml or etree. + """ + + p: _EtreeFrameParser | _LxmlFrameParser + + if isinstance(path_or_buffer, str) and not any( + [ + is_file_like(path_or_buffer), + file_exists(path_or_buffer), + is_url(path_or_buffer), + is_fsspec_url(path_or_buffer), + ] + ): + warnings.warn( + "Passing literal xml to 'read_xml' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if parser == "lxml": + lxml = import_optional_dependency("lxml.etree", errors="ignore") + + if lxml is not None: + p = _LxmlFrameParser( + path_or_buffer, + xpath, + namespaces, + elems_only, + attrs_only, + names, + dtype, + converters, + parse_dates, + encoding, + stylesheet, + iterparse, + compression, + storage_options, + ) + else: + raise ImportError("lxml not found, please install or use the etree parser.") + + elif parser == "etree": + p = _EtreeFrameParser( + path_or_buffer, + xpath, + namespaces, + elems_only, + attrs_only, + names, + dtype, + converters, + parse_dates, + encoding, + stylesheet, + iterparse, + compression, + storage_options, + ) + else: + raise ValueError("Values for parser can only be lxml or etree.") + + data_dicts = p.parse_data() + + return _data_to_frame( + data=data_dicts, + dtype=dtype, + converters=converters, + parse_dates=parse_dates, + dtype_backend=dtype_backend, + **kwargs, + ) + + +@doc( + storage_options=_shared_docs["storage_options"], + decompression_options=_shared_docs["decompression_options"] % "path_or_buffer", +) +def read_xml( + path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str], + *, + xpath: str = "./*", + namespaces: dict[str, str] | None = None, + elems_only: bool = False, + attrs_only: bool = False, + names: Sequence[str] | None = None, + dtype: DtypeArg | None = None, + converters: ConvertersArg | None = None, + parse_dates: ParseDatesArg | None = None, + # encoding can not be None for lxml and StringIO input + encoding: str | None = "utf-8", + parser: XMLParsers = "lxml", + stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None = None, + iterparse: dict[str, 
+@doc(
+    storage_options=_shared_docs["storage_options"],
+    decompression_options=_shared_docs["decompression_options"] % "path_or_buffer",
+)
+def read_xml(
+    path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str],
+    *,
+    xpath: str = "./*",
+    namespaces: dict[str, str] | None = None,
+    elems_only: bool = False,
+    attrs_only: bool = False,
+    names: Sequence[str] | None = None,
+    dtype: DtypeArg | None = None,
+    converters: ConvertersArg | None = None,
+    parse_dates: ParseDatesArg | None = None,
+    # encoding can not be None for lxml and StringIO input
+    encoding: str | None = "utf-8",
+    parser: XMLParsers = "lxml",
+    stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None = None,
+    iterparse: dict[str, list[str]] | None = None,
+    compression: CompressionOptions = "infer",
+    storage_options: StorageOptions | None = None,
+    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+) -> DataFrame:
+    r"""
+    Read XML document into a :class:`~pandas.DataFrame` object.
+
+    .. versionadded:: 1.3.0
+
+    Parameters
+    ----------
+    path_or_buffer : str, path object, or file-like object
+        String, path object (implementing ``os.PathLike[str]``), or file-like
+        object implementing a ``read()`` function. The string can be any valid XML
+        string or a path. The string can further be a URL. Valid URL schemes
+        include http, ftp, s3, and file.
+
+        .. deprecated:: 2.1.0
+            Passing xml literal strings is deprecated.
+            Wrap literal xml input in ``io.StringIO`` or ``io.BytesIO`` instead.
+
+    xpath : str, optional, default './\*'
+        The ``XPath`` to parse required set of nodes for migration to
+        :class:`~pandas.DataFrame`. ``XPath`` should return a collection of elements
+        and not a single element. Note: The ``etree`` parser supports limited ``XPath``
+        expressions. For more complex ``XPath``, use ``lxml`` which requires
+        installation.
+
+    namespaces : dict, optional
+        The namespaces defined in XML document as dicts with key being
+        namespace prefix and value the URI. There is no need to include all
+        namespaces in XML, only the ones used in ``xpath`` expression.
+        Note: if XML document uses default namespace denoted as
+        `xmlns='<URI>'` without a prefix, you must assign any temporary
+        namespace prefix such as 'doc' to the URI in order to parse
+        underlying nodes and/or attributes. For example, ::
+
+            namespaces = {{"doc": "https://example.com"}}
+
+    elems_only : bool, optional, default False
+        Parse only the child elements at the specified ``xpath``. By default,
+        all child elements and non-empty text nodes are returned.
+
+    attrs_only : bool, optional, default False
+        Parse only the attributes at the specified ``xpath``.
+        By default, all attributes are returned.
+
+    names : list-like, optional
+        Column names for DataFrame of parsed XML data. Use this parameter to
+        rename original element names and distinguish same named elements and
+        attributes.
+
+    dtype : Type name or dict of column -> type, optional
+        Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
+        'c': 'Int64'}}
+        Use `str` or `object` together with suitable `na_values` settings
+        to preserve and not interpret dtype.
+        If converters are specified, they will be applied INSTEAD
+        of dtype conversion.
+
+        .. versionadded:: 1.5.0
+
+    converters : dict, optional
+        Dict of functions for converting values in certain columns. Keys can either
+        be integers or column labels.
+
+        .. versionadded:: 1.5.0
+
+    parse_dates : bool or list of int or names or list of lists or dict, default False
+        Identifiers to parse index or columns to datetime. The behavior is as follows:
+
+        * boolean. If True -> try parsing the index.
+        * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
+          each as a separate date column.
+        * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
+          a single date column.
+        * dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
+          result 'foo'
+
+        .. versionadded:: 1.5.0
+
+    encoding : str, optional, default 'utf-8'
+        Encoding of XML document.
+
+    parser : {{'lxml','etree'}}, default 'lxml'
+        Parser module to use for retrieval of data. Only 'lxml' and
+        'etree' are supported. With 'lxml' more complex ``XPath`` searches
+        and ability to use XSLT stylesheet are supported.
+
+    stylesheet : str, path object or file-like object
+        A URL, file-like object, or a raw string containing an XSLT script.
+        This stylesheet should flatten complex, deeply nested XML documents
+        for easier parsing. To use this feature you must have ``lxml`` module
+        installed and specify 'lxml' as ``parser``. The ``xpath`` must
+        reference nodes of transformed XML document generated after XSLT
+        transformation and not the original XML document. Only XSLT 1.0
+        scripts, not later versions, are currently supported.
+
+    iterparse : dict, optional
+        The nodes or attributes to retrieve in iterparsing of XML document
+        as a dict with key being the name of repeating element and value being
+        list of elements or attribute names that are descendants of the repeated
+        element. Note: If this option is used, it will replace ``xpath`` parsing
+        and unlike ``xpath``, descendants do not need to relate to each other but can
+        exist anywhere in document under the repeating element. This memory-
+        efficient method should be used for very large XML files (500MB, 1GB, or 5GB+).
+        For example, ::
+
+            iterparse = {{"row_element": ["child_elem", "attr", "grandchild_elem"]}}
+
+        .. versionadded:: 1.5.0
+
+    {decompression_options}
+
+        .. versionchanged:: 1.4.0 Zstandard support.
+
+    {storage_options}
+
+    dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
+        Back-end data type applied to the resultant :class:`DataFrame`
+        (still experimental). Behaviour is as follows:
+
+        * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+          (default).
+        * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+          DataFrame.
+
+        .. versionadded:: 2.0
+
+    Returns
+    -------
+    df
+        A DataFrame.
+
+    See Also
+    --------
+    read_json : Convert a JSON string to pandas object.
+    read_html : Read HTML tables into a list of DataFrame objects.
+
+    Notes
+    -----
+    This method is best designed to import shallow XML documents in
+    following format which is the ideal fit for the two-dimensions of a
+    ``DataFrame`` (row by column). ::
+
+        <root>
+            <row>
+                <column1>data</column1>
+                <column2>data</column2>
+                <column3>data</column3>
+                ...
+            </row>
+            <row>
+                ...
+            </row>
+            ...
+        </root>
+
+    As a file format, XML documents can be designed any way including
+    layout of elements and attributes as long as it conforms to W3C
+    specifications. Therefore, this method is a convenience handler for
+    a specific flatter design and not all possible XML structures.
+
+    However, for more complex XML documents, ``stylesheet`` allows you to
+    temporarily redesign original document with XSLT (a special purpose
+    language) for a flatter version for migration to a DataFrame.
+
+    This function will *always* return a single :class:`DataFrame` or raise
+    exceptions due to issues with XML document, ``xpath``, or other
+    parameters.
+
+    See the :ref:`read_xml documentation in the IO section of the docs
+    <io.read_xml>` for more information on using this method to parse XML
+    files to DataFrames.
+
+    Examples
+    --------
+    >>> import io
+    >>> xml = '''<?xml version='1.0' encoding='utf-8'?>
+    ... <data xmlns="http://example.com">
+    ...  <row>
+    ...    <shape>square</shape>
+    ...    <degrees>360</degrees>
+    ...    <sides>4.0</sides>
+    ...  </row>
+    ...  <row>
+    ...    <shape>circle</shape>
+    ...    <degrees>360</degrees>
+    ...    <sides/>
+    ...  </row>
+    ...  <row>
+    ...    <shape>triangle</shape>
+    ...    <degrees>180</degrees>
+    ...    <sides>3.0</sides>
+    ...  </row>
+    ... </data>'''
+
+    >>> df = pd.read_xml(io.StringIO(xml))
+    >>> df
+          shape  degrees  sides
+    0    square      360    4.0
+    1    circle      360    NaN
+    2  triangle      180    3.0
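+
+    The ``names`` parameter relabels the parsed columns; the replacement
+    labels below are only illustrative:
+
+    >>> df = pd.read_xml(io.StringIO(xml), names=["type", "angle", "edges"])
+    >>> df
+           type  angle  edges
+    0    square    360    4.0
+    1    circle    360    NaN
+    2  triangle    180    3.0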
+
+    >>> xml = '''<?xml version='1.0' encoding='utf-8'?>
+    ... <data>
+    ...   <row shape="square" degrees="360" sides="4.0"/>
+    ...   <row shape="circle" degrees="360"/>
+    ...   <row shape="triangle" degrees="180" sides="3.0"/>
+    ... </data>'''
+
+    >>> df = pd.read_xml(io.StringIO(xml), xpath=".//row")
+    >>> df
+          shape  degrees  sides
+    0    square      360    4.0
+    1    circle      360    NaN
+    2  triangle      180    3.0
+
+    >>> xml = '''<?xml version='1.0' encoding='utf-8'?>
+    ... <doc:data xmlns:doc="https://example.com">
+    ...   <doc:row>
+    ...     <doc:shape>square</doc:shape>
+    ...     <doc:degrees>360</doc:degrees>
+    ...     <doc:sides>4.0</doc:sides>
+    ...   </doc:row>
+    ...   <doc:row>
+    ...     <doc:shape>circle</doc:shape>
+    ...     <doc:degrees>360</doc:degrees>
+    ...     <doc:sides/>
+    ...   </doc:row>
+    ...   <doc:row>
+    ...     <doc:shape>triangle</doc:shape>
+    ...     <doc:degrees>180</doc:degrees>
+    ...     <doc:sides>3.0</doc:sides>
+    ...   </doc:row>
+    ... </doc:data>'''
+
+    >>> df = pd.read_xml(io.StringIO(xml),
+    ...                  xpath="//doc:row",
+    ...                  namespaces={{"doc": "https://example.com"}})
+    >>> df
+          shape  degrees  sides
+    0    square      360    4.0
+    1    circle      360    NaN
+    2  triangle      180    3.0
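+
+    For very large documents, ``iterparse`` parses the repeating elements
+    without holding the whole tree in memory. A sketch, assuming a local
+    file ``books.xml`` shaped like the documents above:
+
+    >>> df = pd.read_xml(
+    ...     "books.xml",
+    ...     iterparse={{"row": ["shape", "degrees", "sides"]}},
+    ... )  # doctest: +SKIP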
+    """
+    check_dtype_backend(dtype_backend)
+
+    return _parse(
+        path_or_buffer=path_or_buffer,
+        xpath=xpath,
+        namespaces=namespaces,
+        elems_only=elems_only,
+        attrs_only=attrs_only,
+        names=names,
+        dtype=dtype,
+        converters=converters,
+        parse_dates=parse_dates,
+        encoding=encoding,
+        parser=parser,
+        stylesheet=stylesheet,
+        iterparse=iterparse,
+        compression=compression,
+        storage_options=storage_options,
+        dtype_backend=dtype_backend,
+    )
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/__init__.py
new file mode 100644
index 00000000..55c861e3
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/__init__.py
@@ -0,0 +1,98 @@
+"""
+Plotting public API.
+
+Authors of third-party plotting backends should implement a module with a
+public ``plot(data, kind, **kwargs)``. The parameter `data` will contain
+the data structure and can be a `Series` or a `DataFrame`. For example,
+for ``df.plot()`` the parameter `data` will contain the DataFrame `df`.
+In some cases, the data structure is transformed before being sent to
+the backend (see PlotAccessor.__call__ in pandas/plotting/_core.py for
+the exact transformations).
+
+The parameter `kind` will be one of:
+
+- line
+- bar
+- barh
+- box
+- hist
+- kde
+- area
+- pie
+- scatter
+- hexbin
+
+See the pandas API reference for documentation on each kind of plot.
+
+Any other keyword argument is currently assumed to be backend specific,
+but some parameters may be unified and added to the signature in the
+future (e.g. `title` which should be useful for any backend).
+
+Currently, all the Matplotlib functions in pandas are accessed through
+the selected backend. For example, `pandas.plotting.boxplot` (equivalent
+to `DataFrame.boxplot`) is also accessed in the selected backend. This
+is expected to change, and the exact API is under discussion. But with
+the current version, backends are expected to implement the following
+functions:
+
+- plot (described above, used for `Series.plot` and `DataFrame.plot`)
+- hist_series and hist_frame (for `Series.hist` and `DataFrame.hist`)
+- boxplot (`pandas.plotting.boxplot(df)` equivalent to `DataFrame.boxplot`)
+- boxplot_frame and boxplot_frame_groupby
+- register and deregister (register converters for the tick formats)
+- Plots not called as `Series` and `DataFrame` methods:
+  - table
+  - andrews_curves
+  - autocorrelation_plot
+  - bootstrap_plot
+  - lag_plot
+  - parallel_coordinates
+  - radviz
+  - scatter_matrix
+
+Use the code in pandas/plotting/_matplotlib.py and
+https://github.com/pyviz/hvplot as a reference on how to write a backend.
+
+For the discussion about the API see
+https://github.com/pandas-dev/pandas/issues/26747.
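+
+A minimal backend module is sketched below; the module name ``my_backend``
+and its body are hypothetical, only the required ``plot`` entry point is
+part of the documented contract::
+
+    # my_backend.py
+    def plot(data, kind, **kwargs):
+        raise NotImplementedError(f"plot kind {kind!r} is not supported yet")
+
+Once importable, the backend can be selected per call with
+``df.plot(backend="my_backend")`` or for the whole session with
+``pd.set_option("plotting.backend", "my_backend")``.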
+""" +from pandas.plotting._core import ( + PlotAccessor, + boxplot, + boxplot_frame, + boxplot_frame_groupby, + hist_frame, + hist_series, +) +from pandas.plotting._misc import ( + andrews_curves, + autocorrelation_plot, + bootstrap_plot, + deregister as deregister_matplotlib_converters, + lag_plot, + parallel_coordinates, + plot_params, + radviz, + register as register_matplotlib_converters, + scatter_matrix, + table, +) + +__all__ = [ + "PlotAccessor", + "boxplot", + "boxplot_frame", + "boxplot_frame_groupby", + "hist_frame", + "hist_series", + "scatter_matrix", + "radviz", + "andrews_curves", + "bootstrap_plot", + "parallel_coordinates", + "lag_plot", + "autocorrelation_plot", + "table", + "plot_params", + "register_matplotlib_converters", + "deregister_matplotlib_converters", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_core.py b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_core.py new file mode 100644 index 00000000..07c77ec4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_core.py @@ -0,0 +1,1949 @@ +from __future__ import annotations + +import importlib +from typing import ( + TYPE_CHECKING, + Callable, + Literal, +) + +from pandas._config import get_option + +from pandas.util._decorators import ( + Appender, + Substitution, +) + +from pandas.core.dtypes.common import ( + is_integer, + is_list_like, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) + +from pandas.core.base import PandasObject + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Sequence, + ) + import types + + from matplotlib.axes import Axes + import numpy as np + + from pandas._typing import IndexLabel + + from pandas import DataFrame + + +def hist_series( + self, + by=None, + ax=None, + grid: bool = True, + xlabelsize: int | None = None, + xrot: float | None = None, + ylabelsize: int | None = None, + yrot: float | None = None, + figsize: tuple[int, int] | None = None, + bins: int | Sequence[int] = 10, + backend: str | None = None, + legend: bool = False, + **kwargs, +): + """ + Draw histogram of the input series using matplotlib. + + Parameters + ---------- + by : object, optional + If passed, then used to form histograms for separate groups. + ax : matplotlib axis object + If not passed, uses gca(). + grid : bool, default True + Whether to show axis grid lines. + xlabelsize : int, default None + If specified changes the x-axis label size. + xrot : float, default None + Rotation of x axis labels. + ylabelsize : int, default None + If specified changes the y-axis label size. + yrot : float, default None + Rotation of y axis labels. + figsize : tuple, default None + Figure size in inches by default. + bins : int or sequence, default 10 + Number of histogram bins to be used. If an integer is given, bins + 1 + bin edges are calculated and returned. If bins is a sequence, gives + bin edges, including left edge of first bin and right edge of last + bin. In this case, bins is returned unmodified. + backend : str, default None + Backend to use instead of the backend specified in the option + ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to + specify the ``plotting.backend`` for the whole session, set + ``pd.options.plotting.backend``. + legend : bool, default False + Whether to show the legend. + + **kwargs + To be passed to the actual plotting function. + + Returns + ------- + matplotlib.AxesSubplot + A histogram plot. 
+ + See Also + -------- + matplotlib.axes.Axes.hist : Plot a histogram using matplotlib. + + Examples + -------- + For Series: + + .. plot:: + :context: close-figs + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst) + >>> hist = ser.hist() + + For Groupby: + + .. plot:: + :context: close-figs + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst) + >>> hist = ser.groupby(level=0).hist() + """ + plot_backend = _get_plot_backend(backend) + return plot_backend.hist_series( + self, + by=by, + ax=ax, + grid=grid, + xlabelsize=xlabelsize, + xrot=xrot, + ylabelsize=ylabelsize, + yrot=yrot, + figsize=figsize, + bins=bins, + legend=legend, + **kwargs, + ) + + +def hist_frame( + data: DataFrame, + column: IndexLabel | None = None, + by=None, + grid: bool = True, + xlabelsize: int | None = None, + xrot: float | None = None, + ylabelsize: int | None = None, + yrot: float | None = None, + ax=None, + sharex: bool = False, + sharey: bool = False, + figsize: tuple[int, int] | None = None, + layout: tuple[int, int] | None = None, + bins: int | Sequence[int] = 10, + backend: str | None = None, + legend: bool = False, + **kwargs, +): + """ + Make a histogram of the DataFrame's columns. + + A `histogram`_ is a representation of the distribution of data. + This function calls :meth:`matplotlib.pyplot.hist`, on each series in + the DataFrame, resulting in one histogram per column. + + .. _histogram: https://en.wikipedia.org/wiki/Histogram + + Parameters + ---------- + data : DataFrame + The pandas object holding the data. + column : str or sequence, optional + If passed, will be used to limit data to a subset of columns. + by : object, optional + If passed, then used to form histograms for separate groups. + grid : bool, default True + Whether to show axis grid lines. + xlabelsize : int, default None + If specified changes the x-axis label size. + xrot : float, default None + Rotation of x axis labels. For example, a value of 90 displays the + x labels rotated 90 degrees clockwise. + ylabelsize : int, default None + If specified changes the y-axis label size. + yrot : float, default None + Rotation of y axis labels. For example, a value of 90 displays the + y labels rotated 90 degrees clockwise. + ax : Matplotlib axes object, default None + The axes to plot the histogram on. + sharex : bool, default True if ax is None else False + In case subplots=True, share x axis and set some x axis labels to + invisible; defaults to True if ax is None otherwise False if an ax + is passed in. + Note that passing in both an ax and sharex=True will alter all x axis + labels for all subplots in a figure. + sharey : bool, default False + In case subplots=True, share y axis and set some y axis labels to + invisible. + figsize : tuple, optional + The size in inches of the figure to create. Uses the value in + `matplotlib.rcParams` by default. + layout : tuple, optional + Tuple of (rows, columns) for the layout of the histograms. + bins : int or sequence, default 10 + Number of histogram bins to be used. If an integer is given, bins + 1 + bin edges are calculated and returned. If bins is a sequence, gives + bin edges, including left edge of first bin and right edge of last + bin. In this case, bins is returned unmodified. + + backend : str, default None + Backend to use instead of the backend specified in the option + ``plotting.backend``. For instance, 'matplotlib'. 
Alternatively, to
+        specify the ``plotting.backend`` for the whole session, set
+        ``pd.options.plotting.backend``.
+
+    legend : bool, default False
+        Whether to show the legend.
+
+    **kwargs
+        All other plotting keyword arguments to be passed to
+        :meth:`matplotlib.pyplot.hist`.
+
+    Returns
+    -------
+    matplotlib.AxesSubplot or numpy.ndarray of them
+
+    See Also
+    --------
+    matplotlib.pyplot.hist : Plot a histogram using matplotlib.
+
+    Examples
+    --------
+    This example draws a histogram based on the length and width of
+    some animals, displayed in three bins.
+
+    .. plot::
+        :context: close-figs
+
+        >>> df = pd.DataFrame({
+        ...     'length': [1.5, 0.5, 1.2, 0.9, 3],
+        ...     'width': [0.7, 0.2, 0.15, 0.2, 1.1]
+        ...     }, index=['pig', 'rabbit', 'duck', 'chicken', 'horse'])
+        >>> hist = df.hist(bins=3)
+    """
+    plot_backend = _get_plot_backend(backend)
+    return plot_backend.hist_frame(
+        data,
+        column=column,
+        by=by,
+        grid=grid,
+        xlabelsize=xlabelsize,
+        xrot=xrot,
+        ylabelsize=ylabelsize,
+        yrot=yrot,
+        ax=ax,
+        sharex=sharex,
+        sharey=sharey,
+        figsize=figsize,
+        layout=layout,
+        legend=legend,
+        bins=bins,
+        **kwargs,
+    )
+
+
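+# --- Illustrative sketch (editor's addition, not part of the pandas API) ---
+# hist_series, hist_frame and the boxplot helpers below all follow the same
+# pattern: resolve the active backend module, then forward every argument to
+# the function of the same name on that backend.
+def _forward_to_backend_sketch(data: DataFrame, bins: int = 10):
+    plot_backend = _get_plot_backend("matplotlib")
+    return plot_backend.hist_frame(data, bins=bins)
+
+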
+_boxplot_doc = """
+Make a box plot from DataFrame columns.
+
+Make a box-and-whisker plot from DataFrame columns, optionally grouped
+by some other columns. A box plot is a method for graphically depicting
+groups of numerical data through their quartiles.
+The box extends from the Q1 to Q3 quartile values of the data,
+with a line at the median (Q2). The whiskers extend from the edges
+of the box to show the range of the data. By default, they extend no more than
+`1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box, ending at the farthest
+data point within that interval. Outliers are plotted as separate dots.
+
+For further details see
+Wikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_.
+
+Parameters
+----------
+%(data)s\
+column : str or list of str, optional
+    Column name or list of names, or vector.
+    Can be any valid input to :meth:`pandas.DataFrame.groupby`.
+by : str or array-like, optional
+    Column in the DataFrame to :meth:`pandas.DataFrame.groupby`.
+    One box-plot will be done per value of columns in `by`.
+ax : object of class matplotlib.axes.Axes, optional
+    The matplotlib axes to be used by boxplot.
+fontsize : float or str
+    Tick label font size in points or as a string (e.g., `large`).
+rot : float, default 0
+    The rotation angle of labels (in degrees)
+    with respect to the screen coordinate system.
+grid : bool, default True
+    Setting this to True will show the grid.
+figsize : A tuple (width, height) in inches
+    The size of the figure to create in matplotlib.
+layout : tuple (rows, columns), optional
+    For example, (3, 5) will display the subplots
+    using 3 rows and 5 columns, starting from the top-left.
+return_type : {'axes', 'dict', 'both'} or None, default 'axes'
+    The kind of object to return. The default is ``axes``.
+
+    * 'axes' returns the matplotlib axes the boxplot is drawn on.
+    * 'dict' returns a dictionary whose values are the matplotlib
+      Lines of the boxplot.
+    * 'both' returns a namedtuple with the axes and dict.
+    * when grouping with ``by``, a Series mapping columns to
+      ``return_type`` is returned.
+
+    If ``return_type`` is `None`, a NumPy array
+    of axes with the same shape as ``layout`` is returned.
+%(backend)s\
+
+**kwargs
+    All other plotting keyword arguments to be passed to
+    :func:`matplotlib.pyplot.boxplot`.
+
+Returns
+-------
+result
+    See Notes.
+
+See Also
+--------
+pandas.Series.plot.hist: Make a histogram.
+matplotlib.pyplot.boxplot : Matplotlib equivalent plot.
+
+Notes
+-----
+The return type depends on the `return_type` parameter:
+
+* 'axes' : object of class matplotlib.axes.Axes
+* 'dict' : dict of matplotlib.lines.Line2D objects
+* 'both' : a namedtuple with structure (ax, lines)
+
+For data grouped with ``by``, return a Series of the above or a numpy
+array:
+
+* :class:`~pandas.Series`
+* :class:`~numpy.array` (for ``return_type = None``)
+
+Use ``return_type='dict'`` when you want to tweak the appearance
+of the lines after plotting. In this case a dict containing the Lines
+making up the boxes, caps, fliers, medians, and whiskers is returned.
+
+Examples
+--------
+
+Boxplots can be created for every column in the dataframe
+by ``df.boxplot()`` or indicating the columns to be used:
+
+.. plot::
+    :context: close-figs
+
+    >>> np.random.seed(1234)
+    >>> df = pd.DataFrame(np.random.randn(10, 4),
+    ...                   columns=['Col1', 'Col2', 'Col3', 'Col4'])
+    >>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3'])  # doctest: +SKIP
+
+Boxplots of variables' distributions grouped by the values of a third
+variable can be created using the option ``by``. For instance:
+
+.. plot::
+    :context: close-figs
+
+    >>> df = pd.DataFrame(np.random.randn(10, 2),
+    ...                   columns=['Col1', 'Col2'])
+    >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
+    ...                      'B', 'B', 'B', 'B', 'B'])
+    >>> boxplot = df.boxplot(by='X')
+
+A list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot
+in order to group the data by combination of the variables in the x-axis:
+
+.. plot::
+    :context: close-figs
+
+    >>> df = pd.DataFrame(np.random.randn(10, 3),
+    ...                   columns=['Col1', 'Col2', 'Col3'])
+    >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
+    ...                      'B', 'B', 'B', 'B', 'B'])
+    >>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A',
+    ...                      'B', 'A', 'B', 'A', 'B'])
+    >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])
+
+The layout of boxplot can be adjusted giving a tuple to ``layout``:
+
+.. plot::
+    :context: close-figs
+
+    >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
+    ...                      layout=(2, 1))
+
+Additional formatting can be done to the boxplot, like suppressing the grid
+(``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``)
+or changing the fontsize (i.e. ``fontsize=15``):
+
+.. plot::
+    :context: close-figs
+
+    >>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15)  # doctest: +SKIP
+
+The parameter ``return_type`` can be used to select the type of element
+returned by `boxplot`. When ``return_type='axes'`` is selected,
+the matplotlib axes on which the boxplot is drawn are returned:
+
+    >>> boxplot = df.boxplot(column=['Col1', 'Col2'], return_type='axes')
+    >>> type(boxplot)
+    <class 'matplotlib.axes._axes.Axes'>
+
+When grouping with ``by``, a Series mapping columns to ``return_type``
+is returned:
+
+    >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
+    ...                      return_type='axes')
+    >>> type(boxplot)
+    <class 'pandas.core.series.Series'>
+
+If ``return_type`` is `None`, a NumPy array of axes with the same shape
+as ``layout`` is returned:
+
+    >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
+    ...                      return_type=None)
+    >>> type(boxplot)
+    <class 'numpy.ndarray'>
+"""
+
+_backend_doc = """\
+backend : str, default None
+    Backend to use instead of the backend specified in the option
+    ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
+    specify the ``plotting.backend`` for the whole session, set
+    ``pd.options.plotting.backend``.
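+    For example::
+
+        pd.set_option("plotting.backend", "matplotlib")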
+""" + + +_bar_or_line_doc = """ + Parameters + ---------- + x : label or position, optional + Allows plotting of one column versus another. If not specified, + the index of the DataFrame is used. + y : label or position, optional + Allows plotting of one column versus another. If not specified, + all numerical columns are used. + color : str, array-like, or dict, optional + The color for each of the DataFrame's columns. Possible values are: + + - A single color string referred to by name, RGB or RGBA code, + for instance 'red' or '#a98d19'. + + - A sequence of color strings referred to by name, RGB or RGBA + code, which will be used for each column recursively. For + instance ['green','yellow'] each column's %(kind)s will be filled in + green or yellow, alternatively. If there is only a single column to + be plotted, then only the first color from the color list will be + used. + + - A dict of the form {column name : color}, so that each column will be + colored accordingly. For example, if your columns are called `a` and + `b`, then passing {'a': 'green', 'b': 'red'} will color %(kind)ss for + column `a` in green and %(kind)ss for column `b` in red. + + **kwargs + Additional keyword arguments are documented in + :meth:`DataFrame.plot`. + + Returns + ------- + matplotlib.axes.Axes or np.ndarray of them + An ndarray is returned with one :class:`matplotlib.axes.Axes` + per column when ``subplots=True``. +""" + + +@Substitution(data="data : DataFrame\n The data to visualize.\n", backend="") +@Appender(_boxplot_doc) +def boxplot( + data: DataFrame, + column: str | list[str] | None = None, + by: str | list[str] | None = None, + ax: Axes | None = None, + fontsize: float | str | None = None, + rot: int = 0, + grid: bool = True, + figsize: tuple[float, float] | None = None, + layout: tuple[int, int] | None = None, + return_type: str | None = None, + **kwargs, +): + plot_backend = _get_plot_backend("matplotlib") + return plot_backend.boxplot( + data, + column=column, + by=by, + ax=ax, + fontsize=fontsize, + rot=rot, + grid=grid, + figsize=figsize, + layout=layout, + return_type=return_type, + **kwargs, + ) + + +@Substitution(data="", backend=_backend_doc) +@Appender(_boxplot_doc) +def boxplot_frame( + self, + column=None, + by=None, + ax=None, + fontsize: int | None = None, + rot: int = 0, + grid: bool = True, + figsize: tuple[float, float] | None = None, + layout=None, + return_type=None, + backend=None, + **kwargs, +): + plot_backend = _get_plot_backend(backend) + return plot_backend.boxplot_frame( + self, + column=column, + by=by, + ax=ax, + fontsize=fontsize, + rot=rot, + grid=grid, + figsize=figsize, + layout=layout, + return_type=return_type, + **kwargs, + ) + + +def boxplot_frame_groupby( + grouped, + subplots: bool = True, + column=None, + fontsize: int | None = None, + rot: int = 0, + grid: bool = True, + ax=None, + figsize: tuple[float, float] | None = None, + layout=None, + sharex: bool = False, + sharey: bool = True, + backend=None, + **kwargs, +): + """ + Make box plots from DataFrameGroupBy data. + + Parameters + ---------- + grouped : Grouped DataFrame + subplots : bool + * ``False`` - no subplots will be used + * ``True`` - create a subplot for each group. + + column : column name or list of names, or vector + Can be any valid input to groupby. 
+ fontsize : float or str + rot : label rotation angle + grid : Setting this to True will show the grid + ax : Matplotlib axis object, default None + figsize : A tuple (width, height) in inches + layout : tuple (optional) + The layout of the plot: (rows, columns). + sharex : bool, default False + Whether x-axes will be shared among subplots. + sharey : bool, default True + Whether y-axes will be shared among subplots. + backend : str, default None + Backend to use instead of the backend specified in the option + ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to + specify the ``plotting.backend`` for the whole session, set + ``pd.options.plotting.backend``. + **kwargs + All other plotting keyword arguments to be passed to + matplotlib's boxplot function. + + Returns + ------- + dict of key/value = group key/DataFrame.boxplot return value + or DataFrame.boxplot return value in case subplots=figures=False + + Examples + -------- + You can create boxplots for grouped data and show them as separate subplots: + + .. plot:: + :context: close-figs + + >>> import itertools + >>> tuples = [t for t in itertools.product(range(1000), range(4))] + >>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1']) + >>> data = np.random.randn(len(index),4) + >>> df = pd.DataFrame(data, columns=list('ABCD'), index=index) + >>> grouped = df.groupby(level='lvl1') + >>> grouped.boxplot(rot=45, fontsize=12, figsize=(8,10)) # doctest: +SKIP + + The ``subplots=False`` option shows the boxplots in a single figure. + + .. plot:: + :context: close-figs + + >>> grouped.boxplot(subplots=False, rot=45, fontsize=12) # doctest: +SKIP + """ + plot_backend = _get_plot_backend(backend) + return plot_backend.boxplot_frame_groupby( + grouped, + subplots=subplots, + column=column, + fontsize=fontsize, + rot=rot, + grid=grid, + ax=ax, + figsize=figsize, + layout=layout, + sharex=sharex, + sharey=sharey, + **kwargs, + ) + + +class PlotAccessor(PandasObject): + """ + Make plots of Series or DataFrame. + + Uses the backend specified by the + option ``plotting.backend``. By default, matplotlib is used. + + Parameters + ---------- + data : Series or DataFrame + The object for which the method is called. + x : label or position, default None + Only used if data is a DataFrame. + y : label, position or list of label, positions, default None + Allows plotting of one column versus another. Only used if data is a + DataFrame. + kind : str + The kind of plot to produce: + + - 'line' : line plot (default) + - 'bar' : vertical bar plot + - 'barh' : horizontal bar plot + - 'hist' : histogram + - 'box' : boxplot + - 'kde' : Kernel Density Estimation plot + - 'density' : same as 'kde' + - 'area' : area plot + - 'pie' : pie plot + - 'scatter' : scatter plot (DataFrame only) + - 'hexbin' : hexbin plot (DataFrame only) + ax : matplotlib axes object, default None + An axes of the current figure. + subplots : bool or sequence of iterables, default False + Whether to group columns into subplots: + + - ``False`` : No subplots will be used + - ``True`` : Make separate subplots for each column. + - sequence of iterables of column labels: Create a subplot for each + group of columns. For example `[('a', 'c'), ('b', 'd')]` will + create 2 subplots: one with columns 'a' and 'c', and one + with columns 'b' and 'd'. Remaining columns that aren't specified + will be plotted in additional subplots (one per column). + + .. 
versionadded:: 1.5.0 + + sharex : bool, default True if ax is None else False + In case ``subplots=True``, share x axis and set some x axis labels + to invisible; defaults to True if ax is None otherwise False if + an ax is passed in; Be aware, that passing in both an ax and + ``sharex=True`` will alter all x axis labels for all axis in a figure. + sharey : bool, default False + In case ``subplots=True``, share y axis and set some y axis labels to invisible. + layout : tuple, optional + (rows, columns) for the layout of subplots. + figsize : a tuple (width, height) in inches + Size of a figure object. + use_index : bool, default True + Use index as ticks for x axis. + title : str or list + Title to use for the plot. If a string is passed, print the string + at the top of the figure. If a list is passed and `subplots` is + True, print each item in the list above the corresponding subplot. + grid : bool, default None (matlab style default) + Axis grid lines. + legend : bool or {'reverse'} + Place legend on axis subplots. + style : list or dict + The matplotlib line style per column. + logx : bool or 'sym', default False + Use log scaling or symlog scaling on x axis. + + logy : bool or 'sym' default False + Use log scaling or symlog scaling on y axis. + + loglog : bool or 'sym', default False + Use log scaling or symlog scaling on both x and y axes. + + xticks : sequence + Values to use for the xticks. + yticks : sequence + Values to use for the yticks. + xlim : 2-tuple/list + Set the x limits of the current axes. + ylim : 2-tuple/list + Set the y limits of the current axes. + xlabel : label, optional + Name to use for the xlabel on x-axis. Default uses index name as xlabel, or the + x-column name for planar plots. + + .. versionchanged:: 1.2.0 + + Now applicable to planar plots (`scatter`, `hexbin`). + + .. versionchanged:: 2.0.0 + + Now applicable to histograms. + + ylabel : label, optional + Name to use for the ylabel on y-axis. Default will show no ylabel, or the + y-column name for planar plots. + + .. versionchanged:: 1.2.0 + + Now applicable to planar plots (`scatter`, `hexbin`). + + .. versionchanged:: 2.0.0 + + Now applicable to histograms. + + rot : float, default None + Rotation for ticks (xticks for vertical, yticks for horizontal + plots). + fontsize : float, default None + Font size for xticks and yticks. + colormap : str or matplotlib colormap object, default None + Colormap to select colors from. If string, load colormap with that + name from matplotlib. + colorbar : bool, optional + If True, plot colorbar (only relevant for 'scatter' and 'hexbin' + plots). + position : float + Specify relative alignments for bar plot layout. + From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 + (center). + table : bool, Series or DataFrame, default False + If True, draw a table using the data in the DataFrame and the data + will be transposed to meet matplotlib's default layout. + If a Series or DataFrame is passed, use passed data to draw a + table. + yerr : DataFrame, Series, array-like, dict and str + See :ref:`Plotting with Error Bars ` for + detail. + xerr : DataFrame, Series, array-like, dict and str + Equivalent to yerr. + stacked : bool, default False in line and bar plots, and True in area plot + If True, create stacked plot. + secondary_y : bool or sequence, default False + Whether to plot on the secondary y-axis if a list/tuple, which + columns to plot on secondary y-axis. 
+ mark_right : bool, default True + When using a secondary_y axis, automatically mark the column + labels with "(right)" in the legend. + include_bool : bool, default is False + If True, boolean values can be plotted. + backend : str, default None + Backend to use instead of the backend specified in the option + ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to + specify the ``plotting.backend`` for the whole session, set + ``pd.options.plotting.backend``. + **kwargs + Options to pass to matplotlib plotting method. + + Returns + ------- + :class:`matplotlib.axes.Axes` or numpy.ndarray of them + If the backend is not the default matplotlib one, the return value + will be the object returned by the backend. + + Notes + ----- + - See matplotlib documentation online for more on this subject + - If `kind` = 'bar' or 'barh', you can specify relative alignments + for bar plot layout by `position` keyword. + From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 + (center) + + Examples + -------- + For Series: + + .. plot:: + :context: close-figs + + >>> ser = pd.Series([1, 2, 3, 3]) + >>> plot = ser.plot(kind='hist', title="My plot") + + For DataFrame: + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({'length': [1.5, 0.5, 1.2, 0.9, 3], + ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]}, + ... index=['pig', 'rabbit', 'duck', 'chicken', 'horse']) + >>> plot = df.plot(title="DataFrame Plot") + + For SeriesGroupBy: + + .. plot:: + :context: close-figs + + >>> lst = [-1, -2, -3, 1, 2, 3] + >>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst) + >>> plot = ser.groupby(lambda x: x > 0).plot(title="SeriesGroupBy Plot") + + For DataFrameGroupBy: + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({"col1" : [1, 2, 3, 4], + ... "col2" : ["A", "B", "A", "B"]}) + >>> plot = df.groupby("col2").plot(kind="bar", title="DataFrameGroupBy Plot") + """ + + _common_kinds = ("line", "bar", "barh", "kde", "density", "area", "hist", "box") + _series_kinds = ("pie",) + _dataframe_kinds = ("scatter", "hexbin") + _kind_aliases = {"density": "kde"} + _all_kinds = _common_kinds + _series_kinds + _dataframe_kinds + + def __init__(self, data) -> None: + self._parent = data + + @staticmethod + def _get_call_args(backend_name: str, data, args, kwargs): + """ + This function makes calls to this accessor `__call__` method compatible + with the previous `SeriesPlotMethods.__call__` and + `DataFramePlotMethods.__call__`. Those had slightly different + signatures, since `DataFramePlotMethods` accepted `x` and `y` + parameters. 
+ """ + if isinstance(data, ABCSeries): + arg_def = [ + ("kind", "line"), + ("ax", None), + ("figsize", None), + ("use_index", True), + ("title", None), + ("grid", None), + ("legend", False), + ("style", None), + ("logx", False), + ("logy", False), + ("loglog", False), + ("xticks", None), + ("yticks", None), + ("xlim", None), + ("ylim", None), + ("rot", None), + ("fontsize", None), + ("colormap", None), + ("table", False), + ("yerr", None), + ("xerr", None), + ("label", None), + ("secondary_y", False), + ("xlabel", None), + ("ylabel", None), + ] + elif isinstance(data, ABCDataFrame): + arg_def = [ + ("x", None), + ("y", None), + ("kind", "line"), + ("ax", None), + ("subplots", False), + ("sharex", None), + ("sharey", False), + ("layout", None), + ("figsize", None), + ("use_index", True), + ("title", None), + ("grid", None), + ("legend", True), + ("style", None), + ("logx", False), + ("logy", False), + ("loglog", False), + ("xticks", None), + ("yticks", None), + ("xlim", None), + ("ylim", None), + ("rot", None), + ("fontsize", None), + ("colormap", None), + ("table", False), + ("yerr", None), + ("xerr", None), + ("secondary_y", False), + ("xlabel", None), + ("ylabel", None), + ] + else: + raise TypeError( + f"Called plot accessor for type {type(data).__name__}, " + "expected Series or DataFrame" + ) + + if args and isinstance(data, ABCSeries): + positional_args = str(args)[1:-1] + keyword_args = ", ".join( + [f"{name}={repr(value)}" for (name, _), value in zip(arg_def, args)] + ) + msg = ( + "`Series.plot()` should not be called with positional " + "arguments, only keyword arguments. The order of " + "positional arguments will change in the future. " + f"Use `Series.plot({keyword_args})` instead of " + f"`Series.plot({positional_args})`." + ) + raise TypeError(msg) + + pos_args = {name: value for (name, _), value in zip(arg_def, args)} + if backend_name == "pandas.plotting._matplotlib": + kwargs = dict(arg_def, **pos_args, **kwargs) + else: + kwargs = dict(pos_args, **kwargs) + + x = kwargs.pop("x", None) + y = kwargs.pop("y", None) + kind = kwargs.pop("kind", "line") + return x, y, kind, kwargs + + def __call__(self, *args, **kwargs): + plot_backend = _get_plot_backend(kwargs.pop("backend", None)) + + x, y, kind, kwargs = self._get_call_args( + plot_backend.__name__, self._parent, args, kwargs + ) + + kind = self._kind_aliases.get(kind, kind) + + # when using another backend, get out of the way + if plot_backend.__name__ != "pandas.plotting._matplotlib": + return plot_backend.plot(self._parent, x=x, y=y, kind=kind, **kwargs) + + if kind not in self._all_kinds: + raise ValueError(f"{kind} is not a valid plot kind") + + # The original data structured can be transformed before passed to the + # backend. For example, for DataFrame is common to set the index as the + # `x` parameter, and return a Series with the parameter `y` as values. + data = self._parent.copy() + + if isinstance(data, ABCSeries): + kwargs["reuse_plot"] = True + + if kind in self._dataframe_kinds: + if isinstance(data, ABCDataFrame): + return plot_backend.plot(data, x=x, y=y, kind=kind, **kwargs) + else: + raise ValueError(f"plot kind {kind} can only be used for data frames") + elif kind in self._series_kinds: + if isinstance(data, ABCDataFrame): + if y is None and kwargs.get("subplots") is False: + raise ValueError( + f"{kind} requires either y column or 'subplots=True'" + ) + if y is not None: + if is_integer(y) and not data.columns._holds_integer(): + y = data.columns[y] + # converted to series actually. 
copy to not modify
+                    data = data[y].copy()
+                    data.index.name = y
+        elif isinstance(data, ABCDataFrame):
+            data_cols = data.columns
+            if x is not None:
+                if is_integer(x) and not data.columns._holds_integer():
+                    x = data_cols[x]
+                elif not isinstance(data[x], ABCSeries):
+                    raise ValueError("x must be a label or position")
+                data = data.set_index(x)
+            if y is not None:
+                # check if we have y as int or list of ints
+                int_ylist = is_list_like(y) and all(is_integer(c) for c in y)
+                int_y_arg = is_integer(y) or int_ylist
+                if int_y_arg and not data.columns._holds_integer():
+                    y = data_cols[y]
+
+                label_kw = kwargs["label"] if "label" in kwargs else False
+                for kw in ["xerr", "yerr"]:
+                    if kw in kwargs and (
+                        isinstance(kwargs[kw], str) or is_integer(kwargs[kw])
+                    ):
+                        try:
+                            kwargs[kw] = data[kwargs[kw]]
+                        except (IndexError, KeyError, TypeError):
+                            pass
+
+                # don't overwrite
+                data = data[y].copy()
+
+                if isinstance(data, ABCSeries):
+                    label_name = label_kw or y
+                    data.name = label_name
+                else:
+                    match = is_list_like(label_kw) and len(label_kw) == len(y)
+                    if label_kw and not match:
+                        raise ValueError(
+                            "label should be list-like and same length as y"
+                        )
+                    label_name = label_kw or data.columns
+                    data.columns = label_name
+
+        return plot_backend.plot(data, kind=kind, **kwargs)
+
+    __call__.__doc__ = __doc__
+
+    @Appender(
+        """
+        See Also
+        --------
+        matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
+
+        Examples
+        --------
+
+        .. plot::
+            :context: close-figs
+
+            >>> s = pd.Series([1, 3, 2])
+            >>> s.plot.line()  # doctest: +SKIP
+
+        .. plot::
+            :context: close-figs
+
+            The following example shows the populations for some animals
+            over the years.
+
+            >>> df = pd.DataFrame({
+            ...     'pig': [20, 18, 489, 675, 1776],
+            ...     'horse': [4, 25, 281, 600, 1900]
+            ...     }, index=[1990, 1997, 2003, 2009, 2014])
+            >>> lines = df.plot.line()
+
+        .. plot::
+           :context: close-figs
+
+           An example with subplots, so an array of axes is returned.
+
+           >>> axes = df.plot.line(subplots=True)
+           >>> type(axes)
+           <class 'numpy.ndarray'>
+
+        .. plot::
+           :context: close-figs
+
+           Let's repeat the same example, but specifying colors for
+           each column (in this case, for each animal).
+
+           >>> axes = df.plot.line(
+           ...     subplots=True, color={"pig": "pink", "horse": "#742802"}
+           ... )
+
+        .. plot::
+            :context: close-figs
+
+            The following example shows the relationship between both
+            populations.
+
+            >>> lines = df.plot.line(x='pig', y='horse')
+        """
+    )
+    @Substitution(kind="line")
+    @Appender(_bar_or_line_doc)
+    def line(
+        self, x: Hashable | None = None, y: Hashable | None = None, **kwargs
+    ) -> PlotAccessor:
+        """
+        Plot Series or DataFrame as lines.
+
+        This function is useful to plot lines using DataFrame's values
+        as coordinates.
+        """
+        return self(kind="line", x=x, y=y, **kwargs)
+
+    @Appender(
+        """
+        See Also
+        --------
+        DataFrame.plot.barh : Horizontal bar plot.
+        DataFrame.plot : Make plots of a DataFrame.
+        matplotlib.pyplot.bar : Make a bar plot with matplotlib.
+
+        Examples
+        --------
+        Basic plot.
+
+        .. plot::
+            :context: close-figs
+
+            >>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
+            >>> ax = df.plot.bar(x='lab', y='val', rot=0)
+
+        Plot a whole dataframe to a bar plot. Each column is assigned a
+        distinct color, and each row is nested in a group along the
+        horizontal axis.
+
+        .. plot::
+            :context: close-figs
+
+            >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
+            >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
+            >>> index = ['snail', 'pig', 'elephant',
+            ...          
'rabbit', 'giraffe', 'coyote', 'horse'] + >>> df = pd.DataFrame({'speed': speed, + ... 'lifespan': lifespan}, index=index) + >>> ax = df.plot.bar(rot=0) + + Plot stacked bar charts for the DataFrame + + .. plot:: + :context: close-figs + + >>> ax = df.plot.bar(stacked=True) + + Instead of nesting, the figure can be split by column with + ``subplots=True``. In this case, a :class:`numpy.ndarray` of + :class:`matplotlib.axes.Axes` are returned. + + .. plot:: + :context: close-figs + + >>> axes = df.plot.bar(rot=0, subplots=True) + >>> axes[1].legend(loc=2) # doctest: +SKIP + + If you don't like the default colours, you can specify how you'd + like each column to be colored. + + .. plot:: + :context: close-figs + + >>> axes = df.plot.bar( + ... rot=0, subplots=True, color={"speed": "red", "lifespan": "green"} + ... ) + >>> axes[1].legend(loc=2) # doctest: +SKIP + + Plot a single column. + + .. plot:: + :context: close-figs + + >>> ax = df.plot.bar(y='speed', rot=0) + + Plot only selected categories for the DataFrame. + + .. plot:: + :context: close-figs + + >>> ax = df.plot.bar(x='lifespan', rot=0) + """ + ) + @Substitution(kind="bar") + @Appender(_bar_or_line_doc) + def bar( # pylint: disable=disallowed-name + self, x: Hashable | None = None, y: Hashable | None = None, **kwargs + ) -> PlotAccessor: + """ + Vertical bar plot. + + A bar plot is a plot that presents categorical data with + rectangular bars with lengths proportional to the values that they + represent. A bar plot shows comparisons among discrete categories. One + axis of the plot shows the specific categories being compared, and the + other axis represents a measured value. + """ + return self(kind="bar", x=x, y=y, **kwargs) + + @Appender( + """ + See Also + -------- + DataFrame.plot.bar: Vertical bar plot. + DataFrame.plot : Make plots of DataFrame using matplotlib. + matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib. + + Examples + -------- + Basic example + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]}) + >>> ax = df.plot.barh(x='lab', y='val') + + Plot a whole DataFrame to a horizontal bar plot + + .. plot:: + :context: close-figs + + >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] + >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] + >>> index = ['snail', 'pig', 'elephant', + ... 'rabbit', 'giraffe', 'coyote', 'horse'] + >>> df = pd.DataFrame({'speed': speed, + ... 'lifespan': lifespan}, index=index) + >>> ax = df.plot.barh() + + Plot stacked barh charts for the DataFrame + + .. plot:: + :context: close-figs + + >>> ax = df.plot.barh(stacked=True) + + We can specify colors for each column + + .. plot:: + :context: close-figs + + >>> ax = df.plot.barh(color={"speed": "red", "lifespan": "green"}) + + Plot a column of the DataFrame to a horizontal bar plot + + .. plot:: + :context: close-figs + + >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] + >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] + >>> index = ['snail', 'pig', 'elephant', + ... 'rabbit', 'giraffe', 'coyote', 'horse'] + >>> df = pd.DataFrame({'speed': speed, + ... 'lifespan': lifespan}, index=index) + >>> ax = df.plot.barh(y='speed') + + Plot DataFrame versus the desired column + + .. plot:: + :context: close-figs + + >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] + >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] + >>> index = ['snail', 'pig', 'elephant', + ... 'rabbit', 'giraffe', 'coyote', 'horse'] + >>> df = pd.DataFrame({'speed': speed, + ... 
'lifespan': lifespan}, index=index)
+            >>> ax = df.plot.barh(x='lifespan')
+        """
+    )
+    @Substitution(kind="bar")
+    @Appender(_bar_or_line_doc)
+    def barh(
+        self, x: Hashable | None = None, y: Hashable | None = None, **kwargs
+    ) -> PlotAccessor:
+        """
+        Make a horizontal bar plot.
+
+        A horizontal bar plot is a plot that presents quantitative data with
+        rectangular bars with lengths proportional to the values that they
+        represent. A bar plot shows comparisons among discrete categories. One
+        axis of the plot shows the specific categories being compared, and the
+        other axis represents a measured value.
+        """
+        return self(kind="barh", x=x, y=y, **kwargs)
+
+    def box(self, by: IndexLabel | None = None, **kwargs) -> PlotAccessor:
+        r"""
+        Make a box plot of the DataFrame columns.
+
+        A box plot is a method for graphically depicting groups of numerical
+        data through their quartiles.
+        The box extends from the Q1 to Q3 quartile values of the data,
+        with a line at the median (Q2). The whiskers extend from the edges
+        of the box to show the range of the data. The position of the whiskers
+        is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the
+        box. Outlier points are those past the end of the whiskers.
+
+        For further details see Wikipedia's
+        entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`__.
+
+        A consideration when using this chart is that the box and the whiskers
+        can overlap, which is very common when plotting small sets of data.
+
+        Parameters
+        ----------
+        by : str or sequence
+            Column in the DataFrame to group by.
+
+            .. versionchanged:: 1.4.0
+
+               Previously, `by` was silently ignored and made no groupings.
+
+        **kwargs
+            Additional keywords are documented in
+            :meth:`DataFrame.plot`.
+
+        Returns
+        -------
+        :class:`matplotlib.axes.Axes` or numpy.ndarray of them
+
+        See Also
+        --------
+        DataFrame.boxplot: Another method to draw a box plot.
+        Series.plot.box: Draw a box plot from a Series object.
+        matplotlib.pyplot.boxplot: Draw a box plot in matplotlib.
+
+        Examples
+        --------
+        Draw a box plot from a DataFrame with four columns of randomly
+        generated data.
+
+        .. plot::
+            :context: close-figs
+
+            >>> data = np.random.randn(25, 4)
+            >>> df = pd.DataFrame(data, columns=list('ABCD'))
+            >>> ax = df.plot.box()
+
+        You can also generate groupings if you specify the `by` parameter (which
+        can take a column name, or a list or tuple of column names):
+
+        .. versionchanged:: 1.4.0
+
+        .. plot::
+            :context: close-figs
+
+            >>> age_list = [8, 10, 12, 14, 72, 74, 76, 78, 20, 25, 30, 35, 60, 85]
+            >>> df = pd.DataFrame({"gender": list("MMMMMMMMFFFFFF"), "age": age_list})
+            >>> ax = df.plot.box(column="age", by="gender", figsize=(10, 8))
+        """
+        return self(kind="box", by=by, **kwargs)
+
+    def hist(
+        self, by: IndexLabel | None = None, bins: int = 10, **kwargs
+    ) -> PlotAccessor:
+        """
+        Draw one histogram of the DataFrame's columns.
+
+        A histogram is a representation of the distribution of data.
+        This function groups the values of all given Series in the DataFrame
+        into bins and draws all bins in one :class:`matplotlib.axes.Axes`.
+        This is useful when the DataFrame's Series are in a similar scale.
+
+        Parameters
+        ----------
+        by : str or sequence, optional
+            Column in the DataFrame to group by.
+
+            .. versionchanged:: 1.4.0
+
+               Previously, `by` was silently ignored and made no groupings.
+
+        bins : int, default 10
+            Number of histogram bins to be used.
+        **kwargs
+            Additional keyword arguments are documented in
+            :meth:`DataFrame.plot`.
+ + Returns + ------- + class:`matplotlib.AxesSubplot` + Return a histogram plot. + + See Also + -------- + DataFrame.hist : Draw histograms per DataFrame's Series. + Series.hist : Draw a histogram with Series' data. + + Examples + -------- + When we roll a die 6000 times, we expect to get each value around 1000 + times. But when we roll two dice and sum the result, the distribution + is going to be quite different. A histogram illustrates those + distributions. + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame( + ... np.random.randint(1, 7, 6000), + ... columns = ['one']) + >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000) + >>> ax = df.plot.hist(bins=12, alpha=0.5) + + A grouped histogram can be generated by providing the parameter `by` (which + can be a column name, or a list of column names): + + .. plot:: + :context: close-figs + + >>> age_list = [8, 10, 12, 14, 72, 74, 76, 78, 20, 25, 30, 35, 60, 85] + >>> df = pd.DataFrame({"gender": list("MMMMMMMMFFFFFF"), "age": age_list}) + >>> ax = df.plot.hist(column=["age"], by="gender", figsize=(10, 8)) + """ + return self(kind="hist", by=by, bins=bins, **kwargs) + + def kde( + self, + bw_method: Literal["scott", "silverman"] | float | Callable | None = None, + ind: np.ndarray | int | None = None, + **kwargs, + ) -> PlotAccessor: + """ + Generate Kernel Density Estimate plot using Gaussian kernels. + + In statistics, `kernel density estimation`_ (KDE) is a non-parametric + way to estimate the probability density function (PDF) of a random + variable. This function uses Gaussian kernels and includes automatic + bandwidth determination. + + .. _kernel density estimation: + https://en.wikipedia.org/wiki/Kernel_density_estimation + + Parameters + ---------- + bw_method : str, scalar or callable, optional + The method used to calculate the estimator bandwidth. This can be + 'scott', 'silverman', a scalar constant or a callable. + If None (default), 'scott' is used. + See :class:`scipy.stats.gaussian_kde` for more information. + ind : NumPy array or int, optional + Evaluation points for the estimated PDF. If None (default), + 1000 equally spaced points are used. If `ind` is a NumPy array, the + KDE is evaluated at the points passed. If `ind` is an integer, + `ind` number of equally spaced points are used. + **kwargs + Additional keyword arguments are documented in + :meth:`DataFrame.plot`. + + Returns + ------- + matplotlib.axes.Axes or numpy.ndarray of them + + See Also + -------- + scipy.stats.gaussian_kde : Representation of a kernel-density + estimate using Gaussian kernels. This is the function used + internally to estimate the PDF. + + Examples + -------- + Given a Series of points randomly sampled from an unknown + distribution, estimate its PDF using KDE with automatic + bandwidth determination and plot the results, evaluating them at + 1000 equally spaced points (default): + + .. plot:: + :context: close-figs + + >>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5]) + >>> ax = s.plot.kde() + + A scalar bandwidth can be specified. Using a small bandwidth value can + lead to over-fitting, while using a large bandwidth value may result + in under-fitting: + + .. plot:: + :context: close-figs + + >>> ax = s.plot.kde(bw_method=0.3) + + .. plot:: + :context: close-figs + + >>> ax = s.plot.kde(bw_method=3) + + Finally, the `ind` parameter determines the evaluation points for the + plot of the estimated PDF: + + .. 
plot:: + :context: close-figs + + >>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5]) + + For DataFrame, it works in the same way: + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({ + ... 'x': [1, 2, 2.5, 3, 3.5, 4, 5], + ... 'y': [4, 4, 4.5, 5, 5.5, 6, 6], + ... }) + >>> ax = df.plot.kde() + + A scalar bandwidth can be specified. Using a small bandwidth value can + lead to over-fitting, while using a large bandwidth value may result + in under-fitting: + + .. plot:: + :context: close-figs + + >>> ax = df.plot.kde(bw_method=0.3) + + .. plot:: + :context: close-figs + + >>> ax = df.plot.kde(bw_method=3) + + Finally, the `ind` parameter determines the evaluation points for the + plot of the estimated PDF: + + .. plot:: + :context: close-figs + + >>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6]) + """ + return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs) + + density = kde + + def area( + self, + x: Hashable | None = None, + y: Hashable | None = None, + stacked: bool = True, + **kwargs, + ) -> PlotAccessor: + """ + Draw a stacked area plot. + + An area plot displays quantitative data visually. + This function wraps the matplotlib area function. + + Parameters + ---------- + x : label or position, optional + Coordinates for the X axis. By default uses the index. + y : label or position, optional + Column to plot. By default uses all columns. + stacked : bool, default True + Area plots are stacked by default. Set to False to create a + unstacked plot. + **kwargs + Additional keyword arguments are documented in + :meth:`DataFrame.plot`. + + Returns + ------- + matplotlib.axes.Axes or numpy.ndarray + Area plot, or array of area plots if subplots is True. + + See Also + -------- + DataFrame.plot : Make plots of DataFrame using matplotlib / pylab. + + Examples + -------- + Draw an area plot based on basic business metrics: + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({ + ... 'sales': [3, 2, 3, 9, 10, 6], + ... 'signups': [5, 5, 6, 12, 14, 13], + ... 'visits': [20, 42, 28, 62, 81, 50], + ... }, index=pd.date_range(start='2018/01/01', end='2018/07/01', + ... freq='M')) + >>> ax = df.plot.area() + + Area plots are stacked by default. To produce an unstacked plot, + pass ``stacked=False``: + + .. plot:: + :context: close-figs + + >>> ax = df.plot.area(stacked=False) + + Draw an area plot for a single column: + + .. plot:: + :context: close-figs + + >>> ax = df.plot.area(y='sales') + + Draw with a different `x`: + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({ + ... 'sales': [3, 2, 3], + ... 'visits': [20, 42, 28], + ... 'day': [1, 2, 3], + ... }) + >>> ax = df.plot.area(x='day') + """ + return self(kind="area", x=x, y=y, stacked=stacked, **kwargs) + + def pie(self, **kwargs) -> PlotAccessor: + """ + Generate a pie plot. + + A pie plot is a proportional representation of the numerical data in a + column. This function wraps :meth:`matplotlib.pyplot.pie` for the + specified column. If no column reference is passed and + ``subplots=True`` a pie plot is drawn for each numerical column + independently. + + Parameters + ---------- + y : int or label, optional + Label or position of the column to plot. + If not provided, ``subplots=True`` argument must be passed. + **kwargs + Keyword arguments to pass on to :meth:`DataFrame.plot`. + + Returns + ------- + matplotlib.axes.Axes or np.ndarray of them + A NumPy array is returned when `subplots` is True. + + See Also + -------- + Series.plot.pie : Generate a pie plot for a Series. 
+ DataFrame.plot : Make plots of a DataFrame. + + Examples + -------- + In the example below we have a DataFrame with the information about + planet's mass and radius. We pass the 'mass' column to the + pie function to get a pie plot. + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({'mass': [0.330, 4.87 , 5.97], + ... 'radius': [2439.7, 6051.8, 6378.1]}, + ... index=['Mercury', 'Venus', 'Earth']) + >>> plot = df.plot.pie(y='mass', figsize=(5, 5)) + + .. plot:: + :context: close-figs + + >>> plot = df.plot.pie(subplots=True, figsize=(11, 6)) + """ + if ( + isinstance(self._parent, ABCDataFrame) + and kwargs.get("y", None) is None + and not kwargs.get("subplots", False) + ): + raise ValueError("pie requires either y column or 'subplots=True'") + return self(kind="pie", **kwargs) + + def scatter( + self, + x: Hashable, + y: Hashable, + s: Hashable | Sequence[Hashable] | None = None, + c: Hashable | Sequence[Hashable] | None = None, + **kwargs, + ) -> PlotAccessor: + """ + Create a scatter plot with varying marker point size and color. + + The coordinates of each point are defined by two dataframe columns and + filled circles are used to represent each point. This kind of plot is + useful to see complex correlations between two variables. Points could + be for instance natural 2D coordinates like longitude and latitude in + a map or, in general, any pair of metrics that can be plotted against + each other. + + Parameters + ---------- + x : int or str + The column name or column position to be used as horizontal + coordinates for each point. + y : int or str + The column name or column position to be used as vertical + coordinates for each point. + s : str, scalar or array-like, optional + The size of each point. Possible values are: + + - A string with the name of the column to be used for marker's size. + + - A single scalar so all points have the same size. + + - A sequence of scalars, which will be used for each point's size + recursively. For instance, when passing [2,14] all points size + will be either 2 or 14, alternatively. + + c : str, int or array-like, optional + The color of each point. Possible values are: + + - A single color string referred to by name, RGB or RGBA code, + for instance 'red' or '#a98d19'. + + - A sequence of color strings referred to by name, RGB or RGBA + code, which will be used for each point's color recursively. For + instance ['green','yellow'] all points will be filled in green or + yellow, alternatively. + + - A column name or position whose values will be used to color the + marker points according to a colormap. + + **kwargs + Keyword arguments to pass on to :meth:`DataFrame.plot`. + + Returns + ------- + :class:`matplotlib.axes.Axes` or numpy.ndarray of them + + See Also + -------- + matplotlib.pyplot.scatter : Scatter plot using multiple input data + formats. + + Examples + -------- + Let's see how to draw a scatter plot using coordinates from the values + in a DataFrame's columns. + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1], + ... [6.4, 3.2, 1], [5.9, 3.0, 2]], + ... columns=['length', 'width', 'species']) + >>> ax1 = df.plot.scatter(x='length', + ... y='width', + ... c='DarkBlue') + + And now with the color determined by a column as well. + + .. plot:: + :context: close-figs + + >>> ax2 = df.plot.scatter(x='length', + ... y='width', + ... c='species', + ... 
colormap='viridis') + """ + return self(kind="scatter", x=x, y=y, s=s, c=c, **kwargs) + + def hexbin( + self, + x: Hashable, + y: Hashable, + C: Hashable | None = None, + reduce_C_function: Callable | None = None, + gridsize: int | tuple[int, int] | None = None, + **kwargs, + ) -> PlotAccessor: + """ + Generate a hexagonal binning plot. + + Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None` + (the default), this is a histogram of the number of occurrences + of the observations at ``(x[i], y[i])``. + + If `C` is specified, specifies values at given coordinates + ``(x[i], y[i])``. These values are accumulated for each hexagonal + bin and then reduced according to `reduce_C_function`, + having as default the NumPy's mean function (:meth:`numpy.mean`). + (If `C` is specified, it must also be a 1-D sequence + of the same length as `x` and `y`, or a column label.) + + Parameters + ---------- + x : int or str + The column label or position for x points. + y : int or str + The column label or position for y points. + C : int or str, optional + The column label or position for the value of `(x, y)` point. + reduce_C_function : callable, default `np.mean` + Function of one argument that reduces all the values in a bin to + a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`). + gridsize : int or tuple of (int, int), default 100 + The number of hexagons in the x-direction. + The corresponding number of hexagons in the y-direction is + chosen in a way that the hexagons are approximately regular. + Alternatively, gridsize can be a tuple with two elements + specifying the number of hexagons in the x-direction and the + y-direction. + **kwargs + Additional keyword arguments are documented in + :meth:`DataFrame.plot`. + + Returns + ------- + matplotlib.AxesSubplot + The matplotlib ``Axes`` on which the hexbin is plotted. + + See Also + -------- + DataFrame.plot : Make plots of a DataFrame. + matplotlib.pyplot.hexbin : Hexagonal binning plot using matplotlib, + the matplotlib function that is used under the hood. + + Examples + -------- + The following examples are generated with random data from + a normal distribution. + + .. plot:: + :context: close-figs + + >>> n = 10000 + >>> df = pd.DataFrame({'x': np.random.randn(n), + ... 'y': np.random.randn(n)}) + >>> ax = df.plot.hexbin(x='x', y='y', gridsize=20) + + The next example uses `C` and `np.sum` as `reduce_C_function`. + Note that `'observations'` values ranges from 1 to 5 but the result + plot shows values up to more than 25. This is because of the + `reduce_C_function`. + + .. plot:: + :context: close-figs + + >>> n = 500 + >>> df = pd.DataFrame({ + ... 'coord_x': np.random.uniform(-3, 3, size=n), + ... 'coord_y': np.random.uniform(30, 50, size=n), + ... 'observations': np.random.randint(1,5, size=n) + ... }) + >>> ax = df.plot.hexbin(x='coord_x', + ... y='coord_y', + ... C='observations', + ... reduce_C_function=np.sum, + ... gridsize=10, + ... cmap="viridis") + """ + if reduce_C_function is not None: + kwargs["reduce_C_function"] = reduce_C_function + if gridsize is not None: + kwargs["gridsize"] = gridsize + + return self(kind="hexbin", x=x, y=y, C=C, **kwargs) + + +_backends: dict[str, types.ModuleType] = {} + + +def _load_backend(backend: str) -> types.ModuleType: + """ + Load a pandas plotting backend. + + Parameters + ---------- + backend : str + The identifier for the backend. Either an entrypoint item registered + with importlib.metadata, "matplotlib", or a module name. 
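+        For example, ``backend="plotly"`` is first looked up as an entry
+        point named ``plotly`` in the ``pandas_plotting_backends`` group;
+        failing that, ``importlib.import_module("plotly")`` is attempted.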
+ + Returns + ------- + types.ModuleType + The imported backend. + """ + from importlib.metadata import entry_points + + if backend == "matplotlib": + # Because matplotlib is an optional dependency and first-party backend, + # we need to attempt an import here to raise an ImportError if needed. + try: + module = importlib.import_module("pandas.plotting._matplotlib") + except ImportError: + raise ImportError( + "matplotlib is required for plotting when the " + 'default backend "matplotlib" is selected.' + ) from None + return module + + found_backend = False + + eps = entry_points() + key = "pandas_plotting_backends" + # entry_points lost dict API ~ PY 3.10 + # https://github.com/python/importlib_metadata/issues/298 + if hasattr(eps, "select"): + entry = eps.select(group=key) # pyright: ignore[reportGeneralTypeIssues] + else: + # Argument 2 to "get" of "dict" has incompatible type "Tuple[]"; + # expected "EntryPoints" [arg-type] + entry = eps.get(key, ()) # type: ignore[arg-type] + for entry_point in entry: + found_backend = entry_point.name == backend + if found_backend: + module = entry_point.load() + break + + if not found_backend: + # Fall back to unregistered, module name approach. + try: + module = importlib.import_module(backend) + found_backend = True + except ImportError: + # We re-raise later on. + pass + + if found_backend: + if hasattr(module, "plot"): + # Validate that the interface is implemented when the option is set, + # rather than at plot time. + return module + + raise ValueError( + f"Could not find plotting backend '{backend}'. Ensure that you've " + f"installed the package providing the '{backend}' entrypoint, or that " + "the package has a top-level `.plot` method." + ) + + +def _get_plot_backend(backend: str | None = None): + """ + Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`). + + The plotting system of pandas uses matplotlib by default, but the idea here + is that it can also work with other third-party backends. This function + returns the module which provides a top-level `.plot` method that will + actually do the plotting. The backend is specified from a string, which + either comes from the keyword argument `backend`, or, if not specified, from + the option `pandas.options.plotting.backend`. All the rest of the code in + this file uses the backend specified there for the plotting. + + The backend is imported lazily, as matplotlib is a soft dependency, and + pandas can be used without it being installed. + + Notes + ----- + Modifies `_backends` with imported backend as a side effect. 
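+
+    For example, the first ``df.plot()`` call imports and caches
+    ``pandas.plotting._matplotlib``; subsequent calls reuse the cached
+    module from ``_backends``.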
+ """ + backend_str: str = backend or get_option("plotting.backend") + + if backend_str in _backends: + return _backends[backend_str] + + module = _load_backend(backend_str) + _backends[backend_str] = module + return module diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/__init__.py new file mode 100644 index 00000000..75c61da0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/__init__.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas.plotting._matplotlib.boxplot import ( + BoxPlot, + boxplot, + boxplot_frame, + boxplot_frame_groupby, +) +from pandas.plotting._matplotlib.converter import ( + deregister, + register, +) +from pandas.plotting._matplotlib.core import ( + AreaPlot, + BarhPlot, + BarPlot, + HexBinPlot, + LinePlot, + PiePlot, + ScatterPlot, +) +from pandas.plotting._matplotlib.hist import ( + HistPlot, + KdePlot, + hist_frame, + hist_series, +) +from pandas.plotting._matplotlib.misc import ( + andrews_curves, + autocorrelation_plot, + bootstrap_plot, + lag_plot, + parallel_coordinates, + radviz, + scatter_matrix, +) +from pandas.plotting._matplotlib.tools import table + +if TYPE_CHECKING: + from pandas.plotting._matplotlib.core import MPLPlot + +PLOT_CLASSES: dict[str, type[MPLPlot]] = { + "line": LinePlot, + "bar": BarPlot, + "barh": BarhPlot, + "box": BoxPlot, + "hist": HistPlot, + "kde": KdePlot, + "area": AreaPlot, + "pie": PiePlot, + "scatter": ScatterPlot, + "hexbin": HexBinPlot, +} + + +def plot(data, kind, **kwargs): + # Importing pyplot at the top of the file (before the converters are + # registered) causes problems in matplotlib 2 (converters seem to not + # work) + import matplotlib.pyplot as plt + + if kwargs.pop("reuse_plot", False): + ax = kwargs.get("ax") + if ax is None and len(plt.get_fignums()) > 0: + with plt.rc_context(): + ax = plt.gca() + kwargs["ax"] = getattr(ax, "left_ax", ax) + plot_obj = PLOT_CLASSES[kind](data, **kwargs) + plot_obj.generate() + plot_obj.draw() + return plot_obj.result + + +__all__ = [ + "plot", + "hist_series", + "hist_frame", + "boxplot", + "boxplot_frame", + "boxplot_frame_groupby", + "table", + "andrews_curves", + "autocorrelation_plot", + "bootstrap_plot", + "lag_plot", + "parallel_coordinates", + "radviz", + "scatter_matrix", + "register", + "deregister", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/boxplot.py b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/boxplot.py new file mode 100644 index 00000000..83cb8a6a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/boxplot.py @@ -0,0 +1,550 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Literal, + NamedTuple, +) +import warnings + +from matplotlib.artist import setp +import numpy as np + +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import is_dict_like +from pandas.core.dtypes.missing import remove_na_arraylike + +import pandas as pd +import pandas.core.common as com + +from pandas.io.formats.printing import pprint_thing +from pandas.plotting._matplotlib.core import ( + LinePlot, + MPLPlot, +) +from pandas.plotting._matplotlib.groupby import create_iter_data_given_by +from pandas.plotting._matplotlib.style import get_standard_colors +from pandas.plotting._matplotlib.tools import ( + create_subplots, + flatten_axes, 
+ maybe_adjust_figure, +) + +if TYPE_CHECKING: + from collections.abc import Collection + + from matplotlib.axes import Axes + from matplotlib.lines import Line2D + + from pandas._typing import MatplotlibColor + + +class BoxPlot(LinePlot): + @property + def _kind(self) -> Literal["box"]: + return "box" + + _layout_type = "horizontal" + + _valid_return_types = (None, "axes", "dict", "both") + + class BP(NamedTuple): + # namedtuple to hold results + ax: Axes + lines: dict[str, list[Line2D]] + + def __init__(self, data, return_type: str = "axes", **kwargs) -> None: + if return_type not in self._valid_return_types: + raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}") + + self.return_type = return_type + # Do not call LinePlot.__init__ which may fill nan + MPLPlot.__init__(self, data, **kwargs) # pylint: disable=non-parent-init-called + + def _args_adjust(self) -> None: + if self.subplots: + # Disable label ax sharing. Otherwise, all subplots shows last + # column label + if self.orientation == "vertical": + self.sharex = False + else: + self.sharey = False + + # error: Signature of "_plot" incompatible with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, ax, y, column_num=None, return_type: str = "axes", **kwds + ): + if y.ndim == 2: + y = [remove_na_arraylike(v) for v in y] + # Boxplot fails with empty arrays, so need to add a NaN + # if any cols are empty + # GH 8181 + y = [v if v.size > 0 else np.array([np.nan]) for v in y] + else: + y = remove_na_arraylike(y) + bp = ax.boxplot(y, **kwds) + + if return_type == "dict": + return bp, bp + elif return_type == "both": + return cls.BP(ax=ax, lines=bp), bp + else: + return ax, bp + + def _validate_color_args(self): + if "color" in self.kwds: + if self.colormap is not None: + warnings.warn( + "'color' and 'colormap' cannot be used " + "simultaneously. Using 'color'", + stacklevel=find_stack_level(), + ) + self.color = self.kwds.pop("color") + + if isinstance(self.color, dict): + valid_keys = ["boxes", "whiskers", "medians", "caps"] + for key in self.color: + if key not in valid_keys: + raise ValueError( + f"color dict contains invalid key '{key}'. 
" + f"The key must be either {valid_keys}" + ) + else: + self.color = None + + # get standard colors for default + colors = get_standard_colors(num_colors=3, colormap=self.colormap, color=None) + # use 2 colors by default, for box/whisker and median + # flier colors isn't needed here + # because it can be specified by ``sym`` kw + self._boxes_c = colors[0] + self._whiskers_c = colors[0] + self._medians_c = colors[2] + self._caps_c = colors[0] + + def _get_colors( + self, + num_colors=None, + color_kwds: dict[str, MatplotlibColor] + | MatplotlibColor + | Collection[MatplotlibColor] + | None = "color", + ) -> None: + pass + + def maybe_color_bp(self, bp) -> None: + if isinstance(self.color, dict): + boxes = self.color.get("boxes", self._boxes_c) + whiskers = self.color.get("whiskers", self._whiskers_c) + medians = self.color.get("medians", self._medians_c) + caps = self.color.get("caps", self._caps_c) + else: + # Other types are forwarded to matplotlib + # If None, use default colors + boxes = self.color or self._boxes_c + whiskers = self.color or self._whiskers_c + medians = self.color or self._medians_c + caps = self.color or self._caps_c + + # GH 30346, when users specifying those arguments explicitly, our defaults + # for these four kwargs should be overridden; if not, use Pandas settings + if not self.kwds.get("boxprops"): + setp(bp["boxes"], color=boxes, alpha=1) + if not self.kwds.get("whiskerprops"): + setp(bp["whiskers"], color=whiskers, alpha=1) + if not self.kwds.get("medianprops"): + setp(bp["medians"], color=medians, alpha=1) + if not self.kwds.get("capprops"): + setp(bp["caps"], color=caps, alpha=1) + + def _make_plot(self) -> None: + if self.subplots: + self._return_obj = pd.Series(dtype=object) + + # Re-create iterated data if `by` is assigned by users + data = ( + create_iter_data_given_by(self.data, self._kind) + if self.by is not None + else self.data + ) + + for i, (label, y) in enumerate(self._iter_data(data=data)): + ax = self._get_ax(i) + kwds = self.kwds.copy() + + # When by is applied, show title for subplots to know which group it is + # just like df.boxplot, and need to apply T on y to provide right input + if self.by is not None: + y = y.T + ax.set_title(pprint_thing(label)) + + # When `by` is assigned, the ticklabels will become unique grouped + # values, instead of label which is used as subtitle in this case. 
+ ticklabels = [ + pprint_thing(col) for col in self.data.columns.levels[0] + ] + else: + ticklabels = [pprint_thing(label)] + + ret, bp = self._plot( + ax, y, column_num=i, return_type=self.return_type, **kwds + ) + self.maybe_color_bp(bp) + self._return_obj[label] = ret + self._set_ticklabels(ax, ticklabels) + else: + y = self.data.values.T + ax = self._get_ax(0) + kwds = self.kwds.copy() + + ret, bp = self._plot( + ax, y, column_num=0, return_type=self.return_type, **kwds + ) + self.maybe_color_bp(bp) + self._return_obj = ret + + labels = [left for left, _ in self._iter_data()] + labels = [pprint_thing(left) for left in labels] + if not self.use_index: + labels = [pprint_thing(key) for key in range(len(labels))] + self._set_ticklabels(ax, labels) + + def _set_ticklabels(self, ax: Axes, labels: list[str]) -> None: + if self.orientation == "vertical": + ax.set_xticklabels(labels) + else: + ax.set_yticklabels(labels) + + def _make_legend(self) -> None: + pass + + def _post_plot_logic(self, ax, data) -> None: + # GH 45465: make sure that the boxplot doesn't ignore xlabel/ylabel + if self.xlabel: + ax.set_xlabel(pprint_thing(self.xlabel)) + if self.ylabel: + ax.set_ylabel(pprint_thing(self.ylabel)) + + @property + def orientation(self) -> Literal["horizontal", "vertical"]: + if self.kwds.get("vert", True): + return "vertical" + else: + return "horizontal" + + @property + def result(self): + if self.return_type is None: + return super().result + else: + return self._return_obj + + +def _grouped_plot_by_column( + plotf, + data, + columns=None, + by=None, + numeric_only: bool = True, + grid: bool = False, + figsize: tuple[float, float] | None = None, + ax=None, + layout=None, + return_type=None, + **kwargs, +): + grouped = data.groupby(by, observed=False) + if columns is None: + if not isinstance(by, (list, tuple)): + by = [by] + columns = data._get_numeric_data().columns.difference(by) + naxes = len(columns) + fig, axes = create_subplots( + naxes=naxes, + sharex=kwargs.pop("sharex", True), + sharey=kwargs.pop("sharey", True), + figsize=figsize, + ax=ax, + layout=layout, + ) + + _axes = flatten_axes(axes) + + # GH 45465: move the "by" label based on "vert" + xlabel, ylabel = kwargs.pop("xlabel", None), kwargs.pop("ylabel", None) + if kwargs.get("vert", True): + xlabel = xlabel or by + else: + ylabel = ylabel or by + + ax_values = [] + + for i, col in enumerate(columns): + ax = _axes[i] + gp_col = grouped[col] + keys, values = zip(*gp_col) + re_plotf = plotf(keys, values, ax, xlabel=xlabel, ylabel=ylabel, **kwargs) + ax.set_title(col) + ax_values.append(re_plotf) + ax.grid(grid) + + result = pd.Series(ax_values, index=columns, copy=False) + + # Return axes in multiplot case, maybe revisit later # 985 + if return_type is None: + result = axes + + byline = by[0] if len(by) == 1 else by + fig.suptitle(f"Boxplot grouped by {byline}") + maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) + + return result + + +def boxplot( + data, + column=None, + by=None, + ax=None, + fontsize: int | None = None, + rot: int = 0, + grid: bool = True, + figsize: tuple[float, float] | None = None, + layout=None, + return_type=None, + **kwds, +): + import matplotlib.pyplot as plt + + # validate return_type: + if return_type not in BoxPlot._valid_return_types: + raise ValueError("return_type must be {'axes', 'dict', 'both'}") + + if isinstance(data, pd.Series): + data = data.to_frame("x") + column = "x" + + def _get_colors(): + # num_colors=3 is required as method maybe_color_bp takes the 
colors + # in positions 0 and 2. + # if colors not provided, use same defaults as DataFrame.plot.box + result = get_standard_colors(num_colors=3) + result = np.take(result, [0, 0, 2]) + result = np.append(result, "k") + + colors = kwds.pop("color", None) + if colors: + if is_dict_like(colors): + # replace colors in result array with user-specified colors + # taken from the colors dict parameter + # "boxes" value placed in position 0, "whiskers" in 1, etc. + valid_keys = ["boxes", "whiskers", "medians", "caps"] + key_to_index = dict(zip(valid_keys, range(4))) + for key, value in colors.items(): + if key in valid_keys: + result[key_to_index[key]] = value + else: + raise ValueError( + f"color dict contains invalid key '{key}'. " + f"The key must be either {valid_keys}" + ) + else: + result.fill(colors) + + return result + + def maybe_color_bp(bp, **kwds) -> None: + # GH 30346, when users specifying those arguments explicitly, our defaults + # for these four kwargs should be overridden; if not, use Pandas settings + if not kwds.get("boxprops"): + setp(bp["boxes"], color=colors[0], alpha=1) + if not kwds.get("whiskerprops"): + setp(bp["whiskers"], color=colors[1], alpha=1) + if not kwds.get("medianprops"): + setp(bp["medians"], color=colors[2], alpha=1) + if not kwds.get("capprops"): + setp(bp["caps"], color=colors[3], alpha=1) + + def plot_group(keys, values, ax: Axes, **kwds): + # GH 45465: xlabel/ylabel need to be popped out before plotting happens + xlabel, ylabel = kwds.pop("xlabel", None), kwds.pop("ylabel", None) + if xlabel: + ax.set_xlabel(pprint_thing(xlabel)) + if ylabel: + ax.set_ylabel(pprint_thing(ylabel)) + + keys = [pprint_thing(x) for x in keys] + values = [np.asarray(remove_na_arraylike(v), dtype=object) for v in values] + bp = ax.boxplot(values, **kwds) + if fontsize is not None: + ax.tick_params(axis="both", labelsize=fontsize) + + # GH 45465: x/y are flipped when "vert" changes + is_vertical = kwds.get("vert", True) + ticks = ax.get_xticks() if is_vertical else ax.get_yticks() + if len(ticks) != len(keys): + i, remainder = divmod(len(ticks), len(keys)) + assert remainder == 0, remainder + keys *= i + if is_vertical: + ax.set_xticklabels(keys, rotation=rot) + else: + ax.set_yticklabels(keys, rotation=rot) + maybe_color_bp(bp, **kwds) + + # Return axes in multiplot case, maybe revisit later # 985 + if return_type == "dict": + return bp + elif return_type == "both": + return BoxPlot.BP(ax=ax, lines=bp) + else: + return ax + + colors = _get_colors() + if column is None: + columns = None + elif isinstance(column, (list, tuple)): + columns = column + else: + columns = [column] + + if by is not None: + # Prefer array return type for 2-D plots to match the subplot layout + # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580 + result = _grouped_plot_by_column( + plot_group, + data, + columns=columns, + by=by, + grid=grid, + figsize=figsize, + ax=ax, + layout=layout, + return_type=return_type, + **kwds, + ) + else: + if return_type is None: + return_type = "axes" + if layout is not None: + raise ValueError("The 'layout' keyword is not supported when 'by' is None") + + if ax is None: + rc = {"figure.figsize": figsize} if figsize is not None else {} + with plt.rc_context(rc): + ax = plt.gca() + data = data._get_numeric_data() + naxes = len(data.columns) + if naxes == 0: + raise ValueError( + "boxplot method requires numerical columns, nothing to plot." 
+ ) + if columns is None: + columns = data.columns + else: + data = data[columns] + + result = plot_group(columns, data.values.T, ax, **kwds) + ax.grid(grid) + + return result + + +def boxplot_frame( + self, + column=None, + by=None, + ax=None, + fontsize: int | None = None, + rot: int = 0, + grid: bool = True, + figsize: tuple[float, float] | None = None, + layout=None, + return_type=None, + **kwds, +): + import matplotlib.pyplot as plt + + ax = boxplot( + self, + column=column, + by=by, + ax=ax, + fontsize=fontsize, + grid=grid, + rot=rot, + figsize=figsize, + layout=layout, + return_type=return_type, + **kwds, + ) + plt.draw_if_interactive() + return ax + + +def boxplot_frame_groupby( + grouped, + subplots: bool = True, + column=None, + fontsize: int | None = None, + rot: int = 0, + grid: bool = True, + ax=None, + figsize: tuple[float, float] | None = None, + layout=None, + sharex: bool = False, + sharey: bool = True, + **kwds, +): + if subplots is True: + naxes = len(grouped) + fig, axes = create_subplots( + naxes=naxes, + squeeze=False, + ax=ax, + sharex=sharex, + sharey=sharey, + figsize=figsize, + layout=layout, + ) + axes = flatten_axes(axes) + + ret = pd.Series(dtype=object) + + for (key, group), ax in zip(grouped, axes): + d = group.boxplot( + ax=ax, column=column, fontsize=fontsize, rot=rot, grid=grid, **kwds + ) + ax.set_title(pprint_thing(key)) + ret.loc[key] = d + maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) + else: + keys, frames = zip(*grouped) + if grouped.axis == 0: + df = pd.concat(frames, keys=keys, axis=1) + elif len(frames) > 1: + df = frames[0].join(frames[1::]) + else: + df = frames[0] + + # GH 16748, DataFrameGroupby fails when subplots=False and `column` argument + # is assigned, and in this case, since `df` here becomes MI after groupby, + # so we need to couple the keys (grouped values) and column (original df + # column) together to search for subset to plot + if column is not None: + column = com.convert_to_list_like(column) + multi_key = pd.MultiIndex.from_product([keys, column]) + column = list(multi_key.values) + ret = df.boxplot( + column=column, + fontsize=fontsize, + rot=rot, + grid=grid, + ax=ax, + figsize=figsize, + layout=layout, + **kwds, + ) + return ret diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/converter.py b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/converter.py new file mode 100644 index 00000000..be0ded0e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/converter.py @@ -0,0 +1,1139 @@ +from __future__ import annotations + +import contextlib +import datetime as pydt +from datetime import ( + datetime, + timedelta, + tzinfo, +) +import functools +from typing import ( + TYPE_CHECKING, + Any, + Final, + cast, +) +import warnings + +import matplotlib.dates as mdates +from matplotlib.ticker import ( + AutoLocator, + Formatter, + Locator, +) +from matplotlib.transforms import nonsingular +import matplotlib.units as munits +import numpy as np + +from pandas._libs import lib +from pandas._libs.tslibs import ( + Timestamp, + to_offset, +) +from pandas._libs.tslibs.dtypes import FreqGroup +from pandas._typing import F + +from pandas.core.dtypes.common import ( + is_float, + is_float_dtype, + is_integer, + is_integer_dtype, + is_nested_list_like, +) + +from pandas import ( + Index, + Series, + get_option, +) +import pandas.core.common as com +from pandas.core.indexes.datetimes import date_range +from pandas.core.indexes.period 
import ( + Period, + PeriodIndex, + period_range, +) +import pandas.core.tools.datetimes as tools + +if TYPE_CHECKING: + from collections.abc import Generator + + from pandas._libs.tslibs.offsets import BaseOffset + +# constants +HOURS_PER_DAY: Final = 24.0 +MIN_PER_HOUR: Final = 60.0 +SEC_PER_MIN: Final = 60.0 + +SEC_PER_HOUR: Final = SEC_PER_MIN * MIN_PER_HOUR +SEC_PER_DAY: Final = SEC_PER_HOUR * HOURS_PER_DAY + +MUSEC_PER_DAY: Final = 10**6 * SEC_PER_DAY + +_mpl_units = {} # Cache for units overwritten by us + + +def get_pairs(): + pairs = [ + (Timestamp, DatetimeConverter), + (Period, PeriodConverter), + (pydt.datetime, DatetimeConverter), + (pydt.date, DatetimeConverter), + (pydt.time, TimeConverter), + (np.datetime64, DatetimeConverter), + ] + return pairs + + +def register_pandas_matplotlib_converters(func: F) -> F: + """ + Decorator applying pandas_converters. + """ + + @functools.wraps(func) + def wrapper(*args, **kwargs): + with pandas_converters(): + return func(*args, **kwargs) + + return cast(F, wrapper) + + +@contextlib.contextmanager +def pandas_converters() -> Generator[None, None, None]: + """ + Context manager registering pandas' converters for a plot. + + See Also + -------- + register_pandas_matplotlib_converters : Decorator that applies this. + """ + value = get_option("plotting.matplotlib.register_converters") + + if value: + # register for True or "auto" + register() + try: + yield + finally: + if value == "auto": + # only deregister for "auto" + deregister() + + +def register() -> None: + pairs = get_pairs() + for type_, cls in pairs: + # Cache previous converter if present + if type_ in munits.registry and not isinstance(munits.registry[type_], cls): + previous = munits.registry[type_] + _mpl_units[type_] = previous + # Replace with pandas converter + munits.registry[type_] = cls() + + +def deregister() -> None: + # Renamed in pandas.plotting.__init__ + for type_, cls in get_pairs(): + # We use type to catch our classes directly, no inheritance + if type(munits.registry.get(type_)) is cls: + munits.registry.pop(type_) + + # restore the old keys + for unit, formatter in _mpl_units.items(): + if type(formatter) not in {DatetimeConverter, PeriodConverter, TimeConverter}: + # make it idempotent by excluding ours. 
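+            # e.g. a matplotlib DateConverter cached by register() is
+            # restored, while pandas' own converter instances are not.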
+ munits.registry[unit] = formatter + + +def _to_ordinalf(tm: pydt.time) -> float: + tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + tm.microsecond / 10**6 + return tot_sec + + +def time2num(d): + if isinstance(d, str): + parsed = Timestamp(d) + return _to_ordinalf(parsed.time()) + if isinstance(d, pydt.time): + return _to_ordinalf(d) + return d + + +class TimeConverter(munits.ConversionInterface): + @staticmethod + def convert(value, unit, axis): + valid_types = (str, pydt.time) + if isinstance(value, valid_types) or is_integer(value) or is_float(value): + return time2num(value) + if isinstance(value, Index): + return value.map(time2num) + if isinstance(value, (list, tuple, np.ndarray, Index)): + return [time2num(x) for x in value] + return value + + @staticmethod + def axisinfo(unit, axis) -> munits.AxisInfo | None: + if unit != "time": + return None + + majloc = AutoLocator() + majfmt = TimeFormatter(majloc) + return munits.AxisInfo(majloc=majloc, majfmt=majfmt, label="time") + + @staticmethod + def default_units(x, axis) -> str: + return "time" + + +# time formatter +class TimeFormatter(Formatter): + def __init__(self, locs) -> None: + self.locs = locs + + def __call__(self, x, pos: int = 0) -> str: + """ + Return the time of day as a formatted string. + + Parameters + ---------- + x : float + The time of day specified as seconds since 00:00 (midnight), + with up to microsecond precision. + pos + Unused + + Returns + ------- + str + A string in HH:MM:SS.mmmuuu format. Microseconds, + milliseconds and seconds are only displayed if non-zero. + """ + fmt = "%H:%M:%S.%f" + s = int(x) + msus = round((x - s) * 10**6) + ms = msus // 1000 + us = msus % 1000 + m, s = divmod(s, 60) + h, m = divmod(m, 60) + _, h = divmod(h, 24) + if us != 0: + return pydt.time(h, m, s, msus).strftime(fmt) + elif ms != 0: + return pydt.time(h, m, s, msus).strftime(fmt)[:-3] + elif s != 0: + return pydt.time(h, m, s).strftime("%H:%M:%S") + + return pydt.time(h, m).strftime("%H:%M") + + +# Period Conversion + + +class PeriodConverter(mdates.DateConverter): + @staticmethod + def convert(values, units, axis): + if is_nested_list_like(values): + values = [PeriodConverter._convert_1d(v, units, axis) for v in values] + else: + values = PeriodConverter._convert_1d(values, units, axis) + return values + + @staticmethod + def _convert_1d(values, units, axis): + if not hasattr(axis, "freq"): + raise TypeError("Axis must have `freq` set to convert to Periods") + valid_types = (str, datetime, Period, pydt.date, pydt.time, np.datetime64) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "Period with BDay freq is deprecated", category=FutureWarning + ) + warnings.filterwarnings( + "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning + ) + if ( + isinstance(values, valid_types) + or is_integer(values) + or is_float(values) + ): + return get_datevalue(values, axis.freq) + elif isinstance(values, PeriodIndex): + return values.asfreq(axis.freq).asi8 + elif isinstance(values, Index): + return values.map(lambda x: get_datevalue(x, axis.freq)) + elif lib.infer_dtype(values, skipna=False) == "period": + # https://github.com/pandas-dev/pandas/issues/24304 + # convert ndarray[period] -> PeriodIndex + return PeriodIndex(values, freq=axis.freq).asi8 + elif isinstance(values, (list, tuple, np.ndarray, Index)): + return [get_datevalue(x, axis.freq) for x in values] + return values + + +def get_datevalue(date, freq): + if isinstance(date, Period): + return date.asfreq(freq).ordinal + elif 
isinstance(date, (str, datetime, pydt.date, pydt.time, np.datetime64)): + return Period(date, freq).ordinal + elif ( + is_integer(date) + or is_float(date) + or (isinstance(date, (np.ndarray, Index)) and (date.size == 1)) + ): + return date + elif date is None: + return None + raise ValueError(f"Unrecognizable date '{date}'") + + +# Datetime Conversion +class DatetimeConverter(mdates.DateConverter): + @staticmethod + def convert(values, unit, axis): + # values might be a 1-d array, or a list-like of arrays. + if is_nested_list_like(values): + values = [DatetimeConverter._convert_1d(v, unit, axis) for v in values] + else: + values = DatetimeConverter._convert_1d(values, unit, axis) + return values + + @staticmethod + def _convert_1d(values, unit, axis): + def try_parse(values): + try: + return mdates.date2num(tools.to_datetime(values)) + except Exception: + return values + + if isinstance(values, (datetime, pydt.date, np.datetime64, pydt.time)): + return mdates.date2num(values) + elif is_integer(values) or is_float(values): + return values + elif isinstance(values, str): + return try_parse(values) + elif isinstance(values, (list, tuple, np.ndarray, Index, Series)): + if isinstance(values, Series): + # https://github.com/matplotlib/matplotlib/issues/11391 + # Series was skipped. Convert to DatetimeIndex to get asi8 + values = Index(values) + if isinstance(values, Index): + values = values.values + if not isinstance(values, np.ndarray): + values = com.asarray_tuplesafe(values) + + if is_integer_dtype(values) or is_float_dtype(values): + return values + + try: + values = tools.to_datetime(values) + except Exception: + pass + + values = mdates.date2num(values) + + return values + + @staticmethod + def axisinfo(unit: tzinfo | None, axis) -> munits.AxisInfo: + """ + Return the :class:`~matplotlib.units.AxisInfo` for *unit*. + + *unit* is a tzinfo instance or None. + The *axis* argument is required but not used. 
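+
+        When no data have been plotted, the axis falls back to default
+        limits spanning 2000-01-01 through 2010-01-01.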
+ """ + tz = unit + + majloc = PandasAutoDateLocator(tz=tz) + majfmt = PandasAutoDateFormatter(majloc, tz=tz) + datemin = pydt.date(2000, 1, 1) + datemax = pydt.date(2010, 1, 1) + + return munits.AxisInfo( + majloc=majloc, majfmt=majfmt, label="", default_limits=(datemin, datemax) + ) + + +class PandasAutoDateFormatter(mdates.AutoDateFormatter): + def __init__(self, locator, tz=None, defaultfmt: str = "%Y-%m-%d") -> None: + mdates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt) + + +class PandasAutoDateLocator(mdates.AutoDateLocator): + def get_locator(self, dmin, dmax): + """Pick the best locator based on a distance.""" + tot_sec = (dmax - dmin).total_seconds() + + if abs(tot_sec) < self.minticks: + self._freq = -1 + locator = MilliSecondLocator(self.tz) + locator.set_axis(self.axis) + + locator.axis.set_view_interval(*self.axis.get_view_interval()) + locator.axis.set_data_interval(*self.axis.get_data_interval()) + return locator + + return mdates.AutoDateLocator.get_locator(self, dmin, dmax) + + def _get_unit(self): + return MilliSecondLocator.get_unit_generic(self._freq) + + +class MilliSecondLocator(mdates.DateLocator): + UNIT = 1.0 / (24 * 3600 * 1000) + + def __init__(self, tz) -> None: + mdates.DateLocator.__init__(self, tz) + self._interval = 1.0 + + def _get_unit(self): + return self.get_unit_generic(-1) + + @staticmethod + def get_unit_generic(freq): + unit = mdates.RRuleLocator.get_unit_generic(freq) + if unit < 0: + return MilliSecondLocator.UNIT + return unit + + def __call__(self): + # if no data have been set, this will tank with a ValueError + try: + dmin, dmax = self.viewlim_to_dt() + except ValueError: + return [] + + # We need to cap at the endpoints of valid datetime + nmax, nmin = mdates.date2num((dmax, dmin)) + + num = (nmax - nmin) * 86400 * 1000 + max_millis_ticks = 6 + for interval in [1, 10, 50, 100, 200, 500]: + if num <= interval * (max_millis_ticks - 1): + self._interval = interval + break + # We went through the whole loop without breaking, default to 1 + self._interval = 1000.0 + + estimate = (nmax - nmin) / (self._get_unit() * self._get_interval()) + + if estimate > self.MAXTICKS * 2: + raise RuntimeError( + "MillisecondLocator estimated to generate " + f"{estimate:d} ticks from {dmin} to {dmax}: exceeds Locator.MAXTICKS" + f"* 2 ({self.MAXTICKS * 2:d}) " + ) + + interval = self._get_interval() + freq = f"{interval}L" + tz = self.tz.tzname(None) + st = dmin.replace(tzinfo=None) + ed = dmin.replace(tzinfo=None) + all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).astype(object) + + try: + if len(all_dates) > 0: + locs = self.raise_if_exceeds(mdates.date2num(all_dates)) + return locs + except Exception: # pragma: no cover + pass + + lims = mdates.date2num([dmin, dmax]) + return lims + + def _get_interval(self): + return self._interval + + def autoscale(self): + """ + Set the view limits to include the data range. 
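+
+        The data interval is converted to matplotlib date numbers and passed
+        through ``nonsingular`` to guard against degenerate limits.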
+ """ + # We need to cap at the endpoints of valid datetime + dmin, dmax = self.datalim_to_dt() + + vmin = mdates.date2num(dmin) + vmax = mdates.date2num(dmax) + + return self.nonsingular(vmin, vmax) + + +def _from_ordinal(x, tz: tzinfo | None = None) -> datetime: + ix = int(x) + dt = datetime.fromordinal(ix) + remainder = float(x) - ix + hour, remainder = divmod(24 * remainder, 1) + minute, remainder = divmod(60 * remainder, 1) + second, remainder = divmod(60 * remainder, 1) + microsecond = int(1_000_000 * remainder) + if microsecond < 10: + microsecond = 0 # compensate for rounding errors + dt = datetime( + dt.year, dt.month, dt.day, int(hour), int(minute), int(second), microsecond + ) + if tz is not None: + dt = dt.astimezone(tz) + + if microsecond > 999990: # compensate for rounding errors + dt += timedelta(microseconds=1_000_000 - microsecond) + + return dt + + +# Fixed frequency dynamic tick locators and formatters + +# ------------------------------------------------------------------------- +# --- Locators --- +# ------------------------------------------------------------------------- + + +def _get_default_annual_spacing(nyears) -> tuple[int, int]: + """ + Returns a default spacing between consecutive ticks for annual data. + """ + if nyears < 11: + (min_spacing, maj_spacing) = (1, 1) + elif nyears < 20: + (min_spacing, maj_spacing) = (1, 2) + elif nyears < 50: + (min_spacing, maj_spacing) = (1, 5) + elif nyears < 100: + (min_spacing, maj_spacing) = (5, 10) + elif nyears < 200: + (min_spacing, maj_spacing) = (5, 25) + elif nyears < 600: + (min_spacing, maj_spacing) = (10, 50) + else: + factor = nyears // 1000 + 1 + (min_spacing, maj_spacing) = (factor * 20, factor * 100) + return (min_spacing, maj_spacing) + + +def period_break(dates: PeriodIndex, period: str) -> np.ndarray: + """ + Returns the indices where the given period changes. + + Parameters + ---------- + dates : PeriodIndex + Array of intervals to monitor. + period : str + Name of the period to monitor. + """ + current = getattr(dates, period) + previous = getattr(dates - 1 * dates.freq, period) + return np.nonzero(current - previous)[0] + + +def has_level_label(label_flags: np.ndarray, vmin: float) -> bool: + """ + Returns true if the ``label_flags`` indicate there is at least one label + for this level. + + if the minimum view limit is not an exact integer, then the first tick + label won't be shown, so we must adjust for that. 
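+
+    For example, a single flag at index 0 with ``vmin=0.5`` returns False,
+    since that first label would be clipped from view.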
+ """ + if label_flags.size == 0 or ( + label_flags.size == 1 and label_flags[0] == 0 and vmin % 1 > 0.0 + ): + return False + else: + return True + + +def _daily_finder(vmin, vmax, freq: BaseOffset): + # error: "BaseOffset" has no attribute "_period_dtype_code" + dtype_code = freq._period_dtype_code # type: ignore[attr-defined] + freq_group = FreqGroup.from_period_dtype_code(dtype_code) + + periodsperday = -1 + + if dtype_code >= FreqGroup.FR_HR.value: + if freq_group == FreqGroup.FR_NS: + periodsperday = 24 * 60 * 60 * 1000000000 + elif freq_group == FreqGroup.FR_US: + periodsperday = 24 * 60 * 60 * 1000000 + elif freq_group == FreqGroup.FR_MS: + periodsperday = 24 * 60 * 60 * 1000 + elif freq_group == FreqGroup.FR_SEC: + periodsperday = 24 * 60 * 60 + elif freq_group == FreqGroup.FR_MIN: + periodsperday = 24 * 60 + elif freq_group == FreqGroup.FR_HR: + periodsperday = 24 + else: # pragma: no cover + raise ValueError(f"unexpected frequency: {dtype_code}") + periodsperyear = 365 * periodsperday + periodspermonth = 28 * periodsperday + + elif freq_group == FreqGroup.FR_BUS: + periodsperyear = 261 + periodspermonth = 19 + elif freq_group == FreqGroup.FR_DAY: + periodsperyear = 365 + periodspermonth = 28 + elif freq_group == FreqGroup.FR_WK: + periodsperyear = 52 + periodspermonth = 3 + else: # pragma: no cover + raise ValueError("unexpected frequency") + + # save this for later usage + vmin_orig = vmin + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "Period with BDay freq is deprecated", category=FutureWarning + ) + warnings.filterwarnings( + "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning + ) + (vmin, vmax) = ( + Period(ordinal=int(vmin), freq=freq), + Period(ordinal=int(vmax), freq=freq), + ) + assert isinstance(vmin, Period) + assert isinstance(vmax, Period) + span = vmax.ordinal - vmin.ordinal + 1 + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "Period with BDay freq is deprecated", category=FutureWarning + ) + warnings.filterwarnings( + "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning + ) + dates_ = period_range(start=vmin, end=vmax, freq=freq) + + # Initialize the output + info = np.zeros( + span, dtype=[("val", np.int64), ("maj", bool), ("min", bool), ("fmt", "|S20")] + ) + info["val"][:] = dates_.asi8 + info["fmt"][:] = "" + info["maj"][[0, -1]] = True + # .. and set some shortcuts + info_maj = info["maj"] + info_min = info["min"] + info_fmt = info["fmt"] + + def first_label(label_flags): + if (label_flags[0] == 0) and (label_flags.size > 1) and ((vmin_orig % 1) > 0.0): + return label_flags[1] + else: + return label_flags[0] + + # Case 1. 
Less than a month + if span <= periodspermonth: + day_start = period_break(dates_, "day") + month_start = period_break(dates_, "month") + + def _hour_finder(label_interval, force_year_start) -> None: + _hour = dates_.hour + _prev_hour = (dates_ - 1 * dates_.freq).hour + hour_start = (_hour - _prev_hour) != 0 + info_maj[day_start] = True + info_min[hour_start & (_hour % label_interval == 0)] = True + year_start = period_break(dates_, "year") + info_fmt[hour_start & (_hour % label_interval == 0)] = "%H:%M" + info_fmt[day_start] = "%H:%M\n%d-%b" + info_fmt[year_start] = "%H:%M\n%d-%b\n%Y" + if force_year_start and not has_level_label(year_start, vmin_orig): + info_fmt[first_label(day_start)] = "%H:%M\n%d-%b\n%Y" + + def _minute_finder(label_interval) -> None: + hour_start = period_break(dates_, "hour") + _minute = dates_.minute + _prev_minute = (dates_ - 1 * dates_.freq).minute + minute_start = (_minute - _prev_minute) != 0 + info_maj[hour_start] = True + info_min[minute_start & (_minute % label_interval == 0)] = True + year_start = period_break(dates_, "year") + info_fmt = info["fmt"] + info_fmt[minute_start & (_minute % label_interval == 0)] = "%H:%M" + info_fmt[day_start] = "%H:%M\n%d-%b" + info_fmt[year_start] = "%H:%M\n%d-%b\n%Y" + + def _second_finder(label_interval) -> None: + minute_start = period_break(dates_, "minute") + _second = dates_.second + _prev_second = (dates_ - 1 * dates_.freq).second + second_start = (_second - _prev_second) != 0 + info["maj"][minute_start] = True + info["min"][second_start & (_second % label_interval == 0)] = True + year_start = period_break(dates_, "year") + info_fmt = info["fmt"] + info_fmt[second_start & (_second % label_interval == 0)] = "%H:%M:%S" + info_fmt[day_start] = "%H:%M:%S\n%d-%b" + info_fmt[year_start] = "%H:%M:%S\n%d-%b\n%Y" + + if span < periodsperday / 12000: + _second_finder(1) + elif span < periodsperday / 6000: + _second_finder(2) + elif span < periodsperday / 2400: + _second_finder(5) + elif span < periodsperday / 1200: + _second_finder(10) + elif span < periodsperday / 800: + _second_finder(15) + elif span < periodsperday / 400: + _second_finder(30) + elif span < periodsperday / 150: + _minute_finder(1) + elif span < periodsperday / 70: + _minute_finder(2) + elif span < periodsperday / 24: + _minute_finder(5) + elif span < periodsperday / 12: + _minute_finder(15) + elif span < periodsperday / 6: + _minute_finder(30) + elif span < periodsperday / 2.5: + _hour_finder(1, False) + elif span < periodsperday / 1.5: + _hour_finder(2, False) + elif span < periodsperday * 1.25: + _hour_finder(3, False) + elif span < periodsperday * 2.5: + _hour_finder(6, True) + elif span < periodsperday * 4: + _hour_finder(12, True) + else: + info_maj[month_start] = True + info_min[day_start] = True + year_start = period_break(dates_, "year") + info_fmt = info["fmt"] + info_fmt[day_start] = "%d" + info_fmt[month_start] = "%d\n%b" + info_fmt[year_start] = "%d\n%b\n%Y" + if not has_level_label(year_start, vmin_orig): + if not has_level_label(month_start, vmin_orig): + info_fmt[first_label(day_start)] = "%d\n%b\n%Y" + else: + info_fmt[first_label(month_start)] = "%d\n%b\n%Y" + + # Case 2. 
Less than three months + elif span <= periodsperyear // 4: + month_start = period_break(dates_, "month") + info_maj[month_start] = True + if dtype_code < FreqGroup.FR_HR.value: + info["min"] = True + else: + day_start = period_break(dates_, "day") + info["min"][day_start] = True + week_start = period_break(dates_, "week") + year_start = period_break(dates_, "year") + info_fmt[week_start] = "%d" + info_fmt[month_start] = "\n\n%b" + info_fmt[year_start] = "\n\n%b\n%Y" + if not has_level_label(year_start, vmin_orig): + if not has_level_label(month_start, vmin_orig): + info_fmt[first_label(week_start)] = "\n\n%b\n%Y" + else: + info_fmt[first_label(month_start)] = "\n\n%b\n%Y" + # Case 3. Less than 14 months ............... + elif span <= 1.15 * periodsperyear: + year_start = period_break(dates_, "year") + month_start = period_break(dates_, "month") + week_start = period_break(dates_, "week") + info_maj[month_start] = True + info_min[week_start] = True + info_min[year_start] = False + info_min[month_start] = False + info_fmt[month_start] = "%b" + info_fmt[year_start] = "%b\n%Y" + if not has_level_label(year_start, vmin_orig): + info_fmt[first_label(month_start)] = "%b\n%Y" + # Case 4. Less than 2.5 years ............... + elif span <= 2.5 * periodsperyear: + year_start = period_break(dates_, "year") + quarter_start = period_break(dates_, "quarter") + month_start = period_break(dates_, "month") + info_maj[quarter_start] = True + info_min[month_start] = True + info_fmt[quarter_start] = "%b" + info_fmt[year_start] = "%b\n%Y" + # Case 4. Less than 4 years ................. + elif span <= 4 * periodsperyear: + year_start = period_break(dates_, "year") + month_start = period_break(dates_, "month") + info_maj[year_start] = True + info_min[month_start] = True + info_min[year_start] = False + + month_break = dates_[month_start].month + jan_or_jul = month_start[(month_break == 1) | (month_break == 7)] + info_fmt[jan_or_jul] = "%b" + info_fmt[year_start] = "%b\n%Y" + # Case 5. Less than 11 years ................ + elif span <= 11 * periodsperyear: + year_start = period_break(dates_, "year") + quarter_start = period_break(dates_, "quarter") + info_maj[year_start] = True + info_min[quarter_start] = True + info_min[year_start] = False + info_fmt[year_start] = "%Y" + # Case 6. More than 12 years ................ 
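+    # Spacing comes from _get_default_annual_spacing; e.g. a 30-year span
+    # gets a minor tick every year and a major tick every 5 years.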
+ else: + year_start = period_break(dates_, "year") + year_break = dates_[year_start].year + nyears = span / periodsperyear + (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) + major_idx = year_start[(year_break % maj_anndef == 0)] + info_maj[major_idx] = True + minor_idx = year_start[(year_break % min_anndef == 0)] + info_min[minor_idx] = True + info_fmt[major_idx] = "%Y" + + return info + + +def _monthly_finder(vmin, vmax, freq): + periodsperyear = 12 + + vmin_orig = vmin + (vmin, vmax) = (int(vmin), int(vmax)) + span = vmax - vmin + 1 + + # Initialize the output + info = np.zeros( + span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")] + ) + info["val"] = np.arange(vmin, vmax + 1) + dates_ = info["val"] + info["fmt"] = "" + year_start = (dates_ % 12 == 0).nonzero()[0] + info_maj = info["maj"] + info_fmt = info["fmt"] + + if span <= 1.15 * periodsperyear: + info_maj[year_start] = True + info["min"] = True + + info_fmt[:] = "%b" + info_fmt[year_start] = "%b\n%Y" + + if not has_level_label(year_start, vmin_orig): + if dates_.size > 1: + idx = 1 + else: + idx = 0 + info_fmt[idx] = "%b\n%Y" + + elif span <= 2.5 * periodsperyear: + quarter_start = (dates_ % 3 == 0).nonzero() + info_maj[year_start] = True + # TODO: Check the following : is it really info['fmt'] ? + info["fmt"][quarter_start] = True + info["min"] = True + + info_fmt[quarter_start] = "%b" + info_fmt[year_start] = "%b\n%Y" + + elif span <= 4 * periodsperyear: + info_maj[year_start] = True + info["min"] = True + + jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6) + info_fmt[jan_or_jul] = "%b" + info_fmt[year_start] = "%b\n%Y" + + elif span <= 11 * periodsperyear: + quarter_start = (dates_ % 3 == 0).nonzero() + info_maj[year_start] = True + info["min"][quarter_start] = True + + info_fmt[year_start] = "%Y" + + else: + nyears = span / periodsperyear + (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) + years = dates_[year_start] // 12 + 1 + major_idx = year_start[(years % maj_anndef == 0)] + info_maj[major_idx] = True + info["min"][year_start[(years % min_anndef == 0)]] = True + + info_fmt[major_idx] = "%Y" + + return info + + +def _quarterly_finder(vmin, vmax, freq): + periodsperyear = 4 + vmin_orig = vmin + (vmin, vmax) = (int(vmin), int(vmax)) + span = vmax - vmin + 1 + + info = np.zeros( + span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")] + ) + info["val"] = np.arange(vmin, vmax + 1) + info["fmt"] = "" + dates_ = info["val"] + info_maj = info["maj"] + info_fmt = info["fmt"] + year_start = (dates_ % 4 == 0).nonzero()[0] + + if span <= 3.5 * periodsperyear: + info_maj[year_start] = True + info["min"] = True + + info_fmt[:] = "Q%q" + info_fmt[year_start] = "Q%q\n%F" + if not has_level_label(year_start, vmin_orig): + if dates_.size > 1: + idx = 1 + else: + idx = 0 + info_fmt[idx] = "Q%q\n%F" + + elif span <= 11 * periodsperyear: + info_maj[year_start] = True + info["min"] = True + info_fmt[year_start] = "%F" + + else: + # https://github.com/pandas-dev/pandas/pull/47602 + years = dates_[year_start] // 4 + 1970 + nyears = span / periodsperyear + (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) + major_idx = year_start[(years % maj_anndef == 0)] + info_maj[major_idx] = True + info["min"][year_start[(years % min_anndef == 0)]] = True + info_fmt[major_idx] = "%F" + + return info + + +def _annual_finder(vmin, vmax, freq): + (vmin, vmax) = (int(vmin), int(vmax + 1)) + span = vmax - vmin + 1 + + info = np.zeros( + span, dtype=[("val", int), ("maj", bool), 
("min", bool), ("fmt", "|S8")] + ) + info["val"] = np.arange(vmin, vmax + 1) + info["fmt"] = "" + dates_ = info["val"] + + (min_anndef, maj_anndef) = _get_default_annual_spacing(span) + major_idx = dates_ % maj_anndef == 0 + info["maj"][major_idx] = True + info["min"][(dates_ % min_anndef == 0)] = True + info["fmt"][major_idx] = "%Y" + + return info + + +def get_finder(freq: BaseOffset): + # error: "BaseOffset" has no attribute "_period_dtype_code" + dtype_code = freq._period_dtype_code # type: ignore[attr-defined] + fgroup = FreqGroup.from_period_dtype_code(dtype_code) + + if fgroup == FreqGroup.FR_ANN: + return _annual_finder + elif fgroup == FreqGroup.FR_QTR: + return _quarterly_finder + elif fgroup == FreqGroup.FR_MTH: + return _monthly_finder + elif (dtype_code >= FreqGroup.FR_BUS.value) or fgroup == FreqGroup.FR_WK: + return _daily_finder + else: # pragma: no cover + raise NotImplementedError(f"Unsupported frequency: {dtype_code}") + + +class TimeSeries_DateLocator(Locator): + """ + Locates the ticks along an axis controlled by a :class:`Series`. + + Parameters + ---------- + freq : BaseOffset + Valid frequency specifier. + minor_locator : {False, True}, optional + Whether the locator is for minor ticks (True) or not. + dynamic_mode : {True, False}, optional + Whether the locator should work in dynamic mode. + base : {int}, optional + quarter : {int}, optional + month : {int}, optional + day : {int}, optional + """ + + def __init__( + self, + freq: BaseOffset, + minor_locator: bool = False, + dynamic_mode: bool = True, + base: int = 1, + quarter: int = 1, + month: int = 1, + day: int = 1, + plot_obj=None, + ) -> None: + freq = to_offset(freq) + self.freq = freq + self.base = base + (self.quarter, self.month, self.day) = (quarter, month, day) + self.isminor = minor_locator + self.isdynamic = dynamic_mode + self.offset = 0 + self.plot_obj = plot_obj + self.finder = get_finder(freq) + + def _get_default_locs(self, vmin, vmax): + """Returns the default locations of ticks.""" + if self.plot_obj.date_axis_info is None: + self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) + + locator = self.plot_obj.date_axis_info + + if self.isminor: + return np.compress(locator["min"], locator["val"]) + return np.compress(locator["maj"], locator["val"]) + + def __call__(self): + """Return the locations of the ticks.""" + # axis calls Locator.set_axis inside set_m_formatter + + vi = tuple(self.axis.get_view_interval()) + if vi != self.plot_obj.view_interval: + self.plot_obj.date_axis_info = None + self.plot_obj.view_interval = vi + vmin, vmax = vi + if vmax < vmin: + vmin, vmax = vmax, vmin + if self.isdynamic: + locs = self._get_default_locs(vmin, vmax) + else: # pragma: no cover + base = self.base + (d, m) = divmod(vmin, base) + vmin = (d + 1) * base + locs = list(range(vmin, vmax + 1, base)) + return locs + + def autoscale(self): + """ + Sets the view limits to the nearest multiples of base that contain the + data. + """ + # requires matplotlib >= 0.98.0 + (vmin, vmax) = self.axis.get_data_interval() + + locs = self._get_default_locs(vmin, vmax) + (vmin, vmax) = locs[[0, -1]] + if vmin == vmax: + vmin -= 1 + vmax += 1 + return nonsingular(vmin, vmax) + + +# ------------------------------------------------------------------------- +# --- Formatter --- +# ------------------------------------------------------------------------- + + +class TimeSeries_DateFormatter(Formatter): + """ + Formats the ticks along an axis controlled by a :class:`PeriodIndex`. 
+ + Parameters + ---------- + freq : BaseOffset + Valid frequency specifier. + minor_locator : bool, default False + Whether the current formatter should apply to minor ticks (True) or + major ticks (False). + dynamic_mode : bool, default True + Whether the formatter works in dynamic mode or not. + """ + + def __init__( + self, + freq: BaseOffset, + minor_locator: bool = False, + dynamic_mode: bool = True, + plot_obj=None, + ) -> None: + freq = to_offset(freq) + self.format = None + self.freq = freq + self.locs: list[Any] = [] # unused, for matplotlib compat + self.formatdict: dict[Any, Any] | None = None + self.isminor = minor_locator + self.isdynamic = dynamic_mode + self.offset = 0 + self.plot_obj = plot_obj + self.finder = get_finder(freq) + + def _set_default_format(self, vmin, vmax): + """Returns the default ticks spacing.""" + if self.plot_obj.date_axis_info is None: + self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) + info = self.plot_obj.date_axis_info + + if self.isminor: + format = np.compress(info["min"] & np.logical_not(info["maj"]), info) + else: + format = np.compress(info["maj"], info) + self.formatdict = {x: f for (x, _, _, f) in format} + return self.formatdict + + def set_locs(self, locs) -> None: + """Sets the locations of the ticks""" + # don't actually use the locs. This is just needed to work with + # matplotlib. Force to use vmin, vmax + + self.locs = locs + + (vmin, vmax) = vi = tuple(self.axis.get_view_interval()) + if vi != self.plot_obj.view_interval: + self.plot_obj.date_axis_info = None + self.plot_obj.view_interval = vi + if vmax < vmin: + (vmin, vmax) = (vmax, vmin) + self._set_default_format(vmin, vmax) + + def __call__(self, x, pos: int = 0) -> str: + if self.formatdict is None: + return "" + else: + fmt = self.formatdict.pop(x, "") + if isinstance(fmt, np.bytes_): + fmt = fmt.decode("utf-8") + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Period with BDay freq is deprecated", + category=FutureWarning, + ) + period = Period(ordinal=int(x), freq=self.freq) + assert isinstance(period, Period) + return period.strftime(fmt) + + +class TimeSeries_TimedeltaFormatter(Formatter): + """ + Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`. 
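+
+    Tick values are interpreted as nanoseconds; for example,
+    ``x = 90_061 * 10**9`` renders as ``'1 days 01:01:01'`` (plus a
+    fractional part when the axis span warrants sub-second precision).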
+ """ + + @staticmethod + def format_timedelta_ticks(x, pos, n_decimals: int) -> str: + """ + Convert seconds to 'D days HH:MM:SS.F' + """ + s, ns = divmod(x, 10**9) + m, s = divmod(s, 60) + h, m = divmod(m, 60) + d, h = divmod(h, 24) + decimals = int(ns * 10 ** (n_decimals - 9)) + s = f"{int(h):02d}:{int(m):02d}:{int(s):02d}" + if n_decimals > 0: + s += f".{decimals:0{n_decimals}d}" + if d != 0: + s = f"{int(d):d} days {s}" + return s + + def __call__(self, x, pos: int = 0) -> str: + (vmin, vmax) = tuple(self.axis.get_view_interval()) + n_decimals = min(int(np.ceil(np.log10(100 * 10**9 / abs(vmax - vmin)))), 9) + return self.format_timedelta_ticks(x, pos, n_decimals) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/core.py b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/core.py new file mode 100644 index 00000000..c62f7327 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/core.py @@ -0,0 +1,1884 @@ +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +from collections.abc import ( + Hashable, + Iterable, + Sequence, +) +from typing import ( + TYPE_CHECKING, + Literal, +) +import warnings + +import matplotlib as mpl +import numpy as np + +from pandas.errors import AbstractMethodError +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_any_real_numeric_dtype, + is_float, + is_float_dtype, + is_hashable, + is_integer, + is_integer_dtype, + is_iterator, + is_list_like, + is_number, + is_numeric_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + ExtensionDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCMultiIndex, + ABCPeriodIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + isna, + notna, +) + +import pandas.core.common as com +from pandas.core.frame import DataFrame +from pandas.util.version import Version + +from pandas.io.formats.printing import pprint_thing +from pandas.plotting._matplotlib import tools +from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters +from pandas.plotting._matplotlib.groupby import reconstruct_data_with_by +from pandas.plotting._matplotlib.misc import unpack_single_str_list +from pandas.plotting._matplotlib.style import get_standard_colors +from pandas.plotting._matplotlib.timeseries import ( + decorate_axes, + format_dateaxis, + maybe_convert_index, + maybe_resample, + use_dynamic_x, +) +from pandas.plotting._matplotlib.tools import ( + create_subplots, + flatten_axes, + format_date_labels, + get_all_lines, + get_xlim, + handle_shared_axes, +) + +if TYPE_CHECKING: + from matplotlib.artist import Artist + from matplotlib.axes import Axes + from matplotlib.axis import Axis + + from pandas._typing import ( + IndexLabel, + PlottingOrientation, + npt, + ) + + +def _color_in_style(style: str) -> bool: + """ + Check if there is a color letter in the style string. + """ + from matplotlib.colors import BASE_COLORS + + return not set(BASE_COLORS).isdisjoint(style) + + +class MPLPlot(ABC): + """ + Base class for assembling a pandas plot using matplotlib + + Parameters + ---------- + data : + + """ + + @property + @abstractmethod + def _kind(self) -> str: + """Specify kind str. 
Must be overridden in child class""" + raise NotImplementedError + + _layout_type = "vertical" + _default_rot = 0 + + @property + def orientation(self) -> str | None: + return None + + axes: np.ndarray # of Axes objects + + def __init__( + self, + data, + kind=None, + by: IndexLabel | None = None, + subplots: bool | Sequence[Sequence[str]] = False, + sharex=None, + sharey: bool = False, + use_index: bool = True, + figsize: tuple[float, float] | None = None, + grid=None, + legend: bool | str = True, + rot=None, + ax=None, + fig=None, + title=None, + xlim=None, + ylim=None, + xticks=None, + yticks=None, + xlabel: Hashable | None = None, + ylabel: Hashable | None = None, + fontsize: int | None = None, + secondary_y: bool | tuple | list | np.ndarray = False, + colormap=None, + table: bool = False, + layout=None, + include_bool: bool = False, + column: IndexLabel | None = None, + **kwds, + ) -> None: + import matplotlib.pyplot as plt + + self.data = data + + # if users assign an empty list or tuple, raise `ValueError` + # similar to current `df.box` and `df.hist` APIs. + if by in ([], ()): + raise ValueError("No group keys passed!") + self.by = com.maybe_make_list(by) + + # Assign the rest of columns into self.columns if by is explicitly defined + # while column is not, only need `columns` in hist/box plot when it's DF + # TODO: Might deprecate `column` argument in future PR (#28373) + if isinstance(data, DataFrame): + if column: + self.columns = com.maybe_make_list(column) + elif self.by is None: + self.columns = [ + col for col in data.columns if is_numeric_dtype(data[col]) + ] + else: + self.columns = [ + col + for col in data.columns + if col not in self.by and is_numeric_dtype(data[col]) + ] + + # For `hist` plot, need to get grouped original data before `self.data` is + # updated later + if self.by is not None and self._kind == "hist": + self._grouped = data.groupby(unpack_single_str_list(self.by)) + + self.kind = kind + + self.subplots = self._validate_subplots_kwarg(subplots) + + if sharex is None: + # if by is defined, subplots are used and sharex should be False + if ax is None and by is None: + self.sharex = True + else: + # if we get an axis, the users should do the visibility + # setting... 
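+ # (leaving sharex False means pandas will not hide tick labels + # on axes the user manages)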
+ self.sharex = False + else: + self.sharex = sharex + + self.sharey = sharey + self.figsize = figsize + self.layout = layout + + self.xticks = xticks + self.yticks = yticks + self.xlim = xlim + self.ylim = ylim + self.title = title + self.use_index = use_index + self.xlabel = xlabel + self.ylabel = ylabel + + self.fontsize = fontsize + + if rot is not None: + self.rot = rot + # need to know for format_date_labels since it's rotated to 30 by + # default + self._rot_set = True + else: + self._rot_set = False + self.rot = self._default_rot + + if grid is None: + grid = False if secondary_y else plt.rcParams["axes.grid"] + + self.grid = grid + self.legend = legend + self.legend_handles: list[Artist] = [] + self.legend_labels: list[Hashable] = [] + + self.logx = kwds.pop("logx", False) + self.logy = kwds.pop("logy", False) + self.loglog = kwds.pop("loglog", False) + self.label = kwds.pop("label", None) + self.style = kwds.pop("style", None) + self.mark_right = kwds.pop("mark_right", True) + self.stacked = kwds.pop("stacked", False) + + self.ax = ax + self.fig = fig + self.axes = np.array([], dtype=object) # "real" version get set in `generate` + + # parse errorbar input if given + xerr = kwds.pop("xerr", None) + yerr = kwds.pop("yerr", None) + self.errors = { + kw: self._parse_errorbars(kw, err) + for kw, err in zip(["xerr", "yerr"], [xerr, yerr]) + } + + if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, ABCIndex)): + secondary_y = [secondary_y] + self.secondary_y = secondary_y + + # ugly TypeError if user passes matplotlib's `cmap` name. + # Probably better to accept either. + if "cmap" in kwds and colormap: + raise TypeError("Only specify one of `cmap` and `colormap`.") + if "cmap" in kwds: + self.colormap = kwds.pop("cmap") + else: + self.colormap = colormap + + self.table = table + self.include_bool = include_bool + + self.kwds = kwds + + self._validate_color_args() + + def _validate_subplots_kwarg( + self, subplots: bool | Sequence[Sequence[str]] + ) -> bool | list[tuple[int, ...]]: + """ + Validate the subplots parameter + + - check type and content + - check for duplicate columns + - check for invalid column names + - convert column names into indices + - add missing columns in a group of their own + See comments in code below for more details. + + Parameters + ---------- + subplots : subplots parameters as passed to PlotAccessor + + Returns + ------- + validated subplots : a bool or a list of tuples of column indices. Columns + in the same tuple will be grouped together in the resulting plot. + """ + + if isinstance(subplots, bool): + return subplots + elif not isinstance(subplots, Iterable): + raise ValueError("subplots should be a bool or an iterable") + + supported_kinds = ( + "line", + "bar", + "barh", + "hist", + "kde", + "density", + "area", + "pie", + ) + if self._kind not in supported_kinds: + raise ValueError( + "When subplots is an iterable, kind must be " + f"one of {', '.join(supported_kinds)}. Got {self._kind}." + ) + + if isinstance(self.data, ABCSeries): + raise NotImplementedError( + "An iterable subplots for a Series is not supported." + ) + + columns = self.data.columns + if isinstance(columns, ABCMultiIndex): + raise NotImplementedError( + "An iterable subplots for a DataFrame with a MultiIndex column " + "is not supported." + ) + + if columns.nunique() != len(columns): + raise NotImplementedError( + "An iterable subplots for a DataFrame with non-unique column " + "labels is not supported." 
+ ) + + # subplots is a list of tuples where each tuple is a group of + # columns to be grouped together (one ax per group). + # we consolidate the subplots list such that: + # - the tuples contain indices instead of column names + # - the columns that aren't yet in the list are added in a group + # of their own. + # For example with columns from a to g, and + # subplots = [(a, c), (b, f, e)], + # we end up with [(ai, ci), (bi, fi, ei), (di,), (gi,)] + # This way, we can handle self.subplots in a homogeneous manner + # later. + # TODO: also accept indices instead of just names? + + out = [] + seen_columns: set[Hashable] = set() + for group in subplots: + if not is_list_like(group): + raise ValueError( + "When subplots is an iterable, each entry " + "should be a list/tuple of column names." + ) + idx_locs = columns.get_indexer_for(group) + if (idx_locs == -1).any(): + bad_labels = np.extract(idx_locs == -1, group) + raise ValueError( + f"Column label(s) {list(bad_labels)} not found in the DataFrame." + ) + unique_columns = set(group) + duplicates = seen_columns.intersection(unique_columns) + if duplicates: + raise ValueError( + "Each column should be in only one subplot. " + f"Columns {duplicates} were found in multiple subplots." + ) + seen_columns = seen_columns.union(unique_columns) + out.append(tuple(idx_locs)) + + unseen_columns = columns.difference(seen_columns) + for column in unseen_columns: + idx_loc = columns.get_loc(column) + out.append((idx_loc,)) + return out + + def _validate_color_args(self): + if ( + "color" in self.kwds + and self.nseries == 1 + and self.kwds["color"] is not None + and not is_list_like(self.kwds["color"]) + ): + # support series.plot(color='green') + self.kwds["color"] = [self.kwds["color"]] + + if ( + "color" in self.kwds + and isinstance(self.kwds["color"], tuple) + and self.nseries == 1 + and len(self.kwds["color"]) in (3, 4) + ): + # support RGB and RGBA tuples in series plot + self.kwds["color"] = [self.kwds["color"]] + + if ( + "color" in self.kwds or "colors" in self.kwds + ) and self.colormap is not None: + warnings.warn( + "'color' and 'colormap' cannot be used simultaneously. Using 'color'", + stacklevel=find_stack_level(), + ) + + if "color" in self.kwds and self.style is not None: + if is_list_like(self.style): + styles = self.style + else: + styles = [self.style] + # need only a single match + for s in styles: + if _color_in_style(s): + raise ValueError( + "Cannot pass 'style' string with a color symbol and " + "'color' keyword argument. 
Please use one or the " + "other or pass 'style' without a color symbol" + ) + + def _iter_data(self, data=None, keep_index: bool = False, fillna=None): + if data is None: + data = self.data + if fillna is not None: + data = data.fillna(fillna) + + for col, values in data.items(): + if keep_index is True: + yield col, values + else: + yield col, values.values + + @property + def nseries(self) -> int: + # When `by` is explicitly assigned, grouped data size will be defined, and + # this will determine number of subplots to have, aka `self.nseries` + if self.data.ndim == 1: + return 1 + elif self.by is not None and self._kind == "hist": + return len(self._grouped) + elif self.by is not None and self._kind == "box": + return len(self.columns) + else: + return self.data.shape[1] + + def draw(self) -> None: + self.plt.draw_if_interactive() + + def generate(self) -> None: + self._args_adjust() + self._compute_plot_data() + self._setup_subplots() + self._make_plot() + self._add_table() + self._make_legend() + self._adorn_subplots() + + for ax in self.axes: + self._post_plot_logic_common(ax, self.data) + self._post_plot_logic(ax, self.data) + + @abstractmethod + def _args_adjust(self) -> None: + pass + + def _has_plotted_object(self, ax: Axes) -> bool: + """check whether ax has data""" + return len(ax.lines) != 0 or len(ax.artists) != 0 or len(ax.containers) != 0 + + def _maybe_right_yaxis(self, ax: Axes, axes_num): + if not self.on_right(axes_num): + # secondary axes may be passed via ax kw + return self._get_ax_layer(ax) + + if hasattr(ax, "right_ax"): + # if it has right_ax property, ``ax`` must be left axes + return ax.right_ax + elif hasattr(ax, "left_ax"): + # if it has left_ax property, ``ax`` must be right axes + return ax + else: + # otherwise, create twin axes + orig_ax, new_ax = ax, ax.twinx() + # TODO: use Matplotlib public API when available + new_ax._get_lines = orig_ax._get_lines + new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill + orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax + + if not self._has_plotted_object(orig_ax): # no data on left y + orig_ax.get_yaxis().set_visible(False) + + if self.logy is True or self.loglog is True: + new_ax.set_yscale("log") + elif self.logy == "sym" or self.loglog == "sym": + new_ax.set_yscale("symlog") + return new_ax + + def _setup_subplots(self): + if self.subplots: + naxes = ( + self.nseries if isinstance(self.subplots, bool) else len(self.subplots) + ) + fig, axes = create_subplots( + naxes=naxes, + sharex=self.sharex, + sharey=self.sharey, + figsize=self.figsize, + ax=self.ax, + layout=self.layout, + layout_type=self._layout_type, + ) + elif self.ax is None: + fig = self.plt.figure(figsize=self.figsize) + axes = fig.add_subplot(111) + else: + fig = self.ax.get_figure() + if self.figsize is not None: + fig.set_size_inches(self.figsize) + axes = self.ax + + axes = flatten_axes(axes) + + valid_log = {False, True, "sym", None} + input_log = {self.logx, self.logy, self.loglog} + if input_log - valid_log: + invalid_log = next(iter(input_log - valid_log)) + raise ValueError( + f"Boolean, None and 'sym' are valid options, '{invalid_log}' is given." 
+ ) + + if self.logx is True or self.loglog is True: + [a.set_xscale("log") for a in axes] + elif self.logx == "sym" or self.loglog == "sym": + [a.set_xscale("symlog") for a in axes] + + if self.logy is True or self.loglog is True: + [a.set_yscale("log") for a in axes] + elif self.logy == "sym" or self.loglog == "sym": + [a.set_yscale("symlog") for a in axes] + + self.fig = fig + self.axes = axes + + @property + def result(self): + """ + Return result axes + """ + if self.subplots: + if self.layout is not None and not is_list_like(self.ax): + return self.axes.reshape(*self.layout) + else: + return self.axes + else: + sec_true = isinstance(self.secondary_y, bool) and self.secondary_y + # error: Argument 1 to "len" has incompatible type "Union[bool, + # Tuple[Any, ...], List[Any], ndarray[Any, Any]]"; expected "Sized" + all_sec = ( + is_list_like(self.secondary_y) + and len(self.secondary_y) == self.nseries # type: ignore[arg-type] + ) + if sec_true or all_sec: + # if all data is plotted on secondary, return right axes + return self._get_ax_layer(self.axes[0], primary=False) + else: + return self.axes[0] + + def _convert_to_ndarray(self, data): + # GH31357: categorical columns are processed separately + if isinstance(data.dtype, CategoricalDtype): + return data + + # GH32073: cast to float if values contain nulled integers + if (is_integer_dtype(data.dtype) or is_float_dtype(data.dtype)) and isinstance( + data.dtype, ExtensionDtype + ): + return data.to_numpy(dtype="float", na_value=np.nan) + + # GH25587: cast ExtensionArray of pandas (IntegerArray, etc.) to + # np.ndarray before plot. + if len(data) > 0: + return np.asarray(data) + + return data + + def _compute_plot_data(self): + data = self.data + + if isinstance(data, ABCSeries): + label = self.label + if label is None and data.name is None: + label = "" + if label is None: + # We'll end up with columns of [0] instead of [None] + data = data.to_frame() + else: + data = data.to_frame(name=label) + elif self._kind in ("hist", "box"): + cols = self.columns if self.by is None else self.columns + self.by + data = data.loc[:, cols] + + # GH15079 reconstruct data if by is defined + if self.by is not None: + self.subplots = True + data = reconstruct_data_with_by(self.data, by=self.by, cols=self.columns) + + # GH16953, infer_objects is needed as fallback, for ``Series`` + # with ``dtype == object`` + data = data.infer_objects(copy=False) + include_type = [np.number, "datetime", "datetimetz", "timedelta"] + + # GH23719, allow plotting boolean + if self.include_bool is True: + include_type.append(np.bool_) + + # GH22799, exclude datetime-like type for boxplot + exclude_type = None + if self._kind == "box": + # TODO: change after solving issue 27881 + include_type = [np.number] + exclude_type = ["timedelta"] + + # GH 18755, include object and category type for scatter plot + if self._kind == "scatter": + include_type.extend(["object", "category"]) + + numeric_data = data.select_dtypes(include=include_type, exclude=exclude_type) + + try: + is_empty = numeric_data.columns.empty + except AttributeError: + is_empty = not len(numeric_data) + + # no non-numeric frames or series allowed + if is_empty: + raise TypeError("no numeric data to plot") + + self.data = numeric_data.apply(self._convert_to_ndarray) + + def _make_plot(self): + raise AbstractMethodError(self) + + def _add_table(self) -> None: + if self.table is False: + return + elif self.table is True: + data = self.data.transpose() + else: + data = self.table + ax = self._get_ax(0) + tools.table(ax, 
data) + + def _post_plot_logic_common(self, ax, data): + """Common post process for each axes""" + if self.orientation == "vertical" or self.orientation is None: + self._apply_axis_properties(ax.xaxis, rot=self.rot, fontsize=self.fontsize) + self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize) + + if hasattr(ax, "right_ax"): + self._apply_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize) + + elif self.orientation == "horizontal": + self._apply_axis_properties(ax.yaxis, rot=self.rot, fontsize=self.fontsize) + self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize) + + if hasattr(ax, "right_ax"): + self._apply_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize) + else: # pragma no cover + raise ValueError + + @abstractmethod + def _post_plot_logic(self, ax, data) -> None: + """Post process for each axes. Overridden in child classes""" + + def _adorn_subplots(self): + """Common post process unrelated to data""" + if len(self.axes) > 0: + all_axes = self._get_subplots() + nrows, ncols = self._get_axes_layout() + handle_shared_axes( + axarr=all_axes, + nplots=len(all_axes), + naxes=nrows * ncols, + nrows=nrows, + ncols=ncols, + sharex=self.sharex, + sharey=self.sharey, + ) + + for ax in self.axes: + ax = getattr(ax, "right_ax", ax) + if self.yticks is not None: + ax.set_yticks(self.yticks) + + if self.xticks is not None: + ax.set_xticks(self.xticks) + + if self.ylim is not None: + ax.set_ylim(self.ylim) + + if self.xlim is not None: + ax.set_xlim(self.xlim) + + # GH9093, currently Pandas does not show ylabel, so if users provide + # ylabel will set it as ylabel in the plot. + if self.ylabel is not None: + ax.set_ylabel(pprint_thing(self.ylabel)) + + ax.grid(self.grid) + + if self.title: + if self.subplots: + if is_list_like(self.title): + if len(self.title) != self.nseries: + raise ValueError( + "The length of `title` must equal the number " + "of columns if using `title` of type `list` " + "and `subplots=True`.\n" + f"length of title = {len(self.title)}\n" + f"number of columns = {self.nseries}" + ) + + for ax, title in zip(self.axes, self.title): + ax.set_title(title) + else: + self.fig.suptitle(self.title) + else: + if is_list_like(self.title): + msg = ( + "Using `title` of type `list` is not supported " + "unless `subplots=True` is passed" + ) + raise ValueError(msg) + self.axes[0].set_title(self.title) + + def _apply_axis_properties( + self, axis: Axis, rot=None, fontsize: int | None = None + ) -> None: + """ + Tick creation within matplotlib is reasonably expensive and is + internally deferred until accessed as Ticks are created/destroyed + multiple times per draw. It's therefore beneficial for us to avoid + accessing unless we will act on the Tick. + """ + if rot is not None or fontsize is not None: + # rot=0 is a valid setting, hence the explicit None check + labels = axis.get_majorticklabels() + axis.get_minorticklabels() + for label in labels: + if rot is not None: + label.set_rotation(rot) + if fontsize is not None: + label.set_fontsize(fontsize) + + @property + def legend_title(self) -> str | None: + if not isinstance(self.data.columns, ABCMultiIndex): + name = self.data.columns.name + if name is not None: + name = pprint_thing(name) + return name + else: + stringified = map(pprint_thing, self.data.columns.names) + return ",".join(stringified) + + def _mark_right_label(self, label: str, index: int) -> str: + """ + Append ``(right)`` to the label of a line if it's plotted on the right axis. 
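+ + For example, a column ``"b"`` drawn on the secondary y-axis appears in + the legend as ``"b (right)"``.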
+ + Note that ``(right)`` is only appended when ``subplots=False``. + """ + if not self.subplots and self.mark_right and self.on_right(index): + label += " (right)" + return label + + def _append_legend_handles_labels(self, handle: Artist, label: str) -> None: + """ + Append current handle and label to ``legend_handles`` and ``legend_labels``. + + These will be used to make the legend. + """ + self.legend_handles.append(handle) + self.legend_labels.append(label) + + def _make_legend(self) -> None: + ax, leg = self._get_ax_legend(self.axes[0]) + + handles = [] + labels = [] + title = "" + + if not self.subplots: + if leg is not None: + title = leg.get_title().get_text() + # Replace leg.legend_handles because it misses marker info + if Version(mpl.__version__) < Version("3.7"): + handles = leg.legendHandles + else: + handles = leg.legend_handles + labels = [x.get_text() for x in leg.get_texts()] + + if self.legend: + if self.legend == "reverse": + handles += reversed(self.legend_handles) + labels += reversed(self.legend_labels) + else: + handles += self.legend_handles + labels += self.legend_labels + + if self.legend_title is not None: + title = self.legend_title + + if len(handles) > 0: + ax.legend(handles, labels, loc="best", title=title) + + elif self.subplots and self.legend: + for ax in self.axes: + if ax.get_visible(): + ax.legend(loc="best") + + def _get_ax_legend(self, ax: Axes): + """ + Take in axes and return ax and legend under different scenarios + """ + leg = ax.get_legend() + + other_ax = getattr(ax, "left_ax", None) or getattr(ax, "right_ax", None) + other_leg = None + if other_ax is not None: + other_leg = other_ax.get_legend() + if leg is None and other_leg is not None: + leg = other_leg + ax = other_ax + return ax, leg + + @cache_readonly + def plt(self): + import matplotlib.pyplot as plt + + return plt + + _need_to_set_index = False + + def _get_xticks(self, convert_period: bool = False): + index = self.data.index + is_datetype = index.inferred_type in ("datetime", "date", "datetime64", "time") + + if self.use_index: + if convert_period and isinstance(index, ABCPeriodIndex): + self.data = self.data.reindex(index=index.sort_values()) + x = self.data.index.to_timestamp()._mpl_repr() + elif is_any_real_numeric_dtype(index.dtype): + # Matplotlib supports numeric values or datetime objects as + # xaxis values. Taking LBYL approach here, by the time + # matplotlib raises exception when using non numeric/datetime + # values for xaxis, several actions are already taken by plt. 
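+ # (_mpl_repr hands matplotlib a plain ndarray of the index values)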
+ x = index._mpl_repr() + elif is_datetype: + self.data = self.data[notna(self.data.index)] + self.data = self.data.sort_index() + x = self.data.index._mpl_repr() + else: + self._need_to_set_index = True + x = list(range(len(index))) + else: + x = list(range(len(index))) + + return x + + @classmethod + @register_pandas_matplotlib_converters + def _plot( + cls, ax: Axes, x, y: np.ndarray, style=None, is_errorbar: bool = False, **kwds + ): + mask = isna(y) + if mask.any(): + y = np.ma.array(y) + y = np.ma.masked_where(mask, y) + + if isinstance(x, ABCIndex): + x = x._mpl_repr() + + if is_errorbar: + if "xerr" in kwds: + kwds["xerr"] = np.array(kwds.get("xerr")) + if "yerr" in kwds: + kwds["yerr"] = np.array(kwds.get("yerr")) + return ax.errorbar(x, y, **kwds) + else: + # prevent style kwarg from going to errorbar, where it is unsupported + args = (x, y, style) if style is not None else (x, y) + return ax.plot(*args, **kwds) + + def _get_custom_index_name(self): + """Specify whether xlabel/ylabel should be used to override index name""" + return self.xlabel + + def _get_index_name(self) -> str | None: + if isinstance(self.data.index, ABCMultiIndex): + name = self.data.index.names + if com.any_not_none(*name): + name = ",".join([pprint_thing(x) for x in name]) + else: + name = None + else: + name = self.data.index.name + if name is not None: + name = pprint_thing(name) + + # GH 45145, override the default axis label if one is provided. + index_name = self._get_custom_index_name() + if index_name is not None: + name = pprint_thing(index_name) + + return name + + @classmethod + def _get_ax_layer(cls, ax, primary: bool = True): + """get left (primary) or right (secondary) axes""" + if primary: + return getattr(ax, "left_ax", ax) + else: + return getattr(ax, "right_ax", ax) + + def _col_idx_to_axis_idx(self, col_idx: int) -> int: + """Return the index of the axis where the column at col_idx should be plotted""" + if isinstance(self.subplots, list): + # Subplots is a list: some columns will be grouped together in the same ax + return next( + group_idx + for (group_idx, group) in enumerate(self.subplots) + if col_idx in group + ) + else: + # subplots is True: one ax per column + return col_idx + + def _get_ax(self, i: int): + # get the twinx ax if appropriate + if self.subplots: + i = self._col_idx_to_axis_idx(i) + ax = self.axes[i] + ax = self._maybe_right_yaxis(ax, i) + self.axes[i] = ax + else: + ax = self.axes[0] + ax = self._maybe_right_yaxis(ax, i) + + ax.get_yaxis().set_visible(True) + return ax + + @classmethod + def get_default_ax(cls, ax) -> None: + import matplotlib.pyplot as plt + + if ax is None and len(plt.get_fignums()) > 0: + with plt.rc_context(): + ax = plt.gca() + ax = cls._get_ax_layer(ax) + + def on_right(self, i): + if isinstance(self.secondary_y, bool): + return self.secondary_y + + if isinstance(self.secondary_y, (tuple, list, np.ndarray, ABCIndex)): + return self.data.columns[i] in self.secondary_y + + def _apply_style_colors(self, colors, kwds, col_num, label: str): + """ + Manage style and color based on column number and its label. + Returns tuple of appropriate style and kwds which "color" may be added. 
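+ + For instance, ``style="--"`` carries no color letter, so with + ``subplots=True`` the color cycle fills ``kwds["color"]`` while the + dashed line style is kept.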
+ """ + style = None + if self.style is not None: + if isinstance(self.style, list): + try: + style = self.style[col_num] + except IndexError: + pass + elif isinstance(self.style, dict): + style = self.style.get(label, style) + else: + style = self.style + + has_color = "color" in kwds or self.colormap is not None + nocolor_style = style is None or not _color_in_style(style) + if (has_color or self.subplots) and nocolor_style: + if isinstance(colors, dict): + kwds["color"] = colors[label] + else: + kwds["color"] = colors[col_num % len(colors)] + return style, kwds + + def _get_colors( + self, + num_colors: int | None = None, + color_kwds: str = "color", + ): + if num_colors is None: + num_colors = self.nseries + + return get_standard_colors( + num_colors=num_colors, + colormap=self.colormap, + color=self.kwds.get(color_kwds), + ) + + def _parse_errorbars(self, label, err): + """ + Look for error keyword arguments and return the actual errorbar data + or return the error DataFrame/dict + + Error bars can be specified in several ways: + Series: the user provides a pandas.Series object of the same + length as the data + ndarray: provides a np.ndarray of the same length as the data + DataFrame/dict: error values are paired with keys matching the + key in the plotted DataFrame + str: the name of the column within the plotted DataFrame + + Asymmetrical error bars are also supported, however raw error values + must be provided in this case. For a ``N`` length :class:`Series`, a + ``2xN`` array should be provided indicating lower and upper (or left + and right) errors. For a ``MxN`` :class:`DataFrame`, asymmetrical errors + should be in a ``Mx2xN`` array. + """ + if err is None: + return None + + def match_labels(data, e): + e = e.reindex(data.index) + return e + + # key-matched DataFrame + if isinstance(err, ABCDataFrame): + err = match_labels(self.data, err) + # key-matched dict + elif isinstance(err, dict): + pass + + # Series of error values + elif isinstance(err, ABCSeries): + # broadcast error series across data + err = match_labels(self.data, err) + err = np.atleast_2d(err) + err = np.tile(err, (self.nseries, 1)) + + # errors are a column in the dataframe + elif isinstance(err, str): + evalues = self.data[err].values + self.data = self.data[self.data.columns.drop(err)] + err = np.atleast_2d(evalues) + err = np.tile(err, (self.nseries, 1)) + + elif is_list_like(err): + if is_iterator(err): + err = np.atleast_2d(list(err)) + else: + # raw error values + err = np.atleast_2d(err) + + err_shape = err.shape + + # asymmetrical error bars + if isinstance(self.data, ABCSeries) and err_shape[0] == 2: + err = np.expand_dims(err, 0) + err_shape = err.shape + if err_shape[2] != len(self.data): + raise ValueError( + "Asymmetrical error bars should be provided " + f"with the shape (2, {len(self.data)})" + ) + elif isinstance(self.data, ABCDataFrame) and err.ndim == 3: + if ( + (err_shape[0] != self.nseries) + or (err_shape[1] != 2) + or (err_shape[2] != len(self.data)) + ): + raise ValueError( + "Asymmetrical error bars should be provided " + f"with the shape ({self.nseries}, 2, {len(self.data)})" + ) + + # broadcast errors to each data series + if len(err) == 1: + err = np.tile(err, (self.nseries, 1)) + + elif is_number(err): + err = np.tile([err], (self.nseries, len(self.data))) + + else: + msg = f"No valid {label} detected" + raise ValueError(msg) + + return err + + def _get_errorbars( + self, label=None, index=None, xerr: bool = True, yerr: bool = True + ): + errors = {} + + for kw, flag in 
zip(["xerr", "yerr"], [xerr, yerr]): + if flag: + err = self.errors[kw] + # user provided label-matched dataframe of errors + if isinstance(err, (ABCDataFrame, dict)): + if label is not None and label in err.keys(): + err = err[label] + else: + err = None + elif index is not None and err is not None: + err = err[index] + + if err is not None: + errors[kw] = err + return errors + + def _get_subplots(self): + from matplotlib.axes import Subplot + + return [ + ax + for ax in self.fig.get_axes() + if (isinstance(ax, Subplot) and ax.get_subplotspec() is not None) + ] + + def _get_axes_layout(self) -> tuple[int, int]: + axes = self._get_subplots() + x_set = set() + y_set = set() + for ax in axes: + # check axes coordinates to estimate layout + points = ax.get_position().get_points() + x_set.add(points[0][0]) + y_set.add(points[0][1]) + return (len(y_set), len(x_set)) + + +class PlanePlot(MPLPlot, ABC): + """ + Abstract class for plotting on plane, currently scatter and hexbin. + """ + + _layout_type = "single" + + def __init__(self, data, x, y, **kwargs) -> None: + MPLPlot.__init__(self, data, **kwargs) + if x is None or y is None: + raise ValueError(self._kind + " requires an x and y column") + if is_integer(x) and not self.data.columns._holds_integer(): + x = self.data.columns[x] + if is_integer(y) and not self.data.columns._holds_integer(): + y = self.data.columns[y] + + # Scatter plot allows to plot objects data + if self._kind == "hexbin": + if len(self.data[x]._get_numeric_data()) == 0: + raise ValueError(self._kind + " requires x column to be numeric") + if len(self.data[y]._get_numeric_data()) == 0: + raise ValueError(self._kind + " requires y column to be numeric") + + self.x = x + self.y = y + + @property + def nseries(self) -> int: + return 1 + + def _post_plot_logic(self, ax: Axes, data) -> None: + x, y = self.x, self.y + xlabel = self.xlabel if self.xlabel is not None else pprint_thing(x) + ylabel = self.ylabel if self.ylabel is not None else pprint_thing(y) + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + + def _plot_colorbar(self, ax: Axes, **kwds): + # Addresses issues #10611 and #10678: + # When plotting scatterplots and hexbinplots in IPython + # inline backend the colorbar axis height tends not to + # exactly match the parent axis height. + # The difference is due to small fractional differences + # in floating points with similar representation. + # To deal with this, this method forces the colorbar + # height to take the height of the parent axes. 
+ # For a more detailed description of the issue + # see the following link: + # https://github.com/ipython/ipython/issues/11215 + + # GH33389, if ax is used multiple times, we should always + # use the last one which contains the latest information + # about the ax + img = ax.collections[-1] + return self.fig.colorbar(img, ax=ax, **kwds) + + +class ScatterPlot(PlanePlot): + @property + def _kind(self) -> Literal["scatter"]: + return "scatter" + + def __init__(self, data, x, y, s=None, c=None, **kwargs) -> None: + if s is None: + # hide the matplotlib default for size, in case we want to change + # the handling of this argument later + s = 20 + elif is_hashable(s) and s in data.columns: + s = data[s] + super().__init__(data, x, y, s=s, **kwargs) + if is_integer(c) and not self.data.columns._holds_integer(): + c = self.data.columns[c] + self.c = c + + def _make_plot(self): + x, y, c, data = self.x, self.y, self.c, self.data + ax = self.axes[0] + + c_is_column = is_hashable(c) and c in self.data.columns + + color_by_categorical = c_is_column and isinstance( + self.data[c].dtype, CategoricalDtype + ) + + color = self.kwds.pop("color", None) + if c is not None and color is not None: + raise TypeError("Specify exactly one of `c` and `color`") + if c is None and color is None: + c_values = self.plt.rcParams["patch.facecolor"] + elif color is not None: + c_values = color + elif color_by_categorical: + c_values = self.data[c].cat.codes + elif c_is_column: + c_values = self.data[c].values + else: + c_values = c + + if self.colormap is not None: + cmap = mpl.colormaps.get_cmap(self.colormap) + # cmap is only used if c_values are integers, otherwise UserWarning. + # GH-53908: additionally call isinstance() because is_integer_dtype + # returns True for "b" (meaning "blue" and not int8 in this context) + elif not isinstance(c_values, str) and is_integer_dtype(c_values): + # pandas uses colormap, matplotlib uses cmap. + cmap = mpl.colormaps["Greys"] + else: + cmap = None + + if color_by_categorical: + from matplotlib import colors + + n_cats = len(self.data[c].cat.categories) + cmap = colors.ListedColormap([cmap(i) for i in range(cmap.N)]) + bounds = np.linspace(0, n_cats, n_cats + 1) + norm = colors.BoundaryNorm(bounds, cmap.N) + else: + norm = self.kwds.pop("norm", None) + # plot colorbar if + # 1. 
colormap is assigned, and + # 2.`c` is a column containing only numeric values + plot_colorbar = self.colormap or c_is_column + cb = self.kwds.pop("colorbar", is_numeric_dtype(c_values) and plot_colorbar) + + if self.legend and hasattr(self, "label"): + label = self.label + else: + label = None + scatter = ax.scatter( + data[x].values, + data[y].values, + c=c_values, + label=label, + cmap=cmap, + norm=norm, + **self.kwds, + ) + if cb: + cbar_label = c if c_is_column else "" + cbar = self._plot_colorbar(ax, label=cbar_label) + if color_by_categorical: + cbar.set_ticks(np.linspace(0.5, n_cats - 0.5, n_cats)) + cbar.ax.set_yticklabels(self.data[c].cat.categories) + + if label is not None: + self._append_legend_handles_labels(scatter, label) + else: + self.legend = False + + errors_x = self._get_errorbars(label=x, index=0, yerr=False) + errors_y = self._get_errorbars(label=y, index=0, xerr=False) + if len(errors_x) > 0 or len(errors_y) > 0: + err_kwds = dict(errors_x, **errors_y) + err_kwds["ecolor"] = scatter.get_facecolor()[0] + ax.errorbar(data[x].values, data[y].values, linestyle="none", **err_kwds) + + def _args_adjust(self) -> None: + pass + + +class HexBinPlot(PlanePlot): + @property + def _kind(self) -> Literal["hexbin"]: + return "hexbin" + + def __init__(self, data, x, y, C=None, **kwargs) -> None: + super().__init__(data, x, y, **kwargs) + if is_integer(C) and not self.data.columns._holds_integer(): + C = self.data.columns[C] + self.C = C + + def _make_plot(self) -> None: + x, y, data, C = self.x, self.y, self.data, self.C + ax = self.axes[0] + # pandas uses colormap, matplotlib uses cmap. + cmap = self.colormap or "BuGn" + cmap = mpl.colormaps.get_cmap(cmap) + cb = self.kwds.pop("colorbar", True) + + if C is None: + c_values = None + else: + c_values = data[C].values + + ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap, **self.kwds) + if cb: + self._plot_colorbar(ax) + + def _make_legend(self) -> None: + pass + + def _args_adjust(self) -> None: + pass + + +class LinePlot(MPLPlot): + _default_rot = 0 + + @property + def orientation(self) -> PlottingOrientation: + return "vertical" + + @property + def _kind(self) -> Literal["line", "area", "hist", "kde", "box"]: + return "line" + + def __init__(self, data, **kwargs) -> None: + from pandas.plotting import plot_params + + MPLPlot.__init__(self, data, **kwargs) + if self.stacked: + self.data = self.data.fillna(value=0) + self.x_compat = plot_params["x_compat"] + if "x_compat" in self.kwds: + self.x_compat = bool(self.kwds.pop("x_compat")) + + def _is_ts_plot(self) -> bool: + # this is slightly deceptive + return not self.x_compat and self.use_index and self._use_dynamic_x() + + def _use_dynamic_x(self): + return use_dynamic_x(self._get_ax(0), self.data) + + def _make_plot(self) -> None: + if self._is_ts_plot(): + data = maybe_convert_index(self._get_ax(0), self.data) + + x = data.index # dummy, not used + plotf = self._ts_plot + it = self._iter_data(data=data, keep_index=True) + else: + x = self._get_xticks(convert_period=True) + # error: Incompatible types in assignment (expression has type + # "Callable[[Any, Any, Any, Any, Any, Any, KwArg(Any)], Any]", variable has + # type "Callable[[Any, Any, Any, Any, KwArg(Any)], Any]") + plotf = self._plot # type: ignore[assignment] + it = self._iter_data() + + stacking_id = self._get_stacking_id() + is_errorbar = com.any_not_none(*self.errors.values()) + + colors = self._get_colors() + for i, (label, y) in enumerate(it): + ax = self._get_ax(i) + kwds = self.kwds.copy() + style, 
kwds = self._apply_style_colors(colors, kwds, i, label) + + errors = self._get_errorbars(label=label, index=i) + kwds = dict(kwds, **errors) + + label = pprint_thing(label) # .encode('utf-8') + label = self._mark_right_label(label, index=i) + kwds["label"] = label + + newlines = plotf( + ax, + x, + y, + style=style, + column_num=i, + stacking_id=stacking_id, + is_errorbar=is_errorbar, + **kwds, + ) + self._append_legend_handles_labels(newlines[0], label) + + if self._is_ts_plot(): + # reset of xlim should be used for ts data + # TODO: GH28021, should find a way to change view limit on xaxis + lines = get_all_lines(ax) + left, right = get_xlim(lines) + ax.set_xlim(left, right) + + # error: Signature of "_plot" incompatible with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, ax: Axes, x, y, style=None, column_num=None, stacking_id=None, **kwds + ): + # column_num is used to get the target column from plotf in line and + # area plots + if column_num == 0: + cls._initialize_stacker(ax, stacking_id, len(y)) + y_values = cls._get_stacked_values(ax, stacking_id, y, kwds["label"]) + lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds) + cls._update_stacker(ax, stacking_id, y) + return lines + + def _ts_plot(self, ax: Axes, x, data, style=None, **kwds): + # accept x to be consistent with normal plot func, + # x is not passed to tsplot as it uses data.index as x coordinate + # column_num must be in kwds for stacking purpose + freq, data = maybe_resample(data, ax, kwds) + + # Set ax with freq info + decorate_axes(ax, freq, kwds) + # digging deeper + if hasattr(ax, "left_ax"): + decorate_axes(ax.left_ax, freq, kwds) + if hasattr(ax, "right_ax"): + decorate_axes(ax.right_ax, freq, kwds) + ax._plot_data.append((data, self._kind, kwds)) + + lines = self._plot(ax, data.index, data.values, style=style, **kwds) + # set date formatter, locators and rescale limits + format_dateaxis(ax, ax.freq, data.index) + return lines + + def _get_stacking_id(self): + if self.stacked: + return id(self.data) + else: + return None + + @classmethod + def _initialize_stacker(cls, ax: Axes, stacking_id, n: int) -> None: + if stacking_id is None: + return + if not hasattr(ax, "_stacker_pos_prior"): + ax._stacker_pos_prior = {} + if not hasattr(ax, "_stacker_neg_prior"): + ax._stacker_neg_prior = {} + ax._stacker_pos_prior[stacking_id] = np.zeros(n) + ax._stacker_neg_prior[stacking_id] = np.zeros(n) + + @classmethod + def _get_stacked_values(cls, ax: Axes, stacking_id, values, label): + if stacking_id is None: + return values + if not hasattr(ax, "_stacker_pos_prior"): + # stacker may not be initialized for subplots + cls._initialize_stacker(ax, stacking_id, len(values)) + + if (values >= 0).all(): + return ax._stacker_pos_prior[stacking_id] + values + elif (values <= 0).all(): + return ax._stacker_neg_prior[stacking_id] + values + + raise ValueError( + "When stacked is True, each column must be either " + "all positive or all negative. 
" + f"Column '{label}' contains both positive and negative values" + ) + + @classmethod + def _update_stacker(cls, ax: Axes, stacking_id, values) -> None: + if stacking_id is None: + return + if (values >= 0).all(): + ax._stacker_pos_prior[stacking_id] += values + elif (values <= 0).all(): + ax._stacker_neg_prior[stacking_id] += values + + def _args_adjust(self) -> None: + pass + + def _post_plot_logic(self, ax: Axes, data) -> None: + from matplotlib.ticker import FixedLocator + + def get_label(i): + if is_float(i) and i.is_integer(): + i = int(i) + try: + return pprint_thing(data.index[i]) + except Exception: + return "" + + if self._need_to_set_index: + xticks = ax.get_xticks() + xticklabels = [get_label(x) for x in xticks] + ax.xaxis.set_major_locator(FixedLocator(xticks)) + ax.set_xticklabels(xticklabels) + + # If the index is an irregular time series, then by default + # we rotate the tick labels. The exception is if there are + # subplots which don't share their x-axes, in which we case + # we don't rotate the ticklabels as by default the subplots + # would be too close together. + condition = ( + not self._use_dynamic_x() + and (data.index._is_all_dates and self.use_index) + and (not self.subplots or (self.subplots and self.sharex)) + ) + + index_name = self._get_index_name() + + if condition: + # irregular TS rotated 30 deg. by default + # probably a better place to check / set this. + if not self._rot_set: + self.rot = 30 + format_date_labels(ax, rot=self.rot) + + if index_name is not None and self.use_index: + ax.set_xlabel(index_name) + + +class AreaPlot(LinePlot): + @property + def _kind(self) -> Literal["area"]: + return "area" + + def __init__(self, data, **kwargs) -> None: + kwargs.setdefault("stacked", True) + data = data.fillna(value=0) + LinePlot.__init__(self, data, **kwargs) + + if not self.stacked: + # use smaller alpha to distinguish overlap + self.kwds.setdefault("alpha", 0.5) + + if self.logy or self.loglog: + raise ValueError("Log-y scales are not supported in area plot") + + # error: Signature of "_plot" incompatible with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, + ax: Axes, + x, + y, + style=None, + column_num=None, + stacking_id=None, + is_errorbar: bool = False, + **kwds, + ): + if column_num == 0: + cls._initialize_stacker(ax, stacking_id, len(y)) + y_values = cls._get_stacked_values(ax, stacking_id, y, kwds["label"]) + + # need to remove label, because subplots uses mpl legend as it is + line_kwds = kwds.copy() + line_kwds.pop("label") + lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds) + + # get data from the line to get coordinates for fill_between + xdata, y_values = lines[0].get_data(orig=False) + + # unable to use ``_get_stacked_values`` here to get starting point + if stacking_id is None: + start = np.zeros(len(y)) + elif (y >= 0).all(): + start = ax._stacker_pos_prior[stacking_id] + elif (y <= 0).all(): + start = ax._stacker_neg_prior[stacking_id] + else: + start = np.zeros(len(y)) + + if "color" not in kwds: + kwds["color"] = lines[0].get_color() + + rect = ax.fill_between(xdata, start, y_values, **kwds) + cls._update_stacker(ax, stacking_id, y) + + # LinePlot expects list of artists + res = [rect] + return res + + def _args_adjust(self) -> None: + pass + + def _post_plot_logic(self, ax: Axes, data) -> None: + LinePlot._post_plot_logic(self, ax, data) + + is_shared_y = len(list(ax.get_shared_y_axes())) > 0 + # do not override the default axis behaviour in case of shared y axes + if self.ylim is None 
and not is_shared_y: + if (data >= 0).all().all(): + ax.set_ylim(0, None) + elif (data <= 0).all().all(): + ax.set_ylim(None, 0) + + +class BarPlot(MPLPlot): + @property + def _kind(self) -> Literal["bar", "barh"]: + return "bar" + + _default_rot = 90 + + @property + def orientation(self) -> PlottingOrientation: + return "vertical" + + def __init__(self, data, **kwargs) -> None: + # we have to treat a series differently than a + # 1-column DataFrame w.r.t. color handling + self._is_series = isinstance(data, ABCSeries) + self.bar_width = kwargs.pop("width", 0.5) + pos = kwargs.pop("position", 0.5) + kwargs.setdefault("align", "center") + self.tick_pos = np.arange(len(data)) + + self.bottom = kwargs.pop("bottom", 0) + self.left = kwargs.pop("left", 0) + + self.log = kwargs.pop("log", False) + MPLPlot.__init__(self, data, **kwargs) + + if self.stacked or self.subplots: + self.tickoffset = self.bar_width * pos + if kwargs["align"] == "edge": + self.lim_offset = self.bar_width / 2 + else: + self.lim_offset = 0 + elif kwargs["align"] == "edge": + w = self.bar_width / self.nseries + self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5 + self.lim_offset = w * 0.5 + else: + self.tickoffset = self.bar_width * pos + self.lim_offset = 0 + + self.ax_pos = self.tick_pos - self.tickoffset + + def _args_adjust(self) -> None: + if is_list_like(self.bottom): + self.bottom = np.array(self.bottom) + if is_list_like(self.left): + self.left = np.array(self.left) + + # error: Signature of "_plot" incompatible with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, + ax: Axes, + x, + y, + w, + start: int | npt.NDArray[np.intp] = 0, + log: bool = False, + **kwds, + ): + return ax.bar(x, y, w, bottom=start, log=log, **kwds) + + @property + def _start_base(self): + return self.bottom + + def _make_plot(self) -> None: + colors = self._get_colors() + ncolors = len(colors) + + pos_prior = neg_prior = np.zeros(len(self.data)) + K = self.nseries + + for i, (label, y) in enumerate(self._iter_data(fillna=0)): + ax = self._get_ax(i) + kwds = self.kwds.copy() + if self._is_series: + kwds["color"] = colors + elif isinstance(colors, dict): + kwds["color"] = colors[label] + else: + kwds["color"] = colors[i % ncolors] + + errors = self._get_errorbars(label=label, index=i) + kwds = dict(kwds, **errors) + + label = pprint_thing(label) + label = self._mark_right_label(label, index=i) + + if (("yerr" in kwds) or ("xerr" in kwds)) and (kwds.get("ecolor") is None): + kwds["ecolor"] = mpl.rcParams["xtick.color"] + + start = 0 + if self.log and (y >= 1).all(): + start = 1 + start = start + self._start_base + + if self.subplots: + w = self.bar_width / 2 + rect = self._plot( + ax, + self.ax_pos + w, + y, + self.bar_width, + start=start, + label=label, + log=self.log, + **kwds, + ) + ax.set_title(label) + elif self.stacked: + mask = y > 0 + start = np.where(mask, pos_prior, neg_prior) + self._start_base + w = self.bar_width / 2 + rect = self._plot( + ax, + self.ax_pos + w, + y, + self.bar_width, + start=start, + label=label, + log=self.log, + **kwds, + ) + pos_prior = pos_prior + np.where(mask, y, 0) + neg_prior = neg_prior + np.where(mask, 0, y) + else: + w = self.bar_width / K + rect = self._plot( + ax, + self.ax_pos + (i + 0.5) * w, + y, + w, + start=start, + label=label, + log=self.log, + **kwds, + ) + self._append_legend_handles_labels(rect, label) + + def _post_plot_logic(self, ax: Axes, data) -> None: + if self.use_index: + str_index = [pprint_thing(key) for key in data.index] + else: + str_index = 
[pprint_thing(key) for key in range(data.shape[0])] + + s_edge = self.ax_pos[0] - 0.25 + self.lim_offset + e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset + + self._decorate_ticks(ax, self._get_index_name(), str_index, s_edge, e_edge) + + def _decorate_ticks(self, ax: Axes, name, ticklabels, start_edge, end_edge) -> None: + ax.set_xlim((start_edge, end_edge)) + + if self.xticks is not None: + ax.set_xticks(np.array(self.xticks)) + else: + ax.set_xticks(self.tick_pos) + ax.set_xticklabels(ticklabels) + + if name is not None and self.use_index: + ax.set_xlabel(name) + + +class BarhPlot(BarPlot): + @property + def _kind(self) -> Literal["barh"]: + return "barh" + + _default_rot = 0 + + @property + def orientation(self) -> Literal["horizontal"]: + return "horizontal" + + @property + def _start_base(self): + return self.left + + # error: Signature of "_plot" incompatible with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, + ax: Axes, + x, + y, + w, + start: int | npt.NDArray[np.intp] = 0, + log: bool = False, + **kwds, + ): + return ax.barh(x, y, w, left=start, log=log, **kwds) + + def _get_custom_index_name(self): + return self.ylabel + + def _decorate_ticks(self, ax: Axes, name, ticklabels, start_edge, end_edge) -> None: + # horizontal bars + ax.set_ylim((start_edge, end_edge)) + ax.set_yticks(self.tick_pos) + ax.set_yticklabels(ticklabels) + if name is not None and self.use_index: + ax.set_ylabel(name) + ax.set_xlabel(self.xlabel) + + +class PiePlot(MPLPlot): + @property + def _kind(self) -> Literal["pie"]: + return "pie" + + _layout_type = "horizontal" + + def __init__(self, data, kind=None, **kwargs) -> None: + data = data.fillna(value=0) + if (data < 0).any().any(): + raise ValueError(f"{self._kind} plot doesn't allow negative values") + MPLPlot.__init__(self, data, kind=kind, **kwargs) + + def _args_adjust(self) -> None: + self.grid = False + self.logy = False + self.logx = False + self.loglog = False + + def _validate_color_args(self) -> None: + pass + + def _make_plot(self) -> None: + colors = self._get_colors(num_colors=len(self.data), color_kwds="colors") + self.kwds.setdefault("colors", colors) + + for i, (label, y) in enumerate(self._iter_data()): + ax = self._get_ax(i) + if label is not None: + label = pprint_thing(label) + ax.set_ylabel(label) + + kwds = self.kwds.copy() + + def blank_labeler(label, value): + if value == 0: + return "" + else: + return label + + idx = [pprint_thing(v) for v in self.data.index] + labels = kwds.pop("labels", idx) + # labels is used for each wedge's labels + # Blank out labels for values of 0 so they don't overlap + # with nonzero wedges + if labels is not None: + blabels = [blank_labeler(left, value) for left, value in zip(labels, y)] + else: + blabels = None + results = ax.pie(y, labels=blabels, **kwds) + + if kwds.get("autopct", None) is not None: + patches, texts, autotexts = results + else: + patches, texts = results + autotexts = [] + + if self.fontsize is not None: + for t in texts + autotexts: + t.set_fontsize(self.fontsize) + + # leglabels is used for legend labels + leglabels = labels if labels is not None else idx + for _patch, _leglabel in zip(patches, leglabels): + self._append_legend_handles_labels(_patch, _leglabel) + + def _post_plot_logic(self, ax: Axes, data) -> None: + pass diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/groupby.py b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/groupby.py new file mode 100644 index 
00000000..00c3c411 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/groupby.py @@ -0,0 +1,139 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.core.dtypes.missing import remove_na_arraylike + +from pandas import ( + DataFrame, + MultiIndex, + Series, + concat, +) + +from pandas.plotting._matplotlib.misc import unpack_single_str_list + +if TYPE_CHECKING: + from pandas._typing import IndexLabel + + +def create_iter_data_given_by( + data: DataFrame, kind: str = "hist" +) -> dict[str, DataFrame | Series]: + """ + Create data for iteration given `by` is assigned or not, and it is only + used in both hist and boxplot. + + If `by` is assigned, return a dictionary of DataFrames in which the key of + dictionary is the values in groups. + If `by` is not assigned, return input as is, and this preserves current + status of iter_data. + + Parameters + ---------- + data : reformatted grouped data from `_compute_plot_data` method. + kind : str, plot kind. This function is only used for `hist` and `box` plots. + + Returns + ------- + iter_data : DataFrame or Dictionary of DataFrames + + Examples + -------- + If `by` is assigned: + + >>> import numpy as np + >>> tuples = [('h1', 'a'), ('h1', 'b'), ('h2', 'a'), ('h2', 'b')] + >>> mi = MultiIndex.from_tuples(tuples) + >>> value = [[1, 3, np.nan, np.nan], + ... [3, 4, np.nan, np.nan], [np.nan, np.nan, 5, 6]] + >>> data = DataFrame(value, columns=mi) + >>> create_iter_data_given_by(data) + {'h1': h1 + a b + 0 1.0 3.0 + 1 3.0 4.0 + 2 NaN NaN, 'h2': h2 + a b + 0 NaN NaN + 1 NaN NaN + 2 5.0 6.0} + """ + + # For `hist` plot, before transformation, the values in level 0 are values + # in groups and subplot titles, and later used for column subselection and + # iteration; For `box` plot, values in level 1 are column names to show, + # and are used for iteration and as subplots titles. + if kind == "hist": + level = 0 + else: + level = 1 + + # Select sub-columns based on the value of level of MI, and if `by` is + # assigned, data must be a MI DataFrame + assert isinstance(data.columns, MultiIndex) + return { + col: data.loc[:, data.columns.get_level_values(level) == col] + for col in data.columns.levels[level] + } + + +def reconstruct_data_with_by( + data: DataFrame, by: IndexLabel, cols: IndexLabel +) -> DataFrame: + """ + Internal function to group data, and reassign multiindex column names onto the + result in order to let grouped data be used in _compute_plot_data method. + + Parameters + ---------- + data : Original DataFrame to plot + by : grouped `by` parameter selected by users + cols : columns of data set (excluding columns used in `by`) + + Returns + ------- + Output is the reconstructed DataFrame with MultiIndex columns. The first level + of MI is unique values of groups, and second level of MI is the columns + selected by users. 
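+ Rows that fall outside a given group are filled with NaN in that + group's columns, as the example below shows.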
+ + Examples + -------- + >>> d = {'h': ['h1', 'h1', 'h2'], 'a': [1, 3, 5], 'b': [3, 4, 6]} + >>> df = DataFrame(d) + >>> reconstruct_data_with_by(df, by='h', cols=['a', 'b']) + h1 h2 + a b a b + 0 1.0 3.0 NaN NaN + 1 3.0 4.0 NaN NaN + 2 NaN NaN 5.0 6.0 + """ + by_modified = unpack_single_str_list(by) + grouped = data.groupby(by_modified) + + data_list = [] + for key, group in grouped: + # error: List item 1 has incompatible type "Union[Hashable, + # Sequence[Hashable]]"; expected "Iterable[Hashable]" + columns = MultiIndex.from_product([[key], cols]) # type: ignore[list-item] + sub_group = group[cols] + sub_group.columns = columns + data_list.append(sub_group) + + data = concat(data_list, axis=1) + return data + + +def reformat_hist_y_given_by( + y: Series | np.ndarray, by: IndexLabel | None +) -> Series | np.ndarray: + """Internal function to reformat y given `by` is applied or not for hist plot. + + If by is None, input y is 1-d with NaN removed; and if by is not None, groupby + will take place and input y is multi-dimensional array. + """ + if by is not None and len(y.shape) > 1: + return np.array([remove_na_arraylike(col) for col in y.T]).T + return remove_na_arraylike(y) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/hist.py b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/hist.py new file mode 100644 index 00000000..076b95a8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/hist.py @@ -0,0 +1,548 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Literal, +) + +import numpy as np + +from pandas.core.dtypes.common import ( + is_integer, + is_list_like, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, +) +from pandas.core.dtypes.missing import ( + isna, + remove_na_arraylike, +) + +from pandas.io.formats.printing import pprint_thing +from pandas.plotting._matplotlib.core import ( + LinePlot, + MPLPlot, +) +from pandas.plotting._matplotlib.groupby import ( + create_iter_data_given_by, + reformat_hist_y_given_by, +) +from pandas.plotting._matplotlib.misc import unpack_single_str_list +from pandas.plotting._matplotlib.tools import ( + create_subplots, + flatten_axes, + maybe_adjust_figure, + set_ticks_props, +) + +if TYPE_CHECKING: + from matplotlib.axes import Axes + + from pandas._typing import PlottingOrientation + + from pandas import DataFrame + + +class HistPlot(LinePlot): + @property + def _kind(self) -> Literal["hist", "kde"]: + return "hist" + + def __init__( + self, + data, + bins: int | np.ndarray | list[np.ndarray] = 10, + bottom: int | np.ndarray = 0, + **kwargs, + ) -> None: + self.bins = bins # use mpl default + self.bottom = bottom + self.xlabel = kwargs.get("xlabel") + self.ylabel = kwargs.get("ylabel") + # Do not call LinePlot.__init__ which may fill nan + MPLPlot.__init__(self, data, **kwargs) # pylint: disable=non-parent-init-called + + def _args_adjust(self) -> None: + # calculate bin number separately in different subplots + # where subplots are created based on by argument + if is_integer(self.bins): + if self.by is not None: + by_modified = unpack_single_str_list(self.by) + grouped = self.data.groupby(by_modified)[self.columns] + self.bins = [self._calculate_bins(group) for key, group in grouped] + else: + self.bins = self._calculate_bins(self.data) + + if is_list_like(self.bottom): + self.bottom = np.array(self.bottom) + + def _calculate_bins(self, data: DataFrame) -> np.ndarray: + """Calculate bins given data""" + 
nd_values = data.infer_objects(copy=False)._get_numeric_data() + values = np.ravel(nd_values) + values = values[~isna(values)] + + hist, bins = np.histogram( + values, bins=self.bins, range=self.kwds.get("range", None) + ) + return bins + + # error: Signature of "_plot" incompatible with supertype "LinePlot" + @classmethod + def _plot( # type: ignore[override] + cls, + ax, + y, + style=None, + bottom: int | np.ndarray = 0, + column_num: int = 0, + stacking_id=None, + *, + bins, + **kwds, + ): + if column_num == 0: + cls._initialize_stacker(ax, stacking_id, len(bins) - 1) + + base = np.zeros(len(bins) - 1) + bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"]) + # ignore style + n, bins, patches = ax.hist(y, bins=bins, bottom=bottom, **kwds) + cls._update_stacker(ax, stacking_id, n) + return patches + + def _make_plot(self) -> None: + colors = self._get_colors() + stacking_id = self._get_stacking_id() + + # Re-create iterated data if `by` is assigned by users + data = ( + create_iter_data_given_by(self.data, self._kind) + if self.by is not None + else self.data + ) + + for i, (label, y) in enumerate(self._iter_data(data=data)): + ax = self._get_ax(i) + + kwds = self.kwds.copy() + + label = pprint_thing(label) + label = self._mark_right_label(label, index=i) + kwds["label"] = label + + style, kwds = self._apply_style_colors(colors, kwds, i, label) + if style is not None: + kwds["style"] = style + + kwds = self._make_plot_keywords(kwds, y) + + # the bins is multi-dimension array now and each plot need only 1-d and + # when by is applied, label should be columns that are grouped + if self.by is not None: + kwds["bins"] = kwds["bins"][i] + kwds["label"] = self.columns + kwds.pop("color") + + # We allow weights to be a multi-dimensional array, e.g. a (10, 2) array, + # and each sub-array (10,) will be called in each iteration. 
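+            # Sketch of the expected shapes, assuming two grouped subplots:
+            # weights of shape (10, 2) -> subplot i gets weights[:, i], while
+            # a plain (10,) array is reused for every subplot.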
If users only + # provide 1D array, we assume the same weights is used for all iterations + weights = kwds.get("weights", None) + if weights is not None: + if np.ndim(weights) != 1 and np.shape(weights)[-1] != 1: + try: + weights = weights[:, i] + except IndexError as err: + raise ValueError( + "weights must have the same shape as data, " + "or be a single column" + ) from err + weights = weights[~isna(y)] + kwds["weights"] = weights + + y = reformat_hist_y_given_by(y, self.by) + + artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds) + + # when by is applied, show title for subplots to know which group it is + if self.by is not None: + ax.set_title(pprint_thing(label)) + + self._append_legend_handles_labels(artists[0], label) + + def _make_plot_keywords(self, kwds, y): + """merge BoxPlot/KdePlot properties to passed kwds""" + # y is required for KdePlot + kwds["bottom"] = self.bottom + kwds["bins"] = self.bins + return kwds + + def _post_plot_logic(self, ax: Axes, data) -> None: + if self.orientation == "horizontal": + ax.set_xlabel("Frequency" if self.xlabel is None else self.xlabel) + ax.set_ylabel(self.ylabel) + else: + ax.set_xlabel(self.xlabel) + ax.set_ylabel("Frequency" if self.ylabel is None else self.ylabel) + + @property + def orientation(self) -> PlottingOrientation: + if self.kwds.get("orientation", None) == "horizontal": + return "horizontal" + else: + return "vertical" + + +class KdePlot(HistPlot): + @property + def _kind(self) -> Literal["kde"]: + return "kde" + + @property + def orientation(self) -> Literal["vertical"]: + return "vertical" + + def __init__(self, data, bw_method=None, ind=None, **kwargs) -> None: + # Do not call LinePlot.__init__ which may fill nan + MPLPlot.__init__(self, data, **kwargs) # pylint: disable=non-parent-init-called + self.bw_method = bw_method + self.ind = ind + + def _args_adjust(self) -> None: + pass + + def _get_ind(self, y): + if self.ind is None: + # np.nanmax() and np.nanmin() ignores the missing values + sample_range = np.nanmax(y) - np.nanmin(y) + ind = np.linspace( + np.nanmin(y) - 0.5 * sample_range, + np.nanmax(y) + 0.5 * sample_range, + 1000, + ) + elif is_integer(self.ind): + sample_range = np.nanmax(y) - np.nanmin(y) + ind = np.linspace( + np.nanmin(y) - 0.5 * sample_range, + np.nanmax(y) + 0.5 * sample_range, + self.ind, + ) + else: + ind = self.ind + return ind + + @classmethod + def _plot( + cls, + ax, + y, + style=None, + bw_method=None, + ind=None, + column_num=None, + stacking_id=None, + **kwds, + ): + from scipy.stats import gaussian_kde + + y = remove_na_arraylike(y) + gkde = gaussian_kde(y, bw_method=bw_method) + + y = gkde.evaluate(ind) + lines = MPLPlot._plot(ax, ind, y, style=style, **kwds) + return lines + + def _make_plot_keywords(self, kwds, y): + kwds["bw_method"] = self.bw_method + kwds["ind"] = self._get_ind(y) + return kwds + + def _post_plot_logic(self, ax, data) -> None: + ax.set_ylabel("Density") + + +def _grouped_plot( + plotf, + data, + column=None, + by=None, + numeric_only: bool = True, + figsize: tuple[float, float] | None = None, + sharex: bool = True, + sharey: bool = True, + layout=None, + rot: float = 0, + ax=None, + **kwargs, +): + # error: Non-overlapping equality check (left operand type: "Optional[Tuple[float, + # float]]", right operand type: "Literal['default']") + if figsize == "default": # type: ignore[comparison-overlap] + # allowed to specify mpl default with 'default' + raise ValueError( + "figsize='default' is no longer supported. 
" + "Specify figure size by tuple instead" + ) + + grouped = data.groupby(by) + if column is not None: + grouped = grouped[column] + + naxes = len(grouped) + fig, axes = create_subplots( + naxes=naxes, figsize=figsize, sharex=sharex, sharey=sharey, ax=ax, layout=layout + ) + + _axes = flatten_axes(axes) + + for i, (key, group) in enumerate(grouped): + ax = _axes[i] + if numeric_only and isinstance(group, ABCDataFrame): + group = group._get_numeric_data() + plotf(group, ax, **kwargs) + ax.set_title(pprint_thing(key)) + + return fig, axes + + +def _grouped_hist( + data, + column=None, + by=None, + ax=None, + bins: int = 50, + figsize: tuple[float, float] | None = None, + layout=None, + sharex: bool = False, + sharey: bool = False, + rot: float = 90, + grid: bool = True, + xlabelsize: int | None = None, + xrot=None, + ylabelsize: int | None = None, + yrot=None, + legend: bool = False, + **kwargs, +): + """ + Grouped histogram + + Parameters + ---------- + data : Series/DataFrame + column : object, optional + by : object, optional + ax : axes, optional + bins : int, default 50 + figsize : tuple, optional + layout : optional + sharex : bool, default False + sharey : bool, default False + rot : float, default 90 + grid : bool, default True + legend: : bool, default False + kwargs : dict, keyword arguments passed to matplotlib.Axes.hist + + Returns + ------- + collection of Matplotlib Axes + """ + if legend: + assert "label" not in kwargs + if data.ndim == 1: + kwargs["label"] = data.name + elif column is None: + kwargs["label"] = data.columns + else: + kwargs["label"] = column + + def plot_group(group, ax) -> None: + ax.hist(group.dropna().values, bins=bins, **kwargs) + if legend: + ax.legend() + + if xrot is None: + xrot = rot + + fig, axes = _grouped_plot( + plot_group, + data, + column=column, + by=by, + sharex=sharex, + sharey=sharey, + ax=ax, + figsize=figsize, + layout=layout, + rot=rot, + ) + + set_ticks_props( + axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot + ) + + maybe_adjust_figure( + fig, bottom=0.15, top=0.9, left=0.1, right=0.9, hspace=0.5, wspace=0.3 + ) + return axes + + +def hist_series( + self, + by=None, + ax=None, + grid: bool = True, + xlabelsize: int | None = None, + xrot=None, + ylabelsize: int | None = None, + yrot=None, + figsize: tuple[float, float] | None = None, + bins: int = 10, + legend: bool = False, + **kwds, +): + import matplotlib.pyplot as plt + + if legend and "label" in kwds: + raise ValueError("Cannot use both legend and label") + + if by is None: + if kwds.get("layout", None) is not None: + raise ValueError("The 'layout' keyword is not supported when 'by' is None") + # hack until the plotting interface is a bit more unified + fig = kwds.pop( + "figure", plt.gcf() if plt.get_fignums() else plt.figure(figsize=figsize) + ) + if figsize is not None and tuple(figsize) != tuple(fig.get_size_inches()): + fig.set_size_inches(*figsize, forward=True) + if ax is None: + ax = fig.gca() + elif ax.get_figure() != fig: + raise AssertionError("passed axis not bound to passed figure") + values = self.dropna().values + if legend: + kwds["label"] = self.name + ax.hist(values, bins=bins, **kwds) + if legend: + ax.legend() + ax.grid(grid) + axes = np.array([ax]) + + set_ticks_props( + axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot + ) + + else: + if "figure" in kwds: + raise ValueError( + "Cannot pass 'figure' when using the " + "'by' argument, since a new 'Figure' instance will be created" + ) + axes = _grouped_hist( + self, 
+ by=by, + ax=ax, + grid=grid, + figsize=figsize, + bins=bins, + xlabelsize=xlabelsize, + xrot=xrot, + ylabelsize=ylabelsize, + yrot=yrot, + legend=legend, + **kwds, + ) + + if hasattr(axes, "ndim"): + if axes.ndim == 1 and len(axes) == 1: + return axes[0] + return axes + + +def hist_frame( + data, + column=None, + by=None, + grid: bool = True, + xlabelsize: int | None = None, + xrot=None, + ylabelsize: int | None = None, + yrot=None, + ax=None, + sharex: bool = False, + sharey: bool = False, + figsize: tuple[float, float] | None = None, + layout=None, + bins: int = 10, + legend: bool = False, + **kwds, +): + if legend and "label" in kwds: + raise ValueError("Cannot use both legend and label") + if by is not None: + axes = _grouped_hist( + data, + column=column, + by=by, + ax=ax, + grid=grid, + figsize=figsize, + sharex=sharex, + sharey=sharey, + layout=layout, + bins=bins, + xlabelsize=xlabelsize, + xrot=xrot, + ylabelsize=ylabelsize, + yrot=yrot, + legend=legend, + **kwds, + ) + return axes + + if column is not None: + if not isinstance(column, (list, np.ndarray, ABCIndex)): + column = [column] + data = data[column] + # GH32590 + data = data.select_dtypes( + include=(np.number, "datetime64", "datetimetz"), exclude="timedelta" + ) + naxes = len(data.columns) + + if naxes == 0: + raise ValueError( + "hist method requires numerical or datetime columns, nothing to plot." + ) + + fig, axes = create_subplots( + naxes=naxes, + ax=ax, + squeeze=False, + sharex=sharex, + sharey=sharey, + figsize=figsize, + layout=layout, + ) + _axes = flatten_axes(axes) + + can_set_label = "label" not in kwds + + for i, col in enumerate(data.columns): + ax = _axes[i] + if legend and can_set_label: + kwds["label"] = col + ax.hist(data[col].dropna().values, bins=bins, **kwds) + ax.set_title(col) + ax.grid(grid) + if legend: + ax.legend() + + set_ticks_props( + axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot + ) + maybe_adjust_figure(fig, wspace=0.3, hspace=0.3) + + return axes diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/misc.py b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/misc.py new file mode 100644 index 00000000..1f921258 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/misc.py @@ -0,0 +1,481 @@ +from __future__ import annotations + +import random +from typing import TYPE_CHECKING + +from matplotlib import patches +import matplotlib.lines as mlines +import numpy as np + +from pandas.core.dtypes.missing import notna + +from pandas.io.formats.printing import pprint_thing +from pandas.plotting._matplotlib.style import get_standard_colors +from pandas.plotting._matplotlib.tools import ( + create_subplots, + do_adjust_figure, + maybe_adjust_figure, + set_ticks_props, +) + +if TYPE_CHECKING: + from collections.abc import Hashable + + from matplotlib.axes import Axes + from matplotlib.figure import Figure + + from pandas import ( + DataFrame, + Index, + Series, + ) + + +def scatter_matrix( + frame: DataFrame, + alpha: float = 0.5, + figsize: tuple[float, float] | None = None, + ax=None, + grid: bool = False, + diagonal: str = "hist", + marker: str = ".", + density_kwds=None, + hist_kwds=None, + range_padding: float = 0.05, + **kwds, +): + df = frame._get_numeric_data() + n = df.columns.size + naxes = n * n + fig, axes = create_subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False) + + # no gaps between subplots + maybe_adjust_figure(fig, wspace=0, hspace=0) + + mask = notna(df) + + marker = 
_get_marker_compat(marker) + + hist_kwds = hist_kwds or {} + density_kwds = density_kwds or {} + + # GH 14855 + kwds.setdefault("edgecolors", "none") + + boundaries_list = [] + for a in df.columns: + values = df[a].values[mask[a].values] + rmin_, rmax_ = np.min(values), np.max(values) + rdelta_ext = (rmax_ - rmin_) * range_padding / 2 + boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext)) + + for i, a in enumerate(df.columns): + for j, b in enumerate(df.columns): + ax = axes[i, j] + + if i == j: + values = df[a].values[mask[a].values] + + # Deal with the diagonal by drawing a histogram there. + if diagonal == "hist": + ax.hist(values, **hist_kwds) + + elif diagonal in ("kde", "density"): + from scipy.stats import gaussian_kde + + y = values + gkde = gaussian_kde(y) + ind = np.linspace(y.min(), y.max(), 1000) + ax.plot(ind, gkde.evaluate(ind), **density_kwds) + + ax.set_xlim(boundaries_list[i]) + + else: + common = (mask[a] & mask[b]).values + + ax.scatter( + df[b][common], df[a][common], marker=marker, alpha=alpha, **kwds + ) + + ax.set_xlim(boundaries_list[j]) + ax.set_ylim(boundaries_list[i]) + + ax.set_xlabel(b) + ax.set_ylabel(a) + + if j != 0: + ax.yaxis.set_visible(False) + if i != n - 1: + ax.xaxis.set_visible(False) + + if len(df.columns) > 1: + lim1 = boundaries_list[0] + locs = axes[0][1].yaxis.get_majorticklocs() + locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])] + adj = (locs - lim1[0]) / (lim1[1] - lim1[0]) + + lim0 = axes[0][0].get_ylim() + adj = adj * (lim0[1] - lim0[0]) + lim0[0] + axes[0][0].yaxis.set_ticks(adj) + + if np.all(locs == locs.astype(int)): + # if all ticks are int + locs = locs.astype(int) + axes[0][0].yaxis.set_ticklabels(locs) + + set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) + + return axes + + +def _get_marker_compat(marker): + if marker not in mlines.lineMarkers: + return "o" + return marker + + +def radviz( + frame: DataFrame, + class_column, + ax: Axes | None = None, + color=None, + colormap=None, + **kwds, +) -> Axes: + import matplotlib.pyplot as plt + + def normalize(series): + a = min(series) + b = max(series) + return (series - a) / (b - a) + + n = len(frame) + classes = frame[class_column].drop_duplicates() + class_col = frame[class_column] + df = frame.drop(class_column, axis=1).apply(normalize) + + if ax is None: + ax = plt.gca() + ax.set_xlim(-1, 1) + ax.set_ylim(-1, 1) + + to_plot: dict[Hashable, list[list]] = {} + colors = get_standard_colors( + num_colors=len(classes), colormap=colormap, color_type="random", color=color + ) + + for kls in classes: + to_plot[kls] = [[], []] + + m = len(frame.columns) - 1 + s = np.array( + [(np.cos(t), np.sin(t)) for t in [2 * np.pi * (i / m) for i in range(m)]] + ) + + for i in range(n): + row = df.iloc[i].values + row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1) + y = (s * row_).sum(axis=0) / row.sum() + kls = class_col.iat[i] + to_plot[kls][0].append(y[0]) + to_plot[kls][1].append(y[1]) + + for i, kls in enumerate(classes): + ax.scatter( + to_plot[kls][0], + to_plot[kls][1], + color=colors[i], + label=pprint_thing(kls), + **kwds, + ) + ax.legend() + + ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor="none")) + + for xy, name in zip(s, df.columns): + ax.add_patch(patches.Circle(xy, radius=0.025, facecolor="gray")) + + if xy[0] < 0.0 and xy[1] < 0.0: + ax.text( + xy[0] - 0.025, xy[1] - 0.025, name, ha="right", va="top", size="small" + ) + elif xy[0] < 0.0 <= xy[1]: + ax.text( + xy[0] - 0.025, + xy[1] + 0.025, + name, + ha="right", + va="bottom", + 
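+                # upper-left quadrant: anchor right/bottom so the column
+                # label lands outside the unit circle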
size="small", + ) + elif xy[1] < 0.0 <= xy[0]: + ax.text( + xy[0] + 0.025, xy[1] - 0.025, name, ha="left", va="top", size="small" + ) + elif xy[0] >= 0.0 and xy[1] >= 0.0: + ax.text( + xy[0] + 0.025, xy[1] + 0.025, name, ha="left", va="bottom", size="small" + ) + + ax.axis("equal") + return ax + + +def andrews_curves( + frame: DataFrame, + class_column, + ax: Axes | None = None, + samples: int = 200, + color=None, + colormap=None, + **kwds, +) -> Axes: + import matplotlib.pyplot as plt + + def function(amplitudes): + def f(t): + x1 = amplitudes[0] + result = x1 / np.sqrt(2.0) + + # Take the rest of the coefficients and resize them + # appropriately. Take a copy of amplitudes as otherwise numpy + # deletes the element from amplitudes itself. + coeffs = np.delete(np.copy(amplitudes), 0) + coeffs = np.resize(coeffs, (int((coeffs.size + 1) / 2), 2)) + + # Generate the harmonics and arguments for the sin and cos + # functions. + harmonics = np.arange(0, coeffs.shape[0]) + 1 + trig_args = np.outer(harmonics, t) + + result += np.sum( + coeffs[:, 0, np.newaxis] * np.sin(trig_args) + + coeffs[:, 1, np.newaxis] * np.cos(trig_args), + axis=0, + ) + return result + + return f + + n = len(frame) + class_col = frame[class_column] + classes = frame[class_column].drop_duplicates() + df = frame.drop(class_column, axis=1) + t = np.linspace(-np.pi, np.pi, samples) + used_legends: set[str] = set() + + color_values = get_standard_colors( + num_colors=len(classes), colormap=colormap, color_type="random", color=color + ) + colors = dict(zip(classes, color_values)) + if ax is None: + ax = plt.gca() + ax.set_xlim(-np.pi, np.pi) + for i in range(n): + row = df.iloc[i].values + f = function(row) + y = f(t) + kls = class_col.iat[i] + label = pprint_thing(kls) + if label not in used_legends: + used_legends.add(label) + ax.plot(t, y, color=colors[kls], label=label, **kwds) + else: + ax.plot(t, y, color=colors[kls], **kwds) + + ax.legend(loc="upper right") + ax.grid() + return ax + + +def bootstrap_plot( + series: Series, + fig: Figure | None = None, + size: int = 50, + samples: int = 500, + **kwds, +) -> Figure: + import matplotlib.pyplot as plt + + # TODO: is the failure mentioned below still relevant? 
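+    # Note: random.sample draws *without* replacement within each resample;
+    # variability comes from repeating the draw `samples` times.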
+ # random.sample(ndarray, int) fails on python 3.3, sigh + data = list(series.values) + samplings = [random.sample(data, size) for _ in range(samples)] + + means = np.array([np.mean(sampling) for sampling in samplings]) + medians = np.array([np.median(sampling) for sampling in samplings]) + midranges = np.array( + [(min(sampling) + max(sampling)) * 0.5 for sampling in samplings] + ) + if fig is None: + fig = plt.figure() + x = list(range(samples)) + axes = [] + ax1 = fig.add_subplot(2, 3, 1) + ax1.set_xlabel("Sample") + axes.append(ax1) + ax1.plot(x, means, **kwds) + ax2 = fig.add_subplot(2, 3, 2) + ax2.set_xlabel("Sample") + axes.append(ax2) + ax2.plot(x, medians, **kwds) + ax3 = fig.add_subplot(2, 3, 3) + ax3.set_xlabel("Sample") + axes.append(ax3) + ax3.plot(x, midranges, **kwds) + ax4 = fig.add_subplot(2, 3, 4) + ax4.set_xlabel("Mean") + axes.append(ax4) + ax4.hist(means, **kwds) + ax5 = fig.add_subplot(2, 3, 5) + ax5.set_xlabel("Median") + axes.append(ax5) + ax5.hist(medians, **kwds) + ax6 = fig.add_subplot(2, 3, 6) + ax6.set_xlabel("Midrange") + axes.append(ax6) + ax6.hist(midranges, **kwds) + for axis in axes: + plt.setp(axis.get_xticklabels(), fontsize=8) + plt.setp(axis.get_yticklabels(), fontsize=8) + if do_adjust_figure(fig): + plt.tight_layout() + return fig + + +def parallel_coordinates( + frame: DataFrame, + class_column, + cols=None, + ax: Axes | None = None, + color=None, + use_columns: bool = False, + xticks=None, + colormap=None, + axvlines: bool = True, + axvlines_kwds=None, + sort_labels: bool = False, + **kwds, +) -> Axes: + import matplotlib.pyplot as plt + + if axvlines_kwds is None: + axvlines_kwds = {"linewidth": 1, "color": "black"} + + n = len(frame) + classes = frame[class_column].drop_duplicates() + class_col = frame[class_column] + + if cols is None: + df = frame.drop(class_column, axis=1) + else: + df = frame[cols] + + used_legends: set[str] = set() + + ncols = len(df.columns) + + # determine values to use for xticks + x: list[int] | Index + if use_columns is True: + if not np.all(np.isreal(list(df.columns))): + raise ValueError("Columns must be numeric to be used as xticks") + x = df.columns + elif xticks is not None: + if not np.all(np.isreal(xticks)): + raise ValueError("xticks specified must be numeric") + if len(xticks) != ncols: + raise ValueError("Length of xticks must match number of columns") + x = xticks + else: + x = list(range(ncols)) + + if ax is None: + ax = plt.gca() + + color_values = get_standard_colors( + num_colors=len(classes), colormap=colormap, color_type="random", color=color + ) + + if sort_labels: + classes = sorted(classes) + color_values = sorted(color_values) + colors = dict(zip(classes, color_values)) + + for i in range(n): + y = df.iloc[i].values + kls = class_col.iat[i] + label = pprint_thing(kls) + if label not in used_legends: + used_legends.add(label) + ax.plot(x, y, color=colors[kls], label=label, **kwds) + else: + ax.plot(x, y, color=colors[kls], **kwds) + + if axvlines: + for i in x: + ax.axvline(i, **axvlines_kwds) + + ax.set_xticks(x) + ax.set_xticklabels(df.columns) + ax.set_xlim(x[0], x[-1]) + ax.legend(loc="upper right") + ax.grid() + return ax + + +def lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Axes: + # workaround because `c='b'` is hardcoded in matplotlib's scatter method + import matplotlib.pyplot as plt + + kwds.setdefault("c", plt.rcParams["patch.facecolor"]) + + data = series.values + y1 = data[:-lag] + y2 = data[lag:] + if ax is None: + ax = plt.gca() + ax.set_xlabel("y(t)") + 
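+    # the scatter below plots (y[t], y[t + lag]) pairs; visible structure
+    # suggests autocorrelation at the chosen lag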
ax.set_ylabel(f"y(t + {lag})") + ax.scatter(y1, y2, **kwds) + return ax + + +def autocorrelation_plot(series: Series, ax: Axes | None = None, **kwds) -> Axes: + import matplotlib.pyplot as plt + + n = len(series) + data = np.asarray(series) + if ax is None: + ax = plt.gca() + ax.set_xlim(1, n) + ax.set_ylim(-1.0, 1.0) + mean = np.mean(data) + c0 = np.sum((data - mean) ** 2) / n + + def r(h): + return ((data[: n - h] - mean) * (data[h:] - mean)).sum() / n / c0 + + x = np.arange(n) + 1 + y = [r(loc) for loc in x] + z95 = 1.959963984540054 + z99 = 2.5758293035489004 + ax.axhline(y=z99 / np.sqrt(n), linestyle="--", color="grey") + ax.axhline(y=z95 / np.sqrt(n), color="grey") + ax.axhline(y=0.0, color="black") + ax.axhline(y=-z95 / np.sqrt(n), color="grey") + ax.axhline(y=-z99 / np.sqrt(n), linestyle="--", color="grey") + ax.set_xlabel("Lag") + ax.set_ylabel("Autocorrelation") + ax.plot(x, y, **kwds) + if "label" in kwds: + ax.legend() + ax.grid() + return ax + + +def unpack_single_str_list(keys): + # GH 42795 + if isinstance(keys, list) and len(keys) == 1: + keys = keys[0] + return keys diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/style.py b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/style.py new file mode 100644 index 00000000..a5f34e94 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/style.py @@ -0,0 +1,276 @@ +from __future__ import annotations + +from collections.abc import ( + Collection, + Iterator, +) +import itertools +from typing import ( + TYPE_CHECKING, + cast, +) +import warnings + +import matplotlib as mpl +import matplotlib.colors +import numpy as np + +from pandas._typing import MatplotlibColor as Color +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import is_list_like + +import pandas.core.common as com + +if TYPE_CHECKING: + from matplotlib.colors import Colormap + + +def get_standard_colors( + num_colors: int, + colormap: Colormap | None = None, + color_type: str = "default", + color: dict[str, Color] | Color | Collection[Color] | None = None, +): + """ + Get standard colors based on `colormap`, `color_type` or `color` inputs. + + Parameters + ---------- + num_colors : int + Minimum number of colors to be returned. + Ignored if `color` is a dictionary. + colormap : :py:class:`matplotlib.colors.Colormap`, optional + Matplotlib colormap. + When provided, the resulting colors will be derived from the colormap. + color_type : {"default", "random"}, optional + Type of colors to derive. Used if provided `color` and `colormap` are None. + Ignored if either `color` or `colormap` are not None. + color : dict or str or sequence, optional + Color(s) to be used for deriving sequence of colors. + Can be either be a dictionary, or a single color (single color string, + or sequence of floats representing a single color), + or a sequence of colors. + + Returns + ------- + dict or list + Standard colors. Can either be a mapping if `color` was a dictionary, + or a list of colors with a length of `num_colors` or more. + + Warns + ----- + UserWarning + If both `colormap` and `color` are provided. + Parameter `color` will override. 
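+
+    Examples
+    --------
+    The exact colors depend on the active matplotlib style; under the
+    default rcParams:
+
+    >>> get_standard_colors(3)  # doctest: +SKIP
+    ['#1f77b4', '#ff7f0e', '#2ca02c']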
+ """ + if isinstance(color, dict): + return color + + colors = _derive_colors( + color=color, + colormap=colormap, + color_type=color_type, + num_colors=num_colors, + ) + + return list(_cycle_colors(colors, num_colors=num_colors)) + + +def _derive_colors( + *, + color: Color | Collection[Color] | None, + colormap: str | Colormap | None, + color_type: str, + num_colors: int, +) -> list[Color]: + """ + Derive colors from either `colormap`, `color_type` or `color` inputs. + + Get a list of colors either from `colormap`, or from `color`, + or from `color_type` (if both `colormap` and `color` are None). + + Parameters + ---------- + color : str or sequence, optional + Color(s) to be used for deriving sequence of colors. + Can be either be a single color (single color string, or sequence of floats + representing a single color), or a sequence of colors. + colormap : :py:class:`matplotlib.colors.Colormap`, optional + Matplotlib colormap. + When provided, the resulting colors will be derived from the colormap. + color_type : {"default", "random"}, optional + Type of colors to derive. Used if provided `color` and `colormap` are None. + Ignored if either `color` or `colormap`` are not None. + num_colors : int + Number of colors to be extracted. + + Returns + ------- + list + List of colors extracted. + + Warns + ----- + UserWarning + If both `colormap` and `color` are provided. + Parameter `color` will override. + """ + if color is None and colormap is not None: + return _get_colors_from_colormap(colormap, num_colors=num_colors) + elif color is not None: + if colormap is not None: + warnings.warn( + "'color' and 'colormap' cannot be used simultaneously. Using 'color'", + stacklevel=find_stack_level(), + ) + return _get_colors_from_color(color) + else: + return _get_colors_from_color_type(color_type, num_colors=num_colors) + + +def _cycle_colors(colors: list[Color], num_colors: int) -> Iterator[Color]: + """Cycle colors until achieving max of `num_colors` or length of `colors`. + + Extra colors will be ignored by matplotlib if there are more colors + than needed and nothing needs to be done here. + """ + max_colors = max(num_colors, len(colors)) + yield from itertools.islice(itertools.cycle(colors), max_colors) + + +def _get_colors_from_colormap( + colormap: str | Colormap, + num_colors: int, +) -> list[Color]: + """Get colors from colormap.""" + cmap = _get_cmap_instance(colormap) + return [cmap(num) for num in np.linspace(0, 1, num=num_colors)] + + +def _get_cmap_instance(colormap: str | Colormap) -> Colormap: + """Get instance of matplotlib colormap.""" + if isinstance(colormap, str): + cmap = colormap + colormap = mpl.colormaps[colormap] + if colormap is None: + raise ValueError(f"Colormap {cmap} is not recognized") + return colormap + + +def _get_colors_from_color( + color: Color | Collection[Color], +) -> list[Color]: + """Get colors from user input color.""" + if len(color) == 0: + raise ValueError(f"Invalid color argument: {color}") + + if _is_single_color(color): + color = cast(Color, color) + return [color] + + color = cast(Collection[Color], color) + return list(_gen_list_of_colors_from_iterable(color)) + + +def _is_single_color(color: Color | Collection[Color]) -> bool: + """Check if `color` is a single color, not a sequence of colors. + + Single color is of these kinds: + - Named color "red", "C0", "firebrick" + - Alias "g" + - Sequence of floats, such as (0.1, 0.2, 0.3) or (0.1, 0.2, 0.3, 0.4). 
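+
+    Sequences of colors, e.g. ["red", "green"] or the string "rgb", are
+    not single colors and return False here.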
+ + See Also + -------- + _is_single_string_color + """ + if isinstance(color, str) and _is_single_string_color(color): + # GH #36972 + return True + + if _is_floats_color(color): + return True + + return False + + +def _gen_list_of_colors_from_iterable(color: Collection[Color]) -> Iterator[Color]: + """ + Yield colors from string of several letters or from collection of colors. + """ + for x in color: + if _is_single_color(x): + yield x + else: + raise ValueError(f"Invalid color {x}") + + +def _is_floats_color(color: Color | Collection[Color]) -> bool: + """Check if color comprises a sequence of floats representing color.""" + return bool( + is_list_like(color) + and (len(color) == 3 or len(color) == 4) + and all(isinstance(x, (int, float)) for x in color) + ) + + +def _get_colors_from_color_type(color_type: str, num_colors: int) -> list[Color]: + """Get colors from user input color type.""" + if color_type == "default": + return _get_default_colors(num_colors) + elif color_type == "random": + return _get_random_colors(num_colors) + else: + raise ValueError("color_type must be either 'default' or 'random'") + + +def _get_default_colors(num_colors: int) -> list[Color]: + """Get `num_colors` of default colors from matplotlib rc params.""" + import matplotlib.pyplot as plt + + colors = [c["color"] for c in plt.rcParams["axes.prop_cycle"]] + return colors[0:num_colors] + + +def _get_random_colors(num_colors: int) -> list[Color]: + """Get `num_colors` of random colors.""" + return [_random_color(num) for num in range(num_colors)] + + +def _random_color(column: int) -> list[float]: + """Get a random color represented as a list of length 3""" + # GH17525 use common._random_state to avoid resetting the seed + rs = com.random_state(column) + return rs.rand(3).tolist() + + +def _is_single_string_color(color: Color) -> bool: + """Check if `color` is a single string color. + + Examples of single string colors: + - 'r' + - 'g' + - 'red' + - 'green' + - 'C3' + - 'firebrick' + + Parameters + ---------- + color : Color + Color string or sequence of floats. + + Returns + ------- + bool + True if `color` looks like a valid color. + False otherwise. 
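+
+    Examples
+    --------
+    >>> _is_single_string_color("firebrick")
+    True
+    >>> _is_single_string_color("not a color")
+    False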
+ """ + conv = matplotlib.colors.ColorConverter() + try: + conv.to_rgba(color) + except ValueError: + return False + else: + return True diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/timeseries.py b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/timeseries.py new file mode 100644 index 00000000..90e6f29d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/timeseries.py @@ -0,0 +1,348 @@ +# TODO: Use the fact that axis can have units to simplify the process + +from __future__ import annotations + +import functools +from typing import ( + TYPE_CHECKING, + cast, +) + +import numpy as np + +from pandas._libs.tslibs import ( + BaseOffset, + Period, + to_offset, +) +from pandas._libs.tslibs.dtypes import FreqGroup + +from pandas.core.dtypes.generic import ( + ABCDatetimeIndex, + ABCPeriodIndex, + ABCTimedeltaIndex, +) + +from pandas.io.formats.printing import pprint_thing +from pandas.plotting._matplotlib.converter import ( + TimeSeries_DateFormatter, + TimeSeries_DateLocator, + TimeSeries_TimedeltaFormatter, +) +from pandas.tseries.frequencies import ( + get_period_alias, + is_subperiod, + is_superperiod, +) + +if TYPE_CHECKING: + from datetime import timedelta + + from matplotlib.axes import Axes + + from pandas import ( + DataFrame, + DatetimeIndex, + Index, + Series, + ) + +# --------------------------------------------------------------------- +# Plotting functions and monkey patches + + +def maybe_resample(series: Series, ax: Axes, kwargs): + # resample against axes freq if necessary + freq, ax_freq = _get_freq(ax, series) + + if freq is None: # pragma: no cover + raise ValueError("Cannot use dynamic axis without frequency info") + + # Convert DatetimeIndex to PeriodIndex + if isinstance(series.index, ABCDatetimeIndex): + series = series.to_period(freq=freq) + + if ax_freq is not None and freq != ax_freq: + if is_superperiod(freq, ax_freq): # upsample input + series = series.copy() + # error: "Index" has no attribute "asfreq" + series.index = series.index.asfreq( # type: ignore[attr-defined] + ax_freq, how="s" + ) + freq = ax_freq + elif _is_sup(freq, ax_freq): # one is weekly + how = kwargs.pop("how", "last") + series = getattr(series.resample("D"), how)().dropna() + series = getattr(series.resample(ax_freq), how)().dropna() + freq = ax_freq + elif is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq): + _upsample_others(ax, freq, kwargs) + else: # pragma: no cover + raise ValueError("Incompatible frequency conversion") + return freq, series + + +def _is_sub(f1: str, f2: str) -> bool: + return (f1.startswith("W") and is_subperiod("D", f2)) or ( + f2.startswith("W") and is_subperiod(f1, "D") + ) + + +def _is_sup(f1: str, f2: str) -> bool: + return (f1.startswith("W") and is_superperiod("D", f2)) or ( + f2.startswith("W") and is_superperiod(f1, "D") + ) + + +def _upsample_others(ax: Axes, freq, kwargs) -> None: + legend = ax.get_legend() + lines, labels = _replot_ax(ax, freq, kwargs) + _replot_ax(ax, freq, kwargs) + + other_ax = None + if hasattr(ax, "left_ax"): + other_ax = ax.left_ax + if hasattr(ax, "right_ax"): + other_ax = ax.right_ax + + if other_ax is not None: + rlines, rlabels = _replot_ax(other_ax, freq, kwargs) + lines.extend(rlines) + labels.extend(rlabels) + + if legend is not None and kwargs.get("legend", True) and len(lines) > 0: + title = legend.get_title().get_text() + if title == "None": + title = None + ax.legend(lines, labels, loc="best", title=title) + + +def _replot_ax(ax: Axes, freq, 
kwargs): + data = getattr(ax, "_plot_data", None) + + # clear current axes and data + ax._plot_data = [] + ax.clear() + + decorate_axes(ax, freq, kwargs) + + lines = [] + labels = [] + if data is not None: + for series, plotf, kwds in data: + series = series.copy() + idx = series.index.asfreq(freq, how="S") + series.index = idx + ax._plot_data.append((series, plotf, kwds)) + + # for tsplot + if isinstance(plotf, str): + from pandas.plotting._matplotlib import PLOT_CLASSES + + plotf = PLOT_CLASSES[plotf]._plot + + lines.append(plotf(ax, series.index._mpl_repr(), series.values, **kwds)[0]) + labels.append(pprint_thing(series.name)) + + return lines, labels + + +def decorate_axes(ax: Axes, freq, kwargs) -> None: + """Initialize axes for time-series plotting""" + if not hasattr(ax, "_plot_data"): + ax._plot_data = [] + + ax.freq = freq + xaxis = ax.get_xaxis() + xaxis.freq = freq + if not hasattr(ax, "legendlabels"): + ax.legendlabels = [kwargs.get("label", None)] + else: + ax.legendlabels.append(kwargs.get("label", None)) + ax.view_interval = None + ax.date_axis_info = None + + +def _get_ax_freq(ax: Axes): + """ + Get the freq attribute of the ax object if set. + Also checks shared axes (eg when using secondary yaxis, sharex=True + or twinx) + """ + ax_freq = getattr(ax, "freq", None) + if ax_freq is None: + # check for left/right ax in case of secondary yaxis + if hasattr(ax, "left_ax"): + ax_freq = getattr(ax.left_ax, "freq", None) + elif hasattr(ax, "right_ax"): + ax_freq = getattr(ax.right_ax, "freq", None) + if ax_freq is None: + # check if a shared ax (sharex/twinx) has already freq set + shared_axes = ax.get_shared_x_axes().get_siblings(ax) + if len(shared_axes) > 1: + for shared_ax in shared_axes: + ax_freq = getattr(shared_ax, "freq", None) + if ax_freq is not None: + break + return ax_freq + + +def _get_period_alias(freq: timedelta | BaseOffset | str) -> str | None: + freqstr = to_offset(freq).rule_code + + return get_period_alias(freqstr) + + +def _get_freq(ax: Axes, series: Series): + # get frequency from data + freq = getattr(series.index, "freq", None) + if freq is None: + freq = getattr(series.index, "inferred_freq", None) + freq = to_offset(freq) + + ax_freq = _get_ax_freq(ax) + + # use axes freq if no data freq + if freq is None: + freq = ax_freq + + # get the period frequency + freq = _get_period_alias(freq) + return freq, ax_freq + + +def use_dynamic_x(ax: Axes, data: DataFrame | Series) -> bool: + freq = _get_index_freq(data.index) + ax_freq = _get_ax_freq(ax) + + if freq is None: # convert irregular if axes has freq info + freq = ax_freq + # do not use tsplot if irregular was plotted first + elif (ax_freq is None) and (len(ax.get_lines()) > 0): + return False + + if freq is None: + return False + + freq_str = _get_period_alias(freq) + + if freq_str is None: + return False + + # FIXME: hack this for 0.10.1, creating more technical debt...sigh + if isinstance(data.index, ABCDatetimeIndex): + # error: "BaseOffset" has no attribute "_period_dtype_code" + base = to_offset(freq_str)._period_dtype_code # type: ignore[attr-defined] + x = data.index + if base <= FreqGroup.FR_DAY.value: + return x[:1].is_normalized + period = Period(x[0], freq_str) + assert isinstance(period, Period) + return period.to_timestamp().tz_localize(x.tz) == x[0] + return True + + +def _get_index_freq(index: Index) -> BaseOffset | None: + freq = getattr(index, "freq", None) + if freq is None: + freq = getattr(index, "inferred_freq", None) + if freq == "B": + # error: "Index" has no attribute "dayofweek" + 
weekdays = np.unique(index.dayofweek) # type: ignore[attr-defined] + if (5 in weekdays) or (6 in weekdays): + freq = None + + freq = to_offset(freq) + return freq + + +def maybe_convert_index(ax: Axes, data): + # tsplot converts automatically, but don't want to convert index + # over and over for DataFrames + if isinstance(data.index, (ABCDatetimeIndex, ABCPeriodIndex)): + freq: str | BaseOffset | None = data.index.freq + + if freq is None: + # We only get here for DatetimeIndex + data.index = cast("DatetimeIndex", data.index) + freq = data.index.inferred_freq + freq = to_offset(freq) + + if freq is None: + freq = _get_ax_freq(ax) + + if freq is None: + raise ValueError("Could not get frequency alias for plotting") + + freq_str = _get_period_alias(freq) + + import warnings + + with warnings.catch_warnings(): + # suppress Period[B] deprecation warning + # TODO: need to find an alternative to this before the deprecation + # is enforced! + warnings.filterwarnings( + "ignore", + r"PeriodDtype\[B\] is deprecated", + category=FutureWarning, + ) + + if isinstance(data.index, ABCDatetimeIndex): + data = data.tz_localize(None).to_period(freq=freq_str) + elif isinstance(data.index, ABCPeriodIndex): + data.index = data.index.asfreq(freq=freq_str) + return data + + +# Patch methods for subplot. Only format_dateaxis is currently used. +# Do we need the rest for convenience? + + +def _format_coord(freq, t, y) -> str: + time_period = Period(ordinal=int(t), freq=freq) + return f"t = {time_period} y = {y:8f}" + + +def format_dateaxis(subplot, freq, index) -> None: + """ + Pretty-formats the date axis (x-axis). + + Major and minor ticks are automatically set for the frequency of the + current underlying series. As the dynamic mode is activated by + default, changing the limits of the x axis will intelligently change + the positions of the ticks. + """ + from matplotlib import pylab + + # handle index specific formatting + # Note: DatetimeIndex does not use this + # interface. 
DatetimeIndex uses matplotlib.date directly + if isinstance(index, ABCPeriodIndex): + majlocator = TimeSeries_DateLocator( + freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot + ) + minlocator = TimeSeries_DateLocator( + freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot + ) + subplot.xaxis.set_major_locator(majlocator) + subplot.xaxis.set_minor_locator(minlocator) + + majformatter = TimeSeries_DateFormatter( + freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot + ) + minformatter = TimeSeries_DateFormatter( + freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot + ) + subplot.xaxis.set_major_formatter(majformatter) + subplot.xaxis.set_minor_formatter(minformatter) + + # x and y coord info + subplot.format_coord = functools.partial(_format_coord, freq) + + elif isinstance(index, ABCTimedeltaIndex): + subplot.xaxis.set_major_formatter(TimeSeries_TimedeltaFormatter()) + else: + raise TypeError("index type not supported") + + pylab.draw_if_interactive() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/tools.py b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/tools.py new file mode 100644 index 00000000..8c0e401f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_matplotlib/tools.py @@ -0,0 +1,484 @@ +# being a bit too dynamic +from __future__ import annotations + +from math import ceil +from typing import TYPE_CHECKING +import warnings + +from matplotlib import ticker +import matplotlib.table +import numpy as np + +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import is_list_like +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Sequence, + ) + + from matplotlib.axes import Axes + from matplotlib.axis import Axis + from matplotlib.figure import Figure + from matplotlib.lines import Line2D + from matplotlib.table import Table + + from pandas import ( + DataFrame, + Series, + ) + + +def do_adjust_figure(fig: Figure) -> bool: + """Whether fig has constrained_layout enabled.""" + if not hasattr(fig, "get_constrained_layout"): + return False + return not fig.get_constrained_layout() + + +def maybe_adjust_figure(fig: Figure, *args, **kwargs) -> None: + """Call fig.subplots_adjust unless fig has constrained_layout enabled.""" + if do_adjust_figure(fig): + fig.subplots_adjust(*args, **kwargs) + + +def format_date_labels(ax: Axes, rot) -> None: + # mini version of autofmt_xdate + for label in ax.get_xticklabels(): + label.set_ha("right") + label.set_rotation(rot) + fig = ax.get_figure() + maybe_adjust_figure(fig, bottom=0.2) + + +def table( + ax, data: DataFrame | Series, rowLabels=None, colLabels=None, **kwargs +) -> Table: + if isinstance(data, ABCSeries): + data = data.to_frame() + elif isinstance(data, ABCDataFrame): + pass + else: + raise ValueError("Input data must be DataFrame or Series") + + if rowLabels is None: + rowLabels = data.index + + if colLabels is None: + colLabels = data.columns + + cellText = data.values + + return matplotlib.table.table( + ax, cellText=cellText, rowLabels=rowLabels, colLabels=colLabels, **kwargs + ) + + +def _get_layout( + nplots: int, + layout: tuple[int, int] | None = None, + layout_type: str = "box", +) -> tuple[int, int]: + if layout is not None: + if not isinstance(layout, (tuple, list)) or len(layout) != 2: + raise ValueError("Layout must be a tuple of (rows, columns)") + + nrows, ncols = layout + + if 
nrows == -1 and ncols > 0: + layout = nrows, ncols = (ceil(nplots / ncols), ncols) + elif ncols == -1 and nrows > 0: + layout = nrows, ncols = (nrows, ceil(nplots / nrows)) + elif ncols <= 0 and nrows <= 0: + msg = "At least one dimension of layout must be positive" + raise ValueError(msg) + + if nrows * ncols < nplots: + raise ValueError( + f"Layout of {nrows}x{ncols} must be larger than required size {nplots}" + ) + + return layout + + if layout_type == "single": + return (1, 1) + elif layout_type == "horizontal": + return (1, nplots) + elif layout_type == "vertical": + return (nplots, 1) + + layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)} + try: + return layouts[nplots] + except KeyError: + k = 1 + while k**2 < nplots: + k += 1 + + if (k - 1) * k >= nplots: + return k, (k - 1) + else: + return k, k + + +# copied from matplotlib/pyplot.py and modified for pandas.plotting + + +def create_subplots( + naxes: int, + sharex: bool = False, + sharey: bool = False, + squeeze: bool = True, + subplot_kw=None, + ax=None, + layout=None, + layout_type: str = "box", + **fig_kw, +): + """ + Create a figure with a set of subplots already made. + + This utility wrapper makes it convenient to create common layouts of + subplots, including the enclosing figure object, in a single call. + + Parameters + ---------- + naxes : int + Number of required axes. Exceeded axes are set invisible. Default is + nrows * ncols. + + sharex : bool + If True, the X axis will be shared amongst all subplots. + + sharey : bool + If True, the Y axis will be shared amongst all subplots. + + squeeze : bool + + If True, extra dimensions are squeezed out from the returned axis object: + - if only one subplot is constructed (nrows=ncols=1), the resulting + single Axis object is returned as a scalar. + - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object + array of Axis objects are returned as numpy 1-d arrays. + - for NxM subplots with N>1 and M>1 are returned as a 2d array. + + If False, no squeezing is done: the returned axis object is always + a 2-d array containing Axis instances, even if it ends up being 1x1. + + subplot_kw : dict + Dict with keywords passed to the add_subplot() call used to create each + subplots. + + ax : Matplotlib axis object, optional + + layout : tuple + Number of rows and columns of the subplot grid. + If not specified, calculated from naxes and layout_type + + layout_type : {'box', 'horizontal', 'vertical'}, default 'box' + Specify how to layout the subplot grid. + + fig_kw : Other keyword arguments to be passed to the figure() call. + Note that all keywords not recognized above will be + automatically included here. + + Returns + ------- + fig, ax : tuple + - fig is the Matplotlib Figure object + - ax can be either a single axis object or an array of axis objects if + more than one subplot was created. The dimensions of the resulting array + can be controlled with the squeeze keyword, see above. 
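+
+    Notes
+    -----
+    The examples below are inherited from ``matplotlib.pyplot.subplots``;
+    create_subplots adds the naxes/layout_type handling on top of them.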
+ + Examples + -------- + x = np.linspace(0, 2*np.pi, 400) + y = np.sin(x**2) + + # Just a figure and one subplot + f, ax = plt.subplots() + ax.plot(x, y) + ax.set_title('Simple plot') + + # Two subplots, unpack the output array immediately + f, (ax1, ax2) = plt.subplots(1, 2, sharey=True) + ax1.plot(x, y) + ax1.set_title('Sharing Y axis') + ax2.scatter(x, y) + + # Four polar axes + plt.subplots(2, 2, subplot_kw=dict(polar=True)) + """ + import matplotlib.pyplot as plt + + if subplot_kw is None: + subplot_kw = {} + + if ax is None: + fig = plt.figure(**fig_kw) + else: + if is_list_like(ax): + if squeeze: + ax = flatten_axes(ax) + if layout is not None: + warnings.warn( + "When passing multiple axes, layout keyword is ignored.", + UserWarning, + stacklevel=find_stack_level(), + ) + if sharex or sharey: + warnings.warn( + "When passing multiple axes, sharex and sharey " + "are ignored. These settings must be specified when creating axes.", + UserWarning, + stacklevel=find_stack_level(), + ) + if ax.size == naxes: + fig = ax.flat[0].get_figure() + return fig, ax + else: + raise ValueError( + f"The number of passed axes must be {naxes}, the " + "same as the output plot" + ) + + fig = ax.get_figure() + # if ax is passed and a number of subplots is 1, return ax as it is + if naxes == 1: + if squeeze: + return fig, ax + else: + return fig, flatten_axes(ax) + else: + warnings.warn( + "To output multiple subplots, the figure containing " + "the passed axes is being cleared.", + UserWarning, + stacklevel=find_stack_level(), + ) + fig.clear() + + nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type) + nplots = nrows * ncols + + # Create empty object array to hold all axes. It's easiest to make it 1-d + # so we can just append subplots upon creation, and then + axarr = np.empty(nplots, dtype=object) + + # Create first subplot separately, so we can share it if requested + ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw) + + if sharex: + subplot_kw["sharex"] = ax0 + if sharey: + subplot_kw["sharey"] = ax0 + axarr[0] = ax0 + + # Note off-by-one counting because add_subplot uses the MATLAB 1-based + # convention. + for i in range(1, nplots): + kwds = subplot_kw.copy() + # Set sharex and sharey to None for blank/dummy axes, these can + # interfere with proper axis limits on the visible axes if + # they share axes e.g. issue #7528 + if i >= naxes: + kwds["sharex"] = None + kwds["sharey"] = None + ax = fig.add_subplot(nrows, ncols, i + 1, **kwds) + axarr[i] = ax + + if naxes != nplots: + for ax in axarr[naxes:]: + ax.set_visible(False) + + handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) + + if squeeze: + # Reshape the array to have the final desired dimension (nrow,ncol), + # though discarding unneeded dimensions that equal 1. If we only have + # one subplot, just return it instead of a 1-element array. 
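+        # e.g. naxes=3 with the default "box" layout gives a 2x2 grid whose
+        # fourth axes is hidden, so squeeze returns a (2, 2) array here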
+ if nplots == 1: + axes = axarr[0] + else: + axes = axarr.reshape(nrows, ncols).squeeze() + else: + # returned axis array will be always 2-d, even if nrows=ncols=1 + axes = axarr.reshape(nrows, ncols) + + return fig, axes + + +def _remove_labels_from_axis(axis: Axis) -> None: + for t in axis.get_majorticklabels(): + t.set_visible(False) + + # set_visible will not be effective if + # minor axis has NullLocator and NullFormatter (default) + if isinstance(axis.get_minor_locator(), ticker.NullLocator): + axis.set_minor_locator(ticker.AutoLocator()) + if isinstance(axis.get_minor_formatter(), ticker.NullFormatter): + axis.set_minor_formatter(ticker.FormatStrFormatter("")) + for t in axis.get_minorticklabels(): + t.set_visible(False) + + axis.get_label().set_visible(False) + + +def _has_externally_shared_axis(ax1: Axes, compare_axis: str) -> bool: + """ + Return whether an axis is externally shared. + + Parameters + ---------- + ax1 : matplotlib.axes.Axes + Axis to query. + compare_axis : str + `"x"` or `"y"` according to whether the X-axis or Y-axis is being + compared. + + Returns + ------- + bool + `True` if the axis is externally shared. Otherwise `False`. + + Notes + ----- + If two axes with different positions are sharing an axis, they can be + referred to as *externally* sharing the common axis. + + If two axes sharing an axis also have the same position, they can be + referred to as *internally* sharing the common axis (a.k.a twinning). + + _handle_shared_axes() is only interested in axes externally sharing an + axis, regardless of whether either of the axes is also internally sharing + with a third axis. + """ + if compare_axis == "x": + axes = ax1.get_shared_x_axes() + elif compare_axis == "y": + axes = ax1.get_shared_y_axes() + else: + raise ValueError( + "_has_externally_shared_axis() needs 'x' or 'y' as a second parameter" + ) + + axes = axes.get_siblings(ax1) + + # Retain ax1 and any of its siblings which aren't in the same position as it + ax1_points = ax1.get_position().get_points() + + for ax2 in axes: + if not np.array_equal(ax1_points, ax2.get_position().get_points()): + return True + + return False + + +def handle_shared_axes( + axarr: Iterable[Axes], + nplots: int, + naxes: int, + nrows: int, + ncols: int, + sharex: bool, + sharey: bool, +) -> None: + if nplots > 1: + row_num = lambda x: x.get_subplotspec().rowspan.start + col_num = lambda x: x.get_subplotspec().colspan.start + + is_first_col = lambda x: x.get_subplotspec().is_first_col() + + if nrows > 1: + try: + # first find out the ax layout, + # so that we can correctly handle 'gaps" + layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool_) + for ax in axarr: + layout[row_num(ax), col_num(ax)] = ax.get_visible() + + for ax in axarr: + # only the last row of subplots should get x labels -> all + # other off layout handles the case that the subplot is + # the last in the column, because below is no subplot/gap. + if not layout[row_num(ax) + 1, col_num(ax)]: + continue + if sharex or _has_externally_shared_axis(ax, "x"): + _remove_labels_from_axis(ax.xaxis) + + except IndexError: + # if gridspec is used, ax.rowNum and ax.colNum may different + # from layout shape. 
in this case, use last_row logic + is_last_row = lambda x: x.get_subplotspec().is_last_row() + for ax in axarr: + if is_last_row(ax): + continue + if sharex or _has_externally_shared_axis(ax, "x"): + _remove_labels_from_axis(ax.xaxis) + + if ncols > 1: + for ax in axarr: + # only the first column should get y labels -> set all other to + # off as we only have labels in the first column and we always + # have a subplot there, we can skip the layout test + if is_first_col(ax): + continue + if sharey or _has_externally_shared_axis(ax, "y"): + _remove_labels_from_axis(ax.yaxis) + + +def flatten_axes(axes: Axes | Sequence[Axes]) -> np.ndarray: + if not is_list_like(axes): + return np.array([axes]) + elif isinstance(axes, (np.ndarray, ABCIndex)): + return np.asarray(axes).ravel() + return np.array(axes) + + +def set_ticks_props( + axes: Axes | Sequence[Axes], + xlabelsize: int | None = None, + xrot=None, + ylabelsize: int | None = None, + yrot=None, +): + import matplotlib.pyplot as plt + + for ax in flatten_axes(axes): + if xlabelsize is not None: + plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) + if xrot is not None: + plt.setp(ax.get_xticklabels(), rotation=xrot) + if ylabelsize is not None: + plt.setp(ax.get_yticklabels(), fontsize=ylabelsize) + if yrot is not None: + plt.setp(ax.get_yticklabels(), rotation=yrot) + return axes + + +def get_all_lines(ax: Axes) -> list[Line2D]: + lines = ax.get_lines() + + if hasattr(ax, "right_ax"): + lines += ax.right_ax.get_lines() + + if hasattr(ax, "left_ax"): + lines += ax.left_ax.get_lines() + + return lines + + +def get_xlim(lines: Iterable[Line2D]) -> tuple[float, float]: + left, right = np.inf, -np.inf + for line in lines: + x = line.get_xdata(orig=False) + left = min(np.nanmin(x), left) + right = max(np.nanmax(x), right) + return left, right diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_misc.py b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_misc.py new file mode 100644 index 00000000..625780ac --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/plotting/_misc.py @@ -0,0 +1,688 @@ +from __future__ import annotations + +from contextlib import contextmanager +from typing import ( + TYPE_CHECKING, + Any, +) + +from pandas.plotting._core import _get_plot_backend + +if TYPE_CHECKING: + from collections.abc import ( + Generator, + Mapping, + ) + + from matplotlib.axes import Axes + from matplotlib.colors import Colormap + from matplotlib.figure import Figure + from matplotlib.table import Table + import numpy as np + + from pandas import ( + DataFrame, + Series, + ) + + +def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table: + """ + Helper function to convert DataFrame and Series to matplotlib.table. + + Parameters + ---------- + ax : Matplotlib axes object + data : DataFrame or Series + Data for table contents. + **kwargs + Keyword arguments to be passed to matplotlib.table.table. + If `rowLabels` or `colLabels` is not specified, data index or column + name will be used. + + Returns + ------- + matplotlib table object + + Examples + -------- + + .. plot:: + :context: close-figs + + >>> import matplotlib.pyplot as plt + >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}) + >>> fix, ax = plt.subplots() + >>> ax.axis('off') + (0.0, 1.0, 0.0, 1.0) + >>> table = pd.plotting.table(ax, df, loc='center', + ... 
cellLoc='center', colWidths=list([.2, .2])) + """ + plot_backend = _get_plot_backend("matplotlib") + return plot_backend.table( + ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs + ) + + +def register() -> None: + """ + Register pandas formatters and converters with matplotlib. + + This function modifies the global ``matplotlib.units.registry`` + dictionary. pandas adds custom converters for + + * pd.Timestamp + * pd.Period + * np.datetime64 + * datetime.datetime + * datetime.date + * datetime.time + + See Also + -------- + deregister_matplotlib_converters : Remove pandas formatters and converters. + + Examples + -------- + .. plot:: + :context: close-figs + + The following line is done automatically by pandas so + the plot can be rendered: + + >>> pd.plotting.register_matplotlib_converters() + + >>> df = pd.DataFrame({'ts': pd.period_range('2020', periods=2, freq='M'), + ... 'y': [1, 2] + ... }) + >>> plot = df.plot.line(x='ts', y='y') + + Unsetting the register manually an error will be raised: + + >>> pd.set_option("plotting.matplotlib.register_converters", + ... False) # doctest: +SKIP + >>> df.plot.line(x='ts', y='y') # doctest: +SKIP + Traceback (most recent call last): + TypeError: float() argument must be a string or a real number, not 'Period' + """ + plot_backend = _get_plot_backend("matplotlib") + plot_backend.register() + + +def deregister() -> None: + """ + Remove pandas formatters and converters. + + Removes the custom converters added by :func:`register`. This + attempts to set the state of the registry back to the state before + pandas registered its own units. Converters for pandas' own types like + Timestamp and Period are removed completely. Converters for types + pandas overwrites, like ``datetime.datetime``, are restored to their + original value. + + See Also + -------- + register_matplotlib_converters : Register pandas formatters and converters + with matplotlib. + + Examples + -------- + .. plot:: + :context: close-figs + + The following line is done automatically by pandas so + the plot can be rendered: + + >>> pd.plotting.register_matplotlib_converters() + + >>> df = pd.DataFrame({'ts': pd.period_range('2020', periods=2, freq='M'), + ... 'y': [1, 2] + ... }) + >>> plot = df.plot.line(x='ts', y='y') + + Unsetting the register manually an error will be raised: + + >>> pd.set_option("plotting.matplotlib.register_converters", + ... False) # doctest: +SKIP + >>> df.plot.line(x='ts', y='y') # doctest: +SKIP + Traceback (most recent call last): + TypeError: float() argument must be a string or a real number, not 'Period' + """ + plot_backend = _get_plot_backend("matplotlib") + plot_backend.deregister() + + +def scatter_matrix( + frame: DataFrame, + alpha: float = 0.5, + figsize: tuple[float, float] | None = None, + ax: Axes | None = None, + grid: bool = False, + diagonal: str = "hist", + marker: str = ".", + density_kwds: Mapping[str, Any] | None = None, + hist_kwds: Mapping[str, Any] | None = None, + range_padding: float = 0.05, + **kwargs, +) -> np.ndarray: + """ + Draw a matrix of scatter plots. + + Parameters + ---------- + frame : DataFrame + alpha : float, optional + Amount of transparency applied. + figsize : (float,float), optional + A tuple (width, height) in inches. + ax : Matplotlib axis object, optional + grid : bool, optional + Setting this to True will show the grid. + diagonal : {'hist', 'kde'} + Pick between 'kde' and 'hist' for either Kernel Density Estimation or + Histogram plot in the diagonal. 
+    marker : str, optional
+        Matplotlib marker type, default '.'.
+    density_kwds : keywords
+        Keyword arguments to be passed to kernel density estimate plot.
+    hist_kwds : keywords
+        Keyword arguments to be passed to hist function.
+    range_padding : float, default 0.05
+        Relative extension of axis range in x and y with respect to
+        (x_max - x_min) or (y_max - y_min).
+    **kwargs
+        Keyword arguments to be passed to scatter function.
+
+    Returns
+    -------
+    numpy.ndarray
+        A matrix of scatter plots.
+
+    Examples
+    --------
+
+    .. plot::
+        :context: close-figs
+
+        >>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
+        >>> pd.plotting.scatter_matrix(df, alpha=0.2)
+        array([[<Axes: xlabel='A', ylabel='A'>, <Axes: xlabel='B', ylabel='A'>,
+                <Axes: xlabel='C', ylabel='A'>, <Axes: xlabel='D', ylabel='A'>],
+               [<Axes: xlabel='A', ylabel='B'>, <Axes: xlabel='B', ylabel='B'>,
+                <Axes: xlabel='C', ylabel='B'>, <Axes: xlabel='D', ylabel='B'>],
+               [<Axes: xlabel='A', ylabel='C'>, <Axes: xlabel='B', ylabel='C'>,
+                <Axes: xlabel='C', ylabel='C'>, <Axes: xlabel='D', ylabel='C'>],
+               [<Axes: xlabel='A', ylabel='D'>, <Axes: xlabel='B', ylabel='D'>,
+                <Axes: xlabel='C', ylabel='D'>, <Axes: xlabel='D', ylabel='D'>]],
+              dtype=object)
+    """
+    plot_backend = _get_plot_backend("matplotlib")
+    return plot_backend.scatter_matrix(
+        frame=frame,
+        alpha=alpha,
+        figsize=figsize,
+        ax=ax,
+        grid=grid,
+        diagonal=diagonal,
+        marker=marker,
+        density_kwds=density_kwds,
+        hist_kwds=hist_kwds,
+        range_padding=range_padding,
+        **kwargs,
+    )
+
+
+def radviz(
+    frame: DataFrame,
+    class_column: str,
+    ax: Axes | None = None,
+    color: list[str] | tuple[str, ...] | None = None,
+    colormap: Colormap | str | None = None,
+    **kwds,
+) -> Axes:
+    """
+    Plot a multidimensional dataset in 2D.
+
+    Each Series in the DataFrame is represented as an evenly distributed
+    slice on a circle. Each data point is rendered in the circle according to
+    the value on each Series. Highly correlated `Series` in the `DataFrame`
+    are placed closer on the unit circle.
+
+    RadViz allows projecting an N-dimensional data set into a 2D space where
+    the influence of each dimension can be interpreted as a balance between
+    the influence of all dimensions.
+
+    More information is available in the `original article
+    <https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.889>`_
+    describing RadViz.
+
+    Parameters
+    ----------
+    frame : `DataFrame`
+        Object holding the data.
+    class_column : str
+        Column name containing the name of the data point category.
+    ax : :class:`matplotlib.axes.Axes`, optional
+        A plot instance to which to add the information.
+    color : list[str] or tuple[str], optional
+        Assign a color to each category. Example: ['blue', 'green'].
+    colormap : str or :class:`matplotlib.colors.Colormap`, default None
+        Colormap to select colors from. If a string, load colormap with that
+        name from matplotlib.
+    **kwds
+        Options to pass to matplotlib scatter plotting method.
+
+    Returns
+    -------
+    :class:`matplotlib.axes.Axes`
+
+    See Also
+    --------
+    pandas.plotting.andrews_curves : Plot clustering visualization.
+
+    Examples
+    --------
+
+    .. plot::
+        :context: close-figs
+
+        >>> df = pd.DataFrame(
+        ...     {
+        ...         'SepalLength': [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6, 6.7, 4.6],
+        ...         'SepalWidth': [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2, 3.3, 3.6],
+        ...         'PetalLength': [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4, 5.7, 1.0],
+        ...         'PetalWidth': [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2, 2.1, 0.2],
+        ...         'Category': [
+        ...             'virginica',
+        ...             'virginica',
+        ...             'setosa',
+        ...             'virginica',
+        ...             'virginica',
+        ...             'versicolor',
+        ...             'versicolor',
+        ...             'setosa',
+        ...             'virginica',
+        ...             'setosa'
+        ...         ]
+        ...     }
+        ...
) + >>> pd.plotting.radviz(df, 'Category') # doctest: +SKIP + """ + plot_backend = _get_plot_backend("matplotlib") + return plot_backend.radviz( + frame=frame, + class_column=class_column, + ax=ax, + color=color, + colormap=colormap, + **kwds, + ) + + +def andrews_curves( + frame: DataFrame, + class_column: str, + ax: Axes | None = None, + samples: int = 200, + color: list[str] | tuple[str, ...] | None = None, + colormap: Colormap | str | None = None, + **kwargs, +) -> Axes: + """ + Generate a matplotlib plot for visualising clusters of multivariate data. + + Andrews curves have the functional form: + + .. math:: + f(t) = \\frac{x_1}{\\sqrt{2}} + x_2 \\sin(t) + x_3 \\cos(t) + + x_4 \\sin(2t) + x_5 \\cos(2t) + \\cdots + + Where :math:`x` coefficients correspond to the values of each dimension + and :math:`t` is linearly spaced between :math:`-\\pi` and :math:`+\\pi`. + Each row of frame then corresponds to a single curve. + + Parameters + ---------- + frame : DataFrame + Data to be plotted, preferably normalized to (0.0, 1.0). + class_column : label + Name of the column containing class names. + ax : axes object, default None + Axes to use. + samples : int + Number of points to plot in each curve. + color : str, list[str] or tuple[str], optional + Colors to use for the different classes. Colors can be strings + or 3-element floating point RGB values. + colormap : str or matplotlib colormap object, default None + Colormap to select colors from. If a string, load colormap with that + name from matplotlib. + **kwargs + Options to pass to matplotlib plotting method. + + Returns + ------- + :class:`matplotlib.axes.Axes` + + Examples + -------- + + .. plot:: + :context: close-figs + + >>> df = pd.read_csv( + ... 'https://raw.githubusercontent.com/pandas-dev/' + ... 'pandas/main/pandas/tests/io/data/csv/iris.csv' + ... ) + >>> pd.plotting.andrews_curves(df, 'Name') # doctest: +SKIP + """ + plot_backend = _get_plot_backend("matplotlib") + return plot_backend.andrews_curves( + frame=frame, + class_column=class_column, + ax=ax, + samples=samples, + color=color, + colormap=colormap, + **kwargs, + ) + + +def bootstrap_plot( + series: Series, + fig: Figure | None = None, + size: int = 50, + samples: int = 500, + **kwds, +) -> Figure: + """ + Bootstrap plot on mean, median and mid-range statistics. + + The bootstrap plot is used to estimate the uncertainty of a statistic + by relying on random sampling with replacement [1]_. This function will + generate bootstrapping plots for mean, median and mid-range statistics + for the given number of samples of the given size. + + .. [1] "Bootstrapping (statistics)" in \ + https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29 + + Parameters + ---------- + series : pandas.Series + Series from where to get the samplings for the bootstrapping. + fig : matplotlib.figure.Figure, default None + If given, it will use the `fig` reference for plotting instead of + creating a new one with default parameters. + size : int, default 50 + Number of data points to consider during each sampling. It must be + less than or equal to the length of the `series`. + samples : int, default 500 + Number of times the bootstrap procedure is performed. + **kwds + Options to pass to matplotlib plotting method. + + Returns + ------- + matplotlib.figure.Figure + Matplotlib figure. + + See Also + -------- + pandas.DataFrame.plot : Basic plotting for DataFrame objects. + pandas.Series.plot : Basic plotting for Series objects. 
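+
+    Notes
+    -----
+    Conceptually, each bootstrap sample is a draw with replacement from the
+    series. A minimal sketch of how one of the statistics (the mid-range)
+    could be resampled, as an illustration rather than the actual
+    implementation::
+
+        import numpy as np
+
+        def midrange_samples(values, size, samples, seed=None):
+            rng = np.random.default_rng(seed)
+            stats = []
+            for _ in range(samples):
+                draw = rng.choice(values, size=size, replace=True)  # resample
+                stats.append((draw.max() + draw.min()) / 2)  # mid-range
+            return stats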
+
+    Examples
+    --------
+    This example draws a basic bootstrap plot for a Series.
+
+    .. plot::
+        :context: close-figs
+
+        >>> s = pd.Series(np.random.uniform(size=100))
+        >>> pd.plotting.bootstrap_plot(s)
+        <Figure size 640x480 with 6 Axes>
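+
+        A pre-created figure can be reused through the ``fig`` argument
+        (a minimal sketch, assuming ``matplotlib.pyplot`` is importable):
+
+        >>> import matplotlib.pyplot as plt
+        >>> fig = plt.figure(figsize=(8, 6))
+        >>> pd.plotting.bootstrap_plot(s, fig=fig, size=40, samples=200)  # doctest: +SKIP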
+    """
+    plot_backend = _get_plot_backend("matplotlib")
+    return plot_backend.bootstrap_plot(
+        series=series, fig=fig, size=size, samples=samples, **kwds
+    )
+
+
+def parallel_coordinates(
+    frame: DataFrame,
+    class_column: str,
+    cols: list[str] | None = None,
+    ax: Axes | None = None,
+    color: list[str] | tuple[str, ...] | None = None,
+    use_columns: bool = False,
+    xticks: list | tuple | None = None,
+    colormap: Colormap | str | None = None,
+    axvlines: bool = True,
+    axvlines_kwds: Mapping[str, Any] | None = None,
+    sort_labels: bool = False,
+    **kwargs,
+) -> Axes:
+    """
+    Parallel coordinates plotting.
+
+    Parameters
+    ----------
+    frame : DataFrame
+    class_column : str
+        Column name containing class names.
+    cols : list, optional
+        A list of column names to use.
+    ax : matplotlib.axis, optional
+        Matplotlib axis object.
+    color : list or tuple, optional
+        Colors to use for the different classes.
+    use_columns : bool, optional
+        If true, columns will be used as xticks.
+    xticks : list or tuple, optional
+        A list of values to use for xticks.
+    colormap : str or matplotlib colormap, default None
+        Colormap to use for line colors.
+    axvlines : bool, optional
+        If true, vertical lines will be added at each xtick.
+    axvlines_kwds : keywords, optional
+        Options to be passed to axvline method for vertical lines.
+    sort_labels : bool, default False
+        Sort class_column labels, useful when assigning colors.
+    **kwargs
+        Options to pass to matplotlib plotting method.
+
+    Returns
+    -------
+    matplotlib.axes.Axes
+
+    Examples
+    --------
+
+    .. plot::
+        :context: close-figs
+
+        >>> df = pd.read_csv(
+        ...     'https://raw.githubusercontent.com/pandas-dev/'
+        ...     'pandas/main/pandas/tests/io/data/csv/iris.csv'
+        ... )
+        >>> pd.plotting.parallel_coordinates(
+        ...     df, 'Name', color=('#556270', '#4ECDC4', '#C7F464')
+        ... )  # doctest: +SKIP
+    """
+    plot_backend = _get_plot_backend("matplotlib")
+    return plot_backend.parallel_coordinates(
+        frame=frame,
+        class_column=class_column,
+        cols=cols,
+        ax=ax,
+        color=color,
+        use_columns=use_columns,
+        xticks=xticks,
+        colormap=colormap,
+        axvlines=axvlines,
+        axvlines_kwds=axvlines_kwds,
+        sort_labels=sort_labels,
+        **kwargs,
+    )
+
+
+def lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Axes:
+    """
+    Lag plot for time series.
+
+    Parameters
+    ----------
+    series : Series
+        The time series to visualize.
+    lag : int, default 1
+        Lag length of the scatter plot.
+    ax : Matplotlib axis object, optional
+        The matplotlib axis object to use.
+    **kwds
+        Matplotlib scatter method keyword arguments.
+
+    Returns
+    -------
+    matplotlib.axes.Axes
+
+    Examples
+    --------
+    Lag plots are most commonly used to look for patterns in time series data.
+
+    Given the following time series
+
+    .. plot::
+        :context: close-figs
+
+        >>> np.random.seed(5)
+        >>> x = np.cumsum(np.random.normal(loc=1, scale=5, size=50))
+        >>> s = pd.Series(x)
+        >>> s.plot()  # doctest: +SKIP
+
+    A lag plot with ``lag=1`` returns
+
+    .. plot::
+        :context: close-figs
+
+        >>> pd.plotting.lag_plot(s, lag=1)
+        <Axes: xlabel='y(t)', ylabel='y(t + 1)'>
+    """
+    plot_backend = _get_plot_backend("matplotlib")
+    return plot_backend.lag_plot(series=series, lag=lag, ax=ax, **kwds)
+
+
+def autocorrelation_plot(series: Series, ax: Axes | None = None, **kwargs) -> Axes:
+    """
+    Autocorrelation plot for time series.
+
+    Parameters
+    ----------
+    series : Series
+        The time series to visualize.
+    ax : Matplotlib axis object, optional
+        The matplotlib axis object to use.
+    **kwargs
+        Options to pass to matplotlib plotting method.
+
+    Returns
+    -------
+    matplotlib.axes.Axes
+
+    Examples
+    --------
+    The horizontal lines in the plot correspond to the 95% and 99% confidence
+    bands; the dashed line is the 99% confidence band.
+
+    .. plot::
+        :context: close-figs
+
+        >>> spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000)
+        >>> s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing))
+        >>> pd.plotting.autocorrelation_plot(s)  # doctest: +SKIP
+    """
+    plot_backend = _get_plot_backend("matplotlib")
+    return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs)
+
+
+class _Options(dict):
+    """
+    Stores pandas plotting options.
+
+    Allows for parameter aliasing so you can just use parameter names that are
+    the same as the plot function parameters, but is stored in a canonical
+    format that makes it easy to break down into groups later.
+
+    Examples
+    --------
+
+    .. plot::
+        :context: close-figs
+
+        >>> np.random.seed(42)
+        >>> df = pd.DataFrame({'A': np.random.randn(10),
+        ...                    'B': np.random.randn(10)},
+        ...                   index=pd.date_range("1/1/2000",
+        ...                                       freq='4MS', periods=10))
+        >>> with pd.plotting.plot_params.use("x_compat", True):
+        ...     _ = df["A"].plot(color="r")
+        ...     _ = df["B"].plot(color="g")
+    """
+
+    # alias so the names are the same as the plotting method parameter names
+    _ALIASES = {"x_compat": "xaxis.compat"}
+    _DEFAULT_KEYS = ["xaxis.compat"]
+
+    def __init__(self, deprecated: bool = False) -> None:
+        self._deprecated = deprecated
+        super().__setitem__("xaxis.compat", False)
+
+    def __getitem__(self, key):
+        key = self._get_canonical_key(key)
+        if key not in self:
+            raise ValueError(f"{key} is not a valid pandas plotting option")
+        return super().__getitem__(key)
+
+    def __setitem__(self, key, value) -> None:
+        key = self._get_canonical_key(key)
+        super().__setitem__(key, value)
+
+    def __delitem__(self, key) -> None:
+        key = self._get_canonical_key(key)
+        if key in self._DEFAULT_KEYS:
+            raise ValueError(f"Cannot remove default parameter {key}")
+        super().__delitem__(key)
+
+    def __contains__(self, key) -> bool:
+        key = self._get_canonical_key(key)
+        return super().__contains__(key)
+
+    def reset(self) -> None:
+        """
+        Reset the option store to its initial state.
+
+        Returns
+        -------
+        None
+        """
+        # error: Cannot access "__init__" directly
+        self.__init__()  # type: ignore[misc]
+
+    def _get_canonical_key(self, key):
+        return self._ALIASES.get(key, key)
+
+    @contextmanager
+    def use(self, key, value) -> Generator[_Options, None, None]:
+        """
+        Temporarily set a parameter value using the with statement.
+        Aliasing allowed.
+        """
+        old_value = self[key]
+        try:
+            self[key] = value
+            yield self
+        finally:
+            self[key] = old_value
+
+
+plot_params = _Options()
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/pyproject.toml b/dbdpy-env/lib/python3.9/site-packages/pandas/pyproject.toml
new file mode 100644
index 00000000..77b49d44
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/pyproject.toml
@@ -0,0 +1,791 @@
+[build-system]
+# Minimum requirements for the build system to execute.
+# See https://github.com/scipy/scipy/pull/12940 for the AIX issue.
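+# Note: a PEP 517 front-end (e.g. `pip wheel .`, or
+# `pip install --no-binary :all: pandas` to force a source build) installs
+# exactly the pins below into an isolated environment before building;
+# the runtime numpy floor is declared separately under [project] dependencies.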
+requires = [ + "meson-python==0.13.1", + "meson==1.2.1", + "wheel", + "Cython>=0.29.33,<3", # Note: sync with setup.py, environment.yml and asv.conf.json + # Note: numpy 1.25 has a backwards compatible C API by default + # we don't want to force users to compile with 1.25 though + # (Ideally, in the future, though, oldest-supported-numpy can be dropped when our min numpy is 1.25.x) + "oldest-supported-numpy>=2022.8.16; python_version<'3.12'", + "numpy>=1.26.0,<2; python_version>='3.12'", + "versioneer[toml]" +] + +build-backend = "mesonpy" + +[project] +name = 'pandas' +dynamic = [ + 'version' +] +description = 'Powerful data structures for data analysis, time series, and statistics' +readme = 'README.md' +authors = [ + { name = 'The Pandas Development Team', email='pandas-dev@python.org' }, +] +license = {file = 'LICENSE'} +requires-python = '>=3.9' +dependencies = [ + "numpy>=1.22.4,<2; python_version<'3.11'", + "numpy>=1.23.2,<2; python_version=='3.11'", + "numpy>=1.26.0,<2; python_version>='3.12'", + "python-dateutil>=2.8.2", + "pytz>=2020.1", + "tzdata>=2022.1" +] +classifiers = [ + 'Development Status :: 5 - Production/Stable', + 'Environment :: Console', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: BSD License', + 'Operating System :: OS Independent', + 'Programming Language :: Cython', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Topic :: Scientific/Engineering' +] + +[project.urls] +homepage = 'https://pandas.pydata.org' +documentation = 'https://pandas.pydata.org/docs/' +repository = 'https://github.com/pandas-dev/pandas' + +[project.entry-points."pandas_plotting_backends"] +matplotlib = "pandas:plotting._matplotlib" + +[project.optional-dependencies] +test = ['hypothesis>=6.46.1', 'pytest>=7.3.2', 'pytest-xdist>=2.2.0'] +performance = ['bottleneck>=1.3.4', 'numba>=0.55.2', 'numexpr>=2.8.0'] +computation = ['scipy>=1.8.1', 'xarray>=2022.03.0'] +fss = ['fsspec>=2022.05.0'] +aws = ['s3fs>=2022.05.0'] +gcp = ['gcsfs>=2022.05.0', 'pandas-gbq>=0.17.5'] +excel = ['odfpy>=1.4.1', 'openpyxl>=3.0.10', 'pyxlsb>=1.0.9', 'xlrd>=2.0.1', 'xlsxwriter>=3.0.3'] +parquet = ['pyarrow>=7.0.0'] +feather = ['pyarrow>=7.0.0'] +hdf5 = [# blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297) + #'blosc>=1.20.1', + 'tables>=3.7.0'] +spss = ['pyreadstat>=1.1.5'] +postgresql = ['SQLAlchemy>=1.4.36', 'psycopg2>=2.9.3'] +mysql = ['SQLAlchemy>=1.4.36', 'pymysql>=1.0.2'] +sql-other = ['SQLAlchemy>=1.4.36'] +html = ['beautifulsoup4>=4.11.1', 'html5lib>=1.1', 'lxml>=4.8.0'] +xml = ['lxml>=4.8.0'] +plot = ['matplotlib>=3.6.1'] +output-formatting = ['jinja2>=3.1.2', 'tabulate>=0.8.10'] +clipboard = ['PyQt5>=5.15.6', 'qtpy>=2.2.0'] +compression = ['zstandard>=0.17.0'] +consortium-standard = ['dataframe-api-compat>=0.1.7'] +all = ['beautifulsoup4>=4.11.1', + # blosc only available on conda (https://github.com/Blosc/python-blosc/issues/297) + #'blosc>=1.21.0', + 'bottleneck>=1.3.4', + 'dataframe-api-compat>=0.1.7', + 'fastparquet>=0.8.1', + 'fsspec>=2022.05.0', + 'gcsfs>=2022.05.0', + 'html5lib>=1.1', + 'hypothesis>=6.46.1', + 'jinja2>=3.1.2', + 'lxml>=4.8.0', + 'matplotlib>=3.6.1', + 'numba>=0.55.2', + 'numexpr>=2.8.0', + 'odfpy>=1.4.1', + 'openpyxl>=3.0.10', + 'pandas-gbq>=0.17.5', + 'psycopg2>=2.9.3', + 'pyarrow>=7.0.0', + 'pymysql>=1.0.2', + 
'PyQt5>=5.15.6', + 'pyreadstat>=1.1.5', + 'pytest>=7.3.2', + 'pytest-xdist>=2.2.0', + 'pyxlsb>=1.0.9', + 'qtpy>=2.2.0', + 'scipy>=1.8.1', + 's3fs>=2022.05.0', + 'SQLAlchemy>=1.4.36', + 'tables>=3.7.0', + 'tabulate>=0.8.10', + 'xarray>=2022.03.0', + 'xlrd>=2.0.1', + 'xlsxwriter>=3.0.3', + 'zstandard>=0.17.0'] + +# TODO: Remove after setuptools support is dropped. +[tool.setuptools] +include-package-data = true + +[tool.setuptools.packages.find] +include = ["pandas", "pandas.*"] +namespaces = false + +[tool.setuptools.exclude-package-data] +"*" = ["*.c", "*.h"] + +# See the docstring in versioneer.py for instructions. Note that you must +# re-run 'versioneer.py setup' after changing this section, and commit the +# resulting files. +[tool.versioneer] +VCS = "git" +style = "pep440" +versionfile_source = "pandas/_version.py" +versionfile_build = "pandas/_version.py" +tag_prefix = "v" +parentdir_prefix = "pandas-" + +[tool.meson-python.args] +setup = ['--vsenv'] # For Windows + +[tool.cibuildwheel] +skip = "cp36-* cp37-* cp38-* pp* *_i686 *_ppc64le *_s390x *-musllinux_aarch64" +build-verbosity = "3" +environment = {LDFLAGS="-Wl,--strip-all"} +test-requires = "hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0" +test-command = """ + PANDAS_CI='1' python -c 'import pandas as pd; \ + pd.test(extra_args=["-m not clipboard and not single_cpu and not slow and not network and not db", "-n 2", "--no-strict-data-files"]); \ + pd.test(extra_args=["-m not clipboard and single_cpu and not slow and not network and not db", "--no-strict-data-files"]);' \ + """ + +[tool.cibuildwheel.macos] +archs = "x86_64 arm64" +test-skip = "*_arm64" + +[tool.cibuildwheel.windows] +before-build = "pip install delvewheel" +repair-wheel-command = "delvewheel repair -w {dest_dir} {wheel}" + +[[tool.cibuildwheel.overrides]] +select = "*-musllinux*" +before-test = "apk update && apk add musl-locales" + +[[tool.cibuildwheel.overrides]] +select = "*-win*" +# We test separately for Windows, since we use +# the windowsservercore docker image to check if any dlls are +# missing from the wheel +test-command = "" + +[[tool.cibuildwheel.overrides]] +# Don't strip wheels on macOS. 
+# macOS doesn't support stripping wheels with linker +# https://github.com/MacPython/numpy-wheels/pull/87#issuecomment-624878264 +select = "*-macosx*" +environment = {CFLAGS="-g0"} + +[tool.black] +target-version = ['py39', 'py310'] +required-version = '23.7.0' +exclude = ''' +( + asv_bench/env + | \.egg + | \.git + | \.hg + | \.mypy_cache + | \.nox + | \.tox + | \.venv + | _build + | buck-out + | build + | dist + | setup.py +) +''' + +[tool.ruff] +line-length = 88 +target-version = "py310" +fix = true +unfixable = [] + +select = [ + # pyflakes + "F", + # pycodestyle + "E", "W", + # flake8-2020 + "YTT", + # flake8-bugbear + "B", + # flake8-quotes + "Q", + # flake8-debugger + "T10", + # flake8-gettext + "INT", + # pylint + "PLC", "PLE", "PLR", "PLW", + # misc lints + "PIE", + # flake8-pyi + "PYI", + # tidy imports + "TID", + # implicit string concatenation + "ISC", + # type-checking imports + "TCH", + # comprehensions + "C4", + # pygrep-hooks + "PGH", + # Ruff-specific rules + "RUF", + # flake8-bandit: exec-builtin + "S102", + # numpy-legacy-random + "NPY002", + # Perflint + "PERF", +] + +ignore = [ + ### Intentionally disabled + # space before : (needed for how black formats slicing) + "E203", + # module level import not at top of file + "E402", + # do not assign a lambda expression, use a def + "E731", + # line break before binary operator + # "W503", # not yet implemented + # line break after binary operator + # "W504", # not yet implemented + # controversial + "B006", + # controversial + "B007", + # controversial + "B008", + # setattr is used to side-step mypy + "B009", + # getattr is used to side-step mypy + "B010", + # tests use assert False + "B011", + # tests use comparisons but not their returned value + "B015", + # false positives + "B019", + # Loop control variable overrides iterable it iterates + "B020", + # Function definition does not bind loop variable + "B023", + # Functions defined inside a loop must not use variables redefined in the loop + # "B301", # not yet implemented + # Only works with python >=3.10 + "B905", + # Too many arguments to function call + "PLR0913", + # Too many returns + "PLR0911", + # Too many branches + "PLR0912", + # Too many statements + "PLR0915", + # Redefined loop name + "PLW2901", + # Global statements are discouraged + "PLW0603", + # Docstrings should not be included in stubs + "PYI021", + # No builtin `eval()` allowed + "PGH001", + # compare-to-empty-string + "PLC1901", + # Use typing_extensions.TypeAlias for type aliases + # "PYI026", # not yet implemented + # Use "collections.abc.*" instead of "typing.*" (PEP 585 syntax) + # "PYI027", # not yet implemented + # while int | float can be shortened to float, the former is more explicit + # "PYI041", # not yet implemented + # incorrect-dict-iterator, flags valid Series.items usage + "PERF102", + # try-except-in-loop, becomes useless in Python 3.11 + "PERF203", + + + ### TODO: Enable gradually + # Useless statement + "B018", + # Within an except clause, raise exceptions with ... 
+ "B904", + # Magic number + "PLR2004", + # comparison-with-itself + "PLR0124", + # Consider `elif` instead of `else` then `if` to remove indentation level + "PLR5501", + # collection-literal-concatenation + "RUF005", + # pairwise-over-zipped (>=PY310 only) + "RUF007", + # explicit-f-string-type-conversion + "RUF010", + # mutable-class-default + "RUF012" +] + +exclude = [ + "doc/sphinxext/*.py", + "doc/build/*.py", + "doc/temp/*.py", + ".eggs/*.py", + # vendored files + "pandas/util/version/*", + "pandas/io/clipboard/__init__.py", + # exclude asv benchmark environments from linting + "env", +] + +[tool.ruff.per-file-ignores] +# relative imports allowed for asv_bench +"asv_bench/*" = ["TID", "NPY002"] +# to be enabled gradually +"pandas/core/*" = ["PLR5501"] +"pandas/tests/*" = ["B028"] +"scripts/*" = ["B028"] +# Keep this one enabled +"pandas/_typing.py" = ["TCH"] + +[tool.pylint.messages_control] +max-line-length = 88 +disable = [ + # intentionally turned off + "bad-mcs-classmethod-argument", + "broad-except", + "c-extension-no-member", + "comparison-with-itself", + "consider-using-enumerate", + "import-error", + "import-outside-toplevel", + "invalid-name", + "invalid-unary-operand-type", + "line-too-long", + "no-else-continue", + "no-else-raise", + "no-else-return", + "no-member", + "no-name-in-module", + "not-an-iterable", + "overridden-final-method", + "pointless-statement", + "redundant-keyword-arg", + "singleton-comparison", + "too-many-ancestors", + "too-many-arguments", + "too-many-boolean-expressions", + "too-many-branches", + "too-many-function-args", + "too-many-instance-attributes", + "too-many-locals", + "too-many-nested-blocks", + "too-many-public-methods", + "too-many-return-statements", + "too-many-statements", + "unexpected-keyword-arg", + "ungrouped-imports", + "unsubscriptable-object", + "unsupported-assignment-operation", + "unsupported-membership-test", + "unused-import", + "use-dict-literal", + "use-implicit-booleaness-not-comparison", + "use-implicit-booleaness-not-len", + "wrong-import-order", + "wrong-import-position", + "redefined-loop-name", + + # misc + "abstract-class-instantiated", + "no-value-for-parameter", + "undefined-variable", + "unpacking-non-sequence", + "used-before-assignment", + + # pylint type "C": convention, for programming standard violation + "missing-class-docstring", + "missing-function-docstring", + "missing-module-docstring", + "superfluous-parens", + "too-many-lines", + "unidiomatic-typecheck", + "unnecessary-dunder-call", + "unnecessary-lambda-assignment", + + # pylint type "R": refactor, for bad code smell + "consider-using-with", + "cyclic-import", + "duplicate-code", + "inconsistent-return-statements", + "redefined-argument-from-local", + "too-few-public-methods", + + # pylint type "W": warning, for python specific problems + "abstract-method", + "arguments-differ", + "arguments-out-of-order", + "arguments-renamed", + "attribute-defined-outside-init", + "broad-exception-raised", + "comparison-with-callable", + "dangerous-default-value", + "deprecated-module", + "eval-used", + "expression-not-assigned", + "fixme", + "global-statement", + "invalid-overridden-method", + "keyword-arg-before-vararg", + "possibly-unused-variable", + "protected-access", + "raise-missing-from", + "redefined-builtin", + "redefined-outer-name", + "self-cls-assignment", + "signature-differs", + "super-init-not-called", + "try-except-raise", + "unnecessary-lambda", + "unused-argument", + "unused-variable", + "using-constant-test" +] + +[tool.pytest.ini_options] +# 
sync minversion with pyproject.toml & install.rst +minversion = "7.3.2" +addopts = "--strict-markers --strict-config --capture=no --durations=30 --junitxml=test-data.xml" +empty_parameter_set_mark = "fail_at_collect" +xfail_strict = true +testpaths = "pandas" +doctest_optionflags = [ + "NORMALIZE_WHITESPACE", + "IGNORE_EXCEPTION_DETAIL", + "ELLIPSIS", +] +filterwarnings = [ + "error:::pandas", + "error::ResourceWarning", + "error::pytest.PytestUnraisableExceptionWarning", + # TODO(PY311-minimum): Specify EncodingWarning + # Ignore 3rd party EncodingWarning but raise on pandas' + "ignore:.*encoding.* argument not specified", + "error:.*encoding.* argument not specified::pandas", + "ignore:.*ssl.SSLSocket:pytest.PytestUnraisableExceptionWarning", + "ignore:.*ssl.SSLSocket:ResourceWarning", + # GH 44844: Can remove once minimum matplotlib version >= 3.7 + "ignore:.*FileIO:pytest.PytestUnraisableExceptionWarning", + "ignore:.*BufferedRandom:ResourceWarning", + "ignore::ResourceWarning:asyncio", + # From plotting doctests + "ignore:More than 20 figures have been opened:RuntimeWarning", + # Will be fixed in numba 0.56: https://github.com/numba/numba/issues/7758 + "ignore:`np.MachAr` is deprecated:DeprecationWarning:numba", + "ignore:.*urllib3:DeprecationWarning:botocore", + "ignore:Setuptools is replacing distutils.:UserWarning:_distutils_hack", + # https://github.com/PyTables/PyTables/issues/822 + "ignore:a closed node found in the registry:UserWarning:tables", + "ignore:`np.object` is a deprecated:DeprecationWarning:tables", + "ignore:tostring:DeprecationWarning:tables", + "ignore:distutils Version classes are deprecated:DeprecationWarning:pandas_datareader", + "ignore:distutils Version classes are deprecated:DeprecationWarning:numexpr", + "ignore:distutils Version classes are deprecated:DeprecationWarning:fastparquet", + "ignore:distutils Version classes are deprecated:DeprecationWarning:fsspec", + # Can be removed once https://github.com/numpy/numpy/pull/24794 is merged + "ignore:.*In the future `np.long` will be defined as.*:FutureWarning", +] +junit_family = "xunit2" +markers = [ + "single_cpu: tests that should run on a single cpu only", + "slow: mark a test as slow", + "network: mark a test as network", + "db: tests requiring a database (mysql or postgres)", + "clipboard: mark a pd.read_clipboard test", + "arm_slow: mark a test as slow for arm64 architecture", + "arraymanager: mark a test to run with ArrayManager enabled", +] + +[tool.mypy] +# Import discovery +mypy_path = "typings" +files = ["pandas", "typings"] +namespace_packages = false +explicit_package_bases = false +ignore_missing_imports = true +follow_imports = "normal" +follow_imports_for_stubs = false +no_site_packages = false +no_silence_site_packages = false +# Platform configuration +python_version = "3.11" +platform = "linux-64" +# Disallow dynamic typing +disallow_any_unimported = false # TODO +disallow_any_expr = false # TODO +disallow_any_decorated = false # TODO +disallow_any_explicit = false # TODO +disallow_any_generics = false # TODO +disallow_subclassing_any = false # TODO +# Untyped definitions and calls +disallow_untyped_calls = true +disallow_untyped_defs = true +disallow_incomplete_defs = true +check_untyped_defs = true +disallow_untyped_decorators = true +# None and Optional handling +no_implicit_optional = true +strict_optional = true +# Configuring warnings +warn_redundant_casts = true +warn_unused_ignores = true +warn_no_return = true +warn_return_any = false # TODO +warn_unreachable = false # GH#27396 +# 
Suppressing errors +ignore_errors = false +enable_error_code = "ignore-without-code" +# Miscellaneous strictness flags +allow_untyped_globals = false +allow_redefinition = false +local_partial_types = false +implicit_reexport = true +strict_equality = true +# Configuring error messages +show_error_context = false +show_column_numbers = false +show_error_codes = true + +[[tool.mypy.overrides]] +module = [ + "pandas._config.config", # TODO + "pandas._libs.*", + "pandas._testing.*", # TODO + "pandas.arrays", # TODO + "pandas.compat.numpy.function", # TODO + "pandas.compat._optional", # TODO + "pandas.compat.compressors", # TODO + "pandas.compat.pickle_compat", # TODO + "pandas.core._numba.executor", # TODO + "pandas.core.array_algos.datetimelike_accumulations", # TODO + "pandas.core.array_algos.masked_accumulations", # TODO + "pandas.core.array_algos.masked_reductions", # TODO + "pandas.core.array_algos.putmask", # TODO + "pandas.core.array_algos.quantile", # TODO + "pandas.core.array_algos.replace", # TODO + "pandas.core.array_algos.take", # TODO + "pandas.core.arrays.*", # TODO + "pandas.core.computation.*", # TODO + "pandas.core.dtypes.astype", # TODO + "pandas.core.dtypes.cast", # TODO + "pandas.core.dtypes.common", # TODO + "pandas.core.dtypes.concat", # TODO + "pandas.core.dtypes.dtypes", # TODO + "pandas.core.dtypes.generic", # TODO + "pandas.core.dtypes.inference", # TODO + "pandas.core.dtypes.missing", # TODO + "pandas.core.groupby.categorical", # TODO + "pandas.core.groupby.generic", # TODO + "pandas.core.groupby.grouper", # TODO + "pandas.core.groupby.groupby", # TODO + "pandas.core.groupby.ops", # TODO + "pandas.core.indexers.*", # TODO + "pandas.core.indexes.*", # TODO + "pandas.core.interchange.column", # TODO + "pandas.core.interchange.dataframe_protocol", # TODO + "pandas.core.interchange.from_dataframe", # TODO + "pandas.core.internals.*", # TODO + "pandas.core.methods.*", # TODO + "pandas.core.ops.array_ops", # TODO + "pandas.core.ops.common", # TODO + "pandas.core.ops.invalid", # TODO + "pandas.core.ops.mask_ops", # TODO + "pandas.core.ops.missing", # TODO + "pandas.core.reshape.*", # TODO + "pandas.core.strings.*", # TODO + "pandas.core.tools.*", # TODO + "pandas.core.window.common", # TODO + "pandas.core.window.ewm", # TODO + "pandas.core.window.expanding", # TODO + "pandas.core.window.numba_", # TODO + "pandas.core.window.online", # TODO + "pandas.core.window.rolling", # TODO + "pandas.core.accessor", # TODO + "pandas.core.algorithms", # TODO + "pandas.core.apply", # TODO + "pandas.core.arraylike", # TODO + "pandas.core.base", # TODO + "pandas.core.common", # TODO + "pandas.core.config_init", # TODO + "pandas.core.construction", # TODO + "pandas.core.flags", # TODO + "pandas.core.frame", # TODO + "pandas.core.generic", # TODO + "pandas.core.indexing", # TODO + "pandas.core.missing", # TODO + "pandas.core.nanops", # TODO + "pandas.core.resample", # TODO + "pandas.core.roperator", # TODO + "pandas.core.sample", # TODO + "pandas.core.series", # TODO + "pandas.core.sorting", # TODO + "pandas.errors", # TODO + "pandas.io.clipboard", # TODO + "pandas.io.excel._base", # TODO + "pandas.io.excel._odfreader", # TODO + "pandas.io.excel._odswriter", # TODO + "pandas.io.excel._openpyxl", # TODO + "pandas.io.excel._pyxlsb", # TODO + "pandas.io.excel._xlrd", # TODO + "pandas.io.excel._xlsxwriter", # TODO + "pandas.io.formats.console", # TODO + "pandas.io.formats.css", # TODO + "pandas.io.formats.excel", # TODO + "pandas.io.formats.format", # TODO + "pandas.io.formats.info", # TODO + 
"pandas.io.formats.printing", # TODO + "pandas.io.formats.style", # TODO + "pandas.io.formats.style_render", # TODO + "pandas.io.formats.xml", # TODO + "pandas.io.json.*", # TODO + "pandas.io.parsers.*", # TODO + "pandas.io.sas.sas_xport", # TODO + "pandas.io.sas.sas7bdat", # TODO + "pandas.io.clipboards", # TODO + "pandas.io.common", # TODO + "pandas.io.gbq", # TODO + "pandas.io.html", # TODO + "pandas.io.gbq", # TODO + "pandas.io.parquet", # TODO + "pandas.io.pytables", # TODO + "pandas.io.sql", # TODO + "pandas.io.stata", # TODO + "pandas.io.xml", # TODO + "pandas.plotting.*", # TODO + "pandas.tests.*", + "pandas.tseries.frequencies", # TODO + "pandas.tseries.holiday", # TODO + "pandas.util._decorators", # TODO + "pandas.util._doctools", # TODO + "pandas.util._print_versions", # TODO + "pandas.util._test_decorators", # TODO + "pandas.util._validators", # TODO + "pandas.util", # TODO + "pandas._version", + "pandas.conftest", + "pandas" +] +disallow_untyped_calls = false +disallow_untyped_defs = false +disallow_incomplete_defs = false + +[[tool.mypy.overrides]] +module = [ + "pandas.tests.*", + "pandas._version", + "pandas.io.clipboard", +] +check_untyped_defs = false + +[[tool.mypy.overrides]] +module = [ + "pandas.tests.apply.test_series_apply", + "pandas.tests.arithmetic.conftest", + "pandas.tests.arrays.sparse.test_combine_concat", + "pandas.tests.dtypes.test_common", + "pandas.tests.frame.methods.test_to_records", + "pandas.tests.groupby.test_rank", + "pandas.tests.groupby.transform.test_transform", + "pandas.tests.indexes.interval.test_interval", + "pandas.tests.indexing.test_categorical", + "pandas.tests.io.excel.test_writers", + "pandas.tests.reductions.test_reductions", + "pandas.tests.test_expressions", +] +ignore_errors = true + +# To be kept consistent with "Import Formatting" section in contributing.rst +[tool.isort] +known_pre_libs = "pandas._config" +known_pre_core = ["pandas._libs", "pandas._typing", "pandas.util._*", "pandas.compat", "pandas.errors"] +known_dtypes = "pandas.core.dtypes" +known_post_core = ["pandas.tseries", "pandas.io", "pandas.plotting"] +sections = ["FUTURE", "STDLIB", "THIRDPARTY" ,"PRE_LIBS" , "PRE_CORE", "DTYPES", "FIRSTPARTY", "POST_CORE", "LOCALFOLDER"] +profile = "black" +combine_as_imports = true +force_grid_wrap = 2 +force_sort_within_sections = true +skip_glob = "env" +skip = "pandas/__init__.py" + +[tool.pyright] +pythonVersion = "3.11" +typeCheckingMode = "basic" +useLibraryCodeForTypes = false +include = ["pandas", "typings"] +exclude = ["pandas/tests", "pandas/io/clipboard", "pandas/util/version"] +# enable subset of "strict" +reportDuplicateImport = true +reportInconsistentConstructor = true +reportInvalidStubStatement = true +reportOverlappingOverload = true +reportPropertyTypeMismatch = true +reportUntypedClassDecorator = true +reportUntypedFunctionDecorator = true +reportUntypedNamedTuple = true +reportUnusedImport = true +# disable subset of "basic" +reportGeneralTypeIssues = false +reportMissingModuleSource = false +reportOptionalCall = false +reportOptionalIterable = false +reportOptionalMemberAccess = false +reportOptionalOperand = false +reportOptionalSubscript = false +reportPrivateImportUsage = false +reportUnboundVariable = false + +[tool.coverage.run] +branch = true +omit = ["pandas/_typing.py", "pandas/_version.py"] +plugins = ["Cython.Coverage"] +source = ["pandas"] + +[tool.coverage.report] +ignore_errors = false +show_missing = true +omit = ["pandas/_version.py"] +exclude_lines = [ + # Have to re-enable the standard pragma 
+ "pragma: no cover", + # Don't complain about missing debug-only code:s + "def __repr__", + "if self.debug", + # Don't complain if tests don't hit defensive assertion code: + "raise AssertionError", + "raise NotImplementedError", + "AbstractMethodError", + # Don't complain if non-runnable code isn't run: + "if 0:", + "if __name__ == .__main__.:", + "if TYPE_CHECKING:", +] + +[tool.coverage.html] +directory = "coverage_html_report" + +[tool.codespell] +ignore-words-list = "blocs, coo, hist, nd, sav, ser, recuse, nin, timere" +ignore-regex = 'https://([\w/\.])+' diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/testing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/testing.py new file mode 100644 index 00000000..841b55df --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/testing.py @@ -0,0 +1,18 @@ +""" +Public testing utility functions. +""" + + +from pandas._testing import ( + assert_extension_array_equal, + assert_frame_equal, + assert_index_equal, + assert_series_equal, +) + +__all__ = [ + "assert_extension_array_equal", + "assert_frame_equal", + "assert_series_equal", + "assert_index_equal", +] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/api/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/api/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/api/test_api.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/api/test_api.py new file mode 100644 index 00000000..60bcb97a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/api/test_api.py @@ -0,0 +1,383 @@ +from __future__ import annotations + +import pytest + +import pandas as pd +from pandas import api +import pandas._testing as tm +from pandas.api import ( + extensions as api_extensions, + indexers as api_indexers, + interchange as api_interchange, + types as api_types, + typing as api_typing, +) + + +class Base: + def check(self, namespace, expected, ignored=None): + # see which names are in the namespace, minus optional + # ignored ones + # compare vs the expected + + result = sorted( + f for f in dir(namespace) if not f.startswith("__") and f != "annotations" + ) + if ignored is not None: + result = sorted(set(result) - set(ignored)) + + expected = sorted(expected) + tm.assert_almost_equal(result, expected) + + +class TestPDApi(Base): + # these are optionally imported based on testing + # & need to be ignored + ignored = ["tests", "locale", "conftest", "_version_meson"] + + # top-level sub-packages + public_lib = [ + "api", + "arrays", + "options", + "test", + "testing", + "errors", + "plotting", + "io", + "tseries", + ] + private_lib = ["compat", "core", "pandas", "util", "_built_with_meson"] + + # misc + misc = ["IndexSlice", "NaT", "NA"] + + # top-level classes + classes = [ + "ArrowDtype", + "Categorical", + "CategoricalIndex", + "DataFrame", + "DateOffset", + "DatetimeIndex", + "ExcelFile", + "ExcelWriter", + "Flags", + "Grouper", + "HDFStore", + "Index", + "MultiIndex", + "Period", + "PeriodIndex", + "RangeIndex", + "Series", + "SparseDtype", + "StringDtype", + "Timedelta", + "TimedeltaIndex", + "Timestamp", + "Interval", + "IntervalIndex", + "CategoricalDtype", + "PeriodDtype", + "IntervalDtype", + "DatetimeTZDtype", + "BooleanDtype", + "Int8Dtype", + "Int16Dtype", + "Int32Dtype", + 
"Int64Dtype", + "UInt8Dtype", + "UInt16Dtype", + "UInt32Dtype", + "UInt64Dtype", + "Float32Dtype", + "Float64Dtype", + "NamedAgg", + ] + + # these are already deprecated; awaiting removal + deprecated_classes: list[str] = [] + + # external modules exposed in pandas namespace + modules: list[str] = [] + + # top-level functions + funcs = [ + "array", + "bdate_range", + "concat", + "crosstab", + "cut", + "date_range", + "interval_range", + "eval", + "factorize", + "get_dummies", + "from_dummies", + "infer_freq", + "isna", + "isnull", + "lreshape", + "melt", + "notna", + "notnull", + "offsets", + "merge", + "merge_ordered", + "merge_asof", + "period_range", + "pivot", + "pivot_table", + "qcut", + "show_versions", + "timedelta_range", + "unique", + "value_counts", + "wide_to_long", + ] + + # top-level option funcs + funcs_option = [ + "reset_option", + "describe_option", + "get_option", + "option_context", + "set_option", + "set_eng_float_format", + ] + + # top-level read_* funcs + funcs_read = [ + "read_clipboard", + "read_csv", + "read_excel", + "read_fwf", + "read_gbq", + "read_hdf", + "read_html", + "read_xml", + "read_json", + "read_pickle", + "read_sas", + "read_sql", + "read_sql_query", + "read_sql_table", + "read_stata", + "read_table", + "read_feather", + "read_parquet", + "read_orc", + "read_spss", + ] + + # top-level json funcs + funcs_json = ["json_normalize"] + + # top-level to_* funcs + funcs_to = ["to_datetime", "to_numeric", "to_pickle", "to_timedelta"] + + # top-level to deprecate in the future + deprecated_funcs_in_future: list[str] = [] + + # these are already deprecated; awaiting removal + deprecated_funcs: list[str] = [] + + # private modules in pandas namespace + private_modules = [ + "_config", + "_libs", + "_is_numpy_dev", + "_pandas_datetime_CAPI", + "_pandas_parser_CAPI", + "_testing", + "_typing", + ] + if not pd._built_with_meson: + private_modules.append("_version") + + def test_api(self): + checkthese = ( + self.public_lib + + self.private_lib + + self.misc + + self.modules + + self.classes + + self.funcs + + self.funcs_option + + self.funcs_read + + self.funcs_json + + self.funcs_to + + self.private_modules + ) + self.check(namespace=pd, expected=checkthese, ignored=self.ignored) + + def test_api_all(self): + expected = set( + self.public_lib + + self.misc + + self.modules + + self.classes + + self.funcs + + self.funcs_option + + self.funcs_read + + self.funcs_json + + self.funcs_to + ) - set(self.deprecated_classes) + actual = set(pd.__all__) + + extraneous = actual - expected + assert not extraneous + + missing = expected - actual + assert not missing + + def test_depr(self): + deprecated_list = ( + self.deprecated_classes + + self.deprecated_funcs + + self.deprecated_funcs_in_future + ) + for depr in deprecated_list: + with tm.assert_produces_warning(FutureWarning): + _ = getattr(pd, depr) + + +class TestApi(Base): + allowed_api_dirs = [ + "types", + "extensions", + "indexers", + "interchange", + "typing", + ] + allowed_typing = [ + "DataFrameGroupBy", + "DatetimeIndexResamplerGroupby", + "Expanding", + "ExpandingGroupby", + "ExponentialMovingWindow", + "ExponentialMovingWindowGroupby", + "JsonReader", + "NaTType", + "NAType", + "PeriodIndexResamplerGroupby", + "Resampler", + "Rolling", + "RollingGroupby", + "SeriesGroupBy", + "StataReader", + "TimedeltaIndexResamplerGroupby", + "TimeGrouper", + "Window", + ] + allowed_api_types = [ + "is_any_real_numeric_dtype", + "is_array_like", + "is_bool", + "is_bool_dtype", + "is_categorical_dtype", + "is_complex", + 
"is_complex_dtype", + "is_datetime64_any_dtype", + "is_datetime64_dtype", + "is_datetime64_ns_dtype", + "is_datetime64tz_dtype", + "is_dict_like", + "is_dtype_equal", + "is_extension_array_dtype", + "is_file_like", + "is_float", + "is_float_dtype", + "is_hashable", + "is_int64_dtype", + "is_integer", + "is_integer_dtype", + "is_interval", + "is_interval_dtype", + "is_iterator", + "is_list_like", + "is_named_tuple", + "is_number", + "is_numeric_dtype", + "is_object_dtype", + "is_period_dtype", + "is_re", + "is_re_compilable", + "is_scalar", + "is_signed_integer_dtype", + "is_sparse", + "is_string_dtype", + "is_timedelta64_dtype", + "is_timedelta64_ns_dtype", + "is_unsigned_integer_dtype", + "pandas_dtype", + "infer_dtype", + "union_categoricals", + "CategoricalDtype", + "DatetimeTZDtype", + "IntervalDtype", + "PeriodDtype", + ] + allowed_api_interchange = ["from_dataframe", "DataFrame"] + allowed_api_indexers = [ + "check_array_indexer", + "BaseIndexer", + "FixedForwardWindowIndexer", + "VariableOffsetWindowIndexer", + ] + allowed_api_extensions = [ + "no_default", + "ExtensionDtype", + "register_extension_dtype", + "register_dataframe_accessor", + "register_index_accessor", + "register_series_accessor", + "take", + "ExtensionArray", + "ExtensionScalarOpsMixin", + ] + + def test_api(self): + self.check(api, self.allowed_api_dirs) + + def test_api_typing(self): + self.check(api_typing, self.allowed_typing) + + def test_api_types(self): + self.check(api_types, self.allowed_api_types) + + def test_api_interchange(self): + self.check(api_interchange, self.allowed_api_interchange) + + def test_api_indexers(self): + self.check(api_indexers, self.allowed_api_indexers) + + def test_api_extensions(self): + self.check(api_extensions, self.allowed_api_extensions) + + +class TestTesting(Base): + funcs = [ + "assert_frame_equal", + "assert_series_equal", + "assert_index_equal", + "assert_extension_array_equal", + ] + + def test_testing(self): + from pandas import testing + + self.check(testing, self.funcs) + + def test_util_in_top_level(self): + with pytest.raises(AttributeError, match="foo"): + pd.util.foo + + +def test_pandas_array_alias(): + msg = "PandasArray has been renamed NumpyExtensionArray" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = pd.arrays.PandasArray + + assert res is pd.arrays.NumpyExtensionArray diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/api/test_types.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/api/test_types.py new file mode 100644 index 00000000..fbaa6e7e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/api/test_types.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +import pandas._testing as tm +from pandas.api import types +from pandas.tests.api.test_api import Base + + +class TestTypes(Base): + allowed = [ + "is_any_real_numeric_dtype", + "is_bool", + "is_bool_dtype", + "is_categorical_dtype", + "is_complex", + "is_complex_dtype", + "is_datetime64_any_dtype", + "is_datetime64_dtype", + "is_datetime64_ns_dtype", + "is_datetime64tz_dtype", + "is_dtype_equal", + "is_float", + "is_float_dtype", + "is_int64_dtype", + "is_integer", + "is_integer_dtype", + "is_number", + "is_numeric_dtype", + "is_object_dtype", + "is_scalar", + "is_sparse", + "is_string_dtype", + "is_signed_integer_dtype", + "is_timedelta64_dtype", + "is_timedelta64_ns_dtype", + "is_unsigned_integer_dtype", + "is_period_dtype", + "is_interval", + "is_interval_dtype", + "is_re", + "is_re_compilable", + "is_dict_like", + "is_iterator", 
+ "is_file_like", + "is_list_like", + "is_hashable", + "is_array_like", + "is_named_tuple", + "pandas_dtype", + "union_categoricals", + "infer_dtype", + "is_extension_array_dtype", + ] + deprecated: list[str] = [] + dtypes = ["CategoricalDtype", "DatetimeTZDtype", "PeriodDtype", "IntervalDtype"] + + def test_types(self): + self.check(types, self.allowed + self.dtypes + self.deprecated) + + def test_deprecated_from_api_types(self): + for t in self.deprecated: + with tm.assert_produces_warning(FutureWarning): + getattr(types, t)(1) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/common.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/common.py new file mode 100644 index 00000000..b4d153df --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/common.py @@ -0,0 +1,7 @@ +from pandas.core.groupby.base import transformation_kernels + +# There is no Series.cumcount or DataFrame.cumcount +series_transform_kernels = [ + x for x in sorted(transformation_kernels) if x != "cumcount" +] +frame_transform_kernels = [x for x in sorted(transformation_kernels) if x != "cumcount"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/conftest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/conftest.py new file mode 100644 index 00000000..b68c6235 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/conftest.py @@ -0,0 +1,18 @@ +import numpy as np +import pytest + +from pandas import DataFrame + + +@pytest.fixture +def int_frame_const_col(): + """ + Fixture for DataFrame of ints which are constant per column + + Columns are ['A', 'B', 'C'], with values (per column): [1, 2, 3] + """ + df = DataFrame( + np.tile(np.arange(3, dtype="int64"), 6).reshape(6, -1) + 1, + columns=["A", "B", "C"], + ) + return df diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_frame_apply.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_frame_apply.py new file mode 100644 index 00000000..92612861 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_frame_apply.py @@ -0,0 +1,1635 @@ +from datetime import datetime +import warnings + +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + DataFrame, + MultiIndex, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm +from pandas.tests.frame.common import zip_frames + + +def test_apply(float_frame): + with np.errstate(all="ignore"): + # ufunc + result = np.sqrt(float_frame["A"]) + expected = float_frame.apply(np.sqrt)["A"] + tm.assert_series_equal(result, expected) + + # aggregator + result = float_frame.apply(np.mean)["A"] + expected = np.mean(float_frame["A"]) + assert result == expected + + d = float_frame.index[0] + result = float_frame.apply(np.mean, axis=1) + expected = np.mean(float_frame.xs(d)) + assert result[d] == expected + assert result.index is float_frame.index + + +@pytest.mark.parametrize("axis", [0, 1]) +@pytest.mark.parametrize("raw", [True, False]) +def test_apply_args(float_frame, axis, raw): + result = float_frame.apply(lambda x, y: x + y, axis, args=(1,), raw=raw) + expected = float_frame + 1 + tm.assert_frame_equal(result, expected) + + +def test_apply_categorical_func(): + # GH 9573 + df = 
DataFrame({"c0": ["A", "A", "B", "B"], "c1": ["C", "C", "D", "D"]}) + result = df.apply(lambda ts: ts.astype("category")) + + assert result.shape == (4, 2) + assert isinstance(result["c0"].dtype, CategoricalDtype) + assert isinstance(result["c1"].dtype, CategoricalDtype) + + +def test_apply_axis1_with_ea(): + # GH#36785 + expected = DataFrame({"A": [Timestamp("2013-01-01", tz="UTC")]}) + result = expected.apply(lambda x: x, axis=1) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "data, dtype", + [(1, None), (1, CategoricalDtype([1])), (Timestamp("2013-01-01", tz="UTC"), None)], +) +def test_agg_axis1_duplicate_index(data, dtype): + # GH 42380 + expected = DataFrame([[data], [data]], index=["a", "a"], dtype=dtype) + result = expected.agg(lambda x: x, axis=1) + tm.assert_frame_equal(result, expected) + + +def test_apply_mixed_datetimelike(): + # mixed datetimelike + # GH 7778 + expected = DataFrame( + { + "A": date_range("20130101", periods=3), + "B": pd.to_timedelta(np.arange(3), unit="s"), + } + ) + result = expected.apply(lambda x: x, axis=1) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("func", [np.sqrt, np.mean]) +def test_apply_empty(func): + # empty + empty_frame = DataFrame() + + result = empty_frame.apply(func) + assert result.empty + + +def test_apply_float_frame(float_frame): + no_rows = float_frame[:0] + result = no_rows.apply(lambda x: x.mean()) + expected = Series(np.nan, index=float_frame.columns) + tm.assert_series_equal(result, expected) + + no_cols = float_frame.loc[:, []] + result = no_cols.apply(lambda x: x.mean(), axis=1) + expected = Series(np.nan, index=float_frame.index) + tm.assert_series_equal(result, expected) + + +def test_apply_empty_except_index(): + # GH 2476 + expected = DataFrame(index=["a"]) + result = expected.apply(lambda x: x["a"], axis=1) + tm.assert_frame_equal(result, expected) + + +def test_apply_with_reduce_empty(): + # reduce with an empty DataFrame + empty_frame = DataFrame() + + x = [] + result = empty_frame.apply(x.append, axis=1, result_type="expand") + tm.assert_frame_equal(result, empty_frame) + result = empty_frame.apply(x.append, axis=1, result_type="reduce") + expected = Series([], dtype=np.float64) + tm.assert_series_equal(result, expected) + + empty_with_cols = DataFrame(columns=["a", "b", "c"]) + result = empty_with_cols.apply(x.append, axis=1, result_type="expand") + tm.assert_frame_equal(result, empty_with_cols) + result = empty_with_cols.apply(x.append, axis=1, result_type="reduce") + expected = Series([], dtype=np.float64) + tm.assert_series_equal(result, expected) + + # Ensure that x.append hasn't been called + assert x == [] + + +@pytest.mark.parametrize("func", ["sum", "prod", "any", "all"]) +def test_apply_funcs_over_empty(func): + # GH 28213 + df = DataFrame(columns=["a", "b", "c"]) + + result = df.apply(getattr(np, func)) + expected = getattr(df, func)() + if func in ("sum", "prod"): + expected = expected.astype(float) + tm.assert_series_equal(result, expected) + + +def test_nunique_empty(): + # GH 28213 + df = DataFrame(columns=["a", "b", "c"]) + + result = df.nunique() + expected = Series(0, index=df.columns) + tm.assert_series_equal(result, expected) + + result = df.T.nunique() + expected = Series([], dtype=np.float64) + tm.assert_series_equal(result, expected) + + +def test_apply_standard_nonunique(): + df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"]) + + result = df.apply(lambda s: s[0], axis=1) + expected = Series([1, 4, 7], ["a", "a", "c"]) + 
tm.assert_series_equal(result, expected) + + result = df.T.apply(lambda s: s[0], axis=0) + tm.assert_series_equal(result, expected) + + +def test_apply_broadcast_scalars(float_frame): + # scalars + result = float_frame.apply(np.mean, result_type="broadcast") + expected = DataFrame([float_frame.mean()], index=float_frame.index) + tm.assert_frame_equal(result, expected) + + +def test_apply_broadcast_scalars_axis1(float_frame): + result = float_frame.apply(np.mean, axis=1, result_type="broadcast") + m = float_frame.mean(axis=1) + expected = DataFrame({c: m for c in float_frame.columns}) + tm.assert_frame_equal(result, expected) + + +def test_apply_broadcast_lists_columns(float_frame): + # lists + result = float_frame.apply( + lambda x: list(range(len(float_frame.columns))), + axis=1, + result_type="broadcast", + ) + m = list(range(len(float_frame.columns))) + expected = DataFrame( + [m] * len(float_frame.index), + dtype="float64", + index=float_frame.index, + columns=float_frame.columns, + ) + tm.assert_frame_equal(result, expected) + + +def test_apply_broadcast_lists_index(float_frame): + result = float_frame.apply( + lambda x: list(range(len(float_frame.index))), result_type="broadcast" + ) + m = list(range(len(float_frame.index))) + expected = DataFrame( + {c: m for c in float_frame.columns}, + dtype="float64", + index=float_frame.index, + ) + tm.assert_frame_equal(result, expected) + + +def test_apply_broadcast_list_lambda_func(int_frame_const_col): + # preserve columns + df = int_frame_const_col + result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="broadcast") + tm.assert_frame_equal(result, df) + + +def test_apply_broadcast_series_lambda_func(int_frame_const_col): + df = int_frame_const_col + result = df.apply( + lambda x: Series([1, 2, 3], index=list("abc")), + axis=1, + result_type="broadcast", + ) + expected = df.copy() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("axis", [0, 1]) +def test_apply_raw_float_frame(float_frame, axis): + def _assert_raw(x): + assert isinstance(x, np.ndarray) + assert x.ndim == 1 + + float_frame.apply(_assert_raw, axis=axis, raw=True) + + +@pytest.mark.parametrize("axis", [0, 1]) +def test_apply_raw_float_frame_lambda(float_frame, axis): + result = float_frame.apply(np.mean, axis=axis, raw=True) + expected = float_frame.apply(lambda x: x.values.mean(), axis=axis) + tm.assert_series_equal(result, expected) + + +def test_apply_raw_float_frame_no_reduction(float_frame): + # no reduction + result = float_frame.apply(lambda x: x * 2, raw=True) + expected = float_frame * 2 + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("axis", [0, 1]) +def test_apply_raw_mixed_type_frame(mixed_type_frame, axis): + def _assert_raw(x): + assert isinstance(x, np.ndarray) + assert x.ndim == 1 + + # Mixed dtype (GH-32423) + mixed_type_frame.apply(_assert_raw, axis=axis, raw=True) + + +def test_apply_axis1(float_frame): + d = float_frame.index[0] + result = float_frame.apply(np.mean, axis=1)[d] + expected = np.mean(float_frame.xs(d)) + assert result == expected + + +def test_apply_mixed_dtype_corner(): + df = DataFrame({"A": ["foo"], "B": [1.0]}) + result = df[:0].apply(np.mean, axis=1) + # the result here is actually kind of ambiguous, should it be a Series + # or a DataFrame? 
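+    # currently it reduces to an empty float64 Series, as asserted below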
+ expected = Series(np.nan, index=pd.Index([], dtype="int64")) + tm.assert_series_equal(result, expected) + + +def test_apply_mixed_dtype_corner_indexing(): + df = DataFrame({"A": ["foo"], "B": [1.0]}) + result = df.apply(lambda x: x["A"], axis=1) + expected = Series(["foo"], index=[0]) + tm.assert_series_equal(result, expected) + + result = df.apply(lambda x: x["B"], axis=1) + expected = Series([1.0], index=[0]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore::RuntimeWarning") +@pytest.mark.parametrize("ax", ["index", "columns"]) +@pytest.mark.parametrize( + "func", [lambda x: x, lambda x: x.mean()], ids=["identity", "mean"] +) +@pytest.mark.parametrize("raw", [True, False]) +@pytest.mark.parametrize("axis", [0, 1]) +def test_apply_empty_infer_type(ax, func, raw, axis): + df = DataFrame(**{ax: ["a", "b", "c"]}) + + with np.errstate(all="ignore"): + test_res = func(np.array([], dtype="f8")) + is_reduction = not isinstance(test_res, np.ndarray) + + result = df.apply(func, axis=axis, raw=raw) + if is_reduction: + agg_axis = df._get_agg_axis(axis) + assert isinstance(result, Series) + assert result.index is agg_axis + else: + assert isinstance(result, DataFrame) + + +def test_apply_empty_infer_type_broadcast(): + no_cols = DataFrame(index=["a", "b", "c"]) + result = no_cols.apply(lambda x: x.mean(), result_type="broadcast") + assert isinstance(result, DataFrame) + + +def test_apply_with_args_kwds_add_some(float_frame): + def add_some(x, howmuch=0): + return x + howmuch + + result = float_frame.apply(add_some, howmuch=2) + expected = float_frame.apply(lambda x: x + 2) + tm.assert_frame_equal(result, expected) + + +def test_apply_with_args_kwds_agg_and_add(float_frame): + def agg_and_add(x, howmuch=0): + return x.mean() + howmuch + + result = float_frame.apply(agg_and_add, howmuch=2) + expected = float_frame.apply(lambda x: x.mean() + 2) + tm.assert_series_equal(result, expected) + + +def test_apply_with_args_kwds_subtract_and_divide(float_frame): + def subtract_and_divide(x, sub, divide=1): + return (x - sub) / divide + + result = float_frame.apply(subtract_and_divide, args=(2,), divide=2) + expected = float_frame.apply(lambda x: (x - 2.0) / 2.0) + tm.assert_frame_equal(result, expected) + + +def test_apply_yield_list(float_frame): + result = float_frame.apply(list) + tm.assert_frame_equal(result, float_frame) + + +def test_apply_reduce_Series(float_frame): + float_frame.iloc[::2, float_frame.columns.get_loc("A")] = np.nan + expected = float_frame.mean(1) + result = float_frame.apply(np.mean, axis=1) + tm.assert_series_equal(result, expected) + + +def test_apply_reduce_to_dict(): + # GH 25196 37544 + data = DataFrame([[1, 2], [3, 4]], columns=["c0", "c1"], index=["i0", "i1"]) + + result = data.apply(dict, axis=0) + expected = Series([{"i0": 1, "i1": 3}, {"i0": 2, "i1": 4}], index=data.columns) + tm.assert_series_equal(result, expected) + + result = data.apply(dict, axis=1) + expected = Series([{"c0": 1, "c1": 2}, {"c0": 3, "c1": 4}], index=data.index) + tm.assert_series_equal(result, expected) + + +def test_apply_differently_indexed(): + df = DataFrame(np.random.default_rng(2).standard_normal((20, 10))) + + result = df.apply(Series.describe, axis=0) + expected = DataFrame({i: v.describe() for i, v in df.items()}, columns=df.columns) + tm.assert_frame_equal(result, expected) + + result = df.apply(Series.describe, axis=1) + expected = DataFrame({i: v.describe() for i, v in df.T.items()}, columns=df.index).T + tm.assert_frame_equal(result, expected) + + +def 
test_apply_bug(): + # GH 6125 + positions = DataFrame( + [ + [1, "ABC0", 50], + [1, "YUM0", 20], + [1, "DEF0", 20], + [2, "ABC1", 50], + [2, "YUM1", 20], + [2, "DEF1", 20], + ], + columns=["a", "market", "position"], + ) + + def f(r): + return r["market"] + + expected = positions.apply(f, axis=1) + + positions = DataFrame( + [ + [datetime(2013, 1, 1), "ABC0", 50], + [datetime(2013, 1, 2), "YUM0", 20], + [datetime(2013, 1, 3), "DEF0", 20], + [datetime(2013, 1, 4), "ABC1", 50], + [datetime(2013, 1, 5), "YUM1", 20], + [datetime(2013, 1, 6), "DEF1", 20], + ], + columns=["a", "market", "position"], + ) + result = positions.apply(f, axis=1) + tm.assert_series_equal(result, expected) + + +def test_apply_convert_objects(): + expected = DataFrame( + { + "A": [ + "foo", + "foo", + "foo", + "foo", + "bar", + "bar", + "bar", + "bar", + "foo", + "foo", + "foo", + ], + "B": [ + "one", + "one", + "one", + "two", + "one", + "one", + "one", + "two", + "two", + "two", + "one", + ], + "C": [ + "dull", + "dull", + "shiny", + "dull", + "dull", + "shiny", + "shiny", + "dull", + "shiny", + "shiny", + "shiny", + ], + "D": np.random.default_rng(2).standard_normal(11), + "E": np.random.default_rng(2).standard_normal(11), + "F": np.random.default_rng(2).standard_normal(11), + } + ) + + result = expected.apply(lambda x: x, axis=1) + tm.assert_frame_equal(result, expected) + + +def test_apply_attach_name(float_frame): + result = float_frame.apply(lambda x: x.name) + expected = Series(float_frame.columns, index=float_frame.columns) + tm.assert_series_equal(result, expected) + + +def test_apply_attach_name_axis1(float_frame): + result = float_frame.apply(lambda x: x.name, axis=1) + expected = Series(float_frame.index, index=float_frame.index) + tm.assert_series_equal(result, expected) + + +def test_apply_attach_name_non_reduction(float_frame): + # non-reductions + result = float_frame.apply(lambda x: np.repeat(x.name, len(x))) + expected = DataFrame( + np.tile(float_frame.columns, (len(float_frame.index), 1)), + index=float_frame.index, + columns=float_frame.columns, + ) + tm.assert_frame_equal(result, expected) + + +def test_apply_attach_name_non_reduction_axis1(float_frame): + result = float_frame.apply(lambda x: np.repeat(x.name, len(x)), axis=1) + expected = Series( + np.repeat(t[0], len(float_frame.columns)) for t in float_frame.itertuples() + ) + expected.index = float_frame.index + tm.assert_series_equal(result, expected) + + +def test_apply_multi_index(): + index = MultiIndex.from_arrays([["a", "a", "b"], ["c", "d", "d"]]) + s = DataFrame([[1, 2], [3, 4], [5, 6]], index=index, columns=["col1", "col2"]) + result = s.apply(lambda x: Series({"min": min(x), "max": max(x)}), 1) + expected = DataFrame([[1, 2], [3, 4], [5, 6]], index=index, columns=["min", "max"]) + tm.assert_frame_equal(result, expected, check_like=True) + + +@pytest.mark.parametrize( + "df, dicts", + [ + [ + DataFrame([["foo", "bar"], ["spam", "eggs"]]), + Series([{0: "foo", 1: "spam"}, {0: "bar", 1: "eggs"}]), + ], + [DataFrame([[0, 1], [2, 3]]), Series([{0: 0, 1: 2}, {0: 1, 1: 3}])], + ], +) +def test_apply_dict(df, dicts): + # GH 8735 + fn = lambda x: x.to_dict() + reduce_true = df.apply(fn, result_type="reduce") + reduce_false = df.apply(fn, result_type="expand") + reduce_none = df.apply(fn) + + tm.assert_series_equal(reduce_true, dicts) + tm.assert_frame_equal(reduce_false, df) + tm.assert_series_equal(reduce_none, dicts) + + +def test_apply_non_numpy_dtype(): + # GH 12244 + df = DataFrame({"dt": date_range("2015-01-01", periods=3, 
tz="Europe/Brussels")}) + result = df.apply(lambda x: x) + tm.assert_frame_equal(result, df) + + result = df.apply(lambda x: x + pd.Timedelta("1day")) + expected = DataFrame( + {"dt": date_range("2015-01-02", periods=3, tz="Europe/Brussels")} + ) + tm.assert_frame_equal(result, expected) + + +def test_apply_non_numpy_dtype_category(): + df = DataFrame({"dt": ["a", "b", "c", "a"]}, dtype="category") + result = df.apply(lambda x: x) + tm.assert_frame_equal(result, df) + + +def test_apply_dup_names_multi_agg(): + # GH 21063 + df = DataFrame([[0, 1], [2, 3]], columns=["a", "a"]) + expected = DataFrame([[0, 1]], columns=["a", "a"], index=["min"]) + result = df.agg(["min"]) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("op", ["apply", "agg"]) +def test_apply_nested_result_axis_1(op): + # GH 13820 + def apply_list(row): + return [2 * row["A"], 2 * row["C"], 2 * row["B"]] + + df = DataFrame(np.zeros((4, 4)), columns=list("ABCD")) + result = getattr(df, op)(apply_list, axis=1) + expected = Series( + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]] + ) + tm.assert_series_equal(result, expected) + + +def test_apply_noreduction_tzaware_object(): + # https://github.com/pandas-dev/pandas/issues/31505 + expected = DataFrame( + {"foo": [Timestamp("2020", tz="UTC")]}, dtype="datetime64[ns, UTC]" + ) + result = expected.apply(lambda x: x) + tm.assert_frame_equal(result, expected) + result = expected.apply(lambda x: x.copy()) + tm.assert_frame_equal(result, expected) + + +def test_apply_function_runs_once(): + # https://github.com/pandas-dev/pandas/issues/30815 + + df = DataFrame({"a": [1, 2, 3]}) + names = [] # Save row names function is applied to + + def reducing_function(row): + names.append(row.name) + + def non_reducing_function(row): + names.append(row.name) + return row + + for func in [reducing_function, non_reducing_function]: + del names[:] + + df.apply(func, axis=1) + assert names == list(df.index) + + +def test_apply_raw_function_runs_once(): + # https://github.com/pandas-dev/pandas/issues/34506 + + df = DataFrame({"a": [1, 2, 3]}) + values = [] # Save row values function is applied to + + def reducing_function(row): + values.extend(row) + + def non_reducing_function(row): + values.extend(row) + return row + + for func in [reducing_function, non_reducing_function]: + del values[:] + + df.apply(func, raw=True, axis=1) + assert values == list(df.a.to_list()) + + +def test_apply_with_byte_string(): + # GH 34529 + df = DataFrame(np.array([b"abcd", b"efgh"]), columns=["col"]) + expected = DataFrame(np.array([b"abcd", b"efgh"]), columns=["col"], dtype=object) + # After we make the apply we expect a dataframe just + # like the original but with the object datatype + result = df.apply(lambda x: x.astype("object")) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("val", ["asd", 12, None, np.nan]) +def test_apply_category_equalness(val): + # Check if categorical comparisons on apply, GH 21239 + df_values = ["asd", None, 12, "asd", "cde", np.nan] + df = DataFrame({"a": df_values}, dtype="category") + + result = df.a.apply(lambda x: x == val) + expected = Series( + [np.nan if pd.isnull(x) else x == val for x in df_values], name="a" + ) + tm.assert_series_equal(result, expected) + + +# the user has supplied an opaque UDF where +# they are transforming the input that requires +# us to infer the output + + +def test_infer_row_shape(): + # GH 17437 + # if row shape is changing, infer it + df = DataFrame(np.random.default_rng(2).random((10, 
2))) + result = df.apply(np.fft.fft, axis=0).shape + assert result == (10, 2) + + result = df.apply(np.fft.rfft, axis=0).shape + assert result == (6, 2) + + +@pytest.mark.parametrize( + "ops, by_row, expected", + [ + ({"a": lambda x: x + 1}, "compat", DataFrame({"a": [2, 3]})), + ({"a": lambda x: x + 1}, False, DataFrame({"a": [2, 3]})), + ({"a": lambda x: x.sum()}, "compat", Series({"a": 3})), + ({"a": lambda x: x.sum()}, False, Series({"a": 3})), + ( + {"a": ["sum", np.sum, lambda x: x.sum()]}, + "compat", + DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", ""]), + ), + ( + {"a": ["sum", np.sum, lambda x: x.sum()]}, + False, + DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", ""]), + ), + ({"a": lambda x: 1}, "compat", DataFrame({"a": [1, 1]})), + ({"a": lambda x: 1}, False, Series({"a": 1})), + ], +) +def test_dictlike_lambda(ops, by_row, expected): + # GH53601 + df = DataFrame({"a": [1, 2]}) + result = df.apply(ops, by_row=by_row) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "ops", + [ + {"a": lambda x: x + 1}, + {"a": lambda x: x.sum()}, + {"a": ["sum", np.sum, lambda x: x.sum()]}, + {"a": lambda x: 1}, + ], +) +def test_dictlike_lambda_raises(ops): + # GH53601 + df = DataFrame({"a": [1, 2]}) + with pytest.raises(ValueError, match="by_row=True not allowed"): + df.apply(ops, by_row=True) + + +def test_with_dictlike_columns(): + # GH 17602 + df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1) + expected = Series([{"s": 3} for t in df.itertuples()]) + tm.assert_series_equal(result, expected) + + df["tm"] = [ + Timestamp("2017-05-01 00:00:00"), + Timestamp("2017-05-02 00:00:00"), + ] + result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1) + tm.assert_series_equal(result, expected) + + # compose a series + result = (df["a"] + df["b"]).apply(lambda x: {"s": x}) + expected = Series([{"s": 3}, {"s": 3}]) + tm.assert_series_equal(result, expected) + + +def test_with_dictlike_columns_with_datetime(): + # GH 18775 + df = DataFrame() + df["author"] = ["X", "Y", "Z"] + df["publisher"] = ["BBC", "NBC", "N24"] + df["date"] = pd.to_datetime( + ["17-10-2010 07:15:30", "13-05-2011 08:20:35", "15-01-2013 09:09:09"], + dayfirst=True, + ) + result = df.apply(lambda x: {}, axis=1) + expected = Series([{}, {}, {}]) + tm.assert_series_equal(result, expected) + + +def test_with_dictlike_columns_with_infer(): + # GH 17602 + df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1, result_type="expand") + expected = DataFrame({"s": [3, 3]}) + tm.assert_frame_equal(result, expected) + + df["tm"] = [ + Timestamp("2017-05-01 00:00:00"), + Timestamp("2017-05-02 00:00:00"), + ] + result = df.apply(lambda x: {"s": x["a"] + x["b"]}, axis=1, result_type="expand") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "ops, by_row, expected", + [ + ([lambda x: x + 1], "compat", DataFrame({("a", ""): [2, 3]})), + ([lambda x: x + 1], False, DataFrame({("a", ""): [2, 3]})), + ([lambda x: x.sum()], "compat", DataFrame({"a": [3]}, index=[""])), + ([lambda x: x.sum()], False, DataFrame({"a": [3]}, index=[""])), + ( + ["sum", np.sum, lambda x: x.sum()], + "compat", + DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", ""]), + ), + ( + ["sum", np.sum, lambda x: x.sum()], + False, + DataFrame({"a": [3, 3, 3]}, index=["sum", "sum", ""]), + ), + ( + [lambda x: x + 1, lambda x: 3], + "compat", + DataFrame([[2, 3], [3, 3]], columns=[["a", "a"], ["", ""]]), + ), + ( + 
[lambda x: 2, lambda x: 3], + False, + DataFrame({"a": [2, 3]}, ["", ""]), + ), + ], +) +def test_listlike_lambda(ops, by_row, expected): + # GH53601 + df = DataFrame({"a": [1, 2]}) + result = df.apply(ops, by_row=by_row) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "ops", + [ + [lambda x: x + 1], + [lambda x: x.sum()], + ["sum", np.sum, lambda x: x.sum()], + [lambda x: x + 1, lambda x: 3], + ], +) +def test_listlike_lambda_raises(ops): + # GH53601 + df = DataFrame({"a": [1, 2]}) + with pytest.raises(ValueError, match="by_row=True not allowed"): + df.apply(ops, by_row=True) + + +def test_with_listlike_columns(): + # GH 17348 + df = DataFrame( + { + "a": Series(np.random.default_rng(2).standard_normal(4)), + "b": ["a", "list", "of", "words"], + "ts": date_range("2016-10-01", periods=4, freq="H"), + } + ) + + result = df[["a", "b"]].apply(tuple, axis=1) + expected = Series([t[1:] for t in df[["a", "b"]].itertuples()]) + tm.assert_series_equal(result, expected) + + result = df[["a", "ts"]].apply(tuple, axis=1) + expected = Series([t[1:] for t in df[["a", "ts"]].itertuples()]) + tm.assert_series_equal(result, expected) + + +def test_with_listlike_columns_returning_list(): + # GH 18919 + df = DataFrame({"x": Series([["a", "b"], ["q"]]), "y": Series([["z"], ["q", "t"]])}) + df.index = MultiIndex.from_tuples([("i0", "j0"), ("i1", "j1")]) + + result = df.apply(lambda row: [el for el in row["x"] if el in row["y"]], axis=1) + expected = Series([[], ["q"]], index=df.index) + tm.assert_series_equal(result, expected) + + +def test_infer_output_shape_columns(): + # GH 18573 + + df = DataFrame( + { + "number": [1.0, 2.0], + "string": ["foo", "bar"], + "datetime": [ + Timestamp("2017-11-29 03:30:00"), + Timestamp("2017-11-29 03:45:00"), + ], + } + ) + result = df.apply(lambda row: (row.number, row.string), axis=1) + expected = Series([(t.number, t.string) for t in df.itertuples()]) + tm.assert_series_equal(result, expected) + + +def test_infer_output_shape_listlike_columns(): + # GH 16353 + + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 3)), columns=["A", "B", "C"] + ) + + result = df.apply(lambda x: [1, 2, 3], axis=1) + expected = Series([[1, 2, 3] for t in df.itertuples()]) + tm.assert_series_equal(result, expected) + + result = df.apply(lambda x: [1, 2], axis=1) + expected = Series([[1, 2] for t in df.itertuples()]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("val", [1, 2]) +def test_infer_output_shape_listlike_columns_np_func(val): + # GH 17970 + df = DataFrame({"a": [1, 2, 3]}, index=list("abc")) + + result = df.apply(lambda row: np.ones(val), axis=1) + expected = Series([np.ones(val) for t in df.itertuples()], index=df.index) + tm.assert_series_equal(result, expected) + + +def test_infer_output_shape_listlike_columns_with_timestamp(): + # GH 17892 + df = DataFrame( + { + "a": [ + Timestamp("2010-02-01"), + Timestamp("2010-02-04"), + Timestamp("2010-02-05"), + Timestamp("2010-02-06"), + ], + "b": [9, 5, 4, 3], + "c": [5, 3, 4, 2], + "d": [1, 2, 3, 4], + } + ) + + def fun(x): + return (1, 2) + + result = df.apply(fun, axis=1) + expected = Series([(1, 2) for t in df.itertuples()]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("lst", [[1, 2, 3], [1, 2]]) +def test_consistent_coerce_for_shapes(lst): + # we want column names to NOT be propagated + # just because the shape matches the input shape + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 3)), columns=["A", "B", "C"] + ) + + result = 
df.apply(lambda x: lst, axis=1) + expected = Series([lst for t in df.itertuples()]) + tm.assert_series_equal(result, expected) + + +def test_consistent_names(int_frame_const_col): + # if a Series is returned, we should use the resulting index names + df = int_frame_const_col + + result = df.apply( + lambda x: Series([1, 2, 3], index=["test", "other", "cols"]), axis=1 + ) + expected = int_frame_const_col.rename( + columns={"A": "test", "B": "other", "C": "cols"} + ) + tm.assert_frame_equal(result, expected) + + result = df.apply(lambda x: Series([1, 2], index=["test", "other"]), axis=1) + expected = expected[["test", "other"]] + tm.assert_frame_equal(result, expected) + + +def test_result_type(int_frame_const_col): + # result_type should be consistent no matter which + # path we take in the code + df = int_frame_const_col + + result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="expand") + expected = df.copy() + expected.columns = [0, 1, 2] + tm.assert_frame_equal(result, expected) + + +def test_result_type_shorter_list(int_frame_const_col): + # result_type should be consistent no matter which + # path we take in the code + df = int_frame_const_col + result = df.apply(lambda x: [1, 2], axis=1, result_type="expand") + expected = df[["A", "B"]].copy() + expected.columns = [0, 1] + tm.assert_frame_equal(result, expected) + + +def test_result_type_broadcast(int_frame_const_col): + # result_type should be consistent no matter which + # path we take in the code + df = int_frame_const_col + # broadcast result + result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="broadcast") + expected = df.copy() + tm.assert_frame_equal(result, expected) + + +def test_result_type_broadcast_series_func(int_frame_const_col): + # result_type should be consistent no matter which + # path we take in the code + df = int_frame_const_col + columns = ["other", "col", "names"] + result = df.apply( + lambda x: Series([1, 2, 3], index=columns), axis=1, result_type="broadcast" + ) + expected = df.copy() + tm.assert_frame_equal(result, expected) + + +def test_result_type_series_result(int_frame_const_col): + # result_type should be consistent no matter which + # path we take in the code + df = int_frame_const_col + # series result + result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1) + expected = df.copy() + tm.assert_frame_equal(result, expected) + + +def test_result_type_series_result_other_index(int_frame_const_col): + # result_type should be consistent no matter which + # path we take in the code + df = int_frame_const_col + # series result with other index + columns = ["other", "col", "names"] + result = df.apply(lambda x: Series([1, 2, 3], index=columns), axis=1) + expected = df.copy() + expected.columns = columns + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "box", + [lambda x: list(x), lambda x: tuple(x), lambda x: np.array(x, dtype="int64")], + ids=["list", "tuple", "array"], +) +def test_consistency_for_boxed(box, int_frame_const_col): + # passing an array or list should not affect the output shape + df = int_frame_const_col + + result = df.apply(lambda x: box([1, 2]), axis=1) + expected = Series([box([1, 2]) for t in df.itertuples()]) + tm.assert_series_equal(result, expected) + + result = df.apply(lambda x: box([1, 2]), axis=1, result_type="expand") + expected = int_frame_const_col[["A", "B"]].rename(columns={"A": 0, "B": 1}) + tm.assert_frame_equal(result, expected) + + +def test_agg_transform(axis, float_frame): + other_axis = 1 if axis in {0, "index"} 
else 0 + + with np.errstate(all="ignore"): + f_abs = np.abs(float_frame) + f_sqrt = np.sqrt(float_frame) + + # ufunc + expected = f_sqrt.copy() + result = float_frame.apply(np.sqrt, axis=axis) + tm.assert_frame_equal(result, expected) + + # list-like + result = float_frame.apply([np.sqrt], axis=axis) + expected = f_sqrt.copy() + if axis in {0, "index"}: + expected.columns = MultiIndex.from_product([float_frame.columns, ["sqrt"]]) + else: + expected.index = MultiIndex.from_product([float_frame.index, ["sqrt"]]) + tm.assert_frame_equal(result, expected) + + # multiple items in list + # these are in the order as if we are applying both + # functions per series and then concatting + result = float_frame.apply([np.abs, np.sqrt], axis=axis) + expected = zip_frames([f_abs, f_sqrt], axis=other_axis) + if axis in {0, "index"}: + expected.columns = MultiIndex.from_product( + [float_frame.columns, ["absolute", "sqrt"]] + ) + else: + expected.index = MultiIndex.from_product( + [float_frame.index, ["absolute", "sqrt"]] + ) + tm.assert_frame_equal(result, expected) + + +def test_demo(): + # demonstration tests + df = DataFrame({"A": range(5), "B": 5}) + + result = df.agg(["min", "max"]) + expected = DataFrame( + {"A": [0, 4], "B": [5, 5]}, columns=["A", "B"], index=["min", "max"] + ) + tm.assert_frame_equal(result, expected) + + +def test_demo_dict_agg(): + # demonstration tests + df = DataFrame({"A": range(5), "B": 5}) + result = df.agg({"A": ["min", "max"], "B": ["sum", "max"]}) + expected = DataFrame( + {"A": [4.0, 0.0, np.nan], "B": [5.0, np.nan, 25.0]}, + columns=["A", "B"], + index=["max", "min", "sum"], + ) + tm.assert_frame_equal(result.reindex_like(expected), expected) + + +def test_agg_with_name_as_column_name(): + # GH 36212 - Column name is "name" + data = {"name": ["foo", "bar"]} + df = DataFrame(data) + + # result's name should be None + result = df.agg({"name": "count"}) + expected = Series({"name": 2}) + tm.assert_series_equal(result, expected) + + # Check if name is still preserved when aggregating series instead + result = df["name"].agg({"name": "count"}) + expected = Series({"name": 2}, name="name") + tm.assert_series_equal(result, expected) + + +def test_agg_multiple_mixed(): + # GH 20909 + mdf = DataFrame( + { + "A": [1, 2, 3], + "B": [1.0, 2.0, 3.0], + "C": ["foo", "bar", "baz"], + } + ) + expected = DataFrame( + { + "A": [1, 6], + "B": [1.0, 6.0], + "C": ["bar", "foobarbaz"], + }, + index=["min", "sum"], + ) + # sorted index + result = mdf.agg(["min", "sum"]) + tm.assert_frame_equal(result, expected) + + result = mdf[["C", "B", "A"]].agg(["sum", "min"]) + # GH40420: the result of .agg should have an index that is sorted + # according to the arguments provided to agg. 
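+    # A minimal sketch of the ordering contract checked here:
+    # >>> DataFrame({"x": [1, 2]}).agg(["sum", "min"])
+    #      x
+    # sum  3
+    # min  1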
+    expected = expected[["C", "B", "A"]].reindex(["sum", "min"])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_agg_multiple_mixed_raises():
+    # GH 20909
+    mdf = DataFrame(
+        {
+            "A": [1, 2, 3],
+            "B": [1.0, 2.0, 3.0],
+            "C": ["foo", "bar", "baz"],
+            "D": date_range("20130101", periods=3),
+        }
+    )
+
+    # sorted index
+    msg = "does not support reduction"
+    with pytest.raises(TypeError, match=msg):
+        mdf.agg(["min", "sum"])
+
+    with pytest.raises(TypeError, match=msg):
+        mdf[["D", "C", "B", "A"]].agg(["sum", "min"])
+
+
+def test_agg_reduce(axis, float_frame):
+    other_axis = 1 if axis in {0, "index"} else 0
+    name1, name2 = float_frame.axes[other_axis].unique()[:2].sort_values()
+
+    # all reducers
+    expected = pd.concat(
+        [
+            float_frame.mean(axis=axis),
+            float_frame.max(axis=axis),
+            float_frame.sum(axis=axis),
+        ],
+        axis=1,
+    )
+    expected.columns = ["mean", "max", "sum"]
+    expected = expected.T if axis in {0, "index"} else expected
+
+    result = float_frame.agg(["mean", "max", "sum"], axis=axis)
+    tm.assert_frame_equal(result, expected)
+
+    # dict input with scalars
+    func = {name1: "mean", name2: "sum"}
+    result = float_frame.agg(func, axis=axis)
+    expected = Series(
+        [
+            float_frame.loc(other_axis)[name1].mean(),
+            float_frame.loc(other_axis)[name2].sum(),
+        ],
+        index=[name1, name2],
+    )
+    tm.assert_series_equal(result, expected)
+
+    # dict input with lists
+    func = {name1: ["mean"], name2: ["sum"]}
+    result = float_frame.agg(func, axis=axis)
+    expected = DataFrame(
+        {
+            name1: Series([float_frame.loc(other_axis)[name1].mean()], index=["mean"]),
+            name2: Series([float_frame.loc(other_axis)[name2].sum()], index=["sum"]),
+        }
+    )
+    expected = expected.T if axis in {1, "columns"} else expected
+    tm.assert_frame_equal(result, expected)
+
+    # dict input with lists with multiple
+    func = {name1: ["mean", "sum"], name2: ["sum", "max"]}
+    result = float_frame.agg(func, axis=axis)
+    expected = pd.concat(
+        {
+            name1: Series(
+                [
+                    float_frame.loc(other_axis)[name1].mean(),
+                    float_frame.loc(other_axis)[name1].sum(),
+                ],
+                index=["mean", "sum"],
+            ),
+            name2: Series(
+                [
+                    float_frame.loc(other_axis)[name2].sum(),
+                    float_frame.loc(other_axis)[name2].max(),
+                ],
+                index=["sum", "max"],
+            ),
+        },
+        axis=1,
+    )
+    expected = expected.T if axis in {1, "columns"} else expected
+    tm.assert_frame_equal(result, expected)
+
+
+def test_nuisance_columns():
+    # GH 15015
+    df = DataFrame(
+        {
+            "A": [1, 2, 3],
+            "B": [1.0, 2.0, 3.0],
+            "C": ["foo", "bar", "baz"],
+            "D": date_range("20130101", periods=3),
+        }
+    )
+
+    result = df.agg("min")
+    expected = Series([1, 1.0, "bar", Timestamp("20130101")], index=df.columns)
+    tm.assert_series_equal(result, expected)
+
+    result = df.agg(["min"])
+    expected = DataFrame(
+        [[1, 1.0, "bar", Timestamp("20130101")]],
+        index=["min"],
+        columns=df.columns,
+    )
+    tm.assert_frame_equal(result, expected)
+
+    msg = "does not support reduction"
+    with pytest.raises(TypeError, match=msg):
+        df.agg("sum")
+
+    result = df[["A", "B", "C"]].agg("sum")
+    expected = Series([6, 6.0, "foobarbaz"], index=["A", "B", "C"])
+    tm.assert_series_equal(result, expected)
+
+    msg = "does not support reduction"
+    with pytest.raises(TypeError, match=msg):
+        df.agg(["sum"])
+
+
+@pytest.mark.parametrize("how", ["agg", "apply"])
+def test_non_callable_aggregates(how):
+    # GH 16405
+    # 'size' is a property of frame/series
+    # validate that this is working
+    # GH 39116 - expand to apply
+    df = DataFrame(
+        {"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]}
+    )
+
+    #
Function aggregate + result = getattr(df, how)({"A": "count"}) + expected = Series({"A": 2}) + + tm.assert_series_equal(result, expected) + + # Non-function aggregate + result = getattr(df, how)({"A": "size"}) + expected = Series({"A": 3}) + + tm.assert_series_equal(result, expected) + + # Mix function and non-function aggs + result1 = getattr(df, how)(["count", "size"]) + result2 = getattr(df, how)( + {"A": ["count", "size"], "B": ["count", "size"], "C": ["count", "size"]} + ) + expected = DataFrame( + { + "A": {"count": 2, "size": 3}, + "B": {"count": 2, "size": 3}, + "C": {"count": 2, "size": 3}, + } + ) + + tm.assert_frame_equal(result1, result2, check_like=True) + tm.assert_frame_equal(result2, expected, check_like=True) + + # Just functional string arg is same as calling df.arg() + result = getattr(df, how)("count") + expected = df.count() + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("how", ["agg", "apply"]) +def test_size_as_str(how, axis): + # GH 39934 + df = DataFrame( + {"A": [None, 2, 3], "B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]} + ) + # Just a string attribute arg same as calling df.arg + # on the columns + result = getattr(df, how)("size", axis=axis) + if axis in (0, "index"): + expected = Series(df.shape[0], index=df.columns) + else: + expected = Series(df.shape[1], index=df.index) + tm.assert_series_equal(result, expected) + + +def test_agg_listlike_result(): + # GH-29587 user defined function returning list-likes + df = DataFrame({"A": [2, 2, 3], "B": [1.5, np.nan, 1.5], "C": ["foo", None, "bar"]}) + + def func(group_col): + return list(group_col.dropna().unique()) + + result = df.agg(func) + expected = Series([[2, 3], [1.5], ["foo", "bar"]], index=["A", "B", "C"]) + tm.assert_series_equal(result, expected) + + result = df.agg([func]) + expected = expected.to_frame("func").T + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("axis", [0, 1]) +@pytest.mark.parametrize( + "args, kwargs", + [ + ((1, 2, 3), {}), + ((8, 7, 15), {}), + ((1, 2), {}), + ((1,), {"b": 2}), + ((), {"a": 1, "b": 2}), + ((), {"a": 2, "b": 1}), + ((), {"a": 1, "b": 2, "c": 3}), + ], +) +def test_agg_args_kwargs(axis, args, kwargs): + def f(x, a, b, c=3): + return x.sum() + (a + b) / c + + df = DataFrame([[1, 2], [3, 4]]) + + if axis == 0: + expected = Series([5.0, 7.0]) + else: + expected = Series([4.0, 8.0]) + + result = df.agg(f, axis, *args, **kwargs) + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("num_cols", [2, 3, 5]) +def test_frequency_is_original(num_cols): + # GH 22150 + index = pd.DatetimeIndex(["1950-06-30", "1952-10-24", "1953-05-29"]) + original = index.copy() + df = DataFrame(1, index=index, columns=range(num_cols)) + df.apply(lambda x: x) + assert index.freq == original.freq + + +def test_apply_datetime_tz_issue(): + # GH 29052 + + timestamps = [ + Timestamp("2019-03-15 12:34:31.909000+0000", tz="UTC"), + Timestamp("2019-03-15 12:34:34.359000+0000", tz="UTC"), + Timestamp("2019-03-15 12:34:34.660000+0000", tz="UTC"), + ] + df = DataFrame(data=[0, 1, 2], index=timestamps) + result = df.apply(lambda x: x.name, axis=1) + expected = Series(index=timestamps, data=timestamps) + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("df", [DataFrame({"A": ["a", None], "B": ["c", "d"]})]) +@pytest.mark.parametrize("method", ["min", "max", "sum"]) +def test_mixed_column_raises(df, method): + # GH 16832 + if method == "sum": + msg = r'can only concatenate str \(not "int"\) to str' + else: + msg 
= "not supported between instances of 'str' and 'float'" + with pytest.raises(TypeError, match=msg): + getattr(df, method)() + + +@pytest.mark.parametrize("col", [1, 1.0, True, "a", np.nan]) +def test_apply_dtype(col): + # GH 31466 + df = DataFrame([[1.0, col]], columns=["a", "b"]) + result = df.apply(lambda x: x.dtype) + expected = df.dtypes + + tm.assert_series_equal(result, expected) + + +def test_apply_mutating(using_array_manager, using_copy_on_write): + # GH#35462 case where applied func pins a new BlockManager to a row + df = DataFrame({"a": range(100), "b": range(100, 200)}) + df_orig = df.copy() + + def func(row): + mgr = row._mgr + row.loc["a"] += 1 + assert row._mgr is not mgr + return row + + expected = df.copy() + expected["a"] += 1 + + result = df.apply(func, axis=1) + + tm.assert_frame_equal(result, expected) + if using_copy_on_write or using_array_manager: + # INFO(CoW) With copy on write, mutating a viewing row doesn't mutate the parent + # INFO(ArrayManager) With BlockManager, the row is a view and mutated in place, + # with ArrayManager the row is not a view, and thus not mutated in place + tm.assert_frame_equal(df, df_orig) + else: + tm.assert_frame_equal(df, result) + + +def test_apply_empty_list_reduce(): + # GH#35683 get columns correct + df = DataFrame([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], columns=["a", "b"]) + + result = df.apply(lambda x: [], result_type="reduce") + expected = Series({"a": [], "b": []}, dtype=object) + tm.assert_series_equal(result, expected) + + +def test_apply_no_suffix_index(): + # GH36189 + pdf = DataFrame([[4, 9]] * 3, columns=["A", "B"]) + result = pdf.apply(["sum", lambda x: x.sum(), lambda x: x.sum()]) + expected = DataFrame( + {"A": [12, 12, 12], "B": [27, 27, 27]}, index=["sum", "", ""] + ) + + tm.assert_frame_equal(result, expected) + + +def test_apply_raw_returns_string(): + # https://github.com/pandas-dev/pandas/issues/35940 + df = DataFrame({"A": ["aa", "bbb"]}) + result = df.apply(lambda x: x[0], axis=1, raw=True) + expected = Series(["aa", "bbb"]) + tm.assert_series_equal(result, expected) + + +def test_aggregation_func_column_order(): + # GH40420: the result of .agg should have an index that is sorted + # according to the arguments provided to agg. + df = DataFrame( + [ + (1, 0, 0), + (2, 0, 0), + (3, 0, 0), + (4, 5, 4), + (5, 6, 6), + (6, 7, 7), + ], + columns=("att1", "att2", "att3"), + ) + + def sum_div2(s): + return s.sum() / 2 + + aggs = ["sum", sum_div2, "count", "min"] + result = df.agg(aggs) + expected = DataFrame( + { + "att1": [21.0, 10.5, 6.0, 1.0], + "att2": [18.0, 9.0, 6.0, 0.0], + "att3": [17.0, 8.5, 6.0, 0.0], + }, + index=["sum", "sum_div2", "count", "min"], + ) + tm.assert_frame_equal(result, expected) + + +def test_apply_getitem_axis_1(): + # GH 13427 + df = DataFrame({"a": [0, 1, 2], "b": [1, 2, 3]}) + result = df[["a", "a"]].apply(lambda x: x.iloc[0] + x.iloc[1], axis=1) + expected = Series([0, 2, 4]) + tm.assert_series_equal(result, expected) + + +def test_nuisance_depr_passes_through_warnings(): + # GH 43740 + # DataFrame.agg with list-likes may emit warnings for both individual + # args and for entire columns, but we only want to emit once. We + # catch and suppress the warnings for individual args, but need to make + # sure if some other warnings were raised, they get passed through to + # the user. 
+ + def expected_warning(x): + warnings.warn("Hello, World!") + return x.sum() + + df = DataFrame({"a": [1, 2, 3]}) + with tm.assert_produces_warning(UserWarning, match="Hello, World!"): + df.agg([expected_warning]) + + +def test_apply_type(): + # GH 46719 + df = DataFrame( + {"col1": [3, "string", float], "col2": [0.25, datetime(2020, 1, 1), np.nan]}, + index=["a", "b", "c"], + ) + + # axis=0 + result = df.apply(type, axis=0) + expected = Series({"col1": Series, "col2": Series}) + tm.assert_series_equal(result, expected) + + # axis=1 + result = df.apply(type, axis=1) + expected = Series({"a": Series, "b": Series, "c": Series}) + tm.assert_series_equal(result, expected) + + +def test_apply_on_empty_dataframe(): + # GH 39111 + df = DataFrame({"a": [1, 2], "b": [3, 0]}) + result = df.head(0).apply(lambda x: max(x["a"], x["b"]), axis=1) + expected = Series([], dtype=np.float64) + tm.assert_series_equal(result, expected) + + +def test_apply_return_list(): + df = DataFrame({"a": [1, 2], "b": [2, 3]}) + result = df.apply(lambda x: [x.values]) + expected = DataFrame({"a": [[1, 2]], "b": [[2, 3]]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "test, constant", + [ + ({"a": [1, 2, 3], "b": [1, 1, 1]}, {"a": [1, 2, 3], "b": [1]}), + ({"a": [2, 2, 2], "b": [1, 1, 1]}, {"a": [2], "b": [1]}), + ], +) +def test_unique_agg_type_is_series(test, constant): + # GH#22558 + df1 = DataFrame(test) + expected = Series(data=constant, index=["a", "b"], dtype="object") + aggregation = {"a": "unique", "b": "unique"} + + result = df1.agg(aggregation) + + tm.assert_series_equal(result, expected) + + +def test_any_apply_keyword_non_zero_axis_regression(): + # https://github.com/pandas-dev/pandas/issues/48656 + df = DataFrame({"A": [1, 2, 0], "B": [0, 2, 0], "C": [0, 0, 0]}) + expected = Series([True, True, False]) + tm.assert_series_equal(df.any(axis=1), expected) + + result = df.apply("any", axis=1) + tm.assert_series_equal(result, expected) + + result = df.apply("any", 1) + tm.assert_series_equal(result, expected) + + +def test_agg_mapping_func_deprecated(): + # GH 53325 + df = DataFrame({"x": [1, 2, 3]}) + + def foo1(x, a=1, c=0): + return x + a + c + + def foo2(x, b=2, c=0): + return x + b + c + + # single func already takes the vectorized path + result = df.agg(foo1, 0, 3, c=4) + expected = df + 7 + tm.assert_frame_equal(result, expected) + + msg = "using .+ in Series.agg cannot aggregate and" + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.agg([foo1, foo2], 0, 3, c=4) + expected = DataFrame( + [[8, 8], [9, 9], [10, 10]], columns=[["x", "x"], ["foo1", "foo2"]] + ) + tm.assert_frame_equal(result, expected) + + # TODO: the result below is wrong, should be fixed (GH53325) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.agg({"x": foo1}, 0, 3, c=4) + expected = DataFrame([2, 3, 4], columns=["x"]) + tm.assert_frame_equal(result, expected) + + +def test_agg_std(): + df = DataFrame(np.arange(6).reshape(3, 2), columns=["A", "B"]) + + with tm.assert_produces_warning(FutureWarning, match="using DataFrame.std"): + result = df.agg(np.std) + expected = Series({"A": 2.0, "B": 2.0}, dtype=float) + tm.assert_series_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match="using Series.std"): + result = df.agg([np.std]) + expected = DataFrame({"A": 2.0, "B": 2.0}, index=["std"]) + tm.assert_frame_equal(result, expected) + + +def test_agg_dist_like_and_nonunique_columns(): + # GH#51099 + df = DataFrame( + {"A": [None, 2, 3], 
"B": [1.0, np.nan, 3.0], "C": ["foo", None, "bar"]} + ) + df.columns = ["A", "A", "C"] + + result = df.agg({"A": "count"}) + expected = df["A"].count() + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_frame_apply_relabeling.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_frame_apply_relabeling.py new file mode 100644 index 00000000..723bdd34 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_frame_apply_relabeling.py @@ -0,0 +1,113 @@ +import numpy as np +import pytest + +from pandas.compat.numpy import np_version_gte1p25 + +import pandas as pd +import pandas._testing as tm + + +def test_agg_relabel(): + # GH 26513 + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]}) + + # simplest case with one column, one func + result = df.agg(foo=("B", "sum")) + expected = pd.DataFrame({"B": [10]}, index=pd.Index(["foo"])) + tm.assert_frame_equal(result, expected) + + # test on same column with different methods + result = df.agg(foo=("B", "sum"), bar=("B", "min")) + expected = pd.DataFrame({"B": [10, 1]}, index=pd.Index(["foo", "bar"])) + + tm.assert_frame_equal(result, expected) + + +def test_agg_relabel_multi_columns_multi_methods(): + # GH 26513, test on multiple columns with multiple methods + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]}) + result = df.agg( + foo=("A", "sum"), + bar=("B", "mean"), + cat=("A", "min"), + dat=("B", "max"), + f=("A", "max"), + g=("C", "min"), + ) + expected = pd.DataFrame( + { + "A": [6.0, np.nan, 1.0, np.nan, 2.0, np.nan], + "B": [np.nan, 2.5, np.nan, 4.0, np.nan, np.nan], + "C": [np.nan, np.nan, np.nan, np.nan, np.nan, 3.0], + }, + index=pd.Index(["foo", "bar", "cat", "dat", "f", "g"]), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.xfail(np_version_gte1p25, reason="name of min now equals name of np.min") +def test_agg_relabel_partial_functions(): + # GH 26513, test on partial, functools or more complex cases + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]}) + msg = "using Series.[mean|min]" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.agg(foo=("A", np.mean), bar=("A", "mean"), cat=("A", min)) + expected = pd.DataFrame( + {"A": [1.5, 1.5, 1.0]}, index=pd.Index(["foo", "bar", "cat"]) + ) + tm.assert_frame_equal(result, expected) + + msg = "using Series.[mean|min|max|sum]" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.agg( + foo=("A", min), + bar=("A", np.min), + cat=("B", max), + dat=("C", "min"), + f=("B", np.sum), + kk=("B", lambda x: min(x)), + ) + expected = pd.DataFrame( + { + "A": [1.0, 1.0, np.nan, np.nan, np.nan, np.nan], + "B": [np.nan, np.nan, 4.0, np.nan, 10.0, 1.0], + "C": [np.nan, np.nan, np.nan, 3.0, np.nan, np.nan], + }, + index=pd.Index(["foo", "bar", "cat", "dat", "f", "kk"]), + ) + tm.assert_frame_equal(result, expected) + + +def test_agg_namedtuple(): + # GH 26513 + df = pd.DataFrame({"A": [0, 1], "B": [1, 2]}) + result = df.agg( + foo=pd.NamedAgg("B", "sum"), + bar=pd.NamedAgg("B", "min"), + cat=pd.NamedAgg(column="B", aggfunc="count"), + fft=pd.NamedAgg("B", aggfunc="max"), + ) + + expected = pd.DataFrame( + {"B": [3, 1, 2, 2]}, index=pd.Index(["foo", "bar", "cat", "fft"]) + ) + tm.assert_frame_equal(result, expected) + + result = df.agg( + foo=pd.NamedAgg("A", "min"), + bar=pd.NamedAgg(column="B", aggfunc="max"), + cat=pd.NamedAgg(column="A", aggfunc="max"), + ) + 
expected = pd.DataFrame( + {"A": [0.0, np.nan, 1.0], "B": [np.nan, 2.0, np.nan]}, + index=pd.Index(["foo", "bar", "cat"]), + ) + tm.assert_frame_equal(result, expected) + + +def test_reconstruct_func(): + # GH 28472, test to ensure reconstruct_func isn't moved; + # This method is used by other libraries (e.g. dask) + result = pd.core.apply.reconstruct_func("min") + expected = (False, "min", None, None) + tm.assert_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_frame_transform.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_frame_transform.py new file mode 100644 index 00000000..2d575158 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_frame_transform.py @@ -0,0 +1,264 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, + Series, +) +import pandas._testing as tm +from pandas.tests.apply.common import frame_transform_kernels +from pandas.tests.frame.common import zip_frames + + +def unpack_obj(obj, klass, axis): + """ + Helper to ensure we have the right type of object for a test parametrized + over frame_or_series. + """ + if klass is not DataFrame: + obj = obj["A"] + if axis != 0: + pytest.skip(f"Test is only for DataFrame with axis={axis}") + return obj + + +def test_transform_ufunc(axis, float_frame, frame_or_series): + # GH 35964 + obj = unpack_obj(float_frame, frame_or_series, axis) + + with np.errstate(all="ignore"): + f_sqrt = np.sqrt(obj) + + # ufunc + result = obj.transform(np.sqrt, axis=axis) + expected = f_sqrt + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "ops, names", + [ + ([np.sqrt], ["sqrt"]), + ([np.abs, np.sqrt], ["absolute", "sqrt"]), + (np.array([np.sqrt]), ["sqrt"]), + (np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]), + ], +) +def test_transform_listlike(axis, float_frame, ops, names): + # GH 35964 + other_axis = 1 if axis in {0, "index"} else 0 + with np.errstate(all="ignore"): + expected = zip_frames([op(float_frame) for op in ops], axis=other_axis) + if axis in {0, "index"}: + expected.columns = MultiIndex.from_product([float_frame.columns, names]) + else: + expected.index = MultiIndex.from_product([float_frame.index, names]) + result = float_frame.transform(ops, axis=axis) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("ops", [[], np.array([])]) +def test_transform_empty_listlike(float_frame, ops, frame_or_series): + obj = unpack_obj(float_frame, frame_or_series, 0) + + with pytest.raises(ValueError, match="No transform functions were provided"): + obj.transform(ops) + + +def test_transform_listlike_func_with_args(): + # GH 50624 + df = DataFrame({"x": [1, 2, 3]}) + + def foo1(x, a=1, c=0): + return x + a + c + + def foo2(x, b=2, c=0): + return x + b + c + + msg = r"foo1\(\) got an unexpected keyword argument 'b'" + with pytest.raises(TypeError, match=msg): + df.transform([foo1, foo2], 0, 3, b=3, c=4) + + result = df.transform([foo1, foo2], 0, 3, c=4) + expected = DataFrame( + [[8, 8], [9, 9], [10, 10]], + columns=MultiIndex.from_tuples([("x", "foo1"), ("x", "foo2")]), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("box", [dict, Series]) +def test_transform_dictlike(axis, float_frame, box): + # GH 35964 + if axis in (0, "index"): + e = float_frame.columns[0] + expected = float_frame[[e]].transform(np.abs) + else: + e = float_frame.index[0] + expected = float_frame.iloc[[0]].transform(np.abs) + result = float_frame.transform(box({e: np.abs}), 
axis=axis) + tm.assert_frame_equal(result, expected) + + +def test_transform_dictlike_mixed(): + # GH 40018 - mix of lists and non-lists in values of a dictionary + df = DataFrame({"a": [1, 2], "b": [1, 4], "c": [1, 4]}) + result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"}) + expected = DataFrame( + [[1.0, 1, 1.0], [2.0, 4, 2.0]], + columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "ops", + [ + {}, + {"A": []}, + {"A": [], "B": "cumsum"}, + {"A": "cumsum", "B": []}, + {"A": [], "B": ["cumsum"]}, + {"A": ["cumsum"], "B": []}, + ], +) +def test_transform_empty_dictlike(float_frame, ops, frame_or_series): + obj = unpack_obj(float_frame, frame_or_series, 0) + + with pytest.raises(ValueError, match="No transform functions were provided"): + obj.transform(ops) + + +@pytest.mark.parametrize("use_apply", [True, False]) +def test_transform_udf(axis, float_frame, use_apply, frame_or_series): + # GH 35964 + obj = unpack_obj(float_frame, frame_or_series, axis) + + # transform uses UDF either via apply or passing the entire DataFrame + def func(x): + # transform is using apply iff x is not a DataFrame + if use_apply == isinstance(x, frame_or_series): + # Force transform to fallback + raise ValueError + return x + 1 + + result = obj.transform(func, axis=axis) + expected = obj + 1 + tm.assert_equal(result, expected) + + +wont_fail = ["ffill", "bfill", "fillna", "pad", "backfill", "shift"] +frame_kernels_raise = [x for x in frame_transform_kernels if x not in wont_fail] + + +@pytest.mark.parametrize("op", [*frame_kernels_raise, lambda x: x + 1]) +def test_transform_bad_dtype(op, frame_or_series, request): + # GH 35964 + if op == "ngroup": + request.node.add_marker( + pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame") + ) + + obj = DataFrame({"A": 3 * [object]}) # DataFrame that will fail on most transforms + obj = tm.get_obj(obj, frame_or_series) + error = TypeError + msg = "|".join( + [ + "not supported between instances of 'type' and 'type'", + "unsupported operand type", + ] + ) + + with pytest.raises(error, match=msg): + obj.transform(op) + with pytest.raises(error, match=msg): + obj.transform([op]) + with pytest.raises(error, match=msg): + obj.transform({"A": op}) + with pytest.raises(error, match=msg): + obj.transform({"A": [op]}) + + +@pytest.mark.parametrize("op", frame_kernels_raise) +def test_transform_failure_typeerror(request, op): + # GH 35964 + + if op == "ngroup": + request.node.add_marker( + pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame") + ) + + # Using object makes most transform kernels fail + df = DataFrame({"A": 3 * [object], "B": [1, 2, 3]}) + error = TypeError + msg = "|".join( + [ + "not supported between instances of 'type' and 'type'", + "unsupported operand type", + ] + ) + + with pytest.raises(error, match=msg): + df.transform([op]) + + with pytest.raises(error, match=msg): + df.transform({"A": op, "B": op}) + + with pytest.raises(error, match=msg): + df.transform({"A": [op], "B": [op]}) + + with pytest.raises(error, match=msg): + df.transform({"A": [op, "shift"], "B": [op]}) + + +def test_transform_failure_valueerror(): + # GH 40211 + def op(x): + if np.sum(np.sum(x)) < 10: + raise ValueError + return x + + df = DataFrame({"A": [1, 2, 3], "B": [400, 500, 600]}) + msg = "Transform function failed" + + with pytest.raises(ValueError, match=msg): + df.transform([op]) + + with pytest.raises(ValueError, match=msg): + 
df.transform({"A": op, "B": op}) + + with pytest.raises(ValueError, match=msg): + df.transform({"A": [op], "B": [op]}) + + with pytest.raises(ValueError, match=msg): + df.transform({"A": [op, "shift"], "B": [op]}) + + +@pytest.mark.parametrize("use_apply", [True, False]) +def test_transform_passes_args(use_apply, frame_or_series): + # GH 35964 + # transform uses UDF either via apply or passing the entire DataFrame + expected_args = [1, 2] + expected_kwargs = {"c": 3} + + def f(x, a, b, c): + # transform is using apply iff x is not a DataFrame + if use_apply == isinstance(x, frame_or_series): + # Force transform to fallback + raise ValueError + assert [a, b] == expected_args + assert c == expected_kwargs["c"] + return x + + frame_or_series([1]).transform(f, 0, *expected_args, **expected_kwargs) + + +def test_transform_empty_dataframe(): + # https://github.com/pandas-dev/pandas/issues/39636 + df = DataFrame([], columns=["col1", "col2"]) + result = df.transform(lambda x: x + 10) + tm.assert_frame_equal(result, df) + + result = df["col1"].transform(lambda x: x + 10) + tm.assert_series_equal(result, df["col1"]) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_invalid_arg.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_invalid_arg.py new file mode 100644 index 00000000..a3d9de5e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_invalid_arg.py @@ -0,0 +1,352 @@ +# Tests specifically aimed at detecting bad arguments. +# This file is organized by reason for exception. +# 1. always invalid argument values +# 2. missing column(s) +# 3. incompatible ops/dtype/args/kwargs +# 4. invalid result shape/type +# If your test does not fit into one of these categories, add to this list. + +from itertools import chain +import re + +import numpy as np +import pytest + +from pandas.errors import SpecificationError + +from pandas import ( + DataFrame, + Series, + date_range, + notna, +) +import pandas._testing as tm + + +@pytest.mark.parametrize("result_type", ["foo", 1]) +def test_result_type_error(result_type, int_frame_const_col): + # allowed result_type + df = int_frame_const_col + + msg = ( + "invalid value for result_type, must be one of " + "{None, 'reduce', 'broadcast', 'expand'}" + ) + with pytest.raises(ValueError, match=msg): + df.apply(lambda x: [1, 2, 3], axis=1, result_type=result_type) + + +def test_apply_invalid_axis_value(): + df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"]) + msg = "No axis named 2 for object type DataFrame" + with pytest.raises(ValueError, match=msg): + df.apply(lambda x: x, 2) + + +def test_agg_raises(): + # GH 26513 + df = DataFrame({"A": [0, 1], "B": [1, 2]}) + msg = "Must provide" + + with pytest.raises(TypeError, match=msg): + df.agg() + + +def test_map_with_invalid_na_action_raises(): + # https://github.com/pandas-dev/pandas/issues/32815 + s = Series([1, 2, 3]) + msg = "na_action must either be 'ignore' or None" + with pytest.raises(ValueError, match=msg): + s.map(lambda x: x, na_action="____") + + +@pytest.mark.parametrize("input_na_action", ["____", True]) +def test_map_arg_is_dict_with_invalid_na_action_raises(input_na_action): + # https://github.com/pandas-dev/pandas/issues/46588 + s = Series([1, 2, 3]) + msg = f"na_action must either be 'ignore' or None, {input_na_action} was passed" + with pytest.raises(ValueError, match=msg): + s.map({1: 2}, na_action=input_na_action) + + +@pytest.mark.parametrize("method", ["apply", "agg", "transform"]) 
+@pytest.mark.parametrize("func", [{"A": {"B": "sum"}}, {"A": {"B": ["sum"]}}]) +def test_nested_renamer(frame_or_series, method, func): + # GH 35964 + obj = frame_or_series({"A": [1]}) + match = "nested renamer is not supported" + with pytest.raises(SpecificationError, match=match): + getattr(obj, method)(func) + + +@pytest.mark.parametrize( + "renamer", + [{"foo": ["min", "max"]}, {"foo": ["min", "max"], "bar": ["sum", "mean"]}], +) +def test_series_nested_renamer(renamer): + s = Series(range(6), dtype="int64", name="series") + msg = "nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + s.agg(renamer) + + +def test_apply_dict_depr(): + tsdf = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), + columns=["A", "B", "C"], + index=date_range("1/1/2000", periods=10), + ) + msg = "nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + tsdf.A.agg({"foo": ["sum", "mean"]}) + + +@pytest.mark.parametrize("method", ["agg", "transform"]) +def test_dict_nested_renaming_depr(method): + df = DataFrame({"A": range(5), "B": 5}) + + # nested renaming + msg = r"nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + getattr(df, method)({"A": {"foo": "min"}, "B": {"bar": "max"}}) + + +@pytest.mark.parametrize("method", ["apply", "agg", "transform"]) +@pytest.mark.parametrize("func", [{"B": "sum"}, {"B": ["sum"]}]) +def test_missing_column(method, func): + # GH 40004 + obj = DataFrame({"A": [1]}) + match = re.escape("Column(s) ['B'] do not exist") + with pytest.raises(KeyError, match=match): + getattr(obj, method)(func) + + +def test_transform_mixed_column_name_dtypes(): + # GH39025 + df = DataFrame({"a": ["1"]}) + msg = r"Column\(s\) \[1, 'b'\] do not exist" + with pytest.raises(KeyError, match=msg): + df.transform({"a": int, 1: str, "b": int}) + + +@pytest.mark.parametrize( + "how, args", [("pct_change", ()), ("nsmallest", (1, ["a", "b"])), ("tail", 1)] +) +def test_apply_str_axis_1_raises(how, args): + # GH 39211 - some ops don't support axis=1 + df = DataFrame({"a": [1, 2], "b": [3, 4]}) + msg = f"Operation {how} does not support axis=1" + with pytest.raises(ValueError, match=msg): + df.apply(how, axis=1, args=args) + + +def test_transform_axis_1_raises(): + # GH 35964 + msg = "No axis named 1 for object type Series" + with pytest.raises(ValueError, match=msg): + Series([1]).transform("sum", axis=1) + + +def test_apply_modify_traceback(): + data = DataFrame( + { + "A": [ + "foo", + "foo", + "foo", + "foo", + "bar", + "bar", + "bar", + "bar", + "foo", + "foo", + "foo", + ], + "B": [ + "one", + "one", + "one", + "two", + "one", + "one", + "one", + "two", + "two", + "two", + "one", + ], + "C": [ + "dull", + "dull", + "shiny", + "dull", + "dull", + "shiny", + "shiny", + "dull", + "shiny", + "shiny", + "shiny", + ], + "D": np.random.default_rng(2).standard_normal(11), + "E": np.random.default_rng(2).standard_normal(11), + "F": np.random.default_rng(2).standard_normal(11), + } + ) + + data.loc[4, "C"] = np.nan + + def transform(row): + if row["C"].startswith("shin") and row["A"] == "foo": + row["D"] = 7 + return row + + def transform2(row): + if notna(row["C"]) and row["C"].startswith("shin") and row["A"] == "foo": + row["D"] = 7 + return row + + msg = "'float' object has no attribute 'startswith'" + with pytest.raises(AttributeError, match=msg): + data.apply(transform, axis=1) + + +@pytest.mark.parametrize( + "df, func, expected", + tm.get_cython_table_params( + DataFrame([["a", "b"], ["b", "a"]]), 
[["cumprod", TypeError]] + ), +) +def test_agg_cython_table_raises_frame(df, func, expected, axis): + # GH 21224 + msg = "can't multiply sequence by non-int of type 'str'" + warn = None if isinstance(func, str) else FutureWarning + with pytest.raises(expected, match=msg): + with tm.assert_produces_warning(warn, match="using DataFrame.cumprod"): + df.agg(func, axis=axis) + + +@pytest.mark.parametrize( + "series, func, expected", + chain( + tm.get_cython_table_params( + Series("a b c".split()), + [ + ("mean", TypeError), # mean raises TypeError + ("prod", TypeError), + ("std", TypeError), + ("var", TypeError), + ("median", TypeError), + ("cumprod", TypeError), + ], + ) + ), +) +def test_agg_cython_table_raises_series(series, func, expected): + # GH21224 + msg = r"[Cc]ould not convert|can't multiply sequence by non-int of type" + if func == "median" or func is np.nanmedian or func is np.median: + msg = r"Cannot convert \['a' 'b' 'c'\] to numeric" + warn = None if isinstance(func, str) else FutureWarning + + with pytest.raises(expected, match=msg): + # e.g. Series('a b'.split()).cumprod() will raise + with tm.assert_produces_warning(warn, match="is currently using Series.*"): + series.agg(func) + + +def test_agg_none_to_type(): + # GH 40543 + df = DataFrame({"a": [None]}) + msg = re.escape("int() argument must be a string") + with pytest.raises(TypeError, match=msg): + df.agg({"a": lambda x: int(x.iloc[0])}) + + +def test_transform_none_to_type(): + # GH#34377 + df = DataFrame({"a": [None]}) + msg = "argument must be a" + with pytest.raises(TypeError, match=msg): + df.transform({"a": lambda x: int(x.iloc[0])}) + + +@pytest.mark.parametrize( + "func", + [ + lambda x: np.array([1, 2]).reshape(-1, 2), + lambda x: [1, 2], + lambda x: Series([1, 2]), + ], +) +def test_apply_broadcast_error(int_frame_const_col, func): + df = int_frame_const_col + + # > 1 ndim + msg = "too many dims to broadcast|cannot broadcast result" + with pytest.raises(ValueError, match=msg): + df.apply(func, axis=1, result_type="broadcast") + + +def test_transform_and_agg_err_agg(axis, float_frame): + # cannot both transform and agg + msg = "cannot combine transform and aggregation operations" + with pytest.raises(ValueError, match=msg): + with np.errstate(all="ignore"): + float_frame.agg(["max", "sqrt"], axis=axis) + + +@pytest.mark.filterwarnings("ignore::FutureWarning") # GH53325 +@pytest.mark.parametrize( + "func, msg", + [ + (["sqrt", "max"], "cannot combine transform and aggregation"), + ( + {"foo": np.sqrt, "bar": "sum"}, + "cannot perform both aggregation and transformation", + ), + ], +) +def test_transform_and_agg_err_series(string_series, func, msg): + # we are trying to transform with an aggregator + with pytest.raises(ValueError, match=msg): + with np.errstate(all="ignore"): + string_series.agg(func) + + +@pytest.mark.parametrize("func", [["max", "min"], ["max", "sqrt"]]) +def test_transform_wont_agg_frame(axis, float_frame, func): + # GH 35964 + # cannot both transform and agg + msg = "Function did not transform" + with pytest.raises(ValueError, match=msg): + float_frame.transform(func, axis=axis) + + +@pytest.mark.parametrize("func", [["min", "max"], ["sqrt", "max"]]) +def test_transform_wont_agg_series(string_series, func): + # GH 35964 + # we are trying to transform with an aggregator + msg = "Function did not transform" + + warn = RuntimeWarning if func[0] == "sqrt" else None + warn_msg = "invalid value encountered in sqrt" + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(warn, 
match=warn_msg, check_stacklevel=False): + string_series.transform(func) + + +@pytest.mark.parametrize( + "op_wrapper", [lambda x: x, lambda x: [x], lambda x: {"A": x}, lambda x: {"A": [x]}] +) +def test_transform_reducer_raises(all_reductions, frame_or_series, op_wrapper): + # GH 35964 + op = op_wrapper(all_reductions) + + obj = DataFrame({"A": [1, 2, 3]}) + obj = tm.get_obj(obj, frame_or_series) + + msg = "Function did not transform" + with pytest.raises(ValueError, match=msg): + obj.transform(op) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_series_apply.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_series_apply.py new file mode 100644 index 00000000..aeb6a01e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_series_apply.py @@ -0,0 +1,689 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + concat, + timedelta_range, +) +import pandas._testing as tm +from pandas.tests.apply.common import series_transform_kernels + + +@pytest.fixture(params=[False, "compat"]) +def by_row(request): + return request.param + + +def test_series_map_box_timedelta(by_row): + # GH#11349 + ser = Series(timedelta_range("1 day 1 s", periods=3, freq="h")) + + def f(x): + return x.total_seconds() if by_row else x.dt.total_seconds() + + result = ser.apply(f, by_row=by_row) + + expected = ser.map(lambda x: x.total_seconds()) + tm.assert_series_equal(result, expected) + + expected = Series([86401.0, 90001.0, 93601.0]) + tm.assert_series_equal(result, expected) + + +def test_apply(datetime_series, by_row): + result = datetime_series.apply(np.sqrt, by_row=by_row) + with np.errstate(all="ignore"): + expected = np.sqrt(datetime_series) + tm.assert_series_equal(result, expected) + + # element-wise apply (ufunc) + result = datetime_series.apply(np.exp, by_row=by_row) + expected = np.exp(datetime_series) + tm.assert_series_equal(result, expected) + + # empty series + s = Series(dtype=object, name="foo", index=Index([], name="bar")) + rs = s.apply(lambda x: x, by_row=by_row) + tm.assert_series_equal(s, rs) + + # check all metadata (GH 9322) + assert s is not rs + assert s.index is rs.index + assert s.dtype == rs.dtype + assert s.name == rs.name + + # index but no data + s = Series(index=[1, 2, 3], dtype=np.float64) + rs = s.apply(lambda x: x, by_row=by_row) + tm.assert_series_equal(s, rs) + + +def test_apply_map_same_length_inference_bug(): + s = Series([1, 2]) + + def f(x): + return (x, x + 1) + + result = s.apply(f, by_row="compat") + expected = s.map(f) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("convert_dtype", [True, False]) +def test_apply_convert_dtype_deprecated(convert_dtype): + ser = Series(np.random.default_rng(2).standard_normal(10)) + + def func(x): + return x if x > 0 else np.nan + + with tm.assert_produces_warning(FutureWarning): + ser.apply(func, convert_dtype=convert_dtype, by_row="compat") + + +def test_apply_args(): + s = Series(["foo,bar"]) + + result = s.apply(str.split, args=(",",)) + assert result[0] == ["foo", "bar"] + assert isinstance(result[0], list) + + +@pytest.mark.parametrize( + "args, kwargs, increment", + [((), {}, 0), ((), {"a": 1}, 1), ((2, 3), {}, 32), ((1,), {"c": 2}, 201)], +) +def test_agg_args(args, kwargs, increment): + # GH 43357 + def f(x, a=0, b=0, c=0): + return x + a + 10 * b + 100 * c + + s = Series([1, 2]) + msg = ( + "in Series.agg cannot aggregate and has been deprecated. 
" + "Use Series.transform to keep behavior unchanged." + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s.agg(f, 0, *args, **kwargs) + expected = s + increment + tm.assert_series_equal(result, expected) + + +def test_agg_mapping_func_deprecated(): + # GH 53325 + s = Series([1, 2, 3]) + + def foo1(x, a=1, c=0): + return x + a + c + + def foo2(x, b=2, c=0): + return x + b + c + + msg = "using .+ in Series.agg cannot aggregate and" + with tm.assert_produces_warning(FutureWarning, match=msg): + s.agg(foo1, 0, 3, c=4) + with tm.assert_produces_warning(FutureWarning, match=msg): + s.agg([foo1, foo2], 0, 3, c=4) + with tm.assert_produces_warning(FutureWarning, match=msg): + s.agg({"a": foo1, "b": foo2}, 0, 3, c=4) + + +def test_series_apply_map_box_timestamps(by_row): + # GH#2689, GH#2627 + ser = Series(pd.date_range("1/1/2000", periods=10)) + + def func(x): + return (x.hour, x.day, x.month) + + if not by_row: + msg = "Series' object has no attribute 'hour'" + with pytest.raises(AttributeError, match=msg): + ser.apply(func, by_row=by_row) + return + + result = ser.apply(func, by_row=by_row) + expected = ser.map(func) + tm.assert_series_equal(result, expected) + + +def test_apply_box(): + # ufunc will not be boxed. Same test cases as the test_map_box + vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")] + s = Series(vals) + assert s.dtype == "datetime64[ns]" + # boxed value must be Timestamp instance + res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}", by_row="compat") + exp = Series(["Timestamp_1_None", "Timestamp_2_None"]) + tm.assert_series_equal(res, exp) + + vals = [ + pd.Timestamp("2011-01-01", tz="US/Eastern"), + pd.Timestamp("2011-01-02", tz="US/Eastern"), + ] + s = Series(vals) + assert s.dtype == "datetime64[ns, US/Eastern]" + res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}", by_row="compat") + exp = Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"]) + tm.assert_series_equal(res, exp) + + # timedelta + vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")] + s = Series(vals) + assert s.dtype == "timedelta64[ns]" + res = s.apply(lambda x: f"{type(x).__name__}_{x.days}", by_row="compat") + exp = Series(["Timedelta_1", "Timedelta_2"]) + tm.assert_series_equal(res, exp) + + # period + vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")] + s = Series(vals) + assert s.dtype == "Period[M]" + res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}", by_row="compat") + exp = Series(["Period_M", "Period_M"]) + tm.assert_series_equal(res, exp) + + +def test_apply_datetimetz(by_row): + values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize( + "Asia/Tokyo" + ) + s = Series(values, name="XX") + + result = s.apply(lambda x: x + pd.offsets.Day(), by_row=by_row) + exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize( + "Asia/Tokyo" + ) + exp = Series(exp_values, name="XX") + tm.assert_series_equal(result, exp) + + result = s.apply(lambda x: x.hour if by_row else x.dt.hour, by_row=by_row) + exp = Series(list(range(24)) + [0], name="XX", dtype="int64" if by_row else "int32") + tm.assert_series_equal(result, exp) + + # not vectorized + def f(x): + return str(x.tz) if by_row else str(x.dt.tz) + + result = s.apply(f, by_row=by_row) + if by_row: + exp = Series(["Asia/Tokyo"] * 25, name="XX") + tm.assert_series_equal(result, exp) + else: + result == "Asia/Tokyo" + + +def test_apply_categorical(by_row): + values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), 
ordered=True) + ser = Series(values, name="XX", index=list("abcdefg")) + + if not by_row: + msg = "Series' object has no attribute 'lower" + with pytest.raises(AttributeError, match=msg): + ser.apply(lambda x: x.lower(), by_row=by_row) + assert ser.apply(lambda x: "A", by_row=by_row) == "A" + return + + result = ser.apply(lambda x: x.lower(), by_row=by_row) + + # should be categorical dtype when the number of categories are + # the same + values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True) + exp = Series(values, name="XX", index=list("abcdefg")) + tm.assert_series_equal(result, exp) + tm.assert_categorical_equal(result.values, exp.values) + + result = ser.apply(lambda x: "A") + exp = Series(["A"] * 7, name="XX", index=list("abcdefg")) + tm.assert_series_equal(result, exp) + assert result.dtype == object + + +@pytest.mark.parametrize("series", [["1-1", "1-1", np.nan], ["1-1", "1-2", np.nan]]) +def test_apply_categorical_with_nan_values(series, by_row): + # GH 20714 bug fixed in: GH 24275 + s = Series(series, dtype="category") + if not by_row: + msg = "'Series' object has no attribute 'split'" + with pytest.raises(AttributeError, match=msg): + s.apply(lambda x: x.split("-")[0], by_row=by_row) + return + + result = s.apply(lambda x: x.split("-")[0], by_row=by_row) + result = result.astype(object) + expected = Series(["1", "1", np.nan], dtype="category") + expected = expected.astype(object) + tm.assert_series_equal(result, expected) + + +def test_apply_empty_integer_series_with_datetime_index(by_row): + # GH 21245 + s = Series([], index=pd.date_range(start="2018-01-01", periods=0), dtype=int) + result = s.apply(lambda x: x, by_row=by_row) + tm.assert_series_equal(result, s) + + +def test_apply_dataframe_iloc(): + uintDF = DataFrame(np.uint64([1, 2, 3, 4, 5]), columns=["Numbers"]) + indexDF = DataFrame([2, 3, 2, 1, 2], columns=["Indices"]) + + def retrieve(targetRow, targetDF): + val = targetDF["Numbers"].iloc[targetRow] + return val + + result = indexDF["Indices"].apply(retrieve, args=(uintDF,)) + expected = Series([3, 4, 3, 2, 3], name="Indices", dtype="uint64") + tm.assert_series_equal(result, expected) + + +def test_transform(string_series, by_row): + # transforming functions + + with np.errstate(all="ignore"): + f_sqrt = np.sqrt(string_series) + f_abs = np.abs(string_series) + + # ufunc + result = string_series.apply(np.sqrt, by_row=by_row) + expected = f_sqrt.copy() + tm.assert_series_equal(result, expected) + + # list-like + result = string_series.apply([np.sqrt], by_row=by_row) + expected = f_sqrt.to_frame().copy() + expected.columns = ["sqrt"] + tm.assert_frame_equal(result, expected) + + result = string_series.apply(["sqrt"], by_row=by_row) + tm.assert_frame_equal(result, expected) + + # multiple items in list + # these are in the order as if we are applying both functions per + # series and then concatting + expected = concat([f_sqrt, f_abs], axis=1) + expected.columns = ["sqrt", "absolute"] + result = string_series.apply([np.sqrt, np.abs], by_row=by_row) + tm.assert_frame_equal(result, expected) + + # dict, provide renaming + expected = concat([f_sqrt, f_abs], axis=1) + expected.columns = ["foo", "bar"] + expected = expected.unstack().rename("series") + + result = string_series.apply({"foo": np.sqrt, "bar": np.abs}, by_row=by_row) + tm.assert_series_equal(result.reindex_like(expected), expected) + + +@pytest.mark.parametrize("op", series_transform_kernels) +def test_transform_partial_failure(op, request): + # GH 35964 + if op in ("ffill", "bfill", "pad", 
"backfill", "shift"): + request.node.add_marker( + pytest.mark.xfail(reason=f"{op} is successful on any dtype") + ) + + # Using object makes most transform kernels fail + ser = Series(3 * [object]) + + if op in ("fillna", "ngroup"): + error = ValueError + msg = "Transform function failed" + else: + error = TypeError + msg = "|".join( + [ + "not supported between instances of 'type' and 'type'", + "unsupported operand type", + ] + ) + + with pytest.raises(error, match=msg): + ser.transform([op, "shift"]) + + with pytest.raises(error, match=msg): + ser.transform({"A": op, "B": "shift"}) + + with pytest.raises(error, match=msg): + ser.transform({"A": [op], "B": ["shift"]}) + + with pytest.raises(error, match=msg): + ser.transform({"A": [op, "shift"], "B": [op]}) + + +def test_transform_partial_failure_valueerror(): + # GH 40211 + def noop(x): + return x + + def raising_op(_): + raise ValueError + + ser = Series(3 * [object]) + msg = "Transform function failed" + + with pytest.raises(ValueError, match=msg): + ser.transform([noop, raising_op]) + + with pytest.raises(ValueError, match=msg): + ser.transform({"A": raising_op, "B": noop}) + + with pytest.raises(ValueError, match=msg): + ser.transform({"A": [raising_op], "B": [noop]}) + + with pytest.raises(ValueError, match=msg): + ser.transform({"A": [noop, raising_op], "B": [noop]}) + + +def test_demo(): + # demonstration tests + s = Series(range(6), dtype="int64", name="series") + + result = s.agg(["min", "max"]) + expected = Series([0, 5], index=["min", "max"], name="series") + tm.assert_series_equal(result, expected) + + result = s.agg({"foo": "min"}) + expected = Series([0], index=["foo"], name="series") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("func", [str, lambda x: str(x)]) +def test_apply_map_evaluate_lambdas_the_same(string_series, func, by_row): + # test that we are evaluating row-by-row first if by_row="compat" + # else vectorized evaluation + result = string_series.apply(func, by_row=by_row) + + if by_row: + expected = string_series.map(func) + tm.assert_series_equal(result, expected) + else: + assert result == str(string_series) + + +def test_agg_evaluate_lambdas(string_series): + # GH53325 + # in the future, the result will be a Series class. 
+ + with tm.assert_produces_warning(FutureWarning): + result = string_series.agg(lambda x: type(x)) + assert isinstance(result, Series) and len(result) == len(string_series) + + with tm.assert_produces_warning(FutureWarning): + result = string_series.agg(type) + assert isinstance(result, Series) and len(result) == len(string_series) + + +@pytest.mark.parametrize("op_name", ["agg", "apply"]) +def test_with_nested_series(datetime_series, op_name): + # GH 2316 + # .agg with a reducer and a transform, what to do + msg = "cannot aggregate" + warning = FutureWarning if op_name == "agg" else None + with tm.assert_produces_warning(warning, match=msg): + # GH52123 + result = getattr(datetime_series, op_name)( + lambda x: Series([x, x**2], index=["x", "x^2"]) + ) + expected = DataFrame({"x": datetime_series, "x^2": datetime_series**2}) + tm.assert_frame_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = datetime_series.agg(lambda x: Series([x, x**2], index=["x", "x^2"])) + tm.assert_frame_equal(result, expected) + + +def test_replicate_describe(string_series): + # this also tests a result set that is all scalars + expected = string_series.describe() + result = string_series.apply( + { + "count": "count", + "mean": "mean", + "std": "std", + "min": "min", + "25%": lambda x: x.quantile(0.25), + "50%": "median", + "75%": lambda x: x.quantile(0.75), + "max": "max", + }, + ) + tm.assert_series_equal(result, expected) + + +def test_reduce(string_series): + # reductions with named functions + result = string_series.agg(["sum", "mean"]) + expected = Series( + [string_series.sum(), string_series.mean()], + ["sum", "mean"], + name=string_series.name, + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "how, kwds", + [("agg", {}), ("apply", {"by_row": "compat"}), ("apply", {"by_row": False})], +) +def test_non_callable_aggregates(how, kwds): + # test agg using non-callable series attributes + # GH 39116 - expand to apply + s = Series([1, 2, None]) + + # Calling agg w/ just a string arg same as calling s.arg + result = getattr(s, how)("size", **kwds) + expected = s.size + assert result == expected + + # test when mixed w/ callable reducers + result = getattr(s, how)(["size", "count", "mean"], **kwds) + expected = Series({"size": 3.0, "count": 2.0, "mean": 1.5}) + tm.assert_series_equal(result, expected) + + result = getattr(s, how)({"size": "size", "count": "count", "mean": "mean"}, **kwds) + tm.assert_series_equal(result, expected) + + +def test_series_apply_no_suffix_index(by_row): + # GH36189 + s = Series([4] * 3) + result = s.apply(["sum", lambda x: x.sum(), lambda x: x.sum()], by_row=by_row) + expected = Series([12, 12, 12], index=["sum", "", ""]) + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "dti,exp", + [ + ( + Series([1, 2], index=pd.DatetimeIndex([0, 31536000000])), + DataFrame(np.repeat([[1, 2]], 2, axis=0), dtype="int64"), + ), + ( + tm.makeTimeSeries(nper=30), + DataFrame(np.repeat([[1, 2]], 30, axis=0), dtype="int64"), + ), + ], +) +@pytest.mark.parametrize("aware", [True, False]) +def test_apply_series_on_date_time_index_aware_series(dti, exp, aware): + # GH 25959 + # Calling apply on a localized time series should not cause an error + if aware: + index = dti.tz_localize("UTC").index + else: + index = dti.index + result = Series(index).apply(lambda x: Series([1, 2])) + tm.assert_frame_equal(result, exp) + + +@pytest.mark.parametrize( + "by_row, expected", [("compat", Series(np.ones(30), 
dtype="int64")), (False, 1)] +) +def test_apply_scalar_on_date_time_index_aware_series(by_row, expected): + # GH 25959 + # Calling apply on a localized time series should not cause an error + series = tm.makeTimeSeries(nper=30).tz_localize("UTC") + result = Series(series.index).apply(lambda x: 1, by_row=by_row) + tm.assert_equal(result, expected) + + +def test_apply_to_timedelta(by_row): + list_of_valid_strings = ["00:00:01", "00:00:02"] + a = pd.to_timedelta(list_of_valid_strings) + b = Series(list_of_valid_strings).apply(pd.to_timedelta, by_row=by_row) + tm.assert_series_equal(Series(a), b) + + list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT] + + a = pd.to_timedelta(list_of_strings) + ser = Series(list_of_strings) + b = ser.apply(pd.to_timedelta, by_row=by_row) + tm.assert_series_equal(Series(a), b) + + +@pytest.mark.parametrize( + "ops, names", + [ + ([np.sum], ["sum"]), + ([np.sum, np.mean], ["sum", "mean"]), + (np.array([np.sum]), ["sum"]), + (np.array([np.sum, np.mean]), ["sum", "mean"]), + ], +) +@pytest.mark.parametrize( + "how, kwargs", + [["agg", {}], ["apply", {"by_row": "compat"}], ["apply", {"by_row": False}]], +) +def test_apply_listlike_reducer(string_series, ops, names, how, kwargs): + # GH 39140 + expected = Series({name: op(string_series) for name, op in zip(names, ops)}) + expected.name = "series" + warn = FutureWarning if how == "agg" else None + msg = f"using Series.[{'|'.join(names)}]" + with tm.assert_produces_warning(warn, match=msg): + result = getattr(string_series, how)(ops, **kwargs) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "ops", + [ + {"A": np.sum}, + {"A": np.sum, "B": np.mean}, + Series({"A": np.sum}), + Series({"A": np.sum, "B": np.mean}), + ], +) +@pytest.mark.parametrize( + "how, kwargs", + [["agg", {}], ["apply", {"by_row": "compat"}], ["apply", {"by_row": False}]], +) +def test_apply_dictlike_reducer(string_series, ops, how, kwargs, by_row): + # GH 39140 + expected = Series({name: op(string_series) for name, op in ops.items()}) + expected.name = string_series.name + warn = FutureWarning if how == "agg" else None + msg = "using Series.[sum|mean]" + with tm.assert_produces_warning(warn, match=msg): + result = getattr(string_series, how)(ops, **kwargs) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "ops, names", + [ + ([np.sqrt], ["sqrt"]), + ([np.abs, np.sqrt], ["absolute", "sqrt"]), + (np.array([np.sqrt]), ["sqrt"]), + (np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]), + ], +) +def test_apply_listlike_transformer(string_series, ops, names, by_row): + # GH 39140 + with np.errstate(all="ignore"): + expected = concat([op(string_series) for op in ops], axis=1) + expected.columns = names + result = string_series.apply(ops, by_row=by_row) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "ops, expected", + [ + ([lambda x: x], DataFrame({"": [1, 2, 3]})), + ([lambda x: x.sum()], Series([6], index=[""])), + ], +) +def test_apply_listlike_lambda(ops, expected, by_row): + # GH53400 + ser = Series([1, 2, 3]) + result = ser.apply(ops, by_row=by_row) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "ops", + [ + {"A": np.sqrt}, + {"A": np.sqrt, "B": np.exp}, + Series({"A": np.sqrt}), + Series({"A": np.sqrt, "B": np.exp}), + ], +) +def test_apply_dictlike_transformer(string_series, ops, by_row): + # GH 39140 + with np.errstate(all="ignore"): + expected = concat({name: op(string_series) for name, op in ops.items()}) + expected.name = string_series.name + 
result = string_series.apply(ops, by_row=by_row) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "ops, expected", + [ + ( + {"a": lambda x: x}, + Series([1, 2, 3], index=MultiIndex.from_arrays([["a"] * 3, range(3)])), + ), + ({"a": lambda x: x.sum()}, Series([6], index=["a"])), + ], +) +def test_apply_dictlike_lambda(ops, by_row, expected): + # GH53400 + ser = Series([1, 2, 3]) + result = ser.apply(ops, by_row=by_row) + tm.assert_equal(result, expected) + + +def test_apply_retains_column_name(by_row): + # GH 16380 + df = DataFrame({"x": range(3)}, Index(range(3), name="x")) + result = df.x.apply(lambda x: Series(range(x + 1), Index(range(x + 1), name="y"))) + expected = DataFrame( + [[0.0, np.nan, np.nan], [0.0, 1.0, np.nan], [0.0, 1.0, 2.0]], + columns=Index(range(3), name="y"), + index=Index(range(3), name="x"), + ) + tm.assert_frame_equal(result, expected) + + +def test_apply_type(): + # GH 46719 + s = Series([3, "string", float], index=["a", "b", "c"]) + result = s.apply(type) + expected = Series([int, str, type], index=["a", "b", "c"]) + tm.assert_series_equal(result, expected) + + +def test_series_apply_unpack_nested_data(): + # GH#55189 + ser = Series([[1, 2, 3], [4, 5, 6, 7]]) + result = ser.apply(lambda x: Series(x)) + expected = DataFrame({0: [1.0, 4.0], 1: [2.0, 5.0], 2: [3.0, 6.0], 3: [np.nan, 7]}) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_series_apply_relabeling.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_series_apply_relabeling.py new file mode 100644 index 00000000..cdfa054f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_series_apply_relabeling.py @@ -0,0 +1,39 @@ +import pandas as pd +import pandas._testing as tm + + +def test_relabel_no_duplicated_method(): + # this is to test there is no duplicated method used in agg + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4]}) + + result = df["A"].agg(foo="sum") + expected = df["A"].agg({"foo": "sum"}) + tm.assert_series_equal(result, expected) + + result = df["B"].agg(foo="min", bar="max") + expected = df["B"].agg({"foo": "min", "bar": "max"}) + tm.assert_series_equal(result, expected) + + msg = "using Series.[sum|min|max]" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df["B"].agg(foo=sum, bar=min, cat="max") + msg = "using Series.[sum|min|max]" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df["B"].agg({"foo": sum, "bar": min, "cat": "max"}) + tm.assert_series_equal(result, expected) + + +def test_relabel_duplicated_method(): + # this is to test with nested renaming, duplicated method can be used + # if they are assigned with different new names + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4]}) + + result = df["A"].agg(foo="sum", bar="sum") + expected = pd.Series([6, 6], index=["foo", "bar"], name="A") + tm.assert_series_equal(result, expected) + + msg = "using Series.min" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df["B"].agg(foo=min, bar="min") + expected = pd.Series([1, 1], index=["foo", "bar"], name="B") + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_series_transform.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_series_transform.py new file mode 100644 index 00000000..82592c47 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_series_transform.py 
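# (Editor's note, not part of the diff: the relabeling tests above exercise
# Series named aggregation. A minimal sketch, assuming pandas >= 0.25
# keyword-relabeling semantics:
#
#     import pandas as pd
#     s = pd.Series([1, 2, 3, 4], name="B")
#     out = s.agg(foo="min", bar="max")
#     # keyword names become the result index: out["foo"] == 1, out["bar"] == 4
#     # equivalent to s.agg({"foo": "min", "bar": "max"})
# )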
@@ -0,0 +1,84 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, + Series, + concat, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "args, kwargs, increment", + [((), {}, 0), ((), {"a": 1}, 1), ((2, 3), {}, 32), ((1,), {"c": 2}, 201)], +) +def test_agg_args(args, kwargs, increment): + # GH 43357 + def f(x, a=0, b=0, c=0): + return x + a + 10 * b + 100 * c + + s = Series([1, 2]) + result = s.transform(f, 0, *args, **kwargs) + expected = s + increment + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "ops, names", + [ + ([np.sqrt], ["sqrt"]), + ([np.abs, np.sqrt], ["absolute", "sqrt"]), + (np.array([np.sqrt]), ["sqrt"]), + (np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]), + ], +) +def test_transform_listlike(string_series, ops, names): + # GH 35964 + with np.errstate(all="ignore"): + expected = concat([op(string_series) for op in ops], axis=1) + expected.columns = names + result = string_series.transform(ops) + tm.assert_frame_equal(result, expected) + + +def test_transform_listlike_func_with_args(): + # GH 50624 + + s = Series([1, 2, 3]) + + def foo1(x, a=1, c=0): + return x + a + c + + def foo2(x, b=2, c=0): + return x + b + c + + msg = r"foo1\(\) got an unexpected keyword argument 'b'" + with pytest.raises(TypeError, match=msg): + s.transform([foo1, foo2], 0, 3, b=3, c=4) + + result = s.transform([foo1, foo2], 0, 3, c=4) + expected = DataFrame({"foo1": [8, 9, 10], "foo2": [8, 9, 10]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("box", [dict, Series]) +def test_transform_dictlike(string_series, box): + # GH 35964 + with np.errstate(all="ignore"): + expected = concat([np.sqrt(string_series), np.abs(string_series)], axis=1) + expected.columns = ["foo", "bar"] + result = string_series.transform(box({"foo": np.sqrt, "bar": np.abs})) + tm.assert_frame_equal(result, expected) + + +def test_transform_dictlike_mixed(): + # GH 40018 - mix of lists and non-lists in values of a dictionary + df = Series([1, 4]) + result = df.transform({"b": ["sqrt", "abs"], "c": "sqrt"}) + expected = DataFrame( + [[1.0, 1, 1.0], [2.0, 4, 2.0]], + columns=MultiIndex([("b", "c"), ("sqrt", "abs")], [(0, 0, 1), (0, 1, 0)]), + ) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_str.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_str.py new file mode 100644 index 00000000..363d0285 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/apply/test_str.py @@ -0,0 +1,314 @@ +from itertools import chain +import operator + +import numpy as np +import pytest + +from pandas.core.dtypes.common import is_number + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm +from pandas.tests.apply.common import ( + frame_transform_kernels, + series_transform_kernels, +) + + +@pytest.mark.parametrize("func", ["sum", "mean", "min", "max", "std"]) +@pytest.mark.parametrize( + "args,kwds", + [ + pytest.param([], {}, id="no_args_or_kwds"), + pytest.param([1], {}, id="axis_from_args"), + pytest.param([], {"axis": 1}, id="axis_from_kwds"), + pytest.param([], {"numeric_only": True}, id="optional_kwds"), + pytest.param([1, True], {"numeric_only": True}, id="args_and_kwds"), + ], +) +@pytest.mark.parametrize("how", ["agg", "apply"]) +def test_apply_with_string_funcs(request, float_frame, func, args, kwds, how): + if len(args) > 1 and how == "agg": + request.node.add_marker( + pytest.mark.xfail( + raises=TypeError, 
+ reason="agg/apply signature mismatch - agg passes 2nd " + "argument to func", + ) + ) + result = getattr(float_frame, how)(func, *args, **kwds) + expected = getattr(float_frame, func)(*args, **kwds) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("arg", ["sum", "mean", "min", "max", "std"]) +def test_with_string_args(datetime_series, arg): + result = datetime_series.apply(arg) + expected = getattr(datetime_series, arg)() + assert result == expected + + +@pytest.mark.parametrize("op", ["mean", "median", "std", "var"]) +@pytest.mark.parametrize("how", ["agg", "apply"]) +def test_apply_np_reducer(op, how): + # GH 39116 + float_frame = DataFrame({"a": [1, 2], "b": [3, 4]}) + result = getattr(float_frame, how)(op) + # pandas ddof defaults to 1, numpy to 0 + kwargs = {"ddof": 1} if op in ("std", "var") else {} + expected = Series( + getattr(np, op)(float_frame, axis=0, **kwargs), index=float_frame.columns + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "op", ["abs", "ceil", "cos", "cumsum", "exp", "log", "sqrt", "square"] +) +@pytest.mark.parametrize("how", ["transform", "apply"]) +def test_apply_np_transformer(float_frame, op, how): + # GH 39116 + + # float_frame will _usually_ have negative values, which will + # trigger the warning here, but let's put one in just to be sure + float_frame.iloc[0, 0] = -1.0 + warn = None + if op in ["log", "sqrt"]: + warn = RuntimeWarning + + with tm.assert_produces_warning(warn, check_stacklevel=False): + # float_frame fixture is defined in conftest.py, so we don't check the + # stacklevel as otherwise the test would fail. + result = getattr(float_frame, how)(op) + expected = getattr(np, op)(float_frame) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "series, func, expected", + chain( + tm.get_cython_table_params( + Series(dtype=np.float64), + [ + ("sum", 0), + ("max", np.nan), + ("min", np.nan), + ("all", True), + ("any", False), + ("mean", np.nan), + ("prod", 1), + ("std", np.nan), + ("var", np.nan), + ("median", np.nan), + ], + ), + tm.get_cython_table_params( + Series([np.nan, 1, 2, 3]), + [ + ("sum", 6), + ("max", 3), + ("min", 1), + ("all", True), + ("any", True), + ("mean", 2), + ("prod", 6), + ("std", 1), + ("var", 1), + ("median", 2), + ], + ), + tm.get_cython_table_params( + Series("a b c".split()), + [ + ("sum", "abc"), + ("max", "c"), + ("min", "a"), + ("all", True), + ("any", True), + ], + ), + ), +) +def test_agg_cython_table_series(series, func, expected): + # GH21224 + # test reducing functions in + # pandas.core.base.SelectionMixin._cython_table + warn = None if isinstance(func, str) else FutureWarning + with tm.assert_produces_warning(warn, match="is currently using Series.*"): + result = series.agg(func) + if is_number(expected): + assert np.isclose(result, expected, equal_nan=True) + else: + assert result == expected + + +@pytest.mark.parametrize( + "series, func, expected", + chain( + tm.get_cython_table_params( + Series(dtype=np.float64), + [ + ("cumprod", Series([], dtype=np.float64)), + ("cumsum", Series([], dtype=np.float64)), + ], + ), + tm.get_cython_table_params( + Series([np.nan, 1, 2, 3]), + [ + ("cumprod", Series([np.nan, 1, 2, 6])), + ("cumsum", Series([np.nan, 1, 3, 6])), + ], + ), + tm.get_cython_table_params( + Series("a b c".split()), [("cumsum", Series(["a", "ab", "abc"]))] + ), + ), +) +def test_agg_cython_table_transform_series(series, func, expected): + # GH21224 + # test transforming functions in + # 
pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum) + warn = None if isinstance(func, str) else FutureWarning + with tm.assert_produces_warning(warn, match="is currently using Series.*"): + result = series.agg(func) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "df, func, expected", + chain( + tm.get_cython_table_params( + DataFrame(), + [ + ("sum", Series(dtype="float64")), + ("max", Series(dtype="float64")), + ("min", Series(dtype="float64")), + ("all", Series(dtype=bool)), + ("any", Series(dtype=bool)), + ("mean", Series(dtype="float64")), + ("prod", Series(dtype="float64")), + ("std", Series(dtype="float64")), + ("var", Series(dtype="float64")), + ("median", Series(dtype="float64")), + ], + ), + tm.get_cython_table_params( + DataFrame([[np.nan, 1], [1, 2]]), + [ + ("sum", Series([1.0, 3])), + ("max", Series([1.0, 2])), + ("min", Series([1.0, 1])), + ("all", Series([True, True])), + ("any", Series([True, True])), + ("mean", Series([1, 1.5])), + ("prod", Series([1.0, 2])), + ("std", Series([np.nan, 0.707107])), + ("var", Series([np.nan, 0.5])), + ("median", Series([1, 1.5])), + ], + ), + ), +) +def test_agg_cython_table_frame(df, func, expected, axis): + # GH 21224 + # test reducing functions in + # pandas.core.base.SelectionMixin._cython_table + warn = None if isinstance(func, str) else FutureWarning + with tm.assert_produces_warning(warn, match="is currently using DataFrame.*"): + # GH#53425 + result = df.agg(func, axis=axis) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "df, func, expected", + chain( + tm.get_cython_table_params( + DataFrame(), [("cumprod", DataFrame()), ("cumsum", DataFrame())] + ), + tm.get_cython_table_params( + DataFrame([[np.nan, 1], [1, 2]]), + [ + ("cumprod", DataFrame([[np.nan, 1], [1, 2]])), + ("cumsum", DataFrame([[np.nan, 1], [1, 3]])), + ], + ), + ), +) +def test_agg_cython_table_transform_frame(df, func, expected, axis): + # GH 21224 + # test transforming functions in + # pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum) + if axis in ("columns", 1): + # operating blockwise doesn't let us preserve dtypes + expected = expected.astype("float64") + + warn = None if isinstance(func, str) else FutureWarning + with tm.assert_produces_warning(warn, match="is currently using DataFrame.*"): + # GH#53425 + result = df.agg(func, axis=axis) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("op", series_transform_kernels) +def test_transform_groupby_kernel_series(request, string_series, op): + # GH 35964 + if op == "ngroup": + request.node.add_marker( + pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame") + ) + args = [0.0] if op == "fillna" else [] + ones = np.ones(string_series.shape[0]) + expected = string_series.groupby(ones).transform(op, *args) + result = string_series.transform(op, 0, *args) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("op", frame_transform_kernels) +def test_transform_groupby_kernel_frame(request, axis, float_frame, op): + if op == "ngroup": + request.node.add_marker( + pytest.mark.xfail(raises=ValueError, reason="ngroup not valid for NDFrame") + ) + + # GH 35964 + + args = [0.0] if op == "fillna" else [] + if axis in (0, "index"): + ones = np.ones(float_frame.shape[0]) + msg = "The 'axis' keyword in DataFrame.groupby is deprecated" + else: + ones = np.ones(float_frame.shape[1]) + msg = "DataFrame.groupby with axis=1 is deprecated" + + with tm.assert_produces_warning(FutureWarning, 
match=msg): + gb = float_frame.groupby(ones, axis=axis) + expected = gb.transform(op, *args) + result = float_frame.transform(op, axis, *args) + tm.assert_frame_equal(result, expected) + + # same thing, but ensuring we have multiple blocks + assert "E" not in float_frame.columns + float_frame["E"] = float_frame["A"].copy() + assert len(float_frame._mgr.arrays) > 1 + + if axis in (0, "index"): + ones = np.ones(float_frame.shape[0]) + else: + ones = np.ones(float_frame.shape[1]) + with tm.assert_produces_warning(FutureWarning, match=msg): + gb2 = float_frame.groupby(ones, axis=axis) + expected2 = gb2.transform(op, *args) + result2 = float_frame.transform(op, axis, *args) + tm.assert_frame_equal(result2, expected2) + + +@pytest.mark.parametrize("method", ["abs", "shift", "pct_change", "cumsum", "rank"]) +def test_transform_method_name(method): + # GH 19760 + df = DataFrame({"A": [-1, 2]}) + result = df.transform(method) + expected = operator.methodcaller(method)(df) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/common.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/common.py new file mode 100644 index 00000000..b608df15 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/common.py @@ -0,0 +1,155 @@ +""" +Assertion helpers for arithmetic tests. +""" +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, + array, +) +import pandas._testing as tm +from pandas.core.arrays import ( + BooleanArray, + NumpyExtensionArray, +) + + +def assert_cannot_add(left, right, msg="cannot add"): + """ + Helper to assert that left and right cannot be added. + + Parameters + ---------- + left : object + right : object + msg : str, default "cannot add" + """ + with pytest.raises(TypeError, match=msg): + left + right + with pytest.raises(TypeError, match=msg): + right + left + + +def assert_invalid_addsub_type(left, right, msg=None): + """ + Helper to assert that left and right can be neither added nor subtracted. + + Parameters + ---------- + left : object + right : object + msg : str or None, default None + """ + with pytest.raises(TypeError, match=msg): + left + right + with pytest.raises(TypeError, match=msg): + right + left + with pytest.raises(TypeError, match=msg): + left - right + with pytest.raises(TypeError, match=msg): + right - left + + +def get_upcast_box(left, right, is_cmp: bool = False): + """ + Get the box to use for 'expected' in an arithmetic or comparison operation. + + Parameters + left : Any + right : Any + is_cmp : bool, default False + Whether the operation is a comparison method. + """ + + if isinstance(left, DataFrame) or isinstance(right, DataFrame): + return DataFrame + if isinstance(left, Series) or isinstance(right, Series): + if is_cmp and isinstance(left, Index): + # Index does not defer for comparisons + return np.array + return Series + if isinstance(left, Index) or isinstance(right, Index): + if is_cmp: + return np.array + return Index + return tm.to_array + + +def assert_invalid_comparison(left, right, box): + """ + Assert that comparison operations with mismatched types behave correctly. 
+ + Parameters + ---------- + left : np.ndarray, ExtensionArray, Index, or Series + right : object + box : {pd.DataFrame, pd.Series, pd.Index, pd.array, tm.to_array} + """ + # Not for tznaive-tzaware comparison + + # Note: not quite the same as how we do this for tm.box_expected + xbox = box if box not in [Index, array] else np.array + + def xbox2(x): + # Eventually we'd like this to be tighter, but for now we'll + # just exclude NumpyExtensionArray[bool] + if isinstance(x, NumpyExtensionArray): + return x._ndarray + if isinstance(x, BooleanArray): + # NB: we are assuming no pd.NAs for now + return x.astype(bool) + return x + + # rev_box: box to use for reversed comparisons + rev_box = xbox + if isinstance(right, Index) and isinstance(left, Series): + rev_box = np.array + + result = xbox2(left == right) + expected = xbox(np.zeros(result.shape, dtype=np.bool_)) + + tm.assert_equal(result, expected) + + result = xbox2(right == left) + tm.assert_equal(result, rev_box(expected)) + + result = xbox2(left != right) + tm.assert_equal(result, ~expected) + + result = xbox2(right != left) + tm.assert_equal(result, rev_box(~expected)) + + msg = "|".join( + [ + "Invalid comparison between", + "Cannot compare type", + "not supported between", + "invalid type promotion", + ( + # GH#36706 npdev 1.20.0 2020-09-28 + r"The DTypes and " + r" do not have a common DType. " + "For example they cannot be stored in a single array unless the " + "dtype is `object`." + ), + ] + ) + with pytest.raises(TypeError, match=msg): + left < right + with pytest.raises(TypeError, match=msg): + left <= right + with pytest.raises(TypeError, match=msg): + left > right + with pytest.raises(TypeError, match=msg): + left >= right + with pytest.raises(TypeError, match=msg): + right < left + with pytest.raises(TypeError, match=msg): + right <= left + with pytest.raises(TypeError, match=msg): + right > left + with pytest.raises(TypeError, match=msg): + right >= left diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/conftest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/conftest.py new file mode 100644 index 00000000..7dd51692 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/conftest.py @@ -0,0 +1,228 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + RangeIndex, +) +import pandas._testing as tm +from pandas.core.computation import expressions as expr + + +@pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"]) +def switch_numexpr_min_elements(request): + _MIN_ELEMENTS = expr._MIN_ELEMENTS + expr._MIN_ELEMENTS = request.param + yield request.param + expr._MIN_ELEMENTS = _MIN_ELEMENTS + + +# ------------------------------------------------------------------ + + +# doctest with +SKIP for one fixture fails during setup with +# 'DoctestItem' object has no attribute 'callspec' +# due to switch_numexpr_min_elements fixture +@pytest.fixture(params=[1, np.array(1, dtype=np.int64)]) +def one(request): + """ + Several variants of integer value 1. The zero-dim integer array + behaves like an integer. + + This fixture can be used to check that datetimelike indexes handle + addition and subtraction of integers and zero-dimensional arrays + of integers. 
+ + Examples + -------- + dti = pd.date_range('2016-01-01', periods=2, freq='H') + dti + DatetimeIndex(['2016-01-01 00:00:00', '2016-01-01 01:00:00'], + dtype='datetime64[ns]', freq='H') + dti + one + DatetimeIndex(['2016-01-01 01:00:00', '2016-01-01 02:00:00'], + dtype='datetime64[ns]', freq='H') + """ + return request.param + + +zeros = [ + box_cls([0] * 5, dtype=dtype) + for box_cls in [Index, np.array, pd.array] + for dtype in [np.int64, np.uint64, np.float64] +] +zeros.extend([box_cls([-0.0] * 5, dtype=np.float64) for box_cls in [Index, np.array]]) +zeros.extend([np.array(0, dtype=dtype) for dtype in [np.int64, np.uint64, np.float64]]) +zeros.extend([np.array(-0.0, dtype=np.float64)]) +zeros.extend([0, 0.0, -0.0]) + + +# doctest with +SKIP for zero fixture fails during setup with +# 'DoctestItem' object has no attribute 'callspec' +# due to switch_numexpr_min_elements fixture +@pytest.fixture(params=zeros) +def zero(request): + """ + Several types of scalar zeros and length 5 vectors of zeros. + + This fixture can be used to check that numeric-dtype indexes handle + division by any zero numeric-dtype. + + Uses vector of length 5 for broadcasting with `numeric_idx` fixture, + which creates numeric-dtype vectors also of length 5. + + Examples + -------- + arr = RangeIndex(5) + arr / zeros + Index([nan, inf, inf, inf, inf], dtype='float64') + """ + return request.param + + +# ------------------------------------------------------------------ +# Vector Fixtures + + +@pytest.fixture( + params=[ + # TODO: add more dtypes here + Index(np.arange(5, dtype="float64")), + Index(np.arange(5, dtype="int64")), + Index(np.arange(5, dtype="uint64")), + RangeIndex(5), + ], + ids=lambda x: type(x).__name__, +) +def numeric_idx(request): + """ + Several types of numeric-dtypes Index objects + """ + return request.param + + +# ------------------------------------------------------------------ +# Scalar Fixtures + + +@pytest.fixture( + params=[ + pd.Timedelta("10m7s").to_pytimedelta(), + pd.Timedelta("10m7s"), + pd.Timedelta("10m7s").to_timedelta64(), + ], + ids=lambda x: type(x).__name__, +) +def scalar_td(request): + """ + Several variants of Timedelta scalars representing 10 minutes and 7 seconds. + """ + return request.param + + +@pytest.fixture( + params=[ + pd.offsets.Day(3), + pd.offsets.Hour(72), + pd.Timedelta(days=3).to_pytimedelta(), + pd.Timedelta("72:00:00"), + np.timedelta64(3, "D"), + np.timedelta64(72, "h"), + ], + ids=lambda x: type(x).__name__, +) +def three_days(request): + """ + Several timedelta-like and DateOffset objects that each represent + a 3-day timedelta + """ + return request.param + + +@pytest.fixture( + params=[ + pd.offsets.Hour(2), + pd.offsets.Minute(120), + pd.Timedelta(hours=2).to_pytimedelta(), + pd.Timedelta(seconds=2 * 3600), + np.timedelta64(2, "h"), + np.timedelta64(120, "m"), + ], + ids=lambda x: type(x).__name__, +) +def two_hours(request): + """ + Several timedelta-like and DateOffset objects that each represent + a 2-hour timedelta + """ + return request.param + + +_common_mismatch = [ + pd.offsets.YearBegin(2), + pd.offsets.MonthBegin(1), + pd.offsets.Minute(), +] + + +@pytest.fixture( + params=[ + pd.Timedelta(minutes=30).to_pytimedelta(), + np.timedelta64(30, "s"), + pd.Timedelta(seconds=30), + ] + + _common_mismatch +) +def not_hourly(request): + """ + Several timedelta-like and DateOffset instances that are _not_ + compatible with Hourly frequencies. 
+ """ + return request.param + + +@pytest.fixture( + params=[ + np.timedelta64(4, "h"), + pd.Timedelta(hours=23).to_pytimedelta(), + pd.Timedelta("23:00:00"), + ] + + _common_mismatch +) +def not_daily(request): + """ + Several timedelta-like and DateOffset instances that are _not_ + compatible with Daily frequencies. + """ + return request.param + + +@pytest.fixture( + params=[ + np.timedelta64(365, "D"), + pd.Timedelta(days=365).to_pytimedelta(), + pd.Timedelta(days=365), + ] + + _common_mismatch +) +def mismatched_freq(request): + """ + Several timedelta-like and DateOffset instances that are _not_ + compatible with Monthly or Annual frequencies. + """ + return request.param + + +# ------------------------------------------------------------------ + + +@pytest.fixture( + params=[Index, pd.Series, tm.to_array, np.array, list], ids=lambda x: x.__name__ +) +def box_1d_array(request): + """ + Fixture to test behavior for Index, Series, tm.to_array, numpy Array and list + classes + """ + return request.param diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_array_ops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_array_ops.py new file mode 100644 index 00000000..2c347d96 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_array_ops.py @@ -0,0 +1,39 @@ +import operator + +import numpy as np +import pytest + +import pandas._testing as tm +from pandas.core.ops.array_ops import ( + comparison_op, + na_logical_op, +) + + +def test_na_logical_op_2d(): + left = np.arange(8).reshape(4, 2) + right = left.astype(object) + right[0, 0] = np.nan + + # Check that we fall back to the vec_binop branch + with pytest.raises(TypeError, match="unsupported operand type"): + operator.or_(left, right) + + result = na_logical_op(left, right, operator.or_) + expected = right + tm.assert_numpy_array_equal(result, expected) + + +def test_object_comparison_2d(): + left = np.arange(9).reshape(3, 3).astype(object) + right = left.T + + result = comparison_op(left, right, operator.eq) + expected = np.eye(3).astype(bool) + tm.assert_numpy_array_equal(result, expected) + + # Ensure that cython doesn't raise on non-writeable arg, which + # we can get from np.broadcast_to + right.flags.writeable = False + result = comparison_op(left, right, operator.ne) + tm.assert_numpy_array_equal(result, ~expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_categorical.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_categorical.py new file mode 100644 index 00000000..d6f3a13c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_categorical.py @@ -0,0 +1,25 @@ +import numpy as np + +from pandas import ( + Categorical, + Series, +) +import pandas._testing as tm + + +class TestCategoricalComparisons: + def test_categorical_nan_equality(self): + cat = Series(Categorical(["a", "b", "c", np.nan])) + expected = Series([True, True, True, False]) + result = cat == cat + tm.assert_series_equal(result, expected) + + def test_categorical_tuple_equality(self): + # GH 18050 + ser = Series([(0, 0), (0, 1), (0, 0), (1, 0), (1, 1)]) + expected = Series([True, False, True, False, False]) + result = ser == (0, 0) + tm.assert_series_equal(result, expected) + + result = ser.astype("category") == (0, 0) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_datetime64.py 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_datetime64.py new file mode 100644 index 00000000..34b526bf --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_datetime64.py @@ -0,0 +1,2470 @@ +# Arithmetic tests for DataFrame/Series/Index/Array classes that should +# behave identically. +# Specifically for datetime64 and datetime64tz dtypes +from datetime import ( + datetime, + time, + timedelta, +) +from itertools import ( + product, + starmap, +) +import operator + +import numpy as np +import pytest +import pytz + +from pandas._libs.tslibs.conversion import localize_pydatetime +from pandas._libs.tslibs.offsets import shift_months +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import ( + DateOffset, + DatetimeIndex, + NaT, + Period, + Series, + Timedelta, + TimedeltaIndex, + Timestamp, + date_range, +) +import pandas._testing as tm +from pandas.core import roperator +from pandas.tests.arithmetic.common import ( + assert_cannot_add, + assert_invalid_addsub_type, + assert_invalid_comparison, + get_upcast_box, +) + +# ------------------------------------------------------------------ +# Comparisons + + +class TestDatetime64ArrayLikeComparisons: + # Comparison tests for datetime64 vectors fully parametrized over + # DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison + # tests will eventually end up here. + + def test_compare_zerodim(self, tz_naive_fixture, box_with_array): + # Test comparison with zero-dimensional array is unboxed + tz = tz_naive_fixture + box = box_with_array + dti = date_range("20130101", periods=3, tz=tz) + + other = np.array(dti.to_numpy()[0]) + + dtarr = tm.box_expected(dti, box) + xbox = get_upcast_box(dtarr, other, True) + result = dtarr <= other + expected = np.array([True, False, False]) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "other", + [ + "foo", + -1, + 99, + 4.0, + object(), + timedelta(days=2), + # GH#19800, GH#19301 datetime.date comparison raises to + # match DatetimeIndex/Timestamp. 
This also matches the behavior + # of stdlib datetime.datetime + datetime(2001, 1, 1).date(), + # GH#19301 None and NaN are *not* cast to NaT for comparisons + None, + np.nan, + ], + ) + def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array): + # GH#22074, GH#15966 + tz = tz_naive_fixture + + rng = date_range("1/1/2000", periods=10, tz=tz) + dtarr = tm.box_expected(rng, box_with_array) + assert_invalid_comparison(dtarr, other, box_with_array) + + @pytest.mark.parametrize( + "other", + [ + # GH#4968 invalid date/int comparisons + list(range(10)), + np.arange(10), + np.arange(10).astype(np.float32), + np.arange(10).astype(object), + pd.timedelta_range("1ns", periods=10).array, + np.array(pd.timedelta_range("1ns", periods=10)), + list(pd.timedelta_range("1ns", periods=10)), + pd.timedelta_range("1 Day", periods=10).astype(object), + pd.period_range("1971-01-01", freq="D", periods=10).array, + pd.period_range("1971-01-01", freq="D", periods=10).astype(object), + ], + ) + def test_dt64arr_cmp_arraylike_invalid( + self, other, tz_naive_fixture, box_with_array + ): + tz = tz_naive_fixture + + dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data + obj = tm.box_expected(dta, box_with_array) + assert_invalid_comparison(obj, other, box_with_array) + + def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture): + tz = tz_naive_fixture + + dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data + + other = np.array([0, 1, 2, dta[3], Timedelta(days=1)]) + result = dta == other + expected = np.array([False, False, False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = dta != other + tm.assert_numpy_array_equal(result, ~expected) + + msg = "Invalid comparison between|Cannot compare type|not supported between" + with pytest.raises(TypeError, match=msg): + dta < other + with pytest.raises(TypeError, match=msg): + dta > other + with pytest.raises(TypeError, match=msg): + dta <= other + with pytest.raises(TypeError, match=msg): + dta >= other + + def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array): + # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly + tz = tz_naive_fixture + box = box_with_array + + ts = Timestamp("2021-01-01", tz=tz) + ser = Series([ts, NaT]) + + obj = tm.box_expected(ser, box) + xbox = get_upcast_box(obj, ts, True) + + expected = Series([True, False], dtype=np.bool_) + expected = tm.box_expected(expected, xbox) + + result = obj == ts + tm.assert_equal(result, expected) + + +class TestDatetime64SeriesComparison: + # TODO: moved from tests.series.test_operators; needs cleanup + + @pytest.mark.parametrize( + "pair", + [ + ( + [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")], + [NaT, NaT, Timestamp("2011-01-03")], + ), + ( + [Timedelta("1 days"), NaT, Timedelta("3 days")], + [NaT, NaT, Timedelta("3 days")], + ), + ( + [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")], + [NaT, NaT, Period("2011-03", freq="M")], + ), + ], + ) + @pytest.mark.parametrize("reverse", [True, False]) + @pytest.mark.parametrize("dtype", [None, object]) + @pytest.mark.parametrize( + "op, expected", + [ + (operator.eq, Series([False, False, True])), + (operator.ne, Series([True, True, False])), + (operator.lt, Series([False, False, False])), + (operator.gt, Series([False, False, False])), + (operator.ge, Series([False, False, True])), + (operator.le, Series([False, False, True])), + ], + ) + def test_nat_comparisons( + self, + dtype, + index_or_series, + reverse, + pair, + op, + expected, + 
): + box = index_or_series + lhs, rhs = pair + if reverse: + # add lhs / rhs switched data + lhs, rhs = rhs, lhs + + left = Series(lhs, dtype=dtype) + right = box(rhs, dtype=dtype) + + result = op(left, right) + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "data", + [ + [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")], + [Timedelta("1 days"), NaT, Timedelta("3 days")], + [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")], + ], + ) + @pytest.mark.parametrize("dtype", [None, object]) + def test_nat_comparisons_scalar(self, dtype, data, box_with_array): + box = box_with_array + + left = Series(data, dtype=dtype) + left = tm.box_expected(left, box) + xbox = get_upcast_box(left, NaT, True) + + expected = [False, False, False] + expected = tm.box_expected(expected, xbox) + if box is pd.array and dtype is object: + expected = pd.array(expected, dtype="bool") + + tm.assert_equal(left == NaT, expected) + tm.assert_equal(NaT == left, expected) + + expected = [True, True, True] + expected = tm.box_expected(expected, xbox) + if box is pd.array and dtype is object: + expected = pd.array(expected, dtype="bool") + tm.assert_equal(left != NaT, expected) + tm.assert_equal(NaT != left, expected) + + expected = [False, False, False] + expected = tm.box_expected(expected, xbox) + if box is pd.array and dtype is object: + expected = pd.array(expected, dtype="bool") + tm.assert_equal(left < NaT, expected) + tm.assert_equal(NaT > left, expected) + tm.assert_equal(left <= NaT, expected) + tm.assert_equal(NaT >= left, expected) + + tm.assert_equal(left > NaT, expected) + tm.assert_equal(NaT < left, expected) + tm.assert_equal(left >= NaT, expected) + tm.assert_equal(NaT <= left, expected) + + @pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)]) + def test_series_comparison_scalars(self, val): + series = Series(date_range("1/1/2000", periods=10)) + + result = series > val + expected = Series([x > val for x in series]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")] + ) + def test_timestamp_compare_series(self, left, right): + # see gh-4982 + # Make sure we can compare Timestamps on the right AND left hand side. 
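    # (Editorial aside, not part of the upstream pandas source: the symmetry
    # under test, assuming standard reflected-comparison semantics -- a
    # Timestamp on the left-hand side must match the element-wise Series
    # comparison with operands swapped:
    #
    #     ser = pd.Series(pd.date_range("2001-01-01", periods=3))
    #     ts = pd.Timestamp("2001-01-02")
    #     assert ((ts > ser) == (ser < ts)).all()
    # )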
+ ser = Series(date_range("20010101", periods=10), name="dates") + s_nat = ser.copy(deep=True) + + ser[0] = Timestamp("nat") + ser[3] = Timestamp("nat") + + left_f = getattr(operator, left) + right_f = getattr(operator, right) + + # No NaT + expected = left_f(ser, Timestamp("20010109")) + result = right_f(Timestamp("20010109"), ser) + tm.assert_series_equal(result, expected) + + # NaT + expected = left_f(ser, Timestamp("nat")) + result = right_f(Timestamp("nat"), ser) + tm.assert_series_equal(result, expected) + + # Compare to Timestamp with series containing NaT + expected = left_f(s_nat, Timestamp("20010109")) + result = right_f(Timestamp("20010109"), s_nat) + tm.assert_series_equal(result, expected) + + # Compare to NaT with series containing NaT + expected = left_f(s_nat, NaT) + result = right_f(NaT, s_nat) + tm.assert_series_equal(result, expected) + + def test_dt64arr_timestamp_equality(self, box_with_array): + # GH#11034 + box = box_with_array + + ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT]) + ser = tm.box_expected(ser, box) + xbox = get_upcast_box(ser, ser, True) + + result = ser != ser + expected = tm.box_expected([False, False, True], xbox) + tm.assert_equal(result, expected) + + if box is pd.DataFrame: + # alignment for frame vs series comparisons deprecated + # in GH#46795 enforced 2.0 + with pytest.raises(ValueError, match="not aligned"): + ser != ser[0] + + else: + result = ser != ser[0] + expected = tm.box_expected([False, True, True], xbox) + tm.assert_equal(result, expected) + + if box is pd.DataFrame: + # alignment for frame vs series comparisons deprecated + # in GH#46795 enforced 2.0 + with pytest.raises(ValueError, match="not aligned"): + ser != ser[2] + else: + result = ser != ser[2] + expected = tm.box_expected([True, True, True], xbox) + tm.assert_equal(result, expected) + + result = ser == ser + expected = tm.box_expected([True, True, False], xbox) + tm.assert_equal(result, expected) + + if box is pd.DataFrame: + # alignment for frame vs series comparisons deprecated + # in GH#46795 enforced 2.0 + with pytest.raises(ValueError, match="not aligned"): + ser == ser[0] + else: + result = ser == ser[0] + expected = tm.box_expected([True, False, False], xbox) + tm.assert_equal(result, expected) + + if box is pd.DataFrame: + # alignment for frame vs series comparisons deprecated + # in GH#46795 enforced 2.0 + with pytest.raises(ValueError, match="not aligned"): + ser == ser[2] + else: + result = ser == ser[2] + expected = tm.box_expected([False, False, False], xbox) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "datetimelike", + [ + Timestamp("20130101"), + datetime(2013, 1, 1), + np.datetime64("2013-01-01T00:00", "ns"), + ], + ) + @pytest.mark.parametrize( + "op,expected", + [ + (operator.lt, [True, False, False, False]), + (operator.le, [True, True, False, False]), + (operator.eq, [False, True, False, False]), + (operator.gt, [False, False, False, True]), + ], + ) + def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected): + # GH#17965, test for ability to compare datetime64[ns] columns + # to datetimelike + ser = Series( + [ + Timestamp("20120101"), + Timestamp("20130101"), + np.nan, + Timestamp("20130103"), + ], + name="A", + ) + result = op(ser, datetimelike) + expected = Series(expected, name="A") + tm.assert_series_equal(result, expected) + + +class TestDatetimeIndexComparisons: + # TODO: moved from tests.indexes.test_base; parametrize and de-duplicate + def test_comparators(self, 
+    def test_comparators(self, comparison_op):
+        index = tm.makeDateIndex(100)
+        element = index[len(index) // 2]
+        element = Timestamp(element).to_datetime64()
+
+        arr = np.array(index)
+        arr_result = comparison_op(arr, element)
+        index_result = comparison_op(index, element)
+
+        assert isinstance(index_result, np.ndarray)
+        tm.assert_numpy_array_equal(arr_result, index_result)
+
+    @pytest.mark.parametrize(
+        "other",
+        [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
+    )
+    def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
+        tz = tz_naive_fixture
+        dti = date_range("2016-01-01", periods=2, tz=tz)
+        if tz is not None:
+            if isinstance(other, np.datetime64):
+                pytest.skip("no tzaware version available")
+            other = localize_pydatetime(other, dti.tzinfo)
+
+        result = dti == other
+        expected = np.array([True, False])
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = dti > other
+        expected = np.array([False, True])
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = dti >= other
+        expected = np.array([True, True])
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = dti < other
+        expected = np.array([False, False])
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = dti <= other
+        expected = np.array([True, False])
+        tm.assert_numpy_array_equal(result, expected)
+
+    @pytest.mark.parametrize("dtype", [None, object])
+    def test_dti_cmp_nat(self, dtype, box_with_array):
+        left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
+        right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
+
+        left = tm.box_expected(left, box_with_array)
+        right = tm.box_expected(right, box_with_array)
+        xbox = get_upcast_box(left, right, True)
+
+        lhs, rhs = left, right
+        if dtype is object:
+            lhs, rhs = left.astype(object), right.astype(object)
+
+        result = rhs == lhs
+        expected = np.array([False, False, True])
+        expected = tm.box_expected(expected, xbox)
+        tm.assert_equal(result, expected)
+
+        result = lhs != rhs
+        expected = np.array([True, True, False])
+        expected = tm.box_expected(expected, xbox)
+        tm.assert_equal(result, expected)
+
+        expected = np.array([False, False, False])
+        expected = tm.box_expected(expected, xbox)
+        tm.assert_equal(lhs == NaT, expected)
+        tm.assert_equal(NaT == rhs, expected)
+
+        expected = np.array([True, True, True])
+        expected = tm.box_expected(expected, xbox)
+        tm.assert_equal(lhs != NaT, expected)
+        tm.assert_equal(NaT != lhs, expected)
+
+        expected = np.array([False, False, False])
+        expected = tm.box_expected(expected, xbox)
+        tm.assert_equal(lhs < NaT, expected)
+        tm.assert_equal(NaT > lhs, expected)
+
+    def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
+        fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
+        fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
+
+        didx1 = DatetimeIndex(
+            ["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
+        )
+        didx2 = DatetimeIndex(
+            ["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
+        )
+        darr = np.array(
+            [
+                np.datetime64("2014-02-01 00:00"),
+                np.datetime64("2014-03-01 00:00"),
+                np.datetime64("nat"),
+                np.datetime64("nat"),
+                np.datetime64("2014-06-01 00:00"),
+                np.datetime64("2014-07-01 00:00"),
+            ]
+        )
+
+        cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
+
+        # Check pd.NaT is handled the same as np.nan
+        with tm.assert_produces_warning(None):
+            for idx1, idx2 in cases:
+                result = idx1 < idx2
+                expected = np.array([True, False, False, False, True, False])
+                tm.assert_numpy_array_equal(result, expected)
+
+                result = idx2 > idx1
+                expected = np.array([True, False, False, False, True, False])
+                tm.assert_numpy_array_equal(result, expected)
+
+                result = idx1 <= idx2
+                expected = np.array([True, False, False, False, True, True])
+                tm.assert_numpy_array_equal(result, expected)
+
+                result = idx2 >= idx1
+                expected = np.array([True, False, False, False, True, True])
+                tm.assert_numpy_array_equal(result, expected)
+
+                result = idx1 == idx2
+                expected = np.array([False, False, False, False, False, True])
+                tm.assert_numpy_array_equal(result, expected)
+
+                result = idx1 != idx2
+                expected = np.array([True, True, True, True, True, False])
+                tm.assert_numpy_array_equal(result, expected)
+
+        with tm.assert_produces_warning(None):
+            for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
+                result = idx1 < val
+                expected = np.array([False, False, False, False, False, False])
+                tm.assert_numpy_array_equal(result, expected)
+                result = idx1 > val
+                tm.assert_numpy_array_equal(result, expected)
+
+                result = idx1 <= val
+                tm.assert_numpy_array_equal(result, expected)
+                result = idx1 >= val
+                tm.assert_numpy_array_equal(result, expected)
+
+                result = idx1 == val
+                tm.assert_numpy_array_equal(result, expected)
+
+                result = idx1 != val
+                expected = np.array([True, True, True, True, True, True])
+                tm.assert_numpy_array_equal(result, expected)
+
+        # Check pd.NaT is handled the same as np.nan
+        with tm.assert_produces_warning(None):
+            for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
+                result = idx1 < val
+                expected = np.array([True, False, False, False, False, False])
+                tm.assert_numpy_array_equal(result, expected)
+                result = idx1 > val
+                expected = np.array([False, False, False, False, True, True])
+                tm.assert_numpy_array_equal(result, expected)
+
+                result = idx1 <= val
+                expected = np.array([True, False, True, False, False, False])
+                tm.assert_numpy_array_equal(result, expected)
+                result = idx1 >= val
+                expected = np.array([False, False, True, False, True, True])
+                tm.assert_numpy_array_equal(result, expected)
+
+                result = idx1 == val
+                expected = np.array([False, False, True, False, False, False])
+                tm.assert_numpy_array_equal(result, expected)
+
+                result = idx1 != val
+                expected = np.array([True, True, False, True, True, True])
+                tm.assert_numpy_array_equal(result, expected)
+
+    def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
+        # GH#18162
+        op = comparison_op
+        box = box_with_array
+
+        dr = date_range("2016-01-01", periods=6)
+        dz = dr.tz_localize("US/Pacific")
+
+        dr = tm.box_expected(dr, box)
+        dz = tm.box_expected(dz, box)
+
+        if box is pd.DataFrame:
+            tolist = lambda x: x.astype(object).values.tolist()[0]
+        else:
+            tolist = list
+
+        if op not in [operator.eq, operator.ne]:
+            msg = (
+                r"Invalid comparison between dtype=datetime64\[ns.*\] "
+                "and (Timestamp|DatetimeArray|list|ndarray)"
+            )
+            with pytest.raises(TypeError, match=msg):
+                op(dr, dz)
+
+            with pytest.raises(TypeError, match=msg):
+                op(dr, tolist(dz))
+            with pytest.raises(TypeError, match=msg):
+                op(dr, np.array(tolist(dz), dtype=object))
+            with pytest.raises(TypeError, match=msg):
+                op(dz, dr)
+
+            with pytest.raises(TypeError, match=msg):
+                op(dz, tolist(dr))
+            with pytest.raises(TypeError, match=msg):
+                op(dz, np.array(tolist(dr), dtype=object))
+
+        # The aware==aware and naive==naive comparisons should *not* raise
+        assert np.all(dr == dr)
+        assert np.all(dr == tolist(dr))
+        assert np.all(tolist(dr) == dr)
+        assert np.all(np.array(tolist(dr), dtype=object) == dr)
+        assert np.all(dr == np.array(tolist(dr), dtype=object))
+
+        assert np.all(dz == dz)
+        assert np.all(dz == tolist(dz))
+        assert np.all(tolist(dz) == dz)
+        assert np.all(np.array(tolist(dz), dtype=object) == dz)
+        assert np.all(dz == np.array(tolist(dz), dtype=object))
+
+    def test_comparison_tzawareness_compat_scalars(
+        self, comparison_op, box_with_array
+    ):
+        # GH#18162
+        op = comparison_op
+
+        dr = date_range("2016-01-01", periods=6)
+        dz = dr.tz_localize("US/Pacific")
+
+        dr = tm.box_expected(dr, box_with_array)
+        dz = tm.box_expected(dz, box_with_array)
+
+        # Check comparisons against scalar Timestamps
+        ts = Timestamp("2000-03-14 01:59")
+        ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
+
+        assert np.all(dr > ts)
+        msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
+        if op not in [operator.eq, operator.ne]:
+            with pytest.raises(TypeError, match=msg):
+                op(dr, ts_tz)
+
+        assert np.all(dz > ts_tz)
+        if op not in [operator.eq, operator.ne]:
+            with pytest.raises(TypeError, match=msg):
+                op(dz, ts)
+
+        if op not in [operator.eq, operator.ne]:
+            # GH#12601: Check comparison against Timestamps and DatetimeIndex
+            with pytest.raises(TypeError, match=msg):
+                op(ts, dz)
+
+    @pytest.mark.parametrize(
+        "other",
+        [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
+    )
+    # Bug in NumPy? https://github.com/numpy/numpy/issues/13841
+    # Raising in __eq__ will fall back to NumPy, which warns, fails,
+    # then re-raises the original exception. So we just need to ignore.
+    @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
+    def test_scalar_comparison_tzawareness(
+        self, comparison_op, other, tz_aware_fixture, box_with_array
+    ):
+        op = comparison_op
+        tz = tz_aware_fixture
+        dti = date_range("2016-01-01", periods=2, tz=tz)
+
+        dtarr = tm.box_expected(dti, box_with_array)
+        xbox = get_upcast_box(dtarr, other, True)
+        if op in [operator.eq, operator.ne]:
+            exbool = op is operator.ne
+            expected = np.array([exbool, exbool], dtype=bool)
+            expected = tm.box_expected(expected, xbox)
+
+            result = op(dtarr, other)
+            tm.assert_equal(result, expected)
+
+            result = op(other, dtarr)
+            tm.assert_equal(result, expected)
+        else:
+            msg = (
+                r"Invalid comparison between dtype=datetime64\[ns, .*\] "
+                f"and {type(other).__name__}"
+            )
+            with pytest.raises(TypeError, match=msg):
+                op(dtarr, other)
+            with pytest.raises(TypeError, match=msg):
+                op(other, dtarr)
+
+    def test_nat_comparison_tzawareness(self, comparison_op):
+        # GH#19276
+        # tzaware DatetimeIndex should not raise when compared to NaT
+        op = comparison_op
+
+        dti = DatetimeIndex(
+            ["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
+        )
+        expected = np.array([op == operator.ne] * len(dti))
+        result = op(dti, NaT)
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = op(dti.tz_localize("US/Pacific"), NaT)
+        tm.assert_numpy_array_equal(result, expected)
+
+    def test_dti_cmp_str(self, tz_naive_fixture):
+        # GH#22074
+        # regardless of tz, we expect these comparisons are valid
+        tz = tz_naive_fixture
+        rng = date_range("1/1/2000", periods=10, tz=tz)
+        other = "1/1/2000"
+
+        result = rng == other
+        expected = np.array([True] + [False] * 9)
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = rng != other
+        expected = np.array([False] + [True] * 9)
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = rng < other
+        expected = np.array([False] * 10)
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = rng <= other
+        expected = np.array([True] + [False] * 9)
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = rng > other
+        expected = np.array([False] + [True] * 9)
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = rng >= other
+        expected = np.array([True] * 10)
+        tm.assert_numpy_array_equal(result, expected)
+
+    def test_dti_cmp_list(self):
+        rng = date_range("1/1/2000", periods=10)
+
+        result = rng == list(rng)
+        expected = rng == rng
+        tm.assert_numpy_array_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "other",
+        [
+            pd.timedelta_range("1D", periods=10),
+            pd.timedelta_range("1D", periods=10).to_series(),
+            pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
+        ],
+        ids=lambda x: type(x).__name__,
+    )
+    def test_dti_cmp_tdi_tzawareness(self, other):
+        # GH#22074
+        # regression test that we _don't_ call _assert_tzawareness_compat
+        # when comparing against TimedeltaIndex
+        dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
+
+        result = dti == other
+        expected = np.array([False] * 10)
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = dti != other
+        expected = np.array([True] * 10)
+        tm.assert_numpy_array_equal(result, expected)
+        msg = "Invalid comparison between"
+        with pytest.raises(TypeError, match=msg):
+            dti < other
+        with pytest.raises(TypeError, match=msg):
+            dti <= other
+        with pytest.raises(TypeError, match=msg):
+            dti > other
+        with pytest.raises(TypeError, match=msg):
+            dti >= other
+
+    def test_dti_cmp_object_dtype(self):
+        # GH#22074
+        dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
+
+        other = dti.astype("O")
+
+        result = dti == other
+        expected = np.array([True] * 10)
+        tm.assert_numpy_array_equal(result, expected)
+
+        other = dti.tz_localize(None)
+        result = dti != other
+        tm.assert_numpy_array_equal(result, expected)
+
+        other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
+        result = dti == other
+        expected = np.array([True] * 5 + [False] * 5)
+        tm.assert_numpy_array_equal(result, expected)
+        msg = "'>=' not supported between instances of 'Timestamp' and 'Timedelta'"
+        with pytest.raises(TypeError, match=msg):
+            dti >= other
+
+
+# ------------------------------------------------------------------
+# Arithmetic
+
+
+class TestDatetime64Arithmetic:
+    # This class is intended for "finished" tests that are fully parametrized
+    # over DataFrame/Series/Index/DatetimeArray
+
+    # -------------------------------------------------------------
+    # Addition/Subtraction of timedelta-like
+
+    @pytest.mark.arm_slow
+    def test_dt64arr_add_timedeltalike_scalar(
+        self, tz_naive_fixture, two_hours, box_with_array
+    ):
+        # GH#22005, GH#22163 check DataFrame doesn't raise TypeError
+        tz = tz_naive_fixture
+
+        rng = date_range("2000-01-01", "2000-02-01", tz=tz)
+        expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
+
+        rng = tm.box_expected(rng, box_with_array)
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = rng + two_hours
+        tm.assert_equal(result, expected)
+
+        result = two_hours + rng
+        tm.assert_equal(result, expected)
+
+        rng += two_hours
+        tm.assert_equal(rng, expected)
+
+    def test_dt64arr_sub_timedeltalike_scalar(
+        self, tz_naive_fixture, two_hours, box_with_array
+    ):
+        tz = tz_naive_fixture
+
+        rng = date_range("2000-01-01", "2000-02-01", tz=tz)
+        expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
+
+        rng = tm.box_expected(rng, box_with_array)
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = rng - two_hours
+        tm.assert_equal(result, expected)
+
+        rng -= two_hours
+        tm.assert_equal(rng, expected)
+
+    def test_dt64_array_sub_dt_with_different_timezone(self, box_with_array):
+        t1 = date_range("20130101", periods=3).tz_localize("US/Eastern")
+        t1 = tm.box_expected(t1, box_with_array)
+        t2 = Timestamp("20130101").tz_localize("CET")
+        tnaive = Timestamp("20130101")
+
+        result = t1 - t2
+        expected = TimedeltaIndex(
+            ["0 days 06:00:00", "1 days 06:00:00", "2 days 06:00:00"]
+        )
+        expected = tm.box_expected(expected, box_with_array)
+        tm.assert_equal(result, expected)
+
+        result = t2 - t1
+        expected = TimedeltaIndex(
+            ["-1 days +18:00:00", "-2 days +18:00:00", "-3 days +18:00:00"]
+        )
+        expected = tm.box_expected(expected, box_with_array)
+        tm.assert_equal(result, expected)
+
+        msg = "Cannot subtract tz-naive and tz-aware datetime-like objects"
+        with pytest.raises(TypeError, match=msg):
+            t1 - tnaive
+
+        with pytest.raises(TypeError, match=msg):
+            tnaive - t1
+
+    def test_dt64_array_sub_dt64_array_with_different_timezone(self, box_with_array):
+        t1 = date_range("20130101", periods=3).tz_localize("US/Eastern")
+        t1 = tm.box_expected(t1, box_with_array)
+        t2 = date_range("20130101", periods=3).tz_localize("CET")
+        t2 = tm.box_expected(t2, box_with_array)
+        tnaive = date_range("20130101", periods=3)
+
+        result = t1 - t2
+        expected = TimedeltaIndex(
+            ["0 days 06:00:00", "0 days 06:00:00", "0 days 06:00:00"]
+        )
+        expected = tm.box_expected(expected, box_with_array)
+        tm.assert_equal(result, expected)
+
+        result = t2 - t1
+        expected = TimedeltaIndex(
+            ["-1 days +18:00:00", "-1 days +18:00:00", "-1 days +18:00:00"]
+        )
+        expected = tm.box_expected(expected, box_with_array)
+        tm.assert_equal(result, expected)
+
+        msg = "Cannot subtract tz-naive and tz-aware datetime-like objects"
+        with pytest.raises(TypeError, match=msg):
+            t1 - tnaive
+
+        with pytest.raises(TypeError, match=msg):
+            tnaive - t1
+
+    def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
+        # GH#23320 special handling for timedelta64("NaT")
+        tz = tz_naive_fixture
+
+        dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
+        other = np.timedelta64("NaT")
+        expected = DatetimeIndex(["NaT"] * 9, tz=tz)
+
+        obj = tm.box_expected(dti, box_with_array)
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = obj + other
+        tm.assert_equal(result, expected)
+        result = other + obj
+        tm.assert_equal(result, expected)
+        result = obj - other
+        tm.assert_equal(result, expected)
+        msg = "cannot subtract"
+        with pytest.raises(TypeError, match=msg):
+            other - obj
+
+    def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
+        tz = tz_naive_fixture
+        dti = date_range("2016-01-01", periods=3, tz=tz)
+        tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
+        tdarr = tdi.values
+
+        expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
+
+        dtarr = tm.box_expected(dti, box_with_array)
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = dtarr + tdarr
+        tm.assert_equal(result, expected)
+        result = tdarr + dtarr
+        tm.assert_equal(result, expected)
+
+        expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = dtarr - tdarr
+        tm.assert_equal(result, expected)
+        msg = "cannot subtract|(bad|unsupported) operand type for unary"
+        with pytest.raises(TypeError, match=msg):
+            tdarr - dtarr
+
+    # -----------------------------------------------------------------
+    # Subtraction of datetime-like scalars
+
+    @pytest.mark.parametrize(
+        "ts",
+        [
+            Timestamp("2013-01-01"),
+            Timestamp("2013-01-01").to_pydatetime(),
+ Timestamp("2013-01-01").to_datetime64(), + # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano + # for DataFrame operation + np.datetime64("2013-01-01", "D"), + ], + ) + def test_dt64arr_sub_dtscalar(self, box_with_array, ts): + # GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype + idx = date_range("2013-01-01", periods=3)._with_freq(None) + idx = tm.box_expected(idx, box_with_array) + + expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"]) + expected = tm.box_expected(expected, box_with_array) + + result = idx - ts + tm.assert_equal(result, expected) + + result = ts - idx + tm.assert_equal(result, -expected) + tm.assert_equal(result, -expected) + + def test_dt64arr_sub_timestamp_tzaware(self, box_with_array): + ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern") + ser = ser._with_freq(None) + ts = ser[0] + + ser = tm.box_expected(ser, box_with_array) + + delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")]) + expected = tm.box_expected(delta_series, box_with_array) + + tm.assert_equal(ser - ts, expected) + tm.assert_equal(ts - ser, -expected) + + def test_dt64arr_sub_NaT(self, box_with_array): + # GH#18808 + dti = DatetimeIndex([NaT, Timestamp("19900315")]) + ser = tm.box_expected(dti, box_with_array) + + result = ser - NaT + expected = Series([NaT, NaT], dtype="timedelta64[ns]") + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + dti_tz = dti.tz_localize("Asia/Tokyo") + ser_tz = tm.box_expected(dti_tz, box_with_array) + + result = ser_tz - NaT + expected = Series([NaT, NaT], dtype="timedelta64[ns]") + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + # ------------------------------------------------------------- + # Subtraction of datetime-like array-like + + def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture): + dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture) + expected = dti - dti + + obj = tm.box_expected(dti, box_with_array) + expected = tm.box_expected(expected, box_with_array).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + result = obj - obj.astype(object) + tm.assert_equal(result, expected) + + def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array): + dti = date_range("2016-01-01", periods=3, tz=None) + dt64vals = dti.values + + dtarr = tm.box_expected(dti, box_with_array) + + expected = dtarr - dtarr + result = dtarr - dt64vals + tm.assert_equal(result, expected) + result = dt64vals - dtarr + tm.assert_equal(result, expected) + + def test_dt64arr_aware_sub_dt64ndarray_raises( + self, tz_aware_fixture, box_with_array + ): + tz = tz_aware_fixture + dti = date_range("2016-01-01", periods=3, tz=tz) + dt64vals = dti.values + + dtarr = tm.box_expected(dti, box_with_array) + msg = "Cannot subtract tz-naive and tz-aware datetime" + with pytest.raises(TypeError, match=msg): + dtarr - dt64vals + with pytest.raises(TypeError, match=msg): + dt64vals - dtarr + + # ------------------------------------------------------------- + # Addition of datetime-like others (invalid) + + def test_dt64arr_add_dtlike_raises(self, tz_naive_fixture, box_with_array): + # GH#22163 ensure DataFrame doesn't cast Timestamp to i8 + # GH#9631 + tz = tz_naive_fixture + + dti = date_range("2016-01-01", periods=3, tz=tz) + if tz is None: + dti2 = dti.tz_localize("US/Eastern") + else: + dti2 = dti.tz_localize(None) + dtarr = tm.box_expected(dti, box_with_array) + + assert_cannot_add(dtarr, 
+        assert_cannot_add(dtarr, dti.values)
+        assert_cannot_add(dtarr, dti)
+        assert_cannot_add(dtarr, dtarr)
+        assert_cannot_add(dtarr, dti[0])
+        assert_cannot_add(dtarr, dti[0].to_pydatetime())
+        assert_cannot_add(dtarr, dti[0].to_datetime64())
+        assert_cannot_add(dtarr, dti2[0])
+        assert_cannot_add(dtarr, dti2[0].to_pydatetime())
+        assert_cannot_add(dtarr, np.datetime64("2011-01-01", "D"))
+
+    # -------------------------------------------------------------
+    # Other Invalid Addition/Subtraction
+
+    # Note: freq here includes both Tick and non-Tick offsets; this is
+    # relevant because historically integer-addition was allowed if we had
+    # a freq.
+    @pytest.mark.parametrize("freq", ["H", "D", "W", "M", "MS", "Q", "B", None])
+    @pytest.mark.parametrize("dtype", [None, "uint8"])
+    def test_dt64arr_addsub_intlike(
+        self, request, dtype, box_with_array, freq, tz_naive_fixture
+    ):
+        # GH#19959, GH#19123, GH#19012
+        tz = tz_naive_fixture
+        if box_with_array is pd.DataFrame:
+            request.node.add_marker(
+                pytest.mark.xfail(raises=ValueError, reason="Axis alignment fails")
+            )
+
+        if freq is None:
+            dti = DatetimeIndex(["NaT", "2017-04-05 06:07:08"], tz=tz)
+        else:
+            dti = date_range("2016-01-01", periods=2, freq=freq, tz=tz)
+
+        obj = box_with_array(dti)
+        other = np.array([4, -1])
+        if dtype is not None:
+            other = other.astype(dtype)
+
+        msg = "|".join(
+            [
+                "Addition/subtraction of integers",
+                "cannot subtract DatetimeArray from",
+                # IntegerArray
+                "can only perform ops with numeric values",
+                "unsupported operand type.*Categorical",
+                r"unsupported operand type\(s\) for -: 'int' and 'Timestamp'",
+            ]
+        )
+        assert_invalid_addsub_type(obj, 1, msg)
+        assert_invalid_addsub_type(obj, np.int64(2), msg)
+        assert_invalid_addsub_type(obj, np.array(3, dtype=np.int64), msg)
+        assert_invalid_addsub_type(obj, other, msg)
+        assert_invalid_addsub_type(obj, np.array(other), msg)
+        assert_invalid_addsub_type(obj, pd.array(other), msg)
+        assert_invalid_addsub_type(obj, pd.Categorical(other), msg)
+        assert_invalid_addsub_type(obj, pd.Index(other), msg)
+        assert_invalid_addsub_type(obj, Series(other), msg)
+
+    @pytest.mark.parametrize(
+        "other",
+        [
+            3.14,
+            np.array([2.0, 3.0]),
+            # GH#13078 datetime +/- Period is invalid
+            Period("2011-01-01", freq="D"),
+            # https://github.com/pandas-dev/pandas/issues/10329
+            time(1, 2, 3),
+        ],
+    )
+    @pytest.mark.parametrize("dti_freq", [None, "D"])
+    def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
+        dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
+        dtarr = tm.box_expected(dti, box_with_array)
+        msg = "|".join(
+            [
+                "unsupported operand type",
+                "cannot (add|subtract)",
+                "cannot use operands with types",
cannot use operands with types", + "Concatenation operation is not implemented for NumPy arrays", + ] + ) + assert_invalid_addsub_type(dtarr, other, msg) + + @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"]) + @pytest.mark.parametrize("dti_freq", [None, "D"]) + def test_dt64arr_add_sub_parr( + self, dti_freq, pi_freq, box_with_array, box_with_array2 + ): + # GH#20049 subtracting PeriodIndex should raise TypeError + dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq) + pi = dti.to_period(pi_freq) + + dtarr = tm.box_expected(dti, box_with_array) + parr = tm.box_expected(pi, box_with_array2) + msg = "|".join( + [ + "cannot (add|subtract)", + "unsupported operand", + "descriptor.*requires", + "ufunc.*cannot use operands", + ] + ) + assert_invalid_addsub_type(dtarr, parr, msg) + + @pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") + def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture): + # https://github.com/pandas-dev/pandas/issues/10329 + + tz = tz_naive_fixture + + obj1 = date_range("2012-01-01", periods=3, tz=tz) + obj2 = [time(i, i, i) for i in range(3)] + + obj1 = tm.box_expected(obj1, box_with_array) + obj2 = tm.box_expected(obj2, box_with_array) + + msg = "|".join( + [ + "unsupported operand", + "cannot subtract DatetimeArray from ndarray", + ] + ) + # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being + # applied to Series or DatetimeIndex + # we aren't testing that here, so ignore. + assert_invalid_addsub_type(obj1, obj2, msg=msg) + + # ------------------------------------------------------------- + # Other invalid operations + + @pytest.mark.parametrize( + "dt64_series", + [ + Series([Timestamp("19900315"), Timestamp("19900315")]), + Series([NaT, Timestamp("19900315")]), + Series([NaT, NaT], dtype="datetime64[ns]"), + ], + ) + @pytest.mark.parametrize("one", [1, 1.0, np.array(1)]) + def test_dt64_mul_div_numeric_invalid(self, one, dt64_series, box_with_array): + obj = tm.box_expected(dt64_series, box_with_array) + + msg = "cannot perform .* with this index type" + + # multiplication + with pytest.raises(TypeError, match=msg): + obj * one + with pytest.raises(TypeError, match=msg): + one * obj + + # division + with pytest.raises(TypeError, match=msg): + obj / one + with pytest.raises(TypeError, match=msg): + one / obj + + +class TestDatetime64DateOffsetArithmetic: + # ------------------------------------------------------------- + # Tick DateOffsets + + # TODO: parametrize over timezone? 
+    def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
+        # GH#4532
+        # operate with pd.offsets
+        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
+        expected = Series(
+            [Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
+        )
+
+        ser = tm.box_expected(ser, box_with_array)
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = ser + pd.offsets.Second(5)
+        tm.assert_equal(result, expected)
+
+        result2 = pd.offsets.Second(5) + ser
+        tm.assert_equal(result2, expected)
+
+    def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
+        # GH#4532
+        # operate with pd.offsets
+        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
+        expected = Series(
+            [Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
+        )
+
+        ser = tm.box_expected(ser, box_with_array)
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = ser - pd.offsets.Second(5)
+        tm.assert_equal(result, expected)
+
+        result2 = -pd.offsets.Second(5) + ser
+        tm.assert_equal(result2, expected)
+        msg = "(bad|unsupported) operand type for unary"
+        with pytest.raises(TypeError, match=msg):
+            pd.offsets.Second(5) - ser
+
+    @pytest.mark.parametrize(
+        "cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
+    )
+    def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
+        # GH#4532
+        # smoke tests for valid DateOffsets
+        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
+        ser = tm.box_expected(ser, box_with_array)
+
+        offset_cls = getattr(pd.offsets, cls_name)
+        ser + offset_cls(5)
+        offset_cls(5) + ser
+        ser - offset_cls(5)
+
+    def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
+        # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
+        tz = tz_aware_fixture
+        if tz == "US/Pacific":
+            dates = date_range("2012-11-01", periods=3, tz=tz)
+            offset = dates + pd.offsets.Hour(5)
+            assert dates[0] + pd.offsets.Hour(5) == offset[0]
+
+        dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
+        expected = DatetimeIndex(
+            ["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
+            freq="H",
+            tz=tz,
+        )
+
+        dates = tm.box_expected(dates, box_with_array)
+        expected = tm.box_expected(expected, box_with_array)
+
+        for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
+            offset = dates + scalar
+            tm.assert_equal(offset, expected)
+            offset = scalar + dates
+            tm.assert_equal(offset, expected)
+
+            roundtrip = offset - scalar
+            tm.assert_equal(roundtrip, dates)
+
+            msg = "|".join(
+                ["bad operand type for unary -", "cannot subtract DatetimeArray"]
+            )
+            with pytest.raises(TypeError, match=msg):
+                scalar - dates
+
+    # -------------------------------------------------------------
+    # RelativeDelta DateOffsets
+
+    def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
+        # GH#10699
+        vec = DatetimeIndex(
+            [
+                Timestamp("2000-01-05 00:15:00"),
+                Timestamp("2000-01-31 00:23:00"),
+                Timestamp("2000-01-01"),
+                Timestamp("2000-03-31"),
+                Timestamp("2000-02-29"),
+                Timestamp("2000-12-31"),
+                Timestamp("2000-05-15"),
+                Timestamp("2001-06-15"),
+            ]
+        )
+        vec = tm.box_expected(vec, box_with_array)
+        vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
+
+        # DateOffset relativedelta fastpath
+        relative_kwargs = [
+            ("years", 2),
+            ("months", 5),
+            ("days", 3),
+            ("hours", 5),
+            ("minutes", 10),
+            ("seconds", 2),
+            ("microseconds", 5),
+        ]
+        for i, (unit, value) in enumerate(relative_kwargs):
+            off = DateOffset(**{unit: value})
+
+            expected = DatetimeIndex([x + off for x in vec_items])
+            expected = tm.box_expected(expected, box_with_array)
+            tm.assert_equal(expected, vec + off)
+
+            expected = DatetimeIndex([x - off for x in vec_items])
+            expected = tm.box_expected(expected, box_with_array)
+            tm.assert_equal(expected, vec - off)
+
+            off = DateOffset(**dict(relative_kwargs[: i + 1]))
+
+            expected = DatetimeIndex([x + off for x in vec_items])
+            expected = tm.box_expected(expected, box_with_array)
+            tm.assert_equal(expected, vec + off)
+
+            expected = DatetimeIndex([x - off for x in vec_items])
+            expected = tm.box_expected(expected, box_with_array)
+            tm.assert_equal(expected, vec - off)
+            msg = "(bad|unsupported) operand type for unary"
+            with pytest.raises(TypeError, match=msg):
+                off - vec
+
+    # -------------------------------------------------------------
+    # Non-Tick, Non-RelativeDelta DateOffsets
+
+    # TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
+    # tz-aware cases which this does not
+    @pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
+    @pytest.mark.parametrize(
+        "cls_and_kwargs",
+        [
+            "YearBegin",
+            ("YearBegin", {"month": 5}),
+            "YearEnd",
+            ("YearEnd", {"month": 5}),
+            "MonthBegin",
+            "MonthEnd",
+            "SemiMonthEnd",
+            "SemiMonthBegin",
+            "Week",
+            ("Week", {"weekday": 3}),
+            ("Week", {"weekday": 6}),
+            "BusinessDay",
+            "BDay",
+            "QuarterEnd",
+            "QuarterBegin",
+            "CustomBusinessDay",
+            "CDay",
+            "CBMonthEnd",
+            "CBMonthBegin",
+            "BMonthBegin",
+            "BMonthEnd",
+            "BusinessHour",
+            "BYearBegin",
+            "BYearEnd",
+            "BQuarterBegin",
+            ("LastWeekOfMonth", {"weekday": 2}),
+            (
+                "FY5253Quarter",
+                {
+                    "qtr_with_extra_week": 1,
+                    "startingMonth": 1,
+                    "weekday": 2,
+                    "variation": "nearest",
+                },
+            ),
+            ("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
+            ("WeekOfMonth", {"weekday": 2, "week": 2}),
+            "Easter",
+            ("DateOffset", {"day": 4}),
+            ("DateOffset", {"month": 5}),
+        ],
+    )
+    @pytest.mark.parametrize("normalize", [True, False])
+    @pytest.mark.parametrize("n", [0, 5])
+    def test_dt64arr_add_sub_DateOffsets(
+        self, box_with_array, n, normalize, cls_and_kwargs
+    ):
+        # GH#10699
+        # assert vectorized operation matches pointwise operations
+
+        if isinstance(cls_and_kwargs, tuple):
+            # If cls_name param is a tuple, then 2nd entry is kwargs for
+            # the offset constructor
+            cls_name, kwargs = cls_and_kwargs
+        else:
+            cls_name = cls_and_kwargs
+            kwargs = {}
+
+        if n == 0 and cls_name in [
+            "WeekOfMonth",
+            "LastWeekOfMonth",
+            "FY5253Quarter",
+            "FY5253",
+        ]:
+            # passing n = 0 is invalid for these offset classes
+            return
+
+        vec = DatetimeIndex(
+            [
+                Timestamp("2000-01-05 00:15:00"),
+                Timestamp("2000-01-31 00:23:00"),
+                Timestamp("2000-01-01"),
+                Timestamp("2000-03-31"),
+                Timestamp("2000-02-29"),
+                Timestamp("2000-12-31"),
+                Timestamp("2000-05-15"),
+                Timestamp("2001-06-15"),
+            ]
+        )
+        vec = tm.box_expected(vec, box_with_array)
+        vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
+
+        offset_cls = getattr(pd.offsets, cls_name)
+
+        # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
+        # applied to Series or DatetimeIndex
+        # we aren't testing that here, so ignore.
+
+        offset = offset_cls(n, normalize=normalize, **kwargs)
+
+        expected = DatetimeIndex([x + offset for x in vec_items])
+        expected = tm.box_expected(expected, box_with_array)
+        tm.assert_equal(expected, vec + offset)
+
+        expected = DatetimeIndex([x - offset for x in vec_items])
+        expected = tm.box_expected(expected, box_with_array)
+        tm.assert_equal(expected, vec - offset)
+
+        expected = DatetimeIndex([offset + x for x in vec_items])
+        expected = tm.box_expected(expected, box_with_array)
+        tm.assert_equal(expected, offset + vec)
+        msg = "(bad|unsupported) operand type for unary"
+        with pytest.raises(TypeError, match=msg):
+            offset - vec
+
+    def test_dt64arr_add_sub_DateOffset(self, box_with_array):
+        # GH#10699
+        s = date_range("2000-01-01", "2000-01-31", name="a")
+        s = tm.box_expected(s, box_with_array)
+        result = s + DateOffset(years=1)
+        result2 = DateOffset(years=1) + s
+        exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
+        exp = tm.box_expected(exp, box_with_array)
+        tm.assert_equal(result, exp)
+        tm.assert_equal(result2, exp)
+
+        result = s - DateOffset(years=1)
+        exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
+        exp = tm.box_expected(exp, box_with_array)
+        tm.assert_equal(result, exp)
+
+        s = DatetimeIndex(
+            [
+                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
+                Timestamp("2000-02-15", tz="US/Central"),
+            ],
+            name="a",
+        )
+        s = tm.box_expected(s, box_with_array)
+        result = s + pd.offsets.Day()
+        result2 = pd.offsets.Day() + s
+        exp = DatetimeIndex(
+            [
+                Timestamp("2000-01-16 00:15:00", tz="US/Central"),
+                Timestamp("2000-02-16", tz="US/Central"),
+            ],
+            name="a",
+        )
+        exp = tm.box_expected(exp, box_with_array)
+        tm.assert_equal(result, exp)
+        tm.assert_equal(result2, exp)
+
+        s = DatetimeIndex(
+            [
+                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
+                Timestamp("2000-02-15", tz="US/Central"),
+            ],
+            name="a",
+        )
+        s = tm.box_expected(s, box_with_array)
+        result = s + pd.offsets.MonthEnd()
+        result2 = pd.offsets.MonthEnd() + s
+        exp = DatetimeIndex(
+            [
+                Timestamp("2000-01-31 00:15:00", tz="US/Central"),
+                Timestamp("2000-02-29", tz="US/Central"),
+            ],
+            name="a",
+        )
+        exp = tm.box_expected(exp, box_with_array)
+        tm.assert_equal(result, exp)
+        tm.assert_equal(result2, exp)
+
+    @pytest.mark.parametrize(
+        "other",
+        [
+            np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
+            np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
+            np.array(  # matching offsets
+                [pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
+            ),
+        ],
+    )
+    @pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
+    def test_dt64arr_add_sub_offset_array(
+        self, tz_naive_fixture, box_with_array, op, other
+    ):
+        # GH#18849
+        # GH#10699 array of offsets
+
+        tz = tz_naive_fixture
+        dti = date_range("2017-01-01", periods=2, tz=tz)
+        dtarr = tm.box_expected(dti, box_with_array)
+
+        expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
+        expected = tm.box_expected(expected, box_with_array).astype(object)
+
+        with tm.assert_produces_warning(PerformanceWarning):
+            res = op(dtarr, other)
+        tm.assert_equal(res, expected)
+
+        # Same thing but boxing other
+        other = tm.box_expected(other, box_with_array)
+        if box_with_array is pd.array and op is roperator.radd:
+            # We expect a NumpyExtensionArray, not ndarray[object] here
+            expected = pd.array(expected, dtype=object)
+        with tm.assert_produces_warning(PerformanceWarning):
+            res = op(dtarr, other)
+        tm.assert_equal(res, expected)
+
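+    # A minimal illustrative sketch (an editorial addition, not part of the
+    # original suite): the offset-array cases above reduce to applying each
+    # offset pointwise, which is not vectorized and therefore raises a
+    # PerformanceWarning; the result comes back as object dtype.
+    def test_offset_array_pointwise_sketch(self):
+        dti = date_range("2017-01-01", periods=2)
+        other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
+        with tm.assert_produces_warning(PerformanceWarning):
+            res = dti + other
+        expected = DatetimeIndex([dti[n] + other[n] for n in range(2)]).astype(object)
+        tm.assert_equal(res, expected)
+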
+    @pytest.mark.parametrize(
+        "op, offset, exp, exp_freq",
+        [
+            (
+                "__add__",
+                DateOffset(months=3, days=10),
+                [
+                    Timestamp("2014-04-11"),
+                    Timestamp("2015-04-11"),
+                    Timestamp("2016-04-11"),
+                    Timestamp("2017-04-11"),
+                ],
+                None,
+            ),
+            (
+                "__add__",
+                DateOffset(months=3),
+                [
+                    Timestamp("2014-04-01"),
+                    Timestamp("2015-04-01"),
+                    Timestamp("2016-04-01"),
+                    Timestamp("2017-04-01"),
+                ],
+                "AS-APR",
+            ),
+            (
+                "__sub__",
+                DateOffset(months=3, days=10),
+                [
+                    Timestamp("2013-09-21"),
+                    Timestamp("2014-09-21"),
+                    Timestamp("2015-09-21"),
+                    Timestamp("2016-09-21"),
+                ],
+                None,
+            ),
+            (
+                "__sub__",
+                DateOffset(months=3),
+                [
+                    Timestamp("2013-10-01"),
+                    Timestamp("2014-10-01"),
+                    Timestamp("2015-10-01"),
+                    Timestamp("2016-10-01"),
+                ],
+                "AS-OCT",
+            ),
+        ],
+    )
+    def test_dti_add_sub_nonzero_mth_offset(
+        self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
+    ):
+        # GH 26258
+        tz = tz_aware_fixture
+        date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
+        date = tm.box_expected(date, box_with_array, False)
+        mth = getattr(date, op)
+        result = mth(offset)
+
+        expected = DatetimeIndex(exp, tz=tz)
+        expected = tm.box_expected(expected, box_with_array, False)
+        tm.assert_equal(result, expected)
+
+
+class TestDatetime64OverflowHandling:
+    # TODO: box + de-duplicate
+
+    def test_dt64_overflow_masking(self, box_with_array):
+        # GH#25317
+        left = Series([Timestamp("1969-12-31")])
+        right = Series([NaT])
+
+        left = tm.box_expected(left, box_with_array)
+        right = tm.box_expected(right, box_with_array)
+
+        expected = TimedeltaIndex([NaT])
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = left - right
+        tm.assert_equal(result, expected)
+
+    def test_dt64_series_arith_overflow(self):
+        # GH#12534, fixed by GH#19024
+        dt = Timestamp("1700-01-31")
+        td = Timedelta("20000 Days")
+        dti = date_range("1949-09-30", freq="100Y", periods=4)
+        ser = Series(dti)
+        msg = "Overflow in int64 addition"
+        with pytest.raises(OverflowError, match=msg):
+            ser - dt
+        with pytest.raises(OverflowError, match=msg):
+            dt - ser
+        with pytest.raises(OverflowError, match=msg):
+            ser + td
+        with pytest.raises(OverflowError, match=msg):
+            td + ser
+
+        ser.iloc[-1] = NaT
+        expected = Series(
+            ["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
+        )
+        res = ser + td
+        tm.assert_series_equal(res, expected)
+        res = td + ser
+        tm.assert_series_equal(res, expected)
+
+        ser.iloc[1:] = NaT
+        expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
+        res = ser - dt
+        tm.assert_series_equal(res, expected)
+        res = dt - ser
+        tm.assert_series_equal(res, -expected)
+
+    def test_datetimeindex_sub_timestamp_overflow(self):
+        dtimax = pd.to_datetime(["2021-12-28 17:19", Timestamp.max])
+        dtimin = pd.to_datetime(["2021-12-28 17:19", Timestamp.min])
+
+        tsneg = Timestamp("1950-01-01").as_unit("ns")
+        ts_neg_variants = [
+            tsneg,
+            tsneg.to_pydatetime(),
+            tsneg.to_datetime64().astype("datetime64[ns]"),
+            tsneg.to_datetime64().astype("datetime64[D]"),
+        ]
+
+        tspos = Timestamp("1980-01-01").as_unit("ns")
+        ts_pos_variants = [
+            tspos,
+            tspos.to_pydatetime(),
+            tspos.to_datetime64().astype("datetime64[ns]"),
+            tspos.to_datetime64().astype("datetime64[D]"),
+        ]
+        msg = "Overflow in int64 addition"
+        for variant in ts_neg_variants:
+            with pytest.raises(OverflowError, match=msg):
+                dtimax - variant
+
+        expected = Timestamp.max._value - tspos._value
+        for variant in ts_pos_variants:
+            res = dtimax - variant
+            assert res[1]._value == expected
+
+        expected = Timestamp.min._value - tsneg._value
+        for variant in ts_neg_variants:
+            res = dtimin - variant
+            assert res[1]._value == expected
+
+        for variant in ts_pos_variants:
+            with pytest.raises(OverflowError, match=msg):
+                dtimin - variant
+
+    def test_datetimeindex_sub_datetimeindex_overflow(self):
+        # GH#22492, GH#22508
+        dtimax = pd.to_datetime(["2021-12-28 17:19", Timestamp.max])
+        dtimin = pd.to_datetime(["2021-12-28 17:19", Timestamp.min])
+
+        ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
+        ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
+
+        # General tests
+        expected = Timestamp.max._value - ts_pos[1]._value
+        result = dtimax - ts_pos
+        assert result[1]._value == expected
+
+        expected = Timestamp.min._value - ts_neg[1]._value
+        result = dtimin - ts_neg
+        assert result[1]._value == expected
+        msg = "Overflow in int64 addition"
+        with pytest.raises(OverflowError, match=msg):
+            dtimax - ts_neg
+
+        with pytest.raises(OverflowError, match=msg):
+            dtimin - ts_pos
+
+        # Edge cases
+        tmin = pd.to_datetime([Timestamp.min])
+        t1 = tmin + Timedelta.max + Timedelta("1us")
+        with pytest.raises(OverflowError, match=msg):
+            t1 - tmin
+
+        tmax = pd.to_datetime([Timestamp.max])
+        t2 = tmax + Timedelta.min - Timedelta("1us")
+        with pytest.raises(OverflowError, match=msg):
+            tmax - t2
+
+
+class TestTimestampSeriesArithmetic:
+    def test_empty_series_add_sub(self, box_with_array):
+        # GH#13844
+        a = Series(dtype="M8[ns]")
+        b = Series(dtype="m8[ns]")
+        a = box_with_array(a)
+        b = box_with_array(b)
+        tm.assert_equal(a, a + b)
+        tm.assert_equal(a, a - b)
+        tm.assert_equal(a, b + a)
+        msg = "cannot subtract"
+        with pytest.raises(TypeError, match=msg):
+            b - a
+
+    def test_operators_datetimelike(self):
+        # ## timedelta64 ###
+        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
+        td1.iloc[2] = np.nan
+
+        # ## datetime64 ###
+        dt1 = Series(
+            [
+                Timestamp("20111230"),
+                Timestamp("20120101"),
+                Timestamp("20120103"),
+            ]
+        )
+        dt1.iloc[2] = np.nan
+        dt2 = Series(
+            [
+                Timestamp("20111231"),
+                Timestamp("20120102"),
+                Timestamp("20120104"),
+            ]
+        )
+        dt1 - dt2
+        dt2 - dt1
+
+        # datetime64 with timedelta
+        dt1 + td1
+        td1 + dt1
+        dt1 - td1
+
+        # timedelta with datetime64
+        td1 + dt1
+        dt1 + td1
+
+    def test_dt64ser_sub_datetime_dtype(self):
+        ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
+        dt = datetime(1993, 6, 22, 13, 30)
+        ser = Series([ts])
+        result = pd.to_timedelta(np.abs(ser - dt))
+        assert result.dtype == "timedelta64[ns]"
+
+    # -------------------------------------------------------------
+    # TODO: This next block of tests came from tests.series.test_operators,
+    # needs to be de-duplicated and parametrized over `box` classes
+
+    @pytest.mark.parametrize(
+        "left, right, op_fail",
+        [
+            [
+                [Timestamp("20111230"), Timestamp("20120101"), NaT],
+                [Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")],
+                ["__sub__", "__rsub__"],
+            ],
+            [
+                [Timestamp("20111230"), Timestamp("20120101"), NaT],
+                [timedelta(minutes=5, seconds=3), timedelta(minutes=5, seconds=3), NaT],
+                ["__add__", "__radd__", "__sub__"],
+            ],
+            [
+                [
+                    Timestamp("20111230", tz="US/Eastern"),
+                    Timestamp("20111230", tz="US/Eastern"),
+                    NaT,
+                ],
+                [timedelta(minutes=5, seconds=3), NaT, timedelta(minutes=5, seconds=3)],
+                ["__add__", "__radd__", "__sub__"],
+            ],
+        ],
+    )
+    def test_operators_datetimelike_invalid(
+        self, left, right, op_fail, all_arithmetic_operators
+    ):
+        # these are all TypeError ops
+        op_str = all_arithmetic_operators
+        arg1 = Series(left)
+        arg2 = Series(right)
+        # check that we are getting a TypeError
+        # with 'operate' (from core/ops.py) for the ops that are not
+        # defined
+        op = getattr(arg1, op_str, None)
+        # Previously, _validate_for_numeric_binop in core/indexes/base.py
+        # did this for us.
+        if op_str not in op_fail:
+            with pytest.raises(
+                TypeError, match="operate|[cC]annot|unsupported operand"
+            ):
+                op(arg2)
+        else:
+            # Smoke test
+            op(arg2)
+
+    def test_sub_single_tz(self):
+        # GH#12290
+        s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
+        s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
+        result = s1 - s2
+        expected = Series([Timedelta("2days")])
+        tm.assert_series_equal(result, expected)
+        result = s2 - s1
+        expected = Series([Timedelta("-2days")])
+        tm.assert_series_equal(result, expected)
+
+    def test_dt64tz_series_sub_dtitz(self):
+        # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
+        # (with same tz) raises, fixed by #19024
+        dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
+        ser = Series(dti)
+        expected = Series(TimedeltaIndex(["0days"] * 10))
+
+        res = dti - ser
+        tm.assert_series_equal(res, expected)
+        res = ser - dti
+        tm.assert_series_equal(res, expected)
+
+    def test_sub_datetime_compat(self):
+        # see GH#14088
+        s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
+        dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
+        exp = Series([Timedelta("1 days"), NaT])
+        tm.assert_series_equal(s - dt, exp)
+        tm.assert_series_equal(s - Timestamp(dt), exp)
+
+    def test_dt64_series_add_mixed_tick_DateOffset(self):
+        # GH#4532
+        # operate with pd.offsets
+        s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
+
+        result = s + pd.offsets.Milli(5)
+        result2 = pd.offsets.Milli(5) + s
+        expected = Series(
+            [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
+        )
+        tm.assert_series_equal(result, expected)
+        tm.assert_series_equal(result2, expected)
+
+        result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
+        expected = Series(
+            [Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
+        )
+        tm.assert_series_equal(result, expected)
+
+    def test_datetime64_ops_nat(self):
+        # GH#11349
+        datetime_series = Series([NaT, Timestamp("19900315")])
+        nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
+        single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
+
+        # subtraction
+        tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
+        msg = "bad operand type for unary -: 'DatetimeArray'"
+        with pytest.raises(TypeError, match=msg):
+            -single_nat_dtype_datetime + datetime_series
+
+        tm.assert_series_equal(
+            -NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
+        )
+        with pytest.raises(TypeError, match=msg):
+            -single_nat_dtype_datetime + nat_series_dtype_timestamp
+
+        # addition
+        tm.assert_series_equal(
+            nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
+        )
+        tm.assert_series_equal(
+            NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
+        )
+
+    # -------------------------------------------------------------
+    # Timezone-Centric Tests
+
+    def test_operators_datetimelike_with_timezones(self):
+        tz = "US/Eastern"
+        dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
+        dt2 = dt1.copy()
+        dt2.iloc[2] = np.nan
+
freq="H")) + td2 = td1.copy() + td2.iloc[1] = np.nan + assert td2._values.freq is None + + result = dt1 + td1[0] + exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = dt2 + td2[0] + exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + # odd numpy behavior with scalar timedeltas + result = td1[0] + dt1 + exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = td2[0] + dt2 + exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = dt1 - td1[0] + exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + msg = "(bad|unsupported) operand type for unary" + with pytest.raises(TypeError, match=msg): + td1[0] - dt1 + + result = dt2 - td2[0] + exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + with pytest.raises(TypeError, match=msg): + td2[0] - dt2 + + result = dt1 + td1 + exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = dt2 + td2 + exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = dt1 - td1 + exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = dt2 - td2 + exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + msg = "cannot (add|subtract)" + with pytest.raises(TypeError, match=msg): + td1 - dt1 + with pytest.raises(TypeError, match=msg): + td2 - dt2 + + +class TestDatetimeIndexArithmetic: + # ------------------------------------------------------------- + # Binary operations DatetimeIndex and TimedeltaIndex/array + + def test_dti_add_tdi(self, tz_naive_fixture): + # GH#17558 + tz = tz_naive_fixture + dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10) + tdi = pd.timedelta_range("0 days", periods=10) + expected = date_range("2017-01-01", periods=10, tz=tz) + expected = expected._with_freq(None) + + # add with TimedeltaIndex + result = dti + tdi + tm.assert_index_equal(result, expected) + + result = tdi + dti + tm.assert_index_equal(result, expected) + + # add with timedelta64 array + result = dti + tdi.values + tm.assert_index_equal(result, expected) + + result = tdi.values + dti + tm.assert_index_equal(result, expected) + + def test_dti_iadd_tdi(self, tz_naive_fixture): + # GH#17558 + tz = tz_naive_fixture + dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10) + tdi = pd.timedelta_range("0 days", periods=10) + expected = date_range("2017-01-01", periods=10, tz=tz) + expected = expected._with_freq(None) + + # iadd with TimedeltaIndex + result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10) + result += tdi + tm.assert_index_equal(result, expected) + + result = pd.timedelta_range("0 days", periods=10) + result += dti + tm.assert_index_equal(result, expected) + + # iadd with timedelta64 array + result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10) + result += tdi.values + tm.assert_index_equal(result, expected) + + result = pd.timedelta_range("0 days", periods=10) + result += dti + tm.assert_index_equal(result, expected) + + def test_dti_sub_tdi(self, tz_naive_fixture): + # GH#17558 + tz = tz_naive_fixture + dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10) + tdi = pd.timedelta_range("0 days", periods=10) + expected = date_range("2017-01-01", 
periods=10, tz=tz, freq="-1D") + expected = expected._with_freq(None) + + # sub with TimedeltaIndex + result = dti - tdi + tm.assert_index_equal(result, expected) + + msg = "cannot subtract .*TimedeltaArray" + with pytest.raises(TypeError, match=msg): + tdi - dti + + # sub with timedelta64 array + result = dti - tdi.values + tm.assert_index_equal(result, expected) + + msg = "cannot subtract a datelike from a TimedeltaArray" + with pytest.raises(TypeError, match=msg): + tdi.values - dti + + def test_dti_isub_tdi(self, tz_naive_fixture): + # GH#17558 + tz = tz_naive_fixture + dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10) + tdi = pd.timedelta_range("0 days", periods=10) + expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D") + expected = expected._with_freq(None) + + # isub with TimedeltaIndex + result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10) + result -= tdi + tm.assert_index_equal(result, expected) + + # DTA.__isub__ GH#43904 + dta = dti._data.copy() + dta -= tdi + tm.assert_datetime_array_equal(dta, expected._data) + + out = dti._data.copy() + np.subtract(out, tdi, out=out) + tm.assert_datetime_array_equal(out, expected._data) + + msg = "cannot subtract a datelike from a TimedeltaArray" + with pytest.raises(TypeError, match=msg): + tdi -= dti + + # isub with timedelta64 array + result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10) + result -= tdi.values + tm.assert_index_equal(result, expected) + + with pytest.raises(TypeError, match=msg): + tdi.values -= dti + + with pytest.raises(TypeError, match=msg): + tdi._values -= dti + + # ------------------------------------------------------------- + # Binary Operations DatetimeIndex and datetime-like + # TODO: A couple other tests belong in this section. Move them in + # A PR where there isn't already a giant diff. + + # ------------------------------------------------------------- + + def test_dta_add_sub_index(self, tz_naive_fixture): + # Check that DatetimeArray defers to Index classes + dti = date_range("20130101", periods=3, tz=tz_naive_fixture) + dta = dti.array + result = dta - dti + expected = dti - dti + tm.assert_index_equal(result, expected) + + tdi = result + result = dta + tdi + expected = dti + tdi + tm.assert_index_equal(result, expected) + + result = dta - tdi + expected = dti - tdi + tm.assert_index_equal(result, expected) + + def test_sub_dti_dti(self): + # previously performed setop (deprecated in 0.16.0), now changed to + # return subtraction -> TimeDeltaIndex (GH ...) 
+
+        dti = date_range("20130101", periods=3)
+        dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
+        expected = TimedeltaIndex([0, 0, 0])
+
+        result = dti - dti
+        tm.assert_index_equal(result, expected)
+
+        result = dti_tz - dti_tz
+        tm.assert_index_equal(result, expected)
+        msg = "Cannot subtract tz-naive and tz-aware datetime-like objects"
+        with pytest.raises(TypeError, match=msg):
+            dti_tz - dti
+
+        with pytest.raises(TypeError, match=msg):
+            dti - dti_tz
+
+        # isub
+        dti -= dti
+        tm.assert_index_equal(dti, expected)
+
+        # different length raises ValueError
+        dti1 = date_range("20130101", periods=3)
+        dti2 = date_range("20130101", periods=4)
+        msg = "cannot add indices of unequal length"
+        with pytest.raises(ValueError, match=msg):
+            dti1 - dti2
+
+        # NaN propagation
+        dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
+        dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
+        expected = TimedeltaIndex(["1 days", np.nan, np.nan])
+        result = dti2 - dti1
+        tm.assert_index_equal(result, expected)
+
+    # -------------------------------------------------------------------
+    # TODO: Most of this block is moved from series or frame tests, needs
+    # cleanup, box-parametrization, and de-duplication
+
+    @pytest.mark.parametrize("op", [operator.add, operator.sub])
+    def test_timedelta64_equal_timedelta_supported_ops(self, op, box_with_array):
+        ser = Series(
+            [
+                Timestamp("20130301"),
+                Timestamp("20130228 23:00:00"),
+                Timestamp("20130228 22:00:00"),
+                Timestamp("20130228 21:00:00"),
+            ]
+        )
+        obj = box_with_array(ser)
+
+        intervals = ["D", "h", "m", "s", "us"]
+
+        def timedelta64(*args):
+            # see casting notes in NumPy gh-12927
+            return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))
+
+        for d, h, m, s, us in product(*([range(2)] * 5)):
+            nptd = timedelta64(d, h, m, s, us)
+            pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
+            lhs = op(obj, nptd)
+            rhs = op(obj, pytd)
+
+            tm.assert_equal(lhs, rhs)
+
+    def test_ops_nat_mixed_datetime64_timedelta64(self):
+        # GH#11349
+        timedelta_series = Series([NaT, Timedelta("1s")])
+        datetime_series = Series([NaT, Timestamp("19900315")])
+        nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
+        nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
+        single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
+        single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
+
+        # subtraction
+        tm.assert_series_equal(
+            datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta
+        )
+
+        tm.assert_series_equal(
+            datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp
+        )
+        tm.assert_series_equal(
+            -single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp
+        )
+
+        # without a Series wrapping the NaT, it is ambiguous
+        # whether it is a datetime64 or timedelta64
+        # defaults to interpreting it as timedelta64
+        tm.assert_series_equal(
+            nat_series_dtype_timestamp - single_nat_dtype_datetime,
+            nat_series_dtype_timedelta,
+        )
+
+        tm.assert_series_equal(
+            nat_series_dtype_timestamp - single_nat_dtype_timedelta,
+            nat_series_dtype_timestamp,
+        )
+        tm.assert_series_equal(
+            -single_nat_dtype_timedelta + nat_series_dtype_timestamp,
+            nat_series_dtype_timestamp,
+        )
+        msg = "cannot subtract a datelike"
+        with pytest.raises(TypeError, match=msg):
+            timedelta_series - single_nat_dtype_datetime
+
+        # addition
+        tm.assert_series_equal(
+            nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp, + ) + tm.assert_series_equal( + single_nat_dtype_timedelta + nat_series_dtype_timestamp, + nat_series_dtype_timestamp, + ) + + tm.assert_series_equal( + nat_series_dtype_timestamp + single_nat_dtype_timedelta, + nat_series_dtype_timestamp, + ) + tm.assert_series_equal( + single_nat_dtype_timedelta + nat_series_dtype_timestamp, + nat_series_dtype_timestamp, + ) + + tm.assert_series_equal( + nat_series_dtype_timedelta + single_nat_dtype_datetime, + nat_series_dtype_timestamp, + ) + tm.assert_series_equal( + single_nat_dtype_datetime + nat_series_dtype_timedelta, + nat_series_dtype_timestamp, + ) + + def test_ufunc_coercions(self): + idx = date_range("2011-01-01", periods=3, freq="2D", name="x") + + delta = np.timedelta64(1, "D") + exp = date_range("2011-01-02", periods=3, freq="2D", name="x") + for result in [idx + delta, np.add(idx, delta)]: + assert isinstance(result, DatetimeIndex) + tm.assert_index_equal(result, exp) + assert result.freq == "2D" + + exp = date_range("2010-12-31", periods=3, freq="2D", name="x") + + for result in [idx - delta, np.subtract(idx, delta)]: + assert isinstance(result, DatetimeIndex) + tm.assert_index_equal(result, exp) + assert result.freq == "2D" + + # When adding/subtracting an ndarray (which has no .freq), the result + # does not infer freq + idx = idx._with_freq(None) + delta = np.array( + [np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")] + ) + exp = DatetimeIndex(["2011-01-02", "2011-01-05", "2011-01-08"], name="x") + + for result in [idx + delta, np.add(idx, delta)]: + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + exp = DatetimeIndex(["2010-12-31", "2011-01-01", "2011-01-02"], name="x") + for result in [idx - delta, np.subtract(idx, delta)]: + assert isinstance(result, DatetimeIndex) + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + def test_dti_add_series(self, tz_naive_fixture, names): + # GH#13905 + tz = tz_naive_fixture + index = DatetimeIndex( + ["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0] + ) + ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1]) + expected = Series(index + Timedelta(seconds=5), index=index, name=names[2]) + + # passing name arg isn't enough when names[2] is None + expected.name = names[2] + assert expected.dtype == index.dtype + result = ser + index + tm.assert_series_equal(result, expected) + result2 = index + ser + tm.assert_series_equal(result2, expected) + + expected = index + Timedelta(seconds=5) + result3 = ser.values + index + tm.assert_index_equal(result3, expected) + result4 = index + ser.values + tm.assert_index_equal(result4, expected) + + @pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub]) + def test_dti_addsub_offset_arraylike( + self, tz_naive_fixture, names, op, index_or_series + ): + # GH#18849, GH#19744 + other_box = index_or_series + + tz = tz_naive_fixture + dti = date_range("2017-01-01", periods=2, tz=tz, name=names[0]) + other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1]) + + xbox = get_upcast_box(dti, other) + + with tm.assert_produces_warning(PerformanceWarning): + res = op(dti, other) + + expected = DatetimeIndex( + [op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq="infer" + ) + expected = tm.box_expected(expected, xbox).astype(object) + tm.assert_equal(res, expected) + + @pytest.mark.parametrize("other_box", [pd.Index, np.array]) + def test_dti_addsub_object_arraylike( + self, tz_naive_fixture, 
box_with_array, other_box + ): + tz = tz_naive_fixture + + dti = date_range("2017-01-01", periods=2, tz=tz) + dtarr = tm.box_expected(dti, box_with_array) + other = other_box([pd.offsets.MonthEnd(), Timedelta(days=4)]) + xbox = get_upcast_box(dtarr, other) + + expected = DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture) + expected = tm.box_expected(expected, xbox).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + result = dtarr + other + tm.assert_equal(result, expected) + + expected = DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture) + expected = tm.box_expected(expected, xbox).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + result = dtarr - other + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize("years", [-1, 0, 1]) +@pytest.mark.parametrize("months", [-2, 0, 2]) +def test_shift_months(years, months): + dti = DatetimeIndex( + [ + Timestamp("2000-01-05 00:15:00"), + Timestamp("2000-01-31 00:23:00"), + Timestamp("2000-01-01"), + Timestamp("2000-02-29"), + Timestamp("2000-12-31"), + ] + ) + actual = DatetimeIndex(shift_months(dti.asi8, years * 12 + months)) + + raw = [x + pd.offsets.DateOffset(years=years, months=months) for x in dti] + expected = DatetimeIndex(raw) + tm.assert_index_equal(actual, expected) + + +def test_dt64arr_addsub_object_dtype_2d(): + # block-wise DataFrame operations will require operating on 2D + # DatetimeArray/TimedeltaArray, so check that specifically. + dti = date_range("1994-02-13", freq="2W", periods=4) + dta = dti._data.reshape((4, 1)) + + other = np.array([[pd.offsets.Day(n)] for n in range(4)]) + assert other.shape == dta.shape + + with tm.assert_produces_warning(PerformanceWarning): + result = dta + other + with tm.assert_produces_warning(PerformanceWarning): + expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1) + + tm.assert_numpy_array_equal(result, expected) + + with tm.assert_produces_warning(PerformanceWarning): + # Case where we expect to get a TimedeltaArray back + result2 = dta - dta.astype(object) + + assert result2.shape == (4, 1) + assert all(td._value == 0 for td in result2.ravel()) + + +def test_non_nano_dt64_addsub_np_nat_scalars(): + # GH 52295 + ser = Series([1233242342344, 232432434324, 332434242344], dtype="datetime64[ms]") + result = ser - np.datetime64("nat", "ms") + expected = Series([NaT] * 3, dtype="timedelta64[ms]") + tm.assert_series_equal(result, expected) + + result = ser + np.timedelta64("nat", "ms") + expected = Series([NaT] * 3, dtype="datetime64[ms]") + tm.assert_series_equal(result, expected) + + +def test_non_nano_dt64_addsub_np_nat_scalars_unitless(): + # GH 52295 + # TODO: Can we default to the ser unit? 
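+ # Editor's note: a brief illustration, not part of the original patch. A
+ # unit-less np.datetime64("nat") carries NumPy's generic datetime64 dtype,
+ # so pandas cannot recover the Series' "ms" unit from it and falls back to
+ # nanoseconds in the results below:
+ #
+ #   >>> import numpy as np
+ #   >>> np.datetime64("nat").dtype
+ #   dtype('<M8')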
+ ser = Series([1233242342344, 232432434324, 332434242344], dtype="datetime64[ms]") + result = ser - np.datetime64("nat") + expected = Series([NaT] * 3, dtype="timedelta64[ns]") + tm.assert_series_equal(result, expected) + + result = ser + np.timedelta64("nat") + expected = Series([NaT] * 3, dtype="datetime64[ns]") + tm.assert_series_equal(result, expected) + + +def test_non_nano_dt64_addsub_np_nat_scalars_unsupported_unit(): + # GH 52295 + ser = Series([12332, 23243, 33243], dtype="datetime64[s]") + result = ser - np.datetime64("nat", "D") + expected = Series([NaT] * 3, dtype="timedelta64[s]") + tm.assert_series_equal(result, expected) + + result = ser + np.timedelta64("nat", "D") + expected = Series([NaT] * 3, dtype="datetime64[s]") + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_interval.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_interval.py new file mode 100644 index 00000000..0e316cf4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_interval.py @@ -0,0 +1,306 @@ +import operator + +import numpy as np +import pytest + +from pandas.core.dtypes.common import is_list_like + +import pandas as pd +from pandas import ( + Categorical, + Index, + Interval, + IntervalIndex, + Period, + Series, + Timedelta, + Timestamp, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.arrays import ( + BooleanArray, + IntervalArray, +) +from pandas.tests.arithmetic.common import get_upcast_box + + +@pytest.fixture( + params=[ + (Index([0, 2, 4, 4]), Index([1, 3, 5, 8])), + (Index([0.0, 1.0, 2.0, np.nan]), Index([1.0, 2.0, 3.0, np.nan])), + ( + timedelta_range("0 days", periods=3).insert(3, pd.NaT), + timedelta_range("1 day", periods=3).insert(3, pd.NaT), + ), + ( + date_range("20170101", periods=3).insert(3, pd.NaT), + date_range("20170102", periods=3).insert(3, pd.NaT), + ), + ( + date_range("20170101", periods=3, tz="US/Eastern").insert(3, pd.NaT), + date_range("20170102", periods=3, tz="US/Eastern").insert(3, pd.NaT), + ), + ], + ids=lambda x: str(x[0].dtype), +) +def left_right_dtypes(request): + """ + Fixture for building an IntervalArray from various dtypes + """ + return request.param + + +@pytest.fixture +def interval_array(left_right_dtypes): + """ + Fixture to generate an IntervalArray of various dtypes containing NA if possible + """ + left, right = left_right_dtypes + return IntervalArray.from_arrays(left, right) + + +def create_categorical_intervals(left, right, closed="right"): + return Categorical(IntervalIndex.from_arrays(left, right, closed)) + + +def create_series_intervals(left, right, closed="right"): + return Series(IntervalArray.from_arrays(left, right, closed)) + + +def create_series_categorical_intervals(left, right, closed="right"): + return Series(Categorical(IntervalIndex.from_arrays(left, right, closed))) + + +class TestComparison: + @pytest.fixture(params=[operator.eq, operator.ne]) + def op(self, request): + return request.param + + @pytest.fixture( + params=[ + IntervalArray.from_arrays, + IntervalIndex.from_arrays, + create_categorical_intervals, + create_series_intervals, + create_series_categorical_intervals, + ], + ids=[ + "IntervalArray", + "IntervalIndex", + "Categorical[Interval]", + "Series[Interval]", + "Series[Categorical[Interval]]", + ], + ) + def interval_constructor(self, request): + """ + Fixture for all pandas native interval constructors. 
+ To be used as the LHS of IntervalArray comparisons. + """ + return request.param + + def elementwise_comparison(self, op, interval_array, other): + """ + Helper that performs elementwise comparisons between `array` and `other` + """ + other = other if is_list_like(other) else [other] * len(interval_array) + expected = np.array([op(x, y) for x, y in zip(interval_array, other)]) + if isinstance(other, Series): + return Series(expected, index=other.index) + return expected + + def test_compare_scalar_interval(self, op, interval_array): + # matches first interval + other = interval_array[0] + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + # matches on a single endpoint but not both + other = Interval(interval_array.left[0], interval_array.right[1]) + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + def test_compare_scalar_interval_mixed_closed(self, op, closed, other_closed): + interval_array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed) + other = Interval(0, 1, closed=other_closed) + + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + def test_compare_scalar_na(self, op, interval_array, nulls_fixture, box_with_array): + box = box_with_array + obj = tm.box_expected(interval_array, box) + result = op(obj, nulls_fixture) + + if nulls_fixture is pd.NA: + # GH#31882 + exp = np.ones(interval_array.shape, dtype=bool) + expected = BooleanArray(exp, exp) + else: + expected = self.elementwise_comparison(op, interval_array, nulls_fixture) + + if not (box is Index and nulls_fixture is pd.NA): + # don't cast expected from BooleanArray to ndarray[object] + xbox = get_upcast_box(obj, nulls_fixture, True) + expected = tm.box_expected(expected, xbox) + + tm.assert_equal(result, expected) + + rev = op(nulls_fixture, obj) + tm.assert_equal(rev, expected) + + @pytest.mark.parametrize( + "other", + [ + 0, + 1.0, + True, + "foo", + Timestamp("2017-01-01"), + Timestamp("2017-01-01", tz="US/Eastern"), + Timedelta("0 days"), + Period("2017-01-01", "D"), + ], + ) + def test_compare_scalar_other(self, op, interval_array, other): + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + def test_compare_list_like_interval(self, op, interval_array, interval_constructor): + # same endpoints + other = interval_constructor(interval_array.left, interval_array.right) + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_equal(result, expected) + + # different endpoints + other = interval_constructor( + interval_array.left[::-1], interval_array.right[::-1] + ) + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_equal(result, expected) + + # all nan endpoints + other = interval_constructor([np.nan] * 4, [np.nan] * 4) + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_equal(result, expected) + + def test_compare_list_like_interval_mixed_closed( + self, op, interval_constructor, closed, other_closed + ): + interval_array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed) + other = 
interval_constructor(range(2), range(1, 3), closed=other_closed) + + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "other", + [ + ( + Interval(0, 1), + Interval(Timedelta("1 day"), Timedelta("2 days")), + Interval(4, 5, "both"), + Interval(10, 20, "neither"), + ), + (0, 1.5, Timestamp("20170103"), np.nan), + ( + Timestamp("20170102", tz="US/Eastern"), + Timedelta("2 days"), + "baz", + pd.NaT, + ), + ], + ) + def test_compare_list_like_object(self, op, interval_array, other): + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + def test_compare_list_like_nan(self, op, interval_array, nulls_fixture): + other = [nulls_fixture] * 4 + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "other", + [ + np.arange(4, dtype="int64"), + np.arange(4, dtype="float64"), + date_range("2017-01-01", periods=4), + date_range("2017-01-01", periods=4, tz="US/Eastern"), + timedelta_range("0 days", periods=4), + period_range("2017-01-01", periods=4, freq="D"), + Categorical(list("abab")), + Categorical(date_range("2017-01-01", periods=4)), + pd.array(list("abcd")), + pd.array(["foo", 3.14, None, object()], dtype=object), + ], + ids=lambda x: str(x.dtype), + ) + def test_compare_list_like_other(self, op, interval_array, other): + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("length", [1, 3, 5]) + @pytest.mark.parametrize("other_constructor", [IntervalArray, list]) + def test_compare_length_mismatch_errors(self, op, other_constructor, length): + interval_array = IntervalArray.from_arrays(range(4), range(1, 5)) + other = other_constructor([Interval(0, 1)] * length) + with pytest.raises(ValueError, match="Lengths must match to compare"): + op(interval_array, other) + + @pytest.mark.parametrize( + "constructor, expected_type, assert_func", + [ + (IntervalIndex, np.array, tm.assert_numpy_array_equal), + (Series, Series, tm.assert_series_equal), + ], + ) + def test_index_series_compat(self, op, constructor, expected_type, assert_func): + # IntervalIndex/Series that rely on IntervalArray for comparisons + breaks = range(4) + index = constructor(IntervalIndex.from_breaks(breaks)) + + # scalar comparisons + other = index[0] + result = op(index, other) + expected = expected_type(self.elementwise_comparison(op, index, other)) + assert_func(result, expected) + + other = breaks[0] + result = op(index, other) + expected = expected_type(self.elementwise_comparison(op, index, other)) + assert_func(result, expected) + + # list-like comparisons + other = IntervalArray.from_breaks(breaks) + result = op(index, other) + expected = expected_type(self.elementwise_comparison(op, index, other)) + assert_func(result, expected) + + other = [index[0], breaks[0], "foo"] + result = op(index, other) + expected = expected_type(self.elementwise_comparison(op, index, other)) + assert_func(result, expected) + + @pytest.mark.parametrize("scalars", ["a", False, 1, 1.0, None]) + def test_comparison_operations(self, scalars): + # GH #28981 + expected = Series([False, False]) + s = Series([Interval(0, 1), Interval(1, 2)], dtype="interval") + result = s == scalars + 
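+ # Editor's note (illustrative, not part of the original patch): equality
+ # between an Interval and any non-Interval scalar is elementwise False,
+ # e.g. pd.Interval(0, 1) == "a" evaluates to False, which is why the
+ # all-False `expected` above is the right mask for every scalar tried.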
tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_numeric.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_numeric.py new file mode 100644 index 00000000..fa17c24f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_numeric.py @@ -0,0 +1,1490 @@ +# Arithmetic tests for DataFrame/Series/Index/Array classes that should +# behave identically. +# Specifically for numeric dtypes +from __future__ import annotations + +from collections import abc +from datetime import timedelta +from decimal import Decimal +import operator + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + RangeIndex, + Series, + Timedelta, + TimedeltaIndex, + array, +) +import pandas._testing as tm +from pandas.core import ops +from pandas.core.computation import expressions as expr +from pandas.tests.arithmetic.common import ( + assert_invalid_addsub_type, + assert_invalid_comparison, +) + + +@pytest.fixture(params=[Index, Series, tm.to_array]) +def box_pandas_1d_array(request): + """ + Fixture to test behavior for Index, Series and tm.to_array classes + """ + return request.param + + +def adjust_negative_zero(zero, expected): + """ + Helper to adjust the expected result if we are dividing by -0.0 + as opposed to 0.0 + """ + if np.signbit(np.array(zero)).any(): + # All entries in the `zero` fixture should be either + # all-negative or no-negative. + assert np.signbit(np.array(zero)).all() + + expected *= -1 + + return expected + + +def compare_op(series, other, op): + left = np.abs(series) if op in (ops.rpow, operator.pow) else series + right = np.abs(other) if op in (ops.rpow, operator.pow) else other + + cython_or_numpy = op(left, right) + python = left.combine(right, op) + if isinstance(other, Series) and not other.index.equals(series.index): + python.index = python.index._with_freq(None) + tm.assert_series_equal(cython_or_numpy, python) + + +# TODO: remove this kludge once mypy stops giving false positives here +# List comprehension has incompatible type List[PandasObject]; expected List[RangeIndex] +# See GH#29725 +_ldtypes = ["i1", "i2", "i4", "i8", "u1", "u2", "u4", "u8", "f2", "f4", "f8"] +lefts: list[Index | Series] = [RangeIndex(10, 40, 10)] +lefts.extend([Series([10, 20, 30], dtype=dtype) for dtype in _ldtypes]) +lefts.extend([Index([10, 20, 30], dtype=dtype) for dtype in _ldtypes if dtype != "f2"]) + +# ------------------------------------------------------------------ +# Comparisons + + +class TestNumericComparisons: + def test_operator_series_comparison_zerorank(self): + # GH#13006 + result = np.float64(0) > Series([1, 2, 3]) + expected = 0.0 > Series([1, 2, 3]) + tm.assert_series_equal(result, expected) + result = Series([1, 2, 3]) < np.float64(0) + expected = Series([1, 2, 3]) < 0.0 + tm.assert_series_equal(result, expected) + result = np.array([0, 1, 2])[0] > Series([0, 1, 2]) + expected = 0.0 > Series([1, 2, 3]) + tm.assert_series_equal(result, expected) + + def test_df_numeric_cmp_dt64_raises(self, box_with_array, fixed_now_ts): + # GH#8932, GH#22163 + ts = fixed_now_ts + obj = np.array(range(5)) + obj = tm.box_expected(obj, box_with_array) + + assert_invalid_comparison(obj, ts, box_with_array) + + def test_compare_invalid(self): + # GH#8058 + # ops testing + a = Series(np.random.default_rng(2).standard_normal(5), name=0) + b = Series(np.random.default_rng(2).standard_normal(5)) + b.name = pd.Timestamp("2000-01-01") + 
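+ # Editor's note (reasoning sketch, not part of the original patch): for
+ # nonzero floats a / b == 1 / (b / a) elementwise (e.g. 2/4 == 1/(4/2)
+ # == 0.5), so the assertion below exercises division while showing the
+ # odd Timestamp name attached to `b` does not disturb the arithmetic.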
tm.assert_series_equal(a / b, 1 / (b / a)) + + def test_numeric_cmp_string_numexpr_path(self, box_with_array, monkeypatch): + # GH#36377, GH#35700 + box = box_with_array + xbox = box if box is not Index else np.ndarray + + obj = Series(np.random.default_rng(2).standard_normal(51)) + obj = tm.box_expected(obj, box, transpose=False) + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 50) + result = obj == "a" + + expected = Series(np.zeros(51, dtype=bool)) + expected = tm.box_expected(expected, xbox, transpose=False) + tm.assert_equal(result, expected) + + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 50) + result = obj != "a" + tm.assert_equal(result, ~expected) + + msg = "Invalid comparison between dtype=float64 and str" + with pytest.raises(TypeError, match=msg): + obj < "a" + + +# ------------------------------------------------------------------ +# Numeric dtypes Arithmetic with Datetime/Timedelta Scalar + + +class TestNumericArraylikeArithmeticWithDatetimeLike: + @pytest.mark.parametrize("box_cls", [np.array, Index, Series]) + @pytest.mark.parametrize( + "left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype) + ) + def test_mul_td64arr(self, left, box_cls): + # GH#22390 + right = np.array([1, 2, 3], dtype="m8[s]") + right = box_cls(right) + + expected = TimedeltaIndex(["10s", "40s", "90s"], dtype=right.dtype) + + if isinstance(left, Series) or box_cls is Series: + expected = Series(expected) + assert expected.dtype == right.dtype + + result = left * right + tm.assert_equal(result, expected) + + result = right * left + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("box_cls", [np.array, Index, Series]) + @pytest.mark.parametrize( + "left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype) + ) + def test_div_td64arr(self, left, box_cls): + # GH#22390 + right = np.array([10, 40, 90], dtype="m8[s]") + right = box_cls(right) + + expected = TimedeltaIndex(["1s", "2s", "3s"], dtype=right.dtype) + if isinstance(left, Series) or box_cls is Series: + expected = Series(expected) + assert expected.dtype == right.dtype + + result = right / left + tm.assert_equal(result, expected) + + result = right // left + tm.assert_equal(result, expected) + + # (true_) needed for min-versions build 2022-12-26 + msg = "ufunc '(true_)?divide' cannot use operands with types" + with pytest.raises(TypeError, match=msg): + left / right + + msg = "ufunc 'floor_divide' cannot use operands with types" + with pytest.raises(TypeError, match=msg): + left // right + + # TODO: also test Tick objects; + # see test_numeric_arr_rdiv_tdscalar for note on these failing + @pytest.mark.parametrize( + "scalar_td", + [ + Timedelta(days=1), + Timedelta(days=1).to_timedelta64(), + Timedelta(days=1).to_pytimedelta(), + Timedelta(days=1).to_timedelta64().astype("timedelta64[s]"), + Timedelta(days=1).to_timedelta64().astype("timedelta64[ms]"), + ], + ids=lambda x: type(x).__name__, + ) + def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box_with_array): + # GH#19333 + box = box_with_array + index = numeric_idx + expected = TimedeltaIndex([Timedelta(days=n) for n in range(len(index))]) + if isinstance(scalar_td, np.timedelta64): + dtype = scalar_td.dtype + expected = expected.astype(dtype) + elif type(scalar_td) is timedelta: + expected = expected.astype("m8[us]") + + index = tm.box_expected(index, box) + expected = tm.box_expected(expected, box) + + result = index * scalar_td + tm.assert_equal(result, expected) + + commute = scalar_td * index + 
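+ # Editor's note (illustrative, not part of the original patch):
+ # multiplication by a timedelta scalar commutes; assuming the default
+ # nanosecond resolution:
+ #
+ #   >>> pd.Index([1, 2]) * pd.Timedelta(days=1)
+ #   TimedeltaIndex(['1 days', '2 days'], dtype='timedelta64[ns]', freq=None)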
tm.assert_equal(commute, expected) + + @pytest.mark.parametrize( + "scalar_td", + [ + Timedelta(days=1), + Timedelta(days=1).to_timedelta64(), + Timedelta(days=1).to_pytimedelta(), + ], + ids=lambda x: type(x).__name__, + ) + @pytest.mark.parametrize("dtype", [np.int64, np.float64]) + def test_numeric_arr_mul_tdscalar_numexpr_path( + self, dtype, scalar_td, box_with_array + ): + # GH#44772 for the float64 case + box = box_with_array + + arr_i8 = np.arange(2 * 10**4).astype(np.int64, copy=False) + arr = arr_i8.astype(dtype, copy=False) + obj = tm.box_expected(arr, box, transpose=False) + + expected = arr_i8.view("timedelta64[D]").astype("timedelta64[ns]") + if type(scalar_td) is timedelta: + expected = expected.astype("timedelta64[us]") + + expected = tm.box_expected(expected, box, transpose=False) + + result = obj * scalar_td + tm.assert_equal(result, expected) + + result = scalar_td * obj + tm.assert_equal(result, expected) + + def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box_with_array): + box = box_with_array + + index = numeric_idx[1:3] + + expected = TimedeltaIndex(["3 Days", "36 Hours"]) + if isinstance(three_days, np.timedelta64): + dtype = three_days.dtype + if dtype < np.dtype("m8[s]"): + # i.e. resolution is lower -> use lowest supported resolution + dtype = np.dtype("m8[s]") + expected = expected.astype(dtype) + elif type(three_days) is timedelta: + expected = expected.astype("m8[us]") + + index = tm.box_expected(index, box) + expected = tm.box_expected(expected, box) + + result = three_days / index + tm.assert_equal(result, expected) + + msg = "cannot use operands with types dtype" + with pytest.raises(TypeError, match=msg): + index / three_days + + @pytest.mark.parametrize( + "other", + [ + Timedelta(hours=31), + Timedelta(hours=31).to_pytimedelta(), + Timedelta(hours=31).to_timedelta64(), + Timedelta(hours=31).to_timedelta64().astype("m8[h]"), + np.timedelta64("NaT"), + np.timedelta64("NaT", "D"), + pd.offsets.Minute(3), + pd.offsets.Second(0), + # GH#28080 numeric+datetimelike should raise; Timestamp used + # to raise NullFrequencyError but that behavior was removed in 1.0 + pd.Timestamp("2021-01-01", tz="Asia/Tokyo"), + pd.Timestamp("2021-01-01"), + pd.Timestamp("2021-01-01").to_pydatetime(), + pd.Timestamp("2021-01-01", tz="UTC").to_pydatetime(), + pd.Timestamp("2021-01-01").to_datetime64(), + np.datetime64("NaT", "ns"), + pd.NaT, + ], + ids=repr, + ) + def test_add_sub_datetimedeltalike_invalid( + self, numeric_idx, other, box_with_array + ): + box = box_with_array + + left = tm.box_expected(numeric_idx, box) + msg = "|".join( + [ + "unsupported operand type", + "Addition/subtraction of integers and integer-arrays", + "Instead of adding/subtracting", + "cannot use operands with types dtype", + "Concatenation operation is not implemented for NumPy arrays", + "Cannot (add|subtract) NaT (to|from) ndarray", + # pd.array vs np.datetime64 case + r"operand type\(s\) all returned NotImplemented from __array_ufunc__", + "can only perform ops with numeric values", + "cannot subtract DatetimeArray from ndarray", + # pd.Timedelta(1) + Index([0, 1, 2]) + "Cannot add or subtract Timedelta from integers", + ] + ) + assert_invalid_addsub_type(left, other, msg) + + +# ------------------------------------------------------------------ +# Arithmetic + + +class TestDivisionByZero: + def test_div_zero(self, zero, numeric_idx): + idx = numeric_idx + + expected = Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64) + # We only adjust for Index, because Series 
does not yet apply + # the adjustment correctly. + expected2 = adjust_negative_zero(zero, expected) + + result = idx / zero + tm.assert_index_equal(result, expected2) + ser_compat = Series(idx).astype("i8") / np.array(zero).astype("i8") + tm.assert_series_equal(ser_compat, Series(expected)) + + def test_floordiv_zero(self, zero, numeric_idx): + idx = numeric_idx + + expected = Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64) + # We only adjust for Index, because Series does not yet apply + # the adjustment correctly. + expected2 = adjust_negative_zero(zero, expected) + + result = idx // zero + tm.assert_index_equal(result, expected2) + ser_compat = Series(idx).astype("i8") // np.array(zero).astype("i8") + tm.assert_series_equal(ser_compat, Series(expected)) + + def test_mod_zero(self, zero, numeric_idx): + idx = numeric_idx + + expected = Index([np.nan, np.nan, np.nan, np.nan, np.nan], dtype=np.float64) + result = idx % zero + tm.assert_index_equal(result, expected) + ser_compat = Series(idx).astype("i8") % np.array(zero).astype("i8") + tm.assert_series_equal(ser_compat, Series(result)) + + def test_divmod_zero(self, zero, numeric_idx): + idx = numeric_idx + + exleft = Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64) + exright = Index([np.nan, np.nan, np.nan, np.nan, np.nan], dtype=np.float64) + exleft = adjust_negative_zero(zero, exleft) + + result = divmod(idx, zero) + tm.assert_index_equal(result[0], exleft) + tm.assert_index_equal(result[1], exright) + + @pytest.mark.parametrize("op", [operator.truediv, operator.floordiv]) + def test_div_negative_zero(self, zero, numeric_idx, op): + # Check that -1 / -0.0 returns np.inf, not -np.inf + if numeric_idx.dtype == np.uint64: + pytest.skip(f"Not relevant for {numeric_idx.dtype}") + idx = numeric_idx - 3 + + expected = Index([-np.inf, -np.inf, -np.inf, np.nan, np.inf], dtype=np.float64) + expected = adjust_negative_zero(zero, expected) + + result = op(idx, zero) + tm.assert_index_equal(result, expected) + + # ------------------------------------------------------------------ + + @pytest.mark.parametrize("dtype1", [np.int64, np.float64, np.uint64]) + def test_ser_div_ser( + self, + switch_numexpr_min_elements, + dtype1, + any_real_numpy_dtype, + ): + # no longer do integer div for any ops, but deal with the 0's + dtype2 = any_real_numpy_dtype + + first = Series([3, 4, 5, 8], name="first").astype(dtype1) + second = Series([0, 0, 0, 3], name="second").astype(dtype2) + + with np.errstate(all="ignore"): + expected = Series( + first.values.astype(np.float64) / second.values, + dtype="float64", + name=None, + ) + expected.iloc[0:3] = np.inf + if first.dtype == "int64" and second.dtype == "float32": + # when using numexpr, the casting rules are slightly different + # and int64/float32 combo results in float32 instead of float64 + if expr.USE_NUMEXPR and switch_numexpr_min_elements == 0: + expected = expected.astype("float32") + + result = first / second + tm.assert_series_equal(result, expected) + assert not result.equals(second / first) + + @pytest.mark.parametrize("dtype1", [np.int64, np.float64, np.uint64]) + def test_ser_divmod_zero(self, dtype1, any_real_numpy_dtype): + # GH#26987 + dtype2 = any_real_numpy_dtype + left = Series([1, 1]).astype(dtype1) + right = Series([0, 2]).astype(dtype2) + + # GH#27321 pandas convention is to set 1 // 0 to np.inf, as opposed + # to numpy which sets to np.nan; patch `expected[0]` below + expected = left // right, left % right + expected = list(expected) + expected[0] = 
expected[0].astype(np.float64) + expected[0][0] = np.inf + result = divmod(left, right) + + tm.assert_series_equal(result[0], expected[0]) + tm.assert_series_equal(result[1], expected[1]) + + # rdivmod case + result = divmod(left.values, right) + tm.assert_series_equal(result[0], expected[0]) + tm.assert_series_equal(result[1], expected[1]) + + def test_ser_divmod_inf(self): + left = Series([np.inf, 1.0]) + right = Series([np.inf, 2.0]) + + expected = left // right, left % right + result = divmod(left, right) + + tm.assert_series_equal(result[0], expected[0]) + tm.assert_series_equal(result[1], expected[1]) + + # rdivmod case + result = divmod(left.values, right) + tm.assert_series_equal(result[0], expected[0]) + tm.assert_series_equal(result[1], expected[1]) + + def test_rdiv_zero_compat(self): + # GH#8674 + zero_array = np.array([0] * 5) + data = np.random.default_rng(2).standard_normal(5) + expected = Series([0.0] * 5) + + result = zero_array / Series(data) + tm.assert_series_equal(result, expected) + + result = Series(zero_array) / data + tm.assert_series_equal(result, expected) + + result = Series(zero_array) / Series(data) + tm.assert_series_equal(result, expected) + + def test_div_zero_inf_signs(self): + # GH#9144, inf signing + ser = Series([-1, 0, 1], name="first") + expected = Series([-np.inf, np.nan, np.inf], name="first") + + result = ser / 0 + tm.assert_series_equal(result, expected) + + def test_rdiv_zero(self): + # GH#9144 + ser = Series([-1, 0, 1], name="first") + expected = Series([0.0, np.nan, 0.0], name="first") + + result = 0 / ser + tm.assert_series_equal(result, expected) + + def test_floordiv_div(self): + # GH#9144 + ser = Series([-1, 0, 1], name="first") + + result = ser // 0 + expected = Series([-np.inf, np.nan, np.inf], name="first") + tm.assert_series_equal(result, expected) + + def test_df_div_zero_df(self): + # integer div, but deal with the 0's (GH#9144) + df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}) + result = df / df + + first = Series([1.0, 1.0, 1.0, 1.0]) + second = Series([np.nan, np.nan, np.nan, 1]) + expected = pd.DataFrame({"first": first, "second": second}) + tm.assert_frame_equal(result, expected) + + def test_df_div_zero_array(self): + # integer div, but deal with the 0's (GH#9144) + df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}) + + first = Series([1.0, 1.0, 1.0, 1.0]) + second = Series([np.nan, np.nan, np.nan, 1]) + expected = pd.DataFrame({"first": first, "second": second}) + + with np.errstate(all="ignore"): + arr = df.values.astype("float") / df.values + result = pd.DataFrame(arr, index=df.index, columns=df.columns) + tm.assert_frame_equal(result, expected) + + def test_df_div_zero_int(self): + # integer div, but deal with the 0's (GH#9144) + df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}) + + result = df / 0 + expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns) + expected.iloc[0:3, 1] = np.nan + tm.assert_frame_equal(result, expected) + + # numpy has a slightly different (wrong) treatment + with np.errstate(all="ignore"): + arr = df.values.astype("float64") / 0 + result2 = pd.DataFrame(arr, index=df.index, columns=df.columns) + tm.assert_frame_equal(result2, expected) + + def test_df_div_zero_series_does_not_commute(self): + # integer div, but deal with the 0's (GH#9144) + df = pd.DataFrame(np.random.default_rng(2).standard_normal((10, 5))) + ser = df[0] + res = ser / df + res2 = df / ser + assert not res.fillna(0).equals(res2.fillna(0)) + + # 
------------------------------------------------------------------ + # Mod By Zero + + def test_df_mod_zero_df(self, using_array_manager): + # GH#3590, modulo as ints + df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}) + # this is technically wrong, as the integer portion is coerced to float + first = Series([0, 0, 0, 0]) + if not using_array_manager: + # INFO(ArrayManager) BlockManager doesn't preserve dtype per column + # while ArrayManager performs op column-wise and thus preserves + # dtype if possible + first = first.astype("float64") + second = Series([np.nan, np.nan, np.nan, 0]) + expected = pd.DataFrame({"first": first, "second": second}) + result = df % df + tm.assert_frame_equal(result, expected) + + # GH#38939 If we don't pass copy=False, df is consolidated and + # result["first"] is float64 instead of int64 + df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}, copy=False) + first = Series([0, 0, 0, 0], dtype="int64") + second = Series([np.nan, np.nan, np.nan, 0]) + expected = pd.DataFrame({"first": first, "second": second}) + result = df % df + tm.assert_frame_equal(result, expected) + + def test_df_mod_zero_array(self): + # GH#3590, modulo as ints + df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}) + + # this is technically wrong, as the integer portion is coerced to float + # ### + first = Series([0, 0, 0, 0], dtype="float64") + second = Series([np.nan, np.nan, np.nan, 0]) + expected = pd.DataFrame({"first": first, "second": second}) + + # numpy has a slightly different (wrong) treatment + with np.errstate(all="ignore"): + arr = df.values % df.values + result2 = pd.DataFrame(arr, index=df.index, columns=df.columns, dtype="float64") + result2.iloc[0:3, 1] = np.nan + tm.assert_frame_equal(result2, expected) + + def test_df_mod_zero_int(self): + # GH#3590, modulo as ints + df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}) + + result = df % 0 + expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns) + tm.assert_frame_equal(result, expected) + + # numpy has a slightly different (wrong) treatment + with np.errstate(all="ignore"): + arr = df.values.astype("float64") % 0 + result2 = pd.DataFrame(arr, index=df.index, columns=df.columns) + tm.assert_frame_equal(result2, expected) + + def test_df_mod_zero_series_does_not_commute(self): + # GH#3590, modulo as ints + # not commutative with series + df = pd.DataFrame(np.random.default_rng(2).standard_normal((10, 5))) + ser = df[0] + res = ser % df + res2 = df % ser + assert not res.fillna(0).equals(res2.fillna(0)) + + +class TestMultiplicationDivision: + # __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__ + # for non-timestamp/timedelta/period dtypes + + def test_divide_decimal(self, box_with_array): + # resolves issue GH#9787 + box = box_with_array + ser = Series([Decimal(10)]) + expected = Series([Decimal(5)]) + + ser = tm.box_expected(ser, box) + expected = tm.box_expected(expected, box) + + result = ser / Decimal(2) + + tm.assert_equal(result, expected) + + result = ser // Decimal(2) + tm.assert_equal(result, expected) + + def test_div_equiv_binop(self): + # Test Series.div as well as Series.__div__ + # float/integer issue + # GH#7785 + first = Series([1, 0], name="first") + second = Series([-0.01, -0.02], name="second") + expected = Series([-0.01, -np.inf]) + + result = second.div(first) + tm.assert_series_equal(result, expected, check_names=False) + + result = second / first + tm.assert_series_equal(result, expected) + + def 
test_div_int(self, numeric_idx): + idx = numeric_idx + result = idx / 1 + expected = idx.astype("float64") + tm.assert_index_equal(result, expected) + + result = idx / 2 + expected = Index(idx.values / 2) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("op", [operator.mul, ops.rmul, operator.floordiv]) + def test_mul_int_identity(self, op, numeric_idx, box_with_array): + idx = numeric_idx + idx = tm.box_expected(idx, box_with_array) + + result = op(idx, 1) + tm.assert_equal(result, idx) + + def test_mul_int_array(self, numeric_idx): + idx = numeric_idx + didx = idx * idx + + result = idx * np.array(5, dtype="int64") + tm.assert_index_equal(result, idx * 5) + + arr_dtype = "uint64" if idx.dtype == np.uint64 else "int64" + result = idx * np.arange(5, dtype=arr_dtype) + tm.assert_index_equal(result, didx) + + def test_mul_int_series(self, numeric_idx): + idx = numeric_idx + didx = idx * idx + + arr_dtype = "uint64" if idx.dtype == np.uint64 else "int64" + result = idx * Series(np.arange(5, dtype=arr_dtype)) + tm.assert_series_equal(result, Series(didx)) + + def test_mul_float_series(self, numeric_idx): + idx = numeric_idx + rng5 = np.arange(5, dtype="float64") + + result = idx * Series(rng5 + 0.1) + expected = Series(rng5 * (rng5 + 0.1)) + tm.assert_series_equal(result, expected) + + def test_mul_index(self, numeric_idx): + idx = numeric_idx + + result = idx * idx + tm.assert_index_equal(result, idx**2) + + def test_mul_datelike_raises(self, numeric_idx): + idx = numeric_idx + msg = "cannot perform __rmul__ with this index type" + with pytest.raises(TypeError, match=msg): + idx * pd.date_range("20130101", periods=5) + + def test_mul_size_mismatch_raises(self, numeric_idx): + idx = numeric_idx + msg = "operands could not be broadcast together" + with pytest.raises(ValueError, match=msg): + idx * idx[0:3] + with pytest.raises(ValueError, match=msg): + idx * np.array([1, 2]) + + @pytest.mark.parametrize("op", [operator.pow, ops.rpow]) + def test_pow_float(self, op, numeric_idx, box_with_array): + # test power calculations both ways, GH#14973 + box = box_with_array + idx = numeric_idx + expected = Index(op(idx.values, 2.0)) + + idx = tm.box_expected(idx, box) + expected = tm.box_expected(expected, box) + + result = op(idx, 2.0) + tm.assert_equal(result, expected) + + def test_modulo(self, numeric_idx, box_with_array): + # GH#9244 + box = box_with_array + idx = numeric_idx + expected = Index(idx.values % 2) + + idx = tm.box_expected(idx, box) + expected = tm.box_expected(expected, box) + + result = idx % 2 + tm.assert_equal(result, expected) + + def test_divmod_scalar(self, numeric_idx): + idx = numeric_idx + + result = divmod(idx, 2) + with np.errstate(all="ignore"): + div, mod = divmod(idx.values, 2) + + expected = Index(div), Index(mod) + for r, e in zip(result, expected): + tm.assert_index_equal(r, e) + + def test_divmod_ndarray(self, numeric_idx): + idx = numeric_idx + other = np.ones(idx.values.shape, dtype=idx.values.dtype) * 2 + + result = divmod(idx, other) + with np.errstate(all="ignore"): + div, mod = divmod(idx.values, other) + + expected = Index(div), Index(mod) + for r, e in zip(result, expected): + tm.assert_index_equal(r, e) + + def test_divmod_series(self, numeric_idx): + idx = numeric_idx + other = np.ones(idx.values.shape, dtype=idx.values.dtype) * 2 + + result = divmod(idx, Series(other)) + with np.errstate(all="ignore"): + div, mod = divmod(idx.values, other) + + expected = Series(div), Series(mod) + for r, e in zip(result, expected): + 
tm.assert_series_equal(r, e) + + @pytest.mark.parametrize("other", [np.nan, 7, -23, 2.718, -3.14, np.inf]) + def test_ops_np_scalar(self, other): + vals = np.random.default_rng(2).standard_normal((5, 3)) + f = lambda x: pd.DataFrame( + x, index=list("ABCDE"), columns=["jim", "joe", "jolie"] + ) + + df = f(vals) + + tm.assert_frame_equal(df / np.array(other), f(vals / other)) + tm.assert_frame_equal(np.array(other) * df, f(vals * other)) + tm.assert_frame_equal(df + np.array(other), f(vals + other)) + tm.assert_frame_equal(np.array(other) - df, f(other - vals)) + + # TODO: This came from series.test.test_operators, needs cleanup + def test_operators_frame(self): + # rpow does not work with DataFrame + ts = tm.makeTimeSeries() + ts.name = "ts" + + df = pd.DataFrame({"A": ts}) + + tm.assert_series_equal(ts + ts, ts + df["A"], check_names=False) + tm.assert_series_equal(ts**ts, ts ** df["A"], check_names=False) + tm.assert_series_equal(ts < ts, ts < df["A"], check_names=False) + tm.assert_series_equal(ts / ts, ts / df["A"], check_names=False) + + # TODO: this came from tests.series.test_analytics, needs cleanup and + # de-duplication with test_modulo above + def test_modulo2(self): + with np.errstate(all="ignore"): + # GH#3590, modulo as ints + p = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}) + result = p["first"] % p["second"] + expected = Series(p["first"].values % p["second"].values, dtype="float64") + expected.iloc[0:3] = np.nan + tm.assert_series_equal(result, expected) + + result = p["first"] % 0 + expected = Series(np.nan, index=p.index, name="first") + tm.assert_series_equal(result, expected) + + p = p.astype("float64") + result = p["first"] % p["second"] + expected = Series(p["first"].values % p["second"].values) + tm.assert_series_equal(result, expected) + + p = p.astype("float64") + result = p["first"] % p["second"] + result2 = p["second"] % p["first"] + assert not result.equals(result2) + + def test_modulo_zero_int(self): + # GH#9144 + with np.errstate(all="ignore"): + s = Series([0, 1]) + + result = s % 0 + expected = Series([np.nan, np.nan]) + tm.assert_series_equal(result, expected) + + result = 0 % s + expected = Series([np.nan, 0.0]) + tm.assert_series_equal(result, expected) + + +class TestAdditionSubtraction: + # __add__, __sub__, __radd__, __rsub__, __iadd__, __isub__ + # for non-timestamp/timedelta/period dtypes + + @pytest.mark.parametrize( + "first, second, expected", + [ + ( + Series([1, 2, 3], index=list("ABC"), name="x"), + Series([2, 2, 2], index=list("ABD"), name="x"), + Series([3.0, 4.0, np.nan, np.nan], index=list("ABCD"), name="x"), + ), + ( + Series([1, 2, 3], index=list("ABC"), name="x"), + Series([2, 2, 2, 2], index=list("ABCD"), name="x"), + Series([3, 4, 5, np.nan], index=list("ABCD"), name="x"), + ), + ], + ) + def test_add_series(self, first, second, expected): + # GH#1134 + tm.assert_series_equal(first + second, expected) + tm.assert_series_equal(second + first, expected) + + @pytest.mark.parametrize( + "first, second, expected", + [ + ( + pd.DataFrame({"x": [1, 2, 3]}, index=list("ABC")), + pd.DataFrame({"x": [2, 2, 2]}, index=list("ABD")), + pd.DataFrame({"x": [3.0, 4.0, np.nan, np.nan]}, index=list("ABCD")), + ), + ( + pd.DataFrame({"x": [1, 2, 3]}, index=list("ABC")), + pd.DataFrame({"x": [2, 2, 2, 2]}, index=list("ABCD")), + pd.DataFrame({"x": [3, 4, 5, np.nan]}, index=list("ABCD")), + ), + ], + ) + def test_add_frames(self, first, second, expected): + # GH#1134 + tm.assert_frame_equal(first + second, expected) + 
tm.assert_frame_equal(second + first, expected) + + # TODO: This came from series.test.test_operators, needs cleanup + def test_series_frame_radd_bug(self, fixed_now_ts): + # GH#353 + vals = Series(tm.makeStringIndex()) + result = "foo_" + vals + expected = vals.map(lambda x: "foo_" + x) + tm.assert_series_equal(result, expected) + + frame = pd.DataFrame({"vals": vals}) + result = "foo_" + frame + expected = pd.DataFrame({"vals": vals.map(lambda x: "foo_" + x)}) + tm.assert_frame_equal(result, expected) + + ts = tm.makeTimeSeries() + ts.name = "ts" + + # really raise this time + fix_now = fixed_now_ts.to_pydatetime() + msg = "|".join( + [ + "unsupported operand type", + # wrong error message, see https://github.com/numpy/numpy/issues/18832 + "Concatenation operation", + ] + ) + with pytest.raises(TypeError, match=msg): + fix_now + ts + + with pytest.raises(TypeError, match=msg): + ts + fix_now + + # TODO: This came from series.test.test_operators, needs cleanup + def test_datetime64_with_index(self): + # arithmetic integer ops with an index + ser = Series(np.random.default_rng(2).standard_normal(5)) + expected = ser - ser.index.to_series() + result = ser - ser.index + tm.assert_series_equal(result, expected) + + # GH#4629 + # arithmetic datetime64 ops with an index + ser = Series( + pd.date_range("20130101", periods=5), + index=pd.date_range("20130101", periods=5), + ) + expected = ser - ser.index.to_series() + result = ser - ser.index + tm.assert_series_equal(result, expected) + + msg = "cannot subtract PeriodArray from DatetimeArray" + with pytest.raises(TypeError, match=msg): + # GH#18850 + result = ser - ser.index.to_period() + + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), + index=pd.date_range("20130101", periods=5), + ) + df["date"] = pd.Timestamp("20130102") + df["expected"] = df["date"] - df.index.to_series() + df["result"] = df["date"] - df.index + tm.assert_series_equal(df["result"], df["expected"], check_names=False) + + # TODO: taken from tests.frame.test_operators, needs cleanup + def test_frame_operators(self, float_frame): + frame = float_frame + + garbage = np.random.default_rng(2).random(4) + colSeries = Series(garbage, index=np.array(frame.columns)) + + idSum = frame + frame + seriesSum = frame + colSeries + + for col, series in idSum.items(): + for idx, val in series.items(): + origVal = frame[col][idx] * 2 + if not np.isnan(val): + assert val == origVal + else: + assert np.isnan(origVal) + + for col, series in seriesSum.items(): + for idx, val in series.items(): + origVal = frame[col][idx] + colSeries[col] + if not np.isnan(val): + assert val == origVal + else: + assert np.isnan(origVal) + + def test_frame_operators_col_align(self, float_frame): + frame2 = pd.DataFrame(float_frame, columns=["D", "C", "B", "A"]) + added = frame2 + frame2 + expected = frame2 * 2 + tm.assert_frame_equal(added, expected) + + def test_frame_operators_none_to_nan(self): + df = pd.DataFrame({"a": ["a", None, "b"]}) + tm.assert_frame_equal(df + df, pd.DataFrame({"a": ["aa", np.nan, "bb"]})) + + @pytest.mark.parametrize("dtype", ("float", "int64")) + def test_frame_operators_empty_like(self, dtype): + # Test for issue #10181 + frames = [ + pd.DataFrame(dtype=dtype), + pd.DataFrame(columns=["A"], dtype=dtype), + pd.DataFrame(index=[0], dtype=dtype), + ] + for df in frames: + assert (df + df).equals(df) + tm.assert_frame_equal(df + df, df) + + @pytest.mark.parametrize( + "func", + [lambda x: x * 2, lambda x: x[::2], lambda x: 5], + ids=["multiply", "slice", "constant"], 
+ ) + def test_series_operators_arithmetic(self, all_arithmetic_functions, func): + op = all_arithmetic_functions + series = tm.makeTimeSeries().rename("ts") + other = func(series) + compare_op(series, other, op) + + @pytest.mark.parametrize( + "func", [lambda x: x + 1, lambda x: 5], ids=["add", "constant"] + ) + def test_series_operators_compare(self, comparison_op, func): + op = comparison_op + series = tm.makeTimeSeries().rename("ts") + other = func(series) + compare_op(series, other, op) + + @pytest.mark.parametrize( + "func", + [lambda x: x * 2, lambda x: x[::2], lambda x: 5], + ids=["multiply", "slice", "constant"], + ) + def test_divmod(self, func): + series = tm.makeTimeSeries().rename("ts") + other = func(series) + results = divmod(series, other) + if isinstance(other, abc.Iterable) and len(series) != len(other): + # if the lengths don't match, this is the test where we use + # `tser[::2]`. Pad every other value in `other_np` with nan. + other_np = [] + for n in other: + other_np.append(n) + other_np.append(np.nan) + else: + other_np = other + other_np = np.asarray(other_np) + with np.errstate(all="ignore"): + expecteds = divmod(series.values, np.asarray(other_np)) + + for result, expected in zip(results, expecteds): + # check the values, name, and index separately + tm.assert_almost_equal(np.asarray(result), expected) + + assert result.name == series.name + tm.assert_index_equal(result.index, series.index._with_freq(None)) + + def test_series_divmod_zero(self): + # Check that divmod uses pandas convention for division by zero, + # which does not match numpy. + # pandas convention has + # 1/0 == np.inf + # -1/0 == -np.inf + # 1/-0.0 == -np.inf + # -1/-0.0 == np.inf + tser = tm.makeTimeSeries().rename("ts") + other = tser * 0 + + result = divmod(tser, other) + exp1 = Series([np.inf] * len(tser), index=tser.index, name="ts") + exp2 = Series([np.nan] * len(tser), index=tser.index, name="ts") + tm.assert_series_equal(result[0], exp1) + tm.assert_series_equal(result[1], exp2) + + +class TestUFuncCompat: + # TODO: add more dtypes + @pytest.mark.parametrize("holder", [Index, RangeIndex, Series]) + @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64]) + def test_ufunc_compat(self, holder, dtype): + box = Series if holder is Series else Index + + if holder is RangeIndex: + if dtype != np.int64: + pytest.skip(f"dtype {dtype} not relevant for RangeIndex") + idx = RangeIndex(0, 5, name="foo") + else: + idx = holder(np.arange(5, dtype=dtype), name="foo") + result = np.sin(idx) + expected = box(np.sin(np.arange(5, dtype=dtype)), name="foo") + tm.assert_equal(result, expected) + + # TODO: add more dtypes + @pytest.mark.parametrize("holder", [Index, Series]) + @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64]) + def test_ufunc_coercions(self, holder, dtype): + idx = holder([1, 2, 3, 4, 5], dtype=dtype, name="x") + box = Series if holder is Series else Index + + result = np.sqrt(idx) + assert result.dtype == "f8" and isinstance(result, box) + exp = Index(np.sqrt(np.array([1, 2, 3, 4, 5], dtype=np.float64)), name="x") + exp = tm.box_expected(exp, box) + tm.assert_equal(result, exp) + + result = np.divide(idx, 2.0) + assert result.dtype == "f8" and isinstance(result, box) + exp = Index([0.5, 1.0, 1.5, 2.0, 2.5], dtype=np.float64, name="x") + exp = tm.box_expected(exp, box) + tm.assert_equal(result, exp) + + # _evaluate_numeric_binop + result = idx + 2.0 + assert result.dtype == "f8" and isinstance(result, box) + exp = Index([3.0, 4.0, 5.0, 6.0, 7.0], 
dtype=np.float64, name="x") + exp = tm.box_expected(exp, box) + tm.assert_equal(result, exp) + + result = idx - 2.0 + assert result.dtype == "f8" and isinstance(result, box) + exp = Index([-1.0, 0.0, 1.0, 2.0, 3.0], dtype=np.float64, name="x") + exp = tm.box_expected(exp, box) + tm.assert_equal(result, exp) + + result = idx * 1.0 + assert result.dtype == "f8" and isinstance(result, box) + exp = Index([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float64, name="x") + exp = tm.box_expected(exp, box) + tm.assert_equal(result, exp) + + result = idx / 2.0 + assert result.dtype == "f8" and isinstance(result, box) + exp = Index([0.5, 1.0, 1.5, 2.0, 2.5], dtype=np.float64, name="x") + exp = tm.box_expected(exp, box) + tm.assert_equal(result, exp) + + # TODO: add more dtypes + @pytest.mark.parametrize("holder", [Index, Series]) + @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64]) + def test_ufunc_multiple_return_values(self, holder, dtype): + obj = holder([1, 2, 3], dtype=dtype, name="x") + box = Series if holder is Series else Index + + result = np.modf(obj) + assert isinstance(result, tuple) + exp1 = Index([0.0, 0.0, 0.0], dtype=np.float64, name="x") + exp2 = Index([1.0, 2.0, 3.0], dtype=np.float64, name="x") + tm.assert_equal(result[0], tm.box_expected(exp1, box)) + tm.assert_equal(result[1], tm.box_expected(exp2, box)) + + def test_ufunc_at(self): + s = Series([0, 1, 2], index=[1, 2, 3], name="x") + np.add.at(s, [0, 2], 10) + expected = Series([10, 1, 12], index=[1, 2, 3], name="x") + tm.assert_series_equal(s, expected) + + +class TestObjectDtypeEquivalence: + # Tests that arithmetic operations match operations executed elementwise + + @pytest.mark.parametrize("dtype", [None, object]) + def test_numarr_with_dtype_add_nan(self, dtype, box_with_array): + box = box_with_array + ser = Series([1, 2, 3], dtype=dtype) + expected = Series([np.nan, np.nan, np.nan], dtype=dtype) + + ser = tm.box_expected(ser, box) + expected = tm.box_expected(expected, box) + + result = np.nan + ser + tm.assert_equal(result, expected) + + result = ser + np.nan + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("dtype", [None, object]) + def test_numarr_with_dtype_add_int(self, dtype, box_with_array): + box = box_with_array + ser = Series([1, 2, 3], dtype=dtype) + expected = Series([2, 3, 4], dtype=dtype) + + ser = tm.box_expected(ser, box) + expected = tm.box_expected(expected, box) + + result = 1 + ser + tm.assert_equal(result, expected) + + result = ser + 1 + tm.assert_equal(result, expected) + + # TODO: moved from tests.series.test_operators; needs cleanup + @pytest.mark.parametrize( + "op", + [operator.add, operator.sub, operator.mul, operator.truediv, operator.floordiv], + ) + def test_operators_reverse_object(self, op): + # GH#56 + arr = Series( + np.random.default_rng(2).standard_normal(10), + index=np.arange(10), + dtype=object, + ) + + result = op(1.0, arr) + expected = op(1.0, arr.astype(float)) + tm.assert_series_equal(result.astype(float), expected) + + +class TestNumericArithmeticUnsorted: + # Tests in this class have been moved from type-specific test modules + # but not yet sorted, parametrized, and de-duplicated + @pytest.mark.parametrize( + "op", + [ + operator.add, + operator.sub, + operator.mul, + operator.floordiv, + operator.truediv, + ], + ) + @pytest.mark.parametrize( + "idx1", + [ + RangeIndex(0, 10, 1), + RangeIndex(0, 20, 2), + RangeIndex(-10, 10, 2), + RangeIndex(5, -5, -1), + ], + ) + @pytest.mark.parametrize( + "idx2", + [ + RangeIndex(0, 10, 1), + RangeIndex(0, 20, 2), 
+ RangeIndex(-10, 10, 2), + RangeIndex(5, -5, -1), + ], + ) + def test_binops_index(self, op, idx1, idx2): + idx1 = idx1._rename("foo") + idx2 = idx2._rename("bar") + result = op(idx1, idx2) + expected = op(Index(idx1.to_numpy()), Index(idx2.to_numpy())) + tm.assert_index_equal(result, expected, exact="equiv") + + @pytest.mark.parametrize( + "op", + [ + operator.add, + operator.sub, + operator.mul, + operator.floordiv, + operator.truediv, + ], + ) + @pytest.mark.parametrize( + "idx", + [ + RangeIndex(0, 10, 1), + RangeIndex(0, 20, 2), + RangeIndex(-10, 10, 2), + RangeIndex(5, -5, -1), + ], + ) + @pytest.mark.parametrize("scalar", [-1, 1, 2]) + def test_binops_index_scalar(self, op, idx, scalar): + result = op(idx, scalar) + expected = op(Index(idx.to_numpy()), scalar) + tm.assert_index_equal(result, expected, exact="equiv") + + @pytest.mark.parametrize("idx1", [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2)]) + @pytest.mark.parametrize("idx2", [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2)]) + def test_binops_index_pow(self, idx1, idx2): + # numpy does not allow powers of negative integers so test separately + # https://github.com/numpy/numpy/pull/8127 + idx1 = idx1._rename("foo") + idx2 = idx2._rename("bar") + result = pow(idx1, idx2) + expected = pow(Index(idx1.to_numpy()), Index(idx2.to_numpy())) + tm.assert_index_equal(result, expected, exact="equiv") + + @pytest.mark.parametrize("idx", [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2)]) + @pytest.mark.parametrize("scalar", [1, 2]) + def test_binops_index_scalar_pow(self, idx, scalar): + # numpy does not allow powers of negative integers so test separately + # https://github.com/numpy/numpy/pull/8127 + result = pow(idx, scalar) + expected = pow(Index(idx.to_numpy()), scalar) + tm.assert_index_equal(result, expected, exact="equiv") + + # TODO: divmod? 
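+ # Editor's note (illustrative sketch, not part of the original patch): the
+ # parametrized tests above compare RangeIndex arithmetic against the same
+ # ops on a materialized integer Index; e.g., assuming default int64 dtype:
+ #
+ #   >>> (RangeIndex(0, 10, 2) + RangeIndex(0, 5, 1)).tolist()
+ #   [0, 3, 6, 9, 12]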
+ @pytest.mark.parametrize( + "op", + [ + operator.add, + operator.sub, + operator.mul, + operator.floordiv, + operator.truediv, + operator.pow, + operator.mod, + ], + ) + def test_arithmetic_with_frame_or_series(self, op): + # check that we return NotImplemented when operating with Series + # or DataFrame + index = RangeIndex(5) + other = Series(np.random.default_rng(2).standard_normal(5)) + + expected = op(Series(index), other) + result = op(index, other) + tm.assert_series_equal(result, expected) + + other = pd.DataFrame(np.random.default_rng(2).standard_normal((2, 5))) + expected = op(pd.DataFrame([index, index]), other) + result = op(index, other) + tm.assert_frame_equal(result, expected) + + def test_numeric_compat2(self): + # validate that we are handling the RangeIndex overrides to numeric ops + # and returning RangeIndex where possible + + idx = RangeIndex(0, 10, 2) + + result = idx * 2 + expected = RangeIndex(0, 20, 4) + tm.assert_index_equal(result, expected, exact=True) + + result = idx + 2 + expected = RangeIndex(2, 12, 2) + tm.assert_index_equal(result, expected, exact=True) + + result = idx - 2 + expected = RangeIndex(-2, 8, 2) + tm.assert_index_equal(result, expected, exact=True) + + result = idx / 2 + expected = RangeIndex(0, 5, 1).astype("float64") + tm.assert_index_equal(result, expected, exact=True) + + result = idx / 4 + expected = RangeIndex(0, 10, 2) / 4 + tm.assert_index_equal(result, expected, exact=True) + + result = idx // 1 + expected = idx + tm.assert_index_equal(result, expected, exact=True) + + # __mul__ + result = idx * idx + expected = Index(idx.values * idx.values) + tm.assert_index_equal(result, expected, exact=True) + + # __pow__ + idx = RangeIndex(0, 1000, 2) + result = idx**2 + expected = Index(idx._values) ** 2 + tm.assert_index_equal(Index(result.values), expected, exact=True) + + @pytest.mark.parametrize( + "idx, div, expected", + [ + # TODO: add more dtypes + (RangeIndex(0, 1000, 2), 2, RangeIndex(0, 500, 1)), + (RangeIndex(-99, -201, -3), -3, RangeIndex(33, 67, 1)), + ( + RangeIndex(0, 1000, 1), + 2, + Index(RangeIndex(0, 1000, 1)._values) // 2, + ), + ( + RangeIndex(0, 100, 1), + 2.0, + Index(RangeIndex(0, 100, 1)._values) // 2.0, + ), + (RangeIndex(0), 50, RangeIndex(0)), + (RangeIndex(2, 4, 2), 3, RangeIndex(0, 1, 1)), + (RangeIndex(-5, -10, -6), 4, RangeIndex(-2, -1, 1)), + (RangeIndex(-100, -200, 3), 2, RangeIndex(0)), + ], + ) + def test_numeric_compat2_floordiv(self, idx, div, expected): + # __floordiv__ + tm.assert_index_equal(idx // div, expected, exact=True) + + @pytest.mark.parametrize("dtype", [np.int64, np.float64]) + @pytest.mark.parametrize("delta", [1, 0, -1]) + def test_addsub_arithmetic(self, dtype, delta): + # GH#8142 + delta = dtype(delta) + index = Index([10, 11, 12], dtype=dtype) + result = index + delta + expected = Index(index.values + delta, dtype=dtype) + tm.assert_index_equal(result, expected) + + # this subtraction used to fail + result = index - delta + expected = Index(index.values - delta, dtype=dtype) + tm.assert_index_equal(result, expected) + + tm.assert_index_equal(index + index, 2 * index) + tm.assert_index_equal(index - index, 0 * index) + assert not (index - index).empty + + +def test_fill_value_inf_masking(): + # GH #27464 make sure we mask 0/1 with Inf and not NaN + df = pd.DataFrame({"A": [0, 1, 2], "B": [1.1, None, 1.1]}) + + other = pd.DataFrame({"A": [1.1, 1.2, 1.3]}, index=[0, 2, 3]) + + result = df.rfloordiv(other, fill_value=1) + + expected = pd.DataFrame( + {"A": [np.inf, 1.0, 0.0, 1.0], "B": [0.0, 
np.nan, 0.0, np.nan]} + ) + tm.assert_frame_equal(result, expected) + + +def test_dataframe_div_silenced(): + # GH#26793 + pdf1 = pd.DataFrame( + { + "A": np.arange(10), + "B": [np.nan, 1, 2, 3, 4] * 2, + "C": [np.nan] * 10, + "D": np.arange(10), + }, + index=list("abcdefghij"), + columns=list("ABCD"), + ) + pdf2 = pd.DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + index=list("abcdefghjk"), + columns=list("ABCX"), + ) + with tm.assert_produces_warning(None): + pdf1.div(pdf2, fill_value=0) + + +@pytest.mark.parametrize( + "data, expected_data", + [([0, 1, 2], [0, 2, 4])], +) +def test_integer_array_add_list_like( + box_pandas_1d_array, box_1d_array, data, expected_data +): + # GH22606 Verify operators with IntegerArray and list-likes + arr = array(data, dtype="Int64") + container = box_pandas_1d_array(arr) + left = container + box_1d_array(data) + right = box_1d_array(data) + container + + if Series in [box_1d_array, box_pandas_1d_array]: + cls = Series + elif Index in [box_1d_array, box_pandas_1d_array]: + cls = Index + else: + cls = array + + expected = cls(expected_data, dtype="Int64") + + tm.assert_equal(left, expected) + tm.assert_equal(right, expected) + + +def test_sub_multiindex_swapped_levels(): + # GH 9952 + df = pd.DataFrame( + {"a": np.random.default_rng(2).standard_normal(6)}, + index=pd.MultiIndex.from_product( + [["a", "b"], [0, 1, 2]], names=["levA", "levB"] + ), + ) + df2 = df.copy() + df2.index = df2.index.swaplevel(0, 1) + result = df - df2 + expected = pd.DataFrame([0.0] * 6, columns=["a"], index=df.index) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("power", [1, 2, 5]) +@pytest.mark.parametrize("string_size", [0, 1, 2, 5]) +def test_empty_str_comparison(power, string_size): + # GH 37348 + a = np.array(range(10**power)) + right = pd.DataFrame(a, dtype=np.int64) + left = " " * string_size + + result = right == left + expected = pd.DataFrame(np.zeros(right.shape, dtype=bool)) + tm.assert_frame_equal(result, expected) + + +def test_series_add_sub_with_UInt64(): + # GH 22023 + series1 = Series([1, 2, 3]) + series2 = Series([2, 1, 3], dtype="UInt64") + + result = series1 + series2 + expected = Series([3, 3, 6], dtype="Float64") + tm.assert_series_equal(result, expected) + + result = series1 - series2 + expected = Series([-1, 1, 0], dtype="Float64") + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_object.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_object.py new file mode 100644 index 00000000..76e38500 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_object.py @@ -0,0 +1,406 @@ +# Arithmetic tests for DataFrame/Series/Index/Array classes that should +# behave identically. 
+# Specifically for object dtype +import datetime +from decimal import Decimal +import operator + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Series, + Timestamp, + option_context, +) +import pandas._testing as tm +from pandas.core import ops + +# ------------------------------------------------------------------ +# Comparisons + + +class TestObjectComparisons: + def test_comparison_object_numeric_nas(self, comparison_op): + ser = Series(np.random.default_rng(2).standard_normal(10), dtype=object) + shifted = ser.shift(2) + + func = comparison_op + + result = func(ser, shifted) + expected = func(ser.astype(float), shifted.astype(float)) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] + ) + def test_object_comparisons(self, infer_string): + with option_context("future.infer_string", infer_string): + ser = Series(["a", "b", np.nan, "c", "a"]) + + result = ser == "a" + expected = Series([True, False, False, False, True]) + tm.assert_series_equal(result, expected) + + result = ser < "a" + expected = Series([False, False, False, False, False]) + tm.assert_series_equal(result, expected) + + result = ser != "a" + expected = -(ser == "a") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", [None, object]) + def test_more_na_comparisons(self, dtype): + left = Series(["a", np.nan, "c"], dtype=dtype) + right = Series(["a", np.nan, "d"], dtype=dtype) + + result = left == right + expected = Series([True, False, False]) + tm.assert_series_equal(result, expected) + + result = left != right + expected = Series([False, True, True]) + tm.assert_series_equal(result, expected) + + result = left == np.nan + expected = Series([False, False, False]) + tm.assert_series_equal(result, expected) + + result = left != np.nan + expected = Series([True, True, True]) + tm.assert_series_equal(result, expected) + + +# ------------------------------------------------------------------ +# Arithmetic + + +class TestArithmetic: + def test_add_period_to_array_of_offset(self): + # GH#50162 + per = pd.Period("2012-1-1", freq="D") + pi = pd.period_range("2012-1-1", periods=10, freq="D") + idx = per - pi + + expected = pd.Index([x + per for x in idx], dtype=object) + result = idx + per + tm.assert_index_equal(result, expected) + + result = per + idx + tm.assert_index_equal(result, expected) + + # TODO: parametrize + def test_pow_ops_object(self): + # GH#22922 + # pow is weird with masking & 1, so testing here + a = Series([1, np.nan, 1, np.nan], dtype=object) + b = Series([1, np.nan, np.nan, 1], dtype=object) + result = a**b + expected = Series(a.values**b.values, dtype=object) + tm.assert_series_equal(result, expected) + + result = b**a + expected = Series(b.values**a.values, dtype=object) + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + @pytest.mark.parametrize("other", ["category", "Int64"]) + def test_add_extension_scalar(self, other, box_with_array, op): + # GH#22378 + # Check that scalars satisfying is_extension_array_dtype(obj) + # do not incorrectly try to dispatch to an ExtensionArray operation + + arr = Series(["a", "b", "c"]) + expected = Series([op(x, other) for x in arr]) + + arr = tm.box_expected(arr, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = op(arr, other) + tm.assert_equal(result, expected) + + def 
test_objarr_add_str(self, box_with_array): + ser = Series(["x", np.nan, "x"]) + expected = Series(["xa", np.nan, "xa"]) + + ser = tm.box_expected(ser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = ser + "a" + tm.assert_equal(result, expected) + + def test_objarr_radd_str(self, box_with_array): + ser = Series(["x", np.nan, "x"]) + expected = Series(["ax", np.nan, "ax"]) + + ser = tm.box_expected(ser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = "a" + ser + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "data", + [ + [1, 2, 3], + [1.1, 2.2, 3.3], + [Timestamp("2011-01-01"), Timestamp("2011-01-02"), pd.NaT], + ["x", "y", 1], + ], + ) + @pytest.mark.parametrize("dtype", [None, object]) + def test_objarr_radd_str_invalid(self, dtype, data, box_with_array): + ser = Series(data, dtype=dtype) + + ser = tm.box_expected(ser, box_with_array) + msg = "|".join( + [ + "can only concatenate str", + "did not contain a loop with signature matching types", + "unsupported operand type", + "must be str", + ] + ) + with pytest.raises(TypeError, match=msg): + "foo_" + ser + + @pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub]) + def test_objarr_add_invalid(self, op, box_with_array): + # invalid ops + box = box_with_array + + obj_ser = tm.makeObjectSeries() + obj_ser.name = "objects" + + obj_ser = tm.box_expected(obj_ser, box) + msg = "|".join( + ["can only concatenate str", "unsupported operand type", "must be str"] + ) + with pytest.raises(Exception, match=msg): + op(obj_ser, 1) + with pytest.raises(Exception, match=msg): + op(obj_ser, np.array(1, dtype=np.int64)) + + # TODO: Moved from tests.series.test_operators; needs cleanup + def test_operators_na_handling(self): + ser = Series(["foo", "bar", "baz", np.nan]) + result = "prefix_" + ser + expected = Series(["prefix_foo", "prefix_bar", "prefix_baz", np.nan]) + tm.assert_series_equal(result, expected) + + result = ser + "_suffix" + expected = Series(["foo_suffix", "bar_suffix", "baz_suffix", np.nan]) + tm.assert_series_equal(result, expected) + + # TODO: parametrize over box + @pytest.mark.parametrize("dtype", [None, object]) + def test_series_with_dtype_radd_timedelta(self, dtype): + # note this test is _not_ aimed at timedelta64-dtyped Series + # as of 2.0 we retain object dtype when ser.dtype == object + ser = Series( + [pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days")], + dtype=dtype, + ) + expected = Series( + [pd.Timedelta("4 days"), pd.Timedelta("5 days"), pd.Timedelta("6 days")], + dtype=dtype, + ) + + result = pd.Timedelta("3 days") + ser + tm.assert_series_equal(result, expected) + + result = ser + pd.Timedelta("3 days") + tm.assert_series_equal(result, expected) + + # TODO: cleanup & parametrize over box + def test_mixed_timezone_series_ops_object(self): + # GH#13043 + ser = Series( + [ + Timestamp("2015-01-01", tz="US/Eastern"), + Timestamp("2015-01-01", tz="Asia/Tokyo"), + ], + name="xxx", + ) + assert ser.dtype == object + + exp = Series( + [ + Timestamp("2015-01-02", tz="US/Eastern"), + Timestamp("2015-01-02", tz="Asia/Tokyo"), + ], + name="xxx", + ) + tm.assert_series_equal(ser + pd.Timedelta("1 days"), exp) + tm.assert_series_equal(pd.Timedelta("1 days") + ser, exp) + + # object series & object series + ser2 = Series( + [ + Timestamp("2015-01-03", tz="US/Eastern"), + Timestamp("2015-01-05", tz="Asia/Tokyo"), + ], + name="xxx", + ) + assert ser2.dtype == object + exp = Series( + [pd.Timedelta("2 
days"), pd.Timedelta("4 days")], name="xxx", dtype=object + ) + tm.assert_series_equal(ser2 - ser, exp) + tm.assert_series_equal(ser - ser2, -exp) + + ser = Series( + [pd.Timedelta("01:00:00"), pd.Timedelta("02:00:00")], + name="xxx", + dtype=object, + ) + assert ser.dtype == object + + exp = Series( + [pd.Timedelta("01:30:00"), pd.Timedelta("02:30:00")], + name="xxx", + dtype=object, + ) + tm.assert_series_equal(ser + pd.Timedelta("00:30:00"), exp) + tm.assert_series_equal(pd.Timedelta("00:30:00") + ser, exp) + + # TODO: cleanup & parametrize over box + def test_iadd_preserves_name(self): + # GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name + ser = Series([1, 2, 3]) + ser.index.name = "foo" + + ser.index += 1 + assert ser.index.name == "foo" + + ser.index -= 1 + assert ser.index.name == "foo" + + def test_add_string(self): + # from bug report + index = pd.Index(["a", "b", "c"]) + index2 = index + "foo" + + assert "a" not in index2 + assert "afoo" in index2 + + def test_iadd_string(self): + index = pd.Index(["a", "b", "c"]) + # doesn't fail test unless there is a check before `+=` + assert "a" in index + + index += "_x" + assert "a_x" in index + + def test_add(self): + index = tm.makeStringIndex(100) + expected = pd.Index(index.values * 2) + tm.assert_index_equal(index + index, expected) + tm.assert_index_equal(index + index.tolist(), expected) + tm.assert_index_equal(index.tolist() + index, expected) + + # test add and radd + index = pd.Index(list("abc")) + expected = pd.Index(["a1", "b1", "c1"]) + tm.assert_index_equal(index + "1", expected) + expected = pd.Index(["1a", "1b", "1c"]) + tm.assert_index_equal("1" + index, expected) + + def test_sub_fail(self): + index = tm.makeStringIndex(100) + + msg = "unsupported operand type|Cannot broadcast" + with pytest.raises(TypeError, match=msg): + index - "a" + with pytest.raises(TypeError, match=msg): + index - index + with pytest.raises(TypeError, match=msg): + index - index.tolist() + with pytest.raises(TypeError, match=msg): + index.tolist() - index + + def test_sub_object(self): + # GH#19369 + index = pd.Index([Decimal(1), Decimal(2)]) + expected = pd.Index([Decimal(0), Decimal(1)]) + + result = index - Decimal(1) + tm.assert_index_equal(result, expected) + + result = index - pd.Index([Decimal(1), Decimal(1)]) + tm.assert_index_equal(result, expected) + + msg = "unsupported operand type" + with pytest.raises(TypeError, match=msg): + index - "foo" + + with pytest.raises(TypeError, match=msg): + index - np.array([2, "foo"], dtype=object) + + def test_rsub_object(self, fixed_now_ts): + # GH#19369 + index = pd.Index([Decimal(1), Decimal(2)]) + expected = pd.Index([Decimal(1), Decimal(0)]) + + result = Decimal(2) - index + tm.assert_index_equal(result, expected) + + result = np.array([Decimal(2), Decimal(2)]) - index + tm.assert_index_equal(result, expected) + + msg = "unsupported operand type" + with pytest.raises(TypeError, match=msg): + "foo" - index + + with pytest.raises(TypeError, match=msg): + np.array([True, fixed_now_ts]) - index + + +class MyIndex(pd.Index): + # Simple index subclass that tracks ops calls. 
+ + _calls: int + + @classmethod + def _simple_new(cls, values, name=None, dtype=None): + result = object.__new__(cls) + result._data = values + result._name = name + result._calls = 0 + result._reset_identity() + + return result + + def __add__(self, other): + self._calls += 1 + return self._simple_new(self._data) + + def __radd__(self, other): + return self.__add__(other) + + +@pytest.mark.parametrize( + "other", + [ + [datetime.timedelta(1), datetime.timedelta(2)], + [datetime.datetime(2000, 1, 1), datetime.datetime(2000, 1, 2)], + [pd.Period("2000"), pd.Period("2001")], + ["a", "b"], + ], + ids=["timedelta", "datetime", "period", "object"], +) +def test_index_ops_defer_to_unknown_subclasses(other): + # https://github.com/pandas-dev/pandas/issues/31109 + values = np.array( + [datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)], dtype=object + ) + a = MyIndex._simple_new(values) + other = pd.Index(other) + result = other + a + assert isinstance(result, MyIndex) + assert a._calls == 1 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_period.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_period.py new file mode 100644 index 00000000..7a079ae7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_period.py @@ -0,0 +1,1600 @@ +# Arithmetic tests for DataFrame/Series/Index/Array classes that should +# behave identically. +# Specifically for Period dtype +import operator + +import numpy as np +import pytest + +from pandas._libs.tslibs import ( + IncompatibleFrequency, + Period, + Timestamp, + to_offset, +) +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import ( + PeriodIndex, + Series, + Timedelta, + TimedeltaIndex, + period_range, +) +import pandas._testing as tm +from pandas.core import ops +from pandas.core.arrays import TimedeltaArray +from pandas.tests.arithmetic.common import ( + assert_invalid_addsub_type, + assert_invalid_comparison, + get_upcast_box, +) + +# ------------------------------------------------------------------ +# Comparisons + + +class TestPeriodArrayLikeComparisons: + # Comparison tests for PeriodDtype vectors fully parametrized over + # DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison + # tests will eventually end up here. 
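+    # Descriptive note on the boxing helpers used below: tm.box_expected wraps
+    # the test data in the container under test (Index/Series/DataFrame/array),
+    # and get_upcast_box (from pandas.tests.arithmetic.common) picks the
+    # container the comparison result should come back in -- e.g. a boxed
+    # PeriodIndex compared against a Series yields a Series -- so `expected`
+    # can be boxed to match before tm.assert_equal.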
+ + @pytest.mark.parametrize("other", ["2017", Period("2017", freq="D")]) + def test_eq_scalar(self, other, box_with_array): + idx = PeriodIndex(["2017", "2017", "2018"], freq="D") + idx = tm.box_expected(idx, box_with_array) + xbox = get_upcast_box(idx, other, True) + + expected = np.array([True, True, False]) + expected = tm.box_expected(expected, xbox) + + result = idx == other + + tm.assert_equal(result, expected) + + def test_compare_zerodim(self, box_with_array): + # GH#26689 make sure we unbox zero-dimensional arrays + + pi = period_range("2000", periods=4) + other = np.array(pi.to_numpy()[0]) + + pi = tm.box_expected(pi, box_with_array) + xbox = get_upcast_box(pi, other, True) + + result = pi <= other + expected = np.array([True, False, False, False]) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "scalar", + [ + "foo", + Timestamp("2021-01-01"), + Timedelta(days=4), + 9, + 9.5, + 2000, # specifically don't consider 2000 to match Period("2000", "D") + False, + None, + ], + ) + def test_compare_invalid_scalar(self, box_with_array, scalar): + # GH#28980 + # comparison with scalar that cannot be interpreted as a Period + pi = period_range("2000", periods=4) + parr = tm.box_expected(pi, box_with_array) + assert_invalid_comparison(parr, scalar, box_with_array) + + @pytest.mark.parametrize( + "other", + [ + pd.date_range("2000", periods=4).array, + pd.timedelta_range("1D", periods=4).array, + np.arange(4), + np.arange(4).astype(np.float64), + list(range(4)), + # match Period semantics by not treating integers as Periods + [2000, 2001, 2002, 2003], + np.arange(2000, 2004), + np.arange(2000, 2004).astype(object), + pd.Index([2000, 2001, 2002, 2003]), + ], + ) + def test_compare_invalid_listlike(self, box_with_array, other): + pi = period_range("2000", periods=4) + parr = tm.box_expected(pi, box_with_array) + assert_invalid_comparison(parr, other, box_with_array) + + @pytest.mark.parametrize("other_box", [list, np.array, lambda x: x.astype(object)]) + def test_compare_object_dtype(self, box_with_array, other_box): + pi = period_range("2000", periods=5) + parr = tm.box_expected(pi, box_with_array) + + other = other_box(pi) + xbox = get_upcast_box(parr, other, True) + + expected = np.array([True, True, True, True, True]) + expected = tm.box_expected(expected, xbox) + + result = parr == other + tm.assert_equal(result, expected) + result = parr <= other + tm.assert_equal(result, expected) + result = parr >= other + tm.assert_equal(result, expected) + + result = parr != other + tm.assert_equal(result, ~expected) + result = parr < other + tm.assert_equal(result, ~expected) + result = parr > other + tm.assert_equal(result, ~expected) + + other = other_box(pi[::-1]) + + expected = np.array([False, False, True, False, False]) + expected = tm.box_expected(expected, xbox) + result = parr == other + tm.assert_equal(result, expected) + + expected = np.array([True, True, True, False, False]) + expected = tm.box_expected(expected, xbox) + result = parr <= other + tm.assert_equal(result, expected) + + expected = np.array([False, False, True, True, True]) + expected = tm.box_expected(expected, xbox) + result = parr >= other + tm.assert_equal(result, expected) + + expected = np.array([True, True, False, True, True]) + expected = tm.box_expected(expected, xbox) + result = parr != other + tm.assert_equal(result, expected) + + expected = np.array([True, True, False, False, False]) + expected = tm.box_expected(expected, xbox) + result = parr < 
other + tm.assert_equal(result, expected) + + expected = np.array([False, False, False, True, True]) + expected = tm.box_expected(expected, xbox) + result = parr > other + tm.assert_equal(result, expected) + + +class TestPeriodIndexComparisons: + # TODO: parameterize over boxes + + def test_pi_cmp_period(self): + idx = period_range("2007-01", periods=20, freq="M") + per = idx[10] + + result = idx < per + exp = idx.values < idx.values[10] + tm.assert_numpy_array_equal(result, exp) + + # Tests Period.__richcmp__ against ndarray[object, ndim=2] + result = idx.values.reshape(10, 2) < per + tm.assert_numpy_array_equal(result, exp.reshape(10, 2)) + + # Tests Period.__richcmp__ against ndarray[object, ndim=0] + result = idx < np.array(per) + tm.assert_numpy_array_equal(result, exp) + + # TODO: moved from test_datetime64; de-duplicate with version below + def test_parr_cmp_period_scalar2(self, box_with_array): + pi = period_range("2000-01-01", periods=10, freq="D") + + val = pi[3] + expected = [x > val for x in pi] + + ser = tm.box_expected(pi, box_with_array) + xbox = get_upcast_box(ser, val, True) + + expected = tm.box_expected(expected, xbox) + result = ser > val + tm.assert_equal(result, expected) + + val = pi[5] + result = ser > val + expected = [x > val for x in pi] + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_parr_cmp_period_scalar(self, freq, box_with_array): + # GH#13200 + base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) + base = tm.box_expected(base, box_with_array) + per = Period("2011-02", freq=freq) + xbox = get_upcast_box(base, per, True) + + exp = np.array([False, True, False, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base == per, exp) + tm.assert_equal(per == base, exp) + + exp = np.array([True, False, True, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base != per, exp) + tm.assert_equal(per != base, exp) + + exp = np.array([False, False, True, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base > per, exp) + tm.assert_equal(per < base, exp) + + exp = np.array([True, False, False, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base < per, exp) + tm.assert_equal(per > base, exp) + + exp = np.array([False, True, True, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base >= per, exp) + tm.assert_equal(per <= base, exp) + + exp = np.array([True, True, False, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base <= per, exp) + tm.assert_equal(per >= base, exp) + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_parr_cmp_pi(self, freq, box_with_array): + # GH#13200 + base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) + base = tm.box_expected(base, box_with_array) + + # TODO: could also box idx? 
+ idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq) + + xbox = get_upcast_box(base, idx, True) + + exp = np.array([False, False, True, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base == idx, exp) + + exp = np.array([True, True, False, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base != idx, exp) + + exp = np.array([False, True, False, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base > idx, exp) + + exp = np.array([True, False, False, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base < idx, exp) + + exp = np.array([False, True, True, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base >= idx, exp) + + exp = np.array([True, False, True, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base <= idx, exp) + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_parr_cmp_pi_mismatched_freq(self, freq, box_with_array): + # GH#13200 + # different base freq + base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) + base = tm.box_expected(base, box_with_array) + + msg = rf"Invalid comparison between dtype=period\[{freq}\] and Period" + with pytest.raises(TypeError, match=msg): + base <= Period("2011", freq="A") + + with pytest.raises(TypeError, match=msg): + Period("2011", freq="A") >= base + + # TODO: Could parametrize over boxes for idx? + idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A") + rev_msg = r"Invalid comparison between dtype=period\[A-DEC\] and PeriodArray" + idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg + with pytest.raises(TypeError, match=idx_msg): + base <= idx + + # Different frequency + msg = rf"Invalid comparison between dtype=period\[{freq}\] and Period" + with pytest.raises(TypeError, match=msg): + base <= Period("2011", freq="4M") + + with pytest.raises(TypeError, match=msg): + Period("2011", freq="4M") >= base + + idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M") + rev_msg = r"Invalid comparison between dtype=period\[4M\] and PeriodArray" + idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg + with pytest.raises(TypeError, match=idx_msg): + base <= idx + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_pi_cmp_nat(self, freq): + idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq) + per = idx1[1] + + result = idx1 > per + exp = np.array([False, False, False, True]) + tm.assert_numpy_array_equal(result, exp) + result = per < idx1 + tm.assert_numpy_array_equal(result, exp) + + result = idx1 == pd.NaT + exp = np.array([False, False, False, False]) + tm.assert_numpy_array_equal(result, exp) + result = pd.NaT == idx1 + tm.assert_numpy_array_equal(result, exp) + + result = idx1 != pd.NaT + exp = np.array([True, True, True, True]) + tm.assert_numpy_array_equal(result, exp) + result = pd.NaT != idx1 + tm.assert_numpy_array_equal(result, exp) + + idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq) + result = idx1 < idx2 + exp = np.array([True, False, False, False]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 == idx2 + exp = np.array([False, False, False, False]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 != idx2 + exp = np.array([True, True, True, True]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 == idx1 + exp = np.array([True, True, False, True]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 != idx1 + exp = np.array([False, False, True, False]) + 
tm.assert_numpy_array_equal(result, exp) + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_pi_cmp_nat_mismatched_freq_raises(self, freq): + idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq) + + diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M") + msg = rf"Invalid comparison between dtype=period\[{freq}\] and PeriodArray" + with pytest.raises(TypeError, match=msg): + idx1 > diff + + result = idx1 == diff + expected = np.array([False, False, False, False], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + # TODO: De-duplicate with test_pi_cmp_nat + @pytest.mark.parametrize("dtype", [object, None]) + def test_comp_nat(self, dtype): + left = PeriodIndex([Period("2011-01-01"), pd.NaT, Period("2011-01-03")]) + right = PeriodIndex([pd.NaT, pd.NaT, Period("2011-01-03")]) + + if dtype is not None: + left = left.astype(dtype) + right = right.astype(dtype) + + result = left == right + expected = np.array([False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = left != right + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(left == pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT == right, expected) + + expected = np.array([True, True, True]) + tm.assert_numpy_array_equal(left != pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT != left, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(left < pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT > left, expected) + + +class TestPeriodSeriesComparisons: + def test_cmp_series_period_series_mixed_freq(self): + # GH#13200 + base = Series( + [ + Period("2011", freq="A"), + Period("2011-02", freq="M"), + Period("2013", freq="A"), + Period("2011-04", freq="M"), + ] + ) + + ser = Series( + [ + Period("2012", freq="A"), + Period("2011-01", freq="M"), + Period("2013", freq="A"), + Period("2011-05", freq="M"), + ] + ) + + exp = Series([False, False, True, False]) + tm.assert_series_equal(base == ser, exp) + + exp = Series([True, True, False, True]) + tm.assert_series_equal(base != ser, exp) + + exp = Series([False, True, False, False]) + tm.assert_series_equal(base > ser, exp) + + exp = Series([True, False, False, True]) + tm.assert_series_equal(base < ser, exp) + + exp = Series([False, True, True, False]) + tm.assert_series_equal(base >= ser, exp) + + exp = Series([True, False, True, True]) + tm.assert_series_equal(base <= ser, exp) + + +class TestPeriodIndexSeriesComparisonConsistency: + """Test PeriodIndex and Period Series Ops consistency""" + + # TODO: needs parametrization+de-duplication + + def _check(self, values, func, expected): + # Test PeriodIndex and Period Series Ops consistency + + idx = PeriodIndex(values) + result = func(idx) + + # check that we don't pass an unwanted type to tm.assert_equal + assert isinstance(expected, (pd.Index, np.ndarray)) + tm.assert_equal(result, expected) + + s = Series(values) + result = func(s) + + exp = Series(expected, name=values.name) + tm.assert_series_equal(result, exp) + + def test_pi_comp_period(self): + idx = PeriodIndex( + ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx" + ) + per = idx[2] + + f = lambda x: x == per + exp = np.array([False, False, True, False], dtype=np.bool_) + self._check(idx, f, exp) + f = lambda x: per == x + self._check(idx, f, exp) + + f = lambda x: x != per + exp = np.array([True, True, False, True], 
dtype=np.bool_) + self._check(idx, f, exp) + f = lambda x: per != x + self._check(idx, f, exp) + + f = lambda x: per >= x + exp = np.array([True, True, True, False], dtype=np.bool_) + self._check(idx, f, exp) + + f = lambda x: x > per + exp = np.array([False, False, False, True], dtype=np.bool_) + self._check(idx, f, exp) + + f = lambda x: per >= x + exp = np.array([True, True, True, False], dtype=np.bool_) + self._check(idx, f, exp) + + def test_pi_comp_period_nat(self): + idx = PeriodIndex( + ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx" + ) + per = idx[2] + + f = lambda x: x == per + exp = np.array([False, False, True, False], dtype=np.bool_) + self._check(idx, f, exp) + f = lambda x: per == x + self._check(idx, f, exp) + + f = lambda x: x == pd.NaT + exp = np.array([False, False, False, False], dtype=np.bool_) + self._check(idx, f, exp) + f = lambda x: pd.NaT == x + self._check(idx, f, exp) + + f = lambda x: x != per + exp = np.array([True, True, False, True], dtype=np.bool_) + self._check(idx, f, exp) + f = lambda x: per != x + self._check(idx, f, exp) + + f = lambda x: x != pd.NaT + exp = np.array([True, True, True, True], dtype=np.bool_) + self._check(idx, f, exp) + f = lambda x: pd.NaT != x + self._check(idx, f, exp) + + f = lambda x: per >= x + exp = np.array([True, False, True, False], dtype=np.bool_) + self._check(idx, f, exp) + + f = lambda x: x < per + exp = np.array([True, False, False, False], dtype=np.bool_) + self._check(idx, f, exp) + + f = lambda x: x > pd.NaT + exp = np.array([False, False, False, False], dtype=np.bool_) + self._check(idx, f, exp) + + f = lambda x: pd.NaT >= x + exp = np.array([False, False, False, False], dtype=np.bool_) + self._check(idx, f, exp) + + +# ------------------------------------------------------------------ +# Arithmetic + + +class TestPeriodFrameArithmetic: + def test_ops_frame_period(self): + # GH#13043 + df = pd.DataFrame( + { + "A": [Period("2015-01", freq="M"), Period("2015-02", freq="M")], + "B": [Period("2014-01", freq="M"), Period("2014-02", freq="M")], + } + ) + assert df["A"].dtype == "Period[M]" + assert df["B"].dtype == "Period[M]" + + p = Period("2015-03", freq="M") + off = p.freq + # dtype will be object because of original dtype + exp = pd.DataFrame( + { + "A": np.array([2 * off, 1 * off], dtype=object), + "B": np.array([14 * off, 13 * off], dtype=object), + } + ) + tm.assert_frame_equal(p - df, exp) + tm.assert_frame_equal(df - p, -1 * exp) + + df2 = pd.DataFrame( + { + "A": [Period("2015-05", freq="M"), Period("2015-06", freq="M")], + "B": [Period("2015-05", freq="M"), Period("2015-06", freq="M")], + } + ) + assert df2["A"].dtype == "Period[M]" + assert df2["B"].dtype == "Period[M]" + + exp = pd.DataFrame( + { + "A": np.array([4 * off, 4 * off], dtype=object), + "B": np.array([16 * off, 16 * off], dtype=object), + } + ) + tm.assert_frame_equal(df2 - df, exp) + tm.assert_frame_equal(df - df2, -1 * exp) + + +class TestPeriodIndexArithmetic: + # --------------------------------------------------------------- + # __add__/__sub__ with PeriodIndex + # PeriodIndex + other is defined for integers and timedelta-like others + # PeriodIndex - other is defined for integers, timedelta-like others, + # and PeriodIndex (with matching freq) + + def test_parr_add_iadd_parr_raises(self, box_with_array): + rng = period_range("1/1/2000", freq="D", periods=5) + other = period_range("1/6/2000", freq="D", periods=5) + # TODO: parametrize over boxes for other? 
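+        # One way to address the TODO (sketch, not from upstream): box `other`
+        # with a second fixture, e.g.
+        #     other = tm.box_expected(other, box_with_array2)
+        # mirroring test_parr_sub_pi_mismatched_freq below, which already takes
+        # both box_with_array and box_with_array2.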
+ + rng = tm.box_expected(rng, box_with_array) + # An earlier implementation of PeriodIndex addition performed + # a set operation (union). This has since been changed to + # raise a TypeError. See GH#14164 and GH#13077 for historical + # reference. + msg = r"unsupported operand type\(s\) for \+: .* and .*" + with pytest.raises(TypeError, match=msg): + rng + other + + with pytest.raises(TypeError, match=msg): + rng += other + + def test_pi_sub_isub_pi(self): + # GH#20049 + # For historical reference see GH#14164, GH#13077. + # PeriodIndex subtraction originally performed set difference, + # then changed to raise TypeError before being implemented in GH#20049 + rng = period_range("1/1/2000", freq="D", periods=5) + other = period_range("1/6/2000", freq="D", periods=5) + + off = rng.freq + expected = pd.Index([-5 * off] * 5) + result = rng - other + tm.assert_index_equal(result, expected) + + rng -= other + tm.assert_index_equal(rng, expected) + + def test_pi_sub_pi_with_nat(self): + rng = period_range("1/1/2000", freq="D", periods=5) + other = rng[1:].insert(0, pd.NaT) + assert other[1:].equals(rng[1:]) + + result = rng - other + off = rng.freq + expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off]) + tm.assert_index_equal(result, expected) + + def test_parr_sub_pi_mismatched_freq(self, box_with_array, box_with_array2): + rng = period_range("1/1/2000", freq="D", periods=5) + other = period_range("1/6/2000", freq="H", periods=5) + + rng = tm.box_expected(rng, box_with_array) + other = tm.box_expected(other, box_with_array2) + msg = r"Input has different freq=[HD] from PeriodArray\(freq=[DH]\)" + with pytest.raises(IncompatibleFrequency, match=msg): + rng - other + + @pytest.mark.parametrize("n", [1, 2, 3, 4]) + def test_sub_n_gt_1_ticks(self, tick_classes, n): + # GH 23878 + p1_d = "19910905" + p2_d = "19920406" + p1 = PeriodIndex([p1_d], freq=tick_classes(n)) + p2 = PeriodIndex([p2_d], freq=tick_classes(n)) + + expected = PeriodIndex([p2_d], freq=p2.freq.base) - PeriodIndex( + [p1_d], freq=p1.freq.base + ) + + tm.assert_index_equal((p2 - p1), expected) + + @pytest.mark.parametrize("n", [1, 2, 3, 4]) + @pytest.mark.parametrize( + "offset, kwd_name", + [ + (pd.offsets.YearEnd, "month"), + (pd.offsets.QuarterEnd, "startingMonth"), + (pd.offsets.MonthEnd, None), + (pd.offsets.Week, "weekday"), + ], + ) + def test_sub_n_gt_1_offsets(self, offset, kwd_name, n): + # GH 23878 + kwds = {kwd_name: 3} if kwd_name is not None else {} + p1_d = "19910905" + p2_d = "19920406" + freq = offset(n, normalize=False, **kwds) + p1 = PeriodIndex([p1_d], freq=freq) + p2 = PeriodIndex([p2_d], freq=freq) + + result = p2 - p1 + expected = PeriodIndex([p2_d], freq=freq.base) - PeriodIndex( + [p1_d], freq=freq.base + ) + + tm.assert_index_equal(result, expected) + + # ------------------------------------------------------------- + # Invalid Operations + + @pytest.mark.parametrize( + "other", + [ + # datetime scalars + Timestamp("2016-01-01"), + Timestamp("2016-01-01").to_pydatetime(), + Timestamp("2016-01-01").to_datetime64(), + # datetime-like arrays + pd.date_range("2016-01-01", periods=3, freq="H"), + pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"), + pd.date_range("2016-01-01", periods=3, freq="S")._data, + pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data, + # Miscellaneous invalid types + 3.14, + np.array([2.0, 3.0, 4.0]), + ], + ) + def test_parr_add_sub_invalid(self, other, box_with_array): + # GH#23215 + rng = period_range("1/1/2000", freq="D", periods=3) + rng = 
tm.box_expected(rng, box_with_array) + + msg = "|".join( + [ + r"(:?cannot add PeriodArray and .*)", + r"(:?cannot subtract .* from (:?a\s)?.*)", + r"(:?unsupported operand type\(s\) for \+: .* and .*)", + r"unsupported operand type\(s\) for [+-]: .* and .*", + ] + ) + assert_invalid_addsub_type(rng, other, msg) + with pytest.raises(TypeError, match=msg): + rng + other + with pytest.raises(TypeError, match=msg): + other + rng + with pytest.raises(TypeError, match=msg): + rng - other + with pytest.raises(TypeError, match=msg): + other - rng + + # ----------------------------------------------------------------- + # __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64] + + def test_pi_add_sub_td64_array_non_tick_raises(self): + rng = period_range("1/1/2000", freq="Q", periods=3) + tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"]) + tdarr = tdi.values + + msg = r"Cannot add or subtract timedelta64\[ns\] dtype from period\[Q-DEC\]" + with pytest.raises(TypeError, match=msg): + rng + tdarr + with pytest.raises(TypeError, match=msg): + tdarr + rng + + with pytest.raises(TypeError, match=msg): + rng - tdarr + msg = r"cannot subtract PeriodArray from TimedeltaArray" + with pytest.raises(TypeError, match=msg): + tdarr - rng + + def test_pi_add_sub_td64_array_tick(self): + # PeriodIndex + Timedelta-like is allowed only with + # tick-like frequencies + rng = period_range("1/1/2000", freq="90D", periods=3) + tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"]) + tdarr = tdi.values + + expected = period_range("12/31/1999", freq="90D", periods=3) + result = rng + tdi + tm.assert_index_equal(result, expected) + result = rng + tdarr + tm.assert_index_equal(result, expected) + result = tdi + rng + tm.assert_index_equal(result, expected) + result = tdarr + rng + tm.assert_index_equal(result, expected) + + expected = period_range("1/2/2000", freq="90D", periods=3) + + result = rng - tdi + tm.assert_index_equal(result, expected) + result = rng - tdarr + tm.assert_index_equal(result, expected) + + msg = r"cannot subtract .* from .*" + with pytest.raises(TypeError, match=msg): + tdarr - rng + + with pytest.raises(TypeError, match=msg): + tdi - rng + + @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"]) + @pytest.mark.parametrize("tdi_freq", [None, "H"]) + def test_parr_sub_td64array(self, box_with_array, tdi_freq, pi_freq): + box = box_with_array + xbox = box if box not in [pd.array, tm.to_array] else pd.Index + + tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq) + dti = Timestamp("2018-03-07 17:16:40") + tdi + pi = dti.to_period(pi_freq) + + # TODO: parametrize over box for pi? + td64obj = tm.box_expected(tdi, box) + + if pi_freq == "H": + result = pi - td64obj + expected = (pi.to_timestamp("S") - tdi).to_period(pi_freq) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) + + # Subtract from scalar + result = pi[0] - td64obj + expected = (pi[0].to_timestamp("S") - tdi).to_period(pi_freq) + expected = tm.box_expected(expected, box) + tm.assert_equal(result, expected) + + elif pi_freq == "D": + # Tick, but non-compatible + msg = ( + "Cannot add/subtract timedelta-like from PeriodArray that is " + "not an integer multiple of the PeriodArray's freq." 
+ ) + with pytest.raises(IncompatibleFrequency, match=msg): + pi - td64obj + + with pytest.raises(IncompatibleFrequency, match=msg): + pi[0] - td64obj + + else: + # With non-Tick freq, we could not add timedelta64 array regardless + # of what its resolution is + msg = "Cannot add or subtract timedelta64" + with pytest.raises(TypeError, match=msg): + pi - td64obj + with pytest.raises(TypeError, match=msg): + pi[0] - td64obj + + # ----------------------------------------------------------------- + # operations with array/Index of DateOffset objects + + @pytest.mark.parametrize("box", [np.array, pd.Index]) + def test_pi_add_offset_array(self, box): + # GH#18849 + pi = PeriodIndex([Period("2015Q1"), Period("2016Q2")]) + offs = box( + [ + pd.offsets.QuarterEnd(n=1, startingMonth=12), + pd.offsets.QuarterEnd(n=-2, startingMonth=12), + ] + ) + expected = PeriodIndex([Period("2015Q2"), Period("2015Q4")]).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + res = pi + offs + tm.assert_index_equal(res, expected) + + with tm.assert_produces_warning(PerformanceWarning): + res2 = offs + pi + tm.assert_index_equal(res2, expected) + + unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)]) + # addition/subtraction ops with incompatible offsets should issue + # a PerformanceWarning and _then_ raise a TypeError. + msg = r"Input cannot be converted to Period\(freq=Q-DEC\)" + with pytest.raises(IncompatibleFrequency, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + pi + unanchored + with pytest.raises(IncompatibleFrequency, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + unanchored + pi + + @pytest.mark.parametrize("box", [np.array, pd.Index]) + def test_pi_sub_offset_array(self, box): + # GH#18824 + pi = PeriodIndex([Period("2015Q1"), Period("2016Q2")]) + other = box( + [ + pd.offsets.QuarterEnd(n=1, startingMonth=12), + pd.offsets.QuarterEnd(n=-2, startingMonth=12), + ] + ) + + expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))]) + expected = expected.astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + res = pi - other + tm.assert_index_equal(res, expected) + + anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) + + # addition/subtraction ops with anchored offsets should issue + # a PerformanceWarning and _then_ raise a TypeError. + msg = r"Input has different freq=-1M from Period\(freq=Q-DEC\)" + with pytest.raises(IncompatibleFrequency, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + pi - anchored + with pytest.raises(IncompatibleFrequency, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + anchored - pi + + def test_pi_add_iadd_int(self, one): + # Variants of `one` for #19012 + rng = period_range("2000-01-01 09:00", freq="H", periods=10) + result = rng + one + expected = period_range("2000-01-01 10:00", freq="H", periods=10) + tm.assert_index_equal(result, expected) + rng += one + tm.assert_index_equal(rng, expected) + + def test_pi_sub_isub_int(self, one): + """ + PeriodIndex.__sub__ and __isub__ with several representations of + the integer 1, e.g. int, np.int64, np.uint8, ... 
+ """ + rng = period_range("2000-01-01 09:00", freq="H", periods=10) + result = rng - one + expected = period_range("2000-01-01 08:00", freq="H", periods=10) + tm.assert_index_equal(result, expected) + rng -= one + tm.assert_index_equal(rng, expected) + + @pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)]) + def test_pi_sub_intlike(self, five): + rng = period_range("2007-01", periods=50) + + result = rng - five + exp = rng + (-five) + tm.assert_index_equal(result, exp) + + def test_pi_add_sub_int_array_freqn_gt1(self): + # GH#47209 test adding array of ints when freq.n > 1 matches + # scalar behavior + pi = period_range("2016-01-01", periods=10, freq="2D") + arr = np.arange(10) + result = pi + arr + expected = pd.Index([x + y for x, y in zip(pi, arr)]) + tm.assert_index_equal(result, expected) + + result = pi - arr + expected = pd.Index([x - y for x, y in zip(pi, arr)]) + tm.assert_index_equal(result, expected) + + def test_pi_sub_isub_offset(self): + # offset + # DateOffset + rng = period_range("2014", "2024", freq="A") + result = rng - pd.offsets.YearEnd(5) + expected = period_range("2009", "2019", freq="A") + tm.assert_index_equal(result, expected) + rng -= pd.offsets.YearEnd(5) + tm.assert_index_equal(rng, expected) + + rng = period_range("2014-01", "2016-12", freq="M") + result = rng - pd.offsets.MonthEnd(5) + expected = period_range("2013-08", "2016-07", freq="M") + tm.assert_index_equal(result, expected) + + rng -= pd.offsets.MonthEnd(5) + tm.assert_index_equal(rng, expected) + + @pytest.mark.parametrize("transpose", [True, False]) + def test_pi_add_offset_n_gt1(self, box_with_array, transpose): + # GH#23215 + # add offset to PeriodIndex with freq.n > 1 + + per = Period("2016-01", freq="2M") + pi = PeriodIndex([per]) + + expected = PeriodIndex(["2016-03"], freq="2M") + + pi = tm.box_expected(pi, box_with_array, transpose=transpose) + expected = tm.box_expected(expected, box_with_array, transpose=transpose) + + result = pi + per.freq + tm.assert_equal(result, expected) + + result = per.freq + pi + tm.assert_equal(result, expected) + + def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array): + # GH#23215 + # PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0 + pi = PeriodIndex(["2016-01"], freq="2M") + expected = PeriodIndex(["2016-04"], freq="2M") + + pi = tm.box_expected(pi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = pi + to_offset("3M") + tm.assert_equal(result, expected) + + result = to_offset("3M") + pi + tm.assert_equal(result, expected) + + # --------------------------------------------------------------- + # __add__/__sub__ with integer arrays + + @pytest.mark.parametrize("int_holder", [np.array, pd.Index]) + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_pi_add_intarray(self, int_holder, op): + # GH#19959 + pi = PeriodIndex([Period("2015Q1"), Period("NaT")]) + other = int_holder([4, -1]) + + result = op(pi, other) + expected = PeriodIndex([Period("2016Q1"), Period("NaT")]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("int_holder", [np.array, pd.Index]) + def test_pi_sub_intarray(self, int_holder): + # GH#19959 + pi = PeriodIndex([Period("2015Q1"), Period("NaT")]) + other = int_holder([4, -1]) + + result = pi - other + expected = PeriodIndex([Period("2014Q1"), Period("NaT")]) + tm.assert_index_equal(result, expected) + + msg = r"bad operand type for unary -: 'PeriodArray'" + with pytest.raises(TypeError, match=msg): + other - pi + + # 
--------------------------------------------------------------- + # Timedelta-like (timedelta, timedelta64, Timedelta, Tick) + # TODO: Some of these are misnomers because of non-Tick DateOffsets + + def test_parr_add_timedeltalike_minute_gt1(self, three_days, box_with_array): + # GH#23031 adding a time-delta-like offset to a PeriodArray that has + # daily ("2D") frequency with n != 1 (the "minute" in this test's name + # is a misnomer). A more general case is tested below + # in test_parr_add_timedeltalike_tick_gt1, but here we write out the + # expected result more explicitly. + other = three_days + rng = period_range("2014-05-01", periods=3, freq="2D") + rng = tm.box_expected(rng, box_with_array) + + expected = PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D") + expected = tm.box_expected(expected, box_with_array) + + result = rng + other + tm.assert_equal(result, expected) + + result = other + rng + tm.assert_equal(result, expected) + + # subtraction + expected = PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D") + expected = tm.box_expected(expected, box_with_array) + result = rng - other + tm.assert_equal(result, expected) + + msg = "|".join( + [ + r"bad operand type for unary -: 'PeriodArray'", + r"cannot subtract PeriodArray from timedelta64\[[hD]\]", + ] + ) + with pytest.raises(TypeError, match=msg): + other - rng + + @pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"]) + def test_parr_add_timedeltalike_tick_gt1(self, three_days, freqstr, box_with_array): + # GH#23031 adding a time-delta-like offset to a PeriodArray that has + # tick-like frequency with n != 1 + other = three_days + rng = period_range("2014-05-01", periods=6, freq=freqstr) + first = rng[0] + rng = tm.box_expected(rng, box_with_array) + + expected = period_range(first + other, periods=6, freq=freqstr) + expected = tm.box_expected(expected, box_with_array) + + result = rng + other + tm.assert_equal(result, expected) + + result = other + rng + tm.assert_equal(result, expected) + + # subtraction + expected = period_range(first - other, periods=6, freq=freqstr) + expected = tm.box_expected(expected, box_with_array) + result = rng - other + tm.assert_equal(result, expected) + msg = "|".join( + [ + r"bad operand type for unary -: 'PeriodArray'", + r"cannot subtract PeriodArray from timedelta64\[[hD]\]", + ] + ) + with pytest.raises(TypeError, match=msg): + other - rng + + def test_pi_add_iadd_timedeltalike_daily(self, three_days): + # Tick + other = three_days + rng = period_range("2014-05-01", "2014-05-15", freq="D") + expected = period_range("2014-05-04", "2014-05-18", freq="D") + + result = rng + other + tm.assert_index_equal(result, expected) + + rng += other + tm.assert_index_equal(rng, expected) + + def test_pi_sub_isub_timedeltalike_daily(self, three_days): + # Tick-like 3 Days + other = three_days + rng = period_range("2014-05-01", "2014-05-15", freq="D") + expected = period_range("2014-04-28", "2014-05-12", freq="D") + + result = rng - other + tm.assert_index_equal(result, expected) + + rng -= other + tm.assert_index_equal(rng, expected) + + def test_parr_add_sub_timedeltalike_freq_mismatch_daily( + self, not_daily, box_with_array + ): + other = not_daily + rng = period_range("2014-05-01", "2014-05-15", freq="D") + rng = tm.box_expected(rng, box_with_array) + + msg = "|".join( + [ + # non-timedelta-like DateOffset + "Input has different freq(=.+)?
from Period.*?\\(freq=D\\)", + # timedelta/td64/Timedelta but not a multiple of 24H + "Cannot add/subtract timedelta-like from PeriodArray that is " + "not an integer multiple of the PeriodArray's freq.", + ] + ) + with pytest.raises(IncompatibleFrequency, match=msg): + rng + other + with pytest.raises(IncompatibleFrequency, match=msg): + rng += other + with pytest.raises(IncompatibleFrequency, match=msg): + rng - other + with pytest.raises(IncompatibleFrequency, match=msg): + rng -= other + + def test_pi_add_iadd_timedeltalike_hourly(self, two_hours): + other = two_hours + rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H") + expected = period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="H") + + result = rng + other + tm.assert_index_equal(result, expected) + + rng += other + tm.assert_index_equal(rng, expected) + + def test_parr_add_timedeltalike_mismatched_freq_hourly( + self, not_hourly, box_with_array + ): + other = not_hourly + rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H") + rng = tm.box_expected(rng, box_with_array) + msg = "|".join( + [ + # non-timedelta-like DateOffset + "Input has different freq(=.+)? from Period.*?\\(freq=H\\)", + # timedelta/td64/Timedelta but not a whole multiple of 1H + "Cannot add/subtract timedelta-like from PeriodArray that is " + "not an integer multiple of the PeriodArray's freq.", + ] + ) + + with pytest.raises(IncompatibleFrequency, match=msg): + rng + other + + with pytest.raises(IncompatibleFrequency, match=msg): + rng += other + + def test_pi_sub_isub_timedeltalike_hourly(self, two_hours): + other = two_hours + rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H") + expected = period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="H") + + result = rng - other + tm.assert_index_equal(result, expected) + + rng -= other + tm.assert_index_equal(rng, expected) + + def test_add_iadd_timedeltalike_annual(self): + # offset + # DateOffset + rng = period_range("2014", "2024", freq="A") + result = rng + pd.offsets.YearEnd(5) + expected = period_range("2019", "2029", freq="A") + tm.assert_index_equal(result, expected) + rng += pd.offsets.YearEnd(5) + tm.assert_index_equal(rng, expected) + + def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq): + other = mismatched_freq + rng = period_range("2014", "2024", freq="A") + msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)" + with pytest.raises(IncompatibleFrequency, match=msg): + rng + other + with pytest.raises(IncompatibleFrequency, match=msg): + rng += other + with pytest.raises(IncompatibleFrequency, match=msg): + rng - other + with pytest.raises(IncompatibleFrequency, match=msg): + rng -= other + + def test_pi_add_iadd_timedeltalike_M(self): + rng = period_range("2014-01", "2016-12", freq="M") + expected = period_range("2014-06", "2017-05", freq="M") + + result = rng + pd.offsets.MonthEnd(5) + tm.assert_index_equal(result, expected) + + rng += pd.offsets.MonthEnd(5) + tm.assert_index_equal(rng, expected) + + def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq): + other = mismatched_freq + rng = period_range("2014-01", "2016-12", freq="M") + msg = "Input has different freq(=.+)?
from Period.*?\\(freq=M\\)" + with pytest.raises(IncompatibleFrequency, match=msg): + rng + other + with pytest.raises(IncompatibleFrequency, match=msg): + rng += other + with pytest.raises(IncompatibleFrequency, match=msg): + rng - other + with pytest.raises(IncompatibleFrequency, match=msg): + rng -= other + + @pytest.mark.parametrize("transpose", [True, False]) + def test_parr_add_sub_td64_nat(self, box_with_array, transpose): + # GH#23320 special handling for timedelta64("NaT") + pi = period_range("1994-04-01", periods=9, freq="19D") + other = np.timedelta64("NaT") + expected = PeriodIndex(["NaT"] * 9, freq="19D") + + obj = tm.box_expected(pi, box_with_array, transpose=transpose) + expected = tm.box_expected(expected, box_with_array, transpose=transpose) + + result = obj + other + tm.assert_equal(result, expected) + result = other + obj + tm.assert_equal(result, expected) + result = obj - other + tm.assert_equal(result, expected) + msg = r"cannot subtract .* from .*" + with pytest.raises(TypeError, match=msg): + other - obj + + @pytest.mark.parametrize( + "other", + [ + np.array(["NaT"] * 9, dtype="m8[ns]"), + TimedeltaArray._from_sequence(["NaT"] * 9), + ], + ) + def test_parr_add_sub_tdt64_nat_array(self, box_with_array, other): + pi = period_range("1994-04-01", periods=9, freq="19D") + expected = PeriodIndex(["NaT"] * 9, freq="19D") + + obj = tm.box_expected(pi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = obj + other + tm.assert_equal(result, expected) + result = other + obj + tm.assert_equal(result, expected) + result = obj - other + tm.assert_equal(result, expected) + msg = r"cannot subtract .* from .*" + with pytest.raises(TypeError, match=msg): + other - obj + + # some but not *all* NaT + other = other.copy() + other[0] = np.timedelta64(0, "ns") + expected = PeriodIndex([pi[0]] + ["NaT"] * 8, freq="19D") + expected = tm.box_expected(expected, box_with_array) + + result = obj + other + tm.assert_equal(result, expected) + result = other + obj + tm.assert_equal(result, expected) + result = obj - other + tm.assert_equal(result, expected) + with pytest.raises(TypeError, match=msg): + other - obj + + # --------------------------------------------------------------- + # Unsorted + + def test_parr_add_sub_index(self): + # Check that PeriodArray defers to Index on arithmetic ops + pi = period_range("2000-12-31", periods=3) + parr = pi.array + + result = parr - pi + expected = pi - pi + tm.assert_index_equal(result, expected) + + def test_parr_add_sub_object_array(self): + pi = period_range("2000-12-31", periods=3, freq="D") + parr = pi.array + + other = np.array([Timedelta(days=1), pd.offsets.Day(2), 3]) + + with tm.assert_produces_warning(PerformanceWarning): + result = parr + other + + expected = PeriodIndex( + ["2001-01-01", "2001-01-03", "2001-01-05"], freq="D" + )._data.astype(object) + tm.assert_equal(result, expected) + + with tm.assert_produces_warning(PerformanceWarning): + result = parr - other + + expected = PeriodIndex(["2000-12-30"] * 3, freq="D")._data.astype(object) + tm.assert_equal(result, expected) + + +class TestPeriodSeriesArithmetic: + def test_parr_add_timedeltalike_scalar(self, three_days, box_with_array): + # GH#13043 + ser = Series( + [Period("2015-01-01", freq="D"), Period("2015-01-02", freq="D")], + name="xxx", + ) + assert ser.dtype == "Period[D]" + + expected = Series( + [Period("2015-01-04", freq="D"), Period("2015-01-05", freq="D")], + name="xxx", + ) + + obj = tm.box_expected(ser, box_with_array) + if box_with_array 
is pd.DataFrame: + assert (obj.dtypes == "Period[D]").all() + + expected = tm.box_expected(expected, box_with_array) + + result = obj + three_days + tm.assert_equal(result, expected) + + result = three_days + obj + tm.assert_equal(result, expected) + + def test_ops_series_period(self): + # GH#13043 + ser = Series( + [Period("2015-01-01", freq="D"), Period("2015-01-02", freq="D")], + name="xxx", + ) + assert ser.dtype == "Period[D]" + + per = Period("2015-01-10", freq="D") + off = per.freq + # dtype will be object because of original dtype + expected = Series([9 * off, 8 * off], name="xxx", dtype=object) + tm.assert_series_equal(per - ser, expected) + tm.assert_series_equal(ser - per, -1 * expected) + + s2 = Series( + [Period("2015-01-05", freq="D"), Period("2015-01-04", freq="D")], + name="xxx", + ) + assert s2.dtype == "Period[D]" + + expected = Series([4 * off, 2 * off], name="xxx", dtype=object) + tm.assert_series_equal(s2 - ser, expected) + tm.assert_series_equal(ser - s2, -1 * expected) + + +class TestPeriodIndexSeriesMethods: + """Test PeriodIndex and Period Series Ops consistency""" + + def _check(self, values, func, expected): + idx = PeriodIndex(values) + result = func(idx) + tm.assert_equal(result, expected) + + ser = Series(values) + result = func(ser) + + exp = Series(expected, name=values.name) + tm.assert_series_equal(result, exp) + + def test_pi_ops(self): + idx = PeriodIndex( + ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx" + ) + + expected = PeriodIndex( + ["2011-03", "2011-04", "2011-05", "2011-06"], freq="M", name="idx" + ) + + self._check(idx, lambda x: x + 2, expected) + self._check(idx, lambda x: 2 + x, expected) + + self._check(idx + 2, lambda x: x - 2, idx) + + result = idx - Period("2011-01", freq="M") + off = idx.freq + exp = pd.Index([0 * off, 1 * off, 2 * off, 3 * off], name="idx") + tm.assert_index_equal(result, exp) + + result = Period("2011-01", freq="M") - idx + exp = pd.Index([0 * off, -1 * off, -2 * off, -3 * off], name="idx") + tm.assert_index_equal(result, exp) + + @pytest.mark.parametrize("ng", ["str", 1.5]) + @pytest.mark.parametrize( + "func", + [ + lambda obj, ng: obj + ng, + lambda obj, ng: ng + obj, + lambda obj, ng: obj - ng, + lambda obj, ng: ng - obj, + lambda obj, ng: np.add(obj, ng), + lambda obj, ng: np.add(ng, obj), + lambda obj, ng: np.subtract(obj, ng), + lambda obj, ng: np.subtract(ng, obj), + ], + ) + def test_parr_ops_errors(self, ng, func, box_with_array): + idx = PeriodIndex( + ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx" + ) + obj = tm.box_expected(idx, box_with_array) + msg = "|".join( + [ + r"unsupported operand type\(s\)", + "can only concatenate", + r"must be str", + "object to str implicitly", + ] + ) + + with pytest.raises(TypeError, match=msg): + func(obj, ng) + + def test_pi_ops_nat(self): + idx = PeriodIndex( + ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx" + ) + expected = PeriodIndex( + ["2011-03", "2011-04", "NaT", "2011-06"], freq="M", name="idx" + ) + + self._check(idx, lambda x: x + 2, expected) + self._check(idx, lambda x: 2 + x, expected) + self._check(idx, lambda x: np.add(x, 2), expected) + + self._check(idx + 2, lambda x: x - 2, idx) + self._check(idx + 2, lambda x: np.subtract(x, 2), idx) + + # freq with mult + idx = PeriodIndex( + ["2011-01", "2011-02", "NaT", "2011-04"], freq="2M", name="idx" + ) + expected = PeriodIndex( + ["2011-07", "2011-08", "NaT", "2011-10"], freq="2M", name="idx" + ) + + self._check(idx, lambda x: x + 3, expected) + 
self._check(idx, lambda x: 3 + x, expected) + self._check(idx, lambda x: np.add(x, 3), expected) + + self._check(idx + 3, lambda x: x - 3, idx) + self._check(idx + 3, lambda x: np.subtract(x, 3), idx) + + def test_pi_ops_array_int(self): + idx = PeriodIndex( + ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx" + ) + f = lambda x: x + np.array([1, 2, 3, 4]) + exp = PeriodIndex( + ["2011-02", "2011-04", "NaT", "2011-08"], freq="M", name="idx" + ) + self._check(idx, f, exp) + + f = lambda x: np.add(x, np.array([4, -1, 1, 2])) + exp = PeriodIndex( + ["2011-05", "2011-01", "NaT", "2011-06"], freq="M", name="idx" + ) + self._check(idx, f, exp) + + f = lambda x: x - np.array([1, 2, 3, 4]) + exp = PeriodIndex( + ["2010-12", "2010-12", "NaT", "2010-12"], freq="M", name="idx" + ) + self._check(idx, f, exp) + + f = lambda x: np.subtract(x, np.array([3, 2, 3, -2])) + exp = PeriodIndex( + ["2010-10", "2010-12", "NaT", "2011-06"], freq="M", name="idx" + ) + self._check(idx, f, exp) + + def test_pi_ops_offset(self): + idx = PeriodIndex( + ["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"], + freq="D", + name="idx", + ) + f = lambda x: x + pd.offsets.Day() + exp = PeriodIndex( + ["2011-01-02", "2011-02-02", "2011-03-02", "2011-04-02"], + freq="D", + name="idx", + ) + self._check(idx, f, exp) + + f = lambda x: x + pd.offsets.Day(2) + exp = PeriodIndex( + ["2011-01-03", "2011-02-03", "2011-03-03", "2011-04-03"], + freq="D", + name="idx", + ) + self._check(idx, f, exp) + + f = lambda x: x - pd.offsets.Day(2) + exp = PeriodIndex( + ["2010-12-30", "2011-01-30", "2011-02-27", "2011-03-30"], + freq="D", + name="idx", + ) + self._check(idx, f, exp) + + def test_pi_offset_errors(self): + idx = PeriodIndex( + ["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"], + freq="D", + name="idx", + ) + ser = Series(idx) + + msg = ( + "Cannot add/subtract timedelta-like from PeriodArray that is not " + "an integer multiple of the PeriodArray's freq" + ) + for obj in [idx, ser]: + with pytest.raises(IncompatibleFrequency, match=msg): + obj + pd.offsets.Hour(2) + + with pytest.raises(IncompatibleFrequency, match=msg): + pd.offsets.Hour(2) + obj + + with pytest.raises(IncompatibleFrequency, match=msg): + obj - pd.offsets.Hour(2) + + def test_pi_sub_period(self): + # GH#13071 + idx = PeriodIndex( + ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx" + ) + + result = idx - Period("2012-01", freq="M") + off = idx.freq + exp = pd.Index([-12 * off, -11 * off, -10 * off, -9 * off], name="idx") + tm.assert_index_equal(result, exp) + + result = np.subtract(idx, Period("2012-01", freq="M")) + tm.assert_index_equal(result, exp) + + result = Period("2012-01", freq="M") - idx + exp = pd.Index([12 * off, 11 * off, 10 * off, 9 * off], name="idx") + tm.assert_index_equal(result, exp) + + result = np.subtract(Period("2012-01", freq="M"), idx) + tm.assert_index_equal(result, exp) + + exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx") + result = idx - Period("NaT", freq="M") + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + result = Period("NaT", freq="M") - idx + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + def test_pi_sub_pdnat(self): + # GH#13071, GH#19389 + idx = PeriodIndex( + ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx" + ) + exp = TimedeltaIndex([pd.NaT] * 4, name="idx") + tm.assert_index_equal(pd.NaT - idx, exp) + tm.assert_index_equal(idx - pd.NaT, exp) + + def test_pi_sub_period_nat(self): + # GH#13071 + idx = 
PeriodIndex( + ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx" + ) + + result = idx - Period("2012-01", freq="M") + off = idx.freq + exp = pd.Index([-12 * off, pd.NaT, -10 * off, -9 * off], name="idx") + tm.assert_index_equal(result, exp) + + result = Period("2012-01", freq="M") - idx + exp = pd.Index([12 * off, pd.NaT, 10 * off, 9 * off], name="idx") + tm.assert_index_equal(result, exp) + + exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx") + tm.assert_index_equal(idx - Period("NaT", freq="M"), exp) + tm.assert_index_equal(Period("NaT", freq="M") - idx, exp) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_timedelta64.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_timedelta64.py new file mode 100644 index 00000000..3d237b3a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arithmetic/test_timedelta64.py @@ -0,0 +1,2174 @@ +# Arithmetic tests for DataFrame/Series/Index/Array classes that should +# behave identically. +from datetime import ( + datetime, + timedelta, +) + +import numpy as np +import pytest + +from pandas.errors import ( + OutOfBoundsDatetime, + PerformanceWarning, +) + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + NaT, + Series, + Timedelta, + TimedeltaIndex, + Timestamp, + offsets, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.arrays import NumpyExtensionArray +from pandas.tests.arithmetic.common import ( + assert_invalid_addsub_type, + assert_invalid_comparison, + get_upcast_box, +) + + +def assert_dtype(obj, expected_dtype): + """ + Helper to check the dtype for a Series, Index, or single-column DataFrame. + """ + dtype = tm.get_dtype(obj) + + assert dtype == expected_dtype + + +def get_expected_name(box, names): + if box is DataFrame: + # Since we are operating with a DataFrame and a non-DataFrame, + # the non-DataFrame is cast to Series and its name ignored. + exname = names[0] + elif box in [tm.to_array, pd.array]: + exname = names[1] + else: + exname = names[2] + return exname + + +# ------------------------------------------------------------------ +# Timedelta64[ns] dtype Comparisons + + +class TestTimedelta64ArrayLikeComparisons: + # Comparison tests for timedelta64[ns] vectors fully parametrized over + # DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison + # tests will eventually end up here. 
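+ + # [Editor's sketch, not part of the original suite] A minimal illustration + # of the zero-dim "unboxing" the tests below cover: comparing against a + # 0-dimensional ndarray should match comparing against the scalar it wraps: + # tdi = timedelta_range("2H", periods=4) + # zerodim = np.array(tdi.to_numpy()[0]) # 0-d timedelta64 ndarray + # assert ((tdi <= zerodim) == (tdi <= tdi[0])).all()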
+ + def test_compare_timedelta64_zerodim(self, box_with_array): + # GH#26689 should unbox when comparing with zerodim array + box = box_with_array + xbox = box_with_array if box_with_array not in [Index, pd.array] else np.ndarray + + tdi = timedelta_range("2H", periods=4) + other = np.array(tdi.to_numpy()[0]) + + tdi = tm.box_expected(tdi, box) + res = tdi <= other + expected = np.array([True, False, False, False]) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(res, expected) + + @pytest.mark.parametrize( + "td_scalar", + [ + timedelta(days=1), + Timedelta(days=1), + Timedelta(days=1).to_timedelta64(), + offsets.Hour(24), + ], + ) + def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar): + # regression test for GH#5963 + box = box_with_array + xbox = box if box not in [Index, pd.array] else np.ndarray + + ser = Series([timedelta(days=1), timedelta(days=2)]) + ser = tm.box_expected(ser, box) + actual = ser > td_scalar + expected = Series([False, True]) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(actual, expected) + + @pytest.mark.parametrize( + "invalid", + [ + 345600000000000, + "a", + Timestamp("2021-01-01"), + Timestamp("2021-01-01").now("UTC"), + Timestamp("2021-01-01").now().to_datetime64(), + Timestamp("2021-01-01").now().to_pydatetime(), + Timestamp("2021-01-01").date(), + np.array(4), # zero-dim mismatched dtype + ], + ) + def test_td64_comparisons_invalid(self, box_with_array, invalid): + # GH#13624 for str + box = box_with_array + + rng = timedelta_range("1 days", periods=10) + obj = tm.box_expected(rng, box) + + assert_invalid_comparison(obj, invalid, box) + + @pytest.mark.parametrize( + "other", + [ + list(range(10)), + np.arange(10), + np.arange(10).astype(np.float32), + np.arange(10).astype(object), + pd.date_range("1970-01-01", periods=10, tz="UTC").array, + np.array(pd.date_range("1970-01-01", periods=10)), + list(pd.date_range("1970-01-01", periods=10)), + pd.date_range("1970-01-01", periods=10).astype(object), + pd.period_range("1971-01-01", freq="D", periods=10).array, + pd.period_range("1971-01-01", freq="D", periods=10).astype(object), + ], + ) + def test_td64arr_cmp_arraylike_invalid(self, other, box_with_array): + # We don't parametrize this over box_with_array because listlike + # other plays poorly with assert_invalid_comparison reversed checks + + rng = timedelta_range("1 days", periods=10)._data + rng = tm.box_expected(rng, box_with_array) + assert_invalid_comparison(rng, other, box_with_array) + + def test_td64arr_cmp_mixed_invalid(self): + rng = timedelta_range("1 days", periods=5)._data + other = np.array([0, 1, 2, rng[3], Timestamp("2021-01-01")]) + + result = rng == other + expected = np.array([False, False, False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = rng != other + tm.assert_numpy_array_equal(result, ~expected) + + msg = "Invalid comparison between|Cannot compare type|not supported between" + with pytest.raises(TypeError, match=msg): + rng < other + with pytest.raises(TypeError, match=msg): + rng > other + with pytest.raises(TypeError, match=msg): + rng <= other + with pytest.raises(TypeError, match=msg): + rng >= other + + +class TestTimedelta64ArrayComparisons: + # TODO: All of these need to be parametrized over box + + @pytest.mark.parametrize("dtype", [None, object]) + def test_comp_nat(self, dtype): + left = TimedeltaIndex([Timedelta("1 days"), NaT, Timedelta("3 days")]) + right = TimedeltaIndex([NaT, NaT, Timedelta("3 days")]) + + lhs, rhs = left, right + if dtype 
is object: + lhs, rhs = left.astype(object), right.astype(object) + + result = rhs == lhs + expected = np.array([False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = rhs != lhs + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(lhs == NaT, expected) + tm.assert_numpy_array_equal(NaT == rhs, expected) + + expected = np.array([True, True, True]) + tm.assert_numpy_array_equal(lhs != NaT, expected) + tm.assert_numpy_array_equal(NaT != lhs, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(lhs < NaT, expected) + tm.assert_numpy_array_equal(NaT > lhs, expected) + + @pytest.mark.parametrize( + "idx2", + [ + TimedeltaIndex( + ["2 day", "2 day", NaT, NaT, "1 day 00:00:02", "5 days 00:00:03"] + ), + np.array( + [ + np.timedelta64(2, "D"), + np.timedelta64(2, "D"), + np.timedelta64("nat"), + np.timedelta64("nat"), + np.timedelta64(1, "D") + np.timedelta64(2, "s"), + np.timedelta64(5, "D") + np.timedelta64(3, "s"), + ] + ), + ], + ) + def test_comparisons_nat(self, idx2): + idx1 = TimedeltaIndex( + [ + "1 day", + NaT, + "1 day 00:00:01", + NaT, + "1 day 00:00:01", + "5 day 00:00:03", + ] + ) + # Check that pd.NaT is handled the same as np.nan + result = idx1 < idx2 + expected = np.array([True, False, False, False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = idx2 > idx1 + expected = np.array([True, False, False, False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 <= idx2 + expected = np.array([True, False, False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx2 >= idx1 + expected = np.array([True, False, False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 == idx2 + expected = np.array([False, False, False, False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 != idx2 + expected = np.array([True, True, True, True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + # TODO: better name + def test_comparisons_coverage(self): + rng = timedelta_range("1 days", periods=10) + + result = rng < rng[3] + expected = np.array([True, True, True] + [False] * 7) + tm.assert_numpy_array_equal(result, expected) + + result = rng == list(rng) + exp = rng == rng + tm.assert_numpy_array_equal(result, exp) + + +# ------------------------------------------------------------------ +# Timedelta64[ns] dtype Arithmetic Operations + + +class TestTimedelta64ArithmeticUnsorted: + # Tests moved from type-specific test files but not + # yet sorted/parametrized/de-duplicated + + def test_ufunc_coercions(self): + # normal ops are also tested in tseries/test_timedeltas.py + idx = TimedeltaIndex(["2H", "4H", "6H", "8H", "10H"], freq="2H", name="x") + + for result in [idx * 2, np.multiply(idx, 2)]: + assert isinstance(result, TimedeltaIndex) + exp = TimedeltaIndex(["4H", "8H", "12H", "16H", "20H"], freq="4H", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "4H" + + for result in [idx / 2, np.divide(idx, 2)]: + assert isinstance(result, TimedeltaIndex) + exp = TimedeltaIndex(["1H", "2H", "3H", "4H", "5H"], freq="H", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "H" + + for result in [-idx, np.negative(idx)]: + assert isinstance(result, TimedeltaIndex) + exp = TimedeltaIndex( + ["-2H", "-4H", "-6H", "-8H", "-10H"], freq="-2H", 
name="x" + ) + tm.assert_index_equal(result, exp) + assert result.freq == "-2H" + + idx = TimedeltaIndex(["-2H", "-1H", "0H", "1H", "2H"], freq="H", name="x") + for result in [abs(idx), np.absolute(idx)]: + assert isinstance(result, TimedeltaIndex) + exp = TimedeltaIndex(["2H", "1H", "0H", "1H", "2H"], freq=None, name="x") + tm.assert_index_equal(result, exp) + assert result.freq is None + + def test_subtraction_ops(self): + # with datetimes/timedelta and tdi/dti + tdi = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo") + dti = pd.date_range("20130101", periods=3, name="bar") + td = Timedelta("1 days") + dt = Timestamp("20130101") + + msg = "cannot subtract a datelike from a TimedeltaArray" + with pytest.raises(TypeError, match=msg): + tdi - dt + with pytest.raises(TypeError, match=msg): + tdi - dti + + msg = r"unsupported operand type\(s\) for -" + with pytest.raises(TypeError, match=msg): + td - dt + + msg = "(bad|unsupported) operand type for unary" + with pytest.raises(TypeError, match=msg): + td - dti + + result = dt - dti + expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"], name="bar") + tm.assert_index_equal(result, expected) + + result = dti - dt + expected = TimedeltaIndex(["0 days", "1 days", "2 days"], name="bar") + tm.assert_index_equal(result, expected) + + result = tdi - td + expected = TimedeltaIndex(["0 days", NaT, "1 days"], name="foo") + tm.assert_index_equal(result, expected, check_names=False) + + result = td - tdi + expected = TimedeltaIndex(["0 days", NaT, "-1 days"], name="foo") + tm.assert_index_equal(result, expected, check_names=False) + + result = dti - td + expected = DatetimeIndex( + ["20121231", "20130101", "20130102"], freq="D", name="bar" + ) + tm.assert_index_equal(result, expected, check_names=False) + + result = dt - tdi + expected = DatetimeIndex(["20121231", NaT, "20121230"], name="foo") + tm.assert_index_equal(result, expected) + + def test_subtraction_ops_with_tz(self, box_with_array): + # check that dt/dti subtraction ops with tz are validated + dti = pd.date_range("20130101", periods=3) + dti = tm.box_expected(dti, box_with_array) + ts = Timestamp("20130101") + dt = ts.to_pydatetime() + dti_tz = pd.date_range("20130101", periods=3).tz_localize("US/Eastern") + dti_tz = tm.box_expected(dti_tz, box_with_array) + ts_tz = Timestamp("20130101").tz_localize("US/Eastern") + ts_tz2 = Timestamp("20130101").tz_localize("CET") + dt_tz = ts_tz.to_pydatetime() + td = Timedelta("1 days") + + def _check(result, expected): + assert result == expected + assert isinstance(result, Timedelta) + + # scalars + result = ts - ts + expected = Timedelta("0 days") + _check(result, expected) + + result = dt_tz - ts_tz + expected = Timedelta("0 days") + _check(result, expected) + + result = ts_tz - dt_tz + expected = Timedelta("0 days") + _check(result, expected) + + # tz mismatches + msg = "Cannot subtract tz-naive and tz-aware datetime-like objects." + with pytest.raises(TypeError, match=msg): + dt_tz - ts + msg = "can't subtract offset-naive and offset-aware datetimes" + with pytest.raises(TypeError, match=msg): + dt_tz - dt + msg = "can't subtract offset-naive and offset-aware datetimes" + with pytest.raises(TypeError, match=msg): + dt - dt_tz + msg = "Cannot subtract tz-naive and tz-aware datetime-like objects." 
+ with pytest.raises(TypeError, match=msg): + ts - dt_tz + with pytest.raises(TypeError, match=msg): + ts_tz2 - ts + with pytest.raises(TypeError, match=msg): + ts_tz2 - dt + + msg = "Cannot subtract tz-naive and tz-aware" + # with dti + with pytest.raises(TypeError, match=msg): + dti - ts_tz + with pytest.raises(TypeError, match=msg): + dti_tz - ts + + result = dti_tz - dt_tz + expected = TimedeltaIndex(["0 days", "1 days", "2 days"]) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + result = dt_tz - dti_tz + expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"]) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + result = dti_tz - ts_tz + expected = TimedeltaIndex(["0 days", "1 days", "2 days"]) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + result = ts_tz - dti_tz + expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"]) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + result = td - td + expected = Timedelta("0 days") + _check(result, expected) + + result = dti_tz - td + expected = DatetimeIndex(["20121231", "20130101", "20130102"], tz="US/Eastern") + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + def test_dti_tdi_numeric_ops(self): + # These are normally union/diff set-like ops + tdi = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo") + dti = pd.date_range("20130101", periods=3, name="bar") + + result = tdi - tdi + expected = TimedeltaIndex(["0 days", NaT, "0 days"], name="foo") + tm.assert_index_equal(result, expected) + + result = tdi + tdi + expected = TimedeltaIndex(["2 days", NaT, "4 days"], name="foo") + tm.assert_index_equal(result, expected) + + result = dti - tdi # name will be reset + expected = DatetimeIndex(["20121231", NaT, "20130101"]) + tm.assert_index_equal(result, expected) + + def test_addition_ops(self): + # with datetimes/timedelta and tdi/dti + tdi = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo") + dti = pd.date_range("20130101", periods=3, name="bar") + td = Timedelta("1 days") + dt = Timestamp("20130101") + + result = tdi + dt + expected = DatetimeIndex(["20130102", NaT, "20130103"], name="foo") + tm.assert_index_equal(result, expected) + + result = dt + tdi + expected = DatetimeIndex(["20130102", NaT, "20130103"], name="foo") + tm.assert_index_equal(result, expected) + + result = td + tdi + expected = TimedeltaIndex(["2 days", NaT, "3 days"], name="foo") + tm.assert_index_equal(result, expected) + + result = tdi + td + expected = TimedeltaIndex(["2 days", NaT, "3 days"], name="foo") + tm.assert_index_equal(result, expected) + + # unequal length + msg = "cannot add indices of unequal length" + with pytest.raises(ValueError, match=msg): + tdi + dti[0:1] + with pytest.raises(ValueError, match=msg): + tdi[0:1] + dti + + # random indexes + msg = "Addition/subtraction of integers and integer-arrays" + with pytest.raises(TypeError, match=msg): + tdi + Index([1, 2, 3], dtype=np.int64) + + # this is a union! 
+ # pytest.raises(TypeError, lambda : Index([1,2,3]) + tdi) + + result = tdi + dti # name will be reset + expected = DatetimeIndex(["20130102", NaT, "20130105"]) + tm.assert_index_equal(result, expected) + + result = dti + tdi # name will be reset + expected = DatetimeIndex(["20130102", NaT, "20130105"]) + tm.assert_index_equal(result, expected) + + result = dt + td + expected = Timestamp("20130102") + assert result == expected + + result = td + dt + expected = Timestamp("20130102") + assert result == expected + + # TODO: Needs more informative name, probably split up into + # more targeted tests + @pytest.mark.parametrize("freq", ["D", "B"]) + def test_timedelta(self, freq): + index = pd.date_range("1/1/2000", periods=50, freq=freq) + + shifted = index + timedelta(1) + back = shifted + timedelta(-1) + back = back._with_freq("infer") + tm.assert_index_equal(index, back) + + if freq == "D": + expected = pd.tseries.offsets.Day(1) + assert index.freq == expected + assert shifted.freq == expected + assert back.freq == expected + else: # freq == 'B' + assert index.freq == pd.tseries.offsets.BusinessDay(1) + assert shifted.freq is None + assert back.freq == pd.tseries.offsets.BusinessDay(1) + + result = index - timedelta(1) + expected = index + timedelta(-1) + tm.assert_index_equal(result, expected) + + def test_timedelta_tick_arithmetic(self): + # GH#4134, buggy with timedeltas + rng = pd.date_range("2013", "2014") + s = Series(rng) + result1 = rng - offsets.Hour(1) + result2 = DatetimeIndex(s - np.timedelta64(100000000)) + result3 = rng - np.timedelta64(100000000) + result4 = DatetimeIndex(s - offsets.Hour(1)) + + assert result1.freq == rng.freq + result1 = result1._with_freq(None) + tm.assert_index_equal(result1, result4) + + assert result3.freq == rng.freq + result3 = result3._with_freq(None) + tm.assert_index_equal(result2, result3) + + def test_tda_add_sub_index(self): + # Check that TimedeltaArray defers to Index on arithmetic ops + tdi = TimedeltaIndex(["1 days", NaT, "2 days"]) + tda = tdi.array + + dti = pd.date_range("1999-12-31", periods=3, freq="D") + + result = tda + dti + expected = tdi + dti + tm.assert_index_equal(result, expected) + + result = tda + tdi + expected = tdi + tdi + tm.assert_index_equal(result, expected) + + result = tda - tdi + expected = tdi - tdi + tm.assert_index_equal(result, expected) + + def test_tda_add_dt64_object_array(self, box_with_array, tz_naive_fixture): + # Result should be cast back to DatetimeArray + box = box_with_array + + dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture) + dti = dti._with_freq(None) + tdi = dti - dti + + obj = tm.box_expected(tdi, box) + other = tm.box_expected(dti, box) + + with tm.assert_produces_warning(PerformanceWarning): + result = obj + other.astype(object) + tm.assert_equal(result, other.astype(object)) + + # ------------------------------------------------------------- + # Binary operations TimedeltaIndex and timedelta-like + + def test_tdi_iadd_timedeltalike(self, two_hours, box_with_array): + # only test adding/sub offsets as + is now numeric + rng = timedelta_range("1 days", "10 days") + expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D") + + rng = tm.box_expected(rng, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + orig_rng = rng + rng += two_hours + tm.assert_equal(rng, expected) + if box_with_array is not Index: + # Check that operation is actually inplace + tm.assert_equal(orig_rng, expected) + + def test_tdi_isub_timedeltalike(self, two_hours, 
box_with_array): + # only test adding/sub offsets as - is now numeric + rng = timedelta_range("1 days", "10 days") + expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00") + + rng = tm.box_expected(rng, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + orig_rng = rng + rng -= two_hours + tm.assert_equal(rng, expected) + if box_with_array is not Index: + # Check that operation is actually inplace + tm.assert_equal(orig_rng, expected) + + # ------------------------------------------------------------- + + def test_tdi_ops_attributes(self): + rng = timedelta_range("2 days", periods=5, freq="2D", name="x") + + result = rng + 1 * rng.freq + exp = timedelta_range("4 days", periods=5, freq="2D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "2D" + + result = rng - 2 * rng.freq + exp = timedelta_range("-2 days", periods=5, freq="2D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "2D" + + result = rng * 2 + exp = timedelta_range("4 days", periods=5, freq="4D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "4D" + + result = rng / 2 + exp = timedelta_range("1 days", periods=5, freq="D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "D" + + result = -rng + exp = timedelta_range("-2 days", periods=5, freq="-2D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "-2D" + + rng = timedelta_range("-2 days", periods=5, freq="D", name="x") + + result = abs(rng) + exp = TimedeltaIndex( + ["2 days", "1 days", "0 days", "1 days", "2 days"], name="x" + ) + tm.assert_index_equal(result, exp) + assert result.freq is None + + +class TestAddSubNaTMasking: + # TODO: parametrize over boxes + + @pytest.mark.parametrize("str_ts", ["1950-01-01", "1980-01-01"]) + def test_tdarr_add_timestamp_nat_masking(self, box_with_array, str_ts): + # GH#17991 checking for overflow-masking with NaT + tdinat = pd.to_timedelta(["24658 days 11:15:00", "NaT"]) + tdobj = tm.box_expected(tdinat, box_with_array) + + ts = Timestamp(str_ts) + ts_variants = [ + ts, + ts.to_pydatetime(), + ts.to_datetime64().astype("datetime64[ns]"), + ts.to_datetime64().astype("datetime64[D]"), + ] + + for variant in ts_variants: + res = tdobj + variant + if box_with_array is DataFrame: + assert res.iloc[1, 1] is NaT + else: + assert res[1] is NaT + + def test_tdi_add_overflow(self): + # See GH#14068 + # preliminary test scalar analogue of vectorized tests below + # TODO: Make raised error message more informative and test + with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"): + pd.to_timedelta(106580, "D") + Timestamp("2000") + with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"): + Timestamp("2000") + pd.to_timedelta(106580, "D") + + _NaT = NaT._value + 1 + msg = "Overflow in int64 addition" + with pytest.raises(OverflowError, match=msg): + pd.to_timedelta([106580], "D") + Timestamp("2000") + with pytest.raises(OverflowError, match=msg): + Timestamp("2000") + pd.to_timedelta([106580], "D") + with pytest.raises(OverflowError, match=msg): + pd.to_timedelta([_NaT]) - Timedelta("1 days") + with pytest.raises(OverflowError, match=msg): + pd.to_timedelta(["5 days", _NaT]) - Timedelta("1 days") + with pytest.raises(OverflowError, match=msg): + ( + pd.to_timedelta([_NaT, "5 days", "1 hours"]) + - pd.to_timedelta(["7 seconds", _NaT, "4 hours"]) + ) + + # These should not overflow! 
+ exp = TimedeltaIndex([NaT]) + result = pd.to_timedelta([NaT]) - Timedelta("1 days") + tm.assert_index_equal(result, exp) + + exp = TimedeltaIndex(["4 days", NaT]) + result = pd.to_timedelta(["5 days", NaT]) - Timedelta("1 days") + tm.assert_index_equal(result, exp) + + exp = TimedeltaIndex([NaT, NaT, "5 hours"]) + result = pd.to_timedelta([NaT, "5 days", "1 hours"]) + pd.to_timedelta( + ["7 seconds", NaT, "4 hours"] + ) + tm.assert_index_equal(result, exp) + + +class TestTimedeltaArraylikeAddSubOps: + # Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__ + + def test_sub_nat_retain_unit(self): + ser = pd.to_timedelta(Series(["00:00:01"])).astype("m8[s]") + + result = ser - NaT + expected = Series([NaT], dtype="m8[s]") + tm.assert_series_equal(result, expected) + + # TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs + # parametrization+de-duplication + def test_timedelta_ops_with_missing_values(self): + # setup + s1 = pd.to_timedelta(Series(["00:00:01"])) + s2 = pd.to_timedelta(Series(["00:00:02"])) + + msg = r"dtype datetime64\[ns\] cannot be converted to timedelta64\[ns\]" + with pytest.raises(TypeError, match=msg): + # Passing datetime64-dtype data to TimedeltaIndex is no longer + # supported GH#29794 + pd.to_timedelta(Series([NaT])) # TODO: belongs elsewhere? + + sn = pd.to_timedelta(Series([NaT], dtype="m8[ns]")) + + df1 = DataFrame(["00:00:01"]).apply(pd.to_timedelta) + df2 = DataFrame(["00:00:02"]).apply(pd.to_timedelta) + with pytest.raises(TypeError, match=msg): + # Passing datetime64-dtype data to TimedeltaIndex is no longer + # supported GH#29794 + DataFrame([NaT]).apply(pd.to_timedelta) # TODO: belongs elsewhere? + + dfn = DataFrame([NaT._value]).apply(pd.to_timedelta) + + scalar1 = pd.to_timedelta("00:00:01") + scalar2 = pd.to_timedelta("00:00:02") + timedelta_NaT = pd.to_timedelta("NaT") + + actual = scalar1 + scalar1 + assert actual == scalar2 + actual = scalar2 - scalar1 + assert actual == scalar1 + + actual = s1 + s1 + tm.assert_series_equal(actual, s2) + actual = s2 - s1 + tm.assert_series_equal(actual, s1) + + actual = s1 + scalar1 + tm.assert_series_equal(actual, s2) + actual = scalar1 + s1 + tm.assert_series_equal(actual, s2) + actual = s2 - scalar1 + tm.assert_series_equal(actual, s1) + actual = -scalar1 + s2 + tm.assert_series_equal(actual, s1) + + actual = s1 + timedelta_NaT + tm.assert_series_equal(actual, sn) + actual = timedelta_NaT + s1 + tm.assert_series_equal(actual, sn) + actual = s1 - timedelta_NaT + tm.assert_series_equal(actual, sn) + actual = -timedelta_NaT + s1 + tm.assert_series_equal(actual, sn) + + msg = "unsupported operand type" + with pytest.raises(TypeError, match=msg): + s1 + np.nan + with pytest.raises(TypeError, match=msg): + np.nan + s1 + with pytest.raises(TypeError, match=msg): + s1 - np.nan + with pytest.raises(TypeError, match=msg): + -np.nan + s1 + + actual = s1 + NaT + tm.assert_series_equal(actual, sn) + actual = s2 - NaT + tm.assert_series_equal(actual, sn) + + actual = s1 + df1 + tm.assert_frame_equal(actual, df2) + actual = s2 - df1 + tm.assert_frame_equal(actual, df1) + actual = df1 + s1 + tm.assert_frame_equal(actual, df2) + actual = df2 - s1 + tm.assert_frame_equal(actual, df1) + + actual = df1 + df1 + tm.assert_frame_equal(actual, df2) + actual = df2 - df1 + tm.assert_frame_equal(actual, df1) + + actual = df1 + scalar1 + tm.assert_frame_equal(actual, df2) + actual = df2 - scalar1 + tm.assert_frame_equal(actual, df1) + + actual = df1 + timedelta_NaT + tm.assert_frame_equal(actual, dfn) + actual = df1 - 
timedelta_NaT + tm.assert_frame_equal(actual, dfn) + + msg = "cannot subtract a datelike from|unsupported operand type" + with pytest.raises(TypeError, match=msg): + df1 + np.nan + with pytest.raises(TypeError, match=msg): + df1 - np.nan + + actual = df1 + NaT # NaT is datetime, not timedelta + tm.assert_frame_equal(actual, dfn) + actual = df1 - NaT + tm.assert_frame_equal(actual, dfn) + + # TODO: moved from tests.series.test_operators, needs splitting, cleanup, + # de-duplication, box-parametrization... + def test_operators_timedelta64(self): + # series ops + v1 = pd.date_range("2012-1-1", periods=3, freq="D") + v2 = pd.date_range("2012-1-2", periods=3, freq="D") + rs = Series(v2) - Series(v1) + xp = Series(1e9 * 3600 * 24, rs.index).astype("int64").astype("timedelta64[ns]") + tm.assert_series_equal(rs, xp) + assert rs.dtype == "timedelta64[ns]" + + df = DataFrame({"A": v1}) + td = Series([timedelta(days=i) for i in range(3)]) + assert td.dtype == "timedelta64[ns]" + + # series on the rhs + result = df["A"] - df["A"].shift() + assert result.dtype == "timedelta64[ns]" + + result = df["A"] + td + assert result.dtype == "M8[ns]" + + # scalar Timestamp on rhs + maxa = df["A"].max() + assert isinstance(maxa, Timestamp) + + resultb = df["A"] - df["A"].max() + assert resultb.dtype == "timedelta64[ns]" + + # timestamp on lhs + result = resultb + df["A"] + values = [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")] + expected = Series(values, name="A") + tm.assert_series_equal(result, expected) + + # datetimes on rhs + result = df["A"] - datetime(2001, 1, 1) + expected = Series([timedelta(days=4017 + i) for i in range(3)], name="A") + tm.assert_series_equal(result, expected) + assert result.dtype == "m8[ns]" + + d = datetime(2001, 1, 1, 3, 4) + resulta = df["A"] - d + assert resulta.dtype == "m8[ns]" + + # roundtrip + resultb = resulta + d + tm.assert_series_equal(df["A"], resultb) + + # timedeltas on rhs + td = timedelta(days=1) + resulta = df["A"] + td + resultb = resulta - td + tm.assert_series_equal(resultb, df["A"]) + assert resultb.dtype == "M8[ns]" + + # roundtrip + td = timedelta(minutes=5, seconds=3) + resulta = df["A"] + td + resultb = resulta - td + tm.assert_series_equal(df["A"], resultb) + assert resultb.dtype == "M8[ns]" + + # inplace + value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1)) + rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1)) + assert rs[2] == value + + def test_timedelta64_ops_nat(self): + # GH 11349 + timedelta_series = Series([NaT, Timedelta("1s")]) + nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]") + single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]") + + # subtraction + tm.assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta) + tm.assert_series_equal(-NaT + timedelta_series, nat_series_dtype_timedelta) + + tm.assert_series_equal( + timedelta_series - single_nat_dtype_timedelta, nat_series_dtype_timedelta + ) + tm.assert_series_equal( + -single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta + ) + + # addition + tm.assert_series_equal( + nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta + ) + tm.assert_series_equal( + NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta + ) + + tm.assert_series_equal( + nat_series_dtype_timedelta + single_nat_dtype_timedelta, + nat_series_dtype_timedelta, + ) + tm.assert_series_equal( + single_nat_dtype_timedelta + nat_series_dtype_timedelta, + nat_series_dtype_timedelta, + ) + + 
tm.assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta) + tm.assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta) + + tm.assert_series_equal( + timedelta_series + single_nat_dtype_timedelta, nat_series_dtype_timedelta + ) + tm.assert_series_equal( + single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta + ) + + tm.assert_series_equal( + nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta + ) + tm.assert_series_equal( + NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta + ) + + tm.assert_series_equal( + nat_series_dtype_timedelta + single_nat_dtype_timedelta, + nat_series_dtype_timedelta, + ) + tm.assert_series_equal( + single_nat_dtype_timedelta + nat_series_dtype_timedelta, + nat_series_dtype_timedelta, + ) + + # multiplication + tm.assert_series_equal( + nat_series_dtype_timedelta * 1.0, nat_series_dtype_timedelta + ) + tm.assert_series_equal( + 1.0 * nat_series_dtype_timedelta, nat_series_dtype_timedelta + ) + + tm.assert_series_equal(timedelta_series * 1, timedelta_series) + tm.assert_series_equal(1 * timedelta_series, timedelta_series) + + tm.assert_series_equal(timedelta_series * 1.5, Series([NaT, Timedelta("1.5s")])) + tm.assert_series_equal(1.5 * timedelta_series, Series([NaT, Timedelta("1.5s")])) + + tm.assert_series_equal(timedelta_series * np.nan, nat_series_dtype_timedelta) + tm.assert_series_equal(np.nan * timedelta_series, nat_series_dtype_timedelta) + + # division + tm.assert_series_equal(timedelta_series / 2, Series([NaT, Timedelta("0.5s")])) + tm.assert_series_equal(timedelta_series / 2.0, Series([NaT, Timedelta("0.5s")])) + tm.assert_series_equal(timedelta_series / np.nan, nat_series_dtype_timedelta) + + # ------------------------------------------------------------- + # Binary operations td64 arraylike and datetime-like + + @pytest.mark.parametrize("cls", [Timestamp, datetime, np.datetime64]) + def test_td64arr_add_sub_datetimelike_scalar( + self, cls, box_with_array, tz_naive_fixture + ): + # GH#11925, GH#29558, GH#23215 + tz = tz_naive_fixture + + dt_scalar = Timestamp("2012-01-01", tz=tz) + if cls is datetime: + ts = dt_scalar.to_pydatetime() + elif cls is np.datetime64: + if tz_naive_fixture is not None: + pytest.skip(f"{cls} doesn't support {tz_naive_fixture}") + ts = dt_scalar.to_datetime64() + else: + ts = dt_scalar + + tdi = timedelta_range("1 day", periods=3) + expected = pd.date_range("2012-01-02", periods=3, tz=tz) + + tdarr = tm.box_expected(tdi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + tm.assert_equal(ts + tdarr, expected) + tm.assert_equal(tdarr + ts, expected) + + expected2 = pd.date_range("2011-12-31", periods=3, freq="-1D", tz=tz) + expected2 = tm.box_expected(expected2, box_with_array) + + tm.assert_equal(ts - tdarr, expected2) + tm.assert_equal(ts + (-tdarr), expected2) + + msg = "cannot subtract a datelike" + with pytest.raises(TypeError, match=msg): + tdarr - ts + + def test_td64arr_add_datetime64_nat(self, box_with_array): + # GH#23215 + other = np.datetime64("NaT") + + tdi = timedelta_range("1 day", periods=3) + expected = DatetimeIndex(["NaT", "NaT", "NaT"]) + + tdser = tm.box_expected(tdi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + tm.assert_equal(tdser + other, expected) + tm.assert_equal(other + tdser, expected) + + def test_td64arr_sub_dt64_array(self, box_with_array): + dti = pd.date_range("2016-01-01", periods=3) + tdi = TimedeltaIndex(["-1 Day"] * 3) + dtarr = dti.values + expected = 
DatetimeIndex(dtarr) - tdi + + tdi = tm.box_expected(tdi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + msg = "cannot subtract a datelike from" + with pytest.raises(TypeError, match=msg): + tdi - dtarr + + # TimedeltaIndex.__rsub__ + result = dtarr - tdi + tm.assert_equal(result, expected) + + def test_td64arr_add_dt64_array(self, box_with_array): + dti = pd.date_range("2016-01-01", periods=3) + tdi = TimedeltaIndex(["-1 Day"] * 3) + dtarr = dti.values + expected = DatetimeIndex(dtarr) + tdi + + tdi = tm.box_expected(tdi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = tdi + dtarr + tm.assert_equal(result, expected) + result = dtarr + tdi + tm.assert_equal(result, expected) + + # ------------------------------------------------------------------ + # Invalid __add__/__sub__ operations + + @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"]) + @pytest.mark.parametrize("tdi_freq", [None, "H"]) + def test_td64arr_sub_periodlike( + self, box_with_array, box_with_array2, tdi_freq, pi_freq + ): + # GH#20049 subtracting PeriodIndex should raise TypeError + tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq) + dti = Timestamp("2018-03-07 17:16:40") + tdi + pi = dti.to_period(pi_freq) + per = pi[0] + + tdi = tm.box_expected(tdi, box_with_array) + pi = tm.box_expected(pi, box_with_array2) + msg = "cannot subtract|unsupported operand type" + with pytest.raises(TypeError, match=msg): + tdi - pi + + # GH#13078 subtraction of Period scalar not supported + with pytest.raises(TypeError, match=msg): + tdi - per + + @pytest.mark.parametrize( + "other", + [ + # GH#12624 for str case + "a", + # GH#19123 + 1, + 1.5, + np.array(2), + ], + ) + def test_td64arr_addsub_numeric_scalar_invalid(self, box_with_array, other): + # vector-like others are tested in test_td64arr_add_sub_numeric_arr_invalid + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + tdarr = tm.box_expected(tdser, box_with_array) + + assert_invalid_addsub_type(tdarr, other) + + @pytest.mark.parametrize( + "vec", + [ + np.array([1, 2, 3]), + Index([1, 2, 3]), + Series([1, 2, 3]), + DataFrame([[1, 2, 3]]), + ], + ids=lambda x: type(x).__name__, + ) + def test_td64arr_addsub_numeric_arr_invalid( + self, box_with_array, vec, any_real_numpy_dtype + ): + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + tdarr = tm.box_expected(tdser, box_with_array) + + vector = vec.astype(any_real_numpy_dtype) + assert_invalid_addsub_type(tdarr, vector) + + def test_td64arr_add_sub_int(self, box_with_array, one): + # Variants of `one` for #19012, deprecated GH#22535 + rng = timedelta_range("1 days 09:00:00", freq="H", periods=10) + tdarr = tm.box_expected(rng, box_with_array) + + msg = "Addition/subtraction of integers" + assert_invalid_addsub_type(tdarr, one, msg) + + # TODO: get inplace ops into assert_invalid_addsub_type + with pytest.raises(TypeError, match=msg): + tdarr += one + with pytest.raises(TypeError, match=msg): + tdarr -= one + + def test_td64arr_add_sub_integer_array(self, box_with_array): + # GH#19959, deprecated GH#22535 + # GH#22696 for DataFrame case, check that we don't dispatch to numpy + # implementation, which treats int64 as m8[ns] + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + rng = timedelta_range("1 days 09:00:00", freq="H", periods=3) + tdarr = tm.box_expected(rng, box) + other = tm.box_expected([4, 3, 2], xbox) + + msg = "Addition/subtraction of integers and integer-arrays" + assert_invalid_addsub_type(tdarr, 
other, msg) + + def test_td64arr_addsub_integer_array_no_freq(self, box_with_array): + # GH#19959 + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + tdi = TimedeltaIndex(["1 Day", "NaT", "3 Hours"]) + tdarr = tm.box_expected(tdi, box) + other = tm.box_expected([14, -1, 16], xbox) + + msg = "Addition/subtraction of integers" + assert_invalid_addsub_type(tdarr, other, msg) + + # ------------------------------------------------------------------ + # Operations with timedelta-like others + + def test_td64arr_add_sub_td64_array(self, box_with_array): + box = box_with_array + dti = pd.date_range("2016-01-01", periods=3) + tdi = dti - dti.shift(1) + tdarr = tdi.values + + expected = 2 * tdi + tdi = tm.box_expected(tdi, box) + expected = tm.box_expected(expected, box) + + result = tdi + tdarr + tm.assert_equal(result, expected) + result = tdarr + tdi + tm.assert_equal(result, expected) + + expected_sub = 0 * tdi + result = tdi - tdarr + tm.assert_equal(result, expected_sub) + result = tdarr - tdi + tm.assert_equal(result, expected_sub) + + def test_td64arr_add_sub_tdi(self, box_with_array, names): + # GH#17250 make sure result dtype is correct + # GH#19043 make sure names are propagated correctly + box = box_with_array + exname = get_expected_name(box, names) + + tdi = TimedeltaIndex(["0 days", "1 day"], name=names[1]) + tdi = np.array(tdi) if box in [tm.to_array, pd.array] else tdi + ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[0]) + expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)], name=exname) + + ser = tm.box_expected(ser, box) + expected = tm.box_expected(expected, box) + + result = tdi + ser + tm.assert_equal(result, expected) + assert_dtype(result, "timedelta64[ns]") + + result = ser + tdi + tm.assert_equal(result, expected) + assert_dtype(result, "timedelta64[ns]") + + expected = Series( + [Timedelta(hours=-3), Timedelta(days=1, hours=-4)], name=exname + ) + expected = tm.box_expected(expected, box) + + result = tdi - ser + tm.assert_equal(result, expected) + assert_dtype(result, "timedelta64[ns]") + + result = ser - tdi + tm.assert_equal(result, -expected) + assert_dtype(result, "timedelta64[ns]") + + @pytest.mark.parametrize("tdnat", [np.timedelta64("NaT"), NaT]) + def test_td64arr_add_sub_td64_nat(self, box_with_array, tdnat): + # GH#18808, GH#23320 special handling for timedelta64("NaT") + box = box_with_array + tdi = TimedeltaIndex([NaT, Timedelta("1s")]) + expected = TimedeltaIndex(["NaT"] * 2) + + obj = tm.box_expected(tdi, box) + expected = tm.box_expected(expected, box) + + result = obj + tdnat + tm.assert_equal(result, expected) + result = tdnat + obj + tm.assert_equal(result, expected) + result = obj - tdnat + tm.assert_equal(result, expected) + result = tdnat - obj + tm.assert_equal(result, expected) + + def test_td64arr_add_timedeltalike(self, two_hours, box_with_array): + # only test adding/sub offsets as + is now numeric + # GH#10699 for Tick cases + box = box_with_array + rng = timedelta_range("1 days", "10 days") + expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D") + rng = tm.box_expected(rng, box) + expected = tm.box_expected(expected, box) + + result = rng + two_hours + tm.assert_equal(result, expected) + + result = two_hours + rng + tm.assert_equal(result, expected) + + def test_td64arr_sub_timedeltalike(self, two_hours, box_with_array): + # only test adding/sub offsets as - is now numeric + # GH#10699 for Tick cases + box = box_with_array + rng = timedelta_range("1 days", "10 days") 
+ expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00") + + rng = tm.box_expected(rng, box) + expected = tm.box_expected(expected, box) + + result = rng - two_hours + tm.assert_equal(result, expected) + + result = two_hours - rng + tm.assert_equal(result, -expected) + + # ------------------------------------------------------------------ + # __add__/__sub__ with DateOffsets and arrays of DateOffsets + + def test_td64arr_add_sub_offset_index(self, names, box_with_array): + # GH#18849, GH#19744 + box = box_with_array + exname = get_expected_name(box, names) + + tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0]) + other = Index([offsets.Hour(n=1), offsets.Minute(n=-2)], name=names[1]) + other = np.array(other) if box in [tm.to_array, pd.array] else other + + expected = TimedeltaIndex( + [tdi[n] + other[n] for n in range(len(tdi))], freq="infer", name=exname + ) + expected_sub = TimedeltaIndex( + [tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=exname + ) + + tdi = tm.box_expected(tdi, box) + expected = tm.box_expected(expected, box).astype(object, copy=False) + expected_sub = tm.box_expected(expected_sub, box).astype(object, copy=False) + + with tm.assert_produces_warning(PerformanceWarning): + res = tdi + other + tm.assert_equal(res, expected) + + with tm.assert_produces_warning(PerformanceWarning): + res2 = other + tdi + tm.assert_equal(res2, expected) + + with tm.assert_produces_warning(PerformanceWarning): + res_sub = tdi - other + tm.assert_equal(res_sub, expected_sub) + + def test_td64arr_add_sub_offset_array(self, box_with_array): + # GH#18849, GH#18824 + box = box_with_array + tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"]) + other = np.array([offsets.Hour(n=1), offsets.Minute(n=-2)]) + + expected = TimedeltaIndex( + [tdi[n] + other[n] for n in range(len(tdi))], freq="infer" + ) + expected_sub = TimedeltaIndex( + [tdi[n] - other[n] for n in range(len(tdi))], freq="infer" + ) + + tdi = tm.box_expected(tdi, box) + expected = tm.box_expected(expected, box).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + res = tdi + other + tm.assert_equal(res, expected) + + with tm.assert_produces_warning(PerformanceWarning): + res2 = other + tdi + tm.assert_equal(res2, expected) + + expected_sub = tm.box_expected(expected_sub, box_with_array).astype(object) + with tm.assert_produces_warning(PerformanceWarning): + res_sub = tdi - other + tm.assert_equal(res_sub, expected_sub) + + def test_td64arr_with_offset_series(self, names, box_with_array): + # GH#18849 + box = box_with_array + box2 = Series if box in [Index, tm.to_array, pd.array] else box + exname = get_expected_name(box, names) + + tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0]) + other = Series([offsets.Hour(n=1), offsets.Minute(n=-2)], name=names[1]) + + expected_add = Series( + [tdi[n] + other[n] for n in range(len(tdi))], name=exname, dtype=object + ) + obj = tm.box_expected(tdi, box) + expected_add = tm.box_expected(expected_add, box2).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + res = obj + other + tm.assert_equal(res, expected_add) + + with tm.assert_produces_warning(PerformanceWarning): + res2 = other + obj + tm.assert_equal(res2, expected_add) + + expected_sub = Series( + [tdi[n] - other[n] for n in range(len(tdi))], name=exname, dtype=object + ) + expected_sub = tm.box_expected(expected_sub, box2).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + res3 = obj - other + 
tm.assert_equal(res3, expected_sub) + + @pytest.mark.parametrize("obox", [np.array, Index, Series]) + def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_with_array): + # GH#18824 + tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"]) + tdi = tm.box_expected(tdi, box_with_array) + + anchored = obox([offsets.MonthEnd(), offsets.Day(n=2)]) + + # addition/subtraction ops with anchored offsets should issue + # a PerformanceWarning and _then_ raise a TypeError. + msg = "has incorrect type|cannot add the type MonthEnd" + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + tdi + anchored + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + anchored + tdi + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + tdi - anchored + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + anchored - tdi + + # ------------------------------------------------------------------ + # Unsorted + + def test_td64arr_add_sub_object_array(self, box_with_array): + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + tdi = timedelta_range("1 day", periods=3, freq="D") + tdarr = tm.box_expected(tdi, box) + + other = np.array([Timedelta(days=1), offsets.Day(2), Timestamp("2000-01-04")]) + + with tm.assert_produces_warning(PerformanceWarning): + result = tdarr + other + + expected = Index( + [Timedelta(days=2), Timedelta(days=4), Timestamp("2000-01-07")] + ) + expected = tm.box_expected(expected, xbox).astype(object) + tm.assert_equal(result, expected) + + msg = "unsupported operand type|cannot subtract a datelike" + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + tdarr - other + + with tm.assert_produces_warning(PerformanceWarning): + result = other - tdarr + + expected = Index([Timedelta(0), Timedelta(0), Timestamp("2000-01-01")]) + expected = tm.box_expected(expected, xbox).astype(object) + tm.assert_equal(result, expected) + + +class TestTimedeltaArraylikeMulDivOps: + # Tests for timedelta64[ns] + # __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__ + + # ------------------------------------------------------------------ + # Multiplication + # organized with scalar others first, then array-like + + def test_td64arr_mul_int(self, box_with_array): + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box_with_array) + + result = idx * 1 + tm.assert_equal(result, idx) + + result = 1 * idx + tm.assert_equal(result, idx) + + def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array): + rng = timedelta_range("1 days", "10 days", name="foo") + rng = tm.box_expected(rng, box_with_array) + msg = "argument must be an integer|cannot use operands with types dtype" + with pytest.raises(TypeError, match=msg): + rng * two_hours + + def test_tdi_mul_int_array_zerodim(self, box_with_array): + rng5 = np.arange(5, dtype="int64") + idx = TimedeltaIndex(rng5) + expected = TimedeltaIndex(rng5 * 5) + + idx = tm.box_expected(idx, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = idx * np.array(5, dtype="int64") + tm.assert_equal(result, expected) + + def test_tdi_mul_int_array(self, box_with_array): + rng5 = np.arange(5, dtype="int64") + idx = TimedeltaIndex(rng5) + expected = TimedeltaIndex(rng5**2) + + idx = tm.box_expected(idx, box_with_array) + expected = 
tm.box_expected(expected, box_with_array) + + result = idx * rng5 + tm.assert_equal(result, expected) + + def test_tdi_mul_int_series(self, box_with_array): + box = box_with_array + xbox = Series if box in [Index, tm.to_array, pd.array] else box + + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + expected = TimedeltaIndex(np.arange(5, dtype="int64") ** 2) + + idx = tm.box_expected(idx, box) + expected = tm.box_expected(expected, xbox) + + result = idx * Series(np.arange(5, dtype="int64")) + tm.assert_equal(result, expected) + + def test_tdi_mul_float_series(self, box_with_array): + box = box_with_array + xbox = Series if box in [Index, tm.to_array, pd.array] else box + + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box) + + rng5f = np.arange(5, dtype="float64") + expected = TimedeltaIndex(rng5f * (rng5f + 1.0)) + expected = tm.box_expected(expected, xbox) + + result = idx * Series(rng5f + 1.0) + tm.assert_equal(result, expected) + + # TODO: Put Series/DataFrame in others? + @pytest.mark.parametrize( + "other", + [ + np.arange(1, 11), + Index(np.arange(1, 11), np.int64), + Index(range(1, 11), np.uint64), + Index(range(1, 11), np.float64), + pd.RangeIndex(1, 11), + ], + ids=lambda x: type(x).__name__, + ) + def test_tdi_rmul_arraylike(self, other, box_with_array): + box = box_with_array + + tdi = TimedeltaIndex(["1 Day"] * 10) + expected = timedelta_range("1 days", "10 days")._with_freq(None) + + tdi = tm.box_expected(tdi, box) + xbox = get_upcast_box(tdi, other) + + expected = tm.box_expected(expected, xbox) + + result = other * tdi + tm.assert_equal(result, expected) + commute = tdi * other + tm.assert_equal(commute, expected) + + # ------------------------------------------------------------------ + # __div__, __rdiv__ + + def test_td64arr_div_nat_invalid(self, box_with_array): + # don't allow division by NaT (maybe could in the future) + rng = timedelta_range("1 days", "10 days", name="foo") + rng = tm.box_expected(rng, box_with_array) + + with pytest.raises(TypeError, match="unsupported operand type"): + rng / NaT + with pytest.raises(TypeError, match="Cannot divide NaTType by"): + NaT / rng + + dt64nat = np.datetime64("NaT", "ns") + msg = "|".join( + [ + # 'divide' on npdev as of 2021-12-18 + "ufunc '(true_divide|divide)' cannot use operands", + "cannot perform __r?truediv__", + "Cannot divide datetime64 by TimedeltaArray", + ] + ) + with pytest.raises(TypeError, match=msg): + rng / dt64nat + with pytest.raises(TypeError, match=msg): + dt64nat / rng + + def test_td64arr_div_td64nat(self, box_with_array): + # GH#23829 + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + rng = timedelta_range("1 days", "10 days") + rng = tm.box_expected(rng, box) + + other = np.timedelta64("NaT") + + expected = np.array([np.nan] * 10) + expected = tm.box_expected(expected, xbox) + + result = rng / other + tm.assert_equal(result, expected) + + result = other / rng + tm.assert_equal(result, expected) + + def test_td64arr_div_int(self, box_with_array): + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box_with_array) + + result = idx / 1 + tm.assert_equal(result, idx) + + with pytest.raises(TypeError, match="Cannot divide"): + # GH#23829 + 1 / idx + + def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array): + # GH#20088, GH#22163 ensure DataFrame returns correct dtype + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + rng = timedelta_range("1 days", "10 days", name="foo") + expected 
= Index((np.arange(10) + 1) * 12, dtype=np.float64, name="foo")
+
+        rng = tm.box_expected(rng, box)
+        expected = tm.box_expected(expected, xbox)
+
+        result = rng / two_hours
+        tm.assert_equal(result, expected)
+
+        result = two_hours / rng
+        expected = 1 / expected
+        tm.assert_equal(result, expected)
+
+    @pytest.mark.parametrize("m", [1, 3, 10])
+    @pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"])
+    def test_td64arr_div_td64_scalar(self, m, unit, box_with_array):
+        box = box_with_array
+        xbox = np.ndarray if box is pd.array else box
+
+        ser = Series([Timedelta(days=59)] * 3)
+        ser[2] = np.nan
+        flat = ser
+        ser = tm.box_expected(ser, box)
+
+        # op
+        expected = Series([x / np.timedelta64(m, unit) for x in flat])
+        expected = tm.box_expected(expected, xbox)
+        result = ser / np.timedelta64(m, unit)
+        tm.assert_equal(result, expected)
+
+        # reverse op
+        expected = Series([Timedelta(np.timedelta64(m, unit)) / x for x in flat])
+        expected = tm.box_expected(expected, xbox)
+        result = np.timedelta64(m, unit) / ser
+        tm.assert_equal(result, expected)
+
+    def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours, box_with_array):
+        box = box_with_array
+        xbox = np.ndarray if box is pd.array else box
+
+        rng = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo")
+        expected = Index([12, np.nan, 24], dtype=np.float64, name="foo")
+
+        rng = tm.box_expected(rng, box)
+        expected = tm.box_expected(expected, xbox)
+
+        result = rng / two_hours
+        tm.assert_equal(result, expected)
+
+        result = two_hours / rng
+        expected = 1 / expected
+        tm.assert_equal(result, expected)
+
+    def test_td64arr_div_td64_ndarray(self, box_with_array):
+        # GH#22631
+        box = box_with_array
+        xbox = np.ndarray if box is pd.array else box
+
+        rng = TimedeltaIndex(["1 days", NaT, "2 days"])
+        expected = Index([12, np.nan, 24], dtype=np.float64)
+
+        rng = tm.box_expected(rng, box)
+        expected = tm.box_expected(expected, xbox)
+
+        other = np.array([2, 4, 2], dtype="m8[h]")
+        result = rng / other
+        tm.assert_equal(result, expected)
+
+        result = rng / tm.box_expected(other, box)
+        tm.assert_equal(result, expected)
+
+        result = rng / other.astype(object)
+        tm.assert_equal(result, expected.astype(object))
+
+        result = rng / list(other)
+        tm.assert_equal(result, expected)
+
+        # reversed op
+        expected = 1 / expected
+        result = other / rng
+        tm.assert_equal(result, expected)
+
+        result = tm.box_expected(other, box) / rng
+        tm.assert_equal(result, expected)
+
+        result = other.astype(object) / rng
+        tm.assert_equal(result, expected)
+
+        result = list(other) / rng
+        tm.assert_equal(result, expected)
+
+    def test_tdarr_div_length_mismatch(self, box_with_array):
+        rng = TimedeltaIndex(["1 days", NaT, "2 days"])
+        mismatched = [1, 2, 3, 4]
+
+        rng = tm.box_expected(rng, box_with_array)
+        msg = "Cannot divide vectors|Unable to coerce to Series"
+        for obj in [mismatched, mismatched[:2]]:
+            # one shorter, one longer
+            for other in [obj, np.array(obj), Index(obj)]:
+                with pytest.raises(ValueError, match=msg):
+                    rng / other
+                with pytest.raises(ValueError, match=msg):
+                    other / rng
+
+    def test_td64_div_object_mixed_result(self, box_with_array):
+        # Case where having a NaT in the result instead of timedelta64("NaT")
+        # is misleading
+        orig = timedelta_range("1 Day", periods=3).insert(1, NaT)
+        tdi = tm.box_expected(orig, box_with_array, transpose=False)
+
+        other = np.array([orig[0], 1.5, 2.0, orig[2]], dtype=object)
+        other = tm.box_expected(other, box_with_array, transpose=False)
+
+        res = tdi / other
+
+        expected = Index([1.0,
np.timedelta64("NaT", "ns"), orig[0], 1.5], dtype=object) + expected = tm.box_expected(expected, box_with_array, transpose=False) + if isinstance(expected, NumpyExtensionArray): + expected = expected.to_numpy() + tm.assert_equal(res, expected) + if box_with_array is DataFrame: + # We have a np.timedelta64(NaT), not pd.NaT + assert isinstance(res.iloc[1, 0], np.timedelta64) + + res = tdi // other + + expected = Index([1, np.timedelta64("NaT", "ns"), orig[0], 1], dtype=object) + expected = tm.box_expected(expected, box_with_array, transpose=False) + if isinstance(expected, NumpyExtensionArray): + expected = expected.to_numpy() + tm.assert_equal(res, expected) + if box_with_array is DataFrame: + # We have a np.timedelta64(NaT), not pd.NaT + assert isinstance(res.iloc[1, 0], np.timedelta64) + + # ------------------------------------------------------------------ + # __floordiv__, __rfloordiv__ + + def test_td64arr_floordiv_td64arr_with_nat( + self, box_with_array, using_array_manager + ): + # GH#35529 + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + left = Series([1000, 222330, 30], dtype="timedelta64[ns]") + right = Series([1000, 222330, None], dtype="timedelta64[ns]") + + left = tm.box_expected(left, box) + right = tm.box_expected(right, box) + + expected = np.array([1.0, 1.0, np.nan], dtype=np.float64) + expected = tm.box_expected(expected, xbox) + if box is DataFrame and using_array_manager: + # INFO(ArrayManager) floordiv returns integer, and ArrayManager + # performs ops column-wise and thus preserves int64 dtype for + # columns without missing values + expected[[0, 1]] = expected[[0, 1]].astype("int64") + + with tm.maybe_produces_warning( + RuntimeWarning, box is pd.array, check_stacklevel=False + ): + result = left // right + + tm.assert_equal(result, expected) + + # case that goes through __rfloordiv__ with arraylike + with tm.maybe_produces_warning( + RuntimeWarning, box is pd.array, check_stacklevel=False + ): + result = np.asarray(left) // right + tm.assert_equal(result, expected) + + @pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning") + def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td): + # GH#18831, GH#19125 + box = box_with_array + xbox = np.ndarray if box is pd.array else box + td = Timedelta("5m3s") # i.e. 
(scalar_td - 1sec) / 2
+
+        td1 = Series([td, td, NaT], dtype="m8[ns]")
+        td1 = tm.box_expected(td1, box, transpose=False)
+
+        expected = Series([0, 0, np.nan])
+        expected = tm.box_expected(expected, xbox, transpose=False)
+
+        result = td1 // scalar_td
+        tm.assert_equal(result, expected)
+
+        # Reversed op
+        expected = Series([2, 2, np.nan])
+        expected = tm.box_expected(expected, xbox, transpose=False)
+
+        result = scalar_td // td1
+        tm.assert_equal(result, expected)
+
+        # same thing, but let's be explicit about calling __rfloordiv__
+        result = td1.__rfloordiv__(scalar_td)
+        tm.assert_equal(result, expected)
+
+    def test_td64arr_floordiv_int(self, box_with_array):
+        idx = TimedeltaIndex(np.arange(5, dtype="int64"))
+        idx = tm.box_expected(idx, box_with_array)
+        result = idx // 1
+        tm.assert_equal(result, idx)
+
+        pattern = "floor_divide cannot use operands|Cannot divide int by Timedelta*"
+        with pytest.raises(TypeError, match=pattern):
+            1 // idx
+
+    # ------------------------------------------------------------------
+    # mod, divmod
+    # TODO: operations with timedelta-like arrays, numeric arrays,
+    # reversed ops
+
+    def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
+        tdi = timedelta_range("1 Day", "9 days")
+        tdarr = tm.box_expected(tdi, box_with_array)
+
+        expected = TimedeltaIndex(["1 Day", "2 Days", "0 Days"] * 3)
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = tdarr % three_days
+        tm.assert_equal(result, expected)
+
+        warn = None
+        if box_with_array is DataFrame and isinstance(three_days, pd.DateOffset):
+            warn = PerformanceWarning
+            # TODO: expected being object here is a result of DataFrame.__divmod__
+            # being defined in a naive way that does not dispatch to the underlying
+            # array's __divmod__
+            expected = expected.astype(object)
+
+        with tm.assert_produces_warning(warn):
+            result = divmod(tdarr, three_days)
+
+        tm.assert_equal(result[1], expected)
+        tm.assert_equal(result[0], tdarr // three_days)
+
+    def test_td64arr_mod_int(self, box_with_array):
+        tdi = timedelta_range("1 ns", "10 ns", periods=10)
+        tdarr = tm.box_expected(tdi, box_with_array)
+
+        expected = TimedeltaIndex(["1 ns", "0 ns"] * 5)
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = tdarr % 2
+        tm.assert_equal(result, expected)
+
+        msg = "Cannot divide int by"
+        with pytest.raises(TypeError, match=msg):
+            2 % tdarr
+
+        result = divmod(tdarr, 2)
+        tm.assert_equal(result[1], expected)
+        tm.assert_equal(result[0], tdarr // 2)
+
+    def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
+        tdi = timedelta_range("1 Day", "9 days")
+        tdarr = tm.box_expected(tdi, box_with_array)
+
+        expected = ["0 Days", "1 Day", "0 Days"] + ["3 Days"] * 6
+        expected = TimedeltaIndex(expected)
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = three_days % tdarr
+        tm.assert_equal(result, expected)
+
+        result = divmod(three_days, tdarr)
+        tm.assert_equal(result[1], expected)
+        tm.assert_equal(result[0], three_days // tdarr)
+
+    # ------------------------------------------------------------------
+    # Operations with invalid others
+
+    def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
+        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
+        td1.iloc[2] = np.nan
+
+        td1 = tm.box_expected(td1, box_with_array)
+
+        # check that we are getting a TypeError
+        # with 'operate' (from core/ops.py) for the ops that are not
+        # defined
+        pattern = "operate|unsupported|cannot|not supported"
+        with pytest.raises(TypeError, match=pattern):
+            td1 * scalar_td
+
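+        # Editorial note (not upstream code): timedelta * timedelta has no
+        # well-defined unit (the product would be time squared), so the
+        # scalar case is expected to raise as well, roughly:
+        # >>> Timedelta("1 Day") * Timedelta("1 Day")
+        # TypeError: unsupported operand type(s) for *: 'Timedelta' and 'Timedelta'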
with pytest.raises(TypeError, match=pattern): + scalar_td * td1 + + def test_td64arr_mul_too_short_raises(self, box_with_array): + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box_with_array) + msg = "|".join( + [ + "cannot use operands with types dtype", + "Cannot multiply with unequal lengths", + "Unable to coerce to Series", + ] + ) + with pytest.raises(TypeError, match=msg): + # length check before dtype check + idx * idx[:3] + with pytest.raises(ValueError, match=msg): + idx * np.array([1, 2]) + + def test_td64arr_mul_td64arr_raises(self, box_with_array): + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box_with_array) + msg = "cannot use operands with types dtype" + with pytest.raises(TypeError, match=msg): + idx * idx + + # ------------------------------------------------------------------ + # Operations with numeric others + + def test_td64arr_mul_numeric_scalar(self, box_with_array, one): + # GH#4521 + # divide/multiply by integers + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + expected = Series(["-59 Days", "-59 Days", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = tdser * (-one) + tm.assert_equal(result, expected) + result = (-one) * tdser + tm.assert_equal(result, expected) + + expected = Series(["118 Days", "118 Days", "NaT"], dtype="timedelta64[ns]") + expected = tm.box_expected(expected, box_with_array) + + result = tdser * (2 * one) + tm.assert_equal(result, expected) + result = (2 * one) * tdser + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)]) + def test_td64arr_div_numeric_scalar(self, box_with_array, two): + # GH#4521 + # divide/multiply by integers + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = tdser / two + tm.assert_equal(result, expected) + + with pytest.raises(TypeError, match="Cannot divide"): + two / tdser + + @pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)]) + def test_td64arr_floordiv_numeric_scalar(self, box_with_array, two): + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = tdser // two + tm.assert_equal(result, expected) + + with pytest.raises(TypeError, match="Cannot divide"): + two // tdser + + @pytest.mark.parametrize( + "vector", + [np.array([20, 30, 40]), Index([20, 30, 40]), Series([20, 30, 40])], + ids=lambda x: type(x).__name__, + ) + def test_td64arr_rmul_numeric_array( + self, + box_with_array, + vector, + any_real_numpy_dtype, + ): + # GH#4521 + # divide/multiply by integers + + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + vector = vector.astype(any_real_numpy_dtype) + + expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + xbox = get_upcast_box(tdser, vector) + + expected = tm.box_expected(expected, xbox) + + result = tdser * vector + tm.assert_equal(result, expected) + + result = vector * tdser + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "vector", + 
[np.array([20, 30, 40]), Index([20, 30, 40]), Series([20, 30, 40])], + ids=lambda x: type(x).__name__, + ) + def test_td64arr_div_numeric_array( + self, box_with_array, vector, any_real_numpy_dtype + ): + # GH#4521 + # divide/multiply by integers + + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + vector = vector.astype(any_real_numpy_dtype) + + expected = Series(["2.95D", "1D 23H 12m", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + xbox = get_upcast_box(tdser, vector) + expected = tm.box_expected(expected, xbox) + + result = tdser / vector + tm.assert_equal(result, expected) + + pattern = "|".join( + [ + "true_divide'? cannot use operands", + "cannot perform __div__", + "cannot perform __truediv__", + "unsupported operand", + "Cannot divide", + "ufunc 'divide' cannot use operands with types", + ] + ) + with pytest.raises(TypeError, match=pattern): + vector / tdser + + result = tdser / vector.astype(object) + if box_with_array is DataFrame: + expected = [tdser.iloc[0, n] / vector[n] for n in range(len(vector))] + expected = tm.box_expected(expected, xbox).astype(object) + # We specifically expect timedelta64("NaT") here, not pd.NA + msg = "The 'downcast' keyword in fillna" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected[2] = expected[2].fillna( + np.timedelta64("NaT", "ns"), downcast=False + ) + else: + expected = [tdser[n] / vector[n] for n in range(len(tdser))] + expected = [ + x if x is not NaT else np.timedelta64("NaT", "ns") for x in expected + ] + if xbox is tm.to_array: + expected = tm.to_array(expected).astype(object) + else: + expected = xbox(expected, dtype=object) + + tm.assert_equal(result, expected) + + with pytest.raises(TypeError, match=pattern): + vector.astype(object) / tdser + + def test_td64arr_mul_int_series(self, box_with_array, names): + # GH#19042 test for correct name attachment + box = box_with_array + exname = get_expected_name(box, names) + + tdi = TimedeltaIndex( + ["0days", "1day", "2days", "3days", "4days"], name=names[0] + ) + # TODO: Should we be parametrizing over types for `ser` too? + ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1]) + + expected = Series( + ["0days", "1day", "4days", "9days", "16days"], + dtype="timedelta64[ns]", + name=exname, + ) + + tdi = tm.box_expected(tdi, box) + xbox = get_upcast_box(tdi, ser) + + expected = tm.box_expected(expected, xbox) + + result = ser * tdi + tm.assert_equal(result, expected) + + result = tdi * ser + tm.assert_equal(result, expected) + + # TODO: Should we be parametrizing over types for `ser` too? 
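+    # Editorial sketch (not upstream code) of the name-propagation rule the
+    # name-parametrized tests above and below exercise: pandas keeps a name
+    # only when both operands agree on it. Roughly:
+    # >>> tdi = TimedeltaIndex(["1 Day"], name="x")
+    # >>> (tdi * Series([2], name="x")).name   # matching names survive
+    # 'x'
+    # >>> (tdi * Series([2], name="y")).name is None  # mismatch -> no name
+    # True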
+ def test_float_series_rdiv_td64arr(self, box_with_array, names): + # GH#19042 test for correct name attachment + box = box_with_array + tdi = TimedeltaIndex( + ["0days", "1day", "2days", "3days", "4days"], name=names[0] + ) + ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1]) + + xname = names[2] if box not in [tm.to_array, pd.array] else names[1] + expected = Series( + [tdi[n] / ser[n] for n in range(len(ser))], + dtype="timedelta64[ns]", + name=xname, + ) + + tdi = tm.box_expected(tdi, box) + xbox = get_upcast_box(tdi, ser) + expected = tm.box_expected(expected, xbox) + + result = ser.__rtruediv__(tdi) + if box is DataFrame: + assert result is NotImplemented + else: + tm.assert_equal(result, expected) + + def test_td64arr_all_nat_div_object_dtype_numeric(self, box_with_array): + # GH#39750 make sure we infer the result as td64 + tdi = TimedeltaIndex([NaT, NaT]) + + left = tm.box_expected(tdi, box_with_array) + right = np.array([2, 2.0], dtype=object) + + tdnat = np.timedelta64("NaT", "ns") + expected = Index([tdnat] * 2, dtype=object) + if box_with_array is not Index: + expected = tm.box_expected(expected, box_with_array).astype(object) + if box_with_array in [Series, DataFrame]: + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = expected.fillna(tdnat, downcast=False) # GH#18463 + + result = left / right + tm.assert_equal(result, expected) + + result = left // right + tm.assert_equal(result, expected) + + +class TestTimedelta64ArrayLikeArithmetic: + # Arithmetic tests for timedelta64[ns] vectors fully parametrized over + # DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all arithmetic + # tests will eventually end up here. + + def test_td64arr_pow_invalid(self, scalar_td, box_with_array): + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + td1 = tm.box_expected(td1, box_with_array) + + # check that we are getting a TypeError + # with 'operate' (from core/ops.py) for the ops that are not + # defined + pattern = "operate|unsupported|cannot|not supported" + with pytest.raises(TypeError, match=pattern): + scalar_td**td1 + + with pytest.raises(TypeError, match=pattern): + td1**scalar_td + + +def test_add_timestamp_to_timedelta(): + # GH: 35897 + timestamp = Timestamp("2021-01-01") + result = timestamp + timedelta_range("0s", "1s", periods=31) + expected = DatetimeIndex( + [ + timestamp + + ( + pd.to_timedelta("0.033333333s") * i + + pd.to_timedelta("0.000000001s") * divmod(i, 3)[0] + ) + for i in range(31) + ] + ) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_arithmetic.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_arithmetic.py new file mode 100644 index 00000000..197e8312 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_arithmetic.py @@ -0,0 +1,129 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.fixture +def data(): + 
"""Fixture returning boolean array with valid and missing values.""" + return pd.array( + [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False], + dtype="boolean", + ) + + +@pytest.fixture +def left_array(): + """Fixture returning boolean array with valid and missing values.""" + return pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + + +@pytest.fixture +def right_array(): + """Fixture returning boolean array with valid and missing values.""" + return pd.array([True, False, None] * 3, dtype="boolean") + + +# Basic test for the arithmetic array ops +# ----------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "opname, exp", + [ + ("add", [True, True, None, True, False, None, None, None, None]), + ("mul", [True, False, None, False, False, None, None, None, None]), + ], + ids=["add", "mul"], +) +def test_add_mul(left_array, right_array, opname, exp): + op = getattr(operator, opname) + result = op(left_array, right_array) + expected = pd.array(exp, dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +def test_sub(left_array, right_array): + msg = ( + r"numpy boolean subtract, the `-` operator, is (?:deprecated|not supported), " + r"use the bitwise_xor, the `\^` operator, or the logical_xor function instead\." + ) + with pytest.raises(TypeError, match=msg): + left_array - right_array + + +def test_div(left_array, right_array): + msg = "operator '.*' not implemented for bool dtypes" + with pytest.raises(NotImplementedError, match=msg): + # check that we are matching the non-masked Series behavior + pd.Series(left_array._data) / pd.Series(right_array._data) + + with pytest.raises(NotImplementedError, match=msg): + left_array / right_array + + +@pytest.mark.parametrize( + "opname", + [ + "floordiv", + "mod", + "pow", + ], +) +def test_op_int8(left_array, right_array, opname): + op = getattr(operator, opname) + if opname != "mod": + msg = "operator '.*' not implemented for bool dtypes" + with pytest.raises(NotImplementedError, match=msg): + result = op(left_array, right_array) + return + result = op(left_array, right_array) + expected = op(left_array.astype("Int8"), right_array.astype("Int8")) + tm.assert_extension_array_equal(result, expected) + + +# Test generic characteristics / errors +# ----------------------------------------------------------------------------- + + +def test_error_invalid_values(data, all_arithmetic_operators): + # invalid ops + + op = all_arithmetic_operators + s = pd.Series(data) + ops = getattr(s, op) + + # invalid scalars + msg = ( + "did not contain a loop with signature matching types|" + "BooleanArray cannot perform the operation|" + "not supported for the input types, and the inputs could not be safely coerced " + "to any supported types according to the casting rule ''safe''" + ) + with pytest.raises(TypeError, match=msg): + ops("foo") + msg = "|".join( + [ + r"unsupported operand type\(s\) for", + "Concatenation operation is not implemented for NumPy arrays", + ] + ) + with pytest.raises(TypeError, match=msg): + ops(pd.Timestamp("20180101")) + + # invalid array-likes + if op not in ("__mul__", "__rmul__"): + # TODO(extension) numpy's mul with object array sees booleans as numbers + msg = "|".join( + [ + r"unsupported operand type\(s\) for", + "can only concatenate str", + "not all arguments converted during string formatting", + ] + ) + with pytest.raises(TypeError, match=msg): + ops(pd.Series("foo", index=s.index)) diff --git 
a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_astype.py new file mode 100644 index 00000000..932e903c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_astype.py @@ -0,0 +1,53 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +def test_astype(): + # with missing values + arr = pd.array([True, False, None], dtype="boolean") + + with pytest.raises(ValueError, match="cannot convert NA to integer"): + arr.astype("int64") + + with pytest.raises(ValueError, match="cannot convert float NaN to"): + arr.astype("bool") + + result = arr.astype("float64") + expected = np.array([1, 0, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + result = arr.astype("str") + expected = np.array(["True", "False", ""], dtype=f"{tm.ENDIAN}U5") + tm.assert_numpy_array_equal(result, expected) + + # no missing values + arr = pd.array([True, False, True], dtype="boolean") + result = arr.astype("int64") + expected = np.array([1, 0, 1], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + result = arr.astype("bool") + expected = np.array([True, False, True], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + +def test_astype_to_boolean_array(): + # astype to BooleanArray + arr = pd.array([True, False, None], dtype="boolean") + + result = arr.astype("boolean") + tm.assert_extension_array_equal(result, arr) + result = arr.astype(pd.BooleanDtype()) + tm.assert_extension_array_equal(result, arr) + + +def test_astype_to_integer_array(): + # astype to IntegerArray + arr = pd.array([True, False, None], dtype="boolean") + + result = arr.astype("Int64") + expected = pd.array([1, 0, None], dtype="Int64") + tm.assert_extension_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_comparison.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_comparison.py new file mode 100644 index 00000000..2eeb9da5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_comparison.py @@ -0,0 +1,60 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.arrays import BooleanArray +from pandas.tests.arrays.masked_shared import ComparisonOps + + +@pytest.fixture +def data(): + """Fixture returning boolean array with valid and missing data""" + return pd.array( + [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False], + dtype="boolean", + ) + + +@pytest.fixture +def dtype(): + """Fixture returning BooleanDtype""" + return pd.BooleanDtype() + + +class TestComparisonOps(ComparisonOps): + def test_compare_scalar(self, data, comparison_op): + self._compare_other(data, comparison_op, True) + + def test_compare_array(self, data, comparison_op): + other = pd.array([True] * len(data), dtype="boolean") + self._compare_other(data, comparison_op, other) + other = np.array([True] * len(data)) + self._compare_other(data, comparison_op, other) + other = pd.Series([True] * len(data)) + self._compare_other(data, comparison_op, other) + + @pytest.mark.parametrize("other", [True, False, pd.NA]) + def test_scalar(self, other, comparison_op, dtype): + ComparisonOps.test_scalar(self, other, comparison_op, dtype) + + def test_array(self, comparison_op): + op = comparison_op + a = pd.array([True] * 3 + [False] * 3 + [None] * 3, 
dtype="boolean") + b = pd.array([True, False, None] * 3, dtype="boolean") + + result = op(a, b) + + values = op(a._data, b._data) + mask = a._mask | b._mask + expected = BooleanArray(values, mask) + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + result[0] = None + tm.assert_extension_array_equal( + a, pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + ) + tm.assert_extension_array_equal( + b, pd.array([True, False, None] * 3, dtype="boolean") + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_construction.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_construction.py new file mode 100644 index 00000000..d26eea19 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_construction.py @@ -0,0 +1,326 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.arrays import BooleanArray +from pandas.core.arrays.boolean import coerce_to_array + + +def test_boolean_array_constructor(): + values = np.array([True, False, True, False], dtype="bool") + mask = np.array([False, False, False, True], dtype="bool") + + result = BooleanArray(values, mask) + expected = pd.array([True, False, True, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + with pytest.raises(TypeError, match="values should be boolean numpy array"): + BooleanArray(values.tolist(), mask) + + with pytest.raises(TypeError, match="mask should be boolean numpy array"): + BooleanArray(values, mask.tolist()) + + with pytest.raises(TypeError, match="values should be boolean numpy array"): + BooleanArray(values.astype(int), mask) + + with pytest.raises(TypeError, match="mask should be boolean numpy array"): + BooleanArray(values, None) + + with pytest.raises(ValueError, match="values.shape must match mask.shape"): + BooleanArray(values.reshape(1, -1), mask) + + with pytest.raises(ValueError, match="values.shape must match mask.shape"): + BooleanArray(values, mask.reshape(1, -1)) + + +def test_boolean_array_constructor_copy(): + values = np.array([True, False, True, False], dtype="bool") + mask = np.array([False, False, False, True], dtype="bool") + + result = BooleanArray(values, mask) + assert result._data is values + assert result._mask is mask + + result = BooleanArray(values, mask, copy=True) + assert result._data is not values + assert result._mask is not mask + + +def test_to_boolean_array(): + expected = BooleanArray( + np.array([True, False, True]), np.array([False, False, False]) + ) + + result = pd.array([True, False, True], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + result = pd.array(np.array([True, False, True]), dtype="boolean") + tm.assert_extension_array_equal(result, expected) + result = pd.array(np.array([True, False, True], dtype=object), dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + # with missing values + expected = BooleanArray( + np.array([True, False, True]), np.array([False, False, True]) + ) + + result = pd.array([True, False, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + result = pd.array(np.array([True, False, None], dtype=object), dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +def test_to_boolean_array_all_none(): + expected = BooleanArray(np.array([True, True, True]), np.array([True, True, True])) + + result = pd.array([None, None, None], dtype="boolean") + 
tm.assert_extension_array_equal(result, expected) + result = pd.array(np.array([None, None, None], dtype=object), dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "a, b", + [ + ([True, False, None, np.nan, pd.NA], [True, False, None, None, None]), + ([True, np.nan], [True, None]), + ([True, pd.NA], [True, None]), + ([np.nan, np.nan], [None, None]), + (np.array([np.nan, np.nan], dtype=float), [None, None]), + ], +) +def test_to_boolean_array_missing_indicators(a, b): + result = pd.array(a, dtype="boolean") + expected = pd.array(b, dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values", + [ + ["foo", "bar"], + ["1", "2"], + # "foo", + [1, 2], + [1.0, 2.0], + pd.date_range("20130101", periods=2), + np.array(["foo"]), + np.array([1, 2]), + np.array([1.0, 2.0]), + [np.nan, {"a": 1}], + ], +) +def test_to_boolean_array_error(values): + # error in converting existing arrays to BooleanArray + msg = "Need to pass bool-like value" + with pytest.raises(TypeError, match=msg): + pd.array(values, dtype="boolean") + + +def test_to_boolean_array_from_integer_array(): + result = pd.array(np.array([1, 0, 1, 0]), dtype="boolean") + expected = pd.array([True, False, True, False], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + # with missing values + result = pd.array(np.array([1, 0, 1, None]), dtype="boolean") + expected = pd.array([True, False, True, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +def test_to_boolean_array_from_float_array(): + result = pd.array(np.array([1.0, 0.0, 1.0, 0.0]), dtype="boolean") + expected = pd.array([True, False, True, False], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + # with missing values + result = pd.array(np.array([1.0, 0.0, 1.0, np.nan]), dtype="boolean") + expected = pd.array([True, False, True, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +def test_to_boolean_array_integer_like(): + # integers of 0's and 1's + result = pd.array([1, 0, 1, 0], dtype="boolean") + expected = pd.array([True, False, True, False], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + # with missing values + result = pd.array([1, 0, 1, None], dtype="boolean") + expected = pd.array([True, False, True, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +def test_coerce_to_array(): + # TODO this is currently not public API + values = np.array([True, False, True, False], dtype="bool") + mask = np.array([False, False, False, True], dtype="bool") + result = BooleanArray(*coerce_to_array(values, mask=mask)) + expected = BooleanArray(values, mask) + tm.assert_extension_array_equal(result, expected) + assert result._data is values + assert result._mask is mask + result = BooleanArray(*coerce_to_array(values, mask=mask, copy=True)) + expected = BooleanArray(values, mask) + tm.assert_extension_array_equal(result, expected) + assert result._data is not values + assert result._mask is not mask + + # mixed missing from values and mask + values = [True, False, None, False] + mask = np.array([False, False, False, True], dtype="bool") + result = BooleanArray(*coerce_to_array(values, mask=mask)) + expected = BooleanArray( + np.array([True, False, True, True]), np.array([False, False, True, True]) + ) + tm.assert_extension_array_equal(result, expected) + result = BooleanArray(*coerce_to_array(np.array(values, dtype=object), mask=mask)) 
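+    # Editorial note (not upstream code): when missing values appear both in
+    # `values` (as None) and in the explicit `mask`, coerce_to_array is
+    # expected to take their union, which is why `expected` above masks both
+    # position 2 (None in values) and position 3 (True in mask).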
+ tm.assert_extension_array_equal(result, expected) + result = BooleanArray(*coerce_to_array(values, mask=mask.tolist())) + tm.assert_extension_array_equal(result, expected) + + # raise errors for wrong dimension + values = np.array([True, False, True, False], dtype="bool") + mask = np.array([False, False, False, True], dtype="bool") + + # passing 2D values is OK as long as no mask + coerce_to_array(values.reshape(1, -1)) + + with pytest.raises(ValueError, match="values.shape and mask.shape must match"): + coerce_to_array(values.reshape(1, -1), mask=mask) + + with pytest.raises(ValueError, match="values.shape and mask.shape must match"): + coerce_to_array(values, mask=mask.reshape(1, -1)) + + +def test_coerce_to_array_from_boolean_array(): + # passing BooleanArray to coerce_to_array + values = np.array([True, False, True, False], dtype="bool") + mask = np.array([False, False, False, True], dtype="bool") + arr = BooleanArray(values, mask) + result = BooleanArray(*coerce_to_array(arr)) + tm.assert_extension_array_equal(result, arr) + # no copy + assert result._data is arr._data + assert result._mask is arr._mask + + result = BooleanArray(*coerce_to_array(arr), copy=True) + tm.assert_extension_array_equal(result, arr) + assert result._data is not arr._data + assert result._mask is not arr._mask + + with pytest.raises(ValueError, match="cannot pass mask for BooleanArray input"): + coerce_to_array(arr, mask=mask) + + +def test_coerce_to_numpy_array(): + # with missing values -> object dtype + arr = pd.array([True, False, None], dtype="boolean") + result = np.array(arr) + expected = np.array([True, False, pd.NA], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + # also with no missing values -> object dtype + arr = pd.array([True, False, True], dtype="boolean") + result = np.array(arr) + expected = np.array([True, False, True], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + # force bool dtype + result = np.array(arr, dtype="bool") + expected = np.array([True, False, True], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + # with missing values will raise error + arr = pd.array([True, False, None], dtype="boolean") + msg = ( + "cannot convert to 'bool'-dtype NumPy array with missing values. " + "Specify an appropriate 'na_value' for this dtype." 
+ ) + with pytest.raises(ValueError, match=msg): + np.array(arr, dtype="bool") + + +def test_to_boolean_array_from_strings(): + result = BooleanArray._from_sequence_of_strings( + np.array(["True", "False", "1", "1.0", "0", "0.0", np.nan], dtype=object) + ) + expected = BooleanArray( + np.array([True, False, True, True, False, False, False]), + np.array([False, False, False, False, False, False, True]), + ) + + tm.assert_extension_array_equal(result, expected) + + +def test_to_boolean_array_from_strings_invalid_string(): + with pytest.raises(ValueError, match="cannot be cast"): + BooleanArray._from_sequence_of_strings(["donkey"]) + + +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy(box): + con = pd.Series if box else pd.array + # default (with or without missing values) -> object dtype + arr = con([True, False, True], dtype="boolean") + result = arr.to_numpy() + expected = np.array([True, False, True], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + arr = con([True, False, None], dtype="boolean") + result = arr.to_numpy() + expected = np.array([True, False, pd.NA], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + arr = con([True, False, None], dtype="boolean") + result = arr.to_numpy(dtype="str") + expected = np.array([True, False, pd.NA], dtype=f"{tm.ENDIAN}U5") + tm.assert_numpy_array_equal(result, expected) + + # no missing values -> can convert to bool, otherwise raises + arr = con([True, False, True], dtype="boolean") + result = arr.to_numpy(dtype="bool") + expected = np.array([True, False, True], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + arr = con([True, False, None], dtype="boolean") + with pytest.raises(ValueError, match="cannot convert to 'bool'-dtype"): + result = arr.to_numpy(dtype="bool") + + # specify dtype and na_value + arr = con([True, False, None], dtype="boolean") + result = arr.to_numpy(dtype=object, na_value=None) + expected = np.array([True, False, None], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + result = arr.to_numpy(dtype=bool, na_value=False) + expected = np.array([True, False, False], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + result = arr.to_numpy(dtype="int64", na_value=-99) + expected = np.array([1, 0, -99], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + result = arr.to_numpy(dtype="float64", na_value=np.nan) + expected = np.array([1, 0, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + # converting to int or float without specifying na_value raises + with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype"): + arr.to_numpy(dtype="int64") + with pytest.raises(ValueError, match="cannot convert to 'float64'-dtype"): + arr.to_numpy(dtype="float64") + + +def test_to_numpy_copy(): + # to_numpy can be zero-copy if no missing values + arr = pd.array([True, False, True], dtype="boolean") + result = arr.to_numpy(dtype=bool) + result[0] = False + tm.assert_extension_array_equal( + arr, pd.array([False, False, True], dtype="boolean") + ) + + arr = pd.array([True, False, True], dtype="boolean") + result = arr.to_numpy(dtype=bool, copy=True) + result[0] = False + tm.assert_extension_array_equal(arr, pd.array([True, False, True], dtype="boolean")) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_function.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_function.py new file mode 100644 index 
00000000..2b3f3d3d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_function.py @@ -0,0 +1,126 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize( + "ufunc", [np.add, np.logical_or, np.logical_and, np.logical_xor] +) +def test_ufuncs_binary(ufunc): + # two BooleanArrays + a = pd.array([True, False, None], dtype="boolean") + result = ufunc(a, a) + expected = pd.array(ufunc(a._data, a._data), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(a) + result = ufunc(s, a) + expected = pd.Series(ufunc(a._data, a._data), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_series_equal(result, expected) + + # Boolean with numpy array + arr = np.array([True, True, False]) + result = ufunc(a, arr) + expected = pd.array(ufunc(a._data, arr), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_extension_array_equal(result, expected) + + result = ufunc(arr, a) + expected = pd.array(ufunc(arr, a._data), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_extension_array_equal(result, expected) + + # BooleanArray with scalar + result = ufunc(a, True) + expected = pd.array(ufunc(a._data, True), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_extension_array_equal(result, expected) + + result = ufunc(True, a) + expected = pd.array(ufunc(True, a._data), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_extension_array_equal(result, expected) + + # not handled types + msg = r"operand type\(s\) all returned NotImplemented from __array_ufunc__" + with pytest.raises(TypeError, match=msg): + ufunc(a, "test") + + +@pytest.mark.parametrize("ufunc", [np.logical_not]) +def test_ufuncs_unary(ufunc): + a = pd.array([True, False, None], dtype="boolean") + result = ufunc(a) + expected = pd.array(ufunc(a._data), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_extension_array_equal(result, expected) + + ser = pd.Series(a) + result = ufunc(ser) + expected = pd.Series(ufunc(a._data), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_series_equal(result, expected) + + +def test_ufunc_numeric(): + # np.sqrt on np.bool_ returns float16, which we upcast to Float32 + # bc we do not have Float16 + arr = pd.array([True, False, None], dtype="boolean") + + res = np.sqrt(arr) + + expected = pd.array([1, 0, None], dtype="Float32") + tm.assert_extension_array_equal(res, expected) + + +@pytest.mark.parametrize("values", [[True, False], [True, None]]) +def test_ufunc_reduce_raises(values): + arr = pd.array(values, dtype="boolean") + + res = np.add.reduce(arr) + if arr[-1] is pd.NA: + expected = pd.NA + else: + expected = arr._data.sum() + tm.assert_almost_equal(res, expected) + + +def test_value_counts_na(): + arr = pd.array([True, False, pd.NA], dtype="boolean") + result = arr.value_counts(dropna=False) + expected = pd.Series([1, 1, 1], index=arr, dtype="Int64", name="count") + assert expected.index.dtype == arr.dtype + tm.assert_series_equal(result, expected) + + result = arr.value_counts(dropna=True) + expected = pd.Series([1, 1], index=arr[:-1], dtype="Int64", name="count") + assert expected.index.dtype == arr.dtype + tm.assert_series_equal(result, expected) + + +def test_value_counts_with_normalize(): + ser = pd.Series([True, False, pd.NA], dtype="boolean") + result = ser.value_counts(normalize=True) + expected = pd.Series([1, 1], index=ser[:-1], dtype="Float64", name="proportion") / 2 + assert 
expected.index.dtype == "boolean" + tm.assert_series_equal(result, expected) + + +def test_diff(): + a = pd.array( + [True, True, False, False, True, None, True, None, False], dtype="boolean" + ) + result = pd.core.algorithms.diff(a, 1) + expected = pd.array( + [None, False, True, False, True, None, None, None, None], dtype="boolean" + ) + tm.assert_extension_array_equal(result, expected) + + ser = pd.Series(a) + result = ser.diff() + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_indexing.py new file mode 100644 index 00000000..6a7daea1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_indexing.py @@ -0,0 +1,13 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize("na", [None, np.nan, pd.NA]) +def test_setitem_missing_values(na): + arr = pd.array([True, False, None], dtype="boolean") + expected = pd.array([True, None, None], dtype="boolean") + arr[1] = na + tm.assert_extension_array_equal(arr, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_logical.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_logical.py new file mode 100644 index 00000000..66c117ea --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_logical.py @@ -0,0 +1,254 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.arrays import BooleanArray +from pandas.core.ops.mask_ops import ( + kleene_and, + kleene_or, + kleene_xor, +) +from pandas.tests.extension.base import BaseOpsUtil + + +class TestLogicalOps(BaseOpsUtil): + def test_numpy_scalars_ok(self, all_logical_operators): + a = pd.array([True, False, None], dtype="boolean") + op = getattr(a, all_logical_operators) + + tm.assert_extension_array_equal(op(True), op(np.bool_(True))) + tm.assert_extension_array_equal(op(False), op(np.bool_(False))) + + def get_op_from_name(self, op_name): + short_opname = op_name.strip("_") + short_opname = short_opname if "xor" in short_opname else short_opname + "_" + try: + op = getattr(operator, short_opname) + except AttributeError: + # Assume it is the reverse operator + rop = getattr(operator, short_opname[1:]) + op = lambda x, y: rop(y, x) + + return op + + def test_empty_ok(self, all_logical_operators): + a = pd.array([], dtype="boolean") + op_name = all_logical_operators + result = getattr(a, op_name)(True) + tm.assert_extension_array_equal(a, result) + + result = getattr(a, op_name)(False) + tm.assert_extension_array_equal(a, result) + + result = getattr(a, op_name)(pd.NA) + tm.assert_extension_array_equal(a, result) + + @pytest.mark.parametrize( + "other", ["a", pd.Timestamp(2017, 1, 1, 12), np.timedelta64(4)] + ) + def test_eq_mismatched_type(self, other): + # GH-44499 + arr = pd.array([True, False]) + result = arr == other + expected = pd.array([False, False]) + tm.assert_extension_array_equal(result, expected) + + result = arr != other + expected = pd.array([True, True]) + tm.assert_extension_array_equal(result, expected) + + def test_logical_length_mismatch_raises(self, all_logical_operators): + op_name = all_logical_operators + a = pd.array([True, False, None], dtype="boolean") + msg = "Lengths must match" + + with pytest.raises(ValueError, 
match=msg): + getattr(a, op_name)([True, False]) + + with pytest.raises(ValueError, match=msg): + getattr(a, op_name)(np.array([True, False])) + + with pytest.raises(ValueError, match=msg): + getattr(a, op_name)(pd.array([True, False], dtype="boolean")) + + def test_logical_nan_raises(self, all_logical_operators): + op_name = all_logical_operators + a = pd.array([True, False, None], dtype="boolean") + msg = "Got float instead" + + with pytest.raises(TypeError, match=msg): + getattr(a, op_name)(np.nan) + + @pytest.mark.parametrize("other", ["a", 1]) + def test_non_bool_or_na_other_raises(self, other, all_logical_operators): + a = pd.array([True, False], dtype="boolean") + with pytest.raises(TypeError, match=str(type(other).__name__)): + getattr(a, all_logical_operators)(other) + + def test_kleene_or(self): + # A clear test of behavior. + a = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + b = pd.array([True, False, None] * 3, dtype="boolean") + result = a | b + expected = pd.array( + [True, True, True, True, False, None, True, None, None], dtype="boolean" + ) + tm.assert_extension_array_equal(result, expected) + + result = b | a + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_extension_array_equal( + a, pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + ) + tm.assert_extension_array_equal( + b, pd.array([True, False, None] * 3, dtype="boolean") + ) + + @pytest.mark.parametrize( + "other, expected", + [ + (pd.NA, [True, None, None]), + (True, [True, True, True]), + (np.bool_(True), [True, True, True]), + (False, [True, False, None]), + (np.bool_(False), [True, False, None]), + ], + ) + def test_kleene_or_scalar(self, other, expected): + # TODO: test True & False + a = pd.array([True, False, None], dtype="boolean") + result = a | other + expected = pd.array(expected, dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + result = other | a + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_extension_array_equal( + a, pd.array([True, False, None], dtype="boolean") + ) + + def test_kleene_and(self): + # A clear test of behavior. 
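+        # Editorial summary (not upstream code): Kleene AND treats NA as
+        # "unknown", so a known False dominates while a known True defers:
+        # False & NA == False, True & NA == NA, NA & NA == NA. For example:
+        # >>> pd.NA & False
+        # False
+        # >>> pd.NA & True
+        # <NA>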
+ a = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + b = pd.array([True, False, None] * 3, dtype="boolean") + result = a & b + expected = pd.array( + [True, False, None, False, False, False, None, False, None], dtype="boolean" + ) + tm.assert_extension_array_equal(result, expected) + + result = b & a + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_extension_array_equal( + a, pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + ) + tm.assert_extension_array_equal( + b, pd.array([True, False, None] * 3, dtype="boolean") + ) + + @pytest.mark.parametrize( + "other, expected", + [ + (pd.NA, [None, False, None]), + (True, [True, False, None]), + (False, [False, False, False]), + (np.bool_(True), [True, False, None]), + (np.bool_(False), [False, False, False]), + ], + ) + def test_kleene_and_scalar(self, other, expected): + a = pd.array([True, False, None], dtype="boolean") + result = a & other + expected = pd.array(expected, dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + result = other & a + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_extension_array_equal( + a, pd.array([True, False, None], dtype="boolean") + ) + + def test_kleene_xor(self): + a = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + b = pd.array([True, False, None] * 3, dtype="boolean") + result = a ^ b + expected = pd.array( + [False, True, None, True, False, None, None, None, None], dtype="boolean" + ) + tm.assert_extension_array_equal(result, expected) + + result = b ^ a + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_extension_array_equal( + a, pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + ) + tm.assert_extension_array_equal( + b, pd.array([True, False, None] * 3, dtype="boolean") + ) + + @pytest.mark.parametrize( + "other, expected", + [ + (pd.NA, [None, None, None]), + (True, [False, True, None]), + (np.bool_(True), [False, True, None]), + (np.bool_(False), [True, False, None]), + ], + ) + def test_kleene_xor_scalar(self, other, expected): + a = pd.array([True, False, None], dtype="boolean") + result = a ^ other + expected = pd.array(expected, dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + result = other ^ a + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_extension_array_equal( + a, pd.array([True, False, None], dtype="boolean") + ) + + @pytest.mark.parametrize("other", [True, False, pd.NA, [True, False, None] * 3]) + def test_no_masked_assumptions(self, other, all_logical_operators): + # The logical operations should not assume that masked values are False! 
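+        # Editorial note (not upstream code): positions where `_mask` is True
+        # may hold arbitrary leftover values in `_data`; a correct masked op
+        # must return the same answer whatever those hidden values are. That
+        # is why the second half of this test deliberately flips them (True in
+        # `other`, False in `a`) and asserts the result is unchanged.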
+ a = pd.arrays.BooleanArray( + np.array([True, True, True, False, False, False, True, False, True]), + np.array([False] * 6 + [True, True, True]), + ) + b = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + if isinstance(other, list): + other = pd.array(other, dtype="boolean") + + result = getattr(a, all_logical_operators)(other) + expected = getattr(b, all_logical_operators)(other) + tm.assert_extension_array_equal(result, expected) + + if isinstance(other, BooleanArray): + other._data[other._mask] = True + a._data[a._mask] = False + + result = getattr(a, all_logical_operators)(other) + expected = getattr(b, all_logical_operators)(other) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("operation", [kleene_or, kleene_xor, kleene_and]) +def test_error_both_scalar(operation): + msg = r"Either `left` or `right` need to be a np\.ndarray." + with pytest.raises(TypeError, match=msg): + # masks need to be non-None, otherwise it ends up in an infinite recursion + operation(True, True, np.zeros(1), np.zeros(1)) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_ops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_ops.py new file mode 100644 index 00000000..95ebe852 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_ops.py @@ -0,0 +1,27 @@ +import pandas as pd +import pandas._testing as tm + + +class TestUnaryOps: + def test_invert(self): + a = pd.array([True, False, None], dtype="boolean") + expected = pd.array([False, True, None], dtype="boolean") + tm.assert_extension_array_equal(~a, expected) + + expected = pd.Series(expected, index=["a", "b", "c"], name="name") + result = ~pd.Series(a, index=["a", "b", "c"], name="name") + tm.assert_series_equal(result, expected) + + df = pd.DataFrame({"A": a, "B": [True, False, False]}, index=["a", "b", "c"]) + result = ~df + expected = pd.DataFrame( + {"A": expected, "B": [False, True, True]}, index=["a", "b", "c"] + ) + tm.assert_frame_equal(result, expected) + + def test_abs(self): + # matching numpy behavior, abs is the identity function + arr = pd.array([True, False, None], dtype="boolean") + result = abs(arr) + + tm.assert_extension_array_equal(result, arr) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_reduction.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_reduction.py new file mode 100644 index 00000000..dd8c3eda --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_reduction.py @@ -0,0 +1,62 @@ +import numpy as np +import pytest + +import pandas as pd + + +@pytest.fixture +def data(): + """Fixture returning boolean array, with valid and missing values.""" + return pd.array( + [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False], + dtype="boolean", + ) + + +@pytest.mark.parametrize( + "values, exp_any, exp_all, exp_any_noskip, exp_all_noskip", + [ + ([True, pd.NA], True, True, True, pd.NA), + ([False, pd.NA], False, False, pd.NA, False), + ([pd.NA], False, True, pd.NA, pd.NA), + ([], False, True, False, True), + # GH-33253: all True / all False values buggy with skipna=False + ([True, True], True, True, True, True), + ([False, False], False, False, False, False), + ], +) +def test_any_all(values, exp_any, exp_all, exp_any_noskip, exp_all_noskip): + # the methods return numpy scalars + exp_any = pd.NA if exp_any is pd.NA else np.bool_(exp_any) + exp_all = pd.NA if exp_all is 
pd.NA else np.bool_(exp_all)
+    exp_any_noskip = pd.NA if exp_any_noskip is pd.NA else np.bool_(exp_any_noskip)
+    exp_all_noskip = pd.NA if exp_all_noskip is pd.NA else np.bool_(exp_all_noskip)
+
+    for con in [pd.array, pd.Series]:
+        a = con(values, dtype="boolean")
+        assert a.any() is exp_any
+        assert a.all() is exp_all
+        assert a.any(skipna=False) is exp_any_noskip
+        assert a.all(skipna=False) is exp_all_noskip
+
+        assert np.any(a.any()) is exp_any
+        assert np.all(a.all()) is exp_all
+
+
+@pytest.mark.parametrize("dropna", [True, False])
+def test_reductions_return_types(dropna, data, all_numeric_reductions):
+    op = all_numeric_reductions
+    s = pd.Series(data)
+    if dropna:
+        s = s.dropna()
+
+    if op in ("sum", "prod"):
+        assert isinstance(getattr(s, op)(), np.int_)
+    elif op == "count":
+        # Oddly on the 32 bit build (but not Windows), this is intc (!= intp)
+        assert isinstance(getattr(s, op)(), np.integer)
+    elif op in ("min", "max"):
+        assert isinstance(getattr(s, op)(), np.bool_)
+    else:
+        # "mean", "std", "var", "median", "kurt", "skew"
+        assert isinstance(getattr(s, op)(), np.float64)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_repr.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_repr.py
new file mode 100644
index 00000000..0ee904b1
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_repr.py
@@ -0,0 +1,13 @@
+import pandas as pd
+
+
+def test_repr():
+    df = pd.DataFrame({"A": pd.array([True, False, None], dtype="boolean")})
+    expected = "       A\n0   True\n1  False\n2   <NA>"
+    assert repr(df) == expected
+
+    expected = "0     True\n1    False\n2     <NA>\nName: A, dtype: boolean"
+    assert repr(df.A) == expected
+
+    expected = "<BooleanArray>\n[True, False, <NA>]\nLength: 3, dtype: boolean"
+    assert repr(df.A.array) == expected
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/conftest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/conftest.py
new file mode 100644
index 00000000..d5b49e3e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/conftest.py
@@ -0,0 +1,15 @@
+import pytest
+
+from pandas import Categorical
+
+
+@pytest.fixture(params=[True, False])
+def allow_fill(request):
+    """Boolean 'allow_fill' parameter for Categorical.take"""
+    return request.param
+
+
+@pytest.fixture
+def factor():
+    """Fixture returning a Categorical object"""
+    return Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_algos.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_algos.py
new file mode 100644
index 00000000..d4c19a49
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_algos.py
@@ -0,0 +1,89 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+
+@pytest.mark.parametrize("ordered", [True, False])
+@pytest.mark.parametrize("categories", [["b", "a", "c"], ["a", "b", "c", "d"]])
+def test_factorize(categories, ordered):
+    cat = pd.Categorical(
+        ["b", "b", "a", "c", None], categories=categories, ordered=ordered
+    )
+    codes, uniques = pd.factorize(cat)
+    expected_codes = np.array([0, 0, 1, 2, -1],
dtype=np.intp) + expected_uniques = pd.Categorical( + ["b", "a", "c"], categories=categories, ordered=ordered + ) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_categorical_equal(uniques, expected_uniques) + + +def test_factorized_sort(): + cat = pd.Categorical(["b", "b", None, "a"]) + codes, uniques = pd.factorize(cat, sort=True) + expected_codes = np.array([1, 1, -1, 0], dtype=np.intp) + expected_uniques = pd.Categorical(["a", "b"]) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_categorical_equal(uniques, expected_uniques) + + +def test_factorized_sort_ordered(): + cat = pd.Categorical( + ["b", "b", None, "a"], categories=["c", "b", "a"], ordered=True + ) + + codes, uniques = pd.factorize(cat, sort=True) + expected_codes = np.array([0, 0, -1, 1], dtype=np.intp) + expected_uniques = pd.Categorical( + ["b", "a"], categories=["c", "b", "a"], ordered=True + ) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_categorical_equal(uniques, expected_uniques) + + +def test_isin_cats(): + # GH2003 + cat = pd.Categorical(["a", "b", np.nan]) + + result = cat.isin(["a", np.nan]) + expected = np.array([True, False, True], dtype=bool) + tm.assert_numpy_array_equal(expected, result) + + result = cat.isin(["a", "c"]) + expected = np.array([True, False, False], dtype=bool) + tm.assert_numpy_array_equal(expected, result) + + +@pytest.mark.parametrize("value", [[""], [None, ""], [pd.NaT, ""]]) +def test_isin_cats_corner_cases(value): + # GH36550 + cat = pd.Categorical([""]) + result = cat.isin(value) + expected = np.array([True], dtype=bool) + tm.assert_numpy_array_equal(expected, result) + + +@pytest.mark.parametrize("empty", [[], pd.Series(dtype=object), np.array([])]) +def test_isin_empty(empty): + s = pd.Categorical(["a", "b"]) + expected = np.array([False, False], dtype=bool) + + result = s.isin(empty) + tm.assert_numpy_array_equal(expected, result) + + +def test_diff(): + ser = pd.Series([1, 2, 3], dtype="category") + + msg = "Convert to a suitable dtype" + with pytest.raises(TypeError, match=msg): + ser.diff() + + df = ser.to_frame(name="A") + with pytest.raises(TypeError, match=msg): + df.diff() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_analytics.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_analytics.py new file mode 100644 index 00000000..c2c53fbc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_analytics.py @@ -0,0 +1,349 @@ +import re +import sys + +import numpy as np +import pytest + +from pandas.compat import PYPY + +from pandas import ( + Categorical, + CategoricalDtype, + DataFrame, + Index, + NaT, + Series, + date_range, +) +import pandas._testing as tm +from pandas.api.types import is_scalar + + +class TestCategoricalAnalytics: + @pytest.mark.parametrize("aggregation", ["min", "max"]) + def test_min_max_not_ordered_raises(self, aggregation): + # unordered cats have no min/max + cat = Categorical(["a", "b", "c", "d"], ordered=False) + msg = f"Categorical is not ordered for operation {aggregation}" + agg_func = getattr(cat, aggregation) + + with pytest.raises(TypeError, match=msg): + agg_func() + + ufunc = np.minimum if aggregation == "min" else np.maximum + with pytest.raises(TypeError, match=msg): + ufunc.reduce(cat) + + def test_min_max_ordered(self, index_or_series_or_array): + cat = Categorical(["a", "b", "c", "d"], ordered=True) + obj = index_or_series_or_array(cat) + _min = obj.min() + _max = obj.max() + 
assert _min == "a" + assert _max == "d" + + assert np.minimum.reduce(obj) == "a" + assert np.maximum.reduce(obj) == "d" + # TODO: raises if we pass axis=0 (on Index and Categorical, not Series) + + cat = Categorical( + ["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True + ) + obj = index_or_series_or_array(cat) + _min = obj.min() + _max = obj.max() + assert _min == "d" + assert _max == "a" + assert np.minimum.reduce(obj) == "d" + assert np.maximum.reduce(obj) == "a" + + def test_min_max_reduce(self): + # GH52788 + cat = Categorical(["a", "b", "c", "d"], ordered=True) + df = DataFrame(cat) + + result_max = df.agg("max") + expected_max = Series(Categorical(["d"], dtype=cat.dtype)) + tm.assert_series_equal(result_max, expected_max) + + result_min = df.agg("min") + expected_min = Series(Categorical(["a"], dtype=cat.dtype)) + tm.assert_series_equal(result_min, expected_min) + + @pytest.mark.parametrize( + "categories,expected", + [ + (list("ABC"), np.nan), + ([1, 2, 3], np.nan), + pytest.param( + Series(date_range("2020-01-01", periods=3), dtype="category"), + NaT, + marks=pytest.mark.xfail( + reason="https://github.com/pandas-dev/pandas/issues/29962" + ), + ), + ], + ) + @pytest.mark.parametrize("aggregation", ["min", "max"]) + def test_min_max_ordered_empty(self, categories, expected, aggregation): + # GH 30227 + cat = Categorical([], categories=categories, ordered=True) + + agg_func = getattr(cat, aggregation) + result = agg_func() + assert result is expected + + @pytest.mark.parametrize( + "values, categories", + [(["a", "b", "c", np.nan], list("cba")), ([1, 2, 3, np.nan], [3, 2, 1])], + ) + @pytest.mark.parametrize("skipna", [True, False]) + @pytest.mark.parametrize("function", ["min", "max"]) + def test_min_max_with_nan(self, values, categories, function, skipna): + # GH 25303 + cat = Categorical(values, categories=categories, ordered=True) + result = getattr(cat, function)(skipna=skipna) + + if skipna is False: + assert result is np.nan + else: + expected = categories[0] if function == "min" else categories[2] + assert result == expected + + @pytest.mark.parametrize("function", ["min", "max"]) + @pytest.mark.parametrize("skipna", [True, False]) + def test_min_max_only_nan(self, function, skipna): + # https://github.com/pandas-dev/pandas/issues/33450 + cat = Categorical([np.nan], categories=[1, 2], ordered=True) + result = getattr(cat, function)(skipna=skipna) + assert result is np.nan + + @pytest.mark.parametrize("method", ["min", "max"]) + def test_numeric_only_min_max_raises(self, method): + # GH 25303 + cat = Categorical( + [np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True + ) + with pytest.raises(TypeError, match=".* got an unexpected keyword"): + getattr(cat, method)(numeric_only=True) + + @pytest.mark.parametrize("method", ["min", "max"]) + def test_numpy_min_max_raises(self, method): + cat = Categorical(["a", "b", "c", "b"], ordered=False) + msg = ( + f"Categorical is not ordered for operation {method}\n" + "you can use .as_ordered() to change the Categorical to an ordered one" + ) + method = getattr(np, method) + with pytest.raises(TypeError, match=re.escape(msg)): + method(cat) + + @pytest.mark.parametrize("kwarg", ["axis", "out", "keepdims"]) + @pytest.mark.parametrize("method", ["min", "max"]) + def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg): + cat = Categorical(["a", "b", "c", "b"], ordered=True) + msg = ( + f"the '{kwarg}' parameter is not supported in the pandas implementation " + f"of {method}" + ) + if kwarg == "axis": 
+ msg = r"`axis` must be fewer than the number of dimensions \(1\)" + kwargs = {kwarg: 42} + method = getattr(np, method) + with pytest.raises(ValueError, match=msg): + method(cat, **kwargs) + + @pytest.mark.parametrize("method, expected", [("min", "a"), ("max", "c")]) + def test_numpy_min_max_axis_equals_none(self, method, expected): + cat = Categorical(["a", "b", "c", "b"], ordered=True) + method = getattr(np, method) + result = method(cat, axis=None) + assert result == expected + + @pytest.mark.parametrize( + "values,categories,exp_mode", + [ + ([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]), + ([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]), + ([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]), + ([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]), + ([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]), + ([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]), + ], + ) + def test_mode(self, values, categories, exp_mode): + cat = Categorical(values, categories=categories, ordered=True) + res = Series(cat).mode()._values + exp = Categorical(exp_mode, categories=categories, ordered=True) + tm.assert_categorical_equal(res, exp) + + def test_searchsorted(self, ordered): + # https://github.com/pandas-dev/pandas/issues/8420 + # https://github.com/pandas-dev/pandas/issues/14522 + + cat = Categorical( + ["cheese", "milk", "apple", "bread", "bread"], + categories=["cheese", "milk", "apple", "bread"], + ordered=ordered, + ) + ser = Series(cat) + + # Searching for single item argument, side='left' (default) + res_cat = cat.searchsorted("apple") + assert res_cat == 2 + assert is_scalar(res_cat) + + res_ser = ser.searchsorted("apple") + assert res_ser == 2 + assert is_scalar(res_ser) + + # Searching for single item array, side='left' (default) + res_cat = cat.searchsorted(["bread"]) + res_ser = ser.searchsorted(["bread"]) + exp = np.array([3], dtype=np.intp) + tm.assert_numpy_array_equal(res_cat, exp) + tm.assert_numpy_array_equal(res_ser, exp) + + # Searching for several items array, side='right' + res_cat = cat.searchsorted(["apple", "bread"], side="right") + res_ser = ser.searchsorted(["apple", "bread"], side="right") + exp = np.array([3, 5], dtype=np.intp) + tm.assert_numpy_array_equal(res_cat, exp) + tm.assert_numpy_array_equal(res_ser, exp) + + # Searching for a single value that is not from the Categorical + with pytest.raises(TypeError, match="cucumber"): + cat.searchsorted("cucumber") + with pytest.raises(TypeError, match="cucumber"): + ser.searchsorted("cucumber") + + # Searching for multiple values one of each is not from the Categorical + msg = ( + "Cannot setitem on a Categorical with a new category, " + "set the categories first" + ) + with pytest.raises(TypeError, match=msg): + cat.searchsorted(["bread", "cucumber"]) + with pytest.raises(TypeError, match=msg): + ser.searchsorted(["bread", "cucumber"]) + + def test_unique(self, ordered): + # GH38140 + dtype = CategoricalDtype(["a", "b", "c"], ordered=ordered) + + # categories are reordered based on value when ordered=False + cat = Categorical(["a", "b", "c"], dtype=dtype) + res = cat.unique() + tm.assert_categorical_equal(res, cat) + + cat = Categorical(["a", "b", "a", "a"], dtype=dtype) + res = cat.unique() + tm.assert_categorical_equal(res, Categorical(["a", "b"], dtype=dtype)) + + cat = Categorical(["c", "a", "b", "a", "a"], dtype=dtype) + res = cat.unique() + exp_cat = Categorical(["c", "a", "b"], dtype=dtype) + tm.assert_categorical_equal(res, exp_cat) + + # nan must be removed + cat = Categorical(["b", np.nan, "b", np.nan, 
"a"], dtype=dtype) + res = cat.unique() + exp_cat = Categorical(["b", np.nan, "a"], dtype=dtype) + tm.assert_categorical_equal(res, exp_cat) + + def test_unique_index_series(self, ordered): + # GH38140 + dtype = CategoricalDtype([3, 2, 1], ordered=ordered) + + c = Categorical([3, 1, 2, 2, 1], dtype=dtype) + # Categorical.unique sorts categories by appearance order + # if ordered=False + exp = Categorical([3, 1, 2], dtype=dtype) + tm.assert_categorical_equal(c.unique(), exp) + + tm.assert_index_equal(Index(c).unique(), Index(exp)) + tm.assert_categorical_equal(Series(c).unique(), exp) + + c = Categorical([1, 1, 2, 2], dtype=dtype) + exp = Categorical([1, 2], dtype=dtype) + tm.assert_categorical_equal(c.unique(), exp) + tm.assert_index_equal(Index(c).unique(), Index(exp)) + tm.assert_categorical_equal(Series(c).unique(), exp) + + def test_shift(self): + # GH 9416 + cat = Categorical(["a", "b", "c", "d", "a"]) + + # shift forward + sp1 = cat.shift(1) + xp1 = Categorical([np.nan, "a", "b", "c", "d"]) + tm.assert_categorical_equal(sp1, xp1) + tm.assert_categorical_equal(cat[:-1], sp1[1:]) + + # shift back + sn2 = cat.shift(-2) + xp2 = Categorical( + ["c", "d", "a", np.nan, np.nan], categories=["a", "b", "c", "d"] + ) + tm.assert_categorical_equal(sn2, xp2) + tm.assert_categorical_equal(cat[2:], sn2[:-2]) + + # shift by zero + tm.assert_categorical_equal(cat, cat.shift(0)) + + def test_nbytes(self): + cat = Categorical([1, 2, 3]) + exp = 3 + 3 * 8 # 3 int8s for values + 3 int64s for categories + assert cat.nbytes == exp + + def test_memory_usage(self): + cat = Categorical([1, 2, 3]) + + # .categories is an index, so we include the hashtable + assert 0 < cat.nbytes <= cat.memory_usage() + assert 0 < cat.nbytes <= cat.memory_usage(deep=True) + + cat = Categorical(["foo", "foo", "bar"]) + assert cat.memory_usage(deep=True) > cat.nbytes + + if not PYPY: + # sys.getsizeof will call the .memory_usage with + # deep=True, and add on some GC overhead + diff = cat.memory_usage(deep=True) - sys.getsizeof(cat) + assert abs(diff) < 100 + + def test_map(self): + c = Categorical(list("ABABC"), categories=list("CBA"), ordered=True) + result = c.map(lambda x: x.lower(), na_action=None) + exp = Categorical(list("ababc"), categories=list("cba"), ordered=True) + tm.assert_categorical_equal(result, exp) + + c = Categorical(list("ABABC"), categories=list("ABC"), ordered=False) + result = c.map(lambda x: x.lower(), na_action=None) + exp = Categorical(list("ababc"), categories=list("abc"), ordered=False) + tm.assert_categorical_equal(result, exp) + + result = c.map(lambda x: 1, na_action=None) + # GH 12766: Return an index not an array + tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64))) + + @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0]) + def test_validate_inplace_raises(self, value): + cat = Categorical(["A", "B", "B", "C", "A"]) + msg = ( + 'For argument "inplace" expected type bool, ' + f"received type {type(value).__name__}" + ) + + with pytest.raises(ValueError, match=msg): + cat.sort_values(inplace=value) + + def test_quantile_empty(self): + # make sure we have correct itemsize on resulting codes + cat = Categorical(["A", "B"]) + idx = Index([0.0, 0.5]) + result = cat[:0]._quantile(idx, interpolation="linear") + assert result._codes.dtype == np.int8 + + expected = cat.take([-1, -1], allow_fill=True) + tm.assert_extension_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_api.py 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_api.py new file mode 100644 index 00000000..b4215b4a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_api.py @@ -0,0 +1,500 @@ +import re + +import numpy as np +import pytest + +from pandas.compat import PY311 + +from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + Index, + Series, + StringDtype, +) +import pandas._testing as tm +from pandas.core.arrays.categorical import recode_for_categories + + +class TestCategoricalAPI: + def test_to_list_deprecated(self): + # GH#51254 + cat1 = Categorical(list("acb"), ordered=False) + msg = "Categorical.to_list is deprecated and will be removed" + with tm.assert_produces_warning(FutureWarning, match=msg): + cat1.to_list() + + def test_ordered_api(self): + # GH 9347 + cat1 = Categorical(list("acb"), ordered=False) + tm.assert_index_equal(cat1.categories, Index(["a", "b", "c"])) + assert not cat1.ordered + + cat2 = Categorical(list("acb"), categories=list("bca"), ordered=False) + tm.assert_index_equal(cat2.categories, Index(["b", "c", "a"])) + assert not cat2.ordered + + cat3 = Categorical(list("acb"), ordered=True) + tm.assert_index_equal(cat3.categories, Index(["a", "b", "c"])) + assert cat3.ordered + + cat4 = Categorical(list("acb"), categories=list("bca"), ordered=True) + tm.assert_index_equal(cat4.categories, Index(["b", "c", "a"])) + assert cat4.ordered + + def test_set_ordered(self): + cat = Categorical(["a", "b", "c", "a"], ordered=True) + cat2 = cat.as_unordered() + assert not cat2.ordered + cat2 = cat.as_ordered() + assert cat2.ordered + + assert cat2.set_ordered(True).ordered + assert not cat2.set_ordered(False).ordered + + # removed in 0.19.0 + msg = ( + "property 'ordered' of 'Categorical' object has no setter" + if PY311 + else "can't set attribute" + ) + with pytest.raises(AttributeError, match=msg): + cat.ordered = True + with pytest.raises(AttributeError, match=msg): + cat.ordered = False + + def test_rename_categories(self): + cat = Categorical(["a", "b", "c", "a"]) + + # inplace=False: the old one must not be changed + res = cat.rename_categories([1, 2, 3]) + tm.assert_numpy_array_equal( + res.__array__(), np.array([1, 2, 3, 1], dtype=np.int64) + ) + tm.assert_index_equal(res.categories, Index([1, 2, 3])) + + exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_) + tm.assert_numpy_array_equal(cat.__array__(), exp_cat) + + exp_cat = Index(["a", "b", "c"]) + tm.assert_index_equal(cat.categories, exp_cat) + + # GH18862 (let rename_categories take callables) + result = cat.rename_categories(lambda x: x.upper()) + expected = Categorical(["A", "B", "C", "A"]) + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]]) + def test_rename_categories_wrong_length_raises(self, new_categories): + cat = Categorical(["a", "b", "c", "a"]) + msg = ( + "new categories need to have the same number of items as the " + "old categories!" 
+ ) + with pytest.raises(ValueError, match=msg): + cat.rename_categories(new_categories) + + def test_rename_categories_series(self): + # https://github.com/pandas-dev/pandas/issues/17981 + c = Categorical(["a", "b"]) + result = c.rename_categories(Series([0, 1], index=["a", "b"])) + expected = Categorical([0, 1]) + tm.assert_categorical_equal(result, expected) + + def test_rename_categories_dict(self): + # GH 17336 + cat = Categorical(["a", "b", "c", "d"]) + res = cat.rename_categories({"a": 4, "b": 3, "c": 2, "d": 1}) + expected = Index([4, 3, 2, 1]) + tm.assert_index_equal(res.categories, expected) + + # Test for dicts of smaller length + cat = Categorical(["a", "b", "c", "d"]) + res = cat.rename_categories({"a": 1, "c": 3}) + + expected = Index([1, "b", 3, "d"]) + tm.assert_index_equal(res.categories, expected) + + # Test for dicts with bigger length + cat = Categorical(["a", "b", "c", "d"]) + res = cat.rename_categories({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6}) + expected = Index([1, 2, 3, 4]) + tm.assert_index_equal(res.categories, expected) + + # Test for dicts with no items from old categories + cat = Categorical(["a", "b", "c", "d"]) + res = cat.rename_categories({"f": 1, "g": 3}) + + expected = Index(["a", "b", "c", "d"]) + tm.assert_index_equal(res.categories, expected) + + def test_reorder_categories(self): + cat = Categorical(["a", "b", "c", "a"], ordered=True) + old = cat.copy() + new = Categorical( + ["a", "b", "c", "a"], categories=["c", "b", "a"], ordered=True + ) + + res = cat.reorder_categories(["c", "b", "a"]) + # cat must be the same as before + tm.assert_categorical_equal(cat, old) + # only res is changed + tm.assert_categorical_equal(res, new) + + @pytest.mark.parametrize( + "new_categories", + [ + ["a"], # not all "old" included in "new" + ["a", "b", "d"], # still not all "old" in "new" + ["a", "b", "c", "d"], # all "old" included in "new", but too long + ], + ) + def test_reorder_categories_raises(self, new_categories): + cat = Categorical(["a", "b", "c", "a"], ordered=True) + msg = "items in new_categories are not the same as in old categories" + with pytest.raises(ValueError, match=msg): + cat.reorder_categories(new_categories) + + def test_add_categories(self): + cat = Categorical(["a", "b", "c", "a"], ordered=True) + old = cat.copy() + new = Categorical( + ["a", "b", "c", "a"], categories=["a", "b", "c", "d"], ordered=True + ) + + res = cat.add_categories("d") + tm.assert_categorical_equal(cat, old) + tm.assert_categorical_equal(res, new) + + res = cat.add_categories(["d"]) + tm.assert_categorical_equal(cat, old) + tm.assert_categorical_equal(res, new) + + # GH 9927 + cat = Categorical(list("abc"), ordered=True) + expected = Categorical(list("abc"), categories=list("abcde"), ordered=True) + # test with Series, np.array, index, list + res = cat.add_categories(Series(["d", "e"])) + tm.assert_categorical_equal(res, expected) + res = cat.add_categories(np.array(["d", "e"])) + tm.assert_categorical_equal(res, expected) + res = cat.add_categories(Index(["d", "e"])) + tm.assert_categorical_equal(res, expected) + res = cat.add_categories(["d", "e"]) + tm.assert_categorical_equal(res, expected) + + def test_add_categories_existing_raises(self): + # new is in old categories + cat = Categorical(["a", "b", "c", "d"], ordered=True) + msg = re.escape("new categories must not include old categories: {'d'}") + with pytest.raises(ValueError, match=msg): + cat.add_categories(["d"]) + + def test_add_categories_losing_dtype_information(self): + # GH#48812 + cat = 
Categorical(Series([1, 2], dtype="Int64")) + ser = Series([4], dtype="Int64") + result = cat.add_categories(ser) + expected = Categorical( + Series([1, 2], dtype="Int64"), categories=Series([1, 2, 4], dtype="Int64") + ) + tm.assert_categorical_equal(result, expected) + + cat = Categorical(Series(["a", "b", "a"], dtype=StringDtype())) + ser = Series(["d"], dtype=StringDtype()) + result = cat.add_categories(ser) + expected = Categorical( + Series(["a", "b", "a"], dtype=StringDtype()), + categories=Series(["a", "b", "d"], dtype=StringDtype()), + ) + tm.assert_categorical_equal(result, expected) + + def test_set_categories(self): + cat = Categorical(["a", "b", "c", "a"], ordered=True) + exp_categories = Index(["c", "b", "a"]) + exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_) + + cat = cat.set_categories(["c", "b", "a"]) + res = cat.set_categories(["a", "b", "c"]) + # cat must be the same as before + tm.assert_index_equal(cat.categories, exp_categories) + tm.assert_numpy_array_equal(cat.__array__(), exp_values) + # only res is changed + exp_categories_back = Index(["a", "b", "c"]) + tm.assert_index_equal(res.categories, exp_categories_back) + tm.assert_numpy_array_equal(res.__array__(), exp_values) + + # not all "old" included in "new" -> all not included ones are now + # np.nan + cat = Categorical(["a", "b", "c", "a"], ordered=True) + res = cat.set_categories(["a"]) + tm.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0], dtype=np.int8)) + + # still not all "old" in "new" + res = cat.set_categories(["a", "b", "d"]) + tm.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0], dtype=np.int8)) + tm.assert_index_equal(res.categories, Index(["a", "b", "d"])) + + # all "old" included in "new" + cat = cat.set_categories(["a", "b", "c", "d"]) + exp_categories = Index(["a", "b", "c", "d"]) + tm.assert_index_equal(cat.categories, exp_categories) + + # internals... + c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True) + tm.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0], dtype=np.int8)) + tm.assert_index_equal(c.categories, Index([1, 2, 3, 4])) + + exp = np.array([1, 2, 3, 4, 1], dtype=np.int64) + tm.assert_numpy_array_equal(np.asarray(c), exp) + + # all "pointers" to '4' must be changed from 3 to 0,... 
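+ # (codes index into .categories, so reordering the categories remaps every + # code while the decoded values, np.asarray(c), stay unchanged)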
+ c = c.set_categories([4, 3, 2, 1]) + + # positions are changed + tm.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3], dtype=np.int8)) + + # categories are now in new order + tm.assert_index_equal(c.categories, Index([4, 3, 2, 1])) + + # output is the same + exp = np.array([1, 2, 3, 4, 1], dtype=np.int64) + tm.assert_numpy_array_equal(np.asarray(c), exp) + assert c.min() == 4 + assert c.max() == 1 + + # set_categories should set the ordering if specified + c2 = c.set_categories([4, 3, 2, 1], ordered=False) + assert not c2.ordered + + tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2)) + + # set_categories should pass thru the ordering + c2 = c.set_ordered(False).set_categories([4, 3, 2, 1]) + assert not c2.ordered + + tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2)) + + @pytest.mark.parametrize( + "values, categories, new_categories", + [ + # No NaNs, same cats, same order + (["a", "b", "a"], ["a", "b"], ["a", "b"]), + # No NaNs, same cats, different order + (["a", "b", "a"], ["a", "b"], ["b", "a"]), + # Same, unsorted + (["b", "a", "a"], ["a", "b"], ["a", "b"]), + # No NaNs, same cats, different order + (["b", "a", "a"], ["a", "b"], ["b", "a"]), + # NaNs + (["a", "b", "c"], ["a", "b"], ["a", "b"]), + (["a", "b", "c"], ["a", "b"], ["b", "a"]), + (["b", "a", "c"], ["a", "b"], ["a", "b"]), + (["b", "a", "c"], ["a", "b"], ["a", "b"]), + # Introduce NaNs + (["a", "b", "c"], ["a", "b"], ["a"]), + (["a", "b", "c"], ["a", "b"], ["b"]), + (["b", "a", "c"], ["a", "b"], ["a"]), + (["b", "a", "c"], ["a", "b"], ["a"]), + # No overlap + (["a", "b", "c"], ["a", "b"], ["d", "e"]), + ], + ) + @pytest.mark.parametrize("ordered", [True, False]) + def test_set_categories_many(self, values, categories, new_categories, ordered): + c = Categorical(values, categories) + expected = Categorical(values, new_categories, ordered) + result = c.set_categories(new_categories, ordered=ordered) + tm.assert_categorical_equal(result, expected) + + def test_set_categories_rename_less(self): + # GH 24675 + cat = Categorical(["A", "B"]) + result = cat.set_categories(["A"], rename=True) + expected = Categorical(["A", np.nan]) + tm.assert_categorical_equal(result, expected) + + def test_set_categories_private(self): + cat = Categorical(["a", "b", "c"], categories=["a", "b", "c", "d"]) + cat._set_categories(["a", "c", "d", "e"]) + expected = Categorical(["a", "c", "d"], categories=list("acde")) + tm.assert_categorical_equal(cat, expected) + + # fastpath + cat = Categorical(["a", "b", "c"], categories=["a", "b", "c", "d"]) + cat._set_categories(["a", "c", "d", "e"], fastpath=True) + expected = Categorical(["a", "c", "d"], categories=list("acde")) + tm.assert_categorical_equal(cat, expected) + + def test_remove_categories(self): + cat = Categorical(["a", "b", "c", "a"], ordered=True) + old = cat.copy() + new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"], ordered=True) + + res = cat.remove_categories("c") + tm.assert_categorical_equal(cat, old) + tm.assert_categorical_equal(res, new) + + res = cat.remove_categories(["c"]) + tm.assert_categorical_equal(cat, old) + tm.assert_categorical_equal(res, new) + + @pytest.mark.parametrize("removals", [["c"], ["c", np.nan], "c", ["c", "c"]]) + def test_remove_categories_raises(self, removals): + cat = Categorical(["a", "b", "a"]) + message = re.escape("removals must all be in old categories: {'c'}") + + with pytest.raises(ValueError, match=message): + cat.remove_categories(removals) + + def test_remove_unused_categories(self): + c = Categorical(["a", 
"b", "c", "d", "a"], categories=["a", "b", "c", "d", "e"]) + exp_categories_all = Index(["a", "b", "c", "d", "e"]) + exp_categories_dropped = Index(["a", "b", "c", "d"]) + + tm.assert_index_equal(c.categories, exp_categories_all) + + res = c.remove_unused_categories() + tm.assert_index_equal(res.categories, exp_categories_dropped) + tm.assert_index_equal(c.categories, exp_categories_all) + + # with NaN values (GH11599) + c = Categorical(["a", "b", "c", np.nan], categories=["a", "b", "c", "d", "e"]) + res = c.remove_unused_categories() + tm.assert_index_equal(res.categories, Index(np.array(["a", "b", "c"]))) + exp_codes = np.array([0, 1, 2, -1], dtype=np.int8) + tm.assert_numpy_array_equal(res.codes, exp_codes) + tm.assert_index_equal(c.categories, exp_categories_all) + + val = ["F", np.nan, "D", "B", "D", "F", np.nan] + cat = Categorical(values=val, categories=list("ABCDEFG")) + out = cat.remove_unused_categories() + tm.assert_index_equal(out.categories, Index(["B", "D", "F"])) + exp_codes = np.array([2, -1, 1, 0, 1, 2, -1], dtype=np.int8) + tm.assert_numpy_array_equal(out.codes, exp_codes) + assert out.tolist() == val + + alpha = list("abcdefghijklmnopqrstuvwxyz") + val = np.random.default_rng(2).choice(alpha[::2], 10000).astype("object") + val[np.random.default_rng(2).choice(len(val), 100)] = np.nan + + cat = Categorical(values=val, categories=alpha) + out = cat.remove_unused_categories() + assert out.tolist() == val.tolist() + + +class TestCategoricalAPIWithFactor: + def test_describe(self, factor): + # string type + desc = factor.describe() + assert factor.ordered + exp_index = CategoricalIndex( + ["a", "b", "c"], name="categories", ordered=factor.ordered + ) + expected = DataFrame( + {"counts": [3, 2, 3], "freqs": [3 / 8.0, 2 / 8.0, 3 / 8.0]}, index=exp_index + ) + tm.assert_frame_equal(desc, expected) + + # check unused categories + cat = factor.copy() + cat = cat.set_categories(["a", "b", "c", "d"]) + desc = cat.describe() + + exp_index = CategoricalIndex( + list("abcd"), ordered=factor.ordered, name="categories" + ) + expected = DataFrame( + {"counts": [3, 2, 3, 0], "freqs": [3 / 8.0, 2 / 8.0, 3 / 8.0, 0]}, + index=exp_index, + ) + tm.assert_frame_equal(desc, expected) + + # check an integer one + cat = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]) + desc = cat.describe() + exp_index = CategoricalIndex([1, 2, 3], ordered=cat.ordered, name="categories") + expected = DataFrame( + {"counts": [5, 3, 3], "freqs": [5 / 11.0, 3 / 11.0, 3 / 11.0]}, + index=exp_index, + ) + tm.assert_frame_equal(desc, expected) + + # https://github.com/pandas-dev/pandas/issues/3678 + # describe should work with NaN + cat = Categorical([np.nan, 1, 2, 2]) + desc = cat.describe() + expected = DataFrame( + {"counts": [1, 2, 1], "freqs": [1 / 4.0, 2 / 4.0, 1 / 4.0]}, + index=CategoricalIndex( + [1, 2, np.nan], categories=[1, 2], name="categories" + ), + ) + tm.assert_frame_equal(desc, expected) + + +class TestPrivateCategoricalAPI: + def test_codes_immutable(self): + # Codes should be read only + c = Categorical(["a", "b", "c", "a", np.nan]) + exp = np.array([0, 1, 2, 0, -1], dtype="int8") + tm.assert_numpy_array_equal(c.codes, exp) + + # Assignments to codes should raise + msg = ( + "property 'codes' of 'Categorical' object has no setter" + if PY311 + else "can't set attribute" + ) + with pytest.raises(AttributeError, match=msg): + c.codes = np.array([0, 1, 2, 0, 1], dtype="int8") + + # changes in the codes array should raise + codes = c.codes + + with pytest.raises(ValueError, match="assignment destination is 
read-only"): + codes[4] = 1 + + # But even after getting the codes, the original array should still be + # writeable! + c[4] = "a" + exp = np.array([0, 1, 2, 0, 0], dtype="int8") + tm.assert_numpy_array_equal(c.codes, exp) + c._codes[4] = 2 + exp = np.array([0, 1, 2, 0, 2], dtype="int8") + tm.assert_numpy_array_equal(c.codes, exp) + + @pytest.mark.parametrize( + "codes, old, new, expected", + [ + ([0, 1], ["a", "b"], ["a", "b"], [0, 1]), + ([0, 1], ["b", "a"], ["b", "a"], [0, 1]), + ([0, 1], ["a", "b"], ["b", "a"], [1, 0]), + ([0, 1], ["b", "a"], ["a", "b"], [1, 0]), + ([0, 1, 0, 1], ["a", "b"], ["a", "b", "c"], [0, 1, 0, 1]), + ([0, 1, 2, 2], ["a", "b", "c"], ["a", "b"], [0, 1, -1, -1]), + ([0, 1, -1], ["a", "b", "c"], ["a", "b", "c"], [0, 1, -1]), + ([0, 1, -1], ["a", "b", "c"], ["b"], [-1, 0, -1]), + ([0, 1, -1], ["a", "b", "c"], ["d"], [-1, -1, -1]), + ([0, 1, -1], ["a", "b", "c"], [], [-1, -1, -1]), + ([-1, -1], [], ["a", "b"], [-1, -1]), + ([1, 0], ["b", "a"], ["a", "b"], [0, 1]), + ], + ) + def test_recode_to_categories(self, codes, old, new, expected): + codes = np.asanyarray(codes, dtype=np.int8) + expected = np.asanyarray(expected, dtype=np.int8) + old = Index(old) + new = Index(new) + result = recode_for_categories(codes, old, new) + tm.assert_numpy_array_equal(result, expected) + + def test_recode_to_categories_large(self): + N = 1000 + codes = np.arange(N) + old = Index(codes) + expected = np.arange(N - 1, -1, -1, dtype=np.int16) + new = Index(expected) + result = recode_for_categories(codes, old, new) + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_astype.py new file mode 100644 index 00000000..d2f9f6df --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_astype.py @@ -0,0 +1,155 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + DatetimeIndex, + Interval, + NaT, + Period, + Timestamp, + array, + to_datetime, +) +import pandas._testing as tm + + +class TestAstype: + @pytest.mark.parametrize("cls", [Categorical, CategoricalIndex]) + @pytest.mark.parametrize("values", [[1, np.nan], [Timestamp("2000"), NaT]]) + def test_astype_nan_to_int(self, cls, values): + # GH#28406 + obj = cls(values) + + msg = "Cannot (cast|convert)" + with pytest.raises((ValueError, TypeError), match=msg): + obj.astype(int) + + @pytest.mark.parametrize( + "expected", + [ + array(["2019", "2020"], dtype="datetime64[ns, UTC]"), + array([0, 0], dtype="timedelta64[ns]"), + array([Period("2019"), Period("2020")], dtype="period[A-DEC]"), + array([Interval(0, 1), Interval(1, 2)], dtype="interval"), + array([1, np.nan], dtype="Int64"), + ], + ) + def test_astype_category_to_extension_dtype(self, expected): + # GH#28668 + result = expected.astype("category").astype(expected.dtype) + + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize( + "dtype, expected", + [ + ( + "datetime64[ns]", + np.array(["2015-01-01T00:00:00.000000000"], dtype="datetime64[ns]"), + ), + ( + "datetime64[ns, MET]", + DatetimeIndex([Timestamp("2015-01-01 00:00:00+0100", tz="MET")]).array, + ), + ], + ) + def test_astype_to_datetime64(self, dtype, expected): + # GH#28448 + result = Categorical(["2015-01-01"]).astype(dtype) + assert result == expected + + def test_astype_str_int_categories_to_nullable_int(self): + # GH#39616 + dtype = 
CategoricalDtype([str(i) for i in range(5)]) + codes = np.random.default_rng(2).integers(5, size=20) + arr = Categorical.from_codes(codes, dtype=dtype) + + res = arr.astype("Int64") + expected = array(codes, dtype="Int64") + tm.assert_extension_array_equal(res, expected) + + def test_astype_str_int_categories_to_nullable_float(self): + # GH#39616 + dtype = CategoricalDtype([str(i / 2) for i in range(5)]) + codes = np.random.default_rng(2).integers(5, size=20) + arr = Categorical.from_codes(codes, dtype=dtype) + + res = arr.astype("Float64") + expected = array(codes, dtype="Float64") / 2 + tm.assert_extension_array_equal(res, expected) + + @pytest.mark.parametrize("ordered", [True, False]) + def test_astype(self, ordered): + # string + cat = Categorical(list("abbaaccc"), ordered=ordered) + result = cat.astype(object) + expected = np.array(cat) + tm.assert_numpy_array_equal(result, expected) + + msg = r"Cannot cast object dtype to float64" + with pytest.raises(ValueError, match=msg): + cat.astype(float) + + # numeric + cat = Categorical([0, 1, 2, 2, 1, 0, 1, 0, 2], ordered=ordered) + result = cat.astype(object) + expected = np.array(cat, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = cat.astype(int) + expected = np.array(cat, dtype="int") + tm.assert_numpy_array_equal(result, expected) + + result = cat.astype(float) + expected = np.array(cat, dtype=float) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("dtype_ordered", [True, False]) + @pytest.mark.parametrize("cat_ordered", [True, False]) + def test_astype_category(self, dtype_ordered, cat_ordered): + # GH#10696/GH#18593 + data = list("abcaacbab") + cat = Categorical(data, categories=list("bac"), ordered=cat_ordered) + + # standard categories + dtype = CategoricalDtype(ordered=dtype_ordered) + result = cat.astype(dtype) + expected = Categorical(data, categories=cat.categories, ordered=dtype_ordered) + tm.assert_categorical_equal(result, expected) + + # non-standard categories + dtype = CategoricalDtype(list("adc"), dtype_ordered) + result = cat.astype(dtype) + expected = Categorical(data, dtype=dtype) + tm.assert_categorical_equal(result, expected) + + if dtype_ordered is False: + # dtype='category' can't specify ordered, so only test once + result = cat.astype("category") + expected = cat + tm.assert_categorical_equal(result, expected) + + def test_astype_object_datetime_categories(self): + # GH#40754 + cat = Categorical(to_datetime(["2021-03-27", NaT])) + result = cat.astype(object) + expected = np.array([Timestamp("2021-03-27 00:00:00"), NaT], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + def test_astype_object_timestamp_categories(self): + # GH#18024 + cat = Categorical([Timestamp("2014-01-01")]) + result = cat.astype(object) + expected = np.array([Timestamp("2014-01-01 00:00:00")], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + def test_astype_category_readonly_mask_values(self): + # GH#53658 + arr = array([0, 1, 2], dtype="Int64") + arr._mask.flags["WRITEABLE"] = False + result = arr.astype("category") + expected = array([0, 1, 2], dtype="Int64").astype("category") + tm.assert_extension_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_constructors.py new file mode 100644 index 00000000..e25e31e2 --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_constructors.py @@ -0,0 +1,778 @@ +from datetime import ( + date, + datetime, +) + +import numpy as np +import pytest + +from pandas.core.dtypes.common import ( + is_float_dtype, + is_integer_dtype, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + Categorical, + CategoricalIndex, + DatetimeIndex, + Index, + Interval, + IntervalIndex, + MultiIndex, + NaT, + Series, + Timestamp, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm + + +class TestCategoricalConstructors: + def test_fastpath_deprecated(self): + codes = np.array([1, 2, 3]) + dtype = CategoricalDtype(categories=["a", "b", "c", "d"], ordered=False) + msg = "The 'fastpath' keyword in Categorical is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + Categorical(codes, dtype=dtype, fastpath=True) + + def test_categorical_from_cat_and_dtype_str_preserve_ordered(self): + # GH#49309 we should preserve orderedness in `res` + cat = Categorical([3, 1], categories=[3, 2, 1], ordered=True) + + res = Categorical(cat, dtype="category") + assert res.dtype.ordered + + def test_categorical_disallows_scalar(self): + # GH#38433 + with pytest.raises(TypeError, match="Categorical input must be list-like"): + Categorical("A", categories=["A", "B"]) + + def test_categorical_1d_only(self): + # ndim > 1 + msg = "> 1 ndim Categorical are not supported at this time" + with pytest.raises(NotImplementedError, match=msg): + Categorical(np.array([list("abcd")])) + + def test_validate_ordered(self): + # see gh-14058 + exp_msg = "'ordered' must either be 'True' or 'False'" + exp_err = TypeError + + # This should be a boolean. + ordered = np.array([0, 1, 2]) + + with pytest.raises(exp_err, match=exp_msg): + Categorical([1, 2, 3], ordered=ordered) + + with pytest.raises(exp_err, match=exp_msg): + Categorical.from_codes( + [0, 0, 1], categories=["a", "b", "c"], ordered=ordered + ) + + def test_constructor_empty(self): + # GH 17248 + c = Categorical([]) + expected = Index([]) + tm.assert_index_equal(c.categories, expected) + + c = Categorical([], categories=[1, 2, 3]) + expected = Index([1, 2, 3], dtype=np.int64) + tm.assert_index_equal(c.categories, expected) + + def test_constructor_empty_boolean(self): + # see gh-22702 + cat = Categorical([], categories=[True, False]) + categories = sorted(cat.categories.tolist()) + assert categories == [False, True] + + def test_constructor_tuples(self): + values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object) + result = Categorical(values) + expected = Index([(1,), (1, 2)], tupleize_cols=False) + tm.assert_index_equal(result.categories, expected) + assert result.ordered is False + + def test_constructor_tuples_datetimes(self): + # numpy will auto reshape when all of the tuples are the + # same len, so add an extra one with 2 items and slice it off + values = np.array( + [ + (Timestamp("2010-01-01"),), + (Timestamp("2010-01-02"),), + (Timestamp("2010-01-01"),), + (Timestamp("2010-01-02"),), + ("a", "b"), + ], + dtype=object, + )[:-1] + result = Categorical(values) + expected = Index( + [(Timestamp("2010-01-01"),), (Timestamp("2010-01-02"),)], + tupleize_cols=False, + ) + tm.assert_index_equal(result.categories, expected) + + def test_constructor_unsortable(self): + # it works! 
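+ # (unordered construction just stores the mixed int/datetime values; only + # ordered=True needs to sort them, which is why it raises below)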
+ arr = np.array([1, 2, 3, datetime.now()], dtype="O") + factor = Categorical(arr, ordered=False) + assert not factor.ordered + + # this however will raise as cannot be sorted + msg = ( + "'values' is not ordered, please explicitly specify the " + "categories order by passing in a categories argument." + ) + with pytest.raises(TypeError, match=msg): + Categorical(arr, ordered=True) + + def test_constructor_interval(self): + result = Categorical( + [Interval(1, 2), Interval(2, 3), Interval(3, 6)], ordered=True + ) + ii = IntervalIndex([Interval(1, 2), Interval(2, 3), Interval(3, 6)]) + exp = Categorical(ii, ordered=True) + tm.assert_categorical_equal(result, exp) + tm.assert_index_equal(result.categories, ii) + + def test_constructor(self): + exp_arr = np.array(["a", "b", "c", "a", "b", "c"], dtype=np.object_) + c1 = Categorical(exp_arr) + tm.assert_numpy_array_equal(c1.__array__(), exp_arr) + c2 = Categorical(exp_arr, categories=["a", "b", "c"]) + tm.assert_numpy_array_equal(c2.__array__(), exp_arr) + c2 = Categorical(exp_arr, categories=["c", "b", "a"]) + tm.assert_numpy_array_equal(c2.__array__(), exp_arr) + + # categories must be unique + msg = "Categorical categories must be unique" + with pytest.raises(ValueError, match=msg): + Categorical([1, 2], [1, 2, 2]) + + with pytest.raises(ValueError, match=msg): + Categorical(["a", "b"], ["a", "b", "b"]) + + # The default should be unordered + c1 = Categorical(["a", "b", "c", "a"]) + assert not c1.ordered + + # Categorical as input + c1 = Categorical(["a", "b", "c", "a"]) + c2 = Categorical(c1) + tm.assert_categorical_equal(c1, c2) + + c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) + c2 = Categorical(c1) + tm.assert_categorical_equal(c1, c2) + + c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"]) + c2 = Categorical(c1) + tm.assert_categorical_equal(c1, c2) + + c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"]) + c2 = Categorical(c1, categories=["a", "b", "c"]) + tm.assert_numpy_array_equal(c1.__array__(), c2.__array__()) + tm.assert_index_equal(c2.categories, Index(["a", "b", "c"])) + + # Series of dtype category + c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) + c2 = Categorical(Series(c1)) + tm.assert_categorical_equal(c1, c2) + + c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"]) + c2 = Categorical(Series(c1)) + tm.assert_categorical_equal(c1, c2) + + # Series + c1 = Categorical(["a", "b", "c", "a"]) + c2 = Categorical(Series(["a", "b", "c", "a"])) + tm.assert_categorical_equal(c1, c2) + + c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) + c2 = Categorical(Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"]) + tm.assert_categorical_equal(c1, c2) + + # This should result in integer categories, not float! + cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3]) + assert is_integer_dtype(cat.categories) + + # https://github.com/pandas-dev/pandas/issues/3678 + cat = Categorical([np.nan, 1, 2, 3]) + assert is_integer_dtype(cat.categories) + + # this should result in floats + cat = Categorical([np.nan, 1, 2.0, 3]) + assert is_float_dtype(cat.categories) + + cat = Categorical([np.nan, 1.0, 2.0, 3.0]) + assert is_float_dtype(cat.categories) + + # This doesn't work -> this would probably need some kind of "remember + # the original type" feature to try to cast the array interface result + # to... 
+ + # vals = np.asarray(cat[cat.notna()]) + # assert is_integer_dtype(vals) + + # corner cases + cat = Categorical([1]) + assert len(cat.categories) == 1 + assert cat.categories[0] == 1 + assert len(cat.codes) == 1 + assert cat.codes[0] == 0 + + cat = Categorical(["a"]) + assert len(cat.categories) == 1 + assert cat.categories[0] == "a" + assert len(cat.codes) == 1 + assert cat.codes[0] == 0 + + # two arrays + # - when the first is an integer dtype and the second is not + # - when the resulting codes are all -1/NaN + with tm.assert_produces_warning(None): + Categorical([0, 1, 2, 0, 1, 2], categories=["a", "b", "c"]) + + with tm.assert_produces_warning(None): + Categorical([0, 1, 2, 0, 1, 2], categories=[3, 4, 5]) + + # the next one are from the old docs + with tm.assert_produces_warning(None): + Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) + cat = Categorical([1, 2], categories=[1, 2, 3]) + + # this is a legitimate constructor + with tm.assert_produces_warning(None): + Categorical(np.array([], dtype="int64"), categories=[3, 2, 1], ordered=True) + + def test_constructor_with_existing_categories(self): + # GH25318: constructing with pd.Series used to bogusly skip recoding + # categories + c0 = Categorical(["a", "b", "c", "a"]) + c1 = Categorical(["a", "b", "c", "a"], categories=["b", "c"]) + + c2 = Categorical(c0, categories=c1.categories) + tm.assert_categorical_equal(c1, c2) + + c3 = Categorical(Series(c0), categories=c1.categories) + tm.assert_categorical_equal(c1, c3) + + def test_constructor_not_sequence(self): + # https://github.com/pandas-dev/pandas/issues/16022 + msg = r"^Parameter 'categories' must be list-like, was" + with pytest.raises(TypeError, match=msg): + Categorical(["a", "b"], categories="a") + + def test_constructor_with_null(self): + # Cannot have NaN in categories + msg = "Categorical categories cannot be null" + with pytest.raises(ValueError, match=msg): + Categorical([np.nan, "a", "b", "c"], categories=[np.nan, "a", "b", "c"]) + + with pytest.raises(ValueError, match=msg): + Categorical([None, "a", "b", "c"], categories=[None, "a", "b", "c"]) + + with pytest.raises(ValueError, match=msg): + Categorical( + DatetimeIndex(["nat", "20160101"]), + categories=[NaT, Timestamp("20160101")], + ) + + def test_constructor_with_index(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + tm.assert_categorical_equal(ci.values, Categorical(ci)) + + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + tm.assert_categorical_equal( + ci.values, Categorical(ci.astype(object), categories=ci.categories) + ) + + def test_constructor_with_generator(self): + # This was raising an Error in isna(single_val).any() because isna + # returned a scalar for a generator + + exp = Categorical([0, 1, 2]) + cat = Categorical(x for x in [0, 1, 2]) + tm.assert_categorical_equal(cat, exp) + cat = Categorical(range(3)) + tm.assert_categorical_equal(cat, exp) + + MultiIndex.from_product([range(5), ["a", "b", "c"]]) + + # check that categories accept generators and sequences + cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2])) + tm.assert_categorical_equal(cat, exp) + cat = Categorical([0, 1, 2], categories=range(3)) + tm.assert_categorical_equal(cat, exp) + + def test_constructor_with_rangeindex(self): + # RangeIndex is preserved in Categories + rng = Index(range(3)) + + cat = Categorical(rng) + tm.assert_index_equal(cat.categories, rng, exact=True) + + cat = Categorical([1, 2, 0], categories=rng) + tm.assert_index_equal(cat.categories, rng, exact=True) + + 
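# (the datetimelike cases below expect any freq on the input index to be + # dropped from .categories, hence the explicit expected._data.freq = None) +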
@pytest.mark.parametrize( + "dtl", + [ + date_range("1995-01-01 00:00:00", periods=5, freq="s"), + date_range("1995-01-01 00:00:00", periods=5, freq="s", tz="US/Eastern"), + timedelta_range("1 day", periods=5, freq="s"), + ], + ) + def test_constructor_with_datetimelike(self, dtl): + # see gh-12077 + # constructor with a datetimelike and NaT + + s = Series(dtl) + c = Categorical(s) + + expected = type(dtl)(s) + expected._data.freq = None + + tm.assert_index_equal(c.categories, expected) + tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype="int8")) + + # with NaT + s2 = s.copy() + s2.iloc[-1] = NaT + c = Categorical(s2) + + expected = type(dtl)(s2.dropna()) + expected._data.freq = None + + tm.assert_index_equal(c.categories, expected) + + exp = np.array([0, 1, 2, 3, -1], dtype=np.int8) + tm.assert_numpy_array_equal(c.codes, exp) + + result = repr(c) + assert "NaT" in result + + def test_constructor_from_index_series_datetimetz(self): + idx = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern") + idx = idx._with_freq(None) # freq not preserved in result.categories + result = Categorical(idx) + tm.assert_index_equal(result.categories, idx) + + result = Categorical(Series(idx)) + tm.assert_index_equal(result.categories, idx) + + def test_constructor_date_objects(self): + # we dont cast date objects to timestamps, matching Index constructor + v = date.today() + + cat = Categorical([v, v]) + assert cat.categories.dtype == object + assert type(cat.categories[0]) is date + + def test_constructor_from_index_series_timedelta(self): + idx = timedelta_range("1 days", freq="D", periods=3) + idx = idx._with_freq(None) # freq not preserved in result.categories + result = Categorical(idx) + tm.assert_index_equal(result.categories, idx) + + result = Categorical(Series(idx)) + tm.assert_index_equal(result.categories, idx) + + def test_constructor_from_index_series_period(self): + idx = period_range("2015-01-01", freq="D", periods=3) + result = Categorical(idx) + tm.assert_index_equal(result.categories, idx) + + result = Categorical(Series(idx)) + tm.assert_index_equal(result.categories, idx) + + @pytest.mark.parametrize( + "values", + [ + np.array([1.0, 1.2, 1.8, np.nan]), + np.array([1, 2, 3], dtype="int64"), + ["a", "b", "c", np.nan], + [pd.Period("2014-01"), pd.Period("2014-02"), NaT], + [Timestamp("2014-01-01"), Timestamp("2014-01-02"), NaT], + [ + Timestamp("2014-01-01", tz="US/Eastern"), + Timestamp("2014-01-02", tz="US/Eastern"), + NaT, + ], + ], + ) + def test_constructor_invariant(self, values): + # GH 14190 + c = Categorical(values) + c2 = Categorical(c) + tm.assert_categorical_equal(c, c2) + + @pytest.mark.parametrize("ordered", [True, False]) + def test_constructor_with_dtype(self, ordered): + categories = ["b", "a", "c"] + dtype = CategoricalDtype(categories, ordered=ordered) + result = Categorical(["a", "b", "a", "c"], dtype=dtype) + expected = Categorical( + ["a", "b", "a", "c"], categories=categories, ordered=ordered + ) + tm.assert_categorical_equal(result, expected) + assert result.ordered is ordered + + def test_constructor_dtype_and_others_raises(self): + dtype = CategoricalDtype(["a", "b"], ordered=True) + msg = "Cannot specify `categories` or `ordered` together with `dtype`." 
+ with pytest.raises(ValueError, match=msg): + Categorical(["a", "b"], categories=["a", "b"], dtype=dtype) + + with pytest.raises(ValueError, match=msg): + Categorical(["a", "b"], ordered=True, dtype=dtype) + + with pytest.raises(ValueError, match=msg): + Categorical(["a", "b"], ordered=False, dtype=dtype) + + @pytest.mark.parametrize("categories", [None, ["a", "b"], ["a", "c"]]) + @pytest.mark.parametrize("ordered", [True, False]) + def test_constructor_str_category(self, categories, ordered): + result = Categorical( + ["a", "b"], categories=categories, ordered=ordered, dtype="category" + ) + expected = Categorical(["a", "b"], categories=categories, ordered=ordered) + tm.assert_categorical_equal(result, expected) + + def test_constructor_str_unknown(self): + with pytest.raises(ValueError, match="Unknown dtype"): + Categorical([1, 2], dtype="foo") + + def test_constructor_np_strs(self): + # GH#31499 Hashtable.map_locations needs to work on np.str_ objects + cat = Categorical(["1", "0", "1"], [np.str_("0"), np.str_("1")]) + assert all(isinstance(x, np.str_) for x in cat.categories) + + def test_constructor_from_categorical_with_dtype(self): + dtype = CategoricalDtype(["a", "b", "c"], ordered=True) + values = Categorical(["a", "b", "d"]) + result = Categorical(values, dtype=dtype) + # We use dtype.categories, not values.categories + expected = Categorical( + ["a", "b", "d"], categories=["a", "b", "c"], ordered=True + ) + tm.assert_categorical_equal(result, expected) + + def test_constructor_from_categorical_with_unknown_dtype(self): + dtype = CategoricalDtype(None, ordered=True) + values = Categorical(["a", "b", "d"]) + result = Categorical(values, dtype=dtype) + # We use values.categories, not dtype.categories + expected = Categorical( + ["a", "b", "d"], categories=["a", "b", "d"], ordered=True + ) + tm.assert_categorical_equal(result, expected) + + def test_constructor_from_categorical_string(self): + values = Categorical(["a", "b", "d"]) + # use categories, ordered + result = Categorical( + values, categories=["a", "b", "c"], ordered=True, dtype="category" + ) + expected = Categorical( + ["a", "b", "d"], categories=["a", "b", "c"], ordered=True + ) + tm.assert_categorical_equal(result, expected) + + # No string + result = Categorical(values, categories=["a", "b", "c"], ordered=True) + tm.assert_categorical_equal(result, expected) + + def test_constructor_with_categorical_categories(self): + # GH17884 + expected = Categorical(["a", "b"], categories=["a", "b", "c"]) + + result = Categorical(["a", "b"], categories=Categorical(["a", "b", "c"])) + tm.assert_categorical_equal(result, expected) + + result = Categorical(["a", "b"], categories=CategoricalIndex(["a", "b", "c"])) + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("klass", [lambda x: np.array(x, dtype=object), list]) + def test_construction_with_null(self, klass, nulls_fixture): + # https://github.com/pandas-dev/pandas/issues/31927 + values = klass(["a", nulls_fixture, "b"]) + result = Categorical(values) + + dtype = CategoricalDtype(["a", "b"]) + codes = [0, -1, 1] + expected = Categorical.from_codes(codes=codes, dtype=dtype) + + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("validate", [True, False]) + def test_from_codes_nullable_int_categories(self, any_numeric_ea_dtype, validate): + # GH#39649 + cats = pd.array(range(5), dtype=any_numeric_ea_dtype) + codes = np.random.default_rng(2).integers(5, size=3) + dtype = CategoricalDtype(cats) + arr = Categorical.from_codes(codes, 
dtype=dtype, validate=validate) + assert arr.categories.dtype == cats.dtype + tm.assert_index_equal(arr.categories, Index(cats)) + + def test_from_codes_empty(self): + cat = ["a", "b", "c"] + result = Categorical.from_codes([], categories=cat) + expected = Categorical([], categories=cat) + + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("validate", [True, False]) + def test_from_codes_validate(self, validate): + # GH53122 + dtype = CategoricalDtype(["a", "b"]) + if validate: + with pytest.raises(ValueError, match="codes need to be between "): + Categorical.from_codes([4, 5], dtype=dtype, validate=validate) + else: + # passes, though has incorrect codes, but that's the user responsibility + Categorical.from_codes([4, 5], dtype=dtype, validate=validate) + + def test_from_codes_too_few_categories(self): + dtype = CategoricalDtype(categories=[1, 2]) + msg = "codes need to be between " + with pytest.raises(ValueError, match=msg): + Categorical.from_codes([1, 2], categories=dtype.categories) + with pytest.raises(ValueError, match=msg): + Categorical.from_codes([1, 2], dtype=dtype) + + def test_from_codes_non_int_codes(self): + dtype = CategoricalDtype(categories=[1, 2]) + msg = "codes need to be array-like integers" + with pytest.raises(ValueError, match=msg): + Categorical.from_codes(["a"], categories=dtype.categories) + with pytest.raises(ValueError, match=msg): + Categorical.from_codes(["a"], dtype=dtype) + + def test_from_codes_non_unique_categories(self): + with pytest.raises(ValueError, match="Categorical categories must be unique"): + Categorical.from_codes([0, 1, 2], categories=["a", "a", "b"]) + + def test_from_codes_nan_cat_included(self): + with pytest.raises(ValueError, match="Categorical categories cannot be null"): + Categorical.from_codes([0, 1, 2], categories=["a", "b", np.nan]) + + def test_from_codes_too_negative(self): + dtype = CategoricalDtype(categories=["a", "b", "c"]) + msg = r"codes need to be between -1 and len\(categories\)-1" + with pytest.raises(ValueError, match=msg): + Categorical.from_codes([-2, 1, 2], categories=dtype.categories) + with pytest.raises(ValueError, match=msg): + Categorical.from_codes([-2, 1, 2], dtype=dtype) + + def test_from_codes(self): + dtype = CategoricalDtype(categories=["a", "b", "c"]) + exp = Categorical(["a", "b", "c"], ordered=False) + res = Categorical.from_codes([0, 1, 2], categories=dtype.categories) + tm.assert_categorical_equal(exp, res) + + res = Categorical.from_codes([0, 1, 2], dtype=dtype) + tm.assert_categorical_equal(exp, res) + + @pytest.mark.parametrize("klass", [Categorical, CategoricalIndex]) + def test_from_codes_with_categorical_categories(self, klass): + # GH17884 + expected = Categorical(["a", "b"], categories=["a", "b", "c"]) + + result = Categorical.from_codes([0, 1], categories=klass(["a", "b", "c"])) + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("klass", [Categorical, CategoricalIndex]) + def test_from_codes_with_non_unique_categorical_categories(self, klass): + with pytest.raises(ValueError, match="Categorical categories must be unique"): + Categorical.from_codes([0, 1], klass(["a", "b", "a"])) + + def test_from_codes_with_nan_code(self): + # GH21767 + codes = [1, 2, np.nan] + dtype = CategoricalDtype(categories=["a", "b", "c"]) + with pytest.raises(ValueError, match="codes need to be array-like integers"): + Categorical.from_codes(codes, categories=dtype.categories) + with pytest.raises(ValueError, match="codes need to be array-like integers"): + 
Categorical.from_codes(codes, dtype=dtype) + + @pytest.mark.parametrize("codes", [[1.0, 2.0, 0], [1.1, 2.0, 0]]) + def test_from_codes_with_float(self, codes): + # GH21767 + # float codes should raise even if values are equal to integers + dtype = CategoricalDtype(categories=["a", "b", "c"]) + + msg = "codes need to be array-like integers" + with pytest.raises(ValueError, match=msg): + Categorical.from_codes(codes, dtype.categories) + with pytest.raises(ValueError, match=msg): + Categorical.from_codes(codes, dtype=dtype) + + def test_from_codes_with_dtype_raises(self): + msg = "Cannot specify" + with pytest.raises(ValueError, match=msg): + Categorical.from_codes( + [0, 1], categories=["a", "b"], dtype=CategoricalDtype(["a", "b"]) + ) + + with pytest.raises(ValueError, match=msg): + Categorical.from_codes( + [0, 1], ordered=True, dtype=CategoricalDtype(["a", "b"]) + ) + + def test_from_codes_neither(self): + msg = "Both were None" + with pytest.raises(ValueError, match=msg): + Categorical.from_codes([0, 1]) + + def test_from_codes_with_nullable_int(self): + codes = pd.array([0, 1], dtype="Int64") + categories = ["a", "b"] + + result = Categorical.from_codes(codes, categories=categories) + expected = Categorical.from_codes(codes.to_numpy(int), categories=categories) + + tm.assert_categorical_equal(result, expected) + + def test_from_codes_with_nullable_int_na_raises(self): + codes = pd.array([0, None], dtype="Int64") + categories = ["a", "b"] + + msg = "codes cannot contain NA values" + with pytest.raises(ValueError, match=msg): + Categorical.from_codes(codes, categories=categories) + + @pytest.mark.parametrize("dtype", [None, "category"]) + def test_from_inferred_categories(self, dtype): + cats = ["a", "b"] + codes = np.array([0, 0, 1, 1], dtype="i8") + result = Categorical._from_inferred_categories(cats, codes, dtype) + expected = Categorical.from_codes(codes, cats) + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("dtype", [None, "category"]) + def test_from_inferred_categories_sorts(self, dtype): + cats = ["b", "a"] + codes = np.array([0, 1, 1, 1], dtype="i8") + result = Categorical._from_inferred_categories(cats, codes, dtype) + expected = Categorical.from_codes([1, 0, 0, 0], ["a", "b"]) + tm.assert_categorical_equal(result, expected) + + def test_from_inferred_categories_dtype(self): + cats = ["a", "b", "d"] + codes = np.array([0, 1, 0, 2], dtype="i8") + dtype = CategoricalDtype(["c", "b", "a"], ordered=True) + result = Categorical._from_inferred_categories(cats, codes, dtype) + expected = Categorical( + ["a", "b", "a", "d"], categories=["c", "b", "a"], ordered=True + ) + tm.assert_categorical_equal(result, expected) + + def test_from_inferred_categories_coerces(self): + cats = ["1", "2", "bad"] + codes = np.array([0, 0, 1, 2], dtype="i8") + dtype = CategoricalDtype([1, 2]) + result = Categorical._from_inferred_categories(cats, codes, dtype) + expected = Categorical([1, 1, 2, np.nan]) + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("ordered", [None, True, False]) + def test_construction_with_ordered(self, ordered): + # GH 9347, 9190 + cat = Categorical([0, 1, 2], ordered=ordered) + assert cat.ordered == bool(ordered) + + def test_constructor_imaginary(self): + values = [1, 2, 3 + 1j] + c1 = Categorical(values) + tm.assert_index_equal(c1.categories, Index(values)) + tm.assert_numpy_array_equal(np.array(c1), np.array(values)) + + def test_constructor_string_and_tuples(self): + # GH 21416 + c = Categorical(np.array(["c", ("a", "b"), 
("b", "a"), "c"], dtype=object)) + expected_index = Index([("a", "b"), ("b", "a"), "c"]) + assert c.categories.equals(expected_index) + + def test_interval(self): + idx = pd.interval_range(0, 10, periods=10) + cat = Categorical(idx, categories=idx) + expected_codes = np.arange(10, dtype="int8") + tm.assert_numpy_array_equal(cat.codes, expected_codes) + tm.assert_index_equal(cat.categories, idx) + + # infer categories + cat = Categorical(idx) + tm.assert_numpy_array_equal(cat.codes, expected_codes) + tm.assert_index_equal(cat.categories, idx) + + # list values + cat = Categorical(list(idx)) + tm.assert_numpy_array_equal(cat.codes, expected_codes) + tm.assert_index_equal(cat.categories, idx) + + # list values, categories + cat = Categorical(list(idx), categories=list(idx)) + tm.assert_numpy_array_equal(cat.codes, expected_codes) + tm.assert_index_equal(cat.categories, idx) + + # shuffled + values = idx.take([1, 2, 0]) + cat = Categorical(values, categories=idx) + tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype="int8")) + tm.assert_index_equal(cat.categories, idx) + + # extra + values = pd.interval_range(8, 11, periods=3) + cat = Categorical(values, categories=idx) + expected_codes = np.array([8, 9, -1], dtype="int8") + tm.assert_numpy_array_equal(cat.codes, expected_codes) + tm.assert_index_equal(cat.categories, idx) + + # overlapping + idx = IntervalIndex([Interval(0, 2), Interval(0, 1)]) + cat = Categorical(idx, categories=idx) + expected_codes = np.array([0, 1], dtype="int8") + tm.assert_numpy_array_equal(cat.codes, expected_codes) + tm.assert_index_equal(cat.categories, idx) + + def test_categorical_extension_array_nullable(self, nulls_fixture): + # GH: + arr = pd.arrays.StringArray._from_sequence([nulls_fixture] * 2) + result = Categorical(arr) + assert arr.dtype == result.categories.dtype + expected = Categorical(Series([pd.NA, pd.NA], dtype=arr.dtype)) + tm.assert_categorical_equal(result, expected) + + def test_from_sequence_copy(self): + cat = Categorical(np.arange(5).repeat(2)) + result = Categorical._from_sequence(cat, dtype=None, copy=False) + + # more generally, we'd be OK with a view + assert result._codes is cat._codes + + result = Categorical._from_sequence(cat, dtype=None, copy=True) + + assert not tm.shares_memory(result, cat) + + def test_constructor_datetime64_non_nano(self): + categories = np.arange(10).view("M8[D]") + values = categories[::2].copy() + + cat = Categorical(values, categories=categories) + assert (cat == values).all() + + def test_constructor_preserves_freq(self): + # GH33830 freq retention in categorical + dti = date_range("2016-01-01", periods=5) + + expected = dti.freq + + cat = Categorical(dti) + result = cat.categories.freq + + assert expected == result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_dtypes.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_dtypes.py new file mode 100644 index 00000000..525663ca --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_dtypes.py @@ -0,0 +1,139 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import CategoricalDtype + +from pandas import ( + Categorical, + CategoricalIndex, + Index, + IntervalIndex, + Series, + Timestamp, +) +import pandas._testing as tm + + +class TestCategoricalDtypes: + def test_categories_match_up_to_permutation(self): + # test dtype comparisons between cats + + c1 = Categorical(list("aabca"), categories=list("abc"), ordered=False) + c2 = 
Categorical(list("aabca"), categories=list("cab"), ordered=False) + c3 = Categorical(list("aabca"), categories=list("cab"), ordered=True) + assert c1._categories_match_up_to_permutation(c1) + assert c2._categories_match_up_to_permutation(c2) + assert c3._categories_match_up_to_permutation(c3) + assert c1._categories_match_up_to_permutation(c2) + assert not c1._categories_match_up_to_permutation(c3) + assert not c1._categories_match_up_to_permutation(Index(list("aabca"))) + assert not c1._categories_match_up_to_permutation(c1.astype(object)) + assert c1._categories_match_up_to_permutation(CategoricalIndex(c1)) + assert c1._categories_match_up_to_permutation( + CategoricalIndex(c1, categories=list("cab")) + ) + assert not c1._categories_match_up_to_permutation( + CategoricalIndex(c1, ordered=True) + ) + + # GH 16659 + s1 = Series(c1) + s2 = Series(c2) + s3 = Series(c3) + assert c1._categories_match_up_to_permutation(s1) + assert c2._categories_match_up_to_permutation(s2) + assert c3._categories_match_up_to_permutation(s3) + assert c1._categories_match_up_to_permutation(s2) + assert not c1._categories_match_up_to_permutation(s3) + assert not c1._categories_match_up_to_permutation(s1.astype(object)) + + def test_set_dtype_same(self): + c = Categorical(["a", "b", "c"]) + result = c._set_dtype(CategoricalDtype(["a", "b", "c"])) + tm.assert_categorical_equal(result, c) + + def test_set_dtype_new_categories(self): + c = Categorical(["a", "b", "c"]) + result = c._set_dtype(CategoricalDtype(list("abcd"))) + tm.assert_numpy_array_equal(result.codes, c.codes) + tm.assert_index_equal(result.dtype.categories, Index(list("abcd"))) + + @pytest.mark.parametrize( + "values, categories, new_categories", + [ + # No NaNs, same cats, same order + (["a", "b", "a"], ["a", "b"], ["a", "b"]), + # No NaNs, same cats, different order + (["a", "b", "a"], ["a", "b"], ["b", "a"]), + # Same, unsorted + (["b", "a", "a"], ["a", "b"], ["a", "b"]), + # No NaNs, same cats, different order + (["b", "a", "a"], ["a", "b"], ["b", "a"]), + # NaNs + (["a", "b", "c"], ["a", "b"], ["a", "b"]), + (["a", "b", "c"], ["a", "b"], ["b", "a"]), + (["b", "a", "c"], ["a", "b"], ["a", "b"]), + (["b", "a", "c"], ["a", "b"], ["a", "b"]), + # Introduce NaNs + (["a", "b", "c"], ["a", "b"], ["a"]), + (["a", "b", "c"], ["a", "b"], ["b"]), + (["b", "a", "c"], ["a", "b"], ["a"]), + (["b", "a", "c"], ["a", "b"], ["a"]), + # No overlap + (["a", "b", "c"], ["a", "b"], ["d", "e"]), + ], + ) + @pytest.mark.parametrize("ordered", [True, False]) + def test_set_dtype_many(self, values, categories, new_categories, ordered): + c = Categorical(values, categories) + expected = Categorical(values, new_categories, ordered) + result = c._set_dtype(expected.dtype) + tm.assert_categorical_equal(result, expected) + + def test_set_dtype_no_overlap(self): + c = Categorical(["a", "b", "c"], ["d", "e"]) + result = c._set_dtype(CategoricalDtype(["a", "b"])) + expected = Categorical([None, None, None], categories=["a", "b"]) + tm.assert_categorical_equal(result, expected) + + def test_codes_dtypes(self): + # GH 8453 + result = Categorical(["foo", "bar", "baz"]) + assert result.codes.dtype == "int8" + + result = Categorical([f"foo{i:05d}" for i in range(400)]) + assert result.codes.dtype == "int16" + + result = Categorical([f"foo{i:05d}" for i in range(40000)]) + assert result.codes.dtype == "int32" + + # adding cats + result = Categorical(["foo", "bar", "baz"]) + assert result.codes.dtype == "int8" + result = result.add_categories([f"foo{i:05d}" for i in range(400)]) + 
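# Added gloss, not from the pandas sources: the dtype steps in this test
+ # follow from the codes having to index every category plus the -1
+ # missing-value sentinel, so once the category count outgrows int8
+ # (roughly len(categories) > np.iinfo(np.int8).max == 127), a wider
+ # integer dtype is needed -- hence int16 after adding 400 categories.
+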
assert result.codes.dtype == "int16" + + # removing cats + result = result.remove_categories([f"foo{i:05d}" for i in range(300)]) + assert result.codes.dtype == "int8" + + def test_iter_python_types(self): + # GH-19909 + cat = Categorical([1, 2]) + assert isinstance(next(iter(cat)), int) + assert isinstance(cat.tolist()[0], int) + + def test_iter_python_types_datetime(self): + cat = Categorical([Timestamp("2017-01-01"), Timestamp("2017-01-02")]) + assert isinstance(next(iter(cat)), Timestamp) + assert isinstance(cat.tolist()[0], Timestamp) + + def test_interval_index_category(self): + # GH 38316 + index = IntervalIndex.from_breaks(np.arange(3, dtype="uint64")) + + result = CategoricalIndex(index).dtype.categories + expected = IntervalIndex.from_arrays( + [0, 1], [1, 2], dtype="interval[uint64, right]" + ) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_indexing.py new file mode 100644 index 00000000..e15bac9f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_indexing.py @@ -0,0 +1,384 @@ +import math + +import numpy as np +import pytest + +from pandas import ( + NA, + Categorical, + CategoricalIndex, + Index, + Interval, + IntervalIndex, + NaT, + PeriodIndex, + Series, + Timedelta, + Timestamp, +) +import pandas._testing as tm +import pandas.core.common as com + + +class TestCategoricalIndexingWithFactor: + def test_getitem(self, factor): + assert factor[0] == "a" + assert factor[-1] == "c" + + subf = factor[[0, 1, 2]] + tm.assert_numpy_array_equal(subf._codes, np.array([0, 1, 1], dtype=np.int8)) + + subf = factor[np.asarray(factor) == "c"] + tm.assert_numpy_array_equal(subf._codes, np.array([2, 2, 2], dtype=np.int8)) + + def test_setitem(self, factor): + # int/positional + c = factor.copy() + c[0] = "b" + assert c[0] == "b" + c[-1] = "a" + assert c[-1] == "a" + + # boolean + c = factor.copy() + indexer = np.zeros(len(c), dtype="bool") + indexer[0] = True + indexer[-1] = True + c[indexer] = "c" + expected = Categorical(["c", "b", "b", "a", "a", "c", "c", "c"], ordered=True) + + tm.assert_categorical_equal(c, expected) + + @pytest.mark.parametrize( + "other", + [Categorical(["b", "a"]), Categorical(["b", "a"], categories=["b", "a"])], + ) + def test_setitem_same_but_unordered(self, other): + # GH-24142 + target = Categorical(["a", "b"], categories=["a", "b"]) + mask = np.array([True, False]) + target[mask] = other[mask] + expected = Categorical(["b", "b"], categories=["a", "b"]) + tm.assert_categorical_equal(target, expected) + + @pytest.mark.parametrize( + "other", + [ + Categorical(["b", "a"], categories=["b", "a", "c"]), + Categorical(["b", "a"], categories=["a", "b", "c"]), + Categorical(["a", "a"], categories=["a"]), + Categorical(["b", "b"], categories=["b"]), + ], + ) + def test_setitem_different_unordered_raises(self, other): + # GH-24142 + target = Categorical(["a", "b"], categories=["a", "b"]) + mask = np.array([True, False]) + msg = "Cannot set a Categorical with another, without identical categories" + with pytest.raises(TypeError, match=msg): + target[mask] = other[mask] + + @pytest.mark.parametrize( + "other", + [ + Categorical(["b", "a"]), + Categorical(["b", "a"], categories=["b", "a"], ordered=True), + Categorical(["b", "a"], categories=["a", "b", "c"], ordered=True), + ], + ) + def test_setitem_same_ordered_raises(self, other): + # Gh-24142 + target = 
Categorical(["a", "b"], categories=["a", "b"], ordered=True) + mask = np.array([True, False]) + msg = "Cannot set a Categorical with another, without identical categories" + with pytest.raises(TypeError, match=msg): + target[mask] = other[mask] + + def test_setitem_tuple(self): + # GH#20439 + cat = Categorical([(0, 1), (0, 2), (0, 1)]) + + # This should not raise + cat[1] = cat[0] + assert cat[1] == (0, 1) + + def test_setitem_listlike(self): + # GH#9469 + # properly coerce the input indexers + + cat = Categorical( + np.random.default_rng(2).integers(0, 5, size=150000).astype(np.int8) + ).add_categories([-1000]) + indexer = np.array([100000]).astype(np.int64) + cat[indexer] = -1000 + + # we are asserting the code result here + # which maps to the -1000 category + result = cat.codes[np.array([100000]).astype(np.int64)] + tm.assert_numpy_array_equal(result, np.array([5], dtype="int8")) + + +class TestCategoricalIndexing: + def test_getitem_slice(self): + cat = Categorical(["a", "b", "c", "d", "a", "b", "c"]) + sliced = cat[3] + assert sliced == "d" + + sliced = cat[3:5] + expected = Categorical(["d", "a"], categories=["a", "b", "c", "d"]) + tm.assert_categorical_equal(sliced, expected) + + def test_getitem_listlike(self): + # GH 9469 + # properly coerce the input indexers + + c = Categorical( + np.random.default_rng(2).integers(0, 5, size=150000).astype(np.int8) + ) + result = c.codes[np.array([100000]).astype(np.int64)] + expected = c[np.array([100000]).astype(np.int64)].codes + tm.assert_numpy_array_equal(result, expected) + + def test_periodindex(self): + idx1 = PeriodIndex( + ["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"], freq="M" + ) + + cat1 = Categorical(idx1) + str(cat1) + exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.int8) + exp_idx = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M") + tm.assert_numpy_array_equal(cat1._codes, exp_arr) + tm.assert_index_equal(cat1.categories, exp_idx) + + idx2 = PeriodIndex( + ["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"], freq="M" + ) + cat2 = Categorical(idx2, ordered=True) + str(cat2) + exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.int8) + exp_idx2 = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M") + tm.assert_numpy_array_equal(cat2._codes, exp_arr) + tm.assert_index_equal(cat2.categories, exp_idx2) + + idx3 = PeriodIndex( + [ + "2013-12", + "2013-11", + "2013-10", + "2013-09", + "2013-08", + "2013-07", + "2013-05", + ], + freq="M", + ) + cat3 = Categorical(idx3, ordered=True) + exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype=np.int8) + exp_idx = PeriodIndex( + [ + "2013-05", + "2013-07", + "2013-08", + "2013-09", + "2013-10", + "2013-11", + "2013-12", + ], + freq="M", + ) + tm.assert_numpy_array_equal(cat3._codes, exp_arr) + tm.assert_index_equal(cat3.categories, exp_idx) + + @pytest.mark.parametrize( + "null_val", + [None, np.nan, NaT, NA, math.nan, "NaT", "nat", "NAT", "nan", "NaN", "NAN"], + ) + def test_periodindex_on_null_types(self, null_val): + # GH 46673 + result = PeriodIndex(["2022-04-06", "2022-04-07", null_val], freq="D") + expected = PeriodIndex(["2022-04-06", "2022-04-07", "NaT"], dtype="period[D]") + assert result[2] is NaT + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]]) + def test_categories_assignments_wrong_length_raises(self, new_categories): + cat = Categorical(["a", "b", "c", "a"]) + msg = ( + "new categories need to have the same number of items " + "as the old categories!" 
+ ) + with pytest.raises(ValueError, match=msg): + cat.rename_categories(new_categories) + + # Combinations of sorted/unique: + @pytest.mark.parametrize( + "idx_values", [[1, 2, 3, 4], [1, 3, 2, 4], [1, 3, 3, 4], [1, 2, 2, 4]] + ) + # Combinations of missing/unique + @pytest.mark.parametrize("key_values", [[1, 2], [1, 5], [1, 1], [5, 5]]) + @pytest.mark.parametrize("key_class", [Categorical, CategoricalIndex]) + @pytest.mark.parametrize("dtype", [None, "category", "key"]) + def test_get_indexer_non_unique(self, idx_values, key_values, key_class, dtype): + # GH 21448 + key = key_class(key_values, categories=range(1, 5)) + + if dtype == "key": + dtype = key.dtype + + # Test for flat index and CategoricalIndex with same/different cats: + idx = Index(idx_values, dtype=dtype) + expected, exp_miss = idx.get_indexer_non_unique(key_values) + result, res_miss = idx.get_indexer_non_unique(key) + + tm.assert_numpy_array_equal(expected, result) + tm.assert_numpy_array_equal(exp_miss, res_miss) + + exp_unique = idx.unique().get_indexer(key_values) + res_unique = idx.unique().get_indexer(key) + tm.assert_numpy_array_equal(res_unique, exp_unique) + + def test_where_unobserved_nan(self): + ser = Series(Categorical(["a", "b"])) + result = ser.where([True, False]) + expected = Series(Categorical(["a", None], categories=["a", "b"])) + tm.assert_series_equal(result, expected) + + # all NA + ser = Series(Categorical(["a", "b"])) + result = ser.where([False, False]) + expected = Series(Categorical([None, None], categories=["a", "b"])) + tm.assert_series_equal(result, expected) + + def test_where_unobserved_categories(self): + ser = Series(Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"])) + result = ser.where([True, True, False], other="b") + expected = Series(Categorical(["a", "b", "b"], categories=ser.cat.categories)) + tm.assert_series_equal(result, expected) + + def test_where_other_categorical(self): + ser = Series(Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"])) + other = Categorical(["b", "c", "a"], categories=["a", "c", "b", "d"]) + result = ser.where([True, False, True], other) + expected = Series(Categorical(["a", "c", "c"], dtype=ser.dtype)) + tm.assert_series_equal(result, expected) + + def test_where_new_category_raises(self): + ser = Series(Categorical(["a", "b", "c"])) + msg = "Cannot setitem on a Categorical with a new category" + with pytest.raises(TypeError, match=msg): + ser.where([True, False, True], "d") + + def test_where_ordered_differs_rasies(self): + ser = Series( + Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"], ordered=True) + ) + other = Categorical( + ["b", "c", "a"], categories=["a", "c", "b", "d"], ordered=True + ) + with pytest.raises(TypeError, match="without identical categories"): + ser.where([True, False, True], other) + + +class TestContains: + def test_contains(self): + # GH#21508 + cat = Categorical(list("aabbca"), categories=list("cab")) + + assert "b" in cat + assert "z" not in cat + assert np.nan not in cat + with pytest.raises(TypeError, match="unhashable type: 'list'"): + assert [1] in cat + + # assert codes NOT in index + assert 0 not in cat + assert 1 not in cat + + cat = Categorical(list("aabbca") + [np.nan], categories=list("cab")) + assert np.nan in cat + + @pytest.mark.parametrize( + "item, expected", + [ + (Interval(0, 1), True), + (1.5, True), + (Interval(0.5, 1.5), False), + ("a", False), + (Timestamp(1), False), + (Timedelta(1), False), + ], + ids=str, + ) + def test_contains_interval(self, item, expected): + # 
GH#23705 + cat = Categorical(IntervalIndex.from_breaks(range(3))) + result = item in cat + assert result is expected + + def test_contains_list(self): + # GH#21729 + cat = Categorical([1, 2, 3]) + + assert "a" not in cat + + with pytest.raises(TypeError, match="unhashable type"): + ["a"] in cat + + with pytest.raises(TypeError, match="unhashable type"): + ["a", "b"] in cat + + +@pytest.mark.parametrize("index", [True, False]) +def test_mask_with_boolean(index): + ser = Series(range(3)) + idx = Categorical([True, False, True]) + if index: + idx = CategoricalIndex(idx) + + assert com.is_bool_indexer(idx) + result = ser[idx] + expected = ser[idx.astype("object")] + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("index", [True, False]) +def test_mask_with_boolean_na_treated_as_false(index): + # https://github.com/pandas-dev/pandas/issues/31503 + ser = Series(range(3)) + idx = Categorical([True, False, None]) + if index: + idx = CategoricalIndex(idx) + + result = ser[idx] + expected = ser[idx.fillna(False)] + + tm.assert_series_equal(result, expected) + + +@pytest.fixture +def non_coercible_categorical(monkeypatch): + """ + Monkeypatch Categorical.__array__ to ensure no implicit conversion. + + Raises + ------ + ValueError + When Categorical.__array__ is called. + """ + + # TODO(Categorical): identify other places where this may be + # useful and move to a conftest.py + def array(self, dtype=None): + raise ValueError("I cannot be converted.") + + with monkeypatch.context() as m: + m.setattr(Categorical, "__array__", array) + yield + + +def test_series_at(): + arr = Categorical(["a", "b", "c"]) + ser = Series(arr) + result = ser.at[0] + assert result == "a" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_map.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_map.py new file mode 100644 index 00000000..3d41b7cc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_map.py @@ -0,0 +1,154 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Categorical, + Index, + Series, +) +import pandas._testing as tm + + +@pytest.fixture(params=[None, "ignore"]) +def na_action(request): + return request.param + + +@pytest.mark.parametrize( + "data, categories", + [ + (list("abcbca"), list("cab")), + (pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)), + ], + ids=["string", "interval"], +) +def test_map_str(data, categories, ordered, na_action): + # GH 31202 - override base class since we want to maintain categorical/ordered + cat = Categorical(data, categories=categories, ordered=ordered) + result = cat.map(str, na_action=na_action) + expected = Categorical( + map(str, data), categories=map(str, categories), ordered=ordered + ) + tm.assert_categorical_equal(result, expected) + + +def test_map(na_action): + cat = Categorical(list("ABABC"), categories=list("CBA"), ordered=True) + result = cat.map(lambda x: x.lower(), na_action=na_action) + exp = Categorical(list("ababc"), categories=list("cba"), ordered=True) + tm.assert_categorical_equal(result, exp) + + cat = Categorical(list("ABABC"), categories=list("BAC"), ordered=False) + result = cat.map(lambda x: x.lower(), na_action=na_action) + exp = Categorical(list("ababc"), categories=list("bac"), ordered=False) + tm.assert_categorical_equal(result, exp) + + # GH 12766: Return an index not an array + result = cat.map(lambda x: 1, na_action=na_action) + exp = Index(np.array([1] * 5, 
dtype=np.int64)) + tm.assert_index_equal(result, exp) + + # change categories dtype + cat = Categorical(list("ABABC"), categories=list("BAC"), ordered=False) + + def f(x): + return {"A": 10, "B": 20, "C": 30}.get(x) + + result = cat.map(f, na_action=na_action) + exp = Categorical([10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False) + tm.assert_categorical_equal(result, exp) + + mapper = Series([10, 20, 30], index=["A", "B", "C"]) + result = cat.map(mapper, na_action=na_action) + tm.assert_categorical_equal(result, exp) + + result = cat.map({"A": 10, "B": 20, "C": 30}, na_action=na_action) + tm.assert_categorical_equal(result, exp) + + +@pytest.mark.parametrize( + ("data", "f", "expected"), + ( + ([1, 1, np.nan], pd.isna, Index([False, False, True])), + ([1, 2, np.nan], pd.isna, Index([False, False, True])), + ([1, 1, np.nan], {1: False}, Categorical([False, False, np.nan])), + ([1, 2, np.nan], {1: False, 2: False}, Index([False, False, np.nan])), + ( + [1, 1, np.nan], + Series([False, False]), + Categorical([False, False, np.nan]), + ), + ( + [1, 2, np.nan], + Series([False] * 3), + Index([False, False, np.nan]), + ), + ), +) +def test_map_with_nan_none(data, f, expected): # GH 24241 + values = Categorical(data) + result = values.map(f, na_action=None) + if isinstance(expected, Categorical): + tm.assert_categorical_equal(result, expected) + else: + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + ("data", "f", "expected"), + ( + ([1, 1, np.nan], pd.isna, Categorical([False, False, np.nan])), + ([1, 2, np.nan], pd.isna, Index([False, False, np.nan])), + ([1, 1, np.nan], {1: False}, Categorical([False, False, np.nan])), + ([1, 2, np.nan], {1: False, 2: False}, Index([False, False, np.nan])), + ( + [1, 1, np.nan], + Series([False, False]), + Categorical([False, False, np.nan]), + ), + ( + [1, 2, np.nan], + Series([False, False, False]), + Index([False, False, np.nan]), + ), + ), +) +def test_map_with_nan_ignore(data, f, expected): # GH 24241 + values = Categorical(data) + result = values.map(f, na_action="ignore") + if data[1] == 1: + tm.assert_categorical_equal(result, expected) + else: + tm.assert_index_equal(result, expected) + + +def test_map_with_dict_or_series(na_action): + orig_values = ["a", "B", 1, "a"] + new_values = ["one", 2, 3.0, "one"] + cat = Categorical(orig_values) + + mapper = Series(new_values[:-1], index=orig_values[:-1]) + result = cat.map(mapper, na_action=na_action) + + # Order of categories in result can be different + expected = Categorical(new_values, categories=[3.0, 2, "one"]) + tm.assert_categorical_equal(result, expected) + + mapper = dict(zip(orig_values[:-1], new_values[:-1])) + result = cat.map(mapper, na_action=na_action) + # Order of categories in result can be different + tm.assert_categorical_equal(result, expected) + + +def test_map_na_action_no_default_deprecated(): + # GH51645 + cat = Categorical(["a", "b", "c"]) + msg = ( + "The default value of 'ignore' for the `na_action` parameter in " + "pandas.Categorical.map is deprecated and will be " + "changed to 'None' in a future version. 
Please set na_action to the " + "desired value to avoid seeing this warning" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + cat.map(lambda x: x) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_missing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_missing.py new file mode 100644 index 00000000..0eeb01b7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_missing.py @@ -0,0 +1,216 @@ +import collections + +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Index, + Series, + isna, +) +import pandas._testing as tm + + +class TestCategoricalMissing: + def test_isna(self): + exp = np.array([False, False, True]) + cat = Categorical(["a", "b", np.nan]) + res = cat.isna() + + tm.assert_numpy_array_equal(res, exp) + + def test_na_flags_int_categories(self): + # #1457 + + categories = list(range(10)) + labels = np.random.default_rng(2).integers(0, 10, 20) + labels[::5] = -1 + + cat = Categorical(labels, categories) + repr(cat) + + tm.assert_numpy_array_equal(isna(cat), labels == -1) + + def test_nan_handling(self): + # Nans are represented as -1 in codes + c = Categorical(["a", "b", np.nan, "a"]) + tm.assert_index_equal(c.categories, Index(["a", "b"])) + tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8)) + c[1] = np.nan + tm.assert_index_equal(c.categories, Index(["a", "b"])) + tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0], dtype=np.int8)) + + # Adding nan to categories should make assigned nan point to the + # category! + c = Categorical(["a", "b", np.nan, "a"]) + tm.assert_index_equal(c.categories, Index(["a", "b"])) + tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8)) + + def test_set_dtype_nans(self): + c = Categorical(["a", "b", np.nan]) + result = c._set_dtype(CategoricalDtype(["a", "c"])) + tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1], dtype="int8")) + + def test_set_item_nan(self): + cat = Categorical([1, 2, 3]) + cat[1] = np.nan + + exp = Categorical([1, np.nan, 3], categories=[1, 2, 3]) + tm.assert_categorical_equal(cat, exp) + + @pytest.mark.parametrize( + "fillna_kwargs, msg", + [ + ( + {"value": 1, "method": "ffill"}, + "Cannot specify both 'value' and 'method'.", + ), + ({}, "Must specify a fill 'value' or 'method'."), + ({"method": "bad"}, "Invalid fill method. 
Expecting .* bad"), + ( + {"value": Series([1, 2, 3, 4, "a"])}, + "Cannot setitem on a Categorical with a new category", + ), + ], + ) + def test_fillna_raises(self, fillna_kwargs, msg): + # https://github.com/pandas-dev/pandas/issues/19682 + # https://github.com/pandas-dev/pandas/issues/13628 + cat = Categorical([1, 2, 3, None, None]) + + if len(fillna_kwargs) == 1 and "value" in fillna_kwargs: + err = TypeError + else: + err = ValueError + + with pytest.raises(err, match=msg): + cat.fillna(**fillna_kwargs) + + @pytest.mark.parametrize("named", [True, False]) + def test_fillna_iterable_category(self, named): + # https://github.com/pandas-dev/pandas/issues/21097 + if named: + Point = collections.namedtuple("Point", "x y") + else: + Point = lambda *args: args # tuple + cat = Categorical(np.array([Point(0, 0), Point(0, 1), None], dtype=object)) + result = cat.fillna(Point(0, 0)) + expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)]) + + tm.assert_categorical_equal(result, expected) + + # Case where the Point is not among our categories; we want ValueError, + # not NotImplementedError GH#41914 + cat = Categorical(np.array([Point(1, 0), Point(0, 1), None], dtype=object)) + msg = "Cannot setitem on a Categorical with a new category" + with pytest.raises(TypeError, match=msg): + cat.fillna(Point(0, 0)) + + def test_fillna_array(self): + # accept Categorical or ndarray value if it holds appropriate values + cat = Categorical(["A", "B", "C", None, None]) + + other = cat.fillna("C") + result = cat.fillna(other) + tm.assert_categorical_equal(result, other) + assert isna(cat[-1]) # didn't modify original inplace + + other = np.array(["A", "B", "C", "B", "A"]) + result = cat.fillna(other) + expected = Categorical(["A", "B", "C", "B", "A"], dtype=cat.dtype) + tm.assert_categorical_equal(result, expected) + assert isna(cat[-1]) # didn't modify original inplace + + @pytest.mark.parametrize( + "values, expected", + [ + ([1, 2, 3], np.array([False, False, False])), + ([1, 2, np.nan], np.array([False, False, True])), + ([1, 2, np.inf], np.array([False, False, True])), + ([1, 2, pd.NA], np.array([False, False, True])), + ], + ) + def test_use_inf_as_na(self, values, expected): + # https://github.com/pandas-dev/pandas/issues/33594 + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.use_inf_as_na", True): + cat = Categorical(values) + result = cat.isna() + tm.assert_numpy_array_equal(result, expected) + + result = Series(cat).isna() + expected = Series(expected) + tm.assert_series_equal(result, expected) + + result = DataFrame(cat).isna() + expected = DataFrame(expected) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "values, expected", + [ + ([1, 2, 3], np.array([False, False, False])), + ([1, 2, np.nan], np.array([False, False, True])), + ([1, 2, np.inf], np.array([False, False, True])), + ([1, 2, pd.NA], np.array([False, False, True])), + ], + ) + def test_use_inf_as_na_outside_context(self, values, expected): + # https://github.com/pandas-dev/pandas/issues/33594 + # Using isna directly for Categorical will fail in general here + cat = Categorical(values) + + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.use_inf_as_na", True): + result = isna(cat) + tm.assert_numpy_array_equal(result, expected) + + result = isna(Series(cat)) + expected = Series(expected) + tm.assert_series_equal(result, expected) + 
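# Added gloss, not from the pandas sources: the same inf-as-missing
+ # expectation is checked once more for the 2-D container below --
+ # DataFrame.isna() should also report np.inf as NA while the deprecated
+ # "mode.use_inf_as_na" option is active.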
+ result = isna(DataFrame(cat)) + expected = DataFrame(expected) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "a1, a2, categories", + [ + (["a", "b", "c"], [np.nan, "a", "b"], ["a", "b", "c"]), + ([1, 2, 3], [np.nan, 1, 2], [1, 2, 3]), + ], + ) + def test_compare_categorical_with_missing(self, a1, a2, categories): + # GH 28384 + cat_type = CategoricalDtype(categories) + + # != + result = Series(a1, dtype=cat_type) != Series(a2, dtype=cat_type) + expected = Series(a1) != Series(a2) + tm.assert_series_equal(result, expected) + + # == + result = Series(a1, dtype=cat_type) == Series(a2, dtype=cat_type) + expected = Series(a1) == Series(a2) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "na_value, dtype", + [ + (pd.NaT, "datetime64[ns]"), + (None, "float64"), + (np.nan, "float64"), + (pd.NA, "float64"), + ], + ) + def test_categorical_only_missing_values_no_cast(self, na_value, dtype): + # GH#44900 + result = Categorical([na_value, na_value]) + tm.assert_index_equal(result.categories, Index([], dtype=dtype)) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_operators.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_operators.py new file mode 100644 index 00000000..a1e50917 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_operators.py @@ -0,0 +1,413 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestCategoricalOpsWithFactor: + def test_categories_none_comparisons(self): + factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True) + tm.assert_categorical_equal(factor, factor) + + def test_comparisons(self, factor): + result = factor[factor == "a"] + expected = factor[np.asarray(factor) == "a"] + tm.assert_categorical_equal(result, expected) + + result = factor[factor != "a"] + expected = factor[np.asarray(factor) != "a"] + tm.assert_categorical_equal(result, expected) + + result = factor[factor < "c"] + expected = factor[np.asarray(factor) < "c"] + tm.assert_categorical_equal(result, expected) + + result = factor[factor > "a"] + expected = factor[np.asarray(factor) > "a"] + tm.assert_categorical_equal(result, expected) + + result = factor[factor >= "b"] + expected = factor[np.asarray(factor) >= "b"] + tm.assert_categorical_equal(result, expected) + + result = factor[factor <= "b"] + expected = factor[np.asarray(factor) <= "b"] + tm.assert_categorical_equal(result, expected) + + n = len(factor) + + other = factor[np.random.default_rng(2).permutation(n)] + result = factor == other + expected = np.asarray(factor) == np.asarray(other) + tm.assert_numpy_array_equal(result, expected) + + result = factor == "d" + expected = np.zeros(len(factor), dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + # comparisons with categoricals + cat_rev = Categorical(["a", "b", "c"], categories=["c", "b", "a"], ordered=True) + cat_rev_base = Categorical( + ["b", "b", "b"], categories=["c", "b", "a"], ordered=True + ) + cat = Categorical(["a", "b", "c"], ordered=True) + cat_base = Categorical(["b", "b", "b"], categories=cat.categories, ordered=True) + + # comparisons need to take categories ordering into account + res_rev = cat_rev > cat_rev_base + exp_rev = np.array([True, False, False]) + tm.assert_numpy_array_equal(res_rev, exp_rev) + + res_rev = cat_rev < cat_rev_base + 
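# Added gloss, not from the pandas sources: ordered comparisons go by
+ # position in the category list, not lexical order. Under
+ # categories=["c", "b", "a"] the value "a" is the largest, so
+ # ["a", "b", "c"] < ["b", "b", "b"] is expected to yield
+ # [False, False, True].
+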
exp_rev = np.array([False, False, True])
+ tm.assert_numpy_array_equal(res_rev, exp_rev)
+
+ res = cat > cat_base
+ exp = np.array([False, False, True])
+ tm.assert_numpy_array_equal(res, exp)
+
+ # Only categories with same categories can be compared
+ msg = "Categoricals can only be compared if 'categories' are the same"
+ with pytest.raises(TypeError, match=msg):
+ cat > cat_rev
+
+ cat_rev_base2 = Categorical(["b", "b", "b"], categories=["c", "b", "a", "d"])
+
+ with pytest.raises(TypeError, match=msg):
+ cat_rev > cat_rev_base2
+
+ # Only categories with same ordering information can be compared
+ cat_unordered = cat.set_ordered(False)
+ assert not (cat > cat).any()
+
+ with pytest.raises(TypeError, match=msg):
+ cat > cat_unordered
+
+ # comparison (in both directions) with Series will raise
+ s = Series(["b", "b", "b"])
+ msg = (
+ "Cannot compare a Categorical for op __gt__ with type "
+ r"<class 'numpy\.ndarray'>"
+ )
+ with pytest.raises(TypeError, match=msg):
+ cat > s
+ with pytest.raises(TypeError, match=msg):
+ cat_rev > s
+ with pytest.raises(TypeError, match=msg):
+ s < cat
+ with pytest.raises(TypeError, match=msg):
+ s < cat_rev
+
+ # comparison with numpy.array will raise in both directions, but only on
+ # newer numpy versions
+ a = np.array(["b", "b", "b"])
+ with pytest.raises(TypeError, match=msg):
+ cat > a
+ with pytest.raises(TypeError, match=msg):
+ cat_rev > a
+
+ # Make sure that unequal comparisons take the categories order into
+ # account
+ cat_rev = Categorical(list("abc"), categories=list("cba"), ordered=True)
+ exp = np.array([True, False, False])
+ res = cat_rev > "b"
+ tm.assert_numpy_array_equal(res, exp)
+
+ # check that zero-dim array gets unboxed
+ res = cat_rev > np.array("b")
+ tm.assert_numpy_array_equal(res, exp)
+
+
+class TestCategoricalOps:
+ @pytest.mark.parametrize(
+ "categories",
+ [["a", "b"], [0, 1], [Timestamp("2019"), Timestamp("2020")]],
+ )
+ def test_not_equal_with_na(self, categories):
+ # https://github.com/pandas-dev/pandas/issues/32276
+ c1 = Categorical.from_codes([-1, 0], categories=categories)
+ c2 = Categorical.from_codes([0, 1], categories=categories)
+
+ result = c1 != c2
+
+ assert result.all()
+
+ def test_compare_frame(self):
+ # GH#24282 check that Categorical.__cmp__(DataFrame) defers to frame
+ data = ["a", "b", 2, "a"]
+ cat = Categorical(data)
+
+ df = DataFrame(cat)
+
+ result = cat == df.T
+ expected = DataFrame([[True, True, True, True]])
+ tm.assert_frame_equal(result, expected)
+
+ result = cat[::-1] != df.T
+ expected = DataFrame([[False, True, True, False]])
+ tm.assert_frame_equal(result, expected)
+
+ def test_compare_frame_raises(self, comparison_op):
+ # alignment raises unless we transpose
+ op = comparison_op
+ cat = Categorical(["a", "b", 2, "a"])
+ df = DataFrame(cat)
+ msg = "Unable to coerce to Series, length must be 1: given 4"
+ with pytest.raises(ValueError, match=msg):
+ op(cat, df)
+
+ def test_datetime_categorical_comparison(self):
+ dt_cat = Categorical(date_range("2014-01-01", periods=3), ordered=True)
+ tm.assert_numpy_array_equal(dt_cat > dt_cat[0], np.array([False, True, True]))
+ tm.assert_numpy_array_equal(dt_cat[0] < dt_cat, np.array([False, True, True]))
+
+ def test_reflected_comparison_with_scalars(self):
+ # GH8658
+ cat = Categorical([1, 2, 3], ordered=True)
+ tm.assert_numpy_array_equal(cat > cat[0], np.array([False, True, True]))
+ tm.assert_numpy_array_equal(cat[0] < cat, np.array([False, True, True]))
+
+ def test_comparison_with_unknown_scalars(self):
+ # 
https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057 + # and following comparisons with scalars not in categories should raise + # for unequal comps, but not for equal/not equal + cat = Categorical([1, 2, 3], ordered=True) + + msg = "Invalid comparison between dtype=category and int" + with pytest.raises(TypeError, match=msg): + cat < 4 + with pytest.raises(TypeError, match=msg): + cat > 4 + with pytest.raises(TypeError, match=msg): + 4 < cat + with pytest.raises(TypeError, match=msg): + 4 > cat + + tm.assert_numpy_array_equal(cat == 4, np.array([False, False, False])) + tm.assert_numpy_array_equal(cat != 4, np.array([True, True, True])) + + def test_comparison_with_tuple(self): + cat = Categorical(np.array(["foo", (0, 1), 3, (0, 1)], dtype=object)) + + result = cat == "foo" + expected = np.array([True, False, False, False], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + result = cat == (0, 1) + expected = np.array([False, True, False, True], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + result = cat != (0, 1) + tm.assert_numpy_array_equal(result, ~expected) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_comparison_of_ordered_categorical_with_nan_to_scalar( + self, compare_operators_no_eq_ne + ): + # https://github.com/pandas-dev/pandas/issues/26504 + # BUG: fix ordered categorical comparison with missing values (#26504 ) + # and following comparisons with scalars in categories with missing + # values should be evaluated as False + + cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True) + scalar = 2 + expected = getattr(np.array(cat), compare_operators_no_eq_ne)(scalar) + actual = getattr(cat, compare_operators_no_eq_ne)(scalar) + tm.assert_numpy_array_equal(actual, expected) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_comparison_of_ordered_categorical_with_nan_to_listlike( + self, compare_operators_no_eq_ne + ): + # https://github.com/pandas-dev/pandas/issues/26504 + # and following comparisons of missing values in ordered Categorical + # with listlike should be evaluated as False + + cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True) + other = Categorical([2, 2, 2, 2], categories=[1, 2, 3], ordered=True) + expected = getattr(np.array(cat), compare_operators_no_eq_ne)(2) + actual = getattr(cat, compare_operators_no_eq_ne)(other) + tm.assert_numpy_array_equal(actual, expected) + + @pytest.mark.parametrize( + "data,reverse,base", + [(list("abc"), list("cba"), list("bbb")), ([1, 2, 3], [3, 2, 1], [2, 2, 2])], + ) + def test_comparisons(self, data, reverse, base): + cat_rev = Series(Categorical(data, categories=reverse, ordered=True)) + cat_rev_base = Series(Categorical(base, categories=reverse, ordered=True)) + cat = Series(Categorical(data, ordered=True)) + cat_base = Series( + Categorical(base, categories=cat.cat.categories, ordered=True) + ) + s = Series(base) + a = np.array(base) + + # comparisons need to take categories ordering into account + res_rev = cat_rev > cat_rev_base + exp_rev = Series([True, False, False]) + tm.assert_series_equal(res_rev, exp_rev) + + res_rev = cat_rev < cat_rev_base + exp_rev = Series([False, False, True]) + tm.assert_series_equal(res_rev, exp_rev) + + res = cat > cat_base + exp = Series([False, False, True]) + tm.assert_series_equal(res, exp) + + scalar = base[1] + res = cat > scalar + exp = Series([False, False, True]) + exp2 = cat.values > scalar + tm.assert_series_equal(res, exp) + 
tm.assert_numpy_array_equal(res.values, exp2)
+ res_rev = cat_rev > scalar
+ exp_rev = Series([True, False, False])
+ exp_rev2 = cat_rev.values > scalar
+ tm.assert_series_equal(res_rev, exp_rev)
+ tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
+
+ # Only categories with same categories can be compared
+ msg = "Categoricals can only be compared if 'categories' are the same"
+ with pytest.raises(TypeError, match=msg):
+ cat > cat_rev
+
+ # categorical cannot be compared to Series or numpy array, and also
+ # not the other way around
+ msg = (
+ "Cannot compare a Categorical for op __gt__ with type "
+ r"<class 'numpy\.ndarray'>"
+ )
+ with pytest.raises(TypeError, match=msg):
+ cat > s
+ with pytest.raises(TypeError, match=msg):
+ cat_rev > s
+ with pytest.raises(TypeError, match=msg):
+ cat > a
+ with pytest.raises(TypeError, match=msg):
+ cat_rev > a
+
+ with pytest.raises(TypeError, match=msg):
+ s < cat
+ with pytest.raises(TypeError, match=msg):
+ s < cat_rev
+
+ with pytest.raises(TypeError, match=msg):
+ a < cat
+ with pytest.raises(TypeError, match=msg):
+ a < cat_rev
+
+ @pytest.mark.parametrize(
+ "ctor",
+ [
+ lambda *args, **kwargs: Categorical(*args, **kwargs),
+ lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),
+ ],
+ )
+ def test_unordered_different_order_equal(self, ctor):
+ # https://github.com/pandas-dev/pandas/issues/16014
+ c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False)
+ c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False)
+ assert (c1 == c2).all()
+
+ c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False)
+ c2 = ctor(["b", "a"], categories=["b", "a"], ordered=False)
+ assert (c1 != c2).all()
+
+ c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False)
+ c2 = ctor(["b", "b"], categories=["b", "a"], ordered=False)
+ assert (c1 != c2).all()
+
+ c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False)
+ c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False)
+ result = c1 == c2
+ tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))
+
+ def test_unordered_different_categories_raises(self):
+ c1 = Categorical(["a", "b"], categories=["a", "b"], ordered=False)
+ c2 = Categorical(["a", "c"], categories=["c", "a"], ordered=False)
+
+ with pytest.raises(TypeError, match=("Categoricals can only be compared")):
+ c1 == c2
+
+ def test_compare_different_lengths(self):
+ c1 = Categorical([], categories=["a", "b"])
+ c2 = Categorical([], categories=["a"])
+
+ msg = "Categoricals can only be compared if 'categories' are the same."
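+ # Added gloss, not from the pandas sources: both categoricals here are
+ # empty, so the raise below is driven purely by the mismatched category
+ # lists, not by the (zero) number of values being compared.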
+ with pytest.raises(TypeError, match=msg): + c1 == c2 + + def test_compare_unordered_different_order(self): + # https://github.com/pandas-dev/pandas/issues/16603#issuecomment- + # 349290078 + a = Categorical(["a"], categories=["a", "b"]) + b = Categorical(["b"], categories=["b", "a"]) + assert not a.equals(b) + + def test_numeric_like_ops(self): + df = DataFrame({"value": np.random.default_rng(2).integers(0, 10000, 100)}) + labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)] + cat_labels = Categorical(labels, labels) + + df = df.sort_values(by=["value"], ascending=True) + df["value_group"] = pd.cut( + df.value, range(0, 10500, 500), right=False, labels=cat_labels + ) + + # numeric ops should not succeed + for op, str_rep in [ + ("__add__", r"\+"), + ("__sub__", "-"), + ("__mul__", r"\*"), + ("__truediv__", "/"), + ]: + msg = f"Series cannot perform the operation {str_rep}|unsupported operand" + with pytest.raises(TypeError, match=msg): + getattr(df, op)(df) + + # reduction ops should not succeed (unless specifically defined, e.g. + # min/max) + s = df["value_group"] + for op in ["kurt", "skew", "var", "std", "mean", "sum", "median"]: + msg = f"does not support reduction '{op}'" + with pytest.raises(TypeError, match=msg): + getattr(s, op)(numeric_only=False) + + def test_numeric_like_ops_series(self): + # numpy ops + s = Series(Categorical([1, 2, 3, 4])) + with pytest.raises(TypeError, match="does not support reduction 'sum'"): + np.sum(s) + + @pytest.mark.parametrize( + "op, str_rep", + [ + ("__add__", r"\+"), + ("__sub__", "-"), + ("__mul__", r"\*"), + ("__truediv__", "/"), + ], + ) + def test_numeric_like_ops_series_arith(self, op, str_rep): + # numeric ops on a Series + s = Series(Categorical([1, 2, 3, 4])) + msg = f"Series cannot perform the operation {str_rep}|unsupported operand" + with pytest.raises(TypeError, match=msg): + getattr(s, op)(2) + + def test_numeric_like_ops_series_invalid(self): + # invalid ufunc + s = Series(Categorical([1, 2, 3, 4])) + msg = "Object with dtype category cannot perform the numpy op log" + with pytest.raises(TypeError, match=msg): + np.log(s) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_replace.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_replace.py new file mode 100644 index 00000000..0611d04d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_replace.py @@ -0,0 +1,91 @@ +import pytest + +import pandas as pd +from pandas import Categorical +import pandas._testing as tm + + +@pytest.mark.parametrize( + "to_replace,value,expected,flip_categories", + [ + # one-to-one + (1, 2, [2, 2, 3], False), + (1, 4, [4, 2, 3], False), + (4, 1, [1, 2, 3], False), + (5, 6, [1, 2, 3], False), + # many-to-one + ([1], 2, [2, 2, 3], False), + ([1, 2], 3, [3, 3, 3], False), + ([1, 2], 4, [4, 4, 3], False), + ((1, 2, 4), 5, [5, 5, 3], False), + ((5, 6), 2, [1, 2, 3], False), + ([1], [2], [2, 2, 3], False), + ([1, 4], [5, 2], [5, 2, 3], False), + # GH49404: overlap between to_replace and value + ([1, 2, 3], [2, 3, 4], [2, 3, 4], False), + # GH50872, GH46884: replace with null + (1, None, [None, 2, 3], False), + (1, pd.NA, [None, 2, 3], False), + # check_categorical sorts categories, which crashes on mixed dtypes + (3, "4", [1, 2, "4"], False), + ([1, 2, "3"], "5", ["5", "5", 3], True), + ], +) +def test_replace_categorical_series(to_replace, value, expected, flip_categories): + # GH 31720 + + ser = pd.Series([1, 2, 3], dtype="category") + result = 
ser.replace(to_replace, value) + expected = pd.Series(expected, dtype="category") + ser.replace(to_replace, value, inplace=True) + + if flip_categories: + expected = expected.cat.set_categories(expected.cat.categories[::-1]) + + tm.assert_series_equal(expected, result, check_category_order=False) + tm.assert_series_equal(expected, ser, check_category_order=False) + + +@pytest.mark.parametrize( + "to_replace, value, result, expected_error_msg", + [ + ("b", "c", ["a", "c"], "Categorical.categories are different"), + ("c", "d", ["a", "b"], None), + # https://github.com/pandas-dev/pandas/issues/33288 + ("a", "a", ["a", "b"], None), + ("b", None, ["a", None], "Categorical.categories length are different"), + ], +) +def test_replace_categorical(to_replace, value, result, expected_error_msg): + # GH#26988 + cat = Categorical(["a", "b"]) + expected = Categorical(result) + result = pd.Series(cat, copy=False).replace(to_replace, value)._values + + tm.assert_categorical_equal(result, expected) + if to_replace == "b": # the "c" test is supposed to be unchanged + with pytest.raises(AssertionError, match=expected_error_msg): + # ensure non-inplace call does not affect original + tm.assert_categorical_equal(cat, expected) + + ser = pd.Series(cat, copy=False) + ser.replace(to_replace, value, inplace=True) + tm.assert_categorical_equal(cat, expected) + + +def test_replace_categorical_ea_dtype(): + # GH49404 + cat = Categorical(pd.array(["a", "b"], dtype="string")) + result = pd.Series(cat).replace(["a", "b"], ["c", pd.NA])._values + expected = Categorical(pd.array(["c", pd.NA], dtype="string")) + tm.assert_categorical_equal(result, expected) + + +def test_replace_maintain_ordering(): + # GH51016 + dtype = pd.CategoricalDtype([0, 1, 2], ordered=True) + ser = pd.Series([0, 1, 2], dtype=dtype) + result = ser.replace(0, 2) + expected_dtype = pd.CategoricalDtype([1, 2], ordered=True) + expected = pd.Series([2, 1, 2], dtype=expected_dtype) + tm.assert_series_equal(expected, result, check_category_order=True) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_repr.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_repr.py new file mode 100644 index 00000000..cdf5d967 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_repr.py @@ -0,0 +1,535 @@ +import numpy as np + +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + Series, + date_range, + option_context, + period_range, + timedelta_range, +) + + +class TestCategoricalReprWithFactor: + def test_print(self, factor): + expected = [ + "['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']", + "Categories (3, object): ['a' < 'b' < 'c']", + ] + expected = "\n".join(expected) + actual = repr(factor) + assert actual == expected + + +class TestCategoricalRepr: + def test_big_print(self): + codes = np.array([0, 1, 2, 0, 1, 2] * 100) + dtype = CategoricalDtype(categories=["a", "b", "c"]) + factor = Categorical.from_codes(codes, dtype=dtype) + expected = [ + "['a', 'b', 'c', 'a', 'b', ..., 'b', 'c', 'a', 'b', 'c']", + "Length: 600", + "Categories (3, object): ['a', 'b', 'c']", + ] + expected = "\n".join(expected) + + actual = repr(factor) + + assert actual == expected + + def test_empty_print(self): + factor = Categorical([], ["a", "b", "c"]) + expected = "[], Categories (3, object): ['a', 'b', 'c']" + actual = repr(factor) + assert actual == expected + + assert expected == actual + factor = Categorical([], ["a", "b", "c"], ordered=True) + expected = 
"[], Categories (3, object): ['a' < 'b' < 'c']" + actual = repr(factor) + assert expected == actual + + factor = Categorical([], []) + expected = "[], Categories (0, object): []" + assert expected == repr(factor) + + def test_print_none_width(self): + # GH10087 + a = Series(Categorical([1, 2, 3, 4])) + exp = ( + "0 1\n1 2\n2 3\n3 4\n" + "dtype: category\nCategories (4, int64): [1, 2, 3, 4]" + ) + + with option_context("display.width", None): + assert exp == repr(a) + + def test_unicode_print(self): + c = Categorical(["aaaaa", "bb", "cccc"] * 20) + expected = """\ +['aaaaa', 'bb', 'cccc', 'aaaaa', 'bb', ..., 'bb', 'cccc', 'aaaaa', 'bb', 'cccc'] +Length: 60 +Categories (3, object): ['aaaaa', 'bb', 'cccc']""" + + assert repr(c) == expected + + c = Categorical(["ああああ", "いいいいい", "ううううううう"] * 20) + expected = """\ +['ああああ', 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', ..., 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', 'ううううううう'] +Length: 60 +Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']""" # noqa: E501 + + assert repr(c) == expected + + # unicode option should not affect to Categorical, as it doesn't care + # the repr width + with option_context("display.unicode.east_asian_width", True): + c = Categorical(["ああああ", "いいいいい", "ううううううう"] * 20) + expected = """['ああああ', 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', ..., 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', 'ううううううう'] +Length: 60 +Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']""" # noqa: E501 + + assert repr(c) == expected + + def test_categorical_repr(self): + c = Categorical([1, 2, 3]) + exp = """[1, 2, 3] +Categories (3, int64): [1, 2, 3]""" + + assert repr(c) == exp + + c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3]) + exp = """[1, 2, 3, 1, 2, 3] +Categories (3, int64): [1, 2, 3]""" + + assert repr(c) == exp + + c = Categorical([1, 2, 3, 4, 5] * 10) + exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5] +Length: 50 +Categories (5, int64): [1, 2, 3, 4, 5]""" + + assert repr(c) == exp + + c = Categorical(np.arange(20, dtype=np.int64)) + exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19] +Length: 20 +Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]""" + + assert repr(c) == exp + + def test_categorical_repr_ordered(self): + c = Categorical([1, 2, 3], ordered=True) + exp = """[1, 2, 3] +Categories (3, int64): [1 < 2 < 3]""" + + assert repr(c) == exp + + c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3], ordered=True) + exp = """[1, 2, 3, 1, 2, 3] +Categories (3, int64): [1 < 2 < 3]""" + + assert repr(c) == exp + + c = Categorical([1, 2, 3, 4, 5] * 10, ordered=True) + exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5] +Length: 50 +Categories (5, int64): [1 < 2 < 3 < 4 < 5]""" + + assert repr(c) == exp + + c = Categorical(np.arange(20, dtype=np.int64), ordered=True) + exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19] +Length: 20 +Categories (20, int64): [0 < 1 < 2 < 3 ... 
16 < 17 < 18 < 19]""" + + assert repr(c) == exp + + def test_categorical_repr_datetime(self): + idx = date_range("2011-01-01 09:00", freq="H", periods=5) + c = Categorical(idx) + + exp = ( + "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, " + "2011-01-01 12:00:00, 2011-01-01 13:00:00]\n" + "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, " + "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n" + " 2011-01-01 12:00:00, " + "2011-01-01 13:00:00]" + "" + ) + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = ( + "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, " + "2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, " + "2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, " + "2011-01-01 13:00:00]\n" + "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, " + "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n" + " 2011-01-01 12:00:00, " + "2011-01-01 13:00:00]" + ) + + assert repr(c) == exp + + idx = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern") + c = Categorical(idx) + exp = ( + "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, " + "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, " + "2011-01-01 13:00:00-05:00]\n" + "Categories (5, datetime64[ns, US/Eastern]): " + "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n" + " " + "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n" + " " + "2011-01-01 13:00:00-05:00]" + ) + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = ( + "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, " + "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, " + "2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, " + "2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, " + "2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n" + "Categories (5, datetime64[ns, US/Eastern]): " + "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n" + " " + "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n" + " " + "2011-01-01 13:00:00-05:00]" + ) + + assert repr(c) == exp + + def test_categorical_repr_datetime_ordered(self): + idx = date_range("2011-01-01 09:00", freq="H", periods=5) + c = Categorical(idx, ordered=True) + exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00] +Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < + 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00] +Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < + 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa: E501 + + assert repr(c) == exp + + idx = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern") + c = Categorical(idx, ordered=True) + exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00] +Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < + 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 < + 2011-01-01 13:00:00-05:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), 
categories=idx, ordered=True) + exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00] +Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < + 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 < + 2011-01-01 13:00:00-05:00]""" # noqa: E501 + + assert repr(c) == exp + + def test_categorical_repr_int_with_nan(self): + c = Categorical([1, 2, np.nan]) + c_exp = """[1, 2, NaN]\nCategories (2, int64): [1, 2]""" + assert repr(c) == c_exp + + s = Series([1, 2, np.nan], dtype="object").astype("category") + s_exp = """0 1\n1 2\n2 NaN +dtype: category +Categories (2, int64): [1, 2]""" + assert repr(s) == s_exp + + def test_categorical_repr_period(self): + idx = period_range("2011-01-01 09:00", freq="H", periods=5) + c = Categorical(idx) + exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] +Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, + 2011-01-01 13:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] +Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, + 2011-01-01 13:00]""" # noqa: E501 + + assert repr(c) == exp + + idx = period_range("2011-01", freq="M", periods=5) + c = Categorical(idx) + exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05] +Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05] +Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" # noqa: E501 + + assert repr(c) == exp + + def test_categorical_repr_period_ordered(self): + idx = period_range("2011-01-01 09:00", freq="H", periods=5) + c = Categorical(idx, ordered=True) + exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] +Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 < + 2011-01-01 13:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] +Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 < + 2011-01-01 13:00]""" # noqa: E501 + + assert repr(c) == exp + + idx = period_range("2011-01", freq="M", periods=5) + c = Categorical(idx, ordered=True) + exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05] +Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05] +Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" # noqa: 
E501 + + assert repr(c) == exp + + def test_categorical_repr_timedelta(self): + idx = timedelta_range("1 days", periods=5) + c = Categorical(idx) + exp = """[1 days, 2 days, 3 days, 4 days, 5 days] +Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days] +Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" # noqa: E501 + + assert repr(c) == exp + + idx = timedelta_range("1 hours", periods=20) + c = Categorical(idx) + exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] +Length: 20 +Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, + 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00, + 18 days 01:00:00, 19 days 01:00:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] +Length: 40 +Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, + 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00, + 18 days 01:00:00, 19 days 01:00:00]""" # noqa: E501 + + assert repr(c) == exp + + def test_categorical_repr_timedelta_ordered(self): + idx = timedelta_range("1 days", periods=5) + c = Categorical(idx, ordered=True) + exp = """[1 days, 2 days, 3 days, 4 days, 5 days] +Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days] +Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa: E501 + + assert repr(c) == exp + + idx = timedelta_range("1 hours", periods=20) + c = Categorical(idx, ordered=True) + exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] +Length: 20 +Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < + 3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 < + 18 days 01:00:00 < 19 days 01:00:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] +Length: 40 +Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < + 3 days 01:00:00 ... 
16 days 01:00:00 < 17 days 01:00:00 < + 18 days 01:00:00 < 19 days 01:00:00]""" # noqa: E501 + + assert repr(c) == exp + + def test_categorical_index_repr(self): + idx = CategoricalIndex(Categorical([1, 2, 3])) + exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == exp + + i = CategoricalIndex(Categorical(np.arange(10, dtype=np.int64))) + exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, ..., 6, 7, 8, 9], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + def test_categorical_index_repr_ordered(self): + i = CategoricalIndex(Categorical([1, 2, 3], ordered=True)) + exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + i = CategoricalIndex(Categorical(np.arange(10, dtype=np.int64), ordered=True)) + exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, ..., 6, 7, 8, 9], ordered=True, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + def test_categorical_index_repr_datetime(self): + idx = date_range("2011-01-01 09:00", freq="H", periods=5) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', + '2011-01-01 11:00:00', '2011-01-01 12:00:00', + '2011-01-01 13:00:00'], + categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + idx = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern") + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', + '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', + '2011-01-01 13:00:00-05:00'], + categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + def test_categorical_index_repr_datetime_ordered(self): + idx = date_range("2011-01-01 09:00", freq="H", periods=5) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', + '2011-01-01 11:00:00', '2011-01-01 12:00:00', + '2011-01-01 13:00:00'], + categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + idx = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern") + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', + '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', + '2011-01-01 13:00:00-05:00'], + categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + i = CategoricalIndex(Categorical(idx.append(idx), ordered=True)) + exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', + '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', + '2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00', + '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00', + '2011-01-01 12:00:00-05:00', '2011-01-01 
13:00:00-05:00'], + categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + def test_categorical_index_repr_period(self): + # test all length + idx = period_range("2011-01-01 09:00", freq="H", periods=1) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + idx = period_range("2011-01-01 09:00", freq="H", periods=2) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + idx = period_range("2011-01-01 09:00", freq="H", periods=3) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + idx = period_range("2011-01-01 09:00", freq="H", periods=5) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', + '2011-01-01 12:00', '2011-01-01 13:00'], + categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + i = CategoricalIndex(Categorical(idx.append(idx))) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', + '2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00', + '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00', + '2011-01-01 13:00'], + categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + idx = period_range("2011-01", freq="M", periods=5) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + def test_categorical_index_repr_period_ordered(self): + idx = period_range("2011-01-01 09:00", freq="H", periods=5) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', + '2011-01-01 12:00', '2011-01-01 13:00'], + categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + idx = period_range("2011-01", freq="M", periods=5) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + def test_categorical_index_repr_timedelta(self): + idx = timedelta_range("1 days", periods=5) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days, 2 days, 3 days, 4 days, 5 days], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == 
exp + + idx = timedelta_range("1 hours", periods=10) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00', + '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00', + '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00', + '9 days 01:00:00'], + categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, 8 days 01:00:00, 9 days 01:00:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + def test_categorical_index_repr_timedelta_ordered(self): + idx = timedelta_range("1 days", periods=5) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days, 2 days, 3 days, 4 days, 5 days], ordered=True, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + idx = timedelta_range("1 hours", periods=10) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00', + '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00', + '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00', + '9 days 01:00:00'], + categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, 8 days 01:00:00, 9 days 01:00:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + def test_categorical_str_repr(self): + # GH 33676 + result = repr(Categorical([1, "2", 3, 4])) + expected = "[1, '2', 3, 4]\nCategories (4, object): [1, 3, 4, '2']" + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_sorting.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_sorting.py new file mode 100644 index 00000000..ae527065 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_sorting.py @@ -0,0 +1,128 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + Index, +) +import pandas._testing as tm + + +class TestCategoricalSort: + def test_argsort(self): + c = Categorical([5, 3, 1, 4, 2], ordered=True) + + expected = np.array([2, 4, 1, 3, 0]) + tm.assert_numpy_array_equal( + c.argsort(ascending=True), expected, check_dtype=False + ) + + expected = expected[::-1] + tm.assert_numpy_array_equal( + c.argsort(ascending=False), expected, check_dtype=False + ) + + def test_numpy_argsort(self): + c = Categorical([5, 3, 1, 4, 2], ordered=True) + + expected = np.array([2, 4, 1, 3, 0]) + tm.assert_numpy_array_equal(np.argsort(c), expected, check_dtype=False) + + tm.assert_numpy_array_equal( + np.argsort(c, kind="mergesort"), expected, check_dtype=False + ) + + msg = "the 'axis' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.argsort(c, axis=0) + + msg = "the 'order' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.argsort(c, order="C") + + def test_sort_values(self): + # unordered cats are sortable + cat = Categorical(["a", "b", "b", "a"], ordered=False) + cat.sort_values() + + cat = Categorical(["a", "c", "b", "d"], ordered=True) + + # sort_values + res = cat.sort_values() + exp = np.array(["a", "b", "c", "d"], dtype=object) + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, cat.categories) + + cat = Categorical( + ["a", "c", "b", "d"], categories=["a", "b", "c", "d"], ordered=True + ) + 
res = cat.sort_values() + exp = np.array(["a", "b", "c", "d"], dtype=object) + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, cat.categories) + + res = cat.sort_values(ascending=False) + exp = np.array(["d", "c", "b", "a"], dtype=object) + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, cat.categories) + + # sort (inplace order) + cat1 = cat.copy() + orig_codes = cat1._codes + cat1.sort_values(inplace=True) + assert cat1._codes is orig_codes + exp = np.array(["a", "b", "c", "d"], dtype=object) + tm.assert_numpy_array_equal(cat1.__array__(), exp) + tm.assert_index_equal(res.categories, cat.categories) + + # reverse + cat = Categorical(["a", "c", "c", "b", "d"], ordered=True) + res = cat.sort_values(ascending=False) + exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object) + exp_categories = Index(["a", "b", "c", "d"]) + tm.assert_numpy_array_equal(res.__array__(), exp_val) + tm.assert_index_equal(res.categories, exp_categories) + + def test_sort_values_na_position(self): + # see gh-12882 + cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True) + exp_categories = Index([2, 5]) + + exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan]) + res = cat.sort_values() # default arguments + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, exp_categories) + + exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0]) + res = cat.sort_values(ascending=True, na_position="first") + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, exp_categories) + + exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0]) + res = cat.sort_values(ascending=False, na_position="first") + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, exp_categories) + + exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan]) + res = cat.sort_values(ascending=True, na_position="last") + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, exp_categories) + + exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan]) + res = cat.sort_values(ascending=False, na_position="last") + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, exp_categories) + + cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True) + res = cat.sort_values(ascending=False, na_position="last") + exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object) + exp_categories = Index(["a", "b", "c", "d"]) + tm.assert_numpy_array_equal(res.__array__(), exp_val) + tm.assert_index_equal(res.categories, exp_categories) + + cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True) + res = cat.sort_values(ascending=False, na_position="first") + exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object) + exp_categories = Index(["a", "b", "c", "d"]) + tm.assert_numpy_array_equal(res.__array__(), exp_val) + tm.assert_index_equal(res.categories, exp_categories) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_subclass.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_subclass.py new file mode 100644 index 00000000..48325395 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_subclass.py @@ -0,0 +1,22 @@ +from pandas import Categorical +import pandas._testing as tm + + +class TestCategoricalSubclassing: + def test_constructor(self): + sc = tm.SubclassedCategorical(["a", "b", "c"]) + assert isinstance(sc, 
tm.SubclassedCategorical) + tm.assert_categorical_equal(sc, Categorical(["a", "b", "c"])) + + def test_from_codes(self): + sc = tm.SubclassedCategorical.from_codes([1, 0, 2], ["a", "b", "c"]) + assert isinstance(sc, tm.SubclassedCategorical) + exp = Categorical.from_codes([1, 0, 2], ["a", "b", "c"]) + tm.assert_categorical_equal(sc, exp) + + def test_map(self): + sc = tm.SubclassedCategorical(["a", "b", "c"]) + res = sc.map(lambda x: x.upper(), na_action=None) + assert isinstance(res, tm.SubclassedCategorical) + exp = Categorical(["A", "B", "C"]) + tm.assert_categorical_equal(res, exp) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_take.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_take.py new file mode 100644 index 00000000..fb79fe49 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_take.py @@ -0,0 +1,83 @@ +import numpy as np +import pytest + +from pandas import Categorical +import pandas._testing as tm + + +class TestTake: + # https://github.com/pandas-dev/pandas/issues/20664 + + def test_take_default_allow_fill(self): + cat = Categorical(["a", "b"]) + with tm.assert_produces_warning(None): + result = cat.take([0, -1]) + + assert result.equals(cat) + + def test_take_positive_no_warning(self): + cat = Categorical(["a", "b"]) + with tm.assert_produces_warning(None): + cat.take([0, 0]) + + def test_take_bounds(self, allow_fill): + # https://github.com/pandas-dev/pandas/issues/20664 + cat = Categorical(["a", "b", "a"]) + if allow_fill: + msg = "indices are out-of-bounds" + else: + msg = "index 4 is out of bounds for( axis 0 with)? size 3" + with pytest.raises(IndexError, match=msg): + cat.take([4, 5], allow_fill=allow_fill) + + def test_take_empty(self, allow_fill): + # https://github.com/pandas-dev/pandas/issues/20664 + cat = Categorical([], categories=["a", "b"]) + if allow_fill: + msg = "indices are out-of-bounds" + else: + msg = "cannot do a non-empty take from an empty axes" + with pytest.raises(IndexError, match=msg): + cat.take([0], allow_fill=allow_fill) + + def test_positional_take(self, ordered): + cat = Categorical(["a", "a", "b", "b"], categories=["b", "a"], ordered=ordered) + result = cat.take([0, 1, 2], allow_fill=False) + expected = Categorical( + ["a", "a", "b"], categories=cat.categories, ordered=ordered + ) + tm.assert_categorical_equal(result, expected) + + def test_positional_take_unobserved(self, ordered): + cat = Categorical(["a", "b"], categories=["a", "b", "c"], ordered=ordered) + result = cat.take([1, 0], allow_fill=False) + expected = Categorical(["b", "a"], categories=cat.categories, ordered=ordered) + tm.assert_categorical_equal(result, expected) + + def test_take_allow_fill(self): + # https://github.com/pandas-dev/pandas/issues/23296 + cat = Categorical(["a", "a", "b"]) + result = cat.take([0, -1, -1], allow_fill=True) + expected = Categorical(["a", np.nan, np.nan], categories=["a", "b"]) + tm.assert_categorical_equal(result, expected) + + def test_take_fill_with_negative_one(self): + # -1 was a category + cat = Categorical([-1, 0, 1]) + result = cat.take([0, -1, 1], allow_fill=True, fill_value=-1) + expected = Categorical([-1, -1, 0], categories=[-1, 0, 1]) + tm.assert_categorical_equal(result, expected) + + def test_take_fill_value(self): + # https://github.com/pandas-dev/pandas/issues/23296 + cat = Categorical(["a", "b", "c"]) + result = cat.take([0, 1, -1], fill_value="a", allow_fill=True) + expected = Categorical(["a", "b", "a"], 
categories=["a", "b", "c"]) + tm.assert_categorical_equal(result, expected) + + def test_take_fill_value_new_raises(self): + # https://github.com/pandas-dev/pandas/issues/23296 + cat = Categorical(["a", "b", "c"]) + xpr = r"Cannot setitem on a Categorical with a new category \(d\)" + with pytest.raises(TypeError, match=xpr): + cat.take([0, 1, -1], fill_value="d", allow_fill=True) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_warnings.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_warnings.py new file mode 100644 index 00000000..68c59706 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/categorical/test_warnings.py @@ -0,0 +1,19 @@ +import pytest + +import pandas._testing as tm + + +class TestCategoricalWarnings: + def test_tab_complete_warning(self, ip): + # https://github.com/pandas-dev/pandas/issues/16409 + pytest.importorskip("IPython", minversion="6.0.0") + from IPython.core.completer import provisionalcompleter + + code = "import pandas as pd; c = pd.Categorical([])" + ip.run_cell(code) + + # GH 31324 newer jedi version raises Deprecation warning; + # appears resolved 2021-02-02 + with tm.assert_produces_warning(None, raise_on_extra_warnings=False): + with provisionalcompleter("ignore"): + list(ip.Completer.completions("c.", 1)) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_constructors.py new file mode 100644 index 00000000..30f47e37 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_constructors.py @@ -0,0 +1,256 @@ +import numpy as np +import pytest + +from pandas._libs import iNaT + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import DatetimeArray +from pandas.core.arrays.datetimes import _sequence_to_dt64ns + + +class TestDatetimeArrayConstructor: + def test_from_sequence_invalid_type(self): + mi = pd.MultiIndex.from_product([np.arange(5), np.arange(5)]) + with pytest.raises(TypeError, match="Cannot create a DatetimeArray"): + DatetimeArray._from_sequence(mi) + + def test_only_1dim_accepted(self): + arr = np.array([0, 1, 2, 3], dtype="M8[h]").astype("M8[ns]") + + with pytest.raises(ValueError, match="Only 1-dimensional"): + # 3-dim, we allow 2D to sneak in for ops purposes GH#29853 + DatetimeArray(arr.reshape(2, 2, 1)) + + with pytest.raises(ValueError, match="Only 1-dimensional"): + # 0-dim + DatetimeArray(arr[[0]].squeeze()) + + def test_freq_validation(self): + # GH#24623 check that invalid instances cannot be created with the + # public constructor + arr = np.arange(5, dtype=np.int64) * 3600 * 10**9 + + msg = ( + "Inferred frequency H from passed values does not " + "conform to passed frequency W-SUN" + ) + with pytest.raises(ValueError, match=msg): + DatetimeArray(arr, freq="W") + + @pytest.mark.parametrize( + "meth", + [ + DatetimeArray._from_sequence, + _sequence_to_dt64ns, + pd.to_datetime, + pd.DatetimeIndex, + ], + ) + def test_mixing_naive_tzaware_raises(self, meth): + # GH#24569 + arr = np.array([pd.Timestamp("2000"), pd.Timestamp("2000", tz="CET")]) + + msg = ( + "Cannot mix tz-aware with 
tz-naive values|" + "Tz-aware datetime.datetime cannot be converted " + "to datetime64 unless utc=True" + ) + + for obj in [arr, arr[::-1]]: + # check that we raise regardless of whether naive is found + # before aware or vice-versa + with pytest.raises(ValueError, match=msg): + meth(obj) + + def test_from_pandas_array(self): + arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9 + + result = DatetimeArray._from_sequence(arr)._with_freq("infer") + + expected = pd.date_range("1970-01-01", periods=5, freq="H")._data + tm.assert_datetime_array_equal(result, expected) + + def test_mismatched_timezone_raises(self): + arr = DatetimeArray( + np.array(["2000-01-01T06:00:00"], dtype="M8[ns]"), + dtype=DatetimeTZDtype(tz="US/Central"), + ) + dtype = DatetimeTZDtype(tz="US/Eastern") + msg = r"dtype=datetime64\[ns.*\] does not match data dtype datetime64\[ns.*\]" + with pytest.raises(TypeError, match=msg): + DatetimeArray(arr, dtype=dtype) + + # also with mismatched tzawareness + with pytest.raises(TypeError, match=msg): + DatetimeArray(arr, dtype=np.dtype("M8[ns]")) + with pytest.raises(TypeError, match=msg): + DatetimeArray(arr.tz_localize(None), dtype=arr.dtype) + + def test_non_array_raises(self): + with pytest.raises(ValueError, match="list"): + DatetimeArray([1, 2, 3]) + + def test_bool_dtype_raises(self): + arr = np.array([1, 2, 3], dtype="bool") + + msg = "Unexpected value for 'dtype': 'bool'. Must be" + with pytest.raises(ValueError, match=msg): + DatetimeArray(arr) + + msg = r"dtype bool cannot be converted to datetime64\[ns\]" + with pytest.raises(TypeError, match=msg): + DatetimeArray._from_sequence(arr) + + with pytest.raises(TypeError, match=msg): + _sequence_to_dt64ns(arr) + + with pytest.raises(TypeError, match=msg): + pd.DatetimeIndex(arr) + + with pytest.raises(TypeError, match=msg): + pd.to_datetime(arr) + + def test_incorrect_dtype_raises(self): + with pytest.raises(ValueError, match="Unexpected value for 'dtype'."): + DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="category") + + def test_freq_infer_raises(self): + with pytest.raises(ValueError, match="Frequency inference"): + DatetimeArray(np.array([1, 2, 3], dtype="i8"), freq="infer") + + def test_copy(self): + data = np.array([1, 2, 3], dtype="M8[ns]") + arr = DatetimeArray(data, copy=False) + assert arr._ndarray is data + + arr = DatetimeArray(data, copy=True) + assert arr._ndarray is not data + + @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) + def test_numpy_datetime_unit(self, unit): + data = np.array([1, 2, 3], dtype=f"M8[{unit}]") + arr = DatetimeArray(data) + assert arr.unit == unit + assert arr[0].unit == unit + + +class TestSequenceToDT64NS: + def test_tz_dtype_mismatch_raises(self): + arr = DatetimeArray._from_sequence( + ["2000"], dtype=DatetimeTZDtype(tz="US/Central") + ) + with pytest.raises(TypeError, match="data is already tz-aware"): + DatetimeArray._from_sequence_not_strict( + arr, dtype=DatetimeTZDtype(tz="UTC") + ) + + def test_tz_dtype_matches(self): + dtype = DatetimeTZDtype(tz="US/Central") + arr = DatetimeArray._from_sequence(["2000"], dtype=dtype) + result = DatetimeArray._from_sequence_not_strict(arr, dtype=dtype) + tm.assert_equal(arr, result) + + @pytest.mark.parametrize("order", ["F", "C"]) + def test_2d(self, order): + dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific") + arr = np.array(dti, dtype=object).reshape(3, 2) + if order == "F": + arr = arr.T + + res = _sequence_to_dt64ns(arr) + expected = _sequence_to_dt64ns(arr.ravel()) + + 
tm.assert_numpy_array_equal(res[0].ravel(), expected[0]) + assert res[1] == expected[1] + assert res[2] == expected[2] + + res = DatetimeArray._from_sequence(arr) + expected = DatetimeArray._from_sequence(arr.ravel()).reshape(arr.shape) + tm.assert_datetime_array_equal(res, expected) + + +# ---------------------------------------------------------------------------- +# Arrow interaction + + +EXTREME_VALUES = [0, 123456789, None, iNaT, 2**63 - 1, -(2**63) + 1] +FINE_TO_COARSE_SAFE = [123_000_000_000, None, -123_000_000_000] +COARSE_TO_FINE_SAFE = [123, None, -123] + + +@pytest.mark.parametrize( + ("pa_unit", "pd_unit", "pa_tz", "pd_tz", "data"), + [ + ("s", "s", "UTC", "UTC", EXTREME_VALUES), + ("ms", "ms", "UTC", "Europe/Berlin", EXTREME_VALUES), + ("us", "us", "US/Eastern", "UTC", EXTREME_VALUES), + ("ns", "ns", "US/Central", "Asia/Kolkata", EXTREME_VALUES), + ("ns", "s", "UTC", "UTC", FINE_TO_COARSE_SAFE), + ("us", "ms", "UTC", "Europe/Berlin", FINE_TO_COARSE_SAFE), + ("ms", "us", "US/Eastern", "UTC", COARSE_TO_FINE_SAFE), + ("s", "ns", "US/Central", "Asia/Kolkata", COARSE_TO_FINE_SAFE), + ], +) +def test_from_arrow_with_different_units_and_timezones_with( + pa_unit, pd_unit, pa_tz, pd_tz, data +): + pa = pytest.importorskip("pyarrow") + + pa_type = pa.timestamp(pa_unit, tz=pa_tz) + arr = pa.array(data, type=pa_type) + dtype = DatetimeTZDtype(unit=pd_unit, tz=pd_tz) + + result = dtype.__from_arrow__(arr) + expected = DatetimeArray( + np.array(data, dtype=f"datetime64[{pa_unit}]").astype(f"datetime64[{pd_unit}]"), + dtype=dtype, + ) + tm.assert_extension_array_equal(result, expected) + + result = dtype.__from_arrow__(pa.chunked_array([arr])) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + ("unit", "tz"), + [ + ("s", "UTC"), + ("ms", "Europe/Berlin"), + ("us", "US/Eastern"), + ("ns", "Asia/Kolkata"), + ("ns", "UTC"), + ], +) +def test_from_arrow_from_empty(unit, tz): + pa = pytest.importorskip("pyarrow") + + data = [] + arr = pa.array(data) + dtype = DatetimeTZDtype(unit=unit, tz=tz) + + result = dtype.__from_arrow__(arr) + expected = DatetimeArray(np.array(data, dtype=f"datetime64[{unit}]")) + expected = expected.tz_localize(tz=tz) + tm.assert_extension_array_equal(result, expected) + + result = dtype.__from_arrow__(pa.chunked_array([arr])) + tm.assert_extension_array_equal(result, expected) + + +def test_from_arrow_from_integers(): + pa = pytest.importorskip("pyarrow") + + data = [0, 123456789, None, 2**63 - 1, iNaT, -123456789] + arr = pa.array(data) + dtype = DatetimeTZDtype(unit="ns", tz="UTC") + + result = dtype.__from_arrow__(arr) + expected = DatetimeArray(np.array(data, dtype="datetime64[ns]")) + expected = expected.tz_localize("UTC") + tm.assert_extension_array_equal(result, expected) + + result = dtype.__from_arrow__(pa.chunked_array([arr])) + tm.assert_extension_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_cumulative.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_cumulative.py new file mode 100644 index 00000000..ca9760d5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_cumulative.py @@ -0,0 +1,46 @@ +import pytest + +import pandas._testing as tm +from pandas.core.arrays import DatetimeArray + + +class TestAccumulator: + def test_accumulators_freq(self): + # GH#50297 + arr = DatetimeArray._from_sequence_not_strict( + [ + "2000-01-01", + "2000-01-02", + "2000-01-03", + ], + freq="D", 
+ ) + result = arr._accumulate("cummin") + expected = DatetimeArray._from_sequence_not_strict( + ["2000-01-01"] * 3, freq=None + ) + tm.assert_datetime_array_equal(result, expected) + + result = arr._accumulate("cummax") + expected = DatetimeArray._from_sequence_not_strict( + [ + "2000-01-01", + "2000-01-02", + "2000-01-03", + ], + freq=None, + ) + tm.assert_datetime_array_equal(result, expected) + + @pytest.mark.parametrize("func", ["cumsum", "cumprod"]) + def test_accumulators_disallowed(self, func): + # GH#50297 + arr = DatetimeArray._from_sequence_not_strict( + [ + "2000-01-01", + "2000-01-02", + ], + freq="D", + ) + with pytest.raises(TypeError, match=f"Accumulation {func}"): + arr._accumulate(func) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_reductions.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_reductions.py new file mode 100644 index 00000000..59a4443a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/datetimes/test_reductions.py @@ -0,0 +1,183 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +from pandas import NaT +import pandas._testing as tm +from pandas.core.arrays import DatetimeArray + + +class TestReductions: + @pytest.fixture(params=["s", "ms", "us", "ns"]) + def unit(self, request): + return request.param + + @pytest.fixture + def arr1d(self, tz_naive_fixture): + """Fixture returning DatetimeArray with parametrized timezones""" + tz = tz_naive_fixture + dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]") + arr = DatetimeArray._from_sequence( + [ + "2000-01-03", + "2000-01-03", + "NaT", + "2000-01-02", + "2000-01-05", + "2000-01-04", + ], + dtype=dtype, + ) + return arr + + def test_min_max(self, arr1d, unit): + arr = arr1d + arr = arr.as_unit(unit) + tz = arr.tz + + result = arr.min() + expected = pd.Timestamp("2000-01-02", tz=tz).as_unit(unit) + assert result == expected + assert result.unit == expected.unit + + result = arr.max() + expected = pd.Timestamp("2000-01-05", tz=tz).as_unit(unit) + assert result == expected + assert result.unit == expected.unit + + result = arr.min(skipna=False) + assert result is NaT + + result = arr.max(skipna=False) + assert result is NaT + + @pytest.mark.parametrize("tz", [None, "US/Central"]) + @pytest.mark.parametrize("skipna", [True, False]) + def test_min_max_empty(self, skipna, tz): + dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]") + arr = DatetimeArray._from_sequence([], dtype=dtype) + result = arr.min(skipna=skipna) + assert result is NaT + + result = arr.max(skipna=skipna) + assert result is NaT + + @pytest.mark.parametrize("tz", [None, "US/Central"]) + @pytest.mark.parametrize("skipna", [True, False]) + def test_median_empty(self, skipna, tz): + dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]") + arr = DatetimeArray._from_sequence([], dtype=dtype) + result = arr.median(skipna=skipna) + assert result is NaT + + arr = arr.reshape(0, 3) + result = arr.median(axis=0, skipna=skipna) + expected = type(arr)._from_sequence([NaT, NaT, NaT], dtype=arr.dtype) + tm.assert_equal(result, expected) + + result = arr.median(axis=1, skipna=skipna) + expected = type(arr)._from_sequence([], dtype=arr.dtype) + tm.assert_equal(result, expected) + + def test_median(self, arr1d): + arr = arr1d + + result = arr.median() + assert result == arr[0] + result = arr.median(skipna=False) + assert result is NaT + + result = 
arr.dropna().median(skipna=False) + assert result == arr[0] + + result = arr.median(axis=0) + assert result == arr[0] + + def test_median_axis(self, arr1d): + arr = arr1d + assert arr.median(axis=0) == arr.median() + assert arr.median(axis=0, skipna=False) is NaT + + msg = r"abs\(axis\) must be less than ndim" + with pytest.raises(ValueError, match=msg): + arr.median(axis=1) + + @pytest.mark.filterwarnings("ignore:All-NaN slice encountered:RuntimeWarning") + def test_median_2d(self, arr1d): + arr = arr1d.reshape(1, -1) + + # axis = None + assert arr.median() == arr1d.median() + assert arr.median(skipna=False) is NaT + + # axis = 0 + result = arr.median(axis=0) + expected = arr1d + tm.assert_equal(result, expected) + + # Since column 3 is all-NaT, we get NaT there with or without skipna + result = arr.median(axis=0, skipna=False) + expected = arr1d + tm.assert_equal(result, expected) + + # axis = 1 + result = arr.median(axis=1) + expected = type(arr)._from_sequence([arr1d.median()]) + tm.assert_equal(result, expected) + + result = arr.median(axis=1, skipna=False) + expected = type(arr)._from_sequence([NaT], dtype=arr.dtype) + tm.assert_equal(result, expected) + + def test_mean(self, arr1d): + arr = arr1d + + # manually verified result + expected = arr[0] + 0.4 * pd.Timedelta(days=1) + + result = arr.mean() + assert result == expected + result = arr.mean(skipna=False) + assert result is NaT + + result = arr.dropna().mean(skipna=False) + assert result == expected + + result = arr.mean(axis=0) + assert result == expected + + def test_mean_2d(self): + dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific") + dta = dti._data.reshape(3, 2) + + result = dta.mean(axis=0) + expected = dta[1] + tm.assert_datetime_array_equal(result, expected) + + result = dta.mean(axis=1) + expected = dta[:, 0] + pd.Timedelta(hours=12) + tm.assert_datetime_array_equal(result, expected) + + result = dta.mean(axis=None) + expected = dti.mean() + assert result == expected + + @pytest.mark.parametrize("skipna", [True, False]) + def test_mean_empty(self, arr1d, skipna): + arr = arr1d[:0] + + assert arr.mean(skipna=skipna) is NaT + + arr2d = arr.reshape(0, 3) + result = arr2d.mean(axis=0, skipna=skipna) + expected = DatetimeArray._from_sequence([NaT, NaT, NaT], dtype=arr.dtype) + tm.assert_datetime_array_equal(result, expected) + + result = arr2d.mean(axis=1, skipna=skipna) + expected = arr # i.e. 
1D, empty + tm.assert_datetime_array_equal(result, expected) + + result = arr2d.mean(axis=None, skipna=skipna) + assert result is NaT diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/conftest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/conftest.py new file mode 100644 index 00000000..5e971c66 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/conftest.py @@ -0,0 +1,48 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas.core.arrays.floating import ( + Float32Dtype, + Float64Dtype, +) + + +@pytest.fixture(params=[Float32Dtype, Float64Dtype]) +def dtype(request): + """Parametrized fixture returning a float 'dtype'""" + return request.param() + + +@pytest.fixture +def data(dtype): + """Fixture returning 'data' array according to parametrized float 'dtype'""" + return pd.array( + list(np.arange(0.1, 0.9, 0.1)) + + [pd.NA] + + list(np.arange(1, 9.8, 0.1)) + + [pd.NA] + + [9.9, 10.0], + dtype=dtype, + ) + + +@pytest.fixture +def data_missing(dtype): + """ + Fixture returning array with missing data according to parametrized float + 'dtype'. + """ + return pd.array([np.nan, 0.1], dtype=dtype) + + +@pytest.fixture(params=["data", "data_missing"]) +def all_data(request, data, data_missing): + """Parametrized fixture returning 'data' or 'data_missing' float arrays. + + Used to test dtype conversion with and without missing values. + """ + if request.param == "data": + return data + elif request.param == "data_missing": + return data_missing diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_arithmetic.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_arithmetic.py new file mode 100644 index 00000000..056c22d8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_arithmetic.py @@ -0,0 +1,233 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import FloatingArray + +# Basic test for the arithmetic array ops +# ----------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "opname, exp", + [ + ("add", [1.1, 2.2, None, None, 5.5]), + ("mul", [0.1, 0.4, None, None, 2.5]), + ("sub", [0.9, 1.8, None, None, 4.5]), + ("truediv", [10.0, 10.0, None, None, 10.0]), + ("floordiv", [9.0, 9.0, None, None, 10.0]), + ("mod", [0.1, 0.2, None, None, 0.0]), + ], + ids=["add", "mul", "sub", "div", "floordiv", "mod"], +) +def test_array_op(dtype, opname, exp): + a = pd.array([1.0, 2.0, None, 4.0, 5.0], dtype=dtype) + b = pd.array([0.1, 0.2, 0.3, None, 0.5], dtype=dtype) + + op = getattr(operator, opname) + + result = op(a, b) + expected = pd.array(exp, dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)]) +def test_divide_by_zero(dtype, zero, negative): + # TODO pending NA/NaN discussion + # https://github.com/pandas-dev/pandas/issues/32265/ + a = pd.array([0, 1, -1, None], dtype=dtype) + result = a / zero + expected = FloatingArray( + np.array([np.nan, np.inf, -np.inf, np.nan], dtype=dtype.numpy_dtype), + np.array([False, False, False, True]), + ) + if negative: + 
expected *= -1 + tm.assert_extension_array_equal(result, expected) + + +def test_pow_scalar(dtype): + a = pd.array([-1, 0, 1, None, 2], dtype=dtype) + result = a**0 + expected = pd.array([1, 1, 1, 1, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = a**1 + expected = pd.array([-1, 0, 1, None, 2], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = a**pd.NA + expected = pd.array([None, None, 1, None, None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = a**np.nan + # TODO np.nan should be converted to pd.NA / missing before operation? + expected = FloatingArray( + np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype=dtype.numpy_dtype), + mask=a._mask, + ) + tm.assert_extension_array_equal(result, expected) + + # reversed + a = a[1:] # Can't raise integers to negative powers. + + result = 0**a + expected = pd.array([1, 0, None, 0], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = 1**a + expected = pd.array([1, 1, 1, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = pd.NA**a + expected = pd.array([1, None, None, None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = np.nan**a + expected = FloatingArray( + np.array([1, np.nan, np.nan, np.nan], dtype=dtype.numpy_dtype), mask=a._mask + ) + tm.assert_extension_array_equal(result, expected) + + +def test_pow_array(dtype): + a = pd.array([0, 0, 0, 1, 1, 1, None, None, None], dtype=dtype) + b = pd.array([0, 1, None, 0, 1, None, 0, 1, None], dtype=dtype) + result = a**b + expected = pd.array([1, 0, None, 1, 1, 1, 1, None, None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_rpow_one_to_na(): + # https://github.com/pandas-dev/pandas/issues/22022 + # https://github.com/pandas-dev/pandas/issues/29997 + arr = pd.array([np.nan, np.nan], dtype="Float64") + result = np.array([1.0, 2.0]) ** arr + expected = pd.array([1.0, np.nan], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("other", [0, 0.5]) +def test_arith_zero_dim_ndarray(other): + arr = pd.array([1, None, 2], dtype="Float64") + result = arr + np.array(other) + expected = arr + other + tm.assert_equal(result, expected) + + +# Test generic characteristics / errors +# ----------------------------------------------------------------------------- + + +def test_error_invalid_values(data, all_arithmetic_operators): + op = all_arithmetic_operators + s = pd.Series(data) + ops = getattr(s, op) + + # invalid scalars + msg = "|".join( + [ + r"can only perform ops with numeric values", + r"FloatingArray cannot perform the operation mod", + "unsupported operand type", + "not all arguments converted during string formatting", + "can't multiply sequence by non-int of type 'float'", + "ufunc 'subtract' cannot use operands with types dtype", + r"can only concatenate str \(not \"float\"\) to str", + "ufunc '.*' not supported for the input types, and the inputs could not", + "ufunc '.*' did not contain a loop with signature matching types", + "Concatenation operation is not implemented for NumPy arrays", + ] + ) + with pytest.raises(TypeError, match=msg): + ops("foo") + with pytest.raises(TypeError, match=msg): + ops(pd.Timestamp("20180101")) + + # invalid array-likes + with pytest.raises(TypeError, match=msg): + ops(pd.Series("foo", index=s.index)) + + msg = "|".join( + [ + "can only perform ops with numeric values", + "cannot perform .* with this index type: 
DatetimeArray", + "Addition/subtraction of integers and integer-arrays " + "with DatetimeArray is no longer supported. *", + "unsupported operand type", + "not all arguments converted during string formatting", + "can't multiply sequence by non-int of type 'float'", + "ufunc 'subtract' cannot use operands with types dtype", + ( + "ufunc 'add' cannot use operands with types " + rf"dtype\('{tm.ENDIAN}M8\[ns\]'\)" + ), + r"ufunc 'add' cannot use operands with types dtype\('float\d{2}'\)", + "cannot subtract DatetimeArray from ndarray", + ] + ) + with pytest.raises(TypeError, match=msg): + ops(pd.Series(pd.date_range("20180101", periods=len(s)))) + + +# Various +# ----------------------------------------------------------------------------- + + +def test_cross_type_arithmetic(): + df = pd.DataFrame( + { + "A": pd.array([1, 2, np.nan], dtype="Float64"), + "B": pd.array([1, np.nan, 3], dtype="Float32"), + "C": np.array([1, 2, 3], dtype="float64"), + } + ) + + result = df.A + df.C + expected = pd.Series([2, 4, np.nan], dtype="Float64") + tm.assert_series_equal(result, expected) + + result = (df.A + df.C) * 3 == 12 + expected = pd.Series([False, True, None], dtype="boolean") + tm.assert_series_equal(result, expected) + + result = df.A + df.B + expected = pd.Series([2, np.nan, np.nan], dtype="Float64") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "source, neg_target, abs_target", + [ + ([1.1, 2.2, 3.3], [-1.1, -2.2, -3.3], [1.1, 2.2, 3.3]), + ([1.1, 2.2, None], [-1.1, -2.2, None], [1.1, 2.2, None]), + ([-1.1, 0.0, 1.1], [1.1, 0.0, -1.1], [1.1, 0.0, 1.1]), + ], +) +def test_unary_float_operators(float_ea_dtype, source, neg_target, abs_target): + # GH38794 + dtype = float_ea_dtype + arr = pd.array(source, dtype=dtype) + neg_result, pos_result, abs_result = -arr, +arr, abs(arr) + neg_target = pd.array(neg_target, dtype=dtype) + abs_target = pd.array(abs_target, dtype=dtype) + + tm.assert_extension_array_equal(neg_result, neg_target) + tm.assert_extension_array_equal(pos_result, arr) + assert not tm.shares_memory(pos_result, arr) + tm.assert_extension_array_equal(abs_result, abs_target) + + +def test_bitwise(dtype): + left = pd.array([1, None, 3, 4], dtype=dtype) + right = pd.array([None, 3, 5, 4], dtype=dtype) + + with pytest.raises(TypeError, match="unsupported operand type"): + left | right + with pytest.raises(TypeError, match="unsupported operand type"): + left & right + with pytest.raises(TypeError, match="unsupported operand type"): + left ^ right diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_astype.py new file mode 100644 index 00000000..ade3dbd2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_astype.py @@ -0,0 +1,128 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +def test_astype(): + # with missing values + arr = pd.array([0.1, 0.2, None], dtype="Float64") + + with pytest.raises(ValueError, match="cannot convert NA to integer"): + arr.astype("int64") + + with pytest.raises(ValueError, match="cannot convert float NaN to bool"): + arr.astype("bool") + + result = arr.astype("float64") + expected = np.array([0.1, 0.2, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + # no missing values + arr = pd.array([0.0, 1.0, 0.5], dtype="Float64") + result = arr.astype("int64") + expected = np.array([0, 1, 0], dtype="int64") + 
tm.assert_numpy_array_equal(result, expected) + + result = arr.astype("bool") + expected = np.array([False, True, True], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + +def test_astype_to_floating_array(): + # astype to FloatingArray + arr = pd.array([0.0, 1.0, None], dtype="Float64") + + result = arr.astype("Float64") + tm.assert_extension_array_equal(result, arr) + result = arr.astype(pd.Float64Dtype()) + tm.assert_extension_array_equal(result, arr) + result = arr.astype("Float32") + expected = pd.array([0.0, 1.0, None], dtype="Float32") + tm.assert_extension_array_equal(result, expected) + + +def test_astype_to_boolean_array(): + # astype to BooleanArray + arr = pd.array([0.0, 1.0, None], dtype="Float64") + + result = arr.astype("boolean") + expected = pd.array([False, True, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + result = arr.astype(pd.BooleanDtype()) + tm.assert_extension_array_equal(result, expected) + + +def test_astype_to_integer_array(): + # astype to IntegerArray + arr = pd.array([0.0, 1.5, None], dtype="Float64") + + result = arr.astype("Int64") + expected = pd.array([0, 1, None], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + +def test_astype_str(): + a = pd.array([0.1, 0.2, None], dtype="Float64") + expected = np.array(["0.1", "0.2", ""], dtype="U32") + + tm.assert_numpy_array_equal(a.astype(str), expected) + tm.assert_numpy_array_equal(a.astype("str"), expected) + + +def test_astype_copy(): + arr = pd.array([0.1, 0.2, None], dtype="Float64") + orig = pd.array([0.1, 0.2, None], dtype="Float64") + + # copy=True -> ensure both data and mask are actual copies + result = arr.astype("Float64", copy=True) + assert result is not arr + assert not tm.shares_memory(result, arr) + result[0] = 10 + tm.assert_extension_array_equal(arr, orig) + result[0] = pd.NA + tm.assert_extension_array_equal(arr, orig) + + # copy=False + result = arr.astype("Float64", copy=False) + assert result is arr + assert np.shares_memory(result._data, arr._data) + assert np.shares_memory(result._mask, arr._mask) + result[0] = 10 + assert arr[0] == 10 + result[0] = pd.NA + assert arr[0] is pd.NA + + # astype to different dtype -> always needs a copy -> even with copy=False + # we need to ensure that also the mask is actually copied + arr = pd.array([0.1, 0.2, None], dtype="Float64") + orig = pd.array([0.1, 0.2, None], dtype="Float64") + + result = arr.astype("Float32", copy=False) + assert not tm.shares_memory(result, arr) + result[0] = 10 + tm.assert_extension_array_equal(arr, orig) + result[0] = pd.NA + tm.assert_extension_array_equal(arr, orig) + + +def test_astype_object(dtype): + arr = pd.array([1.0, pd.NA], dtype=dtype) + + result = arr.astype(object) + expected = np.array([1.0, pd.NA], dtype=object) + tm.assert_numpy_array_equal(result, expected) + # check exact element types + assert isinstance(result[0], float) + assert result[1] is pd.NA + + +def test_Float64_conversion(): + # GH#40729 + testseries = pd.Series(["1", "2", "3", "4"], dtype="object") + result = testseries.astype(pd.Float64Dtype()) + + expected = pd.Series([1.0, 2.0, 3.0, 4.0], dtype=pd.Float64Dtype()) + + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_comparison.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_comparison.py new file mode 100644 index 00000000..a429649f --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_comparison.py @@ -0,0 +1,65 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import FloatingArray +from pandas.tests.arrays.masked_shared import ( + ComparisonOps, + NumericOps, +) + + +class TestComparisonOps(NumericOps, ComparisonOps): + @pytest.mark.parametrize("other", [True, False, pd.NA, -1.0, 0.0, 1]) + def test_scalar(self, other, comparison_op, dtype): + ComparisonOps.test_scalar(self, other, comparison_op, dtype) + + def test_compare_with_integerarray(self, comparison_op): + op = comparison_op + a = pd.array([0, 1, None] * 3, dtype="Int64") + b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Float64") + other = b.astype("Int64") + expected = op(a, other) + result = op(a, b) + tm.assert_extension_array_equal(result, expected) + expected = op(other, a) + result = op(b, a) + tm.assert_extension_array_equal(result, expected) + + +def test_equals(): + # GH-30652 + # equals is generally tested in /tests/extension/base/methods, but this + # specifically tests that two arrays of the same class but different dtype + # do not evaluate equal + a1 = pd.array([1, 2, None], dtype="Float64") + a2 = pd.array([1, 2, None], dtype="Float32") + assert a1.equals(a2) is False + + +def test_equals_nan_vs_na(): + # GH#44382 + + mask = np.zeros(3, dtype=bool) + data = np.array([1.0, np.nan, 3.0], dtype=np.float64) + + left = FloatingArray(data, mask) + assert left.equals(left) + tm.assert_extension_array_equal(left, left) + + assert left.equals(left.copy()) + assert left.equals(FloatingArray(data.copy(), mask.copy())) + + mask2 = np.array([False, True, False], dtype=bool) + data2 = np.array([1.0, 2.0, 3.0], dtype=np.float64) + right = FloatingArray(data2, mask2) + assert right.equals(right) + tm.assert_extension_array_equal(right, right) + + assert not left.equals(right) + + # with mask[1] = True, the only difference is data[1], which should + # not matter for equals + mask[1] = True + assert left.equals(right) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_concat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_concat.py new file mode 100644 index 00000000..2174a834 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_concat.py @@ -0,0 +1,20 @@ +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize( + "to_concat_dtypes, result_dtype", + [ + (["Float64", "Float64"], "Float64"), + (["Float32", "Float64"], "Float64"), + (["Float32", "Float32"], "Float32"), + ], +) +def test_concat_series(to_concat_dtypes, result_dtype): + result = pd.concat([pd.Series([1, 2, pd.NA], dtype=t) for t in to_concat_dtypes]) + expected = pd.concat([pd.Series([1, 2, pd.NA], dtype=object)] * 2).astype( + result_dtype + ) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_construction.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_construction.py new file mode 100644 index 00000000..4007ee6b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_construction.py @@ -0,0 +1,204 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import FloatingArray +from pandas.core.arrays.floating import ( + Float32Dtype, + Float64Dtype, +) + + +def 
test_uses_pandas_na(): + a = pd.array([1, None], dtype=Float64Dtype()) + assert a[1] is pd.NA + + +def test_floating_array_constructor(): + values = np.array([1, 2, 3, 4], dtype="float64") + mask = np.array([False, False, False, True], dtype="bool") + + result = FloatingArray(values, mask) + expected = pd.array([1, 2, 3, np.nan], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + tm.assert_numpy_array_equal(result._data, values) + tm.assert_numpy_array_equal(result._mask, mask) + + msg = r".* should be .* numpy array. Use the 'pd.array' function instead" + with pytest.raises(TypeError, match=msg): + FloatingArray(values.tolist(), mask) + + with pytest.raises(TypeError, match=msg): + FloatingArray(values, mask.tolist()) + + with pytest.raises(TypeError, match=msg): + FloatingArray(values.astype(int), mask) + + msg = r"__init__\(\) missing 1 required positional argument: 'mask'" + with pytest.raises(TypeError, match=msg): + FloatingArray(values) + + +def test_floating_array_disallows_float16(): + # GH#44715 + arr = np.array([1, 2], dtype=np.float16) + mask = np.array([False, False]) + + msg = "FloatingArray does not support np.float16 dtype" + with pytest.raises(TypeError, match=msg): + FloatingArray(arr, mask) + + +def test_floating_array_disallows_Float16_dtype(request): + # GH#44715 + with pytest.raises(TypeError, match="data type 'Float16' not understood"): + pd.array([1.0, 2.0], dtype="Float16") + + +def test_floating_array_constructor_copy(): + values = np.array([1, 2, 3, 4], dtype="float64") + mask = np.array([False, False, False, True], dtype="bool") + + result = FloatingArray(values, mask) + assert result._data is values + assert result._mask is mask + + result = FloatingArray(values, mask, copy=True) + assert result._data is not values + assert result._mask is not mask + + +def test_to_array(): + result = pd.array([0.1, 0.2, 0.3, 0.4]) + expected = pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "a, b", + [ + ([1, None], [1, pd.NA]), + ([None], [pd.NA]), + ([None, np.nan], [pd.NA, pd.NA]), + ([1, np.nan], [1, pd.NA]), + ([np.nan], [pd.NA]), + ], +) +def test_to_array_none_is_nan(a, b): + result = pd.array(a, dtype="Float64") + expected = pd.array(b, dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +def test_to_array_mixed_integer_float(): + result = pd.array([1, 2.0]) + expected = pd.array([1.0, 2.0], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + result = pd.array([1, None, 2.0]) + expected = pd.array([1.0, None, 2.0], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values", + [ + ["foo", "bar"], + "foo", + 1, + 1.0, + pd.date_range("20130101", periods=2), + np.array(["foo"]), + [[1, 2], [3, 4]], + [np.nan, {"a": 1}], + # GH#44514 all-NA case used to get quietly swapped out before checking ndim + np.array([pd.NA] * 6, dtype=object).reshape(3, 2), + ], +) +def test_to_array_error(values): + # error in converting existing arrays to FloatingArray + msg = "|".join( + [ + "cannot be converted to FloatingDtype", + "values must be a 1D list-like", + "Cannot pass scalar", + r"float\(\) argument must be a string or a (real )?number, not 'dict'", + "could not convert string to float: 'foo'", + r"could not convert string to float: np\.str_\('foo'\)", + ] + ) + with pytest.raises((TypeError, ValueError), match=msg): + pd.array(values, dtype="Float64") + + 
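+# NOTE (editor): the sketch below is an illustrative addition, not part of the
+# upstream pandas test suite. It assumes the np/pd/FloatingArray imports at the
+# top of this file and spells out the data/mask layout the constructor tests
+# above rely on: a FloatingArray pairs a plain float ndarray with a boolean
+# mask, and only the mask decides which slots read back as pd.NA.
+def example_floating_array_data_and_mask():
+    values = np.array([0.5, 1.5], dtype="float64")
+    mask = np.array([False, True])  # True marks a missing entry
+    arr = FloatingArray(values, mask)
+    assert arr[0] == 0.5            # unmasked slot is an ordinary float
+    assert arr[1] is pd.NA          # masked slot reads back as pd.NA
+    assert arr._data[1] == 1.5      # the underlying float is still stored
+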
+@pytest.mark.parametrize("values", [["1", "2", None], ["1.5", "2", None]]) +def test_construct_from_float_strings(values): + # see also test_to_integer_array_str + expected = pd.array([float(values[0]), 2, None], dtype="Float64") + + res = pd.array(values, dtype="Float64") + tm.assert_extension_array_equal(res, expected) + + res = FloatingArray._from_sequence(values) + tm.assert_extension_array_equal(res, expected) + + +def test_to_array_inferred_dtype(): + # if values has dtype -> respect it + result = pd.array(np.array([1, 2], dtype="float32")) + assert result.dtype == Float32Dtype() + + # if values have no dtype -> always float64 + result = pd.array([1.0, 2.0]) + assert result.dtype == Float64Dtype() + + +def test_to_array_dtype_keyword(): + result = pd.array([1, 2], dtype="Float32") + assert result.dtype == Float32Dtype() + + # if values has dtype -> override it + result = pd.array(np.array([1, 2], dtype="float32"), dtype="Float64") + assert result.dtype == Float64Dtype() + + +def test_to_array_integer(): + result = pd.array([1, 2], dtype="Float64") + expected = pd.array([1.0, 2.0], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + # for integer dtypes, the itemsize is not preserved + # TODO can we specify "floating" in general? + result = pd.array(np.array([1, 2], dtype="int32"), dtype="Float64") + assert result.dtype == Float64Dtype() + + +@pytest.mark.parametrize( + "bool_values, values, target_dtype, expected_dtype", + [ + ([False, True], [0, 1], Float64Dtype(), Float64Dtype()), + ([False, True], [0, 1], "Float64", Float64Dtype()), + ([False, True, np.nan], [0, 1, np.nan], Float64Dtype(), Float64Dtype()), + ], +) +def test_to_array_bool(bool_values, values, target_dtype, expected_dtype): + result = pd.array(bool_values, dtype=target_dtype) + assert result.dtype == expected_dtype + expected = pd.array(values, dtype=target_dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_series_from_float(data): + # construct from our dtype & string dtype + dtype = data.dtype + + # from float + expected = pd.Series(data) + result = pd.Series(data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)) + tm.assert_series_equal(result, expected) + + # from list + expected = pd.Series(data) + result = pd.Series(np.array(data).tolist(), dtype=str(dtype)) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_contains.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_contains.py new file mode 100644 index 00000000..95664269 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_contains.py @@ -0,0 +1,12 @@ +import numpy as np + +import pandas as pd + + +def test_contains_nan(): + # GH#52840 + arr = pd.array(range(5)) / 0 + + assert np.isnan(arr._data[0]) + assert not arr.isna()[0] + assert np.nan in arr diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_function.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_function.py new file mode 100644 index 00000000..40fd66fd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_function.py @@ -0,0 +1,194 @@ +import numpy as np +import pytest + +from pandas.compat import IS64 + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize("ufunc", [np.abs, np.sign]) +# np.sign emits a warning with nans, +@pytest.mark.filterwarnings("ignore:invalid value 
encountered in sign:RuntimeWarning") +def test_ufuncs_single(ufunc): + a = pd.array([1, 2, -3, np.nan], dtype="Float64") + result = ufunc(a) + expected = pd.array(ufunc(a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(a) + result = ufunc(s) + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt]) +def test_ufuncs_single_float(ufunc): + a = pd.array([1.0, 0.2, 3.0, np.nan], dtype="Float64") + with np.errstate(invalid="ignore"): + result = ufunc(a) + expected = pd.array(ufunc(a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(a) + with np.errstate(invalid="ignore"): + result = ufunc(s) + expected = pd.Series(ufunc(s.astype(float)), dtype="Float64") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.add, np.subtract]) +def test_ufuncs_binary_float(ufunc): + # two FloatingArrays + a = pd.array([1, 0.2, -3, np.nan], dtype="Float64") + result = ufunc(a, a) + expected = pd.array(ufunc(a.astype(float), a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + # FloatingArray with numpy array + arr = np.array([1, 2, 3, 4]) + result = ufunc(a, arr) + expected = pd.array(ufunc(a.astype(float), arr), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + result = ufunc(arr, a) + expected = pd.array(ufunc(arr, a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + # FloatingArray with scalar + result = ufunc(a, 1) + expected = pd.array(ufunc(a.astype(float), 1), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + result = ufunc(1, a) + expected = pd.array(ufunc(1, a.astype(float)), dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("values", [[0, 1], [0, None]]) +def test_ufunc_reduce_raises(values): + arr = pd.array(values, dtype="Float64") + + res = np.add.reduce(arr) + expected = arr.sum(skipna=False) + tm.assert_almost_equal(res, expected) + + +@pytest.mark.skipif(not IS64, reason="GH 36579: fail on 32-bit system") +@pytest.mark.parametrize( + "pandasmethname, kwargs", + [ + ("var", {"ddof": 0}), + ("var", {"ddof": 1}), + ("std", {"ddof": 0}), + ("std", {"ddof": 1}), + ("kurtosis", {}), + ("skew", {}), + ("sem", {}), + ], +) +def test_stat_method(pandasmethname, kwargs): + s = pd.Series(data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, np.nan, np.nan], dtype="Float64") + pandasmeth = getattr(s, pandasmethname) + result = pandasmeth(**kwargs) + s2 = pd.Series(data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype="float64") + pandasmeth = getattr(s2, pandasmethname) + expected = pandasmeth(**kwargs) + assert expected == result + + +def test_value_counts_na(): + arr = pd.array([0.1, 0.2, 0.1, pd.NA], dtype="Float64") + result = arr.value_counts(dropna=False) + idx = pd.Index([0.1, 0.2, pd.NA], dtype=arr.dtype) + assert idx.dtype == arr.dtype + expected = pd.Series([2, 1, 1], index=idx, dtype="Int64", name="count") + tm.assert_series_equal(result, expected) + + result = arr.value_counts(dropna=True) + expected = pd.Series([2, 1], index=idx[:-1], dtype="Int64", name="count") + tm.assert_series_equal(result, expected) + + +def test_value_counts_empty(): + ser = pd.Series([], dtype="Float64") + result = ser.value_counts() + idx = pd.Index([], dtype="Float64") + assert idx.dtype == "Float64" + expected = pd.Series([], index=idx, 
dtype="Int64", name="count") + tm.assert_series_equal(result, expected) + + +def test_value_counts_with_normalize(): + ser = pd.Series([0.1, 0.2, 0.1, pd.NA], dtype="Float64") + result = ser.value_counts(normalize=True) + expected = pd.Series([2, 1], index=ser[:2], dtype="Float64", name="proportion") / 3 + assert expected.index.dtype == ser.dtype + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("min_count", [0, 4]) +def test_floating_array_sum(skipna, min_count, dtype): + arr = pd.array([1, 2, 3, None], dtype=dtype) + result = arr.sum(skipna=skipna, min_count=min_count) + if skipna and min_count == 0: + assert result == 6.0 + else: + assert result is pd.NA + + +@pytest.mark.parametrize( + "values, expected", [([1, 2, 3], 6.0), ([1, 2, 3, None], 6.0), ([None], 0.0)] +) +def test_floating_array_numpy_sum(values, expected): + arr = pd.array(values, dtype="Float64") + result = np.sum(arr) + assert result == expected + + +@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"]) +def test_preserve_dtypes(op): + df = pd.DataFrame( + { + "A": ["a", "b", "b"], + "B": [1, None, 3], + "C": pd.array([0.1, None, 3.0], dtype="Float64"), + } + ) + + # op + result = getattr(df.C, op)() + assert isinstance(result, np.float64) + + # groupby + result = getattr(df.groupby("A"), op)() + + expected = pd.DataFrame( + {"B": np.array([1.0, 3.0]), "C": pd.array([0.1, 3], dtype="Float64")}, + index=pd.Index(["a", "b"], name="A"), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("method", ["min", "max"]) +def test_floating_array_min_max(skipna, method, dtype): + arr = pd.array([0.0, 1.0, None], dtype=dtype) + func = getattr(arr, method) + result = func(skipna=skipna) + if skipna: + assert result == (0 if method == "min" else 1) + else: + assert result is pd.NA + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("min_count", [0, 9]) +def test_floating_array_prod(skipna, min_count, dtype): + arr = pd.array([1.0, 2.0, None], dtype=dtype) + result = arr.prod(skipna=skipna, min_count=min_count) + if skipna and min_count == 0: + assert result == 2 + else: + assert result is pd.NA diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_repr.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_repr.py new file mode 100644 index 00000000..ea2cdd4f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_repr.py @@ -0,0 +1,47 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas.core.arrays.floating import ( + Float32Dtype, + Float64Dtype, +) + + +def test_dtypes(dtype): + # smoke tests on auto dtype construction + + np.dtype(dtype.type).kind == "f" + assert dtype.name is not None + + +@pytest.mark.parametrize( + "dtype, expected", + [(Float32Dtype(), "Float32Dtype()"), (Float64Dtype(), "Float64Dtype()")], +) +def test_repr_dtype(dtype, expected): + assert repr(dtype) == expected + + +def test_repr_array(): + result = repr(pd.array([1.0, None, 3.0])) + expected = "\n[1.0, , 3.0]\nLength: 3, dtype: Float64" + assert result == expected + + +def test_repr_array_long(): + data = pd.array([1.0, 2.0, None] * 1000) + expected = """ +[ 1.0, 2.0, , 1.0, 2.0, , 1.0, 2.0, , 1.0, + ... 
+ , 1.0, 2.0, , 1.0, 2.0, , 1.0, 2.0, ] +Length: 3000, dtype: Float64""" + result = repr(data) + assert result == expected + + +def test_frame_repr(data_missing): + df = pd.DataFrame({"A": data_missing}) + result = repr(df) + expected = " A\n0 \n1 0.1" + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_to_numpy.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_to_numpy.py new file mode 100644 index 00000000..2ed52439 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/floating/test_to_numpy.py @@ -0,0 +1,132 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import FloatingArray + + +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy(box): + con = pd.Series if box else pd.array + + # default (with or without missing values) -> object dtype + arr = con([0.1, 0.2, 0.3], dtype="Float64") + result = arr.to_numpy() + expected = np.array([0.1, 0.2, 0.3], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + arr = con([0.1, 0.2, None], dtype="Float64") + result = arr.to_numpy() + expected = np.array([0.1, 0.2, pd.NA], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy_float(box): + con = pd.Series if box else pd.array + + # no missing values -> can convert to float, otherwise raises + arr = con([0.1, 0.2, 0.3], dtype="Float64") + result = arr.to_numpy(dtype="float64") + expected = np.array([0.1, 0.2, 0.3], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + arr = con([0.1, 0.2, None], dtype="Float64") + with pytest.raises(ValueError, match="cannot convert to 'float64'-dtype"): + result = arr.to_numpy(dtype="float64") + + # need to explicitly specify na_value + result = arr.to_numpy(dtype="float64", na_value=np.nan) + expected = np.array([0.1, 0.2, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy_int(box): + con = pd.Series if box else pd.array + + # no missing values -> can convert to int, otherwise raises + arr = con([1.0, 2.0, 3.0], dtype="Float64") + result = arr.to_numpy(dtype="int64") + expected = np.array([1, 2, 3], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + arr = con([1.0, 2.0, None], dtype="Float64") + with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype"): + result = arr.to_numpy(dtype="int64") + + # automatic casting (floors the values) + arr = con([0.1, 0.9, 1.1], dtype="Float64") + result = arr.to_numpy(dtype="int64") + expected = np.array([0, 0, 1], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy_na_value(box): + con = pd.Series if box else pd.array + + arr = con([0.0, 1.0, None], dtype="Float64") + result = arr.to_numpy(dtype=object, na_value=None) + expected = np.array([0.0, 1.0, None], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + result = arr.to_numpy(dtype=bool, na_value=False) + expected = np.array([False, True, False], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + result = arr.to_numpy(dtype="int64", na_value=-99) + expected = np.array([0, 1, -99], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + 
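+# NOTE (editor): illustrative sketch added for exposition; not part of the
+# upstream pandas test suite. It assumes the imports at the top of this file
+# and spells out the distinction the next test exercises: np.nan stored in
+# _data is a real float value, while pd.NA is encoded purely by the mask, so
+# to_numpy's na_value replaces only the masked slots.
+def example_nan_is_a_value_na_is_masked():
+    arr = FloatingArray(np.array([np.nan, 1.0]), np.array([False, True]))
+    assert np.isnan(arr._data[0]) and not arr.isna()[0]  # NaN: a stored value
+    assert arr.isna()[1] and arr[1] is pd.NA             # NA: set by the mask
+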
+def test_to_numpy_na_value_with_nan(): + # array with both NaN and NA -> only fill NA with `na_value` + arr = FloatingArray(np.array([0.0, np.nan, 0.0]), np.array([False, False, True])) + result = arr.to_numpy(dtype="float64", na_value=-1) + expected = np.array([0.0, np.nan, -1.0], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["float64", "float32", "int32", "int64", "bool"]) +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy_dtype(box, dtype): + con = pd.Series if box else pd.array + arr = con([0.0, 1.0], dtype="Float64") + + result = arr.to_numpy(dtype=dtype) + expected = np.array([0, 1], dtype=dtype) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["float64", "float32", "int32", "int64", "bool"]) +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy_na_raises(box, dtype): + con = pd.Series if box else pd.array + arr = con([0.0, 1.0, None], dtype="Float64") + with pytest.raises(ValueError, match=dtype): + arr.to_numpy(dtype=dtype) + + +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy_string(box, dtype): + con = pd.Series if box else pd.array + arr = con([0.0, 1.0, None], dtype="Float64") + + result = arr.to_numpy(dtype="str") + expected = np.array([0.0, 1.0, pd.NA], dtype=f"{tm.ENDIAN}U32") + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_copy(): + # to_numpy can be zero-copy if no missing values + arr = pd.array([0.1, 0.2, 0.3], dtype="Float64") + result = arr.to_numpy(dtype="float64") + result[0] = 10 + tm.assert_extension_array_equal(arr, pd.array([10, 0.2, 0.3], dtype="Float64")) + + arr = pd.array([0.1, 0.2, 0.3], dtype="Float64") + result = arr.to_numpy(dtype="float64", copy=True) + result[0] = 10 + tm.assert_extension_array_equal(arr, pd.array([0.1, 0.2, 0.3], dtype="Float64")) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/conftest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/conftest.py new file mode 100644 index 00000000..f73400df --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/conftest.py @@ -0,0 +1,68 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas.core.arrays.integer import ( + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, +) + + +@pytest.fixture( + params=[ + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, + ] +) +def dtype(request): + """Parametrized fixture returning integer 'dtype'""" + return request.param() + + +@pytest.fixture +def data(dtype): + """ + Fixture returning 'data' array with valid and missing values according to + parametrized integer 'dtype'. + + Used to test dtype conversion with and without missing values. + """ + return pd.array( + list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100], + dtype=dtype, + ) + + +@pytest.fixture +def data_missing(dtype): + """ + Fixture returning array with exactly one NaN and one valid integer, + according to parametrized integer 'dtype'. + + Used to test dtype conversion with and without missing values. 
+ """ + return pd.array([np.nan, 1], dtype=dtype) + + +@pytest.fixture(params=["data", "data_missing"]) +def all_data(request, data, data_missing): + """Parametrized fixture returning 'data' or 'data_missing' integer arrays. + + Used to test dtype conversion with and without missing values. + """ + if request.param == "data": + return data + elif request.param == "data_missing": + return data_missing diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_arithmetic.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_arithmetic.py new file mode 100644 index 00000000..ce6c245c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_arithmetic.py @@ -0,0 +1,369 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core import ops +from pandas.core.arrays import FloatingArray + +# Basic test for the arithmetic array ops +# ----------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "opname, exp", + [("add", [1, 3, None, None, 9]), ("mul", [0, 2, None, None, 20])], + ids=["add", "mul"], +) +def test_add_mul(dtype, opname, exp): + a = pd.array([0, 1, None, 3, 4], dtype=dtype) + b = pd.array([1, 2, 3, None, 5], dtype=dtype) + + # array / array + expected = pd.array(exp, dtype=dtype) + + op = getattr(operator, opname) + result = op(a, b) + tm.assert_extension_array_equal(result, expected) + + op = getattr(ops, "r" + opname) + result = op(a, b) + tm.assert_extension_array_equal(result, expected) + + +def test_sub(dtype): + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a - b + expected = pd.array([1, 1, None, None, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_div(dtype): + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a / b + expected = pd.array([np.inf, 2, None, None, 1.25], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)]) +def test_divide_by_zero(zero, negative): + # https://github.com/pandas-dev/pandas/issues/27398, GH#22793 + a = pd.array([0, 1, -1, None], dtype="Int64") + result = a / zero + expected = FloatingArray( + np.array([np.nan, np.inf, -np.inf, 1], dtype="float64"), + np.array([False, False, False, True]), + ) + if negative: + expected *= -1 + tm.assert_extension_array_equal(result, expected) + + +def test_floordiv(dtype): + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a // b + # Series op sets 1//0 to np.inf, which IntegerArray does not do (yet) + expected = pd.array([0, 2, None, None, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_floordiv_by_int_zero_no_mask(any_int_ea_dtype): + # GH 48223: Aligns with non-masked floordiv + # but differs from numpy + # https://github.com/pandas-dev/pandas/issues/30188#issuecomment-564452740 + ser = pd.Series([0, 1], dtype=any_int_ea_dtype) + result = 1 // ser + expected = pd.Series([np.inf, 1.0], dtype="Float64") + tm.assert_series_equal(result, expected) + + ser_non_nullable = ser.astype(ser.dtype.numpy_dtype) + result = 1 // ser_non_nullable + expected = expected.astype(np.float64) + tm.assert_series_equal(result, expected) + + +def test_mod(dtype): + a = pd.array([1, 2, 3, None, 
5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a % b + expected = pd.array([0, 0, None, None, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_pow_scalar(): + a = pd.array([-1, 0, 1, None, 2], dtype="Int64") + result = a**0 + expected = pd.array([1, 1, 1, 1, 1], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = a**1 + expected = pd.array([-1, 0, 1, None, 2], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = a**pd.NA + expected = pd.array([None, None, 1, None, None], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = a**np.nan + expected = FloatingArray( + np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64"), + np.array([False, False, False, True, False]), + ) + tm.assert_extension_array_equal(result, expected) + + # reversed + a = a[1:] # Can't raise integers to negative powers. + + result = 0**a + expected = pd.array([1, 0, None, 0], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = 1**a + expected = pd.array([1, 1, 1, 1], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = pd.NA**a + expected = pd.array([1, None, None, None], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = np.nan**a + expected = FloatingArray( + np.array([1, np.nan, np.nan, np.nan], dtype="float64"), + np.array([False, False, True, False]), + ) + tm.assert_extension_array_equal(result, expected) + + +def test_pow_array(): + a = pd.array([0, 0, 0, 1, 1, 1, None, None, None]) + b = pd.array([0, 1, None, 0, 1, None, 0, 1, None]) + result = a**b + expected = pd.array([1, 0, None, 1, 1, 1, 1, None, None]) + tm.assert_extension_array_equal(result, expected) + + +def test_rpow_one_to_na(): + # https://github.com/pandas-dev/pandas/issues/22022 + # https://github.com/pandas-dev/pandas/issues/29997 + arr = pd.array([np.nan, np.nan], dtype="Int64") + result = np.array([1.0, 2.0]) ** arr + expected = pd.array([1.0, np.nan], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("other", [0, 0.5]) +def test_numpy_zero_dim_ndarray(other): + arr = pd.array([1, None, 2]) + result = arr + np.array(other) + expected = arr + other + tm.assert_equal(result, expected) + + +# Test generic characteristics / errors +# ----------------------------------------------------------------------------- + + +def test_error_invalid_values(data, all_arithmetic_operators): + op = all_arithmetic_operators + s = pd.Series(data) + ops = getattr(s, op) + + # invalid scalars + msg = "|".join( + [ + r"can only perform ops with numeric values", + r"IntegerArray cannot perform the operation mod", + r"unsupported operand type", + r"can only concatenate str \(not \"int\"\) to str", + "not all arguments converted during string", + "ufunc '.*' not supported for the input types, and the inputs could not", + "ufunc '.*' did not contain a loop with signature matching types", + "Addition/subtraction of integers and integer-arrays with Timestamp", + ] + ) + with pytest.raises(TypeError, match=msg): + ops("foo") + with pytest.raises(TypeError, match=msg): + ops(pd.Timestamp("20180101")) + + # invalid array-likes + str_ser = pd.Series("foo", index=s.index) + # with pytest.raises(TypeError, match=msg): + if all_arithmetic_operators in [ + "__mul__", + "__rmul__", + ]: # (data[~data.isna()] >= 0).all(): + res = ops(str_ser) + expected = pd.Series(["foo" * x for x in data], 
index=s.index) + expected = expected.fillna(np.nan) + # TODO: doing this fillna to keep tests passing as we make + # assert_almost_equal stricter, but the expected with pd.NA seems + # more-correct than np.nan here. + tm.assert_series_equal(res, expected) + else: + with pytest.raises(TypeError, match=msg): + ops(str_ser) + + msg = "|".join( + [ + "can only perform ops with numeric values", + "cannot perform .* with this index type: DatetimeArray", + "Addition/subtraction of integers and integer-arrays " + "with DatetimeArray is no longer supported. *", + "unsupported operand type", + r"can only concatenate str \(not \"int\"\) to str", + "not all arguments converted during string", + "cannot subtract DatetimeArray from ndarray", + ] + ) + with pytest.raises(TypeError, match=msg): + ops(pd.Series(pd.date_range("20180101", periods=len(s)))) + + +# Various +# ----------------------------------------------------------------------------- + + +# TODO test unsigned overflow + + +def test_arith_coerce_scalar(data, all_arithmetic_operators): + op = tm.get_op_from_name(all_arithmetic_operators) + s = pd.Series(data) + other = 0.01 + + result = op(s, other) + expected = op(s.astype(float), other) + expected = expected.astype("Float64") + + # rmod results in NaN that wasn't NA in original nullable Series -> unmask it + if all_arithmetic_operators == "__rmod__": + mask = (s == 0).fillna(False).to_numpy(bool) + expected.array._mask[mask] = False + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("other", [1.0, np.array(1.0)]) +def test_arithmetic_conversion(all_arithmetic_operators, other): + # if we have a float operand we should have a float result + # if that is equal to an integer + op = tm.get_op_from_name(all_arithmetic_operators) + + s = pd.Series([1, 2, 3], dtype="Int64") + result = op(s, other) + assert result.dtype == "Float64" + + +def test_cross_type_arithmetic(): + df = pd.DataFrame( + { + "A": pd.Series([1, 2, np.nan], dtype="Int64"), + "B": pd.Series([1, np.nan, 3], dtype="UInt8"), + "C": [1, 2, 3], + } + ) + + result = df.A + df.C + expected = pd.Series([2, 4, np.nan], dtype="Int64") + tm.assert_series_equal(result, expected) + + result = (df.A + df.C) * 3 == 12 + expected = pd.Series([False, True, None], dtype="boolean") + tm.assert_series_equal(result, expected) + + result = df.A + df.B + expected = pd.Series([2, np.nan, np.nan], dtype="Int64") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("op", ["mean"]) +def test_reduce_to_float(op): + # some reduce ops always return float, even if the result + # is a rounded number + df = pd.DataFrame( + { + "A": ["a", "b", "b"], + "B": [1, None, 3], + "C": pd.array([1, None, 3], dtype="Int64"), + } + ) + + # op + result = getattr(df.C, op)() + assert isinstance(result, float) + + # groupby + result = getattr(df.groupby("A"), op)() + + expected = pd.DataFrame( + {"B": np.array([1.0, 3.0]), "C": pd.array([1, 3], dtype="Float64")}, + index=pd.Index(["a", "b"], name="A"), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "source, neg_target, abs_target", + [ + ([1, 2, 3], [-1, -2, -3], [1, 2, 3]), + ([1, 2, None], [-1, -2, None], [1, 2, None]), + ([-1, 0, 1], [1, 0, -1], [1, 0, 1]), + ], +) +def test_unary_int_operators(any_signed_int_ea_dtype, source, neg_target, abs_target): + dtype = any_signed_int_ea_dtype + arr = pd.array(source, dtype=dtype) + neg_result, pos_result, abs_result = -arr, +arr, abs(arr) + neg_target = pd.array(neg_target, dtype=dtype) + abs_target = 
pd.array(abs_target, dtype=dtype) + + tm.assert_extension_array_equal(neg_result, neg_target) + tm.assert_extension_array_equal(pos_result, arr) + assert not tm.shares_memory(pos_result, arr) + tm.assert_extension_array_equal(abs_result, abs_target) + + +def test_values_multiplying_large_series_by_NA(): + # GH#33701 + + result = pd.NA * pd.Series(np.zeros(10001)) + expected = pd.Series([pd.NA] * 10001) + + tm.assert_series_equal(result, expected) + + +def test_bitwise(dtype): + left = pd.array([1, None, 3, 4], dtype=dtype) + right = pd.array([None, 3, 5, 4], dtype=dtype) + + result = left | right + expected = pd.array([None, None, 3 | 5, 4 | 4], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = left & right + expected = pd.array([None, None, 3 & 5, 4 & 4], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = left ^ right + expected = pd.array([None, None, 3 ^ 5, 4 ^ 4], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + # TODO: desired behavior when operating with boolean? defer? + + floats = right.astype("Float64") + with pytest.raises(TypeError, match="unsupported operand type"): + left | floats + with pytest.raises(TypeError, match="unsupported operand type"): + left & floats + with pytest.raises(TypeError, match="unsupported operand type"): + left ^ floats diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_comparison.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_comparison.py new file mode 100644 index 00000000..568b0b08 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_comparison.py @@ -0,0 +1,39 @@ +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.tests.arrays.masked_shared import ( + ComparisonOps, + NumericOps, +) + + +class TestComparisonOps(NumericOps, ComparisonOps): + @pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1]) + def test_scalar(self, other, comparison_op, dtype): + ComparisonOps.test_scalar(self, other, comparison_op, dtype) + + def test_compare_to_int(self, dtype, comparison_op): + # GH 28930 + op_name = f"__{comparison_op.__name__}__" + s1 = pd.Series([1, None, 3], dtype=dtype) + s2 = pd.Series([1, None, 3], dtype="float") + + method = getattr(s1, op_name) + result = method(2) + + method = getattr(s2, op_name) + expected = method(2).astype("boolean") + expected[s2.isna()] = pd.NA + + tm.assert_series_equal(result, expected) + + +def test_equals(): + # GH-30652 + # equals is generally tested in /tests/extension/base/methods, but this + # specifically tests that two arrays of the same class but different dtype + # do not evaluate equal + a1 = pd.array([1, 2, None], dtype="Int64") + a2 = pd.array([1, 2, None], dtype="Int32") + assert a1.equals(a2) is False diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_concat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_concat.py new file mode 100644 index 00000000..feba574d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_concat.py @@ -0,0 +1,69 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize( + "to_concat_dtypes, result_dtype", + [ + (["Int64", "Int64"], "Int64"), + (["UInt64", "UInt64"], "UInt64"), + (["Int8", "Int8"], "Int8"), + (["Int8", "Int16"], "Int16"), + (["UInt8", "Int8"], "Int16"), + (["Int32", "UInt32"], "Int64"), + (["Int64", 
"UInt64"], "Float64"), + (["Int64", "boolean"], "object"), + (["UInt8", "boolean"], "object"), + ], +) +def test_concat_series(to_concat_dtypes, result_dtype): + # we expect the same dtypes as we would get with non-masked inputs, + # just masked where available. + + result = pd.concat([pd.Series([0, 1, pd.NA], dtype=t) for t in to_concat_dtypes]) + expected = pd.concat([pd.Series([0, 1, pd.NA], dtype=object)] * 2).astype( + result_dtype + ) + tm.assert_series_equal(result, expected) + + # order doesn't matter for result + result = pd.concat( + [pd.Series([0, 1, pd.NA], dtype=t) for t in to_concat_dtypes[::-1]] + ) + expected = pd.concat([pd.Series([0, 1, pd.NA], dtype=object)] * 2).astype( + result_dtype + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "to_concat_dtypes, result_dtype", + [ + (["Int64", "int64"], "Int64"), + (["UInt64", "uint64"], "UInt64"), + (["Int8", "int8"], "Int8"), + (["Int8", "int16"], "Int16"), + (["UInt8", "int8"], "Int16"), + (["Int32", "uint32"], "Int64"), + (["Int64", "uint64"], "Float64"), + (["Int64", "bool"], "object"), + (["UInt8", "bool"], "object"), + ], +) +def test_concat_series_with_numpy(to_concat_dtypes, result_dtype): + # we expect the same dtypes as we would get with non-masked inputs, + # just masked where available. + + s1 = pd.Series([0, 1, pd.NA], dtype=to_concat_dtypes[0]) + s2 = pd.Series(np.array([0, 1], dtype=to_concat_dtypes[1])) + result = pd.concat([s1, s2], ignore_index=True) + expected = pd.Series([0, 1, pd.NA, 0, 1], dtype=object).astype(result_dtype) + tm.assert_series_equal(result, expected) + + # order doesn't matter for result + result = pd.concat([s2, s1], ignore_index=True) + expected = pd.Series([0, 1, 0, 1, pd.NA], dtype=object).astype(result_dtype) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_construction.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_construction.py new file mode 100644 index 00000000..9ecfc51c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_construction.py @@ -0,0 +1,243 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.api.types import is_integer +from pandas.core.arrays import IntegerArray +from pandas.core.arrays.integer import ( + Int8Dtype, + Int32Dtype, + Int64Dtype, +) + + +@pytest.fixture(params=[pd.array, IntegerArray._from_sequence]) +def constructor(request): + """Fixture returning parametrized IntegerArray from given sequence. + + Used to test dtype conversions. 
+ """ + return request.param + + +def test_uses_pandas_na(): + a = pd.array([1, None], dtype=Int64Dtype()) + assert a[1] is pd.NA + + +def test_from_dtype_from_float(data): + # construct from our dtype & string dtype + dtype = data.dtype + + # from float + expected = pd.Series(data) + result = pd.Series(data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)) + tm.assert_series_equal(result, expected) + + # from int / list + expected = pd.Series(data) + result = pd.Series(np.array(data).tolist(), dtype=str(dtype)) + tm.assert_series_equal(result, expected) + + # from int / array + expected = pd.Series(data).dropna().reset_index(drop=True) + dropped = np.array(data.dropna()).astype(np.dtype(dtype.type)) + result = pd.Series(dropped, dtype=str(dtype)) + tm.assert_series_equal(result, expected) + + +def test_conversions(data_missing): + # astype to object series + df = pd.DataFrame({"A": data_missing}) + result = df["A"].astype("object") + expected = pd.Series(np.array([pd.NA, 1], dtype=object), name="A") + tm.assert_series_equal(result, expected) + + # convert to object ndarray + # we assert that we are exactly equal + # including type conversions of scalars + result = df["A"].astype("object").values + expected = np.array([pd.NA, 1], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + for r, e in zip(result, expected): + if pd.isnull(r): + assert pd.isnull(e) + elif is_integer(r): + assert r == e + assert is_integer(e) + else: + assert r == e + assert type(r) == type(e) + + +def test_integer_array_constructor(): + values = np.array([1, 2, 3, 4], dtype="int64") + mask = np.array([False, False, False, True], dtype="bool") + + result = IntegerArray(values, mask) + expected = pd.array([1, 2, 3, np.nan], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + msg = r".* should be .* numpy array. 
Use the 'pd.array' function instead" + with pytest.raises(TypeError, match=msg): + IntegerArray(values.tolist(), mask) + + with pytest.raises(TypeError, match=msg): + IntegerArray(values, mask.tolist()) + + with pytest.raises(TypeError, match=msg): + IntegerArray(values.astype(float), mask) + msg = r"__init__\(\) missing 1 required positional argument: 'mask'" + with pytest.raises(TypeError, match=msg): + IntegerArray(values) + + +def test_integer_array_constructor_copy(): + values = np.array([1, 2, 3, 4], dtype="int64") + mask = np.array([False, False, False, True], dtype="bool") + + result = IntegerArray(values, mask) + assert result._data is values + assert result._mask is mask + + result = IntegerArray(values, mask, copy=True) + assert result._data is not values + assert result._mask is not mask + + +@pytest.mark.parametrize( + "a, b", + [ + ([1, None], [1, np.nan]), + ([None], [np.nan]), + ([None, np.nan], [np.nan, np.nan]), + ([np.nan, np.nan], [np.nan, np.nan]), + ], +) +def test_to_integer_array_none_is_nan(a, b): + result = pd.array(a, dtype="Int64") + expected = pd.array(b, dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values", + [ + ["foo", "bar"], + "foo", + 1, + 1.0, + pd.date_range("20130101", periods=2), + np.array(["foo"]), + [[1, 2], [3, 4]], + [np.nan, {"a": 1}], + ], +) +def test_to_integer_array_error(values): + # error in converting existing arrays to IntegerArrays + msg = "|".join( + [ + r"cannot be converted to IntegerDtype", + r"invalid literal for int\(\) with base 10:", + r"values must be a 1D list-like", + r"Cannot pass scalar", + r"int\(\) argument must be a string", + ] + ) + with pytest.raises((ValueError, TypeError), match=msg): + pd.array(values, dtype="Int64") + + with pytest.raises((ValueError, TypeError), match=msg): + IntegerArray._from_sequence(values) + + +def test_to_integer_array_inferred_dtype(constructor): + # if values has dtype -> respect it + result = constructor(np.array([1, 2], dtype="int8")) + assert result.dtype == Int8Dtype() + result = constructor(np.array([1, 2], dtype="int32")) + assert result.dtype == Int32Dtype() + + # if values have no dtype -> always int64 + result = constructor([1, 2]) + assert result.dtype == Int64Dtype() + + +def test_to_integer_array_dtype_keyword(constructor): + result = constructor([1, 2], dtype="Int8") + assert result.dtype == Int8Dtype() + + # if values has dtype -> override it + result = constructor(np.array([1, 2], dtype="int8"), dtype="Int32") + assert result.dtype == Int32Dtype() + + +def test_to_integer_array_float(): + result = IntegerArray._from_sequence([1.0, 2.0]) + expected = pd.array([1, 2], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + with pytest.raises(TypeError, match="cannot safely cast non-equivalent"): + IntegerArray._from_sequence([1.5, 2.0]) + + # for float dtypes, the itemsize is not preserved + result = IntegerArray._from_sequence(np.array([1.0, 2.0], dtype="float32")) + assert result.dtype == Int64Dtype() + + +def test_to_integer_array_str(): + result = IntegerArray._from_sequence(["1", "2", None]) + expected = pd.array([1, 2, np.nan], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + with pytest.raises( + ValueError, match=r"invalid literal for int\(\) with base 10: .*" + ): + IntegerArray._from_sequence(["1", "2", ""]) + + with pytest.raises( + ValueError, match=r"invalid literal for int\(\) with base 10: .*" + ): + IntegerArray._from_sequence(["1.5", "2.0"]) + + 
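+# NOTE (editor): illustrative sketch added for exposition; not part of the
+# upstream pandas test suite. It assumes the imports at the top of this file
+# and summarizes the safe-casting rule the float/str construction tests above
+# exercise: values are accepted only when they round-trip to the integer
+# dtype exactly, and lossy inputs raise instead of silently truncating.
+def example_safe_casting_rule():
+    exact = pd.array([1.0, 2.0], dtype="Int64")   # lossless floats are fine
+    assert exact[0] == 1
+    with pytest.raises(TypeError, match="cannot safely cast non-equivalent"):
+        pd.array([1.5, 2.0], dtype="Int64")       # 1.5 would lose precision
+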
+@pytest.mark.parametrize( + "bool_values, int_values, target_dtype, expected_dtype", + [ + ([False, True], [0, 1], Int64Dtype(), Int64Dtype()), + ([False, True], [0, 1], "Int64", Int64Dtype()), + ([False, True, np.nan], [0, 1, np.nan], Int64Dtype(), Int64Dtype()), + ], +) +def test_to_integer_array_bool( + constructor, bool_values, int_values, target_dtype, expected_dtype +): + result = constructor(bool_values, dtype=target_dtype) + assert result.dtype == expected_dtype + expected = pd.array(int_values, dtype=target_dtype) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values, to_dtype, result_dtype", + [ + (np.array([1], dtype="int64"), None, Int64Dtype), + (np.array([1, np.nan]), None, Int64Dtype), + (np.array([1, np.nan]), "int8", Int8Dtype), + ], +) +def test_to_integer_array(values, to_dtype, result_dtype): + # convert existing arrays to IntegerArrays + result = IntegerArray._from_sequence(values, dtype=to_dtype) + assert result.dtype == result_dtype() + expected = pd.array(values, dtype=result_dtype()) + tm.assert_extension_array_equal(result, expected) + + +def test_integer_array_from_boolean(): + # GH31104 + expected = pd.array(np.array([True, False]), dtype="Int64") + result = pd.array(np.array([True, False], dtype=object), dtype="Int64") + tm.assert_extension_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_dtypes.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_dtypes.py new file mode 100644 index 00000000..f50b4cfd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_dtypes.py @@ -0,0 +1,295 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.generic import ABCIndex + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays.integer import ( + Int8Dtype, + UInt32Dtype, +) + + +def test_dtypes(dtype): + # smoke tests on auto dtype construction + + if dtype.is_signed_integer: + assert np.dtype(dtype.type).kind == "i" + else: + assert np.dtype(dtype.type).kind == "u" + assert dtype.name is not None + + +@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"]) +def test_preserve_dtypes(op): + # TODO(#22346): preserve Int64 dtype + # for ops that enable (mean would actually work here + # but generally it is a float return value) + df = pd.DataFrame( + { + "A": ["a", "b", "b"], + "B": [1, None, 3], + "C": pd.array([1, None, 3], dtype="Int64"), + } + ) + + # op + result = getattr(df.C, op)() + if op in {"sum", "prod", "min", "max"}: + assert isinstance(result, np.int64) + else: + assert isinstance(result, int) + + # groupby + result = getattr(df.groupby("A"), op)() + + expected = pd.DataFrame( + {"B": np.array([1.0, 3.0]), "C": pd.array([1, 3], dtype="Int64")}, + index=pd.Index(["a", "b"], name="A"), + ) + tm.assert_frame_equal(result, expected) + + +def test_astype_nansafe(): + # see gh-22343 + arr = pd.array([np.nan, 1, 2], dtype="Int8") + msg = "cannot convert NA to integer" + + with pytest.raises(ValueError, match=msg): + arr.astype("uint32") + + +@pytest.mark.parametrize("dropna", [True, False]) +def test_construct_index(all_data, dropna): + # ensure that we do not coerce to different Index dtype or non-index + + all_data = all_data[:10] + if dropna: + other = np.array(all_data[~all_data.isna()]) + else: + other = all_data + + result = pd.Index(pd.array(other, dtype=all_data.dtype)) + expected = pd.Index(other, dtype=all_data.dtype) + assert all_data.dtype == expected.dtype # 
dont coerce to object + + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("dropna", [True, False]) +def test_astype_index(all_data, dropna): + # as an int/uint index to Index + + all_data = all_data[:10] + if dropna: + other = all_data[~all_data.isna()] + else: + other = all_data + + dtype = all_data.dtype + idx = pd.Index(np.array(other)) + assert isinstance(idx, ABCIndex) + + result = idx.astype(dtype) + expected = idx.astype(object).astype(dtype) + tm.assert_index_equal(result, expected) + + +def test_astype(all_data): + all_data = all_data[:10] + + ints = all_data[~all_data.isna()] + mixed = all_data + dtype = Int8Dtype() + + # coerce to same type - ints + s = pd.Series(ints) + result = s.astype(all_data.dtype) + expected = pd.Series(ints) + tm.assert_series_equal(result, expected) + + # coerce to same other - ints + s = pd.Series(ints) + result = s.astype(dtype) + expected = pd.Series(ints, dtype=dtype) + tm.assert_series_equal(result, expected) + + # coerce to same numpy_dtype - ints + s = pd.Series(ints) + result = s.astype(all_data.dtype.numpy_dtype) + expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype)) + tm.assert_series_equal(result, expected) + + # coerce to same type - mixed + s = pd.Series(mixed) + result = s.astype(all_data.dtype) + expected = pd.Series(mixed) + tm.assert_series_equal(result, expected) + + # coerce to same other - mixed + s = pd.Series(mixed) + result = s.astype(dtype) + expected = pd.Series(mixed, dtype=dtype) + tm.assert_series_equal(result, expected) + + # coerce to same numpy_dtype - mixed + s = pd.Series(mixed) + msg = "cannot convert NA to integer" + with pytest.raises(ValueError, match=msg): + s.astype(all_data.dtype.numpy_dtype) + + # coerce to object + s = pd.Series(mixed) + result = s.astype("object") + expected = pd.Series(np.asarray(mixed)) + tm.assert_series_equal(result, expected) + + +def test_astype_copy(): + arr = pd.array([1, 2, 3, None], dtype="Int64") + orig = pd.array([1, 2, 3, None], dtype="Int64") + + # copy=True -> ensure both data and mask are actual copies + result = arr.astype("Int64", copy=True) + assert result is not arr + assert not tm.shares_memory(result, arr) + result[0] = 10 + tm.assert_extension_array_equal(arr, orig) + result[0] = pd.NA + tm.assert_extension_array_equal(arr, orig) + + # copy=False + result = arr.astype("Int64", copy=False) + assert result is arr + assert np.shares_memory(result._data, arr._data) + assert np.shares_memory(result._mask, arr._mask) + result[0] = 10 + assert arr[0] == 10 + result[0] = pd.NA + assert arr[0] is pd.NA + + # astype to different dtype -> always needs a copy -> even with copy=False + # we need to ensure that also the mask is actually copied + arr = pd.array([1, 2, 3, None], dtype="Int64") + orig = pd.array([1, 2, 3, None], dtype="Int64") + + result = arr.astype("Int32", copy=False) + assert not tm.shares_memory(result, arr) + result[0] = 10 + tm.assert_extension_array_equal(arr, orig) + result[0] = pd.NA + tm.assert_extension_array_equal(arr, orig) + + +def test_astype_to_larger_numpy(): + a = pd.array([1, 2], dtype="Int32") + result = a.astype("int64") + expected = np.array([1, 2], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + a = pd.array([1, 2], dtype="UInt32") + result = a.astype("uint64") + expected = np.array([1, 2], dtype="uint64") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"]) +def test_astype_specific_casting(dtype): + s = 
pd.Series([1, 2, 3], dtype="Int64") + result = s.astype(dtype) + expected = pd.Series([1, 2, 3], dtype=dtype) + tm.assert_series_equal(result, expected) + + s = pd.Series([1, 2, 3, None], dtype="Int64") + result = s.astype(dtype) + expected = pd.Series([1, 2, 3, None], dtype=dtype) + tm.assert_series_equal(result, expected) + + +def test_astype_floating(): + arr = pd.array([1, 2, None], dtype="Int64") + result = arr.astype("Float64") + expected = pd.array([1.0, 2.0, None], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +def test_astype_dt64(): + # GH#32435 + arr = pd.array([1, 2, 3, pd.NA]) * 10**9 + + result = arr.astype("datetime64[ns]") + + expected = np.array([1, 2, 3, "NaT"], dtype="M8[s]").astype("M8[ns]") + tm.assert_numpy_array_equal(result, expected) + + +def test_construct_cast_invalid(dtype): + msg = "cannot safely" + arr = [1.2, 2.3, 3.7] + with pytest.raises(TypeError, match=msg): + pd.array(arr, dtype=dtype) + + with pytest.raises(TypeError, match=msg): + pd.Series(arr).astype(dtype) + + arr = [1.2, 2.3, 3.7, np.nan] + with pytest.raises(TypeError, match=msg): + pd.array(arr, dtype=dtype) + + with pytest.raises(TypeError, match=msg): + pd.Series(arr).astype(dtype) + + +@pytest.mark.parametrize("in_series", [True, False]) +def test_to_numpy_na_nan(in_series): + a = pd.array([0, 1, None], dtype="Int64") + if in_series: + a = pd.Series(a) + + result = a.to_numpy(dtype="float64", na_value=np.nan) + expected = np.array([0.0, 1.0, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + result = a.to_numpy(dtype="int64", na_value=-1) + expected = np.array([0, 1, -1], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + result = a.to_numpy(dtype="bool", na_value=False) + expected = np.array([False, True, False], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("in_series", [True, False]) +@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"]) +def test_to_numpy_dtype(dtype, in_series): + a = pd.array([0, 1], dtype="Int64") + if in_series: + a = pd.Series(a) + + result = a.to_numpy(dtype=dtype) + expected = np.array([0, 1], dtype=dtype) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["float64", "int64", "bool"]) +def test_to_numpy_na_raises(dtype): + a = pd.array([0, 1, None], dtype="Int64") + with pytest.raises(ValueError, match=dtype): + a.to_numpy(dtype=dtype) + + +def test_astype_str(): + a = pd.array([1, 2, None], dtype="Int64") + expected = np.array(["1", "2", "<NA>"], dtype=f"{tm.ENDIAN}U21") + + tm.assert_numpy_array_equal(a.astype(str), expected) + tm.assert_numpy_array_equal(a.astype("str"), expected) + + +def test_astype_boolean(): + # https://github.com/pandas-dev/pandas/issues/31102 + a = pd.array([1, 0, -1, 2, None], dtype="Int64") + result = a.astype("boolean") + expected = pd.array([True, False, True, True, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_function.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_function.py new file mode 100644 index 00000000..d48b636a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_function.py @@ -0,0 +1,203 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import FloatingArray + + +@pytest.mark.parametrize("ufunc", [np.abs, np.sign]) +# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127> +@pytest.mark.filterwarnings("ignore:invalid value encountered in sign:RuntimeWarning") +def test_ufuncs_single_int(ufunc): + a = pd.array([1, 2, -3, np.nan]) + result = ufunc(a) + expected = pd.array(ufunc(a.astype(float)), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(a) + result = ufunc(s) + expected = pd.Series(pd.array(ufunc(a.astype(float)), dtype="Int64")) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt]) +def test_ufuncs_single_float(ufunc): + a = pd.array([1, 2, -3, np.nan]) + with np.errstate(invalid="ignore"): + result = ufunc(a) + expected = FloatingArray(ufunc(a.astype(float)), mask=a._mask) + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(a) + with np.errstate(invalid="ignore"): + result = ufunc(s) + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.add, np.subtract]) +def test_ufuncs_binary_int(ufunc): + # two IntegerArrays + a = pd.array([1, 2, -3, np.nan]) + result = ufunc(a, a) + expected = pd.array(ufunc(a.astype(float), a.astype(float)), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + # IntegerArray with numpy array + arr = np.array([1, 2, 3, 4]) + result = ufunc(a, arr) + expected = pd.array(ufunc(a.astype(float), arr), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = ufunc(arr, a) + expected = pd.array(ufunc(arr, a.astype(float)), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + # IntegerArray with scalar + result = ufunc(a, 1) + expected = pd.array(ufunc(a.astype(float), 1), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = ufunc(1, a) + expected = pd.array(ufunc(1, a.astype(float)), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + +def test_ufunc_binary_output(): + a = pd.array([1, 2, np.nan]) + result = np.modf(a) + expected = np.modf(a.to_numpy(na_value=np.nan, dtype="float")) + expected = (pd.array(expected[0]), pd.array(expected[1])) + + assert isinstance(result, tuple) + assert len(result) == 2 + + for x, y in zip(result, expected): + tm.assert_extension_array_equal(x, y) + + +@pytest.mark.parametrize("values", [[0, 1], [0, None]]) +def test_ufunc_reduce_raises(values): + arr = pd.array(values) + + res = np.add.reduce(arr) + expected = arr.sum(skipna=False) + tm.assert_almost_equal(res, expected) + + +@pytest.mark.parametrize( + "pandasmethname, kwargs", + [ + ("var", {"ddof": 0}), + ("var", {"ddof": 1}), + ("std", {"ddof": 0}), + ("std", {"ddof": 1}), + ("kurtosis", {}), + ("skew", {}), + ("sem", {}), + ], +) +def test_stat_method(pandasmethname, kwargs): + s = pd.Series(data=[1, 2, 3, 4, 5, 6, np.nan, np.nan], dtype="Int64") + pandasmeth = getattr(s, pandasmethname) + result = pandasmeth(**kwargs) + s2 = pd.Series(data=[1, 2, 3, 4, 5, 6], dtype="Int64") + pandasmeth = getattr(s2, pandasmethname) + expected = pandasmeth(**kwargs) + assert expected == result + + +def test_value_counts_na(): + arr = pd.array([1, 2, 1, pd.NA], dtype="Int64") + result = arr.value_counts(dropna=False) + ex_index = pd.Index([1, 2, pd.NA], dtype="Int64") + assert ex_index.dtype == "Int64" + expected = pd.Series([2, 1, 1], index=ex_index, dtype="Int64", name="count") + tm.assert_series_equal(result, expected) + + result = arr.value_counts(dropna=True) + expected = pd.Series([2, 1], index=arr[:2], dtype="Int64", name="count") + assert
expected.index.dtype == arr.dtype + tm.assert_series_equal(result, expected) + + +def test_value_counts_empty(): + # https://github.com/pandas-dev/pandas/issues/33317 + ser = pd.Series([], dtype="Int64") + result = ser.value_counts() + idx = pd.Index([], dtype=ser.dtype) + assert idx.dtype == ser.dtype + expected = pd.Series([], index=idx, dtype="Int64", name="count") + tm.assert_series_equal(result, expected) + + +def test_value_counts_with_normalize(): + # GH 33172 + ser = pd.Series([1, 2, 1, pd.NA], dtype="Int64") + result = ser.value_counts(normalize=True) + expected = pd.Series([2, 1], index=ser[:2], dtype="Float64", name="proportion") / 3 + assert expected.index.dtype == ser.dtype + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("min_count", [0, 4]) +def test_integer_array_sum(skipna, min_count, any_int_ea_dtype): + dtype = any_int_ea_dtype + arr = pd.array([1, 2, 3, None], dtype=dtype) + result = arr.sum(skipna=skipna, min_count=min_count) + if skipna and min_count == 0: + assert result == 6 + else: + assert result is pd.NA + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("method", ["min", "max"]) +def test_integer_array_min_max(skipna, method, any_int_ea_dtype): + dtype = any_int_ea_dtype + arr = pd.array([0, 1, None], dtype=dtype) + func = getattr(arr, method) + result = func(skipna=skipna) + if skipna: + assert result == (0 if method == "min" else 1) + else: + assert result is pd.NA + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("min_count", [0, 9]) +def test_integer_array_prod(skipna, min_count, any_int_ea_dtype): + dtype = any_int_ea_dtype + arr = pd.array([1, 2, None], dtype=dtype) + result = arr.prod(skipna=skipna, min_count=min_count) + if skipna and min_count == 0: + assert result == 2 + else: + assert result is pd.NA + + +@pytest.mark.parametrize( + "values, expected", [([1, 2, 3], 6), ([1, 2, 3, None], 6), ([None], 0)] +) +def test_integer_array_numpy_sum(values, expected): + arr = pd.array(values, dtype="Int64") + result = np.sum(arr) + assert result == expected + + +@pytest.mark.parametrize("op", ["sum", "prod", "min", "max"]) +def test_dataframe_reductions(op): + # https://github.com/pandas-dev/pandas/pull/32867 + # ensure the integers are not cast to float during reductions + df = pd.DataFrame({"a": pd.array([1, 2], dtype="Int64")}) + result = getattr(df, op)() + assert isinstance(result["a"], np.int64) + + +# TODO(jreback) - these need testing / are broken + +# shift + +# set_index (destroys type) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_indexing.py new file mode 100644 index 00000000..4b953d69 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_indexing.py @@ -0,0 +1,19 @@ +import pandas as pd +import pandas._testing as tm + + +def test_array_setitem_nullable_boolean_mask(): + # GH 31446 + ser = pd.Series([1, 2], dtype="Int64") + result = ser.where(ser > 1) + expected = pd.Series([pd.NA, 2], dtype="Int64") + tm.assert_series_equal(result, expected) + + +def test_array_setitem(): + # GH 31446 + arr = pd.Series([1, 2], dtype="Int64").array + arr[arr > 1] = 1 + + expected = pd.array([1, 1], dtype="Int64") + tm.assert_extension_array_equal(arr, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_reduction.py
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_reduction.py new file mode 100644 index 00000000..1c91cd25 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_reduction.py @@ -0,0 +1,123 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Series, + array, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "op, expected", + [ + ["sum", np.int64(3)], + ["prod", np.int64(2)], + ["min", np.int64(1)], + ["max", np.int64(2)], + ["mean", np.float64(1.5)], + ["median", np.float64(1.5)], + ["var", np.float64(0.5)], + ["std", np.float64(0.5**0.5)], + ["skew", pd.NA], + ["kurt", pd.NA], + ["any", True], + ["all", True], + ], +) +def test_series_reductions(op, expected): + ser = Series([1, 2], dtype="Int64") + result = getattr(ser, op)() + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "op, expected", + [ + ["sum", Series([3], index=["a"], dtype="Int64")], + ["prod", Series([2], index=["a"], dtype="Int64")], + ["min", Series([1], index=["a"], dtype="Int64")], + ["max", Series([2], index=["a"], dtype="Int64")], + ["mean", Series([1.5], index=["a"], dtype="Float64")], + ["median", Series([1.5], index=["a"], dtype="Float64")], + ["var", Series([0.5], index=["a"], dtype="Float64")], + ["std", Series([0.5**0.5], index=["a"], dtype="Float64")], + ["skew", Series([pd.NA], index=["a"], dtype="Float64")], + ["kurt", Series([pd.NA], index=["a"], dtype="Float64")], + ["any", Series([True], index=["a"], dtype="boolean")], + ["all", Series([True], index=["a"], dtype="boolean")], + ], +) +def test_dataframe_reductions(op, expected): + df = DataFrame({"a": array([1, 2], dtype="Int64")}) + result = getattr(df, op)() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "op, expected", + [ + ["sum", array([1, 3], dtype="Int64")], + ["prod", array([1, 3], dtype="Int64")], + ["min", array([1, 3], dtype="Int64")], + ["max", array([1, 3], dtype="Int64")], + ["mean", array([1, 3], dtype="Float64")], + ["median", array([1, 3], dtype="Float64")], + ["var", array([pd.NA], dtype="Float64")], + ["std", array([pd.NA], dtype="Float64")], + ["skew", array([pd.NA], dtype="Float64")], + ["any", array([True, True], dtype="boolean")], + ["all", array([True, True], dtype="boolean")], + ], +) +def test_groupby_reductions(op, expected): + df = DataFrame( + { + "A": ["a", "b", "b"], + "B": array([1, None, 3], dtype="Int64"), + } + ) + result = getattr(df.groupby("A"), op)() + expected = DataFrame(expected, index=pd.Index(["a", "b"], name="A"), columns=["B"]) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "op, expected", + [ + ["sum", Series([4, 4], index=["B", "C"], dtype="Float64")], + ["prod", Series([3, 3], index=["B", "C"], dtype="Float64")], + ["min", Series([1, 1], index=["B", "C"], dtype="Float64")], + ["max", Series([3, 3], index=["B", "C"], dtype="Float64")], + ["mean", Series([2, 2], index=["B", "C"], dtype="Float64")], + ["median", Series([2, 2], index=["B", "C"], dtype="Float64")], + ["var", Series([2, 2], index=["B", "C"], dtype="Float64")], + ["std", Series([2**0.5, 2**0.5], index=["B", "C"], dtype="Float64")], + ["skew", Series([pd.NA, pd.NA], index=["B", "C"], dtype="Float64")], + ["kurt", Series([pd.NA, pd.NA], index=["B", "C"], dtype="Float64")], + ["any", Series([True, True, True], index=["A", "B", "C"], dtype="boolean")], + ["all", Series([True, True, True], index=["A", "B", "C"], dtype="boolean")], + ], +) +def 
test_mixed_reductions(op, expected): + df = DataFrame( + { + "A": ["a", "b", "b"], + "B": [1, None, 3], + "C": array([1, None, 3], dtype="Int64"), + } + ) + + # series + result = getattr(df.C, op)() + tm.assert_equal(result, expected["C"]) + + # frame + if op in ["any", "all"]: + result = getattr(df, op)() + else: + result = getattr(df, op)(numeric_only=True) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_repr.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_repr.py new file mode 100644 index 00000000..168210ee --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/integer/test_repr.py @@ -0,0 +1,67 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas.core.arrays.integer import ( + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, +) + + +def test_dtypes(dtype): + # smoke tests on auto dtype construction + + if dtype.is_signed_integer: + assert np.dtype(dtype.type).kind == "i" + else: + assert np.dtype(dtype.type).kind == "u" + assert dtype.name is not None + + +@pytest.mark.parametrize( + "dtype, expected", + [ + (Int8Dtype(), "Int8Dtype()"), + (Int16Dtype(), "Int16Dtype()"), + (Int32Dtype(), "Int32Dtype()"), + (Int64Dtype(), "Int64Dtype()"), + (UInt8Dtype(), "UInt8Dtype()"), + (UInt16Dtype(), "UInt16Dtype()"), + (UInt32Dtype(), "UInt32Dtype()"), + (UInt64Dtype(), "UInt64Dtype()"), + ], +) +def test_repr_dtype(dtype, expected): + assert repr(dtype) == expected + + +def test_repr_array(): + result = repr(pd.array([1, None, 3])) + expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64" + assert result == expected + + +def test_repr_array_long(): + data = pd.array([1, 2, None] * 1000) + expected = ( + "<IntegerArray>\n" + "[   1,    2, <NA>,    1,    2, <NA>,    1,    2, <NA>,    1,\n" + " ...\n" + " <NA>,    1,    2, <NA>,    1,    2, <NA>,    1,    2, <NA>]\n" + "Length: 3000, dtype: Int64" + ) + result = repr(data) + assert result == expected + + +def test_frame_repr(data_missing): + df = pd.DataFrame({"A": data_missing}) + result = repr(df) + expected = "      A\n0  <NA>\n1     1" + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_astype.py new file mode 100644 index 00000000..d7a2140f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_astype.py @@ -0,0 +1,28 @@ +import pytest + +from pandas import ( + Categorical, + CategoricalDtype, + Index, + IntervalIndex, +) +import pandas._testing as tm + + +class TestAstype: + @pytest.mark.parametrize("ordered", [True, False]) + def test_astype_categorical_retains_ordered(self, ordered): + index = IntervalIndex.from_breaks(range(5)) + arr = index._data + + dtype = CategoricalDtype(None, ordered=ordered) + + expected = Categorical(list(arr), ordered=ordered) + result = arr.astype(dtype) + assert result.ordered is ordered + tm.assert_categorical_equal(result, expected) + + # test IntervalIndex.astype while we're at it.
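+ # With categories=None the CategoricalDtype infers its categories from the + # values, so the Index path should behave exactly like the array path.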
+ result = index.astype(dtype) + expected = Index(expected) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_interval.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_interval.py new file mode 100644 index 00000000..761b8528 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_interval.py @@ -0,0 +1,412 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + Interval, + IntervalIndex, + Timedelta, + Timestamp, + date_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.arrays import IntervalArray + + +@pytest.fixture( + params=[ + (Index([0, 2, 4]), Index([1, 3, 5])), + (Index([0.0, 1.0, 2.0]), Index([1.0, 2.0, 3.0])), + (timedelta_range("0 days", periods=3), timedelta_range("1 day", periods=3)), + (date_range("20170101", periods=3), date_range("20170102", periods=3)), + ( + date_range("20170101", periods=3, tz="US/Eastern"), + date_range("20170102", periods=3, tz="US/Eastern"), + ), + ], + ids=lambda x: str(x[0].dtype), +) +def left_right_dtypes(request): + """ + Fixture for building an IntervalArray from various dtypes + """ + return request.param + + +class TestAttributes: + @pytest.mark.parametrize( + "left, right", + [ + (0, 1), + (Timedelta("0 days"), Timedelta("1 day")), + (Timestamp("2018-01-01"), Timestamp("2018-01-02")), + ( + Timestamp("2018-01-01", tz="US/Eastern"), + Timestamp("2018-01-02", tz="US/Eastern"), + ), + ], + ) + @pytest.mark.parametrize("constructor", [IntervalArray, IntervalIndex]) + def test_is_empty(self, constructor, left, right, closed): + # GH27219 + tuples = [(left, left), (left, right), np.nan] + expected = np.array([closed != "both", False, False]) + result = constructor.from_tuples(tuples, closed=closed).is_empty + tm.assert_numpy_array_equal(result, expected) + + +class TestMethods: + @pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"]) + def test_set_closed(self, closed, new_closed): + # GH 21670 + array = IntervalArray.from_breaks(range(10), closed=closed) + result = array.set_closed(new_closed) + expected = IntervalArray.from_breaks(range(10), closed=new_closed) + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize( + "other", + [ + Interval(0, 1, closed="right"), + IntervalArray.from_breaks([1, 2, 3, 4], closed="right"), + ], + ) + def test_where_raises(self, other): + # GH#45768 The IntervalArray methods raises; the Series method coerces + ser = pd.Series(IntervalArray.from_breaks([1, 2, 3, 4], closed="left")) + mask = np.array([True, False, True]) + match = "'value.closed' is 'right', expected 'left'." 
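+ # The array-level _where is strict about 'closed' and raises; the + # Series-level .where below coerces to object dtype instead.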
+ with pytest.raises(ValueError, match=match): + ser.array._where(mask, other) + + res = ser.where(mask, other=other) + expected = ser.astype(object).where(mask, other) + tm.assert_series_equal(res, expected) + + def test_shift(self): + # https://github.com/pandas-dev/pandas/issues/31495, GH#22428, GH#31502 + a = IntervalArray.from_breaks([1, 2, 3]) + result = a.shift() + # int -> float + expected = IntervalArray.from_tuples([(np.nan, np.nan), (1.0, 2.0)]) + tm.assert_interval_array_equal(result, expected) + + msg = "can only insert Interval objects and NA into an IntervalArray" + with pytest.raises(TypeError, match=msg): + a.shift(1, fill_value=pd.NaT) + + def test_shift_datetime(self): + # GH#31502, GH#31504 + a = IntervalArray.from_breaks(date_range("2000", periods=4)) + result = a.shift(2) + expected = a.take([-1, -1, 0], allow_fill=True) + tm.assert_interval_array_equal(result, expected) + + result = a.shift(-1) + expected = a.take([1, 2, -1], allow_fill=True) + tm.assert_interval_array_equal(result, expected) + + msg = "can only insert Interval objects and NA into an IntervalArray" + with pytest.raises(TypeError, match=msg): + a.shift(1, fill_value=np.timedelta64("NaT", "ns")) + + +class TestSetitem: + def test_set_na(self, left_right_dtypes): + left, right = left_right_dtypes + left = left.copy(deep=True) + right = right.copy(deep=True) + result = IntervalArray.from_arrays(left, right) + + if result.dtype.subtype.kind not in ["m", "M"]: + msg = "'value' should be an interval type, got <.*NaTType'> instead." + with pytest.raises(TypeError, match=msg): + result[0] = pd.NaT + if result.dtype.subtype.kind in ["i", "u"]: + msg = "Cannot set float NaN to integer-backed IntervalArray" + # GH#45484 TypeError, not ValueError, matches what we get with + # non-NA un-holdable value. 
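+ # e.g. an int64-backed IntervalArray has no representation for NaN in its + # left/right arrays, so the assignment is rejected rather than upcast.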
+ with pytest.raises(TypeError, match=msg): + result[0] = np.nan + return + + result[0] = np.nan + + expected_left = Index([left._na_value] + list(left[1:])) + expected_right = Index([right._na_value] + list(right[1:])) + expected = IntervalArray.from_arrays(expected_left, expected_right) + + tm.assert_extension_array_equal(result, expected) + + def test_setitem_mismatched_closed(self): + arr = IntervalArray.from_breaks(range(4)) + orig = arr.copy() + other = arr.set_closed("both") + + msg = "'value.closed' is 'both', expected 'right'" + with pytest.raises(ValueError, match=msg): + arr[0] = other[0] + with pytest.raises(ValueError, match=msg): + arr[:1] = other[:1] + with pytest.raises(ValueError, match=msg): + arr[:0] = other[:0] + with pytest.raises(ValueError, match=msg): + arr[:] = other[::-1] + with pytest.raises(ValueError, match=msg): + arr[:] = list(other[::-1]) + with pytest.raises(ValueError, match=msg): + arr[:] = other[::-1].astype(object) + with pytest.raises(ValueError, match=msg): + arr[:] = other[::-1].astype("category") + + # empty list should be no-op + arr[:0] = [] + tm.assert_interval_array_equal(arr, orig) + + +def test_repr(): + # GH 25022 + arr = IntervalArray.from_tuples([(0, 1), (1, 2)]) + result = repr(arr) + expected = ( + "<IntervalArray>\n" + "[(0, 1], (1, 2]]\n" + "Length: 2, dtype: interval[int64, right]" + ) + assert result == expected + + +class TestReductions: + def test_min_max_invalid_axis(self, left_right_dtypes): + left, right = left_right_dtypes + left = left.copy(deep=True) + right = right.copy(deep=True) + arr = IntervalArray.from_arrays(left, right) + + msg = "`axis` must be fewer than the number of dimensions" + for axis in [-2, 1]: + with pytest.raises(ValueError, match=msg): + arr.min(axis=axis) + with pytest.raises(ValueError, match=msg): + arr.max(axis=axis) + + msg = "'>=' not supported between" + with pytest.raises(TypeError, match=msg): + arr.min(axis="foo") + with pytest.raises(TypeError, match=msg): + arr.max(axis="foo") + + def test_min_max(self, left_right_dtypes, index_or_series_or_array): + # GH#44746 + left, right = left_right_dtypes + left = left.copy(deep=True) + right = right.copy(deep=True) + arr = IntervalArray.from_arrays(left, right) + + # The expected results below are only valid if monotonic + assert left.is_monotonic_increasing + assert Index(arr).is_monotonic_increasing + + MIN = arr[0] + MAX = arr[-1] + + indexer = np.arange(len(arr)) + np.random.default_rng(2).shuffle(indexer) + arr = arr.take(indexer) + + arr_na = arr.insert(2, np.nan) + + arr = index_or_series_or_array(arr) + arr_na = index_or_series_or_array(arr_na) + + for skipna in [True, False]: + res = arr.min(skipna=skipna) + assert res == MIN + assert type(res) == type(MIN) + + res = arr.max(skipna=skipna) + assert res == MAX + assert type(res) == type(MAX) + + res = arr_na.min(skipna=False) + assert np.isnan(res) + res = arr_na.max(skipna=False) + assert np.isnan(res) + + res = arr_na.min(skipna=True) + assert res == MIN + assert type(res) == type(MIN) + res = arr_na.max(skipna=True) + assert res == MAX + assert type(res) == type(MAX) + + +# ---------------------------------------------------------------------------- +# Arrow interaction + + +def test_arrow_extension_type(): + pa = pytest.importorskip("pyarrow") + + from pandas.core.arrays.arrow.extension_types import ArrowIntervalType + + p1 = ArrowIntervalType(pa.int64(), "left") + p2 = ArrowIntervalType(pa.int64(), "left") + p3 = ArrowIntervalType(pa.int64(), "right") + + assert p1.closed == "left" + assert p1 == p2 +
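# equality of ArrowIntervalType is keyed on the (subtype, closed) pair, so + # flipping only the closed side must break both == and hash: +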
assert p1 != p3 + assert hash(p1) == hash(p2) + assert hash(p1) != hash(p3) + + +def test_arrow_array(): + pa = pytest.importorskip("pyarrow") + + from pandas.core.arrays.arrow.extension_types import ArrowIntervalType + + intervals = pd.interval_range(1, 5, freq=1).array + + result = pa.array(intervals) + assert isinstance(result.type, ArrowIntervalType) + assert result.type.closed == intervals.closed + assert result.type.subtype == pa.int64() + assert result.storage.field("left").equals(pa.array([1, 2, 3, 4], type="int64")) + assert result.storage.field("right").equals(pa.array([2, 3, 4, 5], type="int64")) + + expected = pa.array([{"left": i, "right": i + 1} for i in range(1, 5)]) + assert result.storage.equals(expected) + + # convert to its storage type + result = pa.array(intervals, type=expected.type) + assert result.equals(expected) + + # unsupported conversions + with pytest.raises(TypeError, match="Not supported to convert IntervalArray"): + pa.array(intervals, type="float64") + + with pytest.raises(TypeError, match="Not supported to convert IntervalArray"): + pa.array(intervals, type=ArrowIntervalType(pa.float64(), "left")) + + +def test_arrow_array_missing(): + pa = pytest.importorskip("pyarrow") + + from pandas.core.arrays.arrow.extension_types import ArrowIntervalType + + arr = IntervalArray.from_breaks([0.0, 1.0, 2.0, 3.0]) + arr[1] = None + + result = pa.array(arr) + assert isinstance(result.type, ArrowIntervalType) + assert result.type.closed == arr.closed + assert result.type.subtype == pa.float64() + + # fields have missing values (not NaN) + left = pa.array([0.0, None, 2.0], type="float64") + right = pa.array([1.0, None, 3.0], type="float64") + assert result.storage.field("left").equals(left) + assert result.storage.field("right").equals(right) + + # structarray itself also has missing values on the array level + vals = [ + {"left": 0.0, "right": 1.0}, + {"left": None, "right": None}, + {"left": 2.0, "right": 3.0}, + ] + expected = pa.StructArray.from_pandas(vals, mask=np.array([False, True, False])) + assert result.storage.equals(expected) + + +@pytest.mark.parametrize( + "breaks", + [[0.0, 1.0, 2.0, 3.0], date_range("2017", periods=4, freq="D")], + ids=["float", "datetime64[ns]"], +) +def test_arrow_table_roundtrip(breaks): + pa = pytest.importorskip("pyarrow") + + from pandas.core.arrays.arrow.extension_types import ArrowIntervalType + + arr = IntervalArray.from_breaks(breaks) + arr[1] = None + df = pd.DataFrame({"a": arr}) + + table = pa.table(df) + assert isinstance(table.field("a").type, ArrowIntervalType) + result = table.to_pandas() + assert isinstance(result["a"].dtype, pd.IntervalDtype) + tm.assert_frame_equal(result, df) + + table2 = pa.concat_tables([table, table]) + result = table2.to_pandas() + expected = pd.concat([df, df], ignore_index=True) + tm.assert_frame_equal(result, expected) + + # GH-41040 + table = pa.table( + [pa.chunked_array([], type=table.column(0).type)], schema=table.schema + ) + result = table.to_pandas() + tm.assert_frame_equal(result, expected[0:0]) + + +@pytest.mark.parametrize( + "breaks", + [[0.0, 1.0, 2.0, 3.0], date_range("2017", periods=4, freq="D")], + ids=["float", "datetime64[ns]"], +) +def test_arrow_table_roundtrip_without_metadata(breaks): + pa = pytest.importorskip("pyarrow") + + arr = IntervalArray.from_breaks(breaks) + arr[1] = None + df = pd.DataFrame({"a": arr}) + + table = pa.table(df) + # remove the metadata + table = table.replace_schema_metadata() + assert table.schema.metadata is None + + result = table.to_pandas() + 
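# the registered extension type plus the pandas schema metadata let + # to_pandas() rebuild the IntervalDtype column, missing value included: +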
assert isinstance(result["a"].dtype, pd.IntervalDtype) + tm.assert_frame_equal(result, df) + + +def test_from_arrow_from_raw_struct_array(): + # in case pyarrow lost the Interval extension type (eg on parquet roundtrip + # with datetime64[ns] subtype, see GH-45881), still allow conversion + # from arrow to IntervalArray + pa = pytest.importorskip("pyarrow") + + arr = pa.array([{"left": 0, "right": 1}, {"left": 1, "right": 2}]) + dtype = pd.IntervalDtype(np.dtype("int64"), closed="neither") + + result = dtype.__from_arrow__(arr) + expected = IntervalArray.from_breaks( + np.array([0, 1, 2], dtype="int64"), closed="neither" + ) + tm.assert_extension_array_equal(result, expected) + + result = dtype.__from_arrow__(pa.chunked_array([arr])) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("timezone", ["UTC", "US/Pacific", "GMT"]) +def test_interval_index_subtype(timezone, inclusive_endpoints_fixture): + # GH 46999 + dates = date_range("2022", periods=3, tz=timezone) + dtype = f"interval[datetime64[ns, {timezone}], {inclusive_endpoints_fixture}]" + result = IntervalIndex.from_arrays( + ["2022-01-01", "2022-01-02"], + ["2022-01-02", "2022-01-03"], + closed=inclusive_endpoints_fixture, + dtype=dtype, + ) + expected = IntervalIndex.from_arrays( + dates[:-1], dates[1:], closed=inclusive_endpoints_fixture + ) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_ops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_ops.py new file mode 100644 index 00000000..4853bec5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_ops.py @@ -0,0 +1,93 @@ +"""Tests for Interval-Interval operations, such as overlaps, contains, etc.""" +import numpy as np +import pytest + +from pandas import ( + Interval, + IntervalIndex, + Timedelta, + Timestamp, +) +import pandas._testing as tm +from pandas.core.arrays import IntervalArray + + +@pytest.fixture(params=[IntervalArray, IntervalIndex]) +def constructor(request): + """ + Fixture for testing both interval container classes. + """ + return request.param + + +@pytest.fixture( + params=[ + (Timedelta("0 days"), Timedelta("1 day")), + (Timestamp("2018-01-01"), Timedelta("1 day")), + (0, 1), + ], + ids=lambda x: type(x[0]).__name__, +) +def start_shift(request): + """ + Fixture for generating intervals of different types from a start value + and a shift value that can be added to start to generate an endpoint. 
+ """ + return request.param + + +class TestOverlaps: + def test_overlaps_interval(self, constructor, start_shift, closed, other_closed): + start, shift = start_shift + interval = Interval(start, start + 3 * shift, other_closed) + + # intervals: identical, nested, spanning, partial, adjacent, disjoint + tuples = [ + (start, start + 3 * shift), + (start + shift, start + 2 * shift), + (start - shift, start + 4 * shift), + (start + 2 * shift, start + 4 * shift), + (start + 3 * shift, start + 4 * shift), + (start + 4 * shift, start + 5 * shift), + ] + interval_container = constructor.from_tuples(tuples, closed) + + adjacent = interval.closed_right and interval_container.closed_left + expected = np.array([True, True, True, True, adjacent, False]) + result = interval_container.overlaps(interval) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("other_constructor", [IntervalArray, IntervalIndex]) + def test_overlaps_interval_container(self, constructor, other_constructor): + # TODO: modify this test when implemented + interval_container = constructor.from_breaks(range(5)) + other_container = other_constructor.from_breaks(range(5)) + with pytest.raises(NotImplementedError, match="^$"): + interval_container.overlaps(other_container) + + def test_overlaps_na(self, constructor, start_shift): + """NA values are marked as False""" + start, shift = start_shift + interval = Interval(start, start + shift) + + tuples = [ + (start, start + shift), + np.nan, + (start + 2 * shift, start + 3 * shift), + ] + interval_container = constructor.from_tuples(tuples) + + expected = np.array([True, False, False]) + result = interval_container.overlaps(interval) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "other", + [10, True, "foo", Timedelta("1 day"), Timestamp("2018-01-01")], + ids=lambda x: type(x).__name__, + ) + def test_overlaps_invalid_type(self, constructor, other): + interval_container = constructor.from_breaks(range(5)) + msg = f"`other` must be Interval-like, got {type(other).__name__}" + with pytest.raises(TypeError, match=msg): + interval_container.overlaps(other) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_arithmetic.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_arithmetic.py new file mode 100644 index 00000000..f4b571ca --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_arithmetic.py @@ -0,0 +1,248 @@ +from __future__ import annotations + +from typing import Any + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + +# integer dtypes +arrays = [pd.array([1, 2, 3, None], dtype=dtype) for dtype in tm.ALL_INT_EA_DTYPES] +scalars: list[Any] = [2] * len(arrays) +# floating dtypes +arrays += [pd.array([0.1, 0.2, 0.3, None], dtype=dtype) for dtype in tm.FLOAT_EA_DTYPES] +scalars += [0.2, 0.2] +# boolean +arrays += [pd.array([True, False, True, None], dtype="boolean")] +scalars += [False] + + +@pytest.fixture(params=zip(arrays, scalars), ids=[a.dtype.name for a in arrays]) +def data(request): + """Fixture returning parametrized (array, scalar) tuple. + + Used to test equivalence of scalars, numpy arrays with array ops, and the + equivalence of DataFrame and Series ops. 
+ """ + return request.param + + +def check_skip(data, op_name): + if isinstance(data.dtype, pd.BooleanDtype) and "sub" in op_name: + pytest.skip("subtract not implemented for boolean") + + +def is_bool_not_implemented(data, op_name): + # match non-masked behavior + return data.dtype.kind == "b" and op_name.strip("_").lstrip("r") in [ + "pow", + "truediv", + "floordiv", + ] + + +# Test equivalence of scalars, numpy arrays with array ops +# ----------------------------------------------------------------------------- + + +def test_array_scalar_like_equivalence(data, all_arithmetic_operators): + data, scalar = data + op = tm.get_op_from_name(all_arithmetic_operators) + check_skip(data, all_arithmetic_operators) + + scalar_array = pd.array([scalar] * len(data), dtype=data.dtype) + + # TODO also add len-1 array (np.array([scalar], dtype=data.dtype.numpy_dtype)) + for scalar in [scalar, data.dtype.type(scalar)]: + if is_bool_not_implemented(data, all_arithmetic_operators): + msg = "operator '.*' not implemented for bool dtypes" + with pytest.raises(NotImplementedError, match=msg): + op(data, scalar) + with pytest.raises(NotImplementedError, match=msg): + op(data, scalar_array) + else: + result = op(data, scalar) + expected = op(data, scalar_array) + tm.assert_extension_array_equal(result, expected) + + +def test_array_NA(data, all_arithmetic_operators): + data, _ = data + op = tm.get_op_from_name(all_arithmetic_operators) + check_skip(data, all_arithmetic_operators) + + scalar = pd.NA + scalar_array = pd.array([pd.NA] * len(data), dtype=data.dtype) + + mask = data._mask.copy() + + if is_bool_not_implemented(data, all_arithmetic_operators): + msg = "operator '.*' not implemented for bool dtypes" + with pytest.raises(NotImplementedError, match=msg): + op(data, scalar) + # GH#45421 check op doesn't alter data._mask inplace + tm.assert_numpy_array_equal(mask, data._mask) + return + + result = op(data, scalar) + # GH#45421 check op doesn't alter data._mask inplace + tm.assert_numpy_array_equal(mask, data._mask) + + expected = op(data, scalar_array) + tm.assert_numpy_array_equal(mask, data._mask) + + tm.assert_extension_array_equal(result, expected) + + +def test_numpy_array_equivalence(data, all_arithmetic_operators): + data, scalar = data + op = tm.get_op_from_name(all_arithmetic_operators) + check_skip(data, all_arithmetic_operators) + + numpy_array = np.array([scalar] * len(data), dtype=data.dtype.numpy_dtype) + pd_array = pd.array(numpy_array, dtype=data.dtype) + + if is_bool_not_implemented(data, all_arithmetic_operators): + msg = "operator '.*' not implemented for bool dtypes" + with pytest.raises(NotImplementedError, match=msg): + op(data, numpy_array) + with pytest.raises(NotImplementedError, match=msg): + op(data, pd_array) + return + + result = op(data, numpy_array) + expected = op(data, pd_array) + tm.assert_extension_array_equal(result, expected) + + +# Test equivalence with Series and DataFrame ops +# ----------------------------------------------------------------------------- + + +def test_frame(data, all_arithmetic_operators): + data, scalar = data + op = tm.get_op_from_name(all_arithmetic_operators) + check_skip(data, all_arithmetic_operators) + + # DataFrame with scalar + df = pd.DataFrame({"A": data}) + + if is_bool_not_implemented(data, all_arithmetic_operators): + msg = "operator '.*' not implemented for bool dtypes" + with pytest.raises(NotImplementedError, match=msg): + op(df, scalar) + with pytest.raises(NotImplementedError, match=msg): + op(data, scalar) + return + + result 
= op(df, scalar) + expected = pd.DataFrame({"A": op(data, scalar)}) + tm.assert_frame_equal(result, expected) + + +def test_series(data, all_arithmetic_operators): + data, scalar = data + op = tm.get_op_from_name(all_arithmetic_operators) + check_skip(data, all_arithmetic_operators) + + ser = pd.Series(data) + + others = [ + scalar, + np.array([scalar] * len(data), dtype=data.dtype.numpy_dtype), + pd.array([scalar] * len(data), dtype=data.dtype), + pd.Series([scalar] * len(data), dtype=data.dtype), + ] + + for other in others: + if is_bool_not_implemented(data, all_arithmetic_operators): + msg = "operator '.*' not implemented for bool dtypes" + with pytest.raises(NotImplementedError, match=msg): + op(ser, other) + + else: + result = op(ser, other) + expected = pd.Series(op(data, other)) + tm.assert_series_equal(result, expected) + + +# Test generic characteristics / errors +# ----------------------------------------------------------------------------- + + +def test_error_invalid_object(data, all_arithmetic_operators): + data, _ = data + + op = all_arithmetic_operators + opa = getattr(data, op) + + # 2d -> return NotImplemented + result = opa(pd.DataFrame({"A": data})) + assert result is NotImplemented + + msg = r"can only perform ops with 1-d structures" + with pytest.raises(NotImplementedError, match=msg): + opa(np.arange(len(data)).reshape(-1, len(data))) + + +def test_error_len_mismatch(data, all_arithmetic_operators): + # operating with a list-like with non-matching length raises + data, scalar = data + op = tm.get_op_from_name(all_arithmetic_operators) + + other = [scalar] * (len(data) - 1) + + err = ValueError + msg = "|".join( + [ + r"operands could not be broadcast together with shapes \(3,\) \(4,\)", + r"operands could not be broadcast together with shapes \(4,\) \(3,\)", + ] + ) + if data.dtype.kind == "b" and all_arithmetic_operators.strip("_") in [ + "sub", + "rsub", + ]: + err = TypeError + msg = ( + r"numpy boolean subtract, the `\-` operator, is not supported, use " + r"the bitwise_xor, the `\^` operator, or the logical_xor function instead" + ) + elif is_bool_not_implemented(data, all_arithmetic_operators): + msg = "operator '.*' not implemented for bool dtypes" + err = NotImplementedError + + for other in [other, np.array(other)]: + with pytest.raises(err, match=msg): + op(data, other) + + s = pd.Series(data) + with pytest.raises(err, match=msg): + op(s, other) + + +@pytest.mark.parametrize("op", ["__neg__", "__abs__", "__invert__"]) +def test_unary_op_does_not_propagate_mask(data, op): + # https://github.com/pandas-dev/pandas/issues/39943 + data, _ = data + ser = pd.Series(data) + + if op == "__invert__" and data.dtype.kind == "f": + # we follow numpy in raising + msg = "ufunc 'invert' not supported for the input types" + with pytest.raises(TypeError, match=msg): + getattr(ser, op)() + with pytest.raises(TypeError, match=msg): + getattr(data, op)() + with pytest.raises(TypeError, match=msg): + # Check that this is still the numpy behavior + getattr(data._data, op)() + + return + + result = getattr(ser, op)() + expected = result.copy(deep=True) + ser[0] = None + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_arrow_compat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_arrow_compat.py new file mode 100644 index 00000000..fc2094bd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_arrow_compat.py @@ -0,0 +1,204 @@ +import numpy as np 
+import pytest + +import pandas as pd +import pandas._testing as tm + +pa = pytest.importorskip("pyarrow", minversion="1.0.1") + +from pandas.core.arrays.arrow._arrow_utils import pyarrow_array_to_numpy_and_mask + +arrays = [pd.array([1, 2, 3, None], dtype=dtype) for dtype in tm.ALL_INT_EA_DTYPES] +arrays += [pd.array([0.1, 0.2, 0.3, None], dtype=dtype) for dtype in tm.FLOAT_EA_DTYPES] +arrays += [pd.array([True, False, True, None], dtype="boolean")] + + +@pytest.fixture(params=arrays, ids=[a.dtype.name for a in arrays]) +def data(request): + """ + Fixture returning parametrized array from given dtype, including integer, + float and boolean + """ + return request.param + + +def test_arrow_array(data): + arr = pa.array(data) + expected = pa.array( + data.to_numpy(object, na_value=None), + type=pa.from_numpy_dtype(data.dtype.numpy_dtype), + ) + assert arr.equals(expected) + + +def test_arrow_roundtrip(data): + df = pd.DataFrame({"a": data}) + table = pa.table(df) + assert table.field("a").type == str(data.dtype.numpy_dtype) + result = table.to_pandas() + assert result["a"].dtype == data.dtype + tm.assert_frame_equal(result, df) + + +def test_dataframe_from_arrow_types_mapper(): + def types_mapper(arrow_type): + if pa.types.is_boolean(arrow_type): + return pd.BooleanDtype() + elif pa.types.is_integer(arrow_type): + return pd.Int64Dtype() + + bools_array = pa.array([True, None, False], type=pa.bool_()) + ints_array = pa.array([1, None, 2], type=pa.int64()) + small_ints_array = pa.array([-1, 0, 7], type=pa.int8()) + record_batch = pa.RecordBatch.from_arrays( + [bools_array, ints_array, small_ints_array], ["bools", "ints", "small_ints"] + ) + result = record_batch.to_pandas(types_mapper=types_mapper) + bools = pd.Series([True, None, False], dtype="boolean") + ints = pd.Series([1, None, 2], dtype="Int64") + small_ints = pd.Series([-1, 0, 7], dtype="Int64") + expected = pd.DataFrame({"bools": bools, "ints": ints, "small_ints": small_ints}) + tm.assert_frame_equal(result, expected) + + +def test_arrow_load_from_zero_chunks(data): + # GH-41040 + + df = pd.DataFrame({"a": data[0:0]}) + table = pa.table(df) + assert table.field("a").type == str(data.dtype.numpy_dtype) + table = pa.table( + [pa.chunked_array([], type=table.field("a").type)], schema=table.schema + ) + result = table.to_pandas() + assert result["a"].dtype == data.dtype + tm.assert_frame_equal(result, df) + + +def test_arrow_from_arrow_uint(): + # https://github.com/pandas-dev/pandas/issues/31896 + # possible mismatch in types + + dtype = pd.UInt32Dtype() + result = dtype.__from_arrow__(pa.array([1, 2, 3, 4, None], type="int64")) + expected = pd.array([1, 2, 3, 4, None], dtype="UInt32") + + tm.assert_extension_array_equal(result, expected) + + +def test_arrow_sliced(data): + # https://github.com/pandas-dev/pandas/issues/38525 + + df = pd.DataFrame({"a": data}) + table = pa.table(df) + result = table.slice(2, None).to_pandas() + expected = df.iloc[2:].reset_index(drop=True) + tm.assert_frame_equal(result, expected) + + # no missing values + df2 = df.fillna(data[0]) + table = pa.table(df2) + result = table.slice(2, None).to_pandas() + expected = df2.iloc[2:].reset_index(drop=True) + tm.assert_frame_equal(result, expected) + + +@pytest.fixture +def np_dtype_to_arrays(any_real_numpy_dtype): + """ + Fixture returning actual and expected dtype, pandas and numpy arrays and + mask from a given numpy dtype + """ + np_dtype = np.dtype(any_real_numpy_dtype) + pa_type = pa.from_numpy_dtype(np_dtype) + + # None ensures the creation of a bitmask 
buffer. + pa_array = pa.array([0, 1, 2, None], type=pa_type) + # Since masked Arrow buffer slots are not required to contain a specific + # value, assert only the first three values of the created np.array + np_expected = np.array([0, 1, 2], dtype=np_dtype) + mask_expected = np.array([True, True, True, False]) + return np_dtype, pa_array, np_expected, mask_expected + + +def test_pyarrow_array_to_numpy_and_mask(np_dtype_to_arrays): + """ + Test conversion from pyarrow array to numpy array. + + Modifies the pyarrow buffer to contain padding and offset, which are + considered valid buffers by pyarrow. + + Also tests empty pyarrow arrays with non empty buffers. + See https://github.com/pandas-dev/pandas/issues/40896 + """ + np_dtype, pa_array, np_expected, mask_expected = np_dtype_to_arrays + data, mask = pyarrow_array_to_numpy_and_mask(pa_array, np_dtype) + tm.assert_numpy_array_equal(data[:3], np_expected) + tm.assert_numpy_array_equal(mask, mask_expected) + + mask_buffer = pa_array.buffers()[0] + data_buffer = pa_array.buffers()[1] + data_buffer_bytes = pa_array.buffers()[1].to_pybytes() + + # Add trailing padding to the buffer. + data_buffer_trail = pa.py_buffer(data_buffer_bytes + b"\x00") + pa_array_trail = pa.Array.from_buffers( + type=pa_array.type, + length=len(pa_array), + buffers=[mask_buffer, data_buffer_trail], + offset=pa_array.offset, + ) + pa_array_trail.validate() + data, mask = pyarrow_array_to_numpy_and_mask(pa_array_trail, np_dtype) + tm.assert_numpy_array_equal(data[:3], np_expected) + tm.assert_numpy_array_equal(mask, mask_expected) + + # Add offset to the buffer. + offset = b"\x00" * (pa_array.type.bit_width // 8) + data_buffer_offset = pa.py_buffer(offset + data_buffer_bytes) + mask_buffer_offset = pa.py_buffer(b"\x0E") + pa_array_offset = pa.Array.from_buffers( + type=pa_array.type, + length=len(pa_array), + buffers=[mask_buffer_offset, data_buffer_offset], + offset=pa_array.offset + 1, + ) + pa_array_offset.validate() + data, mask = pyarrow_array_to_numpy_and_mask(pa_array_offset, np_dtype) + tm.assert_numpy_array_equal(data[:3], np_expected) + tm.assert_numpy_array_equal(mask, mask_expected) + + # Empty array + np_expected_empty = np.array([], dtype=np_dtype) + mask_expected_empty = np.array([], dtype=np.bool_) + + pa_array_offset = pa.Array.from_buffers( + type=pa_array.type, + length=0, + buffers=[mask_buffer, data_buffer], + offset=pa_array.offset, + ) + pa_array_offset.validate() + data, mask = pyarrow_array_to_numpy_and_mask(pa_array_offset, np_dtype) + tm.assert_numpy_array_equal(data[:3], np_expected_empty) + tm.assert_numpy_array_equal(mask, mask_expected_empty) + + +@pytest.mark.parametrize( + "arr", [pa.nulls(10), pa.chunked_array([pa.nulls(4), pa.nulls(6)])] +) +def test_from_arrow_null(data, arr): + res = data.dtype.__from_arrow__(arr) + assert res.isna().all() + assert len(res) == 10 + + +def test_from_arrow_type_error(data): + # ensure that __from_arrow__ returns a TypeError when getting a wrong + # array type + + arr = pa.array(data).cast("string") + with pytest.raises(TypeError, match=None): + # we don't test the exact error message, only the fact that it raises + # a TypeError is relevant + data.dtype.__from_arrow__(arr) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_function.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_function.py new file mode 100644 index 00000000..4c7bd6e2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_function.py @@ -0,0 
+1,57 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.common import is_integer_dtype + +import pandas as pd +import pandas._testing as tm + +arrays = [pd.array([1, 2, 3, None], dtype=dtype) for dtype in tm.ALL_INT_EA_DTYPES] +arrays += [ + pd.array([0.141, -0.268, 5.895, None], dtype=dtype) for dtype in tm.FLOAT_EA_DTYPES +] + + +@pytest.fixture(params=arrays, ids=[a.dtype.name for a in arrays]) +def data(request): + """ + Fixture returning parametrized 'data' array with different integer and + floating point types + """ + return request.param + + +@pytest.fixture() +def numpy_dtype(data): + """ + Fixture returning numpy dtype from 'data' input array. + """ + # For integer dtype, the numpy conversion must be done to float + if is_integer_dtype(data): + numpy_dtype = float + else: + numpy_dtype = data.dtype.type + return numpy_dtype + + +def test_round(data, numpy_dtype): + # No arguments + result = data.round() + expected = pd.array( + np.round(data.to_numpy(dtype=numpy_dtype, na_value=None)), dtype=data.dtype + ) + tm.assert_extension_array_equal(result, expected) + + # Decimals argument + result = data.round(decimals=2) + expected = pd.array( + np.round(data.to_numpy(dtype=numpy_dtype, na_value=None), decimals=2), + dtype=data.dtype, + ) + tm.assert_extension_array_equal(result, expected) + + +def test_tolist(data): + result = data.tolist() + expected = list(data) + tm.assert_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_indexing.py new file mode 100644 index 00000000..28ee451a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked/test_indexing.py @@ -0,0 +1,60 @@ +import re + +import numpy as np +import pytest + +import pandas as pd + + +class TestSetitemValidation: + def _check_setitem_invalid(self, arr, invalid): + msg = f"Invalid value '{str(invalid)}' for dtype {arr.dtype}" + msg = re.escape(msg) + with pytest.raises(TypeError, match=msg): + arr[0] = invalid + + with pytest.raises(TypeError, match=msg): + arr[:] = invalid + + with pytest.raises(TypeError, match=msg): + arr[[0]] = invalid + + # FIXME: don't leave commented-out + # with pytest.raises(TypeError): + # arr[[0]] = [invalid] + + # with pytest.raises(TypeError): + # arr[[0]] = np.array([invalid], dtype=object) + + # Series non-coercion, behavior subject to change + ser = pd.Series(arr) + with pytest.raises(TypeError, match=msg): + ser[0] = invalid + # TODO: so, so many other variants of this... 
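+ # Every variant above checks the same message format from masked setitem + # validation: "Invalid value '{value}' for dtype {dtype}".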
+ + _invalid_scalars = [ + 1 + 2j, + "True", + "1", + "1.0", + pd.NaT, + np.datetime64("NaT"), + np.timedelta64("NaT"), + ] + + @pytest.mark.parametrize( + "invalid", _invalid_scalars + [1, 1.0, np.int64(1), np.float64(1)] + ) + def test_setitem_validation_scalar_bool(self, invalid): + arr = pd.array([True, False, None], dtype="boolean") + self._check_setitem_invalid(arr, invalid) + + @pytest.mark.parametrize("invalid", _invalid_scalars + [True, 1.5, np.float64(1.5)]) + def test_setitem_validation_scalar_int(self, invalid, any_int_ea_dtype): + arr = pd.array([1, 2, None], dtype=any_int_ea_dtype) + self._check_setitem_invalid(arr, invalid) + + @pytest.mark.parametrize("invalid", _invalid_scalars + [True]) + def test_setitem_validation_scalar_float(self, invalid, float_ea_dtype): + arr = pd.array([1, 2, None], dtype=float_ea_dtype) + self._check_setitem_invalid(arr, invalid) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked_shared.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked_shared.py new file mode 100644 index 00000000..3e744022 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/masked_shared.py @@ -0,0 +1,154 @@ +""" +Tests shared by MaskedArray subclasses. +""" +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.tests.extension.base import BaseOpsUtil + + +class ComparisonOps(BaseOpsUtil): + def _compare_other(self, data, op, other): + # array + result = pd.Series(op(data, other)) + expected = pd.Series(op(data._data, other), dtype="boolean") + + # fill the nan locations + expected[data._mask] = pd.NA + + tm.assert_series_equal(result, expected) + + # series + ser = pd.Series(data) + result = op(ser, other) + + # Set nullable dtype here to avoid upcasting when setting to pd.NA below + expected = op(pd.Series(data._data), other).astype("boolean") + + # fill the nan locations + expected[data._mask] = pd.NA + + tm.assert_series_equal(result, expected) + + # subclass will override to parametrize 'other' + def test_scalar(self, other, comparison_op, dtype): + op = comparison_op + left = pd.array([1, 0, None], dtype=dtype) + + result = op(left, other) + + if other is pd.NA: + expected = pd.array([None, None, None], dtype="boolean") + else: + values = op(left._data, other) + expected = pd.arrays.BooleanArray(values, left._mask, copy=True) + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + result[0] = pd.NA + tm.assert_extension_array_equal(left, pd.array([1, 0, None], dtype=dtype)) + + +class NumericOps: + # Shared by IntegerArray and FloatingArray, not BooleanArray + + def test_searchsorted_nan(self, dtype): + # The base class casts to object dtype, for which searchsorted returns + # 0 from the left and 10 from the right. 
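+ # Here the masked array keeps its numeric dtype, and numpy sorts NaN after + # every real number, so both sides land at len(arr) == 10.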
+ arr = pd.array(range(10), dtype=dtype) + + assert arr.searchsorted(np.nan, side="left") == 10 + assert arr.searchsorted(np.nan, side="right") == 10 + + def test_no_shared_mask(self, data): + result = data + 1 + assert not tm.shares_memory(result, data) + + def test_array(self, comparison_op, dtype): + op = comparison_op + + left = pd.array([0, 1, 2, None, None, None], dtype=dtype) + right = pd.array([0, 1, None, 0, 1, None], dtype=dtype) + + result = op(left, right) + values = op(left._data, right._data) + mask = left._mask | right._mask + + expected = pd.arrays.BooleanArray(values, mask) + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + result[0] = pd.NA + tm.assert_extension_array_equal( + left, pd.array([0, 1, 2, None, None, None], dtype=dtype) + ) + tm.assert_extension_array_equal( + right, pd.array([0, 1, None, 0, 1, None], dtype=dtype) + ) + + def test_compare_with_booleanarray(self, comparison_op, dtype): + op = comparison_op + + left = pd.array([True, False, None] * 3, dtype="boolean") + right = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype=dtype) + other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean") + + expected = op(left, other) + result = op(left, right) + tm.assert_extension_array_equal(result, expected) + + # reversed op + expected = op(other, left) + result = op(right, left) + tm.assert_extension_array_equal(result, expected) + + def test_compare_to_string(self, dtype): + # GH#28930 + ser = pd.Series([1, None], dtype=dtype) + result = ser == "a" + expected = pd.Series([False, pd.NA], dtype="boolean") + + tm.assert_series_equal(result, expected) + + def test_ufunc_with_out(self, dtype): + arr = pd.array([1, 2, 3], dtype=dtype) + arr2 = pd.array([1, 2, pd.NA], dtype=dtype) + + mask = arr == arr + mask2 = arr2 == arr2 + + result = np.zeros(3, dtype=bool) + result |= mask + # If MaskedArray.__array_ufunc__ handled "out" appropriately, + # `result` should still be an ndarray. + assert isinstance(result, np.ndarray) + assert result.all() + + # result |= mask worked because mask could be cast losslessly to + # boolean ndarray. mask2 can't, so this raises + result = np.zeros(3, dtype=bool) + msg = "Specify an appropriate 'na_value' for this dtype" + with pytest.raises(ValueError, match=msg): + result |= mask2 + + # addition + res = np.add(arr, arr2) + expected = pd.array([2, 4, pd.NA], dtype=dtype) + tm.assert_extension_array_equal(res, expected) + + # when passing out=arr, we will modify 'arr' inplace. 
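+ # With out=arr, __array_ufunc__ writes the result (data and mask) back + # into 'arr' and returns the same object: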
+ res = np.add(arr, arr2, out=arr) + assert res is arr + tm.assert_extension_array_equal(res, expected) + tm.assert_extension_array_equal(arr, expected) + + def test_mul_td64_array(self, dtype): + # GH#45622 + arr = pd.array([1, 2, pd.NA], dtype=dtype) + other = np.arange(3, dtype=np.int64).view("m8[ns]") + + result = arr * other + expected = pd.array([pd.Timedelta(0), pd.Timedelta(2), pd.NaT]) + tm.assert_extension_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/numpy_/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/numpy_/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/numpy_/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/numpy_/test_indexing.py new file mode 100644 index 00000000..225d64ad --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/numpy_/test_indexing.py @@ -0,0 +1,41 @@ +import numpy as np + +from pandas.core.dtypes.common import is_scalar + +import pandas as pd +import pandas._testing as tm + + +class TestSearchsorted: + def test_searchsorted_string(self, string_dtype): + arr = pd.array(["a", "b", "c"], dtype=string_dtype) + + result = arr.searchsorted("a", side="left") + assert is_scalar(result) + assert result == 0 + + result = arr.searchsorted("a", side="right") + assert is_scalar(result) + assert result == 1 + + def test_searchsorted_numeric_dtypes_scalar(self, any_real_numpy_dtype): + arr = pd.array([1, 3, 90], dtype=any_real_numpy_dtype) + result = arr.searchsorted(30) + assert is_scalar(result) + assert result == 2 + + result = arr.searchsorted([30]) + expected = np.array([2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + def test_searchsorted_numeric_dtypes_vector(self, any_real_numpy_dtype): + arr = pd.array([1, 3, 90], dtype=any_real_numpy_dtype) + result = arr.searchsorted([2, 30]) + expected = np.array([1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + def test_searchsorted_sorter(self, any_real_numpy_dtype): + arr = pd.array([3, 1, 2], dtype=any_real_numpy_dtype) + result = arr.searchsorted([0, 3], sorter=np.argsort(arr)) + expected = np.array([0, 2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/numpy_/test_numpy.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/numpy_/test_numpy.py new file mode 100644 index 00000000..4217745e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/numpy_/test_numpy.py @@ -0,0 +1,324 @@ +""" +Additional tests for NumpyExtensionArray that aren't covered by +the interface tests. +""" +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import NumpyEADtype + +import pandas as pd +import pandas._testing as tm +from pandas.arrays import NumpyExtensionArray + + +@pytest.fixture( + params=[ + np.array(["a", "b"], dtype=object), + np.array([0, 1], dtype=float), + np.array([0, 1], dtype=int), + np.array([0, 1 + 2j], dtype=complex), + np.array([True, False], dtype=bool), + np.array([0, 1], dtype="datetime64[ns]"), + np.array([0, 1], dtype="timedelta64[ns]"), + ] +) +def any_numpy_array(request): + """ + Parametrized fixture for NumPy arrays with different dtypes. + + This excludes string and bytes. 
+ """ + return request.param + + +# ---------------------------------------------------------------------------- +# NumpyEADtype + + +@pytest.mark.parametrize( + "dtype, expected", + [ + ("bool", True), + ("int", True), + ("uint", True), + ("float", True), + ("complex", True), + ("str", False), + ("bytes", False), + ("datetime64[ns]", False), + ("object", False), + ("void", False), + ], +) +def test_is_numeric(dtype, expected): + dtype = NumpyEADtype(dtype) + assert dtype._is_numeric is expected + + +@pytest.mark.parametrize( + "dtype, expected", + [ + ("bool", True), + ("int", False), + ("uint", False), + ("float", False), + ("complex", False), + ("str", False), + ("bytes", False), + ("datetime64[ns]", False), + ("object", False), + ("void", False), + ], +) +def test_is_boolean(dtype, expected): + dtype = NumpyEADtype(dtype) + assert dtype._is_boolean is expected + + +def test_repr(): + dtype = NumpyEADtype(np.dtype("int64")) + assert repr(dtype) == "NumpyEADtype('int64')" + + +def test_constructor_from_string(): + result = NumpyEADtype.construct_from_string("int64") + expected = NumpyEADtype(np.dtype("int64")) + assert result == expected + + +def test_dtype_univalent(any_numpy_dtype): + dtype = NumpyEADtype(any_numpy_dtype) + + result = NumpyEADtype(dtype) + assert result == dtype + + +# ---------------------------------------------------------------------------- +# Construction + + +def test_constructor_no_coercion(): + with pytest.raises(ValueError, match="NumPy array"): + NumpyExtensionArray([1, 2, 3]) + + +def test_series_constructor_with_copy(): + ndarray = np.array([1, 2, 3]) + ser = pd.Series(NumpyExtensionArray(ndarray), copy=True) + + assert ser.values is not ndarray + + +def test_series_constructor_with_astype(): + ndarray = np.array([1, 2, 3]) + result = pd.Series(NumpyExtensionArray(ndarray), dtype="float64") + expected = pd.Series([1.0, 2.0, 3.0], dtype="float64") + tm.assert_series_equal(result, expected) + + +def test_from_sequence_dtype(): + arr = np.array([1, 2, 3], dtype="int64") + result = NumpyExtensionArray._from_sequence(arr, dtype="uint64") + expected = NumpyExtensionArray(np.array([1, 2, 3], dtype="uint64")) + tm.assert_extension_array_equal(result, expected) + + +def test_constructor_copy(): + arr = np.array([0, 1]) + result = NumpyExtensionArray(arr, copy=True) + + assert not tm.shares_memory(result, arr) + + +def test_constructor_with_data(any_numpy_array): + nparr = any_numpy_array + arr = NumpyExtensionArray(nparr) + assert arr.dtype.numpy_dtype == nparr.dtype + + +# ---------------------------------------------------------------------------- +# Conversion + + +def test_to_numpy(): + arr = NumpyExtensionArray(np.array([1, 2, 3])) + result = arr.to_numpy() + assert result is arr._ndarray + + result = arr.to_numpy(copy=True) + assert result is not arr._ndarray + + result = arr.to_numpy(dtype="f8") + expected = np.array([1, 2, 3], dtype="f8") + tm.assert_numpy_array_equal(result, expected) + + +# ---------------------------------------------------------------------------- +# Setitem + + +def test_setitem_series(): + ser = pd.Series([1, 2, 3]) + ser.array[0] = 10 + expected = pd.Series([10, 2, 3]) + tm.assert_series_equal(ser, expected) + + +def test_setitem(any_numpy_array): + nparr = any_numpy_array + arr = NumpyExtensionArray(nparr, copy=True) + + arr[0] = arr[1] + nparr[0] = nparr[1] + + tm.assert_numpy_array_equal(arr.to_numpy(), nparr) + + +# ---------------------------------------------------------------------------- +# Reductions + + +def 
test_bad_reduce_raises(): + arr = np.array([1, 2, 3], dtype="int64") + arr = NumpyExtensionArray(arr) + msg = "cannot perform not_a_method with type int" + with pytest.raises(TypeError, match=msg): + arr._reduce(msg) + + +def test_validate_reduction_keyword_args(): + arr = NumpyExtensionArray(np.array([1, 2, 3])) + msg = "the 'keepdims' parameter is not supported .*all" + with pytest.raises(ValueError, match=msg): + arr.all(keepdims=True) + + +def test_np_max_nested_tuples(): + # case where checking in ufunc.nout works while checking for tuples + # does not + vals = [ + (("j", "k"), ("l", "m")), + (("l", "m"), ("o", "p")), + (("o", "p"), ("j", "k")), + ] + ser = pd.Series(vals) + arr = ser.array + + assert arr.max() is arr[2] + assert ser.max() is arr[2] + + result = np.maximum.reduce(arr) + assert result == arr[2] + + result = np.maximum.reduce(ser) + assert result == arr[2] + + +def test_np_reduce_2d(): + raw = np.arange(12).reshape(4, 3) + arr = NumpyExtensionArray(raw) + + res = np.maximum.reduce(arr, axis=0) + tm.assert_extension_array_equal(res, arr[-1]) + + alt = arr.max(axis=0) + tm.assert_extension_array_equal(alt, arr[-1]) + + +# ---------------------------------------------------------------------------- +# Ops + + +@pytest.mark.parametrize("ufunc", [np.abs, np.negative, np.positive]) +def test_ufunc_unary(ufunc): + arr = NumpyExtensionArray(np.array([-1.0, 0.0, 1.0])) + result = ufunc(arr) + expected = NumpyExtensionArray(ufunc(arr._ndarray)) + tm.assert_extension_array_equal(result, expected) + + # same thing but with the 'out' keyword + out = NumpyExtensionArray(np.array([-9.0, -9.0, -9.0])) + ufunc(arr, out=out) + tm.assert_extension_array_equal(out, expected) + + +def test_ufunc(): + arr = NumpyExtensionArray(np.array([-1.0, 0.0, 1.0])) + + r1, r2 = np.divmod(arr, np.add(arr, 2)) + e1, e2 = np.divmod(arr._ndarray, np.add(arr._ndarray, 2)) + e1 = NumpyExtensionArray(e1) + e2 = NumpyExtensionArray(e2) + tm.assert_extension_array_equal(r1, e1) + tm.assert_extension_array_equal(r2, e2) + + +def test_basic_binop(): + # Just a basic smoke test. The EA interface tests exercise this + # more thoroughly. + x = NumpyExtensionArray(np.array([1, 2, 3])) + result = x + x + expected = NumpyExtensionArray(np.array([2, 4, 6])) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("dtype", [None, object]) +def test_setitem_object_typecode(dtype): + arr = NumpyExtensionArray(np.array(["a", "b", "c"], dtype=dtype)) + arr[0] = "t" + expected = NumpyExtensionArray(np.array(["t", "b", "c"], dtype=dtype)) + tm.assert_extension_array_equal(arr, expected) + + +def test_setitem_no_coercion(): + # https://github.com/pandas-dev/pandas/issues/28150 + arr = NumpyExtensionArray(np.array([1, 2, 3])) + with pytest.raises(ValueError, match="int"): + arr[0] = "a" + + # With a value that we do coerce, check that we coerce the value + # and not the underlying array. 
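+ # Illustrative note (assumption): 2.5 is cast to fit the int64 backing
+ # array (becoming 2), so the element stays an integer rather than the
+ # array being upcast to float.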
+ arr[0] = 2.5 + assert isinstance(arr[0], (int, np.integer)), type(arr[0]) + + +def test_setitem_preserves_views(): + # GH#28150, see also extension test of the same name + arr = NumpyExtensionArray(np.array([1, 2, 3])) + view1 = arr.view() + view2 = arr[:] + view3 = np.asarray(arr) + + arr[0] = 9 + assert view1[0] == 9 + assert view2[0] == 9 + assert view3[0] == 9 + + arr[-1] = 2.5 + view1[-1] = 5 + assert arr[-1] == 5 + + +@pytest.mark.parametrize("dtype", [np.int64, np.uint64]) +def test_quantile_empty(dtype): + # we should get back np.nans, not -1s + arr = NumpyExtensionArray(np.array([], dtype=dtype)) + idx = pd.Index([0.0, 0.5]) + + result = arr._quantile(idx, interpolation="linear") + expected = NumpyExtensionArray(np.array([np.nan, np.nan])) + tm.assert_extension_array_equal(result, expected) + + +def test_factorize_unsigned(): + # don't raise when calling factorize on unsigned int NumpyExtensionArray + arr = np.array([1, 2, 3], dtype=np.uint64) + obj = NumpyExtensionArray(arr) + + res_codes, res_unique = obj.factorize() + exp_codes, exp_unique = pd.factorize(arr) + + tm.assert_numpy_array_equal(res_codes, exp_codes) + + tm.assert_extension_array_equal(res_unique, NumpyExtensionArray(exp_unique)) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_arrow_compat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_arrow_compat.py new file mode 100644 index 00000000..903fc317 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_arrow_compat.py @@ -0,0 +1,124 @@ +import pytest + +from pandas.compat.pyarrow import pa_version_under10p0 + +from pandas.core.dtypes.dtypes import PeriodDtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import ( + PeriodArray, + period_array, +) + +pa = pytest.importorskip("pyarrow", minversion="1.0.1") + + +def test_arrow_extension_type(): + from pandas.core.arrays.arrow.extension_types import ArrowPeriodType + + p1 = ArrowPeriodType("D") + p2 = ArrowPeriodType("D") + p3 = ArrowPeriodType("M") + + assert p1.freq == "D" + assert p1 == p2 + assert p1 != p3 + assert hash(p1) == hash(p2) + assert hash(p1) != hash(p3) + + +@pytest.mark.xfail(not pa_version_under10p0, reason="Wrong behavior with pyarrow 10") +@pytest.mark.parametrize( + "data, freq", + [ + (pd.date_range("2017", periods=3), "D"), + (pd.date_range("2017", periods=3, freq="A"), "A-DEC"), + ], +) +def test_arrow_array(data, freq): + from pandas.core.arrays.arrow.extension_types import ArrowPeriodType + + periods = period_array(data, freq=freq) + result = pa.array(periods) + assert isinstance(result.type, ArrowPeriodType) + assert result.type.freq == freq + expected = pa.array(periods.asi8, type="int64") + assert result.storage.equals(expected) + + # convert to its storage type + result = pa.array(periods, type=pa.int64()) + assert result.equals(expected) + + # unsupported conversions + msg = "Not supported to convert PeriodArray to 'double' type" + with pytest.raises(TypeError, match=msg): + pa.array(periods, type="float64") + + with pytest.raises(TypeError, match="different 'freq'"): + pa.array(periods, type=ArrowPeriodType("T")) + + +def test_arrow_array_missing(): + from pandas.core.arrays.arrow.extension_types import ArrowPeriodType + + arr = PeriodArray([1, 2, 
3], dtype="period[D]") + arr[1] = pd.NaT + + result = pa.array(arr) + assert isinstance(result.type, ArrowPeriodType) + assert result.type.freq == "D" + expected = pa.array([1, None, 3], type="int64") + assert result.storage.equals(expected) + + +def test_arrow_table_roundtrip(): + from pandas.core.arrays.arrow.extension_types import ArrowPeriodType + + arr = PeriodArray([1, 2, 3], dtype="period[D]") + arr[1] = pd.NaT + df = pd.DataFrame({"a": arr}) + + table = pa.table(df) + assert isinstance(table.field("a").type, ArrowPeriodType) + result = table.to_pandas() + assert isinstance(result["a"].dtype, PeriodDtype) + tm.assert_frame_equal(result, df) + + table2 = pa.concat_tables([table, table]) + result = table2.to_pandas() + expected = pd.concat([df, df], ignore_index=True) + tm.assert_frame_equal(result, expected) + + +def test_arrow_load_from_zero_chunks(): + # GH-41040 + + from pandas.core.arrays.arrow.extension_types import ArrowPeriodType + + arr = PeriodArray([], dtype="period[D]") + df = pd.DataFrame({"a": arr}) + + table = pa.table(df) + assert isinstance(table.field("a").type, ArrowPeriodType) + table = pa.table( + [pa.chunked_array([], type=table.column(0).type)], schema=table.schema + ) + result = table.to_pandas() + assert isinstance(result["a"].dtype, PeriodDtype) + tm.assert_frame_equal(result, df) + + +def test_arrow_table_roundtrip_without_metadata(): + arr = PeriodArray([1, 2, 3], dtype="period[H]") + arr[1] = pd.NaT + df = pd.DataFrame({"a": arr}) + + table = pa.table(df) + # remove the metadata + table = table.replace_schema_metadata() + assert table.schema.metadata is None + + result = table.to_pandas() + assert isinstance(result["a"].dtype, PeriodDtype) + tm.assert_frame_equal(result, df) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_astype.py new file mode 100644 index 00000000..57634cf0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_astype.py @@ -0,0 +1,67 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import PeriodDtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import period_array + + +@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"]) +def test_astype_int(dtype): + # We choose to ignore the sign and size of integers for + # Period/Datetime/Timedelta astype + arr = period_array(["2000", "2001", None], freq="D") + + if np.dtype(dtype) != np.int64: + with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): + arr.astype(dtype) + return + + result = arr.astype(dtype) + expected = arr._ndarray.view("i8") + tm.assert_numpy_array_equal(result, expected) + + +def test_astype_copies(): + arr = period_array(["2000", "2001", None], freq="D") + result = arr.astype(np.int64, copy=False) + + # Add the `.base`, since we now use `.asi8` which returns a view. + # We could maybe override it in PeriodArray to return ._ndarray directly. 
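+ # i.e. with copy=False the returned int64 array is a view: its .base is
+ # the underlying ndarray (illustrative restatement of the note above).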
+ assert result.base is arr._ndarray + + result = arr.astype(np.int64, copy=True) + assert result is not arr._ndarray + tm.assert_numpy_array_equal(result, arr._ndarray.view("i8")) + + +def test_astype_categorical(): + arr = period_array(["2000", "2001", "2001", None], freq="D") + result = arr.astype("category") + categories = pd.PeriodIndex(["2000", "2001"], freq="D") + expected = pd.Categorical.from_codes([0, 1, 1, -1], categories=categories) + tm.assert_categorical_equal(result, expected) + + +def test_astype_period(): + arr = period_array(["2000", "2001", None], freq="D") + result = arr.astype(PeriodDtype("M")) + expected = period_array(["2000", "2001", None], freq="M") + tm.assert_period_array_equal(result, expected) + + +@pytest.mark.parametrize("other", ["datetime64[ns]", "timedelta64[ns]"]) +def test_astype_datetime(other): + arr = period_array(["2000", "2001", None], freq="D") + # slice off the [ns] so that the regex matches. + if other == "timedelta64[ns]": + with pytest.raises(TypeError, match=other[:-4]): + arr.astype(other) + + else: + # GH#45038 allow period->dt64 because we allow dt64->period + result = arr.astype(other) + expected = pd.DatetimeIndex(["2000", "2001", pd.NaT])._data + tm.assert_datetime_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_constructors.py new file mode 100644 index 00000000..ecc9ee74 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_constructors.py @@ -0,0 +1,146 @@ +import numpy as np +import pytest + +from pandas._libs.tslibs import iNaT +from pandas._libs.tslibs.offsets import MonthEnd +from pandas._libs.tslibs.period import IncompatibleFrequency + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import ( + PeriodArray, + period_array, +) + + +@pytest.mark.parametrize( + "data, freq, expected", + [ + ([pd.Period("2017", "D")], None, [17167]), + ([pd.Period("2017", "D")], "D", [17167]), + ([2017], "D", [17167]), + (["2017"], "D", [17167]), + ([pd.Period("2017", "D")], pd.tseries.offsets.Day(), [17167]), + ([pd.Period("2017", "D"), None], None, [17167, iNaT]), + (pd.Series(pd.date_range("2017", periods=3)), None, [17167, 17168, 17169]), + (pd.date_range("2017", periods=3), None, [17167, 17168, 17169]), + (pd.period_range("2017", periods=4, freq="Q"), None, [188, 189, 190, 191]), + ], +) +def test_period_array_ok(data, freq, expected): + result = period_array(data, freq=freq).asi8 + expected = np.asarray(expected, dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) + + +def test_period_array_readonly_object(): + # https://github.com/pandas-dev/pandas/issues/25403 + pa = period_array([pd.Period("2019-01-01")]) + arr = np.asarray(pa, dtype="object") + arr.setflags(write=False) + + result = period_array(arr) + tm.assert_period_array_equal(result, pa) + + result = pd.Series(arr) + tm.assert_series_equal(result, pd.Series(pa)) + + result = pd.DataFrame({"A": arr}) + tm.assert_frame_equal(result, pd.DataFrame({"A": pa})) + + +def test_from_datetime64_freq_changes(): + # https://github.com/pandas-dev/pandas/issues/23438 + arr = pd.date_range("2017", periods=3, freq="D") + result = PeriodArray._from_datetime64(arr, freq="M") + expected = period_array(["2017-01-01", "2017-01-01", "2017-01-01"], freq="M") + tm.assert_period_array_equal(result, expected) + + +@pytest.mark.parametrize("freq", ["2M", MonthEnd(2)]) +def 
test_from_datetime64_freq_2M(freq):
+ arr = np.array(
+ ["2020-01-01T00:00:00", "2020-01-02T00:00:00"], dtype="datetime64[ns]"
+ )
+ result = PeriodArray._from_datetime64(arr, freq)
+ expected = period_array(["2020-01", "2020-01"], freq=freq)
+ tm.assert_period_array_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "data, freq, msg",
+ [
+ (
+ [pd.Period("2017", "D"), pd.Period("2017", "A")],
+ None,
+ "Input has different freq",
+ ),
+ ([pd.Period("2017", "D")], "A", "Input has different freq"),
+ ],
+)
+def test_period_array_raises(data, freq, msg):
+ with pytest.raises(IncompatibleFrequency, match=msg):
+ period_array(data, freq)
+
+
+def test_period_array_non_period_series_raises():
+ ser = pd.Series([1, 2, 3])
+ with pytest.raises(TypeError, match="dtype"):
+ PeriodArray(ser, dtype="period[D]")
+
+
+def test_period_array_freq_mismatch():
+ arr = period_array(["2000", "2001"], freq="D")
+ with pytest.raises(IncompatibleFrequency, match="freq"):
+ PeriodArray(arr, dtype="period[M]")
+
+ dtype = pd.PeriodDtype(pd.tseries.offsets.MonthEnd())
+ with pytest.raises(IncompatibleFrequency, match="freq"):
+ PeriodArray(arr, dtype=dtype)
+
+
+def test_from_sequence_disallows_i8():
+ arr = period_array(["2000", "2001"], freq="D")
+
+ msg = str(arr[0].ordinal)
+ with pytest.raises(TypeError, match=msg):
+ PeriodArray._from_sequence(arr.asi8, dtype=arr.dtype)
+
+ with pytest.raises(TypeError, match=msg):
+ PeriodArray._from_sequence(list(arr.asi8), dtype=arr.dtype)
+
+
+def test_from_td64nat_sequence_raises():
+ # GH#44507
+ td = pd.NaT.to_numpy("m8[ns]")
+
+ dtype = pd.period_range("2005-01-01", periods=3, freq="D").dtype
+
+ arr = np.array([None], dtype=object)
+ arr[0] = td
+
+ msg = "Value must be Period, string, integer, or datetime"
+ with pytest.raises(ValueError, match=msg):
+ PeriodArray._from_sequence(arr, dtype=dtype)
+
+ with pytest.raises(ValueError, match=msg):
+ pd.PeriodIndex(arr, dtype=dtype)
+ with pytest.raises(ValueError, match=msg):
+ pd.Index(arr, dtype=dtype)
+ with pytest.raises(ValueError, match=msg):
+ pd.array(arr, dtype=dtype)
+ with pytest.raises(ValueError, match=msg):
+ pd.Series(arr, dtype=dtype)
+ with pytest.raises(ValueError, match=msg):
+ pd.DataFrame(arr, dtype=dtype)
+
+
+def test_freq_deprecated():
+ # GH#52462
+ data = np.arange(5).astype(np.int64)
+ msg = "The 'freq' keyword in the PeriodArray constructor is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ res = PeriodArray(data, freq="M")
+
+ expected = PeriodArray(data, dtype="period[M]")
+ tm.assert_equal(res, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_reductions.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_reductions.py
new file mode 100644
index 00000000..2889cc78
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/period/test_reductions.py
@@ -0,0 +1,42 @@
+import pytest
+
+import pandas as pd
+from pandas.core.arrays import period_array
+
+
+class TestReductions:
+ def test_min_max(self):
+ arr = period_array(
+ [
+ "2000-01-03",
+ "2000-01-03",
+ "NaT",
+ "2000-01-02",
+ "2000-01-05",
+ "2000-01-04",
+ ],
+ freq="D",
+ )
+
+ result = arr.min()
+ expected = pd.Period("2000-01-02", freq="D")
+ assert result == expected
+
+ result = arr.max()
+ expected = pd.Period("2000-01-05", freq="D")
+ assert result == expected
+
+ result = arr.min(skipna=False)
+ assert result is pd.NaT
+
+ result = arr.max(skipna=False)
+ assert result is pd.NaT
+
+ 
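+ # Illustrative note: for an empty PeriodArray there is no candidate value
+ # at all, so min/max presumably return pd.NaT regardless of skipna.
+
+ 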
@pytest.mark.parametrize("skipna", [True, False]) + def test_min_max_empty(self, skipna): + arr = period_array([], freq="D") + result = arr.min(skipna=skipna) + assert result is pd.NaT + + result = arr.max(skipna=skipna) + assert result is pd.NaT diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_accessor.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_accessor.py new file mode 100644 index 00000000..87eb7bcf --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_accessor.py @@ -0,0 +1,253 @@ +import string + +import numpy as np +import pytest + +import pandas as pd +from pandas import SparseDtype +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +class TestSeriesAccessor: + def test_to_dense(self): + ser = pd.Series([0, 1, 0, 10], dtype="Sparse[int64]") + result = ser.sparse.to_dense() + expected = pd.Series([0, 1, 0, 10]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("attr", ["npoints", "density", "fill_value", "sp_values"]) + def test_get_attributes(self, attr): + arr = SparseArray([0, 1]) + ser = pd.Series(arr) + + result = getattr(ser.sparse, attr) + expected = getattr(arr, attr) + assert result == expected + + def test_from_coo(self): + scipy_sparse = pytest.importorskip("scipy.sparse") + + row = [0, 3, 1, 0] + col = [0, 3, 1, 2] + data = [4, 5, 7, 9] + + sp_array = scipy_sparse.coo_matrix((data, (row, col))) + result = pd.Series.sparse.from_coo(sp_array) + + index = pd.MultiIndex.from_arrays( + [ + np.array([0, 0, 1, 3], dtype=np.int32), + np.array([0, 2, 1, 3], dtype=np.int32), + ], + ) + expected = pd.Series([4, 9, 7, 5], index=index, dtype="Sparse[int]") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "sort_labels, expected_rows, expected_cols, expected_values_pos", + [ + ( + False, + [("b", 2), ("a", 2), ("b", 1), ("a", 1)], + [("z", 1), ("z", 2), ("x", 2), ("z", 0)], + {1: (1, 0), 3: (3, 3)}, + ), + ( + True, + [("a", 1), ("a", 2), ("b", 1), ("b", 2)], + [("x", 2), ("z", 0), ("z", 1), ("z", 2)], + {1: (1, 2), 3: (0, 1)}, + ), + ], + ) + def test_to_coo( + self, sort_labels, expected_rows, expected_cols, expected_values_pos + ): + sp_sparse = pytest.importorskip("scipy.sparse") + + values = SparseArray([0, np.nan, 1, 0, None, 3], fill_value=0) + index = pd.MultiIndex.from_tuples( + [ + ("b", 2, "z", 1), + ("a", 2, "z", 2), + ("a", 2, "z", 1), + ("a", 2, "x", 2), + ("b", 1, "z", 1), + ("a", 1, "z", 0), + ] + ) + ss = pd.Series(values, index=index) + + expected_A = np.zeros((4, 4)) + for value, (row, col) in expected_values_pos.items(): + expected_A[row, col] = value + + A, rows, cols = ss.sparse.to_coo( + row_levels=(0, 1), column_levels=(2, 3), sort_labels=sort_labels + ) + assert isinstance(A, sp_sparse.coo_matrix) + tm.assert_numpy_array_equal(A.toarray(), expected_A) + assert rows == expected_rows + assert cols == expected_cols + + def test_non_sparse_raises(self): + ser = pd.Series([1, 2, 3]) + with pytest.raises(AttributeError, match=".sparse"): + ser.sparse.density + + +class TestFrameAccessor: + def test_accessor_raises(self): + df = pd.DataFrame({"A": [0, 1]}) + with pytest.raises(AttributeError, match="sparse"): + df.sparse + + @pytest.mark.parametrize("format", ["csc", "csr", 
"coo"]) + @pytest.mark.parametrize("labels", [None, list(string.ascii_letters[:10])]) + @pytest.mark.parametrize("dtype", ["float64", "int64"]) + def test_from_spmatrix(self, format, labels, dtype): + sp_sparse = pytest.importorskip("scipy.sparse") + + sp_dtype = SparseDtype(dtype, np.array(0, dtype=dtype).item()) + + mat = sp_sparse.eye(10, format=format, dtype=dtype) + result = pd.DataFrame.sparse.from_spmatrix(mat, index=labels, columns=labels) + expected = pd.DataFrame( + np.eye(10, dtype=dtype), index=labels, columns=labels + ).astype(sp_dtype) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("format", ["csc", "csr", "coo"]) + def test_from_spmatrix_including_explicit_zero(self, format): + sp_sparse = pytest.importorskip("scipy.sparse") + + mat = sp_sparse.random(10, 2, density=0.5, format=format) + mat.data[0] = 0 + result = pd.DataFrame.sparse.from_spmatrix(mat) + dtype = SparseDtype("float64", 0.0) + expected = pd.DataFrame(mat.todense()).astype(dtype) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "columns", + [["a", "b"], pd.MultiIndex.from_product([["A"], ["a", "b"]]), ["a", "a"]], + ) + def test_from_spmatrix_columns(self, columns): + sp_sparse = pytest.importorskip("scipy.sparse") + + dtype = SparseDtype("float64", 0.0) + + mat = sp_sparse.random(10, 2, density=0.5) + result = pd.DataFrame.sparse.from_spmatrix(mat, columns=columns) + expected = pd.DataFrame(mat.toarray(), columns=columns).astype(dtype) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "colnames", [("A", "B"), (1, 2), (1, pd.NA), (0.1, 0.2), ("x", "x"), (0, 0)] + ) + def test_to_coo(self, colnames): + sp_sparse = pytest.importorskip("scipy.sparse") + + df = pd.DataFrame( + {colnames[0]: [0, 1, 0], colnames[1]: [1, 0, 0]}, dtype="Sparse[int64, 0]" + ) + result = df.sparse.to_coo() + expected = sp_sparse.coo_matrix(np.asarray(df)) + assert (result != expected).nnz == 0 + + @pytest.mark.parametrize("fill_value", [1, np.nan]) + def test_to_coo_nonzero_fill_val_raises(self, fill_value): + pytest.importorskip("scipy") + df = pd.DataFrame( + { + "A": SparseArray( + [fill_value, fill_value, fill_value, 2], fill_value=fill_value + ), + "B": SparseArray( + [fill_value, 2, fill_value, fill_value], fill_value=fill_value + ), + } + ) + with pytest.raises(ValueError, match="fill value must be 0"): + df.sparse.to_coo() + + def test_to_coo_midx_categorical(self): + # GH#50996 + sp_sparse = pytest.importorskip("scipy.sparse") + + midx = pd.MultiIndex.from_arrays( + [ + pd.CategoricalIndex(list("ab"), name="x"), + pd.CategoricalIndex([0, 1], name="y"), + ] + ) + + ser = pd.Series(1, index=midx, dtype="Sparse[int]") + result = ser.sparse.to_coo(row_levels=["x"], column_levels=["y"])[0] + expected = sp_sparse.coo_matrix( + (np.array([1, 1]), (np.array([0, 1]), np.array([0, 1]))), shape=(2, 2) + ) + assert (result != expected).nnz == 0 + + def test_to_dense(self): + df = pd.DataFrame( + { + "A": SparseArray([1, 0], dtype=SparseDtype("int64", 0)), + "B": SparseArray([1, 0], dtype=SparseDtype("int64", 1)), + "C": SparseArray([1.0, 0.0], dtype=SparseDtype("float64", 0.0)), + }, + index=["b", "a"], + ) + result = df.sparse.to_dense() + expected = pd.DataFrame( + {"A": [1, 0], "B": [1, 0], "C": [1.0, 0.0]}, index=["b", "a"] + ) + tm.assert_frame_equal(result, expected) + + def test_density(self): + df = pd.DataFrame( + { + "A": SparseArray([1, 0, 2, 1], fill_value=0), + "B": SparseArray([0, 1, 1, 1], fill_value=0), + } + ) + res = df.sparse.density + expected = 
0.75
+ assert res == expected
+
+ @pytest.mark.parametrize("dtype", ["int64", "float64"])
+ @pytest.mark.parametrize("dense_index", [True, False])
+ def test_series_from_coo(self, dtype, dense_index):
+ sp_sparse = pytest.importorskip("scipy.sparse")
+
+ A = sp_sparse.eye(3, format="coo", dtype=dtype)
+ result = pd.Series.sparse.from_coo(A, dense_index=dense_index)
+
+ index = pd.MultiIndex.from_tuples(
+ [
+ np.array([0, 0], dtype=np.int32),
+ np.array([1, 1], dtype=np.int32),
+ np.array([2, 2], dtype=np.int32),
+ ],
+ )
+ expected = pd.Series(SparseArray(np.array([1, 1, 1], dtype=dtype)), index=index)
+ if dense_index:
+ expected = expected.reindex(pd.MultiIndex.from_product(index.levels))
+
+ tm.assert_series_equal(result, expected)
+
+ def test_series_from_coo_incorrect_format_raises(self):
+ # gh-26554
+ sp_sparse = pytest.importorskip("scipy.sparse")
+
+ m = sp_sparse.csr_matrix(np.array([[0, 1], [0, 0]]))
+ with pytest.raises(
+ TypeError, match="Expected coo_matrix. Got csr_matrix instead."
+ ):
+ pd.Series.sparse.from_coo(m)
+
+ def test_with_column_named_sparse(self):
+ # https://github.com/pandas-dev/pandas/issues/30758
+ df = pd.DataFrame({"sparse": pd.arrays.SparseArray([1, 2])})
+ assert isinstance(df.sparse, pd.core.arrays.sparse.accessor.SparseFrameAccessor)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_arithmetics.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_arithmetics.py
new file mode 100644
index 00000000..ffc93b4e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_arithmetics.py
@@ -0,0 +1,514 @@
+import operator
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import SparseDtype
+import pandas._testing as tm
+from pandas.core.arrays.sparse import SparseArray
+
+
+@pytest.fixture(params=["integer", "block"])
+def kind(request):
+ """kind kwarg to pass to SparseArray"""
+ return request.param
+
+
+@pytest.fixture(params=[True, False])
+def mix(request):
+ """
+ Fixture returning True or False, determining whether to operate
+ op(sparse, dense) instead of op(sparse, sparse)
+ """
+ return request.param
+
+
+class TestSparseArrayArithmetics:
+ def _assert(self, a, b):
+ # We have to use tm.assert_sp_array_equal. See GH #45126
+ tm.assert_numpy_array_equal(a, b)
+
+ def _check_numeric_ops(self, a, b, a_dense, b_dense, mix: bool, op):
+ # Check that arithmetic behavior matches non-Sparse Series arithmetic
+
+ if isinstance(a_dense, np.ndarray):
+ expected = op(pd.Series(a_dense), b_dense).values
+ elif isinstance(b_dense, np.ndarray):
+ expected = op(a_dense, pd.Series(b_dense)).values
+ else:
+ raise NotImplementedError
+
+ with np.errstate(invalid="ignore", divide="ignore"):
+ if mix:
+ result = op(a, b_dense).to_dense()
+ else:
+ result = op(a, b).to_dense()
+
+ self._assert(result, expected)
+
+ def _check_bool_result(self, res):
+ assert isinstance(res, SparseArray)
+ assert isinstance(res.dtype, SparseDtype)
+ assert res.dtype.subtype == np.bool_
+ assert isinstance(res.fill_value, bool)
+
+ def _check_comparison_ops(self, a, b, a_dense, b_dense):
+ with np.errstate(invalid="ignore"):
+ # Unfortunately, trying to wrap the computation of each expected
+ # value with np.errstate() is too tedious. 
+ # + # sparse & sparse + self._check_bool_result(a == b) + self._assert((a == b).to_dense(), a_dense == b_dense) + + self._check_bool_result(a != b) + self._assert((a != b).to_dense(), a_dense != b_dense) + + self._check_bool_result(a >= b) + self._assert((a >= b).to_dense(), a_dense >= b_dense) + + self._check_bool_result(a <= b) + self._assert((a <= b).to_dense(), a_dense <= b_dense) + + self._check_bool_result(a > b) + self._assert((a > b).to_dense(), a_dense > b_dense) + + self._check_bool_result(a < b) + self._assert((a < b).to_dense(), a_dense < b_dense) + + # sparse & dense + self._check_bool_result(a == b_dense) + self._assert((a == b_dense).to_dense(), a_dense == b_dense) + + self._check_bool_result(a != b_dense) + self._assert((a != b_dense).to_dense(), a_dense != b_dense) + + self._check_bool_result(a >= b_dense) + self._assert((a >= b_dense).to_dense(), a_dense >= b_dense) + + self._check_bool_result(a <= b_dense) + self._assert((a <= b_dense).to_dense(), a_dense <= b_dense) + + self._check_bool_result(a > b_dense) + self._assert((a > b_dense).to_dense(), a_dense > b_dense) + + self._check_bool_result(a < b_dense) + self._assert((a < b_dense).to_dense(), a_dense < b_dense) + + def _check_logical_ops(self, a, b, a_dense, b_dense): + # sparse & sparse + self._check_bool_result(a & b) + self._assert((a & b).to_dense(), a_dense & b_dense) + + self._check_bool_result(a | b) + self._assert((a | b).to_dense(), a_dense | b_dense) + # sparse & dense + self._check_bool_result(a & b_dense) + self._assert((a & b_dense).to_dense(), a_dense & b_dense) + + self._check_bool_result(a | b_dense) + self._assert((a | b_dense).to_dense(), a_dense | b_dense) + + @pytest.mark.parametrize("scalar", [0, 1, 3]) + @pytest.mark.parametrize("fill_value", [None, 0, 2]) + def test_float_scalar( + self, kind, mix, all_arithmetic_functions, fill_value, scalar, request + ): + op = all_arithmetic_functions + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + a = SparseArray(values, kind=kind, fill_value=fill_value) + self._check_numeric_ops(a, scalar, values, scalar, mix, op) + + def test_float_scalar_comparison(self, kind): + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + + a = SparseArray(values, kind=kind) + self._check_comparison_ops(a, 1, values, 1) + self._check_comparison_ops(a, 0, values, 0) + self._check_comparison_ops(a, 3, values, 3) + + a = SparseArray(values, kind=kind, fill_value=0) + self._check_comparison_ops(a, 1, values, 1) + self._check_comparison_ops(a, 0, values, 0) + self._check_comparison_ops(a, 3, values, 3) + + a = SparseArray(values, kind=kind, fill_value=2) + self._check_comparison_ops(a, 1, values, 1) + self._check_comparison_ops(a, 0, values, 0) + self._check_comparison_ops(a, 3, values, 3) + + def test_float_same_index_without_nans(self, kind, mix, all_arithmetic_functions): + # when sp_index are the same + op = all_arithmetic_functions + + values = np.array([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0]) + rvalues = np.array([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0]) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_float_same_index_with_nans( + self, kind, mix, all_arithmetic_functions, request + ): + # when sp_index are the same + op = all_arithmetic_functions + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan]) + + a = 
SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_float_same_index_comparison(self, kind): + # when sp_index are the same + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan]) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + + values = np.array([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0]) + rvalues = np.array([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0]) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + self._check_comparison_ops(a, b, values, rvalues) + + def test_float_array(self, kind, mix, all_arithmetic_functions): + op = all_arithmetic_functions + + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan]) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind=kind, fill_value=1) + b = SparseArray(rvalues, kind=kind, fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_float_array_different_kind(self, mix, all_arithmetic_functions): + op = all_arithmetic_functions + + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan]) + + a = SparseArray(values, kind="integer") + b = SparseArray(rvalues, kind="block") + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) + + a = SparseArray(values, kind="integer", fill_value=0) + b = SparseArray(rvalues, kind="block") + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind="integer", fill_value=0) + b = SparseArray(rvalues, kind="block", fill_value=0) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind="integer", fill_value=1) + b = SparseArray(rvalues, kind="block", fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_float_array_comparison(self, kind): + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan]) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + self._check_comparison_ops(a, b * 0, values, rvalues * 0) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, kind=kind, fill_value=1) + b = SparseArray(rvalues, kind=kind, fill_value=2) + self._check_comparison_ops(a, b, values, rvalues) + + def test_int_array(self, kind, mix, all_arithmetic_functions): + op = 
all_arithmetic_functions + + # have to specify dtype explicitly until fixing GH 667 + dtype = np.int64 + + values = np.array([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype) + rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype) + + a = SparseArray(values, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype) + b = SparseArray(rvalues, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype) + + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) + + a = SparseArray(values, fill_value=0, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype) + b = SparseArray(rvalues, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype) + + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, fill_value=0, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype) + b = SparseArray(rvalues, fill_value=0, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, fill_value=1, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype, fill_value=1) + b = SparseArray(rvalues, fill_value=2, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype, fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_int_array_comparison(self, kind): + dtype = "int64" + # int32 NI ATM + + values = np.array([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype) + rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype) + + a = SparseArray(values, dtype=dtype, kind=kind) + b = SparseArray(rvalues, dtype=dtype, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + self._check_comparison_ops(a, b * 0, values, rvalues * 0) + + a = SparseArray(values, dtype=dtype, kind=kind, fill_value=0) + b = SparseArray(rvalues, dtype=dtype, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, dtype=dtype, kind=kind, fill_value=0) + b = SparseArray(rvalues, dtype=dtype, kind=kind, fill_value=0) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, dtype=dtype, kind=kind, fill_value=1) + b = SparseArray(rvalues, dtype=dtype, kind=kind, fill_value=2) + self._check_comparison_ops(a, b, values, rvalues) + + @pytest.mark.parametrize("fill_value", [True, False, np.nan]) + def test_bool_same_index(self, kind, fill_value): + # GH 14000 + # when sp_index are the same + values = np.array([True, False, True, True], dtype=np.bool_) + rvalues = np.array([True, False, True, True], dtype=np.bool_) + + a = SparseArray(values, kind=kind, dtype=np.bool_, fill_value=fill_value) + b = SparseArray(rvalues, kind=kind, dtype=np.bool_, fill_value=fill_value) + self._check_logical_ops(a, b, values, rvalues) + + @pytest.mark.parametrize("fill_value", [True, False, np.nan]) + def test_bool_array_logical(self, kind, fill_value): + # GH 14000 + # when sp_index are the same + values = np.array([True, False, True, False, True, True], dtype=np.bool_) + rvalues = np.array([True, False, False, True, False, True], dtype=np.bool_) + + a = SparseArray(values, kind=kind, dtype=np.bool_, fill_value=fill_value) + b = SparseArray(rvalues, kind=kind, dtype=np.bool_, fill_value=fill_value) + self._check_logical_ops(a, b, values, rvalues) + + def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions, request): + op = all_arithmetic_functions + rdtype = "int64" + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + 
rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) + + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + assert b.dtype == SparseDtype(rdtype) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind=kind, fill_value=1) + b = SparseArray(rvalues, kind=kind, fill_value=2) + assert b.dtype == SparseDtype(rdtype, fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_mixed_array_comparison(self, kind): + rdtype = "int64" + # int32 NI ATM + + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) + + self._check_comparison_ops(a, b, values, rvalues) + self._check_comparison_ops(a, b * 0, values, rvalues * 0) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + assert b.dtype == SparseDtype(rdtype) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, kind=kind, fill_value=1) + b = SparseArray(rvalues, kind=kind, fill_value=2) + assert b.dtype == SparseDtype(rdtype, fill_value=2) + self._check_comparison_ops(a, b, values, rvalues) + + def test_xor(self): + s = SparseArray([True, True, False, False]) + t = SparseArray([True, False, True, False]) + result = s ^ t + sp_index = pd.core.arrays.sparse.IntIndex(4, np.array([0, 1, 2], dtype="int32")) + expected = SparseArray([False, True, True], sparse_index=sp_index) + tm.assert_sp_array_equal(result, expected) + + +@pytest.mark.parametrize("op", [operator.eq, operator.add]) +def test_with_list(op): + arr = SparseArray([0, 1], fill_value=0) + result = op(arr, [0, 1]) + expected = op(arr, SparseArray([0, 1])) + tm.assert_sp_array_equal(result, expected) + + +def test_with_dataframe(): + # GH#27910 + arr = SparseArray([0, 1], fill_value=0) + df = pd.DataFrame([[1, 2], [3, 4]]) + result = arr.__add__(df) + assert result is NotImplemented + + +def test_with_zerodim_ndarray(): + # GH#27910 + arr = SparseArray([0, 1], fill_value=0) + + result = arr * np.array(2) + expected = arr * 2 + tm.assert_sp_array_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.abs, np.exp]) +@pytest.mark.parametrize( + "arr", [SparseArray([0, 0, -1, 1]), SparseArray([None, None, -1, 1])] +) +def test_ufuncs(ufunc, arr): + result = ufunc(arr) + fill_value = ufunc(arr.fill_value) + expected = SparseArray(ufunc(np.asarray(arr)), fill_value=fill_value) + tm.assert_sp_array_equal(result, expected) + + +@pytest.mark.parametrize( + "a, b", + [ + (SparseArray([0, 0, 0]), np.array([0, 1, 2])), + (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])), + (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])), + (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 
2])), + (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])), + ], +) +@pytest.mark.parametrize("ufunc", [np.add, np.greater]) +def test_binary_ufuncs(ufunc, a, b): + # can't say anything about fill value here. + result = ufunc(a, b) + expected = ufunc(np.asarray(a), np.asarray(b)) + assert isinstance(result, SparseArray) + tm.assert_numpy_array_equal(np.asarray(result), expected) + + +def test_ndarray_inplace(): + sparray = SparseArray([0, 2, 0, 0]) + ndarray = np.array([0, 1, 2, 3]) + ndarray += sparray + expected = np.array([0, 3, 2, 3]) + tm.assert_numpy_array_equal(ndarray, expected) + + +def test_sparray_inplace(): + sparray = SparseArray([0, 2, 0, 0]) + ndarray = np.array([0, 1, 2, 3]) + sparray += ndarray + expected = SparseArray([0, 3, 2, 3], fill_value=0) + tm.assert_sp_array_equal(sparray, expected) + + +@pytest.mark.parametrize("cons", [list, np.array, SparseArray]) +def test_mismatched_length_cmp_op(cons): + left = SparseArray([True, True]) + right = cons([True, True, True]) + with pytest.raises(ValueError, match="operands have mismatched length"): + left & right + + +@pytest.mark.parametrize("op", ["add", "sub", "mul", "truediv", "floordiv", "pow"]) +@pytest.mark.parametrize("fill_value", [np.nan, 3]) +def test_binary_operators(op, fill_value): + op = getattr(operator, op) + data1 = np.random.default_rng(2).standard_normal(20) + data2 = np.random.default_rng(2).standard_normal(20) + + data1[::2] = fill_value + data2[::3] = fill_value + + first = SparseArray(data1, fill_value=fill_value) + second = SparseArray(data2, fill_value=fill_value) + + with np.errstate(all="ignore"): + res = op(first, second) + exp = SparseArray( + op(first.to_dense(), second.to_dense()), fill_value=first.fill_value + ) + assert isinstance(res, SparseArray) + tm.assert_almost_equal(res.to_dense(), exp.to_dense()) + + res2 = op(first, second.to_dense()) + assert isinstance(res2, SparseArray) + tm.assert_sp_array_equal(res, res2) + + res3 = op(first.to_dense(), second) + assert isinstance(res3, SparseArray) + tm.assert_sp_array_equal(res, res3) + + res4 = op(first, 4) + assert isinstance(res4, SparseArray) + + # Ignore this if the actual op raises (e.g. pow). 
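+ # Illustrative note: the dense computation below can raise ValueError for
+ # some op/fill_value combinations (e.g. pow, per the comment above); in
+ # that case only the SparseArray result type is checked.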
+ try: + exp = op(first.to_dense(), 4) + exp_fv = op(first.fill_value, 4) + except ValueError: + pass + else: + tm.assert_almost_equal(res4.fill_value, exp_fv) + tm.assert_almost_equal(res4.to_dense(), exp) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_array.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_array.py new file mode 100644 index 00000000..883d6ea3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_array.py @@ -0,0 +1,480 @@ +import re + +import numpy as np +import pytest + +from pandas._libs.sparse import IntIndex + +import pandas as pd +from pandas import ( + SparseDtype, + isna, +) +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +@pytest.fixture +def arr_data(): + """Fixture returning numpy array with valid and missing entries""" + return np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6]) + + +@pytest.fixture +def arr(arr_data): + """Fixture returning SparseArray from 'arr_data'""" + return SparseArray(arr_data) + + +@pytest.fixture +def zarr(): + """Fixture returning SparseArray with integer entries and 'fill_value=0'""" + return SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0) + + +class TestSparseArray: + @pytest.mark.parametrize("fill_value", [0, None, np.nan]) + def test_shift_fill_value(self, fill_value): + # GH #24128 + sparse = SparseArray(np.array([1, 0, 0, 3, 0]), fill_value=8.0) + res = sparse.shift(1, fill_value=fill_value) + if isna(fill_value): + fill_value = res.dtype.na_value + exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]), fill_value=8.0) + tm.assert_sp_array_equal(res, exp) + + def test_set_fill_value(self): + arr = SparseArray([1.0, np.nan, 2.0], fill_value=np.nan) + arr.fill_value = 2 + assert arr.fill_value == 2 + + arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64) + arr.fill_value = 2 + assert arr.fill_value == 2 + + msg = "Allowing arbitrary scalar fill_value in SparseDtype is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + arr.fill_value = 3.1 + assert arr.fill_value == 3.1 + + arr.fill_value = np.nan + assert np.isnan(arr.fill_value) + + arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool_) + arr.fill_value = True + assert arr.fill_value is True + + with tm.assert_produces_warning(FutureWarning, match=msg): + arr.fill_value = 0 + + arr.fill_value = np.nan + assert np.isnan(arr.fill_value) + + @pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)]) + def test_set_fill_invalid_non_scalar(self, val): + arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool_) + msg = "fill_value must be a scalar" + + with pytest.raises(ValueError, match=msg): + arr.fill_value = val + + def test_copy(self, arr): + arr2 = arr.copy() + assert arr2.sp_values is not arr.sp_values + assert arr2.sp_index is arr.sp_index + + def test_values_asarray(self, arr_data, arr): + tm.assert_almost_equal(arr.to_dense(), arr_data) + + @pytest.mark.parametrize( + "data,shape,dtype", + [ + ([0, 0, 0, 0, 0], (5,), None), + ([], (0,), None), + ([0], (1,), None), + (["A", "A", np.nan, "B"], (4,), object), + ], + ) + def test_shape(self, data, shape, dtype): + # GH 21126 + out = SparseArray(data, dtype=dtype) + assert out.shape == shape + + @pytest.mark.parametrize( + "vals", + [ + [np.nan, np.nan, np.nan, np.nan, np.nan], + [1, np.nan, np.nan, 3, np.nan], + [1, np.nan, 0, 3, 0], + ], + ) + @pytest.mark.parametrize("fill_value", [None, 0]) + def 
test_dense_repr(self, vals, fill_value): + vals = np.array(vals) + arr = SparseArray(vals, fill_value=fill_value) + + res = arr.to_dense() + tm.assert_numpy_array_equal(res, vals) + + @pytest.mark.parametrize("fix", ["arr", "zarr"]) + def test_pickle(self, fix, request): + obj = request.getfixturevalue(fix) + unpickled = tm.round_trip_pickle(obj) + tm.assert_sp_array_equal(unpickled, obj) + + def test_generator_warnings(self): + sp_arr = SparseArray([1, 2, 3]) + with tm.assert_produces_warning(None): + for _ in sp_arr: + pass + + def test_where_retain_fill_value(self): + # GH#45691 don't lose fill_value on _where + arr = SparseArray([np.nan, 1.0], fill_value=0) + + mask = np.array([True, False]) + + res = arr._where(~mask, 1) + exp = SparseArray([1, 1.0], fill_value=0) + tm.assert_sp_array_equal(res, exp) + + ser = pd.Series(arr) + res = ser.where(~mask, 1) + tm.assert_series_equal(res, pd.Series(exp)) + + def test_fillna(self): + s = SparseArray([1, np.nan, np.nan, 3, np.nan]) + res = s.fillna(-1) + exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0) + res = s.fillna(-1) + exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([1, np.nan, 0, 3, 0]) + res = s.fillna(-1) + exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0) + res = s.fillna(-1) + exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([np.nan, np.nan, np.nan, np.nan]) + res = s.fillna(-1) + exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0) + res = s.fillna(-1) + exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + # float dtype's fill_value is np.nan, replaced by -1 + s = SparseArray([0.0, 0.0, 0.0, 0.0]) + res = s.fillna(-1) + exp = SparseArray([0.0, 0.0, 0.0, 0.0], fill_value=-1) + tm.assert_sp_array_equal(res, exp) + + # int dtype shouldn't have missing. No changes. + s = SparseArray([0, 0, 0, 0]) + assert s.dtype == SparseDtype(np.int64) + assert s.fill_value == 0 + res = s.fillna(-1) + tm.assert_sp_array_equal(res, s) + + s = SparseArray([0, 0, 0, 0], fill_value=0) + assert s.dtype == SparseDtype(np.int64) + assert s.fill_value == 0 + res = s.fillna(-1) + exp = SparseArray([0, 0, 0, 0], fill_value=0) + tm.assert_sp_array_equal(res, exp) + + # fill_value can be nan if there is no missing hole. + # only fill_value will be changed + s = SparseArray([0, 0, 0, 0], fill_value=np.nan) + assert s.dtype == SparseDtype(np.int64, fill_value=np.nan) + assert np.isnan(s.fill_value) + res = s.fillna(-1) + exp = SparseArray([0, 0, 0, 0], fill_value=-1) + tm.assert_sp_array_equal(res, exp) + + def test_fillna_overlap(self): + s = SparseArray([1, np.nan, np.nan, 3, np.nan]) + # filling with existing value doesn't replace existing value with + # fill_value, i.e. 
existing 3 remains in sp_values + res = s.fillna(3) + exp = np.array([1, 3, 3, 3, 3], dtype=np.float64) + tm.assert_numpy_array_equal(res.to_dense(), exp) + + s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0) + res = s.fillna(3) + exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + def test_nonzero(self): + # Tests regression #21172. + sa = SparseArray([float("nan"), float("nan"), 1, 0, 0, 2, 0, 0, 0, 3, 0, 0]) + expected = np.array([2, 5, 9], dtype=np.int32) + (result,) = sa.nonzero() + tm.assert_numpy_array_equal(expected, result) + + sa = SparseArray([0, 0, 1, 0, 0, 2, 0, 0, 0, 3, 0, 0]) + (result,) = sa.nonzero() + tm.assert_numpy_array_equal(expected, result) + + +class TestSparseArrayAnalytics: + @pytest.mark.parametrize( + "data,expected", + [ + ( + np.array([1, 2, 3, 4, 5], dtype=float), # non-null data + SparseArray(np.array([1.0, 3.0, 6.0, 10.0, 15.0])), + ), + ( + np.array([1, 2, np.nan, 4, 5], dtype=float), # null data + SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0])), + ), + ], + ) + @pytest.mark.parametrize("numpy", [True, False]) + def test_cumsum(self, data, expected, numpy): + cumsum = np.cumsum if numpy else lambda s: s.cumsum() + + out = cumsum(SparseArray(data)) + tm.assert_sp_array_equal(out, expected) + + out = cumsum(SparseArray(data, fill_value=np.nan)) + tm.assert_sp_array_equal(out, expected) + + out = cumsum(SparseArray(data, fill_value=2)) + tm.assert_sp_array_equal(out, expected) + + if numpy: # numpy compatibility checks. + msg = "the 'dtype' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.cumsum(SparseArray(data), dtype=np.int64) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.cumsum(SparseArray(data), out=out) + else: + axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid. 
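+ # Illustrative note: cumsum along axis=1 on a 1-D SparseArray is out of
+ # bounds, hence numpy's "axis(=1) out of bounds" error message below.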
+ msg = re.escape(f"axis(={axis}) out of bounds") + with pytest.raises(ValueError, match=msg): + SparseArray(data).cumsum(axis=axis) + + def test_ufunc(self): + # GH 13853 make sure ufunc is applied to fill_value + sparse = SparseArray([1, np.nan, 2, np.nan, -2]) + result = SparseArray([1, np.nan, 2, np.nan, 2]) + tm.assert_sp_array_equal(abs(sparse), result) + tm.assert_sp_array_equal(np.abs(sparse), result) + + sparse = SparseArray([1, -1, 2, -2], fill_value=1) + result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index, fill_value=1) + tm.assert_sp_array_equal(abs(sparse), result) + tm.assert_sp_array_equal(np.abs(sparse), result) + + sparse = SparseArray([1, -1, 2, -2], fill_value=-1) + exp = SparseArray([1, 1, 2, 2], fill_value=1) + tm.assert_sp_array_equal(abs(sparse), exp) + tm.assert_sp_array_equal(np.abs(sparse), exp) + + sparse = SparseArray([1, np.nan, 2, np.nan, -2]) + result = SparseArray(np.sin([1, np.nan, 2, np.nan, -2])) + tm.assert_sp_array_equal(np.sin(sparse), result) + + sparse = SparseArray([1, -1, 2, -2], fill_value=1) + result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1)) + tm.assert_sp_array_equal(np.sin(sparse), result) + + sparse = SparseArray([1, -1, 0, -2], fill_value=0) + result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0)) + tm.assert_sp_array_equal(np.sin(sparse), result) + + def test_ufunc_args(self): + # GH 13853 make sure ufunc is applied to fill_value, including its arg + sparse = SparseArray([1, np.nan, 2, np.nan, -2]) + result = SparseArray([2, np.nan, 3, np.nan, -1]) + tm.assert_sp_array_equal(np.add(sparse, 1), result) + + sparse = SparseArray([1, -1, 2, -2], fill_value=1) + result = SparseArray([2, 0, 3, -1], fill_value=2) + tm.assert_sp_array_equal(np.add(sparse, 1), result) + + sparse = SparseArray([1, -1, 0, -2], fill_value=0) + result = SparseArray([2, 0, 1, -1], fill_value=1) + tm.assert_sp_array_equal(np.add(sparse, 1), result) + + @pytest.mark.parametrize("fill_value", [0.0, np.nan]) + def test_modf(self, fill_value): + # https://github.com/pandas-dev/pandas/issues/26946 + sparse = SparseArray([fill_value] * 10 + [1.1, 2.2], fill_value=fill_value) + r1, r2 = np.modf(sparse) + e1, e2 = np.modf(np.asarray(sparse)) + tm.assert_sp_array_equal(r1, SparseArray(e1, fill_value=fill_value)) + tm.assert_sp_array_equal(r2, SparseArray(e2, fill_value=fill_value)) + + def test_nbytes_integer(self): + arr = SparseArray([1, 0, 0, 0, 2], kind="integer") + result = arr.nbytes + # (2 * 8) + 2 * 4 + assert result == 24 + + def test_nbytes_block(self): + arr = SparseArray([1, 2, 0, 0, 0], kind="block") + result = arr.nbytes + # (2 * 8) + 4 + 4 + # sp_values, blocs, blengths + assert result == 24 + + def test_asarray_datetime64(self): + s = SparseArray(pd.to_datetime(["2012", None, None, "2013"])) + np.asarray(s) + + def test_density(self): + arr = SparseArray([0, 1]) + assert arr.density == 0.5 + + def test_npoints(self): + arr = SparseArray([0, 1]) + assert arr.npoints == 1 + + +def test_setting_fill_value_fillna_still_works(): + # This is why letting users update fill_value / dtype is bad + # astype has the same problem. + arr = SparseArray([1.0, np.nan, 1.0], fill_value=0.0) + arr.fill_value = np.nan + result = arr.isna() + # Can't do direct comparison, since the sp_index will be different + # So let's convert to ndarray and check there. 
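+ # Illustrative note: np.asarray densifies the boolean result, sidestepping
+ # the sp_index mismatch described above before comparing.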
+ result = np.asarray(result)
+
+ expected = np.array([False, True, False])
+ tm.assert_numpy_array_equal(result, expected)
+
+
+def test_setting_fill_value_updates():
+ arr = SparseArray([0.0, np.nan], fill_value=0)
+ arr.fill_value = np.nan
+ # use private constructor to get the index right
+ # otherwise both nans would be un-stored.
+ expected = SparseArray._simple_new(
+ sparse_array=np.array([np.nan]),
+ sparse_index=IntIndex(2, [1]),
+ dtype=SparseDtype(float, np.nan),
+ )
+ tm.assert_sp_array_equal(arr, expected)
+
+
+@pytest.mark.parametrize(
+ "arr,fill_value,loc",
+ [
+ ([None, 1, 2], None, 0),
+ ([0, None, 2], None, 1),
+ ([0, 1, None], None, 2),
+ ([0, 1, 1, None, None], None, 3),
+ ([1, 1, 1, 2], None, -1),
+ ([], None, -1),
+ ([None, 1, 0, 0, None, 2], None, 0),
+ ([None, 1, 0, 0, None, 2], 1, 1),
+ ([None, 1, 0, 0, None, 2], 2, 5),
+ ([None, 1, 0, 0, None, 2], 3, -1),
+ ([None, 0, 0, 1, 2, 1], 0, 1),
+ ([None, 0, 0, 1, 2, 1], 1, 3),
+ ],
+)
+def test_first_fill_value_loc(arr, fill_value, loc):
+ result = SparseArray(arr, fill_value=fill_value)._first_fill_value_loc()
+ assert result == loc
+
+
+@pytest.mark.parametrize(
+ "arr",
+ [
+ [1, 2, np.nan, np.nan],
+ [1, np.nan, 2, np.nan],
+ [1, 2, np.nan],
+ [np.nan, 1, 0, 0, np.nan, 2],
+ [np.nan, 0, 0, 1, 2, 1],
+ ],
+)
+@pytest.mark.parametrize("fill_value", [np.nan, 0, 1])
+def test_unique_na_fill(arr, fill_value):
+ a = SparseArray(arr, fill_value=fill_value).unique()
+ b = pd.Series(arr).unique()
+ assert isinstance(a, SparseArray)
+ a = np.asarray(a)
+ tm.assert_numpy_array_equal(a, b)
+
+
+def test_unique_all_sparse():
+ # https://github.com/pandas-dev/pandas/issues/23168
+ arr = SparseArray([0, 0])
+ result = arr.unique()
+ expected = SparseArray([0])
+ tm.assert_sp_array_equal(result, expected)
+
+
+def test_map():
+ arr = SparseArray([0, 1, 2])
+ expected = SparseArray([10, 11, 12], fill_value=10)
+
+ # dict
+ result = arr.map({0: 10, 1: 11, 2: 12})
+ tm.assert_sp_array_equal(result, expected)
+
+ # series
+ result = arr.map(pd.Series({0: 10, 1: 11, 2: 12}))
+ tm.assert_sp_array_equal(result, expected)
+
+ # function; the mapping is applied to the fill_value too (0 -> 10)
+ result = arr.map(lambda x: x + 10)
+ expected = SparseArray([10, 11, 12], fill_value=10)
+ tm.assert_sp_array_equal(result, expected)
+
+
+def test_map_missing():
+ arr = SparseArray([0, 1, 2])
+ expected = SparseArray([10, 11, None], fill_value=10)
+
+ result = arr.map({0: 10, 1: 11})
+ tm.assert_sp_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("fill_value", [np.nan, 1])
+def test_dropna(fill_value):
+ # GH-28287
+ arr = SparseArray([np.nan, 1], fill_value=fill_value)
+ exp = SparseArray([1.0], fill_value=fill_value)
+ tm.assert_sp_array_equal(arr.dropna(), exp)
+
+ df = pd.DataFrame({"a": [0, 1], "b": arr})
+ expected_df = pd.DataFrame({"a": [1], "b": exp}, index=pd.Index([1]))
+ tm.assert_equal(df.dropna(), expected_df)
+
+
+def test_drop_duplicates_fill_value():
+ # GH 11726
+ df = pd.DataFrame(np.zeros((5, 5))).apply(lambda x: SparseArray(x, fill_value=0))
+ result = df.drop_duplicates()
+ expected = pd.DataFrame({i: SparseArray([0.0], fill_value=0) for i in range(5)})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_zero_sparse_column():
+ # GH 27781
+ df1 = pd.DataFrame({"A": SparseArray([0, 0, 0]), "B": [1, 2, 3]})
+ df2 = pd.DataFrame({"A": SparseArray([0, 1, 0]), "B": [1, 2, 3]})
+ result = df1.loc[df1["B"] != 2]
+ expected = df2.loc[df2["B"] != 2]
+ tm.assert_frame_equal(result, expected)
+
+ expected = pd.DataFrame({"A": SparseArray([0, 0]), "B": [1, 3]},
index=[0, 2]) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_astype.py new file mode 100644 index 00000000..83a507e6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_astype.py @@ -0,0 +1,133 @@ +import numpy as np +import pytest + +from pandas._libs.sparse import IntIndex + +from pandas import ( + SparseDtype, + Timestamp, +) +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +class TestAstype: + def test_astype(self): + # float -> float + arr = SparseArray([None, None, 0, 2]) + result = arr.astype("Sparse[float32]") + expected = SparseArray([None, None, 0, 2], dtype=np.dtype("float32")) + tm.assert_sp_array_equal(result, expected) + + dtype = SparseDtype("float64", fill_value=0) + result = arr.astype(dtype) + expected = SparseArray._simple_new( + np.array([0.0, 2.0], dtype=dtype.subtype), IntIndex(4, [2, 3]), dtype + ) + tm.assert_sp_array_equal(result, expected) + + dtype = SparseDtype("int64", 0) + result = arr.astype(dtype) + expected = SparseArray._simple_new( + np.array([0, 2], dtype=np.int64), IntIndex(4, [2, 3]), dtype + ) + tm.assert_sp_array_equal(result, expected) + + arr = SparseArray([0, np.nan, 0, 1], fill_value=0) + with pytest.raises(ValueError, match="NA"): + arr.astype("Sparse[i8]") + + def test_astype_bool(self): + a = SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0)) + result = a.astype(bool) + expected = np.array([1, 0, 0, 1], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + # update fill value + result = a.astype(SparseDtype(bool, False)) + expected = SparseArray( + [True, False, False, True], dtype=SparseDtype(bool, False) + ) + tm.assert_sp_array_equal(result, expected) + + def test_astype_all(self, any_real_numpy_dtype): + vals = np.array([1, 2, 3]) + arr = SparseArray(vals, fill_value=1) + typ = np.dtype(any_real_numpy_dtype) + res = arr.astype(typ) + tm.assert_numpy_array_equal(res, vals.astype(any_real_numpy_dtype)) + + @pytest.mark.parametrize( + "arr, dtype, expected", + [ + ( + SparseArray([0, 1]), + "float", + SparseArray([0.0, 1.0], dtype=SparseDtype(float, 0.0)), + ), + (SparseArray([0, 1]), bool, SparseArray([False, True])), + ( + SparseArray([0, 1], fill_value=1), + bool, + SparseArray([False, True], dtype=SparseDtype(bool, True)), + ), + pytest.param( + SparseArray([0, 1]), + "datetime64[ns]", + SparseArray( + np.array([0, 1], dtype="datetime64[ns]"), + dtype=SparseDtype("datetime64[ns]", Timestamp("1970")), + ), + ), + ( + SparseArray([0, 1, 10]), + str, + SparseArray(["0", "1", "10"], dtype=SparseDtype(str, "0")), + ), + (SparseArray(["10", "20"]), float, SparseArray([10.0, 20.0])), + ( + SparseArray([0, 1, 0]), + object, + SparseArray([0, 1, 0], dtype=SparseDtype(object, 0)), + ), + ], + ) + def test_astype_more(self, arr, dtype, expected): + result = arr.astype(arr.dtype.update_dtype(dtype)) + tm.assert_sp_array_equal(result, expected) + + def test_astype_nan_raises(self): + arr = SparseArray([1.0, np.nan]) + with pytest.raises(ValueError, match="Cannot convert non-finite"): + arr.astype(int) + + def test_astype_copy_false(self): + # GH#34456 bug caused by using .view instead of .astype in astype_nansafe + arr = SparseArray([1, 2, 3]) + + dtype = SparseDtype(float, 0) + + result = arr.astype(dtype, copy=False) + expected = SparseArray([1.0, 2.0, 3.0], fill_value=0.0) + tm.assert_sp_array_equal(result, 
expected)
+
+ def test_astype_dt64_to_int64(self):
+ # GH#49631 match non-sparse behavior
+ values = np.array(["NaT", "2016-01-02", "2016-01-03"], dtype="M8[ns]")
+
+ arr = SparseArray(values)
+ result = arr.astype("int64")
+ expected = values.astype("int64")
+ tm.assert_numpy_array_equal(result, expected)
+
+ # we should also be able to cast to equivalent Sparse[int64]
+ dtype_int64 = SparseDtype("int64", np.iinfo(np.int64).min)
+ result2 = arr.astype(dtype_int64)
+ tm.assert_numpy_array_equal(result2.to_numpy(), expected)
+
+ # GH#50087 we should match the non-sparse behavior regardless of
+ # if we have a fill_value other than NaT
+ dtype = SparseDtype("datetime64[ns]", values[1])
+ arr3 = SparseArray(values, dtype=dtype)
+ result3 = arr3.astype("int64")
+ tm.assert_numpy_array_equal(result3, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_combine_concat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_combine_concat.py
new file mode 100644
index 00000000..0f09af26
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_combine_concat.py
@@ -0,0 +1,62 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.core.arrays.sparse import SparseArray
+
+
+class TestSparseArrayConcat:
+ @pytest.mark.parametrize("kind", ["integer", "block"])
+ def test_basic(self, kind):
+ a = SparseArray([1, 0, 0, 2], kind=kind)
+ b = SparseArray([1, 0, 2, 2], kind=kind)
+
+ result = SparseArray._concat_same_type([a, b])
+ # Can't make any assertions about the sparse index itself
+ # since we don't merge sparse blocks across arrays
+ # in to_concat
+ expected = np.array([1, 2, 1, 2, 2], dtype="int64")
+ tm.assert_numpy_array_equal(result.sp_values, expected)
+ assert result.kind == kind
+
+ @pytest.mark.parametrize("kind", ["integer", "block"])
+ def test_uses_first_kind(self, kind):
+ other = "integer" if kind == "block" else "block"
+ a = SparseArray([1, 0, 0, 2], kind=kind)
+ b = SparseArray([1, 0, 2, 2], kind=other)
+
+ result = SparseArray._concat_same_type([a, b])
+ expected = np.array([1, 2, 1, 2, 2], dtype="int64")
+ tm.assert_numpy_array_equal(result.sp_values, expected)
+ assert result.kind == kind
+
+
+@pytest.mark.parametrize(
+ "other, expected_dtype",
+ [
+ # compatible dtype -> preserve sparse
+ (pd.Series([3, 4, 5], dtype="int64"), pd.SparseDtype("int64", 0)),
+ # (pd.Series([3, 4, 5], dtype="Int64"), pd.SparseDtype("int64", 0)),
+ # incompatible dtype -> Sparse[common dtype]
+ (pd.Series([1.5, 2.5, 3.5], dtype="float64"), pd.SparseDtype("float64", 0)),
+ # incompatible dtype -> Sparse[object] dtype
+ (pd.Series(["a", "b", "c"], dtype=object), pd.SparseDtype(object, 0)),
+ # categorical with compatible categories -> dtype of the categories
+ (pd.Series([3, 4, 5], dtype="category"), np.dtype("int64")),
+ (pd.Series([1.5, 2.5, 3.5], dtype="category"), np.dtype("float64")),
+ # categorical with incompatible categories -> object dtype
+ (pd.Series(["a", "b", "c"], dtype="category"), np.dtype(object)),
+ ],
+)
+def test_concat_with_non_sparse(other, expected_dtype):
+ # https://github.com/pandas-dev/pandas/issues/34336
+ s_sparse = pd.Series([1, 0, 2], dtype=pd.SparseDtype("int64", 0))
+
+ result = pd.concat([s_sparse, other], ignore_index=True)
+ expected = pd.Series(list(s_sparse) + list(other)).astype(expected_dtype)
+ tm.assert_series_equal(result, expected)
+
+ result = pd.concat([other, s_sparse], ignore_index=True)
+ expected =
pd.Series(list(other) + list(s_sparse)).astype(expected_dtype) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_constructors.py new file mode 100644 index 00000000..2831c8ab --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_constructors.py @@ -0,0 +1,285 @@ +import numpy as np +import pytest + +from pandas._libs.sparse import IntIndex + +import pandas as pd +from pandas import ( + SparseDtype, + isna, +) +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +class TestConstructors: + def test_constructor_dtype(self): + arr = SparseArray([np.nan, 1, 2, np.nan]) + assert arr.dtype == SparseDtype(np.float64, np.nan) + assert arr.dtype.subtype == np.float64 + assert np.isnan(arr.fill_value) + + arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0) + assert arr.dtype == SparseDtype(np.float64, 0) + assert arr.fill_value == 0 + + arr = SparseArray([0, 1, 2, 4], dtype=np.float64) + assert arr.dtype == SparseDtype(np.float64, np.nan) + assert np.isnan(arr.fill_value) + + arr = SparseArray([0, 1, 2, 4], dtype=np.int64) + assert arr.dtype == SparseDtype(np.int64, 0) + assert arr.fill_value == 0 + + arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64) + assert arr.dtype == SparseDtype(np.int64, 0) + assert arr.fill_value == 0 + + arr = SparseArray([0, 1, 2, 4], dtype=None) + assert arr.dtype == SparseDtype(np.int64, 0) + assert arr.fill_value == 0 + + arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None) + assert arr.dtype == SparseDtype(np.int64, 0) + assert arr.fill_value == 0 + + def test_constructor_dtype_str(self): + result = SparseArray([1, 2, 3], dtype="int") + expected = SparseArray([1, 2, 3], dtype=int) + tm.assert_sp_array_equal(result, expected) + + def test_constructor_sparse_dtype(self): + result = SparseArray([1, 0, 0, 1], dtype=SparseDtype("int64", -1)) + expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64) + tm.assert_sp_array_equal(result, expected) + assert result.sp_values.dtype == np.dtype("int64") + + def test_constructor_sparse_dtype_str(self): + result = SparseArray([1, 0, 0, 1], dtype="Sparse[int32]") + expected = SparseArray([1, 0, 0, 1], dtype=np.int32) + tm.assert_sp_array_equal(result, expected) + assert result.sp_values.dtype == np.dtype("int32") + + def test_constructor_object_dtype(self): + # GH#11856 + arr = SparseArray(["A", "A", np.nan, "B"], dtype=object) + assert arr.dtype == SparseDtype(object) + assert np.isnan(arr.fill_value) + + arr = SparseArray(["A", "A", np.nan, "B"], dtype=object, fill_value="A") + assert arr.dtype == SparseDtype(object, "A") + assert arr.fill_value == "A" + + def test_constructor_object_dtype_bool_fill(self): + # GH#17574 + data = [False, 0, 100.0, 0.0] + arr = SparseArray(data, dtype=object, fill_value=False) + assert arr.dtype == SparseDtype(object, False) + assert arr.fill_value is False + arr_expected = np.array(data, dtype=object) + it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected)) + assert np.fromiter(it, dtype=np.bool_).all() + + @pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int]) + def test_constructor_na_dtype(self, dtype): + with pytest.raises(ValueError, match="Cannot convert"): + SparseArray([0, 1, np.nan], dtype=dtype) + + def test_constructor_warns_when_losing_timezone(self): + # GH#32501 warn when losing timezone information + dti = 
pd.date_range("2016-01-01", periods=3, tz="US/Pacific") + + expected = SparseArray(np.asarray(dti, dtype="datetime64[ns]")) + + with tm.assert_produces_warning(UserWarning): + result = SparseArray(dti) + + tm.assert_sp_array_equal(result, expected) + + with tm.assert_produces_warning(UserWarning): + result = SparseArray(pd.Series(dti)) + + tm.assert_sp_array_equal(result, expected) + + def test_constructor_spindex_dtype(self): + arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2])) + # TODO: actionable? + # XXX: Behavior change: specifying SparseIndex no longer changes the + # fill_value + expected = SparseArray([0, 1, 2, 0], kind="integer") + tm.assert_sp_array_equal(arr, expected) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + arr = SparseArray( + data=[1, 2, 3], + sparse_index=IntIndex(4, [1, 2, 3]), + dtype=np.int64, + fill_value=0, + ) + exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + arr = SparseArray( + data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=np.int64 + ) + exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + arr = SparseArray( + data=[1, 2, 3], + sparse_index=IntIndex(4, [1, 2, 3]), + dtype=None, + fill_value=0, + ) + exp = SparseArray([0, 1, 2, 3], dtype=None) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + @pytest.mark.parametrize("sparse_index", [None, IntIndex(1, [0])]) + def test_constructor_spindex_dtype_scalar(self, sparse_index): + # scalar input + msg = "Constructing SparseArray with scalar data is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None) + exp = SparseArray([1], dtype=None) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + with tm.assert_produces_warning(FutureWarning, match=msg): + arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None) + exp = SparseArray([1], dtype=None) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + def test_constructor_spindex_dtype_scalar_broadcasts(self): + arr = SparseArray( + data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=None + ) + exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + @pytest.mark.parametrize( + "data, fill_value", + [ + (np.array([1, 2]), 0), + (np.array([1.0, 2.0]), np.nan), + ([True, False], False), + ([pd.Timestamp("2017-01-01")], pd.NaT), + ], + ) + def test_constructor_inferred_fill_value(self, data, fill_value): + result = SparseArray(data).fill_value + + if isna(fill_value): + assert isna(result) + else: + assert result == fill_value + + @pytest.mark.parametrize("format", ["coo", "csc", "csr"]) + @pytest.mark.parametrize("size", [0, 10]) + def test_from_spmatrix(self, size, format): + sp_sparse = pytest.importorskip("scipy.sparse") + + mat = sp_sparse.random(size, 1, density=0.5, format=format) + result = SparseArray.from_spmatrix(mat) + + result = np.asarray(result) + expected = mat.toarray().ravel() + tm.assert_numpy_array_equal(result, expected) + + 
@pytest.mark.parametrize("format", ["coo", "csc", "csr"]) + def test_from_spmatrix_including_explicit_zero(self, format): + sp_sparse = pytest.importorskip("scipy.sparse") + + mat = sp_sparse.random(10, 1, density=0.5, format=format) + mat.data[0] = 0 + result = SparseArray.from_spmatrix(mat) + + result = np.asarray(result) + expected = mat.toarray().ravel() + tm.assert_numpy_array_equal(result, expected) + + def test_from_spmatrix_raises(self): + sp_sparse = pytest.importorskip("scipy.sparse") + + mat = sp_sparse.eye(5, 4, format="csc") + + with pytest.raises(ValueError, match="not '4'"): + SparseArray.from_spmatrix(mat) + + def test_constructor_from_too_large_array(self): + with pytest.raises(TypeError, match="expected dimension <= 1 data"): + SparseArray(np.arange(10).reshape((2, 5))) + + def test_constructor_from_sparse(self): + zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0) + res = SparseArray(zarr) + assert res.fill_value == 0 + tm.assert_almost_equal(res.sp_values, zarr.sp_values) + + def test_constructor_copy(self): + arr_data = np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6]) + arr = SparseArray(arr_data) + + cp = SparseArray(arr, copy=True) + cp.sp_values[:3] = 0 + assert not (arr.sp_values[:3] == 0).any() + + not_copy = SparseArray(arr) + not_copy.sp_values[:3] = 0 + assert (arr.sp_values[:3] == 0).all() + + def test_constructor_bool(self): + # GH#10648 + data = np.array([False, False, True, True, False, False]) + arr = SparseArray(data, fill_value=False, dtype=bool) + + assert arr.dtype == SparseDtype(bool) + tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True])) + # Behavior change: np.asarray densifies. + # tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr)) + tm.assert_numpy_array_equal(arr.sp_index.indices, np.array([2, 3], np.int32)) + + dense = arr.to_dense() + assert dense.dtype == bool + tm.assert_numpy_array_equal(dense, data) + + def test_constructor_bool_fill_value(self): + arr = SparseArray([True, False, True], dtype=None) + assert arr.dtype == SparseDtype(np.bool_) + assert not arr.fill_value + + arr = SparseArray([True, False, True], dtype=np.bool_) + assert arr.dtype == SparseDtype(np.bool_) + assert not arr.fill_value + + arr = SparseArray([True, False, True], dtype=np.bool_, fill_value=True) + assert arr.dtype == SparseDtype(np.bool_, True) + assert arr.fill_value + + def test_constructor_float32(self): + # GH#10648 + data = np.array([1.0, np.nan, 3], dtype=np.float32) + arr = SparseArray(data, dtype=np.float32) + + assert arr.dtype == SparseDtype(np.float32) + tm.assert_numpy_array_equal(arr.sp_values, np.array([1, 3], dtype=np.float32)) + # Behavior change: np.asarray densifies. 
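+ # (sp_values holds only the 2 stored points; np.asarray(arr) has all 3,
+ # NaN included, so the old comparison is disabled.)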
+ # tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr)) + tm.assert_numpy_array_equal( + arr.sp_index.indices, np.array([0, 2], dtype=np.int32) + ) + + dense = arr.to_dense() + assert dense.dtype == np.float32 + tm.assert_numpy_array_equal(dense, data) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_dtype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_dtype.py new file mode 100644 index 00000000..234f4092 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_dtype.py @@ -0,0 +1,224 @@ +import re +import warnings + +import numpy as np +import pytest + +import pandas as pd +from pandas import SparseDtype + + +@pytest.mark.parametrize( + "dtype, fill_value", + [ + ("int", 0), + ("float", np.nan), + ("bool", False), + ("object", np.nan), + ("datetime64[ns]", np.datetime64("NaT", "ns")), + ("timedelta64[ns]", np.timedelta64("NaT", "ns")), + ], +) +def test_inferred_dtype(dtype, fill_value): + sparse_dtype = SparseDtype(dtype) + result = sparse_dtype.fill_value + if pd.isna(fill_value): + assert pd.isna(result) and type(result) == type(fill_value) + else: + assert result == fill_value + + +def test_from_sparse_dtype(): + dtype = SparseDtype("float", 0) + result = SparseDtype(dtype) + assert result.fill_value == 0 + + +def test_from_sparse_dtype_fill_value(): + dtype = SparseDtype("int", 1) + result = SparseDtype(dtype, fill_value=2) + expected = SparseDtype("int", 2) + assert result == expected + + +@pytest.mark.parametrize( + "dtype, fill_value", + [ + ("int", None), + ("float", None), + ("bool", None), + ("object", None), + ("datetime64[ns]", None), + ("timedelta64[ns]", None), + ("int", np.nan), + ("float", 0), + ], +) +def test_equal(dtype, fill_value): + a = SparseDtype(dtype, fill_value) + b = SparseDtype(dtype, fill_value) + assert a == b + assert b == a + + +def test_nans_equal(): + a = SparseDtype(float, float("nan")) + b = SparseDtype(float, np.nan) + assert a == b + assert b == a + + +with warnings.catch_warnings(): + msg = "Allowing arbitrary scalar fill_value in SparseDtype is deprecated" + warnings.filterwarnings("ignore", msg, category=FutureWarning) + + tups = [ + (SparseDtype("float64"), SparseDtype("float32")), + (SparseDtype("float64"), SparseDtype("float64", 0)), + (SparseDtype("float64"), SparseDtype("datetime64[ns]", np.nan)), + (SparseDtype(int, pd.NaT), SparseDtype(float, pd.NaT)), + (SparseDtype("float64"), np.dtype("float64")), + ] + + +@pytest.mark.parametrize( + "a, b", + tups, +) +def test_not_equal(a, b): + assert a != b + + +def test_construct_from_string_raises(): + with pytest.raises( + TypeError, match="Cannot construct a 'SparseDtype' from 'not a dtype'" + ): + SparseDtype.construct_from_string("not a dtype") + + +@pytest.mark.parametrize( + "dtype, expected", + [ + (SparseDtype(int), True), + (SparseDtype(float), True), + (SparseDtype(bool), True), + (SparseDtype(object), False), + (SparseDtype(str), False), + ], +) +def test_is_numeric(dtype, expected): + assert dtype._is_numeric is expected + + +def test_str_uses_object(): + result = SparseDtype(str).subtype + assert result == np.dtype("object") + + +@pytest.mark.parametrize( + "string, expected", + [ + ("Sparse[float64]", SparseDtype(np.dtype("float64"))), + ("Sparse[float32]", SparseDtype(np.dtype("float32"))), + ("Sparse[int]", SparseDtype(np.dtype("int"))), + ("Sparse[str]", SparseDtype(np.dtype("str"))), + ("Sparse[datetime64[ns]]", SparseDtype(np.dtype("datetime64[ns]"))), + ("Sparse", 
SparseDtype(np.dtype("float"), np.nan)), + ], +) +def test_construct_from_string(string, expected): + result = SparseDtype.construct_from_string(string) + assert result == expected + + +@pytest.mark.parametrize( + "a, b, expected", + [ + (SparseDtype(float, 0.0), SparseDtype(np.dtype("float"), 0.0), True), + (SparseDtype(int, 0), SparseDtype(int, 0), True), + (SparseDtype(float, float("nan")), SparseDtype(float, np.nan), True), + (SparseDtype(float, 0), SparseDtype(float, np.nan), False), + (SparseDtype(int, 0.0), SparseDtype(float, 0.0), False), + ], +) +def test_hash_equal(a, b, expected): + result = a == b + assert result is expected + + result = hash(a) == hash(b) + assert result is expected + + +@pytest.mark.parametrize( + "string, expected", + [ + ("Sparse[int]", "int"), + ("Sparse[int, 0]", "int"), + ("Sparse[int64]", "int64"), + ("Sparse[int64, 0]", "int64"), + ("Sparse[datetime64[ns], 0]", "datetime64[ns]"), + ], +) +def test_parse_subtype(string, expected): + subtype, _ = SparseDtype._parse_subtype(string) + assert subtype == expected + + +@pytest.mark.parametrize( + "string", ["Sparse[int, 1]", "Sparse[float, 0.0]", "Sparse[bool, True]"] +) +def test_construct_from_string_fill_value_raises(string): + with pytest.raises(TypeError, match="fill_value in the string is not"): + SparseDtype.construct_from_string(string) + + +@pytest.mark.parametrize( + "original, dtype, expected", + [ + (SparseDtype(int, 0), float, SparseDtype(float, 0.0)), + (SparseDtype(int, 1), float, SparseDtype(float, 1.0)), + (SparseDtype(int, 1), str, SparseDtype(object, "1")), + (SparseDtype(float, 1.5), int, SparseDtype(int, 1)), + ], +) +def test_update_dtype(original, dtype, expected): + result = original.update_dtype(dtype) + assert result == expected + + +@pytest.mark.parametrize( + "original, dtype, expected_error_msg", + [ + ( + SparseDtype(float, np.nan), + int, + re.escape("Cannot convert non-finite values (NA or inf) to integer"), + ), + ( + SparseDtype(str, "abc"), + int, + r"invalid literal for int\(\) with base 10: ('abc'|np\.str_\('abc'\))", + ), + ], +) +def test_update_dtype_raises(original, dtype, expected_error_msg): + with pytest.raises(ValueError, match=expected_error_msg): + original.update_dtype(dtype) + + +def test_repr(): + # GH-34352 + result = str(SparseDtype("int64", fill_value=0)) + expected = "Sparse[int64, 0]" + assert result == expected + + result = str(SparseDtype(object, fill_value="0")) + expected = "Sparse[object, '0']" + assert result == expected + + +def test_sparse_dtype_subtype_must_be_numpy_dtype(): + # GH#53160 + msg = "SparseDtype subtype must be a numpy dtype" + with pytest.raises(TypeError, match=msg): + SparseDtype("category", fill_value="c") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_indexing.py new file mode 100644 index 00000000..d63d0fb0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_indexing.py @@ -0,0 +1,295 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import SparseDtype +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +@pytest.fixture +def arr_data(): + return np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6]) + + +@pytest.fixture +def arr(arr_data): + return SparseArray(arr_data) + + +class TestGetitem: + def test_getitem(self, arr): + dense = arr.to_dense() + for i, value in enumerate(arr): + 
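+ # scalar access must agree with the dense values, including negative indices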
tm.assert_almost_equal(value, dense[i]) + tm.assert_almost_equal(arr[-i], dense[-i]) + + def test_getitem_arraylike_mask(self, arr): + arr = SparseArray([0, 1, 2]) + result = arr[[True, False, True]] + expected = SparseArray([0, 2]) + tm.assert_sp_array_equal(result, expected) + + @pytest.mark.parametrize( + "slc", + [ + np.s_[:], + np.s_[1:10], + np.s_[1:100], + np.s_[10:1], + np.s_[:-3], + np.s_[-5:-4], + np.s_[:-12], + np.s_[-12:], + np.s_[2:], + np.s_[2::3], + np.s_[::2], + np.s_[::-1], + np.s_[::-2], + np.s_[1:6:2], + np.s_[:-6:-2], + ], + ) + @pytest.mark.parametrize( + "as_dense", [[np.nan] * 10, [1] * 10, [np.nan] * 5 + [1] * 5, []] + ) + def test_getslice(self, slc, as_dense): + as_dense = np.array(as_dense) + arr = SparseArray(as_dense) + + result = arr[slc] + expected = SparseArray(as_dense[slc]) + + tm.assert_sp_array_equal(result, expected) + + def test_getslice_tuple(self): + dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0]) + + sparse = SparseArray(dense) + res = sparse[(slice(4, None),)] + exp = SparseArray(dense[4:]) + tm.assert_sp_array_equal(res, exp) + + sparse = SparseArray(dense, fill_value=0) + res = sparse[(slice(4, None),)] + exp = SparseArray(dense[4:], fill_value=0) + tm.assert_sp_array_equal(res, exp) + + msg = "too many indices for array" + with pytest.raises(IndexError, match=msg): + sparse[4:, :] + + with pytest.raises(IndexError, match=msg): + # check numpy compat + dense[4:, :] + + def test_boolean_slice_empty(self): + arr = SparseArray([0, 1, 2]) + res = arr[[False, False, False]] + assert res.dtype == arr.dtype + + def test_getitem_bool_sparse_array(self, arr): + # GH 23122 + spar_bool = SparseArray([False, True] * 5, dtype=np.bool_, fill_value=True) + exp = SparseArray([np.nan, 2, np.nan, 5, 6]) + tm.assert_sp_array_equal(arr[spar_bool], exp) + + spar_bool = ~spar_bool + res = arr[spar_bool] + exp = SparseArray([np.nan, 1, 3, 4, np.nan]) + tm.assert_sp_array_equal(res, exp) + + spar_bool = SparseArray( + [False, True, np.nan] * 3, dtype=np.bool_, fill_value=np.nan + ) + res = arr[spar_bool] + exp = SparseArray([np.nan, 3, 5]) + tm.assert_sp_array_equal(res, exp) + + def test_getitem_bool_sparse_array_as_comparison(self): + # GH 45110 + arr = SparseArray([1, 2, 3, 4, np.nan, np.nan], fill_value=np.nan) + res = arr[arr > 2] + exp = SparseArray([3.0, 4.0], fill_value=np.nan) + tm.assert_sp_array_equal(res, exp) + + def test_get_item(self, arr): + zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0) + + assert np.isnan(arr[1]) + assert arr[2] == 1 + assert arr[7] == 5 + + assert zarr[0] == 0 + assert zarr[2] == 1 + assert zarr[7] == 5 + + errmsg = "must be an integer between -10 and 10" + + with pytest.raises(IndexError, match=errmsg): + arr[11] + + with pytest.raises(IndexError, match=errmsg): + arr[-11] + + assert arr[-1] == arr[len(arr) - 1] + + +class TestSetitem: + def test_set_item(self, arr_data): + arr = SparseArray(arr_data).copy() + + def setitem(): + arr[5] = 3 + + def setslice(): + arr[1:5] = 2 + + with pytest.raises(TypeError, match="assignment via setitem"): + setitem() + + with pytest.raises(TypeError, match="assignment via setitem"): + setslice() + + +class TestTake: + def test_take_scalar_raises(self, arr): + msg = "'indices' must be an array, not a scalar '2'." 
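+ # take() accepts only array-like indices; a bare scalar must raise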
+ with pytest.raises(ValueError, match=msg): + arr.take(2) + + def test_take(self, arr_data, arr): + exp = SparseArray(np.take(arr_data, [2, 3])) + tm.assert_sp_array_equal(arr.take([2, 3]), exp) + + exp = SparseArray(np.take(arr_data, [0, 1, 2])) + tm.assert_sp_array_equal(arr.take([0, 1, 2]), exp) + + def test_take_all_empty(self): + a = pd.array([0, 0], dtype=SparseDtype("int64")) + result = a.take([0, 1], allow_fill=True, fill_value=np.nan) + tm.assert_sp_array_equal(a, result) + + def test_take_fill_value(self): + data = np.array([1, np.nan, 0, 3, 0]) + sparse = SparseArray(data, fill_value=0) + + exp = SparseArray(np.take(data, [0]), fill_value=0) + tm.assert_sp_array_equal(sparse.take([0]), exp) + + exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0) + tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp) + + def test_take_negative(self, arr_data, arr): + exp = SparseArray(np.take(arr_data, [-1])) + tm.assert_sp_array_equal(arr.take([-1]), exp) + + exp = SparseArray(np.take(arr_data, [-4, -3, -2])) + tm.assert_sp_array_equal(arr.take([-4, -3, -2]), exp) + + def test_bad_take(self, arr): + with pytest.raises(IndexError, match="bounds"): + arr.take([11]) + + def test_take_filling(self): + # similar tests as GH 12631 + sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4]) + result = sparse.take(np.array([1, 0, -1])) + expected = SparseArray([np.nan, np.nan, 4]) + tm.assert_sp_array_equal(result, expected) + + # TODO: actionable? + # XXX: test change: fill_value=True -> allow_fill=True + result = sparse.take(np.array([1, 0, -1]), allow_fill=True) + expected = SparseArray([np.nan, np.nan, np.nan]) + tm.assert_sp_array_equal(result, expected) + + # allow_fill=False + result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = SparseArray([np.nan, np.nan, 4]) + tm.assert_sp_array_equal(result, expected) + + msg = "Invalid value in 'indices'" + with pytest.raises(ValueError, match=msg): + sparse.take(np.array([1, 0, -2]), allow_fill=True) + + with pytest.raises(ValueError, match=msg): + sparse.take(np.array([1, 0, -5]), allow_fill=True) + + msg = "out of bounds value in 'indices'" + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, -6])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5]), allow_fill=True) + + def test_take_filling_fill_value(self): + # same tests as GH#12631 + sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0) + result = sparse.take(np.array([1, 0, -1])) + expected = SparseArray([0, np.nan, 4], fill_value=0) + tm.assert_sp_array_equal(result, expected) + + # fill_value + result = sparse.take(np.array([1, 0, -1]), allow_fill=True) + # TODO: actionable? + # XXX: behavior change. + # the old way of filling self.fill_value doesn't follow EA rules. + # It's supposed to be self.dtype.na_value (nan in this case) + expected = SparseArray([0, np.nan, np.nan], fill_value=0) + tm.assert_sp_array_equal(result, expected) + + # allow_fill=False + result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = SparseArray([0, np.nan, 4], fill_value=0) + tm.assert_sp_array_equal(result, expected) + + msg = "Invalid value in 'indices'." 
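+ # with allow_fill=True, -1 is the only valid fill sentinel; -2/-5 are invalid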
+ with pytest.raises(ValueError, match=msg): + sparse.take(np.array([1, 0, -2]), allow_fill=True) + with pytest.raises(ValueError, match=msg): + sparse.take(np.array([1, 0, -5]), allow_fill=True) + + msg = "out of bounds value in 'indices'" + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, -6])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5]), fill_value=True) + + @pytest.mark.parametrize("kind", ["block", "integer"]) + def test_take_filling_all_nan(self, kind): + sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan], kind=kind) + result = sparse.take(np.array([1, 0, -1])) + expected = SparseArray([np.nan, np.nan, np.nan], kind=kind) + tm.assert_sp_array_equal(result, expected) + + result = sparse.take(np.array([1, 0, -1]), fill_value=True) + expected = SparseArray([np.nan, np.nan, np.nan], kind=kind) + tm.assert_sp_array_equal(result, expected) + + msg = "out of bounds value in 'indices'" + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, -6])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5]), fill_value=True) + + +class TestWhere: + def test_where_retain_fill_value(self): + # GH#45691 don't lose fill_value on _where + arr = SparseArray([np.nan, 1.0], fill_value=0) + + mask = np.array([True, False]) + + res = arr._where(~mask, 1) + exp = SparseArray([1, 1.0], fill_value=0) + tm.assert_sp_array_equal(res, exp) + + ser = pd.Series(arr) + res = ser.where(~mask, 1) + tm.assert_series_equal(res, pd.Series(exp)) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_libsparse.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_libsparse.py new file mode 100644 index 00000000..7a77a206 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_libsparse.py @@ -0,0 +1,551 @@ +import operator + +import numpy as np +import pytest + +import pandas._libs.sparse as splib +import pandas.util._test_decorators as td + +from pandas import Series +import pandas._testing as tm +from pandas.core.arrays.sparse import ( + BlockIndex, + IntIndex, + make_sparse_index, +) + + +@pytest.fixture +def test_length(): + return 20 + + +@pytest.fixture( + params=[ + [ + [0, 7, 15], + [3, 5, 5], + [2, 9, 14], + [2, 3, 5], + [2, 9, 15], + [1, 3, 4], + ], + [ + [0, 5], + [4, 4], + [1], + [4], + [1], + [3], + ], + [ + [0], + [10], + [0, 5], + [3, 7], + [0, 5], + [3, 5], + ], + [ + [10], + [5], + [0, 12], + [5, 3], + [12], + [3], + ], + [ + [0, 10], + [4, 6], + [5, 17], + [4, 2], + [], + [], + ], + [ + [0], + [5], + [], + [], + [], + [], + ], + ], + ids=[ + "plain_case", + "delete_blocks", + "split_blocks", + "skip_block", + "no_intersect", + "one_empty", + ], +) +def cases(request): + return request.param + + +class TestSparseIndexUnion: + @pytest.mark.parametrize( + "xloc, xlen, yloc, ylen, eloc, elen", + [ + [[0], [5], [5], [4], [0], [9]], + [[0, 10], [5, 5], [2, 17], [5, 2], [0, 10, 17], [7, 5, 2]], + [[1], [5], [3], [5], [1], [7]], + [[2, 10], [4, 4], [4], [8], [2], [12]], + [[0, 5], [3, 5], [0], [7], [0], [10]], + [[2, 10], [4, 4], [4, 13], [8, 4], [2], [15]], + [[2], [15], [4, 9, 14], [3, 2, 2], [2], [15]], + [[0, 10], [3, 3], [5, 15], [2, 2], [0, 5, 10, 15], [3, 2, 3, 2]], + ], + ) + def test_index_make_union(self, xloc, xlen, yloc, ylen, eloc, elen, test_length): + # Case 1 + # x: 
---- + # y: ---- + # r: -------- + # Case 2 + # x: ----- ----- + # y: ----- -- + # Case 3 + # x: ------ + # y: ------- + # r: ---------- + # Case 4 + # x: ------ ----- + # y: ------- + # r: ------------- + # Case 5 + # x: --- ----- + # y: ------- + # r: ------------- + # Case 6 + # x: ------ ----- + # y: ------- --- + # r: ------------- + # Case 7 + # x: ---------------------- + # y: ---- ---- --- + # r: ---------------------- + # Case 8 + # x: ---- --- + # y: --- --- + xindex = BlockIndex(test_length, xloc, xlen) + yindex = BlockIndex(test_length, yloc, ylen) + bresult = xindex.make_union(yindex) + assert isinstance(bresult, BlockIndex) + tm.assert_numpy_array_equal(bresult.blocs, np.array(eloc, dtype=np.int32)) + tm.assert_numpy_array_equal(bresult.blengths, np.array(elen, dtype=np.int32)) + + ixindex = xindex.to_int_index() + iyindex = yindex.to_int_index() + iresult = ixindex.make_union(iyindex) + assert isinstance(iresult, IntIndex) + tm.assert_numpy_array_equal(iresult.indices, bresult.to_int_index().indices) + + def test_int_index_make_union(self): + a = IntIndex(5, np.array([0, 3, 4], dtype=np.int32)) + b = IntIndex(5, np.array([0, 2], dtype=np.int32)) + res = a.make_union(b) + exp = IntIndex(5, np.array([0, 2, 3, 4], np.int32)) + assert res.equals(exp) + + a = IntIndex(5, np.array([], dtype=np.int32)) + b = IntIndex(5, np.array([0, 2], dtype=np.int32)) + res = a.make_union(b) + exp = IntIndex(5, np.array([0, 2], np.int32)) + assert res.equals(exp) + + a = IntIndex(5, np.array([], dtype=np.int32)) + b = IntIndex(5, np.array([], dtype=np.int32)) + res = a.make_union(b) + exp = IntIndex(5, np.array([], np.int32)) + assert res.equals(exp) + + a = IntIndex(5, np.array([0, 1, 2, 3, 4], dtype=np.int32)) + b = IntIndex(5, np.array([0, 1, 2, 3, 4], dtype=np.int32)) + res = a.make_union(b) + exp = IntIndex(5, np.array([0, 1, 2, 3, 4], np.int32)) + assert res.equals(exp) + + a = IntIndex(5, np.array([0, 1], dtype=np.int32)) + b = IntIndex(4, np.array([0, 1], dtype=np.int32)) + + msg = "Indices must reference same underlying length" + with pytest.raises(ValueError, match=msg): + a.make_union(b) + + +class TestSparseIndexIntersect: + @td.skip_if_windows + def test_intersect(self, cases, test_length): + xloc, xlen, yloc, ylen, eloc, elen = cases + xindex = BlockIndex(test_length, xloc, xlen) + yindex = BlockIndex(test_length, yloc, ylen) + expected = BlockIndex(test_length, eloc, elen) + longer_index = BlockIndex(test_length + 1, yloc, ylen) + + result = xindex.intersect(yindex) + assert result.equals(expected) + result = xindex.to_int_index().intersect(yindex.to_int_index()) + assert result.equals(expected.to_int_index()) + + msg = "Indices must reference same underlying length" + with pytest.raises(Exception, match=msg): + xindex.intersect(longer_index) + with pytest.raises(Exception, match=msg): + xindex.to_int_index().intersect(longer_index.to_int_index()) + + def test_intersect_empty(self): + xindex = IntIndex(4, np.array([], dtype=np.int32)) + yindex = IntIndex(4, np.array([2, 3], dtype=np.int32)) + assert xindex.intersect(yindex).equals(xindex) + assert yindex.intersect(xindex).equals(xindex) + + xindex = xindex.to_block_index() + yindex = yindex.to_block_index() + assert xindex.intersect(yindex).equals(xindex) + assert yindex.intersect(xindex).equals(xindex) + + @pytest.mark.parametrize( + "case", + [ + # Argument 2 to "IntIndex" has incompatible type "ndarray[Any, + # dtype[signedinteger[_32Bit]]]"; expected "Sequence[int]" + IntIndex(5, np.array([1, 2], dtype=np.int32)), # type: 
ignore[arg-type] + IntIndex(5, np.array([0, 2, 4], dtype=np.int32)), # type: ignore[arg-type] + IntIndex(0, np.array([], dtype=np.int32)), # type: ignore[arg-type] + IntIndex(5, np.array([], dtype=np.int32)), # type: ignore[arg-type] + ], + ) + def test_intersect_identical(self, case): + assert case.intersect(case).equals(case) + case = case.to_block_index() + assert case.intersect(case).equals(case) + + +class TestSparseIndexCommon: + def test_int_internal(self): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="integer") + assert isinstance(idx, IntIndex) + assert idx.npoints == 2 + tm.assert_numpy_array_equal(idx.indices, np.array([2, 3], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="integer") + assert isinstance(idx, IntIndex) + assert idx.npoints == 0 + tm.assert_numpy_array_equal(idx.indices, np.array([], dtype=np.int32)) + + idx = make_sparse_index( + 4, np.array([0, 1, 2, 3], dtype=np.int32), kind="integer" + ) + assert isinstance(idx, IntIndex) + assert idx.npoints == 4 + tm.assert_numpy_array_equal(idx.indices, np.array([0, 1, 2, 3], dtype=np.int32)) + + def test_block_internal(self): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 2 + tm.assert_numpy_array_equal(idx.blocs, np.array([2], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([2], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 0 + tm.assert_numpy_array_equal(idx.blocs, np.array([], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 4 + tm.assert_numpy_array_equal(idx.blocs, np.array([0], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([4], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 3 + tm.assert_numpy_array_equal(idx.blocs, np.array([0, 2], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([1, 2], dtype=np.int32)) + + @pytest.mark.parametrize("kind", ["integer", "block"]) + def test_lookup(self, kind): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind=kind) + assert idx.lookup(-1) == -1 + assert idx.lookup(0) == -1 + assert idx.lookup(1) == -1 + assert idx.lookup(2) == 0 + assert idx.lookup(3) == 1 + assert idx.lookup(4) == -1 + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind=kind) + + for i in range(-1, 5): + assert idx.lookup(i) == -1 + + idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind=kind) + assert idx.lookup(-1) == -1 + assert idx.lookup(0) == 0 + assert idx.lookup(1) == 1 + assert idx.lookup(2) == 2 + assert idx.lookup(3) == 3 + assert idx.lookup(4) == -1 + + idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind=kind) + assert idx.lookup(-1) == -1 + assert idx.lookup(0) == 0 + assert idx.lookup(1) == -1 + assert idx.lookup(2) == 1 + assert idx.lookup(3) == 2 + assert idx.lookup(4) == -1 + + @pytest.mark.parametrize("kind", ["integer", "block"]) + def test_lookup_array(self, kind): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind=kind) + + res = idx.lookup_array(np.array([-1, 0, 2], dtype=np.int32)) + exp = 
np.array([-1, -1, 0], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + res = idx.lookup_array(np.array([4, 2, 1, 3], dtype=np.int32)) + exp = np.array([-1, 0, -1, 1], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind=kind) + res = idx.lookup_array(np.array([-1, 0, 2, 4], dtype=np.int32)) + exp = np.array([-1, -1, -1, -1], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind=kind) + res = idx.lookup_array(np.array([-1, 0, 2], dtype=np.int32)) + exp = np.array([-1, 0, 2], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + res = idx.lookup_array(np.array([4, 2, 1, 3], dtype=np.int32)) + exp = np.array([-1, 2, 1, 3], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind=kind) + res = idx.lookup_array(np.array([2, 1, 3, 0], dtype=np.int32)) + exp = np.array([1, -1, 2, 0], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + res = idx.lookup_array(np.array([1, 4, 2, 5], dtype=np.int32)) + exp = np.array([-1, -1, 1, -1], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + @pytest.mark.parametrize( + "idx, expected", + [ + [0, -1], + [5, 0], + [7, 2], + [8, -1], + [9, -1], + [10, -1], + [11, -1], + [12, 3], + [17, 8], + [18, -1], + ], + ) + def test_lookup_basics(self, idx, expected): + bindex = BlockIndex(20, [5, 12], [3, 6]) + assert bindex.lookup(idx) == expected + + iindex = bindex.to_int_index() + assert iindex.lookup(idx) == expected + + +class TestBlockIndex: + def test_block_internal(self): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 2 + tm.assert_numpy_array_equal(idx.blocs, np.array([2], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([2], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 0 + tm.assert_numpy_array_equal(idx.blocs, np.array([], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 4 + tm.assert_numpy_array_equal(idx.blocs, np.array([0], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([4], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 3 + tm.assert_numpy_array_equal(idx.blocs, np.array([0, 2], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([1, 2], dtype=np.int32)) + + @pytest.mark.parametrize("i", [5, 10, 100, 101]) + def test_make_block_boundary(self, i): + idx = make_sparse_index(i, np.arange(0, i, 2, dtype=np.int32), kind="block") + + exp = np.arange(0, i, 2, dtype=np.int32) + tm.assert_numpy_array_equal(idx.blocs, exp) + tm.assert_numpy_array_equal(idx.blengths, np.ones(len(exp), dtype=np.int32)) + + def test_equals(self): + index = BlockIndex(10, [0, 4], [2, 5]) + + assert index.equals(index) + assert not index.equals(BlockIndex(10, [0, 4], [2, 6])) + + def test_check_integrity(self): + locs = [] + lengths = [] + + # 0-length OK + BlockIndex(0, locs, lengths) + + # also OK even though empty + BlockIndex(1, locs, lengths) + + msg = "Block 0 extends beyond end" + 
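+ # loc 5 + length 10 = 15, which runs past the declared total length of 10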
with pytest.raises(ValueError, match=msg):
+ BlockIndex(10, [5], [10])
+
+ msg = "Block 0 overlaps"
+ with pytest.raises(ValueError, match=msg):
+ BlockIndex(10, [2, 5], [5, 3])
+
+ def test_to_int_index(self):
+ locs = [0, 10]
+ lengths = [4, 6]
+ exp_inds = [0, 1, 2, 3, 10, 11, 12, 13, 14, 15]
+
+ block = BlockIndex(20, locs, lengths)
+ dense = block.to_int_index()
+
+ tm.assert_numpy_array_equal(dense.indices, np.array(exp_inds, dtype=np.int32))
+
+ def test_to_block_index(self):
+ index = BlockIndex(10, [0, 5], [4, 5])
+ assert index.to_block_index() is index
+
+
+class TestIntIndex:
+ def test_check_integrity(self):
+ # More indices than specified by self.length
+ msg = "Too many indices"
+
+ with pytest.raises(ValueError, match=msg):
+ IntIndex(length=1, indices=[1, 2, 3])
+
+ # No index can be negative.
+ msg = "No index can be less than zero"
+
+ with pytest.raises(ValueError, match=msg):
+ IntIndex(length=5, indices=[1, -2, 3])
+
+ # All indices must be less than the length.
+ msg = "All indices must be less than the length"
+
+ with pytest.raises(ValueError, match=msg):
+ IntIndex(length=5, indices=[1, 2, 5])
+
+ with pytest.raises(ValueError, match=msg):
+ IntIndex(length=5, indices=[1, 2, 6])
+
+ # Indices must be strictly ascending.
+ msg = "Indices must be strictly increasing"
+
+ with pytest.raises(ValueError, match=msg):
+ IntIndex(length=5, indices=[1, 3, 2])
+
+ with pytest.raises(ValueError, match=msg):
+ IntIndex(length=5, indices=[1, 3, 3])
+
+ def test_int_internal(self):
+ idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="integer")
+ assert isinstance(idx, IntIndex)
+ assert idx.npoints == 2
+ tm.assert_numpy_array_equal(idx.indices, np.array([2, 3], dtype=np.int32))
+
+ idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="integer")
+ assert isinstance(idx, IntIndex)
+ assert idx.npoints == 0
+ tm.assert_numpy_array_equal(idx.indices, np.array([], dtype=np.int32))
+
+ idx = make_sparse_index(
+ 4, np.array([0, 1, 2, 3], dtype=np.int32), kind="integer"
+ )
+ assert isinstance(idx, IntIndex)
+ assert idx.npoints == 4
+ tm.assert_numpy_array_equal(idx.indices, np.array([0, 1, 2, 3], dtype=np.int32))
+
+ def test_equals(self):
+ index = IntIndex(10, [0, 1, 2, 3, 4])
+ assert index.equals(index)
+ assert not index.equals(IntIndex(10, [0, 1, 2, 3]))
+
+ def test_to_block_index(self, cases, test_length):
+ xloc, xlen, yloc, ylen, _, _ = cases
+ xindex = BlockIndex(test_length, xloc, xlen)
+ yindex = BlockIndex(test_length, yloc, ylen)
+
+ # see if they survive the round trip
+ xbindex = xindex.to_int_index().to_block_index()
+ ybindex = yindex.to_int_index().to_block_index()
+ assert isinstance(xbindex, BlockIndex)
+ assert xbindex.equals(xindex)
+ assert ybindex.equals(yindex)
+
+ def test_to_int_index(self):
+ index = IntIndex(10, [2, 3, 4, 5, 6])
+ assert index.to_int_index() is index
+
+
+class TestSparseOperators:
+ @pytest.mark.parametrize("opname", ["add", "sub", "mul", "truediv", "floordiv"])
+ def test_op(self, opname, cases, test_length):
+ xloc, xlen, yloc, ylen, _, _ = cases
+ sparse_op = getattr(splib, f"sparse_{opname}_float64")
+ python_op = getattr(operator, opname)
+
+ xindex = BlockIndex(test_length, xloc, xlen)
+ yindex = BlockIndex(test_length, yloc, ylen)
+
+ xdindex = xindex.to_int_index()
+ ydindex = yindex.to_int_index()
+
+ x = np.arange(xindex.npoints) * 10.0 + 1
+
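+ # scale y differently from x so each operand is identifiable in results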
y = np.arange(yindex.npoints) * 100.0 + 1 + + xfill = 0 + yfill = 2 + + result_block_vals, rb_index, bfill = sparse_op( + x, xindex, xfill, y, yindex, yfill + ) + result_int_vals, ri_index, ifill = sparse_op( + x, xdindex, xfill, y, ydindex, yfill + ) + + assert rb_index.to_int_index().equals(ri_index) + tm.assert_numpy_array_equal(result_block_vals, result_int_vals) + assert bfill == ifill + + # check versus Series... + xseries = Series(x, xdindex.indices) + xseries = xseries.reindex(np.arange(test_length)).fillna(xfill) + + yseries = Series(y, ydindex.indices) + yseries = yseries.reindex(np.arange(test_length)).fillna(yfill) + + series_result = python_op(xseries, yseries) + series_result = series_result.reindex(ri_index.indices) + + tm.assert_numpy_array_equal(result_block_vals, series_result.values) + tm.assert_numpy_array_equal(result_int_vals, series_result.values) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_reductions.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_reductions.py new file mode 100644 index 00000000..f44423d5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_reductions.py @@ -0,0 +1,306 @@ +import numpy as np +import pytest + +from pandas import ( + NaT, + SparseDtype, + Timestamp, + isna, +) +from pandas.core.arrays.sparse import SparseArray + + +class TestReductions: + @pytest.mark.parametrize( + "data,pos,neg", + [ + ([True, True, True], True, False), + ([1, 2, 1], 1, 0), + ([1.0, 2.0, 1.0], 1.0, 0.0), + ], + ) + def test_all(self, data, pos, neg): + # GH#17570 + out = SparseArray(data).all() + assert out + + out = SparseArray(data, fill_value=pos).all() + assert out + + data[1] = neg + out = SparseArray(data).all() + assert not out + + out = SparseArray(data, fill_value=pos).all() + assert not out + + @pytest.mark.parametrize( + "data,pos,neg", + [ + ([True, True, True], True, False), + ([1, 2, 1], 1, 0), + ([1.0, 2.0, 1.0], 1.0, 0.0), + ], + ) + def test_numpy_all(self, data, pos, neg): + # GH#17570 + out = np.all(SparseArray(data)) + assert out + + out = np.all(SparseArray(data, fill_value=pos)) + assert out + + data[1] = neg + out = np.all(SparseArray(data)) + assert not out + + out = np.all(SparseArray(data, fill_value=pos)) + assert not out + + # raises with a different message on py2. 
+ msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.all(SparseArray(data), out=np.array([])) + + @pytest.mark.parametrize( + "data,pos,neg", + [ + ([False, True, False], True, False), + ([0, 2, 0], 2, 0), + ([0.0, 2.0, 0.0], 2.0, 0.0), + ], + ) + def test_any(self, data, pos, neg): + # GH#17570 + out = SparseArray(data).any() + assert out + + out = SparseArray(data, fill_value=pos).any() + assert out + + data[1] = neg + out = SparseArray(data).any() + assert not out + + out = SparseArray(data, fill_value=pos).any() + assert not out + + @pytest.mark.parametrize( + "data,pos,neg", + [ + ([False, True, False], True, False), + ([0, 2, 0], 2, 0), + ([0.0, 2.0, 0.0], 2.0, 0.0), + ], + ) + def test_numpy_any(self, data, pos, neg): + # GH#17570 + out = np.any(SparseArray(data)) + assert out + + out = np.any(SparseArray(data, fill_value=pos)) + assert out + + data[1] = neg + out = np.any(SparseArray(data)) + assert not out + + out = np.any(SparseArray(data, fill_value=pos)) + assert not out + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.any(SparseArray(data), out=out) + + def test_sum(self): + data = np.arange(10).astype(float) + out = SparseArray(data).sum() + assert out == 45.0 + + data[5] = np.nan + out = SparseArray(data, fill_value=2).sum() + assert out == 40.0 + + out = SparseArray(data, fill_value=np.nan).sum() + assert out == 40.0 + + @pytest.mark.parametrize( + "arr", + [np.array([0, 1, np.nan, 1]), np.array([0, 1, 1])], + ) + @pytest.mark.parametrize("fill_value", [0, 1, np.nan]) + @pytest.mark.parametrize("min_count, expected", [(3, 2), (4, np.nan)]) + def test_sum_min_count(self, arr, fill_value, min_count, expected): + # GH#25777 + sparray = SparseArray(arr, fill_value=fill_value) + result = sparray.sum(min_count=min_count) + if np.isnan(expected): + assert np.isnan(result) + else: + assert result == expected + + def test_bool_sum_min_count(self): + spar_bool = SparseArray([False, True] * 5, dtype=np.bool_, fill_value=True) + res = spar_bool.sum(min_count=1) + assert res == 5 + res = spar_bool.sum(min_count=11) + assert isna(res) + + def test_numpy_sum(self): + data = np.arange(10).astype(float) + out = np.sum(SparseArray(data)) + assert out == 45.0 + + data[5] = np.nan + out = np.sum(SparseArray(data, fill_value=2)) + assert out == 40.0 + + out = np.sum(SparseArray(data, fill_value=np.nan)) + assert out == 40.0 + + msg = "the 'dtype' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.sum(SparseArray(data), dtype=np.int64) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.sum(SparseArray(data), out=out) + + def test_mean(self): + data = np.arange(10).astype(float) + out = SparseArray(data).mean() + assert out == 4.5 + + data[5] = np.nan + out = SparseArray(data).mean() + assert out == 40.0 / 9 + + def test_numpy_mean(self): + data = np.arange(10).astype(float) + out = np.mean(SparseArray(data)) + assert out == 4.5 + + data[5] = np.nan + out = np.mean(SparseArray(data)) + assert out == 40.0 / 9 + + msg = "the 'dtype' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.mean(SparseArray(data), dtype=np.int64) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.mean(SparseArray(data), out=out) + + +class TestMinMax: + @pytest.mark.parametrize( + "raw_data,max_expected,min_expected", + [ + (np.arange(5.0), [4], [0]), + (-np.arange(5.0), [0], [-4]), 
+ (np.array([0, 1, 2, np.nan, 4]), [4], [0]), + (np.array([np.nan] * 5), [np.nan], [np.nan]), + (np.array([]), [np.nan], [np.nan]), + ], + ) + def test_nan_fill_value(self, raw_data, max_expected, min_expected): + arr = SparseArray(raw_data) + max_result = arr.max() + min_result = arr.min() + assert max_result in max_expected + assert min_result in min_expected + + max_result = arr.max(skipna=False) + min_result = arr.min(skipna=False) + if np.isnan(raw_data).any(): + assert np.isnan(max_result) + assert np.isnan(min_result) + else: + assert max_result in max_expected + assert min_result in min_expected + + @pytest.mark.parametrize( + "fill_value,max_expected,min_expected", + [ + (100, 100, 0), + (-100, 1, -100), + ], + ) + def test_fill_value(self, fill_value, max_expected, min_expected): + arr = SparseArray( + np.array([fill_value, 0, 1]), dtype=SparseDtype("int", fill_value) + ) + max_result = arr.max() + assert max_result == max_expected + + min_result = arr.min() + assert min_result == min_expected + + def test_only_fill_value(self): + fv = 100 + arr = SparseArray(np.array([fv, fv, fv]), dtype=SparseDtype("int", fv)) + assert len(arr._valid_sp_values) == 0 + + assert arr.max() == fv + assert arr.min() == fv + assert arr.max(skipna=False) == fv + assert arr.min(skipna=False) == fv + + @pytest.mark.parametrize("func", ["min", "max"]) + @pytest.mark.parametrize("data", [np.array([]), np.array([np.nan, np.nan])]) + @pytest.mark.parametrize( + "dtype,expected", + [ + (SparseDtype(np.float64, np.nan), np.nan), + (SparseDtype(np.float64, 5.0), np.nan), + (SparseDtype("datetime64[ns]", NaT), NaT), + (SparseDtype("datetime64[ns]", Timestamp("2018-05-05")), NaT), + ], + ) + def test_na_value_if_no_valid_values(self, func, data, dtype, expected): + arr = SparseArray(data, dtype=dtype) + result = getattr(arr, func)() + if expected is NaT: + # TODO: pin down whether we wrap datetime64("NaT") + assert result is NaT or np.isnat(result) + else: + assert np.isnan(result) + + +class TestArgmaxArgmin: + @pytest.mark.parametrize( + "arr,argmax_expected,argmin_expected", + [ + (SparseArray([1, 2, 0, 1, 2]), 1, 2), + (SparseArray([-1, -2, 0, -1, -2]), 2, 1), + (SparseArray([np.nan, 1, 0, 0, np.nan, -1]), 1, 5), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2]), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=-1), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=0), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=1), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=2), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=3), 5, 2), + (SparseArray([0] * 10 + [-1], fill_value=0), 0, 10), + (SparseArray([0] * 10 + [-1], fill_value=-1), 0, 10), + (SparseArray([0] * 10 + [-1], fill_value=1), 0, 10), + (SparseArray([-1] + [0] * 10, fill_value=0), 1, 0), + (SparseArray([1] + [0] * 10, fill_value=0), 0, 1), + (SparseArray([-1] + [0] * 10, fill_value=-1), 1, 0), + (SparseArray([1] + [0] * 10, fill_value=1), 0, 1), + ], + ) + def test_argmax_argmin(self, arr, argmax_expected, argmin_expected): + argmax_result = arr.argmax() + argmin_result = arr.argmin() + assert argmax_result == argmax_expected + assert argmin_result == argmin_expected + + @pytest.mark.parametrize( + "arr,method", + [(SparseArray([]), "argmax"), (SparseArray([]), "argmin")], + ) + def test_empty_array(self, arr, method): + msg = f"attempt to get {method} of an empty sequence" + with pytest.raises(ValueError, match=msg): + arr.argmax() if method == "argmax" else arr.argmin() diff --git 
a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_unary.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_unary.py new file mode 100644 index 00000000..c00a7377 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/sparse/test_unary.py @@ -0,0 +1,79 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import SparseArray + + +@pytest.mark.filterwarnings("ignore:invalid value encountered in cast:RuntimeWarning") +@pytest.mark.parametrize("fill_value", [0, np.nan]) +@pytest.mark.parametrize("op", [operator.pos, operator.neg]) +def test_unary_op(op, fill_value): + arr = np.array([0, 1, np.nan, 2]) + sparray = SparseArray(arr, fill_value=fill_value) + result = op(sparray) + expected = SparseArray(op(arr), fill_value=op(fill_value)) + tm.assert_sp_array_equal(result, expected) + + +@pytest.mark.parametrize("fill_value", [True, False]) +def test_invert(fill_value): + arr = np.array([True, False, False, True]) + sparray = SparseArray(arr, fill_value=fill_value) + result = ~sparray + expected = SparseArray(~arr, fill_value=not fill_value) + tm.assert_sp_array_equal(result, expected) + + result = ~pd.Series(sparray) + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) + + result = ~pd.DataFrame({"A": sparray}) + expected = pd.DataFrame({"A": expected}) + tm.assert_frame_equal(result, expected) + + +class TestUnaryMethods: + @pytest.mark.filterwarnings( + "ignore:invalid value encountered in cast:RuntimeWarning" + ) + def test_neg_operator(self): + arr = SparseArray([-1, -2, np.nan, 3], fill_value=np.nan, dtype=np.int8) + res = -arr + exp = SparseArray([1, 2, np.nan, -3], fill_value=np.nan, dtype=np.int8) + tm.assert_sp_array_equal(exp, res) + + arr = SparseArray([-1, -2, 1, 3], fill_value=-1, dtype=np.int8) + res = -arr + exp = SparseArray([1, 2, -1, -3], fill_value=1, dtype=np.int8) + tm.assert_sp_array_equal(exp, res) + + @pytest.mark.filterwarnings( + "ignore:invalid value encountered in cast:RuntimeWarning" + ) + def test_abs_operator(self): + arr = SparseArray([-1, -2, np.nan, 3], fill_value=np.nan, dtype=np.int8) + res = abs(arr) + exp = SparseArray([1, 2, np.nan, 3], fill_value=np.nan, dtype=np.int8) + tm.assert_sp_array_equal(exp, res) + + arr = SparseArray([-1, -2, 1, 3], fill_value=-1, dtype=np.int8) + res = abs(arr) + exp = SparseArray([1, 2, 1, 3], fill_value=1, dtype=np.int8) + tm.assert_sp_array_equal(exp, res) + + def test_invert_operator(self): + arr = SparseArray([False, True, False, True], fill_value=False, dtype=np.bool_) + exp = SparseArray( + np.invert([False, True, False, True]), fill_value=True, dtype=np.bool_ + ) + res = ~arr + tm.assert_sp_array_equal(exp, res) + + arr = SparseArray([0, 1, 0, 2, 3, 0], fill_value=0, dtype=np.int32) + res = ~arr + exp = SparseArray([-1, -2, -1, -3, -4, -1], fill_value=-1, dtype=np.int32) + tm.assert_sp_array_equal(exp, res) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/string_/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/string_/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/string_/test_string.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/string_/test_string.py new file mode 100644 index 00000000..b9c59018 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/string_/test_string.py @@ -0,0 +1,666 
@@ +""" +This module tests the functionality of StringArray and ArrowStringArray. +Tests for the str accessors are in pandas/tests/strings/test_string_array.py +""" +import operator + +import numpy as np +import pytest + +from pandas.compat.pyarrow import pa_version_under12p0 + +from pandas.core.dtypes.common import is_dtype_equal + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays.string_arrow import ( + ArrowStringArray, + ArrowStringArrayNumpySemantics, +) + + +def na_val(dtype): + if dtype.storage == "pyarrow_numpy": + return np.nan + else: + return pd.NA + + +@pytest.fixture +def dtype(string_storage): + """Fixture giving StringDtype from parametrized 'string_storage'""" + return pd.StringDtype(storage=string_storage) + + +@pytest.fixture +def cls(dtype): + """Fixture giving array type from parametrized 'dtype'""" + return dtype.construct_array_type() + + +def test_repr(dtype): + df = pd.DataFrame({"A": pd.array(["a", pd.NA, "b"], dtype=dtype)}) + if dtype.storage == "pyarrow_numpy": + expected = " A\n0 a\n1 NaN\n2 b" + else: + expected = " A\n0 a\n1 \n2 b" + assert repr(df) == expected + + if dtype.storage == "pyarrow_numpy": + expected = "0 a\n1 NaN\n2 b\nName: A, dtype: string" + else: + expected = "0 a\n1 \n2 b\nName: A, dtype: string" + assert repr(df.A) == expected + + if dtype.storage == "pyarrow": + arr_name = "ArrowStringArray" + expected = f"<{arr_name}>\n['a', , 'b']\nLength: 3, dtype: string" + elif dtype.storage == "pyarrow_numpy": + arr_name = "ArrowStringArrayNumpySemantics" + expected = f"<{arr_name}>\n['a', nan, 'b']\nLength: 3, dtype: string" + else: + arr_name = "StringArray" + expected = f"<{arr_name}>\n['a', , 'b']\nLength: 3, dtype: string" + assert repr(df.A.array) == expected + + +def test_none_to_nan(cls): + a = cls._from_sequence(["a", None, "b"]) + assert a[1] is not None + assert a[1] is na_val(a.dtype) + + +def test_setitem_validates(cls): + arr = cls._from_sequence(["a", "b"]) + + if cls is pd.arrays.StringArray: + msg = "Cannot set non-string value '10' into a StringArray." + else: + msg = "Scalar must be NA or str" + with pytest.raises(TypeError, match=msg): + arr[0] = 10 + + if cls is pd.arrays.StringArray: + msg = "Must provide strings." + else: + msg = "Scalar must be NA or str" + with pytest.raises(TypeError, match=msg): + arr[:] = np.array([1, 2]) + + +def test_setitem_with_scalar_string(dtype): + # is_float_dtype considers some strings, like 'd', to be floats + # which can cause issues. 
+ arr = pd.array(["a", "c"], dtype=dtype) + arr[0] = "d" + expected = pd.array(["d", "c"], dtype=dtype) + tm.assert_extension_array_equal(arr, expected) + + +def test_setitem_with_array_with_missing(dtype): + # ensure that when setting with an array of values, we don't mutate the + # array `value` in __setitem__(self, key, value) + arr = pd.array(["a", "b", "c"], dtype=dtype) + value = np.array(["A", None]) + value_orig = value.copy() + arr[[0, 1]] = value + + expected = pd.array(["A", pd.NA, "c"], dtype=dtype) + tm.assert_extension_array_equal(arr, expected) + tm.assert_numpy_array_equal(value, value_orig) + + +def test_astype_roundtrip(dtype): + ser = pd.Series(pd.date_range("2000", periods=12)) + ser[0] = None + + casted = ser.astype(dtype) + assert is_dtype_equal(casted.dtype, dtype) + + result = casted.astype("datetime64[ns]") + tm.assert_series_equal(result, ser) + + +def test_add(dtype): + a = pd.Series(["a", "b", "c", None, None], dtype=dtype) + b = pd.Series(["x", "y", None, "z", None], dtype=dtype) + + result = a + b + expected = pd.Series(["ax", "by", None, None, None], dtype=dtype) + tm.assert_series_equal(result, expected) + + result = a.add(b) + tm.assert_series_equal(result, expected) + + result = a.radd(b) + expected = pd.Series(["xa", "yb", None, None, None], dtype=dtype) + tm.assert_series_equal(result, expected) + + result = a.add(b, fill_value="-") + expected = pd.Series(["ax", "by", "c-", "-z", None], dtype=dtype) + tm.assert_series_equal(result, expected) + + +def test_add_2d(dtype, request, arrow_string_storage): + if dtype.storage in arrow_string_storage: + reason = "Failed: DID NOT RAISE " + mark = pytest.mark.xfail(raises=None, reason=reason) + request.node.add_marker(mark) + + a = pd.array(["a", "b", "c"], dtype=dtype) + b = np.array([["a", "b", "c"]], dtype=object) + with pytest.raises(ValueError, match="3 != 1"): + a + b + + s = pd.Series(a) + with pytest.raises(ValueError, match="3 != 1"): + s + b + + +def test_add_sequence(dtype): + a = pd.array(["a", "b", None, None], dtype=dtype) + other = ["x", None, "y", None] + + result = a + other + expected = pd.array(["ax", None, None, None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = other + a + expected = pd.array(["xa", None, None, None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_mul(dtype, request, arrow_string_storage): + if dtype.storage in arrow_string_storage: + reason = "unsupported operand type(s) for *: 'ArrowStringArray' and 'int'" + mark = pytest.mark.xfail(raises=NotImplementedError, reason=reason) + request.node.add_marker(mark) + + a = pd.array(["a", "b", None], dtype=dtype) + result = a * 2 + expected = pd.array(["aa", "bb", None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = 2 * a + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.xfail(reason="GH-28527") +def test_add_strings(dtype): + arr = pd.array(["a", "b", "c", "d"], dtype=dtype) + df = pd.DataFrame([["t", "y", "v", "w"]]) + assert arr.__add__(df) is NotImplemented + + result = arr + df + expected = pd.DataFrame([["at", "by", "cv", "dw"]]).astype(dtype) + tm.assert_frame_equal(result, expected) + + result = df + arr + expected = pd.DataFrame([["ta", "yb", "vc", "wd"]]).astype(dtype) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.xfail(reason="GH-28527") +def test_add_frame(dtype): + arr = pd.array(["a", "b", np.nan, np.nan], dtype=dtype) + df = pd.DataFrame([["x", np.nan, "y", np.nan]]) + + assert 
arr.__add__(df) is NotImplemented + + result = arr + df + expected = pd.DataFrame([["ax", np.nan, np.nan, np.nan]]).astype(dtype) + tm.assert_frame_equal(result, expected) + + result = df + arr + expected = pd.DataFrame([["xa", np.nan, np.nan, np.nan]]).astype(dtype) + tm.assert_frame_equal(result, expected) + + +def test_comparison_methods_scalar(comparison_op, dtype): + op_name = f"__{comparison_op.__name__}__" + a = pd.array(["a", None, "c"], dtype=dtype) + other = "a" + result = getattr(a, op_name)(other) + if dtype.storage == "pyarrow_numpy": + expected = np.array([getattr(item, op_name)(other) for item in a]) + if comparison_op == operator.ne: + expected[1] = True + else: + expected[1] = False + tm.assert_numpy_array_equal(result, expected.astype(np.bool_)) + else: + expected_dtype = "boolean[pyarrow]" if dtype.storage == "pyarrow" else "boolean" + expected = np.array([getattr(item, op_name)(other) for item in a], dtype=object) + expected = pd.array(expected, dtype=expected_dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_comparison_methods_scalar_pd_na(comparison_op, dtype): + op_name = f"__{comparison_op.__name__}__" + a = pd.array(["a", None, "c"], dtype=dtype) + result = getattr(a, op_name)(pd.NA) + + if dtype.storage == "pyarrow_numpy": + if operator.ne == comparison_op: + expected = np.array([True, True, True]) + else: + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(result, expected) + else: + expected_dtype = "boolean[pyarrow]" if dtype.storage == "pyarrow" else "boolean" + expected = pd.array([None, None, None], dtype=expected_dtype) + tm.assert_extension_array_equal(result, expected) + tm.assert_extension_array_equal(result, expected) + + +def test_comparison_methods_scalar_not_string(comparison_op, dtype): + op_name = f"__{comparison_op.__name__}__" + + a = pd.array(["a", None, "c"], dtype=dtype) + other = 42 + + if op_name not in ["__eq__", "__ne__"]: + with pytest.raises(TypeError, match="not supported between"): + getattr(a, op_name)(other) + + return + + result = getattr(a, op_name)(other) + + if dtype.storage == "pyarrow_numpy": + expected_data = { + "__eq__": [False, False, False], + "__ne__": [True, True, True], + }[op_name] + expected = np.array(expected_data) + tm.assert_numpy_array_equal(result, expected) + else: + expected_data = {"__eq__": [False, None, False], "__ne__": [True, None, True]}[ + op_name + ] + expected_dtype = "boolean[pyarrow]" if dtype.storage == "pyarrow" else "boolean" + expected = pd.array(expected_data, dtype=expected_dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_comparison_methods_array(comparison_op, dtype): + op_name = f"__{comparison_op.__name__}__" + + a = pd.array(["a", None, "c"], dtype=dtype) + other = [None, None, "c"] + result = getattr(a, op_name)(other) + if dtype.storage == "pyarrow_numpy": + if operator.ne == comparison_op: + expected = np.array([True, True, False]) + else: + expected = np.array([False, False, False]) + expected[-1] = getattr(other[-1], op_name)(a[-1]) + tm.assert_numpy_array_equal(result, expected) + + result = getattr(a, op_name)(pd.NA) + if operator.ne == comparison_op: + expected = np.array([True, True, True]) + else: + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(result, expected) + + else: + expected_dtype = "boolean[pyarrow]" if dtype.storage == "pyarrow" else "boolean" + expected = np.full(len(a), fill_value=None, dtype="object") + expected[-1] = getattr(other[-1], op_name)(a[-1]) + expected = 
pd.array(expected, dtype=expected_dtype) + tm.assert_extension_array_equal(result, expected) + + result = getattr(a, op_name)(pd.NA) + expected = pd.array([None, None, None], dtype=expected_dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_constructor_raises(cls): + if cls is pd.arrays.StringArray: + msg = "StringArray requires a sequence of strings or pandas.NA" + else: + msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowExtensionArray" + + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", "b"], dtype="S1")) + + with pytest.raises(ValueError, match=msg): + cls(np.array([])) + + if cls is pd.arrays.StringArray: + # GH#45057 np.nan and None do NOT raise, as they are considered valid NAs + # for string dtype + cls(np.array(["a", np.nan], dtype=object)) + cls(np.array(["a", None], dtype=object)) + else: + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", np.nan], dtype=object)) + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", None], dtype=object)) + + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", pd.NaT], dtype=object)) + + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", np.datetime64("NaT", "ns")], dtype=object)) + + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", np.timedelta64("NaT", "ns")], dtype=object)) + + +@pytest.mark.parametrize("na", [np.nan, np.float64("nan"), float("nan"), None, pd.NA]) +def test_constructor_nan_like(na): + expected = pd.arrays.StringArray(np.array(["a", pd.NA])) + tm.assert_extension_array_equal( + pd.arrays.StringArray(np.array(["a", na], dtype="object")), expected + ) + + +@pytest.mark.parametrize("copy", [True, False]) +def test_from_sequence_no_mutate(copy, cls, request): + nan_arr = np.array(["a", np.nan], dtype=object) + expected_input = nan_arr.copy() + na_arr = np.array(["a", pd.NA], dtype=object) + + result = cls._from_sequence(nan_arr, copy=copy) + + if cls in (ArrowStringArray, ArrowStringArrayNumpySemantics): + import pyarrow as pa + + expected = cls(pa.array(na_arr, type=pa.string(), from_pandas=True)) + else: + expected = cls(na_arr) + + tm.assert_extension_array_equal(result, expected) + tm.assert_numpy_array_equal(nan_arr, expected_input) + + +def test_astype_int(dtype): + arr = pd.array(["1", "2", "3"], dtype=dtype) + result = arr.astype("int64") + expected = np.array([1, 2, 3], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + arr = pd.array(["1", pd.NA, "3"], dtype=dtype) + msg = r"int\(\) argument must be a string, a bytes-like object or a( real)?
number" + with pytest.raises(TypeError, match=msg): + arr.astype("int64") + + +def test_astype_nullable_int(dtype): + arr = pd.array(["1", pd.NA, "3"], dtype=dtype) + + result = arr.astype("Int64") + expected = pd.array([1, pd.NA, 3], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + +def test_astype_float(dtype, any_float_dtype): + # Don't compare arrays (37974) + ser = pd.Series(["1.1", pd.NA, "3.3"], dtype=dtype) + result = ser.astype(any_float_dtype) + expected = pd.Series([1.1, np.nan, 3.3], dtype=any_float_dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.xfail(reason="Not implemented StringArray.sum") +def test_reduce(skipna, dtype): + arr = pd.Series(["a", "b", "c"], dtype=dtype) + result = arr.sum(skipna=skipna) + assert result == "abc" + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.xfail(reason="Not implemented StringArray.sum") +def test_reduce_missing(skipna, dtype): + arr = pd.Series([None, "a", None, "b", "c", None], dtype=dtype) + result = arr.sum(skipna=skipna) + if skipna: + assert result == "abc" + else: + assert pd.isna(result) + + +@pytest.mark.parametrize("method", ["min", "max"]) +@pytest.mark.parametrize("skipna", [True, False]) +def test_min_max(method, skipna, dtype, request): + arr = pd.Series(["a", "b", "c", None], dtype=dtype) + result = getattr(arr, method)(skipna=skipna) + if skipna: + expected = "a" if method == "min" else "c" + assert result == expected + else: + assert result is na_val(arr.dtype) + + +@pytest.mark.parametrize("method", ["min", "max"]) +@pytest.mark.parametrize("box", [pd.Series, pd.array]) +def test_min_max_numpy(method, box, dtype, request, arrow_string_storage): + if dtype.storage in arrow_string_storage and box is pd.array: + if box is pd.array: + reason = "'<=' not supported between instances of 'str' and 'NoneType'" + else: + reason = "'ArrowStringArray' object has no attribute 'max'" + mark = pytest.mark.xfail(raises=TypeError, reason=reason) + request.node.add_marker(mark) + + arr = box(["a", "b", "c", None], dtype=dtype) + result = getattr(np, method)(arr) + expected = "a" if method == "min" else "c" + assert result == expected + + +def test_fillna_args(dtype, request, arrow_string_storage): + # GH 37987 + + arr = pd.array(["a", pd.NA], dtype=dtype) + + res = arr.fillna(value="b") + expected = pd.array(["a", "b"], dtype=dtype) + tm.assert_extension_array_equal(res, expected) + + res = arr.fillna(value=np.str_("b")) + expected = pd.array(["a", "b"], dtype=dtype) + tm.assert_extension_array_equal(res, expected) + + if dtype.storage in arrow_string_storage: + msg = "Invalid value '1' for dtype string" + else: + msg = "Cannot set non-string value '1' into a StringArray." 
+ with pytest.raises(TypeError, match=msg): + arr.fillna(value=1) + + +def test_arrow_array(dtype): + # protocol added in 0.15.0 + pa = pytest.importorskip("pyarrow") + + data = pd.array(["a", "b", "c"], dtype=dtype) + arr = pa.array(data) + expected = pa.array(list(data), type=pa.string(), from_pandas=True) + if dtype.storage in ("pyarrow", "pyarrow_numpy") and pa_version_under12p0: + expected = pa.chunked_array(expected) + + assert arr.equals(expected) + + +def test_arrow_roundtrip(dtype, string_storage2): + # roundtrip possible from arrow 1.0.0 + pa = pytest.importorskip("pyarrow") + + data = pd.array(["a", "b", None], dtype=dtype) + df = pd.DataFrame({"a": data}) + table = pa.table(df) + assert table.field("a").type == "string" + with pd.option_context("string_storage", string_storage2): + result = table.to_pandas() + assert isinstance(result["a"].dtype, pd.StringDtype) + expected = df.astype(f"string[{string_storage2}]") + tm.assert_frame_equal(result, expected) + # ensure the missing value is represented by NA and not np.nan or None + assert result.loc[2, "a"] is na_val(result["a"].dtype) + + +def test_arrow_load_from_zero_chunks(dtype, string_storage2): + # GH-41040 + pa = pytest.importorskip("pyarrow") + + data = pd.array([], dtype=dtype) + df = pd.DataFrame({"a": data}) + table = pa.table(df) + assert table.field("a").type == "string" + # Instantiate the same table with no chunks at all + table = pa.table([pa.chunked_array([], type=pa.string())], schema=table.schema) + with pd.option_context("string_storage", string_storage2): + result = table.to_pandas() + assert isinstance(result["a"].dtype, pd.StringDtype) + expected = df.astype(f"string[{string_storage2}]") + tm.assert_frame_equal(result, expected) + + +def test_value_counts_na(dtype): + if getattr(dtype, "storage", "") == "pyarrow": + exp_dtype = "int64[pyarrow]" + elif getattr(dtype, "storage", "") == "pyarrow_numpy": + exp_dtype = "int64" + else: + exp_dtype = "Int64" + arr = pd.array(["a", "b", "a", pd.NA], dtype=dtype) + result = arr.value_counts(dropna=False) + expected = pd.Series([2, 1, 1], index=arr[[0, 1, 3]], dtype=exp_dtype, name="count") + tm.assert_series_equal(result, expected) + + result = arr.value_counts(dropna=True) + expected = pd.Series([2, 1], index=arr[:2], dtype=exp_dtype, name="count") + tm.assert_series_equal(result, expected) + + +def test_value_counts_with_normalize(dtype): + if getattr(dtype, "storage", "") == "pyarrow": + exp_dtype = "double[pyarrow]" + elif getattr(dtype, "storage", "") == "pyarrow_numpy": + exp_dtype = np.float64 + else: + exp_dtype = "Float64" + ser = pd.Series(["a", "b", "a", pd.NA], dtype=dtype) + result = ser.value_counts(normalize=True) + expected = pd.Series([2, 1], index=ser[:2], dtype=exp_dtype, name="proportion") / 3 + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "values, expected", + [ + (["a", "b", "c"], np.array([False, False, False])), + (["a", "b", None], np.array([False, False, True])), + ], +) +def test_use_inf_as_na(values, expected, dtype): + # https://github.com/pandas-dev/pandas/issues/33655 + values = pd.array(values, dtype=dtype) + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.use_inf_as_na", True): + result = values.isna() + tm.assert_numpy_array_equal(result, expected) + + result = pd.Series(values).isna() + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) + + result = pd.DataFrame(values).isna() + expected = 
pd.DataFrame(expected) + tm.assert_frame_equal(result, expected) + + +def test_memory_usage(dtype, arrow_string_storage): + # GH 33963 + + if dtype.storage in arrow_string_storage: + pytest.skip(f"not applicable for {dtype.storage}") + + series = pd.Series(["a", "b", "c"], dtype=dtype) + + assert 0 < series.nbytes <= series.memory_usage() < series.memory_usage(deep=True) + + +@pytest.mark.parametrize("float_dtype", [np.float16, np.float32, np.float64]) +def test_astype_from_float_dtype(float_dtype, dtype): + # https://github.com/pandas-dev/pandas/issues/36451 + ser = pd.Series([0.1], dtype=float_dtype) + result = ser.astype(dtype) + expected = pd.Series(["0.1"], dtype=dtype) + tm.assert_series_equal(result, expected) + + +def test_to_numpy_returns_pdna_default(dtype): + arr = pd.array(["a", pd.NA, "b"], dtype=dtype) + result = np.array(arr) + expected = np.array(["a", na_val(dtype), "b"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_na_value(dtype, nulls_fixture): + na_value = nulls_fixture + arr = pd.array(["a", pd.NA, "b"], dtype=dtype) + result = arr.to_numpy(na_value=na_value) + expected = np.array(["a", na_value, "b"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +def test_isin(dtype, fixed_now_ts): + s = pd.Series(["a", "b", None], dtype=dtype) + + result = s.isin(["a", "c"]) + expected = pd.Series([True, False, False]) + tm.assert_series_equal(result, expected) + + result = s.isin(["a", pd.NA]) + expected = pd.Series([True, False, True]) + tm.assert_series_equal(result, expected) + + result = s.isin([]) + expected = pd.Series([False, False, False]) + tm.assert_series_equal(result, expected) + + result = s.isin(["a", fixed_now_ts]) + expected = pd.Series([True, False, False]) + tm.assert_series_equal(result, expected) + + +def test_setitem_scalar_with_mask_validation(dtype): + # https://github.com/pandas-dev/pandas/issues/47628 + # setting None with a boolean mask (through _putmask) should still result + # in pd.NA values in the underlying array + ser = pd.Series(["a", "b", "c"], dtype=dtype) + mask = np.array([False, True, False]) + + ser[mask] = None + assert ser.array[1] is na_val(ser.dtype) + + # for other non-string we should also raise an error + ser = pd.Series(["a", "b", "c"], dtype=dtype) + if type(ser.array) is pd.arrays.StringArray: + msg = "Cannot set non-string value" + else: + msg = "Scalar must be NA or str" + with pytest.raises(TypeError, match=msg): + ser[mask] = 1 + + +def test_from_numpy_str(dtype): + vals = ["a", "b", "c"] + arr = np.array(vals, dtype=np.str_) + result = pd.array(arr, dtype=dtype) + expected = pd.array(vals, dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_tolist(dtype): + vals = ["a", "b", "c"] + arr = pd.array(vals, dtype=dtype) + result = arr.tolist() + expected = vals + tm.assert_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/string_/test_string_arrow.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/string_/test_string_arrow.py new file mode 100644 index 00000000..c1d424f1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/string_/test_string_arrow.py @@ -0,0 +1,266 @@ +import pickle +import re + +import numpy as np +import pytest + +from pandas.compat import pa_version_under7p0 + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays.string_ import ( + StringArray, + StringDtype, +) +from pandas.core.arrays.string_arrow import ( +
ArrowStringArray, + ArrowStringArrayNumpySemantics, +) + +skip_if_no_pyarrow = pytest.mark.skipif( + pa_version_under7p0, + reason="pyarrow>=7.0.0 is required for PyArrow backed StringArray", +) + + +@skip_if_no_pyarrow +def test_eq_all_na(): + a = pd.array([pd.NA, pd.NA], dtype=StringDtype("pyarrow")) + result = a == a + expected = pd.array([pd.NA, pd.NA], dtype="boolean[pyarrow]") + tm.assert_extension_array_equal(result, expected) + + +def test_config(string_storage): + with pd.option_context("string_storage", string_storage): + assert StringDtype().storage == string_storage + result = pd.array(["a", "b"]) + assert result.dtype.storage == string_storage + + expected = ( + StringDtype(string_storage).construct_array_type()._from_sequence(["a", "b"]) + ) + tm.assert_equal(result, expected) + + +def test_config_bad_storage_raises(): + msg = re.escape("Value must be one of python|pyarrow") + with pytest.raises(ValueError, match=msg): + pd.options.mode.string_storage = "foo" + + +@skip_if_no_pyarrow +@pytest.mark.parametrize("chunked", [True, False]) +@pytest.mark.parametrize("array", ["numpy", "pyarrow"]) +def test_constructor_not_string_type_raises(array, chunked, arrow_string_storage): + import pyarrow as pa + + array = pa if array in arrow_string_storage else np + + arr = array.array([1, 2, 3]) + if chunked: + if array is np: + pytest.skip("chunked not applicable to numpy array") + arr = pa.chunked_array(arr) + if array is np: + msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowExtensionArray" + else: + msg = re.escape( + "ArrowStringArray requires a PyArrow (chunked) array of string type" + ) + with pytest.raises(ValueError, match=msg): + ArrowStringArray(arr) + + +@pytest.mark.parametrize("chunked", [True, False]) +def test_constructor_not_string_type_value_dictionary_raises(chunked): + pa = pytest.importorskip("pyarrow") + + arr = pa.array([1, 2, 3], pa.dictionary(pa.int32(), pa.int32())) + if chunked: + arr = pa.chunked_array(arr) + + msg = re.escape( + "ArrowStringArray requires a PyArrow (chunked) array of string type" + ) + with pytest.raises(ValueError, match=msg): + ArrowStringArray(arr) + + +@pytest.mark.parametrize("chunked", [True, False]) +def test_constructor_valid_string_type_value_dictionary(chunked): + pa = pytest.importorskip("pyarrow") + + arr = pa.array(["1", "2", "3"], pa.dictionary(pa.int32(), pa.utf8())) + if chunked: + arr = pa.chunked_array(arr) + + arr = ArrowStringArray(arr) + assert pa.types.is_string(arr._pa_array.type.value_type) + + +def test_constructor_from_list(): + # GH#27673 + pytest.importorskip("pyarrow", minversion="1.0.0") + result = pd.Series(["E"], dtype=StringDtype(storage="pyarrow")) + assert isinstance(result.dtype, StringDtype) + assert result.dtype.storage == "pyarrow" + + +@skip_if_no_pyarrow +def test_from_sequence_wrong_dtype_raises(): + with pd.option_context("string_storage", "python"): + ArrowStringArray._from_sequence(["a", None, "c"], dtype="string") + + with pd.option_context("string_storage", "pyarrow"): + ArrowStringArray._from_sequence(["a", None, "c"], dtype="string") + + with pytest.raises(AssertionError, match=None): + ArrowStringArray._from_sequence(["a", None, "c"], dtype="string[python]") + + ArrowStringArray._from_sequence(["a", None, "c"], dtype="string[pyarrow]") + + with pytest.raises(AssertionError, match=None): + with pd.option_context("string_storage", "python"): + ArrowStringArray._from_sequence(["a", None, "c"], dtype=StringDtype()) + + with pd.option_context("string_storage", "pyarrow"): + ArrowStringArray._from_sequence(["a",
None, "c"], dtype=StringDtype()) + + with pytest.raises(AssertionError, match=None): + ArrowStringArray._from_sequence(["a", None, "c"], dtype=StringDtype("python")) + + ArrowStringArray._from_sequence(["a", None, "c"], dtype=StringDtype("pyarrow")) + + with pd.option_context("string_storage", "python"): + StringArray._from_sequence(["a", None, "c"], dtype="string") + + with pd.option_context("string_storage", "pyarrow"): + StringArray._from_sequence(["a", None, "c"], dtype="string") + + StringArray._from_sequence(["a", None, "c"], dtype="string[python]") + + with pytest.raises(AssertionError, match=None): + StringArray._from_sequence(["a", None, "c"], dtype="string[pyarrow]") + + with pd.option_context("string_storage", "python"): + StringArray._from_sequence(["a", None, "c"], dtype=StringDtype()) + + with pytest.raises(AssertionError, match=None): + with pd.option_context("string_storage", "pyarrow"): + StringArray._from_sequence(["a", None, "c"], dtype=StringDtype()) + + StringArray._from_sequence(["a", None, "c"], dtype=StringDtype("python")) + + with pytest.raises(AssertionError, match=None): + StringArray._from_sequence(["a", None, "c"], dtype=StringDtype("pyarrow")) + + +@pytest.mark.skipif( + not pa_version_under7p0, + reason="pyarrow is installed", +) +def test_pyarrow_not_installed_raises(): + msg = re.escape("pyarrow>=7.0.0 is required for PyArrow backed") + + with pytest.raises(ImportError, match=msg): + StringDtype(storage="pyarrow") + + with pytest.raises(ImportError, match=msg): + ArrowStringArray([]) + + with pytest.raises(ImportError, match=msg): + ArrowStringArrayNumpySemantics([]) + + with pytest.raises(ImportError, match=msg): + ArrowStringArray._from_sequence(["a", None, "b"]) + + +@skip_if_no_pyarrow +@pytest.mark.parametrize("multiple_chunks", [False, True]) +@pytest.mark.parametrize( + "key, value, expected", + [ + (-1, "XX", ["a", "b", "c", "d", "XX"]), + (1, "XX", ["a", "XX", "c", "d", "e"]), + (1, None, ["a", None, "c", "d", "e"]), + (1, pd.NA, ["a", None, "c", "d", "e"]), + ([1, 3], "XX", ["a", "XX", "c", "XX", "e"]), + ([1, 3], ["XX", "YY"], ["a", "XX", "c", "YY", "e"]), + ([1, 3], ["XX", None], ["a", "XX", "c", None, "e"]), + ([1, 3], ["XX", pd.NA], ["a", "XX", "c", None, "e"]), + ([0, -1], ["XX", "YY"], ["XX", "b", "c", "d", "YY"]), + ([-1, 0], ["XX", "YY"], ["YY", "b", "c", "d", "XX"]), + (slice(3, None), "XX", ["a", "b", "c", "XX", "XX"]), + (slice(2, 4), ["XX", "YY"], ["a", "b", "XX", "YY", "e"]), + (slice(3, 1, -1), ["XX", "YY"], ["a", "b", "YY", "XX", "e"]), + (slice(None), "XX", ["XX", "XX", "XX", "XX", "XX"]), + ([False, True, False, True, False], ["XX", "YY"], ["a", "XX", "c", "YY", "e"]), + ], +) +def test_setitem(multiple_chunks, key, value, expected): + import pyarrow as pa + + result = pa.array(list("abcde")) + expected = pa.array(expected) + + if multiple_chunks: + result = pa.chunked_array([result[:3], result[3:]]) + expected = pa.chunked_array([expected[:3], expected[3:]]) + + result = ArrowStringArray(result) + expected = ArrowStringArray(expected) + + result[key] = value + tm.assert_equal(result, expected) + + +@skip_if_no_pyarrow +def test_setitem_invalid_indexer_raises(): + import pyarrow as pa + + arr = ArrowStringArray(pa.array(list("abcde"))) + + with pytest.raises(IndexError, match=None): + arr[5] = "foo" + + with pytest.raises(IndexError, match=None): + arr[-6] = "foo" + + with pytest.raises(IndexError, match=None): + arr[[0, 5]] = "foo" + + with pytest.raises(IndexError, match=None): + arr[[0, -6]] = "foo" + + with 
pytest.raises(IndexError, match=None): + arr[[True, True, False]] = "foo" + + with pytest.raises(ValueError, match=None): + arr[[0, 1]] = ["foo", "bar", "baz"] + + +@skip_if_no_pyarrow +@pytest.mark.parametrize("dtype", ["string[pyarrow]", "string[pyarrow_numpy]"]) +def test_pickle_roundtrip(dtype): + # GH 42600 + expected = pd.Series(range(10), dtype=dtype) + expected_sliced = expected.head(2) + full_pickled = pickle.dumps(expected) + sliced_pickled = pickle.dumps(expected_sliced) + + assert len(full_pickled) > len(sliced_pickled) + + result = pickle.loads(full_pickled) + tm.assert_series_equal(result, expected) + + result_sliced = pickle.loads(sliced_pickled) + tm.assert_series_equal(result_sliced, expected_sliced) + + +@skip_if_no_pyarrow +def test_string_dtype_error_message(): + # GH#55051 + msg = "Storage must be 'python', 'pyarrow' or 'pyarrow_numpy'." + with pytest.raises(ValueError, match=msg): + StringDtype("bla") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_array.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_array.py new file mode 100644 index 00000000..2746cd91 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_array.py @@ -0,0 +1,446 @@ +import datetime +import decimal +import re + +import numpy as np +import pytest +import pytz + +import pandas as pd +import pandas._testing as tm +from pandas.api.extensions import register_extension_dtype +from pandas.arrays import ( + BooleanArray, + DatetimeArray, + FloatingArray, + IntegerArray, + IntervalArray, + SparseArray, + TimedeltaArray, +) +from pandas.core.arrays import ( + NumpyExtensionArray, + period_array, +) +from pandas.tests.extension.decimal import ( + DecimalArray, + DecimalDtype, + to_decimal, +) + + +@pytest.mark.parametrize("dtype_unit", ["M8[h]", "M8[m]", "m8[h]", "m8[m]"]) +def test_dt64_array(dtype_unit): + # PR 53817 + dtype_var = np.dtype(dtype_unit) + msg = ( + r"datetime64 and timedelta64 dtype resolutions other than " + r"'s', 'ms', 'us', and 'ns' are deprecated. " + r"In future releases passing unsupported resolutions will " + r"raise an exception." + ) + with tm.assert_produces_warning(FutureWarning, match=re.escape(msg)): + pd.array([], dtype=dtype_var) + + +@pytest.mark.parametrize( + "data, dtype, expected", + [ + # Basic NumPy defaults. + ([], None, FloatingArray._from_sequence([])), + ([1, 2], None, IntegerArray._from_sequence([1, 2])), + ([1, 2], object, NumpyExtensionArray(np.array([1, 2], dtype=object))), + ( + [1, 2], + np.dtype("float32"), + NumpyExtensionArray(np.array([1.0, 2.0], dtype=np.dtype("float32"))), + ), + ( + np.array([], dtype=object), + None, + NumpyExtensionArray(np.array([], dtype=object)), + ), + (np.array([1, 2], dtype="int64"), None, IntegerArray._from_sequence([1, 2])), + ( + np.array([1.0, 2.0], dtype="float64"), + None, + FloatingArray._from_sequence([1.0, 2.0]), + ), + # String alias passes through to NumPy + ([1, 2], "float32", NumpyExtensionArray(np.array([1, 2], dtype="float32"))), + ([1, 2], "int64", NumpyExtensionArray(np.array([1, 2], dtype=np.int64))), + # GH#44715 FloatingArray does not support float16, so fall + # back to NumpyExtensionArray + ( + np.array([1, 2], dtype=np.float16), + None, + NumpyExtensionArray(np.array([1, 2], dtype=np.float16)), + ), + # idempotency with e.g.
pd.array(pd.array([1, 2], dtype="int64")) + ( + NumpyExtensionArray(np.array([1, 2], dtype=np.int32)), + None, + NumpyExtensionArray(np.array([1, 2], dtype=np.int32)), + ), + # Period alias + ( + [pd.Period("2000", "D"), pd.Period("2001", "D")], + "Period[D]", + period_array(["2000", "2001"], freq="D"), + ), + # Period dtype + ( + [pd.Period("2000", "D")], + pd.PeriodDtype("D"), + period_array(["2000"], freq="D"), + ), + # Datetime (naive) + ( + [1, 2], + np.dtype("datetime64[ns]"), + DatetimeArray._from_sequence(np.array([1, 2], dtype="datetime64[ns]")), + ), + ( + [1, 2], + np.dtype("datetime64[s]"), + DatetimeArray._from_sequence(np.array([1, 2], dtype="datetime64[s]")), + ), + ( + np.array([1, 2], dtype="datetime64[ns]"), + None, + DatetimeArray._from_sequence(np.array([1, 2], dtype="datetime64[ns]")), + ), + ( + pd.DatetimeIndex(["2000", "2001"]), + np.dtype("datetime64[ns]"), + DatetimeArray._from_sequence(["2000", "2001"]), + ), + ( + pd.DatetimeIndex(["2000", "2001"]), + None, + DatetimeArray._from_sequence(["2000", "2001"]), + ), + ( + ["2000", "2001"], + np.dtype("datetime64[ns]"), + DatetimeArray._from_sequence(["2000", "2001"]), + ), + # Datetime (tz-aware) + ( + ["2000", "2001"], + pd.DatetimeTZDtype(tz="CET"), + DatetimeArray._from_sequence( + ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz="CET") + ), + ), + # Timedelta + ( + ["1H", "2H"], + np.dtype("timedelta64[ns]"), + TimedeltaArray._from_sequence(["1H", "2H"]), + ), + ( + pd.TimedeltaIndex(["1H", "2H"]), + np.dtype("timedelta64[ns]"), + TimedeltaArray._from_sequence(["1H", "2H"]), + ), + ( + np.array([1, 2], dtype="m8[s]"), + np.dtype("timedelta64[s]"), + TimedeltaArray._from_sequence(np.array([1, 2], dtype="m8[s]")), + ), + ( + pd.TimedeltaIndex(["1H", "2H"]), + None, + TimedeltaArray._from_sequence(["1H", "2H"]), + ), + ( + # preserve non-nano, i.e. don't cast to NumpyExtensionArray + TimedeltaArray._simple_new( + np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]") + ), + None, + TimedeltaArray._simple_new( + np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]") + ), + ), + ( + # preserve non-nano, i.e. 
don't cast to NumpyExtensionArray + TimedeltaArray._simple_new( + np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]") + ), + np.dtype("m8[s]"), + TimedeltaArray._simple_new( + np.arange(5, dtype=np.int64).view("m8[s]"), dtype=np.dtype("m8[s]") + ), + ), + # Category + (["a", "b"], "category", pd.Categorical(["a", "b"])), + ( + ["a", "b"], + pd.CategoricalDtype(None, ordered=True), + pd.Categorical(["a", "b"], ordered=True), + ), + # Interval + ( + [pd.Interval(1, 2), pd.Interval(3, 4)], + "interval", + IntervalArray.from_tuples([(1, 2), (3, 4)]), + ), + # Sparse + ([0, 1], "Sparse[int64]", SparseArray([0, 1], dtype="int64")), + # IntegerNA + ([1, None], "Int16", pd.array([1, None], dtype="Int16")), + ( + pd.Series([1, 2]), + None, + NumpyExtensionArray(np.array([1, 2], dtype=np.int64)), + ), + # String + ( + ["a", None], + "string", + pd.StringDtype().construct_array_type()._from_sequence(["a", None]), + ), + ( + ["a", None], + pd.StringDtype(), + pd.StringDtype().construct_array_type()._from_sequence(["a", None]), + ), + # Boolean + ([True, None], "boolean", BooleanArray._from_sequence([True, None])), + ([True, None], pd.BooleanDtype(), BooleanArray._from_sequence([True, None])), + # Index + (pd.Index([1, 2]), None, NumpyExtensionArray(np.array([1, 2], dtype=np.int64))), + # Series[EA] returns the EA + ( + pd.Series(pd.Categorical(["a", "b"], categories=["a", "b", "c"])), + None, + pd.Categorical(["a", "b"], categories=["a", "b", "c"]), + ), + # "3rd party" EAs work + ([decimal.Decimal(0), decimal.Decimal(1)], "decimal", to_decimal([0, 1])), + # pass an ExtensionArray, but a different dtype + ( + period_array(["2000", "2001"], freq="D"), + "category", + pd.Categorical([pd.Period("2000", "D"), pd.Period("2001", "D")]), + ), + ], +) +def test_array(data, dtype, expected): + result = pd.array(data, dtype=dtype) + tm.assert_equal(result, expected) + + +def test_array_copy(): + a = np.array([1, 2]) + # default is to copy + b = pd.array(a, dtype=a.dtype) + assert not tm.shares_memory(a, b) + + # copy=True + b = pd.array(a, dtype=a.dtype, copy=True) + assert not tm.shares_memory(a, b) + + # copy=False + b = pd.array(a, dtype=a.dtype, copy=False) + assert tm.shares_memory(a, b) + + +cet = pytz.timezone("CET") + + +@pytest.mark.parametrize( + "data, expected", + [ + # period + ( + [pd.Period("2000", "D"), pd.Period("2001", "D")], + period_array(["2000", "2001"], freq="D"), + ), + # interval + ([pd.Interval(0, 1), pd.Interval(1, 2)], IntervalArray.from_breaks([0, 1, 2])), + # datetime + ( + [pd.Timestamp("2000"), pd.Timestamp("2001")], + DatetimeArray._from_sequence(["2000", "2001"]), + ), + ( + [datetime.datetime(2000, 1, 1), datetime.datetime(2001, 1, 1)], + DatetimeArray._from_sequence(["2000", "2001"]), + ), + ( + np.array([1, 2], dtype="M8[ns]"), + DatetimeArray(np.array([1, 2], dtype="M8[ns]")), + ), + ( + np.array([1, 2], dtype="M8[us]"), + DatetimeArray._simple_new( + np.array([1, 2], dtype="M8[us]"), dtype=np.dtype("M8[us]") + ), + ), + # datetimetz + ( + [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2001", tz="CET")], + DatetimeArray._from_sequence( + ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz="CET") + ), + ), + ( + [ + datetime.datetime(2000, 1, 1, tzinfo=cet), + datetime.datetime(2001, 1, 1, tzinfo=cet), + ], + DatetimeArray._from_sequence( + ["2000", "2001"], dtype=pd.DatetimeTZDtype(tz=cet) + ), + ), + # timedelta + ( + [pd.Timedelta("1H"), pd.Timedelta("2H")], + TimedeltaArray._from_sequence(["1H", "2H"]), + ), + ( + np.array([1, 2], dtype="m8[ns]"), + 
TimedeltaArray(np.array([1, 2], dtype="m8[ns]")), + ), + ( + np.array([1, 2], dtype="m8[us]"), + TimedeltaArray(np.array([1, 2], dtype="m8[us]")), + ), + # integer + ([1, 2], IntegerArray._from_sequence([1, 2])), + ([1, None], IntegerArray._from_sequence([1, None])), + ([1, pd.NA], IntegerArray._from_sequence([1, pd.NA])), + ([1, np.nan], IntegerArray._from_sequence([1, np.nan])), + # float + ([0.1, 0.2], FloatingArray._from_sequence([0.1, 0.2])), + ([0.1, None], FloatingArray._from_sequence([0.1, pd.NA])), + ([0.1, np.nan], FloatingArray._from_sequence([0.1, pd.NA])), + ([0.1, pd.NA], FloatingArray._from_sequence([0.1, pd.NA])), + # integer-like float + ([1.0, 2.0], FloatingArray._from_sequence([1.0, 2.0])), + ([1.0, None], FloatingArray._from_sequence([1.0, pd.NA])), + ([1.0, np.nan], FloatingArray._from_sequence([1.0, pd.NA])), + ([1.0, pd.NA], FloatingArray._from_sequence([1.0, pd.NA])), + # mixed-integer-float + ([1, 2.0], FloatingArray._from_sequence([1.0, 2.0])), + ([1, np.nan, 2.0], FloatingArray._from_sequence([1.0, None, 2.0])), + # string + ( + ["a", "b"], + pd.StringDtype().construct_array_type()._from_sequence(["a", "b"]), + ), + ( + ["a", None], + pd.StringDtype().construct_array_type()._from_sequence(["a", None]), + ), + # Boolean + ([True, False], BooleanArray._from_sequence([True, False])), + ([True, None], BooleanArray._from_sequence([True, None])), + ], +) +def test_array_inference(data, expected): + result = pd.array(data) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "data", + [ + # mix of frequencies + [pd.Period("2000", "D"), pd.Period("2001", "A")], + # mix of closed + [pd.Interval(0, 1, closed="left"), pd.Interval(1, 2, closed="right")], + # Mix of timezones + [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000", tz="UTC")], + # Mix of tz-aware and tz-naive + [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000")], + np.array([pd.Timestamp("2000"), pd.Timestamp("2000", tz="CET")]), + ], +) +def test_array_inference_fails(data): + result = pd.array(data) + expected = NumpyExtensionArray(np.array(data, dtype=object)) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("data", [np.array(0)]) +def test_nd_raises(data): + with pytest.raises(ValueError, match="NumpyExtensionArray must be 1-dimensional"): + pd.array(data, dtype="int64") + + +def test_scalar_raises(): + with pytest.raises(ValueError, match="Cannot pass scalar '1'"): + pd.array(1) + + +def test_dataframe_raises(): + # GH#51167 don't accidentally cast to StringArray by doing inference on columns + df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) + msg = "Cannot pass DataFrame to 'pandas.array'" + with pytest.raises(TypeError, match=msg): + pd.array(df) + + +def test_bounds_check(): + # GH21796 + with pytest.raises( + TypeError, match=r"cannot safely cast non-equivalent int(32|64) to uint16" + ): + pd.array([-1, 2, 3], dtype="UInt16") + + +# --------------------------------------------------------------------------- +# A couple dummy classes to ensure that Series and Indexes are unboxed before +# getting to the EA classes. + + +@register_extension_dtype +class DecimalDtype2(DecimalDtype): + name = "decimal2" + + @classmethod + def construct_array_type(cls): + """ + Return the array type associated with this dtype. 
+ + Returns + ------- + type + """ + return DecimalArray2 + + +class DecimalArray2(DecimalArray): + @classmethod + def _from_sequence(cls, scalars, dtype=None, copy=False): + if isinstance(scalars, (pd.Series, pd.Index)): + raise TypeError("scalars should not be of type pd.Series or pd.Index") + + return super()._from_sequence(scalars, dtype=dtype, copy=copy) + + +def test_array_unboxes(index_or_series): + box = index_or_series + + data = box([decimal.Decimal("1"), decimal.Decimal("2")]) + # make sure it works + with pytest.raises( + TypeError, match="scalars should not be of type pd.Series or pd.Index" + ): + DecimalArray2._from_sequence(data) + + result = pd.array(data, dtype="decimal2") + expected = DecimalArray2._from_sequence(data.values) + tm.assert_equal(result, expected) + + +def test_array_to_numpy_na(): + # GH#40638 + arr = pd.array([pd.NA, 1], dtype="string") + result = arr.to_numpy(na_value=True, dtype=bool) + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_datetimelike.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_datetimelike.py new file mode 100644 index 00000000..96aab94b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_datetimelike.py @@ -0,0 +1,1335 @@ +from __future__ import annotations + +import re +import warnings + +import numpy as np +import pytest + +from pandas._libs import ( + NaT, + OutOfBoundsDatetime, + Timestamp, +) + +import pandas as pd +from pandas import ( + DatetimeIndex, + Period, + PeriodIndex, + TimedeltaIndex, +) +import pandas._testing as tm +from pandas.core.arrays import ( + DatetimeArray, + NumpyExtensionArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.arrays.datetimes import _sequence_to_dt64ns +from pandas.core.arrays.timedeltas import sequence_to_td64ns + + +# TODO: more freq variants +@pytest.fixture(params=["D", "B", "W", "M", "Q", "Y"]) +def freqstr(request): + """Fixture returning parametrized frequency in string format.""" + return request.param + + +@pytest.fixture +def period_index(freqstr): + """ + A fixture to provide PeriodIndex objects with different frequencies. + + Most PeriodArray behavior is already tested in PeriodIndex tests, + so here we just test that the PeriodArray behavior matches + the PeriodIndex behavior. + """ + # TODO: non-monotone indexes; NaTs, different start dates + with warnings.catch_warnings(): + # suppress deprecation of Period[B] + warnings.filterwarnings( + "ignore", message="Period with BDay freq", category=FutureWarning + ) + pi = pd.period_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr) + return pi + + +@pytest.fixture +def datetime_index(freqstr): + """ + A fixture to provide DatetimeIndex objects with different frequencies. + + Most DatetimeArray behavior is already tested in DatetimeIndex tests, + so here we just test that the DatetimeArray behavior matches + the DatetimeIndex behavior. + """ + # TODO: non-monotone indexes; NaTs, different start dates, timezones + dti = pd.date_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr) + return dti + + +@pytest.fixture +def timedelta_index(): + """ + A fixture to provide TimedeltaIndex objects with different frequencies. + Most TimedeltaArray behavior is already tested in TimedeltaIndex tests, + so here we just test that the TimedeltaArray behavior matches + the TimedeltaIndex behavior. 
+ """ + # TODO: flesh this out + return TimedeltaIndex(["1 Day", "3 Hours", "NaT"]) + + +class SharedTests: + index_cls: type[DatetimeIndex | PeriodIndex | TimedeltaIndex] + + @pytest.fixture + def arr1d(self): + """Fixture returning DatetimeArray with daily frequency.""" + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + arr = self.array_cls(data, freq="D") + return arr + + def test_compare_len1_raises(self, arr1d): + # make sure we raise when comparing with different lengths, specific + # to the case where one has length-1, which numpy would broadcast + arr = arr1d + idx = self.index_cls(arr) + + with pytest.raises(ValueError, match="Lengths must match"): + arr == arr[:1] + + # test the index classes while we're at it, GH#23078 + with pytest.raises(ValueError, match="Lengths must match"): + idx <= idx[[0]] + + @pytest.mark.parametrize( + "result", + [ + pd.date_range("2020", periods=3), + pd.date_range("2020", periods=3, tz="UTC"), + pd.timedelta_range("0 days", periods=3), + pd.period_range("2020Q1", periods=3, freq="Q"), + ], + ) + def test_compare_with_Categorical(self, result): + expected = pd.Categorical(result) + assert all(result == expected) + assert not any(result != expected) + + @pytest.mark.parametrize("reverse", [True, False]) + @pytest.mark.parametrize("as_index", [True, False]) + def test_compare_categorical_dtype(self, arr1d, as_index, reverse, ordered): + other = pd.Categorical(arr1d, ordered=ordered) + if as_index: + other = pd.CategoricalIndex(other) + + left, right = arr1d, other + if reverse: + left, right = right, left + + ones = np.ones(arr1d.shape, dtype=bool) + zeros = ~ones + + result = left == right + tm.assert_numpy_array_equal(result, ones) + + result = left != right + tm.assert_numpy_array_equal(result, zeros) + + if not reverse and not as_index: + # Otherwise Categorical raises TypeError bc it is not ordered + # TODO: we should probably get the same behavior regardless? + result = left < right + tm.assert_numpy_array_equal(result, zeros) + + result = left <= right + tm.assert_numpy_array_equal(result, ones) + + result = left > right + tm.assert_numpy_array_equal(result, zeros) + + result = left >= right + tm.assert_numpy_array_equal(result, ones) + + def test_take(self): + data = np.arange(100, dtype="i8") * 24 * 3600 * 10**9 + np.random.default_rng(2).shuffle(data) + + if self.array_cls is PeriodArray: + arr = PeriodArray(data, dtype="period[D]") + else: + arr = self.array_cls(data) + idx = self.index_cls._simple_new(arr) + + takers = [1, 4, 94] + result = arr.take(takers) + expected = idx.take(takers) + + tm.assert_index_equal(self.index_cls(result), expected) + + takers = np.array([1, 4, 94]) + result = arr.take(takers) + expected = idx.take(takers) + + tm.assert_index_equal(self.index_cls(result), expected) + + @pytest.mark.parametrize("fill_value", [2, 2.0, Timestamp(2021, 1, 1, 12).time]) + def test_take_fill_raises(self, fill_value, arr1d): + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. 
Got" + with pytest.raises(TypeError, match=msg): + arr1d.take([0, 1], allow_fill=True, fill_value=fill_value) + + def test_take_fill(self, arr1d): + np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + + arr = arr1d # self.array_cls(data, freq="D") + + result = arr.take([-1, 1], allow_fill=True, fill_value=None) + assert result[0] is NaT + + result = arr.take([-1, 1], allow_fill=True, fill_value=np.nan) + assert result[0] is NaT + + result = arr.take([-1, 1], allow_fill=True, fill_value=NaT) + assert result[0] is NaT + + @pytest.mark.filterwarnings( + "ignore:Period with BDay freq is deprecated:FutureWarning" + ) + def test_take_fill_str(self, arr1d): + # Cast str fill_value matching other fill_value-taking methods + result = arr1d.take([-1, 1], allow_fill=True, fill_value=str(arr1d[-1])) + expected = arr1d[[-1, 1]] + tm.assert_equal(result, expected) + + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got" + with pytest.raises(TypeError, match=msg): + arr1d.take([-1, 1], allow_fill=True, fill_value="foo") + + def test_concat_same_type(self, arr1d): + arr = arr1d + idx = self.index_cls(arr) + idx = idx.insert(0, NaT) + arr = self.array_cls(idx) + + result = arr._concat_same_type([arr[:-1], arr[1:], arr]) + arr2 = arr.astype(object) + expected = self.index_cls(np.concatenate([arr2[:-1], arr2[1:], arr2]), None) + + tm.assert_index_equal(self.index_cls(result), expected) + + def test_unbox_scalar(self, arr1d): + result = arr1d._unbox_scalar(arr1d[0]) + expected = arr1d._ndarray.dtype.type + assert isinstance(result, expected) + + result = arr1d._unbox_scalar(NaT) + assert isinstance(result, expected) + + msg = f"'value' should be a {self.scalar_type.__name__}." + with pytest.raises(ValueError, match=msg): + arr1d._unbox_scalar("foo") + + def test_check_compatible_with(self, arr1d): + arr1d._check_compatible_with(arr1d[0]) + arr1d._check_compatible_with(arr1d[:1]) + arr1d._check_compatible_with(NaT) + + def test_scalar_from_string(self, arr1d): + result = arr1d._scalar_from_string(str(arr1d[0])) + assert result == arr1d[0] + + def test_reduce_invalid(self, arr1d): + msg = "does not support reduction 'not a method'" + with pytest.raises(TypeError, match=msg): + arr1d._reduce("not a method") + + @pytest.mark.parametrize("method", ["pad", "backfill"]) + def test_fillna_method_doesnt_change_orig(self, method): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + if self.array_cls is PeriodArray: + arr = self.array_cls(data, dtype="period[D]") + else: + arr = self.array_cls(data) + arr[4] = NaT + + fill_value = arr[3] if method == "pad" else arr[5] + + result = arr._pad_or_backfill(method=method) + assert result[4] == fill_value + + # check that the original was not changed + assert arr[4] is NaT + + def test_searchsorted(self): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + if self.array_cls is PeriodArray: + arr = self.array_cls(data, dtype="period[D]") + else: + arr = self.array_cls(data) + + # scalar + result = arr.searchsorted(arr[1]) + assert result == 1 + + result = arr.searchsorted(arr[2], side="right") + assert result == 3 + + # own-type + result = arr.searchsorted(arr[1:3]) + expected = np.array([1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + result = arr.searchsorted(arr[1:3], side="right") + expected = np.array([2, 3], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + # GH#29884 match numpy convention on whether NaT goes + # at the end or the beginning + result = arr.searchsorted(NaT) + assert result == 10 + + 
@pytest.mark.parametrize("box", [None, "index", "series"]) + def test_searchsorted_castable_strings(self, arr1d, box, string_storage): + arr = arr1d + if box is None: + pass + elif box == "index": + # Test the equivalent Index.searchsorted method while we're here + arr = self.index_cls(arr) + else: + # Test the equivalent Series.searchsorted method while we're here + arr = pd.Series(arr) + + # scalar + result = arr.searchsorted(str(arr[1])) + assert result == 1 + + result = arr.searchsorted(str(arr[2]), side="right") + assert result == 3 + + result = arr.searchsorted([str(x) for x in arr[1:3]]) + expected = np.array([1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + with pytest.raises( + TypeError, + match=re.escape( + f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', " + "or array of those. Got 'str' instead." + ), + ): + arr.searchsorted("foo") + + if string_storage == "python": + arr_type = "StringArray" + elif string_storage == "pyarrow": + arr_type = "ArrowStringArray" + else: + arr_type = "ArrowStringArrayNumpySemantics" + + with pd.option_context("string_storage", string_storage): + with pytest.raises( + TypeError, + match=re.escape( + f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', " + f"or array of those. Got '{arr_type}' instead." + ), + ): + arr.searchsorted([str(arr[1]), "baz"]) + + def test_getitem_near_implementation_bounds(self): + # We only check tz-naive for DTA bc the bounds are slightly different + # for other tzs + i8vals = np.asarray([NaT._value + n for n in range(1, 5)], dtype="i8") + if self.array_cls is PeriodArray: + arr = self.array_cls(i8vals, dtype="period[ns]") + else: + arr = self.array_cls(i8vals, freq="ns") + arr[0] # should not raise OutOfBoundsDatetime + + index = pd.Index(arr) + index[0] # should not raise OutOfBoundsDatetime + + ser = pd.Series(arr) + ser[0] # should not raise OutOfBoundsDatetime + + def test_getitem_2d(self, arr1d): + # 2d slicing on a 1D array + expected = type(arr1d)(arr1d._ndarray[:, np.newaxis], dtype=arr1d.dtype) + result = arr1d[:, np.newaxis] + tm.assert_equal(result, expected) + + # Lookup on a 2D array + arr2d = expected + expected = type(arr2d)(arr2d._ndarray[:3, 0], dtype=arr2d.dtype) + result = arr2d[:3, 0] + tm.assert_equal(result, expected) + + # Scalar lookup + result = arr2d[-1, 0] + expected = arr1d[-1] + assert result == expected + + def test_iter_2d(self, arr1d): + data2d = arr1d._ndarray[:3, np.newaxis] + arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype) + result = list(arr2d) + assert len(result) == 3 + for x in result: + assert isinstance(x, type(arr1d)) + assert x.ndim == 1 + assert x.dtype == arr1d.dtype + + def test_repr_2d(self, arr1d): + data2d = arr1d._ndarray[:3, np.newaxis] + arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype) + + result = repr(arr2d) + + if isinstance(arr2d, TimedeltaArray): + expected = ( + f"<{type(arr2d).__name__}>\n" + "[\n" + f"['{arr1d[0]._repr_base()}'],\n" + f"['{arr1d[1]._repr_base()}'],\n" + f"['{arr1d[2]._repr_base()}']\n" + "]\n" + f"Shape: (3, 1), dtype: {arr1d.dtype}" + ) + else: + expected = ( + f"<{type(arr2d).__name__}>\n" + "[\n" + f"['{arr1d[0]}'],\n" + f"['{arr1d[1]}'],\n" + f"['{arr1d[2]}']\n" + "]\n" + f"Shape: (3, 1), dtype: {arr1d.dtype}" + ) + + assert result == expected + + def test_setitem(self): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + if self.array_cls is PeriodArray: + arr = self.array_cls(data, dtype="period[D]") + else: + arr = self.array_cls(data, freq="D") + + arr[0] = arr[1] + 
expected = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
+ expected[0] = expected[1]
+
+ tm.assert_numpy_array_equal(arr.asi8, expected)
+
+ arr[:2] = arr[-2:]
+ expected[:2] = expected[-2:]
+ tm.assert_numpy_array_equal(arr.asi8, expected)
+
+ @pytest.mark.parametrize(
+ "box",
+ [
+ pd.Index,
+ pd.Series,
+ np.array,
+ list,
+ NumpyExtensionArray,
+ ],
+ )
+ def test_setitem_object_dtype(self, box, arr1d):
+ expected = arr1d.copy()[::-1]
+ if expected.dtype.kind in ["m", "M"]:
+ expected = expected._with_freq(None)
+
+ vals = expected
+ if box is list:
+ vals = list(vals)
+ elif box is np.array:
+ # if we do np.array(x).astype(object) then dt64 and td64 cast to ints
+ vals = np.array(vals.astype(object))
+ elif box is NumpyExtensionArray:
+ vals = box(np.asarray(vals, dtype=object))
+ else:
+ vals = box(vals).astype(object)
+
+ arr1d[:] = vals
+
+ tm.assert_equal(arr1d, expected)
+
+ def test_setitem_strs(self, arr1d):
+ # Check that we parse strs in both scalar and listlike
+
+ # Setting list-like of strs
+ expected = arr1d.copy()
+ expected[[0, 1]] = arr1d[-2:]
+
+ result = arr1d.copy()
+ result[:2] = [str(x) for x in arr1d[-2:]]
+ tm.assert_equal(result, expected)
+
+ # Same thing but now for just a scalar str
+ expected = arr1d.copy()
+ expected[0] = arr1d[-1]
+
+ result = arr1d.copy()
+ result[0] = str(arr1d[-1])
+ tm.assert_equal(result, expected)
+
+ @pytest.mark.parametrize("as_index", [True, False])
+ def test_setitem_categorical(self, arr1d, as_index):
+ expected = arr1d.copy()[::-1]
+ if not isinstance(expected, PeriodArray):
+ expected = expected._with_freq(None)
+
+ cat = pd.Categorical(arr1d)
+ if as_index:
+ cat = pd.CategoricalIndex(cat)
+
+ arr1d[:] = cat[::-1]
+
+ tm.assert_equal(arr1d, expected)
+
+ def test_setitem_raises(self, arr1d):
+ arr = arr1d[:10]
+ val = arr[0]
+
+ with pytest.raises(IndexError, match="index 12 is out of bounds"):
+ arr[12] = val
+
+ with pytest.raises(TypeError, match="value should be a.* 'object'"):
+ arr[0] = object()
+
+ msg = "cannot set using a list-like indexer with a different length"
+ with pytest.raises(ValueError, match=msg):
+ # GH#36339
+ arr[[]] = [arr[1]]
+
+ msg = "cannot set using a slice indexer with a different length than"
+ with pytest.raises(ValueError, match=msg):
+ # GH#36339
+ arr[1:1] = arr[:3]
+
+ @pytest.mark.parametrize("box", [list, np.array, pd.Index, pd.Series])
+ def test_setitem_numeric_raises(self, arr1d, box):
+ # We don't cast e.g. int64 to our own dtype for setitem
+
+ msg = (
+ f"value should be a '{arr1d._scalar_type.__name__}', "
+ "'NaT', or array of those. 
Got" + ) + with pytest.raises(TypeError, match=msg): + arr1d[:2] = box([0, 1]) + + with pytest.raises(TypeError, match=msg): + arr1d[:2] = box([0.0, 1.0]) + + def test_inplace_arithmetic(self): + # GH#24115 check that iadd and isub are actually in-place + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + if self.array_cls is PeriodArray: + arr = self.array_cls(data, dtype="period[D]") + else: + arr = self.array_cls(data, freq="D") + + expected = arr + pd.Timedelta(days=1) + arr += pd.Timedelta(days=1) + tm.assert_equal(arr, expected) + + expected = arr - pd.Timedelta(days=1) + arr -= pd.Timedelta(days=1) + tm.assert_equal(arr, expected) + + def test_shift_fill_int_deprecated(self, arr1d): + # GH#31971, enforced in 2.0 + with pytest.raises(TypeError, match="value should be a"): + arr1d.shift(1, fill_value=1) + + def test_median(self, arr1d): + arr = arr1d + if len(arr) % 2 == 0: + # make it easier to define `expected` + arr = arr[:-1] + + expected = arr[len(arr) // 2] + + result = arr.median() + assert type(result) is type(expected) + assert result == expected + + arr[len(arr) // 2] = NaT + if not isinstance(expected, Period): + expected = arr[len(arr) // 2 - 1 : len(arr) // 2 + 2].mean() + + assert arr.median(skipna=False) is NaT + + result = arr.median() + assert type(result) is type(expected) + assert result == expected + + assert arr[:0].median() is NaT + assert arr[:0].median(skipna=False) is NaT + + # 2d Case + arr2 = arr.reshape(-1, 1) + + result = arr2.median(axis=None) + assert type(result) is type(expected) + assert result == expected + + assert arr2.median(axis=None, skipna=False) is NaT + + result = arr2.median(axis=0) + expected2 = type(arr)._from_sequence([expected], dtype=arr.dtype) + tm.assert_equal(result, expected2) + + result = arr2.median(axis=0, skipna=False) + expected2 = type(arr)._from_sequence([NaT], dtype=arr.dtype) + tm.assert_equal(result, expected2) + + result = arr2.median(axis=1) + tm.assert_equal(result, arr) + + result = arr2.median(axis=1, skipna=False) + tm.assert_equal(result, arr) + + def test_from_integer_array(self): + arr = np.array([1, 2, 3], dtype=np.int64) + expected = self.array_cls(arr, dtype=self.example_dtype) + + data = pd.array(arr, dtype="Int64") + result = self.array_cls(data, dtype=self.example_dtype) + + tm.assert_extension_array_equal(result, expected) + + +class TestDatetimeArray(SharedTests): + index_cls = DatetimeIndex + array_cls = DatetimeArray + scalar_type = Timestamp + example_dtype = "M8[ns]" + + @pytest.fixture + def arr1d(self, tz_naive_fixture, freqstr): + """ + Fixture returning DatetimeArray with parametrized frequency and + timezones + """ + tz = tz_naive_fixture + dti = pd.date_range("2016-01-01 01:01:00", periods=5, freq=freqstr, tz=tz) + dta = dti._data + return dta + + def test_round(self, arr1d): + # GH#24064 + dti = self.index_cls(arr1d) + + result = dti.round(freq="2T") + expected = dti - pd.Timedelta(minutes=1) + expected = expected._with_freq(None) + tm.assert_index_equal(result, expected) + + dta = dti._data + result = dta.round(freq="2T") + expected = expected._data._with_freq(None) + tm.assert_datetime_array_equal(result, expected) + + def test_array_interface(self, datetime_index): + arr = DatetimeArray(datetime_index) + + # default asarray gives the same underlying data (for tz naive) + result = np.asarray(arr) + expected = arr._ndarray + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, copy=False) + assert result is expected + 
tm.assert_numpy_array_equal(result, expected) + + # specifying M8[ns] gives the same result as default + result = np.asarray(arr, dtype="datetime64[ns]") + expected = arr._ndarray + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, dtype="datetime64[ns]", copy=False) + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, dtype="datetime64[ns]") + assert result is not expected + tm.assert_numpy_array_equal(result, expected) + + # to object dtype + result = np.asarray(arr, dtype=object) + expected = np.array(list(arr), dtype=object) + tm.assert_numpy_array_equal(result, expected) + + # to other dtype always copies + result = np.asarray(arr, dtype="int64") + assert result is not arr.asi8 + assert not np.may_share_memory(arr, result) + expected = arr.asi8.copy() + tm.assert_numpy_array_equal(result, expected) + + # other dtypes handled by numpy + for dtype in ["float64", str]: + result = np.asarray(arr, dtype=dtype) + expected = np.asarray(arr).astype(dtype) + tm.assert_numpy_array_equal(result, expected) + + def test_array_object_dtype(self, arr1d): + # GH#23524 + arr = arr1d + dti = self.index_cls(arr1d) + + expected = np.array(list(dti)) + + result = np.array(arr, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + # also test the DatetimeIndex method while we're at it + result = np.array(dti, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_array_tz(self, arr1d): + # GH#23524 + arr = arr1d + dti = self.index_cls(arr1d) + + expected = dti.asi8.view("M8[ns]") + result = np.array(arr, dtype="M8[ns]") + tm.assert_numpy_array_equal(result, expected) + + result = np.array(arr, dtype="datetime64[ns]") + tm.assert_numpy_array_equal(result, expected) + + # check that we are not making copies when setting copy=False + result = np.array(arr, dtype="M8[ns]", copy=False) + assert result.base is expected.base + assert result.base is not None + result = np.array(arr, dtype="datetime64[ns]", copy=False) + assert result.base is expected.base + assert result.base is not None + + def test_array_i8_dtype(self, arr1d): + arr = arr1d + dti = self.index_cls(arr1d) + + expected = dti.asi8 + result = np.array(arr, dtype="i8") + tm.assert_numpy_array_equal(result, expected) + + result = np.array(arr, dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) + + # check that we are still making copies when setting copy=False + result = np.array(arr, dtype="i8", copy=False) + assert result.base is not expected.base + assert result.base is None + + def test_from_array_keeps_base(self): + # Ensure that DatetimeArray._ndarray.base isn't lost. 
+ arr = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]") + dta = DatetimeArray(arr) + + assert dta._ndarray is arr + dta = DatetimeArray(arr[:0]) + assert dta._ndarray.base is arr + + def test_from_dti(self, arr1d): + arr = arr1d + dti = self.index_cls(arr1d) + assert list(dti) == list(arr) + + # Check that Index.__new__ knows what to do with DatetimeArray + dti2 = pd.Index(arr) + assert isinstance(dti2, DatetimeIndex) + assert list(dti2) == list(arr) + + def test_astype_object(self, arr1d): + arr = arr1d + dti = self.index_cls(arr1d) + + asobj = arr.astype("O") + assert isinstance(asobj, np.ndarray) + assert asobj.dtype == "O" + assert list(asobj) == list(dti) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_to_period(self, datetime_index, freqstr): + dti = datetime_index + arr = DatetimeArray(dti) + + expected = dti.to_period(freq=freqstr) + result = arr.to_period(freq=freqstr) + assert isinstance(result, PeriodArray) + + tm.assert_equal(result, expected._data) + + def test_to_period_2d(self, arr1d): + arr2d = arr1d.reshape(1, -1) + + warn = None if arr1d.tz is None else UserWarning + with tm.assert_produces_warning(warn): + result = arr2d.to_period("D") + expected = arr1d.to_period("D").reshape(1, -1) + tm.assert_period_array_equal(result, expected) + + @pytest.mark.parametrize("propname", DatetimeArray._bool_ops) + def test_bool_properties(self, arr1d, propname): + # in this case _bool_ops is just `is_leap_year` + dti = self.index_cls(arr1d) + arr = arr1d + assert dti.freq == arr.freq + + result = getattr(arr, propname) + expected = np.array(getattr(dti, propname), dtype=result.dtype) + + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("propname", DatetimeArray._field_ops) + def test_int_properties(self, arr1d, propname): + dti = self.index_cls(arr1d) + arr = arr1d + + result = getattr(arr, propname) + expected = np.array(getattr(dti, propname), dtype=result.dtype) + + tm.assert_numpy_array_equal(result, expected) + + def test_take_fill_valid(self, arr1d, fixed_now_ts): + arr = arr1d + dti = self.index_cls(arr1d) + + now = fixed_now_ts.tz_localize(dti.tz) + result = arr.take([-1, 1], allow_fill=True, fill_value=now) + assert result[0] == now + + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got" + with pytest.raises(TypeError, match=msg): + # fill_value Timedelta invalid + arr.take([-1, 1], allow_fill=True, fill_value=now - now) + + with pytest.raises(TypeError, match=msg): + # fill_value Period invalid + arr.take([-1, 1], allow_fill=True, fill_value=Period("2014Q1")) + + tz = None if dti.tz is not None else "US/Eastern" + now = fixed_now_ts.tz_localize(tz) + msg = "Cannot compare tz-naive and tz-aware datetime-like objects" + with pytest.raises(TypeError, match=msg): + # Timestamp with mismatched tz-awareness + arr.take([-1, 1], allow_fill=True, fill_value=now) + + value = NaT._value + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. 
Got" + with pytest.raises(TypeError, match=msg): + # require NaT, not iNaT, as it could be confused with an integer + arr.take([-1, 1], allow_fill=True, fill_value=value) + + value = np.timedelta64("NaT", "ns") + with pytest.raises(TypeError, match=msg): + # require appropriate-dtype if we have a NA value + arr.take([-1, 1], allow_fill=True, fill_value=value) + + if arr.tz is not None: + # GH#37356 + # Assuming here that arr1d fixture does not include Australia/Melbourne + value = fixed_now_ts.tz_localize("Australia/Melbourne") + result = arr.take([-1, 1], allow_fill=True, fill_value=value) + + expected = arr.take( + [-1, 1], + allow_fill=True, + fill_value=value.tz_convert(arr.dtype.tz), + ) + tm.assert_equal(result, expected) + + def test_concat_same_type_invalid(self, arr1d): + # different timezones + arr = arr1d + + if arr.tz is None: + other = arr.tz_localize("UTC") + else: + other = arr.tz_localize(None) + + with pytest.raises(ValueError, match="to_concat must have the same"): + arr._concat_same_type([arr, other]) + + def test_concat_same_type_different_freq(self): + # we *can* concatenate DTI with different freqs. + a = DatetimeArray(pd.date_range("2000", periods=2, freq="D", tz="US/Central")) + b = DatetimeArray(pd.date_range("2000", periods=2, freq="H", tz="US/Central")) + result = DatetimeArray._concat_same_type([a, b]) + expected = DatetimeArray( + pd.to_datetime( + [ + "2000-01-01 00:00:00", + "2000-01-02 00:00:00", + "2000-01-01 00:00:00", + "2000-01-01 01:00:00", + ] + ).tz_localize("US/Central") + ) + + tm.assert_datetime_array_equal(result, expected) + + def test_strftime(self, arr1d): + arr = arr1d + + result = arr.strftime("%Y %b") + expected = np.array([ts.strftime("%Y %b") for ts in arr], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_strftime_nat(self): + # GH 29578 + arr = DatetimeArray(DatetimeIndex(["2019-01-01", NaT])) + + result = arr.strftime("%Y-%m-%d") + expected = np.array(["2019-01-01", np.nan], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +class TestTimedeltaArray(SharedTests): + index_cls = TimedeltaIndex + array_cls = TimedeltaArray + scalar_type = pd.Timedelta + example_dtype = "m8[ns]" + + def test_from_tdi(self): + tdi = TimedeltaIndex(["1 Day", "3 Hours"]) + arr = TimedeltaArray(tdi) + assert list(arr) == list(tdi) + + # Check that Index.__new__ knows what to do with TimedeltaArray + tdi2 = pd.Index(arr) + assert isinstance(tdi2, TimedeltaIndex) + assert list(tdi2) == list(arr) + + def test_astype_object(self): + tdi = TimedeltaIndex(["1 Day", "3 Hours"]) + arr = TimedeltaArray(tdi) + asobj = arr.astype("O") + assert isinstance(asobj, np.ndarray) + assert asobj.dtype == "O" + assert list(asobj) == list(tdi) + + def test_to_pytimedelta(self, timedelta_index): + tdi = timedelta_index + arr = TimedeltaArray(tdi) + + expected = tdi.to_pytimedelta() + result = arr.to_pytimedelta() + + tm.assert_numpy_array_equal(result, expected) + + def test_total_seconds(self, timedelta_index): + tdi = timedelta_index + arr = TimedeltaArray(tdi) + + expected = tdi.total_seconds() + result = arr.total_seconds() + + tm.assert_numpy_array_equal(result, expected.values) + + @pytest.mark.parametrize("propname", TimedeltaArray._field_ops) + def test_int_properties(self, timedelta_index, propname): + tdi = timedelta_index + arr = TimedeltaArray(tdi) + + result = getattr(arr, propname) + expected = np.array(getattr(tdi, propname), dtype=result.dtype) + + tm.assert_numpy_array_equal(result, expected) + + def 
test_array_interface(self, timedelta_index): + arr = TimedeltaArray(timedelta_index) + + # default asarray gives the same underlying data + result = np.asarray(arr) + expected = arr._ndarray + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, copy=False) + assert result is expected + tm.assert_numpy_array_equal(result, expected) + + # specifying m8[ns] gives the same result as default + result = np.asarray(arr, dtype="timedelta64[ns]") + expected = arr._ndarray + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, dtype="timedelta64[ns]", copy=False) + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, dtype="timedelta64[ns]") + assert result is not expected + tm.assert_numpy_array_equal(result, expected) + + # to object dtype + result = np.asarray(arr, dtype=object) + expected = np.array(list(arr), dtype=object) + tm.assert_numpy_array_equal(result, expected) + + # to other dtype always copies + result = np.asarray(arr, dtype="int64") + assert result is not arr.asi8 + assert not np.may_share_memory(arr, result) + expected = arr.asi8.copy() + tm.assert_numpy_array_equal(result, expected) + + # other dtypes handled by numpy + for dtype in ["float64", str]: + result = np.asarray(arr, dtype=dtype) + expected = np.asarray(arr).astype(dtype) + tm.assert_numpy_array_equal(result, expected) + + def test_take_fill_valid(self, timedelta_index, fixed_now_ts): + tdi = timedelta_index + arr = TimedeltaArray(tdi) + + td1 = pd.Timedelta(days=1) + result = arr.take([-1, 1], allow_fill=True, fill_value=td1) + assert result[0] == td1 + + value = fixed_now_ts + msg = f"value should be a '{arr._scalar_type.__name__}' or 'NaT'. Got" + with pytest.raises(TypeError, match=msg): + # fill_value Timestamp invalid + arr.take([0, 1], allow_fill=True, fill_value=value) + + value = fixed_now_ts.to_period("D") + with pytest.raises(TypeError, match=msg): + # fill_value Period invalid + arr.take([0, 1], allow_fill=True, fill_value=value) + + value = np.datetime64("NaT", "ns") + with pytest.raises(TypeError, match=msg): + # require appropriate-dtype if we have a NA value + arr.take([-1, 1], allow_fill=True, fill_value=value) + + +@pytest.mark.filterwarnings(r"ignore:Period with BDay freq is deprecated:FutureWarning") +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +class TestPeriodArray(SharedTests): + index_cls = PeriodIndex + array_cls = PeriodArray + scalar_type = Period + example_dtype = PeriodIndex([], freq="W").dtype + + @pytest.fixture + def arr1d(self, period_index): + """ + Fixture returning DatetimeArray from parametrized PeriodIndex objects + """ + return period_index._data + + def test_from_pi(self, arr1d): + pi = self.index_cls(arr1d) + arr = arr1d + assert list(arr) == list(pi) + + # Check that Index.__new__ knows what to do with PeriodArray + pi2 = pd.Index(arr) + assert isinstance(pi2, PeriodIndex) + assert list(pi2) == list(arr) + + def test_astype_object(self, arr1d): + pi = self.index_cls(arr1d) + arr = arr1d + asobj = arr.astype("O") + assert isinstance(asobj, np.ndarray) + assert asobj.dtype == "O" + assert list(asobj) == list(pi) + + def test_take_fill_valid(self, arr1d): + arr = arr1d + + value = NaT._value + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. 
Got" + with pytest.raises(TypeError, match=msg): + # require NaT, not iNaT, as it could be confused with an integer + arr.take([-1, 1], allow_fill=True, fill_value=value) + + value = np.timedelta64("NaT", "ns") + with pytest.raises(TypeError, match=msg): + # require appropriate-dtype if we have a NA value + arr.take([-1, 1], allow_fill=True, fill_value=value) + + @pytest.mark.parametrize("how", ["S", "E"]) + def test_to_timestamp(self, how, arr1d): + pi = self.index_cls(arr1d) + arr = arr1d + + expected = DatetimeArray(pi.to_timestamp(how=how)) + result = arr.to_timestamp(how=how) + assert isinstance(result, DatetimeArray) + + tm.assert_equal(result, expected) + + def test_to_timestamp_roundtrip_bday(self): + # Case where infer_freq inside would choose "D" instead of "B" + dta = pd.date_range("2021-10-18", periods=3, freq="B")._data + parr = dta.to_period() + result = parr.to_timestamp() + assert result.freq == "B" + tm.assert_extension_array_equal(result, dta) + + dta2 = dta[::2] + parr2 = dta2.to_period() + result2 = parr2.to_timestamp() + assert result2.freq == "2B" + tm.assert_extension_array_equal(result2, dta2) + + parr3 = dta.to_period("2B") + result3 = parr3.to_timestamp() + assert result3.freq == "B" + tm.assert_extension_array_equal(result3, dta) + + def test_to_timestamp_out_of_bounds(self): + # GH#19643 previously overflowed silently + pi = pd.period_range("1500", freq="Y", periods=3) + msg = "Out of bounds nanosecond timestamp: 1500-01-01 00:00:00" + with pytest.raises(OutOfBoundsDatetime, match=msg): + pi.to_timestamp() + + with pytest.raises(OutOfBoundsDatetime, match=msg): + pi._data.to_timestamp() + + @pytest.mark.parametrize("propname", PeriodArray._bool_ops) + def test_bool_properties(self, arr1d, propname): + # in this case _bool_ops is just `is_leap_year` + pi = self.index_cls(arr1d) + arr = arr1d + + result = getattr(arr, propname) + expected = np.array(getattr(pi, propname)) + + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("propname", PeriodArray._field_ops) + def test_int_properties(self, arr1d, propname): + pi = self.index_cls(arr1d) + arr = arr1d + + result = getattr(arr, propname) + expected = np.array(getattr(pi, propname)) + + tm.assert_numpy_array_equal(result, expected) + + def test_array_interface(self, arr1d): + arr = arr1d + + # default asarray gives objects + result = np.asarray(arr) + expected = np.array(list(arr), dtype=object) + tm.assert_numpy_array_equal(result, expected) + + # to object dtype (same as default) + result = np.asarray(arr, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(arr, dtype="int64") + tm.assert_numpy_array_equal(result, arr.asi8) + + # to other dtypes + msg = r"float\(\) argument must be a string or a( real)? 
number, not 'Period'" + with pytest.raises(TypeError, match=msg): + np.asarray(arr, dtype="float64") + + result = np.asarray(arr, dtype="S20") + expected = np.asarray(arr).astype("S20") + tm.assert_numpy_array_equal(result, expected) + + def test_strftime(self, arr1d): + arr = arr1d + + result = arr.strftime("%Y") + expected = np.array([per.strftime("%Y") for per in arr], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_strftime_nat(self): + # GH 29578 + arr = PeriodArray(PeriodIndex(["2019-01-01", NaT], dtype="period[D]")) + + result = arr.strftime("%Y-%m-%d") + expected = np.array(["2019-01-01", np.nan], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "arr,casting_nats", + [ + ( + TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data, + (NaT, np.timedelta64("NaT", "ns")), + ), + ( + pd.date_range("2000-01-01", periods=3, freq="D")._data, + (NaT, np.datetime64("NaT", "ns")), + ), + (pd.period_range("2000-01-01", periods=3, freq="D")._data, (NaT,)), + ], + ids=lambda x: type(x).__name__, +) +def test_casting_nat_setitem_array(arr, casting_nats): + expected = type(arr)._from_sequence([NaT, arr[1], arr[2]]) + + for nat in casting_nats: + arr = arr.copy() + arr[0] = nat + tm.assert_equal(arr, expected) + + +@pytest.mark.parametrize( + "arr,non_casting_nats", + [ + ( + TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data, + (np.datetime64("NaT", "ns"), NaT._value), + ), + ( + pd.date_range("2000-01-01", periods=3, freq="D")._data, + (np.timedelta64("NaT", "ns"), NaT._value), + ), + ( + pd.period_range("2000-01-01", periods=3, freq="D")._data, + (np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns"), NaT._value), + ), + ], + ids=lambda x: type(x).__name__, +) +def test_invalid_nat_setitem_array(arr, non_casting_nats): + msg = ( + "value should be a '(Timestamp|Timedelta|Period)', 'NaT', or array of those. " + "Got '(timedelta64|datetime64|int)' instead." 
+ ) + + for nat in non_casting_nats: + with pytest.raises(TypeError, match=msg): + arr[0] = nat + + +@pytest.mark.parametrize( + "arr", + [ + pd.date_range("2000", periods=4).array, + pd.timedelta_range("2000", periods=4).array, + ], +) +def test_to_numpy_extra(arr): + arr[0] = NaT + original = arr.copy() + + result = arr.to_numpy() + assert np.isnan(result[0]) + + result = arr.to_numpy(dtype="int64") + assert result[0] == -9223372036854775808 + + result = arr.to_numpy(dtype="int64", na_value=0) + assert result[0] == 0 + + result = arr.to_numpy(na_value=arr[1].to_numpy()) + assert result[0] == result[1] + + result = arr.to_numpy(na_value=arr[1].to_numpy(copy=False)) + assert result[0] == result[1] + + tm.assert_equal(arr, original) + + +@pytest.mark.parametrize("as_index", [True, False]) +@pytest.mark.parametrize( + "values", + [ + pd.to_datetime(["2020-01-01", "2020-02-01"]), + TimedeltaIndex([1, 2], unit="D"), + PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"), + ], +) +@pytest.mark.parametrize( + "klass", + [ + list, + np.array, + pd.array, + pd.Series, + pd.Index, + pd.Categorical, + pd.CategoricalIndex, + ], +) +def test_searchsorted_datetimelike_with_listlike(values, klass, as_index): + # https://github.com/pandas-dev/pandas/issues/32762 + if not as_index: + values = values._data + + result = values.searchsorted(klass(values)) + expected = np.array([0, 1], dtype=result.dtype) + + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values", + [ + pd.to_datetime(["2020-01-01", "2020-02-01"]), + TimedeltaIndex([1, 2], unit="D"), + PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"), + ], +) +@pytest.mark.parametrize( + "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2] +) +def test_searchsorted_datetimelike_with_listlike_invalid_dtype(values, arg): + # https://github.com/pandas-dev/pandas/issues/32762 + msg = "[Unexpected type|Cannot compare]" + with pytest.raises(TypeError, match=msg): + values.searchsorted(arg) + + +@pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series]) +def test_period_index_construction_from_strings(klass): + # https://github.com/pandas-dev/pandas/issues/26109 + strings = ["2020Q1", "2020Q2"] * 2 + data = klass(strings) + result = PeriodIndex(data, freq="Q") + expected = PeriodIndex([Period(s) for s in strings]) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) +def test_from_pandas_array(dtype): + # GH#24615 + data = np.array([1, 2, 3], dtype=dtype) + arr = NumpyExtensionArray(data) + + cls = {"M8[ns]": DatetimeArray, "m8[ns]": TimedeltaArray}[dtype] + + result = cls(arr) + expected = cls(data) + tm.assert_extension_array_equal(result, expected) + + result = cls._from_sequence(arr) + expected = cls._from_sequence(data) + tm.assert_extension_array_equal(result, expected) + + func = {"M8[ns]": _sequence_to_dt64ns, "m8[ns]": sequence_to_td64ns}[dtype] + result = func(arr)[0] + expected = func(data)[0] + tm.assert_equal(result, expected) + + func = {"M8[ns]": pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype] + result = func(arr).array + expected = func(data).array + tm.assert_equal(result, expected) + + # Let's check the Indexes while we're here + idx_cls = {"M8[ns]": DatetimeIndex, "m8[ns]": TimedeltaIndex}[dtype] + result = idx_cls(arr) + expected = idx_cls(data) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_datetimes.py 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_datetimes.py new file mode 100644 index 00000000..c2d68a79 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_datetimes.py @@ -0,0 +1,760 @@ +""" +Tests for DatetimeArray +""" +from __future__ import annotations + +from datetime import timedelta +import operator + +try: + from zoneinfo import ZoneInfo +except ImportError: + # Cannot assign to a type + ZoneInfo = None # type: ignore[misc, assignment] + +import numpy as np +import pytest + +from pandas._libs.tslibs import ( + npy_unit_to_abbrev, + tz_compare, +) + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import ( + DatetimeArray, + TimedeltaArray, +) + + +class TestNonNano: + @pytest.fixture(params=["s", "ms", "us"]) + def unit(self, request): + """Fixture returning parametrized time units""" + return request.param + + @pytest.fixture + def dtype(self, unit, tz_naive_fixture): + tz = tz_naive_fixture + if tz is None: + return np.dtype(f"datetime64[{unit}]") + else: + return DatetimeTZDtype(unit=unit, tz=tz) + + @pytest.fixture + def dta_dti(self, unit, dtype): + tz = getattr(dtype, "tz", None) + + dti = pd.date_range("2016-01-01", periods=55, freq="D", tz=tz) + if tz is None: + arr = np.asarray(dti).astype(f"M8[{unit}]") + else: + arr = np.asarray(dti.tz_convert("UTC").tz_localize(None)).astype( + f"M8[{unit}]" + ) + + dta = DatetimeArray._simple_new(arr, dtype=dtype) + return dta, dti + + @pytest.fixture + def dta(self, dta_dti): + dta, dti = dta_dti + return dta + + def test_non_nano(self, unit, dtype): + arr = np.arange(5, dtype=np.int64).view(f"M8[{unit}]") + dta = DatetimeArray._simple_new(arr, dtype=dtype) + + assert dta.dtype == dtype + assert dta[0].unit == unit + assert tz_compare(dta.tz, dta[0].tz) + assert (dta[0] == dta[:1]).all() + + @pytest.mark.parametrize( + "field", DatetimeArray._field_ops + DatetimeArray._bool_ops + ) + def test_fields(self, unit, field, dtype, dta_dti): + dta, dti = dta_dti + + assert (dti == dta).all() + + res = getattr(dta, field) + expected = getattr(dti._data, field) + tm.assert_numpy_array_equal(res, expected) + + def test_normalize(self, unit): + dti = pd.date_range("2016-01-01 06:00:00", periods=55, freq="D") + arr = np.asarray(dti).astype(f"M8[{unit}]") + + dta = DatetimeArray._simple_new(arr, dtype=arr.dtype) + + assert not dta.is_normalized + + # TODO: simplify once we can just .astype to other unit + exp = np.asarray(dti.normalize()).astype(f"M8[{unit}]") + expected = DatetimeArray._simple_new(exp, dtype=exp.dtype) + + res = dta.normalize() + tm.assert_extension_array_equal(res, expected) + + def test_simple_new_requires_match(self, unit): + arr = np.arange(5, dtype=np.int64).view(f"M8[{unit}]") + dtype = DatetimeTZDtype(unit, "UTC") + + dta = DatetimeArray._simple_new(arr, dtype=dtype) + assert dta.dtype == dtype + + wrong = DatetimeTZDtype("ns", "UTC") + with pytest.raises(AssertionError, match=""): + DatetimeArray._simple_new(arr, dtype=wrong) + + def test_std_non_nano(self, unit): + dti = pd.date_range("2016-01-01", periods=55, freq="D") + arr = np.asarray(dti).astype(f"M8[{unit}]") + + dta = DatetimeArray._simple_new(arr, dtype=arr.dtype) + + # we should match the nano-reso std, but floored to our reso. 
+ res = dta.std() + assert res._creso == dta._creso + assert res == dti.std().floor(unit) + + @pytest.mark.filterwarnings("ignore:Converting to PeriodArray.*:UserWarning") + def test_to_period(self, dta_dti): + dta, dti = dta_dti + result = dta.to_period("D") + expected = dti._data.to_period("D") + + tm.assert_extension_array_equal(result, expected) + + def test_iter(self, dta): + res = next(iter(dta)) + expected = dta[0] + + assert type(res) is pd.Timestamp + assert res._value == expected._value + assert res._creso == expected._creso + assert res == expected + + def test_astype_object(self, dta): + result = dta.astype(object) + assert all(x._creso == dta._creso for x in result) + assert all(x == y for x, y in zip(result, dta)) + + def test_to_pydatetime(self, dta_dti): + dta, dti = dta_dti + + result = dta.to_pydatetime() + expected = dti.to_pydatetime() + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("meth", ["time", "timetz", "date"]) + def test_time_date(self, dta_dti, meth): + dta, dti = dta_dti + + result = getattr(dta, meth) + expected = getattr(dti, meth) + tm.assert_numpy_array_equal(result, expected) + + def test_format_native_types(self, unit, dtype, dta_dti): + # In this case we should get the same formatted values with our nano + # version dti._data as we do with the non-nano dta + dta, dti = dta_dti + + res = dta._format_native_types() + exp = dti._data._format_native_types() + tm.assert_numpy_array_equal(res, exp) + + def test_repr(self, dta_dti, unit): + dta, dti = dta_dti + + assert repr(dta) == repr(dti._data).replace("[ns", f"[{unit}") + + # TODO: tests with td64 + def test_compare_mismatched_resolutions(self, comparison_op): + # comparison that numpy gets wrong bc of silent overflows + op = comparison_op + + iinfo = np.iinfo(np.int64) + vals = np.array([iinfo.min, iinfo.min + 1, iinfo.max], dtype=np.int64) + + # Construct so that arr2[1] < arr[1] < arr[2] < arr2[2] + arr = np.array(vals).view("M8[ns]") + arr2 = arr.view("M8[s]") + + left = DatetimeArray._simple_new(arr, dtype=arr.dtype) + right = DatetimeArray._simple_new(arr2, dtype=arr2.dtype) + + if comparison_op is operator.eq: + expected = np.array([False, False, False]) + elif comparison_op is operator.ne: + expected = np.array([True, True, True]) + elif comparison_op in [operator.lt, operator.le]: + expected = np.array([False, False, True]) + else: + expected = np.array([False, True, False]) + + result = op(left, right) + tm.assert_numpy_array_equal(result, expected) + + result = op(left[1], right) + tm.assert_numpy_array_equal(result, expected) + + if op not in [operator.eq, operator.ne]: + # check that numpy still gets this wrong; if it is fixed we may be + # able to remove compare_mismatched_resolutions + np_res = op(left._ndarray, right._ndarray) + tm.assert_numpy_array_equal(np_res[1:], ~expected[1:]) + + def test_add_mismatched_reso_doesnt_downcast(self): + # https://github.com/pandas-dev/pandas/pull/48748#issuecomment-1260181008 + td = pd.Timedelta(microseconds=1) + dti = pd.date_range("2016-01-01", periods=3) - td + dta = dti._data.as_unit("us") + + res = dta + td.as_unit("us") + # even though the result is an even number of days + # (so we _could_ downcast to unit="s"), we do not. 
+ assert res.unit == "us"
+
+ @pytest.mark.parametrize(
+ "scalar",
+ [
+ timedelta(hours=2),
+ pd.Timedelta(hours=2),
+ np.timedelta64(2, "h"),
+ np.timedelta64(2 * 3600 * 1000, "ms"),
+ pd.offsets.Minute(120),
+ pd.offsets.Hour(2),
+ ],
+ )
+ def test_add_timedeltalike_scalar_mismatched_reso(self, dta_dti, scalar):
+ dta, dti = dta_dti
+
+ td = pd.Timedelta(scalar)
+ exp_reso = max(dta._creso, td._creso)
+ exp_unit = npy_unit_to_abbrev(exp_reso)
+
+ expected = (dti + td)._data.as_unit(exp_unit)
+ result = dta + scalar
+ tm.assert_extension_array_equal(result, expected)
+
+ result = scalar + dta
+ tm.assert_extension_array_equal(result, expected)
+
+ expected = (dti - td)._data.as_unit(exp_unit)
+ result = dta - scalar
+ tm.assert_extension_array_equal(result, expected)
+
+ def test_sub_datetimelike_scalar_mismatch(self):
+ dti = pd.date_range("2016-01-01", periods=3)
+ dta = dti._data.as_unit("us")
+
+ ts = dta[0].as_unit("s")
+
+ result = dta - ts
+ expected = (dti - dti[0])._data.as_unit("us")
+ assert result.dtype == "m8[us]"
+ tm.assert_extension_array_equal(result, expected)
+
+ def test_sub_datetime64_reso_mismatch(self):
+ dti = pd.date_range("2016-01-01", periods=3)
+ left = dti._data.as_unit("s")
+ right = left.as_unit("ms")
+
+ result = left - right
+ exp_values = np.array([0, 0, 0], dtype="m8[ms]")
+ expected = TimedeltaArray._simple_new(
+ exp_values,
+ dtype=exp_values.dtype,
+ )
+ tm.assert_extension_array_equal(result, expected)
+ result2 = right - left
+ tm.assert_extension_array_equal(result2, expected)
+
+
+class TestDatetimeArrayComparisons:
+ # TODO: merge this into tests/arithmetic/test_datetime64 once it is
+ # sufficiently robust
+
+ def test_cmp_dt64_arraylike_tznaive(self, comparison_op):
+ # arbitrary tz-naive DatetimeIndex
+ op = comparison_op
+
+ dti = pd.date_range("2016-01-1", freq="MS", periods=9, tz=None)
+ arr = DatetimeArray(dti)
+ assert arr.freq == dti.freq
+ assert arr.tz == dti.tz
+
+ right = dti
+
+ expected = np.ones(len(arr), dtype=bool)
+ if comparison_op.__name__ in ["ne", "gt", "lt"]:
+ # for these the comparisons should be all-False
+ expected = ~expected
+
+ result = op(arr, arr)
+ tm.assert_numpy_array_equal(result, expected)
+ for other in [
+ right,
+ np.array(right),
+ list(right),
+ tuple(right),
+ right.astype(object),
+ ]:
+ result = op(arr, other)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = op(other, arr)
+ tm.assert_numpy_array_equal(result, expected)
+
+
+class TestDatetimeArray:
+ def test_astype_non_nano_tznaive(self):
+ dti = pd.date_range("2016-01-01", periods=3)
+
+ res = dti.astype("M8[s]")
+ assert res.dtype == "M8[s]"
+
+ dta = dti._data
+ res = dta.astype("M8[s]")
+ assert res.dtype == "M8[s]"
+ assert isinstance(res, pd.core.arrays.DatetimeArray) # used to be ndarray
+
+ def test_astype_non_nano_tzaware(self):
+ dti = pd.date_range("2016-01-01", periods=3, tz="UTC")
+
+ res = dti.astype("M8[s, US/Pacific]")
+ assert res.dtype == "M8[s, US/Pacific]"
+
+ dta = dti._data
+ res = dta.astype("M8[s, US/Pacific]")
+ assert res.dtype == "M8[s, US/Pacific]"
+
+ # from non-nano to non-nano, preserving reso
+ res2 = res.astype("M8[s, UTC]")
+ assert res2.dtype == "M8[s, UTC]"
+ assert not tm.shares_memory(res2, res)
+
+ res3 = res.astype("M8[s, UTC]", copy=False)
+ assert res3.dtype == "M8[s, UTC]"
+ assert tm.shares_memory(res3, res)
+
+ def test_astype_to_same(self):
+ arr = DatetimeArray._from_sequence(
+ ["2000"], dtype=DatetimeTZDtype(tz="US/Central")
+ )
+ result = arr.astype(DatetimeTZDtype(tz="US/Central"),
copy=False) + assert result is arr + + @pytest.mark.parametrize("dtype", ["datetime64[ns]", "datetime64[ns, UTC]"]) + @pytest.mark.parametrize( + "other", ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, CET]"] + ) + def test_astype_copies(self, dtype, other): + # https://github.com/pandas-dev/pandas/pull/32490 + ser = pd.Series([1, 2], dtype=dtype) + orig = ser.copy() + + err = False + if (dtype == "datetime64[ns]") ^ (other == "datetime64[ns]"): + # deprecated in favor of tz_localize + err = True + + if err: + if dtype == "datetime64[ns]": + msg = "Use obj.tz_localize instead or series.dt.tz_localize instead" + else: + msg = "from timezone-aware dtype to timezone-naive dtype" + with pytest.raises(TypeError, match=msg): + ser.astype(other) + else: + t = ser.astype(other) + t[:] = pd.NaT + tm.assert_series_equal(ser, orig) + + @pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"]) + def test_astype_int(self, dtype): + arr = DatetimeArray._from_sequence([pd.Timestamp("2000"), pd.Timestamp("2001")]) + + if np.dtype(dtype) != np.int64: + with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): + arr.astype(dtype) + return + + result = arr.astype(dtype) + expected = arr._ndarray.view("i8") + tm.assert_numpy_array_equal(result, expected) + + def test_astype_to_sparse_dt64(self): + # GH#50082 + dti = pd.date_range("2016-01-01", periods=4) + dta = dti._data + result = dta.astype("Sparse[datetime64[ns]]") + + assert result.dtype == "Sparse[datetime64[ns]]" + assert (result == dta).all() + + def test_tz_setter_raises(self): + arr = DatetimeArray._from_sequence( + ["2000"], dtype=DatetimeTZDtype(tz="US/Central") + ) + with pytest.raises(AttributeError, match="tz_localize"): + arr.tz = "UTC" + + def test_setitem_str_impute_tz(self, tz_naive_fixture): + # Like for getitem, if we are passed a naive-like string, we impute + # our own timezone. 
+ tz = tz_naive_fixture
+
+ data = np.array([1, 2, 3], dtype="M8[ns]")
+ dtype = data.dtype if tz is None else DatetimeTZDtype(tz=tz)
+ arr = DatetimeArray(data, dtype=dtype)
+ expected = arr.copy()
+
+ ts = pd.Timestamp("2020-09-08 16:50").tz_localize(tz)
+ setter = str(ts.tz_localize(None))
+
+ # Setting a scalar tznaive string
+ expected[0] = ts
+ arr[0] = setter
+ tm.assert_equal(arr, expected)
+
+ # Setting a listlike of tznaive strings
+ expected[1] = ts
+ arr[:2] = [setter, setter]
+ tm.assert_equal(arr, expected)
+
+ def test_setitem_different_tz_raises(self):
+ # pre-2.0 we required exact tz match, in 2.0 we require only
+ # tzawareness-match
+ data = np.array([1, 2, 3], dtype="M8[ns]")
+ arr = DatetimeArray(data, copy=False, dtype=DatetimeTZDtype(tz="US/Central"))
+ with pytest.raises(TypeError, match="Cannot compare tz-naive and tz-aware"):
+ arr[0] = pd.Timestamp("2000")
+
+ ts = pd.Timestamp("2000", tz="US/Eastern")
+ arr[0] = ts
+ assert arr[0] == ts.tz_convert("US/Central")
+
+ def test_setitem_clears_freq(self):
+ a = DatetimeArray(pd.date_range("2000", periods=2, freq="D", tz="US/Central"))
+ a[0] = pd.Timestamp("2000", tz="US/Central")
+ assert a.freq is None
+
+ @pytest.mark.parametrize(
+ "obj",
+ [
+ pd.Timestamp("2021-01-01"),
+ pd.Timestamp("2021-01-01").to_datetime64(),
+ pd.Timestamp("2021-01-01").to_pydatetime(),
+ ],
+ )
+ def test_setitem_objects(self, obj):
+ # make sure we accept datetime64 and datetime in addition to Timestamp
+ dti = pd.date_range("2000", periods=2, freq="D")
+ arr = dti._data
+
+ arr[0] = obj
+ assert arr[0] == obj
+
+ def test_repeat_preserves_tz(self):
+ dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central")
+ arr = DatetimeArray(dti)
+
+ repeated = arr.repeat([1, 1])
+
+ # preserves tz and values, but not freq
+ expected = DatetimeArray(arr.asi8, freq=None, dtype=arr.dtype)
+ tm.assert_equal(repeated, expected)
+
+ def test_value_counts_preserves_tz(self):
+ dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central")
+ arr = DatetimeArray(dti).repeat([4, 3])
+
+ result = arr.value_counts()
+
+ # Note: not tm.assert_index_equal, since `freq`s do not match
+ assert result.index.equals(dti)
+
+ arr[-2] = pd.NaT
+ result = arr.value_counts(dropna=False)
+ expected = pd.Series([4, 2, 1], index=[dti[0], dti[1], pd.NaT], name="count")
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("method", ["pad", "backfill"])
+ def test_fillna_preserves_tz(self, method):
+ dti = pd.date_range("2000-01-01", periods=5, freq="D", tz="US/Central")
+ arr = DatetimeArray(dti, copy=True)
+ arr[2] = pd.NaT
+
+ fill_val = dti[1] if method == "pad" else dti[3]
+ expected = DatetimeArray._from_sequence(
+ [dti[0], dti[1], fill_val, dti[3], dti[4]],
+ dtype=DatetimeTZDtype(tz="US/Central"),
+ )
+
+ result = arr._pad_or_backfill(method=method)
+ tm.assert_extension_array_equal(result, expected)
+
+ # assert that arr and dti were not modified in-place
+ assert arr[2] is pd.NaT
+ assert dti[2] == pd.Timestamp("2000-01-03", tz="US/Central")
+
+ def test_fillna_2d(self):
+ dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific")
+ dta = dti._data.reshape(3, 2).copy()
+ dta[0, 1] = pd.NaT
+ dta[1, 0] = pd.NaT
+
+ res1 = dta._pad_or_backfill(method="pad")
+ expected1 = dta.copy()
+ expected1[1, 0] = dta[0, 0]
+ tm.assert_extension_array_equal(res1, expected1)
+
+ res2 = dta._pad_or_backfill(method="backfill")
+ expected2 = dta.copy()
+ expected2[1, 0] = dta[2, 0]
+ expected2[0, 1] = dta[1, 1]
+
tm.assert_extension_array_equal(res2, expected2) + + # with different ordering for underlying ndarray; behavior should + # be unchanged + dta2 = dta._from_backing_data(dta._ndarray.copy(order="F")) + assert dta2._ndarray.flags["F_CONTIGUOUS"] + assert not dta2._ndarray.flags["C_CONTIGUOUS"] + tm.assert_extension_array_equal(dta, dta2) + + res3 = dta2._pad_or_backfill(method="pad") + tm.assert_extension_array_equal(res3, expected1) + + res4 = dta2._pad_or_backfill(method="backfill") + tm.assert_extension_array_equal(res4, expected2) + + # test the DataFrame method while we're here + df = pd.DataFrame(dta) + res = df.ffill() + expected = pd.DataFrame(expected1) + tm.assert_frame_equal(res, expected) + + res = df.bfill() + expected = pd.DataFrame(expected2) + tm.assert_frame_equal(res, expected) + + def test_array_interface_tz(self): + tz = "US/Central" + data = DatetimeArray(pd.date_range("2017", periods=2, tz=tz)) + result = np.asarray(data) + + expected = np.array( + [ + pd.Timestamp("2017-01-01T00:00:00", tz=tz), + pd.Timestamp("2017-01-02T00:00:00", tz=tz), + ], + dtype=object, + ) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(data, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(data, dtype="M8[ns]") + + expected = np.array( + ["2017-01-01T06:00:00", "2017-01-02T06:00:00"], dtype="M8[ns]" + ) + tm.assert_numpy_array_equal(result, expected) + + def test_array_interface(self): + data = DatetimeArray(pd.date_range("2017", periods=2)) + expected = np.array( + ["2017-01-01T00:00:00", "2017-01-02T00:00:00"], dtype="datetime64[ns]" + ) + + result = np.asarray(data) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(data, dtype=object) + expected = np.array( + [pd.Timestamp("2017-01-01T00:00:00"), pd.Timestamp("2017-01-02T00:00:00")], + dtype=object, + ) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("index", [True, False]) + def test_searchsorted_different_tz(self, index): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + arr = DatetimeArray(data, freq="D").tz_localize("Asia/Tokyo") + if index: + arr = pd.Index(arr) + + expected = arr.searchsorted(arr[2]) + result = arr.searchsorted(arr[2].tz_convert("UTC")) + assert result == expected + + expected = arr.searchsorted(arr[2:6]) + result = arr.searchsorted(arr[2:6].tz_convert("UTC")) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("index", [True, False]) + def test_searchsorted_tzawareness_compat(self, index): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + arr = DatetimeArray(data, freq="D") + if index: + arr = pd.Index(arr) + + mismatch = arr.tz_localize("Asia/Tokyo") + + msg = "Cannot compare tz-naive and tz-aware datetime-like objects" + with pytest.raises(TypeError, match=msg): + arr.searchsorted(mismatch[0]) + with pytest.raises(TypeError, match=msg): + arr.searchsorted(mismatch) + + with pytest.raises(TypeError, match=msg): + mismatch.searchsorted(arr[0]) + with pytest.raises(TypeError, match=msg): + mismatch.searchsorted(arr) + + @pytest.mark.parametrize( + "other", + [ + 1, + np.int64(1), + 1.0, + np.timedelta64("NaT"), + pd.Timedelta(days=2), + "invalid", + np.arange(10, dtype="i8") * 24 * 3600 * 10**9, + np.arange(10).view("timedelta64[ns]") * 24 * 3600 * 10**9, + pd.Timestamp("2021-01-01").to_period("D"), + ], + ) + @pytest.mark.parametrize("index", [True, False]) + def test_searchsorted_invalid_types(self, other, index): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + arr = 
DatetimeArray(data, freq="D") + if index: + arr = pd.Index(arr) + + msg = "|".join( + [ + "searchsorted requires compatible dtype or scalar", + "value should be a 'Timestamp', 'NaT', or array of those. Got", + ] + ) + with pytest.raises(TypeError, match=msg): + arr.searchsorted(other) + + def test_shift_fill_value(self): + dti = pd.date_range("2016-01-01", periods=3) + + dta = dti._data + expected = DatetimeArray(np.roll(dta._ndarray, 1)) + + fv = dta[-1] + for fill_value in [fv, fv.to_pydatetime(), fv.to_datetime64()]: + result = dta.shift(1, fill_value=fill_value) + tm.assert_datetime_array_equal(result, expected) + + dta = dta.tz_localize("UTC") + expected = expected.tz_localize("UTC") + fv = dta[-1] + for fill_value in [fv, fv.to_pydatetime()]: + result = dta.shift(1, fill_value=fill_value) + tm.assert_datetime_array_equal(result, expected) + + def test_shift_value_tzawareness_mismatch(self): + dti = pd.date_range("2016-01-01", periods=3) + + dta = dti._data + + fv = dta[-1].tz_localize("UTC") + for invalid in [fv, fv.to_pydatetime()]: + with pytest.raises(TypeError, match="Cannot compare"): + dta.shift(1, fill_value=invalid) + + dta = dta.tz_localize("UTC") + fv = dta[-1].tz_localize(None) + for invalid in [fv, fv.to_pydatetime(), fv.to_datetime64()]: + with pytest.raises(TypeError, match="Cannot compare"): + dta.shift(1, fill_value=invalid) + + def test_shift_requires_tzmatch(self): + # pre-2.0 we required exact tz match, in 2.0 we require just + # matching tzawareness + dti = pd.date_range("2016-01-01", periods=3, tz="UTC") + dta = dti._data + + fill_value = pd.Timestamp("2020-10-18 18:44", tz="US/Pacific") + + result = dta.shift(1, fill_value=fill_value) + expected = dta.shift(1, fill_value=fill_value.tz_convert("UTC")) + tm.assert_equal(result, expected) + + def test_tz_localize_t2d(self): + dti = pd.date_range("1994-05-12", periods=12, tz="US/Pacific") + dta = dti._data.reshape(3, 4) + result = dta.tz_localize(None) + + expected = dta.ravel().tz_localize(None).reshape(dta.shape) + tm.assert_datetime_array_equal(result, expected) + + roundtrip = expected.tz_localize("US/Pacific") + tm.assert_datetime_array_equal(roundtrip, dta) + + easts = ["US/Eastern", "dateutil/US/Eastern"] + if ZoneInfo is not None: + try: + tz = ZoneInfo("US/Eastern") + except KeyError: + # no tzdata + pass + else: + # Argument 1 to "append" of "list" has incompatible type "ZoneInfo"; + # expected "str" + easts.append(tz) # type: ignore[arg-type] + + @pytest.mark.parametrize("tz", easts) + def test_iter_zoneinfo_fold(self, tz): + # GH#49684 + utc_vals = np.array( + [1320552000, 1320555600, 1320559200, 1320562800], dtype=np.int64 + ) + utc_vals *= 1_000_000_000 + + dta = DatetimeArray(utc_vals).tz_localize("UTC").tz_convert(tz) + + left = dta[2] + right = list(dta)[2] + assert str(left) == str(right) + # previously there was a bug where with non-pytz right would be + # Timestamp('2011-11-06 01:00:00-0400', tz='US/Eastern') + # while left would be + # Timestamp('2011-11-06 01:00:00-0500', tz='US/Eastern') + # The .value's would match (so they would compare as equal), + # but the folds would not + assert left.utcoffset() == right.utcoffset() + + # The same bug in ints_to_pydatetime affected .astype, so we test + # that here. 
+ right2 = dta.astype(object)[2] + assert str(left) == str(right2) + assert left.utcoffset() == right2.utcoffset() + + +def test_factorize_sort_without_freq(): + dta = DatetimeArray._from_sequence([0, 2, 1]) + + msg = r"call pd.factorize\(obj, sort=True\) instead" + with pytest.raises(NotImplementedError, match=msg): + dta.factorize(sort=True) + + # Do TimedeltaArray while we're here + tda = dta - dta[0] + with pytest.raises(NotImplementedError, match=msg): + tda.factorize(sort=True) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_ndarray_backed.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_ndarray_backed.py new file mode 100644 index 00000000..1fe7cc9b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_ndarray_backed.py @@ -0,0 +1,75 @@ +""" +Tests for subclasses of NDArrayBackedExtensionArray +""" +import numpy as np + +from pandas import ( + CategoricalIndex, + date_range, +) +from pandas.core.arrays import ( + Categorical, + DatetimeArray, + NumpyExtensionArray, + TimedeltaArray, +) + + +class TestEmpty: + def test_empty_categorical(self): + ci = CategoricalIndex(["a", "b", "c"], ordered=True) + dtype = ci.dtype + + # case with int8 codes + shape = (4,) + result = Categorical._empty(shape, dtype=dtype) + assert isinstance(result, Categorical) + assert result.shape == shape + assert result._ndarray.dtype == np.int8 + + # case where repr would segfault if we didn't override base implementation + result = Categorical._empty((4096,), dtype=dtype) + assert isinstance(result, Categorical) + assert result.shape == (4096,) + assert result._ndarray.dtype == np.int8 + repr(result) + + # case with int16 codes + ci = CategoricalIndex(list(range(512)) * 4, ordered=False) + dtype = ci.dtype + result = Categorical._empty(shape, dtype=dtype) + assert isinstance(result, Categorical) + assert result.shape == shape + assert result._ndarray.dtype == np.int16 + + def test_empty_dt64tz(self): + dti = date_range("2016-01-01", periods=2, tz="Asia/Tokyo") + dtype = dti.dtype + + shape = (0,) + result = DatetimeArray._empty(shape, dtype=dtype) + assert result.dtype == dtype + assert isinstance(result, DatetimeArray) + assert result.shape == shape + + def test_empty_dt64(self): + shape = (3, 9) + result = DatetimeArray._empty(shape, dtype="datetime64[ns]") + assert isinstance(result, DatetimeArray) + assert result.shape == shape + + def test_empty_td64(self): + shape = (3, 9) + result = TimedeltaArray._empty(shape, dtype="m8[ns]") + assert isinstance(result, TimedeltaArray) + assert result.shape == shape + + def test_empty_pandas_array(self): + arr = NumpyExtensionArray(np.array([1, 2])) + dtype = arr.dtype + + shape = (3, 9) + result = NumpyExtensionArray._empty(shape, dtype=dtype) + assert isinstance(result, NumpyExtensionArray) + assert result.dtype == dtype + assert result.shape == shape diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_period.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_period.py new file mode 100644 index 00000000..d1e954bc --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_period.py @@ -0,0 +1,184 @@ +import numpy as np +import pytest + +from pandas._libs.tslibs import iNaT +from pandas._libs.tslibs.period import IncompatibleFrequency + +from pandas.core.dtypes.base import _registry as registry +from pandas.core.dtypes.dtypes import PeriodDtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import 
PeriodArray + +# ---------------------------------------------------------------------------- +# Dtype + + +def test_registered(): + assert PeriodDtype in registry.dtypes + result = registry.find("Period[D]") + expected = PeriodDtype("D") + assert result == expected + + +# ---------------------------------------------------------------------------- +# period_array + + +def test_asi8(): + result = PeriodArray._from_sequence(["2000", "2001", None], dtype="period[D]").asi8 + expected = np.array([10957, 11323, iNaT]) + tm.assert_numpy_array_equal(result, expected) + + +def test_take_raises(): + arr = PeriodArray._from_sequence(["2000", "2001"], dtype="period[D]") + with pytest.raises(IncompatibleFrequency, match="freq"): + arr.take([0, -1], allow_fill=True, fill_value=pd.Period("2000", freq="W")) + + msg = "value should be a 'Period' or 'NaT'. Got 'str' instead" + with pytest.raises(TypeError, match=msg): + arr.take([0, -1], allow_fill=True, fill_value="foo") + + +def test_fillna_raises(): + arr = PeriodArray._from_sequence(["2000", "2001", "2002"], dtype="period[D]") + with pytest.raises(ValueError, match="Length"): + arr.fillna(arr[:2]) + + +def test_fillna_copies(): + arr = PeriodArray._from_sequence(["2000", "2001", "2002"], dtype="period[D]") + result = arr.fillna(pd.Period("2000", "D")) + assert result is not arr + + +# ---------------------------------------------------------------------------- +# setitem + + +@pytest.mark.parametrize( + "key, value, expected", + [ + ([0], pd.Period("2000", "D"), [10957, 1, 2]), + ([0], None, [iNaT, 1, 2]), + ([0], np.nan, [iNaT, 1, 2]), + ([0, 1, 2], pd.Period("2000", "D"), [10957] * 3), + ( + [0, 1, 2], + [pd.Period("2000", "D"), pd.Period("2001", "D"), pd.Period("2002", "D")], + [10957, 11323, 11688], + ), + ], +) +def test_setitem(key, value, expected): + arr = PeriodArray(np.arange(3), dtype="period[D]") + expected = PeriodArray(expected, dtype="period[D]") + arr[key] = value + tm.assert_period_array_equal(arr, expected) + + +def test_setitem_raises_incompatible_freq(): + arr = PeriodArray(np.arange(3), dtype="period[D]") + with pytest.raises(IncompatibleFrequency, match="freq"): + arr[0] = pd.Period("2000", freq="A") + + other = PeriodArray._from_sequence(["2000", "2001"], dtype="period[A]") + with pytest.raises(IncompatibleFrequency, match="freq"): + arr[[0, 1]] = other + + +def test_setitem_raises_length(): + arr = PeriodArray(np.arange(3), dtype="period[D]") + with pytest.raises(ValueError, match="length"): + arr[[0, 1]] = [pd.Period("2000", freq="D")] + + +def test_setitem_raises_type(): + arr = PeriodArray(np.arange(3), dtype="period[D]") + with pytest.raises(TypeError, match="int"): + arr[0] = 1 + + +# ---------------------------------------------------------------------------- +# Ops + + +def test_sub_period(): + arr = PeriodArray._from_sequence(["2000", "2001"], dtype="period[D]") + other = pd.Period("2000", freq="M") + with pytest.raises(IncompatibleFrequency, match="freq"): + arr - other + + +def test_sub_period_overflow(): + # GH#47538 + dti = pd.date_range("1677-09-22", periods=2, freq="D") + pi = dti.to_period("ns") + + per = pd.Period._from_ordinal(10**14, pi.freq) + + with pytest.raises(OverflowError, match="Overflow in int64 addition"): + pi - per + + with pytest.raises(OverflowError, match="Overflow in int64 addition"): + per - pi + + +# ---------------------------------------------------------------------------- +# Methods + + +@pytest.mark.parametrize( + "other", + [ + pd.Period("2000", freq="H"), + 
PeriodArray._from_sequence(["2000", "2001", "2000"], dtype="period[H]"), + ], +) +def test_where_different_freq_raises(other): + # GH#45768 The PeriodArray method raises, the Series method coerces + ser = pd.Series( + PeriodArray._from_sequence(["2000", "2001", "2002"], dtype="period[D]") + ) + cond = np.array([True, False, True]) + + with pytest.raises(IncompatibleFrequency, match="freq"): + ser.array._where(cond, other) + + res = ser.where(cond, other) + expected = ser.astype(object).where(cond, other) + tm.assert_series_equal(res, expected) + + +# ---------------------------------------------------------------------------- +# Printing + + +def test_repr_small(): + arr = PeriodArray._from_sequence(["2000", "2001"], dtype="period[D]") + result = str(arr) + expected = ( + "\n['2000-01-01', '2001-01-01']\nLength: 2, dtype: period[D]" + ) + assert result == expected + + +def test_repr_large(): + arr = PeriodArray._from_sequence(["2000", "2001"] * 500, dtype="period[D]") + result = str(arr) + expected = ( + "\n" + "['2000-01-01', '2001-01-01', '2000-01-01', '2001-01-01', " + "'2000-01-01',\n" + " '2001-01-01', '2000-01-01', '2001-01-01', '2000-01-01', " + "'2001-01-01',\n" + " ...\n" + " '2000-01-01', '2001-01-01', '2000-01-01', '2001-01-01', " + "'2000-01-01',\n" + " '2001-01-01', '2000-01-01', '2001-01-01', '2000-01-01', " + "'2001-01-01']\n" + "Length: 1000, dtype: period[D]" + ) + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_timedeltas.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_timedeltas.py new file mode 100644 index 00000000..1043c2ee --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/test_timedeltas.py @@ -0,0 +1,311 @@ +from datetime import timedelta + +import numpy as np +import pytest + +import pandas as pd +from pandas import Timedelta +import pandas._testing as tm +from pandas.core.arrays import ( + DatetimeArray, + TimedeltaArray, +) + + +class TestNonNano: + @pytest.fixture(params=["s", "ms", "us"]) + def unit(self, request): + return request.param + + @pytest.fixture + def tda(self, unit): + arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]") + return TimedeltaArray._simple_new(arr, dtype=arr.dtype) + + def test_non_nano(self, unit): + arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]") + tda = TimedeltaArray._simple_new(arr, dtype=arr.dtype) + + assert tda.dtype == arr.dtype + assert tda[0].unit == unit + + def test_as_unit_raises(self, tda): + # GH#50616 + with pytest.raises(ValueError, match="Supported units"): + tda.as_unit("D") + + tdi = pd.Index(tda) + with pytest.raises(ValueError, match="Supported units"): + tdi.as_unit("D") + + @pytest.mark.parametrize("field", TimedeltaArray._field_ops) + def test_fields(self, tda, field): + as_nano = tda._ndarray.astype("m8[ns]") + tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype) + + result = getattr(tda, field) + expected = getattr(tda_nano, field) + tm.assert_numpy_array_equal(result, expected) + + def test_to_pytimedelta(self, tda): + as_nano = tda._ndarray.astype("m8[ns]") + tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype) + + result = tda.to_pytimedelta() + expected = tda_nano.to_pytimedelta() + tm.assert_numpy_array_equal(result, expected) + + def test_total_seconds(self, unit, tda): + as_nano = tda._ndarray.astype("m8[ns]") + tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype) + + result = tda.total_seconds() + expected = tda_nano.total_seconds() + 
tm.assert_numpy_array_equal(result, expected) + + def test_timedelta_array_total_seconds(self): + # GH34290 + expected = Timedelta("2 min").total_seconds() + + result = pd.array([Timedelta("2 min")]).total_seconds()[0] + assert result == expected + + def test_total_seconds_nanoseconds(self): + # issue #48521 + start_time = pd.Series(["2145-11-02 06:00:00"]).astype("datetime64[ns]") + end_time = pd.Series(["2145-11-02 07:06:00"]).astype("datetime64[ns]") + expected = (end_time - start_time).values / np.timedelta64(1, "s") + result = (end_time - start_time).dt.total_seconds().values + assert result == expected + + @pytest.mark.parametrize( + "nat", [np.datetime64("NaT", "ns"), np.datetime64("NaT", "us")] + ) + def test_add_nat_datetimelike_scalar(self, nat, tda): + result = tda + nat + assert isinstance(result, DatetimeArray) + assert result._creso == tda._creso + assert result.isna().all() + + result = nat + tda + assert isinstance(result, DatetimeArray) + assert result._creso == tda._creso + assert result.isna().all() + + def test_add_pdnat(self, tda): + result = tda + pd.NaT + assert isinstance(result, TimedeltaArray) + assert result._creso == tda._creso + assert result.isna().all() + + result = pd.NaT + tda + assert isinstance(result, TimedeltaArray) + assert result._creso == tda._creso + assert result.isna().all() + + # TODO: 2022-07-11 this is the only test that gets to DTA.tz_convert + # or tz_localize with non-nano; implement tests specific to that. + def test_add_datetimelike_scalar(self, tda, tz_naive_fixture): + ts = pd.Timestamp("2016-01-01", tz=tz_naive_fixture).as_unit("ns") + + expected = tda.as_unit("ns") + ts + res = tda + ts + tm.assert_extension_array_equal(res, expected) + res = ts + tda + tm.assert_extension_array_equal(res, expected) + + ts += Timedelta(1) # case where we can't cast losslessly + + exp_values = tda._ndarray + ts.asm8 + expected = ( + DatetimeArray._simple_new(exp_values, dtype=exp_values.dtype) + .tz_localize("UTC") + .tz_convert(ts.tz) + ) + + result = tda + ts + tm.assert_extension_array_equal(result, expected) + + result = ts + tda + tm.assert_extension_array_equal(result, expected) + + def test_mul_scalar(self, tda): + other = 2 + result = tda * other + expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype) + tm.assert_extension_array_equal(result, expected) + assert result._creso == tda._creso + + def test_mul_listlike(self, tda): + other = np.arange(len(tda)) + result = tda * other + expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype) + tm.assert_extension_array_equal(result, expected) + assert result._creso == tda._creso + + def test_mul_listlike_object(self, tda): + other = np.arange(len(tda)) + result = tda * other.astype(object) + expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype) + tm.assert_extension_array_equal(result, expected) + assert result._creso == tda._creso + + def test_div_numeric_scalar(self, tda): + other = 2 + result = tda / other + expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype) + tm.assert_extension_array_equal(result, expected) + assert result._creso == tda._creso + + def test_div_td_scalar(self, tda): + other = timedelta(seconds=1) + result = tda / other + expected = tda._ndarray / np.timedelta64(1, "s") + tm.assert_numpy_array_equal(result, expected) + + def test_div_numeric_array(self, tda): + other = np.arange(len(tda)) + result = tda / other + expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype) + 
tm.assert_extension_array_equal(result, expected) + assert result._creso == tda._creso + + def test_div_td_array(self, tda): + other = tda._ndarray + tda._ndarray[-1] + result = tda / other + expected = tda._ndarray / other + tm.assert_numpy_array_equal(result, expected) + + def test_add_timedeltaarraylike(self, tda): + tda_nano = tda.astype("m8[ns]") + + expected = tda_nano * 2 + res = tda_nano + tda + tm.assert_extension_array_equal(res, expected) + res = tda + tda_nano + tm.assert_extension_array_equal(res, expected) + + expected = tda_nano * 0 + res = tda - tda_nano + tm.assert_extension_array_equal(res, expected) + + res = tda_nano - tda + tm.assert_extension_array_equal(res, expected) + + +class TestTimedeltaArray: + @pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"]) + def test_astype_int(self, dtype): + arr = TimedeltaArray._from_sequence([Timedelta("1H"), Timedelta("2H")]) + + if np.dtype(dtype) != np.int64: + with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): + arr.astype(dtype) + return + + result = arr.astype(dtype) + expected = arr._ndarray.view("i8") + tm.assert_numpy_array_equal(result, expected) + + def test_setitem_clears_freq(self): + a = TimedeltaArray(pd.timedelta_range("1H", periods=2, freq="H")) + a[0] = Timedelta("1H") + assert a.freq is None + + @pytest.mark.parametrize( + "obj", + [ + Timedelta(seconds=1), + Timedelta(seconds=1).to_timedelta64(), + Timedelta(seconds=1).to_pytimedelta(), + ], + ) + def test_setitem_objects(self, obj): + # make sure we accept timedelta64 and timedelta in addition to Timedelta + tdi = pd.timedelta_range("2 Days", periods=4, freq="H") + arr = TimedeltaArray(tdi, freq=tdi.freq) + + arr[0] = obj + assert arr[0] == Timedelta(seconds=1) + + @pytest.mark.parametrize( + "other", + [ + 1, + np.int64(1), + 1.0, + np.datetime64("NaT"), + pd.Timestamp("2021-01-01"), + "invalid", + np.arange(10, dtype="i8") * 24 * 3600 * 10**9, + (np.arange(10) * 24 * 3600 * 10**9).view("datetime64[ns]"), + pd.Timestamp("2021-01-01").to_period("D"), + ], + ) + @pytest.mark.parametrize("index", [True, False]) + def test_searchsorted_invalid_types(self, other, index): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + arr = TimedeltaArray(data, freq="D") + if index: + arr = pd.Index(arr) + + msg = "|".join( + [ + "searchsorted requires compatible dtype or scalar", + "value should be a 'Timedelta', 'NaT', or array of those. 
Got", + ] + ) + with pytest.raises(TypeError, match=msg): + arr.searchsorted(other) + + +class TestUnaryOps: + def test_abs(self): + vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]") + arr = TimedeltaArray(vals) + + evals = np.array([3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]") + expected = TimedeltaArray(evals) + + result = abs(arr) + tm.assert_timedelta_array_equal(result, expected) + + result2 = np.abs(arr) + tm.assert_timedelta_array_equal(result2, expected) + + def test_pos(self): + vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]") + arr = TimedeltaArray(vals) + + result = +arr + tm.assert_timedelta_array_equal(result, arr) + assert not tm.shares_memory(result, arr) + + result2 = np.positive(arr) + tm.assert_timedelta_array_equal(result2, arr) + assert not tm.shares_memory(result2, arr) + + def test_neg(self): + vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]") + arr = TimedeltaArray(vals) + + evals = np.array([3600 * 10**9, "NaT", -7200 * 10**9], dtype="m8[ns]") + expected = TimedeltaArray(evals) + + result = -arr + tm.assert_timedelta_array_equal(result, expected) + + result2 = np.negative(arr) + tm.assert_timedelta_array_equal(result2, expected) + + def test_neg_freq(self): + tdi = pd.timedelta_range("2 Days", periods=4, freq="H") + arr = TimedeltaArray(tdi, freq=tdi.freq) + + expected = TimedeltaArray(-tdi._data, freq=-tdi.freq) + + result = -arr + tm.assert_timedelta_array_equal(result, expected) + + result2 = np.negative(arr) + tm.assert_timedelta_array_equal(result2, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/test_constructors.py new file mode 100644 index 00000000..3a076a68 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/test_constructors.py @@ -0,0 +1,63 @@ +import numpy as np +import pytest + +from pandas.core.arrays import TimedeltaArray + + +class TestTimedeltaArrayConstructor: + def test_only_1dim_accepted(self): + # GH#25282 + arr = np.array([0, 1, 2, 3], dtype="m8[h]").astype("m8[ns]") + + with pytest.raises(ValueError, match="Only 1-dimensional"): + # 3-dim, we allow 2D to sneak in for ops purposes GH#29853 + TimedeltaArray(arr.reshape(2, 2, 1)) + + with pytest.raises(ValueError, match="Only 1-dimensional"): + # 0-dim + TimedeltaArray(arr[[0]].squeeze()) + + def test_freq_validation(self): + # ensure that the public constructor cannot create an invalid instance + arr = np.array([0, 0, 1], dtype=np.int64) * 3600 * 10**9 + + msg = ( + "Inferred frequency None from passed values does not " + "conform to passed frequency D" + ) + with pytest.raises(ValueError, match=msg): + TimedeltaArray(arr.view("timedelta64[ns]"), freq="D") + + def test_non_array_raises(self): + with pytest.raises(ValueError, match="list"): + TimedeltaArray([1, 2, 3]) + + def test_other_type_raises(self): + with pytest.raises(ValueError, match="dtype bool cannot be converted"): + TimedeltaArray(np.array([1, 2, 3], dtype="bool")) + + def test_incorrect_dtype_raises(self): + # TODO: why TypeError for 'category' but ValueError for i8? 
+ with pytest.raises( + ValueError, match=r"category cannot be converted to timedelta64\[ns\]" + ): + TimedeltaArray(np.array([1, 2, 3], dtype="i8"), dtype="category") + + with pytest.raises( + ValueError, match=r"dtype int64 cannot be converted to timedelta64\[ns\]" + ): + TimedeltaArray(np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("int64")) + + def test_copy(self): + data = np.array([1, 2, 3], dtype="m8[ns]") + arr = TimedeltaArray(data, copy=False) + assert arr._ndarray is data + + arr = TimedeltaArray(data, copy=True) + assert arr._ndarray is not data + assert arr._ndarray.base is not data + + def test_from_sequence_dtype(self): + msg = "dtype .*object.* cannot be converted to timedelta64" + with pytest.raises(ValueError, match=msg): + TimedeltaArray._from_sequence([], dtype=object) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/test_cumulative.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/test_cumulative.py new file mode 100644 index 00000000..b321dc05 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/test_cumulative.py @@ -0,0 +1,19 @@ +import pytest + +import pandas._testing as tm +from pandas.core.arrays import TimedeltaArray + + +class TestAccumulator: + def test_accumulators_disallowed(self): + # GH#50297 + arr = TimedeltaArray._from_sequence_not_strict(["1D", "2D"]) + with pytest.raises(TypeError, match="cumprod not supported"): + arr._accumulate("cumprod") + + def test_cumsum(self): + # GH#50297 + arr = TimedeltaArray._from_sequence_not_strict(["1D", "2D"]) + result = arr._accumulate("cumsum") + expected = TimedeltaArray._from_sequence_not_strict(["1D", "3D"]) + tm.assert_timedelta_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/test_reductions.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/test_reductions.py new file mode 100644 index 00000000..72d45f5b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/arrays/timedeltas/test_reductions.py @@ -0,0 +1,215 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import Timedelta +import pandas._testing as tm +from pandas.core import nanops +from pandas.core.arrays import TimedeltaArray + + +class TestReductions: + @pytest.mark.parametrize("name", ["std", "min", "max", "median", "mean"]) + @pytest.mark.parametrize("skipna", [True, False]) + def test_reductions_empty(self, name, skipna): + tdi = pd.TimedeltaIndex([]) + arr = tdi.array + + result = getattr(tdi, name)(skipna=skipna) + assert result is pd.NaT + + result = getattr(arr, name)(skipna=skipna) + assert result is pd.NaT + + @pytest.mark.parametrize("skipna", [True, False]) + def test_sum_empty(self, skipna): + tdi = pd.TimedeltaIndex([]) + arr = tdi.array + + result = tdi.sum(skipna=skipna) + assert isinstance(result, Timedelta) + assert result == Timedelta(0) + + result = arr.sum(skipna=skipna) + assert isinstance(result, Timedelta) + assert result == Timedelta(0) + + def test_min_max(self): + arr = TimedeltaArray._from_sequence(["3H", "3H", "NaT", "2H", "5H", "4H"]) + + result = arr.min() + expected = Timedelta("2H") + assert result == expected + + result = arr.max() + expected = Timedelta("5H") + assert result == expected + + result = arr.min(skipna=False) + assert result is pd.NaT + + result = arr.max(skipna=False) + assert result is pd.NaT + + def test_sum(self): + tdi = pd.TimedeltaIndex(["3H", "3H", "NaT", "2H", "5H", "4H"]) + arr = 
tdi.array + + result = arr.sum(skipna=True) + expected = Timedelta(hours=17) + assert isinstance(result, Timedelta) + assert result == expected + + result = tdi.sum(skipna=True) + assert isinstance(result, Timedelta) + assert result == expected + + result = arr.sum(skipna=False) + assert result is pd.NaT + + result = tdi.sum(skipna=False) + assert result is pd.NaT + + result = arr.sum(min_count=9) + assert result is pd.NaT + + result = tdi.sum(min_count=9) + assert result is pd.NaT + + result = arr.sum(min_count=1) + assert isinstance(result, Timedelta) + assert result == expected + + result = tdi.sum(min_count=1) + assert isinstance(result, Timedelta) + assert result == expected + + def test_npsum(self): + # GH#25282, GH#25335 np.sum should return a Timedelta, not timedelta64 + tdi = pd.TimedeltaIndex(["3H", "3H", "2H", "5H", "4H"]) + arr = tdi.array + + result = np.sum(tdi) + expected = Timedelta(hours=17) + assert isinstance(result, Timedelta) + assert result == expected + + result = np.sum(arr) + assert isinstance(result, Timedelta) + assert result == expected + + def test_sum_2d_skipna_false(self): + arr = np.arange(8).astype(np.int64).view("m8[s]").astype("m8[ns]").reshape(4, 2) + arr[-1, -1] = "Nat" + + tda = TimedeltaArray(arr) + + result = tda.sum(skipna=False) + assert result is pd.NaT + + result = tda.sum(axis=0, skipna=False) + expected = pd.TimedeltaIndex([Timedelta(seconds=12), pd.NaT])._values + tm.assert_timedelta_array_equal(result, expected) + + result = tda.sum(axis=1, skipna=False) + expected = pd.TimedeltaIndex( + [ + Timedelta(seconds=1), + Timedelta(seconds=5), + Timedelta(seconds=9), + pd.NaT, + ] + )._values + tm.assert_timedelta_array_equal(result, expected) + + # Adding a Timestamp makes this a test for DatetimeArray.std + @pytest.mark.parametrize( + "add", + [ + Timedelta(0), + pd.Timestamp("2021-01-01"), + pd.Timestamp("2021-01-01", tz="UTC"), + pd.Timestamp("2021-01-01", tz="Asia/Tokyo"), + ], + ) + def test_std(self, add): + tdi = pd.TimedeltaIndex(["0H", "4H", "NaT", "4H", "0H", "2H"]) + add + arr = tdi.array + + result = arr.std(skipna=True) + expected = Timedelta(hours=2) + assert isinstance(result, Timedelta) + assert result == expected + + result = tdi.std(skipna=True) + assert isinstance(result, Timedelta) + assert result == expected + + if getattr(arr, "tz", None) is None: + result = nanops.nanstd(np.asarray(arr), skipna=True) + assert isinstance(result, np.timedelta64) + assert result == expected + + result = arr.std(skipna=False) + assert result is pd.NaT + + result = tdi.std(skipna=False) + assert result is pd.NaT + + if getattr(arr, "tz", None) is None: + result = nanops.nanstd(np.asarray(arr), skipna=False) + assert isinstance(result, np.timedelta64) + assert np.isnat(result) + + def test_median(self): + tdi = pd.TimedeltaIndex(["0H", "3H", "NaT", "5H06m", "0H", "2H"]) + arr = tdi.array + + result = arr.median(skipna=True) + expected = Timedelta(hours=2) + assert isinstance(result, Timedelta) + assert result == expected + + result = tdi.median(skipna=True) + assert isinstance(result, Timedelta) + assert result == expected + + result = arr.median(skipna=False) + assert result is pd.NaT + + result = tdi.median(skipna=False) + assert result is pd.NaT + + def test_mean(self): + tdi = pd.TimedeltaIndex(["0H", "3H", "NaT", "5H06m", "0H", "2H"]) + arr = tdi._data + + # manually verified result + expected = Timedelta(arr.dropna()._ndarray.mean()) + + result = arr.mean() + assert result == expected + result = arr.mean(skipna=False) + assert result is 
pd.NaT + + result = arr.dropna().mean(skipna=False) + assert result == expected + + result = arr.mean(axis=0) + assert result == expected + + def test_mean_2d(self): + tdi = pd.timedelta_range("14 days", periods=6) + tda = tdi._data.reshape(3, 2) + + result = tda.mean(axis=0) + expected = tda[1] + tm.assert_timedelta_array_equal(result, expected) + + result = tda.mean(axis=1) + expected = tda[:, 0] + Timedelta(hours=12) + tm.assert_timedelta_array_equal(result, expected) + + result = tda.mean(axis=None) + expected = tdi.mean() + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/common.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/common.py new file mode 100644 index 00000000..ad0b3941 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/common.py @@ -0,0 +1,9 @@ +from typing import Any + +from pandas import Index + + +def allow_na_ops(obj: Any) -> bool: + """Whether to skip test cases including NaN""" + is_bool_index = isinstance(obj, Index) and obj.inferred_type == "boolean" + return not is_bool_index and obj._can_hold_na diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_constructors.py new file mode 100644 index 00000000..4e954891 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_constructors.py @@ -0,0 +1,174 @@ +from datetime import datetime +import sys + +import numpy as np +import pytest + +from pandas.compat import PYPY + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, +) +import pandas._testing as tm +from pandas.core.accessor import PandasDelegate +from pandas.core.base import ( + NoNewAttributesMixin, + PandasObject, +) + + +def series_via_frame_from_dict(x, **kwargs): + return DataFrame({"a": x}, **kwargs)["a"] + + +def series_via_frame_from_scalar(x, **kwargs): + return DataFrame(x, **kwargs)[0] + + +@pytest.fixture( + params=[ + Series, + series_via_frame_from_dict, + series_via_frame_from_scalar, + Index, + ], + ids=["Series", "DataFrame-dict", "DataFrame-array", "Index"], +) +def constructor(request): + return request.param + + +class TestPandasDelegate: + class Delegator: + _properties = ["prop"] + _methods = ["test_method"] + + def _set_prop(self, value): + self.prop = value + + def _get_prop(self): + return self.prop + + prop = property(_get_prop, _set_prop, doc="foo property") + + def test_method(self, *args, **kwargs): + """a test method""" + + class Delegate(PandasDelegate, PandasObject): + def __init__(self, obj) -> None: + self.obj = obj + + def test_invalid_delegation(self): + # these show that in order for the delegation to work + # the _delegate_* methods need to be overridden to not raise + # a TypeError + + self.Delegate._add_delegate_accessors( + delegate=self.Delegator, + accessors=self.Delegator._properties, + typ="property", + ) + self.Delegate._add_delegate_accessors( + delegate=self.Delegator, accessors=self.Delegator._methods, typ="method" + ) + + delegate = self.Delegate(self.Delegator()) + + msg = "You cannot access the property prop" + with pytest.raises(TypeError, match=msg): + delegate.prop + + msg = "The property prop cannot be set" + with pytest.raises(TypeError, match=msg): + delegate.prop = 5 + + msg = "You cannot 
access the property prop" + with pytest.raises(TypeError, match=msg): + delegate.prop + + @pytest.mark.skipif(PYPY, reason="not relevant for PyPy") + def test_memory_usage(self): + # Delegate does not implement memory_usage. + # Check that we fall back to in-built `__sizeof__` + # GH 12924 + delegate = self.Delegate(self.Delegator()) + sys.getsizeof(delegate) + + +class TestNoNewAttributesMixin: + def test_mixin(self): + class T(NoNewAttributesMixin): + pass + + t = T() + assert not hasattr(t, "__frozen") + + t.a = "test" + assert t.a == "test" + + t._freeze() + assert "__frozen" in dir(t) + assert getattr(t, "__frozen") + msg = "You cannot add any new attribute" + with pytest.raises(AttributeError, match=msg): + t.b = "test" + + assert not hasattr(t, "b") + + +class TestConstruction: + # test certain constructor behaviours on dtype inference across Series, + # Index and DataFrame + + @pytest.mark.parametrize( + "a", + [ + np.array(["2263-01-01"], dtype="datetime64[D]"), + np.array([datetime(2263, 1, 1)], dtype=object), + np.array([np.datetime64("2263-01-01", "D")], dtype=object), + np.array(["2263-01-01"], dtype=object), + ], + ids=[ + "datetime64[D]", + "object-datetime.datetime", + "object-numpy-scalar", + "object-string", + ], + ) + def test_constructor_datetime_outofbound(self, a, constructor): + # GH-26853 (+ bug GH-26206 out of bound non-ns unit) + + # No dtype specified (dtype inference) + # datetime64[non-ns] raise error, other cases result in object dtype + # and preserve original data + if a.dtype.kind == "M": + # Can't fit in nanosecond bounds -> get the nearest supported unit + result = constructor(a) + assert result.dtype == "M8[s]" + else: + result = constructor(a) + assert result.dtype == "object" + tm.assert_numpy_array_equal(result.to_numpy(), a) + + # Explicit dtype specified + # Forced conversion fails for all -> all cases raise error + msg = "Out of bounds|Out of bounds .* present at position 0" + with pytest.raises(pd.errors.OutOfBoundsDatetime, match=msg): + constructor(a, dtype="datetime64[ns]") + + def test_constructor_datetime_nonns(self, constructor): + arr = np.array(["2020-01-01T00:00:00.000000"], dtype="datetime64[us]") + dta = pd.core.arrays.DatetimeArray._simple_new(arr, dtype=arr.dtype) + expected = constructor(dta) + assert expected.dtype == arr.dtype + + result = constructor(arr) + tm.assert_equal(result, expected) + + # https://github.com/pandas-dev/pandas/issues/34843 + arr.flags.writeable = False + result = constructor(arr) + tm.assert_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_conversion.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_conversion.py new file mode 100644 index 00000000..5b9618bf --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_conversion.py @@ -0,0 +1,547 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +from pandas import ( + CategoricalIndex, + Series, + Timedelta, + Timestamp, + date_range, +) +import pandas._testing as tm +from pandas.core.arrays import ( + DatetimeArray, + IntervalArray, + NumpyExtensionArray, + PeriodArray, + SparseArray, + TimedeltaArray, +) + + +class TestToIterable: + # test that we convert an iterable to python types + + dtypes = [ + ("int8", int), + ("int16", int), + ("int32", int), + ("int64", int), + ("uint8", int), + ("uint16", int), + ("uint32", int), + ("uint64", int), + ("float16", float), + ("float32", float), + ("float64", float), 
+ ("datetime64[ns]", Timestamp), + ("datetime64[ns, US/Eastern]", Timestamp), + ("timedelta64[ns]", Timedelta), + ] + + @pytest.mark.parametrize("dtype, rdtype", dtypes) + @pytest.mark.parametrize( + "method", + [ + lambda x: x.tolist(), + lambda x: x.to_list(), + lambda x: list(x), + lambda x: list(x.__iter__()), + ], + ids=["tolist", "to_list", "list", "iter"], + ) + def test_iterable(self, index_or_series, method, dtype, rdtype): + # gh-10904 + # gh-13258 + # coerce iteration to underlying python / pandas types + typ = index_or_series + if dtype == "float16" and issubclass(typ, pd.Index): + with pytest.raises(NotImplementedError, match="float16 indexes are not "): + typ([1], dtype=dtype) + return + s = typ([1], dtype=dtype) + result = method(s)[0] + assert isinstance(result, rdtype) + + @pytest.mark.parametrize( + "dtype, rdtype, obj", + [ + ("object", object, "a"), + ("object", int, 1), + ("category", object, "a"), + ("category", int, 1), + ], + ) + @pytest.mark.parametrize( + "method", + [ + lambda x: x.tolist(), + lambda x: x.to_list(), + lambda x: list(x), + lambda x: list(x.__iter__()), + ], + ids=["tolist", "to_list", "list", "iter"], + ) + def test_iterable_object_and_category( + self, index_or_series, method, dtype, rdtype, obj + ): + # gh-10904 + # gh-13258 + # coerce iteration to underlying python / pandas types + typ = index_or_series + s = typ([obj], dtype=dtype) + result = method(s)[0] + assert isinstance(result, rdtype) + + @pytest.mark.parametrize("dtype, rdtype", dtypes) + def test_iterable_items(self, dtype, rdtype): + # gh-13258 + # test if items yields the correct boxed scalars + # this only applies to series + s = Series([1], dtype=dtype) + _, result = next(iter(s.items())) + assert isinstance(result, rdtype) + + _, result = next(iter(s.items())) + assert isinstance(result, rdtype) + + @pytest.mark.parametrize( + "dtype, rdtype", dtypes + [("object", int), ("category", int)] + ) + def test_iterable_map(self, index_or_series, dtype, rdtype): + # gh-13236 + # coerce iteration to underlying python / pandas types + typ = index_or_series + if dtype == "float16" and issubclass(typ, pd.Index): + with pytest.raises(NotImplementedError, match="float16 indexes are not "): + typ([1], dtype=dtype) + return + s = typ([1], dtype=dtype) + result = s.map(type)[0] + if not isinstance(rdtype, tuple): + rdtype = (rdtype,) + assert result in rdtype + + @pytest.mark.parametrize( + "method", + [ + lambda x: x.tolist(), + lambda x: x.to_list(), + lambda x: list(x), + lambda x: list(x.__iter__()), + ], + ids=["tolist", "to_list", "list", "iter"], + ) + def test_categorial_datetimelike(self, method): + i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")]) + + result = method(i)[0] + assert isinstance(result, Timestamp) + + def test_iter_box(self): + vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")] + s = Series(vals) + assert s.dtype == "datetime64[ns]" + for res, exp in zip(s, vals): + assert isinstance(res, Timestamp) + assert res.tz is None + assert res == exp + + vals = [ + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + ] + s = Series(vals) + + assert s.dtype == "datetime64[ns, US/Eastern]" + for res, exp in zip(s, vals): + assert isinstance(res, Timestamp) + assert res.tz == exp.tz + assert res == exp + + # timedelta + vals = [Timedelta("1 days"), Timedelta("2 days")] + s = Series(vals) + assert s.dtype == "timedelta64[ns]" + for res, exp in zip(s, vals): + assert isinstance(res, Timedelta) + assert res == exp + + # 
period
+        vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
+        s = Series(vals)
+        assert s.dtype == "Period[M]"
+        for res, exp in zip(s, vals):
+            assert isinstance(res, pd.Period)
+            assert res.freq == "M"
+            assert res == exp
+
+
+@pytest.mark.parametrize(
+    "arr, expected_type, dtype",
+    [
+        (np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
+        (np.array(["a", "b"]), np.ndarray, "object"),
+        (pd.Categorical(["a", "b"]), pd.Categorical, "category"),
+        (
+            pd.DatetimeIndex(["2017", "2018"], tz="US/Central"),
+            DatetimeArray,
+            "datetime64[ns, US/Central]",
+        ),
+        (
+            pd.PeriodIndex([2018, 2019], freq="A"),
+            PeriodArray,
+            pd.core.dtypes.dtypes.PeriodDtype("A-DEC"),
+        ),
+        (pd.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, "interval"),
+        (
+            pd.DatetimeIndex(["2017", "2018"]),
+            DatetimeArray,
+            "datetime64[ns]",
+        ),
+        (
+            pd.TimedeltaIndex([10**10]),
+            TimedeltaArray,
+            "m8[ns]",
+        ),
+    ],
+)
+def test_values_consistent(arr, expected_type, dtype):
+    l_values = Series(arr)._values
+    r_values = pd.Index(arr)._values
+    assert type(l_values) is expected_type
+    assert type(l_values) is type(r_values)
+
+    tm.assert_equal(l_values, r_values)
+
+
+@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
+def test_numpy_array(arr):
+    ser = Series(arr)
+    result = ser.array
+    expected = NumpyExtensionArray(arr)
+    tm.assert_extension_array_equal(result, expected)
+
+
+def test_numpy_array_all_dtypes(any_numpy_dtype):
+    ser = Series(dtype=any_numpy_dtype)
+    result = ser.array
+    if np.dtype(any_numpy_dtype).kind == "M":
+        assert isinstance(result, DatetimeArray)
+    elif np.dtype(any_numpy_dtype).kind == "m":
+        assert isinstance(result, TimedeltaArray)
+    else:
+        assert isinstance(result, NumpyExtensionArray)
+
+
+@pytest.mark.parametrize(
+    "arr, attr",
+    [
+        (pd.Categorical(["a", "b"]), "_codes"),
+        (PeriodArray._from_sequence(["2000", "2001"], dtype="period[D]"), "_ndarray"),
+        (pd.array([0, np.nan], dtype="Int64"), "_data"),
+        (IntervalArray.from_breaks([0, 1]), "_left"),
+        (SparseArray([0, 1]), "_sparse_values"),
+        (DatetimeArray(np.array([1, 2], dtype="datetime64[ns]")), "_ndarray"),
+        # tz-aware Datetime
+        (
+            DatetimeArray(
+                np.array(
+                    ["2000-01-01T12:00:00", "2000-01-02T12:00:00"], dtype="M8[ns]"
+                ),
+                dtype=DatetimeTZDtype(tz="US/Central"),
+            ),
+            "_ndarray",
+        ),
+    ],
+)
+def test_array(arr, attr, index_or_series, request):
+    box = index_or_series
+
+    result = box(arr, copy=False).array
+
+    if attr:
+        arr = getattr(arr, attr)
+        result = getattr(result, attr)
+
+    assert result is arr
+
+
+def test_array_multiindex_raises():
+    idx = pd.MultiIndex.from_product([["A"], ["a", "b"]])
+    msg = "MultiIndex has no single backing array"
+    with pytest.raises(ValueError, match=msg):
+        idx.array
+
+
+@pytest.mark.parametrize(
+    "arr, expected",
+    [
+        (np.array([1, 2], dtype=np.int64), np.array([1, 2], dtype=np.int64)),
+        (pd.Categorical(["a", "b"]), np.array(["a", "b"], dtype=object)),
+        (
+            pd.core.arrays.period_array(["2000", "2001"], freq="D"),
+            np.array([pd.Period("2000", freq="D"), pd.Period("2001", freq="D")]),
+        ),
+        (pd.array([0, np.nan], dtype="Int64"), np.array([0, pd.NA], dtype=object)),
+        (
+            IntervalArray.from_breaks([0, 1, 2]),
+            np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object),
+        ),
+        (SparseArray([0, 1]), np.array([0, 1], dtype=np.int64)),
+        # tz-naive datetime
+        (
+            DatetimeArray(np.array(["2000", "2001"], dtype="M8[ns]")),
+            np.array(["2000", "2001"], dtype="M8[ns]"),
+        ),
+        # tz-aware stays tz-aware
+        (
+            DatetimeArray(
+                np.array(
+                    
["2000-01-01T06:00:00", "2000-01-02T06:00:00"], dtype="M8[ns]" + ), + dtype=DatetimeTZDtype(tz="US/Central"), + ), + np.array( + [ + Timestamp("2000-01-01", tz="US/Central"), + Timestamp("2000-01-02", tz="US/Central"), + ] + ), + ), + # Timedelta + ( + TimedeltaArray(np.array([0, 3600000000000], dtype="i8"), freq="H"), + np.array([0, 3600000000000], dtype="m8[ns]"), + ), + # GH#26406 tz is preserved in Categorical[dt64tz] + ( + pd.Categorical(date_range("2016-01-01", periods=2, tz="US/Pacific")), + np.array( + [ + Timestamp("2016-01-01", tz="US/Pacific"), + Timestamp("2016-01-02", tz="US/Pacific"), + ] + ), + ), + ], +) +def test_to_numpy(arr, expected, index_or_series_or_array, request): + box = index_or_series_or_array + + with tm.assert_produces_warning(None): + thing = box(arr) + + if arr.dtype.name == "int64" and box is pd.array: + mark = pytest.mark.xfail(reason="thing is Int64 and to_numpy() returns object") + request.node.add_marker(mark) + + result = thing.to_numpy() + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(thing) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("as_series", [True, False]) +@pytest.mark.parametrize( + "arr", [np.array([1, 2, 3], dtype="int64"), np.array(["a", "b", "c"], dtype=object)] +) +def test_to_numpy_copy(arr, as_series): + obj = pd.Index(arr, copy=False) + if as_series: + obj = Series(obj.values, copy=False) + + # no copy by default + result = obj.to_numpy() + assert np.shares_memory(arr, result) is True + + result = obj.to_numpy(copy=False) + assert np.shares_memory(arr, result) is True + + # copy=True + result = obj.to_numpy(copy=True) + assert np.shares_memory(arr, result) is False + + +@pytest.mark.parametrize("as_series", [True, False]) +def test_to_numpy_dtype(as_series): + tz = "US/Eastern" + obj = pd.DatetimeIndex(["2000", "2001"], tz=tz) + if as_series: + obj = Series(obj) + + # preserve tz by default + result = obj.to_numpy() + expected = np.array( + [Timestamp("2000", tz=tz), Timestamp("2001", tz=tz)], dtype=object + ) + tm.assert_numpy_array_equal(result, expected) + + result = obj.to_numpy(dtype="object") + tm.assert_numpy_array_equal(result, expected) + + result = obj.to_numpy(dtype="M8[ns]") + expected = np.array(["2000-01-01T05", "2001-01-01T05"], dtype="M8[ns]") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values, dtype, na_value, expected", + [ + ([1, 2, None], "float64", 0, [1.0, 2.0, 0.0]), + ( + [Timestamp("2000"), Timestamp("2000"), pd.NaT], + None, + Timestamp("2000"), + [np.datetime64("2000-01-01T00:00:00.000000000")] * 3, + ), + ], +) +def test_to_numpy_na_value_numpy_dtype( + index_or_series, values, dtype, na_value, expected +): + obj = index_or_series(values) + result = obj.to_numpy(dtype=dtype, na_value=na_value) + expected = np.array(expected) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "data, multiindex, dtype, na_value, expected", + [ + ( + [1, 2, None, 4], + [(0, "a"), (0, "b"), (1, "b"), (1, "c")], + float, + None, + [1.0, 2.0, np.nan, 4.0], + ), + ( + [1, 2, None, 4], + [(0, "a"), (0, "b"), (1, "b"), (1, "c")], + float, + np.nan, + [1.0, 2.0, np.nan, 4.0], + ), + ( + [1.0, 2.0, np.nan, 4.0], + [("a", 0), ("a", 1), ("a", 2), ("b", 0)], + int, + 0, + [1, 2, 0, 4], + ), + ( + [Timestamp("2000"), Timestamp("2000"), pd.NaT], + [(0, Timestamp("2021")), (0, Timestamp("2022")), (1, Timestamp("2000"))], + None, + Timestamp("2000"), + [np.datetime64("2000-01-01T00:00:00.000000000")] * 3, + ), + ], +) 
+def test_to_numpy_multiindex_series_na_value( + data, multiindex, dtype, na_value, expected +): + index = pd.MultiIndex.from_tuples(multiindex) + series = Series(data, index=index) + result = series.to_numpy(dtype=dtype, na_value=na_value) + expected = np.array(expected) + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_kwargs_raises(): + # numpy + s = Series([1, 2, 3]) + msg = r"to_numpy\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + s.to_numpy(foo=True) + + # extension + s = Series([1, 2, 3], dtype="Int64") + with pytest.raises(TypeError, match=msg): + s.to_numpy(foo=True) + + +@pytest.mark.parametrize( + "data", + [ + {"a": [1, 2, 3], "b": [1, 2, None]}, + {"a": np.array([1, 2, 3]), "b": np.array([1, 2, np.nan])}, + {"a": pd.array([1, 2, 3]), "b": pd.array([1, 2, None])}, + ], +) +@pytest.mark.parametrize("dtype, na_value", [(float, np.nan), (object, None)]) +def test_to_numpy_dataframe_na_value(data, dtype, na_value): + # https://github.com/pandas-dev/pandas/issues/33820 + df = pd.DataFrame(data) + result = df.to_numpy(dtype=dtype, na_value=na_value) + expected = np.array([[1, 1], [2, 2], [3, na_value]], dtype=dtype) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "data, expected", + [ + ( + {"a": pd.array([1, 2, None])}, + np.array([[1.0], [2.0], [np.nan]], dtype=float), + ), + ( + {"a": [1, 2, 3], "b": [1, 2, 3]}, + np.array([[1, 1], [2, 2], [3, 3]], dtype=float), + ), + ], +) +def test_to_numpy_dataframe_single_block(data, expected): + # https://github.com/pandas-dev/pandas/issues/33820 + df = pd.DataFrame(data) + result = df.to_numpy(dtype=float, na_value=np.nan) + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_dataframe_single_block_no_mutate(): + # https://github.com/pandas-dev/pandas/issues/33820 + result = pd.DataFrame(np.array([1.0, 2.0, np.nan])) + expected = pd.DataFrame(np.array([1.0, 2.0, np.nan])) + result.to_numpy(na_value=0.0) + tm.assert_frame_equal(result, expected) + + +class TestAsArray: + @pytest.mark.parametrize("tz", [None, "US/Central"]) + def test_asarray_object_dt64(self, tz): + ser = Series(date_range("2000", periods=2, tz=tz)) + + with tm.assert_produces_warning(None): + # Future behavior (for tzaware case) with no warning + result = np.asarray(ser, dtype=object) + + expected = np.array( + [Timestamp("2000-01-01", tz=tz), Timestamp("2000-01-02", tz=tz)] + ) + tm.assert_numpy_array_equal(result, expected) + + def test_asarray_tz_naive(self): + # This shouldn't produce a warning. 
+ ser = Series(date_range("2000", periods=2)) + expected = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]") + result = np.asarray(ser) + + tm.assert_numpy_array_equal(result, expected) + + def test_asarray_tz_aware(self): + tz = "US/Central" + ser = Series(date_range("2000", periods=2, tz=tz)) + expected = np.array(["2000-01-01T06", "2000-01-02T06"], dtype="M8[ns]") + result = np.asarray(ser, dtype="datetime64[ns]") + + tm.assert_numpy_array_equal(result, expected) + + # Old behavior with no warning + result = np.asarray(ser, dtype="M8[ns]") + + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_fillna.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_fillna.py new file mode 100644 index 00000000..7300d301 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_fillna.py @@ -0,0 +1,60 @@ +""" +Though Index.fillna and Series.fillna has separate impl, +test here to confirm these works as the same +""" + +import numpy as np +import pytest + +from pandas import MultiIndex +import pandas._testing as tm +from pandas.tests.base.common import allow_na_ops + + +def test_fillna(index_or_series_obj): + # GH 11343 + obj = index_or_series_obj + + if isinstance(obj, MultiIndex): + msg = "isna is not defined for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + obj.fillna(0) + return + + # values will not be changed + fill_value = obj.values[0] if len(obj) > 0 else 0 + result = obj.fillna(fill_value) + + tm.assert_equal(obj, result) + + # check shallow_copied + assert obj is not result + + +@pytest.mark.parametrize("null_obj", [np.nan, None]) +def test_fillna_null(null_obj, index_or_series_obj): + # GH 11343 + obj = index_or_series_obj + klass = type(obj) + + if not allow_na_ops(obj): + pytest.skip(f"{klass} doesn't allow for NA operations") + elif len(obj) < 1: + pytest.skip("Test doesn't make sense on empty data") + elif isinstance(obj, MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") + + values = obj._values + fill_value = values[0] + expected = values.copy() + values[0:2] = null_obj + expected[0:2] = fill_value + + expected = klass(expected) + obj = klass(values) + + result = obj.fillna(fill_value) + tm.assert_equal(result, expected) + + # check shallow_copied + assert obj is not result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_misc.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_misc.py new file mode 100644 index 00000000..3ca53c40 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_misc.py @@ -0,0 +1,184 @@ +import sys + +import numpy as np +import pytest + +from pandas.compat import PYPY + +from pandas.core.dtypes.common import ( + is_dtype_equal, + is_object_dtype, +) + +import pandas as pd +from pandas import ( + Index, + Series, +) +import pandas._testing as tm + + +def test_isnull_notnull_docstrings(): + # GH#41855 make sure its clear these are aliases + doc = pd.DataFrame.notnull.__doc__ + assert doc.startswith("\nDataFrame.notnull is an alias for DataFrame.notna.\n") + doc = pd.DataFrame.isnull.__doc__ + assert doc.startswith("\nDataFrame.isnull is an alias for DataFrame.isna.\n") + + doc = Series.notnull.__doc__ + assert doc.startswith("\nSeries.notnull is an alias for Series.notna.\n") + doc = Series.isnull.__doc__ + assert doc.startswith("\nSeries.isnull is an alias for Series.isna.\n") + + +@pytest.mark.parametrize( + "op_name, op", + [ + ("add", "+"), + ("sub", 
"-"), + ("mul", "*"), + ("mod", "%"), + ("pow", "**"), + ("truediv", "/"), + ("floordiv", "//"), + ], +) +def test_binary_ops_docstring(frame_or_series, op_name, op): + # not using the all_arithmetic_functions fixture with _get_opstr + # as _get_opstr is used internally in the dynamic implementation of the docstring + klass = frame_or_series + + operand1 = klass.__name__.lower() + operand2 = "other" + expected_str = " ".join([operand1, op, operand2]) + assert expected_str in getattr(klass, op_name).__doc__ + + # reverse version of the binary ops + expected_str = " ".join([operand2, op, operand1]) + assert expected_str in getattr(klass, "r" + op_name).__doc__ + + +def test_ndarray_compat_properties(index_or_series_obj): + obj = index_or_series_obj + + # Check that we work. + for p in ["shape", "dtype", "T", "nbytes"]: + assert getattr(obj, p, None) is not None + + # deprecated properties + for p in ["strides", "itemsize", "base", "data"]: + assert not hasattr(obj, p) + + msg = "can only convert an array of size 1 to a Python scalar" + with pytest.raises(ValueError, match=msg): + obj.item() # len > 1 + + assert obj.ndim == 1 + assert obj.size == len(obj) + + assert Index([1]).item() == 1 + assert Series([1]).item() == 1 + + +@pytest.mark.skipif(PYPY, reason="not relevant for PyPy") +def test_memory_usage(index_or_series_memory_obj): + obj = index_or_series_memory_obj + # Clear index caches so that len(obj) == 0 report 0 memory usage + if isinstance(obj, Series): + is_ser = True + obj.index._engine.clear_mapping() + else: + is_ser = False + obj._engine.clear_mapping() + + res = obj.memory_usage() + res_deep = obj.memory_usage(deep=True) + + is_object = is_object_dtype(obj) or (is_ser and is_object_dtype(obj.index)) + is_categorical = isinstance(obj.dtype, pd.CategoricalDtype) or ( + is_ser and isinstance(obj.index.dtype, pd.CategoricalDtype) + ) + is_object_string = is_dtype_equal(obj, "string[python]") or ( + is_ser and is_dtype_equal(obj.index.dtype, "string[python]") + ) + + if len(obj) == 0: + expected = 0 + assert res_deep == res == expected + elif is_object or is_categorical or is_object_string: + # only deep will pick them up + assert res_deep > res + else: + assert res == res_deep + + # sys.getsizeof will call the .memory_usage with + # deep=True, and add on some GC overhead + diff = res_deep - sys.getsizeof(obj) + assert abs(diff) < 100 + + +def test_memory_usage_components_series(series_with_simple_index): + series = series_with_simple_index + total_usage = series.memory_usage(index=True) + non_index_usage = series.memory_usage(index=False) + index_usage = series.index.memory_usage() + assert total_usage == non_index_usage + index_usage + + +@pytest.mark.parametrize("dtype", tm.NARROW_NP_DTYPES) +def test_memory_usage_components_narrow_series(dtype): + series = tm.make_rand_series(name="a", dtype=dtype) + total_usage = series.memory_usage(index=True) + non_index_usage = series.memory_usage(index=False) + index_usage = series.index.memory_usage() + assert total_usage == non_index_usage + index_usage + + +def test_searchsorted(request, index_or_series_obj): + # numpy.searchsorted calls obj.searchsorted under the hood. + # See gh-12238 + obj = index_or_series_obj + + if isinstance(obj, pd.MultiIndex): + # See gh-14833 + request.node.add_marker( + pytest.mark.xfail( + reason="np.searchsorted doesn't work on pd.MultiIndex: GH 14833" + ) + ) + elif obj.dtype.kind == "c" and isinstance(obj, Index): + # TODO: Should Series cases also raise? 
Looks like they use numpy + # comparison semantics https://github.com/numpy/numpy/issues/15981 + mark = pytest.mark.xfail(reason="complex objects are not comparable") + request.node.add_marker(mark) + + max_obj = max(obj, default=0) + index = np.searchsorted(obj, max_obj) + assert 0 <= index <= len(obj) + + index = np.searchsorted(obj, max_obj, sorter=range(len(obj))) + assert 0 <= index <= len(obj) + + +def test_access_by_position(index_flat): + index = index_flat + + if len(index) == 0: + pytest.skip("Test doesn't make sense on empty data") + + series = Series(index) + assert index[0] == series.iloc[0] + assert index[5] == series.iloc[5] + assert index[-1] == series.iloc[-1] + + size = len(index) + assert index[-1] == index[size - 1] + + msg = f"index {size} is out of bounds for axis 0 with size {size}" + if is_dtype_equal(index.dtype, "string[pyarrow]"): + msg = "index out of bounds" + with pytest.raises(IndexError, match=msg): + index[size] + msg = "single positional indexer is out-of-bounds" + with pytest.raises(IndexError, match=msg): + series.iloc[size] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_transpose.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_transpose.py new file mode 100644 index 00000000..246f33d2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_transpose.py @@ -0,0 +1,56 @@ +import numpy as np +import pytest + +from pandas import ( + CategoricalDtype, + DataFrame, +) +import pandas._testing as tm + + +def test_transpose(index_or_series_obj): + obj = index_or_series_obj + tm.assert_equal(obj.transpose(), obj) + + +def test_transpose_non_default_axes(index_or_series_obj): + msg = "the 'axes' parameter is not supported" + obj = index_or_series_obj + with pytest.raises(ValueError, match=msg): + obj.transpose(1) + with pytest.raises(ValueError, match=msg): + obj.transpose(axes=1) + + +def test_numpy_transpose(index_or_series_obj): + msg = "the 'axes' parameter is not supported" + obj = index_or_series_obj + tm.assert_equal(np.transpose(obj), obj) + + with pytest.raises(ValueError, match=msg): + np.transpose(obj, axes=1) + + +@pytest.mark.parametrize( + "data, transposed_data, index, columns, dtype", + [ + ([[1], [2]], [[1, 2]], ["a", "a"], ["b"], int), + ([[1], [2]], [[1, 2]], ["a", "a"], ["b"], CategoricalDtype([1, 2])), + ([[1, 2]], [[1], [2]], ["b"], ["a", "a"], int), + ([[1, 2]], [[1], [2]], ["b"], ["a", "a"], CategoricalDtype([1, 2])), + ([[1, 2], [3, 4]], [[1, 3], [2, 4]], ["a", "a"], ["b", "b"], int), + ( + [[1, 2], [3, 4]], + [[1, 3], [2, 4]], + ["a", "a"], + ["b", "b"], + CategoricalDtype([1, 2, 3, 4]), + ), + ], +) +def test_duplicate_labels(data, transposed_data, index, columns, dtype): + # GH 42380 + df = DataFrame(data, index=index, columns=columns, dtype=dtype) + result = df.T + expected = DataFrame(transposed_data, index=columns, columns=index, dtype=dtype) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_unique.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_unique.py new file mode 100644 index 00000000..4c845d8f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_unique.py @@ -0,0 +1,121 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.tests.base.common import allow_na_ops + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_unique(index_or_series_obj): + obj = 
index_or_series_obj + obj = np.repeat(obj, range(1, len(obj) + 1)) + result = obj.unique() + + # dict.fromkeys preserves the order + unique_values = list(dict.fromkeys(obj.values)) + if isinstance(obj, pd.MultiIndex): + expected = pd.MultiIndex.from_tuples(unique_values) + expected.names = obj.names + tm.assert_index_equal(result, expected, exact=True) + elif isinstance(obj, pd.Index): + expected = pd.Index(unique_values, dtype=obj.dtype) + if isinstance(obj.dtype, pd.DatetimeTZDtype): + expected = expected.normalize() + tm.assert_index_equal(result, expected, exact=True) + else: + expected = np.array(unique_values) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +@pytest.mark.parametrize("null_obj", [np.nan, None]) +def test_unique_null(null_obj, index_or_series_obj): + obj = index_or_series_obj + + if not allow_na_ops(obj): + pytest.skip("type doesn't allow for NA operations") + elif len(obj) < 1: + pytest.skip("Test doesn't make sense on empty data") + elif isinstance(obj, pd.MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") + + values = obj._values + values[0:2] = null_obj + + klass = type(obj) + repeated_values = np.repeat(values, range(1, len(values) + 1)) + obj = klass(repeated_values, dtype=obj.dtype) + result = obj.unique() + + unique_values_raw = dict.fromkeys(obj.values) + # because np.nan == np.nan is False, but None == None is True + # np.nan would be duplicated, whereas None wouldn't + unique_values_not_null = [val for val in unique_values_raw if not pd.isnull(val)] + unique_values = [null_obj] + unique_values_not_null + + if isinstance(obj, pd.Index): + expected = pd.Index(unique_values, dtype=obj.dtype) + if isinstance(obj.dtype, pd.DatetimeTZDtype): + result = result.normalize() + expected = expected.normalize() + tm.assert_index_equal(result, expected, exact=True) + else: + expected = np.array(unique_values, dtype=obj.dtype) + tm.assert_numpy_array_equal(result, expected) + + +def test_nunique(index_or_series_obj): + obj = index_or_series_obj + obj = np.repeat(obj, range(1, len(obj) + 1)) + expected = len(obj.unique()) + assert obj.nunique(dropna=False) == expected + + +@pytest.mark.parametrize("null_obj", [np.nan, None]) +def test_nunique_null(null_obj, index_or_series_obj): + obj = index_or_series_obj + + if not allow_na_ops(obj): + pytest.skip("type doesn't allow for NA operations") + elif isinstance(obj, pd.MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") + + values = obj._values + values[0:2] = null_obj + + klass = type(obj) + repeated_values = np.repeat(values, range(1, len(values) + 1)) + obj = klass(repeated_values, dtype=obj.dtype) + + if isinstance(obj, pd.CategoricalIndex): + assert obj.nunique() == len(obj.categories) + assert obj.nunique(dropna=False) == len(obj.categories) + 1 + else: + num_unique_values = len(obj.unique()) + assert obj.nunique() == max(0, num_unique_values - 1) + assert obj.nunique(dropna=False) == max(0, num_unique_values) + + +@pytest.mark.single_cpu +def test_unique_bad_unicode(index_or_series): + # regression test for #34550 + uval = "\ud83d" # smiley emoji + + obj = index_or_series([uval] * 2) + result = obj.unique() + + if isinstance(obj, pd.Index): + expected = pd.Index(["\ud83d"], dtype=object) + tm.assert_index_equal(result, expected, exact=True) + else: + expected = np.array(["\ud83d"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("dropna", [True, False]) +def 
test_nunique_dropna(dropna): + # GH37566 + ser = pd.Series(["yes", "yes", pd.NA, np.nan, None, pd.NaT]) + res = ser.nunique(dropna) + assert res == 1 if dropna else 5 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_value_counts.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_value_counts.py new file mode 100644 index 00000000..3cdfb7fe --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/base/test_value_counts.py @@ -0,0 +1,322 @@ +import collections +from datetime import timedelta + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DatetimeIndex, + Index, + Interval, + IntervalIndex, + MultiIndex, + Series, + Timedelta, + TimedeltaIndex, +) +import pandas._testing as tm +from pandas.tests.base.common import allow_na_ops + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_value_counts(index_or_series_obj): + obj = index_or_series_obj + obj = np.repeat(obj, range(1, len(obj) + 1)) + result = obj.value_counts() + + counter = collections.Counter(obj) + expected = Series(dict(counter.most_common()), dtype=np.int64, name="count") + + if obj.dtype != np.float16: + expected.index = expected.index.astype(obj.dtype) + else: + with pytest.raises(NotImplementedError, match="float16 indexes are not "): + expected.index.astype(obj.dtype) + return + if isinstance(expected.index, MultiIndex): + expected.index.names = obj.names + else: + expected.index.name = obj.name + + if not isinstance(result.dtype, np.dtype): + if getattr(obj.dtype, "storage", "") == "pyarrow": + expected = expected.astype("int64[pyarrow]") + else: + # i.e IntegerDtype + expected = expected.astype("Int64") + + # TODO(GH#32514): Order of entries with the same count is inconsistent + # on CI (gh-32449) + if obj.duplicated().any(): + result = result.sort_index() + expected = expected.sort_index() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("null_obj", [np.nan, None]) +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_value_counts_null(null_obj, index_or_series_obj): + orig = index_or_series_obj + obj = orig.copy() + + if not allow_na_ops(obj): + pytest.skip("type doesn't allow for NA operations") + elif len(obj) < 1: + pytest.skip("Test doesn't make sense on empty data") + elif isinstance(orig, MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") + + values = obj._values + values[0:2] = null_obj + + klass = type(obj) + repeated_values = np.repeat(values, range(1, len(values) + 1)) + obj = klass(repeated_values, dtype=obj.dtype) + + # because np.nan == np.nan is False, but None == None is True + # np.nan would be duplicated, whereas None wouldn't + counter = collections.Counter(obj.dropna()) + expected = Series(dict(counter.most_common()), dtype=np.int64, name="count") + + if obj.dtype != np.float16: + expected.index = expected.index.astype(obj.dtype) + else: + with pytest.raises(NotImplementedError, match="float16 indexes are not "): + expected.index.astype(obj.dtype) + return + expected.index.name = obj.name + + result = obj.value_counts() + if obj.duplicated().any(): + # TODO(GH#32514): + # Order of entries with the same count is inconsistent on CI (gh-32449) + expected = expected.sort_index() + result = result.sort_index() + + if not isinstance(result.dtype, np.dtype): + if getattr(obj.dtype, "storage", "") == "pyarrow": + expected = expected.astype("int64[pyarrow]") + else: + # i.e IntegerDtype + expected = 
expected.astype("Int64") + tm.assert_series_equal(result, expected) + + expected[null_obj] = 3 + + result = obj.value_counts(dropna=False) + if obj.duplicated().any(): + # TODO(GH#32514): + # Order of entries with the same count is inconsistent on CI (gh-32449) + expected = expected.sort_index() + result = result.sort_index() + tm.assert_series_equal(result, expected) + + +def test_value_counts_inferred(index_or_series): + klass = index_or_series + s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"] + s = klass(s_values) + expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"], name="count") + tm.assert_series_equal(s.value_counts(), expected) + + if isinstance(s, Index): + exp = Index(np.unique(np.array(s_values, dtype=np.object_))) + tm.assert_index_equal(s.unique(), exp) + else: + exp = np.unique(np.array(s_values, dtype=np.object_)) + tm.assert_numpy_array_equal(s.unique(), exp) + + assert s.nunique() == 4 + # don't sort, have to sort after the fact as not sorting is + # platform-dep + hist = s.value_counts(sort=False).sort_values() + expected = Series([3, 1, 4, 2], index=list("acbd"), name="count").sort_values() + tm.assert_series_equal(hist, expected) + + # sort ascending + hist = s.value_counts(ascending=True) + expected = Series([1, 2, 3, 4], index=list("cdab"), name="count") + tm.assert_series_equal(hist, expected) + + # relative histogram. + hist = s.value_counts(normalize=True) + expected = Series( + [0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"], name="proportion" + ) + tm.assert_series_equal(hist, expected) + + +def test_value_counts_bins(index_or_series): + klass = index_or_series + s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"] + s = klass(s_values) + + # bins + msg = "bins argument only works with numeric data" + with pytest.raises(TypeError, match=msg): + s.value_counts(bins=1) + + s1 = Series([1, 1, 2, 3]) + res1 = s1.value_counts(bins=1) + exp1 = Series({Interval(0.997, 3.0): 4}, name="count") + tm.assert_series_equal(res1, exp1) + res1n = s1.value_counts(bins=1, normalize=True) + exp1n = Series({Interval(0.997, 3.0): 1.0}, name="proportion") + tm.assert_series_equal(res1n, exp1n) + + if isinstance(s1, Index): + tm.assert_index_equal(s1.unique(), Index([1, 2, 3])) + else: + exp = np.array([1, 2, 3], dtype=np.int64) + tm.assert_numpy_array_equal(s1.unique(), exp) + + assert s1.nunique() == 3 + + # these return the same + res4 = s1.value_counts(bins=4, dropna=True) + intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) + exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2]), name="count") + tm.assert_series_equal(res4, exp4) + + res4 = s1.value_counts(bins=4, dropna=False) + intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) + exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2]), name="count") + tm.assert_series_equal(res4, exp4) + + res4n = s1.value_counts(bins=4, normalize=True) + exp4n = Series( + [0.5, 0.25, 0.25, 0], index=intervals.take([0, 1, 3, 2]), name="proportion" + ) + tm.assert_series_equal(res4n, exp4n) + + # handle NA's properly + s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"] + s = klass(s_values) + expected = Series([4, 3, 2], index=["b", "a", "d"], name="count") + tm.assert_series_equal(s.value_counts(), expected) + + if isinstance(s, Index): + exp = Index(["a", "b", np.nan, "d"]) + tm.assert_index_equal(s.unique(), exp) + else: + exp = np.array(["a", "b", np.nan, "d"], dtype=object) + tm.assert_numpy_array_equal(s.unique(), exp) + assert s.nunique() 
== 3 + + s = klass({}) if klass is dict else klass({}, dtype=object) + expected = Series([], dtype=np.int64, name="count") + tm.assert_series_equal(s.value_counts(), expected, check_index_type=False) + # returned dtype differs depending on original + if isinstance(s, Index): + tm.assert_index_equal(s.unique(), Index([]), exact=False) + else: + tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False) + + assert s.nunique() == 0 + + +def test_value_counts_datetime64(index_or_series): + klass = index_or_series + + # GH 3002, datetime64[ns] + # don't test names though + df = pd.DataFrame( + { + "person_id": ["xxyyzz", "xxyyzz", "xxyyzz", "xxyyww", "foofoo", "foofoo"], + "dt": pd.to_datetime( + [ + "2010-01-01", + "2010-01-01", + "2010-01-01", + "2009-01-01", + "2008-09-09", + "2008-09-09", + ] + ), + "food": ["PIE", "GUM", "EGG", "EGG", "PIE", "GUM"], + } + ) + + s = klass(df["dt"].copy()) + s.name = None + idx = pd.to_datetime( + ["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"] + ) + expected_s = Series([3, 2, 1], index=idx, name="count") + tm.assert_series_equal(s.value_counts(), expected_s) + + expected = pd.array( + np.array( + ["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"], + dtype="datetime64[ns]", + ) + ) + if isinstance(s, Index): + tm.assert_index_equal(s.unique(), DatetimeIndex(expected)) + else: + tm.assert_extension_array_equal(s.unique(), expected) + + assert s.nunique() == 3 + + # with NaT + s = df["dt"].copy() + s = klass(list(s.values) + [pd.NaT] * 4) + + result = s.value_counts() + assert result.index.dtype == "datetime64[ns]" + tm.assert_series_equal(result, expected_s) + + result = s.value_counts(dropna=False) + expected_s = pd.concat( + [Series([4], index=DatetimeIndex([pd.NaT]), name="count"), expected_s] + ) + tm.assert_series_equal(result, expected_s) + + assert s.dtype == "datetime64[ns]" + unique = s.unique() + assert unique.dtype == "datetime64[ns]" + + # numpy_array_equal cannot compare pd.NaT + if isinstance(s, Index): + exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT]) + tm.assert_index_equal(unique, exp_idx) + else: + tm.assert_extension_array_equal(unique[:3], expected) + assert pd.isna(unique[3]) + + assert s.nunique() == 3 + assert s.nunique(dropna=False) == 4 + + # timedelta64[ns] + td = df.dt - df.dt + timedelta(1) + td = klass(td, name="dt") + + result = td.value_counts() + expected_s = Series([6], index=Index([Timedelta("1day")], name="dt"), name="count") + tm.assert_series_equal(result, expected_s) + + expected = TimedeltaIndex(["1 days"], name="dt") + if isinstance(td, Index): + tm.assert_index_equal(td.unique(), expected) + else: + tm.assert_extension_array_equal(td.unique(), expected._values) + + td2 = timedelta(1) + (df.dt - df.dt) + td2 = klass(td2, name="dt") + result2 = td2.value_counts() + tm.assert_series_equal(result2, expected_s) + + +@pytest.mark.parametrize("dropna", [True, False]) +def test_value_counts_with_nan(dropna, index_or_series): + # GH31944 + klass = index_or_series + values = [True, pd.NA, np.nan] + obj = klass(values) + res = obj.value_counts(dropna=dropna) + if dropna is True: + expected = Series([1], index=Index([True], dtype=obj.dtype), name="count") + else: + expected = Series([1, 1, 1], index=[True, pd.NA, np.nan], name="count") + tm.assert_series_equal(res, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/computation/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/computation/__init__.py new file mode 100644 index 
00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/computation/test_compat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/computation/test_compat.py new file mode 100644 index 00000000..856a5b3a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/computation/test_compat.py @@ -0,0 +1,32 @@ +import pytest + +from pandas.compat._optional import VERSIONS + +import pandas as pd +from pandas.core.computation import expr +from pandas.core.computation.engines import ENGINES +from pandas.util.version import Version + + +def test_compat(): + # test we have compat with our version of numexpr + + from pandas.core.computation.check import NUMEXPR_INSTALLED + + ne = pytest.importorskip("numexpr") + + ver = ne.__version__ + if Version(ver) < Version(VERSIONS["numexpr"]): + assert not NUMEXPR_INSTALLED + else: + assert NUMEXPR_INSTALLED + + +@pytest.mark.parametrize("engine", ENGINES) +@pytest.mark.parametrize("parser", expr.PARSERS) +def test_invalid_numexpr_version(engine, parser): + if engine == "numexpr": + pytest.importorskip("numexpr") + a, b = 1, 2 # noqa: F841 + res = pd.eval("a + b", engine=engine, parser=parser) + assert res == 3 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/computation/test_eval.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/computation/test_eval.py new file mode 100644 index 00000000..3aa7decf --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/computation/test_eval.py @@ -0,0 +1,1936 @@ +from __future__ import annotations + +from functools import reduce +from itertools import product +import operator + +import numpy as np +import pytest + +from pandas.compat import PY312 +from pandas.errors import ( + NumExprClobberingError, + PerformanceWarning, + UndefinedVariableError, +) +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import ( + is_bool, + is_float, + is_list_like, + is_scalar, +) + +import pandas as pd +from pandas import ( + DataFrame, + Series, + date_range, +) +import pandas._testing as tm +from pandas.core.computation import ( + expr, + pytables, +) +from pandas.core.computation.engines import ENGINES +from pandas.core.computation.expr import ( + BaseExprVisitor, + PandasExprVisitor, + PythonExprVisitor, +) +from pandas.core.computation.expressions import ( + NUMEXPR_INSTALLED, + USE_NUMEXPR, +) +from pandas.core.computation.ops import ( + ARITH_OPS_SYMS, + SPECIAL_CASE_ARITH_OPS_SYMS, + _binary_math_ops, + _binary_ops_dict, + _unary_math_ops, +) +from pandas.core.computation.scope import DEFAULT_GLOBALS + + +@pytest.fixture( + params=( + pytest.param( + engine, + marks=[ + pytest.mark.skipif( + engine == "numexpr" and not USE_NUMEXPR, + reason=f"numexpr enabled->{USE_NUMEXPR}, " + f"installed->{NUMEXPR_INSTALLED}", + ), + td.skip_if_no_ne, + ], + ) + for engine in ENGINES + ) +) +def engine(request): + return request.param + + +@pytest.fixture(params=expr.PARSERS) +def parser(request): + return request.param + + +def _eval_single_bin(lhs, cmp1, rhs, engine): + c = _binary_ops_dict[cmp1] + if ENGINES[engine].has_neg_frac: + try: + return c(lhs, rhs) + except ValueError as e: + if str(e).startswith( + "negative number cannot be raised to a fractional power" + ): + return np.nan + raise + return c(lhs, rhs) + + +# TODO: using range(5) here is a kludge +@pytest.fixture( + params=list(range(5)), + ids=["DataFrame", "Series", "SeriesNaN", "DataFrameNaN", "float"], +) +def lhs(request): + nan_df1 = 
DataFrame(np.random.default_rng(2).standard_normal((10, 5))) + nan_df1[nan_df1 > 0.5] = np.nan + + opts = ( + DataFrame(np.random.default_rng(2).standard_normal((10, 5))), + Series(np.random.default_rng(2).standard_normal(5)), + Series([1, 2, np.nan, np.nan, 5]), + nan_df1, + np.random.default_rng(2).standard_normal(), + ) + return opts[request.param] + + +rhs = lhs +midhs = lhs + + +class TestEval: + @pytest.mark.parametrize( + "cmp1", + ["!=", "==", "<=", ">=", "<", ">"], + ids=["ne", "eq", "le", "ge", "lt", "gt"], + ) + @pytest.mark.parametrize("cmp2", [">", "<"], ids=["gt", "lt"]) + @pytest.mark.parametrize("binop", expr.BOOL_OPS_SYMS) + def test_complex_cmp_ops(self, cmp1, cmp2, binop, lhs, rhs, engine, parser): + if parser == "python" and binop in ["and", "or"]: + msg = "'BoolOp' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + ex = f"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)" + pd.eval(ex, engine=engine, parser=parser) + return + + lhs_new = _eval_single_bin(lhs, cmp1, rhs, engine) + rhs_new = _eval_single_bin(lhs, cmp2, rhs, engine) + expected = _eval_single_bin(lhs_new, binop, rhs_new, engine) + + ex = f"(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)" + result = pd.eval(ex, engine=engine, parser=parser) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("cmp_op", expr.CMP_OPS_SYMS) + def test_simple_cmp_ops(self, cmp_op, lhs, rhs, engine, parser): + lhs = lhs < 0 + rhs = rhs < 0 + + if parser == "python" and cmp_op in ["in", "not in"]: + msg = "'(In|NotIn)' nodes are not implemented" + + with pytest.raises(NotImplementedError, match=msg): + ex = f"lhs {cmp_op} rhs" + pd.eval(ex, engine=engine, parser=parser) + return + + ex = f"lhs {cmp_op} rhs" + msg = "|".join( + [ + r"only list-like( or dict-like)? objects are allowed to be " + r"passed to (DataFrame\.)?isin\(\), you passed a " + r"(`|')bool(`|')", + "argument of type 'bool' is not iterable", + ] + ) + if cmp_op in ("in", "not in") and not is_list_like(rhs): + with pytest.raises(TypeError, match=msg): + pd.eval( + ex, + engine=engine, + parser=parser, + local_dict={"lhs": lhs, "rhs": rhs}, + ) + else: + expected = _eval_single_bin(lhs, cmp_op, rhs, engine) + result = pd.eval(ex, engine=engine, parser=parser) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("op", expr.CMP_OPS_SYMS) + def test_compound_invert_op(self, op, lhs, rhs, request, engine, parser): + if parser == "python" and op in ["in", "not in"]: + msg = "'(In|NotIn)' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + ex = f"~(lhs {op} rhs)" + pd.eval(ex, engine=engine, parser=parser) + return + + if ( + is_float(lhs) + and not is_float(rhs) + and op in ["in", "not in"] + and engine == "python" + and parser == "pandas" + ): + mark = pytest.mark.xfail( + reason="Looks like expected is negative, unclear whether " + "expected is incorrect or result is incorrect" + ) + request.node.add_marker(mark) + skip_these = ["in", "not in"] + ex = f"~(lhs {op} rhs)" + + msg = "|".join( + [ + r"only list-like( or dict-like)? 
objects are allowed to be " + r"passed to (DataFrame\.)?isin\(\), you passed a " + r"(`|')float(`|')", + "argument of type 'float' is not iterable", + ] + ) + if is_scalar(rhs) and op in skip_these: + with pytest.raises(TypeError, match=msg): + pd.eval( + ex, + engine=engine, + parser=parser, + local_dict={"lhs": lhs, "rhs": rhs}, + ) + else: + # compound + if is_scalar(lhs) and is_scalar(rhs): + lhs, rhs = (np.array([x]) for x in (lhs, rhs)) + expected = _eval_single_bin(lhs, op, rhs, engine) + if is_scalar(expected): + expected = not expected + else: + expected = ~expected + result = pd.eval(ex, engine=engine, parser=parser) + tm.assert_almost_equal(expected, result) + + @pytest.mark.parametrize("cmp1", ["<", ">"]) + @pytest.mark.parametrize("cmp2", ["<", ">"]) + def test_chained_cmp_op(self, cmp1, cmp2, lhs, midhs, rhs, engine, parser): + mid = midhs + if parser == "python": + ex1 = f"lhs {cmp1} mid {cmp2} rhs" + msg = "'BoolOp' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(ex1, engine=engine, parser=parser) + return + + lhs_new = _eval_single_bin(lhs, cmp1, mid, engine) + rhs_new = _eval_single_bin(mid, cmp2, rhs, engine) + + if lhs_new is not None and rhs_new is not None: + ex1 = f"lhs {cmp1} mid {cmp2} rhs" + ex2 = f"lhs {cmp1} mid and mid {cmp2} rhs" + ex3 = f"(lhs {cmp1} mid) & (mid {cmp2} rhs)" + expected = _eval_single_bin(lhs_new, "&", rhs_new, engine) + + for ex in (ex1, ex2, ex3): + result = pd.eval(ex, engine=engine, parser=parser) + + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize( + "arith1", sorted(set(ARITH_OPS_SYMS).difference(SPECIAL_CASE_ARITH_OPS_SYMS)) + ) + def test_binary_arith_ops(self, arith1, lhs, rhs, engine, parser): + ex = f"lhs {arith1} rhs" + result = pd.eval(ex, engine=engine, parser=parser) + expected = _eval_single_bin(lhs, arith1, rhs, engine) + + tm.assert_almost_equal(result, expected) + ex = f"lhs {arith1} rhs {arith1} rhs" + result = pd.eval(ex, engine=engine, parser=parser) + nlhs = _eval_single_bin(lhs, arith1, rhs, engine) + try: + nlhs, ghs = nlhs.align(rhs) + except (ValueError, TypeError, AttributeError): + # ValueError: series frame or frame series align + # TypeError, AttributeError: series or frame with scalar align + return + else: + if engine == "numexpr": + import numexpr as ne + + # direct numpy comparison + expected = ne.evaluate(f"nlhs {arith1} ghs") + # Update assert statement due to unreliable numerical + # precision component (GH37328) + # TODO: update testing code so that assert_almost_equal statement + # can be replaced again by the assert_numpy_array_equal statement + tm.assert_almost_equal(result.values, expected) + else: + expected = eval(f"nlhs {arith1} ghs") + tm.assert_almost_equal(result, expected) + + # modulus, pow, and floor division require special casing + + def test_modulus(self, lhs, rhs, engine, parser): + ex = r"lhs % rhs" + result = pd.eval(ex, engine=engine, parser=parser) + expected = lhs % rhs + tm.assert_almost_equal(result, expected) + + if engine == "numexpr": + import numexpr as ne + + expected = ne.evaluate(r"expected % rhs") + if isinstance(result, (DataFrame, Series)): + tm.assert_almost_equal(result.values, expected) + else: + tm.assert_almost_equal(result, expected.item()) + else: + expected = _eval_single_bin(expected, "%", rhs, engine) + tm.assert_almost_equal(result, expected) + + def test_floor_division(self, lhs, rhs, engine, parser): + ex = "lhs // rhs" + + if engine == "python": + res = pd.eval(ex, engine=engine, parser=parser) 
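+            # The "python" engine defers "//" to Python itself, so the result
+            # should match the plain lhs // rhs computed below.  numexpr has
+            # no floor-division support, hence the TypeError (raised from
+            # numexpr's VariableNode operands) expected in the else branch.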
+ expected = lhs // rhs + tm.assert_equal(res, expected) + else: + msg = ( + r"unsupported operand type\(s\) for //: 'VariableNode' and " + "'VariableNode'" + ) + with pytest.raises(TypeError, match=msg): + pd.eval( + ex, + local_dict={"lhs": lhs, "rhs": rhs}, + engine=engine, + parser=parser, + ) + + @td.skip_if_windows + def test_pow(self, lhs, rhs, engine, parser): + # odd failure on win32 platform, so skip + ex = "lhs ** rhs" + expected = _eval_single_bin(lhs, "**", rhs, engine) + result = pd.eval(ex, engine=engine, parser=parser) + + if ( + is_scalar(lhs) + and is_scalar(rhs) + and isinstance(expected, (complex, np.complexfloating)) + and np.isnan(result) + ): + msg = "(DataFrame.columns|numpy array) are different" + with pytest.raises(AssertionError, match=msg): + tm.assert_numpy_array_equal(result, expected) + else: + tm.assert_almost_equal(result, expected) + + ex = "(lhs ** rhs) ** rhs" + result = pd.eval(ex, engine=engine, parser=parser) + + middle = _eval_single_bin(lhs, "**", rhs, engine) + expected = _eval_single_bin(middle, "**", rhs, engine) + tm.assert_almost_equal(result, expected) + + def test_check_single_invert_op(self, lhs, engine, parser): + # simple + try: + elb = lhs.astype(bool) + except AttributeError: + elb = np.array([bool(lhs)]) + expected = ~elb + result = pd.eval("~elb", engine=engine, parser=parser) + tm.assert_almost_equal(expected, result) + + def test_frame_invert(self, engine, parser): + expr = "~lhs" + + # ~ ## + # frame + # float always raises + lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2))) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'invert_dd'" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + msg = "ufunc 'invert' not supported for the input types" + with pytest.raises(TypeError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + + # int raises on numexpr + lhs = DataFrame(np.random.default_rng(2).integers(5, size=(5, 2))) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'invert" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + expect = ~lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + # bool always works + lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5) + expect = ~lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + # object raises + lhs = DataFrame( + {"b": ["a", 1, 2.0], "c": np.random.default_rng(2).standard_normal(3) > 0.5} + ) + if engine == "numexpr": + with pytest.raises(ValueError, match="unknown type object"): + pd.eval(expr, engine=engine, parser=parser) + else: + msg = "bad operand type for unary ~: 'str'" + with pytest.raises(TypeError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + + def test_series_invert(self, engine, parser): + # ~ #### + expr = "~lhs" + + # series + # float raises + lhs = Series(np.random.default_rng(2).standard_normal(5)) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'invert_dd'" + with pytest.raises(NotImplementedError, match=msg): + result = pd.eval(expr, engine=engine, parser=parser) + else: + msg = "ufunc 'invert' not supported for the input types" + with pytest.raises(TypeError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + + # int raises on numexpr + lhs = Series(np.random.default_rng(2).integers(5, size=5)) + if engine == "numexpr": + msg = 
"couldn't find matching opcode for 'invert" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + expect = ~lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + # bool + lhs = Series(np.random.default_rng(2).standard_normal(5) > 0.5) + expect = ~lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + # float + # int + # bool + + # object + lhs = Series(["a", 1, 2.0]) + if engine == "numexpr": + with pytest.raises(ValueError, match="unknown type object"): + pd.eval(expr, engine=engine, parser=parser) + else: + msg = "bad operand type for unary ~: 'str'" + with pytest.raises(TypeError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + + def test_frame_negate(self, engine, parser): + expr = "-lhs" + + # float + lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2))) + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + # int + lhs = DataFrame(np.random.default_rng(2).integers(5, size=(5, 2))) + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + # bool doesn't work with numexpr but works elsewhere + lhs = DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'neg_bb'" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + def test_series_negate(self, engine, parser): + expr = "-lhs" + + # float + lhs = Series(np.random.default_rng(2).standard_normal(5)) + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + # int + lhs = Series(np.random.default_rng(2).integers(5, size=5)) + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + # bool doesn't work with numexpr but works elsewhere + lhs = Series(np.random.default_rng(2).standard_normal(5) > 0.5) + if engine == "numexpr": + msg = "couldn't find matching opcode for 'neg_bb'" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(expr, engine=engine, parser=parser) + else: + expect = -lhs + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + @pytest.mark.parametrize( + "lhs", + [ + # Float + DataFrame(np.random.default_rng(2).standard_normal((5, 2))), + # Int + DataFrame(np.random.default_rng(2).integers(5, size=(5, 2))), + # bool doesn't work with numexpr but works elsewhere + DataFrame(np.random.default_rng(2).standard_normal((5, 2)) > 0.5), + ], + ) + def test_frame_pos(self, lhs, engine, parser): + expr = "+lhs" + expect = lhs + + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_frame_equal(expect, result) + + @pytest.mark.parametrize( + "lhs", + [ + # Float + Series(np.random.default_rng(2).standard_normal(5)), + # Int + Series(np.random.default_rng(2).integers(5, size=5)), + # bool doesn't work with numexpr but works elsewhere + Series(np.random.default_rng(2).standard_normal(5) > 0.5), + ], + ) + def test_series_pos(self, lhs, engine, parser): + expr = "+lhs" + expect = lhs + + result = pd.eval(expr, engine=engine, parser=parser) + tm.assert_series_equal(expect, result) + + def test_scalar_unary(self, engine, 
parser): + msg = "bad operand type for unary ~: 'float'" + warn = None + if PY312 and not (engine == "numexpr" and parser == "pandas"): + warn = DeprecationWarning + with pytest.raises(TypeError, match=msg): + pd.eval("~1.0", engine=engine, parser=parser) + + assert pd.eval("-1.0", parser=parser, engine=engine) == -1.0 + assert pd.eval("+1.0", parser=parser, engine=engine) == +1.0 + assert pd.eval("~1", parser=parser, engine=engine) == ~1 + assert pd.eval("-1", parser=parser, engine=engine) == -1 + assert pd.eval("+1", parser=parser, engine=engine) == +1 + with tm.assert_produces_warning( + warn, match="Bitwise inversion", check_stacklevel=False + ): + assert pd.eval("~True", parser=parser, engine=engine) == ~True + with tm.assert_produces_warning( + warn, match="Bitwise inversion", check_stacklevel=False + ): + assert pd.eval("~False", parser=parser, engine=engine) == ~False + assert pd.eval("-True", parser=parser, engine=engine) == -True + assert pd.eval("-False", parser=parser, engine=engine) == -False + assert pd.eval("+True", parser=parser, engine=engine) == +True + assert pd.eval("+False", parser=parser, engine=engine) == +False + + def test_unary_in_array(self): + # GH 11235 + # TODO: 2022-01-29: result return list with numexpr 2.7.3 in CI + # but cannot reproduce locally + result = np.array( + pd.eval("[-True, True, +True, -False, False, +False, -37, 37, ~37, +37]"), + dtype=np.object_, + ) + expected = np.array( + [ + -True, + True, + +True, + -False, + False, + +False, + -37, + 37, + ~37, + +37, + ], + dtype=np.object_, + ) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("dtype", [np.float32, np.float64]) + @pytest.mark.parametrize("expr", ["x < -0.1", "-5 > x"]) + def test_float_comparison_bin_op(self, dtype, expr): + # GH 16363 + df = DataFrame({"x": np.array([0], dtype=dtype)}) + res = df.eval(expr) + assert res.values == np.array([False]) + + def test_unary_in_function(self): + # GH 46471 + df = DataFrame({"x": [0, 1, np.nan]}) + + result = df.eval("x.fillna(-1)") + expected = df.x.fillna(-1) + # column name becomes None if using numexpr + # only check names when the engine is not numexpr + tm.assert_series_equal(result, expected, check_names=not USE_NUMEXPR) + + result = df.eval("x.shift(1, fill_value=-1)") + expected = df.x.shift(1, fill_value=-1) + tm.assert_series_equal(result, expected, check_names=not USE_NUMEXPR) + + @pytest.mark.parametrize( + "ex", + ( + "1 or 2", + "1 and 2", + "a and b", + "a or b", + "1 or 2 and (3 + 2) > 3", + "2 * x > 2 or 1 and 2", + "2 * df > 3 and 1 or a", + ), + ) + def test_disallow_scalar_bool_ops(self, ex, engine, parser): + x, a, b = np.random.default_rng(2).standard_normal(3), 1, 2 # noqa: F841 + df = DataFrame(np.random.default_rng(2).standard_normal((3, 2))) # noqa: F841 + + msg = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(ex, engine=engine, parser=parser) + + def test_identical(self, engine, parser): + # see gh-10546 + x = 1 + result = pd.eval("x", engine=engine, parser=parser) + assert result == 1 + assert is_scalar(result) + + x = 1.5 + result = pd.eval("x", engine=engine, parser=parser) + assert result == 1.5 + assert is_scalar(result) + + x = False + result = pd.eval("x", engine=engine, parser=parser) + assert not result + assert is_bool(result) + assert is_scalar(result) + + x = np.array([1]) + result = pd.eval("x", engine=engine, parser=parser) + tm.assert_numpy_array_equal(result, np.array([1])) + assert result.shape 
== (1,) + + x = np.array([1.5]) + result = pd.eval("x", engine=engine, parser=parser) + tm.assert_numpy_array_equal(result, np.array([1.5])) + assert result.shape == (1,) + + x = np.array([False]) # noqa: F841 + result = pd.eval("x", engine=engine, parser=parser) + tm.assert_numpy_array_equal(result, np.array([False])) + assert result.shape == (1,) + + def test_line_continuation(self, engine, parser): + # GH 11149 + exp = """1 + 2 * \ + 5 - 1 + 2 """ + result = pd.eval(exp, engine=engine, parser=parser) + assert result == 12 + + def test_float_truncation(self, engine, parser): + # GH 14241 + exp = "1000000000.006" + result = pd.eval(exp, engine=engine, parser=parser) + expected = np.float64(exp) + assert result == expected + + df = DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]}) + cutoff = 1000000000.0006 + result = df.query(f"A < {cutoff:.4f}") + assert result.empty + + cutoff = 1000000000.0010 + result = df.query(f"A > {cutoff:.4f}") + expected = df.loc[[1, 2], :] + tm.assert_frame_equal(expected, result) + + exact = 1000000000.0011 + result = df.query(f"A == {exact:.4f}") + expected = df.loc[[1], :] + tm.assert_frame_equal(expected, result) + + def test_disallow_python_keywords(self): + # GH 18221 + df = DataFrame([[0, 0, 0]], columns=["foo", "bar", "class"]) + msg = "Python keyword not valid identifier in numexpr query" + with pytest.raises(SyntaxError, match=msg): + df.query("class == 0") + + df = DataFrame() + df.index.name = "lambda" + with pytest.raises(SyntaxError, match=msg): + df.query("lambda == 0") + + def test_true_false_logic(self): + # GH 25823 + # This behavior is deprecated in Python 3.12 + with tm.maybe_produces_warning( + DeprecationWarning, PY312, check_stacklevel=False + ): + assert pd.eval("not True") == -2 + assert pd.eval("not False") == -1 + assert pd.eval("True and not True") == 0 + + def test_and_logic_string_match(self): + # GH 25823 + event = Series({"a": "hello"}) + assert pd.eval(f"{event.str.match('hello').a}") + assert pd.eval(f"{event.str.match('hello').a and event.str.match('hello').a}") + + +f = lambda *args, **kwargs: np.random.default_rng(2).standard_normal() + + +# ------------------------------------- +# gh-12388: Typecasting rules consistency with python + + +class TestTypeCasting: + @pytest.mark.parametrize("op", ["+", "-", "*", "**", "/"]) + # maybe someday... 
numexpr has too many upcasting rules now + # chain(*(np.core.sctypes[x] for x in ['uint', 'int', 'float'])) + @pytest.mark.parametrize("dt", [np.float32, np.float64]) + @pytest.mark.parametrize("left_right", [("df", "3"), ("3", "df")]) + def test_binop_typecasting(self, engine, parser, op, dt, left_right): + df = tm.makeCustomDataframe(5, 3, data_gen_f=f, dtype=dt) + left, right = left_right + s = f"{left} {op} {right}" + res = pd.eval(s, engine=engine, parser=parser) + assert df.values.dtype == dt + assert res.values.dtype == dt + tm.assert_frame_equal(res, eval(s)) + + +# ------------------------------------- +# Basic and complex alignment + + +def should_warn(*args): + not_mono = not any(map(operator.attrgetter("is_monotonic_increasing"), args)) + only_one_dt = reduce( + operator.xor, (issubclass(x.dtype.type, np.datetime64) for x in args) + ) + return not_mono and only_one_dt + + +class TestAlignment: + index_types = ["i", "s", "dt"] + lhs_index_types = index_types + ["s"] # 'p' + + def test_align_nested_unary_op(self, engine, parser): + s = "df * ~2" + df = tm.makeCustomDataframe(5, 3, data_gen_f=f) + res = pd.eval(s, engine=engine, parser=parser) + tm.assert_frame_equal(res, df * ~2) + + @pytest.mark.filterwarnings("always::RuntimeWarning") + @pytest.mark.parametrize("lr_idx_type", lhs_index_types) + @pytest.mark.parametrize("rr_idx_type", index_types) + @pytest.mark.parametrize("c_idx_type", index_types) + def test_basic_frame_alignment( + self, engine, parser, lr_idx_type, rr_idx_type, c_idx_type + ): + df = tm.makeCustomDataframe( + 10, 10, data_gen_f=f, r_idx_type=lr_idx_type, c_idx_type=c_idx_type + ) + df2 = tm.makeCustomDataframe( + 20, 10, data_gen_f=f, r_idx_type=rr_idx_type, c_idx_type=c_idx_type + ) + # only warns if not monotonic and not sortable + if should_warn(df.index, df2.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("df + df2", engine=engine, parser=parser) + else: + res = pd.eval("df + df2", engine=engine, parser=parser) + tm.assert_frame_equal(res, df + df2) + + @pytest.mark.parametrize("r_idx_type", lhs_index_types) + @pytest.mark.parametrize("c_idx_type", lhs_index_types) + def test_frame_comparison(self, engine, parser, r_idx_type, c_idx_type): + df = tm.makeCustomDataframe( + 10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type + ) + res = pd.eval("df < 2", engine=engine, parser=parser) + tm.assert_frame_equal(res, df < 2) + + df3 = DataFrame( + np.random.default_rng(2).standard_normal(df.shape), + index=df.index, + columns=df.columns, + ) + res = pd.eval("df < df3", engine=engine, parser=parser) + tm.assert_frame_equal(res, df < df3) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + @pytest.mark.parametrize("r1", lhs_index_types) + @pytest.mark.parametrize("c1", index_types) + @pytest.mark.parametrize("r2", index_types) + @pytest.mark.parametrize("c2", index_types) + def test_medium_complex_frame_alignment(self, engine, parser, r1, c1, r2, c2): + df = tm.makeCustomDataframe(3, 2, data_gen_f=f, r_idx_type=r1, c_idx_type=c1) + df2 = tm.makeCustomDataframe(4, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2) + df3 = tm.makeCustomDataframe(5, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2) + if should_warn(df.index, df2.index, df3.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("df + df2 + df3", engine=engine, parser=parser) + else: + res = pd.eval("df + df2 + df3", engine=engine, parser=parser) + tm.assert_frame_equal(res, df + df2 + df3) + + 
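+    # The alignment tests below expect a RuntimeWarning only when
+    # should_warn() is True, i.e. when no operand has a monotonically
+    # increasing index and an odd number of operands (in practice exactly
+    # one) have a datetime64 index -- the reduce(operator.xor, ...) above.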
@pytest.mark.filterwarnings("ignore::RuntimeWarning") + @pytest.mark.parametrize("index_name", ["index", "columns"]) + @pytest.mark.parametrize("c_idx_type", index_types) + @pytest.mark.parametrize("r_idx_type", lhs_index_types) + def test_basic_frame_series_alignment( + self, engine, parser, index_name, r_idx_type, c_idx_type + ): + df = tm.makeCustomDataframe( + 10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type + ) + index = getattr(df, index_name) + s = Series(np.random.default_rng(2).standard_normal(5), index[:5]) + + if should_warn(df.index, s.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("df + s", engine=engine, parser=parser) + else: + res = pd.eval("df + s", engine=engine, parser=parser) + + if r_idx_type == "dt" or c_idx_type == "dt": + expected = df.add(s) if engine == "numexpr" else df + s + else: + expected = df + s + tm.assert_frame_equal(res, expected) + + @pytest.mark.parametrize("index_name", ["index", "columns"]) + @pytest.mark.parametrize( + "r_idx_type, c_idx_type", + list(product(["i", "s"], ["i", "s"])) + [("dt", "dt")], + ) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_basic_series_frame_alignment( + self, request, engine, parser, index_name, r_idx_type, c_idx_type + ): + if ( + engine == "numexpr" + and parser in ("pandas", "python") + and index_name == "index" + and r_idx_type == "i" + and c_idx_type == "s" + ): + reason = ( + f"Flaky column ordering when engine={engine}, " + f"parser={parser}, index_name={index_name}, " + f"r_idx_type={r_idx_type}, c_idx_type={c_idx_type}" + ) + request.node.add_marker(pytest.mark.xfail(reason=reason, strict=False)) + df = tm.makeCustomDataframe( + 10, 7, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type + ) + index = getattr(df, index_name) + s = Series(np.random.default_rng(2).standard_normal(5), index[:5]) + if should_warn(s.index, df.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("s + df", engine=engine, parser=parser) + else: + res = pd.eval("s + df", engine=engine, parser=parser) + + if r_idx_type == "dt" or c_idx_type == "dt": + expected = df.add(s) if engine == "numexpr" else s + df + else: + expected = s + df + tm.assert_frame_equal(res, expected) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + @pytest.mark.parametrize("c_idx_type", index_types) + @pytest.mark.parametrize("r_idx_type", lhs_index_types) + @pytest.mark.parametrize("index_name", ["index", "columns"]) + @pytest.mark.parametrize("op", ["+", "*"]) + def test_series_frame_commutativity( + self, engine, parser, index_name, op, r_idx_type, c_idx_type + ): + df = tm.makeCustomDataframe( + 10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type + ) + index = getattr(df, index_name) + s = Series(np.random.default_rng(2).standard_normal(5), index[:5]) + + lhs = f"s {op} df" + rhs = f"df {op} s" + if should_warn(df.index, s.index): + with tm.assert_produces_warning(RuntimeWarning): + a = pd.eval(lhs, engine=engine, parser=parser) + with tm.assert_produces_warning(RuntimeWarning): + b = pd.eval(rhs, engine=engine, parser=parser) + else: + a = pd.eval(lhs, engine=engine, parser=parser) + b = pd.eval(rhs, engine=engine, parser=parser) + + if r_idx_type != "dt" and c_idx_type != "dt": + if engine == "numexpr": + tm.assert_frame_equal(a, b) + + @pytest.mark.filterwarnings("always::RuntimeWarning") + @pytest.mark.parametrize("r1", lhs_index_types) + @pytest.mark.parametrize("c1", index_types) + @pytest.mark.parametrize("r2", index_types) + 
@pytest.mark.parametrize("c2", index_types) + def test_complex_series_frame_alignment(self, engine, parser, r1, c1, r2, c2): + n = 3 + m1 = 5 + m2 = 2 * m1 + + index_name = np.random.default_rng(2).choice(["index", "columns"]) + obj_name = np.random.default_rng(2).choice(["df", "df2"]) + + df = tm.makeCustomDataframe(m1, n, data_gen_f=f, r_idx_type=r1, c_idx_type=c1) + df2 = tm.makeCustomDataframe(m2, n, data_gen_f=f, r_idx_type=r2, c_idx_type=c2) + index = getattr(locals().get(obj_name), index_name) + ser = Series(np.random.default_rng(2).standard_normal(n), index[:n]) + + if r2 == "dt" or c2 == "dt": + if engine == "numexpr": + expected2 = df2.add(ser) + else: + expected2 = df2 + ser + else: + expected2 = df2 + ser + + if r1 == "dt" or c1 == "dt": + if engine == "numexpr": + expected = expected2.add(df) + else: + expected = expected2 + df + else: + expected = expected2 + df + + if should_warn(df2.index, ser.index, df.index): + with tm.assert_produces_warning(RuntimeWarning): + res = pd.eval("df2 + ser + df", engine=engine, parser=parser) + else: + res = pd.eval("df2 + ser + df", engine=engine, parser=parser) + assert res.shape == expected.shape + tm.assert_frame_equal(res, expected) + + def test_performance_warning_for_poor_alignment(self, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((1000, 10))) + s = Series(np.random.default_rng(2).standard_normal(10000)) + if engine == "numexpr": + seen = PerformanceWarning + else: + seen = False + + with tm.assert_produces_warning(seen): + pd.eval("df + s", engine=engine, parser=parser) + + s = Series(np.random.default_rng(2).standard_normal(1000)) + with tm.assert_produces_warning(False): + pd.eval("df + s", engine=engine, parser=parser) + + df = DataFrame(np.random.default_rng(2).standard_normal((10, 10000))) + s = Series(np.random.default_rng(2).standard_normal(10000)) + with tm.assert_produces_warning(False): + pd.eval("df + s", engine=engine, parser=parser) + + df = DataFrame(np.random.default_rng(2).standard_normal((10, 10))) + s = Series(np.random.default_rng(2).standard_normal(10000)) + + is_python_engine = engine == "python" + + if not is_python_engine: + wrn = PerformanceWarning + else: + wrn = False + + with tm.assert_produces_warning(wrn) as w: + pd.eval("df + s", engine=engine, parser=parser) + + if not is_python_engine: + assert len(w) == 1 + msg = str(w[0].message) + logged = np.log10(s.size - df.shape[1]) + expected = ( + f"Alignment difference on axis 1 is larger " + f"than an order of magnitude on term 'df', " + f"by more than {logged:.4g}; performance may suffer." 
+            )
+            assert msg == expected
+
+
+# ------------------------------------
+# Slightly more complex ops
+
+
+class TestOperations:
+    def eval(self, *args, **kwargs):
+        kwargs["level"] = kwargs.pop("level", 0) + 1
+        return pd.eval(*args, **kwargs)
+
+    def test_simple_arith_ops(self, engine, parser):
+        exclude_arith = []
+        if parser == "python":
+            exclude_arith = ["in", "not in"]
+
+        arith_ops = [
+            op
+            for op in expr.ARITH_OPS_SYMS + expr.CMP_OPS_SYMS
+            if op not in exclude_arith
+        ]
+
+        ops = (op for op in arith_ops if op != "//")
+
+        for op in ops:
+            ex = f"1 {op} 1"
+            ex2 = f"x {op} 1"
+            ex3 = f"1 {op} (x + 1)"
+
+            if op in ("in", "not in"):
+                msg = "argument of type 'int' is not iterable"
+                with pytest.raises(TypeError, match=msg):
+                    pd.eval(ex, engine=engine, parser=parser)
+            else:
+                expec = _eval_single_bin(1, op, 1, engine)
+                x = self.eval(ex, engine=engine, parser=parser)
+                assert x == expec
+
+                expec = _eval_single_bin(x, op, 1, engine)
+                y = self.eval(ex2, local_dict={"x": x}, engine=engine, parser=parser)
+                assert y == expec
+
+                expec = _eval_single_bin(1, op, x + 1, engine)
+                y = self.eval(ex3, local_dict={"x": x}, engine=engine, parser=parser)
+                assert y == expec
+
+    @pytest.mark.parametrize("rhs", [True, False])
+    @pytest.mark.parametrize("lhs", [True, False])
+    @pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
+    def test_simple_bool_ops(self, rhs, lhs, op, engine, parser):
+        # request the engine/parser fixtures and pass them through, as the
+        # sibling tests do; otherwise the module-level name ``parser`` is the
+        # fixture function itself and the guard below can never fire
+        ex = f"{lhs} {op} {rhs}"
+
+        if parser == "python" and op in ["and", "or"]:
+            msg = "'BoolOp' nodes are not implemented"
+            with pytest.raises(NotImplementedError, match=msg):
+                self.eval(ex, engine=engine, parser=parser)
+            return
+
+        res = self.eval(ex, engine=engine, parser=parser)
+        exp = eval(ex)
+        assert res == exp
+
+    @pytest.mark.parametrize("rhs", [True, False])
+    @pytest.mark.parametrize("lhs", [True, False])
+    @pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
+    def test_bool_ops_with_constants(self, rhs, lhs, op, engine, parser):
+        # see note in test_simple_bool_ops above
+        ex = f"{lhs} {op} {rhs}"
+
+        if parser == "python" and op in ["and", "or"]:
+            msg = "'BoolOp' nodes are not implemented"
+            with pytest.raises(NotImplementedError, match=msg):
+                self.eval(ex, engine=engine, parser=parser)
+            return
+
+        res = self.eval(ex, engine=engine, parser=parser)
+        exp = eval(ex)
+        assert res == exp
+
+    def test_4d_ndarray_fails(self):
+        x = np.random.default_rng(2).standard_normal((3, 4, 5, 6))
+        y = Series(np.random.default_rng(2).standard_normal(10))
+        msg = "N-dimensional objects, where N > 2, are not supported with eval"
+        with pytest.raises(NotImplementedError, match=msg):
+            self.eval("x + y", local_dict={"x": x, "y": y})
+
+    def test_constant(self):
+        x = self.eval("1")
+        assert x == 1
+
+    def test_single_variable(self):
+        df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)))
+        df2 = self.eval("df", local_dict={"df": df})
+        tm.assert_frame_equal(df, df2)
+
+    def test_failing_subscript_with_name_error(self):
+        df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))  # noqa: F841
+        with pytest.raises(NameError, match="name 'x' is not defined"):
+            self.eval("df[x > 2] > 2")
+
+    def test_lhs_expression_subscript(self):
+        df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
+        result = self.eval("(df + 1)[df > 2]", local_dict={"df": df})
+        expected = (df + 1)[df > 2]
+        tm.assert_frame_equal(result, expected)
+
+    def test_attr_expression(self):
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 3)), columns=list("abc")
+        )
+        expr1 = "df.a < df.b"
+        expec1 = df.a < df.b
+        expr2 = "df.a + df.b + df.c"
+        expec2 = df.a + df.b + df.c
+        expr3 = "df.a + df.b + df.c[df.b < 0]"
+        expec3 = df.a + df.b + df.c[df.b < 0]
+        exprs = expr1, expr2, expr3
+        expecs = 
expec1, expec2, expec3 + for e, expec in zip(exprs, expecs): + tm.assert_series_equal(expec, self.eval(e, local_dict={"df": df})) + + def test_assignment_fails(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), columns=list("abc") + ) + df2 = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + expr1 = "df = df2" + msg = "cannot assign without a target object" + with pytest.raises(ValueError, match=msg): + self.eval(expr1, local_dict={"df": df, "df2": df2}) + + def test_assignment_column_multiple_raise(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + # multiple assignees + with pytest.raises(SyntaxError, match="invalid syntax"): + df.eval("d c = a + b") + + def test_assignment_column_invalid_assign(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + # invalid assignees + msg = "left hand side of an assignment must be a single name" + with pytest.raises(SyntaxError, match=msg): + df.eval("d,c = a + b") + + def test_assignment_column_invalid_assign_function_call(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + msg = "cannot assign to function call" + with pytest.raises(SyntaxError, match=msg): + df.eval('Timestamp("20131001") = a + b') + + def test_assignment_single_assign_existing(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + # single assignment - existing variable + expected = df.copy() + expected["a"] = expected["a"] + expected["b"] + df.eval("a = a + b", inplace=True) + tm.assert_frame_equal(df, expected) + + def test_assignment_single_assign_new(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + # single assignment - new variable + expected = df.copy() + expected["c"] = expected["a"] + expected["b"] + df.eval("c = a + b", inplace=True) + tm.assert_frame_equal(df, expected) + + def test_assignment_single_assign_local_overlap(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + df = df.copy() + a = 1 # noqa: F841 + df.eval("a = 1 + b", inplace=True) + + expected = df.copy() + expected["a"] = 1 + expected["b"] + tm.assert_frame_equal(df, expected) + + def test_assignment_single_assign_name(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + + a = 1 # noqa: F841 + old_a = df.a.copy() + df.eval("a = a + b", inplace=True) + result = old_a + df.b + tm.assert_series_equal(result, df.a, check_names=False) + assert result.name is None + + def test_assignment_multiple_raises(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + # multiple assignment + df.eval("c = a + b", inplace=True) + msg = "can only assign a single expression" + with pytest.raises(SyntaxError, match=msg): + df.eval("c = a = b") + + def test_assignment_explicit(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + # explicit targets + self.eval("c = df.a + df.b", local_dict={"df": df}, target=df, inplace=True) + expected = df.copy() + expected["c"] = expected["a"] + expected["b"] + tm.assert_frame_equal(df, expected) + + def test_column_in(self): + # GH 11235 + df = DataFrame({"a": [11], "b": [-32]}) + result = df.eval("a in [11, -32]") + expected = Series([True]) + # TODO: 2022-01-29: Name check failed with numexpr 2.7.3 in CI + # but cannot 
reproduce locally + tm.assert_series_equal(result, expected, check_names=False) + + @pytest.mark.xfail(reason="Unknown: Omitted test_ in name prior.") + def test_assignment_not_inplace(self): + # see gh-9297 + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("ab") + ) + + actual = df.eval("c = a + b", inplace=False) + assert actual is not None + + expected = df.copy() + expected["c"] = expected["a"] + expected["b"] + tm.assert_frame_equal(df, expected) + + def test_multi_line_expression(self): + # GH 11149 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + expected = df.copy() + + expected["c"] = expected["a"] + expected["b"] + expected["d"] = expected["c"] + expected["b"] + answer = df.eval( + """ + c = a + b + d = c + b""", + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + expected["a"] = expected["a"] - 1 + expected["e"] = expected["a"] + 2 + answer = df.eval( + """ + a = a - 1 + e = a + 2""", + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + # multi-line not valid if not all assignments + msg = "Multi-line expressions are only valid if all expressions contain" + with pytest.raises(ValueError, match=msg): + df.eval( + """ + a = b + 2 + b - 2""", + inplace=False, + ) + + def test_multi_line_expression_not_inplace(self): + # GH 11149 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + expected = df.copy() + + expected["c"] = expected["a"] + expected["b"] + expected["d"] = expected["c"] + expected["b"] + df = df.eval( + """ + c = a + b + d = c + b""", + inplace=False, + ) + tm.assert_frame_equal(expected, df) + + expected["a"] = expected["a"] - 1 + expected["e"] = expected["a"] + 2 + df = df.eval( + """ + a = a - 1 + e = a + 2""", + inplace=False, + ) + tm.assert_frame_equal(expected, df) + + def test_multi_line_expression_local_variable(self): + # GH 15342 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + expected = df.copy() + + local_var = 7 + expected["c"] = expected["a"] * local_var + expected["d"] = expected["c"] + local_var + answer = df.eval( + """ + c = a * @local_var + d = c + @local_var + """, + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + def test_multi_line_expression_callable_local_variable(self): + # 26426 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + + def local_func(a, b): + return b + + expected = df.copy() + expected["c"] = expected["a"] * local_func(1, 7) + expected["d"] = expected["c"] + local_func(1, 7) + answer = df.eval( + """ + c = a * @local_func(1, 7) + d = c + @local_func(1, 7) + """, + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + def test_multi_line_expression_callable_local_variable_with_kwargs(self): + # 26426 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + + def local_func(a, b): + return b + + expected = df.copy() + expected["c"] = expected["a"] * local_func(b=7, a=1) + expected["d"] = expected["c"] + local_func(b=7, a=1) + answer = df.eval( + """ + c = a * @local_func(b=7, a=1) + d = c + @local_func(b=7, a=1) + """, + inplace=True, + ) + tm.assert_frame_equal(expected, df) + assert answer is None + + def test_assignment_in_query(self): + # GH 8664 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df_orig = df.copy() + msg = "cannot assign without a target object" + with pytest.raises(ValueError, match=msg): + df.query("a = 1") + tm.assert_frame_equal(df, df_orig) + + def test_query_inplace(self): + # see gh-11149 + df = DataFrame({"a": [1, 2, 3], "b": [4, 
5, 6]}) + expected = df.copy() + expected = expected[expected["a"] == 2] + df.query("a == 2", inplace=True) + tm.assert_frame_equal(expected, df) + + df = {} + expected = {"a": 3} + + self.eval("a = 1 + 2", target=df, inplace=True) + tm.assert_dict_equal(df, expected) + + @pytest.mark.parametrize("invalid_target", [1, "cat", [1, 2], np.array([]), (1, 3)]) + def test_cannot_item_assign(self, invalid_target): + msg = "Cannot assign expression output to target" + expression = "a = 1 + 2" + + with pytest.raises(ValueError, match=msg): + self.eval(expression, target=invalid_target, inplace=True) + + if hasattr(invalid_target, "copy"): + with pytest.raises(ValueError, match=msg): + self.eval(expression, target=invalid_target, inplace=False) + + @pytest.mark.parametrize("invalid_target", [1, "cat", (1, 3)]) + def test_cannot_copy_item(self, invalid_target): + msg = "Cannot return a copy of the target" + expression = "a = 1 + 2" + + with pytest.raises(ValueError, match=msg): + self.eval(expression, target=invalid_target, inplace=False) + + @pytest.mark.parametrize("target", [1, "cat", [1, 2], np.array([]), (1, 3), {1: 2}]) + def test_inplace_no_assignment(self, target): + expression = "1 + 2" + + assert self.eval(expression, target=target, inplace=False) == 3 + + msg = "Cannot operate inplace if there is no assignment" + with pytest.raises(ValueError, match=msg): + self.eval(expression, target=target, inplace=True) + + def test_basic_period_index_boolean_expression(self): + df = tm.makeCustomDataframe(2, 2, data_gen_f=f, c_idx_type="p", r_idx_type="i") + + e = df < 2 + r = self.eval("df < 2", local_dict={"df": df}) + x = df < 2 + + tm.assert_frame_equal(r, e) + tm.assert_frame_equal(x, e) + + def test_basic_period_index_subscript_expression(self): + df = tm.makeCustomDataframe(2, 2, data_gen_f=f, c_idx_type="p", r_idx_type="i") + r = self.eval("df[df < 2 + 3]", local_dict={"df": df}) + e = df[df < 2 + 3] + tm.assert_frame_equal(r, e) + + def test_nested_period_index_subscript_expression(self): + df = tm.makeCustomDataframe(2, 2, data_gen_f=f, c_idx_type="p", r_idx_type="i") + r = self.eval("df[df[df < 2] < 2] + df * 2", local_dict={"df": df}) + e = df[df[df < 2] < 2] + df * 2 + tm.assert_frame_equal(r, e) + + def test_date_boolean(self, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + df["dates1"] = date_range("1/1/2012", periods=5) + res = self.eval( + "df.dates1 < 20130101", + local_dict={"df": df}, + engine=engine, + parser=parser, + ) + expec = df.dates1 < "20130101" + tm.assert_series_equal(res, expec, check_names=False) + + def test_simple_in_ops(self, engine, parser): + if parser != "python": + res = pd.eval("1 in [1, 2]", engine=engine, parser=parser) + assert res + + res = pd.eval("2 in (1, 2)", engine=engine, parser=parser) + assert res + + res = pd.eval("3 in (1, 2)", engine=engine, parser=parser) + assert not res + + res = pd.eval("3 not in (1, 2)", engine=engine, parser=parser) + assert res + + res = pd.eval("[3] not in (1, 2)", engine=engine, parser=parser) + assert res + + res = pd.eval("[3] in ([3], 2)", engine=engine, parser=parser) + assert res + + res = pd.eval("[[3]] in [[[3]], 2]", engine=engine, parser=parser) + assert res + + res = pd.eval("(3,) in [(3,), 2]", engine=engine, parser=parser) + assert res + + res = pd.eval("(3,) not in [(3,), 2]", engine=engine, parser=parser) + assert not res + + res = pd.eval("[(3,)] in [[(3,)], 2]", engine=engine, parser=parser) + assert res + else: + msg = "'In' nodes are not implemented" + with 
pytest.raises(NotImplementedError, match=msg): + pd.eval("1 in [1, 2]", engine=engine, parser=parser) + with pytest.raises(NotImplementedError, match=msg): + pd.eval("2 in (1, 2)", engine=engine, parser=parser) + with pytest.raises(NotImplementedError, match=msg): + pd.eval("3 in (1, 2)", engine=engine, parser=parser) + with pytest.raises(NotImplementedError, match=msg): + pd.eval("[(3,)] in (1, 2, [(3,)])", engine=engine, parser=parser) + msg = "'NotIn' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + pd.eval("3 not in (1, 2)", engine=engine, parser=parser) + with pytest.raises(NotImplementedError, match=msg): + pd.eval("[3] not in (1, 2, [[3]])", engine=engine, parser=parser) + + def test_check_many_exprs(self, engine, parser): + a = 1 # noqa: F841 + expr = " * ".join("a" * 33) + expected = 1 + res = pd.eval(expr, engine=engine, parser=parser) + assert res == expected + + @pytest.mark.parametrize( + "expr", + [ + "df > 2 and df > 3", + "df > 2 or df > 3", + "not df > 2", + ], + ) + def test_fails_and_or_not(self, expr, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + if parser == "python": + msg = "'BoolOp' nodes are not implemented" + if "not" in expr: + msg = "'Not' nodes are not implemented" + + with pytest.raises(NotImplementedError, match=msg): + pd.eval( + expr, + local_dict={"df": df}, + parser=parser, + engine=engine, + ) + else: + # smoke-test, should not raise + pd.eval( + expr, + local_dict={"df": df}, + parser=parser, + engine=engine, + ) + + @pytest.mark.parametrize("char", ["|", "&"]) + def test_fails_ampersand_pipe(self, char, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) # noqa: F841 + ex = f"(df + 2)[df > 1] > 0 {char} (df > 0)" + if parser == "python": + msg = "cannot evaluate scalar only bool ops" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(ex, parser=parser, engine=engine) + else: + # smoke-test, should not raise + pd.eval(ex, parser=parser, engine=engine) + + +class TestMath: + def eval(self, *args, **kwargs): + kwargs["level"] = kwargs.pop("level", 0) + 1 + return pd.eval(*args, **kwargs) + + @pytest.mark.skipif( + not NUMEXPR_INSTALLED, reason="Unary ops only implemented for numexpr" + ) + @pytest.mark.parametrize("fn", _unary_math_ops) + def test_unary_functions(self, fn): + df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)}) + a = df.a + + expr = f"{fn}(a)" + got = self.eval(expr) + with np.errstate(all="ignore"): + expect = getattr(np, fn)(a) + tm.assert_series_equal(got, expect, check_names=False) + + @pytest.mark.parametrize("fn", _binary_math_ops) + def test_binary_functions(self, fn): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + } + ) + a = df.a + b = df.b + + expr = f"{fn}(a, b)" + got = self.eval(expr) + with np.errstate(all="ignore"): + expect = getattr(np, fn)(a, b) + tm.assert_almost_equal(got, expect, check_names=False) + + def test_df_use_case(self, engine, parser): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + } + ) + df.eval( + "e = arctan2(sin(a), b)", + engine=engine, + parser=parser, + inplace=True, + ) + got = df.e + expect = np.arctan2(np.sin(df.a), df.b) + tm.assert_series_equal(got, expect, check_names=False) + + def test_df_arithmetic_subexpression(self, engine, parser): + df = DataFrame( + { + "a": 
np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + } + ) + df.eval("e = sin(a + b)", engine=engine, parser=parser, inplace=True) + got = df.e + expect = np.sin(df.a + df.b) + tm.assert_series_equal(got, expect, check_names=False) + + @pytest.mark.parametrize( + "dtype, expect_dtype", + [ + (np.int32, np.float64), + (np.int64, np.float64), + (np.float32, np.float32), + (np.float64, np.float64), + pytest.param(np.complex128, np.complex128, marks=td.skip_if_windows), + ], + ) + def test_result_types(self, dtype, expect_dtype, engine, parser): + # xref https://github.com/pandas-dev/pandas/issues/12293 + # this fails on Windows, apparently a floating point precision issue + + # Did not test complex64 because DataFrame is converting it to + # complex128. Due to https://github.com/pandas-dev/pandas/issues/10952 + df = DataFrame( + {"a": np.random.default_rng(2).standard_normal(10).astype(dtype)} + ) + assert df.a.dtype == dtype + df.eval("b = sin(a)", engine=engine, parser=parser, inplace=True) + got = df.b + expect = np.sin(df.a) + assert expect.dtype == got.dtype + assert expect_dtype == got.dtype + tm.assert_series_equal(got, expect, check_names=False) + + def test_undefined_func(self, engine, parser): + df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)}) + msg = '"mysin" is not a supported function' + + with pytest.raises(ValueError, match=msg): + df.eval("mysin(a)", engine=engine, parser=parser) + + def test_keyword_arg(self, engine, parser): + df = DataFrame({"a": np.random.default_rng(2).standard_normal(10)}) + msg = 'Function "sin" does not support keyword arguments' + + with pytest.raises(TypeError, match=msg): + df.eval("sin(x=a)", engine=engine, parser=parser) + + +_var_s = np.random.default_rng(2).standard_normal(10) + + +class TestScope: + def test_global_scope(self, engine, parser): + e = "_var_s * 2" + tm.assert_numpy_array_equal( + _var_s * 2, pd.eval(e, engine=engine, parser=parser) + ) + + def test_no_new_locals(self, engine, parser): + x = 1 + lcls = locals().copy() + pd.eval("x + 1", local_dict=lcls, engine=engine, parser=parser) + lcls2 = locals().copy() + lcls2.pop("lcls") + assert lcls == lcls2 + + def test_no_new_globals(self, engine, parser): + x = 1 # noqa: F841 + gbls = globals().copy() + pd.eval("x + 1", engine=engine, parser=parser) + gbls2 = globals().copy() + assert gbls == gbls2 + + def test_empty_locals(self, engine, parser): + # GH 47084 + x = 1 # noqa: F841 + msg = "name 'x' is not defined" + with pytest.raises(UndefinedVariableError, match=msg): + pd.eval("x + 1", engine=engine, parser=parser, local_dict={}) + + def test_empty_globals(self, engine, parser): + # GH 47084 + msg = "name '_var_s' is not defined" + e = "_var_s * 2" + with pytest.raises(UndefinedVariableError, match=msg): + pd.eval(e, engine=engine, parser=parser, global_dict={}) + + +@td.skip_if_no_ne +def test_invalid_engine(): + msg = "Invalid engine 'asdf' passed" + with pytest.raises(KeyError, match=msg): + pd.eval("x + y", local_dict={"x": 1, "y": 2}, engine="asdf") + + +@td.skip_if_no_ne +@pytest.mark.parametrize( + ("use_numexpr", "expected"), + ( + (True, "numexpr"), + (False, "python"), + ), +) +def test_numexpr_option_respected(use_numexpr, expected): + # GH 32556 + from pandas.core.computation.eval import _check_engine + + with pd.option_context("compute.use_numexpr", use_numexpr): + result = _check_engine(None) + assert result == expected + + +@td.skip_if_no_ne +def test_numexpr_option_incompatible_op(): + # GH 32556 + 
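# Aside (explanatory comment added here, not upstream commentary): numexpr can
# only evaluate numeric expressions, so an object-dtype operation such as
# Series.isnull() is an "incompatible op"; with "compute.use_numexpr" set to
# False, the DataFrame.query call below is expected to take the pure-python
# fallback path and still return exactly the rows where A is null.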
with pd.option_context("compute.use_numexpr", False): + df = DataFrame( + {"A": [True, False, True, False, None, None], "B": [1, 2, 3, 4, 5, 6]} + ) + result = df.query("A.isnull()") + expected = DataFrame({"A": [None, None], "B": [5, 6]}, index=[4, 5]) + tm.assert_frame_equal(result, expected) + + +@td.skip_if_no_ne +def test_invalid_parser(): + msg = "Invalid parser 'asdf' passed" + with pytest.raises(KeyError, match=msg): + pd.eval("x + y", local_dict={"x": 1, "y": 2}, parser="asdf") + + +_parsers: dict[str, type[BaseExprVisitor]] = { + "python": PythonExprVisitor, + "pytables": pytables.PyTablesExprVisitor, + "pandas": PandasExprVisitor, +} + + +@pytest.mark.parametrize("engine", ENGINES) +@pytest.mark.parametrize("parser", _parsers) +def test_disallowed_nodes(engine, parser): + VisitorClass = _parsers[parser] + inst = VisitorClass("x + 1", engine, parser) + + for ops in VisitorClass.unsupported_nodes: + msg = "nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + getattr(inst, ops)() + + +def test_syntax_error_exprs(engine, parser): + e = "s +" + with pytest.raises(SyntaxError, match="invalid syntax"): + pd.eval(e, engine=engine, parser=parser) + + +def test_name_error_exprs(engine, parser): + e = "s + t" + msg = "name 's' is not defined" + with pytest.raises(NameError, match=msg): + pd.eval(e, engine=engine, parser=parser) + + +@pytest.mark.parametrize("express", ["a + @b", "@a + b", "@a + @b"]) +def test_invalid_local_variable_reference(engine, parser, express): + a, b = 1, 2 # noqa: F841 + + if parser != "pandas": + with pytest.raises(SyntaxError, match="The '@' prefix is only"): + pd.eval(express, engine=engine, parser=parser) + else: + with pytest.raises(SyntaxError, match="The '@' prefix is not"): + pd.eval(express, engine=engine, parser=parser) + + +def test_numexpr_builtin_raises(engine, parser): + sin, dotted_line = 1, 2 + if engine == "numexpr": + msg = "Variables in expression .+" + with pytest.raises(NumExprClobberingError, match=msg): + pd.eval("sin + dotted_line", engine=engine, parser=parser) + else: + res = pd.eval("sin + dotted_line", engine=engine, parser=parser) + assert res == sin + dotted_line + + +def test_bad_resolver_raises(engine, parser): + cannot_resolve = 42, 3.0 + with pytest.raises(TypeError, match="Resolver of type .+"): + pd.eval("1 + 2", resolvers=cannot_resolve, engine=engine, parser=parser) + + +def test_empty_string_raises(engine, parser): + # GH 13139 + with pytest.raises(ValueError, match="expr cannot be an empty string"): + pd.eval("", engine=engine, parser=parser) + + +def test_more_than_one_expression_raises(engine, parser): + with pytest.raises(SyntaxError, match="only a single expression is allowed"): + pd.eval("1 + 1; 2 + 2", engine=engine, parser=parser) + + +@pytest.mark.parametrize("cmp", ("and", "or")) +@pytest.mark.parametrize("lhs", (int, float)) +@pytest.mark.parametrize("rhs", (int, float)) +def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser): + gen = { + int: lambda: np.random.default_rng(2).integers(10), + float: np.random.default_rng(2).standard_normal, + } + + mid = gen[lhs]() # noqa: F841 + lhs = gen[lhs]() + rhs = gen[rhs]() + + ex1 = f"lhs {cmp} mid {cmp} rhs" + ex2 = f"lhs {cmp} mid and mid {cmp} rhs" + ex3 = f"(lhs {cmp} mid) & (mid {cmp} rhs)" + for ex in (ex1, ex2, ex3): + msg = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not" + with pytest.raises(NotImplementedError, match=msg): + pd.eval(ex, engine=engine, parser=parser) + + +@pytest.mark.parametrize( + "other", + [ 
+ "'x'", + "...", + ], +) +def test_equals_various(other): + df = DataFrame({"A": ["a", "b", "c"]}) + result = df.eval(f"A == {other}") + expected = Series([False, False, False], name="A") + if USE_NUMEXPR: + # https://github.com/pandas-dev/pandas/issues/10239 + # lose name with numexpr engine. Remove when that's fixed. + expected.name = None + tm.assert_series_equal(result, expected) + + +def test_inf(engine, parser): + s = "inf + 1" + expected = np.inf + result = pd.eval(s, engine=engine, parser=parser) + assert result == expected + + +@pytest.mark.parametrize("column", ["Temp(°C)", "Capacitance(μF)"]) +def test_query_token(engine, column): + # See: https://github.com/pandas-dev/pandas/pull/42826 + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=[column, "b"] + ) + expected = df[df[column] > 5] + query_string = f"`{column}` > 5" + result = df.query(query_string, engine=engine) + tm.assert_frame_equal(result, expected) + + +def test_negate_lt_eq_le(engine, parser): + df = DataFrame([[0, 10], [1, 20]], columns=["cat", "count"]) + expected = df[~(df.cat > 0)] + + result = df.query("~(cat > 0)", engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + if parser == "python": + msg = "'Not' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + df.query("not (cat > 0)", engine=engine, parser=parser) + else: + result = df.query("not (cat > 0)", engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "column", + DEFAULT_GLOBALS.keys(), +) +def test_eval_no_support_column_name(request, column): + # GH 44603 + if column in ["True", "False", "inf", "Inf"]: + request.node.add_marker( + pytest.mark.xfail( + raises=KeyError, + reason=f"GH 47859 DataFrame eval not supported with {column}", + ) + ) + + df = DataFrame( + np.random.default_rng(2).integers(0, 100, size=(10, 2)), + columns=[column, "col1"], + ) + expected = df[df[column] > 6] + result = df.query(f"{column}>6") + + tm.assert_frame_equal(result, expected) + + +def test_set_inplace(using_copy_on_write): + # https://github.com/pandas-dev/pandas/issues/47449 + # Ensure we don't only update the DataFrame inplace, but also the actual + # column values, such that references to this column also get updated + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result_view = df[:] + ser = df["A"] + df.eval("A = B + C", inplace=True) + expected = DataFrame({"A": [11, 13, 15], "B": [4, 5, 6], "C": [7, 8, 9]}) + tm.assert_frame_equal(df, expected) + if not using_copy_on_write: + tm.assert_series_equal(ser, expected["A"]) + tm.assert_series_equal(result_view["A"], expected["A"]) + else: + expected = Series([1, 2, 3], name="A") + tm.assert_series_equal(ser, expected) + tm.assert_series_equal(result_view["A"], expected) + + +class TestValidate: + @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0]) + def test_validate_bool_args(self, value): + msg = 'For argument "inplace" expected type bool, received type' + with pytest.raises(ValueError, match=msg): + pd.eval("2+2", inplace=value) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/config/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/config/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/config/test_config.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/config/test_config.py new file mode 100644 index 00000000..f49ae942 --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/config/test_config.py @@ -0,0 +1,437 @@ +import pytest + +from pandas._config import config as cf +from pandas._config.config import OptionError + +import pandas as pd +import pandas._testing as tm + + +class TestConfig: + @pytest.fixture(autouse=True) + def clean_config(self, monkeypatch): + with monkeypatch.context() as m: + m.setattr(cf, "_global_config", {}) + m.setattr(cf, "options", cf.DictWrapper(cf._global_config)) + m.setattr(cf, "_deprecated_options", {}) + m.setattr(cf, "_registered_options", {}) + + # Our test fixture in conftest.py sets "chained_assignment" + # to "raise" only after all test methods have been setup. + # However, after this setup, there is no longer any + # "chained_assignment" option, so re-register it. + cf.register_option("chained_assignment", "raise") + yield + + def test_api(self): + # the pandas object exposes the user API + assert hasattr(pd, "get_option") + assert hasattr(pd, "set_option") + assert hasattr(pd, "reset_option") + assert hasattr(pd, "describe_option") + + def test_is_one_of_factory(self): + v = cf.is_one_of_factory([None, 12]) + + v(12) + v(None) + msg = r"Value must be one of None\|12" + with pytest.raises(ValueError, match=msg): + v(1.1) + + def test_register_option(self): + cf.register_option("a", 1, "doc") + + # can't register an already registered option + msg = "Option 'a' has already been registered" + with pytest.raises(OptionError, match=msg): + cf.register_option("a", 1, "doc") + + # can't register an already registered option + msg = "Path prefix to option 'a' is already an option" + with pytest.raises(OptionError, match=msg): + cf.register_option("a.b.c.d1", 1, "doc") + with pytest.raises(OptionError, match=msg): + cf.register_option("a.b.c.d2", 1, "doc") + + # no python keywords + msg = "for is a python keyword" + with pytest.raises(ValueError, match=msg): + cf.register_option("for", 0) + with pytest.raises(ValueError, match=msg): + cf.register_option("a.for.b", 0) + # must be valid identifier (ensure attribute access works) + msg = "oh my goddess! 
is not a valid identifier" + with pytest.raises(ValueError, match=msg): + cf.register_option("Oh my Goddess!", 0) + + # we can register options several levels deep + # without predefining the intermediate steps + # and we can define differently named options + # in the same namespace + cf.register_option("k.b.c.d1", 1, "doc") + cf.register_option("k.b.c.d2", 1, "doc") + + def test_describe_option(self): + cf.register_option("a", 1, "doc") + cf.register_option("b", 1, "doc2") + cf.deprecate_option("b") + + cf.register_option("c.d.e1", 1, "doc3") + cf.register_option("c.d.e2", 1, "doc4") + cf.register_option("f", 1) + cf.register_option("g.h", 1) + cf.register_option("k", 2) + cf.deprecate_option("g.h", rkey="k") + cf.register_option("l", "foo") + + # non-existent keys raise KeyError + msg = r"No such keys\(s\)" + with pytest.raises(OptionError, match=msg): + cf.describe_option("no.such.key") + + # we can get the description for any key we registered + assert "doc" in cf.describe_option("a", _print_desc=False) + assert "doc2" in cf.describe_option("b", _print_desc=False) + assert "precated" in cf.describe_option("b", _print_desc=False) + assert "doc3" in cf.describe_option("c.d.e1", _print_desc=False) + assert "doc4" in cf.describe_option("c.d.e2", _print_desc=False) + + # if no doc is specified we get a default message + # saying "description not available" + assert "available" in cf.describe_option("f", _print_desc=False) + assert "available" in cf.describe_option("g.h", _print_desc=False) + assert "precated" in cf.describe_option("g.h", _print_desc=False) + assert "k" in cf.describe_option("g.h", _print_desc=False) + + # default is reported + assert "foo" in cf.describe_option("l", _print_desc=False) + # current value is reported + assert "bar" not in cf.describe_option("l", _print_desc=False) + cf.set_option("l", "bar") + assert "bar" in cf.describe_option("l", _print_desc=False) + + def test_case_insensitive(self): + cf.register_option("KanBAN", 1, "doc") + + assert "doc" in cf.describe_option("kanbaN", _print_desc=False) + assert cf.get_option("kanBaN") == 1 + cf.set_option("KanBan", 2) + assert cf.get_option("kAnBaN") == 2 + + # gets of non-existent keys fail + msg = r"No such keys\(s\): 'no_such_option'" + with pytest.raises(OptionError, match=msg): + cf.get_option("no_such_option") + cf.deprecate_option("KanBan") + + assert cf._is_deprecated("kAnBaN") + + def test_get_option(self): + cf.register_option("a", 1, "doc") + cf.register_option("b.c", "hullo", "doc2") + cf.register_option("b.b", None, "doc2") + + # gets of existing keys succeed + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "hullo" + assert cf.get_option("b.b") is None + + # gets of non-existent keys fail + msg = r"No such keys\(s\): 'no_such_option'" + with pytest.raises(OptionError, match=msg): + cf.get_option("no_such_option") + + def test_set_option(self): + cf.register_option("a", 1, "doc") + cf.register_option("b.c", "hullo", "doc2") + cf.register_option("b.b", None, "doc2") + + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "hullo" + assert cf.get_option("b.b") is None + + cf.set_option("a", 2) + cf.set_option("b.c", "wurld") + cf.set_option("b.b", 1.1) + + assert cf.get_option("a") == 2 + assert cf.get_option("b.c") == "wurld" + assert cf.get_option("b.b") == 1.1 + + msg = r"No such keys\(s\): 'no.such.key'" + with pytest.raises(OptionError, match=msg): + cf.set_option("no.such.key", None) + + def test_set_option_empty_args(self): + msg = "Must provide an even number of 
non-keyword arguments" + with pytest.raises(ValueError, match=msg): + cf.set_option() + + def test_set_option_uneven_args(self): + msg = "Must provide an even number of non-keyword arguments" + with pytest.raises(ValueError, match=msg): + cf.set_option("a.b", 2, "b.c") + + def test_set_option_invalid_single_argument_type(self): + msg = "Must provide an even number of non-keyword arguments" + with pytest.raises(ValueError, match=msg): + cf.set_option(2) + + def test_set_option_multiple(self): + cf.register_option("a", 1, "doc") + cf.register_option("b.c", "hullo", "doc2") + cf.register_option("b.b", None, "doc2") + + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "hullo" + assert cf.get_option("b.b") is None + + cf.set_option("a", "2", "b.c", None, "b.b", 10.0) + + assert cf.get_option("a") == "2" + assert cf.get_option("b.c") is None + assert cf.get_option("b.b") == 10.0 + + def test_validation(self): + cf.register_option("a", 1, "doc", validator=cf.is_int) + cf.register_option("d", 1, "doc", validator=cf.is_nonnegative_int) + cf.register_option("b.c", "hullo", "doc2", validator=cf.is_text) + + msg = "Value must have type ''" + with pytest.raises(ValueError, match=msg): + cf.register_option("a.b.c.d2", "NO", "doc", validator=cf.is_int) + + cf.set_option("a", 2) # int is_int + cf.set_option("b.c", "wurld") # str is_str + cf.set_option("d", 2) + cf.set_option("d", None) # non-negative int can be None + + # None not is_int + with pytest.raises(ValueError, match=msg): + cf.set_option("a", None) + with pytest.raises(ValueError, match=msg): + cf.set_option("a", "ab") + + msg = "Value must be a nonnegative integer or None" + with pytest.raises(ValueError, match=msg): + cf.register_option("a.b.c.d3", "NO", "doc", validator=cf.is_nonnegative_int) + with pytest.raises(ValueError, match=msg): + cf.register_option("a.b.c.d3", -2, "doc", validator=cf.is_nonnegative_int) + + msg = r"Value must be an instance of \|" + with pytest.raises(ValueError, match=msg): + cf.set_option("b.c", 1) + + validator = cf.is_one_of_factory([None, cf.is_callable]) + cf.register_option("b", lambda: None, "doc", validator=validator) + # pylint: disable-next=consider-using-f-string + cf.set_option("b", "%.1f".format) # Formatter is callable + cf.set_option("b", None) # Formatter is none (default) + with pytest.raises(ValueError, match="Value must be a callable"): + cf.set_option("b", "%.1f") + + def test_reset_option(self): + cf.register_option("a", 1, "doc", validator=cf.is_int) + cf.register_option("b.c", "hullo", "doc2", validator=cf.is_str) + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "hullo" + + cf.set_option("a", 2) + cf.set_option("b.c", "wurld") + assert cf.get_option("a") == 2 + assert cf.get_option("b.c") == "wurld" + + cf.reset_option("a") + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "wurld" + cf.reset_option("b.c") + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "hullo" + + def test_reset_option_all(self): + cf.register_option("a", 1, "doc", validator=cf.is_int) + cf.register_option("b.c", "hullo", "doc2", validator=cf.is_str) + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "hullo" + + cf.set_option("a", 2) + cf.set_option("b.c", "wurld") + assert cf.get_option("a") == 2 + assert cf.get_option("b.c") == "wurld" + + cf.reset_option("all") + assert cf.get_option("a") == 1 + assert cf.get_option("b.c") == "hullo" + + def test_deprecate_option(self): + # we can deprecate non-existent options + cf.deprecate_option("foo") + + 
assert cf._is_deprecated("foo") + with tm.assert_produces_warning(FutureWarning, match="deprecated"): + with pytest.raises(KeyError, match="No such keys.s.: 'foo'"): + cf.get_option("foo") + + cf.register_option("a", 1, "doc", validator=cf.is_int) + cf.register_option("b.c", "hullo", "doc2") + cf.register_option("foo", "hullo", "doc2") + + cf.deprecate_option("a", removal_ver="nifty_ver") + with tm.assert_produces_warning(FutureWarning, match="eprecated.*nifty_ver"): + cf.get_option("a") + + msg = "Option 'a' has already been defined as deprecated" + with pytest.raises(OptionError, match=msg): + cf.deprecate_option("a") + + cf.deprecate_option("b.c", "zounds!") + with tm.assert_produces_warning(FutureWarning, match="zounds!"): + cf.get_option("b.c") + + # test rerouting keys + cf.register_option("d.a", "foo", "doc2") + cf.register_option("d.dep", "bar", "doc2") + assert cf.get_option("d.a") == "foo" + assert cf.get_option("d.dep") == "bar" + + cf.deprecate_option("d.dep", rkey="d.a") # reroute d.dep to d.a + with tm.assert_produces_warning(FutureWarning, match="eprecated"): + assert cf.get_option("d.dep") == "foo" + + with tm.assert_produces_warning(FutureWarning, match="eprecated"): + cf.set_option("d.dep", "baz") # should overwrite "d.a" + + with tm.assert_produces_warning(FutureWarning, match="eprecated"): + assert cf.get_option("d.dep") == "baz" + + def test_config_prefix(self): + with cf.config_prefix("base"): + cf.register_option("a", 1, "doc1") + cf.register_option("b", 2, "doc2") + assert cf.get_option("a") == 1 + assert cf.get_option("b") == 2 + + cf.set_option("a", 3) + cf.set_option("b", 4) + assert cf.get_option("a") == 3 + assert cf.get_option("b") == 4 + + assert cf.get_option("base.a") == 3 + assert cf.get_option("base.b") == 4 + assert "doc1" in cf.describe_option("base.a", _print_desc=False) + assert "doc2" in cf.describe_option("base.b", _print_desc=False) + + cf.reset_option("base.a") + cf.reset_option("base.b") + + with cf.config_prefix("base"): + assert cf.get_option("a") == 1 + assert cf.get_option("b") == 2 + + def test_callback(self): + k = [None] + v = [None] + + def callback(key): + k.append(key) + v.append(cf.get_option(key)) + + cf.register_option("d.a", "foo", cb=callback) + cf.register_option("d.b", "foo", cb=callback) + + del k[-1], v[-1] + cf.set_option("d.a", "fooz") + assert k[-1] == "d.a" + assert v[-1] == "fooz" + + del k[-1], v[-1] + cf.set_option("d.b", "boo") + assert k[-1] == "d.b" + assert v[-1] == "boo" + + del k[-1], v[-1] + cf.reset_option("d.b") + assert k[-1] == "d.b" + + def test_set_ContextManager(self): + def eq(val): + assert cf.get_option("a") == val + + cf.register_option("a", 0) + eq(0) + with cf.option_context("a", 15): + eq(15) + with cf.option_context("a", 25): + eq(25) + eq(15) + eq(0) + + cf.set_option("a", 17) + eq(17) + + # Test that option_context can be used as a decorator too (#34253). 
+ @cf.option_context("a", 123) + def f(): + eq(123) + + f() + + def test_attribute_access(self): + holder = [] + + def f3(key): + holder.append(True) + + cf.register_option("a", 0) + cf.register_option("c", 0, cb=f3) + options = cf.options + + assert options.a == 0 + with cf.option_context("a", 15): + assert options.a == 15 + + options.a = 500 + assert cf.get_option("a") == 500 + + cf.reset_option("a") + assert options.a == cf.get_option("a", 0) + + msg = "You can only set the value of existing options" + with pytest.raises(OptionError, match=msg): + options.b = 1 + with pytest.raises(OptionError, match=msg): + options.display = 1 + + # make sure callback kicks when using this form of setting + options.c = 1 + assert len(holder) == 1 + + def test_option_context_scope(self): + # Ensure that creating a context does not affect the existing + # environment as it is supposed to be used with the `with` statement. + # See https://github.com/pandas-dev/pandas/issues/8514 + + original_value = 60 + context_value = 10 + option_name = "a" + + cf.register_option(option_name, original_value) + + # Ensure creating contexts didn't affect the current context. + ctx = cf.option_context(option_name, context_value) + assert cf.get_option(option_name) == original_value + + # Ensure the correct value is available inside the context. + with ctx: + assert cf.get_option(option_name) == context_value + + # Ensure the current context is reset + assert cf.get_option(option_name) == original_value + + def test_dictwrapper_getattr(self): + options = cf.options + # GH 19789 + with pytest.raises(OptionError, match="No such option"): + options.bananas + assert not hasattr(options, "bananas") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/config/test_localization.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/config/test_localization.py new file mode 100644 index 00000000..3907f557 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/config/test_localization.py @@ -0,0 +1,156 @@ +import codecs +import locale +import os + +import pytest + +from pandas._config.localization import ( + can_set_locale, + get_locales, + set_locale, +) + +from pandas.compat import ISMUSL + +import pandas as pd + +_all_locales = get_locales() +_current_locale = locale.setlocale(locale.LC_ALL) # getlocale() is wrong, see GH#46595 + +# Don't run any of these tests if we have no locales. +pytestmark = pytest.mark.skipif(not _all_locales, reason="Need locales") + +_skip_if_only_one_locale = pytest.mark.skipif( + len(_all_locales) <= 1, reason="Need multiple locales for meaningful test" +) + + +def _get_current_locale(lc_var: int = locale.LC_ALL) -> str: + # getlocale is not always compliant with setlocale, use setlocale. GH#46595 + return locale.setlocale(lc_var) + + +@pytest.mark.parametrize("lc_var", (locale.LC_ALL, locale.LC_CTYPE, locale.LC_TIME)) +def test_can_set_current_locale(lc_var): + # Can set the current locale + before_locale = _get_current_locale(lc_var) + assert can_set_locale(before_locale, lc_var=lc_var) + after_locale = _get_current_locale(lc_var) + assert before_locale == after_locale + + +@pytest.mark.parametrize("lc_var", (locale.LC_ALL, locale.LC_CTYPE, locale.LC_TIME)) +def test_can_set_locale_valid_set(lc_var): + # Can set the default locale. 
+ before_locale = _get_current_locale(lc_var) + assert can_set_locale("", lc_var=lc_var) + after_locale = _get_current_locale(lc_var) + assert before_locale == after_locale + + +@pytest.mark.parametrize( + "lc_var", + ( + locale.LC_ALL, + locale.LC_CTYPE, + pytest.param( + locale.LC_TIME, + marks=pytest.mark.skipif( + ISMUSL, reason="MUSL allows setting invalid LC_TIME." + ), + ), + ), +) +def test_can_set_locale_invalid_set(lc_var): + # Cannot set an invalid locale. + before_locale = _get_current_locale(lc_var) + assert not can_set_locale("non-existent_locale", lc_var=lc_var) + after_locale = _get_current_locale(lc_var) + assert before_locale == after_locale + + +@pytest.mark.parametrize( + "lang,enc", + [ + ("it_CH", "UTF-8"), + ("en_US", "ascii"), + ("zh_CN", "GB2312"), + ("it_IT", "ISO-8859-1"), + ], +) +@pytest.mark.parametrize("lc_var", (locale.LC_ALL, locale.LC_CTYPE, locale.LC_TIME)) +def test_can_set_locale_no_leak(lang, enc, lc_var): + # Test that can_set_locale does not leak even when returning False. See GH#46595 + before_locale = _get_current_locale(lc_var) + can_set_locale((lang, enc), locale.LC_ALL) + after_locale = _get_current_locale(lc_var) + assert before_locale == after_locale + + +def test_can_set_locale_invalid_get(monkeypatch): + # see GH#22129 + # In some cases, an invalid locale can be set, + # but a subsequent getlocale() raises a ValueError. + + def mock_get_locale(): + raise ValueError() + + with monkeypatch.context() as m: + m.setattr(locale, "getlocale", mock_get_locale) + assert not can_set_locale("") + + +def test_get_locales_at_least_one(): + # see GH#9744 + assert len(_all_locales) > 0 + + +@_skip_if_only_one_locale +def test_get_locales_prefix(): + first_locale = _all_locales[0] + assert len(get_locales(prefix=first_locale[:2])) > 0 + + +@_skip_if_only_one_locale +@pytest.mark.parametrize( + "lang,enc", + [ + ("it_CH", "UTF-8"), + ("en_US", "ascii"), + ("zh_CN", "GB2312"), + ("it_IT", "ISO-8859-1"), + ], +) +def test_set_locale(lang, enc): + before_locale = _get_current_locale() + + enc = codecs.lookup(enc).name + new_locale = lang, enc + + if not can_set_locale(new_locale): + msg = "unsupported locale setting" + + with pytest.raises(locale.Error, match=msg): + with set_locale(new_locale): + pass + else: + with set_locale(new_locale) as normalized_locale: + new_lang, new_enc = normalized_locale.split(".") + new_enc = codecs.lookup(enc).name + + normalized_locale = new_lang, new_enc + assert normalized_locale == new_locale + + # Once we exit the "with" statement, locale should be back to what it was. 
+ after_locale = _get_current_locale() + assert before_locale == after_locale + + +def test_encoding_detected(): + system_locale = os.environ.get("LC_ALL") + system_encoding = system_locale.split(".")[-1] if system_locale else "utf-8" + + assert ( + codecs.lookup(pd.options.display.encoding).name + == codecs.lookup(system_encoding).name + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/construction/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/construction/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/construction/test_extract_array.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/construction/test_extract_array.py new file mode 100644 index 00000000..4dd3eda8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/construction/test_extract_array.py @@ -0,0 +1,18 @@ +from pandas import Index +import pandas._testing as tm +from pandas.core.construction import extract_array + + +def test_extract_array_rangeindex(): + ri = Index(range(5)) + + expected = ri._values + res = extract_array(ri, extract_numpy=True, extract_range=True) + tm.assert_numpy_array_equal(res, expected) + res = extract_array(ri, extract_numpy=False, extract_range=True) + tm.assert_numpy_array_equal(res, expected) + + res = extract_array(ri, extract_numpy=True, extract_range=False) + tm.assert_index_equal(res, ri) + res = extract_array(ri, extract_numpy=False, extract_range=False) + tm.assert_index_equal(res, ri) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_datetimeindex.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_datetimeindex.py new file mode 100644 index 00000000..f54beca4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_datetimeindex.py @@ -0,0 +1,65 @@ +import pytest + +from pandas import ( + DatetimeIndex, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "cons", + [ + lambda x: DatetimeIndex(x), + lambda x: DatetimeIndex(DatetimeIndex(x)), + ], +) +def test_datetimeindex(using_copy_on_write, cons): + dt = date_range("2019-12-31", periods=3, freq="D") + ser = Series(dt) + idx = cons(ser) + expected = idx.copy(deep=True) + ser.iloc[0] = Timestamp("2020-12-31") + if using_copy_on_write: + tm.assert_index_equal(idx, expected) + + +def test_datetimeindex_tz_convert(using_copy_on_write): + dt = date_range("2019-12-31", periods=3, freq="D", tz="Europe/Berlin") + ser = Series(dt) + idx = DatetimeIndex(ser).tz_convert("US/Eastern") + expected = idx.copy(deep=True) + ser.iloc[0] = Timestamp("2020-12-31", tz="Europe/Berlin") + if using_copy_on_write: + tm.assert_index_equal(idx, expected) + + +def test_datetimeindex_tz_localize(using_copy_on_write): + dt = date_range("2019-12-31", periods=3, freq="D") + ser = Series(dt) + idx = DatetimeIndex(ser).tz_localize("Europe/Berlin") + expected = idx.copy(deep=True) + ser.iloc[0] = Timestamp("2020-12-31") + if using_copy_on_write: + tm.assert_index_equal(idx, expected) + 
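# Illustrative sketch (assumed usage, not part of the vendored test file):
# under copy-on-write, an index built from a Series shares its backing array
# only until either side mutates; the copy is deferred to the first write.
# A minimal standalone reproduction, assuming pandas 2.x with the
# "mode.copy_on_write" option available:
#
#     import pandas as pd
#     from pandas import DatetimeIndex, Series, Timestamp, date_range
#
#     with pd.option_context("mode.copy_on_write", True):
#         ser = Series(date_range("2019-12-31", periods=3, freq="D"))
#         idx = DatetimeIndex(ser)               # lazily shares memory with ser
#         ser.iloc[0] = Timestamp("2020-12-31")  # write triggers the deferred copy
#         assert idx[0] == Timestamp("2019-12-31")  # idx is unaffected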
+ +def test_datetimeindex_isocalendar(using_copy_on_write): + dt = date_range("2019-12-31", periods=3, freq="D") + ser = Series(dt) + df = DatetimeIndex(ser).isocalendar() + expected = df.index.copy(deep=True) + ser.iloc[0] = Timestamp("2020-12-31") + if using_copy_on_write: + tm.assert_index_equal(df.index, expected) + + +def test_index_values(using_copy_on_write): + idx = date_range("2019-12-31", periods=3, freq="D") + result = idx.values + if using_copy_on_write: + assert result.flags.writeable is False + else: + assert result.flags.writeable is True diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_index.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_index.py new file mode 100644 index 00000000..6411e20a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_index.py @@ -0,0 +1,178 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +def index_view(index_data=[1, 2]): + df = DataFrame({"a": index_data, "b": 1.5}) + view = df[:] + df = df.set_index("a", drop=True) + idx = df.index + # df = None + return idx, view + + +def test_set_index_update_column(using_copy_on_write): + df = DataFrame({"a": [1, 2], "b": 1}) + df = df.set_index("a", drop=False) + expected = df.index.copy(deep=True) + df.iloc[0, 0] = 100 + if using_copy_on_write: + tm.assert_index_equal(df.index, expected) + else: + tm.assert_index_equal(df.index, Index([100, 2], name="a")) + + +def test_set_index_drop_update_column(using_copy_on_write): + df = DataFrame({"a": [1, 2], "b": 1.5}) + view = df[:] + df = df.set_index("a", drop=True) + expected = df.index.copy(deep=True) + view.iloc[0, 0] = 100 + tm.assert_index_equal(df.index, expected) + + +def test_set_index_series(using_copy_on_write): + df = DataFrame({"a": [1, 2], "b": 1.5}) + ser = Series([10, 11]) + df = df.set_index(ser) + expected = df.index.copy(deep=True) + ser.iloc[0] = 100 + if using_copy_on_write: + tm.assert_index_equal(df.index, expected) + else: + tm.assert_index_equal(df.index, Index([100, 11])) + + +def test_assign_index_as_series(using_copy_on_write): + df = DataFrame({"a": [1, 2], "b": 1.5}) + ser = Series([10, 11]) + df.index = ser + expected = df.index.copy(deep=True) + ser.iloc[0] = 100 + if using_copy_on_write: + tm.assert_index_equal(df.index, expected) + else: + tm.assert_index_equal(df.index, Index([100, 11])) + + +def test_assign_index_as_index(using_copy_on_write): + df = DataFrame({"a": [1, 2], "b": 1.5}) + ser = Series([10, 11]) + rhs_index = Index(ser) + df.index = rhs_index + rhs_index = None # overwrite to clear reference + expected = df.index.copy(deep=True) + ser.iloc[0] = 100 + if using_copy_on_write: + tm.assert_index_equal(df.index, expected) + else: + tm.assert_index_equal(df.index, Index([100, 11])) + + +def test_index_from_series(using_copy_on_write): + ser = Series([1, 2]) + idx = Index(ser) + expected = idx.copy(deep=True) + ser.iloc[0] = 100 + if using_copy_on_write: + tm.assert_index_equal(idx, expected) + else: + tm.assert_index_equal(idx, Index([100, 2])) + + +def test_index_from_series_copy(using_copy_on_write): + ser = Series([1, 2]) + idx = Index(ser, copy=True) # noqa: F841 + arr = get_array(ser) + ser.iloc[0] = 100 + assert np.shares_memory(get_array(ser), arr) + + +def test_index_from_index(using_copy_on_write): + ser = Series([1, 2]) + idx = Index(ser) + idx = Index(idx) + expected = 
idx.copy(deep=True) + ser.iloc[0] = 100 + if using_copy_on_write: + tm.assert_index_equal(idx, expected) + else: + tm.assert_index_equal(idx, Index([100, 2])) + + +@pytest.mark.parametrize( + "func", + [ + lambda x: x._shallow_copy(x._values), + lambda x: x.view(), + lambda x: x.take([0, 1]), + lambda x: x.repeat([1, 1]), + lambda x: x[slice(0, 2)], + lambda x: x[[0, 1]], + lambda x: x._getitem_slice(slice(0, 2)), + lambda x: x.delete([]), + lambda x: x.rename("b"), + lambda x: x.astype("Int64", copy=False), + ], + ids=[ + "_shallow_copy", + "view", + "take", + "repeat", + "getitem_slice", + "getitem_list", + "_getitem_slice", + "delete", + "rename", + "astype", + ], +) +def test_index_ops(using_copy_on_write, func, request): + idx, view_ = index_view() + expected = idx.copy(deep=True) + if "astype" in request.node.callspec.id: + expected = expected.astype("Int64") + idx = func(idx) + view_.iloc[0, 0] = 100 + if using_copy_on_write: + tm.assert_index_equal(idx, expected, check_names=False) + + +def test_infer_objects(using_copy_on_write): + idx, view_ = index_view(["a", "b"]) + expected = idx.copy(deep=True) + idx = idx.infer_objects(copy=False) + view_.iloc[0, 0] = "aaaa" + if using_copy_on_write: + tm.assert_index_equal(idx, expected, check_names=False) + + +def test_index_to_frame(using_copy_on_write): + idx = Index([1, 2, 3], name="a") + expected = idx.copy(deep=True) + df = idx.to_frame() + if using_copy_on_write: + assert np.shares_memory(get_array(df, "a"), idx._values) + assert not df._mgr._has_no_reference(0) + else: + assert not np.shares_memory(get_array(df, "a"), idx._values) + + df.iloc[0, 0] = 100 + tm.assert_index_equal(idx, expected) + + +def test_index_values(using_copy_on_write): + idx = Index([1, 2, 3]) + result = idx.values + if using_copy_on_write: + assert result.flags.writeable is False + else: + assert result.flags.writeable is True diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_periodindex.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_periodindex.py new file mode 100644 index 00000000..94bc3a66 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_periodindex.py @@ -0,0 +1,26 @@ +import pytest + +from pandas import ( + Period, + PeriodIndex, + Series, + period_range, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "cons", + [ + lambda x: PeriodIndex(x), + lambda x: PeriodIndex(PeriodIndex(x)), + ], +) +def test_periodindex(using_copy_on_write, cons): + dt = period_range("2019-12-31", periods=3, freq="D") + ser = Series(dt) + idx = cons(ser) + expected = idx.copy(deep=True) + ser.iloc[0] = Period("2020-12-31") + if using_copy_on_write: + tm.assert_index_equal(idx, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_timedeltaindex.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_timedeltaindex.py new file mode 100644 index 00000000..a543e06c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/index/test_timedeltaindex.py @@ -0,0 +1,26 @@ +import pytest + +from pandas import ( + Series, + Timedelta, + TimedeltaIndex, + timedelta_range, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "cons", + [ + lambda x: TimedeltaIndex(x), + lambda x: TimedeltaIndex(TimedeltaIndex(x)), + ], +) +def test_timedeltaindex(using_copy_on_write, cons): + dt = timedelta_range("1 day", periods=3) + ser = Series(dt) + idx = cons(ser) + expected = 
idx.copy(deep=True) + ser.iloc[0] = Timedelta("5 days") + if using_copy_on_write: + tm.assert_index_equal(idx, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_array.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_array.py new file mode 100644 index 00000000..62a6a337 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_array.py @@ -0,0 +1,185 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, + date_range, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + +# ----------------------------------------------------------------------------- +# Copy/view behaviour for accessing underlying array of Series/DataFrame + + +@pytest.mark.parametrize( + "method", + [lambda ser: ser.values, lambda ser: np.asarray(ser)], + ids=["values", "asarray"], +) +def test_series_values(using_copy_on_write, method): + ser = Series([1, 2, 3], name="name") + ser_orig = ser.copy() + + arr = method(ser) + + if using_copy_on_write: + # .values still gives a view but is read-only + assert np.shares_memory(arr, get_array(ser, "name")) + assert arr.flags.writeable is False + + # mutating series through arr therefore doesn't work + with pytest.raises(ValueError, match="read-only"): + arr[0] = 0 + tm.assert_series_equal(ser, ser_orig) + + # mutating the series itself still works + ser.iloc[0] = 0 + assert ser.values[0] == 0 + else: + assert arr.flags.writeable is True + arr[0] = 0 + assert ser.iloc[0] == 0 + + +@pytest.mark.parametrize( + "method", + [lambda df: df.values, lambda df: np.asarray(df)], + ids=["values", "asarray"], +) +def test_dataframe_values(using_copy_on_write, using_array_manager, method): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df_orig = df.copy() + + arr = method(df) + + if using_copy_on_write: + # .values still gives a view but is read-only + assert np.shares_memory(arr, get_array(df, "a")) + assert arr.flags.writeable is False + + # mutating series through arr therefore doesn't work + with pytest.raises(ValueError, match="read-only"): + arr[0, 0] = 0 + tm.assert_frame_equal(df, df_orig) + + # mutating the series itself still works + df.iloc[0, 0] = 0 + assert df.values[0, 0] == 0 + else: + assert arr.flags.writeable is True + arr[0, 0] = 0 + if not using_array_manager: + assert df.iloc[0, 0] == 0 + else: + tm.assert_frame_equal(df, df_orig) + + +def test_series_to_numpy(using_copy_on_write): + ser = Series([1, 2, 3], name="name") + ser_orig = ser.copy() + + # default: copy=False, no dtype or NAs + arr = ser.to_numpy() + if using_copy_on_write: + # to_numpy still gives a view but is read-only + assert np.shares_memory(arr, get_array(ser, "name")) + assert arr.flags.writeable is False + + # mutating series through arr therefore doesn't work + with pytest.raises(ValueError, match="read-only"): + arr[0] = 0 + tm.assert_series_equal(ser, ser_orig) + + # mutating the series itself still works + ser.iloc[0] = 0 + assert ser.values[0] == 0 + else: + assert arr.flags.writeable is True + arr[0] = 0 + assert ser.iloc[0] == 0 + + # specify copy=False gives a writeable array + ser = Series([1, 2, 3], name="name") + arr = ser.to_numpy(copy=True) + assert not np.shares_memory(arr, get_array(ser, "name")) + assert arr.flags.writeable is True + + # specifying a dtype that already causes a copy also gives a writeable array + ser = Series([1, 2, 3], name="name") + arr = ser.to_numpy(dtype="float64") + assert not np.shares_memory(arr, 
get_array(ser, "name")) + assert arr.flags.writeable is True + + +@pytest.mark.parametrize("order", ["F", "C"]) +def test_ravel_read_only(using_copy_on_write, order): + ser = Series([1, 2, 3]) + arr = ser.ravel(order=order) + if using_copy_on_write: + assert arr.flags.writeable is False + assert np.shares_memory(get_array(ser), arr) + + +def test_series_array_ea_dtypes(using_copy_on_write): + ser = Series([1, 2, 3], dtype="Int64") + arr = np.asarray(ser, dtype="int64") + assert np.shares_memory(arr, get_array(ser)) + if using_copy_on_write: + assert arr.flags.writeable is False + else: + assert arr.flags.writeable is True + + arr = np.asarray(ser) + assert not np.shares_memory(arr, get_array(ser)) + assert arr.flags.writeable is True + + +def test_dataframe_array_ea_dtypes(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}, dtype="Int64") + arr = np.asarray(df, dtype="int64") + # TODO: This should be able to share memory, but we are roundtripping + # through object + assert not np.shares_memory(arr, get_array(df, "a")) + assert arr.flags.writeable is True + + arr = np.asarray(df) + if using_copy_on_write: + # TODO(CoW): This should be True + assert arr.flags.writeable is False + else: + assert arr.flags.writeable is True + + +def test_dataframe_array_string_dtype(using_copy_on_write, using_array_manager): + df = DataFrame({"a": ["a", "b"]}, dtype="string") + arr = np.asarray(df) + if not using_array_manager: + assert np.shares_memory(arr, get_array(df, "a")) + if using_copy_on_write: + assert arr.flags.writeable is False + else: + assert arr.flags.writeable is True + + +def test_dataframe_multiple_numpy_dtypes(): + df = DataFrame({"a": [1, 2, 3], "b": 1.5}) + arr = np.asarray(df) + assert not np.shares_memory(arr, get_array(df, "a")) + assert arr.flags.writeable is True + + +def test_values_is_ea(using_copy_on_write): + df = DataFrame({"a": date_range("2012-01-01", periods=3)}) + arr = np.asarray(df) + if using_copy_on_write: + assert arr.flags.writeable is False + else: + assert arr.flags.writeable is True + + +def test_empty_dataframe(): + df = DataFrame() + arr = np.asarray(df) + assert arr.flags.writeable is True diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_astype.py new file mode 100644 index 00000000..4b751ad4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_astype.py @@ -0,0 +1,250 @@ +import numpy as np +import pytest + +from pandas.compat import pa_version_under7p0 +from pandas.compat.pyarrow import pa_version_under12p0 +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +def test_astype_single_dtype(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": 1.5}) + df_orig = df.copy() + df2 = df.astype("float64") + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column/block + df2.iloc[0, 2] = 5.5 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + tm.assert_frame_equal(df, 
df_orig) + + # mutating parent also doesn't update result + df2 = df.astype("float64") + df.iloc[0, 2] = 5.5 + tm.assert_frame_equal(df2, df_orig.astype("float64")) + + +@pytest.mark.parametrize("dtype", ["int64", "Int64"]) +@pytest.mark.parametrize("new_dtype", ["int64", "Int64", "int64[pyarrow]"]) +def test_astype_avoids_copy(using_copy_on_write, dtype, new_dtype): + if new_dtype == "int64[pyarrow]" and pa_version_under7p0: + pytest.skip("pyarrow not installed") + df = DataFrame({"a": [1, 2, 3]}, dtype=dtype) + df_orig = df.copy() + df2 = df.astype(new_dtype) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column/block + df2.iloc[0, 0] = 10 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + # mutating parent also doesn't update result + df2 = df.astype(new_dtype) + df.iloc[0, 0] = 100 + tm.assert_frame_equal(df2, df_orig.astype(new_dtype)) + + +@pytest.mark.parametrize("dtype", ["float64", "int32", "Int32", "int32[pyarrow]"]) +def test_astype_different_target_dtype(using_copy_on_write, dtype): + if dtype == "int32[pyarrow]" and pa_version_under7p0: + pytest.skip("pyarrow not installed") + df = DataFrame({"a": [1, 2, 3]}) + df_orig = df.copy() + df2 = df.astype(dtype) + + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + if using_copy_on_write: + assert df2._mgr._has_no_reference(0) + + df2.iloc[0, 0] = 5 + tm.assert_frame_equal(df, df_orig) + + # mutating parent also doesn't update result + df2 = df.astype(dtype) + df.iloc[0, 0] = 100 + tm.assert_frame_equal(df2, df_orig.astype(dtype)) + + +@td.skip_array_manager_invalid_test +def test_astype_numpy_to_ea(): + ser = Series([1, 2, 3]) + with pd.option_context("mode.copy_on_write", True): + result = ser.astype("Int64") + assert np.shares_memory(get_array(ser), get_array(result)) + + +@pytest.mark.parametrize( + "dtype, new_dtype", [("object", "string"), ("string", "object")] +) +def test_astype_string_and_object(using_copy_on_write, dtype, new_dtype): + df = DataFrame({"a": ["a", "b", "c"]}, dtype=dtype) + df_orig = df.copy() + df2 = df.astype(new_dtype) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 0] = "x" + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "dtype, new_dtype", [("object", "string"), ("string", "object")] +) +def test_astype_string_and_object_update_original( + using_copy_on_write, dtype, new_dtype +): + df = DataFrame({"a": ["a", "b", "c"]}, dtype=dtype) + df2 = df.astype(new_dtype) + df_orig = df2.copy() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df.iloc[0, 0] = "x" + tm.assert_frame_equal(df2, df_orig) + + +def test_astype_dict_dtypes(using_copy_on_write): + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": Series([1.5, 1.5, 1.5], dtype="float64")} + ) + df_orig = df.copy() + df2 = df.astype({"a": "float64", "c": "float64"}) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(df2, "a"), 
get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column/block + df2.iloc[0, 2] = 5.5 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + + df2.iloc[0, 1] = 10 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + tm.assert_frame_equal(df, df_orig) + + +def test_astype_different_datetime_resos(using_copy_on_write): + df = DataFrame({"a": date_range("2019-12-31", periods=2, freq="D")}) + result = df.astype("datetime64[ms]") + + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")) + if using_copy_on_write: + assert result._mgr._has_no_reference(0) + + +def test_astype_different_timezones(using_copy_on_write): + df = DataFrame( + {"a": date_range("2019-12-31", periods=5, freq="D", tz="US/Pacific")} + ) + result = df.astype("datetime64[ns, Europe/Berlin]") + if using_copy_on_write: + assert not result._mgr._has_no_reference(0) + assert np.shares_memory(get_array(df, "a"), get_array(result, "a")) + + +def test_astype_different_timezones_different_reso(using_copy_on_write): + df = DataFrame( + {"a": date_range("2019-12-31", periods=5, freq="D", tz="US/Pacific")} + ) + result = df.astype("datetime64[ms, Europe/Berlin]") + if using_copy_on_write: + assert result._mgr._has_no_reference(0) + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")) + + +@pytest.mark.skipif(pa_version_under7p0, reason="pyarrow not installed") +def test_astype_arrow_timestamp(using_copy_on_write): + df = DataFrame( + { + "a": [ + Timestamp("2020-01-01 01:01:01.000001"), + Timestamp("2020-01-01 01:01:01.000001"), + ] + }, + dtype="M8[ns]", + ) + result = df.astype("timestamp[ns][pyarrow]") + if using_copy_on_write: + assert not result._mgr._has_no_reference(0) + if pa_version_under12p0: + assert not np.shares_memory( + get_array(df, "a"), get_array(result, "a")._pa_array + ) + else: + assert np.shares_memory( + get_array(df, "a"), get_array(result, "a")._pa_array + ) + + +def test_convert_dtypes_infer_objects(using_copy_on_write): + ser = Series(["a", "b", "c"]) + ser_orig = ser.copy() + result = ser.convert_dtypes( + convert_integer=False, + convert_boolean=False, + convert_floating=False, + convert_string=False, + ) + + if using_copy_on_write: + assert np.shares_memory(get_array(ser), get_array(result)) + else: + assert not np.shares_memory(get_array(ser), get_array(result)) + + result.iloc[0] = "x" + tm.assert_series_equal(ser, ser_orig) + + +def test_convert_dtypes(using_copy_on_write): + df = DataFrame({"a": ["a", "b"], "b": [1, 2], "c": [1.5, 2.5], "d": [True, False]}) + df_orig = df.copy() + df2 = df.convert_dtypes() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert np.shares_memory(get_array(df2, "d"), get_array(df, "d")) + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + assert not np.shares_memory(get_array(df2, "d"), get_array(df, "d")) + + df2.iloc[0, 0] = "x" + tm.assert_frame_equal(df, 
df_orig) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_clip.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_clip.py new file mode 100644 index 00000000..6a27a063 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_clip.py @@ -0,0 +1,83 @@ +import numpy as np + +from pandas import DataFrame +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +def test_clip_inplace_reference(using_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3]}) + df_copy = df.copy() + arr_a = get_array(df, "a") + view = df[:] + df.clip(lower=2, inplace=True) + + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), arr_a) + assert df._mgr._has_no_reference(0) + assert view._mgr._has_no_reference(0) + tm.assert_frame_equal(df_copy, view) + else: + assert np.shares_memory(get_array(df, "a"), arr_a) + + +def test_clip_inplace_reference_no_op(using_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3]}) + df_copy = df.copy() + arr_a = get_array(df, "a") + view = df[:] + df.clip(lower=0, inplace=True) + + assert np.shares_memory(get_array(df, "a"), arr_a) + + if using_copy_on_write: + assert not df._mgr._has_no_reference(0) + assert not view._mgr._has_no_reference(0) + tm.assert_frame_equal(df_copy, view) + + +def test_clip_inplace(using_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3]}) + arr_a = get_array(df, "a") + df.clip(lower=2, inplace=True) + + assert np.shares_memory(get_array(df, "a"), arr_a) + + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + +def test_clip(using_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3]}) + df_orig = df.copy() + df2 = df.clip(lower=2) + + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + tm.assert_frame_equal(df_orig, df) + + +def test_clip_no_op(using_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3]}) + df2 = df.clip(lower=0) + + if using_copy_on_write: + assert not df._mgr._has_no_reference(0) + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + +def test_clip_chained_inplace(using_copy_on_write): + df = DataFrame({"a": [1, 4, 2], "b": 1}) + df_orig = df.copy() + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["a"].clip(1, 2, inplace=True) + tm.assert_frame_equal(df, df_orig) + + with tm.raises_chained_assignment_error(): + df[["a"]].clip(1, 2, inplace=True) + tm.assert_frame_equal(df, df_orig) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_constructors.py new file mode 100644 index 00000000..af7e7599 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_constructors.py @@ -0,0 +1,354 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + Period, + PeriodIndex, + Series, + Timedelta, + TimedeltaIndex, + Timestamp, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + +# ----------------------------------------------------------------------------- +# Copy/view behaviour for Series / DataFrame constructors + + +@pytest.mark.parametrize("dtype", [None, "int64"]) +def test_series_from_series(dtype, using_copy_on_write): + # Case: constructing a Series from another Series object 
follows CoW rules: + # a new object is returned and thus mutations are not propagated + ser = Series([1, 2, 3], name="name") + + # default is copy=False -> new Series is a shallow copy / view of original + result = Series(ser, dtype=dtype) + + # the shallow copy still shares memory + assert np.shares_memory(get_array(ser), get_array(result)) + + if using_copy_on_write: + assert result._mgr.blocks[0].refs.has_reference() + + if using_copy_on_write: + # mutating new series copy doesn't mutate original + result.iloc[0] = 0 + assert ser.iloc[0] == 1 + # mutating triggered a copy-on-write -> no longer shares memory + assert not np.shares_memory(get_array(ser), get_array(result)) + else: + # mutating shallow copy does mutate original + result.iloc[0] = 0 + assert ser.iloc[0] == 0 + # and still shares memory + assert np.shares_memory(get_array(ser), get_array(result)) + + # the same when modifying the parent + result = Series(ser, dtype=dtype) + + if using_copy_on_write: + # mutating original doesn't mutate new series + ser.iloc[0] = 0 + assert result.iloc[0] == 1 + else: + # mutating original does mutate shallow copy + ser.iloc[0] = 0 + assert result.iloc[0] == 0 + + +def test_series_from_series_with_reindex(using_copy_on_write): + # Case: constructing a Series from another Series with specifying an index + # that potentially requires a reindex of the values + ser = Series([1, 2, 3], name="name") + + # passing an index that doesn't actually require a reindex of the values + # -> without CoW we get an actual mutating view + for index in [ + ser.index, + ser.index.copy(), + list(ser.index), + ser.index.rename("idx"), + ]: + result = Series(ser, index=index) + assert np.shares_memory(ser.values, result.values) + result.iloc[0] = 0 + if using_copy_on_write: + assert ser.iloc[0] == 1 + else: + assert ser.iloc[0] == 0 + + # ensure that if an actual reindex is needed, we don't have any refs + # (mutating the result wouldn't trigger CoW) + result = Series(ser, index=[0, 1, 2, 3]) + assert not np.shares_memory(ser.values, result.values) + if using_copy_on_write: + assert not result._mgr.blocks[0].refs.has_reference() + + +@pytest.mark.parametrize("fastpath", [False, True]) +@pytest.mark.parametrize("dtype", [None, "int64"]) +@pytest.mark.parametrize("idx", [None, pd.RangeIndex(start=0, stop=3, step=1)]) +@pytest.mark.parametrize( + "arr", [np.array([1, 2, 3], dtype="int64"), pd.array([1, 2, 3], dtype="Int64")] +) +def test_series_from_array(using_copy_on_write, idx, dtype, fastpath, arr): + if idx is None or dtype is not None: + fastpath = False + ser = Series(arr, dtype=dtype, index=idx, fastpath=fastpath) + ser_orig = ser.copy() + data = getattr(arr, "_data", arr) + if using_copy_on_write: + assert not np.shares_memory(get_array(ser), data) + else: + assert np.shares_memory(get_array(ser), data) + + arr[0] = 100 + if using_copy_on_write: + tm.assert_series_equal(ser, ser_orig) + else: + expected = Series([100, 2, 3], dtype=dtype if dtype is not None else arr.dtype) + tm.assert_series_equal(ser, expected) + + +@pytest.mark.parametrize("copy", [True, False, None]) +def test_series_from_array_different_dtype(using_copy_on_write, copy): + arr = np.array([1, 2, 3], dtype="int64") + ser = Series(arr, dtype="int32", copy=copy) + assert not np.shares_memory(get_array(ser), arr) + + +@pytest.mark.parametrize( + "idx", + [ + Index([1, 2]), + DatetimeIndex([Timestamp("2019-12-31"), Timestamp("2020-12-31")]), + PeriodIndex([Period("2019-12-31"), Period("2020-12-31")]), + TimedeltaIndex([Timedelta("1 days"), 
Timedelta("2 days")]), + ], +) +def test_series_from_index(using_copy_on_write, idx): + ser = Series(idx) + expected = idx.copy(deep=True) + if using_copy_on_write: + assert np.shares_memory(get_array(ser), get_array(idx)) + assert not ser._mgr._has_no_reference(0) + else: + assert not np.shares_memory(get_array(ser), get_array(idx)) + ser.iloc[0] = ser.iloc[1] + tm.assert_index_equal(idx, expected) + + +def test_series_from_index_different_dtypes(using_copy_on_write): + idx = Index([1, 2, 3], dtype="int64") + ser = Series(idx, dtype="int32") + assert not np.shares_memory(get_array(ser), get_array(idx)) + if using_copy_on_write: + assert ser._mgr._has_no_reference(0) + + +@pytest.mark.parametrize("fastpath", [False, True]) +@pytest.mark.parametrize("dtype", [None, "int64"]) +@pytest.mark.parametrize("idx", [None, pd.RangeIndex(start=0, stop=3, step=1)]) +def test_series_from_block_manager(using_copy_on_write, idx, dtype, fastpath): + ser = Series([1, 2, 3], dtype="int64") + ser_orig = ser.copy() + ser2 = Series(ser._mgr, dtype=dtype, fastpath=fastpath, index=idx) + assert np.shares_memory(get_array(ser), get_array(ser2)) + if using_copy_on_write: + assert not ser2._mgr._has_no_reference(0) + + ser2.iloc[0] = 100 + if using_copy_on_write: + tm.assert_series_equal(ser, ser_orig) + else: + expected = Series([100, 2, 3]) + tm.assert_series_equal(ser, expected) + + +def test_series_from_block_manager_different_dtype(using_copy_on_write): + ser = Series([1, 2, 3], dtype="int64") + ser2 = Series(ser._mgr, dtype="int32") + assert not np.shares_memory(get_array(ser), get_array(ser2)) + if using_copy_on_write: + assert ser2._mgr._has_no_reference(0) + + +@pytest.mark.parametrize("func", [lambda x: x, lambda x: x._mgr]) +@pytest.mark.parametrize("columns", [None, ["a"]]) +def test_dataframe_constructor_mgr_or_df(using_copy_on_write, columns, func): + df = DataFrame({"a": [1, 2, 3]}) + df_orig = df.copy() + + new_df = DataFrame(func(df)) + + assert np.shares_memory(get_array(df, "a"), get_array(new_df, "a")) + new_df.iloc[0] = 100 + + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), get_array(new_df, "a")) + tm.assert_frame_equal(df, df_orig) + else: + assert np.shares_memory(get_array(df, "a"), get_array(new_df, "a")) + tm.assert_frame_equal(df, new_df) + + +@pytest.mark.parametrize("dtype", [None, "int64", "Int64"]) +@pytest.mark.parametrize("index", [None, [0, 1, 2]]) +@pytest.mark.parametrize("columns", [None, ["a", "b"], ["a", "b", "c"]]) +def test_dataframe_from_dict_of_series( + request, using_copy_on_write, columns, index, dtype +): + # Case: constructing a DataFrame from Series objects with copy=False + # has to do a lazy following CoW rules + # (the default for DataFrame(dict) is still to copy to ensure consolidation) + s1 = Series([1, 2, 3]) + s2 = Series([4, 5, 6]) + s1_orig = s1.copy() + expected = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6]}, index=index, columns=columns, dtype=dtype + ) + + result = DataFrame( + {"a": s1, "b": s2}, index=index, columns=columns, dtype=dtype, copy=False + ) + + # the shallow copy still shares memory + assert np.shares_memory(get_array(result, "a"), get_array(s1)) + + # mutating the new dataframe doesn't mutate original + result.iloc[0, 0] = 10 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(s1)) + tm.assert_series_equal(s1, s1_orig) + else: + assert s1.iloc[0] == 10 + + # the same when modifying the parent series + s1 = Series([1, 2, 3]) + s2 = Series([4, 5, 6]) + result = DataFrame( + 
{"a": s1, "b": s2}, index=index, columns=columns, dtype=dtype, copy=False + ) + s1.iloc[0] = 10 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(s1)) + tm.assert_frame_equal(result, expected) + else: + assert result.iloc[0, 0] == 10 + + +@pytest.mark.parametrize("dtype", [None, "int64"]) +def test_dataframe_from_dict_of_series_with_reindex(dtype): + # Case: constructing a DataFrame from Series objects with copy=False + # and passing an index that requires an actual (no-view) reindex -> need + # to ensure the result doesn't have refs set up to unnecessarily trigger + # a copy on write + s1 = Series([1, 2, 3]) + s2 = Series([4, 5, 6]) + df = DataFrame({"a": s1, "b": s2}, index=[1, 2, 3], dtype=dtype, copy=False) + + # df should own its memory, so mutating shouldn't trigger a copy + arr_before = get_array(df, "a") + assert not np.shares_memory(arr_before, get_array(s1)) + df.iloc[0, 0] = 100 + arr_after = get_array(df, "a") + assert np.shares_memory(arr_before, arr_after) + + +@pytest.mark.parametrize("cons", [Series, Index]) +@pytest.mark.parametrize( + "data, dtype", [([1, 2], None), ([1, 2], "int64"), (["a", "b"], None)] +) +def test_dataframe_from_series_or_index(using_copy_on_write, data, dtype, cons): + obj = cons(data, dtype=dtype) + obj_orig = obj.copy() + df = DataFrame(obj, dtype=dtype) + assert np.shares_memory(get_array(obj), get_array(df, 0)) + if using_copy_on_write: + assert not df._mgr._has_no_reference(0) + + df.iloc[0, 0] = data[-1] + if using_copy_on_write: + tm.assert_equal(obj, obj_orig) + + +@pytest.mark.parametrize("cons", [Series, Index]) +def test_dataframe_from_series_or_index_different_dtype(using_copy_on_write, cons): + obj = cons([1, 2], dtype="int64") + df = DataFrame(obj, dtype="int32") + assert not np.shares_memory(get_array(obj), get_array(df, 0)) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + +def test_dataframe_from_series_infer_datetime(using_copy_on_write): + ser = Series([Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype=object) + df = DataFrame(ser) + assert not np.shares_memory(get_array(ser), get_array(df, 0)) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + +@pytest.mark.parametrize("index", [None, [0, 1, 2]]) +def test_dataframe_from_dict_of_series_with_dtype(index): + # Variant of above, but now passing a dtype that causes a copy + # -> need to ensure the result doesn't have refs set up to unnecessarily + # trigger a copy on write + s1 = Series([1.0, 2.0, 3.0]) + s2 = Series([4, 5, 6]) + df = DataFrame({"a": s1, "b": s2}, index=index, dtype="int64", copy=False) + + # df should own its memory, so mutating shouldn't trigger a copy + arr_before = get_array(df, "a") + assert not np.shares_memory(arr_before, get_array(s1)) + df.iloc[0, 0] = 100 + arr_after = get_array(df, "a") + assert np.shares_memory(arr_before, arr_after) + + +@pytest.mark.parametrize("copy", [False, None, True]) +def test_frame_from_numpy_array(using_copy_on_write, copy, using_array_manager): + arr = np.array([[1, 2], [3, 4]]) + df = DataFrame(arr, copy=copy) + + if ( + using_copy_on_write + and copy is not False + or copy is True + or (using_array_manager and copy is None) + ): + assert not np.shares_memory(get_array(df, 0), arr) + else: + assert np.shares_memory(get_array(df, 0), arr) + + +def test_dataframe_from_records_with_dataframe(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}) + df_orig = df.copy() + with tm.assert_produces_warning(FutureWarning): + df2 = 
DataFrame.from_records(df) + if using_copy_on_write: + assert not df._mgr._has_no_reference(0) + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + df2.iloc[0, 0] = 100 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + else: + tm.assert_frame_equal(df, df2) + + +def test_frame_from_dict_of_index(using_copy_on_write): + idx = Index([1, 2, 3]) + expected = idx.copy(deep=True) + df = DataFrame({"a": idx}, copy=False) + assert np.shares_memory(get_array(df, "a"), idx._values) + if using_copy_on_write: + assert not df._mgr._has_no_reference(0) + + df.iloc[0, 0] = 100 + tm.assert_index_equal(idx, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_core_functionalities.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_core_functionalities.py new file mode 100644 index 00000000..5c177465 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_core_functionalities.py @@ -0,0 +1,100 @@ +import numpy as np +import pytest + +from pandas import DataFrame +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +def test_assigning_to_same_variable_removes_references(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}) + df = df.reset_index() + if using_copy_on_write: + assert df._mgr._has_no_reference(1) + arr = get_array(df, "a") + df.iloc[0, 1] = 100 # Write into a + + assert np.shares_memory(arr, get_array(df, "a")) + + +def test_setitem_dont_track_unnecessary_references(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1}) + + df["b"] = 100 + arr = get_array(df, "a") + # We split the block in setitem, if we are not careful the new blocks will + # reference each other triggering a copy + df.iloc[0, 0] = 100 + assert np.shares_memory(arr, get_array(df, "a")) + + +def test_setitem_with_view_copies(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1}) + view = df[:] + expected = df.copy() + + df["b"] = 100 + arr = get_array(df, "a") + df.iloc[0, 0] = 100 # Check that we correctly track reference + if using_copy_on_write: + assert not np.shares_memory(arr, get_array(df, "a")) + tm.assert_frame_equal(view, expected) + + +def test_setitem_with_view_invalidated_does_not_copy(using_copy_on_write, request): + df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1}) + view = df[:] + + df["b"] = 100 + arr = get_array(df, "a") + view = None # noqa: F841 + df.iloc[0, 0] = 100 + if using_copy_on_write: + # Setitem split the block. Since the old block shared data with view + # all the new blocks are referencing view and each other. 
When view + # goes out of scope, they don't share data with any other block, + # so we should not trigger a copy + mark = pytest.mark.xfail( + reason="blk.delete does not track references correctly" + ) + request.node.add_marker(mark) + assert np.shares_memory(arr, get_array(df, "a")) + + +def test_out_of_scope(using_copy_on_write): + def func(): + df = DataFrame({"a": [1, 2], "b": 1.5, "c": 1}) + # create some subset + result = df[["a", "b"]] + return result + + result = func() + if using_copy_on_write: + assert not result._mgr.blocks[0].refs.has_reference() + assert not result._mgr.blocks[1].refs.has_reference() + + +def test_delete(using_copy_on_write): + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 3)), columns=["a", "b", "c"] + ) + del df["b"] + if using_copy_on_write: + assert not df._mgr.blocks[0].refs.has_reference() + assert not df._mgr.blocks[1].refs.has_reference() + + df = df[["a"]] + if using_copy_on_write: + assert not df._mgr.blocks[0].refs.has_reference() + + +def test_delete_reference(using_copy_on_write): + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 3)), columns=["a", "b", "c"] + ) + x = df[:] + del df["b"] + if using_copy_on_write: + assert df._mgr.blocks[0].refs.has_reference() + assert df._mgr.blocks[1].refs.has_reference() + assert x._mgr.blocks[0].refs.has_reference() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_functions.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_functions.py new file mode 100644 index 00000000..56e4b186 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_functions.py @@ -0,0 +1,396 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, + concat, + merge, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +def test_concat_frames(using_copy_on_write): + df = DataFrame({"b": ["a"] * 3}) + df2 = DataFrame({"a": ["a"] * 3}) + df_orig = df.copy() + result = concat([df, df2], axis=1) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + else: + assert not np.shares_memory(get_array(result, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + + result.iloc[0, 0] = "d" + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + + result.iloc[0, 1] = "d" + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_concat_frames_updating_input(using_copy_on_write): + df = DataFrame({"b": ["a"] * 3}) + df2 = DataFrame({"a": ["a"] * 3}) + result = concat([df, df2], axis=1) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + else: + assert not np.shares_memory(get_array(result, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + + expected = result.copy() + df.iloc[0, 0] = "d" + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + + df2.iloc[0, 0] = "d" + if using_copy_on_write: + assert not 
np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + tm.assert_frame_equal(result, expected) + + +def test_concat_series(using_copy_on_write): + ser = Series([1, 2], name="a") + ser2 = Series([3, 4], name="b") + ser_orig = ser.copy() + ser2_orig = ser2.copy() + result = concat([ser, ser2], axis=1) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), ser.values) + assert np.shares_memory(get_array(result, "b"), ser2.values) + else: + assert not np.shares_memory(get_array(result, "a"), ser.values) + assert not np.shares_memory(get_array(result, "b"), ser2.values) + + result.iloc[0, 0] = 100 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), ser.values) + assert np.shares_memory(get_array(result, "b"), ser2.values) + + result.iloc[0, 1] = 1000 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), ser2.values) + tm.assert_series_equal(ser, ser_orig) + tm.assert_series_equal(ser2, ser2_orig) + + +def test_concat_frames_chained(using_copy_on_write): + df1 = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + df2 = DataFrame({"c": [4, 5, 6]}) + df3 = DataFrame({"d": [4, 5, 6]}) + result = concat([concat([df1, df2], axis=1), df3], axis=1) + expected = result.copy() + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "c"), get_array(df2, "c")) + assert np.shares_memory(get_array(result, "d"), get_array(df3, "d")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert not np.shares_memory(get_array(result, "c"), get_array(df2, "c")) + assert not np.shares_memory(get_array(result, "d"), get_array(df3, "d")) + + df1.iloc[0, 0] = 100 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + + tm.assert_frame_equal(result, expected) + + +def test_concat_series_chained(using_copy_on_write): + ser1 = Series([1, 2, 3], name="a") + ser2 = Series([4, 5, 6], name="c") + ser3 = Series([4, 5, 6], name="d") + result = concat([concat([ser1, ser2], axis=1), ser3], axis=1) + expected = result.copy() + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(ser1, "a")) + assert np.shares_memory(get_array(result, "c"), get_array(ser2, "c")) + assert np.shares_memory(get_array(result, "d"), get_array(ser3, "d")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(ser1, "a")) + assert not np.shares_memory(get_array(result, "c"), get_array(ser2, "c")) + assert not np.shares_memory(get_array(result, "d"), get_array(ser3, "d")) + + ser1.iloc[0] = 100 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(ser1, "a")) + + tm.assert_frame_equal(result, expected) + + +def test_concat_series_updating_input(using_copy_on_write): + ser = Series([1, 2], name="a") + ser2 = Series([3, 4], name="b") + expected = DataFrame({"a": [1, 2], "b": [3, 4]}) + result = concat([ser, ser2], axis=1) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(ser, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(ser2, "b")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(ser, "a")) + assert not np.shares_memory(get_array(result, "b"), get_array(ser2, "b")) + + ser.iloc[0] = 100 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(ser, "a")) + assert np.shares_memory(get_array(result, "b"), 
get_array(ser2, "b")) + tm.assert_frame_equal(result, expected) + + ser2.iloc[0] = 1000 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), get_array(ser2, "b")) + tm.assert_frame_equal(result, expected) + + +def test_concat_mixed_series_frame(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "c": 1}) + ser = Series([4, 5, 6], name="d") + result = concat([df, ser], axis=1) + expected = result.copy() + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df, "a")) + assert np.shares_memory(get_array(result, "c"), get_array(df, "c")) + assert np.shares_memory(get_array(result, "d"), get_array(ser, "d")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + assert not np.shares_memory(get_array(result, "c"), get_array(df, "c")) + assert not np.shares_memory(get_array(result, "d"), get_array(ser, "d")) + + ser.iloc[0] = 100 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "d"), get_array(ser, "d")) + + df.iloc[0, 0] = 100 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("copy", [True, None, False]) +def test_concat_copy_keyword(using_copy_on_write, copy): + df = DataFrame({"a": [1, 2]}) + df2 = DataFrame({"b": [1.5, 2.5]}) + + result = concat([df, df2], axis=1, copy=copy) + + if using_copy_on_write or copy is False: + assert np.shares_memory(get_array(df, "a"), get_array(result, "a")) + assert np.shares_memory(get_array(df2, "b"), get_array(result, "b")) + else: + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")) + assert not np.shares_memory(get_array(df2, "b"), get_array(result, "b")) + + +@pytest.mark.parametrize( + "func", + [ + lambda df1, df2, **kwargs: df1.merge(df2, **kwargs), + lambda df1, df2, **kwargs: merge(df1, df2, **kwargs), + ], +) +def test_merge_on_key(using_copy_on_write, func): + df1 = DataFrame({"key": ["a", "b", "c"], "a": [1, 2, 3]}) + df2 = DataFrame({"key": ["a", "b", "c"], "b": [4, 5, 6]}) + df1_orig = df1.copy() + df2_orig = df2.copy() + + result = func(df1, df2, on="key") + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + assert np.shares_memory(get_array(result, "key"), get_array(df1, "key")) + assert not np.shares_memory(get_array(result, "key"), get_array(df2, "key")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + result.iloc[0, 1] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + result.iloc[0, 2] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + tm.assert_frame_equal(df1, df1_orig) + tm.assert_frame_equal(df2, df2_orig) + + +def test_merge_on_index(using_copy_on_write): + df1 = DataFrame({"a": [1, 2, 3]}) + df2 = DataFrame({"b": [4, 5, 6]}) + df1_orig = df1.copy() + df2_orig = df2.copy() + + result = merge(df1, df2, left_index=True, right_index=True) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + else: + assert not np.shares_memory(get_array(result, "a"), 
get_array(df1, "a")) + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + result.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + result.iloc[0, 1] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + tm.assert_frame_equal(df1, df1_orig) + tm.assert_frame_equal(df2, df2_orig) + + +@pytest.mark.parametrize( + "func, how", + [ + (lambda df1, df2, **kwargs: merge(df2, df1, on="key", **kwargs), "right"), + (lambda df1, df2, **kwargs: merge(df1, df2, on="key", **kwargs), "left"), + ], +) +def test_merge_on_key_enlarging_one(using_copy_on_write, func, how): + df1 = DataFrame({"key": ["a", "b", "c"], "a": [1, 2, 3]}) + df2 = DataFrame({"key": ["a", "b"], "b": [4, 5]}) + df1_orig = df1.copy() + df2_orig = df2.copy() + + result = func(df1, df2, how=how) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + assert df2._mgr._has_no_reference(1) + assert df2._mgr._has_no_reference(0) + assert np.shares_memory(get_array(result, "key"), get_array(df1, "key")) is ( + how == "left" + ) + assert not np.shares_memory(get_array(result, "key"), get_array(df2, "key")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + if how == "left": + result.iloc[0, 1] = 0 + else: + result.iloc[0, 2] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + tm.assert_frame_equal(df1, df1_orig) + tm.assert_frame_equal(df2, df2_orig) + + +@pytest.mark.parametrize("copy", [True, None, False]) +def test_merge_copy_keyword(using_copy_on_write, copy): + df = DataFrame({"a": [1, 2]}) + df2 = DataFrame({"b": [3, 4.5]}) + + result = df.merge(df2, copy=copy, left_index=True, right_index=True) + + if using_copy_on_write or copy is False: + assert np.shares_memory(get_array(df, "a"), get_array(result, "a")) + assert np.shares_memory(get_array(df2, "b"), get_array(result, "b")) + else: + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")) + assert not np.shares_memory(get_array(df2, "b"), get_array(result, "b")) + + +def test_join_on_key(using_copy_on_write): + df_index = Index(["a", "b", "c"], name="key") + + df1 = DataFrame({"a": [1, 2, 3]}, index=df_index.copy(deep=True)) + df2 = DataFrame({"b": [4, 5, 6]}, index=df_index.copy(deep=True)) + + df1_orig = df1.copy() + df2_orig = df2.copy() + + result = df1.join(df2, on="key") + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + assert np.shares_memory(get_array(result.index), get_array(df1.index)) + assert not np.shares_memory(get_array(result.index), get_array(df2.index)) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + result.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + result.iloc[0, 1] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + 
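+    # NOTE (editorial annotation, hypothetical sketch -- not part of the
+    # original patch): under CoW the join result shared memory with both
+    # parents until the writes above, which copied only the touched columns;
+    # the assertions below check that neither parent was modified, e.g.:
+    #
+    #   left = DataFrame({"a": [1, 2, 3]})
+    #   right = DataFrame({"b": [4, 5, 6]})
+    #   joined = left.join(right)  # lazy: arrays shared with both parents
+    #   joined.iloc[0, 0] = 0      # copies only column "a"; parents intact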
tm.assert_frame_equal(df1, df1_orig) + tm.assert_frame_equal(df2, df2_orig) + + +def test_join_multiple_dataframes_on_key(using_copy_on_write): + df_index = Index(["a", "b", "c"], name="key") + + df1 = DataFrame({"a": [1, 2, 3]}, index=df_index.copy(deep=True)) + dfs_list = [ + DataFrame({"b": [4, 5, 6]}, index=df_index.copy(deep=True)), + DataFrame({"c": [7, 8, 9]}, index=df_index.copy(deep=True)), + ] + + df1_orig = df1.copy() + dfs_list_orig = [df.copy() for df in dfs_list] + + result = df1.join(dfs_list) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b")) + assert np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c")) + assert np.shares_memory(get_array(result.index), get_array(df1.index)) + assert not np.shares_memory( + get_array(result.index), get_array(dfs_list[0].index) + ) + assert not np.shares_memory( + get_array(result.index), get_array(dfs_list[1].index) + ) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert not np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b")) + assert not np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c")) + + result.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b")) + assert np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c")) + + result.iloc[0, 1] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b")) + assert np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c")) + + result.iloc[0, 2] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c")) + + tm.assert_frame_equal(df1, df1_orig) + for df, df_orig in zip(dfs_list, dfs_list_orig): + tm.assert_frame_equal(df, df_orig) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_indexing.py new file mode 100644 index 00000000..ebb25bd5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_indexing.py @@ -0,0 +1,1119 @@ +import numpy as np +import pytest + +from pandas.errors import SettingWithCopyWarning + +from pandas.core.dtypes.common import is_float_dtype + +import pandas as pd +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +@pytest.fixture(params=["numpy", "nullable"]) +def backend(request): + if request.param == "numpy": + + def make_dataframe(*args, **kwargs): + return DataFrame(*args, **kwargs) + + def make_series(*args, **kwargs): + return Series(*args, **kwargs) + + elif request.param == "nullable": + + def make_dataframe(*args, **kwargs): + df = DataFrame(*args, **kwargs) + df_nullable = df.convert_dtypes() + # convert_dtypes will try to cast float to int if there is no loss in + # precision -> undo that change + for col in df.columns: + if is_float_dtype(df[col].dtype) and not is_float_dtype( + df_nullable[col].dtype + ): + df_nullable[col] = df_nullable[col].astype("Float64") + # copy final result to ensure we start with a fully self-owning DataFrame + return df_nullable.copy() + + def make_series(*args, **kwargs): + ser = Series(*args, **kwargs) + return 
ser.convert_dtypes().copy()

+    return request.param, make_dataframe, make_series
+
+
+# -----------------------------------------------------------------------------
+# Indexing operations taking subset + modifying the subset/parent
+
+
+def test_subset_column_selection(backend, using_copy_on_write):
+    # Case: taking a subset of the columns of a DataFrame
+    # + afterwards modifying the subset
+    _, DataFrame, _ = backend
+    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+    df_orig = df.copy()
+
+    subset = df[["a", "c"]]
+
+    if using_copy_on_write:
+        # the subset shares memory ...
+        assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+        # ... but uses CoW when being modified
+        subset.iloc[0, 0] = 0
+    else:
+        assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+        # INFO this no longer raises a warning since pandas 1.4
+        # with pd.option_context("chained_assignment", "warn"):
+        #     with tm.assert_produces_warning(SettingWithCopyWarning):
+        subset.iloc[0, 0] = 0
+
+    assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+
+    expected = DataFrame({"a": [0, 2, 3], "c": [0.1, 0.2, 0.3]})
+    tm.assert_frame_equal(subset, expected)
+    tm.assert_frame_equal(df, df_orig)
+
+
+def test_subset_column_selection_modify_parent(backend, using_copy_on_write):
+    # Case: taking a subset of the columns of a DataFrame
+    # + afterwards modifying the parent
+    _, DataFrame, _ = backend
+    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+
+    subset = df[["a", "c"]]
+
+    if using_copy_on_write:
+        # the subset shares memory ...
+        assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+        # ... but the parent uses CoW when it is modified
+        df.iloc[0, 0] = 0
+
+    assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+    if using_copy_on_write:
+        # different column/block still shares memory
+        assert np.shares_memory(get_array(subset, "c"), get_array(df, "c"))
+
+    expected = DataFrame({"a": [1, 2, 3], "c": [0.1, 0.2, 0.3]})
+    tm.assert_frame_equal(subset, expected)
+
+
+def test_subset_row_slice(backend, using_copy_on_write):
+    # Case: taking a subset of the rows of a DataFrame using a slice
+    # + afterwards modifying the subset
+    _, DataFrame, _ = backend
+    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
+    df_orig = df.copy()
+
+    subset = df[1:3]
+    subset._mgr._verify_integrity()
+
+    assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+
+    if using_copy_on_write:
+        subset.iloc[0, 0] = 0
+        assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
+
+    else:
+        # INFO this no longer raises a warning since pandas 1.4
+        # with pd.option_context("chained_assignment", "warn"):
+        #     with tm.assert_produces_warning(SettingWithCopyWarning):
+        subset.iloc[0, 0] = 0
+
+    subset._mgr._verify_integrity()
+
+    expected = DataFrame({"a": [0, 3], "b": [5, 6], "c": [0.2, 0.3]}, index=range(1, 3))
+    tm.assert_frame_equal(subset, expected)
+    if using_copy_on_write:
+        # original parent dataframe is not modified (CoW)
+        tm.assert_frame_equal(df, df_orig)
+    else:
+        # original parent dataframe is actually updated
+        df_orig.iloc[1, 0] = 0
+        tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+    "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
+)
+def test_subset_column_slice(backend, using_copy_on_write, using_array_manager, dtype):
+    # Case: taking a subset of the columns of a DataFrame using a slice
+    # + afterwards modifying the subset
+    dtype_backend,
DataFrame, _ = backend + single_block = ( + dtype == "int64" and dtype_backend == "numpy" + ) and not using_array_manager + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + + subset = df.iloc[:, 1:] + subset._mgr._verify_integrity() + + if using_copy_on_write: + assert np.shares_memory(get_array(subset, "b"), get_array(df, "b")) + + subset.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(subset, "b"), get_array(df, "b")) + + else: + # we only get a warning in case of a single block + warn = SettingWithCopyWarning if single_block else None + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(warn): + subset.iloc[0, 0] = 0 + + expected = DataFrame({"b": [0, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}) + tm.assert_frame_equal(subset, expected) + # original parent dataframe is not modified (also not for BlockManager case, + # except for single block) + if not using_copy_on_write and (using_array_manager or single_block): + df_orig.iloc[0, 1] = 0 + tm.assert_frame_equal(df, df_orig) + else: + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +@pytest.mark.parametrize( + "row_indexer", + [slice(1, 2), np.array([False, True, True]), np.array([1, 2])], + ids=["slice", "mask", "array"], +) +@pytest.mark.parametrize( + "column_indexer", + [slice("b", "c"), np.array([False, True, True]), ["b", "c"]], + ids=["slice", "mask", "array"], +) +def test_subset_loc_rows_columns( + backend, + dtype, + row_indexer, + column_indexer, + using_array_manager, + using_copy_on_write, +): + # Case: taking a subset of the rows+columns of a DataFrame using .loc + # + afterwards modifying the subset + # Generic test for several combinations of row/column indexers, not all + # of those could actually return a view / need CoW (so this test is not + # checking memory sharing, only ensuring subsequent mutation doesn't + # affect the parent dataframe) + dtype_backend, DataFrame, _ = backend + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + + subset = df.loc[row_indexer, column_indexer] + + # modifying the subset never modifies the parent + subset.iloc[0, 0] = 0 + + expected = DataFrame( + {"b": [0, 6], "c": np.array([8, 9], dtype=dtype)}, index=range(1, 3) + ) + tm.assert_frame_equal(subset, expected) + # a few corner cases _do_ actually modify the parent (with both row and column + # slice, and in case of ArrayManager or BlockManager with single block) + if ( + isinstance(row_indexer, slice) + and isinstance(column_indexer, slice) + and ( + using_array_manager + or ( + dtype == "int64" + and dtype_backend == "numpy" + and not using_copy_on_write + ) + ) + ): + df_orig.iloc[1, 1] = 0 + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +@pytest.mark.parametrize( + "row_indexer", + [slice(1, 3), np.array([False, True, True]), np.array([1, 2])], + ids=["slice", "mask", "array"], +) +@pytest.mark.parametrize( + "column_indexer", + [slice(1, 3), np.array([False, True, True]), [1, 2]], + ids=["slice", "mask", "array"], +) +def test_subset_iloc_rows_columns( + backend, + dtype, + row_indexer, + column_indexer, + using_array_manager, + using_copy_on_write, +): + # Case: taking a subset of the rows+columns of a DataFrame using .iloc + # + afterwards modifying the subset + # 
Generic test for several combinations of row/column indexers, not all + # of those could actually return a view / need CoW (so this test is not + # checking memory sharing, only ensuring subsequent mutation doesn't + # affect the parent dataframe) + dtype_backend, DataFrame, _ = backend + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + + subset = df.iloc[row_indexer, column_indexer] + + # modifying the subset never modifies the parent + subset.iloc[0, 0] = 0 + + expected = DataFrame( + {"b": [0, 6], "c": np.array([8, 9], dtype=dtype)}, index=range(1, 3) + ) + tm.assert_frame_equal(subset, expected) + # a few corner cases _do_ actually modify the parent (with both row and column + # slice, and in case of ArrayManager or BlockManager with single block) + if ( + isinstance(row_indexer, slice) + and isinstance(column_indexer, slice) + and ( + using_array_manager + or ( + dtype == "int64" + and dtype_backend == "numpy" + and not using_copy_on_write + ) + ) + ): + df_orig.iloc[1, 1] = 0 + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "indexer", + [slice(0, 2), np.array([True, True, False]), np.array([0, 1])], + ids=["slice", "mask", "array"], +) +def test_subset_set_with_row_indexer(backend, indexer_si, indexer, using_copy_on_write): + # Case: setting values with a row indexer on a viewing subset + # subset[indexer] = value and subset.iloc[indexer] = value + _, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]}) + df_orig = df.copy() + subset = df[1:4] + + if ( + indexer_si is tm.setitem + and isinstance(indexer, np.ndarray) + and indexer.dtype == "int" + ): + pytest.skip("setitem with labels selects on columns") + + if using_copy_on_write: + indexer_si(subset)[indexer] = 0 + else: + # INFO iloc no longer raises warning since pandas 1.4 + warn = SettingWithCopyWarning if indexer_si is tm.setitem else None + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(warn): + indexer_si(subset)[indexer] = 0 + + expected = DataFrame( + {"a": [0, 0, 4], "b": [0, 0, 7], "c": [0.0, 0.0, 0.4]}, index=range(1, 4) + ) + tm.assert_frame_equal(subset, expected) + if using_copy_on_write: + # original parent dataframe is not modified (CoW) + tm.assert_frame_equal(df, df_orig) + else: + # original parent dataframe is actually updated + df_orig[1:3] = 0 + tm.assert_frame_equal(df, df_orig) + + +def test_subset_set_with_mask(backend, using_copy_on_write): + # Case: setting values with a mask on a viewing subset: subset[mask] = value + _, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]}) + df_orig = df.copy() + subset = df[1:4] + + mask = subset > 3 + + if using_copy_on_write: + subset[mask] = 0 + else: + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(SettingWithCopyWarning): + subset[mask] = 0 + + expected = DataFrame( + {"a": [2, 3, 0], "b": [0, 0, 0], "c": [0.20, 0.3, 0.4]}, index=range(1, 4) + ) + tm.assert_frame_equal(subset, expected) + if using_copy_on_write: + # original parent dataframe is not modified (CoW) + tm.assert_frame_equal(df, df_orig) + else: + # original parent dataframe is actually updated + df_orig.loc[3, "a"] = 0 + df_orig.loc[1:3, "b"] = 0 + tm.assert_frame_equal(df, df_orig) + + +def test_subset_set_column(backend, using_copy_on_write): + # Case: setting a single column on a viewing subset -> subset[col] = value + 
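+    # NOTE (editorial annotation, hypothetical sketch -- not part of the
+    # original patch): assigning a full column into a viewing subset swaps in
+    # a new array for that column, so the subset stops sharing memory with
+    # its parent there, e.g.:
+    #
+    #   parent = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
+    #   subset = parent[1:3]     # view over the parent's blocks
+    #   subset["a"] = [10, 11]   # under CoW, parent["a"] is left unchanged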
dtype_backend, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + subset = df[1:3] + + if dtype_backend == "numpy": + arr = np.array([10, 11], dtype="int64") + else: + arr = pd.array([10, 11], dtype="Int64") + + if using_copy_on_write: + subset["a"] = arr + else: + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(SettingWithCopyWarning): + subset["a"] = arr + + subset._mgr._verify_integrity() + expected = DataFrame( + {"a": [10, 11], "b": [5, 6], "c": [0.2, 0.3]}, index=range(1, 3) + ) + tm.assert_frame_equal(subset, expected) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +def test_subset_set_column_with_loc( + backend, using_copy_on_write, using_array_manager, dtype +): + # Case: setting a single column with loc on a viewing subset + # -> subset.loc[:, col] = value + _, DataFrame, _ = backend + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + subset = df[1:3] + + if using_copy_on_write: + subset.loc[:, "a"] = np.array([10, 11], dtype="int64") + else: + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning( + None, + raise_on_extra_warnings=not using_array_manager, + ): + subset.loc[:, "a"] = np.array([10, 11], dtype="int64") + + subset._mgr._verify_integrity() + expected = DataFrame( + {"a": [10, 11], "b": [5, 6], "c": np.array([8, 9], dtype=dtype)}, + index=range(1, 3), + ) + tm.assert_frame_equal(subset, expected) + if using_copy_on_write: + # original parent dataframe is not modified (CoW) + tm.assert_frame_equal(df, df_orig) + else: + # original parent dataframe is actually updated + df_orig.loc[1:3, "a"] = np.array([10, 11], dtype="int64") + tm.assert_frame_equal(df, df_orig) + + +def test_subset_set_column_with_loc2(backend, using_copy_on_write, using_array_manager): + # Case: setting a single column with loc on a viewing subset + # -> subset.loc[:, col] = value + # separate test for case of DataFrame of a single column -> takes a separate + # code path + _, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3]}) + df_orig = df.copy() + subset = df[1:3] + + if using_copy_on_write: + subset.loc[:, "a"] = 0 + else: + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning( + None, + raise_on_extra_warnings=not using_array_manager, + ): + subset.loc[:, "a"] = 0 + + subset._mgr._verify_integrity() + expected = DataFrame({"a": [0, 0]}, index=range(1, 3)) + tm.assert_frame_equal(subset, expected) + if using_copy_on_write: + # original parent dataframe is not modified (CoW) + tm.assert_frame_equal(df, df_orig) + else: + # original parent dataframe is actually updated + df_orig.loc[1:3, "a"] = 0 + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +def test_subset_set_columns(backend, using_copy_on_write, dtype): + # Case: setting multiple columns on a viewing subset + # -> subset[[col1, col2]] = value + dtype_backend, DataFrame, _ = backend + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + subset = df[1:3] + + if using_copy_on_write: + subset[["a", "c"]] = 0 + else: + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(SettingWithCopyWarning): + subset[["a", "c"]] = 0 
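+    # NOTE (editorial annotation -- not part of the original patch): setting
+    # multiple columns at once replaces those arrays wholesale, which is why
+    # the CoW branch below expects columns "a" and "c" to hold no references
+    # to the parent's blocks anymore.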
+ + subset._mgr._verify_integrity() + if using_copy_on_write: + # first and third column should certainly have no references anymore + assert all(subset._mgr._has_no_reference(i) for i in [0, 2]) + expected = DataFrame({"a": [0, 0], "b": [5, 6], "c": [0, 0]}, index=range(1, 3)) + if dtype_backend == "nullable": + # there is not yet a global option, so overriding a column by setting a scalar + # defaults to numpy dtype even if original column was nullable + expected["a"] = expected["a"].astype("int64") + expected["c"] = expected["c"].astype("int64") + + tm.assert_frame_equal(subset, expected) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "indexer", + [slice("a", "b"), np.array([True, True, False]), ["a", "b"]], + ids=["slice", "mask", "array"], +) +def test_subset_set_with_column_indexer(backend, indexer, using_copy_on_write): + # Case: setting multiple columns with a column indexer on a viewing subset + # -> subset.loc[:, [col1, col2]] = value + _, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]}) + df_orig = df.copy() + subset = df[1:3] + + if using_copy_on_write: + subset.loc[:, indexer] = 0 + else: + with pd.option_context("chained_assignment", "warn"): + # As of 2.0, this setitem attempts (successfully) to set values + # inplace, so the assignment is not chained. + subset.loc[:, indexer] = 0 + + subset._mgr._verify_integrity() + expected = DataFrame({"a": [0, 0], "b": [0.0, 0.0], "c": [5, 6]}, index=range(1, 3)) + tm.assert_frame_equal(subset, expected) + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + else: + # pre-2.0, in the mixed case with BlockManager, only column "a" + # would be mutated in the parent frame. this changed with the + # enforcement of GH#45333 + df_orig.loc[1:2, ["a", "b"]] = 0 + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "method", + [ + lambda df: df[["a", "b"]][0:2], + lambda df: df[0:2][["a", "b"]], + lambda df: df[["a", "b"]].iloc[0:2], + lambda df: df[["a", "b"]].loc[0:1], + lambda df: df[0:2].iloc[:, 0:2], + lambda df: df[0:2].loc[:, "a":"b"], # type: ignore[misc] + ], + ids=[ + "row-getitem-slice", + "column-getitem", + "row-iloc-slice", + "row-loc-slice", + "column-iloc-slice", + "column-loc-slice", + ], +) +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +def test_subset_chained_getitem( + request, backend, method, dtype, using_copy_on_write, using_array_manager +): + # Case: creating a subset using multiple, chained getitem calls using views + # still needs to guarantee proper CoW behaviour + _, DataFrame, _ = backend + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + + # when not using CoW, it depends on whether we have a single block or not + # and whether we are slicing the columns -> in that case we have a view + test_callspec = request.node.callspec.id + if not using_array_manager: + subset_is_view = test_callspec in ( + "numpy-single-block-column-iloc-slice", + "numpy-single-block-column-loc-slice", + ) + else: + # with ArrayManager, it doesn't matter whether we have + # single vs mixed block or numpy vs nullable dtypes + subset_is_view = test_callspec.endswith( + ("column-iloc-slice", "column-loc-slice") + ) + + # modify subset -> don't modify parent + subset = method(df) + subset.iloc[0, 0] = 0 + if using_copy_on_write or (not subset_is_view): + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + # 
modify parent -> don't modify subset + subset = method(df) + df.iloc[0, 0] = 0 + expected = DataFrame({"a": [1, 2], "b": [4, 5]}) + if using_copy_on_write or not subset_is_view: + tm.assert_frame_equal(subset, expected) + else: + assert subset.iloc[0, 0] == 0 + + +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +def test_subset_chained_getitem_column(backend, dtype, using_copy_on_write): + # Case: creating a subset using multiple, chained getitem calls using views + # still needs to guarantee proper CoW behaviour + _, DataFrame, Series = backend + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + + # modify subset -> don't modify parent + subset = df[:]["a"][0:2] + df._clear_item_cache() + subset.iloc[0] = 0 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + # modify parent -> don't modify subset + subset = df[:]["a"][0:2] + df._clear_item_cache() + df.iloc[0, 0] = 0 + expected = Series([1, 2], name="a") + if using_copy_on_write: + tm.assert_series_equal(subset, expected) + else: + assert subset.iloc[0] == 0 + + +@pytest.mark.parametrize( + "method", + [ + lambda s: s["a":"c"]["a":"b"], # type: ignore[misc] + lambda s: s.iloc[0:3].iloc[0:2], + lambda s: s.loc["a":"c"].loc["a":"b"], # type: ignore[misc] + lambda s: s.loc["a":"c"] # type: ignore[misc] + .iloc[0:3] + .iloc[0:2] + .loc["a":"b"] # type: ignore[misc] + .iloc[0:1], + ], + ids=["getitem", "iloc", "loc", "long-chain"], +) +def test_subset_chained_getitem_series(backend, method, using_copy_on_write): + # Case: creating a subset using multiple, chained getitem calls using views + # still needs to guarantee proper CoW behaviour + _, _, Series = backend + s = Series([1, 2, 3], index=["a", "b", "c"]) + s_orig = s.copy() + + # modify subset -> don't modify parent + subset = method(s) + subset.iloc[0] = 0 + if using_copy_on_write: + tm.assert_series_equal(s, s_orig) + else: + assert s.iloc[0] == 0 + + # modify parent -> don't modify subset + subset = s.iloc[0:3].iloc[0:2] + s.iloc[0] = 0 + expected = Series([1, 2], index=["a", "b"]) + if using_copy_on_write: + tm.assert_series_equal(subset, expected) + else: + assert subset.iloc[0] == 0 + + +def test_subset_chained_single_block_row(using_copy_on_write, using_array_manager): + # not parametrizing this for dtype backend, since this explicitly tests single block + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) + df_orig = df.copy() + + # modify subset -> don't modify parent + subset = df[:].iloc[0].iloc[0:2] + subset.iloc[0] = 0 + if using_copy_on_write or using_array_manager: + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + # modify parent -> don't modify subset + subset = df[:].iloc[0].iloc[0:2] + df.iloc[0, 0] = 0 + expected = Series([1, 4], index=["a", "b"], name=0) + if using_copy_on_write or using_array_manager: + tm.assert_series_equal(subset, expected) + else: + assert subset.iloc[0] == 0 + + +@pytest.mark.parametrize( + "method", + [ + lambda df: df[:], + lambda df: df.loc[:, :], + lambda df: df.loc[:], + lambda df: df.iloc[:, :], + lambda df: df.iloc[:], + ], + ids=["getitem", "loc", "loc-rows", "iloc", "iloc-rows"], +) +def test_null_slice(backend, method, using_copy_on_write): + # Case: also all variants of indexing with a null slice (:) should return + # new objects to ensure we correctly use CoW for the results + _, DataFrame, _ = backend + df = DataFrame({"a": [1, 
2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) + df_orig = df.copy() + + df2 = method(df) + + # we always return new objects (shallow copy), regardless of CoW or not + assert df2 is not df + + # and those trigger CoW when mutated + df2.iloc[0, 0] = 0 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + +@pytest.mark.parametrize( + "method", + [ + lambda s: s[:], + lambda s: s.loc[:], + lambda s: s.iloc[:], + ], + ids=["getitem", "loc", "iloc"], +) +def test_null_slice_series(backend, method, using_copy_on_write): + _, _, Series = backend + s = Series([1, 2, 3], index=["a", "b", "c"]) + s_orig = s.copy() + + s2 = method(s) + + # we always return new objects, regardless of CoW or not + assert s2 is not s + + # and those trigger CoW when mutated + s2.iloc[0] = 0 + if using_copy_on_write: + tm.assert_series_equal(s, s_orig) + else: + assert s.iloc[0] == 0 + + +# TODO add more tests modifying the parent + + +# ----------------------------------------------------------------------------- +# Series -- Indexing operations taking subset + modifying the subset/parent + + +def test_series_getitem_slice(backend, using_copy_on_write): + # Case: taking a slice of a Series + afterwards modifying the subset + _, _, Series = backend + s = Series([1, 2, 3], index=["a", "b", "c"]) + s_orig = s.copy() + + subset = s[:] + assert np.shares_memory(get_array(subset), get_array(s)) + + subset.iloc[0] = 0 + + if using_copy_on_write: + assert not np.shares_memory(get_array(subset), get_array(s)) + + expected = Series([0, 2, 3], index=["a", "b", "c"]) + tm.assert_series_equal(subset, expected) + + if using_copy_on_write: + # original parent series is not modified (CoW) + tm.assert_series_equal(s, s_orig) + else: + # original parent series is actually updated + assert s.iloc[0] == 0 + + +@pytest.mark.parametrize( + "indexer", + [slice(0, 2), np.array([True, True, False]), np.array([0, 1])], + ids=["slice", "mask", "array"], +) +def test_series_subset_set_with_indexer( + backend, indexer_si, indexer, using_copy_on_write +): + # Case: setting values in a viewing Series with an indexer + _, _, Series = backend + s = Series([1, 2, 3], index=["a", "b", "c"]) + s_orig = s.copy() + subset = s[:] + + warn = None + msg = "Series.__setitem__ treating keys as positions is deprecated" + if ( + indexer_si is tm.setitem + and isinstance(indexer, np.ndarray) + and indexer.dtype.kind == "i" + ): + warn = FutureWarning + + with tm.assert_produces_warning(warn, match=msg): + indexer_si(subset)[indexer] = 0 + expected = Series([0, 0, 3], index=["a", "b", "c"]) + tm.assert_series_equal(subset, expected) + + if using_copy_on_write: + tm.assert_series_equal(s, s_orig) + else: + tm.assert_series_equal(s, expected) + + +# ----------------------------------------------------------------------------- +# del operator + + +def test_del_frame(backend, using_copy_on_write): + # Case: deleting a column with `del` on a viewing child dataframe should + # not modify parent + update the references + _, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df[:] + + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + del df2["b"] + + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + tm.assert_frame_equal(df, df_orig) + tm.assert_frame_equal(df2, df_orig[["a", "c"]]) + df2._mgr._verify_integrity() + + df.loc[0, "b"] = 200 + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + df_orig = 
df.copy() + + df2.loc[0, "a"] = 100 + if using_copy_on_write: + # modifying child after deleting a column still doesn't update parent + tm.assert_frame_equal(df, df_orig) + else: + assert df.loc[0, "a"] == 100 + + +def test_del_series(backend): + _, _, Series = backend + s = Series([1, 2, 3], index=["a", "b", "c"]) + s_orig = s.copy() + s2 = s[:] + + assert np.shares_memory(get_array(s), get_array(s2)) + + del s2["a"] + + assert not np.shares_memory(get_array(s), get_array(s2)) + tm.assert_series_equal(s, s_orig) + tm.assert_series_equal(s2, s_orig[["b", "c"]]) + + # modifying s2 doesn't need copy on write (due to `del`, s2 is backed by new array) + values = s2.values + s2.loc["b"] = 100 + assert values[0] == 100 + + +# ----------------------------------------------------------------------------- +# Accessing column as Series + + +def test_column_as_series(backend, using_copy_on_write, using_array_manager): + # Case: selecting a single column now also uses Copy-on-Write + dtype_backend, DataFrame, Series = backend + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + + s = df["a"] + + assert np.shares_memory(get_array(s, "a"), get_array(df, "a")) + + if using_copy_on_write or using_array_manager: + s[0] = 0 + else: + warn = SettingWithCopyWarning if dtype_backend == "numpy" else None + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(warn): + s[0] = 0 + + expected = Series([0, 2, 3], name="a") + tm.assert_series_equal(s, expected) + if using_copy_on_write: + # assert not np.shares_memory(s.values, get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + # ensure cached series on getitem is not the changed series + tm.assert_series_equal(df["a"], df_orig["a"]) + else: + df_orig.iloc[0, 0] = 0 + tm.assert_frame_equal(df, df_orig) + + +def test_column_as_series_set_with_upcast( + backend, using_copy_on_write, using_array_manager +): + # Case: selecting a single column now also uses Copy-on-Write -> when + # setting a value causes an upcast, we don't need to update the parent + # DataFrame through the cache mechanism + dtype_backend, DataFrame, Series = backend + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + + s = df["a"] + if dtype_backend == "nullable": + with pytest.raises(TypeError, match="Invalid value"): + s[0] = "foo" + expected = Series([1, 2, 3], name="a") + elif using_copy_on_write or using_array_manager: + with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"): + s[0] = "foo" + expected = Series(["foo", 2, 3], dtype=object, name="a") + else: + with pd.option_context("chained_assignment", "warn"): + msg = "|".join( + [ + "A value is trying to be set on a copy of a slice from a DataFrame", + "Setting an item of incompatible dtype is deprecated", + ] + ) + with tm.assert_produces_warning( + (SettingWithCopyWarning, FutureWarning), match=msg + ): + s[0] = "foo" + expected = Series(["foo", 2, 3], dtype=object, name="a") + + tm.assert_series_equal(s, expected) + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + # ensure cached series on getitem is not the changed series + tm.assert_series_equal(df["a"], df_orig["a"]) + else: + df_orig["a"] = expected + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "method", + [ + lambda df: df["a"], + lambda df: df.loc[:, "a"], + lambda df: df.iloc[:, 0], + ], + ids=["getitem", "loc", "iloc"], +) +def test_column_as_series_no_item_cache( + request, backend, method, 
using_copy_on_write, using_array_manager +): + # Case: selecting a single column (which now also uses Copy-on-Write to protect + # the view) should always give a new object (i.e. not make use of a cache) + dtype_backend, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + + s1 = method(df) + s2 = method(df) + + is_iloc = "iloc" in request.node.name + if using_copy_on_write or is_iloc: + assert s1 is not s2 + else: + assert s1 is s2 + + if using_copy_on_write or using_array_manager: + s1.iloc[0] = 0 + else: + warn = SettingWithCopyWarning if dtype_backend == "numpy" else None + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(warn): + s1.iloc[0] = 0 + + if using_copy_on_write: + tm.assert_series_equal(s2, df_orig["a"]) + tm.assert_frame_equal(df, df_orig) + else: + assert s2.iloc[0] == 0 + + +# TODO add tests for other indexing methods on the Series + + +def test_dataframe_add_column_from_series(backend, using_copy_on_write): + # Case: adding a new column to a DataFrame from an existing column/series + # -> delays copy under CoW + _, DataFrame, Series = backend + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + + s = Series([10, 11, 12]) + df["new"] = s + if using_copy_on_write: + assert np.shares_memory(get_array(df, "new"), get_array(s)) + else: + assert not np.shares_memory(get_array(df, "new"), get_array(s)) + + # editing series -> doesn't modify column in frame + s[0] = 0 + expected = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "new": [10, 11, 12]}) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("val", [100, "a"]) +@pytest.mark.parametrize( + "indexer_func, indexer", + [ + (tm.loc, (0, "a")), + (tm.iloc, (0, 0)), + (tm.loc, ([0], "a")), + (tm.iloc, ([0], 0)), + (tm.loc, (slice(None), "a")), + (tm.iloc, (slice(None), 0)), + ], +) +@pytest.mark.parametrize( + "col", [[0.1, 0.2, 0.3], [7, 8, 9]], ids=["mixed-block", "single-block"] +) +def test_set_value_copy_only_necessary_column( + using_copy_on_write, indexer_func, indexer, val, col +): + # When setting inplace, only copy column that is modified instead of the whole + # block (by splitting the block) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": col}) + df_orig = df.copy() + view = df[:] + + if val == "a" and indexer[0] != slice(None): + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype is deprecated" + ): + indexer_func(df)[indexer] = val + else: + indexer_func(df)[indexer] = val + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(view, "b")) + assert not np.shares_memory(get_array(df, "a"), get_array(view, "a")) + tm.assert_frame_equal(view, df_orig) + else: + assert np.shares_memory(get_array(df, "c"), get_array(view, "c")) + if val == "a": + assert not np.shares_memory(get_array(df, "a"), get_array(view, "a")) + else: + assert np.shares_memory(get_array(df, "a"), get_array(view, "a")) + + +def test_series_midx_slice(using_copy_on_write): + ser = Series([1, 2, 3], index=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 5]])) + result = ser[1] + assert np.shares_memory(get_array(ser), get_array(result)) + result.iloc[0] = 100 + if using_copy_on_write: + expected = Series( + [1, 2, 3], index=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 5]]) + ) + tm.assert_series_equal(ser, expected) + + +def test_getitem_midx_slice(using_copy_on_write, using_array_manager): + df = DataFrame({("a", "x"): [1, 2], ("a", "y"): 1, ("b", 
"x"): 2}) + df_orig = df.copy() + new_df = df[("a",)] + + if using_copy_on_write: + assert not new_df._mgr._has_no_reference(0) + + if not using_array_manager: + assert np.shares_memory(get_array(df, ("a", "x")), get_array(new_df, "x")) + if using_copy_on_write: + new_df.iloc[0, 0] = 100 + tm.assert_frame_equal(df_orig, df) + + +def test_series_midx_tuples_slice(using_copy_on_write): + ser = Series( + [1, 2, 3], + index=pd.MultiIndex.from_tuples([((1, 2), 3), ((1, 2), 4), ((2, 3), 4)]), + ) + result = ser[(1, 2)] + assert np.shares_memory(get_array(ser), get_array(result)) + result.iloc[0] = 100 + if using_copy_on_write: + expected = Series( + [1, 2, 3], + index=pd.MultiIndex.from_tuples([((1, 2), 3), ((1, 2), 4), ((2, 3), 4)]), + ) + tm.assert_series_equal(ser, expected) + + +def test_loc_enlarging_with_dataframe(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}) + rhs = DataFrame({"b": [1, 2, 3], "c": [4, 5, 6]}) + rhs_orig = rhs.copy() + df.loc[:, ["b", "c"]] = rhs + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(rhs, "b")) + assert np.shares_memory(get_array(df, "c"), get_array(rhs, "c")) + assert not df._mgr._has_no_reference(1) + else: + assert not np.shares_memory(get_array(df, "b"), get_array(rhs, "b")) + + df.iloc[0, 1] = 100 + tm.assert_frame_equal(rhs, rhs_orig) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_internals.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_internals.py new file mode 100644 index 00000000..a7273313 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_internals.py @@ -0,0 +1,151 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import DataFrame +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +@td.skip_array_manager_invalid_test +def test_consolidate(using_copy_on_write): + # create unconsolidated DataFrame + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + df["c"] = [4, 5, 6] + + # take a viewing subset + subset = df[:] + + # each block of subset references a block of df + assert all(blk.refs.has_reference() for blk in subset._mgr.blocks) + + # consolidate the two int64 blocks + subset._consolidate_inplace() + + # the float64 block still references the parent one because it still a view + assert subset._mgr.blocks[0].refs.has_reference() + # equivalent of assert np.shares_memory(df["b"].values, subset["b"].values) + # but avoids caching df["b"] + assert np.shares_memory(get_array(df, "b"), get_array(subset, "b")) + + # the new consolidated int64 block does not reference another + assert not subset._mgr.blocks[1].refs.has_reference() + + # the parent dataframe now also only is linked for the float column + assert not df._mgr.blocks[0].refs.has_reference() + assert df._mgr.blocks[1].refs.has_reference() + assert not df._mgr.blocks[2].refs.has_reference() + + # and modifying subset still doesn't modify parent + if using_copy_on_write: + subset.iloc[0, 1] = 0.0 + assert not df._mgr.blocks[1].refs.has_reference() + assert df.loc[0, "b"] == 0.1 + + +@pytest.mark.single_cpu +@td.skip_array_manager_invalid_test +def test_switch_options(): + # ensure we can switch the value of the option within one session + # (assuming data is constructed after switching) + + # using the option_context to ensure we set back to global option value + # after running the test + with pd.option_context("mode.copy_on_write", False): + df = 
DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + subset = df[:] + subset.iloc[0, 0] = 0 + # df updated with CoW disabled + assert df.iloc[0, 0] == 0 + + pd.options.mode.copy_on_write = True + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + subset = df[:] + subset.iloc[0, 0] = 0 + # df not updated with CoW enabled + assert df.iloc[0, 0] == 1 + + pd.options.mode.copy_on_write = False + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + subset = df[:] + subset.iloc[0, 0] = 0 + # df updated with CoW disabled + assert df.iloc[0, 0] == 0 + + +@td.skip_array_manager_invalid_test +@pytest.mark.parametrize("dtype", [np.intp, np.int8]) +@pytest.mark.parametrize( + "locs, arr", + [ + ([0], np.array([-1, -2, -3])), + ([1], np.array([-1, -2, -3])), + ([5], np.array([-1, -2, -3])), + ([0, 1], np.array([[-1, -2, -3], [-4, -5, -6]]).T), + ([0, 2], np.array([[-1, -2, -3], [-4, -5, -6]]).T), + ([0, 1, 2], np.array([[-1, -2, -3], [-4, -5, -6], [-4, -5, -6]]).T), + ([1, 2], np.array([[-1, -2, -3], [-4, -5, -6]]).T), + ([1, 3], np.array([[-1, -2, -3], [-4, -5, -6]]).T), + ([1, 3], np.array([[-1, -2, -3], [-4, -5, -6]]).T), + ], +) +def test_iset_splits_blocks_inplace(using_copy_on_write, locs, arr, dtype): + # Nothing currently calls iset with + # more than 1 loc with inplace=True (only happens with inplace=False) + # but ensure that it works + df = DataFrame( + { + "a": [1, 2, 3], + "b": [4, 5, 6], + "c": [7, 8, 9], + "d": [10, 11, 12], + "e": [13, 14, 15], + "f": ["a", "b", "c"], + }, + ) + arr = arr.astype(dtype) + df_orig = df.copy() + df2 = df.copy(deep=None) # Trigger a CoW (if enabled, otherwise makes copy) + df2._mgr.iset(locs, arr, inplace=True) + + tm.assert_frame_equal(df, df_orig) + + if using_copy_on_write: + for i, col in enumerate(df.columns): + if i not in locs: + assert np.shares_memory(get_array(df, col), get_array(df2, col)) + else: + for col in df.columns: + assert not np.shares_memory(get_array(df, col), get_array(df2, col)) + + +def test_exponential_backoff(): + # GH#55518 + df = DataFrame({"a": [1, 2, 3]}) + for i in range(490): + df.copy(deep=False) + + assert len(df._mgr.blocks[0].refs.referenced_blocks) == 491 + + df = DataFrame({"a": [1, 2, 3]}) + dfs = [df.copy(deep=False) for i in range(510)] + + for i in range(20): + df.copy(deep=False) + assert len(df._mgr.blocks[0].refs.referenced_blocks) == 531 + assert df._mgr.blocks[0].refs.clear_counter == 1000 + + for i in range(500): + df.copy(deep=False) + + # Don't reduce since we still have over 500 objects alive + assert df._mgr.blocks[0].refs.clear_counter == 1000 + + dfs = dfs[:300] + for i in range(500): + df.copy(deep=False) + + # Reduce since there are less than 500 objects alive + assert df._mgr.blocks[0].refs.clear_counter == 500 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_interp_fillna.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_interp_fillna.py new file mode 100644 index 00000000..5507e81d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_interp_fillna.py @@ -0,0 +1,377 @@ +import numpy as np +import pytest + +from pandas import ( + NA, + ArrowDtype, + DataFrame, + Interval, + NaT, + Series, + Timestamp, + interval_range, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +@pytest.mark.parametrize("method", ["pad", "nearest", "linear"]) +def test_interpolate_no_op(using_copy_on_write, method): + df = DataFrame({"a": [1, 2]}) + df_orig = df.copy() + + warn = None + if method == 
"pad": + warn = FutureWarning + msg = "DataFrame.interpolate with method=pad is deprecated" + with tm.assert_produces_warning(warn, match=msg): + result = df.interpolate(method=method) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + + result.iloc[0, 0] = 100 + + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("func", ["ffill", "bfill"]) +def test_interp_fill_functions(using_copy_on_write, func): + # Check that these takes the same code paths as interpolate + df = DataFrame({"a": [1, 2]}) + df_orig = df.copy() + + result = getattr(df, func)() + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + + result.iloc[0, 0] = 100 + + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("func", ["ffill", "bfill"]) +@pytest.mark.parametrize( + "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]] +) +def test_interpolate_triggers_copy(using_copy_on_write, vals, func): + df = DataFrame({"a": vals}) + result = getattr(df, func)() + + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + if using_copy_on_write: + # Check that we don't have references when triggering a copy + assert result._mgr._has_no_reference(0) + + +@pytest.mark.parametrize( + "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]] +) +def test_interpolate_inplace_no_reference_no_copy(using_copy_on_write, vals): + df = DataFrame({"a": vals}) + arr = get_array(df, "a") + df.interpolate(method="linear", inplace=True) + + assert np.shares_memory(arr, get_array(df, "a")) + if using_copy_on_write: + # Check that we don't have references when triggering a copy + assert df._mgr._has_no_reference(0) + + +@pytest.mark.parametrize( + "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]] +) +def test_interpolate_inplace_with_refs(using_copy_on_write, vals): + df = DataFrame({"a": [1, np.nan, 2]}) + df_orig = df.copy() + arr = get_array(df, "a") + view = df[:] + df.interpolate(method="linear", inplace=True) + + if using_copy_on_write: + # Check that copy was triggered in interpolate and that we don't + # have any references left + assert not np.shares_memory(arr, get_array(df, "a")) + tm.assert_frame_equal(df_orig, view) + assert df._mgr._has_no_reference(0) + assert view._mgr._has_no_reference(0) + else: + assert np.shares_memory(arr, get_array(df, "a")) + + +def test_interpolate_cleaned_fill_method(using_copy_on_write): + # Check that "method is set to None" case works correctly + df = DataFrame({"a": ["a", np.nan, "c"], "b": 1}) + df_orig = df.copy() + + msg = "DataFrame.interpolate with object dtype" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.interpolate(method="linear") + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + + result.iloc[0, 0] = Timestamp("2021-12-31") + + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, 
df_orig) + + +def test_interpolate_object_convert_no_op(using_copy_on_write): + df = DataFrame({"a": ["a", "b", "c"], "b": 1}) + arr_a = get_array(df, "a") + msg = "DataFrame.interpolate with method=pad is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.interpolate(method="pad", inplace=True) + + # CoW should not make a copy here; the assertions check this stays a no-op + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert np.shares_memory(arr_a, get_array(df, "a")) + + +def test_interpolate_object_convert_copies(using_copy_on_write): + df = DataFrame({"a": Series([1, 2], dtype=object), "b": 1}) + arr_a = get_array(df, "a") + msg = "DataFrame.interpolate with method=pad is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.interpolate(method="pad", inplace=True) + + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert not np.shares_memory(arr_a, get_array(df, "a")) + + +def test_interpolate_downcast(using_copy_on_write): + df = DataFrame({"a": [1, np.nan, 2.5], "b": 1}) + arr_a = get_array(df, "a") + msg = "DataFrame.interpolate with method=pad is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.interpolate(method="pad", inplace=True, downcast="infer") + + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert np.shares_memory(arr_a, get_array(df, "a")) + + +def test_interpolate_downcast_reference_triggers_copy(using_copy_on_write): + df = DataFrame({"a": [1, np.nan, 2.5], "b": 1}) + df_orig = df.copy() + arr_a = get_array(df, "a") + view = df[:] + msg = "DataFrame.interpolate with method=pad is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.interpolate(method="pad", inplace=True, downcast="infer") + + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert not np.shares_memory(arr_a, get_array(df, "a")) + tm.assert_frame_equal(df_orig, view) + else: + tm.assert_frame_equal(df, view) + + +def test_fillna(using_copy_on_write): + df = DataFrame({"a": [1.5, np.nan], "b": 1}) + df_orig = df.copy() + + df2 = df.fillna(5.5) + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + else: + assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + + df2.iloc[0, 1] = 100 + tm.assert_frame_equal(df_orig, df) + + +def test_fillna_dict(using_copy_on_write): + df = DataFrame({"a": [1.5, np.nan], "b": 1}) + df_orig = df.copy() + + df2 = df.fillna({"a": 100.5}) + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + else: + assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + + df2.iloc[0, 1] = 100 + tm.assert_frame_equal(df_orig, df) + + +@pytest.mark.parametrize("downcast", [None, False]) +def test_fillna_inplace(using_copy_on_write, downcast): + df = DataFrame({"a": [1.5, np.nan], "b": 1}) + arr_a = get_array(df, "a") + arr_b = get_array(df, "b") + + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.fillna(5.5, inplace=True, downcast=downcast) + assert np.shares_memory(get_array(df, "a"), arr_a) + assert np.shares_memory(get_array(df, "b"), arr_b) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert df._mgr._has_no_reference(1) + + +def test_fillna_inplace_reference(using_copy_on_write): + df = DataFrame({"a": [1.5, np.nan], "b": 1}) + df_orig = df.copy() + arr_a = get_array(df, "a") + arr_b 
= get_array(df, "b") + view = df[:] + + df.fillna(5.5, inplace=True) + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), arr_a) + assert np.shares_memory(get_array(df, "b"), arr_b) + assert view._mgr._has_no_reference(0) + assert df._mgr._has_no_reference(0) + tm.assert_frame_equal(view, df_orig) + else: + assert np.shares_memory(get_array(df, "a"), arr_a) + assert np.shares_memory(get_array(df, "b"), arr_b) + expected = DataFrame({"a": [1.5, 5.5], "b": 1}) + tm.assert_frame_equal(df, expected) + + +def test_fillna_interval_inplace_reference(using_copy_on_write): + # Set dtype explicitly to avoid implicit cast when setting nan + ser = Series( + interval_range(start=0, end=5), name="a", dtype="interval[float64, right]" + ) + ser.iloc[1] = np.nan + + ser_orig = ser.copy() + view = ser[:] + ser.fillna(value=Interval(left=0, right=5), inplace=True) + + if using_copy_on_write: + assert not np.shares_memory( + get_array(ser, "a").left.values, get_array(view, "a").left.values + ) + tm.assert_series_equal(view, ser_orig) + else: + assert np.shares_memory( + get_array(ser, "a").left.values, get_array(view, "a").left.values + ) + + +def test_fillna_series_empty_arg(using_copy_on_write): + ser = Series([1, np.nan, 2]) + ser_orig = ser.copy() + result = ser.fillna({}) + + if using_copy_on_write: + assert np.shares_memory(get_array(ser), get_array(result)) + else: + assert not np.shares_memory(get_array(ser), get_array(result)) + + ser.iloc[0] = 100.5 + tm.assert_series_equal(ser_orig, result) + + +def test_fillna_series_empty_arg_inplace(using_copy_on_write): + ser = Series([1, np.nan, 2]) + arr = get_array(ser) + ser.fillna({}, inplace=True) + + assert np.shares_memory(get_array(ser), arr) + if using_copy_on_write: + assert ser._mgr._has_no_reference(0) + + +def test_fillna_ea_noop_shares_memory( + using_copy_on_write, any_numeric_ea_and_arrow_dtype +): + df = DataFrame({"a": [1, NA, 3], "b": 1}, dtype=any_numeric_ea_and_arrow_dtype) + df_orig = df.copy() + df2 = df.fillna(100) + + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert not df2._mgr._has_no_reference(1) + elif isinstance(df.dtypes.iloc[0], ArrowDtype): + # arrow is immutable, so no-ops do not need to copy underlying array + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + else: + assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + + tm.assert_frame_equal(df_orig, df) + + df2.iloc[0, 1] = 100 + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert df2._mgr._has_no_reference(1) + assert df._mgr._has_no_reference(1) + tm.assert_frame_equal(df_orig, df) + + +def test_fillna_inplace_ea_noop_shares_memory( + using_copy_on_write, any_numeric_ea_and_arrow_dtype +): + df = DataFrame({"a": [1, NA, 3], "b": 1}, dtype=any_numeric_ea_and_arrow_dtype) + df_orig = df.copy() + view = df[:] + df.fillna(100, inplace=True) + + if isinstance(df["a"].dtype, ArrowDtype) or using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), get_array(view, "a")) + else: + # MaskedArray can actually respect inplace=True + assert np.shares_memory(get_array(df, "a"), get_array(view, "a")) + + assert np.shares_memory(get_array(df, "b"), get_array(view, "b")) + if using_copy_on_write: + assert not df._mgr._has_no_reference(1) + assert not view._mgr._has_no_reference(1) + + df.iloc[0, 1] = 100 + if isinstance(df["a"].dtype, ArrowDtype) 
or using_copy_on_write: + tm.assert_frame_equal(df_orig, view) + else: + # we actually have a view + tm.assert_frame_equal(df, view) + + +def test_fillna_chained_assignment(using_copy_on_write): + df = DataFrame({"a": [1, np.nan, 2], "b": 1}) + df_orig = df.copy() + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["a"].fillna(100, inplace=True) + tm.assert_frame_equal(df, df_orig) + + with tm.raises_chained_assignment_error(): + df[["a"]].fillna(100, inplace=True) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("func", ["interpolate", "ffill", "bfill"]) +def test_interpolate_chained_assignment(using_copy_on_write, func): + df = DataFrame({"a": [1, np.nan, 2], "b": 1}) + df_orig = df.copy() + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + getattr(df["a"], func)(inplace=True) + tm.assert_frame_equal(df, df_orig) + + with tm.raises_chained_assignment_error(): + getattr(df[["a"]], func)(inplace=True) + tm.assert_frame_equal(df, df_orig) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_methods.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_methods.py new file mode 100644 index 00000000..fe1be2d8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_methods.py @@ -0,0 +1,1935 @@ +import numpy as np +import pytest + +from pandas.errors import SettingWithCopyWarning + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Period, + Series, + Timestamp, + date_range, + period_range, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +def test_copy(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_copy = df.copy() + + # the deep copy by default takes a shallow copy of the Index + assert df_copy.index is not df.index + assert df_copy.columns is not df.columns + assert df_copy.index.is_(df.index) + assert df_copy.columns.is_(df.columns) + + # the deep copy doesn't share memory + assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a")) + if using_copy_on_write: + assert not df_copy._mgr.blocks[0].refs.has_reference() + assert not df_copy._mgr.blocks[1].refs.has_reference() + + # mutating copy doesn't mutate original + df_copy.iloc[0, 0] = 0 + assert df.iloc[0, 0] == 1 + + +def test_copy_shallow(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_copy = df.copy(deep=False) + + # the shallow copy also makes a shallow copy of the index + if using_copy_on_write: + assert df_copy.index is not df.index + assert df_copy.columns is not df.columns + assert df_copy.index.is_(df.index) + assert df_copy.columns.is_(df.columns) + else: + assert df_copy.index is df.index + assert df_copy.columns is df.columns + + # the shallow copy still shares memory + assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a")) + if using_copy_on_write: + assert df_copy._mgr.blocks[0].refs.has_reference() + assert df_copy._mgr.blocks[1].refs.has_reference() + + if using_copy_on_write: + # mutating shallow copy doesn't mutate original + df_copy.iloc[0, 0] = 0 + assert df.iloc[0, 0] == 1 + # mutating triggered a copy-on-write -> no longer shares memory + assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a")) + # but still shares memory for the other columns/blocks + assert np.shares_memory(get_array(df_copy, "c"), get_array(df, "c")) + else: + # mutating shallow copy does mutate 
original + df_copy.iloc[0, 0] = 0 + assert df.iloc[0, 0] == 0 + # and still shares memory + assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a")) + + +@pytest.mark.parametrize("copy", [True, None, False]) +@pytest.mark.parametrize( + "method", + [ + lambda df, copy: df.rename(columns=str.lower, copy=copy), + lambda df, copy: df.reindex(columns=["a", "c"], copy=copy), + lambda df, copy: df.reindex_like(df, copy=copy), + lambda df, copy: df.align(df, copy=copy)[0], + lambda df, copy: df.set_axis(["a", "b", "c"], axis="index", copy=copy), + lambda df, copy: df.rename_axis(index="test", copy=copy), + lambda df, copy: df.rename_axis(columns="test", copy=copy), + lambda df, copy: df.astype({"b": "int64"}, copy=copy), + # lambda df, copy: df.swaplevel(0, 0, copy=copy), + lambda df, copy: df.swapaxes(0, 0, copy=copy), + lambda df, copy: df.truncate(0, 5, copy=copy), + lambda df, copy: df.infer_objects(copy=copy), + lambda df, copy: df.to_timestamp(copy=copy), + lambda df, copy: df.to_period(freq="D", copy=copy), + lambda df, copy: df.tz_localize("US/Central", copy=copy), + lambda df, copy: df.tz_convert("US/Central", copy=copy), + lambda df, copy: df.set_flags(allows_duplicate_labels=False, copy=copy), + ], + ids=[ + "rename", + "reindex", + "reindex_like", + "align", + "set_axis", + "rename_axis0", + "rename_axis1", + "astype", + # "swaplevel", # only series + "swapaxes", + "truncate", + "infer_objects", + "to_timestamp", + "to_period", + "tz_localize", + "tz_convert", + "set_flags", + ], +) +def test_methods_copy_keyword( + request, method, copy, using_copy_on_write, using_array_manager +): + index = None + if "to_timestamp" in request.node.callspec.id: + index = period_range("2012-01-01", freq="D", periods=3) + elif "to_period" in request.node.callspec.id: + index = date_range("2012-01-01", freq="D", periods=3) + elif "tz_localize" in request.node.callspec.id: + index = date_range("2012-01-01", freq="D", periods=3) + elif "tz_convert" in request.node.callspec.id: + index = date_range("2012-01-01", freq="D", periods=3, tz="Europe/Brussels") + + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}, index=index) + + if "swapaxes" in request.node.callspec.id: + msg = "'DataFrame.swapaxes' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df2 = method(df, copy=copy) + else: + df2 = method(df, copy=copy) + + share_memory = using_copy_on_write or copy is False + + if request.node.callspec.id.startswith("reindex-"): + # TODO copy=False without CoW still returns a copy in this case + if not using_copy_on_write and not using_array_manager and copy is False: + share_memory = False + + if share_memory: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + +@pytest.mark.parametrize("copy", [True, None, False]) +@pytest.mark.parametrize( + "method", + [ + lambda ser, copy: ser.rename(index={0: 100}, copy=copy), + lambda ser, copy: ser.rename(None, copy=copy), + lambda ser, copy: ser.reindex(index=ser.index, copy=copy), + lambda ser, copy: ser.reindex_like(ser, copy=copy), + lambda ser, copy: ser.align(ser, copy=copy)[0], + lambda ser, copy: ser.set_axis(["a", "b", "c"], axis="index", copy=copy), + lambda ser, copy: ser.rename_axis(index="test", copy=copy), + lambda ser, copy: ser.astype("int64", copy=copy), + lambda ser, copy: ser.swaplevel(0, 1, copy=copy), + lambda ser, copy: ser.swapaxes(0, 0, copy=copy), + lambda ser, copy: ser.truncate(0, 5, 
copy=copy), + lambda ser, copy: ser.infer_objects(copy=copy), + lambda ser, copy: ser.to_timestamp(copy=copy), + lambda ser, copy: ser.to_period(freq="D", copy=copy), + lambda ser, copy: ser.tz_localize("US/Central", copy=copy), + lambda ser, copy: ser.tz_convert("US/Central", copy=copy), + lambda ser, copy: ser.set_flags(allows_duplicate_labels=False, copy=copy), + ], + ids=[ + "rename (dict)", + "rename", + "reindex", + "reindex_like", + "align", + "set_axis", + "rename_axis0", + "astype", + "swaplevel", + "swapaxes", + "truncate", + "infer_objects", + "to_timestamp", + "to_period", + "tz_localize", + "tz_convert", + "set_flags", + ], +) +def test_methods_series_copy_keyword(request, method, copy, using_copy_on_write): + index = None + if "to_timestamp" in request.node.callspec.id: + index = period_range("2012-01-01", freq="D", periods=3) + elif "to_period" in request.node.callspec.id: + index = date_range("2012-01-01", freq="D", periods=3) + elif "tz_localize" in request.node.callspec.id: + index = date_range("2012-01-01", freq="D", periods=3) + elif "tz_convert" in request.node.callspec.id: + index = date_range("2012-01-01", freq="D", periods=3, tz="Europe/Brussels") + elif "swaplevel" in request.node.callspec.id: + index = MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]]) + + ser = Series([1, 2, 3], index=index) + + if "swapaxes" in request.node.callspec.id: + msg = "'Series.swapaxes' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + ser2 = method(ser, copy=copy) + else: + ser2 = method(ser, copy=copy) + + share_memory = using_copy_on_write or copy is False + + if share_memory: + assert np.shares_memory(get_array(ser2), get_array(ser)) + else: + assert not np.shares_memory(get_array(ser2), get_array(ser)) + + +@pytest.mark.parametrize("copy", [True, None, False]) +def test_transpose_copy_keyword(using_copy_on_write, copy, using_array_manager): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + result = df.transpose(copy=copy) + share_memory = using_copy_on_write or copy is False or copy is None + share_memory = share_memory and not using_array_manager + + if share_memory: + assert np.shares_memory(get_array(df, "a"), get_array(result, 0)) + else: + assert not np.shares_memory(get_array(df, "a"), get_array(result, 0)) + + +# ----------------------------------------------------------------------------- +# DataFrame methods returning new DataFrame using shallow copy + + +def test_reset_index(using_copy_on_write): + # Case: resetting the index (i.e. 
adding a new column) + mutating the + # resulting dataframe + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}, index=[10, 11, 12] + ) + df_orig = df.copy() + df2 = df.reset_index() + df2._mgr._verify_integrity() + + if using_copy_on_write: + # still shares memory (df2 is a shallow copy) + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + # mutating df2 triggers a copy-on-write for that column / block + df2.iloc[0, 2] = 0 + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("index", [pd.RangeIndex(0, 2), Index([1, 2])]) +def test_reset_index_series_drop(using_copy_on_write, index): + ser = Series([1, 2], index=index) + ser_orig = ser.copy() + ser2 = ser.reset_index(drop=True) + if using_copy_on_write: + assert np.shares_memory(get_array(ser), get_array(ser2)) + assert not ser._mgr._has_no_reference(0) + else: + assert not np.shares_memory(get_array(ser), get_array(ser2)) + + ser2.iloc[0] = 100 + tm.assert_series_equal(ser, ser_orig) + + +def test_rename_columns(using_copy_on_write): + # Case: renaming columns returns a new dataframe + # + afterwards modifying the result + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.rename(columns=str.upper) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "A"), get_array(df, "a")) + df2.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "C"), get_array(df, "c")) + expected = DataFrame({"A": [0, 2, 3], "B": [4, 5, 6], "C": [0.1, 0.2, 0.3]}) + tm.assert_frame_equal(df2, expected) + tm.assert_frame_equal(df, df_orig) + + +def test_rename_columns_modify_parent(using_copy_on_write): + # Case: renaming columns returns a new dataframe + # + afterwards modifying the original (parent) dataframe + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df2 = df.rename(columns=str.upper) + df2_orig = df2.copy() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "A"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a")) + df.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "C"), get_array(df, "c")) + expected = DataFrame({"a": [0, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + tm.assert_frame_equal(df, expected) + tm.assert_frame_equal(df2, df2_orig) + + +def test_pipe(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1.5}) + df_orig = df.copy() + + def testfunc(df): + return df + + df2 = df.pipe(testfunc) + + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column + df2.iloc[0, 0] = 0 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + expected = DataFrame({"a": [0, 2, 3], "b": 1.5}) + tm.assert_frame_equal(df, expected) + + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + +def test_pipe_modify_df(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1.5}) + 
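+ # Under CoW, `pipe` hands the user function an object that behaves as a lazy copy: + # the in-place write inside `testfunc` below triggers copy-on-write instead of + # mutating `df` itself (asserted further down).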
df_orig = df.copy() + + def testfunc(df): + df.iloc[0, 0] = 100 + return df + + df2 = df.pipe(testfunc) + + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + expected = DataFrame({"a": [100, 2, 3], "b": 1.5}) + tm.assert_frame_equal(df, expected) + + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + +def test_reindex_columns(using_copy_on_write): + # Case: reindexing the column returns a new dataframe + # + afterwards modifying the result + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.reindex(columns=["a", "c"]) + + if using_copy_on_write: + # still shares memory (df2 is a shallow copy) + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + # mutating df2 triggers a copy-on-write for that column + df2.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "index", + [ + lambda idx: idx, + lambda idx: idx.view(), + lambda idx: idx.copy(), + lambda idx: list(idx), + ], + ids=["identical", "view", "copy", "values"], +) +def test_reindex_rows(index, using_copy_on_write): + # Case: reindexing the rows with an index that matches the current index + # can use a shallow copy + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.reindex(index=index(df.index)) + + if using_copy_on_write: + # still shares memory (df2 is a shallow copy) + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + # mutating df2 triggers a copy-on-write for that column + df2.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + tm.assert_frame_equal(df, df_orig) + + +def test_drop_on_column(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.drop(columns="a") + df2._mgr._verify_integrity() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + else: + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + df2.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + tm.assert_frame_equal(df, df_orig) + + +def test_select_dtypes(using_copy_on_write): + # Case: selecting columns using `select_dtypes()` returns a new dataframe + # + afterwards modifying the result + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.select_dtypes("int64") + df2._mgr._verify_integrity() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) 
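+ # A minimal standalone sketch (hypothetical session, assuming CoW is enabled) of + # the transition the rest of this test exercises: + # df2 = df.select_dtypes("int64") # lazy result: "a" still shares memory with df + # df2.iloc[0, 0] = 0 # the first write copies only the selected block + # assert df.loc[0, "a"] == 1 # the parent is left untouched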
+ + # mutating df2 triggers a copy-on-write for that column/block + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "filter_kwargs", [{"items": ["a"]}, {"like": "a"}, {"regex": "a"}] +) +def test_filter(using_copy_on_write, filter_kwargs): + # Case: selecting columns using `filter()` returns a new dataframe + # + afterwards modifying the result + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.filter(**filter_kwargs) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column/block + if using_copy_on_write: + df2.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_shift_no_op(using_copy_on_write): + df = DataFrame( + [[1, 2], [3, 4], [5, 6]], + index=date_range("2020-01-01", "2020-01-03"), + columns=["a", "b"], + ) + df_orig = df.copy() + df2 = df.shift(periods=0) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + tm.assert_frame_equal(df2, df_orig) + + +def test_shift_index(using_copy_on_write): + df = DataFrame( + [[1, 2], [3, 4], [5, 6]], + index=date_range("2020-01-01", "2020-01-03"), + columns=["a", "b"], + ) + df2 = df.shift(periods=1, axis=0) + + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + +def test_shift_rows_freq(using_copy_on_write): + df = DataFrame( + [[1, 2], [3, 4], [5, 6]], + index=date_range("2020-01-01", "2020-01-03"), + columns=["a", "b"], + ) + df_orig = df.copy() + df_orig.index = date_range("2020-01-02", "2020-01-04") + df2 = df.shift(periods=1, freq="1D") + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + tm.assert_frame_equal(df2, df_orig) + + +def test_shift_columns(using_copy_on_write): + df = DataFrame( + [[1, 2], [3, 4], [5, 6]], columns=date_range("2020-01-01", "2020-01-02") + ) + df2 = df.shift(periods=1, axis=1) + + assert np.shares_memory(get_array(df2, "2020-01-02"), get_array(df, "2020-01-01")) + df.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory( + get_array(df2, "2020-01-02"), get_array(df, "2020-01-01") + ) + expected = DataFrame( + [[np.nan, 1], [np.nan, 3], [np.nan, 5]], + columns=date_range("2020-01-01", "2020-01-02"), + ) + tm.assert_frame_equal(df2, expected) + + +def test_pop(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + view_original = df[:] + result = df.pop("a") + + assert np.shares_memory(result.values, get_array(view_original, "a")) + assert np.shares_memory(get_array(df, "b"), get_array(view_original, "b")) + + if using_copy_on_write: + result.iloc[0] = 0 + assert not np.shares_memory(result.values, get_array(view_original, "a")) + 
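+ # `pop` removed "a" from df, but `view_original` still references the remaining + # columns, so the write to "b" below must trigger a copy under CoW.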
df.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "b"), get_array(view_original, "b")) + tm.assert_frame_equal(view_original, df_orig) + else: + expected = DataFrame({"a": [1, 2, 3], "b": [0, 5, 6], "c": [0.1, 0.2, 0.3]}) + tm.assert_frame_equal(view_original, expected) + + +@pytest.mark.parametrize( + "func", + [ + lambda x, y: x.align(y), + lambda x, y: x.align(y.a, axis=0), + lambda x, y: x.align(y.a.iloc[slice(0, 1)], axis=1), + ], +) +def test_align_frame(using_copy_on_write, func): + df = DataFrame({"a": [1, 2, 3], "b": "a"}) + df_orig = df.copy() + df_changed = df[["b", "a"]].copy() + df2, _ = func(df, df_changed) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_align_series(using_copy_on_write): + ser = Series([1, 2]) + ser_orig = ser.copy() + ser_other = ser.copy() + ser2, ser_other_result = ser.align(ser_other) + + if using_copy_on_write: + assert np.shares_memory(ser2.values, ser.values) + assert np.shares_memory(ser_other_result.values, ser_other.values) + else: + assert not np.shares_memory(ser2.values, ser.values) + assert not np.shares_memory(ser_other_result.values, ser_other.values) + + ser2.iloc[0] = 0 + ser_other_result.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(ser2.values, ser.values) + assert not np.shares_memory(ser_other_result.values, ser_other.values) + tm.assert_series_equal(ser, ser_orig) + tm.assert_series_equal(ser_other, ser_orig) + + +def test_align_copy_false(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df_orig = df.copy() + df2, df3 = df.align(df, copy=False) + + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + if using_copy_on_write: + df2.loc[0, "a"] = 0 + tm.assert_frame_equal(df, df_orig) # Original is unchanged + + df3.loc[0, "a"] = 0 + tm.assert_frame_equal(df, df_orig) # Original is unchanged + + +def test_align_with_series_copy_false(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + ser = Series([1, 2, 3], name="x") + ser_orig = ser.copy() + df_orig = df.copy() + df2, ser2 = df.align(ser, copy=False, axis=0) + + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + assert np.shares_memory(get_array(ser, "x"), get_array(ser2, "x")) + + if using_copy_on_write: + df2.loc[0, "a"] = 0 + tm.assert_frame_equal(df, df_orig) # Original is unchanged + + ser2.loc[0] = 0 + tm.assert_series_equal(ser, ser_orig) # Original is unchanged + + +def test_to_frame(using_copy_on_write): + # Case: converting a Series to a DataFrame with to_frame + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + + df = ser[:].to_frame() + + # currently this always returns a "view" + assert np.shares_memory(ser.values, get_array(df, 0)) + + df.iloc[0, 0] = 0 + + if using_copy_on_write: + # mutating df triggers a copy-on-write for that column + assert not np.shares_memory(ser.values, get_array(df, 0)) + tm.assert_series_equal(ser, ser_orig) + else: + # but currently to_frame() actually returns a view -> mutates parent + expected = ser_orig.copy() + expected.iloc[0] = 0 + tm.assert_series_equal(ser, expected) + + 
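+ # The same round trip as a hypothetical standalone sketch (assuming CoW is enabled): + # frame = ser[:].to_frame() # shares memory with ser + # frame.iloc[0, 0] = 0 # the write copies the single column + # assert ser.iloc[0] == 1 # ser is protected by CoW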
# modify original series -> don't modify dataframe + df = ser[:].to_frame() + ser.iloc[0] = 0 + + if using_copy_on_write: + tm.assert_frame_equal(df, ser_orig.to_frame()) + else: + expected = ser_orig.copy().to_frame() + expected.iloc[0, 0] = 0 + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("ax", ["index", "columns"]) +def test_swapaxes_noop(using_copy_on_write, ax): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df_orig = df.copy() + msg = "'DataFrame.swapaxes' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df2 = df.swapaxes(ax, ax) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column/block + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_swapaxes_single_block(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["x", "y", "z"]) + df_orig = df.copy() + msg = "'DataFrame.swapaxes' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df2 = df.swapaxes("index", "columns") + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "x"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "x"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column/block + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "x"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_swapaxes_read_only_array(): + df = DataFrame({"a": [1, 2], "b": 3}) + msg = "'DataFrame.swapaxes' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df = df.swapaxes(axis1="index", axis2="columns") + df.iloc[0, 0] = 100 + expected = DataFrame({0: [100, 3], 1: [2, 3]}, index=["a", "b"]) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize( + "method, idx", + [ + (lambda df: df.copy(deep=False).copy(deep=False), 0), + (lambda df: df.reset_index().reset_index(), 2), + (lambda df: df.rename(columns=str.upper).rename(columns=str.lower), 0), + (lambda df: df.copy(deep=False).select_dtypes(include="number"), 0), + ], + ids=["shallow-copy", "reset_index", "rename", "select_dtypes"], +) +def test_chained_methods(request, method, idx, using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + + # when not using CoW, only the copy() variant actually gives a view + df2_is_view = not using_copy_on_write and request.node.callspec.id == "shallow-copy" + + # modify df2 -> don't modify df + df2 = method(df) + df2.iloc[0, idx] = 0 + if not df2_is_view: + tm.assert_frame_equal(df, df_orig) + + # modify df -> don't modify df2 + df2 = method(df) + df.iloc[0, 0] = 0 + if not df2_is_view: + tm.assert_frame_equal(df2.iloc[:, idx:], df_orig) + + +@pytest.mark.parametrize("obj", [Series([1, 2], name="a"), DataFrame({"a": [1, 2]})]) +def test_to_timestamp(using_copy_on_write, obj): + obj.index = Index([Period("2012-1-1", freq="D"), Period("2012-1-2", freq="D")]) + + obj_orig = obj.copy() + obj2 = obj.to_timestamp() + + if using_copy_on_write: + assert np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + else: + assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + + # mutating obj2 triggers a copy-on-write for that 
column / block + obj2.iloc[0] = 0 + assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + tm.assert_equal(obj, obj_orig) + + +@pytest.mark.parametrize("obj", [Series([1, 2], name="a"), DataFrame({"a": [1, 2]})]) +def test_to_period(using_copy_on_write, obj): + obj.index = Index([Timestamp("2019-12-31"), Timestamp("2020-12-31")]) + + obj_orig = obj.copy() + obj2 = obj.to_period(freq="Y") + + if using_copy_on_write: + assert np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + else: + assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + + # mutating obj2 triggers a copy-on-write for that column / block + obj2.iloc[0] = 0 + assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + tm.assert_equal(obj, obj_orig) + + +def test_set_index(using_copy_on_write): + # GH 49473 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.set_index("a") + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + else: + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + # mutating df2 triggers a copy-on-write for that column / block + df2.iloc[0, 1] = 0 + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + tm.assert_frame_equal(df, df_orig) + + +def test_set_index_mutating_parent_does_not_mutate_index(): + df = DataFrame({"a": [1, 2, 3], "b": 1}) + result = df.set_index("a") + expected = result.copy() + + df.iloc[0, 0] = 100 + tm.assert_frame_equal(result, expected) + + +def test_add_prefix(using_copy_on_write): + # GH 49473 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.add_prefix("CoW_") + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "CoW_a"), get_array(df, "a")) + df2.iloc[0, 0] = 0 + + assert not np.shares_memory(get_array(df2, "CoW_a"), get_array(df, "a")) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "CoW_c"), get_array(df, "c")) + expected = DataFrame( + {"CoW_a": [0, 2, 3], "CoW_b": [4, 5, 6], "CoW_c": [0.1, 0.2, 0.3]} + ) + tm.assert_frame_equal(df2, expected) + tm.assert_frame_equal(df, df_orig) + + +def test_add_suffix(using_copy_on_write): + # GH 49473 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.add_suffix("_CoW") + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a_CoW"), get_array(df, "a")) + df2.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "a_CoW"), get_array(df, "a")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c_CoW"), get_array(df, "c")) + expected = DataFrame( + {"a_CoW": [0, 2, 3], "b_CoW": [4, 5, 6], "c_CoW": [0.1, 0.2, 0.3]} + ) + tm.assert_frame_equal(df2, expected) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("axis, val", [(0, 5.5), (1, np.nan)]) +def test_dropna(using_copy_on_write, axis, val): + df = DataFrame({"a": [1, 2, 3], "b": [4, val, 6], "c": "d"}) + df_orig = df.copy() + df2 = df.dropna(axis=axis) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("val", [5, 5.5]) +def test_dropna_series(using_copy_on_write, val): + ser = Series([1, val, 4]) + 
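+ # `val` only varies the dtype (5 -> int64, 5.5 -> float64); in both cases dropna + # removes nothing, so under CoW the result can stay a lazy copy of the parent.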
ser_orig = ser.copy() + ser2 = ser.dropna() + + if using_copy_on_write: + assert np.shares_memory(ser2.values, ser.values) + else: + assert not np.shares_memory(ser2.values, ser.values) + + ser2.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(ser2.values, ser.values) + tm.assert_series_equal(ser, ser_orig) + + +@pytest.mark.parametrize( + "method", + [ + lambda df: df.head(), + lambda df: df.head(2), + lambda df: df.tail(), + lambda df: df.tail(3), + ], +) +def test_head_tail(method, using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = method(df) + df2._mgr._verify_integrity() + + if using_copy_on_write: + # We are explicitly deviating for CoW here to make an eager copy (avoids + # tracking references for very cheap ops) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + # modify df2 to trigger CoW for that block + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + # without CoW enabled, head and tail return views. Mutating df2 also mutates df. + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + df2.iloc[0, 0] = 1 + tm.assert_frame_equal(df, df_orig) + + +def test_infer_objects(using_copy_on_write): + df = DataFrame({"a": [1, 2], "b": "c", "c": 1, "d": "x"}) + df_orig = df.copy() + df2 = df.infer_objects() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + df2.iloc[0, 0] = 0 + df2.iloc[0, 1] = "d" + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + tm.assert_frame_equal(df, df_orig) + + +def test_infer_objects_no_reference(using_copy_on_write): + df = DataFrame( + { + "a": [1, 2], + "b": "c", + "c": 1, + "d": Series( + [Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype="object" + ), + "e": "b", + } + ) + df = df.infer_objects() + + arr_a = get_array(df, "a") + arr_b = get_array(df, "b") + arr_d = get_array(df, "d") + + df.iloc[0, 0] = 0 + df.iloc[0, 1] = "d" + df.iloc[0, 3] = Timestamp("2018-12-31") + if using_copy_on_write: + assert np.shares_memory(arr_a, get_array(df, "a")) + # TODO(CoW): Block splitting causes references here + assert not np.shares_memory(arr_b, get_array(df, "b")) + assert np.shares_memory(arr_d, get_array(df, "d")) + + +def test_infer_objects_reference(using_copy_on_write): + df = DataFrame( + { + "a": [1, 2], + "b": "c", + "c": 1, + "d": Series( + [Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype="object" + ), + } + ) + view = df[:] # noqa: F841 + df = df.infer_objects() + + arr_a = get_array(df, "a") + arr_b = get_array(df, "b") + arr_d = get_array(df, "d") + + df.iloc[0, 0] = 0 + df.iloc[0, 1] = "d" + df.iloc[0, 3] = Timestamp("2018-12-31") + if using_copy_on_write: + assert not np.shares_memory(arr_a, get_array(df, "a")) + assert not np.shares_memory(arr_b, get_array(df, "b")) + assert np.shares_memory(arr_d, get_array(df, "d")) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"before": "a", "after": "b", "axis": 1}, + {"before": 0, "after": 
1, "axis": 0}, + ], +) +def test_truncate(using_copy_on_write, kwargs): + df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 2}) + df_orig = df.copy() + df2 = df.truncate(**kwargs) + df2._mgr._verify_integrity() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("method", ["assign", "drop_duplicates"]) +def test_assign_drop_duplicates(using_copy_on_write, method): + df = DataFrame({"a": [1, 2, 3]}) + df_orig = df.copy() + df2 = getattr(df, method)() + df2._mgr._verify_integrity() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("obj", [Series([1, 2]), DataFrame({"a": [1, 2]})]) +def test_take(using_copy_on_write, obj): + # Check that no copy is made when we take all rows in original order + obj_orig = obj.copy() + obj2 = obj.take([0, 1]) + + if using_copy_on_write: + assert np.shares_memory(obj2.values, obj.values) + else: + assert not np.shares_memory(obj2.values, obj.values) + + obj2.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(obj2.values, obj.values) + tm.assert_equal(obj, obj_orig) + + +@pytest.mark.parametrize("obj", [Series([1, 2]), DataFrame({"a": [1, 2]})]) +def test_between_time(using_copy_on_write, obj): + obj.index = date_range("2018-04-09", periods=2, freq="1D20min") + obj_orig = obj.copy() + obj2 = obj.between_time("0:00", "1:00") + + if using_copy_on_write: + assert np.shares_memory(obj2.values, obj.values) + else: + assert not np.shares_memory(obj2.values, obj.values) + + obj2.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(obj2.values, obj.values) + tm.assert_equal(obj, obj_orig) + + +def test_reindex_like(using_copy_on_write): + df = DataFrame({"a": [1, 2], "b": "a"}) + other = DataFrame({"b": "a", "a": [1, 2]}) + + df_orig = df.copy() + df2 = df.reindex_like(other) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 1] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_sort_index(using_copy_on_write): + # GH 49473 + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + ser2 = ser.sort_index() + + if using_copy_on_write: + assert np.shares_memory(ser.values, ser2.values) + else: + assert not np.shares_memory(ser.values, ser2.values) + + # mutating ser triggers a copy-on-write for the column / block + ser2.iloc[0] = 0 + assert not np.shares_memory(ser2.values, ser.values) + tm.assert_series_equal(ser, ser_orig) + + +@pytest.mark.parametrize( + "obj, kwargs", + [(Series([1, 2, 3], name="a"), {}), (DataFrame({"a": [1, 2, 3]}), {"by": "a"})], +) +def test_sort_values(using_copy_on_write, obj, kwargs): + obj_orig = obj.copy() + obj2 = obj.sort_values(**kwargs) + + if using_copy_on_write: + assert np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + else: + assert not 
np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + + # mutating df triggers a copy-on-write for the column / block + obj2.iloc[0] = 0 + assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + tm.assert_equal(obj, obj_orig) + + +@pytest.mark.parametrize( + "obj, kwargs", + [(Series([1, 2, 3], name="a"), {}), (DataFrame({"a": [1, 2, 3]}), {"by": "a"})], +) +def test_sort_values_inplace(using_copy_on_write, obj, kwargs, using_array_manager): + obj_orig = obj.copy() + view = obj[:] + obj.sort_values(inplace=True, **kwargs) + + assert np.shares_memory(get_array(obj, "a"), get_array(view, "a")) + + # mutating obj triggers a copy-on-write for the column / block + obj.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(obj, "a"), get_array(view, "a")) + tm.assert_equal(view, obj_orig) + else: + assert np.shares_memory(get_array(obj, "a"), get_array(view, "a")) + + +@pytest.mark.parametrize("decimals", [-1, 0, 1]) +def test_round(using_copy_on_write, decimals): + df = DataFrame({"a": [1, 2], "b": "c"}) + df_orig = df.copy() + df2 = df.round(decimals=decimals) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + # TODO: Make inplace by using out parameter of ndarray.round? + if decimals >= 0: + # Ensure lazy copy if no-op + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + df2.iloc[0, 1] = "d" + df2.iloc[0, 0] = 4 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_reorder_levels(using_copy_on_write): + index = MultiIndex.from_tuples( + [(1, 1), (1, 2), (2, 1), (2, 2)], names=["one", "two"] + ) + df = DataFrame({"a": [1, 2, 3, 4]}, index=index) + df_orig = df.copy() + df2 = df.reorder_levels(order=["two", "one"]) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_series_reorder_levels(using_copy_on_write): + index = MultiIndex.from_tuples( + [(1, 1), (1, 2), (2, 1), (2, 2)], names=["one", "two"] + ) + ser = Series([1, 2, 3, 4], index=index) + ser_orig = ser.copy() + ser2 = ser.reorder_levels(order=["two", "one"]) + + if using_copy_on_write: + assert np.shares_memory(ser2.values, ser.values) + else: + assert not np.shares_memory(ser2.values, ser.values) + + ser2.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(ser2.values, ser.values) + tm.assert_series_equal(ser, ser_orig) + + +@pytest.mark.parametrize("obj", [Series([1, 2, 3]), DataFrame({"a": [1, 2, 3]})]) +def test_swaplevel(using_copy_on_write, obj): + index = MultiIndex.from_tuples([(1, 1), (1, 2), (2, 1)], names=["one", "two"]) + obj.index = index + obj_orig = obj.copy() + obj2 = obj.swaplevel() + + if using_copy_on_write: + assert np.shares_memory(obj2.values, obj.values) + else: + assert not np.shares_memory(obj2.values, obj.values) + + obj2.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(obj2.values, obj.values) + tm.assert_equal(obj, obj_orig) + + +def 
test_frame_set_axis(using_copy_on_write): + # GH 49473 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.set_axis(["a", "b", "c"], axis="index") + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column / block + df2.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_series_set_axis(using_copy_on_write): + # GH 49473 + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + ser2 = ser.set_axis(["a", "b", "c"], axis="index") + + if using_copy_on_write: + assert np.shares_memory(ser, ser2) + else: + assert not np.shares_memory(ser, ser2) + + # mutating ser triggers a copy-on-write for the column / block + ser2.iloc[0] = 0 + assert not np.shares_memory(ser2, ser) + tm.assert_series_equal(ser, ser_orig) + + +def test_set_flags(using_copy_on_write): + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + ser2 = ser.set_flags(allows_duplicate_labels=False) + + assert np.shares_memory(ser, ser2) + + # mutating ser triggers a copy-on-write for the column / block + ser2.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(ser2, ser) + tm.assert_series_equal(ser, ser_orig) + else: + assert np.shares_memory(ser2, ser) + expected = Series([0, 2, 3]) + tm.assert_series_equal(ser, expected) + + +@pytest.mark.parametrize("kwargs", [{"mapper": "test"}, {"index": "test"}]) +def test_rename_axis(using_copy_on_write, kwargs): + df = DataFrame({"a": [1, 2, 3, 4]}, index=Index([1, 2, 3, 4], name="a")) + df_orig = df.copy() + df2 = df.rename_axis(**kwargs) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "func, tz", [("tz_convert", "Europe/Berlin"), ("tz_localize", None)] +) +def test_tz_convert_localize(using_copy_on_write, func, tz): + # GH 49473 + ser = Series( + [1, 2], index=date_range(start="2014-08-01 09:00", freq="H", periods=2, tz=tz) + ) + ser_orig = ser.copy() + ser2 = getattr(ser, func)("US/Central") + + if using_copy_on_write: + assert np.shares_memory(ser.values, ser2.values) + else: + assert not np.shares_memory(ser.values, ser2.values) + + # mutating ser triggers a copy-on-write for the column / block + ser2.iloc[0] = 0 + assert not np.shares_memory(ser2.values, ser.values) + tm.assert_series_equal(ser, ser_orig) + + +def test_droplevel(using_copy_on_write): + # GH 49473 + index = MultiIndex.from_tuples([(1, 1), (1, 2), (2, 1)], names=["one", "two"]) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}, index=index) + df_orig = df.copy() + df2 = df.droplevel(0) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column / block + df2.iloc[0, 0] = 0 + + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + if using_copy_on_write: + 
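# under CoW only the block holding the mutated column "a" gets copied; + # the untouched column "b" still shares memory with the original df +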
assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + tm.assert_frame_equal(df, df_orig) + + +def test_squeeze(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}) + df_orig = df.copy() + series = df.squeeze() + + # Should share memory regardless of CoW since squeeze is just an iloc + assert np.shares_memory(series.values, get_array(df, "a")) + + # mutating squeezed df triggers a copy-on-write for that column/block + series.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(series.values, get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + else: + # Without CoW the original will be modified + assert np.shares_memory(series.values, get_array(df, "a")) + assert df.loc[0, "a"] == 0 + + +def test_items(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) + df_orig = df.copy() + + # Test this twice, since the second time, the item cache will be + # triggered, and we want to make sure it still works then. + for i in range(2): + for name, ser in df.items(): + assert np.shares_memory(get_array(ser, name), get_array(df, name)) + + # mutating df triggers a copy-on-write for that column / block + ser.iloc[0] = 0 + + if using_copy_on_write: + assert not np.shares_memory(get_array(ser, name), get_array(df, name)) + tm.assert_frame_equal(df, df_orig) + else: + # Original frame will be modified + assert df.loc[0, name] == 0 + + +@pytest.mark.parametrize("dtype", ["int64", "Int64"]) +def test_putmask(using_copy_on_write, dtype): + df = DataFrame({"a": [1, 2], "b": 1, "c": 2}, dtype=dtype) + view = df[:] + df_orig = df.copy() + df[df == df] = 5 + + if using_copy_on_write: + assert not np.shares_memory(get_array(view, "a"), get_array(df, "a")) + tm.assert_frame_equal(view, df_orig) + else: + # Without CoW the original will be modified + assert np.shares_memory(get_array(view, "a"), get_array(df, "a")) + assert view.iloc[0, 0] == 5 + + +@pytest.mark.parametrize("dtype", ["int64", "Int64"]) +def test_putmask_no_reference(using_copy_on_write, dtype): + df = DataFrame({"a": [1, 2], "b": 1, "c": 2}, dtype=dtype) + arr_a = get_array(df, "a") + df[df == df] = 5 + + if using_copy_on_write: + assert np.shares_memory(arr_a, get_array(df, "a")) + + +@pytest.mark.parametrize("dtype", ["float64", "Float64"]) +def test_putmask_aligns_rhs_no_reference(using_copy_on_write, dtype): + df = DataFrame({"a": [1.5, 2], "b": 1.5}, dtype=dtype) + arr_a = get_array(df, "a") + df[df == df] = DataFrame({"a": [5.5, 5]}) + + if using_copy_on_write: + assert np.shares_memory(arr_a, get_array(df, "a")) + + +@pytest.mark.parametrize( + "val, exp, warn", [(5.5, True, FutureWarning), (5, False, None)] +) +def test_putmask_dont_copy_some_blocks(using_copy_on_write, val, exp, warn): + df = DataFrame({"a": [1, 2], "b": 1, "c": 1.5}) + view = df[:] + df_orig = df.copy() + indexer = DataFrame( + [[True, False, False], [True, False, False]], columns=list("abc") + ) + with tm.assert_produces_warning(warn, match="incompatible dtype"): + df[indexer] = val + + if using_copy_on_write: + assert not np.shares_memory(get_array(view, "a"), get_array(df, "a")) + # TODO(CoW): Could split blocks to avoid copying the whole block + assert np.shares_memory(get_array(view, "b"), get_array(df, "b")) is exp + assert np.shares_memory(get_array(view, "c"), get_array(df, "c")) + assert df._mgr._has_no_reference(1) is not exp + assert not df._mgr._has_no_reference(2) + tm.assert_frame_equal(view, df_orig) + elif val == 5: + # Without CoW the original will be modified, the other case 
upcasts, e.g. copy + assert np.shares_memory(get_array(view, "a"), get_array(df, "a")) + assert np.shares_memory(get_array(view, "c"), get_array(df, "c")) + assert view.iloc[0, 0] == 5 + + +@pytest.mark.parametrize("dtype", ["int64", "Int64"]) +@pytest.mark.parametrize( + "func", + [ + lambda ser: ser.where(ser > 0, 10), + lambda ser: ser.mask(ser <= 0, 10), + ], +) +def test_where_mask_noop(using_copy_on_write, dtype, func): + ser = Series([1, 2, 3], dtype=dtype) + ser_orig = ser.copy() + + result = func(ser) + + if using_copy_on_write: + assert np.shares_memory(get_array(ser), get_array(result)) + else: + assert not np.shares_memory(get_array(ser), get_array(result)) + + result.iloc[0] = 10 + if using_copy_on_write: + assert not np.shares_memory(get_array(ser), get_array(result)) + tm.assert_series_equal(ser, ser_orig) + + +@pytest.mark.parametrize("dtype", ["int64", "Int64"]) +@pytest.mark.parametrize( + "func", + [ + lambda ser: ser.where(ser < 0, 10), + lambda ser: ser.mask(ser >= 0, 10), + ], +) +def test_where_mask(using_copy_on_write, dtype, func): + ser = Series([1, 2, 3], dtype=dtype) + ser_orig = ser.copy() + + result = func(ser) + + assert not np.shares_memory(get_array(ser), get_array(result)) + tm.assert_series_equal(ser, ser_orig) + + +@pytest.mark.parametrize("dtype, val", [("int64", 10.5), ("Int64", 10)]) +@pytest.mark.parametrize( + "func", + [ + lambda df, val: df.where(df < 0, val), + lambda df, val: df.mask(df >= 0, val), + ], +) +def test_where_mask_noop_on_single_column(using_copy_on_write, dtype, val, func): + df = DataFrame({"a": [1, 2, 3], "b": [-4, -5, -6]}, dtype=dtype) + df_orig = df.copy() + + result = func(df, val) + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(result, "b")) + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")) + else: + assert not np.shares_memory(get_array(df, "b"), get_array(result, "b")) + + result.iloc[0, 1] = 10 + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "b"), get_array(result, "b")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("func", ["mask", "where"]) +def test_chained_where_mask(using_copy_on_write, func): + df = DataFrame({"a": [1, 4, 2], "b": 1}) + df_orig = df.copy() + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + getattr(df["a"], func)(df["a"] > 2, 5, inplace=True) + tm.assert_frame_equal(df, df_orig) + + with tm.raises_chained_assignment_error(): + getattr(df[["a"]], func)(df["a"] > 2, 5, inplace=True) + tm.assert_frame_equal(df, df_orig) + + +def test_asfreq_noop(using_copy_on_write): + df = DataFrame( + {"a": [0.0, None, 2.0, 3.0]}, + index=date_range("1/1/2000", periods=4, freq="T"), + ) + df_orig = df.copy() + df2 = df.asfreq(freq="T") + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column / block + df2.iloc[0, 0] = 0 + + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_iterrows(using_copy_on_write): + df = DataFrame({"a": 0, "b": 1}, index=[1, 2, 3]) + df_orig = df.copy() + + for _, sub in df.iterrows(): + sub.iloc[0] = 100 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + + +def test_interpolate_creates_copy(using_copy_on_write): + # GH#51126 + df = DataFrame({"a": [1.5, np.nan, 3]}) + view = df[:] + expected = df.copy() + + 
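# with CoW, the inplace ffill must not write into data still referenced + # by "view"; it fills a fresh copy, so mutating df afterwards leaves the + # view showing the original values +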
df.ffill(inplace=True) + df.iloc[0, 0] = 100.5 + + if using_copy_on_write: + tm.assert_frame_equal(view, expected) + else: + expected = DataFrame({"a": [100.5, 1.5, 3]}) + tm.assert_frame_equal(view, expected) + + +def test_isetitem(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) + df_orig = df.copy() + df2 = df.copy(deep=None) # Trigger a CoW + df2.isetitem(1, np.array([-1, -2, -3])) # This is inplace + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + else: + assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + df2.loc[0, "a"] = 0 + tm.assert_frame_equal(df, df_orig) # Original is unchanged + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + else: + assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + + +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +def test_isetitem_series(using_copy_on_write, dtype): + df = DataFrame({"a": [1, 2, 3], "b": np.array([4, 5, 6], dtype=dtype)}) + ser = Series([7, 8, 9]) + ser_orig = ser.copy() + df.isetitem(0, ser) + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "a"), get_array(ser)) + assert not df._mgr._has_no_reference(0) + + # mutating dataframe doesn't update series + df.loc[0, "a"] = 0 + tm.assert_series_equal(ser, ser_orig) + + # mutating series doesn't update dataframe + df = DataFrame({"a": [1, 2, 3], "b": np.array([4, 5, 6], dtype=dtype)}) + ser = Series([7, 8, 9]) + df.isetitem(0, ser) + + ser.loc[0] = 0 + expected = DataFrame({"a": [7, 8, 9], "b": np.array([4, 5, 6], dtype=dtype)}) + tm.assert_frame_equal(df, expected) + + +def test_isetitem_frame(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 2}) + rhs = DataFrame({"a": [4, 5, 6], "b": 2}) + df.isetitem([0, 1], rhs) + if using_copy_on_write: + assert np.shares_memory(get_array(df, "a"), get_array(rhs, "a")) + assert np.shares_memory(get_array(df, "b"), get_array(rhs, "b")) + assert not df._mgr._has_no_reference(0) + else: + assert not np.shares_memory(get_array(df, "a"), get_array(rhs, "a")) + assert not np.shares_memory(get_array(df, "b"), get_array(rhs, "b")) + expected = df.copy() + rhs.iloc[0, 0] = 100 + rhs.iloc[0, 1] = 100 + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("key", ["a", ["a"]]) +def test_get(using_copy_on_write, key): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df_orig = df.copy() + + result = df.get(key) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df, "a")) + result.iloc[0] = 0 + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + else: + # for non-CoW it depends on whether we got a Series or DataFrame if it + # is a view or copy or triggers a warning or not + warn = SettingWithCopyWarning if isinstance(key, list) else None + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(warn): + result.iloc[0] = 0 + + if isinstance(key, list): + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + +@pytest.mark.parametrize("axis, key", [(0, 0), (1, "a")]) +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +def test_xs(using_copy_on_write, 
using_array_manager, axis, key, dtype): + single_block = (dtype == "int64") and not using_array_manager + is_view = single_block or (using_array_manager and axis == 1) + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + + result = df.xs(key, axis=axis) + + if axis == 1 or single_block: + assert np.shares_memory(get_array(df, "a"), get_array(result)) + elif using_copy_on_write: + assert result._mgr._has_no_reference(0) + + if using_copy_on_write or is_view: + result.iloc[0] = 0 + else: + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(SettingWithCopyWarning): + result.iloc[0] = 0 + + if using_copy_on_write or (not single_block and axis == 0): + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + +@pytest.mark.parametrize("axis", [0, 1]) +@pytest.mark.parametrize("key, level", [("l1", 0), (2, 1)]) +def test_xs_multiindex(using_copy_on_write, using_array_manager, key, level, axis): + arr = np.arange(18).reshape(6, 3) + index = MultiIndex.from_product([["l1", "l2"], [1, 2, 3]], names=["lev1", "lev2"]) + df = DataFrame(arr, index=index, columns=list("abc")) + if axis == 1: + df = df.transpose().copy() + df_orig = df.copy() + + result = df.xs(key, level=level, axis=axis) + + if level == 0: + assert np.shares_memory( + get_array(df, df.columns[0]), get_array(result, result.columns[0]) + ) + + warn = ( + SettingWithCopyWarning + if not using_copy_on_write and not using_array_manager + else None + ) + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(warn): + result.iloc[0, 0] = 0 + + tm.assert_frame_equal(df, df_orig) + + +def test_update_frame(using_copy_on_write): + df1 = DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]}) + df2 = DataFrame({"b": [100.0]}, index=[1]) + df1_orig = df1.copy() + view = df1[:] + + df1.update(df2) + + expected = DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 100.0, 6.0]}) + tm.assert_frame_equal(df1, expected) + if using_copy_on_write: + # df1 is updated, but its view not + tm.assert_frame_equal(view, df1_orig) + assert np.shares_memory(get_array(df1, "a"), get_array(view, "a")) + assert not np.shares_memory(get_array(df1, "b"), get_array(view, "b")) + else: + tm.assert_frame_equal(view, expected) + + +def test_update_series(using_copy_on_write): + ser1 = Series([1.0, 2.0, 3.0]) + ser2 = Series([100.0], index=[1]) + ser1_orig = ser1.copy() + view = ser1[:] + + ser1.update(ser2) + + expected = Series([1.0, 100.0, 3.0]) + tm.assert_series_equal(ser1, expected) + if using_copy_on_write: + # ser1 is updated, but its view not + tm.assert_series_equal(view, ser1_orig) + else: + tm.assert_series_equal(view, expected) + + +def test_update_chained_assignment(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}) + ser2 = Series([100.0], index=[1]) + df_orig = df.copy() + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["a"].update(ser2) + tm.assert_frame_equal(df, df_orig) + + with tm.raises_chained_assignment_error(): + df[["a"]].update(ser2.to_frame()) + tm.assert_frame_equal(df, df_orig) + + +def test_inplace_arithmetic_series(): + ser = Series([1, 2, 3]) + data = get_array(ser) + ser *= 2 + assert np.shares_memory(get_array(ser), data) + tm.assert_numpy_array_equal(data, get_array(ser)) + + +def test_inplace_arithmetic_series_with_reference(using_copy_on_write): + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + view = ser[:] + ser *= 2 + if using_copy_on_write: + assert not 
np.shares_memory(get_array(ser), get_array(view)) + tm.assert_series_equal(ser_orig, view) + else: + assert np.shares_memory(get_array(ser), get_array(view)) + + +@pytest.mark.parametrize("copy", [True, False]) +def test_transpose(using_copy_on_write, copy, using_array_manager): + df = DataFrame({"a": [1, 2, 3], "b": 1}) + df_orig = df.copy() + result = df.transpose(copy=copy) + + if not copy and not using_array_manager or using_copy_on_write: + assert np.shares_memory(get_array(df, "a"), get_array(result, 0)) + else: + assert not np.shares_memory(get_array(df, "a"), get_array(result, 0)) + + result.iloc[0, 0] = 100 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + + +def test_transpose_different_dtypes(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1.5}) + df_orig = df.copy() + result = df.T + + assert not np.shares_memory(get_array(df, "a"), get_array(result, 0)) + result.iloc[0, 0] = 100 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + + +def test_transpose_ea_single_column(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}, dtype="Int64") + result = df.T + + assert not np.shares_memory(get_array(df, "a"), get_array(result, 0)) + + +def test_transform_frame(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1}) + df_orig = df.copy() + + def func(ser): + ser.iloc[0] = 100 + return ser + + df.transform(func) + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + + +def test_transform_series(using_copy_on_write): + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + + def func(ser): + ser.iloc[0] = 100 + return ser + + ser.transform(func) + if using_copy_on_write: + tm.assert_series_equal(ser, ser_orig) + + +def test_count_read_only_array(): + df = DataFrame({"a": [1, 2], "b": 3}) + result = df.count() + result.iloc[0] = 100 + expected = Series([100, 2], index=["a", "b"]) + tm.assert_series_equal(result, expected) + + +def test_series_view(using_copy_on_write): + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + + ser2 = ser.view() + assert np.shares_memory(get_array(ser), get_array(ser2)) + if using_copy_on_write: + assert not ser2._mgr._has_no_reference(0) + + ser2.iloc[0] = 100 + + if using_copy_on_write: + tm.assert_series_equal(ser_orig, ser) + else: + expected = Series([100, 2, 3]) + tm.assert_series_equal(ser, expected) + + +def test_insert_series(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}) + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + df.insert(loc=1, value=ser, column="b") + if using_copy_on_write: + assert np.shares_memory(get_array(ser), get_array(df, "b")) + assert not df._mgr._has_no_reference(1) + else: + assert not np.shares_memory(get_array(ser), get_array(df, "b")) + + df.iloc[0, 1] = 100 + tm.assert_series_equal(ser, ser_orig) + + +def test_eval(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1}) + df_orig = df.copy() + + result = df.eval("c = a+b") + if using_copy_on_write: + assert np.shares_memory(get_array(df, "a"), get_array(result, "a")) + else: + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")) + + result.iloc[0, 0] = 100 + tm.assert_frame_equal(df, df_orig) + + +def test_eval_inplace(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1}) + df_orig = df.copy() + df_view = df[:] + + df.eval("c = a+b", inplace=True) + assert np.shares_memory(get_array(df, "a"), get_array(df_view, "a")) + + df.iloc[0, 0] = 100 + if using_copy_on_write: + tm.assert_frame_equal(df_view, df_orig) diff --git 
a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_replace.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_replace.py new file mode 100644 index 00000000..085f355d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_replace.py @@ -0,0 +1,432 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + DataFrame, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +@pytest.mark.parametrize( + "replace_kwargs", + [ + {"to_replace": {"a": 1, "b": 4}, "value": -1}, + # Test CoW splits blocks to avoid copying unchanged columns + {"to_replace": {"a": 1}, "value": -1}, + {"to_replace": {"b": 4}, "value": -1}, + {"to_replace": {"b": {4: 1}}}, + # TODO: Add these in a further optimization + # We would need to see which columns got replaced in the mask + # which could be expensive + # {"to_replace": {"b": 1}}, + # 1 + ], +) +def test_replace(using_copy_on_write, replace_kwargs): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": ["foo", "bar", "baz"]}) + df_orig = df.copy() + + df_replaced = df.replace(**replace_kwargs) + + if using_copy_on_write: + if (df_replaced["b"] == df["b"]).all(): + assert np.shares_memory(get_array(df_replaced, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(df_replaced, "c"), get_array(df, "c")) + + # mutating squeezed df triggers a copy-on-write for that column/block + df_replaced.loc[0, "c"] = -1 + if using_copy_on_write: + assert not np.shares_memory(get_array(df_replaced, "c"), get_array(df, "c")) + + if "a" in replace_kwargs["to_replace"]: + arr = get_array(df_replaced, "a") + df_replaced.loc[0, "a"] = 100 + assert np.shares_memory(get_array(df_replaced, "a"), arr) + tm.assert_frame_equal(df, df_orig) + + +def test_replace_regex_inplace_refs(using_copy_on_write): + df = DataFrame({"a": ["aaa", "bbb"]}) + df_orig = df.copy() + view = df[:] + arr = get_array(df, "a") + df.replace(to_replace=r"^a.*$", value="new", inplace=True, regex=True) + if using_copy_on_write: + assert not np.shares_memory(arr, get_array(df, "a")) + assert df._mgr._has_no_reference(0) + tm.assert_frame_equal(view, df_orig) + else: + assert np.shares_memory(arr, get_array(df, "a")) + + +def test_replace_regex_inplace(using_copy_on_write): + df = DataFrame({"a": ["aaa", "bbb"]}) + arr = get_array(df, "a") + df.replace(to_replace=r"^a.*$", value="new", inplace=True, regex=True) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert np.shares_memory(arr, get_array(df, "a")) + + df_orig = df.copy() + df2 = df.replace(to_replace=r"^b.*$", value="new", regex=True) + tm.assert_frame_equal(df_orig, df) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + +def test_replace_regex_inplace_no_op(using_copy_on_write): + df = DataFrame({"a": [1, 2]}) + arr = get_array(df, "a") + df.replace(to_replace=r"^a.$", value="new", inplace=True, regex=True) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert np.shares_memory(arr, get_array(df, "a")) + + df_orig = df.copy() + df2 = df.replace(to_replace=r"^x.$", value="new", regex=True) + tm.assert_frame_equal(df_orig, df) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + +def test_replace_mask_all_false_second_block(using_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3], "b": 100.5, "c": 1, "d": 2}) + df_orig = df.copy() + + df2 = 
df.replace(to_replace=1.5, value=55.5) + + if using_copy_on_write: + # TODO: Block splitting would allow us to avoid copying b + assert np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + else: + assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + df2.loc[0, "c"] = 1 + tm.assert_frame_equal(df, df_orig) # Original is unchanged + + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + # TODO: This should split and not copy the whole block + # assert np.shares_memory(get_array(df, "d"), get_array(df2, "d")) + + +def test_replace_coerce_single_column(using_copy_on_write, using_array_manager): + df = DataFrame({"a": [1.5, 2, 3], "b": 100.5}) + df_orig = df.copy() + + df2 = df.replace(to_replace=1.5, value="a") + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + elif not using_array_manager: + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + if using_copy_on_write: + df2.loc[0, "b"] = 0.5 + tm.assert_frame_equal(df, df_orig) # Original is unchanged + assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + + +def test_replace_to_replace_wrong_dtype(using_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3], "b": 100.5}) + df_orig = df.copy() + + df2 = df.replace(to_replace="xxx", value=1.5) + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + else: + assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + df2.loc[0, "b"] = 0.5 + tm.assert_frame_equal(df, df_orig) # Original is unchanged + + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + + +def test_replace_list_categorical(using_copy_on_write): + df = DataFrame({"a": ["a", "b", "c"]}, dtype="category") + arr = get_array(df, "a") + df.replace(["c"], value="a", inplace=True) + assert np.shares_memory(arr.codes, get_array(df, "a").codes) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + df_orig = df.copy() + df2 = df.replace(["b"], value="a") + assert not np.shares_memory(arr.codes, get_array(df2, "a").codes) + + tm.assert_frame_equal(df, df_orig) + + +def test_replace_list_inplace_refs_categorical(using_copy_on_write): + df = DataFrame({"a": ["a", "b", "c"]}, dtype="category") + view = df[:] + df_orig = df.copy() + df.replace(["c"], value="a", inplace=True) + if using_copy_on_write: + assert not np.shares_memory( + get_array(view, "a").codes, get_array(df, "a").codes + ) + tm.assert_frame_equal(df_orig, view) + else: + # This could be inplace + assert not np.shares_memory( + get_array(view, "a").codes, get_array(df, "a").codes + ) + + +@pytest.mark.parametrize("to_replace", [1.5, [1.5], []]) +def test_replace_inplace(using_copy_on_write, to_replace): + df = DataFrame({"a": [1.5, 2, 3]}) + arr_a = get_array(df, "a") + df.replace(to_replace=1.5, value=15.5, inplace=True) + + assert np.shares_memory(get_array(df, "a"), arr_a) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + +@pytest.mark.parametrize("to_replace", [1.5, [1.5]]) +def 
test_replace_inplace_reference(using_copy_on_write, to_replace): + df = DataFrame({"a": [1.5, 2, 3]}) + arr_a = get_array(df, "a") + view = df[:] + df.replace(to_replace=to_replace, value=15.5, inplace=True) + + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), arr_a) + assert df._mgr._has_no_reference(0) + assert view._mgr._has_no_reference(0) + else: + assert np.shares_memory(get_array(df, "a"), arr_a) + + +@pytest.mark.parametrize("to_replace", ["a", 100.5]) +def test_replace_inplace_reference_no_op(using_copy_on_write, to_replace): + df = DataFrame({"a": [1.5, 2, 3]}) + arr_a = get_array(df, "a") + view = df[:] + df.replace(to_replace=to_replace, value=15.5, inplace=True) + + assert np.shares_memory(get_array(df, "a"), arr_a) + if using_copy_on_write: + assert not df._mgr._has_no_reference(0) + assert not view._mgr._has_no_reference(0) + + +@pytest.mark.parametrize("to_replace", [1, [1]]) +@pytest.mark.parametrize("val", [1, 1.5]) +def test_replace_categorical_inplace_reference(using_copy_on_write, val, to_replace): + df = DataFrame({"a": Categorical([1, 2, 3])}) + df_orig = df.copy() + arr_a = get_array(df, "a") + view = df[:] + df.replace(to_replace=to_replace, value=val, inplace=True) + + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a").codes, arr_a.codes) + assert df._mgr._has_no_reference(0) + assert view._mgr._has_no_reference(0) + tm.assert_frame_equal(view, df_orig) + else: + assert np.shares_memory(get_array(df, "a").codes, arr_a.codes) + + +@pytest.mark.parametrize("val", [1, 1.5]) +def test_replace_categorical_inplace(using_copy_on_write, val): + df = DataFrame({"a": Categorical([1, 2, 3])}) + arr_a = get_array(df, "a") + df.replace(to_replace=1, value=val, inplace=True) + + assert np.shares_memory(get_array(df, "a").codes, arr_a.codes) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + expected = DataFrame({"a": Categorical([val, 2, 3])}) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("val", [1, 1.5]) +def test_replace_categorical(using_copy_on_write, val): + df = DataFrame({"a": Categorical([1, 2, 3])}) + df_orig = df.copy() + df2 = df.replace(to_replace=1, value=val) + + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert df2._mgr._has_no_reference(0) + assert not np.shares_memory(get_array(df, "a").codes, get_array(df2, "a").codes) + tm.assert_frame_equal(df, df_orig) + + arr_a = get_array(df2, "a").codes + df2.iloc[0, 0] = 2.0 + assert np.shares_memory(get_array(df2, "a").codes, arr_a) + + +@pytest.mark.parametrize("method", ["where", "mask"]) +def test_masking_inplace(using_copy_on_write, method): + df = DataFrame({"a": [1.5, 2, 3]}) + df_orig = df.copy() + arr_a = get_array(df, "a") + view = df[:] + + method = getattr(df, method) + method(df["a"] > 1.6, -1, inplace=True) + + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), arr_a) + assert df._mgr._has_no_reference(0) + assert view._mgr._has_no_reference(0) + tm.assert_frame_equal(view, df_orig) + else: + assert np.shares_memory(get_array(df, "a"), arr_a) + + +def test_replace_empty_list(using_copy_on_write): + df = DataFrame({"a": [1, 2]}) + + df2 = df.replace([], []) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert not df._mgr._has_no_reference(0) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + arr_a = get_array(df, "a") + df.replace([], []) + if using_copy_on_write: + assert 
np.shares_memory(get_array(df, "a"), arr_a) + assert not df._mgr._has_no_reference(0) + assert not df2._mgr._has_no_reference(0) + + +@pytest.mark.parametrize("value", ["d", None]) +def test_replace_object_list_inplace(using_copy_on_write, value): + df = DataFrame({"a": ["a", "b", "c"]}) + arr = get_array(df, "a") + df.replace(["c"], value, inplace=True) + if using_copy_on_write or value is None: + assert np.shares_memory(arr, get_array(df, "a")) + else: + # This could be inplace + assert not np.shares_memory(arr, get_array(df, "a")) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + +def test_replace_list_multiple_elements_inplace(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}) + arr = get_array(df, "a") + df.replace([1, 2], 4, inplace=True) + if using_copy_on_write: + assert np.shares_memory(arr, get_array(df, "a")) + assert df._mgr._has_no_reference(0) + else: + assert np.shares_memory(arr, get_array(df, "a")) + + +def test_replace_list_none(using_copy_on_write): + df = DataFrame({"a": ["a", "b", "c"]}) + + df_orig = df.copy() + df2 = df.replace(["b"], value=None) + tm.assert_frame_equal(df, df_orig) + + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + +def test_replace_list_none_inplace_refs(using_copy_on_write): + df = DataFrame({"a": ["a", "b", "c"]}) + arr = get_array(df, "a") + df_orig = df.copy() + view = df[:] + df.replace(["a"], value=None, inplace=True) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert not np.shares_memory(arr, get_array(df, "a")) + tm.assert_frame_equal(df_orig, view) + else: + assert np.shares_memory(arr, get_array(df, "a")) + + +def test_replace_columnwise_no_op_inplace(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}) + view = df[:] + df_orig = df.copy() + df.replace({"a": 10}, 100, inplace=True) + if using_copy_on_write: + assert np.shares_memory(get_array(view, "a"), get_array(df, "a")) + df.iloc[0, 0] = 100 + tm.assert_frame_equal(view, df_orig) + + +def test_replace_columnwise_no_op(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}) + df_orig = df.copy() + df2 = df.replace({"a": 10}, 100) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + df2.iloc[0, 0] = 100 + tm.assert_frame_equal(df, df_orig) + + +def test_replace_chained_assignment(using_copy_on_write): + df = DataFrame({"a": [1, np.nan, 2], "b": 1}) + df_orig = df.copy() + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["a"].replace(1, 100, inplace=True) + tm.assert_frame_equal(df, df_orig) + + with tm.raises_chained_assignment_error(): + df[["a"]].replace(1, 100, inplace=True) + tm.assert_frame_equal(df, df_orig) + + +def test_replace_listlike(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}) + df_orig = df.copy() + + result = df.replace([200, 201], [11, 11]) + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + + result.iloc[0, 0] = 100 + tm.assert_frame_equal(df, df_orig) + + result = df.replace([200, 2], [10, 10]) + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_replace_listlike_inplace(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}) + arr = get_array(df, "a") + df.replace([200, 2], [10, 11], inplace=True) + assert np.shares_memory(get_array(df, "a"), arr) + + view = 
df[:] + df_orig = df.copy() + df.replace([200, 3], [10, 11], inplace=True) + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), arr) + tm.assert_frame_equal(view, df_orig) + else: + assert np.shares_memory(get_array(df, "a"), arr) + tm.assert_frame_equal(df, view) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_setitem.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_setitem.py new file mode 100644 index 00000000..5016b57b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_setitem.py @@ -0,0 +1,142 @@ +import numpy as np + +from pandas import ( + DataFrame, + Index, + MultiIndex, + RangeIndex, + Series, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + +# ----------------------------------------------------------------------------- +# Copy/view behaviour for the values that are set in a DataFrame + + +def test_set_column_with_array(): + # Case: setting an array as a new column (df[col] = arr) copies that data + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + arr = np.array([1, 2, 3], dtype="int64") + + df["c"] = arr + + # the array data is copied + assert not np.shares_memory(get_array(df, "c"), arr) + # and thus modifying the array does not modify the DataFrame + arr[0] = 0 + tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c")) + + +def test_set_column_with_series(using_copy_on_write): + # Case: setting a series as a new column (df[col] = s) copies that data + # (with delayed copy with CoW) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + ser = Series([1, 2, 3]) + + df["c"] = ser + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "c"), get_array(ser)) + else: + # the series data is copied + assert not np.shares_memory(get_array(df, "c"), get_array(ser)) + + # and modifying the series does not modify the DataFrame + ser.iloc[0] = 0 + assert ser.iloc[0] == 0 + tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c")) + + +def test_set_column_with_index(using_copy_on_write): + # Case: setting an index as a new column (df[col] = idx) copies that data + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + idx = Index([1, 2, 3]) + + df["c"] = idx + + # the index data is copied + assert not np.shares_memory(get_array(df, "c"), idx.values) + + idx = RangeIndex(1, 4) + arr = idx.values + + df["d"] = idx + + assert not np.shares_memory(get_array(df, "d"), arr) + + +def test_set_columns_with_dataframe(using_copy_on_write): + # Case: setting a DataFrame as new columns copies that data + # (with delayed copy with CoW) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df2 = DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]}) + + df[["c", "d"]] = df2 + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + else: + # the data is copied + assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + + # and modifying the set DataFrame does not modify the original DataFrame + df2.iloc[0, 0] = 0 + tm.assert_series_equal(df["c"], Series([7, 8, 9], name="c")) + + +def test_setitem_series_no_copy(using_copy_on_write): + # Case: setting a Series as column into a DataFrame can delay copying that data + df = DataFrame({"a": [1, 2, 3]}) + rhs = Series([4, 5, 6]) + rhs_orig = rhs.copy() + + # adding a new column + df["b"] = rhs + if using_copy_on_write: + assert np.shares_memory(get_array(rhs), get_array(df, "b")) + + df.iloc[0, 1] = 100 + tm.assert_series_equal(rhs, rhs_orig) + + +def 
test_setitem_series_no_copy_single_block(using_copy_on_write): + # Overwriting an existing column that is a single block + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + rhs = Series([4, 5, 6]) + rhs_orig = rhs.copy() + + df["a"] = rhs + if using_copy_on_write: + assert np.shares_memory(get_array(rhs), get_array(df, "a")) + + df.iloc[0, 0] = 100 + tm.assert_series_equal(rhs, rhs_orig) + + +def test_setitem_series_no_copy_split_block(using_copy_on_write): + # Overwriting an existing column that is part of a larger block + df = DataFrame({"a": [1, 2, 3], "b": 1}) + rhs = Series([4, 5, 6]) + rhs_orig = rhs.copy() + + df["b"] = rhs + if using_copy_on_write: + assert np.shares_memory(get_array(rhs), get_array(df, "b")) + + df.iloc[0, 1] = 100 + tm.assert_series_equal(rhs, rhs_orig) + + +def test_setitem_series_column_midx_broadcasting(using_copy_on_write): + # Setting a Series to multiple columns will repeat the data + # (currently copying the data eagerly) + df = DataFrame( + [[1, 2, 3], [3, 4, 5]], + columns=MultiIndex.from_arrays([["a", "a", "b"], [1, 2, 3]]), + ) + rhs = Series([10, 11]) + df["a"] = rhs + assert not np.shares_memory(get_array(rhs), df._get_column_array(0)) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_util.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_util.py new file mode 100644 index 00000000..ff55330d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/test_util.py @@ -0,0 +1,14 @@ +import numpy as np + +from pandas import DataFrame +from pandas.tests.copy_view.util import get_array + + +def test_get_array_numpy(): + df = DataFrame({"a": [1, 2, 3]}) + assert np.shares_memory(get_array(df, "a"), get_array(df, "a")) + + +def test_get_array_masked(): + df = DataFrame({"a": [1, 2, 3]}, dtype="Int64") + assert np.shares_memory(get_array(df, "a"), get_array(df, "a")) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/util.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/util.py new file mode 100644 index 00000000..96933442 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/copy_view/util.py @@ -0,0 +1,30 @@ +from pandas import ( + Categorical, + Index, + Series, +) +from pandas.core.arrays import BaseMaskedArray + + +def get_array(obj, col=None): + """ + Helper method to get array for a DataFrame column or a Series. + + Equivalent of df[col].values, but without going through normal getitem, + which triggers tracking references / CoW (and we might be testing that + this is done by some other operation). 
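+ + Note: for BaseMaskedArray columns the underlying "_data" ndarray is + returned, and Categoricals are returned as-is (so tests can inspect + their ".codes" directly).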
+ """ + if isinstance(obj, Index): + arr = obj._values + elif isinstance(obj, Series) and (col is None or obj.name == col): + arr = obj._values + else: + assert col is not None + icol = obj.columns.get_loc(col) + assert isinstance(icol, int) + arr = obj._get_column_array(icol) + if isinstance(arr, BaseMaskedArray): + return arr._data + elif isinstance(arr, Categorical): + return arr + return getattr(arr, "_ndarray", arr) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_can_hold_element.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_can_hold_element.py new file mode 100644 index 00000000..3b7d76ea --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_can_hold_element.py @@ -0,0 +1,79 @@ +import numpy as np + +from pandas.core.dtypes.cast import can_hold_element + + +def test_can_hold_element_range(any_int_numpy_dtype): + # GH#44261 + dtype = np.dtype(any_int_numpy_dtype) + arr = np.array([], dtype=dtype) + + rng = range(2, 127) + assert can_hold_element(arr, rng) + + # negatives -> can't be held by uint dtypes + rng = range(-2, 127) + if dtype.kind == "i": + assert can_hold_element(arr, rng) + else: + assert not can_hold_element(arr, rng) + + rng = range(2, 255) + if dtype == "int8": + assert not can_hold_element(arr, rng) + else: + assert can_hold_element(arr, rng) + + rng = range(-255, 65537) + if dtype.kind == "u": + assert not can_hold_element(arr, rng) + elif dtype.itemsize < 4: + assert not can_hold_element(arr, rng) + else: + assert can_hold_element(arr, rng) + + # empty + rng = range(-(10**10), -(10**10)) + assert len(rng) == 0 + # assert can_hold_element(arr, rng) + + rng = range(10**10, 10**10) + assert len(rng) == 0 + assert can_hold_element(arr, rng) + + +def test_can_hold_element_int_values_float_ndarray(): + arr = np.array([], dtype=np.int64) + + element = np.array([1.0, 2.0]) + assert can_hold_element(arr, element) + + assert not can_hold_element(arr, element + 0.5) + + # integer but not losslessly castable to int64 + element = np.array([3, 2**65], dtype=np.float64) + assert not can_hold_element(arr, element) + + +def test_can_hold_element_int8_int(): + arr = np.array([], dtype=np.int8) + + element = 2 + assert can_hold_element(arr, element) + assert can_hold_element(arr, np.int8(element)) + assert can_hold_element(arr, np.uint8(element)) + assert can_hold_element(arr, np.int16(element)) + assert can_hold_element(arr, np.uint16(element)) + assert can_hold_element(arr, np.int32(element)) + assert can_hold_element(arr, np.uint32(element)) + assert can_hold_element(arr, np.int64(element)) + assert can_hold_element(arr, np.uint64(element)) + + element = 2**9 + assert not can_hold_element(arr, element) + assert not can_hold_element(arr, np.int16(element)) + assert not can_hold_element(arr, np.uint16(element)) + assert not can_hold_element(arr, np.int32(element)) + assert not can_hold_element(arr, np.uint32(element)) + assert not can_hold_element(arr, np.int64(element)) + assert not can_hold_element(arr, np.uint64(element)) diff --git 
a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_construct_from_scalar.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_construct_from_scalar.py new file mode 100644 index 00000000..0ce04ce2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_construct_from_scalar.py @@ -0,0 +1,55 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.cast import construct_1d_arraylike_from_scalar +from pandas.core.dtypes.dtypes import CategoricalDtype + +from pandas import ( + Categorical, + Timedelta, +) +import pandas._testing as tm + + +def test_cast_1d_array_like_from_scalar_categorical(): + # see gh-19565 + # + # Categorical result from scalar did not maintain + # categories and ordering of the passed dtype. + cats = ["a", "b", "c"] + cat_type = CategoricalDtype(categories=cats, ordered=False) + expected = Categorical(["a", "a"], categories=cats) + + result = construct_1d_arraylike_from_scalar("a", len(expected), cat_type) + tm.assert_categorical_equal(result, expected) + + +def test_cast_1d_array_like_from_timestamp(fixed_now_ts): + # check we don't lose nanoseconds + ts = fixed_now_ts + Timedelta(1) + res = construct_1d_arraylike_from_scalar(ts, 2, np.dtype("M8[ns]")) + assert res[0] == ts + + +def test_cast_1d_array_like_from_timedelta(): + # check we don't lose nanoseconds + td = Timedelta(1) + res = construct_1d_arraylike_from_scalar(td, 2, np.dtype("m8[ns]")) + assert res[0] == td + + +def test_cast_1d_array_like_mismatched_datetimelike(): + td = np.timedelta64("NaT", "ns") + dt = np.datetime64("NaT", "ns") + + with pytest.raises(TypeError, match="Cannot cast"): + construct_1d_arraylike_from_scalar(td, 2, dt.dtype) + + with pytest.raises(TypeError, match="Cannot cast"): + construct_1d_arraylike_from_scalar(np.timedelta64(4, "ns"), 2, dt.dtype) + + with pytest.raises(TypeError, match="Cannot cast"): + construct_1d_arraylike_from_scalar(dt, 2, td.dtype) + + with pytest.raises(TypeError, match="Cannot cast"): + construct_1d_arraylike_from_scalar(np.datetime64(4, "ns"), 2, td.dtype) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_construct_ndarray.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_construct_ndarray.py new file mode 100644 index 00000000..10085ddd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_construct_ndarray.py @@ -0,0 +1,30 @@ +import numpy as np +import pytest + +import pandas._testing as tm +from pandas.core.construction import sanitize_array + + +@pytest.mark.parametrize( + "values, dtype, expected", + [ + ([1, 2, 3], None, np.array([1, 2, 3], dtype=np.int64)), + (np.array([1, 2, 3]), None, np.array([1, 2, 3])), + (["1", "2", None], None, np.array(["1", "2", None])), + (["1", "2", None], np.dtype("str"), np.array(["1", "2", None])), + ([1, 2, None], np.dtype("str"), np.array(["1", "2", None])), + ], +) +def test_construct_1d_ndarray_preserving_na(values, dtype, expected): + result = sanitize_array(values, index=None, dtype=dtype) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]"]) +def test_construct_1d_ndarray_preserving_na_datetimelike(dtype): + arr = np.arange(5, dtype=np.int64).view(dtype) + expected = np.array(list(arr), dtype=object) + assert all(isinstance(x, type(arr[0])) for x in expected) + + result = sanitize_array(arr, index=None, dtype=np.dtype(object)) + tm.assert_numpy_array_equal(result, expected) diff --git 
a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_construct_object_arr.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_construct_object_arr.py new file mode 100644 index 00000000..cb44f91f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_construct_object_arr.py @@ -0,0 +1,20 @@ +import pytest + +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike + + +@pytest.mark.parametrize("datum1", [1, 2.0, "3", (4, 5), [6, 7], None]) +@pytest.mark.parametrize("datum2", [8, 9.0, "10", (11, 12), [13, 14], None]) +def test_cast_1d_array(datum1, datum2): + data = [datum1, datum2] + result = construct_1d_object_array_from_listlike(data) + + # Direct comparison fails: https://github.com/numpy/numpy/issues/10218 + assert result.dtype == "object" + assert list(result) == data + + +@pytest.mark.parametrize("val", [1, 2.0, None]) +def test_cast_1d_array_invalid_scalar(val): + with pytest.raises(TypeError, match="has no len()"): + construct_1d_object_array_from_listlike(val) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_dict_compat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_dict_compat.py new file mode 100644 index 00000000..13dc82d7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_dict_compat.py @@ -0,0 +1,14 @@ +import numpy as np + +from pandas.core.dtypes.cast import dict_compat + +from pandas import Timestamp + + +def test_dict_compat(): + data_datetime64 = {np.datetime64("1990-03-15"): 1, np.datetime64("2015-03-15"): 2} + data_unchanged = {1: 2, 3: 4, 5: 6} + expected = {Timestamp("1990-3-15"): 1, Timestamp("2015-03-15"): 2} + assert dict_compat(data_datetime64) == expected + assert dict_compat(expected) == expected + assert dict_compat(data_unchanged) == data_unchanged diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_downcast.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_downcast.py new file mode 100644 index 00000000..c01eac74 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_downcast.py @@ -0,0 +1,97 @@ +import decimal + +import numpy as np +import pytest + +from pandas.core.dtypes.cast import maybe_downcast_to_dtype + +from pandas import ( + Series, + Timedelta, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "arr,dtype,expected", + [ + ( + np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995]), + "infer", + np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995]), + ), + ( + np.array([8.0, 8.0, 8.0, 8.0, 8.9999999999995]), + "infer", + np.array([8, 8, 8, 8, 9], dtype=np.int64), + ), + ( + np.array([8.0, 8.0, 8.0, 8.0, 9.0000000000005]), + "infer", + np.array([8, 8, 8, 8, 9], dtype=np.int64), + ), + ( + # This is a judgement call, but we do _not_ downcast Decimal + # objects + np.array([decimal.Decimal(0.0)]), + "int64", + np.array([decimal.Decimal(0.0)]), + ), + ( + # GH#45837 + np.array([Timedelta(days=1), Timedelta(days=2)], dtype=object), + "infer", + np.array([1, 2], dtype="m8[D]").astype("m8[ns]"), + ), + # TODO: similar for dt64, dt64tz, Period, Interval? + ], +) +def test_downcast(arr, expected, dtype): + result = maybe_downcast_to_dtype(arr, dtype) + tm.assert_numpy_array_equal(result, expected) + + +def test_downcast_booleans(): + # see gh-16875: coercing of booleans. 
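+ # even though float64 is requested below, the booleans must not be + # coerced; maybe_downcast_to_dtype is expected to return the Series + # unchanged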
+ ser = Series([True, True, False]) + result = maybe_downcast_to_dtype(ser, np.dtype(np.float64)) + + expected = ser + tm.assert_series_equal(result, expected) + + +def test_downcast_conversion_no_nan(any_real_numpy_dtype): + dtype = any_real_numpy_dtype + expected = np.array([1, 2]) + arr = np.array([1.0, 2.0], dtype=dtype) + + result = maybe_downcast_to_dtype(arr, "infer") + tm.assert_almost_equal(result, expected, check_dtype=False) + + +def test_downcast_conversion_nan(float_numpy_dtype): + dtype = float_numpy_dtype + data = [1.0, 2.0, np.nan] + + expected = np.array(data, dtype=dtype) + arr = np.array(data, dtype=dtype) + + result = maybe_downcast_to_dtype(arr, "infer") + tm.assert_almost_equal(result, expected) + + +def test_downcast_conversion_empty(any_real_numpy_dtype): + dtype = any_real_numpy_dtype + arr = np.array([], dtype=dtype) + result = maybe_downcast_to_dtype(arr, np.dtype("int64")) + tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64)) + + +@pytest.mark.parametrize("klass", [np.datetime64, np.timedelta64]) +def test_datetime_likes_nan(klass): + dtype = klass.__name__ + "[ns]" + arr = np.array([1, 2, np.nan]) + + exp = np.array([1, 2, klass("NaT")], dtype) + res = maybe_downcast_to_dtype(arr, dtype) + tm.assert_numpy_array_equal(res, exp) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_find_common_type.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_find_common_type.py new file mode 100644 index 00000000..8ce05337 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_find_common_type.py @@ -0,0 +1,175 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.cast import find_common_type +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + IntervalDtype, + PeriodDtype, +) + +from pandas import ( + Categorical, + Index, +) + + +@pytest.mark.parametrize( + "source_dtypes,expected_common_dtype", + [ + ((np.int64,), np.int64), + ((np.uint64,), np.uint64), + ((np.float32,), np.float32), + ((object,), object), + # Into ints. + ((np.int16, np.int64), np.int64), + ((np.int32, np.uint32), np.int64), + ((np.uint16, np.uint64), np.uint64), + # Into floats. + ((np.float16, np.float32), np.float32), + ((np.float16, np.int16), np.float32), + ((np.float32, np.int16), np.float32), + ((np.uint64, np.int64), np.float64), + ((np.int16, np.float64), np.float64), + ((np.float16, np.int64), np.float64), + # Into others. + ((np.complex128, np.int32), np.complex128), + ((object, np.float32), object), + ((object, np.int16), object), + # Bool with int. + ((np.dtype("bool"), np.int64), object), + ((np.dtype("bool"), np.int32), object), + ((np.dtype("bool"), np.int16), object), + ((np.dtype("bool"), np.int8), object), + ((np.dtype("bool"), np.uint64), object), + ((np.dtype("bool"), np.uint32), object), + ((np.dtype("bool"), np.uint16), object), + ((np.dtype("bool"), np.uint8), object), + # Bool with float. 
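+ # (as with the int cases above, bool mixed with float has no common + # numpy dtype, so the expected result falls back to object)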
+ ((np.dtype("bool"), np.float64), object), + ((np.dtype("bool"), np.float32), object), + ( + (np.dtype("datetime64[ns]"), np.dtype("datetime64[ns]")), + np.dtype("datetime64[ns]"), + ), + ( + (np.dtype("timedelta64[ns]"), np.dtype("timedelta64[ns]")), + np.dtype("timedelta64[ns]"), + ), + ( + (np.dtype("datetime64[ns]"), np.dtype("datetime64[ms]")), + np.dtype("datetime64[ns]"), + ), + ( + (np.dtype("timedelta64[ms]"), np.dtype("timedelta64[ns]")), + np.dtype("timedelta64[ns]"), + ), + ((np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")), object), + ((np.dtype("datetime64[ns]"), np.int64), object), + ], +) +def test_numpy_dtypes(source_dtypes, expected_common_dtype): + source_dtypes = [pandas_dtype(x) for x in source_dtypes] + assert find_common_type(source_dtypes) == expected_common_dtype + + +def test_raises_empty_input(): + with pytest.raises(ValueError, match="no types given"): + find_common_type([]) + + +@pytest.mark.parametrize( + "dtypes,exp_type", + [ + ([CategoricalDtype()], "category"), + ([object, CategoricalDtype()], object), + ([CategoricalDtype(), CategoricalDtype()], "category"), + ], +) +def test_categorical_dtype(dtypes, exp_type): + assert find_common_type(dtypes) == exp_type + + +def test_datetimetz_dtype_match(): + dtype = DatetimeTZDtype(unit="ns", tz="US/Eastern") + assert find_common_type([dtype, dtype]) == "datetime64[ns, US/Eastern]" + + +@pytest.mark.parametrize( + "dtype2", + [ + DatetimeTZDtype(unit="ns", tz="Asia/Tokyo"), + np.dtype("datetime64[ns]"), + object, + np.int64, + ], +) +def test_datetimetz_dtype_mismatch(dtype2): + dtype = DatetimeTZDtype(unit="ns", tz="US/Eastern") + assert find_common_type([dtype, dtype2]) == object + assert find_common_type([dtype2, dtype]) == object + + +def test_period_dtype_match(): + dtype = PeriodDtype(freq="D") + assert find_common_type([dtype, dtype]) == "period[D]" + + +@pytest.mark.parametrize( + "dtype2", + [ + DatetimeTZDtype(unit="ns", tz="Asia/Tokyo"), + PeriodDtype(freq="2D"), + PeriodDtype(freq="H"), + np.dtype("datetime64[ns]"), + object, + np.int64, + ], +) +def test_period_dtype_mismatch(dtype2): + dtype = PeriodDtype(freq="D") + assert find_common_type([dtype, dtype2]) == object + assert find_common_type([dtype2, dtype]) == object + + +interval_dtypes = [ + IntervalDtype(np.int64, "right"), + IntervalDtype(np.float64, "right"), + IntervalDtype(np.uint64, "right"), + IntervalDtype(DatetimeTZDtype(unit="ns", tz="US/Eastern"), "right"), + IntervalDtype("M8[ns]", "right"), + IntervalDtype("m8[ns]", "right"), +] + + +@pytest.mark.parametrize("left", interval_dtypes) +@pytest.mark.parametrize("right", interval_dtypes) +def test_interval_dtype(left, right): + result = find_common_type([left, right]) + + if left is right: + assert result is left + + elif left.subtype.kind in ["i", "u", "f"]: + # i.e. 
numeric + if right.subtype.kind in ["i", "u", "f"]: + # both numeric -> common numeric subtype + expected = IntervalDtype(np.float64, "right") + assert result == expected + else: + assert result == object + + else: + assert result == object + + +@pytest.mark.parametrize("dtype", interval_dtypes) +def test_interval_dtype_with_categorical(dtype): + obj = Index([], dtype=dtype) + + cat = Categorical([], categories=obj) + + result = find_common_type([dtype, cat.dtype]) + assert result == dtype diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_infer_datetimelike.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_infer_datetimelike.py new file mode 100644 index 00000000..3c3844e6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_infer_datetimelike.py @@ -0,0 +1,28 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + NaT, + Series, + Timestamp, +) + + +@pytest.mark.parametrize( + "data,exp_size", + [ + # see gh-16362. + ([[NaT, "a", "b", 0], [NaT, "b", "c", 1]], 8), + ([[NaT, "a", 0], [NaT, "b", 1]], 6), + ], +) +def test_maybe_infer_to_datetimelike_df_construct(data, exp_size): + result = DataFrame(np.array(data)) + assert result.size == exp_size + + +def test_maybe_infer_to_datetimelike_ser_construct(): + # see gh-19671. + result = Series(["M1701", Timestamp("20130101")]) + assert result.dtype.kind == "O" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_infer_dtype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_infer_dtype.py new file mode 100644 index 00000000..50eaa1f4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_infer_dtype.py @@ -0,0 +1,208 @@ +from datetime import ( + date, + datetime, + timedelta, +) + +import numpy as np +import pytest + +from pandas.core.dtypes.cast import ( + infer_dtype_from, + infer_dtype_from_array, + infer_dtype_from_scalar, +) +from pandas.core.dtypes.common import is_dtype_equal + +from pandas import ( + Categorical, + Interval, + Period, + Series, + Timedelta, + Timestamp, + date_range, +) + + +def test_infer_dtype_from_int_scalar(any_int_numpy_dtype): + # Test that infer_dtype_from_scalar is + # returning correct dtype for int and float. 
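+    # Illustrative round-trip, assuming any_int_numpy_dtype is e.g. "int32":
+    # infer_dtype_from_scalar(np.int32(12)) -> (np.int32, 12); the inferred
+    # dtype is the scalar's own concrete numpy type, as asserted below.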
+ data = np.dtype(any_int_numpy_dtype).type(12) + dtype, val = infer_dtype_from_scalar(data) + assert dtype == type(data) + + +def test_infer_dtype_from_float_scalar(float_numpy_dtype): + float_numpy_dtype = np.dtype(float_numpy_dtype).type + data = float_numpy_dtype(12) + + dtype, val = infer_dtype_from_scalar(data) + assert dtype == float_numpy_dtype + + +@pytest.mark.parametrize( + "data,exp_dtype", [(12, np.int64), (np.float64(12), np.float64)] +) +def test_infer_dtype_from_python_scalar(data, exp_dtype): + dtype, val = infer_dtype_from_scalar(data) + assert dtype == exp_dtype + + +@pytest.mark.parametrize("bool_val", [True, False]) +def test_infer_dtype_from_boolean(bool_val): + dtype, val = infer_dtype_from_scalar(bool_val) + assert dtype == np.bool_ + + +def test_infer_dtype_from_complex(complex_dtype): + data = np.dtype(complex_dtype).type(1) + dtype, val = infer_dtype_from_scalar(data) + assert dtype == np.complex128 + + +def test_infer_dtype_from_datetime(): + dt64 = np.datetime64(1, "ns") + dtype, val = infer_dtype_from_scalar(dt64) + assert dtype == "M8[ns]" + + ts = Timestamp(1) + dtype, val = infer_dtype_from_scalar(ts) + assert dtype == "M8[ns]" + + dt = datetime(2000, 1, 1, 0, 0) + dtype, val = infer_dtype_from_scalar(dt) + assert dtype == "M8[us]" + + +def test_infer_dtype_from_timedelta(): + td64 = np.timedelta64(1, "ns") + dtype, val = infer_dtype_from_scalar(td64) + assert dtype == "m8[ns]" + + pytd = timedelta(1) + dtype, val = infer_dtype_from_scalar(pytd) + assert dtype == "m8[us]" + + td = Timedelta(1) + dtype, val = infer_dtype_from_scalar(td) + assert dtype == "m8[ns]" + + +@pytest.mark.parametrize("freq", ["M", "D"]) +def test_infer_dtype_from_period(freq): + p = Period("2011-01-01", freq=freq) + dtype, val = infer_dtype_from_scalar(p) + + exp_dtype = f"period[{freq}]" + + assert dtype == exp_dtype + assert val == p + + +def test_infer_dtype_misc(): + dt = date(2000, 1, 1) + dtype, val = infer_dtype_from_scalar(dt) + assert dtype == np.object_ + + ts = Timestamp(1, tz="US/Eastern") + dtype, val = infer_dtype_from_scalar(ts) + assert dtype == "datetime64[ns, US/Eastern]" + + +@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo"]) +def test_infer_from_scalar_tz(tz): + dt = Timestamp(1, tz=tz) + dtype, val = infer_dtype_from_scalar(dt) + + exp_dtype = f"datetime64[ns, {tz}]" + + assert dtype == exp_dtype + assert val == dt + + +@pytest.mark.parametrize( + "left, right, subtype", + [ + (0, 1, "int64"), + (0.0, 1.0, "float64"), + (Timestamp(0), Timestamp(1), "datetime64[ns]"), + (Timestamp(0, tz="UTC"), Timestamp(1, tz="UTC"), "datetime64[ns, UTC]"), + (Timedelta(0), Timedelta(1), "timedelta64[ns]"), + ], +) +def test_infer_from_interval(left, right, subtype, closed): + # GH 30337 + interval = Interval(left, right, closed) + result_dtype, result_value = infer_dtype_from_scalar(interval) + expected_dtype = f"interval[{subtype}, {closed}]" + assert result_dtype == expected_dtype + assert result_value == interval + + +def test_infer_dtype_from_scalar_errors(): + msg = "invalid ndarray passed to infer_dtype_from_scalar" + + with pytest.raises(ValueError, match=msg): + infer_dtype_from_scalar(np.array([1])) + + +@pytest.mark.parametrize( + "value, expected", + [ + ("foo", np.object_), + (b"foo", np.object_), + (1, np.int64), + (1.5, np.float64), + (np.datetime64("2016-01-01"), np.dtype("M8[s]")), + (Timestamp("20160101"), np.dtype("M8[s]")), + (Timestamp("20160101", tz="UTC"), "datetime64[s, UTC]"), + ], +) +def test_infer_dtype_from_scalar(value, expected): + 
dtype, _ = infer_dtype_from_scalar(value) + assert is_dtype_equal(dtype, expected) + + with pytest.raises(TypeError, match="must be list-like"): + infer_dtype_from_array(value) + + +@pytest.mark.parametrize( + "arr, expected", + [ + ([1], np.dtype(int)), + (np.array([1], dtype=np.int64), np.int64), + ([np.nan, 1, ""], np.object_), + (np.array([[1.0, 2.0]]), np.float64), + (Categorical(list("aabc")), "category"), + (Categorical([1, 2, 3]), "category"), + (date_range("20160101", periods=3), np.dtype("=M8[ns]")), + ( + date_range("20160101", periods=3, tz="US/Eastern"), + "datetime64[ns, US/Eastern]", + ), + (Series([1.0, 2, 3]), np.float64), + (Series(list("abc")), np.object_), + ( + Series(date_range("20160101", periods=3, tz="US/Eastern")), + "datetime64[ns, US/Eastern]", + ), + ], +) +def test_infer_dtype_from_array(arr, expected): + dtype, _ = infer_dtype_from_array(arr) + assert is_dtype_equal(dtype, expected) + + +@pytest.mark.parametrize("cls", [np.datetime64, np.timedelta64]) +def test_infer_dtype_from_scalar_zerodim_datetimelike(cls): + # ndarray.item() can incorrectly return int instead of td64/dt64 + val = cls(1234, "ns") + arr = np.array(val) + + dtype, res = infer_dtype_from_scalar(arr) + assert dtype.type is cls + assert isinstance(res, cls) + + dtype, res = infer_dtype_from(arr) + assert dtype.type is cls diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_maybe_box_native.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_maybe_box_native.py new file mode 100644 index 00000000..3f62f31d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_maybe_box_native.py @@ -0,0 +1,40 @@ +from datetime import datetime + +import numpy as np +import pytest + +from pandas.core.dtypes.cast import maybe_box_native + +from pandas import ( + Interval, + Period, + Timedelta, + Timestamp, +) + + +@pytest.mark.parametrize( + "obj,expected_dtype", + [ + (b"\x00\x10", bytes), + (int(4), int), + (np.uint(4), int), + (np.int32(-4), int), + (np.uint8(4), int), + (float(454.98), float), + (np.float16(0.4), float), + (np.float64(1.4), float), + (np.bool_(False), bool), + (datetime(2005, 2, 25), datetime), + (np.datetime64("2005-02-25"), Timestamp), + (Timestamp("2005-02-25"), Timestamp), + (np.timedelta64(1, "D"), Timedelta), + (Timedelta(1, "D"), Timedelta), + (Interval(0, 1), Interval), + (Period("4Q2005"), Period), + ], +) +def test_maybe_box_native(obj, expected_dtype): + boxed_obj = maybe_box_native(obj) + result_dtype = type(boxed_obj) + assert result_dtype is expected_dtype diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_promote.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_promote.py new file mode 100644 index 00000000..1becf3b9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/cast/test_promote.py @@ -0,0 +1,530 @@ +""" +These test the method maybe_promote from core/dtypes/cast.py +""" + +import datetime +from decimal import Decimal + +import numpy as np +import pytest + +from pandas._libs.tslibs import NaT + +from pandas.core.dtypes.cast import maybe_promote +from pandas.core.dtypes.common import is_scalar +from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.missing import isna + +import pandas as pd + + +def _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar=None): + """ + Auxiliary function to unify testing of scalar/array promotion. 
+ + Parameters + ---------- + dtype : dtype + The value to pass on as the first argument to maybe_promote. + fill_value : scalar + The value to pass on as the second argument to maybe_promote as + a scalar. + expected_dtype : dtype + The expected dtype returned by maybe_promote (by design this is the + same regardless of whether fill_value was passed as a scalar or in an + array!). + exp_val_for_scalar : scalar + The expected value for the (potentially upcast) fill_value returned by + maybe_promote. + """ + assert is_scalar(fill_value) + + # here, we pass on fill_value as a scalar directly; the expected value + # returned from maybe_promote is fill_value, potentially upcast to the + # returned dtype. + result_dtype, result_fill_value = maybe_promote(dtype, fill_value) + expected_fill_value = exp_val_for_scalar + + assert result_dtype == expected_dtype + _assert_match(result_fill_value, expected_fill_value) + + +def _assert_match(result_fill_value, expected_fill_value): + # GH#23982/25425 require the same type in addition to equality/NA-ness + res_type = type(result_fill_value) + ex_type = type(expected_fill_value) + + if hasattr(result_fill_value, "dtype"): + # Compare types in a way that is robust to platform-specific + # idiosyncrasies where e.g. sometimes we get "ulonglong" as an alias + # for "uint64" or "intc" as an alias for "int32" + assert result_fill_value.dtype.kind == expected_fill_value.dtype.kind + assert result_fill_value.dtype.itemsize == expected_fill_value.dtype.itemsize + else: + # On some builds, type comparison fails, e.g. np.int32 != np.int32 + assert res_type == ex_type or res_type.__name__ == ex_type.__name__ + + match_value = result_fill_value == expected_fill_value + if match_value is pd.NA: + match_value = False + + # Note: type check above ensures that we have the _same_ NA value + # for missing values, None == None (which is checked + # through match_value above), but np.nan != np.nan and pd.NaT != pd.NaT + match_missing = isna(result_fill_value) and isna(expected_fill_value) + + assert match_value or match_missing + + +@pytest.mark.parametrize( + "dtype, fill_value, expected_dtype", + [ + # size 8 + ("int8", 1, "int8"), + ("int8", np.iinfo("int8").max + 1, "int16"), + ("int8", np.iinfo("int16").max + 1, "int32"), + ("int8", np.iinfo("int32").max + 1, "int64"), + ("int8", np.iinfo("int64").max + 1, "object"), + ("int8", -1, "int8"), + ("int8", np.iinfo("int8").min - 1, "int16"), + ("int8", np.iinfo("int16").min - 1, "int32"), + ("int8", np.iinfo("int32").min - 1, "int64"), + ("int8", np.iinfo("int64").min - 1, "object"), + # keep signed-ness as long as possible + ("uint8", 1, "uint8"), + ("uint8", np.iinfo("int8").max + 1, "uint8"), + ("uint8", np.iinfo("uint8").max + 1, "uint16"), + ("uint8", np.iinfo("int16").max + 1, "uint16"), + ("uint8", np.iinfo("uint16").max + 1, "uint32"), + ("uint8", np.iinfo("int32").max + 1, "uint32"), + ("uint8", np.iinfo("uint32").max + 1, "uint64"), + ("uint8", np.iinfo("int64").max + 1, "uint64"), + ("uint8", np.iinfo("uint64").max + 1, "object"), + # max of uint8 cannot be contained in int8 + ("uint8", -1, "int16"), + ("uint8", np.iinfo("int8").min - 1, "int16"), + ("uint8", np.iinfo("int16").min - 1, "int32"), + ("uint8", np.iinfo("int32").min - 1, "int64"), + ("uint8", np.iinfo("int64").min - 1, "object"), + # size 16 + ("int16", 1, "int16"), + ("int16", np.iinfo("int8").max + 1, "int16"), + ("int16", np.iinfo("int16").max + 1, "int32"), + ("int16", np.iinfo("int32").max + 1, "int64"), + ("int16", np.iinfo("int64").max + 1, 
"object"), + ("int16", -1, "int16"), + ("int16", np.iinfo("int8").min - 1, "int16"), + ("int16", np.iinfo("int16").min - 1, "int32"), + ("int16", np.iinfo("int32").min - 1, "int64"), + ("int16", np.iinfo("int64").min - 1, "object"), + ("uint16", 1, "uint16"), + ("uint16", np.iinfo("int8").max + 1, "uint16"), + ("uint16", np.iinfo("uint8").max + 1, "uint16"), + ("uint16", np.iinfo("int16").max + 1, "uint16"), + ("uint16", np.iinfo("uint16").max + 1, "uint32"), + ("uint16", np.iinfo("int32").max + 1, "uint32"), + ("uint16", np.iinfo("uint32").max + 1, "uint64"), + ("uint16", np.iinfo("int64").max + 1, "uint64"), + ("uint16", np.iinfo("uint64").max + 1, "object"), + ("uint16", -1, "int32"), + ("uint16", np.iinfo("int8").min - 1, "int32"), + ("uint16", np.iinfo("int16").min - 1, "int32"), + ("uint16", np.iinfo("int32").min - 1, "int64"), + ("uint16", np.iinfo("int64").min - 1, "object"), + # size 32 + ("int32", 1, "int32"), + ("int32", np.iinfo("int8").max + 1, "int32"), + ("int32", np.iinfo("int16").max + 1, "int32"), + ("int32", np.iinfo("int32").max + 1, "int64"), + ("int32", np.iinfo("int64").max + 1, "object"), + ("int32", -1, "int32"), + ("int32", np.iinfo("int8").min - 1, "int32"), + ("int32", np.iinfo("int16").min - 1, "int32"), + ("int32", np.iinfo("int32").min - 1, "int64"), + ("int32", np.iinfo("int64").min - 1, "object"), + ("uint32", 1, "uint32"), + ("uint32", np.iinfo("int8").max + 1, "uint32"), + ("uint32", np.iinfo("uint8").max + 1, "uint32"), + ("uint32", np.iinfo("int16").max + 1, "uint32"), + ("uint32", np.iinfo("uint16").max + 1, "uint32"), + ("uint32", np.iinfo("int32").max + 1, "uint32"), + ("uint32", np.iinfo("uint32").max + 1, "uint64"), + ("uint32", np.iinfo("int64").max + 1, "uint64"), + ("uint32", np.iinfo("uint64").max + 1, "object"), + ("uint32", -1, "int64"), + ("uint32", np.iinfo("int8").min - 1, "int64"), + ("uint32", np.iinfo("int16").min - 1, "int64"), + ("uint32", np.iinfo("int32").min - 1, "int64"), + ("uint32", np.iinfo("int64").min - 1, "object"), + # size 64 + ("int64", 1, "int64"), + ("int64", np.iinfo("int8").max + 1, "int64"), + ("int64", np.iinfo("int16").max + 1, "int64"), + ("int64", np.iinfo("int32").max + 1, "int64"), + ("int64", np.iinfo("int64").max + 1, "object"), + ("int64", -1, "int64"), + ("int64", np.iinfo("int8").min - 1, "int64"), + ("int64", np.iinfo("int16").min - 1, "int64"), + ("int64", np.iinfo("int32").min - 1, "int64"), + ("int64", np.iinfo("int64").min - 1, "object"), + ("uint64", 1, "uint64"), + ("uint64", np.iinfo("int8").max + 1, "uint64"), + ("uint64", np.iinfo("uint8").max + 1, "uint64"), + ("uint64", np.iinfo("int16").max + 1, "uint64"), + ("uint64", np.iinfo("uint16").max + 1, "uint64"), + ("uint64", np.iinfo("int32").max + 1, "uint64"), + ("uint64", np.iinfo("uint32").max + 1, "uint64"), + ("uint64", np.iinfo("int64").max + 1, "uint64"), + ("uint64", np.iinfo("uint64").max + 1, "object"), + ("uint64", -1, "object"), + ("uint64", np.iinfo("int8").min - 1, "object"), + ("uint64", np.iinfo("int16").min - 1, "object"), + ("uint64", np.iinfo("int32").min - 1, "object"), + ("uint64", np.iinfo("int64").min - 1, "object"), + ], +) +def test_maybe_promote_int_with_int(dtype, fill_value, expected_dtype): + dtype = np.dtype(dtype) + expected_dtype = np.dtype(expected_dtype) + + # output is not a generic int, but corresponds to expected_dtype + exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0] + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +def 
test_maybe_promote_int_with_float(any_int_numpy_dtype, float_numpy_dtype): + dtype = np.dtype(any_int_numpy_dtype) + fill_dtype = np.dtype(float_numpy_dtype) + + # create array of given dtype; casts "1" to correct dtype + fill_value = np.array([1], dtype=fill_dtype)[0] + + # filling int with float always upcasts to float64 + expected_dtype = np.float64 + # fill_value can be different float type + exp_val_for_scalar = np.float64(fill_value) + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +def test_maybe_promote_float_with_int(float_numpy_dtype, any_int_numpy_dtype): + dtype = np.dtype(float_numpy_dtype) + fill_dtype = np.dtype(any_int_numpy_dtype) + + # create array of given dtype; casts "1" to correct dtype + fill_value = np.array([1], dtype=fill_dtype)[0] + + # filling float with int always keeps float dtype + # because: np.finfo('float32').max > np.iinfo('uint64').max + expected_dtype = dtype + # output is not a generic float, but corresponds to expected_dtype + exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0] + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +@pytest.mark.parametrize( + "dtype, fill_value, expected_dtype", + [ + # float filled with float + ("float32", 1, "float32"), + ("float32", np.finfo("float32").max * 1.1, "float64"), + ("float64", 1, "float64"), + ("float64", np.finfo("float32").max * 1.1, "float64"), + # complex filled with float + ("complex64", 1, "complex64"), + ("complex64", np.finfo("float32").max * 1.1, "complex128"), + ("complex128", 1, "complex128"), + ("complex128", np.finfo("float32").max * 1.1, "complex128"), + # float filled with complex + ("float32", 1 + 1j, "complex64"), + ("float32", np.finfo("float32").max * (1.1 + 1j), "complex128"), + ("float64", 1 + 1j, "complex128"), + ("float64", np.finfo("float32").max * (1.1 + 1j), "complex128"), + # complex filled with complex + ("complex64", 1 + 1j, "complex64"), + ("complex64", np.finfo("float32").max * (1.1 + 1j), "complex128"), + ("complex128", 1 + 1j, "complex128"), + ("complex128", np.finfo("float32").max * (1.1 + 1j), "complex128"), + ], +) +def test_maybe_promote_float_with_float(dtype, fill_value, expected_dtype): + dtype = np.dtype(dtype) + expected_dtype = np.dtype(expected_dtype) + + # output is not a generic float, but corresponds to expected_dtype + exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0] + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +def test_maybe_promote_bool_with_any(any_numpy_dtype): + dtype = np.dtype(bool) + fill_dtype = np.dtype(any_numpy_dtype) + + # create array of given dtype; casts "1" to correct dtype + fill_value = np.array([1], dtype=fill_dtype)[0] + + # filling bool with anything but bool casts to object + expected_dtype = np.dtype(object) if fill_dtype != bool else fill_dtype + exp_val_for_scalar = fill_value + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +def test_maybe_promote_any_with_bool(any_numpy_dtype): + dtype = np.dtype(any_numpy_dtype) + fill_value = True + + # filling anything but bool with bool casts to object + expected_dtype = np.dtype(object) if dtype != bool else dtype + # output is not a generic bool, but corresponds to expected_dtype + exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0] + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +def test_maybe_promote_bytes_with_any(bytes_dtype, any_numpy_dtype): + dtype = np.dtype(bytes_dtype) + fill_dtype 
= np.dtype(any_numpy_dtype) + + # create array of given dtype; casts "1" to correct dtype + fill_value = np.array([1], dtype=fill_dtype)[0] + + # we never use bytes dtype internally, always promote to object + expected_dtype = np.dtype(np.object_) + exp_val_for_scalar = fill_value + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +def test_maybe_promote_any_with_bytes(any_numpy_dtype): + dtype = np.dtype(any_numpy_dtype) + + # create array of given dtype + fill_value = b"abc" + + # we never use bytes dtype internally, always promote to object + expected_dtype = np.dtype(np.object_) + # output is not a generic bytes, but corresponds to expected_dtype + exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0] + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +def test_maybe_promote_datetime64_with_any(datetime64_dtype, any_numpy_dtype): + dtype = np.dtype(datetime64_dtype) + fill_dtype = np.dtype(any_numpy_dtype) + + # create array of given dtype; casts "1" to correct dtype + fill_value = np.array([1], dtype=fill_dtype)[0] + + # filling datetime with anything but datetime casts to object + if fill_dtype.kind == "M": + expected_dtype = dtype + # for datetime dtypes, scalar values get cast to to_datetime64 + exp_val_for_scalar = pd.Timestamp(fill_value).to_datetime64() + else: + expected_dtype = np.dtype(object) + exp_val_for_scalar = fill_value + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +@pytest.mark.parametrize( + "fill_value", + [ + pd.Timestamp("now"), + np.datetime64("now"), + datetime.datetime.now(), + datetime.date.today(), + ], + ids=["pd.Timestamp", "np.datetime64", "datetime.datetime", "datetime.date"], +) +def test_maybe_promote_any_with_datetime64(any_numpy_dtype, fill_value): + dtype = np.dtype(any_numpy_dtype) + + # filling datetime with anything but datetime casts to object + if dtype.kind == "M": + expected_dtype = dtype + # for datetime dtypes, scalar values get cast to pd.Timestamp.value + exp_val_for_scalar = pd.Timestamp(fill_value).to_datetime64() + else: + expected_dtype = np.dtype(object) + exp_val_for_scalar = fill_value + + if type(fill_value) is datetime.date and dtype.kind == "M": + # Casting date to dt64 is deprecated, in 2.0 enforced to cast to object + expected_dtype = np.dtype(object) + exp_val_for_scalar = fill_value + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +@pytest.mark.parametrize( + "fill_value", + [ + pd.Timestamp(2023, 1, 1), + np.datetime64("2023-01-01"), + datetime.datetime(2023, 1, 1), + datetime.date(2023, 1, 1), + ], + ids=["pd.Timestamp", "np.datetime64", "datetime.datetime", "datetime.date"], +) +def test_maybe_promote_any_numpy_dtype_with_datetimetz( + any_numpy_dtype, tz_aware_fixture, fill_value +): + dtype = np.dtype(any_numpy_dtype) + fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture) + + fill_value = pd.Series([fill_value], dtype=fill_dtype)[0] + + # filling any numpy dtype with datetimetz casts to object + expected_dtype = np.dtype(object) + exp_val_for_scalar = fill_value + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +def test_maybe_promote_timedelta64_with_any(timedelta64_dtype, any_numpy_dtype): + dtype = np.dtype(timedelta64_dtype) + fill_dtype = np.dtype(any_numpy_dtype) + + # create array of given dtype; casts "1" to correct dtype + fill_value = np.array([1], dtype=fill_dtype)[0] + + # filling timedelta with anything but timedelta casts to object + if fill_dtype.kind == 
"m": + expected_dtype = dtype + # for timedelta dtypes, scalar values get cast to pd.Timedelta.value + exp_val_for_scalar = pd.Timedelta(fill_value).to_timedelta64() + else: + expected_dtype = np.dtype(object) + exp_val_for_scalar = fill_value + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +@pytest.mark.parametrize( + "fill_value", + [pd.Timedelta(days=1), np.timedelta64(24, "h"), datetime.timedelta(1)], + ids=["pd.Timedelta", "np.timedelta64", "datetime.timedelta"], +) +def test_maybe_promote_any_with_timedelta64(any_numpy_dtype, fill_value): + dtype = np.dtype(any_numpy_dtype) + + # filling anything but timedelta with timedelta casts to object + if dtype.kind == "m": + expected_dtype = dtype + # for timedelta dtypes, scalar values get cast to pd.Timedelta.value + exp_val_for_scalar = pd.Timedelta(fill_value).to_timedelta64() + else: + expected_dtype = np.dtype(object) + exp_val_for_scalar = fill_value + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +def test_maybe_promote_string_with_any(string_dtype, any_numpy_dtype): + dtype = np.dtype(string_dtype) + fill_dtype = np.dtype(any_numpy_dtype) + + # create array of given dtype; casts "1" to correct dtype + fill_value = np.array([1], dtype=fill_dtype)[0] + + # filling string with anything casts to object + expected_dtype = np.dtype(object) + exp_val_for_scalar = fill_value + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +def test_maybe_promote_any_with_string(any_numpy_dtype): + dtype = np.dtype(any_numpy_dtype) + + # create array of given dtype + fill_value = "abc" + + # filling anything with a string casts to object + expected_dtype = np.dtype(object) + exp_val_for_scalar = fill_value + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +def test_maybe_promote_object_with_any(object_dtype, any_numpy_dtype): + dtype = np.dtype(object_dtype) + fill_dtype = np.dtype(any_numpy_dtype) + + # create array of given dtype; casts "1" to correct dtype + fill_value = np.array([1], dtype=fill_dtype)[0] + + # filling object with anything stays object + expected_dtype = np.dtype(object) + exp_val_for_scalar = fill_value + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +def test_maybe_promote_any_with_object(any_numpy_dtype): + dtype = np.dtype(any_numpy_dtype) + + # create array of object dtype from a scalar value (i.e. passing + # dtypes.common.is_scalar), which can however not be cast to int/float etc. 
+ fill_value = pd.DateOffset(1) + + # filling object with anything stays object + expected_dtype = np.dtype(object) + exp_val_for_scalar = fill_value + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) + + +def test_maybe_promote_any_numpy_dtype_with_na(any_numpy_dtype, nulls_fixture): + fill_value = nulls_fixture + dtype = np.dtype(any_numpy_dtype) + + if isinstance(fill_value, Decimal): + # Subject to change, but ATM (When Decimal(NAN) is being added to nulls_fixture) + # this is the existing behavior in maybe_promote, + # hinges on is_valid_na_for_dtype + if dtype.kind in "iufc": + if dtype.kind in "iu": + expected_dtype = np.dtype(np.float64) + else: + expected_dtype = dtype + exp_val_for_scalar = np.nan + else: + expected_dtype = np.dtype(object) + exp_val_for_scalar = fill_value + elif dtype.kind in "iu" and fill_value is not NaT: + # integer + other missing value (np.nan / None) casts to float + expected_dtype = np.float64 + exp_val_for_scalar = np.nan + elif dtype == object and fill_value is NaT: + # inserting into object does not cast the value + # but *does* cast None to np.nan + expected_dtype = np.dtype(object) + exp_val_for_scalar = fill_value + elif dtype.kind in "mM": + # datetime / timedelta cast all missing values to dtyped-NaT + expected_dtype = dtype + exp_val_for_scalar = dtype.type("NaT", "ns") + elif fill_value is NaT: + # NaT upcasts everything that's not datetime/timedelta to object + expected_dtype = np.dtype(object) + exp_val_for_scalar = NaT + elif dtype.kind in "fc": + # float / complex + missing value (!= NaT) stays the same + expected_dtype = dtype + exp_val_for_scalar = np.nan + else: + # all other cases cast to object, and use np.nan as missing value + expected_dtype = np.dtype(object) + if fill_value is pd.NA: + exp_val_for_scalar = pd.NA + else: + exp_val_for_scalar = np.nan + + _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_common.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_common.py new file mode 100644 index 00000000..9419cb69 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_common.py @@ -0,0 +1,792 @@ +from __future__ import annotations + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.astype import astype_array +import pandas.core.dtypes.common as com +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + CategoricalDtypeType, + DatetimeTZDtype, + ExtensionDtype, + IntervalDtype, + PeriodDtype, +) +from pandas.core.dtypes.missing import isna + +import pandas as pd +import pandas._testing as tm +from pandas.api.types import pandas_dtype +from pandas.arrays import SparseArray + + +# EA & Actual Dtypes +def to_ea_dtypes(dtypes): + """convert list of string dtypes to EA dtype""" + return [getattr(pd, dt + "Dtype") for dt in dtypes] + + +def to_numpy_dtypes(dtypes): + """convert list of string dtypes to numpy dtype""" + return [getattr(np, dt) for dt in dtypes if isinstance(dt, str)] + + +class TestNumpyEADtype: + # Passing invalid dtype, both as a string or object, must raise TypeError + # Per issue GH15520 + @pytest.mark.parametrize("box", [pd.Timestamp, "pd.Timestamp", list]) + def test_invalid_dtype_error(self, box): + with pytest.raises(TypeError, match="not understood"): + com.pandas_dtype(box) + + @pytest.mark.parametrize( + "dtype", + [ + object, + "float64", + np.object_, + np.dtype("object"), + "O", + 
np.float64, + float, + np.dtype("float64"), + "object_", + ], + ) + def test_pandas_dtype_valid(self, dtype): + assert com.pandas_dtype(dtype) == dtype + + @pytest.mark.parametrize( + "dtype", ["M8[ns]", "m8[ns]", "object", "float64", "int64"] + ) + def test_numpy_dtype(self, dtype): + assert com.pandas_dtype(dtype) == np.dtype(dtype) + + def test_numpy_string_dtype(self): + # do not parse freq-like string as period dtype + assert com.pandas_dtype("U") == np.dtype("U") + assert com.pandas_dtype("S") == np.dtype("S") + + @pytest.mark.parametrize( + "dtype", + [ + "datetime64[ns, US/Eastern]", + "datetime64[ns, Asia/Tokyo]", + "datetime64[ns, UTC]", + # GH#33885 check that the M8 alias is understood + "M8[ns, US/Eastern]", + "M8[ns, Asia/Tokyo]", + "M8[ns, UTC]", + ], + ) + def test_datetimetz_dtype(self, dtype): + assert com.pandas_dtype(dtype) == DatetimeTZDtype.construct_from_string(dtype) + assert com.pandas_dtype(dtype) == dtype + + def test_categorical_dtype(self): + assert com.pandas_dtype("category") == CategoricalDtype() + + @pytest.mark.parametrize( + "dtype", + [ + "period[D]", + "period[3M]", + "period[U]", + "Period[D]", + "Period[3M]", + "Period[U]", + ], + ) + def test_period_dtype(self, dtype): + assert com.pandas_dtype(dtype) is not PeriodDtype(dtype) + assert com.pandas_dtype(dtype) == PeriodDtype(dtype) + assert com.pandas_dtype(dtype) == dtype + + +dtypes = { + "datetime_tz": com.pandas_dtype("datetime64[ns, US/Eastern]"), + "datetime": com.pandas_dtype("datetime64[ns]"), + "timedelta": com.pandas_dtype("timedelta64[ns]"), + "period": PeriodDtype("D"), + "integer": np.dtype(np.int64), + "float": np.dtype(np.float64), + "object": np.dtype(object), + "category": com.pandas_dtype("category"), + "string": pd.StringDtype(), +} + + +@pytest.mark.parametrize("name1,dtype1", list(dtypes.items()), ids=lambda x: str(x)) +@pytest.mark.parametrize("name2,dtype2", list(dtypes.items()), ids=lambda x: str(x)) +def test_dtype_equal(name1, dtype1, name2, dtype2): + # match equal to self, but not equal to other + assert com.is_dtype_equal(dtype1, dtype1) + if name1 != name2: + assert not com.is_dtype_equal(dtype1, dtype2) + + +@pytest.mark.parametrize("name,dtype", list(dtypes.items()), ids=lambda x: str(x)) +def test_pyarrow_string_import_error(name, dtype): + # GH-44276 + assert not com.is_dtype_equal(dtype, "string[pyarrow]") + + +@pytest.mark.parametrize( + "dtype1,dtype2", + [ + (np.int8, np.int64), + (np.int16, np.int64), + (np.int32, np.int64), + (np.float32, np.float64), + (PeriodDtype("D"), PeriodDtype("2D")), # PeriodType + ( + com.pandas_dtype("datetime64[ns, US/Eastern]"), + com.pandas_dtype("datetime64[ns, CET]"), + ), # Datetime + (None, None), # gh-15941: no exception should be raised. + ], +) +def test_dtype_equal_strict(dtype1, dtype2): + assert not com.is_dtype_equal(dtype1, dtype2) + + +def get_is_dtype_funcs(): + """ + Get all functions in pandas.core.dtypes.common that + begin with 'is_' and end with 'dtype' + + """ + fnames = [f for f in dir(com) if (f.startswith("is_") and f.endswith("dtype"))] + fnames.remove("is_string_or_object_np_dtype") # fastpath requires np.dtype obj + return [getattr(com, fname) for fname in fnames] + + +@pytest.mark.filterwarnings( + "ignore:is_categorical_dtype is deprecated:DeprecationWarning" +) +@pytest.mark.parametrize("func", get_is_dtype_funcs(), ids=lambda x: x.__name__) +def test_get_dtype_error_catch(func): + # see gh-15941 + # + # No exception should be raised. 
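+    # The handful of deprecated predicates listed below still return False for
+    # None, but now do so under a DeprecationWarning, which
+    # tm.assert_produces_warning checks alongside the boolean result.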
+ + msg = f"{func.__name__} is deprecated" + warn = None + if ( + func is com.is_int64_dtype + or func is com.is_interval_dtype + or func is com.is_datetime64tz_dtype + or func is com.is_categorical_dtype + or func is com.is_period_dtype + ): + warn = DeprecationWarning + + with tm.assert_produces_warning(warn, match=msg): + assert not func(None) + + +def test_is_object(): + assert com.is_object_dtype(object) + assert com.is_object_dtype(np.array([], dtype=object)) + + assert not com.is_object_dtype(int) + assert not com.is_object_dtype(np.array([], dtype=int)) + assert not com.is_object_dtype([1, 2, 3]) + + +@pytest.mark.parametrize( + "check_scipy", [False, pytest.param(True, marks=td.skip_if_no_scipy)] +) +def test_is_sparse(check_scipy): + msg = "is_sparse is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + assert com.is_sparse(SparseArray([1, 2, 3])) + + assert not com.is_sparse(np.array([1, 2, 3])) + + if check_scipy: + import scipy.sparse + + assert not com.is_sparse(scipy.sparse.bsr_matrix([1, 2, 3])) + + +def test_is_scipy_sparse(): + sp_sparse = pytest.importorskip("scipy.sparse") + + assert com.is_scipy_sparse(sp_sparse.bsr_matrix([1, 2, 3])) + + assert not com.is_scipy_sparse(SparseArray([1, 2, 3])) + + +def test_is_datetime64_dtype(): + assert not com.is_datetime64_dtype(object) + assert not com.is_datetime64_dtype([1, 2, 3]) + assert not com.is_datetime64_dtype(np.array([], dtype=int)) + + assert com.is_datetime64_dtype(np.datetime64) + assert com.is_datetime64_dtype(np.array([], dtype=np.datetime64)) + + +def test_is_datetime64tz_dtype(): + msg = "is_datetime64tz_dtype is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + assert not com.is_datetime64tz_dtype(object) + assert not com.is_datetime64tz_dtype([1, 2, 3]) + assert not com.is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) + assert com.is_datetime64tz_dtype(pd.DatetimeIndex(["2000"], tz="US/Eastern")) + + +def test_custom_ea_kind_M_not_datetime64tz(): + # GH 34986 + class NotTZDtype(ExtensionDtype): + @property + def kind(self) -> str: + return "M" + + not_tz_dtype = NotTZDtype() + msg = "is_datetime64tz_dtype is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + assert not com.is_datetime64tz_dtype(not_tz_dtype) + assert not com.needs_i8_conversion(not_tz_dtype) + + +def test_is_timedelta64_dtype(): + assert not com.is_timedelta64_dtype(object) + assert not com.is_timedelta64_dtype(None) + assert not com.is_timedelta64_dtype([1, 2, 3]) + assert not com.is_timedelta64_dtype(np.array([], dtype=np.datetime64)) + assert not com.is_timedelta64_dtype("0 days") + assert not com.is_timedelta64_dtype("0 days 00:00:00") + assert not com.is_timedelta64_dtype(["0 days 00:00:00"]) + assert not com.is_timedelta64_dtype("NO DATE") + + assert com.is_timedelta64_dtype(np.timedelta64) + assert com.is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]")) + assert com.is_timedelta64_dtype(pd.to_timedelta(["0 days", "1 days"])) + + +def test_is_period_dtype(): + msg = "is_period_dtype is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + assert not com.is_period_dtype(object) + assert not com.is_period_dtype([1, 2, 3]) + assert not com.is_period_dtype(pd.Period("2017-01-01")) + + assert com.is_period_dtype(PeriodDtype(freq="D")) + assert com.is_period_dtype(pd.PeriodIndex([], freq="A")) + + +def test_is_interval_dtype(): + msg = "is_interval_dtype is deprecated" + with tm.assert_produces_warning(DeprecationWarning, 
match=msg): + assert not com.is_interval_dtype(object) + assert not com.is_interval_dtype([1, 2, 3]) + + assert com.is_interval_dtype(IntervalDtype()) + + interval = pd.Interval(1, 2, closed="right") + assert not com.is_interval_dtype(interval) + assert com.is_interval_dtype(pd.IntervalIndex([interval])) + + +def test_is_categorical_dtype(): + msg = "is_categorical_dtype is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + assert not com.is_categorical_dtype(object) + assert not com.is_categorical_dtype([1, 2, 3]) + + assert com.is_categorical_dtype(CategoricalDtype()) + assert com.is_categorical_dtype(pd.Categorical([1, 2, 3])) + assert com.is_categorical_dtype(pd.CategoricalIndex([1, 2, 3])) + + +def test_is_string_dtype(): + assert not com.is_string_dtype(int) + assert not com.is_string_dtype(pd.Series([1, 2])) + + assert com.is_string_dtype(str) + assert com.is_string_dtype(object) + assert com.is_string_dtype(np.array(["a", "b"])) + assert com.is_string_dtype(pd.StringDtype()) + + +@pytest.mark.parametrize( + "data", + [[(0, 1), (1, 1)], pd.Categorical([1, 2, 3]), np.array([1, 2], dtype=object)], +) +def test_is_string_dtype_arraylike_with_object_elements_not_strings(data): + # GH 15585 + assert not com.is_string_dtype(pd.Series(data)) + + +def test_is_string_dtype_nullable(nullable_string_dtype): + assert com.is_string_dtype(pd.array(["a", "b"], dtype=nullable_string_dtype)) + + +integer_dtypes: list = [] + + +@pytest.mark.parametrize( + "dtype", + integer_dtypes + + [pd.Series([1, 2])] + + tm.ALL_INT_NUMPY_DTYPES + + to_numpy_dtypes(tm.ALL_INT_NUMPY_DTYPES) + + tm.ALL_INT_EA_DTYPES + + to_ea_dtypes(tm.ALL_INT_EA_DTYPES), +) +def test_is_integer_dtype(dtype): + assert com.is_integer_dtype(dtype) + + +@pytest.mark.parametrize( + "dtype", + [ + str, + float, + np.datetime64, + np.timedelta64, + pd.Index([1, 2.0]), + np.array(["a", "b"]), + np.array([], dtype=np.timedelta64), + ], +) +def test_is_not_integer_dtype(dtype): + assert not com.is_integer_dtype(dtype) + + +signed_integer_dtypes: list = [] + + +@pytest.mark.parametrize( + "dtype", + signed_integer_dtypes + + [pd.Series([1, 2])] + + tm.SIGNED_INT_NUMPY_DTYPES + + to_numpy_dtypes(tm.SIGNED_INT_NUMPY_DTYPES) + + tm.SIGNED_INT_EA_DTYPES + + to_ea_dtypes(tm.SIGNED_INT_EA_DTYPES), +) +def test_is_signed_integer_dtype(dtype): + assert com.is_integer_dtype(dtype) + + +@pytest.mark.parametrize( + "dtype", + [ + str, + float, + np.datetime64, + np.timedelta64, + pd.Index([1, 2.0]), + np.array(["a", "b"]), + np.array([], dtype=np.timedelta64), + ] + + tm.UNSIGNED_INT_NUMPY_DTYPES + + to_numpy_dtypes(tm.UNSIGNED_INT_NUMPY_DTYPES) + + tm.UNSIGNED_INT_EA_DTYPES + + to_ea_dtypes(tm.UNSIGNED_INT_EA_DTYPES), +) +def test_is_not_signed_integer_dtype(dtype): + assert not com.is_signed_integer_dtype(dtype) + + +unsigned_integer_dtypes: list = [] + + +@pytest.mark.parametrize( + "dtype", + unsigned_integer_dtypes + + [pd.Series([1, 2], dtype=np.uint32)] + + tm.UNSIGNED_INT_NUMPY_DTYPES + + to_numpy_dtypes(tm.UNSIGNED_INT_NUMPY_DTYPES) + + tm.UNSIGNED_INT_EA_DTYPES + + to_ea_dtypes(tm.UNSIGNED_INT_EA_DTYPES), +) +def test_is_unsigned_integer_dtype(dtype): + assert com.is_unsigned_integer_dtype(dtype) + + +@pytest.mark.parametrize( + "dtype", + [ + str, + float, + np.datetime64, + np.timedelta64, + pd.Index([1, 2.0]), + np.array(["a", "b"]), + np.array([], dtype=np.timedelta64), + ] + + tm.SIGNED_INT_NUMPY_DTYPES + + to_numpy_dtypes(tm.SIGNED_INT_NUMPY_DTYPES) + + tm.SIGNED_INT_EA_DTYPES + + 
to_ea_dtypes(tm.SIGNED_INT_EA_DTYPES), +) +def test_is_not_unsigned_integer_dtype(dtype): + assert not com.is_unsigned_integer_dtype(dtype) + + +@pytest.mark.parametrize( + "dtype", [np.int64, np.array([1, 2], dtype=np.int64), "Int64", pd.Int64Dtype] +) +def test_is_int64_dtype(dtype): + msg = "is_int64_dtype is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + assert com.is_int64_dtype(dtype) + + +def test_type_comparison_with_numeric_ea_dtype(any_numeric_ea_dtype): + # GH#43038 + assert pandas_dtype(any_numeric_ea_dtype) == any_numeric_ea_dtype + + +def test_type_comparison_with_real_numpy_dtype(any_real_numpy_dtype): + # GH#43038 + assert pandas_dtype(any_real_numpy_dtype) == any_real_numpy_dtype + + +def test_type_comparison_with_signed_int_ea_dtype_and_signed_int_numpy_dtype( + any_signed_int_ea_dtype, any_signed_int_numpy_dtype +): + # GH#43038 + assert not pandas_dtype(any_signed_int_ea_dtype) == any_signed_int_numpy_dtype + + +@pytest.mark.parametrize( + "dtype", + [ + str, + float, + np.int32, + np.uint64, + pd.Index([1, 2.0]), + np.array(["a", "b"]), + np.array([1, 2], dtype=np.uint32), + "int8", + "Int8", + pd.Int8Dtype, + ], +) +def test_is_not_int64_dtype(dtype): + msg = "is_int64_dtype is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + assert not com.is_int64_dtype(dtype) + + +def test_is_datetime64_any_dtype(): + assert not com.is_datetime64_any_dtype(int) + assert not com.is_datetime64_any_dtype(str) + assert not com.is_datetime64_any_dtype(np.array([1, 2])) + assert not com.is_datetime64_any_dtype(np.array(["a", "b"])) + + assert com.is_datetime64_any_dtype(np.datetime64) + assert com.is_datetime64_any_dtype(np.array([], dtype=np.datetime64)) + assert com.is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern")) + assert com.is_datetime64_any_dtype( + pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]") + ) + + +def test_is_datetime64_ns_dtype(): + assert not com.is_datetime64_ns_dtype(int) + assert not com.is_datetime64_ns_dtype(str) + assert not com.is_datetime64_ns_dtype(np.datetime64) + assert not com.is_datetime64_ns_dtype(np.array([1, 2])) + assert not com.is_datetime64_ns_dtype(np.array(["a", "b"])) + assert not com.is_datetime64_ns_dtype(np.array([], dtype=np.datetime64)) + + # This datetime array has the wrong unit (ps instead of ns) + assert not com.is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]")) + + assert com.is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern")) + assert com.is_datetime64_ns_dtype( + pd.DatetimeIndex([1, 2, 3], dtype=np.dtype("datetime64[ns]")) + ) + + # non-nano dt64tz + assert not com.is_datetime64_ns_dtype(DatetimeTZDtype("us", "US/Eastern")) + + +def test_is_timedelta64_ns_dtype(): + assert not com.is_timedelta64_ns_dtype(np.dtype("m8[ps]")) + assert not com.is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64)) + + assert com.is_timedelta64_ns_dtype(np.dtype("m8[ns]")) + assert com.is_timedelta64_ns_dtype(np.array([1, 2], dtype="m8[ns]")) + + +def test_is_numeric_v_string_like(): + assert not com.is_numeric_v_string_like(np.array([1]), 1) + assert not com.is_numeric_v_string_like(np.array([1]), np.array([2])) + assert not com.is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"])) + + assert com.is_numeric_v_string_like(np.array([1]), "foo") + assert com.is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"])) + assert com.is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2])) + + +def test_needs_i8_conversion(): + assert not 
com.needs_i8_conversion(str) + assert not com.needs_i8_conversion(np.int64) + assert not com.needs_i8_conversion(pd.Series([1, 2])) + assert not com.needs_i8_conversion(np.array(["a", "b"])) + + assert not com.needs_i8_conversion(np.datetime64) + assert com.needs_i8_conversion(np.dtype(np.datetime64)) + assert not com.needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]")) + assert com.needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]").dtype) + assert not com.needs_i8_conversion(pd.DatetimeIndex(["2000"], tz="US/Eastern")) + assert com.needs_i8_conversion(pd.DatetimeIndex(["2000"], tz="US/Eastern").dtype) + + +def test_is_numeric_dtype(): + assert not com.is_numeric_dtype(str) + assert not com.is_numeric_dtype(np.datetime64) + assert not com.is_numeric_dtype(np.timedelta64) + assert not com.is_numeric_dtype(np.array(["a", "b"])) + assert not com.is_numeric_dtype(np.array([], dtype=np.timedelta64)) + + assert com.is_numeric_dtype(int) + assert com.is_numeric_dtype(float) + assert com.is_numeric_dtype(np.uint64) + assert com.is_numeric_dtype(pd.Series([1, 2])) + assert com.is_numeric_dtype(pd.Index([1, 2.0])) + + class MyNumericDType(ExtensionDtype): + @property + def type(self): + return str + + @property + def name(self): + raise NotImplementedError + + @classmethod + def construct_array_type(cls): + raise NotImplementedError + + def _is_numeric(self) -> bool: + return True + + assert com.is_numeric_dtype(MyNumericDType()) + + +def test_is_any_real_numeric_dtype(): + assert not com.is_any_real_numeric_dtype(str) + assert not com.is_any_real_numeric_dtype(bool) + assert not com.is_any_real_numeric_dtype(complex) + assert not com.is_any_real_numeric_dtype(object) + assert not com.is_any_real_numeric_dtype(np.datetime64) + assert not com.is_any_real_numeric_dtype(np.array(["a", "b", complex(1, 2)])) + assert not com.is_any_real_numeric_dtype(pd.DataFrame([complex(1, 2), True])) + + assert com.is_any_real_numeric_dtype(int) + assert com.is_any_real_numeric_dtype(float) + assert com.is_any_real_numeric_dtype(np.array([1, 2.5])) + + +def test_is_float_dtype(): + assert not com.is_float_dtype(str) + assert not com.is_float_dtype(int) + assert not com.is_float_dtype(pd.Series([1, 2])) + assert not com.is_float_dtype(np.array(["a", "b"])) + + assert com.is_float_dtype(float) + assert com.is_float_dtype(pd.Index([1, 2.0])) + + +def test_is_bool_dtype(): + assert not com.is_bool_dtype(int) + assert not com.is_bool_dtype(str) + assert not com.is_bool_dtype(pd.Series([1, 2])) + assert not com.is_bool_dtype(pd.Series(["a", "b"], dtype="category")) + assert not com.is_bool_dtype(np.array(["a", "b"])) + assert not com.is_bool_dtype(pd.Index(["a", "b"])) + assert not com.is_bool_dtype("Int64") + + assert com.is_bool_dtype(bool) + assert com.is_bool_dtype(np.bool_) + assert com.is_bool_dtype(pd.Series([True, False], dtype="category")) + assert com.is_bool_dtype(np.array([True, False])) + assert com.is_bool_dtype(pd.Index([True, False])) + + assert com.is_bool_dtype(pd.BooleanDtype()) + assert com.is_bool_dtype(pd.array([True, False, None], dtype="boolean")) + assert com.is_bool_dtype("boolean") + + +def test_is_bool_dtype_numpy_error(): + # GH39010 + assert not com.is_bool_dtype("0 - Name") + + +@pytest.mark.parametrize( + "check_scipy", [False, pytest.param(True, marks=td.skip_if_no_scipy)] +) +def test_is_extension_array_dtype(check_scipy): + assert not com.is_extension_array_dtype([1, 2, 3]) + assert not com.is_extension_array_dtype(np.array([1, 2, 3])) + assert not 
com.is_extension_array_dtype(pd.DatetimeIndex([1, 2, 3])) + + cat = pd.Categorical([1, 2, 3]) + assert com.is_extension_array_dtype(cat) + assert com.is_extension_array_dtype(pd.Series(cat)) + assert com.is_extension_array_dtype(SparseArray([1, 2, 3])) + assert com.is_extension_array_dtype(pd.DatetimeIndex(["2000"], tz="US/Eastern")) + + dtype = DatetimeTZDtype("ns", tz="US/Eastern") + s = pd.Series([], dtype=dtype) + assert com.is_extension_array_dtype(s) + + if check_scipy: + import scipy.sparse + + assert not com.is_extension_array_dtype(scipy.sparse.bsr_matrix([1, 2, 3])) + + +def test_is_complex_dtype(): + assert not com.is_complex_dtype(int) + assert not com.is_complex_dtype(str) + assert not com.is_complex_dtype(pd.Series([1, 2])) + assert not com.is_complex_dtype(np.array(["a", "b"])) + + assert com.is_complex_dtype(np.complex128) + assert com.is_complex_dtype(complex) + assert com.is_complex_dtype(np.array([1 + 1j, 5])) + + +@pytest.mark.parametrize( + "input_param,result", + [ + (int, np.dtype(int)), + ("int32", np.dtype("int32")), + (float, np.dtype(float)), + ("float64", np.dtype("float64")), + (np.dtype("float64"), np.dtype("float64")), + (str, np.dtype(str)), + (pd.Series([1, 2], dtype=np.dtype("int16")), np.dtype("int16")), + (pd.Series(["a", "b"]), np.dtype(object)), + (pd.Index([1, 2]), np.dtype("int64")), + (pd.Index(["a", "b"]), np.dtype(object)), + ("category", "category"), + (pd.Categorical(["a", "b"]).dtype, CategoricalDtype(["a", "b"])), + (pd.Categorical(["a", "b"]), CategoricalDtype(["a", "b"])), + (pd.CategoricalIndex(["a", "b"]).dtype, CategoricalDtype(["a", "b"])), + (pd.CategoricalIndex(["a", "b"]), CategoricalDtype(["a", "b"])), + (CategoricalDtype(), CategoricalDtype()), + (pd.DatetimeIndex([1, 2]), np.dtype("=M8[ns]")), + (pd.DatetimeIndex([1, 2]).dtype, np.dtype("=M8[ns]")), + (" df.two.sum() + + with tm.assert_produces_warning(None): + # successfully modify column in place + # this should not raise a warning + df.one += 1 + assert df.one.iloc[0] == 2 + + with tm.assert_produces_warning(None): + # successfully add an attribute to a series + # this should not raise a warning + df.two.not_an_index = [1, 2] + + with tm.assert_produces_warning(UserWarning): + # warn when setting column to nonexistent name + df.four = df.two + 2 + assert df.four.sum() > df.two.sum() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_inference.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_inference.py new file mode 100644 index 00000000..2198ed0c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_inference.py @@ -0,0 +1,1985 @@ +""" +These the test the public routines exposed in types/common.py +related to inference and not otherwise tested in types/test_common.py + +""" +import collections +from collections import namedtuple +from collections.abc import Iterator +from datetime import ( + date, + datetime, + time, + timedelta, +) +from decimal import Decimal +from fractions import Fraction +from io import StringIO +import itertools +from numbers import Number +import re +import sys +from typing import ( + Generic, + TypeVar, +) + +import numpy as np +import pytest +import pytz + +from pandas._libs import ( + lib, + missing as libmissing, + ops as libops, +) + +from pandas.core.dtypes import inference +from pandas.core.dtypes.common import ( + ensure_int32, + is_bool, + is_complex, + is_datetime64_any_dtype, + is_datetime64_dtype, + is_datetime64_ns_dtype, + is_datetime64tz_dtype, + is_float, + 
is_integer, + is_number, + is_scalar, + is_scipy_sparse, + is_timedelta64_dtype, + is_timedelta64_ns_dtype, +) + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + DateOffset, + DatetimeIndex, + Index, + Interval, + Period, + PeriodIndex, + Series, + Timedelta, + TimedeltaIndex, + Timestamp, +) +import pandas._testing as tm +from pandas.core.arrays import ( + BooleanArray, + FloatingArray, + IntegerArray, +) + + +@pytest.fixture(params=[True, False], ids=str) +def coerce(request): + return request.param + + +class MockNumpyLikeArray: + """ + A class which is numpy-like (e.g. Pint's Quantity) but not actually numpy + + The key is that it is not actually a numpy array so + ``util.is_array(mock_numpy_like_array_instance)`` returns ``False``. Other + important properties are that the class defines a :meth:`__iter__` method + (so that ``isinstance(abc.Iterable)`` returns ``True``) and has a + :meth:`ndim` property, as pandas special-cases 0-dimensional arrays in some + cases. + + We expect pandas to behave with respect to such duck arrays exactly as + with real numpy arrays. In particular, a 0-dimensional duck array is *NOT* + a scalar (`is_scalar(np.array(1)) == False`), but it is not list-like either. + """ + + def __init__(self, values) -> None: + self._values = values + + def __iter__(self) -> Iterator: + iter_values = iter(self._values) + + def it_outer(): + yield from iter_values + + return it_outer() + + def __len__(self) -> int: + return len(self._values) + + def __array__(self, t=None): + return np.asarray(self._values, dtype=t) + + @property + def ndim(self): + return self._values.ndim + + @property + def dtype(self): + return self._values.dtype + + @property + def size(self): + return self._values.size + + @property + def shape(self): + return self._values.shape + + +# collect all objects to be tested for list-like-ness; use tuples of objects, +# whether they are list-like or not (special casing for sets), and their ID +ll_params = [ + ([1], True, "list"), + ([], True, "list-empty"), + ((1,), True, "tuple"), + ((), True, "tuple-empty"), + ({"a": 1}, True, "dict"), + ({}, True, "dict-empty"), + ({"a", 1}, "set", "set"), + (set(), "set", "set-empty"), + (frozenset({"a", 1}), "set", "frozenset"), + (frozenset(), "set", "frozenset-empty"), + (iter([1, 2]), True, "iterator"), + (iter([]), True, "iterator-empty"), + ((x for x in [1, 2]), True, "generator"), + ((_ for _ in []), True, "generator-empty"), + (Series([1]), True, "Series"), + (Series([], dtype=object), True, "Series-empty"), + # Series.str will still raise a TypeError if iterated + (Series(["a"]).str, True, "StringMethods"), + (Series([], dtype="O").str, True, "StringMethods-empty"), + (Index([1]), True, "Index"), + (Index([]), True, "Index-empty"), + (DataFrame([[1]]), True, "DataFrame"), + (DataFrame(), True, "DataFrame-empty"), + (np.ndarray((2,) * 1), True, "ndarray-1d"), + (np.array([]), True, "ndarray-1d-empty"), + (np.ndarray((2,) * 2), True, "ndarray-2d"), + (np.array([[]]), True, "ndarray-2d-empty"), + (np.ndarray((2,) * 3), True, "ndarray-3d"), + (np.array([[[]]]), True, "ndarray-3d-empty"), + (np.ndarray((2,) * 4), True, "ndarray-4d"), + (np.array([[[[]]]]), True, "ndarray-4d-empty"), + (np.array(2), False, "ndarray-0d"), + (MockNumpyLikeArray(np.ndarray((2,) * 1)), True, "duck-ndarray-1d"), + (MockNumpyLikeArray(np.array([])), True, "duck-ndarray-1d-empty"), + (MockNumpyLikeArray(np.ndarray((2,) * 2)), True, "duck-ndarray-2d"), + (MockNumpyLikeArray(np.array([[]])), True, 
"duck-ndarray-2d-empty"), + (MockNumpyLikeArray(np.ndarray((2,) * 3)), True, "duck-ndarray-3d"), + (MockNumpyLikeArray(np.array([[[]]])), True, "duck-ndarray-3d-empty"), + (MockNumpyLikeArray(np.ndarray((2,) * 4)), True, "duck-ndarray-4d"), + (MockNumpyLikeArray(np.array([[[[]]]])), True, "duck-ndarray-4d-empty"), + (MockNumpyLikeArray(np.array(2)), False, "duck-ndarray-0d"), + (1, False, "int"), + (b"123", False, "bytes"), + (b"", False, "bytes-empty"), + ("123", False, "string"), + ("", False, "string-empty"), + (str, False, "string-type"), + (object(), False, "object"), + (np.nan, False, "NaN"), + (None, False, "None"), +] +objs, expected, ids = zip(*ll_params) + + +@pytest.fixture(params=zip(objs, expected), ids=ids) +def maybe_list_like(request): + return request.param + + +def test_is_list_like(maybe_list_like): + obj, expected = maybe_list_like + expected = True if expected == "set" else expected + assert inference.is_list_like(obj) == expected + + +def test_is_list_like_disallow_sets(maybe_list_like): + obj, expected = maybe_list_like + expected = False if expected == "set" else expected + assert inference.is_list_like(obj, allow_sets=False) == expected + + +def test_is_list_like_recursion(): + # GH 33721 + # interpreter would crash with SIGABRT + def list_like(): + inference.is_list_like([]) + list_like() + + rec_limit = sys.getrecursionlimit() + try: + # Limit to avoid stack overflow on Windows CI + sys.setrecursionlimit(100) + with tm.external_error_raised(RecursionError): + list_like() + finally: + sys.setrecursionlimit(rec_limit) + + +def test_is_list_like_iter_is_none(): + # GH 43373 + # is_list_like was yielding false positives with __iter__ == None + class NotListLike: + def __getitem__(self, item): + return self + + __iter__ = None + + assert not inference.is_list_like(NotListLike()) + + +def test_is_list_like_generic(): + # GH 49649 + # is_list_like was yielding false positives for Generic classes in python 3.11 + T = TypeVar("T") + + class MyDataFrame(DataFrame, Generic[T]): + ... 
+ + tstc = MyDataFrame[int] + tst = MyDataFrame[int]({"x": [1, 2, 3]}) + + assert not inference.is_list_like(tstc) + assert isinstance(tst, DataFrame) + assert inference.is_list_like(tst) + + +def test_is_sequence(): + is_seq = inference.is_sequence + assert is_seq((1, 2)) + assert is_seq([1, 2]) + assert not is_seq("abcd") + assert not is_seq(np.int64) + + class A: + def __getitem__(self, item): + return 1 + + assert not is_seq(A()) + + +def test_is_array_like(): + assert inference.is_array_like(Series([], dtype=object)) + assert inference.is_array_like(Series([1, 2])) + assert inference.is_array_like(np.array(["a", "b"])) + assert inference.is_array_like(Index(["2016-01-01"])) + assert inference.is_array_like(np.array([2, 3])) + assert inference.is_array_like(MockNumpyLikeArray(np.array([2, 3]))) + + class DtypeList(list): + dtype = "special" + + assert inference.is_array_like(DtypeList()) + + assert not inference.is_array_like([1, 2, 3]) + assert not inference.is_array_like(()) + assert not inference.is_array_like("foo") + assert not inference.is_array_like(123) + + +@pytest.mark.parametrize( + "inner", + [ + [], + [1], + (1,), + (1, 2), + {"a": 1}, + {1, "a"}, + Series([1]), + Series([], dtype=object), + Series(["a"]).str, + (x for x in range(5)), + ], +) +@pytest.mark.parametrize("outer", [list, Series, np.array, tuple]) +def test_is_nested_list_like_passes(inner, outer): + result = outer([inner for _ in range(5)]) + assert inference.is_list_like(result) + + +@pytest.mark.parametrize( + "obj", + [ + "abc", + [], + [1], + (1,), + ["a"], + "a", + {"a"}, + [1, 2, 3], + Series([1]), + DataFrame({"A": [1]}), + ([1, 2] for _ in range(5)), + ], +) +def test_is_nested_list_like_fails(obj): + assert not inference.is_nested_list_like(obj) + + +@pytest.mark.parametrize("ll", [{}, {"A": 1}, Series([1]), collections.defaultdict()]) +def test_is_dict_like_passes(ll): + assert inference.is_dict_like(ll) + + +@pytest.mark.parametrize( + "ll", + [ + "1", + 1, + [1, 2], + (1, 2), + range(2), + Index([1]), + dict, + collections.defaultdict, + Series, + ], +) +def test_is_dict_like_fails(ll): + assert not inference.is_dict_like(ll) + + +@pytest.mark.parametrize("has_keys", [True, False]) +@pytest.mark.parametrize("has_getitem", [True, False]) +@pytest.mark.parametrize("has_contains", [True, False]) +def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains): + class DictLike: + def __init__(self, d) -> None: + self.d = d + + if has_keys: + + def keys(self): + return self.d.keys() + + if has_getitem: + + def __getitem__(self, key): + return self.d.__getitem__(key) + + if has_contains: + + def __contains__(self, key) -> bool: + return self.d.__contains__(key) + + d = DictLike({1: 2}) + result = inference.is_dict_like(d) + expected = has_keys and has_getitem and has_contains + + assert result is expected + + +def test_is_file_like(): + class MockFile: + pass + + is_file = inference.is_file_like + + data = StringIO("data") + assert is_file(data) + + # No read / write attributes + # No iterator attributes + m = MockFile() + assert not is_file(m) + + MockFile.write = lambda self: 0 + + # Write attribute but not an iterator + m = MockFile() + assert not is_file(m) + + # gh-16530: Valid iterator just means we have the + # __iter__ attribute for our purposes. 
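+ # (real file handles meet the same bar, since io.IOBase provides + # __iter__ for line-by-line reading)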
+ MockFile.__iter__ = lambda self: self + + # Valid write-only file + m = MockFile() + assert is_file(m) + + del MockFile.write + MockFile.read = lambda self: 0 + + # Valid read-only file + m = MockFile() + assert is_file(m) + + # Iterator but no read / write attributes + data = [1, 2, 3] + assert not is_file(data) + + +test_tuple = collections.namedtuple("test_tuple", ["a", "b", "c"]) + + +@pytest.mark.parametrize("ll", [test_tuple(1, 2, 3)]) +def test_is_names_tuple_passes(ll): + assert inference.is_named_tuple(ll) + + +@pytest.mark.parametrize("ll", [(1, 2, 3), "a", Series({"pi": 3.14})]) +def test_is_names_tuple_fails(ll): + assert not inference.is_named_tuple(ll) + + +def test_is_hashable(): + # all new-style classes are hashable by default + class HashableClass: + pass + + class UnhashableClass1: + __hash__ = None + + class UnhashableClass2: + def __hash__(self): + raise TypeError("Not hashable") + + hashable = (1, 3.14, np.float64(3.14), "a", (), (1,), HashableClass()) + not_hashable = ([], UnhashableClass1()) + abc_hashable_not_really_hashable = (([],), UnhashableClass2()) + + for i in hashable: + assert inference.is_hashable(i) + for i in not_hashable: + assert not inference.is_hashable(i) + for i in abc_hashable_not_really_hashable: + assert not inference.is_hashable(i) + + # numpy.array is no longer collections.abc.Hashable as of + # https://github.com/numpy/numpy/pull/5326, just test + # is_hashable() + assert not inference.is_hashable(np.array([])) + + +@pytest.mark.parametrize("ll", [re.compile("ad")]) +def test_is_re_passes(ll): + assert inference.is_re(ll) + + +@pytest.mark.parametrize("ll", ["x", 2, 3, object()]) +def test_is_re_fails(ll): + assert not inference.is_re(ll) + + +@pytest.mark.parametrize( + "ll", [r"a", "x", r"asdf", re.compile("adsf"), r"\u2233\s*", re.compile(r"")] +) +def test_is_recompilable_passes(ll): + assert inference.is_re_compilable(ll) + + +@pytest.mark.parametrize("ll", [1, [], object()]) +def test_is_recompilable_fails(ll): + assert not inference.is_re_compilable(ll) + + +class TestInference: + @pytest.mark.parametrize( + "arr", + [ + np.array(list("abc"), dtype="S1"), + np.array(list("abc"), dtype="S1").astype(object), + [b"a", np.nan, b"c"], + ], + ) + def test_infer_dtype_bytes(self, arr): + result = lib.infer_dtype(arr, skipna=True) + assert result == "bytes" + + @pytest.mark.parametrize( + "value, expected", + [ + (float("inf"), True), + (np.inf, True), + (-np.inf, False), + (1, False), + ("a", False), + ], + ) + def test_isposinf_scalar(self, value, expected): + # GH 11352 + result = libmissing.isposinf_scalar(value) + assert result is expected + + @pytest.mark.parametrize( + "value, expected", + [ + (float("-inf"), True), + (-np.inf, True), + (np.inf, False), + (1, False), + ("a", False), + ], + ) + def test_isneginf_scalar(self, value, expected): + result = libmissing.isneginf_scalar(value) + assert result is expected + + @pytest.mark.parametrize( + "convert_to_masked_nullable, exp", + [ + ( + True, + BooleanArray( + np.array([True, False], dtype="bool"), np.array([False, True]) + ), + ), + (False, np.array([True, np.nan], dtype="object")), + ], + ) + def test_maybe_convert_nullable_boolean(self, convert_to_masked_nullable, exp): + # GH 40687 + arr = np.array([True, np.nan], dtype=object) + result = libops.maybe_convert_bool( + arr, set(), convert_to_masked_nullable=convert_to_masked_nullable + ) + if convert_to_masked_nullable: + tm.assert_extension_array_equal(BooleanArray(*result), exp) + else: + result = result[0] + 
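# maybe_convert_bool returns a (values, mask) pair; with the masked + # option off, only the values slot is meaningful here +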
tm.assert_numpy_array_equal(result, exp) + + @pytest.mark.parametrize("convert_to_masked_nullable", [True, False]) + @pytest.mark.parametrize("coerce_numeric", [True, False]) + @pytest.mark.parametrize( + "infinity", ["inf", "inF", "iNf", "Inf", "iNF", "InF", "INf", "INF"] + ) + @pytest.mark.parametrize("prefix", ["", "-", "+"]) + def test_maybe_convert_numeric_infinities( + self, coerce_numeric, infinity, prefix, convert_to_masked_nullable + ): + # see gh-13274 + result, _ = lib.maybe_convert_numeric( + np.array([prefix + infinity], dtype=object), + na_values={"", "NULL", "nan"}, + coerce_numeric=coerce_numeric, + convert_to_masked_nullable=convert_to_masked_nullable, + ) + expected = np.array([np.inf if prefix in ["", "+"] else -np.inf]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("convert_to_masked_nullable", [True, False]) + def test_maybe_convert_numeric_infinities_raises(self, convert_to_masked_nullable): + msg = "Unable to parse string" + with pytest.raises(ValueError, match=msg): + lib.maybe_convert_numeric( + np.array(["foo_inf"], dtype=object), + na_values={"", "NULL", "nan"}, + coerce_numeric=False, + convert_to_masked_nullable=convert_to_masked_nullable, + ) + + @pytest.mark.parametrize("convert_to_masked_nullable", [True, False]) + def test_maybe_convert_numeric_post_floatify_nan( + self, coerce, convert_to_masked_nullable + ): + # see gh-13314 + data = np.array(["1.200", "-999.000", "4.500"], dtype=object) + expected = np.array([1.2, np.nan, 4.5], dtype=np.float64) + nan_values = {-999, -999.0} + + out = lib.maybe_convert_numeric( + data, + nan_values, + coerce, + convert_to_masked_nullable=convert_to_masked_nullable, + ) + if convert_to_masked_nullable: + expected = FloatingArray(expected, np.isnan(expected)) + tm.assert_extension_array_equal(expected, FloatingArray(*out)) + else: + out = out[0] + tm.assert_numpy_array_equal(out, expected) + + def test_convert_infs(self): + arr = np.array(["inf", "inf", "inf"], dtype="O") + result, _ = lib.maybe_convert_numeric(arr, set(), False) + assert result.dtype == np.float64 + + arr = np.array(["-inf", "-inf", "-inf"], dtype="O") + result, _ = lib.maybe_convert_numeric(arr, set(), False) + assert result.dtype == np.float64 + + def test_scientific_no_exponent(self): + # See PR 12215 + arr = np.array(["42E", "2E", "99e", "6e"], dtype="O") + result, _ = lib.maybe_convert_numeric(arr, set(), False, True) + assert np.all(np.isnan(result)) + + def test_convert_non_hashable(self): + # GH13324 + # make sure that we are handling non-hashables + arr = np.array([[10.0, 2], 1.0, "apple"], dtype=object) + result, _ = lib.maybe_convert_numeric(arr, set(), False, True) + tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan])) + + def test_convert_numeric_uint64(self): + arr = np.array([2**63], dtype=object) + exp = np.array([2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp) + + arr = np.array([str(2**63)], dtype=object) + exp = np.array([2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp) + + arr = np.array([np.uint64(2**63)], dtype=object) + exp = np.array([2**63], dtype=np.uint64) + tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp) + + @pytest.mark.parametrize( + "arr", + [ + np.array([2**63, np.nan], dtype=object), + np.array([str(2**63), np.nan], dtype=object), + np.array([np.nan, 2**63], dtype=object), + np.array([np.nan, str(2**63)], dtype=object), + ], + ) + def
test_convert_numeric_uint64_nan(self, coerce, arr): + expected = arr.astype(float) if coerce else arr.copy() + result, _ = lib.maybe_convert_numeric(arr, set(), coerce_numeric=coerce) + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize("convert_to_masked_nullable", [True, False]) + def test_convert_numeric_uint64_nan_values( + self, coerce, convert_to_masked_nullable + ): + arr = np.array([2**63, 2**63 + 1], dtype=object) + na_values = {2**63} + + expected = ( + np.array([np.nan, 2**63 + 1], dtype=float) if coerce else arr.copy() + ) + result = lib.maybe_convert_numeric( + arr, + na_values, + coerce_numeric=coerce, + convert_to_masked_nullable=convert_to_masked_nullable, + ) + if convert_to_masked_nullable and coerce: + expected = IntegerArray( + np.array([0, 2**63 + 1], dtype="u8"), + np.array([True, False], dtype="bool"), + ) + result = IntegerArray(*result) + else: + result = result[0] # discard mask + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize( + "case", + [ + np.array([2**63, -1], dtype=object), + np.array([str(2**63), -1], dtype=object), + np.array([str(2**63), str(-1)], dtype=object), + np.array([-1, 2**63], dtype=object), + np.array([-1, str(2**63)], dtype=object), + np.array([str(-1), str(2**63)], dtype=object), + ], + ) + @pytest.mark.parametrize("convert_to_masked_nullable", [True, False]) + def test_convert_numeric_int64_uint64( + self, case, coerce, convert_to_masked_nullable + ): + expected = case.astype(float) if coerce else case.copy() + result, _ = lib.maybe_convert_numeric( + case, + set(), + coerce_numeric=coerce, + convert_to_masked_nullable=convert_to_masked_nullable, + ) + + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize("convert_to_masked_nullable", [True, False]) + def test_convert_numeric_string_uint64(self, convert_to_masked_nullable): + # GH32394 + result = lib.maybe_convert_numeric( + np.array(["uint64"], dtype=object), + set(), + coerce_numeric=True, + convert_to_masked_nullable=convert_to_masked_nullable, + ) + if convert_to_masked_nullable: + result = FloatingArray(*result) + else: + result = result[0] + assert np.isnan(result) + + @pytest.mark.parametrize("value", [-(2**63) - 1, 2**64]) + def test_convert_int_overflow(self, value): + # see gh-18584 + arr = np.array([value], dtype=object) + result = lib.maybe_convert_objects(arr) + tm.assert_numpy_array_equal(arr, result) + + @pytest.mark.parametrize("val", [None, np.nan, float("nan")]) + @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) + def test_maybe_convert_objects_nat_inference(self, val, dtype): + dtype = np.dtype(dtype) + vals = np.array([pd.NaT, val], dtype=object) + result = lib.maybe_convert_objects( + vals, + convert_non_numeric=True, + dtype_if_all_nat=dtype, + ) + assert result.dtype == dtype + assert np.isnat(result).all() + + result = lib.maybe_convert_objects( + vals[::-1], + convert_non_numeric=True, + dtype_if_all_nat=dtype, + ) + assert result.dtype == dtype + assert np.isnat(result).all() + + @pytest.mark.parametrize( + "value, expected_dtype", + [ + # see gh-4471 + ([2**63], np.uint64), + # NumPy bug: can't compare uint64 to int64, as that + # results in both casting to float64, so we should + # make sure that this function is robust against it + ([np.uint64(2**63)], np.uint64), + ([2, -1], np.int64), + ([2**63, -1], object), + # GH#47294 + ([np.uint8(1)], np.uint8), + ([np.uint16(1)], np.uint16), + ([np.uint32(1)], np.uint32), + ([np.uint64(1)], np.uint64), + ([np.uint8(2), np.uint16(1)], np.uint16), + 
([np.uint32(2), np.uint16(1)], np.uint32), + ([np.uint32(2), -1], object), + ([np.uint32(2), 1], np.uint64), + ([np.uint32(2), np.int32(1)], object), + ], + ) + def test_maybe_convert_objects_uint(self, value, expected_dtype): + arr = np.array(value, dtype=object) + exp = np.array(value, dtype=expected_dtype) + tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp) + + def test_maybe_convert_objects_datetime(self): + # GH27438 + arr = np.array( + [np.datetime64("2000-01-01"), np.timedelta64(1, "s")], dtype=object + ) + exp = arr.copy() + out = lib.maybe_convert_objects(arr, convert_non_numeric=True) + tm.assert_numpy_array_equal(out, exp) + + arr = np.array([pd.NaT, np.timedelta64(1, "s")], dtype=object) + exp = np.array([np.timedelta64("NaT"), np.timedelta64(1, "s")], dtype="m8[ns]") + out = lib.maybe_convert_objects(arr, convert_non_numeric=True) + tm.assert_numpy_array_equal(out, exp) + + # with convert_non_numeric=True, the nan is a valid NA value for td64 + arr = np.array([np.timedelta64(1, "s"), np.nan], dtype=object) + exp = exp[::-1] + out = lib.maybe_convert_objects(arr, convert_non_numeric=True) + tm.assert_numpy_array_equal(out, exp) + + def test_maybe_convert_objects_dtype_if_all_nat(self): + arr = np.array([pd.NaT, pd.NaT], dtype=object) + out = lib.maybe_convert_objects(arr, convert_non_numeric=True) + # no dtype_if_all_nat passed -> we don't guess + tm.assert_numpy_array_equal(out, arr) + + out = lib.maybe_convert_objects( + arr, + convert_non_numeric=True, + dtype_if_all_nat=np.dtype("timedelta64[ns]"), + ) + exp = np.array(["NaT", "NaT"], dtype="timedelta64[ns]") + tm.assert_numpy_array_equal(out, exp) + + out = lib.maybe_convert_objects( + arr, + convert_non_numeric=True, + dtype_if_all_nat=np.dtype("datetime64[ns]"), + ) + exp = np.array(["NaT", "NaT"], dtype="datetime64[ns]") + tm.assert_numpy_array_equal(out, exp) + + def test_maybe_convert_objects_dtype_if_all_nat_invalid(self): + # we accept datetime64[ns], timedelta64[ns], and EADtype + arr = np.array([pd.NaT, pd.NaT], dtype=object) + + with pytest.raises(ValueError, match="int64"): + lib.maybe_convert_objects( + arr, + convert_non_numeric=True, + dtype_if_all_nat=np.dtype("int64"), + ) + + @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"]) + def test_maybe_convert_objects_datetime_overflow_safe(self, dtype): + stamp = datetime(2363, 10, 4) # Enterprise-D launch date + if dtype == "timedelta64[ns]": + stamp = stamp - datetime(1970, 1, 1) + arr = np.array([stamp], dtype=object) + + out = lib.maybe_convert_objects(arr, convert_non_numeric=True) + # no OutOfBoundsDatetime/OutOfBoundsTimedeltas + tm.assert_numpy_array_equal(out, arr) + + def test_maybe_convert_objects_mixed_datetimes(self): + ts = Timestamp("now") + vals = [ts, ts.to_pydatetime(), ts.to_datetime64(), pd.NaT, np.nan, None] + + for data in itertools.permutations(vals): + data = np.array(list(data), dtype=object) + expected = DatetimeIndex(data)._data._ndarray + result = lib.maybe_convert_objects(data, convert_non_numeric=True) + tm.assert_numpy_array_equal(result, expected) + + def test_maybe_convert_objects_timedelta64_nat(self): + obj = np.timedelta64("NaT", "ns") + arr = np.array([obj], dtype=object) + assert arr[0] is obj + + result = lib.maybe_convert_objects(arr, convert_non_numeric=True) + + expected = np.array([obj], dtype="m8[ns]") + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "exp", + [ + IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True])), +
IntegerArray(np.array([2, 0], dtype="int64"), np.array([False, True])), + ], + ) + def test_maybe_convert_objects_nullable_integer(self, exp): + # GH27335 + arr = np.array([2, np.nan], dtype=object) + result = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True) + + tm.assert_extension_array_equal(result, exp) + + @pytest.mark.parametrize( + "dtype, val", [("int64", 1), ("uint64", np.iinfo(np.int64).max + 1)] + ) + def test_maybe_convert_objects_nullable_none(self, dtype, val): + # GH#50043 + arr = np.array([val, None, 3], dtype="object") + result = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True) + expected = IntegerArray( + np.array([val, 0, 3], dtype=dtype), np.array([False, True, False]) + ) + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize( + "convert_to_masked_nullable, exp", + [ + (True, IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True]))), + (False, np.array([2, np.nan], dtype="float64")), + ], + ) + def test_maybe_convert_numeric_nullable_integer( + self, convert_to_masked_nullable, exp + ): + # GH 40687 + arr = np.array([2, np.nan], dtype=object) + result = lib.maybe_convert_numeric( + arr, set(), convert_to_masked_nullable=convert_to_masked_nullable + ) + if convert_to_masked_nullable: + result = IntegerArray(*result) + tm.assert_extension_array_equal(result, exp) + else: + result = result[0] + tm.assert_numpy_array_equal(result, exp) + + @pytest.mark.parametrize( + "convert_to_masked_nullable, exp", + [ + ( + True, + FloatingArray( + np.array([2.0, 0.0], dtype="float64"), np.array([False, True]) + ), + ), + (False, np.array([2.0, np.nan], dtype="float64")), + ], + ) + def test_maybe_convert_numeric_floating_array( + self, convert_to_masked_nullable, exp + ): + # GH 40687 + arr = np.array([2.0, np.nan], dtype=object) + result = lib.maybe_convert_numeric( + arr, set(), convert_to_masked_nullable=convert_to_masked_nullable + ) + if convert_to_masked_nullable: + tm.assert_extension_array_equal(FloatingArray(*result), exp) + else: + result = result[0] + tm.assert_numpy_array_equal(result, exp) + + def test_maybe_convert_objects_bool_nan(self): + # GH32146 + ind = Index([True, False, np.nan], dtype=object) + exp = np.array([True, False, np.nan], dtype=object) + out = lib.maybe_convert_objects(ind.values, safe=1) + tm.assert_numpy_array_equal(out, exp) + + def test_maybe_convert_objects_nullable_boolean(self): + # GH50047 + arr = np.array([True, False], dtype=object) + exp = np.array([True, False]) + out = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True) + tm.assert_numpy_array_equal(out, exp) + + arr = np.array([True, False, pd.NaT], dtype=object) + exp = np.array([True, False, pd.NaT], dtype=object) + out = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True) + tm.assert_numpy_array_equal(out, exp) + + @pytest.mark.parametrize("val", [None, np.nan]) + def test_maybe_convert_objects_nullable_boolean_na(self, val): + # GH50047 + arr = np.array([True, False, val], dtype=object) + exp = BooleanArray( + np.array([True, False, False]), np.array([False, False, True]) + ) + out = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True) + tm.assert_extension_array_equal(out, exp) + + @pytest.mark.parametrize( + "data0", + [ + True, + 1, + 1.0, + 1.0 + 1.0j, + np.int8(1), + np.int16(1), + np.int32(1), + np.int64(1), + np.float16(1), + np.float32(1), + np.float64(1), + np.complex64(1), + np.complex128(1), + ], + ) + @pytest.mark.parametrize( + "data1", + [ + True, + 1, + 1.0, + 1.0 + 1.0j, + 
np.int8(1), + np.int16(1), + np.int32(1), + np.int64(1), + np.float16(1), + np.float32(1), + np.float64(1), + np.complex64(1), + np.complex128(1), + ], + ) + def test_maybe_convert_objects_itemsize(self, data0, data1): + # GH 40908 + data = [data0, data1] + arr = np.array(data, dtype="object") + + common_kind = np.result_type(type(data0), type(data1)).kind + kind0 = "python" if not hasattr(data0, "dtype") else data0.dtype.kind + kind1 = "python" if not hasattr(data1, "dtype") else data1.dtype.kind + if kind0 != "python" and kind1 != "python": + kind = common_kind + itemsize = max(data0.dtype.itemsize, data1.dtype.itemsize) + elif is_bool(data0) or is_bool(data1): + kind = "bool" if (is_bool(data0) and is_bool(data1)) else "object" + itemsize = "" + elif is_complex(data0) or is_complex(data1): + kind = common_kind + itemsize = 16 + else: + kind = common_kind + itemsize = 8 + + expected = np.array(data, dtype=f"{kind}{itemsize}") + result = lib.maybe_convert_objects(arr) + tm.assert_numpy_array_equal(result, expected) + + def test_mixed_dtypes_remain_object_array(self): + # GH14956 + arr = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], dtype=object) + result = lib.maybe_convert_objects(arr, convert_non_numeric=True) + tm.assert_numpy_array_equal(result, arr) + + @pytest.mark.parametrize( + "idx", + [ + pd.IntervalIndex.from_breaks(range(5), closed="both"), + pd.period_range("2016-01-01", periods=3, freq="D"), + ], + ) + def test_maybe_convert_objects_ea(self, idx): + result = lib.maybe_convert_objects( + np.array(idx, dtype=object), + convert_non_numeric=True, + ) + tm.assert_extension_array_equal(result, idx._data) + + +class TestTypeInference: + # Dummy class used for testing with Python objects + class Dummy: + pass + + def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype): + # see pandas/conftest.py + inferred_dtype, values = any_skipna_inferred_dtype + + # make sure the inferred dtype of the fixture is as requested + assert inferred_dtype == lib.infer_dtype(values, skipna=True) + + @pytest.mark.parametrize("skipna", [True, False]) + def test_length_zero(self, skipna): + result = lib.infer_dtype(np.array([], dtype="i4"), skipna=skipna) + assert result == "integer" + + result = lib.infer_dtype([], skipna=skipna) + assert result == "empty" + + # GH 18004 + arr = np.array([np.array([], dtype=object), np.array([], dtype=object)]) + result = lib.infer_dtype(arr, skipna=skipna) + assert result == "empty" + + def test_integers(self): + arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype="O") + result = lib.infer_dtype(arr, skipna=True) + assert result == "integer" + + arr = np.array([1, 2, 3, np.int64(4), np.int32(5), "foo"], dtype="O") + result = lib.infer_dtype(arr, skipna=True) + assert result == "mixed-integer" + + arr = np.array([1, 2, 3, 4, 5], dtype="i4") + result = lib.infer_dtype(arr, skipna=True) + assert result == "integer" + + @pytest.mark.parametrize( + "arr, skipna", + [ + (np.array([1, 2, np.nan, np.nan, 3], dtype="O"), False), + (np.array([1, 2, np.nan, np.nan, 3], dtype="O"), True), + (np.array([1, 2, 3, np.int64(4), np.int32(5), np.nan], dtype="O"), False), + (np.array([1, 2, 3, np.int64(4), np.int32(5), np.nan], dtype="O"), True), + ], + ) + def test_integer_na(self, arr, skipna): + # GH 27392 + result = lib.infer_dtype(arr, skipna=skipna) + expected = "integer" if skipna else "integer-na" + assert result == expected + + def test_infer_dtype_skipna_default(self): + # infer_dtype `skipna` default deprecated in GH#24050, + # changed to True in GH#29876 + 
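# so with no explicit skipna argument the trailing NaN below is + # skipped and the remaining values infer as "integer" +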
arr = np.array([1, 2, 3, np.nan], dtype=object) + + result = lib.infer_dtype(arr) + assert result == "integer" + + def test_bools(self): + arr = np.array([True, False, True, True, True], dtype="O") + result = lib.infer_dtype(arr, skipna=True) + assert result == "boolean" + + arr = np.array([np.bool_(True), np.bool_(False)], dtype="O") + result = lib.infer_dtype(arr, skipna=True) + assert result == "boolean" + + arr = np.array([True, False, True, "foo"], dtype="O") + result = lib.infer_dtype(arr, skipna=True) + assert result == "mixed" + + arr = np.array([True, False, True], dtype=bool) + result = lib.infer_dtype(arr, skipna=True) + assert result == "boolean" + + arr = np.array([True, np.nan, False], dtype="O") + result = lib.infer_dtype(arr, skipna=True) + assert result == "boolean" + + result = lib.infer_dtype(arr, skipna=False) + assert result == "mixed" + + def test_floats(self): + arr = np.array([1.0, 2.0, 3.0, np.float64(4), np.float32(5)], dtype="O") + result = lib.infer_dtype(arr, skipna=True) + assert result == "floating" + + arr = np.array([1, 2, 3, np.float64(4), np.float32(5), "foo"], dtype="O") + result = lib.infer_dtype(arr, skipna=True) + assert result == "mixed-integer" + + arr = np.array([1, 2, 3, 4, 5], dtype="f4") + result = lib.infer_dtype(arr, skipna=True) + assert result == "floating" + + arr = np.array([1, 2, 3, 4, 5], dtype="f8") + result = lib.infer_dtype(arr, skipna=True) + assert result == "floating" + + def test_decimals(self): + # GH15690 + arr = np.array([Decimal(1), Decimal(2), Decimal(3)]) + result = lib.infer_dtype(arr, skipna=True) + assert result == "decimal" + + arr = np.array([1.0, 2.0, Decimal(3)]) + result = lib.infer_dtype(arr, skipna=True) + assert result == "mixed" + + result = lib.infer_dtype(arr[::-1], skipna=True) + assert result == "mixed" + + arr = np.array([Decimal(1), Decimal("NaN"), Decimal(3)]) + result = lib.infer_dtype(arr, skipna=True) + assert result == "decimal" + + arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype="O") + result = lib.infer_dtype(arr, skipna=True) + assert result == "decimal" + + # complex is compatible with nan, so skipna has no effect + @pytest.mark.parametrize("skipna", [True, False]) + def test_complex(self, skipna): + # gets cast to complex on array construction + arr = np.array([1.0, 2.0, 1 + 1j]) + result = lib.infer_dtype(arr, skipna=skipna) + assert result == "complex" + + arr = np.array([1.0, 2.0, 1 + 1j], dtype="O") + result = lib.infer_dtype(arr, skipna=skipna) + assert result == "mixed" + + result = lib.infer_dtype(arr[::-1], skipna=skipna) + assert result == "mixed" + + # gets cast to complex on array construction + arr = np.array([1, np.nan, 1 + 1j]) + result = lib.infer_dtype(arr, skipna=skipna) + assert result == "complex" + + arr = np.array([1.0, np.nan, 1 + 1j], dtype="O") + result = lib.infer_dtype(arr, skipna=skipna) + assert result == "mixed" + + # complex with nans stays complex + arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype="O") + result = lib.infer_dtype(arr, skipna=skipna) + assert result == "complex" + + # test smaller complex dtype; will pass through _try_infer_map fastpath + arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype=np.complex64) + result = lib.infer_dtype(arr, skipna=skipna) + assert result == "complex" + + def test_string(self): + pass + + def test_unicode(self): + arr = ["a", np.nan, "c"] + result = lib.infer_dtype(arr, skipna=False) + # This currently returns "mixed", but it's not clear that's optimal. 
+ # This could also return "string" or "mixed-string" + assert result == "mixed" + + # even though we use skipna, we are only skipping those NAs that are + # considered matching by is_string_array + arr = ["a", np.nan, "c"] + result = lib.infer_dtype(arr, skipna=True) + assert result == "string" + + arr = ["a", pd.NA, "c"] + result = lib.infer_dtype(arr, skipna=True) + assert result == "string" + + arr = ["a", pd.NaT, "c"] + result = lib.infer_dtype(arr, skipna=True) + assert result == "mixed" + + arr = ["a", "c"] + result = lib.infer_dtype(arr, skipna=False) + assert result == "string" + + @pytest.mark.parametrize( + "dtype, missing, skipna, expected", + [ + (float, np.nan, False, "floating"), + (float, np.nan, True, "floating"), + (object, np.nan, False, "floating"), + (object, np.nan, True, "empty"), + (object, None, False, "mixed"), + (object, None, True, "empty"), + ], + ) + @pytest.mark.parametrize("box", [Series, np.array]) + def test_object_empty(self, box, missing, dtype, skipna, expected): + # GH 23421 + arr = box([missing, missing], dtype=dtype) + + result = lib.infer_dtype(arr, skipna=skipna) + assert result == expected + + def test_datetime(self): + dates = [datetime(2012, 1, x) for x in range(1, 20)] + index = Index(dates) + assert index.inferred_type == "datetime64" + + def test_infer_dtype_datetime64(self): + arr = np.array( + [np.datetime64("2011-01-01"), np.datetime64("2011-01-01")], dtype=object + ) + assert lib.infer_dtype(arr, skipna=True) == "datetime64" + + @pytest.mark.parametrize("na_value", [pd.NaT, np.nan]) + def test_infer_dtype_datetime64_with_na(self, na_value): + # starts with nan + arr = np.array([na_value, np.datetime64("2011-01-02")]) + assert lib.infer_dtype(arr, skipna=True) == "datetime64" + + arr = np.array([na_value, np.datetime64("2011-01-02"), na_value]) + assert lib.infer_dtype(arr, skipna=True) == "datetime64" + + @pytest.mark.parametrize( + "arr", + [ + np.array( + [np.timedelta64("nat"), np.datetime64("2011-01-02")], dtype=object + ), + np.array( + [np.datetime64("2011-01-02"), np.timedelta64("nat")], dtype=object + ), + np.array([np.datetime64("2011-01-01"), Timestamp("2011-01-02")]), + np.array([Timestamp("2011-01-02"), np.datetime64("2011-01-01")]), + np.array([np.nan, Timestamp("2011-01-02"), 1.1]), + np.array([np.nan, "2011-01-01", Timestamp("2011-01-02")], dtype=object), + np.array([np.datetime64("nat"), np.timedelta64(1, "D")], dtype=object), + np.array([np.timedelta64(1, "D"), np.datetime64("nat")], dtype=object), + ], + ) + def test_infer_datetimelike_dtype_mixed(self, arr): + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + def test_infer_dtype_mixed_integer(self): + arr = np.array([np.nan, Timestamp("2011-01-02"), 1]) + assert lib.infer_dtype(arr, skipna=True) == "mixed-integer" + + @pytest.mark.parametrize( + "arr", + [ + np.array([Timestamp("2011-01-01"), Timestamp("2011-01-02")]), + np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)]), + np.array([datetime(2011, 1, 1), Timestamp("2011-01-02")]), + ], + ) + def test_infer_dtype_datetime(self, arr): + assert lib.infer_dtype(arr, skipna=True) == "datetime" + + @pytest.mark.parametrize("na_value", [pd.NaT, np.nan]) + @pytest.mark.parametrize( + "time_stamp", [Timestamp("2011-01-01"), datetime(2011, 1, 1)] + ) + def test_infer_dtype_datetime_with_na(self, na_value, time_stamp): + # starts with nan + arr = np.array([na_value, time_stamp]) + assert lib.infer_dtype(arr, skipna=True) == "datetime" + + arr = np.array([na_value, time_stamp, na_value]) + assert lib.infer_dtype(arr, 
skipna=True) == "datetime" + + @pytest.mark.parametrize( + "arr", + [ + np.array([Timedelta("1 days"), Timedelta("2 days")]), + np.array([np.timedelta64(1, "D"), np.timedelta64(2, "D")], dtype=object), + np.array([timedelta(1), timedelta(2)]), + ], + ) + def test_infer_dtype_timedelta(self, arr): + assert lib.infer_dtype(arr, skipna=True) == "timedelta" + + @pytest.mark.parametrize("na_value", [pd.NaT, np.nan]) + @pytest.mark.parametrize( + "delta", [Timedelta("1 days"), np.timedelta64(1, "D"), timedelta(1)] + ) + def test_infer_dtype_timedelta_with_na(self, na_value, delta): + # starts with nan + arr = np.array([na_value, delta]) + assert lib.infer_dtype(arr, skipna=True) == "timedelta" + + arr = np.array([na_value, delta, na_value]) + assert lib.infer_dtype(arr, skipna=True) == "timedelta" + + def test_infer_dtype_period(self): + # GH 13664 + arr = np.array([Period("2011-01", freq="D"), Period("2011-02", freq="D")]) + assert lib.infer_dtype(arr, skipna=True) == "period" + + # non-homogeneous freqs -> mixed + arr = np.array([Period("2011-01", freq="D"), Period("2011-02", freq="M")]) + assert lib.infer_dtype(arr, skipna=True) == "mixed" + + @pytest.mark.parametrize("klass", [pd.array, Series, Index]) + @pytest.mark.parametrize("skipna", [True, False]) + def test_infer_dtype_period_array(self, klass, skipna): + # https://github.com/pandas-dev/pandas/issues/23553 + values = klass( + [ + Period("2011-01-01", freq="D"), + Period("2011-01-02", freq="D"), + pd.NaT, + ] + ) + assert lib.infer_dtype(values, skipna=skipna) == "period" + + # periods but mixed freq + values = klass( + [ + Period("2011-01-01", freq="D"), + Period("2011-01-02", freq="M"), + pd.NaT, + ] + ) + # with pd.array this becomes NumpyExtensionArray which ends up + # as "unknown-array" + exp = "unknown-array" if klass is pd.array else "mixed" + assert lib.infer_dtype(values, skipna=skipna) == exp + + def test_infer_dtype_period_mixed(self): + arr = np.array( + [Period("2011-01", freq="M"), np.datetime64("nat")], dtype=object + ) + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + arr = np.array( + [np.datetime64("nat"), Period("2011-01", freq="M")], dtype=object + ) + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + @pytest.mark.parametrize("na_value", [pd.NaT, np.nan]) + def test_infer_dtype_period_with_na(self, na_value): + # starts with nan + arr = np.array([na_value, Period("2011-01", freq="D")]) + assert lib.infer_dtype(arr, skipna=True) == "period" + + arr = np.array([na_value, Period("2011-01", freq="D"), na_value]) + assert lib.infer_dtype(arr, skipna=True) == "period" + + def test_infer_dtype_all_nan_nat_like(self): + arr = np.array([np.nan, np.nan]) + assert lib.infer_dtype(arr, skipna=True) == "floating" + + # a mix of nan and None results in mixed + arr = np.array([np.nan, np.nan, None]) + assert lib.infer_dtype(arr, skipna=True) == "empty" + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + arr = np.array([None, np.nan, np.nan]) + assert lib.infer_dtype(arr, skipna=True) == "empty" + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + # pd.NaT + arr = np.array([pd.NaT]) + assert lib.infer_dtype(arr, skipna=False) == "datetime" + + arr = np.array([pd.NaT, np.nan]) + assert lib.infer_dtype(arr, skipna=False) == "datetime" + + arr = np.array([np.nan, pd.NaT]) + assert lib.infer_dtype(arr, skipna=False) == "datetime" + + arr = np.array([np.nan, pd.NaT, np.nan]) + assert lib.infer_dtype(arr, skipna=False) == "datetime" + + arr = np.array([None, pd.NaT, None]) + assert lib.infer_dtype(arr,
skipna=False) == "datetime" + + # np.datetime64(nat) + arr = np.array([np.datetime64("nat")]) + assert lib.infer_dtype(arr, skipna=False) == "datetime64" + + for n in [np.nan, pd.NaT, None]: + arr = np.array([n, np.datetime64("nat"), n]) + assert lib.infer_dtype(arr, skipna=False) == "datetime64" + + arr = np.array([pd.NaT, n, np.datetime64("nat"), n]) + assert lib.infer_dtype(arr, skipna=False) == "datetime64" + + arr = np.array([np.timedelta64("nat")], dtype=object) + assert lib.infer_dtype(arr, skipna=False) == "timedelta" + + for n in [np.nan, pd.NaT, None]: + arr = np.array([n, np.timedelta64("nat"), n]) + assert lib.infer_dtype(arr, skipna=False) == "timedelta" + + arr = np.array([pd.NaT, n, np.timedelta64("nat"), n]) + assert lib.infer_dtype(arr, skipna=False) == "timedelta" + + # datetime / timedelta mixed + arr = np.array([pd.NaT, np.datetime64("nat"), np.timedelta64("nat"), np.nan]) + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + arr = np.array([np.timedelta64("nat"), np.datetime64("nat")], dtype=object) + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + def test_is_datetimelike_array_all_nan_nat_like(self): + arr = np.array([np.nan, pd.NaT, np.datetime64("nat")]) + assert lib.is_datetime_array(arr) + assert lib.is_datetime64_array(arr) + assert not lib.is_timedelta_or_timedelta64_array(arr) + + arr = np.array([np.nan, pd.NaT, np.timedelta64("nat")]) + assert not lib.is_datetime_array(arr) + assert not lib.is_datetime64_array(arr) + assert lib.is_timedelta_or_timedelta64_array(arr) + + arr = np.array([np.nan, pd.NaT, np.datetime64("nat"), np.timedelta64("nat")]) + assert not lib.is_datetime_array(arr) + assert not lib.is_datetime64_array(arr) + assert not lib.is_timedelta_or_timedelta64_array(arr) + + arr = np.array([np.nan, pd.NaT]) + assert lib.is_datetime_array(arr) + assert lib.is_datetime64_array(arr) + assert lib.is_timedelta_or_timedelta64_array(arr) + + arr = np.array([np.nan, np.nan], dtype=object) + assert not lib.is_datetime_array(arr) + assert not lib.is_datetime64_array(arr) + assert not lib.is_timedelta_or_timedelta64_array(arr) + + assert lib.is_datetime_with_singletz_array( + np.array( + [ + Timestamp("20130101", tz="US/Eastern"), + Timestamp("20130102", tz="US/Eastern"), + ], + dtype=object, + ) + ) + assert not lib.is_datetime_with_singletz_array( + np.array( + [ + Timestamp("20130101", tz="US/Eastern"), + Timestamp("20130102", tz="CET"), + ], + dtype=object, + ) + ) + + @pytest.mark.parametrize( + "func", + [ + "is_datetime_array", + "is_datetime64_array", + "is_bool_array", + "is_timedelta_or_timedelta64_array", + "is_date_array", + "is_time_array", + "is_interval_array", + ], + ) + def test_other_dtypes_for_array(self, func): + func = getattr(lib, func) + arr = np.array(["foo", "bar"]) + assert not func(arr) + assert not func(arr.reshape(2, 1)) + + arr = np.array([1, 2]) + assert not func(arr) + assert not func(arr.reshape(2, 1)) + + def test_date(self): + dates = [date(2012, 1, day) for day in range(1, 20)] + index = Index(dates) + assert index.inferred_type == "date" + + dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan] + result = lib.infer_dtype(dates, skipna=False) + assert result == "mixed" + + result = lib.infer_dtype(dates, skipna=True) + assert result == "date" + + @pytest.mark.parametrize( + "values", + [ + [date(2020, 1, 1), Timestamp("2020-01-01")], + [Timestamp("2020-01-01"), date(2020, 1, 1)], + [date(2020, 1, 1), pd.NaT], + [pd.NaT, date(2020, 1, 1)], + ], + ) + @pytest.mark.parametrize("skipna", [True, False]) + 
def test_infer_dtype_date_order_invariant(self, values, skipna): + # https://github.com/pandas-dev/pandas/issues/33741 + result = lib.infer_dtype(values, skipna=skipna) + assert result == "date" + + def test_is_numeric_array(self): + assert lib.is_float_array(np.array([1, 2.0])) + assert lib.is_float_array(np.array([1, 2.0, np.nan])) + assert not lib.is_float_array(np.array([1, 2])) + + assert lib.is_integer_array(np.array([1, 2])) + assert not lib.is_integer_array(np.array([1, 2.0])) + + def test_is_string_array(self): + # We should only be accepting pd.NA, np.nan, + # other floating point nans e.g. float('nan') + # when skipna is True. + assert lib.is_string_array(np.array(["foo", "bar"])) + assert not lib.is_string_array( + np.array(["foo", "bar", pd.NA], dtype=object), skipna=False + ) + assert lib.is_string_array( + np.array(["foo", "bar", pd.NA], dtype=object), skipna=True + ) + # we allow NaN/None in the StringArray constructor, so it's allowed here + assert lib.is_string_array( + np.array(["foo", "bar", None], dtype=object), skipna=True + ) + assert lib.is_string_array( + np.array(["foo", "bar", np.nan], dtype=object), skipna=True + ) + # But not e.g. datetimelike or Decimal NAs + assert not lib.is_string_array( + np.array(["foo", "bar", pd.NaT], dtype=object), skipna=True + ) + assert not lib.is_string_array( + np.array(["foo", "bar", np.datetime64("NaT")], dtype=object), skipna=True + ) + assert not lib.is_string_array( + np.array(["foo", "bar", Decimal("NaN")], dtype=object), skipna=True + ) + + assert not lib.is_string_array( + np.array(["foo", "bar", None], dtype=object), skipna=False + ) + assert not lib.is_string_array( + np.array(["foo", "bar", np.nan], dtype=object), skipna=False + ) + assert not lib.is_string_array(np.array([1, 2])) + + def test_to_object_array_tuples(self): + r = (5, 6) + values = [r] + lib.to_object_array_tuples(values) + + # make sure record array works + record = namedtuple("record", "x y") + r = record(5, 6) + values = [r] + lib.to_object_array_tuples(values) + + def test_object(self): + # GH 7431 + # cannot infer more than this as only a single element + arr = np.array([None], dtype="O") + result = lib.infer_dtype(arr, skipna=False) + assert result == "mixed" + result = lib.infer_dtype(arr, skipna=True) + assert result == "empty" + + def test_to_object_array_width(self): + # see gh-13320 + rows = [[1, 2, 3], [4, 5, 6]] + + expected = np.array(rows, dtype=object) + out = lib.to_object_array(rows) + tm.assert_numpy_array_equal(out, expected) + + expected = np.array(rows, dtype=object) + out = lib.to_object_array(rows, min_width=1) + tm.assert_numpy_array_equal(out, expected) + + expected = np.array( + [[1, 2, 3, None, None], [4, 5, 6, None, None]], dtype=object + ) + out = lib.to_object_array(rows, min_width=5) + tm.assert_numpy_array_equal(out, expected) + + def test_is_period(self): + assert lib.is_period(Period("2011-01", freq="M")) + assert not lib.is_period(PeriodIndex(["2011-01"], freq="M")) + assert not lib.is_period(Timestamp("2011-01")) + assert not lib.is_period(1) + assert not lib.is_period(np.nan) + + def test_categorical(self): + # GH 8974 + arr = Categorical(list("abc")) + result = lib.infer_dtype(arr, skipna=True) + assert result == "categorical" + + result = lib.infer_dtype(Series(arr), skipna=True) + assert result == "categorical" + + arr = Categorical(list("abc"), categories=["cegfab"], ordered=True) + result = lib.infer_dtype(arr, skipna=True) + assert result == "categorical" + + result = lib.infer_dtype(Series(arr), skipna=True) +
assert result == "categorical" + + @pytest.mark.parametrize("asobject", [True, False]) + def test_interval(self, asobject): + idx = pd.IntervalIndex.from_breaks(range(5), closed="both") + if asobject: + idx = idx.astype(object) + + inferred = lib.infer_dtype(idx, skipna=False) + assert inferred == "interval" + + inferred = lib.infer_dtype(idx._data, skipna=False) + assert inferred == "interval" + + inferred = lib.infer_dtype(Series(idx, dtype=idx.dtype), skipna=False) + assert inferred == "interval" + + @pytest.mark.parametrize("value", [Timestamp(0), Timedelta(0), 0, 0.0]) + def test_interval_mismatched_closed(self, value): + first = Interval(value, value, closed="left") + second = Interval(value, value, closed="right") + + # if the closed sides match, we should infer "interval" + arr = np.array([first, first], dtype=object) + assert lib.infer_dtype(arr, skipna=False) == "interval" + + # if the closed sides don't match, we should _not_ get "interval" + arr2 = np.array([first, second], dtype=object) + assert lib.infer_dtype(arr2, skipna=False) == "mixed" + + def test_interval_mismatched_subtype(self): + first = Interval(0, 1, closed="left") + second = Interval(Timestamp(0), Timestamp(1), closed="left") + third = Interval(Timedelta(0), Timedelta(1), closed="left") + + arr = np.array([first, second]) + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + arr = np.array([second, third]) + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + arr = np.array([first, third]) + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + # float vs int subdtypes are compatible + flt_interval = Interval(1.5, 2.5, closed="left") + arr = np.array([first, flt_interval], dtype=object) + assert lib.infer_dtype(arr, skipna=False) == "interval" + + @pytest.mark.parametrize("klass", [pd.array, Series]) + @pytest.mark.parametrize("skipna", [True, False]) + @pytest.mark.parametrize("data", [["a", "b", "c"], ["a", "b", pd.NA]]) + def test_string_dtype(self, data, skipna, klass, nullable_string_dtype): + # StringArray + val = klass(data, dtype=nullable_string_dtype) + inferred = lib.infer_dtype(val, skipna=skipna) + assert inferred == "string" + + @pytest.mark.parametrize("klass", [pd.array, Series]) + @pytest.mark.parametrize("skipna", [True, False]) + @pytest.mark.parametrize("data", [[True, False, True], [True, False, pd.NA]]) + def test_boolean_dtype(self, data, skipna, klass): + # BooleanArray + val = klass(data, dtype="boolean") + inferred = lib.infer_dtype(val, skipna=skipna) + assert inferred == "boolean" + + +class TestNumberScalar: + def test_is_number(self): + assert is_number(True) + assert is_number(1) + assert is_number(1.1) + assert is_number(1 + 3j) + assert is_number(np.int64(1)) + assert is_number(np.float64(1.1)) + assert is_number(np.complex128(1 + 3j)) + assert is_number(np.nan) + + assert not is_number(None) + assert not is_number("x") + assert not is_number(datetime(2011, 1, 1)) + assert not is_number(np.datetime64("2011-01-01")) + assert not is_number(Timestamp("2011-01-01")) + assert not is_number(Timestamp("2011-01-01", tz="US/Eastern")) + assert not is_number(timedelta(1000)) + assert not is_number(Timedelta("1 days")) + + # questionable + assert not is_number(np.bool_(False)) + assert is_number(np.timedelta64(1, "D")) + + def test_is_bool(self): + assert is_bool(True) + assert is_bool(False) + assert is_bool(np.bool_(False)) + + assert not is_bool(1) + assert not is_bool(1.1) + assert not is_bool(1 + 3j) + assert not is_bool(np.int64(1)) + assert not is_bool(np.float64(1.1)) + assert not
is_bool(np.complex128(1 + 3j)) + assert not is_bool(np.nan) + assert not is_bool(None) + assert not is_bool("x") + assert not is_bool(datetime(2011, 1, 1)) + assert not is_bool(np.datetime64("2011-01-01")) + assert not is_bool(Timestamp("2011-01-01")) + assert not is_bool(Timestamp("2011-01-01", tz="US/Eastern")) + assert not is_bool(timedelta(1000)) + assert not is_bool(np.timedelta64(1, "D")) + assert not is_bool(Timedelta("1 days")) + + def test_is_integer(self): + assert is_integer(1) + assert is_integer(np.int64(1)) + + assert not is_integer(True) + assert not is_integer(1.1) + assert not is_integer(1 + 3j) + assert not is_integer(False) + assert not is_integer(np.bool_(False)) + assert not is_integer(np.float64(1.1)) + assert not is_integer(np.complex128(1 + 3j)) + assert not is_integer(np.nan) + assert not is_integer(None) + assert not is_integer("x") + assert not is_integer(datetime(2011, 1, 1)) + assert not is_integer(np.datetime64("2011-01-01")) + assert not is_integer(Timestamp("2011-01-01")) + assert not is_integer(Timestamp("2011-01-01", tz="US/Eastern")) + assert not is_integer(timedelta(1000)) + assert not is_integer(Timedelta("1 days")) + assert not is_integer(np.timedelta64(1, "D")) + + def test_is_float(self): + assert is_float(1.1) + assert is_float(np.float64(1.1)) + assert is_float(np.nan) + + assert not is_float(True) + assert not is_float(1) + assert not is_float(1 + 3j) + assert not is_float(False) + assert not is_float(np.bool_(False)) + assert not is_float(np.int64(1)) + assert not is_float(np.complex128(1 + 3j)) + assert not is_float(None) + assert not is_float("x") + assert not is_float(datetime(2011, 1, 1)) + assert not is_float(np.datetime64("2011-01-01")) + assert not is_float(Timestamp("2011-01-01")) + assert not is_float(Timestamp("2011-01-01", tz="US/Eastern")) + assert not is_float(timedelta(1000)) + assert not is_float(np.timedelta64(1, "D")) + assert not is_float(Timedelta("1 days")) + + def test_is_datetime_dtypes(self): + ts = pd.date_range("20130101", periods=3) + tsa = pd.date_range("20130101", periods=3, tz="US/Eastern") + + msg = "is_datetime64tz_dtype is deprecated" + + assert is_datetime64_dtype("datetime64") + assert is_datetime64_dtype("datetime64[ns]") + assert is_datetime64_dtype(ts) + assert not is_datetime64_dtype(tsa) + + assert not is_datetime64_ns_dtype("datetime64") + assert is_datetime64_ns_dtype("datetime64[ns]") + assert is_datetime64_ns_dtype(ts) + assert is_datetime64_ns_dtype(tsa) + + assert is_datetime64_any_dtype("datetime64") + assert is_datetime64_any_dtype("datetime64[ns]") + assert is_datetime64_any_dtype(ts) + assert is_datetime64_any_dtype(tsa) + + with tm.assert_produces_warning(DeprecationWarning, match=msg): + assert not is_datetime64tz_dtype("datetime64") + assert not is_datetime64tz_dtype("datetime64[ns]") + assert not is_datetime64tz_dtype(ts) + assert is_datetime64tz_dtype(tsa) + + @pytest.mark.parametrize("tz", ["US/Eastern", "UTC"]) + def test_is_datetime_dtypes_with_tz(self, tz): + dtype = f"datetime64[ns, {tz}]" + assert not is_datetime64_dtype(dtype) + + msg = "is_datetime64tz_dtype is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + assert is_datetime64tz_dtype(dtype) + assert is_datetime64_ns_dtype(dtype) + assert is_datetime64_any_dtype(dtype) + + def test_is_timedelta(self): + assert is_timedelta64_dtype("timedelta64") + assert is_timedelta64_dtype("timedelta64[ns]") + assert not is_timedelta64_ns_dtype("timedelta64") + assert is_timedelta64_ns_dtype("timedelta64[ns]") + + 
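# the same split should hold for concrete data: a nanosecond-resolution + # TimedeltaIndex satisfies both the generic and the ns-specific check +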
tdi = TimedeltaIndex([1e14, 2e14], dtype="timedelta64[ns]") + assert is_timedelta64_dtype(tdi) + assert is_timedelta64_ns_dtype(tdi) + assert is_timedelta64_ns_dtype(tdi.astype("timedelta64[ns]")) + + assert not is_timedelta64_ns_dtype(Index([], dtype=np.float64)) + assert not is_timedelta64_ns_dtype(Index([], dtype=np.int64)) + + +class TestIsScalar: + def test_is_scalar_builtin_scalars(self): + assert is_scalar(None) + assert is_scalar(True) + assert is_scalar(False) + assert is_scalar(Fraction()) + assert is_scalar(0.0) + assert is_scalar(1) + assert is_scalar(complex(2)) + assert is_scalar(float("NaN")) + assert is_scalar(np.nan) + assert is_scalar("foobar") + assert is_scalar(b"foobar") + assert is_scalar(datetime(2014, 1, 1)) + assert is_scalar(date(2014, 1, 1)) + assert is_scalar(time(12, 0)) + assert is_scalar(timedelta(hours=1)) + assert is_scalar(pd.NaT) + assert is_scalar(pd.NA) + + def test_is_scalar_builtin_nonscalars(self): + assert not is_scalar({}) + assert not is_scalar([]) + assert not is_scalar([1]) + assert not is_scalar(()) + assert not is_scalar((1,)) + assert not is_scalar(slice(None)) + assert not is_scalar(Ellipsis) + + def test_is_scalar_numpy_array_scalars(self): + assert is_scalar(np.int64(1)) + assert is_scalar(np.float64(1.0)) + assert is_scalar(np.int32(1)) + assert is_scalar(np.complex64(2)) + assert is_scalar(np.object_("foobar")) + assert is_scalar(np.str_("foobar")) + assert is_scalar(np.bytes_(b"foobar")) + assert is_scalar(np.datetime64("2014-01-01")) + assert is_scalar(np.timedelta64(1, "h")) + + @pytest.mark.parametrize( + "zerodim", + [ + np.array(1), + np.array("foobar"), + np.array(np.datetime64("2014-01-01")), + np.array(np.timedelta64(1, "h")), + np.array(np.datetime64("NaT")), + ], + ) + def test_is_scalar_numpy_zerodim_arrays(self, zerodim): + assert not is_scalar(zerodim) + assert is_scalar(lib.item_from_zerodim(zerodim)) + + @pytest.mark.parametrize("arr", [np.array([]), np.array([[]])]) + def test_is_scalar_numpy_arrays(self, arr): + assert not is_scalar(arr) + assert not is_scalar(MockNumpyLikeArray(arr)) + + def test_is_scalar_pandas_scalars(self): + assert is_scalar(Timestamp("2014-01-01")) + assert is_scalar(Timedelta(hours=1)) + assert is_scalar(Period("2014-01-01")) + assert is_scalar(Interval(left=0, right=1)) + assert is_scalar(DateOffset(days=1)) + assert is_scalar(pd.offsets.Minute(3)) + + def test_is_scalar_pandas_containers(self): + assert not is_scalar(Series(dtype=object)) + assert not is_scalar(Series([1])) + assert not is_scalar(DataFrame()) + assert not is_scalar(DataFrame([[1]])) + assert not is_scalar(Index([])) + assert not is_scalar(Index([1])) + assert not is_scalar(Categorical([])) + assert not is_scalar(DatetimeIndex([])._data) + assert not is_scalar(TimedeltaIndex([])._data) + assert not is_scalar(DatetimeIndex([])._data.to_period("D")) + assert not is_scalar(pd.array([1, 2, 3])) + + def test_is_scalar_number(self): + # Number() is not recognized by PyNumber_Check, so by extension + # is not recognized by is_scalar, but instances of non-abstract + # subclasses are.
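+ # (a bare Number() instance would presumably fail is_scalar for the + # same reason, while the concrete Numeric subclass below, which + # implements __int__, passes)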
+ + class Numeric(Number): + def __init__(self, value) -> None: + self.value = value + + def __int__(self) -> int: + return self.value + + num = Numeric(1) + assert is_scalar(num) + + +@pytest.mark.parametrize("unit", ["ms", "us", "ns"]) +def test_datetimeindex_from_empty_datetime64_array(unit): + idx = DatetimeIndex(np.array([], dtype=f"datetime64[{unit}]")) + assert len(idx) == 0 + + +def test_nan_to_nat_conversions(): + df = DataFrame( + {"A": np.asarray(range(10), dtype="float64"), "B": Timestamp("20010101")} + ) + df.iloc[3:6, :] = np.nan + result = df.loc[4, "B"] + assert result is pd.NaT + + s = df["B"].copy() + s[8:9] = np.nan + assert s[8] is pd.NaT + + +@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning") +def test_is_scipy_sparse(spmatrix): + pytest.importorskip("scipy") + assert is_scipy_sparse(spmatrix([[0, 1]])) + assert not is_scipy_sparse(np.array([1])) + + +def test_ensure_int32(): + values = np.arange(10, dtype=np.int32) + result = ensure_int32(values) + assert result.dtype == np.int32 + + values = np.arange(10, dtype=np.int64) + result = ensure_int32(values) + assert result.dtype == np.int32 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_missing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_missing.py new file mode 100644 index 00000000..451ac2af --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/dtypes/test_missing.py @@ -0,0 +1,908 @@ +from contextlib import nullcontext +from datetime import datetime +from decimal import Decimal + +import numpy as np +import pytest + +from pandas._config import config as cf + +from pandas._libs import missing as libmissing +from pandas._libs.tslibs import iNaT +from pandas.compat.numpy import np_version_gte1p25 + +from pandas.core.dtypes.common import ( + is_float, + is_scalar, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + IntervalDtype, + PeriodDtype, +) +from pandas.core.dtypes.missing import ( + array_equivalent, + is_valid_na_for_dtype, + isna, + isnull, + na_value_for_dtype, + notna, + notnull, +) + +import pandas as pd +from pandas import ( + DatetimeIndex, + Index, + NaT, + Series, + TimedeltaIndex, + date_range, +) +import pandas._testing as tm + +fix_now = pd.Timestamp("2021-01-01") +fix_utcnow = pd.Timestamp("2021-01-01", tz="UTC") + + +@pytest.mark.parametrize("notna_f", [notna, notnull]) +def test_notna_notnull(notna_f): + assert notna_f(1.0) + assert not notna_f(None) + assert not notna_f(np.nan) + + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with cf.option_context("mode.use_inf_as_na", False): + assert notna_f(np.inf) + assert notna_f(-np.inf) + + arr = np.array([1.5, np.inf, 3.5, -np.inf]) + result = notna_f(arr) + assert result.all() + + with tm.assert_produces_warning(FutureWarning, match=msg): + with cf.option_context("mode.use_inf_as_na", True): + assert not notna_f(np.inf) + assert not notna_f(-np.inf) + + arr = np.array([1.5, np.inf, 3.5, -np.inf]) + result = notna_f(arr) + assert result.sum() == 2 + + +@pytest.mark.parametrize("null_func", [notna, notnull, isna, isnull]) +@pytest.mark.parametrize( + "ser", + [ + tm.makeFloatSeries(), + tm.makeStringSeries(), + tm.makeObjectSeries(), + tm.makeTimeSeries(), + tm.makePeriodSeries(), + ], +) +def test_null_check_is_series(null_func, ser): + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with 
cf.option_context("mode.use_inf_as_na", False): + assert isinstance(null_func(ser), Series) + + +class TestIsNA: + def test_0d_array(self): + assert isna(np.array(np.nan)) + assert not isna(np.array(0.0)) + assert not isna(np.array(0)) + # test object dtype + assert isna(np.array(np.nan, dtype=object)) + assert not isna(np.array(0.0, dtype=object)) + assert not isna(np.array(0, dtype=object)) + + @pytest.mark.parametrize("shape", [(4, 0), (4,)]) + def test_empty_object(self, shape): + arr = np.empty(shape=shape, dtype=object) + result = isna(arr) + expected = np.ones(shape=shape, dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("isna_f", [isna, isnull]) + def test_isna_isnull(self, isna_f): + assert not isna_f(1.0) + assert isna_f(None) + assert isna_f(np.nan) + assert float("nan") + assert not isna_f(np.inf) + assert not isna_f(-np.inf) + + # type + assert not isna_f(type(Series(dtype=object))) + assert not isna_f(type(Series(dtype=np.float64))) + assert not isna_f(type(pd.DataFrame())) + + @pytest.mark.parametrize("isna_f", [isna, isnull]) + @pytest.mark.parametrize( + "df", + [ + tm.makeTimeDataFrame(), + tm.makePeriodFrame(), + tm.makeMixedDataFrame(), + ], + ) + def test_isna_isnull_frame(self, isna_f, df): + # frame + result = isna_f(df) + expected = df.apply(isna_f) + tm.assert_frame_equal(result, expected) + + def test_isna_lists(self): + result = isna([[False]]) + exp = np.array([[False]]) + tm.assert_numpy_array_equal(result, exp) + + result = isna([[1], [2]]) + exp = np.array([[False], [False]]) + tm.assert_numpy_array_equal(result, exp) + + # list of strings / unicode + result = isna(["foo", "bar"]) + exp = np.array([False, False]) + tm.assert_numpy_array_equal(result, exp) + + result = isna(["foo", "bar"]) + exp = np.array([False, False]) + tm.assert_numpy_array_equal(result, exp) + + # GH20675 + result = isna([np.nan, "world"]) + exp = np.array([True, False]) + tm.assert_numpy_array_equal(result, exp) + + def test_isna_nat(self): + result = isna([NaT]) + exp = np.array([True]) + tm.assert_numpy_array_equal(result, exp) + + result = isna(np.array([NaT], dtype=object)) + exp = np.array([True]) + tm.assert_numpy_array_equal(result, exp) + + def test_isna_numpy_nat(self): + arr = np.array( + [ + NaT, + np.datetime64("NaT"), + np.timedelta64("NaT"), + np.datetime64("NaT", "s"), + ] + ) + result = isna(arr) + expected = np.array([True] * 4) + tm.assert_numpy_array_equal(result, expected) + + def test_isna_datetime(self): + assert not isna(datetime.now()) + assert notna(datetime.now()) + + idx = date_range("1/1/1990", periods=20) + exp = np.ones(len(idx), dtype=bool) + tm.assert_numpy_array_equal(notna(idx), exp) + + idx = np.asarray(idx) + idx[0] = iNaT + idx = DatetimeIndex(idx) + mask = isna(idx) + assert mask[0] + exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool) + tm.assert_numpy_array_equal(mask, exp) + + # GH 9129 + pidx = idx.to_period(freq="M") + mask = isna(pidx) + assert mask[0] + exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool) + tm.assert_numpy_array_equal(mask, exp) + + mask = isna(pidx[1:]) + exp = np.zeros(len(mask), dtype=bool) + tm.assert_numpy_array_equal(mask, exp) + + def test_isna_old_datetimelike(self): + # isna_old should work for dt64tz, td64, and period, not just tznaive + dti = date_range("2016-01-01", periods=3) + dta = dti._data + dta[-1] = NaT + expected = np.array([False, False, True], dtype=bool) + + objs = [dta, dta.tz_localize("US/Eastern"), dta - dta, dta.to_period("D")] + + for obj 
in objs: + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with cf.option_context("mode.use_inf_as_na", True): + result = isna(obj) + + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "value, expected", + [ + (np.complex128(np.nan), True), + (np.float64(1), False), + (np.array([1, 1 + 0j, np.nan, 3]), np.array([False, False, True, False])), + ( + np.array([1, 1 + 0j, np.nan, 3], dtype=object), + np.array([False, False, True, False]), + ), + ( + np.array([1, 1 + 0j, np.nan, 3]).astype(object), + np.array([False, False, True, False]), + ), + ], + ) + def test_complex(self, value, expected): + result = isna(value) + if is_scalar(result): + assert result is expected + else: + tm.assert_numpy_array_equal(result, expected) + + def test_datetime_other_units(self): + idx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-02"]) + exp = np.array([False, True, False]) + tm.assert_numpy_array_equal(isna(idx), exp) + tm.assert_numpy_array_equal(notna(idx), ~exp) + tm.assert_numpy_array_equal(isna(idx.values), exp) + tm.assert_numpy_array_equal(notna(idx.values), ~exp) + + @pytest.mark.parametrize( + "dtype", + [ + "datetime64[D]", + "datetime64[h]", + "datetime64[m]", + "datetime64[s]", + "datetime64[ms]", + "datetime64[us]", + "datetime64[ns]", + ], + ) + def test_datetime_other_units_astype(self, dtype): + idx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-02"]) + values = idx.values.astype(dtype) + + exp = np.array([False, True, False]) + tm.assert_numpy_array_equal(isna(values), exp) + tm.assert_numpy_array_equal(notna(values), ~exp) + + exp = Series([False, True, False]) + s = Series(values) + tm.assert_series_equal(isna(s), exp) + tm.assert_series_equal(notna(s), ~exp) + s = Series(values, dtype=object) + tm.assert_series_equal(isna(s), exp) + tm.assert_series_equal(notna(s), ~exp) + + def test_timedelta_other_units(self): + idx = TimedeltaIndex(["1 days", "NaT", "2 days"]) + exp = np.array([False, True, False]) + tm.assert_numpy_array_equal(isna(idx), exp) + tm.assert_numpy_array_equal(notna(idx), ~exp) + tm.assert_numpy_array_equal(isna(idx.values), exp) + tm.assert_numpy_array_equal(notna(idx.values), ~exp) + + @pytest.mark.parametrize( + "dtype", + [ + "timedelta64[D]", + "timedelta64[h]", + "timedelta64[m]", + "timedelta64[s]", + "timedelta64[ms]", + "timedelta64[us]", + "timedelta64[ns]", + ], + ) + def test_timedelta_other_units_dtype(self, dtype): + idx = TimedeltaIndex(["1 days", "NaT", "2 days"]) + values = idx.values.astype(dtype) + + exp = np.array([False, True, False]) + tm.assert_numpy_array_equal(isna(values), exp) + tm.assert_numpy_array_equal(notna(values), ~exp) + + exp = Series([False, True, False]) + s = Series(values) + tm.assert_series_equal(isna(s), exp) + tm.assert_series_equal(notna(s), ~exp) + s = Series(values, dtype=object) + tm.assert_series_equal(isna(s), exp) + tm.assert_series_equal(notna(s), ~exp) + + def test_period(self): + idx = pd.PeriodIndex(["2011-01", "NaT", "2012-01"], freq="M") + exp = np.array([False, True, False]) + tm.assert_numpy_array_equal(isna(idx), exp) + tm.assert_numpy_array_equal(notna(idx), ~exp) + + exp = Series([False, True, False]) + s = Series(idx) + tm.assert_series_equal(isna(s), exp) + tm.assert_series_equal(notna(s), ~exp) + s = Series(idx, dtype=object) + tm.assert_series_equal(isna(s), exp) + tm.assert_series_equal(notna(s), ~exp) + + def test_decimal(self): + # scalars GH#23530 + a = Decimal(1.0) + assert isna(a) is False + assert notna(a) is True + 
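# A minimal standalone sketch, not part of the vendored pandas tests, of the
# behavior test_decimal pins down; it assumes only pandas and the stdlib
# decimal module, both already imported at the top of this file.
# Decimal("NaN") is not a float, so NA detection has to recognize its NaN
# semantics directly rather than rely on a float check.
from decimal import Decimal
import pandas as pd

dec_nan = Decimal("NaN")
assert dec_nan != dec_nan  # NaN never compares equal to itself
assert not isinstance(dec_nan, float)  # a float-only check would miss it
assert pd.isna(dec_nan)  # pandas still classifies it as missing
assert pd.notna(Decimal("1.5"))  # ordinary Decimals are not missing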
+ b = Decimal("NaN") + assert isna(b) is True + assert notna(b) is False + + # array + arr = np.array([a, b]) + expected = np.array([False, True]) + result = isna(arr) + tm.assert_numpy_array_equal(result, expected) + + result = notna(arr) + tm.assert_numpy_array_equal(result, ~expected) + + # series + ser = Series(arr) + expected = Series(expected) + result = isna(ser) + tm.assert_series_equal(result, expected) + + result = notna(ser) + tm.assert_series_equal(result, ~expected) + + # index + idx = Index(arr) + expected = np.array([False, True]) + result = isna(idx) + tm.assert_numpy_array_equal(result, expected) + + result = notna(idx) + tm.assert_numpy_array_equal(result, ~expected) + + +@pytest.mark.parametrize("dtype_equal", [True, False]) +def test_array_equivalent(dtype_equal): + assert array_equivalent( + np.array([np.nan, np.nan]), np.array([np.nan, np.nan]), dtype_equal=dtype_equal + ) + assert array_equivalent( + np.array([np.nan, 1, np.nan]), + np.array([np.nan, 1, np.nan]), + dtype_equal=dtype_equal, + ) + assert array_equivalent( + np.array([np.nan, None], dtype="object"), + np.array([np.nan, None], dtype="object"), + dtype_equal=dtype_equal, + ) + # Check the handling of nested arrays in array_equivalent_object + assert array_equivalent( + np.array([np.array([np.nan, None], dtype="object"), None], dtype="object"), + np.array([np.array([np.nan, None], dtype="object"), None], dtype="object"), + dtype_equal=dtype_equal, + ) + assert array_equivalent( + np.array([np.nan, 1 + 1j], dtype="complex"), + np.array([np.nan, 1 + 1j], dtype="complex"), + dtype_equal=dtype_equal, + ) + assert not array_equivalent( + np.array([np.nan, 1 + 1j], dtype="complex"), + np.array([np.nan, 1 + 2j], dtype="complex"), + dtype_equal=dtype_equal, + ) + assert not array_equivalent( + np.array([np.nan, 1, np.nan]), + np.array([np.nan, 2, np.nan]), + dtype_equal=dtype_equal, + ) + assert not array_equivalent( + np.array(["a", "b", "c", "d"]), np.array(["e", "e"]), dtype_equal=dtype_equal + ) + assert array_equivalent( + Index([0, np.nan]), Index([0, np.nan]), dtype_equal=dtype_equal + ) + assert not array_equivalent( + Index([0, np.nan]), Index([1, np.nan]), dtype_equal=dtype_equal + ) + assert array_equivalent( + DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan]), dtype_equal=dtype_equal + ) + assert not array_equivalent( + DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]), dtype_equal=dtype_equal + ) + assert array_equivalent( + TimedeltaIndex([0, np.nan]), + TimedeltaIndex([0, np.nan]), + dtype_equal=dtype_equal, + ) + assert not array_equivalent( + TimedeltaIndex([0, np.nan]), + TimedeltaIndex([1, np.nan]), + dtype_equal=dtype_equal, + ) + + dti1 = DatetimeIndex([0, np.nan], tz="US/Eastern") + dti2 = DatetimeIndex([0, np.nan], tz="CET") + dti3 = DatetimeIndex([1, np.nan], tz="US/Eastern") + + assert array_equivalent( + dti1, + dti1, + dtype_equal=dtype_equal, + ) + assert not array_equivalent( + dti1, + dti3, + dtype_equal=dtype_equal, + ) + # The rest are not dtype_equal + assert not array_equivalent(DatetimeIndex([0, np.nan]), dti1) + assert array_equivalent( + dti2, + dti1, + ) + + assert not array_equivalent(DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan])) + + +@pytest.mark.parametrize( + "val", [1, 1.1, 1 + 1j, True, "abc", [1, 2], (1, 2), {1, 2}, {"a": 1}, None] +) +def test_array_equivalent_series(val): + arr = np.array([1, 2]) + msg = "elementwise comparison failed" + cm = ( + # stacklevel is chosen to make sense when called from .equals + 
tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False) + if isinstance(val, str) and not np_version_gte1p25 + else nullcontext() + ) + with cm: + assert not array_equivalent(Series([arr, arr]), Series([arr, val])) + + +def test_array_equivalent_array_mismatched_shape(): + # to trigger the motivating bug, the first N elements of the arrays need + # to match + first = np.array([1, 2, 3]) + second = np.array([1, 2]) + + left = Series([first, "a"], dtype=object) + right = Series([second, "a"], dtype=object) + assert not array_equivalent(left, right) + + +def test_array_equivalent_array_mismatched_dtype(): + # same shape, different dtype can still be equivalent + first = np.array([1, 2], dtype=np.float64) + second = np.array([1, 2]) + + left = Series([first, "a"], dtype=object) + right = Series([second, "a"], dtype=object) + assert array_equivalent(left, right) + + +def test_array_equivalent_different_dtype_but_equal(): + # Unclear if this is exposed anywhere in the public-facing API + assert array_equivalent(np.array([1, 2]), np.array([1.0, 2.0])) + + +@pytest.mark.parametrize( + "lvalue, rvalue", + [ + # There are 3 variants for each of lvalue and rvalue. We include all + # three for the tz-naive `now` and exclude the datetime64 variant + # for utcnow because it drops tzinfo. + (fix_now, fix_utcnow), + (fix_now.to_datetime64(), fix_utcnow), + (fix_now.to_pydatetime(), fix_utcnow), + (fix_now, fix_utcnow.to_pydatetime()), + (fix_now.to_datetime64(), fix_utcnow.to_pydatetime()), + (fix_now.to_pydatetime(), fix_utcnow.to_pydatetime()), + ], +) +def test_array_equivalent_tzawareness(lvalue, rvalue): + # we shouldn't raise if comparing tzaware and tznaive datetimes + left = np.array([lvalue], dtype=object) + right = np.array([rvalue], dtype=object) + + assert not array_equivalent(left, right, strict_nan=True) + assert not array_equivalent(left, right, strict_nan=False) + + +def test_array_equivalent_compat(): + # see gh-13388 + m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)]) + n = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)]) + assert array_equivalent(m, n, strict_nan=True) + assert array_equivalent(m, n, strict_nan=False) + + m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)]) + n = np.array([(1, 2), (4, 3)], dtype=[("a", int), ("b", float)]) + assert not array_equivalent(m, n, strict_nan=True) + assert not array_equivalent(m, n, strict_nan=False) + + m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)]) + n = np.array([(1, 2), (3, 4)], dtype=[("b", int), ("a", float)]) + assert not array_equivalent(m, n, strict_nan=True) + assert not array_equivalent(m, n, strict_nan=False) + + +@pytest.mark.parametrize("dtype", ["O", "S", "U"]) +def test_array_equivalent_str(dtype): + assert array_equivalent( + np.array(["A", "B"], dtype=dtype), np.array(["A", "B"], dtype=dtype) + ) + assert not array_equivalent( + np.array(["A", "B"], dtype=dtype), np.array(["A", "X"], dtype=dtype) + ) + + +@pytest.mark.parametrize( + "strict_nan", [pytest.param(True, marks=pytest.mark.xfail), False] +) +def test_array_equivalent_nested(strict_nan): + # reached in groupby aggregations, make sure we use np.any when checking + # if the comparison is truthy + left = np.array([np.array([50, 70, 90]), np.array([20, 30])], dtype=object) + right = np.array([np.array([50, 70, 90]), np.array([20, 30])], dtype=object) + + assert array_equivalent(left, right, strict_nan=strict_nan) + assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) + + left = np.empty(2,
dtype=object) + left[:] = [np.array([50, 70, 90]), np.array([20, 30, 40])] + right = np.empty(2, dtype=object) + right[:] = [np.array([50, 70, 90]), np.array([20, 30, 40])] + assert array_equivalent(left, right, strict_nan=strict_nan) + assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) + + left = np.array([np.array([50, 50, 50]), np.array([40, 40])], dtype=object) + right = np.array([50, 40]) + assert not array_equivalent(left, right, strict_nan=strict_nan) + + +@pytest.mark.filterwarnings("ignore:elementwise comparison failed:DeprecationWarning") +@pytest.mark.parametrize( + "strict_nan", [pytest.param(True, marks=pytest.mark.xfail), False] +) +def test_array_equivalent_nested2(strict_nan): + # more than one level of nesting + left = np.array( + [ + np.array([np.array([50, 70]), np.array([90])], dtype=object), + np.array([np.array([20, 30])], dtype=object), + ], + dtype=object, + ) + right = np.array( + [ + np.array([np.array([50, 70]), np.array([90])], dtype=object), + np.array([np.array([20, 30])], dtype=object), + ], + dtype=object, + ) + assert array_equivalent(left, right, strict_nan=strict_nan) + assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) + + left = np.array([np.array([np.array([50, 50, 50])], dtype=object)], dtype=object) + right = np.array([50]) + assert not array_equivalent(left, right, strict_nan=strict_nan) + + +@pytest.mark.parametrize( + "strict_nan", [pytest.param(True, marks=pytest.mark.xfail), False] +) +def test_array_equivalent_nested_list(strict_nan): + left = np.array([[50, 70, 90], [20, 30]], dtype=object) + right = np.array([[50, 70, 90], [20, 30]], dtype=object) + + assert array_equivalent(left, right, strict_nan=strict_nan) + assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) + + left = np.array([[50, 50, 50], [40, 40]], dtype=object) + right = np.array([50, 40]) + assert not array_equivalent(left, right, strict_nan=strict_nan) + + +@pytest.mark.filterwarnings("ignore:elementwise comparison failed:DeprecationWarning") +@pytest.mark.xfail(reason="failing") +@pytest.mark.parametrize("strict_nan", [True, False]) +def test_array_equivalent_nested_mixed_list(strict_nan): + # mixed arrays / lists in left and right + # https://github.com/pandas-dev/pandas/issues/50360 + left = np.array([np.array([1, 2, 3]), np.array([4, 5])], dtype=object) + right = np.array([[1, 2, 3], [4, 5]], dtype=object) + + assert array_equivalent(left, right, strict_nan=strict_nan) + assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) + + # multiple levels of nesting + left = np.array( + [ + np.array([np.array([1, 2, 3]), np.array([4, 5])], dtype=object), + np.array([np.array([6]), np.array([7, 8]), np.array([9])], dtype=object), + ], + dtype=object, + ) + right = np.array([[[1, 2, 3], [4, 5]], [[6], [7, 8], [9]]], dtype=object) + assert array_equivalent(left, right, strict_nan=strict_nan) + assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) + + # same-length lists + subarr = np.empty(2, dtype=object) + subarr[:] = [ + np.array([None, "b"], dtype=object), + np.array(["c", "d"], dtype=object), + ] + left = np.array([subarr, None], dtype=object) + right = np.array([[[None, "b"], ["c", "d"]], None], dtype=object) + assert array_equivalent(left, right, strict_nan=strict_nan) + assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) + + +@pytest.mark.xfail(reason="failing") +@pytest.mark.parametrize("strict_nan", [True, False]) +def test_array_equivalent_nested_dicts(strict_nan): + left = 
np.array([{"f1": 1, "f2": np.array(["a", "b"], dtype=object)}], dtype=object) + right = np.array( + [{"f1": 1, "f2": np.array(["a", "b"], dtype=object)}], dtype=object + ) + assert array_equivalent(left, right, strict_nan=strict_nan) + assert not array_equivalent(left, right[::-1], strict_nan=strict_nan) + + right2 = np.array([{"f1": 1, "f2": ["a", "b"]}], dtype=object) + assert array_equivalent(left, right2, strict_nan=strict_nan) + assert not array_equivalent(left, right2[::-1], strict_nan=strict_nan) + + +def test_array_equivalent_index_with_tuples(): + # GH#48446 + idx1 = Index(np.array([(pd.NA, 4), (1, 1)], dtype="object")) + idx2 = Index(np.array([(1, 1), (pd.NA, 4)], dtype="object")) + assert not array_equivalent(idx1, idx2) + assert not idx1.equals(idx2) + assert not array_equivalent(idx2, idx1) + assert not idx2.equals(idx1) + + idx1 = Index(np.array([(4, pd.NA), (1, 1)], dtype="object")) + idx2 = Index(np.array([(1, 1), (4, pd.NA)], dtype="object")) + assert not array_equivalent(idx1, idx2) + assert not idx1.equals(idx2) + assert not array_equivalent(idx2, idx1) + assert not idx2.equals(idx1) + + +@pytest.mark.parametrize( + "dtype, na_value", + [ + # Datetime-like + (np.dtype("M8[ns]"), np.datetime64("NaT", "ns")), + (np.dtype("m8[ns]"), np.timedelta64("NaT", "ns")), + (DatetimeTZDtype.construct_from_string("datetime64[ns, US/Eastern]"), NaT), + (PeriodDtype("M"), NaT), + # Integer + ("u1", 0), + ("u2", 0), + ("u4", 0), + ("u8", 0), + ("i1", 0), + ("i2", 0), + ("i4", 0), + ("i8", 0), + # Bool + ("bool", False), + # Float + ("f2", np.nan), + ("f4", np.nan), + ("f8", np.nan), + # Object + ("O", np.nan), + # Interval + (IntervalDtype(), np.nan), + ], +) +def test_na_value_for_dtype(dtype, na_value): + result = na_value_for_dtype(pandas_dtype(dtype)) + # identify check doesn't work for datetime64/timedelta64("NaT") bc they + # are not singletons + assert result is na_value or ( + isna(result) and isna(na_value) and type(result) is type(na_value) + ) + + +class TestNAObj: + def _check_behavior(self, arr, expected): + result = libmissing.isnaobj(arr) + tm.assert_numpy_array_equal(result, expected) + result = libmissing.isnaobj(arr, inf_as_na=True) + tm.assert_numpy_array_equal(result, expected) + + arr = np.atleast_2d(arr) + expected = np.atleast_2d(expected) + + result = libmissing.isnaobj(arr) + tm.assert_numpy_array_equal(result, expected) + result = libmissing.isnaobj(arr, inf_as_na=True) + tm.assert_numpy_array_equal(result, expected) + + # Test fortran order + arr = arr.copy(order="F") + result = libmissing.isnaobj(arr) + tm.assert_numpy_array_equal(result, expected) + result = libmissing.isnaobj(arr, inf_as_na=True) + tm.assert_numpy_array_equal(result, expected) + + def test_basic(self): + arr = np.array([1, None, "foo", -5.1, NaT, np.nan]) + expected = np.array([False, True, False, False, True, True]) + + self._check_behavior(arr, expected) + + def test_non_obj_dtype(self): + arr = np.array([1, 3, np.nan, 5], dtype=float) + expected = np.array([False, False, True, False]) + + self._check_behavior(arr, expected) + + def test_empty_arr(self): + arr = np.array([]) + expected = np.array([], dtype=bool) + + self._check_behavior(arr, expected) + + def test_empty_str_inp(self): + arr = np.array([""]) # empty but not na + expected = np.array([False]) + + self._check_behavior(arr, expected) + + def test_empty_like(self): + # see gh-13717: no segfaults! 
+ arr = np.empty_like([None]) + expected = np.array([True]) + + self._check_behavior(arr, expected) + + +m8_units = ["as", "ps", "ns", "us", "ms", "s", "m", "h", "D", "W", "M", "Y"] + +na_vals = ( + [ + None, + NaT, + float("NaN"), + complex("NaN"), + np.nan, + np.float64("NaN"), + np.float32("NaN"), + np.complex64(np.nan), + np.complex128(np.nan), + np.datetime64("NaT"), + np.timedelta64("NaT"), + ] + + [np.datetime64("NaT", unit) for unit in m8_units] + + [np.timedelta64("NaT", unit) for unit in m8_units] +) + +inf_vals = [ + float("inf"), + float("-inf"), + complex("inf"), + complex("-inf"), + np.inf, + -np.inf, +] + +int_na_vals = [ + # Values that match iNaT, which we treat as null in specific cases + np.int64(NaT._value), + int(NaT._value), +] + +sometimes_na_vals = [Decimal("NaN")] + +never_na_vals = [ + # float/complex values that when viewed as int64 match iNaT + -0.0, + np.float64("-0.0"), + -0j, + np.complex64(-0j), +] + + +class TestLibMissing: + @pytest.mark.parametrize("func", [libmissing.checknull, isna]) + @pytest.mark.parametrize( + "value", na_vals + sometimes_na_vals # type: ignore[operator] + ) + def test_checknull_na_vals(self, func, value): + assert func(value) + + @pytest.mark.parametrize("func", [libmissing.checknull, isna]) + @pytest.mark.parametrize("value", inf_vals) + def test_checknull_inf_vals(self, func, value): + assert not func(value) + + @pytest.mark.parametrize("func", [libmissing.checknull, isna]) + @pytest.mark.parametrize("value", int_na_vals) + def test_checknull_intna_vals(self, func, value): + assert not func(value) + + @pytest.mark.parametrize("func", [libmissing.checknull, isna]) + @pytest.mark.parametrize("value", never_na_vals) + def test_checknull_never_na_vals(self, func, value): + assert not func(value) + + @pytest.mark.parametrize( + "value", na_vals + sometimes_na_vals # type: ignore[operator] + ) + def test_checknull_old_na_vals(self, value): + assert libmissing.checknull(value, inf_as_na=True) + + @pytest.mark.parametrize("value", inf_vals) + def test_checknull_old_inf_vals(self, value): + assert libmissing.checknull(value, inf_as_na=True) + + @pytest.mark.parametrize("value", int_na_vals) + def test_checknull_old_intna_vals(self, value): + assert not libmissing.checknull(value, inf_as_na=True) + + @pytest.mark.parametrize("value", int_na_vals) + def test_checknull_old_never_na_vals(self, value): + assert not libmissing.checknull(value, inf_as_na=True) + + def test_is_matching_na(self, nulls_fixture, nulls_fixture2): + left = nulls_fixture + right = nulls_fixture2 + + assert libmissing.is_matching_na(left, left) + + if left is right: + assert libmissing.is_matching_na(left, right) + elif is_float(left) and is_float(right): + # np.nan vs float("NaN") we consider as matching + assert libmissing.is_matching_na(left, right) + elif type(left) is type(right): + # e.g. 
both Decimal("NaN") + assert libmissing.is_matching_na(left, right) + else: + assert not libmissing.is_matching_na(left, right) + + def test_is_matching_na_nan_matches_none(self): + assert not libmissing.is_matching_na(None, np.nan) + assert not libmissing.is_matching_na(np.nan, None) + + assert libmissing.is_matching_na(None, np.nan, nan_matches_none=True) + assert libmissing.is_matching_na(np.nan, None, nan_matches_none=True) + + +class TestIsValidNAForDtype: + def test_is_valid_na_for_dtype_interval(self): + dtype = IntervalDtype("int64", "left") + assert not is_valid_na_for_dtype(NaT, dtype) + + dtype = IntervalDtype("datetime64[ns]", "both") + assert not is_valid_na_for_dtype(NaT, dtype) + + def test_is_valid_na_for_dtype_categorical(self): + dtype = CategoricalDtype(categories=[0, 1, 2]) + assert is_valid_na_for_dtype(np.nan, dtype) + + assert not is_valid_na_for_dtype(NaT, dtype) + assert not is_valid_na_for_dtype(np.datetime64("NaT", "ns"), dtype) + assert not is_valid_na_for_dtype(np.timedelta64("NaT", "ns"), dtype) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/__init__.py new file mode 100644 index 00000000..49da6af0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/__init__.py @@ -0,0 +1,6 @@ +from pandas.tests.extension.array_with_attr.array import ( + FloatAttrArray, + FloatAttrDtype, +) + +__all__ = ["FloatAttrArray", "FloatAttrDtype"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/array.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/array.py new file mode 100644 index 00000000..4e40b6d0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/array.py @@ -0,0 +1,86 @@ +""" +Test extension array that has custom attribute information (not stored on the dtype). + +""" +from __future__ import annotations + +import numbers +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.core.dtypes.base import ExtensionDtype + +import pandas as pd +from pandas.core.arrays import ExtensionArray + +if TYPE_CHECKING: + from pandas._typing import type_t + + +class FloatAttrDtype(ExtensionDtype): + type = float + name = "float_attr" + na_value = np.nan + + @classmethod + def construct_array_type(cls) -> type_t[FloatAttrArray]: + """ + Return the array type associated with this dtype. 
+ + Returns + ------- + type + """ + return FloatAttrArray + + +class FloatAttrArray(ExtensionArray): + dtype = FloatAttrDtype() + __array_priority__ = 1000 + + def __init__(self, values, attr=None) -> None: + if not isinstance(values, np.ndarray): + raise TypeError("Need to pass a numpy array of float64 dtype as values") + if not values.dtype == "float64": + raise TypeError("Need to pass a numpy array of float64 dtype as values") + self.data = values + self.attr = attr + + @classmethod + def _from_sequence(cls, scalars, dtype=None, copy=False): + data = np.array(scalars, dtype="float64", copy=copy) + return cls(data) + + def __getitem__(self, item): + if isinstance(item, numbers.Integral): + return self.data[item] + else: + # slice, list-like, mask + item = pd.api.indexers.check_array_indexer(self, item) + return type(self)(self.data[item], self.attr) + + def __len__(self) -> int: + return len(self.data) + + def isna(self): + return np.isnan(self.data) + + def take(self, indexer, allow_fill=False, fill_value=None): + from pandas.api.extensions import take + + data = self.data + if allow_fill and fill_value is None: + fill_value = self.dtype.na_value + + result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill) + return type(self)(result, self.attr) + + def copy(self): + return type(self)(self.data.copy(), self.attr) + + @classmethod + def _concat_same_type(cls, to_concat): + data = np.concatenate([x.data for x in to_concat]) + attr = to_concat[0].attr if len(to_concat) else None + return cls(data, attr) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/test_array_with_attr.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/test_array_with_attr.py new file mode 100644 index 00000000..3735fe40 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/array_with_attr/test_array_with_attr.py @@ -0,0 +1,33 @@ +import numpy as np + +import pandas as pd +import pandas._testing as tm +from pandas.tests.extension.array_with_attr import FloatAttrArray + + +def test_concat_with_all_na(): + # https://github.com/pandas-dev/pandas/pull/47762 + # ensure that attribute of the column array is preserved (when it gets + # preserved in reindexing the array) during merge/concat + arr = FloatAttrArray(np.array([np.nan, np.nan], dtype="float64"), attr="test") + + df1 = pd.DataFrame({"col": arr, "key": [0, 1]}) + df2 = pd.DataFrame({"key": [0, 1], "col2": [1, 2]}) + result = pd.merge(df1, df2, on="key") + expected = pd.DataFrame({"col": arr, "key": [0, 1], "col2": [1, 2]}) + tm.assert_frame_equal(result, expected) + assert result["col"].array.attr == "test" + + df1 = pd.DataFrame({"col": arr, "key": [0, 1]}) + df2 = pd.DataFrame({"key": [0, 2], "col2": [1, 2]}) + result = pd.merge(df1, df2, on="key") + expected = pd.DataFrame({"col": arr.take([0]), "key": [0], "col2": [1]}) + tm.assert_frame_equal(result, expected) + assert result["col"].array.attr == "test" + + result = pd.concat([df1.set_index("key"), df2.set_index("key")], axis=1) + expected = pd.DataFrame( + {"col": arr.take([0, 1, -1]), "col2": [1, np.nan, 2], "key": [0, 1, 2]} + ).set_index("key") + tm.assert_frame_equal(result, expected) + assert result["col"].array.attr == "test" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/__init__.py new file mode 100644 index 00000000..7cd55b72 --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/__init__.py @@ -0,0 +1,94 @@ +""" +Base test suite for extension arrays. + +These tests are intended for third-party libraries to subclass to validate +that their extension arrays and dtypes satisfy the interface. Moving or +renaming the tests should not be done lightly. + +Libraries are expected to implement a few pytest fixtures to provide data +for the tests. The fixtures may be located in either + +* The same module as your test class. +* A ``conftest.py`` in the same directory as your test class. + +The full list of fixtures may be found in the ``conftest.py`` next to this +file. + +.. code-block:: python + + import pytest + from pandas.tests.extension.base import BaseDtypeTests + + + @pytest.fixture + def dtype(): + return MyDtype() + + + class TestMyDtype(BaseDtypeTests): + pass + + +Your class ``TestMyDtype`` will inherit all the tests defined on +``BaseDtypeTests``. pytest's fixture discovery will supply your ``dtype`` +wherever the test requires it. You're free to implement additional tests. + +""" +from pandas.tests.extension.base.accumulate import BaseAccumulateTests +from pandas.tests.extension.base.casting import BaseCastingTests +from pandas.tests.extension.base.constructors import BaseConstructorsTests +from pandas.tests.extension.base.dim2 import ( # noqa: F401 + Dim2CompatTests, + NDArrayBacked2DTests, +) +from pandas.tests.extension.base.dtype import BaseDtypeTests +from pandas.tests.extension.base.getitem import BaseGetitemTests +from pandas.tests.extension.base.groupby import BaseGroupbyTests +from pandas.tests.extension.base.index import BaseIndexTests +from pandas.tests.extension.base.interface import BaseInterfaceTests +from pandas.tests.extension.base.io import BaseParsingTests +from pandas.tests.extension.base.methods import BaseMethodsTests +from pandas.tests.extension.base.missing import BaseMissingTests +from pandas.tests.extension.base.ops import ( # noqa: F401 + BaseArithmeticOpsTests, + BaseComparisonOpsTests, + BaseOpsUtil, + BaseUnaryOpsTests, +) +from pandas.tests.extension.base.printing import BasePrintingTests +from pandas.tests.extension.base.reduce import ( # noqa: F401 + BaseBooleanReduceTests, + BaseNoReduceTests, + BaseNumericReduceTests, + BaseReduceTests, +) +from pandas.tests.extension.base.reshaping import BaseReshapingTests +from pandas.tests.extension.base.setitem import BaseSetitemTests + + +# One test class that you can inherit as an alternative to inheriting all the +# test classes above. +# Note 1) this excludes Dim2CompatTests and NDArrayBacked2DTests.
+# Note 2) this uses BaseReduceTests and _not_ BaseBooleanReduceTests, +# BaseNoReduceTests, or BaseNumericReduceTests +class ExtensionTests( + BaseAccumulateTests, + BaseCastingTests, + BaseConstructorsTests, + BaseDtypeTests, + BaseGetitemTests, + BaseGroupbyTests, + BaseIndexTests, + BaseInterfaceTests, + BaseParsingTests, + BaseMethodsTests, + BaseMissingTests, + BaseArithmeticOpsTests, + BaseComparisonOpsTests, + BaseUnaryOpsTests, + BasePrintingTests, + BaseReduceTests, + BaseReshapingTests, + BaseSetitemTests, +): + pass diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/accumulate.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/accumulate.py new file mode 100644 index 00000000..4648f661 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/accumulate.py @@ -0,0 +1,41 @@ +import pytest + +import pandas as pd +import pandas._testing as tm + + +class BaseAccumulateTests: + """ + Accumulation specific tests. Generally these only + make sense for numeric/boolean operations. + """ + + def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool: + # Do we expect this accumulation to be supported for this dtype? + # We default to assuming "no"; subclass authors should override here. + return False + + def check_accumulate(self, ser: pd.Series, op_name: str, skipna: bool): + alt = ser.astype("float64") + result = getattr(ser, op_name)(skipna=skipna) + + if result.dtype == pd.Float32Dtype() and op_name == "cumprod" and skipna: + # TODO: avoid special-casing here + pytest.skip( + f"Float32 precision leads to large differences with op {op_name} " + f"and skipna={skipna}" + ) + + expected = getattr(alt, op_name)(skipna=skipna) + tm.assert_series_equal(result, expected, check_dtype=False) + + @pytest.mark.parametrize("skipna", [True, False]) + def test_accumulate_series(self, data, all_numeric_accumulations, skipna): + op_name = all_numeric_accumulations + ser = pd.Series(data) + + if self._supports_accumulation(ser, op_name): + self.check_accumulate(ser, op_name, skipna) + else: + with pytest.raises(NotImplementedError): + getattr(ser, op_name)(skipna=skipna) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/base.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/base.py new file mode 100644 index 00000000..747ebee7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/base.py @@ -0,0 +1,2 @@ +class BaseExtensionTests: + pass diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/casting.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/casting.py new file mode 100644 index 00000000..2bfe801c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/casting.py @@ -0,0 +1,87 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm +from pandas.core.internals.blocks import NumpyBlock + + +class BaseCastingTests: + """Casting to and from ExtensionDtypes""" + + def test_astype_object_series(self, all_data): + ser = pd.Series(all_data, name="A") + result = ser.astype(object) + assert result.dtype == np.dtype(object) + if hasattr(result._mgr, "blocks"): + blk = result._mgr.blocks[0] + assert isinstance(blk, NumpyBlock) + assert blk.is_object + assert isinstance(result._mgr.array, np.ndarray) + assert result._mgr.array.dtype == np.dtype(object) + + def
test_astype_object_frame(self, all_data): + df = pd.DataFrame({"A": all_data}) + + result = df.astype(object) + if hasattr(result._mgr, "blocks"): + blk = result._mgr.blocks[0] + assert isinstance(blk, NumpyBlock), type(blk) + assert blk.is_object + assert isinstance(result._mgr.arrays[0], np.ndarray) + assert result._mgr.arrays[0].dtype == np.dtype(object) + + # check that we can compare the dtypes + comp = result.dtypes == df.dtypes + assert not comp.any() + + def test_tolist(self, data): + result = pd.Series(data).tolist() + expected = list(data) + assert result == expected + + def test_astype_str(self, data): + result = pd.Series(data[:5]).astype(str) + expected = pd.Series([str(x) for x in data[:5]], dtype=str) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "nullable_string_dtype", + [ + "string[python]", + pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")), + ], + ) + def test_astype_string(self, data, nullable_string_dtype): + # GH-33465, GH#45326 as of 2.0 we decode bytes instead of calling str(obj) + result = pd.Series(data[:5]).astype(nullable_string_dtype) + expected = pd.Series( + [str(x) if not isinstance(x, bytes) else x.decode() for x in data[:5]], + dtype=nullable_string_dtype, + ) + tm.assert_series_equal(result, expected) + + def test_to_numpy(self, data): + expected = np.asarray(data) + + result = data.to_numpy() + tm.assert_equal(result, expected) + + result = pd.Series(data).to_numpy() + tm.assert_equal(result, expected) + + def test_astype_empty_dataframe(self, dtype): + # https://github.com/pandas-dev/pandas/issues/33113 + df = pd.DataFrame() + result = df.astype(dtype) + tm.assert_frame_equal(result, df) + + @pytest.mark.parametrize("copy", [True, False]) + def test_astype_own_type(self, data, copy): + # ensure that astype returns the original object for equal dtype and copy=False + # https://github.com/pandas-dev/pandas/issues/28488 + result = data.astype(data.dtype, copy=copy) + assert (result is data) is (not copy) + tm.assert_extension_array_equal(result, data) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/constructors.py new file mode 100644 index 00000000..8828f33b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/constructors.py @@ -0,0 +1,142 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.api.extensions import ExtensionArray +from pandas.core.internals.blocks import EABackedBlock + + +class BaseConstructorsTests: + def test_from_sequence_from_cls(self, data): + result = type(data)._from_sequence(data, dtype=data.dtype) + tm.assert_extension_array_equal(result, data) + + data = data[:0] + result = type(data)._from_sequence(data, dtype=data.dtype) + tm.assert_extension_array_equal(result, data) + + def test_array_from_scalars(self, data): + scalars = [data[0], data[1], data[2]] + result = data._from_sequence(scalars) + assert isinstance(result, type(data)) + + def test_series_constructor(self, data): + result = pd.Series(data, copy=False) + assert result.dtype == data.dtype + assert len(result) == len(data) + if hasattr(result._mgr, "blocks"): + assert isinstance(result._mgr.blocks[0], EABackedBlock) + assert result._mgr.array is data + + # Series[EA] is unboxed / boxed correctly + result2 = pd.Series(result) + assert result2.dtype == data.dtype + if hasattr(result._mgr, "blocks"): + assert 
isinstance(result2._mgr.blocks[0], EABackedBlock) + + def test_series_constructor_no_data_with_index(self, dtype, na_value): + result = pd.Series(index=[1, 2, 3], dtype=dtype) + expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype) + tm.assert_series_equal(result, expected) + + # GH 33559 - empty index + result = pd.Series(index=[], dtype=dtype) + expected = pd.Series([], index=pd.Index([], dtype="object"), dtype=dtype) + tm.assert_series_equal(result, expected) + + def test_series_constructor_scalar_na_with_index(self, dtype, na_value): + result = pd.Series(na_value, index=[1, 2, 3], dtype=dtype) + expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype) + tm.assert_series_equal(result, expected) + + def test_series_constructor_scalar_with_index(self, data, dtype): + scalar = data[0] + result = pd.Series(scalar, index=[1, 2, 3], dtype=dtype) + expected = pd.Series([scalar] * 3, index=[1, 2, 3], dtype=dtype) + tm.assert_series_equal(result, expected) + + result = pd.Series(scalar, index=["foo"], dtype=dtype) + expected = pd.Series([scalar], index=["foo"], dtype=dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("from_series", [True, False]) + def test_dataframe_constructor_from_dict(self, data, from_series): + if from_series: + data = pd.Series(data) + result = pd.DataFrame({"A": data}) + assert result.dtypes["A"] == data.dtype + assert result.shape == (len(data), 1) + if hasattr(result._mgr, "blocks"): + assert isinstance(result._mgr.blocks[0], EABackedBlock) + assert isinstance(result._mgr.arrays[0], ExtensionArray) + + def test_dataframe_from_series(self, data): + result = pd.DataFrame(pd.Series(data)) + assert result.dtypes[0] == data.dtype + assert result.shape == (len(data), 1) + if hasattr(result._mgr, "blocks"): + assert isinstance(result._mgr.blocks[0], EABackedBlock) + assert isinstance(result._mgr.arrays[0], ExtensionArray) + + def test_series_given_mismatched_index_raises(self, data): + msg = r"Length of values \(3\) does not match length of index \(5\)" + with pytest.raises(ValueError, match=msg): + pd.Series(data[:3], index=[0, 1, 2, 3, 4]) + + def test_from_dtype(self, data): + # construct from our dtype & string dtype + dtype = data.dtype + + expected = pd.Series(data) + result = pd.Series(list(data), dtype=dtype) + tm.assert_series_equal(result, expected) + + result = pd.Series(list(data), dtype=str(dtype)) + tm.assert_series_equal(result, expected) + + # gh-30280 + + expected = pd.DataFrame(data).astype(dtype) + result = pd.DataFrame(list(data), dtype=dtype) + tm.assert_frame_equal(result, expected) + + result = pd.DataFrame(list(data), dtype=str(dtype)) + tm.assert_frame_equal(result, expected) + + def test_pandas_array(self, data): + # pd.array(extension_array) should be idempotent... + result = pd.array(data) + tm.assert_extension_array_equal(result, data) + + def test_pandas_array_dtype(self, data): + # ... 
but specifying dtype will override idempotency + result = pd.array(data, dtype=np.dtype(object)) + expected = pd.arrays.NumpyExtensionArray(np.asarray(data, dtype=object)) + tm.assert_equal(result, expected) + + def test_construct_empty_dataframe(self, dtype): + # GH 33623 + result = pd.DataFrame(columns=["a"], dtype=dtype) + expected = pd.DataFrame( + {"a": pd.array([], dtype=dtype)}, index=pd.RangeIndex(0) + ) + tm.assert_frame_equal(result, expected) + + def test_empty(self, dtype): + cls = dtype.construct_array_type() + result = cls._empty((4,), dtype=dtype) + assert isinstance(result, cls) + assert result.dtype == dtype + assert result.shape == (4,) + + # GH#19600 method on ExtensionDtype + result2 = dtype.empty((4,)) + assert isinstance(result2, cls) + assert result2.dtype == dtype + assert result2.shape == (4,) + + result2 = dtype.empty(4) + assert isinstance(result2, cls) + assert result2.dtype == dtype + assert result2.shape == (4,) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/dim2.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/dim2.py new file mode 100644 index 00000000..bff4fbd7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/dim2.py @@ -0,0 +1,334 @@ +""" +Tests for 2D compatibility. +""" +import numpy as np +import pytest + +from pandas._libs.missing import is_matching_na + +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_integer_dtype, +) + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE + + +class Dim2CompatTests: + # Note: these are ONLY for ExtensionArray subclasses that support 2D arrays. + # i.e. not for pyarrow-backed EAs. + + def test_transpose(self, data): + arr2d = data.repeat(2).reshape(-1, 2) + shape = arr2d.shape + assert shape[0] != shape[-1] # otherwise the rest of the test is useless + + assert arr2d.T.shape == shape[::-1] + + def test_frame_from_2d_array(self, data): + arr2d = data.repeat(2).reshape(-1, 2) + + df = pd.DataFrame(arr2d) + expected = pd.DataFrame({0: arr2d[:, 0], 1: arr2d[:, 1]}) + tm.assert_frame_equal(df, expected) + + def test_swapaxes(self, data): + arr2d = data.repeat(2).reshape(-1, 2) + + result = arr2d.swapaxes(0, 1) + expected = arr2d.T + tm.assert_extension_array_equal(result, expected) + + def test_delete_2d(self, data): + arr2d = data.repeat(3).reshape(-1, 3) + + # axis = 0 + result = arr2d.delete(1, axis=0) + expected = data.delete(1).repeat(3).reshape(-1, 3) + tm.assert_extension_array_equal(result, expected) + + # axis = 1 + result = arr2d.delete(1, axis=1) + expected = data.repeat(2).reshape(-1, 2) + tm.assert_extension_array_equal(result, expected) + + def test_take_2d(self, data): + arr2d = data.reshape(-1, 1) + + result = arr2d.take([0, 0, -1], axis=0) + + expected = data.take([0, 0, -1]).reshape(-1, 1) + tm.assert_extension_array_equal(result, expected) + + def test_repr_2d(self, data): + # this could fail in a corner case where an element contained the name + res = repr(data.reshape(1, -1)) + assert res.count(f"<{type(data).__name__}") == 1 + + res = repr(data.reshape(-1, 1)) + assert res.count(f"<{type(data).__name__}") == 1 + + def test_reshape(self, data): + arr2d = data.reshape(-1, 1) + assert arr2d.shape == (data.size, 1) + assert len(arr2d) == len(data) + + arr2d = data.reshape((-1, 1)) + assert arr2d.shape == (data.size, 1) + assert len(arr2d) == len(data) + + with pytest.raises(ValueError): + data.reshape((data.size, 2)) + with 
pytest.raises(ValueError): + data.reshape(data.size, 2) + + def test_getitem_2d(self, data): + arr2d = data.reshape(1, -1) + + result = arr2d[0] + tm.assert_extension_array_equal(result, data) + + with pytest.raises(IndexError): + arr2d[1] + + with pytest.raises(IndexError): + arr2d[-2] + + result = arr2d[:] + tm.assert_extension_array_equal(result, arr2d) + + result = arr2d[:, :] + tm.assert_extension_array_equal(result, arr2d) + + result = arr2d[:, 0] + expected = data[[0]] + tm.assert_extension_array_equal(result, expected) + + # dimension-expanding getitem on 1D + result = data[:, np.newaxis] + tm.assert_extension_array_equal(result, arr2d.T) + + def test_iter_2d(self, data): + arr2d = data.reshape(1, -1) + + objs = list(iter(arr2d)) + assert len(objs) == arr2d.shape[0] + + for obj in objs: + assert isinstance(obj, type(data)) + assert obj.dtype == data.dtype + assert obj.ndim == 1 + assert len(obj) == arr2d.shape[1] + + def test_tolist_2d(self, data): + arr2d = data.reshape(1, -1) + + result = arr2d.tolist() + expected = [data.tolist()] + + assert isinstance(result, list) + assert all(isinstance(x, list) for x in result) + + assert result == expected + + def test_concat_2d(self, data): + left = type(data)._concat_same_type([data, data]).reshape(-1, 2) + right = left.copy() + + # axis=0 + result = left._concat_same_type([left, right], axis=0) + expected = data._concat_same_type([data] * 4).reshape(-1, 2) + tm.assert_extension_array_equal(result, expected) + + # axis=1 + result = left._concat_same_type([left, right], axis=1) + assert result.shape == (len(data), 4) + tm.assert_extension_array_equal(result[:, :2], left) + tm.assert_extension_array_equal(result[:, 2:], right) + + # axis > 1 -> invalid + msg = "axis 2 is out of bounds for array of dimension 2" + with pytest.raises(ValueError, match=msg): + left._concat_same_type([left, right], axis=2) + + @pytest.mark.parametrize("method", ["backfill", "pad"]) + def test_fillna_2d_method(self, data_missing, method): + # pad_or_backfill is always along axis=0 + arr = data_missing.repeat(2).reshape(2, 2) + assert arr[0].isna().all() + assert not arr[1].isna().any() + + result = arr._pad_or_backfill(method=method, limit=None) + + expected = data_missing._pad_or_backfill(method=method).repeat(2).reshape(2, 2) + tm.assert_extension_array_equal(result, expected) + + # Reverse so that backfill is not a no-op. 
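# (Why the flip matters: the data_missing fixture is ordered [NA, valid], so
# after repeat(2).reshape(2, 2) the all-NA row sits on top. "pad" forward
# fills down axis 0 and has nothing above row 0 to copy, while "backfill"
# pulls row 1 up into row 0. Reversing the rows swaps which method actually
# moves data, so both fill directions get exercised.)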
+ arr2 = arr[::-1] + assert not arr2[0].isna().any() + assert arr2[1].isna().all() + + result2 = arr2._pad_or_backfill(method=method, limit=None) + + expected2 = ( + data_missing[::-1]._pad_or_backfill(method=method).repeat(2).reshape(2, 2) + ) + tm.assert_extension_array_equal(result2, expected2) + + @pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"]) + def test_reductions_2d_axis_none(self, data, method): + arr2d = data.reshape(1, -1) + + err_expected = None + err_result = None + try: + expected = getattr(data, method)() + except Exception as err: + # if the 1D reduction is invalid, the 2D reduction should be as well + err_expected = err + try: + result = getattr(arr2d, method)(axis=None) + except Exception as err2: + err_result = err2 + + else: + result = getattr(arr2d, method)(axis=None) + + if err_result is not None or err_expected is not None: + assert type(err_result) == type(err_expected) + return + + assert is_matching_na(result, expected) or result == expected + + @pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"]) + @pytest.mark.parametrize("min_count", [0, 1]) + def test_reductions_2d_axis0(self, data, method, min_count): + if min_count == 1 and method not in ["sum", "prod"]: + pytest.skip(f"min_count not relevant for {method}") + + arr2d = data.reshape(1, -1) + + kwargs = {} + if method in ["std", "var"]: + # pass ddof=0 so we get all-zero std instead of all-NA std + kwargs["ddof"] = 0 + elif method in ["prod", "sum"]: + kwargs["min_count"] = min_count + + try: + result = getattr(arr2d, method)(axis=0, **kwargs) + except Exception as err: + try: + getattr(data, method)() + except Exception as err2: + assert type(err) == type(err2) + return + else: + raise AssertionError("Both reductions should raise or neither") + + def get_reduction_result_dtype(dtype): + # windows and 32bit builds will in some cases have int32/uint32 + # where other builds will have int64/uint64. + if dtype.itemsize == 8: + return dtype + elif dtype.kind in "ib": + return NUMPY_INT_TO_DTYPE[np.dtype(int)] + else: + # i.e. 
dtype.kind == "u" + return NUMPY_INT_TO_DTYPE[np.dtype("uint")] + + if method in ["sum", "prod"]: + # std and var are not dtype-preserving + expected = data + if data.dtype.kind in "iub": + dtype = get_reduction_result_dtype(data.dtype) + expected = data.astype(dtype) + assert dtype == expected.dtype + + if min_count == 0: + fill_value = 1 if method == "prod" else 0 + expected = expected.fillna(fill_value) + + tm.assert_extension_array_equal(result, expected) + elif method == "median": + # std and var are not dtype-preserving + expected = data + tm.assert_extension_array_equal(result, expected) + elif method in ["mean", "std", "var"]: + if is_integer_dtype(data) or is_bool_dtype(data): + data = data.astype("Float64") + if method == "mean": + tm.assert_extension_array_equal(result, data) + else: + tm.assert_extension_array_equal(result, data - data) + + @pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"]) + def test_reductions_2d_axis1(self, data, method): + arr2d = data.reshape(1, -1) + + try: + result = getattr(arr2d, method)(axis=1) + except Exception as err: + try: + getattr(data, method)() + except Exception as err2: + assert type(err) == type(err2) + return + else: + raise AssertionError("Both reductions should raise or neither") + + # not necessarily type/dtype-preserving, so weaker assertions + assert result.shape == (1,) + expected_scalar = getattr(data, method)() + res = result[0] + assert is_matching_na(res, expected_scalar) or res == expected_scalar + + +class NDArrayBacked2DTests(Dim2CompatTests): + # More specific tests for NDArrayBackedExtensionArray subclasses + + def test_copy_order(self, data): + # We should be matching numpy semantics for the "order" keyword in 'copy' + arr2d = data.repeat(2).reshape(-1, 2) + assert arr2d._ndarray.flags["C_CONTIGUOUS"] + + res = arr2d.copy() + assert res._ndarray.flags["C_CONTIGUOUS"] + + res = arr2d[::2, ::2].copy() + assert res._ndarray.flags["C_CONTIGUOUS"] + + res = arr2d.copy("F") + assert not res._ndarray.flags["C_CONTIGUOUS"] + assert res._ndarray.flags["F_CONTIGUOUS"] + + res = arr2d.copy("K") + assert res._ndarray.flags["C_CONTIGUOUS"] + + res = arr2d.T.copy("K") + assert not res._ndarray.flags["C_CONTIGUOUS"] + assert res._ndarray.flags["F_CONTIGUOUS"] + + # order not accepted by numpy + msg = r"order must be one of 'C', 'F', 'A', or 'K' \(got 'Q'\)" + with pytest.raises(ValueError, match=msg): + arr2d.copy("Q") + + # neither contiguity + arr_nc = arr2d[::2] + assert not arr_nc._ndarray.flags["C_CONTIGUOUS"] + assert not arr_nc._ndarray.flags["F_CONTIGUOUS"] + + assert arr_nc.copy()._ndarray.flags["C_CONTIGUOUS"] + assert not arr_nc.copy()._ndarray.flags["F_CONTIGUOUS"] + + assert arr_nc.copy("C")._ndarray.flags["C_CONTIGUOUS"] + assert not arr_nc.copy("C")._ndarray.flags["F_CONTIGUOUS"] + + assert not arr_nc.copy("F")._ndarray.flags["C_CONTIGUOUS"] + assert arr_nc.copy("F")._ndarray.flags["F_CONTIGUOUS"] + + assert arr_nc.copy("K")._ndarray.flags["C_CONTIGUOUS"] + assert not arr_nc.copy("K")._ndarray.flags["F_CONTIGUOUS"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/dtype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/dtype.py new file mode 100644 index 00000000..5ba65cea --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/dtype.py @@ -0,0 +1,118 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.api.types import ( + infer_dtype, + is_object_dtype, + 
is_string_dtype, +) + + +class BaseDtypeTests: + """Base class for ExtensionDtype classes""" + + def test_name(self, dtype): + assert isinstance(dtype.name, str) + + def test_kind(self, dtype): + valid = set("biufcmMOSUV") + assert dtype.kind in valid + + def test_is_dtype_from_name(self, dtype): + result = type(dtype).is_dtype(dtype.name) + assert result is True + + def test_is_dtype_unboxes_dtype(self, data, dtype): + assert dtype.is_dtype(data) is True + + def test_is_dtype_from_self(self, dtype): + result = type(dtype).is_dtype(dtype) + assert result is True + + def test_is_dtype_other_input(self, dtype): + assert dtype.is_dtype([1, 2, 3]) is False + + def test_is_not_string_type(self, dtype): + assert not is_string_dtype(dtype) + + def test_is_not_object_type(self, dtype): + assert not is_object_dtype(dtype) + + def test_eq_with_str(self, dtype): + assert dtype == dtype.name + assert dtype != dtype.name + "-suffix" + + def test_eq_with_numpy_object(self, dtype): + assert dtype != np.dtype("object") + + def test_eq_with_self(self, dtype): + assert dtype == dtype + assert dtype != object() + + def test_array_type(self, data, dtype): + assert dtype.construct_array_type() is type(data) + + def test_check_dtype(self, data): + dtype = data.dtype + + # check equivalency for using .dtypes + df = pd.DataFrame( + {"A": pd.Series(data, dtype=dtype), "B": data, "C": "foo", "D": 1} + ) + result = df.dtypes == str(dtype) + assert np.dtype("int64") != "Int64" + + expected = pd.Series([True, True, False, False], index=list("ABCD")) + + tm.assert_series_equal(result, expected) + + expected = pd.Series([True, True, False, False], index=list("ABCD")) + result = df.dtypes.apply(str) == str(dtype) + tm.assert_series_equal(result, expected) + + def test_hashable(self, dtype): + hash(dtype) # no error + + def test_str(self, dtype): + assert str(dtype) == dtype.name + + def test_eq(self, dtype): + assert dtype == dtype.name + assert dtype != "anonther_type" + + def test_construct_from_string_own_name(self, dtype): + result = dtype.construct_from_string(dtype.name) + assert type(result) is type(dtype) + + # check OK as classmethod + result = type(dtype).construct_from_string(dtype.name) + assert type(result) is type(dtype) + + def test_construct_from_string_another_type_raises(self, dtype): + msg = f"Cannot construct a '{type(dtype).__name__}' from 'another_type'" + with pytest.raises(TypeError, match=msg): + type(dtype).construct_from_string("another_type") + + def test_construct_from_string_wrong_type_raises(self, dtype): + with pytest.raises( + TypeError, + match="'construct_from_string' expects a string, got ", + ): + type(dtype).construct_from_string(0) + + def test_get_common_dtype(self, dtype): + # in practice we will not typically call this with a 1-length list + # (we shortcut to just use that dtype as the common dtype), but + # still testing as good practice to have this working (and it is the + # only case we can test in general) + assert dtype._get_common_dtype([dtype]) == dtype + + @pytest.mark.parametrize("skipna", [True, False]) + def test_infer_dtype(self, data, data_missing, skipna): + # only testing that this works without raising an error + res = infer_dtype(data, skipna=skipna) + assert isinstance(res, str) + res = infer_dtype(data_missing, skipna=skipna) + assert isinstance(res, str) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/getitem.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/getitem.py new file mode 100644 index 
00000000..5f0c1b96 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/getitem.py @@ -0,0 +1,469 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +class BaseGetitemTests: + """Tests for ExtensionArray.__getitem__.""" + + def test_iloc_series(self, data): + ser = pd.Series(data) + result = ser.iloc[:4] + expected = pd.Series(data[:4]) + tm.assert_series_equal(result, expected) + + result = ser.iloc[[0, 1, 2, 3]] + tm.assert_series_equal(result, expected) + + def test_iloc_frame(self, data): + df = pd.DataFrame({"A": data, "B": np.arange(len(data), dtype="int64")}) + expected = pd.DataFrame({"A": data[:4]}) + + # slice -> frame + result = df.iloc[:4, [0]] + tm.assert_frame_equal(result, expected) + + # sequence -> frame + result = df.iloc[[0, 1, 2, 3], [0]] + tm.assert_frame_equal(result, expected) + + expected = pd.Series(data[:4], name="A") + + # slice -> series + result = df.iloc[:4, 0] + tm.assert_series_equal(result, expected) + + # sequence -> series + result = df.iloc[:4, 0] + tm.assert_series_equal(result, expected) + + # GH#32959 slice columns with step + result = df.iloc[:, ::2] + tm.assert_frame_equal(result, df[["A"]]) + result = df[["B", "A"]].iloc[:, ::2] + tm.assert_frame_equal(result, df[["B"]]) + + def test_iloc_frame_single_block(self, data): + # GH#32959 null slice along index, slice along columns with single-block + df = pd.DataFrame({"A": data}) + + result = df.iloc[:, :] + tm.assert_frame_equal(result, df) + + result = df.iloc[:, :1] + tm.assert_frame_equal(result, df) + + result = df.iloc[:, :2] + tm.assert_frame_equal(result, df) + + result = df.iloc[:, ::2] + tm.assert_frame_equal(result, df) + + result = df.iloc[:, 1:2] + tm.assert_frame_equal(result, df.iloc[:, :0]) + + result = df.iloc[:, -1:] + tm.assert_frame_equal(result, df) + + def test_loc_series(self, data): + ser = pd.Series(data) + result = ser.loc[:3] + expected = pd.Series(data[:4]) + tm.assert_series_equal(result, expected) + + result = ser.loc[[0, 1, 2, 3]] + tm.assert_series_equal(result, expected) + + def test_loc_frame(self, data): + df = pd.DataFrame({"A": data, "B": np.arange(len(data), dtype="int64")}) + expected = pd.DataFrame({"A": data[:4]}) + + # slice -> frame + result = df.loc[:3, ["A"]] + tm.assert_frame_equal(result, expected) + + # sequence -> frame + result = df.loc[[0, 1, 2, 3], ["A"]] + tm.assert_frame_equal(result, expected) + + expected = pd.Series(data[:4], name="A") + + # slice -> series + result = df.loc[:3, "A"] + tm.assert_series_equal(result, expected) + + # sequence -> series + result = df.loc[:3, "A"] + tm.assert_series_equal(result, expected) + + def test_loc_iloc_frame_single_dtype(self, data): + # GH#27110 bug in ExtensionBlock.iget caused df.iloc[n] to incorrectly + # return a scalar + df = pd.DataFrame({"A": data}) + expected = pd.Series([data[2]], index=["A"], name=2, dtype=data.dtype) + + result = df.loc[2] + tm.assert_series_equal(result, expected) + + expected = pd.Series( + [data[-1]], index=["A"], name=len(data) - 1, dtype=data.dtype + ) + result = df.iloc[-1] + tm.assert_series_equal(result, expected) + + def test_getitem_scalar(self, data): + result = data[0] + assert isinstance(result, data.dtype.type) + + result = pd.Series(data)[0] + assert isinstance(result, data.dtype.type) + + def test_getitem_invalid(self, data): + # TODO: box over scalar, [scalar], (scalar,)? 
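+        # Two failure modes, hedged across EA backends:
+        # (1) non-integer keys ("foo", 2.5) raise IndexError with the
+        #     numpy-style "only integers, slices ..." message;
+        # (2) out-of-bounds integers (ub + 1, -ub - 1) raise IndexError,
+        #     where the message text differs per backend (json, pyarrow,
+        #     Sparse, numpy), hence the joined regex alternatives below.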
+ + msg = ( + r"only integers, slices \(`:`\), ellipsis \(`...`\), numpy.newaxis " + r"\(`None`\) and integer or boolean arrays are valid indices" + ) + with pytest.raises(IndexError, match=msg): + data["foo"] + with pytest.raises(IndexError, match=msg): + data[2.5] + + ub = len(data) + msg = "|".join( + [ + "list index out of range", # json + "index out of bounds", # pyarrow + "Out of bounds access", # Sparse + f"loc must be an integer between -{ub} and {ub}", # Sparse + f"index {ub+1} is out of bounds for axis 0 with size {ub}", + f"index -{ub+1} is out of bounds for axis 0 with size {ub}", + ] + ) + with pytest.raises(IndexError, match=msg): + data[ub + 1] + with pytest.raises(IndexError, match=msg): + data[-ub - 1] + + def test_getitem_scalar_na(self, data_missing, na_cmp, na_value): + result = data_missing[0] + assert na_cmp(result, na_value) + + def test_getitem_empty(self, data): + # Indexing with empty list + result = data[[]] + assert len(result) == 0 + assert isinstance(result, type(data)) + + expected = data[np.array([], dtype="int64")] + tm.assert_extension_array_equal(result, expected) + + def test_getitem_mask(self, data): + # Empty mask, raw array + mask = np.zeros(len(data), dtype=bool) + result = data[mask] + assert len(result) == 0 + assert isinstance(result, type(data)) + + # Empty mask, in series + mask = np.zeros(len(data), dtype=bool) + result = pd.Series(data)[mask] + assert len(result) == 0 + assert result.dtype == data.dtype + + # non-empty mask, raw array + mask[0] = True + result = data[mask] + assert len(result) == 1 + assert isinstance(result, type(data)) + + # non-empty mask, in series + result = pd.Series(data)[mask] + assert len(result) == 1 + assert result.dtype == data.dtype + + def test_getitem_mask_raises(self, data): + mask = np.array([True, False]) + msg = f"Boolean index has wrong length: 2 instead of {len(data)}" + with pytest.raises(IndexError, match=msg): + data[mask] + + mask = pd.array(mask, dtype="boolean") + with pytest.raises(IndexError, match=msg): + data[mask] + + def test_getitem_boolean_array_mask(self, data): + mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean") + result = data[mask] + assert len(result) == 0 + assert isinstance(result, type(data)) + + result = pd.Series(data)[mask] + assert len(result) == 0 + assert result.dtype == data.dtype + + mask[:5] = True + expected = data.take([0, 1, 2, 3, 4]) + result = data[mask] + tm.assert_extension_array_equal(result, expected) + + expected = pd.Series(expected) + result = pd.Series(data)[mask] + tm.assert_series_equal(result, expected) + + def test_getitem_boolean_na_treated_as_false(self, data): + # https://github.com/pandas-dev/pandas/issues/31503 + mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean") + mask[:2] = pd.NA + mask[2:4] = True + + result = data[mask] + expected = data[mask.fillna(False)] + + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(data) + + result = s[mask] + expected = s[mask.fillna(False)] + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "idx", + [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])], + ids=["list", "integer-array", "numpy-array"], + ) + def test_getitem_integer_array(self, data, idx): + result = data[idx] + assert len(result) == 3 + assert isinstance(result, type(data)) + expected = data.take([0, 1, 2]) + tm.assert_extension_array_equal(result, expected) + + expected = pd.Series(expected) + result = pd.Series(data)[idx] + tm.assert_series_equal(result, 
expected) + + @pytest.mark.parametrize( + "idx", + [[0, 1, 2, pd.NA], pd.array([0, 1, 2, pd.NA], dtype="Int64")], + ids=["list", "integer-array"], + ) + def test_getitem_integer_with_missing_raises(self, data, idx): + msg = "Cannot index with an integer indexer containing NA values" + with pytest.raises(ValueError, match=msg): + data[idx] + + @pytest.mark.xfail( + reason="Tries label-based and raises KeyError; " + "in some cases raises when calling np.asarray" + ) + @pytest.mark.parametrize( + "idx", + [[0, 1, 2, pd.NA], pd.array([0, 1, 2, pd.NA], dtype="Int64")], + ids=["list", "integer-array"], + ) + def test_getitem_series_integer_with_missing_raises(self, data, idx): + msg = "Cannot index with an integer indexer containing NA values" + # TODO: this raises KeyError about labels not found (it tries label-based) + + ser = pd.Series(data, index=[chr(100 + i) for i in range(len(data))]) + with pytest.raises(ValueError, match=msg): + ser[idx] + + def test_getitem_slice(self, data): + # getitem[slice] should return an array + result = data[slice(0)] # empty + assert isinstance(result, type(data)) + + result = data[slice(1)] # scalar + assert isinstance(result, type(data)) + + def test_getitem_ellipsis_and_slice(self, data): + # GH#40353 this is called from slice_block_rows + result = data[..., :] + tm.assert_extension_array_equal(result, data) + + result = data[:, ...] + tm.assert_extension_array_equal(result, data) + + result = data[..., :3] + tm.assert_extension_array_equal(result, data[:3]) + + result = data[:3, ...] + tm.assert_extension_array_equal(result, data[:3]) + + result = data[..., ::2] + tm.assert_extension_array_equal(result, data[::2]) + + result = data[::2, ...] + tm.assert_extension_array_equal(result, data[::2]) + + def test_get(self, data): + # GH 20882 + s = pd.Series(data, index=[2 * i for i in range(len(data))]) + assert s.get(4) == s.iloc[2] + + result = s.get([4, 6]) + expected = s.iloc[[2, 3]] + tm.assert_series_equal(result, expected) + + result = s.get(slice(2)) + expected = s.iloc[[0, 1]] + tm.assert_series_equal(result, expected) + + assert s.get(-1) is None + assert s.get(s.index.max() + 1) is None + + s = pd.Series(data[:6], index=list("abcdef")) + assert s.get("c") == s.iloc[2] + + result = s.get(slice("b", "d")) + expected = s.iloc[[1, 2, 3]] + tm.assert_series_equal(result, expected) + + result = s.get("Z") + assert result is None + + msg = "Series.__getitem__ treating keys as positions is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert s.get(4) == s.iloc[4] + assert s.get(-1) == s.iloc[-1] + assert s.get(len(s)) is None + + # GH 21257 + s = pd.Series(data) + with tm.assert_produces_warning(None): + # GH#45324 make sure we aren't giving a spurious FutureWarning + s2 = s[::2] + assert s2.get(1) is None + + def test_take_sequence(self, data): + result = pd.Series(data)[[0, 1, 3]] + assert result.iloc[0] == data[0] + assert result.iloc[1] == data[1] + assert result.iloc[2] == data[3] + + def test_take(self, data, na_value, na_cmp): + result = data.take([0, -1]) + assert result.dtype == data.dtype + assert result[0] == data[0] + assert result[1] == data[-1] + + result = data.take([0, -1], allow_fill=True, fill_value=na_value) + assert result[0] == data[0] + assert na_cmp(result[1], na_value) + + with pytest.raises(IndexError, match="out of bounds"): + data.take([len(data) + 1]) + + def test_take_empty(self, data, na_value, na_cmp): + empty = data[:0] + + result = empty.take([-1], allow_fill=True) + assert na_cmp(result[0], 
na_value) + + msg = "cannot do a non-empty take from an empty axes|out of bounds" + + with pytest.raises(IndexError, match=msg): + empty.take([-1]) + + with pytest.raises(IndexError, match="cannot do a non-empty take"): + empty.take([0, 1]) + + def test_take_negative(self, data): + # https://github.com/pandas-dev/pandas/issues/20640 + n = len(data) + result = data.take([0, -n, n - 1, -1]) + expected = data.take([0, 0, n - 1, n - 1]) + tm.assert_extension_array_equal(result, expected) + + def test_take_non_na_fill_value(self, data_missing): + fill_value = data_missing[1] # valid + na = data_missing[0] + + arr = data_missing._from_sequence( + [na, fill_value, na], dtype=data_missing.dtype + ) + result = arr.take([-1, 1], fill_value=fill_value, allow_fill=True) + expected = arr.take([1, 1]) + tm.assert_extension_array_equal(result, expected) + + def test_take_pandas_style_negative_raises(self, data, na_value): + with pytest.raises(ValueError, match=""): + data.take([0, -2], fill_value=na_value, allow_fill=True) + + @pytest.mark.parametrize("allow_fill", [True, False]) + def test_take_out_of_bounds_raises(self, data, allow_fill): + arr = data[:3] + + with pytest.raises(IndexError, match="out of bounds|out-of-bounds"): + arr.take(np.asarray([0, 3]), allow_fill=allow_fill) + + def test_take_series(self, data): + s = pd.Series(data) + result = s.take([0, -1]) + expected = pd.Series( + data._from_sequence([data[0], data[len(data) - 1]], dtype=s.dtype), + index=[0, len(data) - 1], + ) + tm.assert_series_equal(result, expected) + + def test_reindex(self, data, na_value): + s = pd.Series(data) + result = s.reindex([0, 1, 3]) + expected = pd.Series(data.take([0, 1, 3]), index=[0, 1, 3]) + tm.assert_series_equal(result, expected) + + n = len(data) + result = s.reindex([-1, 0, n]) + expected = pd.Series( + data._from_sequence([na_value, data[0], na_value], dtype=s.dtype), + index=[-1, 0, n], + ) + tm.assert_series_equal(result, expected) + + result = s.reindex([n, n + 1]) + expected = pd.Series( + data._from_sequence([na_value, na_value], dtype=s.dtype), index=[n, n + 1] + ) + tm.assert_series_equal(result, expected) + + def test_reindex_non_na_fill_value(self, data_missing): + valid = data_missing[1] + na = data_missing[0] + + arr = data_missing._from_sequence([na, valid], dtype=data_missing.dtype) + ser = pd.Series(arr) + result = ser.reindex([0, 1, 2], fill_value=valid) + expected = pd.Series( + data_missing._from_sequence([na, valid, valid], dtype=data_missing.dtype) + ) + + tm.assert_series_equal(result, expected) + + def test_loc_len1(self, data): + # see GH-27785 take_nd with indexer of len 1 resulting in wrong ndim + df = pd.DataFrame({"A": data}) + res = df.loc[[0], "A"] + assert res.ndim == 1 + assert res._mgr.arrays[0].ndim == 1 + if hasattr(res._mgr, "blocks"): + assert res._mgr._block.ndim == 1 + + def test_item(self, data): + # https://github.com/pandas-dev/pandas/pull/30175 + s = pd.Series(data) + result = s[:1].item() + assert result == data[0] + + msg = "can only convert an array of size 1 to a Python scalar" + with pytest.raises(ValueError, match=msg): + s[:0].item() + + with pytest.raises(ValueError, match=msg): + s.item() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/groupby.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/groupby.py new file mode 100644 index 00000000..6f72a6c2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/groupby.py @@ -0,0 +1,162 @@ +import re + +import pytest + +from 
pandas.core.dtypes.common import ( + is_bool_dtype, + is_numeric_dtype, + is_object_dtype, + is_string_dtype, +) + +import pandas as pd +import pandas._testing as tm + + +class BaseGroupbyTests: + """Groupby-specific tests.""" + + def test_grouping_grouper(self, data_for_grouping): + df = pd.DataFrame( + {"A": ["B", "B", None, None, "A", "A", "B", "C"], "B": data_for_grouping} + ) + gr1 = df.groupby("A").grouper.groupings[0] + gr2 = df.groupby("B").grouper.groupings[0] + + tm.assert_numpy_array_equal(gr1.grouping_vector, df.A.values) + tm.assert_extension_array_equal(gr2.grouping_vector, data_for_grouping) + + @pytest.mark.parametrize("as_index", [True, False]) + def test_groupby_extension_agg(self, as_index, data_for_grouping): + df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) + + is_bool = data_for_grouping.dtype._is_boolean + if is_bool: + # only 2 unique values, and the final entry has c==b + # (see data_for_grouping docstring) + df = df.iloc[:-1] + + result = df.groupby("B", as_index=as_index).A.mean() + _, uniques = pd.factorize(data_for_grouping, sort=True) + + exp_vals = [3.0, 1.0, 4.0] + if is_bool: + exp_vals = exp_vals[:-1] + if as_index: + index = pd.Index(uniques, name="B") + expected = pd.Series(exp_vals, index=index, name="A") + tm.assert_series_equal(result, expected) + else: + expected = pd.DataFrame({"B": uniques, "A": exp_vals}) + tm.assert_frame_equal(result, expected) + + def test_groupby_agg_extension(self, data_for_grouping): + # GH#38980 groupby agg on extension type fails for non-numeric types + df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) + + expected = df.iloc[[0, 2, 4, 7]] + expected = expected.set_index("A") + + result = df.groupby("A").agg({"B": "first"}) + tm.assert_frame_equal(result, expected) + + result = df.groupby("A").agg("first") + tm.assert_frame_equal(result, expected) + + result = df.groupby("A").first() + tm.assert_frame_equal(result, expected) + + def test_groupby_extension_no_sort(self, data_for_grouping): + df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) + + is_bool = data_for_grouping.dtype._is_boolean + if is_bool: + # only 2 unique values, and the final entry has c==b + # (see data_for_grouping docstring) + df = df.iloc[:-1] + + result = df.groupby("B", sort=False).A.mean() + _, index = pd.factorize(data_for_grouping, sort=False) + + index = pd.Index(index, name="B") + exp_vals = [1.0, 3.0, 4.0] + if is_bool: + exp_vals = exp_vals[:-1] + expected = pd.Series(exp_vals, index=index, name="A") + tm.assert_series_equal(result, expected) + + def test_groupby_extension_transform(self, data_for_grouping): + is_bool = data_for_grouping.dtype._is_boolean + + valid = data_for_grouping[~data_for_grouping.isna()] + df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4], "B": valid}) + is_bool = data_for_grouping.dtype._is_boolean + if is_bool: + # only 2 unique values, and the final entry has c==b + # (see data_for_grouping docstring) + df = df.iloc[:-1] + + result = df.groupby("B").A.transform(len) + expected = pd.Series([3, 3, 2, 2, 3, 1], name="A") + if is_bool: + expected = expected[:-1] + + tm.assert_series_equal(result, expected) + + def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op): + df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) + df.groupby("B", group_keys=False).apply(groupby_apply_op) + df.groupby("B", group_keys=False).A.apply(groupby_apply_op) + df.groupby("A", group_keys=False).apply(groupby_apply_op) + 
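+        # same smoke check via SeriesGroupBy: applying over the EA-backed
+        # column "B" grouped by the plain column must also not raise
+        # (no return values are asserted anywhere in this test).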
df.groupby("A", group_keys=False).B.apply(groupby_apply_op) + + def test_groupby_apply_identity(self, data_for_grouping): + df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) + result = df.groupby("A").B.apply(lambda x: x.array) + expected = pd.Series( + [ + df.B.iloc[[0, 1, 6]].array, + df.B.iloc[[2, 3]].array, + df.B.iloc[[4, 5]].array, + df.B.iloc[[7]].array, + ], + index=pd.Index([1, 2, 3, 4], name="A"), + name="B", + ) + tm.assert_series_equal(result, expected) + + def test_in_numeric_groupby(self, data_for_grouping): + df = pd.DataFrame( + { + "A": [1, 1, 2, 2, 3, 3, 1, 4], + "B": data_for_grouping, + "C": [1, 1, 1, 1, 1, 1, 1, 1], + } + ) + + dtype = data_for_grouping.dtype + if ( + is_numeric_dtype(dtype) + or is_bool_dtype(dtype) + or dtype.name == "decimal" + or is_string_dtype(dtype) + or is_object_dtype(dtype) + or dtype.kind == "m" # in particular duration[*][pyarrow] + ): + expected = pd.Index(["B", "C"]) + result = df.groupby("A").sum().columns + else: + expected = pd.Index(["C"]) + + msg = "|".join( + [ + # period/datetime + "does not support sum operations", + # all others + re.escape(f"agg function failed [how->sum,dtype->{dtype}"), + ] + ) + with pytest.raises(TypeError, match=msg): + df.groupby("A").sum() + result = df.groupby("A").sum(numeric_only=True).columns + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/index.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/index.py new file mode 100644 index 00000000..72c4ebfb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/index.py @@ -0,0 +1,19 @@ +""" +Tests for Indexes backed by arbitrary ExtensionArrays. +""" +import pandas as pd + + +class BaseIndexTests: + """Tests for Index object backed by an ExtensionArray""" + + def test_index_from_array(self, data): + idx = pd.Index(data) + assert data.dtype == idx.dtype + + def test_index_from_listlike_with_dtype(self, data): + idx = pd.Index(data, dtype=data.dtype) + assert idx.dtype == data.dtype + + idx = pd.Index(list(data), dtype=data.dtype) + assert idx.dtype == data.dtype diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/interface.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/interface.py new file mode 100644 index 00000000..f19561e5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/interface.py @@ -0,0 +1,133 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.common import is_extension_array_dtype +from pandas.core.dtypes.dtypes import ExtensionDtype + +import pandas as pd +import pandas._testing as tm + + +class BaseInterfaceTests: + """Tests that the basic interface is satisfied.""" + + # ------------------------------------------------------------------------ + # Interface + # ------------------------------------------------------------------------ + + def test_len(self, data): + assert len(data) == 100 + + def test_size(self, data): + assert data.size == 100 + + def test_ndim(self, data): + assert data.ndim == 1 + + def test_can_hold_na_valid(self, data): + # GH-20761 + assert data._can_hold_na is True + + def test_contains(self, data, data_missing): + # GH-37867 + # Tests for membership checks. Membership checks for nan-likes is tricky and + # the settled on rule is: `nan_like in arr` is True if nan_like is + # arr.dtype.na_value and arr.isna().any() is True. Else the check returns False. 
+ + na_value = data.dtype.na_value + # ensure data without missing values + data = data[~data.isna()] + + # first elements are non-missing + assert data[0] in data + assert data_missing[0] in data_missing + + # check the presence of na_value + assert na_value in data_missing + assert na_value not in data + + # the data can never contain other nan-likes than na_value + for na_value_obj in tm.NULL_OBJECTS: + if na_value_obj is na_value or type(na_value_obj) == type(na_value): + # type check for e.g. two instances of Decimal("NAN") + continue + assert na_value_obj not in data + assert na_value_obj not in data_missing + + def test_memory_usage(self, data): + s = pd.Series(data) + result = s.memory_usage(index=False) + assert result == s.nbytes + + def test_array_interface(self, data): + result = np.array(data) + assert result[0] == data[0] + + result = np.array(data, dtype=object) + expected = np.array(list(data), dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_is_extension_array_dtype(self, data): + assert is_extension_array_dtype(data) + assert is_extension_array_dtype(data.dtype) + assert is_extension_array_dtype(pd.Series(data)) + assert isinstance(data.dtype, ExtensionDtype) + + def test_no_values_attribute(self, data): + # GH-20735: EA's with .values attribute give problems with internal + # code, disallowing this for now until solved + assert not hasattr(data, "values") + assert not hasattr(data, "_values") + + def test_is_numeric_honored(self, data): + result = pd.Series(data) + if hasattr(result._mgr, "blocks"): + assert result._mgr.blocks[0].is_numeric is data.dtype._is_numeric + + def test_isna_extension_array(self, data_missing): + # If your `isna` returns an ExtensionArray, you must also implement + # _reduce. At the *very* least, you must implement any and all + na = data_missing.isna() + if is_extension_array_dtype(na): + assert na._reduce("any") + assert na.any() + + assert not na._reduce("all") + assert not na.all() + + assert na.dtype._is_boolean + + def test_copy(self, data): + # GH#27083 removing deep keyword from EA.copy + assert data[0] != data[1] + result = data.copy() + + if data.dtype._is_immutable: + pytest.skip("test_copy assumes mutability") + + data[1] = data[0] + assert result[1] != result[0] + + def test_view(self, data): + # view with no dtype should return a shallow copy, *not* the same + # object + assert data[1] != data[0] + + result = data.view() + assert result is not data + assert type(result) == type(data) + + if data.dtype._is_immutable: + pytest.skip("test_view assumes mutability") + + result[1] = result[0] + assert data[1] == data[0] + + # check specifically that the `dtype` kwarg is accepted + data.view(dtype=None) + + def test_tolist(self, data): + result = data.tolist() + expected = list(data) + assert isinstance(result, list) + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/io.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/io.py new file mode 100644 index 00000000..c369ec8a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/io.py @@ -0,0 +1,19 @@ +from io import StringIO + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +class BaseParsingTests: + @pytest.mark.parametrize("engine", ["c", "python"]) + def test_EA_types(self, engine, data): + df = pd.DataFrame({"with_dtype": pd.Series(data, dtype=str(data.dtype))}) + csv_output = df.to_csv(index=False, na_rep=np.nan) 
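+        # round-trip: serialize with an explicit NaN representation, then
+        # parse back while forcing the extension dtype; frames must match.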
+ result = pd.read_csv( + StringIO(csv_output), dtype={"with_dtype": str(data.dtype)}, engine=engine + ) + expected = df + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/methods.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/methods.py new file mode 100644 index 00000000..16059155 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/methods.py @@ -0,0 +1,703 @@ +import inspect +import operator + +import numpy as np +import pytest + +from pandas._typing import Dtype + +from pandas.core.dtypes.common import is_bool_dtype +from pandas.core.dtypes.missing import na_value_for_dtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.sorting import nargsort + + +class BaseMethodsTests: + """Various Series and DataFrame methods.""" + + def test_hash_pandas_object(self, data): + # _hash_pandas_object should return a uint64 ndarray of the same length + # as the data + from pandas.core.util.hashing import _default_hash_key + + res = data._hash_pandas_object( + encoding="utf-8", hash_key=_default_hash_key, categorize=False + ) + assert res.dtype == np.uint64 + assert res.shape == data.shape + + def test_value_counts_default_dropna(self, data): + # make sure we have consistent default dropna kwarg + if not hasattr(data, "value_counts"): + pytest.skip(f"value_counts is not implemented for {type(data)}") + sig = inspect.signature(data.value_counts) + kwarg = sig.parameters["dropna"] + assert kwarg.default is True + + @pytest.mark.parametrize("dropna", [True, False]) + def test_value_counts(self, all_data, dropna): + all_data = all_data[:10] + if dropna: + other = all_data[~all_data.isna()] + else: + other = all_data + + result = pd.Series(all_data).value_counts(dropna=dropna).sort_index() + expected = pd.Series(other).value_counts(dropna=dropna).sort_index() + + tm.assert_series_equal(result, expected) + + def test_value_counts_with_normalize(self, data): + # GH 33172 + data = data[:10].unique() + values = np.array(data[~data.isna()]) + ser = pd.Series(data, dtype=data.dtype) + + result = ser.value_counts(normalize=True).sort_index() + + if not isinstance(data, pd.Categorical): + expected = pd.Series( + [1 / len(values)] * len(values), index=result.index, name="proportion" + ) + else: + expected = pd.Series(0.0, index=result.index, name="proportion") + expected[result > 0] = 1 / len(values) + + if getattr(data.dtype, "storage", "") == "pyarrow" or isinstance( + data.dtype, pd.ArrowDtype + ): + # TODO: avoid special-casing + expected = expected.astype("double[pyarrow]") + elif getattr(data.dtype, "storage", "") == "pyarrow_numpy": + # TODO: avoid special-casing + expected = expected.astype("float64") + elif na_value_for_dtype(data.dtype) is pd.NA: + # TODO(GH#44692): avoid special-casing + expected = expected.astype("Float64") + + tm.assert_series_equal(result, expected) + + def test_count(self, data_missing): + df = pd.DataFrame({"A": data_missing}) + result = df.count(axis="columns") + expected = pd.Series([0, 1]) + tm.assert_series_equal(result, expected) + + def test_series_count(self, data_missing): + # GH#26835 + ser = pd.Series(data_missing) + result = ser.count() + expected = 1 + assert result == expected + + def test_apply_simple_series(self, data): + result = pd.Series(data).apply(id) + assert isinstance(result, pd.Series) + + @pytest.mark.parametrize("na_action", [None, "ignore"]) + def test_map(self, data_missing, na_action): + result = 
data_missing.map(lambda x: x, na_action=na_action) + expected = data_missing.to_numpy() + tm.assert_numpy_array_equal(result, expected) + + def test_argsort(self, data_for_sorting): + result = pd.Series(data_for_sorting).argsort() + # argsort result gets passed to take, so should be np.intp + expected = pd.Series(np.array([2, 0, 1], dtype=np.intp)) + tm.assert_series_equal(result, expected) + + def test_argsort_missing_array(self, data_missing_for_sorting): + result = data_missing_for_sorting.argsort() + # argsort result gets passed to take, so should be np.intp + expected = np.array([2, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + def test_argsort_missing(self, data_missing_for_sorting): + msg = "The behavior of Series.argsort in the presence of NA values" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.Series(data_missing_for_sorting).argsort() + expected = pd.Series(np.array([1, -1, 0], dtype=np.intp)) + tm.assert_series_equal(result, expected) + + def test_argmin_argmax(self, data_for_sorting, data_missing_for_sorting, na_value): + # GH 24382 + is_bool = data_for_sorting.dtype._is_boolean + + exp_argmax = 1 + exp_argmax_repeated = 3 + if is_bool: + # See data_for_sorting docstring + exp_argmax = 0 + exp_argmax_repeated = 1 + + # data_for_sorting -> [B, C, A] with A < B < C + assert data_for_sorting.argmax() == exp_argmax + assert data_for_sorting.argmin() == 2 + + # with repeated values -> first occurrence + data = data_for_sorting.take([2, 0, 0, 1, 1, 2]) + assert data.argmax() == exp_argmax_repeated + assert data.argmin() == 0 + + # with missing values + # data_missing_for_sorting -> [B, NA, A] with A < B and NA missing. + assert data_missing_for_sorting.argmax() == 0 + assert data_missing_for_sorting.argmin() == 2 + + @pytest.mark.parametrize("method", ["argmax", "argmin"]) + def test_argmin_argmax_empty_array(self, method, data): + # GH 24382 + err_msg = "attempt to get" + with pytest.raises(ValueError, match=err_msg): + getattr(data[:0], method)() + + @pytest.mark.parametrize("method", ["argmax", "argmin"]) + def test_argmin_argmax_all_na(self, method, data, na_value): + # all missing with skipna=True is the same as empty + err_msg = "attempt to get" + data_na = type(data)._from_sequence([na_value, na_value], dtype=data.dtype) + with pytest.raises(ValueError, match=err_msg): + getattr(data_na, method)() + + @pytest.mark.parametrize( + "op_name, skipna, expected", + [ + ("idxmax", True, 0), + ("idxmin", True, 2), + ("argmax", True, 0), + ("argmin", True, 2), + ("idxmax", False, np.nan), + ("idxmin", False, np.nan), + ("argmax", False, -1), + ("argmin", False, -1), + ], + ) + def test_argreduce_series( + self, data_missing_for_sorting, op_name, skipna, expected + ): + # data_missing_for_sorting -> [B, NA, A] with A < B and NA missing. 
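+        # expected == -1 (arg*) and expected == NaN (idx*) encode deprecated
+        # skipna=False fallback behavior, so those cases must also emit a
+        # FutureWarning, set up below.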
+ warn = None + msg = "The behavior of Series.argmax/argmin" + if op_name.startswith("arg") and expected == -1: + warn = FutureWarning + if op_name.startswith("idx") and np.isnan(expected): + warn = FutureWarning + msg = f"The behavior of Series.{op_name}" + ser = pd.Series(data_missing_for_sorting) + with tm.assert_produces_warning(warn, match=msg): + result = getattr(ser, op_name)(skipna=skipna) + tm.assert_almost_equal(result, expected) + + def test_argmax_argmin_no_skipna_notimplemented(self, data_missing_for_sorting): + # GH#38733 + data = data_missing_for_sorting + + with pytest.raises(NotImplementedError, match=""): + data.argmin(skipna=False) + + with pytest.raises(NotImplementedError, match=""): + data.argmax(skipna=False) + + @pytest.mark.parametrize( + "na_position, expected", + [ + ("last", np.array([2, 0, 1], dtype=np.dtype("intp"))), + ("first", np.array([1, 2, 0], dtype=np.dtype("intp"))), + ], + ) + def test_nargsort(self, data_missing_for_sorting, na_position, expected): + # GH 25439 + result = nargsort(data_missing_for_sorting, na_position=na_position) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("ascending", [True, False]) + def test_sort_values(self, data_for_sorting, ascending, sort_by_key): + ser = pd.Series(data_for_sorting) + result = ser.sort_values(ascending=ascending, key=sort_by_key) + expected = ser.iloc[[2, 0, 1]] + if not ascending: + # GH 35922. Expect stable sort + if ser.nunique() == 2: + expected = ser.iloc[[0, 1, 2]] + else: + expected = ser.iloc[[1, 0, 2]] + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("ascending", [True, False]) + def test_sort_values_missing( + self, data_missing_for_sorting, ascending, sort_by_key + ): + ser = pd.Series(data_missing_for_sorting) + result = ser.sort_values(ascending=ascending, key=sort_by_key) + if ascending: + expected = ser.iloc[[2, 0, 1]] + else: + expected = ser.iloc[[0, 2, 1]] + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("ascending", [True, False]) + def test_sort_values_frame(self, data_for_sorting, ascending): + df = pd.DataFrame({"A": [1, 2, 1], "B": data_for_sorting}) + result = df.sort_values(["A", "B"]) + expected = pd.DataFrame( + {"A": [1, 1, 2], "B": data_for_sorting.take([2, 0, 1])}, index=[2, 0, 1] + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("box", [pd.Series, lambda x: x]) + @pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique]) + def test_unique(self, data, box, method): + duplicated = box(data._from_sequence([data[0], data[0]])) + + result = method(duplicated) + + assert len(result) == 1 + assert isinstance(result, type(data)) + assert result[0] == duplicated[0] + + def test_factorize(self, data_for_grouping): + codes, uniques = pd.factorize(data_for_grouping, use_na_sentinel=True) + + is_bool = data_for_grouping.dtype._is_boolean + if is_bool: + # only 2 unique values + expected_codes = np.array([0, 0, -1, -1, 1, 1, 0, 0], dtype=np.intp) + expected_uniques = data_for_grouping.take([0, 4]) + else: + expected_codes = np.array([0, 0, -1, -1, 1, 1, 0, 2], dtype=np.intp) + expected_uniques = data_for_grouping.take([0, 4, 7]) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_extension_array_equal(uniques, expected_uniques) + + def test_factorize_equivalence(self, data_for_grouping): + codes_1, uniques_1 = pd.factorize(data_for_grouping, use_na_sentinel=True) + codes_2, uniques_2 = data_for_grouping.factorize(use_na_sentinel=True) + + 
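+        # module-level pd.factorize and the EA method must agree on both
+        # the codes and the uniques.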
tm.assert_numpy_array_equal(codes_1, codes_2) + tm.assert_extension_array_equal(uniques_1, uniques_2) + assert len(uniques_1) == len(pd.unique(uniques_1)) + assert uniques_1.dtype == data_for_grouping.dtype + + def test_factorize_empty(self, data): + codes, uniques = pd.factorize(data[:0]) + expected_codes = np.array([], dtype=np.intp) + expected_uniques = type(data)._from_sequence([], dtype=data[:0].dtype) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_extension_array_equal(uniques, expected_uniques) + + def test_fillna_copy_frame(self, data_missing): + arr = data_missing.take([1, 1]) + df = pd.DataFrame({"A": arr}) + df_orig = df.copy() + + filled_val = df.iloc[0, 0] + result = df.fillna(filled_val) + + result.iloc[0, 0] = filled_val + + tm.assert_frame_equal(df, df_orig) + + def test_fillna_copy_series(self, data_missing): + arr = data_missing.take([1, 1]) + ser = pd.Series(arr, copy=False) + ser_orig = ser.copy() + + filled_val = ser[0] + result = ser.fillna(filled_val) + result.iloc[0] = filled_val + + tm.assert_series_equal(ser, ser_orig) + + def test_fillna_length_mismatch(self, data_missing): + msg = "Length of 'value' does not match." + with pytest.raises(ValueError, match=msg): + data_missing.fillna(data_missing.take([1])) + + # Subclasses can override if we expect e.g Sparse[bool], boolean, pyarrow[bool] + _combine_le_expected_dtype: Dtype = np.dtype(bool) + + def test_combine_le(self, data_repeated): + # GH 20825 + # Test that combine works when doing a <= (le) comparison + orig_data1, orig_data2 = data_repeated(2) + s1 = pd.Series(orig_data1) + s2 = pd.Series(orig_data2) + result = s1.combine(s2, lambda x1, x2: x1 <= x2) + expected = pd.Series( + [a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))], + dtype=self._combine_le_expected_dtype, + ) + tm.assert_series_equal(result, expected) + + val = s1.iloc[0] + result = s1.combine(val, lambda x1, x2: x1 <= x2) + expected = pd.Series( + [a <= val for a in list(orig_data1)], + dtype=self._combine_le_expected_dtype, + ) + tm.assert_series_equal(result, expected) + + def test_combine_add(self, data_repeated): + # GH 20825 + orig_data1, orig_data2 = data_repeated(2) + s1 = pd.Series(orig_data1) + s2 = pd.Series(orig_data2) + + # Check if the operation is supported pointwise for our scalars. If not, + # we will expect Series.combine to raise as well. 
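+        # np.errstate(over="ignore"): the pointwise scalar additions used to
+        # build `expected` may overflow for narrow integer dtypes; that is
+        # acceptable when computing the reference result.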
+ try: + with np.errstate(over="ignore"): + expected = pd.Series( + orig_data1._from_sequence( + [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))] + ) + ) + except TypeError: + # If the operation is not supported pointwise for our scalars, + # then Series.combine should also raise + with pytest.raises(TypeError): + s1.combine(s2, lambda x1, x2: x1 + x2) + return + + result = s1.combine(s2, lambda x1, x2: x1 + x2) + tm.assert_series_equal(result, expected) + + val = s1.iloc[0] + result = s1.combine(val, lambda x1, x2: x1 + x2) + expected = pd.Series( + orig_data1._from_sequence([a + val for a in list(orig_data1)]) + ) + tm.assert_series_equal(result, expected) + + def test_combine_first(self, data): + # https://github.com/pandas-dev/pandas/issues/24147 + a = pd.Series(data[:3]) + b = pd.Series(data[2:5], index=[2, 3, 4]) + result = a.combine_first(b) + expected = pd.Series(data[:5]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("frame", [True, False]) + @pytest.mark.parametrize( + "periods, indices", + [(-2, [2, 3, 4, -1, -1]), (0, [0, 1, 2, 3, 4]), (2, [-1, -1, 0, 1, 2])], + ) + def test_container_shift(self, data, frame, periods, indices): + # https://github.com/pandas-dev/pandas/issues/22386 + subset = data[:5] + data = pd.Series(subset, name="A") + expected = pd.Series(subset.take(indices, allow_fill=True), name="A") + + if frame: + result = data.to_frame(name="A").assign(B=1).shift(periods) + expected = pd.concat( + [expected, pd.Series([1] * 5, name="B").shift(periods)], axis=1 + ) + compare = tm.assert_frame_equal + else: + result = data.shift(periods) + compare = tm.assert_series_equal + + compare(result, expected) + + def test_shift_0_periods(self, data): + # GH#33856 shifting with periods=0 should return a copy, not same obj + result = data.shift(0) + assert data[0] != data[1] # otherwise below is invalid + data[0] = data[1] + assert result[0] != result[1] # i.e. not the same object/view + + @pytest.mark.parametrize("periods", [1, -2]) + def test_diff(self, data, periods): + data = data[:5] + if is_bool_dtype(data.dtype): + op = operator.xor + else: + op = operator.sub + try: + # does this array implement ops? 
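+            # probe on the array itself; if this raises, the EA does not
+            # support the op and the test is skipped below.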
+ op(data, data) + except Exception: + pytest.skip(f"{type(data)} does not support diff") + s = pd.Series(data) + result = s.diff(periods) + expected = pd.Series(op(data, data.shift(periods))) + tm.assert_series_equal(result, expected) + + df = pd.DataFrame({"A": data, "B": [1.0] * 5}) + result = df.diff(periods) + if periods == 1: + b = [np.nan, 0, 0, 0, 0] + else: + b = [0, 0, 0, np.nan, np.nan] + expected = pd.DataFrame({"A": expected, "B": b}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "periods, indices", + [[-4, [-1, -1]], [-1, [1, -1]], [0, [0, 1]], [1, [-1, 0]], [4, [-1, -1]]], + ) + def test_shift_non_empty_array(self, data, periods, indices): + # https://github.com/pandas-dev/pandas/issues/23911 + subset = data[:2] + result = subset.shift(periods) + expected = subset.take(indices, allow_fill=True) + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize("periods", [-4, -1, 0, 1, 4]) + def test_shift_empty_array(self, data, periods): + # https://github.com/pandas-dev/pandas/issues/23911 + empty = data[:0] + result = empty.shift(periods) + expected = empty + tm.assert_extension_array_equal(result, expected) + + def test_shift_zero_copies(self, data): + # GH#31502 + result = data.shift(0) + assert result is not data + + result = data[:0].shift(2) + assert result is not data + + def test_shift_fill_value(self, data): + arr = data[:4] + fill_value = data[0] + result = arr.shift(1, fill_value=fill_value) + expected = data.take([0, 0, 1, 2]) + tm.assert_extension_array_equal(result, expected) + + result = arr.shift(-2, fill_value=fill_value) + expected = data.take([2, 3, 0, 0]) + tm.assert_extension_array_equal(result, expected) + + def test_not_hashable(self, data): + # We are in general mutable, so not hashable + with pytest.raises(TypeError, match="unhashable type"): + hash(data) + + def test_hash_pandas_object_works(self, data, as_frame): + # https://github.com/pandas-dev/pandas/issues/23066 + data = pd.Series(data) + if as_frame: + data = data.to_frame() + a = pd.util.hash_pandas_object(data) + b = pd.util.hash_pandas_object(data) + tm.assert_equal(a, b) + + def test_searchsorted(self, data_for_sorting, as_series): + if data_for_sorting.dtype._is_boolean: + return self._test_searchsorted_bool_dtypes(data_for_sorting, as_series) + + b, c, a = data_for_sorting + arr = data_for_sorting.take([2, 0, 1]) # to get [a, b, c] + + if as_series: + arr = pd.Series(arr) + assert arr.searchsorted(a) == 0 + assert arr.searchsorted(a, side="right") == 1 + + assert arr.searchsorted(b) == 1 + assert arr.searchsorted(b, side="right") == 2 + + assert arr.searchsorted(c) == 2 + assert arr.searchsorted(c, side="right") == 3 + + result = arr.searchsorted(arr.take([0, 2])) + expected = np.array([0, 2], dtype=np.intp) + + tm.assert_numpy_array_equal(result, expected) + + # sorter + sorter = np.array([1, 2, 0]) + assert data_for_sorting.searchsorted(a, sorter=sorter) == 0 + + def _test_searchsorted_bool_dtypes(self, data_for_sorting, as_series): + # We call this from test_searchsorted in cases where we have a + # boolean-like dtype. The non-bool test assumes we have more than 2 + # unique values. 
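+        # with only two distinct values the sorted probe array is
+        # [False, True], so: searchsorted(False) -> 0 ("left") / 1 ("right"),
+        # searchsorted(True) -> 1 ("left") / 2 ("right"), as asserted below.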
+ dtype = data_for_sorting.dtype + data_for_sorting = pd.array([True, False], dtype=dtype) + b, a = data_for_sorting + arr = type(data_for_sorting)._from_sequence([a, b]) + + if as_series: + arr = pd.Series(arr) + assert arr.searchsorted(a) == 0 + assert arr.searchsorted(a, side="right") == 1 + + assert arr.searchsorted(b) == 1 + assert arr.searchsorted(b, side="right") == 2 + + result = arr.searchsorted(arr.take([0, 1])) + expected = np.array([0, 1], dtype=np.intp) + + tm.assert_numpy_array_equal(result, expected) + + # sorter + sorter = np.array([1, 0]) + assert data_for_sorting.searchsorted(a, sorter=sorter) == 0 + + def test_where_series(self, data, na_value, as_frame): + assert data[0] != data[1] + cls = type(data) + a, b = data[:2] + + orig = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype)) + ser = orig.copy() + cond = np.array([True, True, False, False]) + + if as_frame: + ser = ser.to_frame(name="a") + cond = cond.reshape(-1, 1) + + result = ser.where(cond) + expected = pd.Series( + cls._from_sequence([a, a, na_value, na_value], dtype=data.dtype) + ) + + if as_frame: + expected = expected.to_frame(name="a") + tm.assert_equal(result, expected) + + ser.mask(~cond, inplace=True) + tm.assert_equal(ser, expected) + + # array other + ser = orig.copy() + if as_frame: + ser = ser.to_frame(name="a") + cond = np.array([True, False, True, True]) + other = cls._from_sequence([a, b, a, b], dtype=data.dtype) + if as_frame: + other = pd.DataFrame({"a": other}) + cond = pd.DataFrame({"a": cond}) + result = ser.where(cond, other) + expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype)) + if as_frame: + expected = expected.to_frame(name="a") + tm.assert_equal(result, expected) + + ser.mask(~cond, other, inplace=True) + tm.assert_equal(ser, expected) + + @pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]]) + def test_repeat(self, data, repeats, as_series, use_numpy): + arr = type(data)._from_sequence(data[:3], dtype=data.dtype) + if as_series: + arr = pd.Series(arr) + + result = np.repeat(arr, repeats) if use_numpy else arr.repeat(repeats) + + repeats = [repeats] * 3 if isinstance(repeats, int) else repeats + expected = [x for x, n in zip(arr, repeats) for _ in range(n)] + expected = type(data)._from_sequence(expected, dtype=data.dtype) + if as_series: + expected = pd.Series(expected, index=arr.index.repeat(repeats)) + + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "repeats, kwargs, error, msg", + [ + (2, {"axis": 1}, ValueError, "axis"), + (-1, {}, ValueError, "negative"), + ([1, 2], {}, ValueError, "shape"), + (2, {"foo": "bar"}, TypeError, "'foo'"), + ], + ) + def test_repeat_raises(self, data, repeats, kwargs, error, msg, use_numpy): + with pytest.raises(error, match=msg): + if use_numpy: + np.repeat(data, repeats, **kwargs) + else: + data.repeat(repeats, **kwargs) + + def test_delete(self, data): + result = data.delete(0) + expected = data[1:] + tm.assert_extension_array_equal(result, expected) + + result = data.delete([1, 3]) + expected = data._concat_same_type([data[[0]], data[[2]], data[4:]]) + tm.assert_extension_array_equal(result, expected) + + def test_insert(self, data): + # insert at the beginning + result = data[1:].insert(0, data[0]) + tm.assert_extension_array_equal(result, data) + + result = data[1:].insert(-len(data[1:]), data[0]) + tm.assert_extension_array_equal(result, data) + + # insert at the middle + result = data[:-1].insert(4, data[-1]) + + taker = np.arange(len(data)) + taker[5:] = taker[4:-1] + taker[4] = len(data) 
- 1 + expected = data.take(taker) + tm.assert_extension_array_equal(result, expected) + + def test_insert_invalid(self, data, invalid_scalar): + item = invalid_scalar + + with pytest.raises((TypeError, ValueError)): + data.insert(0, item) + + with pytest.raises((TypeError, ValueError)): + data.insert(4, item) + + with pytest.raises((TypeError, ValueError)): + data.insert(len(data) - 1, item) + + def test_insert_invalid_loc(self, data): + ub = len(data) + + with pytest.raises(IndexError): + data.insert(ub + 1, data[0]) + + with pytest.raises(IndexError): + data.insert(-ub - 1, data[0]) + + with pytest.raises(TypeError): + # we expect TypeError here instead of IndexError to match np.insert + data.insert(1.5, data[0]) + + @pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame]) + def test_equals(self, data, na_value, as_series, box): + data2 = type(data)._from_sequence([data[0]] * len(data), dtype=data.dtype) + data_na = type(data)._from_sequence([na_value] * len(data), dtype=data.dtype) + + data = tm.box_expected(data, box, transpose=False) + data2 = tm.box_expected(data2, box, transpose=False) + data_na = tm.box_expected(data_na, box, transpose=False) + + # we are asserting with `is True/False` explicitly, to test that the + # result is an actual Python bool, and not something "truthy" + + assert data.equals(data) is True + assert data.equals(data.copy()) is True + + # unequal other data + assert data.equals(data2) is False + assert data.equals(data_na) is False + + # different length + assert data[:2].equals(data[:3]) is False + + # empty are equal + assert data[:0].equals(data[:0]) is True + + # other types + assert data.equals(None) is False + assert data[[0]].equals(data[0]) is False + + def test_equals_same_data_different_object(self, data): + # https://github.com/pandas-dev/pandas/issues/34660 + assert pd.Series(data).equals(pd.Series(data)) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/missing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/missing.py new file mode 100644 index 00000000..40cc952d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/missing.py @@ -0,0 +1,166 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +class BaseMissingTests: + def test_isna(self, data_missing): + expected = np.array([True, False]) + + result = pd.isna(data_missing) + tm.assert_numpy_array_equal(result, expected) + + result = pd.Series(data_missing).isna() + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) + + # GH 21189 + result = pd.Series(data_missing).drop([0, 1]).isna() + expected = pd.Series([], dtype=bool) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("na_func", ["isna", "notna"]) + def test_isna_returns_copy(self, data_missing, na_func): + result = pd.Series(data_missing) + expected = result.copy() + mask = getattr(result, na_func)() + if isinstance(mask.dtype, pd.SparseDtype): + mask = np.array(mask) + + mask[:] = True + tm.assert_series_equal(result, expected) + + def test_dropna_array(self, data_missing): + result = data_missing.dropna() + expected = data_missing[[1]] + tm.assert_extension_array_equal(result, expected) + + def test_dropna_series(self, data_missing): + ser = pd.Series(data_missing) + result = ser.dropna() + expected = ser.iloc[[1]] + tm.assert_series_equal(result, expected) + + def test_dropna_frame(self, data_missing): + df = pd.DataFrame({"A": data_missing}) + + # defaults 
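+        # default axis=0: the row holding the NA (position 0 of
+        # data_missing) is dropped, leaving only row 1.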
+ result = df.dropna() + expected = df.iloc[[1]] + tm.assert_frame_equal(result, expected) + + # axis = 1 + result = df.dropna(axis="columns") + expected = pd.DataFrame(index=pd.RangeIndex(2), columns=pd.Index([])) + tm.assert_frame_equal(result, expected) + + # multiple + df = pd.DataFrame({"A": data_missing, "B": [1, np.nan]}) + result = df.dropna() + expected = df.iloc[:0] + tm.assert_frame_equal(result, expected) + + def test_fillna_scalar(self, data_missing): + valid = data_missing[1] + result = data_missing.fillna(valid) + expected = data_missing.fillna(valid) + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:Series.fillna with 'method' is deprecated:FutureWarning" + ) + def test_fillna_limit_pad(self, data_missing): + arr = data_missing.take([1, 0, 0, 0, 1]) + result = pd.Series(arr).ffill(limit=2) + expected = pd.Series(data_missing.take([1, 1, 1, 0, 1])) + tm.assert_series_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:Series.fillna with 'method' is deprecated:FutureWarning" + ) + def test_fillna_limit_backfill(self, data_missing): + arr = data_missing.take([1, 0, 0, 0, 1]) + result = pd.Series(arr).fillna(method="backfill", limit=2) + expected = pd.Series(data_missing.take([1, 0, 1, 1, 1])) + tm.assert_series_equal(result, expected) + + def test_fillna_no_op_returns_copy(self, data): + data = data[~data.isna()] + + valid = data[0] + result = data.fillna(valid) + assert result is not data + tm.assert_extension_array_equal(result, data) + + result = data._pad_or_backfill(method="backfill") + assert result is not data + tm.assert_extension_array_equal(result, data) + + def test_fillna_series(self, data_missing): + fill_value = data_missing[1] + ser = pd.Series(data_missing) + + result = ser.fillna(fill_value) + expected = pd.Series( + data_missing._from_sequence( + [fill_value, fill_value], dtype=data_missing.dtype + ) + ) + tm.assert_series_equal(result, expected) + + # Fill with a series + result = ser.fillna(expected) + tm.assert_series_equal(result, expected) + + # Fill with a series not affecting the missing values + result = ser.fillna(ser) + tm.assert_series_equal(result, ser) + + def test_fillna_series_method(self, data_missing, fillna_method): + fill_value = data_missing[1] + + if fillna_method == "ffill": + data_missing = data_missing[::-1] + + result = getattr(pd.Series(data_missing), fillna_method)() + expected = pd.Series( + data_missing._from_sequence( + [fill_value, fill_value], dtype=data_missing.dtype + ) + ) + + tm.assert_series_equal(result, expected) + + def test_fillna_frame(self, data_missing): + fill_value = data_missing[1] + + result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value) + + expected = pd.DataFrame( + { + "A": data_missing._from_sequence( + [fill_value, fill_value], dtype=data_missing.dtype + ), + "B": [1, 2], + } + ) + + tm.assert_frame_equal(result, expected) + + def test_fillna_fill_other(self, data): + result = pd.DataFrame({"A": data, "B": [np.nan] * len(data)}).fillna({"B": 0.0}) + + expected = pd.DataFrame({"A": data, "B": [0.0] * len(result)}) + + tm.assert_frame_equal(result, expected) + + def test_use_inf_as_na_no_effect(self, data_missing): + ser = pd.Series(data_missing) + expected = ser.isna() + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.use_inf_as_na", True): + result = ser.isna() + tm.assert_series_equal(result, expected) diff --git 
a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/ops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/ops.py new file mode 100644 index 00000000..064242f3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/ops.py @@ -0,0 +1,265 @@ +from __future__ import annotations + +from typing import final + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core import ops + + +class BaseOpsUtil: + series_scalar_exc: type[Exception] | None = TypeError + frame_scalar_exc: type[Exception] | None = TypeError + series_array_exc: type[Exception] | None = TypeError + divmod_exc: type[Exception] | None = TypeError + + def _get_expected_exception( + self, op_name: str, obj, other + ) -> type[Exception] | None: + # Find the Exception, if any we expect to raise calling + # obj.__op_name__(other) + + # The self.obj_bar_exc pattern isn't great in part because it can depend + # on op_name or dtypes, but we use it here for backward-compatibility. + if op_name in ["__divmod__", "__rdivmod__"]: + return self.divmod_exc + if isinstance(obj, pd.Series) and isinstance(other, pd.Series): + return self.series_array_exc + elif isinstance(obj, pd.Series): + return self.series_scalar_exc + else: + return self.frame_scalar_exc + + def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result): + # In _check_op we check that the result of a pointwise operation + # (found via _combine) matches the result of the vectorized + # operation obj.__op_name__(other). + # In some cases pandas dtype inference on the scalar result may not + # give a matching dtype even if both operations are behaving "correctly". + # In these cases, do extra required casting here. + return pointwise_result + + def get_op_from_name(self, op_name: str): + return tm.get_op_from_name(op_name) + + # Subclasses are not expected to need to override check_opname, _check_op, + # _check_divmod_op, or _combine. + # Ideally any relevant overriding can be done in _cast_pointwise_result, + # get_op_from_name, and the specification of `exc`. If you find a use + # case that still requires overriding _check_op or _combine, please let + # us know at github.com/pandas-dev/pandas/issues + @final + def check_opname(self, ser: pd.Series, op_name: str, other): + exc = self._get_expected_exception(op_name, ser, other) + op = self.get_op_from_name(op_name) + + self._check_op(ser, op, other, op_name, exc) + + # see comment on check_opname + @final + def _combine(self, obj, other, op): + if isinstance(obj, pd.DataFrame): + if len(obj.columns) != 1: + raise NotImplementedError + expected = obj.iloc[:, 0].combine(other, op).to_frame() + else: + expected = obj.combine(other, op) + return expected + + # see comment on check_opname + @final + def _check_op( + self, ser: pd.Series, op, other, op_name: str, exc=NotImplementedError + ): + # Check that the Series/DataFrame arithmetic/comparison method matches + # the pointwise result from _combine. 
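+        # Either the vectorized op succeeds and must equal the pointwise
+        # result (after _cast_pointwise_result adjusts dtype inference),
+        # or it must raise the expected exception type.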
+ + if exc is None: + result = op(ser, other) + expected = self._combine(ser, other, op) + expected = self._cast_pointwise_result(op_name, ser, other, expected) + assert isinstance(result, type(ser)) + tm.assert_equal(result, expected) + else: + with pytest.raises(exc): + op(ser, other) + + # see comment on check_opname + @final + def _check_divmod_op(self, ser: pd.Series, op, other): + # check that divmod behavior matches behavior of floordiv+mod + if op is divmod: + exc = self._get_expected_exception("__divmod__", ser, other) + else: + exc = self._get_expected_exception("__rdivmod__", ser, other) + if exc is None: + result_div, result_mod = op(ser, other) + if op is divmod: + expected_div, expected_mod = ser // other, ser % other + else: + expected_div, expected_mod = other // ser, other % ser + tm.assert_series_equal(result_div, expected_div) + tm.assert_series_equal(result_mod, expected_mod) + else: + with pytest.raises(exc): + divmod(ser, other) + + +class BaseArithmeticOpsTests(BaseOpsUtil): + """ + Various Series and DataFrame arithmetic ops methods. + + Subclasses supporting various ops should set the class variables + to indicate that they support ops of that kind + + * series_scalar_exc = TypeError + * frame_scalar_exc = TypeError + * series_array_exc = TypeError + * divmod_exc = TypeError + """ + + series_scalar_exc: type[Exception] | None = TypeError + frame_scalar_exc: type[Exception] | None = TypeError + series_array_exc: type[Exception] | None = TypeError + divmod_exc: type[Exception] | None = TypeError + + def test_arith_series_with_scalar(self, data, all_arithmetic_operators): + # series & scalar + op_name = all_arithmetic_operators + ser = pd.Series(data) + self.check_opname(ser, op_name, ser.iloc[0]) + + def test_arith_frame_with_scalar(self, data, all_arithmetic_operators): + # frame & scalar + op_name = all_arithmetic_operators + df = pd.DataFrame({"A": data}) + self.check_opname(df, op_name, data[0]) + + def test_arith_series_with_array(self, data, all_arithmetic_operators): + # ndarray & other series + op_name = all_arithmetic_operators + ser = pd.Series(data) + self.check_opname(ser, op_name, pd.Series([ser.iloc[0]] * len(ser))) + + def test_divmod(self, data): + ser = pd.Series(data) + self._check_divmod_op(ser, divmod, 1) + self._check_divmod_op(1, ops.rdivmod, ser) + + def test_divmod_series_array(self, data, data_for_twos): + ser = pd.Series(data) + self._check_divmod_op(ser, divmod, data) + + other = data_for_twos + self._check_divmod_op(other, ops.rdivmod, ser) + + other = pd.Series(other) + self._check_divmod_op(other, ops.rdivmod, ser) + + def test_add_series_with_extension_array(self, data): + # Check adding an ExtensionArray to a Series of the same dtype matches + # the behavior of adding the arrays directly and then wrapping in a + # Series. 
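+        # if the dtype does not support addition, assert the raise and stop;
+        # otherwise Series + EA must match the wrapped EA + EA result.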
+ + ser = pd.Series(data) + + exc = self._get_expected_exception("__add__", ser, data) + if exc is not None: + with pytest.raises(exc): + ser + data + return + + result = ser + data + expected = pd.Series(data + data) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame, pd.Index]) + @pytest.mark.parametrize( + "op_name", + [ + x + for x in tm.arithmetic_dunder_methods + tm.comparison_dunder_methods + if not x.startswith("__r") + ], + ) + def test_direct_arith_with_ndframe_returns_not_implemented( + self, data, box, op_name + ): + # EAs should return NotImplemented for ops with Series/DataFrame/Index + # Pandas takes care of unboxing the series and calling the EA's op. + other = box(data) + + if hasattr(data, op_name): + result = getattr(data, op_name)(other) + assert result is NotImplemented + + +class BaseComparisonOpsTests(BaseOpsUtil): + """Various Series and DataFrame comparison ops methods.""" + + def _compare_other(self, ser: pd.Series, data, op, other): + if op.__name__ in ["eq", "ne"]: + # comparison should match point-wise comparisons + result = op(ser, other) + expected = ser.combine(other, op) + expected = self._cast_pointwise_result(op.__name__, ser, other, expected) + tm.assert_series_equal(result, expected) + + else: + exc = None + try: + result = op(ser, other) + except Exception as err: + exc = err + + if exc is None: + # Didn't error, then should match pointwise behavior + expected = ser.combine(other, op) + expected = self._cast_pointwise_result( + op.__name__, ser, other, expected + ) + tm.assert_series_equal(result, expected) + else: + with pytest.raises(type(exc)): + ser.combine(other, op) + + def test_compare_scalar(self, data, comparison_op): + ser = pd.Series(data) + self._compare_other(ser, data, comparison_op, 0) + + def test_compare_array(self, data, comparison_op): + ser = pd.Series(data) + other = pd.Series([data[0]] * len(data), dtype=data.dtype) + self._compare_other(ser, data, comparison_op, other) + + +class BaseUnaryOpsTests(BaseOpsUtil): + def test_invert(self, data): + ser = pd.Series(data, name="name") + result = ~ser + expected = pd.Series(~data, name="name") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("ufunc", [np.positive, np.negative, np.abs]) + def test_unary_ufunc_dunder_equivalence(self, data, ufunc): + # the dunder __pos__ works if and only if np.positive works, + # same for __neg__/np.negative and __abs__/np.abs + attr = {np.positive: "__pos__", np.negative: "__neg__", np.abs: "__abs__"}[ + ufunc + ] + + exc = None + try: + result = getattr(data, attr)() + except Exception as err: + exc = err + + # if __pos__ raised, then so should the ufunc + with pytest.raises((type(exc), TypeError)): + ufunc(data) + else: + alt = ufunc(data) + tm.assert_extension_array_equal(result, alt) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/printing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/printing.py new file mode 100644 index 00000000..b20236ec --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/printing.py @@ -0,0 +1,41 @@ +import io + +import pytest + +import pandas as pd + + +class BasePrintingTests: + """Tests checking the formatting of your EA when printed.""" + + @pytest.mark.parametrize("size", ["big", "small"]) + def test_array_repr(self, data, size): + if size == "small": + data = data[:5] + else: + data = type(data)._concat_same_type([data] * 5) + + result = repr(data) + assert 
type(data).__name__ in result + assert f"Length: {len(data)}" in result + assert str(data.dtype) in result + if size == "big": + assert "..." in result + + def test_array_repr_unicode(self, data): + result = str(data) + assert isinstance(result, str) + + def test_series_repr(self, data): + ser = pd.Series(data) + assert data.dtype.name in repr(ser) + + def test_dataframe_repr(self, data): + df = pd.DataFrame({"A": data}) + repr(df) + + def test_dtype_name_in_info(self, data): + buf = io.StringIO() + pd.DataFrame({"A": data}).info(buf=buf) + result = buf.getvalue() + assert data.dtype.name in result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/reduce.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/reduce.py new file mode 100644 index 00000000..91d4855d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/reduce.py @@ -0,0 +1,149 @@ +from typing import final + +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.api.types import is_numeric_dtype + + +class BaseReduceTests: + """ + Reduction specific tests. Generally these only + make sense for numeric/boolean operations. + """ + + def _supports_reduction(self, obj, op_name: str) -> bool: + # Specify if we expect this reduction to succeed. + return False + + def check_reduce(self, s, op_name, skipna): + # We perform the same operation on the np.float64 data and check + # that the results match. Override if you need to cast to something + # other than float64. + res_op = getattr(s, op_name) + + try: + alt = s.astype("float64") + except (TypeError, ValueError): + # e.g. Interval can't cast, so let's cast to object and do + # the reduction pointwise + alt = s.astype(object) + + exp_op = getattr(alt, op_name) + if op_name == "count": + result = res_op() + expected = exp_op() + else: + result = res_op(skipna=skipna) + expected = exp_op(skipna=skipna) + tm.assert_almost_equal(result, expected) + + def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool): + # Find the expected dtype when the given reduction is done on a DataFrame + # column with this array. The default assumes float64-like behavior, + # i.e. retains the dtype. + return arr.dtype + + # We anticipate that authors should not need to override check_reduce_frame, + # but should be able to do any necessary overriding in + # _get_expected_reduction_dtype. If you have a use case where this + # does not hold, please let us know at github.com/pandas-dev/pandas/issues. + @final + def check_reduce_frame(self, ser: pd.Series, op_name: str, skipna: bool): + # Check that the 2D reduction done in a DataFrame reduction "looks like" + # a wrapped version of the 1D reduction done by Series. + arr = ser.array + df = pd.DataFrame({"a": arr}) + + kwargs = {"ddof": 1} if op_name in ["var", "std"] else {} + + cmp_dtype = self._get_expected_reduction_dtype(arr, op_name, skipna) + + # The DataFrame method just calls arr._reduce with keepdims=True, + # so this first check is perfunctory. 
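+        # For example, with op_name="sum" the two sides compared below are
+        #     arr._reduce("sum", skipna=skipna, keepdims=True)  # length-1 EA
+        #     df.sum(skipna=skipna).array                       # via DataFrame
+        # and they are asserted to be elementwise-identical.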
+        result1 = arr._reduce(op_name, skipna=skipna, keepdims=True, **kwargs)
+        result2 = getattr(df, op_name)(skipna=skipna, **kwargs).array
+        tm.assert_extension_array_equal(result1, result2)
+
+        # Check that the 2D reduction looks like a wrapped version of the
+        # 1D reduction
+        if not skipna and ser.isna().any():
+            expected = pd.array([pd.NA], dtype=cmp_dtype)
+        else:
+            exp_value = getattr(ser.dropna(), op_name)()
+            expected = pd.array([exp_value], dtype=cmp_dtype)
+
+        tm.assert_extension_array_equal(result1, expected)
+
+    @pytest.mark.parametrize("skipna", [True, False])
+    def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
+        op_name = all_boolean_reductions
+        s = pd.Series(data)
+
+        if not self._supports_reduction(s, op_name):
+            msg = (
+                "[Cc]annot perform|Categorical is not ordered for operation|"
+                "does not support reduction|"
+            )
+
+            with pytest.raises(TypeError, match=msg):
+                getattr(s, op_name)(skipna=skipna)
+
+        else:
+            self.check_reduce(s, op_name, skipna)
+
+    @pytest.mark.filterwarnings("ignore::RuntimeWarning")
+    @pytest.mark.parametrize("skipna", [True, False])
+    def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
+        op_name = all_numeric_reductions
+        s = pd.Series(data)
+
+        if not self._supports_reduction(s, op_name):
+            msg = (
+                "[Cc]annot perform|Categorical is not ordered for operation|"
+                "does not support reduction|"
+            )
+
+            with pytest.raises(TypeError, match=msg):
+                getattr(s, op_name)(skipna=skipna)
+
+        else:
+            # min/max with empty produce numpy warnings
+            self.check_reduce(s, op_name, skipna)
+
+    @pytest.mark.parametrize("skipna", [True, False])
+    def test_reduce_frame(self, data, all_numeric_reductions, skipna):
+        op_name = all_numeric_reductions
+        s = pd.Series(data)
+        if not is_numeric_dtype(s.dtype):
+            pytest.skip("not numeric dtype")
+
+        if op_name in ["count", "kurt", "sem"]:
+            pytest.skip(f"{op_name} not an array method")
+
+        if not self._supports_reduction(s, op_name):
+            pytest.skip(f"Reduction {op_name} not supported for this dtype")
+
+        self.check_reduce_frame(s, op_name, skipna)
+
+
+# TODO: deprecate BaseNoReduceTests, BaseNumericReduceTests, BaseBooleanReduceTests
+class BaseNoReduceTests(BaseReduceTests):
+    """we don't define any reductions"""
+
+
+class BaseNumericReduceTests(BaseReduceTests):
+    # For backward compatibility only, this only runs the numeric reductions
+    def _supports_reduction(self, obj, op_name: str) -> bool:
+        if op_name in ["any", "all"]:
+            pytest.skip("These are tested in BaseBooleanReduceTests")
+        return True
+
+
+class BaseBooleanReduceTests(BaseReduceTests):
+    # For backward compatibility only, this only runs the boolean reductions
+    def _supports_reduction(self, obj, op_name: str) -> bool:
+        if op_name not in ["any", "all"]:
+            pytest.skip("These are tested in BaseNumericReduceTests")
+        return True
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/reshaping.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/reshaping.py
new file mode 100644
index 00000000..5d9c03e1
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/reshaping.py
@@ -0,0 +1,374 @@
+import itertools
+
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.api.extensions import ExtensionArray
+from pandas.core.internals.blocks import EABackedBlock
+
+
+class BaseReshapingTests:
+    """Tests for reshaping and concatenation."""
+
+    @pytest.mark.parametrize("in_frame", [True, False])
+    def 
test_concat(self, data, in_frame): + wrapped = pd.Series(data) + if in_frame: + wrapped = pd.DataFrame(wrapped) + result = pd.concat([wrapped, wrapped], ignore_index=True) + + assert len(result) == len(data) * 2 + + if in_frame: + dtype = result.dtypes[0] + else: + dtype = result.dtype + + assert dtype == data.dtype + if hasattr(result._mgr, "blocks"): + assert isinstance(result._mgr.blocks[0], EABackedBlock) + assert isinstance(result._mgr.arrays[0], ExtensionArray) + + @pytest.mark.parametrize("in_frame", [True, False]) + def test_concat_all_na_block(self, data_missing, in_frame): + valid_block = pd.Series(data_missing.take([1, 1]), index=[0, 1]) + na_block = pd.Series(data_missing.take([0, 0]), index=[2, 3]) + if in_frame: + valid_block = pd.DataFrame({"a": valid_block}) + na_block = pd.DataFrame({"a": na_block}) + result = pd.concat([valid_block, na_block]) + if in_frame: + expected = pd.DataFrame({"a": data_missing.take([1, 1, 0, 0])}) + tm.assert_frame_equal(result, expected) + else: + expected = pd.Series(data_missing.take([1, 1, 0, 0])) + tm.assert_series_equal(result, expected) + + def test_concat_mixed_dtypes(self, data): + # https://github.com/pandas-dev/pandas/issues/20762 + df1 = pd.DataFrame({"A": data[:3]}) + df2 = pd.DataFrame({"A": [1, 2, 3]}) + df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category") + dfs = [df1, df2, df3] + + # dataframes + result = pd.concat(dfs) + expected = pd.concat([x.astype(object) for x in dfs]) + tm.assert_frame_equal(result, expected) + + # series + result = pd.concat([x["A"] for x in dfs]) + expected = pd.concat([x["A"].astype(object) for x in dfs]) + tm.assert_series_equal(result, expected) + + # simple test for just EA and one other + result = pd.concat([df1, df2.astype(object)]) + expected = pd.concat([df1.astype("object"), df2.astype("object")]) + tm.assert_frame_equal(result, expected) + + result = pd.concat([df1["A"], df2["A"].astype(object)]) + expected = pd.concat([df1["A"].astype("object"), df2["A"].astype("object")]) + tm.assert_series_equal(result, expected) + + def test_concat_columns(self, data, na_value): + df1 = pd.DataFrame({"A": data[:3]}) + df2 = pd.DataFrame({"B": [1, 2, 3]}) + + expected = pd.DataFrame({"A": data[:3], "B": [1, 2, 3]}) + result = pd.concat([df1, df2], axis=1) + tm.assert_frame_equal(result, expected) + result = pd.concat([df1["A"], df2["B"]], axis=1) + tm.assert_frame_equal(result, expected) + + # non-aligned + df2 = pd.DataFrame({"B": [1, 2, 3]}, index=[1, 2, 3]) + expected = pd.DataFrame( + { + "A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype), + "B": [np.nan, 1, 2, 3], + } + ) + + result = pd.concat([df1, df2], axis=1) + tm.assert_frame_equal(result, expected) + result = pd.concat([df1["A"], df2["B"]], axis=1) + tm.assert_frame_equal(result, expected) + + def test_concat_extension_arrays_copy_false(self, data, na_value): + # GH 20756 + df1 = pd.DataFrame({"A": data[:3]}) + df2 = pd.DataFrame({"B": data[3:7]}) + expected = pd.DataFrame( + { + "A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype), + "B": data[3:7], + } + ) + result = pd.concat([df1, df2], axis=1, copy=False) + tm.assert_frame_equal(result, expected) + + def test_concat_with_reindex(self, data): + # GH-33027 + a = pd.DataFrame({"a": data[:5]}) + b = pd.DataFrame({"b": data[:5]}) + result = pd.concat([a, b], ignore_index=True) + expected = pd.DataFrame( + { + "a": data.take(list(range(5)) + ([-1] * 5), allow_fill=True), + "b": data.take(([-1] * 5) + list(range(5)), allow_fill=True), + } + ) + 
tm.assert_frame_equal(result, expected) + + def test_align(self, data, na_value): + a = data[:3] + b = data[2:5] + r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3])) + + # Assumes that the ctor can take a list of scalars of the type + e1 = pd.Series(data._from_sequence(list(a) + [na_value], dtype=data.dtype)) + e2 = pd.Series(data._from_sequence([na_value] + list(b), dtype=data.dtype)) + tm.assert_series_equal(r1, e1) + tm.assert_series_equal(r2, e2) + + def test_align_frame(self, data, na_value): + a = data[:3] + b = data[2:5] + r1, r2 = pd.DataFrame({"A": a}).align(pd.DataFrame({"A": b}, index=[1, 2, 3])) + + # Assumes that the ctor can take a list of scalars of the type + e1 = pd.DataFrame( + {"A": data._from_sequence(list(a) + [na_value], dtype=data.dtype)} + ) + e2 = pd.DataFrame( + {"A": data._from_sequence([na_value] + list(b), dtype=data.dtype)} + ) + tm.assert_frame_equal(r1, e1) + tm.assert_frame_equal(r2, e2) + + def test_align_series_frame(self, data, na_value): + # https://github.com/pandas-dev/pandas/issues/20576 + ser = pd.Series(data, name="a") + df = pd.DataFrame({"col": np.arange(len(ser) + 1)}) + r1, r2 = ser.align(df) + + e1 = pd.Series( + data._from_sequence(list(data) + [na_value], dtype=data.dtype), + name=ser.name, + ) + + tm.assert_series_equal(r1, e1) + tm.assert_frame_equal(r2, df) + + def test_set_frame_expand_regular_with_extension(self, data): + df = pd.DataFrame({"A": [1] * len(data)}) + df["B"] = data + expected = pd.DataFrame({"A": [1] * len(data), "B": data}) + tm.assert_frame_equal(df, expected) + + def test_set_frame_expand_extension_with_regular(self, data): + df = pd.DataFrame({"A": data}) + df["B"] = [1] * len(data) + expected = pd.DataFrame({"A": data, "B": [1] * len(data)}) + tm.assert_frame_equal(df, expected) + + def test_set_frame_overwrite_object(self, data): + # https://github.com/pandas-dev/pandas/issues/20555 + df = pd.DataFrame({"A": [1] * len(data)}, dtype=object) + df["A"] = data + assert df.dtypes["A"] == data.dtype + + def test_merge(self, data, na_value): + # GH-20743 + df1 = pd.DataFrame({"ext": data[:3], "int1": [1, 2, 3], "key": [0, 1, 2]}) + df2 = pd.DataFrame({"int2": [1, 2, 3, 4], "key": [0, 0, 1, 3]}) + + res = pd.merge(df1, df2) + exp = pd.DataFrame( + { + "int1": [1, 1, 2], + "int2": [1, 2, 3], + "key": [0, 0, 1], + "ext": data._from_sequence( + [data[0], data[0], data[1]], dtype=data.dtype + ), + } + ) + tm.assert_frame_equal(res, exp[["ext", "int1", "key", "int2"]]) + + res = pd.merge(df1, df2, how="outer") + exp = pd.DataFrame( + { + "int1": [1, 1, 2, 3, np.nan], + "int2": [1, 2, 3, np.nan, 4], + "key": [0, 0, 1, 2, 3], + "ext": data._from_sequence( + [data[0], data[0], data[1], data[2], na_value], dtype=data.dtype + ), + } + ) + tm.assert_frame_equal(res, exp[["ext", "int1", "key", "int2"]]) + + def test_merge_on_extension_array(self, data): + # GH 23020 + a, b = data[:2] + key = type(data)._from_sequence([a, b], dtype=data.dtype) + + df = pd.DataFrame({"key": key, "val": [1, 2]}) + result = pd.merge(df, df, on="key") + expected = pd.DataFrame({"key": key, "val_x": [1, 2], "val_y": [1, 2]}) + tm.assert_frame_equal(result, expected) + + # order + result = pd.merge(df.iloc[[1, 0]], df, on="key") + expected = expected.iloc[[1, 0]].reset_index(drop=True) + tm.assert_frame_equal(result, expected) + + def test_merge_on_extension_array_duplicates(self, data): + # GH 23020 + a, b = data[:2] + key = type(data)._from_sequence([a, b, a], dtype=data.dtype) + df1 = pd.DataFrame({"key": key, "val": [1, 2, 3]}) + df2 = 
pd.DataFrame({"key": key, "val": [1, 2, 3]}) + + result = pd.merge(df1, df2, on="key") + expected = pd.DataFrame( + { + "key": key.take([0, 0, 0, 0, 1]), + "val_x": [1, 1, 3, 3, 2], + "val_y": [1, 3, 1, 3, 2], + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "columns", + [ + ["A", "B"], + pd.MultiIndex.from_tuples( + [("A", "a"), ("A", "b")], names=["outer", "inner"] + ), + ], + ) + @pytest.mark.parametrize("future_stack", [True, False]) + def test_stack(self, data, columns, future_stack): + df = pd.DataFrame({"A": data[:5], "B": data[:5]}) + df.columns = columns + result = df.stack(future_stack=future_stack) + expected = df.astype(object).stack(future_stack=future_stack) + # we need a second astype(object), in case the constructor inferred + # object -> specialized, as is done for period. + expected = expected.astype(object) + + if isinstance(expected, pd.Series): + assert result.dtype == df.iloc[:, 0].dtype + else: + assert all(result.dtypes == df.iloc[:, 0].dtype) + + result = result.astype(object) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "index", + [ + # Two levels, uniform. + pd.MultiIndex.from_product(([["A", "B"], ["a", "b"]]), names=["a", "b"]), + # non-uniform + pd.MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "b")]), + # three levels, non-uniform + pd.MultiIndex.from_product([("A", "B"), ("a", "b", "c"), (0, 1, 2)]), + pd.MultiIndex.from_tuples( + [ + ("A", "a", 1), + ("A", "b", 0), + ("A", "a", 0), + ("B", "a", 0), + ("B", "c", 1), + ] + ), + ], + ) + @pytest.mark.parametrize("obj", ["series", "frame"]) + def test_unstack(self, data, index, obj): + data = data[: len(index)] + if obj == "series": + ser = pd.Series(data, index=index) + else: + ser = pd.DataFrame({"A": data, "B": data}, index=index) + + n = index.nlevels + levels = list(range(n)) + # [0, 1, 2] + # [(0,), (1,), (2,), (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + combinations = itertools.chain.from_iterable( + itertools.permutations(levels, i) for i in range(1, n) + ) + + for level in combinations: + result = ser.unstack(level=level) + assert all( + isinstance(result[col].array, type(data)) for col in result.columns + ) + + if obj == "series": + # We should get the same result with to_frame+unstack+droplevel + df = ser.to_frame() + + alt = df.unstack(level=level).droplevel(0, axis=1) + tm.assert_frame_equal(result, alt) + + obj_ser = ser.astype(object) + + expected = obj_ser.unstack(level=level, fill_value=data.dtype.na_value) + if obj == "series": + assert (expected.dtypes == object).all() + + result = result.astype(object) + tm.assert_frame_equal(result, expected) + + def test_ravel(self, data): + # as long as EA is 1D-only, ravel is a no-op + result = data.ravel() + assert type(result) == type(data) + + if data.dtype._is_immutable: + pytest.skip("test_ravel assumes mutability") + + # Check that we have a view, not a copy + result[0] = result[1] + assert data[0] == data[1] + + def test_transpose(self, data): + result = data.transpose() + assert type(result) == type(data) + + # check we get a new object + assert result is not data + + # If we ever _did_ support 2D, shape should be reversed + assert result.shape == data.shape[::-1] + + if data.dtype._is_immutable: + pytest.skip("test_transpose assumes mutability") + + # Check that we have a view, not a copy + result[0] = result[1] + assert data[0] == data[1] + + def test_transpose_frame(self, data): + df = pd.DataFrame({"A": data[:4], "B": data[:4]}, index=["a", "b", "c", "d"]) + result = df.T + 
expected = pd.DataFrame( + { + "a": type(data)._from_sequence([data[0]] * 2, dtype=data.dtype), + "b": type(data)._from_sequence([data[1]] * 2, dtype=data.dtype), + "c": type(data)._from_sequence([data[2]] * 2, dtype=data.dtype), + "d": type(data)._from_sequence([data[3]] * 2, dtype=data.dtype), + }, + index=["A", "B"], + ) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(np.transpose(np.transpose(df)), df) + tm.assert_frame_equal(np.transpose(np.transpose(df[["A"]])), df[["A"]]) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/setitem.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/setitem.py new file mode 100644 index 00000000..fd90635d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/base/setitem.py @@ -0,0 +1,445 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +class BaseSetitemTests: + @pytest.fixture( + params=[ + lambda x: x.index, + lambda x: list(x.index), + lambda x: slice(None), + lambda x: slice(0, len(x)), + lambda x: range(len(x)), + lambda x: list(range(len(x))), + lambda x: np.ones(len(x), dtype=bool), + ], + ids=[ + "index", + "list[index]", + "null_slice", + "full_slice", + "range", + "list(range)", + "mask", + ], + ) + def full_indexer(self, request): + """ + Fixture for an indexer to pass to obj.loc to get/set the full length of the + object. + + In some cases, assumes that obj.index is the default RangeIndex. + """ + return request.param + + @pytest.fixture(autouse=True) + def skip_if_immutable(self, dtype, request): + if dtype._is_immutable: + node = request.node + if node.name.split("[")[0] == "test_is_immutable": + # This fixture is auto-used, but we want to not-skip + # test_is_immutable. 
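+                # (test_is_immutable is the one setitem test that must still
+                # run for immutable dtypes: it asserts that __setitem__ raises.)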
+                return
+            pytest.skip("__setitem__ test not applicable with immutable dtype")
+
+    def test_is_immutable(self, data):
+        if data.dtype._is_immutable:
+            with pytest.raises(TypeError):
+                data[0] = data[0]
+        else:
+            data[0] = data[1]
+            assert data[0] == data[1]
+
+    def test_setitem_scalar_series(self, data, box_in_series):
+        if box_in_series:
+            data = pd.Series(data)
+        data[0] = data[1]
+        assert data[0] == data[1]
+
+    def test_setitem_sequence(self, data, box_in_series):
+        if box_in_series:
+            data = pd.Series(data)
+        original = data.copy()
+
+        data[[0, 1]] = [data[1], data[0]]
+        assert data[0] == original[1]
+        assert data[1] == original[0]
+
+    def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
+        ser = pd.Series(data)
+        original = ser.copy()
+        value = [data[0]]
+        if as_array:
+            value = data._from_sequence(value)
+
+        xpr = "cannot set using a {} indexer with a different length"
+        with pytest.raises(ValueError, match=xpr.format("list-like")):
+            ser[[0, 1]] = value
+        # Ensure no modifications made before the exception
+        tm.assert_series_equal(ser, original)
+
+        with pytest.raises(ValueError, match=xpr.format("slice")):
+            ser[slice(3)] = value
+        tm.assert_series_equal(ser, original)
+
+    def test_setitem_empty_indexer(self, data, box_in_series):
+        if box_in_series:
+            data = pd.Series(data)
+        original = data.copy()
+        data[np.array([], dtype=int)] = []
+        tm.assert_equal(data, original)
+
+    def test_setitem_sequence_broadcasts(self, data, box_in_series):
+        if box_in_series:
+            data = pd.Series(data)
+        data[[0, 1]] = data[2]
+        assert data[0] == data[2]
+        assert data[1] == data[2]
+
+    @pytest.mark.parametrize("setter", ["loc", "iloc"])
+    def test_setitem_scalar(self, data, setter):
+        arr = pd.Series(data)
+        setter = getattr(arr, setter)
+        setter[0] = data[1]
+        assert arr[0] == data[1]
+
+    def test_setitem_loc_scalar_mixed(self, data):
+        df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
+        df.loc[0, "B"] = data[1]
+        assert df.loc[0, "B"] == data[1]
+
+    def test_setitem_loc_scalar_single(self, data):
+        df = pd.DataFrame({"B": data})
+        df.loc[10, "B"] = data[1]
+        assert df.loc[10, "B"] == data[1]
+
+    def test_setitem_loc_scalar_multiple_homogeneous(self, data):
+        df = pd.DataFrame({"A": data, "B": data})
+        df.loc[10, "B"] = data[1]
+        assert df.loc[10, "B"] == data[1]
+
+    def test_setitem_iloc_scalar_mixed(self, data):
+        df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
+        df.iloc[0, 1] = data[1]
+        assert df.loc[0, "B"] == data[1]
+
+    def test_setitem_iloc_scalar_single(self, data):
+        df = pd.DataFrame({"B": data})
+        df.iloc[10, 0] = data[1]
+        assert df.loc[10, "B"] == data[1]
+
+    def test_setitem_iloc_scalar_multiple_homogeneous(self, data):
+        df = pd.DataFrame({"A": data, "B": data})
+        df.iloc[10, 1] = data[1]
+        assert df.loc[10, "B"] == data[1]
+
+    @pytest.mark.parametrize(
+        "mask",
+        [
+            np.array([True, True, True, False, False]),
+            pd.array([True, True, True, False, False], dtype="boolean"),
+            pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"),
+        ],
+        ids=["numpy-array", "boolean-array", "boolean-array-na"],
+    )
+    def test_setitem_mask(self, data, mask, box_in_series):
+        arr = data[:5].copy()
+        expected = arr.take([0, 0, 0, 3, 4])
+        if box_in_series:
+            arr = pd.Series(arr)
+            expected = pd.Series(expected)
+        arr[mask] = data[0]
+        tm.assert_equal(expected, arr)
+
+    def test_setitem_mask_raises(self, data, box_in_series):
+        # wrong length
+        mask = np.array([True, False])
+
+        if box_in_series:
+            data = pd.Series(data)
+
+        with pytest.raises(IndexError, 
match="wrong length"): + data[mask] = data[0] + + mask = pd.array(mask, dtype="boolean") + with pytest.raises(IndexError, match="wrong length"): + data[mask] = data[0] + + def test_setitem_mask_boolean_array_with_na(self, data, box_in_series): + mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean") + mask[:3] = True + mask[3:5] = pd.NA + + if box_in_series: + data = pd.Series(data) + + data[mask] = data[0] + + assert (data[:3] == data[0]).all() + + @pytest.mark.parametrize( + "idx", + [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])], + ids=["list", "integer-array", "numpy-array"], + ) + def test_setitem_integer_array(self, data, idx, box_in_series): + arr = data[:5].copy() + expected = data.take([0, 0, 0, 3, 4]) + + if box_in_series: + arr = pd.Series(arr) + expected = pd.Series(expected) + + arr[idx] = arr[0] + tm.assert_equal(arr, expected) + + @pytest.mark.parametrize( + "idx, box_in_series", + [ + ([0, 1, 2, pd.NA], False), + pytest.param( + [0, 1, 2, pd.NA], True, marks=pytest.mark.xfail(reason="GH-31948") + ), + (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False), + (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False), + ], + ids=["list-False", "list-True", "integer-array-False", "integer-array-True"], + ) + def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series): + arr = data.copy() + + # TODO(xfail) this raises KeyError about labels not found (it tries label-based) + # for list of labels with Series + if box_in_series: + arr = pd.Series(data, index=[chr(100 + i) for i in range(len(data))]) + + msg = "Cannot index with an integer indexer containing NA values" + with pytest.raises(ValueError, match=msg): + arr[idx] = arr[0] + + @pytest.mark.parametrize("as_callable", [True, False]) + @pytest.mark.parametrize("setter", ["loc", None]) + def test_setitem_mask_aligned(self, data, as_callable, setter): + ser = pd.Series(data) + mask = np.zeros(len(data), dtype=bool) + mask[:2] = True + + if as_callable: + mask2 = lambda x: mask + else: + mask2 = mask + + if setter: + # loc + target = getattr(ser, setter) + else: + # Series.__setitem__ + target = ser + + target[mask2] = data[5:7] + + ser[mask2] = data[5:7] + assert ser[0] == data[5] + assert ser[1] == data[6] + + @pytest.mark.parametrize("setter", ["loc", None]) + def test_setitem_mask_broadcast(self, data, setter): + ser = pd.Series(data) + mask = np.zeros(len(data), dtype=bool) + mask[:2] = True + + if setter: # loc + target = getattr(ser, setter) + else: # __setitem__ + target = ser + + target[mask] = data[10] + assert ser[0] == data[10] + assert ser[1] == data[10] + + def test_setitem_expand_columns(self, data): + df = pd.DataFrame({"A": data}) + result = df.copy() + result["B"] = 1 + expected = pd.DataFrame({"A": data, "B": [1] * len(data)}) + tm.assert_frame_equal(result, expected) + + result = df.copy() + result.loc[:, "B"] = 1 + tm.assert_frame_equal(result, expected) + + # overwrite with new type + result["B"] = data + expected = pd.DataFrame({"A": data, "B": data}) + tm.assert_frame_equal(result, expected) + + def test_setitem_expand_with_extension(self, data): + df = pd.DataFrame({"A": [1] * len(data)}) + result = df.copy() + result["B"] = data + expected = pd.DataFrame({"A": [1] * len(data), "B": data}) + tm.assert_frame_equal(result, expected) + + result = df.copy() + result.loc[:, "B"] = data + tm.assert_frame_equal(result, expected) + + def test_setitem_frame_invalid_length(self, data): + df = pd.DataFrame({"A": [1] * len(data)}) + xpr = ( + rf"Length of values 
\({len(data[:5])}\) " + rf"does not match length of index \({len(df)}\)" + ) + with pytest.raises(ValueError, match=xpr): + df["B"] = data[:5] + + def test_setitem_tuple_index(self, data): + ser = pd.Series(data[:2], index=[(0, 0), (0, 1)]) + expected = pd.Series(data.take([1, 1]), index=ser.index) + ser[(0, 0)] = data[1] + tm.assert_series_equal(ser, expected) + + def test_setitem_slice(self, data, box_in_series): + arr = data[:5].copy() + expected = data.take([0, 0, 0, 3, 4]) + if box_in_series: + arr = pd.Series(arr) + expected = pd.Series(expected) + + arr[:3] = data[0] + tm.assert_equal(arr, expected) + + def test_setitem_loc_iloc_slice(self, data): + arr = data[:5].copy() + s = pd.Series(arr, index=["a", "b", "c", "d", "e"]) + expected = pd.Series(data.take([0, 0, 0, 3, 4]), index=s.index) + + result = s.copy() + result.iloc[:3] = data[0] + tm.assert_equal(result, expected) + + result = s.copy() + result.loc[:"c"] = data[0] + tm.assert_equal(result, expected) + + def test_setitem_slice_mismatch_length_raises(self, data): + arr = data[:5] + with pytest.raises(ValueError): + arr[:1] = arr[:2] + + def test_setitem_slice_array(self, data): + arr = data[:5].copy() + arr[:5] = data[-5:] + tm.assert_extension_array_equal(arr, data[-5:]) + + def test_setitem_scalar_key_sequence_raise(self, data): + arr = data[:5].copy() + with pytest.raises(ValueError): + arr[0] = arr[[0, 1]] + + def test_setitem_preserves_views(self, data): + # GH#28150 setitem shouldn't swap the underlying data + view1 = data.view() + view2 = data[:] + + data[0] = data[1] + assert view1[0] == data[1] + assert view2[0] == data[1] + + def test_setitem_with_expansion_dataframe_column(self, data, full_indexer): + # https://github.com/pandas-dev/pandas/issues/32395 + df = expected = pd.DataFrame({"data": pd.Series(data)}) + result = pd.DataFrame(index=df.index) + + key = full_indexer(df) + result.loc[key, "data"] = df["data"] + + tm.assert_frame_equal(result, expected) + + def test_setitem_with_expansion_row(self, data, na_value): + df = pd.DataFrame({"data": data[:1]}) + + df.loc[1, "data"] = data[1] + expected = pd.DataFrame({"data": data[:2]}) + tm.assert_frame_equal(df, expected) + + # https://github.com/pandas-dev/pandas/issues/47284 + df.loc[2, "data"] = na_value + expected = pd.DataFrame( + {"data": pd.Series([data[0], data[1], na_value], dtype=data.dtype)} + ) + tm.assert_frame_equal(df, expected) + + def test_setitem_series(self, data, full_indexer): + # https://github.com/pandas-dev/pandas/issues/32395 + ser = pd.Series(data, name="data") + result = pd.Series(index=ser.index, dtype=object, name="data") + + # because result has object dtype, the attempt to do setting inplace + # is successful, and object dtype is retained + key = full_indexer(ser) + result.loc[key] = ser + + expected = pd.Series( + data.astype(object), index=ser.index, name="data", dtype=object + ) + tm.assert_series_equal(result, expected) + + def test_setitem_frame_2d_values(self, data): + # GH#44514 + df = pd.DataFrame({"A": data}) + + # Avoiding using_array_manager fixture + # https://github.com/pandas-dev/pandas/pull/44514#discussion_r754002410 + using_array_manager = isinstance(df._mgr, pd.core.internals.ArrayManager) + using_copy_on_write = pd.options.mode.copy_on_write + + blk_data = df._mgr.arrays[0] + + orig = df.copy() + + df.iloc[:] = df + tm.assert_frame_equal(df, orig) + + df.iloc[:-1] = df.iloc[:-1] + tm.assert_frame_equal(df, orig) + + df.iloc[:] = df.values + tm.assert_frame_equal(df, orig) + if not using_array_manager and not 
using_copy_on_write: + # GH#33457 Check that this setting occurred in-place + # FIXME(ArrayManager): this should work there too + assert df._mgr.arrays[0] is blk_data + + df.iloc[:-1] = df.values[:-1] + tm.assert_frame_equal(df, orig) + + def test_delitem_series(self, data): + # GH#40763 + ser = pd.Series(data, name="data") + + taker = np.arange(len(ser)) + taker = np.delete(taker, 1) + + expected = ser[taker] + del ser[1] + tm.assert_series_equal(ser, expected) + + def test_setitem_invalid(self, data, invalid_scalar): + msg = "" # messages vary by subclass, so we do not test it + with pytest.raises((ValueError, TypeError), match=msg): + data[0] = invalid_scalar + + with pytest.raises((ValueError, TypeError), match=msg): + data[:] = invalid_scalar + + def test_setitem_2d_values(self, data): + # GH50085 + original = data.copy() + df = pd.DataFrame({"a": data, "b": data}) + df.loc[[0, 1], :] = df.loc[[1, 0], :].values + assert (df.loc[0, :] == original[1]).all() + assert (df.loc[1, :] == original[0]).all() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/conftest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/conftest.py new file mode 100644 index 00000000..7b7945b1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/conftest.py @@ -0,0 +1,221 @@ +import operator + +import pytest + +from pandas import ( + Series, + options, +) + + +@pytest.fixture +def dtype(): + """A fixture providing the ExtensionDtype to validate.""" + raise NotImplementedError + + +@pytest.fixture +def data(): + """ + Length-100 array for this type. + + * data[0] and data[1] should both be non missing + * data[0] and data[1] should not be equal + """ + raise NotImplementedError + + +@pytest.fixture +def data_for_twos(dtype): + """ + Length-100 array in which all the elements are two. + + Call pytest.skip in your fixture if the dtype does not support divmod. + """ + if not (dtype._is_numeric or dtype.kind == "m"): + # Object-dtypes may want to allow this, but for the most part + # only numeric and timedelta-like dtypes will need to implement this. + pytest.skip("Not a numeric dtype") + + raise NotImplementedError + + +@pytest.fixture +def data_missing(): + """Length-2 array with [NA, Valid]""" + raise NotImplementedError + + +@pytest.fixture(params=["data", "data_missing"]) +def all_data(request, data, data_missing): + """Parametrized fixture giving 'data' and 'data_missing'""" + if request.param == "data": + return data + elif request.param == "data_missing": + return data_missing + + +@pytest.fixture +def data_repeated(data): + """ + Generate many datasets. + + Parameters + ---------- + data : fixture implementing `data` + + Returns + ------- + Callable[[int], Generator]: + A callable that takes a `count` argument and + returns a generator yielding `count` datasets. + """ + + def gen(count): + for _ in range(count): + yield data + + return gen + + +@pytest.fixture +def data_for_sorting(): + """ + Length-3 array with a known sort order. + + This should be three items [B, C, A] with + A < B < C + + For boolean dtypes (for which there are only 2 values available), + set B=C=True + """ + raise NotImplementedError + + +@pytest.fixture +def data_missing_for_sorting(): + """ + Length-3 array with a known sort order. + + This should be three items [B, NA, A] with + A < B and NA missing. + """ + raise NotImplementedError + + +@pytest.fixture +def na_cmp(): + """ + Binary operator for comparing NA values. 
+ + Should return a function of two arguments that returns + True if both arguments are (scalar) NA for your type. + + By default, uses ``operator.is_`` + """ + return operator.is_ + + +@pytest.fixture +def na_value(dtype): + """The scalar missing value for this type. Default dtype.na_value""" + return dtype.na_value + + +@pytest.fixture +def data_for_grouping(): + """ + Data for factorization, grouping, and unique tests. + + Expected to be like [B, B, NA, NA, A, A, B, C] + + Where A < B < C and NA is missing. + + If a dtype has _is_boolean = True, i.e. only 2 unique non-NA entries, + then set C=B. + """ + raise NotImplementedError + + +@pytest.fixture(params=[True, False]) +def box_in_series(request): + """Whether to box the data in a Series""" + return request.param + + +@pytest.fixture( + params=[ + lambda x: 1, + lambda x: [1] * len(x), + lambda x: Series([1] * len(x)), + lambda x: x, + ], + ids=["scalar", "list", "series", "object"], +) +def groupby_apply_op(request): + """ + Functions to test groupby.apply(). + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def as_frame(request): + """ + Boolean fixture to support Series and Series.to_frame() comparison testing. + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def as_series(request): + """ + Boolean fixture to support arr and Series(arr) comparison testing. + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def use_numpy(request): + """ + Boolean fixture to support comparison testing of ExtensionDtype array + and numpy array. + """ + return request.param + + +@pytest.fixture(params=["ffill", "bfill"]) +def fillna_method(request): + """ + Parametrized fixture giving method parameters 'ffill' and 'bfill' for + Series.fillna(method=) testing. + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def as_array(request): + """ + Boolean fixture to support ExtensionDtype _from_sequence method testing. + """ + return request.param + + +@pytest.fixture +def invalid_scalar(data): + """ + A scalar that *cannot* be held by this ExtensionArray. + + The default should work for most subclasses, but is not guaranteed. + + If the array can hold any item (i.e. object dtype), then use pytest.skip. + """ + return object.__new__(object) + + +@pytest.fixture +def using_copy_on_write() -> bool: + """ + Fixture to check if Copy-on-Write is enabled. 
+ """ + return options.mode.copy_on_write and options.mode.data_manager == "block" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/date/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/date/__init__.py new file mode 100644 index 00000000..2a8c7e9f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/date/__init__.py @@ -0,0 +1,6 @@ +from pandas.tests.extension.date.array import ( + DateArray, + DateDtype, +) + +__all__ = ["DateArray", "DateDtype"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/date/array.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/date/array.py new file mode 100644 index 00000000..39accd6d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/date/array.py @@ -0,0 +1,184 @@ +from __future__ import annotations + +import datetime as dt +from typing import ( + TYPE_CHECKING, + Any, + cast, +) + +import numpy as np + +from pandas.core.dtypes.dtypes import register_extension_dtype + +from pandas.api.extensions import ( + ExtensionArray, + ExtensionDtype, +) +from pandas.api.types import pandas_dtype + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + Dtype, + PositionalIndexer, + ) + + +@register_extension_dtype +class DateDtype(ExtensionDtype): + @property + def type(self): + return dt.date + + @property + def name(self): + return "DateDtype" + + @classmethod + def construct_from_string(cls, string: str): + if not isinstance(string, str): + raise TypeError( + f"'construct_from_string' expects a string, got {type(string)}" + ) + + if string == cls.__name__: + return cls() + else: + raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'") + + @classmethod + def construct_array_type(cls): + return DateArray + + @property + def na_value(self): + return dt.date.min + + def __repr__(self) -> str: + return self.name + + +class DateArray(ExtensionArray): + def __init__( + self, + dates: ( + dt.date + | Sequence[dt.date] + | tuple[np.ndarray, np.ndarray, np.ndarray] + | np.ndarray + ), + ) -> None: + if isinstance(dates, dt.date): + self._year = np.array([dates.year]) + self._month = np.array([dates.month]) + self._day = np.array([dates.year]) + return + + ldates = len(dates) + if isinstance(dates, list): + # pre-allocate the arrays since we know the size before hand + self._year = np.zeros(ldates, dtype=np.uint16) # 65535 (0, 9999) + self._month = np.zeros(ldates, dtype=np.uint8) # 255 (1, 31) + self._day = np.zeros(ldates, dtype=np.uint8) # 255 (1, 12) + # populate them + for i, (y, m, d) in enumerate( + (date.year, date.month, date.day) for date in dates + ): + self._year[i] = y + self._month[i] = m + self._day[i] = d + + elif isinstance(dates, tuple): + # only support triples + if ldates != 3: + raise ValueError("only triples are valid") + # check if all elements have the same type + if any(not isinstance(x, np.ndarray) for x in dates): + raise TypeError("invalid type") + ly, lm, ld = (len(cast(np.ndarray, d)) for d in dates) + if not ly == lm == ld: + raise ValueError( + f"tuple members must have the same length: {(ly, lm, ld)}" + ) + self._year = dates[0].astype(np.uint16) + self._month = dates[1].astype(np.uint8) + self._day = dates[2].astype(np.uint8) + + elif isinstance(dates, np.ndarray) and dates.dtype == "U10": + self._year = np.zeros(ldates, dtype=np.uint16) # 65535 (0, 9999) + self._month = np.zeros(ldates, dtype=np.uint8) # 255 (1, 31) + self._day = 
np.zeros(ldates, dtype=np.uint8) # 255 (1, 12) + + # error: "object_" object is not iterable + obj = np.char.split(dates, sep="-") + for (i,), (y, m, d) in np.ndenumerate(obj): # type: ignore[misc] + self._year[i] = int(y) + self._month[i] = int(m) + self._day[i] = int(d) + + else: + raise TypeError(f"{type(dates)} is not supported") + + @property + def dtype(self) -> ExtensionDtype: + return DateDtype() + + def astype(self, dtype, copy=True): + dtype = pandas_dtype(dtype) + + if isinstance(dtype, DateDtype): + data = self.copy() if copy else self + else: + data = self.to_numpy(dtype=dtype, copy=copy, na_value=dt.date.min) + + return data + + @property + def nbytes(self) -> int: + return self._year.nbytes + self._month.nbytes + self._day.nbytes + + def __len__(self) -> int: + return len(self._year) # all 3 arrays are enforced to have the same length + + def __getitem__(self, item: PositionalIndexer): + if isinstance(item, int): + return dt.date(self._year[item], self._month[item], self._day[item]) + else: + raise NotImplementedError("only ints are supported as indexes") + + def __setitem__(self, key: int | slice | np.ndarray, value: Any) -> None: + if not isinstance(key, int): + raise NotImplementedError("only ints are supported as indexes") + + if not isinstance(value, dt.date): + raise TypeError("you can only set datetime.date types") + + self._year[key] = value.year + self._month[key] = value.month + self._day[key] = value.day + + def __repr__(self) -> str: + return f"DateArray{list(zip(self._year, self._month, self._day))}" + + def copy(self) -> DateArray: + return DateArray((self._year.copy(), self._month.copy(), self._day.copy())) + + def isna(self) -> np.ndarray: + return np.logical_and( + np.logical_and( + self._year == dt.date.min.year, self._month == dt.date.min.month + ), + self._day == dt.date.min.day, + ) + + @classmethod + def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy=False): + if isinstance(scalars, dt.date): + pass + elif isinstance(scalars, DateArray): + pass + elif isinstance(scalars, np.ndarray): + scalars = scalars.astype("U10") # 10 chars for yyyy-mm-dd + return DateArray(scalars) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/decimal/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/decimal/__init__.py new file mode 100644 index 00000000..34727b43 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/decimal/__init__.py @@ -0,0 +1,8 @@ +from pandas.tests.extension.decimal.array import ( + DecimalArray, + DecimalDtype, + make_data, + to_decimal, +) + +__all__ = ["DecimalArray", "DecimalDtype", "to_decimal", "make_data"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/decimal/array.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/decimal/array.py new file mode 100644 index 00000000..9ce7ac30 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/decimal/array.py @@ -0,0 +1,309 @@ +from __future__ import annotations + +import decimal +import numbers +import sys +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.common import ( + is_dtype_equal, + is_float, + is_integer, + pandas_dtype, +) + +import pandas as pd +from pandas.api.extensions import ( + no_default, + register_extension_dtype, +) +from pandas.api.types import ( + is_list_like, + is_scalar, +) +from pandas.core import arraylike +from pandas.core.algorithms 
import value_counts_internal as value_counts +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays import ( + ExtensionArray, + ExtensionScalarOpsMixin, +) +from pandas.core.indexers import check_array_indexer + +if TYPE_CHECKING: + from pandas._typing import type_t + + +@register_extension_dtype +class DecimalDtype(ExtensionDtype): + type = decimal.Decimal + name = "decimal" + na_value = decimal.Decimal("NaN") + _metadata = ("context",) + + def __init__(self, context=None) -> None: + self.context = context or decimal.getcontext() + + def __repr__(self) -> str: + return f"DecimalDtype(context={self.context})" + + @classmethod + def construct_array_type(cls) -> type_t[DecimalArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + return DecimalArray + + @property + def _is_numeric(self) -> bool: + return True + + +class DecimalArray(OpsMixin, ExtensionScalarOpsMixin, ExtensionArray): + __array_priority__ = 1000 + + def __init__(self, values, dtype=None, copy=False, context=None) -> None: + for i, val in enumerate(values): + if is_float(val) or is_integer(val): + if np.isnan(val): + values[i] = DecimalDtype.na_value + else: + # error: Argument 1 has incompatible type "float | int | + # integer[Any]"; expected "Decimal | float | str | tuple[int, + # Sequence[int], int]" + values[i] = DecimalDtype.type(val) # type: ignore[arg-type] + elif not isinstance(val, decimal.Decimal): + raise TypeError("All values must be of type " + str(decimal.Decimal)) + values = np.asarray(values, dtype=object) + + self._data = values + # Some aliases for common attribute names to ensure pandas supports + # these + self._items = self.data = self._data + # those aliases are currently not working due to assumptions + # in internal code (GH-20735) + # self._values = self.values = self.data + self._dtype = DecimalDtype(context) + + @property + def dtype(self): + return self._dtype + + @classmethod + def _from_sequence(cls, scalars, dtype=None, copy=False): + return cls(scalars) + + @classmethod + def _from_sequence_of_strings(cls, strings, dtype=None, copy=False): + return cls._from_sequence([decimal.Decimal(x) for x in strings], dtype, copy) + + @classmethod + def _from_factorized(cls, values, original): + return cls(values) + + _HANDLED_TYPES = (decimal.Decimal, numbers.Number, np.ndarray) + + def to_numpy( + self, + dtype=None, + copy: bool = False, + na_value: object = no_default, + decimals=None, + ) -> np.ndarray: + result = np.asarray(self, dtype=dtype) + if decimals is not None: + result = np.asarray([round(x, decimals) for x in result]) + return result + + def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): + # + if not all( + isinstance(t, self._HANDLED_TYPES + (DecimalArray,)) for t in inputs + ): + return NotImplemented + + result = arraylike.maybe_dispatch_ufunc_to_dunder_op( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + # e.g. 
test_array_ufunc_series_scalar_other + return result + + if "out" in kwargs: + return arraylike.dispatch_ufunc_with_out( + self, ufunc, method, *inputs, **kwargs + ) + + inputs = tuple(x._data if isinstance(x, DecimalArray) else x for x in inputs) + result = getattr(ufunc, method)(*inputs, **kwargs) + + if method == "reduce": + result = arraylike.dispatch_reduction_ufunc( + self, ufunc, method, *inputs, **kwargs + ) + if result is not NotImplemented: + return result + + def reconstruct(x): + if isinstance(x, (decimal.Decimal, numbers.Number)): + return x + else: + return DecimalArray._from_sequence(x) + + if ufunc.nout > 1: + return tuple(reconstruct(x) for x in result) + else: + return reconstruct(result) + + def __getitem__(self, item): + if isinstance(item, numbers.Integral): + return self._data[item] + else: + # array, slice. + item = pd.api.indexers.check_array_indexer(self, item) + return type(self)(self._data[item]) + + def take(self, indexer, allow_fill=False, fill_value=None): + from pandas.api.extensions import take + + data = self._data + if allow_fill and fill_value is None: + fill_value = self.dtype.na_value + + result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill) + return self._from_sequence(result) + + def copy(self): + return type(self)(self._data.copy(), dtype=self.dtype) + + def astype(self, dtype, copy=True): + if is_dtype_equal(dtype, self._dtype): + if not copy: + return self + dtype = pandas_dtype(dtype) + if isinstance(dtype, type(self.dtype)): + return type(self)(self._data, copy=copy, context=dtype.context) + + return super().astype(dtype, copy=copy) + + def __setitem__(self, key, value) -> None: + if is_list_like(value): + if is_scalar(key): + raise ValueError("setting an array element with a sequence.") + value = [decimal.Decimal(v) for v in value] + else: + value = decimal.Decimal(value) + + key = check_array_indexer(self, key) + self._data[key] = value + + def __len__(self) -> int: + return len(self._data) + + def __contains__(self, item) -> bool | np.bool_: + if not isinstance(item, decimal.Decimal): + return False + elif item.is_nan(): + return self.isna().any() + else: + return super().__contains__(item) + + @property + def nbytes(self) -> int: + n = len(self) + if n: + return n * sys.getsizeof(self[0]) + return 0 + + def isna(self): + return np.array([x.is_nan() for x in self._data], dtype=bool) + + @property + def _na_value(self): + return decimal.Decimal("NaN") + + def _formatter(self, boxed=False): + if boxed: + return "Decimal: {}".format + return repr + + @classmethod + def _concat_same_type(cls, to_concat): + return cls(np.concatenate([x._data for x in to_concat])) + + def _reduce( + self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs + ): + if skipna and self.isna().any(): + # If we don't have any NAs, we can ignore skipna + other = self[~self.isna()] + result = other._reduce(name, **kwargs) + elif name == "sum" and len(self) == 0: + # GH#29630 avoid returning int 0 or np.bool_(False) on old numpy + result = decimal.Decimal(0) + else: + try: + op = getattr(self.data, name) + except AttributeError as err: + raise NotImplementedError( + f"decimal does not support the {name} operation" + ) from err + result = op(axis=0) + + if keepdims: + return type(self)([result]) + else: + return result + + def _cmp_method(self, other, op): + # For use with OpsMixin + def convert_values(param): + if isinstance(param, ExtensionArray) or is_list_like(param): + ovalues = param + else: + # Assume it's an object + ovalues = 
[param] * len(self) + return ovalues + + lvalues = self + rvalues = convert_values(other) + + # If the operator is not defined for the underlying objects, + # a TypeError should be raised + res = [op(a, b) for (a, b) in zip(lvalues, rvalues)] + + return np.asarray(res, dtype=bool) + + def value_counts(self, dropna: bool = True): + return value_counts(self.to_numpy(), dropna=dropna) + + # We override fillna here to simulate a 3rd party EA that has done so. This + # lets us test the deprecation telling authors to implement _pad_or_backfill + # Simulate a 3rd-party EA that has not yet updated to include a "copy" + # keyword in its fillna method. + # error: Signature of "fillna" incompatible with supertype "ExtensionArray" + def fillna( # type: ignore[override] + self, + value=None, + method=None, + limit: int | None = None, + ): + return super().fillna(value=value, method=method, limit=limit, copy=True) + + +def to_decimal(values, context=None): + return DecimalArray([decimal.Decimal(x) for x in values], context=context) + + +def make_data(): + return [decimal.Decimal(val) for val in np.random.default_rng(2).random(100)] + + +DecimalArray._add_arithmetic_ops() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/decimal/test_decimal.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/decimal/test_decimal.py new file mode 100644 index 00000000..e61a2894 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/decimal/test_decimal.py @@ -0,0 +1,543 @@ +from __future__ import annotations + +import decimal +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.tests.extension import base +from pandas.tests.extension.decimal.array import ( + DecimalArray, + DecimalDtype, + make_data, + to_decimal, +) + + +@pytest.fixture +def dtype(): + return DecimalDtype() + + +@pytest.fixture +def data(): + return DecimalArray(make_data()) + + +@pytest.fixture +def data_for_twos(): + return DecimalArray([decimal.Decimal(2) for _ in range(100)]) + + +@pytest.fixture +def data_missing(): + return DecimalArray([decimal.Decimal("NaN"), decimal.Decimal(1)]) + + +@pytest.fixture +def data_for_sorting(): + return DecimalArray( + [decimal.Decimal("1"), decimal.Decimal("2"), decimal.Decimal("0")] + ) + + +@pytest.fixture +def data_missing_for_sorting(): + return DecimalArray( + [decimal.Decimal("1"), decimal.Decimal("NaN"), decimal.Decimal("0")] + ) + + +@pytest.fixture +def na_cmp(): + return lambda x, y: x.is_nan() and y.is_nan() + + +@pytest.fixture +def data_for_grouping(): + b = decimal.Decimal("1.0") + a = decimal.Decimal("0.0") + c = decimal.Decimal("2.0") + na = decimal.Decimal("NaN") + return DecimalArray([b, b, na, na, a, a, b, c]) + + +class TestDecimalArray(base.ExtensionTests): + def _get_expected_exception( + self, op_name: str, obj, other + ) -> type[Exception] | None: + return None + + def _supports_reduction(self, obj, op_name: str) -> bool: + return True + + def check_reduce(self, s, op_name, skipna): + if op_name == "count": + return super().check_reduce(s, op_name, skipna) + else: + result = getattr(s, op_name)(skipna=skipna) + expected = getattr(np.asarray(s), op_name)() + tm.assert_almost_equal(result, expected) + + def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request): + if all_numeric_reductions in ["kurt", "skew", "sem", "median"]: + mark = pytest.mark.xfail(raises=NotImplementedError) + request.node.add_marker(mark) + 
super().test_reduce_series_numeric(data, all_numeric_reductions, skipna) + + def test_reduce_frame(self, data, all_numeric_reductions, skipna, request): + op_name = all_numeric_reductions + if op_name in ["skew", "median"]: + mark = pytest.mark.xfail(raises=NotImplementedError) + request.node.add_marker(mark) + + return super().test_reduce_frame(data, all_numeric_reductions, skipna) + + def test_compare_scalar(self, data, comparison_op): + ser = pd.Series(data) + self._compare_other(ser, data, comparison_op, 0.5) + + def test_compare_array(self, data, comparison_op): + ser = pd.Series(data) + + alter = np.random.default_rng(2).choice([-1, 0, 1], len(data)) + # Randomly double, halve or keep same value + other = pd.Series(data) * [decimal.Decimal(pow(2.0, i)) for i in alter] + self._compare_other(ser, data, comparison_op, other) + + def test_arith_series_with_array(self, data, all_arithmetic_operators): + op_name = all_arithmetic_operators + ser = pd.Series(data) + + context = decimal.getcontext() + divbyzerotrap = context.traps[decimal.DivisionByZero] + invalidoptrap = context.traps[decimal.InvalidOperation] + context.traps[decimal.DivisionByZero] = 0 + context.traps[decimal.InvalidOperation] = 0 + + # Decimal supports ops with int, but not float + other = pd.Series([int(d * 100) for d in data]) + self.check_opname(ser, op_name, other) + + if "mod" not in op_name: + self.check_opname(ser, op_name, ser * 2) + + self.check_opname(ser, op_name, 0) + self.check_opname(ser, op_name, 5) + context.traps[decimal.DivisionByZero] = divbyzerotrap + context.traps[decimal.InvalidOperation] = invalidoptrap + + def test_fillna_frame(self, data_missing): + msg = "ExtensionArray.fillna added a 'copy' keyword" + with tm.assert_produces_warning( + DeprecationWarning, match=msg, check_stacklevel=False + ): + super().test_fillna_frame(data_missing) + + def test_fillna_limit_pad(self, data_missing): + msg = "ExtensionArray.fillna 'method' keyword is deprecated" + with tm.assert_produces_warning( + DeprecationWarning, + match=msg, + check_stacklevel=False, + raise_on_extra_warnings=False, + ): + super().test_fillna_limit_pad(data_missing) + + msg = "The 'method' keyword in DecimalArray.fillna is deprecated" + with tm.assert_produces_warning( + FutureWarning, + match=msg, + check_stacklevel=False, + raise_on_extra_warnings=False, + ): + super().test_fillna_limit_pad(data_missing) + + def test_fillna_limit_backfill(self, data_missing): + msg = "Series.fillna with 'method' is deprecated" + with tm.assert_produces_warning( + FutureWarning, + match=msg, + check_stacklevel=False, + raise_on_extra_warnings=False, + ): + super().test_fillna_limit_backfill(data_missing) + + msg = "ExtensionArray.fillna 'method' keyword is deprecated" + with tm.assert_produces_warning( + DeprecationWarning, + match=msg, + check_stacklevel=False, + raise_on_extra_warnings=False, + ): + super().test_fillna_limit_backfill(data_missing) + + msg = "The 'method' keyword in DecimalArray.fillna is deprecated" + with tm.assert_produces_warning( + FutureWarning, + match=msg, + check_stacklevel=False, + raise_on_extra_warnings=False, + ): + super().test_fillna_limit_backfill(data_missing) + + def test_fillna_no_op_returns_copy(self, data): + msg = "|".join( + [ + "ExtensionArray.fillna 'method' keyword is deprecated", + "The 'method' keyword in DecimalArray.fillna is deprecated", + ] + ) + with tm.assert_produces_warning( + (FutureWarning, DeprecationWarning), match=msg, check_stacklevel=False + ): + super().test_fillna_no_op_returns_copy(data) + + 
def test_fillna_series(self, data_missing): + msg = "ExtensionArray.fillna added a 'copy' keyword" + with tm.assert_produces_warning( + DeprecationWarning, match=msg, check_stacklevel=False + ): + super().test_fillna_series(data_missing) + + def test_fillna_series_method(self, data_missing, fillna_method): + msg = "|".join( + [ + "ExtensionArray.fillna 'method' keyword is deprecated", + "The 'method' keyword in DecimalArray.fillna is deprecated", + ] + ) + with tm.assert_produces_warning( + (FutureWarning, DeprecationWarning), match=msg, check_stacklevel=False + ): + super().test_fillna_series_method(data_missing, fillna_method) + + def test_fillna_copy_frame(self, data_missing, using_copy_on_write): + warn = DeprecationWarning if not using_copy_on_write else None + msg = "ExtensionArray.fillna added a 'copy' keyword" + with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): + super().test_fillna_copy_frame(data_missing) + + def test_fillna_copy_series(self, data_missing, using_copy_on_write): + warn = DeprecationWarning if not using_copy_on_write else None + msg = "ExtensionArray.fillna added a 'copy' keyword" + with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): + super().test_fillna_copy_series(data_missing) + + @pytest.mark.parametrize("dropna", [True, False]) + def test_value_counts(self, all_data, dropna, request): + all_data = all_data[:10] + if dropna: + other = np.array(all_data[~all_data.isna()]) + else: + other = all_data + + vcs = pd.Series(all_data).value_counts(dropna=dropna) + vcs_ex = pd.Series(other).value_counts(dropna=dropna) + + with decimal.localcontext() as ctx: + # avoid raising when comparing Decimal("NAN") < Decimal(2) + ctx.traps[decimal.InvalidOperation] = False + + result = vcs.sort_index() + expected = vcs_ex.sort_index() + + tm.assert_series_equal(result, expected) + + def test_series_repr(self, data): + # Overriding this base test to explicitly test that + # the custom _formatter is used + ser = pd.Series(data) + assert data.dtype.name in repr(ser) + assert "Decimal: " in repr(ser) + + @pytest.mark.xfail( + reason="Looks like the test (incorrectly) implicitly assumes int/bool dtype" + ) + def test_invert(self, data): + super().test_invert(data) + + @pytest.mark.xfail(reason="Inconsistent array-vs-scalar behavior") + @pytest.mark.parametrize("ufunc", [np.positive, np.negative, np.abs]) + def test_unary_ufunc_dunder_equivalence(self, data, ufunc): + super().test_unary_ufunc_dunder_equivalence(data, ufunc) + + +def test_take_na_value_other_decimal(): + arr = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("2.0")]) + result = arr.take([0, -1], allow_fill=True, fill_value=decimal.Decimal("-1.0")) + expected = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("-1.0")]) + tm.assert_extension_array_equal(result, expected) + + +def test_series_constructor_coerce_data_to_extension_dtype(): + dtype = DecimalDtype() + ser = pd.Series([0, 1, 2], dtype=dtype) + + arr = DecimalArray( + [decimal.Decimal(0), decimal.Decimal(1), decimal.Decimal(2)], + dtype=dtype, + ) + exp = pd.Series(arr) + tm.assert_series_equal(ser, exp) + + +def test_series_constructor_with_dtype(): + arr = DecimalArray([decimal.Decimal("10.0")]) + result = pd.Series(arr, dtype=DecimalDtype()) + expected = pd.Series(arr) + tm.assert_series_equal(result, expected) + + result = pd.Series(arr, dtype="int64") + expected = pd.Series([10]) + tm.assert_series_equal(result, expected) + + +def test_dataframe_constructor_with_dtype(): + arr = 
DecimalArray([decimal.Decimal("10.0")]) + + result = pd.DataFrame({"A": arr}, dtype=DecimalDtype()) + expected = pd.DataFrame({"A": arr}) + tm.assert_frame_equal(result, expected) + + arr = DecimalArray([decimal.Decimal("10.0")]) + result = pd.DataFrame({"A": arr}, dtype="int64") + expected = pd.DataFrame({"A": [10]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("frame", [True, False]) +def test_astype_dispatches(frame): + # This is a dtype-specific test that ensures Series[decimal].astype + # gets all the way through to ExtensionArray.astype + # Designing a reliable smoke test that works for arbitrary data types + # is difficult. + data = pd.Series(DecimalArray([decimal.Decimal(2)]), name="a") + ctx = decimal.Context() + ctx.prec = 5 + + if frame: + data = data.to_frame() + + result = data.astype(DecimalDtype(ctx)) + + if frame: + result = result["a"] + + assert result.dtype.context.prec == ctx.prec + + +class DecimalArrayWithoutFromSequence(DecimalArray): + """Helper class for testing error handling in _from_sequence.""" + + @classmethod + def _from_sequence(cls, scalars, dtype=None, copy=False): + raise KeyError("For the test") + + +class DecimalArrayWithoutCoercion(DecimalArrayWithoutFromSequence): + @classmethod + def _create_arithmetic_method(cls, op): + return cls._create_method(op, coerce_to_dtype=False) + + +DecimalArrayWithoutCoercion._add_arithmetic_ops() + + +def test_combine_from_sequence_raises(monkeypatch): + # https://github.com/pandas-dev/pandas/issues/22850 + cls = DecimalArrayWithoutFromSequence + + @classmethod + def construct_array_type(cls): + return DecimalArrayWithoutFromSequence + + monkeypatch.setattr(DecimalDtype, "construct_array_type", construct_array_type) + + arr = cls([decimal.Decimal("1.0"), decimal.Decimal("2.0")]) + ser = pd.Series(arr) + result = ser.combine(ser, operator.add) + + # note: object dtype + expected = pd.Series( + [decimal.Decimal("2.0"), decimal.Decimal("4.0")], dtype="object" + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "class_", [DecimalArrayWithoutFromSequence, DecimalArrayWithoutCoercion] +) +def test_scalar_ops_from_sequence_raises(class_): + # op(EA, EA) should return an EA, or an ndarray if it's not possible + # to return an EA with the return values. 
+ arr = class_([decimal.Decimal("1.0"), decimal.Decimal("2.0")]) + result = arr + arr + expected = np.array( + [decimal.Decimal("2.0"), decimal.Decimal("4.0")], dtype="object" + ) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "reverse, expected_div, expected_mod", + [(False, [0, 1, 1, 2], [1, 0, 1, 0]), (True, [2, 1, 0, 0], [0, 0, 2, 2])], +) +def test_divmod_array(reverse, expected_div, expected_mod): + # https://github.com/pandas-dev/pandas/issues/22930 + arr = to_decimal([1, 2, 3, 4]) + if reverse: + div, mod = divmod(2, arr) + else: + div, mod = divmod(arr, 2) + expected_div = to_decimal(expected_div) + expected_mod = to_decimal(expected_mod) + + tm.assert_extension_array_equal(div, expected_div) + tm.assert_extension_array_equal(mod, expected_mod) + + +def test_ufunc_fallback(data): + a = data[:5] + s = pd.Series(a, index=range(3, 8)) + result = np.abs(s) + expected = pd.Series(np.abs(a), index=range(3, 8)) + tm.assert_series_equal(result, expected) + + +def test_array_ufunc(): + a = to_decimal([1, 2, 3]) + result = np.exp(a) + expected = to_decimal(np.exp(a._data)) + tm.assert_extension_array_equal(result, expected) + + +def test_array_ufunc_series(): + a = to_decimal([1, 2, 3]) + s = pd.Series(a) + result = np.exp(s) + expected = pd.Series(to_decimal(np.exp(a._data))) + tm.assert_series_equal(result, expected) + + +def test_array_ufunc_series_scalar_other(): + # check _HANDLED_TYPES + a = to_decimal([1, 2, 3]) + s = pd.Series(a) + result = np.add(s, decimal.Decimal(1)) + expected = pd.Series(np.add(a, decimal.Decimal(1))) + tm.assert_series_equal(result, expected) + + +def test_array_ufunc_series_defer(): + a = to_decimal([1, 2, 3]) + s = pd.Series(a) + + expected = pd.Series(to_decimal([2, 4, 6])) + r1 = np.add(s, a) + r2 = np.add(a, s) + + tm.assert_series_equal(r1, expected) + tm.assert_series_equal(r2, expected) + + +def test_groupby_agg(): + # Ensure that the result of agg is inferred to be decimal dtype + # https://github.com/pandas-dev/pandas/issues/29141 + + data = make_data()[:5] + df = pd.DataFrame( + {"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)} + ) + + # single key, selected column + expected = pd.Series(to_decimal([data[0], data[3]])) + result = df.groupby("id1")["decimals"].agg(lambda x: x.iloc[0]) + tm.assert_series_equal(result, expected, check_names=False) + result = df["decimals"].groupby(df["id1"]).agg(lambda x: x.iloc[0]) + tm.assert_series_equal(result, expected, check_names=False) + + # multiple keys, selected column + expected = pd.Series( + to_decimal([data[0], data[1], data[3]]), + index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 1)]), + ) + result = df.groupby(["id1", "id2"])["decimals"].agg(lambda x: x.iloc[0]) + tm.assert_series_equal(result, expected, check_names=False) + result = df["decimals"].groupby([df["id1"], df["id2"]]).agg(lambda x: x.iloc[0]) + tm.assert_series_equal(result, expected, check_names=False) + + # multiple columns + expected = pd.DataFrame({"id2": [0, 1], "decimals": to_decimal([data[0], data[3]])}) + result = df.groupby("id1").agg(lambda x: x.iloc[0]) + tm.assert_frame_equal(result, expected, check_names=False) + + +def test_groupby_agg_ea_method(monkeypatch): + # Ensure that the result of agg is inferred to be decimal dtype + # https://github.com/pandas-dev/pandas/issues/29141 + + def DecimalArray__my_sum(self): + return np.sum(np.array(self)) + + monkeypatch.setattr(DecimalArray, "my_sum", DecimalArray__my_sum, raising=False) + + data = make_data()[:5] + df 
= pd.DataFrame({"id": [0, 0, 0, 1, 1], "decimals": DecimalArray(data)})
+    expected = pd.Series(to_decimal([data[0] + data[1] + data[2], data[3] + data[4]]))
+
+    result = df.groupby("id")["decimals"].agg(lambda x: x.values.my_sum())
+    tm.assert_series_equal(result, expected, check_names=False)
+    s = pd.Series(DecimalArray(data))
+    grouper = np.array([0, 0, 0, 1, 1], dtype=np.int64)
+    result = s.groupby(grouper).agg(lambda x: x.values.my_sum())
+    tm.assert_series_equal(result, expected, check_names=False)
+
+
+def test_indexing_no_materialize(monkeypatch):
+    # See https://github.com/pandas-dev/pandas/issues/29708
+    # Ensure that indexing operations do not materialize (convert to a numpy
+    # array) the ExtensionArray unnecessarily
+
+    def DecimalArray__array__(self, dtype=None):
+        raise Exception("tried to convert a DecimalArray to a numpy array")
+
+    monkeypatch.setattr(DecimalArray, "__array__", DecimalArray__array__, raising=False)
+
+    data = make_data()
+    s = pd.Series(DecimalArray(data))
+    df = pd.DataFrame({"a": s, "b": range(len(s))})
+
+    # ensure the following operations do not raise an error
+    s[s > 0.5]
+    df[s > 0.5]
+    s.at[0]
+    df.at[0, "a"]
+
+
+def test_to_numpy_keyword():
+    # test the extra keyword
+    values = [decimal.Decimal("1.1111"), decimal.Decimal("2.2222")]
+    expected = np.array(
+        [decimal.Decimal("1.11"), decimal.Decimal("2.22")], dtype="object"
+    )
+    a = pd.array(values, dtype="decimal")
+    result = a.to_numpy(decimals=2)
+    tm.assert_numpy_array_equal(result, expected)
+
+    result = pd.Series(a).to_numpy(decimals=2)
+    tm.assert_numpy_array_equal(result, expected)
+
+
+def test_array_copy_on_write(using_copy_on_write):
+    df = pd.DataFrame({"a": [decimal.Decimal(2), decimal.Decimal(3)]}, dtype="object")
+    df2 = df.astype(DecimalDtype())
+    df.iloc[0, 0] = 0
+    if using_copy_on_write:
+        expected = pd.DataFrame(
+            {"a": [decimal.Decimal(2), decimal.Decimal(3)]}, dtype=DecimalDtype()
+        )
+        tm.assert_equal(df2.values, expected.values)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/json/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/json/__init__.py
new file mode 100644
index 00000000..7ebfd54a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/json/__init__.py
@@ -0,0 +1,7 @@
+from pandas.tests.extension.json.array import (
+    JSONArray,
+    JSONDtype,
+    make_data,
+)
+
+__all__ = ["JSONArray", "JSONDtype", "make_data"]
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/json/array.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/json/array.py
new file mode 100644
index 00000000..05472eb7
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/json/array.py
@@ -0,0 +1,247 @@
+"""
+Test extension array for storing nested data in a pandas container.
+
+The JSONArray stores lists of dictionaries. The storage mechanism is a list,
+not an ndarray.
+
+Note
+----
+We currently store lists of UserDicts. Pandas has a few places
+internally that specifically check for dicts, and does non-scalar things
+in that case. We *want* the dictionaries to be treated as scalars, so we
+hack around pandas by using UserDicts.
+""" +from __future__ import annotations + +from collections import ( + UserDict, + abc, +) +import itertools +import numbers +import string +import sys +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np + +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_list_like, + pandas_dtype, +) + +import pandas as pd +from pandas.api.extensions import ( + ExtensionArray, + ExtensionDtype, +) +from pandas.core.indexers import unpack_tuple_and_ellipses + +if TYPE_CHECKING: + from collections.abc import Mapping + + from pandas._typing import type_t + + +class JSONDtype(ExtensionDtype): + type = abc.Mapping + name = "json" + na_value: Mapping[str, Any] = UserDict() + + @classmethod + def construct_array_type(cls) -> type_t[JSONArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + return JSONArray + + +class JSONArray(ExtensionArray): + dtype = JSONDtype() + __array_priority__ = 1000 + + def __init__(self, values, dtype=None, copy=False) -> None: + for val in values: + if not isinstance(val, self.dtype.type): + raise TypeError("All values must be of type " + str(self.dtype.type)) + self.data = values + + # Some aliases for common attribute names to ensure pandas supports + # these + self._items = self._data = self.data + # those aliases are currently not working due to assumptions + # in internal code (GH-20735) + # self._values = self.values = self.data + + @classmethod + def _from_sequence(cls, scalars, dtype=None, copy=False): + return cls(scalars) + + @classmethod + def _from_factorized(cls, values, original): + return cls([UserDict(x) for x in values if x != ()]) + + def __getitem__(self, item): + if isinstance(item, tuple): + item = unpack_tuple_and_ellipses(item) + + if isinstance(item, numbers.Integral): + return self.data[item] + elif isinstance(item, slice) and item == slice(None): + # Make sure we get a view + return type(self)(self.data) + elif isinstance(item, slice): + # slice + return type(self)(self.data[item]) + elif not is_list_like(item): + # e.g. 
"foo" or 2.5 + # exception message copied from numpy + raise IndexError( + r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis " + r"(`None`) and integer or boolean arrays are valid indices" + ) + else: + item = pd.api.indexers.check_array_indexer(self, item) + if is_bool_dtype(item.dtype): + return self._from_sequence([x for x, m in zip(self, item) if m]) + # integer + return type(self)([self.data[i] for i in item]) + + def __setitem__(self, key, value) -> None: + if isinstance(key, numbers.Integral): + self.data[key] = value + else: + if not isinstance(value, (type(self), abc.Sequence)): + # broadcast value + value = itertools.cycle([value]) + + if isinstance(key, np.ndarray) and key.dtype == "bool": + # masking + for i, (k, v) in enumerate(zip(key, value)): + if k: + assert isinstance(v, self.dtype.type) + self.data[i] = v + else: + for k, v in zip(key, value): + assert isinstance(v, self.dtype.type) + self.data[k] = v + + def __len__(self) -> int: + return len(self.data) + + def __eq__(self, other): + return NotImplemented + + def __ne__(self, other): + return NotImplemented + + def __array__(self, dtype=None): + if dtype is None: + dtype = object + if dtype == object: + # on py38 builds it looks like numpy is inferring to a non-1D array + return construct_1d_object_array_from_listlike(list(self)) + return np.asarray(self.data, dtype=dtype) + + @property + def nbytes(self) -> int: + return sys.getsizeof(self.data) + + def isna(self): + return np.array([x == self.dtype.na_value for x in self.data], dtype=bool) + + def take(self, indexer, allow_fill=False, fill_value=None): + # re-implement here, since NumPy has trouble setting + # sized objects like UserDicts into scalar slots of + # an ndarary. + indexer = np.asarray(indexer) + msg = ( + "Index is out of bounds or cannot do a " + "non-empty take from an empty array." + ) + + if allow_fill: + if fill_value is None: + fill_value = self.dtype.na_value + # bounds check + if (indexer < -1).any(): + raise ValueError + try: + output = [ + self.data[loc] if loc != -1 else fill_value for loc in indexer + ] + except IndexError as err: + raise IndexError(msg) from err + else: + try: + output = [self.data[loc] for loc in indexer] + except IndexError as err: + raise IndexError(msg) from err + + return self._from_sequence(output) + + def copy(self): + return type(self)(self.data[:]) + + def astype(self, dtype, copy=True): + # NumPy has issues when all the dicts are the same length. + # np.array([UserDict(...), UserDict(...)]) fails, + # but np.array([{...}, {...}]) works, so cast. + from pandas.core.arrays.string_ import StringDtype + + dtype = pandas_dtype(dtype) + # needed to add this check for the Series constructor + if isinstance(dtype, type(self.dtype)) and dtype == self.dtype: + if copy: + return self.copy() + return self + elif isinstance(dtype, StringDtype): + value = self.astype(str) # numpy doesn't like nested dicts + return dtype.construct_array_type()._from_sequence(value, copy=False) + + return np.array([dict(x) for x in self], dtype=dtype, copy=copy) + + def unique(self): + # Parent method doesn't work since np.array will try to infer + # a 2-dim object. 
+        return type(self)([dict(x) for x in {tuple(d.items()) for d in self.data}])
+
+    @classmethod
+    def _concat_same_type(cls, to_concat):
+        data = list(itertools.chain.from_iterable(x.data for x in to_concat))
+        return cls(data)
+
+    def _values_for_factorize(self):
+        frozen = self._values_for_argsort()
+        if len(frozen) == 0:
+            # factorize_array expects 1-d array, this is a len-0 2-d array.
+            frozen = frozen.ravel()
+        return frozen, ()
+
+    def _values_for_argsort(self):
+        # Bypass NumPy's shape inference to get a (N,) array of tuples.
+        frozen = [tuple(x.items()) for x in self]
+        return construct_1d_object_array_from_listlike(frozen)
+
+
+def make_data():
+    # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer
+    rng = np.random.default_rng(2)
+    return [
+        UserDict(
+            [
+                (rng.choice(list(string.ascii_letters)), rng.integers(0, 100))
+                for _ in range(rng.integers(0, 10))
+            ]
+        )
+        for _ in range(100)
+    ]
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/json/test_json.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/json/test_json.py
new file mode 100644
index 00000000..9e1a4fb5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/json/test_json.py
@@ -0,0 +1,379 @@
+import collections
+import operator
+import sys
+
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.tests.extension import base
+from pandas.tests.extension.json.array import (
+    JSONArray,
+    JSONDtype,
+    make_data,
+)
+
+
+@pytest.fixture
+def dtype():
+    return JSONDtype()
+
+
+@pytest.fixture
+def data():
+    """Length-100 JSONArray for semantics test."""
+    data = make_data()
+
+    # Why the while loop? NumPy cannot construct a 1-D object ndarray from
+    # equal-length mappings; it tries to infer a 2-D array instead. Many of
+    # our operations involve coercing the EA to an ndarray of objects. To
+    # avoid random test failures, we ensure that our data is coercible to an
+    # ndarray. Several tests deal with only the first two elements, so
+    # that's what we'll check.
+ + while len(data[0]) == len(data[1]): + data = make_data() + + return JSONArray(data) + + +@pytest.fixture +def data_missing(): + """Length 2 array with [NA, Valid]""" + return JSONArray([{}, {"a": 10}]) + + +@pytest.fixture +def data_for_sorting(): + return JSONArray([{"b": 1}, {"c": 4}, {"a": 2, "c": 3}]) + + +@pytest.fixture +def data_missing_for_sorting(): + return JSONArray([{"b": 1}, {}, {"a": 4}]) + + +@pytest.fixture +def na_cmp(): + return operator.eq + + +@pytest.fixture +def data_for_grouping(): + return JSONArray( + [ + {"b": 1}, + {"b": 1}, + {}, + {}, + {"a": 0, "c": 2}, + {"a": 0, "c": 2}, + {"b": 1}, + {"c": 2}, + ] + ) + + +class BaseJSON: + pass + + +class TestDtype(BaseJSON, base.BaseDtypeTests): + pass + + +class TestInterface(BaseJSON, base.BaseInterfaceTests): + @pytest.mark.xfail( + reason="comparison method not implemented for JSONArray (GH-37867)" + ) + def test_contains(self, data): + # GH-37867 + super().test_contains(data) + + +class TestConstructors(BaseJSON, base.BaseConstructorsTests): + @pytest.mark.xfail(reason="not implemented constructor from dtype") + def test_from_dtype(self, data): + # construct from our dtype & string dtype + super().test_from_dtype(data) + + @pytest.mark.xfail(reason="RecursionError, GH-33900") + def test_series_constructor_no_data_with_index(self, dtype, na_value): + # RecursionError: maximum recursion depth exceeded in comparison + rec_limit = sys.getrecursionlimit() + try: + # Limit to avoid stack overflow on Windows CI + sys.setrecursionlimit(100) + super().test_series_constructor_no_data_with_index(dtype, na_value) + finally: + sys.setrecursionlimit(rec_limit) + + @pytest.mark.xfail(reason="RecursionError, GH-33900") + def test_series_constructor_scalar_na_with_index(self, dtype, na_value): + # RecursionError: maximum recursion depth exceeded in comparison + rec_limit = sys.getrecursionlimit() + try: + # Limit to avoid stack overflow on Windows CI + sys.setrecursionlimit(100) + super().test_series_constructor_scalar_na_with_index(dtype, na_value) + finally: + sys.setrecursionlimit(rec_limit) + + @pytest.mark.xfail(reason="collection as scalar, GH-33901") + def test_series_constructor_scalar_with_index(self, data, dtype): + # TypeError: All values must be of type + rec_limit = sys.getrecursionlimit() + try: + # Limit to avoid stack overflow on Windows CI + sys.setrecursionlimit(100) + super().test_series_constructor_scalar_with_index(data, dtype) + finally: + sys.setrecursionlimit(rec_limit) + + +class TestReshaping(BaseJSON, base.BaseReshapingTests): + @pytest.mark.xfail(reason="Different definitions of NA") + def test_stack(self): + """ + The test does .astype(object).stack(future_stack=True). If we happen to have + any missing values in `data`, then we'll end up with different + rows since we consider `{}` NA, but `.astype(object)` doesn't. + """ + super().test_stack() + + @pytest.mark.xfail(reason="dict for NA") + def test_unstack(self, data, index): + # The base test has NaN for the expected NA value. 
+ # this matches otherwise + return super().test_unstack(data, index) + + +class TestGetitem(BaseJSON, base.BaseGetitemTests): + pass + + +class TestIndex(BaseJSON, base.BaseIndexTests): + pass + + +class TestMissing(BaseJSON, base.BaseMissingTests): + @pytest.mark.xfail(reason="Setting a dict as a scalar") + def test_fillna_series(self): + """We treat dictionaries as a mapping in fillna, not a scalar.""" + super().test_fillna_series() + + @pytest.mark.xfail(reason="Setting a dict as a scalar") + def test_fillna_frame(self): + """We treat dictionaries as a mapping in fillna, not a scalar.""" + super().test_fillna_frame() + + +unhashable = pytest.mark.xfail(reason="Unhashable") + + +class TestReduce(base.BaseReduceTests): + pass + + +class TestMethods(BaseJSON, base.BaseMethodsTests): + @unhashable + def test_value_counts(self, all_data, dropna): + super().test_value_counts(all_data, dropna) + + @unhashable + def test_value_counts_with_normalize(self, data): + super().test_value_counts_with_normalize(data) + + @unhashable + def test_sort_values_frame(self): + # TODO (EA.factorize): see if _values_for_factorize allows this. + super().test_sort_values_frame() + + @pytest.mark.parametrize("ascending", [True, False]) + def test_sort_values(self, data_for_sorting, ascending, sort_by_key): + super().test_sort_values(data_for_sorting, ascending, sort_by_key) + + @pytest.mark.parametrize("ascending", [True, False]) + def test_sort_values_missing( + self, data_missing_for_sorting, ascending, sort_by_key + ): + super().test_sort_values_missing( + data_missing_for_sorting, ascending, sort_by_key + ) + + @pytest.mark.xfail(reason="combine for JSONArray not supported") + def test_combine_le(self, data_repeated): + super().test_combine_le(data_repeated) + + @pytest.mark.xfail( + reason="combine for JSONArray not supported - " + "may pass depending on random data", + strict=False, + raises=AssertionError, + ) + def test_combine_first(self, data): + super().test_combine_first(data) + + @pytest.mark.xfail(reason="broadcasting error") + def test_where_series(self, data, na_value): + # Fails with + # *** ValueError: operands could not be broadcast together + # with shapes (4,) (4,) (0,) + super().test_where_series(data, na_value) + + @pytest.mark.xfail(reason="Can't compare dicts.") + def test_searchsorted(self, data_for_sorting): + super().test_searchsorted(data_for_sorting) + + @pytest.mark.xfail(reason="Can't compare dicts.") + def test_equals(self, data, na_value, as_series): + super().test_equals(data, na_value, as_series) + + @pytest.mark.skip("fill-value is interpreted as a dict of values") + def test_fillna_copy_frame(self, data_missing): + super().test_fillna_copy_frame(data_missing) + + def test_equals_same_data_different_object( + self, data, using_copy_on_write, request + ): + if using_copy_on_write: + mark = pytest.mark.xfail(reason="Fails with CoW") + request.node.add_marker(mark) + super().test_equals_same_data_different_object(data) + + +class TestCasting(BaseJSON, base.BaseCastingTests): + @pytest.mark.xfail(reason="failing on np.array(self, dtype=str)") + def test_astype_str(self): + """This currently fails in NumPy on np.array(self, dtype=str) with + + *** ValueError: setting an array element with a sequence + """ + super().test_astype_str() + + +# We intentionally don't run base.BaseSetitemTests because pandas' +# internals has trouble setting sequences of values into scalar positions. 
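+
+# An illustrative sketch, not part of the vendored file, of the construction
+# problem described in the comments above: np.array() probes a UserDict with
+# integer indexing (it defines __len__ and __getitem__), which raises
+# KeyError for equal-length UserDicts; pre-allocating an object ndarray and
+# filling it element-wise keeps each UserDict in a single scalar slot.
+def _userdict_object_array_sketch():
+    import numpy as np  # test_json.py itself does not import numpy
+
+    dicts = [collections.UserDict({"a": 1}), collections.UserDict({"b": 2})]
+    arr = np.empty(len(dicts), dtype=object)
+    arr[:] = dicts
+    assert arr.shape == (2,)
+    return arr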
+ + +class TestGroupby(BaseJSON, base.BaseGroupbyTests): + @unhashable + def test_groupby_extension_transform(self): + """ + This currently fails in Series.name.setter, since the + name must be hashable, but the value is a dictionary. + I think this is what we want, i.e. `.name` should be the original + values, and not the values for factorization. + """ + super().test_groupby_extension_transform() + + @unhashable + def test_groupby_extension_apply(self): + """ + This fails in Index._do_unique_check with + + > hash(val) + E TypeError: unhashable type: 'UserDict' with + + I suspect that once we support Index[ExtensionArray], + we'll be able to dispatch unique. + """ + super().test_groupby_extension_apply() + + @unhashable + def test_groupby_extension_agg(self): + """ + This fails when we get to tm.assert_series_equal when left.index + contains dictionaries, which are not hashable. + """ + super().test_groupby_extension_agg() + + @unhashable + def test_groupby_extension_no_sort(self): + """ + This fails when we get to tm.assert_series_equal when left.index + contains dictionaries, which are not hashable. + """ + super().test_groupby_extension_no_sort() + + +class TestArithmeticOps(BaseJSON, base.BaseArithmeticOpsTests): + def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request): + if len(data[0]) != 1: + mark = pytest.mark.xfail(reason="raises in coercing to Series") + request.node.add_marker(mark) + super().test_arith_frame_with_scalar(data, all_arithmetic_operators) + + +class TestComparisonOps(BaseJSON, base.BaseComparisonOpsTests): + def test_compare_array(self, data, comparison_op, request): + if comparison_op.__name__ in ["eq", "ne"]: + mark = pytest.mark.xfail(reason="Comparison methods not implemented") + request.node.add_marker(mark) + super().test_compare_array(data, comparison_op) + + +class TestPrinting(BaseJSON, base.BasePrintingTests): + pass + + +def custom_assert_series_equal(left, right, *args, **kwargs): + # NumPy doesn't handle an array of equal-length UserDicts. + # The default assert_series_equal eventually does a + # Series.values, which raises. We work around it by + # converting the UserDicts to dicts. + if left.dtype.name == "json": + assert left.dtype == right.dtype + left = pd.Series( + JSONArray(left.values.astype(object)), index=left.index, name=left.name + ) + right = pd.Series( + JSONArray(right.values.astype(object)), + index=right.index, + name=right.name, + ) + tm.assert_series_equal(left, right, *args, **kwargs) + + +def custom_assert_frame_equal(left, right, *args, **kwargs): + obj_type = kwargs.get("obj", "DataFrame") + tm.assert_index_equal( + left.columns, + right.columns, + exact=kwargs.get("check_column_type", "equiv"), + check_names=kwargs.get("check_names", True), + check_exact=kwargs.get("check_exact", False), + check_categorical=kwargs.get("check_categorical", True), + obj=f"{obj_type}.columns", + ) + + jsons = (left.dtypes == "json").index + + for col in jsons: + custom_assert_series_equal(left[col], right[col], *args, **kwargs) + + left = left.drop(columns=jsons) + right = right.drop(columns=jsons) + tm.assert_frame_equal(left, right, *args, **kwargs) + + +def test_custom_asserts(): + # This would always trigger the KeyError from trying to put + # an array of equal-length UserDicts inside an ndarray. 
+    data = JSONArray(
+        [
+            collections.UserDict({"a": 1}),
+            collections.UserDict({"b": 2}),
+            collections.UserDict({"c": 3}),
+        ]
+    )
+    a = pd.Series(data)
+    custom_assert_series_equal(a, a)
+    custom_assert_frame_equal(a.to_frame(), a.to_frame())
+
+    b = pd.Series(data.take([0, 0, 1]))
+    msg = r"Series are different"
+    with pytest.raises(AssertionError, match=msg):
+        custom_assert_series_equal(a, b)
+
+    with pytest.raises(AssertionError, match=msg):
+        custom_assert_frame_equal(a.to_frame(), b.to_frame())
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/list/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/list/__init__.py
new file mode 100644
index 00000000..0f3f2f35
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/list/__init__.py
@@ -0,0 +1,7 @@
+from pandas.tests.extension.list.array import (
+    ListArray,
+    ListDtype,
+    make_data,
+)
+
+__all__ = ["ListArray", "ListDtype", "make_data"]
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/list/array.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/list/array.py
new file mode 100644
index 00000000..5b895508
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/list/array.py
@@ -0,0 +1,134 @@
+"""
+Test extension array for storing nested data in a pandas container.
+
+The ListArray stores an ndarray of lists.
+"""
+from __future__ import annotations
+
+import numbers
+import string
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from pandas.core.dtypes.base import ExtensionDtype
+
+import pandas as pd
+from pandas.api.types import (
+    is_object_dtype,
+    is_string_dtype,
+)
+from pandas.core.arrays import ExtensionArray
+
+if TYPE_CHECKING:
+    from pandas._typing import type_t
+
+
+class ListDtype(ExtensionDtype):
+    type = list
+    name = "list"
+    na_value = np.nan
+
+    @classmethod
+    def construct_array_type(cls) -> type_t[ListArray]:
+        """
+        Return the array type associated with this dtype.
+
+        Returns
+        -------
+        type
+        """
+        return ListArray
+
+
+class ListArray(ExtensionArray):
+    dtype = ListDtype()
+    __array_priority__ = 1000
+
+    def __init__(self, values, dtype=None, copy=False) -> None:
+        if not isinstance(values, np.ndarray):
+            raise TypeError("Need to pass a numpy array as values")
+        for val in values:
+            if not isinstance(val, self.dtype.type) and not pd.isna(val):
+                raise TypeError("All values must be of type " + str(self.dtype.type))
+        self.data = values
+
+    @classmethod
+    def _from_sequence(cls, scalars, dtype=None, copy=False):
+        data = np.empty(len(scalars), dtype=object)
+        data[:] = scalars
+        return cls(data)
+
+    def __getitem__(self, item):
+        if isinstance(item, numbers.Integral):
+            return self.data[item]
+        else:
+            # slice, list-like, mask
+            return type(self)(self.data[item])
+
+    def __len__(self) -> int:
+        return len(self.data)
+
+    def isna(self):
+        return np.array(
+            [not isinstance(x, list) and np.isnan(x) for x in self.data], dtype=bool
+        )
+
+    def take(self, indexer, allow_fill=False, fill_value=None):
+        # re-implement here, since NumPy has trouble setting
+        # sized objects like lists into scalar slots of
+        # an ndarray.
+        indexer = np.asarray(indexer)
+        msg = (
+            "Index is out of bounds or cannot do a "
+            "non-empty take from an empty array."
+ ) + + if allow_fill: + if fill_value is None: + fill_value = self.dtype.na_value + # bounds check + if (indexer < -1).any(): + raise ValueError + try: + output = [ + self.data[loc] if loc != -1 else fill_value for loc in indexer + ] + except IndexError as err: + raise IndexError(msg) from err + else: + try: + output = [self.data[loc] for loc in indexer] + except IndexError as err: + raise IndexError(msg) from err + + return self._from_sequence(output) + + def copy(self): + return type(self)(self.data[:]) + + def astype(self, dtype, copy=True): + if isinstance(dtype, type(self.dtype)) and dtype == self.dtype: + if copy: + return self.copy() + return self + elif is_string_dtype(dtype) and not is_object_dtype(dtype): + # numpy has problems with astype(str) for nested elements + return np.array([str(x) for x in self.data], dtype=dtype) + return np.array(self.data, dtype=dtype, copy=copy) + + @classmethod + def _concat_same_type(cls, to_concat): + data = np.concatenate([x.data for x in to_concat]) + return cls(data) + + +def make_data(): + # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer + rng = np.random.default_rng(2) + data = np.empty(100, dtype=object) + data[:] = [ + [rng.choice(list(string.ascii_letters)) for _ in range(rng.integers(0, 10))] + for _ in range(100) + ] + return data diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/list/test_list.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/list/test_list.py new file mode 100644 index 00000000..295f0867 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/list/test_list.py @@ -0,0 +1,33 @@ +import pytest + +import pandas as pd +from pandas.tests.extension.list.array import ( + ListArray, + ListDtype, + make_data, +) + + +@pytest.fixture +def dtype(): + return ListDtype() + + +@pytest.fixture +def data(): + """Length-100 ListArray for semantics test.""" + data = make_data() + + while len(data[0]) == len(data[1]): + data = make_data() + + return ListArray(data) + + +def test_to_csv(data): + # https://github.com/pandas-dev/pandas/issues/28840 + # array with list-likes fail when doing astype(str) on the numpy array + # which was done in to_native_types + df = pd.DataFrame({"a": data}) + res = df.to_csv() + assert str(data[0]) in res diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_arrow.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_arrow.py new file mode 100644 index 00000000..36b26ada --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_arrow.py @@ -0,0 +1,3121 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. 
+""" +from __future__ import annotations + +from datetime import ( + date, + datetime, + time, + timedelta, +) +from decimal import Decimal +from io import ( + BytesIO, + StringIO, +) +import operator +import pickle +import re + +import numpy as np +import pytest + +from pandas._libs import lib +from pandas._libs.tslibs import timezones +from pandas.compat import ( + PY311, + is_ci_environment, + is_platform_windows, + pa_version_under7p0, + pa_version_under8p0, + pa_version_under9p0, + pa_version_under11p0, + pa_version_under13p0, + pa_version_under14p0, +) + +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + CategoricalDtypeType, +) + +import pandas as pd +import pandas._testing as tm +from pandas.api.extensions import no_default +from pandas.api.types import ( + is_bool_dtype, + is_float_dtype, + is_integer_dtype, + is_numeric_dtype, + is_signed_integer_dtype, + is_string_dtype, + is_unsigned_integer_dtype, +) +from pandas.tests.extension import base + +pa = pytest.importorskip("pyarrow", minversion="7.0.0") + +from pandas.core.arrays.arrow.array import ArrowExtensionArray +from pandas.core.arrays.arrow.extension_types import ArrowPeriodType + + +def _require_timezone_database(request): + if is_platform_windows() and is_ci_environment(): + mark = pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason=( + "TODO: Set ARROW_TIMEZONE_DATABASE environment variable " + "on CI to path to the tzdata for pyarrow." + ), + ) + request.node.add_marker(mark) + + +@pytest.fixture(params=tm.ALL_PYARROW_DTYPES, ids=str) +def dtype(request): + return ArrowDtype(pyarrow_dtype=request.param) + + +@pytest.fixture +def data(dtype): + pa_dtype = dtype.pyarrow_dtype + if pa.types.is_boolean(pa_dtype): + data = [True, False] * 4 + [None] + [True, False] * 44 + [None] + [True, False] + elif pa.types.is_floating(pa_dtype): + data = [1.0, 0.0] * 4 + [None] + [-2.0, -1.0] * 44 + [None] + [0.5, 99.5] + elif pa.types.is_signed_integer(pa_dtype): + data = [1, 0] * 4 + [None] + [-2, -1] * 44 + [None] + [1, 99] + elif pa.types.is_unsigned_integer(pa_dtype): + data = [1, 0] * 4 + [None] + [2, 1] * 44 + [None] + [1, 99] + elif pa.types.is_decimal(pa_dtype): + data = ( + [Decimal("1"), Decimal("0.0")] * 4 + + [None] + + [Decimal("-2.0"), Decimal("-1.0")] * 44 + + [None] + + [Decimal("0.5"), Decimal("33.123")] + ) + elif pa.types.is_date(pa_dtype): + data = ( + [date(2022, 1, 1), date(1999, 12, 31)] * 4 + + [None] + + [date(2022, 1, 1), date(2022, 1, 1)] * 44 + + [None] + + [date(1999, 12, 31), date(1999, 12, 31)] + ) + elif pa.types.is_timestamp(pa_dtype): + data = ( + [datetime(2020, 1, 1, 1, 1, 1, 1), datetime(1999, 1, 1, 1, 1, 1, 1)] * 4 + + [None] + + [datetime(2020, 1, 1, 1), datetime(1999, 1, 1, 1)] * 44 + + [None] + + [datetime(2020, 1, 1), datetime(1999, 1, 1)] + ) + elif pa.types.is_duration(pa_dtype): + data = ( + [timedelta(1), timedelta(1, 1)] * 4 + + [None] + + [timedelta(-1), timedelta(0)] * 44 + + [None] + + [timedelta(-10), timedelta(10)] + ) + elif pa.types.is_time(pa_dtype): + data = ( + [time(12, 0), time(0, 12)] * 4 + + [None] + + [time(0, 0), time(1, 1)] * 44 + + [None] + + [time(0, 5), time(5, 0)] + ) + elif pa.types.is_string(pa_dtype): + data = ["a", "b"] * 4 + [None] + ["1", "2"] * 44 + [None] + ["!", ">"] + elif pa.types.is_binary(pa_dtype): + data = [b"a", b"b"] * 4 + [None] + [b"1", b"2"] * 44 + [None] + [b"!", b">"] + else: + raise NotImplementedError + return pd.array(data, dtype=dtype) + + +@pytest.fixture +def data_missing(data): + """Length-2 array with [NA, Valid]""" + return 
type(data)._from_sequence([None, data[0]], dtype=data.dtype) + + +@pytest.fixture(params=["data", "data_missing"]) +def all_data(request, data, data_missing): + """Parametrized fixture returning 'data' or 'data_missing' integer arrays. + + Used to test dtype conversion with and without missing values. + """ + if request.param == "data": + return data + elif request.param == "data_missing": + return data_missing + + +@pytest.fixture +def data_for_grouping(dtype): + """ + Data for factorization, grouping, and unique tests. + + Expected to be like [B, B, NA, NA, A, A, B, C] + + Where A < B < C and NA is missing + """ + pa_dtype = dtype.pyarrow_dtype + if pa.types.is_boolean(pa_dtype): + A = False + B = True + C = True + elif pa.types.is_floating(pa_dtype): + A = -1.1 + B = 0.0 + C = 1.1 + elif pa.types.is_signed_integer(pa_dtype): + A = -1 + B = 0 + C = 1 + elif pa.types.is_unsigned_integer(pa_dtype): + A = 0 + B = 1 + C = 10 + elif pa.types.is_date(pa_dtype): + A = date(1999, 12, 31) + B = date(2010, 1, 1) + C = date(2022, 1, 1) + elif pa.types.is_timestamp(pa_dtype): + A = datetime(1999, 1, 1, 1, 1, 1, 1) + B = datetime(2020, 1, 1) + C = datetime(2020, 1, 1, 1) + elif pa.types.is_duration(pa_dtype): + A = timedelta(-1) + B = timedelta(0) + C = timedelta(1, 4) + elif pa.types.is_time(pa_dtype): + A = time(0, 0) + B = time(0, 12) + C = time(12, 12) + elif pa.types.is_string(pa_dtype): + A = "a" + B = "b" + C = "c" + elif pa.types.is_binary(pa_dtype): + A = b"a" + B = b"b" + C = b"c" + elif pa.types.is_decimal(pa_dtype): + A = Decimal("-1.1") + B = Decimal("0.0") + C = Decimal("1.1") + else: + raise NotImplementedError + return pd.array([B, B, None, None, A, A, B, C], dtype=dtype) + + +@pytest.fixture +def data_for_sorting(data_for_grouping): + """ + Length-3 array with a known sort order. + + This should be three items [B, C, A] with + A < B < C + """ + return type(data_for_grouping)._from_sequence( + [data_for_grouping[0], data_for_grouping[7], data_for_grouping[4]], + dtype=data_for_grouping.dtype, + ) + + +@pytest.fixture +def data_missing_for_sorting(data_for_grouping): + """ + Length-3 array with a known sort order. + + This should be three items [B, NA, A] with + A < B and NA missing. + """ + return type(data_for_grouping)._from_sequence( + [data_for_grouping[0], data_for_grouping[2], data_for_grouping[4]], + dtype=data_for_grouping.dtype, + ) + + +@pytest.fixture +def data_for_twos(data): + """Length-100 array in which all the elements are two.""" + pa_dtype = data.dtype.pyarrow_dtype + if ( + pa.types.is_integer(pa_dtype) + or pa.types.is_floating(pa_dtype) + or pa.types.is_decimal(pa_dtype) + or pa.types.is_duration(pa_dtype) + ): + return pd.array([2] * 100, dtype=data.dtype) + # tests will be xfailed where 2 is not a valid scalar for pa_dtype + return data + # TODO: skip otherwise? 
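+
+# A minimal usage sketch, not part of the vendored file, of the ArrowDtype
+# construction pattern the fixtures above rely on; the "int64[pyarrow]"
+# string alias is equivalent to passing ArrowDtype(pa.int64()) explicitly.
+def _arrow_dtype_construction_sketch():
+    arr = pd.array([1, 2, None], dtype=ArrowDtype(pa.int64()))
+    ser = pd.Series([1, 2, None], dtype="int64[pyarrow]")
+    assert arr.dtype == ser.dtype
+    # missing entries are stored as Arrow nulls and reported by isna()
+    assert list(arr.isna()) == [False, False, True]
+    return arr, ser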
+ + +class TestBaseCasting(base.BaseCastingTests): + def test_astype_str(self, data, request): + pa_dtype = data.dtype.pyarrow_dtype + if pa.types.is_binary(pa_dtype): + request.node.add_marker( + pytest.mark.xfail( + reason=f"For {pa_dtype} .astype(str) decodes.", + ) + ) + super().test_astype_str(data) + + +class TestConstructors(base.BaseConstructorsTests): + def test_from_dtype(self, data, request): + pa_dtype = data.dtype.pyarrow_dtype + if pa.types.is_string(pa_dtype) or pa.types.is_decimal(pa_dtype): + if pa.types.is_string(pa_dtype): + reason = "ArrowDtype(pa.string()) != StringDtype('pyarrow')" + else: + reason = f"pyarrow.type_for_alias cannot infer {pa_dtype}" + + request.node.add_marker( + pytest.mark.xfail( + reason=reason, + ) + ) + super().test_from_dtype(data) + + def test_from_sequence_pa_array(self, data): + # https://github.com/pandas-dev/pandas/pull/47034#discussion_r955500784 + # data._pa_array = pa.ChunkedArray + result = type(data)._from_sequence(data._pa_array) + tm.assert_extension_array_equal(result, data) + assert isinstance(result._pa_array, pa.ChunkedArray) + + result = type(data)._from_sequence(data._pa_array.combine_chunks()) + tm.assert_extension_array_equal(result, data) + assert isinstance(result._pa_array, pa.ChunkedArray) + + def test_from_sequence_pa_array_notimplemented(self, request): + with pytest.raises(NotImplementedError, match="Converting strings to"): + ArrowExtensionArray._from_sequence_of_strings( + ["12-1"], dtype=pa.month_day_nano_interval() + ) + + def test_from_sequence_of_strings_pa_array(self, data, request): + pa_dtype = data.dtype.pyarrow_dtype + if pa.types.is_time64(pa_dtype) and pa_dtype.equals("time64[ns]") and not PY311: + request.node.add_marker( + pytest.mark.xfail( + reason="Nanosecond time parsing not supported.", + ) + ) + elif pa_version_under11p0 and ( + pa.types.is_duration(pa_dtype) or pa.types.is_decimal(pa_dtype) + ): + request.node.add_marker( + pytest.mark.xfail( + raises=pa.ArrowNotImplementedError, + reason=f"pyarrow doesn't support parsing {pa_dtype}", + ) + ) + elif pa.types.is_timestamp(pa_dtype) and pa_dtype.tz is not None: + _require_timezone_database(request) + + pa_array = data._pa_array.cast(pa.string()) + result = type(data)._from_sequence_of_strings(pa_array, dtype=data.dtype) + tm.assert_extension_array_equal(result, data) + + pa_array = pa_array.combine_chunks() + result = type(data)._from_sequence_of_strings(pa_array, dtype=data.dtype) + tm.assert_extension_array_equal(result, data) + + +class TestGetitemTests(base.BaseGetitemTests): + pass + + +class TestBaseAccumulateTests(base.BaseAccumulateTests): + def check_accumulate(self, ser, op_name, skipna): + result = getattr(ser, op_name)(skipna=skipna) + + pa_type = ser.dtype.pyarrow_dtype + if pa.types.is_temporal(pa_type): + # Just check that we match the integer behavior. 
+ if pa_type.bit_width == 32: + int_type = "int32[pyarrow]" + else: + int_type = "int64[pyarrow]" + ser = ser.astype(int_type) + result = result.astype(int_type) + + result = result.astype("Float64") + expected = getattr(ser.astype("Float64"), op_name)(skipna=skipna) + tm.assert_series_equal(result, expected, check_dtype=False) + + def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool: + # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype" has no + # attribute "pyarrow_dtype" + pa_type = ser.dtype.pyarrow_dtype # type: ignore[union-attr] + + if ( + pa.types.is_string(pa_type) + or pa.types.is_binary(pa_type) + or pa.types.is_decimal(pa_type) + ): + if op_name in ["cumsum", "cumprod", "cummax", "cummin"]: + return False + elif pa.types.is_boolean(pa_type): + if op_name in ["cumprod", "cummax", "cummin"]: + return False + elif pa.types.is_temporal(pa_type): + if op_name == "cumsum" and not pa.types.is_duration(pa_type): + return False + elif op_name == "cumprod": + return False + return True + + @pytest.mark.parametrize("skipna", [True, False]) + def test_accumulate_series(self, data, all_numeric_accumulations, skipna, request): + pa_type = data.dtype.pyarrow_dtype + op_name = all_numeric_accumulations + ser = pd.Series(data) + + if not self._supports_accumulation(ser, op_name): + # The base class test will check that we raise + return super().test_accumulate_series( + data, all_numeric_accumulations, skipna + ) + + if pa_version_under9p0 or ( + pa_version_under13p0 and all_numeric_accumulations != "cumsum" + ): + # xfailing takes a long time to run because pytest + # renders the exception messages even when not showing them + opt = request.config.option + if opt.markexpr and "not slow" in opt.markexpr: + pytest.skip( + f"{all_numeric_accumulations} not implemented for pyarrow < 9" + ) + mark = pytest.mark.xfail( + reason=f"{all_numeric_accumulations} not implemented for pyarrow < 9" + ) + request.node.add_marker(mark) + + elif all_numeric_accumulations == "cumsum" and ( + pa.types.is_boolean(pa_type) or pa.types.is_decimal(pa_type) + ): + request.node.add_marker( + pytest.mark.xfail( + reason=f"{all_numeric_accumulations} not implemented for {pa_type}", + raises=NotImplementedError, + ) + ) + + self.check_accumulate(ser, op_name, skipna) + + +class TestReduce(base.BaseReduceTests): + def _supports_reduction(self, obj, op_name: str) -> bool: + dtype = tm.get_dtype(obj) + # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype" has + # no attribute "pyarrow_dtype" + pa_dtype = dtype.pyarrow_dtype # type: ignore[union-attr] + if pa.types.is_temporal(pa_dtype) and op_name in [ + "sum", + "var", + "skew", + "kurt", + "prod", + ]: + if pa.types.is_duration(pa_dtype) and op_name in ["sum"]: + # summing timedeltas is one case that *is* well-defined + pass + else: + return False + elif ( + pa.types.is_string(pa_dtype) or pa.types.is_binary(pa_dtype) + ) and op_name in [ + "sum", + "mean", + "median", + "prod", + "std", + "sem", + "var", + "skew", + "kurt", + ]: + return False + + if ( + pa.types.is_temporal(pa_dtype) + and not pa.types.is_duration(pa_dtype) + and op_name in ["any", "all"] + ): + # xref GH#34479 we support this in our non-pyarrow datetime64 dtypes, + # but it isn't obvious we _should_. For now, we keep the pyarrow + # behavior which does not support this. 
+ return False + + return True + + def check_reduce(self, ser, op_name, skipna): + pa_dtype = ser.dtype.pyarrow_dtype + if op_name == "count": + result = getattr(ser, op_name)() + else: + result = getattr(ser, op_name)(skipna=skipna) + + if pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype): + ser = ser.astype("Float64") + # TODO: in the opposite case, aren't we testing... nothing? + if op_name == "count": + expected = getattr(ser, op_name)() + else: + expected = getattr(ser, op_name)(skipna=skipna) + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request): + dtype = data.dtype + pa_dtype = dtype.pyarrow_dtype + + xfail_mark = pytest.mark.xfail( + raises=TypeError, + reason=( + f"{all_numeric_reductions} is not implemented in " + f"pyarrow={pa.__version__} for {pa_dtype}" + ), + ) + if all_numeric_reductions in {"skew", "kurt"} and ( + dtype._is_numeric or dtype.kind == "b" + ): + request.node.add_marker(xfail_mark) + elif ( + all_numeric_reductions in {"var", "std", "median"} + and pa_version_under7p0 + and pa.types.is_decimal(pa_dtype) + ): + request.node.add_marker(xfail_mark) + elif ( + all_numeric_reductions == "sem" + and pa_version_under8p0 + and (dtype._is_numeric or pa.types.is_temporal(pa_dtype)) + ): + request.node.add_marker(xfail_mark) + + elif pa.types.is_boolean(pa_dtype) and all_numeric_reductions in { + "sem", + "std", + "var", + "median", + }: + request.node.add_marker(xfail_mark) + super().test_reduce_series_numeric(data, all_numeric_reductions, skipna) + + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_series_boolean( + self, data, all_boolean_reductions, skipna, na_value, request + ): + pa_dtype = data.dtype.pyarrow_dtype + xfail_mark = pytest.mark.xfail( + raises=TypeError, + reason=( + f"{all_boolean_reductions} is not implemented in " + f"pyarrow={pa.__version__} for {pa_dtype}" + ), + ) + if pa.types.is_string(pa_dtype) or pa.types.is_binary(pa_dtype): + # We *might* want to make this behave like the non-pyarrow cases, + # but have not yet decided. 
+ request.node.add_marker(xfail_mark) + + return super().test_reduce_series_boolean(data, all_boolean_reductions, skipna) + + def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool): + if op_name in ["max", "min"]: + cmp_dtype = arr.dtype + elif arr.dtype.name == "decimal128(7, 3)[pyarrow]": + if op_name not in ["median", "var", "std"]: + cmp_dtype = arr.dtype + else: + cmp_dtype = "float64[pyarrow]" + elif op_name in ["median", "var", "std", "mean", "skew"]: + cmp_dtype = "float64[pyarrow]" + else: + cmp_dtype = { + "i": "int64[pyarrow]", + "u": "uint64[pyarrow]", + "f": "float64[pyarrow]", + }[arr.dtype.kind] + return cmp_dtype + + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_frame(self, data, all_numeric_reductions, skipna, request): + op_name = all_numeric_reductions + if op_name == "skew": + if data.dtype._is_numeric: + mark = pytest.mark.xfail(reason="skew not implemented") + request.node.add_marker(mark) + return super().test_reduce_frame(data, all_numeric_reductions, skipna) + + @pytest.mark.parametrize("typ", ["int64", "uint64", "float64"]) + def test_median_not_approximate(self, typ): + # GH 52679 + result = pd.Series([1, 2], dtype=f"{typ}[pyarrow]").median() + assert result == 1.5 + + +class TestBaseGroupby(base.BaseGroupbyTests): + def test_in_numeric_groupby(self, data_for_grouping): + dtype = data_for_grouping.dtype + if is_string_dtype(dtype): + df = pd.DataFrame( + { + "A": [1, 1, 2, 2, 3, 3, 1, 4], + "B": data_for_grouping, + "C": [1, 1, 1, 1, 1, 1, 1, 1], + } + ) + + expected = pd.Index(["C"]) + msg = re.escape(f"agg function failed [how->sum,dtype->{dtype}") + with pytest.raises(TypeError, match=msg): + df.groupby("A").sum() + result = df.groupby("A").sum(numeric_only=True).columns + tm.assert_index_equal(result, expected) + else: + super().test_in_numeric_groupby(data_for_grouping) + + +class TestBaseDtype(base.BaseDtypeTests): + def test_construct_from_string_own_name(self, dtype, request): + pa_dtype = dtype.pyarrow_dtype + if pa.types.is_decimal(pa_dtype): + request.node.add_marker( + pytest.mark.xfail( + raises=NotImplementedError, + reason=f"pyarrow.type_for_alias cannot infer {pa_dtype}", + ) + ) + + if pa.types.is_string(pa_dtype): + # We still support StringDtype('pyarrow') over ArrowDtype(pa.string()) + msg = r"string\[pyarrow\] should be constructed by StringDtype" + with pytest.raises(TypeError, match=msg): + dtype.construct_from_string(dtype.name) + + return + + super().test_construct_from_string_own_name(dtype) + + def test_is_dtype_from_name(self, dtype, request): + pa_dtype = dtype.pyarrow_dtype + if pa.types.is_string(pa_dtype): + # We still support StringDtype('pyarrow') over ArrowDtype(pa.string()) + assert not type(dtype).is_dtype(dtype.name) + else: + if pa.types.is_decimal(pa_dtype): + request.node.add_marker( + pytest.mark.xfail( + raises=NotImplementedError, + reason=f"pyarrow.type_for_alias cannot infer {pa_dtype}", + ) + ) + super().test_is_dtype_from_name(dtype) + + def test_construct_from_string_another_type_raises(self, dtype): + msg = r"'another_type' must end with '\[pyarrow\]'" + with pytest.raises(TypeError, match=msg): + type(dtype).construct_from_string("another_type") + + def test_get_common_dtype(self, dtype, request): + pa_dtype = dtype.pyarrow_dtype + if ( + pa.types.is_date(pa_dtype) + or pa.types.is_time(pa_dtype) + or (pa.types.is_timestamp(pa_dtype) and pa_dtype.tz is not None) + or pa.types.is_binary(pa_dtype) + or pa.types.is_decimal(pa_dtype) + ): + request.node.add_marker( + 
pytest.mark.xfail(
+                    reason=(
+                        f"{pa_dtype} does not have associated numpy "
+                        f"dtype findable by find_common_type"
+                    )
+                )
+            )
+        super().test_get_common_dtype(dtype)
+
+    def test_is_not_string_type(self, dtype):
+        pa_dtype = dtype.pyarrow_dtype
+        if pa.types.is_string(pa_dtype):
+            assert is_string_dtype(dtype)
+        else:
+            super().test_is_not_string_type(dtype)
+
+
+class TestBaseIndex(base.BaseIndexTests):
+    pass
+
+
+class TestBaseInterface(base.BaseInterfaceTests):
+    @pytest.mark.xfail(
+        reason="GH 45419: pyarrow.ChunkedArray does not support views.", run=False
+    )
+    def test_view(self, data):
+        super().test_view(data)
+
+
+class TestBaseMissing(base.BaseMissingTests):
+    def test_fillna_no_op_returns_copy(self, data):
+        data = data[~data.isna()]
+
+        valid = data[0]
+        result = data.fillna(valid)
+        assert result is not data
+        tm.assert_extension_array_equal(result, data)
+
+        result = data.fillna(method="backfill")
+        assert result is not data
+        tm.assert_extension_array_equal(result, data)
+
+
+class TestBasePrinting(base.BasePrintingTests):
+    pass
+
+
+class TestBaseReshaping(base.BaseReshapingTests):
+    @pytest.mark.xfail(
+        reason="GH 45419: pyarrow.ChunkedArray does not support views", run=False
+    )
+    def test_transpose(self, data):
+        super().test_transpose(data)
+
+
+class TestBaseSetitem(base.BaseSetitemTests):
+    @pytest.mark.xfail(
+        reason="GH 45419: pyarrow.ChunkedArray does not support views", run=False
+    )
+    def test_setitem_preserves_views(self, data):
+        super().test_setitem_preserves_views(data)
+
+
+class TestBaseParsing(base.BaseParsingTests):
+    @pytest.mark.parametrize("dtype_backend", ["pyarrow", no_default])
+    @pytest.mark.parametrize("engine", ["c", "python"])
+    def test_EA_types(self, engine, data, dtype_backend, request):
+        pa_dtype = data.dtype.pyarrow_dtype
+        if pa.types.is_decimal(pa_dtype):
+            request.node.add_marker(
+                pytest.mark.xfail(
+                    raises=NotImplementedError,
+                    reason=f"Parameterized types {pa_dtype} not supported.",
+                )
+            )
+        elif pa.types.is_timestamp(pa_dtype) and pa_dtype.unit in ("us", "ns"):
+            request.node.add_marker(
+                pytest.mark.xfail(
+                    raises=ValueError,
+                    reason="https://github.com/pandas-dev/pandas/issues/49767",
+                )
+            )
+        elif pa.types.is_binary(pa_dtype):
+            request.node.add_marker(
+                pytest.mark.xfail(reason="CSV parsers don't correctly handle binary")
+            )
+        df = pd.DataFrame({"with_dtype": pd.Series(data, dtype=str(data.dtype))})
+        csv_output = df.to_csv(index=False, na_rep=np.nan)
+        if pa.types.is_binary(pa_dtype):
+            csv_output = BytesIO(csv_output)
+        else:
+            csv_output = StringIO(csv_output)
+        result = pd.read_csv(
+            csv_output,
+            dtype={"with_dtype": str(data.dtype)},
+            engine=engine,
+            dtype_backend=dtype_backend,
+        )
+        expected = df
+        tm.assert_frame_equal(result, expected)
+
+
+class TestBaseUnaryOps(base.BaseUnaryOpsTests):
+    def test_invert(self, data, request):
+        pa_dtype = data.dtype.pyarrow_dtype
+        if not (pa.types.is_boolean(pa_dtype) or pa.types.is_integer(pa_dtype)):
+            request.node.add_marker(
+                pytest.mark.xfail(
+                    raises=pa.ArrowNotImplementedError,
+                    reason=f"pyarrow.compute.invert does not support {pa_dtype}",
+                )
+            )
+        super().test_invert(data)
+
+
+class TestBaseMethods(base.BaseMethodsTests):
+    @pytest.mark.parametrize("periods", [1, -2])
+    def test_diff(self, data, periods, request):
+        pa_dtype = data.dtype.pyarrow_dtype
+        if pa.types.is_unsigned_integer(pa_dtype) and periods == 1:
+            request.node.add_marker(
+                pytest.mark.xfail(
+                    raises=pa.ArrowInvalid,
+                    reason=(
+                        f"diff with {pa_dtype} and periods={periods} will 
overflow" + ), + ) + ) + super().test_diff(data, periods) + + def test_value_counts_returns_pyarrow_int64(self, data): + # GH 51462 + data = data[:10] + result = data.value_counts() + assert result.dtype == ArrowDtype(pa.int64()) + + def test_argmin_argmax( + self, data_for_sorting, data_missing_for_sorting, na_value, request + ): + pa_dtype = data_for_sorting.dtype.pyarrow_dtype + if pa.types.is_decimal(pa_dtype) and pa_version_under7p0: + request.node.add_marker( + pytest.mark.xfail( + reason=f"No pyarrow kernel for {pa_dtype}", + raises=pa.ArrowNotImplementedError, + ) + ) + super().test_argmin_argmax(data_for_sorting, data_missing_for_sorting, na_value) + + @pytest.mark.parametrize( + "op_name, skipna, expected", + [ + ("idxmax", True, 0), + ("idxmin", True, 2), + ("argmax", True, 0), + ("argmin", True, 2), + ("idxmax", False, np.nan), + ("idxmin", False, np.nan), + ("argmax", False, -1), + ("argmin", False, -1), + ], + ) + def test_argreduce_series( + self, data_missing_for_sorting, op_name, skipna, expected, request + ): + pa_dtype = data_missing_for_sorting.dtype.pyarrow_dtype + if pa.types.is_decimal(pa_dtype) and pa_version_under7p0 and skipna: + request.node.add_marker( + pytest.mark.xfail( + reason=f"No pyarrow kernel for {pa_dtype}", + raises=pa.ArrowNotImplementedError, + ) + ) + super().test_argreduce_series( + data_missing_for_sorting, op_name, skipna, expected + ) + + _combine_le_expected_dtype = "bool[pyarrow]" + + +class TestBaseArithmeticOps(base.BaseArithmeticOpsTests): + divmod_exc = NotImplementedError + + def get_op_from_name(self, op_name): + short_opname = op_name.strip("_") + if short_opname == "rtruediv": + # use the numpy version that won't raise on division by zero + + def rtruediv(x, y): + return np.divide(y, x) + + return rtruediv + elif short_opname == "rfloordiv": + return lambda x, y: np.floor_divide(y, x) + + return tm.get_op_from_name(op_name) + + def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result): + # BaseOpsUtil._combine can upcast expected dtype + # (because it generates expected on python scalars) + # while ArrowExtensionArray maintains original type + expected = pointwise_result + + was_frame = False + if isinstance(expected, pd.DataFrame): + was_frame = True + expected_data = expected.iloc[:, 0] + original_dtype = obj.iloc[:, 0].dtype + else: + expected_data = expected + original_dtype = obj.dtype + + orig_pa_type = original_dtype.pyarrow_dtype + if not was_frame and isinstance(other, pd.Series): + # i.e. test_arith_series_with_array + if not ( + pa.types.is_floating(orig_pa_type) + or ( + pa.types.is_integer(orig_pa_type) + and op_name not in ["__truediv__", "__rtruediv__"] + ) + or pa.types.is_duration(orig_pa_type) + or pa.types.is_timestamp(orig_pa_type) + or pa.types.is_date(orig_pa_type) + or pa.types.is_decimal(orig_pa_type) + ): + # base class _combine always returns int64, while + # ArrowExtensionArray does not upcast + return expected + elif not ( + (op_name == "__floordiv__" and pa.types.is_integer(orig_pa_type)) + or pa.types.is_duration(orig_pa_type) + or pa.types.is_timestamp(orig_pa_type) + or pa.types.is_date(orig_pa_type) + or pa.types.is_decimal(orig_pa_type) + ): + # base class _combine always returns int64, while + # ArrowExtensionArray does not upcast + return expected + + pa_expected = pa.array(expected_data._values) + + if pa.types.is_duration(pa_expected.type): + if pa.types.is_date(orig_pa_type): + if pa.types.is_date64(orig_pa_type): + # TODO: why is this different vs date32? 
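+                    # date64 stores milliseconds since the Unix epoch, while
+                    # date32 stores days, hence the millisecond unit here.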
+ unit = "ms" + else: + unit = "s" + else: + # pyarrow sees sequence of datetime/timedelta objects and defaults + # to "us" but the non-pointwise op retains unit + # timestamp or duration + unit = orig_pa_type.unit + if type(other) in [datetime, timedelta] and unit in ["s", "ms"]: + # pydatetime/pytimedelta objects have microsecond reso, so we + # take the higher reso of the original and microsecond. Note + # this matches what we would do with DatetimeArray/TimedeltaArray + unit = "us" + + pa_expected = pa_expected.cast(f"duration[{unit}]") + + elif pa.types.is_decimal(pa_expected.type) and pa.types.is_decimal( + orig_pa_type + ): + # decimal precision can resize in the result type depending on data + # just compare the float values + alt = getattr(obj, op_name)(other) + alt_dtype = tm.get_dtype(alt) + assert isinstance(alt_dtype, ArrowDtype) + if op_name == "__pow__" and isinstance(other, Decimal): + # TODO: would it make more sense to retain Decimal here? + alt_dtype = ArrowDtype(pa.float64()) + elif ( + op_name == "__pow__" + and isinstance(other, pd.Series) + and other.dtype == original_dtype + ): + # TODO: would it make more sense to retain Decimal here? + alt_dtype = ArrowDtype(pa.float64()) + else: + assert pa.types.is_decimal(alt_dtype.pyarrow_dtype) + return expected.astype(alt_dtype) + + else: + pa_expected = pa_expected.cast(orig_pa_type) + + pd_expected = type(expected_data._values)(pa_expected) + if was_frame: + expected = pd.DataFrame( + pd_expected, index=expected.index, columns=expected.columns + ) + else: + expected = pd.Series(pd_expected) + return expected + + def _is_temporal_supported(self, opname, pa_dtype): + return not pa_version_under8p0 and ( + ( + opname in ("__add__", "__radd__") + or ( + opname + in ("__truediv__", "__rtruediv__", "__floordiv__", "__rfloordiv__") + and not pa_version_under14p0 + ) + ) + and pa.types.is_duration(pa_dtype) + or opname in ("__sub__", "__rsub__") + and pa.types.is_temporal(pa_dtype) + ) + + def _get_expected_exception( + self, op_name: str, obj, other + ) -> type[Exception] | None: + if op_name in ("__divmod__", "__rdivmod__"): + return self.divmod_exc + + dtype = tm.get_dtype(obj) + # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype" has no + # attribute "pyarrow_dtype" + pa_dtype = dtype.pyarrow_dtype # type: ignore[union-attr] + + arrow_temporal_supported = self._is_temporal_supported(op_name, pa_dtype) + if op_name in { + "__mod__", + "__rmod__", + }: + exc = NotImplementedError + elif arrow_temporal_supported: + exc = None + elif op_name in ["__add__", "__radd__"] and ( + pa.types.is_string(pa_dtype) or pa.types.is_binary(pa_dtype) + ): + exc = None + elif not ( + pa.types.is_floating(pa_dtype) + or pa.types.is_integer(pa_dtype) + or pa.types.is_decimal(pa_dtype) + ): + # TODO: in many of these cases, e.g. non-duration temporal, + # these will *never* be allowed. Would it make more sense to + # re-raise as TypeError, more consistent with non-pyarrow cases? 
+            exc = pa.ArrowNotImplementedError
+        else:
+            exc = None
+        return exc
+
+    def _get_arith_xfail_marker(self, opname, pa_dtype):
+        mark = None
+
+        arrow_temporal_supported = self._is_temporal_supported(opname, pa_dtype)
+
+        if (
+            opname == "__rpow__"
+            and (
+                pa.types.is_floating(pa_dtype)
+                or pa.types.is_integer(pa_dtype)
+                or pa.types.is_decimal(pa_dtype)
+            )
+            and not pa_version_under7p0
+        ):
+            mark = pytest.mark.xfail(
+                reason=(
+                    f"GH#29997: 1**pandas.NA == 1 while 1**pyarrow.NA == NULL "
+                    f"for {pa_dtype}"
+                )
+            )
+        elif arrow_temporal_supported and (
+            pa.types.is_time(pa_dtype)
+            or (
+                opname
+                in ("__truediv__", "__rtruediv__", "__floordiv__", "__rfloordiv__")
+                and pa.types.is_duration(pa_dtype)
+            )
+        ):
+            mark = pytest.mark.xfail(
+                raises=TypeError,
+                reason=(
+                    f"{opname} not supported between "
+                    f"pd.NA and {pa_dtype} Python scalar"
+                ),
+            )
+        elif (
+            opname == "__rfloordiv__"
+            and (pa.types.is_integer(pa_dtype) or pa.types.is_decimal(pa_dtype))
+            and not pa_version_under7p0
+        ):
+            mark = pytest.mark.xfail(
+                raises=pa.ArrowInvalid,
+                reason="divide by 0",
+            )
+        elif (
+            opname == "__rtruediv__"
+            and pa.types.is_decimal(pa_dtype)
+            and not pa_version_under7p0
+        ):
+            mark = pytest.mark.xfail(
+                raises=pa.ArrowInvalid,
+                reason="divide by 0",
+            )
+        elif (
+            opname == "__pow__"
+            and pa.types.is_decimal(pa_dtype)
+            and pa_version_under7p0
+        ):
+            mark = pytest.mark.xfail(
+                raises=pa.ArrowInvalid,
+                reason="Invalid decimal function: power_checked",
+            )
+
+        return mark
+
+    def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request):
+        pa_dtype = data.dtype.pyarrow_dtype
+
+        if all_arithmetic_operators == "__rmod__" and (
+            pa.types.is_string(pa_dtype) or pa.types.is_binary(pa_dtype)
+        ):
+            pytest.skip("Skip testing Python string formatting")
+
+        mark = self._get_arith_xfail_marker(all_arithmetic_operators, pa_dtype)
+        if mark is not None:
+            request.node.add_marker(mark)
+
+        super().test_arith_series_with_scalar(data, all_arithmetic_operators)
+
+    def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
+        pa_dtype = data.dtype.pyarrow_dtype
+
+        if all_arithmetic_operators == "__rmod__" and (
+            pa.types.is_string(pa_dtype) or pa.types.is_binary(pa_dtype)
+        ):
+            pytest.skip("Skip testing Python string formatting")
+
+        mark = self._get_arith_xfail_marker(all_arithmetic_operators, pa_dtype)
+        if mark is not None:
+            request.node.add_marker(mark)
+
+        super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
+
+    def test_arith_series_with_array(self, data, all_arithmetic_operators, request):
+        pa_dtype = data.dtype.pyarrow_dtype
+
+        if (
+            all_arithmetic_operators
+            in (
+                "__sub__",
+                "__rsub__",
+            )
+            and pa.types.is_unsigned_integer(pa_dtype)
+            and not pa_version_under7p0
+        ):
+            request.node.add_marker(
+                pytest.mark.xfail(
+                    raises=pa.ArrowInvalid,
+                    reason=(
+                        f"Implemented pyarrow.compute.subtract_checked "
+                        f"which raises on overflow for {pa_dtype}"
+                    ),
+                )
+            )
+
+        mark = self._get_arith_xfail_marker(all_arithmetic_operators, pa_dtype)
+        if mark is not None:
+            request.node.add_marker(mark)
+
+        op_name = all_arithmetic_operators
+        ser = pd.Series(data)
+        # pd.Series([ser.iloc[0]] * len(ser)) may not return ArrowExtensionArray
+        # since ser.iloc[0] is a python scalar
+        other = pd.Series(pd.array([ser.iloc[0]] * len(ser), dtype=data.dtype))
+
+        self.check_opname(ser, op_name, other)
+
+    def test_add_series_with_extension_array(self, data, request):
+        pa_dtype = data.dtype.pyarrow_dtype
+
+        if pa_dtype.equals("int8"):
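+            # int8 is narrow enough that data[0] + data[0] can exceed its range,
+            # and pyarrow's checked arithmetic kernels raise ArrowInvalid on overflow.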
request.node.add_marker( + pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason=f"raises on overflow for {pa_dtype}", + ) + ) + super().test_add_series_with_extension_array(data) + + +class TestBaseComparisonOps(base.BaseComparisonOpsTests): + def test_compare_array(self, data, comparison_op, na_value): + ser = pd.Series(data) + # pd.Series([ser.iloc[0]] * len(ser)) may not return ArrowExtensionArray + # since ser.iloc[0] is a python scalar + other = pd.Series(pd.array([ser.iloc[0]] * len(ser), dtype=data.dtype)) + if comparison_op.__name__ in ["eq", "ne"]: + # comparison should match point-wise comparisons + result = comparison_op(ser, other) + # Series.combine does not calculate the NA mask correctly + # when comparing over an array + assert result[8] is na_value + assert result[97] is na_value + expected = ser.combine(other, comparison_op) + expected[8] = na_value + expected[97] = na_value + tm.assert_series_equal(result, expected) + + else: + return super().test_compare_array(data, comparison_op) + + def test_invalid_other_comp(self, data, comparison_op): + # GH 48833 + with pytest.raises( + NotImplementedError, match=".* not implemented for " + ): + comparison_op(data, object()) + + @pytest.mark.parametrize("masked_dtype", ["boolean", "Int64", "Float64"]) + def test_comp_masked_numpy(self, masked_dtype, comparison_op): + # GH 52625 + data = [1, 0, None] + ser_masked = pd.Series(data, dtype=masked_dtype) + ser_pa = pd.Series(data, dtype=f"{masked_dtype.lower()}[pyarrow]") + result = comparison_op(ser_pa, ser_masked) + if comparison_op in [operator.lt, operator.gt, operator.ne]: + exp = [False, False, None] + else: + exp = [True, True, None] + expected = pd.Series(exp, dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +class TestLogicalOps: + """Various Series and DataFrame logical ops methods.""" + + def test_kleene_or(self): + a = pd.Series([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean[pyarrow]") + b = pd.Series([True, False, None] * 3, dtype="boolean[pyarrow]") + result = a | b + expected = pd.Series( + [True, True, True, True, False, None, True, None, None], + dtype="boolean[pyarrow]", + ) + tm.assert_series_equal(result, expected) + + result = b | a + tm.assert_series_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_series_equal( + a, + pd.Series([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean[pyarrow]"), + ) + tm.assert_series_equal( + b, pd.Series([True, False, None] * 3, dtype="boolean[pyarrow]") + ) + + @pytest.mark.parametrize( + "other, expected", + [ + (None, [True, None, None]), + (pd.NA, [True, None, None]), + (True, [True, True, True]), + (np.bool_(True), [True, True, True]), + (False, [True, False, None]), + (np.bool_(False), [True, False, None]), + ], + ) + def test_kleene_or_scalar(self, other, expected): + a = pd.Series([True, False, None], dtype="boolean[pyarrow]") + result = a | other + expected = pd.Series(expected, dtype="boolean[pyarrow]") + tm.assert_series_equal(result, expected) + + result = other | a + tm.assert_series_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_series_equal( + a, pd.Series([True, False, None], dtype="boolean[pyarrow]") + ) + + def test_kleene_and(self): + a = pd.Series([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean[pyarrow]") + b = pd.Series([True, False, None] * 3, dtype="boolean[pyarrow]") + result = a & b + expected = pd.Series( + [True, False, None, False, False, False, None, False, None], + 
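+            # Kleene logic: False & anything is False, while True & NA and
+            # NA & NA remain NA.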
dtype="boolean[pyarrow]", + ) + tm.assert_series_equal(result, expected) + + result = b & a + tm.assert_series_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_series_equal( + a, + pd.Series([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean[pyarrow]"), + ) + tm.assert_series_equal( + b, pd.Series([True, False, None] * 3, dtype="boolean[pyarrow]") + ) + + @pytest.mark.parametrize( + "other, expected", + [ + (None, [None, False, None]), + (pd.NA, [None, False, None]), + (True, [True, False, None]), + (False, [False, False, False]), + (np.bool_(True), [True, False, None]), + (np.bool_(False), [False, False, False]), + ], + ) + def test_kleene_and_scalar(self, other, expected): + a = pd.Series([True, False, None], dtype="boolean[pyarrow]") + result = a & other + expected = pd.Series(expected, dtype="boolean[pyarrow]") + tm.assert_series_equal(result, expected) + + result = other & a + tm.assert_series_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_series_equal( + a, pd.Series([True, False, None], dtype="boolean[pyarrow]") + ) + + def test_kleene_xor(self): + a = pd.Series([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean[pyarrow]") + b = pd.Series([True, False, None] * 3, dtype="boolean[pyarrow]") + result = a ^ b + expected = pd.Series( + [False, True, None, True, False, None, None, None, None], + dtype="boolean[pyarrow]", + ) + tm.assert_series_equal(result, expected) + + result = b ^ a + tm.assert_series_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_series_equal( + a, + pd.Series([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean[pyarrow]"), + ) + tm.assert_series_equal( + b, pd.Series([True, False, None] * 3, dtype="boolean[pyarrow]") + ) + + @pytest.mark.parametrize( + "other, expected", + [ + (None, [None, None, None]), + (pd.NA, [None, None, None]), + (True, [False, True, None]), + (np.bool_(True), [False, True, None]), + (np.bool_(False), [True, False, None]), + ], + ) + def test_kleene_xor_scalar(self, other, expected): + a = pd.Series([True, False, None], dtype="boolean[pyarrow]") + result = a ^ other + expected = pd.Series(expected, dtype="boolean[pyarrow]") + tm.assert_series_equal(result, expected) + + result = other ^ a + tm.assert_series_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_series_equal( + a, pd.Series([True, False, None], dtype="boolean[pyarrow]") + ) + + @pytest.mark.parametrize( + "op, exp", + [ + ["__and__", True], + ["__or__", True], + ["__xor__", False], + ], + ) + def test_logical_masked_numpy(self, op, exp): + # GH 52625 + data = [True, False, None] + ser_masked = pd.Series(data, dtype="boolean") + ser_pa = pd.Series(data, dtype="boolean[pyarrow]") + result = getattr(ser_pa, op)(ser_masked) + expected = pd.Series([exp, False, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("pa_type", tm.ALL_INT_PYARROW_DTYPES) +def test_bitwise(pa_type): + # GH 54495 + dtype = ArrowDtype(pa_type) + left = pd.Series([1, None, 3, 4], dtype=dtype) + right = pd.Series([None, 3, 5, 4], dtype=dtype) + + result = left | right + expected = pd.Series([None, None, 3 | 5, 4 | 4], dtype=dtype) + tm.assert_series_equal(result, expected) + + result = left & right + expected = pd.Series([None, None, 3 & 5, 4 & 4], dtype=dtype) + tm.assert_series_equal(result, expected) + + result = left ^ right + expected = pd.Series([None, None, 3 ^ 5, 4 ^ 4], dtype=dtype) + 
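+    # The integer bitwise kernels are null-propagating: any position with a
+    # null input is null in the result.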
tm.assert_series_equal(result, expected) + + result = ~left + expected = ~(left.fillna(0).to_numpy()) + expected = pd.Series(expected, dtype=dtype).mask(left.isnull()) + tm.assert_series_equal(result, expected) + + +def test_arrowdtype_construct_from_string_type_with_unsupported_parameters(): + with pytest.raises(NotImplementedError, match="Passing pyarrow type"): + ArrowDtype.construct_from_string("not_a_real_dype[s, tz=UTC][pyarrow]") + + with pytest.raises(NotImplementedError, match="Passing pyarrow type"): + ArrowDtype.construct_from_string("decimal(7, 2)[pyarrow]") + + +def test_arrowdtype_construct_from_string_supports_dt64tz(): + # as of GH#50689, timestamptz is supported + dtype = ArrowDtype.construct_from_string("timestamp[s, tz=UTC][pyarrow]") + expected = ArrowDtype(pa.timestamp("s", "UTC")) + assert dtype == expected + + +def test_arrowdtype_construct_from_string_type_only_one_pyarrow(): + # GH#51225 + invalid = "int64[pyarrow]foobar[pyarrow]" + msg = ( + r"Passing pyarrow type specific parameters \(\[pyarrow\]\) in the " + r"string is not supported\." + ) + with pytest.raises(NotImplementedError, match=msg): + pd.Series(range(3), dtype=invalid) + + +@pytest.mark.parametrize( + "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"] +) +@pytest.mark.parametrize("quantile", [0.5, [0.5, 0.5]]) +def test_quantile(data, interpolation, quantile, request): + pa_dtype = data.dtype.pyarrow_dtype + + data = data.take([0, 0, 0]) + ser = pd.Series(data) + + if ( + pa.types.is_string(pa_dtype) + or pa.types.is_binary(pa_dtype) + or pa.types.is_boolean(pa_dtype) + ): + # For string, bytes, and bool, we don't *expect* to have quantile work + # Note this matches the non-pyarrow behavior + if pa_version_under7p0: + msg = r"Function quantile has no kernel matching input types \(.*\)" + else: + msg = r"Function 'quantile' has no kernel matching input types \(.*\)" + with pytest.raises(pa.ArrowNotImplementedError, match=msg): + ser.quantile(q=quantile, interpolation=interpolation) + return + + if ( + pa.types.is_integer(pa_dtype) + or pa.types.is_floating(pa_dtype) + or (pa.types.is_decimal(pa_dtype) and not pa_version_under7p0) + ): + pass + elif pa.types.is_temporal(data._pa_array.type): + pass + else: + request.node.add_marker( + pytest.mark.xfail( + raises=pa.ArrowNotImplementedError, + reason=f"quantile not supported by pyarrow for {pa_dtype}", + ) + ) + data = data.take([0, 0, 0]) + ser = pd.Series(data) + result = ser.quantile(q=quantile, interpolation=interpolation) + + if pa.types.is_timestamp(pa_dtype) and interpolation not in ["lower", "higher"]: + # rounding error will make the check below fail + # (e.g. 
'2020-01-01 01:01:01.000001' vs '2020-01-01 01:01:01.000001024'), + # so we'll check for now that we match the numpy analogue + if pa_dtype.tz: + pd_dtype = f"M8[{pa_dtype.unit}, {pa_dtype.tz}]" + else: + pd_dtype = f"M8[{pa_dtype.unit}]" + ser_np = ser.astype(pd_dtype) + + expected = ser_np.quantile(q=quantile, interpolation=interpolation) + if quantile == 0.5: + if pa_dtype.unit == "us": + expected = expected.to_pydatetime(warn=False) + assert result == expected + else: + if pa_dtype.unit == "us": + expected = expected.dt.floor("us") + tm.assert_series_equal(result, expected.astype(data.dtype)) + return + + if quantile == 0.5: + assert result == data[0] + else: + # Just check the values + expected = pd.Series(data.take([0, 0]), index=[0.5, 0.5]) + if ( + pa.types.is_integer(pa_dtype) + or pa.types.is_floating(pa_dtype) + or pa.types.is_decimal(pa_dtype) + ): + expected = expected.astype("float64[pyarrow]") + result = result.astype("float64[pyarrow]") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "take_idx, exp_idx", + [[[0, 0, 2, 2, 4, 4], [4, 0]], [[0, 0, 0, 2, 4, 4], [0]]], + ids=["multi_mode", "single_mode"], +) +def test_mode_dropna_true(data_for_grouping, take_idx, exp_idx): + data = data_for_grouping.take(take_idx) + ser = pd.Series(data) + result = ser.mode(dropna=True) + expected = pd.Series(data_for_grouping.take(exp_idx)) + tm.assert_series_equal(result, expected) + + +def test_mode_dropna_false_mode_na(data): + # GH 50982 + more_nans = pd.Series([None, None, data[0]], dtype=data.dtype) + result = more_nans.mode(dropna=False) + expected = pd.Series([None], dtype=data.dtype) + tm.assert_series_equal(result, expected) + + expected = pd.Series([data[0], None], dtype=data.dtype) + result = expected.mode(dropna=False) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "arrow_dtype, expected_type", + [ + [pa.binary(), bytes], + [pa.binary(16), bytes], + [pa.large_binary(), bytes], + [pa.large_string(), str], + [pa.list_(pa.int64()), list], + [pa.large_list(pa.int64()), list], + [pa.map_(pa.string(), pa.int64()), list], + [pa.struct([("f1", pa.int8()), ("f2", pa.string())]), dict], + [pa.dictionary(pa.int64(), pa.int64()), CategoricalDtypeType], + ], +) +def test_arrow_dtype_type(arrow_dtype, expected_type): + # GH 51845 + # TODO: Redundant with test_getitem_scalar once arrow_dtype exists in data fixture + assert ArrowDtype(arrow_dtype).type == expected_type + + +def test_is_bool_dtype(): + # GH 22667 + data = ArrowExtensionArray(pa.array([True, False, True])) + assert is_bool_dtype(data) + assert pd.core.common.is_bool_indexer(data) + s = pd.Series(range(len(data))) + result = s[data] + expected = s[np.asarray(data)] + tm.assert_series_equal(result, expected) + + +def test_is_numeric_dtype(data): + # GH 50563 + pa_type = data.dtype.pyarrow_dtype + if ( + pa.types.is_floating(pa_type) + or pa.types.is_integer(pa_type) + or pa.types.is_decimal(pa_type) + ): + assert is_numeric_dtype(data) + else: + assert not is_numeric_dtype(data) + + +def test_is_integer_dtype(data): + # GH 50667 + pa_type = data.dtype.pyarrow_dtype + if pa.types.is_integer(pa_type): + assert is_integer_dtype(data) + else: + assert not is_integer_dtype(data) + + +def test_is_signed_integer_dtype(data): + pa_type = data.dtype.pyarrow_dtype + if pa.types.is_signed_integer(pa_type): + assert is_signed_integer_dtype(data) + else: + assert not is_signed_integer_dtype(data) + + +def test_is_unsigned_integer_dtype(data): + pa_type = data.dtype.pyarrow_dtype + if 
pa.types.is_unsigned_integer(pa_type): + assert is_unsigned_integer_dtype(data) + else: + assert not is_unsigned_integer_dtype(data) + + +def test_is_float_dtype(data): + pa_type = data.dtype.pyarrow_dtype + if pa.types.is_floating(pa_type): + assert is_float_dtype(data) + else: + assert not is_float_dtype(data) + + +def test_pickle_roundtrip(data): + # GH 42600 + expected = pd.Series(data) + expected_sliced = expected.head(2) + full_pickled = pickle.dumps(expected) + sliced_pickled = pickle.dumps(expected_sliced) + + assert len(full_pickled) > len(sliced_pickled) + + result = pickle.loads(full_pickled) + tm.assert_series_equal(result, expected) + + result_sliced = pickle.loads(sliced_pickled) + tm.assert_series_equal(result_sliced, expected_sliced) + + +def test_astype_from_non_pyarrow(data): + # GH49795 + pd_array = data._pa_array.to_pandas().array + result = pd_array.astype(data.dtype) + assert not isinstance(pd_array.dtype, ArrowDtype) + assert isinstance(result.dtype, ArrowDtype) + tm.assert_extension_array_equal(result, data) + + +def test_astype_float_from_non_pyarrow_str(): + # GH50430 + ser = pd.Series(["1.0"]) + result = ser.astype("float64[pyarrow]") + expected = pd.Series([1.0], dtype="float64[pyarrow]") + tm.assert_series_equal(result, expected) + + +def test_to_numpy_with_defaults(data): + # GH49973 + result = data.to_numpy() + + pa_type = data._pa_array.type + if ( + pa.types.is_duration(pa_type) + or pa.types.is_timestamp(pa_type) + or pa.types.is_date(pa_type) + ): + expected = np.array(list(data)) + else: + expected = np.array(data._pa_array) + + if data._hasna: + expected = expected.astype(object) + expected[pd.isna(data)] = pd.NA + + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_int_with_na(): + # GH51227: ensure to_numpy does not convert int to float + data = [1, None] + arr = pd.array(data, dtype="int64[pyarrow]") + result = arr.to_numpy() + expected = np.array([1, pd.NA], dtype=object) + assert isinstance(result[0], int) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("na_val, exp", [(lib.no_default, np.nan), (1, 1)]) +def test_to_numpy_null_array(na_val, exp): + # GH#52443 + arr = pd.array([pd.NA, pd.NA], dtype="null[pyarrow]") + result = arr.to_numpy(dtype="float64", na_value=na_val) + expected = np.array([exp] * 2, dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_null_array_no_dtype(): + # GH#52443 + arr = pd.array([pd.NA, pd.NA], dtype="null[pyarrow]") + result = arr.to_numpy(dtype=None) + expected = np.array([pd.NA] * 2, dtype="object") + tm.assert_numpy_array_equal(result, expected) + + +def test_setitem_null_slice(data): + # GH50248 + orig = data.copy() + + result = orig.copy() + result[:] = data[0] + expected = ArrowExtensionArray._from_sequence( + [data[0]] * len(data), + dtype=data._pa_array.type, + ) + tm.assert_extension_array_equal(result, expected) + + result = orig.copy() + result[:] = data[::-1] + expected = data[::-1] + tm.assert_extension_array_equal(result, expected) + + result = orig.copy() + result[:] = data.tolist() + expected = data + tm.assert_extension_array_equal(result, expected) + + +def test_setitem_invalid_dtype(data): + # GH50248 + pa_type = data._pa_array.type + if pa.types.is_string(pa_type) or pa.types.is_binary(pa_type): + fill_value = 123 + err = TypeError + msg = "Invalid value '123' for dtype" + elif ( + pa.types.is_integer(pa_type) + or pa.types.is_floating(pa_type) + or pa.types.is_boolean(pa_type) + ): + fill_value = "foo" + err = 
pa.ArrowInvalid + msg = "Could not convert" + else: + fill_value = "foo" + err = TypeError + msg = "Invalid value 'foo' for dtype" + with pytest.raises(err, match=msg): + data[:] = fill_value + + +@pytest.mark.skipif(pa_version_under8p0, reason="returns object with 7.0") +def test_from_arrow_respecting_given_dtype(): + date_array = pa.array( + [pd.Timestamp("2019-12-31"), pd.Timestamp("2019-12-31")], type=pa.date32() + ) + result = date_array.to_pandas( + types_mapper={pa.date32(): ArrowDtype(pa.date64())}.get + ) + expected = pd.Series( + [pd.Timestamp("2019-12-31"), pd.Timestamp("2019-12-31")], + dtype=ArrowDtype(pa.date64()), + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.skipif(pa_version_under8p0, reason="doesn't raise with 7") +def test_from_arrow_respecting_given_dtype_unsafe(): + array = pa.array([1.5, 2.5], type=pa.float64()) + with pytest.raises(pa.ArrowInvalid, match="Float value 1.5 was truncated"): + array.to_pandas(types_mapper={pa.float64(): ArrowDtype(pa.int64())}.get) + + +def test_round(): + dtype = "float64[pyarrow]" + + ser = pd.Series([0.0, 1.23, 2.56, pd.NA], dtype=dtype) + result = ser.round(1) + expected = pd.Series([0.0, 1.2, 2.6, pd.NA], dtype=dtype) + tm.assert_series_equal(result, expected) + + ser = pd.Series([123.4, pd.NA, 56.78], dtype=dtype) + result = ser.round(-1) + expected = pd.Series([120.0, pd.NA, 60.0], dtype=dtype) + tm.assert_series_equal(result, expected) + + +def test_searchsorted_with_na_raises(data_for_sorting, as_series): + # GH50447 + b, c, a = data_for_sorting + arr = data_for_sorting.take([2, 0, 1]) # to get [a, b, c] + arr[-1] = pd.NA + + if as_series: + arr = pd.Series(arr) + + msg = ( + "searchsorted requires array to be sorted, " + "which is impossible with NAs present." + ) + with pytest.raises(ValueError, match=msg): + arr.searchsorted(b) + + +def test_sort_values_dictionary(): + df = pd.DataFrame( + { + "a": pd.Series( + ["x", "y"], dtype=ArrowDtype(pa.dictionary(pa.int32(), pa.string())) + ), + "b": [1, 2], + }, + ) + expected = df.copy() + result = df.sort_values(by=["a", "b"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("pat", ["abc", "a[a-z]{2}"]) +def test_str_count(pat): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + result = ser.str.count(pat) + expected = pd.Series([1, None], dtype=ArrowDtype(pa.int32())) + tm.assert_series_equal(result, expected) + + +def test_str_count_flags_unsupported(): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + with pytest.raises(NotImplementedError, match="count not"): + ser.str.count("abc", flags=1) + + +@pytest.mark.parametrize( + "side, str_func", [["left", "rjust"], ["right", "ljust"], ["both", "center"]] +) +def test_str_pad(side, str_func): + ser = pd.Series(["a", None], dtype=ArrowDtype(pa.string())) + result = ser.str.pad(width=3, side=side, fillchar="x") + expected = pd.Series( + [getattr("a", str_func)(3, "x"), None], dtype=ArrowDtype(pa.string()) + ) + tm.assert_series_equal(result, expected) + + +def test_str_pad_invalid_side(): + ser = pd.Series(["a", None], dtype=ArrowDtype(pa.string())) + with pytest.raises(ValueError, match="Invalid side: foo"): + ser.str.pad(3, "foo", "x") + + +@pytest.mark.parametrize( + "pat, case, na, regex, exp", + [ + ["ab", False, None, False, [True, None]], + ["Ab", True, None, False, [False, None]], + ["ab", False, True, False, [True, True]], + ["a[a-z]{1}", False, None, True, [True, None]], + ["A[a-z]{1}", True, None, True, [False, None]], + ], +) +def test_str_contains(pat, 
case, na, regex, exp):
+    ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+    result = ser.str.contains(pat, case=case, na=na, regex=regex)
+    expected = pd.Series(exp, dtype=ArrowDtype(pa.bool_()))
+    tm.assert_series_equal(result, expected)
+
+
+def test_str_contains_flags_unsupported():
+    ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+    with pytest.raises(NotImplementedError, match="contains not"):
+        ser.str.contains("a", flags=1)
+
+
+@pytest.mark.parametrize(
+    "side, pat, na, exp",
+    [
+        ["startswith", "ab", None, [True, None]],
+        ["startswith", "b", False, [False, False]],
+        ["endswith", "b", True, [False, True]],
+        ["endswith", "bc", None, [True, None]],
+    ],
+)
+def test_str_start_ends_with(side, pat, na, exp):
+    ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+    result = getattr(ser.str, side)(pat, na=na)
+    expected = pd.Series(exp, dtype=ArrowDtype(pa.bool_()))
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "arg_name, arg",
+    [["pat", re.compile("b")], ["repl", str], ["case", False], ["flags", 1]],
+)
+def test_str_replace_unsupported(arg_name, arg):
+    ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+    kwargs = {"pat": "b", "repl": "x", "regex": True}
+    kwargs[arg_name] = arg
+    with pytest.raises(NotImplementedError, match="replace is not supported"):
+        ser.str.replace(**kwargs)
+
+
+@pytest.mark.parametrize(
+    "pat, repl, n, regex, exp",
+    [
+        ["a", "x", -1, False, ["xbxc", None]],
+        ["a", "x", 1, False, ["xbac", None]],
+        ["[a-b]", "x", -1, True, ["xxxc", None]],
+    ],
+)
+def test_str_replace(pat, repl, n, regex, exp):
+    ser = pd.Series(["abac", None], dtype=ArrowDtype(pa.string()))
+    result = ser.str.replace(pat, repl, n=n, regex=regex)
+    expected = pd.Series(exp, dtype=ArrowDtype(pa.string()))
+    tm.assert_series_equal(result, expected)
+
+
+def test_str_repeat_unsupported():
+    ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+    with pytest.raises(NotImplementedError, match="repeat is not"):
+        ser.str.repeat([1, 2])
+
+
+@pytest.mark.xfail(
+    pa_version_under7p0,
+    reason="Unsupported for pyarrow < 7",
+    raises=NotImplementedError,
+)
+def test_str_repeat():
+    ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+    result = ser.str.repeat(2)
+    expected = pd.Series(["abcabc", None], dtype=ArrowDtype(pa.string()))
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "pat, case, na, exp",
+    [
+        ["ab", False, None, [True, None]],
+        ["Ab", True, None, [False, None]],
+        ["bc", True, None, [False, None]],
+        ["ab", False, True, [True, True]],
+        ["a[a-z]{1}", False, None, [True, None]],
+        ["A[a-z]{1}", True, None, [False, None]],
+    ],
+)
+def test_str_match(pat, case, na, exp):
+    ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+    result = ser.str.match(pat, case=case, na=na)
+    expected = pd.Series(exp, dtype=ArrowDtype(pa.bool_()))
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "pat, case, na, exp",
+    [
+        ["abc", False, None, [True, None]],
+        ["Abc", True, None, [False, None]],
+        ["bc", True, None, [False, None]],
+        ["ab", False, True, [False, True]],
+        ["a[a-z]{2}", False, None, [True, None]],
+        ["A[a-z]{1}", True, None, [False, None]],
+    ],
+)
+def test_str_fullmatch(pat, case, na, exp):
+    ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string()))
+    # fullmatch anchors the pattern at both ends, so the prefix "ab" no longer
+    # matches "abc" (unlike str.match above).
+    result = ser.str.fullmatch(pat, case=case, na=na)
+    expected = pd.Series(exp, dtype=ArrowDtype(pa.bool_()))
+    tm.assert_series_equal(result, expected)
+
+
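+# Note: the two cases below exercise different code paths; pa.compute.find_substring
+# returns int32, while the start/end-adjusted path yields int64 (see exp_typ).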
+@pytest.mark.parametrize( + "sub, start, end, exp, exp_typ", + [["ab", 0, None, [0, None], pa.int32()], ["bc", 1, 3, [2, None], pa.int64()]], +) +def test_str_find(sub, start, end, exp, exp_typ): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + result = ser.str.find(sub, start=start, end=end) + expected = pd.Series(exp, dtype=ArrowDtype(exp_typ)) + tm.assert_series_equal(result, expected) + + +def test_str_find_notimplemented(): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + with pytest.raises(NotImplementedError, match="find not implemented"): + ser.str.find("ab", start=1) + + +@pytest.mark.parametrize( + "i, exp", + [ + [1, ["b", "e", None]], + [-1, ["c", "e", None]], + [2, ["c", None, None]], + [-3, ["a", None, None]], + [4, [None, None, None]], + ], +) +def test_str_get(i, exp): + ser = pd.Series(["abc", "de", None], dtype=ArrowDtype(pa.string())) + result = ser.str.get(i) + expected = pd.Series(exp, dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.xfail( + reason="TODO: StringMethods._validate should support Arrow list types", + raises=AttributeError, +) +def test_str_join(): + ser = pd.Series(ArrowExtensionArray(pa.array([list("abc"), list("123"), None]))) + result = ser.str.join("=") + expected = pd.Series(["a=b=c", "1=2=3", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +def test_str_join_string_type(): + ser = pd.Series(ArrowExtensionArray(pa.array(["abc", "123", None]))) + result = ser.str.join("=") + expected = pd.Series(["a=b=c", "1=2=3", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "start, stop, step, exp", + [ + [None, 2, None, ["ab", None]], + [None, 2, 1, ["ab", None]], + [1, 3, 1, ["bc", None]], + ], +) +def test_str_slice(start, stop, step, exp): + ser = pd.Series(["abcd", None], dtype=ArrowDtype(pa.string())) + result = ser.str.slice(start, stop, step) + expected = pd.Series(exp, dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "start, stop, repl, exp", + [ + [1, 2, "x", ["axcd", None]], + [None, 2, "x", ["xcd", None]], + [None, 2, None, ["cd", None]], + ], +) +def test_str_slice_replace(start, stop, repl, exp): + ser = pd.Series(["abcd", None], dtype=ArrowDtype(pa.string())) + result = ser.str.slice_replace(start, stop, repl) + expected = pd.Series(exp, dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "value, method, exp", + [ + ["a1c", "isalnum", True], + ["!|,", "isalnum", False], + ["aaa", "isalpha", True], + ["!!!", "isalpha", False], + ["٠", "isdecimal", True], # noqa: RUF001 + ["~!", "isdecimal", False], + ["2", "isdigit", True], + ["~", "isdigit", False], + ["aaa", "islower", True], + ["aaA", "islower", False], + ["123", "isnumeric", True], + ["11I", "isnumeric", False], + [" ", "isspace", True], + ["", "isspace", False], + ["The That", "istitle", True], + ["the That", "istitle", False], + ["AAA", "isupper", True], + ["AAc", "isupper", False], + ], +) +def test_str_is_functions(value, method, exp): + ser = pd.Series([value, None], dtype=ArrowDtype(pa.string())) + result = getattr(ser.str, method)() + expected = pd.Series([exp, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "method, exp", + [ + ["capitalize", "Abc def"], + ["title", "Abc Def"], + ["swapcase", "AbC Def"], + ["lower", "abc def"], + 
["upper", "ABC DEF"], + ["casefold", "abc def"], + ], +) +def test_str_transform_functions(method, exp): + ser = pd.Series(["aBc dEF", None], dtype=ArrowDtype(pa.string())) + result = getattr(ser.str, method)() + expected = pd.Series([exp, None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +def test_str_len(): + ser = pd.Series(["abcd", None], dtype=ArrowDtype(pa.string())) + result = ser.str.len() + expected = pd.Series([4, None], dtype=ArrowDtype(pa.int32())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "method, to_strip, val", + [ + ["strip", None, " abc "], + ["strip", "x", "xabcx"], + ["lstrip", None, " abc"], + ["lstrip", "x", "xabc"], + ["rstrip", None, "abc "], + ["rstrip", "x", "abcx"], + ], +) +def test_str_strip(method, to_strip, val): + ser = pd.Series([val, None], dtype=ArrowDtype(pa.string())) + result = getattr(ser.str, method)(to_strip=to_strip) + expected = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("val", ["abc123", "abc"]) +def test_str_removesuffix(val): + ser = pd.Series([val, None], dtype=ArrowDtype(pa.string())) + result = ser.str.removesuffix("123") + expected = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("val", ["123abc", "abc"]) +def test_str_removeprefix(val): + ser = pd.Series([val, None], dtype=ArrowDtype(pa.string())) + result = ser.str.removeprefix("123") + expected = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("errors", ["ignore", "strict"]) +@pytest.mark.parametrize( + "encoding, exp", + [ + ["utf8", b"abc"], + ["utf32", b"\xff\xfe\x00\x00a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00"], + ], +) +def test_str_encode(errors, encoding, exp): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + result = ser.str.encode(encoding, errors) + expected = pd.Series([exp, None], dtype=ArrowDtype(pa.binary())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("flags", [0, 2]) +def test_str_findall(flags): + ser = pd.Series(["abc", "efg", None], dtype=ArrowDtype(pa.string())) + result = ser.str.findall("b", flags=flags) + expected = pd.Series([["b"], [], None], dtype=ArrowDtype(pa.list_(pa.string()))) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("method", ["index", "rindex"]) +@pytest.mark.parametrize( + "start, end", + [ + [0, None], + [1, 4], + ], +) +def test_str_r_index(method, start, end): + ser = pd.Series(["abcba", None], dtype=ArrowDtype(pa.string())) + result = getattr(ser.str, method)("c", start, end) + expected = pd.Series([2, None], dtype=ArrowDtype(pa.int64())) + tm.assert_series_equal(result, expected) + + with pytest.raises(ValueError, match="substring not found"): + getattr(ser.str, method)("foo", start, end) + + +@pytest.mark.parametrize("form", ["NFC", "NFKC"]) +def test_str_normalize(form): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + result = ser.str.normalize(form) + expected = ser.copy() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "start, end", + [ + [0, None], + [1, 4], + ], +) +def test_str_rfind(start, end): + ser = pd.Series(["abcba", "foo", None], dtype=ArrowDtype(pa.string())) + result = ser.str.rfind("c", start, end) + expected = pd.Series([2, -1, None], dtype=ArrowDtype(pa.int64())) + tm.assert_series_equal(result, 
expected) + + +def test_str_translate(): + ser = pd.Series(["abcba", None], dtype=ArrowDtype(pa.string())) + result = ser.str.translate({97: "b"}) + expected = pd.Series(["bbcbb", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +def test_str_wrap(): + ser = pd.Series(["abcba", None], dtype=ArrowDtype(pa.string())) + result = ser.str.wrap(3) + expected = pd.Series(["abc\nba", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +def test_get_dummies(): + ser = pd.Series(["a|b", None, "a|c"], dtype=ArrowDtype(pa.string())) + result = ser.str.get_dummies() + expected = pd.DataFrame( + [[True, True, False], [False, False, False], [True, False, True]], + dtype=ArrowDtype(pa.bool_()), + columns=["a", "b", "c"], + ) + tm.assert_frame_equal(result, expected) + + +def test_str_partition(): + ser = pd.Series(["abcba", None], dtype=ArrowDtype(pa.string())) + result = ser.str.partition("b") + expected = pd.DataFrame( + [["a", "b", "cba"], [None, None, None]], dtype=ArrowDtype(pa.string()) + ) + tm.assert_frame_equal(result, expected) + + result = ser.str.partition("b", expand=False) + expected = pd.Series(ArrowExtensionArray(pa.array([["a", "b", "cba"], None]))) + tm.assert_series_equal(result, expected) + + result = ser.str.rpartition("b") + expected = pd.DataFrame( + [["abc", "b", "a"], [None, None, None]], dtype=ArrowDtype(pa.string()) + ) + tm.assert_frame_equal(result, expected) + + result = ser.str.rpartition("b", expand=False) + expected = pd.Series(ArrowExtensionArray(pa.array([["abc", "b", "a"], None]))) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("method", ["rsplit", "split"]) +def test_str_split_pat_none(method): + # GH 56271 + ser = pd.Series(["a1 cbc\nb", None], dtype=ArrowDtype(pa.string())) + result = getattr(ser.str, method)() + expected = pd.Series(ArrowExtensionArray(pa.array([["a1", "cbc", "b"], None]))) + tm.assert_series_equal(result, expected) + + +def test_str_split(): + # GH 52401 + ser = pd.Series(["a1cbcb", "a2cbcb", None], dtype=ArrowDtype(pa.string())) + result = ser.str.split("c") + expected = pd.Series( + ArrowExtensionArray(pa.array([["a1", "b", "b"], ["a2", "b", "b"], None])) + ) + tm.assert_series_equal(result, expected) + + result = ser.str.split("c", n=1) + expected = pd.Series( + ArrowExtensionArray(pa.array([["a1", "bcb"], ["a2", "bcb"], None])) + ) + tm.assert_series_equal(result, expected) + + result = ser.str.split("[1-2]", regex=True) + expected = pd.Series( + ArrowExtensionArray(pa.array([["a", "cbcb"], ["a", "cbcb"], None])) + ) + tm.assert_series_equal(result, expected) + + result = ser.str.split("[1-2]", regex=True, expand=True) + expected = pd.DataFrame( + { + 0: ArrowExtensionArray(pa.array(["a", "a", None])), + 1: ArrowExtensionArray(pa.array(["cbcb", "cbcb", None])), + } + ) + tm.assert_frame_equal(result, expected) + + result = ser.str.split("1", expand=True) + expected = pd.DataFrame( + { + 0: ArrowExtensionArray(pa.array(["a", "a2cbcb", None])), + 1: ArrowExtensionArray(pa.array(["cbcb", None, None])), + } + ) + tm.assert_frame_equal(result, expected) + + +def test_str_rsplit(): + # GH 52401 + ser = pd.Series(["a1cbcb", "a2cbcb", None], dtype=ArrowDtype(pa.string())) + result = ser.str.rsplit("c") + expected = pd.Series( + ArrowExtensionArray(pa.array([["a1", "b", "b"], ["a2", "b", "b"], None])) + ) + tm.assert_series_equal(result, expected) + + result = ser.str.rsplit("c", n=1) + expected = pd.Series( + ArrowExtensionArray(pa.array([["a1cb", "b"], 
["a2cb", "b"], None])) + ) + tm.assert_series_equal(result, expected) + + result = ser.str.rsplit("c", n=1, expand=True) + expected = pd.DataFrame( + { + 0: ArrowExtensionArray(pa.array(["a1cb", "a2cb", None])), + 1: ArrowExtensionArray(pa.array(["b", "b", None])), + } + ) + tm.assert_frame_equal(result, expected) + + result = ser.str.rsplit("1", expand=True) + expected = pd.DataFrame( + { + 0: ArrowExtensionArray(pa.array(["a", "a2cbcb", None])), + 1: ArrowExtensionArray(pa.array(["cbcb", None, None])), + } + ) + tm.assert_frame_equal(result, expected) + + +def test_str_unsupported_extract(): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + with pytest.raises( + NotImplementedError, match="str.extract not supported with pd.ArrowDtype" + ): + ser.str.extract(r"[ab](\d)") + + +@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) +def test_duration_from_strings_with_nat(unit): + # GH51175 + strings = ["1000", "NaT"] + pa_type = pa.duration(unit) + result = ArrowExtensionArray._from_sequence_of_strings(strings, dtype=pa_type) + expected = ArrowExtensionArray(pa.array([1000, None], type=pa_type)) + tm.assert_extension_array_equal(result, expected) + + +def test_unsupported_dt(data): + pa_dtype = data.dtype.pyarrow_dtype + if not pa.types.is_temporal(pa_dtype): + with pytest.raises( + AttributeError, match="Can only use .dt accessor with datetimelike values" + ): + pd.Series(data).dt + + +@pytest.mark.parametrize( + "prop, expected", + [ + ["year", 2023], + ["day", 2], + ["day_of_week", 0], + ["dayofweek", 0], + ["weekday", 0], + ["day_of_year", 2], + ["dayofyear", 2], + ["hour", 3], + ["minute", 4], + pytest.param( + "is_leap_year", + False, + marks=pytest.mark.xfail( + pa_version_under8p0, + raises=NotImplementedError, + reason="is_leap_year not implemented for pyarrow < 8.0", + ), + ), + ["microsecond", 5], + ["month", 1], + ["nanosecond", 6], + ["quarter", 1], + ["second", 7], + ["date", date(2023, 1, 2)], + ["time", time(3, 4, 7, 5)], + ], +) +def test_dt_properties(prop, expected): + ser = pd.Series( + [ + pd.Timestamp( + year=2023, + month=1, + day=2, + hour=3, + minute=4, + second=7, + microsecond=5, + nanosecond=6, + ), + None, + ], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + result = getattr(ser.dt, prop) + exp_type = None + if isinstance(expected, date): + exp_type = pa.date32() + elif isinstance(expected, time): + exp_type = pa.time64("ns") + expected = pd.Series(ArrowExtensionArray(pa.array([expected, None], type=exp_type))) + tm.assert_series_equal(result, expected) + + +def test_dt_is_month_start_end(): + ser = pd.Series( + [ + datetime(year=2023, month=12, day=2, hour=3), + datetime(year=2023, month=1, day=1, hour=3), + datetime(year=2023, month=3, day=31, hour=3), + None, + ], + dtype=ArrowDtype(pa.timestamp("us")), + ) + result = ser.dt.is_month_start + expected = pd.Series([False, True, False, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + result = ser.dt.is_month_end + expected = pd.Series([False, False, True, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +def test_dt_is_year_start_end(): + ser = pd.Series( + [ + datetime(year=2023, month=12, day=31, hour=3), + datetime(year=2023, month=1, day=1, hour=3), + datetime(year=2023, month=3, day=31, hour=3), + None, + ], + dtype=ArrowDtype(pa.timestamp("us")), + ) + result = ser.dt.is_year_start + expected = pd.Series([False, True, False, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + result = 
ser.dt.is_year_end + expected = pd.Series([True, False, False, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +def test_dt_is_quarter_start_end(): + ser = pd.Series( + [ + datetime(year=2023, month=11, day=30, hour=3), + datetime(year=2023, month=1, day=1, hour=3), + datetime(year=2023, month=3, day=31, hour=3), + None, + ], + dtype=ArrowDtype(pa.timestamp("us")), + ) + result = ser.dt.is_quarter_start + expected = pd.Series([False, True, False, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + result = ser.dt.is_quarter_end + expected = pd.Series([False, False, True, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("method", ["days_in_month", "daysinmonth"]) +def test_dt_days_in_month(method): + ser = pd.Series( + [ + datetime(year=2023, month=3, day=30, hour=3), + datetime(year=2023, month=4, day=1, hour=3), + datetime(year=2023, month=2, day=3, hour=3), + None, + ], + dtype=ArrowDtype(pa.timestamp("us")), + ) + result = getattr(ser.dt, method) + expected = pd.Series([31, 30, 28, None], dtype=ArrowDtype(pa.int64())) + tm.assert_series_equal(result, expected) + + +def test_dt_normalize(): + ser = pd.Series( + [ + datetime(year=2023, month=3, day=30), + datetime(year=2023, month=4, day=1, hour=3), + datetime(year=2023, month=2, day=3, hour=23, minute=59, second=59), + None, + ], + dtype=ArrowDtype(pa.timestamp("us")), + ) + result = ser.dt.normalize() + expected = pd.Series( + [ + datetime(year=2023, month=3, day=30), + datetime(year=2023, month=4, day=1), + datetime(year=2023, month=2, day=3), + None, + ], + dtype=ArrowDtype(pa.timestamp("us")), + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("unit", ["us", "ns"]) +def test_dt_time_preserve_unit(unit): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp(unit)), + ) + assert ser.dt.unit == unit + + result = ser.dt.time + expected = pd.Series( + ArrowExtensionArray(pa.array([time(3, 0), None], type=pa.time64(unit))) + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("tz", [None, "UTC", "US/Pacific"]) +def test_dt_tz(tz): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns", tz=tz)), + ) + result = ser.dt.tz + assert result == timezones.maybe_get_tz(tz) + + +def test_dt_isocalendar(): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + result = ser.dt.isocalendar() + expected = pd.DataFrame( + [[2023, 1, 1], [0, 0, 0]], + columns=["year", "week", "day"], + dtype="int64[pyarrow]", + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "method, exp", [["day_name", "Sunday"], ["month_name", "January"]] +) +def test_dt_day_month_name(method, exp, request): + # GH 52388 + _require_timezone_database(request) + + ser = pd.Series([datetime(2023, 1, 1), None], dtype=ArrowDtype(pa.timestamp("ms"))) + result = getattr(ser.dt, method)() + expected = pd.Series([exp, None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +def test_dt_strftime(request): + _require_timezone_database(request) + + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + result = ser.dt.strftime("%Y-%m-%dT%H:%M:%S") + expected = pd.Series( + ["2023-01-02T03:00:00.000000000", None], 
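+        # pyarrow's strftime renders %S together with the fractional seconds,
+        # so the expected string keeps the full nanosecond precision.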
dtype=ArrowDtype(pa.string()) + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("method", ["ceil", "floor", "round"]) +def test_dt_roundlike_tz_options_not_supported(method): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + with pytest.raises(NotImplementedError, match="ambiguous is not supported."): + getattr(ser.dt, method)("1H", ambiguous="NaT") + + with pytest.raises(NotImplementedError, match="nonexistent is not supported."): + getattr(ser.dt, method)("1H", nonexistent="NaT") + + +@pytest.mark.parametrize("method", ["ceil", "floor", "round"]) +def test_dt_roundlike_unsupported_freq(method): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + with pytest.raises(ValueError, match="freq='1B' is not supported"): + getattr(ser.dt, method)("1B") + + with pytest.raises(ValueError, match="Must specify a valid frequency: None"): + getattr(ser.dt, method)(None) + + +@pytest.mark.xfail( + pa_version_under7p0, reason="Methods not supported for pyarrow < 7.0" +) +@pytest.mark.parametrize("freq", ["D", "H", "T", "S", "L", "U", "N"]) +@pytest.mark.parametrize("method", ["ceil", "floor", "round"]) +def test_dt_ceil_year_floor(freq, method): + ser = pd.Series( + [datetime(year=2023, month=1, day=1), None], + ) + pa_dtype = ArrowDtype(pa.timestamp("ns")) + expected = getattr(ser.dt, method)(f"1{freq}").astype(pa_dtype) + result = getattr(ser.astype(pa_dtype).dt, method)(f"1{freq}") + tm.assert_series_equal(result, expected) + + +def test_dt_to_pydatetime(): + # GH 51859 + data = [datetime(2022, 1, 1), datetime(2023, 1, 1)] + ser = pd.Series(data, dtype=ArrowDtype(pa.timestamp("ns"))) + + msg = "The behavior of ArrowTemporalProperties.to_pydatetime is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.dt.to_pydatetime() + expected = np.array(data, dtype=object) + tm.assert_numpy_array_equal(result, expected) + assert all(type(res) is datetime for res in result) + + msg = "The behavior of DatetimeProperties.to_pydatetime is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = ser.astype("datetime64[ns]").dt.to_pydatetime() + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("date_type", [32, 64]) +def test_dt_to_pydatetime_date_error(date_type): + # GH 52812 + ser = pd.Series( + [date(2022, 12, 31)], + dtype=ArrowDtype(getattr(pa, f"date{date_type}")()), + ) + msg = "The behavior of ArrowTemporalProperties.to_pydatetime is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pytest.raises(ValueError, match="to_pydatetime cannot be called with"): + ser.dt.to_pydatetime() + + +def test_dt_tz_localize_unsupported_tz_options(): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + with pytest.raises(NotImplementedError, match="ambiguous='NaT' is not supported"): + ser.dt.tz_localize("UTC", ambiguous="NaT") + + with pytest.raises(NotImplementedError, match="nonexistent='NaT' is not supported"): + ser.dt.tz_localize("UTC", nonexistent="NaT") + + +def test_dt_tz_localize_none(): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns", tz="US/Pacific")), + ) + result = ser.dt.tz_localize(None) + expected = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), 
+ ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("unit", ["us", "ns"]) +def test_dt_tz_localize(unit, request): + _require_timezone_database(request) + + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp(unit)), + ) + result = ser.dt.tz_localize("US/Pacific") + exp_data = pa.array( + [datetime(year=2023, month=1, day=2, hour=3), None], type=pa.timestamp(unit) + ) + exp_data = pa.compute.assume_timezone(exp_data, "US/Pacific") + expected = pd.Series(ArrowExtensionArray(exp_data)) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "nonexistent, exp_date", + [ + ["shift_forward", datetime(year=2023, month=3, day=12, hour=3)], + ["shift_backward", pd.Timestamp("2023-03-12 01:59:59.999999999")], + ], +) +def test_dt_tz_localize_nonexistent(nonexistent, exp_date, request): + _require_timezone_database(request) + + ser = pd.Series( + [datetime(year=2023, month=3, day=12, hour=2, minute=30), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + result = ser.dt.tz_localize("US/Pacific", nonexistent=nonexistent) + exp_data = pa.array([exp_date, None], type=pa.timestamp("ns")) + exp_data = pa.compute.assume_timezone(exp_data, "US/Pacific") + expected = pd.Series(ArrowExtensionArray(exp_data)) + tm.assert_series_equal(result, expected) + + +def test_dt_tz_convert_not_tz_raises(): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + with pytest.raises(TypeError, match="Cannot convert tz-naive timestamps"): + ser.dt.tz_convert("UTC") + + +def test_dt_tz_convert_none(): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns", "US/Pacific")), + ) + result = ser.dt.tz_convert(None) + expected = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("unit", ["us", "ns"]) +def test_dt_tz_convert(unit): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp(unit, "US/Pacific")), + ) + result = ser.dt.tz_convert("US/Eastern") + expected = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp(unit, "US/Eastern")), + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("skipna", [True, False]) +def test_boolean_reduce_series_all_null(all_boolean_reductions, skipna): + # GH51624 + ser = pd.Series([None], dtype="float64[pyarrow]") + result = getattr(ser, all_boolean_reductions)(skipna=skipna) + if skipna: + expected = all_boolean_reductions == "all" + else: + expected = pd.NA + assert result is expected + + +def test_from_sequence_of_strings_boolean(): + true_strings = ["true", "TRUE", "True", "1", "1.0"] + false_strings = ["false", "FALSE", "False", "0", "0.0"] + nulls = [None] + strings = true_strings + false_strings + nulls + bools = ( + [True] * len(true_strings) + [False] * len(false_strings) + [None] * len(nulls) + ) + + result = ArrowExtensionArray._from_sequence_of_strings(strings, dtype=pa.bool_()) + expected = pd.array(bools, dtype="boolean[pyarrow]") + tm.assert_extension_array_equal(result, expected) + + strings = ["True", "foo"] + with pytest.raises(pa.ArrowInvalid, match="Failed to parse"): + ArrowExtensionArray._from_sequence_of_strings(strings, dtype=pa.bool_()) + + +def test_concat_empty_arrow_backed_series(dtype): + # GH#51734 + ser 
= pd.Series([], dtype=dtype) + expected = ser.copy() + result = pd.concat([ser[np.array([], dtype=np.bool_)]]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["string", "string[pyarrow]"]) +def test_series_from_string_array(dtype): + arr = pa.array("the quick brown fox".split()) + ser = pd.Series(arr, dtype=dtype) + expected = pd.Series(ArrowExtensionArray(arr), dtype=dtype) + tm.assert_series_equal(ser, expected) + + +# _data was renamed to _pa_array +class OldArrowExtensionArray(ArrowExtensionArray): + def __getstate__(self): + state = super().__getstate__() + state["_data"] = state.pop("_pa_array") + return state + + +def test_pickle_old_arrowextensionarray(): + data = pa.array([1]) + expected = OldArrowExtensionArray(data) + result = pickle.loads(pickle.dumps(expected)) + tm.assert_extension_array_equal(result, expected) + assert result._pa_array == pa.chunked_array(data) + assert not hasattr(result, "_data") + + +def test_setitem_boolean_replace_with_mask_segfault(): + # GH#52059 + N = 145_000 + arr = ArrowExtensionArray(pa.chunked_array([np.ones((N,), dtype=np.bool_)])) + expected = arr.copy() + arr[np.zeros((N,), dtype=np.bool_)] = False + assert arr._pa_array == expected._pa_array + + +@pytest.mark.parametrize( + "data, arrow_dtype", + [ + ([b"a", b"b"], pa.large_binary()), + (["a", "b"], pa.large_string()), + ], +) +def test_conversion_large_dtypes_from_numpy_array(data, arrow_dtype): + dtype = ArrowDtype(arrow_dtype) + result = pd.array(np.array(data), dtype=dtype) + expected = pd.array(data, dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_concat_null_array(): + df = pd.DataFrame({"a": [None, None]}, dtype=ArrowDtype(pa.null())) + df2 = pd.DataFrame({"a": [0, 1]}, dtype="int64[pyarrow]") + + result = pd.concat([df, df2], ignore_index=True) + expected = pd.DataFrame({"a": [None, None, 0, 1]}, dtype="int64[pyarrow]") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("pa_type", tm.ALL_INT_PYARROW_DTYPES + tm.FLOAT_PYARROW_DTYPES) +def test_describe_numeric_data(pa_type): + # GH 52470 + data = pd.Series([1, 2, 3], dtype=ArrowDtype(pa_type)) + result = data.describe() + expected = pd.Series( + [3, 2, 1, 1, 1.5, 2.0, 2.5, 3], + dtype=ArrowDtype(pa.float64()), + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("pa_type", tm.TIMEDELTA_PYARROW_DTYPES) +def test_describe_timedelta_data(pa_type): + # GH53001 + data = pd.Series(range(1, 10), dtype=ArrowDtype(pa_type)) + result = data.describe() + expected = pd.Series( + [9] + pd.to_timedelta([5, 2, 1, 3, 5, 7, 9], unit=pa_type.unit).tolist(), + dtype=object, + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("pa_type", tm.DATETIME_PYARROW_DTYPES) +def test_describe_datetime_data(pa_type): + # GH53001 + data = pd.Series(range(1, 10), dtype=ArrowDtype(pa_type)) + result = data.describe() + expected = pd.Series( + [9] + + [ + pd.Timestamp(v, tz=pa_type.tz, unit=pa_type.unit) + for v in [5, 1, 3, 5, 7, 9] + ], + dtype=object, + index=["count", "mean", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_quantile_temporal(pa_type): + # GH52678 + data = [1, 2, 3] + ser = pd.Series(data, dtype=ArrowDtype(pa_type)) + result = 
ser.quantile(0.1) + expected = ser[0] + assert result == expected + + +def test_date32_repr(): + # GH48238 + arrow_dt = pa.array([date.fromisoformat("2020-01-01")], type=pa.date32()) + ser = pd.Series(arrow_dt, dtype=ArrowDtype(arrow_dt.type)) + assert repr(ser) == "0 2020-01-01\ndtype: date32[day][pyarrow]" + + +@pytest.mark.xfail( + pa_version_under8p0, + reason="Function 'add_checked' has no kernel matching input types", + raises=pa.ArrowNotImplementedError, +) +def test_duration_overflow_from_ndarray_containing_nat(): + # GH52843 + data_ts = pd.to_datetime([1, None]) + data_td = pd.to_timedelta([1, None]) + ser_ts = pd.Series(data_ts, dtype=ArrowDtype(pa.timestamp("ns"))) + ser_td = pd.Series(data_td, dtype=ArrowDtype(pa.duration("ns"))) + result = ser_ts + ser_td + expected = pd.Series([2, None], dtype=ArrowDtype(pa.timestamp("ns"))) + tm.assert_series_equal(result, expected) + + +def test_infer_dtype_pyarrow_dtype(data, request): + res = lib.infer_dtype(data) + assert res != "unknown-array" + + if data._hasna and res in ["floating", "datetime64", "timedelta64"]: + mark = pytest.mark.xfail( + reason="in infer_dtype pd.NA is not ignored in these cases " + "even with skipna=True in the list(data) check below" + ) + request.node.add_marker(mark) + + assert res == lib.infer_dtype(list(data), skipna=True) + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_from_sequence_temporal(pa_type): + # GH 53171 + val = 3 + unit = pa_type.unit + if pa.types.is_duration(pa_type): + seq = [pd.Timedelta(val, unit=unit).as_unit(unit)] + else: + seq = [pd.Timestamp(val, unit=unit, tz=pa_type.tz).as_unit(unit)] + + result = ArrowExtensionArray._from_sequence(seq, dtype=pa_type) + expected = ArrowExtensionArray(pa.array([val], type=pa_type)) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_setitem_temporal(pa_type): + # GH 53171 + unit = pa_type.unit + if pa.types.is_duration(pa_type): + val = pd.Timedelta(1, unit=unit).as_unit(unit) + else: + val = pd.Timestamp(1, unit=unit, tz=pa_type.tz).as_unit(unit) + + arr = ArrowExtensionArray(pa.array([1, 2, 3], type=pa_type)) + + result = arr.copy() + result[:] = val + expected = ArrowExtensionArray(pa.array([1, 1, 1], type=pa_type)) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_arithmetic_temporal(pa_type, request): + # GH 53171 + if pa_version_under8p0 and pa.types.is_duration(pa_type): + mark = pytest.mark.xfail( + raises=pa.ArrowNotImplementedError, + reason="Function 'subtract_checked' has no kernel matching input types", + ) + request.node.add_marker(mark) + + arr = ArrowExtensionArray(pa.array([1, 2, 3], type=pa_type)) + unit = pa_type.unit + result = arr - pd.Timedelta(1, unit=unit).as_unit(unit) + expected = ArrowExtensionArray(pa.array([0, 1, 2], type=pa_type)) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_comparison_temporal(pa_type): + # GH 53171 + unit = pa_type.unit + if pa.types.is_duration(pa_type): + val = pd.Timedelta(1, unit=unit).as_unit(unit) + else: + val = pd.Timestamp(1, unit=unit, tz=pa_type.tz).as_unit(unit) + + arr = ArrowExtensionArray(pa.array([1, 2, 3], type=pa_type)) + + result = arr > val + expected = 
ArrowExtensionArray(pa.array([False, True, True], type=pa.bool_())) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_getitem_temporal(pa_type): + # GH 53326 + arr = ArrowExtensionArray(pa.array([1, 2, 3], type=pa_type)) + result = arr[1] + if pa.types.is_duration(pa_type): + expected = pd.Timedelta(2, unit=pa_type.unit).as_unit(pa_type.unit) + assert isinstance(result, pd.Timedelta) + else: + expected = pd.Timestamp(2, unit=pa_type.unit, tz=pa_type.tz).as_unit( + pa_type.unit + ) + assert isinstance(result, pd.Timestamp) + assert result.unit == expected.unit + assert result == expected + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_iter_temporal(pa_type): + # GH 53326 + arr = ArrowExtensionArray(pa.array([1, None], type=pa_type)) + result = list(arr) + if pa.types.is_duration(pa_type): + expected = [ + pd.Timedelta(1, unit=pa_type.unit).as_unit(pa_type.unit), + pd.NA, + ] + assert isinstance(result[0], pd.Timedelta) + else: + expected = [ + pd.Timestamp(1, unit=pa_type.unit, tz=pa_type.tz).as_unit(pa_type.unit), + pd.NA, + ] + assert isinstance(result[0], pd.Timestamp) + assert result[0].unit == expected[0].unit + assert result == expected + + +def test_groupby_series_size_returns_pa_int(data): + # GH 54132 + ser = pd.Series(data[:3], index=["a", "a", "b"]) + result = ser.groupby(level=0).size() + expected = pd.Series([2, 1], dtype="int64[pyarrow]", index=["a", "b"]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_to_numpy_temporal(pa_type): + # GH 53326 + arr = ArrowExtensionArray(pa.array([1, None], type=pa_type)) + result = arr.to_numpy() + if pa.types.is_duration(pa_type): + expected = [ + pd.Timedelta(1, unit=pa_type.unit).as_unit(pa_type.unit), + pd.NA, + ] + assert isinstance(result[0], pd.Timedelta) + else: + expected = [ + pd.Timestamp(1, unit=pa_type.unit, tz=pa_type.tz).as_unit(pa_type.unit), + pd.NA, + ] + assert isinstance(result[0], pd.Timestamp) + expected = np.array(expected, dtype=object) + assert result[0].unit == expected[0].unit + tm.assert_numpy_array_equal(result, expected) + + +def test_groupby_count_return_arrow_dtype(data_missing): + df = pd.DataFrame({"A": [1, 1], "B": data_missing, "C": data_missing}) + result = df.groupby("A").count() + expected = pd.DataFrame( + [[1, 1]], + index=pd.Index([1], name="A"), + columns=["B", "C"], + dtype="int64[pyarrow]", + ) + tm.assert_frame_equal(result, expected) + + +def test_fixed_size_list(): + # GH#55000 + ser = pd.Series( + [[1, 2], [3, 4]], dtype=ArrowDtype(pa.list_(pa.int64(), list_size=2)) + ) + result = ser.dtype.type + assert result == list + + +def test_arrowextensiondtype_dataframe_repr(): + # GH 54062 + df = pd.DataFrame( + pd.period_range("2012", periods=3), + columns=["col"], + dtype=ArrowDtype(ArrowPeriodType("D")), + ) + result = repr(df) + # TODO: repr value may not be expected; address how + # pyarrow.ExtensionType values are displayed + expected = " col\n0 15340\n1 15341\n2 15342" + assert result == expected + + +@pytest.mark.parametrize("pa_type", tm.TIMEDELTA_PYARROW_DTYPES) +def test_duration_fillna_numpy(pa_type): + # GH 54707 + ser1 = pd.Series([None, 2], dtype=ArrowDtype(pa_type)) + ser2 = pd.Series(np.array([1, 3], dtype=f"m8[{pa_type.unit}]")) + result = ser1.fillna(ser2) + expected = pd.Series([1, 2], 
dtype=ArrowDtype(pa_type)) + tm.assert_series_equal(result, expected) + + +def test_comparison_not_propagating_arrow_error(): + # GH#54944 + a = pd.Series([1 << 63], dtype="uint64[pyarrow]") + b = pd.Series([None], dtype="int64[pyarrow]") + with pytest.raises(pa.lib.ArrowInvalid, match="Integer value"): + a < b + + +def test_factorize_chunked_dictionary(): + # GH 54844 + pa_array = pa.chunked_array( + [pa.array(["a"]).dictionary_encode(), pa.array(["b"]).dictionary_encode()] + ) + ser = pd.Series(ArrowExtensionArray(pa_array)) + res_indices, res_uniques = ser.factorize() + exp_indices = np.array([0, 1], dtype=np.intp) + exp_uniques = pd.Index(ArrowExtensionArray(pa_array.combine_chunks())) + tm.assert_numpy_array_equal(res_indices, exp_indices) + tm.assert_index_equal(res_uniques, exp_uniques) + + +def test_arrow_floordiv(): + # GH 55561 + a = pd.Series([-7], dtype="int64[pyarrow]") + b = pd.Series([4], dtype="int64[pyarrow]") + expected = pd.Series([-2], dtype="int64[pyarrow]") + result = a // b + tm.assert_series_equal(result, expected) + + +def test_string_to_datetime_parsing_cast(): + # GH 56266 + string_dates = ["2020-01-01 04:30:00", "2020-01-02 00:00:00", "2020-01-03 00:00:00"] + result = pd.Series(string_dates, dtype="timestamp[ns][pyarrow]") + expected = pd.Series( + ArrowExtensionArray(pa.array(pd.to_datetime(string_dates), from_pandas=True)) + ) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_categorical.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_categorical.py new file mode 100644 index 00000000..33e5c9ad --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_categorical.py @@ -0,0 +1,232 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. + +""" +import string + +import numpy as np +import pytest + +import pandas as pd +from pandas import Categorical +import pandas._testing as tm +from pandas.api.types import CategoricalDtype +from pandas.tests.extension import base + + +def make_data(): + while True: + values = np.random.default_rng(2).choice(list(string.ascii_letters), size=100) + # ensure we meet the requirements + # 1. first two not null + # 2. first and second are different + if values[0] != values[1]: + break + return values + + +@pytest.fixture +def dtype(): + return CategoricalDtype() + + +@pytest.fixture +def data(): + """Length-100 array for this type. 
+ + * data[0] and data[1] should both be non-missing + * data[0] and data[1] should not be equal + """ + return Categorical(make_data()) + + +@pytest.fixture +def data_missing(): + """Length 2 array with [NA, Valid]""" + return Categorical([np.nan, "A"]) + + +@pytest.fixture +def data_for_sorting(): + return Categorical(["A", "B", "C"], categories=["C", "A", "B"], ordered=True) + + +@pytest.fixture +def data_missing_for_sorting(): + return Categorical(["A", None, "B"], categories=["B", "A"], ordered=True) + + +@pytest.fixture +def data_for_grouping(): + return Categorical(["a", "a", None, None, "b", "b", "a", "c"]) + + +class TestDtype(base.BaseDtypeTests): + pass + + +class TestInterface(base.BaseInterfaceTests): + @pytest.mark.xfail(reason="Memory usage doesn't match") + def test_memory_usage(self, data): + # TODO: Is this deliberate? + super().test_memory_usage(data) + + def test_contains(self, data, data_missing): + # GH-37867 + # na value handling in Categorical.__contains__ is deprecated. + # See base.BaseInterfaceTests.test_contains for more details. + + na_value = data.dtype.na_value + # ensure data without missing values + data = data[~data.isna()] + + # first elements are non-missing + assert data[0] in data + assert data_missing[0] in data_missing + + # check the presence of na_value + assert na_value in data_missing + assert na_value not in data + + # Categoricals can contain other nan-likes than na_value + for na_value_obj in tm.NULL_OBJECTS: + if na_value_obj is na_value: + continue + assert na_value_obj not in data + assert na_value_obj in data_missing # this line differs from super method + + +class TestConstructors(base.BaseConstructorsTests): + def test_empty(self, dtype): + cls = dtype.construct_array_type() + result = cls._empty((4,), dtype=dtype) + + assert isinstance(result, cls) + # the dtype we passed is not initialized, so will not match the + # dtype on our result. + assert result.dtype == CategoricalDtype([]) + + +class TestReshaping(base.BaseReshapingTests): + pass + + +class TestGetitem(base.BaseGetitemTests): + @pytest.mark.skip(reason="Backwards compatibility") + def test_getitem_scalar(self, data): + # CategoricalDtype.type isn't "correct" since it should + # be a parent of the elements (object). But don't want + # to break things by changing. 
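+ # Added editorial note (an assumption drawn from the base suite's contract): + # the base test asserts roughly isinstance(data[0], data.dtype.type), and + # CategoricalDtype.type is CategoricalDtypeType rather than a parent class + # of the stored category values, so the base assertion cannot pass here.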
+ super().test_getitem_scalar(data) + + +class TestSetitem(base.BaseSetitemTests): + pass + + +class TestIndex(base.BaseIndexTests): + pass + + +class TestMissing(base.BaseMissingTests): + pass + + +class TestReduce(base.BaseReduceTests): + pass + + +class TestAccumulate(base.BaseAccumulateTests): + pass + + +class TestMethods(base.BaseMethodsTests): + @pytest.mark.xfail(reason="Unobserved categories included") + def test_value_counts(self, all_data, dropna): + return super().test_value_counts(all_data, dropna) + + def test_combine_add(self, data_repeated): + # GH 20825 + # When adding categoricals in combine, result is a string + orig_data1, orig_data2 = data_repeated(2) + s1 = pd.Series(orig_data1) + s2 = pd.Series(orig_data2) + result = s1.combine(s2, lambda x1, x2: x1 + x2) + expected = pd.Series( + [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))] + ) + tm.assert_series_equal(result, expected) + + val = s1.iloc[0] + result = s1.combine(val, lambda x1, x2: x1 + x2) + expected = pd.Series([a + val for a in list(orig_data1)]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("na_action", [None, "ignore"]) + def test_map(self, data, na_action): + result = data.map(lambda x: x, na_action=na_action) + tm.assert_extension_array_equal(result, data) + + +class TestCasting(base.BaseCastingTests): + pass + + +class TestArithmeticOps(base.BaseArithmeticOpsTests): + def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request): + # frame & scalar + op_name = all_arithmetic_operators + if op_name == "__rmod__": + request.node.add_marker( + pytest.mark.xfail( + reason="rmod never called when string is first argument" + ) + ) + super().test_arith_frame_with_scalar(data, op_name) + + def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request): + op_name = all_arithmetic_operators + if op_name == "__rmod__": + request.node.add_marker( + pytest.mark.xfail( + reason="rmod never called when string is first argument" + ) + ) + super().test_arith_series_with_scalar(data, op_name) + + +class TestComparisonOps(base.BaseComparisonOpsTests): + def _compare_other(self, s, data, op, other): + op_name = f"__{op.__name__}__" + if op_name not in ["__eq__", "__ne__"]: + msg = "Unordered Categoricals can only compare equality or not" + with pytest.raises(TypeError, match=msg): + op(data, other) + else: + return super()._compare_other(s, data, op, other) + + +class TestParsing(base.BaseParsingTests): + pass + + +class Test2DCompat(base.NDArrayBacked2DTests): + def test_repr_2d(self, data): + # Categorical __repr__ doesn't include "Categorical", so we need + # to special-case + res = repr(data.reshape(1, -1)) + assert res.count("\nCategories") == 1 + + res = repr(data.reshape(-1, 1)) + assert res.count("\nCategories") == 1 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_common.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_common.py new file mode 100644 index 00000000..3d8523f3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_common.py @@ -0,0 +1,103 @@ +import numpy as np +import pytest + +from pandas.core.dtypes import dtypes +from pandas.core.dtypes.common import is_extension_array_dtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import ExtensionArray + + +class DummyDtype(dtypes.ExtensionDtype): + pass + + +class DummyArray(ExtensionArray): + def __init__(self, data) -> None: + self.data = data + + def __array__(self, 
dtype): + return self.data + + @property + def dtype(self): + return DummyDtype() + + def astype(self, dtype, copy=True): + # we don't support anything but a single dtype + if isinstance(dtype, DummyDtype): + if copy: + return type(self)(self.data) + return self + + return np.array(self, dtype=dtype, copy=copy) + + +class TestExtensionArrayDtype: + @pytest.mark.parametrize( + "values", + [ + pd.Categorical([]), + pd.Categorical([]).dtype, + pd.Series(pd.Categorical([])), + DummyDtype(), + DummyArray(np.array([1, 2])), + ], + ) + def test_is_extension_array_dtype(self, values): + assert is_extension_array_dtype(values) + + @pytest.mark.parametrize("values", [np.array([]), pd.Series(np.array([]))]) + def test_is_not_extension_array_dtype(self, values): + assert not is_extension_array_dtype(values) + + +def test_astype(): + arr = DummyArray(np.array([1, 2, 3])) + expected = np.array([1, 2, 3], dtype=object) + + result = arr.astype(object) + tm.assert_numpy_array_equal(result, expected) + + result = arr.astype("object") + tm.assert_numpy_array_equal(result, expected) + + +def test_astype_no_copy(): + arr = DummyArray(np.array([1, 2, 3], dtype=np.int64)) + result = arr.astype(arr.dtype, copy=False) + + assert arr is result + + result = arr.astype(arr.dtype) + assert arr is not result + + +@pytest.mark.parametrize("dtype", [dtypes.CategoricalDtype(), dtypes.IntervalDtype()]) +def test_is_extension_array_dtype(dtype): + assert isinstance(dtype, dtypes.ExtensionDtype) + assert is_extension_array_dtype(dtype) + + +class CapturingStringArray(pd.arrays.StringArray): + """Extend StringArray to capture arguments to __getitem__""" + + def __getitem__(self, item): + self.last_item_arg = item + return super().__getitem__(item) + + +def test_ellipsis_index(): + # GH#42430 1D slices over extension types turn into N-dimensional slices + # over ExtensionArrays + df = pd.DataFrame( + {"col1": CapturingStringArray(np.array(["hello", "world"], dtype=object))} + ) + _ = df.iloc[:1] + + # String comparison because there's no native way to compare slices. + # Before the fix for GH#42430, last_item_arg would get set to the 2D slice + # (Ellipsis, slice(None, 1, None)) + out = df["col1"].array.last_item_arg + assert str(out) == "slice(None, 1, None)" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_datetime.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_datetime.py new file mode 100644 index 00000000..97773d0d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_datetime.py @@ -0,0 +1,159 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. 
+ +""" +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import DatetimeArray +from pandas.tests.extension import base + + +@pytest.fixture(params=["US/Central"]) +def dtype(request): + return DatetimeTZDtype(unit="ns", tz=request.param) + + +@pytest.fixture +def data(dtype): + data = DatetimeArray(pd.date_range("2000", periods=100, tz=dtype.tz), dtype=dtype) + return data + + +@pytest.fixture +def data_missing(dtype): + return DatetimeArray( + np.array(["NaT", "2000-01-01"], dtype="datetime64[ns]"), dtype=dtype + ) + + +@pytest.fixture +def data_for_sorting(dtype): + a = pd.Timestamp("2000-01-01") + b = pd.Timestamp("2000-01-02") + c = pd.Timestamp("2000-01-03") + return DatetimeArray(np.array([b, c, a], dtype="datetime64[ns]"), dtype=dtype) + + +@pytest.fixture +def data_missing_for_sorting(dtype): + a = pd.Timestamp("2000-01-01") + b = pd.Timestamp("2000-01-02") + return DatetimeArray(np.array([b, "NaT", a], dtype="datetime64[ns]"), dtype=dtype) + + +@pytest.fixture +def data_for_grouping(dtype): + """ + Expected to be like [B, B, NA, NA, A, A, B, C] + + Where A < B < C and NA is missing + """ + a = pd.Timestamp("2000-01-01") + b = pd.Timestamp("2000-01-02") + c = pd.Timestamp("2000-01-03") + na = "NaT" + return DatetimeArray( + np.array([b, b, na, na, a, a, b, c], dtype="datetime64[ns]"), dtype=dtype + ) + + +@pytest.fixture +def na_cmp(): + def cmp(a, b): + return a is pd.NaT and a is b + + return cmp + + +# ---------------------------------------------------------------------------- +class BaseDatetimeTests: + pass + + +# ---------------------------------------------------------------------------- +# Tests +class TestDatetimeDtype(BaseDatetimeTests, base.BaseDtypeTests): + pass + + +class TestConstructors(BaseDatetimeTests, base.BaseConstructorsTests): + def test_series_constructor(self, data): + # Series construction drops any .freq attr + data = data._with_freq(None) + super().test_series_constructor(data) + + +class TestGetitem(BaseDatetimeTests, base.BaseGetitemTests): + pass + + +class TestIndex(base.BaseIndexTests): + pass + + +class TestMethods(BaseDatetimeTests, base.BaseMethodsTests): + @pytest.mark.parametrize("na_action", [None, "ignore"]) + def test_map(self, data, na_action): + result = data.map(lambda x: x, na_action=na_action) + tm.assert_extension_array_equal(result, data) + + +class TestInterface(BaseDatetimeTests, base.BaseInterfaceTests): + pass + + +class TestArithmeticOps(BaseDatetimeTests, base.BaseArithmeticOpsTests): + implements = {"__sub__", "__rsub__"} + + def _get_expected_exception(self, op_name, obj, other): + if op_name in self.implements: + return None + return super()._get_expected_exception(op_name, obj, other) + + +class TestCasting(BaseDatetimeTests, base.BaseCastingTests): + pass + + +class TestComparisonOps(BaseDatetimeTests, base.BaseComparisonOpsTests): + pass + + +class TestMissing(BaseDatetimeTests, base.BaseMissingTests): + pass + + +class TestReshaping(BaseDatetimeTests, base.BaseReshapingTests): + pass + + +class TestSetitem(BaseDatetimeTests, base.BaseSetitemTests): + pass + + +class TestGroupby(BaseDatetimeTests, base.BaseGroupbyTests): + pass + + +class TestPrinting(BaseDatetimeTests, base.BasePrintingTests): + pass + + +class Test2DCompat(BaseDatetimeTests, base.NDArrayBacked2DTests): + pass diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_extension.py 
 b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_extension.py new file mode 100644 index 00000000..1ed626cd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_extension.py @@ -0,0 +1,26 @@ +""" +Tests for behavior if an author does *not* implement EA methods. +""" +import numpy as np +import pytest + +from pandas.core.arrays import ExtensionArray + + +class MyEA(ExtensionArray): + def __init__(self, values) -> None: + self._values = values + + +@pytest.fixture +def data(): + arr = np.arange(10) + return MyEA(arr) + + +class TestExtensionArray: + def test_errors(self, data, all_arithmetic_operators): + # invalid ops + op_name = all_arithmetic_operators + with pytest.raises(AttributeError): + getattr(data, op_name) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_interval.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_interval.py new file mode 100644 index 00000000..66b25abb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_interval.py @@ -0,0 +1,103 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. + +""" +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import IntervalDtype + +from pandas import Interval +from pandas.core.arrays import IntervalArray +from pandas.tests.extension import base + + +def make_data(): + N = 100 + left_array = np.random.default_rng(2).uniform(size=N).cumsum() + right_array = left_array + np.random.default_rng(2).uniform(size=N) + return [Interval(left, right) for left, right in zip(left_array, right_array)] + + +@pytest.fixture +def dtype(): + return IntervalDtype() + + +@pytest.fixture +def data(): + """Length-100 IntervalArray for semantics test.""" + return IntervalArray(make_data()) + + +@pytest.fixture +def data_missing(): + """Length 2 array with [NA, Valid]""" + return IntervalArray.from_tuples([None, (0, 1)]) + + +@pytest.fixture +def data_for_twos(): + pytest.skip("Not a numeric dtype") + + +@pytest.fixture +def data_for_sorting(): + return IntervalArray.from_tuples([(1, 2), (2, 3), (0, 1)]) + + +@pytest.fixture +def data_missing_for_sorting(): + return IntervalArray.from_tuples([(1, 2), None, (0, 1)]) + + +@pytest.fixture +def data_for_grouping(): + a = (0, 1) + b = (1, 2) + c = (2, 3) + return IntervalArray.from_tuples([b, b, None, None, a, a, b, c]) + + +class TestIntervalArray(base.ExtensionTests): + divmod_exc = TypeError + + def _supports_reduction(self, obj, op_name: str) -> bool: + return op_name in ["min", "max"] + + @pytest.mark.xfail( + reason="Raises with incorrect message bc it disallows *all* listlikes " + "instead of just wrong-length listlikes" + ) + def test_fillna_length_mismatch(self, data_missing): + super().test_fillna_length_mismatch(data_missing) + + @pytest.mark.parametrize("engine", ["c", "python"]) + def test_EA_types(self, engine, data): + expected_msg = r".*must implement 
_from_sequence_of_strings.*" + with pytest.raises(NotImplementedError, match=expected_msg): + super().test_EA_types(engine, data) + + @pytest.mark.xfail( + reason="Looks like the test (incorrectly) implicitly assumes int/bool dtype" + ) + def test_invert(self, data): + super().test_invert(data) + + +# TODO: either belongs in tests.arrays.interval or move into base tests. +def test_fillna_non_scalar_raises(data_missing): + msg = "can only insert Interval objects and NA into an IntervalArray" + with pytest.raises(TypeError, match=msg): + data_missing.fillna([1, 1]) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_masked.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_masked.py new file mode 100644 index 00000000..588a2fb5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_masked.py @@ -0,0 +1,452 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. + +""" +import numpy as np +import pytest + +from pandas.compat import ( + IS64, + is_platform_windows, +) +from pandas.compat.numpy import np_version_gt2 + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays.boolean import BooleanDtype +from pandas.core.arrays.floating import ( + Float32Dtype, + Float64Dtype, +) +from pandas.core.arrays.integer import ( + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, +) +from pandas.tests.extension import base + +is_windows_or_32bit = (is_platform_windows() and not np_version_gt2) or not IS64 + +pytestmark = [ + pytest.mark.filterwarnings( + "ignore:invalid value encountered in divide:RuntimeWarning" + ), + pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning"), + # overflow only relevant for Floating dtype cases + pytest.mark.filterwarnings("ignore:overflow encountered in reduce:RuntimeWarning"), +] + + +def make_data(): + return list(range(1, 9)) + [pd.NA] + list(range(10, 98)) + [pd.NA] + [99, 100] + + +def make_float_data(): + return ( + list(np.arange(0.1, 0.9, 0.1)) + + [pd.NA] + + list(np.arange(1, 9.8, 0.1)) + + [pd.NA] + + [9.9, 10.0] + ) + + +def make_bool_data(): + return [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False] + + +@pytest.fixture( + params=[ + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, + Float32Dtype, + Float64Dtype, + BooleanDtype, + ] +) +def dtype(request): + return request.param() + + +@pytest.fixture +def data(dtype): + if dtype.kind == "f": + data = make_float_data() + elif dtype.kind == "b": + data = make_bool_data() + else: + data = make_data() + return pd.array(data, dtype=dtype) + + +@pytest.fixture +def data_for_twos(dtype): + if dtype.kind == "b": + return pd.array(np.ones(100), dtype=dtype) + return pd.array(np.ones(100) * 2, dtype=dtype) + + +@pytest.fixture +def data_missing(dtype): + if dtype.kind == "f": + 
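# Added editorial note: the base suite's data_missing contract is a + # length-2 [NA, valid] pair; the valid value below is chosen per dtype + # kind (0.1 for floats, True for booleans, 1 for the integer dtypes). + 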
return pd.array([pd.NA, 0.1], dtype=dtype) + elif dtype.kind == "b": + return pd.array([np.nan, True], dtype=dtype) + return pd.array([pd.NA, 1], dtype=dtype) + + +@pytest.fixture +def data_for_sorting(dtype): + if dtype.kind == "f": + return pd.array([0.1, 0.2, 0.0], dtype=dtype) + elif dtype.kind == "b": + return pd.array([True, True, False], dtype=dtype) + return pd.array([1, 2, 0], dtype=dtype) + + +@pytest.fixture +def data_missing_for_sorting(dtype): + if dtype.kind == "f": + return pd.array([0.1, pd.NA, 0.0], dtype=dtype) + elif dtype.kind == "b": + return pd.array([True, np.nan, False], dtype=dtype) + return pd.array([1, pd.NA, 0], dtype=dtype) + + +@pytest.fixture +def na_cmp(): + # we are pd.NA + return lambda x, y: x is pd.NA and y is pd.NA + + +@pytest.fixture +def data_for_grouping(dtype): + if dtype.kind == "f": + b = 0.1 + a = 0.0 + c = 0.2 + elif dtype.kind == "b": + b = True + a = False + c = b + else: + b = 1 + a = 0 + c = 2 + + na = pd.NA + return pd.array([b, b, na, na, a, a, b, c], dtype=dtype) + + +class TestDtype(base.BaseDtypeTests): + pass + + +class TestArithmeticOps(base.BaseArithmeticOpsTests): + def _get_expected_exception(self, op_name, obj, other): + try: + dtype = tm.get_dtype(obj) + except AttributeError: + # passed arguments reversed + dtype = tm.get_dtype(other) + + if dtype.kind == "b": + if op_name.strip("_").lstrip("r") in ["pow", "truediv", "floordiv"]: + # match behavior with non-masked bool dtype + return NotImplementedError + elif op_name in ["__sub__", "__rsub__"]: + # exception message would include "numpy boolean subtract" + return TypeError + return None + return super()._get_expected_exception(op_name, obj, other) + + def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result): + sdtype = tm.get_dtype(obj) + expected = pointwise_result + + if sdtype.kind in "iu": + if op_name in ("__rtruediv__", "__truediv__", "__div__"): + expected = expected.fillna(np.nan).astype("Float64") + else: + # combine method result in 'biggest' (int64) dtype + expected = expected.astype(sdtype) + elif sdtype.kind == "b": + if op_name in ( + "__floordiv__", + "__rfloordiv__", + "__pow__", + "__rpow__", + "__mod__", + "__rmod__", + ): + # combine keeps boolean type + expected = expected.astype("Int8") + + elif op_name in ("__truediv__", "__rtruediv__"): + # combine with bools does not generate the correct result + # (numpy behaviour for div is to regard the bools as numeric) + op = self.get_op_from_name(op_name) + expected = self._combine(obj.astype(float), other, op) + expected = expected.astype("Float64") + + if op_name == "__rpow__": + # for rpow, combine does not propagate NaN + result = getattr(obj, op_name)(other) + expected[result.isna()] = np.nan + else: + # combine method result in 'biggest' (float64) dtype + expected = expected.astype(sdtype) + return expected + + series_scalar_exc = None + series_array_exc = None + frame_scalar_exc = None + divmod_exc = None + + def test_divmod_series_array(self, data, data_for_twos, request): + if data.dtype.kind == "b": + mark = pytest.mark.xfail( + reason="Inconsistency between floordiv and divmod; we raise for " + "floordiv but not for divmod. This matches what we do for " + "non-masked bool dtype." 
+ ) + request.node.add_marker(mark) + super().test_divmod_series_array(data, data_for_twos) + + +class TestComparisonOps(base.BaseComparisonOpsTests): + series_scalar_exc = None + series_array_exc = None + frame_scalar_exc = None + + def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result): + return pointwise_result.astype("boolean") + + +class TestInterface(base.BaseInterfaceTests): + pass + + +class TestConstructors(base.BaseConstructorsTests): + pass + + +class TestReshaping(base.BaseReshapingTests): + pass + + # for test_concat_mixed_dtypes test + # concat of an Integer and Int coerces to object dtype + # TODO(jreback) once integrated this would + + +class TestGetitem(base.BaseGetitemTests): + pass + + +class TestSetitem(base.BaseSetitemTests): + pass + + +class TestIndex(base.BaseIndexTests): + pass + + +class TestMissing(base.BaseMissingTests): + pass + + +class TestMethods(base.BaseMethodsTests): + def test_combine_le(self, data_repeated): + # TODO: patching self is a bad pattern here + orig_data1, orig_data2 = data_repeated(2) + if orig_data1.dtype.kind == "b": + self._combine_le_expected_dtype = "boolean" + else: + # TODO: can we make this boolean? + self._combine_le_expected_dtype = object + super().test_combine_le(data_repeated) + + +class TestCasting(base.BaseCastingTests): + pass + + +class TestGroupby(base.BaseGroupbyTests): + pass + + +class TestReduce(base.BaseReduceTests): + def _supports_reduction(self, obj, op_name: str) -> bool: + if op_name in ["any", "all"] and tm.get_dtype(obj).kind != "b": + pytest.skip(reason="Tested in tests/reductions/test_reductions.py") + return True + + def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool): + # overwrite to ensure pd.NA is tested instead of np.nan + # https://github.com/pandas-dev/pandas/issues/30958 + + cmp_dtype = "int64" + if ser.dtype.kind == "f": + # Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has + # no attribute "numpy_dtype" + cmp_dtype = ser.dtype.numpy_dtype # type: ignore[union-attr] + elif ser.dtype.kind == "b": + if op_name in ["min", "max"]: + cmp_dtype = "bool" + + if op_name == "count": + result = getattr(ser, op_name)() + expected = getattr(ser.dropna().astype(cmp_dtype), op_name)() + else: + result = getattr(ser, op_name)(skipna=skipna) + expected = getattr(ser.dropna().astype(cmp_dtype), op_name)(skipna=skipna) + if not skipna and ser.isna().any() and op_name not in ["any", "all"]: + expected = pd.NA + tm.assert_almost_equal(result, expected) + + def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool): + if tm.is_float_dtype(arr.dtype): + cmp_dtype = arr.dtype.name + elif op_name in ["mean", "median", "var", "std", "skew"]: + cmp_dtype = "Float64" + elif op_name in ["max", "min"]: + cmp_dtype = arr.dtype.name + elif arr.dtype in ["Int64", "UInt64"]: + cmp_dtype = arr.dtype.name + elif tm.is_signed_integer_dtype(arr.dtype): + # TODO: Why does Windows Numpy 2.0 dtype depend on skipna? 
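+ # Added editorial note: the expected dtype presumably tracks numpy's + # default integer width, which is 32-bit on Windows before numpy 2.0 + # and on 32-bit builds, hence Int32 there and Int64 elsewhere.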
+ cmp_dtype = ( + "Int32" + if (is_platform_windows() and (not np_version_gt2 or not skipna)) + or not IS64 + else "Int64" + ) + elif tm.is_unsigned_integer_dtype(arr.dtype): + cmp_dtype = ( + "UInt32" + if (is_platform_windows() and (not np_version_gt2 or not skipna)) + or not IS64 + else "UInt64" + ) + elif arr.dtype.kind == "b": + if op_name in ["mean", "median", "var", "std", "skew"]: + cmp_dtype = "Float64" + elif op_name in ["min", "max"]: + cmp_dtype = "boolean" + elif op_name in ["sum", "prod"]: + cmp_dtype = ( + "Int32" + if (is_platform_windows() and (not np_version_gt2 or not skipna)) + or not IS64 + else "Int64" + ) + else: + raise TypeError("not supposed to reach this") + else: + raise TypeError("not supposed to reach this") + return cmp_dtype + + +class TestAccumulation(base.BaseAccumulateTests): + def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool: + return True + + def check_accumulate(self, ser: pd.Series, op_name: str, skipna: bool): + # overwrite to ensure pd.NA is tested instead of np.nan + # https://github.com/pandas-dev/pandas/issues/30958 + length = 64 + if is_windows_or_32bit: + # Item "ExtensionDtype" of "Union[dtype[Any], ExtensionDtype]" has + # no attribute "itemsize" + if not ser.dtype.itemsize == 8: # type: ignore[union-attr] + length = 32 + + if ser.dtype.name.startswith("U"): + expected_dtype = f"UInt{length}" + elif ser.dtype.name.startswith("I"): + expected_dtype = f"Int{length}" + elif ser.dtype.name.startswith("F"): + # Incompatible types in assignment (expression has type + # "Union[dtype[Any], ExtensionDtype]", variable has type "str") + expected_dtype = ser.dtype # type: ignore[assignment] + elif ser.dtype.kind == "b": + if op_name in ("cummin", "cummax"): + expected_dtype = "boolean" + else: + expected_dtype = f"Int{length}" + + if op_name == "cumsum": + result = getattr(ser, op_name)(skipna=skipna) + expected = pd.Series( + pd.array( + getattr(ser.astype("float64"), op_name)(skipna=skipna), + dtype=expected_dtype, + ) + ) + tm.assert_series_equal(result, expected) + elif op_name in ["cummax", "cummin"]: + result = getattr(ser, op_name)(skipna=skipna) + expected = pd.Series( + pd.array( + getattr(ser.astype("float64"), op_name)(skipna=skipna), + dtype=ser.dtype, + ) + ) + tm.assert_series_equal(result, expected) + elif op_name == "cumprod": + result = getattr(ser[:12], op_name)(skipna=skipna) + expected = pd.Series( + pd.array( + getattr(ser[:12].astype("float64"), op_name)(skipna=skipna), + dtype=expected_dtype, + ) + ) + tm.assert_series_equal(result, expected) + + else: + raise NotImplementedError(f"{op_name} not supported") + + +class TestUnaryOps(base.BaseUnaryOpsTests): + def test_invert(self, data, request): + if data.dtype.kind == "f": + mark = pytest.mark.xfail( + reason="Looks like the base class test implicitly assumes " + "boolean/integer dtypes" + ) + request.node.add_marker(mark) + super().test_invert(data) + + +class TestPrinting(base.BasePrintingTests): + pass + + +class TestParsing(base.BaseParsingTests): + pass + + +class Test2DCompat(base.Dim2CompatTests): + pass diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_numpy.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_numpy.py new file mode 100644 index 00000000..a54729de --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_numpy.py @@ -0,0 +1,437 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no 
other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. + +Note: we do not bother with base.BaseIndexTests because NumpyExtensionArray +will never be held in an Index. +""" +import numpy as np +import pytest + +from pandas.core.dtypes.cast import can_hold_element +from pandas.core.dtypes.dtypes import NumpyEADtype + +import pandas as pd +import pandas._testing as tm +from pandas.api.types import is_object_dtype +from pandas.core.arrays.numpy_ import NumpyExtensionArray +from pandas.core.internals import blocks +from pandas.tests.extension import base + + +def _can_hold_element_patched(obj, element) -> bool: + if isinstance(element, NumpyExtensionArray): + element = element.to_numpy() + return can_hold_element(obj, element) + + +orig_assert_attr_equal = tm.assert_attr_equal + + +def _assert_attr_equal(attr: str, left, right, obj: str = "Attributes"): + """ + patch tm.assert_attr_equal so NumpyEADtype("object") is close enough to + np.dtype("object") + """ + if attr == "dtype": + lattr = getattr(left, "dtype", None) + rattr = getattr(right, "dtype", None) + if isinstance(lattr, NumpyEADtype) and not isinstance(rattr, NumpyEADtype): + left = left.astype(lattr.numpy_dtype) + elif isinstance(rattr, NumpyEADtype) and not isinstance(lattr, NumpyEADtype): + right = right.astype(rattr.numpy_dtype) + + orig_assert_attr_equal(attr, left, right, obj) + + +@pytest.fixture(params=["float", "object"]) +def dtype(request): + return NumpyEADtype(np.dtype(request.param)) + + +@pytest.fixture +def allow_in_pandas(monkeypatch): + """ + A monkeypatch to tell pandas to let us in. + + By default, passing a NumpyExtensionArray to an index / series / frame + constructor will unbox that NumpyExtensionArray to an ndarray, and treat + it as a non-EA column. We don't want people using EAs without + reason. + + The mechanism for this is a check against ABCNumpyExtensionArray + in each constructor. + + But, for testing, we need to allow them in pandas. So we patch + the _typ of NumpyExtensionArray, so that we evade the ABCNumpyExtensionArray + check. + """ + with monkeypatch.context() as m: + m.setattr(NumpyExtensionArray, "_typ", "extension") + m.setattr(blocks, "can_hold_element", _can_hold_element_patched) + m.setattr(tm.asserters, "assert_attr_equal", _assert_attr_equal) + yield + + +@pytest.fixture +def data(allow_in_pandas, dtype): + if dtype.numpy_dtype == "object": + return pd.Series([(i,) for i in range(100)]).array + return NumpyExtensionArray(np.arange(1, 101, dtype=dtype._dtype)) + + +@pytest.fixture +def data_missing(allow_in_pandas, dtype): + if dtype.numpy_dtype == "object": + return NumpyExtensionArray(np.array([np.nan, (1,)], dtype=object)) + return NumpyExtensionArray(np.array([np.nan, 1.0])) + + +@pytest.fixture +def na_cmp(): + def cmp(a, b): + return np.isnan(a) and np.isnan(b) + + return cmp + + +@pytest.fixture +def data_for_sorting(allow_in_pandas, dtype): + """Length-3 array with a known sort order. 
+ + This should be three items [B, C, A] with + A < B < C + """ + if dtype.numpy_dtype == "object": + # Use an empty tuple for first element, then remove, + # to disable np.array's shape inference. + return NumpyExtensionArray(np.array([(), (2,), (3,), (1,)], dtype=object)[1:]) + return NumpyExtensionArray(np.array([1, 2, 0])) + + +@pytest.fixture +def data_missing_for_sorting(allow_in_pandas, dtype): + """Length-3 array with a known sort order. + + This should be three items [B, NA, A] with + A < B and NA missing. + """ + if dtype.numpy_dtype == "object": + return NumpyExtensionArray(np.array([(1,), np.nan, (0,)], dtype=object)) + return NumpyExtensionArray(np.array([1, np.nan, 0])) + + +@pytest.fixture +def data_for_grouping(allow_in_pandas, dtype): + """Data for factorization, grouping, and unique tests. + + Expected to be like [B, B, NA, NA, A, A, B, C] + + Where A < B < C and NA is missing + """ + if dtype.numpy_dtype == "object": + a, b, c = (1,), (2,), (3,) + else: + a, b, c = np.arange(3) + return NumpyExtensionArray( + np.array([b, b, np.nan, np.nan, a, a, b, c], dtype=dtype.numpy_dtype) + ) + + +@pytest.fixture +def data_for_twos(dtype): + if dtype.kind == "O": + pytest.skip("Not a numeric dtype") + arr = np.ones(100) * 2 + return NumpyExtensionArray._from_sequence(arr, dtype=dtype) + + +@pytest.fixture +def skip_numpy_object(dtype, request): + """ + Tests for NumpyExtensionArray with nested data. Users typically won't create + these objects via `pd.array`, but they can show up through `.array` + on a Series with nested data. Many of the base tests fail, as they aren't + appropriate for nested data. + + This fixture allows these tests to be skipped when used as a usefixtures + marker to either an individual test or a test class. + """ + if dtype == "object": + mark = pytest.mark.xfail(reason="Fails for object dtype") + request.node.add_marker(mark) + + +skip_nested = pytest.mark.usefixtures("skip_numpy_object") + + +class BaseNumPyTests: + pass + + +class TestCasting(BaseNumPyTests, base.BaseCastingTests): + pass + + +class TestConstructors(BaseNumPyTests, base.BaseConstructorsTests): + @pytest.mark.skip(reason="We don't register our dtype") + # We don't want to register. This test should probably be split in two. + def test_from_dtype(self, data): + pass + + @skip_nested + def test_series_constructor_scalar_with_index(self, data, dtype): + # ValueError: Length of passed values is 1, index implies 3. 
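+ # Added editorial note: for the nested object-dtype case the "scalar" + # element is a tuple, which the Series constructor presumably treats as + # a length-1 listlike against the length-3 index, giving the ValueError + # quoted above.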
+ super().test_series_constructor_scalar_with_index(data, dtype) + + +class TestDtype(BaseNumPyTests, base.BaseDtypeTests): + def test_check_dtype(self, data, request): + if data.dtype.numpy_dtype == "object": + request.node.add_marker( + pytest.mark.xfail( + reason=f"NumpyExtensionArray expectedly clashes with a " + f"NumPy name: {data.dtype.numpy_dtype}" + ) + ) + super().test_check_dtype(data) + + def test_is_not_object_type(self, dtype, request): + if dtype.numpy_dtype == "object": + # Different from BaseDtypeTests.test_is_not_object_type + # because NumpyEADtype(object) is an object type + assert is_object_dtype(dtype) + else: + super().test_is_not_object_type(dtype) + + +class TestGetitem(BaseNumPyTests, base.BaseGetitemTests): + @skip_nested + def test_getitem_scalar(self, data): + # AssertionError + super().test_getitem_scalar(data) + + +class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests): + pass + + +class TestInterface(BaseNumPyTests, base.BaseInterfaceTests): + @skip_nested + def test_array_interface(self, data): + # NumPy array shape inference + super().test_array_interface(data) + + +class TestMethods(BaseNumPyTests, base.BaseMethodsTests): + @skip_nested + def test_shift_fill_value(self, data): + # np.array shape inference. Shift implementation fails. + super().test_shift_fill_value(data) + + @skip_nested + def test_fillna_copy_frame(self, data_missing): + # The "scalar" for this array isn't a scalar. + super().test_fillna_copy_frame(data_missing) + + @skip_nested + def test_fillna_copy_series(self, data_missing): + # The "scalar" for this array isn't a scalar. + super().test_fillna_copy_series(data_missing) + + @skip_nested + def test_searchsorted(self, data_for_sorting, as_series): + # Test setup fails. + super().test_searchsorted(data_for_sorting, as_series) + + @pytest.mark.xfail(reason="NumpyExtensionArray.diff may fail on dtype") + def test_diff(self, data, periods): + return super().test_diff(data, periods) + + def test_insert(self, data, request): + if data.dtype.numpy_dtype == object: + mark = pytest.mark.xfail(reason="Dimension mismatch in np.concatenate") + request.node.add_marker(mark) + + super().test_insert(data) + + @skip_nested + def test_insert_invalid(self, data, invalid_scalar): + # NumpyExtensionArray[object] can hold anything, so skip + super().test_insert_invalid(data, invalid_scalar) + + +class TestArithmetics(BaseNumPyTests, base.BaseArithmeticOpsTests): + divmod_exc = None + series_scalar_exc = None + frame_scalar_exc = None + series_array_exc = None + + @skip_nested + def test_divmod(self, data): + super().test_divmod(data) + + @skip_nested + def test_arith_series_with_scalar(self, data, all_arithmetic_operators): + super().test_arith_series_with_scalar(data, all_arithmetic_operators) + + def test_arith_series_with_array(self, data, all_arithmetic_operators, request): + opname = all_arithmetic_operators + if data.dtype.numpy_dtype == object and opname not in ["__add__", "__radd__"]: + mark = pytest.mark.xfail(reason="Fails for object dtype") + request.node.add_marker(mark) + super().test_arith_series_with_array(data, all_arithmetic_operators) + + @skip_nested + def test_arith_frame_with_scalar(self, data, all_arithmetic_operators): + super().test_arith_frame_with_scalar(data, all_arithmetic_operators) + + +class TestPrinting(BaseNumPyTests, base.BasePrintingTests): + pass + + +class TestReduce(BaseNumPyTests, base.BaseReduceTests): + def _supports_reduction(self, obj, op_name: str) -> bool: + if tm.get_dtype(obj).kind == "O": + return op_name in 
["sum", "min", "max", "any", "all"] + return True + + def check_reduce(self, s, op_name, skipna): + res_op = getattr(s, op_name) + # avoid coercing int -> float. Just cast to the actual numpy type. + exp_op = getattr(s.astype(s.dtype._dtype), op_name) + if op_name == "count": + result = res_op() + expected = exp_op() + else: + result = res_op(skipna=skipna) + expected = exp_op(skipna=skipna) + tm.assert_almost_equal(result, expected) + + @pytest.mark.skip("tests not written yet") + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_frame(self, data, all_numeric_reductions, skipna): + pass + + +class TestMissing(BaseNumPyTests, base.BaseMissingTests): + @skip_nested + def test_fillna_series(self, data_missing): + # Non-scalar "scalar" values. + super().test_fillna_series(data_missing) + + @skip_nested + def test_fillna_frame(self, data_missing): + # Non-scalar "scalar" values. + super().test_fillna_frame(data_missing) + + +class TestReshaping(BaseNumPyTests, base.BaseReshapingTests): + pass + + +class TestSetitem(BaseNumPyTests, base.BaseSetitemTests): + @skip_nested + def test_setitem_invalid(self, data, invalid_scalar): + # object dtype can hold anything, so doesn't raise + super().test_setitem_invalid(data, invalid_scalar) + + @skip_nested + def test_setitem_sequence_broadcasts(self, data, box_in_series): + # ValueError: cannot set using a list-like indexer with a different + # length than the value + super().test_setitem_sequence_broadcasts(data, box_in_series) + + @skip_nested + @pytest.mark.parametrize("setter", ["loc", None]) + def test_setitem_mask_broadcast(self, data, setter): + # ValueError: cannot set using a list-like indexer with a different + # length than the value + super().test_setitem_mask_broadcast(data, setter) + + @skip_nested + def test_setitem_scalar_key_sequence_raise(self, data): + # Failed: DID NOT RAISE + super().test_setitem_scalar_key_sequence_raise(data) + + # TODO: there is some issue with NumpyExtensionArray, therefore, + # skip the setitem test for now, and fix it later (GH 31446) + + @skip_nested + @pytest.mark.parametrize( + "mask", + [ + np.array([True, True, True, False, False]), + pd.array([True, True, True, False, False], dtype="boolean"), + ], + ids=["numpy-array", "boolean-array"], + ) + def test_setitem_mask(self, data, mask, box_in_series): + super().test_setitem_mask(data, mask, box_in_series) + + @skip_nested + @pytest.mark.parametrize( + "idx", + [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])], + ids=["list", "integer-array", "numpy-array"], + ) + def test_setitem_integer_array(self, data, idx, box_in_series): + super().test_setitem_integer_array(data, idx, box_in_series) + + @pytest.mark.parametrize( + "idx, box_in_series", + [ + ([0, 1, 2, pd.NA], False), + pytest.param([0, 1, 2, pd.NA], True, marks=pytest.mark.xfail), + (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False), + (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False), + ], + ids=["list-False", "list-True", "integer-array-False", "integer-array-True"], + ) + def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series): + super().test_setitem_integer_with_missing_raises(data, idx, box_in_series) + + @skip_nested + def test_setitem_slice(self, data, box_in_series): + super().test_setitem_slice(data, box_in_series) + + @skip_nested + def test_setitem_loc_iloc_slice(self, data): + super().test_setitem_loc_iloc_slice(data) + + def test_setitem_with_expansion_dataframe_column(self, data, full_indexer): + # 
https://github.com/pandas-dev/pandas/issues/32395 + df = expected = pd.DataFrame({"data": pd.Series(data)}) + result = pd.DataFrame(index=df.index) + + # because result has object dtype, the attempt to do setting inplace + # is successful, and object dtype is retained + key = full_indexer(df) + result.loc[key, "data"] = df["data"] + + # base class method has expected = df; NumpyExtensionArray behaves oddly because + # we patch _typ for these tests. + if data.dtype.numpy_dtype != object: + if not isinstance(key, slice) or key != slice(None): + expected = pd.DataFrame({"data": data.to_numpy()}) + tm.assert_frame_equal(result, expected) + + +@skip_nested +class TestParsing(BaseNumPyTests, base.BaseParsingTests): + pass + + +class Test2DCompat(BaseNumPyTests, base.NDArrayBacked2DTests): + pass diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_period.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_period.py new file mode 100644 index 00000000..63297c20 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_period.py @@ -0,0 +1,143 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. + +""" +import numpy as np +import pytest + +from pandas._libs import iNaT +from pandas.compat import is_platform_windows +from pandas.compat.numpy import np_version_gte1p24 + +from pandas.core.dtypes.dtypes import PeriodDtype + +import pandas._testing as tm +from pandas.core.arrays import PeriodArray +from pandas.tests.extension import base + + +@pytest.fixture(params=["D", "2D"]) +def dtype(request): + return PeriodDtype(freq=request.param) + + +@pytest.fixture +def data(dtype): + return PeriodArray(np.arange(1970, 2070), dtype=dtype) + + +@pytest.fixture +def data_for_sorting(dtype): + return PeriodArray([2018, 2019, 2017], dtype=dtype) + + +@pytest.fixture +def data_missing(dtype): + return PeriodArray([iNaT, 2017], dtype=dtype) + + +@pytest.fixture +def data_missing_for_sorting(dtype): + return PeriodArray([2018, iNaT, 2017], dtype=dtype) + + +@pytest.fixture +def data_for_grouping(dtype): + B = 2018 + NA = iNaT + A = 2017 + C = 2019 + return PeriodArray([B, B, NA, NA, A, A, B, C], dtype=dtype) + + +class BasePeriodTests: + pass + + +class TestPeriodDtype(BasePeriodTests, base.BaseDtypeTests): + pass + + +class TestConstructors(BasePeriodTests, base.BaseConstructorsTests): + pass + + +class TestGetitem(BasePeriodTests, base.BaseGetitemTests): + pass + + +class TestIndex(base.BaseIndexTests): + pass + + +class TestMethods(BasePeriodTests, base.BaseMethodsTests): + @pytest.mark.parametrize("periods", [1, -2]) + def test_diff(self, data, periods): + if is_platform_windows() and np_version_gte1p24: + with tm.assert_produces_warning(RuntimeWarning, check_stacklevel=False): + super().test_diff(data, periods) + else: + super().test_diff(data, periods) + + @pytest.mark.parametrize("na_action", [None, "ignore"]) + def test_map(self, data, na_action): + result = 
data.map(lambda x: x, na_action=na_action) + tm.assert_extension_array_equal(result, data) + + +class TestInterface(BasePeriodTests, base.BaseInterfaceTests): + pass + + +class TestArithmeticOps(BasePeriodTests, base.BaseArithmeticOpsTests): + def _get_expected_exception(self, op_name, obj, other): + if op_name in ("__sub__", "__rsub__"): + return None + return super()._get_expected_exception(op_name, obj, other) + + +class TestCasting(BasePeriodTests, base.BaseCastingTests): + pass + + +class TestComparisonOps(BasePeriodTests, base.BaseComparisonOpsTests): + pass + + +class TestMissing(BasePeriodTests, base.BaseMissingTests): + pass + + +class TestReshaping(BasePeriodTests, base.BaseReshapingTests): + pass + + +class TestSetitem(BasePeriodTests, base.BaseSetitemTests): + pass + + +class TestGroupby(BasePeriodTests, base.BaseGroupbyTests): + pass + + +class TestPrinting(BasePeriodTests, base.BasePrintingTests): + pass + + +class TestParsing(BasePeriodTests, base.BaseParsingTests): + pass + + +class Test2DCompat(BasePeriodTests, base.NDArrayBacked2DTests): + pass diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_sparse.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_sparse.py new file mode 100644 index 00000000..01448a2f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_sparse.py @@ -0,0 +1,450 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. 
+ +""" + +import numpy as np +import pytest + +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import SparseDtype +import pandas._testing as tm +from pandas.arrays import SparseArray +from pandas.tests.extension import base + + +def make_data(fill_value): + rng = np.random.default_rng(2) + if np.isnan(fill_value): + data = rng.uniform(size=100) + else: + data = rng.integers(1, 100, size=100, dtype=int) + if data[0] == data[1]: + data[0] += 1 + + data[2::3] = fill_value + return data + + +@pytest.fixture +def dtype(): + return SparseDtype() + + +@pytest.fixture(params=[0, np.nan]) +def data(request): + """Length-100 PeriodArray for semantics test.""" + res = SparseArray(make_data(request.param), fill_value=request.param) + return res + + +@pytest.fixture +def data_for_twos(): + return SparseArray(np.ones(100) * 2) + + +@pytest.fixture(params=[0, np.nan]) +def data_missing(request): + """Length 2 array with [NA, Valid]""" + return SparseArray([np.nan, 1], fill_value=request.param) + + +@pytest.fixture(params=[0, np.nan]) +def data_repeated(request): + """Return different versions of data for count times""" + + def gen(count): + for _ in range(count): + yield SparseArray(make_data(request.param), fill_value=request.param) + + yield gen + + +@pytest.fixture(params=[0, np.nan]) +def data_for_sorting(request): + return SparseArray([2, 3, 1], fill_value=request.param) + + +@pytest.fixture(params=[0, np.nan]) +def data_missing_for_sorting(request): + return SparseArray([2, np.nan, 1], fill_value=request.param) + + +@pytest.fixture +def na_cmp(): + return lambda left, right: pd.isna(left) and pd.isna(right) + + +@pytest.fixture(params=[0, np.nan]) +def data_for_grouping(request): + return SparseArray([1, 1, np.nan, np.nan, 2, 2, 1, 3], fill_value=request.param) + + +@pytest.fixture(params=[0, np.nan]) +def data_for_compare(request): + return SparseArray([0, 0, np.nan, -2, -1, 4, 2, 3, 0, 0], fill_value=request.param) + + +class BaseSparseTests: + def _check_unsupported(self, data): + if data.dtype == SparseDtype(int, 0): + pytest.skip("Can't store nan in int array.") + + +class TestDtype(BaseSparseTests, base.BaseDtypeTests): + def test_array_type_with_arg(self, data, dtype): + assert dtype.construct_array_type() is SparseArray + + +class TestInterface(BaseSparseTests, base.BaseInterfaceTests): + pass + + +class TestConstructors(BaseSparseTests, base.BaseConstructorsTests): + pass + + +class TestReshaping(BaseSparseTests, base.BaseReshapingTests): + def test_concat_mixed_dtypes(self, data): + # https://github.com/pandas-dev/pandas/issues/20762 + # This should be the same, aside from concat([sparse, float]) + df1 = pd.DataFrame({"A": data[:3]}) + df2 = pd.DataFrame({"A": [1, 2, 3]}) + df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category") + dfs = [df1, df2, df3] + + # dataframes + result = pd.concat(dfs) + expected = pd.concat( + [x.apply(lambda s: np.asarray(s).astype(object)) for x in dfs] + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "columns", + [ + ["A", "B"], + pd.MultiIndex.from_tuples( + [("A", "a"), ("A", "b")], names=["outer", "inner"] + ), + ], + ) + @pytest.mark.parametrize("future_stack", [True, False]) + def test_stack(self, data, columns, future_stack): + super().test_stack(data, columns, future_stack) + + def test_concat_columns(self, data, na_value): + self._check_unsupported(data) + super().test_concat_columns(data, na_value) + + def test_concat_extension_arrays_copy_false(self, data, na_value): + 
self._check_unsupported(data) + super().test_concat_extension_arrays_copy_false(data, na_value) + + def test_align(self, data, na_value): + self._check_unsupported(data) + super().test_align(data, na_value) + + def test_align_frame(self, data, na_value): + self._check_unsupported(data) + super().test_align_frame(data, na_value) + + def test_align_series_frame(self, data, na_value): + self._check_unsupported(data) + super().test_align_series_frame(data, na_value) + + def test_merge(self, data, na_value): + self._check_unsupported(data) + super().test_merge(data, na_value) + + +class TestGetitem(BaseSparseTests, base.BaseGetitemTests): + def test_get(self, data): + ser = pd.Series(data, index=[2 * i for i in range(len(data))]) + if np.isnan(ser.values.fill_value): + assert np.isnan(ser.get(4)) and np.isnan(ser.iloc[2]) + else: + assert ser.get(4) == ser.iloc[2] + assert ser.get(2) == ser.iloc[1] + + def test_reindex(self, data, na_value): + self._check_unsupported(data) + super().test_reindex(data, na_value) + + +class TestSetitem(BaseSparseTests, base.BaseSetitemTests): + pass + + +class TestIndex(base.BaseIndexTests): + pass + + +class TestMissing(BaseSparseTests, base.BaseMissingTests): + def test_isna(self, data_missing): + sarr = SparseArray(data_missing) + expected_dtype = SparseDtype(bool, pd.isna(data_missing.dtype.fill_value)) + expected = SparseArray([True, False], dtype=expected_dtype) + result = sarr.isna() + tm.assert_sp_array_equal(result, expected) + + # test isna for arr without na + sarr = sarr.fillna(0) + expected_dtype = SparseDtype(bool, pd.isna(data_missing.dtype.fill_value)) + expected = SparseArray([False, False], fill_value=False, dtype=expected_dtype) + tm.assert_equal(sarr.isna(), expected) + + def test_fillna_limit_backfill(self, data_missing): + warns = (PerformanceWarning, FutureWarning) + with tm.assert_produces_warning(warns, check_stacklevel=False): + super().test_fillna_limit_backfill(data_missing) + + def test_fillna_no_op_returns_copy(self, data, request): + if np.isnan(data.fill_value): + request.node.add_marker( + pytest.mark.xfail(reason="returns array with different fill value") + ) + super().test_fillna_no_op_returns_copy(data) + + @pytest.mark.xfail(reason="Unsupported") + def test_fillna_series(self): + # this one looks doable. + super().test_fillna_series() + + def test_fillna_frame(self, data_missing): + # Have to override to specify that fill_value will change. 
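+ # For a sparse column whose fill value is NaN, filling the NaNs also + # replaces the fill value, so the expected dtype becomes + # SparseDtype(subtype, fill_value) rather than the original dtype. + # Rough illustration (values assumed, not part of the suite): + # SparseArray([np.nan, 1.0]).fillna(1.0).dtype -> Sparse[float64, 1.0]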
+ fill_value = data_missing[1] + + result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value) + + if pd.isna(data_missing.fill_value): + dtype = SparseDtype(data_missing.dtype, fill_value) + else: + dtype = data_missing.dtype + + expected = pd.DataFrame( + { + "A": data_missing._from_sequence([fill_value, fill_value], dtype=dtype), + "B": [1, 2], + } + ) + + tm.assert_frame_equal(result, expected) + + +class TestMethods(BaseSparseTests, base.BaseMethodsTests): + _combine_le_expected_dtype = "Sparse[bool]" + + def test_fillna_copy_frame(self, data_missing, using_copy_on_write): + arr = data_missing.take([1, 1]) + df = pd.DataFrame({"A": arr}, copy=False) + + filled_val = df.iloc[0, 0] + result = df.fillna(filled_val) + + if hasattr(df._mgr, "blocks"): + if using_copy_on_write: + assert df.values.base is result.values.base + else: + assert df.values.base is not result.values.base + assert df.A._values.to_dense() is arr.to_dense() + + def test_fillna_copy_series(self, data_missing, using_copy_on_write): + arr = data_missing.take([1, 1]) + ser = pd.Series(arr, copy=False) + + filled_val = ser[0] + result = ser.fillna(filled_val) + + if using_copy_on_write: + assert ser._values is result._values + + else: + assert ser._values is not result._values + assert ser._values.to_dense() is arr.to_dense() + + @pytest.mark.xfail(reason="Not Applicable") + def test_fillna_length_mismatch(self, data_missing): + super().test_fillna_length_mismatch(data_missing) + + def test_where_series(self, data, na_value): + assert data[0] != data[1] + cls = type(data) + a, b = data[:2] + + ser = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype)) + + cond = np.array([True, True, False, False]) + result = ser.where(cond) + + new_dtype = SparseDtype("float", 0.0) + expected = pd.Series( + cls._from_sequence([a, a, na_value, na_value], dtype=new_dtype) + ) + tm.assert_series_equal(result, expected) + + other = cls._from_sequence([a, b, a, b], dtype=data.dtype) + cond = np.array([True, False, True, True]) + result = ser.where(cond, other) + expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype)) + tm.assert_series_equal(result, expected) + + def test_searchsorted(self, data_for_sorting, as_series): + with tm.assert_produces_warning(PerformanceWarning, check_stacklevel=False): + super().test_searchsorted(data_for_sorting, as_series) + + def test_shift_0_periods(self, data): + # GH#33856 shifting with periods=0 should return a copy, not same obj + result = data.shift(0) + + data._sparse_values[0] = data._sparse_values[1] + assert result._sparse_values[0] != result._sparse_values[1] + + @pytest.mark.parametrize("method", ["argmax", "argmin"]) + def test_argmin_argmax_all_na(self, method, data, na_value): + # overriding because Sparse[int64, 0] cannot handle na_value + self._check_unsupported(data) + super().test_argmin_argmax_all_na(method, data, na_value) + + @pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame]) + def test_equals(self, data, na_value, as_series, box): + self._check_unsupported(data) + super().test_equals(data, na_value, as_series, box) + + @pytest.mark.parametrize( + "func, na_action, expected", + [ + (lambda x: x, None, SparseArray([1.0, np.nan])), + (lambda x: x, "ignore", SparseArray([1.0, np.nan])), + (str, None, SparseArray(["1.0", "nan"], fill_value="nan")), + (str, "ignore", SparseArray(["1.0", np.nan])), + ], + ) + def test_map(self, func, na_action, expected): + # GH52096 + data = SparseArray([1, np.nan]) + result = data.map(func, 
na_action=na_action) + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize("na_action", [None, "ignore"]) + def test_map_raises(self, data, na_action): + # GH52096 + msg = "fill value in the sparse values not supported" + with pytest.raises(ValueError, match=msg): + data.map(lambda x: np.nan, na_action=na_action) + + +class TestCasting(BaseSparseTests, base.BaseCastingTests): + @pytest.mark.xfail(raises=TypeError, reason="no sparse StringDtype") + def test_astype_string(self, data): + super().test_astype_string(data) + + +class TestArithmeticOps(BaseSparseTests, base.BaseArithmeticOpsTests): + series_scalar_exc = None + frame_scalar_exc = None + divmod_exc = None + series_array_exc = None + + def _skip_if_different_combine(self, data): + if data.fill_value == 0: + # arith ops call on dtype.fill_value so that the sparsity + # is maintained. Combine can't be called on a dtype in + # general, so we can't make the expected. This is tested elsewhere + pytest.skip("Incorrect expected from Series.combine; tested elsewhere") + + def test_arith_series_with_scalar(self, data, all_arithmetic_operators): + self._skip_if_different_combine(data) + super().test_arith_series_with_scalar(data, all_arithmetic_operators) + + def test_arith_series_with_array(self, data, all_arithmetic_operators): + self._skip_if_different_combine(data) + super().test_arith_series_with_array(data, all_arithmetic_operators) + + def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request): + if data.dtype.fill_value != 0: + pass + elif all_arithmetic_operators.strip("_") not in [ + "mul", + "rmul", + "floordiv", + "rfloordiv", + "pow", + "mod", + "rmod", + ]: + mark = pytest.mark.xfail(reason="result dtype.fill_value mismatch") + request.node.add_marker(mark) + super().test_arith_frame_with_scalar(data, all_arithmetic_operators) + + +class TestComparisonOps(BaseSparseTests): + def _compare_other(self, data_for_compare: SparseArray, comparison_op, other): + op = comparison_op + + result = op(data_for_compare, other) + assert isinstance(result, SparseArray) + assert result.dtype.subtype == np.bool_ + + if isinstance(other, SparseArray): + fill_value = op(data_for_compare.fill_value, other.fill_value) + else: + fill_value = np.all( + op(np.asarray(data_for_compare.fill_value), np.asarray(other)) + ) + + expected = SparseArray( + op(data_for_compare.to_dense(), np.asarray(other)), + fill_value=fill_value, + dtype=np.bool_, + ) + tm.assert_sp_array_equal(result, expected) + + def test_scalar(self, data_for_compare: SparseArray, comparison_op): + self._compare_other(data_for_compare, comparison_op, 0) + self._compare_other(data_for_compare, comparison_op, 1) + self._compare_other(data_for_compare, comparison_op, -1) + self._compare_other(data_for_compare, comparison_op, np.nan) + + @pytest.mark.xfail(reason="Wrong indices") + def test_array(self, data_for_compare: SparseArray, comparison_op): + arr = np.linspace(-4, 5, 10) + self._compare_other(data_for_compare, comparison_op, arr) + + @pytest.mark.xfail(reason="Wrong indices") + def test_sparse_array(self, data_for_compare: SparseArray, comparison_op): + arr = data_for_compare + 1 + self._compare_other(data_for_compare, comparison_op, arr) + arr = data_for_compare * 2 + self._compare_other(data_for_compare, comparison_op, arr) + + +class TestPrinting(BaseSparseTests, base.BasePrintingTests): + @pytest.mark.xfail(reason="Different repr") + def test_array_repr(self, data, size): + super().test_array_repr(data, size) + + +class 
TestParsing(BaseSparseTests, base.BaseParsingTests): + @pytest.mark.parametrize("engine", ["c", "python"]) + def test_EA_types(self, engine, data): + expected_msg = r".*must implement _from_sequence_of_strings.*" + with pytest.raises(NotImplementedError, match=expected_msg): + super().test_EA_types(engine, data) + + +class TestNoNumericAccumulations(base.BaseAccumulateTests): + pass diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_string.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_string.py new file mode 100644 index 00000000..51762899 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/extension/test_string.py @@ -0,0 +1,239 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. + +""" +import string + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.api.types import is_string_dtype +from pandas.core.arrays import ArrowStringArray +from pandas.core.arrays.string_ import StringDtype +from pandas.tests.extension import base + + +def split_array(arr): + if arr.dtype.storage != "pyarrow": + pytest.skip("only applicable for pyarrow chunked array n/a") + + def _split_array(arr): + import pyarrow as pa + + arrow_array = arr._pa_array + split = len(arrow_array) // 2 + arrow_array = pa.chunked_array( + [*arrow_array[:split].chunks, *arrow_array[split:].chunks] + ) + assert arrow_array.num_chunks == 2 + return type(arr)(arrow_array) + + return _split_array(arr) + + +@pytest.fixture(params=[True, False]) +def chunked(request): + return request.param + + +@pytest.fixture +def dtype(string_storage): + return StringDtype(storage=string_storage) + + +@pytest.fixture +def data(dtype, chunked): + strings = np.random.default_rng(2).choice(list(string.ascii_letters), size=100) + while strings[0] == strings[1]: + strings = np.random.default_rng(2).choice(list(string.ascii_letters), size=100) + + arr = dtype.construct_array_type()._from_sequence(strings) + return split_array(arr) if chunked else arr + + +@pytest.fixture +def data_missing(dtype, chunked): + """Length 2 array with [NA, Valid]""" + arr = dtype.construct_array_type()._from_sequence([pd.NA, "A"]) + return split_array(arr) if chunked else arr + + +@pytest.fixture +def data_for_sorting(dtype, chunked): + arr = dtype.construct_array_type()._from_sequence(["B", "C", "A"]) + return split_array(arr) if chunked else arr + + +@pytest.fixture +def data_missing_for_sorting(dtype, chunked): + arr = dtype.construct_array_type()._from_sequence(["B", pd.NA, "A"]) + return split_array(arr) if chunked else arr + + +@pytest.fixture +def data_for_grouping(dtype, chunked): + arr = dtype.construct_array_type()._from_sequence( + ["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"] + ) + return split_array(arr) if chunked else arr + + +class TestDtype(base.BaseDtypeTests): + def test_eq_with_str(self, dtype): + assert dtype == f"string[{dtype.storage}]" + 
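# the storage-qualified alias compares equal too, e.g. + # StringDtype("python") == "string[python]"; the base test below only + # checks the plain "string" spelling +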
super().test_eq_with_str(dtype) + + def test_is_not_string_type(self, dtype): + # Different from BaseDtypeTests.test_is_not_string_type + # because StringDtype is a string type + assert is_string_dtype(dtype) + + +class TestInterface(base.BaseInterfaceTests): + def test_view(self, data, request, arrow_string_storage): + if data.dtype.storage in arrow_string_storage: + pytest.skip(reason="2D support not implemented for ArrowStringArray") + super().test_view(data) + + +class TestConstructors(base.BaseConstructorsTests): + def test_from_dtype(self, data): + # base test uses string representation of dtype + pass + + +class TestReshaping(base.BaseReshapingTests): + def test_transpose(self, data, request, arrow_string_storage): + if data.dtype.storage in arrow_string_storage: + pytest.skip(reason="2D support not implemented for ArrowStringArray") + super().test_transpose(data) + + +class TestGetitem(base.BaseGetitemTests): + pass + + +class TestSetitem(base.BaseSetitemTests): + def test_setitem_preserves_views(self, data, request, arrow_string_storage): + if data.dtype.storage in arrow_string_storage: + pytest.skip(reason="2D support not implemented for ArrowStringArray") + super().test_setitem_preserves_views(data) + + +class TestIndex(base.BaseIndexTests): + pass + + +class TestMissing(base.BaseMissingTests): + def test_dropna_array(self, data_missing): + result = data_missing.dropna() + expected = data_missing[[1]] + tm.assert_extension_array_equal(result, expected) + + def test_fillna_no_op_returns_copy(self, data): + data = data[~data.isna()] + + valid = data[0] + result = data.fillna(valid) + assert result is not data + tm.assert_extension_array_equal(result, data) + + result = data.fillna(method="backfill") + assert result is not data + tm.assert_extension_array_equal(result, data) + + +class TestReduce(base.BaseReduceTests): + def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool: + return ( + ser.dtype.storage == "pyarrow_numpy" # type: ignore[union-attr] + and op_name in ("any", "all") + ) + + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna): + op_name = all_numeric_reductions + + if op_name in ["min", "max"]: + return None + + ser = pd.Series(data) + with pytest.raises(TypeError): + getattr(ser, op_name)(skipna=skipna) + + +class TestMethods(base.BaseMethodsTests): + pass + + +class TestCasting(base.BaseCastingTests): + pass + + +class TestComparisonOps(base.BaseComparisonOpsTests): + def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result): + dtype = tm.get_dtype(obj) + # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype" has no + # attribute "storage" + if dtype.storage == "pyarrow": # type: ignore[union-attr] + cast_to = "boolean[pyarrow]" + elif dtype.storage == "pyarrow_numpy": # type: ignore[union-attr] + cast_to = np.bool_ # type: ignore[assignment] + else: + cast_to = "boolean" + return pointwise_result.astype(cast_to) + + def test_compare_scalar(self, data, comparison_op): + ser = pd.Series(data) + self._compare_other(ser, data, comparison_op, "abc") + + +class TestParsing(base.BaseParsingTests): + pass + + +class TestPrinting(base.BasePrintingTests): + pass + + +class TestGroupBy(base.BaseGroupbyTests): + @pytest.mark.filterwarnings("ignore:Falling back:pandas.errors.PerformanceWarning") + def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op): + super().test_groupby_extension_apply(data_for_grouping, groupby_apply_op) + + +class 
Test2DCompat(base.Dim2CompatTests): + @pytest.fixture(autouse=True) + def arrow_not_supported(self, data, request): + if isinstance(data, ArrowStringArray): + pytest.skip(reason="2D support not implemented for ArrowStringArray") + + +def test_searchsorted_with_na_raises(data_for_sorting, as_series): + # GH50447 + b, c, a = data_for_sorting + arr = data_for_sorting.take([2, 0, 1]) # to get [a, b, c] + arr[-1] = pd.NA + + if as_series: + arr = pd.Series(arr) + + msg = ( + "searchsorted requires array to be sorted, " + "which is impossible with NAs present." + ) + with pytest.raises(ValueError, match=msg): + arr.searchsorted(b) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/common.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/common.py new file mode 100644 index 00000000..fc41d790 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/common.py @@ -0,0 +1,66 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas import ( + DataFrame, + concat, +) + +if TYPE_CHECKING: + from pandas._typing import AxisInt + + +def _check_mixed_float(df, dtype=None): + # float16 are most likely to be upcasted to float32 + dtypes = {"A": "float32", "B": "float32", "C": "float16", "D": "float64"} + if isinstance(dtype, str): + dtypes = {k: dtype for k, v in dtypes.items()} + elif isinstance(dtype, dict): + dtypes.update(dtype) + if dtypes.get("A"): + assert df.dtypes["A"] == dtypes["A"] + if dtypes.get("B"): + assert df.dtypes["B"] == dtypes["B"] + if dtypes.get("C"): + assert df.dtypes["C"] == dtypes["C"] + if dtypes.get("D"): + assert df.dtypes["D"] == dtypes["D"] + + +def _check_mixed_int(df, dtype=None): + dtypes = {"A": "int32", "B": "uint64", "C": "uint8", "D": "int64"} + if isinstance(dtype, str): + dtypes = {k: dtype for k, v in dtypes.items()} + elif isinstance(dtype, dict): + dtypes.update(dtype) + if dtypes.get("A"): + assert df.dtypes["A"] == dtypes["A"] + if dtypes.get("B"): + assert df.dtypes["B"] == dtypes["B"] + if dtypes.get("C"): + assert df.dtypes["C"] == dtypes["C"] + if dtypes.get("D"): + assert df.dtypes["D"] == dtypes["D"] + + +def zip_frames(frames: list[DataFrame], axis: AxisInt = 1) -> DataFrame: + """ + Take a list of frames and zip them together, under the + assumption that they all share the first frame's index/columns.
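+ + With axis=1 the result interleaves columns: for frames f1 and f2 with + columns [A, B], the output column order is f1[A], f2[A], f1[B], f2[B].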
+ + Returns + ------- + new_frame : DataFrame + """ + if axis == 1: + columns = frames[0].columns + zipped = [f.loc[:, c] for c in columns for f in frames] + return concat(zipped, axis=1) + else: + index = frames[0].index + zipped = [f.loc[i, :] for i in index for f in frames] + return DataFrame(zipped) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/conftest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/conftest.py new file mode 100644 index 00000000..fb2df0b8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/conftest.py @@ -0,0 +1,261 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + NaT, + date_range, +) +import pandas._testing as tm + + +@pytest.fixture +def float_frame_with_na(): + """ + Fixture for DataFrame of floats with index of unique strings + + Columns are ['A', 'B', 'C', 'D']; some entries are missing + + A B C D + ABwBzA0ljw -1.128865 -0.897161 0.046603 0.274997 + DJiRzmbyQF 0.728869 0.233502 0.722431 -0.890872 + neMgPD5UBF 0.486072 -1.027393 -0.031553 1.449522 + 0yWA4n8VeX -1.937191 -1.142531 0.805215 -0.462018 + 3slYUbbqU1 0.153260 1.164691 1.489795 -0.545826 + soujjZ0A08 NaN NaN NaN NaN + 7W6NLGsjB9 NaN NaN NaN NaN + ... ... ... ... ... + uhfeaNkCR1 -0.231210 -0.340472 0.244717 -0.901590 + n6p7GYuBIV -0.419052 1.922721 -0.125361 -0.727717 + ZhzAeY6p1y 1.234374 -1.425359 -0.827038 -0.633189 + uWdPsORyUh 0.046738 -0.980445 -1.102965 0.605503 + 3DJA6aN590 -0.091018 -1.684734 -1.100900 0.215947 + 2GBPAzdbMk -2.883405 -1.021071 1.209877 1.633083 + sHadBoyVHw -2.223032 -0.326384 0.258931 0.245517 + + [30 rows x 4 columns] + """ + df = DataFrame(tm.getSeriesData()) + # set some NAs + df.iloc[5:10] = np.nan + df.iloc[15:20, -2:] = np.nan + return df + + +@pytest.fixture +def bool_frame_with_na(): + """ + Fixture for DataFrame of booleans with index of unique strings + + Columns are ['A', 'B', 'C', 'D']; some entries are missing + + A B C D + zBZxY2IDGd False False False False + IhBWBMWllt False True True True + ctjdvZSR6R True False True True + AVTujptmxb False True False True + G9lrImrSWq False False False True + sFFwdIUfz2 NaN NaN NaN NaN + s15ptEJnRb NaN NaN NaN NaN + ... ... ... ... ... + UW41KkDyZ4 True True False False + l9l6XkOdqV True False False False + X2MeZfzDYA False True False False + xWkIKU7vfX False True False True + QOhL6VmpGU False False False True + 22PwkRJdat False True False False + kfboQ3VeIK True False True False + + [30 rows x 4 columns] + """ + df = DataFrame(tm.getSeriesData()) > 0 + df = df.astype(object) + # set some NAs + df.iloc[5:10] = np.nan + df.iloc[15:20, -2:] = np.nan + + # For `any` tests we need to have at least one True before the first NaN + # in each column + for i in range(4): + df.iloc[i, i] = True + return df + + +@pytest.fixture +def float_string_frame(): + """ + Fixture for DataFrame of floats and strings with index of unique strings + + Columns are ['A', 'B', 'C', 'D', 'foo']. + + A B C D foo + w3orJvq07g -1.594062 -1.084273 -1.252457 0.356460 bar + PeukuVdmz2 0.109855 -0.955086 -0.809485 0.409747 bar + ahp2KvwiM8 -1.533729 -0.142519 -0.154666 1.302623 bar + 3WSJ7BUCGd 2.484964 0.213829 0.034778 -2.327831 bar + khdAmufk0U -0.193480 -0.743518 -0.077987 0.153646 bar + LE2DZiFlrE -0.193566 -1.343194 -0.107321 0.959978 bar + HJXSJhVn7b 0.142590 1.257603 -0.659409 -0.223844 bar + ... ... ... ... ... ... 
+ 9a1Vypttgw -1.316394 1.601354 0.173596 1.213196 bar + h5d1gVFbEy 0.609475 1.106738 -0.155271 0.294630 bar + mK9LsTQG92 1.303613 0.857040 -1.019153 0.369468 bar + oOLksd9gKH 0.558219 -0.134491 -0.289869 -0.951033 bar + 9jgoOjKyHg 0.058270 -0.496110 -0.413212 -0.852659 bar + jZLDHclHAO 0.096298 1.267510 0.549206 -0.005235 bar + lR0nxDp1C2 -2.119350 -0.794384 0.544118 0.145849 bar + + [30 rows x 5 columns] + """ + df = DataFrame(tm.getSeriesData()) + df["foo"] = "bar" + return df + + +@pytest.fixture +def mixed_float_frame(): + """ + Fixture for DataFrame of different float types with index of unique strings + + Columns are ['A', 'B', 'C', 'D']. + + A B C D + GI7bbDaEZe -0.237908 -0.246225 -0.468506 0.752993 + KGp9mFepzA -1.140809 -0.644046 -1.225586 0.801588 + VeVYLAb1l2 -1.154013 -1.677615 0.690430 -0.003731 + kmPME4WKhO 0.979578 0.998274 -0.776367 0.897607 + CPyopdXTiz 0.048119 -0.257174 0.836426 0.111266 + 0kJZQndAj0 0.274357 -0.281135 -0.344238 0.834541 + tqdwQsaHG8 -0.979716 -0.519897 0.582031 0.144710 + ... ... ... ... ... + 7FhZTWILQj -2.906357 1.261039 -0.780273 -0.537237 + 4pUDPM4eGq -2.042512 -0.464382 -0.382080 1.132612 + B8dUgUzwTi -1.506637 -0.364435 1.087891 0.297653 + hErlVYjVv9 1.477453 -0.495515 -0.713867 1.438427 + 1BKN3o7YLs 0.127535 -0.349812 -0.881836 0.489827 + 9S4Ekn7zga 1.445518 -2.095149 0.031982 0.373204 + xN1dNn6OV6 1.425017 -0.983995 -0.363281 -0.224502 + + [30 rows x 4 columns] + """ + df = DataFrame(tm.getSeriesData()) + df.A = df.A.astype("float32") + df.B = df.B.astype("float32") + df.C = df.C.astype("float16") + df.D = df.D.astype("float64") + return df + + +@pytest.fixture +def mixed_int_frame(): + """ + Fixture for DataFrame of different int types with index of unique strings + + Columns are ['A', 'B', 'C', 'D']. + + A B C D + mUrCZ67juP 0 1 2 2 + rw99ACYaKS 0 1 0 0 + 7QsEcpaaVU 0 1 1 1 + xkrimI2pcE 0 1 0 0 + dz01SuzoS8 0 1 255 255 + ccQkqOHX75 -1 1 0 0 + DN0iXaoDLd 0 1 0 0 + ... .. .. ... ... + Dfb141wAaQ 1 1 254 254 + IPD8eQOVu5 0 1 0 0 + CcaKulsCmv 0 1 0 0 + rIBa8gu7E5 0 1 0 0 + RP6peZmh5o 0 1 1 1 + NMb9pipQWQ 0 1 0 0 + PqgbJEzjib 0 1 3 3 + + [30 rows x 4 columns] + """ + df = DataFrame({k: v.astype(int) for k, v in tm.getSeriesData().items()}) + df.A = df.A.astype("int32") + df.B = np.ones(len(df.B), dtype="uint64") + df.C = df.C.astype("uint8") + df.D = df.C.astype("int64") + return df + + +@pytest.fixture +def timezone_frame(): + """ + Fixture for DataFrame of date_range Series with different time zones + + Columns are ['A', 'B', 'C']; some entries are missing + + A B C + 0 2013-01-01 2013-01-01 00:00:00-05:00 2013-01-01 00:00:00+01:00 + 1 2013-01-02 NaT NaT + 2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00 + """ + df = DataFrame( + { + "A": date_range("20130101", periods=3), + "B": date_range("20130101", periods=3, tz="US/Eastern"), + "C": date_range("20130101", periods=3, tz="CET"), + } + ) + df.iloc[1, 1] = NaT + df.iloc[1, 2] = NaT + return df + + +@pytest.fixture +def uint64_frame(): + """ + Fixture for DataFrame with uint64 values + + Columns are ['A', 'B'] + """ + return DataFrame( + {"A": np.arange(3), "B": [2**63, 2**63 + 5, 2**63 + 10]}, dtype=np.uint64 + ) + + +@pytest.fixture +def simple_frame(): + """ + Fixture for simple 3x3 DataFrame + + Columns are ['one', 'two', 'three'], index is ['a', 'b', 'c']. 
+ + one two three + a 1.0 2.0 3.0 + b 4.0 5.0 6.0 + c 7.0 8.0 9.0 + """ + arr = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]) + + return DataFrame(arr, columns=["one", "two", "three"], index=["a", "b", "c"]) + + +@pytest.fixture +def frame_of_index_cols(): + """ + Fixture for DataFrame of columns that can be used for indexing + + Columns are ['A', 'B', 'C', 'D', 'E', ('tuple', 'as', 'label')]; + 'A' & 'B' contain duplicates (but are jointly unique), the rest are unique. + + A B C D E (tuple, as, label) + 0 foo one a 0.608477 -0.012500 -1.664297 + 1 foo two b -0.633460 0.249614 -0.364411 + 2 foo three c 0.615256 2.154968 -0.834666 + 3 bar one d 0.234246 1.085675 0.718445 + 4 bar two e 0.533841 -0.005702 -3.533912 + """ + df = DataFrame( + { + "A": ["foo", "foo", "foo", "bar", "bar"], + "B": ["one", "two", "three", "one", "two"], + "C": ["a", "b", "c", "d", "e"], + "D": np.random.default_rng(2).standard_normal(5), + "E": np.random.default_rng(2).standard_normal(5), + ("tuple", "as", "label"): np.random.default_rng(2).standard_normal(5), + } + ) + return df diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/constructors/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/constructors/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/constructors/test_from_dict.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/constructors/test_from_dict.py new file mode 100644 index 00000000..d78924ff --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/constructors/test_from_dict.py @@ -0,0 +1,202 @@ +from collections import OrderedDict + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + MultiIndex, + RangeIndex, + Series, +) +import pandas._testing as tm + + +class TestFromDict: + # Note: these tests are specific to the from_dict method, not for + # passing dictionaries to DataFrame.__init__ + + def test_constructor_list_of_odicts(self): + data = [ + OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]), + OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]), + OrderedDict([["a", 1.5], ["d", 6]]), + OrderedDict(), + OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]), + OrderedDict([["b", 3], ["c", 4], ["d", 6]]), + ] + + result = DataFrame(data) + expected = DataFrame.from_dict( + dict(zip(range(len(data)), data)), orient="index" + ) + tm.assert_frame_equal(result, expected.reindex(result.index)) + + def test_constructor_single_row(self): + data = [OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]])] + + result = DataFrame(data) + expected = DataFrame.from_dict(dict(zip([0], data)), orient="index").reindex( + result.index + ) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_series(self): + data = [ + OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]), + OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]), + ] + sdict = OrderedDict(zip(["x", "y"], data)) + idx = Index(["a", "b", "c"]) + + # all named + data2 = [ + Series([1.5, 3, 4], idx, dtype="O", name="x"), + Series([1.5, 3, 6], idx, name="y"), + ] + result = DataFrame(data2) + expected = DataFrame.from_dict(sdict, orient="index") + tm.assert_frame_equal(result, expected) + + # some unnamed + data2 = [ + Series([1.5, 3, 4], idx, dtype="O", name="x"), + Series([1.5, 3, 6], idx), + ] + result = DataFrame(data2) + + sdict = OrderedDict(zip(["x", "Unnamed 0"], data)) + expected = DataFrame.from_dict(sdict, orient="index") + 
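# the unnamed Series appears under the generated label "Unnamed 0" +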
tm.assert_frame_equal(result, expected) + + # none named + data = [ + OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]), + OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]), + OrderedDict([["a", 1.5], ["d", 6]]), + OrderedDict(), + OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]), + OrderedDict([["b", 3], ["c", 4], ["d", 6]]), + ] + data = [Series(d) for d in data] + + result = DataFrame(data) + sdict = OrderedDict(zip(range(len(data)), data)) + expected = DataFrame.from_dict(sdict, orient="index") + tm.assert_frame_equal(result, expected.reindex(result.index)) + + result2 = DataFrame(data, index=np.arange(6, dtype=np.int64)) + tm.assert_frame_equal(result, result2) + + result = DataFrame([Series(dtype=object)]) + expected = DataFrame(index=[0]) + tm.assert_frame_equal(result, expected) + + data = [ + OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]), + OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]), + ] + sdict = OrderedDict(zip(range(len(data)), data)) + + idx = Index(["a", "b", "c"]) + data2 = [Series([1.5, 3, 4], idx, dtype="O"), Series([1.5, 3, 6], idx)] + result = DataFrame(data2) + expected = DataFrame.from_dict(sdict, orient="index") + tm.assert_frame_equal(result, expected) + + def test_constructor_orient(self, float_string_frame): + data_dict = float_string_frame.T._series + recons = DataFrame.from_dict(data_dict, orient="index") + expected = float_string_frame.reindex(index=recons.index) + tm.assert_frame_equal(recons, expected) + + # dict of sequence + a = {"hi": [32, 3, 3], "there": [3, 5, 3]} + rs = DataFrame.from_dict(a, orient="index") + xp = DataFrame.from_dict(a).T.reindex(list(a.keys())) + tm.assert_frame_equal(rs, xp) + + def test_constructor_from_ordered_dict(self): + # GH#8425 + a = OrderedDict( + [ + ("one", OrderedDict([("col_a", "foo1"), ("col_b", "bar1")])), + ("two", OrderedDict([("col_a", "foo2"), ("col_b", "bar2")])), + ("three", OrderedDict([("col_a", "foo3"), ("col_b", "bar3")])), + ] + ) + expected = DataFrame.from_dict(a, orient="columns").T + result = DataFrame.from_dict(a, orient="index") + tm.assert_frame_equal(result, expected) + + def test_from_dict_columns_parameter(self): + # GH#18529 + # Test new columns parameter for from_dict that was added to make + # from_items(..., orient='index', columns=[...]) easier to replicate + result = DataFrame.from_dict( + OrderedDict([("A", [1, 2]), ("B", [4, 5])]), + orient="index", + columns=["one", "two"], + ) + expected = DataFrame([[1, 2], [4, 5]], index=["A", "B"], columns=["one", "two"]) + tm.assert_frame_equal(result, expected) + + msg = "cannot use columns parameter with orient='columns'" + with pytest.raises(ValueError, match=msg): + DataFrame.from_dict( + {"A": [1, 2], "B": [4, 5]}, + orient="columns", + columns=["one", "two"], + ) + with pytest.raises(ValueError, match=msg): + DataFrame.from_dict({"A": [1, 2], "B": [4, 5]}, columns=["one", "two"]) + + @pytest.mark.parametrize( + "data_dict, orient, expected", + [ + ({}, "index", RangeIndex(0)), + ( + [{("a",): 1}, {("a",): 2}], + "columns", + Index([("a",)], tupleize_cols=False), + ), + ( + [OrderedDict([(("a",), 1), (("b",), 2)])], + "columns", + Index([("a",), ("b",)], tupleize_cols=False), + ), + ([{("a", "b"): 1}], "columns", Index([("a", "b")], tupleize_cols=False)), + ], + ) + def test_constructor_from_dict_tuples(self, data_dict, orient, expected): + # GH#16769 + df = DataFrame.from_dict(data_dict, orient) + result = df.columns + tm.assert_index_equal(result, expected) + + def test_frame_dict_constructor_empty_series(self): + s1 = Series( + [1, 2, 
3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2), (2, 4)]) + ) + s2 = Series( + [1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)]) + ) + s3 = Series(dtype=object) + + # it works! + DataFrame({"foo": s1, "bar": s2, "baz": s3}) + DataFrame.from_dict({"foo": s1, "baz": s3, "bar": s2}) + + def test_from_dict_scalars_requires_index(self): + msg = "If using all scalar values, you must pass an index" + with pytest.raises(ValueError, match=msg): + DataFrame.from_dict(OrderedDict([("b", 8), ("a", 5), ("a", 6)])) + + def test_from_dict_orient_invalid(self): + msg = ( + "Expected 'index', 'columns' or 'tight' for orient parameter. " + "Got 'abc' instead" + ) + with pytest.raises(ValueError, match=msg): + DataFrame.from_dict({"foo": 1, "baz": 3, "bar": 2}, orient="abc") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/constructors/test_from_records.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/constructors/test_from_records.py new file mode 100644 index 00000000..bb4aed21 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/constructors/test_from_records.py @@ -0,0 +1,499 @@ +from collections.abc import Iterator +from datetime import datetime +from decimal import Decimal + +import numpy as np +import pytest +import pytz + +from pandas.compat import is_platform_little_endian + +from pandas import ( + CategoricalIndex, + DataFrame, + Index, + Interval, + RangeIndex, + Series, + date_range, +) +import pandas._testing as tm + + +class TestFromRecords: + def test_from_records_dt64tz_frame(self): + # GH#51162 don't lose tz when calling from_records with DataFrame input + dti = date_range("2016-01-01", periods=10, tz="US/Pacific") + df = DataFrame({i: dti for i in range(4)}) + with tm.assert_produces_warning(FutureWarning): + res = DataFrame.from_records(df) + tm.assert_frame_equal(res, df) + + def test_from_records_with_datetimes(self): + # this may fail on certain platforms because of a numpy issue + # related GH#6140 + if not is_platform_little_endian(): + pytest.skip("known failure of test on non-little endian") + + # construction with a null in a recarray + # GH#6140 + expected = DataFrame({"EXPIRY": [datetime(2005, 3, 1, 0, 0), None]}) + + arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])] + dtypes = [("EXPIRY", " None: + self.args = args + + def __getitem__(self, i): + return self.args[i] + + def __iter__(self) -> Iterator: + return iter(self.args) + + recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)] + tups = [tuple(rec) for rec in recs] + + result = DataFrame.from_records(recs) + expected = DataFrame.from_records(tups) + tm.assert_frame_equal(result, expected) + + def test_from_records_len0_with_columns(self): + # GH#2633 + result = DataFrame.from_records([], index="foo", columns=["foo", "bar"]) + expected = Index(["bar"]) + + assert len(result) == 0 + assert result.index.name == "foo" + tm.assert_index_equal(result.columns, expected) + + def test_from_records_series_list_dict(self): + # GH#27358 + expected = DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T + data = Series([[{"a": 1, "b": 2}], [{"a": 3, "b": 4}]]) + result = DataFrame.from_records(data) + tm.assert_frame_equal(result, expected) + + def test_from_records_series_categorical_index(self): + # GH#32805 + index = CategoricalIndex( + [Interval(-20, -10), Interval(-10, 0), Interval(0, 10)] + ) + series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index) + frame = DataFrame.from_records(series_of_dicts, index=index) 
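+ # keys missing from one of the dicts surface as NaN in that row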
+ expected = DataFrame( + {"a": [1, 2, np.nan], "b": [np.nan, np.nan, 3]}, index=index + ) + tm.assert_frame_equal(frame, expected) + + def test_frame_from_records_utc(self): + rec = {"datum": 1.5, "begin_time": datetime(2006, 4, 27, tzinfo=pytz.utc)} + + # it works + DataFrame.from_records([rec], index="begin_time") + + def test_from_records_to_records(self): + # from numpy documentation + arr = np.zeros((2,), dtype=("i4,f4,S10")) + arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")] + + DataFrame.from_records(arr) + + index = Index(np.arange(len(arr))[::-1]) + indexed_frame = DataFrame.from_records(arr, index=index) + tm.assert_index_equal(indexed_frame.index, index) + + # without names, it should go to last ditch + arr2 = np.zeros((2, 3)) + tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2)) + + # wrong length + msg = "|".join( + [ + r"Length of values \(2\) does not match length of index \(1\)", + ] + ) + with pytest.raises(ValueError, match=msg): + DataFrame.from_records(arr, index=index[:-1]) + + indexed_frame = DataFrame.from_records(arr, index="f1") + + # what to do? + records = indexed_frame.to_records() + assert len(records.dtype.names) == 3 + + records = indexed_frame.to_records(index=False) + assert len(records.dtype.names) == 2 + assert "index" not in records.dtype.names + + def test_from_records_nones(self): + tuples = [(1, 2, None, 3), (1, 2, None, 3), (None, 2, 5, 3)] + + df = DataFrame.from_records(tuples, columns=["a", "b", "c", "d"]) + assert np.isnan(df["c"][0]) + + def test_from_records_iterator(self): + arr = np.array( + [(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5.0, 5.0, 6, 6), (7.0, 7.0, 8, 8)], + dtype=[ + ("x", np.float64), + ("u", np.float32), + ("y", np.int64), + ("z", np.int32), + ], + ) + df = DataFrame.from_records(iter(arr), nrows=2) + xp = DataFrame( + { + "x": np.array([1.0, 3.0], dtype=np.float64), + "u": np.array([1.0, 3.0], dtype=np.float32), + "y": np.array([2, 4], dtype=np.int64), + "z": np.array([2, 4], dtype=np.int32), + } + ) + tm.assert_frame_equal(df.reindex_like(xp), xp) + + # no dtypes specified here, so just compare with the default + arr = [(1.0, 2), (3.0, 4), (5.0, 6), (7.0, 8)] + df = DataFrame.from_records(iter(arr), columns=["x", "y"], nrows=2) + tm.assert_frame_equal(df, xp.reindex(columns=["x", "y"]), check_dtype=False) + + def test_from_records_tuples_generator(self): + def tuple_generator(length): + for i in range(length): + letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + yield (i, letters[i % len(letters)], i / length) + + columns_names = ["Integer", "String", "Float"] + columns = [ + [i[j] for i in tuple_generator(10)] for j in range(len(columns_names)) + ] + data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]} + expected = DataFrame(data, columns=columns_names) + + generator = tuple_generator(10) + result = DataFrame.from_records(generator, columns=columns_names) + tm.assert_frame_equal(result, expected) + + def test_from_records_lists_generator(self): + def list_generator(length): + for i in range(length): + letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + yield [i, letters[i % len(letters)], i / length] + + columns_names = ["Integer", "String", "Float"] + columns = [ + [i[j] for i in list_generator(10)] for j in range(len(columns_names)) + ] + data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]} + expected = DataFrame(data, columns=columns_names) + + generator = list_generator(10) + result = DataFrame.from_records(generator, columns=columns_names) + tm.assert_frame_equal(result, expected) + + 
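# The generator tests above rely on from_records consuming lazy input; + # together with nrows (exercised in test_from_records_iterator) the + # pattern is roughly: + # + # gen = ((i, i / 10) for i in range(1_000_000)) + # df = DataFrame.from_records(gen, columns=["x", "y"], nrows=3) + # + # only the first three records are materialized. + +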
def test_from_records_columns_not_modified(self): + tuples = [(1, 2, 3), (1, 2, 3), (2, 5, 3)] + + columns = ["a", "b", "c"] + original_columns = list(columns) + + DataFrame.from_records(tuples, columns=columns, index="a") + + assert columns == original_columns + + def test_from_records_decimal(self): + tuples = [(Decimal("1.5"),), (Decimal("2.5"),), (None,)] + + df = DataFrame.from_records(tuples, columns=["a"]) + assert df["a"].dtype == object + + df = DataFrame.from_records(tuples, columns=["a"], coerce_float=True) + assert df["a"].dtype == np.float64 + assert np.isnan(df["a"].values[-1]) + + def test_from_records_duplicates(self): + result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"]) + + expected = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"]) + + tm.assert_frame_equal(result, expected) + + def test_from_records_set_index_name(self): + def create_dict(order_id): + return { + "order_id": order_id, + "quantity": np.random.default_rng(2).integers(1, 10), + "price": np.random.default_rng(2).integers(1, 10), + } + + documents = [create_dict(i) for i in range(10)] + # demo missing data + documents.append({"order_id": 10, "quantity": 5}) + + result = DataFrame.from_records(documents, index="order_id") + assert result.index.name == "order_id" + + # MultiIndex + result = DataFrame.from_records(documents, index=["order_id", "quantity"]) + assert result.index.names == ("order_id", "quantity") + + def test_from_records_misc_brokenness(self): + # GH#2179 + + data = {1: ["foo"], 2: ["bar"]} + + result = DataFrame.from_records(data, columns=["a", "b"]) + exp = DataFrame(data, columns=["a", "b"]) + tm.assert_frame_equal(result, exp) + + # overlap in index/index_names + + data = {"a": [1, 2, 3], "b": [4, 5, 6]} + + result = DataFrame.from_records(data, index=["a", "b", "c"]) + exp = DataFrame(data, index=["a", "b", "c"]) + tm.assert_frame_equal(result, exp) + + # GH#2623 + rows = [] + rows.append([datetime(2010, 1, 1), 1]) + rows.append([datetime(2010, 1, 2), "hi"]) # test col upconverts to obj + df2_obj = DataFrame.from_records(rows, columns=["date", "test"]) + result = df2_obj.dtypes + expected = Series( + [np.dtype("datetime64[ns]"), np.dtype("object")], index=["date", "test"] + ) + tm.assert_series_equal(result, expected) + + rows = [] + rows.append([datetime(2010, 1, 1), 1]) + rows.append([datetime(2010, 1, 2), 1]) + df2_obj = DataFrame.from_records(rows, columns=["date", "test"]) + result = df2_obj.dtypes + expected = Series( + [np.dtype("datetime64[ns]"), np.dtype("int64")], index=["date", "test"] + ) + tm.assert_series_equal(result, expected) + + def test_from_records_empty(self): + # GH#3562 + result = DataFrame.from_records([], columns=["a", "b", "c"]) + expected = DataFrame(columns=["a", "b", "c"]) + tm.assert_frame_equal(result, expected) + + result = DataFrame.from_records([], columns=["a", "b", "b"]) + expected = DataFrame(columns=["a", "b", "b"]) + tm.assert_frame_equal(result, expected) + + def test_from_records_empty_with_nonempty_fields_gh3682(self): + a = np.array([(1, 2)], dtype=[("id", np.int64), ("value", np.int64)]) + df = DataFrame.from_records(a, index="id") + + ex_index = Index([1], name="id") + expected = DataFrame({"value": [2]}, index=ex_index, columns=["value"]) + tm.assert_frame_equal(df, expected) + + b = a[:0] + df2 = DataFrame.from_records(b, index="id") + tm.assert_frame_equal(df2, df.iloc[:0]) + + def test_from_records_empty2(self): + # GH#42456 + dtype = [("prop", int)] + shape = (0, len(dtype)) + arr = np.empty(shape, 
dtype=dtype) + + result = DataFrame.from_records(arr) + expected = DataFrame({"prop": np.array([], dtype=int)}) + tm.assert_frame_equal(result, expected) + + alt = DataFrame(arr) + tm.assert_frame_equal(alt, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_coercion.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_coercion.py new file mode 100644 index 00000000..ba0d8613 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_coercion.py @@ -0,0 +1,199 @@ +""" +Tests for values coercion in setitem-like operations on DataFrame. + +For the most part, these should be multi-column DataFrames, otherwise +we would share the tests with Series. +""" +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + MultiIndex, + NaT, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestDataFrameSetitemCoercion: + @pytest.mark.parametrize("consolidate", [True, False]) + def test_loc_setitem_multiindex_columns(self, consolidate): + # GH#18415 Setting values in a single column preserves dtype, + # while setting them in multiple columns did unwanted cast. + + # Note that A here has 2 blocks, below we do the same thing + # with a consolidated frame. + A = DataFrame(np.zeros((6, 5), dtype=np.float32)) + A = pd.concat([A, A], axis=1, keys=[1, 2]) + if consolidate: + A = A._consolidate() + + A.loc[2:3, (1, slice(2, 3))] = np.ones((2, 2), dtype=np.float32) + assert (A.dtypes == np.float32).all() + + A.loc[0:5, (1, slice(2, 3))] = np.ones((6, 2), dtype=np.float32) + + assert (A.dtypes == np.float32).all() + + A.loc[:, (1, slice(2, 3))] = np.ones((6, 2), dtype=np.float32) + assert (A.dtypes == np.float32).all() + + # TODO: i think this isn't about MultiIndex and could be done with iloc? 
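+ # positionally, e.g. the first assignment above is equivalent to + # A.iloc[2:4, 2:4] = np.ones((2, 2), dtype=np.float32)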
+ + +def test_37477(): + # fixed by GH#45121 + orig = DataFrame({"A": [1, 2, 3], "B": [3, 4, 5]}) + expected = DataFrame({"A": [1, 2, 3], "B": [3, 1.2, 5]}) + + df = orig.copy() + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + df.at[1, "B"] = 1.2 + tm.assert_frame_equal(df, expected) + + df = orig.copy() + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + df.loc[1, "B"] = 1.2 + tm.assert_frame_equal(df, expected) + + df = orig.copy() + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + df.iat[1, 1] = 1.2 + tm.assert_frame_equal(df, expected) + + df = orig.copy() + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + df.iloc[1, 1] = 1.2 + tm.assert_frame_equal(df, expected) + + +def test_6942(indexer_al): + # check that the .at __setitem__ after setting "Live" actually sets the data + start = Timestamp("2014-04-01") + t1 = Timestamp("2014-04-23 12:42:38.883082") + t2 = Timestamp("2014-04-24 01:33:30.040039") + + dti = date_range(start, periods=1) + orig = DataFrame(index=dti, columns=["timenow", "Live"]) + + df = orig.copy() + indexer_al(df)[start, "timenow"] = t1 + + df["Live"] = True + + df.at[start, "timenow"] = t2 + assert df.iloc[0, 0] == t2 + + +def test_26395(indexer_al): + # .at case fixed by GH#45121 (best guess) + df = DataFrame(index=["A", "B", "C"]) + df["D"] = 0 + + indexer_al(df)["C", "D"] = 2 + expected = DataFrame({"D": [0, 0, 2]}, index=["A", "B", "C"], dtype=np.int64) + tm.assert_frame_equal(df, expected) + + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + indexer_al(df)["C", "D"] = 44.5 + expected = DataFrame({"D": [0, 0, 44.5]}, index=["A", "B", "C"], dtype=np.float64) + tm.assert_frame_equal(df, expected) + + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + indexer_al(df)["C", "D"] = "hello" + expected = DataFrame({"D": [0, 0, "hello"]}, index=["A", "B", "C"], dtype=object) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.xfail(reason="unwanted upcast") +def test_15231(): + df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"]) + df.loc[2] = Series({"a": 5, "b": 6}) + assert (df.dtypes == np.int64).all() + + df.loc[3] = Series({"a": 7}) + + # df["a"] doesn't have any NaNs, should not have been cast + exp_dtypes = Series([np.int64, np.float64], dtype=object, index=["a", "b"]) + tm.assert_series_equal(df.dtypes, exp_dtypes) + + +def test_iloc_setitem_unnecessary_float_upcasting(): + # GH#12255 + df = DataFrame( + { + 0: np.array([1, 3], dtype=np.float32), + 1: np.array([2, 4], dtype=np.float32), + 2: ["a", "b"], + } + ) + orig = df.copy() + + values = df[0].values.reshape(2, 1) + df.iloc[:, 0:1] = values + + tm.assert_frame_equal(df, orig) + + +@pytest.mark.xfail(reason="unwanted casting to dt64") +def test_12499(): + # TODO: OP in GH#12499 used np.datetime64("NaT") instead of pd.NaT, + # which has consequences for the expected df["two"] (though I think at + # the time it might not have because of a separate bug). See if it makes + # a difference which one we use here. 
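+ # (np.datetime64("NaT") is a typed numpy scalar while pd.NaT is the + # pandas singleton; setitem may infer a different result dtype from each)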
+ ts = Timestamp("2016-03-01 03:13:22.98986", tz="UTC") + + data = [{"one": 0, "two": ts}] + orig = DataFrame(data) + df = orig.copy() + df.loc[1] = [np.nan, NaT] + + expected = DataFrame( + {"one": [0, np.nan], "two": Series([ts, NaT], dtype="datetime64[ns, UTC]")} + ) + tm.assert_frame_equal(df, expected) + + data = [{"one": 0, "two": ts}] + df = orig.copy() + df.loc[1, :] = [np.nan, NaT] + tm.assert_frame_equal(df, expected) + + +def test_20476(): + mi = MultiIndex.from_product([["A", "B"], ["a", "b", "c"]]) + df = DataFrame(-1, index=range(3), columns=mi) + filler = DataFrame([[1, 2, 3.0]] * 3, index=range(3), columns=["a", "b", "c"]) + df["A"] = filler + + expected = DataFrame( + { + 0: [1, 1, 1], + 1: [2, 2, 2], + 2: [3.0, 3.0, 3.0], + 3: [-1, -1, -1], + 4: [-1, -1, -1], + 5: [-1, -1, -1], + } + ) + expected.columns = mi + exp_dtypes = Series( + [np.dtype(np.int64)] * 2 + [np.dtype(np.float64)] + [np.dtype(np.int64)] * 3, + index=mi, + ) + tm.assert_series_equal(df.dtypes, exp_dtypes) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_delitem.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_delitem.py new file mode 100644 index 00000000..daec991b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_delitem.py @@ -0,0 +1,60 @@ +import re + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, +) + + +class TestDataFrameDelItem: + def test_delitem(self, float_frame): + del float_frame["A"] + assert "A" not in float_frame + + def test_delitem_multiindex(self): + midx = MultiIndex.from_product([["A", "B"], [1, 2]]) + df = DataFrame(np.random.default_rng(2).standard_normal((4, 4)), columns=midx) + assert len(df.columns) == 4 + assert ("A",) in df.columns + assert "A" in df.columns + + result = df["A"] + assert isinstance(result, DataFrame) + del df["A"] + + assert len(df.columns) == 2 + + # A still in the levels, BUT get a KeyError if trying + # to delete + assert ("A",) not in df.columns + with pytest.raises(KeyError, match=re.escape("('A',)")): + del df[("A",)] + + # behavior of dropped/deleted MultiIndex levels changed from + # GH 2770 to GH 19027: MultiIndex no longer '.__contains__' + # levels which are dropped/deleted + assert "A" not in df.columns + with pytest.raises(KeyError, match=re.escape("('A',)")): + del df["A"] + + def test_delitem_corner(self, float_frame): + f = float_frame.copy() + del f["D"] + assert len(f.columns) == 3 + with pytest.raises(KeyError, match=r"^'D'$"): + del f["D"] + del f["B"] + assert len(f.columns) == 2 + + def test_delitem_col_still_multiindex(self): + arrays = [["a", "b", "c", "top"], ["", "", "", "OD"], ["", "", "", "wx"]] + + tuples = sorted(zip(*arrays)) + index = MultiIndex.from_tuples(tuples) + + df = DataFrame(np.random.default_rng(2).standard_normal((3, 4)), columns=index) + del df[("a", "", "")] + assert isinstance(df.columns, MultiIndex) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_get.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_get.py new file mode 100644 index 00000000..5f2651ee --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_get.py @@ -0,0 +1,27 @@ +import pytest + +from pandas import DataFrame +import pandas._testing as tm + + +class TestGet: + def test_get(self, float_frame): + b = float_frame.get("B") + tm.assert_series_equal(b, float_frame["B"]) + + assert float_frame.get("foo") is None + 
tm.assert_series_equal( + float_frame.get("foo", float_frame["B"]), float_frame["B"] + ) + + @pytest.mark.parametrize( + "df", + [ + DataFrame(), + DataFrame(columns=list("AB")), + DataFrame(columns=list("AB"), index=range(3)), + ], + ) + def test_get_none(self, df): + # see gh-5652 + assert df.get(None) is None diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_get_value.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_get_value.py new file mode 100644 index 00000000..65a1c64a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_get_value.py @@ -0,0 +1,22 @@ +import pytest + +from pandas import ( + DataFrame, + MultiIndex, +) + + +class TestGetValue: + def test_get_set_value_no_partial_indexing(self): + # partial w/ MultiIndex raise exception + index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)]) + df = DataFrame(index=index, columns=range(4)) + with pytest.raises(KeyError, match=r"^0$"): + df._get_value(0, 1) + + def test_get_value(self, float_frame): + for idx in float_frame.index: + for col in float_frame.columns: + result = float_frame._get_value(idx, col) + expected = float_frame[col][idx] + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_getitem.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_getitem.py new file mode 100644 index 00000000..9d9324f5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_getitem.py @@ -0,0 +1,478 @@ +import re + +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + DataFrame, + DateOffset, + DatetimeIndex, + Index, + MultiIndex, + Series, + Timestamp, + concat, + date_range, + get_dummies, + period_range, +) +import pandas._testing as tm +from pandas.core.arrays import SparseArray + + +class TestGetitem: + def test_getitem_unused_level_raises(self): + # GH#20410 + mi = MultiIndex( + levels=[["a_lot", "onlyone", "notevenone"], [1970, ""]], + codes=[[1, 0], [1, 0]], + ) + df = DataFrame(-1, index=range(3), columns=mi) + + with pytest.raises(KeyError, match="notevenone"): + df["notevenone"] + + def test_getitem_periodindex(self): + rng = period_range("1/1/2000", periods=5) + df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)), columns=rng) + + ts = df[rng[0]] + tm.assert_series_equal(ts, df.iloc[:, 0]) + + # GH#1211; smoketest unrelated to the rest of this test + repr(df) + + ts = df["1/1/2000"] + tm.assert_series_equal(ts, df.iloc[:, 0]) + + def test_getitem_list_of_labels_categoricalindex_cols(self): + # GH#16115 + cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")]) + + expected = DataFrame([[1, 0], [0, 1]], dtype="bool", index=[0, 1], columns=cats) + dummies = get_dummies(cats) + result = dummies[list(dummies.columns)] + tm.assert_frame_equal(result, expected) + + def test_getitem_sparse_column_return_type_and_dtype(self): + # https://github.com/pandas-dev/pandas/issues/23559 + data = SparseArray([0, 1]) + df = DataFrame({"A": data}) + expected = Series(data, name="A") + result = df["A"] + tm.assert_series_equal(result, expected) + + # Also check iloc and loc while we're here + result = df.iloc[:, 0] + tm.assert_series_equal(result, expected) + + result = df.loc[:, "A"] + tm.assert_series_equal(result, expected) + + def test_getitem_string_columns(self): + # GH#46185 + df = DataFrame([[1, 2]], columns=Index(["A", "B"], 
dtype="string")) + result = df.A + expected = df["A"] + tm.assert_series_equal(result, expected) + + +class TestGetitemListLike: + def test_getitem_list_missing_key(self): + # GH#13822, incorrect error string with non-unique columns when missing + # column is accessed + df = DataFrame({"x": [1.0], "y": [2.0], "z": [3.0]}) + df.columns = ["x", "x", "z"] + + # Check that we get the correct value in the KeyError + with pytest.raises(KeyError, match=r"\['y'\] not in index"): + df[["x", "y", "z"]] + + def test_getitem_list_duplicates(self): + # GH#1943 + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), columns=list("AABC") + ) + df.columns.name = "foo" + + result = df[["B", "C"]] + assert result.columns.name == "foo" + + expected = df.iloc[:, 2:] + tm.assert_frame_equal(result, expected) + + def test_getitem_dupe_cols(self): + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"]) + msg = "\"None of [Index(['baf'], dtype='object')] are in the [columns]\"" + with pytest.raises(KeyError, match=re.escape(msg)): + df[["baf"]] + + @pytest.mark.parametrize( + "idx_type", + [ + list, + iter, + Index, + set, + lambda keys: dict(zip(keys, range(len(keys)))), + lambda keys: dict(zip(keys, range(len(keys)))).keys(), + ], + ids=["list", "iter", "Index", "set", "dict", "dict_keys"], + ) + @pytest.mark.parametrize("levels", [1, 2]) + def test_getitem_listlike(self, idx_type, levels, float_frame): + # GH#21294 + + if levels == 1: + frame, missing = float_frame, "food" + else: + # MultiIndex columns + frame = DataFrame( + np.random.default_rng(2).standard_normal((8, 3)), + columns=Index( + [("foo", "bar"), ("baz", "qux"), ("peek", "aboo")], + name=("sth", "sth2"), + ), + ) + missing = ("good", "food") + + keys = [frame.columns[1], frame.columns[0]] + idx = idx_type(keys) + idx_check = list(idx_type(keys)) + + if isinstance(idx, (set, dict)): + with pytest.raises(TypeError, match="as an indexer is not supported"): + frame[idx] + + return + else: + result = frame[idx] + + expected = frame.loc[:, idx_check] + expected.columns.names = frame.columns.names + + tm.assert_frame_equal(result, expected) + + idx = idx_type(keys + [missing]) + with pytest.raises(KeyError, match="not in index"): + frame[idx] + + def test_getitem_iloc_generator(self): + # GH#39614 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + indexer = (x for x in [1, 2]) + result = df.iloc[indexer] + expected = DataFrame({"a": [2, 3], "b": [5, 6]}, index=[1, 2]) + tm.assert_frame_equal(result, expected) + + def test_getitem_iloc_two_dimensional_generator(self): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + indexer = (x for x in [1, 2]) + result = df.iloc[indexer, 1] + expected = Series([5, 6], name="b", index=[1, 2]) + tm.assert_series_equal(result, expected) + + def test_getitem_iloc_dateoffset_days(self): + # GH 46671 + df = DataFrame( + list(range(10)), + index=date_range("01-01-2022", periods=10, freq=DateOffset(days=1)), + ) + result = df.loc["2022-01-01":"2022-01-03"] + expected = DataFrame( + [0, 1, 2], + index=DatetimeIndex( + ["2022-01-01", "2022-01-02", "2022-01-03"], + dtype="datetime64[ns]", + freq=DateOffset(days=1), + ), + ) + tm.assert_frame_equal(result, expected) + + df = DataFrame( + list(range(10)), + index=date_range( + "01-01-2022", periods=10, freq=DateOffset(days=1, hours=2) + ), + ) + result = df.loc["2022-01-01":"2022-01-03"] + expected = DataFrame( + [0, 1, 2], + index=DatetimeIndex( + ["2022-01-01 00:00:00", "2022-01-02 02:00:00", "2022-01-03 04:00:00"], + dtype="datetime64[ns]", + 
freq=DateOffset(days=1, hours=2), + ), + ) + tm.assert_frame_equal(result, expected) + + df = DataFrame( + list(range(10)), + index=date_range("01-01-2022", periods=10, freq=DateOffset(minutes=3)), + ) + result = df.loc["2022-01-01":"2022-01-03"] + tm.assert_frame_equal(result, df) + + +class TestGetitemCallable: + def test_getitem_callable(self, float_frame): + # GH#12533 + result = float_frame[lambda x: "A"] + expected = float_frame.loc[:, "A"] + tm.assert_series_equal(result, expected) + + result = float_frame[lambda x: ["A", "B"]] + expected = float_frame.loc[:, ["A", "B"]] + tm.assert_frame_equal(result, float_frame.loc[:, ["A", "B"]]) + + df = float_frame[:3] + result = df[lambda x: [True, False, True]] + expected = float_frame.iloc[[0, 2], :] + tm.assert_frame_equal(result, expected) + + def test_loc_multiindex_columns_one_level(self): + # GH#29749 + df = DataFrame([[1, 2]], columns=[["a", "b"]]) + expected = DataFrame([1], columns=[["a"]]) + + result = df["a"] + tm.assert_frame_equal(result, expected) + + result = df.loc[:, "a"] + tm.assert_frame_equal(result, expected) + + +class TestGetitemBooleanMask: + def test_getitem_bool_mask_categorical_index(self): + df3 = DataFrame( + { + "A": np.arange(6, dtype="int64"), + }, + index=CategoricalIndex( + [1, 1, 2, 1, 3, 2], + dtype=CategoricalDtype([3, 2, 1], ordered=True), + name="B", + ), + ) + df4 = DataFrame( + { + "A": np.arange(6, dtype="int64"), + }, + index=CategoricalIndex( + [1, 1, 2, 1, 3, 2], + dtype=CategoricalDtype([3, 2, 1], ordered=False), + name="B", + ), + ) + + result = df3[df3.index == "a"] + expected = df3.iloc[[]] + tm.assert_frame_equal(result, expected) + + result = df4[df4.index == "a"] + expected = df4.iloc[[]] + tm.assert_frame_equal(result, expected) + + result = df3[df3.index == 1] + expected = df3.iloc[[0, 1, 3]] + tm.assert_frame_equal(result, expected) + + result = df4[df4.index == 1] + expected = df4.iloc[[0, 1, 3]] + tm.assert_frame_equal(result, expected) + + # since we have an ordered categorical + + # CategoricalIndex([1, 1, 2, 1, 3, 2], + # categories=[3, 2, 1], + # ordered=True, + # name='B') + result = df3[df3.index < 2] + expected = df3.iloc[[4]] + tm.assert_frame_equal(result, expected) + + result = df3[df3.index > 1] + expected = df3.iloc[[]] + tm.assert_frame_equal(result, expected) + + # unordered + # cannot be compared + + # CategoricalIndex([1, 1, 2, 1, 3, 2], + # categories=[3, 2, 1], + # ordered=False, + # name='B') + msg = "Unordered Categoricals can only compare equality or not" + with pytest.raises(TypeError, match=msg): + df4[df4.index < 2] + with pytest.raises(TypeError, match=msg): + df4[df4.index > 1] + + @pytest.mark.parametrize( + "data1,data2,expected_data", + ( + ( + [[1, 2], [3, 4]], + [[0.5, 6], [7, 8]], + [[np.nan, 3.0], [np.nan, 4.0], [np.nan, 7.0], [6.0, 8.0]], + ), + ( + [[1, 2], [3, 4]], + [[5, 6], [7, 8]], + [[np.nan, 3.0], [np.nan, 4.0], [5, 7], [6, 8]], + ), + ), + ) + def test_getitem_bool_mask_duplicate_columns_mixed_dtypes( + self, + data1, + data2, + expected_data, + ): + # GH#31954 + + df1 = DataFrame(np.array(data1)) + df2 = DataFrame(np.array(data2)) + df = concat([df1, df2], axis=1) + + result = df[df > 2] + + exdict = {i: np.array(col) for i, col in enumerate(expected_data)} + expected = DataFrame(exdict).rename(columns={2: 0, 3: 1}) + tm.assert_frame_equal(result, expected) + + @pytest.fixture + def df_dup_cols(self): + dups = ["A", "A", "C", "D"] + df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64") + return df + + def 
test_getitem_boolean_frame_unaligned_with_duplicate_columns(self, df_dup_cols): + # `df.A > 6` is a DataFrame with a different shape from df + + # boolean with the duplicate raises + df = df_dup_cols + msg = "cannot reindex on an axis with duplicate labels" + with pytest.raises(ValueError, match=msg): + df[df.A > 6] + + def test_getitem_boolean_series_with_duplicate_columns(self, df_dup_cols): + # boolean indexing + # GH#4879 + df = DataFrame( + np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64" + ) + expected = df[df.C > 6] + expected.columns = df_dup_cols.columns + + df = df_dup_cols + result = df[df.C > 6] + + tm.assert_frame_equal(result, expected) + result.dtypes + str(result) + + def test_getitem_boolean_frame_with_duplicate_columns(self, df_dup_cols): + # where + df = DataFrame( + np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64" + ) + # `df > 6` is a DataFrame with the same shape+alignment as df + expected = df[df > 6] + expected.columns = df_dup_cols.columns + + df = df_dup_cols + result = df[df > 6] + + tm.assert_frame_equal(result, expected) + result.dtypes + str(result) + + def test_getitem_empty_frame_with_boolean(self): + # Test for issue GH#11859 + + df = DataFrame() + df2 = df[df > 0] + tm.assert_frame_equal(df, df2) + + def test_getitem_returns_view_when_column_is_unique_in_df( + self, using_copy_on_write + ): + # GH#45316 + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"]) + df_orig = df.copy() + view = df["b"] + view.loc[:] = 100 + if using_copy_on_write: + expected = df_orig + else: + expected = DataFrame([[1, 2, 100], [4, 5, 100]], columns=["a", "a", "b"]) + tm.assert_frame_equal(df, expected) + + def test_getitem_frozenset_unique_in_column(self): + # GH#41062 + df = DataFrame([[1, 2, 3, 4]], columns=[frozenset(["KEY"]), "B", "C", "C"]) + result = df[frozenset(["KEY"])] + expected = Series([1], name=frozenset(["KEY"])) + tm.assert_series_equal(result, expected) + + +class TestGetitemSlice: + def test_getitem_slice_float64(self, frame_or_series): + values = np.arange(10.0, 50.0, 2) + index = Index(values) + + start, end = values[[5, 15]] + + data = np.random.default_rng(2).standard_normal((20, 3)) + if frame_or_series is not DataFrame: + data = data[:, 0] + + obj = frame_or_series(data, index=index) + + result = obj[start:end] + expected = obj.iloc[5:16] + tm.assert_equal(result, expected) + + result = obj.loc[start:end] + tm.assert_equal(result, expected) + + def test_getitem_datetime_slice(self): + # GH#43223 + df = DataFrame( + {"a": 0}, + index=DatetimeIndex( + [ + "11.01.2011 22:00", + "11.01.2011 23:00", + "12.01.2011 00:00", + "2011-01-13 00:00", + ] + ), + ) + with pytest.raises( + KeyError, match="Value based partial slicing on non-monotonic" + ): + df["2011-01-01":"2011-11-01"] + + def test_getitem_slice_same_dim_only_one_axis(self): + # GH#54622 + df = DataFrame(np.random.default_rng(2).standard_normal((10, 8))) + result = df.iloc[(slice(None, None, 2),)] + assert result.shape == (5, 8) + expected = df.iloc[slice(None, None, 2), slice(None)] + tm.assert_frame_equal(result, expected) + + +class TestGetitemDeprecatedIndexers: + @pytest.mark.parametrize("key", [{"a", "b"}, {"a": "a"}]) + def test_getitem_dict_and_set_deprecated(self, key): + # GH#42825 enforced in 2.0 + df = DataFrame( + [[1, 2], [3, 4]], columns=MultiIndex.from_tuples([("a", 1), ("b", 2)]) + ) + with pytest.raises(TypeError, match="as an indexer is not supported"): + df[key] diff --git 
a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_indexing.py new file mode 100644 index 00000000..51b0a0a1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_indexing.py @@ -0,0 +1,1973 @@ +from collections import namedtuple +from datetime import ( + datetime, + timedelta, +) +from decimal import Decimal +import re + +import numpy as np +import pytest + +from pandas._libs import iNaT +from pandas.errors import ( + InvalidIndexError, + PerformanceWarning, + SettingWithCopyError, +) +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import is_integer + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + Series, + Timestamp, + date_range, + isna, + notna, + to_datetime, +) +import pandas._testing as tm + +# We pass through a TypeError raised by numpy +_slice_msg = "slice indices must be integers or None or have an __index__ method" + + +class TestDataFrameIndexing: + def test_getitem(self, float_frame): + # Slicing + sl = float_frame[:20] + assert len(sl.index) == 20 + + # Column access + for _, series in sl.items(): + assert len(series.index) == 20 + assert tm.equalContents(series.index, sl.index) + + for key, _ in float_frame._series.items(): + assert float_frame[key] is not None + + assert "random" not in float_frame + with pytest.raises(KeyError, match="random"): + float_frame["random"] + + def test_getitem_numeric_should_not_fallback_to_positional(self, any_numeric_dtype): + # GH51053 + dtype = any_numeric_dtype + idx = Index([1, 0, 1], dtype=dtype) + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=idx) + result = df[1] + expected = DataFrame([[1, 3], [4, 6]], columns=Index([1, 1], dtype=dtype)) + tm.assert_frame_equal(result, expected, check_exact=True) + + def test_getitem2(self, float_frame): + df = float_frame.copy() + df["$10"] = np.random.default_rng(2).standard_normal(len(df)) + + ad = np.random.default_rng(2).standard_normal(len(df)) + df["@awesome_domain"] = ad + + with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")): + df.__getitem__('df["$10"]') + + res = df["@awesome_domain"] + tm.assert_numpy_array_equal(ad, res.values) + + def test_setitem_numeric_should_not_fallback_to_positional(self, any_numeric_dtype): + # GH51053 + dtype = any_numeric_dtype + idx = Index([1, 0, 1], dtype=dtype) + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=idx) + df[1] = 10 + expected = DataFrame([[10, 2, 10], [10, 5, 10]], columns=idx) + tm.assert_frame_equal(df, expected, check_exact=True) + + def test_setitem_list(self, float_frame): + float_frame["E"] = "foo" + data = float_frame[["A", "B"]] + float_frame[["B", "A"]] = data + + tm.assert_series_equal(float_frame["B"], data["A"], check_names=False) + tm.assert_series_equal(float_frame["A"], data["B"], check_names=False) + + msg = "Columns must be same length as key" + with pytest.raises(ValueError, match=msg): + data[["A"]] = float_frame[["A", "B"]] + newcolumndata = range(len(data.index) - 1) + msg = ( + rf"Length of values \({len(newcolumndata)}\) " + rf"does not match length of index \({len(data)}\)" + ) + with pytest.raises(ValueError, match=msg): + data["A"] = newcolumndata + + def test_setitem_list2(self): + df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=int) + df.loc[1, ["tt1", "tt2"]] = [1, 2] + + result = df.loc[df.index[1], ["tt1", "tt2"]] + expected = Series([1, 2], df.columns, 
dtype=int, name=1) + tm.assert_series_equal(result, expected) + + df["tt1"] = df["tt2"] = "0" + df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"] + result = df.loc[df.index[1], ["tt1", "tt2"]] + expected = Series(["1", "2"], df.columns, name=1) + tm.assert_series_equal(result, expected) + + def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame): + # boolean indexing + d = datetime_frame.index[10] + indexer = datetime_frame.index > d + indexer_obj = indexer.astype(object) + + subindex = datetime_frame.index[indexer] + subframe = datetime_frame[indexer] + + tm.assert_index_equal(subindex, subframe.index) + with pytest.raises(ValueError, match="Item wrong length"): + datetime_frame[indexer[:-1]] + + subframe_obj = datetime_frame[indexer_obj] + tm.assert_frame_equal(subframe_obj, subframe) + + with pytest.raises(ValueError, match="Boolean array expected"): + datetime_frame[datetime_frame] + + # test that Series work + indexer_obj = Series(indexer_obj, datetime_frame.index) + + subframe_obj = datetime_frame[indexer_obj] + tm.assert_frame_equal(subframe_obj, subframe) + + # test that Series indexers reindex + # we are producing a warning that since the passed boolean + # key is not the same as the given index, we will reindex + # not sure this is really necessary + with tm.assert_produces_warning(UserWarning): + indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1]) + subframe_obj = datetime_frame[indexer_obj] + tm.assert_frame_equal(subframe_obj, subframe) + + # test df[df > 0] + for df in [ + datetime_frame, + mixed_float_frame, + mixed_int_frame, + ]: + data = df._get_numeric_data() + bif = df[df > 0] + bifw = DataFrame( + {c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns}, + index=data.index, + columns=data.columns, + ) + + # add back other columns to compare + for c in df.columns: + if c not in bifw: + bifw[c] = df[c] + bifw = bifw.reindex(columns=df.columns) + + tm.assert_frame_equal(bif, bifw, check_dtype=False) + for c in df.columns: + if bif[c].dtype != bifw[c].dtype: + assert bif[c].dtype == df[c].dtype + + def test_getitem_boolean_casting(self, datetime_frame): + # don't upcast if we don't need to + df = datetime_frame.copy() + df["E"] = 1 + df["E"] = df["E"].astype("int32") + df["E1"] = df["E"].copy() + df["F"] = 1 + df["F"] = df["F"].astype("int64") + df["F1"] = df["F"].copy() + + casted = df[df > 0] + result = casted.dtypes + expected = Series( + [np.dtype("float64")] * 4 + + [np.dtype("int32")] * 2 + + [np.dtype("int64")] * 2, + index=["A", "B", "C", "D", "E", "E1", "F", "F1"], + ) + tm.assert_series_equal(result, expected) + + # int block splitting + df.loc[df.index[1:3], ["E1", "F1"]] = 0 + casted = df[df > 0] + result = casted.dtypes + expected = Series( + [np.dtype("float64")] * 4 + + [np.dtype("int32")] + + [np.dtype("float64")] + + [np.dtype("int64")] + + [np.dtype("float64")], + index=["A", "B", "C", "D", "E", "E1", "F", "F1"], + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "lst", [[True, False, True], [True, True, True], [False, False, False]] + ) + def test_getitem_boolean_list(self, lst): + df = DataFrame(np.arange(12).reshape(3, 4)) + result = df[lst] + expected = df.loc[df.index[lst]] + tm.assert_frame_equal(result, expected) + + def test_getitem_boolean_iadd(self): + arr = np.random.default_rng(2).standard_normal((5, 5)) + + df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"]) + + df[df < 0] += 1 + arr[arr < 0] += 1 + + tm.assert_almost_equal(df.values, arr) + + def 
test_boolean_index_empty_corner(self): + # #2096 + blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([])) + + # both of these should succeed trivially + k = np.array([], bool) + + blah[k] + blah[k] = 0 + + def test_getitem_ix_mixed_integer(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 3)), + index=[1, 10, "C", "E"], + columns=[1, 2, 3], + ) + + result = df.iloc[:-1] + expected = df.loc[df.index[:-1]] + tm.assert_frame_equal(result, expected) + + result = df.loc[[1, 10]] + expected = df.loc[Index([1, 10])] + tm.assert_frame_equal(result, expected) + + def test_getitem_ix_mixed_integer2(self): + # 11320 + df = DataFrame( + { + "rna": (1.5, 2.2, 3.2, 4.5), + -1000: [11, 21, 36, 40], + 0: [10, 22, 43, 34], + 1000: [0, 10, 20, 30], + }, + columns=["rna", -1000, 0, 1000], + ) + result = df[[1000]] + expected = df.iloc[:, [3]] + tm.assert_frame_equal(result, expected) + result = df[[-1000]] + expected = df.iloc[:, [1]] + tm.assert_frame_equal(result, expected) + + def test_getattr(self, float_frame): + tm.assert_series_equal(float_frame.A, float_frame["A"]) + msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'" + with pytest.raises(AttributeError, match=msg): + float_frame.NONEXISTENT_NAME + + def test_setattr_column(self): + df = DataFrame({"foobar": 1}, index=range(10)) + + df.foobar = 5 + assert (df.foobar == 5).all() + + def test_setitem(self, float_frame, using_copy_on_write): + # not sure what else to do here + series = float_frame["A"][::2] + float_frame["col5"] = series + assert "col5" in float_frame + + assert len(series) == 15 + assert len(float_frame) == 30 + + exp = np.ravel(np.column_stack((series.values, [np.nan] * 15))) + exp = Series(exp, index=float_frame.index, name="col5") + tm.assert_series_equal(float_frame["col5"], exp) + + series = float_frame["A"] + float_frame["col6"] = series + tm.assert_series_equal(series, float_frame["col6"], check_names=False) + + # set ndarray + arr = np.random.default_rng(2).standard_normal(len(float_frame)) + float_frame["col9"] = arr + assert (float_frame["col9"] == arr).all() + + float_frame["col7"] = 5 + assert (float_frame["col7"] == 5).all() + + float_frame["col0"] = 3.14 + assert (float_frame["col0"] == 3.14).all() + + float_frame["col8"] = "foo" + assert (float_frame["col8"] == "foo").all() + + # this is partially a view (e.g. 
some blocks are view) + # so raise/warn + smaller = float_frame[:2] + + msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame" + if using_copy_on_write: + # With CoW, adding a new column doesn't raise a warning + smaller["col10"] = ["1", "2"] + else: + with pytest.raises(SettingWithCopyError, match=msg): + smaller["col10"] = ["1", "2"] + + assert smaller["col10"].dtype == np.object_ + assert (smaller["col10"] == ["1", "2"]).all() + + def test_setitem2(self): + # dtype changing GH4204 + df = DataFrame([[0, 0]]) + df.iloc[0] = np.nan + expected = DataFrame([[np.nan, np.nan]]) + tm.assert_frame_equal(df, expected) + + df = DataFrame([[0, 0]]) + df.loc[0] = np.nan + tm.assert_frame_equal(df, expected) + + def test_setitem_boolean(self, float_frame): + df = float_frame.copy() + values = float_frame.values.copy() + + df[df["A"] > 0] = 4 + values[values[:, 0] > 0] = 4 + tm.assert_almost_equal(df.values, values) + + # test that column reindexing works + series = df["A"] == 4 + series = series.reindex(df.index[::-1]) + df[series] = 1 + values[values[:, 0] == 4] = 1 + tm.assert_almost_equal(df.values, values) + + df[df > 0] = 5 + values[values > 0] = 5 + tm.assert_almost_equal(df.values, values) + + df[df == 5] = 0 + values[values == 5] = 0 + tm.assert_almost_equal(df.values, values) + + # a df that needs alignment first + df[df[:-1] < 0] = 2 + np.putmask(values[:-1], values[:-1] < 0, 2) + tm.assert_almost_equal(df.values, values) + + # indexed with same shape but rows-reversed df + df[df[::-1] == 2] = 3 + values[values == 2] = 3 + tm.assert_almost_equal(df.values, values) + + msg = "Must pass DataFrame or 2-d ndarray with boolean values only" + with pytest.raises(TypeError, match=msg): + df[df * 0] = 2 + + # index with DataFrame + df_orig = df.copy() + mask = df > np.abs(df) + df[df > np.abs(df)] = np.nan + values = df_orig.values.copy() + values[mask.values] = np.nan + expected = DataFrame(values, index=df_orig.index, columns=df_orig.columns) + tm.assert_frame_equal(df, expected) + + # set from DataFrame + df[df > np.abs(df)] = df * 2 + np.putmask(values, mask.values, df.values * 2) + expected = DataFrame(values, index=df_orig.index, columns=df_orig.columns) + tm.assert_frame_equal(df, expected) + + def test_setitem_cast(self, float_frame): + float_frame["D"] = float_frame["D"].astype("i8") + assert float_frame["D"].dtype == np.int64 + + # #669, should not cast? 
+        # this is now set to int64, which means a replacement of the column to
+        # the value dtype (and nothing to do with the existing dtype)
+        float_frame["B"] = 0
+        assert float_frame["B"].dtype == np.int64
+
+        # it is cast if an array is passed, of course
+        float_frame["B"] = np.arange(len(float_frame))
+        assert issubclass(float_frame["B"].dtype.type, np.integer)
+
+        float_frame["foo"] = "bar"
+        float_frame["foo"] = 0
+        assert float_frame["foo"].dtype == np.int64
+
+        float_frame["foo"] = "bar"
+        float_frame["foo"] = 2.5
+        assert float_frame["foo"].dtype == np.float64
+
+        float_frame["something"] = 0
+        assert float_frame["something"].dtype == np.int64
+        float_frame["something"] = 2
+        assert float_frame["something"].dtype == np.int64
+        float_frame["something"] = 2.5
+        assert float_frame["something"].dtype == np.float64
+
+    def test_setitem_corner(self, float_frame):
+        # corner case
+        df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
+        del df["B"]
+        df["B"] = [1.0, 2.0, 3.0]
+        assert "B" in df
+        assert len(df.columns) == 2
+
+        df["A"] = "beginning"
+        df["E"] = "foo"
+        df["D"] = "bar"
+        df[datetime.now()] = "date"
+        df[datetime.now()] = 5.0
+
+        # what to do when empty frame with index
+        dm = DataFrame(index=float_frame.index)
+        dm["A"] = "foo"
+        dm["B"] = "bar"
+        assert len(dm.columns) == 2
+        assert dm.values.dtype == np.object_
+
+        # upcast
+        dm["C"] = 1
+        assert dm["C"].dtype == np.int64
+
+        dm["E"] = 1.0
+        assert dm["E"].dtype == np.float64
+
+        # set existing column
+        dm["A"] = "bar"
+        assert "bar" == dm["A"].iloc[0]
+
+        dm = DataFrame(index=np.arange(3))
+        dm["A"] = 1
+        dm["foo"] = "bar"
+        del dm["foo"]
+        dm["foo"] = "bar"
+        assert dm["foo"].dtype == np.object_
+
+        dm["coercible"] = ["1", "2", "3"]
+        assert dm["coercible"].dtype == np.object_
+
+    def test_setitem_corner2(self):
+        data = {
+            "title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
+            "cruft": np.random.default_rng(2).random(20),
+        }
+
+        df = DataFrame(data)
+        ix = df[df["title"] == "bar"].index
+
+        df.loc[ix, ["title"]] = "foobar"
+        df.loc[ix, ["cruft"]] = 0
+
+        assert df.loc[1, "title"] == "foobar"
+        assert df.loc[1, "cruft"] == 0
+
+    def test_setitem_ambig(self):
+        # Difficulties with mixed-type data
+        # Created as float type
+        dm = DataFrame(index=range(3), columns=range(3))
+
+        coercible_series = Series([Decimal(1) for _ in range(3)], index=range(3))
+        uncoercible_series = Series(["foo", "bzr", "baz"], index=range(3))
+
+        dm[0] = np.ones(3)
+        assert len(dm.columns) == 3
+
+        dm[1] = coercible_series
+        assert len(dm.columns) == 3
+
+        dm[2] = uncoercible_series
+        assert len(dm.columns) == 3
+        assert dm[2].dtype == np.object_
+
+    def test_setitem_None(self, float_frame):
+        # GH #766
+        float_frame[None] = float_frame["A"]
+        tm.assert_series_equal(
+            float_frame.iloc[:, -1], float_frame["A"], check_names=False
+        )
+        tm.assert_series_equal(
+            float_frame.loc[:, None], float_frame["A"], check_names=False
+        )
+        tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
+        repr(float_frame)
+
+    def test_loc_setitem_boolean_mask_allfalse(self):
+        # GH 9596
+        df = DataFrame(
+            {"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
+        )
+
+        result = df.copy()
+        result.loc[result.b.isna(), "a"] = result.a
+        tm.assert_frame_equal(result, df)
+
+    def test_getitem_fancy_slice_integers_step(self):
+        df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)))
+
+        # this is OK
+        df.iloc[:8:2]
+        df.iloc[:8:2] = np.nan
+        assert isna(df.iloc[:8:2]).values.all()
+
+    def
test_getitem_setitem_integer_slice_keyerrors(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 5)), index=range(0, 20, 2) + ) + + # this is OK + cp = df.copy() + cp.iloc[4:10] = 0 + assert (cp.iloc[4:10] == 0).values.all() + + # so is this + cp = df.copy() + cp.iloc[3:11] = 0 + assert (cp.iloc[3:11] == 0).values.all() + + result = df.iloc[2:6] + result2 = df.loc[3:11] + expected = df.reindex([4, 6, 8, 10]) + + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + # non-monotonic, raise KeyError + df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]] + with pytest.raises(KeyError, match=r"^3$"): + df2.loc[3:11] + with pytest.raises(KeyError, match=r"^3$"): + df2.loc[3:11] = 0 + + @td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view + def test_fancy_getitem_slice_mixed( + self, float_frame, float_string_frame, using_copy_on_write + ): + sliced = float_string_frame.iloc[:, -3:] + assert sliced["D"].dtype == np.float64 + + # get view with single block + # setting it triggers setting with copy + original = float_frame.copy() + sliced = float_frame.iloc[:, -3:] + + assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values) + + sliced.loc[:, "C"] = 4.0 + if not using_copy_on_write: + assert (float_frame["C"] == 4).all() + + # with the enforcement of GH#45333 in 2.0, this remains a view + np.shares_memory(sliced["C"]._values, float_frame["C"]._values) + else: + tm.assert_frame_equal(float_frame, original) + + def test_getitem_setitem_non_ix_labels(self): + df = tm.makeTimeDataFrame() + + start, end = df.index[[5, 10]] + + result = df.loc[start:end] + result2 = df[start:end] + expected = df[5:11] + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + result = df.copy() + result.loc[start:end] = 0 + result2 = df.copy() + result2[start:end] = 0 + expected = df.copy() + expected[5:11] = 0 + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + def test_ix_multi_take(self): + df = DataFrame(np.random.default_rng(2).standard_normal((3, 2))) + rs = df.loc[df.index == 0, :] + xp = df.reindex([0]) + tm.assert_frame_equal(rs, xp) + + # GH#1321 + df = DataFrame(np.random.default_rng(2).standard_normal((3, 2))) + rs = df.loc[df.index == 0, df.columns == 1] + xp = df.reindex(index=[0], columns=[1]) + tm.assert_frame_equal(rs, xp) + + def test_getitem_fancy_scalar(self, float_frame): + f = float_frame + ix = f.loc + + # individual value + for col in f.columns: + ts = f[col] + for idx in f.index[::5]: + assert ix[idx, col] == ts[idx] + + @td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values + def test_setitem_fancy_scalar(self, float_frame): + f = float_frame + expected = float_frame.copy() + ix = f.loc + + # individual value + for j, col in enumerate(f.columns): + f[col] + for idx in f.index[::5]: + i = f.index.get_loc(idx) + val = np.random.default_rng(2).standard_normal() + expected.iloc[i, j] = val + + ix[idx, col] = val + tm.assert_frame_equal(f, expected) + + def test_getitem_fancy_boolean(self, float_frame): + f = float_frame + ix = f.loc + + expected = f.reindex(columns=["B", "D"]) + result = ix[:, [False, True, False, True]] + tm.assert_frame_equal(result, expected) + + expected = f.reindex(index=f.index[5:10], columns=["B", "D"]) + result = ix[f.index[5:10], [False, True, False, True]] + tm.assert_frame_equal(result, expected) + + boolvec = f.index > f.index[7] + expected = f.reindex(index=f.index[boolvec]) + 
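+        # [Editor's note, not part of the upstream test] a boolean array aligned
+        # with the index selects rows, with or without an explicit column slice:
+        # >>> d = DataFrame({"x": [1, 2, 3]})
+        # >>> d.loc[np.array([True, False, True])].shape
+        # (2, 1)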
result = ix[boolvec] + tm.assert_frame_equal(result, expected) + result = ix[boolvec, :] + tm.assert_frame_equal(result, expected) + + result = ix[boolvec, f.columns[2:]] + expected = f.reindex(index=f.index[boolvec], columns=["C", "D"]) + tm.assert_frame_equal(result, expected) + + @td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values + def test_setitem_fancy_boolean(self, float_frame): + # from 2d, set with booleans + frame = float_frame.copy() + expected = float_frame.copy() + values = expected.values.copy() + + mask = frame["A"] > 0 + frame.loc[mask] = 0.0 + values[mask.values] = 0.0 + expected = DataFrame(values, index=expected.index, columns=expected.columns) + tm.assert_frame_equal(frame, expected) + + frame = float_frame.copy() + expected = float_frame.copy() + values = expected.values.copy() + frame.loc[mask, ["A", "B"]] = 0.0 + values[mask.values, :2] = 0.0 + expected = DataFrame(values, index=expected.index, columns=expected.columns) + tm.assert_frame_equal(frame, expected) + + def test_getitem_fancy_ints(self, float_frame): + result = float_frame.iloc[[1, 4, 7]] + expected = float_frame.loc[float_frame.index[[1, 4, 7]]] + tm.assert_frame_equal(result, expected) + + result = float_frame.iloc[:, [2, 0, 1]] + expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]] + tm.assert_frame_equal(result, expected) + + def test_getitem_setitem_boolean_misaligned(self, float_frame): + # boolean index misaligned labels + mask = float_frame["A"][::-1] > 1 + + result = float_frame.loc[mask] + expected = float_frame.loc[mask[::-1]] + tm.assert_frame_equal(result, expected) + + cp = float_frame.copy() + expected = float_frame.copy() + cp.loc[mask] = 0 + expected.loc[mask] = 0 + tm.assert_frame_equal(cp, expected) + + def test_getitem_setitem_boolean_multi(self): + df = DataFrame(np.random.default_rng(2).standard_normal((3, 2))) + + # get + k1 = np.array([True, False, True]) + k2 = np.array([False, True]) + result = df.loc[k1, k2] + expected = df.loc[[0, 2], [1]] + tm.assert_frame_equal(result, expected) + + expected = df.copy() + df.loc[np.array([True, False, True]), np.array([False, True])] = 5 + expected.loc[[0, 2], [1]] = 5 + tm.assert_frame_equal(df, expected) + + def test_getitem_setitem_float_labels(self, using_array_manager): + index = Index([1.5, 2, 3, 4, 5]) + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)), index=index) + + result = df.loc[1.5:4] + expected = df.reindex([1.5, 2, 3, 4]) + tm.assert_frame_equal(result, expected) + assert len(result) == 4 + + result = df.loc[4:5] + expected = df.reindex([4, 5]) # reindex with int + tm.assert_frame_equal(result, expected, check_index_type=False) + assert len(result) == 2 + + result = df.loc[4:5] + expected = df.reindex([4.0, 5.0]) # reindex with float + tm.assert_frame_equal(result, expected) + assert len(result) == 2 + + # loc_float changes this to work properly + result = df.loc[1:2] + expected = df.iloc[0:2] + tm.assert_frame_equal(result, expected) + + df.loc[1:2] = 0 + msg = r"The behavior of obj\[i:j\] with a float-dtype index" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df[1:2] + assert (result == 0).all().all() + + # #2727 + index = Index([1.0, 2.5, 3.5, 4.5, 5.0]) + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)), index=index) + + # positional slicing only via iloc! 
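+        # [Editor's sketch, not part of the upstream test] with a float index,
+        # .loc slices by label (both endpoints included) while .iloc insists on
+        # integer positions:
+        # >>> s = Series(range(5), index=[1.0, 2.5, 3.5, 4.5, 5.0])
+        # >>> list(s.loc[1.0:3.5])  # label-based
+        # [0, 1, 2]
+        # >>> s.iloc[1.0:3]  # raises TypeError, as asserted below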
+ msg = ( + "cannot do positional indexing on Index with " + r"these indexers \[1.0\] of type float" + ) + with pytest.raises(TypeError, match=msg): + df.iloc[1.0:5] + + result = df.iloc[4:5] + expected = df.reindex([5.0]) + tm.assert_frame_equal(result, expected) + assert len(result) == 1 + + cp = df.copy() + + with pytest.raises(TypeError, match=_slice_msg): + cp.iloc[1.0:5] = 0 + + with pytest.raises(TypeError, match=msg): + result = cp.iloc[1.0:5] == 0 + + assert result.values.all() + assert (cp.iloc[0:1] == df.iloc[0:1]).values.all() + + cp = df.copy() + cp.iloc[4:5] = 0 + assert (cp.iloc[4:5] == 0).values.all() + assert (cp.iloc[0:4] == df.iloc[0:4]).values.all() + + # float slicing + result = df.loc[1.0:5] + expected = df + tm.assert_frame_equal(result, expected) + assert len(result) == 5 + + result = df.loc[1.1:5] + expected = df.reindex([2.5, 3.5, 4.5, 5.0]) + tm.assert_frame_equal(result, expected) + assert len(result) == 4 + + result = df.loc[4.51:5] + expected = df.reindex([5.0]) + tm.assert_frame_equal(result, expected) + assert len(result) == 1 + + result = df.loc[1.0:5.0] + expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0]) + tm.assert_frame_equal(result, expected) + assert len(result) == 5 + + cp = df.copy() + cp.loc[1.0:5.0] = 0 + result = cp.loc[1.0:5.0] + assert (result == 0).values.all() + + def test_setitem_single_column_mixed_datetime(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + index=["a", "b", "c", "d", "e"], + columns=["foo", "bar", "baz"], + ) + + df["timestamp"] = Timestamp("20010102") + + # check our dtypes + result = df.dtypes + expected = Series( + [np.dtype("float64")] * 3 + [np.dtype("datetime64[s]")], + index=["foo", "bar", "baz", "timestamp"], + ) + tm.assert_series_equal(result, expected) + + # GH#16674 iNaT is treated as an integer when given by the user + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + df.loc["b", "timestamp"] = iNaT + assert not isna(df.loc["b", "timestamp"]) + assert df["timestamp"].dtype == np.object_ + assert df.loc["b", "timestamp"] == iNaT + + # allow this syntax (as of GH#3216) + df.loc["c", "timestamp"] = np.nan + assert isna(df.loc["c", "timestamp"]) + + # allow this syntax + df.loc["d", :] = np.nan + assert not isna(df.loc["c", :]).all() + + def test_setitem_mixed_datetime(self): + # GH 9336 + expected = DataFrame( + { + "a": [0, 0, 0, 0, 13, 14], + "b": [ + datetime(2012, 1, 1), + 1, + "x", + "y", + datetime(2013, 1, 1), + datetime(2014, 1, 1), + ], + } + ) + df = DataFrame(0, columns=list("ab"), index=range(6)) + df["b"] = pd.NaT + df.loc[0, "b"] = datetime(2012, 1, 1) + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + df.loc[1, "b"] = 1 + df.loc[[2, 3], "b"] = "x", "y" + A = np.array( + [ + [13, np.datetime64("2013-01-01T00:00:00")], + [14, np.datetime64("2014-01-01T00:00:00")], + ] + ) + df.loc[[4, 5], ["a", "b"]] = A + tm.assert_frame_equal(df, expected) + + def test_setitem_frame_float(self, float_frame): + piece = float_frame.loc[float_frame.index[:2], ["A", "B"]] + float_frame.loc[float_frame.index[-2] :, ["A", "B"]] = piece.values + result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values + expected = piece.values + tm.assert_almost_equal(result, expected) + + def test_setitem_frame_mixed(self, float_string_frame): + # GH 3216 + + # already aligned + f = float_string_frame.copy() + piece = DataFrame( + [[1.0, 2.0], [3.0, 4.0]], index=f.index[0:2], columns=["A", "B"] + 
) + key = (f.index[slice(None, 2)], ["A", "B"]) + f.loc[key] = piece + tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values) + + def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame): + # GH#3216 rows unaligned + f = float_string_frame.copy() + piece = DataFrame( + [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]], + index=list(f.index[0:2]) + ["foo", "bar"], + columns=["A", "B"], + ) + key = (f.index[slice(None, 2)], ["A", "B"]) + f.loc[key] = piece + tm.assert_almost_equal( + f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2] + ) + + def test_setitem_frame_mixed_key_unaligned(self, float_string_frame): + # GH#3216 key is unaligned with values + f = float_string_frame.copy() + piece = f.loc[f.index[:2], ["A"]] + piece.index = f.index[-2:] + key = (f.index[slice(-2, None)], ["A", "B"]) + f.loc[key] = piece + piece["B"] = np.nan + tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values) + + def test_setitem_frame_mixed_ndarray(self, float_string_frame): + # GH#3216 ndarray + f = float_string_frame.copy() + piece = float_string_frame.loc[f.index[:2], ["A", "B"]] + key = (f.index[slice(-2, None)], ["A", "B"]) + f.loc[key] = piece.values + tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values) + + def test_setitem_frame_upcast(self): + # needs upcasting + df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"]) + df2 = df.copy() + df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5 + expected = df.reindex(columns=["A", "B"]) + expected += 0.5 + expected["C"] = df["C"] + tm.assert_frame_equal(df2, expected) + + def test_setitem_frame_align(self, float_frame): + piece = float_frame.loc[float_frame.index[:2], ["A", "B"]] + piece.index = float_frame.index[-2:] + piece.columns = ["A", "B"] + float_frame.loc[float_frame.index[-2:], ["A", "B"]] = piece + result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values + expected = piece.values + tm.assert_almost_equal(result, expected) + + def test_getitem_setitem_ix_duplicates(self): + # #1201 + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + index=["foo", "foo", "bar", "baz", "bar"], + ) + + result = df.loc["foo"] + expected = df[:2] + tm.assert_frame_equal(result, expected) + + result = df.loc["bar"] + expected = df.iloc[[2, 4]] + tm.assert_frame_equal(result, expected) + + result = df.loc["baz"] + expected = df.iloc[3] + tm.assert_series_equal(result, expected) + + def test_getitem_ix_boolean_duplicates_multiple(self): + # #1201 + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + index=["foo", "foo", "bar", "baz", "bar"], + ) + + result = df.loc[["bar"]] + exp = df.iloc[[2, 4]] + tm.assert_frame_equal(result, exp) + + result = df.loc[df[1] > 0] + exp = df[df[1] > 0] + tm.assert_frame_equal(result, exp) + + result = df.loc[df[0] > 0] + exp = df[df[0] > 0] + tm.assert_frame_equal(result, exp) + + @pytest.mark.parametrize("bool_value", [True, False]) + def test_getitem_setitem_ix_bool_keyerror(self, bool_value): + # #2199 + df = DataFrame({"a": [1, 2, 3]}) + message = f"{bool_value}: boolean label can not be used without a boolean index" + with pytest.raises(KeyError, match=message): + df.loc[bool_value] + + msg = "cannot use a single bool to index into setitem" + with pytest.raises(KeyError, match=msg): + df.loc[bool_value] = 0 + + # TODO: rename? remove? 
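+    # [Editor's aside, not part of the upstream tests] a bare bool is refused
+    # as a .loc label unless the index itself is boolean, so df.loc[True] can
+    # never silently mean df.loc[1]:
+    # >>> DataFrame({"a": [1, 2]}).loc[True]  # KeyError, as tested above
+    # >>> DataFrame({"a": [1]}, index=[True]).loc[True]  # fine: boolean index
+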
+ def test_single_element_ix_dont_upcast(self, float_frame): + float_frame["E"] = 1 + assert issubclass(float_frame["E"].dtype.type, (int, np.integer)) + + result = float_frame.loc[float_frame.index[5], "E"] + assert is_integer(result) + + # GH 11617 + df = DataFrame({"a": [1.23]}) + df["b"] = 666 + + result = df.loc[0, "b"] + assert is_integer(result) + + expected = Series([666], [0], name="b") + result = df.loc[[0], "b"] + tm.assert_series_equal(result, expected) + + def test_iloc_row(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), index=range(0, 20, 2) + ) + + result = df.iloc[1] + exp = df.loc[2] + tm.assert_series_equal(result, exp) + + result = df.iloc[2] + exp = df.loc[4] + tm.assert_series_equal(result, exp) + + # slice + result = df.iloc[slice(4, 8)] + expected = df.loc[8:14] + tm.assert_frame_equal(result, expected) + + # list of integers + result = df.iloc[[1, 2, 4, 6]] + expected = df.reindex(df.index[[1, 2, 4, 6]]) + tm.assert_frame_equal(result, expected) + + def test_iloc_row_slice_view(self, using_copy_on_write, request): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), index=range(0, 20, 2) + ) + original = df.copy() + + # verify slice is view + # setting it makes it raise/warn + subset = df.iloc[slice(4, 8)] + + assert np.shares_memory(df[2], subset[2]) + + exp_col = original[2].copy() + subset.loc[:, 2] = 0.0 + if not using_copy_on_write: + subset.loc[:, 2] = 0.0 + exp_col._values[4:8] = 0.0 + + # With the enforcement of GH#45333 in 2.0, this remains a view + assert np.shares_memory(df[2], subset[2]) + tm.assert_series_equal(df[2], exp_col) + + def test_iloc_col(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 10)), columns=range(0, 20, 2) + ) + + result = df.iloc[:, 1] + exp = df.loc[:, 2] + tm.assert_series_equal(result, exp) + + result = df.iloc[:, 2] + exp = df.loc[:, 4] + tm.assert_series_equal(result, exp) + + # slice + result = df.iloc[:, slice(4, 8)] + expected = df.loc[:, 8:14] + tm.assert_frame_equal(result, expected) + + # list of integers + result = df.iloc[:, [1, 2, 4, 6]] + expected = df.reindex(columns=df.columns[[1, 2, 4, 6]]) + tm.assert_frame_equal(result, expected) + + def test_iloc_col_slice_view(self, using_array_manager, using_copy_on_write): + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 10)), columns=range(0, 20, 2) + ) + original = df.copy() + subset = df.iloc[:, slice(4, 8)] + + if not using_array_manager and not using_copy_on_write: + # verify slice is view + assert np.shares_memory(df[8]._values, subset[8]._values) + + subset.loc[:, 8] = 0.0 + + assert (df[8] == 0).all() + + # with the enforcement of GH#45333 in 2.0, this remains a view + assert np.shares_memory(df[8]._values, subset[8]._values) + else: + if using_copy_on_write: + # verify slice is view + assert np.shares_memory(df[8]._values, subset[8]._values) + subset[8] = 0.0 + # subset changed + assert (subset[8] == 0).all() + # but df itself did not change (setitem replaces full column) + tm.assert_frame_equal(df, original) + + def test_loc_duplicates(self): + # gh-17105 + + # insert a duplicate element to the index + trange = date_range( + start=Timestamp(year=2017, month=1, day=1), + end=Timestamp(year=2017, month=1, day=5), + ) + + trange = trange.insert(loc=5, item=Timestamp(year=2017, month=1, day=5)) + + df = DataFrame(0, index=trange, columns=["A", "B"]) + bool_idx = np.array([False, False, False, False, False, True]) + + # assignment + df.loc[trange[bool_idx], "A"] = 6 + + expected = 
DataFrame( + {"A": [0, 0, 0, 0, 6, 6], "B": [0, 0, 0, 0, 0, 0]}, index=trange + ) + tm.assert_frame_equal(df, expected) + + # in-place + df = DataFrame(0, index=trange, columns=["A", "B"]) + df.loc[trange[bool_idx], "A"] += 6 + tm.assert_frame_equal(df, expected) + + def test_setitem_with_unaligned_tz_aware_datetime_column(self): + # GH 12981 + # Assignment of unaligned offset-aware datetime series. + # Make sure timezone isn't lost + column = Series(date_range("2015-01-01", periods=3, tz="utc"), name="dates") + df = DataFrame({"dates": column}) + df["dates"] = column[[1, 0, 2]] + tm.assert_series_equal(df["dates"], column) + + df = DataFrame({"dates": column}) + df.loc[[0, 1, 2], "dates"] = column[[1, 0, 2]] + tm.assert_series_equal(df["dates"], column) + + def test_loc_setitem_datetimelike_with_inference(self): + # GH 7592 + # assignment of timedeltas with NaT + + one_hour = timedelta(hours=1) + df = DataFrame(index=date_range("20130101", periods=4)) + df["A"] = np.array([1 * one_hour] * 4, dtype="m8[ns]") + df.loc[:, "B"] = np.array([2 * one_hour] * 4, dtype="m8[ns]") + df.loc[df.index[:3], "C"] = np.array([3 * one_hour] * 3, dtype="m8[ns]") + df.loc[:, "D"] = np.array([4 * one_hour] * 4, dtype="m8[ns]") + df.loc[df.index[:3], "E"] = np.array([5 * one_hour] * 3, dtype="m8[ns]") + df["F"] = np.timedelta64("NaT") + df.loc[df.index[:-1], "F"] = np.array([6 * one_hour] * 3, dtype="m8[ns]") + df.loc[df.index[-3] :, "G"] = date_range("20130101", periods=3) + df["H"] = np.datetime64("NaT") + result = df.dtypes + expected = Series( + [np.dtype("timedelta64[ns]")] * 6 + [np.dtype("datetime64[ns]")] * 2, + index=list("ABCDEFGH"), + ) + tm.assert_series_equal(result, expected) + + def test_getitem_boolean_indexing_mixed(self): + df = DataFrame( + { + 0: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan}, + 1: { + 35: np.nan, + 40: 0.32632316859446198, + 43: np.nan, + 49: 0.32632316859446198, + 50: 0.39114724480578139, + }, + 2: { + 35: np.nan, + 40: np.nan, + 43: 0.29012581014105987, + 49: np.nan, + 50: np.nan, + }, + 3: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan}, + 4: { + 35: 0.34215328467153283, + 40: np.nan, + 43: np.nan, + 49: np.nan, + 50: np.nan, + }, + "y": {35: 0, 40: 0, 43: 0, 49: 0, 50: 1}, + } + ) + + # mixed int/float ok + df2 = df.copy() + df2[df2 > 0.3] = 1 + expected = df.copy() + expected.loc[40, 1] = 1 + expected.loc[49, 1] = 1 + expected.loc[50, 1] = 1 + expected.loc[35, 4] = 1 + tm.assert_frame_equal(df2, expected) + + df["foo"] = "test" + msg = "not supported between instances|unorderable types" + + with pytest.raises(TypeError, match=msg): + df[df > 0.3] = 1 + + def test_type_error_multiindex(self): + # See gh-12218 + mi = MultiIndex.from_product([["x", "y"], [0, 1]], names=[None, "c"]) + dg = DataFrame( + [[1, 1, 2, 2], [3, 3, 4, 4]], columns=mi, index=Index([0, 1], name="i") + ) + with pytest.raises(InvalidIndexError, match="slice"): + dg[:, 0] + + index = Index(range(2), name="i") + columns = MultiIndex( + levels=[["x", "y"], [0, 1]], codes=[[0, 1], [0, 0]], names=[None, "c"] + ) + expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index) + + result = dg.loc[:, (slice(None), 0)] + tm.assert_frame_equal(result, expected) + + name = ("x", 0) + index = Index(range(2), name="i") + expected = Series([1, 3], index=index, name=name) + + result = dg["x", 0] + tm.assert_series_equal(result, expected) + + def test_getitem_interval_index_partial_indexing(self): + # GH#36490 + df = DataFrame( + np.ones((3, 4)), 
columns=pd.IntervalIndex.from_breaks(np.arange(5)) + ) + + expected = df.iloc[:, 0] + + res = df[0.5] + tm.assert_series_equal(res, expected) + + res = df.loc[:, 0.5] + tm.assert_series_equal(res, expected) + + def test_setitem_array_as_cell_value(self): + # GH#43422 + df = DataFrame(columns=["a", "b"], dtype=object) + df.loc[0] = {"a": np.zeros((2,)), "b": np.zeros((2, 2))} + expected = DataFrame({"a": [np.zeros((2,))], "b": [np.zeros((2, 2))]}) + tm.assert_frame_equal(df, expected) + + def test_iloc_setitem_nullable_2d_values(self): + df = DataFrame({"A": [1, 2, 3]}, dtype="Int64") + orig = df.copy() + + df.loc[:] = df.values[:, ::-1] + tm.assert_frame_equal(df, orig) + + df.loc[:] = pd.core.arrays.NumpyExtensionArray(df.values[:, ::-1]) + tm.assert_frame_equal(df, orig) + + df.iloc[:] = df.iloc[:, :] + tm.assert_frame_equal(df, orig) + + def test_getitem_segfault_with_empty_like_object(self): + # GH#46848 + df = DataFrame(np.empty((1, 1), dtype=object)) + df[0] = np.empty_like(df[0]) + # this produces the segfault + df[[0]] + + @pytest.mark.parametrize( + "null", [pd.NaT, pd.NaT.to_numpy("M8[ns]"), pd.NaT.to_numpy("m8[ns]")] + ) + def test_setting_mismatched_na_into_nullable_fails( + self, null, any_numeric_ea_dtype + ): + # GH#44514 don't cast mismatched nulls to pd.NA + df = DataFrame({"A": [1, 2, 3]}, dtype=any_numeric_ea_dtype) + ser = df["A"] + arr = ser._values + + msg = "|".join( + [ + r"timedelta64\[ns\] cannot be converted to (Floating|Integer)Dtype", + r"datetime64\[ns\] cannot be converted to (Floating|Integer)Dtype", + "'values' contains non-numeric NA", + r"Invalid value '.*' for dtype (U?Int|Float)\d{1,2}", + ] + ) + with pytest.raises(TypeError, match=msg): + arr[0] = null + + with pytest.raises(TypeError, match=msg): + arr[:2] = [null, null] + + with pytest.raises(TypeError, match=msg): + ser[0] = null + + with pytest.raises(TypeError, match=msg): + ser[:2] = [null, null] + + with pytest.raises(TypeError, match=msg): + ser.iloc[0] = null + + with pytest.raises(TypeError, match=msg): + ser.iloc[:2] = [null, null] + + with pytest.raises(TypeError, match=msg): + df.iloc[0, 0] = null + + with pytest.raises(TypeError, match=msg): + df.iloc[:2, 0] = [null, null] + + # Multi-Block + df2 = df.copy() + df2["B"] = ser.copy() + with pytest.raises(TypeError, match=msg): + df2.iloc[0, 0] = null + + with pytest.raises(TypeError, match=msg): + df2.iloc[:2, 0] = [null, null] + + def test_loc_expand_empty_frame_keep_index_name(self): + # GH#45621 + df = DataFrame(columns=["b"], index=Index([], name="a")) + df.loc[0] = 1 + expected = DataFrame({"b": [1]}, index=Index([0], name="a")) + tm.assert_frame_equal(df, expected) + + def test_loc_expand_empty_frame_keep_midx_names(self): + # GH#46317 + df = DataFrame( + columns=["d"], index=MultiIndex.from_tuples([], names=["a", "b", "c"]) + ) + df.loc[(1, 2, 3)] = "foo" + expected = DataFrame( + {"d": ["foo"]}, + index=MultiIndex.from_tuples([(1, 2, 3)], names=["a", "b", "c"]), + ) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "val, idxr, warn", + [ + ("x", "a", None), # TODO: this should warn as well + ("x", ["a"], None), # TODO: this should warn as well + (1, "a", None), # TODO: this should warn as well + (1, ["a"], FutureWarning), + ], + ) + def test_loc_setitem_rhs_frame(self, idxr, val, warn): + # GH#47578 + df = DataFrame({"a": [1, 2]}) + + with tm.assert_produces_warning( + warn, match="Setting an item of incompatible dtype" + ): + df.loc[:, idxr] = DataFrame({"a": [val, 11]}, index=[1, 2]) + expected = 
DataFrame({"a": [np.nan, val]}) + tm.assert_frame_equal(df, expected) + + @td.skip_array_manager_invalid_test + def test_iloc_setitem_enlarge_no_warning(self): + # GH#47381 + df = DataFrame(columns=["a", "b"]) + expected = df.copy() + view = df[:] + with tm.assert_produces_warning(None): + df.iloc[:, 0] = np.array([1, 2], dtype=np.float64) + tm.assert_frame_equal(view, expected) + + def test_loc_internals_not_updated_correctly(self): + # GH#47867 all steps are necessary to reproduce the initial bug + df = DataFrame( + {"bool_col": True, "a": 1, "b": 2.5}, + index=MultiIndex.from_arrays([[1, 2], [1, 2]], names=["idx1", "idx2"]), + ) + idx = [(1, 1)] + + df["c"] = 3 + df.loc[idx, "c"] = 0 + + df.loc[idx, "c"] + df.loc[idx, ["a", "b"]] + + df.loc[idx, "c"] = 15 + result = df.loc[idx, "c"] + expected = df = Series( + 15, + index=MultiIndex.from_arrays([[1], [1]], names=["idx1", "idx2"]), + name="c", + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("val", [None, [None], pd.NA, [pd.NA]]) + def test_iloc_setitem_string_list_na(self, val): + # GH#45469 + df = DataFrame({"a": ["a", "b", "c"]}, dtype="string") + df.iloc[[0], :] = val + expected = DataFrame({"a": [pd.NA, "b", "c"]}, dtype="string") + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("val", [None, pd.NA]) + def test_iloc_setitem_string_na(self, val): + # GH#45469 + df = DataFrame({"a": ["a", "b", "c"]}, dtype="string") + df.iloc[0, :] = val + expected = DataFrame({"a": [pd.NA, "b", "c"]}, dtype="string") + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("func", [list, Series, np.array]) + def test_iloc_setitem_ea_null_slice_length_one_list(self, func): + # GH#48016 + df = DataFrame({"a": [1, 2, 3]}, dtype="Int64") + df.iloc[:, func([0])] = 5 + expected = DataFrame({"a": [5, 5, 5]}, dtype="Int64") + tm.assert_frame_equal(df, expected) + + def test_loc_named_tuple_for_midx(self): + # GH#48124 + df = DataFrame( + index=MultiIndex.from_product( + [["A", "B"], ["a", "b", "c"]], names=["first", "second"] + ) + ) + indexer_tuple = namedtuple("Indexer", df.index.names) + idxr = indexer_tuple(first="A", second=["a", "b"]) + result = df.loc[idxr, :] + expected = DataFrame( + index=MultiIndex.from_tuples( + [("A", "a"), ("A", "b")], names=["first", "second"] + ) + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("indexer", [["a"], "a"]) + @pytest.mark.parametrize("col", [{}, {"b": 1}]) + def test_set_2d_casting_date_to_int(self, col, indexer): + # GH#49159 + df = DataFrame( + {"a": [Timestamp("2022-12-29"), Timestamp("2022-12-30")], **col}, + ) + df.loc[[1], indexer] = df["a"] + pd.Timedelta(days=1) + expected = DataFrame( + {"a": [Timestamp("2022-12-29"), Timestamp("2022-12-31")], **col}, + ) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("col", [{}, {"name": "a"}]) + def test_loc_setitem_reordering_with_all_true_indexer(self, col): + # GH#48701 + n = 17 + df = DataFrame({**col, "x": range(n), "y": range(n)}) + expected = df.copy() + df.loc[n * [True], ["x", "y"]] = df[["x", "y"]] + tm.assert_frame_equal(df, expected) + + def test_loc_rhs_empty_warning(self): + # GH48480 + df = DataFrame(columns=["a", "b"]) + expected = df.copy() + rhs = DataFrame(columns=["a"]) + with tm.assert_produces_warning(None): + df.loc[:, "a"] = rhs + tm.assert_frame_equal(df, expected) + + def test_iloc_ea_series_indexer(self): + # GH#49521 + df = DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) + indexer = Series([0, 1], dtype="Int64") + row_indexer = Series([1], 
dtype="Int64") + result = df.iloc[row_indexer, indexer] + expected = DataFrame([[5, 6]], index=[1]) + tm.assert_frame_equal(result, expected) + + result = df.iloc[row_indexer.values, indexer.values] + tm.assert_frame_equal(result, expected) + + def test_iloc_ea_series_indexer_with_na(self): + # GH#49521 + df = DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) + indexer = Series([0, pd.NA], dtype="Int64") + msg = "cannot convert" + with pytest.raises(ValueError, match=msg): + df.iloc[:, indexer] + with pytest.raises(ValueError, match=msg): + df.iloc[:, indexer.values] + + @pytest.mark.parametrize("indexer", [True, (True,)]) + @pytest.mark.parametrize("dtype", [bool, "boolean"]) + def test_loc_bool_multiindex(self, dtype, indexer): + # GH#47687 + midx = MultiIndex.from_arrays( + [ + Series([True, True, False, False], dtype=dtype), + Series([True, False, True, False], dtype=dtype), + ], + names=["a", "b"], + ) + df = DataFrame({"c": [1, 2, 3, 4]}, index=midx) + with tm.maybe_produces_warning(PerformanceWarning, isinstance(indexer, tuple)): + result = df.loc[indexer] + expected = DataFrame( + {"c": [1, 2]}, index=Index([True, False], name="b", dtype=dtype) + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("utc", [False, True]) + @pytest.mark.parametrize("indexer", ["date", ["date"]]) + def test_loc_datetime_assignment_dtype_does_not_change(self, utc, indexer): + # GH#49837 + df = DataFrame( + { + "date": to_datetime( + [datetime(2022, 1, 20), datetime(2022, 1, 22)], utc=utc + ), + "update": [True, False], + } + ) + expected = df.copy(deep=True) + + update_df = df[df["update"]] + + df.loc[df["update"], indexer] = update_df["date"] + + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("indexer, idx", [(tm.loc, 1), (tm.iloc, 2)]) + def test_setitem_value_coercing_dtypes(self, indexer, idx): + # GH#50467 + df = DataFrame([["1", np.nan], ["2", np.nan], ["3", np.nan]], dtype=object) + rhs = DataFrame([[1, np.nan], [2, np.nan]]) + indexer(df)[:idx, :] = rhs + expected = DataFrame([[1, np.nan], [2, np.nan], ["3", np.nan]], dtype=object) + tm.assert_frame_equal(df, expected) + + +class TestDataFrameIndexingUInt64: + def test_setitem(self, uint64_frame): + df = uint64_frame + idx = df["A"].rename("foo") + + # setitem + assert "C" not in df.columns + df["C"] = idx + tm.assert_series_equal(df["C"], Series(idx, name="C")) + + assert "D" not in df.columns + df["D"] = "foo" + df["D"] = idx + tm.assert_series_equal(df["D"], Series(idx, name="D")) + del df["D"] + + # With NaN: because uint64 has no NaN element, + # the column should be cast to object. 
+ df2 = df.copy() + with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"): + df2.iloc[1, 1] = pd.NaT + df2.iloc[1, 2] = pd.NaT + result = df2["B"] + tm.assert_series_equal(notna(result), Series([True, False, True], name="B")) + tm.assert_series_equal( + df2.dtypes, + Series( + [np.dtype("uint64"), np.dtype("O"), np.dtype("O")], + index=["A", "B", "C"], + ), + ) + + +def test_object_casting_indexing_wraps_datetimelike(using_array_manager): + # GH#31649, check the indexing methods all the way down the stack + df = DataFrame( + { + "A": [1, 2], + "B": date_range("2000", periods=2), + "C": pd.timedelta_range("1 Day", periods=2), + } + ) + + ser = df.loc[0] + assert isinstance(ser.values[1], Timestamp) + assert isinstance(ser.values[2], pd.Timedelta) + + ser = df.iloc[0] + assert isinstance(ser.values[1], Timestamp) + assert isinstance(ser.values[2], pd.Timedelta) + + ser = df.xs(0, axis=0) + assert isinstance(ser.values[1], Timestamp) + assert isinstance(ser.values[2], pd.Timedelta) + + if using_array_manager: + # remainder of the test checking BlockManager internals + return + + mgr = df._mgr + mgr._rebuild_blknos_and_blklocs() + arr = mgr.fast_xs(0).array + assert isinstance(arr[1], Timestamp) + assert isinstance(arr[2], pd.Timedelta) + + blk = mgr.blocks[mgr.blknos[1]] + assert blk.dtype == "M8[ns]" # we got the right block + val = blk.iget((0, 0)) + assert isinstance(val, Timestamp) + + blk = mgr.blocks[mgr.blknos[2]] + assert blk.dtype == "m8[ns]" # we got the right block + val = blk.iget((0, 0)) + assert isinstance(val, pd.Timedelta) + + +msg1 = r"Cannot setitem on a Categorical with a new category( \(.*\))?, set the" +msg2 = "Cannot set a Categorical with another, without identical categories" + + +class TestLocILocDataFrameCategorical: + @pytest.fixture + def orig(self): + cats = Categorical(["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"]) + idx = Index(["h", "i", "j", "k", "l", "m", "n"]) + values = [1, 1, 1, 1, 1, 1, 1] + orig = DataFrame({"cats": cats, "values": values}, index=idx) + return orig + + @pytest.fixture + def exp_single_row(self): + # The expected values if we change a single row + cats1 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"]) + idx1 = Index(["h", "i", "j", "k", "l", "m", "n"]) + values1 = [1, 1, 2, 1, 1, 1, 1] + exp_single_row = DataFrame({"cats": cats1, "values": values1}, index=idx1) + return exp_single_row + + @pytest.fixture + def exp_multi_row(self): + # assign multiple rows (mixed values) (-> array) -> exp_multi_row + # changed multiple rows + cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"]) + idx2 = Index(["h", "i", "j", "k", "l", "m", "n"]) + values2 = [1, 1, 2, 2, 1, 1, 1] + exp_multi_row = DataFrame({"cats": cats2, "values": values2}, index=idx2) + return exp_multi_row + + @pytest.fixture + def exp_parts_cats_col(self): + # changed part of the cats column + cats3 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"]) + idx3 = Index(["h", "i", "j", "k", "l", "m", "n"]) + values3 = [1, 1, 1, 1, 1, 1, 1] + exp_parts_cats_col = DataFrame({"cats": cats3, "values": values3}, index=idx3) + return exp_parts_cats_col + + @pytest.fixture + def exp_single_cats_value(self): + # changed single value in cats col + cats4 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"]) + idx4 = Index(["h", "i", "j", "k", "l", "m", "n"]) + values4 = [1, 1, 1, 1, 1, 1, 1] + exp_single_cats_value = DataFrame( + {"cats": cats4, "values": values4}, 
index=idx4 + ) + return exp_single_cats_value + + @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc]) + def test_loc_iloc_setitem_list_of_lists(self, orig, exp_multi_row, indexer): + # - assign multiple rows (mixed values) -> exp_multi_row + df = orig.copy() + + key = slice(2, 4) + if indexer is tm.loc: + key = slice("j", "k") + + indexer(df)[key, :] = [["b", 2], ["b", 2]] + tm.assert_frame_equal(df, exp_multi_row) + + df = orig.copy() + with pytest.raises(TypeError, match=msg1): + indexer(df)[key, :] = [["c", 2], ["c", 2]] + + @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc, tm.at, tm.iat]) + def test_loc_iloc_at_iat_setitem_single_value_in_categories( + self, orig, exp_single_cats_value, indexer + ): + # - assign a single value -> exp_single_cats_value + df = orig.copy() + + key = (2, 0) + if indexer in [tm.loc, tm.at]: + key = (df.index[2], df.columns[0]) + + # "b" is among the categories for df["cat"] + indexer(df)[key] = "b" + tm.assert_frame_equal(df, exp_single_cats_value) + + # "c" is not among the categories for df["cat"] + with pytest.raises(TypeError, match=msg1): + indexer(df)[key] = "c" + + @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc]) + def test_loc_iloc_setitem_mask_single_value_in_categories( + self, orig, exp_single_cats_value, indexer + ): + # mask with single True + df = orig.copy() + + mask = df.index == "j" + key = 0 + if indexer is tm.loc: + key = df.columns[key] + + indexer(df)[mask, key] = "b" + tm.assert_frame_equal(df, exp_single_cats_value) + + @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc]) + def test_loc_iloc_setitem_full_row_non_categorical_rhs( + self, orig, exp_single_row, indexer + ): + # - assign a complete row (mixed values) -> exp_single_row + df = orig.copy() + + key = 2 + if indexer is tm.loc: + key = df.index[2] + + # not categorical dtype, but "b" _is_ among the categories for df["cat"] + indexer(df)[key, :] = ["b", 2] + tm.assert_frame_equal(df, exp_single_row) + + # "c" is not among the categories for df["cat"] + with pytest.raises(TypeError, match=msg1): + indexer(df)[key, :] = ["c", 2] + + @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc]) + def test_loc_iloc_setitem_partial_col_categorical_rhs( + self, orig, exp_parts_cats_col, indexer + ): + # assign a part of a column with dtype == categorical -> + # exp_parts_cats_col + df = orig.copy() + + key = (slice(2, 4), 0) + if indexer is tm.loc: + key = (slice("j", "k"), df.columns[0]) + + # same categories as we currently have in df["cats"] + compat = Categorical(["b", "b"], categories=["a", "b"]) + indexer(df)[key] = compat + tm.assert_frame_equal(df, exp_parts_cats_col) + + # categories do not match df["cat"]'s, but "b" is among them + semi_compat = Categorical(list("bb"), categories=list("abc")) + with pytest.raises(TypeError, match=msg2): + # different categories but holdable values + # -> not sure if this should fail or pass + indexer(df)[key] = semi_compat + + # categories do not match df["cat"]'s, and "c" is not among them + incompat = Categorical(list("cc"), categories=list("abc")) + with pytest.raises(TypeError, match=msg2): + # different values + indexer(df)[key] = incompat + + @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc]) + def test_loc_iloc_setitem_non_categorical_rhs( + self, orig, exp_parts_cats_col, indexer + ): + # assign a part of a column with dtype != categorical -> exp_parts_cats_col + df = orig.copy() + + key = (slice(2, 4), 0) + if indexer is tm.loc: + key = (slice("j", "k"), df.columns[0]) + + # "b" is among the categories for df["cat"] + 
indexer(df)[key] = ["b", "b"] + tm.assert_frame_equal(df, exp_parts_cats_col) + + # "c" not part of the categories + with pytest.raises(TypeError, match=msg1): + indexer(df)[key] = ["c", "c"] + + @pytest.mark.parametrize("indexer", [tm.getitem, tm.loc, tm.iloc]) + def test_getitem_preserve_object_index_with_dates(self, indexer): + # https://github.com/pandas-dev/pandas/pull/42950 - when selecting a column + # from dataframe, don't try to infer object dtype index on Series construction + idx = date_range("2012", periods=3).astype(object) + df = DataFrame({0: [1, 2, 3]}, index=idx) + assert df.index.dtype == object + + if indexer is tm.getitem: + ser = indexer(df)[0] + else: + ser = indexer(df)[:, 0] + + assert ser.index.dtype == object + + def test_loc_on_multiindex_one_level(self): + # GH#45779 + df = DataFrame( + data=[[0], [1]], + index=MultiIndex.from_tuples([("a",), ("b",)], names=["first"]), + ) + expected = DataFrame( + data=[[0]], index=MultiIndex.from_tuples([("a",)], names=["first"]) + ) + result = df.loc["a"] + tm.assert_frame_equal(result, expected) + + +class TestDeprecatedIndexers: + @pytest.mark.parametrize( + "key", [{1}, {1: 1}, ({1}, "a"), ({1: 1}, "a"), (1, {"a"}), (1, {"a": "a"})] + ) + def test_getitem_dict_and_set_deprecated(self, key): + # GH#42825 enforced in 2.0 + df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"]) + with pytest.raises(TypeError, match="as an indexer is not supported"): + df.loc[key] + + @pytest.mark.parametrize( + "key", + [ + {1}, + {1: 1}, + (({1}, 2), "a"), + (({1: 1}, 2), "a"), + ((1, 2), {"a"}), + ((1, 2), {"a": "a"}), + ], + ) + def test_getitem_dict_and_set_deprecated_multiindex(self, key): + # GH#42825 enforced in 2.0 + df = DataFrame( + [[1, 2], [3, 4]], + columns=["a", "b"], + index=MultiIndex.from_tuples([(1, 2), (3, 4)]), + ) + with pytest.raises(TypeError, match="as an indexer is not supported"): + df.loc[key] + + @pytest.mark.parametrize( + "key", [{1}, {1: 1}, ({1}, "a"), ({1: 1}, "a"), (1, {"a"}), (1, {"a": "a"})] + ) + def test_setitem_dict_and_set_disallowed(self, key): + # GH#42825 enforced in 2.0 + df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"]) + with pytest.raises(TypeError, match="as an indexer is not supported"): + df.loc[key] = 1 + + @pytest.mark.parametrize( + "key", + [ + {1}, + {1: 1}, + (({1}, 2), "a"), + (({1: 1}, 2), "a"), + ((1, 2), {"a"}), + ((1, 2), {"a": "a"}), + ], + ) + def test_setitem_dict_and_set_disallowed_multiindex(self, key): + # GH#42825 enforced in 2.0 + df = DataFrame( + [[1, 2], [3, 4]], + columns=["a", "b"], + index=MultiIndex.from_tuples([(1, 2), (3, 4)]), + ) + with pytest.raises(TypeError, match="as an indexer is not supported"): + df.loc[key] = 1 + + +def test_adding_new_conditional_column() -> None: + # https://github.com/pandas-dev/pandas/issues/55025 + df = DataFrame({"x": [1]}) + df.loc[df["x"] == 1, "y"] = "1" + expected = DataFrame({"x": [1], "y": ["1"]}) + tm.assert_frame_equal(df, expected) + + df = DataFrame({"x": [1]}) + # try inserting something which numpy would store as 'object' + value = lambda x: x + df.loc[df["x"] == 1, "y"] = value + expected = DataFrame({"x": [1], "y": [value]}) + tm.assert_frame_equal(df, expected) + + +def test_add_new_column_infer_string(): + # GH#55366 + pytest.importorskip("pyarrow") + df = DataFrame({"x": [1]}) + with pd.option_context("future.infer_string", True): + df.loc[df["x"] == 1, "y"] = "1" + expected = DataFrame( + {"x": [1], "y": Series(["1"], dtype="string[pyarrow_numpy]")}, + columns=Index(["x", "y"], dtype=object), + ) + 
tm.assert_frame_equal(df, expected) + + +class TestSetitemValidation: + # This is adapted from pandas/tests/arrays/masked/test_indexing.py + # but checks for warnings instead of errors. + def _check_setitem_invalid(self, df, invalid, indexer, warn): + msg = "Setting an item of incompatible dtype is deprecated" + msg = re.escape(msg) + + orig_df = df.copy() + + # iloc + with tm.assert_produces_warning(warn, match=msg): + df.iloc[indexer, 0] = invalid + df = orig_df.copy() + + # loc + with tm.assert_produces_warning(warn, match=msg): + df.loc[indexer, "a"] = invalid + df = orig_df.copy() + + _invalid_scalars = [ + 1 + 2j, + "True", + "1", + "1.0", + pd.NaT, + np.datetime64("NaT"), + np.timedelta64("NaT"), + ] + _indexers = [0, [0], slice(0, 1), [True, False, False]] + + @pytest.mark.parametrize( + "invalid", _invalid_scalars + [1, 1.0, np.int64(1), np.float64(1)] + ) + @pytest.mark.parametrize("indexer", _indexers) + def test_setitem_validation_scalar_bool(self, invalid, indexer): + df = DataFrame({"a": [True, False, False]}, dtype="bool") + self._check_setitem_invalid(df, invalid, indexer, FutureWarning) + + @pytest.mark.parametrize("invalid", _invalid_scalars + [True, 1.5, np.float64(1.5)]) + @pytest.mark.parametrize("indexer", _indexers) + def test_setitem_validation_scalar_int(self, invalid, any_int_numpy_dtype, indexer): + df = DataFrame({"a": [1, 2, 3]}, dtype=any_int_numpy_dtype) + if isna(invalid) and invalid is not pd.NaT: + warn = None + else: + warn = FutureWarning + self._check_setitem_invalid(df, invalid, indexer, warn) + + @pytest.mark.parametrize("invalid", _invalid_scalars + [True]) + @pytest.mark.parametrize("indexer", _indexers) + def test_setitem_validation_scalar_float(self, invalid, float_numpy_dtype, indexer): + df = DataFrame({"a": [1, 2, None]}, dtype=float_numpy_dtype) + self._check_setitem_invalid(df, invalid, indexer, FutureWarning) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_insert.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_insert.py new file mode 100644 index 00000000..12229c28 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_insert.py @@ -0,0 +1,122 @@ +""" +test_insert is specifically for the DataFrame.insert method; not to be +confused with tests with "insert" in their names that are really testing +__setitem__. 
+""" +import numpy as np +import pytest + +from pandas.errors import PerformanceWarning + +from pandas import ( + DataFrame, + Index, +) +import pandas._testing as tm + + +class TestDataFrameInsert: + def test_insert(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + index=np.arange(5), + columns=["c", "b", "a"], + ) + + df.insert(0, "foo", df["a"]) + tm.assert_index_equal(df.columns, Index(["foo", "c", "b", "a"])) + tm.assert_series_equal(df["a"], df["foo"], check_names=False) + + df.insert(2, "bar", df["c"]) + tm.assert_index_equal(df.columns, Index(["foo", "c", "bar", "b", "a"])) + tm.assert_almost_equal(df["c"], df["bar"], check_names=False) + + with pytest.raises(ValueError, match="already exists"): + df.insert(1, "a", df["b"]) + + msg = "cannot insert c, already exists" + with pytest.raises(ValueError, match=msg): + df.insert(1, "c", df["b"]) + + df.columns.name = "some_name" + # preserve columns name field + df.insert(0, "baz", df["c"]) + assert df.columns.name == "some_name" + + def test_insert_column_bug_4032(self): + # GH#4032, inserting a column and renaming causing errors + df = DataFrame({"b": [1.1, 2.2]}) + + df = df.rename(columns={}) + df.insert(0, "a", [1, 2]) + result = df.rename(columns={}) + + str(result) + expected = DataFrame([[1, 1.1], [2, 2.2]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + df.insert(0, "c", [1.3, 2.3]) + result = df.rename(columns={}) + + str(result) + expected = DataFrame([[1.3, 1, 1.1], [2.3, 2, 2.2]], columns=["c", "a", "b"]) + tm.assert_frame_equal(result, expected) + + def test_insert_with_columns_dups(self): + # GH#14291 + df = DataFrame() + df.insert(0, "A", ["g", "h", "i"], allow_duplicates=True) + df.insert(0, "A", ["d", "e", "f"], allow_duplicates=True) + df.insert(0, "A", ["a", "b", "c"], allow_duplicates=True) + exp = DataFrame( + [["a", "d", "g"], ["b", "e", "h"], ["c", "f", "i"]], columns=["A", "A", "A"] + ) + tm.assert_frame_equal(df, exp) + + def test_insert_item_cache(self, using_array_manager, using_copy_on_write): + df = DataFrame(np.random.default_rng(2).standard_normal((4, 3))) + ser = df[0] + + if using_array_manager: + expected_warning = None + else: + # with BlockManager warn about high fragmentation of single dtype + expected_warning = PerformanceWarning + + with tm.assert_produces_warning(expected_warning): + for n in range(100): + df[n + 3] = df[1] * n + + if using_copy_on_write: + ser.iloc[0] = 99 + assert df.iloc[0, 0] == df[0][0] + assert df.iloc[0, 0] != 99 + else: + ser.values[0] = 99 + assert df.iloc[0, 0] == df[0][0] + assert df.iloc[0, 0] == 99 + + def test_insert_EA_no_warning(self): + # PerformanceWarning about fragmented frame should not be raised when + # using EAs (https://github.com/pandas-dev/pandas/issues/44098) + df = DataFrame( + np.random.default_rng(2).integers(0, 100, size=(3, 100)), dtype="Int64" + ) + with tm.assert_produces_warning(None): + df["a"] = np.array([1, 2, 3]) + + def test_insert_frame(self): + # GH#42403 + df = DataFrame({"col1": [1, 2], "col2": [3, 4]}) + + msg = ( + "Expected a one-dimensional object, got a DataFrame with 2 columns instead." 
+ ) + with pytest.raises(ValueError, match=msg): + df.insert(1, "newcol", df) + + def test_insert_int64_loc(self): + # GH#53193 + df = DataFrame({"a": [1, 2]}) + df.insert(np.int64(0), "b", 0) + tm.assert_frame_equal(df, DataFrame({"b": [0, 0], "a": [1, 2]})) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_mask.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_mask.py new file mode 100644 index 00000000..264e27c9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_mask.py @@ -0,0 +1,152 @@ +""" +Tests for DataFrame.mask; tests DataFrame.where as a side-effect. +""" + +import numpy as np + +from pandas import ( + NA, + DataFrame, + Float64Dtype, + Series, + StringDtype, + Timedelta, + isna, +) +import pandas._testing as tm + + +class TestDataFrameMask: + def test_mask(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + cond = df > 0 + + rs = df.where(cond, np.nan) + tm.assert_frame_equal(rs, df.mask(df <= 0)) + tm.assert_frame_equal(rs, df.mask(~cond)) + + other = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + rs = df.where(cond, other) + tm.assert_frame_equal(rs, df.mask(df <= 0, other)) + tm.assert_frame_equal(rs, df.mask(~cond, other)) + + def test_mask2(self): + # see GH#21891 + df = DataFrame([1, 2]) + res = df.mask([[True], [False]]) + + exp = DataFrame([np.nan, 2]) + tm.assert_frame_equal(res, exp) + + def test_mask_inplace(self): + # GH#8801 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + cond = df > 0 + + rdf = df.copy() + + return_value = rdf.where(cond, inplace=True) + assert return_value is None + tm.assert_frame_equal(rdf, df.where(cond)) + tm.assert_frame_equal(rdf, df.mask(~cond)) + + rdf = df.copy() + return_value = rdf.where(cond, -df, inplace=True) + assert return_value is None + tm.assert_frame_equal(rdf, df.where(cond, -df)) + tm.assert_frame_equal(rdf, df.mask(~cond, -df)) + + def test_mask_edge_case_1xN_frame(self): + # GH#4071 + df = DataFrame([[1, 2]]) + res = df.mask(DataFrame([[True, False]])) + expec = DataFrame([[np.nan, 2]]) + tm.assert_frame_equal(res, expec) + + def test_mask_callable(self): + # GH#12533 + df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + result = df.mask(lambda x: x > 4, lambda x: x + 1) + exp = DataFrame([[1, 2, 3], [4, 6, 7], [8, 9, 10]]) + tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, df.mask(df > 4, df + 1)) + + # return ndarray and scalar + result = df.mask(lambda x: (x % 2 == 0).values, lambda x: 99) + exp = DataFrame([[1, 99, 3], [99, 5, 99], [7, 99, 9]]) + tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, df.mask(df % 2 == 0, 99)) + + # chain + result = (df + 2).mask(lambda x: x > 8, lambda x: x + 10) + exp = DataFrame([[3, 4, 5], [6, 7, 8], [19, 20, 21]]) + tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, (df + 2).mask((df + 2) > 8, (df + 2) + 10)) + + def test_mask_dtype_bool_conversion(self): + # GH#3733 + df = DataFrame(data=np.random.default_rng(2).standard_normal((100, 50))) + df = df.where(df > 0) # create nans + bools = df > 0 + mask = isna(df) + expected = bools.astype(object).mask(mask) + result = bools.mask(mask) + tm.assert_frame_equal(result, expected) + + +def test_mask_stringdtype(frame_or_series): + # GH 40824 + obj = DataFrame( + {"A": ["foo", "bar", "baz", NA]}, + index=["id1", "id2", "id3", "id4"], + dtype=StringDtype(), + ) + filtered_obj = DataFrame( + {"A": ["this", "that"]}, index=["id2", "id3"], 
dtype=StringDtype() + ) + expected = DataFrame( + {"A": [NA, "this", "that", NA]}, + index=["id1", "id2", "id3", "id4"], + dtype=StringDtype(), + ) + if frame_or_series is Series: + obj = obj["A"] + filtered_obj = filtered_obj["A"] + expected = expected["A"] + + filter_ser = Series([False, True, True, False]) + result = obj.mask(filter_ser, filtered_obj) + + tm.assert_equal(result, expected) + + +def test_mask_where_dtype_timedelta(): + # https://github.com/pandas-dev/pandas/issues/39548 + df = DataFrame([Timedelta(i, unit="d") for i in range(5)]) + + expected = DataFrame(np.full(5, np.nan, dtype="timedelta64[ns]")) + tm.assert_frame_equal(df.mask(df.notna()), expected) + + expected = DataFrame( + [np.nan, np.nan, np.nan, Timedelta("3 day"), Timedelta("4 day")] + ) + tm.assert_frame_equal(df.where(df > Timedelta(2, unit="d")), expected) + + +def test_mask_return_dtype(): + # GH#50488 + ser = Series([0.0, 1.0, 2.0, 3.0], dtype=Float64Dtype()) + cond = ~ser.isna() + other = Series([True, False, True, False]) + expected = Series([1.0, 0.0, 1.0, 0.0], dtype=ser.dtype) + result = ser.mask(cond, other) + tm.assert_series_equal(result, expected) + + +def test_mask_inplace_no_other(): + # GH#51685 + df = DataFrame({"a": [1.0, 2.0], "b": ["x", "y"]}) + cond = DataFrame({"a": [True, False], "b": [False, True]}) + df.mask(cond, inplace=True) + expected = DataFrame({"a": [np.nan, 2], "b": ["x", np.nan]}) + tm.assert_frame_equal(df, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_set_value.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_set_value.py new file mode 100644 index 00000000..32312868 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_set_value.py @@ -0,0 +1,78 @@ +import numpy as np + +from pandas.core.dtypes.common import is_float_dtype + +from pandas import ( + DataFrame, + isna, +) +import pandas._testing as tm + + +class TestSetValue: + def test_set_value(self, float_frame): + for idx in float_frame.index: + for col in float_frame.columns: + float_frame._set_value(idx, col, 1) + assert float_frame[col][idx] == 1 + + def test_set_value_resize(self, float_frame): + res = float_frame._set_value("foobar", "B", 0) + assert res is None + assert float_frame.index[-1] == "foobar" + assert float_frame._get_value("foobar", "B") == 0 + + float_frame.loc["foobar", "qux"] = 0 + assert float_frame._get_value("foobar", "qux") == 0 + + res = float_frame.copy() + res._set_value("foobar", "baz", "sam") + assert res["baz"].dtype == np.object_ + + res = float_frame.copy() + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + res._set_value("foobar", "baz", True) + assert res["baz"].dtype == np.object_ + + res = float_frame.copy() + res._set_value("foobar", "baz", 5) + assert is_float_dtype(res["baz"]) + assert isna(res["baz"].drop(["foobar"])).all() + + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + res._set_value("foobar", "baz", "sam") + assert res.loc["foobar", "baz"] == "sam" + + def test_set_value_with_index_dtype_change(self): + df_orig = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=range(3), + columns=list("ABC"), + ) + + # this is actually ambiguous as the 2 is interpreted as a positional + # so column is not created + df = df_orig.copy() + df._set_value("C", 2, 1.0) + assert list(df.index) == list(df_orig.index) + ["C"] + # assert list(df.columns) == 
list(df_orig.columns) + [2] + + df = df_orig.copy() + df.loc["C", 2] = 1.0 + assert list(df.index) == list(df_orig.index) + ["C"] + # assert list(df.columns) == list(df_orig.columns) + [2] + + # create both new + df = df_orig.copy() + df._set_value("C", "D", 1.0) + assert list(df.index) == list(df_orig.index) + ["C"] + assert list(df.columns) == list(df_orig.columns) + ["D"] + + df = df_orig.copy() + df.loc["C", "D"] = 1.0 + assert list(df.index) == list(df_orig.index) + ["C"] + assert list(df.columns) == list(df_orig.columns) + ["D"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_setitem.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_setitem.py new file mode 100644 index 00000000..fc2e817b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_setitem.py @@ -0,0 +1,1352 @@ +from datetime import datetime + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.base import _registry as ea_registry +from pandas.core.dtypes.common import is_object_dtype +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + IntervalDtype, + PeriodDtype, +) + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + Index, + Interval, + IntervalIndex, + MultiIndex, + NaT, + Period, + PeriodIndex, + Series, + Timestamp, + cut, + date_range, + notna, + period_range, +) +import pandas._testing as tm +from pandas.core.arrays import SparseArray + +from pandas.tseries.offsets import BDay + + +class TestDataFrameSetItem: + def test_setitem_str_subclass(self): + # GH#37366 + class mystring(str): + pass + + data = ["2020-10-22 01:21:00+00:00"] + index = DatetimeIndex(data) + df = DataFrame({"a": [1]}, index=index) + df["b"] = 2 + df[mystring("c")] = 3 + expected = DataFrame({"a": [1], "b": [2], mystring("c"): [3]}, index=index) + tm.assert_equal(df, expected) + + @pytest.mark.parametrize( + "dtype", ["int32", "int64", "uint32", "uint64", "float32", "float64"] + ) + def test_setitem_dtype(self, dtype, float_frame): + # Use integers since casting negative floats to uints is undefined + arr = np.random.default_rng(2).integers(1, 10, len(float_frame)) + + float_frame[dtype] = np.array(arr, dtype=dtype) + assert float_frame[dtype].dtype.name == dtype + + def test_setitem_list_not_dataframe(self, float_frame): + data = np.random.default_rng(2).standard_normal((len(float_frame), 2)) + float_frame[["A", "B"]] = data + tm.assert_almost_equal(float_frame[["A", "B"]].values, data) + + def test_setitem_error_msgs(self): + # GH 7432 + df = DataFrame( + {"bar": [1, 2, 3], "baz": ["d", "e", "f"]}, + index=Index(["a", "b", "c"], name="foo"), + ) + ser = Series( + ["g", "h", "i", "j"], + index=Index(["a", "b", "c", "a"], name="foo"), + name="fiz", + ) + msg = "cannot reindex on an axis with duplicate labels" + with pytest.raises(ValueError, match=msg): + df["newcol"] = ser + + # GH 4107, more descriptive error message + df = DataFrame( + np.random.default_rng(2).integers(0, 2, (4, 4)), + columns=["a", "b", "c", "d"], + ) + + msg = "Cannot set a DataFrame with multiple columns to the single column gr" + with pytest.raises(ValueError, match=msg): + df["gr"] = df.groupby(["b", "c"]).count() + + def test_setitem_benchmark(self): + # from the vb_suite/frame_methods/frame_insert_columns + N = 10 + K = 5 + df = DataFrame(index=range(N)) + new_col = np.random.default_rng(2).standard_normal(N) + for i in range(K): + df[i] = new_col + 
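+ # np.repeat(new_col, K).reshape(N, K) yields a frame in which each of the K columns equals new_col, matching the K identical column assignments above.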
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N)) + tm.assert_frame_equal(df, expected) + + def test_setitem_different_dtype(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + index=np.arange(5), + columns=["c", "b", "a"], + ) + df.insert(0, "foo", df["a"]) + df.insert(2, "bar", df["c"]) + + # diff dtype + + # new item + df["x"] = df["a"].astype("float32") + result = df.dtypes + expected = Series( + [np.dtype("float64")] * 5 + [np.dtype("float32")], + index=["foo", "c", "bar", "b", "a", "x"], + ) + tm.assert_series_equal(result, expected) + + # replacing current (in different block) + df["a"] = df["a"].astype("float32") + result = df.dtypes + expected = Series( + [np.dtype("float64")] * 4 + [np.dtype("float32")] * 2, + index=["foo", "c", "bar", "b", "a", "x"], + ) + tm.assert_series_equal(result, expected) + + df["y"] = df["a"].astype("int32") + result = df.dtypes + expected = Series( + [np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")], + index=["foo", "c", "bar", "b", "a", "x", "y"], + ) + tm.assert_series_equal(result, expected) + + def test_setitem_empty_columns(self): + # GH 13522 + df = DataFrame(index=["A", "B", "C"]) + df["X"] = df.index + df["X"] = ["x", "y", "z"] + exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"]) + tm.assert_frame_equal(df, exp) + + def test_setitem_dt64_index_empty_columns(self): + rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s") + df = DataFrame(index=np.arange(len(rng))) + + df["A"] = rng + assert df["A"].dtype == np.dtype("M8[ns]") + + def test_setitem_timestamp_empty_columns(self): + # GH#19843 + df = DataFrame(index=range(3)) + df["now"] = Timestamp("20130101", tz="UTC").as_unit("ns") + + expected = DataFrame( + [[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"] + ) + tm.assert_frame_equal(df, expected) + + def test_setitem_wrong_length_categorical_dtype_raises(self): + # GH#29523 + cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"]) + df = DataFrame(range(10), columns=["bar"]) + + msg = ( + rf"Length of values \({len(cat)}\) " + rf"does not match length of index \({len(df)}\)" + ) + with pytest.raises(ValueError, match=msg): + df["foo"] = cat + + def test_setitem_with_sparse_value(self): + # GH#8131 + df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]}) + sp_array = SparseArray([0, 0, 1]) + df["new_column"] = sp_array + + expected = Series(sp_array, name="new_column") + tm.assert_series_equal(df["new_column"], expected) + + def test_setitem_with_unaligned_sparse_value(self): + df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]}) + sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0]) + + df["new_column"] = sp_series + expected = Series(SparseArray([1, 0, 0]), name="new_column") + tm.assert_series_equal(df["new_column"], expected) + + def test_setitem_period_preserves_dtype(self): + # GH: 26861 + data = [Period("2003-12", "D")] + result = DataFrame([]) + result["a"] = data + + expected = DataFrame({"a": data}) + + tm.assert_frame_equal(result, expected) + + def test_setitem_dict_preserves_dtypes(self): + # https://github.com/pandas-dev/pandas/issues/34573 + expected = DataFrame( + { + "a": Series([0, 1, 2], dtype="int64"), + "b": Series([1, 2, 3], dtype=float), + "c": Series([1, 2, 3], dtype=float), + "d": Series([1, 2, 3], dtype="uint32"), + } + ) + df = DataFrame( + { + "a": Series([], dtype="int64"), + "b": Series([], dtype=float), + "c": Series([], dtype=float), + "d": 
Series([], dtype="uint32"), + } + ) + for idx, b in enumerate([1, 2, 3]): + df.loc[df.shape[0]] = { + "a": int(idx), + "b": float(b), + "c": float(b), + "d": np.uint32(b), + } + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "obj,dtype", + [ + (Period("2020-01"), PeriodDtype("M")), + (Interval(left=0, right=5), IntervalDtype("int64", "right")), + ( + Timestamp("2011-01-01", tz="US/Eastern"), + DatetimeTZDtype(unit="s", tz="US/Eastern"), + ), + ], + ) + def test_setitem_extension_types(self, obj, dtype): + # GH: 34832 + expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)}) + + df = DataFrame({"idx": [1, 2, 3]}) + df["obj"] = obj + + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "ea_name", + [ + dtype.name + for dtype in ea_registry.dtypes + # property would require instantiation + if not isinstance(dtype.name, property) + ] + + ["datetime64[ns, UTC]", "period[D]"], + ) + def test_setitem_with_ea_name(self, ea_name): + # GH 38386 + result = DataFrame([0]) + result[ea_name] = [1] + expected = DataFrame({0: [0], ea_name: [1]}) + tm.assert_frame_equal(result, expected) + + def test_setitem_dt64_ndarray_with_NaT_and_diff_time_units(self): + # GH#7492 + data_ns = np.array([1, "nat"], dtype="datetime64[ns]") + result = Series(data_ns).to_frame() + result["new"] = data_ns + expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]") + tm.assert_frame_equal(result, expected) + + # OutOfBoundsDatetime error shouldn't occur; as of 2.0 we preserve "M8[s]" + data_s = np.array([1, "nat"], dtype="datetime64[s]") + result["new"] = data_s + tm.assert_series_equal(result[0], expected[0]) + tm.assert_numpy_array_equal(result["new"].to_numpy(), data_s) + + @pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"]) + def test_frame_setitem_datetime64_col_other_units(self, unit): + # Check that non-nano dt64 values get cast to dt64 on setitem + # into a not-yet-existing column + n = 100 + + dtype = np.dtype(f"M8[{unit}]") + vals = np.arange(n, dtype=np.int64).view(dtype) + if unit in ["s", "ms"]: + # supported unit + ex_vals = vals + else: + # we get the nearest supported units, i.e. "s" + ex_vals = vals.astype("datetime64[s]") + + df = DataFrame({"ints": np.arange(n)}, index=np.arange(n)) + df[unit] = vals + + assert df[unit].dtype == ex_vals.dtype + assert (df[unit].values == ex_vals).all() + + @pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"]) + def test_frame_setitem_existing_datetime64_col_other_units(self, unit): + # Check that non-nano dt64 values get cast to dt64 on setitem + # into an already-existing dt64 column + n = 100 + + dtype = np.dtype(f"M8[{unit}]") + vals = np.arange(n, dtype=np.int64).view(dtype) + ex_vals = vals.astype("datetime64[ns]") + + df = DataFrame({"ints": np.arange(n)}, index=np.arange(n)) + df["dates"] = np.arange(n, dtype=np.int64).view("M8[ns]") + + # We overwrite existing dt64 column with new, non-nano dt64 vals + df["dates"] = vals + assert (df["dates"].values == ex_vals).all() + + def test_setitem_dt64tz(self, timezone_frame, using_copy_on_write): + df = timezone_frame + idx = df["B"].rename("foo") + + # setitem + df["C"] = idx + tm.assert_series_equal(df["C"], Series(idx, name="C")) + + df["D"] = "foo" + df["D"] = idx + tm.assert_series_equal(df["D"], Series(idx, name="D")) + del df["D"] + + # assert that A & C are not sharing the same base (e.g. 
they + # are copies) + # Note: This does not hold with Copy on Write (because of lazy copying) + v1 = df._mgr.arrays[1] + v2 = df._mgr.arrays[2] + tm.assert_extension_array_equal(v1, v2) + v1base = v1._ndarray.base + v2base = v2._ndarray.base + if not using_copy_on_write: + assert v1base is None or (id(v1base) != id(v2base)) + else: + assert id(v1base) == id(v2base) + + # with nan + df2 = df.copy() + df2.iloc[1, 1] = NaT + df2.iloc[1, 2] = NaT + result = df2["B"] + tm.assert_series_equal(notna(result), Series([True, False, True], name="B")) + tm.assert_series_equal(df2.dtypes, df.dtypes) + + def test_setitem_periodindex(self): + rng = period_range("1/1/2000", periods=5, name="index") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)), index=rng) + + df["Index"] = rng + rs = Index(df["Index"]) + tm.assert_index_equal(rs, rng, check_names=False) + assert rs.name == "Index" + assert rng.name == "index" + + rs = df.reset_index().set_index("index") + assert isinstance(rs.index, PeriodIndex) + tm.assert_index_equal(rs.index, rng) + + def test_setitem_complete_column_with_array(self): + # GH#37954 + df = DataFrame({"a": ["one", "two", "three"], "b": [1, 2, 3]}) + arr = np.array([[1, 1], [3, 1], [5, 1]]) + df[["c", "d"]] = arr + expected = DataFrame( + { + "a": ["one", "two", "three"], + "b": [1, 2, 3], + "c": [1, 3, 5], + "d": [1, 1, 1], + } + ) + expected["c"] = expected["c"].astype(arr.dtype) + expected["d"] = expected["d"].astype(arr.dtype) + assert expected["c"].dtype == arr.dtype + assert expected["d"].dtype == arr.dtype + tm.assert_frame_equal(df, expected) + + def test_setitem_period_d_dtype(self): + # GH 39763 + rng = period_range("2016-01-01", periods=9, freq="D", name="A") + result = DataFrame(rng) + expected = DataFrame( + {"A": ["NaT", "NaT", "NaT", "NaT", "NaT", "NaT", "NaT", "NaT", "NaT"]}, + dtype="period[D]", + ) + result.iloc[:] = rng._na_value + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["f8", "i8", "u8"]) + def test_setitem_bool_with_numeric_index(self, dtype): + # GH#36319 + cols = Index([1, 2, 3], dtype=dtype) + df = DataFrame(np.random.default_rng(2).standard_normal((3, 3)), columns=cols) + + df[False] = ["a", "b", "c"] + + expected_cols = Index([1, 2, 3, False], dtype=object) + if dtype == "f8": + expected_cols = Index([1.0, 2.0, 3.0, False], dtype=object) + + tm.assert_index_equal(df.columns, expected_cols) + + @pytest.mark.parametrize("indexer", ["B", ["B"]]) + def test_setitem_frame_length_0_str_key(self, indexer): + # GH#38831 + df = DataFrame(columns=["A", "B"]) + other = DataFrame({"B": [1, 2]}) + df[indexer] = other + expected = DataFrame({"A": [np.nan] * 2, "B": [1, 2]}) + expected["A"] = expected["A"].astype("object") + tm.assert_frame_equal(df, expected) + + def test_setitem_frame_duplicate_columns(self): + # GH#15695 + cols = ["A", "B", "C"] * 2 + df = DataFrame(index=range(3), columns=cols) + df.loc[0, "A"] = (0, 3) + df.loc[:, "B"] = (1, 4) + df["C"] = (2, 5) + expected = DataFrame( + [ + [0, 1, 2, 3, 4, 5], + [np.nan, 1, 2, np.nan, 4, 5], + [np.nan, 1, 2, np.nan, 4, 5], + ], + dtype="object", + ) + + # set these with unique columns to be extra-unambiguous + expected[2] = expected[2].astype(np.int64) + expected[5] = expected[5].astype(np.int64) + expected.columns = cols + + tm.assert_frame_equal(df, expected) + + def test_setitem_frame_duplicate_columns_size_mismatch(self): + # GH#39510 + cols = ["A", "B", "C"] * 2 + df = DataFrame(index=range(3), columns=cols) + with pytest.raises(ValueError, match="Columns 
must be same length as key"): + df[["A"]] = (0, 3, 5) + + df2 = df.iloc[:, :3] # unique columns + with pytest.raises(ValueError, match="Columns must be same length as key"): + df2[["A"]] = (0, 3, 5) + + @pytest.mark.parametrize("cols", [["a", "b", "c"], ["a", "a", "a"]]) + def test_setitem_df_wrong_column_number(self, cols): + # GH#38604 + df = DataFrame([[1, 2, 3]], columns=cols) + rhs = DataFrame([[10, 11]], columns=["d", "e"]) + msg = "Columns must be same length as key" + with pytest.raises(ValueError, match=msg): + df["a"] = rhs + + def test_setitem_listlike_indexer_duplicate_columns(self): + # GH#38604 + df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"]) + rhs = DataFrame([[10, 11, 12]], columns=["a", "b", "b"]) + df[["a", "b"]] = rhs + expected = DataFrame([[10, 11, 12]], columns=["a", "b", "b"]) + tm.assert_frame_equal(df, expected) + + df[["c", "b"]] = rhs + expected = DataFrame([[10, 11, 12, 10]], columns=["a", "b", "b", "c"]) + tm.assert_frame_equal(df, expected) + + def test_setitem_listlike_indexer_duplicate_columns_not_equal_length(self): + # GH#39403 + df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"]) + rhs = DataFrame([[10, 11]], columns=["a", "b"]) + msg = "Columns must be same length as key" + with pytest.raises(ValueError, match=msg): + df[["a", "b"]] = rhs + + def test_setitem_intervals(self): + df = DataFrame({"A": range(10)}) + ser = cut(df["A"], 5) + assert isinstance(ser.cat.categories, IntervalIndex) + + # B & D end up as Categoricals + # the remainder are converted to in-line objects + # containing an IntervalIndex.values + df["B"] = ser + df["C"] = np.array(ser) + df["D"] = ser.values + df["E"] = np.array(ser.values) + df["F"] = ser.astype(object) + + assert isinstance(df["B"].dtype, CategoricalDtype) + assert isinstance(df["B"].cat.categories.dtype, IntervalDtype) + assert isinstance(df["D"].dtype, CategoricalDtype) + assert isinstance(df["D"].cat.categories.dtype, IntervalDtype) + + # These go through the Series constructor and so get inferred back + # to IntervalDtype + assert isinstance(df["C"].dtype, IntervalDtype) + assert isinstance(df["E"].dtype, IntervalDtype) + + # But the Series constructor doesn't do inference on Series objects, + # so setting df["F"] doesn't get cast back to IntervalDtype + assert is_object_dtype(df["F"]) + + # they compare equal as Index + # when converted to numpy objects + c = lambda x: Index(np.array(x)) + tm.assert_index_equal(c(df.B), c(df.B)) + tm.assert_index_equal(c(df.B), c(df.C), check_names=False) + tm.assert_index_equal(c(df.B), c(df.D), check_names=False) + tm.assert_index_equal(c(df.C), c(df.D), check_names=False) + + # B & D are the same Series + tm.assert_series_equal(df["B"], df["B"]) + tm.assert_series_equal(df["B"], df["D"], check_names=False) + + # C & E are the same Series + tm.assert_series_equal(df["C"], df["C"]) + tm.assert_series_equal(df["C"], df["E"], check_names=False) + + def test_setitem_categorical(self): + # GH#35369 + df = DataFrame({"h": Series(list("mn")).astype("category")}) + df.h = df.h.cat.reorder_categories(["n", "m"]) + expected = DataFrame( + {"h": Categorical(["m", "n"]).reorder_categories(["n", "m"])} + ) + tm.assert_frame_equal(df, expected) + + def test_setitem_with_empty_listlike(self): + # GH#17101 + index = Index([], name="idx") + result = DataFrame(columns=["A"], index=index) + result["A"] = [] + expected = DataFrame(columns=["A"], index=index) + tm.assert_index_equal(result.index, expected.index) + + @pytest.mark.parametrize( + "cols, values, expected", + [ + (["C", "D", "D", 
"a"], [1, 2, 3, 4], 4), # with duplicates + (["D", "C", "D", "a"], [1, 2, 3, 4], 4), # mixed order + (["C", "B", "B", "a"], [1, 2, 3, 4], 4), # other duplicate cols + (["C", "B", "a"], [1, 2, 3], 3), # no duplicates + (["B", "C", "a"], [3, 2, 1], 1), # alphabetical order + (["C", "a", "B"], [3, 2, 1], 2), # in the middle + ], + ) + def test_setitem_same_column(self, cols, values, expected): + # GH#23239 + df = DataFrame([values], columns=cols) + df["a"] = df["a"] + result = df["a"].values[0] + assert result == expected + + def test_setitem_multi_index(self): + # GH#7655, test that assigning to a sub-frame of a frame + # with multi-index columns aligns both rows and columns + it = ["jim", "joe", "jolie"], ["first", "last"], ["left", "center", "right"] + + cols = MultiIndex.from_product(it) + index = date_range("20141006", periods=20) + vals = np.random.default_rng(2).integers(1, 1000, (len(index), len(cols))) + df = DataFrame(vals, columns=cols, index=index) + + i, j = df.index.values.copy(), it[-1][:] + + np.random.default_rng(2).shuffle(i) + df["jim"] = df["jolie"].loc[i, ::-1] + tm.assert_frame_equal(df["jim"], df["jolie"]) + + np.random.default_rng(2).shuffle(j) + df[("joe", "first")] = df[("jolie", "last")].loc[i, j] + tm.assert_frame_equal(df[("joe", "first")], df[("jolie", "last")]) + + np.random.default_rng(2).shuffle(j) + df[("joe", "last")] = df[("jolie", "first")].loc[i, j] + tm.assert_frame_equal(df[("joe", "last")], df[("jolie", "first")]) + + @pytest.mark.parametrize( + "columns,box,expected", + [ + ( + ["A", "B", "C", "D"], + 7, + DataFrame( + [[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]], + columns=["A", "B", "C", "D"], + ), + ), + ( + ["C", "D"], + [7, 8], + DataFrame( + [[1, 2, 7, 8], [3, 4, 7, 8], [5, 6, 7, 8]], + columns=["A", "B", "C", "D"], + ), + ), + ( + ["A", "B", "C"], + np.array([7, 8, 9], dtype=np.int64), + DataFrame([[7, 8, 9], [7, 8, 9], [7, 8, 9]], columns=["A", "B", "C"]), + ), + ( + ["B", "C", "D"], + [[7, 8, 9], [10, 11, 12], [13, 14, 15]], + DataFrame( + [[1, 7, 8, 9], [3, 10, 11, 12], [5, 13, 14, 15]], + columns=["A", "B", "C", "D"], + ), + ), + ( + ["C", "A", "D"], + np.array([[7, 8, 9], [10, 11, 12], [13, 14, 15]], dtype=np.int64), + DataFrame( + [[8, 2, 7, 9], [11, 4, 10, 12], [14, 6, 13, 15]], + columns=["A", "B", "C", "D"], + ), + ), + ( + ["A", "C"], + DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]), + DataFrame( + [[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=["A", "B", "C"] + ), + ), + ], + ) + def test_setitem_list_missing_columns(self, columns, box, expected): + # GH#29334 + df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"]) + df[columns] = box + tm.assert_frame_equal(df, expected) + + def test_setitem_list_of_tuples(self, float_frame): + tuples = list(zip(float_frame["A"], float_frame["B"])) + float_frame["tuples"] = tuples + + result = float_frame["tuples"] + expected = Series(tuples, index=float_frame.index, name="tuples") + tm.assert_series_equal(result, expected) + + def test_setitem_iloc_generator(self): + # GH#39614 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + indexer = (x for x in [1, 2]) + df.iloc[indexer] = 1 + expected = DataFrame({"a": [1, 1, 1], "b": [4, 1, 1]}) + tm.assert_frame_equal(df, expected) + + def test_setitem_iloc_two_dimensional_generator(self): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + indexer = (x for x in [1, 2]) + df.iloc[indexer, 1] = 1 + expected = DataFrame({"a": [1, 2, 3], "b": [4, 1, 1]}) + tm.assert_frame_equal(df, expected) + + def 
test_setitem_dtypes_bytes_type_to_object(self): + # GH 20734 + index = Series(name="id", dtype="S24") + df = DataFrame(index=index) + df["a"] = Series(name="a", index=index, dtype=np.uint32) + df["b"] = Series(name="b", index=index, dtype="S64") + df["c"] = Series(name="c", index=index, dtype="S64") + df["d"] = Series(name="d", index=index, dtype=np.uint8) + result = df.dtypes + expected = Series([np.uint32, object, object, np.uint8], index=list("abcd")) + tm.assert_series_equal(result, expected) + + def test_boolean_mask_nullable_int64(self): + # GH 28928 + result = DataFrame({"a": [3, 4], "b": [5, 6]}).astype( + {"a": "int64", "b": "Int64"} + ) + mask = Series(False, index=result.index) + result.loc[mask, "a"] = result["a"] + result.loc[mask, "b"] = result["b"] + expected = DataFrame({"a": [3, 4], "b": [5, 6]}).astype( + {"a": "int64", "b": "Int64"} + ) + tm.assert_frame_equal(result, expected) + + def test_setitem_ea_dtype_rhs_series(self): + # GH#47425 + df = DataFrame({"a": [1, 2]}) + df["a"] = Series([1, 2], dtype="Int64") + expected = DataFrame({"a": [1, 2]}, dtype="Int64") + tm.assert_frame_equal(df, expected) + + # TODO(ArrayManager) set column with 2d column array, see #44788 + @td.skip_array_manager_not_yet_implemented + def test_setitem_npmatrix_2d(self): + # GH#42376 + # for use-case df["x"] = sparse.random((10, 10)).mean(axis=1) + expected = DataFrame( + {"np-array": np.ones(10), "np-matrix": np.ones(10)}, index=np.arange(10) + ) + + a = np.ones((10, 1)) + df = DataFrame(index=np.arange(10)) + df["np-array"] = a + + # Instantiation of `np.matrix` gives PendingDeprecationWarning + with tm.assert_produces_warning(PendingDeprecationWarning): + df["np-matrix"] = np.matrix(a) + + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("vals", [{}, {"d": "a"}]) + def test_setitem_aligning_dict_with_index(self, vals): + # GH#47216 + df = DataFrame({"a": [1, 2], "b": [3, 4], **vals}) + df.loc[:, "a"] = {1: 100, 0: 200} + df.loc[:, "c"] = {0: 5, 1: 6} + df.loc[:, "e"] = {1: 5} + expected = DataFrame( + {"a": [200, 100], "b": [3, 4], **vals, "c": [5, 6], "e": [np.nan, 5]} + ) + tm.assert_frame_equal(df, expected) + + def test_setitem_rhs_dataframe(self): + # GH#47578 + df = DataFrame({"a": [1, 2]}) + df["a"] = DataFrame({"a": [10, 11]}, index=[1, 2]) + expected = DataFrame({"a": [np.nan, 10]}) + tm.assert_frame_equal(df, expected) + + df = DataFrame({"a": [1, 2]}) + df.isetitem(0, DataFrame({"a": [10, 11]}, index=[1, 2])) + tm.assert_frame_equal(df, expected) + + def test_setitem_frame_overwrite_with_ea_dtype(self, any_numeric_ea_dtype): + # GH#46896 + df = DataFrame(columns=["a", "b"], data=[[1, 2], [3, 4]]) + df["a"] = DataFrame({"a": [10, 11]}, dtype=any_numeric_ea_dtype) + expected = DataFrame( + { + "a": Series([10, 11], dtype=any_numeric_ea_dtype), + "b": [2, 4], + } + ) + tm.assert_frame_equal(df, expected) + + def test_setitem_string_option_object_index(self): + # GH#55638 + pytest.importorskip("pyarrow") + df = DataFrame({"a": [1, 2]}) + with pd.option_context("future.infer_string", True): + df["b"] = Index(["a", "b"], dtype=object) + expected = DataFrame({"a": [1, 2], "b": Series(["a", "b"], dtype=object)}) + tm.assert_frame_equal(df, expected) + + def test_setitem_frame_midx_columns(self): + # GH#49121 + df = DataFrame({("a", "b"): [10]}) + expected = df.copy() + col_name = ("a", "b") + df[col_name] = df[[col_name]] + tm.assert_frame_equal(df, expected) + + +class TestSetitemTZAwareValues: + @pytest.fixture + def idx(self): + naive = DatetimeIndex(["2013-1-1 13:00", 
"2013-1-2 14:00"], name="B") + idx = naive.tz_localize("US/Pacific") + return idx + + @pytest.fixture + def expected(self, idx): + expected = Series(np.array(idx.tolist(), dtype="object"), name="B") + assert expected.dtype == idx.dtype + return expected + + def test_setitem_dt64series(self, idx, expected): + # convert to utc + df = DataFrame(np.random.default_rng(2).standard_normal((2, 1)), columns=["A"]) + df["B"] = idx + df["B"] = idx.to_series(index=[0, 1]).dt.tz_convert(None) + + result = df["B"] + comp = Series(idx.tz_convert("UTC").tz_localize(None), name="B") + tm.assert_series_equal(result, comp) + + def test_setitem_datetimeindex(self, idx, expected): + # setting a DataFrame column with a tzaware DTI retains the dtype + df = DataFrame(np.random.default_rng(2).standard_normal((2, 1)), columns=["A"]) + + # assign to frame + df["B"] = idx + result = df["B"] + tm.assert_series_equal(result, expected) + + def test_setitem_object_array_of_tzaware_datetimes(self, idx, expected): + # setting a DataFrame column with a tzaware DTI retains the dtype + df = DataFrame(np.random.default_rng(2).standard_normal((2, 1)), columns=["A"]) + + # object array of datetimes with a tz + df["B"] = idx.to_pydatetime() + result = df["B"] + tm.assert_series_equal(result, expected) + + +class TestDataFrameSetItemWithExpansion: + def test_setitem_listlike_views(self, using_copy_on_write): + # GH#38148 + df = DataFrame({"a": [1, 2, 3], "b": [4, 4, 6]}) + + # get one column as a view of df + ser = df["a"] + + # add columns with list-like indexer + df[["c", "d"]] = np.array([[0.1, 0.2], [0.3, 0.4], [0.4, 0.5]]) + + # edit in place the first column to check view semantics + df.iloc[0, 0] = 100 + + if using_copy_on_write: + expected = Series([1, 2, 3], name="a") + else: + expected = Series([100, 2, 3], name="a") + tm.assert_series_equal(ser, expected) + + def test_setitem_string_column_numpy_dtype_raising(self): + # GH#39010 + df = DataFrame([[1, 2], [3, 4]]) + df["0 - Name"] = [5, 6] + expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"]) + tm.assert_frame_equal(df, expected) + + def test_setitem_empty_df_duplicate_columns(self, using_copy_on_write): + # GH#38521 + df = DataFrame(columns=["a", "b", "b"], dtype="float64") + df.loc[:, "a"] = list(range(2)) + expected = DataFrame( + [[0, np.nan, np.nan], [1, np.nan, np.nan]], columns=["a", "b", "b"] + ) + tm.assert_frame_equal(df, expected) + + def test_setitem_with_expansion_categorical_dtype(self): + # assignment + df = DataFrame( + { + "value": np.array( + np.random.default_rng(2).integers(0, 10000, 100), dtype="int32" + ) + } + ) + labels = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)]) + + df = df.sort_values(by=["value"], ascending=True) + ser = cut(df.value, range(0, 10500, 500), right=False, labels=labels) + cat = ser.values + + # setting with a Categorical + df["D"] = cat + str(df) + + result = df.dtypes + expected = Series( + [np.dtype("int32"), CategoricalDtype(categories=labels, ordered=False)], + index=["value", "D"], + ) + tm.assert_series_equal(result, expected) + + # setting with a Series + df["E"] = ser + str(df) + + result = df.dtypes + expected = Series( + [ + np.dtype("int32"), + CategoricalDtype(categories=labels, ordered=False), + CategoricalDtype(categories=labels, ordered=False), + ], + index=["value", "D", "E"], + ) + tm.assert_series_equal(result, expected) + + result1 = df["D"] + result2 = df["E"] + tm.assert_categorical_equal(result1._mgr.array, cat) + + # sorting + ser.name = "E" + 
tm.assert_series_equal(result2.sort_index(), ser.sort_index()) + + def test_setitem_scalars_no_index(self): + # GH#16823 / GH#17894 + df = DataFrame() + df["foo"] = 1 + expected = DataFrame(columns=["foo"]).astype(np.int64) + tm.assert_frame_equal(df, expected) + + def test_setitem_newcol_tuple_key(self, float_frame): + assert ( + "A", + "B", + ) not in float_frame.columns + float_frame["A", "B"] = float_frame["A"] + assert ("A", "B") in float_frame.columns + + result = float_frame["A", "B"] + expected = float_frame["A"] + tm.assert_series_equal(result, expected, check_names=False) + + def test_frame_setitem_newcol_timestamp(self): + # GH#2155 + columns = date_range(start="1/1/2012", end="2/1/2012", freq=BDay()) + data = DataFrame(columns=columns, index=range(10)) + t = datetime(2012, 11, 1) + ts = Timestamp(t) + data[ts] = np.nan # works, mostly a smoke-test + assert np.isnan(data[ts]).all() + + def test_frame_setitem_rangeindex_into_new_col(self): + # GH#47128 + df = DataFrame({"a": ["a", "b"]}) + df["b"] = df.index + df.loc[[False, True], "b"] = 100 + result = df.loc[[1], :] + expected = DataFrame({"a": ["b"], "b": [100]}, index=[1]) + tm.assert_frame_equal(result, expected) + + def test_setitem_frame_keep_ea_dtype(self, any_numeric_ea_dtype): + # GH#46896 + df = DataFrame(columns=["a", "b"], data=[[1, 2], [3, 4]]) + df["c"] = DataFrame({"a": [10, 11]}, dtype=any_numeric_ea_dtype) + expected = DataFrame( + { + "a": [1, 3], + "b": [2, 4], + "c": Series([10, 11], dtype=any_numeric_ea_dtype), + } + ) + tm.assert_frame_equal(df, expected) + + def test_loc_expansion_with_timedelta_type(self): + result = DataFrame(columns=list("abc")) + result.loc[0] = { + "a": pd.to_timedelta(5, unit="s"), + "b": pd.to_timedelta(72, unit="s"), + "c": "23", + } + expected = DataFrame( + [[pd.Timedelta("0 days 00:00:05"), pd.Timedelta("0 days 00:01:12"), "23"]], + index=Index([0]), + columns=(["a", "b", "c"]), + ) + tm.assert_frame_equal(result, expected) + + +class TestDataFrameSetItemSlicing: + def test_setitem_slice_position(self): + # GH#31469 + df = DataFrame(np.zeros((100, 1))) + df[-4:] = 1 + arr = np.zeros((100, 1)) + arr[-4:] = 1 + expected = DataFrame(arr) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("indexer", [tm.setitem, tm.iloc]) + @pytest.mark.parametrize("box", [Series, np.array, list, pd.array]) + @pytest.mark.parametrize("n", [1, 2, 3]) + def test_setitem_slice_indexer_broadcasting_rhs(self, n, box, indexer): + # GH#40440 + df = DataFrame([[1, 3, 5]] + [[2, 4, 6]] * n, columns=["a", "b", "c"]) + indexer(df)[1:] = box([10, 11, 12]) + expected = DataFrame([[1, 3, 5]] + [[10, 11, 12]] * n, columns=["a", "b", "c"]) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("box", [Series, np.array, list, pd.array]) + @pytest.mark.parametrize("n", [1, 2, 3]) + def test_setitem_list_indexer_broadcasting_rhs(self, n, box): + # GH#40440 + df = DataFrame([[1, 3, 5]] + [[2, 4, 6]] * n, columns=["a", "b", "c"]) + df.iloc[list(range(1, n + 1))] = box([10, 11, 12]) + expected = DataFrame([[1, 3, 5]] + [[10, 11, 12]] * n, columns=["a", "b", "c"]) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("indexer", [tm.setitem, tm.iloc]) + @pytest.mark.parametrize("box", [Series, np.array, list, pd.array]) + @pytest.mark.parametrize("n", [1, 2, 3]) + def test_setitem_slice_broadcasting_rhs_mixed_dtypes(self, n, box, indexer): + # GH#40440 + df = DataFrame( + [[1, 3, 5], ["x", "y", "z"]] + [[2, 4, 6]] * n, columns=["a", "b", "c"] + ) + indexer(df)[1:] = box([10, 11, 
12]) + expected = DataFrame( + [[1, 3, 5]] + [[10, 11, 12]] * (n + 1), + columns=["a", "b", "c"], + dtype="object", + ) + tm.assert_frame_equal(df, expected) + + +class TestDataFrameSetItemCallable: + def test_setitem_callable(self): + # GH#12533 + df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]}) + df[lambda x: "A"] = [11, 12, 13, 14] + + exp = DataFrame({"A": [11, 12, 13, 14], "B": [5, 6, 7, 8]}) + tm.assert_frame_equal(df, exp) + + def test_setitem_other_callable(self): + # GH#13299 + def inc(x): + return x + 1 + + # Set dtype object straight away to avoid upcast when setting inc below + df = DataFrame([[-1, 1], [1, -1]], dtype=object) + df[df > 0] = inc + + expected = DataFrame([[-1, inc], [inc, -1]]) + tm.assert_frame_equal(df, expected) + + +class TestDataFrameSetItemBooleanMask: + @td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values + @pytest.mark.parametrize( + "mask_type", + [lambda df: df > np.abs(df) / 2, lambda df: (df > np.abs(df) / 2).values], + ids=["dataframe", "array"], + ) + def test_setitem_boolean_mask(self, mask_type, float_frame): + # Test for issue #18582 + df = float_frame.copy() + mask = mask_type(df) + + # index with boolean mask + result = df.copy() + result[mask] = np.nan + + expected = df.values.copy() + expected[np.array(mask)] = np.nan + expected = DataFrame(expected, index=df.index, columns=df.columns) + tm.assert_frame_equal(result, expected) + + @pytest.mark.xfail(reason="Currently empty indexers are treated as all False") + @pytest.mark.parametrize("box", [list, np.array, Series]) + def test_setitem_loc_empty_indexer_raises_with_non_empty_value(self, box): + # GH#37672 + df = DataFrame({"a": ["a"], "b": [1], "c": [1]}) + if box == Series: + indexer = box([], dtype="object") + else: + indexer = box([]) + msg = "Must have equal len keys and value when setting with an iterable" + with pytest.raises(ValueError, match=msg): + df.loc[indexer, ["b"]] = [1] + + @pytest.mark.parametrize("box", [list, np.array, Series]) + def test_setitem_loc_only_false_indexer_dtype_changed(self, box): + # GH#37550 + # Dtype is only changed when value to set is a Series and indexer is + # empty/bool all False + df = DataFrame({"a": ["a"], "b": [1], "c": [1]}) + indexer = box([False]) + df.loc[indexer, ["b"]] = 10 - df["c"] + expected = DataFrame({"a": ["a"], "b": [1], "c": [1]}) + tm.assert_frame_equal(df, expected) + + df.loc[indexer, ["b"]] = 9 + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("indexer", [tm.setitem, tm.loc]) + def test_setitem_boolean_mask_aligning(self, indexer): + # GH#39931 + df = DataFrame({"a": [1, 4, 2, 3], "b": [5, 6, 7, 8]}) + expected = df.copy() + mask = df["a"] >= 3 + indexer(df)[mask] = indexer(df)[mask].sort_values("a") + tm.assert_frame_equal(df, expected) + + def test_setitem_mask_categorical(self): + # assign multiple rows (mixed values) (-> array) -> exp_multi_row + # changed multiple rows + cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"]) + idx2 = Index(["h", "i", "j", "k", "l", "m", "n"]) + values2 = [1, 1, 2, 2, 1, 1, 1] + exp_multi_row = DataFrame({"cats": cats2, "values": values2}, index=idx2) + + catsf = Categorical( + ["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"] + ) + idxf = Index(["h", "i", "j", "k", "l", "m", "n"]) + valuesf = [1, 1, 3, 3, 1, 1, 1] + df = DataFrame({"cats": catsf, "values": valuesf}, index=idxf) + + exp_fancy = exp_multi_row.copy() + exp_fancy["cats"] = exp_fancy["cats"].cat.set_categories(["a", "b", "c"]) + + mask = 
df["cats"] == "c" + df[mask] = ["b", 2] + # category c is kept in .categories + tm.assert_frame_equal(df, exp_fancy) + + @pytest.mark.parametrize("dtype", ["float", "int64"]) + @pytest.mark.parametrize("kwargs", [{}, {"index": [1]}, {"columns": ["A"]}]) + def test_setitem_empty_frame_with_boolean(self, dtype, kwargs): + # see GH#10126 + kwargs["dtype"] = dtype + df = DataFrame(**kwargs) + + df2 = df.copy() + df[df > df2] = 47 + tm.assert_frame_equal(df, df2) + + def test_setitem_boolean_indexing(self): + idx = list(range(3)) + cols = ["A", "B", "C"] + df1 = DataFrame( + index=idx, + columns=cols, + data=np.array( + [[0.0, 0.5, 1.0], [1.5, 2.0, 2.5], [3.0, 3.5, 4.0]], dtype=float + ), + ) + df2 = DataFrame(index=idx, columns=cols, data=np.ones((len(idx), len(cols)))) + + expected = DataFrame( + index=idx, + columns=cols, + data=np.array([[0.0, 0.5, 1.0], [1.5, 2.0, -1], [-1, -1, -1]], dtype=float), + ) + + df1[df1 > 2.0 * df2] = -1 + tm.assert_frame_equal(df1, expected) + with pytest.raises(ValueError, match="Item wrong length"): + df1[df1.index[:-1] > 2] = -1 + + def test_loc_setitem_all_false_boolean_two_blocks(self): + # GH#40885 + df = DataFrame({"a": [1, 2], "b": [3, 4], "c": "a"}) + expected = df.copy() + indexer = Series([False, False], name="c") + df.loc[indexer, ["b"]] = DataFrame({"b": [5, 6]}, index=[0, 1]) + tm.assert_frame_equal(df, expected) + + def test_setitem_ea_boolean_mask(self): + # GH#47125 + df = DataFrame([[-1, 2], [3, -4]]) + expected = DataFrame([[0, 2], [3, 0]]) + boolean_indexer = DataFrame( + { + 0: Series([True, False], dtype="boolean"), + 1: Series([pd.NA, True], dtype="boolean"), + } + ) + df[boolean_indexer] = 0 + tm.assert_frame_equal(df, expected) + + +class TestDataFrameSetitemCopyViewSemantics: + def test_setitem_always_copy(self, float_frame): + assert "E" not in float_frame.columns + s = float_frame["A"].copy() + float_frame["E"] = s + + float_frame.iloc[5:10, float_frame.columns.get_loc("E")] = np.nan + assert notna(s[5:10]).all() + + @pytest.mark.parametrize("consolidate", [True, False]) + def test_setitem_partial_column_inplace( + self, consolidate, using_array_manager, using_copy_on_write + ): + # This setting should be in-place, regardless of whether frame is + # single-block or multi-block + # GH#304 this used to be incorrectly not-inplace, in which case + # we needed to ensure _item_cache was cleared. 
+ + df = DataFrame( + {"x": [1.1, 2.1, 3.1, 4.1], "y": [5.1, 6.1, 7.1, 8.1]}, index=[0, 1, 2, 3] + ) + df.insert(2, "z", np.nan) + if not using_array_manager: + if consolidate: + df._consolidate_inplace() + assert len(df._mgr.blocks) == 1 + else: + assert len(df._mgr.blocks) == 2 + + zvals = df["z"]._values + + df.loc[2:, "z"] = 42 + + expected = Series([np.nan, np.nan, 42, 42], index=df.index, name="z") + tm.assert_series_equal(df["z"], expected) + + # check setting occurred in-place + if not using_copy_on_write: + tm.assert_numpy_array_equal(zvals, expected.values) + assert np.shares_memory(zvals, df["z"]._values) + + def test_setitem_duplicate_columns_not_inplace(self): + # GH#39510 + cols = ["A", "B"] * 2 + df = DataFrame(0.0, index=[0], columns=cols) + df_copy = df.copy() + df_view = df[:] + df["B"] = (2, 5) + + expected = DataFrame([[0.0, 2, 0.0, 5]], columns=cols) + tm.assert_frame_equal(df_view, df_copy) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "value", [1, np.array([[1], [1]], dtype="int64"), [[1], [1]]] + ) + def test_setitem_same_dtype_not_inplace(self, value, using_array_manager): + # GH#39510 + cols = ["A", "B"] + df = DataFrame(0, index=[0, 1], columns=cols) + df_copy = df.copy() + df_view = df[:] + df[["B"]] = value + + expected = DataFrame([[0, 1], [0, 1]], columns=cols) + tm.assert_frame_equal(df, expected) + tm.assert_frame_equal(df_view, df_copy) + + @pytest.mark.parametrize("value", [1.0, np.array([[1.0], [1.0]]), [[1.0], [1.0]]]) + def test_setitem_listlike_key_scalar_value_not_inplace(self, value): + # GH#39510 + cols = ["A", "B"] + df = DataFrame(0, index=[0, 1], columns=cols) + df_copy = df.copy() + df_view = df[:] + df[["B"]] = value + + expected = DataFrame([[0, 1.0], [0, 1.0]], columns=cols) + tm.assert_frame_equal(df_view, df_copy) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "indexer", + [ + "a", + ["a"], + pytest.param( + [True, False], + marks=pytest.mark.xfail( + reason="Boolean indexer incorrectly setting inplace", + strict=False, # passing on some builds, no obvious pattern + ), + ), + ], + ) + @pytest.mark.parametrize( + "value, set_value", + [ + (1, 5), + (1.0, 5.0), + (Timestamp("2020-12-31"), Timestamp("2021-12-31")), + ("a", "b"), + ], + ) + def test_setitem_not_operating_inplace(self, value, set_value, indexer): + # GH#43406 + df = DataFrame({"a": value}, index=[0, 1]) + expected = df.copy() + view = df[:] + df[indexer] = set_value + tm.assert_frame_equal(view, expected) + + @td.skip_array_manager_invalid_test + def test_setitem_column_update_inplace(self, using_copy_on_write): + # https://github.com/pandas-dev/pandas/issues/47172 + + labels = [f"c{i}" for i in range(10)] + df = DataFrame({col: np.zeros(len(labels)) for col in labels}, index=labels) + values = df._mgr.blocks[0].values + + if not using_copy_on_write: + for label in df.columns: + df[label][label] = 1 + + # diagonal values all updated + assert np.all(values[np.arange(10), np.arange(10)] == 1) + else: + with tm.raises_chained_assignment_error(): + for label in df.columns: + df[label][label] = 1 + # original dataframe not updated + assert np.all(values[np.arange(10), np.arange(10)] == 0) + + def test_setitem_column_frame_as_category(self): + # GH31581 + df = DataFrame([1, 2, 3]) + df["col1"] = DataFrame([1, 2, 3], dtype="category") + df["col2"] = Series([1, 2, 3], dtype="category") + + expected_types = Series( + ["int64", "category", "category"], index=[0, "col1", "col2"] + ) + tm.assert_series_equal(df.dtypes, expected_types) + + 
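A minimal sketch of the copy/view semantics the tests above assert (illustrative only, separate from the vendored test file; the variable names are ours): a plain df[key] = value binds a new column array rather than writing into the one an earlier view shares, so the view keeps its old data.

import pandas as pd

df = pd.DataFrame({"A": [0, 0], "B": [0, 0]})
view = df[:]      # shallow view taken before the assignment
df["B"] = [2, 5]  # replaces column "B" wholesale, not in place

assert view["B"].tolist() == [0, 0]  # the earlier view is unchanged
assert df["B"].tolist() == [2, 5]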
@pytest.mark.parametrize("dtype", ["int64", "Int64"]) + def test_setitem_iloc_with_numpy_array(self, dtype): + # GH-33828 + df = DataFrame({"a": np.ones(3)}, dtype=dtype) + df.iloc[np.array([0]), np.array([0])] = np.array([[2]]) + + expected = DataFrame({"a": [2, 1, 1]}, dtype=dtype) + tm.assert_frame_equal(df, expected) + + def test_setitem_frame_dup_cols_dtype(self): + # GH#53143 + df = DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=["a", "b", "a", "c"]) + rhs = DataFrame([[0, 1.5], [2, 2.5]], columns=["a", "a"]) + df["a"] = rhs + expected = DataFrame( + [[0, 2, 1.5, 4], [2, 5, 2.5, 7]], columns=["a", "b", "a", "c"] + ) + tm.assert_frame_equal(df, expected) + + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"]) + rhs = DataFrame([[0, 1.5], [2, 2.5]], columns=["a", "a"]) + df["a"] = rhs + expected = DataFrame([[0, 1.5, 3], [2, 2.5, 6]], columns=["a", "a", "b"]) + tm.assert_frame_equal(df, expected) + + def test_frame_setitem_empty_dataframe(self): + # GH#28871 + df = DataFrame({"date": [datetime(2000, 1, 1)]}).set_index("date") + df = df[0:0].copy() + + df["3010"] = None + df["2010"] = None + + expected = DataFrame( + [], + columns=["3010", "2010"], + index=Index([], dtype="datetime64[ns]", name="date"), + ) + tm.assert_frame_equal(df, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_take.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_take.py new file mode 100644 index 00000000..8c172314 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_take.py @@ -0,0 +1,92 @@ +import pytest + +import pandas._testing as tm + + +class TestDataFrameTake: + def test_take_slices_deprecated(self, float_frame): + # GH#51539 + df = float_frame + + slc = slice(0, 4, 1) + with tm.assert_produces_warning(FutureWarning): + df.take(slc, axis=0) + with tm.assert_produces_warning(FutureWarning): + df.take(slc, axis=1) + + def test_take(self, float_frame): + # homogeneous + order = [3, 1, 2, 0] + for df in [float_frame]: + result = df.take(order, axis=0) + expected = df.reindex(df.index.take(order)) + tm.assert_frame_equal(result, expected) + + # axis = 1 + result = df.take(order, axis=1) + expected = df.loc[:, ["D", "B", "C", "A"]] + tm.assert_frame_equal(result, expected, check_names=False) + + # negative indices + order = [2, 1, -1] + for df in [float_frame]: + result = df.take(order, axis=0) + expected = df.reindex(df.index.take(order)) + tm.assert_frame_equal(result, expected) + + result = df.take(order, axis=0) + tm.assert_frame_equal(result, expected) + + # axis = 1 + result = df.take(order, axis=1) + expected = df.loc[:, ["C", "B", "D"]] + tm.assert_frame_equal(result, expected, check_names=False) + + # illegal indices + msg = "indices are out-of-bounds" + with pytest.raises(IndexError, match=msg): + df.take([3, 1, 2, 30], axis=0) + with pytest.raises(IndexError, match=msg): + df.take([3, 1, 2, -31], axis=0) + with pytest.raises(IndexError, match=msg): + df.take([3, 1, 2, 5], axis=1) + with pytest.raises(IndexError, match=msg): + df.take([3, 1, 2, -5], axis=1) + + def test_take_mixed_type(self, float_string_frame): + # mixed-dtype + order = [4, 1, 2, 0, 3] + for df in [float_string_frame]: + result = df.take(order, axis=0) + expected = df.reindex(df.index.take(order)) + tm.assert_frame_equal(result, expected) + + # axis = 1 + result = df.take(order, axis=1) + expected = df.loc[:, ["foo", "B", "C", "A", "D"]] + tm.assert_frame_equal(result, expected) + + # negative indices + order = 
[4, 1, -2] + for df in [float_string_frame]: + result = df.take(order, axis=0) + expected = df.reindex(df.index.take(order)) + tm.assert_frame_equal(result, expected) + + # axis = 1 + result = df.take(order, axis=1) + expected = df.loc[:, ["foo", "B", "D"]] + tm.assert_frame_equal(result, expected) + + def test_take_mixed_numeric(self, mixed_float_frame, mixed_int_frame): + # by dtype + order = [1, 2, 0, 3] + for df in [mixed_float_frame, mixed_int_frame]: + result = df.take(order, axis=0) + expected = df.reindex(df.index.take(order)) + tm.assert_frame_equal(result, expected) + + # axis = 1 + result = df.take(order, axis=1) + expected = df.loc[:, ["B", "C", "A", "D"]] + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_where.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_where.py new file mode 100644 index 00000000..3d3df2d7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_where.py @@ -0,0 +1,1077 @@ +from datetime import datetime + +from hypothesis import given +import numpy as np +import pytest + +from pandas.core.dtypes.common import is_scalar + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + Series, + StringDtype, + Timestamp, + date_range, + isna, +) +import pandas._testing as tm +from pandas._testing._hypothesis import OPTIONAL_ONE_OF_ALL + + +@pytest.fixture(params=["default", "float_string", "mixed_float", "mixed_int"]) +def where_frame(request, float_string_frame, mixed_float_frame, mixed_int_frame): + if request.param == "default": + return DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), columns=["A", "B", "C"] + ) + if request.param == "float_string": + return float_string_frame + if request.param == "mixed_float": + return mixed_float_frame + if request.param == "mixed_int": + return mixed_int_frame + + +def _safe_add(df): + # only add to the numeric items + def is_ok(s): + return ( + issubclass(s.dtype.type, (np.integer, np.floating)) and s.dtype != "uint8" + ) + + return DataFrame(dict((c, s + 1) if is_ok(s) else (c, s) for c, s in df.items())) + + +class TestDataFrameIndexingWhere: + def test_where_get(self, where_frame, float_string_frame): + def _check_get(df, cond, check_dtypes=True): + other1 = _safe_add(df) + rs = df.where(cond, other1) + rs2 = df.where(cond.values, other1) + for k, v in rs.items(): + exp = Series(np.where(cond[k], df[k], other1[k]), index=v.index) + tm.assert_series_equal(v, exp, check_names=False) + tm.assert_frame_equal(rs, rs2) + + # dtypes + if check_dtypes: + assert (rs.dtypes == df.dtypes).all() + + # check getting + df = where_frame + if df is float_string_frame: + msg = "'>' not supported between instances of 'str' and 'int'" + with pytest.raises(TypeError, match=msg): + df > 0 + return + cond = df > 0 + _check_get(df, cond) + + def test_where_upcasting(self): + # upcasting case (GH # 2794) + df = DataFrame( + { + c: Series([1] * 3, dtype=c) + for c in ["float32", "float64", "int32", "int64"] + } + ) + df.iloc[1, :] = 0 + result = df.dtypes + expected = Series( + [ + np.dtype("float32"), + np.dtype("float64"), + np.dtype("int32"), + np.dtype("int64"), + ], + index=["float32", "float64", "int32", "int64"], + ) + + # when we don't preserve boolean casts + # + # expected = Series({ 'float32' : 1, 'float64' : 3 }) + + tm.assert_series_equal(result, expected) + + def test_where_alignment(self, where_frame, float_string_frame): + # aligning + def 
_check_align(df, cond, other, check_dtypes=True): + rs = df.where(cond, other) + for i, k in enumerate(rs.columns): + result = rs[k] + d = df[k].values + c = cond[k].reindex(df[k].index).fillna(False).values + + if is_scalar(other): + o = other + elif isinstance(other, np.ndarray): + o = Series(other[:, i], index=result.index).values + else: + o = other[k].values + + new_values = d if c.all() else np.where(c, d, o) + expected = Series(new_values, index=result.index, name=k) + + # since we can't always have the correct numpy dtype + # as numpy doesn't know how to downcast, don't check + tm.assert_series_equal(result, expected, check_dtype=False) + + # dtypes + # can't check dtype when other is an ndarray + + if check_dtypes and not isinstance(other, np.ndarray): + assert (rs.dtypes == df.dtypes).all() + + df = where_frame + if df is float_string_frame: + msg = "'>' not supported between instances of 'str' and 'int'" + with pytest.raises(TypeError, match=msg): + df > 0 + return + + # other is a frame + cond = (df > 0)[1:] + _check_align(df, cond, _safe_add(df)) + + # check other is ndarray + cond = df > 0 + _check_align(df, cond, (_safe_add(df).values)) + + # integers are upcast, so don't check the dtypes + cond = df > 0 + check_dtypes = all(not issubclass(s.type, np.integer) for s in df.dtypes) + _check_align(df, cond, np.nan, check_dtypes=check_dtypes) + + # Ignore deprecation warning in Python 3.12 for inverting a bool + @pytest.mark.filterwarnings("ignore::DeprecationWarning") + def test_where_invalid(self): + # invalid conditions + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), columns=["A", "B", "C"] + ) + cond = df > 0 + + err1 = (df + 1).values[0:2, :] + msg = "other must be the same shape as self when an ndarray" + with pytest.raises(ValueError, match=msg): + df.where(cond, err1) + + err2 = cond.iloc[:2, :].values + other1 = _safe_add(df) + msg = "Array conditional must be same shape as self" + with pytest.raises(ValueError, match=msg): + df.where(err2, other1) + + with pytest.raises(ValueError, match=msg): + df.mask(True) + with pytest.raises(ValueError, match=msg): + df.mask(0) + + def test_where_set(self, where_frame, float_string_frame, mixed_int_frame): + # where inplace + + def _check_set(df, cond, check_dtypes=True): + dfi = df.copy() + econd = cond.reindex_like(df).fillna(True) + expected = dfi.mask(~econd) + + return_value = dfi.where(cond, np.nan, inplace=True) + assert return_value is None + tm.assert_frame_equal(dfi, expected) + + # dtypes (and confirm upcasts) + if check_dtypes: + for k, v in df.dtypes.items(): + if issubclass(v.type, np.integer) and not cond[k].all(): + v = np.dtype("float64") + assert dfi[k].dtype == v + + df = where_frame + if df is float_string_frame: + msg = "'>' not supported between instances of 'str' and 'int'" + with pytest.raises(TypeError, match=msg): + df > 0 + return + if df is mixed_int_frame: + df = df.astype("float64") + + cond = df > 0 + _check_set(df, cond) + + cond = df >= 0 + _check_set(df, cond) + + # aligning + cond = (df >= 0)[1:] + _check_set(df, cond) + + def test_where_series_slicing(self): + # GH 10218 + # test DataFrame.where with Series slicing + df = DataFrame({"a": range(3), "b": range(4, 7)}) + result = df.where(df["a"] == 1) + expected = df[df["a"] == 1].reindex(df.index) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("klass", [list, tuple, np.array]) + def test_where_array_like(self, klass): + # see gh-15414 + df = DataFrame({"a": [1, 2, 3]}) + cond = [[False], [True], 
[True]] + expected = DataFrame({"a": [np.nan, 2, 3]}) + + result = df.where(klass(cond)) + tm.assert_frame_equal(result, expected) + + df["b"] = 2 + expected["b"] = [2, np.nan, 2] + cond = [[False, True], [True, False], [True, True]] + + result = df.where(klass(cond)) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "cond", + [ + [[1], [0], [1]], + Series([[2], [5], [7]]), + DataFrame({"a": [2, 5, 7]}), + [["True"], ["False"], ["True"]], + [[Timestamp("2017-01-01")], [pd.NaT], [Timestamp("2017-01-02")]], + ], + ) + def test_where_invalid_input_single(self, cond): + # see gh-15414: only boolean arrays accepted + df = DataFrame({"a": [1, 2, 3]}) + msg = "Boolean array expected for the condition" + + with pytest.raises(ValueError, match=msg): + df.where(cond) + + @pytest.mark.parametrize( + "cond", + [ + [[0, 1], [1, 0], [1, 1]], + Series([[0, 2], [5, 0], [4, 7]]), + [["False", "True"], ["True", "False"], ["True", "True"]], + DataFrame({"a": [2, 5, 7], "b": [4, 8, 9]}), + [ + [pd.NaT, Timestamp("2017-01-01")], + [Timestamp("2017-01-02"), pd.NaT], + [Timestamp("2017-01-03"), Timestamp("2017-01-03")], + ], + ], + ) + def test_where_invalid_input_multiple(self, cond): + # see gh-15414: only boolean arrays accepted + df = DataFrame({"a": [1, 2, 3], "b": [2, 2, 2]}) + msg = "Boolean array expected for the condition" + + with pytest.raises(ValueError, match=msg): + df.where(cond) + + def test_where_dataframe_col_match(self): + df = DataFrame([[1, 2, 3], [4, 5, 6]]) + cond = DataFrame([[True, False, True], [False, False, True]]) + + result = df.where(cond) + expected = DataFrame([[1.0, np.nan, 3], [np.nan, np.nan, 6]]) + tm.assert_frame_equal(result, expected) + + # this *does* align, though has no matching columns + cond.columns = ["a", "b", "c"] + result = df.where(cond) + expected = DataFrame(np.nan, index=df.index, columns=df.columns) + tm.assert_frame_equal(result, expected) + + def test_where_ndframe_align(self): + msg = "Array conditional must be same shape as self" + df = DataFrame([[1, 2, 3], [4, 5, 6]]) + + cond = [True] + with pytest.raises(ValueError, match=msg): + df.where(cond) + + expected = DataFrame([[1, 2, 3], [np.nan, np.nan, np.nan]]) + + out = df.where(Series(cond)) + tm.assert_frame_equal(out, expected) + + cond = np.array([False, True, False, True]) + with pytest.raises(ValueError, match=msg): + df.where(cond) + + expected = DataFrame([[np.nan, np.nan, np.nan], [4, 5, 6]]) + + out = df.where(Series(cond)) + tm.assert_frame_equal(out, expected) + + def test_where_bug(self): + # see gh-2793 + df = DataFrame( + {"a": [1.0, 2.0, 3.0, 4.0], "b": [4.0, 3.0, 2.0, 1.0]}, dtype="float64" + ) + expected = DataFrame( + {"a": [np.nan, np.nan, 3.0, 4.0], "b": [4.0, 3.0, np.nan, np.nan]}, + dtype="float64", + ) + result = df.where(df > 2, np.nan) + tm.assert_frame_equal(result, expected) + + result = df.copy() + return_value = result.where(result > 2, np.nan, inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + + def test_where_bug_mixed(self, any_signed_int_numpy_dtype): + # see gh-2793 + df = DataFrame( + { + "a": np.array([1, 2, 3, 4], dtype=any_signed_int_numpy_dtype), + "b": np.array([4.0, 3.0, 2.0, 1.0], dtype="float64"), + } + ) + + expected = DataFrame( + {"a": [-1, -1, 3, 4], "b": [4.0, 3.0, -1, -1]}, + ).astype({"a": any_signed_int_numpy_dtype, "b": "float64"}) + + result = df.where(df > 2, -1) + tm.assert_frame_equal(result, expected) + + result = df.copy() + return_value = result.where(result > 2, -1, inplace=True) + 
assert return_value is None + tm.assert_frame_equal(result, expected) + + def test_where_bug_transposition(self): + # see gh-7506 + a = DataFrame({0: [1, 2], 1: [3, 4], 2: [5, 6]}) + b = DataFrame({0: [np.nan, 8], 1: [9, np.nan], 2: [np.nan, np.nan]}) + do_not_replace = b.isna() | (a > b) + + expected = a.copy() + expected[~do_not_replace] = b + + result = a.where(do_not_replace, b) + tm.assert_frame_equal(result, expected) + + a = DataFrame({0: [4, 6], 1: [1, 0]}) + b = DataFrame({0: [np.nan, 3], 1: [3, np.nan]}) + do_not_replace = b.isna() | (a > b) + + expected = a.copy() + expected[~do_not_replace] = b + + result = a.where(do_not_replace, b) + tm.assert_frame_equal(result, expected) + + def test_where_datetime(self): + # GH 3311 + df = DataFrame( + { + "A": date_range("20130102", periods=5), + "B": date_range("20130104", periods=5), + "C": np.random.default_rng(2).standard_normal(5), + } + ) + + stamp = datetime(2013, 1, 3) + msg = "'>' not supported between instances of 'float' and 'datetime.datetime'" + with pytest.raises(TypeError, match=msg): + df > stamp + + result = df[df.iloc[:, :-1] > stamp] + + expected = df.copy() + expected.loc[[0, 1], "A"] = np.nan + + expected.loc[:, "C"] = np.nan + tm.assert_frame_equal(result, expected) + + def test_where_none(self): + # GH 4667 + # setting with None changes dtype + df = DataFrame({"series": Series(range(10))}).astype(float) + df[df > 7] = None + expected = DataFrame( + {"series": Series([0, 1, 2, 3, 4, 5, 6, 7, np.nan, np.nan])} + ) + tm.assert_frame_equal(df, expected) + + # GH 7656 + df = DataFrame( + [ + {"A": 1, "B": np.nan, "C": "Test"}, + {"A": np.nan, "B": "Test", "C": np.nan}, + ] + ) + + orig = df.copy() + + mask = ~isna(df) + df.where(mask, None, inplace=True) + expected = DataFrame( + { + "A": [1.0, np.nan], + "B": [None, "Test"], + "C": ["Test", None], + } + ) + tm.assert_frame_equal(df, expected) + + df = orig.copy() + df[~mask] = None + tm.assert_frame_equal(df, expected) + + def test_where_empty_df_and_empty_cond_having_non_bool_dtypes(self): + # see gh-21947 + df = DataFrame(columns=["a"]) + cond = df + assert (cond.dtypes == object).all() + + result = df.where(cond) + tm.assert_frame_equal(result, df) + + def test_where_align(self): + def create(): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 3))) + df.iloc[3:5, 0] = np.nan + df.iloc[4:6, 1] = np.nan + df.iloc[5:8, 2] = np.nan + return df + + # series + df = create() + expected = df.fillna(df.mean()) + result = df.where(pd.notna(df), df.mean(), axis="columns") + tm.assert_frame_equal(result, expected) + + return_value = df.where(pd.notna(df), df.mean(), inplace=True, axis="columns") + assert return_value is None + tm.assert_frame_equal(df, expected) + + df = create().fillna(0) + expected = df.apply(lambda x, y: x.where(x > 0, y), y=df[0]) + result = df.where(df > 0, df[0], axis="index") + tm.assert_frame_equal(result, expected) + result = df.where(df > 0, df[0], axis="rows") + tm.assert_frame_equal(result, expected) + + # frame + df = create() + expected = df.fillna(1) + result = df.where( + pd.notna(df), DataFrame(1, index=df.index, columns=df.columns) + ) + tm.assert_frame_equal(result, expected) + + def test_where_complex(self): + # GH 6345 + expected = DataFrame([[1 + 1j, 2], [np.nan, 4 + 1j]], columns=["a", "b"]) + df = DataFrame([[1 + 1j, 2], [5 + 1j, 4 + 1j]], columns=["a", "b"]) + df[df.abs() >= 5] = np.nan + tm.assert_frame_equal(df, expected) + + def test_where_axis(self): + # GH 9736 + df = 
DataFrame(np.random.default_rng(2).standard_normal((2, 2))) + mask = DataFrame([[False, False], [False, False]]) + ser = Series([0, 1]) + + expected = DataFrame([[0, 0], [1, 1]], dtype="float64") + result = df.where(mask, ser, axis="index") + tm.assert_frame_equal(result, expected) + + result = df.copy() + return_value = result.where(mask, ser, axis="index", inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + + expected = DataFrame([[0, 1], [0, 1]], dtype="float64") + result = df.where(mask, ser, axis="columns") + tm.assert_frame_equal(result, expected) + + result = df.copy() + return_value = result.where(mask, ser, axis="columns", inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + + def test_where_axis_with_upcast(self): + # Upcast needed + df = DataFrame([[1, 2], [3, 4]], dtype="int64") + mask = DataFrame([[False, False], [False, False]]) + ser = Series([0, np.nan]) + + expected = DataFrame([[0, 0], [np.nan, np.nan]], dtype="float64") + result = df.where(mask, ser, axis="index") + tm.assert_frame_equal(result, expected) + + result = df.copy() + with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"): + return_value = result.where(mask, ser, axis="index", inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + + expected = DataFrame([[0, np.nan], [0, np.nan]]) + result = df.where(mask, ser, axis="columns") + tm.assert_frame_equal(result, expected) + + expected = DataFrame( + { + 0: np.array([0, 0], dtype="int64"), + 1: np.array([np.nan, np.nan], dtype="float64"), + } + ) + result = df.copy() + with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"): + return_value = result.where(mask, ser, axis="columns", inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + + def test_where_axis_multiple_dtypes(self): + # Multiple dtypes (=> multiple Blocks) + df = pd.concat( + [ + DataFrame(np.random.default_rng(2).standard_normal((10, 2))), + DataFrame( + np.random.default_rng(2).integers(0, 10, size=(10, 2)), + dtype="int64", + ), + ], + ignore_index=True, + axis=1, + ) + mask = DataFrame(False, columns=df.columns, index=df.index) + s1 = Series(1, index=df.columns) + s2 = Series(2, index=df.index) + + result = df.where(mask, s1, axis="columns") + expected = DataFrame(1.0, columns=df.columns, index=df.index) + expected[2] = expected[2].astype("int64") + expected[3] = expected[3].astype("int64") + tm.assert_frame_equal(result, expected) + + result = df.copy() + return_value = result.where(mask, s1, axis="columns", inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + + result = df.where(mask, s2, axis="index") + expected = DataFrame(2.0, columns=df.columns, index=df.index) + expected[2] = expected[2].astype("int64") + expected[3] = expected[3].astype("int64") + tm.assert_frame_equal(result, expected) + + result = df.copy() + return_value = result.where(mask, s2, axis="index", inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + + # DataFrame vs DataFrame + d1 = df.copy().drop(1, axis=0) + # Explicit cast to avoid implicit cast when setting value to np.nan + expected = df.copy().astype("float") + expected.loc[1, :] = np.nan + + result = df.where(mask, d1) + tm.assert_frame_equal(result, expected) + result = df.where(mask, d1, axis="index") + tm.assert_frame_equal(result, expected) + result = df.copy() + with tm.assert_produces_warning(FutureWarning, 
match="incompatible dtype"): + return_value = result.where(mask, d1, inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + result = df.copy() + with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"): + return_value = result.where(mask, d1, inplace=True, axis="index") + assert return_value is None + tm.assert_frame_equal(result, expected) + + d2 = df.copy().drop(1, axis=1) + expected = df.copy() + expected.loc[:, 1] = np.nan + + result = df.where(mask, d2) + tm.assert_frame_equal(result, expected) + result = df.where(mask, d2, axis="columns") + tm.assert_frame_equal(result, expected) + result = df.copy() + return_value = result.where(mask, d2, inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + result = df.copy() + return_value = result.where(mask, d2, inplace=True, axis="columns") + assert return_value is None + tm.assert_frame_equal(result, expected) + + def test_where_callable(self): + # GH 12533 + df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + result = df.where(lambda x: x > 4, lambda x: x + 1) + exp = DataFrame([[2, 3, 4], [5, 5, 6], [7, 8, 9]]) + tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, df.where(df > 4, df + 1)) + + # return ndarray and scalar + result = df.where(lambda x: (x % 2 == 0).values, lambda x: 99) + exp = DataFrame([[99, 2, 99], [4, 99, 6], [99, 8, 99]]) + tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, df.where(df % 2 == 0, 99)) + + # chain + result = (df + 2).where(lambda x: x > 8, lambda x: x + 10) + exp = DataFrame([[13, 14, 15], [16, 17, 18], [9, 10, 11]]) + tm.assert_frame_equal(result, exp) + tm.assert_frame_equal(result, (df + 2).where((df + 2) > 8, (df + 2) + 10)) + + def test_where_tz_values(self, tz_naive_fixture, frame_or_series): + obj1 = DataFrame( + DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture), + columns=["date"], + ) + obj2 = DataFrame( + DatetimeIndex(["20150103", "20150104", "20150105"], tz=tz_naive_fixture), + columns=["date"], + ) + mask = DataFrame([True, True, False], columns=["date"]) + exp = DataFrame( + DatetimeIndex(["20150101", "20150102", "20150105"], tz=tz_naive_fixture), + columns=["date"], + ) + if frame_or_series is Series: + obj1 = obj1["date"] + obj2 = obj2["date"] + mask = mask["date"] + exp = exp["date"] + + result = obj1.where(mask, obj2) + tm.assert_equal(exp, result) + + def test_df_where_change_dtype(self): + # GH#16979 + df = DataFrame(np.arange(2 * 3).reshape(2, 3), columns=list("ABC")) + mask = np.array([[True, False, False], [False, False, True]]) + + result = df.where(mask) + expected = DataFrame( + [[0, np.nan, np.nan], [np.nan, np.nan, 5]], columns=list("ABC") + ) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("kwargs", [{}, {"other": None}]) + def test_df_where_with_category(self, kwargs): + # GH#16979 + data = np.arange(2 * 3, dtype=np.int64).reshape(2, 3) + df = DataFrame(data, columns=list("ABC")) + mask = np.array([[True, False, False], [False, False, True]]) + + # change type to category + df.A = df.A.astype("category") + df.B = df.B.astype("category") + df.C = df.C.astype("category") + + result = df.where(mask, **kwargs) + A = pd.Categorical([0, np.nan], categories=[0, 3]) + B = pd.Categorical([np.nan, np.nan], categories=[1, 4]) + C = pd.Categorical([np.nan, 5], categories=[2, 5]) + expected = DataFrame({"A": A, "B": B, "C": C}) + + tm.assert_frame_equal(result, expected) + + # Check Series.where while we're here + result = 
df.A.where(mask[:, 0], **kwargs) + expected = Series(A, name="A") + + tm.assert_series_equal(result, expected) + + def test_where_categorical_filtering(self): + # GH#22609 Verify filtering operations on DataFrames with categorical Series + df = DataFrame(data=[[0, 0], [1, 1]], columns=["a", "b"]) + df["b"] = df["b"].astype("category") + + result = df.where(df["a"] > 0) + # Explicitly cast to 'float' to avoid implicit cast when setting np.nan + expected = df.copy().astype({"a": "float"}) + expected.loc[0, :] = np.nan + + tm.assert_equal(result, expected) + + def test_where_ea_other(self): + # GH#38729/GH#38742 + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + arr = pd.array([7, pd.NA, 9]) + ser = Series(arr) + mask = np.ones(df.shape, dtype=bool) + mask[1, :] = False + + # TODO: ideally we would get Int64 instead of object + result = df.where(mask, ser, axis=0) + expected = DataFrame({"A": [1, pd.NA, 3], "B": [4, pd.NA, 6]}).astype(object) + tm.assert_frame_equal(result, expected) + + ser2 = Series(arr[:2], index=["A", "B"]) + expected = DataFrame({"A": [1, 7, 3], "B": [4, pd.NA, 6]}) + expected["B"] = expected["B"].astype(object) + result = df.where(mask, ser2, axis=1) + tm.assert_frame_equal(result, expected) + + def test_where_interval_noop(self): + # GH#44181 + df = DataFrame([pd.Interval(0, 0)]) + res = df.where(df.notna()) + tm.assert_frame_equal(res, df) + + ser = df[0] + res = ser.where(ser.notna()) + tm.assert_series_equal(res, ser) + + def test_where_interval_fullop_downcast(self, frame_or_series): + # GH#45768 + obj = frame_or_series([pd.Interval(0, 0)] * 2) + other = frame_or_series([1.0, 2.0]) + res = obj.where(~obj.notna(), other) + + # since all entries are being changed, we will downcast result + # from object to ints (not floats) + tm.assert_equal(res, other.astype(np.int64)) + + # unlike where, Block.putmask does not downcast + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + obj.mask(obj.notna(), other, inplace=True) + tm.assert_equal(obj, other.astype(object)) + + @pytest.mark.parametrize( + "dtype", + [ + "timedelta64[ns]", + "datetime64[ns]", + "datetime64[ns, Asia/Tokyo]", + "Period[D]", + ], + ) + def test_where_datetimelike_noop(self, dtype): + # GH#45135, analogue to GH#44181 for Period don't raise on no-op + # For td64/dt64/dt64tz we already don't raise, but also are + # checking that we don't unnecessarily upcast to object. 
+ ser = Series(np.arange(3) * 10**9, dtype=np.int64).view(dtype) + df = ser.to_frame() + mask = np.array([False, False, False]) + + res = ser.where(~mask, "foo") + tm.assert_series_equal(res, ser) + + mask2 = mask.reshape(-1, 1) + res2 = df.where(~mask2, "foo") + tm.assert_frame_equal(res2, df) + + res3 = ser.mask(mask, "foo") + tm.assert_series_equal(res3, ser) + + res4 = df.mask(mask2, "foo") + tm.assert_frame_equal(res4, df) + + # opposite case where we are replacing *all* values -> we downcast + # from object dtype # GH#45768 + res5 = df.where(mask2, 4) + expected = DataFrame(4, index=df.index, columns=df.columns) + tm.assert_frame_equal(res5, expected) + + # unlike where, Block.putmask does not downcast + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + df.mask(~mask2, 4, inplace=True) + tm.assert_frame_equal(df, expected.astype(object)) + + +def test_where_int_downcasting_deprecated(): + # GH#44597 + arr = np.arange(6).astype(np.int16).reshape(3, 2) + df = DataFrame(arr) + + mask = np.zeros(arr.shape, dtype=bool) + mask[:, 0] = True + + res = df.where(mask, 2**17) + + expected = DataFrame({0: arr[:, 0], 1: np.array([2**17] * 3, dtype=np.int32)}) + tm.assert_frame_equal(res, expected) + + +def test_where_copies_with_noop(frame_or_series): + # GH-39595 + result = frame_or_series([1, 2, 3, 4]) + expected = result.copy() + col = result[0] if frame_or_series is DataFrame else result + + where_res = result.where(col < 5) + where_res *= 2 + + tm.assert_equal(result, expected) + + where_res = result.where(col > 5, [1, 2, 3, 4]) + where_res *= 2 + + tm.assert_equal(result, expected) + + +def test_where_string_dtype(frame_or_series): + # GH40824 + obj = frame_or_series( + ["a", "b", "c", "d"], index=["id1", "id2", "id3", "id4"], dtype=StringDtype() + ) + filtered_obj = frame_or_series( + ["b", "c"], index=["id2", "id3"], dtype=StringDtype() + ) + filter_ser = Series([False, True, True, False]) + + result = obj.where(filter_ser, filtered_obj) + expected = frame_or_series( + [pd.NA, "b", "c", pd.NA], + index=["id1", "id2", "id3", "id4"], + dtype=StringDtype(), + ) + tm.assert_equal(result, expected) + + result = obj.mask(~filter_ser, filtered_obj) + tm.assert_equal(result, expected) + + obj.mask(~filter_ser, filtered_obj, inplace=True) + tm.assert_equal(result, expected) + + +def test_where_bool_comparison(): + # GH 10336 + df_mask = DataFrame( + {"AAA": [True] * 4, "BBB": [False] * 4, "CCC": [True, False, True, False]} + ) + result = df_mask.where(df_mask == False) # noqa: E712 + expected = DataFrame( + { + "AAA": np.array([np.nan] * 4, dtype=object), + "BBB": [False] * 4, + "CCC": [np.nan, False, np.nan, False], + } + ) + tm.assert_frame_equal(result, expected) + + +def test_where_none_nan_coerce(): + # GH 15613 + expected = DataFrame( + { + "A": [Timestamp("20130101"), pd.NaT, Timestamp("20130103")], + "B": [1, 2, np.nan], + } + ) + result = expected.where(expected.notnull(), None) + tm.assert_frame_equal(result, expected) + + +def test_where_duplicate_axes_mixed_dtypes(): + # GH 25399, verify manually masking is not affected anymore by dtype of column for + # duplicate axes. 
+ result = DataFrame(data=[[0, np.nan]], columns=Index(["A", "A"])) + index, columns = result.axes + mask = DataFrame(data=[[True, True]], columns=columns, index=index) + a = result.astype(object).where(mask) + b = result.astype("f8").where(mask) + c = result.T.where(mask.T).T + d = result.where(mask) # used to fail with "cannot reindex from a duplicate axis" + tm.assert_frame_equal(a.astype("f8"), b.astype("f8")) + tm.assert_frame_equal(b.astype("f8"), c.astype("f8")) + tm.assert_frame_equal(c.astype("f8"), d.astype("f8")) + + +def test_where_columns_casting(): + # GH 42295 + + df = DataFrame({"a": [1.0, 2.0], "b": [3, np.nan]}) + expected = df.copy() + result = df.where(pd.notnull(df), None) + # make sure dtypes don't change + tm.assert_frame_equal(expected, result) + + +@pytest.mark.parametrize("as_cat", [True, False]) +def test_where_period_invalid_na(frame_or_series, as_cat, request): + # GH#44697 + idx = pd.period_range("2016-01-01", periods=3, freq="D") + if as_cat: + idx = idx.astype("category") + obj = frame_or_series(idx) + + # NA value that we should *not* cast to Period dtype + tdnat = pd.NaT.to_numpy("m8[ns]") + + mask = np.array([True, True, False], ndmin=obj.ndim).T + + if as_cat: + msg = ( + r"Cannot setitem on a Categorical with a new category \(NaT\), " + "set the categories first" + ) + else: + msg = "value should be a 'Period'" + + if as_cat: + with pytest.raises(TypeError, match=msg): + obj.where(mask, tdnat) + + with pytest.raises(TypeError, match=msg): + obj.mask(mask, tdnat) + + with pytest.raises(TypeError, match=msg): + obj.mask(mask, tdnat, inplace=True) + + else: + # With PeriodDtype, ser[i] = tdnat coerces instead of raising, + # so for consistency, ser[mask] = tdnat must as well + expected = obj.astype(object).where(mask, tdnat) + result = obj.where(mask, tdnat) + tm.assert_equal(result, expected) + + expected = obj.astype(object).mask(mask, tdnat) + result = obj.mask(mask, tdnat) + tm.assert_equal(result, expected) + + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + obj.mask(mask, tdnat, inplace=True) + tm.assert_equal(obj, expected) + + +def test_where_nullable_invalid_na(frame_or_series, any_numeric_ea_dtype): + # GH#44697 + arr = pd.array([1, 2, 3], dtype=any_numeric_ea_dtype) + obj = frame_or_series(arr) + + mask = np.array([True, True, False], ndmin=obj.ndim).T + + msg = r"Invalid value '.*' for dtype (U?Int|Float)\d{1,2}" + + for null in tm.NP_NAT_OBJECTS + [pd.NaT]: + # NaT is an NA value that we should *not* cast to pd.NA dtype + with pytest.raises(TypeError, match=msg): + obj.where(mask, null) + + with pytest.raises(TypeError, match=msg): + obj.mask(mask, null) + + +@given(data=OPTIONAL_ONE_OF_ALL) +def test_where_inplace_casting(data): + # GH 22051 + df = DataFrame({"a": data}) + df_copy = df.where(pd.notnull(df), None).copy() + df.where(pd.notnull(df), None, inplace=True) + tm.assert_equal(df, df_copy) + + +def test_where_downcast_to_td64(): + ser = Series([1, 2, 3]) + + mask = np.array([False, False, False]) + + td = pd.Timedelta(days=1) + + res = ser.where(mask, td) + expected = Series([td, td, td], dtype="m8[ns]") + tm.assert_series_equal(res, expected) + + +def _check_where_equivalences(df, mask, other, expected): + # similar to tests.series.indexing.test_setitem.SetitemCastingEquivalences + # but with DataFrame in mind and less fleshed-out + res = df.where(mask, other) + tm.assert_frame_equal(res, expected) + + res = df.mask(~mask, other) + tm.assert_frame_equal(res, expected) + + # Note: 
frame.mask(~mask, other, inplace=True) takes some more work bc + # Block.putmask does *not* downcast. The change to 'expected' here + # is specific to the cases in test_where_dt64_2d. + df = df.copy() + df.mask(~mask, other, inplace=True) + if not mask.all(): + # with mask.all(), Block.putmask is a no-op, so does not downcast + expected = expected.copy() + expected["A"] = expected["A"].astype(object) + tm.assert_frame_equal(df, expected) + + +def test_where_dt64_2d(): + dti = date_range("2016-01-01", periods=6) + dta = dti._data.reshape(3, 2) + other = dta - dta[0, 0] + + df = DataFrame(dta, columns=["A", "B"]) + + mask = np.asarray(df.isna()).copy() + mask[:, 1] = True + + # setting all of one column, none of the other + expected = DataFrame({"A": other[:, 0], "B": dta[:, 1]}) + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + _check_where_equivalences(df, mask, other, expected) + + # setting part of one column, none of the other + mask[1, 0] = True + expected = DataFrame( + { + "A": np.array([other[0, 0], dta[1, 0], other[2, 0]], dtype=object), + "B": dta[:, 1], + } + ) + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype" + ): + _check_where_equivalences(df, mask, other, expected) + + # setting nothing in either column + mask[:] = True + expected = df + _check_where_equivalences(df, mask, other, expected) + + +def test_where_producing_ea_cond_for_np_dtype(): + # GH#44014 + df = DataFrame({"a": Series([1, pd.NA, 2], dtype="Int64"), "b": [1, 2, 3]}) + result = df.where(lambda x: x.apply(lambda y: y > 1, axis=1)) + expected = DataFrame( + {"a": Series([pd.NA, pd.NA, 2], dtype="Int64"), "b": [np.nan, 2, 3]} + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "replacement", [0.001, True, "snake", None, datetime(2022, 5, 4)] +) +def test_where_int_overflow(replacement): + # GH 31687 + df = DataFrame([[1.0, 2e25, "nine"], [np.nan, 0.1, None]]) + result = df.where(pd.notnull(df), replacement) + expected = DataFrame([[1.0, 2e25, "nine"], [replacement, 0.1, replacement]]) + + tm.assert_frame_equal(result, expected) + + +def test_where_inplace_no_other(): + # GH#51685 + df = DataFrame({"a": [1.0, 2.0], "b": ["x", "y"]}) + cond = DataFrame({"a": [True, False], "b": [False, True]}) + df.where(cond, inplace=True) + expected = DataFrame({"a": [1, np.nan], "b": [np.nan, "y"]}) + tm.assert_frame_equal(df, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_xs.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_xs.py new file mode 100644 index 00000000..492dd387 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_xs.py @@ -0,0 +1,432 @@ +import re + +import numpy as np +import pytest + +from pandas.errors import SettingWithCopyError + +from pandas import ( + DataFrame, + Index, + IndexSlice, + MultiIndex, + Series, + concat, +) +import pandas._testing as tm + +from pandas.tseries.offsets import BDay + + +@pytest.fixture +def four_level_index_dataframe(): + arr = np.array( + [ + [-0.5109, -2.3358, -0.4645, 0.05076, 0.364], + [0.4473, 1.4152, 0.2834, 1.00661, 0.1744], + [-0.6662, -0.5243, -0.358, 0.89145, 2.5838], + ] + ) + index = MultiIndex( + levels=[["a", "x"], ["b", "q"], [10.0032, 20.0, 30.0], [3, 4, 5]], + codes=[[0, 0, 1], [0, 1, 1], [0, 1, 2], [2, 1, 0]], + names=["one", "two", "three", "four"], + ) + return DataFrame(arr, index=index, columns=list("ABCDE")) + + +class 
TestXS: + def test_xs(self, float_frame, datetime_frame, using_copy_on_write): + float_frame_orig = float_frame.copy() + idx = float_frame.index[5] + xs = float_frame.xs(idx) + for item, value in xs.items(): + if np.isnan(value): + assert np.isnan(float_frame[item][idx]) + else: + assert value == float_frame[item][idx] + + # mixed-type xs + test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}} + frame = DataFrame(test_data) + xs = frame.xs("1") + assert xs.dtype == np.object_ + assert xs["A"] == 1 + assert xs["B"] == "1" + + with pytest.raises( + KeyError, match=re.escape("Timestamp('1999-12-31 00:00:00')") + ): + datetime_frame.xs(datetime_frame.index[0] - BDay()) + + # xs get column + series = float_frame.xs("A", axis=1) + expected = float_frame["A"] + tm.assert_series_equal(series, expected) + + # view is returned if possible + series = float_frame.xs("A", axis=1) + series[:] = 5 + if using_copy_on_write: + # but with CoW the view shouldn't propagate mutations + tm.assert_series_equal(float_frame["A"], float_frame_orig["A"]) + assert not (expected == 5).all() + else: + assert (expected == 5).all() + + def test_xs_corner(self): + # pathological mixed-type reordering case + df = DataFrame(index=[0]) + df["A"] = 1.0 + df["B"] = "foo" + df["C"] = 2.0 + df["D"] = "bar" + df["E"] = 3.0 + + xs = df.xs(0) + exp = Series([1.0, "foo", 2.0, "bar", 3.0], index=list("ABCDE"), name=0) + tm.assert_series_equal(xs, exp) + + # no columns but Index(dtype=object) + df = DataFrame(index=["a", "b", "c"]) + result = df.xs("a") + expected = Series([], name="a", dtype=np.float64) + tm.assert_series_equal(result, expected) + + def test_xs_duplicates(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), + index=["b", "b", "c", "b", "a"], + ) + + cross = df.xs("c") + exp = df.iloc[2] + tm.assert_series_equal(cross, exp) + + def test_xs_keep_level(self): + df = DataFrame( + { + "day": {0: "sat", 1: "sun"}, + "flavour": {0: "strawberry", 1: "strawberry"}, + "sales": {0: 10, 1: 12}, + "year": {0: 2008, 1: 2008}, + } + ).set_index(["year", "flavour", "day"]) + result = df.xs("sat", level="day", drop_level=False) + expected = df[:1] + tm.assert_frame_equal(result, expected) + + result = df.xs((2008, "sat"), level=["year", "day"], drop_level=False) + tm.assert_frame_equal(result, expected) + + def test_xs_view(self, using_array_manager, using_copy_on_write): + # in 0.14 this will return a view if possible a copy otherwise, but + # this is numpy dependent + + dm = DataFrame(np.arange(20.0).reshape(4, 5), index=range(4), columns=range(5)) + df_orig = dm.copy() + + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + dm.xs(2)[:] = 20 + tm.assert_frame_equal(dm, df_orig) + elif using_array_manager: + # INFO(ArrayManager) with ArrayManager getting a row as a view is + # not possible + msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame" + with pytest.raises(SettingWithCopyError, match=msg): + dm.xs(2)[:] = 20 + assert not (dm.xs(2) == 20).any() + else: + dm.xs(2)[:] = 20 + assert (dm.xs(2) == 20).all() + + +class TestXSWithMultiIndex: + def test_xs_doc_example(self): + # TODO: more descriptive name + # based on example in advanced.rst + arrays = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + tuples = list(zip(*arrays)) + + index = MultiIndex.from_tuples(tuples, names=["first", "second"]) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 8)), + 
index=["A", "B", "C"], + columns=index, + ) + + result = df.xs(("one", "bar"), level=("second", "first"), axis=1) + + expected = df.iloc[:, [0]] + tm.assert_frame_equal(result, expected) + + def test_xs_integer_key(self): + # see GH#2107 + dates = range(20111201, 20111205) + ids = list("abcde") + index = MultiIndex.from_product([dates, ids], names=["date", "secid"]) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(index), 3)), + index, + ["X", "Y", "Z"], + ) + + result = df.xs(20111201, level="date") + expected = df.loc[20111201, :] + tm.assert_frame_equal(result, expected) + + def test_xs_level(self, multiindex_dataframe_random_data): + df = multiindex_dataframe_random_data + result = df.xs("two", level="second") + expected = df[df.index.get_level_values(1) == "two"] + expected.index = Index(["foo", "bar", "baz", "qux"], name="first") + tm.assert_frame_equal(result, expected) + + def test_xs_level_eq_2(self): + arr = np.random.default_rng(2).standard_normal((3, 5)) + index = MultiIndex( + levels=[["a", "p", "x"], ["b", "q", "y"], ["c", "r", "z"]], + codes=[[2, 0, 1], [2, 0, 1], [2, 0, 1]], + ) + df = DataFrame(arr, index=index) + expected = DataFrame(arr[1:2], index=[["a"], ["b"]]) + result = df.xs("c", level=2) + tm.assert_frame_equal(result, expected) + + def test_xs_setting_with_copy_error( + self, multiindex_dataframe_random_data, using_copy_on_write + ): + # this is a copy in 0.14 + df = multiindex_dataframe_random_data + df_orig = df.copy() + result = df.xs("two", level="second") + + if using_copy_on_write: + result[:] = 10 + else: + # setting this will give a SettingWithCopyError + # as we are trying to write a view + msg = "A value is trying to be set on a copy of a slice from a DataFrame" + with pytest.raises(SettingWithCopyError, match=msg): + result[:] = 10 + tm.assert_frame_equal(df, df_orig) + + def test_xs_setting_with_copy_error_multiple( + self, four_level_index_dataframe, using_copy_on_write + ): + # this is a copy in 0.14 + df = four_level_index_dataframe + df_orig = df.copy() + result = df.xs(("a", 4), level=["one", "four"]) + + if using_copy_on_write: + result[:] = 10 + else: + # setting this will give a SettingWithCopyError + # as we are trying to write a view + msg = "A value is trying to be set on a copy of a slice from a DataFrame" + with pytest.raises(SettingWithCopyError, match=msg): + result[:] = 10 + tm.assert_frame_equal(df, df_orig) + + @pytest.mark.parametrize("key, level", [("one", "second"), (["one"], ["second"])]) + def test_xs_with_duplicates(self, key, level, multiindex_dataframe_random_data): + # see GH#13719 + frame = multiindex_dataframe_random_data + df = concat([frame] * 2) + assert df.index.is_unique is False + expected = concat([frame.xs("one", level="second")] * 2) + + if isinstance(key, list): + result = df.xs(tuple(key), level=level) + else: + result = df.xs(key, level=level) + tm.assert_frame_equal(result, expected) + + def test_xs_missing_values_in_index(self): + # see GH#6574 + # missing values in returned index should be preserved + acc = [ + ("a", "abcde", 1), + ("b", "bbcde", 2), + ("y", "yzcde", 25), + ("z", "xbcde", 24), + ("z", None, 26), + ("z", "zbcde", 25), + ("z", "ybcde", 26), + ] + df = DataFrame(acc, columns=["a1", "a2", "cnt"]).set_index(["a1", "a2"]) + expected = DataFrame( + {"cnt": [24, 26, 25, 26]}, + index=Index(["xbcde", np.nan, "zbcde", "ybcde"], name="a2"), + ) + + result = df.xs("z", level="a1") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "key, level, exp_arr, exp_index", 
+ [ + ("a", "lvl0", lambda x: x[:, 0:2], Index(["bar", "foo"], name="lvl1")), + ("foo", "lvl1", lambda x: x[:, 1:2], Index(["a"], name="lvl0")), + ], + ) + def test_xs_named_levels_axis_eq_1(self, key, level, exp_arr, exp_index): + # see GH#2903 + arr = np.random.default_rng(2).standard_normal((4, 4)) + index = MultiIndex( + levels=[["a", "b"], ["bar", "foo", "hello", "world"]], + codes=[[0, 0, 1, 1], [0, 1, 2, 3]], + names=["lvl0", "lvl1"], + ) + df = DataFrame(arr, columns=index) + result = df.xs(key, level=level, axis=1) + expected = DataFrame(exp_arr(arr), columns=exp_index) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "indexer", + [ + lambda df: df.xs(("a", 4), level=["one", "four"]), + lambda df: df.xs("a").xs(4, level="four"), + ], + ) + def test_xs_level_multiple(self, indexer, four_level_index_dataframe): + df = four_level_index_dataframe + expected_values = [[0.4473, 1.4152, 0.2834, 1.00661, 0.1744]] + expected_index = MultiIndex( + levels=[["q"], [20.0]], codes=[[0], [0]], names=["two", "three"] + ) + expected = DataFrame( + expected_values, index=expected_index, columns=list("ABCDE") + ) + result = indexer(df) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "indexer", [lambda df: df.xs("a", level=0), lambda df: df.xs("a")] + ) + def test_xs_level0(self, indexer, four_level_index_dataframe): + df = four_level_index_dataframe + expected_values = [ + [-0.5109, -2.3358, -0.4645, 0.05076, 0.364], + [0.4473, 1.4152, 0.2834, 1.00661, 0.1744], + ] + expected_index = MultiIndex( + levels=[["b", "q"], [10.0032, 20.0], [4, 5]], + codes=[[0, 1], [0, 1], [1, 0]], + names=["two", "three", "four"], + ) + expected = DataFrame( + expected_values, index=expected_index, columns=list("ABCDE") + ) + + result = indexer(df) + tm.assert_frame_equal(result, expected) + + def test_xs_values(self, multiindex_dataframe_random_data): + df = multiindex_dataframe_random_data + result = df.xs(("bar", "two")).values + expected = df.values[4] + tm.assert_almost_equal(result, expected) + + def test_xs_loc_equality(self, multiindex_dataframe_random_data): + df = multiindex_dataframe_random_data + result = df.xs(("bar", "two")) + expected = df.loc[("bar", "two")] + tm.assert_series_equal(result, expected) + + def test_xs_IndexSlice_argument_not_implemented(self, frame_or_series): + # GH#35301 + + index = MultiIndex( + levels=[[("foo", "bar", 0), ("foo", "baz", 0), ("foo", "qux", 0)], [0, 1]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + ) + + obj = DataFrame(np.random.default_rng(2).standard_normal((6, 4)), index=index) + if frame_or_series is Series: + obj = obj[0] + + expected = obj.iloc[-2:].droplevel(0) + + result = obj.xs(IndexSlice[("foo", "qux", 0), :]) + tm.assert_equal(result, expected) + + result = obj.loc[IndexSlice[("foo", "qux", 0), :]] + tm.assert_equal(result, expected) + + def test_xs_levels_raises(self, frame_or_series): + obj = DataFrame({"A": [1, 2, 3]}) + if frame_or_series is Series: + obj = obj["A"] + + msg = "Index must be a MultiIndex" + with pytest.raises(TypeError, match=msg): + obj.xs(0, level="as") + + def test_xs_multiindex_droplevel_false(self): + # GH#19056 + mi = MultiIndex.from_tuples( + [("a", "x"), ("a", "y"), ("b", "x")], names=["level1", "level2"] + ) + df = DataFrame([[1, 2, 3]], columns=mi) + result = df.xs("a", axis=1, drop_level=False) + expected = DataFrame( + [[1, 2]], + columns=MultiIndex.from_tuples( + [("a", "x"), ("a", "y")], names=["level1", "level2"] + ), + ) + tm.assert_frame_equal(result, expected) + + 
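A small self-contained sketch of the drop_level keyword exercised above (illustrative, not part of the vendored test file): by default xs drops the level it selects on, while drop_level=False keeps the full MultiIndex in the result.

import pandas as pd

mi = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "x")])
df = pd.DataFrame([[1, 2, 3]], columns=mi)

dropped = df.xs("a", axis=1)                 # columns become ["x", "y"]
kept = df.xs("a", axis=1, drop_level=False)  # both column levels retained

assert list(dropped.columns) == ["x", "y"]
assert list(kept.columns) == [("a", "x"), ("a", "y")]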
def test_xs_droplevel_false(self): + # GH#19056 + df = DataFrame([[1, 2, 3]], columns=Index(["a", "b", "c"])) + result = df.xs("a", axis=1, drop_level=False) + expected = DataFrame({"a": [1]}) + tm.assert_frame_equal(result, expected) + + def test_xs_droplevel_false_view(self, using_array_manager, using_copy_on_write): + # GH#37832 + df = DataFrame([[1, 2, 3]], columns=Index(["a", "b", "c"])) + result = df.xs("a", axis=1, drop_level=False) + # check that result still views the same data as df + assert np.shares_memory(result.iloc[:, 0]._values, df.iloc[:, 0]._values) + + df.iloc[0, 0] = 2 + if using_copy_on_write: + # with copy on write the subset is never modified + expected = DataFrame({"a": [1]}) + else: + # modifying original df also modifies result when having a single block + expected = DataFrame({"a": [2]}) + tm.assert_frame_equal(result, expected) + + # with mixed dataframe, modifying the parent doesn't modify result + # TODO the "split" path behaves differently here as with single block + df = DataFrame([[1, 2.5, "a"]], columns=Index(["a", "b", "c"])) + result = df.xs("a", axis=1, drop_level=False) + df.iloc[0, 0] = 2 + if using_copy_on_write: + # with copy on write the subset is never modified + expected = DataFrame({"a": [1]}) + elif using_array_manager: + # Here the behavior is consistent + expected = DataFrame({"a": [2]}) + else: + # FIXME: iloc does not update the array inplace using + # "split" path + expected = DataFrame({"a": [1]}) + tm.assert_frame_equal(result, expected) + + def test_xs_list_indexer_droplevel_false(self): + # GH#41760 + mi = MultiIndex.from_tuples([("x", "m", "a"), ("x", "n", "b"), ("y", "o", "c")]) + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=mi) + with pytest.raises(KeyError, match="y"): + df.xs(("x", "y"), drop_level=False, axis=1) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/__init__.py new file mode 100644 index 00000000..245594bf --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/__init__.py @@ -0,0 +1,7 @@ +""" +Test files dedicated to individual (stand-alone) DataFrame methods + +Ideally these files/tests should correspond 1-to-1 with tests.series.methods + +These may also present opportunities for sharing/de-duplicating test code. 
+""" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_add_prefix_suffix.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_add_prefix_suffix.py new file mode 100644 index 00000000..92d7cdd7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_add_prefix_suffix.py @@ -0,0 +1,49 @@ +import pytest + +from pandas import Index +import pandas._testing as tm + + +def test_add_prefix_suffix(float_frame): + with_prefix = float_frame.add_prefix("foo#") + expected = Index([f"foo#{c}" for c in float_frame.columns]) + tm.assert_index_equal(with_prefix.columns, expected) + + with_suffix = float_frame.add_suffix("#foo") + expected = Index([f"{c}#foo" for c in float_frame.columns]) + tm.assert_index_equal(with_suffix.columns, expected) + + with_pct_prefix = float_frame.add_prefix("%") + expected = Index([f"%{c}" for c in float_frame.columns]) + tm.assert_index_equal(with_pct_prefix.columns, expected) + + with_pct_suffix = float_frame.add_suffix("%") + expected = Index([f"{c}%" for c in float_frame.columns]) + tm.assert_index_equal(with_pct_suffix.columns, expected) + + +def test_add_prefix_suffix_axis(float_frame): + # GH 47819 + with_prefix = float_frame.add_prefix("foo#", axis=0) + expected = Index([f"foo#{c}" for c in float_frame.index]) + tm.assert_index_equal(with_prefix.index, expected) + + with_prefix = float_frame.add_prefix("foo#", axis=1) + expected = Index([f"foo#{c}" for c in float_frame.columns]) + tm.assert_index_equal(with_prefix.columns, expected) + + with_pct_suffix = float_frame.add_suffix("#foo", axis=0) + expected = Index([f"{c}#foo" for c in float_frame.index]) + tm.assert_index_equal(with_pct_suffix.index, expected) + + with_pct_suffix = float_frame.add_suffix("#foo", axis=1) + expected = Index([f"{c}#foo" for c in float_frame.columns]) + tm.assert_index_equal(with_pct_suffix.columns, expected) + + +def test_add_prefix_suffix_invalid_axis(float_frame): + with pytest.raises(ValueError, match="No axis named 2 for object type DataFrame"): + float_frame.add_prefix("foo#", axis=2) + + with pytest.raises(ValueError, match="No axis named 2 for object type DataFrame"): + float_frame.add_suffix("foo#", axis=2) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_align.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_align.py new file mode 100644 index 00000000..87a56c07 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_align.py @@ -0,0 +1,490 @@ +from datetime import timezone + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + date_range, +) +import pandas._testing as tm + + +class TestDataFrameAlign: + def test_align_asfreq_method_raises(self): + df = DataFrame({"A": [1, np.nan, 2]}) + msg = "Invalid fill method" + msg2 = "The 'method', 'limit', and 'fill_axis' keywords" + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=msg2): + df.align(df.iloc[::-1], method="asfreq") + + def test_frame_align_aware(self): + idx1 = date_range("2001", periods=5, freq="H", tz="US/Eastern") + idx2 = date_range("2001", periods=5, freq="2H", tz="US/Eastern") + df1 = DataFrame(np.random.default_rng(2).standard_normal((len(idx1), 3)), idx1) + df2 = DataFrame(np.random.default_rng(2).standard_normal((len(idx2), 3)), idx2) + new1, new2 = df1.align(df2) + assert df1.index.tz == new1.index.tz + assert df2.index.tz == 
new2.index.tz + + # different timezones convert to UTC + + # frame with frame + df1_central = df1.tz_convert("US/Central") + new1, new2 = df1.align(df1_central) + assert new1.index.tz is timezone.utc + assert new2.index.tz is timezone.utc + + # frame with Series + new1, new2 = df1.align(df1_central[0], axis=0) + assert new1.index.tz is timezone.utc + assert new2.index.tz is timezone.utc + + df1[0].align(df1_central, axis=0) + assert new1.index.tz is timezone.utc + assert new2.index.tz is timezone.utc + + def test_align_float(self, float_frame, using_copy_on_write): + af, bf = float_frame.align(float_frame) + assert af._mgr is not float_frame._mgr + + af, bf = float_frame.align(float_frame, copy=False) + if not using_copy_on_write: + assert af._mgr is float_frame._mgr + else: + assert af._mgr is not float_frame._mgr + + # axis = 0 + other = float_frame.iloc[:-5, :3] + af, bf = float_frame.align(other, axis=0, fill_value=-1) + + tm.assert_index_equal(bf.columns, other.columns) + + # test fill value + join_idx = float_frame.index.join(other.index) + diff_a = float_frame.index.difference(join_idx) + diff_a_vals = af.reindex(diff_a).values + assert (diff_a_vals == -1).all() + + af, bf = float_frame.align(other, join="right", axis=0) + tm.assert_index_equal(bf.columns, other.columns) + tm.assert_index_equal(bf.index, other.index) + tm.assert_index_equal(af.index, other.index) + + # axis = 1 + other = float_frame.iloc[:-5, :3].copy() + af, bf = float_frame.align(other, axis=1) + tm.assert_index_equal(bf.columns, float_frame.columns) + tm.assert_index_equal(bf.index, other.index) + + # test fill value + join_idx = float_frame.index.join(other.index) + diff_a = float_frame.index.difference(join_idx) + diff_a_vals = af.reindex(diff_a).values + + assert (diff_a_vals == -1).all() + + af, bf = float_frame.align(other, join="inner", axis=1) + tm.assert_index_equal(bf.columns, other.columns) + + msg = ( + "The 'method', 'limit', and 'fill_axis' keywords in DataFrame.align " + "are deprecated" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + af, bf = float_frame.align(other, join="inner", axis=1, method="pad") + tm.assert_index_equal(bf.columns, other.columns) + + msg = ( + "The 'method', 'limit', and 'fill_axis' keywords in DataFrame.align " + "are deprecated" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + af, bf = float_frame.align( + other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=None + ) + tm.assert_index_equal(bf.index, Index([])) + + msg = ( + "The 'method', 'limit', and 'fill_axis' keywords in DataFrame.align " + "are deprecated" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + af, bf = float_frame.align( + other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0 + ) + tm.assert_index_equal(bf.index, Index([])) + + # Try to align DataFrame to Series along bad axis + msg = "No axis named 2 for object type DataFrame" + with pytest.raises(ValueError, match=msg): + float_frame.align(af.iloc[0, :3], join="inner", axis=2) + + def test_align_frame_with_series(self, float_frame): + # align dataframe to series with broadcast or not + idx = float_frame.index + s = Series(range(len(idx)), index=idx) + + left, right = float_frame.align(s, axis=0) + tm.assert_index_equal(left.index, float_frame.index) + tm.assert_index_equal(right.index, float_frame.index) + assert isinstance(right, Series) + + msg = "The 'broadcast_axis' keyword in DataFrame.align is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + left, 
right = float_frame.align(s, broadcast_axis=1) + tm.assert_index_equal(left.index, float_frame.index) + expected = {c: s for c in float_frame.columns} + expected = DataFrame( + expected, index=float_frame.index, columns=float_frame.columns + ) + tm.assert_frame_equal(right, expected) + + def test_align_series_condition(self): + # see gh-9558 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + result = df[df["a"] == 2] + expected = DataFrame([[2, 5]], index=[1], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + result = df.where(df["a"] == 2, 0) + expected = DataFrame({"a": [0, 2, 0], "b": [0, 5, 0]}) + tm.assert_frame_equal(result, expected) + + def test_align_int(self, int_frame): + # test other non-float types + other = DataFrame(index=range(5), columns=["A", "B", "C"]) + + msg = ( + "The 'method', 'limit', and 'fill_axis' keywords in DataFrame.align " + "are deprecated" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + af, bf = int_frame.align(other, join="inner", axis=1, method="pad") + tm.assert_index_equal(bf.columns, other.columns) + + def test_align_mixed_type(self, float_string_frame): + msg = ( + "The 'method', 'limit', and 'fill_axis' keywords in DataFrame.align " + "are deprecated" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + af, bf = float_string_frame.align( + float_string_frame, join="inner", axis=1, method="pad" + ) + tm.assert_index_equal(bf.columns, float_string_frame.columns) + + def test_align_mixed_float(self, mixed_float_frame): + # mixed floats/ints + other = DataFrame(index=range(5), columns=["A", "B", "C"]) + + msg = ( + "The 'method', 'limit', and 'fill_axis' keywords in DataFrame.align " + "are deprecated" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + af, bf = mixed_float_frame.align( + other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0 + ) + tm.assert_index_equal(bf.index, Index([])) + + def test_align_mixed_int(self, mixed_int_frame): + other = DataFrame(index=range(5), columns=["A", "B", "C"]) + + msg = ( + "The 'method', 'limit', and 'fill_axis' keywords in DataFrame.align " + "are deprecated" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + af, bf = mixed_int_frame.align( + other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0 + ) + tm.assert_index_equal(bf.index, Index([])) + + @pytest.mark.parametrize( + "l_ordered,r_ordered,expected", + [ + [True, True, pd.CategoricalIndex], + [True, False, Index], + [False, True, Index], + [False, False, pd.CategoricalIndex], + ], + ) + def test_align_categorical(self, l_ordered, r_ordered, expected): + # GH-28397 + df_1 = DataFrame( + { + "A": np.arange(6, dtype="int64"), + "B": Series(list("aabbca")).astype( + pd.CategoricalDtype(list("cab"), ordered=l_ordered) + ), + } + ).set_index("B") + df_2 = DataFrame( + { + "A": np.arange(5, dtype="int64"), + "B": Series(list("babca")).astype( + pd.CategoricalDtype(list("cab"), ordered=r_ordered) + ), + } + ).set_index("B") + + aligned_1, aligned_2 = df_1.align(df_2) + assert isinstance(aligned_1.index, expected) + assert isinstance(aligned_2.index, expected) + tm.assert_index_equal(aligned_1.index, aligned_2.index) + + def test_align_multiindex(self): + # GH#10665 + # same test cases as test_align_multiindex in test_series.py + + midx = pd.MultiIndex.from_product( + [range(2), range(3), range(2)], names=("a", "b", "c") + ) + idx = Index(range(2), name="b") + df1 = DataFrame(np.arange(12, dtype="int64"), index=midx) + df2 = DataFrame(np.arange(2, 
dtype="int64"), index=idx) + + # these must be the same results (but flipped) + res1l, res1r = df1.align(df2, join="left") + res2l, res2r = df2.align(df1, join="right") + + expl = df1 + tm.assert_frame_equal(expl, res1l) + tm.assert_frame_equal(expl, res2r) + expr = DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx) + tm.assert_frame_equal(expr, res1r) + tm.assert_frame_equal(expr, res2l) + + res1l, res1r = df1.align(df2, join="right") + res2l, res2r = df2.align(df1, join="left") + + exp_idx = pd.MultiIndex.from_product( + [range(2), range(2), range(2)], names=("a", "b", "c") + ) + expl = DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx) + tm.assert_frame_equal(expl, res1l) + tm.assert_frame_equal(expl, res2r) + expr = DataFrame([0, 0, 1, 1] * 2, index=exp_idx) + tm.assert_frame_equal(expr, res1r) + tm.assert_frame_equal(expr, res2l) + + def test_align_series_combinations(self): + df = DataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE")) + s = Series([1, 2, 4], index=list("ABD"), name="x") + + # frame + series + res1, res2 = df.align(s, axis=0) + exp1 = DataFrame( + {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]}, + index=list("ABCDE"), + ) + exp2 = Series([1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x") + + tm.assert_frame_equal(res1, exp1) + tm.assert_series_equal(res2, exp2) + + # series + frame + res1, res2 = s.align(df) + tm.assert_series_equal(res1, exp2) + tm.assert_frame_equal(res2, exp1) + + def test_multiindex_align_to_series_with_common_index_level(self): + # GH-46001 + foo_index = Index([1, 2, 3], name="foo") + bar_index = Index([1, 2], name="bar") + + series = Series([1, 2], index=bar_index, name="foo_series") + df = DataFrame( + {"col": np.arange(6)}, + index=pd.MultiIndex.from_product([foo_index, bar_index]), + ) + + expected_r = Series([1, 2] * 3, index=df.index, name="foo_series") + result_l, result_r = df.align(series, axis=0) + + tm.assert_frame_equal(result_l, df) + tm.assert_series_equal(result_r, expected_r) + + def test_multiindex_align_to_series_with_common_index_level_missing_in_left(self): + # GH-46001 + foo_index = Index([1, 2, 3], name="foo") + bar_index = Index([1, 2], name="bar") + + series = Series( + [1, 2, 3, 4], index=Index([1, 2, 3, 4], name="bar"), name="foo_series" + ) + df = DataFrame( + {"col": np.arange(6)}, + index=pd.MultiIndex.from_product([foo_index, bar_index]), + ) + + expected_r = Series([1, 2] * 3, index=df.index, name="foo_series") + result_l, result_r = df.align(series, axis=0) + + tm.assert_frame_equal(result_l, df) + tm.assert_series_equal(result_r, expected_r) + + def test_multiindex_align_to_series_with_common_index_level_missing_in_right(self): + # GH-46001 + foo_index = Index([1, 2, 3], name="foo") + bar_index = Index([1, 2, 3, 4], name="bar") + + series = Series([1, 2], index=Index([1, 2], name="bar"), name="foo_series") + df = DataFrame( + {"col": np.arange(12)}, + index=pd.MultiIndex.from_product([foo_index, bar_index]), + ) + + expected_r = Series( + [1, 2, np.nan, np.nan] * 3, index=df.index, name="foo_series" + ) + result_l, result_r = df.align(series, axis=0) + + tm.assert_frame_equal(result_l, df) + tm.assert_series_equal(result_r, expected_r) + + def test_multiindex_align_to_series_with_common_index_level_missing_in_both(self): + # GH-46001 + foo_index = Index([1, 2, 3], name="foo") + bar_index = Index([1, 3, 4], name="bar") + + series = Series( + [1, 2, 3], index=Index([1, 2, 4], name="bar"), name="foo_series" + ) + df = DataFrame( + {"col": np.arange(9)}, + 
index=pd.MultiIndex.from_product([foo_index, bar_index]), + ) + + expected_r = Series([1, np.nan, 3] * 3, index=df.index, name="foo_series") + result_l, result_r = df.align(series, axis=0) + + tm.assert_frame_equal(result_l, df) + tm.assert_series_equal(result_r, expected_r) + + def test_multiindex_align_to_series_with_common_index_level_non_unique_cols(self): + # GH-46001 + foo_index = Index([1, 2, 3], name="foo") + bar_index = Index([1, 2], name="bar") + + series = Series([1, 2], index=bar_index, name="foo_series") + df = DataFrame( + np.arange(18).reshape(6, 3), + index=pd.MultiIndex.from_product([foo_index, bar_index]), + ) + df.columns = ["cfoo", "cbar", "cfoo"] + + expected = Series([1, 2] * 3, index=df.index, name="foo_series") + result_left, result_right = df.align(series, axis=0) + + tm.assert_series_equal(result_right, expected) + tm.assert_index_equal(result_left.columns, df.columns) + + def test_missing_axis_specification_exception(self): + df = DataFrame(np.arange(50).reshape((10, 5))) + series = Series(np.arange(5)) + + with pytest.raises(ValueError, match=r"axis=0 or 1"): + df.align(series) + + def _check_align(self, a, b, axis, fill_axis, how, method, limit=None): + msg = ( + "The 'method', 'limit', and 'fill_axis' keywords in DataFrame.align " + "are deprecated" + ) + + with tm.assert_produces_warning(FutureWarning, match=msg): + aa, ab = a.align( + b, axis=axis, join=how, method=method, limit=limit, fill_axis=fill_axis + ) + + join_index, join_columns = None, None + + ea, eb = a, b + if axis is None or axis == 0: + join_index = a.index.join(b.index, how=how) + ea = ea.reindex(index=join_index) + eb = eb.reindex(index=join_index) + + if axis is None or axis == 1: + join_columns = a.columns.join(b.columns, how=how) + ea = ea.reindex(columns=join_columns) + eb = eb.reindex(columns=join_columns) + + msg = "DataFrame.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + ea = ea.fillna(axis=fill_axis, method=method, limit=limit) + eb = eb.fillna(axis=fill_axis, method=method, limit=limit) + + tm.assert_frame_equal(aa, ea) + tm.assert_frame_equal(ab, eb) + + @pytest.mark.parametrize("meth", ["pad", "bfill"]) + @pytest.mark.parametrize("ax", [0, 1, None]) + @pytest.mark.parametrize("fax", [0, 1]) + @pytest.mark.parametrize("how", ["inner", "outer", "left", "right"]) + def test_align_fill_method(self, how, meth, ax, fax, float_frame): + df = float_frame + self._check_align_fill(df, how, meth, ax, fax) + + def _check_align_fill(self, frame, kind, meth, ax, fax): + left = frame.iloc[0:4, :10] + right = frame.iloc[2:, 6:] + empty = frame.iloc[:0, :0] + + self._check_align(left, right, axis=ax, fill_axis=fax, how=kind, method=meth) + self._check_align( + left, right, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1 + ) + + # empty left + self._check_align(empty, right, axis=ax, fill_axis=fax, how=kind, method=meth) + self._check_align( + empty, right, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1 + ) + + # empty right + self._check_align(left, empty, axis=ax, fill_axis=fax, how=kind, method=meth) + self._check_align( + left, empty, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1 + ) + + # both empty + self._check_align(empty, empty, axis=ax, fill_axis=fax, how=kind, method=meth) + self._check_align( + empty, empty, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1 + ) + + def test_align_series_check_copy(self): + # GH# + df = DataFrame({0: [1, 2]}) + ser = Series([1], name=0) + expected = ser.copy() + result, 
other = df.align(ser, axis=1) + ser.iloc[0] = 100 + tm.assert_series_equal(other, expected) + + def test_align_identical_different_object(self): + # GH#51032 + df = DataFrame({"a": [1, 2]}) + ser = Series([3, 4]) + result, result2 = df.align(ser, axis=0) + tm.assert_frame_equal(result, df) + tm.assert_series_equal(result2, ser) + assert df is not result + assert ser is not result2 + + def test_align_identical_different_object_columns(self): + # GH#51032 + df = DataFrame({"a": [1, 2]}) + ser = Series([1], index=["a"]) + result, result2 = df.align(ser, axis=1) + tm.assert_frame_equal(result, df) + tm.assert_series_equal(result2, ser) + assert df is not result + assert ser is not result2 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_asfreq.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_asfreq.py new file mode 100644 index 00000000..2c5137db --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_asfreq.py @@ -0,0 +1,235 @@ +from datetime import datetime + +import numpy as np +import pytest + +from pandas._libs.tslibs.offsets import MonthEnd + +from pandas import ( + DataFrame, + DatetimeIndex, + Series, + date_range, + period_range, + to_datetime, +) +import pandas._testing as tm + +from pandas.tseries import offsets + + +class TestAsFreq: + @pytest.fixture(params=["s", "ms", "us", "ns"]) + def unit(self, request): + return request.param + + def test_asfreq2(self, frame_or_series): + ts = frame_or_series( + [0.0, 1.0, 2.0], + index=DatetimeIndex( + [ + datetime(2009, 10, 30), + datetime(2009, 11, 30), + datetime(2009, 12, 31), + ], + freq="BM", + ), + ) + + daily_ts = ts.asfreq("B") + monthly_ts = daily_ts.asfreq("BM") + tm.assert_equal(monthly_ts, ts) + + daily_ts = ts.asfreq("B", method="pad") + monthly_ts = daily_ts.asfreq("BM") + tm.assert_equal(monthly_ts, ts) + + daily_ts = ts.asfreq(offsets.BDay()) + monthly_ts = daily_ts.asfreq(offsets.BMonthEnd()) + tm.assert_equal(monthly_ts, ts) + + result = ts[:0].asfreq("M") + assert len(result) == 0 + assert result is not ts + + if frame_or_series is Series: + daily_ts = ts.asfreq("D", fill_value=-1) + result = daily_ts.value_counts().sort_index() + expected = Series( + [60, 1, 1, 1], index=[-1.0, 2.0, 1.0, 0.0], name="count" + ).sort_index() + tm.assert_series_equal(result, expected) + + def test_asfreq_datetimeindex_empty(self, frame_or_series): + # GH#14320 + index = DatetimeIndex(["2016-09-29 11:00"]) + expected = frame_or_series(index=index, dtype=object).asfreq("H") + result = frame_or_series([3], index=index.copy()).asfreq("H") + tm.assert_index_equal(expected.index, result.index) + + @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) + def test_tz_aware_asfreq_smoke(self, tz, frame_or_series): + dr = date_range("2011-12-01", "2012-07-20", freq="D", tz=tz) + + obj = frame_or_series( + np.random.default_rng(2).standard_normal(len(dr)), index=dr + ) + + # it works! 
+ obj.asfreq("T") + + def test_asfreq_normalize(self, frame_or_series): + rng = date_range("1/1/2000 09:30", periods=20) + norm = date_range("1/1/2000", periods=20) + + vals = np.random.default_rng(2).standard_normal((20, 3)) + + obj = DataFrame(vals, index=rng) + expected = DataFrame(vals, index=norm) + if frame_or_series is Series: + obj = obj[0] + expected = expected[0] + + result = obj.asfreq("D", normalize=True) + tm.assert_equal(result, expected) + + def test_asfreq_keep_index_name(self, frame_or_series): + # GH#9854 + index_name = "bar" + index = date_range("20130101", periods=20, name=index_name) + obj = DataFrame(list(range(20)), columns=["foo"], index=index) + obj = tm.get_obj(obj, frame_or_series) + + assert index_name == obj.index.name + assert index_name == obj.asfreq("10D").index.name + + def test_asfreq_ts(self, frame_or_series): + index = period_range(freq="A", start="1/1/2001", end="12/31/2010") + obj = DataFrame( + np.random.default_rng(2).standard_normal((len(index), 3)), index=index + ) + obj = tm.get_obj(obj, frame_or_series) + + result = obj.asfreq("D", how="end") + exp_index = index.asfreq("D", how="end") + assert len(result) == len(obj) + tm.assert_index_equal(result.index, exp_index) + + result = obj.asfreq("D", how="start") + exp_index = index.asfreq("D", how="start") + assert len(result) == len(obj) + tm.assert_index_equal(result.index, exp_index) + + def test_asfreq_resample_set_correct_freq(self, frame_or_series): + # GH#5613 + # we test if .asfreq() and .resample() set the correct value for .freq + dti = to_datetime(["2012-01-01", "2012-01-02", "2012-01-03"]) + obj = DataFrame({"col": [1, 2, 3]}, index=dti) + obj = tm.get_obj(obj, frame_or_series) + + # testing the settings before calling .asfreq() and .resample() + assert obj.index.freq is None + assert obj.index.inferred_freq == "D" + + # does .asfreq() set .freq correctly? + assert obj.asfreq("D").index.freq == "D" + + # does .resample() set .freq correctly? + assert obj.resample("D").asfreq().index.freq == "D" + + def test_asfreq_empty(self, datetime_frame): + # test does not blow up on length-0 DataFrame + zero_length = datetime_frame.reindex([]) + result = zero_length.asfreq("BM") + assert result is not zero_length + + def test_asfreq(self, datetime_frame): + offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd()) + rule_monthly = datetime_frame.asfreq("BM") + + tm.assert_frame_equal(offset_monthly, rule_monthly) + + rule_monthly.asfreq("B", method="pad") + # TODO: actually check that this worked. + + # don't forget! 
+ rule_monthly.asfreq("B", method="pad") + + def test_asfreq_datetimeindex(self): + df = DataFrame( + {"A": [1, 2, 3]}, + index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)], + ) + df = df.asfreq("B") + assert isinstance(df.index, DatetimeIndex) + + ts = df["A"].asfreq("B") + assert isinstance(ts.index, DatetimeIndex) + + def test_asfreq_fillvalue(self): + # test for fill value during upsampling, related to issue 3715 + + # setup + rng = date_range("1/1/2016", periods=10, freq="2S") + # Explicit cast to 'float' to avoid implicit cast when setting None + ts = Series(np.arange(len(rng)), index=rng, dtype="float") + df = DataFrame({"one": ts}) + + # insert pre-existing missing value + df.loc["2016-01-01 00:00:08", "one"] = None + + actual_df = df.asfreq(freq="1S", fill_value=9.0) + expected_df = df.asfreq(freq="1S").fillna(9.0) + expected_df.loc["2016-01-01 00:00:08", "one"] = None + tm.assert_frame_equal(expected_df, actual_df) + + expected_series = ts.asfreq(freq="1S").fillna(9.0) + actual_series = ts.asfreq(freq="1S", fill_value=9.0) + tm.assert_series_equal(expected_series, actual_series) + + def test_asfreq_with_date_object_index(self, frame_or_series): + rng = date_range("1/1/2000", periods=20) + ts = frame_or_series(np.random.default_rng(2).standard_normal(20), index=rng) + + ts2 = ts.copy() + ts2.index = [x.date() for x in ts2.index] + + result = ts2.asfreq("4H", method="ffill") + expected = ts.asfreq("4H", method="ffill") + tm.assert_equal(result, expected) + + def test_asfreq_with_unsorted_index(self, frame_or_series): + # GH#39805 + # Test that rows are not dropped when the datetime index is out of order + index = to_datetime(["2021-01-04", "2021-01-02", "2021-01-03", "2021-01-01"]) + result = frame_or_series(range(4), index=index) + + expected = result.reindex(sorted(index)) + expected.index = expected.index._with_freq("infer") + + result = result.asfreq("D") + tm.assert_equal(result, expected) + + def test_asfreq_after_normalize(self, unit): + # https://github.com/pandas-dev/pandas/issues/50727 + result = DatetimeIndex( + date_range("2000", periods=2).as_unit(unit).normalize(), freq="D" + ) + expected = DatetimeIndex(["2000-01-01", "2000-01-02"], freq="D").as_unit(unit) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "freq, freq_half", + [ + ("2M", "M"), + (MonthEnd(2), MonthEnd(1)), + ], + ) + def test_asfreq_2M(self, freq, freq_half): + index = date_range("1/1/2000", periods=6, freq=freq_half) + df = DataFrame({"s": Series([0.0, 1.0, 2.0, 3.0, 4.0, 5.0], index=index)}) + expected = df.asfreq(freq=freq) + + index = date_range("1/1/2000", periods=3, freq=freq) + result = DataFrame({"s": Series([0.0, 2.0, 4.0], index=index)}) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_asof.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_asof.py new file mode 100644 index 00000000..5683ec60 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_asof.py @@ -0,0 +1,198 @@ +import numpy as np +import pytest + +from pandas._libs.tslibs import IncompatibleFrequency + +from pandas import ( + DataFrame, + Period, + Series, + Timestamp, + date_range, + period_range, + to_datetime, +) +import pandas._testing as tm + + +@pytest.fixture +def date_range_frame(): + """ + Fixture for DataFrame of ints with date_range index + + Columns are ['A', 'B']. 
+ """ + N = 50 + rng = date_range("1/1/1990", periods=N, freq="53s") + return DataFrame({"A": np.arange(N), "B": np.arange(N)}, index=rng) + + +class TestFrameAsof: + def test_basic(self, date_range_frame): + # Explicitly cast to float to avoid implicit cast when setting np.nan + df = date_range_frame.astype({"A": "float"}) + N = 50 + df.loc[df.index[15:30], "A"] = np.nan + dates = date_range("1/1/1990", periods=N * 3, freq="25s") + + result = df.asof(dates) + assert result.notna().all(1).all() + lb = df.index[14] + ub = df.index[30] + + dates = list(dates) + + result = df.asof(dates) + assert result.notna().all(1).all() + + mask = (result.index >= lb) & (result.index < ub) + rs = result[mask] + assert (rs == 14).all(1).all() + + def test_subset(self, date_range_frame): + N = 10 + # explicitly cast to float to avoid implicit upcast when setting to np.nan + df = date_range_frame.iloc[:N].copy().astype({"A": "float"}) + df.loc[df.index[4:8], "A"] = np.nan + dates = date_range("1/1/1990", periods=N * 3, freq="25s") + + # with a subset of A should be the same + result = df.asof(dates, subset="A") + expected = df.asof(dates) + tm.assert_frame_equal(result, expected) + + # same with A/B + result = df.asof(dates, subset=["A", "B"]) + expected = df.asof(dates) + tm.assert_frame_equal(result, expected) + + # B gives df.asof + result = df.asof(dates, subset="B") + expected = df.resample("25s", closed="right").ffill().reindex(dates) + expected.iloc[20:] = 9 + # no "missing", so "B" can retain int dtype (df["A"].dtype platform-dependent) + expected["B"] = expected["B"].astype(df["B"].dtype) + + tm.assert_frame_equal(result, expected) + + def test_missing(self, date_range_frame): + # GH 15118 + # no match found - `where` value before earliest date in index + N = 10 + # Cast to 'float64' to avoid upcast when introducing nan in df.asof + df = date_range_frame.iloc[:N].copy().astype("float64") + + result = df.asof("1989-12-31") + + expected = Series( + index=["A", "B"], name=Timestamp("1989-12-31"), dtype=np.float64 + ) + tm.assert_series_equal(result, expected) + + result = df.asof(to_datetime(["1989-12-31"])) + expected = DataFrame( + index=to_datetime(["1989-12-31"]), columns=["A", "B"], dtype="float64" + ) + tm.assert_frame_equal(result, expected) + + # Check that we handle PeriodIndex correctly, dont end up with + # period.ordinal for series name + df = df.to_period("D") + result = df.asof("1989-12-31") + assert isinstance(result.name, Period) + + def test_asof_all_nans(self, frame_or_series): + # GH 15713 + # DataFrame/Series is all nans + result = frame_or_series([np.nan]).asof([0]) + expected = frame_or_series([np.nan]) + tm.assert_equal(result, expected) + + def test_all_nans(self, date_range_frame): + # GH 15713 + # DataFrame is all nans + + # testing non-default indexes, multiple inputs + N = 150 + rng = date_range_frame.index + dates = date_range("1/1/1990", periods=N, freq="25s") + result = DataFrame(np.nan, index=rng, columns=["A"]).asof(dates) + expected = DataFrame(np.nan, index=dates, columns=["A"]) + tm.assert_frame_equal(result, expected) + + # testing multiple columns + dates = date_range("1/1/1990", periods=N, freq="25s") + result = DataFrame(np.nan, index=rng, columns=["A", "B", "C"]).asof(dates) + expected = DataFrame(np.nan, index=dates, columns=["A", "B", "C"]) + tm.assert_frame_equal(result, expected) + + # testing scalar input + result = DataFrame(np.nan, index=[1, 2], columns=["A", "B"]).asof([3]) + expected = DataFrame(np.nan, index=[3], columns=["A", "B"]) + 
tm.assert_frame_equal(result, expected) + + result = DataFrame(np.nan, index=[1, 2], columns=["A", "B"]).asof(3) + expected = Series(np.nan, index=["A", "B"], name=3) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "stamp,expected", + [ + ( + Timestamp("2018-01-01 23:22:43.325+00:00"), + Series(2, name=Timestamp("2018-01-01 23:22:43.325+00:00")), + ), + ( + Timestamp("2018-01-01 22:33:20.682+01:00"), + Series(1, name=Timestamp("2018-01-01 22:33:20.682+01:00")), + ), + ], + ) + def test_time_zone_aware_index(self, stamp, expected): + # GH21194 + # Test that DataFrame.asof is aware of the index's timezone when the + # lookup timestamp is given at a UTC vs. a non-UTC offset + df = DataFrame( + data=[1, 2], + index=[ + Timestamp("2018-01-01 21:00:05.001+00:00"), + Timestamp("2018-01-01 22:35:10.550+00:00"), + ], + ) + + result = df.asof(stamp) + tm.assert_series_equal(result, expected) + + def test_is_copy(self, date_range_frame): + # GH-27357, GH-30784: ensure the result of asof is an actual copy and + # doesn't track the parent dataframe / doesn't give SettingWithCopy warnings + df = date_range_frame.astype({"A": "float"}) + N = 50 + df.loc[df.index[15:30], "A"] = np.nan + dates = date_range("1/1/1990", periods=N * 3, freq="25s") + + result = df.asof(dates) + + with tm.assert_produces_warning(None): + result["C"] = 1 + + def test_asof_periodindex_mismatched_freq(self): + N = 50 + rng = period_range("1/1/1990", periods=N, freq="H") + df = DataFrame(np.random.default_rng(2).standard_normal(N), index=rng) + + # Mismatched freq + msg = "Input has different freq" + with pytest.raises(IncompatibleFrequency, match=msg): + df.asof(rng.asfreq("D")) + + def test_asof_preserves_bool_dtype(self): + # GH#16063 was casting bools to floats + dti = date_range("2017-01-01", freq="MS", periods=4) + ser = Series([True, False, True], index=dti[:-1]) + + ts = dti[-1] + res = ser.asof([ts]) + + expected = Series([True], index=[ts]) + tm.assert_series_equal(res, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_assign.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_assign.py new file mode 100644 index 00000000..0ae501d4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_assign.py @@ -0,0 +1,84 @@ +import pytest + +from pandas import DataFrame +import pandas._testing as tm + + +class TestAssign: + def test_assign(self): + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + original = df.copy() + result = df.assign(C=df.B / df.A) + expected = df.copy() + expected["C"] = [4, 2.5, 2] + tm.assert_frame_equal(result, expected) + + # lambda syntax + result = df.assign(C=lambda x: x.B / x.A) + tm.assert_frame_equal(result, expected) + + # original is unmodified + tm.assert_frame_equal(df, original) + + # Non-Series array-like + result = df.assign(C=[4, 2.5, 2]) + tm.assert_frame_equal(result, expected) + # original is unmodified + tm.assert_frame_equal(df, original) + + result = df.assign(B=df.B / df.A) + expected = expected.drop("B", axis=1).rename(columns={"C": "B"}) + tm.assert_frame_equal(result, expected) + + # overwrite + result = df.assign(A=df.A + df.B) + expected = df.copy() + expected["A"] = [5, 7, 9] + tm.assert_frame_equal(result, expected) + + # lambda + result = df.assign(A=lambda x: x.A + x.B) + tm.assert_frame_equal(result, expected) + + def test_assign_multiple(self): + df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=["A", "B"]) + result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B) + expected = DataFrame( + [[1,
4, 7, 1, 4], [2, 5, 8, 2, 5], [3, 6, 9, 3, 6]], columns=list("ABCDE") + ) + tm.assert_frame_equal(result, expected) + + def test_assign_order(self): + # GH 9818 + df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) + result = df.assign(D=df.A + df.B, C=df.A - df.B) + + expected = DataFrame([[1, 2, 3, -1], [3, 4, 7, -1]], columns=list("ABDC")) + tm.assert_frame_equal(result, expected) + result = df.assign(C=df.A - df.B, D=df.A + df.B) + + expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]], columns=list("ABCD")) + + tm.assert_frame_equal(result, expected) + + def test_assign_bad(self): + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + + # non-keyword argument + msg = r"assign\(\) takes 1 positional argument but 2 were given" + with pytest.raises(TypeError, match=msg): + df.assign(lambda x: x.A) + msg = "'DataFrame' object has no attribute 'C'" + with pytest.raises(AttributeError, match=msg): + df.assign(C=df.A, D=df.A + df.C) + + def test_assign_dependent(self): + df = DataFrame({"A": [1, 2], "B": [3, 4]}) + + result = df.assign(C=df.A, D=lambda x: x["A"] + x["C"]) + expected = DataFrame([[1, 3, 1, 2], [2, 4, 2, 4]], columns=list("ABCD")) + tm.assert_frame_equal(result, expected) + + result = df.assign(C=lambda df: df.A, D=lambda df: df["A"] + df["C"]) + expected = DataFrame([[1, 3, 1, 2], [2, 4, 2, 4]], columns=list("ABCD")) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_astype.py new file mode 100644 index 00000000..6590f10c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_astype.py @@ -0,0 +1,889 @@ +import re + +import numpy as np +import pytest + +from pandas.compat import pa_version_under7p0 +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Categorical, + CategoricalDtype, + DataFrame, + DatetimeTZDtype, + Index, + Interval, + IntervalDtype, + NaT, + Series, + Timedelta, + Timestamp, + concat, + date_range, + option_context, +) +import pandas._testing as tm + + +def _check_cast(df, v): + """ + Check if all dtypes of df are equal to v + """ + assert all(s.dtype.name == v for _, s in df.items()) + + +class TestAstype: + def test_astype_float(self, float_frame): + casted = float_frame.astype(int) + expected = DataFrame( + float_frame.values.astype(int), + index=float_frame.index, + columns=float_frame.columns, + ) + tm.assert_frame_equal(casted, expected) + + casted = float_frame.astype(np.int32) + expected = DataFrame( + float_frame.values.astype(np.int32), + index=float_frame.index, + columns=float_frame.columns, + ) + tm.assert_frame_equal(casted, expected) + + float_frame["foo"] = "5" + casted = float_frame.astype(int) + expected = DataFrame( + float_frame.values.astype(int), + index=float_frame.index, + columns=float_frame.columns, + ) + tm.assert_frame_equal(casted, expected) + + def test_astype_mixed_float(self, mixed_float_frame): + # mixed casting + casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float32") + _check_cast(casted, "float32") + + casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float16") + _check_cast(casted, "float16") + + def test_astype_mixed_type(self, mixed_type_frame): + # mixed casting + mn = mixed_type_frame._get_numeric_data().copy() + mn["little_float"] = np.array(12345.0, dtype="float16") + mn["big_float"] = np.array(123456789101112.0, dtype="float64") + + casted = mn.astype("float64") 
+ _check_cast(casted, "float64") + + casted = mn.astype("int64") + _check_cast(casted, "int64") + + casted = mn.reindex(columns=["little_float"]).astype("float16") + _check_cast(casted, "float16") + + casted = mn.astype("float32") + _check_cast(casted, "float32") + + casted = mn.astype("int32") + _check_cast(casted, "int32") + + # to object + casted = mn.astype("O") + _check_cast(casted, "object") + + def test_astype_with_exclude_string(self, float_frame): + df = float_frame.copy() + expected = float_frame.astype(int) + df["string"] = "foo" + casted = df.astype(int, errors="ignore") + + expected["string"] = "foo" + tm.assert_frame_equal(casted, expected) + + df = float_frame.copy() + expected = float_frame.astype(np.int32) + df["string"] = "foo" + casted = df.astype(np.int32, errors="ignore") + + expected["string"] = "foo" + tm.assert_frame_equal(casted, expected) + + def test_astype_with_view_float(self, float_frame): + # this is the only real reason to do it this way + tf = np.round(float_frame).astype(np.int32) + tf.astype(np.float32, copy=False) + + # TODO(wesm): verification? + tf = float_frame.astype(np.float64) + tf.astype(np.int64, copy=False) + + def test_astype_with_view_mixed_float(self, mixed_float_frame): + tf = mixed_float_frame.reindex(columns=["A", "B", "C"]) + + tf.astype(np.int64) + tf.astype(np.float32) + + @pytest.mark.parametrize("dtype", [np.int32, np.int64]) + @pytest.mark.parametrize("val", [np.nan, np.inf]) + def test_astype_cast_nan_inf_int(self, val, dtype): + # see GH#14265 + # + # Check NaN and inf --> raise error when converting to int. + msg = "Cannot convert non-finite values \\(NA or inf\\) to integer" + df = DataFrame([val]) + + with pytest.raises(ValueError, match=msg): + df.astype(dtype) + + def test_astype_str(self): + # see GH#9757 + a = Series(date_range("2010-01-04", periods=5)) + b = Series(date_range("3/6/2012 00:00", periods=5, tz="US/Eastern")) + c = Series([Timedelta(x, unit="d") for x in range(5)]) + d = Series(range(5)) + e = Series([0.0, 0.2, 0.4, 0.6, 0.8]) + + df = DataFrame({"a": a, "b": b, "c": c, "d": d, "e": e}) + + # Datetime-like + result = df.astype(str) + + expected = DataFrame( + { + "a": list(map(str, (Timestamp(x)._date_repr for x in a._values))), + "b": list(map(str, map(Timestamp, b._values))), + "c": [Timedelta(x)._repr_base() for x in c._values], + "d": list(map(str, d._values)), + "e": list(map(str, e._values)), + } + ) + + tm.assert_frame_equal(result, expected) + + def test_astype_str_float(self): + # see GH#11302 + result = DataFrame([np.nan]).astype(str) + expected = DataFrame(["nan"]) + + tm.assert_frame_equal(result, expected) + result = DataFrame([1.12345678901234567890]).astype(str) + + val = "1.1234567890123457" + expected = DataFrame([val]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype_class", [dict, Series]) + def test_astype_dict_like(self, dtype_class): + # GH7271 & GH16717 + a = Series(date_range("2010-01-04", periods=5)) + b = Series(range(5)) + c = Series([0.0, 0.2, 0.4, 0.6, 0.8]) + d = Series(["1.0", "2", "3.14", "4", "5.4"]) + df = DataFrame({"a": a, "b": b, "c": c, "d": d}) + original = df.copy(deep=True) + + # change type of a subset of columns + dt1 = dtype_class({"b": "str", "d": "float32"}) + result = df.astype(dt1) + expected = DataFrame( + { + "a": a, + "b": Series(["0", "1", "2", "3", "4"]), + "c": c, + "d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float32"), + } + ) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(df, original) + + dt2 = 
dtype_class({"b": np.float32, "c": "float32", "d": np.float64}) + result = df.astype(dt2) + expected = DataFrame( + { + "a": a, + "b": Series([0.0, 1.0, 2.0, 3.0, 4.0], dtype="float32"), + "c": Series([0.0, 0.2, 0.4, 0.6, 0.8], dtype="float32"), + "d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float64"), + } + ) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(df, original) + + # change all columns + dt3 = dtype_class({"a": str, "b": str, "c": str, "d": str}) + tm.assert_frame_equal(df.astype(dt3), df.astype(str)) + tm.assert_frame_equal(df, original) + + # error should be raised when using something other than column labels + # in the keys of the dtype dict + dt4 = dtype_class({"b": str, 2: str}) + dt5 = dtype_class({"e": str}) + msg_frame = ( + "Only a column name can be used for the key in a dtype mappings argument. " + "'{}' not found in columns." + ) + with pytest.raises(KeyError, match=msg_frame.format(2)): + df.astype(dt4) + with pytest.raises(KeyError, match=msg_frame.format("e")): + df.astype(dt5) + tm.assert_frame_equal(df, original) + + # if the dtypes provided are the same as the original dtypes, the + # resulting DataFrame should be the same as the original DataFrame + dt6 = dtype_class({col: df[col].dtype for col in df.columns}) + equiv = df.astype(dt6) + tm.assert_frame_equal(df, equiv) + tm.assert_frame_equal(df, original) + + # GH#16717 + # if dtypes provided is empty, the resulting DataFrame + # should be the same as the original DataFrame + dt7 = dtype_class({}) if dtype_class is dict else dtype_class({}, dtype=object) + equiv = df.astype(dt7) + tm.assert_frame_equal(df, equiv) + tm.assert_frame_equal(df, original) + + def test_astype_duplicate_col(self): + a1 = Series([1, 2, 3, 4, 5], name="a") + b = Series([0.1, 0.2, 0.4, 0.6, 0.8], name="b") + a2 = Series([0, 1, 2, 3, 4], name="a") + df = concat([a1, b, a2], axis=1) + + result = df.astype(str) + a1_str = Series(["1", "2", "3", "4", "5"], dtype="str", name="a") + b_str = Series(["0.1", "0.2", "0.4", "0.6", "0.8"], dtype=str, name="b") + a2_str = Series(["0", "1", "2", "3", "4"], dtype="str", name="a") + expected = concat([a1_str, b_str, a2_str], axis=1) + tm.assert_frame_equal(result, expected) + + result = df.astype({"a": "str"}) + expected = concat([a1_str, b, a2_str], axis=1) + tm.assert_frame_equal(result, expected) + + def test_astype_duplicate_col_series_arg(self): + # GH#44417 + vals = np.random.default_rng(2).standard_normal((3, 4)) + df = DataFrame(vals, columns=["A", "B", "C", "A"]) + dtypes = df.dtypes + dtypes.iloc[0] = str + dtypes.iloc[2] = "Float64" + + result = df.astype(dtypes) + expected = DataFrame( + { + 0: vals[:, 0].astype(str), + 1: vals[:, 1], + 2: pd.array(vals[:, 2], dtype="Float64"), + 3: vals[:, 3], + } + ) + expected.columns = df.columns + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "dtype", + [ + "category", + CategoricalDtype(), + CategoricalDtype(ordered=True), + CategoricalDtype(ordered=False), + CategoricalDtype(categories=list("abcdef")), + CategoricalDtype(categories=list("edba"), ordered=False), + CategoricalDtype(categories=list("edcb"), ordered=True), + ], + ids=repr, + ) + def test_astype_categorical(self, dtype): + # GH#18099 + d = {"A": list("abbc"), "B": list("bccd"), "C": list("cdde")} + df = DataFrame(d) + result = df.astype(dtype) + expected = DataFrame({k: Categorical(v, dtype=dtype) for k, v in d.items()}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("cls", [CategoricalDtype, DatetimeTZDtype, 
IntervalDtype]) + def test_astype_categoricaldtype_class_raises(self, cls): + df = DataFrame({"A": ["a", "a", "b", "c"]}) + xpr = f"Expected an instance of {cls.__name__}" + with pytest.raises(TypeError, match=xpr): + df.astype({"A": cls}) + + with pytest.raises(TypeError, match=xpr): + df["A"].astype(cls) + + @pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"]) + def test_astype_extension_dtypes(self, dtype): + # GH#22578 + df = DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"]) + + expected1 = DataFrame( + { + "a": pd.array([1, 3, 5], dtype=dtype), + "b": pd.array([2, 4, 6], dtype=dtype), + } + ) + tm.assert_frame_equal(df.astype(dtype), expected1) + tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1) + tm.assert_frame_equal(df.astype(dtype).astype("float64"), df) + + df = DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"]) + df["b"] = df["b"].astype(dtype) + expected2 = DataFrame( + {"a": [1.0, 3.0, 5.0], "b": pd.array([2, 4, 6], dtype=dtype)} + ) + tm.assert_frame_equal(df, expected2) + + tm.assert_frame_equal(df.astype(dtype), expected1) + tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1) + + @pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"]) + def test_astype_extension_dtypes_1d(self, dtype): + # GH#22578 + df = DataFrame({"a": [1.0, 2.0, 3.0]}) + + expected1 = DataFrame({"a": pd.array([1, 2, 3], dtype=dtype)}) + tm.assert_frame_equal(df.astype(dtype), expected1) + tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1) + + df = DataFrame({"a": [1.0, 2.0, 3.0]}) + df["a"] = df["a"].astype(dtype) + expected2 = DataFrame({"a": pd.array([1, 2, 3], dtype=dtype)}) + tm.assert_frame_equal(df, expected2) + + tm.assert_frame_equal(df.astype(dtype), expected1) + tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1) + + @pytest.mark.parametrize("dtype", ["category", "Int64"]) + def test_astype_extension_dtypes_duplicate_col(self, dtype): + # GH#24704 + a1 = Series([0, np.nan, 4], name="a") + a2 = Series([np.nan, 3, 5], name="a") + df = concat([a1, a2], axis=1) + + result = df.astype(dtype) + expected = concat([a1.astype(dtype), a2.astype(dtype)], axis=1) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "dtype", [{100: "float64", 200: "uint64"}, "category", "float64"] + ) + def test_astype_column_metadata(self, dtype): + # GH#19920 + columns = Index([100, 200, 300], dtype=np.uint64, name="foo") + df = DataFrame(np.arange(15).reshape(5, 3), columns=columns) + df = df.astype(dtype) + tm.assert_index_equal(df.columns, columns) + + @pytest.mark.parametrize("unit", ["Y", "M", "W", "D", "h", "m"]) + def test_astype_from_object_to_datetime_unit(self, unit): + vals = [ + ["2015-01-01", "2015-01-02", "2015-01-03"], + ["2017-01-01", "2017-01-02", "2017-02-03"], + ] + df = DataFrame(vals, dtype=object) + with pytest.raises(TypeError, match="Cannot cast"): + df.astype(f"M8[{unit}]") + + @pytest.mark.parametrize("unit", ["Y", "M", "W", "D", "h", "m"]) + def test_astype_from_object_to_timedelta_unit(self, unit): + vals = [ + ["1 Day", "2 Days", "3 Days"], + ["4 Days", "5 Days", "6 Days"], + ] + df = DataFrame(vals, dtype=object) + msg = ( + r"Cannot convert from timedelta64\[ns\] to timedelta64\[.*\]. 
" + "Supported resolutions are 's', 'ms', 'us', 'ns'" + ) + with pytest.raises(ValueError, match=msg): + # TODO: this is ValueError while for DatetimeArray it is TypeError; + # get these consistent + df.astype(f"m8[{unit}]") + + @pytest.mark.parametrize("dtype", ["M8", "m8"]) + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"]) + def test_astype_from_datetimelike_to_object(self, dtype, unit): + # tests astype to object dtype + # GH#19223 / GH#12425 + dtype = f"{dtype}[{unit}]" + arr = np.array([[1, 2, 3]], dtype=dtype) + df = DataFrame(arr) + result = df.astype(object) + assert (result.dtypes == object).all() + + if dtype.startswith("M8"): + assert result.iloc[0, 0] == Timestamp(1, unit=unit) + else: + assert result.iloc[0, 0] == Timedelta(1, unit=unit) + + @pytest.mark.parametrize("arr_dtype", [np.int64, np.float64]) + @pytest.mark.parametrize("dtype", ["M8", "m8"]) + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"]) + def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit): + # tests all units from numeric origination + # GH#19223 / GH#12425 + dtype = f"{dtype}[{unit}]" + arr = np.array([[1, 2, 3]], dtype=arr_dtype) + df = DataFrame(arr) + result = df.astype(dtype) + expected = DataFrame(arr.astype(dtype)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"]) + def test_astype_to_datetime_unit(self, unit): + # tests all units from datetime origination + # GH#19223 + dtype = f"M8[{unit}]" + arr = np.array([[1, 2, 3]], dtype=dtype) + df = DataFrame(arr) + ser = df.iloc[:, 0] + idx = Index(ser) + dta = ser._values + + if unit in ["ns", "us", "ms", "s"]: + # GH#48928 + result = df.astype(dtype) + else: + # we use the nearest supported dtype (i.e. 
M8[s]) + msg = rf"Cannot cast DatetimeArray to dtype datetime64\[{unit}\]" + with pytest.raises(TypeError, match=msg): + df.astype(dtype) + + with pytest.raises(TypeError, match=msg): + ser.astype(dtype) + + with pytest.raises(TypeError, match=msg.replace("Array", "Index")): + idx.astype(dtype) + + with pytest.raises(TypeError, match=msg): + dta.astype(dtype) + + return + + exp_df = DataFrame(arr.astype(dtype)) + assert (exp_df.dtypes == dtype).all() + tm.assert_frame_equal(result, exp_df) + + res_ser = ser.astype(dtype) + exp_ser = exp_df.iloc[:, 0] + assert exp_ser.dtype == dtype + tm.assert_series_equal(res_ser, exp_ser) + + exp_dta = exp_ser._values + + res_index = idx.astype(dtype) + exp_index = Index(exp_ser) + assert exp_index.dtype == dtype + tm.assert_index_equal(res_index, exp_index) + + res_dta = dta.astype(dtype) + assert exp_dta.dtype == dtype + tm.assert_extension_array_equal(res_dta, exp_dta) + + @pytest.mark.parametrize("unit", ["ns"]) + def test_astype_to_timedelta_unit_ns(self, unit): + # preserve the timedelta conversion + # GH#19223 + dtype = f"m8[{unit}]" + arr = np.array([[1, 2, 3]], dtype=dtype) + df = DataFrame(arr) + result = df.astype(dtype) + expected = DataFrame(arr.astype(dtype)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("unit", ["us", "ms", "s", "h", "m", "D"]) + def test_astype_to_timedelta_unit(self, unit): + # coerce to float + # GH#19223 until 2.0 used to coerce to float + dtype = f"m8[{unit}]" + arr = np.array([[1, 2, 3]], dtype=dtype) + df = DataFrame(arr) + ser = df.iloc[:, 0] + tdi = Index(ser) + tda = tdi._values + + if unit in ["us", "ms", "s"]: + assert (df.dtypes == dtype).all() + result = df.astype(dtype) + else: + # We get the nearest supported unit, i.e. "s" + assert (df.dtypes == "m8[s]").all() + + msg = ( + rf"Cannot convert from timedelta64\[s\] to timedelta64\[{unit}\]. " + "Supported resolutions are 's', 'ms', 'us', 'ns'" + ) + with pytest.raises(ValueError, match=msg): + df.astype(dtype) + with pytest.raises(ValueError, match=msg): + ser.astype(dtype) + with pytest.raises(ValueError, match=msg): + tdi.astype(dtype) + with pytest.raises(ValueError, match=msg): + tda.astype(dtype) + + return + + result = df.astype(dtype) + # The conversion is a no-op, so we just get a copy + expected = df + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"]) + def test_astype_to_incorrect_datetimelike(self, unit): + # trying to astype a m to a M, or vice-versa + # GH#19224 + dtype = f"M8[{unit}]" + other = f"m8[{unit}]" + + df = DataFrame(np.array([[1, 2, 3]], dtype=dtype)) + msg = "|".join( + [ + # BlockManager path + rf"Cannot cast DatetimeArray to dtype timedelta64\[{unit}\]", + # ArrayManager path + "cannot astype a datetimelike from " + rf"\[datetime64\[ns\]\] to \[timedelta64\[{unit}\]\]", + ] + ) + with pytest.raises(TypeError, match=msg): + df.astype(other) + + msg = "|".join( + [ + # BlockManager path + rf"Cannot cast TimedeltaArray to dtype datetime64\[{unit}\]", + # ArrayManager path + "cannot astype a timedelta from " + rf"\[timedelta64\[ns\]\] to \[datetime64\[{unit}\]\]", + ] + ) + df = DataFrame(np.array([[1, 2, 3]], dtype=other)) + with pytest.raises(TypeError, match=msg): + df.astype(dtype) + + def test_astype_arg_for_errors(self): + # GH#14878 + + df = DataFrame([1, 2, 3]) + + msg = ( + "Expected value of kwarg 'errors' to be one of " + "['raise', 'ignore']. 
Supplied value is 'True'" + ) + with pytest.raises(ValueError, match=re.escape(msg)): + df.astype(np.float64, errors=True) + + df.astype(np.int8, errors="ignore") + + def test_astype_invalid_conversion(self): + # GH#47571 + df = DataFrame({"a": [1, 2, "text"], "b": [1, 2, 3]}) + + msg = ( + "invalid literal for int() with base 10: 'text': " + "Error while type casting for column 'a'" + ) + + with pytest.raises(ValueError, match=re.escape(msg)): + df.astype({"a": int}) + + def test_astype_arg_for_errors_dictlist(self): + # GH#25905 + df = DataFrame( + [ + {"a": "1", "b": "16.5%", "c": "test"}, + {"a": "2.2", "b": "15.3", "c": "another_test"}, + ] + ) + expected = DataFrame( + [ + {"a": 1.0, "b": "16.5%", "c": "test"}, + {"a": 2.2, "b": "15.3", "c": "another_test"}, + ] + ) + type_dict = {"a": "float64", "b": "float64", "c": "object"} + + result = df.astype(dtype=type_dict, errors="ignore") + + tm.assert_frame_equal(result, expected) + + def test_astype_dt64tz(self, timezone_frame): + # astype + expected = np.array( + [ + [ + Timestamp("2013-01-01 00:00:00"), + Timestamp("2013-01-02 00:00:00"), + Timestamp("2013-01-03 00:00:00"), + ], + [ + Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"), + NaT, + Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"), + ], + [ + Timestamp("2013-01-01 00:00:00+0100", tz="CET"), + NaT, + Timestamp("2013-01-03 00:00:00+0100", tz="CET"), + ], + ], + dtype=object, + ).T + expected = DataFrame( + expected, + index=timezone_frame.index, + columns=timezone_frame.columns, + dtype=object, + ) + result = timezone_frame.astype(object) + tm.assert_frame_equal(result, expected) + + msg = "Cannot use .astype to convert from timezone-aware dtype to timezone-" + with pytest.raises(TypeError, match=msg): + # dt64tz->dt64 deprecated + timezone_frame.astype("datetime64[ns]") + + def test_astype_dt64tz_to_str(self, timezone_frame): + # str formatting + result = timezone_frame.astype(str) + expected = DataFrame( + [ + [ + "2013-01-01", + "2013-01-01 00:00:00-05:00", + "2013-01-01 00:00:00+01:00", + ], + ["2013-01-02", "NaT", "NaT"], + [ + "2013-01-03", + "2013-01-03 00:00:00-05:00", + "2013-01-03 00:00:00+01:00", + ], + ], + columns=timezone_frame.columns, + ) + tm.assert_frame_equal(result, expected) + + with option_context("display.max_columns", 20): + result = str(timezone_frame) + assert ( + "0 2013-01-01 2013-01-01 00:00:00-05:00 2013-01-01 00:00:00+01:00" + ) in result + assert ( + "1 2013-01-02 NaT NaT" + ) in result + assert ( + "2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00" + ) in result + + def test_astype_empty_dtype_dict(self): + # issue mentioned further down in the following issue's thread + # https://github.com/pandas-dev/pandas/issues/33113 + df = DataFrame() + result = df.astype({}) + tm.assert_frame_equal(result, df) + assert result is not df + + @pytest.mark.parametrize( + "data, dtype", + [ + (["x", "y", "z"], "string[python]"), + pytest.param( + ["x", "y", "z"], + "string[pyarrow]", + marks=td.skip_if_no("pyarrow"), + ), + (["x", "y", "z"], "category"), + (3 * [Timestamp("2020-01-01", tz="UTC")], None), + (3 * [Interval(0, 1)], None), + ], + ) + @pytest.mark.parametrize("errors", ["raise", "ignore"]) + def test_astype_ignores_errors_for_extension_dtypes(self, data, dtype, errors): + # https://github.com/pandas-dev/pandas/issues/35471 + df = DataFrame(Series(data, dtype=dtype)) + if errors == "ignore": + expected = df + result = df.astype(float, errors=errors) + tm.assert_frame_equal(result, expected) + else: + msg = "(Cannot 
cast)|(could not convert)" + with pytest.raises((ValueError, TypeError), match=msg): + df.astype(float, errors=errors) + + def test_astype_tz_conversion(self): + # GH 35973 + val = {"tz": date_range("2020-08-30", freq="d", periods=2, tz="Europe/London")} + df = DataFrame(val) + result = df.astype({"tz": "datetime64[ns, Europe/Berlin]"}) + + expected = df + expected["tz"] = expected["tz"].dt.tz_convert("Europe/Berlin") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("tz", ["UTC", "Europe/Berlin"]) + def test_astype_tz_object_conversion(self, tz): + # GH 35973 + val = {"tz": date_range("2020-08-30", freq="d", periods=2, tz="Europe/London")} + expected = DataFrame(val) + + # convert expected to object dtype from other tz str (independently tested) + result = expected.astype({"tz": f"datetime64[ns, {tz}]"}) + result = result.astype({"tz": "object"}) + + # do real test: object dtype to a specified tz, different from construction tz. + result = result.astype({"tz": "datetime64[ns, Europe/London]"}) + tm.assert_frame_equal(result, expected) + + def test_astype_dt64_to_string(self, frame_or_series, tz_naive_fixture): + # GH#41409 + tz = tz_naive_fixture + + dti = date_range("2016-01-01", periods=3, tz=tz) + dta = dti._data + dta[0] = NaT + + obj = frame_or_series(dta) + result = obj.astype("string") + + # Check that Series/DataFrame.astype matches DatetimeArray.astype + expected = frame_or_series(dta.astype("string")) + tm.assert_equal(result, expected) + + item = result.iloc[0] + if frame_or_series is DataFrame: + item = item.iloc[0] + assert item is pd.NA + + # For non-NA values, we should match what we get for non-EA str + alt = obj.astype(str) + assert np.all(alt.iloc[1:] == result.iloc[1:]) + + def test_astype_td64_to_string(self, frame_or_series): + # GH#41409 + tdi = pd.timedelta_range("1 Day", periods=3) + obj = frame_or_series(tdi) + + expected = frame_or_series(["1 days", "2 days", "3 days"], dtype="string") + result = obj.astype("string") + tm.assert_equal(result, expected) + + def test_astype_bytes(self): + # GH#39474 + result = DataFrame(["foo", "bar", "baz"]).astype(bytes) + assert result.dtypes[0] == np.dtype("S3") + + @pytest.mark.parametrize( + "index_slice", + [ + np.s_[:2, :2], + np.s_[:1, :2], + np.s_[:2, :1], + np.s_[::2, ::2], + np.s_[::1, ::2], + np.s_[::2, ::1], + ], + ) + def test_astype_noncontiguous(self, index_slice): + # GH#42396 + data = np.arange(16).reshape(4, 4) + df = DataFrame(data) + + result = df.iloc[index_slice].astype("int16") + expected = df.iloc[index_slice] + tm.assert_frame_equal(result, expected, check_dtype=False) + + def test_astype_retain_attrs(self, any_numpy_dtype): + # GH#44414 + df = DataFrame({"a": [0, 1, 2], "b": [3, 4, 5]}) + df.attrs["Location"] = "Michigan" + + result = df.astype({"a": any_numpy_dtype}).attrs + expected = df.attrs + + tm.assert_dict_equal(expected, result) + + +class TestAstypeCategorical: + def test_astype_from_categorical3(self): + df = DataFrame({"cats": [1, 2, 3, 4, 5, 6], "vals": [1, 2, 3, 4, 5, 6]}) + cats = Categorical([1, 2, 3, 4, 5, 6]) + exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]}) + df["cats"] = df["cats"].astype("category") + tm.assert_frame_equal(exp_df, df) + + def test_astype_from_categorical4(self): + df = DataFrame( + {"cats": ["a", "b", "b", "a", "a", "d"], "vals": [1, 2, 3, 4, 5, 6]} + ) + cats = Categorical(["a", "b", "b", "a", "a", "d"]) + exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]}) + df["cats"] = df["cats"].astype("category") + 
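+        # Editorial aside (illustrative, not part of the upstream test):
+        # .astype("category") infers the categories from the column's unique
+        # values, so the in-place conversion above should reproduce the
+        # explicitly constructed Categorical, e.g.
+        #   >>> Series(["a", "b", "b"]).astype("category").cat.categories
+        #   Index(['a', 'b'], dtype='object')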
tm.assert_frame_equal(exp_df, df) + + def test_categorical_astype_to_int(self, any_int_dtype): + # GH#39402 + + df = DataFrame(data={"col1": pd.array([2.0, 1.0, 3.0])}) + df.col1 = df.col1.astype("category") + df.col1 = df.col1.astype(any_int_dtype) + expected = DataFrame({"col1": pd.array([2, 1, 3], dtype=any_int_dtype)}) + tm.assert_frame_equal(df, expected) + + def test_astype_categorical_to_string_missing(self): + # https://github.com/pandas-dev/pandas/issues/41797 + df = DataFrame(["a", "b", np.nan]) + expected = df.astype(str) + cat = df.astype("category") + result = cat.astype(str) + tm.assert_frame_equal(result, expected) + + +class IntegerArrayNoCopy(pd.core.arrays.IntegerArray): + # GH 42501 + + def copy(self): + assert False + + +class Int16DtypeNoCopy(pd.Int16Dtype): + # GH 42501 + + @classmethod + def construct_array_type(cls): + return IntegerArrayNoCopy + + +def test_frame_astype_no_copy(): + # GH 42501 + df = DataFrame({"a": [1, 4, None, 5], "b": [6, 7, 8, 9]}, dtype=object) + result = df.astype({"a": Int16DtypeNoCopy()}, copy=False) + + assert result.a.dtype == pd.Int16Dtype() + assert np.shares_memory(df.b.values, result.b.values) + + +@pytest.mark.skipif(pa_version_under7p0, reason="pyarrow is required for this test") +@pytest.mark.parametrize("dtype", ["int64", "Int64"]) +def test_astype_copies(dtype): + # GH#50984 + df = DataFrame({"a": [1, 2, 3]}, dtype=dtype) + result = df.astype("int64[pyarrow]", copy=True) + df.iloc[0, 0] = 100 + expected = DataFrame({"a": [1, 2, 3]}, dtype="int64[pyarrow]") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("val", [None, 1, 1.5, np.nan, NaT]) +def test_astype_to_string_not_modifying_input(string_storage, val): + # GH#51073 + df = DataFrame({"a": ["a", "b", val]}) + expected = df.copy() + with option_context("mode.string_storage", string_storage): + df.astype("string", copy=False) + tm.assert_frame_equal(df, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_at_time.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_at_time.py new file mode 100644 index 00000000..67200396 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_at_time.py @@ -0,0 +1,132 @@ +from datetime import time + +import numpy as np +import pytest +import pytz + +from pandas._libs.tslibs import timezones + +from pandas import ( + DataFrame, + date_range, +) +import pandas._testing as tm + + +class TestAtTime: + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_localized_at_time(self, tzstr, frame_or_series): + tz = timezones.maybe_get_tz(tzstr) + + rng = date_range("4/16/2012", "5/1/2012", freq="H") + ts = frame_or_series( + np.random.default_rng(2).standard_normal(len(rng)), index=rng + ) + + ts_local = ts.tz_localize(tzstr) + + result = ts_local.at_time(time(10, 0)) + expected = ts.at_time(time(10, 0)).tz_localize(tzstr) + tm.assert_equal(result, expected) + assert timezones.tz_compare(result.index.tz, tz) + + def test_at_time(self, frame_or_series): + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 2)), index=rng + ) + ts = tm.get_obj(ts, frame_or_series) + rs = ts.at_time(rng[1]) + assert (rs.index.hour == rng[1].hour).all() + assert (rs.index.minute == rng[1].minute).all() + assert (rs.index.second == rng[1].second).all() + + result = ts.at_time("9:30") + expected = ts.at_time(time(9, 30)) + tm.assert_equal(result, expected) + 
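+    # Editorial sketch (hypothetical helper, not an upstream test): a minimal,
+    # self-contained illustration of at_time selecting the rows whose index
+    # falls exactly on the given wall-clock time; named without a "test_"
+    # prefix so pytest does not collect it.
+    def example_at_time_usage(self):
+        rng = date_range("2000-01-01", periods=96, freq="30min")
+        df = DataFrame({"x": range(len(rng))}, index=rng)
+        # String and datetime.time arguments are interchangeable.
+        assert df.at_time("09:30").equals(df.at_time(time(9, 30)))
+        # Two days of 30-minute stamps contain exactly two 09:30 rows.
+        assert len(df.at_time("09:30")) == 2
+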
+ def test_at_time_midnight(self, frame_or_series): + # midnight, everything + rng = date_range("1/1/2000", "1/31/2000") + ts = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 3)), index=rng + ) + ts = tm.get_obj(ts, frame_or_series) + + result = ts.at_time(time(0, 0)) + tm.assert_equal(result, ts) + + def test_at_time_nonexistent(self, frame_or_series): + # time doesn't exist + rng = date_range("1/1/2012", freq="23Min", periods=384) + ts = DataFrame(np.random.default_rng(2).standard_normal(len(rng)), rng) + ts = tm.get_obj(ts, frame_or_series) + rs = ts.at_time("16:00") + assert len(rs) == 0 + + @pytest.mark.parametrize( + "hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)] + ) + def test_at_time_errors(self, hour): + # GH#24043 + dti = date_range("2018", periods=3, freq="H") + df = DataFrame(list(range(len(dti))), index=dti) + if getattr(hour, "tzinfo", None) is None: + result = df.at_time(hour) + expected = df.iloc[1:2] + tm.assert_frame_equal(result, expected) + else: + with pytest.raises(ValueError, match="Index must be timezone"): + df.at_time(hour) + + def test_at_time_tz(self): + # GH#24043 + dti = date_range("2018", periods=3, freq="H", tz="US/Pacific") + df = DataFrame(list(range(len(dti))), index=dti) + result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern"))) + expected = df.iloc[1:2] + tm.assert_frame_equal(result, expected) + + def test_at_time_raises(self, frame_or_series): + # GH#20725 + obj = DataFrame([[1, 2, 3], [4, 5, 6]]) + obj = tm.get_obj(obj, frame_or_series) + msg = "Index must be DatetimeIndex" + with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex + obj.at_time("00:00") + + @pytest.mark.parametrize("axis", ["index", "columns", 0, 1]) + def test_at_time_axis(self, axis): + # issue 8839 + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame(np.random.default_rng(2).standard_normal((len(rng), len(rng)))) + ts.index, ts.columns = rng, rng + + indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)] + + if axis in ["index", 0]: + expected = ts.loc[indices, :] + elif axis in ["columns", 1]: + expected = ts.loc[:, indices] + + result = ts.at_time("9:30", axis=axis) + + # Without clearing freq, result has freq 1440T and expected 5T + result.index = result.index._with_freq(None) + expected.index = expected.index._with_freq(None) + tm.assert_frame_equal(result, expected) + + def test_at_time_datetimeindex(self): + index = date_range("2012-01-01", "2012-01-05", freq="30min") + df = DataFrame( + np.random.default_rng(2).standard_normal((len(index), 5)), index=index + ) + akey = time(12, 0, 0) + ainds = [24, 72, 120, 168] + + result = df.at_time(akey) + expected = df.loc[akey] + expected2 = df.iloc[ainds] + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected2) + assert len(result) == 4 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_between_time.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_between_time.py new file mode 100644 index 00000000..4c1e009b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_between_time.py @@ -0,0 +1,227 @@ +from datetime import ( + datetime, + time, +) + +import numpy as np +import pytest + +from pandas._libs.tslibs import timezones +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + Series, + date_range, +) +import pandas._testing as tm + + +class TestBetweenTime: + @td.skip_if_not_us_locale + def 
test_between_time_formats(self, frame_or_series): + # GH#11818 + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 2)), index=rng + ) + ts = tm.get_obj(ts, frame_or_series) + + strings = [ + ("2:00", "2:30"), + ("0200", "0230"), + ("2:00am", "2:30am"), + ("0200am", "0230am"), + ("2:00:00", "2:30:00"), + ("020000", "023000"), + ("2:00:00am", "2:30:00am"), + ("020000am", "023000am"), + ] + expected_length = 28 + + for time_string in strings: + assert len(ts.between_time(*time_string)) == expected_length + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_localized_between_time(self, tzstr, frame_or_series): + tz = timezones.maybe_get_tz(tzstr) + + rng = date_range("4/16/2012", "5/1/2012", freq="H") + ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng) + if frame_or_series is DataFrame: + ts = ts.to_frame() + + ts_local = ts.tz_localize(tzstr) + + t1, t2 = time(10, 0), time(11, 0) + result = ts_local.between_time(t1, t2) + expected = ts.between_time(t1, t2).tz_localize(tzstr) + tm.assert_equal(result, expected) + assert timezones.tz_compare(result.index.tz, tz) + + def test_between_time_types(self, frame_or_series): + # GH11818 + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + obj = DataFrame({"A": 0}, index=rng) + obj = tm.get_obj(obj, frame_or_series) + + msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time" + with pytest.raises(ValueError, match=msg): + obj.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) + + def test_between_time(self, inclusive_endpoints_fixture, frame_or_series): + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 2)), index=rng + ) + ts = tm.get_obj(ts, frame_or_series) + + stime = time(0, 0) + etime = time(1, 0) + inclusive = inclusive_endpoints_fixture + + filtered = ts.between_time(stime, etime, inclusive=inclusive) + exp_len = 13 * 4 + 1 + + if inclusive in ["right", "neither"]: + exp_len -= 5 + if inclusive in ["left", "neither"]: + exp_len -= 4 + + assert len(filtered) == exp_len + for rs in filtered.index: + t = rs.time() + if inclusive in ["left", "both"]: + assert t >= stime + else: + assert t > stime + + if inclusive in ["right", "both"]: + assert t <= etime + else: + assert t < etime + + result = ts.between_time("00:00", "01:00") + expected = ts.between_time(stime, etime) + tm.assert_equal(result, expected) + + # across midnight + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 2)), index=rng + ) + ts = tm.get_obj(ts, frame_or_series) + stime = time(22, 0) + etime = time(9, 0) + + filtered = ts.between_time(stime, etime, inclusive=inclusive) + exp_len = (12 * 11 + 1) * 4 + 1 + if inclusive in ["right", "neither"]: + exp_len -= 4 + if inclusive in ["left", "neither"]: + exp_len -= 4 + + assert len(filtered) == exp_len + for rs in filtered.index: + t = rs.time() + if inclusive in ["left", "both"]: + assert (t >= stime) or (t <= etime) + else: + assert (t > stime) or (t <= etime) + + if inclusive in ["right", "both"]: + assert (t <= etime) or (t >= stime) + else: + assert (t < etime) or (t >= stime) + + def test_between_time_raises(self, frame_or_series): + # GH#20725 + obj = DataFrame([[1, 2, 3], [4, 5, 6]]) + obj = tm.get_obj(obj, frame_or_series) + + msg = "Index must be DatetimeIndex" + with pytest.raises(TypeError, 
match=msg): # index is not a DatetimeIndex + obj.between_time(start_time="00:00", end_time="12:00") + + def test_between_time_axis(self, frame_or_series): + # GH#8839 + rng = date_range("1/1/2000", periods=100, freq="10min") + ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng) + if frame_or_series is DataFrame: + ts = ts.to_frame() + + stime, etime = ("08:00:00", "09:00:00") + expected_length = 7 + + assert len(ts.between_time(stime, etime)) == expected_length + assert len(ts.between_time(stime, etime, axis=0)) == expected_length + msg = f"No axis named {ts.ndim} for object type {type(ts).__name__}" + with pytest.raises(ValueError, match=msg): + ts.between_time(stime, etime, axis=ts.ndim) + + def test_between_time_axis_aliases(self, axis): + # GH#8839 + rng = date_range("1/1/2000", periods=100, freq="10min") + ts = DataFrame(np.random.default_rng(2).standard_normal((len(rng), len(rng)))) + stime, etime = ("08:00:00", "09:00:00") + exp_len = 7 + + if axis in ["index", 0]: + ts.index = rng + assert len(ts.between_time(stime, etime)) == exp_len + assert len(ts.between_time(stime, etime, axis=0)) == exp_len + + if axis in ["columns", 1]: + ts.columns = rng + selected = ts.between_time(stime, etime, axis=1).columns + assert len(selected) == exp_len + + def test_between_time_axis_raises(self, axis): + # issue 8839 + rng = date_range("1/1/2000", periods=100, freq="10min") + mask = np.arange(0, len(rng)) + rand_data = np.random.default_rng(2).standard_normal((len(rng), len(rng))) + ts = DataFrame(rand_data, index=rng, columns=rng) + stime, etime = ("08:00:00", "09:00:00") + + msg = "Index must be DatetimeIndex" + if axis in ["columns", 1]: + ts.index = mask + with pytest.raises(TypeError, match=msg): + ts.between_time(stime, etime) + with pytest.raises(TypeError, match=msg): + ts.between_time(stime, etime, axis=0) + + if axis in ["index", 0]: + ts.columns = mask + with pytest.raises(TypeError, match=msg): + ts.between_time(stime, etime, axis=1) + + def test_between_time_datetimeindex(self): + index = date_range("2012-01-01", "2012-01-05", freq="30min") + df = DataFrame( + np.random.default_rng(2).standard_normal((len(index), 5)), index=index + ) + bkey = slice(time(13, 0, 0), time(14, 0, 0)) + binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172] + + result = df.between_time(bkey.start, bkey.stop) + expected = df.loc[bkey] + expected2 = df.iloc[binds] + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected2) + assert len(result) == 12 + + def test_between_time_incorrect_arg_inclusive(self): + # GH40245 + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 2)), index=rng + ) + + stime = time(0, 0) + etime = time(1, 0) + inclusive = "bad_string" + msg = "Inclusive has to be either 'both', 'neither', 'left' or 'right'" + with pytest.raises(ValueError, match=msg): + ts.between_time(stime, etime, inclusive=inclusive) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_clip.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_clip.py new file mode 100644 index 00000000..f7b221d8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_clip.py @@ -0,0 +1,190 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +class TestDataFrameClip: + def test_clip(self, float_frame): + median = float_frame.median().median() + 
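+        # Editorial aside (illustrative, not part of the upstream test):
+        # clip(lower=m, upper=m) pins every element to m, e.g.
+        #   >>> DataFrame({"x": [1.0, 5.0, 9.0]}).clip(4, 6)["x"].tolist()
+        #   [4.0, 5.0, 6.0]
+        # so the frame clipped at the median below must contain only the median.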
original = float_frame.copy() + + double = float_frame.clip(upper=median, lower=median) + assert not (double.values != median).any() + + # Verify that float_frame was not changed inplace + assert (float_frame.values == original.values).all() + + def test_inplace_clip(self, float_frame): + # GH#15388 + median = float_frame.median().median() + frame_copy = float_frame.copy() + + return_value = frame_copy.clip(upper=median, lower=median, inplace=True) + assert return_value is None + assert not (frame_copy.values != median).any() + + def test_dataframe_clip(self): + # GH#2747 + df = DataFrame(np.random.default_rng(2).standard_normal((1000, 2))) + + for lb, ub in [(-1, 1), (1, -1)]: + clipped_df = df.clip(lb, ub) + + lb, ub = min(lb, ub), max(ub, lb) + lb_mask = df.values <= lb + ub_mask = df.values >= ub + mask = ~lb_mask & ~ub_mask + assert (clipped_df.values[lb_mask] == lb).all() + assert (clipped_df.values[ub_mask] == ub).all() + assert (clipped_df.values[mask] == df.values[mask]).all() + + def test_clip_mixed_numeric(self): + # clip on mixed integer or floats + # GH#24162, clipping now preserves numeric types per column + df = DataFrame({"A": [1, 2, 3], "B": [1.0, np.nan, 3.0]}) + result = df.clip(1, 2) + expected = DataFrame({"A": [1, 2, 2], "B": [1.0, np.nan, 2.0]}) + tm.assert_frame_equal(result, expected) + + df = DataFrame([[1, 2, 3.4], [3, 4, 5.6]], columns=["foo", "bar", "baz"]) + expected = df.dtypes + result = df.clip(upper=3).dtypes + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("inplace", [True, False]) + def test_clip_against_series(self, inplace): + # GH#6966 + + df = DataFrame(np.random.default_rng(2).standard_normal((1000, 2))) + lb = Series(np.random.default_rng(2).standard_normal(1000)) + ub = lb + 1 + + original = df.copy() + clipped_df = df.clip(lb, ub, axis=0, inplace=inplace) + + if inplace: + clipped_df = df + + for i in range(2): + lb_mask = original.iloc[:, i] <= lb + ub_mask = original.iloc[:, i] >= ub + mask = ~lb_mask & ~ub_mask + + result = clipped_df.loc[lb_mask, i] + tm.assert_series_equal(result, lb[lb_mask], check_names=False) + assert result.name == i + + result = clipped_df.loc[ub_mask, i] + tm.assert_series_equal(result, ub[ub_mask], check_names=False) + assert result.name == i + + tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i]) + + @pytest.mark.parametrize("inplace", [True, False]) + @pytest.mark.parametrize("lower", [[2, 3, 4], np.asarray([2, 3, 4])]) + @pytest.mark.parametrize( + "axis,res", + [ + (0, [[2.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 7.0, 7.0]]), + (1, [[2.0, 3.0, 4.0], [4.0, 5.0, 6.0], [5.0, 6.0, 7.0]]), + ], + ) + def test_clip_against_list_like(self, simple_frame, inplace, lower, axis, res): + # GH#15390 + original = simple_frame.copy(deep=True) + + result = original.clip(lower=lower, upper=[5, 6, 7], axis=axis, inplace=inplace) + + expected = DataFrame(res, columns=original.columns, index=original.index) + if inplace: + result = original + tm.assert_frame_equal(result, expected, check_exact=True) + + @pytest.mark.parametrize("axis", [0, 1, None]) + def test_clip_against_frame(self, axis): + df = DataFrame(np.random.default_rng(2).standard_normal((1000, 2))) + lb = DataFrame(np.random.default_rng(2).standard_normal((1000, 2))) + ub = lb + 1 + + clipped_df = df.clip(lb, ub, axis=axis) + + lb_mask = df <= lb + ub_mask = df >= ub + mask = ~lb_mask & ~ub_mask + + tm.assert_frame_equal(clipped_df[lb_mask], lb[lb_mask]) + tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask]) + 
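+        # Editorial aside: DataFrame bounds are aligned cell-for-cell on both
+        # index and columns, so each element is limited by the corresponding
+        # cell of lb/ub rather than by a single scalar; values strictly inside
+        # the band are checked below to pass through unchanged.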
tm.assert_frame_equal(clipped_df[mask], df[mask]) + + def test_clip_against_unordered_columns(self): + # GH#20911 + df1 = DataFrame( + np.random.default_rng(2).standard_normal((1000, 4)), + columns=["A", "B", "C", "D"], + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((1000, 4)), + columns=["D", "A", "B", "C"], + ) + df3 = DataFrame(df2.values - 1, columns=["B", "D", "C", "A"]) + result_upper = df1.clip(lower=0, upper=df2) + expected_upper = df1.clip(lower=0, upper=df2[df1.columns]) + result_lower = df1.clip(lower=df3, upper=3) + expected_lower = df1.clip(lower=df3[df1.columns], upper=3) + result_lower_upper = df1.clip(lower=df3, upper=df2) + expected_lower_upper = df1.clip(lower=df3[df1.columns], upper=df2[df1.columns]) + tm.assert_frame_equal(result_upper, expected_upper) + tm.assert_frame_equal(result_lower, expected_lower) + tm.assert_frame_equal(result_lower_upper, expected_lower_upper) + + def test_clip_with_na_args(self, float_frame): + """Should process np.nan argument as None""" + # GH#17276 + tm.assert_frame_equal(float_frame.clip(np.nan), float_frame) + tm.assert_frame_equal(float_frame.clip(upper=np.nan, lower=np.nan), float_frame) + + # GH#19992 and adjusted in GH#40420 + df = DataFrame({"col_0": [1, 2, 3], "col_1": [4, 5, 6], "col_2": [7, 8, 9]}) + + result = df.clip(lower=[4, 5, np.nan], axis=0) + expected = DataFrame( + {"col_0": [4, 5, 3], "col_1": [4, 5, 6], "col_2": [7, 8, 9]} + ) + tm.assert_frame_equal(result, expected) + + result = df.clip(lower=[4, 5, np.nan], axis=1) + expected = DataFrame( + {"col_0": [4, 4, 4], "col_1": [5, 5, 6], "col_2": [7, 8, 9]} + ) + tm.assert_frame_equal(result, expected) + + # GH#40420 + data = {"col_0": [9, -3, 0, -1, 5], "col_1": [-2, -7, 6, 8, -5]} + df = DataFrame(data) + t = Series([2, -4, np.nan, 6, 3]) + result = df.clip(lower=t, axis=0) + expected = DataFrame({"col_0": [9, -3, 0, 6, 5], "col_1": [2, -4, 6, 8, 3]}) + tm.assert_frame_equal(result, expected) + + def test_clip_int_data_with_float_bound(self): + # GH51472 + df = DataFrame({"a": [1, 2, 3]}) + result = df.clip(lower=1.5) + expected = DataFrame({"a": [1.5, 2.0, 3.0]}) + tm.assert_frame_equal(result, expected) + + def test_clip_with_list_bound(self): + # GH#54817 + df = DataFrame([1, 5]) + expected = DataFrame([3, 5]) + result = df.clip([3]) + tm.assert_frame_equal(result, expected) + + expected = DataFrame([1, 3]) + result = df.clip(upper=[3]) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_combine.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_combine.py new file mode 100644 index 00000000..bc6a67e4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_combine.py @@ -0,0 +1,47 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +class TestCombine: + @pytest.mark.parametrize( + "data", + [ + pd.date_range("2000", periods=4), + pd.date_range("2000", periods=4, tz="US/Central"), + pd.period_range("2000", periods=4), + pd.timedelta_range(0, periods=4), + ], + ) + def test_combine_datetlike_udf(self, data): + # GH#23079 + df = pd.DataFrame({"A": data}) + other = df.copy() + df.iloc[1, 0] = None + + def combiner(a, b): + return b + + result = df.combine(other, combiner) + tm.assert_frame_equal(result, other) + + def test_combine_generic(self, float_frame): + df1 = float_frame + df2 = float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]] + + combined = df1.combine(df2, np.add) + 
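+        # Editorial aside: combine aligns the two frames on the union of index
+        # and columns and applies the function column-by-column; positions
+        # present in only one frame surface as NaN, which is why column "D"
+        # (absent from df2) is asserted to be all-NaN below.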
combined2 = df2.combine(df1, np.add) + assert combined["D"].isna().all() + assert combined2["D"].isna().all() + + chunk = combined.loc[combined.index[:-5], ["A", "B", "C"]] + chunk2 = combined2.loc[combined2.index[:-5], ["A", "B", "C"]] + + exp = ( + float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]].reindex_like(chunk) + * 2 + ) + tm.assert_frame_equal(chunk, exp) + tm.assert_frame_equal(chunk2, exp) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_combine_first.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_combine_first.py new file mode 100644 index 00000000..156e50d5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_combine_first.py @@ -0,0 +1,548 @@ +from datetime import datetime + +import numpy as np +import pytest + +from pandas.core.dtypes.cast import find_common_type +from pandas.core.dtypes.common import is_dtype_equal + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, +) +import pandas._testing as tm + + +class TestDataFrameCombineFirst: + def test_combine_first_mixed(self): + a = Series(["a", "b"], index=range(2)) + b = Series(range(2), index=range(2)) + f = DataFrame({"A": a, "B": b}) + + a = Series(["a", "b"], index=range(5, 7)) + b = Series(range(2), index=range(5, 7)) + g = DataFrame({"A": a, "B": b}) + + exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6]) + combined = f.combine_first(g) + tm.assert_frame_equal(combined, exp) + + def test_combine_first(self, float_frame): + # disjoint + head, tail = float_frame[:5], float_frame[5:] + + combined = head.combine_first(tail) + reordered_frame = float_frame.reindex(combined.index) + tm.assert_frame_equal(combined, reordered_frame) + assert tm.equalContents(combined.columns, float_frame.columns) + tm.assert_series_equal(combined["A"], reordered_frame["A"]) + + # same index + fcopy = float_frame.copy() + fcopy["A"] = 1 + del fcopy["C"] + + fcopy2 = float_frame.copy() + fcopy2["B"] = 0 + del fcopy2["D"] + + combined = fcopy.combine_first(fcopy2) + + assert (combined["A"] == 1).all() + tm.assert_series_equal(combined["B"], fcopy["B"]) + tm.assert_series_equal(combined["C"], fcopy2["C"]) + tm.assert_series_equal(combined["D"], fcopy["D"]) + + # overlap + head, tail = reordered_frame[:10].copy(), reordered_frame + head["A"] = 1 + + combined = head.combine_first(tail) + assert (combined["A"][:10] == 1).all() + + # reverse overlap + tail.iloc[:10, tail.columns.get_loc("A")] = 0 + combined = tail.combine_first(head) + assert (combined["A"][:10] == 0).all() + + # no overlap + f = float_frame[:10] + g = float_frame[10:] + combined = f.combine_first(g) + tm.assert_series_equal(combined["A"].reindex(f.index), f["A"]) + tm.assert_series_equal(combined["A"].reindex(g.index), g["A"]) + + # corner cases + comb = float_frame.combine_first(DataFrame()) + tm.assert_frame_equal(comb, float_frame) + + comb = DataFrame().combine_first(float_frame) + tm.assert_frame_equal(comb, float_frame) + + comb = float_frame.combine_first(DataFrame(index=["faz", "boo"])) + assert "faz" in comb.index + + # #2525 + df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)]) + df2 = DataFrame(columns=["b"]) + result = df.combine_first(df2) + assert "b" in result + + def test_combine_first_mixed_bug(self): + idx = Index(["a", "b", "c", "e"]) + ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx) + ser2 = Series(["a", "b", "c", "e"], index=idx) + ser3 = Series([12, 4, 5, 97], index=idx) + + frame1 = 
DataFrame({"col0": ser1, "col2": ser2, "col3": ser3}) + + idx = Index(["a", "b", "c", "f"]) + ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx) + ser2 = Series(["a", "b", "c", "f"], index=idx) + ser3 = Series([12, 4, 5, 97], index=idx) + + frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3}) + + combined = frame1.combine_first(frame2) + assert len(combined.columns) == 5 + + def test_combine_first_same_as_in_update(self): + # gh 3016 (same as in update) + df = DataFrame( + [[1.0, 2.0, False, True], [4.0, 5.0, True, False]], + columns=["A", "B", "bool1", "bool2"], + ) + + other = DataFrame([[45, 45]], index=[0], columns=["A", "B"]) + result = df.combine_first(other) + tm.assert_frame_equal(result, df) + + df.loc[0, "A"] = np.nan + result = df.combine_first(other) + df.loc[0, "A"] = 45 + tm.assert_frame_equal(result, df) + + def test_combine_first_doc_example(self): + # doc example + df1 = DataFrame( + {"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]} + ) + + df2 = DataFrame( + { + "A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0], + "B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0], + } + ) + + result = df1.combine_first(df2) + expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]}) + tm.assert_frame_equal(result, expected) + + def test_combine_first_return_obj_type_with_bools(self): + # GH3552 + + df1 = DataFrame( + [[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]] + ) + df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2]) + + expected = Series([True, True, False], name=2, dtype=bool) + + result_12 = df1.combine_first(df2)[2] + tm.assert_series_equal(result_12, expected) + + result_21 = df2.combine_first(df1)[2] + tm.assert_series_equal(result_21, expected) + + @pytest.mark.parametrize( + "data1, data2, data_expected", + ( + ( + [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], + [pd.NaT, pd.NaT, pd.NaT], + [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], + ), + ( + [pd.NaT, pd.NaT, pd.NaT], + [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], + [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], + ), + ( + [datetime(2000, 1, 2), pd.NaT, pd.NaT], + [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], + [datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)], + ), + ( + [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], + [datetime(2000, 1, 2), pd.NaT, pd.NaT], + [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], + ), + ), + ) + def test_combine_first_convert_datatime_correctly( + self, data1, data2, data_expected + ): + # GH 3593 + + df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2}) + result = df1.combine_first(df2) + expected = DataFrame({"a": data_expected}) + tm.assert_frame_equal(result, expected) + + def test_combine_first_align_nan(self): + # GH 7509 (not fixed) + dfa = DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"]) + dfb = DataFrame([[4], [5]], columns=["b"]) + assert dfa["a"].dtype == "datetime64[ns]" + assert dfa["b"].dtype == "int64" + + res = dfa.combine_first(dfb) + exp = DataFrame( + {"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2, 5]}, + columns=["a", "b"], + ) + tm.assert_frame_equal(res, exp) + assert res["a"].dtype == "datetime64[ns]" + # TODO: this must be int64 + assert res["b"].dtype == "int64" + + res = dfa.iloc[:0].combine_first(dfb) + exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"]) + 
tm.assert_frame_equal(res, exp) + # TODO: this must be datetime64 + assert res["a"].dtype == "float64" + # TODO: this must be int64 + assert res["b"].dtype == "int64" + + def test_combine_first_timezone(self): + # see gh-7630 + data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC") + df1 = DataFrame( + columns=["UTCdatetime", "abc"], + data=data1, + index=pd.date_range("20140627", periods=1), + ) + data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC") + df2 = DataFrame( + columns=["UTCdatetime", "xyz"], + data=data2, + index=pd.date_range("20140628", periods=1), + ) + res = df2[["UTCdatetime"]].combine_first(df1) + exp = DataFrame( + { + "UTCdatetime": [ + pd.Timestamp("2010-01-01 01:01", tz="UTC"), + pd.Timestamp("2012-12-12 12:12", tz="UTC"), + ], + "abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT], + }, + columns=["UTCdatetime", "abc"], + index=pd.date_range("20140627", periods=2, freq="D"), + ) + assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]" + assert res["abc"].dtype == "datetime64[ns, UTC]" + + tm.assert_frame_equal(res, exp) + + # see gh-10567 + dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC") + df1 = DataFrame({"DATE": dts1}) + dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC") + df2 = DataFrame({"DATE": dts2}) + + res = df1.combine_first(df2) + tm.assert_frame_equal(res, df1) + assert res["DATE"].dtype == "datetime64[ns, UTC]" + + dts1 = pd.DatetimeIndex( + ["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern" + ) + df1 = DataFrame({"DATE": dts1}, index=[1, 3, 5, 7]) + dts2 = pd.DatetimeIndex( + ["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern" + ) + df2 = DataFrame({"DATE": dts2}, index=[2, 4, 5]) + + res = df1.combine_first(df2) + exp_dts = pd.DatetimeIndex( + [ + "2011-01-01", + "2012-01-01", + "NaT", + "2012-01-02", + "2011-01-03", + "2011-01-04", + ], + tz="US/Eastern", + ) + exp = DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7]) + tm.assert_frame_equal(res, exp) + + # different tz + dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern") + df1 = DataFrame({"DATE": dts1}) + dts2 = pd.date_range("2015-01-03", "2015-01-05") + df2 = DataFrame({"DATE": dts2}) + + # if df1 doesn't have NaN, keep its dtype + res = df1.combine_first(df2) + tm.assert_frame_equal(res, df1) + assert res["DATE"].dtype == "datetime64[ns, US/Eastern]" + + dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern") + df1 = DataFrame({"DATE": dts1}) + dts2 = pd.date_range("2015-01-01", "2015-01-03") + df2 = DataFrame({"DATE": dts2}) + + res = df1.combine_first(df2) + exp_dts = [ + pd.Timestamp("2015-01-01", tz="US/Eastern"), + pd.Timestamp("2015-01-02", tz="US/Eastern"), + pd.Timestamp("2015-01-03"), + ] + exp = DataFrame({"DATE": exp_dts}) + tm.assert_frame_equal(res, exp) + assert res["DATE"].dtype == "object" + + def test_combine_first_timedelta(self): + data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"]) + df1 = DataFrame({"TD": data1}, index=[1, 3, 5, 7]) + data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"]) + df2 = DataFrame({"TD": data2}, index=[2, 4, 5]) + + res = df1.combine_first(df2) + exp_dts = pd.TimedeltaIndex( + ["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"] + ) + exp = DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7]) + tm.assert_frame_equal(res, exp) + assert res["TD"].dtype == "timedelta64[ns]" + + def test_combine_first_period(self): + data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M") + df1 = DataFrame({"P": data1}, index=[1, 3, 5, 7]) + data2 
= pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M") + df2 = DataFrame({"P": data2}, index=[2, 4, 5]) + + res = df1.combine_first(df2) + exp_dts = pd.PeriodIndex( + ["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M" + ) + exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7]) + tm.assert_frame_equal(res, exp) + assert res["P"].dtype == data1.dtype + + # different freq + dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D") + df2 = DataFrame({"P": dts2}, index=[2, 4, 5]) + + res = df1.combine_first(df2) + exp_dts = [ + pd.Period("2011-01", freq="M"), + pd.Period("2012-01-01", freq="D"), + pd.NaT, + pd.Period("2012-01-02", freq="D"), + pd.Period("2011-03", freq="M"), + pd.Period("2011-04", freq="M"), + ] + exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7]) + tm.assert_frame_equal(res, exp) + assert res["P"].dtype == "object" + + def test_combine_first_int(self): + # GH14687 - integer series that do no align exactly + + df1 = DataFrame({"a": [0, 1, 3, 5]}, dtype="int64") + df2 = DataFrame({"a": [1, 4]}, dtype="int64") + + result_12 = df1.combine_first(df2) + expected_12 = DataFrame({"a": [0, 1, 3, 5]}) + tm.assert_frame_equal(result_12, expected_12) + + result_21 = df2.combine_first(df1) + expected_21 = DataFrame({"a": [1, 4, 3, 5]}) + tm.assert_frame_equal(result_21, expected_21) + + @pytest.mark.parametrize("val", [1, 1.0]) + def test_combine_first_with_asymmetric_other(self, val): + # see gh-20699 + df1 = DataFrame({"isNum": [val]}) + df2 = DataFrame({"isBool": [True]}) + + res = df1.combine_first(df2) + exp = DataFrame({"isBool": [True], "isNum": [val]}) + + tm.assert_frame_equal(res, exp) + + def test_combine_first_string_dtype_only_na(self, nullable_string_dtype): + # GH: 37519 + df = DataFrame( + {"a": ["962", "85"], "b": [pd.NA] * 2}, dtype=nullable_string_dtype + ) + df2 = DataFrame({"a": ["85"], "b": [pd.NA]}, dtype=nullable_string_dtype) + df.set_index(["a", "b"], inplace=True) + df2.set_index(["a", "b"], inplace=True) + result = df.combine_first(df2) + expected = DataFrame( + {"a": ["962", "85"], "b": [pd.NA] * 2}, dtype=nullable_string_dtype + ).set_index(["a", "b"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "scalar1, scalar2", + [ + (datetime(2020, 1, 1), datetime(2020, 1, 2)), + (pd.Period("2020-01-01", "D"), pd.Period("2020-01-02", "D")), + (pd.Timedelta("89 days"), pd.Timedelta("60 min")), + (pd.Interval(left=0, right=1), pd.Interval(left=2, right=3, closed="left")), + ], +) +def test_combine_first_timestamp_bug(scalar1, scalar2, nulls_fixture): + # GH28481 + na_value = nulls_fixture + + frame = DataFrame([[na_value, na_value]], columns=["a", "b"]) + other = DataFrame([[scalar1, scalar2]], columns=["b", "c"]) + + common_dtype = find_common_type([frame.dtypes["b"], other.dtypes["b"]]) + + if is_dtype_equal(common_dtype, "object") or frame.dtypes["b"] == other.dtypes["b"]: + val = scalar1 + else: + val = na_value + + result = frame.combine_first(other) + + expected = DataFrame([[na_value, val, scalar2]], columns=["a", "b", "c"]) + + expected["b"] = expected["b"].astype(common_dtype) + + tm.assert_frame_equal(result, expected) + + +def test_combine_first_timestamp_bug_NaT(): + # GH28481 + frame = DataFrame([[pd.NaT, pd.NaT]], columns=["a", "b"]) + other = DataFrame( + [[datetime(2020, 1, 1), datetime(2020, 1, 2)]], columns=["b", "c"] + ) + + result = frame.combine_first(other) + expected = DataFrame( + [[pd.NaT, datetime(2020, 1, 1), datetime(2020, 1, 2)]], columns=["a", "b", "c"] 
+ ) + + tm.assert_frame_equal(result, expected) + + +def test_combine_first_with_nan_multiindex(): + # gh-36562 + + mi1 = MultiIndex.from_arrays( + [["b", "b", "c", "a", "b", np.nan], [1, 2, 3, 4, 5, 6]], names=["a", "b"] + ) + df = DataFrame({"c": [1, 1, 1, 1, 1, 1]}, index=mi1) + mi2 = MultiIndex.from_arrays( + [["a", "b", "c", "a", "b", "d"], [1, 1, 1, 1, 1, 1]], names=["a", "b"] + ) + s = Series([1, 2, 3, 4, 5, 6], index=mi2) + res = df.combine_first(DataFrame({"d": s})) + mi_expected = MultiIndex.from_arrays( + [ + ["a", "a", "a", "b", "b", "b", "b", "c", "c", "d", np.nan], + [1, 1, 4, 1, 1, 2, 5, 1, 3, 1, 6], + ], + names=["a", "b"], + ) + expected = DataFrame( + { + "c": [np.nan, np.nan, 1, 1, 1, 1, 1, np.nan, 1, np.nan, 1], + "d": [1.0, 4.0, np.nan, 2.0, 5.0, np.nan, np.nan, 3.0, np.nan, 6.0, np.nan], + }, + index=mi_expected, + ) + tm.assert_frame_equal(res, expected) + + +def test_combine_preserve_dtypes(): + # GH7509 + a_column = Series(["a", "b"], index=range(2)) + b_column = Series(range(2), index=range(2)) + df1 = DataFrame({"A": a_column, "B": b_column}) + + c_column = Series(["a", "b"], index=range(5, 7)) + b_column = Series(range(-1, 1), index=range(5, 7)) + df2 = DataFrame({"B": b_column, "C": c_column}) + + expected = DataFrame( + { + "A": ["a", "b", np.nan, np.nan], + "B": [0, 1, -1, 0], + "C": [np.nan, np.nan, "a", "b"], + }, + index=[0, 1, 5, 6], + ) + combined = df1.combine_first(df2) + tm.assert_frame_equal(combined, expected) + + +def test_combine_first_duplicates_rows_for_nan_index_values(): + # GH39881 + df1 = DataFrame( + {"x": [9, 10, 11]}, + index=MultiIndex.from_arrays([[1, 2, 3], [np.nan, 5, 6]], names=["a", "b"]), + ) + + df2 = DataFrame( + {"y": [12, 13, 14]}, + index=MultiIndex.from_arrays([[1, 2, 4], [np.nan, 5, 7]], names=["a", "b"]), + ) + + expected = DataFrame( + { + "x": [9.0, 10.0, 11.0, np.nan], + "y": [12.0, 13.0, np.nan, 14.0], + }, + index=MultiIndex.from_arrays( + [[1, 2, 3, 4], [np.nan, 5, 6, 7]], names=["a", "b"] + ), + ) + combined = df1.combine_first(df2) + tm.assert_frame_equal(combined, expected) + + +def test_combine_first_int64_not_cast_to_float64(): + # GH 28613 + df_1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + df_2 = DataFrame({"A": [1, 20, 30], "B": [40, 50, 60], "C": [12, 34, 65]}) + result = df_1.combine_first(df_2) + expected = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [12, 34, 65]}) + tm.assert_frame_equal(result, expected) + + +def test_midx_losing_dtype(): + # GH#49830 + midx = MultiIndex.from_arrays([[0, 0], [np.nan, np.nan]]) + midx2 = MultiIndex.from_arrays([[1, 1], [np.nan, np.nan]]) + df1 = DataFrame({"a": [None, 4]}, index=midx) + df2 = DataFrame({"a": [3, 3]}, index=midx2) + result = df1.combine_first(df2) + expected_midx = MultiIndex.from_arrays( + [[0, 0, 1, 1], [np.nan, np.nan, np.nan, np.nan]] + ) + expected = DataFrame({"a": [np.nan, 4, 3, 3]}, index=expected_midx) + tm.assert_frame_equal(result, expected) + + +def test_combine_first_empty_columns(): + left = DataFrame(columns=["a", "b"]) + right = DataFrame(columns=["a", "c"]) + result = left.combine_first(right) + expected = DataFrame(columns=["a", "b", "c"]) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_compare.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_compare.py new file mode 100644 index 00000000..a4d0a706 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_compare.py @@ -0,0 +1,305 @@ +import numpy as 
np +import pytest + +from pandas.compat.numpy import np_version_gte1p25 + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"]) +def test_compare_axis(align_axis): + # GH#30429 + df = pd.DataFrame( + {"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]}, + columns=["col1", "col2", "col3"], + ) + df2 = df.copy() + df2.loc[0, "col1"] = "c" + df2.loc[2, "col3"] = 4.0 + + result = df.compare(df2, align_axis=align_axis) + + if align_axis in (1, "columns"): + indices = pd.Index([0, 2]) + columns = pd.MultiIndex.from_product([["col1", "col3"], ["self", "other"]]) + expected = pd.DataFrame( + [["a", "c", np.nan, np.nan], [np.nan, np.nan, 3.0, 4.0]], + index=indices, + columns=columns, + ) + else: + indices = pd.MultiIndex.from_product([[0, 2], ["self", "other"]]) + columns = pd.Index(["col1", "col3"]) + expected = pd.DataFrame( + [["a", np.nan], ["c", np.nan], [np.nan, 3.0], [np.nan, 4.0]], + index=indices, + columns=columns, + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "keep_shape, keep_equal", + [ + (True, False), + (False, True), + (True, True), + # False, False case is already covered in test_compare_axis + ], +) +def test_compare_various_formats(keep_shape, keep_equal): + df = pd.DataFrame( + {"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]}, + columns=["col1", "col2", "col3"], + ) + df2 = df.copy() + df2.loc[0, "col1"] = "c" + df2.loc[2, "col3"] = 4.0 + + result = df.compare(df2, keep_shape=keep_shape, keep_equal=keep_equal) + + if keep_shape: + indices = pd.Index([0, 1, 2]) + columns = pd.MultiIndex.from_product( + [["col1", "col2", "col3"], ["self", "other"]] + ) + if keep_equal: + expected = pd.DataFrame( + [ + ["a", "c", 1.0, 1.0, 1.0, 1.0], + ["b", "b", 2.0, 2.0, 2.0, 2.0], + ["c", "c", np.nan, np.nan, 3.0, 4.0], + ], + index=indices, + columns=columns, + ) + else: + expected = pd.DataFrame( + [ + ["a", "c", np.nan, np.nan, np.nan, np.nan], + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + [np.nan, np.nan, np.nan, np.nan, 3.0, 4.0], + ], + index=indices, + columns=columns, + ) + else: + indices = pd.Index([0, 2]) + columns = pd.MultiIndex.from_product([["col1", "col3"], ["self", "other"]]) + expected = pd.DataFrame( + [["a", "c", 1.0, 1.0], ["c", "c", 3.0, 4.0]], index=indices, columns=columns + ) + tm.assert_frame_equal(result, expected) + + +def test_compare_with_equal_nulls(): + # We want to make sure two NaNs are considered the same + # and dropped where applicable + df = pd.DataFrame( + {"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]}, + columns=["col1", "col2", "col3"], + ) + df2 = df.copy() + df2.loc[0, "col1"] = "c" + + result = df.compare(df2) + indices = pd.Index([0]) + columns = pd.MultiIndex.from_product([["col1"], ["self", "other"]]) + expected = pd.DataFrame([["a", "c"]], index=indices, columns=columns) + tm.assert_frame_equal(result, expected) + + +def test_compare_with_non_equal_nulls(): + # We want to make sure the relevant NaNs do not get dropped + # even if the entire row or column are NaNs + df = pd.DataFrame( + {"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]}, + columns=["col1", "col2", "col3"], + ) + df2 = df.copy() + df2.loc[0, "col1"] = "c" + df2.loc[2, "col3"] = np.nan + + result = df.compare(df2) + + indices = pd.Index([0, 2]) + columns = pd.MultiIndex.from_product([["col1", "col3"], ["self", "other"]]) + expected = pd.DataFrame( + [["a", "c", 
np.nan, np.nan], [np.nan, np.nan, 3.0, np.nan]], + index=indices, + columns=columns, + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("align_axis", [0, 1]) +def test_compare_multi_index(align_axis): + df = pd.DataFrame( + {"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]} + ) + df.columns = pd.MultiIndex.from_arrays([["a", "a", "b"], ["col1", "col2", "col3"]]) + df.index = pd.MultiIndex.from_arrays([["x", "x", "y"], [0, 1, 2]]) + + df2 = df.copy() + df2.iloc[0, 0] = "c" + df2.iloc[2, 2] = 4.0 + + result = df.compare(df2, align_axis=align_axis) + + if align_axis == 0: + indices = pd.MultiIndex.from_arrays( + [["x", "x", "y", "y"], [0, 0, 2, 2], ["self", "other", "self", "other"]] + ) + columns = pd.MultiIndex.from_arrays([["a", "b"], ["col1", "col3"]]) + data = [["a", np.nan], ["c", np.nan], [np.nan, 3.0], [np.nan, 4.0]] + else: + indices = pd.MultiIndex.from_arrays([["x", "y"], [0, 2]]) + columns = pd.MultiIndex.from_arrays( + [ + ["a", "a", "b", "b"], + ["col1", "col1", "col3", "col3"], + ["self", "other", "self", "other"], + ] + ) + data = [["a", "c", np.nan, np.nan], [np.nan, np.nan, 3.0, 4.0]] + + expected = pd.DataFrame(data=data, index=indices, columns=columns) + tm.assert_frame_equal(result, expected) + + +def test_compare_unaligned_objects(): + # test DataFrames with different indices + msg = ( + r"Can only compare identically-labeled \(both index and columns\) DataFrame " + "objects" + ) + with pytest.raises(ValueError, match=msg): + df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"]) + df2 = pd.DataFrame([1, 2, 3], index=["a", "b", "d"]) + df1.compare(df2) + + # test DataFrames with different shapes + msg = ( + r"Can only compare identically-labeled \(both index and columns\) DataFrame " + "objects" + ) + with pytest.raises(ValueError, match=msg): + df1 = pd.DataFrame(np.ones((3, 3))) + df2 = pd.DataFrame(np.zeros((2, 1))) + df1.compare(df2) + + +def test_compare_result_names(): + # GH 44354 + df1 = pd.DataFrame( + {"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]}, + ) + df2 = pd.DataFrame( + { + "col1": ["c", "b", "c"], + "col2": [1.0, 2.0, np.nan], + "col3": [1.0, 2.0, np.nan], + }, + ) + result = df1.compare(df2, result_names=("left", "right")) + expected = pd.DataFrame( + { + ("col1", "left"): {0: "a", 2: np.nan}, + ("col1", "right"): {0: "c", 2: np.nan}, + ("col3", "left"): {0: np.nan, 2: 3.0}, + ("col3", "right"): {0: np.nan, 2: np.nan}, + } + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "result_names", + [ + [1, 2], + "HK", + {"2": 2, "3": 3}, + 3, + 3.0, + ], +) +def test_invalid_input_result_names(result_names): + # GH 44354 + df1 = pd.DataFrame( + {"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]}, + ) + df2 = pd.DataFrame( + { + "col1": ["c", "b", "c"], + "col2": [1.0, 2.0, np.nan], + "col3": [1.0, 2.0, np.nan], + }, + ) + with pytest.raises( + TypeError, + match=( + f"Passing 'result_names' as a {type(result_names)} is not " + "supported. Provide 'result_names' as a tuple instead." 
+ ), + ): + df1.compare(df2, result_names=result_names) + + +@pytest.mark.parametrize( + "val1,val2", + [(4, pd.NA), (pd.NA, pd.NA), (pd.NA, 4)], +) +def test_compare_ea_and_np_dtype(val1, val2): + # GH 48966 + arr = [4.0, val1] + ser = pd.Series([1, val2], dtype="Int64") + + df1 = pd.DataFrame({"a": arr, "b": [1.0, 2]}) + df2 = pd.DataFrame({"a": ser, "b": [1.0, 2]}) + expected = pd.DataFrame( + { + ("a", "self"): arr, + ("a", "other"): ser, + ("b", "self"): np.nan, + ("b", "other"): np.nan, + } + ) + if val1 is pd.NA and val2 is pd.NA: + # GH#18463 TODO: is this really the desired behavior? + expected.loc[1, ("a", "self")] = np.nan + + if val1 is pd.NA and np_version_gte1p25: + # can't compare with numpy array if it contains pd.NA + with pytest.raises(TypeError, match="boolean value of NA is ambiguous"): + result = df1.compare(df2, keep_shape=True) + else: + result = df1.compare(df2, keep_shape=True) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "df1_val,df2_val,diff_self,diff_other", + [ + (4, 3, 4, 3), + (4, 4, pd.NA, pd.NA), + (4, pd.NA, 4, pd.NA), + (pd.NA, pd.NA, pd.NA, pd.NA), + ], +) +def test_compare_nullable_int64_dtype(df1_val, df2_val, diff_self, diff_other): + # GH 48966 + df1 = pd.DataFrame({"a": pd.Series([df1_val, pd.NA], dtype="Int64"), "b": [1.0, 2]}) + df2 = df1.copy() + df2.loc[0, "a"] = df2_val + + expected = pd.DataFrame( + { + ("a", "self"): pd.Series([diff_self, pd.NA], dtype="Int64"), + ("a", "other"): pd.Series([diff_other, pd.NA], dtype="Int64"), + ("b", "self"): np.nan, + ("b", "other"): np.nan, + } + ) + result = df1.compare(df2, keep_shape=True) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_convert_dtypes.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_convert_dtypes.py new file mode 100644 index 00000000..c2b1016e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_convert_dtypes.py @@ -0,0 +1,177 @@ +import datetime + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +class TestConvertDtypes: + @pytest.mark.parametrize( + "convert_integer, expected", [(False, np.dtype("int32")), (True, "Int32")] + ) + def test_convert_dtypes(self, convert_integer, expected, string_storage): + # Specific types are tested in tests/series/test_dtypes.py + # Just check that it works for DataFrame here + df = pd.DataFrame( + { + "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")), + "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")), + } + ) + with pd.option_context("string_storage", string_storage): + result = df.convert_dtypes(True, True, convert_integer, False) + expected = pd.DataFrame( + { + "a": pd.Series([1, 2, 3], dtype=expected), + "b": pd.Series(["x", "y", "z"], dtype=f"string[{string_storage}]"), + } + ) + tm.assert_frame_equal(result, expected) + + def test_convert_empty(self): + # Empty DataFrame can pass convert_dtypes, see GH#40393 + empty_df = pd.DataFrame() + tm.assert_frame_equal(empty_df, empty_df.convert_dtypes()) + + def test_convert_dtypes_retain_column_names(self): + # GH#41435 + df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}) + df.columns.name = "cols" + + result = df.convert_dtypes() + tm.assert_index_equal(result.columns, df.columns) + assert result.columns.name == "cols" + + def test_pyarrow_dtype_backend(self): + pa = pytest.importorskip("pyarrow") + df = pd.DataFrame( + { + "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")), + "b": 
pd.Series(["x", "y", None], dtype=np.dtype("O")), + "c": pd.Series([True, False, None], dtype=np.dtype("O")), + "d": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")), + "e": pd.Series(pd.date_range("2022", periods=3)), + "f": pd.Series(pd.date_range("2022", periods=3, tz="UTC").as_unit("s")), + "g": pd.Series(pd.timedelta_range("1D", periods=3)), + } + ) + result = df.convert_dtypes(dtype_backend="pyarrow") + expected = pd.DataFrame( + { + "a": pd.arrays.ArrowExtensionArray( + pa.array([1, 2, 3], type=pa.int32()) + ), + "b": pd.arrays.ArrowExtensionArray(pa.array(["x", "y", None])), + "c": pd.arrays.ArrowExtensionArray(pa.array([True, False, None])), + "d": pd.arrays.ArrowExtensionArray(pa.array([None, 100.5, 200.0])), + "e": pd.arrays.ArrowExtensionArray( + pa.array( + [ + datetime.datetime(2022, 1, 1), + datetime.datetime(2022, 1, 2), + datetime.datetime(2022, 1, 3), + ], + type=pa.timestamp(unit="ns"), + ) + ), + "f": pd.arrays.ArrowExtensionArray( + pa.array( + [ + datetime.datetime(2022, 1, 1), + datetime.datetime(2022, 1, 2), + datetime.datetime(2022, 1, 3), + ], + type=pa.timestamp(unit="s", tz="UTC"), + ) + ), + "g": pd.arrays.ArrowExtensionArray( + pa.array( + [ + datetime.timedelta(1), + datetime.timedelta(2), + datetime.timedelta(3), + ], + type=pa.duration("ns"), + ) + ), + } + ) + tm.assert_frame_equal(result, expected) + + def test_pyarrow_dtype_backend_already_pyarrow(self): + pytest.importorskip("pyarrow") + expected = pd.DataFrame([1, 2, 3], dtype="int64[pyarrow]") + result = expected.convert_dtypes(dtype_backend="pyarrow") + tm.assert_frame_equal(result, expected) + + def test_pyarrow_dtype_backend_from_pandas_nullable(self): + pa = pytest.importorskip("pyarrow") + df = pd.DataFrame( + { + "a": pd.Series([1, 2, None], dtype="Int32"), + "b": pd.Series(["x", "y", None], dtype="string[python]"), + "c": pd.Series([True, False, None], dtype="boolean"), + "d": pd.Series([None, 100.5, 200], dtype="Float64"), + } + ) + result = df.convert_dtypes(dtype_backend="pyarrow") + expected = pd.DataFrame( + { + "a": pd.arrays.ArrowExtensionArray( + pa.array([1, 2, None], type=pa.int32()) + ), + "b": pd.arrays.ArrowExtensionArray(pa.array(["x", "y", None])), + "c": pd.arrays.ArrowExtensionArray(pa.array([True, False, None])), + "d": pd.arrays.ArrowExtensionArray(pa.array([None, 100.5, 200.0])), + } + ) + tm.assert_frame_equal(result, expected) + + def test_pyarrow_dtype_empty_object(self): + # GH 50970 + pytest.importorskip("pyarrow") + expected = pd.DataFrame(columns=[0]) + result = expected.convert_dtypes(dtype_backend="pyarrow") + tm.assert_frame_equal(result, expected) + + def test_pyarrow_engine_lines_false(self): + # GH 48893 + df = pd.DataFrame({"a": [1, 2, 3]}) + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." 
+ ) + with pytest.raises(ValueError, match=msg): + df.convert_dtypes(dtype_backend="numpy") + + def test_pyarrow_backend_no_conversion(self): + # GH#52872 + pytest.importorskip("pyarrow") + df = pd.DataFrame({"a": [1, 2], "b": 1.5, "c": True, "d": "x"}) + expected = df.copy() + result = df.convert_dtypes( + convert_floating=False, + convert_integer=False, + convert_boolean=False, + convert_string=False, + dtype_backend="pyarrow", + ) + tm.assert_frame_equal(result, expected) + + def test_convert_dtypes_pyarrow_to_np_nullable(self): + # GH 53648 + pytest.importorskip("pyarrow") + ser = pd.DataFrame(range(2), dtype="int32[pyarrow]") + result = ser.convert_dtypes(dtype_backend="numpy_nullable") + expected = pd.DataFrame(range(2), dtype="Int32") + tm.assert_frame_equal(result, expected) + + def test_convert_dtypes_pyarrow_timestamp(self): + # GH 54191 + pytest.importorskip("pyarrow") + ser = pd.Series(pd.date_range("2020-01-01", "2020-01-02", freq="1min")) + expected = ser.astype("timestamp[ms][pyarrow]") + result = expected.convert_dtypes(dtype_backend="pyarrow") + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_copy.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_copy.py new file mode 100644 index 00000000..95fcaaa4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_copy.py @@ -0,0 +1,64 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import DataFrame +import pandas._testing as tm + + +class TestCopy: + @pytest.mark.parametrize("attr", ["index", "columns"]) + def test_copy_index_name_checking(self, float_frame, attr): + # don't want to be able to modify the index stored elsewhere after + # making a copy + ind = getattr(float_frame, attr) + ind.name = None + cp = float_frame.copy() + getattr(cp, attr).name = "foo" + assert getattr(float_frame, attr).name is None + + @td.skip_copy_on_write_invalid_test + def test_copy_cache(self): + # GH#31784 _item_cache not cleared on copy causes incorrect reads after updates + df = DataFrame({"a": [1]}) + + df["x"] = [0] + df["a"] + + df.copy() + + df["a"].values[0] = -1 + + tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0]})) + + df["y"] = [0] + + assert df["a"].values[0] == -1 + tm.assert_frame_equal(df, DataFrame({"a": [-1], "x": [0], "y": [0]})) + + def test_copy(self, float_frame, float_string_frame): + cop = float_frame.copy() + cop["E"] = cop["A"] + assert "E" not in float_frame + + # copy objects + copy = float_string_frame.copy() + assert copy._mgr is not float_string_frame._mgr + + @td.skip_array_manager_invalid_test + def test_copy_consolidates(self): + # GH#42477 + df = DataFrame( + { + "a": np.random.default_rng(2).integers(0, 100, size=55), + "b": np.random.default_rng(2).integers(0, 100, size=55), + } + ) + + for i in range(0, 10): + df.loc[:, f"n_{i}"] = np.random.default_rng(2).integers(0, 100, size=55) + + assert len(df._mgr.blocks) == 11 + result = df.copy() + assert len(result._mgr.blocks) == 1 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_count.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_count.py new file mode 100644 index 00000000..1553a8a8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_count.py @@ -0,0 +1,39 @@ +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +class TestDataFrameCount: + def 
test_count(self): + # corner case + frame = DataFrame() + ct1 = frame.count(1) + assert isinstance(ct1, Series) + + ct2 = frame.count(0) + assert isinstance(ct2, Series) + + # GH#423 + df = DataFrame(index=range(10)) + result = df.count(1) + expected = Series(0, index=df.index) + tm.assert_series_equal(result, expected) + + df = DataFrame(columns=range(10)) + result = df.count(0) + expected = Series(0, index=df.columns) + tm.assert_series_equal(result, expected) + + df = DataFrame() + result = df.count() + expected = Series(dtype="int64") + tm.assert_series_equal(result, expected) + + def test_count_objects(self, float_string_frame): + dm = DataFrame(float_string_frame._series) + df = DataFrame(float_string_frame._series) + + tm.assert_series_equal(dm.count(), df.count()) + tm.assert_series_equal(dm.count(1), df.count(1)) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_cov_corr.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_cov_corr.py new file mode 100644 index 00000000..23a96561 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_cov_corr.py @@ -0,0 +1,458 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Series, + isna, +) +import pandas._testing as tm + + +class TestDataFrameCov: + def test_cov(self, float_frame, float_string_frame): + # min_periods no NAs (corner case) + expected = float_frame.cov() + result = float_frame.cov(min_periods=len(float_frame)) + + tm.assert_frame_equal(expected, result) + + result = float_frame.cov(min_periods=len(float_frame) + 1) + assert isna(result.values).all() + + # with NAs + frame = float_frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + frame.iloc[5:10, frame.columns.get_loc("B")] = np.nan + result = frame.cov(min_periods=len(frame) - 8) + expected = frame.cov() + expected.loc["A", "B"] = np.nan + expected.loc["B", "A"] = np.nan + tm.assert_frame_equal(result, expected) + + # regular + result = frame.cov() + expected = frame["A"].cov(frame["C"]) + tm.assert_almost_equal(result["A"]["C"], expected) + + # fails on non-numeric types + with pytest.raises(ValueError, match="could not convert string to float"): + float_string_frame.cov() + result = float_string_frame.cov(numeric_only=True) + expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].cov() + tm.assert_frame_equal(result, expected) + + # Single column frame + df = DataFrame(np.linspace(0.0, 1.0, 10)) + result = df.cov() + expected = DataFrame( + np.cov(df.values.T).reshape((1, 1)), index=df.columns, columns=df.columns + ) + tm.assert_frame_equal(result, expected) + df.loc[0] = np.nan + result = df.cov() + expected = DataFrame( + np.cov(df.values[1:].T).reshape((1, 1)), + index=df.columns, + columns=df.columns, + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("test_ddof", [None, 0, 1, 2, 3]) + def test_cov_ddof(self, test_ddof): + # GH#34611 + np_array1 = np.random.default_rng(2).random(10) + np_array2 = np.random.default_rng(2).random(10) + df = DataFrame({0: np_array1, 1: np_array2}) + result = df.cov(ddof=test_ddof) + expected_np = np.cov(np_array1, np_array2, ddof=test_ddof) + expected = DataFrame(expected_np) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "other_column", [pd.array([1, 2, 3]), np.array([1.0, 2.0, 3.0])] + ) + def test_cov_nullable_integer(self, other_column): + # 
https://github.com/pandas-dev/pandas/issues/33803 + data = DataFrame({"a": pd.array([1, 2, None]), "b": other_column}) + result = data.cov() + arr = np.array([[0.5, 0.5], [0.5, 1.0]]) + expected = DataFrame(arr, columns=["a", "b"], index=["a", "b"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("numeric_only", [True, False]) + def test_cov_numeric_only(self, numeric_only): + # when dtypes of pandas series are different + # then ndarray will have dtype=object, + # so it needs to be properly handled + df = DataFrame({"a": [1, 0], "c": ["x", "y"]}) + expected = DataFrame(0.5, index=["a"], columns=["a"]) + if numeric_only: + result = df.cov(numeric_only=numeric_only) + tm.assert_frame_equal(result, expected) + else: + with pytest.raises(ValueError, match="could not convert string to float"): + df.cov(numeric_only=numeric_only) + + +class TestDataFrameCorr: + # DataFrame.corr(), as opposed to DataFrame.corrwith + + @pytest.mark.parametrize("method", ["pearson", "kendall", "spearman"]) + def test_corr_scipy_method(self, float_frame, method): + pytest.importorskip("scipy") + float_frame.loc[float_frame.index[:5], "A"] = np.nan + float_frame.loc[float_frame.index[5:10], "B"] = np.nan + float_frame.loc[float_frame.index[:10], "A"] = float_frame["A"][10:20] + + correls = float_frame.corr(method=method) + expected = float_frame["A"].corr(float_frame["C"], method=method) + tm.assert_almost_equal(correls["A"]["C"], expected) + + # --------------------------------------------------------------------- + + def test_corr_non_numeric(self, float_string_frame): + with pytest.raises(ValueError, match="could not convert string to float"): + float_string_frame.corr() + result = float_string_frame.corr(numeric_only=True) + expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].corr() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"]) + def test_corr_nooverlap(self, meth): + # nothing in common + pytest.importorskip("scipy") + df = DataFrame( + { + "A": [1, 1.5, 1, np.nan, np.nan, np.nan], + "B": [np.nan, np.nan, np.nan, 1, 1.5, 1], + "C": [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + } + ) + rs = df.corr(meth) + assert isna(rs.loc["A", "B"]) + assert isna(rs.loc["B", "A"]) + assert rs.loc["A", "A"] == 1 + assert rs.loc["B", "B"] == 1 + assert isna(rs.loc["C", "C"]) + + @pytest.mark.parametrize("meth", ["pearson", "spearman"]) + def test_corr_constant(self, meth): + # constant --> all NA + df = DataFrame( + { + "A": [1, 1, 1, np.nan, np.nan, np.nan], + "B": [np.nan, np.nan, np.nan, 1, 1, 1], + } + ) + rs = df.corr(meth) + assert isna(rs.values).all() + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + @pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"]) + def test_corr_int_and_boolean(self, meth): + # when dtypes of pandas series are different + # then ndarray will have dtype=object, + # so it needs to be properly handled + pytest.importorskip("scipy") + df = DataFrame({"a": [True, False], "b": [1, 0]}) + + expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"]) + result = df.corr(meth) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("method", ["cov", "corr"]) + def test_corr_cov_independent_index_column(self, method): + # GH#14617 + df = DataFrame( + np.random.default_rng(2).standard_normal(4 * 10).reshape(10, 4), + columns=list("abcd"), + ) + result = getattr(df, method)() + assert result.index is not result.columns + assert
result.index.equals(result.columns) + + def test_corr_invalid_method(self): + # GH#22298 + df = DataFrame(np.random.default_rng(2).normal(size=(10, 2))) + msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, " + with pytest.raises(ValueError, match=msg): + df.corr(method="____") + + def test_corr_int(self): + # dtypes other than float64 GH#1761 + df = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]}) + + df.cov() + df.corr() + + @pytest.mark.parametrize( + "nullable_column", [pd.array([1, 2, 3]), pd.array([1, 2, None])] + ) + @pytest.mark.parametrize( + "other_column", + [pd.array([1, 2, 3]), np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, np.nan])], + ) + @pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"]) + def test_corr_nullable_integer(self, nullable_column, other_column, method): + # https://github.com/pandas-dev/pandas/issues/33803 + pytest.importorskip("scipy") + data = DataFrame({"a": nullable_column, "b": other_column}) + result = data.corr(method=method) + expected = DataFrame(np.ones((2, 2)), columns=["a", "b"], index=["a", "b"]) + tm.assert_frame_equal(result, expected) + + def test_corr_item_cache(self, using_copy_on_write): + # Check that corr does not lead to incorrect entries in item_cache + + df = DataFrame({"A": range(10)}) + df["B"] = range(10)[::-1] + + ser = df["A"] # populate item_cache + assert len(df._mgr.arrays) == 2 # i.e. 2 blocks + + _ = df.corr(numeric_only=True) + + if using_copy_on_write: + ser.iloc[0] = 99 + assert df.loc[0, "A"] == 0 + else: + # Check that the corr didn't break link between ser and df + ser.values[0] = 99 + assert df.loc[0, "A"] == 99 + assert df["A"] is ser + assert df.values[0, 0] == 99 + + @pytest.mark.parametrize("length", [2, 20, 200, 2000]) + def test_corr_for_constant_columns(self, length): + # GH: 37448 + df = DataFrame(length * [[0.4, 0.1]], columns=["A", "B"]) + result = df.corr() + expected = DataFrame( + {"A": [np.nan, np.nan], "B": [np.nan, np.nan]}, index=["A", "B"] + ) + tm.assert_frame_equal(result, expected) + + def test_calc_corr_small_numbers(self): + # GH: 37452 + df = DataFrame( + {"A": [1.0e-20, 2.0e-20, 3.0e-20], "B": [1.0e-20, 2.0e-20, 3.0e-20]} + ) + result = df.corr() + expected = DataFrame({"A": [1.0, 1.0], "B": [1.0, 1.0]}, index=["A", "B"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"]) + def test_corr_min_periods_greater_than_length(self, method): + pytest.importorskip("scipy") + df = DataFrame({"A": [1, 2], "B": [1, 2]}) + result = df.corr(method=method, min_periods=3) + expected = DataFrame( + {"A": [np.nan, np.nan], "B": [np.nan, np.nan]}, index=["A", "B"] + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"]) + @pytest.mark.parametrize("numeric_only", [True, False]) + def test_corr_numeric_only(self, meth, numeric_only): + # when dtypes of pandas series are different + # then ndarray will have dtype=object, + # so it needs to be properly handled + pytest.importorskip("scipy") + df = DataFrame({"a": [1, 0], "b": [1, 0], "c": ["x", "y"]}) + expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"]) + if numeric_only: + result = df.corr(meth, numeric_only=numeric_only) + tm.assert_frame_equal(result, expected) + else: + with pytest.raises(ValueError, match="could not convert string to float"): + df.corr(meth, numeric_only=numeric_only) + + +class TestDataFrameCorrWith: + @pytest.mark.parametrize( + "dtype", + [
"float64", + "Float64", + pytest.param("float64[pyarrow]", marks=td.skip_if_no("pyarrow")), + ], + ) + def test_corrwith(self, datetime_frame, dtype): + datetime_frame = datetime_frame.astype(dtype) + + a = datetime_frame + noise = Series(np.random.default_rng(2).standard_normal(len(a)), index=a.index) + + b = datetime_frame.add(noise, axis=0) + + # make sure order does not matter + b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:]) + del b["B"] + + colcorr = a.corrwith(b, axis=0) + tm.assert_almost_equal(colcorr["A"], a["A"].corr(b["A"])) + + rowcorr = a.corrwith(b, axis=1) + tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0)) + + dropped = a.corrwith(b, axis=0, drop=True) + tm.assert_almost_equal(dropped["A"], a["A"].corr(b["A"])) + assert "B" not in dropped + + dropped = a.corrwith(b, axis=1, drop=True) + assert a.index[-1] not in dropped.index + + # non time-series data + index = ["a", "b", "c", "d", "e"] + columns = ["one", "two", "three", "four"] + df1 = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + index=index, + columns=columns, + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=index[:4], + columns=columns, + ) + correls = df1.corrwith(df2, axis=1) + for row in index[:4]: + tm.assert_almost_equal(correls[row], df1.loc[row].corr(df2.loc[row])) + + def test_corrwith_with_objects(self): + df1 = tm.makeTimeDataFrame() + df2 = tm.makeTimeDataFrame() + cols = ["A", "B", "C", "D"] + + df1["obj"] = "foo" + df2["obj"] = "bar" + + with pytest.raises(TypeError, match="Could not convert"): + df1.corrwith(df2) + result = df1.corrwith(df2, numeric_only=True) + expected = df1.loc[:, cols].corrwith(df2.loc[:, cols]) + tm.assert_series_equal(result, expected) + + with pytest.raises(TypeError, match="unsupported operand type"): + df1.corrwith(df2, axis=1) + result = df1.corrwith(df2, axis=1, numeric_only=True) + expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1) + tm.assert_series_equal(result, expected) + + def test_corrwith_series(self, datetime_frame): + result = datetime_frame.corrwith(datetime_frame["A"]) + expected = datetime_frame.apply(datetime_frame["A"].corr) + + tm.assert_series_equal(result, expected) + + def test_corrwith_matches_corrcoef(self): + df1 = DataFrame(np.arange(10000), columns=["a"]) + df2 = DataFrame(np.arange(10000) ** 2, columns=["a"]) + c1 = df1.corrwith(df2)["a"] + c2 = np.corrcoef(df1["a"], df2["a"])[0][1] + + tm.assert_almost_equal(c1, c2) + assert c1 < 1 + + @pytest.mark.parametrize("numeric_only", [True, False]) + def test_corrwith_mixed_dtypes(self, numeric_only): + # GH#18570 + df = DataFrame( + {"a": [1, 4, 3, 2], "b": [4, 6, 7, 3], "c": ["a", "b", "c", "d"]} + ) + s = Series([0, 6, 7, 3]) + if numeric_only: + result = df.corrwith(s, numeric_only=numeric_only) + corrs = [df["a"].corr(s), df["b"].corr(s)] + expected = Series(data=corrs, index=["a", "b"]) + tm.assert_series_equal(result, expected) + else: + with pytest.raises( + ValueError, + match="could not convert string to float", + ): + df.corrwith(s, numeric_only=numeric_only) + + def test_corrwith_index_intersection(self): + df1 = DataFrame( + np.random.default_rng(2).random(size=(10, 2)), columns=["a", "b"] + ) + df2 = DataFrame( + np.random.default_rng(2).random(size=(10, 3)), columns=["a", "b", "c"] + ) + + result = df1.corrwith(df2, drop=True).index.sort_values() + expected = df1.columns.intersection(df2.columns).sort_values() + tm.assert_index_equal(result, expected) + + def test_corrwith_index_union(self): + df1 = 
DataFrame( + np.random.default_rng(2).random(size=(10, 2)), columns=["a", "b"] + ) + df2 = DataFrame( + np.random.default_rng(2).random(size=(10, 3)), columns=["a", "b", "c"] + ) + + result = df1.corrwith(df2, drop=False).index.sort_values() + expected = df1.columns.union(df2.columns).sort_values() + tm.assert_index_equal(result, expected) + + def test_corrwith_dup_cols(self): + # GH#21925 + df1 = DataFrame(np.vstack([np.arange(10)] * 3).T) + df2 = df1.copy() + df2 = pd.concat((df2, df2[0]), axis=1) + + result = df1.corrwith(df2) + expected = Series(np.ones(4), index=[0, 0, 1, 2]) + tm.assert_series_equal(result, expected) + + def test_corr_numerical_instabilities(self): + # GH#45640 + df = DataFrame([[0.2, 0.4], [0.4, 0.2]]) + result = df.corr() + expected = DataFrame({0: [1.0, -1.0], 1: [-1.0, 1.0]}) + tm.assert_frame_equal(result - 1, expected - 1, atol=1e-17) + + def test_corrwith_spearman(self): + # GH#21925 + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).random(size=(100, 3))) + result = df.corrwith(df**2, method="spearman") + expected = Series(np.ones(len(result))) + tm.assert_series_equal(result, expected) + + def test_corrwith_kendall(self): + # GH#21925 + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).random(size=(100, 3))) + result = df.corrwith(df**2, method="kendall") + expected = Series(np.ones(len(result))) + tm.assert_series_equal(result, expected) + + def test_corrwith_spearman_with_tied_data(self): + # GH#48826 + pytest.importorskip("scipy") + df1 = DataFrame( + { + "A": [1, np.nan, 7, 8], + "B": [False, True, True, False], + "C": [10, 4, 9, 3], + } + ) + df2 = df1[["B", "C"]] + result = (df1 + 1).corrwith(df2.B, method="spearman") + expected = Series([0.0, 1.0, 0.0], index=["A", "B", "C"]) + tm.assert_series_equal(result, expected) + + df_bool = DataFrame( + {"A": [True, True, False, False], "B": [True, False, False, True]} + ) + ser_bool = Series([True, True, False, True]) + result = df_bool.corrwith(ser_bool) + expected = Series([0.57735, 0.57735], index=["A", "B"]) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_describe.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_describe.py new file mode 100644 index 00000000..f56a7896 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_describe.py @@ -0,0 +1,417 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestDataFrameDescribe: + def test_describe_bool_in_mixed_frame(self): + df = DataFrame( + { + "string_data": ["a", "b", "c", "d", "e"], + "bool_data": [True, True, False, False, False], + "int_data": [10, 20, 30, 40, 50], + } + ) + + # Integer data are included in .describe() output, + # Boolean and string data are not. 
+ result = df.describe() + expected = DataFrame( + {"int_data": [5, 30, df.int_data.std(), 10, 20, 30, 40, 50]}, + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_frame_equal(result, expected) + + # Top value is a boolean value that is False + result = df.describe(include=["bool"]) + + expected = DataFrame( + {"bool_data": [5, 2, False, 3]}, index=["count", "unique", "top", "freq"] + ) + tm.assert_frame_equal(result, expected) + + def test_describe_empty_object(self): + # GH#27183 + df = DataFrame({"A": [None, None]}, dtype=object) + result = df.describe() + expected = DataFrame( + {"A": [0, 0, np.nan, np.nan]}, + dtype=object, + index=["count", "unique", "top", "freq"], + ) + tm.assert_frame_equal(result, expected) + + result = df.iloc[:0].describe() + tm.assert_frame_equal(result, expected) + + def test_describe_bool_frame(self): + # GH#13891 + df = DataFrame( + { + "bool_data_1": [False, False, True, True], + "bool_data_2": [False, True, True, True], + } + ) + result = df.describe() + expected = DataFrame( + {"bool_data_1": [4, 2, False, 2], "bool_data_2": [4, 2, True, 3]}, + index=["count", "unique", "top", "freq"], + ) + tm.assert_frame_equal(result, expected) + + df = DataFrame( + { + "bool_data": [False, False, True, True, False], + "int_data": [0, 1, 2, 3, 4], + } + ) + result = df.describe() + expected = DataFrame( + {"int_data": [5, 2, df.int_data.std(), 0, 1, 2, 3, 4]}, + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_frame_equal(result, expected) + + df = DataFrame( + {"bool_data": [False, False, True, True], "str_data": ["a", "b", "c", "a"]} + ) + result = df.describe() + expected = DataFrame( + {"bool_data": [4, 2, False, 2], "str_data": [4, 3, "a", 2]}, + index=["count", "unique", "top", "freq"], + ) + tm.assert_frame_equal(result, expected) + + def test_describe_categorical(self): + df = DataFrame({"value": np.random.default_rng(2).integers(0, 10000, 100)}) + labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)] + cat_labels = Categorical(labels, labels) + + df = df.sort_values(by=["value"], ascending=True) + df["value_group"] = pd.cut( + df.value, range(0, 10500, 500), right=False, labels=cat_labels + ) + cat = df + + # Categoricals should not show up together with numerical columns + result = cat.describe() + assert len(result.columns) == 1 + + # In a frame, describe() for the cat should be the same as for string + # arrays (count, unique, top, freq) + + cat = Categorical( + ["a", "b", "b", "b"], categories=["a", "b", "c"], ordered=True + ) + s = Series(cat) + result = s.describe() + expected = Series([4, 2, "b", 3], index=["count", "unique", "top", "freq"]) + tm.assert_series_equal(result, expected) + + cat = Series(Categorical(["a", "b", "c", "c"])) + df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]}) + result = df3.describe() + tm.assert_numpy_array_equal(result["cat"].values, result["s"].values) + + def test_describe_empty_categorical_column(self): + # GH#26397 + # Ensure the index of an empty categorical DataFrame column + # also contains (count, unique, top, freq) + df = DataFrame({"empty_col": Categorical([])}) + result = df.describe() + expected = DataFrame( + {"empty_col": [0, 0, np.nan, np.nan]}, + index=["count", "unique", "top", "freq"], + dtype="object", + ) + tm.assert_frame_equal(result, expected) + # ensure NaN, not None + assert np.isnan(result.iloc[2, 0]) + assert np.isnan(result.iloc[3, 0]) + + def test_describe_categorical_columns(self): + # GH#11558 + columns = 
pd.CategoricalIndex(["int1", "int2", "obj"], ordered=True, name="XXX") + df = DataFrame( + { + "int1": [10, 20, 30, 40, 50], + "int2": [10, 20, 30, 40, 50], + "obj": ["A", 0, None, "X", 1], + }, + columns=columns, + ) + result = df.describe() + + exp_columns = pd.CategoricalIndex( + ["int1", "int2"], + categories=["int1", "int2", "obj"], + ordered=True, + name="XXX", + ) + expected = DataFrame( + { + "int1": [5, 30, df.int1.std(), 10, 20, 30, 40, 50], + "int2": [5, 30, df.int2.std(), 10, 20, 30, 40, 50], + }, + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + columns=exp_columns, + ) + + tm.assert_frame_equal(result, expected) + tm.assert_categorical_equal(result.columns.values, expected.columns.values) + + def test_describe_datetime_columns(self): + columns = pd.DatetimeIndex( + ["2011-01-01", "2011-02-01", "2011-03-01"], + freq="MS", + tz="US/Eastern", + name="XXX", + ) + df = DataFrame( + { + 0: [10, 20, 30, 40, 50], + 1: [10, 20, 30, 40, 50], + 2: ["A", 0, None, "X", 1], + } + ) + df.columns = columns + result = df.describe() + + exp_columns = pd.DatetimeIndex( + ["2011-01-01", "2011-02-01"], freq="MS", tz="US/Eastern", name="XXX" + ) + expected = DataFrame( + { + 0: [5, 30, df.iloc[:, 0].std(), 10, 20, 30, 40, 50], + 1: [5, 30, df.iloc[:, 1].std(), 10, 20, 30, 40, 50], + }, + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + ) + expected.columns = exp_columns + tm.assert_frame_equal(result, expected) + assert result.columns.freq == "MS" + assert result.columns.tz == expected.columns.tz + + def test_describe_timedelta_values(self): + # GH#6145 + t1 = pd.timedelta_range("1 days", freq="D", periods=5) + t2 = pd.timedelta_range("1 hours", freq="H", periods=5) + df = DataFrame({"t1": t1, "t2": t2}) + + expected = DataFrame( + { + "t1": [ + 5, + pd.Timedelta("3 days"), + df.iloc[:, 0].std(), + pd.Timedelta("1 days"), + pd.Timedelta("2 days"), + pd.Timedelta("3 days"), + pd.Timedelta("4 days"), + pd.Timedelta("5 days"), + ], + "t2": [ + 5, + pd.Timedelta("3 hours"), + df.iloc[:, 1].std(), + pd.Timedelta("1 hours"), + pd.Timedelta("2 hours"), + pd.Timedelta("3 hours"), + pd.Timedelta("4 hours"), + pd.Timedelta("5 hours"), + ], + }, + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + ) + + result = df.describe() + tm.assert_frame_equal(result, expected) + + exp_repr = ( + " t1 t2\n" + "count 5 5\n" + "mean 3 days 00:00:00 0 days 03:00:00\n" + "std 1 days 13:56:50.394919273 0 days 01:34:52.099788303\n" + "min 1 days 00:00:00 0 days 01:00:00\n" + "25% 2 days 00:00:00 0 days 02:00:00\n" + "50% 3 days 00:00:00 0 days 03:00:00\n" + "75% 4 days 00:00:00 0 days 04:00:00\n" + "max 5 days 00:00:00 0 days 05:00:00" + ) + assert repr(result) == exp_repr + + def test_describe_tz_values(self, tz_naive_fixture): + # GH#21332 + tz = tz_naive_fixture + s1 = Series(range(5)) + start = Timestamp(2018, 1, 1) + end = Timestamp(2018, 1, 5) + s2 = Series(date_range(start, end, tz=tz)) + df = DataFrame({"s1": s1, "s2": s2}) + + expected = DataFrame( + { + "s1": [5, 2, 0, 1, 2, 3, 4, 1.581139], + "s2": [ + 5, + Timestamp(2018, 1, 3).tz_localize(tz), + start.tz_localize(tz), + s2[1], + s2[2], + s2[3], + end.tz_localize(tz), + np.nan, + ], + }, + index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"], + ) + result = df.describe(include="all") + tm.assert_frame_equal(result, expected) + + def test_datetime_is_numeric_includes_datetime(self): + df = DataFrame({"a": date_range("2012", periods=3), "b": [1, 2, 3]}) + result = df.describe() + expected = 
DataFrame( + { + "a": [ + 3, + Timestamp("2012-01-02"), + Timestamp("2012-01-01"), + Timestamp("2012-01-01T12:00:00"), + Timestamp("2012-01-02"), + Timestamp("2012-01-02T12:00:00"), + Timestamp("2012-01-03"), + np.nan, + ], + "b": [3, 2, 1, 1.5, 2, 2.5, 3, 1], + }, + index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"], + ) + tm.assert_frame_equal(result, expected) + + def test_describe_tz_values2(self): + tz = "CET" + s1 = Series(range(5)) + start = Timestamp(2018, 1, 1) + end = Timestamp(2018, 1, 5) + s2 = Series(date_range(start, end, tz=tz)) + df = DataFrame({"s1": s1, "s2": s2}) + + s1_ = s1.describe() + s2_ = s2.describe() + idx = [ + "count", + "mean", + "min", + "25%", + "50%", + "75%", + "max", + "std", + ] + expected = pd.concat([s1_, s2_], axis=1, keys=["s1", "s2"]).reindex( + idx, copy=False + ) + + result = df.describe(include="all") + tm.assert_frame_equal(result, expected) + + def test_describe_percentiles_integer_idx(self): + # GH#26660 + df = DataFrame({"x": [1]}) + pct = np.linspace(0, 1, 10 + 1) + result = df.describe(percentiles=pct) + + expected = DataFrame( + {"x": [1.0, 1.0, np.nan, 1.0, *(1.0 for _ in pct), 1.0]}, + index=[ + "count", + "mean", + "std", + "min", + "0%", + "10%", + "20%", + "30%", + "40%", + "50%", + "60%", + "70%", + "80%", + "90%", + "100%", + "max", + ], + ) + tm.assert_frame_equal(result, expected) + + def test_describe_does_not_raise_error_for_dictlike_elements(self): + # GH#32409 + df = DataFrame([{"test": {"a": "1"}}, {"test": {"a": "2"}}]) + expected = DataFrame( + {"test": [2, 2, {"a": "1"}, 1]}, index=["count", "unique", "top", "freq"] + ) + result = df.describe() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("exclude", ["x", "y", ["x", "y"], ["x", "z"]]) + def test_describe_when_include_all_exclude_not_allowed(self, exclude): + """ + When include is 'all', then setting exclude != None is not allowed. 
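+ For example, df.describe(include="all", exclude=["x"]) is expected to + raise a ValueError, as the assertion below checks.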
+ """ + df = DataFrame({"x": [1], "y": [2], "z": [3]}) + msg = "exclude must be None when include is 'all'" + with pytest.raises(ValueError, match=msg): + df.describe(include="all", exclude=exclude) + + def test_describe_with_duplicate_columns(self): + df = DataFrame( + [[1, 1, 1], [2, 2, 2], [3, 3, 3]], + columns=["bar", "a", "a"], + dtype="float64", + ) + result = df.describe() + ser = df.iloc[:, 0].describe() + expected = pd.concat([ser, ser, ser], keys=df.columns, axis=1) + tm.assert_frame_equal(result, expected) + + def test_ea_with_na(self, any_numeric_ea_dtype): + # GH#48778 + + df = DataFrame({"a": [1, pd.NA, pd.NA], "b": pd.NA}, dtype=any_numeric_ea_dtype) + result = df.describe() + expected = DataFrame( + {"a": [1.0, 1.0, pd.NA] + [1.0] * 5, "b": [0.0] + [pd.NA] * 7}, + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + dtype="Float64", + ) + tm.assert_frame_equal(result, expected) + + def test_describe_exclude_pa_dtype(self): + # GH#52570 + pa = pytest.importorskip("pyarrow") + df = DataFrame( + { + "a": Series([1, 2, 3], dtype=pd.ArrowDtype(pa.int8())), + "b": Series([1, 2, 3], dtype=pd.ArrowDtype(pa.int16())), + "c": Series([1, 2, 3], dtype=pd.ArrowDtype(pa.int32())), + } + ) + result = df.describe( + include=pd.ArrowDtype(pa.int8()), exclude=pd.ArrowDtype(pa.int32()) + ) + expected = DataFrame( + {"a": [3, 2, 1, 1, 1.5, 2, 2.5, 3]}, + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + dtype=pd.ArrowDtype(pa.float64()), + ) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_diff.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_diff.py new file mode 100644 index 00000000..b401f182 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_diff.py @@ -0,0 +1,304 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestDataFrameDiff: + def test_diff_requires_integer(self): + df = DataFrame(np.random.default_rng(2).standard_normal((2, 2))) + with pytest.raises(ValueError, match="periods must be an integer"): + df.diff(1.5) + + # GH#44572 np.int64 is accepted + @pytest.mark.parametrize("num", [1, np.int64(1)]) + def test_diff(self, datetime_frame, num): + df = datetime_frame + the_diff = df.diff(num) + + expected = df["A"] - df["A"].shift(num) + tm.assert_series_equal(the_diff["A"], expected) + + def test_diff_int_dtype(self): + # int dtype + a = 10_000_000_000_000_000 + b = a + 1 + ser = Series([a, b]) + + rs = DataFrame({"s": ser}).diff() + assert rs.s[1] == 1 + + def test_diff_mixed_numeric(self, datetime_frame): + # mixed numeric + tf = datetime_frame.astype("float32") + the_diff = tf.diff(1) + tm.assert_series_equal(the_diff["A"], tf["A"] - tf["A"].shift(1)) + + def test_diff_axis1_nonconsolidated(self): + # GH#10907 + df = DataFrame({"y": Series([2]), "z": Series([3])}) + df.insert(0, "x", 1) + result = df.diff(axis=1) + expected = DataFrame({"x": np.nan, "y": Series(1), "z": Series(1)}) + tm.assert_frame_equal(result, expected) + + def test_diff_timedelta64_with_nat(self): + # GH#32441 + arr = np.arange(6).reshape(3, 2).astype("timedelta64[ns]") + arr[:, 0] = np.timedelta64("NaT", "ns") + + df = DataFrame(arr) + result = df.diff(1, axis=0) + + expected = DataFrame({0: df[0], 1: [pd.NaT, pd.Timedelta(2), pd.Timedelta(2)]}) + tm.assert_equal(result, expected) + + result = df.diff(0) + 
expected = df - df + assert expected[0].isna().all() + tm.assert_equal(result, expected) + + result = df.diff(-1, axis=1) + expected = df * np.nan + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("tz", [None, "UTC"]) + def test_diff_datetime_axis0_with_nat(self, tz): + # GH#32441 + dti = pd.DatetimeIndex(["NaT", "2019-01-01", "2019-01-02"], tz=tz) + ser = Series(dti) + + df = ser.to_frame() + + result = df.diff() + ex_index = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta(days=1)]) + expected = Series(ex_index).to_frame() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("tz", [None, "UTC"]) + def test_diff_datetime_with_nat_zero_periods(self, tz): + # diff on NaT values should give NaT, not timedelta64(0) + dti = date_range("2016-01-01", periods=4, tz=tz) + ser = Series(dti) + df = ser.to_frame() + + df[1] = ser.copy() + + df.iloc[:, 0] = pd.NaT + + expected = df - df + assert expected[0].isna().all() + + result = df.diff(0, axis=0) + tm.assert_frame_equal(result, expected) + + result = df.diff(0, axis=1) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("tz", [None, "UTC"]) + def test_diff_datetime_axis0(self, tz): + # GH#18578 + df = DataFrame( + { + 0: date_range("2010", freq="D", periods=2, tz=tz), + 1: date_range("2010", freq="D", periods=2, tz=tz), + } + ) + + result = df.diff(axis=0) + expected = DataFrame( + { + 0: pd.TimedeltaIndex(["NaT", "1 days"]), + 1: pd.TimedeltaIndex(["NaT", "1 days"]), + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("tz", [None, "UTC"]) + def test_diff_datetime_axis1(self, tz): + # GH#18578 + df = DataFrame( + { + 0: date_range("2010", freq="D", periods=2, tz=tz), + 1: date_range("2010", freq="D", periods=2, tz=tz), + } + ) + + result = df.diff(axis=1) + expected = DataFrame( + { + 0: pd.TimedeltaIndex(["NaT", "NaT"]), + 1: pd.TimedeltaIndex(["0 days", "0 days"]), + } + ) + tm.assert_frame_equal(result, expected) + + def test_diff_timedelta(self): + # GH#4533 + df = DataFrame( + { + "time": [Timestamp("20130101 9:01"), Timestamp("20130101 9:02")], + "value": [1.0, 2.0], + } + ) + + res = df.diff() + exp = DataFrame( + [[pd.NaT, np.nan], [pd.Timedelta("00:01:00"), 1]], columns=["time", "value"] + ) + tm.assert_frame_equal(res, exp) + + def test_diff_mixed_dtype(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + df["A"] = np.array([1, 2, 3, 4, 5], dtype=object) + + result = df.diff() + assert result[0].dtype == np.float64 + + def test_diff_neg_n(self, datetime_frame): + rs = datetime_frame.diff(-1) + xp = datetime_frame - datetime_frame.shift(-1) + tm.assert_frame_equal(rs, xp) + + def test_diff_float_n(self, datetime_frame): + rs = datetime_frame.diff(1.0) + xp = datetime_frame.diff(1) + tm.assert_frame_equal(rs, xp) + + def test_diff_axis(self): + # GH#9727 + df = DataFrame([[1.0, 2.0], [3.0, 4.0]]) + tm.assert_frame_equal( + df.diff(axis=1), DataFrame([[np.nan, 1.0], [np.nan, 1.0]]) + ) + tm.assert_frame_equal( + df.diff(axis=0), DataFrame([[np.nan, np.nan], [2.0, 2.0]]) + ) + + def test_diff_period(self): + # GH#32995 Don't pass an incorrect axis + pi = date_range("2016-01-01", periods=3).to_period("D") + df = DataFrame({"A": pi}) + + result = df.diff(1, axis=1) + + expected = (df - pd.NaT).astype(object) + tm.assert_frame_equal(result, expected) + + def test_diff_axis1_mixed_dtypes(self): + # GH#32995 operate column-wise when we have mixed dtypes and axis=1 + df = DataFrame({"A": range(3), "B": 2 * np.arange(3, dtype=np.float64)}) + + 
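# Editor's note (hedged): with axis=1, each column is differenced against + # its left neighbour, so "A" has nothing to diff against (all-NaN) and + # "B" - "A" = 2n - n = n, i.e. df["B"] / 2. +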
expected = DataFrame({"A": [np.nan, np.nan, np.nan], "B": df["B"] / 2}) + + result = df.diff(axis=1) + tm.assert_frame_equal(result, expected) + + # GH#21437 mixed-float-dtypes + df = DataFrame( + {"a": np.arange(3, dtype="float32"), "b": np.arange(3, dtype="float64")} + ) + result = df.diff(axis=1) + expected = DataFrame({"a": df["a"] * np.nan, "b": df["b"] * 0}) + tm.assert_frame_equal(result, expected) + + def test_diff_axis1_mixed_dtypes_large_periods(self): + # GH#32995 operate column-wise when we have mixed dtypes and axis=1 + df = DataFrame({"A": range(3), "B": 2 * np.arange(3, dtype=np.float64)}) + + expected = df * np.nan + + result = df.diff(axis=1, periods=3) + tm.assert_frame_equal(result, expected) + + def test_diff_axis1_mixed_dtypes_negative_periods(self): + # GH#32995 operate column-wise when we have mixed dtypes and axis=1 + df = DataFrame({"A": range(3), "B": 2 * np.arange(3, dtype=np.float64)}) + + expected = DataFrame({"A": -1.0 * df["A"], "B": df["B"] * np.nan}) + + result = df.diff(axis=1, periods=-1) + tm.assert_frame_equal(result, expected) + + def test_diff_sparse(self): + # GH#28813 .diff() should work for sparse dataframes as well + sparse_df = DataFrame([[0, 1], [1, 0]], dtype="Sparse[int]") + + result = sparse_df.diff() + expected = DataFrame( + [[np.nan, np.nan], [1.0, -1.0]], dtype=pd.SparseDtype("float", 0.0) + ) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "axis,expected", + [ + ( + 0, + DataFrame( + { + "a": [np.nan, 0, 1, 0, np.nan, np.nan, np.nan, 0], + "b": [np.nan, 1, np.nan, np.nan, -2, 1, np.nan, np.nan], + "c": np.repeat(np.nan, 8), + "d": [np.nan, 3, 5, 7, 9, 11, 13, 15], + }, + dtype="Int64", + ), + ), + ( + 1, + DataFrame( + { + "a": np.repeat(np.nan, 8), + "b": [0, 1, np.nan, 1, np.nan, np.nan, np.nan, 0], + "c": np.repeat(np.nan, 8), + "d": np.repeat(np.nan, 8), + }, + dtype="Int64", + ), + ), + ], + ) + def test_diff_integer_na(self, axis, expected): + # GH#24171 IntegerNA Support for DataFrame.diff() + df = DataFrame( + { + "a": np.repeat([0, 1, np.nan, 2], 2), + "b": np.tile([0, 1, np.nan, 2], 2), + "c": np.repeat(np.nan, 8), + "d": np.arange(1, 9) ** 2, + }, + dtype="Int64", + ) + + # Test case for default behaviour of diff + result = df.diff(axis=axis) + tm.assert_frame_equal(result, expected) + + def test_diff_readonly(self): + # https://github.com/pandas-dev/pandas/issues/35559 + arr = np.random.default_rng(2).standard_normal((5, 2)) + arr.flags.writeable = False + df = DataFrame(arr) + result = df.diff() + expected = DataFrame(np.array(df)).diff() + tm.assert_frame_equal(result, expected) + + def test_diff_all_int_dtype(self, any_int_numpy_dtype): + # GH 14773 + df = DataFrame(range(5)) + df = df.astype(any_int_numpy_dtype) + result = df.diff() + expected_dtype = ( + "float32" if any_int_numpy_dtype in ("int8", "int16") else "float64" + ) + expected = DataFrame([np.nan, 1.0, 1.0, 1.0, 1.0], dtype=expected_dtype) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dot.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dot.py new file mode 100644 index 00000000..3e01f67c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dot.py @@ -0,0 +1,155 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +class DotSharedTests: + @pytest.fixture + def obj(self): + raise NotImplementedError + + @pytest.fixture + def 
other(self) -> DataFrame: + """ + other is a DataFrame that is indexed so that obj.dot(other) is valid + """ + raise NotImplementedError + + @pytest.fixture + def expected(self, obj, other) -> DataFrame: + """ + The expected result of obj.dot(other) + """ + raise NotImplementedError + + @classmethod + def reduced_dim_assert(cls, result, expected): + """ + Assertion about results with 1 fewer dimension than self.obj + """ + raise NotImplementedError + + def test_dot_equiv_values_dot(self, obj, other, expected): + # `expected` is constructed from obj.values.dot(other.values) + result = obj.dot(other) + tm.assert_equal(result, expected) + + def test_dot_2d_ndarray(self, obj, other, expected): + # Check ndarray argument; in this case we get matching values, + # but index/columns may not match + result = obj.dot(other.values) + assert np.all(result == expected.values) + + def test_dot_1d_ndarray(self, obj, expected): + # can pass correct-length array + row = obj.iloc[0] if obj.ndim == 2 else obj + + result = obj.dot(row.values) + expected = obj.dot(row) + self.reduced_dim_assert(result, expected) + + def test_dot_series(self, obj, other, expected): + # Check series argument + result = obj.dot(other["1"]) + self.reduced_dim_assert(result, expected["1"]) + + def test_dot_series_alignment(self, obj, other, expected): + result = obj.dot(other.iloc[::-1]["1"]) + self.reduced_dim_assert(result, expected["1"]) + + def test_dot_aligns(self, obj, other, expected): + # Check index alignment + other2 = other.iloc[::-1] + result = obj.dot(other2) + tm.assert_equal(result, expected) + + def test_dot_shape_mismatch(self, obj): + msg = "Dot product shape mismatch" + # exception raised is of type Exception + with pytest.raises(Exception, match=msg): + obj.dot(obj.values[:3]) + + def test_dot_misaligned(self, obj, other): + msg = "matrices are not aligned" + with pytest.raises(ValueError, match=msg): + obj.dot(other.T) + + +class TestSeriesDot(DotSharedTests): + @pytest.fixture + def obj(self): + return Series( + np.random.default_rng(2).standard_normal(4), index=["p", "q", "r", "s"] + ) + + @pytest.fixture + def other(self): + return DataFrame( + np.random.default_rng(2).standard_normal((3, 4)), + index=["1", "2", "3"], + columns=["p", "q", "r", "s"], + ).T + + @pytest.fixture + def expected(self, obj, other): + return Series(np.dot(obj.values, other.values), index=other.columns) + + @classmethod + def reduced_dim_assert(cls, result, expected): + """ + Assertion about results with 1 fewer dimension than self.obj + """ + tm.assert_almost_equal(result, expected) + + +class TestDataFrameDot(DotSharedTests): + @pytest.fixture + def obj(self): + return DataFrame( + np.random.default_rng(2).standard_normal((3, 4)), + index=["a", "b", "c"], + columns=["p", "q", "r", "s"], + ) + + @pytest.fixture + def other(self): + return DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), + index=["p", "q", "r", "s"], + columns=["1", "2"], + ) + + @pytest.fixture + def expected(self, obj, other): + return DataFrame( + np.dot(obj.values, other.values), index=obj.index, columns=other.columns + ) + + @classmethod + def reduced_dim_assert(cls, result, expected): + """ + Assertion about results with 1 fewer dimension than self.obj + """ + tm.assert_series_equal(result, expected, check_names=False) + assert result.name is None + + +@pytest.mark.parametrize( + "dtype,exp_dtype", + [("Float32", "Float64"), ("Int16", "Int32"), ("float[pyarrow]", "double[pyarrow]")], +) +def test_arrow_dtype(dtype, exp_dtype): +
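# Editor's note (hedged): the parametrization above encodes the expected + # upcasts for dot() on nullable/pyarrow dtypes, e.g. Float32 -> Float64, + # Int16 -> Int32, float[pyarrow] -> double[pyarrow]. +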
pytest.importorskip("pyarrow") + + cols = ["a", "b"] + df_a = DataFrame([[1, 2], [3, 4], [5, 6]], columns=cols, dtype="int32") + df_b = DataFrame([[1, 0], [0, 1]], index=cols, dtype=dtype) + result = df_a.dot(df_b) + expected = DataFrame([[1, 2], [3, 4], [5, 6]], dtype=exp_dtype) + + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_drop.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_drop.py new file mode 100644 index 00000000..9a4882f1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_drop.py @@ -0,0 +1,546 @@ +import re + +import numpy as np +import pytest + +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + Series, + Timestamp, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "msg,labels,level", + [ + (r"labels \[4\] not found in level", 4, "a"), + (r"labels \[7\] not found in level", 7, "b"), + ], +) +def test_drop_raise_exception_if_labels_not_in_level(msg, labels, level): + # GH 8594 + mi = MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]], names=["a", "b"]) + s = Series([10, 20, 30], index=mi) + df = DataFrame([10, 20, 30], index=mi) + + with pytest.raises(KeyError, match=msg): + s.drop(labels, level=level) + with pytest.raises(KeyError, match=msg): + df.drop(labels, level=level) + + +@pytest.mark.parametrize("labels,level", [(4, "a"), (7, "b")]) +def test_drop_errors_ignore(labels, level): + # GH 8594 + mi = MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]], names=["a", "b"]) + s = Series([10, 20, 30], index=mi) + df = DataFrame([10, 20, 30], index=mi) + + expected_s = s.drop(labels, level=level, errors="ignore") + tm.assert_series_equal(s, expected_s) + + expected_df = df.drop(labels, level=level, errors="ignore") + tm.assert_frame_equal(df, expected_df) + + +def test_drop_with_non_unique_datetime_index_and_invalid_keys(): + # GH 30399 + + # define dataframe with unique datetime index + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + columns=["a", "b", "c"], + index=pd.date_range("2012", freq="H", periods=5), + ) + # create dataframe with non-unique datetime index + df = df.iloc[[0, 2, 2, 3]].copy() + + with pytest.raises(KeyError, match="not found in axis"): + df.drop(["a", "b"]) # Dropping with labels not exist in the index + + +class TestDataFrameDrop: + def test_drop_names(self): + df = DataFrame( + [[1, 2, 3], [3, 4, 5], [5, 6, 7]], + index=["a", "b", "c"], + columns=["d", "e", "f"], + ) + df.index.name, df.columns.name = "first", "second" + df_dropped_b = df.drop("b") + df_dropped_e = df.drop("e", axis=1) + df_inplace_b, df_inplace_e = df.copy(), df.copy() + return_value = df_inplace_b.drop("b", inplace=True) + assert return_value is None + return_value = df_inplace_e.drop("e", axis=1, inplace=True) + assert return_value is None + for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e): + assert obj.index.name == "first" + assert obj.columns.name == "second" + assert list(df.columns) == ["d", "e", "f"] + + msg = r"\['g'\] not found in axis" + with pytest.raises(KeyError, match=msg): + df.drop(["g"]) + with pytest.raises(KeyError, match=msg): + df.drop(["g"], axis=1) + + # errors = 'ignore' + dropped = df.drop(["g"], errors="ignore") + expected = Index(["a", "b", "c"], name="first") + tm.assert_index_equal(dropped.index, expected) + + dropped = df.drop(["b", "g"], errors="ignore") + expected = 
Index(["a", "c"], name="first") + tm.assert_index_equal(dropped.index, expected) + + dropped = df.drop(["g"], axis=1, errors="ignore") + expected = Index(["d", "e", "f"], name="second") + tm.assert_index_equal(dropped.columns, expected) + + dropped = df.drop(["d", "g"], axis=1, errors="ignore") + expected = Index(["e", "f"], name="second") + tm.assert_index_equal(dropped.columns, expected) + + # GH 16398 + dropped = df.drop([], errors="ignore") + expected = Index(["a", "b", "c"], name="first") + tm.assert_index_equal(dropped.index, expected) + + def test_drop(self): + simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}) + tm.assert_frame_equal(simple.drop("A", axis=1), simple[["B"]]) + tm.assert_frame_equal(simple.drop(["A", "B"], axis="columns"), simple[[]]) + tm.assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.loc[[2], :]) + tm.assert_frame_equal(simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :]) + + with pytest.raises(KeyError, match=r"\[5\] not found in axis"): + simple.drop(5) + with pytest.raises(KeyError, match=r"\['C'\] not found in axis"): + simple.drop("C", axis=1) + with pytest.raises(KeyError, match=r"\[5\] not found in axis"): + simple.drop([1, 5]) + with pytest.raises(KeyError, match=r"\['C'\] not found in axis"): + simple.drop(["A", "C"], axis=1) + + # GH 42881 + with pytest.raises(KeyError, match=r"\['C', 'D', 'F'\] not found in axis"): + simple.drop(["C", "D", "F"], axis=1) + + # errors = 'ignore' + tm.assert_frame_equal(simple.drop(5, errors="ignore"), simple) + tm.assert_frame_equal( + simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :] + ) + tm.assert_frame_equal(simple.drop("C", axis=1, errors="ignore"), simple) + tm.assert_frame_equal( + simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]] + ) + + # non-unique - wheee! 
+ nu_df = DataFrame( + list(zip(range(3), range(-3, 1), list("abc"))), columns=["a", "a", "b"] + ) + tm.assert_frame_equal(nu_df.drop("a", axis=1), nu_df[["b"]]) + tm.assert_frame_equal(nu_df.drop("b", axis="columns"), nu_df["a"]) + tm.assert_frame_equal(nu_df.drop([]), nu_df) # GH 16398 + + nu_df = nu_df.set_index(Index(["X", "Y", "X"])) + nu_df.columns = list("abc") + tm.assert_frame_equal(nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :]) + tm.assert_frame_equal(nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :]) + + # inplace cache issue + # GH#5628 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), columns=list("abc") + ) + expected = df[~(df.b > 0)] + return_value = df.drop(labels=df[df.b > 0].index, inplace=True) + assert return_value is None + tm.assert_frame_equal(df, expected) + + def test_drop_multiindex_not_lexsorted(self): + # GH#11640 + + # define the lexsorted version + lexsorted_mi = MultiIndex.from_tuples( + [("a", ""), ("b1", "c1"), ("b2", "c2")], names=["b", "c"] + ) + lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi) + assert lexsorted_df.columns._is_lexsorted() + + # define the non-lexsorted version + not_lexsorted_df = DataFrame( + columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]] + ) + not_lexsorted_df = not_lexsorted_df.pivot_table( + index="a", columns=["b", "c"], values="d" + ) + not_lexsorted_df = not_lexsorted_df.reset_index() + assert not not_lexsorted_df.columns._is_lexsorted() + + expected = lexsorted_df.drop("a", axis=1).astype(float) + with tm.assert_produces_warning(PerformanceWarning): + result = not_lexsorted_df.drop("a", axis=1) + + tm.assert_frame_equal(result, expected) + + def test_drop_api_equivalence(self): + # equivalence of the labels/axis and index/columns API's (GH#12392) + df = DataFrame( + [[1, 2, 3], [3, 4, 5], [5, 6, 7]], + index=["a", "b", "c"], + columns=["d", "e", "f"], + ) + + res1 = df.drop("a") + res2 = df.drop(index="a") + tm.assert_frame_equal(res1, res2) + + res1 = df.drop("d", axis=1) + res2 = df.drop(columns="d") + tm.assert_frame_equal(res1, res2) + + res1 = df.drop(labels="e", axis=1) + res2 = df.drop(columns="e") + tm.assert_frame_equal(res1, res2) + + res1 = df.drop(["a"], axis=0) + res2 = df.drop(index=["a"]) + tm.assert_frame_equal(res1, res2) + + res1 = df.drop(["a"], axis=0).drop(["d"], axis=1) + res2 = df.drop(index=["a"], columns=["d"]) + tm.assert_frame_equal(res1, res2) + + msg = "Cannot specify both 'labels' and 'index'/'columns'" + with pytest.raises(ValueError, match=msg): + df.drop(labels="a", index="b") + + with pytest.raises(ValueError, match=msg): + df.drop(labels="a", columns="b") + + msg = "Need to specify at least one of 'labels', 'index' or 'columns'" + with pytest.raises(ValueError, match=msg): + df.drop(axis=1) + + data = [[1, 2, 3], [1, 2, 3]] + + @pytest.mark.parametrize( + "actual", + [ + DataFrame(data=data, index=["a", "a"]), + DataFrame(data=data, index=["a", "b"]), + DataFrame(data=data, index=["a", "b"]).set_index([0, 1]), + DataFrame(data=data, index=["a", "a"]).set_index([0, 1]), + ], + ) + def test_raise_on_drop_duplicate_index(self, actual): + # GH#19186 + level = 0 if isinstance(actual.index, MultiIndex) else None + msg = re.escape("\"['c'] not found in axis\"") + with pytest.raises(KeyError, match=msg): + actual.drop("c", level=level, axis=0) + with pytest.raises(KeyError, match=msg): + actual.T.drop("c", level=level, axis=1) + expected_no_err = actual.drop("c", axis=0, level=level, errors="ignore") + 
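# Editor's note (hedged): errors="ignore" turns a missing label into a + # no-op, so the frame should round-trip unchanged, as asserted next. +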
tm.assert_frame_equal(expected_no_err, actual) + expected_no_err = actual.T.drop("c", axis=1, level=level, errors="ignore") + tm.assert_frame_equal(expected_no_err.T, actual) + + @pytest.mark.parametrize("index", [[1, 2, 3], [1, 1, 2]]) + @pytest.mark.parametrize("drop_labels", [[], [1], [2]]) + def test_drop_empty_list(self, index, drop_labels): + # GH#21494 + expected_index = [i for i in index if i not in drop_labels] + frame = DataFrame(index=index).drop(drop_labels) + tm.assert_frame_equal(frame, DataFrame(index=expected_index)) + + @pytest.mark.parametrize("index", [[1, 2, 3], [1, 2, 2]]) + @pytest.mark.parametrize("drop_labels", [[1, 4], [4, 5]]) + def test_drop_non_empty_list(self, index, drop_labels): + # GH# 21494 + with pytest.raises(KeyError, match="not found in axis"): + DataFrame(index=index).drop(drop_labels) + + @pytest.mark.parametrize( + "empty_listlike", + [ + [], + {}, + np.array([]), + Series([], dtype="datetime64[ns]"), + Index([]), + DatetimeIndex([]), + ], + ) + def test_drop_empty_listlike_non_unique_datetime_index(self, empty_listlike): + # GH#27994 + data = {"column_a": [5, 10], "column_b": ["one", "two"]} + index = [Timestamp("2021-01-01"), Timestamp("2021-01-01")] + df = DataFrame(data, index=index) + + # Passing empty list-like should return the same DataFrame. + expected = df.copy() + result = df.drop(empty_listlike) + tm.assert_frame_equal(result, expected) + + def test_mixed_depth_drop(self): + arrays = [ + ["a", "top", "top", "routine1", "routine1", "routine2"], + ["", "OD", "OD", "result1", "result2", "result1"], + ["", "wx", "wy", "", "", ""], + ] + + tuples = sorted(zip(*arrays)) + index = MultiIndex.from_tuples(tuples) + df = DataFrame(np.random.default_rng(2).standard_normal((4, 6)), columns=index) + + result = df.drop("a", axis=1) + expected = df.drop([("a", "", "")], axis=1) + tm.assert_frame_equal(expected, result) + + result = df.drop(["top"], axis=1) + expected = df.drop([("top", "OD", "wx")], axis=1) + expected = expected.drop([("top", "OD", "wy")], axis=1) + tm.assert_frame_equal(expected, result) + + result = df.drop(("top", "OD", "wx"), axis=1) + expected = df.drop([("top", "OD", "wx")], axis=1) + tm.assert_frame_equal(expected, result) + + expected = df.drop([("top", "OD", "wy")], axis=1) + expected = df.drop("top", axis=1) + + result = df.drop("result1", level=1, axis=1) + expected = df.drop( + [("routine1", "result1", ""), ("routine2", "result1", "")], axis=1 + ) + tm.assert_frame_equal(expected, result) + + def test_drop_multiindex_other_level_nan(self): + # GH#12754 + df = ( + DataFrame( + { + "A": ["one", "one", "two", "two"], + "B": [np.nan, 0.0, 1.0, 2.0], + "C": ["a", "b", "c", "c"], + "D": [1, 2, 3, 4], + } + ) + .set_index(["A", "B", "C"]) + .sort_index() + ) + result = df.drop("c", level="C") + expected = DataFrame( + [2, 1], + columns=["D"], + index=MultiIndex.from_tuples( + [("one", 0.0, "b"), ("one", np.nan, "a")], names=["A", "B", "C"] + ), + ) + tm.assert_frame_equal(result, expected) + + def test_drop_nonunique(self): + df = DataFrame( + [ + ["x-a", "x", "a", 1.5], + ["x-a", "x", "a", 1.2], + ["z-c", "z", "c", 3.1], + ["x-a", "x", "a", 4.1], + ["x-b", "x", "b", 5.1], + ["x-b", "x", "b", 4.1], + ["x-b", "x", "b", 2.2], + ["y-a", "y", "a", 1.2], + ["z-b", "z", "b", 2.1], + ], + columns=["var1", "var2", "var3", "var4"], + ) + + grp_size = df.groupby("var1").size() + drop_idx = grp_size.loc[grp_size == 1] + + idf = df.set_index(["var1", "var2", "var3"]) + + # it works! 
GH#2101 + result = idf.drop(drop_idx.index, level=0).reset_index() + expected = df[-df.var1.isin(drop_idx.index)] + + result.index = expected.index + + tm.assert_frame_equal(result, expected) + + def test_drop_level(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + result = frame.drop(["bar", "qux"], level="first") + expected = frame.iloc[[0, 1, 2, 5, 6]] + tm.assert_frame_equal(result, expected) + + result = frame.drop(["two"], level="second") + expected = frame.iloc[[0, 2, 3, 6, 7, 9]] + tm.assert_frame_equal(result, expected) + + result = frame.T.drop(["bar", "qux"], axis=1, level="first") + expected = frame.iloc[[0, 1, 2, 5, 6]].T + tm.assert_frame_equal(result, expected) + + result = frame.T.drop(["two"], axis=1, level="second") + expected = frame.iloc[[0, 2, 3, 6, 7, 9]].T + tm.assert_frame_equal(result, expected) + + def test_drop_level_nonunique_datetime(self): + # GH#12701 + idx = Index([2, 3, 4, 4, 5], name="id") + idxdt = pd.to_datetime( + [ + "2016-03-23 14:00", + "2016-03-23 15:00", + "2016-03-23 16:00", + "2016-03-23 16:00", + "2016-03-23 17:00", + ] + ) + df = DataFrame(np.arange(10).reshape(5, 2), columns=list("ab"), index=idx) + df["tstamp"] = idxdt + df = df.set_index("tstamp", append=True) + ts = Timestamp("201603231600") + assert df.index.is_unique is False + + result = df.drop(ts, level="tstamp") + expected = df.loc[idx != 4] + tm.assert_frame_equal(result, expected) + + def test_drop_tz_aware_timestamp_across_dst(self, frame_or_series): + # GH#21761 + start = Timestamp("2017-10-29", tz="Europe/Berlin") + end = Timestamp("2017-10-29 04:00:00", tz="Europe/Berlin") + index = pd.date_range(start, end, freq="15min") + data = frame_or_series(data=[1] * len(index), index=index) + result = data.drop(start) + expected_start = Timestamp("2017-10-29 00:15:00", tz="Europe/Berlin") + expected_idx = pd.date_range(expected_start, end, freq="15min") + expected = frame_or_series(data=[1] * len(expected_idx), index=expected_idx) + tm.assert_equal(result, expected) + + def test_drop_preserve_names(self): + index = MultiIndex.from_arrays( + [[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]], names=["one", "two"] + ) + + df = DataFrame(np.random.default_rng(2).standard_normal((6, 3)), index=index) + + result = df.drop([(0, 2)]) + assert result.index.names == ("one", "two") + + @pytest.mark.parametrize( + "operation", ["__iadd__", "__isub__", "__imul__", "__ipow__"] + ) + @pytest.mark.parametrize("inplace", [False, True]) + def test_inplace_drop_and_operation(self, operation, inplace): + # GH#30484 + df = DataFrame({"x": range(5)}) + expected = df.copy() + df["y"] = range(5) + y = df["y"] + + with tm.assert_produces_warning(None): + if inplace: + df.drop("y", axis=1, inplace=inplace) + else: + df = df.drop("y", axis=1, inplace=inplace) + + # Perform operation and check result + getattr(y, operation)(1) + tm.assert_frame_equal(df, expected) + + def test_drop_with_non_unique_multiindex(self): + # GH#36293 + mi = MultiIndex.from_arrays([["x", "y", "x"], ["i", "j", "i"]]) + df = DataFrame([1, 2, 3], index=mi) + result = df.drop(index="x") + expected = DataFrame([2], index=MultiIndex.from_arrays([["y"], ["j"]])) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("indexer", [("a", "a"), [("a", "a")]]) + def test_drop_tuple_with_non_unique_multiindex(self, indexer): + # GH#42771 + idx = MultiIndex.from_product([["a", "b"], ["a", "a"]]) + df = DataFrame({"x": range(len(idx))}, index=idx) + result = df.drop(index=[("a", "a")]) + expected = DataFrame( 
+ {"x": [2, 3]}, index=MultiIndex.from_tuples([("b", "a"), ("b", "a")]) + ) + tm.assert_frame_equal(result, expected) + + def test_drop_with_duplicate_columns(self): + df = DataFrame( + [[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"] + ) + result = df.drop(["a"], axis=1) + expected = DataFrame([[1], [1], [1]], columns=["bar"]) + tm.assert_frame_equal(result, expected) + result = df.drop("a", axis=1) + tm.assert_frame_equal(result, expected) + + def test_drop_with_duplicate_columns2(self): + # drop buggy GH#6240 + df = DataFrame( + { + "A": np.random.default_rng(2).standard_normal(5), + "B": np.random.default_rng(2).standard_normal(5), + "C": np.random.default_rng(2).standard_normal(5), + "D": ["a", "b", "c", "d", "e"], + } + ) + + expected = df.take([0, 1, 1], axis=1) + df2 = df.take([2, 0, 1, 2, 1], axis=1) + result = df2.drop("C", axis=1) + tm.assert_frame_equal(result, expected) + + def test_drop_inplace_no_leftover_column_reference(self): + # GH 13934 + df = DataFrame({"a": [1, 2, 3]}) + a = df.a + df.drop(["a"], axis=1, inplace=True) + tm.assert_index_equal(df.columns, Index([], dtype="object")) + a -= a.mean() + tm.assert_index_equal(df.columns, Index([], dtype="object")) + + def test_drop_level_missing_label_multiindex(self): + # GH 18561 + df = DataFrame(index=MultiIndex.from_product([range(3), range(3)])) + with pytest.raises(KeyError, match="labels \\[5\\] not found in level"): + df.drop(5, level=0) + + @pytest.mark.parametrize("idx, level", [(["a", "b"], 0), (["a"], None)]) + def test_drop_index_ea_dtype(self, any_numeric_ea_dtype, idx, level): + # GH#45860 + df = DataFrame( + {"a": [1, 2, 2, pd.NA], "b": 100}, dtype=any_numeric_ea_dtype + ).set_index(idx) + result = df.drop(Index([2, pd.NA]), level=level) + expected = DataFrame( + {"a": [1], "b": 100}, dtype=any_numeric_ea_dtype + ).set_index(idx) + tm.assert_frame_equal(result, expected) + + def test_drop_parse_strings_datetime_index(self): + # GH #5355 + df = DataFrame( + {"a": [1, 2], "b": [1, 2]}, + index=[Timestamp("2000-01-03"), Timestamp("2000-01-04")], + ) + result = df.drop("2000-01-03", axis=0) + expected = DataFrame({"a": [2], "b": [2]}, index=[Timestamp("2000-01-04")]) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_drop_duplicates.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_drop_duplicates.py new file mode 100644 index 00000000..df121392 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_drop_duplicates.py @@ -0,0 +1,473 @@ +from datetime import datetime +import re + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + NaT, + concat, +) +import pandas._testing as tm + + +@pytest.mark.parametrize("subset", ["a", ["a"], ["a", "B"]]) +def test_drop_duplicates_with_misspelled_column_name(subset): + # GH 19730 + df = DataFrame({"A": [0, 0, 1], "B": [0, 0, 1], "C": [0, 0, 1]}) + msg = re.escape("Index(['a'], dtype='object')") + + with pytest.raises(KeyError, match=msg): + df.drop_duplicates(subset) + + +def test_drop_duplicates(): + df = DataFrame( + { + "AAA": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"], + "B": ["one", "one", "two", "two", "two", "two", "one", "two"], + "C": [1, 1, 2, 2, 2, 2, 1, 2], + "D": range(8), + } + ) + # single column + result = df.drop_duplicates("AAA") + expected = df[:2] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates("AAA", keep="last") + expected = df.loc[[6, 7]] + 
tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates("AAA", keep=False) + expected = df.loc[[]] + tm.assert_frame_equal(result, expected) + assert len(result) == 0 + + # multi column + expected = df.loc[[0, 1, 2, 3]] + result = df.drop_duplicates(np.array(["AAA", "B"])) + tm.assert_frame_equal(result, expected) + result = df.drop_duplicates(["AAA", "B"]) + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(("AAA", "B"), keep="last") + expected = df.loc[[0, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(("AAA", "B"), keep=False) + expected = df.loc[[0]] + tm.assert_frame_equal(result, expected) + + # consider everything + df2 = df.loc[:, ["AAA", "B", "C"]] + + result = df2.drop_duplicates() + # in this case only + expected = df2.drop_duplicates(["AAA", "B"]) + tm.assert_frame_equal(result, expected) + + result = df2.drop_duplicates(keep="last") + expected = df2.drop_duplicates(["AAA", "B"], keep="last") + tm.assert_frame_equal(result, expected) + + result = df2.drop_duplicates(keep=False) + expected = df2.drop_duplicates(["AAA", "B"], keep=False) + tm.assert_frame_equal(result, expected) + + # integers + result = df.drop_duplicates("C") + expected = df.iloc[[0, 2]] + tm.assert_frame_equal(result, expected) + result = df.drop_duplicates("C", keep="last") + expected = df.iloc[[-2, -1]] + tm.assert_frame_equal(result, expected) + + df["E"] = df["C"].astype("int8") + result = df.drop_duplicates("E") + expected = df.iloc[[0, 2]] + tm.assert_frame_equal(result, expected) + result = df.drop_duplicates("E", keep="last") + expected = df.iloc[[-2, -1]] + tm.assert_frame_equal(result, expected) + + # GH 11376 + df = DataFrame({"x": [7, 6, 3, 3, 4, 8, 0], "y": [0, 6, 5, 5, 9, 1, 2]}) + expected = df.loc[df.index != 3] + tm.assert_frame_equal(df.drop_duplicates(), expected) + + df = DataFrame([[1, 0], [0, 2]]) + tm.assert_frame_equal(df.drop_duplicates(), df) + + df = DataFrame([[-2, 0], [0, -4]]) + tm.assert_frame_equal(df.drop_duplicates(), df) + + x = np.iinfo(np.int64).max / 3 * 2 + df = DataFrame([[-x, x], [0, x + 4]]) + tm.assert_frame_equal(df.drop_duplicates(), df) + + df = DataFrame([[-x, x], [x, x + 4]]) + tm.assert_frame_equal(df.drop_duplicates(), df) + + # GH 11864 + df = DataFrame([i] * 9 for i in range(16)) + df = concat([df, DataFrame([[1] + [0] * 8])], ignore_index=True) + + for keep in ["first", "last", False]: + assert df.duplicated(keep=keep).sum() == 0 + + +def test_drop_duplicates_with_duplicate_column_names(): + # GH17836 + df = DataFrame([[1, 2, 5], [3, 4, 6], [3, 4, 7]], columns=["a", "a", "b"]) + + result0 = df.drop_duplicates() + tm.assert_frame_equal(result0, df) + + result1 = df.drop_duplicates("a") + expected1 = df[:2] + tm.assert_frame_equal(result1, expected1) + + +def test_drop_duplicates_for_take_all(): + df = DataFrame( + { + "AAA": ["foo", "bar", "baz", "bar", "foo", "bar", "qux", "foo"], + "B": ["one", "one", "two", "two", "two", "two", "one", "two"], + "C": [1, 1, 2, 2, 2, 2, 1, 2], + "D": range(8), + } + ) + # single column + result = df.drop_duplicates("AAA") + expected = df.iloc[[0, 1, 2, 6]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates("AAA", keep="last") + expected = df.iloc[[2, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates("AAA", keep=False) + expected = df.iloc[[2, 6]] + tm.assert_frame_equal(result, expected) + + # multiple columns + result = df.drop_duplicates(["AAA", "B"]) + expected = df.iloc[[0, 1, 2, 3, 4, 6]] + 
tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(["AAA", "B"], keep="last") + expected = df.iloc[[0, 1, 2, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(["AAA", "B"], keep=False) + expected = df.iloc[[0, 1, 2, 6]] + tm.assert_frame_equal(result, expected) + + +def test_drop_duplicates_tuple(): + df = DataFrame( + { + ("AA", "AB"): ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"], + "B": ["one", "one", "two", "two", "two", "two", "one", "two"], + "C": [1, 1, 2, 2, 2, 2, 1, 2], + "D": range(8), + } + ) + # single column + result = df.drop_duplicates(("AA", "AB")) + expected = df[:2] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(("AA", "AB"), keep="last") + expected = df.loc[[6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(("AA", "AB"), keep=False) + expected = df.loc[[]] # empty df + assert len(result) == 0 + tm.assert_frame_equal(result, expected) + + # multi column + expected = df.loc[[0, 1, 2, 3]] + result = df.drop_duplicates((("AA", "AB"), "B")) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "df", + [ + DataFrame(), + DataFrame(columns=[]), + DataFrame(columns=["A", "B", "C"]), + DataFrame(index=[]), + DataFrame(index=["A", "B", "C"]), + ], +) +def test_drop_duplicates_empty(df): + # GH 20516 + result = df.drop_duplicates() + tm.assert_frame_equal(result, df) + + result = df.copy() + result.drop_duplicates(inplace=True) + tm.assert_frame_equal(result, df) + + +def test_drop_duplicates_NA(): + # none + df = DataFrame( + { + "A": [None, None, "foo", "bar", "foo", "bar", "bar", "foo"], + "B": ["one", "one", "two", "two", "two", "two", "one", "two"], + "C": [1.0, np.nan, np.nan, np.nan, 1.0, 1.0, 1, 1.0], + "D": range(8), + } + ) + # single column + result = df.drop_duplicates("A") + expected = df.loc[[0, 2, 3]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates("A", keep="last") + expected = df.loc[[1, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates("A", keep=False) + expected = df.loc[[]] # empty df + tm.assert_frame_equal(result, expected) + assert len(result) == 0 + + # multi column + result = df.drop_duplicates(["A", "B"]) + expected = df.loc[[0, 2, 3, 6]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(["A", "B"], keep="last") + expected = df.loc[[1, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(["A", "B"], keep=False) + expected = df.loc[[6]] + tm.assert_frame_equal(result, expected) + + # nan + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"], + "B": ["one", "one", "two", "two", "two", "two", "one", "two"], + "C": [1.0, np.nan, np.nan, np.nan, 1.0, 1.0, 1, 1.0], + "D": range(8), + } + ) + # single column + result = df.drop_duplicates("C") + expected = df[:2] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates("C", keep="last") + expected = df.loc[[3, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates("C", keep=False) + expected = df.loc[[]] # empty df + tm.assert_frame_equal(result, expected) + assert len(result) == 0 + + # multi column + result = df.drop_duplicates(["C", "B"]) + expected = df.loc[[0, 1, 2, 4]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(["C", "B"], keep="last") + expected = df.loc[[1, 3, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(["C", "B"], 
keep=False) + expected = df.loc[[1]] + tm.assert_frame_equal(result, expected) + + +def test_drop_duplicates_NA_for_take_all(): + # none + df = DataFrame( + { + "A": [None, None, "foo", "bar", "foo", "baz", "bar", "qux"], + "C": [1.0, np.nan, np.nan, np.nan, 1.0, 2.0, 3, 1.0], + } + ) + + # single column + result = df.drop_duplicates("A") + expected = df.iloc[[0, 2, 3, 5, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates("A", keep="last") + expected = df.iloc[[1, 4, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates("A", keep=False) + expected = df.iloc[[5, 7]] + tm.assert_frame_equal(result, expected) + + # nan + + # single column + result = df.drop_duplicates("C") + expected = df.iloc[[0, 1, 5, 6]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates("C", keep="last") + expected = df.iloc[[3, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates("C", keep=False) + expected = df.iloc[[5, 6]] + tm.assert_frame_equal(result, expected) + + +def test_drop_duplicates_inplace(): + orig = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"], + "B": ["one", "one", "two", "two", "two", "two", "one", "two"], + "C": [1, 1, 2, 2, 2, 2, 1, 2], + "D": range(8), + } + ) + # single column + df = orig.copy() + return_value = df.drop_duplicates("A", inplace=True) + expected = orig[:2] + result = df + tm.assert_frame_equal(result, expected) + assert return_value is None + + df = orig.copy() + return_value = df.drop_duplicates("A", keep="last", inplace=True) + expected = orig.loc[[6, 7]] + result = df + tm.assert_frame_equal(result, expected) + assert return_value is None + + df = orig.copy() + return_value = df.drop_duplicates("A", keep=False, inplace=True) + expected = orig.loc[[]] + result = df + tm.assert_frame_equal(result, expected) + assert len(df) == 0 + assert return_value is None + + # multi column + df = orig.copy() + return_value = df.drop_duplicates(["A", "B"], inplace=True) + expected = orig.loc[[0, 1, 2, 3]] + result = df + tm.assert_frame_equal(result, expected) + assert return_value is None + + df = orig.copy() + return_value = df.drop_duplicates(["A", "B"], keep="last", inplace=True) + expected = orig.loc[[0, 5, 6, 7]] + result = df + tm.assert_frame_equal(result, expected) + assert return_value is None + + df = orig.copy() + return_value = df.drop_duplicates(["A", "B"], keep=False, inplace=True) + expected = orig.loc[[0]] + result = df + tm.assert_frame_equal(result, expected) + assert return_value is None + + # consider everything + orig2 = orig.loc[:, ["A", "B", "C"]].copy() + + df2 = orig2.copy() + return_value = df2.drop_duplicates(inplace=True) + # in this case only + expected = orig2.drop_duplicates(["A", "B"]) + result = df2 + tm.assert_frame_equal(result, expected) + assert return_value is None + + df2 = orig2.copy() + return_value = df2.drop_duplicates(keep="last", inplace=True) + expected = orig2.drop_duplicates(["A", "B"], keep="last") + result = df2 + tm.assert_frame_equal(result, expected) + assert return_value is None + + df2 = orig2.copy() + return_value = df2.drop_duplicates(keep=False, inplace=True) + expected = orig2.drop_duplicates(["A", "B"], keep=False) + result = df2 + tm.assert_frame_equal(result, expected) + assert return_value is None + + +@pytest.mark.parametrize("inplace", [True, False]) +@pytest.mark.parametrize( + "origin_dict, output_dict, ignore_index, output_index", + [ + ({"A": [2, 2, 3]}, {"A": [2, 3]}, True, [0, 1]), + 
({"A": [2, 2, 3]}, {"A": [2, 3]}, False, [0, 2]), + ({"A": [2, 2, 3], "B": [2, 2, 4]}, {"A": [2, 3], "B": [2, 4]}, True, [0, 1]), + ({"A": [2, 2, 3], "B": [2, 2, 4]}, {"A": [2, 3], "B": [2, 4]}, False, [0, 2]), + ], +) +def test_drop_duplicates_ignore_index( + inplace, origin_dict, output_dict, ignore_index, output_index +): + # GH 30114 + df = DataFrame(origin_dict) + expected = DataFrame(output_dict, index=output_index) + + if inplace: + result_df = df.copy() + result_df.drop_duplicates(ignore_index=ignore_index, inplace=inplace) + else: + result_df = df.drop_duplicates(ignore_index=ignore_index, inplace=inplace) + + tm.assert_frame_equal(result_df, expected) + tm.assert_frame_equal(df, DataFrame(origin_dict)) + + +def test_drop_duplicates_null_in_object_column(nulls_fixture): + # https://github.com/pandas-dev/pandas/issues/32992 + df = DataFrame([[1, nulls_fixture], [2, "a"]], dtype=object) + result = df.drop_duplicates() + tm.assert_frame_equal(result, df) + + +def test_drop_duplicates_series_vs_dataframe(keep): + # GH#14192 + df = DataFrame( + { + "a": [1, 1, 1, "one", "one"], + "b": [2, 2, np.nan, np.nan, np.nan], + "c": [3, 3, np.nan, np.nan, "three"], + "d": [1, 2, 3, 4, 4], + "e": [ + datetime(2015, 1, 1), + datetime(2015, 1, 1), + datetime(2015, 2, 1), + NaT, + NaT, + ], + } + ) + for column in df.columns: + dropped_frame = df[[column]].drop_duplicates(keep=keep) + dropped_series = df[column].drop_duplicates(keep=keep) + tm.assert_frame_equal(dropped_frame, dropped_series.to_frame()) + + +@pytest.mark.parametrize("arg", [[1], 1, "True", [], 0]) +def test_drop_duplicates_non_boolean_ignore_index(arg): + # GH#38274 + df = DataFrame({"a": [1, 2, 1, 3]}) + msg = '^For argument "ignore_index" expected type bool, received type .*.$' + with pytest.raises(ValueError, match=msg): + df.drop_duplicates(ignore_index=arg) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_droplevel.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_droplevel.py new file mode 100644 index 00000000..e1302d4b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_droplevel.py @@ -0,0 +1,36 @@ +import pytest + +from pandas import ( + DataFrame, + Index, + MultiIndex, +) +import pandas._testing as tm + + +class TestDropLevel: + def test_droplevel(self, frame_or_series): + # GH#20342 + cols = MultiIndex.from_tuples( + [("c", "e"), ("d", "f")], names=["level_1", "level_2"] + ) + mi = MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"]) + df = DataFrame([[3, 4], [7, 8], [11, 12]], index=mi, columns=cols) + if frame_or_series is not DataFrame: + df = df.iloc[:, 0] + + # test that dropping of a level in index works + expected = df.reset_index("a", drop=True) + result = df.droplevel("a", axis="index") + tm.assert_equal(result, expected) + + if frame_or_series is DataFrame: + # test that dropping of a level in columns works + expected = df.copy() + expected.columns = Index(["c", "d"], name="level_1") + result = df.droplevel("level_2", axis="columns") + tm.assert_equal(result, expected) + else: + # test that droplevel raises ValueError on axis != 0 + with pytest.raises(ValueError, match="No axis named columns"): + df.droplevel(1, axis="columns") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dropna.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dropna.py new file mode 100644 index 00000000..7899b4ae --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dropna.py @@ -0,0 +1,285 @@ +import datetime + +import dateutil +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +class TestDataFrameMissingData: + def test_dropEmptyRows(self, float_frame): + N = len(float_frame.index) + mat = np.random.default_rng(2).standard_normal(N) + mat[:5] = np.nan + + frame = DataFrame({"foo": mat}, index=float_frame.index) + original = Series(mat, index=float_frame.index, name="foo") + expected = original.dropna() + inplace_frame1, inplace_frame2 = frame.copy(), frame.copy() + + smaller_frame = frame.dropna(how="all") + # check that original was preserved + tm.assert_series_equal(frame["foo"], original) + return_value = inplace_frame1.dropna(how="all", inplace=True) + tm.assert_series_equal(smaller_frame["foo"], expected) + tm.assert_series_equal(inplace_frame1["foo"], expected) + assert return_value is None + + smaller_frame = frame.dropna(how="all", subset=["foo"]) + return_value = inplace_frame2.dropna(how="all", subset=["foo"], inplace=True) + tm.assert_series_equal(smaller_frame["foo"], expected) + tm.assert_series_equal(inplace_frame2["foo"], expected) + assert return_value is None + + def test_dropIncompleteRows(self, float_frame): + N = len(float_frame.index) + mat = np.random.default_rng(2).standard_normal(N) + mat[:5] = np.nan + + frame = DataFrame({"foo": mat}, index=float_frame.index) + frame["bar"] = 5 + original = Series(mat, index=float_frame.index, name="foo") + inp_frame1, inp_frame2 = frame.copy(), frame.copy() + + smaller_frame = frame.dropna() + tm.assert_series_equal(frame["foo"], original) + return_value = inp_frame1.dropna(inplace=True) + + exp = Series(mat[5:], index=float_frame.index[5:], name="foo") + tm.assert_series_equal(smaller_frame["foo"], exp) + tm.assert_series_equal(inp_frame1["foo"], exp) + assert return_value is None + + samesize_frame = frame.dropna(subset=["bar"]) + tm.assert_series_equal(frame["foo"], original) + assert (frame["bar"] == 5).all() + return_value = inp_frame2.dropna(subset=["bar"], inplace=True) + tm.assert_index_equal(samesize_frame.index, float_frame.index) + tm.assert_index_equal(inp_frame2.index, float_frame.index) + assert return_value is None + + def test_dropna(self): + df = DataFrame(np.random.default_rng(2).standard_normal((6, 4))) + df.iloc[:2, 2] = np.nan + + dropped = df.dropna(axis=1) + expected = df.loc[:, [0, 1, 3]] + inp = df.copy() + return_value = inp.dropna(axis=1, inplace=True) + tm.assert_frame_equal(dropped, expected) + tm.assert_frame_equal(inp, expected) + assert return_value is None + + dropped = df.dropna(axis=0) + expected = df.loc[list(range(2, 6))] + inp = df.copy() + return_value = inp.dropna(axis=0, inplace=True) + tm.assert_frame_equal(dropped, expected) + tm.assert_frame_equal(inp, expected) + assert return_value is None + + # threshold + dropped = df.dropna(axis=1, thresh=5) + expected = df.loc[:, [0, 1, 3]] + inp = df.copy() + return_value = inp.dropna(axis=1, thresh=5, inplace=True) + tm.assert_frame_equal(dropped, expected) + tm.assert_frame_equal(inp, expected) + assert return_value is None + + dropped = df.dropna(axis=0, thresh=4) + expected = df.loc[range(2, 6)] + inp = df.copy() + return_value = inp.dropna(axis=0, thresh=4, inplace=True) + tm.assert_frame_equal(dropped, expected) + tm.assert_frame_equal(inp, expected) + assert return_value is None + + dropped = df.dropna(axis=1, thresh=4) + 
tm.assert_frame_equal(dropped, df)
+
+ dropped = df.dropna(axis=1, thresh=3)
+ tm.assert_frame_equal(dropped, df)
+
+ # subset
+ dropped = df.dropna(axis=0, subset=[0, 1, 3])
+ inp = df.copy()
+ return_value = inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
+ tm.assert_frame_equal(dropped, df)
+ tm.assert_frame_equal(inp, df)
+ assert return_value is None
+
+ # all
+ dropped = df.dropna(axis=1, how="all")
+ tm.assert_frame_equal(dropped, df)
+
+ df[2] = np.nan
+ dropped = df.dropna(axis=1, how="all")
+ expected = df.loc[:, [0, 1, 3]]
+ tm.assert_frame_equal(dropped, expected)
+
+ # bad input
+ msg = "No axis named 3 for object type DataFrame"
+ with pytest.raises(ValueError, match=msg):
+ df.dropna(axis=3)
+
+ def test_drop_and_dropna_caching(self):
+ # test that the cacher updates
+ original = Series([1, 2, np.nan], name="A")
+ expected = Series([1, 2], dtype=original.dtype, name="A")
+ df = DataFrame({"A": original.values.copy()})
+ df2 = df.copy()
+ df["A"].dropna()
+ tm.assert_series_equal(df["A"], original)
+
+ ser = df["A"]
+ return_value = ser.dropna(inplace=True)
+ tm.assert_series_equal(ser, expected)
+ tm.assert_series_equal(df["A"], original)
+ assert return_value is None
+
+ df2["A"].drop([1])
+ tm.assert_series_equal(df2["A"], original)
+
+ ser = df2["A"]
+ return_value = ser.drop([1], inplace=True)
+ tm.assert_series_equal(ser, original.drop([1]))
+ tm.assert_series_equal(df2["A"], original)
+ assert return_value is None
+
+ def test_dropna_corner(self, float_frame):
+ # bad input
+ msg = "invalid how option: foo"
+ with pytest.raises(ValueError, match=msg):
+ float_frame.dropna(how="foo")
+ # non-existent column - 8303
+ with pytest.raises(KeyError, match=r"^\['X'\]$"):
+ float_frame.dropna(subset=["A", "X"])
+
+ def test_dropna_multiple_axes(self):
+ df = DataFrame(
+ [
+ [1, np.nan, 2, 3],
+ [4, np.nan, 5, 6],
+ [np.nan, np.nan, np.nan, np.nan],
+ [7, np.nan, 8, 9],
+ ]
+ )
+
+ # GH20987
+ with pytest.raises(TypeError, match="supplying multiple axes"):
+ df.dropna(how="all", axis=[0, 1])
+ with pytest.raises(TypeError, match="supplying multiple axes"):
+ df.dropna(how="all", axis=(0, 1))
+
+ inp = df.copy()
+ with pytest.raises(TypeError, match="supplying multiple axes"):
+ inp.dropna(how="all", axis=(0, 1), inplace=True)
+
+ def test_dropna_tz_aware_datetime(self):
+ # GH13407
+ df = DataFrame()
+ dt1 = datetime.datetime(2015, 1, 1, tzinfo=dateutil.tz.tzutc())
+ dt2 = datetime.datetime(2015, 2, 2, tzinfo=dateutil.tz.tzutc())
+ df["Time"] = [dt1]
+ result = df.dropna(axis=0)
+ expected = DataFrame({"Time": [dt1]})
+ tm.assert_frame_equal(result, expected)
+
+ # Ex2
+ df = DataFrame({"Time": [dt1, None, np.nan, dt2]})
+ result = df.dropna(axis=0)
+ expected = DataFrame([dt1, dt2], columns=["Time"], index=[0, 3])
+ tm.assert_frame_equal(result, expected)
+
+ def test_dropna_categorical_interval_index(self):
+ # GH 25087
+ ii = pd.IntervalIndex.from_breaks([0, 2.78, 3.14, 6.28])
+ ci = pd.CategoricalIndex(ii)
+ df = DataFrame({"A": list("abc")}, index=ci)
+
+ expected = df
+ result = df.dropna()
+ tm.assert_frame_equal(result, expected)
+
+ def test_dropna_with_duplicate_columns(self):
+ df = DataFrame(
+ {
+ "A": np.random.default_rng(2).standard_normal(5),
+ "B": np.random.default_rng(2).standard_normal(5),
+ "C": np.random.default_rng(2).standard_normal(5),
+ "D": ["a", "b", "c", "d", "e"],
+ }
+ )
+ df.iloc[2, [0, 1, 2]] = np.nan
+ df.iloc[0, 0] = np.nan
+ df.iloc[1, 1] = np.nan
+ df.iloc[:, 3] = np.nan
+ expected = df.dropna(subset=["A", "B", "C"], how="all")
+ 
expected.columns = ["A", "A", "B", "C"] + + df.columns = ["A", "A", "B", "C"] + + result = df.dropna(subset=["A", "C"], how="all") + tm.assert_frame_equal(result, expected) + + def test_set_single_column_subset(self): + # GH 41021 + df = DataFrame({"A": [1, 2, 3], "B": list("abc"), "C": [4, np.nan, 5]}) + expected = DataFrame( + {"A": [1, 3], "B": list("ac"), "C": [4.0, 5.0]}, index=[0, 2] + ) + result = df.dropna(subset="C") + tm.assert_frame_equal(result, expected) + + def test_single_column_not_present_in_axis(self): + # GH 41021 + df = DataFrame({"A": [1, 2, 3]}) + + # Column not present + with pytest.raises(KeyError, match="['D']"): + df.dropna(subset="D", axis=0) + + def test_subset_is_nparray(self): + # GH 41021 + df = DataFrame({"A": [1, 2, np.nan], "B": list("abc"), "C": [4, np.nan, 5]}) + expected = DataFrame({"A": [1.0], "B": ["a"], "C": [4.0]}) + result = df.dropna(subset=np.array(["A", "C"])) + tm.assert_frame_equal(result, expected) + + def test_no_nans_in_frame(self, axis): + # GH#41965 + df = DataFrame([[1, 2], [3, 4]], columns=pd.RangeIndex(0, 2)) + expected = df.copy() + result = df.dropna(axis=axis) + tm.assert_frame_equal(result, expected, check_index_type=True) + + def test_how_thresh_param_incompatible(self): + # GH46575 + df = DataFrame([1, 2, pd.NA]) + msg = "You cannot set both the how and thresh arguments at the same time" + with pytest.raises(TypeError, match=msg): + df.dropna(how="all", thresh=2) + + with pytest.raises(TypeError, match=msg): + df.dropna(how="any", thresh=2) + + with pytest.raises(TypeError, match=msg): + df.dropna(how=None, thresh=None) + + @pytest.mark.parametrize("val", [1, 1.5]) + def test_dropna_ignore_index(self, val): + # GH#31725 + df = DataFrame({"a": [1, 2, val]}, index=[3, 2, 1]) + result = df.dropna(ignore_index=True) + expected = DataFrame({"a": [1, 2, val]}) + tm.assert_frame_equal(result, expected) + + df.dropna(ignore_index=True, inplace=True) + tm.assert_frame_equal(df, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dtypes.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dtypes.py new file mode 100644 index 00000000..4bdf1697 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_dtypes.py @@ -0,0 +1,150 @@ +from datetime import timedelta + +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +from pandas import ( + DataFrame, + Series, + date_range, + option_context, +) +import pandas._testing as tm + + +class TestDataFrameDataTypes: + def test_empty_frame_dtypes(self): + empty_df = DataFrame() + tm.assert_series_equal(empty_df.dtypes, Series(dtype=object)) + + nocols_df = DataFrame(index=[1, 2, 3]) + tm.assert_series_equal(nocols_df.dtypes, Series(dtype=object)) + + norows_df = DataFrame(columns=list("abc")) + tm.assert_series_equal(norows_df.dtypes, Series(object, index=list("abc"))) + + norows_int_df = DataFrame(columns=list("abc")).astype(np.int32) + tm.assert_series_equal( + norows_int_df.dtypes, Series(np.dtype("int32"), index=list("abc")) + ) + + df = DataFrame({"a": 1, "b": True, "c": 1.0}, index=[1, 2, 3]) + ex_dtypes = Series({"a": np.int64, "b": np.bool_, "c": np.float64}) + tm.assert_series_equal(df.dtypes, ex_dtypes) + + # same but for empty slice of df + tm.assert_series_equal(df[:0].dtypes, ex_dtypes) + + def test_datetime_with_tz_dtypes(self): + tzframe = DataFrame( + { + "A": date_range("20130101", periods=3), + "B": date_range("20130101", 
periods=3, tz="US/Eastern"), + "C": date_range("20130101", periods=3, tz="CET"), + } + ) + tzframe.iloc[1, 1] = pd.NaT + tzframe.iloc[1, 2] = pd.NaT + result = tzframe.dtypes.sort_index() + expected = Series( + [ + np.dtype("datetime64[ns]"), + DatetimeTZDtype("ns", "US/Eastern"), + DatetimeTZDtype("ns", "CET"), + ], + ["A", "B", "C"], + ) + + tm.assert_series_equal(result, expected) + + def test_dtypes_are_correct_after_column_slice(self): + # GH6525 + df = DataFrame(index=range(5), columns=list("abc"), dtype=np.float64) + tm.assert_series_equal( + df.dtypes, + Series({"a": np.float64, "b": np.float64, "c": np.float64}), + ) + tm.assert_series_equal(df.iloc[:, 2:].dtypes, Series({"c": np.float64})) + tm.assert_series_equal( + df.dtypes, + Series({"a": np.float64, "b": np.float64, "c": np.float64}), + ) + + @pytest.mark.parametrize( + "data", + [pd.NA, True], + ) + def test_dtypes_are_correct_after_groupby_last(self, data): + # GH46409 + df = DataFrame( + {"id": [1, 2, 3, 4], "test": [True, pd.NA, data, False]} + ).convert_dtypes() + result = df.groupby("id").last().test + expected = df.set_index("id").test + assert result.dtype == pd.BooleanDtype() + tm.assert_series_equal(expected, result) + + def test_dtypes_gh8722(self, float_string_frame): + float_string_frame["bool"] = float_string_frame["A"] > 0 + result = float_string_frame.dtypes + expected = Series( + {k: v.dtype for k, v in float_string_frame.items()}, index=result.index + ) + tm.assert_series_equal(result, expected) + + # compat, GH 8722 + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with option_context("use_inf_as_na", True): + df = DataFrame([[1]]) + result = df.dtypes + tm.assert_series_equal(result, Series({0: np.dtype("int64")})) + + def test_dtypes_timedeltas(self): + df = DataFrame( + { + "A": Series(date_range("2012-1-1", periods=3, freq="D")), + "B": Series([timedelta(days=i) for i in range(3)]), + } + ) + result = df.dtypes + expected = Series( + [np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")], index=list("AB") + ) + tm.assert_series_equal(result, expected) + + df["C"] = df["A"] + df["B"] + result = df.dtypes + expected = Series( + [ + np.dtype("datetime64[ns]"), + np.dtype("timedelta64[ns]"), + np.dtype("datetime64[ns]"), + ], + index=list("ABC"), + ) + tm.assert_series_equal(result, expected) + + # mixed int types + df["D"] = 1 + result = df.dtypes + expected = Series( + [ + np.dtype("datetime64[ns]"), + np.dtype("timedelta64[ns]"), + np.dtype("datetime64[ns]"), + np.dtype("int64"), + ], + index=list("ABCD"), + ) + tm.assert_series_equal(result, expected) + + def test_frame_apply_np_array_return_type(self): + # GH 35517 + df = DataFrame([["foo"]]) + result = df.apply(lambda col: np.array("bar")) + expected = Series(["bar"]) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_duplicated.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_duplicated.py new file mode 100644 index 00000000..788aede8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_duplicated.py @@ -0,0 +1,117 @@ +import re +import sys + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, + date_range, +) +import pandas._testing as tm + + +@pytest.mark.parametrize("subset", ["a", ["a"], ["a", "B"]]) +def test_duplicated_with_misspelled_column_name(subset): + # GH 19730 + df = DataFrame({"A": [0, 0, 1], "B": [0, 0, 1], 
"C": [0, 0, 1]}) + msg = re.escape("Index(['a'], dtype='object')") + + with pytest.raises(KeyError, match=msg): + df.duplicated(subset) + + +def test_duplicated_implemented_no_recursion(): + # gh-21524 + # Ensure duplicated isn't implemented using recursion that + # can fail on wide frames + df = DataFrame(np.random.default_rng(2).integers(0, 1000, (10, 1000))) + rec_limit = sys.getrecursionlimit() + try: + sys.setrecursionlimit(100) + result = df.duplicated() + finally: + sys.setrecursionlimit(rec_limit) + + # Then duplicates produce the bool Series as a result and don't fail during + # calculation. Actual values doesn't matter here, though usually it's all + # False in this case + assert isinstance(result, Series) + assert result.dtype == np.bool_ + + +@pytest.mark.parametrize( + "keep, expected", + [ + ("first", Series([False, False, True, False, True])), + ("last", Series([True, True, False, False, False])), + (False, Series([True, True, True, False, True])), + ], +) +def test_duplicated_keep(keep, expected): + df = DataFrame({"A": [0, 1, 1, 2, 0], "B": ["a", "b", "b", "c", "a"]}) + + result = df.duplicated(keep=keep) + tm.assert_series_equal(result, expected) + + +@pytest.mark.xfail(reason="GH#21720; nan/None falsely considered equal") +@pytest.mark.parametrize( + "keep, expected", + [ + ("first", Series([False, False, True, False, True])), + ("last", Series([True, True, False, False, False])), + (False, Series([True, True, True, False, True])), + ], +) +def test_duplicated_nan_none(keep, expected): + df = DataFrame({"C": [np.nan, 3, 3, None, np.nan], "x": 1}, dtype=object) + + result = df.duplicated(keep=keep) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("subset", [None, ["A", "B"], "A"]) +def test_duplicated_subset(subset, keep): + df = DataFrame( + { + "A": [0, 1, 1, 2, 0], + "B": ["a", "b", "b", "c", "a"], + "C": [np.nan, 3, 3, None, np.nan], + } + ) + + if subset is None: + subset = list(df.columns) + elif isinstance(subset, str): + # need to have a DataFrame, not a Series + # -> select columns with singleton list, not string + subset = [subset] + + expected = df[subset].duplicated(keep=keep) + result = df.duplicated(keep=keep, subset=subset) + tm.assert_series_equal(result, expected) + + +def test_duplicated_on_empty_frame(): + # GH 25184 + + df = DataFrame(columns=["a", "b"]) + dupes = df.duplicated("a") + + result = df[dupes] + expected = df.copy() + tm.assert_frame_equal(result, expected) + + +def test_frame_datetime64_duplicated(): + dates = date_range("2010-07-01", end="2010-08-05") + + tst = DataFrame({"symbol": "AAA", "date": dates}) + result = tst.duplicated(["date", "symbol"]) + assert (-result).all() + + tst = DataFrame({"date": dates}) + result = tst.date.duplicated() + assert (-result).all() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_equals.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_equals.py new file mode 100644 index 00000000..4028a26d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_equals.py @@ -0,0 +1,85 @@ +import numpy as np + +from pandas import ( + DataFrame, + date_range, +) +import pandas._testing as tm + + +class TestEquals: + def test_dataframe_not_equal(self): + # see GH#28839 + df1 = DataFrame({"a": [1, 2], "b": ["s", "d"]}) + df2 = DataFrame({"a": ["s", "d"], "b": [1, 2]}) + assert df1.equals(df2) is False + + def test_equals_different_blocks(self, using_array_manager): + # GH#9330 + df0 = DataFrame({"A": ["x", 
"y"], "B": [1, 2], "C": ["w", "z"]}) + df1 = df0.reset_index()[["A", "B", "C"]] + if not using_array_manager: + # this assert verifies that the above operations have + # induced a block rearrangement + assert df0._mgr.blocks[0].dtype != df1._mgr.blocks[0].dtype + + # do the real tests + tm.assert_frame_equal(df0, df1) + assert df0.equals(df1) + assert df1.equals(df0) + + def test_equals(self): + # Add object dtype column with nans + index = np.random.default_rng(2).random(10) + df1 = DataFrame( + np.random.default_rng(2).random(10), index=index, columns=["floats"] + ) + df1["text"] = "the sky is so blue. we could use more chocolate.".split() + df1["start"] = date_range("2000-1-1", periods=10, freq="T") + df1["end"] = date_range("2000-1-1", periods=10, freq="D") + df1["diff"] = df1["end"] - df1["start"] + # Explicitly cast to object, to avoid implicit cast when setting np.nan + df1["bool"] = (np.arange(10) % 3 == 0).astype(object) + df1.loc[::2] = np.nan + df2 = df1.copy() + assert df1["text"].equals(df2["text"]) + assert df1["start"].equals(df2["start"]) + assert df1["end"].equals(df2["end"]) + assert df1["diff"].equals(df2["diff"]) + assert df1["bool"].equals(df2["bool"]) + assert df1.equals(df2) + assert not df1.equals(object) + + # different dtype + different = df1.copy() + different["floats"] = different["floats"].astype("float32") + assert not df1.equals(different) + + # different index + different_index = -index + different = df2.set_index(different_index) + assert not df1.equals(different) + + # different columns + different = df2.copy() + different.columns = df2.columns[::-1] + assert not df1.equals(different) + + # DatetimeIndex + index = date_range("2000-1-1", periods=10, freq="T") + df1 = df1.set_index(index) + df2 = df1.copy() + assert df1.equals(df2) + + # MultiIndex + df3 = df1.set_index(["text"], append=True) + df2 = df1.set_index(["text"], append=True) + assert df3.equals(df2) + + df2 = df1.set_index(["floats"], append=True) + assert not df3.equals(df2) + + # NaN in index + df3 = df1.set_index(["floats"], append=True) + df2 = df1.set_index(["floats"], append=True) + assert df3.equals(df2) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_explode.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_explode.py new file mode 100644 index 00000000..d1e4a603 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_explode.py @@ -0,0 +1,303 @@ +import re + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +def test_error(): + df = pd.DataFrame( + {"A": pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd")), "B": 1} + ) + with pytest.raises( + ValueError, match="column must be a scalar, tuple, or list thereof" + ): + df.explode([list("AA")]) + + with pytest.raises(ValueError, match="column must be unique"): + df.explode(list("AA")) + + df.columns = list("AA") + with pytest.raises( + ValueError, + match=re.escape("DataFrame columns must be unique. 
Duplicate columns: ['A']"), + ): + df.explode("A") + + +@pytest.mark.parametrize( + "input_subset, error_message", + [ + ( + list("AC"), + "columns must have matching element counts", + ), + ( + [], + "column must be nonempty", + ), + ( + list("AC"), + "columns must have matching element counts", + ), + ], +) +def test_error_multi_columns(input_subset, error_message): + # GH 39240 + df = pd.DataFrame( + { + "A": [[0, 1, 2], np.nan, [], (3, 4)], + "B": 1, + "C": [["a", "b", "c"], "foo", [], ["d", "e", "f"]], + }, + index=list("abcd"), + ) + with pytest.raises(ValueError, match=error_message): + df.explode(input_subset) + + +@pytest.mark.parametrize( + "scalar", + ["a", 0, 1.5, pd.Timedelta("1 days"), pd.Timestamp("2019-12-31")], +) +def test_basic(scalar): + df = pd.DataFrame( + {scalar: pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd")), "B": 1} + ) + result = df.explode(scalar) + expected = pd.DataFrame( + { + scalar: pd.Series( + [0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object + ), + "B": 1, + } + ) + tm.assert_frame_equal(result, expected) + + +def test_multi_index_rows(): + df = pd.DataFrame( + {"A": np.array([[0, 1, 2], np.nan, [], (3, 4)], dtype=object), "B": 1}, + index=pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)]), + ) + + result = df.explode("A") + expected = pd.DataFrame( + { + "A": pd.Series( + [0, 1, 2, np.nan, np.nan, 3, 4], + index=pd.MultiIndex.from_tuples( + [ + ("a", 1), + ("a", 1), + ("a", 1), + ("a", 2), + ("b", 1), + ("b", 2), + ("b", 2), + ] + ), + dtype=object, + ), + "B": 1, + } + ) + tm.assert_frame_equal(result, expected) + + +def test_multi_index_columns(): + df = pd.DataFrame( + {("A", 1): np.array([[0, 1, 2], np.nan, [], (3, 4)], dtype=object), ("A", 2): 1} + ) + + result = df.explode(("A", 1)) + expected = pd.DataFrame( + { + ("A", 1): pd.Series( + [0, 1, 2, np.nan, np.nan, 3, 4], + index=pd.Index([0, 0, 0, 1, 2, 3, 3]), + dtype=object, + ), + ("A", 2): 1, + } + ) + tm.assert_frame_equal(result, expected) + + +def test_usecase(): + # explode a single column + # gh-10511 + df = pd.DataFrame( + [[11, range(5), 10], [22, range(3), 20]], columns=list("ABC") + ).set_index("C") + result = df.explode("B") + + expected = pd.DataFrame( + { + "A": [11, 11, 11, 11, 11, 22, 22, 22], + "B": np.array([0, 1, 2, 3, 4, 0, 1, 2], dtype=object), + "C": [10, 10, 10, 10, 10, 20, 20, 20], + }, + columns=list("ABC"), + ).set_index("C") + + tm.assert_frame_equal(result, expected) + + # gh-8517 + df = pd.DataFrame( + [["2014-01-01", "Alice", "A B"], ["2014-01-02", "Bob", "C D"]], + columns=["dt", "name", "text"], + ) + result = df.assign(text=df.text.str.split(" ")).explode("text") + expected = pd.DataFrame( + [ + ["2014-01-01", "Alice", "A"], + ["2014-01-01", "Alice", "B"], + ["2014-01-02", "Bob", "C"], + ["2014-01-02", "Bob", "D"], + ], + columns=["dt", "name", "text"], + index=[0, 0, 1, 1], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "input_dict, input_index, expected_dict, expected_index", + [ + ( + {"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]}, + [0, 0], + {"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]}, + [0, 0, 0, 0], + ), + ( + {"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]}, + pd.Index([0, 0], name="my_index"), + {"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]}, + pd.Index([0, 0, 0, 0], name="my_index"), + ), + ( + {"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]}, + pd.MultiIndex.from_arrays( + [[0, 0], [1, 1]], names=["my_first_index", 
"my_second_index"] + ), + {"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]}, + pd.MultiIndex.from_arrays( + [[0, 0, 0, 0], [1, 1, 1, 1]], + names=["my_first_index", "my_second_index"], + ), + ), + ( + {"col1": [[1, 2], [3, 4]], "col2": ["foo", "bar"]}, + pd.MultiIndex.from_arrays([[0, 0], [1, 1]], names=["my_index", None]), + {"col1": [1, 2, 3, 4], "col2": ["foo", "foo", "bar", "bar"]}, + pd.MultiIndex.from_arrays( + [[0, 0, 0, 0], [1, 1, 1, 1]], names=["my_index", None] + ), + ), + ], +) +def test_duplicate_index(input_dict, input_index, expected_dict, expected_index): + # GH 28005 + df = pd.DataFrame(input_dict, index=input_index) + result = df.explode("col1") + expected = pd.DataFrame(expected_dict, index=expected_index, dtype=object) + tm.assert_frame_equal(result, expected) + + +def test_ignore_index(): + # GH 34932 + df = pd.DataFrame({"id": range(0, 20, 10), "values": [list("ab"), list("cd")]}) + result = df.explode("values", ignore_index=True) + expected = pd.DataFrame( + {"id": [0, 0, 10, 10], "values": list("abcd")}, index=[0, 1, 2, 3] + ) + tm.assert_frame_equal(result, expected) + + +def test_explode_sets(): + # https://github.com/pandas-dev/pandas/issues/35614 + df = pd.DataFrame({"a": [{"x", "y"}], "b": [1]}, index=[1]) + result = df.explode(column="a").sort_values(by="a") + expected = pd.DataFrame({"a": ["x", "y"], "b": [1, 1]}, index=[1, 1]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "input_subset, expected_dict, expected_index", + [ + ( + list("AC"), + { + "A": pd.Series( + [0, 1, 2, np.nan, np.nan, 3, 4, np.nan], + index=list("aaabcdde"), + dtype=object, + ), + "B": 1, + "C": ["a", "b", "c", "foo", np.nan, "d", "e", np.nan], + }, + list("aaabcdde"), + ), + ( + list("A"), + { + "A": pd.Series( + [0, 1, 2, np.nan, np.nan, 3, 4, np.nan], + index=list("aaabcdde"), + dtype=object, + ), + "B": 1, + "C": [ + ["a", "b", "c"], + ["a", "b", "c"], + ["a", "b", "c"], + "foo", + [], + ["d", "e"], + ["d", "e"], + np.nan, + ], + }, + list("aaabcdde"), + ), + ], +) +def test_multi_columns(input_subset, expected_dict, expected_index): + # GH 39240 + df = pd.DataFrame( + { + "A": [[0, 1, 2], np.nan, [], (3, 4), np.nan], + "B": 1, + "C": [["a", "b", "c"], "foo", [], ["d", "e"], np.nan], + }, + index=list("abcde"), + ) + result = df.explode(input_subset) + expected = pd.DataFrame(expected_dict, expected_index) + tm.assert_frame_equal(result, expected) + + +def test_multi_columns_nan_empty(): + # GH 46084 + df = pd.DataFrame( + { + "A": [[0, 1], [5], [], [2, 3]], + "B": [9, 8, 7, 6], + "C": [[1, 2], np.nan, [], [3, 4]], + } + ) + result = df.explode(["A", "C"]) + expected = pd.DataFrame( + { + "A": np.array([0, 1, 5, np.nan, 2, 3], dtype=object), + "B": [9, 9, 8, 7, 6, 6], + "C": np.array([1, 2, np.nan, np.nan, 3, 4], dtype=object), + }, + index=[0, 0, 1, 2, 3, 3], + ) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_fillna.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_fillna.py new file mode 100644 index 00000000..812150bb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_fillna.py @@ -0,0 +1,832 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + NaT, + PeriodIndex, + Series, + TimedeltaIndex, + Timestamp, + date_range, + to_datetime, +) +import pandas._testing as tm +from pandas.tests.frame.common import 
_check_mixed_float + + +class TestFillNA: + def test_fillna_dict_inplace_nonunique_columns(self, using_copy_on_write): + df = DataFrame( + {"A": [np.nan] * 3, "B": [NaT, Timestamp(1), NaT], "C": [np.nan, "foo", 2]} + ) + df.columns = ["A", "A", "A"] + orig = df[:] + + df.fillna({"A": 2}, inplace=True) + # The first and third columns can be set inplace, while the second cannot. + + expected = DataFrame( + {"A": [2.0] * 3, "B": [2, Timestamp(1), 2], "C": [2, "foo", 2]} + ) + expected.columns = ["A", "A", "A"] + tm.assert_frame_equal(df, expected) + + # TODO: what's the expected/desired behavior with CoW? + if not using_copy_on_write: + assert tm.shares_memory(df.iloc[:, 0], orig.iloc[:, 0]) + assert not tm.shares_memory(df.iloc[:, 1], orig.iloc[:, 1]) + if not using_copy_on_write: + assert tm.shares_memory(df.iloc[:, 2], orig.iloc[:, 2]) + + @td.skip_array_manager_not_yet_implemented + def test_fillna_on_column_view(self, using_copy_on_write): + # GH#46149 avoid unnecessary copies + arr = np.full((40, 50), np.nan) + df = DataFrame(arr, copy=False) + + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df[0].fillna(-1, inplace=True) + assert np.isnan(arr[:, 0]).all() + else: + df[0].fillna(-1, inplace=True) + assert (arr[:, 0] == -1).all() + + # i.e. we didn't create a new 49-column block + assert len(df._mgr.arrays) == 1 + assert np.shares_memory(df.values, arr) + + def test_fillna_datetime(self, datetime_frame): + tf = datetime_frame + tf.loc[tf.index[:5], "A"] = np.nan + tf.loc[tf.index[-5:], "A"] = np.nan + + zero_filled = datetime_frame.fillna(0) + assert (zero_filled.loc[zero_filled.index[:5], "A"] == 0).all() + + msg = "DataFrame.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + padded = datetime_frame.fillna(method="pad") + assert np.isnan(padded.loc[padded.index[:5], "A"]).all() + assert ( + padded.loc[padded.index[-5:], "A"] == padded.loc[padded.index[-5], "A"] + ).all() + + msg = "Must specify a fill 'value' or 'method'" + with pytest.raises(ValueError, match=msg): + datetime_frame.fillna() + msg = "Cannot specify both 'value' and 'method'" + with pytest.raises(ValueError, match=msg): + datetime_frame.fillna(5, method="ffill") + + def test_fillna_mixed_type(self, float_string_frame): + mf = float_string_frame + mf.loc[mf.index[5:20], "foo"] = np.nan + mf.loc[mf.index[-10:], "A"] = np.nan + # TODO: make stronger assertion here, GH 25640 + mf.fillna(value=0) + msg = "DataFrame.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + mf.fillna(method="pad") + + def test_fillna_mixed_float(self, mixed_float_frame): + # mixed numeric (but no float16) + mf = mixed_float_frame.reindex(columns=["A", "B", "D"]) + mf.loc[mf.index[-10:], "A"] = np.nan + result = mf.fillna(value=0) + _check_mixed_float(result, dtype={"C": None}) + + msg = "DataFrame.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = mf.fillna(method="pad") + _check_mixed_float(result, dtype={"C": None}) + + def test_fillna_empty(self, using_copy_on_write): + if using_copy_on_write: + pytest.skip("condition is unnecessary complex and is deprecated anyway") + # empty frame (GH#2778) + df = DataFrame(columns=["x"]) + for m in ["pad", "backfill"]: + msg = "Series.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.x.fillna(method=m, inplace=True) + df.x.fillna(method=m) + + def test_fillna_different_dtype(self): + # 
with different dtype (GH#3386) + df = DataFrame( + [["a", "a", np.nan, "a"], ["b", "b", np.nan, "b"], ["c", "c", np.nan, "c"]] + ) + + result = df.fillna({2: "foo"}) + expected = DataFrame( + [["a", "a", "foo", "a"], ["b", "b", "foo", "b"], ["c", "c", "foo", "c"]] + ) + tm.assert_frame_equal(result, expected) + + return_value = df.fillna({2: "foo"}, inplace=True) + tm.assert_frame_equal(df, expected) + assert return_value is None + + def test_fillna_limit_and_value(self): + # limit and value + df = DataFrame(np.random.default_rng(2).standard_normal((10, 3))) + df.iloc[2:7, 0] = np.nan + df.iloc[3:5, 2] = np.nan + + expected = df.copy() + expected.iloc[2, 0] = 999 + expected.iloc[3, 2] = 999 + result = df.fillna(999, limit=1) + tm.assert_frame_equal(result, expected) + + def test_fillna_datelike(self): + # with datelike + # GH#6344 + df = DataFrame( + { + "Date": [NaT, Timestamp("2014-1-1")], + "Date2": [Timestamp("2013-1-1"), NaT], + } + ) + + expected = df.copy() + expected["Date"] = expected["Date"].fillna(df.loc[df.index[0], "Date2"]) + result = df.fillna(value={"Date": df["Date2"]}) + tm.assert_frame_equal(result, expected) + + def test_fillna_tzaware(self): + # with timezone + # GH#15855 + df = DataFrame({"A": [Timestamp("2012-11-11 00:00:00+01:00"), NaT]}) + exp = DataFrame( + { + "A": [ + Timestamp("2012-11-11 00:00:00+01:00"), + Timestamp("2012-11-11 00:00:00+01:00"), + ] + } + ) + msg = "DataFrame.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = df.fillna(method="pad") + tm.assert_frame_equal(res, exp) + + df = DataFrame({"A": [NaT, Timestamp("2012-11-11 00:00:00+01:00")]}) + exp = DataFrame( + { + "A": [ + Timestamp("2012-11-11 00:00:00+01:00"), + Timestamp("2012-11-11 00:00:00+01:00"), + ] + } + ) + msg = "DataFrame.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = df.fillna(method="bfill") + tm.assert_frame_equal(res, exp) + + def test_fillna_tzaware_different_column(self): + # with timezone in another column + # GH#15522 + df = DataFrame( + { + "A": date_range("20130101", periods=4, tz="US/Eastern"), + "B": [1, 2, np.nan, np.nan], + } + ) + msg = "DataFrame.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.fillna(method="pad") + expected = DataFrame( + { + "A": date_range("20130101", periods=4, tz="US/Eastern"), + "B": [1.0, 2.0, 2.0, 2.0], + } + ) + tm.assert_frame_equal(result, expected) + + def test_na_actions_categorical(self): + cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3]) + vals = ["a", "b", np.nan, "d"] + df = DataFrame({"cats": cat, "vals": vals}) + cat2 = Categorical([1, 2, 3, 3], categories=[1, 2, 3]) + vals2 = ["a", "b", "b", "d"] + df_exp_fill = DataFrame({"cats": cat2, "vals": vals2}) + cat3 = Categorical([1, 2, 3], categories=[1, 2, 3]) + vals3 = ["a", "b", np.nan] + df_exp_drop_cats = DataFrame({"cats": cat3, "vals": vals3}) + cat4 = Categorical([1, 2], categories=[1, 2, 3]) + vals4 = ["a", "b"] + df_exp_drop_all = DataFrame({"cats": cat4, "vals": vals4}) + + # fillna + res = df.fillna(value={"cats": 3, "vals": "b"}) + tm.assert_frame_equal(res, df_exp_fill) + + msg = "Cannot setitem on a Categorical with a new category" + with pytest.raises(TypeError, match=msg): + df.fillna(value={"cats": 4, "vals": "c"}) + + msg = "DataFrame.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = df.fillna(method="pad") + 
tm.assert_frame_equal(res, df_exp_fill) + + # dropna + res = df.dropna(subset=["cats"]) + tm.assert_frame_equal(res, df_exp_drop_cats) + + res = df.dropna() + tm.assert_frame_equal(res, df_exp_drop_all) + + # make sure that fillna takes missing values into account + c = Categorical([np.nan, "b", np.nan], categories=["a", "b"]) + df = DataFrame({"cats": c, "vals": [1, 2, 3]}) + + cat_exp = Categorical(["a", "b", "a"], categories=["a", "b"]) + df_exp = DataFrame({"cats": cat_exp, "vals": [1, 2, 3]}) + + res = df.fillna("a") + tm.assert_frame_equal(res, df_exp) + + def test_fillna_categorical_nan(self): + # GH#14021 + # np.nan should always be a valid filler + cat = Categorical([np.nan, 2, np.nan]) + val = Categorical([np.nan, np.nan, np.nan]) + df = DataFrame({"cats": cat, "vals": val}) + + # GH#32950 df.median() is poorly behaved because there is no + # Categorical.median + median = Series({"cats": 2.0, "vals": np.nan}) + + res = df.fillna(median) + v_exp = [np.nan, np.nan, np.nan] + df_exp = DataFrame({"cats": [2, 2, 2], "vals": v_exp}, dtype="category") + tm.assert_frame_equal(res, df_exp) + + result = df.cats.fillna(np.nan) + tm.assert_series_equal(result, df.cats) + + result = df.vals.fillna(np.nan) + tm.assert_series_equal(result, df.vals) + + idx = DatetimeIndex( + ["2011-01-01 09:00", "2016-01-01 23:45", "2011-01-01 09:00", NaT, NaT] + ) + df = DataFrame({"a": Categorical(idx)}) + tm.assert_frame_equal(df.fillna(value=NaT), df) + + idx = PeriodIndex(["2011-01", "2011-01", "2011-01", NaT, NaT], freq="M") + df = DataFrame({"a": Categorical(idx)}) + tm.assert_frame_equal(df.fillna(value=NaT), df) + + idx = TimedeltaIndex(["1 days", "2 days", "1 days", NaT, NaT]) + df = DataFrame({"a": Categorical(idx)}) + tm.assert_frame_equal(df.fillna(value=NaT), df) + + def test_fillna_downcast(self): + # GH#15277 + # infer int64 from float64 + df = DataFrame({"a": [1.0, np.nan]}) + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.fillna(0, downcast="infer") + expected = DataFrame({"a": [1, 0]}) + tm.assert_frame_equal(result, expected) + + # infer int64 from float64 when fillna value is a dict + df = DataFrame({"a": [1.0, np.nan]}) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.fillna({"a": 0}, downcast="infer") + expected = DataFrame({"a": [1, 0]}) + tm.assert_frame_equal(result, expected) + + def test_fillna_downcast_false(self, frame_or_series): + # GH#45603 preserve object dtype with downcast=False + obj = frame_or_series([1, 2, 3], dtype="object") + msg = "The 'downcast' keyword in fillna" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = obj.fillna("", downcast=False) + tm.assert_equal(result, obj) + + def test_fillna_downcast_noop(self, frame_or_series): + # GH#45423 + # Two relevant paths: + # 1) not _can_hold_na (e.g. 
integer) + # 2) _can_hold_na + noop + not can_hold_element + + obj = frame_or_series([1, 2, 3], dtype=np.int64) + + msg = "The 'downcast' keyword in fillna" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#40988 + res = obj.fillna("foo", downcast=np.dtype(np.int32)) + expected = obj.astype(np.int32) + tm.assert_equal(res, expected) + + obj2 = obj.astype(np.float64) + with tm.assert_produces_warning(FutureWarning, match=msg): + res2 = obj2.fillna("foo", downcast="infer") + expected2 = obj # get back int64 + tm.assert_equal(res2, expected2) + + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#40988 + res3 = obj2.fillna("foo", downcast=np.dtype(np.int32)) + tm.assert_equal(res3, expected) + + @pytest.mark.parametrize("columns", [["A", "A", "B"], ["A", "A"]]) + def test_fillna_dictlike_value_duplicate_colnames(self, columns): + # GH#43476 + df = DataFrame(np.nan, index=[0, 1], columns=columns) + with tm.assert_produces_warning(None): + result = df.fillna({"A": 0}) + + expected = df.copy() + expected["A"] = 0.0 + tm.assert_frame_equal(result, expected) + + def test_fillna_dtype_conversion(self): + # make sure that fillna on an empty frame works + df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5]) + result = df.dtypes + expected = Series([np.dtype("object")] * 5, index=[1, 2, 3, 4, 5]) + tm.assert_series_equal(result, expected) + + result = df.fillna(1) + expected = DataFrame(1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 5]) + tm.assert_frame_equal(result, expected) + + # empty block + df = DataFrame(index=range(3), columns=["A", "B"], dtype="float64") + result = df.fillna("nan") + expected = DataFrame("nan", index=range(3), columns=["A", "B"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("val", ["", 1, np.nan, 1.0]) + def test_fillna_dtype_conversion_equiv_replace(self, val): + df = DataFrame({"A": [1, np.nan], "B": [1.0, 2.0]}) + expected = df.replace(np.nan, val) + result = df.fillna(val) + tm.assert_frame_equal(result, expected) + + def test_fillna_datetime_columns(self): + # GH#7095 + df = DataFrame( + { + "A": [-1, -2, np.nan], + "B": date_range("20130101", periods=3), + "C": ["foo", "bar", None], + "D": ["foo2", "bar2", None], + }, + index=date_range("20130110", periods=3), + ) + result = df.fillna("?") + expected = DataFrame( + { + "A": [-1, -2, "?"], + "B": date_range("20130101", periods=3), + "C": ["foo", "bar", "?"], + "D": ["foo2", "bar2", "?"], + }, + index=date_range("20130110", periods=3), + ) + tm.assert_frame_equal(result, expected) + + df = DataFrame( + { + "A": [-1, -2, np.nan], + "B": [Timestamp("2013-01-01"), Timestamp("2013-01-02"), NaT], + "C": ["foo", "bar", None], + "D": ["foo2", "bar2", None], + }, + index=date_range("20130110", periods=3), + ) + result = df.fillna("?") + expected = DataFrame( + { + "A": [-1, -2, "?"], + "B": [Timestamp("2013-01-01"), Timestamp("2013-01-02"), "?"], + "C": ["foo", "bar", "?"], + "D": ["foo2", "bar2", "?"], + }, + index=date_range("20130110", periods=3), + ) + tm.assert_frame_equal(result, expected) + + def test_ffill(self, datetime_frame): + datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan + datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan + + msg = "DataFrame.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + alt = datetime_frame.fillna(method="ffill") + tm.assert_frame_equal(datetime_frame.ffill(), alt) + + def test_bfill(self, datetime_frame): + datetime_frame.loc[datetime_frame.index[:5], 
"A"] = np.nan + datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan + + msg = "DataFrame.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + alt = datetime_frame.fillna(method="bfill") + + tm.assert_frame_equal(datetime_frame.bfill(), alt) + + def test_frame_pad_backfill_limit(self): + index = np.arange(10) + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)), index=index) + + result = df[:2].reindex(index, method="pad", limit=5) + + msg = "DataFrame.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df[:2].reindex(index).fillna(method="pad") + expected.iloc[-3:] = np.nan + tm.assert_frame_equal(result, expected) + + result = df[-2:].reindex(index, method="backfill", limit=5) + + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df[-2:].reindex(index).fillna(method="backfill") + expected.iloc[:3] = np.nan + tm.assert_frame_equal(result, expected) + + def test_frame_fillna_limit(self): + index = np.arange(10) + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)), index=index) + + result = df[:2].reindex(index) + msg = "DataFrame.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = result.fillna(method="pad", limit=5) + + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df[:2].reindex(index).fillna(method="pad") + expected.iloc[-3:] = np.nan + tm.assert_frame_equal(result, expected) + + result = df[-2:].reindex(index) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = result.fillna(method="backfill", limit=5) + + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df[-2:].reindex(index).fillna(method="backfill") + expected.iloc[:3] = np.nan + tm.assert_frame_equal(result, expected) + + def test_fillna_skip_certain_blocks(self): + # don't try to fill boolean, int blocks + + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)).astype(int)) + + # it works! 
+        df.fillna(np.nan)
+
+    @pytest.mark.parametrize("type", [int, float])
+    def test_fillna_positive_limit(self, type):
+        df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))).astype(type)
+
+        msg = "Limit must be greater than 0"
+        with pytest.raises(ValueError, match=msg):
+            df.fillna(0, limit=-5)
+
+    @pytest.mark.parametrize("type", [int, float])
+    def test_fillna_integer_limit(self, type):
+        df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))).astype(type)
+
+        msg = "Limit must be an integer"
+        with pytest.raises(ValueError, match=msg):
+            df.fillna(0, limit=0.5)
+
+    def test_fillna_inplace(self):
+        df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
+        df.loc[:4, 1] = np.nan
+        df.loc[-4:, 3] = np.nan
+
+        expected = df.fillna(value=0)
+        assert expected is not df
+
+        df.fillna(value=0, inplace=True)
+        tm.assert_frame_equal(df, expected)
+
+        expected = df.fillna(value={0: 0}, inplace=True)
+        assert expected is None
+
+        df.loc[:4, 1] = np.nan
+        df.loc[-4:, 3] = np.nan
+        msg = "DataFrame.fillna with 'method' is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            expected = df.fillna(method="ffill")
+        assert expected is not df
+
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            df.fillna(method="ffill", inplace=True)
+        tm.assert_frame_equal(df, expected)
+
+    def test_fillna_dict_series(self):
+        df = DataFrame(
+            {
+                "a": [np.nan, 1, 2, np.nan, np.nan],
+                "b": [1, 2, 3, np.nan, np.nan],
+                "c": [np.nan, 1, 2, 3, 4],
+            }
+        )
+
+        result = df.fillna({"a": 0, "b": 5})
+
+        expected = df.copy()
+        expected["a"] = expected["a"].fillna(0)
+        expected["b"] = expected["b"].fillna(5)
+        tm.assert_frame_equal(result, expected)
+
+        # it works
+        result = df.fillna({"a": 0, "b": 5, "d": 7})
+
+        # Series treated same as dict
+        result = df.fillna(df.max())
+        expected = df.fillna(df.max().to_dict())
+        tm.assert_frame_equal(result, expected)
+
+        # disable this for now
+        with pytest.raises(NotImplementedError, match="column by column"):
+            df.fillna(df.max(1), axis=1)
+
+    def test_fillna_dataframe(self):
+        # GH#8377
+        df = DataFrame(
+            {
+                "a": [np.nan, 1, 2, np.nan, np.nan],
+                "b": [1, 2, 3, np.nan, np.nan],
+                "c": [np.nan, 1, 2, 3, 4],
+            },
+            index=list("VWXYZ"),
+        )
+
+        # df2 may have different index and columns
+        df2 = DataFrame(
+            {
+                "a": [np.nan, 10, 20, 30, 40],
+                "b": [50, 60, 70, 80, 90],
+                "foo": ["bar"] * 5,
+            },
+            index=list("VWXuZ"),
+        )
+
+        result = df.fillna(df2)
+
+        # only those columns and indices which are shared get filled
+        expected = DataFrame(
+            {
+                "a": [np.nan, 1, 2, np.nan, 40],
+                "b": [1, 2, 3, np.nan, 90],
+                "c": [np.nan, 1, 2, 3, 4],
+            },
+            index=list("VWXYZ"),
+        )
+
+        tm.assert_frame_equal(result, expected)
+
+    def test_fillna_columns(self):
+        arr = np.random.default_rng(2).standard_normal((10, 10))
+        arr[:, ::2] = np.nan
+        df = DataFrame(arr)
+
+        msg = "DataFrame.fillna with 'method' is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df.fillna(method="ffill", axis=1)
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            expected = df.T.fillna(method="pad").T
+        tm.assert_frame_equal(result, expected)
+
+        df.insert(6, "foo", 5)
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df.fillna(method="ffill", axis=1)
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            expected = df.astype(float).fillna(method="ffill", axis=1)
+        tm.assert_frame_equal(result, expected)
+
+    def test_fillna_invalid_method(self, float_frame):
+        with pytest.raises(ValueError, match="ffil"):
+            float_frame.fillna(method="ffil")
+
+    def test_fillna_invalid_value(self, float_frame):
+        # list
+        msg = '"value" parameter must be a scalar or dict, but you passed a "{}"'
+        with pytest.raises(TypeError, match=msg.format("list")):
+            float_frame.fillna([1, 2])
+        # tuple
+        with pytest.raises(TypeError, match=msg.format("tuple")):
+            float_frame.fillna((1, 2))
+        # frame with series
+        msg = (
+            '"value" parameter must be a scalar, dict or Series, but you '
+            'passed a "DataFrame"'
+        )
+        with pytest.raises(TypeError, match=msg):
+            float_frame.iloc[:, 0].fillna(float_frame)
+
+    def test_fillna_col_reordering(self):
+        cols = ["COL." + str(i) for i in range(5, 0, -1)]
+        data = np.random.default_rng(2).random((20, 5))
+        df = DataFrame(index=range(20), columns=cols, data=data)
+        msg = "DataFrame.fillna with 'method' is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            filled = df.fillna(method="ffill")
+        assert df.columns.tolist() == filled.columns.tolist()
+
+    def test_fill_corner(self, float_frame, float_string_frame):
+        mf = float_string_frame
+        mf.loc[mf.index[5:20], "foo"] = np.nan
+        mf.loc[mf.index[-10:], "A"] = np.nan
+
+        filled = float_string_frame.fillna(value=0)
+        assert (filled.loc[filled.index[5:20], "foo"] == 0).all()
+        del float_string_frame["foo"]
+
+        float_frame.reindex(columns=[]).fillna(value=0)
+
+    def test_fillna_downcast_dict(self):
+        # GH#40809
+        df = DataFrame({"col1": [1, np.nan]})
+
+        msg = "The 'downcast' keyword in fillna"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df.fillna({"col1": 2}, downcast={"col1": "int64"})
+        expected = DataFrame({"col1": [1, 2]})
+        tm.assert_frame_equal(result, expected)
+
+    def test_fillna_with_columns_and_limit(self):
+        # GH40989
+        df = DataFrame(
+            [
+                [np.nan, 2, np.nan, 0],
+                [3, 4, np.nan, 1],
+                [np.nan, np.nan, np.nan, 5],
+                [np.nan, 3, np.nan, 4],
+            ],
+            columns=list("ABCD"),
+        )
+        result = df.fillna(axis=1, value=100, limit=1)
+        result2 = df.fillna(axis=1, value=100, limit=2)
+
+        expected = DataFrame(
+            {
+                "A": Series([100, 3, 100, 100], dtype="float64"),
+                "B": [2, 4, np.nan, 3],
+                "C": [np.nan, 100, np.nan, np.nan],
+                "D": Series([0, 1, 5, 4], dtype="float64"),
+            },
+            index=[0, 1, 2, 3],
+        )
+        expected2 = DataFrame(
+            {
+                "A": Series([100, 3, 100, 100], dtype="float64"),
+                "B": Series([2, 4, 100, 3], dtype="float64"),
+                "C": [100, 100, np.nan, 100],
+                "D": Series([0, 1, 5, 4], dtype="float64"),
+            },
+            index=[0, 1, 2, 3],
+        )
+
+        tm.assert_frame_equal(result, expected)
+        tm.assert_frame_equal(result2, expected2)
+
+    def test_fillna_datetime_inplace(self):
+        # GH#48863
+        df = DataFrame(
+            {
+                "date1": to_datetime(["2018-05-30", None]),
+                "date2": to_datetime(["2018-09-30", None]),
+            }
+        )
+        expected = df.copy()
+        df.fillna(np.nan, inplace=True)
+        tm.assert_frame_equal(df, expected)
+
+    def test_fillna_inplace_with_columns_limit_and_value(self):
+        # GH40989
+        df = DataFrame(
+            [
+                [np.nan, 2, np.nan, 0],
+                [3, 4, np.nan, 1],
+                [np.nan, np.nan, np.nan, 5],
+                [np.nan, 3, np.nan, 4],
+            ],
+            columns=list("ABCD"),
+        )
+
+        expected = df.fillna(axis=1, value=100, limit=1)
+        assert expected is not df
+
+        df.fillna(axis=1, value=100, limit=1, inplace=True)
+        tm.assert_frame_equal(df, expected)
+
+    @td.skip_array_manager_invalid_test
+    @pytest.mark.parametrize("val", [-1, {"x": -1, "y": -1}])
+    def test_inplace_dict_update_view(self, val, using_copy_on_write):
+        # GH#47188
+        df = DataFrame({"x": [np.nan, 2], "y": [np.nan, 2]})
+        df_orig = df.copy()
+        result_view = df[:]
+        df.fillna(val, inplace=True)
+        expected = DataFrame({"x": [-1, 2.0], "y": [-1.0, 2]})
+        tm.assert_frame_equal(df, expected)
+        if using_copy_on_write:
+            tm.assert_frame_equal(result_view, df_orig)
+        else:
+            tm.assert_frame_equal(result_view, expected)
+
+    def test_single_block_df_with_horizontal_axis(self):
+        # GH 47713
+        df = DataFrame(
+            {
+                "col1": [5, 0, np.nan, 10, np.nan],
+                "col2": [7, np.nan, np.nan, 5, 3],
+                "col3": [12, np.nan, 1, 2, 0],
+                "col4": [np.nan, 1, 1, np.nan, 18],
+            }
+        )
+        result = df.fillna(50, limit=1, axis=1)
+        expected = DataFrame(
+            [
+                [5.0, 7.0, 12.0, 50.0],
+                [0.0, 50.0, np.nan, 1.0],
+                [50.0, np.nan, 1.0, 1.0],
+                [10.0, 5.0, 2.0, 50.0],
+                [50.0, 3.0, 0.0, 18.0],
+            ],
+            columns=["col1", "col2", "col3", "col4"],
+        )
+        tm.assert_frame_equal(result, expected)
+
+    def test_fillna_with_multi_index_frame(self):
+        # GH 47649
+        pdf = DataFrame(
+            {
+                ("x", "a"): [np.nan, 2.0, 3.0],
+                ("x", "b"): [1.0, 2.0, np.nan],
+                ("y", "c"): [1.0, 2.0, np.nan],
+            }
+        )
+        expected = DataFrame(
+            {
+                ("x", "a"): [-1.0, 2.0, 3.0],
+                ("x", "b"): [1.0, 2.0, -1.0],
+                ("y", "c"): [1.0, 2.0, np.nan],
+            }
+        )
+        tm.assert_frame_equal(pdf.fillna({"x": -1}), expected)
+        tm.assert_frame_equal(pdf.fillna({"x": -1, ("x", "b"): -2}), expected)
+
+        expected = DataFrame(
+            {
+                ("x", "a"): [-1.0, 2.0, 3.0],
+                ("x", "b"): [1.0, 2.0, -2.0],
+                ("y", "c"): [1.0, 2.0, np.nan],
+            }
+        )
+        tm.assert_frame_equal(pdf.fillna({("x", "b"): -2, "x": -1}), expected)
+
+
+def test_fillna_nonconsolidated_frame():
+    # https://github.com/pandas-dev/pandas/issues/36495
+    df = DataFrame(
+        [
+            [1, 1, 1, 1.0],
+            [2, 2, 2, 2.0],
+            [3, 3, 3, 3.0],
+        ],
+        columns=["i1", "i2", "i3", "f1"],
+    )
+    df_nonconsol = df.pivot(index="i1", columns="i2")
+    result = df_nonconsol.fillna(0)
+    assert result.isna().sum().sum() == 0
+
+
+def test_fillna_nones_inplace():
+    # GH 48480
+    df = DataFrame(
+        [[None, None], [None, None]],
+        columns=["A", "B"],
+    )
+    with tm.assert_produces_warning(False):
+        df.fillna(value={"A": 1, "B": 2}, inplace=True)
+
+    expected = DataFrame([[1, 2], [1, 2]], columns=["A", "B"])
+    tm.assert_frame_equal(df, expected)
+
+
+@pytest.mark.parametrize("func", ["pad", "backfill"])
+def test_pad_backfill_deprecated(func):
+    # GH#33396
+    df = DataFrame({"a": [1, 2, 3]})
+    with tm.assert_produces_warning(FutureWarning):
+        getattr(df, func)()
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_filter.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_filter.py
new file mode 100644
index 00000000..9d5e6876
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_filter.py
@@ -0,0 +1,153 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+class TestDataFrameFilter:
+    def test_filter(self, float_frame, float_string_frame):
+        # Items
+        filtered = float_frame.filter(["A", "B", "E"])
+        assert len(filtered.columns) == 2
+        assert "E" not in filtered
+
+        filtered = float_frame.filter(["A", "B", "E"], axis="columns")
+        assert len(filtered.columns) == 2
+        assert "E" not in filtered
+
+        # Other axis
+        idx = float_frame.index[0:4]
+        filtered = float_frame.filter(idx, axis="index")
+        expected = float_frame.reindex(index=idx)
+        tm.assert_frame_equal(filtered, expected)
+
+        # like
+        fcopy = float_frame.copy()
+        fcopy["AA"] = 1
+
+        filtered = fcopy.filter(like="A")
+        assert len(filtered.columns) == 2
+        assert "AA" in filtered
+
+        # like with ints in column names
+        df = DataFrame(0.0, index=[0, 1, 2], columns=[0, 1, "_A", "_B"])
+        filtered = df.filter(like="_")
+        assert len(filtered.columns) == 2
+
+        # regex with ints in column names
+        # from PR #10384
+        df = DataFrame(0.0, index=[0, 1, 2], columns=["A1", 1, "B", 2, "C"])
+        expected = DataFrame(
+            0.0, index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object)
+        )
+        filtered = df.filter(regex="^[0-9]+$")
+        tm.assert_frame_equal(filtered, expected)
+
+        expected = DataFrame(0.0, index=[0, 1, 2], columns=[0, "0", 1, "1"])
+        # shouldn't remove anything
+        filtered = expected.filter(regex="^[0-9]+$")
+        tm.assert_frame_equal(filtered, expected)
+
+        # pass in None
+        with pytest.raises(TypeError, match="Must pass"):
+            float_frame.filter()
+        with pytest.raises(TypeError, match="Must pass"):
+            float_frame.filter(items=None)
+        with pytest.raises(TypeError, match="Must pass"):
+            float_frame.filter(axis=1)
+
+        # test mutually exclusive arguments
+        with pytest.raises(TypeError, match="mutually exclusive"):
+            float_frame.filter(items=["one", "three"], regex="e$", like="bbi")
+        with pytest.raises(TypeError, match="mutually exclusive"):
+            float_frame.filter(items=["one", "three"], regex="e$", axis=1)
+        with pytest.raises(TypeError, match="mutually exclusive"):
+            float_frame.filter(items=["one", "three"], regex="e$")
+        with pytest.raises(TypeError, match="mutually exclusive"):
+            float_frame.filter(items=["one", "three"], like="bbi", axis=0)
+        with pytest.raises(TypeError, match="mutually exclusive"):
+            float_frame.filter(items=["one", "three"], like="bbi")
+
+        # objects
+        filtered = float_string_frame.filter(like="foo")
+        assert "foo" in filtered
+
+        # unicode columns, won't ascii-encode
+        df = float_frame.rename(columns={"B": "\u2202"})
+        filtered = df.filter(like="C")
+        assert "C" in filtered
+
+    def test_filter_regex_search(self, float_frame):
+        fcopy = float_frame.copy()
+        fcopy["AA"] = 1
+
+        # regex
+        filtered = fcopy.filter(regex="[A]+")
+        assert len(filtered.columns) == 2
+        assert "AA" in filtered
+
+        # doesn't have to be at beginning
+        df = DataFrame(
+            {"aBBa": [1, 2], "BBaBB": [1, 2], "aCCa": [1, 2], "aCCaBB": [1, 2]}
+        )
+
+        result = df.filter(regex="BB")
+        exp = df[[x for x in df.columns if "BB" in x]]
+        tm.assert_frame_equal(result, exp)
+
+    @pytest.mark.parametrize(
+        "name,expected",
+        [
+            ("a", DataFrame({"a": [1, 2]})),
+            ("a", DataFrame({"a": [1, 2]})),
+            ("あ", DataFrame({"あ": [3, 4]})),
+        ],
+    )
+    def test_filter_unicode(self, name, expected):
+        # GH13101
+        df = DataFrame({"a": [1, 2], "あ": [3, 4]})
+
+        tm.assert_frame_equal(df.filter(like=name), expected)
+        tm.assert_frame_equal(df.filter(regex=name), expected)
+
+    @pytest.mark.parametrize("name", ["a", "a"])
+    def test_filter_bytestring(self, name):
+        # GH13101
+        df = DataFrame({b"a": [1, 2], b"b": [3, 4]})
+        expected = DataFrame({b"a": [1, 2]})
+
+        tm.assert_frame_equal(df.filter(like=name), expected)
+        tm.assert_frame_equal(df.filter(regex=name), expected)
+
+    def test_filter_corner(self):
+        empty = DataFrame()
+
+        result = empty.filter([])
+        tm.assert_frame_equal(result, empty)
+
+        result = empty.filter(like="foo")
+        tm.assert_frame_equal(result, empty)
+
+    def test_filter_regex_non_string(self):
+        # GH#5798 trying to filter on non-string columns should drop,
+        # not raise
+        df = DataFrame(np.random.default_rng(2).random((3, 2)), columns=["STRING", 123])
+        result = df.filter(regex="STRING")
+        expected = df[["STRING"]]
+        tm.assert_frame_equal(result, expected)
+
+    def test_filter_keep_order(self):
+        # GH#54980
+        df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
3], "B": [4, 5, 6]}) + result = df.filter(items=["B", "A"]) + expected = df[["B", "A"]] + tm.assert_frame_equal(result, expected) + + def test_filter_different_dtype(self): + # GH#54980 + df = DataFrame({1: [1, 2, 3], 2: [4, 5, 6]}) + result = df.filter(items=["B", "A"]) + expected = df[[]] + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_first_and_last.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_first_and_last.py new file mode 100644 index 00000000..2e85edc7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_first_and_last.py @@ -0,0 +1,124 @@ +""" +Note: includes tests for `last` +""" +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + bdate_range, +) +import pandas._testing as tm + +deprecated_msg = "first is deprecated" +last_deprecated_msg = "last is deprecated" + + +class TestFirst: + def test_first_subset(self, frame_or_series): + ts = tm.makeTimeDataFrame(freq="12h") + ts = tm.get_obj(ts, frame_or_series) + with tm.assert_produces_warning(FutureWarning, match=deprecated_msg): + result = ts.first("10d") + assert len(result) == 20 + + ts = tm.makeTimeDataFrame(freq="D") + ts = tm.get_obj(ts, frame_or_series) + with tm.assert_produces_warning(FutureWarning, match=deprecated_msg): + result = ts.first("10d") + assert len(result) == 10 + + with tm.assert_produces_warning(FutureWarning, match=deprecated_msg): + result = ts.first("3M") + expected = ts[:"3/31/2000"] + tm.assert_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=deprecated_msg): + result = ts.first("21D") + expected = ts[:21] + tm.assert_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=deprecated_msg): + result = ts[:0].first("3M") + tm.assert_equal(result, ts[:0]) + + def test_first_last_raises(self, frame_or_series): + # GH#20725 + obj = DataFrame([[1, 2, 3], [4, 5, 6]]) + obj = tm.get_obj(obj, frame_or_series) + + msg = "'first' only supports a DatetimeIndex index" + with tm.assert_produces_warning( + FutureWarning, match=deprecated_msg + ), pytest.raises( + TypeError, match=msg + ): # index is not a DatetimeIndex + obj.first("1D") + + msg = "'last' only supports a DatetimeIndex index" + with tm.assert_produces_warning( + FutureWarning, match=last_deprecated_msg + ), pytest.raises( + TypeError, match=msg + ): # index is not a DatetimeIndex + obj.last("1D") + + def test_last_subset(self, frame_or_series): + ts = tm.makeTimeDataFrame(freq="12h") + ts = tm.get_obj(ts, frame_or_series) + with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg): + result = ts.last("10d") + assert len(result) == 20 + + ts = tm.makeTimeDataFrame(nper=30, freq="D") + ts = tm.get_obj(ts, frame_or_series) + with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg): + result = ts.last("10d") + assert len(result) == 10 + + with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg): + result = ts.last("21D") + expected = ts["2000-01-10":] + tm.assert_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg): + result = ts.last("21D") + expected = ts[-21:] + tm.assert_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg): + result = ts[:0].last("3M") + tm.assert_equal(result, ts[:0]) + + @pytest.mark.parametrize("start, periods", [("2010-03-31", 1), ("2010-03-30", 2)]) + def 
+        # GH#29623
+        x = frame_or_series([1] * 100, index=bdate_range(start, periods=100))
+        with tm.assert_produces_warning(FutureWarning, match=deprecated_msg):
+            result = x.first("1M")
+        expected = frame_or_series(
+            [1] * periods, index=bdate_range(start, periods=periods)
+        )
+        tm.assert_equal(result, expected)
+
+    def test_first_with_first_day_end_of_frq_n_greater_one(self, frame_or_series):
+        # GH#29623
+        x = frame_or_series([1] * 100, index=bdate_range("2010-03-31", periods=100))
+        with tm.assert_produces_warning(FutureWarning, match=deprecated_msg):
+            result = x.first("2M")
+        expected = frame_or_series(
+            [1] * 23, index=bdate_range("2010-03-31", "2010-04-30")
+        )
+        tm.assert_equal(result, expected)
+
+    def test_empty_not_input(self):
+        # GH#51032
+        df = DataFrame(index=pd.DatetimeIndex([]))
+        with tm.assert_produces_warning(FutureWarning, match=last_deprecated_msg):
+            result = df.last(offset=1)
+
+        with tm.assert_produces_warning(FutureWarning, match=deprecated_msg):
+            result = df.first(offset=1)
+
+        tm.assert_frame_equal(df, result)
+        assert df is not result
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_first_valid_index.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_first_valid_index.py
new file mode 100644
index 00000000..a448768f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_first_valid_index.py
@@ -0,0 +1,74 @@
+"""
+Includes test for last_valid_index.
+"""
+import numpy as np
+import pytest
+
+from pandas import (
+    DataFrame,
+    Series,
+)
+import pandas._testing as tm
+
+
+class TestFirstValidIndex:
+    def test_first_valid_index_single_nan(self, frame_or_series):
+        # GH#9752 Series/DataFrame should both return None, not raise
+        obj = frame_or_series([np.nan])
+
+        assert obj.first_valid_index() is None
+        assert obj.iloc[:0].first_valid_index() is None
+
+    @pytest.mark.parametrize(
+        "empty", [DataFrame(), Series(dtype=object), Series([], index=[], dtype=object)]
+    )
+    def test_first_valid_index_empty(self, empty):
+        # GH#12800
+        assert empty.last_valid_index() is None
+        assert empty.first_valid_index() is None
+
+    @pytest.mark.parametrize(
+        "data,idx,expected_first,expected_last",
+        [
+            ({"A": [1, 2, 3]}, [1, 1, 2], 1, 2),
+            ({"A": [1, 2, 3]}, [1, 2, 2], 1, 2),
+            ({"A": [1, 2, 3, 4]}, ["d", "d", "d", "d"], "d", "d"),
+            ({"A": [1, np.nan, 3]}, [1, 1, 2], 1, 2),
+            ({"A": [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
+            ({"A": [1, np.nan, 3]}, [1, 2, 2], 1, 2),
+        ],
+    )
+    def test_first_last_valid_frame(self, data, idx, expected_first, expected_last):
+        # GH#21441
+        df = DataFrame(data, index=idx)
+        assert expected_first == df.first_valid_index()
+        assert expected_last == df.last_valid_index()
+
+    @pytest.mark.parametrize("index_func", [tm.makeStringIndex, tm.makeDateIndex])
+    def test_first_last_valid(self, index_func):
+        N = 30
+        index = index_func(N)
+        mat = np.random.default_rng(2).standard_normal(N)
+        mat[:5] = np.nan
+        mat[-5:] = np.nan
+
+        frame = DataFrame({"foo": mat}, index=index)
+        assert frame.first_valid_index() == frame.index[5]
+        assert frame.last_valid_index() == frame.index[-6]
+
+        ser = frame["foo"]
+        assert ser.first_valid_index() == frame.index[5]
+        assert ser.last_valid_index() == frame.index[-6]
+
+    @pytest.mark.parametrize("index_func", [tm.makeStringIndex, tm.makeDateIndex])
+    def test_first_last_valid_all_nan(self, index_func):
+        # GH#17400: no valid entries
+        index = index_func(30)
+        frame = DataFrame(np.nan, columns=["foo"], index=index)
+
+        assert frame.last_valid_index() is None
+        assert frame.first_valid_index() is None
+
+        ser = frame["foo"]
+        assert ser.first_valid_index() is None
+        assert ser.last_valid_index() is None
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_get_numeric_data.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_get_numeric_data.py
new file mode 100644
index 00000000..ec1c7686
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_get_numeric_data.py
@@ -0,0 +1,102 @@
+import numpy as np
+
+import pandas as pd
+from pandas import (
+    Categorical,
+    DataFrame,
+    Index,
+    Series,
+    Timestamp,
+)
+import pandas._testing as tm
+from pandas.core.arrays import IntervalArray
+
+
+class TestGetNumericData:
+    def test_get_numeric_data_preserve_dtype(self):
+        # get the numeric data
+        obj = DataFrame({"A": [1, "2", 3.0]})
+        result = obj._get_numeric_data()
+        expected = DataFrame(dtype=object, index=pd.RangeIndex(3), columns=[])
+        tm.assert_frame_equal(result, expected)
+
+    def test_get_numeric_data(self):
+        datetime64name = np.dtype("M8[s]").name
+        objectname = np.dtype(np.object_).name
+
+        df = DataFrame(
+            {"a": 1.0, "b": 2, "c": "foo", "f": Timestamp("20010102")},
+            index=np.arange(10),
+        )
+        result = df.dtypes
+        expected = Series(
+            [
+                np.dtype("float64"),
+                np.dtype("int64"),
+                np.dtype(objectname),
+                np.dtype(datetime64name),
+            ],
+            index=["a", "b", "c", "f"],
+        )
+        tm.assert_series_equal(result, expected)
+
+        df = DataFrame(
+            {
+                "a": 1.0,
+                "b": 2,
+                "c": "foo",
+                "d": np.array([1.0] * 10, dtype="float32"),
+                "e": np.array([1] * 10, dtype="int32"),
+                "f": np.array([1] * 10, dtype="int16"),
+                "g": Timestamp("20010102"),
+            },
+            index=np.arange(10),
+        )
+
+        result = df._get_numeric_data()
+        expected = df.loc[:, ["a", "b", "d", "e", "f"]]
+        tm.assert_frame_equal(result, expected)
+
+        only_obj = df.loc[:, ["c", "g"]]
+        result = only_obj._get_numeric_data()
+        expected = df.loc[:, []]
+        tm.assert_frame_equal(result, expected)
+
+        df = DataFrame.from_dict({"a": [1, 2], "b": ["foo", "bar"], "c": [np.pi, np.e]})
+        result = df._get_numeric_data()
+        expected = DataFrame.from_dict({"a": [1, 2], "c": [np.pi, np.e]})
+        tm.assert_frame_equal(result, expected)
+
+        df = result.copy()
+        result = df._get_numeric_data()
+        expected = df
+        tm.assert_frame_equal(result, expected)
+
+    def test_get_numeric_data_mixed_dtype(self):
+        # numeric and object columns
+
+        df = DataFrame(
+            {
+                "a": [1, 2, 3],
+                "b": [True, False, True],
+                "c": ["foo", "bar", "baz"],
+                "d": [None, None, None],
+                "e": [3.14, 0.577, 2.773],
+            }
+        )
+        result = df._get_numeric_data()
+        tm.assert_index_equal(result.columns, Index(["a", "b", "e"]))
+
+    def test_get_numeric_data_extension_dtype(self):
+        # GH#22290
+        df = DataFrame(
+            {
+                "A": pd.array([-10, np.nan, 0, 10, 20, 30], dtype="Int64"),
+                "B": Categorical(list("abcabc")),
+                "C": pd.array([0, 1, 2, 3, np.nan, 5], dtype="UInt8"),
+                "D": IntervalArray.from_breaks(range(7)),
+            }
+        )
+        result = df._get_numeric_data()
+        expected = df.loc[:, ["A", "C"]]
+        tm.assert_frame_equal(result, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_head_tail.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_head_tail.py
new file mode 100644
index 00000000..9363c4d7
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_head_tail.py
@@ -0,0 +1,57 @@
+import numpy as np
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+def test_head_tail_generic(index, frame_or_series):
+    # GH#5370
+
+    ndim = 2 if frame_or_series is DataFrame else 1
+    shape = (len(index),) * ndim
+    vals = np.random.default_rng(2).standard_normal(shape)
+    obj = frame_or_series(vals, index=index)
+
+    tm.assert_equal(obj.head(), obj.iloc[:5])
+    tm.assert_equal(obj.tail(), obj.iloc[-5:])
+
+    # 0-len
+    tm.assert_equal(obj.head(0), obj.iloc[0:0])
+    tm.assert_equal(obj.tail(0), obj.iloc[0:0])
+
+    # bounded
+    tm.assert_equal(obj.head(len(obj) + 1), obj)
+    tm.assert_equal(obj.tail(len(obj) + 1), obj)
+
+    # neg index
+    tm.assert_equal(obj.head(-3), obj.head(len(index) - 3))
+    tm.assert_equal(obj.tail(-3), obj.tail(len(index) - 3))
+
+
+def test_head_tail(float_frame):
+    tm.assert_frame_equal(float_frame.head(), float_frame[:5])
+    tm.assert_frame_equal(float_frame.tail(), float_frame[-5:])
+
+    tm.assert_frame_equal(float_frame.head(0), float_frame[0:0])
+    tm.assert_frame_equal(float_frame.tail(0), float_frame[0:0])
+
+    tm.assert_frame_equal(float_frame.head(-1), float_frame[:-1])
+    tm.assert_frame_equal(float_frame.tail(-1), float_frame[1:])
+    tm.assert_frame_equal(float_frame.head(1), float_frame[:1])
+    tm.assert_frame_equal(float_frame.tail(1), float_frame[-1:])
+    # with a float index
+    df = float_frame.copy()
+    df.index = np.arange(len(float_frame)) + 0.1
+    tm.assert_frame_equal(df.head(), df.iloc[:5])
+    tm.assert_frame_equal(df.tail(), df.iloc[-5:])
+    tm.assert_frame_equal(df.head(0), df[0:0])
+    tm.assert_frame_equal(df.tail(0), df[0:0])
+    tm.assert_frame_equal(df.head(-1), df.iloc[:-1])
+    tm.assert_frame_equal(df.tail(-1), df.iloc[1:])
+
+
+def test_head_tail_empty():
+    # test empty dataframe
+    empty_df = DataFrame()
+    tm.assert_frame_equal(empty_df.tail(), empty_df)
+    tm.assert_frame_equal(empty_df.head(), empty_df)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_infer_objects.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_infer_objects.py
new file mode 100644
index 00000000..a824a615
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_infer_objects.py
@@ -0,0 +1,42 @@
+from datetime import datetime
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+class TestInferObjects:
+    def test_infer_objects(self):
+        # GH#11221
+        df = DataFrame(
+            {
+                "a": ["a", 1, 2, 3],
+                "b": ["b", 2.0, 3.0, 4.1],
+                "c": [
+                    "c",
+                    datetime(2016, 1, 1),
+                    datetime(2016, 1, 2),
+                    datetime(2016, 1, 3),
+                ],
+                "d": [1, 2, 3, "d"],
+            },
+            columns=["a", "b", "c", "d"],
+        )
+        df = df.iloc[1:].infer_objects()
+
+        assert df["a"].dtype == "int64"
+        assert df["b"].dtype == "float64"
+        assert df["c"].dtype == "M8[ns]"
+        assert df["d"].dtype == "object"
+
+        expected = DataFrame(
+            {
+                "a": [1, 2, 3],
+                "b": [2.0, 3.0, 4.1],
+                "c": [datetime(2016, 1, 1), datetime(2016, 1, 2), datetime(2016, 1, 3)],
+                "d": [2, 3, "d"],
+            },
+            columns=["a", "b", "c", "d"],
+        )
+        # reconstruct frame to verify inference is same
+        result = df.reset_index(drop=True)
+        tm.assert_frame_equal(result, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_interpolate.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_interpolate.py
new file mode 100644
index 00000000..67aa07dd
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_interpolate.py
@@ -0,0 +1,505 @@
+import numpy as np
+import pytest
+
+from pandas.errors import ChainedAssignmentError
+import pandas.util._test_decorators as td
+
+from pandas import (
+    DataFrame,
+    NaT,
+    Series,
+    date_range,
+)
+import pandas._testing as tm
+
+
+class TestDataFrameInterpolate:
+    def test_interpolate_complex(self):
+        # GH#53635
+        ser = Series([complex("1+1j"), float("nan"), complex("2+2j")])
+        assert ser.dtype.kind == "c"
+
+        res = ser.interpolate()
+        expected = Series([ser[0], ser[0] * 1.5, ser[2]])
+        tm.assert_series_equal(res, expected)
+
+        df = ser.to_frame()
+        res = df.interpolate()
+        expected = expected.to_frame()
+        tm.assert_frame_equal(res, expected)
+
+    def test_interpolate_datetimelike_values(self, frame_or_series):
+        # GH#11312, GH#51005
+        orig = Series(date_range("2012-01-01", periods=5))
+        ser = orig.copy()
+        ser[2] = NaT
+
+        res = frame_or_series(ser).interpolate()
+        expected = frame_or_series(orig)
+        tm.assert_equal(res, expected)
+
+        # datetime64tz cast
+        ser_tz = ser.dt.tz_localize("US/Pacific")
+        res_tz = frame_or_series(ser_tz).interpolate()
+        expected_tz = frame_or_series(orig.dt.tz_localize("US/Pacific"))
+        tm.assert_equal(res_tz, expected_tz)
+
+        # timedelta64 cast
+        ser_td = ser - ser[0]
+        res_td = frame_or_series(ser_td).interpolate()
+        expected_td = frame_or_series(orig - orig[0])
+        tm.assert_equal(res_td, expected_td)
+
+    def test_interpolate_inplace(self, frame_or_series, using_array_manager, request):
+        # GH#44749
+        if using_array_manager and frame_or_series is DataFrame:
+            mark = pytest.mark.xfail(reason=".values-based in-place check is invalid")
+            request.node.add_marker(mark)
+
+        obj = frame_or_series([1, np.nan, 2])
+        orig = obj.values
+
+        obj.interpolate(inplace=True)
+        expected = frame_or_series([1, 1.5, 2])
+        tm.assert_equal(obj, expected)
+
+        # check we operated *actually* inplace
+        assert np.shares_memory(orig, obj.values)
+        assert orig.squeeze()[1] == 1.5
+
+    def test_interp_basic(self, using_copy_on_write):
+        df = DataFrame(
+            {
+                "A": [1, 2, np.nan, 4],
+                "B": [1, 4, 9, np.nan],
+                "C": [1, 2, 3, 5],
+                "D": list("abcd"),
+            }
+        )
+        expected = DataFrame(
+            {
+                "A": [1.0, 2.0, 3.0, 4.0],
+                "B": [1.0, 4.0, 9.0, 9.0],
+                "C": [1, 2, 3, 5],
+                "D": list("abcd"),
+            }
+        )
+        msg = "DataFrame.interpolate with object dtype"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df.interpolate()
+        tm.assert_frame_equal(result, expected)
+
+        # check we didn't operate inplace GH#45791
+        cvalues = df["C"]._values
+        dvalues = df["D"].values
+        if using_copy_on_write:
+            assert np.shares_memory(cvalues, result["C"]._values)
+            assert np.shares_memory(dvalues, result["D"]._values)
+        else:
+            assert not np.shares_memory(cvalues, result["C"]._values)
+            assert not np.shares_memory(dvalues, result["D"]._values)
+
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            res = df.interpolate(inplace=True)
+        assert res is None
+        tm.assert_frame_equal(df, expected)
+
+        # check we DID operate inplace
+        assert np.shares_memory(df["C"]._values, cvalues)
+        assert np.shares_memory(df["D"]._values, dvalues)
+
+    def test_interp_basic_with_non_range_index(self):
+        df = DataFrame(
+            {
+                "A": [1, 2, np.nan, 4],
+                "B": [1, 4, 9, np.nan],
+                "C": [1, 2, 3, 5],
+                "D": list("abcd"),
+            }
+        )
+
+        msg = "DataFrame.interpolate with object dtype"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df.set_index("C").interpolate()
+        expected = df.set_index("C")
+        expected.loc[3, "A"] = 3
+        expected.loc[5, "B"] = 9
+        tm.assert_frame_equal(result, expected)
+
+    def test_interp_empty(self):
+        # https://github.com/pandas-dev/pandas/issues/35598
+        df = DataFrame()
+        result = df.interpolate()
+        assert result is not df
+        expected = df
+        tm.assert_frame_equal(result, expected)
+
+    def test_interp_bad_method(self):
+        df = DataFrame(
+            {
+                "A": [1, 2, np.nan, 4],
+                "B": [1, 4, 9, np.nan],
+                "C": [1, 2, 3, 5],
+            }
+        )
+        msg = (
+            r"method must be one of \['linear', 'time', 'index', 'values', "
+            r"'nearest', 'zero', 'slinear', 'quadratic', 'cubic', "
+            r"'barycentric', 'krogh', 'spline', 'polynomial', "
+            r"'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima', "
+            r"'cubicspline'\]. Got 'not_a_method' instead."
+        )
+        with pytest.raises(ValueError, match=msg):
+            df.interpolate(method="not_a_method")
+
+    def test_interp_combo(self):
+        df = DataFrame(
+            {
+                "A": [1.0, 2.0, np.nan, 4.0],
+                "B": [1, 4, 9, np.nan],
+                "C": [1, 2, 3, 5],
+                "D": list("abcd"),
+            }
+        )
+
+        result = df["A"].interpolate()
+        expected = Series([1.0, 2.0, 3.0, 4.0], name="A")
+        tm.assert_series_equal(result, expected)
+
+        msg = "The 'downcast' keyword in Series.interpolate is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df["A"].interpolate(downcast="infer")
+        expected = Series([1, 2, 3, 4], name="A")
+        tm.assert_series_equal(result, expected)
+
+    def test_interpolate_invalid_downcast(self):
+        # GH#53103
+        df = DataFrame(
+            {
+                "A": [1.0, 2.0, np.nan, 4.0],
+                "B": [1, 4, 9, np.nan],
+                "C": [1, 2, 3, 5],
+                "D": list("abcd"),
+            }
+        )
+
+        msg = "downcast must be either None or 'infer'"
+        msg2 = "The 'downcast' keyword in DataFrame.interpolate is deprecated"
+        msg3 = "The 'downcast' keyword in Series.interpolate is deprecated"
+        with pytest.raises(ValueError, match=msg):
+            with tm.assert_produces_warning(FutureWarning, match=msg2):
+                df.interpolate(downcast="int64")
+        with pytest.raises(ValueError, match=msg):
+            with tm.assert_produces_warning(FutureWarning, match=msg3):
+                df["A"].interpolate(downcast="int64")
+
+    def test_interp_nan_idx(self):
+        df = DataFrame({"A": [1, 2, np.nan, 4], "B": [np.nan, 2, 3, 4]})
+        df = df.set_index("A")
+        msg = (
+            "Interpolation with NaNs in the index has not been implemented. "
+            "Try filling those NaNs before interpolating."
+        )
+        with pytest.raises(NotImplementedError, match=msg):
+            df.interpolate(method="values")
+
+    def test_interp_various(self):
+        pytest.importorskip("scipy")
+        df = DataFrame(
+            {"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
+        )
+        df = df.set_index("C")
+        expected = df.copy()
+        result = df.interpolate(method="polynomial", order=1)
+
+        expected.loc[3, "A"] = 2.66666667
+        expected.loc[13, "A"] = 5.76923076
+        tm.assert_frame_equal(result, expected)
+
+        result = df.interpolate(method="cubic")
+        # GH #15662.
+        expected.loc[3, "A"] = 2.81547781
+        expected.loc[13, "A"] = 5.52964175
+        tm.assert_frame_equal(result, expected)
+
+        result = df.interpolate(method="nearest")
+        expected.loc[3, "A"] = 2
+        expected.loc[13, "A"] = 5
+        tm.assert_frame_equal(result, expected, check_dtype=False)
+
+        result = df.interpolate(method="quadratic")
+        expected.loc[3, "A"] = 2.82150771
+        expected.loc[13, "A"] = 6.12648668
+        tm.assert_frame_equal(result, expected)
+
+        result = df.interpolate(method="slinear")
+        expected.loc[3, "A"] = 2.66666667
+        expected.loc[13, "A"] = 5.76923077
+        tm.assert_frame_equal(result, expected)
+
+        result = df.interpolate(method="zero")
+        expected.loc[3, "A"] = 2.0
+        expected.loc[13, "A"] = 5
+        tm.assert_frame_equal(result, expected, check_dtype=False)
+
+    def test_interp_alt_scipy(self):
+        pytest.importorskip("scipy")
+        df = DataFrame(
+            {"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
+        )
+        result = df.interpolate(method="barycentric")
+        expected = df.copy()
+        expected.loc[2, "A"] = 3
+        expected.loc[5, "A"] = 6
+        tm.assert_frame_equal(result, expected)
+
+        msg = "The 'downcast' keyword in DataFrame.interpolate is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df.interpolate(method="barycentric", downcast="infer")
+        tm.assert_frame_equal(result, expected.astype(np.int64))
+
+        result = df.interpolate(method="krogh")
+        expectedk = df.copy()
+        expectedk["A"] = expected["A"]
+        tm.assert_frame_equal(result, expectedk)
+
+        result = df.interpolate(method="pchip")
+        expected.loc[2, "A"] = 3
+        expected.loc[5, "A"] = 6.0
+
+        tm.assert_frame_equal(result, expected)
+
+    def test_interp_rowwise(self):
+        df = DataFrame(
+            {
+                0: [1, 2, np.nan, 4],
+                1: [2, 3, 4, np.nan],
+                2: [np.nan, 4, 5, 6],
+                3: [4, np.nan, 6, 7],
+                4: [1, 2, 3, 4],
+            }
+        )
+        result = df.interpolate(axis=1)
+        expected = df.copy()
+        expected.loc[3, 1] = 5
+        expected.loc[0, 2] = 3
+        expected.loc[1, 3] = 3
+        expected[4] = expected[4].astype(np.float64)
+        tm.assert_frame_equal(result, expected)
+
+        result = df.interpolate(axis=1, method="values")
+        tm.assert_frame_equal(result, expected)
+
+        result = df.interpolate(axis=0)
+        expected = df.interpolate()
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "axis_name, axis_number",
+        [
+            pytest.param("rows", 0, id="rows_0"),
+            pytest.param("index", 0, id="index_0"),
+            pytest.param("columns", 1, id="columns_1"),
+        ],
+    )
+    def test_interp_axis_names(self, axis_name, axis_number):
+        # GH 29132: test axis names
+        data = {0: [0, np.nan, 6], 1: [1, np.nan, 7], 2: [2, 5, 8]}
+
+        df = DataFrame(data, dtype=np.float64)
+        result = df.interpolate(axis=axis_name, method="linear")
+        expected = df.interpolate(axis=axis_number, method="linear")
+        tm.assert_frame_equal(result, expected)
+
+    def test_rowwise_alt(self):
+        df = DataFrame(
+            {
+                0: [0, 0.5, 1.0, np.nan, 4, 8, np.nan, np.nan, 64],
+                1: [1, 2, 3, 4, 3, 2, 1, 0, -1],
+            }
+        )
+        df.interpolate(axis=0)
+        # TODO: assert something?
+
+    @pytest.mark.parametrize(
+        "check_scipy", [False, pytest.param(True, marks=td.skip_if_no_scipy)]
+    )
+    def test_interp_leading_nans(self, check_scipy):
+        df = DataFrame(
+            {"A": [np.nan, np.nan, 0.5, 0.25, 0], "B": [np.nan, -3, -3.5, np.nan, -4]}
+        )
+        result = df.interpolate()
+        expected = df.copy()
+        expected.loc[3, "B"] = -3.75
+        tm.assert_frame_equal(result, expected)
+
+        if check_scipy:
+            result = df.interpolate(method="polynomial", order=1)
+            tm.assert_frame_equal(result, expected)
+
+    def test_interp_raise_on_only_mixed(self, axis):
+        df = DataFrame(
+            {
+                "A": [1, 2, np.nan, 4],
+                "B": ["a", "b", "c", "d"],
+                "C": [np.nan, 2, 5, 7],
+                "D": [np.nan, np.nan, 9, 9],
+                "E": [1, 2, 3, 4],
+            }
+        )
+        msg = (
+            "Cannot interpolate with all object-dtype columns "
+            "in the DataFrame. Try setting at least one "
+            "column to a numeric dtype."
+        )
+        with pytest.raises(TypeError, match=msg):
+            df.astype("object").interpolate(axis=axis)
+
+    def test_interp_raise_on_all_object_dtype(self):
+        # GH 22985
+        df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, dtype="object")
+        msg = (
+            "Cannot interpolate with all object-dtype columns "
+            "in the DataFrame. Try setting at least one "
+            "column to a numeric dtype."
+        )
+        with pytest.raises(TypeError, match=msg):
+            df.interpolate()
+
+    def test_interp_inplace(self, using_copy_on_write):
+        df = DataFrame({"a": [1.0, 2.0, np.nan, 4.0]})
+        expected = DataFrame({"a": [1.0, 2.0, 3.0, 4.0]})
+        expected_cow = df.copy()
+        result = df.copy()
+
+        if using_copy_on_write:
+            with tm.raises_chained_assignment_error():
+                return_value = result["a"].interpolate(inplace=True)
+            assert return_value is None
+            tm.assert_frame_equal(result, expected_cow)
+        else:
+            return_value = result["a"].interpolate(inplace=True)
+            assert return_value is None
+            tm.assert_frame_equal(result, expected)
+
+        result = df.copy()
+        msg = "The 'downcast' keyword in Series.interpolate is deprecated"
+
+        if using_copy_on_write:
+            with tm.assert_produces_warning(
+                (FutureWarning, ChainedAssignmentError), match=msg
+            ):
+                return_value = result["a"].interpolate(inplace=True, downcast="infer")
+            assert return_value is None
+            tm.assert_frame_equal(result, expected_cow)
+        else:
+            with tm.assert_produces_warning(FutureWarning, match=msg):
+                return_value = result["a"].interpolate(inplace=True, downcast="infer")
+            assert return_value is None
+            tm.assert_frame_equal(result, expected.astype("int64"))
+
+    def test_interp_inplace_row(self):
+        # GH 10395
+        result = DataFrame(
+            {"a": [1.0, 2.0, 3.0, 4.0], "b": [np.nan, 2.0, 3.0, 4.0], "c": [3, 2, 2, 2]}
+        )
+        expected = result.interpolate(method="linear", axis=1, inplace=False)
+        return_value = result.interpolate(method="linear", axis=1, inplace=True)
+        assert return_value is None
+        tm.assert_frame_equal(result, expected)
+
+    def test_interp_ignore_all_good(self):
+        # GH
+        df = DataFrame(
+            {
+                "A": [1, 2, np.nan, 4],
+                "B": [1, 2, 3, 4],
+                "C": [1.0, 2.0, np.nan, 4.0],
+                "D": [1.0, 2.0, 3.0, 4.0],
+            }
+        )
+        expected = DataFrame(
+            {
+                "A": np.array([1, 2, 3, 4], dtype="float64"),
+                "B": np.array([1, 2, 3, 4], dtype="int64"),
+                "C": np.array([1.0, 2.0, 3, 4.0], dtype="float64"),
+                "D": np.array([1.0, 2.0, 3.0, 4.0], dtype="float64"),
+            }
+        )
+
+        msg = "The 'downcast' keyword in DataFrame.interpolate is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df.interpolate(downcast=None)
+        tm.assert_frame_equal(result, expected)
+
+        # all good
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df[["B", "D"]].interpolate(downcast=None)
"D"]].interpolate(downcast=None) + tm.assert_frame_equal(result, df[["B", "D"]]) + + def test_interp_time_inplace_axis(self): + # GH 9687 + periods = 5 + idx = date_range(start="2014-01-01", periods=periods) + data = np.random.default_rng(2).random((periods, periods)) + data[data < 0.5] = np.nan + expected = DataFrame(index=idx, columns=idx, data=data) + + result = expected.interpolate(axis=0, method="time") + return_value = expected.interpolate(axis=0, method="time", inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("axis_name, axis_number", [("index", 0), ("columns", 1)]) + def test_interp_string_axis(self, axis_name, axis_number): + # https://github.com/pandas-dev/pandas/issues/25190 + x = np.linspace(0, 100, 1000) + y = np.sin(x) + df = DataFrame( + data=np.tile(y, (10, 1)), index=np.arange(10), columns=x + ).reindex(columns=x * 1.005) + result = df.interpolate(method="linear", axis=axis_name) + expected = df.interpolate(method="linear", axis=axis_number) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("multiblock", [True, False]) + @pytest.mark.parametrize("method", ["ffill", "bfill", "pad"]) + def test_interp_fillna_methods( + self, request, axis, multiblock, method, using_array_manager + ): + # GH 12918 + if using_array_manager and axis in (1, "columns"): + # TODO(ArrayManager) support axis=1 + td.mark_array_manager_not_yet_implemented(request) + + df = DataFrame( + { + "A": [1.0, 2.0, 3.0, 4.0, np.nan, 5.0], + "B": [2.0, 4.0, 6.0, np.nan, 8.0, 10.0], + "C": [3.0, 6.0, 9.0, np.nan, np.nan, 30.0], + } + ) + if multiblock: + df["D"] = np.nan + df["E"] = 1.0 + + method2 = method if method != "pad" else "ffill" + expected = getattr(df, method2)(axis=axis) + msg = f"DataFrame.interpolate with method={method} is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.interpolate(method=method, axis=axis) + tm.assert_frame_equal(result, expected) + + def test_interpolate_empty_df(self): + # GH#53199 + df = DataFrame() + expected = df.copy() + result = df.interpolate(inplace=True) + assert result is None + tm.assert_frame_equal(df, expected) + + def test_interpolate_ea_raise(self): + # GH#55347 + df = DataFrame({"a": [1, None, 2]}, dtype="Int64") + with pytest.raises(NotImplementedError, match="does not implement"): + df.interpolate() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_is_homogeneous_dtype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_is_homogeneous_dtype.py new file mode 100644 index 00000000..a5f285d3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_is_homogeneous_dtype.py @@ -0,0 +1,57 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + Categorical, + DataFrame, +) + +# _is_homogeneous_type always returns True for ArrayManager +pytestmark = td.skip_array_manager_invalid_test + + +@pytest.mark.parametrize( + "data, expected", + [ + # empty + (DataFrame(), True), + # multi-same + (DataFrame({"A": [1, 2], "B": [1, 2]}), True), + # multi-object + ( + DataFrame( + { + "A": np.array([1, 2], dtype=object), + "B": np.array(["a", "b"], dtype=object), + } + ), + True, + ), + # multi-extension + ( + DataFrame({"A": Categorical(["a", "b"]), "B": Categorical(["a", "b"])}), + True, + ), + # differ types + (DataFrame({"A": [1, 2], "B": [1.0, 2.0]}), False), + # differ sizes + ( + DataFrame( + { + "A": np.array([1, 
+                    "B": np.array([1, 2], dtype=np.int64),
+                }
+            ),
+            False,
+        ),
+        # multi-extension differ
+        (
+            DataFrame({"A": Categorical(["a", "b"]), "B": Categorical(["b", "c"])}),
+            False,
+        ),
+    ],
+)
+def test_is_homogeneous_type(data, expected):
+    assert data._is_homogeneous_type is expected
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_isetitem.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_isetitem.py
new file mode 100644
index 00000000..69f394af
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_isetitem.py
@@ -0,0 +1,50 @@
+import pytest
+
+from pandas import (
+    DataFrame,
+    Series,
+)
+import pandas._testing as tm
+
+
+class TestDataFrameSetItem:
+    def test_isetitem_ea_df(self):
+        # GH#49922
+        df = DataFrame([[1, 2, 3], [4, 5, 6]])
+        rhs = DataFrame([[11, 12], [13, 14]], dtype="Int64")
+
+        df.isetitem([0, 1], rhs)
+        expected = DataFrame(
+            {
+                0: Series([11, 13], dtype="Int64"),
+                1: Series([12, 14], dtype="Int64"),
+                2: [3, 6],
+            }
+        )
+        tm.assert_frame_equal(df, expected)
+
+    def test_isetitem_ea_df_scalar_indexer(self):
+        # GH#49922
+        df = DataFrame([[1, 2, 3], [4, 5, 6]])
+        rhs = DataFrame([[11], [13]], dtype="Int64")
+
+        df.isetitem(2, rhs)
+        expected = DataFrame(
+            {
+                0: [1, 4],
+                1: [2, 5],
+                2: Series([11, 13], dtype="Int64"),
+            }
+        )
+        tm.assert_frame_equal(df, expected)
+
+    def test_isetitem_dimension_mismatch(self):
+        # GH#51701
+        df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
+        value = df.copy()
+        with pytest.raises(ValueError, match="Got 2 positions but value has 3 columns"):
+            df.isetitem([1, 2], value)
+
+        value = df.copy()
+        with pytest.raises(ValueError, match="Got 2 positions but value has 1 columns"):
+            df.isetitem([1, 2], value[["a"]])
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_isin.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_isin.py
new file mode 100644
index 00000000..b4511aad
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_isin.py
@@ -0,0 +1,227 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    MultiIndex,
+    Series,
+)
+import pandas._testing as tm
+
+
+class TestDataFrameIsIn:
+    def test_isin(self):
+        # GH#4211
+        df = DataFrame(
+            {
+                "vals": [1, 2, 3, 4],
+                "ids": ["a", "b", "f", "n"],
+                "ids2": ["a", "n", "c", "n"],
+            },
+            index=["foo", "bar", "baz", "qux"],
+        )
+        other = ["a", "b", "c"]
+
+        result = df.isin(other)
+        expected = DataFrame([df.loc[s].isin(other) for s in df.index])
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
+    def test_isin_empty(self, empty):
+        # GH#16991
+        df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
+        expected = DataFrame(False, df.index, df.columns)
+
+        result = df.isin(empty)
+        tm.assert_frame_equal(result, expected)
+
+    def test_isin_dict(self):
+        df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
+        d = {"A": ["a"]}
+
+        expected = DataFrame(False, df.index, df.columns)
+        expected.loc[0, "A"] = True
+
+        result = df.isin(d)
+        tm.assert_frame_equal(result, expected)
+
+        # non unique columns
+        df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
+        df.columns = ["A", "A"]
+        expected = DataFrame(False, df.index, df.columns)
+        expected.loc[0, "A"] = True
+        result = df.isin(d)
+        tm.assert_frame_equal(result, expected)
+
+    def test_isin_with_string_scalar(self):
+        # GH#4763
+        df = DataFrame(
+            {
+                "vals": [1, 2, 3, 4],
+                "ids": ["a", "b", "f", "n"],
+                "ids2": ["a", "n", "c", "n"],
+            },
+            index=["foo", "bar", "baz", "qux"],
+        )
+        msg = (
+            r"only list-like or dict-like objects are allowed "
+            r"to be passed to DataFrame.isin\(\), you passed a 'str'"
+        )
+        with pytest.raises(TypeError, match=msg):
+            df.isin("a")
+
+        with pytest.raises(TypeError, match=msg):
+            df.isin("aaa")
+
+    def test_isin_df(self):
+        df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
+        df2 = DataFrame({"A": [0, 2, 12, 4], "B": [2, np.nan, 4, 5]})
+        expected = DataFrame(False, df1.index, df1.columns)
+        result = df1.isin(df2)
+        expected.loc[[1, 3], "A"] = True
+        expected.loc[[0, 2], "B"] = True
+        tm.assert_frame_equal(result, expected)
+
+        # partial overlapping columns
+        df2.columns = ["A", "C"]
+        result = df1.isin(df2)
+        expected["B"] = False
+        tm.assert_frame_equal(result, expected)
+
+    def test_isin_tuples(self):
+        # GH#16394
+        df = DataFrame({"A": [1, 2, 3], "B": ["a", "b", "f"]})
+        df["C"] = list(zip(df["A"], df["B"]))
+        result = df["C"].isin([(1, "a")])
+        tm.assert_series_equal(result, Series([True, False, False], name="C"))
+
+    def test_isin_df_dupe_values(self):
+        df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
+        # just cols duped
+        df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]], columns=["B", "B"])
+        msg = r"cannot compute isin with a duplicate axis\."
+        with pytest.raises(ValueError, match=msg):
+            df1.isin(df2)
+
+        # just index duped
+        df2 = DataFrame(
+            [[0, 2], [12, 4], [2, np.nan], [4, 5]],
+            columns=["A", "B"],
+            index=[0, 0, 1, 1],
+        )
+        with pytest.raises(ValueError, match=msg):
+            df1.isin(df2)
+
+        # cols and index:
+        df2.columns = ["B", "B"]
+        with pytest.raises(ValueError, match=msg):
+            df1.isin(df2)
+
+    def test_isin_dupe_self(self):
+        other = DataFrame({"A": [1, 0, 1, 0], "B": [1, 1, 0, 0]})
+        df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=["A", "A"])
+        result = df.isin(other)
+        expected = DataFrame(False, index=df.index, columns=df.columns)
+        expected.loc[0] = True
+        expected.iloc[1, 1] = True
+        tm.assert_frame_equal(result, expected)
+
+    def test_isin_against_series(self):
+        df = DataFrame(
+            {"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]}, index=["a", "b", "c", "d"]
+        )
+        s = Series([1, 3, 11, 4], index=["a", "b", "c", "d"])
+        expected = DataFrame(False, index=df.index, columns=df.columns)
+        expected.loc["a", "A"] = True
+        expected.loc["d"] = True
+        result = df.isin(s)
+        tm.assert_frame_equal(result, expected)
+
+    def test_isin_multiIndex(self):
+        idx = MultiIndex.from_tuples(
+            [
+                (0, "a", "foo"),
+                (0, "a", "bar"),
+                (0, "b", "bar"),
+                (0, "b", "baz"),
+                (2, "a", "foo"),
+                (2, "a", "bar"),
+                (2, "c", "bar"),
+                (2, "c", "baz"),
+                (1, "b", "foo"),
+                (1, "b", "bar"),
+                (1, "c", "bar"),
+                (1, "c", "baz"),
+            ]
+        )
+        df1 = DataFrame({"A": np.ones(12), "B": np.zeros(12)}, index=idx)
+        df2 = DataFrame(
+            {
+                "A": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
+                "B": [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1],
+            }
+        )
+        # against regular index
+        expected = DataFrame(False, index=df1.index, columns=df1.columns)
+        result = df1.isin(df2)
+        tm.assert_frame_equal(result, expected)
+
+        df2.index = idx
+        expected = df2.values.astype(bool)
+        expected[:, 1] = ~expected[:, 1]
+        expected = DataFrame(expected, columns=["A", "B"], index=idx)
+
+        result = df1.isin(df2)
+        tm.assert_frame_equal(result, expected)
+
+    def test_isin_empty_datetimelike(self):
+        # GH#15473
+        df1_ts = DataFrame({"date": pd.to_datetime(["2014-01-01", "2014-01-02"])})
"2014-01-02"])}) + df1_td = DataFrame({"date": [pd.Timedelta(1, "s"), pd.Timedelta(2, "s")]}) + df2 = DataFrame({"date": []}) + df3 = DataFrame() + + expected = DataFrame({"date": [False, False]}) + + result = df1_ts.isin(df2) + tm.assert_frame_equal(result, expected) + result = df1_ts.isin(df3) + tm.assert_frame_equal(result, expected) + + result = df1_td.isin(df2) + tm.assert_frame_equal(result, expected) + result = df1_td.isin(df3) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "values", + [ + DataFrame({"a": [1, 2, 3]}, dtype="category"), + Series([1, 2, 3], dtype="category"), + ], + ) + def test_isin_category_frame(self, values): + # GH#34256 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + expected = DataFrame({"a": [True, True, True], "b": [False, False, False]}) + + result = df.isin(values) + tm.assert_frame_equal(result, expected) + + def test_isin_read_only(self): + # https://github.com/pandas-dev/pandas/issues/37174 + arr = np.array([1, 2, 3]) + arr.setflags(write=False) + df = DataFrame([1, 2, 3]) + result = df.isin(arr) + expected = DataFrame([True, True, True]) + tm.assert_frame_equal(result, expected) + + def test_isin_not_lossy(self): + # GH 53514 + val = 1666880195890293744 + df = DataFrame({"a": [val], "b": [1.0]}) + result = df.isin([val]) + expected = DataFrame({"a": [True], "b": [False]}) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_iterrows.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_iterrows.py new file mode 100644 index 00000000..0bd0bed7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_iterrows.py @@ -0,0 +1,16 @@ +from pandas import ( + DataFrame, + Timedelta, +) + + +def test_no_overflow_of_freq_and_time_in_dataframe(): + # GH 35665 + df = DataFrame( + { + "some_string": ["2222Y3"], + "time": [Timedelta("0 days 00:00:00.990000")], + } + ) + for _, row in df.iterrows(): + assert row.dtype == "object" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_join.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_join.py new file mode 100644 index 00000000..3b6b8605 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_join.py @@ -0,0 +1,576 @@ +from datetime import datetime + +import numpy as np +import pytest + +from pandas.errors import MergeError + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + date_range, + period_range, +) +import pandas._testing as tm +from pandas.core.reshape.concat import concat + + +@pytest.fixture +def frame_with_period_index(): + return DataFrame( + data=np.arange(20).reshape(4, 5), + columns=list("abcde"), + index=period_range(start="2000", freq="A", periods=4), + ) + + +@pytest.fixture +def left(): + return DataFrame({"a": [20, 10, 0]}, index=[2, 1, 0]) + + +@pytest.fixture +def right(): + return DataFrame({"b": [300, 100, 200]}, index=[3, 1, 2]) + + +@pytest.fixture +def left_no_dup(): + return DataFrame( + {"a": ["a", "b", "c", "d"], "b": ["cat", "dog", "weasel", "horse"]}, + index=range(4), + ) + + +@pytest.fixture +def right_no_dup(): + return DataFrame( + { + "a": ["a", "b", "c", "d", "e"], + "c": ["meow", "bark", "um... 
weasel noise?", "nay", "chirp"], + }, + index=range(5), + ).set_index("a") + + +@pytest.fixture +def left_w_dups(left_no_dup): + return concat( + [left_no_dup, DataFrame({"a": ["a"], "b": ["cow"]}, index=[3])], sort=True + ) + + +@pytest.fixture +def right_w_dups(right_no_dup): + return concat( + [right_no_dup, DataFrame({"a": ["e"], "c": ["moo"]}, index=[3])] + ).set_index("a") + + +@pytest.mark.parametrize( + "how, sort, expected", + [ + ("inner", False, DataFrame({"a": [20, 10], "b": [200, 100]}, index=[2, 1])), + ("inner", True, DataFrame({"a": [10, 20], "b": [100, 200]}, index=[1, 2])), + ( + "left", + False, + DataFrame({"a": [20, 10, 0], "b": [200, 100, np.nan]}, index=[2, 1, 0]), + ), + ( + "left", + True, + DataFrame({"a": [0, 10, 20], "b": [np.nan, 100, 200]}, index=[0, 1, 2]), + ), + ( + "right", + False, + DataFrame({"a": [np.nan, 10, 20], "b": [300, 100, 200]}, index=[3, 1, 2]), + ), + ( + "right", + True, + DataFrame({"a": [10, 20, np.nan], "b": [100, 200, 300]}, index=[1, 2, 3]), + ), + ( + "outer", + False, + DataFrame( + {"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]}, + index=[0, 1, 2, 3], + ), + ), + ( + "outer", + True, + DataFrame( + {"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]}, + index=[0, 1, 2, 3], + ), + ), + ], +) +def test_join(left, right, how, sort, expected): + result = left.join(right, how=how, sort=sort, validate="1:1") + tm.assert_frame_equal(result, expected) + + +def test_suffix_on_list_join(): + first = DataFrame({"key": [1, 2, 3, 4, 5]}) + second = DataFrame({"key": [1, 8, 3, 2, 5], "v1": [1, 2, 3, 4, 5]}) + third = DataFrame({"keys": [5, 2, 3, 4, 1], "v2": [1, 2, 3, 4, 5]}) + + # check proper errors are raised + msg = "Suffixes not supported when joining multiple DataFrames" + with pytest.raises(ValueError, match=msg): + first.join([second], lsuffix="y") + with pytest.raises(ValueError, match=msg): + first.join([second, third], rsuffix="x") + with pytest.raises(ValueError, match=msg): + first.join([second, third], lsuffix="y", rsuffix="x") + with pytest.raises(ValueError, match="Indexes have overlapping values"): + first.join([second, third]) + + # no errors should be raised + arr_joined = first.join([third]) + norm_joined = first.join(third) + tm.assert_frame_equal(arr_joined, norm_joined) + + +def test_join_invalid_validate(left_no_dup, right_no_dup): + # GH 46622 + # Check invalid arguments + msg = ( + '"invalid" is not a valid argument. 
' + "Valid arguments are:\n" + '- "1:1"\n' + '- "1:m"\n' + '- "m:1"\n' + '- "m:m"\n' + '- "one_to_one"\n' + '- "one_to_many"\n' + '- "many_to_one"\n' + '- "many_to_many"' + ) + with pytest.raises(ValueError, match=msg): + left_no_dup.merge(right_no_dup, on="a", validate="invalid") + + +@pytest.mark.parametrize("dtype", ["object", "string[pyarrow]"]) +def test_join_on_single_col_dup_on_right(left_no_dup, right_w_dups, dtype): + # GH 46622 + # Dups on right allowed by one_to_many constraint + if dtype == "string[pyarrow]": + pytest.importorskip("pyarrow") + left_no_dup = left_no_dup.astype(dtype) + right_w_dups.index = right_w_dups.index.astype(dtype) + left_no_dup.join( + right_w_dups, + on="a", + validate="one_to_many", + ) + + # Dups on right not allowed by one_to_one constraint + msg = "Merge keys are not unique in right dataset; not a one-to-one merge" + with pytest.raises(MergeError, match=msg): + left_no_dup.join( + right_w_dups, + on="a", + validate="one_to_one", + ) + + +def test_join_on_single_col_dup_on_left(left_w_dups, right_no_dup): + # GH 46622 + # Dups on left allowed by many_to_one constraint + left_w_dups.join( + right_no_dup, + on="a", + validate="many_to_one", + ) + + # Dups on left not allowed by one_to_one constraint + msg = "Merge keys are not unique in left dataset; not a one-to-one merge" + with pytest.raises(MergeError, match=msg): + left_w_dups.join( + right_no_dup, + on="a", + validate="one_to_one", + ) + + +def test_join_on_single_col_dup_on_both(left_w_dups, right_w_dups): + # GH 46622 + # Dups on both allowed by many_to_many constraint + left_w_dups.join(right_w_dups, on="a", validate="many_to_many") + + # Dups on both not allowed by many_to_one constraint + msg = "Merge keys are not unique in right dataset; not a many-to-one merge" + with pytest.raises(MergeError, match=msg): + left_w_dups.join( + right_w_dups, + on="a", + validate="many_to_one", + ) + + # Dups on both not allowed by one_to_many constraint + msg = "Merge keys are not unique in left dataset; not a one-to-many merge" + with pytest.raises(MergeError, match=msg): + left_w_dups.join( + right_w_dups, + on="a", + validate="one_to_many", + ) + + +def test_join_on_multi_col_check_dup(): + # GH 46622 + # Two column join, dups in both, but jointly no dups + left = DataFrame( + { + "a": ["a", "a", "b", "b"], + "b": [0, 1, 0, 1], + "c": ["cat", "dog", "weasel", "horse"], + }, + index=range(4), + ).set_index(["a", "b"]) + + right = DataFrame( + { + "a": ["a", "a", "b"], + "b": [0, 1, 0], + "d": ["meow", "bark", "um... weasel noise?"], + }, + index=range(3), + ).set_index(["a", "b"]) + + expected_multi = DataFrame( + { + "a": ["a", "a", "b"], + "b": [0, 1, 0], + "c": ["cat", "dog", "weasel"], + "d": ["meow", "bark", "um... 
weasel noise?"], + }, + index=range(3), + ).set_index(["a", "b"]) + + # Jointly no dups allowed by one_to_one constraint + result = left.join(right, how="inner", validate="1:1") + tm.assert_frame_equal(result, expected_multi) + + +def test_join_index(float_frame): + # left / right + + f = float_frame.loc[float_frame.index[:10], ["A", "B"]] + f2 = float_frame.loc[float_frame.index[5:], ["C", "D"]].iloc[::-1] + + joined = f.join(f2) + tm.assert_index_equal(f.index, joined.index) + expected_columns = Index(["A", "B", "C", "D"]) + tm.assert_index_equal(joined.columns, expected_columns) + + joined = f.join(f2, how="left") + tm.assert_index_equal(joined.index, f.index) + tm.assert_index_equal(joined.columns, expected_columns) + + joined = f.join(f2, how="right") + tm.assert_index_equal(joined.index, f2.index) + tm.assert_index_equal(joined.columns, expected_columns) + + # inner + + joined = f.join(f2, how="inner") + tm.assert_index_equal(joined.index, f.index[5:10]) + tm.assert_index_equal(joined.columns, expected_columns) + + # outer + + joined = f.join(f2, how="outer") + tm.assert_index_equal(joined.index, float_frame.index.sort_values()) + tm.assert_index_equal(joined.columns, expected_columns) + + with pytest.raises(ValueError, match="join method"): + f.join(f2, how="foo") + + # corner case - overlapping columns + msg = "columns overlap but no suffix" + for how in ("outer", "left", "inner"): + with pytest.raises(ValueError, match=msg): + float_frame.join(float_frame, how=how) + + +def test_join_index_more(float_frame): + af = float_frame.loc[:, ["A", "B"]] + bf = float_frame.loc[::2, ["C", "D"]] + + expected = af.copy() + expected["C"] = float_frame["C"][::2] + expected["D"] = float_frame["D"][::2] + + result = af.join(bf) + tm.assert_frame_equal(result, expected) + + result = af.join(bf, how="right") + tm.assert_frame_equal(result, expected[::2]) + + result = bf.join(af, how="right") + tm.assert_frame_equal(result, expected.loc[:, result.columns]) + + +def test_join_index_series(float_frame): + df = float_frame.copy() + ser = df.pop(float_frame.columns[-1]) + joined = df.join(ser) + + tm.assert_frame_equal(joined, float_frame) + + ser.name = None + with pytest.raises(ValueError, match="must have a name"): + df.join(ser) + + +def test_join_overlap(float_frame): + df1 = float_frame.loc[:, ["A", "B", "C"]] + df2 = float_frame.loc[:, ["B", "C", "D"]] + + joined = df1.join(df2, lsuffix="_df1", rsuffix="_df2") + df1_suf = df1.loc[:, ["B", "C"]].add_suffix("_df1") + df2_suf = df2.loc[:, ["B", "C"]].add_suffix("_df2") + + no_overlap = float_frame.loc[:, ["A", "D"]] + expected = df1_suf.join(df2_suf).join(no_overlap) + + # column order not necessarily sorted + tm.assert_frame_equal(joined, expected.loc[:, joined.columns]) + + +def test_join_period_index(frame_with_period_index): + other = frame_with_period_index.rename(columns=lambda key: f"{key}{key}") + + joined_values = np.concatenate([frame_with_period_index.values] * 2, axis=1) + + joined_cols = frame_with_period_index.columns.append(other.columns) + + joined = frame_with_period_index.join(other) + expected = DataFrame( + data=joined_values, columns=joined_cols, index=frame_with_period_index.index + ) + + tm.assert_frame_equal(joined, expected) + + +def test_join_left_sequence_non_unique_index(): + # https://github.com/pandas-dev/pandas/issues/19607 + df1 = DataFrame({"a": [0, 10, 20]}, index=[1, 2, 3]) + df2 = DataFrame({"b": [100, 200, 300]}, index=[4, 3, 2]) + df3 = DataFrame({"c": [400, 500, 600]}, index=[2, 2, 4]) + + joined = 
df1.join([df2, df3], how="left") + + expected = DataFrame( + { + "a": [0, 10, 10, 20], + "b": [np.nan, 300, 300, 200], + "c": [np.nan, 400, 500, np.nan], + }, + index=[1, 2, 2, 3], + ) + + tm.assert_frame_equal(joined, expected) + + +def test_join_list_series(float_frame): + # GH#46850 + # Join a DataFrame with a list containing both a Series and a DataFrame + left = float_frame.A.to_frame() + right = [float_frame.B, float_frame[["C", "D"]]] + result = left.join(right) + tm.assert_frame_equal(result, float_frame) + + +@pytest.mark.parametrize("sort_kw", [True, False]) +def test_suppress_future_warning_with_sort_kw(sort_kw): + a = DataFrame({"col1": [1, 2]}, index=["c", "a"]) + + b = DataFrame({"col2": [4, 5]}, index=["b", "a"]) + + c = DataFrame({"col3": [7, 8]}, index=["a", "b"]) + + expected = DataFrame( + { + "col1": {"a": 2.0, "b": float("nan"), "c": 1.0}, + "col2": {"a": 5.0, "b": 4.0, "c": float("nan")}, + "col3": {"a": 7.0, "b": 8.0, "c": float("nan")}, + } + ) + if sort_kw is False: + expected = expected.reindex(index=["c", "a", "b"]) + + with tm.assert_produces_warning(None): + result = a.join([b, c], how="outer", sort=sort_kw) + tm.assert_frame_equal(result, expected) + + +class TestDataFrameJoin: + def test_join(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + a = frame.loc[frame.index[:5], ["A"]] + b = frame.loc[frame.index[2:], ["B", "C"]] + + joined = a.join(b, how="outer").reindex(frame.index) + expected = frame.copy().values.copy() + expected[np.isnan(joined.values)] = np.nan + expected = DataFrame(expected, index=frame.index, columns=frame.columns) + + assert not np.isnan(joined.values).all() + + tm.assert_frame_equal(joined, expected) + + def test_join_segfault(self): + # GH#1532 + df1 = DataFrame({"a": [1, 1], "b": [1, 2], "x": [1, 2]}) + df2 = DataFrame({"a": [2, 2], "b": [1, 2], "y": [1, 2]}) + df1 = df1.set_index(["a", "b"]) + df2 = df2.set_index(["a", "b"]) + # it works! 
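+        # Illustrative sketch (added; not part of the upstream test): the
+        # shared ("a", "b") MultiIndex is the join key here, and since df1
+        # only has a == 1 while df2 only has a == 2, an inner join is empty.
+        demo_inner = df1.join(df2, how="inner")
+        assert len(demo_inner) == 0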
+ for how in ["left", "right", "outer"]: + df1.join(df2, how=how) + + def test_join_str_datetime(self): + str_dates = ["20120209", "20120222"] + dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] + + A = DataFrame(str_dates, index=range(2), columns=["aa"]) + C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates) + + tst = A.join(C, on="aa") + + assert len(tst.columns) == 3 + + def test_join_multiindex_leftright(self): + # GH 10741 + df1 = DataFrame( + [ + ["a", "x", 0.471780], + ["a", "y", 0.774908], + ["a", "z", 0.563634], + ["b", "x", -0.353756], + ["b", "y", 0.368062], + ["b", "z", -1.721840], + ["c", "x", 1], + ["c", "y", 2], + ["c", "z", 3], + ], + columns=["first", "second", "value1"], + ).set_index(["first", "second"]) + + df2 = DataFrame([["a", 10], ["b", 20]], columns=["first", "value2"]).set_index( + ["first"] + ) + + exp = DataFrame( + [ + [0.471780, 10], + [0.774908, 10], + [0.563634, 10], + [-0.353756, 20], + [0.368062, 20], + [-1.721840, 20], + [1.000000, np.nan], + [2.000000, np.nan], + [3.000000, np.nan], + ], + index=df1.index, + columns=["value1", "value2"], + ) + + # these must be the same results (but columns are flipped) + tm.assert_frame_equal(df1.join(df2, how="left"), exp) + tm.assert_frame_equal(df2.join(df1, how="right"), exp[["value2", "value1"]]) + + exp_idx = MultiIndex.from_product( + [["a", "b"], ["x", "y", "z"]], names=["first", "second"] + ) + exp = DataFrame( + [ + [0.471780, 10], + [0.774908, 10], + [0.563634, 10], + [-0.353756, 20], + [0.368062, 20], + [-1.721840, 20], + ], + index=exp_idx, + columns=["value1", "value2"], + ) + + tm.assert_frame_equal(df1.join(df2, how="right"), exp) + tm.assert_frame_equal(df2.join(df1, how="left"), exp[["value2", "value1"]]) + + def test_join_multiindex_dates(self): + # GH 33692 + date = pd.Timestamp(2000, 1, 1).date() + + df1_index = MultiIndex.from_tuples([(0, date)], names=["index_0", "date"]) + df1 = DataFrame({"col1": [0]}, index=df1_index) + df2_index = MultiIndex.from_tuples([(0, date)], names=["index_0", "date"]) + df2 = DataFrame({"col2": [0]}, index=df2_index) + df3_index = MultiIndex.from_tuples([(0, date)], names=["index_0", "date"]) + df3 = DataFrame({"col3": [0]}, index=df3_index) + + result = df1.join([df2, df3]) + + expected_index = MultiIndex.from_tuples([(0, date)], names=["index_0", "date"]) + expected = DataFrame( + {"col1": [0], "col2": [0], "col3": [0]}, index=expected_index + ) + + tm.assert_equal(result, expected) + + def test_merge_join_different_levels_raises(self): + # GH#9455 + # GH 40993: For raising, enforced in 2.0 + + # first dataframe + df1 = DataFrame(columns=["a", "b"], data=[[1, 11], [0, 22]]) + + # second dataframe + columns = MultiIndex.from_tuples([("a", ""), ("c", "c1")]) + df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]]) + + # merge + with pytest.raises( + MergeError, match="Not allowed to merge between different levels" + ): + pd.merge(df1, df2, on="a") + + # join, see discussion in GH#12219 + with pytest.raises( + MergeError, match="Not allowed to merge between different levels" + ): + df1.join(df2, on="a") + + def test_frame_join_tzaware(self): + test1 = DataFrame( + np.zeros((6, 3)), + index=date_range( + "2012-11-15 00:00:00", periods=6, freq="100L", tz="US/Central" + ), + ) + test2 = DataFrame( + np.zeros((3, 3)), + index=date_range( + "2012-11-15 00:00:00", periods=3, freq="250L", tz="US/Central" + ), + columns=range(3, 6), + ) + + result = test1.join(test2, how="outer") + expected = test1.index.union(test2.index) + + 
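+        # Illustrative sketch (added): a union of two DatetimeIndexes that
+        # share a timezone keeps that timezone, which is the property the
+        # assertions below verify on the outer-join result.
+        assert str(expected.tz) == "US/Central"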
tm.assert_index_equal(result.index, expected) + assert result.index.tz.zone == "US/Central" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_map.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_map.py new file mode 100644 index 00000000..0de88114 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_map.py @@ -0,0 +1,216 @@ +from datetime import datetime + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + + +def test_map(float_frame): + result = float_frame.map(lambda x: x * 2) + tm.assert_frame_equal(result, float_frame * 2) + float_frame.map(type) + + # GH 465: function returning tuples + result = float_frame.map(lambda x: (x, x))["A"].iloc[0] + assert isinstance(result, tuple) + + +@pytest.mark.parametrize("val", [1, 1.0]) +def test_map_float_object_conversion(val): + # GH 2909: object conversion to float in constructor? + df = DataFrame(data=[val, "a"]) + result = df.map(lambda x: x).dtypes[0] + assert result == object + + +@pytest.mark.parametrize("na_action", [None, "ignore"]) +def test_map_keeps_dtype(na_action): + # GH52219 + arr = Series(["a", np.nan, "b"]) + sparse_arr = arr.astype(pd.SparseDtype(object)) + df = DataFrame(data={"a": arr, "b": sparse_arr}) + + def func(x): + return str.upper(x) if not pd.isna(x) else x + + result = df.map(func, na_action=na_action) + + expected_sparse = pd.array(["A", np.nan, "B"], dtype=pd.SparseDtype(object)) + expected_arr = expected_sparse.astype(object) + expected = DataFrame({"a": expected_arr, "b": expected_sparse}) + + tm.assert_frame_equal(result, expected) + + result_empty = df.iloc[:0, :].map(func, na_action=na_action) + expected_empty = expected.iloc[:0, :] + tm.assert_frame_equal(result_empty, expected_empty) + + +def test_map_str(): + # GH 2786 + df = DataFrame(np.random.default_rng(2).random((3, 4))) + df2 = df.copy() + cols = ["a", "a", "a", "a"] + df.columns = cols + + expected = df2.map(str) + expected.columns = cols + result = df.map(str) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "col, val", + [["datetime", Timestamp("20130101")], ["timedelta", pd.Timedelta("1 min")]], +) +def test_map_datetimelike(col, val): + # datetime/timedelta + df = DataFrame(np.random.default_rng(2).random((3, 4))) + df[col] = val + result = df.map(str) + assert result.loc[0, col] == str(df.loc[0, col]) + + +@pytest.mark.parametrize( + "expected", + [ + DataFrame(), + DataFrame(columns=list("ABC")), + DataFrame(index=list("ABC")), + DataFrame({"A": [], "B": [], "C": []}), + ], +) +@pytest.mark.parametrize("func", [round, lambda x: x]) +def test_map_empty(expected, func): + # GH 8222 + result = expected.map(func) + tm.assert_frame_equal(result, expected) + + +def test_map_kwargs(): + # GH 40652 + result = DataFrame([[1, 2], [3, 4]]).map(lambda x, y: x + y, y=2) + expected = DataFrame([[3, 4], [5, 6]]) + tm.assert_frame_equal(result, expected) + + +def test_map_na_ignore(float_frame): + # GH 23803 + strlen_frame = float_frame.map(lambda x: len(str(x))) + float_frame_with_na = float_frame.copy() + mask = np.random.default_rng(2).integers(0, 2, size=float_frame.shape, dtype=bool) + float_frame_with_na[mask] = pd.NA + strlen_frame_na_ignore = float_frame_with_na.map( + lambda x: len(str(x)), na_action="ignore" + ) + # Set float64 type to avoid upcast when setting NA below + strlen_frame_with_na = 
strlen_frame.copy().astype("float64") + strlen_frame_with_na[mask] = pd.NA + tm.assert_frame_equal(strlen_frame_na_ignore, strlen_frame_with_na) + + +def test_map_box_timestamps(): + # GH 2689, GH 2627 + ser = Series(date_range("1/1/2000", periods=10)) + + def func(x): + return (x.hour, x.day, x.month) + + # it works! + DataFrame(ser).map(func) + + +def test_map_box(): + # ufunc will not be boxed. Same test cases as the test_map_box + df = DataFrame( + { + "a": [Timestamp("2011-01-01"), Timestamp("2011-01-02")], + "b": [ + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + ], + "c": [pd.Timedelta("1 days"), pd.Timedelta("2 days")], + "d": [ + pd.Period("2011-01-01", freq="M"), + pd.Period("2011-01-02", freq="M"), + ], + } + ) + + result = df.map(lambda x: type(x).__name__) + expected = DataFrame( + { + "a": ["Timestamp", "Timestamp"], + "b": ["Timestamp", "Timestamp"], + "c": ["Timedelta", "Timedelta"], + "d": ["Period", "Period"], + } + ) + tm.assert_frame_equal(result, expected) + + +def test_frame_map_dont_convert_datetime64(): + from pandas.tseries.offsets import BDay + + df = DataFrame({"x1": [datetime(1996, 1, 1)]}) + + df = df.map(lambda x: x + BDay()) + df = df.map(lambda x: x + BDay()) + + result = df.x1.dtype + assert result == "M8[ns]" + + +def test_map_function_runs_once(): + df = DataFrame({"a": [1, 2, 3]}) + values = [] # Save values function is applied to + + def reducing_function(val): + values.append(val) + + def non_reducing_function(val): + values.append(val) + return val + + for func in [reducing_function, non_reducing_function]: + del values[:] + + df.map(func) + assert values == df.a.to_list() + + +def test_map_type(): + # GH 46719 + df = DataFrame( + {"col1": [3, "string", float], "col2": [0.25, datetime(2020, 1, 1), np.nan]}, + index=["a", "b", "c"], + ) + + result = df.map(type) + expected = DataFrame( + {"col1": [int, str, type], "col2": [float, datetime, float]}, + index=["a", "b", "c"], + ) + tm.assert_frame_equal(result, expected) + + +def test_map_invalid_na_action(float_frame): + # GH 23803 + with pytest.raises(ValueError, match="na_action must be .*Got 'abc'"): + float_frame.map(lambda x: len(str(x)), na_action="abc") + + +def test_applymap_deprecated(): + # GH52353 + df = DataFrame({"a": [1, 2, 3]}) + msg = "DataFrame.applymap has been deprecated. Use DataFrame.map instead." 
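+    # Illustrative sketch (added): DataFrame.map is the replacement API and
+    # produces the same elementwise result without the FutureWarning.
+    demo_map = DataFrame({"a": [1, 2, 3]})
+    tm.assert_frame_equal(demo_map.map(lambda x: x + 1), demo_map + 1)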
+ with tm.assert_produces_warning(FutureWarning, match=msg): + df.applymap(lambda x: x) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_matmul.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_matmul.py new file mode 100644 index 00000000..be9462b6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_matmul.py @@ -0,0 +1,98 @@ +import operator + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, +) +import pandas._testing as tm + + +class TestMatMul: + def test_matmul(self): + # matmul test is for GH#10259 + a = DataFrame( + np.random.default_rng(2).standard_normal((3, 4)), + index=["a", "b", "c"], + columns=["p", "q", "r", "s"], + ) + b = DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), + index=["p", "q", "r", "s"], + columns=["one", "two"], + ) + + # DataFrame @ DataFrame + result = operator.matmul(a, b) + expected = DataFrame( + np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] + ) + tm.assert_frame_equal(result, expected) + + # DataFrame @ Series + result = operator.matmul(a, b.one) + expected = Series(np.dot(a.values, b.one.values), index=["a", "b", "c"]) + tm.assert_series_equal(result, expected) + + # np.array @ DataFrame + result = operator.matmul(a.values, b) + assert isinstance(result, DataFrame) + assert result.columns.equals(b.columns) + assert result.index.equals(Index(range(3))) + expected = np.dot(a.values, b.values) + tm.assert_almost_equal(result.values, expected) + + # nested list @ DataFrame (__rmatmul__) + result = operator.matmul(a.values.tolist(), b) + expected = DataFrame( + np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] + ) + tm.assert_almost_equal(result.values, expected.values) + + # mixed dtype DataFrame @ DataFrame + a["q"] = a.q.round().astype(int) + result = operator.matmul(a, b) + expected = DataFrame( + np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] + ) + tm.assert_frame_equal(result, expected) + + # different dtypes DataFrame @ DataFrame + a = a.astype(int) + result = operator.matmul(a, b) + expected = DataFrame( + np.dot(a.values, b.values), index=["a", "b", "c"], columns=["one", "two"] + ) + tm.assert_frame_equal(result, expected) + + # unaligned + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 4)), + index=[1, 2, 3], + columns=range(4), + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + index=range(5), + columns=[1, 2, 3], + ) + + with pytest.raises(ValueError, match="aligned"): + operator.matmul(df, df2) + + def test_matmul_message_shapes(self): + # GH#21581 exception message should reflect original shapes, + # not transposed shapes + a = np.random.default_rng(2).random((10, 4)) + b = np.random.default_rng(2).random((5, 3)) + + df = DataFrame(b) + + msg = r"shapes \(10, 4\) and \(5, 3\) not aligned" + with pytest.raises(ValueError, match=msg): + a @ df + with pytest.raises(ValueError, match=msg): + a.tolist() @ df diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_nlargest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_nlargest.py new file mode 100644 index 00000000..0bdf9a0e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_nlargest.py @@ -0,0 +1,250 @@ +""" +Note: for naming purposes, most tests are title with as e.g. "test_nlargest_foo" +but are implicitly also testing nsmallest_foo. 
+""" +from string import ascii_lowercase + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.util.version import Version + + +@pytest.fixture +def df_duplicates(): + return pd.DataFrame( + {"a": [1, 2, 3, 4, 4], "b": [1, 1, 1, 1, 1], "c": [0, 1, 2, 5, 4]}, + index=[0, 0, 1, 1, 1], + ) + + +@pytest.fixture +def df_strings(): + return pd.DataFrame( + { + "a": np.random.default_rng(2).permutation(10), + "b": list(ascii_lowercase[:10]), + "c": np.random.default_rng(2).permutation(10).astype("float64"), + } + ) + + +@pytest.fixture +def df_main_dtypes(): + return pd.DataFrame( + { + "group": [1, 1, 2], + "int": [1, 2, 3], + "float": [4.0, 5.0, 6.0], + "string": list("abc"), + "category_string": pd.Series(list("abc")).astype("category"), + "category_int": [7, 8, 9], + "datetime": pd.date_range("20130101", periods=3), + "datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "timedelta": pd.timedelta_range("1 s", periods=3, freq="s"), + }, + columns=[ + "group", + "int", + "float", + "string", + "category_string", + "category_int", + "datetime", + "datetimetz", + "timedelta", + ], + ) + + +class TestNLargestNSmallest: + # ---------------------------------------------------------------------- + # Top / bottom + @pytest.mark.parametrize( + "order", + [ + ["a"], + ["c"], + ["a", "b"], + ["a", "c"], + ["b", "a"], + ["b", "c"], + ["a", "b", "c"], + ["c", "a", "b"], + ["c", "b", "a"], + ["b", "c", "a"], + ["b", "a", "c"], + # dups! + ["b", "c", "c"], + ], + ) + @pytest.mark.parametrize("n", range(1, 11)) + def test_nlargest_n(self, df_strings, nselect_method, n, order): + # GH#10393 + df = df_strings + if "b" in order: + error_msg = ( + f"Column 'b' has dtype object, " + f"cannot use method '{nselect_method}' with this dtype" + ) + with pytest.raises(TypeError, match=error_msg): + getattr(df, nselect_method)(n, order) + else: + ascending = nselect_method == "nsmallest" + result = getattr(df, nselect_method)(n, order) + expected = df.sort_values(order, ascending=ascending).head(n) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "columns", [["group", "category_string"], ["group", "string"]] + ) + def test_nlargest_error(self, df_main_dtypes, nselect_method, columns): + df = df_main_dtypes + col = columns[1] + error_msg = ( + f"Column '{col}' has dtype {df[col].dtype}, " + f"cannot use method '{nselect_method}' with this dtype" + ) + # escape some characters that may be in the repr + error_msg = ( + error_msg.replace("(", "\\(") + .replace(")", "\\)") + .replace("[", "\\[") + .replace("]", "\\]") + ) + with pytest.raises(TypeError, match=error_msg): + getattr(df, nselect_method)(2, columns) + + def test_nlargest_all_dtypes(self, df_main_dtypes): + df = df_main_dtypes + df.nsmallest(2, list(set(df) - {"category_string", "string"})) + df.nlargest(2, list(set(df) - {"category_string", "string"})) + + def test_nlargest_duplicates_on_starter_columns(self): + # regression test for GH#22752 + + df = pd.DataFrame({"a": [2, 2, 2, 1, 1, 1], "b": [1, 2, 3, 3, 2, 1]}) + + result = df.nlargest(4, columns=["a", "b"]) + expected = pd.DataFrame( + {"a": [2, 2, 2, 1], "b": [3, 2, 1, 3]}, index=[2, 1, 0, 3] + ) + tm.assert_frame_equal(result, expected) + + result = df.nsmallest(4, columns=["a", "b"]) + expected = pd.DataFrame( + {"a": [1, 1, 1, 2], "b": [1, 2, 3, 1]}, index=[5, 4, 3, 0] + ) + tm.assert_frame_equal(result, expected) + + def test_nlargest_n_identical_values(self): + # GH#15297 + df = pd.DataFrame({"a": [1] * 5, "b": 
[1, 2, 3, 4, 5]}) + + result = df.nlargest(3, "a") + expected = pd.DataFrame({"a": [1] * 3, "b": [1, 2, 3]}, index=[0, 1, 2]) + tm.assert_frame_equal(result, expected) + + result = df.nsmallest(3, "a") + expected = pd.DataFrame({"a": [1] * 3, "b": [1, 2, 3]}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "order", + [["a", "b", "c"], ["c", "b", "a"], ["a"], ["b"], ["a", "b"], ["c", "b"]], + ) + @pytest.mark.parametrize("n", range(1, 6)) + def test_nlargest_n_duplicate_index(self, df_duplicates, n, order, request): + # GH#13412 + + df = df_duplicates + result = df.nsmallest(n, order) + expected = df.sort_values(order).head(n) + tm.assert_frame_equal(result, expected) + + result = df.nlargest(n, order) + expected = df.sort_values(order, ascending=False).head(n) + if Version(np.__version__) >= Version("1.25") and ( + (order == ["a"] and n in (1, 2, 3, 4)) or (order == ["a", "b"]) and n == 5 + ): + request.node.add_marker( + pytest.mark.xfail( + reason=( + "pandas default unstable sorting of duplicates" + "issue with numpy>=1.25 with AVX instructions" + ), + strict=False, + ) + ) + tm.assert_frame_equal(result, expected) + + def test_nlargest_duplicate_keep_all_ties(self): + # GH#16818 + df = pd.DataFrame( + {"a": [5, 4, 4, 2, 3, 3, 3, 3], "b": [10, 9, 8, 7, 5, 50, 10, 20]} + ) + result = df.nlargest(4, "a", keep="all") + expected = pd.DataFrame( + { + "a": {0: 5, 1: 4, 2: 4, 4: 3, 5: 3, 6: 3, 7: 3}, + "b": {0: 10, 1: 9, 2: 8, 4: 5, 5: 50, 6: 10, 7: 20}, + } + ) + tm.assert_frame_equal(result, expected) + + result = df.nsmallest(2, "a", keep="all") + expected = pd.DataFrame( + { + "a": {3: 2, 4: 3, 5: 3, 6: 3, 7: 3}, + "b": {3: 7, 4: 5, 5: 50, 6: 10, 7: 20}, + } + ) + tm.assert_frame_equal(result, expected) + + def test_nlargest_multiindex_column_lookup(self): + # Check whether tuples are correctly treated as multi-level lookups. 
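+        # Illustrative sketch (added, hypothetical frame): a tuple key names
+        # a single column of a column MultiIndex, so nlargest ranks by that
+        # one column rather than treating the tuple as two separate keys.
+        demo_mi = pd.DataFrame({("x", "a"): [3, 1, 2], ("x", "b"): [9, 8, 7]})
+        assert demo_mi.nlargest(1, ("x", "a")).index[0] == 0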
+ # GH#23033 + df = pd.DataFrame( + columns=pd.MultiIndex.from_product([["x"], ["a", "b"]]), + data=[[0.33, 0.13], [0.86, 0.25], [0.25, 0.70], [0.85, 0.91]], + ) + + # nsmallest + result = df.nsmallest(3, ("x", "a")) + expected = df.iloc[[2, 0, 3]] + tm.assert_frame_equal(result, expected) + + # nlargest + result = df.nlargest(3, ("x", "b")) + expected = df.iloc[[3, 2, 1]] + tm.assert_frame_equal(result, expected) + + def test_nlargest_nan(self): + # GH#43060 + df = pd.DataFrame([np.nan, np.nan, 0, 1, 2, 3]) + result = df.nlargest(5, 0) + expected = df.sort_values(0, ascending=False).head(5) + tm.assert_frame_equal(result, expected) + + def test_nsmallest_nan_after_n_element(self): + # GH#46589 + df = pd.DataFrame( + { + "a": [1, 2, 3, 4, 5, None, 7], + "b": [7, 6, 5, 4, 3, 2, 1], + "c": [1, 1, 2, 2, 3, 3, 3], + }, + index=range(7), + ) + result = df.nsmallest(5, columns=["a", "b"]) + expected = pd.DataFrame( + { + "a": [1, 2, 3, 4, 5], + "b": [7, 6, 5, 4, 3], + "c": [1, 1, 2, 2, 3], + }, + index=range(5), + ).astype({"a": "float"}) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pct_change.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pct_change.py new file mode 100644 index 00000000..92b66e12 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pct_change.py @@ -0,0 +1,180 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +class TestDataFramePctChange: + @pytest.mark.parametrize( + "periods, fill_method, limit, exp", + [ + (1, "ffill", None, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, 0]), + (1, "ffill", 1, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, np.nan]), + (1, "bfill", None, [np.nan, 0, 0, 1, 1, 1.5, np.nan, np.nan]), + (1, "bfill", 1, [np.nan, np.nan, 0, 1, 1, 1.5, np.nan, np.nan]), + (-1, "ffill", None, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, 0, np.nan]), + (-1, "ffill", 1, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, np.nan, np.nan]), + (-1, "bfill", None, [0, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]), + (-1, "bfill", 1, [np.nan, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]), + ], + ) + def test_pct_change_with_nas( + self, periods, fill_method, limit, exp, frame_or_series + ): + vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan] + obj = frame_or_series(vals) + + msg = ( + "The 'fill_method' keyword being not None and the 'limit' keyword in " + f"{type(obj).__name__}.pct_change are deprecated" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + res = obj.pct_change(periods=periods, fill_method=fill_method, limit=limit) + tm.assert_equal(res, frame_or_series(exp)) + + def test_pct_change_numeric(self): + # GH#11150 + pnl = DataFrame( + [np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(0, 40, 10)] + ).astype(np.float64) + pnl.iat[1, 0] = np.nan + pnl.iat[1, 1] = np.nan + pnl.iat[2, 3] = 60 + + msg = ( + "The 'fill_method' keyword being not None and the 'limit' keyword in " + "DataFrame.pct_change are deprecated" + ) + + for axis in range(2): + expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(axis=axis) - 1 + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pnl.pct_change(axis=axis, fill_method="pad") + tm.assert_frame_equal(result, expected) + + def test_pct_change(self, datetime_frame): + msg = ( + "The 'fill_method' keyword being not None and the 'limit' keyword in " + "DataFrame.pct_change are deprecated" + ) + + rs = 
datetime_frame.pct_change(fill_method=None) + tm.assert_frame_equal(rs, datetime_frame / datetime_frame.shift(1) - 1) + + rs = datetime_frame.pct_change(2) + filled = datetime_frame.ffill() + tm.assert_frame_equal(rs, filled / filled.shift(2) - 1) + + with tm.assert_produces_warning(FutureWarning, match=msg): + rs = datetime_frame.pct_change(fill_method="bfill", limit=1) + filled = datetime_frame.bfill(limit=1) + tm.assert_frame_equal(rs, filled / filled.shift(1) - 1) + + rs = datetime_frame.pct_change(freq="5D") + filled = datetime_frame.ffill() + tm.assert_frame_equal( + rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled) + ) + + def test_pct_change_shift_over_nas(self): + s = Series([1.0, 1.5, np.nan, 2.5, 3.0]) + + df = DataFrame({"a": s, "b": s}) + + msg = "The default fill_method='pad' in DataFrame.pct_change is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + chg = df.pct_change() + + expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2]) + edf = DataFrame({"a": expected, "b": expected}) + tm.assert_frame_equal(chg, edf) + + @pytest.mark.parametrize( + "freq, periods, fill_method, limit", + [ + ("5B", 5, None, None), + ("3B", 3, None, None), + ("3B", 3, "bfill", None), + ("7B", 7, "pad", 1), + ("7B", 7, "bfill", 3), + ("14B", 14, None, None), + ], + ) + def test_pct_change_periods_freq( + self, datetime_frame, freq, periods, fill_method, limit + ): + msg = ( + "The 'fill_method' keyword being not None and the 'limit' keyword in " + "DataFrame.pct_change are deprecated" + ) + + # GH#7292 + with tm.assert_produces_warning(FutureWarning, match=msg): + rs_freq = datetime_frame.pct_change( + freq=freq, fill_method=fill_method, limit=limit + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + rs_periods = datetime_frame.pct_change( + periods, fill_method=fill_method, limit=limit + ) + tm.assert_frame_equal(rs_freq, rs_periods) + + empty_ts = DataFrame(index=datetime_frame.index, columns=datetime_frame.columns) + with tm.assert_produces_warning(FutureWarning, match=msg): + rs_freq = empty_ts.pct_change( + freq=freq, fill_method=fill_method, limit=limit + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + rs_periods = empty_ts.pct_change( + periods, fill_method=fill_method, limit=limit + ) + tm.assert_frame_equal(rs_freq, rs_periods) + + +@pytest.mark.parametrize("fill_method", ["pad", "ffill", None]) +def test_pct_change_with_duplicated_indices(fill_method): + # GH30463 + data = DataFrame( + {0: [np.nan, 1, 2, 3, 9, 18], 1: [0, 1, np.nan, 3, 9, 18]}, index=["a", "b"] * 3 + ) + + warn = None if fill_method is None else FutureWarning + msg = ( + "The 'fill_method' keyword being not None and the 'limit' keyword in " + "DataFrame.pct_change are deprecated" + ) + with tm.assert_produces_warning(warn, match=msg): + result = data.pct_change(fill_method=fill_method) + + if fill_method is None: + second_column = [np.nan, np.inf, np.nan, np.nan, 2.0, 1.0] + else: + second_column = [np.nan, np.inf, 0.0, 2.0, 2.0, 1.0] + expected = DataFrame( + {0: [np.nan, np.nan, 1.0, 0.5, 2.0, 1.0], 1: second_column}, + index=["a", "b"] * 3, + ) + tm.assert_frame_equal(result, expected) + + +def test_pct_change_none_beginning_no_warning(): + # GH#54481 + df = DataFrame( + [ + [1, None], + [2, 1], + [3, 2], + [4, 3], + [5, 4], + ] + ) + result = df.pct_change() + expected = DataFrame( + {0: [np.nan, 1, 0.5, 1 / 3, 0.25], 1: [np.nan, np.nan, 1, 0.5, 1 / 3]} + ) + tm.assert_frame_equal(result, expected) diff --git 
a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pipe.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pipe.py new file mode 100644 index 00000000..5bcc4360 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pipe.py @@ -0,0 +1,39 @@ +import pytest + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +class TestPipe: + def test_pipe(self, frame_or_series): + obj = DataFrame({"A": [1, 2, 3]}) + expected = DataFrame({"A": [1, 4, 9]}) + if frame_or_series is Series: + obj = obj["A"] + expected = expected["A"] + + f = lambda x, y: x**y + result = obj.pipe(f, 2) + tm.assert_equal(result, expected) + + def test_pipe_tuple(self, frame_or_series): + obj = DataFrame({"A": [1, 2, 3]}) + obj = tm.get_obj(obj, frame_or_series) + + f = lambda x, y: y + result = obj.pipe((f, "y"), 0) + tm.assert_equal(result, obj) + + def test_pipe_tuple_error(self, frame_or_series): + obj = DataFrame({"A": [1, 2, 3]}) + obj = tm.get_obj(obj, frame_or_series) + + f = lambda x, y: y + + msg = "y is both the pipe target and a keyword argument" + + with pytest.raises(ValueError, match=msg): + obj.pipe((f, "y"), x=1, y=0) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pop.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pop.py new file mode 100644 index 00000000..617f0c3a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pop.py @@ -0,0 +1,71 @@ +import numpy as np + +from pandas import ( + DataFrame, + MultiIndex, + Series, +) +import pandas._testing as tm + + +class TestDataFramePop: + def test_pop(self, float_frame): + float_frame.columns.name = "baz" + + float_frame.pop("A") + assert "A" not in float_frame + + float_frame["foo"] = "bar" + float_frame.pop("foo") + assert "foo" not in float_frame + assert float_frame.columns.name == "baz" + + # gh-10912: inplace ops cause caching issue + a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"], index=["X", "Y"]) + b = a.pop("B") + b += 1 + + # original frame + expected = DataFrame([[1, 3], [4, 6]], columns=["A", "C"], index=["X", "Y"]) + tm.assert_frame_equal(a, expected) + + # result + expected = Series([2, 5], index=["X", "Y"], name="B") + 1 + tm.assert_series_equal(b, expected) + + def test_pop_non_unique_cols(self): + df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]}) + df.columns = ["a", "b", "a"] + + res = df.pop("a") + assert type(res) == DataFrame + assert len(res) == 2 + assert len(df.columns) == 1 + assert "b" in df.columns + assert "a" not in df.columns + assert len(df.index) == 2 + + def test_mixed_depth_pop(self): + arrays = [ + ["a", "top", "top", "routine1", "routine1", "routine2"], + ["", "OD", "OD", "result1", "result2", "result1"], + ["", "wx", "wy", "", "", ""], + ] + + tuples = sorted(zip(*arrays)) + index = MultiIndex.from_tuples(tuples) + df = DataFrame(np.random.default_rng(2).standard_normal((4, 6)), columns=index) + + df1 = df.copy() + df2 = df.copy() + result = df1.pop("a") + expected = df2.pop(("a", "", "")) + tm.assert_series_equal(expected, result, check_names=False) + tm.assert_frame_equal(df1, df2) + assert result.name == "a" + + expected = df1["top"] + df1 = df1.drop(["top"], axis=1) + result = df2.pop("top") + tm.assert_frame_equal(expected, result) + tm.assert_frame_equal(df1, df2) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_quantile.py 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_quantile.py new file mode 100644 index 00000000..61b253b2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_quantile.py @@ -0,0 +1,981 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + Timestamp, +) +import pandas._testing as tm + + +@pytest.fixture( + params=[["linear", "single"], ["nearest", "table"]], ids=lambda x: "-".join(x) +) +def interp_method(request): + """(interpolation, method) arguments for quantile""" + return request.param + + +class TestDataFrameQuantile: + @pytest.mark.parametrize( + "df,expected", + [ + [ + DataFrame( + { + 0: Series(pd.arrays.SparseArray([1, 2])), + 1: Series(pd.arrays.SparseArray([3, 4])), + } + ), + Series([1.5, 3.5], name=0.5), + ], + [ + DataFrame(Series([0.0, None, 1.0, 2.0], dtype="Sparse[float]")), + Series([1.0], name=0.5), + ], + ], + ) + def test_quantile_sparse(self, df, expected): + # GH#17198 + # GH#24600 + result = df.quantile() + expected = expected.astype("Sparse[float]") + tm.assert_series_equal(result, expected) + + def test_quantile( + self, datetime_frame, interp_method, using_array_manager, request + ): + interpolation, method = interp_method + df = datetime_frame + result = df.quantile( + 0.1, axis=0, numeric_only=True, interpolation=interpolation, method=method + ) + expected = Series( + [np.percentile(df[col], 10) for col in df.columns], + index=df.columns, + name=0.1, + ) + if interpolation == "linear": + # np.percentile values only comparable to linear interpolation + tm.assert_series_equal(result, expected) + else: + tm.assert_index_equal(result.index, expected.index) + request.node.add_marker( + pytest.mark.xfail( + using_array_manager, reason="Name set incorrectly for arraymanager" + ) + ) + assert result.name == expected.name + + result = df.quantile( + 0.9, axis=1, numeric_only=True, interpolation=interpolation, method=method + ) + expected = Series( + [np.percentile(df.loc[date], 90) for date in df.index], + index=df.index, + name=0.9, + ) + if interpolation == "linear": + # np.percentile values only comparable to linear interpolation + tm.assert_series_equal(result, expected) + else: + tm.assert_index_equal(result.index, expected.index) + request.node.add_marker( + pytest.mark.xfail( + using_array_manager, reason="Name set incorrectly for arraymanager" + ) + ) + assert result.name == expected.name + + def test_empty(self, interp_method): + interpolation, method = interp_method + q = DataFrame({"x": [], "y": []}).quantile( + 0.1, axis=0, numeric_only=True, interpolation=interpolation, method=method + ) + assert np.isnan(q["x"]) and np.isnan(q["y"]) + + def test_non_numeric_exclusion(self, interp_method, request, using_array_manager): + interpolation, method = interp_method + df = DataFrame({"col1": ["A", "A", "B", "B"], "col2": [1, 2, 3, 4]}) + rs = df.quantile( + 0.5, numeric_only=True, interpolation=interpolation, method=method + ) + xp = df.median(numeric_only=True).rename(0.5) + if interpolation == "nearest": + xp = (xp + 0.5).astype(np.int64) + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) + tm.assert_series_equal(rs, xp) + + def test_axis(self, interp_method, request, using_array_manager): + # axis + interpolation, method = interp_method + df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3]) + result = df.quantile(0.5, axis=1, 
interpolation=interpolation, method=method) + expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5) + if interpolation == "nearest": + expected = expected.astype(np.int64) + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) + tm.assert_series_equal(result, expected) + + result = df.quantile( + [0.5, 0.75], axis=1, interpolation=interpolation, method=method + ) + expected = DataFrame( + {1: [1.5, 1.75], 2: [2.5, 2.75], 3: [3.5, 3.75]}, index=[0.5, 0.75] + ) + if interpolation == "nearest": + expected.iloc[0, :] -= 0.5 + expected.iloc[1, :] += 0.25 + expected = expected.astype(np.int64) + tm.assert_frame_equal(result, expected, check_index_type=True) + + def test_axis_numeric_only_true(self, interp_method, request, using_array_manager): + # We may want to break API in the future to change this + # so that we exclude non-numeric along the same axis + # See GH #7312 + interpolation, method = interp_method + df = DataFrame([[1, 2, 3], ["a", "b", 4]]) + result = df.quantile( + 0.5, axis=1, numeric_only=True, interpolation=interpolation, method=method + ) + expected = Series([3.0, 4.0], index=[0, 1], name=0.5) + if interpolation == "nearest": + expected = expected.astype(np.int64) + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) + tm.assert_series_equal(result, expected) + + def test_quantile_date_range(self, interp_method, request, using_array_manager): + # GH 2460 + interpolation, method = interp_method + dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific") + ser = Series(dti) + df = DataFrame(ser) + + result = df.quantile( + numeric_only=False, interpolation=interpolation, method=method + ) + expected = Series( + ["2016-01-02 00:00:00"], name=0.5, dtype="datetime64[ns, US/Pacific]" + ) + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) + + tm.assert_series_equal(result, expected) + + def test_quantile_axis_mixed(self, interp_method, request, using_array_manager): + # mixed on axis=1 + interpolation, method = interp_method + df = DataFrame( + { + "A": [1, 2, 3], + "B": [2.0, 3.0, 4.0], + "C": pd.date_range("20130101", periods=3), + "D": ["foo", "bar", "baz"], + } + ) + result = df.quantile( + 0.5, axis=1, numeric_only=True, interpolation=interpolation, method=method + ) + expected = Series([1.5, 2.5, 3.5], name=0.5) + if interpolation == "nearest": + expected -= 0.5 + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) + tm.assert_series_equal(result, expected) + + # must raise + msg = "'<' not supported between instances of 'Timestamp' and 'float'" + with pytest.raises(TypeError, match=msg): + df.quantile(0.5, axis=1, numeric_only=False) + + def test_quantile_axis_parameter(self, interp_method, request, using_array_manager): + # GH 9543/9544 + interpolation, method = interp_method + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) + df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3]) + + result = df.quantile(0.5, axis=0, interpolation=interpolation, method=method) + + expected = Series([2.0, 3.0], index=["A", "B"], name=0.5) + if interpolation == "nearest": + expected = expected.astype(np.int64) + tm.assert_series_equal(result, expected) + + 
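+        # Illustrative sketch (added): axis=0 and axis="index" are aliases,
+        # as are axis=1 and axis="columns"; the same equivalence is then
+        # re-checked on the fixture frame just below.
+        demo_ax = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]})
+        tm.assert_series_equal(
+            demo_ax.quantile(0.5, axis=0), demo_ax.quantile(0.5, axis="index")
+        )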
expected = df.quantile( + 0.5, axis="index", interpolation=interpolation, method=method + ) + if interpolation == "nearest": + expected = expected.astype(np.int64) + tm.assert_series_equal(result, expected) + + result = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) + + expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5) + if interpolation == "nearest": + expected = expected.astype(np.int64) + tm.assert_series_equal(result, expected) + + result = df.quantile( + 0.5, axis="columns", interpolation=interpolation, method=method + ) + tm.assert_series_equal(result, expected) + + msg = "No axis named -1 for object type DataFrame" + with pytest.raises(ValueError, match=msg): + df.quantile(0.1, axis=-1, interpolation=interpolation, method=method) + msg = "No axis named column for object type DataFrame" + with pytest.raises(ValueError, match=msg): + df.quantile(0.1, axis="column") + + def test_quantile_interpolation(self): + # see gh-10174 + + # interpolation method other than default linear + df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3]) + result = df.quantile(0.5, axis=1, interpolation="nearest") + expected = Series([1, 2, 3], index=[1, 2, 3], name=0.5) + tm.assert_series_equal(result, expected) + + # cross-check interpolation=nearest results in original dtype + exp = np.percentile( + np.array([[1, 2, 3], [2, 3, 4]]), + 0.5, + axis=0, + method="nearest", + ) + expected = Series(exp, index=[1, 2, 3], name=0.5, dtype="int64") + tm.assert_series_equal(result, expected) + + # float + df = DataFrame({"A": [1.0, 2.0, 3.0], "B": [2.0, 3.0, 4.0]}, index=[1, 2, 3]) + result = df.quantile(0.5, axis=1, interpolation="nearest") + expected = Series([1.0, 2.0, 3.0], index=[1, 2, 3], name=0.5) + tm.assert_series_equal(result, expected) + exp = np.percentile( + np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]]), + 0.5, + axis=0, + method="nearest", + ) + expected = Series(exp, index=[1, 2, 3], name=0.5, dtype="float64") + tm.assert_series_equal(result, expected) + + # axis + result = df.quantile([0.5, 0.75], axis=1, interpolation="lower") + expected = DataFrame( + {1: [1.0, 1.0], 2: [2.0, 2.0], 3: [3.0, 3.0]}, index=[0.5, 0.75] + ) + tm.assert_frame_equal(result, expected) + + # test degenerate case + df = DataFrame({"x": [], "y": []}) + q = df.quantile(0.1, axis=0, interpolation="higher") + assert np.isnan(q["x"]) and np.isnan(q["y"]) + + # multi + df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"]) + result = df.quantile([0.25, 0.5], interpolation="midpoint") + + # https://github.com/numpy/numpy/issues/7163 + expected = DataFrame( + [[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]], + index=[0.25, 0.5], + columns=["a", "b", "c"], + ) + tm.assert_frame_equal(result, expected) + + def test_quantile_interpolation_datetime(self, datetime_frame): + # see gh-10174 + + # interpolation = linear (default case) + df = datetime_frame + q = df.quantile(0.1, axis=0, numeric_only=True, interpolation="linear") + assert q["A"] == np.percentile(df["A"], 10) + + def test_quantile_interpolation_int(self, int_frame): + # see gh-10174 + + df = int_frame + # interpolation = linear (default case) + q = df.quantile(0.1) + assert q["A"] == np.percentile(df["A"], 10) + + # test with and without interpolation keyword + q1 = df.quantile(0.1, axis=0, interpolation="linear") + assert q1["A"] == np.percentile(df["A"], 10) + tm.assert_series_equal(q, q1) + + def test_quantile_multi(self, interp_method, request, using_array_manager): + interpolation, method = interp_method + df = 
DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"]) + result = df.quantile([0.25, 0.5], interpolation=interpolation, method=method) + expected = DataFrame( + [[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]], + index=[0.25, 0.5], + columns=["a", "b", "c"], + ) + if interpolation == "nearest": + expected = expected.astype(np.int64) + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) + tm.assert_frame_equal(result, expected) + + def test_quantile_multi_axis_1(self, interp_method, request, using_array_manager): + interpolation, method = interp_method + df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"]) + result = df.quantile( + [0.25, 0.5], axis=1, interpolation=interpolation, method=method + ) + expected = DataFrame( + [[1.0, 2.0, 3.0]] * 2, index=[0.25, 0.5], columns=[0, 1, 2] + ) + if interpolation == "nearest": + expected = expected.astype(np.int64) + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) + tm.assert_frame_equal(result, expected) + + def test_quantile_multi_empty(self, interp_method): + interpolation, method = interp_method + result = DataFrame({"x": [], "y": []}).quantile( + [0.1, 0.9], axis=0, interpolation=interpolation, method=method + ) + expected = DataFrame( + {"x": [np.nan, np.nan], "y": [np.nan, np.nan]}, index=[0.1, 0.9] + ) + tm.assert_frame_equal(result, expected) + + def test_quantile_datetime(self): + df = DataFrame({"a": pd.to_datetime(["2010", "2011"]), "b": [0, 5]}) + + # exclude datetime + result = df.quantile(0.5, numeric_only=True) + expected = Series([2.5], index=["b"], name=0.5) + tm.assert_series_equal(result, expected) + + # datetime + result = df.quantile(0.5, numeric_only=False) + expected = Series( + [Timestamp("2010-07-02 12:00:00"), 2.5], index=["a", "b"], name=0.5 + ) + tm.assert_series_equal(result, expected) + + # datetime w/ multi + result = df.quantile([0.5], numeric_only=False) + expected = DataFrame( + [[Timestamp("2010-07-02 12:00:00"), 2.5]], index=[0.5], columns=["a", "b"] + ) + tm.assert_frame_equal(result, expected) + + # axis = 1 + df["c"] = pd.to_datetime(["2011", "2012"]) + result = df[["a", "c"]].quantile(0.5, axis=1, numeric_only=False) + expected = Series( + [Timestamp("2010-07-02 12:00:00"), Timestamp("2011-07-02 12:00:00")], + index=[0, 1], + name=0.5, + ) + tm.assert_series_equal(result, expected) + + result = df[["a", "c"]].quantile([0.5], axis=1, numeric_only=False) + expected = DataFrame( + [[Timestamp("2010-07-02 12:00:00"), Timestamp("2011-07-02 12:00:00")]], + index=[0.5], + columns=[0, 1], + ) + tm.assert_frame_equal(result, expected) + + # empty when numeric_only=True + result = df[["a", "c"]].quantile(0.5, numeric_only=True) + expected = Series([], index=[], dtype=np.float64, name=0.5) + tm.assert_series_equal(result, expected) + + result = df[["a", "c"]].quantile([0.5], numeric_only=True) + expected = DataFrame(index=[0.5], columns=[]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "dtype", + [ + "datetime64[ns]", + "datetime64[ns, US/Pacific]", + "timedelta64[ns]", + "Period[D]", + ], + ) + def test_quantile_dt64_empty(self, dtype, interp_method): + # GH#41544 + interpolation, method = interp_method + df = DataFrame(columns=["a", "b"], dtype=dtype) + + res = df.quantile( + 0.5, axis=1, numeric_only=False, interpolation=interpolation, method=method + ) + expected = Series([], index=[], name=0.5, 
dtype=dtype) + tm.assert_series_equal(res, expected) + + # no columns in result, so no dtype preservation + res = df.quantile( + [0.5], + axis=1, + numeric_only=False, + interpolation=interpolation, + method=method, + ) + expected = DataFrame(index=[0.5], columns=[]) + tm.assert_frame_equal(res, expected) + + @pytest.mark.parametrize("invalid", [-1, 2, [0.5, -1], [0.5, 2]]) + def test_quantile_invalid(self, invalid, datetime_frame, interp_method): + msg = "percentiles should all be in the interval \\[0, 1\\]" + interpolation, method = interp_method + with pytest.raises(ValueError, match=msg): + datetime_frame.quantile(invalid, interpolation=interpolation, method=method) + + def test_quantile_box(self, interp_method, request, using_array_manager): + interpolation, method = interp_method + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) + df = DataFrame( + { + "A": [ + Timestamp("2011-01-01"), + Timestamp("2011-01-02"), + Timestamp("2011-01-03"), + ], + "B": [ + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-03", tz="US/Eastern"), + ], + "C": [ + pd.Timedelta("1 days"), + pd.Timedelta("2 days"), + pd.Timedelta("3 days"), + ], + } + ) + + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) + + exp = Series( + [ + Timestamp("2011-01-02"), + Timestamp("2011-01-02", tz="US/Eastern"), + pd.Timedelta("2 days"), + ], + name=0.5, + index=["A", "B", "C"], + ) + tm.assert_series_equal(res, exp) + + res = df.quantile( + [0.5], numeric_only=False, interpolation=interpolation, method=method + ) + exp = DataFrame( + [ + [ + Timestamp("2011-01-02"), + Timestamp("2011-01-02", tz="US/Eastern"), + pd.Timedelta("2 days"), + ] + ], + index=[0.5], + columns=["A", "B", "C"], + ) + tm.assert_frame_equal(res, exp) + + def test_quantile_box_nat(self): + # DatetimeLikeBlock may be consolidated and contain NaT in different loc + df = DataFrame( + { + "A": [ + Timestamp("2011-01-01"), + pd.NaT, + Timestamp("2011-01-02"), + Timestamp("2011-01-03"), + ], + "a": [ + Timestamp("2011-01-01"), + Timestamp("2011-01-02"), + pd.NaT, + Timestamp("2011-01-03"), + ], + "B": [ + Timestamp("2011-01-01", tz="US/Eastern"), + pd.NaT, + Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-03", tz="US/Eastern"), + ], + "b": [ + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + pd.NaT, + Timestamp("2011-01-03", tz="US/Eastern"), + ], + "C": [ + pd.Timedelta("1 days"), + pd.Timedelta("2 days"), + pd.Timedelta("3 days"), + pd.NaT, + ], + "c": [ + pd.NaT, + pd.Timedelta("1 days"), + pd.Timedelta("2 days"), + pd.Timedelta("3 days"), + ], + }, + columns=list("AaBbCc"), + ) + + res = df.quantile(0.5, numeric_only=False) + exp = Series( + [ + Timestamp("2011-01-02"), + Timestamp("2011-01-02"), + Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + pd.Timedelta("2 days"), + pd.Timedelta("2 days"), + ], + name=0.5, + index=list("AaBbCc"), + ) + tm.assert_series_equal(res, exp) + + res = df.quantile([0.5], numeric_only=False) + exp = DataFrame( + [ + [ + Timestamp("2011-01-02"), + Timestamp("2011-01-02"), + Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + pd.Timedelta("2 days"), + pd.Timedelta("2 days"), + ] + ], + index=[0.5], + columns=list("AaBbCc"), + ) + tm.assert_frame_equal(res, exp) + + def test_quantile_nan(self, interp_method, request, 
using_array_manager): + interpolation, method = interp_method + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) + # GH 14357 - float block where some cols have missing values + df = DataFrame({"a": np.arange(1, 6.0), "b": np.arange(1, 6.0)}) + df.iloc[-1, 1] = np.nan + + res = df.quantile(0.5, interpolation=interpolation, method=method) + exp = Series( + [3.0, 2.5 if interpolation == "linear" else 3.0], index=["a", "b"], name=0.5 + ) + tm.assert_series_equal(res, exp) + + res = df.quantile([0.5, 0.75], interpolation=interpolation, method=method) + exp = DataFrame( + { + "a": [3.0, 4.0], + "b": [2.5, 3.25] if interpolation == "linear" else [3.0, 4.0], + }, + index=[0.5, 0.75], + ) + tm.assert_frame_equal(res, exp) + + res = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) + exp = Series(np.arange(1.0, 6.0), name=0.5) + tm.assert_series_equal(res, exp) + + res = df.quantile( + [0.5, 0.75], axis=1, interpolation=interpolation, method=method + ) + exp = DataFrame([np.arange(1.0, 6.0)] * 2, index=[0.5, 0.75]) + if interpolation == "nearest": + exp.iloc[1, -1] = np.nan + tm.assert_frame_equal(res, exp) + + # full-nan column + df["b"] = np.nan + + res = df.quantile(0.5, interpolation=interpolation, method=method) + exp = Series([3.0, np.nan], index=["a", "b"], name=0.5) + tm.assert_series_equal(res, exp) + + res = df.quantile([0.5, 0.75], interpolation=interpolation, method=method) + exp = DataFrame({"a": [3.0, 4.0], "b": [np.nan, np.nan]}, index=[0.5, 0.75]) + tm.assert_frame_equal(res, exp) + + def test_quantile_nat(self, interp_method, request, using_array_manager): + interpolation, method = interp_method + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) + # full NaT column + df = DataFrame({"a": [pd.NaT, pd.NaT, pd.NaT]}) + + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) + exp = Series([pd.NaT], index=["a"], name=0.5) + tm.assert_series_equal(res, exp) + + res = df.quantile( + [0.5], numeric_only=False, interpolation=interpolation, method=method + ) + exp = DataFrame({"a": [pd.NaT]}, index=[0.5]) + tm.assert_frame_equal(res, exp) + + # mixed non-null / full null column + df = DataFrame( + { + "a": [ + Timestamp("2012-01-01"), + Timestamp("2012-01-02"), + Timestamp("2012-01-03"), + ], + "b": [pd.NaT, pd.NaT, pd.NaT], + } + ) + + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) + exp = Series([Timestamp("2012-01-02"), pd.NaT], index=["a", "b"], name=0.5) + tm.assert_series_equal(res, exp) + + res = df.quantile( + [0.5], numeric_only=False, interpolation=interpolation, method=method + ) + exp = DataFrame( + [[Timestamp("2012-01-02"), pd.NaT]], index=[0.5], columns=["a", "b"] + ) + tm.assert_frame_equal(res, exp) + + def test_quantile_empty_no_rows_floats(self, interp_method): + interpolation, method = interp_method + + df = DataFrame(columns=["a", "b"], dtype="float64") + + res = df.quantile(0.5, interpolation=interpolation, method=method) + exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5) + tm.assert_series_equal(res, exp) + + res = df.quantile([0.5], interpolation=interpolation, method=method) + exp = DataFrame([[np.nan, np.nan]], columns=["a", "b"], index=[0.5]) + tm.assert_frame_equal(res, exp) + + res = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) + exp = Series([], index=[], 
dtype="float64", name=0.5) + tm.assert_series_equal(res, exp) + + res = df.quantile([0.5], axis=1, interpolation=interpolation, method=method) + exp = DataFrame(columns=[], index=[0.5]) + tm.assert_frame_equal(res, exp) + + def test_quantile_empty_no_rows_ints(self, interp_method): + interpolation, method = interp_method + df = DataFrame(columns=["a", "b"], dtype="int64") + + res = df.quantile(0.5, interpolation=interpolation, method=method) + exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5) + tm.assert_series_equal(res, exp) + + def test_quantile_empty_no_rows_dt64(self, interp_method): + interpolation, method = interp_method + # datetimes + df = DataFrame(columns=["a", "b"], dtype="datetime64[ns]") + + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) + exp = Series( + [pd.NaT, pd.NaT], index=["a", "b"], dtype="datetime64[ns]", name=0.5 + ) + tm.assert_series_equal(res, exp) + + # Mixed dt64/dt64tz + df["a"] = df["a"].dt.tz_localize("US/Central") + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) + exp = exp.astype(object) + if interpolation == "nearest": + # GH#18463 TODO: would we prefer NaTs here? + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + exp = exp.fillna(np.nan, downcast=False) + tm.assert_series_equal(res, exp) + + # both dt64tz + df["b"] = df["b"].dt.tz_localize("US/Central") + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) + exp = exp.astype(df["b"].dtype) + tm.assert_series_equal(res, exp) + + def test_quantile_empty_no_columns(self, interp_method): + # GH#23925 _get_numeric_data may drop all columns + interpolation, method = interp_method + df = DataFrame(pd.date_range("1/1/18", periods=5)) + df.columns.name = "captain tightpants" + result = df.quantile( + 0.5, numeric_only=True, interpolation=interpolation, method=method + ) + expected = Series([], index=[], name=0.5, dtype=np.float64) + expected.index.name = "captain tightpants" + tm.assert_series_equal(result, expected) + + result = df.quantile( + [0.5], numeric_only=True, interpolation=interpolation, method=method + ) + expected = DataFrame([], index=[0.5], columns=[]) + expected.columns.name = "captain tightpants" + tm.assert_frame_equal(result, expected) + + def test_quantile_item_cache( + self, using_array_manager, interp_method, using_copy_on_write + ): + # previous behavior incorrectly retained an invalid _item_cache entry + interpolation, method = interp_method + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 3)), columns=["A", "B", "C"] + ) + df["D"] = df["A"] * 2 + ser = df["A"] + if not using_array_manager: + assert len(df._mgr.blocks) == 2 + + df.quantile(numeric_only=False, interpolation=interpolation, method=method) + + if using_copy_on_write: + ser.iloc[0] = 99 + assert df.iloc[0, 0] == df["A"][0] + assert df.iloc[0, 0] != 99 + else: + ser.values[0] = 99 + assert df.iloc[0, 0] == df["A"][0] + assert df.iloc[0, 0] == 99 + + def test_invalid_method(self): + with pytest.raises(ValueError, match="Invalid method: foo"): + DataFrame(range(1)).quantile(0.5, method="foo") + + def test_table_invalid_interpolation(self): + with pytest.raises(ValueError, match="Invalid interpolation: foo"): + DataFrame(range(1)).quantile(0.5, method="table", interpolation="foo") + + +class TestQuantileExtensionDtype: + # TODO: tests for axis=1? + # TODO: empty case?
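+ # --- annotation, not part of the vendored pandas file: a minimal, hedged + # sketch of the casting rule the tests below pin down. Quantiles of + # nullable-integer data come back as Float64, matching the float upcast + # applied to non-nullable ints, while the datetime, timedelta and period + # cases keep their own dtype (the IntervalIndex case is marked xfail). + # + # import pandas as pd + # ser = pd.Series(pd.array(range(9), dtype="Int64"), name="A") + # ser.quantile([0.5, 0, 1]) # Float64 values 4.0, 0.0, 8.0 + # ---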
+ + @pytest.fixture( + params=[ + pytest.param( + pd.IntervalIndex.from_breaks(range(10)), + marks=pytest.mark.xfail(reason="raises when trying to add Intervals"), + ), + pd.period_range("2016-01-01", periods=9, freq="D"), + pd.date_range("2016-01-01", periods=9, tz="US/Pacific"), + pd.timedelta_range("1 Day", periods=9), + pd.array(np.arange(9), dtype="Int64"), + pd.array(np.arange(9), dtype="Float64"), + ], + ids=lambda x: str(x.dtype), + ) + def index(self, request): + # NB: not actually an Index object + idx = request.param + idx.name = "A" + return idx + + @pytest.fixture + def obj(self, index, frame_or_series): + # bc index is not always an Index (yet), we need to re-patch .name + obj = frame_or_series(index).copy() + + if frame_or_series is Series: + obj.name = "A" + else: + obj.columns = ["A"] + return obj + + def compute_quantile(self, obj, qs): + if isinstance(obj, Series): + result = obj.quantile(qs) + else: + result = obj.quantile(qs, numeric_only=False) + return result + + def test_quantile_ea(self, request, obj, index): + # result should be invariant to shuffling + indexer = np.arange(len(index), dtype=np.intp) + np.random.default_rng(2).shuffle(indexer) + obj = obj.iloc[indexer] + + qs = [0.5, 0, 1] + result = self.compute_quantile(obj, qs) + + exp_dtype = index.dtype + if index.dtype == "Int64": + # match non-nullable casting behavior + exp_dtype = "Float64" + + # expected here assumes len(index) == 9 + expected = Series( + [index[4], index[0], index[-1]], dtype=exp_dtype, index=qs, name="A" + ) + expected = type(obj)(expected) + + tm.assert_equal(result, expected) + + def test_quantile_ea_with_na(self, obj, index): + obj.iloc[0] = index._na_value + obj.iloc[-1] = index._na_value + + # result should be invariant to shuffling + indexer = np.arange(len(index), dtype=np.intp) + np.random.default_rng(2).shuffle(indexer) + obj = obj.iloc[indexer] + + qs = [0.5, 0, 1] + result = self.compute_quantile(obj, qs) + + # expected here assumes len(index) == 9 + expected = Series( + [index[4], index[1], index[-2]], dtype=index.dtype, index=qs, name="A" + ) + expected = type(obj)(expected) + tm.assert_equal(result, expected) + + def test_quantile_ea_all_na(self, request, obj, index): + obj.iloc[:] = index._na_value + # Check dtypes were preserved; this was once a problem see GH#39763 + assert np.all(obj.dtypes == index.dtype) + + # result should be invariant to shuffling + indexer = np.arange(len(index), dtype=np.intp) + np.random.default_rng(2).shuffle(indexer) + obj = obj.iloc[indexer] + + qs = [0.5, 0, 1] + result = self.compute_quantile(obj, qs) + + expected = index.take([-1, -1, -1], allow_fill=True, fill_value=index._na_value) + expected = Series(expected, index=qs, name="A") + expected = type(obj)(expected) + tm.assert_equal(result, expected) + + def test_quantile_ea_scalar(self, request, obj, index): + # scalar qs + + # result should be invariant to shuffling + indexer = np.arange(len(index), dtype=np.intp) + np.random.default_rng(2).shuffle(indexer) + obj = obj.iloc[indexer] + + qs = 0.5 + result = self.compute_quantile(obj, qs) + + exp_dtype = index.dtype + if index.dtype == "Int64": + exp_dtype = "Float64" + + expected = Series({"A": index[4]}, dtype=exp_dtype, name=0.5) + if isinstance(obj, Series): + expected = expected["A"] + assert result == expected + else: + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "dtype, expected_data, expected_index, axis", + [ + ["float64", [], [], 1], + ["int64", [], [], 1], + ["float64", [np.nan, np.nan], ["a", "b"], 
0], + ["int64", [np.nan, np.nan], ["a", "b"], 0], + ], + ) + def test_empty_numeric(self, dtype, expected_data, expected_index, axis): + # GH 14564 + df = DataFrame(columns=["a", "b"], dtype=dtype) + result = df.quantile(0.5, axis=axis) + expected = Series( + expected_data, name=0.5, index=Index(expected_index), dtype="float64" + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "dtype, expected_data, expected_index, axis, expected_dtype", + [ + ["datetime64[ns]", [], [], 1, "datetime64[ns]"], + ["datetime64[ns]", [pd.NaT, pd.NaT], ["a", "b"], 0, "datetime64[ns]"], + ], + ) + def test_empty_datelike( + self, dtype, expected_data, expected_index, axis, expected_dtype + ): + # GH 14564 + df = DataFrame(columns=["a", "b"], dtype=dtype) + result = df.quantile(0.5, axis=axis, numeric_only=False) + expected = Series( + expected_data, name=0.5, index=Index(expected_index), dtype=expected_dtype + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "expected_data, expected_index, axis", + [ + [[np.nan, np.nan], range(2), 1], + [[], [], 0], + ], + ) + def test_datelike_numeric_only(self, expected_data, expected_index, axis): + # GH 14564 + df = DataFrame( + { + "a": pd.to_datetime(["2010", "2011"]), + "b": [0, 5], + "c": pd.to_datetime(["2011", "2012"]), + } + ) + result = df[["a", "c"]].quantile(0.5, axis=axis, numeric_only=True) + expected = Series( + expected_data, name=0.5, index=Index(expected_index), dtype=np.float64 + ) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rank.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rank.py new file mode 100644 index 00000000..b5b5e426 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rank.py @@ -0,0 +1,502 @@ +from datetime import ( + datetime, + timedelta, +) + +import numpy as np +import pytest + +from pandas._libs.algos import ( + Infinity, + NegInfinity, +) + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +class TestRank: + s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]) + df = DataFrame({"A": s, "B": s}) + + results = { + "average": np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5]), + "min": np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]), + "max": np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]), + "first": np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]), + "dense": np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]), + } + + @pytest.fixture(params=["average", "min", "max", "first", "dense"]) + def method(self, request): + """ + Fixture for trying all rank methods + """ + return request.param + + def test_rank(self, float_frame): + sp_stats = pytest.importorskip("scipy.stats") + + float_frame.loc[::2, "A"] = np.nan + float_frame.loc[::3, "B"] = np.nan + float_frame.loc[::4, "C"] = np.nan + float_frame.loc[::5, "D"] = np.nan + + ranks0 = float_frame.rank() + ranks1 = float_frame.rank(1) + mask = np.isnan(float_frame.values) + + fvals = float_frame.fillna(np.inf).values + + exp0 = np.apply_along_axis(sp_stats.rankdata, 0, fvals) + exp0[mask] = np.nan + + exp1 = np.apply_along_axis(sp_stats.rankdata, 1, fvals) + exp1[mask] = np.nan + + tm.assert_almost_equal(ranks0.values, exp0) + tm.assert_almost_equal(ranks1.values, exp1) + + # integers + df = DataFrame( + np.random.default_rng(2).integers(0, 5, size=40).reshape((10, 4)) + ) + + result = df.rank() + exp = df.astype(float).rank() + 
tm.assert_frame_equal(result, exp) + + result = df.rank(1) + exp = df.astype(float).rank(1) + tm.assert_frame_equal(result, exp) + + def test_rank2(self): + df = DataFrame([[1, 3, 2], [1, 2, 3]]) + expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0 + result = df.rank(1, pct=True) + tm.assert_frame_equal(result, expected) + + df = DataFrame([[1, 3, 2], [1, 2, 3]]) + expected = df.rank(0) / 2.0 + result = df.rank(0, pct=True) + tm.assert_frame_equal(result, expected) + + df = DataFrame([["b", "c", "a"], ["a", "c", "b"]]) + expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]]) + result = df.rank(1, numeric_only=False) + tm.assert_frame_equal(result, expected) + + expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]]) + result = df.rank(0, numeric_only=False) + tm.assert_frame_equal(result, expected) + + df = DataFrame([["b", np.nan, "a"], ["a", "c", "b"]]) + expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 3.0, 2.0]]) + result = df.rank(1, numeric_only=False) + tm.assert_frame_equal(result, expected) + + expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 1.0, 2.0]]) + result = df.rank(0, numeric_only=False) + tm.assert_frame_equal(result, expected) + + # f7u12, this does not work without extensive workaround + data = [ + [datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)], + [datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 1)], + ] + df = DataFrame(data) + + # check the rank + expected = DataFrame([[2.0, np.nan, 1.0], [2.0, 3.0, 1.0]]) + result = df.rank(1, numeric_only=False, ascending=True) + tm.assert_frame_equal(result, expected) + + expected = DataFrame([[1.0, np.nan, 2.0], [2.0, 1.0, 3.0]]) + result = df.rank(1, numeric_only=False, ascending=False) + tm.assert_frame_equal(result, expected) + + df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10, 1e60, 1e80, 1e-30]}) + exp = DataFrame({"a": [3.5, 1.0, 3.5, 5.0, 6.0, 7.0, 2.0]}) + tm.assert_frame_equal(df.rank(), exp) + + def test_rank_does_not_mutate(self): + # GH#18521 + # Check rank does not mutate DataFrame + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), dtype="float64" + ) + expected = df.copy() + df.rank() + result = df + tm.assert_frame_equal(result, expected) + + def test_rank_mixed_frame(self, float_string_frame): + float_string_frame["datetime"] = datetime.now() + float_string_frame["timedelta"] = timedelta(days=1, seconds=1) + + float_string_frame.rank(numeric_only=False) + with pytest.raises(TypeError, match="not supported between instances of"): + float_string_frame.rank(axis=1) + + def test_rank_na_option(self, float_frame): + sp_stats = pytest.importorskip("scipy.stats") + + float_frame.loc[::2, "A"] = np.nan + float_frame.loc[::3, "B"] = np.nan + float_frame.loc[::4, "C"] = np.nan + float_frame.loc[::5, "D"] = np.nan + + # bottom + ranks0 = float_frame.rank(na_option="bottom") + ranks1 = float_frame.rank(1, na_option="bottom") + + fvals = float_frame.fillna(np.inf).values + + exp0 = np.apply_along_axis(sp_stats.rankdata, 0, fvals) + exp1 = np.apply_along_axis(sp_stats.rankdata, 1, fvals) + + tm.assert_almost_equal(ranks0.values, exp0) + tm.assert_almost_equal(ranks1.values, exp1) + + # top + ranks0 = float_frame.rank(na_option="top") + ranks1 = float_frame.rank(1, na_option="top") + + fval0 = float_frame.fillna((float_frame.min() - 1).to_dict()).values + fval1 = float_frame.T + fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T + fval1 = fval1.fillna(np.inf).values + + exp0 = np.apply_along_axis(sp_stats.rankdata, 0, fval0) + exp1 = np.apply_along_axis(sp_stats.rankdata, 1, fval1) + + 
tm.assert_almost_equal(ranks0.values, exp0) + tm.assert_almost_equal(ranks1.values, exp1) + + # descending + + # bottom + ranks0 = float_frame.rank(na_option="top", ascending=False) + ranks1 = float_frame.rank(1, na_option="top", ascending=False) + + fvals = float_frame.fillna(np.inf).values + + exp0 = np.apply_along_axis(sp_stats.rankdata, 0, -fvals) + exp1 = np.apply_along_axis(sp_stats.rankdata, 1, -fvals) + + tm.assert_almost_equal(ranks0.values, exp0) + tm.assert_almost_equal(ranks1.values, exp1) + + # descending + + # top + ranks0 = float_frame.rank(na_option="bottom", ascending=False) + ranks1 = float_frame.rank(1, na_option="bottom", ascending=False) + + fval0 = float_frame.fillna((float_frame.min() - 1).to_dict()).values + fval1 = float_frame.T + fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T + fval1 = fval1.fillna(np.inf).values + + exp0 = np.apply_along_axis(sp_stats.rankdata, 0, -fval0) + exp1 = np.apply_along_axis(sp_stats.rankdata, 1, -fval1) + + tm.assert_numpy_array_equal(ranks0.values, exp0) + tm.assert_numpy_array_equal(ranks1.values, exp1) + + # bad values throw error + msg = "na_option must be one of 'keep', 'top', or 'bottom'" + + with pytest.raises(ValueError, match=msg): + float_frame.rank(na_option="bad", ascending=False) + + # invalid type + with pytest.raises(ValueError, match=msg): + float_frame.rank(na_option=True, ascending=False) + + def test_rank_axis(self): + # check if using axes' names gives the same result + df = DataFrame([[2, 1], [4, 3]]) + tm.assert_frame_equal(df.rank(axis=0), df.rank(axis="index")) + tm.assert_frame_equal(df.rank(axis=1), df.rank(axis="columns")) + + @pytest.mark.parametrize("ax", [0, 1]) + @pytest.mark.parametrize("m", ["average", "min", "max", "first", "dense"]) + def test_rank_methods_frame(self, ax, m): + sp_stats = pytest.importorskip("scipy.stats") + + xs = np.random.default_rng(2).integers(0, 21, (100, 26)) + xs = (xs - 10.0) / 10.0 + cols = [chr(ord("z") - i) for i in range(xs.shape[1])] + + for vals in [xs, xs + 1e6, xs * 1e-6]: + df = DataFrame(vals, columns=cols) + + result = df.rank(axis=ax, method=m) + sprank = np.apply_along_axis( + sp_stats.rankdata, ax, vals, m if m != "first" else "ordinal" + ) + sprank = sprank.astype(np.float64) + expected = DataFrame(sprank, columns=cols).astype("float64") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["O", "f8", "i8"]) + def test_rank_descending(self, method, dtype): + if "i" in dtype: + df = self.df.dropna().astype(dtype) + else: + df = self.df.astype(dtype) + + res = df.rank(ascending=False) + expected = (df.max() - df).rank() + tm.assert_frame_equal(res, expected) + + expected = (df.max() - df).rank(method=method) + + if dtype != "O": + res2 = df.rank(method=method, ascending=False, numeric_only=True) + tm.assert_frame_equal(res2, expected) + + res3 = df.rank(method=method, ascending=False, numeric_only=False) + tm.assert_frame_equal(res3, expected) + + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.parametrize("dtype", [None, object]) + def test_rank_2d_tie_methods(self, method, axis, dtype): + df = self.df + + def _check2d(df, expected, method="average", axis=0): + exp_df = DataFrame({"A": expected, "B": expected}) + + if axis == 1: + df = df.T + exp_df = exp_df.T + + result = df.rank(method=method, axis=axis) + tm.assert_frame_equal(result, exp_df) + + frame = df if dtype is None else df.astype(dtype) + _check2d(frame, self.results[method], method=method, axis=axis) + + @pytest.mark.parametrize( + "method,exp", + [ + ("dense", 
[[1.0, 1.0, 1.0], [1.0, 0.5, 2.0 / 3], [1.0, 0.5, 1.0 / 3]]), + ( + "min", + [ + [1.0 / 3, 1.0, 1.0], + [1.0 / 3, 1.0 / 3, 2.0 / 3], + [1.0 / 3, 1.0 / 3, 1.0 / 3], + ], + ), + ( + "max", + [[1.0, 1.0, 1.0], [1.0, 2.0 / 3, 2.0 / 3], [1.0, 2.0 / 3, 1.0 / 3]], + ), + ( + "average", + [[2.0 / 3, 1.0, 1.0], [2.0 / 3, 0.5, 2.0 / 3], [2.0 / 3, 0.5, 1.0 / 3]], + ), + ( + "first", + [ + [1.0 / 3, 1.0, 1.0], + [2.0 / 3, 1.0 / 3, 2.0 / 3], + [3.0 / 3, 2.0 / 3, 1.0 / 3], + ], + ), + ], + ) + def test_rank_pct_true(self, method, exp): + # see gh-15630. + + df = DataFrame([[2012, 66, 3], [2012, 65, 2], [2012, 65, 1]]) + result = df.rank(method=method, pct=True) + + expected = DataFrame(exp) + tm.assert_frame_equal(result, expected) + + @pytest.mark.single_cpu + def test_pct_max_many_rows(self): + # GH 18271 + df = DataFrame( + {"A": np.arange(2**24 + 1), "B": np.arange(2**24 + 1, 0, -1)} + ) + result = df.rank(pct=True).max() + assert (result == 1).all() + + @pytest.mark.parametrize( + "contents,dtype", + [ + ( + [ + -np.inf, + -50, + -1, + -1e-20, + -1e-25, + -1e-50, + 0, + 1e-40, + 1e-20, + 1e-10, + 2, + 40, + np.inf, + ], + "float64", + ), + ( + [ + -np.inf, + -50, + -1, + -1e-20, + -1e-25, + -1e-45, + 0, + 1e-40, + 1e-20, + 1e-10, + 2, + 40, + np.inf, + ], + "float32", + ), + ([np.iinfo(np.uint8).min, 1, 2, 100, np.iinfo(np.uint8).max], "uint8"), + ( + [ + np.iinfo(np.int64).min, + -100, + 0, + 1, + 9999, + 100000, + 1e10, + np.iinfo(np.int64).max, + ], + "int64", + ), + ([NegInfinity(), "1", "A", "BA", "Ba", "C", Infinity()], "object"), + ( + [datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 5)], + "datetime64", + ), + ], + ) + def test_rank_inf_and_nan(self, contents, dtype, frame_or_series): + dtype_na_map = { + "float64": np.nan, + "float32": np.nan, + "object": None, + "datetime64": np.datetime64("nat"), + } + # Insert nans at random positions if the underlying dtype has a missing + # value. Then adjust the expected order by adding nans accordingly. + # This is for testing whether rank calculation is affected + # when values are intertwined with nan values.
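+ # --- annotation, not part of the vendored pandas file: a sketch of the + # mechanics used below. np.insert places the NA marker at the same + # positions in both values and exp_order, so the expected rank of every + # real value is unchanged and the inserted slots rank as NaN under the + # default na_option="keep". E.g. inserting one NaN into [10, 20] at + # index 1 gives values [10, nan, 20] with expected ranks [1.0, nan, 2.0]. + # ---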
+ values = np.array(contents, dtype=dtype) + exp_order = np.array(range(len(values)), dtype="float64") + 1.0 + if dtype in dtype_na_map: + na_value = dtype_na_map[dtype] + nan_indices = np.random.default_rng(2).choice(range(len(values)), 5) + values = np.insert(values, nan_indices, na_value) + exp_order = np.insert(exp_order, nan_indices, np.nan) + + # Shuffle the testing array and expected results in the same way + random_order = np.random.default_rng(2).permutation(len(values)) + obj = frame_or_series(values[random_order]) + expected = frame_or_series(exp_order[random_order], dtype="float64") + result = obj.rank() + tm.assert_equal(result, expected) + + def test_df_series_inf_nan_consistency(self): + # GH#32593 + index = [5, 4, 3, 2, 1, 6, 7, 8, 9, 10] + col1 = [5, 4, 3, 5, 8, 5, 2, 1, 6, 6] + col2 = [5, 4, np.nan, 5, 8, 5, np.inf, np.nan, 6, -np.inf] + df = DataFrame( + data={ + "col1": col1, + "col2": col2, + }, + index=index, + dtype="f8", + ) + df_result = df.rank() + + series_result = df.copy() + series_result["col1"] = df["col1"].rank() + series_result["col2"] = df["col2"].rank() + + tm.assert_frame_equal(df_result, series_result) + + def test_rank_both_inf(self): + # GH#32593 + df = DataFrame({"a": [-np.inf, 0, np.inf]}) + expected = DataFrame({"a": [1.0, 2.0, 3.0]}) + result = df.rank() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "na_option,ascending,expected", + [ + ("top", True, [3.0, 1.0, 2.0]), + ("top", False, [2.0, 1.0, 3.0]), + ("bottom", True, [2.0, 3.0, 1.0]), + ("bottom", False, [1.0, 3.0, 2.0]), + ], + ) + def test_rank_inf_nans_na_option( + self, frame_or_series, method, na_option, ascending, expected + ): + obj = frame_or_series([np.inf, np.nan, -np.inf]) + result = obj.rank(method=method, na_option=na_option, ascending=ascending) + expected = frame_or_series(expected) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "na_option,ascending,expected", + [ + ("bottom", True, [1.0, 2.0, 4.0, 3.0]), + ("bottom", False, [1.0, 2.0, 4.0, 3.0]), + ("top", True, [2.0, 3.0, 1.0, 4.0]), + ("top", False, [2.0, 3.0, 1.0, 4.0]), + ], + ) + def test_rank_object_first(self, frame_or_series, na_option, ascending, expected): + obj = frame_or_series(["foo", "foo", None, "foo"]) + result = obj.rank(method="first", na_option=na_option, ascending=ascending) + expected = frame_or_series(expected) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "data,expected", + [ + ({"a": [1, 2, "a"], "b": [4, 5, 6]}, DataFrame({"b": [1.0, 2.0, 3.0]})), + ({"a": [1, 2, "a"]}, DataFrame(index=range(3), columns=[])), + ], + ) + def test_rank_mixed_axis_zero(self, data, expected): + df = DataFrame(data) + with pytest.raises(TypeError, match="'<' not supported between instances of"): + df.rank() + result = df.rank(numeric_only=True) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "dtype, exp_dtype", + [("string[pyarrow]", "Int64"), ("string[pyarrow_numpy]", "float64")], + ) + def test_rank_string_dtype(self, dtype, exp_dtype): + # GH#55362 + pytest.importorskip("pyarrow") + obj = Series(["foo", "foo", None, "foo"], dtype=dtype) + result = obj.rank(method="first") + expected = Series([1, 2, None, 3], dtype=exp_dtype) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reindex.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reindex.py new file mode 100644 index 00000000..678fec83 --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reindex.py @@ -0,0 +1,1315 @@ +from datetime import ( + datetime, + timedelta, +) +import inspect + +import numpy as np +import pytest + +from pandas._libs.tslibs.timezones import dateutil_gettz as gettz +from pandas.compat import ( + IS64, + is_platform_windows, +) +from pandas.compat.numpy import np_version_gt2 +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + Index, + MultiIndex, + Series, + date_range, + isna, +) +import pandas._testing as tm +from pandas.api.types import CategoricalDtype as CDT + + +class TestReindexSetIndex: + # Tests that check both reindex and set_index + + def test_dti_set_index_reindex_datetimeindex(self): + # GH#6631 + df = DataFrame(np.random.default_rng(2).random(6)) + idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern") + idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo") + + df = df.set_index(idx1) + tm.assert_index_equal(df.index, idx1) + df = df.reindex(idx2) + tm.assert_index_equal(df.index, idx2) + + def test_dti_set_index_reindex_freq_with_tz(self): + # GH#11314 with tz + index = date_range( + datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern" + ) + df = DataFrame( + np.random.default_rng(2).standard_normal((24, 1)), + columns=["a"], + index=index, + ) + new_index = date_range( + datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern" + ) + + result = df.set_index(new_index) + assert result.index.freq == index.freq + + def test_set_reset_index_intervalindex(self): + df = DataFrame({"A": range(10)}) + ser = pd.cut(df.A, 5) + df["B"] = ser + df = df.set_index("B") + + df = df.reset_index() + + def test_setitem_reset_index_dtypes(self): + # GH 22060 + df = DataFrame(columns=["a", "b", "c"]).astype( + {"a": "datetime64[ns]", "b": np.int64, "c": np.float64} + ) + df1 = df.set_index(["a"]) + df1["d"] = [] + result = df1.reset_index() + expected = DataFrame(columns=["a", "b", "c", "d"], index=range(0)).astype( + {"a": "datetime64[ns]", "b": np.int64, "c": np.float64, "d": np.float64} + ) + tm.assert_frame_equal(result, expected) + + df2 = df.set_index(["a", "b"]) + df2["d"] = [] + result = df2.reset_index() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "timezone, year, month, day, hour", + [["America/Chicago", 2013, 11, 3, 1], ["America/Santiago", 2021, 4, 3, 23]], + ) + def test_reindex_timestamp_with_fold(self, timezone, year, month, day, hour): + # see gh-40817 + test_timezone = gettz(timezone) + transition_1 = pd.Timestamp( + year=year, + month=month, + day=day, + hour=hour, + minute=0, + fold=0, + tzinfo=test_timezone, + ) + transition_2 = pd.Timestamp( + year=year, + month=month, + day=day, + hour=hour, + minute=0, + fold=1, + tzinfo=test_timezone, + ) + df = ( + DataFrame({"index": [transition_1, transition_2], "vals": ["a", "b"]}) + .set_index("index") + .reindex(["1", "2"]) + ) + exp = DataFrame({"index": ["1", "2"], "vals": [np.nan, np.nan]}).set_index( + "index" + ) + exp = exp.astype(object) + tm.assert_frame_equal( + df, + exp, + ) + + +class TestDataFrameSelectReindex: + # These are specific reindex-based tests; other indexing tests should go in + # test_indexing + + @pytest.mark.xfail( + not IS64 or (is_platform_windows() and not np_version_gt2), + reason="Passes int32 values to DatetimeArray in make_na_array on " + "windows, 32bit linux builds", + ) + 
@td.skip_array_manager_not_yet_implemented + def test_reindex_tzaware_fill_value(self): + # GH#52586 + df = DataFrame([[1]]) + + ts = pd.Timestamp("2023-04-10 17:32", tz="US/Pacific") + res = df.reindex([0, 1], axis=1, fill_value=ts) + assert res.dtypes[1] == pd.DatetimeTZDtype(unit="s", tz="US/Pacific") + expected = DataFrame({0: [1], 1: [ts]}) + expected[1] = expected[1].astype(res.dtypes[1]) + tm.assert_frame_equal(res, expected) + + per = ts.tz_localize(None).to_period("s") + res = df.reindex([0, 1], axis=1, fill_value=per) + assert res.dtypes[1] == pd.PeriodDtype("s") + expected = DataFrame({0: [1], 1: [per]}) + tm.assert_frame_equal(res, expected) + + interval = pd.Interval(ts, ts + pd.Timedelta(seconds=1)) + res = df.reindex([0, 1], axis=1, fill_value=interval) + assert res.dtypes[1] == pd.IntervalDtype("datetime64[s, US/Pacific]", "right") + expected = DataFrame({0: [1], 1: [interval]}) + expected[1] = expected[1].astype(res.dtypes[1]) + tm.assert_frame_equal(res, expected) + + def test_reindex_copies(self): + # based on asv time_reindex_axis1 + N = 10 + df = DataFrame(np.random.default_rng(2).standard_normal((N * 10, N))) + cols = np.arange(N) + np.random.default_rng(2).shuffle(cols) + + result = df.reindex(columns=cols, copy=True) + assert not np.shares_memory(result[0]._values, df[0]._values) + + # pass both columns and index + result2 = df.reindex(columns=cols, index=df.index, copy=True) + assert not np.shares_memory(result2[0]._values, df[0]._values) + + def test_reindex_copies_ea(self, using_copy_on_write): + # https://github.com/pandas-dev/pandas/pull/51197 + # also ensure to honor copy keyword for ExtensionDtypes + N = 10 + df = DataFrame( + np.random.default_rng(2).standard_normal((N * 10, N)), dtype="Float64" + ) + cols = np.arange(N) + np.random.default_rng(2).shuffle(cols) + + result = df.reindex(columns=cols, copy=True) + if using_copy_on_write: + assert np.shares_memory(result[0].array._data, df[0].array._data) + else: + assert not np.shares_memory(result[0].array._data, df[0].array._data) + + # pass both columns and index + result2 = df.reindex(columns=cols, index=df.index, copy=True) + if using_copy_on_write: + assert np.shares_memory(result2[0].array._data, df[0].array._data) + else: + assert not np.shares_memory(result2[0].array._data, df[0].array._data) + + @td.skip_array_manager_not_yet_implemented + def test_reindex_date_fill_value(self): + # passing date to dt64 is deprecated; enforced in 2.0 to cast to object + arr = date_range("2016-01-01", periods=6).values.reshape(3, 2) + df = DataFrame(arr, columns=["A", "B"], index=range(3)) + + ts = df.iloc[0, 0] + fv = ts.date() + + res = df.reindex(index=range(4), columns=["A", "B", "C"], fill_value=fv) + + expected = DataFrame( + {"A": df["A"].tolist() + [fv], "B": df["B"].tolist() + [fv], "C": [fv] * 4}, + dtype=object, + ) + tm.assert_frame_equal(res, expected) + + # only reindexing rows + res = df.reindex(index=range(4), fill_value=fv) + tm.assert_frame_equal(res, expected[["A", "B"]]) + + # same with a datetime-castable str + res = df.reindex( + index=range(4), columns=["A", "B", "C"], fill_value="2016-01-01" + ) + expected = DataFrame( + {"A": df["A"].tolist() + [ts], "B": df["B"].tolist() + [ts], "C": [ts] * 4}, + ) + tm.assert_frame_equal(res, expected) + + def test_reindex_with_multi_index(self): + # https://github.com/pandas-dev/pandas/issues/29896 + # tests for reindexing a multi-indexed DataFrame with a new MultiIndex + # + # confirms that we can reindex a multi-indexed DataFrame with a new + # MultiIndex 
object correctly when using no filling, backfilling, and + # padding + # + # The DataFrame, `df`, used in this test is: + # c + # a b + # -1 0 A + # 1 B + # 2 C + # 3 D + # 4 E + # 5 F + # 6 G + # 0 0 A + # 1 B + # 2 C + # 3 D + # 4 E + # 5 F + # 6 G + # 1 0 A + # 1 B + # 2 C + # 3 D + # 4 E + # 5 F + # 6 G + # + # and the other MultiIndex, `new_multi_index`, is: + # 0: 0 0.5 + # 1: 2.0 + # 2: 5.0 + # 3: 5.8 + df = DataFrame( + { + "a": [-1] * 7 + [0] * 7 + [1] * 7, + "b": list(range(7)) * 3, + "c": ["A", "B", "C", "D", "E", "F", "G"] * 3, + } + ).set_index(["a", "b"]) + new_index = [0.5, 2.0, 5.0, 5.8] + new_multi_index = MultiIndex.from_product([[0], new_index], names=["a", "b"]) + + # reindexing w/o a `method` value + reindexed = df.reindex(new_multi_index) + expected = DataFrame( + {"a": [0] * 4, "b": new_index, "c": [np.nan, "C", "F", np.nan]} + ).set_index(["a", "b"]) + tm.assert_frame_equal(expected, reindexed) + + # reindexing with backfilling + expected = DataFrame( + {"a": [0] * 4, "b": new_index, "c": ["B", "C", "F", "G"]} + ).set_index(["a", "b"]) + reindexed_with_backfilling = df.reindex(new_multi_index, method="bfill") + tm.assert_frame_equal(expected, reindexed_with_backfilling) + + reindexed_with_backfilling = df.reindex(new_multi_index, method="backfill") + tm.assert_frame_equal(expected, reindexed_with_backfilling) + + # reindexing with padding + expected = DataFrame( + {"a": [0] * 4, "b": new_index, "c": ["A", "C", "F", "F"]} + ).set_index(["a", "b"]) + reindexed_with_padding = df.reindex(new_multi_index, method="pad") + tm.assert_frame_equal(expected, reindexed_with_padding) + + reindexed_with_padding = df.reindex(new_multi_index, method="ffill") + tm.assert_frame_equal(expected, reindexed_with_padding) + + @pytest.mark.parametrize( + "method,expected_values", + [ + ("nearest", [0, 1, 1, 2]), + ("pad", [np.nan, 0, 1, 1]), + ("backfill", [0, 1, 2, 2]), + ], + ) + def test_reindex_methods(self, method, expected_values): + df = DataFrame({"x": list(range(5))}) + target = np.array([-0.1, 0.9, 1.1, 1.5]) + + expected = DataFrame({"x": expected_values}, index=target) + actual = df.reindex(target, method=method) + tm.assert_frame_equal(expected, actual) + + actual = df.reindex(target, method=method, tolerance=1) + tm.assert_frame_equal(expected, actual) + actual = df.reindex(target, method=method, tolerance=[1, 1, 1, 1]) + tm.assert_frame_equal(expected, actual) + + e2 = expected[::-1] + actual = df.reindex(target[::-1], method=method) + tm.assert_frame_equal(e2, actual) + + new_order = [3, 0, 2, 1] + e2 = expected.iloc[new_order] + actual = df.reindex(target[new_order], method=method) + tm.assert_frame_equal(e2, actual) + + switched_method = ( + "pad" if method == "backfill" else "backfill" if method == "pad" else method + ) + actual = df[::-1].reindex(target, method=switched_method) + tm.assert_frame_equal(expected, actual) + + def test_reindex_methods_nearest_special(self): + df = DataFrame({"x": list(range(5))}) + target = np.array([-0.1, 0.9, 1.1, 1.5]) + + expected = DataFrame({"x": [0, 1, 1, np.nan]}, index=target) + actual = df.reindex(target, method="nearest", tolerance=0.2) + tm.assert_frame_equal(expected, actual) + + expected = DataFrame({"x": [0, np.nan, 1, np.nan]}, index=target) + actual = df.reindex(target, method="nearest", tolerance=[0.5, 0.01, 0.4, 0.1]) + tm.assert_frame_equal(expected, actual) + + def test_reindex_nearest_tz(self, tz_aware_fixture): + # GH26683 + tz = tz_aware_fixture + idx = date_range("2019-01-01", periods=5, tz=tz) + df = 
DataFrame({"x": list(range(5))}, index=idx) + + expected = df.head(3) + actual = df.reindex(idx[:3], method="nearest") + tm.assert_frame_equal(expected, actual) + + def test_reindex_nearest_tz_empty_frame(self): + # https://github.com/pandas-dev/pandas/issues/31964 + dti = pd.DatetimeIndex(["2016-06-26 14:27:26+00:00"]) + df = DataFrame(index=pd.DatetimeIndex(["2016-07-04 14:00:59+00:00"])) + expected = DataFrame(index=dti) + result = df.reindex(dti, method="nearest") + tm.assert_frame_equal(result, expected) + + def test_reindex_frame_add_nat(self): + rng = date_range("1/1/2000 00:00:00", periods=10, freq="10s") + df = DataFrame( + {"A": np.random.default_rng(2).standard_normal(len(rng)), "B": rng} + ) + + result = df.reindex(range(15)) + assert np.issubdtype(result["B"].dtype, np.dtype("M8[ns]")) + + mask = isna(result)["B"] + assert mask[-5:].all() + assert not mask[:-5].any() + + @pytest.mark.parametrize( + "method, exp_values", + [("ffill", [0, 1, 2, 3]), ("bfill", [1.0, 2.0, 3.0, np.nan])], + ) + def test_reindex_frame_tz_ffill_bfill(self, frame_or_series, method, exp_values): + # GH#38566 + obj = frame_or_series( + [0, 1, 2, 3], + index=date_range("2020-01-01 00:00:00", periods=4, freq="H", tz="UTC"), + ) + new_index = date_range("2020-01-01 00:01:00", periods=4, freq="H", tz="UTC") + result = obj.reindex(new_index, method=method, tolerance=pd.Timedelta("1 hour")) + expected = frame_or_series(exp_values, index=new_index) + tm.assert_equal(result, expected) + + def test_reindex_limit(self): + # GH 28631 + data = [["A", "A", "A"], ["B", "B", "B"], ["C", "C", "C"], ["D", "D", "D"]] + exp_data = [ + ["A", "A", "A"], + ["B", "B", "B"], + ["C", "C", "C"], + ["D", "D", "D"], + ["D", "D", "D"], + [np.nan, np.nan, np.nan], + ] + df = DataFrame(data) + result = df.reindex([0, 1, 2, 3, 4, 5], method="ffill", limit=1) + expected = DataFrame(exp_data) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "idx, check_index_type", + [ + [["C", "B", "A"], True], + [["F", "C", "A", "D"], True], + [["A"], True], + [["A", "B", "C"], True], + [["C", "A", "B"], True], + [["C", "B"], True], + [["C", "A"], True], + [["A", "B"], True], + [["B", "A", "C"], True], + # reindex by these causes different MultiIndex levels + [["D", "F"], False], + [["A", "C", "B"], False], + ], + ) + def test_reindex_level_verify_first_level(self, idx, check_index_type): + df = DataFrame( + { + "jim": list("B" * 4 + "A" * 2 + "C" * 3), + "joe": list("abcdeabcd")[::-1], + "jolie": [10, 20, 30] * 3, + "joline": np.random.default_rng(2).integers(0, 1000, 9), + } + ) + icol = ["jim", "joe", "jolie"] + + def f(val): + return np.nonzero((df["jim"] == val).to_numpy())[0] + + i = np.concatenate(list(map(f, idx))) + left = df.set_index(icol).reindex(idx, level="jim") + right = df.iloc[i].set_index(icol) + tm.assert_frame_equal(left, right, check_index_type=check_index_type) + + @pytest.mark.parametrize( + "idx", + [ + ("mid",), + ("mid", "btm"), + ("mid", "btm", "top"), + ("mid",), + ("mid", "top"), + ("mid", "top", "btm"), + ("btm",), + ("btm", "mid"), + ("btm", "mid", "top"), + ("btm",), + ("btm", "top"), + ("btm", "top", "mid"), + ("top",), + ("top", "mid"), + ("top", "mid", "btm"), + ("top",), + ("top", "btm"), + ("top", "btm", "mid"), + ], + ) + def test_reindex_level_verify_first_level_repeats(self, idx): + df = DataFrame( + { + "jim": ["mid"] * 5 + ["btm"] * 8 + ["top"] * 7, + "joe": ["3rd"] * 2 + + ["1st"] * 3 + + ["2nd"] * 3 + + ["1st"] * 2 + + ["3rd"] * 3 + + ["1st"] * 2 + + ["3rd"] * 3 + + ["2nd"] * 2, + # 
this needs to be jointly unique with jim and joe or + # reindexing will fail ~1.5% of the time, this works + # out to needing unique groups of same size as joe + "jolie": np.concatenate( + [ + np.random.default_rng(2).choice(1000, x, replace=False) + for x in [2, 3, 3, 2, 3, 2, 3, 2] + ] + ), + "joline": np.random.default_rng(2).standard_normal(20).round(3) * 10, + } + ) + icol = ["jim", "joe", "jolie"] + + def f(val): + return np.nonzero((df["jim"] == val).to_numpy())[0] + + i = np.concatenate(list(map(f, idx))) + left = df.set_index(icol).reindex(idx, level="jim") + right = df.iloc[i].set_index(icol) + tm.assert_frame_equal(left, right) + + @pytest.mark.parametrize( + "idx, indexer", + [ + [ + ["1st", "2nd", "3rd"], + [2, 3, 4, 0, 1, 8, 9, 5, 6, 7, 10, 11, 12, 13, 14, 18, 19, 15, 16, 17], + ], + [ + ["3rd", "2nd", "1st"], + [0, 1, 2, 3, 4, 10, 11, 12, 5, 6, 7, 8, 9, 15, 16, 17, 18, 19, 13, 14], + ], + [["2nd", "3rd"], [0, 1, 5, 6, 7, 10, 11, 12, 18, 19, 15, 16, 17]], + [["3rd", "1st"], [0, 1, 2, 3, 4, 10, 11, 12, 8, 9, 15, 16, 17, 13, 14]], + ], + ) + def test_reindex_level_verify_repeats(self, idx, indexer): + df = DataFrame( + { + "jim": ["mid"] * 5 + ["btm"] * 8 + ["top"] * 7, + "joe": ["3rd"] * 2 + + ["1st"] * 3 + + ["2nd"] * 3 + + ["1st"] * 2 + + ["3rd"] * 3 + + ["1st"] * 2 + + ["3rd"] * 3 + + ["2nd"] * 2, + # this needs to be jointly unique with jim and joe or + # reindexing will fail ~1.5% of the time, this works + # out to needing unique groups of same size as joe + "jolie": np.concatenate( + [ + np.random.default_rng(2).choice(1000, x, replace=False) + for x in [2, 3, 3, 2, 3, 2, 3, 2] + ] + ), + "joline": np.random.default_rng(2).standard_normal(20).round(3) * 10, + } + ) + icol = ["jim", "joe", "jolie"] + left = df.set_index(icol).reindex(idx, level="joe") + right = df.iloc[indexer].set_index(icol) + tm.assert_frame_equal(left, right) + + @pytest.mark.parametrize( + "idx, indexer, check_index_type", + [ + [list("abcde"), [3, 2, 1, 0, 5, 4, 8, 7, 6], True], + [list("abcd"), [3, 2, 1, 0, 5, 8, 7, 6], True], + [list("abc"), [3, 2, 1, 8, 7, 6], True], + [list("eca"), [1, 3, 4, 6, 8], True], + [list("edc"), [0, 1, 4, 5, 6], True], + [list("eadbc"), [3, 0, 2, 1, 4, 5, 8, 7, 6], True], + [list("edwq"), [0, 4, 5], True], + [list("wq"), [], False], + ], + ) + def test_reindex_level_verify(self, idx, indexer, check_index_type): + df = DataFrame( + { + "jim": list("B" * 4 + "A" * 2 + "C" * 3), + "joe": list("abcdeabcd")[::-1], + "jolie": [10, 20, 30] * 3, + "joline": np.random.default_rng(2).integers(0, 1000, 9), + } + ) + icol = ["jim", "joe", "jolie"] + left = df.set_index(icol).reindex(idx, level="joe") + right = df.iloc[indexer].set_index(icol) + tm.assert_frame_equal(left, right, check_index_type=check_index_type) + + def test_non_monotonic_reindex_methods(self): + dr = date_range("2013-08-01", periods=6, freq="B") + data = np.random.default_rng(2).standard_normal((6, 1)) + df = DataFrame(data, index=dr, columns=list("A")) + df_rev = DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]], columns=list("A")) + # index is not monotonic increasing or decreasing + msg = "index must be monotonic increasing or decreasing" + with pytest.raises(ValueError, match=msg): + df_rev.reindex(df.index, method="pad") + with pytest.raises(ValueError, match=msg): + df_rev.reindex(df.index, method="ffill") + with pytest.raises(ValueError, match=msg): + df_rev.reindex(df.index, method="bfill") + with pytest.raises(ValueError, match=msg): + df_rev.reindex(df.index, method="nearest") + + def 
test_reindex_sparse(self): + # https://github.com/pandas-dev/pandas/issues/35286 + df = DataFrame( + {"A": [0, 1], "B": pd.array([0, 1], dtype=pd.SparseDtype("int64", 0))} + ) + result = df.reindex([0, 2]) + expected = DataFrame( + { + "A": [0.0, np.nan], + "B": pd.array([0.0, np.nan], dtype=pd.SparseDtype("float64", 0.0)), + }, + index=[0, 2], + ) + tm.assert_frame_equal(result, expected) + + def test_reindex(self, float_frame, using_copy_on_write): + datetime_series = tm.makeTimeSeries(nper=30) + + newFrame = float_frame.reindex(datetime_series.index) + + for col in newFrame.columns: + for idx, val in newFrame[col].items(): + if idx in float_frame.index: + if np.isnan(val): + assert np.isnan(float_frame[col][idx]) + else: + assert val == float_frame[col][idx] + else: + assert np.isnan(val) + + for col, series in newFrame.items(): + assert tm.equalContents(series.index, newFrame.index) + emptyFrame = float_frame.reindex(Index([])) + assert len(emptyFrame.index) == 0 + + # Cython code should be unit-tested directly + nonContigFrame = float_frame.reindex(datetime_series.index[::2]) + + for col in nonContigFrame.columns: + for idx, val in nonContigFrame[col].items(): + if idx in float_frame.index: + if np.isnan(val): + assert np.isnan(float_frame[col][idx]) + else: + assert val == float_frame[col][idx] + else: + assert np.isnan(val) + + for col, series in nonContigFrame.items(): + assert tm.equalContents(series.index, nonContigFrame.index) + + # corner cases + + # Same index, copies values but not index if copy=False + newFrame = float_frame.reindex(float_frame.index, copy=False) + if using_copy_on_write: + assert newFrame.index.is_(float_frame.index) + else: + assert newFrame.index is float_frame.index + + # length zero + newFrame = float_frame.reindex([]) + assert newFrame.empty + assert len(newFrame.columns) == len(float_frame.columns) + + # length zero with columns reindexed with non-empty index + newFrame = float_frame.reindex([]) + newFrame = newFrame.reindex(float_frame.index) + assert len(newFrame.index) == len(float_frame.index) + assert len(newFrame.columns) == len(float_frame.columns) + + # pass non-Index + newFrame = float_frame.reindex(list(datetime_series.index)) + expected = datetime_series.index._with_freq(None) + tm.assert_index_equal(newFrame.index, expected) + + # copy with no axes + result = float_frame.reindex() + tm.assert_frame_equal(result, float_frame) + assert result is not float_frame + + def test_reindex_nan(self): + df = DataFrame( + [[1, 2], [3, 5], [7, 11], [9, 23]], + index=[2, np.nan, 1, 5], + columns=["joe", "jim"], + ) + + i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1] + tm.assert_frame_equal(df.reindex(i), df.iloc[j]) + + df.index = df.index.astype("object") + tm.assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False) + + # GH10388 + df = DataFrame( + { + "other": ["a", "b", np.nan, "c"], + "date": ["2015-03-22", np.nan, "2012-01-08", np.nan], + "amount": [2, 3, 4, 5], + } + ) + + df["date"] = pd.to_datetime(df.date) + df["delta"] = (pd.to_datetime("2015-06-18") - df["date"]).shift(1) + + left = df.set_index(["delta", "other", "date"]).reset_index() + right = df.reindex(columns=["delta", "other", "date", "amount"]) + tm.assert_frame_equal(left, right) + + def test_reindex_name_remains(self): + s = Series(np.random.default_rng(2).random(10)) + df = DataFrame(s, index=np.arange(len(s))) + i = Series(np.arange(10), name="iname") + + df = df.reindex(i) + assert df.index.name == "iname" + + df = df.reindex(Index(np.arange(10), 
name="tmpname")) + assert df.index.name == "tmpname" + + s = Series(np.random.default_rng(2).random(10)) + df = DataFrame(s.T, index=np.arange(len(s))) + i = Series(np.arange(10), name="iname") + df = df.reindex(columns=i) + assert df.columns.name == "iname" + + def test_reindex_int(self, int_frame): + smaller = int_frame.reindex(int_frame.index[::2]) + + assert smaller["A"].dtype == np.int64 + + bigger = smaller.reindex(int_frame.index) + assert bigger["A"].dtype == np.float64 + + smaller = int_frame.reindex(columns=["A", "B"]) + assert smaller["A"].dtype == np.int64 + + def test_reindex_columns(self, float_frame): + new_frame = float_frame.reindex(columns=["A", "B", "E"]) + + tm.assert_series_equal(new_frame["B"], float_frame["B"]) + assert np.isnan(new_frame["E"]).all() + assert "C" not in new_frame + + # Length zero + new_frame = float_frame.reindex(columns=[]) + assert new_frame.empty + + def test_reindex_columns_method(self): + # GH 14992, reindexing over columns ignored method + df = DataFrame( + data=[[11, 12, 13], [21, 22, 23], [31, 32, 33]], + index=[1, 2, 4], + columns=[1, 2, 4], + dtype=float, + ) + + # default method + result = df.reindex(columns=range(6)) + expected = DataFrame( + data=[ + [np.nan, 11, 12, np.nan, 13, np.nan], + [np.nan, 21, 22, np.nan, 23, np.nan], + [np.nan, 31, 32, np.nan, 33, np.nan], + ], + index=[1, 2, 4], + columns=range(6), + dtype=float, + ) + tm.assert_frame_equal(result, expected) + + # method='ffill' + result = df.reindex(columns=range(6), method="ffill") + expected = DataFrame( + data=[ + [np.nan, 11, 12, 12, 13, 13], + [np.nan, 21, 22, 22, 23, 23], + [np.nan, 31, 32, 32, 33, 33], + ], + index=[1, 2, 4], + columns=range(6), + dtype=float, + ) + tm.assert_frame_equal(result, expected) + + # method='bfill' + result = df.reindex(columns=range(6), method="bfill") + expected = DataFrame( + data=[ + [11, 11, 12, 13, 13, np.nan], + [21, 21, 22, 23, 23, np.nan], + [31, 31, 32, 33, 33, np.nan], + ], + index=[1, 2, 4], + columns=range(6), + dtype=float, + ) + tm.assert_frame_equal(result, expected) + + def test_reindex_axes(self): + # GH 3317, reindexing by both axes loses freq of the index + df = DataFrame( + np.ones((3, 3)), + index=[datetime(2012, 1, 1), datetime(2012, 1, 2), datetime(2012, 1, 3)], + columns=["a", "b", "c"], + ) + time_freq = date_range("2012-01-01", "2012-01-03", freq="d") + some_cols = ["a", "b"] + + index_freq = df.reindex(index=time_freq).index.freq + both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq + seq_freq = df.reindex(index=time_freq).reindex(columns=some_cols).index.freq + assert index_freq == both_freq + assert index_freq == seq_freq + + def test_reindex_fill_value(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + + # axis=0 + result = df.reindex(list(range(15))) + assert np.isnan(result.values[-5:]).all() + + result = df.reindex(range(15), fill_value=0) + expected = df.reindex(range(15)).fillna(0) + tm.assert_frame_equal(result, expected) + + # axis=1 + result = df.reindex(columns=range(5), fill_value=0.0) + expected = df.copy() + expected[4] = 0.0 + tm.assert_frame_equal(result, expected) + + result = df.reindex(columns=range(5), fill_value=0) + expected = df.copy() + expected[4] = 0 + tm.assert_frame_equal(result, expected) + + result = df.reindex(columns=range(5), fill_value="foo") + expected = df.copy() + expected[4] = "foo" + tm.assert_frame_equal(result, expected) + + # other dtypes + df["foo"] = "foo" + result = df.reindex(range(15), fill_value=0) + expected = 
df.reindex(range(15)).fillna(0) + tm.assert_frame_equal(result, expected) + + def test_reindex_uint_dtypes_fill_value(self, any_unsigned_int_numpy_dtype): + # GH#48184 + df = DataFrame({"a": [1, 2], "b": [1, 2]}, dtype=any_unsigned_int_numpy_dtype) + result = df.reindex(columns=list("abcd"), index=[0, 1, 2, 3], fill_value=10) + expected = DataFrame( + {"a": [1, 2, 10, 10], "b": [1, 2, 10, 10], "c": 10, "d": 10}, + dtype=any_unsigned_int_numpy_dtype, + ) + tm.assert_frame_equal(result, expected) + + def test_reindex_single_column_ea_index_and_columns(self, any_numeric_ea_dtype): + # GH#48190 + df = DataFrame({"a": [1, 2]}, dtype=any_numeric_ea_dtype) + result = df.reindex(columns=list("ab"), index=[0, 1, 2], fill_value=10) + expected = DataFrame( + {"a": Series([1, 2, 10], dtype=any_numeric_ea_dtype), "b": 10} + ) + tm.assert_frame_equal(result, expected) + + def test_reindex_dups(self): + # GH4746, reindex on duplicate index error messages + arr = np.random.default_rng(2).standard_normal(10) + df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5]) + + # set index is ok + result = df.copy() + result.index = list(range(len(df))) + expected = DataFrame(arr, index=list(range(len(df)))) + tm.assert_frame_equal(result, expected) + + # reindex fails + msg = "cannot reindex on an axis with duplicate labels" + with pytest.raises(ValueError, match=msg): + df.reindex(index=list(range(len(df)))) + + def test_reindex_with_duplicate_columns(self): + # reindex is invalid! + df = DataFrame( + [[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"] + ) + msg = "cannot reindex on an axis with duplicate labels" + with pytest.raises(ValueError, match=msg): + df.reindex(columns=["bar"]) + with pytest.raises(ValueError, match=msg): + df.reindex(columns=["bar", "foo"]) + + def test_reindex_axis_style(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + expected = DataFrame( + {"A": [1, 2, np.nan], "B": [4, 5, np.nan]}, index=[0, 1, 3] + ) + result = df.reindex([0, 1, 3]) + tm.assert_frame_equal(result, expected) + + result = df.reindex([0, 1, 3], axis=0) + tm.assert_frame_equal(result, expected) + + result = df.reindex([0, 1, 3], axis="index") + tm.assert_frame_equal(result, expected) + + def test_reindex_positional_raises(self): + # https://github.com/pandas-dev/pandas/issues/12392 + # Enforced in 2.0 + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + msg = r"reindex\(\) takes from 1 to 2 positional arguments but 3 were given" + with pytest.raises(TypeError, match=msg): + df.reindex([0, 1], ["A", "B", "C"]) + + def test_reindex_axis_style_raises(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + with pytest.raises(TypeError, match="Cannot specify both 'axis'"): + df.reindex([0, 1], columns=["A"], axis=1) + + with pytest.raises(TypeError, match="Cannot specify both 'axis'"): + df.reindex([0, 1], columns=["A"], axis="index") + + with pytest.raises(TypeError, match="Cannot specify both 'axis'"): + df.reindex(index=[0, 1], axis="index") + + with pytest.raises(TypeError, match="Cannot specify both 'axis'"): + df.reindex(index=[0, 1], axis="columns") + + with pytest.raises(TypeError, match="Cannot specify both 'axis'"): + df.reindex(columns=[0, 1], axis="columns") + + with pytest.raises(TypeError, match="Cannot specify both 'axis'"): + df.reindex(index=[0, 1], columns=[0, 1], axis="columns") + + with pytest.raises(TypeError, match="Cannot specify all"): + df.reindex(labels=[0, 
1], index=[0], columns=["A"]) + + # Mixing styles + with pytest.raises(TypeError, match="Cannot specify both 'axis'"): + df.reindex(index=[0, 1], axis="index") + + with pytest.raises(TypeError, match="Cannot specify both 'axis'"): + df.reindex(index=[0, 1], axis="columns") + + # Duplicates + with pytest.raises(TypeError, match="multiple values"): + df.reindex([0, 1], labels=[0, 1]) + + def test_reindex_single_named_indexer(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]}) + result = df.reindex([0, 1], columns=["A"]) + expected = DataFrame({"A": [1, 2]}) + tm.assert_frame_equal(result, expected) + + def test_reindex_api_equivalence(self): + # https://github.com/pandas-dev/pandas/issues/12392 + # equivalence of the labels/axis and index/columns API's + df = DataFrame( + [[1, 2, 3], [3, 4, 5], [5, 6, 7]], + index=["a", "b", "c"], + columns=["d", "e", "f"], + ) + + res1 = df.reindex(["b", "a"]) + res2 = df.reindex(index=["b", "a"]) + res3 = df.reindex(labels=["b", "a"]) + res4 = df.reindex(labels=["b", "a"], axis=0) + res5 = df.reindex(["b", "a"], axis=0) + for res in [res2, res3, res4, res5]: + tm.assert_frame_equal(res1, res) + + res1 = df.reindex(columns=["e", "d"]) + res2 = df.reindex(["e", "d"], axis=1) + res3 = df.reindex(labels=["e", "d"], axis=1) + for res in [res2, res3]: + tm.assert_frame_equal(res1, res) + + res1 = df.reindex(index=["b", "a"], columns=["e", "d"]) + res2 = df.reindex(columns=["e", "d"], index=["b", "a"]) + res3 = df.reindex(labels=["b", "a"], axis=0).reindex(labels=["e", "d"], axis=1) + for res in [res2, res3]: + tm.assert_frame_equal(res1, res) + + def test_reindex_boolean(self): + frame = DataFrame( + np.ones((10, 2), dtype=bool), index=np.arange(0, 20, 2), columns=[0, 2] + ) + + reindexed = frame.reindex(np.arange(10)) + assert reindexed.values.dtype == np.object_ + assert isna(reindexed[0][1]) + + reindexed = frame.reindex(columns=range(3)) + assert reindexed.values.dtype == np.object_ + assert isna(reindexed[1]).all() + + def test_reindex_objects(self, float_string_frame): + reindexed = float_string_frame.reindex(columns=["foo", "A", "B"]) + assert "foo" in reindexed + + reindexed = float_string_frame.reindex(columns=["A", "B"]) + assert "foo" not in reindexed + + def test_reindex_corner(self, int_frame): + index = Index(["a", "b", "c"]) + dm = DataFrame({}).reindex(index=[1, 2, 3]) + reindexed = dm.reindex(columns=index) + tm.assert_index_equal(reindexed.columns, index) + + # ints are weird + smaller = int_frame.reindex(columns=["A", "B", "E"]) + assert smaller["E"].dtype == np.float64 + + def test_reindex_with_nans(self): + df = DataFrame( + [[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]], + columns=["a", "b"], + index=[100.0, 101.0, np.nan, 102.0, 103.0], + ) + + result = df.reindex(index=[101.0, 102.0, 103.0]) + expected = df.iloc[[1, 3, 4]] + tm.assert_frame_equal(result, expected) + + result = df.reindex(index=[103.0]) + expected = df.iloc[[4]] + tm.assert_frame_equal(result, expected) + + result = df.reindex(index=[101.0]) + expected = df.iloc[[1]] + tm.assert_frame_equal(result, expected) + + def test_reindex_multi(self): + df = DataFrame(np.random.default_rng(2).standard_normal((3, 3))) + + result = df.reindex(index=range(4), columns=range(4)) + expected = df.reindex(list(range(4))).reindex(columns=range(4)) + + tm.assert_frame_equal(result, expected) + + df = DataFrame(np.random.default_rng(2).integers(0, 10, (3, 3))) + + result = df.reindex(index=range(4), columns=range(4)) + expected = 
df.reindex(list(range(4))).reindex(columns=range(4)) + + tm.assert_frame_equal(result, expected) + + df = DataFrame(np.random.default_rng(2).integers(0, 10, (3, 3))) + + result = df.reindex(index=range(2), columns=range(2)) + expected = df.reindex(range(2)).reindex(columns=range(2)) + + tm.assert_frame_equal(result, expected) + + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)) + 1j, + columns=["a", "b", "c"], + ) + + result = df.reindex(index=[0, 1], columns=["a", "b"]) + expected = df.reindex([0, 1]).reindex(columns=["a", "b"]) + + tm.assert_frame_equal(result, expected) + + def test_reindex_multi_categorical_time(self): + # https://github.com/pandas-dev/pandas/issues/21390 + midx = MultiIndex.from_product( + [ + Categorical(["a", "b", "c"]), + Categorical(date_range("2012-01-01", periods=3, freq="H")), + ] + ) + df = DataFrame({"a": range(len(midx))}, index=midx) + df2 = df.iloc[[0, 1, 2, 3, 4, 5, 6, 8]] + + result = df2.reindex(midx) + expected = DataFrame({"a": [0, 1, 2, 3, 4, 5, 6, np.nan, 8]}, index=midx) + tm.assert_frame_equal(result, expected) + + def test_reindex_with_categoricalindex(self): + df = DataFrame( + { + "A": np.arange(3, dtype="int64"), + }, + index=CategoricalIndex(list("abc"), dtype=CDT(list("cabe")), name="B"), + ) + + # reindexing + # convert to a regular index + result = df.reindex(["a", "b", "e"]) + expected = DataFrame({"A": [0, 1, np.nan], "B": Series(list("abe"))}).set_index( + "B" + ) + tm.assert_frame_equal(result, expected, check_index_type=True) + + result = df.reindex(["a", "b"]) + expected = DataFrame({"A": [0, 1], "B": Series(list("ab"))}).set_index("B") + tm.assert_frame_equal(result, expected, check_index_type=True) + + result = df.reindex(["e"]) + expected = DataFrame({"A": [np.nan], "B": Series(["e"])}).set_index("B") + tm.assert_frame_equal(result, expected, check_index_type=True) + + result = df.reindex(["d"]) + expected = DataFrame({"A": [np.nan], "B": Series(["d"])}).set_index("B") + tm.assert_frame_equal(result, expected, check_index_type=True) + + # since we are actually reindexing with a Categorical + # then return a Categorical + cats = list("cabe") + + result = df.reindex(Categorical(["a", "e"], categories=cats)) + expected = DataFrame( + {"A": [0, np.nan], "B": Series(list("ae")).astype(CDT(cats))} + ).set_index("B") + tm.assert_frame_equal(result, expected, check_index_type=True) + + result = df.reindex(Categorical(["a"], categories=cats)) + expected = DataFrame( + {"A": [0], "B": Series(list("a")).astype(CDT(cats))} + ).set_index("B") + tm.assert_frame_equal(result, expected, check_index_type=True) + + result = df.reindex(["a", "b", "e"]) + expected = DataFrame({"A": [0, 1, np.nan], "B": Series(list("abe"))}).set_index( + "B" + ) + tm.assert_frame_equal(result, expected, check_index_type=True) + + result = df.reindex(["a", "b"]) + expected = DataFrame({"A": [0, 1], "B": Series(list("ab"))}).set_index("B") + tm.assert_frame_equal(result, expected, check_index_type=True) + + result = df.reindex(["e"]) + expected = DataFrame({"A": [np.nan], "B": Series(["e"])}).set_index("B") + tm.assert_frame_equal(result, expected, check_index_type=True) + + # give back the type of categorical that we received + result = df.reindex(Categorical(["a", "e"], categories=cats, ordered=True)) + expected = DataFrame( + {"A": [0, np.nan], "B": Series(list("ae")).astype(CDT(cats, ordered=True))} + ).set_index("B") + tm.assert_frame_equal(result, expected, check_index_type=True) + + result = df.reindex(Categorical(["a", "d"], categories=["a", 
"d"])) + expected = DataFrame( + {"A": [0, np.nan], "B": Series(list("ad")).astype(CDT(["a", "d"]))} + ).set_index("B") + tm.assert_frame_equal(result, expected, check_index_type=True) + + df2 = DataFrame( + { + "A": np.arange(6, dtype="int64"), + }, + index=CategoricalIndex(list("aabbca"), dtype=CDT(list("cabe")), name="B"), + ) + # passed duplicate indexers are not allowed + msg = "cannot reindex on an axis with duplicate labels" + with pytest.raises(ValueError, match=msg): + df2.reindex(["a", "b"]) + + # args NotImplemented ATM + msg = r"argument {} is not implemented for CategoricalIndex\.reindex" + with pytest.raises(NotImplementedError, match=msg.format("method")): + df.reindex(["a"], method="ffill") + with pytest.raises(NotImplementedError, match=msg.format("level")): + df.reindex(["a"], level=1) + with pytest.raises(NotImplementedError, match=msg.format("limit")): + df.reindex(["a"], limit=2) + + def test_reindex_signature(self): + sig = inspect.signature(DataFrame.reindex) + parameters = set(sig.parameters) + assert parameters == { + "self", + "labels", + "index", + "columns", + "axis", + "limit", + "copy", + "level", + "method", + "fill_value", + "tolerance", + } + + def test_reindex_multiindex_ffill_added_rows(self): + # GH#23693 + # reindex added rows with nan values even when fill method was specified + mi = MultiIndex.from_tuples([("a", "b"), ("d", "e")]) + df = DataFrame([[0, 7], [3, 4]], index=mi, columns=["x", "y"]) + mi2 = MultiIndex.from_tuples([("a", "b"), ("d", "e"), ("h", "i")]) + result = df.reindex(mi2, axis=0, method="ffill") + expected = DataFrame([[0, 7], [3, 4], [3, 4]], index=mi2, columns=["x", "y"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "kwargs", + [ + {"method": "pad", "tolerance": timedelta(seconds=9)}, + {"method": "backfill", "tolerance": timedelta(seconds=9)}, + {"method": "nearest"}, + {"method": None}, + ], + ) + def test_reindex_empty_frame(self, kwargs): + # GH#27315 + idx = date_range(start="2020", freq="30s", periods=3) + df = DataFrame([], index=Index([], name="time"), columns=["a"]) + result = df.reindex(idx, **kwargs) + expected = DataFrame({"a": [np.nan] * 3}, index=idx, dtype=object) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "src_idx", + [ + Index([]), + CategoricalIndex([]), + ], + ) + @pytest.mark.parametrize( + "cat_idx", + [ + # No duplicates + Index([]), + CategoricalIndex([]), + Index(["A", "B"]), + CategoricalIndex(["A", "B"]), + # Duplicates: GH#38906 + Index(["A", "A"]), + CategoricalIndex(["A", "A"]), + ], + ) + def test_reindex_empty(self, src_idx, cat_idx): + df = DataFrame(columns=src_idx, index=["K"], dtype="f8") + + result = df.reindex(columns=cat_idx) + expected = DataFrame(index=["K"], columns=cat_idx, dtype="f8") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]"]) + def test_reindex_datetimelike_to_object(self, dtype): + # GH#39755 dont cast dt64/td64 to ints + mi = MultiIndex.from_product([list("ABCDE"), range(2)]) + + dti = date_range("2016-01-01", periods=10) + fv = np.timedelta64("NaT", "ns") + if dtype == "m8[ns]": + dti = dti - dti[0] + fv = np.datetime64("NaT", "ns") + + ser = Series(dti, index=mi) + ser[::3] = pd.NaT + + df = ser.unstack() + + index = df.index.append(Index([1])) + columns = df.columns.append(Index(["foo"])) + + res = df.reindex(index=index, columns=columns, fill_value=fv) + + expected = DataFrame( + { + 0: df[0].tolist() + [fv], + 1: df[1].tolist() + [fv], + "foo": np.array(["NaT"] * 6, 
dtype=fv.dtype), + }, + index=index, + ) + assert (res.dtypes[[0, 1]] == object).all() + assert res.iloc[0, 0] is pd.NaT + assert res.iloc[-1, 0] is fv + assert res.iloc[-1, 1] is fv + tm.assert_frame_equal(res, expected) + + @pytest.mark.parametrize( + "index_df,index_res,index_exp", + [ + ( + CategoricalIndex([], categories=["A"]), + Index(["A"]), + Index(["A"]), + ), + ( + CategoricalIndex([], categories=["A"]), + Index(["B"]), + Index(["B"]), + ), + ( + CategoricalIndex([], categories=["A"]), + CategoricalIndex(["A"]), + CategoricalIndex(["A"]), + ), + ( + CategoricalIndex([], categories=["A"]), + CategoricalIndex(["B"]), + CategoricalIndex(["B"]), + ), + ], + ) + def test_reindex_not_category(self, index_df, index_res, index_exp): + # GH#28690 + df = DataFrame(index=index_df) + result = df.reindex(index=index_res) + expected = DataFrame(index=index_exp) + tm.assert_frame_equal(result, expected) + + def test_invalid_method(self): + df = DataFrame({"A": [1, np.nan, 2]}) + + msg = "Invalid fill method" + with pytest.raises(ValueError, match=msg): + df.reindex([1, 0, 2], method="asfreq") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reindex_like.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reindex_like.py new file mode 100644 index 00000000..ce68ec28 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reindex_like.py @@ -0,0 +1,39 @@ +import numpy as np +import pytest + +from pandas import DataFrame +import pandas._testing as tm + + +class TestDataFrameReindexLike: + def test_reindex_like(self, float_frame): + other = float_frame.reindex(index=float_frame.index[:10], columns=["C", "B"]) + + tm.assert_frame_equal(other, float_frame.reindex_like(other)) + + @pytest.mark.parametrize( + "method,expected_values", + [ + ("nearest", [0, 1, 1, 2]), + ("pad", [np.nan, 0, 1, 1]), + ("backfill", [0, 1, 2, 2]), + ], + ) + def test_reindex_like_methods(self, method, expected_values): + df = DataFrame({"x": list(range(5))}) + + result = df.reindex_like(df, method=method, tolerance=0) + tm.assert_frame_equal(df, result) + result = df.reindex_like(df, method=method, tolerance=[0, 0, 0, 0]) + tm.assert_frame_equal(df, result) + + def test_reindex_like_subclass(self): + # https://github.com/pandas-dev/pandas/issues/31925 + class MyDataFrame(DataFrame): + pass + + expected = DataFrame() + df = MyDataFrame() + result = df.reindex_like(expected) + + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rename.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rename.py new file mode 100644 index 00000000..fb70e656 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rename.py @@ -0,0 +1,417 @@ +from collections import ChainMap +import inspect + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + MultiIndex, + merge, +) +import pandas._testing as tm + + +class TestRename: + def test_rename_signature(self): + sig = inspect.signature(DataFrame.rename) + parameters = set(sig.parameters) + assert parameters == { + "self", + "mapper", + "index", + "columns", + "axis", + "inplace", + "copy", + "level", + "errors", + } + + def test_rename_mi(self, frame_or_series): + obj = frame_or_series( + [11, 21, 31], + index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]), + ) + obj.rename(str.lower) + + def test_rename(self, float_frame): + mapping = 
{"A": "a", "B": "b", "C": "c", "D": "d"} + + renamed = float_frame.rename(columns=mapping) + renamed2 = float_frame.rename(columns=str.lower) + + tm.assert_frame_equal(renamed, renamed2) + tm.assert_frame_equal( + renamed2.rename(columns=str.upper), float_frame, check_names=False + ) + + # index + data = {"A": {"foo": 0, "bar": 1}} + + # gets sorted alphabetical + df = DataFrame(data) + renamed = df.rename(index={"foo": "bar", "bar": "foo"}) + tm.assert_index_equal(renamed.index, Index(["foo", "bar"])) + + renamed = df.rename(index=str.upper) + tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"])) + + # have to pass something + with pytest.raises(TypeError, match="must pass an index to rename"): + float_frame.rename() + + # partial columns + renamed = float_frame.rename(columns={"C": "foo", "D": "bar"}) + tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"])) + + # other axis + renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"}) + tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"])) + + # index with name + index = Index(["foo", "bar"], name="name") + renamer = DataFrame(data, index=index) + renamed = renamer.rename(index={"foo": "bar", "bar": "foo"}) + tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name")) + assert renamed.index.name == renamer.index.name + + @pytest.mark.parametrize( + "args,kwargs", + [ + ((ChainMap({"A": "a"}, {"B": "b"}),), {"axis": "columns"}), + ((), {"columns": ChainMap({"A": "a"}, {"B": "b"})}), + ], + ) + def test_rename_chainmap(self, args, kwargs): + # see gh-23859 + colAData = range(1, 11) + colBdata = np.random.default_rng(2).standard_normal(10) + + df = DataFrame({"A": colAData, "B": colBdata}) + result = df.rename(*args, **kwargs) + + expected = DataFrame({"a": colAData, "b": colBdata}) + tm.assert_frame_equal(result, expected) + + def test_rename_multiindex(self): + tuples_index = [("foo1", "bar1"), ("foo2", "bar2")] + tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")] + index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"]) + columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"]) + df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns) + + # + # without specifying level -> across all levels + + renamed = df.rename( + index={"foo1": "foo3", "bar2": "bar3"}, + columns={"fizz1": "fizz3", "buzz2": "buzz3"}, + ) + new_index = MultiIndex.from_tuples( + [("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"] + ) + new_columns = MultiIndex.from_tuples( + [("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"] + ) + tm.assert_index_equal(renamed.index, new_index) + tm.assert_index_equal(renamed.columns, new_columns) + assert renamed.index.names == df.index.names + assert renamed.columns.names == df.columns.names + + # + # with specifying a level (GH13766) + + # dict + new_columns = MultiIndex.from_tuples( + [("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"] + ) + renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0) + tm.assert_index_equal(renamed.columns, new_columns) + renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz") + tm.assert_index_equal(renamed.columns, new_columns) + + new_columns = MultiIndex.from_tuples( + [("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"] + ) + renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1) + tm.assert_index_equal(renamed.columns, new_columns) + renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": 
"buzz3"}, level="buzz") + tm.assert_index_equal(renamed.columns, new_columns) + + # function + func = str.upper + new_columns = MultiIndex.from_tuples( + [("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"] + ) + renamed = df.rename(columns=func, level=0) + tm.assert_index_equal(renamed.columns, new_columns) + renamed = df.rename(columns=func, level="fizz") + tm.assert_index_equal(renamed.columns, new_columns) + + new_columns = MultiIndex.from_tuples( + [("fizz1", "BUZZ1"), ("fizz2", "BUZZ2")], names=["fizz", "buzz"] + ) + renamed = df.rename(columns=func, level=1) + tm.assert_index_equal(renamed.columns, new_columns) + renamed = df.rename(columns=func, level="buzz") + tm.assert_index_equal(renamed.columns, new_columns) + + # index + new_index = MultiIndex.from_tuples( + [("foo3", "bar1"), ("foo2", "bar2")], names=["foo", "bar"] + ) + renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0) + tm.assert_index_equal(renamed.index, new_index) + + def test_rename_nocopy(self, float_frame, using_copy_on_write): + renamed = float_frame.rename(columns={"C": "foo"}, copy=False) + + assert np.shares_memory(renamed["foo"]._values, float_frame["C"]._values) + + renamed.loc[:, "foo"] = 1.0 + if using_copy_on_write: + assert not (float_frame["C"] == 1.0).all() + else: + assert (float_frame["C"] == 1.0).all() + + def test_rename_inplace(self, float_frame): + float_frame.rename(columns={"C": "foo"}) + assert "C" in float_frame + assert "foo" not in float_frame + + c_values = float_frame["C"] + float_frame = float_frame.copy() + return_value = float_frame.rename(columns={"C": "foo"}, inplace=True) + assert return_value is None + + assert "C" not in float_frame + assert "foo" in float_frame + # GH 44153 + # Used to be id(float_frame["foo"]) != c_id, but flaky in the CI + assert float_frame["foo"] is not c_values + + def test_rename_bug(self): + # GH 5344 + # rename set ref_locs, and set_index was not resetting + df = DataFrame({0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]}) + df = df.rename(columns={0: "a"}) + df = df.rename(columns={1: "b"}) + df = df.set_index(["a", "b"]) + df.columns = ["2001-01-01"] + expected = DataFrame( + [[1], [2]], + index=MultiIndex.from_tuples( + [("foo", "bah"), ("bar", "bas")], names=["a", "b"] + ), + columns=["2001-01-01"], + ) + tm.assert_frame_equal(df, expected) + + def test_rename_bug2(self): + # GH 19497 + # rename was changing Index to MultiIndex if Index contained tuples + + df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], columns=["a"]) + df = df.rename({(1, 1): (5, 4)}, axis="index") + expected = DataFrame( + data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], columns=["a"] + ) + tm.assert_frame_equal(df, expected) + + def test_rename_errors_raises(self): + df = DataFrame(columns=["A", "B", "C", "D"]) + with pytest.raises(KeyError, match="'E'] not found in axis"): + df.rename(columns={"A": "a", "E": "e"}, errors="raise") + + @pytest.mark.parametrize( + "mapper, errors, expected_columns", + [ + ({"A": "a", "E": "e"}, "ignore", ["a", "B", "C", "D"]), + ({"A": "a"}, "raise", ["a", "B", "C", "D"]), + (str.lower, "raise", ["a", "b", "c", "d"]), + ], + ) + def test_rename_errors(self, mapper, errors, expected_columns): + # GH 13473 + # rename now works with errors parameter + df = DataFrame(columns=["A", "B", "C", "D"]) + result = df.rename(columns=mapper, errors=errors) + expected = DataFrame(columns=expected_columns) + tm.assert_frame_equal(result, expected) + + def test_rename_objects(self, float_string_frame): + renamed = 
float_string_frame.rename(columns=str.upper) + + assert "FOO" in renamed + assert "foo" not in renamed + + def test_rename_axis_style(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["X", "Y"]) + expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"]) + + result = df.rename(str.lower, axis=1) + tm.assert_frame_equal(result, expected) + + result = df.rename(str.lower, axis="columns") + tm.assert_frame_equal(result, expected) + + result = df.rename({"A": "a", "B": "b"}, axis=1) + tm.assert_frame_equal(result, expected) + + result = df.rename({"A": "a", "B": "b"}, axis="columns") + tm.assert_frame_equal(result, expected) + + # Index + expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"]) + result = df.rename(str.lower, axis=0) + tm.assert_frame_equal(result, expected) + + result = df.rename(str.lower, axis="index") + tm.assert_frame_equal(result, expected) + + result = df.rename({"X": "x", "Y": "y"}, axis=0) + tm.assert_frame_equal(result, expected) + + result = df.rename({"X": "x", "Y": "y"}, axis="index") + tm.assert_frame_equal(result, expected) + + result = df.rename(mapper=str.lower, axis="index") + tm.assert_frame_equal(result, expected) + + def test_rename_mapper_multi(self): + df = DataFrame({"A": ["a", "b"], "B": ["c", "d"], "C": [1, 2]}).set_index( + ["A", "B"] + ) + result = df.rename(str.upper) + expected = df.rename(index=str.upper) + tm.assert_frame_equal(result, expected) + + def test_rename_positional_named(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"]) + result = df.rename(index=str.lower, columns=str.upper) + expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"]) + tm.assert_frame_equal(result, expected) + + def test_rename_axis_style_raises(self): + # see gh-12392 + df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["0", "1"]) + + # Named target and axis + over_spec_msg = "Cannot specify both 'axis' and any of 'index' or 'columns'" + with pytest.raises(TypeError, match=over_spec_msg): + df.rename(index=str.lower, axis=1) + + with pytest.raises(TypeError, match=over_spec_msg): + df.rename(index=str.lower, axis="columns") + + with pytest.raises(TypeError, match=over_spec_msg): + df.rename(columns=str.lower, axis="columns") + + with pytest.raises(TypeError, match=over_spec_msg): + df.rename(index=str.lower, axis=0) + + # Multiple targets and axis + with pytest.raises(TypeError, match=over_spec_msg): + df.rename(str.lower, index=str.lower, axis="columns") + + # Too many targets + over_spec_msg = "Cannot specify both 'mapper' and any of 'index' or 'columns'" + with pytest.raises(TypeError, match=over_spec_msg): + df.rename(str.lower, index=str.lower, columns=str.lower) + + # Duplicates + with pytest.raises(TypeError, match="multiple values"): + df.rename(id, mapper=id) + + def test_rename_positional_raises(self): + # GH 29136 + df = DataFrame(columns=["A", "B"]) + msg = r"rename\(\) takes from 1 to 2 positional arguments" + + with pytest.raises(TypeError, match=msg): + df.rename(None, str.lower) + + def test_rename_no_mappings_raises(self): + # GH 29136 + df = DataFrame([[1]]) + msg = "must pass an index to rename" + with pytest.raises(TypeError, match=msg): + df.rename() + + with pytest.raises(TypeError, match=msg): + df.rename(None, index=None) + + with pytest.raises(TypeError, match=msg): + df.rename(None, columns=None) + + with pytest.raises(TypeError, match=msg): + df.rename(None, columns=None, 
index=None) + + def test_rename_mapper_and_positional_arguments_raises(self): + # GH 29136 + df = DataFrame([[1]]) + msg = "Cannot specify both 'mapper' and any of 'index' or 'columns'" + with pytest.raises(TypeError, match=msg): + df.rename({}, index={}) + + with pytest.raises(TypeError, match=msg): + df.rename({}, columns={}) + + with pytest.raises(TypeError, match=msg): + df.rename({}, columns={}, index={}) + + def test_rename_with_duplicate_columns(self): + # GH#4403 + df4 = DataFrame( + {"RT": [0.0454], "TClose": [22.02], "TExg": [0.0422]}, + index=MultiIndex.from_tuples( + [(600809, 20130331)], names=["STK_ID", "RPT_Date"] + ), + ) + + df5 = DataFrame( + { + "RPT_Date": [20120930, 20121231, 20130331], + "STK_ID": [600809] * 3, + "STK_Name": ["饡驦", "饡驦", "饡驦"], + "TClose": [38.05, 41.66, 30.01], + }, + index=MultiIndex.from_tuples( + [(600809, 20120930), (600809, 20121231), (600809, 20130331)], + names=["STK_ID", "RPT_Date"], + ), + ) + # TODO: can we construct this without merge? + k = merge(df4, df5, how="inner", left_index=True, right_index=True) + result = k.rename(columns={"TClose_x": "TClose", "TClose_y": "QT_Close"}) + str(result) + result.dtypes + + expected = DataFrame( + [[0.0454, 22.02, 0.0422, 20130331, 600809, "饡驦", 30.01]], + columns=[ + "RT", + "TClose", + "TExg", + "RPT_Date", + "STK_ID", + "STK_Name", + "QT_Close", + ], + ).set_index(["STK_ID", "RPT_Date"], drop=False) + tm.assert_frame_equal(result, expected) + + def test_rename_boolean_index(self): + df = DataFrame(np.arange(15).reshape(3, 5), columns=[False, True, 2, 3, 4]) + mapper = {0: "foo", 1: "bar", 2: "bah"} + res = df.rename(index=mapper) + exp = DataFrame( + np.arange(15).reshape(3, 5), + columns=[False, True, 2, 3, 4], + index=["foo", "bar", "bah"], + ) + tm.assert_frame_equal(res, exp) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rename_axis.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rename_axis.py new file mode 100644 index 00000000..dd4a77c6 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_rename_axis.py @@ -0,0 +1,111 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + MultiIndex, +) +import pandas._testing as tm + + +class TestDataFrameRenameAxis: + def test_rename_axis_inplace(self, float_frame): + # GH#15704 + expected = float_frame.rename_axis("foo") + result = float_frame.copy() + return_value = no_return = result.rename_axis("foo", inplace=True) + assert return_value is None + + assert no_return is None + tm.assert_frame_equal(result, expected) + + expected = float_frame.rename_axis("bar", axis=1) + result = float_frame.copy() + return_value = no_return = result.rename_axis("bar", axis=1, inplace=True) + assert return_value is None + + assert no_return is None + tm.assert_frame_equal(result, expected) + + def test_rename_axis_raises(self): + # GH#17833 + df = DataFrame({"A": [1, 2], "B": [1, 2]}) + with pytest.raises(ValueError, match="Use `.rename`"): + df.rename_axis(id, axis=0) + + with pytest.raises(ValueError, match="Use `.rename`"): + df.rename_axis({0: 10, 1: 20}, axis=0) + + with pytest.raises(ValueError, match="Use `.rename`"): + df.rename_axis(id, axis=1) + + with pytest.raises(ValueError, match="Use `.rename`"): + df["A"].rename_axis(id) + + def test_rename_axis_mapper(self): + # GH#19978 + mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"]) + df = DataFrame( + {"x": list(range(len(mi))), "y": [i * 10 for i in 
range(len(mi))]}, index=mi + ) + + # Test for rename of the Index object of columns + result = df.rename_axis("cols", axis=1) + tm.assert_index_equal(result.columns, Index(["x", "y"], name="cols")) + + # Test for rename of the Index object of columns using dict + result = result.rename_axis(columns={"cols": "new"}, axis=1) + tm.assert_index_equal(result.columns, Index(["x", "y"], name="new")) + + # Test for renaming index using dict + result = df.rename_axis(index={"ll": "foo"}) + assert result.index.names == ["foo", "nn"] + + # Test for renaming index using a function + result = df.rename_axis(index=str.upper, axis=0) + assert result.index.names == ["LL", "NN"] + + # Test for renaming index providing complete list + result = df.rename_axis(index=["foo", "goo"]) + assert result.index.names == ["foo", "goo"] + + # Test for changing index and columns at same time + sdf = df.reset_index().set_index("nn").drop(columns=["ll", "y"]) + result = sdf.rename_axis(index="foo", columns="meh") + assert result.index.name == "foo" + assert result.columns.name == "meh" + + # Test different error cases + with pytest.raises(TypeError, match="Must pass"): + df.rename_axis(index="wrong") + + with pytest.raises(ValueError, match="Length of names"): + df.rename_axis(index=["wrong"]) + + with pytest.raises(TypeError, match="bogus"): + df.rename_axis(bogus=None) + + @pytest.mark.parametrize( + "kwargs, rename_index, rename_columns", + [ + ({"mapper": None, "axis": 0}, True, False), + ({"mapper": None, "axis": 1}, False, True), + ({"index": None}, True, False), + ({"columns": None}, False, True), + ({"index": None, "columns": None}, True, True), + ({}, False, False), + ], + ) + def test_rename_axis_none(self, kwargs, rename_index, rename_columns): + # GH 25034 + index = Index(list("abc"), name="foo") + columns = Index(["col1", "col2"], name="bar") + data = np.arange(6).reshape(3, 2) + df = DataFrame(data, index, columns) + + result = df.rename_axis(**kwargs) + expected_index = index.rename(None) if rename_index else index + expected_columns = columns.rename(None) if rename_columns else columns + expected = DataFrame(data, expected_index, expected_columns) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reorder_levels.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reorder_levels.py new file mode 100644 index 00000000..5d6b65da --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reorder_levels.py @@ -0,0 +1,74 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, +) +import pandas._testing as tm + + +class TestReorderLevels: + def test_reorder_levels(self, frame_or_series): + index = MultiIndex( + levels=[["bar"], ["one", "two", "three"], [0, 1]], + codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]], + names=["L0", "L1", "L2"], + ) + df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index) + obj = tm.get_obj(df, frame_or_series) + + # no change, position + result = obj.reorder_levels([0, 1, 2]) + tm.assert_equal(obj, result) + + # no change, labels + result = obj.reorder_levels(["L0", "L1", "L2"]) + tm.assert_equal(obj, result) + + # rotate, position + result = obj.reorder_levels([1, 2, 0]) + e_idx = MultiIndex( + levels=[["one", "two", "three"], [0, 1], ["bar"]], + codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]], + names=["L1", "L2", "L0"], + ) + expected = DataFrame({"A": np.arange(6), "B": 
np.arange(6)}, index=e_idx) + expected = tm.get_obj(expected, frame_or_series) + tm.assert_equal(result, expected) + + result = obj.reorder_levels([0, 0, 0]) + e_idx = MultiIndex( + levels=[["bar"], ["bar"], ["bar"]], + codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], + names=["L0", "L0", "L0"], + ) + expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx) + expected = tm.get_obj(expected, frame_or_series) + tm.assert_equal(result, expected) + + result = obj.reorder_levels(["L0", "L0", "L0"]) + tm.assert_equal(result, expected) + + def test_reorder_levels_swaplevel_equivalence( + self, multiindex_year_month_day_dataframe_random_data + ): + ymd = multiindex_year_month_day_dataframe_random_data + + result = ymd.reorder_levels(["month", "day", "year"]) + expected = ymd.swaplevel(0, 1).swaplevel(1, 2) + tm.assert_frame_equal(result, expected) + + result = ymd["A"].reorder_levels(["month", "day", "year"]) + expected = ymd["A"].swaplevel(0, 1).swaplevel(1, 2) + tm.assert_series_equal(result, expected) + + result = ymd.T.reorder_levels(["month", "day", "year"], axis=1) + expected = ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1) + tm.assert_frame_equal(result, expected) + + with pytest.raises(TypeError, match="hierarchical axis"): + ymd.reorder_levels([1, 2], axis=1) + + with pytest.raises(IndexError, match="Too many levels"): + ymd.index.reorder_levels([1, 2, 3]) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_replace.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_replace.py new file mode 100644 index 00000000..61e44b4e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_replace.py @@ -0,0 +1,1601 @@ +from __future__ import annotations + +from datetime import datetime +import re + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + + +@pytest.fixture +def mix_ab() -> dict[str, list[int | str]]: + return {"a": list(range(4)), "b": list("ab..")} + + +@pytest.fixture +def mix_abc() -> dict[str, list[float | str]]: + return {"a": list(range(4)), "b": list("ab.."), "c": ["a", "b", np.nan, "d"]} + + +class TestDataFrameReplace: + def test_replace_inplace(self, datetime_frame, float_string_frame): + datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan + datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan + + tsframe = datetime_frame.copy() + return_value = tsframe.replace(np.nan, 0, inplace=True) + assert return_value is None + tm.assert_frame_equal(tsframe, datetime_frame.fillna(0)) + + # mixed type + mf = float_string_frame + mf.iloc[5:20, mf.columns.get_loc("foo")] = np.nan + mf.iloc[-10:, mf.columns.get_loc("A")] = np.nan + + result = float_string_frame.replace(np.nan, 0) + expected = float_string_frame.fillna(value=0) + tm.assert_frame_equal(result, expected) + + tsframe = datetime_frame.copy() + return_value = tsframe.replace([np.nan], [0], inplace=True) + assert return_value is None + tm.assert_frame_equal(tsframe, datetime_frame.fillna(0)) + + @pytest.mark.parametrize( + "to_replace,values,expected", + [ + # lists of regexes and values + # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] + ( + [r"\s*\.\s*", r"e|f|g"], + [np.nan, "crap"], + { + "a": ["a", "b", np.nan, np.nan], + "b": ["crap"] * 3 + ["h"], + "c": ["h", "crap", "l", "o"], + }, + ), + # list of [re1, re2, ..., reN] -> [re1, re2, .., reN] + ( + 
[r"\s*(\.)\s*", r"(e|f|g)"], + [r"\1\1", r"\1_crap"], + { + "a": ["a", "b", "..", ".."], + "b": ["e_crap", "f_crap", "g_crap", "h"], + "c": ["h", "e_crap", "l", "o"], + }, + ), + # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN + # or vN)] + ( + [r"\s*(\.)\s*", r"e"], + [r"\1\1", r"crap"], + { + "a": ["a", "b", "..", ".."], + "b": ["crap", "f", "g", "h"], + "c": ["h", "crap", "l", "o"], + }, + ), + ], + ) + @pytest.mark.parametrize("inplace", [True, False]) + @pytest.mark.parametrize("use_value_regex_args", [True, False]) + def test_regex_replace_list_obj( + self, to_replace, values, expected, inplace, use_value_regex_args + ): + df = DataFrame({"a": list("ab.."), "b": list("efgh"), "c": list("helo")}) + + if use_value_regex_args: + result = df.replace(value=values, regex=to_replace, inplace=inplace) + else: + result = df.replace(to_replace, values, regex=True, inplace=inplace) + + if inplace: + assert result is None + result = df + + expected = DataFrame(expected) + tm.assert_frame_equal(result, expected) + + def test_regex_replace_list_mixed(self, mix_ab): + # mixed frame to make sure this doesn't break things + dfmix = DataFrame(mix_ab) + + # lists of regexes and values + # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] + to_replace_res = [r"\s*\.\s*", r"a"] + values = [np.nan, "crap"] + mix2 = {"a": list(range(4)), "b": list("ab.."), "c": list("halo")} + dfmix2 = DataFrame(mix2) + res = dfmix2.replace(to_replace_res, values, regex=True) + expec = DataFrame( + { + "a": mix2["a"], + "b": ["crap", "b", np.nan, np.nan], + "c": ["h", "crap", "l", "o"], + } + ) + tm.assert_frame_equal(res, expec) + + # list of [re1, re2, ..., reN] -> [re1, re2, .., reN] + to_replace_res = [r"\s*(\.)\s*", r"(a|b)"] + values = [r"\1\1", r"\1_crap"] + res = dfmix.replace(to_replace_res, values, regex=True) + expec = DataFrame({"a": mix_ab["a"], "b": ["a_crap", "b_crap", "..", ".."]}) + tm.assert_frame_equal(res, expec) + + # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN + # or vN)] + to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] + values = [r"\1\1", r"crap", r"\1_crap"] + res = dfmix.replace(to_replace_res, values, regex=True) + expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) + tm.assert_frame_equal(res, expec) + + to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] + values = [r"\1\1", r"crap", r"\1_crap"] + res = dfmix.replace(regex=to_replace_res, value=values) + expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) + tm.assert_frame_equal(res, expec) + + def test_regex_replace_list_mixed_inplace(self, mix_ab): + dfmix = DataFrame(mix_ab) + # the same inplace + # lists of regexes and values + # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] + to_replace_res = [r"\s*\.\s*", r"a"] + values = [np.nan, "crap"] + res = dfmix.copy() + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None + expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b", np.nan, np.nan]}) + tm.assert_frame_equal(res, expec) + + # list of [re1, re2, ..., reN] -> [re1, re2, .., reN] + to_replace_res = [r"\s*(\.)\s*", r"(a|b)"] + values = [r"\1\1", r"\1_crap"] + res = dfmix.copy() + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None + expec = DataFrame({"a": mix_ab["a"], "b": ["a_crap", "b_crap", "..", ".."]}) + tm.assert_frame_equal(res, expec) + + # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN + # or vN)] + 
to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] + values = [r"\1\1", r"crap", r"\1_crap"] + res = dfmix.copy() + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None + expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) + tm.assert_frame_equal(res, expec) + + to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] + values = [r"\1\1", r"crap", r"\1_crap"] + res = dfmix.copy() + return_value = res.replace(regex=to_replace_res, value=values, inplace=True) + assert return_value is None + expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) + tm.assert_frame_equal(res, expec) + + def test_regex_replace_dict_mixed(self, mix_abc): + dfmix = DataFrame(mix_abc) + + # dicts + # single dict {re1: v1}, search the whole frame + # need test for this... + + # list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole + # frame + res = dfmix.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, regex=True) + res2 = dfmix.copy() + return_value = res2.replace( + {"b": r"\s*\.\s*"}, {"b": np.nan}, inplace=True, regex=True + ) + assert return_value is None + expec = DataFrame( + {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]} + ) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + + # list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the + # whole frame + res = dfmix.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, regex=True) + res2 = dfmix.copy() + return_value = res2.replace( + {"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, inplace=True, regex=True + ) + assert return_value is None + expec = DataFrame( + {"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]} + ) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + + res = dfmix.replace(regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}) + res2 = dfmix.copy() + return_value = res2.replace( + regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}, inplace=True + ) + assert return_value is None + expec = DataFrame( + {"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]} + ) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + + # scalar -> dict + # to_replace regex, {value: value} + expec = DataFrame( + {"a": mix_abc["a"], "b": [np.nan, "b", ".", "."], "c": mix_abc["c"]} + ) + res = dfmix.replace("a", {"b": np.nan}, regex=True) + res2 = dfmix.copy() + return_value = res2.replace("a", {"b": np.nan}, regex=True, inplace=True) + assert return_value is None + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + + res = dfmix.replace("a", {"b": np.nan}, regex=True) + res2 = dfmix.copy() + return_value = res2.replace(regex="a", value={"b": np.nan}, inplace=True) + assert return_value is None + expec = DataFrame( + {"a": mix_abc["a"], "b": [np.nan, "b", ".", "."], "c": mix_abc["c"]} + ) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + + def test_regex_replace_dict_nested(self, mix_abc): + # nested dicts will not work until this is implemented for Series + dfmix = DataFrame(mix_abc) + res = dfmix.replace({"b": {r"\s*\.\s*": np.nan}}, regex=True) + res2 = dfmix.copy() + res4 = dfmix.copy() + return_value = res2.replace( + {"b": {r"\s*\.\s*": np.nan}}, inplace=True, regex=True + ) + assert return_value is None + res3 = dfmix.replace(regex={"b": {r"\s*\.\s*": np.nan}}) + return_value = res4.replace(regex={"b": {r"\s*\.\s*": np.nan}}, inplace=True) + assert return_value is None + expec = DataFrame( + {"a": mix_abc["a"], "b": ["a", 
"b", np.nan, np.nan], "c": mix_abc["c"]} + ) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + tm.assert_frame_equal(res3, expec) + tm.assert_frame_equal(res4, expec) + + def test_regex_replace_dict_nested_non_first_character(self, any_string_dtype): + # GH 25259 + dtype = any_string_dtype + df = DataFrame({"first": ["abc", "bca", "cab"]}, dtype=dtype) + expected = DataFrame({"first": [".bc", "bc.", "c.b"]}, dtype=dtype) + result = df.replace({"a": "."}, regex=True) + tm.assert_frame_equal(result, expected) + + def test_regex_replace_dict_nested_gh4115(self): + df = DataFrame({"Type": ["Q", "T", "Q", "Q", "T"], "tmp": 2}) + expected = DataFrame({"Type": [0, 1, 0, 0, 1], "tmp": 2}) + result = df.replace({"Type": {"Q": 0, "T": 1}}) + tm.assert_frame_equal(result, expected) + + def test_regex_replace_list_to_scalar(self, mix_abc): + df = DataFrame(mix_abc) + expec = DataFrame( + { + "a": mix_abc["a"], + "b": np.array([np.nan] * 4), + "c": [np.nan, np.nan, np.nan, "d"], + } + ) + res = df.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True) + res2 = df.copy() + res3 = df.copy() + return_value = res2.replace( + [r"\s*\.\s*", "a|b"], np.nan, regex=True, inplace=True + ) + assert return_value is None + return_value = res3.replace( + regex=[r"\s*\.\s*", "a|b"], value=np.nan, inplace=True + ) + assert return_value is None + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + tm.assert_frame_equal(res3, expec) + + def test_regex_replace_str_to_numeric(self, mix_abc): + # what happens when you try to replace a numeric value with a regex? + df = DataFrame(mix_abc) + res = df.replace(r"\s*\.\s*", 0, regex=True) + res2 = df.copy() + return_value = res2.replace(r"\s*\.\s*", 0, inplace=True, regex=True) + assert return_value is None + res3 = df.copy() + return_value = res3.replace(regex=r"\s*\.\s*", value=0, inplace=True) + assert return_value is None + expec = DataFrame({"a": mix_abc["a"], "b": ["a", "b", 0, 0], "c": mix_abc["c"]}) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + tm.assert_frame_equal(res3, expec) + + def test_regex_replace_regex_list_to_numeric(self, mix_abc): + df = DataFrame(mix_abc) + res = df.replace([r"\s*\.\s*", "b"], 0, regex=True) + res2 = df.copy() + return_value = res2.replace([r"\s*\.\s*", "b"], 0, regex=True, inplace=True) + assert return_value is None + res3 = df.copy() + return_value = res3.replace(regex=[r"\s*\.\s*", "b"], value=0, inplace=True) + assert return_value is None + expec = DataFrame( + {"a": mix_abc["a"], "b": ["a", 0, 0, 0], "c": ["a", 0, np.nan, "d"]} + ) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + tm.assert_frame_equal(res3, expec) + + def test_regex_replace_series_of_regexes(self, mix_abc): + df = DataFrame(mix_abc) + s1 = Series({"b": r"\s*\.\s*"}) + s2 = Series({"b": np.nan}) + res = df.replace(s1, s2, regex=True) + res2 = df.copy() + return_value = res2.replace(s1, s2, inplace=True, regex=True) + assert return_value is None + res3 = df.copy() + return_value = res3.replace(regex=s1, value=s2, inplace=True) + assert return_value is None + expec = DataFrame( + {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]} + ) + tm.assert_frame_equal(res, expec) + tm.assert_frame_equal(res2, expec) + tm.assert_frame_equal(res3, expec) + + def test_regex_replace_numeric_to_object_conversion(self, mix_abc): + df = DataFrame(mix_abc) + expec = DataFrame({"a": ["a", 1, 2, 3], "b": mix_abc["b"], "c": mix_abc["c"]}) + res = df.replace(0, "a") + 
tm.assert_frame_equal(res, expec) + assert res.a.dtype == np.object_ + + @pytest.mark.parametrize( + "to_replace", [{"": np.nan, ",": ""}, {",": "", "": np.nan}] + ) + def test_joint_simple_replace_and_regex_replace(self, to_replace): + # GH-39338 + df = DataFrame( + { + "col1": ["1,000", "a", "3"], + "col2": ["a", "", "b"], + "col3": ["a", "b", "c"], + } + ) + result = df.replace(regex=to_replace) + expected = DataFrame( + { + "col1": ["1000", "a", "3"], + "col2": ["a", np.nan, "b"], + "col3": ["a", "b", "c"], + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("metachar", ["[]", "()", r"\d", r"\w", r"\s"]) + def test_replace_regex_metachar(self, metachar): + df = DataFrame({"a": [metachar, "else"]}) + result = df.replace({"a": {metachar: "paren"}}) + expected = DataFrame({"a": ["paren", "else"]}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "data,to_replace,expected", + [ + (["xax", "xbx"], {"a": "c", "b": "d"}, ["xcx", "xdx"]), + (["d", "", ""], {r"^\s*$": pd.NA}, ["d", pd.NA, pd.NA]), + ], + ) + def test_regex_replace_string_types( + self, data, to_replace, expected, frame_or_series, any_string_dtype + ): + # GH-41333, GH-35977 + dtype = any_string_dtype + obj = frame_or_series(data, dtype=dtype) + result = obj.replace(to_replace, regex=True) + expected = frame_or_series(expected, dtype=dtype) + + tm.assert_equal(result, expected) + + def test_replace(self, datetime_frame): + datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan + datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan + + zero_filled = datetime_frame.replace(np.nan, -1e8) + tm.assert_frame_equal(zero_filled, datetime_frame.fillna(-1e8)) + tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), datetime_frame) + + datetime_frame.loc[datetime_frame.index[:5], "A"] = np.nan + datetime_frame.loc[datetime_frame.index[-5:], "A"] = np.nan + datetime_frame.loc[datetime_frame.index[:5], "B"] = -1e8 + + # empty + df = DataFrame(index=["a", "b"]) + tm.assert_frame_equal(df, df.replace(5, 7)) + + # GH 11698 + # test for mixed data types. 
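+ # the "-" placeholder below sits in an object column next to a datetime64
+ # column; replace("-", np.nan) should null only the string cell and leave
+ # the timestamps untouched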
+ df = DataFrame( + [("-", pd.to_datetime("20150101")), ("a", pd.to_datetime("20150102"))] + ) + df1 = df.replace("-", np.nan) + expected_df = DataFrame( + [(np.nan, pd.to_datetime("20150101")), ("a", pd.to_datetime("20150102"))] + ) + tm.assert_frame_equal(df1, expected_df) + + def test_replace_list(self): + obj = {"a": list("ab.."), "b": list("efgh"), "c": list("helo")} + dfobj = DataFrame(obj) + + # lists of regexes and values + # list of [v1, v2, ..., vN] -> [v1, v2, ..., vN] + to_replace_res = [r".", r"e"] + values = [np.nan, "crap"] + res = dfobj.replace(to_replace_res, values) + expec = DataFrame( + { + "a": ["a", "b", np.nan, np.nan], + "b": ["crap", "f", "g", "h"], + "c": ["h", "crap", "l", "o"], + } + ) + tm.assert_frame_equal(res, expec) + + # list of [v1, v2, ..., vN] -> [v1, v2, .., vN] + to_replace_res = [r".", r"f"] + values = [r"..", r"crap"] + res = dfobj.replace(to_replace_res, values) + expec = DataFrame( + { + "a": ["a", "b", "..", ".."], + "b": ["e", "crap", "g", "h"], + "c": ["h", "e", "l", "o"], + } + ) + tm.assert_frame_equal(res, expec) + + def test_replace_with_empty_list(self, frame_or_series): + # GH 21977 + ser = Series([["a", "b"], [], np.nan, [1]]) + obj = DataFrame({"col": ser}) + obj = tm.get_obj(obj, frame_or_series) + expected = obj + result = obj.replace([], np.nan) + tm.assert_equal(result, expected) + + # GH 19266 + msg = ( + "NumPy boolean array indexing assignment cannot assign {size} " + "input values to the 1 output values where the mask is true" + ) + with pytest.raises(ValueError, match=msg.format(size=0)): + obj.replace({np.nan: []}) + with pytest.raises(ValueError, match=msg.format(size=2)): + obj.replace({np.nan: ["dummy", "alt"]}) + + def test_replace_series_dict(self): + # from GH 3064 + df = DataFrame({"zero": {"a": 0.0, "b": 1}, "one": {"a": 2.0, "b": 0}}) + result = df.replace(0, {"zero": 0.5, "one": 1.0}) + expected = DataFrame({"zero": {"a": 0.5, "b": 1}, "one": {"a": 2.0, "b": 1.0}}) + tm.assert_frame_equal(result, expected) + + result = df.replace(0, df.mean()) + tm.assert_frame_equal(result, expected) + + # series to series/dict + df = DataFrame({"zero": {"a": 0.0, "b": 1}, "one": {"a": 2.0, "b": 0}}) + s = Series({"zero": 0.0, "one": 2.0}) + result = df.replace(s, {"zero": 0.5, "one": 1.0}) + expected = DataFrame({"zero": {"a": 0.5, "b": 1}, "one": {"a": 1.0, "b": 0.0}}) + tm.assert_frame_equal(result, expected) + + result = df.replace(s, df.mean()) + tm.assert_frame_equal(result, expected) + + def test_replace_convert(self): + # gh 3907 + df = DataFrame([["foo", "bar", "bah"], ["bar", "foo", "bah"]]) + m = {"foo": 1, "bar": 2, "bah": 3} + rep = df.replace(m) + expec = Series([np.int64] * 3) + res = rep.dtypes + tm.assert_series_equal(expec, res) + + def test_replace_mixed(self, float_string_frame): + mf = float_string_frame + mf.iloc[5:20, mf.columns.get_loc("foo")] = np.nan + mf.iloc[-10:, mf.columns.get_loc("A")] = np.nan + + result = float_string_frame.replace(np.nan, -18) + expected = float_string_frame.fillna(value=-18) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result.replace(-18, np.nan), float_string_frame) + + result = float_string_frame.replace(np.nan, -1e8) + expected = float_string_frame.fillna(value=-1e8) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result.replace(-1e8, np.nan), float_string_frame) + + def test_replace_mixed_int_block_upcasting(self): + # int block upcasting + df = DataFrame( + { + "A": Series([1.0, 2.0], dtype="float64"), + "B": Series([0, 1], 
dtype="int64"), + } + ) + expected = DataFrame( + { + "A": Series([1.0, 2.0], dtype="float64"), + "B": Series([0.5, 1], dtype="float64"), + } + ) + result = df.replace(0, 0.5) + tm.assert_frame_equal(result, expected) + + return_value = df.replace(0, 0.5, inplace=True) + assert return_value is None + tm.assert_frame_equal(df, expected) + + def test_replace_mixed_int_block_splitting(self): + # int block splitting + df = DataFrame( + { + "A": Series([1.0, 2.0], dtype="float64"), + "B": Series([0, 1], dtype="int64"), + "C": Series([1, 2], dtype="int64"), + } + ) + expected = DataFrame( + { + "A": Series([1.0, 2.0], dtype="float64"), + "B": Series([0.5, 1], dtype="float64"), + "C": Series([1, 2], dtype="int64"), + } + ) + result = df.replace(0, 0.5) + tm.assert_frame_equal(result, expected) + + def test_replace_mixed2(self): + # to object block upcasting + df = DataFrame( + { + "A": Series([1.0, 2.0], dtype="float64"), + "B": Series([0, 1], dtype="int64"), + } + ) + expected = DataFrame( + { + "A": Series([1, "foo"], dtype="object"), + "B": Series([0, 1], dtype="int64"), + } + ) + result = df.replace(2, "foo") + tm.assert_frame_equal(result, expected) + + expected = DataFrame( + { + "A": Series(["foo", "bar"], dtype="object"), + "B": Series([0, "foo"], dtype="object"), + } + ) + result = df.replace([1, 2], ["foo", "bar"]) + tm.assert_frame_equal(result, expected) + + def test_replace_mixed3(self): + # test case from + df = DataFrame( + {"A": Series([3, 0], dtype="int64"), "B": Series([0, 3], dtype="int64")} + ) + result = df.replace(3, df.mean().to_dict()) + expected = df.copy().astype("float64") + m = df.mean() + expected.iloc[0, 0] = m.iloc[0] + expected.iloc[1, 1] = m.iloc[1] + tm.assert_frame_equal(result, expected) + + def test_replace_nullable_int_with_string_doesnt_cast(self): + # GH#25438 don't cast df['a'] to float64 + df = DataFrame({"a": [1, 2, 3, np.nan], "b": ["some", "strings", "here", "he"]}) + df["a"] = df["a"].astype("Int64") + + res = df.replace("", np.nan) + tm.assert_series_equal(res["a"], df["a"]) + + @pytest.mark.parametrize("dtype", ["boolean", "Int64", "Float64"]) + def test_replace_with_nullable_column(self, dtype): + # GH-44499 + nullable_ser = Series([1, 0, 1], dtype=dtype) + df = DataFrame({"A": ["A", "B", "x"], "B": nullable_ser}) + result = df.replace("x", "X") + expected = DataFrame({"A": ["A", "B", "X"], "B": nullable_ser}) + tm.assert_frame_equal(result, expected) + + def test_replace_simple_nested_dict(self): + df = DataFrame({"col": range(1, 5)}) + expected = DataFrame({"col": ["a", 2, 3, "b"]}) + + result = df.replace({"col": {1: "a", 4: "b"}}) + tm.assert_frame_equal(expected, result) + + # in this case, should be the same as the not nested version + result = df.replace({1: "a", 4: "b"}) + tm.assert_frame_equal(expected, result) + + def test_replace_simple_nested_dict_with_nonexistent_value(self): + df = DataFrame({"col": range(1, 5)}) + expected = DataFrame({"col": ["a", 2, 3, "b"]}) + + result = df.replace({-1: "-", 1: "a", 4: "b"}) + tm.assert_frame_equal(expected, result) + + result = df.replace({"col": {-1: "-", 1: "a", 4: "b"}}) + tm.assert_frame_equal(expected, result) + + def test_replace_NA_with_None(self): + # gh-45601 + df = DataFrame({"value": [42, None]}).astype({"value": "Int64"}) + result = df.replace({pd.NA: None}) + expected = DataFrame({"value": [42, None]}, dtype=object) + tm.assert_frame_equal(result, expected) + + def test_replace_NAT_with_None(self): + # gh-45836 + df = DataFrame([pd.NaT, pd.NaT]) + result = df.replace({pd.NaT: None, 
np.nan: None}) + expected = DataFrame([None, None]) + tm.assert_frame_equal(result, expected) + + def test_replace_with_None_keeps_categorical(self): + # gh-46634 + cat_series = Series(["b", "b", "b", "d"], dtype="category") + df = DataFrame( + { + "id": Series([5, 4, 3, 2], dtype="float64"), + "col": cat_series, + } + ) + result = df.replace({3: None}) + + expected = DataFrame( + { + "id": Series([5.0, 4.0, None, 2.0], dtype="object"), + "col": cat_series, + } + ) + tm.assert_frame_equal(result, expected) + + def test_replace_value_is_none(self, datetime_frame): + orig_value = datetime_frame.iloc[0, 0] + orig2 = datetime_frame.iloc[1, 0] + + datetime_frame.iloc[0, 0] = np.nan + datetime_frame.iloc[1, 0] = 1 + + result = datetime_frame.replace(to_replace={np.nan: 0}) + expected = datetime_frame.T.replace(to_replace={np.nan: 0}).T + tm.assert_frame_equal(result, expected) + + result = datetime_frame.replace(to_replace={np.nan: 0, 1: -1e8}) + tsframe = datetime_frame.copy() + tsframe.iloc[0, 0] = 0 + tsframe.iloc[1, 0] = -1e8 + expected = tsframe + tm.assert_frame_equal(expected, result) + datetime_frame.iloc[0, 0] = orig_value + datetime_frame.iloc[1, 0] = orig2 + + def test_replace_for_new_dtypes(self, datetime_frame): + # dtypes + tsframe = datetime_frame.copy().astype(np.float32) + tsframe.loc[tsframe.index[:5], "A"] = np.nan + tsframe.loc[tsframe.index[-5:], "A"] = np.nan + + zero_filled = tsframe.replace(np.nan, -1e8) + tm.assert_frame_equal(zero_filled, tsframe.fillna(-1e8)) + tm.assert_frame_equal(zero_filled.replace(-1e8, np.nan), tsframe) + + tsframe.loc[tsframe.index[:5], "A"] = np.nan + tsframe.loc[tsframe.index[-5:], "A"] = np.nan + tsframe.loc[tsframe.index[:5], "B"] = -1e8 + + b = tsframe["B"] + b[b == -1e8] = np.nan + tsframe["B"] = b + msg = "DataFrame.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + # TODO: what is this even testing? 
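+ # as written, both sides of the assert below call fillna(method="bfill"),
+ # so this seems to exercise only the deprecation-warning path for 'method'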
+ result = tsframe.fillna(method="bfill") + tm.assert_frame_equal(result, tsframe.fillna(method="bfill")) + + @pytest.mark.parametrize( + "frame, to_replace, value, expected", + [ + (DataFrame({"ints": [1, 2, 3]}), 1, 0, DataFrame({"ints": [0, 2, 3]})), + ( + DataFrame({"ints": [1, 2, 3]}, dtype=np.int32), + 1, + 0, + DataFrame({"ints": [0, 2, 3]}, dtype=np.int32), + ), + ( + DataFrame({"ints": [1, 2, 3]}, dtype=np.int16), + 1, + 0, + DataFrame({"ints": [0, 2, 3]}, dtype=np.int16), + ), + ( + DataFrame({"bools": [True, False, True]}), + False, + True, + DataFrame({"bools": [True, True, True]}), + ), + ( + DataFrame({"complex": [1j, 2j, 3j]}), + 1j, + 0, + DataFrame({"complex": [0j, 2j, 3j]}), + ), + ( + DataFrame( + { + "datetime64": Index( + [ + datetime(2018, 5, 28), + datetime(2018, 7, 28), + datetime(2018, 5, 28), + ] + ) + } + ), + datetime(2018, 5, 28), + datetime(2018, 7, 28), + DataFrame({"datetime64": Index([datetime(2018, 7, 28)] * 3)}), + ), + # GH 20380 + ( + DataFrame({"dt": [datetime(3017, 12, 20)], "str": ["foo"]}), + "foo", + "bar", + DataFrame({"dt": [datetime(3017, 12, 20)], "str": ["bar"]}), + ), + # GH 36782 + ( + DataFrame({"dt": [datetime(2920, 10, 1)]}), + datetime(2920, 10, 1), + datetime(2020, 10, 1), + DataFrame({"dt": [datetime(2020, 10, 1)]}), + ), + ( + DataFrame( + { + "A": date_range("20130101", periods=3, tz="US/Eastern"), + "B": [0, np.nan, 2], + } + ), + Timestamp("20130102", tz="US/Eastern"), + Timestamp("20130104", tz="US/Eastern"), + DataFrame( + { + "A": [ + Timestamp("20130101", tz="US/Eastern"), + Timestamp("20130104", tz="US/Eastern"), + Timestamp("20130103", tz="US/Eastern"), + ], + "B": [0, np.nan, 2], + } + ), + ), + # GH 35376 + ( + DataFrame([[1, 1.0], [2, 2.0]]), + 1.0, + 5, + DataFrame([[5, 5.0], [2, 2.0]]), + ), + ( + DataFrame([[1, 1.0], [2, 2.0]]), + 1, + 5, + DataFrame([[5, 5.0], [2, 2.0]]), + ), + ( + DataFrame([[1, 1.0], [2, 2.0]]), + 1.0, + 5.0, + DataFrame([[5, 5.0], [2, 2.0]]), + ), + ( + DataFrame([[1, 1.0], [2, 2.0]]), + 1, + 5.0, + DataFrame([[5, 5.0], [2, 2.0]]), + ), + ], + ) + def test_replace_dtypes(self, frame, to_replace, value, expected): + result = frame.replace(to_replace, value) + tm.assert_frame_equal(result, expected) + + def test_replace_input_formats_listlike(self): + # both dicts + to_rep = {"A": np.nan, "B": 0, "C": ""} + values = {"A": 0, "B": -1, "C": "missing"} + df = DataFrame( + {"A": [np.nan, 0, np.inf], "B": [0, 2, 5], "C": ["", "asdf", "fd"]} + ) + filled = df.replace(to_rep, values) + expected = {k: v.replace(to_rep[k], values[k]) for k, v in df.items()} + tm.assert_frame_equal(filled, DataFrame(expected)) + + result = df.replace([0, 2, 5], [5, 2, 0]) + expected = DataFrame( + {"A": [np.nan, 5, np.inf], "B": [5, 2, 0], "C": ["", "asdf", "fd"]} + ) + tm.assert_frame_equal(result, expected) + + # scalar to dict + values = {"A": 0, "B": -1, "C": "missing"} + df = DataFrame( + {"A": [np.nan, 0, np.nan], "B": [0, 2, 5], "C": ["", "asdf", "fd"]} + ) + filled = df.replace(np.nan, values) + expected = {k: v.replace(np.nan, values[k]) for k, v in df.items()} + tm.assert_frame_equal(filled, DataFrame(expected)) + + # list to list + to_rep = [np.nan, 0, ""] + values = [-2, -1, "missing"] + result = df.replace(to_rep, values) + expected = df.copy() + for rep, value in zip(to_rep, values): + return_value = expected.replace(rep, value, inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + + msg = r"Replacement lists must match in length\. 
Expecting 3 got 2" + with pytest.raises(ValueError, match=msg): + df.replace(to_rep, values[1:]) + + def test_replace_input_formats_scalar(self): + df = DataFrame( + {"A": [np.nan, 0, np.inf], "B": [0, 2, 5], "C": ["", "asdf", "fd"]} + ) + + # dict to scalar + to_rep = {"A": np.nan, "B": 0, "C": ""} + filled = df.replace(to_rep, 0) + expected = {k: v.replace(to_rep[k], 0) for k, v in df.items()} + tm.assert_frame_equal(filled, DataFrame(expected)) + + msg = "value argument must be scalar, dict, or Series" + with pytest.raises(TypeError, match=msg): + df.replace(to_rep, [np.nan, 0, ""]) + + # list to scalar + to_rep = [np.nan, 0, ""] + result = df.replace(to_rep, -1) + expected = df.copy() + for rep in to_rep: + return_value = expected.replace(rep, -1, inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + + def test_replace_limit(self): + # TODO + pass + + def test_replace_dict_no_regex(self): + answer = Series( + { + 0: "Strongly Agree", + 1: "Agree", + 2: "Neutral", + 3: "Disagree", + 4: "Strongly Disagree", + } + ) + weights = { + "Agree": 4, + "Disagree": 2, + "Neutral": 3, + "Strongly Agree": 5, + "Strongly Disagree": 1, + } + expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1}) + result = answer.replace(weights) + tm.assert_series_equal(result, expected) + + def test_replace_series_no_regex(self): + answer = Series( + { + 0: "Strongly Agree", + 1: "Agree", + 2: "Neutral", + 3: "Disagree", + 4: "Strongly Disagree", + } + ) + weights = Series( + { + "Agree": 4, + "Disagree": 2, + "Neutral": 3, + "Strongly Agree": 5, + "Strongly Disagree": 1, + } + ) + expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1}) + result = answer.replace(weights) + tm.assert_series_equal(result, expected) + + def test_replace_dict_tuple_list_ordering_remains_the_same(self): + df = DataFrame({"A": [np.nan, 1]}) + res1 = df.replace(to_replace={np.nan: 0, 1: -1e8}) + res2 = df.replace(to_replace=(1, np.nan), value=[-1e8, 0]) + res3 = df.replace(to_replace=[1, np.nan], value=[-1e8, 0]) + + expected = DataFrame({"A": [0, -1e8]}) + tm.assert_frame_equal(res1, res2) + tm.assert_frame_equal(res2, res3) + tm.assert_frame_equal(res3, expected) + + def test_replace_doesnt_replace_without_regex(self): + df = DataFrame( + { + "fol": [1, 2, 2, 3], + "T_opp": ["0", "vr", "0", "0"], + "T_Dir": ["0", "0", "0", "bt"], + "T_Enh": ["vo", "0", "0", "0"], + } + ) + res = df.replace({r"\D": 1}) + tm.assert_frame_equal(df, res) + + def test_replace_bool_with_string(self): + df = DataFrame({"a": [True, False], "b": list("ab")}) + result = df.replace(True, "a") + expected = DataFrame({"a": ["a", False], "b": df.b}) + tm.assert_frame_equal(result, expected) + + def test_replace_pure_bool_with_string_no_op(self): + df = DataFrame(np.random.default_rng(2).random((2, 2)) > 0.5) + result = df.replace("asdf", "fdsa") + tm.assert_frame_equal(df, result) + + def test_replace_bool_with_bool(self): + df = DataFrame(np.random.default_rng(2).random((2, 2)) > 0.5) + result = df.replace(False, True) + expected = DataFrame(np.ones((2, 2), dtype=bool)) + tm.assert_frame_equal(result, expected) + + def test_replace_with_dict_with_bool_keys(self): + df = DataFrame({0: [True, False], 1: [False, True]}) + result = df.replace({"asdf": "asdb", True: "yes"}) + expected = DataFrame({0: ["yes", False], 1: [False, "yes"]}) + tm.assert_frame_equal(result, expected) + + def test_replace_dict_strings_vs_ints(self): + # GH#34789 + df = DataFrame({"Y0": [1, 2], "Y1": [3, 4]}) + result = df.replace({"replace_string": "test"}) + + 
tm.assert_frame_equal(result, df) + + result = df["Y0"].replace({"replace_string": "test"}) + tm.assert_series_equal(result, df["Y0"]) + + def test_replace_truthy(self): + df = DataFrame({"a": [True, True]}) + r = df.replace([np.inf, -np.inf], np.nan) + e = df + tm.assert_frame_equal(r, e) + + def test_nested_dict_overlapping_keys_replace_int(self): + # GH 27660 keep behaviour consistent for simple dictionary and + # nested dictionary replacement + df = DataFrame({"a": list(range(1, 5))}) + + result = df.replace({"a": dict(zip(range(1, 5), range(2, 6)))}) + expected = df.replace(dict(zip(range(1, 5), range(2, 6)))) + tm.assert_frame_equal(result, expected) + + def test_nested_dict_overlapping_keys_replace_str(self): + # GH 27660 + a = np.arange(1, 5) + astr = a.astype(str) + bstr = np.arange(2, 6).astype(str) + df = DataFrame({"a": astr}) + result = df.replace(dict(zip(astr, bstr))) + expected = df.replace({"a": dict(zip(astr, bstr))}) + tm.assert_frame_equal(result, expected) + + def test_replace_swapping_bug(self): + df = DataFrame({"a": [True, False, True]}) + res = df.replace({"a": {True: "Y", False: "N"}}) + expect = DataFrame({"a": ["Y", "N", "Y"]}) + tm.assert_frame_equal(res, expect) + + df = DataFrame({"a": [0, 1, 0]}) + res = df.replace({"a": {0: "Y", 1: "N"}}) + expect = DataFrame({"a": ["Y", "N", "Y"]}) + tm.assert_frame_equal(res, expect) + + def test_replace_period(self): + d = { + "fname": { + "out_augmented_AUG_2011.json": pd.Period(year=2011, month=8, freq="M"), + "out_augmented_JAN_2011.json": pd.Period(year=2011, month=1, freq="M"), + "out_augmented_MAY_2012.json": pd.Period(year=2012, month=5, freq="M"), + "out_augmented_SUBSIDY_WEEK.json": pd.Period( + year=2011, month=4, freq="M" + ), + "out_augmented_AUG_2012.json": pd.Period(year=2012, month=8, freq="M"), + "out_augmented_MAY_2011.json": pd.Period(year=2011, month=5, freq="M"), + "out_augmented_SEP_2013.json": pd.Period(year=2013, month=9, freq="M"), + } + } + + df = DataFrame( + [ + "out_augmented_AUG_2012.json", + "out_augmented_SEP_2013.json", + "out_augmented_SUBSIDY_WEEK.json", + "out_augmented_MAY_2012.json", + "out_augmented_MAY_2011.json", + "out_augmented_AUG_2011.json", + "out_augmented_JAN_2011.json", + ], + columns=["fname"], + ) + assert set(df.fname.values) == set(d["fname"].keys()) + + expected = DataFrame({"fname": [d["fname"][k] for k in df.fname.values]}) + assert expected.dtypes.iloc[0] == "Period[M]" + result = df.replace(d) + tm.assert_frame_equal(result, expected) + + def test_replace_datetime(self): + d = { + "fname": { + "out_augmented_AUG_2011.json": Timestamp("2011-08"), + "out_augmented_JAN_2011.json": Timestamp("2011-01"), + "out_augmented_MAY_2012.json": Timestamp("2012-05"), + "out_augmented_SUBSIDY_WEEK.json": Timestamp("2011-04"), + "out_augmented_AUG_2012.json": Timestamp("2012-08"), + "out_augmented_MAY_2011.json": Timestamp("2011-05"), + "out_augmented_SEP_2013.json": Timestamp("2013-09"), + } + } + + df = DataFrame( + [ + "out_augmented_AUG_2012.json", + "out_augmented_SEP_2013.json", + "out_augmented_SUBSIDY_WEEK.json", + "out_augmented_MAY_2012.json", + "out_augmented_MAY_2011.json", + "out_augmented_AUG_2011.json", + "out_augmented_JAN_2011.json", + ], + columns=["fname"], + ) + assert set(df.fname.values) == set(d["fname"].keys()) + expected = DataFrame({"fname": [d["fname"][k] for k in df.fname.values]}) + result = df.replace(d) + tm.assert_frame_equal(result, expected) + + def test_replace_datetimetz(self): + # GH 11326 + # behaving poorly when presented with a 
datetime64[ns, tz] + df = DataFrame( + { + "A": date_range("20130101", periods=3, tz="US/Eastern"), + "B": [0, np.nan, 2], + } + ) + result = df.replace(np.nan, 1) + expected = DataFrame( + { + "A": date_range("20130101", periods=3, tz="US/Eastern"), + "B": Series([0, 1, 2], dtype="float64"), + } + ) + tm.assert_frame_equal(result, expected) + + result = df.fillna(1) + tm.assert_frame_equal(result, expected) + + result = df.replace(0, np.nan) + expected = DataFrame( + { + "A": date_range("20130101", periods=3, tz="US/Eastern"), + "B": [np.nan, np.nan, 2], + } + ) + tm.assert_frame_equal(result, expected) + + result = df.replace( + Timestamp("20130102", tz="US/Eastern"), + Timestamp("20130104", tz="US/Eastern"), + ) + expected = DataFrame( + { + "A": [ + Timestamp("20130101", tz="US/Eastern"), + Timestamp("20130104", tz="US/Eastern"), + Timestamp("20130103", tz="US/Eastern"), + ], + "B": [0, np.nan, 2], + } + ) + tm.assert_frame_equal(result, expected) + + result = df.copy() + result.iloc[1, 0] = np.nan + result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Eastern")) + tm.assert_frame_equal(result, expected) + + # pre-2.0 this would coerce to object with mismatched tzs + result = df.copy() + result.iloc[1, 0] = np.nan + result = result.replace({"A": pd.NaT}, Timestamp("20130104", tz="US/Pacific")) + expected = DataFrame( + { + "A": [ + Timestamp("20130101", tz="US/Eastern"), + Timestamp("20130104", tz="US/Pacific").tz_convert("US/Eastern"), + Timestamp("20130103", tz="US/Eastern"), + ], + "B": [0, np.nan, 2], + } + ) + tm.assert_frame_equal(result, expected) + + result = df.copy() + result.iloc[1, 0] = np.nan + result = result.replace({"A": np.nan}, Timestamp("20130104")) + expected = DataFrame( + { + "A": [ + Timestamp("20130101", tz="US/Eastern"), + Timestamp("20130104"), + Timestamp("20130103", tz="US/Eastern"), + ], + "B": [0, np.nan, 2], + } + ) + tm.assert_frame_equal(result, expected) + + def test_replace_with_empty_dictlike(self, mix_abc): + # GH 15289 + df = DataFrame(mix_abc) + tm.assert_frame_equal(df, df.replace({})) + tm.assert_frame_equal(df, df.replace(Series([], dtype=object))) + + tm.assert_frame_equal(df, df.replace({"b": {}})) + tm.assert_frame_equal(df, df.replace(Series({"b": {}}))) + + @pytest.mark.parametrize( + "to_replace, method, expected", + [ + (0, "bfill", {"A": [1, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}), + ( + np.nan, + "bfill", + {"A": [0, 1, 2], "B": [5.0, 7.0, 7.0], "C": ["a", "b", "c"]}, + ), + ("d", "ffill", {"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}), + ( + [0, 2], + "bfill", + {"A": [1, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}, + ), + ( + [1, 2], + "pad", + {"A": [0, 0, 0], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}, + ), + ( + (1, 2), + "bfill", + {"A": [0, 2, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}, + ), + ( + ["b", "c"], + "ffill", + {"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "a", "a"]}, + ), + ], + ) + def test_replace_method(self, to_replace, method, expected): + # GH 19632 + df = DataFrame({"A": [0, 1, 2], "B": [5, np.nan, 7], "C": ["a", "b", "c"]}) + + msg = "The 'method' keyword in DataFrame.replace is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.replace(to_replace=to_replace, value=None, method=method) + expected = DataFrame(expected) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "replace_dict, final_data", + [({"a": 1, "b": 1}, [[3, 3], [2, 2]]), ({"a": 1, "b": 2}, [[3, 1], [2, 3]])], + ) + def 
test_categorical_replace_with_dict(self, replace_dict, final_data): + # GH 26988 + df = DataFrame([[1, 1], [2, 2]], columns=["a", "b"], dtype="category") + + final_data = np.array(final_data) + + a = pd.Categorical(final_data[:, 0], categories=[3, 2]) + + ex_cat = [3, 2] if replace_dict["b"] == 1 else [1, 3] + b = pd.Categorical(final_data[:, 1], categories=ex_cat) + + expected = DataFrame({"a": a, "b": b}) + result = df.replace(replace_dict, 3) + tm.assert_frame_equal(result, expected) + msg = ( + r"Attributes of DataFrame.iloc\[:, 0\] \(column name=\"a\"\) are " + "different" + ) + with pytest.raises(AssertionError, match=msg): + # ensure non-inplace call does not affect original + tm.assert_frame_equal(df, expected) + return_value = df.replace(replace_dict, 3, inplace=True) + assert return_value is None + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "df, to_replace, exp", + [ + ( + {"col1": [1, 2, 3], "col2": [4, 5, 6]}, + {4: 5, 5: 6, 6: 7}, + {"col1": [1, 2, 3], "col2": [5, 6, 7]}, + ), + ( + {"col1": [1, 2, 3], "col2": ["4", "5", "6"]}, + {"4": "5", "5": "6", "6": "7"}, + {"col1": [1, 2, 3], "col2": ["5", "6", "7"]}, + ), + ], + ) + def test_replace_commutative(self, df, to_replace, exp): + # GH 16051 + # DataFrame.replace() overwrites when values are non-numeric + # also added to data frame whilst issue was for series + + df = DataFrame(df) + + expected = DataFrame(exp) + result = df.replace(to_replace) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "replacer", + [ + Timestamp("20170827"), + np.int8(1), + np.int16(1), + np.float32(1), + np.float64(1), + ], + ) + def test_replace_replacer_dtype(self, request, replacer): + # GH26632 + df = DataFrame(["a"]) + result = df.replace({"a": replacer, "b": replacer}) + expected = DataFrame([replacer]) + tm.assert_frame_equal(result, expected) + + def test_replace_after_convert_dtypes(self): + # GH31517 + df = DataFrame({"grp": [1, 2, 3, 4, 5]}, dtype="Int64") + result = df.replace(1, 10) + expected = DataFrame({"grp": [10, 2, 3, 4, 5]}, dtype="Int64") + tm.assert_frame_equal(result, expected) + + def test_replace_invalid_to_replace(self): + # GH 18634 + # API: replace() should raise an exception if invalid argument is given + df = DataFrame({"one": ["a", "b ", "c"], "two": ["d ", "e ", "f "]}) + msg = ( + r"Expecting 'to_replace' to be either a scalar, array-like, " + r"dict or None, got invalid type.*" + ) + msg2 = ( + "DataFrame.replace without 'value' and with non-dict-like " + "'to_replace' is deprecated" + ) + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=msg2): + df.replace(lambda x: x.strip()) + + @pytest.mark.parametrize("dtype", ["float", "float64", "int64", "Int64", "boolean"]) + @pytest.mark.parametrize("value", [np.nan, pd.NA]) + def test_replace_no_replacement_dtypes(self, dtype, value): + # https://github.com/pandas-dev/pandas/issues/32988 + df = DataFrame(np.eye(2), dtype=dtype) + result = df.replace(to_replace=[None, -np.inf, np.inf], value=value) + tm.assert_frame_equal(result, df) + + @pytest.mark.parametrize("replacement", [np.nan, 5]) + def test_replace_with_duplicate_columns(self, replacement): + # GH 24798 + result = DataFrame({"A": [1, 2, 3], "A1": [4, 5, 6], "B": [7, 8, 9]}) + result.columns = list("AAB") + + expected = DataFrame( + {"A": [1, 2, 3], "A1": [4, 5, 6], "B": [replacement, 8, 9]} + ) + expected.columns = list("AAB") + + result["B"] = result["B"].replace(7, replacement) + + tm.assert_frame_equal(result, 
expected) + + @pytest.mark.parametrize("value", [pd.Period("2020-01"), pd.Interval(0, 5)]) + def test_replace_ea_ignore_float(self, frame_or_series, value): + # GH#34871 + obj = DataFrame({"Per": [value] * 3}) + obj = tm.get_obj(obj, frame_or_series) + + expected = obj.copy() + result = obj.replace(1.0, 0.0) + tm.assert_equal(expected, result) + + def test_replace_value_category_type(self): + """ + Test for #23305: to ensure category dtypes are maintained + after replace with direct values + """ + + # create input data + input_dict = { + "col1": [1, 2, 3, 4], + "col2": ["a", "b", "c", "d"], + "col3": [1.5, 2.5, 3.5, 4.5], + "col4": ["cat1", "cat2", "cat3", "cat4"], + "col5": ["obj1", "obj2", "obj3", "obj4"], + } + # explicitly cast columns as category and order them + input_df = DataFrame(data=input_dict).astype( + {"col2": "category", "col4": "category"} + ) + input_df["col2"] = input_df["col2"].cat.reorder_categories( + ["a", "b", "c", "d"], ordered=True + ) + input_df["col4"] = input_df["col4"].cat.reorder_categories( + ["cat1", "cat2", "cat3", "cat4"], ordered=True + ) + + # create expected dataframe + expected_dict = { + "col1": [1, 2, 3, 4], + "col2": ["a", "b", "c", "z"], + "col3": [1.5, 2.5, 3.5, 4.5], + "col4": ["cat1", "catX", "cat3", "cat4"], + "col5": ["obj9", "obj2", "obj3", "obj4"], + } + # explicitly cast columns as category and order them + expected = DataFrame(data=expected_dict).astype( + {"col2": "category", "col4": "category"} + ) + expected["col2"] = expected["col2"].cat.reorder_categories( + ["a", "b", "c", "z"], ordered=True + ) + expected["col4"] = expected["col4"].cat.reorder_categories( + ["cat1", "catX", "cat3", "cat4"], ordered=True + ) + + # replace values in input dataframe + input_df = input_df.replace("d", "z") + input_df = input_df.replace("obj1", "obj9") + result = input_df.replace("cat2", "catX") + + tm.assert_frame_equal(result, expected) + + def test_replace_dict_category_type(self): + """ + Test to ensure category dtypes are maintained + after replace with dict values + """ + # GH#35268, GH#44940 + + # create input dataframe + input_dict = {"col1": ["a"], "col2": ["obj1"], "col3": ["cat1"]} + # explicitly cast columns as category + input_df = DataFrame(data=input_dict).astype( + {"col1": "category", "col2": "category", "col3": "category"} + ) + + # create expected dataframe + expected_dict = {"col1": ["z"], "col2": ["obj9"], "col3": ["catX"]} + # explicitly cast columns as category + expected = DataFrame(data=expected_dict).astype( + {"col1": "category", "col2": "category", "col3": "category"} + ) + + # replace values in input dataframe using a dict + result = input_df.replace({"a": "z", "obj1": "obj9", "cat1": "catX"}) + + tm.assert_frame_equal(result, expected) + + def test_replace_with_compiled_regex(self): + # https://github.com/pandas-dev/pandas/issues/35680 + df = DataFrame(["a", "b", "c"]) + regex = re.compile("^a$") + result = df.replace({regex: "z"}, regex=True) + expected = DataFrame(["z", "b", "c"]) + tm.assert_frame_equal(result, expected) + + def test_replace_intervals(self): + # https://github.com/pandas-dev/pandas/issues/35931 + df = DataFrame({"a": [pd.Interval(0, 1), pd.Interval(0, 1)]}) + result = df.replace({"a": {pd.Interval(0, 1): "x"}}) + expected = DataFrame({"a": ["x", "x"]}) + tm.assert_frame_equal(result, expected) + + def test_replace_unicode(self): + # GH: 16784 + columns_values_map = {"positive": {"正面": 1, "中立": 1, "负面": 0}} + df1 = DataFrame({"positive": np.ones(3)}) + result = df1.replace(columns_values_map) + expected = 
DataFrame({"positive": np.ones(3)}) + tm.assert_frame_equal(result, expected) + + def test_replace_bytes(self, frame_or_series): + # GH#38900 + obj = frame_or_series(["o"]).astype("|S") + expected = obj.copy() + obj = obj.replace({None: np.nan}) + tm.assert_equal(obj, expected) + + @pytest.mark.parametrize( + "data, to_replace, value, expected", + [ + ([1], [1.0], [0], [0]), + ([1], [1], [0], [0]), + ([1.0], [1.0], [0], [0.0]), + ([1.0], [1], [0], [0.0]), + ], + ) + @pytest.mark.parametrize("box", [list, tuple, np.array]) + def test_replace_list_with_mixed_type( + self, data, to_replace, value, expected, box, frame_or_series + ): + # GH#40371 + obj = frame_or_series(data) + expected = frame_or_series(expected) + result = obj.replace(box(to_replace), value) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("val", [2, np.nan, 2.0]) + def test_replace_value_none_dtype_numeric(self, val): + # GH#48231 + df = DataFrame({"a": [1, val]}) + result = df.replace(val, None) + expected = DataFrame({"a": [1, None]}, dtype=object) + tm.assert_frame_equal(result, expected) + + df = DataFrame({"a": [1, val]}) + result = df.replace({val: None}) + tm.assert_frame_equal(result, expected) + + def test_replace_with_nil_na(self): + # GH 32075 + ser = DataFrame({"a": ["nil", pd.NA]}) + expected = DataFrame({"a": ["anything else", pd.NA]}, index=[0, 1]) + result = ser.replace("nil", "anything else") + tm.assert_frame_equal(expected, result) + + +class TestDataFrameReplaceRegex: + @pytest.mark.parametrize( + "data", + [ + {"a": list("ab.."), "b": list("efgh")}, + {"a": list("ab.."), "b": list(range(4))}, + ], + ) + @pytest.mark.parametrize( + "to_replace,value", [(r"\s*\.\s*", np.nan), (r"\s*(\.)\s*", r"\1\1\1")] + ) + @pytest.mark.parametrize("compile_regex", [True, False]) + @pytest.mark.parametrize("regex_kwarg", [True, False]) + @pytest.mark.parametrize("inplace", [True, False]) + def test_regex_replace_scalar( + self, data, to_replace, value, compile_regex, regex_kwarg, inplace + ): + df = DataFrame(data) + expected = df.copy() + + if compile_regex: + to_replace = re.compile(to_replace) + + if regex_kwarg: + regex = to_replace + to_replace = None + else: + regex = True + + result = df.replace(to_replace, value, inplace=inplace, regex=regex) + + if inplace: + assert result is None + result = df + + if value is np.nan: + expected_replace_val = np.nan + else: + expected_replace_val = "..." 
+ + expected.loc[expected["a"] == ".", "a"] = expected_replace_val + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("regex", [False, True]) + def test_replace_regex_dtype_frame(self, regex): + # GH-48644 + df1 = DataFrame({"A": ["0"], "B": ["0"]}) + expected_df1 = DataFrame({"A": [1], "B": [1]}) + result_df1 = df1.replace(to_replace="0", value=1, regex=regex) + tm.assert_frame_equal(result_df1, expected_df1) + + df2 = DataFrame({"A": ["0"], "B": ["1"]}) + expected_df2 = DataFrame({"A": [1], "B": ["1"]}) + result_df2 = df2.replace(to_replace="0", value=1, regex=regex) + tm.assert_frame_equal(result_df2, expected_df2) + + def test_replace_with_value_also_being_replaced(self): + # GH46306 + df = DataFrame({"A": [0, 1, 2], "B": [1, 0, 2]}) + result = df.replace({0: 1, 1: np.nan}) + expected = DataFrame({"A": [1, np.nan, 2], "B": [np.nan, 1, 2]}) + tm.assert_frame_equal(result, expected) + + def test_replace_categorical_no_replacement(self): + # GH#46672 + df = DataFrame( + { + "a": ["one", "two", None, "three"], + "b": ["one", None, "two", "three"], + }, + dtype="category", + ) + expected = df.copy() + + result = df.replace(to_replace=[".", "def"], value=["_", None]) + tm.assert_frame_equal(result, expected) + + def test_replace_object_splitting(self): + # GH#53977 + df = DataFrame({"a": ["a"], "b": "b"}) + assert len(df._mgr.blocks) == 1 + df.replace(to_replace=r"^\s*$", value="", inplace=True, regex=True) + assert len(df._mgr.blocks) == 1 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reset_index.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reset_index.py new file mode 100644 index 00000000..d99dd36f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_reset_index.py @@ -0,0 +1,802 @@ +from datetime import datetime +from itertools import product + +import numpy as np +import pytest + +from pandas.core.dtypes.common import ( + is_float_dtype, + is_integer_dtype, +) + +import pandas as pd +from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + Index, + Interval, + IntervalIndex, + MultiIndex, + RangeIndex, + Series, + Timestamp, + cut, + date_range, +) +import pandas._testing as tm + + +@pytest.fixture() +def multiindex_df(): + levels = [["A", ""], ["B", "b"]] + return DataFrame([[0, 2], [1, 3]], columns=MultiIndex.from_tuples(levels)) + + +class TestResetIndex: + def test_reset_index_empty_rangeindex(self): + # GH#45230 + df = DataFrame( + columns=["brand"], dtype=np.int64, index=RangeIndex(0, 0, 1, name="foo") + ) + + df2 = df.set_index([df.index, "brand"]) + + result = df2.reset_index([1], drop=True) + tm.assert_frame_equal(result, df[[]], check_index_type=True) + + def test_set_reset(self): + idx = Index([2**63, 2**63 + 5, 2**63 + 10], name="foo") + + # set/reset + df = DataFrame({"A": [0, 1, 2]}, index=idx) + result = df.reset_index() + assert result["foo"].dtype == np.dtype("uint64") + + df = result.set_index("foo") + tm.assert_index_equal(df.index, idx) + + def test_set_index_reset_index_dt64tz(self): + idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo") + + # set/reset + df = DataFrame({"A": [0, 1, 2]}, index=idx) + result = df.reset_index() + assert result["foo"].dtype == "datetime64[ns, US/Eastern]" + + df = result.set_index("foo") + tm.assert_index_equal(df.index, idx) + + def test_reset_index_tz(self, tz_aware_fixture): + # GH 3950 + # reset_index with single level + tz = tz_aware_fixture + idx = date_range("1/1/2011", 
periods=5, freq="D", tz=tz, name="idx") + df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx) + + expected = DataFrame( + { + "idx": [ + datetime(2011, 1, 1), + datetime(2011, 1, 2), + datetime(2011, 1, 3), + datetime(2011, 1, 4), + datetime(2011, 1, 5), + ], + "a": range(5), + "b": ["A", "B", "C", "D", "E"], + }, + columns=["idx", "a", "b"], + ) + expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz)) + tm.assert_frame_equal(df.reset_index(), expected) + + @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) + def test_frame_reset_index_tzaware_index(self, tz): + dr = date_range("2012-06-02", periods=10, tz=tz) + df = DataFrame(np.random.default_rng(2).standard_normal(len(dr)), dr) + roundtripped = df.reset_index().set_index("index") + xp = df.index.tz + rs = roundtripped.index.tz + assert xp == rs + + def test_reset_index_with_intervals(self): + idx = IntervalIndex.from_breaks(np.arange(11), name="x") + original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]] + + result = original.set_index("x") + expected = DataFrame({"y": np.arange(10)}, index=idx) + tm.assert_frame_equal(result, expected) + + result2 = result.reset_index() + tm.assert_frame_equal(result2, original) + + def test_reset_index(self, float_frame): + stacked = float_frame.stack(future_stack=True)[::2] + stacked = DataFrame({"foo": stacked, "bar": stacked}) + + names = ["first", "second"] + stacked.index.names = names + deleveled = stacked.reset_index() + for i, (lev, level_codes) in enumerate( + zip(stacked.index.levels, stacked.index.codes) + ): + values = lev.take(level_codes) + name = names[i] + tm.assert_index_equal(values, Index(deleveled[name])) + + stacked.index.names = [None, None] + deleveled2 = stacked.reset_index() + tm.assert_series_equal( + deleveled["first"], deleveled2["level_0"], check_names=False + ) + tm.assert_series_equal( + deleveled["second"], deleveled2["level_1"], check_names=False + ) + + # default name assigned + rdf = float_frame.reset_index() + exp = Series(float_frame.index.values, name="index") + tm.assert_series_equal(rdf["index"], exp) + + # default name assigned, corner case + df = float_frame.copy() + df["index"] = "foo" + rdf = df.reset_index() + exp = Series(float_frame.index.values, name="level_0") + tm.assert_series_equal(rdf["level_0"], exp) + + # but this is ok + float_frame.index.name = "index" + deleveled = float_frame.reset_index() + tm.assert_series_equal(deleveled["index"], Series(float_frame.index)) + tm.assert_index_equal(deleveled.index, Index(range(len(deleveled))), exact=True) + + # preserve column names + float_frame.columns.name = "columns" + reset = float_frame.reset_index() + assert reset.columns.name == "columns" + + # only remove certain columns + df = float_frame.reset_index().set_index(["index", "A", "B"]) + rs = df.reset_index(["A", "B"]) + + tm.assert_frame_equal(rs, float_frame) + + rs = df.reset_index(["index", "A", "B"]) + tm.assert_frame_equal(rs, float_frame.reset_index()) + + rs = df.reset_index(["index", "A", "B"]) + tm.assert_frame_equal(rs, float_frame.reset_index()) + + rs = df.reset_index("A") + xp = float_frame.reset_index().set_index(["index", "B"]) + tm.assert_frame_equal(rs, xp) + + # test resetting in place + df = float_frame.copy() + reset = float_frame.reset_index() + return_value = df.reset_index(inplace=True) + assert return_value is None + tm.assert_frame_equal(df, reset) + + df = float_frame.reset_index().set_index(["index", "A", "B"]) + rs = df.reset_index("A", drop=True) + xp = 
float_frame.copy() + del xp["A"] + xp = xp.set_index(["B"], append=True) + tm.assert_frame_equal(rs, xp) + + def test_reset_index_name(self): + df = DataFrame( + [[1, 2, 3, 4], [5, 6, 7, 8]], + columns=["A", "B", "C", "D"], + index=Index(range(2), name="x"), + ) + assert df.reset_index().index.name is None + assert df.reset_index(drop=True).index.name is None + return_value = df.reset_index(inplace=True) + assert return_value is None + assert df.index.name is None + + @pytest.mark.parametrize("levels", [["A", "B"], [0, 1]]) + def test_reset_index_level(self, levels): + df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"]) + + # With MultiIndex + result = df.set_index(["A", "B"]).reset_index(level=levels[0]) + tm.assert_frame_equal(result, df.set_index("B")) + + result = df.set_index(["A", "B"]).reset_index(level=levels[:1]) + tm.assert_frame_equal(result, df.set_index("B")) + + result = df.set_index(["A", "B"]).reset_index(level=levels) + tm.assert_frame_equal(result, df) + + result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True) + tm.assert_frame_equal(result, df[["C", "D"]]) + + # With single-level Index (GH 16263) + result = df.set_index("A").reset_index(level=levels[0]) + tm.assert_frame_equal(result, df) + + result = df.set_index("A").reset_index(level=levels[:1]) + tm.assert_frame_equal(result, df) + + result = df.set_index(["A"]).reset_index(level=levels[0], drop=True) + tm.assert_frame_equal(result, df[["B", "C", "D"]]) + + @pytest.mark.parametrize("idx_lev", [["A", "B"], ["A"]]) + def test_reset_index_level_missing(self, idx_lev): + # Missing levels - for both MultiIndex and single-level Index: + df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"]) + + with pytest.raises(KeyError, match=r"(L|l)evel \(?E\)?"): + df.set_index(idx_lev).reset_index(level=["A", "E"]) + with pytest.raises(IndexError, match="Too many levels"): + df.set_index(idx_lev).reset_index(level=[0, 1, 2]) + + def test_reset_index_right_dtype(self): + time = np.arange(0.0, 10, np.sqrt(2) / 2) + s1 = Series( + (9.81 * time**2) / 2, index=Index(time, name="time"), name="speed" + ) + df = DataFrame(s1) + + reset = s1.reset_index() + assert reset["time"].dtype == np.float64 + + reset = df.reset_index() + assert reset["time"].dtype == np.float64 + + def test_reset_index_multiindex_col(self): + vals = np.random.default_rng(2).standard_normal((3, 3)).astype(object) + idx = ["x", "y", "z"] + full = np.hstack(([[x] for x in idx], vals)) + df = DataFrame( + vals, + Index(idx, name="a"), + columns=[["b", "b", "c"], ["mean", "median", "mean"]], + ) + rs = df.reset_index() + xp = DataFrame( + full, columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]] + ) + tm.assert_frame_equal(rs, xp) + + rs = df.reset_index(col_fill=None) + xp = DataFrame( + full, columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]] + ) + tm.assert_frame_equal(rs, xp) + + rs = df.reset_index(col_level=1, col_fill="blah") + xp = DataFrame( + full, columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]] + ) + tm.assert_frame_equal(rs, xp) + + df = DataFrame( + vals, + MultiIndex.from_arrays([[0, 1, 2], ["x", "y", "z"]], names=["d", "a"]), + columns=[["b", "b", "c"], ["mean", "median", "mean"]], + ) + rs = df.reset_index("a") + xp = DataFrame( + full, + Index([0, 1, 2], name="d"), + columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]], + ) + tm.assert_frame_equal(rs, xp) + + rs = df.reset_index("a", col_fill=None) + xp = DataFrame( + full, + Index(range(3), 
name="d"), + columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]], + ) + tm.assert_frame_equal(rs, xp) + + rs = df.reset_index("a", col_fill="blah", col_level=1) + xp = DataFrame( + full, + Index(range(3), name="d"), + columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]], + ) + tm.assert_frame_equal(rs, xp) + + def test_reset_index_multiindex_nan(self): + # GH#6322, testing reset_index on MultiIndexes + # when we have a nan or all nan + df = DataFrame( + { + "A": ["a", "b", "c"], + "B": [0, 1, np.nan], + "C": np.random.default_rng(2).random(3), + } + ) + rs = df.set_index(["A", "B"]).reset_index() + tm.assert_frame_equal(rs, df) + + df = DataFrame( + { + "A": [np.nan, "b", "c"], + "B": [0, 1, 2], + "C": np.random.default_rng(2).random(3), + } + ) + rs = df.set_index(["A", "B"]).reset_index() + tm.assert_frame_equal(rs, df) + + df = DataFrame({"A": ["a", "b", "c"], "B": [0, 1, 2], "C": [np.nan, 1.1, 2.2]}) + rs = df.set_index(["A", "B"]).reset_index() + tm.assert_frame_equal(rs, df) + + df = DataFrame( + { + "A": ["a", "b", "c"], + "B": [np.nan, np.nan, np.nan], + "C": np.random.default_rng(2).random(3), + } + ) + rs = df.set_index(["A", "B"]).reset_index() + tm.assert_frame_equal(rs, df) + + @pytest.mark.parametrize( + "name", + [ + None, + "foo", + 2, + 3.0, + pd.Timedelta(6), + Timestamp("2012-12-30", tz="UTC"), + "2012-12-31", + ], + ) + def test_reset_index_with_datetimeindex_cols(self, name): + # GH#5818 + df = DataFrame( + [[1, 2], [3, 4]], + columns=date_range("1/1/2013", "1/2/2013"), + index=["A", "B"], + ) + df.index.name = name + + result = df.reset_index() + + item = name if name is not None else "index" + columns = Index([item, datetime(2013, 1, 1), datetime(2013, 1, 2)]) + if isinstance(item, str) and item == "2012-12-31": + columns = columns.astype("datetime64[ns]") + else: + assert columns.dtype == object + + expected = DataFrame( + [["A", 1, 2], ["B", 3, 4]], + columns=columns, + ) + tm.assert_frame_equal(result, expected) + + def test_reset_index_range(self): + # GH#12071 + df = DataFrame([[0, 0], [1, 1]], columns=["A", "B"], index=RangeIndex(stop=2)) + result = df.reset_index() + assert isinstance(result.index, RangeIndex) + expected = DataFrame( + [[0, 0, 0], [1, 1, 1]], + columns=["index", "A", "B"], + index=RangeIndex(stop=2), + ) + tm.assert_frame_equal(result, expected) + + def test_reset_index_multiindex_columns(self, multiindex_df): + result = multiindex_df[["B"]].rename_axis("A").reset_index() + tm.assert_frame_equal(result, multiindex_df) + + # GH#16120: already existing column + msg = r"cannot insert \('A', ''\), already exists" + with pytest.raises(ValueError, match=msg): + multiindex_df.rename_axis("A").reset_index() + + # GH#16164: multiindex (tuple) full key + result = multiindex_df.set_index([("A", "")]).reset_index() + tm.assert_frame_equal(result, multiindex_df) + + # with additional (unnamed) index level + idx_col = DataFrame( + [[0], [1]], columns=MultiIndex.from_tuples([("level_0", "")]) + ) + expected = pd.concat([idx_col, multiindex_df[[("B", "b"), ("A", "")]]], axis=1) + result = multiindex_df.set_index([("B", "b")], append=True).reset_index() + tm.assert_frame_equal(result, expected) + + # with index name which is a too long tuple... + msg = "Item must have length equal to number of levels." + with pytest.raises(ValueError, match=msg): + multiindex_df.rename_axis([("C", "c", "i")]).reset_index() + + # or too short... 
+ levels = [["A", "a", ""], ["B", "b", "i"]] + df2 = DataFrame([[0, 2], [1, 3]], columns=MultiIndex.from_tuples(levels)) + idx_col = DataFrame( + [[0], [1]], columns=MultiIndex.from_tuples([("C", "c", "ii")]) + ) + expected = pd.concat([idx_col, df2], axis=1) + result = df2.rename_axis([("C", "c")]).reset_index(col_fill="ii") + tm.assert_frame_equal(result, expected) + + # ... which is incompatible with col_fill=None + with pytest.raises( + ValueError, + match=( + "col_fill=None is incompatible with " + r"incomplete column name \('C', 'c'\)" + ), + ): + df2.rename_axis([("C", "c")]).reset_index(col_fill=None) + + # with col_level != 0 + result = df2.rename_axis([("c", "ii")]).reset_index(col_level=1, col_fill="C") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("flag", [False, True]) + @pytest.mark.parametrize("allow_duplicates", [False, True]) + def test_reset_index_duplicate_columns_allow( + self, multiindex_df, flag, allow_duplicates + ): + # GH#44755 reset_index with duplicate column labels + df = multiindex_df.rename_axis("A") + df = df.set_flags(allows_duplicate_labels=flag) + + if flag and allow_duplicates: + result = df.reset_index(allow_duplicates=allow_duplicates) + levels = [["A", ""], ["A", ""], ["B", "b"]] + expected = DataFrame( + [[0, 0, 2], [1, 1, 3]], columns=MultiIndex.from_tuples(levels) + ) + tm.assert_frame_equal(result, expected) + else: + if not flag and allow_duplicates: + msg = ( + "Cannot specify 'allow_duplicates=True' when " + "'self.flags.allows_duplicate_labels' is False" + ) + else: + msg = r"cannot insert \('A', ''\), already exists" + with pytest.raises(ValueError, match=msg): + df.reset_index(allow_duplicates=allow_duplicates) + + @pytest.mark.parametrize("flag", [False, True]) + def test_reset_index_duplicate_columns_default(self, multiindex_df, flag): + df = multiindex_df.rename_axis("A") + df = df.set_flags(allows_duplicate_labels=flag) + + msg = r"cannot insert \('A', ''\), already exists" + with pytest.raises(ValueError, match=msg): + df.reset_index() + + @pytest.mark.parametrize("allow_duplicates", ["bad value"]) + def test_reset_index_allow_duplicates_check(self, multiindex_df, allow_duplicates): + with pytest.raises(ValueError, match="expected type bool"): + multiindex_df.reset_index(allow_duplicates=allow_duplicates) + + def test_reset_index_datetime(self, tz_naive_fixture): + # GH#3950 + tz = tz_naive_fixture + idx1 = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx1") + idx2 = Index(range(5), name="idx2", dtype="int64") + idx = MultiIndex.from_arrays([idx1, idx2]) + df = DataFrame( + {"a": np.arange(5, dtype="int64"), "b": ["A", "B", "C", "D", "E"]}, + index=idx, + ) + + expected = DataFrame( + { + "idx1": [ + datetime(2011, 1, 1), + datetime(2011, 1, 2), + datetime(2011, 1, 3), + datetime(2011, 1, 4), + datetime(2011, 1, 5), + ], + "idx2": np.arange(5, dtype="int64"), + "a": np.arange(5, dtype="int64"), + "b": ["A", "B", "C", "D", "E"], + }, + columns=["idx1", "idx2", "a", "b"], + ) + expected["idx1"] = expected["idx1"].apply(lambda d: Timestamp(d, tz=tz)) + + tm.assert_frame_equal(df.reset_index(), expected) + + idx3 = date_range( + "1/1/2012", periods=5, freq="MS", tz="Europe/Paris", name="idx3" + ) + idx = MultiIndex.from_arrays([idx1, idx2, idx3]) + df = DataFrame( + {"a": np.arange(5, dtype="int64"), "b": ["A", "B", "C", "D", "E"]}, + index=idx, + ) + + expected = DataFrame( + { + "idx1": [ + datetime(2011, 1, 1), + datetime(2011, 1, 2), + datetime(2011, 1, 3), + datetime(2011, 1, 4), + datetime(2011, 1, 
5), + ], + "idx2": np.arange(5, dtype="int64"), + "idx3": [ + datetime(2012, 1, 1), + datetime(2012, 2, 1), + datetime(2012, 3, 1), + datetime(2012, 4, 1), + datetime(2012, 5, 1), + ], + "a": np.arange(5, dtype="int64"), + "b": ["A", "B", "C", "D", "E"], + }, + columns=["idx1", "idx2", "idx3", "a", "b"], + ) + expected["idx1"] = expected["idx1"].apply(lambda d: Timestamp(d, tz=tz)) + expected["idx3"] = expected["idx3"].apply( + lambda d: Timestamp(d, tz="Europe/Paris") + ) + tm.assert_frame_equal(df.reset_index(), expected) + + # GH#7793 + idx = MultiIndex.from_product( + [["a", "b"], date_range("20130101", periods=3, tz=tz)] + ) + df = DataFrame( + np.arange(6, dtype="int64").reshape(6, 1), columns=["a"], index=idx + ) + + expected = DataFrame( + { + "level_0": "a a a b b b".split(), + "level_1": [ + datetime(2013, 1, 1), + datetime(2013, 1, 2), + datetime(2013, 1, 3), + ] + * 2, + "a": np.arange(6, dtype="int64"), + }, + columns=["level_0", "level_1", "a"], + ) + expected["level_1"] = expected["level_1"].apply(lambda d: Timestamp(d, tz=tz)) + result = df.reset_index() + tm.assert_frame_equal(result, expected) + + def test_reset_index_period(self): + # GH#7746 + idx = MultiIndex.from_product( + [pd.period_range("20130101", periods=3, freq="M"), list("abc")], + names=["month", "feature"], + ) + + df = DataFrame( + np.arange(9, dtype="int64").reshape(-1, 1), index=idx, columns=["a"] + ) + expected = DataFrame( + { + "month": ( + [pd.Period("2013-01", freq="M")] * 3 + + [pd.Period("2013-02", freq="M")] * 3 + + [pd.Period("2013-03", freq="M")] * 3 + ), + "feature": ["a", "b", "c"] * 3, + "a": np.arange(9, dtype="int64"), + }, + columns=["month", "feature", "a"], + ) + result = df.reset_index() + tm.assert_frame_equal(result, expected) + + def test_reset_index_delevel_infer_dtype(self): + tuples = list(product(["foo", "bar"], [10, 20], [1.0, 1.1])) + index = MultiIndex.from_tuples(tuples, names=["prm0", "prm1", "prm2"]) + df = DataFrame( + np.random.default_rng(2).standard_normal((8, 3)), + columns=["A", "B", "C"], + index=index, + ) + deleveled = df.reset_index() + assert is_integer_dtype(deleveled["prm1"]) + assert is_float_dtype(deleveled["prm2"]) + + def test_reset_index_with_drop( + self, multiindex_year_month_day_dataframe_random_data + ): + ymd = multiindex_year_month_day_dataframe_random_data + + deleveled = ymd.reset_index(drop=True) + assert len(deleveled.columns) == len(ymd.columns) + assert deleveled.index.name == ymd.index.name + + @pytest.mark.parametrize( + "ix_data, exp_data", + [ + ( + [(pd.NaT, 1), (pd.NaT, 2)], + {"a": [pd.NaT, pd.NaT], "b": [1, 2], "x": [11, 12]}, + ), + ( + [(pd.NaT, 1), (Timestamp("2020-01-01"), 2)], + {"a": [pd.NaT, Timestamp("2020-01-01")], "b": [1, 2], "x": [11, 12]}, + ), + ( + [(pd.NaT, 1), (pd.Timedelta(123, "d"), 2)], + {"a": [pd.NaT, pd.Timedelta(123, "d")], "b": [1, 2], "x": [11, 12]}, + ), + ], + ) + def test_reset_index_nat_multiindex(self, ix_data, exp_data): + # GH#36541: that reset_index() does not raise ValueError + ix = MultiIndex.from_tuples(ix_data, names=["a", "b"]) + result = DataFrame({"x": [11, 12]}, index=ix) + result = result.reset_index() + + expected = DataFrame(exp_data) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "codes", ([[0, 0, 1, 1], [0, 1, 0, 1]], [[0, 0, -1, 1], [0, 1, 0, 1]]) + ) + def test_rest_index_multiindex_categorical_with_missing_values(self, codes): + # GH#24206 + + index = MultiIndex( + [CategoricalIndex(["A", "B"]), CategoricalIndex(["a", "b"])], codes + ) + data = {"col": 
range(len(index))} + df = DataFrame(data=data, index=index) + + expected = DataFrame( + { + "level_0": Categorical.from_codes(codes[0], categories=["A", "B"]), + "level_1": Categorical.from_codes(codes[1], categories=["a", "b"]), + "col": range(4), + } + ) + + res = df.reset_index() + tm.assert_frame_equal(res, expected) + + # roundtrip + res = expected.set_index(["level_0", "level_1"]).reset_index() + tm.assert_frame_equal(res, expected) + + +@pytest.mark.parametrize( + "array, dtype", + [ + (["a", "b"], object), + ( + pd.period_range("12-1-2000", periods=2, freq="Q-DEC"), + pd.PeriodDtype(freq="Q-DEC"), + ), + ], +) +def test_reset_index_dtypes_on_empty_frame_with_multiindex(array, dtype): + # GH 19602 - Preserve dtype on empty DataFrame with MultiIndex + idx = MultiIndex.from_product([[0, 1], [0.5, 1.0], array]) + result = DataFrame(index=idx)[:0].reset_index().dtypes + expected = Series({"level_0": np.int64, "level_1": np.float64, "level_2": dtype}) + tm.assert_series_equal(result, expected) + + +def test_reset_index_empty_frame_with_datetime64_multiindex(): + # https://github.com/pandas-dev/pandas/issues/35606 + idx = MultiIndex( + levels=[[Timestamp("2020-07-20 00:00:00")], [3, 4]], + codes=[[], []], + names=["a", "b"], + ) + df = DataFrame(index=idx, columns=["c", "d"]) + result = df.reset_index() + expected = DataFrame( + columns=list("abcd"), index=RangeIndex(start=0, stop=0, step=1) + ) + expected["a"] = expected["a"].astype("datetime64[ns]") + expected["b"] = expected["b"].astype("int64") + tm.assert_frame_equal(result, expected) + + +def test_reset_index_empty_frame_with_datetime64_multiindex_from_groupby(): + # https://github.com/pandas-dev/pandas/issues/35657 + df = DataFrame({"c1": [10.0], "c2": ["a"], "c3": pd.to_datetime("2020-01-01")}) + df = df.head(0).groupby(["c2", "c3"])[["c1"]].sum() + result = df.reset_index() + expected = DataFrame( + columns=["c2", "c3", "c1"], index=RangeIndex(start=0, stop=0, step=1) + ) + expected["c3"] = expected["c3"].astype("datetime64[ns]") + expected["c1"] = expected["c1"].astype("float64") + tm.assert_frame_equal(result, expected) + + +def test_reset_index_multiindex_nat(): + # GH 11479 + idx = range(3) + tstamp = date_range("2015-07-01", freq="D", periods=3) + df = DataFrame({"id": idx, "tstamp": tstamp, "a": list("abc")}) + df.loc[2, "tstamp"] = pd.NaT + result = df.set_index(["id", "tstamp"]).reset_index("id") + expected = DataFrame( + {"id": range(3), "a": list("abc")}, + index=pd.DatetimeIndex(["2015-07-01", "2015-07-02", "NaT"], name="tstamp"), + ) + tm.assert_frame_equal(result, expected) + + +def test_reset_index_interval_columns_object_cast(): + # GH 19136 + df = DataFrame( + np.eye(2), index=Index([1, 2], name="Year"), columns=cut([1, 2], [0, 1, 2]) + ) + result = df.reset_index() + expected = DataFrame( + [[1, 1.0, 0.0], [2, 0.0, 1.0]], + columns=Index(["Year", Interval(0, 1), Interval(1, 2)]), + ) + tm.assert_frame_equal(result, expected) + + +def test_reset_index_rename(float_frame): + # GH 6878 + result = float_frame.reset_index(names="new_name") + expected = Series(float_frame.index.values, name="new_name") + tm.assert_series_equal(result["new_name"], expected) + + result = float_frame.reset_index(names=123) + expected = Series(float_frame.index.values, name=123) + tm.assert_series_equal(result[123], expected) + + +def test_reset_index_rename_multiindex(float_frame): + # GH 6878 + stacked_df = float_frame.stack(future_stack=True)[::2] + stacked_df = DataFrame({"foo": stacked_df, "bar": stacked_df}) + + names = ["first", 
"second"] + stacked_df.index.names = names + + result = stacked_df.reset_index() + expected = stacked_df.reset_index(names=["new_first", "new_second"]) + tm.assert_series_equal(result["first"], expected["new_first"], check_names=False) + tm.assert_series_equal(result["second"], expected["new_second"], check_names=False) + + +def test_errorreset_index_rename(float_frame): + # GH 6878 + stacked_df = float_frame.stack(future_stack=True)[::2] + stacked_df = DataFrame({"first": stacked_df, "second": stacked_df}) + + with pytest.raises( + ValueError, match="Index names must be str or 1-dimensional list" + ): + stacked_df.reset_index(names={"first": "new_first", "second": "new_second"}) + + with pytest.raises(IndexError, match="list index out of range"): + stacked_df.reset_index(names=["new_first"]) + + +def test_reset_index_false_index_name(): + result_series = Series(data=range(5, 10), index=range(0, 5)) + result_series.index.name = False + result_series.reset_index() + expected_series = Series(range(5, 10), RangeIndex(range(0, 5), name=False)) + tm.assert_series_equal(result_series, expected_series) + + # GH 38147 + result_frame = DataFrame(data=range(5, 10), index=range(0, 5)) + result_frame.index.name = False + result_frame.reset_index() + expected_frame = DataFrame(range(5, 10), RangeIndex(range(0, 5), name=False)) + tm.assert_frame_equal(result_frame, expected_frame) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_round.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_round.py new file mode 100644 index 00000000..a96df27b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_round.py @@ -0,0 +1,225 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Series, + date_range, +) +import pandas._testing as tm + + +class TestDataFrameRound: + def test_round(self): + # GH#2665 + + # Test that rounding an empty DataFrame does nothing + df = DataFrame() + tm.assert_frame_equal(df, df.round()) + + # Here's the test frame we'll be working with + df = DataFrame({"col1": [1.123, 2.123, 3.123], "col2": [1.234, 2.234, 3.234]}) + + # Default round to integer (i.e. 
decimals=0) + expected_rounded = DataFrame({"col1": [1.0, 2.0, 3.0], "col2": [1.0, 2.0, 3.0]}) + tm.assert_frame_equal(df.round(), expected_rounded) + + # Round with an integer + decimals = 2 + expected_rounded = DataFrame( + {"col1": [1.12, 2.12, 3.12], "col2": [1.23, 2.23, 3.23]} + ) + tm.assert_frame_equal(df.round(decimals), expected_rounded) + + # This should also work with np.round (since np.round dispatches to + # df.round) + tm.assert_frame_equal(np.round(df, decimals), expected_rounded) + + # Round with a list + round_list = [1, 2] + msg = "decimals must be an integer, a dict-like or a Series" + with pytest.raises(TypeError, match=msg): + df.round(round_list) + + # Round with a dictionary + expected_rounded = DataFrame( + {"col1": [1.1, 2.1, 3.1], "col2": [1.23, 2.23, 3.23]} + ) + round_dict = {"col1": 1, "col2": 2} + tm.assert_frame_equal(df.round(round_dict), expected_rounded) + + # Incomplete dict + expected_partially_rounded = DataFrame( + {"col1": [1.123, 2.123, 3.123], "col2": [1.2, 2.2, 3.2]} + ) + partial_round_dict = {"col2": 1} + tm.assert_frame_equal(df.round(partial_round_dict), expected_partially_rounded) + + # Dict with unknown elements + wrong_round_dict = {"col3": 2, "col2": 1} + tm.assert_frame_equal(df.round(wrong_round_dict), expected_partially_rounded) + + # float input to `decimals` + non_int_round_dict = {"col1": 1, "col2": 0.5} + msg = "Values in decimals must be integers" + with pytest.raises(TypeError, match=msg): + df.round(non_int_round_dict) + + # String input + non_int_round_dict = {"col1": 1, "col2": "foo"} + with pytest.raises(TypeError, match=msg): + df.round(non_int_round_dict) + + non_int_round_Series = Series(non_int_round_dict) + with pytest.raises(TypeError, match=msg): + df.round(non_int_round_Series) + + # List input + non_int_round_dict = {"col1": 1, "col2": [1, 2]} + with pytest.raises(TypeError, match=msg): + df.round(non_int_round_dict) + + non_int_round_Series = Series(non_int_round_dict) + with pytest.raises(TypeError, match=msg): + df.round(non_int_round_Series) + + # Non integer Series inputs + non_int_round_Series = Series(non_int_round_dict) + with pytest.raises(TypeError, match=msg): + df.round(non_int_round_Series) + + non_int_round_Series = Series(non_int_round_dict) + with pytest.raises(TypeError, match=msg): + df.round(non_int_round_Series) + + # Negative numbers + negative_round_dict = {"col1": -1, "col2": -2} + big_df = df * 100 + expected_neg_rounded = DataFrame( + {"col1": [110.0, 210, 310], "col2": [100.0, 200, 300]} + ) + tm.assert_frame_equal(big_df.round(negative_round_dict), expected_neg_rounded) + + # nan in Series round + nan_round_Series = Series({"col1": np.nan, "col2": 1}) + + with pytest.raises(TypeError, match=msg): + df.round(nan_round_Series) + + # Make sure this doesn't break existing Series.round + tm.assert_series_equal(df["col1"].round(1), expected_rounded["col1"]) + + # named columns + # GH#11986 + decimals = 2 + expected_rounded = DataFrame( + {"col1": [1.12, 2.12, 3.12], "col2": [1.23, 2.23, 3.23]} + ) + df.columns.name = "cols" + expected_rounded.columns.name = "cols" + tm.assert_frame_equal(df.round(decimals), expected_rounded) + + # interaction of named columns & series + tm.assert_series_equal(df["col1"].round(decimals), expected_rounded["col1"]) + tm.assert_series_equal(df.round(decimals)["col1"], expected_rounded["col1"]) + + def test_round_numpy(self): + # GH#12600 + df = DataFrame([[1.53, 1.36], [0.06, 7.01]]) + out = np.round(df, decimals=0) + expected = DataFrame([[2.0, 1.0], [0.0, 7.0]]) + 
tm.assert_frame_equal(out, expected) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.round(df, decimals=0, out=df) + + def test_round_numpy_with_nan(self): + # See GH#14197 + df = Series([1.53, np.nan, 0.06]).to_frame() + with tm.assert_produces_warning(None): + result = df.round() + expected = Series([2.0, np.nan, 0.0]).to_frame() + tm.assert_frame_equal(result, expected) + + def test_round_mixed_type(self): + # GH#11885 + df = DataFrame( + { + "col1": [1.1, 2.2, 3.3, 4.4], + "col2": ["1", "a", "c", "f"], + "col3": date_range("20111111", periods=4), + } + ) + round_0 = DataFrame( + { + "col1": [1.0, 2.0, 3.0, 4.0], + "col2": ["1", "a", "c", "f"], + "col3": date_range("20111111", periods=4), + } + ) + tm.assert_frame_equal(df.round(), round_0) + tm.assert_frame_equal(df.round(1), df) + tm.assert_frame_equal(df.round({"col1": 1}), df) + tm.assert_frame_equal(df.round({"col1": 0}), round_0) + tm.assert_frame_equal(df.round({"col1": 0, "col2": 1}), round_0) + tm.assert_frame_equal(df.round({"col3": 1}), df) + + def test_round_with_duplicate_columns(self): + # GH#11611 + + df = DataFrame( + np.random.default_rng(2).random([3, 3]), + columns=["A", "B", "C"], + index=["first", "second", "third"], + ) + + dfs = pd.concat((df, df), axis=1) + rounded = dfs.round() + tm.assert_index_equal(rounded.index, dfs.index) + + decimals = Series([1, 0, 2], index=["A", "B", "A"]) + msg = "Index of decimals must be unique" + with pytest.raises(ValueError, match=msg): + df.round(decimals) + + def test_round_builtin(self): + # GH#11763 + # Here's the test frame we'll be working with + df = DataFrame({"col1": [1.123, 2.123, 3.123], "col2": [1.234, 2.234, 3.234]}) + + # Default round to integer (i.e. decimals=0) + expected_rounded = DataFrame({"col1": [1.0, 2.0, 3.0], "col2": [1.0, 2.0, 3.0]}) + tm.assert_frame_equal(round(df), expected_rounded) + + def test_round_nonunique_categorical(self): + # See GH#21809 + idx = pd.CategoricalIndex(["low"] * 3 + ["hi"] * 3) + df = DataFrame(np.random.default_rng(2).random((6, 3)), columns=list("abc")) + + expected = df.round(3) + expected.index = idx + + df_categorical = df.copy().set_index(idx) + assert df_categorical.shape == (6, 3) + result = df_categorical.round(3) + assert result.shape == (6, 3) + + tm.assert_frame_equal(result, expected) + + def test_round_interval_category_columns(self): + # GH#30063 + columns = pd.CategoricalIndex(pd.interval_range(0, 2)) + df = DataFrame([[0.66, 1.1], [0.3, 0.25]], columns=columns) + + result = df.round() + expected = DataFrame([[1.0, 1.0], [0.0, 0.0]], columns=columns) + tm.assert_frame_equal(result, expected) + + def test_round_empty_not_input(self): + # GH#51032 + df = DataFrame() + result = df.round() + tm.assert_frame_equal(df, result) + assert df is not result diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_sample.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_sample.py new file mode 100644 index 00000000..6b3459fb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_sample.py @@ -0,0 +1,372 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, +) +import pandas._testing as tm +import pandas.core.common as com + + +class TestSample: + @pytest.fixture + def obj(self, frame_or_series): + if frame_or_series is Series: + arr = np.random.default_rng(2).standard_normal(10) + else: + arr = np.random.default_rng(2).standard_normal((10, 10)) + return 
frame_or_series(arr, dtype=None) + + @pytest.mark.parametrize("test", list(range(10))) + def test_sample(self, test, obj): + # Fixes issue: 2419 + # Check behavior of random_state argument + # Check for stability when receives seed or random state -- run 10 + # times. + + seed = np.random.default_rng(2).integers(0, 100) + tm.assert_equal( + obj.sample(n=4, random_state=seed), obj.sample(n=4, random_state=seed) + ) + + tm.assert_equal( + obj.sample(frac=0.7, random_state=seed), + obj.sample(frac=0.7, random_state=seed), + ) + + tm.assert_equal( + obj.sample(n=4, random_state=np.random.default_rng(test)), + obj.sample(n=4, random_state=np.random.default_rng(test)), + ) + + tm.assert_equal( + obj.sample(frac=0.7, random_state=np.random.default_rng(test)), + obj.sample(frac=0.7, random_state=np.random.default_rng(test)), + ) + + tm.assert_equal( + obj.sample( + frac=2, + replace=True, + random_state=np.random.default_rng(test), + ), + obj.sample( + frac=2, + replace=True, + random_state=np.random.default_rng(test), + ), + ) + + os1, os2 = [], [] + for _ in range(2): + os1.append(obj.sample(n=4, random_state=test)) + os2.append(obj.sample(frac=0.7, random_state=test)) + tm.assert_equal(*os1) + tm.assert_equal(*os2) + + def test_sample_lengths(self, obj): + # Check lengths are right + assert len(obj.sample(n=4) == 4) + assert len(obj.sample(frac=0.34) == 3) + assert len(obj.sample(frac=0.36) == 4) + + def test_sample_invalid_random_state(self, obj): + # Check for error when random_state argument invalid. + msg = ( + "random_state must be an integer, array-like, a BitGenerator, Generator, " + "a numpy RandomState, or None" + ) + with pytest.raises(ValueError, match=msg): + obj.sample(random_state="a_string") + + def test_sample_wont_accept_n_and_frac(self, obj): + # Giving both frac and N throws error + msg = "Please enter a value for `frac` OR `n`, not both" + with pytest.raises(ValueError, match=msg): + obj.sample(n=3, frac=0.3) + + def test_sample_requires_positive_n_frac(self, obj): + with pytest.raises( + ValueError, + match="A negative number of rows requested. Please provide `n` >= 0", + ): + obj.sample(n=-3) + with pytest.raises( + ValueError, + match="A negative number of rows requested. 
Please provide `frac` >= 0", + ): + obj.sample(frac=-0.3) + + def test_sample_requires_integer_n(self, obj): + # Make sure float values of `n` give error + with pytest.raises(ValueError, match="Only integers accepted as `n` values"): + obj.sample(n=3.2) + + def test_sample_invalid_weight_lengths(self, obj): + # Weight length must be right + msg = "Weights and axis to be sampled must be of same length" + with pytest.raises(ValueError, match=msg): + obj.sample(n=3, weights=[0, 1]) + + with pytest.raises(ValueError, match=msg): + bad_weights = [0.5] * 11 + obj.sample(n=3, weights=bad_weights) + + with pytest.raises(ValueError, match="Fewer non-zero entries in p than size"): + bad_weight_series = Series([0, 0, 0.2]) + obj.sample(n=4, weights=bad_weight_series) + + def test_sample_negative_weights(self, obj): + # Check won't accept negative weights + bad_weights = [-0.1] * 10 + msg = "weight vector many not include negative values" + with pytest.raises(ValueError, match=msg): + obj.sample(n=3, weights=bad_weights) + + def test_sample_inf_weights(self, obj): + # Check inf and -inf throw errors: + + weights_with_inf = [0.1] * 10 + weights_with_inf[0] = np.inf + msg = "weight vector may not include `inf` values" + with pytest.raises(ValueError, match=msg): + obj.sample(n=3, weights=weights_with_inf) + + weights_with_ninf = [0.1] * 10 + weights_with_ninf[0] = -np.inf + with pytest.raises(ValueError, match=msg): + obj.sample(n=3, weights=weights_with_ninf) + + def test_sample_zero_weights(self, obj): + # All zeros raises errors + + zero_weights = [0] * 10 + with pytest.raises(ValueError, match="Invalid weights: weights sum to zero"): + obj.sample(n=3, weights=zero_weights) + + def test_sample_missing_weights(self, obj): + # All missing weights + + nan_weights = [np.nan] * 10 + with pytest.raises(ValueError, match="Invalid weights: weights sum to zero"): + obj.sample(n=3, weights=nan_weights) + + def test_sample_none_weights(self, obj): + # Check None are also replaced by zeros. 
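+        # Standalone sketch of the semantics checked here (frame and names
+        # are illustrative, not part of the original test):
+        #   df = DataFrame({"x": range(10)})
+        #   w = [None] * 10
+        #   w[5] = 0.5                    # None behaves like weight 0
+        #   df.sample(n=1, weights=w)     # can only ever return row 5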
+ weights_with_None = [None] * 10 + weights_with_None[5] = 0.5 + tm.assert_equal( + obj.sample(n=1, axis=0, weights=weights_with_None), obj.iloc[5:6] + ) + + @pytest.mark.parametrize( + "func_str,arg", + [ + ("np.array", [2, 3, 1, 0]), + ("np.random.MT19937", 3), + ("np.random.PCG64", 11), + ], + ) + def test_sample_random_state(self, func_str, arg, frame_or_series): + # GH#32503 + obj = DataFrame({"col1": range(10, 20), "col2": range(20, 30)}) + obj = tm.get_obj(obj, frame_or_series) + result = obj.sample(n=3, random_state=eval(func_str)(arg)) + expected = obj.sample(n=3, random_state=com.random_state(eval(func_str)(arg))) + tm.assert_equal(result, expected) + + def test_sample_generator(self, frame_or_series): + # GH#38100 + obj = frame_or_series(np.arange(100)) + rng = np.random.default_rng(2) + + # Consecutive calls should advance the seed + result1 = obj.sample(n=50, random_state=rng) + result2 = obj.sample(n=50, random_state=rng) + assert not (result1.index.values == result2.index.values).all() + + # Matching generator initialization must give same result + # Consecutive calls should advance the seed + result1 = obj.sample(n=50, random_state=np.random.default_rng(11)) + result2 = obj.sample(n=50, random_state=np.random.default_rng(11)) + tm.assert_equal(result1, result2) + + def test_sample_upsampling_without_replacement(self, frame_or_series): + # GH#27451 + + obj = DataFrame({"A": list("abc")}) + obj = tm.get_obj(obj, frame_or_series) + + msg = ( + "Replace has to be set to `True` when " + "upsampling the population `frac` > 1." + ) + with pytest.raises(ValueError, match=msg): + obj.sample(frac=2, replace=False) + + +class TestSampleDataFrame: + # Tests which are relevant only for DataFrame, so these are + # as fully parametrized as they can get. + + def test_sample(self): + # GH#2419 + # additional specific object based tests + + # A few dataframe test with degenerate weights. + easy_weight_list = [0] * 10 + easy_weight_list[5] = 1 + + df = DataFrame( + { + "col1": range(10, 20), + "col2": range(20, 30), + "colString": ["a"] * 10, + "easyweights": easy_weight_list, + } + ) + sample1 = df.sample(n=1, weights="easyweights") + tm.assert_frame_equal(sample1, df.iloc[5:6]) + + # Ensure proper error if string given as weight for Series or + # DataFrame with axis = 1. + ser = Series(range(10)) + msg = "Strings cannot be passed as weights when sampling from a Series." + with pytest.raises(ValueError, match=msg): + ser.sample(n=3, weights="weight_column") + + msg = ( + "Strings can only be passed to weights when sampling from rows on a " + "DataFrame" + ) + with pytest.raises(ValueError, match=msg): + df.sample(n=1, weights="weight_column", axis=1) + + # Check weighting key error + with pytest.raises( + KeyError, match="'String passed to weights not a valid column'" + ): + df.sample(n=3, weights="not_a_real_column_name") + + # Check that re-normalizes weights that don't sum to one. 
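+        # Sketch: pandas rescales weights to sum to 1, so a single nonzero
+        # entry is deterministic regardless of its magnitude, e.g.
+        #   w = [0] * 10
+        #   w[0] = 0.5                    # total mass 0.5, renormalized to 1
+        #   df.sample(n=1, weights=w)     # always returns the first row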
+ weights_less_than_1 = [0] * 10
+ weights_less_than_1[0] = 0.5
+ tm.assert_frame_equal(df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
+
+ ###
+ # Test axis argument
+ ###
+
+ df = DataFrame({"col1": range(10), "col2": ["a"] * 10})
+ second_column_weight = [0, 1]
+ tm.assert_frame_equal(
+ df.sample(n=1, axis=1, weights=second_column_weight), df[["col2"]]
+ )
+
+ # Different axis arg types
+ tm.assert_frame_equal(
+ df.sample(n=1, axis="columns", weights=second_column_weight), df[["col2"]]
+ )
+
+ weight = [0] * 10
+ weight[5] = 0.5
+ tm.assert_frame_equal(df.sample(n=1, axis="rows", weights=weight), df.iloc[5:6])
+ tm.assert_frame_equal(
+ df.sample(n=1, axis="index", weights=weight), df.iloc[5:6]
+ )
+
+ # Check out-of-range axis values
+ msg = "No axis named 2 for object type DataFrame"
+ with pytest.raises(ValueError, match=msg):
+ df.sample(n=1, axis=2)
+
+ msg = "No axis named not_a_name for object type DataFrame"
+ with pytest.raises(ValueError, match=msg):
+ df.sample(n=1, axis="not_a_name")
+
+ ser = Series(range(10))
+ with pytest.raises(ValueError, match="No axis named 1 for object type Series"):
+ ser.sample(n=1, axis=1)
+
+ # Test that weight length is checked against the sampled axis
+ msg = "Weights and axis to be sampled must be of same length"
+ with pytest.raises(ValueError, match=msg):
+ df.sample(n=1, axis=1, weights=[0.5] * 10)
+
+ def test_sample_axis1(self):
+ # Check weights with axis = 1
+ easy_weight_list = [0] * 3
+ easy_weight_list[2] = 1
+
+ df = DataFrame(
+ {"col1": range(10, 20), "col2": range(20, 30), "colString": ["a"] * 10}
+ )
+ sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
+ tm.assert_frame_equal(sample1, df[["colString"]])
+
+ # Test default axes
+ tm.assert_frame_equal(
+ df.sample(n=3, random_state=42), df.sample(n=3, axis=0, random_state=42)
+ )
+
+ def test_sample_aligns_weights_with_frame(self):
+ # Test that the function aligns weights with the frame
+ df = DataFrame({"col1": [5, 6, 7], "col2": ["a", "b", "c"]}, index=[9, 5, 3])
+ ser = Series([1, 0, 0], index=[3, 5, 9])
+ tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=ser))
+
+ # Weight index values not present in the sampled DataFrame are dropped
+ ser2 = Series([0.001, 0, 10000], index=[3, 5, 10])
+ tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=ser2))
+
+ # Weights with missing entries are filled with zeros
+ ser3 = Series([0.01, 0], index=[3, 5])
+ tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=ser3))
+
+ # No overlap in weight and sampled DataFrame indices
+ ser4 = Series([1, 0], index=[1, 2])
+
+ with pytest.raises(ValueError, match="Invalid weights: weights sum to zero"):
+ df.sample(1, weights=ser4)
+
+ def test_sample_is_copy(self):
+ # GH#27357, GH#30784: ensure the result of sample is an actual copy and
+ # doesn't track the parent dataframe / doesn't give SettingWithCopy warnings
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 3)), columns=["a", "b", "c"]
+ )
+ df2 = df.sample(3)
+
+ with tm.assert_produces_warning(None):
+ df2["d"] = 1
+
+ def test_sample_does_not_modify_weights(self):
+ # GH-42843
+ result = np.array([np.nan, 1, np.nan])
+ expected = result.copy()
+ ser = Series([1, 2, 3])
+
+ # Test numpy array weights won't be modified in place
+ ser.sample(weights=result)
+ tm.assert_numpy_array_equal(result, expected)
+
+ # Test DataFrame column won't be modified in place
+ df = DataFrame({"values": [1, 1, 1], "weights": [1, np.nan, np.nan]})
+ expected = df["weights"].copy()
+
+ df.sample(frac=1.0,
replace=True, weights="weights") + result = df["weights"] + tm.assert_series_equal(result, expected) + + def test_sample_ignore_index(self): + # GH 38581 + df = DataFrame( + {"col1": range(10, 20), "col2": range(20, 30), "colString": ["a"] * 10} + ) + result = df.sample(3, ignore_index=True) + expected_index = Index(range(3)) + tm.assert_index_equal(result.index, expected_index, exact=True) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_select_dtypes.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_select_dtypes.py new file mode 100644 index 00000000..a38d2c6f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_select_dtypes.py @@ -0,0 +1,466 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import ExtensionDtype + +import pandas as pd +from pandas import ( + DataFrame, + Timestamp, +) +import pandas._testing as tm +from pandas.core.arrays import ExtensionArray + + +class DummyDtype(ExtensionDtype): + type = int + + def __init__(self, numeric) -> None: + self._numeric = numeric + + @property + def name(self): + return "Dummy" + + @property + def _is_numeric(self): + return self._numeric + + +class DummyArray(ExtensionArray): + def __init__(self, data, dtype) -> None: + self.data = data + self._dtype = dtype + + def __array__(self, dtype): + return self.data + + @property + def dtype(self): + return self._dtype + + def __len__(self) -> int: + return len(self.data) + + def __getitem__(self, item): + pass + + def copy(self): + return self + + +class TestSelectDtypes: + def test_select_dtypes_include_using_list_like(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.Categorical(list("abc")), + "g": pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(include=[np.number]) + ei = df[["b", "c", "d", "k"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=[np.number], exclude=["timedelta"]) + ei = df[["b", "c", "d"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"]) + ei = df[["b", "c", "d", "f"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=["datetime"]) + ei = df[["g"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=["datetime64"]) + ei = df[["g"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=["datetimetz"]) + ei = df[["h", "i"]] + tm.assert_frame_equal(ri, ei) + + with pytest.raises(NotImplementedError, match=r"^$"): + df.select_dtypes(include=["period"]) + + def test_select_dtypes_exclude_using_list_like(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + } + ) + re = df.select_dtypes(exclude=[np.number]) + ee = df[["a", "e"]] + tm.assert_frame_equal(re, ee) + + def test_select_dtypes_exclude_include_using_list_like(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6, dtype="u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", 
periods=3).values, + } + ) + exclude = (np.datetime64,) + include = np.bool_, "integer" + r = df.select_dtypes(include=include, exclude=exclude) + e = df[["b", "c", "e"]] + tm.assert_frame_equal(r, e) + + exclude = ("datetime",) + include = "bool", "int64", "int32" + r = df.select_dtypes(include=include, exclude=exclude) + e = df[["b", "e"]] + tm.assert_frame_equal(r, e) + + @pytest.mark.parametrize( + "include", [(np.bool_, "int"), (np.bool_, "integer"), ("bool", int)] + ) + def test_select_dtypes_exclude_include_int(self, include): + # Fix select_dtypes(include='int') for Windows, FYI #36596 + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6, dtype="int32"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values, + } + ) + exclude = (np.datetime64,) + result = df.select_dtypes(include=include, exclude=exclude) + expected = df[["b", "c", "e"]] + tm.assert_frame_equal(result, expected) + + def test_select_dtypes_include_using_scalars(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.Categorical(list("abc")), + "g": pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(include=np.number) + ei = df[["b", "c", "d", "k"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include="datetime") + ei = df[["g"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include="datetime64") + ei = df[["g"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include="category") + ei = df[["f"]] + tm.assert_frame_equal(ri, ei) + + with pytest.raises(NotImplementedError, match=r"^$"): + df.select_dtypes(include="period") + + def test_select_dtypes_exclude_using_scalars(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.Categorical(list("abc")), + "g": pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(exclude=np.number) + ei = df[["a", "e", "f", "g", "h", "i", "j"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(exclude="category") + ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]] + tm.assert_frame_equal(ri, ei) + + with pytest.raises(NotImplementedError, match=r"^$"): + df.select_dtypes(exclude="period") + + def test_select_dtypes_include_exclude_using_scalars(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.Categorical(list("abc")), + "g": pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(include=np.number, exclude="floating") 
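+ # excluding "floating" drops the float64 column "d"; integer ("b", "c") and timedelta ("k") columns remain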
+ ei = df[["b", "c", "k"]] + tm.assert_frame_equal(ri, ei) + + def test_select_dtypes_include_exclude_mixed_scalars_lists(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.Categorical(list("abc")), + "g": pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"]) + ei = df[["b", "c"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=[np.number, "category"], exclude="floating") + ei = df[["b", "c", "f", "k"]] + tm.assert_frame_equal(ri, ei) + + def test_select_dtypes_duplicate_columns(self): + # GH20839 + df = DataFrame( + { + "a": ["a", "b", "c"], + "b": [1, 2, 3], + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values, + } + ) + df.columns = ["a", "a", "b", "b", "b", "c"] + + expected = DataFrame( + {"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")} + ) + + result = df.select_dtypes(include=[np.number], exclude=["floating"]) + tm.assert_frame_equal(result, expected) + + def test_select_dtypes_not_an_attr_but_still_valid_dtype(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values, + } + ) + df["g"] = df.f.diff() + assert not hasattr(np, "u8") + r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"]) + e = df[["a", "b"]] + tm.assert_frame_equal(r, e) + + r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"]) + e = df[["a", "b", "g"]] + tm.assert_frame_equal(r, e) + + def test_select_dtypes_empty(self): + df = DataFrame({"a": list("abc"), "b": list(range(1, 4))}) + msg = "at least one of include or exclude must be nonempty" + with pytest.raises(ValueError, match=msg): + df.select_dtypes() + + def test_select_dtypes_bad_datetime64(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values, + } + ) + with pytest.raises(ValueError, match=".+ is too specific"): + df.select_dtypes(include=["datetime64[D]"]) + + with pytest.raises(ValueError, match=".+ is too specific"): + df.select_dtypes(exclude=["datetime64[as]"]) + + def test_select_dtypes_datetime_with_tz(self): + df2 = DataFrame( + { + "A": Timestamp("20130102", tz="US/Eastern"), + "B": Timestamp("20130603", tz="CET"), + }, + index=range(5), + ) + df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1) + result = df3.select_dtypes(include=["datetime64[ns]"]) + expected = df3.reindex(columns=[]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", [str, "str", np.bytes_, "S1", np.str_, "U1"]) + @pytest.mark.parametrize("arg", ["include", "exclude"]) + def test_select_dtypes_str_raises(self, dtype, arg): + df = DataFrame( + { + "a": list("abc"), + "g": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": 
pd.date_range("now", periods=3).values, + } + ) + msg = "string dtypes are not allowed" + kwargs = {arg: [dtype]} + + with pytest.raises(TypeError, match=msg): + df.select_dtypes(**kwargs) + + def test_select_dtypes_bad_arg_raises(self): + df = DataFrame( + { + "a": list("abc"), + "g": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values, + } + ) + + msg = "data type.*not understood" + with pytest.raises(TypeError, match=msg): + df.select_dtypes(["blargy, blarg, blarg"]) + + def test_select_dtypes_typecodes(self): + # GH 11990 + df = tm.makeCustomDataframe( + 30, 3, data_gen_f=lambda x, y: np.random.default_rng(2).random() + ) + expected = df + FLOAT_TYPES = list(np.typecodes["AllFloat"]) + tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected) + + @pytest.mark.parametrize( + "arr,expected", + ( + (np.array([1, 2], dtype=np.int32), True), + (pd.array([1, 2], dtype="Int32"), True), + (DummyArray([1, 2], dtype=DummyDtype(numeric=True)), True), + (DummyArray([1, 2], dtype=DummyDtype(numeric=False)), False), + ), + ) + def test_select_dtypes_numeric(self, arr, expected): + # GH 35340 + + df = DataFrame(arr) + is_selected = df.select_dtypes(np.number).shape == df.shape + assert is_selected == expected + + def test_select_dtypes_numeric_nullable_string(self, nullable_string_dtype): + arr = pd.array(["a", "b"], dtype=nullable_string_dtype) + df = DataFrame(arr) + is_selected = df.select_dtypes(np.number).shape == df.shape + assert not is_selected + + @pytest.mark.parametrize( + "expected, float_dtypes", + [ + [ + DataFrame( + {"A": range(3), "B": range(5, 8), "C": range(10, 7, -1)} + ).astype(dtype={"A": float, "B": np.float64, "C": np.float32}), + float, + ], + [ + DataFrame( + {"A": range(3), "B": range(5, 8), "C": range(10, 7, -1)} + ).astype(dtype={"A": float, "B": np.float64, "C": np.float32}), + "float", + ], + [DataFrame({"C": range(10, 7, -1)}, dtype=np.float32), np.float32], + [ + DataFrame({"A": range(3), "B": range(5, 8)}).astype( + dtype={"A": float, "B": np.float64} + ), + np.float64, + ], + ], + ) + def test_select_dtypes_float_dtype(self, expected, float_dtypes): + # GH#42452 + dtype_dict = {"A": float, "B": np.float64, "C": np.float32} + df = DataFrame( + {"A": range(3), "B": range(5, 8), "C": range(10, 7, -1)}, + ) + df = df.astype(dtype_dict) + result = df.select_dtypes(include=float_dtypes) + tm.assert_frame_equal(result, expected) + + def test_np_bool_ea_boolean_include_number(self): + # GH 46870 + df = DataFrame( + { + "a": [1, 2, 3], + "b": pd.Series([True, False, True], dtype="boolean"), + "c": np.array([True, False, True]), + "d": pd.Categorical([True, False, True]), + "e": pd.arrays.SparseArray([True, False, True]), + } + ) + result = df.select_dtypes(include="number") + expected = DataFrame({"a": [1, 2, 3]}) + tm.assert_frame_equal(result, expected) + + def test_select_dtypes_no_view(self): + # https://github.com/pandas-dev/pandas/issues/48090 + # result of this method is not a view on the original dataframe + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df_orig = df.copy() + result = df.select_dtypes(include=["number"]) + result.iloc[0, 0] = 0 + tm.assert_frame_equal(df, df_orig) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_set_axis.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_set_axis.py new file mode 100644 index 00000000..8d249bc7 --- /dev/null 
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_set_axis.py @@ -0,0 +1,143 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +class SharedSetAxisTests: + @pytest.fixture + def obj(self): + raise NotImplementedError("Implemented by subclasses") + + def test_set_axis(self, obj): + # GH14636; this tests setting index for both Series and DataFrame + new_index = list("abcd")[: len(obj)] + expected = obj.copy() + expected.index = new_index + result = obj.set_axis(new_index, axis=0) + tm.assert_equal(expected, result) + + def test_set_axis_copy(self, obj, using_copy_on_write): + # Test copy keyword GH#47932 + new_index = list("abcd")[: len(obj)] + + orig = obj.iloc[:] + expected = obj.copy() + expected.index = new_index + + result = obj.set_axis(new_index, axis=0, copy=True) + tm.assert_equal(expected, result) + assert result is not obj + # check we DID make a copy + if not using_copy_on_write: + if obj.ndim == 1: + assert not tm.shares_memory(result, obj) + else: + assert not any( + tm.shares_memory(result.iloc[:, i], obj.iloc[:, i]) + for i in range(obj.shape[1]) + ) + + result = obj.set_axis(new_index, axis=0, copy=False) + tm.assert_equal(expected, result) + assert result is not obj + # check we did NOT make a copy + if obj.ndim == 1: + assert tm.shares_memory(result, obj) + else: + assert all( + tm.shares_memory(result.iloc[:, i], obj.iloc[:, i]) + for i in range(obj.shape[1]) + ) + + # copy defaults to True + result = obj.set_axis(new_index, axis=0) + tm.assert_equal(expected, result) + assert result is not obj + if using_copy_on_write: + # check we DID NOT make a copy + if obj.ndim == 1: + assert tm.shares_memory(result, obj) + else: + assert any( + tm.shares_memory(result.iloc[:, i], obj.iloc[:, i]) + for i in range(obj.shape[1]) + ) + # check we DID make a copy + elif obj.ndim == 1: + assert not tm.shares_memory(result, obj) + else: + assert not any( + tm.shares_memory(result.iloc[:, i], obj.iloc[:, i]) + for i in range(obj.shape[1]) + ) + + res = obj.set_axis(new_index, copy=False) + tm.assert_equal(expected, res) + # check we did NOT make a copy + if res.ndim == 1: + assert tm.shares_memory(res, orig) + else: + assert all( + tm.shares_memory(res.iloc[:, i], orig.iloc[:, i]) + for i in range(res.shape[1]) + ) + + def test_set_axis_unnamed_kwarg_warns(self, obj): + # omitting the "axis" parameter + new_index = list("abcd")[: len(obj)] + + expected = obj.copy() + expected.index = new_index + + result = obj.set_axis(new_index) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("axis", [3, "foo"]) + def test_set_axis_invalid_axis_name(self, axis, obj): + # wrong values for the "axis" parameter + with pytest.raises(ValueError, match="No axis named"): + obj.set_axis(list("abc"), axis=axis) + + def test_set_axis_setattr_index_not_collection(self, obj): + # wrong type + msg = ( + r"Index\(\.\.\.\) must be called with a collection of some " + r"kind, None was passed" + ) + with pytest.raises(TypeError, match=msg): + obj.index = None + + def test_set_axis_setattr_index_wrong_length(self, obj): + # wrong length + msg = ( + f"Length mismatch: Expected axis has {len(obj)} elements, " + f"new values have {len(obj)-1} elements" + ) + with pytest.raises(ValueError, match=msg): + obj.index = np.arange(len(obj) - 1) + + if obj.ndim == 2: + with pytest.raises(ValueError, match="Length mismatch"): + obj.columns = obj.columns[::2] + + +class TestDataFrameSetAxis(SharedSetAxisTests): + 
@pytest.fixture + def obj(self): + df = DataFrame( + {"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2], "C": [4.4, 5.5, 6.6]}, + index=[2010, 2011, 2012], + ) + return df + + +class TestSeriesSetAxis(SharedSetAxisTests): + @pytest.fixture + def obj(self): + ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype="int64") + return ser diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_set_index.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_set_index.py new file mode 100644 index 00000000..5984e591 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_set_index.py @@ -0,0 +1,702 @@ +""" +See also: test_reindex.py:TestReindexSetIndex +""" + +from datetime import ( + datetime, + timedelta, +) + +import numpy as np +import pytest + +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + Series, + date_range, + period_range, + to_datetime, +) +import pandas._testing as tm + + +class TestSetIndex: + def test_set_index_multiindex(self): + # segfault in GH#3308 + d = {"t1": [2, 2.5, 3], "t2": [4, 5, 6]} + df = DataFrame(d) + tuples = [(0, 1), (0, 2), (1, 2)] + df["tuples"] = tuples + + index = MultiIndex.from_tuples(df["tuples"]) + # it works! + df.set_index(index) + + def test_set_index_empty_column(self): + # GH#1971 + df = DataFrame( + [ + {"a": 1, "p": 0}, + {"a": 2, "m": 10}, + {"a": 3, "m": 11, "p": 20}, + {"a": 4, "m": 12, "p": 21}, + ], + columns=["a", "m", "p", "x"], + ) + + result = df.set_index(["a", "x"]) + + expected = df[["m", "p"]] + expected.index = MultiIndex.from_arrays([df["a"], df["x"]], names=["a", "x"]) + tm.assert_frame_equal(result, expected) + + def test_set_index_empty_dataframe(self): + # GH#38419 + df1 = DataFrame( + {"a": Series(dtype="datetime64[ns]"), "b": Series(dtype="int64"), "c": []} + ) + + df2 = df1.set_index(["a", "b"]) + result = df2.index.to_frame().dtypes + expected = df1[["a", "b"]].dtypes + tm.assert_series_equal(result, expected) + + def test_set_index_multiindexcolumns(self): + columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)]) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), columns=columns + ) + + result = df.set_index(df.columns[0]) + + expected = df.iloc[:, 1:] + expected.index = df.iloc[:, 0].values + expected.index.names = [df.columns[0]] + tm.assert_frame_equal(result, expected) + + def test_set_index_timezone(self): + # GH#12358 + # tz-aware Series should retain the tz + idx = DatetimeIndex(["2014-01-01 10:10:10"], tz="UTC").tz_convert("Europe/Rome") + df = DataFrame({"A": idx}) + assert df.set_index(idx).index[0].hour == 11 + assert DatetimeIndex(Series(df.A))[0].hour == 11 + assert df.set_index(df.A).index[0].hour == 11 + + def test_set_index_cast_datetimeindex(self): + df = DataFrame( + { + "A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)], + "B": np.random.default_rng(2).standard_normal(1000), + } + ) + + idf = df.set_index("A") + assert isinstance(idf.index, DatetimeIndex) + + def test_set_index_dst(self): + di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific") + + df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index() + # single level + res = df.set_index("index") + exp = DataFrame( + data={"a": [0, 1, 2], "b": [3, 4, 5]}, + index=Index(di, name="index"), + ) + exp.index = exp.index._with_freq(None) + tm.assert_frame_equal(res, exp) + + # GH#12920 + res = df.set_index(["index", "a"]) + exp_index = 
MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"]) + exp = DataFrame({"b": [3, 4, 5]}, index=exp_index) + tm.assert_frame_equal(res, exp) + + def test_set_index(self, float_string_frame): + df = float_string_frame + idx = Index(np.arange(len(df))[::-1]) + + df = df.set_index(idx) + tm.assert_index_equal(df.index, idx) + with pytest.raises(ValueError, match="Length mismatch"): + df.set_index(idx[::2]) + + def test_set_index_names(self): + df = tm.makeDataFrame() + df.index.name = "name" + + assert df.set_index(df.index).index.names == ["name"] + + mi = MultiIndex.from_arrays(df[["A", "B"]].T.values, names=["A", "B"]) + mi2 = MultiIndex.from_arrays( + df[["A", "B", "A", "B"]].T.values, names=["A", "B", "C", "D"] + ) + + df = df.set_index(["A", "B"]) + + assert df.set_index(df.index).index.names == ["A", "B"] + + # Check that set_index isn't converting a MultiIndex into an Index + assert isinstance(df.set_index(df.index).index, MultiIndex) + + # Check actual equality + tm.assert_index_equal(df.set_index(df.index).index, mi) + + idx2 = df.index.rename(["C", "D"]) + + # Check that [MultiIndex, MultiIndex] yields a MultiIndex rather + # than a pair of tuples + assert isinstance(df.set_index([df.index, idx2]).index, MultiIndex) + + # Check equality + tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2) + + # A has duplicate values, C does not + @pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")]) + @pytest.mark.parametrize("inplace", [True, False]) + @pytest.mark.parametrize("drop", [True, False]) + def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys): + df = frame_of_index_cols + + if isinstance(keys, list): + idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys) + else: + idx = Index(df[keys], name=keys) + expected = df.drop(keys, axis=1) if drop else df + expected.index = idx + + if inplace: + result = df.copy() + return_value = result.set_index(keys, drop=drop, inplace=True) + assert return_value is None + else: + result = df.set_index(keys, drop=drop) + + tm.assert_frame_equal(result, expected) + + # A has duplicate values, C does not + @pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")]) + @pytest.mark.parametrize("drop", [True, False]) + def test_set_index_append(self, frame_of_index_cols, drop, keys): + df = frame_of_index_cols + + keys = keys if isinstance(keys, list) else [keys] + idx = MultiIndex.from_arrays( + [df.index] + [df[x] for x in keys], names=[None] + keys + ) + expected = df.drop(keys, axis=1) if drop else df.copy() + expected.index = idx + + result = df.set_index(keys, drop=drop, append=True) + + tm.assert_frame_equal(result, expected) + + # A has duplicate values, C does not + @pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")]) + @pytest.mark.parametrize("drop", [True, False]) + def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys): + # append to existing multiindex + df = frame_of_index_cols.set_index(["D"], drop=drop, append=True) + + keys = keys if isinstance(keys, list) else [keys] + expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True) + + result = df.set_index(keys, drop=drop, append=True) + + tm.assert_frame_equal(result, expected) + + def test_set_index_after_mutation(self): + # GH#1590 + df = DataFrame({"val": [0, 1, 2], "key": ["a", "b", "c"]}) + expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key")) + + df2 = df.loc[df.index.map(lambda indx: indx >= 
1)] + result = df2.set_index("key") + tm.assert_frame_equal(result, expected) + + # MultiIndex constructor does not work directly on Series -> lambda + # Add list-of-list constructor because list is ambiguous -> lambda + # also test index name if append=True (name is duplicate here for B) + @pytest.mark.parametrize( + "box", + [ + Series, + Index, + np.array, + list, + lambda x: [list(x)], + lambda x: MultiIndex.from_arrays([x]), + ], + ) + @pytest.mark.parametrize( + "append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)] + ) + @pytest.mark.parametrize("drop", [True, False]) + def test_set_index_pass_single_array( + self, frame_of_index_cols, drop, append, index_name, box + ): + df = frame_of_index_cols + df.index.name = index_name + + key = box(df["B"]) + if box == list: + # list of strings gets interpreted as list of keys + msg = "['one', 'two', 'three', 'one', 'two']" + with pytest.raises(KeyError, match=msg): + df.set_index(key, drop=drop, append=append) + else: + # np.array/list-of-list "forget" the name of B + name_mi = getattr(key, "names", None) + name = [getattr(key, "name", None)] if name_mi is None else name_mi + + result = df.set_index(key, drop=drop, append=append) + + # only valid column keys are dropped + # since B is always passed as array above, nothing is dropped + expected = df.set_index(["B"], drop=False, append=append) + expected.index.names = [index_name] + name if append else name + + tm.assert_frame_equal(result, expected) + + # MultiIndex constructor does not work directly on Series -> lambda + # also test index name if append=True (name is duplicate here for A & B) + @pytest.mark.parametrize( + "box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])] + ) + @pytest.mark.parametrize( + "append, index_name", + [(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)], + ) + @pytest.mark.parametrize("drop", [True, False]) + def test_set_index_pass_arrays( + self, frame_of_index_cols, drop, append, index_name, box + ): + df = frame_of_index_cols + df.index.name = index_name + + keys = ["A", box(df["B"])] + # np.array/list "forget" the name of B + names = ["A", None if box in [np.array, list, tuple, iter] else "B"] + + result = df.set_index(keys, drop=drop, append=append) + + # only valid column keys are dropped + # since B is always passed as array above, only A is dropped, if at all + expected = df.set_index(["A", "B"], drop=False, append=append) + expected = expected.drop("A", axis=1) if drop else expected + expected.index.names = [index_name] + names if append else names + + tm.assert_frame_equal(result, expected) + + # MultiIndex constructor does not work directly on Series -> lambda + # We also emulate a "constructor" for the label -> lambda + # also test index name if append=True (name is duplicate here for A) + @pytest.mark.parametrize( + "box2", + [ + Series, + Index, + np.array, + list, + iter, + lambda x: MultiIndex.from_arrays([x]), + lambda x: x.name, + ], + ) + @pytest.mark.parametrize( + "box1", + [ + Series, + Index, + np.array, + list, + iter, + lambda x: MultiIndex.from_arrays([x]), + lambda x: x.name, + ], + ) + @pytest.mark.parametrize( + "append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)] + ) + @pytest.mark.parametrize("drop", [True, False]) + def test_set_index_pass_arrays_duplicate( + self, frame_of_index_cols, drop, append, index_name, box1, box2 + ): + df = frame_of_index_cols + df.index.name = index_name + + keys = [box1(df["A"]), box2(df["A"])] + result = 
df.set_index(keys, drop=drop, append=append) + + # if either box is iter, it has been consumed; re-read + keys = [box1(df["A"]), box2(df["A"])] + + # need to adapt first drop for case that both keys are 'A' -- + # cannot drop the same column twice; + # plain == would give ambiguous Boolean error for containers + first_drop = ( + False + if ( + isinstance(keys[0], str) + and keys[0] == "A" + and isinstance(keys[1], str) + and keys[1] == "A" + ) + else drop + ) + # to test against already-tested behaviour, we add sequentially, + # hence second append always True; must wrap keys in list, otherwise + # box = list would be interpreted as keys + expected = df.set_index([keys[0]], drop=first_drop, append=append) + expected = expected.set_index([keys[1]], drop=drop, append=True) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("append", [True, False]) + @pytest.mark.parametrize("drop", [True, False]) + def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append): + df = frame_of_index_cols + keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"]) + + result = df.set_index(keys, drop=drop, append=append) + + # setting with a MultiIndex will never drop columns + expected = df.set_index(["A", "B"], drop=False, append=append) + + tm.assert_frame_equal(result, expected) + + def test_construction_with_categorical_index(self): + ci = tm.makeCategoricalIndex(10) + ci.name = "B" + + # with Categorical + df = DataFrame( + {"A": np.random.default_rng(2).standard_normal(10), "B": ci.values} + ) + idf = df.set_index("B") + tm.assert_index_equal(idf.index, ci) + + # from a CategoricalIndex + df = DataFrame({"A": np.random.default_rng(2).standard_normal(10), "B": ci}) + idf = df.set_index("B") + tm.assert_index_equal(idf.index, ci) + + # round-trip + idf = idf.reset_index().set_index("B") + tm.assert_index_equal(idf.index, ci) + + def test_set_index_preserve_categorical_dtype(self): + # GH#13743, GH#13854 + df = DataFrame( + { + "A": [1, 2, 1, 1, 2], + "B": [10, 16, 22, 28, 34], + "C1": Categorical(list("abaab"), categories=list("bac"), ordered=False), + "C2": Categorical(list("abaab"), categories=list("bac"), ordered=True), + } + ) + for cols in ["C1", "C2", ["A", "C1"], ["A", "C2"], ["C1", "C2"]]: + result = df.set_index(cols).reset_index() + result = result.reindex(columns=df.columns) + tm.assert_frame_equal(result, df) + + def test_set_index_datetime(self): + # GH#3950 + df = DataFrame( + { + "label": ["a", "a", "a", "b", "b", "b"], + "datetime": [ + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + ], + "value": range(6), + } + ) + df.index = to_datetime(df.pop("datetime"), utc=True) + df.index = df.index.tz_convert("US/Pacific") + + expected = DatetimeIndex( + ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"], + name="datetime", + ) + expected = expected.tz_localize("UTC").tz_convert("US/Pacific") + + df = df.set_index("label", append=True) + tm.assert_index_equal(df.index.levels[0], expected) + tm.assert_index_equal(df.index.levels[1], Index(["a", "b"], name="label")) + assert df.index.names == ["datetime", "label"] + + df = df.swaplevel(0, 1) + tm.assert_index_equal(df.index.levels[0], Index(["a", "b"], name="label")) + tm.assert_index_equal(df.index.levels[1], expected) + assert df.index.names == ["label", "datetime"] + + df = DataFrame(np.random.default_rng(2).random(6)) + idx1 = DatetimeIndex( + [ + "2011-07-19 07:00:00", + "2011-07-19 
08:00:00", + "2011-07-19 09:00:00", + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + ], + tz="US/Eastern", + ) + idx2 = DatetimeIndex( + [ + "2012-04-01 09:00", + "2012-04-01 09:00", + "2012-04-01 09:00", + "2012-04-02 09:00", + "2012-04-02 09:00", + "2012-04-02 09:00", + ], + tz="US/Eastern", + ) + idx3 = date_range("2011-01-01 09:00", periods=6, tz="Asia/Tokyo") + idx3 = idx3._with_freq(None) + + df = df.set_index(idx1) + df = df.set_index(idx2, append=True) + df = df.set_index(idx3, append=True) + + expected1 = DatetimeIndex( + ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"], + tz="US/Eastern", + ) + expected2 = DatetimeIndex( + ["2012-04-01 09:00", "2012-04-02 09:00"], tz="US/Eastern" + ) + + tm.assert_index_equal(df.index.levels[0], expected1) + tm.assert_index_equal(df.index.levels[1], expected2) + tm.assert_index_equal(df.index.levels[2], idx3) + + # GH#7092 + tm.assert_index_equal(df.index.get_level_values(0), idx1) + tm.assert_index_equal(df.index.get_level_values(1), idx2) + tm.assert_index_equal(df.index.get_level_values(2), idx3) + + def test_set_index_period(self): + # GH#6631 + df = DataFrame(np.random.default_rng(2).random(6)) + idx1 = period_range("2011-01-01", periods=3, freq="M") + idx1 = idx1.append(idx1) + idx2 = period_range("2013-01-01 09:00", periods=2, freq="H") + idx2 = idx2.append(idx2).append(idx2) + idx3 = period_range("2005", periods=6, freq="A") + + df = df.set_index(idx1) + df = df.set_index(idx2, append=True) + df = df.set_index(idx3, append=True) + + expected1 = period_range("2011-01-01", periods=3, freq="M") + expected2 = period_range("2013-01-01 09:00", periods=2, freq="H") + + tm.assert_index_equal(df.index.levels[0], expected1) + tm.assert_index_equal(df.index.levels[1], expected2) + tm.assert_index_equal(df.index.levels[2], idx3) + + tm.assert_index_equal(df.index.get_level_values(0), idx1) + tm.assert_index_equal(df.index.get_level_values(1), idx2) + tm.assert_index_equal(df.index.get_level_values(2), idx3) + + +class TestSetIndexInvalid: + def test_set_index_verify_integrity(self, frame_of_index_cols): + df = frame_of_index_cols + + with pytest.raises(ValueError, match="Index has duplicate keys"): + df.set_index("A", verify_integrity=True) + # with MultiIndex + with pytest.raises(ValueError, match="Index has duplicate keys"): + df.set_index([df["A"], df["A"]], verify_integrity=True) + + @pytest.mark.parametrize("append", [True, False]) + @pytest.mark.parametrize("drop", [True, False]) + def test_set_index_raise_keys(self, frame_of_index_cols, drop, append): + df = frame_of_index_cols + + with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"): + # column names are A-E, as well as one tuple + df.set_index(["foo", "bar", "baz"], drop=drop, append=append) + + # non-existent key in list with arrays + with pytest.raises(KeyError, match="X"): + df.set_index([df["A"], df["B"], "X"], drop=drop, append=append) + + msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]" + # tuples always raise KeyError + with pytest.raises(KeyError, match=msg): + df.set_index(tuple(df["A"]), drop=drop, append=append) + + # also within a list + with pytest.raises(KeyError, match=msg): + df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append) + + @pytest.mark.parametrize("append", [True, False]) + @pytest.mark.parametrize("drop", [True, False]) + @pytest.mark.parametrize("box", [set], ids=["set"]) + def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append): + df = frame_of_index_cols + + msg = 'The 
parameter "keys" may be a column key, .*' + # forbidden type, e.g. set + with pytest.raises(TypeError, match=msg): + df.set_index(box(df["A"]), drop=drop, append=append) + + # forbidden type in list, e.g. set + with pytest.raises(TypeError, match=msg): + df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append) + + # MultiIndex constructor does not work directly on Series -> lambda + @pytest.mark.parametrize( + "box", + [Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])], + ids=["Series", "Index", "np.array", "iter", "MultiIndex"], + ) + @pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"]) + @pytest.mark.parametrize("append", [True, False]) + @pytest.mark.parametrize("drop", [True, False]) + def test_set_index_raise_on_len( + self, frame_of_index_cols, box, length, drop, append + ): + # GH 24984 + df = frame_of_index_cols # has length 5 + + values = np.random.default_rng(2).integers(0, 10, (length,)) + + msg = "Length mismatch: Expected 5 rows, received array of length.*" + + # wrong length directly + with pytest.raises(ValueError, match=msg): + df.set_index(box(values), drop=drop, append=append) + + # wrong length in list + with pytest.raises(ValueError, match=msg): + df.set_index(["A", df.A, box(values)], drop=drop, append=append) + + +class TestSetIndexCustomLabelType: + def test_set_index_custom_label_type(self): + # GH#24969 + + class Thing: + def __init__(self, name, color) -> None: + self.name = name + self.color = color + + def __str__(self) -> str: + return f"" + + # necessary for pretty KeyError + __repr__ = __str__ + + thing1 = Thing("One", "red") + thing2 = Thing("Two", "blue") + df = DataFrame({thing1: [0, 1], thing2: [2, 3]}) + expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2)) + + # use custom label directly + result = df.set_index(thing2) + tm.assert_frame_equal(result, expected) + + # custom label wrapped in list + result = df.set_index([thing2]) + tm.assert_frame_equal(result, expected) + + # missing key + thing3 = Thing("Three", "pink") + msg = "" + with pytest.raises(KeyError, match=msg): + # missing label directly + df.set_index(thing3) + + with pytest.raises(KeyError, match=msg): + # missing label in list + df.set_index([thing3]) + + def test_set_index_custom_label_hashable_iterable(self): + # GH#24969 + + # actual example discussed in GH 24984 was e.g. for shapely.geometry + # objects (e.g. 
a collection of Points) that can be both hashable and
+ # iterable; using frozenset as a stand-in for testing here
+
+ class Thing(frozenset):
+ # need to stabilize repr for KeyError (due to random order in sets)
+ def __repr__(self) -> str:
+ tmp = sorted(self)
+ joined_reprs = ", ".join(map(repr, tmp))
+ # double curly brace prints one brace in format string
+ return f"frozenset({{{joined_reprs}}})"
+
+ thing1 = Thing(["One", "red"])
+ thing2 = Thing(["Two", "blue"])
+ df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
+ expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
+
+ # use custom label directly
+ result = df.set_index(thing2)
+ tm.assert_frame_equal(result, expected)
+
+ # custom label wrapped in list
+ result = df.set_index([thing2])
+ tm.assert_frame_equal(result, expected)
+
+ # missing key
+ thing3 = Thing(["Three", "pink"])
+ msg = r"frozenset\(\{'Three', 'pink'\}\)"
+ with pytest.raises(KeyError, match=msg):
+ # missing label directly
+ df.set_index(thing3)
+
+ with pytest.raises(KeyError, match=msg):
+ # missing label in list
+ df.set_index([thing3])
+
+ def test_set_index_custom_label_type_raises(self):
+ # GH#24969
+
+ # purposefully inherit from something unhashable
+ class Thing(set):
+ def __init__(self, name, color) -> None:
+ self.name = name
+ self.color = color
+
+ def __str__(self) -> str:
+ return f"<Thing {repr(self.name)}>"
+
+ thing1 = Thing("One", "red")
+ thing2 = Thing("Two", "blue")
+ df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
+
+ msg = 'The parameter "keys" may be a column key, .*'
+
+ with pytest.raises(TypeError, match=msg):
+ # use custom label directly
+ df.set_index(thing2)
+
+ with pytest.raises(TypeError, match=msg):
+ # custom label wrapped in list
+ df.set_index([thing2])
+
+ def test_set_index_periodindex(self):
+ # GH#6631
+ df = DataFrame(np.random.default_rng(2).random(6))
+ idx1 = period_range("2011/01/01", periods=6, freq="M")
+ idx2 = period_range("2013", periods=6, freq="A")
+
+ df = df.set_index(idx1)
+ tm.assert_index_equal(df.index, idx1)
+ df = df.set_index(idx2)
+ tm.assert_index_equal(df.index, idx2) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_shift.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_shift.py new file mode 100644 index 00000000..1e881521 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_shift.py @@ -0,0 +1,756 @@
+import numpy as np
+import pytest
+
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import (
+ CategoricalIndex,
+ DataFrame,
+ Index,
+ NaT,
+ Series,
+ date_range,
+ offsets,
+)
+import pandas._testing as tm
+
+
+class TestDataFrameShift:
+ def test_shift_axis1_with_valid_fill_value_one_array(self):
+ # Case with axis=1 that does not go through the "len(arrays)>1" path
+ # in DataFrame.shift
+ data = np.random.default_rng(2).standard_normal((5, 3))
+ df = DataFrame(data)
+ res = df.shift(axis=1, periods=1, fill_value=12345)
+ expected = df.T.shift(periods=1, fill_value=12345).T
+ tm.assert_frame_equal(res, expected)
+
+ # same but with a 1D ExtensionArray backing it
+ df2 = df[[0]].astype("Float64")
+ res2 = df2.shift(axis=1, periods=1, fill_value=12345)
+ expected2 = DataFrame([12345] * 5, dtype="Float64")
+ tm.assert_frame_equal(res2, expected2)
+
+ def test_shift_deprecate_freq_and_fill_value(self, frame_or_series):
+ # Can't pass both!
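+ # With freq, shift moves the index itself and vacates no positions, so fill_value has nothing to fill.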
+ obj = frame_or_series( + np.random.default_rng(2).standard_normal(5), + index=date_range("1/1/2000", periods=5, freq="H"), + ) + + msg = ( + "Passing a 'freq' together with a 'fill_value' silently ignores the " + "fill_value" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + obj.shift(1, fill_value=1, freq="H") + + if frame_or_series is DataFrame: + obj.columns = date_range("1/1/2000", periods=1, freq="H") + with tm.assert_produces_warning(FutureWarning, match=msg): + obj.shift(1, axis=1, fill_value=1, freq="H") + + @pytest.mark.parametrize( + "input_data, output_data", + [(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])], + ) + def test_shift_non_writable_array(self, input_data, output_data, frame_or_series): + # GH21049 Verify whether non writable numpy array is shiftable + input_data.setflags(write=False) + + result = frame_or_series(input_data).shift(1) + if frame_or_series is not Series: + # need to explicitly specify columns in the empty case + expected = frame_or_series( + output_data, + index=range(len(output_data)), + columns=range(1), + dtype="float64", + ) + else: + expected = frame_or_series(output_data, dtype="float64") + + tm.assert_equal(result, expected) + + def test_shift_mismatched_freq(self, frame_or_series): + ts = frame_or_series( + np.random.default_rng(2).standard_normal(5), + index=date_range("1/1/2000", periods=5, freq="H"), + ) + + result = ts.shift(1, freq="5T") + exp_index = ts.index.shift(1, freq="5T") + tm.assert_index_equal(result.index, exp_index) + + # GH#1063, multiple of same base + result = ts.shift(1, freq="4H") + exp_index = ts.index + offsets.Hour(4) + tm.assert_index_equal(result.index, exp_index) + + @pytest.mark.parametrize( + "obj", + [ + Series([np.arange(5)]), + date_range("1/1/2011", periods=24, freq="H"), + Series(range(5), index=date_range("2017", periods=5)), + ], + ) + @pytest.mark.parametrize("shift_size", [0, 1, 2]) + def test_shift_always_copy(self, obj, shift_size, frame_or_series): + # GH#22397 + if frame_or_series is not Series: + obj = obj.to_frame() + assert obj.shift(shift_size) is not obj + + def test_shift_object_non_scalar_fill(self): + # shift requires scalar fill_value except for object dtype + ser = Series(range(3)) + with pytest.raises(ValueError, match="fill_value must be a scalar"): + ser.shift(1, fill_value=[]) + + df = ser.to_frame() + with pytest.raises(ValueError, match="fill_value must be a scalar"): + df.shift(1, fill_value=np.arange(3)) + + obj_ser = ser.astype(object) + result = obj_ser.shift(1, fill_value={}) + assert result[0] == {} + + obj_df = obj_ser.to_frame() + result = obj_df.shift(1, fill_value={}) + assert result.iloc[0, 0] == {} + + def test_shift_int(self, datetime_frame, frame_or_series): + ts = tm.get_obj(datetime_frame, frame_or_series).astype(int) + shifted = ts.shift(1) + expected = ts.astype(float).shift(1) + tm.assert_equal(shifted, expected) + + @pytest.mark.parametrize("dtype", ["int32", "int64"]) + def test_shift_32bit_take(self, frame_or_series, dtype): + # 32-bit taking + # GH#8129 + index = date_range("2000-01-01", periods=5) + arr = np.arange(5, dtype=dtype) + s1 = frame_or_series(arr, index=index) + p = arr[1] + result = s1.shift(periods=p) + expected = frame_or_series([np.nan, 0, 1, 2, 3], index=index) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("periods", [1, 2, 3, 4]) + def test_shift_preserve_freqstr(self, periods, frame_or_series): + # GH#21275 + obj = frame_or_series( + range(periods), + index=date_range("2016-1-1 00:00:00", 
periods=periods, freq="H"), + ) + + result = obj.shift(1, "2H") + + expected = frame_or_series( + range(periods), + index=date_range("2016-1-1 02:00:00", periods=periods, freq="H"), + ) + tm.assert_equal(result, expected) + + def test_shift_dst(self, frame_or_series): + # GH#13926 + dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern") + obj = frame_or_series(dates) + + res = obj.shift(0) + tm.assert_equal(res, obj) + assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]" + + res = obj.shift(1) + exp_vals = [NaT] + dates.astype(object).values.tolist()[:9] + exp = frame_or_series(exp_vals) + tm.assert_equal(res, exp) + assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]" + + res = obj.shift(-2) + exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT] + exp = frame_or_series(exp_vals) + tm.assert_equal(res, exp) + assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]" + + @pytest.mark.parametrize("ex", [10, -10, 20, -20]) + def test_shift_dst_beyond(self, frame_or_series, ex): + # GH#13926 + dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern") + obj = frame_or_series(dates) + res = obj.shift(ex) + exp = frame_or_series([NaT] * 10, dtype="datetime64[ns, US/Eastern]") + tm.assert_equal(res, exp) + assert tm.get_dtype(res) == "datetime64[ns, US/Eastern]" + + def test_shift_by_zero(self, datetime_frame, frame_or_series): + # shift by 0 + obj = tm.get_obj(datetime_frame, frame_or_series) + unshifted = obj.shift(0) + tm.assert_equal(unshifted, obj) + + def test_shift(self, datetime_frame): + # naive shift + ser = datetime_frame["A"] + + shifted = datetime_frame.shift(5) + tm.assert_index_equal(shifted.index, datetime_frame.index) + + shifted_ser = ser.shift(5) + tm.assert_series_equal(shifted["A"], shifted_ser) + + shifted = datetime_frame.shift(-5) + tm.assert_index_equal(shifted.index, datetime_frame.index) + + shifted_ser = ser.shift(-5) + tm.assert_series_equal(shifted["A"], shifted_ser) + + unshifted = datetime_frame.shift(5).shift(-5) + tm.assert_numpy_array_equal( + unshifted.dropna().values, datetime_frame.values[:-5] + ) + + unshifted_ser = ser.shift(5).shift(-5) + tm.assert_numpy_array_equal(unshifted_ser.dropna().values, ser.values[:-5]) + + def test_shift_by_offset(self, datetime_frame, frame_or_series): + # shift by DateOffset + obj = tm.get_obj(datetime_frame, frame_or_series) + offset = offsets.BDay() + + shifted = obj.shift(5, freq=offset) + assert len(shifted) == len(obj) + unshifted = shifted.shift(-5, freq=offset) + tm.assert_equal(unshifted, obj) + + shifted2 = obj.shift(5, freq="B") + tm.assert_equal(shifted, shifted2) + + unshifted = obj.shift(0, freq=offset) + tm.assert_equal(unshifted, obj) + + d = obj.index[0] + shifted_d = d + offset * 5 + if frame_or_series is DataFrame: + tm.assert_series_equal(obj.xs(d), shifted.xs(shifted_d), check_names=False) + else: + tm.assert_almost_equal(obj.at[d], shifted.at[shifted_d]) + + def test_shift_with_periodindex(self, frame_or_series): + # Shifting with PeriodIndex + ps = tm.makePeriodFrame() + ps = tm.get_obj(ps, frame_or_series) + + shifted = ps.shift(1) + unshifted = shifted.shift(-1) + tm.assert_index_equal(shifted.index, ps.index) + tm.assert_index_equal(unshifted.index, ps.index) + if frame_or_series is DataFrame: + tm.assert_numpy_array_equal( + unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values + ) + else: + tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1]) + + shifted2 = ps.shift(1, "D") + shifted3 = ps.shift(1, offsets.Day()) + 
tm.assert_equal(shifted2, shifted3) + tm.assert_equal(ps, shifted2.shift(-1, "D")) + + msg = "does not match PeriodIndex freq" + with pytest.raises(ValueError, match=msg): + ps.shift(freq="W") + + # legacy support + shifted4 = ps.shift(1, freq="D") + tm.assert_equal(shifted2, shifted4) + + shifted5 = ps.shift(1, freq=offsets.Day()) + tm.assert_equal(shifted5, shifted4) + + def test_shift_other_axis(self): + # shift other axis + # GH#6371 + df = DataFrame(np.random.default_rng(2).random((10, 5))) + expected = pd.concat( + [DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]], + ignore_index=True, + axis=1, + ) + result = df.shift(1, axis=1) + tm.assert_frame_equal(result, expected) + + def test_shift_named_axis(self): + # shift named axis + df = DataFrame(np.random.default_rng(2).random((10, 5))) + expected = pd.concat( + [DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]], + ignore_index=True, + axis=1, + ) + result = df.shift(1, axis="columns") + tm.assert_frame_equal(result, expected) + + def test_shift_other_axis_with_freq(self, datetime_frame): + obj = datetime_frame.T + offset = offsets.BDay() + + # GH#47039 + shifted = obj.shift(5, freq=offset, axis=1) + assert len(shifted) == len(obj) + unshifted = shifted.shift(-5, freq=offset, axis=1) + tm.assert_equal(unshifted, obj) + + def test_shift_bool(self): + df = DataFrame({"high": [True, False], "low": [False, False]}) + rs = df.shift(1) + xp = DataFrame( + np.array([[np.nan, np.nan], [True, False]], dtype=object), + columns=["high", "low"], + ) + tm.assert_frame_equal(rs, xp) + + def test_shift_categorical1(self, frame_or_series): + # GH#9416 + obj = frame_or_series(["a", "b", "c", "d"], dtype="category") + + rt = obj.shift(1).shift(-1) + tm.assert_equal(obj.iloc[:-1], rt.dropna()) + + def get_cat_values(ndframe): + # For Series we could just do ._values; for DataFrame + # we may be able to do this if we ever have 2D Categoricals + return ndframe._mgr.arrays[0] + + cat = get_cat_values(obj) + + sp1 = obj.shift(1) + tm.assert_index_equal(obj.index, sp1.index) + assert np.all(get_cat_values(sp1).codes[:1] == -1) + assert np.all(cat.codes[:-1] == get_cat_values(sp1).codes[1:]) + + sn2 = obj.shift(-2) + tm.assert_index_equal(obj.index, sn2.index) + assert np.all(get_cat_values(sn2).codes[-2:] == -1) + assert np.all(cat.codes[2:] == get_cat_values(sn2).codes[:-2]) + + tm.assert_index_equal(cat.categories, get_cat_values(sp1).categories) + tm.assert_index_equal(cat.categories, get_cat_values(sn2).categories) + + def test_shift_categorical(self): + # GH#9416 + s1 = Series(["a", "b", "c"], dtype="category") + s2 = Series(["A", "B", "C"], dtype="category") + df = DataFrame({"one": s1, "two": s2}) + rs = df.shift(1) + xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)}) + tm.assert_frame_equal(rs, xp) + + def test_shift_categorical_fill_value(self, frame_or_series): + ts = frame_or_series(["a", "b", "c", "d"], dtype="category") + res = ts.shift(1, fill_value="a") + expected = frame_or_series( + pd.Categorical( + ["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False + ) + ) + tm.assert_equal(res, expected) + + # check for incorrect fill_value + msg = r"Cannot setitem on a Categorical with a new category \(f\)" + with pytest.raises(TypeError, match=msg): + ts.shift(1, fill_value="f") + + def test_shift_fill_value(self, frame_or_series): + # GH#24128 + dti = date_range("1/1/2000", periods=5, freq="H") + + ts = frame_or_series([1.0, 2.0, 3.0, 4.0, 5.0], index=dti) + exp = frame_or_series([0.0, 1.0, 2.0, 
3.0, 4.0], index=dti) + # check that fill value works + result = ts.shift(1, fill_value=0.0) + tm.assert_equal(result, exp) + + exp = frame_or_series([0.0, 0.0, 1.0, 2.0, 3.0], index=dti) + result = ts.shift(2, fill_value=0.0) + tm.assert_equal(result, exp) + + ts = frame_or_series([1, 2, 3]) + res = ts.shift(2, fill_value=0) + assert tm.get_dtype(res) == tm.get_dtype(ts) + + # retain integer dtype + obj = frame_or_series([1, 2, 3, 4, 5], index=dti) + exp = frame_or_series([0, 1, 2, 3, 4], index=dti) + result = obj.shift(1, fill_value=0) + tm.assert_equal(result, exp) + + exp = frame_or_series([0, 0, 1, 2, 3], index=dti) + result = obj.shift(2, fill_value=0) + tm.assert_equal(result, exp) + + def test_shift_empty(self): + # Regression test for GH#8019 + df = DataFrame({"foo": []}) + rs = df.shift(-1) + + tm.assert_frame_equal(df, rs) + + def test_shift_duplicate_columns(self): + # GH#9092; verify that position-based shifting works + # in the presence of duplicate columns + column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]] + data = np.random.default_rng(2).standard_normal((20, 5)) + + shifted = [] + for columns in column_lists: + df = DataFrame(data.copy(), columns=columns) + for s in range(5): + df.iloc[:, s] = df.iloc[:, s].shift(s + 1) + df.columns = range(5) + shifted.append(df) + + # sanity check the base case + nulls = shifted[0].isna().sum() + tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64")) + + # check all answers are the same + tm.assert_frame_equal(shifted[0], shifted[1]) + tm.assert_frame_equal(shifted[0], shifted[2]) + + def test_shift_axis1_multiple_blocks(self, using_array_manager): + # GH#35488 + df1 = DataFrame(np.random.default_rng(2).integers(1000, size=(5, 3))) + df2 = DataFrame(np.random.default_rng(2).integers(1000, size=(5, 2))) + df3 = pd.concat([df1, df2], axis=1) + if not using_array_manager: + assert len(df3._mgr.blocks) == 2 + + result = df3.shift(2, axis=1) + + expected = df3.take([-1, -1, 0, 1, 2], axis=1) + # Explicit cast to float to avoid implicit cast when setting nan. + # Column names aren't unique, so directly calling `expected.astype` won't work. + expected = expected.pipe( + lambda df: df.set_axis(range(df.shape[1]), axis=1) + .astype({0: "float", 1: "float"}) + .set_axis(df.columns, axis=1) + ) + expected.iloc[:, :2] = np.nan + expected.columns = df3.columns + + tm.assert_frame_equal(result, expected) + + # Case with periods < 0 + # rebuild df3 because `take` call above consolidated + df3 = pd.concat([df1, df2], axis=1) + if not using_array_manager: + assert len(df3._mgr.blocks) == 2 + result = df3.shift(-2, axis=1) + + expected = df3.take([2, 3, 4, -1, -1], axis=1) + # Explicit cast to float to avoid implicit cast when setting nan. + # Column names aren't unique, so directly calling `expected.astype` won't work. 
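+ # Work around it: temporarily assign unique positional labels, cast, then restore the original labels.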
+ expected = expected.pipe( + lambda df: df.set_axis(range(df.shape[1]), axis=1) + .astype({3: "float", 4: "float"}) + .set_axis(df.columns, axis=1) + ) + expected.iloc[:, -2:] = np.nan + expected.columns = df3.columns + + tm.assert_frame_equal(result, expected) + + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) axis=1 support + def test_shift_axis1_multiple_blocks_with_int_fill(self): + # GH#42719 + rng = np.random.default_rng(2) + df1 = DataFrame(rng.integers(1000, size=(5, 3), dtype=int)) + df2 = DataFrame(rng.integers(1000, size=(5, 2), dtype=int)) + df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1) + result = df3.shift(2, axis=1, fill_value=np.int_(0)) + assert len(df3._mgr.blocks) == 2 + + expected = df3.take([-1, -1, 0, 1], axis=1) + expected.iloc[:, :2] = np.int_(0) + expected.columns = df3.columns + + tm.assert_frame_equal(result, expected) + + # Case with periods < 0 + df3 = pd.concat([df1.iloc[:4, 1:3], df2.iloc[:4, :]], axis=1) + result = df3.shift(-2, axis=1, fill_value=np.int_(0)) + assert len(df3._mgr.blocks) == 2 + + expected = df3.take([2, 3, -1, -1], axis=1) + expected.iloc[:, -2:] = np.int_(0) + expected.columns = df3.columns + + tm.assert_frame_equal(result, expected) + + def test_period_index_frame_shift_with_freq(self, frame_or_series): + ps = tm.makePeriodFrame() + ps = tm.get_obj(ps, frame_or_series) + + shifted = ps.shift(1, freq="infer") + unshifted = shifted.shift(-1, freq="infer") + tm.assert_equal(unshifted, ps) + + shifted2 = ps.shift(freq="D") + tm.assert_equal(shifted, shifted2) + + shifted3 = ps.shift(freq=offsets.Day()) + tm.assert_equal(shifted, shifted3) + + def test_datetime_frame_shift_with_freq(self, datetime_frame, frame_or_series): + dtobj = tm.get_obj(datetime_frame, frame_or_series) + shifted = dtobj.shift(1, freq="infer") + unshifted = shifted.shift(-1, freq="infer") + tm.assert_equal(dtobj, unshifted) + + shifted2 = dtobj.shift(freq=dtobj.index.freq) + tm.assert_equal(shifted, shifted2) + + inferred_ts = DataFrame( + datetime_frame.values, + Index(np.asarray(datetime_frame.index)), + columns=datetime_frame.columns, + ) + inferred_ts = tm.get_obj(inferred_ts, frame_or_series) + shifted = inferred_ts.shift(1, freq="infer") + expected = dtobj.shift(1, freq="infer") + expected.index = expected.index._with_freq(None) + tm.assert_equal(shifted, expected) + + unshifted = shifted.shift(-1, freq="infer") + tm.assert_equal(unshifted, inferred_ts) + + def test_period_index_frame_shift_with_freq_error(self, frame_or_series): + ps = tm.makePeriodFrame() + ps = tm.get_obj(ps, frame_or_series) + msg = "Given freq M does not match PeriodIndex freq D" + with pytest.raises(ValueError, match=msg): + ps.shift(freq="M") + + def test_datetime_frame_shift_with_freq_error( + self, datetime_frame, frame_or_series + ): + dtobj = tm.get_obj(datetime_frame, frame_or_series) + no_freq = dtobj.iloc[[0, 5, 7]] + msg = "Freq was not set in the index hence cannot be inferred" + with pytest.raises(ValueError, match=msg): + no_freq.shift(freq="infer") + + def test_shift_dt64values_int_fill_deprecated(self): + # GH#31971 + ser = Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")]) + + with pytest.raises(TypeError, match="value should be a"): + ser.shift(1, fill_value=0) + + df = ser.to_frame() + with pytest.raises(TypeError, match="value should be a"): + df.shift(1, fill_value=0) + + # axis = 1 + df2 = DataFrame({"A": ser, "B": ser}) + df2._consolidate_inplace() + + result = df2.shift(1, axis=1, fill_value=0) + expected = DataFrame({"A": 
[0, 0], "B": df2["A"]}) + tm.assert_frame_equal(result, expected) + + # same thing but not consolidated; pre-2.0 we got different behavior + df3 = DataFrame({"A": ser}) + df3["B"] = ser + assert len(df3._mgr.arrays) == 2 + result = df3.shift(1, axis=1, fill_value=0) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "as_cat", + [ + pytest.param( + True, + marks=pytest.mark.xfail( + reason="_can_hold_element incorrectly always returns True" + ), + ), + False, + ], + ) + @pytest.mark.parametrize( + "vals", + [ + date_range("2020-01-01", periods=2), + date_range("2020-01-01", periods=2, tz="US/Pacific"), + pd.period_range("2020-01-01", periods=2, freq="D"), + pd.timedelta_range("2020 Days", periods=2, freq="D"), + pd.interval_range(0, 3, periods=2), + pytest.param( + pd.array([1, 2], dtype="Int64"), + marks=pytest.mark.xfail( + reason="_can_hold_element incorrectly always returns True" + ), + ), + pytest.param( + pd.array([1, 2], dtype="Float32"), + marks=pytest.mark.xfail( + reason="_can_hold_element incorrectly always returns True" + ), + ), + ], + ids=lambda x: str(x.dtype), + ) + def test_shift_dt64values_axis1_invalid_fill(self, vals, as_cat): + # GH#44564 + ser = Series(vals) + if as_cat: + ser = ser.astype("category") + + df = DataFrame({"A": ser}) + result = df.shift(-1, axis=1, fill_value="foo") + expected = DataFrame({"A": ["foo", "foo"]}) + tm.assert_frame_equal(result, expected) + + # same thing but multiple blocks + df2 = DataFrame({"A": ser, "B": ser}) + df2._consolidate_inplace() + + result = df2.shift(-1, axis=1, fill_value="foo") + expected = DataFrame({"A": df2["B"], "B": ["foo", "foo"]}) + tm.assert_frame_equal(result, expected) + + # same thing but not consolidated + df3 = DataFrame({"A": ser}) + df3["B"] = ser + assert len(df3._mgr.arrays) == 2 + result = df3.shift(-1, axis=1, fill_value="foo") + tm.assert_frame_equal(result, expected) + + def test_shift_axis1_categorical_columns(self): + # GH#38434 + ci = CategoricalIndex(["a", "b", "c"]) + df = DataFrame( + {"a": [1, 3], "b": [2, 4], "c": [5, 6]}, index=ci[:-1], columns=ci + ) + result = df.shift(axis=1) + + expected = DataFrame( + {"a": [np.nan, np.nan], "b": [1, 3], "c": [2, 4]}, index=ci[:-1], columns=ci + ) + tm.assert_frame_equal(result, expected) + + # periods != 1 + result = df.shift(2, axis=1) + expected = DataFrame( + {"a": [np.nan, np.nan], "b": [np.nan, np.nan], "c": [1, 3]}, + index=ci[:-1], + columns=ci, + ) + tm.assert_frame_equal(result, expected) + + def test_shift_axis1_many_periods(self): + # GH#44978 periods > len(columns) + df = DataFrame(np.random.default_rng(2).random((5, 3))) + shifted = df.shift(6, axis=1, fill_value=None) + + expected = df * np.nan + tm.assert_frame_equal(shifted, expected) + + shifted2 = df.shift(-6, axis=1, fill_value=None) + tm.assert_frame_equal(shifted2, expected) + + def test_shift_with_offsets_freq(self): + df = DataFrame({"x": [1, 2, 3]}, index=date_range("2000", periods=3)) + shifted = df.shift(freq="1MS") + expected = DataFrame( + {"x": [1, 2, 3]}, + index=date_range(start="02/01/2000", end="02/01/2000", periods=3), + ) + tm.assert_frame_equal(shifted, expected) + + def test_shift_with_iterable_basic_functionality(self): + # GH#44424 + data = {"a": [1, 2, 3], "b": [4, 5, 6]} + shifts = [0, 1, 2] + + df = DataFrame(data) + shifted = df.shift(shifts) + + expected = DataFrame( + { + "a_0": [1, 2, 3], + "b_0": [4, 5, 6], + "a_1": [np.nan, 1.0, 2.0], + "b_1": [np.nan, 4.0, 5.0], + "a_2": [np.nan, np.nan, 1.0], + "b_2": [np.nan, np.nan, 4.0], + } + ) + 
tm.assert_frame_equal(expected, shifted) + + def test_shift_with_iterable_series(self): + # GH#44424 + data = {"a": [1, 2, 3]} + shifts = [0, 1, 2] + + df = DataFrame(data) + s = df["a"] + tm.assert_frame_equal(s.shift(shifts), df.shift(shifts)) + + def test_shift_with_iterable_freq_and_fill_value(self): + # GH#44424 + df = DataFrame( + np.random.default_rng(2).standard_normal(5), + index=date_range("1/1/2000", periods=5, freq="H"), + ) + + tm.assert_frame_equal( + # rename because shift with an iterable leads to str column names + df.shift([1], fill_value=1).rename(columns=lambda x: int(x[0])), + df.shift(1, fill_value=1), + ) + + tm.assert_frame_equal( + df.shift([1], freq="H").rename(columns=lambda x: int(x[0])), + df.shift(1, freq="H"), + ) + + msg = ( + "Passing a 'freq' together with a 'fill_value' silently ignores the " + "fill_value" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + df.shift([1, 2], fill_value=1, freq="H") + + def test_shift_with_iterable_check_other_arguments(self): + # GH#44424 + data = {"a": [1, 2], "b": [4, 5]} + shifts = [0, 1] + df = DataFrame(data) + + # test suffix + shifted = df[["a"]].shift(shifts, suffix="_suffix") + expected = DataFrame({"a_suffix_0": [1, 2], "a_suffix_1": [np.nan, 1.0]}) + tm.assert_frame_equal(shifted, expected) + + # check bad inputs when doing multiple shifts + msg = "If `periods` contains multiple shifts, `axis` cannot be 1." + with pytest.raises(ValueError, match=msg): + df.shift(shifts, axis=1) + + msg = "Periods must be integer, but s is <class 'str'>." + with pytest.raises(TypeError, match=msg): + df.shift(["s"]) + + msg = "If `periods` is an iterable, it cannot be empty." + with pytest.raises(ValueError, match=msg): + df.shift([]) + + msg = "Cannot specify `suffix` if `periods` is an int."
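+ # `suffix` only applies when `periods` is a list and several shifted
+ # columns are produced; combining it with an int `periods` must raise.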
+ with pytest.raises(ValueError, match=msg): + df.shift(1, suffix="fails") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_size.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_size.py new file mode 100644 index 00000000..0c8b6473 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_size.py @@ -0,0 +1,21 @@ +import numpy as np +import pytest + +from pandas import DataFrame + + +@pytest.mark.parametrize( + "data, index, expected", + [ + ({"col1": [1], "col2": [3]}, None, 2), + ({}, None, 0), + ({"col1": [1, np.nan], "col2": [3, 4]}, None, 4), + ({"col1": [1, 2], "col2": [3, 4]}, [["a", "b"], [1, 2]], 4), + ({"col1": [1, 2, 3, 4], "col2": [3, 4, 5, 6]}, ["x", "y", "a", "b"], 8), + ], +) +def test_size(data, index, expected): + # GH#52897 + df = DataFrame(data, index=index) + assert df.size == expected + assert isinstance(df.size, int) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_sort_index.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_sort_index.py new file mode 100644 index 00000000..f1465c9b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_sort_index.py @@ -0,0 +1,996 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + CategoricalDtype, + CategoricalIndex, + DataFrame, + IntervalIndex, + MultiIndex, + RangeIndex, + Series, + Timestamp, +) +import pandas._testing as tm + + +class TestDataFrameSortIndex: + def test_sort_index_and_reconstruction_doc_example(self): + # doc example + df = DataFrame( + {"value": [1, 2, 3, 4]}, + index=MultiIndex( + levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]] + ), + ) + assert df.index._is_lexsorted() + assert not df.index.is_monotonic_increasing + + # sort it + expected = DataFrame( + {"value": [2, 1, 4, 3]}, + index=MultiIndex( + levels=[["a", "b"], ["aa", "bb"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]] + ), + ) + result = df.sort_index() + assert result.index.is_monotonic_increasing + tm.assert_frame_equal(result, expected) + + # reconstruct + result = df.sort_index().copy() + result.index = result.index._sort_levels_monotonic() + assert result.index.is_monotonic_increasing + tm.assert_frame_equal(result, expected) + + def test_sort_index_non_existent_label_multiindex(self): + # GH#12261 + df = DataFrame(0, columns=[], index=MultiIndex.from_product([[], []])) + with tm.assert_produces_warning(None): + df.loc["b", "2"] = 1 + df.loc["a", "3"] = 1 + result = df.sort_index().index.is_monotonic_increasing + assert result is True + + def test_sort_index_reorder_on_ops(self): + # GH#15687 + df = DataFrame( + np.random.default_rng(2).standard_normal((8, 2)), + index=MultiIndex.from_product( + [["a", "b"], ["big", "small"], ["red", "blu"]], + names=["letter", "size", "color"], + ), + columns=["near", "far"], + ) + df = df.sort_index() + + def my_func(group): + group.index = ["newz", "newa"] + return group + + result = df.groupby(level=["letter", "size"]).apply(my_func).sort_index() + expected = MultiIndex.from_product( + [["a", "b"], ["big", "small"], ["newa", "newz"]], + names=["letter", "size", None], + ) + + tm.assert_index_equal(result.index, expected) + + def test_sort_index_nan_multiindex(self): + # GH#14784 + # incorrect sorting w.r.t. 
nans + tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]] + mi = MultiIndex.from_tuples(tuples) + + df = DataFrame(np.arange(16).reshape(4, 4), index=mi, columns=list("ABCD")) + s = Series(np.arange(4), index=mi) + + df2 = DataFrame( + { + "date": pd.DatetimeIndex( + [ + "20121002", + "20121007", + "20130130", + "20130202", + "20130305", + "20121002", + "20121207", + "20130130", + "20130202", + "20130305", + "20130202", + "20130305", + ] + ), + "user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5], + "whole_cost": [ + 1790, + np.nan, + 280, + 259, + np.nan, + 623, + 90, + 312, + np.nan, + 301, + 359, + 801, + ], + "cost": [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12], + } + ).set_index(["date", "user_id"]) + + # sorting frame, default nan position is last + result = df.sort_index() + expected = df.iloc[[3, 0, 2, 1], :] + tm.assert_frame_equal(result, expected) + + # sorting frame, nan position last + result = df.sort_index(na_position="last") + expected = df.iloc[[3, 0, 2, 1], :] + tm.assert_frame_equal(result, expected) + + # sorting frame, nan position first + result = df.sort_index(na_position="first") + expected = df.iloc[[1, 2, 3, 0], :] + tm.assert_frame_equal(result, expected) + + # sorting frame with removed rows + result = df2.dropna().sort_index() + expected = df2.sort_index().dropna() + tm.assert_frame_equal(result, expected) + + # sorting series, default nan position is last + result = s.sort_index() + expected = s.iloc[[3, 0, 2, 1]] + tm.assert_series_equal(result, expected) + + # sorting series, nan position last + result = s.sort_index(na_position="last") + expected = s.iloc[[3, 0, 2, 1]] + tm.assert_series_equal(result, expected) + + # sorting series, nan position first + result = s.sort_index(na_position="first") + expected = s.iloc[[1, 2, 3, 0]] + tm.assert_series_equal(result, expected) + + def test_sort_index_nan(self): + # GH#3917 + + # Test DataFrame with nan label + df = DataFrame( + {"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}, + index=[1, 2, 3, 4, 5, 6, np.nan], + ) + + # NaN label, ascending=True, na_position='last' + sorted_df = df.sort_index(kind="quicksort", ascending=True, na_position="last") + expected = DataFrame( + {"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}, + index=[1, 2, 3, 4, 5, 6, np.nan], + ) + tm.assert_frame_equal(sorted_df, expected) + + # NaN label, ascending=True, na_position='first' + sorted_df = df.sort_index(na_position="first") + expected = DataFrame( + {"A": [4, 1, 2, np.nan, 1, 6, 8], "B": [5, 9, np.nan, 5, 2, 5, 4]}, + index=[np.nan, 1, 2, 3, 4, 5, 6], + ) + tm.assert_frame_equal(sorted_df, expected) + + # NaN label, ascending=False, na_position='last' + sorted_df = df.sort_index(kind="quicksort", ascending=False) + expected = DataFrame( + {"A": [8, 6, 1, np.nan, 2, 1, 4], "B": [4, 5, 2, 5, np.nan, 9, 5]}, + index=[6, 5, 4, 3, 2, 1, np.nan], + ) + tm.assert_frame_equal(sorted_df, expected) + + # NaN label, ascending=False, na_position='first' + sorted_df = df.sort_index( + kind="quicksort", ascending=False, na_position="first" + ) + expected = DataFrame( + {"A": [4, 8, 6, 1, np.nan, 2, 1], "B": [5, 4, 5, 2, 5, np.nan, 9]}, + index=[np.nan, 6, 5, 4, 3, 2, 1], + ) + tm.assert_frame_equal(sorted_df, expected) + + def test_sort_index_multi_index(self): + # GH#25775, testing that sorting by index works with a multi-index. 
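+ # Level "b" is constant here, so sorting on levels ("b", "a")
+ # effectively reduces to sorting by level "a".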
+ df = DataFrame( + {"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")} + ) + result = df.set_index(list("abc")).sort_index(level=list("ba")) + + expected = DataFrame( + {"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")} + ) + expected = expected.set_index(list("abc")) + + tm.assert_frame_equal(result, expected) + + def test_sort_index_inplace(self): + frame = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=[1, 2, 3, 4], + columns=["A", "B", "C", "D"], + ) + + # axis=0 + unordered = frame.loc[[3, 2, 4, 1]] + a_values = unordered["A"] + df = unordered.copy() + return_value = df.sort_index(inplace=True) + assert return_value is None + expected = frame + tm.assert_frame_equal(df, expected) + # GH 44153 related + # Used to be a_id != id(df["A"]), but flaky in the CI + assert a_values is not df["A"] + + df = unordered.copy() + return_value = df.sort_index(ascending=False, inplace=True) + assert return_value is None + expected = frame[::-1] + tm.assert_frame_equal(df, expected) + + # axis=1 + unordered = frame.loc[:, ["D", "B", "C", "A"]] + df = unordered.copy() + return_value = df.sort_index(axis=1, inplace=True) + assert return_value is None + expected = frame + tm.assert_frame_equal(df, expected) + + df = unordered.copy() + return_value = df.sort_index(axis=1, ascending=False, inplace=True) + assert return_value is None + expected = frame.iloc[:, ::-1] + tm.assert_frame_equal(df, expected) + + def test_sort_index_different_sortorder(self): + A = np.arange(20).repeat(5) + B = np.tile(np.arange(5), 20) + + indexer = np.random.default_rng(2).permutation(100) + A = A.take(indexer) + B = B.take(indexer) + + df = DataFrame( + {"A": A, "B": B, "C": np.random.default_rng(2).standard_normal(100)} + ) + + ex_indexer = np.lexsort((df.B.max() - df.B, df.A)) + expected = df.take(ex_indexer) + + # test with multiindex, too + idf = df.set_index(["A", "B"]) + + result = idf.sort_index(ascending=[1, 0]) + expected = idf.take(ex_indexer) + tm.assert_frame_equal(result, expected) + + # also, Series! 
+ result = idf["C"].sort_index(ascending=[1, 0]) + tm.assert_series_equal(result, expected["C"]) + + def test_sort_index_level(self): + mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC")) + df = DataFrame([[1, 2], [3, 4]], mi) + + result = df.sort_index(level="A", sort_remaining=False) + expected = df + tm.assert_frame_equal(result, expected) + + result = df.sort_index(level=["A", "B"], sort_remaining=False) + expected = df + tm.assert_frame_equal(result, expected) + + # Error thrown by sort_index when + # first index is sorted last (GH#26053) + result = df.sort_index(level=["C", "B", "A"]) + expected = df.iloc[[1, 0]] + tm.assert_frame_equal(result, expected) + + result = df.sort_index(level=["B", "C", "A"]) + expected = df.iloc[[1, 0]] + tm.assert_frame_equal(result, expected) + + result = df.sort_index(level=["C", "A"]) + expected = df.iloc[[1, 0]] + tm.assert_frame_equal(result, expected) + + def test_sort_index_categorical_index(self): + df = DataFrame( + { + "A": np.arange(6, dtype="int64"), + "B": Series(list("aabbca")).astype(CategoricalDtype(list("cab"))), + } + ).set_index("B") + + result = df.sort_index() + expected = df.iloc[[4, 0, 1, 5, 2, 3]] + tm.assert_frame_equal(result, expected) + + result = df.sort_index(ascending=False) + expected = df.iloc[[2, 3, 0, 1, 5, 4]] + tm.assert_frame_equal(result, expected) + + def test_sort_index(self): + # GH#13496 + + frame = DataFrame( + np.arange(16).reshape(4, 4), + index=[1, 2, 3, 4], + columns=["A", "B", "C", "D"], + ) + + # axis=0 : sort rows by index labels + unordered = frame.loc[[3, 2, 4, 1]] + result = unordered.sort_index(axis=0) + expected = frame + tm.assert_frame_equal(result, expected) + + result = unordered.sort_index(ascending=False) + expected = frame[::-1] + tm.assert_frame_equal(result, expected) + + # axis=1 : sort columns by column names + unordered = frame.iloc[:, [2, 1, 3, 0]] + result = unordered.sort_index(axis=1) + tm.assert_frame_equal(result, frame) + + result = unordered.sort_index(axis=1, ascending=False) + expected = frame.iloc[:, ::-1] + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("level", ["A", 0]) # GH#21052 + def test_sort_index_multiindex(self, level): + # GH#13496 + + # sort rows by specified level of multi-index + mi = MultiIndex.from_tuples( + [[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list("ABC") + ) + df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi) + + expected_mi = MultiIndex.from_tuples( + [[1, 1, 1], [2, 1, 2], [2, 1, 3]], names=list("ABC") + ) + expected = DataFrame([[5, 6], [3, 4], [1, 2]], index=expected_mi) + result = df.sort_index(level=level) + tm.assert_frame_equal(result, expected) + + # sort_remaining=False + expected_mi = MultiIndex.from_tuples( + [[1, 1, 1], [2, 1, 3], [2, 1, 2]], names=list("ABC") + ) + expected = DataFrame([[5, 6], [1, 2], [3, 4]], index=expected_mi) + result = df.sort_index(level=level, sort_remaining=False) + tm.assert_frame_equal(result, expected) + + def test_sort_index_intervalindex(self): + # this is a de-facto sort via unstack + # confirming that we sort in the order of the bins + y = Series(np.random.default_rng(2).standard_normal(100)) + x1 = Series(np.sign(np.random.default_rng(2).standard_normal(100))) + x2 = pd.cut( + Series(np.random.default_rng(2).standard_normal(100)), + bins=[-3, -0.5, 0, 0.5, 3], + ) + model = pd.concat([y, x1, x2], axis=1, keys=["Y", "X1", "X2"]) + + result = model.groupby(["X1", "X2"], observed=True).mean().unstack() + expected = IntervalIndex.from_tuples( + [(-3.0, -0.5), (-0.5, 0.0), 
(0.0, 0.5), (0.5, 3.0)], closed="right" + ) + result = result.columns.levels[1].categories + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("inplace", [True, False]) + @pytest.mark.parametrize( + "original_dict, sorted_dict, ascending, ignore_index, output_index", + [ + ({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, True, [0, 1, 2]), + ({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, True, [0, 1, 2]), + ({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, False, [5, 3, 2]), + ({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, False, [2, 3, 5]), + ], + ) + def test_sort_index_ignore_index( + self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index + ): + # GH 30114 + original_index = [2, 5, 3] + df = DataFrame(original_dict, index=original_index) + expected_df = DataFrame(sorted_dict, index=output_index) + kwargs = { + "ascending": ascending, + "ignore_index": ignore_index, + "inplace": inplace, + } + + if inplace: + result_df = df.copy() + result_df.sort_index(**kwargs) + else: + result_df = df.sort_index(**kwargs) + + tm.assert_frame_equal(result_df, expected_df) + tm.assert_frame_equal(df, DataFrame(original_dict, index=original_index)) + + @pytest.mark.parametrize("inplace", [True, False]) + @pytest.mark.parametrize("ignore_index", [True, False]) + def test_respect_ignore_index(self, inplace, ignore_index): + # GH 43591 + df = DataFrame({"a": [1, 2, 3]}, index=RangeIndex(4, -1, -2)) + result = df.sort_index( + ascending=False, ignore_index=ignore_index, inplace=inplace + ) + + if inplace: + result = df + if ignore_index: + expected = DataFrame({"a": [1, 2, 3]}) + else: + expected = DataFrame({"a": [1, 2, 3]}, index=RangeIndex(4, -1, -2)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("inplace", [True, False]) + @pytest.mark.parametrize( + "original_dict, sorted_dict, ascending, ignore_index, output_index", + [ + ( + {"M1": [1, 2], "M2": [3, 4]}, + {"M1": [1, 2], "M2": [3, 4]}, + True, + True, + [0, 1], + ), + ( + {"M1": [1, 2], "M2": [3, 4]}, + {"M1": [2, 1], "M2": [4, 3]}, + False, + True, + [0, 1], + ), + ( + {"M1": [1, 2], "M2": [3, 4]}, + {"M1": [1, 2], "M2": [3, 4]}, + True, + False, + MultiIndex.from_tuples([(2, 1), (3, 4)], names=list("AB")), + ), + ( + {"M1": [1, 2], "M2": [3, 4]}, + {"M1": [2, 1], "M2": [4, 3]}, + False, + False, + MultiIndex.from_tuples([(3, 4), (2, 1)], names=list("AB")), + ), + ], + ) + def test_sort_index_ignore_index_multi_index( + self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index + ): + # GH 30114, this is to test ignore_index on MultiIndex of index + mi = MultiIndex.from_tuples([(2, 1), (3, 4)], names=list("AB")) + df = DataFrame(original_dict, index=mi) + expected_df = DataFrame(sorted_dict, index=output_index) + + kwargs = { + "ascending": ascending, + "ignore_index": ignore_index, + "inplace": inplace, + } + + if inplace: + result_df = df.copy() + result_df.sort_index(**kwargs) + else: + result_df = df.sort_index(**kwargs) + + tm.assert_frame_equal(result_df, expected_df) + tm.assert_frame_equal(df, DataFrame(original_dict, index=mi)) + + def test_sort_index_categorical_multiindex(self): + # GH#15058 + df = DataFrame( + { + "a": range(6), + "l1": pd.Categorical( + ["a", "a", "b", "b", "c", "c"], + categories=["c", "a", "b"], + ordered=True, + ), + "l2": [0, 1, 0, 1, 0, 1], + } + ) + result = df.set_index(["l1", "l2"]).sort_index() + expected = DataFrame( + [4, 5, 0, 1, 2, 3], + columns=["a"], + index=MultiIndex( + levels=[ + CategoricalIndex( + ["c", "a", "b"], + 
categories=["c", "a", "b"], + ordered=True, + name="l1", + dtype="category", + ), + [0, 1], + ], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + names=["l1", "l2"], + ), + ) + tm.assert_frame_equal(result, expected) + + def test_sort_index_and_reconstruction(self): + # GH#15622 + # lexsortedness should be identical + # across MultiIndex construction methods + + df = DataFrame([[1, 1], [2, 2]], index=list("ab")) + expected = DataFrame( + [[1, 1], [2, 2], [1, 1], [2, 2]], + index=MultiIndex.from_tuples( + [(0.5, "a"), (0.5, "b"), (0.8, "a"), (0.8, "b")] + ), + ) + assert expected.index._is_lexsorted() + + result = DataFrame( + [[1, 1], [2, 2], [1, 1], [2, 2]], + index=MultiIndex.from_product([[0.5, 0.8], list("ab")]), + ) + result = result.sort_index() + assert result.index.is_monotonic_increasing + + tm.assert_frame_equal(result, expected) + + result = DataFrame( + [[1, 1], [2, 2], [1, 1], [2, 2]], + index=MultiIndex( + levels=[[0.5, 0.8], ["a", "b"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]] + ), + ) + result = result.sort_index() + assert result.index._is_lexsorted() + + tm.assert_frame_equal(result, expected) + + concatted = pd.concat([df, df], keys=[0.8, 0.5]) + result = concatted.sort_index() + + assert result.index.is_monotonic_increasing + + tm.assert_frame_equal(result, expected) + + # GH#14015 + df = DataFrame( + [[1, 2], [6, 7]], + columns=MultiIndex.from_tuples( + [(0, "20160811 12:00:00"), (0, "20160809 12:00:00")], + names=["l1", "Date"], + ), + ) + + df.columns = df.columns.set_levels( + pd.to_datetime(df.columns.levels[1]), level=1 + ) + assert not df.columns.is_monotonic_increasing + result = df.sort_index(axis=1) + assert result.columns.is_monotonic_increasing + result = df.sort_index(axis=1, level=1) + assert result.columns.is_monotonic_increasing + + # TODO: better name, de-duplicate with test_sort_index_level above + def test_sort_index_level2(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + df = frame.copy() + df.index = np.arange(len(df)) + + # axis=1 + + # series + a_sorted = frame["A"].sort_index(level=0) + + # preserve names + assert a_sorted.index.names == frame.index.names + + # inplace + rs = frame.copy() + return_value = rs.sort_index(level=0, inplace=True) + assert return_value is None + tm.assert_frame_equal(rs, frame.sort_index(level=0)) + + def test_sort_index_level_large_cardinality(self): + # GH#2684 (int64) + index = MultiIndex.from_arrays([np.arange(4000)] * 3) + df = DataFrame( + np.random.default_rng(2).standard_normal(4000).astype("int64"), index=index + ) + + # it works! + result = df.sort_index(level=0) + assert result.index._lexsort_depth == 3 + + # GH#2684 (int32) + index = MultiIndex.from_arrays([np.arange(4000)] * 3) + df = DataFrame( + np.random.default_rng(2).standard_normal(4000).astype("int32"), index=index + ) + + # it works! 
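+ # Same check for int32: the sort must preserve the original dtype and
+ # still reach full lexsort depth.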
+ result = df.sort_index(level=0) + assert (result.dtypes.values == df.dtypes.values).all() + assert result.index._lexsort_depth == 3 + + def test_sort_index_level_by_name(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + frame.index.names = ["first", "second"] + result = frame.sort_index(level="second") + expected = frame.sort_index(level=1) + tm.assert_frame_equal(result, expected) + + def test_sort_index_level_mixed(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + sorted_before = frame.sort_index(level=1) + + df = frame.copy() + df["foo"] = "bar" + sorted_after = df.sort_index(level=1) + tm.assert_frame_equal(sorted_before, sorted_after.drop(["foo"], axis=1)) + + dft = frame.T + sorted_before = dft.sort_index(level=1, axis=1) + dft["foo", "three"] = "bar" + + sorted_after = dft.sort_index(level=1, axis=1) + tm.assert_frame_equal( + sorted_before.drop([("foo", "three")], axis=1), + sorted_after.drop([("foo", "three")], axis=1), + ) + + def test_sort_index_preserve_levels(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + result = frame.sort_index() + assert result.index.names == frame.index.names + + @pytest.mark.parametrize( + "gen,extra", + [ + ([1.0, 3.0, 2.0, 5.0], 4.0), + ([1, 3, 2, 5], 4), + ( + [ + Timestamp("20130101"), + Timestamp("20130103"), + Timestamp("20130102"), + Timestamp("20130105"), + ], + Timestamp("20130104"), + ), + (["1one", "3one", "2one", "5one"], "4one"), + ], + ) + def test_sort_index_multilevel_repr_8017(self, gen, extra): + data = np.random.default_rng(2).standard_normal((3, 4)) + + columns = MultiIndex.from_tuples([("red", i) for i in gen]) + df = DataFrame(data, index=list("def"), columns=columns) + df2 = pd.concat( + [ + df, + DataFrame( + "world", + index=list("def"), + columns=MultiIndex.from_tuples([("red", extra)]), + ), + ], + axis=1, + ) + + # check that the repr is good + # make sure that we have a correct sparsified repr + # e.g. 
only 1 header of 'red' + assert str(df2).splitlines()[0].split() == ["red"] + + # GH 8017 + # sorting fails after columns added + + # construct single-dtype then sort + result = df.copy().sort_index(axis=1) + expected = df.iloc[:, [0, 2, 1, 3]] + tm.assert_frame_equal(result, expected) + + result = df2.sort_index(axis=1) + expected = df2.iloc[:, [0, 2, 1, 4, 3]] + tm.assert_frame_equal(result, expected) + + # setitem then sort + result = df.copy() + result[("red", extra)] = "world" + + result = result.sort_index(axis=1) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "categories", + [ + pytest.param(["a", "b", "c"], id="str"), + pytest.param( + [pd.Interval(0, 1), pd.Interval(1, 2), pd.Interval(2, 3)], + id="pd.Interval", + ), + ], + ) + def test_sort_index_with_categories(self, categories): + # GH#23452 + df = DataFrame( + {"foo": range(len(categories))}, + index=CategoricalIndex( + data=categories, categories=categories, ordered=True + ), + ) + df.index = df.index.reorder_categories(df.index.categories[::-1]) + result = df.sort_index() + expected = DataFrame( + {"foo": reversed(range(len(categories)))}, + index=CategoricalIndex( + data=categories[::-1], categories=categories[::-1], ordered=True + ), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "ascending", + [ + None, + [True, None], + [False, "True"], + ], + ) + def test_sort_index_ascending_bad_value_raises(self, ascending): + # GH 39434 + df = DataFrame(np.arange(64)) + length = len(df.index) + df.index = [(i - length / 2) % length for i in range(length)] + match = 'For argument "ascending" expected type bool' + with pytest.raises(ValueError, match=match): + df.sort_index(axis=0, ascending=ascending, na_position="first") + + def test_sort_index_use_inf_as_na(self): + # GH 29687 + expected = DataFrame( + {"col1": [1, 2, 3], "col2": [3, 4, 5]}, + index=pd.date_range("2020", periods=3), + ) + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.use_inf_as_na", True): + result = expected.sort_index() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "ascending", + [(True, False), [True, False]], + ) + def test_sort_index_ascending_tuple(self, ascending): + df = DataFrame( + { + "legs": [4, 2, 4, 2, 2], + }, + index=MultiIndex.from_tuples( + [ + ("mammal", "dog"), + ("bird", "duck"), + ("mammal", "horse"), + ("bird", "penguin"), + ("mammal", "kangaroo"), + ], + names=["class", "animal"], + ), + ) + + # parameter `ascending` is a tuple + result = df.sort_index(level=(0, 1), ascending=ascending) + + expected = DataFrame( + { + "legs": [2, 2, 2, 4, 4], + }, + index=MultiIndex.from_tuples( + [ + ("bird", "penguin"), + ("bird", "duck"), + ("mammal", "kangaroo"), + ("mammal", "horse"), + ("mammal", "dog"), + ], + names=["class", "animal"], + ), + ) + + tm.assert_frame_equal(result, expected) + + +class TestDataFrameSortIndexKey: + def test_sort_multi_index_key(self): + # GH 25775, testing that sorting by index works with a multi-index.
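+ # For a MultiIndex, the `key` callable is applied to each selected
+ # index level separately before sorting.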
+ df = DataFrame( + {"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")} + ).set_index(list("abc")) + + result = df.sort_index(level=list("ac"), key=lambda x: x) + + expected = DataFrame( + {"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")} + ).set_index(list("abc")) + tm.assert_frame_equal(result, expected) + + result = df.sort_index(level=list("ac"), key=lambda x: -x) + expected = DataFrame( + {"a": [3, 2, 1], "b": [0, 0, 0], "c": [0, 2, 1], "d": list("acb")} + ).set_index(list("abc")) + + tm.assert_frame_equal(result, expected) + + def test_sort_index_key(self): # issue 27237 + df = DataFrame(np.arange(6, dtype="int64"), index=list("aaBBca")) + + result = df.sort_index() + expected = df.iloc[[2, 3, 0, 1, 5, 4]] + tm.assert_frame_equal(result, expected) + + result = df.sort_index(key=lambda x: x.str.lower()) + expected = df.iloc[[0, 1, 5, 2, 3, 4]] + tm.assert_frame_equal(result, expected) + + result = df.sort_index(key=lambda x: x.str.lower(), ascending=False) + expected = df.iloc[[4, 2, 3, 0, 1, 5]] + tm.assert_frame_equal(result, expected) + + def test_sort_index_key_int(self): + df = DataFrame(np.arange(6, dtype="int64"), index=np.arange(6, dtype="int64")) + + result = df.sort_index() + tm.assert_frame_equal(result, df) + + result = df.sort_index(key=lambda x: -x) + expected = df.sort_index(ascending=False) + tm.assert_frame_equal(result, expected) + + result = df.sort_index(key=lambda x: 2 * x) + tm.assert_frame_equal(result, df) + + def test_sort_multi_index_key_str(self): + # GH 25775, testing that sorting by index works with a multi-index. + df = DataFrame( + {"a": ["B", "a", "C"], "b": [0, 1, 0], "c": list("abc"), "d": [0, 1, 2]} + ).set_index(list("abc")) + + result = df.sort_index(level="a", key=lambda x: x.str.lower()) + + expected = DataFrame( + {"a": ["a", "B", "C"], "b": [1, 0, 0], "c": list("bac"), "d": [1, 0, 2]} + ).set_index(list("abc")) + tm.assert_frame_equal(result, expected) + + result = df.sort_index( + level=list("abc"), # can refer to names + key=lambda x: x.str.lower() if x.name in ["a", "c"] else -x, + ) + + expected = DataFrame( + {"a": ["a", "B", "C"], "b": [1, 0, 0], "c": list("bac"), "d": [1, 0, 2]} + ).set_index(list("abc")) + tm.assert_frame_equal(result, expected) + + def test_changes_length_raises(self): + df = DataFrame({"A": [1, 2, 3]}) + with pytest.raises(ValueError, match="change the shape"): + df.sort_index(key=lambda x: x[:1]) + + def test_sort_index_multiindex_sparse_column(self): + # GH 29735, testing that sort_index on a multiindexed frame with sparse + # columns fills with 0. 
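+ # The frame below is constructed already sorted, so sort_index(level=0)
+ # is a no-op; the check is that the SparseDtype columns keep their
+ # fill value of 0.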
+ expected = DataFrame( + { + i: pd.array([0.0, 0.0, 0.0, 0.0], dtype=pd.SparseDtype("float64", 0.0)) + for i in range(0, 4) + }, + index=MultiIndex.from_product([[1, 2], [1, 2]]), + ) + + result = expected.sort_index(level=0) + + tm.assert_frame_equal(result, expected) + + def test_sort_index_na_position(self): + # GH#51612 + df = DataFrame([1, 2], index=MultiIndex.from_tuples([(1, 1), (1, pd.NA)])) + expected = df.copy() + result = df.sort_index(level=[0, 1], na_position="last") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("ascending", [True, False]) + def test_sort_index_multiindex_sort_remaining(self, ascending): + # GH #24247 + df = DataFrame( + {"A": [1, 2, 3, 4, 5], "B": [10, 20, 30, 40, 50]}, + index=MultiIndex.from_tuples( + [("a", "x"), ("a", "y"), ("b", "x"), ("b", "y"), ("c", "x")] + ), + ) + + result = df.sort_index(level=1, sort_remaining=False, ascending=ascending) + + if ascending: + expected = DataFrame( + {"A": [1, 3, 5, 2, 4], "B": [10, 30, 50, 20, 40]}, + index=MultiIndex.from_tuples( + [("a", "x"), ("b", "x"), ("c", "x"), ("a", "y"), ("b", "y")] + ), + ) + else: + expected = DataFrame( + {"A": [2, 4, 1, 3, 5], "B": [20, 40, 10, 30, 50]}, + index=MultiIndex.from_tuples( + [("a", "y"), ("b", "y"), ("a", "x"), ("b", "x"), ("c", "x")] + ), + ) + + tm.assert_frame_equal(result, expected) + + +def test_sort_index_with_sliced_multiindex(): + # GH 55379 + mi = MultiIndex.from_tuples( + [ + ("a", "10"), + ("a", "18"), + ("a", "25"), + ("b", "16"), + ("b", "26"), + ("a", "45"), + ("b", "28"), + ("a", "5"), + ("a", "50"), + ("a", "51"), + ("b", "4"), + ], + names=["group", "str"], + ) + + df = DataFrame({"x": range(len(mi))}, index=mi) + result = df.iloc[0:6].sort_index() + + expected = DataFrame( + {"x": [0, 1, 2, 5, 3, 4]}, + index=MultiIndex.from_tuples( + [ + ("a", "10"), + ("a", "18"), + ("a", "25"), + ("a", "45"), + ("b", "16"), + ("b", "26"), + ], + names=["group", "str"], + ), + ) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_sort_values.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_sort_values.py new file mode 100644 index 00000000..bd7d882f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_sort_values.py @@ -0,0 +1,940 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + NaT, + Timestamp, + date_range, +) +import pandas._testing as tm +from pandas.util.version import Version + + +class TestDataFrameSortValues: + @pytest.mark.parametrize("dtype", [np.uint8, bool]) + def test_sort_values_sparse_no_warning(self, dtype): + # GH#45618 + ser = pd.Series(Categorical(["a", "b", "a"], categories=["a", "b", "c"])) + df = pd.get_dummies(ser, dtype=dtype, sparse=True) + + with tm.assert_produces_warning(None): + # No warnings about constructing Index from SparseArray + df.sort_values(by=df.columns.tolist()) + + def test_sort_values(self): + frame = DataFrame( + [[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC") + ) + + # by column (axis=0) + sorted_df = frame.sort_values(by="A") + indexer = frame["A"].argsort().values + expected = frame.loc[frame.index[indexer]] + tm.assert_frame_equal(sorted_df, expected) + + sorted_df = frame.sort_values(by="A", ascending=False) + indexer = indexer[::-1] + expected = frame.loc[frame.index[indexer]] + tm.assert_frame_equal(sorted_df, expected) + + sorted_df = frame.sort_values(by="A", 
ascending=False) + tm.assert_frame_equal(sorted_df, expected) + + # GH4839 + sorted_df = frame.sort_values(by=["A"], ascending=[False]) + tm.assert_frame_equal(sorted_df, expected) + + # multiple bys + sorted_df = frame.sort_values(by=["B", "C"]) + expected = frame.loc[[2, 1, 3]] + tm.assert_frame_equal(sorted_df, expected) + + sorted_df = frame.sort_values(by=["B", "C"], ascending=False) + tm.assert_frame_equal(sorted_df, expected[::-1]) + + sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False]) + tm.assert_frame_equal(sorted_df, expected) + + msg = "No axis named 2 for object type DataFrame" + with pytest.raises(ValueError, match=msg): + frame.sort_values(by=["A", "B"], axis=2, inplace=True) + + # by row (axis=1): GH#10806 + sorted_df = frame.sort_values(by=3, axis=1) + expected = frame + tm.assert_frame_equal(sorted_df, expected) + + sorted_df = frame.sort_values(by=3, axis=1, ascending=False) + expected = frame.reindex(columns=["C", "B", "A"]) + tm.assert_frame_equal(sorted_df, expected) + + sorted_df = frame.sort_values(by=[1, 2], axis="columns") + expected = frame.reindex(columns=["B", "A", "C"]) + tm.assert_frame_equal(sorted_df, expected) + + sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False]) + tm.assert_frame_equal(sorted_df, expected) + + sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False) + expected = frame.reindex(columns=["C", "B", "A"]) + tm.assert_frame_equal(sorted_df, expected) + + msg = r"Length of ascending \(5\) != length of by \(2\)" + with pytest.raises(ValueError, match=msg): + frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5) + + def test_sort_values_by_empty_list(self): + # https://github.com/pandas-dev/pandas/issues/40258 + expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]}) + result = expected.sort_values(by=[]) + tm.assert_frame_equal(result, expected) + assert result is not expected + + def test_sort_values_inplace(self): + frame = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=[1, 2, 3, 4], + columns=["A", "B", "C", "D"], + ) + + sorted_df = frame.copy() + return_value = sorted_df.sort_values(by="A", inplace=True) + assert return_value is None + expected = frame.sort_values(by="A") + tm.assert_frame_equal(sorted_df, expected) + + sorted_df = frame.copy() + return_value = sorted_df.sort_values(by=1, axis=1, inplace=True) + assert return_value is None + expected = frame.sort_values(by=1, axis=1) + tm.assert_frame_equal(sorted_df, expected) + + sorted_df = frame.copy() + return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True) + assert return_value is None + expected = frame.sort_values(by="A", ascending=False) + tm.assert_frame_equal(sorted_df, expected) + + sorted_df = frame.copy() + return_value = sorted_df.sort_values( + by=["A", "B"], ascending=False, inplace=True + ) + assert return_value is None + expected = frame.sort_values(by=["A", "B"], ascending=False) + tm.assert_frame_equal(sorted_df, expected) + + def test_sort_values_multicolumn(self): + A = np.arange(5).repeat(20) + B = np.tile(np.arange(5), 20) + np.random.default_rng(2).shuffle(A) + np.random.default_rng(2).shuffle(B) + frame = DataFrame( + {"A": A, "B": B, "C": np.random.default_rng(2).standard_normal(100)} + ) + + result = frame.sort_values(by=["A", "B"]) + indexer = np.lexsort((frame["B"], frame["A"])) + expected = frame.take(indexer) + tm.assert_frame_equal(result, expected) + + result = frame.sort_values(by=["A", "B"], ascending=False) + indexer = np.lexsort( + 
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False)) + ) + expected = frame.take(indexer) + tm.assert_frame_equal(result, expected) + + result = frame.sort_values(by=["B", "A"]) + indexer = np.lexsort((frame["A"], frame["B"])) + expected = frame.take(indexer) + tm.assert_frame_equal(result, expected) + + def test_sort_values_multicolumn_uint64(self): + # GH#9918 + # uint64 multicolumn sort + + df = DataFrame( + { + "a": pd.Series([18446637057563306014, 1162265347240853609]), + "b": pd.Series([1, 2]), + } + ) + df["a"] = df["a"].astype(np.uint64) + result = df.sort_values(["a", "b"]) + + expected = DataFrame( + { + "a": pd.Series([18446637057563306014, 1162265347240853609]), + "b": pd.Series([1, 2]), + }, + index=pd.Index([1, 0]), + ) + + tm.assert_frame_equal(result, expected) + + def test_sort_values_nan(self): + # GH#3917 + df = DataFrame( + {"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]} + ) + + # sort one column only + expected = DataFrame( + {"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]}, + index=[2, 0, 3, 1, 6, 4, 5], + ) + sorted_df = df.sort_values(["A"], na_position="first") + tm.assert_frame_equal(sorted_df, expected) + + expected = DataFrame( + {"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]}, + index=[2, 5, 4, 6, 1, 0, 3], + ) + sorted_df = df.sort_values(["A"], na_position="first", ascending=False) + tm.assert_frame_equal(sorted_df, expected) + + expected = df.reindex(columns=["B", "A"]) + sorted_df = df.sort_values(by=1, axis=1, na_position="first") + tm.assert_frame_equal(sorted_df, expected) + + # na_position='last', order + expected = DataFrame( + {"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]}, + index=[3, 0, 1, 6, 4, 5, 2], + ) + sorted_df = df.sort_values(["A", "B"]) + tm.assert_frame_equal(sorted_df, expected) + + # na_position='first', order + expected = DataFrame( + {"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]}, + index=[2, 3, 0, 1, 6, 4, 5], + ) + sorted_df = df.sort_values(["A", "B"], na_position="first") + tm.assert_frame_equal(sorted_df, expected) + + # na_position='first', not order + expected = DataFrame( + {"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]}, + index=[2, 0, 3, 1, 6, 4, 5], + ) + sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first") + tm.assert_frame_equal(sorted_df, expected) + + # na_position='last', not order + expected = DataFrame( + {"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]}, + index=[5, 4, 6, 1, 3, 0, 2], + ) + sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last") + tm.assert_frame_equal(sorted_df, expected) + + def test_sort_values_stable_descending_sort(self): + # GH#6399 + df = DataFrame( + [[2, "first"], [2, "second"], [1, "a"], [1, "b"]], + columns=["sort_col", "order"], + ) + sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False) + tm.assert_frame_equal(df, sorted_df) + + @pytest.mark.parametrize( + "expected_idx_non_na, ascending", + [ + [ + [3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14], + [True, True], + ], + [ + [0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9], + [True, False], + ], + [ + [9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0], + [False, True], + ], + [ + [7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5], + [False, False], + ], + ], + ) + @pytest.mark.parametrize("na_position", ["first", "last"]) + def test_sort_values_stable_multicolumn_sort( + self, expected_idx_non_na, ascending, na_position + ): + # GH#38426 Clarify sort_values with 
mult. columns / labels is stable + df = DataFrame( + { + "A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8], + "B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4], + } + ) + # All rows with NaN in col "B" only have unique values in "A", therefore, + # only the rows with NaNs in "A" have to be treated individually: + expected_idx = ( + [11, 12, 2] + expected_idx_non_na + if na_position == "first" + else expected_idx_non_na + [2, 11, 12] + ) + expected = df.take(expected_idx) + sorted_df = df.sort_values( + ["A", "B"], ascending=ascending, na_position=na_position + ) + tm.assert_frame_equal(sorted_df, expected) + + def test_sort_values_stable_categorical(self): + # GH#16793 + df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)}) + expected = df.copy() + sorted_df = df.sort_values("x", kind="mergesort") + tm.assert_frame_equal(sorted_df, expected) + + def test_sort_values_datetimes(self): + # GH#3461, argsort / lexsort differences for a datetime column + df = DataFrame( + ["a", "a", "a", "b", "c", "d", "e", "f", "g"], + columns=["A"], + index=date_range("20130101", periods=9), + ) + dts = [ + Timestamp(x) + for x in [ + "2004-02-11", + "2004-01-21", + "2004-01-26", + "2005-09-20", + "2010-10-04", + "2009-05-12", + "2008-11-12", + "2010-09-28", + "2010-09-28", + ] + ] + df["B"] = dts[::2] + dts[1::2] + df["C"] = 2.0 + df["A1"] = 3.0 + + df1 = df.sort_values(by="A") + df2 = df.sort_values(by=["A"]) + tm.assert_frame_equal(df1, df2) + + df1 = df.sort_values(by="B") + df2 = df.sort_values(by=["B"]) + tm.assert_frame_equal(df1, df2) + + df1 = df.sort_values(by="B") + + df2 = df.sort_values(by=["C", "B"]) + tm.assert_frame_equal(df1, df2) + + def test_sort_values_frame_column_inplace_sort_exception( + self, float_frame, using_copy_on_write + ): + s = float_frame["A"] + float_frame_orig = float_frame.copy() + if using_copy_on_write: + # INFO(CoW) Series is a new object, so can be changed inplace + # without modifying original dataframe + s.sort_values(inplace=True) + tm.assert_series_equal(s, float_frame_orig["A"].sort_values()) + # column in dataframe is not changed + tm.assert_frame_equal(float_frame, float_frame_orig) + else: + with pytest.raises(ValueError, match="This Series is a view"): + s.sort_values(inplace=True) + + cp = s.copy() + cp.sort_values() # it works! + + def test_sort_values_nat_values_in_int_column(self): + # GH#14922: "sorting with large float and multiple columns incorrect" + + # cause was that the int64 value NaT was considered as "na", which is + # only correct for datetime64 columns.
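+ # iNaT (the integer representation of NaT) is the most negative int64
+ # value, so as a plain integer it sorts before every other value.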
+ + int_values = (2, int(NaT._value)) + float_values = (2.0, -1.797693e308) + + df = DataFrame( + {"int": int_values, "float": float_values}, columns=["int", "float"] + ) + + df_reversed = DataFrame( + {"int": int_values[::-1], "float": float_values[::-1]}, + columns=["int", "float"], + index=[1, 0], + ) + + # NaT is not a "na" for int64 columns, so na_position must not + # influence the result: + df_sorted = df.sort_values(["int", "float"], na_position="last") + tm.assert_frame_equal(df_sorted, df_reversed) + + df_sorted = df.sort_values(["int", "float"], na_position="first") + tm.assert_frame_equal(df_sorted, df_reversed) + + # reverse sorting order + df_sorted = df.sort_values(["int", "float"], ascending=False) + tm.assert_frame_equal(df_sorted, df) + + # and now check if NaT is still considered as "na" for datetime64 + # columns: + df = DataFrame( + {"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values}, + columns=["datetime", "float"], + ) + + df_reversed = DataFrame( + {"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]}, + columns=["datetime", "float"], + index=[1, 0], + ) + + df_sorted = df.sort_values(["datetime", "float"], na_position="first") + tm.assert_frame_equal(df_sorted, df_reversed) + + df_sorted = df.sort_values(["datetime", "float"], na_position="last") + tm.assert_frame_equal(df_sorted, df) + + # Ascending should not affect the results. + df_sorted = df.sort_values(["datetime", "float"], ascending=False) + tm.assert_frame_equal(df_sorted, df) + + def test_sort_nat(self): + # GH 16836 + + d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]] + d2 = [ + Timestamp(x) + for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"] + ] + df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3]) + + d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]] + d4 = [ + Timestamp(x) + for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"] + ] + expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2]) + sorted_df = df.sort_values(by=["a", "b"]) + tm.assert_frame_equal(sorted_df, expected) + + def test_sort_values_na_position_with_categories(self): + # GH#22556 + # Positioning missing value properly when column is Categorical. 
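+ # The underlying sort is stable, so the NaN rows keep their original
+ # relative order ([1, 3]) in both ascending and descending sorts.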
+ categories = ["A", "B", "C"] + category_indices = [0, 2, 4] + list_of_nans = [np.nan, np.nan] + na_indices = [1, 3] + na_position_first = "first" + na_position_last = "last" + column_name = "c" + + reversed_categories = sorted(categories, reverse=True) + reversed_category_indices = sorted(category_indices, reverse=True) + reversed_na_indices = sorted(na_indices) + + df = DataFrame( + { + column_name: Categorical( + ["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True + ) + } + ) + # sort ascending with na first + result = df.sort_values( + by=column_name, ascending=True, na_position=na_position_first + ) + expected = DataFrame( + { + column_name: Categorical( + list_of_nans + categories, categories=categories, ordered=True + ) + }, + index=na_indices + category_indices, + ) + + tm.assert_frame_equal(result, expected) + + # sort ascending with na last + result = df.sort_values( + by=column_name, ascending=True, na_position=na_position_last + ) + expected = DataFrame( + { + column_name: Categorical( + categories + list_of_nans, categories=categories, ordered=True + ) + }, + index=category_indices + na_indices, + ) + + tm.assert_frame_equal(result, expected) + + # sort descending with na first + result = df.sort_values( + by=column_name, ascending=False, na_position=na_position_first + ) + expected = DataFrame( + { + column_name: Categorical( + list_of_nans + reversed_categories, + categories=categories, + ordered=True, + ) + }, + index=reversed_na_indices + reversed_category_indices, + ) + + tm.assert_frame_equal(result, expected) + + # sort descending with na last + result = df.sort_values( + by=column_name, ascending=False, na_position=na_position_last + ) + expected = DataFrame( + { + column_name: Categorical( + reversed_categories + list_of_nans, + categories=categories, + ordered=True, + ) + }, + index=reversed_category_indices + reversed_na_indices, + ) + + tm.assert_frame_equal(result, expected) + + def test_sort_values_nat(self): + # GH#16836 + + d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]] + d2 = [ + Timestamp(x) + for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"] + ] + df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3]) + + d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]] + d4 = [ + Timestamp(x) + for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"] + ] + expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2]) + sorted_df = df.sort_values(by=["a", "b"]) + tm.assert_frame_equal(sorted_df, expected) + + def test_sort_values_na_position_with_categories_raises(self): + df = DataFrame( + { + "c": Categorical( + ["A", np.nan, "B", np.nan, "C"], + categories=["A", "B", "C"], + ordered=True, + ) + } + ) + + with pytest.raises(ValueError, match="invalid na_position: bad_position"): + df.sort_values(by="c", ascending=False, na_position="bad_position") + + @pytest.mark.parametrize("inplace", [True, False]) + @pytest.mark.parametrize( + "original_dict, sorted_dict, ignore_index, output_index", + [ + ({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, True, [0, 1, 2]), + ({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, False, [2, 1, 0]), + ( + {"A": [1, 2, 3], "B": [2, 3, 4]}, + {"A": [3, 2, 1], "B": [4, 3, 2]}, + True, + [0, 1, 2], + ), + ( + {"A": [1, 2, 3], "B": [2, 3, 4]}, + {"A": [3, 2, 1], "B": [4, 3, 2]}, + False, + [2, 1, 0], + ), + ], + ) + def test_sort_values_ignore_index( + self, inplace, original_dict, sorted_dict, ignore_index, output_index + ): + # GH 30114 + df = 
DataFrame(original_dict) + expected = DataFrame(sorted_dict, index=output_index) + kwargs = {"ignore_index": ignore_index, "inplace": inplace} + + if inplace: + result_df = df.copy() + result_df.sort_values("A", ascending=False, **kwargs) + else: + result_df = df.sort_values("A", ascending=False, **kwargs) + + tm.assert_frame_equal(result_df, expected) + tm.assert_frame_equal(df, DataFrame(original_dict)) + + def test_sort_values_nat_na_position_default(self): + # GH 13230 + expected = DataFrame( + { + "A": [1, 2, 3, 4, 4], + "date": pd.DatetimeIndex( + [ + "2010-01-01 09:00:00", + "2010-01-01 09:00:01", + "2010-01-01 09:00:02", + "2010-01-01 09:00:03", + "NaT", + ] + ), + } + ) + result = expected.sort_values(["A", "date"]) + tm.assert_frame_equal(result, expected) + + def test_sort_values_item_cache(self, using_array_manager, using_copy_on_write): + # previous behavior incorrectly retained an invalid _item_cache entry + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 3)), columns=["A", "B", "C"] + ) + df["D"] = df["A"] * 2 + ser = df["A"] + if not using_array_manager: + assert len(df._mgr.blocks) == 2 + + df.sort_values(by="A") + + if using_copy_on_write: + ser.iloc[0] = 99 + assert df.iloc[0, 0] == df["A"][0] + assert df.iloc[0, 0] != 99 + else: + ser.values[0] = 99 + assert df.iloc[0, 0] == df["A"][0] + assert df.iloc[0, 0] == 99 + + def test_sort_values_reshaping(self): + # GH 39426 + values = list(range(21)) + expected = DataFrame([values], columns=values) + df = expected.sort_values(expected.index[0], axis=1, ignore_index=True) + + tm.assert_frame_equal(df, expected) + + def test_sort_values_no_by_inplace(self): + # GH#50643 + df = DataFrame({"a": [1, 2, 3]}) + expected = df.copy() + result = df.sort_values(by=[], inplace=True) + tm.assert_frame_equal(df, expected) + assert result is None + + def test_sort_values_no_op_reset_index(self): + # GH#52553 + df = DataFrame({"A": [10, 20], "B": [1, 5]}, index=[2, 3]) + result = df.sort_values(by="A", ignore_index=True) + expected = DataFrame({"A": [10, 20], "B": [1, 5]}) + tm.assert_frame_equal(result, expected) + + +class TestDataFrameSortKey: # test key sorting (issue 27237) + def test_sort_values_inplace_key(self, sort_by_key): + frame = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=[1, 2, 3, 4], + columns=["A", "B", "C", "D"], + ) + + sorted_df = frame.copy() + return_value = sorted_df.sort_values(by="A", inplace=True, key=sort_by_key) + assert return_value is None + expected = frame.sort_values(by="A", key=sort_by_key) + tm.assert_frame_equal(sorted_df, expected) + + sorted_df = frame.copy() + return_value = sorted_df.sort_values( + by=1, axis=1, inplace=True, key=sort_by_key + ) + assert return_value is None + expected = frame.sort_values(by=1, axis=1, key=sort_by_key) + tm.assert_frame_equal(sorted_df, expected) + + sorted_df = frame.copy() + return_value = sorted_df.sort_values( + by="A", ascending=False, inplace=True, key=sort_by_key + ) + assert return_value is None + expected = frame.sort_values(by="A", ascending=False, key=sort_by_key) + tm.assert_frame_equal(sorted_df, expected) + + sorted_df = frame.copy() + sorted_df.sort_values( + by=["A", "B"], ascending=False, inplace=True, key=sort_by_key + ) + expected = frame.sort_values(by=["A", "B"], ascending=False, key=sort_by_key) + tm.assert_frame_equal(sorted_df, expected) + + def test_sort_values_key(self): + df = DataFrame(np.array([0, 5, np.nan, 3, 2, np.nan])) + + result = df.sort_values(0) + expected = df.iloc[[0, 4, 3, 1, 2, 5]] +
tm.assert_frame_equal(result, expected) + + result = df.sort_values(0, key=lambda x: x + 5) + expected = df.iloc[[0, 4, 3, 1, 2, 5]] + tm.assert_frame_equal(result, expected) + + result = df.sort_values(0, key=lambda x: -x, ascending=False) + expected = df.iloc[[0, 4, 3, 1, 2, 5]] + tm.assert_frame_equal(result, expected) + + def test_sort_values_by_key(self): + df = DataFrame( + { + "a": np.array([0, 3, np.nan, 3, 2, np.nan]), + "b": np.array([0, 2, np.nan, 5, 2, np.nan]), + } + ) + + result = df.sort_values("a", key=lambda x: -x) + expected = df.iloc[[1, 3, 4, 0, 2, 5]] + tm.assert_frame_equal(result, expected) + + result = df.sort_values(by=["a", "b"], key=lambda x: -x) + expected = df.iloc[[3, 1, 4, 0, 2, 5]] + tm.assert_frame_equal(result, expected) + + result = df.sort_values(by=["a", "b"], key=lambda x: -x, ascending=False) + expected = df.iloc[[0, 4, 1, 3, 2, 5]] + tm.assert_frame_equal(result, expected) + + def test_sort_values_by_key_by_name(self): + df = DataFrame( + { + "a": np.array([0, 3, np.nan, 3, 2, np.nan]), + "b": np.array([0, 2, np.nan, 5, 2, np.nan]), + } + ) + + def key(col): + if col.name == "a": + return -col + else: + return col + + result = df.sort_values(by="a", key=key) + expected = df.iloc[[1, 3, 4, 0, 2, 5]] + tm.assert_frame_equal(result, expected) + + result = df.sort_values(by=["a"], key=key) + expected = df.iloc[[1, 3, 4, 0, 2, 5]] + tm.assert_frame_equal(result, expected) + + result = df.sort_values(by="b", key=key) + expected = df.iloc[[0, 1, 4, 3, 2, 5]] + tm.assert_frame_equal(result, expected) + + result = df.sort_values(by=["a", "b"], key=key) + expected = df.iloc[[1, 3, 4, 0, 2, 5]] + tm.assert_frame_equal(result, expected) + + def test_sort_values_key_string(self): + df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]])) + + result = df.sort_values(1) + expected = df[::-1] + tm.assert_frame_equal(result, expected) + + result = df.sort_values([0, 1], key=lambda col: col.str.lower()) + tm.assert_frame_equal(result, df) + + result = df.sort_values( + [0, 1], key=lambda col: col.str.lower(), ascending=False + ) + expected = df.sort_values(1, key=lambda col: col.str.lower(), ascending=False) + tm.assert_frame_equal(result, expected) + + def test_sort_values_key_empty(self, sort_by_key): + df = DataFrame(np.array([])) + + df.sort_values(0, key=sort_by_key) + df.sort_index(key=sort_by_key) + + def test_changes_length_raises(self): + df = DataFrame({"A": [1, 2, 3]}) + with pytest.raises(ValueError, match="change the shape"): + df.sort_values("A", key=lambda x: x[:1]) + + def test_sort_values_key_axes(self): + df = DataFrame({0: ["Hello", "goodbye"], 1: [0, 1]}) + + result = df.sort_values(0, key=lambda col: col.str.lower()) + expected = df[::-1] + tm.assert_frame_equal(result, expected) + + result = df.sort_values(1, key=lambda col: -col) + expected = df[::-1] + tm.assert_frame_equal(result, expected) + + def test_sort_values_key_dict_axis(self): + df = DataFrame({0: ["Hello", 0], 1: ["goodbye", 1]}) + + result = df.sort_values(0, key=lambda col: col.str.lower(), axis=1) + expected = df.loc[:, ::-1] + tm.assert_frame_equal(result, expected) + + result = df.sort_values(1, key=lambda col: -col, axis=1) + expected = df.loc[:, ::-1] + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("ordered", [True, False]) + def test_sort_values_key_casts_to_categorical(self, ordered): + # https://github.com/pandas-dev/pandas/issues/36383 + categories = ["c", "b", "a"] + df = DataFrame({"x": [1, 1, 1], "y": ["a", "b", "c"]}) + + def 
sorter(key):
+            if key.name == "y":
+                return pd.Series(
+                    Categorical(key, categories=categories, ordered=ordered)
+                )
+            return key
+
+        result = df.sort_values(by=["x", "y"], key=sorter)
+        expected = DataFrame(
+            {"x": [1, 1, 1], "y": ["c", "b", "a"]}, index=pd.Index([2, 1, 0])
+        )
+
+        tm.assert_frame_equal(result, expected)
+
+
+@pytest.fixture
+def df_none():
+    return DataFrame(
+        {
+            "outer": ["a", "a", "a", "b", "b", "b"],
+            "inner": [1, 2, 2, 2, 1, 1],
+            "A": np.arange(6, 0, -1),
+            ("B", 5): ["one", "one", "two", "two", "one", "one"],
+        }
+    )
+
+
+@pytest.fixture(params=[["outer"], ["outer", "inner"]])
+def df_idx(request, df_none):
+    levels = request.param
+    return df_none.set_index(levels)
+
+
+@pytest.fixture(
+    params=[
+        "inner",  # index level
+        ["outer"],  # list of index level
+        "A",  # column
+        [("B", 5)],  # list of column
+        ["inner", "outer"],  # two index levels
+        [("B", 5), "outer"],  # index level and column
+        ["A", ("B", 5)],  # two columns
+        ["inner", "outer", "A"],  # two index levels and a column
+    ]
+)
+def sort_names(request):
+    return request.param
+
+
+@pytest.fixture(params=[True, False])
+def ascending(request):
+    return request.param
+
+
+class TestSortValuesLevelAsStr:
+    def test_sort_index_level_and_column_label(
+        self, df_none, df_idx, sort_names, ascending, request
+    ):
+        # GH#14353
+        if (
+            Version(np.__version__) >= Version("1.25")
+            and request.node.callspec.id == "df_idx0-inner-True"
+        ):
+            request.node.add_marker(
+                pytest.mark.xfail(
+                    reason=(
+                        "pandas default unstable sorting of duplicates "
+                        "issue with numpy>=1.25 with AVX instructions"
+                    ),
+                    strict=False,
+                )
+            )
+
+        # Get index levels from df_idx
+        levels = df_idx.index.names
+
+        # Compute expected by sorting on columns and then setting the index
+        expected = df_none.sort_values(
+            by=sort_names, ascending=ascending, axis=0
+        ).set_index(levels)
+
+        # Compute result by sorting on a mix of columns and index levels
+        result = df_idx.sort_values(by=sort_names, ascending=ascending, axis=0)
+
+        tm.assert_frame_equal(result, expected)
+
+    def test_sort_column_level_and_index_label(
+        self, df_none, df_idx, sort_names, ascending, request
+    ):
+        # GH#14353
+
+        # Get levels from df_idx
+        levels = df_idx.index.names
+
+        # Compute expected by sorting on axis=0, setting index levels, and then
+        # transposing. For some cases this will result in a frame with
+        # multiple column levels
+        expected = (
+            df_none.sort_values(by=sort_names, ascending=ascending, axis=0)
+            .set_index(levels)
+            .T
+        )
+
+        # Compute result by transposing and sorting on axis=1.
+        result = df_idx.T.sort_values(by=sort_names, ascending=ascending, axis=1)
+
+        if Version(np.__version__) >= Version("1.25"):
+            request.node.add_marker(
+                pytest.mark.xfail(
+                    reason=(
+                        "pandas default unstable sorting of duplicates "
+                        "issue with numpy>=1.25 with AVX instructions"
+                    ),
+                    strict=False,
+                )
+            )
+
+        tm.assert_frame_equal(result, expected)
+
+    def test_sort_values_validate_ascending_for_value_error(self):
+        # GH41634
+        df = DataFrame({"D": [23, 7, 21]})
+
+        msg = 'For argument "ascending" expected type bool, received type str.'
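+        # "False" is a non-empty (truthy) string, so coercing it to bool would
+        # silently sort ascending; pandas rejects non-bool values instead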
+        with pytest.raises(ValueError, match=msg):
+            df.sort_values(by="D", ascending="False")
+
+    @pytest.mark.parametrize("ascending", [False, 0, 1, True])
+    def test_sort_values_validate_ascending_functional(self, ascending):
+        df = DataFrame({"D": [23, 7, 21]})
+        indexer = df["D"].argsort().values
+
+        if not ascending:
+            indexer = indexer[::-1]
+
+        expected = df.loc[df.index[indexer]]
+        result = df.sort_values(by="D", ascending=ascending)
+        tm.assert_frame_equal(result, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_swapaxes.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_swapaxes.py
new file mode 100644
index 00000000..53a4691d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_swapaxes.py
@@ -0,0 +1,37 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+class TestSwapAxes:
+    def test_swapaxes(self):
+        df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)))
+        msg = "'DataFrame.swapaxes' is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            tm.assert_frame_equal(df.T, df.swapaxes(0, 1))
+            tm.assert_frame_equal(df.T, df.swapaxes(1, 0))
+
+    def test_swapaxes_noop(self):
+        df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)))
+        msg = "'DataFrame.swapaxes' is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            tm.assert_frame_equal(df, df.swapaxes(0, 0))
+
+    def test_swapaxes_invalid_axis(self):
+        df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)))
+        msg = "'DataFrame.swapaxes' is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            msg = "No axis named 2 for object type DataFrame"
+            with pytest.raises(ValueError, match=msg):
+                df.swapaxes(2, 5)
+
+    def test_swapaxes_noop_not_input(self):
+        # GH#51032: even a no-op swapaxes should return a new object,
+        # not the input itself
+        df = DataFrame({"a": [1, 2]})
+        msg = "'DataFrame.swapaxes' is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df.swapaxes("index", "index")
+        tm.assert_frame_equal(df, result)
+        assert df is not result
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_swaplevel.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_swaplevel.py
new file mode 100644
index 00000000..5511ac7d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_swaplevel.py
@@ -0,0 +1,36 @@
+import pytest
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+class TestSwaplevel:
+    def test_swaplevel(self, multiindex_dataframe_random_data):
+        frame = multiindex_dataframe_random_data
+
+        swapped = frame["A"].swaplevel()
+        swapped2 = frame["A"].swaplevel(0)
+        swapped3 = frame["A"].swaplevel(0, 1)
+        swapped4 = frame["A"].swaplevel("first", "second")
+        assert not swapped.index.equals(frame.index)
+        tm.assert_series_equal(swapped, swapped2)
+        tm.assert_series_equal(swapped, swapped3)
+        tm.assert_series_equal(swapped, swapped4)
+
+        back = swapped.swaplevel()
+        back2 = swapped.swaplevel(0)
+        back3 = swapped.swaplevel(0, 1)
+        back4 = swapped.swaplevel("second", "first")
+        assert back.index.equals(frame.index)
+        tm.assert_series_equal(back, back2)
+        tm.assert_series_equal(back, back3)
+        tm.assert_series_equal(back, back4)
+
+        ft = frame.T
+        swapped = ft.swaplevel("first", "second", axis=1)
+        exp = frame.swaplevel("first", "second").T
+        tm.assert_frame_equal(swapped, exp)
+
+        msg = "Can only swap levels on a hierarchical axis."
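+        # swaplevel needs at least two index levels; a frame with a flat
+        # RangeIndex has no levels to swap and raises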
+ with pytest.raises(TypeError, match=msg): + DataFrame(range(3)).swaplevel() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_csv.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_csv.py new file mode 100644 index 00000000..294da02e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_csv.py @@ -0,0 +1,1331 @@ +import csv +from io import StringIO +import os + +import numpy as np +import pytest + +from pandas.errors import ParserError + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + NaT, + Series, + Timestamp, + date_range, + read_csv, + to_datetime, +) +import pandas._testing as tm +import pandas.core.common as com + +from pandas.io.common import get_handle + + +class TestDataFrameToCSV: + def read_csv(self, path, **kwargs): + params = {"index_col": 0} + params.update(**kwargs) + + return read_csv(path, **params) + + def test_to_csv_from_csv1(self, float_frame, datetime_frame): + with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path: + float_frame.iloc[:5, float_frame.columns.get_loc("A")] = np.nan + + float_frame.to_csv(path) + float_frame.to_csv(path, columns=["A", "B"]) + float_frame.to_csv(path, header=False) + float_frame.to_csv(path, index=False) + + # test roundtrip + # freq does not roundtrip + datetime_frame.index = datetime_frame.index._with_freq(None) + datetime_frame.to_csv(path) + recons = self.read_csv(path, parse_dates=True) + tm.assert_frame_equal(datetime_frame, recons) + + datetime_frame.to_csv(path, index_label="index") + recons = self.read_csv(path, index_col=None, parse_dates=True) + + assert len(recons.columns) == len(datetime_frame.columns) + 1 + + # no index + datetime_frame.to_csv(path, index=False) + recons = self.read_csv(path, index_col=None, parse_dates=True) + tm.assert_almost_equal(datetime_frame.values, recons.values) + + # corner case + dm = DataFrame( + { + "s1": Series(range(3), index=np.arange(3, dtype=np.int64)), + "s2": Series(range(2), index=np.arange(2, dtype=np.int64)), + } + ) + dm.to_csv(path) + + recons = self.read_csv(path) + tm.assert_frame_equal(dm, recons) + + def test_to_csv_from_csv2(self, float_frame): + with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path: + # duplicate index + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=["a", "a", "b"], + columns=["x", "y", "z"], + ) + df.to_csv(path) + result = self.read_csv(path) + tm.assert_frame_equal(result, df) + + midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)]) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=midx, + columns=["x", "y", "z"], + ) + + df.to_csv(path) + result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False) + tm.assert_frame_equal(result, df, check_names=False) + + # column aliases + col_aliases = Index(["AA", "X", "Y", "Z"]) + float_frame.to_csv(path, header=col_aliases) + + rs = self.read_csv(path) + xp = float_frame.copy() + xp.columns = col_aliases + tm.assert_frame_equal(xp, rs) + + msg = "Writing 4 cols but got 2 aliases" + with pytest.raises(ValueError, match=msg): + float_frame.to_csv(path, header=["AA", "X"]) + + def test_to_csv_from_csv3(self): + with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path: + df1 = DataFrame(np.random.default_rng(2).standard_normal((3, 1))) + df2 = DataFrame(np.random.default_rng(2).standard_normal((3, 1))) + + df1.to_csv(path) + df2.to_csv(path, mode="a", header=False) + xp = pd.concat([df1, df2]) + rs 
= read_csv(path, index_col=0) + rs.columns = [int(label) for label in rs.columns] + xp.columns = [int(label) for label in xp.columns] + tm.assert_frame_equal(xp, rs) + + def test_to_csv_from_csv4(self): + with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path: + # GH 10833 (TimedeltaIndex formatting) + dt = pd.Timedelta(seconds=1) + df = DataFrame( + {"dt_data": [i * dt for i in range(3)]}, + index=Index([i * dt for i in range(3)], name="dt_index"), + ) + df.to_csv(path) + + result = read_csv(path, index_col="dt_index") + result.index = pd.to_timedelta(result.index) + result["dt_data"] = pd.to_timedelta(result["dt_data"]) + + tm.assert_frame_equal(df, result, check_index_type=True) + + def test_to_csv_from_csv5(self, timezone_frame): + # tz, 8260 + with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path: + timezone_frame.to_csv(path) + result = read_csv(path, index_col=0, parse_dates=["A"]) + + converter = ( + lambda c: to_datetime(result[c]) + .dt.tz_convert("UTC") + .dt.tz_convert(timezone_frame[c].dt.tz) + ) + result["B"] = converter("B") + result["C"] = converter("C") + tm.assert_frame_equal(result, timezone_frame) + + def test_to_csv_cols_reordering(self): + # GH3454 + chunksize = 5 + N = int(chunksize * 2.5) + + df = tm.makeCustomDataframe(N, 3) + cs = df.columns + cols = [cs[2], cs[0]] + + with tm.ensure_clean() as path: + df.to_csv(path, columns=cols, chunksize=chunksize) + rs_c = read_csv(path, index_col=0) + + tm.assert_frame_equal(df[cols], rs_c, check_names=False) + + @pytest.mark.parametrize("cols", [None, ["b", "a"]]) + def test_to_csv_new_dupe_cols(self, cols): + chunksize = 5 + N = int(chunksize * 2.5) + + # dupe cols + df = tm.makeCustomDataframe(N, 3) + df.columns = ["a", "a", "b"] + with tm.ensure_clean() as path: + df.to_csv(path, columns=cols, chunksize=chunksize) + rs_c = read_csv(path, index_col=0) + + # we wrote them in a different order + # so compare them in that order + if cols is not None: + if df.columns.is_unique: + rs_c.columns = cols + else: + indexer, missing = df.columns.get_indexer_non_unique(cols) + rs_c.columns = df.columns.take(indexer) + + for c in cols: + obj_df = df[c] + obj_rs = rs_c[c] + if isinstance(obj_df, Series): + tm.assert_series_equal(obj_df, obj_rs) + else: + tm.assert_frame_equal(obj_df, obj_rs, check_names=False) + + # wrote in the same order + else: + rs_c.columns = df.columns + tm.assert_frame_equal(df, rs_c, check_names=False) + + @pytest.mark.slow + def test_to_csv_dtnat(self): + # GH3437 + def make_dtnat_arr(n, nnat=None): + if nnat is None: + nnat = int(n * 0.1) # 10% + s = list(date_range("2000", freq="5min", periods=n)) + if nnat: + for i in np.random.default_rng(2).integers(0, len(s), nnat): + s[i] = NaT + i = np.random.default_rng(2).integers(100) + s[-i] = NaT + s[i] = NaT + return s + + chunksize = 1000 + s1 = make_dtnat_arr(chunksize + 5) + s2 = make_dtnat_arr(chunksize + 5, 0) + + with tm.ensure_clean("1.csv") as pth: + df = DataFrame({"a": s1, "b": s2}) + df.to_csv(pth, chunksize=chunksize) + + recons = self.read_csv(pth).apply(to_datetime) + tm.assert_frame_equal(df, recons, check_names=False) + + def _return_result_expected( + self, + df, + chunksize, + r_dtype=None, + c_dtype=None, + rnlvl=None, + cnlvl=None, + dupe_col=False, + ): + kwargs = {"parse_dates": False} + if cnlvl: + if rnlvl is not None: + kwargs["index_col"] = list(range(rnlvl)) + kwargs["header"] = list(range(cnlvl)) + + with tm.ensure_clean("__tmp_to_csv_moar__") as path: + df.to_csv(path, encoding="utf8", chunksize=chunksize) + recons = 
self.read_csv(path, **kwargs)
+        else:
+            kwargs["header"] = 0
+
+            with tm.ensure_clean("__tmp_to_csv_moar__") as path:
+                df.to_csv(path, encoding="utf8", chunksize=chunksize)
+                recons = self.read_csv(path, **kwargs)
+
+        def _to_uni(x):
+            if not isinstance(x, str):
+                return x.decode("utf8")
+            return x
+
+        if dupe_col:
+            # read_csv disambiguates duplicate columns by labeling them
+            # dupe.1, dupe.2, etc.; monkeypatch the columns back
+            recons.columns = df.columns
+        if rnlvl and not cnlvl:
+            delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
+            ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
+            recons.index = ix
+            recons = recons.iloc[:, rnlvl - 1 :]
+
+        type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
+        if r_dtype:
+            if r_dtype == "u":  # unicode
+                r_dtype = "O"
+                recons.index = np.array(
+                    [_to_uni(label) for label in recons.index], dtype=r_dtype
+                )
+                df.index = np.array(
+                    [_to_uni(label) for label in df.index], dtype=r_dtype
+                )
+            elif r_dtype == "dt":  # datetime
+                r_dtype = "O"
+                recons.index = np.array(
+                    [Timestamp(label) for label in recons.index], dtype=r_dtype
+                )
+                df.index = np.array(
+                    [Timestamp(label) for label in df.index], dtype=r_dtype
+                )
+            elif r_dtype == "p":  # period
+                r_dtype = "O"
+                idx_list = to_datetime(recons.index)
+                recons.index = np.array(
+                    [Timestamp(label) for label in idx_list], dtype=r_dtype
+                )
+                df.index = np.array(
+                    list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
+                )
+            else:
+                r_dtype = type_map.get(r_dtype)
+                recons.index = np.array(recons.index, dtype=r_dtype)
+                df.index = np.array(df.index, dtype=r_dtype)
+        if c_dtype:
+            if c_dtype == "u":  # unicode
+                c_dtype = "O"
+                recons.columns = np.array(
+                    [_to_uni(label) for label in recons.columns], dtype=c_dtype
+                )
+                df.columns = np.array(
+                    [_to_uni(label) for label in df.columns], dtype=c_dtype
+                )
+            elif c_dtype == "dt":  # datetime
+                c_dtype = "O"
+                recons.columns = np.array(
+                    [Timestamp(label) for label in recons.columns], dtype=c_dtype
+                )
+                df.columns = np.array(
+                    [Timestamp(label) for label in df.columns], dtype=c_dtype
+                )
+            elif c_dtype == "p":  # period
+                c_dtype = "O"
+                col_list = to_datetime(recons.columns)
+                recons.columns = np.array(
+                    [Timestamp(label) for label in col_list], dtype=c_dtype
+                )
+                col_list = df.columns.to_timestamp()
+                df.columns = np.array(
+                    [Timestamp(label) for label in col_list], dtype=c_dtype
+                )
+            else:
+                c_dtype = type_map.get(c_dtype)
+                recons.columns = np.array(recons.columns, dtype=c_dtype)
+                df.columns = np.array(df.columns, dtype=c_dtype)
+        return df, recons
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize(
+        "nrows", [2, 10, 99, 100, 101, 102, 198, 199, 200, 201, 202, 249, 250, 251]
+    )
+    def test_to_csv_nrows(self, nrows):
+        df = tm.makeCustomDataframe(nrows, 4, r_idx_type="dt", c_idx_type="s")
+        result, expected = self._return_result_expected(df, 1000, "dt", "s")
+        tm.assert_frame_equal(result, expected, check_names=False)
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize(
+        "nrows", [2, 10, 99, 100, 101, 102, 198, 199, 200, 201, 202, 249, 250, 251]
+    )
+    @pytest.mark.parametrize(
+        "r_idx_type, c_idx_type", [("i", "i"), ("s", "s"), ("s", "dt"), ("p", "p")]
+    )
+    @pytest.mark.parametrize("ncols", [1, 2, 3, 4])
+    @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+    def test_to_csv_idx_types(self, nrows, r_idx_type, c_idx_type, ncols):
+        df = tm.makeCustomDataframe(
+            nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
+        )
+        result, expected = self._return_result_expected(
+            df,
+            1000,
+            r_idx_type,
+            c_idx_type,
+        )
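+        # the custom frame's index/column names do not survive the CSV
+        # round trip, hence check_names=False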
+ tm.assert_frame_equal(result, expected, check_names=False) + + @pytest.mark.slow + @pytest.mark.parametrize( + "nrows", [10, 98, 99, 100, 101, 102, 198, 199, 200, 201, 202, 249, 250, 251] + ) + @pytest.mark.parametrize("ncols", [1, 2, 3, 4]) + def test_to_csv_idx_ncols(self, nrows, ncols): + df = tm.makeCustomDataframe(nrows, ncols) + result, expected = self._return_result_expected(df, 1000) + tm.assert_frame_equal(result, expected, check_names=False) + + @pytest.mark.slow + @pytest.mark.parametrize("nrows", [10, 98, 99, 100, 101, 102]) + def test_to_csv_dup_cols(self, nrows): + df = tm.makeCustomDataframe(nrows, 3) + cols = list(df.columns) + cols[:2] = ["dupe", "dupe"] + cols[-2:] = ["dupe", "dupe"] + ix = list(df.index) + ix[:2] = ["rdupe", "rdupe"] + ix[-2:] = ["rdupe", "rdupe"] + df.index = ix + df.columns = cols + result, expected = self._return_result_expected(df, 1000, dupe_col=True) + tm.assert_frame_equal(result, expected, check_names=False) + + @pytest.mark.slow + def test_to_csv_empty(self): + df = DataFrame(index=np.arange(10, dtype=np.int64)) + result, expected = self._return_result_expected(df, 1000) + tm.assert_frame_equal(result, expected, check_column_type=False) + + @pytest.mark.slow + def test_to_csv_chunksize(self): + chunksize = 1000 + df = tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2) + result, expected = self._return_result_expected(df, chunksize, rnlvl=2) + tm.assert_frame_equal(result, expected, check_names=False) + + @pytest.mark.slow + @pytest.mark.parametrize( + "nrows", [2, 10, 99, 100, 101, 102, 198, 199, 200, 201, 202, 249, 250, 251] + ) + @pytest.mark.parametrize("ncols", [2, 3, 4]) + @pytest.mark.parametrize( + "df_params, func_params", + [ + [{"r_idx_nlevels": 2}, {"rnlvl": 2}], + [{"c_idx_nlevels": 2}, {"cnlvl": 2}], + [{"r_idx_nlevels": 2, "c_idx_nlevels": 2}, {"rnlvl": 2, "cnlvl": 2}], + ], + ) + def test_to_csv_params(self, nrows, df_params, func_params, ncols): + df = tm.makeCustomDataframe(nrows, ncols, **df_params) + result, expected = self._return_result_expected(df, 1000, **func_params) + tm.assert_frame_equal(result, expected, check_names=False) + + def test_to_csv_from_csv_w_some_infs(self, float_frame): + # test roundtrip with inf, -inf, nan, as full columns and mix + float_frame["G"] = np.nan + f = lambda x: [np.inf, np.nan][np.random.default_rng(2).random() < 0.5] + float_frame["H"] = float_frame.index.map(f) + + with tm.ensure_clean() as path: + float_frame.to_csv(path) + recons = self.read_csv(path) + + tm.assert_frame_equal(float_frame, recons) + tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons)) + + def test_to_csv_from_csv_w_all_infs(self, float_frame): + # test roundtrip with inf, -inf, nan, as full columns and mix + float_frame["E"] = np.inf + float_frame["F"] = -np.inf + + with tm.ensure_clean() as path: + float_frame.to_csv(path) + recons = self.read_csv(path) + + tm.assert_frame_equal(float_frame, recons) + tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons)) + + def test_to_csv_no_index(self): + # GH 3624, after appending columns, to_csv fails + with tm.ensure_clean("__tmp_to_csv_no_index__") as path: + df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]}) + df.to_csv(path, index=False) + result = read_csv(path) + tm.assert_frame_equal(df, result) + df["c3"] = Series([7, 8, 9], dtype="int64") + df.to_csv(path, index=False) + result = read_csv(path) + tm.assert_frame_equal(df, result) + + def test_to_csv_with_mix_columns(self): + # gh-11637: incorrect output when a mix of integer and string 
column + # names passed as columns parameter in to_csv + + df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]}) + df["test"] = "txt" + assert df.to_csv() == df.to_csv(columns=[0, 1, "test"]) + + def test_to_csv_headers(self): + # GH6186, the presence or absence of `index` incorrectly + # causes to_csv to have different header semantics. + from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) + to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"]) + with tm.ensure_clean("__tmp_to_csv_headers__") as path: + from_df.to_csv(path, header=["X", "Y"]) + recons = self.read_csv(path) + + tm.assert_frame_equal(to_df, recons) + + from_df.to_csv(path, index=False, header=["X", "Y"]) + recons = self.read_csv(path) + + return_value = recons.reset_index(inplace=True) + assert return_value is None + tm.assert_frame_equal(to_df, recons) + + def test_to_csv_multiindex(self, float_frame, datetime_frame): + frame = float_frame + old_index = frame.index + arrays = np.arange(len(old_index) * 2, dtype=np.int64).reshape(2, -1) + new_index = MultiIndex.from_arrays(arrays, names=["first", "second"]) + frame.index = new_index + + with tm.ensure_clean("__tmp_to_csv_multiindex__") as path: + frame.to_csv(path, header=False) + frame.to_csv(path, columns=["A", "B"]) + + # round trip + frame.to_csv(path) + + df = self.read_csv(path, index_col=[0, 1], parse_dates=False) + + # TODO to_csv drops column name + tm.assert_frame_equal(frame, df, check_names=False) + assert frame.index.names == df.index.names + + # needed if setUp becomes a class method + float_frame.index = old_index + + # try multiindex with dates + tsframe = datetime_frame + old_index = tsframe.index + new_index = [old_index, np.arange(len(old_index), dtype=np.int64)] + tsframe.index = MultiIndex.from_arrays(new_index) + + tsframe.to_csv(path, index_label=["time", "foo"]) + with tm.assert_produces_warning( + UserWarning, match="Could not infer format" + ): + recons = self.read_csv(path, index_col=[0, 1], parse_dates=True) + + # TODO to_csv drops column name + tm.assert_frame_equal(tsframe, recons, check_names=False) + + # do not load index + tsframe.to_csv(path) + recons = self.read_csv(path, index_col=None) + assert len(recons.columns) == len(tsframe.columns) + 2 + + # no index + tsframe.to_csv(path, index=False) + recons = self.read_csv(path, index_col=None) + tm.assert_almost_equal(recons.values, datetime_frame.values) + + # needed if setUp becomes class method + datetime_frame.index = old_index + + with tm.ensure_clean("__tmp_to_csv_multiindex__") as path: + # GH3571, GH1651, GH3141 + + def _make_frame(names=None): + if names is True: + names = ["first", "second"] + return DataFrame( + np.random.default_rng(2).integers(0, 10, size=(3, 3)), + columns=MultiIndex.from_tuples( + [("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names + ), + dtype="int64", + ) + + # column & index are multi-index + df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4) + df.to_csv(path) + result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1]) + tm.assert_frame_equal(df, result) + + # column is mi + df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=1, c_idx_nlevels=4) + df.to_csv(path) + result = read_csv(path, header=[0, 1, 2, 3], index_col=0) + tm.assert_frame_equal(df, result) + + # dup column names? 
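+            # even a 3-level row index with 4-level columns round-trips, as
+            # long as header/index_col list every level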
+ df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=3, c_idx_nlevels=4) + df.to_csv(path) + result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1, 2]) + tm.assert_frame_equal(df, result) + + # writing with no index + df = _make_frame() + df.to_csv(path, index=False) + result = read_csv(path, header=[0, 1]) + tm.assert_frame_equal(df, result) + + # we lose the names here + df = _make_frame(True) + df.to_csv(path, index=False) + result = read_csv(path, header=[0, 1]) + assert com.all_none(*result.columns.names) + result.columns.names = df.columns.names + tm.assert_frame_equal(df, result) + + # whatsnew example + df = _make_frame() + df.to_csv(path) + result = read_csv(path, header=[0, 1], index_col=[0]) + tm.assert_frame_equal(df, result) + + df = _make_frame(True) + df.to_csv(path) + result = read_csv(path, header=[0, 1], index_col=[0]) + tm.assert_frame_equal(df, result) + + # invalid options + df = _make_frame(True) + df.to_csv(path) + + for i in [6, 7]: + msg = f"len of {i}, but only 5 lines in file" + with pytest.raises(ParserError, match=msg): + read_csv(path, header=list(range(i)), index_col=0) + + # write with cols + msg = "cannot specify cols with a MultiIndex" + with pytest.raises(TypeError, match=msg): + df.to_csv(path, columns=["foo", "bar"]) + + with tm.ensure_clean("__tmp_to_csv_multiindex__") as path: + # empty + tsframe[:0].to_csv(path) + recons = self.read_csv(path) + + exp = tsframe[:0] + exp.index = [] + + tm.assert_index_equal(recons.columns, exp.columns) + assert len(recons) == 0 + + def test_to_csv_interval_index(self): + # GH 28210 + df = DataFrame({"A": list("abc"), "B": range(3)}, index=pd.interval_range(0, 3)) + + with tm.ensure_clean("__tmp_to_csv_interval_index__.csv") as path: + df.to_csv(path) + result = self.read_csv(path, index_col=0) + + # can't roundtrip intervalindex via read_csv so check string repr (GH 23595) + expected = df.copy() + expected.index = expected.index.astype(str) + + tm.assert_frame_equal(result, expected) + + def test_to_csv_float32_nanrep(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((1, 4)).astype(np.float32) + ) + df[1] = np.nan + + with tm.ensure_clean("__tmp_to_csv_float32_nanrep__.csv") as path: + df.to_csv(path, na_rep=999) + + with open(path, encoding="utf-8") as f: + lines = f.readlines() + assert lines[1].split(",")[2] == "999" + + def test_to_csv_withcommas(self): + # Commas inside fields should be correctly escaped when saving as CSV. 
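+        # e.g. the field 5,6 must be written quoted as "5,6" so that read_csv
+        # recovers two columns, not three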
+ df = DataFrame({"A": [1, 2, 3], "B": ["5,6", "7,8", "9,0"]}) + + with tm.ensure_clean("__tmp_to_csv_withcommas__.csv") as path: + df.to_csv(path) + df2 = self.read_csv(path) + tm.assert_frame_equal(df2, df) + + def test_to_csv_mixed(self): + def create_cols(name): + return [f"{name}{i:03d}" for i in range(5)] + + df_float = DataFrame( + np.random.default_rng(2).standard_normal((100, 5)), + dtype="float64", + columns=create_cols("float"), + ) + df_int = DataFrame( + np.random.default_rng(2).standard_normal((100, 5)).astype("int64"), + dtype="int64", + columns=create_cols("int"), + ) + df_bool = DataFrame(True, index=df_float.index, columns=create_cols("bool")) + df_object = DataFrame( + "foo", index=df_float.index, columns=create_cols("object") + ) + df_dt = DataFrame( + Timestamp("20010101").as_unit("ns"), + index=df_float.index, + columns=create_cols("date"), + ) + + # add in some nans + df_float.iloc[30:50, 1:3] = np.nan + df_dt.iloc[30:50, 1:3] = np.nan + + df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1) + + # dtype + dtypes = {} + for n, dtype in [ + ("float", np.float64), + ("int", np.int64), + ("bool", np.bool_), + ("object", object), + ]: + for c in create_cols(n): + dtypes[c] = dtype + + with tm.ensure_clean() as filename: + df.to_csv(filename) + rs = read_csv( + filename, index_col=0, dtype=dtypes, parse_dates=create_cols("date") + ) + tm.assert_frame_equal(rs, df) + + def test_to_csv_dups_cols(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((1000, 30)), + columns=list(range(15)) + list(range(15)), + dtype="float64", + ) + + with tm.ensure_clean() as filename: + df.to_csv(filename) # single dtype, fine + result = read_csv(filename, index_col=0) + result.columns = df.columns + tm.assert_frame_equal(result, df) + + df_float = DataFrame( + np.random.default_rng(2).standard_normal((1000, 3)), dtype="float64" + ) + df_int = DataFrame(np.random.default_rng(2).standard_normal((1000, 3))).astype( + "int64" + ) + df_bool = DataFrame(True, index=df_float.index, columns=range(3)) + df_object = DataFrame("foo", index=df_float.index, columns=range(3)) + df_dt = DataFrame( + Timestamp("20010101").as_unit("ns"), index=df_float.index, columns=range(3) + ) + df = pd.concat( + [df_float, df_int, df_bool, df_object, df_dt], axis=1, ignore_index=True + ) + + df.columns = [0, 1, 2] * 5 + + with tm.ensure_clean() as filename: + df.to_csv(filename) + result = read_csv(filename, index_col=0) + + # date cols + for i in ["0.4", "1.4", "2.4"]: + result[i] = to_datetime(result[i]) + + result.columns = df.columns + tm.assert_frame_equal(result, df) + + # GH3457 + + N = 10 + df = tm.makeCustomDataframe(N, 3) + df.columns = ["a", "a", "b"] + + with tm.ensure_clean() as filename: + df.to_csv(filename) + + # read_csv will rename the dups columns + result = read_csv(filename, index_col=0) + result = result.rename(columns={"a.1": "a"}) + tm.assert_frame_equal(result, df) + + @pytest.mark.parametrize("chunksize", [10000, 50000, 100000]) + def test_to_csv_chunking(self, chunksize): + aa = DataFrame({"A": range(100000)}) + aa["B"] = aa.A + 1.0 + aa["C"] = aa.A + 2.0 + aa["D"] = aa.A + 3.0 + + with tm.ensure_clean() as filename: + aa.to_csv(filename, chunksize=chunksize) + rs = read_csv(filename, index_col=0) + tm.assert_frame_equal(rs, aa) + + @pytest.mark.slow + def test_to_csv_wide_frame_formatting(self, monkeypatch): + # Issue #8621 + chunksize = 100 + df = DataFrame( + np.random.default_rng(2).standard_normal((1, chunksize + 10)), + columns=None, + index=None, + ) + with 
tm.ensure_clean() as filename: + with monkeypatch.context() as m: + m.setattr("pandas.io.formats.csvs._DEFAULT_CHUNKSIZE_CELLS", chunksize) + df.to_csv(filename, header=False, index=False) + rs = read_csv(filename, header=None) + tm.assert_frame_equal(rs, df) + + def test_to_csv_bug(self): + f1 = StringIO("a,1.0\nb,2.0") + df = self.read_csv(f1, header=None) + newdf = DataFrame({"t": df[df.columns[0]]}) + + with tm.ensure_clean() as path: + newdf.to_csv(path) + + recons = read_csv(path, index_col=0) + # don't check_names as t != 1 + tm.assert_frame_equal(recons, newdf, check_names=False) + + def test_to_csv_unicode(self): + df = DataFrame({"c/\u03c3": [1, 2, 3]}) + with tm.ensure_clean() as path: + df.to_csv(path, encoding="UTF-8") + df2 = read_csv(path, index_col=0, encoding="UTF-8") + tm.assert_frame_equal(df, df2) + + df.to_csv(path, encoding="UTF-8", index=False) + df2 = read_csv(path, index_col=None, encoding="UTF-8") + tm.assert_frame_equal(df, df2) + + def test_to_csv_unicode_index_col(self): + buf = StringIO("") + df = DataFrame( + [["\u05d0", "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]], + columns=["\u05d0", "\u05d1", "\u05d2", "\u05d3"], + index=["\u05d0", "\u05d1"], + ) + + df.to_csv(buf, encoding="UTF-8") + buf.seek(0) + + df2 = read_csv(buf, index_col=0, encoding="UTF-8") + tm.assert_frame_equal(df, df2) + + def test_to_csv_stringio(self, float_frame): + buf = StringIO() + float_frame.to_csv(buf) + buf.seek(0) + recons = read_csv(buf, index_col=0) + tm.assert_frame_equal(recons, float_frame) + + def test_to_csv_float_format(self): + df = DataFrame( + [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + + with tm.ensure_clean() as filename: + df.to_csv(filename, float_format="%.2f") + + rs = read_csv(filename, index_col=0) + xp = DataFrame( + [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + tm.assert_frame_equal(rs, xp) + + def test_to_csv_float_format_over_decimal(self): + # GH#47436 + df = DataFrame({"a": [0.5, 1.0]}) + result = df.to_csv( + decimal=",", + float_format=lambda x: np.format_float_positional(x, trim="-"), + index=False, + ) + expected_rows = ["a", "0.5", "1"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert result == expected + + def test_to_csv_unicodewriter_quoting(self): + df = DataFrame({"A": [1, 2, 3], "B": ["foo", "bar", "baz"]}) + + buf = StringIO() + df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC, encoding="utf-8") + + result = buf.getvalue() + expected_rows = ['"A","B"', '1,"foo"', '2,"bar"', '3,"baz"'] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert result == expected + + @pytest.mark.parametrize("encoding", [None, "utf-8"]) + def test_to_csv_quote_none(self, encoding): + # GH4328 + df = DataFrame({"A": ["hello", '{"hello"}']}) + buf = StringIO() + df.to_csv(buf, quoting=csv.QUOTE_NONE, encoding=encoding, index=False) + + result = buf.getvalue() + expected_rows = ["A", "hello", '{"hello"}'] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert result == expected + + def test_to_csv_index_no_leading_comma(self): + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"]) + + buf = StringIO() + df.to_csv(buf, index_label=False) + + expected_rows = ["A,B", "one,1,4", "two,2,5", "three,3,6"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert buf.getvalue() == expected + + def test_to_csv_lineterminators(self): + # see gh-20353 + df 
= DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"]) + + with tm.ensure_clean() as path: + # case 1: CRLF as line terminator + df.to_csv(path, lineterminator="\r\n") + expected = b",A,B\r\none,1,4\r\ntwo,2,5\r\nthree,3,6\r\n" + + with open(path, mode="rb") as f: + assert f.read() == expected + + with tm.ensure_clean() as path: + # case 2: LF as line terminator + df.to_csv(path, lineterminator="\n") + expected = b",A,B\none,1,4\ntwo,2,5\nthree,3,6\n" + + with open(path, mode="rb") as f: + assert f.read() == expected + + with tm.ensure_clean() as path: + # case 3: The default line terminator(=os.linesep)(gh-21406) + df.to_csv(path) + os_linesep = os.linesep.encode("utf-8") + expected = ( + b",A,B" + + os_linesep + + b"one,1,4" + + os_linesep + + b"two,2,5" + + os_linesep + + b"three,3,6" + + os_linesep + ) + + with open(path, mode="rb") as f: + assert f.read() == expected + + def test_to_csv_from_csv_categorical(self): + # CSV with categoricals should result in the same output + # as when one would add a "normal" Series/DataFrame. + s = Series(pd.Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])) + s2 = Series(["a", "b", "b", "a", "a", "c", "c", "c"]) + res = StringIO() + + s.to_csv(res, header=False) + exp = StringIO() + + s2.to_csv(exp, header=False) + assert res.getvalue() == exp.getvalue() + + df = DataFrame({"s": s}) + df2 = DataFrame({"s": s2}) + + res = StringIO() + df.to_csv(res) + + exp = StringIO() + df2.to_csv(exp) + + assert res.getvalue() == exp.getvalue() + + def test_to_csv_path_is_none(self, float_frame): + # GH 8215 + # Make sure we return string for consistency with + # Series.to_csv() + csv_str = float_frame.to_csv(path_or_buf=None) + assert isinstance(csv_str, str) + recons = read_csv(StringIO(csv_str), index_col=0) + tm.assert_frame_equal(float_frame, recons) + + @pytest.mark.parametrize( + "df,encoding", + [ + ( + DataFrame( + [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ), + None, + ), + # GH 21241, 21118 + (DataFrame([["abc", "def", "ghi"]], columns=["X", "Y", "Z"]), "ascii"), + (DataFrame(5 * [[123, "你好", "世界"]], columns=["X", "Y", "Z"]), "gb2312"), + ( + DataFrame( + 5 * [[123, "Γειά σου", "Κόσμε"]], # noqa: RUF001 + columns=["X", "Y", "Z"], + ), + "cp737", + ), + ], + ) + def test_to_csv_compression(self, df, encoding, compression): + with tm.ensure_clean() as filename: + df.to_csv(filename, compression=compression, encoding=encoding) + # test the round trip - to_csv -> read_csv + result = read_csv( + filename, compression=compression, index_col=0, encoding=encoding + ) + tm.assert_frame_equal(df, result) + + # test the round trip using file handle - to_csv -> read_csv + with get_handle( + filename, "w", compression=compression, encoding=encoding + ) as handles: + df.to_csv(handles.handle, encoding=encoding) + assert not handles.handle.closed + + result = read_csv( + filename, + compression=compression, + encoding=encoding, + index_col=0, + ).squeeze("columns") + tm.assert_frame_equal(df, result) + + # explicitly make sure file is compressed + with tm.decompress_file(filename, compression) as fh: + text = fh.read().decode(encoding or "utf8") + for col in df.columns: + assert col in text + + with tm.decompress_file(filename, compression) as fh: + tm.assert_frame_equal(df, read_csv(fh, index_col=0, encoding=encoding)) + + def test_to_csv_date_format(self, datetime_frame): + with tm.ensure_clean("__tmp_to_csv_date_format__") as path: + dt_index = datetime_frame.index + 
datetime_frame = DataFrame( + {"A": dt_index, "B": dt_index.shift(1)}, index=dt_index + ) + datetime_frame.to_csv(path, date_format="%Y%m%d") + + # Check that the data was put in the specified format + test = read_csv(path, index_col=0) + + datetime_frame_int = datetime_frame.map(lambda x: int(x.strftime("%Y%m%d"))) + datetime_frame_int.index = datetime_frame_int.index.map( + lambda x: int(x.strftime("%Y%m%d")) + ) + + tm.assert_frame_equal(test, datetime_frame_int) + + datetime_frame.to_csv(path, date_format="%Y-%m-%d") + + # Check that the data was put in the specified format + test = read_csv(path, index_col=0) + datetime_frame_str = datetime_frame.map(lambda x: x.strftime("%Y-%m-%d")) + datetime_frame_str.index = datetime_frame_str.index.map( + lambda x: x.strftime("%Y-%m-%d") + ) + + tm.assert_frame_equal(test, datetime_frame_str) + + # Check that columns get converted + datetime_frame_columns = datetime_frame.T + datetime_frame_columns.to_csv(path, date_format="%Y%m%d") + + test = read_csv(path, index_col=0) + + datetime_frame_columns = datetime_frame_columns.map( + lambda x: int(x.strftime("%Y%m%d")) + ) + # Columns don't get converted to ints by read_csv + datetime_frame_columns.columns = datetime_frame_columns.columns.map( + lambda x: x.strftime("%Y%m%d") + ) + + tm.assert_frame_equal(test, datetime_frame_columns) + + # test NaTs + nat_index = to_datetime( + ["NaT"] * 10 + ["2000-01-01", "2000-01-01", "2000-01-01"] + ) + nat_frame = DataFrame({"A": nat_index}, index=nat_index) + nat_frame.to_csv(path, date_format="%Y-%m-%d") + + test = read_csv(path, parse_dates=[0, 1], index_col=0) + + tm.assert_frame_equal(test, nat_frame) + + @pytest.mark.parametrize("td", [pd.Timedelta(0), pd.Timedelta("10s")]) + def test_to_csv_with_dst_transitions(self, td): + with tm.ensure_clean("csv_date_format_with_dst") as path: + # make sure we are not failing on transitions + times = date_range( + "2013-10-26 23:00", + "2013-10-27 01:00", + tz="Europe/London", + freq="H", + ambiguous="infer", + ) + i = times + td + i = i._with_freq(None) # freq is not preserved by read_csv + time_range = np.array(range(len(i)), dtype="int64") + df = DataFrame({"A": time_range}, index=i) + df.to_csv(path, index=True) + # we have to reconvert the index as we + # don't parse the tz's + result = read_csv(path, index_col=0) + result.index = to_datetime(result.index, utc=True).tz_convert( + "Europe/London" + ) + tm.assert_frame_equal(result, df) + + def test_to_csv_with_dst_transitions_with_pickle(self): + # GH11619 + idx = date_range("2015-01-01", "2015-12-31", freq="H", tz="Europe/Paris") + idx = idx._with_freq(None) # freq does not round-trip + idx._data._freq = None # otherwise there is trouble on unpickle + df = DataFrame({"values": 1, "idx": idx}, index=idx) + with tm.ensure_clean("csv_date_format_with_dst") as path: + df.to_csv(path, index=True) + result = read_csv(path, index_col=0) + result.index = to_datetime(result.index, utc=True).tz_convert( + "Europe/Paris" + ) + result["idx"] = to_datetime(result["idx"], utc=True).astype( + "datetime64[ns, Europe/Paris]" + ) + tm.assert_frame_equal(result, df) + + # assert working + df.astype(str) + + with tm.ensure_clean("csv_date_format_with_dst") as path: + df.to_pickle(path) + result = pd.read_pickle(path) + tm.assert_frame_equal(result, df) + + def test_to_csv_quoting(self): + df = DataFrame( + { + "c_bool": [True, False], + "c_float": [1.0, 3.2], + "c_int": [42, np.nan], + "c_string": ["a", "b,c"], + } + ) + + expected_rows = [ + ",c_bool,c_float,c_int,c_string", + 
"0,True,1.0,42.0,a", + '1,False,3.2,,"b,c"', + ] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + + result = df.to_csv() + assert result == expected + + result = df.to_csv(quoting=None) + assert result == expected + + expected_rows = [ + ",c_bool,c_float,c_int,c_string", + "0,True,1.0,42.0,a", + '1,False,3.2,,"b,c"', + ] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + + result = df.to_csv(quoting=csv.QUOTE_MINIMAL) + assert result == expected + + expected_rows = [ + '"","c_bool","c_float","c_int","c_string"', + '"0","True","1.0","42.0","a"', + '"1","False","3.2","","b,c"', + ] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + + result = df.to_csv(quoting=csv.QUOTE_ALL) + assert result == expected + + # see gh-12922, gh-13259: make sure changes to + # the formatters do not break this behaviour + expected_rows = [ + '"","c_bool","c_float","c_int","c_string"', + '0,True,1.0,42.0,"a"', + '1,False,3.2,"","b,c"', + ] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + result = df.to_csv(quoting=csv.QUOTE_NONNUMERIC) + assert result == expected + + msg = "need to escape, but no escapechar set" + with pytest.raises(csv.Error, match=msg): + df.to_csv(quoting=csv.QUOTE_NONE) + + with pytest.raises(csv.Error, match=msg): + df.to_csv(quoting=csv.QUOTE_NONE, escapechar=None) + + expected_rows = [ + ",c_bool,c_float,c_int,c_string", + "0,True,1.0,42.0,a", + "1,False,3.2,,b!,c", + ] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + result = df.to_csv(quoting=csv.QUOTE_NONE, escapechar="!") + assert result == expected + + expected_rows = [ + ",c_bool,c_ffloat,c_int,c_string", + "0,True,1.0,42.0,a", + "1,False,3.2,,bf,c", + ] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + result = df.to_csv(quoting=csv.QUOTE_NONE, escapechar="f") + assert result == expected + + # see gh-3503: quoting Windows line terminators + # presents with encoding? 
+ text_rows = ["a,b,c", '1,"test \r\n",3'] + text = tm.convert_rows_list_to_csv_str(text_rows) + df = read_csv(StringIO(text)) + + buf = StringIO() + df.to_csv(buf, encoding="utf-8", index=False) + assert buf.getvalue() == text + + # xref gh-7791: make sure the quoting parameter is passed through + # with multi-indexes + df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}) + df = df.set_index(["a", "b"]) + + expected_rows = ['"a","b","c"', '"1","3","5"', '"2","4","6"'] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert df.to_csv(quoting=csv.QUOTE_ALL) == expected + + def test_period_index_date_overflow(self): + # see gh-15982 + + dates = ["1990-01-01", "2000-01-01", "3005-01-01"] + index = pd.PeriodIndex(dates, freq="D") + + df = DataFrame([4, 5, 6], index=index) + result = df.to_csv() + + expected_rows = [",0", "1990-01-01,4", "2000-01-01,5", "3005-01-01,6"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert result == expected + + date_format = "%m-%d-%Y" + result = df.to_csv(date_format=date_format) + + expected_rows = [",0", "01-01-1990,4", "01-01-2000,5", "01-01-3005,6"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert result == expected + + # Overflow with pd.NaT + dates = ["1990-01-01", NaT, "3005-01-01"] + index = pd.PeriodIndex(dates, freq="D") + + df = DataFrame([4, 5, 6], index=index) + result = df.to_csv() + + expected_rows = [",0", "1990-01-01,4", ",5", "3005-01-01,6"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert result == expected + + def test_multi_index_header(self): + # see gh-5539 + columns = MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)]) + df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]]) + df.columns = columns + + header = ["a", "b", "c", "d"] + result = df.to_csv(header=header) + + expected_rows = [",a,b,c,d", "0,1,2,3,4", "1,5,6,7,8"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert result == expected + + def test_to_csv_single_level_multi_index(self): + # see gh-26303 + index = Index([(1,), (2,), (3,)]) + df = DataFrame([[1, 2, 3]], columns=index) + df = df.reindex(columns=[(1,), (3,)]) + expected = ",1,3\n0,1,3\n" + result = df.to_csv(lineterminator="\n") + tm.assert_almost_equal(result, expected) + + def test_gz_lineend(self): + # GH 25311 + df = DataFrame({"a": [1, 2]}) + expected_rows = ["a", "1", "2"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + with tm.ensure_clean("__test_gz_lineend.csv.gz") as path: + df.to_csv(path, index=False) + with tm.decompress_file(path, compression="gzip") as f: + result = f.read().decode("utf-8") + + assert result == expected + + def test_to_csv_numpy_16_bug(self): + frame = DataFrame({"a": date_range("1/1/2000", periods=10)}) + + buf = StringIO() + frame.to_csv(buf) + + result = buf.getvalue() + assert "2000-01-01" in result + + def test_to_csv_na_quoting(self): + # GH 15891 + # Normalize carriage return for Windows OS + result = ( + DataFrame([None, None]) + .to_csv(None, header=False, index=False, na_rep="") + .replace("\r\n", "\n") + ) + expected = '""\n""\n' + assert result == expected + + def test_to_csv_categorical_and_ea(self): + # GH#46812 + df = DataFrame({"a": "x", "b": [1, pd.NA]}) + df["b"] = df["b"].astype("Int16") + df["b"] = df["b"].astype("category") + result = df.to_csv() + expected_rows = [",a,b", "0,x,1", "1,x,"] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert result == expected + + def test_to_csv_categorical_and_interval(self): + # GH#46297 + df = DataFrame( + { 
+ "a": [ + pd.Interval( + Timestamp("2020-01-01"), + Timestamp("2020-01-02"), + closed="both", + ) + ] + } + ) + df["a"] = df["a"].astype("category") + result = df.to_csv() + expected_rows = [",a", '0,"[2020-01-01, 2020-01-02]"'] + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_dict.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_dict.py new file mode 100644 index 00000000..1446a74b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_dict.py @@ -0,0 +1,496 @@ +from collections import ( + OrderedDict, + defaultdict, +) +from datetime import datetime + +import numpy as np +import pytest +import pytz + +from pandas import ( + NA, + DataFrame, + Index, + MultiIndex, + Series, + Timestamp, +) +import pandas._testing as tm + + +class TestDataFrameToDict: + def test_to_dict_timestamp(self): + # GH#11247 + # split/records producing np.datetime64 rather than Timestamps + # on datetime64[ns] dtypes only + + tsmp = Timestamp("20130101") + test_data = DataFrame({"A": [tsmp, tsmp], "B": [tsmp, tsmp]}) + test_data_mixed = DataFrame({"A": [tsmp, tsmp], "B": [1, 2]}) + + expected_records = [{"A": tsmp, "B": tsmp}, {"A": tsmp, "B": tsmp}] + expected_records_mixed = [{"A": tsmp, "B": 1}, {"A": tsmp, "B": 2}] + + assert test_data.to_dict(orient="records") == expected_records + assert test_data_mixed.to_dict(orient="records") == expected_records_mixed + + expected_series = { + "A": Series([tsmp, tsmp], name="A"), + "B": Series([tsmp, tsmp], name="B"), + } + expected_series_mixed = { + "A": Series([tsmp, tsmp], name="A"), + "B": Series([1, 2], name="B"), + } + + tm.assert_dict_equal(test_data.to_dict(orient="series"), expected_series) + tm.assert_dict_equal( + test_data_mixed.to_dict(orient="series"), expected_series_mixed + ) + + expected_split = { + "index": [0, 1], + "data": [[tsmp, tsmp], [tsmp, tsmp]], + "columns": ["A", "B"], + } + expected_split_mixed = { + "index": [0, 1], + "data": [[tsmp, 1], [tsmp, 2]], + "columns": ["A", "B"], + } + + tm.assert_dict_equal(test_data.to_dict(orient="split"), expected_split) + tm.assert_dict_equal( + test_data_mixed.to_dict(orient="split"), expected_split_mixed + ) + + def test_to_dict_index_not_unique_with_index_orient(self): + # GH#22801 + # Data loss when indexes are not unique. Raise ValueError. 
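+        # orient="index" keys the result dict by index label, so duplicate
+        # labels would silently collapse into a single entry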
+ df = DataFrame({"a": [1, 2], "b": [0.5, 0.75]}, index=["A", "A"]) + msg = "DataFrame index must be unique for orient='index'" + with pytest.raises(ValueError, match=msg): + df.to_dict(orient="index") + + def test_to_dict_invalid_orient(self): + df = DataFrame({"A": [0, 1]}) + msg = "orient 'xinvalid' not understood" + with pytest.raises(ValueError, match=msg): + df.to_dict(orient="xinvalid") + + @pytest.mark.parametrize("orient", ["d", "l", "r", "sp", "s", "i"]) + def test_to_dict_short_orient_raises(self, orient): + # GH#32515 + df = DataFrame({"A": [0, 1]}) + with pytest.raises(ValueError, match="not understood"): + df.to_dict(orient=orient) + + @pytest.mark.parametrize("mapping", [dict, defaultdict(list), OrderedDict]) + def test_to_dict(self, mapping): + # orient= should only take the listed options + # see GH#32515 + test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}} + + # GH#16122 + recons_data = DataFrame(test_data).to_dict(into=mapping) + + for k, v in test_data.items(): + for k2, v2 in v.items(): + assert v2 == recons_data[k][k2] + + recons_data = DataFrame(test_data).to_dict("list", mapping) + + for k, v in test_data.items(): + for k2, v2 in v.items(): + assert v2 == recons_data[k][int(k2) - 1] + + recons_data = DataFrame(test_data).to_dict("series", mapping) + + for k, v in test_data.items(): + for k2, v2 in v.items(): + assert v2 == recons_data[k][k2] + + recons_data = DataFrame(test_data).to_dict("split", mapping) + expected_split = { + "columns": ["A", "B"], + "index": ["1", "2", "3"], + "data": [[1.0, "1"], [2.0, "2"], [np.nan, "3"]], + } + tm.assert_dict_equal(recons_data, expected_split) + + recons_data = DataFrame(test_data).to_dict("records", mapping) + expected_records = [ + {"A": 1.0, "B": "1"}, + {"A": 2.0, "B": "2"}, + {"A": np.nan, "B": "3"}, + ] + assert isinstance(recons_data, list) + assert len(recons_data) == 3 + for left, right in zip(recons_data, expected_records): + tm.assert_dict_equal(left, right) + + # GH#10844 + recons_data = DataFrame(test_data).to_dict("index") + + for k, v in test_data.items(): + for k2, v2 in v.items(): + assert v2 == recons_data[k2][k] + + df = DataFrame(test_data) + df["duped"] = df[df.columns[0]] + recons_data = df.to_dict("index") + comp_data = test_data.copy() + comp_data["duped"] = comp_data[df.columns[0]] + for k, v in comp_data.items(): + for k2, v2 in v.items(): + assert v2 == recons_data[k2][k] + + @pytest.mark.parametrize("mapping", [list, defaultdict, []]) + def test_to_dict_errors(self, mapping): + # GH#16122 + df = DataFrame(np.random.default_rng(2).standard_normal((3, 3))) + msg = "|".join( + [ + "unsupported type: ", + r"to_dict\(\) only accepts initialized defaultdicts", + ] + ) + with pytest.raises(TypeError, match=msg): + df.to_dict(into=mapping) + + def test_to_dict_not_unique_warning(self): + # GH#16927: When converting to a dict, if a column has a non-unique name + # it will be dropped, throwing a warning. 
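+        # with duplicate column names, only one "a" can become a dict key, so
+        # data is silently lost apart from the warning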
+ df = DataFrame([[1, 2, 3]], columns=["a", "a", "b"]) + with tm.assert_produces_warning(UserWarning): + df.to_dict() + + # orient - orient argument to to_dict function + # item_getter - function for extracting value from + # the resulting dict using column name and index + @pytest.mark.parametrize( + "orient,item_getter", + [ + ("dict", lambda d, col, idx: d[col][idx]), + ("records", lambda d, col, idx: d[idx][col]), + ("list", lambda d, col, idx: d[col][idx]), + ("split", lambda d, col, idx: d["data"][idx][d["columns"].index(col)]), + ("index", lambda d, col, idx: d[idx][col]), + ], + ) + def test_to_dict_box_scalars(self, orient, item_getter): + # GH#14216, GH#23753 + # make sure that we are boxing properly + df = DataFrame({"a": [1, 2], "b": [0.1, 0.2]}) + result = df.to_dict(orient=orient) + assert isinstance(item_getter(result, "a", 0), int) + assert isinstance(item_getter(result, "b", 0), float) + + def test_to_dict_tz(self): + # GH#18372 When converting to dict with orient='records' columns of + # datetime that are tz-aware were not converted to required arrays + data = [ + (datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),), + (datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc),), + ] + df = DataFrame(list(data), columns=["d"]) + + result = df.to_dict(orient="records") + expected = [ + {"d": Timestamp("2017-11-18 21:53:00.219225+0000", tz=pytz.utc)}, + {"d": Timestamp("2017-11-18 22:06:30.061810+0000", tz=pytz.utc)}, + ] + tm.assert_dict_equal(result[0], expected[0]) + tm.assert_dict_equal(result[1], expected[1]) + + @pytest.mark.parametrize( + "into, expected", + [ + ( + dict, + { + 0: {"int_col": 1, "float_col": 1.0}, + 1: {"int_col": 2, "float_col": 2.0}, + 2: {"int_col": 3, "float_col": 3.0}, + }, + ), + ( + OrderedDict, + OrderedDict( + [ + (0, {"int_col": 1, "float_col": 1.0}), + (1, {"int_col": 2, "float_col": 2.0}), + (2, {"int_col": 3, "float_col": 3.0}), + ] + ), + ), + ( + defaultdict(dict), + defaultdict( + dict, + { + 0: {"int_col": 1, "float_col": 1.0}, + 1: {"int_col": 2, "float_col": 2.0}, + 2: {"int_col": 3, "float_col": 3.0}, + }, + ), + ), + ], + ) + def test_to_dict_index_dtypes(self, into, expected): + # GH#18580 + # When using to_dict(orient='index') on a dataframe with int + # and float columns only the int columns were cast to float + + df = DataFrame({"int_col": [1, 2, 3], "float_col": [1.0, 2.0, 3.0]}) + + result = df.to_dict(orient="index", into=into) + cols = ["int_col", "float_col"] + result = DataFrame.from_dict(result, orient="index")[cols] + expected = DataFrame.from_dict(expected, orient="index")[cols] + tm.assert_frame_equal(result, expected) + + def test_to_dict_numeric_names(self): + # GH#24940 + df = DataFrame({str(i): [i] for i in range(5)}) + result = set(df.to_dict("records")[0].keys()) + expected = set(df.columns) + assert result == expected + + def test_to_dict_wide(self): + # GH#24939 + df = DataFrame({(f"A_{i:d}"): [i] for i in range(256)}) + result = df.to_dict("records")[0] + expected = {f"A_{i:d}": i for i in range(256)} + assert result == expected + + @pytest.mark.parametrize( + "data,dtype", + ( + ([True, True, False], bool), + [ + [ + datetime(2018, 1, 1), + datetime(2019, 2, 2), + datetime(2020, 3, 3), + ], + Timestamp, + ], + [[1.0, 2.0, 3.0], float], + [[1, 2, 3], int], + [["X", "Y", "Z"], str], + ), + ) + def test_to_dict_orient_dtype(self, data, dtype): + # GH22620 & GH21256 + + df = DataFrame({"a": data}) + d = df.to_dict(orient="records") + assert all(type(record["a"]) is dtype for record in d) + + 
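+    # as in test_to_dict_orient_dtype above, but starting from numpy scalar
+    # types: to_dict(orient="records") should unbox them into native Python
+    # objects (np.int64 -> int, np.datetime64 -> Timestamp, ...)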
@pytest.mark.parametrize( + "data,expected_dtype", + ( + [np.uint64(2), int], + [np.int64(-9), int], + [np.float64(1.1), float], + [np.bool_(True), bool], + [np.datetime64("2005-02-25"), Timestamp], + ), + ) + def test_to_dict_scalar_constructor_orient_dtype(self, data, expected_dtype): + # GH22620 & GH21256 + + df = DataFrame({"a": data}, index=[0]) + d = df.to_dict(orient="records") + result = type(d[0]["a"]) + assert result is expected_dtype + + def test_to_dict_mixed_numeric_frame(self): + # GH 12859 + df = DataFrame({"a": [1.0], "b": [9.0]}) + result = df.reset_index().to_dict("records") + expected = [{"index": 0, "a": 1.0, "b": 9.0}] + assert result == expected + + @pytest.mark.parametrize( + "index", + [ + None, + Index(["aa", "bb"]), + Index(["aa", "bb"], name="cc"), + MultiIndex.from_tuples([("a", "b"), ("a", "c")]), + MultiIndex.from_tuples([("a", "b"), ("a", "c")], names=["n1", "n2"]), + ], + ) + @pytest.mark.parametrize( + "columns", + [ + ["x", "y"], + Index(["x", "y"]), + Index(["x", "y"], name="z"), + MultiIndex.from_tuples([("x", 1), ("y", 2)]), + MultiIndex.from_tuples([("x", 1), ("y", 2)], names=["z1", "z2"]), + ], + ) + def test_to_dict_orient_tight(self, index, columns): + df = DataFrame.from_records( + [[1, 3], [2, 4]], + columns=columns, + index=index, + ) + roundtrip = DataFrame.from_dict(df.to_dict(orient="tight"), orient="tight") + + tm.assert_frame_equal(df, roundtrip) + + @pytest.mark.parametrize( + "orient", + ["dict", "list", "split", "records", "index", "tight"], + ) + @pytest.mark.parametrize( + "data,expected_types", + ( + ( + { + "a": [np.int64(1), 1, np.int64(3)], + "b": [np.float64(1.0), 2.0, np.float64(3.0)], + "c": [np.float64(1.0), 2, np.int64(3)], + "d": [np.float64(1.0), "a", np.int64(3)], + "e": [np.float64(1.0), ["a"], np.int64(3)], + "f": [np.float64(1.0), ("a",), np.int64(3)], + }, + { + "a": [int, int, int], + "b": [float, float, float], + "c": [float, float, float], + "d": [float, str, int], + "e": [float, list, int], + "f": [float, tuple, int], + }, + ), + ( + { + "a": [1, 2, 3], + "b": [1.1, 2.2, 3.3], + }, + { + "a": [int, int, int], + "b": [float, float, float], + }, + ), + ( # Make sure we have one df which is all object type cols + { + "a": [1, "hello", 3], + "b": [1.1, "world", 3.3], + }, + { + "a": [int, str, int], + "b": [float, str, float], + }, + ), + ), + ) + def test_to_dict_returns_native_types(self, orient, data, expected_types): + # GH 46751 + # Tests we get back native types for all orient types + df = DataFrame(data) + result = df.to_dict(orient) + if orient == "dict": + assertion_iterator = ( + (i, key, value) + for key, index_value_map in result.items() + for i, value in index_value_map.items() + ) + elif orient == "list": + assertion_iterator = ( + (i, key, value) + for key, values in result.items() + for i, value in enumerate(values) + ) + elif orient in {"split", "tight"}: + assertion_iterator = ( + (i, key, result["data"][i][j]) + for i in result["index"] + for j, key in enumerate(result["columns"]) + ) + elif orient == "records": + assertion_iterator = ( + (i, key, value) + for i, record in enumerate(result) + for key, value in record.items() + ) + elif orient == "index": + assertion_iterator = ( + (i, key, value) + for i, record in result.items() + for key, value in record.items() + ) + + for i, key, value in assertion_iterator: + assert value == data[key][i] + assert type(value) is expected_types[key][i] + + @pytest.mark.parametrize("orient", ["dict", "list", "series", "records", "index"]) + def 
test_to_dict_index_false_error(self, orient): + # GH#46398 + df = DataFrame({"col1": [1, 2], "col2": [3, 4]}, index=["row1", "row2"]) + msg = "'index=False' is only valid when 'orient' is 'split' or 'tight'" + with pytest.raises(ValueError, match=msg): + df.to_dict(orient=orient, index=False) + + @pytest.mark.parametrize( + "orient, expected", + [ + ("split", {"columns": ["col1", "col2"], "data": [[1, 3], [2, 4]]}), + ( + "tight", + { + "columns": ["col1", "col2"], + "data": [[1, 3], [2, 4]], + "column_names": [None], + }, + ), + ], + ) + def test_to_dict_index_false(self, orient, expected): + # GH#46398 + df = DataFrame({"col1": [1, 2], "col2": [3, 4]}, index=["row1", "row2"]) + result = df.to_dict(orient=orient, index=False) + tm.assert_dict_equal(result, expected) + + @pytest.mark.parametrize( + "orient, expected", + [ + ("dict", {"a": {0: 1, 1: None}}), + ("list", {"a": [1, None]}), + ("split", {"index": [0, 1], "columns": ["a"], "data": [[1], [None]]}), + ( + "tight", + { + "index": [0, 1], + "columns": ["a"], + "data": [[1], [None]], + "index_names": [None], + "column_names": [None], + }, + ), + ("records", [{"a": 1}, {"a": None}]), + ("index", {0: {"a": 1}, 1: {"a": None}}), + ], + ) + def test_to_dict_na_to_none(self, orient, expected): + # GH#50795 + df = DataFrame({"a": [1, NA]}, dtype="Int64") + result = df.to_dict(orient=orient) + assert result == expected + + def test_to_dict_masked_native_python(self): + # GH#34665 + df = DataFrame({"a": Series([1, 2], dtype="Int64"), "B": 1}) + result = df.to_dict(orient="records") + assert type(result[0]["a"]) is int + + df = DataFrame({"a": Series([1, NA], dtype="Int64"), "B": 1}) + result = df.to_dict(orient="records") + assert type(result[0]["a"]) is int diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_dict_of_blocks.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_dict_of_blocks.py new file mode 100644 index 00000000..906e7423 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_dict_of_blocks.py @@ -0,0 +1,90 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + MultiIndex, +) +import pandas._testing as tm +from pandas.core.arrays import NumpyExtensionArray + +pytestmark = td.skip_array_manager_invalid_test + + +class TestToDictOfBlocks: + def test_copy_blocks(self, float_frame): + # GH#9607 + df = DataFrame(float_frame, copy=True) + column = df.columns[0] + + # use the default copy=True, change a column + _last_df = None + blocks = df._to_dict_of_blocks(copy=True) + for _df in blocks.values(): + _last_df = _df + if column in _df: + _df.loc[:, column] = _df[column] + 1 + + # make sure we did not change the original DataFrame + assert _last_df is not None and not _last_df[column].equals(df[column]) + + def test_no_copy_blocks(self, float_frame, using_copy_on_write): + # GH#9607 + df = DataFrame(float_frame, copy=True) + column = df.columns[0] + + _last_df = None + # use the copy=False, change a column + blocks = df._to_dict_of_blocks(copy=False) + for _df in blocks.values(): + _last_df = _df + if column in _df: + _df.loc[:, column] = _df[column] + 1 + + if not using_copy_on_write: + # make sure we did change the original DataFrame + assert _last_df is not None and _last_df[column].equals(df[column]) + else: + assert _last_df is not None and not _last_df[column].equals(df[column]) + + +def test_to_dict_of_blocks_item_cache(request, using_copy_on_write): + if 
using_copy_on_write: + request.node.add_marker(pytest.mark.xfail(reason="CoW - not yet implemented")) + # Calling to_dict_of_blocks should not poison item_cache + df = DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]}) + df["c"] = NumpyExtensionArray(np.array([1, 2, None, 3], dtype=object)) + mgr = df._mgr + assert len(mgr.blocks) == 3 # i.e. not consolidated + + ser = df["b"] # populates item_cache["b"] + + df._to_dict_of_blocks() + + if using_copy_on_write: + # TODO(CoW) we should disallow this, so `df` doesn't get updated, + # this currently still updates df, so this test fails + ser.values[0] = "foo" + assert df.loc[0, "b"] == "a" + else: + # Check that the to_dict_of_blocks didn't break link between ser and df + ser.values[0] = "foo" + assert df.loc[0, "b"] == "foo" + + assert df["b"] is ser + + +def test_set_change_dtype_slice(): + # GH#8850 + cols = MultiIndex.from_tuples([("1st", "a"), ("2nd", "b"), ("3rd", "c")]) + df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols) + df["2nd"] = df["2nd"] * 2.0 + + blocks = df._to_dict_of_blocks() + assert sorted(blocks.keys()) == ["float64", "int64"] + tm.assert_frame_equal( + blocks["float64"], DataFrame([[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]) + ) + tm.assert_frame_equal(blocks["int64"], DataFrame([[3], [6]], columns=cols[2:])) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_numpy.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_numpy.py new file mode 100644 index 00000000..bdb9b2c0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_numpy.py @@ -0,0 +1,49 @@ +import numpy as np + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + Timestamp, +) +import pandas._testing as tm + + +class TestToNumpy: + def test_to_numpy(self): + df = DataFrame({"A": [1, 2], "B": [3, 4.5]}) + expected = np.array([[1, 3], [2, 4.5]]) + result = df.to_numpy() + tm.assert_numpy_array_equal(result, expected) + + def test_to_numpy_dtype(self): + df = DataFrame({"A": [1, 2], "B": [3, 4.5]}) + expected = np.array([[1, 3], [2, 4]], dtype="int64") + result = df.to_numpy(dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + @td.skip_array_manager_invalid_test + def test_to_numpy_copy(self, using_copy_on_write): + arr = np.random.default_rng(2).standard_normal((4, 3)) + df = DataFrame(arr) + if using_copy_on_write: + assert df.values.base is not arr + assert df.to_numpy(copy=False).base is df.values.base + else: + assert df.values.base is arr + assert df.to_numpy(copy=False).base is arr + assert df.to_numpy(copy=True).base is not arr + + # we still don't want a copy when na_value=np.nan is passed, + # and that can be respected because we are already numpy-float + if using_copy_on_write: + assert df.to_numpy(copy=False).base is df.values.base + else: + assert df.to_numpy(copy=False, na_value=np.nan).base is arr + + def test_to_numpy_mixed_dtype_to_str(self): + # https://github.com/pandas-dev/pandas/issues/35455 + df = DataFrame([[Timestamp("2020-01-01 00:00:00"), 100.0]]) + result = df.to_numpy(dtype=str) + expected = np.array([["2020-01-01 00:00:00", "100.0"]], dtype=str) + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_period.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_period.py new file mode 100644 index 00000000..6a3e6b8c --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_period.py @@ -0,0 +1,89 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + DatetimeIndex, + PeriodIndex, + Series, + date_range, + period_range, +) +import pandas._testing as tm + + +class TestToPeriod: + def test_to_period(self, frame_or_series): + K = 5 + + dr = date_range("1/1/2000", "1/1/2001", freq="D") + obj = DataFrame( + np.random.default_rng(2).standard_normal((len(dr), K)), + index=dr, + columns=["A", "B", "C", "D", "E"], + ) + obj["mix"] = "a" + obj = tm.get_obj(obj, frame_or_series) + + pts = obj.to_period() + exp = obj.copy() + exp.index = period_range("1/1/2000", "1/1/2001") + tm.assert_equal(pts, exp) + + pts = obj.to_period("M") + exp.index = exp.index.asfreq("M") + tm.assert_equal(pts, exp) + + def test_to_period_without_freq(self, frame_or_series): + # GH#7606 without freq + idx = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"]) + exp_idx = PeriodIndex( + ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], freq="D" + ) + + obj = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), index=idx, columns=idx + ) + obj = tm.get_obj(obj, frame_or_series) + expected = obj.copy() + expected.index = exp_idx + tm.assert_equal(obj.to_period(), expected) + + if frame_or_series is DataFrame: + expected = obj.copy() + expected.columns = exp_idx + tm.assert_frame_equal(obj.to_period(axis=1), expected) + + def test_to_period_columns(self): + dr = date_range("1/1/2000", "1/1/2001") + df = DataFrame(np.random.default_rng(2).standard_normal((len(dr), 5)), index=dr) + df["mix"] = "a" + + df = df.T + pts = df.to_period(axis=1) + exp = df.copy() + exp.columns = period_range("1/1/2000", "1/1/2001") + tm.assert_frame_equal(pts, exp) + + pts = df.to_period("M", axis=1) + tm.assert_index_equal(pts.columns, exp.columns.asfreq("M")) + + def test_to_period_invalid_axis(self): + dr = date_range("1/1/2000", "1/1/2001") + df = DataFrame(np.random.default_rng(2).standard_normal((len(dr), 5)), index=dr) + df["mix"] = "a" + + msg = "No axis named 2 for object type DataFrame" + with pytest.raises(ValueError, match=msg): + df.to_period(axis=2) + + def test_to_period_raises(self, index, frame_or_series): + # https://github.com/pandas-dev/pandas/issues/33327 + obj = Series(index=index, dtype=object) + if frame_or_series is DataFrame: + obj = obj.to_frame() + + if not isinstance(index, DatetimeIndex): + msg = f"unsupported Type {type(index).__name__}" + with pytest.raises(TypeError, match=msg): + obj.to_period() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_records.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_records.py new file mode 100644 index 00000000..27939856 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_records.py @@ -0,0 +1,523 @@ +from collections import abc +import email +from email.parser import Parser + +import numpy as np +import pytest + +from pandas import ( + CategoricalDtype, + DataFrame, + MultiIndex, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestDataFrameToRecords: + def test_to_records_timeseries(self): + index = date_range("1/1/2000", periods=10) + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), + index=index, + columns=["a", "b", "c"], + ) + + result = df.to_records() + assert result["index"].dtype == "M8[ns]" + + result = df.to_records(index=False) + + def 
test_to_records_dt64(self): + df = DataFrame( + [["one", "two", "three"], ["four", "five", "six"]], + index=date_range("2012-01-01", "2012-01-02"), + ) + + expected = df.index.values[0] + result = df.to_records()["index"][0] + assert expected == result + + def test_to_records_dt64tz_column(self): + # GH#32535 don't lose tz in to_records + df = DataFrame({"A": date_range("2012-01-01", "2012-01-02", tz="US/Eastern")}) + + result = df.to_records() + + assert result.dtype["A"] == object + val = result[0][1] + assert isinstance(val, Timestamp) + assert val == df.loc[0, "A"] + + def test_to_records_with_multindex(self): + # GH#3189 + index = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + data = np.zeros((8, 4)) + df = DataFrame(data, index=index) + r = df.to_records(index=True)["level_0"] + assert "bar" in r + assert "one" not in r + + def test_to_records_with_Mapping_type(self): + abc.Mapping.register(email.message.Message) + + headers = Parser().parsestr( + "From: <user@example.com>\n" + "To: <someone_else@example.com>\n" + "Subject: Test message\n" + "\n" + "Body would go here\n" + ) + + frame = DataFrame.from_records([headers]) + # check that the parsed headers show up as columns + assert all(x in frame for x in ["To", "Subject", "From"]) + + def test_to_records_floats(self): + df = DataFrame(np.random.default_rng(2).random((10, 10))) + df.to_records() + + def test_to_records_index_name(self): + df = DataFrame(np.random.default_rng(2).standard_normal((3, 3))) + df.index.name = "X" + rs = df.to_records() + assert "X" in rs.dtype.fields + + df = DataFrame(np.random.default_rng(2).standard_normal((3, 3))) + rs = df.to_records() + assert "index" in rs.dtype.fields + + df.index = MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")]) + df.index.names = ["A", None] + result = df.to_records() + expected = np.rec.fromarrays( + [np.array(["a", "a", "b"]), np.array(["x", "y", "z"])] + + [np.asarray(df.iloc[:, i]) for i in range(3)], + dtype={ + "names": ["A", "level_1", "0", "1", "2"], + "formats": [ + "O", + "O", + f"{tm.ENDIAN}f8", + f"{tm.ENDIAN}f8", + f"{tm.ENDIAN}f8", + ], + }, + ) + tm.assert_numpy_array_equal(result, expected) + + def test_to_records_with_unicode_index(self): + # GH#13172 + # unicode_literals conflict with to_records + result = DataFrame([{"a": "x", "b": "y"}]).set_index("a").to_records() + expected = np.rec.array([("x", "y")], dtype=[("a", "O"), ("b", "O")]) + tm.assert_almost_equal(result, expected) + + def test_to_records_index_dtype(self): + # GH 47263: consistent data types for Index and MultiIndex + df = DataFrame( + { + 1: date_range("2022-01-01", periods=2), + 2: date_range("2022-01-01", periods=2), + 3: date_range("2022-01-01", periods=2), + } + ) + + expected = np.rec.array( + [ + ("2022-01-01", "2022-01-01", "2022-01-01"), + ("2022-01-02", "2022-01-02", "2022-01-02"), + ], + dtype=[ + ("1", f"{tm.ENDIAN}M8[ns]"), + ("2", f"{tm.ENDIAN}M8[ns]"), + ("3", f"{tm.ENDIAN}M8[ns]"), + ], + ) + + result = df.to_records(index=False) + tm.assert_almost_equal(result, expected) + + result = df.set_index(1).to_records(index=True) + tm.assert_almost_equal(result, expected) + + result = df.set_index([1, 2]).to_records(index=True) + tm.assert_almost_equal(result, expected) + + def test_to_records_with_unicode_column_names(self): + # xref issue: https://github.com/numpy/numpy/issues/2407 + # Issue GH#11879. 
to_records used to raise an exception when used + # with column names containing non-ascii characters in Python 2 + result = DataFrame(data={"accented_name_é": [1.0]}).to_records() + + # Note that numpy allows for unicode field names but dtypes need + # to be specified using dictionary instead of list of tuples. + expected = np.rec.array( + [(0, 1.0)], + dtype={"names": ["index", "accented_name_é"], "formats": ["=i8", "=f8"]}, + ) + tm.assert_almost_equal(result, expected) + + def test_to_records_with_categorical(self): + # GH#8626 + + # dict creation + df = DataFrame({"A": list("abc")}, dtype="category") + expected = Series(list("abc"), dtype="category", name="A") + tm.assert_series_equal(df["A"], expected) + + # list-like creation + df = DataFrame(list("abc"), dtype="category") + expected = Series(list("abc"), dtype="category", name=0) + tm.assert_series_equal(df[0], expected) + + # to record array + # this coerces + result = df.to_records() + expected = np.rec.array( + [(0, "a"), (1, "b"), (2, "c")], dtype=[("index", "=i8"), ("0", "O")] + ) + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize( + "kwargs,expected", + [ + # No dtypes --> default to array dtypes. + ( + {}, + np.rec.array( + [(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")], + dtype=[ + ("index", f"{tm.ENDIAN}i8"), + ("A", f"{tm.ENDIAN}i8"), + ("B", f"{tm.ENDIAN}f8"), + ("C", "O"), + ], + ), + ), + # Should have no effect in this case. + ( + {"index": True}, + np.rec.array( + [(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")], + dtype=[ + ("index", f"{tm.ENDIAN}i8"), + ("A", f"{tm.ENDIAN}i8"), + ("B", f"{tm.ENDIAN}f8"), + ("C", "O"), + ], + ), + ), + # Column dtype applied across the board. Index unaffected. + ( + {"column_dtypes": f"{tm.ENDIAN}U4"}, + np.rec.array( + [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], + dtype=[ + ("index", f"{tm.ENDIAN}i8"), + ("A", f"{tm.ENDIAN}U4"), + ("B", f"{tm.ENDIAN}U4"), + ("C", f"{tm.ENDIAN}U4"), + ], + ), + ), + # Index dtype applied across the board. Columns unaffected. + ( + {"index_dtypes": f"{tm.ENDIAN}U1"}, + np.rec.array( + [("0", 1, 0.2, "a"), ("1", 2, 1.5, "bc")], + dtype=[ + ("index", f"{tm.ENDIAN}U1"), + ("A", f"{tm.ENDIAN}i8"), + ("B", f"{tm.ENDIAN}f8"), + ("C", "O"), + ], + ), + ), + # Pass in a type instance. + ( + {"column_dtypes": str}, + np.rec.array( + [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], + dtype=[ + ("index", f"{tm.ENDIAN}i8"), + ("A", f"{tm.ENDIAN}U"), + ("B", f"{tm.ENDIAN}U"), + ("C", f"{tm.ENDIAN}U"), + ], + ), + ), + # Pass in a dtype instance. + ( + {"column_dtypes": np.dtype(np.str_)}, + np.rec.array( + [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], + dtype=[ + ("index", f"{tm.ENDIAN}i8"), + ("A", f"{tm.ENDIAN}U"), + ("B", f"{tm.ENDIAN}U"), + ("C", f"{tm.ENDIAN}U"), + ], + ), + ), + # Pass in a dictionary (name-only). + ( + { + "column_dtypes": { + "A": np.int8, + "B": np.float32, + "C": f"{tm.ENDIAN}U2", + } + }, + np.rec.array( + [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], + dtype=[ + ("index", f"{tm.ENDIAN}i8"), + ("A", "i1"), + ("B", f"{tm.ENDIAN}f4"), + ("C", f"{tm.ENDIAN}U2"), + ], + ), + ), + # Pass in a dictionary (indices-only). + ( + {"index_dtypes": {0: "int16"}}, + np.rec.array( + [(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")], + dtype=[ + ("index", "i2"), + ("A", f"{tm.ENDIAN}i8"), + ("B", f"{tm.ENDIAN}f8"), + ("C", "O"), + ], + ), + ), + # Ignore index mappings if index is not True. 
+ ( + {"index": False, "index_dtypes": f"{tm.ENDIAN}U2"}, + np.rec.array( + [(1, 0.2, "a"), (2, 1.5, "bc")], + dtype=[ + ("A", f"{tm.ENDIAN}i8"), + ("B", f"{tm.ENDIAN}f8"), + ("C", "O"), + ], + ), + ), + # Non-existent names / indices in mapping should not error. + ( + {"index_dtypes": {0: "int16", "not-there": "float32"}}, + np.rec.array( + [(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")], + dtype=[ + ("index", "i2"), + ("A", f"{tm.ENDIAN}i8"), + ("B", f"{tm.ENDIAN}f8"), + ("C", "O"), + ], + ), + ), + # Names / indices not in mapping default to array dtype. + ( + {"column_dtypes": {"A": np.int8, "B": np.float32}}, + np.rec.array( + [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], + dtype=[ + ("index", f"{tm.ENDIAN}i8"), + ("A", "i1"), + ("B", f"{tm.ENDIAN}f4"), + ("C", "O"), + ], + ), + ), + # Names / indices not in dtype mapping default to array dtype. + ( + {"column_dtypes": {"A": np.dtype("int8"), "B": np.dtype("float32")}}, + np.rec.array( + [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], + dtype=[ + ("index", f"{tm.ENDIAN}i8"), + ("A", "i1"), + ("B", f"{tm.ENDIAN}f4"), + ("C", "O"), + ], + ), + ), + # Mixture of everything. + ( + { + "column_dtypes": {"A": np.int8, "B": np.float32}, + "index_dtypes": f"{tm.ENDIAN}U2", + }, + np.rec.array( + [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], + dtype=[ + ("index", f"{tm.ENDIAN}U2"), + ("A", "i1"), + ("B", f"{tm.ENDIAN}f4"), + ("C", "O"), + ], + ), + ), + # Invalid dype values. + ( + {"index": False, "column_dtypes": []}, + (ValueError, "Invalid dtype \\[\\] specified for column A"), + ), + ( + {"index": False, "column_dtypes": {"A": "int32", "B": 5}}, + (ValueError, "Invalid dtype 5 specified for column B"), + ), + # Numpy can't handle EA types, so check error is raised + ( + { + "index": False, + "column_dtypes": {"A": "int32", "B": CategoricalDtype(["a", "b"])}, + }, + (ValueError, "Invalid dtype category specified for column B"), + ), + # Check that bad types raise + ( + {"index": False, "column_dtypes": {"A": "int32", "B": "foo"}}, + (TypeError, "data type [\"']foo[\"'] not understood"), + ), + ], + ) + def test_to_records_dtype(self, kwargs, expected): + # see GH#18146 + df = DataFrame({"A": [1, 2], "B": [0.2, 1.5], "C": ["a", "bc"]}) + + if not isinstance(expected, np.rec.recarray): + with pytest.raises(expected[0], match=expected[1]): + df.to_records(**kwargs) + else: + result = df.to_records(**kwargs) + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize( + "df,kwargs,expected", + [ + # MultiIndex in the index. + ( + DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=list("abc") + ).set_index(["a", "b"]), + {"column_dtypes": "float64", "index_dtypes": {0: "int32", 1: "int8"}}, + np.rec.array( + [(1, 2, 3.0), (4, 5, 6.0), (7, 8, 9.0)], + dtype=[ + ("a", f"{tm.ENDIAN}i4"), + ("b", "i1"), + ("c", f"{tm.ENDIAN}f8"), + ], + ), + ), + # MultiIndex in the columns. + ( + DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + columns=MultiIndex.from_tuples( + [("a", "d"), ("b", "e"), ("c", "f")] + ), + ), + { + "column_dtypes": {0: f"{tm.ENDIAN}U1", 2: "float32"}, + "index_dtypes": "float32", + }, + np.rec.array( + [(0.0, "1", 2, 3.0), (1.0, "4", 5, 6.0), (2.0, "7", 8, 9.0)], + dtype=[ + ("index", f"{tm.ENDIAN}f4"), + ("('a', 'd')", f"{tm.ENDIAN}U1"), + ("('b', 'e')", f"{tm.ENDIAN}i8"), + ("('c', 'f')", f"{tm.ENDIAN}f4"), + ], + ), + ), + # MultiIndex in both the columns and index. 
+ ( + DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + columns=MultiIndex.from_tuples( + [("a", "d"), ("b", "e"), ("c", "f")], names=list("ab") + ), + index=MultiIndex.from_tuples( + [("d", -4), ("d", -5), ("f", -6)], names=list("cd") + ), + ), + { + "column_dtypes": "float64", + "index_dtypes": {0: f"{tm.ENDIAN}U2", 1: "int8"}, + }, + np.rec.array( + [ + ("d", -4, 1.0, 2.0, 3.0), + ("d", -5, 4.0, 5.0, 6.0), + ("f", -6, 7, 8, 9.0), + ], + dtype=[ + ("c", f"{tm.ENDIAN}U2"), + ("d", "i1"), + ("('a', 'd')", f"{tm.ENDIAN}f8"), + ("('b', 'e')", f"{tm.ENDIAN}f8"), + ("('c', 'f')", f"{tm.ENDIAN}f8"), + ], + ), + ), + ], + ) + def test_to_records_dtype_mi(self, df, kwargs, expected): + # see GH#18146 + result = df.to_records(**kwargs) + tm.assert_almost_equal(result, expected) + + def test_to_records_dict_like(self): + # see GH#18146 + class DictLike: + def __init__(self, **kwargs) -> None: + self.d = kwargs.copy() + + def __getitem__(self, key): + return self.d.__getitem__(key) + + def __contains__(self, key) -> bool: + return key in self.d + + def keys(self): + return self.d.keys() + + df = DataFrame({"A": [1, 2], "B": [0.2, 1.5], "C": ["a", "bc"]}) + + dtype_mappings = { + "column_dtypes": DictLike(A=np.int8, B=np.float32), + "index_dtypes": f"{tm.ENDIAN}U2", + } + + result = df.to_records(**dtype_mappings) + expected = np.rec.array( + [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], + dtype=[ + ("index", f"{tm.ENDIAN}U2"), + ("A", "i1"), + ("B", f"{tm.ENDIAN}f4"), + ("C", "O"), + ], + ) + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize("tz", ["UTC", "GMT", "US/Eastern"]) + def test_to_records_datetimeindex_with_tz(self, tz): + # GH#13937 + dr = date_range("2016-01-01", periods=10, freq="S", tz=tz) + + df = DataFrame({"datetime": dr}, index=dr) + + expected = df.to_records() + result = df.tz_convert("UTC").to_records() + + # both converted to UTC, so they are equal + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_timestamp.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_timestamp.py new file mode 100644 index 00000000..2f73e3d5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_timestamp.py @@ -0,0 +1,154 @@ +from datetime import timedelta + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + DatetimeIndex, + PeriodIndex, + Series, + Timedelta, + date_range, + period_range, + to_datetime, +) +import pandas._testing as tm + + +def _get_with_delta(delta, freq="A-DEC"): + return date_range( + to_datetime("1/1/2001") + delta, + to_datetime("12/31/2009") + delta, + freq=freq, + ) + + +class TestToTimestamp: + def test_to_timestamp(self, frame_or_series): + K = 5 + index = period_range(freq="A", start="1/1/2001", end="12/1/2009") + obj = DataFrame( + np.random.default_rng(2).standard_normal((len(index), K)), + index=index, + columns=["A", "B", "C", "D", "E"], + ) + obj["mix"] = "a" + obj = tm.get_obj(obj, frame_or_series) + + exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC") + exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns") + result = obj.to_timestamp("D", "end") + tm.assert_index_equal(result.index, exp_index) + tm.assert_numpy_array_equal(result.values, obj.values) + if frame_or_series is Series: + assert result.name == "A" + + exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN") + result = obj.to_timestamp("D", "start") + 
tm.assert_index_equal(result.index, exp_index) + + result = obj.to_timestamp(how="start") + tm.assert_index_equal(result.index, exp_index) + + delta = timedelta(hours=23) + result = obj.to_timestamp("H", "end") + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns") + tm.assert_index_equal(result.index, exp_index) + + delta = timedelta(hours=23, minutes=59) + result = obj.to_timestamp("T", "end") + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns") + tm.assert_index_equal(result.index, exp_index) + + result = obj.to_timestamp("S", "end") + delta = timedelta(hours=23, minutes=59, seconds=59) + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns") + tm.assert_index_equal(result.index, exp_index) + + def test_to_timestamp_columns(self): + K = 5 + index = period_range(freq="A", start="1/1/2001", end="12/1/2009") + df = DataFrame( + np.random.default_rng(2).standard_normal((len(index), K)), + index=index, + columns=["A", "B", "C", "D", "E"], + ) + df["mix"] = "a" + + # columns + df = df.T + + exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC") + exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns") + result = df.to_timestamp("D", "end", axis=1) + tm.assert_index_equal(result.columns, exp_index) + tm.assert_numpy_array_equal(result.values, df.values) + + exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN") + result = df.to_timestamp("D", "start", axis=1) + tm.assert_index_equal(result.columns, exp_index) + + delta = timedelta(hours=23) + result = df.to_timestamp("H", "end", axis=1) + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns") + tm.assert_index_equal(result.columns, exp_index) + + delta = timedelta(hours=23, minutes=59) + result = df.to_timestamp("T", "end", axis=1) + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns") + tm.assert_index_equal(result.columns, exp_index) + + result = df.to_timestamp("S", "end", axis=1) + delta = timedelta(hours=23, minutes=59, seconds=59) + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns") + tm.assert_index_equal(result.columns, exp_index) + + result1 = df.to_timestamp("5t", axis=1) + result2 = df.to_timestamp("t", axis=1) + expected = date_range("2001-01-01", "2009-01-01", freq="AS") + assert isinstance(result1.columns, DatetimeIndex) + assert isinstance(result2.columns, DatetimeIndex) + tm.assert_numpy_array_equal(result1.columns.asi8, expected.asi8) + tm.assert_numpy_array_equal(result2.columns.asi8, expected.asi8) + # PeriodIndex.to_timestamp always use 'infer' + assert result1.columns.freqstr == "AS-JAN" + assert result2.columns.freqstr == "AS-JAN" + + def test_to_timestamp_invalid_axis(self): + index = period_range(freq="A", start="1/1/2001", end="12/1/2009") + obj = DataFrame( + np.random.default_rng(2).standard_normal((len(index), 5)), index=index + ) + + # invalid axis + with pytest.raises(ValueError, match="axis"): + obj.to_timestamp(axis=2) + + def test_to_timestamp_hourly(self, frame_or_series): + index = period_range(freq="H", start="1/1/2001", end="1/2/2001") + obj = Series(1, index=index, name="foo") + if frame_or_series is not Series: + obj = obj.to_frame() + + exp_index = date_range("1/1/2001 00:59:59", end="1/2/2001 00:59:59", freq="H") + result = obj.to_timestamp(how="end") + exp_index = exp_index + Timedelta(1, "s") - 
Timedelta(1, "ns") + tm.assert_index_equal(result.index, exp_index) + if frame_or_series is Series: + assert result.name == "foo" + + def test_to_timestamp_raises(self, index, frame_or_series): + # GH#33327 + obj = frame_or_series(index=index, dtype=object) + + if not isinstance(index, PeriodIndex): + msg = f"unsupported Type {type(index).__name__}" + with pytest.raises(TypeError, match=msg): + obj.to_timestamp() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_transpose.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_transpose.py new file mode 100644 index 00000000..8ff6ea37 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_transpose.py @@ -0,0 +1,177 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + DatetimeIndex, + IntervalIndex, + Series, + Timestamp, + date_range, + timedelta_range, +) +import pandas._testing as tm + + +class TestTranspose: + def test_transpose_td64_intervals(self): + # GH#44917 + tdi = timedelta_range("0 Days", "3 Days") + ii = IntervalIndex.from_breaks(tdi) + ii = ii.insert(-1, np.nan) + df = DataFrame(ii) + + result = df.T + expected = DataFrame({i: ii[i : i + 1] for i in range(len(ii))}) + tm.assert_frame_equal(result, expected) + + def test_transpose_empty_preserves_datetimeindex(self): + # GH#41382 + df = DataFrame(index=DatetimeIndex([])) + + expected = DatetimeIndex([], dtype="datetime64[ns]", freq=None) + + result1 = df.T.sum().index + result2 = df.sum(axis=1).index + + tm.assert_index_equal(result1, expected) + tm.assert_index_equal(result2, expected) + + def test_transpose_tzaware_1col_single_tz(self): + # GH#26825 + dti = date_range("2016-04-05 04:30", periods=3, tz="UTC") + + df = DataFrame(dti) + assert (df.dtypes == dti.dtype).all() + res = df.T + assert (res.dtypes == dti.dtype).all() + + def test_transpose_tzaware_2col_single_tz(self): + # GH#26825 + dti = date_range("2016-04-05 04:30", periods=3, tz="UTC") + + df3 = DataFrame({"A": dti, "B": dti}) + assert (df3.dtypes == dti.dtype).all() + res3 = df3.T + assert (res3.dtypes == dti.dtype).all() + + def test_transpose_tzaware_2col_mixed_tz(self): + # GH#26825 + dti = date_range("2016-04-05 04:30", periods=3, tz="UTC") + dti2 = dti.tz_convert("US/Pacific") + + df4 = DataFrame({"A": dti, "B": dti2}) + assert (df4.dtypes == [dti.dtype, dti2.dtype]).all() + assert (df4.T.dtypes == object).all() + tm.assert_frame_equal(df4.T.T, df4.astype(object)) + + @pytest.mark.parametrize("tz", [None, "America/New_York"]) + def test_transpose_preserves_dtindex_equality_with_dst(self, tz): + # GH#19970 + idx = date_range("20161101", "20161130", freq="4H", tz=tz) + df = DataFrame({"a": range(len(idx)), "b": range(len(idx))}, index=idx) + result = df.T == df.T + expected = DataFrame(True, index=list("ab"), columns=idx) + tm.assert_frame_equal(result, expected) + + def test_transpose_object_to_tzaware_mixed_tz(self): + # GH#26825 + dti = date_range("2016-04-05 04:30", periods=3, tz="UTC") + dti2 = dti.tz_convert("US/Pacific") + + # mixed all-tzaware dtypes + df2 = DataFrame([dti, dti2]) + assert (df2.dtypes == object).all() + res2 = df2.T + assert (res2.dtypes == object).all() + + def test_transpose_uint64(self, uint64_frame): + result = uint64_frame.T + expected = DataFrame(uint64_frame.values.T) + expected.index = ["A", "B"] + tm.assert_frame_equal(result, expected) + + def test_transpose_float(self, float_frame): + frame = float_frame + dft = frame.T 
+ for idx, series in dft.items(): + for col, value in series.items(): + if np.isnan(value): + assert np.isnan(frame[col][idx]) + else: + assert value == frame[col][idx] + + # mixed type + index, data = tm.getMixedTypeDict() + mixed = DataFrame(data, index=index) + + mixed_T = mixed.T + for col, s in mixed_T.items(): + assert s.dtype == np.object_ + + @td.skip_array_manager_invalid_test + def test_transpose_get_view(self, float_frame, using_copy_on_write): + dft = float_frame.T + dft.iloc[:, 5:10] = 5 + + if using_copy_on_write: + assert (float_frame.values[5:10] != 5).all() + else: + assert (float_frame.values[5:10] == 5).all() + + @td.skip_array_manager_invalid_test + def test_transpose_get_view_dt64tzget_view(self, using_copy_on_write): + dti = date_range("2016-01-01", periods=6, tz="US/Pacific") + arr = dti._data.reshape(3, 2) + df = DataFrame(arr) + assert df._mgr.nblocks == 1 + + result = df.T + assert result._mgr.nblocks == 1 + + rtrip = result._mgr.blocks[0].values + if using_copy_on_write: + assert np.shares_memory(df._mgr.blocks[0].values._ndarray, rtrip._ndarray) + else: + assert np.shares_memory(arr._ndarray, rtrip._ndarray) + + def test_transpose_not_inferring_dt(self): + # GH#51546 + df = DataFrame( + { + "a": [Timestamp("2019-12-31"), Timestamp("2019-12-31")], + }, + dtype=object, + ) + result = df.T + expected = DataFrame( + [[Timestamp("2019-12-31"), Timestamp("2019-12-31")]], + columns=[0, 1], + index=["a"], + dtype=object, + ) + tm.assert_frame_equal(result, expected) + + def test_transpose_not_inferring_dt_mixed_blocks(self): + # GH#51546 + df = DataFrame( + { + "a": Series( + [Timestamp("2019-12-31"), Timestamp("2019-12-31")], dtype=object + ), + "b": [Timestamp("2019-12-31"), Timestamp("2019-12-31")], + } + ) + result = df.T + expected = DataFrame( + [ + [Timestamp("2019-12-31"), Timestamp("2019-12-31")], + [Timestamp("2019-12-31"), Timestamp("2019-12-31")], + ], + columns=[0, 1], + index=["a", "b"], + dtype=object, + ) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_truncate.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_truncate.py new file mode 100644 index 00000000..4c4b0407 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_truncate.py @@ -0,0 +1,154 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + Series, + date_range, +) +import pandas._testing as tm + + +class TestDataFrameTruncate: + def test_truncate(self, datetime_frame, frame_or_series): + ts = datetime_frame[::3] + ts = tm.get_obj(ts, frame_or_series) + + start, end = datetime_frame.index[3], datetime_frame.index[6] + + start_missing = datetime_frame.index[2] + end_missing = datetime_frame.index[7] + + # neither specified + truncated = ts.truncate() + tm.assert_equal(truncated, ts) + + # both specified + expected = ts[1:3] + + truncated = ts.truncate(start, end) + tm.assert_equal(truncated, expected) + + truncated = ts.truncate(start_missing, end_missing) + tm.assert_equal(truncated, expected) + + # start specified + expected = ts[1:] + + truncated = ts.truncate(before=start) + tm.assert_equal(truncated, expected) + + truncated = ts.truncate(before=start_missing) + tm.assert_equal(truncated, expected) + + # end specified + expected = ts[:3] + + truncated = ts.truncate(after=end) + tm.assert_equal(truncated, expected) + + truncated = ts.truncate(after=end_missing) + tm.assert_equal(truncated, 
expected) + + # corner case, empty series/frame returned + truncated = ts.truncate(after=ts.index[0] - ts.index.freq) + assert len(truncated) == 0 + + truncated = ts.truncate(before=ts.index[-1] + ts.index.freq) + assert len(truncated) == 0 + + msg = "Truncate: 2000-01-06 00:00:00 must be after 2000-02-04 00:00:00" + with pytest.raises(ValueError, match=msg): + ts.truncate( + before=ts.index[-1] - ts.index.freq, after=ts.index[0] + ts.index.freq + ) + + def test_truncate_nonsortedindex(self, frame_or_series): + # GH#17935 + + obj = DataFrame({"A": ["a", "b", "c", "d", "e"]}, index=[5, 3, 2, 9, 0]) + obj = tm.get_obj(obj, frame_or_series) + + msg = "truncate requires a sorted index" + with pytest.raises(ValueError, match=msg): + obj.truncate(before=3, after=9) + + def test_sort_values_nonsortedindex(self): + rng = date_range("2011-01-01", "2012-01-01", freq="W") + ts = DataFrame( + { + "A": np.random.default_rng(2).standard_normal(len(rng)), + "B": np.random.default_rng(2).standard_normal(len(rng)), + }, + index=rng, + ) + + decreasing = ts.sort_values("A", ascending=False) + + msg = "truncate requires a sorted index" + with pytest.raises(ValueError, match=msg): + decreasing.truncate(before="2011-11", after="2011-12") + + def test_truncate_nonsortedindex_axis1(self): + # GH#17935 + + df = DataFrame( + { + 3: np.random.default_rng(2).standard_normal(5), + 20: np.random.default_rng(2).standard_normal(5), + 2: np.random.default_rng(2).standard_normal(5), + 0: np.random.default_rng(2).standard_normal(5), + }, + columns=[3, 20, 2, 0], + ) + msg = "truncate requires a sorted index" + with pytest.raises(ValueError, match=msg): + df.truncate(before=2, after=20, axis=1) + + @pytest.mark.parametrize( + "before, after, indices", + [(1, 2, [2, 1]), (None, 2, [2, 1, 0]), (1, None, [3, 2, 1])], + ) + @pytest.mark.parametrize("dtyp", [*tm.ALL_REAL_NUMPY_DTYPES, "datetime64[ns]"]) + def test_truncate_decreasing_index( + self, before, after, indices, dtyp, frame_or_series + ): + # https://github.com/pandas-dev/pandas/issues/33756 + idx = Index([3, 2, 1, 0], dtype=dtyp) + if isinstance(idx, DatetimeIndex): + before = pd.Timestamp(before) if before is not None else None + after = pd.Timestamp(after) if after is not None else None + indices = [pd.Timestamp(i) for i in indices] + values = frame_or_series(range(len(idx)), index=idx) + result = values.truncate(before=before, after=after) + expected = values.loc[indices] + tm.assert_equal(result, expected) + + def test_truncate_multiindex(self, frame_or_series): + # GH 34564 + mi = pd.MultiIndex.from_product([[1, 2, 3, 4], ["A", "B"]], names=["L1", "L2"]) + s1 = DataFrame(range(mi.shape[0]), index=mi, columns=["col"]) + s1 = tm.get_obj(s1, frame_or_series) + + result = s1.truncate(before=2, after=3) + + df = DataFrame.from_dict( + {"L1": [2, 2, 3, 3], "L2": ["A", "B", "A", "B"], "col": [2, 3, 4, 5]} + ) + expected = df.set_index(["L1", "L2"]) + expected = tm.get_obj(expected, frame_or_series) + + tm.assert_equal(result, expected) + + def test_truncate_index_only_one_unique_value(self, frame_or_series): + # GH 42365 + obj = Series(0, index=date_range("2021-06-30", "2021-06-30")).repeat(5) + if frame_or_series is DataFrame: + obj = obj.to_frame(name="a") + + truncated = obj.truncate("2021-06-28", "2021-07-01") + + tm.assert_equal(truncated, obj) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_tz_convert.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_tz_convert.py new file mode 100644 index 
00000000..8a484aba --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_tz_convert.py @@ -0,0 +1,131 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + date_range, +) +import pandas._testing as tm + + +class TestTZConvert: + def test_tz_convert(self, frame_or_series): + rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern") + + obj = DataFrame({"a": 1}, index=rng) + obj = tm.get_obj(obj, frame_or_series) + + result = obj.tz_convert("Europe/Berlin") + expected = DataFrame({"a": 1}, rng.tz_convert("Europe/Berlin")) + expected = tm.get_obj(expected, frame_or_series) + + assert result.index.tz.zone == "Europe/Berlin" + tm.assert_equal(result, expected) + + def test_tz_convert_axis1(self): + rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern") + + obj = DataFrame({"a": 1}, index=rng) + + obj = obj.T + result = obj.tz_convert("Europe/Berlin", axis=1) + assert result.columns.tz.zone == "Europe/Berlin" + + expected = DataFrame({"a": 1}, rng.tz_convert("Europe/Berlin")) + + tm.assert_equal(result, expected.T) + + def test_tz_convert_naive(self, frame_or_series): + # can't convert tz-naive + rng = date_range("1/1/2011", periods=200, freq="D") + ts = Series(1, index=rng) + ts = frame_or_series(ts) + + with pytest.raises(TypeError, match="Cannot convert tz-naive"): + ts.tz_convert("US/Eastern") + + @pytest.mark.parametrize("fn", ["tz_localize", "tz_convert"]) + def test_tz_convert_and_localize(self, fn): + l0 = date_range("20140701", periods=5, freq="D") + l1 = date_range("20140701", periods=5, freq="D") + + int_idx = Index(range(5)) + + if fn == "tz_convert": + l0 = l0.tz_localize("UTC") + l1 = l1.tz_localize("UTC") + + for idx in [l0, l1]: + l0_expected = getattr(idx, fn)("US/Pacific") + l1_expected = getattr(idx, fn)("US/Pacific") + + df1 = DataFrame(np.ones(5), index=l0) + df1 = getattr(df1, fn)("US/Pacific") + tm.assert_index_equal(df1.index, l0_expected) + + # MultiIndex + # GH7846 + df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1])) + + # freq is not preserved in MultiIndex construction + l1_expected = l1_expected._with_freq(None) + l0_expected = l0_expected._with_freq(None) + l1 = l1._with_freq(None) + l0 = l0._with_freq(None) + + df3 = getattr(df2, fn)("US/Pacific", level=0) + assert not df3.index.levels[0].equals(l0) + tm.assert_index_equal(df3.index.levels[0], l0_expected) + tm.assert_index_equal(df3.index.levels[1], l1) + assert not df3.index.levels[1].equals(l1_expected) + + df3 = getattr(df2, fn)("US/Pacific", level=1) + tm.assert_index_equal(df3.index.levels[0], l0) + assert not df3.index.levels[0].equals(l0_expected) + tm.assert_index_equal(df3.index.levels[1], l1_expected) + assert not df3.index.levels[1].equals(l1) + + df4 = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0])) + + # TODO: untested + getattr(df4, fn)("US/Pacific", level=1) + + tm.assert_index_equal(df3.index.levels[0], l0) + assert not df3.index.levels[0].equals(l0_expected) + tm.assert_index_equal(df3.index.levels[1], l1_expected) + assert not df3.index.levels[1].equals(l1) + + # Bad Inputs + + # Not DatetimeIndex / PeriodIndex + with pytest.raises(TypeError, match="DatetimeIndex"): + df = DataFrame(index=int_idx) + getattr(df, fn)("US/Pacific") + + # Not DatetimeIndex / PeriodIndex + with pytest.raises(TypeError, match="DatetimeIndex"): + df = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0])) + getattr(df, fn)("US/Pacific", level=0) + + # Invalid level + with 
pytest.raises(ValueError, match="not valid"): + df = DataFrame(index=l0) + getattr(df, fn)("US/Pacific", level=1) + + @pytest.mark.parametrize("copy", [True, False]) + def test_tz_convert_copy_inplace_mutate(self, copy, frame_or_series): + # GH#6326 + obj = frame_or_series( + np.arange(0, 5), + index=date_range("20131027", periods=5, freq="1H", tz="Europe/Berlin"), + ) + orig = obj.copy() + result = obj.tz_convert("UTC", copy=copy) + expected = frame_or_series(np.arange(0, 5), index=obj.index.tz_convert("UTC")) + tm.assert_equal(result, expected) + tm.assert_equal(obj, orig) + assert result.index is not obj.index + assert result is not obj diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_tz_localize.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_tz_localize.py new file mode 100644 index 00000000..ed2b0b24 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_tz_localize.py @@ -0,0 +1,68 @@ +from datetime import timezone + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, + date_range, +) +import pandas._testing as tm + + +class TestTZLocalize: + # See also: + # test_tz_convert_and_localize in test_tz_convert + + def test_tz_localize(self, frame_or_series): + rng = date_range("1/1/2011", periods=100, freq="H") + + obj = DataFrame({"a": 1}, index=rng) + obj = tm.get_obj(obj, frame_or_series) + + result = obj.tz_localize("utc") + expected = DataFrame({"a": 1}, rng.tz_localize("UTC")) + expected = tm.get_obj(expected, frame_or_series) + + assert result.index.tz is timezone.utc + tm.assert_equal(result, expected) + + def test_tz_localize_axis1(self): + rng = date_range("1/1/2011", periods=100, freq="H") + + df = DataFrame({"a": 1}, index=rng) + + df = df.T + result = df.tz_localize("utc", axis=1) + assert result.columns.tz is timezone.utc + + expected = DataFrame({"a": 1}, rng.tz_localize("UTC")) + + tm.assert_frame_equal(result, expected.T) + + def test_tz_localize_naive(self, frame_or_series): + # Can't localize if already tz-aware + rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") + ts = Series(1, index=rng) + ts = frame_or_series(ts) + + with pytest.raises(TypeError, match="Already tz-aware"): + ts.tz_localize("US/Eastern") + + @pytest.mark.parametrize("copy", [True, False]) + def test_tz_localize_copy_inplace_mutate(self, copy, frame_or_series): + # GH#6326 + obj = frame_or_series( + np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=None) + ) + orig = obj.copy() + result = obj.tz_localize("UTC", copy=copy) + expected = frame_or_series( + np.arange(0, 5), + index=date_range("20131027", periods=5, freq="1H", tz="UTC"), + ) + tm.assert_equal(result, expected) + tm.assert_equal(obj, orig) + assert result.index is not obj.index + assert result is not obj diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_update.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_update.py new file mode 100644 index 00000000..5738a25f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_update.py @@ -0,0 +1,179 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Series, + date_range, +) +import pandas._testing as tm + + +class TestDataFrameUpdate: + def test_update_nan(self): + # #15593 #15617 + # test 1 + df1 = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", 
periods=3)}) + df2 = DataFrame({"A": [None, 2, 3]}) + expected = df1.copy() + df1.update(df2, overwrite=False) + + tm.assert_frame_equal(df1, expected) + + # test 2 + df1 = DataFrame({"A": [1.0, None, 3], "B": date_range("2000", periods=3)}) + df2 = DataFrame({"A": [None, 2, 3]}) + expected = DataFrame({"A": [1.0, 2, 3], "B": date_range("2000", periods=3)}) + df1.update(df2, overwrite=False) + + tm.assert_frame_equal(df1, expected) + + def test_update(self): + df = DataFrame( + [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]] + ) + + other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3]) + + df.update(other) + + expected = DataFrame( + [[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]] + ) + tm.assert_frame_equal(df, expected) + + def test_update_dtypes(self): + # gh 3016 + df = DataFrame( + [[1.0, 2.0, False, True], [4.0, 5.0, True, False]], + columns=["A", "B", "bool1", "bool2"], + ) + + other = DataFrame([[45, 45]], index=[0], columns=["A", "B"]) + df.update(other) + + expected = DataFrame( + [[45.0, 45.0, False, True], [4.0, 5.0, True, False]], + columns=["A", "B", "bool1", "bool2"], + ) + tm.assert_frame_equal(df, expected) + + def test_update_nooverwrite(self): + df = DataFrame( + [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]] + ) + + other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3]) + + df.update(other, overwrite=False) + + expected = DataFrame( + [[1.5, np.nan, 3], [1.5, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 3.0]] + ) + tm.assert_frame_equal(df, expected) + + def test_update_filtered(self): + df = DataFrame( + [[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]] + ) + + other = DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3]) + + df.update(other, filter_func=lambda x: x > 2) + + expected = DataFrame( + [[1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]] + ) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "bad_kwarg, exception, msg", + [ + # errors must be 'ignore' or 'raise' + ({"errors": "something"}, ValueError, "The parameter errors must.*"), + ({"join": "inner"}, NotImplementedError, "Only left join is supported"), + ], + ) + def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg): + df = DataFrame([[1.5, 1, 3.0]]) + with pytest.raises(exception, match=msg): + df.update(df, **bad_kwarg) + + def test_update_raise_on_overlap(self): + df = DataFrame( + [[1.5, 1, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]] + ) + + other = DataFrame([[2.0, np.nan], [np.nan, 7]], index=[1, 3], columns=[1, 2]) + with pytest.raises(ValueError, match="Data overlaps"): + df.update(other, errors="raise") + + def test_update_from_non_df(self): + d = {"a": Series([1, 2, 3, 4]), "b": Series([5, 6, 7, 8])} + df = DataFrame(d) + + d["a"] = Series([5, 6, 7, 8]) + df.update(d) + + expected = DataFrame(d) + + tm.assert_frame_equal(df, expected) + + d = {"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]} + df = DataFrame(d) + + d["a"] = [5, 6, 7, 8] + df.update(d) + + expected = DataFrame(d) + + tm.assert_frame_equal(df, expected) + + def test_update_datetime_tz(self): + # GH 25807 + result = DataFrame([pd.Timestamp("2019", tz="UTC")]) + with tm.assert_produces_warning(None): + result.update(result) + expected = DataFrame([pd.Timestamp("2019", tz="UTC")]) + tm.assert_frame_equal(result, expected) + + def test_update_with_different_dtype(self, using_copy_on_write): + # GH#3217 + df = 
DataFrame({"a": [1, 3], "b": [np.nan, 2]}) + df["c"] = np.nan + if using_copy_on_write: + df.update({"c": Series(["foo"], index=[0])}) + else: + with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"): + df["c"].update(Series(["foo"], index=[0])) + + expected = DataFrame({"a": [1, 3], "b": [np.nan, 2], "c": ["foo", np.nan]}) + tm.assert_frame_equal(df, expected) + + @td.skip_array_manager_invalid_test + def test_update_modify_view(self, using_copy_on_write): + # GH#47188 + df = DataFrame({"A": ["1", np.nan], "B": ["100", np.nan]}) + df2 = DataFrame({"A": ["a", "x"], "B": ["100", "200"]}) + df2_orig = df2.copy() + result_view = df2[:] + df2.update(df) + expected = DataFrame({"A": ["1", "x"], "B": ["100", "200"]}) + tm.assert_frame_equal(df2, expected) + if using_copy_on_write: + tm.assert_frame_equal(result_view, df2_orig) + else: + tm.assert_frame_equal(result_view, expected) + + def test_update_dt_column_with_NaT_create_column(self): + # GH#16713 + df = DataFrame({"A": [1, None], "B": [pd.NaT, pd.to_datetime("2016-01-01")]}) + df2 = DataFrame({"A": [2, 3]}) + df.update(df2, overwrite=False) + expected = DataFrame( + {"A": [1.0, 3.0], "B": [pd.NaT, pd.to_datetime("2016-01-01")]} + ) + tm.assert_frame_equal(df, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_value_counts.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_value_counts.py new file mode 100644 index 00000000..c05a9293 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_value_counts.py @@ -0,0 +1,191 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +def test_data_frame_value_counts_unsorted(): + df = pd.DataFrame( + {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]}, + index=["falcon", "dog", "cat", "ant"], + ) + + result = df.value_counts(sort=False) + expected = pd.Series( + data=[1, 2, 1], + index=pd.MultiIndex.from_arrays( + [(2, 4, 6), (2, 0, 0)], names=["num_legs", "num_wings"] + ), + name="count", + ) + + tm.assert_series_equal(result, expected) + + +def test_data_frame_value_counts_ascending(): + df = pd.DataFrame( + {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]}, + index=["falcon", "dog", "cat", "ant"], + ) + + result = df.value_counts(ascending=True) + expected = pd.Series( + data=[1, 1, 2], + index=pd.MultiIndex.from_arrays( + [(2, 6, 4), (2, 0, 0)], names=["num_legs", "num_wings"] + ), + name="count", + ) + + tm.assert_series_equal(result, expected) + + +def test_data_frame_value_counts_default(): + df = pd.DataFrame( + {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]}, + index=["falcon", "dog", "cat", "ant"], + ) + + result = df.value_counts() + expected = pd.Series( + data=[2, 1, 1], + index=pd.MultiIndex.from_arrays( + [(4, 2, 6), (0, 2, 0)], names=["num_legs", "num_wings"] + ), + name="count", + ) + + tm.assert_series_equal(result, expected) + + +def test_data_frame_value_counts_normalize(): + df = pd.DataFrame( + {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]}, + index=["falcon", "dog", "cat", "ant"], + ) + + result = df.value_counts(normalize=True) + expected = pd.Series( + data=[0.5, 0.25, 0.25], + index=pd.MultiIndex.from_arrays( + [(4, 2, 6), (0, 2, 0)], names=["num_legs", "num_wings"] + ), + name="proportion", + ) + + tm.assert_series_equal(result, expected) + + +def test_data_frame_value_counts_single_col_default(): + df = pd.DataFrame({"num_legs": [2, 4, 4, 6]}) + + result = df.value_counts() + expected = pd.Series( 
+ data=[2, 1, 1], + index=pd.MultiIndex.from_arrays([[4, 2, 6]], names=["num_legs"]), + name="count", + ) + + tm.assert_series_equal(result, expected) + + +def test_data_frame_value_counts_empty(): + df_no_cols = pd.DataFrame() + + result = df_no_cols.value_counts() + expected = pd.Series( + [], dtype=np.int64, name="count", index=np.array([], dtype=np.intp) + ) + + tm.assert_series_equal(result, expected) + + +def test_data_frame_value_counts_empty_normalize(): + df_no_cols = pd.DataFrame() + + result = df_no_cols.value_counts(normalize=True) + expected = pd.Series( + [], dtype=np.float64, name="proportion", index=np.array([], dtype=np.intp) + ) + + tm.assert_series_equal(result, expected) + + +def test_data_frame_value_counts_dropna_true(nulls_fixture): + # GH 41334 + df = pd.DataFrame( + { + "first_name": ["John", "Anne", "John", "Beth"], + "middle_name": ["Smith", nulls_fixture, nulls_fixture, "Louise"], + }, + ) + result = df.value_counts() + expected = pd.Series( + data=[1, 1], + index=pd.MultiIndex.from_arrays( + [("Beth", "John"), ("Louise", "Smith")], names=["first_name", "middle_name"] + ), + name="count", + ) + + tm.assert_series_equal(result, expected) + + +def test_data_frame_value_counts_dropna_false(nulls_fixture): + # GH 41334 + df = pd.DataFrame( + { + "first_name": ["John", "Anne", "John", "Beth"], + "middle_name": ["Smith", nulls_fixture, nulls_fixture, "Louise"], + }, + ) + + result = df.value_counts(dropna=False) + expected = pd.Series( + data=[1, 1, 1, 1], + index=pd.MultiIndex( + levels=[ + pd.Index(["Anne", "Beth", "John"]), + pd.Index(["Louise", "Smith", nulls_fixture]), + ], + codes=[[0, 1, 2, 2], [2, 0, 1, 2]], + names=["first_name", "middle_name"], + ), + name="count", + ) + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("columns", (["first_name", "middle_name"], [0, 1])) +def test_data_frame_value_counts_subset(nulls_fixture, columns): + # GH 50829 + df = pd.DataFrame( + { + columns[0]: ["John", "Anne", "John", "Beth"], + columns[1]: ["Smith", nulls_fixture, nulls_fixture, "Louise"], + }, + ) + result = df.value_counts(columns[0]) + expected = pd.Series( + data=[2, 1, 1], + index=pd.Index(["John", "Anne", "Beth"], name=columns[0]), + name="count", + ) + + tm.assert_series_equal(result, expected) + + +def test_value_counts_categorical_future_warning(): + # GH#54775 + df = pd.DataFrame({"a": [1, 2, 3]}, dtype="category") + result = df.value_counts() + expected = pd.Series( + 1, + index=pd.MultiIndex.from_arrays( + [pd.Index([1, 2, 3], name="a", dtype="category")] + ), + name="count", + ) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_values.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_values.py new file mode 100644 index 00000000..bbca4ee1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/methods/test_values.py @@ -0,0 +1,280 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + NaT, + Series, + Timestamp, + date_range, + period_range, +) +import pandas._testing as tm + + +class TestDataFrameValues: + @td.skip_array_manager_invalid_test + def test_values(self, float_frame, using_copy_on_write): + if using_copy_on_write: + with pytest.raises(ValueError, match="read-only"): + float_frame.values[:, 0] = 5.0 + assert (float_frame.values[:, 0] != 5).all() + else: + float_frame.values[:, 0] = 5.0 + assert (float_frame.values[:, 0] == 5).all() 
+ + def test_more_values(self, float_string_frame): + values = float_string_frame.values + assert values.shape[1] == len(float_string_frame.columns) + + def test_values_mixed_dtypes(self, float_frame, float_string_frame): + frame = float_frame + arr = frame.values + + frame_cols = frame.columns + for i, row in enumerate(arr): + for j, value in enumerate(row): + col = frame_cols[j] + if np.isnan(value): + assert np.isnan(frame[col].iloc[i]) + else: + assert value == frame[col].iloc[i] + + # mixed type + arr = float_string_frame[["foo", "A"]].values + assert arr[0, 0] == "bar" + + df = DataFrame({"complex": [1j, 2j, 3j], "real": [1, 2, 3]}) + arr = df.values + assert arr[0, 0] == 1j + + def test_values_duplicates(self): + df = DataFrame( + [[1, 2, "a", "b"], [1, 2, "a", "b"]], columns=["one", "one", "two", "two"] + ) + + result = df.values + expected = np.array([[1, 2, "a", "b"], [1, 2, "a", "b"]], dtype=object) + + tm.assert_numpy_array_equal(result, expected) + + def test_values_with_duplicate_columns(self): + df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=["x", "x"]) + result = df.values + expected = np.array([[1, 2.5], [3, 4.5]]) + assert (result == expected).all().all() + + @pytest.mark.parametrize("constructor", [date_range, period_range]) + def test_values_casts_datetimelike_to_object(self, constructor): + series = Series(constructor("2000-01-01", periods=10, freq="D")) + + expected = series.astype("object") + + df = DataFrame( + {"a": series, "b": np.random.default_rng(2).standard_normal(len(series))} + ) + + result = df.values.squeeze() + assert (result[:, 0] == expected.values).all() + + df = DataFrame({"a": series, "b": ["foo"] * len(series)}) + + result = df.values.squeeze() + assert (result[:, 0] == expected.values).all() + + def test_frame_values_with_tz(self): + tz = "US/Central" + df = DataFrame({"A": date_range("2000", periods=4, tz=tz)}) + result = df.values + expected = np.array( + [ + [Timestamp("2000-01-01", tz=tz)], + [Timestamp("2000-01-02", tz=tz)], + [Timestamp("2000-01-03", tz=tz)], + [Timestamp("2000-01-04", tz=tz)], + ] + ) + tm.assert_numpy_array_equal(result, expected) + + # two columns, homogeneous + + df["B"] = df["A"] + result = df.values + expected = np.concatenate([expected, expected], axis=1) + tm.assert_numpy_array_equal(result, expected) + + # three columns, heterogeneous + est = "US/Eastern" + df["C"] = df["A"].dt.tz_convert(est) + + new = np.array( + [ + [Timestamp("2000-01-01T01:00:00", tz=est)], + [Timestamp("2000-01-02T01:00:00", tz=est)], + [Timestamp("2000-01-03T01:00:00", tz=est)], + [Timestamp("2000-01-04T01:00:00", tz=est)], + ] + ) + expected = np.concatenate([expected, new], axis=1) + result = df.values + tm.assert_numpy_array_equal(result, expected) + + def test_interleave_with_tzaware(self, timezone_frame): + # interleave with object + result = timezone_frame.assign(D="foo").values + expected = np.array( + [ + [ + Timestamp("2013-01-01 00:00:00"), + Timestamp("2013-01-02 00:00:00"), + Timestamp("2013-01-03 00:00:00"), + ], + [ + Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"), + NaT, + Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"), + ], + [ + Timestamp("2013-01-01 00:00:00+0100", tz="CET"), + NaT, + Timestamp("2013-01-03 00:00:00+0100", tz="CET"), + ], + ["foo", "foo", "foo"], + ], + dtype=object, + ).T + tm.assert_numpy_array_equal(result, expected) + + # interleave with only datetime64[ns] + result = timezone_frame.values + expected = np.array( + [ + [ + Timestamp("2013-01-01 00:00:00"), + Timestamp("2013-01-02 
00:00:00"), + Timestamp("2013-01-03 00:00:00"), + ], + [ + Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"), + NaT, + Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"), + ], + [ + Timestamp("2013-01-01 00:00:00+0100", tz="CET"), + NaT, + Timestamp("2013-01-03 00:00:00+0100", tz="CET"), + ], + ], + dtype=object, + ).T + tm.assert_numpy_array_equal(result, expected) + + def test_values_interleave_non_unique_cols(self): + df = DataFrame( + [[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]], + columns=["x", "x"], + index=[1, 2], + ) + + df_unique = df.copy() + df_unique.columns = ["x", "y"] + assert df_unique.values.shape == df.values.shape + tm.assert_numpy_array_equal(df_unique.values[0], df.values[0]) + tm.assert_numpy_array_equal(df_unique.values[1], df.values[1]) + + def test_values_numeric_cols(self, float_frame): + float_frame["foo"] = "bar" + + values = float_frame[["A", "B", "C", "D"]].values + assert values.dtype == np.float64 + + def test_values_lcd(self, mixed_float_frame, mixed_int_frame): + # mixed lcd + values = mixed_float_frame[["A", "B", "C", "D"]].values + assert values.dtype == np.float64 + + values = mixed_float_frame[["A", "B", "C"]].values + assert values.dtype == np.float32 + + values = mixed_float_frame[["C"]].values + assert values.dtype == np.float16 + + # GH#10364 + # B uint64 forces float because there are other signed int types + values = mixed_int_frame[["A", "B", "C", "D"]].values + assert values.dtype == np.float64 + + values = mixed_int_frame[["A", "D"]].values + assert values.dtype == np.int64 + + # B uint64 forces float because there are other signed int types + values = mixed_int_frame[["A", "B", "C"]].values + assert values.dtype == np.float64 + + # as B and C are both unsigned, no forcing to float is needed + values = mixed_int_frame[["B", "C"]].values + assert values.dtype == np.uint64 + + values = mixed_int_frame[["A", "C"]].values + assert values.dtype == np.int32 + + values = mixed_int_frame[["C", "D"]].values + assert values.dtype == np.int64 + + values = mixed_int_frame[["A"]].values + assert values.dtype == np.int32 + + values = mixed_int_frame[["C"]].values + assert values.dtype == np.uint8 + + +class TestPrivateValues: + @td.skip_array_manager_invalid_test + def test_private_values_dt64tz(self, using_copy_on_write): + dta = date_range("2000", periods=4, tz="US/Central")._data.reshape(-1, 1) + + df = DataFrame(dta, columns=["A"]) + tm.assert_equal(df._values, dta) + + if using_copy_on_write: + assert not np.shares_memory(df._values._ndarray, dta._ndarray) + else: + # we have a view + assert np.shares_memory(df._values._ndarray, dta._ndarray) + + # TimedeltaArray + tda = dta - dta + df2 = df - df + tm.assert_equal(df2._values, tda) + + @td.skip_array_manager_invalid_test + def test_private_values_dt64tz_multicol(self, using_copy_on_write): + dta = date_range("2000", periods=8, tz="US/Central")._data.reshape(-1, 2) + + df = DataFrame(dta, columns=["A", "B"]) + tm.assert_equal(df._values, dta) + + if using_copy_on_write: + assert not np.shares_memory(df._values._ndarray, dta._ndarray) + else: + # we have a view + assert np.shares_memory(df._values._ndarray, dta._ndarray) + + # TimedeltaArray + tda = dta - dta + df2 = df - df + tm.assert_equal(df2._values, tda) + + def test_private_values_dt64_multiblock(self): + dta = date_range("2000", periods=8)._data + + df = DataFrame({"A": dta[:4]}, copy=False) + df["B"] = dta[4:] + + assert len(df._mgr.arrays) == 2 + + result = df._values + expected = dta.reshape(2, 4).T + 
tm.assert_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_alter_axes.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_alter_axes.py new file mode 100644 index 00000000..c68171ab --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_alter_axes.py @@ -0,0 +1,30 @@ +from datetime import datetime + +import pytz + +from pandas import DataFrame +import pandas._testing as tm + + +class TestDataFrameAlterAxes: + # Tests for setting index/columns attributes directly (i.e. __setattr__) + + def test_set_axis_setattr_index(self): + # GH 6785 + # set the index manually + + df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}]) + expected = df.set_index("ts") + df.index = df["ts"] + df.pop("ts") + tm.assert_frame_equal(df, expected) + + # Renaming + + def test_assign_columns(self, float_frame): + float_frame["hi"] = "there" + + df = float_frame.copy() + df.columns = ["foo", "bar", "baz", "quux", "foo2"] + tm.assert_series_equal(float_frame["C"], df["baz"], check_names=False) + tm.assert_series_equal(float_frame["hi"], df["foo2"], check_names=False) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_api.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_api.py new file mode 100644 index 00000000..aa7aa896 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_api.py @@ -0,0 +1,377 @@ +from copy import deepcopy +import inspect +import pydoc + +import numpy as np +import pytest + +from pandas._config.config import option_context + +import pandas as pd +from pandas import ( + DataFrame, + Series, + date_range, + timedelta_range, +) +import pandas._testing as tm + + +class TestDataFrameMisc: + def test_getitem_pop_assign_name(self, float_frame): + s = float_frame["A"] + assert s.name == "A" + + s = float_frame.pop("A") + assert s.name == "A" + + s = float_frame.loc[:, "B"] + assert s.name == "B" + + s2 = s.loc[:] + assert s2.name == "B" + + def test_get_axis(self, float_frame): + f = float_frame + assert f._get_axis_number(0) == 0 + assert f._get_axis_number(1) == 1 + assert f._get_axis_number("index") == 0 + assert f._get_axis_number("rows") == 0 + assert f._get_axis_number("columns") == 1 + + assert f._get_axis_name(0) == "index" + assert f._get_axis_name(1) == "columns" + assert f._get_axis_name("index") == "index" + assert f._get_axis_name("rows") == "index" + assert f._get_axis_name("columns") == "columns" + + assert f._get_axis(0) is f.index + assert f._get_axis(1) is f.columns + + with pytest.raises(ValueError, match="No axis named"): + f._get_axis_number(2) + + with pytest.raises(ValueError, match="No axis.*foo"): + f._get_axis_name("foo") + + with pytest.raises(ValueError, match="No axis.*None"): + f._get_axis_name(None) + + with pytest.raises(ValueError, match="No axis named"): + f._get_axis_number(None) + + def test_column_contains_raises(self, float_frame): + with pytest.raises(TypeError, match="unhashable type: 'Index'"): + float_frame.columns in float_frame + + def test_tab_completion(self): + # DataFrame whose columns are identifiers shall have them in __dir__. + df = DataFrame([list("abcd"), list("efgh")], columns=list("ABCD")) + for key in list("ABCD"): + assert key in dir(df) + assert isinstance(df.__getitem__("A"), Series) + + # DataFrame whose first-level columns are identifiers shall have + # them in __dir__. 
+        df = DataFrame(
+            [list("abcd"), list("efgh")],
+            columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
+        )
+        for key in list("ABCD"):
+            assert key in dir(df)
+        for key in list("EFGH"):
+            assert key not in dir(df)
+        assert isinstance(df.__getitem__("A"), DataFrame)
+
+    def test_display_max_dir_items(self):
+        # display.max_dir_items increases the number of columns that are in __dir__.
+        columns = ["a" + str(i) for i in range(420)]
+        values = [range(420), range(420)]
+        df = DataFrame(values, columns=columns)
+
+        # The default value for display.max_dir_items is 100
+        assert "a99" in dir(df)
+        assert "a100" not in dir(df)
+
+        with option_context("display.max_dir_items", 300):
+            df = DataFrame(values, columns=columns)
+            assert "a299" in dir(df)
+            assert "a300" not in dir(df)
+
+        with option_context("display.max_dir_items", None):
+            df = DataFrame(values, columns=columns)
+            assert "a419" in dir(df)
+
+    def test_not_hashable(self):
+        empty_frame = DataFrame()
+
+        df = DataFrame([1])
+        msg = "unhashable type: 'DataFrame'"
+        with pytest.raises(TypeError, match=msg):
+            hash(df)
+        with pytest.raises(TypeError, match=msg):
+            hash(empty_frame)
+
+    def test_column_name_contains_unicode_surrogate(self):
+        # GH 25509
+        colname = "\ud83d"
+        df = DataFrame({colname: []})
+        # this should not crash
+        assert colname not in dir(df)
+        assert df.columns[0] == colname
+
+    def test_new_empty_index(self):
+        df1 = DataFrame(np.random.default_rng(2).standard_normal((0, 3)))
+        df2 = DataFrame(np.random.default_rng(2).standard_normal((0, 3)))
+        df1.index.name = "foo"
+        assert df2.index.name is None
+
+    def test_get_agg_axis(self, float_frame):
+        cols = float_frame._get_agg_axis(0)
+        assert cols is float_frame.columns
+
+        idx = float_frame._get_agg_axis(1)
+        assert idx is float_frame.index
+
+        msg = r"Axis must be 0 or 1 \(got 2\)"
+        with pytest.raises(ValueError, match=msg):
+            float_frame._get_agg_axis(2)
+
+    def test_empty(self, float_frame, float_string_frame):
+        empty_frame = DataFrame()
+        assert empty_frame.empty
+
+        assert not float_frame.empty
+        assert not float_string_frame.empty
+
+        # corner case
+        df = DataFrame({"A": [1.0, 2.0, 3.0], "B": ["a", "b", "c"]}, index=np.arange(3))
+        del df["A"]
+        assert not df.empty
+
+    def test_len(self, float_frame):
+        assert len(float_frame) == len(float_frame.index)
+
+        # single block corner case
+        arr = float_frame[["A", "B"]].values
+        expected = float_frame.reindex(columns=["A", "B"]).values
+        tm.assert_almost_equal(arr, expected)
+
+    def test_axis_aliases(self, float_frame):
+        f = float_frame
+
+        # reg name
+        expected = f.sum(axis=0)
+        result = f.sum(axis="index")
+        tm.assert_series_equal(result, expected)
+
+        expected = f.sum(axis=1)
+        result = f.sum(axis="columns")
+        tm.assert_series_equal(result, expected)
+
+    def test_class_axis(self):
+        # GH 18147
+        # no exception and no empty docstring
+        assert pydoc.getdoc(DataFrame.index)
+        assert pydoc.getdoc(DataFrame.columns)
+
+    def test_series_put_names(self, float_string_frame):
+        series = float_string_frame._series
+        for k, v in series.items():
+            assert v.name == k
+
+    def test_empty_nonzero(self):
+        df = DataFrame([1, 2, 3])
+        assert not df.empty
+        df = DataFrame(index=[1], columns=[1])
+        assert not df.empty
+        df = DataFrame(index=["a", "b"], columns=["c", "d"]).dropna()
+        assert df.empty
+        assert df.T.empty
+
+    @pytest.mark.parametrize(
+        "df",
+        [
+            DataFrame(),
+            DataFrame(index=[1]),
+            DataFrame(columns=[1]),
+            DataFrame({1: []}),
+        ],
+    )
+    def test_empty_like(self, df):
+        assert df.empty
+        assert df.T.empty
+
+    
def test_with_datetimelikes(self): + df = DataFrame( + { + "A": date_range("20130101", periods=10), + "B": timedelta_range("1 day", periods=10), + } + ) + t = df.T + + result = t.dtypes.value_counts() + expected = Series({np.dtype("object"): 10}, name="count") + tm.assert_series_equal(result, expected) + + def test_deepcopy(self, float_frame): + cp = deepcopy(float_frame) + series = cp["A"] + series[:] = 10 + for idx, value in series.items(): + assert float_frame["A"][idx] != value + + def test_inplace_return_self(self): + # GH 1893 + + data = DataFrame( + {"a": ["foo", "bar", "baz", "qux"], "b": [0, 0, 1, 1], "c": [1, 2, 3, 4]} + ) + + def _check_f(base, f): + result = f(base) + assert result is None + + # -----DataFrame----- + + # set_index + f = lambda x: x.set_index("a", inplace=True) + _check_f(data.copy(), f) + + # reset_index + f = lambda x: x.reset_index(inplace=True) + _check_f(data.set_index("a"), f) + + # drop_duplicates + f = lambda x: x.drop_duplicates(inplace=True) + _check_f(data.copy(), f) + + # sort + f = lambda x: x.sort_values("b", inplace=True) + _check_f(data.copy(), f) + + # sort_index + f = lambda x: x.sort_index(inplace=True) + _check_f(data.copy(), f) + + # fillna + f = lambda x: x.fillna(0, inplace=True) + _check_f(data.copy(), f) + + # replace + f = lambda x: x.replace(1, 0, inplace=True) + _check_f(data.copy(), f) + + # rename + f = lambda x: x.rename({1: "foo"}, inplace=True) + _check_f(data.copy(), f) + + # -----Series----- + d = data.copy()["c"] + + # reset_index + f = lambda x: x.reset_index(inplace=True, drop=True) + _check_f(data.set_index("a")["c"], f) + + # fillna + f = lambda x: x.fillna(0, inplace=True) + _check_f(d.copy(), f) + + # replace + f = lambda x: x.replace(1, 0, inplace=True) + _check_f(d.copy(), f) + + # rename + f = lambda x: x.rename({1: "foo"}, inplace=True) + _check_f(d.copy(), f) + + def test_tab_complete_warning(self, ip, frame_or_series): + # GH 16409 + pytest.importorskip("IPython", minversion="6.0.0") + from IPython.core.completer import provisionalcompleter + + if frame_or_series is DataFrame: + code = "from pandas import DataFrame; obj = DataFrame()" + else: + code = "from pandas import Series; obj = Series(dtype=object)" + + ip.run_cell(code) + # GH 31324 newer jedi version raises Deprecation warning; + # appears resolved 2021-02-02 + with tm.assert_produces_warning(None, raise_on_extra_warnings=False): + with provisionalcompleter("ignore"): + list(ip.Completer.completions("obj.", 1)) + + def test_attrs(self): + df = DataFrame({"A": [2, 3]}) + assert df.attrs == {} + df.attrs["version"] = 1 + + result = df.rename(columns=str) + assert result.attrs == {"version": 1} + + @pytest.mark.parametrize("allows_duplicate_labels", [True, False, None]) + def test_set_flags( + self, allows_duplicate_labels, frame_or_series, using_copy_on_write + ): + obj = DataFrame({"A": [1, 2]}) + key = (0, 0) + if frame_or_series is Series: + obj = obj["A"] + key = 0 + + result = obj.set_flags(allows_duplicate_labels=allows_duplicate_labels) + + if allows_duplicate_labels is None: + # We don't update when it's not provided + assert result.flags.allows_duplicate_labels is True + else: + assert result.flags.allows_duplicate_labels is allows_duplicate_labels + + # We made a copy + assert obj is not result + + # We didn't mutate obj + assert obj.flags.allows_duplicate_labels is True + + # But we didn't copy data + if frame_or_series is Series: + assert np.may_share_memory(obj.values, result.values) + else: + assert np.may_share_memory(obj["A"].values, 
result["A"].values) + + result.iloc[key] = 0 + if using_copy_on_write: + assert obj.iloc[key] == 1 + else: + assert obj.iloc[key] == 0 + # set back to 1 for test below + result.iloc[key] = 1 + + # Now we do copy. + result = obj.set_flags( + copy=True, allows_duplicate_labels=allows_duplicate_labels + ) + result.iloc[key] = 10 + assert obj.iloc[key] == 1 + + def test_constructor_expanddim(self): + # GH#33628 accessing _constructor_expanddim should not raise NotImplementedError + # GH38782 pandas has no container higher than DataFrame (two-dim), so + # DataFrame._constructor_expand_dim, doesn't make sense, so is removed. + df = DataFrame() + + msg = "'DataFrame' object has no attribute '_constructor_expanddim'" + with pytest.raises(AttributeError, match=msg): + df._constructor_expanddim(np.arange(27).reshape(3, 3, 3)) + + def test_inspect_getmembers(self): + # GH38740 + pytest.importorskip("jinja2") + df = DataFrame() + msg = "DataFrame._data is deprecated" + with tm.assert_produces_warning( + DeprecationWarning, match=msg, check_stacklevel=False + ): + inspect.getmembers(df) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_arithmetic.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_arithmetic.py new file mode 100644 index 00000000..e5a8feb7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_arithmetic.py @@ -0,0 +1,2129 @@ +from collections import deque +from datetime import ( + datetime, + timezone, +) +from enum import Enum +import functools +import operator +import re + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, +) +import pandas._testing as tm +from pandas.core.computation import expressions as expr +from pandas.core.computation.expressions import _MIN_ELEMENTS +from pandas.tests.frame.common import ( + _check_mixed_float, + _check_mixed_int, +) +from pandas.util.version import Version + + +@pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"]) +def switch_numexpr_min_elements(request): + _MIN_ELEMENTS = expr._MIN_ELEMENTS + expr._MIN_ELEMENTS = request.param + yield request.param + expr._MIN_ELEMENTS = _MIN_ELEMENTS + + +class DummyElement: + def __init__(self, value, dtype) -> None: + self.value = value + self.dtype = np.dtype(dtype) + + def __array__(self): + return np.array(self.value, dtype=self.dtype) + + def __str__(self) -> str: + return f"DummyElement({self.value}, {self.dtype})" + + def __repr__(self) -> str: + return str(self) + + def astype(self, dtype, copy=False): + self.dtype = dtype + return self + + def view(self, dtype): + return type(self)(self.value.view(dtype), dtype) + + def any(self, axis=None): + return bool(self.value) + + +# ------------------------------------------------------------------- +# Comparisons + + +class TestFrameComparisons: + # Specifically _not_ flex-comparisons + + def test_comparison_with_categorical_dtype(self): + # GH#12564 + + df = DataFrame({"A": ["foo", "bar", "baz"]}) + exp = DataFrame({"A": [True, False, False]}) + + res = df == "foo" + tm.assert_frame_equal(res, exp) + + # casting to categorical shouldn't affect the result + df["A"] = df["A"].astype("category") + + res = df == "foo" + tm.assert_frame_equal(res, exp) + + def test_frame_in_list(self): + # GH#12689 this should raise at the DataFrame level, not blocks + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), columns=list("ABCD") + ) + msg 
= "The truth value of a DataFrame is ambiguous" + with pytest.raises(ValueError, match=msg): + df in [None] + + @pytest.mark.parametrize( + "arg, arg2", + [ + [ + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": pd.date_range("20010101", periods=10), + }, + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": np.random.default_rng(2).integers(10, size=10), + }, + ], + [ + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": np.random.default_rng(2).integers(10, size=10), + }, + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": pd.date_range("20010101", periods=10), + }, + ], + [ + { + "a": pd.date_range("20010101", periods=10), + "b": pd.date_range("20010101", periods=10), + }, + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": np.random.default_rng(2).integers(10, size=10), + }, + ], + [ + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": pd.date_range("20010101", periods=10), + }, + { + "a": pd.date_range("20010101", periods=10), + "b": pd.date_range("20010101", periods=10), + }, + ], + ], + ) + def test_comparison_invalid(self, arg, arg2): + # GH4968 + # invalid date/int comparisons + x = DataFrame(arg) + y = DataFrame(arg2) + # we expect the result to match Series comparisons for + # == and !=, inequalities should raise + result = x == y + expected = DataFrame( + {col: x[col] == y[col] for col in x.columns}, + index=x.index, + columns=x.columns, + ) + tm.assert_frame_equal(result, expected) + + result = x != y + expected = DataFrame( + {col: x[col] != y[col] for col in x.columns}, + index=x.index, + columns=x.columns, + ) + tm.assert_frame_equal(result, expected) + + msgs = [ + r"Invalid comparison between dtype=datetime64\[ns\] and ndarray", + "invalid type promotion", + ( + # npdev 1.20.0 + r"The DTypes and " + r" do not have a common DType." + ), + ] + msg = "|".join(msgs) + with pytest.raises(TypeError, match=msg): + x >= y + with pytest.raises(TypeError, match=msg): + x > y + with pytest.raises(TypeError, match=msg): + x < y + with pytest.raises(TypeError, match=msg): + x <= y + + @pytest.mark.parametrize( + "left, right", + [ + ("gt", "lt"), + ("lt", "gt"), + ("ge", "le"), + ("le", "ge"), + ("eq", "eq"), + ("ne", "ne"), + ], + ) + def test_timestamp_compare(self, left, right): + # make sure we can compare Timestamps on the right AND left hand side + # GH#4982 + df = DataFrame( + { + "dates1": pd.date_range("20010101", periods=10), + "dates2": pd.date_range("20010102", periods=10), + "intcol": np.random.default_rng(2).integers(1000000000, size=10), + "floatcol": np.random.default_rng(2).standard_normal(10), + "stringcol": [chr(100 + i) for i in range(10)], + } + ) + df.loc[np.random.default_rng(2).random(len(df)) > 0.5, "dates2"] = pd.NaT + left_f = getattr(operator, left) + right_f = getattr(operator, right) + + # no nats + if left in ["eq", "ne"]: + expected = left_f(df, pd.Timestamp("20010109")) + result = right_f(pd.Timestamp("20010109"), df) + tm.assert_frame_equal(result, expected) + else: + msg = ( + "'(<|>)=?' not supported between " + "instances of 'numpy.ndarray' and 'Timestamp'" + ) + with pytest.raises(TypeError, match=msg): + left_f(df, pd.Timestamp("20010109")) + with pytest.raises(TypeError, match=msg): + right_f(pd.Timestamp("20010109"), df) + # nats + if left in ["eq", "ne"]: + expected = left_f(df, pd.Timestamp("nat")) + result = right_f(pd.Timestamp("nat"), df) + tm.assert_frame_equal(result, expected) + else: + msg = ( + "'(<|>)=?' 
not supported between " + "instances of 'numpy.ndarray' and 'NaTType'" + ) + with pytest.raises(TypeError, match=msg): + left_f(df, pd.Timestamp("nat")) + with pytest.raises(TypeError, match=msg): + right_f(pd.Timestamp("nat"), df) + + def test_mixed_comparison(self): + # GH#13128, GH#22163 != datetime64 vs non-dt64 should be False, + # not raise TypeError + # (this appears to be fixed before GH#22163, not sure when) + df = DataFrame([["1989-08-01", 1], ["1989-08-01", 2]]) + other = DataFrame([["a", "b"], ["c", "d"]]) + + result = df == other + assert not result.any().any() + + result = df != other + assert result.all().all() + + def test_df_boolean_comparison_error(self): + # GH#4576, GH#22880 + # comparing DataFrame against list/tuple with len(obj) matching + # len(df.columns) is supported as of GH#22800 + df = DataFrame(np.arange(6).reshape((3, 2))) + + expected = DataFrame([[False, False], [True, False], [False, False]]) + + result = df == (2, 2) + tm.assert_frame_equal(result, expected) + + result = df == [2, 2] + tm.assert_frame_equal(result, expected) + + def test_df_float_none_comparison(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((8, 3)), + index=range(8), + columns=["A", "B", "C"], + ) + + result = df.__eq__(None) + assert not result.any().any() + + def test_df_string_comparison(self): + df = DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}]) + mask_a = df.a > 1 + tm.assert_frame_equal(df[mask_a], df.loc[1:1, :]) + tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :]) + + mask_b = df.b == "foo" + tm.assert_frame_equal(df[mask_b], df.loc[0:0, :]) + tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :]) + + +class TestFrameFlexComparisons: + # TODO: test_bool_flex_frame needs a better name + @pytest.mark.parametrize("op", ["eq", "ne", "gt", "lt", "ge", "le"]) + def test_bool_flex_frame(self, op): + data = np.random.default_rng(2).standard_normal((5, 3)) + other_data = np.random.default_rng(2).standard_normal((5, 3)) + df = DataFrame(data) + other = DataFrame(other_data) + ndim_5 = np.ones(df.shape + (1, 3)) + + # DataFrame + assert df.eq(df).values.all() + assert not df.ne(df).values.any() + f = getattr(df, op) + o = getattr(operator, op) + # No NAs + tm.assert_frame_equal(f(other), o(df, other)) + # Unaligned + part_o = other.loc[3:, 1:].copy() + rs = f(part_o) + xp = o(df, part_o.reindex(index=df.index, columns=df.columns)) + tm.assert_frame_equal(rs, xp) + # ndarray + tm.assert_frame_equal(f(other.values), o(df, other.values)) + # scalar + tm.assert_frame_equal(f(0), o(df, 0)) + # NAs + msg = "Unable to coerce to Series/DataFrame" + tm.assert_frame_equal(f(np.nan), o(df, np.nan)) + with pytest.raises(ValueError, match=msg): + f(ndim_5) + + @pytest.mark.parametrize("box", [np.array, Series]) + def test_bool_flex_series(self, box): + # Series + # list/tuple + data = np.random.default_rng(2).standard_normal((5, 3)) + df = DataFrame(data) + idx_ser = box(np.random.default_rng(2).standard_normal(5)) + col_ser = box(np.random.default_rng(2).standard_normal(3)) + + idx_eq = df.eq(idx_ser, axis=0) + col_eq = df.eq(col_ser) + idx_ne = df.ne(idx_ser, axis=0) + col_ne = df.ne(col_ser) + tm.assert_frame_equal(col_eq, df == Series(col_ser)) + tm.assert_frame_equal(col_eq, -col_ne) + tm.assert_frame_equal(idx_eq, -idx_ne) + tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T) + tm.assert_frame_equal(col_eq, df.eq(list(col_ser))) + tm.assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0)) + tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0)) + + idx_gt = 
df.gt(idx_ser, axis=0) + col_gt = df.gt(col_ser) + idx_le = df.le(idx_ser, axis=0) + col_le = df.le(col_ser) + + tm.assert_frame_equal(col_gt, df > Series(col_ser)) + tm.assert_frame_equal(col_gt, -col_le) + tm.assert_frame_equal(idx_gt, -idx_le) + tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T) + + idx_ge = df.ge(idx_ser, axis=0) + col_ge = df.ge(col_ser) + idx_lt = df.lt(idx_ser, axis=0) + col_lt = df.lt(col_ser) + tm.assert_frame_equal(col_ge, df >= Series(col_ser)) + tm.assert_frame_equal(col_ge, -col_lt) + tm.assert_frame_equal(idx_ge, -idx_lt) + tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T) + + idx_ser = Series(np.random.default_rng(2).standard_normal(5)) + col_ser = Series(np.random.default_rng(2).standard_normal(3)) + + def test_bool_flex_frame_na(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + # NA + df.loc[0, 0] = np.nan + rs = df.eq(df) + assert not rs.loc[0, 0] + rs = df.ne(df) + assert rs.loc[0, 0] + rs = df.gt(df) + assert not rs.loc[0, 0] + rs = df.lt(df) + assert not rs.loc[0, 0] + rs = df.ge(df) + assert not rs.loc[0, 0] + rs = df.le(df) + assert not rs.loc[0, 0] + + def test_bool_flex_frame_complex_dtype(self): + # complex + arr = np.array([np.nan, 1, 6, np.nan]) + arr2 = np.array([2j, np.nan, 7, None]) + df = DataFrame({"a": arr}) + df2 = DataFrame({"a": arr2}) + + msg = "|".join( + [ + "'>' not supported between instances of '.*' and 'complex'", + r"unorderable types: .*complex\(\)", # PY35 + ] + ) + with pytest.raises(TypeError, match=msg): + # inequalities are not well-defined for complex numbers + df.gt(df2) + with pytest.raises(TypeError, match=msg): + # regression test that we get the same behavior for Series + df["a"].gt(df2["a"]) + with pytest.raises(TypeError, match=msg): + # Check that we match numpy behavior here + df.values > df2.values + + rs = df.ne(df2) + assert rs.values.all() + + arr3 = np.array([2j, np.nan, None]) + df3 = DataFrame({"a": arr3}) + + with pytest.raises(TypeError, match=msg): + # inequalities are not well-defined for complex numbers + df3.gt(2j) + with pytest.raises(TypeError, match=msg): + # regression test that we get the same behavior for Series + df3["a"].gt(2j) + with pytest.raises(TypeError, match=msg): + # Check that we match numpy behavior here + df3.values > 2j + + def test_bool_flex_frame_object_dtype(self): + # corner, dtype=object + df1 = DataFrame({"col": ["foo", np.nan, "bar"]}) + df2 = DataFrame({"col": ["foo", datetime.now(), "bar"]}) + result = df1.ne(df2) + exp = DataFrame({"col": [False, True, False]}) + tm.assert_frame_equal(result, exp) + + def test_flex_comparison_nat(self): + # GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT, + # and _definitely_ not be NaN + df = DataFrame([pd.NaT]) + + result = df == pd.NaT + # result.iloc[0, 0] is a np.bool_ object + assert result.iloc[0, 0].item() is False + + result = df.eq(pd.NaT) + assert result.iloc[0, 0].item() is False + + result = df != pd.NaT + assert result.iloc[0, 0].item() is True + + result = df.ne(pd.NaT) + assert result.iloc[0, 0].item() is True + + @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"]) + def test_df_flex_cmp_constant_return_types(self, opname): + # GH 15077, non-empty DataFrame + df = DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]}) + const = 2 + + result = getattr(df, opname)(const).dtypes.value_counts() + tm.assert_series_equal( + result, Series([2], index=[np.dtype(bool)], name="count") + ) + + @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"]) + def 
test_df_flex_cmp_constant_return_types_empty(self, opname): + # GH 15077 empty DataFrame + df = DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]}) + const = 2 + + empty = df.iloc[:0] + result = getattr(empty, opname)(const).dtypes.value_counts() + tm.assert_series_equal( + result, Series([2], index=[np.dtype(bool)], name="count") + ) + + def test_df_flex_cmp_ea_dtype_with_ndarray_series(self): + ii = pd.IntervalIndex.from_breaks([1, 2, 3]) + df = DataFrame({"A": ii, "B": ii}) + + ser = Series([0, 0]) + res = df.eq(ser, axis=0) + + expected = DataFrame({"A": [False, False], "B": [False, False]}) + tm.assert_frame_equal(res, expected) + + ser2 = Series([1, 2], index=["A", "B"]) + res2 = df.eq(ser2, axis=1) + tm.assert_frame_equal(res2, expected) + + +# ------------------------------------------------------------------- +# Arithmetic + + +class TestFrameFlexArithmetic: + def test_floordiv_axis0(self): + # make sure we df.floordiv(ser, axis=0) matches column-wise result + arr = np.arange(3) + ser = Series(arr) + df = DataFrame({"A": ser, "B": ser}) + + result = df.floordiv(ser, axis=0) + + expected = DataFrame({col: df[col] // ser for col in df.columns}) + + tm.assert_frame_equal(result, expected) + + result2 = df.floordiv(ser.values, axis=0) + tm.assert_frame_equal(result2, expected) + + @pytest.mark.parametrize("opname", ["floordiv", "pow"]) + def test_floordiv_axis0_numexpr_path(self, opname, request): + # case that goes through numexpr and has to fall back to masked_arith_op + ne = pytest.importorskip("numexpr") + if ( + Version(ne.__version__) >= Version("2.8.7") + and opname == "pow" + and "python" in request.node.callspec.id + ): + request.node.add_marker( + pytest.mark.xfail(reason="https://github.com/pydata/numexpr/issues/454") + ) + + op = getattr(operator, opname) + + arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100 + df = DataFrame(arr) + df["C"] = 1.0 + + ser = df[0] + result = getattr(df, opname)(ser, axis=0) + + expected = DataFrame({col: op(df[col], ser) for col in df.columns}) + tm.assert_frame_equal(result, expected) + + result2 = getattr(df, opname)(ser.values, axis=0) + tm.assert_frame_equal(result2, expected) + + def test_df_add_td64_columnwise(self): + # GH 22534 Check that column-wise addition broadcasts correctly + dti = pd.date_range("2016-01-01", periods=10) + tdi = pd.timedelta_range("1", periods=10) + tser = Series(tdi) + df = DataFrame({0: dti, 1: tdi}) + + result = df.add(tser, axis=0) + expected = DataFrame({0: dti + tdi, 1: tdi + tdi}) + tm.assert_frame_equal(result, expected) + + def test_df_add_flex_filled_mixed_dtypes(self): + # GH 19611 + dti = pd.date_range("2016-01-01", periods=3) + ser = Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]") + df = DataFrame({"A": dti, "B": ser}) + other = DataFrame({"A": ser, "B": ser}) + fill = pd.Timedelta(days=1).to_timedelta64() + result = df.add(other, fill_value=fill) + + expected = DataFrame( + { + "A": Series( + ["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]" + ), + "B": ser * 2, + } + ) + tm.assert_frame_equal(result, expected) + + def test_arith_flex_frame( + self, all_arithmetic_operators, float_frame, mixed_float_frame + ): + # one instance of parametrized fixture + op = all_arithmetic_operators + + def f(x, y): + # r-versions not in operator-stdlib; get op without "r" and invert + if op.startswith("__r"): + return getattr(operator, op.replace("__r", "__"))(y, x) + return getattr(operator, op)(x, y) + + result = getattr(float_frame, op)(2 * float_frame) + 
expected = f(float_frame, 2 * float_frame)
+        tm.assert_frame_equal(result, expected)
+
+        # vs mix float
+        result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
+        expected = f(mixed_float_frame, 2 * mixed_float_frame)
+        tm.assert_frame_equal(result, expected)
+        _check_mixed_float(result, dtype={"C": None})
+
+    @pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
+    def test_arith_flex_frame_mixed(
+        self,
+        op,
+        int_frame,
+        mixed_int_frame,
+        mixed_float_frame,
+        switch_numexpr_min_elements,
+    ):
+        f = getattr(operator, op)
+
+        # vs mix int
+        result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
+        expected = f(mixed_int_frame, 2 + mixed_int_frame)
+
+        # no overflow in the uint
+        dtype = None
+        if op in ["__sub__"]:
+            dtype = {"B": "uint64", "C": None}
+        elif op in ["__add__", "__mul__"]:
+            dtype = {"C": None}
+        if expr.USE_NUMEXPR and switch_numexpr_min_elements == 0:
+            # when using numexpr, the casting rules are slightly different:
+            # in the `2 + mixed_int_frame` operation, the int32 column becomes
+            # an int64 column (not preserving dtype in operation with Python
+            # scalar), and then the int32/int64 combo results in an int64 result
+            dtype["A"] = (2 + mixed_int_frame)["A"].dtype
+        tm.assert_frame_equal(result, expected)
+        _check_mixed_int(result, dtype=dtype)
+
+        # vs mix float
+        result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
+        expected = f(mixed_float_frame, 2 * mixed_float_frame)
+        tm.assert_frame_equal(result, expected)
+        _check_mixed_float(result, dtype={"C": None})
+
+        # vs plain int
+        result = getattr(int_frame, op)(2 * int_frame)
+        expected = f(int_frame, 2 * int_frame)
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize("dim", range(3, 6))
+    def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame, dim):
+        # one instance of parametrized fixture
+        op = all_arithmetic_operators
+
+        # Check that arrays with dim >= 3 raise
+        arr = np.ones((1,) * dim)
+        msg = "Unable to coerce to Series/DataFrame"
+        with pytest.raises(ValueError, match=msg):
+            getattr(float_frame, op)(arr)
+
+    def test_arith_flex_frame_corner(self, float_frame):
+        const_add = float_frame.add(1)
+        tm.assert_frame_equal(const_add, float_frame + 1)
+
+        # corner cases
+        result = float_frame.add(float_frame[:0])
+        tm.assert_frame_equal(result, float_frame * np.nan)
+
+        result = float_frame[:0].add(float_frame)
+        tm.assert_frame_equal(result, float_frame * np.nan)
+
+        with pytest.raises(NotImplementedError, match="fill_value"):
+            float_frame.add(float_frame.iloc[0], fill_value=3)
+
+        with pytest.raises(NotImplementedError, match="fill_value"):
+            float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
+
+    @pytest.mark.parametrize("op", ["add", "sub", "mul", "mod"])
+    def test_arith_flex_series_ops(self, simple_frame, op):
+        # after arithmetic refactor, add truediv here
+        df = simple_frame
+
+        row = df.xs("a")
+        col = df["two"]
+        f = getattr(df, op)
+        op = getattr(operator, op)
+        tm.assert_frame_equal(f(row), op(df, row))
+        tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
+
+    def test_arith_flex_series(self, simple_frame):
+        df = simple_frame
+
+        row = df.xs("a")
+        col = df["two"]
+        # special case for some reason
+        tm.assert_frame_equal(df.add(row, axis=None), df + row)
+
+        # cases which will be refactored after big arithmetic refactor
+        tm.assert_frame_equal(df.div(row), df / row)
+        tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
+
+    @pytest.mark.parametrize("dtype", ["int64", "float64"])
+    def
test_arith_flex_series_broadcasting(self, dtype): + # broadcasting issue in GH 7325 + df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype=dtype) + expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]]) + result = df.div(df[0], axis="index") + tm.assert_frame_equal(result, expected) + + def test_arith_flex_zero_len_raises(self): + # GH 19522 passing fill_value to frame flex arith methods should + # raise even in the zero-length special cases + ser_len0 = Series([], dtype=object) + df_len0 = DataFrame(columns=["A", "B"]) + df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) + + with pytest.raises(NotImplementedError, match="fill_value"): + df.add(ser_len0, fill_value="E") + + with pytest.raises(NotImplementedError, match="fill_value"): + df_len0.sub(df["A"], axis=None, fill_value=3) + + def test_flex_add_scalar_fill_value(self): + # GH#12723 + dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float") + df = DataFrame({"foo": dat}, index=range(6)) + + exp = df.fillna(0).add(2) + res = df.add(2, fill_value=0) + tm.assert_frame_equal(res, exp) + + def test_sub_alignment_with_duplicate_index(self): + # GH#5185 dup aligning operations should work + df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3]) + df2 = DataFrame([1, 2, 3], index=[1, 2, 3]) + expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3]) + result = df1.sub(df2) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("op", ["__add__", "__mul__", "__sub__", "__truediv__"]) + def test_arithmetic_with_duplicate_columns(self, op): + # operations + df = DataFrame({"A": np.arange(10), "B": np.random.default_rng(2).random(10)}) + expected = getattr(df, op)(df) + expected.columns = ["A", "A"] + df.columns = ["A", "A"] + result = getattr(df, op)(df) + tm.assert_frame_equal(result, expected) + str(result) + result.dtypes + + @pytest.mark.parametrize("level", [0, None]) + def test_broadcast_multiindex(self, level): + # GH34388 + df1 = DataFrame({"A": [0, 1, 2], "B": [1, 2, 3]}) + df1.columns = df1.columns.set_names("L1") + + df2 = DataFrame({("A", "C"): [0, 0, 0], ("A", "D"): [0, 0, 0]}) + df2.columns = df2.columns.set_names(["L1", "L2"]) + + result = df1.add(df2, level=level) + expected = DataFrame({("A", "C"): [0, 1, 2], ("A", "D"): [0, 1, 2]}) + expected.columns = expected.columns.set_names(["L1", "L2"]) + + tm.assert_frame_equal(result, expected) + + def test_frame_multiindex_operations(self): + # GH 43321 + df = DataFrame( + {2010: [1, 2, 3], 2020: [3, 4, 5]}, + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + + series = Series( + [0.4], + index=MultiIndex.from_product([["b"], ["a"]], names=["mod", "scen"]), + ) + + expected = DataFrame( + {2010: [1.4, 2.4, 3.4], 2020: [3.4, 4.4, 5.4]}, + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + result = df.add(series, axis=0) + + tm.assert_frame_equal(result, expected) + + def test_frame_multiindex_operations_series_index_to_frame_index(self): + # GH 43321 + df = DataFrame( + {2010: [1], 2020: [3]}, + index=MultiIndex.from_product([["a"], ["b"]], names=["scen", "mod"]), + ) + + series = Series( + [10.0, 20.0, 30.0], + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + + expected = DataFrame( + {2010: [11.0, 21, 31.0], 2020: [13.0, 23.0, 33.0]}, + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + result = df.add(series, axis=0) + + tm.assert_frame_equal(result, 
expected) + + def test_frame_multiindex_operations_no_align(self): + df = DataFrame( + {2010: [1, 2, 3], 2020: [3, 4, 5]}, + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + + series = Series( + [0.4], + index=MultiIndex.from_product([["c"], ["a"]], names=["mod", "scen"]), + ) + + expected = DataFrame( + {2010: np.nan, 2020: np.nan}, + index=MultiIndex.from_tuples( + [ + ("a", "b", 0), + ("a", "b", 1), + ("a", "b", 2), + ("a", "c", np.nan), + ], + names=["scen", "mod", "id"], + ), + ) + result = df.add(series, axis=0) + + tm.assert_frame_equal(result, expected) + + def test_frame_multiindex_operations_part_align(self): + df = DataFrame( + {2010: [1, 2, 3], 2020: [3, 4, 5]}, + index=MultiIndex.from_tuples( + [ + ("a", "b", 0), + ("a", "b", 1), + ("a", "c", 2), + ], + names=["scen", "mod", "id"], + ), + ) + + series = Series( + [0.4], + index=MultiIndex.from_product([["b"], ["a"]], names=["mod", "scen"]), + ) + + expected = DataFrame( + {2010: [1.4, 2.4, np.nan], 2020: [3.4, 4.4, np.nan]}, + index=MultiIndex.from_tuples( + [ + ("a", "b", 0), + ("a", "b", 1), + ("a", "c", 2), + ], + names=["scen", "mod", "id"], + ), + ) + result = df.add(series, axis=0) + + tm.assert_frame_equal(result, expected) + + +class TestFrameArithmetic: + def test_td64_op_nat_casting(self): + # Make sure we don't accidentally treat timedelta64(NaT) as datetime64 + # when calling dispatch_to_series in DataFrame arithmetic + ser = Series(["NaT", "NaT"], dtype="timedelta64[ns]") + df = DataFrame([[1, 2], [3, 4]]) + + result = df * ser + expected = DataFrame({0: ser, 1: ser}) + tm.assert_frame_equal(result, expected) + + def test_df_add_2d_array_rowlike_broadcasts(self): + # GH#23000 + arr = np.arange(6).reshape(3, 2) + df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"]) + + rowlike = arr[[1], :] # shape --> (1, ncols) + assert rowlike.shape == (1, df.shape[1]) + + expected = DataFrame( + [[2, 4], [4, 6], [6, 8]], + columns=df.columns, + index=df.index, + # specify dtype explicitly to avoid failing + # on 32bit builds + dtype=arr.dtype, + ) + result = df + rowlike + tm.assert_frame_equal(result, expected) + result = rowlike + df + tm.assert_frame_equal(result, expected) + + def test_df_add_2d_array_collike_broadcasts(self): + # GH#23000 + arr = np.arange(6).reshape(3, 2) + df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"]) + + collike = arr[:, [1]] # shape --> (nrows, 1) + assert collike.shape == (df.shape[0], 1) + + expected = DataFrame( + [[1, 2], [5, 6], [9, 10]], + columns=df.columns, + index=df.index, + # specify dtype explicitly to avoid failing + # on 32bit builds + dtype=arr.dtype, + ) + result = df + collike + tm.assert_frame_equal(result, expected) + result = collike + df + tm.assert_frame_equal(result, expected) + + def test_df_arith_2d_array_rowlike_broadcasts( + self, request, all_arithmetic_operators, using_array_manager + ): + # GH#23000 + opname = all_arithmetic_operators + + if using_array_manager and opname in ("__rmod__", "__rfloordiv__"): + # TODO(ArrayManager) decide on dtypes + td.mark_array_manager_not_yet_implemented(request) + + arr = np.arange(6).reshape(3, 2) + df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"]) + + rowlike = arr[[1], :] # shape --> (1, ncols) + assert rowlike.shape == (1, df.shape[1]) + + exvals = [ + getattr(df.loc["A"], opname)(rowlike.squeeze()), + getattr(df.loc["B"], opname)(rowlike.squeeze()), + getattr(df.loc["C"], opname)(rowlike.squeeze()), + ] + + expected = DataFrame(exvals, 
columns=df.columns, index=df.index) + + result = getattr(df, opname)(rowlike) + tm.assert_frame_equal(result, expected) + + def test_df_arith_2d_array_collike_broadcasts( + self, request, all_arithmetic_operators, using_array_manager + ): + # GH#23000 + opname = all_arithmetic_operators + + if using_array_manager and opname in ("__rmod__", "__rfloordiv__"): + # TODO(ArrayManager) decide on dtypes + td.mark_array_manager_not_yet_implemented(request) + + arr = np.arange(6).reshape(3, 2) + df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"]) + + collike = arr[:, [1]] # shape --> (nrows, 1) + assert collike.shape == (df.shape[0], 1) + + exvals = { + True: getattr(df[True], opname)(collike.squeeze()), + False: getattr(df[False], opname)(collike.squeeze()), + } + + dtype = None + if opname in ["__rmod__", "__rfloordiv__"]: + # Series ops may return mixed int/float dtypes in cases where + # DataFrame op will return all-float. So we upcast `expected` + dtype = np.common_type(*(x.values for x in exvals.values())) + + expected = DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype) + + result = getattr(df, opname)(collike) + tm.assert_frame_equal(result, expected) + + def test_df_bool_mul_int(self): + # GH 22047, GH 22163 multiplication by 1 should result in int dtype, + # not object dtype + df = DataFrame([[False, True], [False, False]]) + result = df * 1 + + # On appveyor this comes back as np.int32 instead of np.int64, + # so we check dtype.kind instead of just dtype + kinds = result.dtypes.apply(lambda x: x.kind) + assert (kinds == "i").all() + + result = 1 * df + kinds = result.dtypes.apply(lambda x: x.kind) + assert (kinds == "i").all() + + def test_arith_mixed(self): + left = DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]}) + + result = left + left + expected = DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("col", ["A", "B"]) + def test_arith_getitem_commute(self, all_arithmetic_functions, col): + df = DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]}) + result = all_arithmetic_functions(df, 1)[col] + expected = all_arithmetic_functions(df[col], 1) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])] + ) + def test_arith_alignment_non_pandas_object(self, values): + # GH#17901 + df = DataFrame({"A": [1, 1], "B": [1, 1]}) + expected = DataFrame({"A": [2, 2], "B": [3, 3]}) + result = df + values + tm.assert_frame_equal(result, expected) + + def test_arith_non_pandas_object(self): + df = DataFrame( + np.arange(1, 10, dtype="f8").reshape(3, 3), + columns=["one", "two", "three"], + index=["a", "b", "c"], + ) + + val1 = df.xs("a").values + added = DataFrame(df.values + val1, index=df.index, columns=df.columns) + tm.assert_frame_equal(df + val1, added) + + added = DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns) + tm.assert_frame_equal(df.add(val1, axis=0), added) + + val2 = list(df["two"]) + + added = DataFrame(df.values + val2, index=df.index, columns=df.columns) + tm.assert_frame_equal(df + val2, added) + + added = DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns) + tm.assert_frame_equal(df.add(val2, axis="index"), added) + + val3 = np.random.default_rng(2).random(df.shape) + added = DataFrame(df.values + val3, index=df.index, columns=df.columns) + tm.assert_frame_equal(df.add(val3), added) + + def test_operations_with_interval_categories_index(self, 
all_arithmetic_operators):
+        # GH#27415
+        op = all_arithmetic_operators
+        ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
+        data = [1, 2]
+        df = DataFrame([data], columns=ind)
+        num = 10
+        result = getattr(df, op)(num)
+        expected = DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
+        tm.assert_frame_equal(result, expected)
+
+    def test_frame_with_frame_reindex(self):
+        # GH#31623
+        df = DataFrame(
+            {
+                "foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
+                "bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
+            },
+            columns=["foo", "bar"],
+        )
+        df2 = df[["foo"]]
+
+        result = df - df2
+
+        expected = DataFrame(
+            {"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
+            columns=["bar", "foo"],
+        )
+        tm.assert_frame_equal(result, expected)
+
+
+class TestFrameArithmeticUnsorted:
+    def test_boolean_comparison(self):
+        # GH#4576
+        # boolean comparisons with a tuple/list give unexpected results
+        df = DataFrame(np.arange(6).reshape((3, 2)))
+        b = np.array([2, 2])
+        b_r = np.atleast_2d([2, 2])
+        b_c = b_r.T
+        lst = [2, 2, 2]
+        tup = tuple(lst)
+
+        # gt
+        expected = DataFrame([[False, False], [False, True], [True, True]])
+        result = df > b
+        tm.assert_frame_equal(result, expected)
+
+        result = df.values > b
+        tm.assert_numpy_array_equal(result, expected.values)
+
+        msg1d = "Unable to coerce to Series, length must be 2: given 3"
+        msg2d = "Unable to coerce to DataFrame, shape must be"
+        msg2db = "operands could not be broadcast together with shapes"
+        with pytest.raises(ValueError, match=msg1d):
+            # wrong shape
+            df > lst
+
+        with pytest.raises(ValueError, match=msg1d):
+            # wrong shape
+            df > tup
+
+        # broadcasts like ndarray (GH#23000)
+        result = df > b_r
+        tm.assert_frame_equal(result, expected)
+
+        result = df.values > b_r
+        tm.assert_numpy_array_equal(result, expected.values)
+
+        with pytest.raises(ValueError, match=msg2d):
+            df > b_c
+
+        with pytest.raises(ValueError, match=msg2db):
+            df.values > b_c
+
+        # ==
+        expected = DataFrame([[False, False], [True, False], [False, False]])
+        result = df == b
+        tm.assert_frame_equal(result, expected)
+
+        with pytest.raises(ValueError, match=msg1d):
+            df == lst
+
+        with pytest.raises(ValueError, match=msg1d):
+            df == tup
+
+        # broadcasts like ndarray (GH#23000)
+        result = df == b_r
+        tm.assert_frame_equal(result, expected)
+
+        result = df.values == b_r
+        tm.assert_numpy_array_equal(result, expected.values)
+
+        with pytest.raises(ValueError, match=msg2d):
+            df == b_c
+
+        assert df.values.shape != b_c.shape
+
+        # with alignment
+        df = DataFrame(
+            np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc")
+        )
+        expected.index = df.index
+        expected.columns = df.columns
+
+        with pytest.raises(ValueError, match=msg1d):
+            df == lst
+
+        with pytest.raises(ValueError, match=msg1d):
+            df == tup
+
+    def test_inplace_ops_alignment(self):
+        # inplace ops / ops alignment
+        # GH 8511
+
+        columns = list("abcdefg")
+        X_orig = DataFrame(
+            np.arange(10 * len(columns)).reshape(-1, len(columns)),
+            columns=columns,
+            index=range(10),
+        )
+        Z = 100 * X_orig.iloc[:, 1:-1].copy()
+        block1 = list("bedcf")
+        subs = list("bcdef")
+
+        # add
+        X = X_orig.copy()
+        result1 = (X[block1] + Z).reindex(columns=subs)
+
+        X[block1] += Z
+        result2 = X.reindex(columns=subs)
+
+        X = X_orig.copy()
+        result3 = (X[block1] + Z[block1]).reindex(columns=subs)
+
+        X[block1] += Z[block1]
+        result4 = X.reindex(columns=subs)
+
+        tm.assert_frame_equal(result1, result2)
+        tm.assert_frame_equal(result1, result3)
+        tm.assert_frame_equal(result1, result4)
+
+        # sub
+        X = X_orig.copy()
+        result1 = (X[block1] - Z).reindex(columns=subs)
+
+        X[block1] -= Z
+        result2 = X.reindex(columns=subs)
+
+        X = X_orig.copy()
+        result3 = (X[block1] -
Z[block1]).reindex(columns=subs) + + X[block1] -= Z[block1] + result4 = X.reindex(columns=subs) + + tm.assert_frame_equal(result1, result2) + tm.assert_frame_equal(result1, result3) + tm.assert_frame_equal(result1, result4) + + def test_inplace_ops_identity(self): + # GH 5104 + # make sure that we are actually changing the object + s_orig = Series([1, 2, 3]) + df_orig = DataFrame( + np.random.default_rng(2).integers(0, 5, size=10).reshape(-1, 5) + ) + + # no dtype change + s = s_orig.copy() + s2 = s + s += 1 + tm.assert_series_equal(s, s2) + tm.assert_series_equal(s_orig + 1, s) + assert s is s2 + assert s._mgr is s2._mgr + + df = df_orig.copy() + df2 = df + df += 1 + tm.assert_frame_equal(df, df2) + tm.assert_frame_equal(df_orig + 1, df) + assert df is df2 + assert df._mgr is df2._mgr + + # dtype change + s = s_orig.copy() + s2 = s + s += 1.5 + tm.assert_series_equal(s, s2) + tm.assert_series_equal(s_orig + 1.5, s) + + df = df_orig.copy() + df2 = df + df += 1.5 + tm.assert_frame_equal(df, df2) + tm.assert_frame_equal(df_orig + 1.5, df) + assert df is df2 + assert df._mgr is df2._mgr + + # mixed dtype + arr = np.random.default_rng(2).integers(0, 10, size=5) + df_orig = DataFrame({"A": arr.copy(), "B": "foo"}) + df = df_orig.copy() + df2 = df + df["A"] += 1 + expected = DataFrame({"A": arr.copy() + 1, "B": "foo"}) + tm.assert_frame_equal(df, expected) + tm.assert_frame_equal(df2, expected) + assert df._mgr is df2._mgr + + df = df_orig.copy() + df2 = df + df["A"] += 1.5 + expected = DataFrame({"A": arr.copy() + 1.5, "B": "foo"}) + tm.assert_frame_equal(df, expected) + tm.assert_frame_equal(df2, expected) + assert df._mgr is df2._mgr + + @pytest.mark.parametrize( + "op", + [ + "add", + "and", + pytest.param( + "div", + marks=pytest.mark.xfail( + raises=AttributeError, reason="__idiv__ not implemented" + ), + ), + "floordiv", + "mod", + "mul", + "or", + "pow", + "sub", + "truediv", + "xor", + ], + ) + def test_inplace_ops_identity2(self, op): + df = DataFrame({"a": [1.0, 2.0, 3.0], "b": [1, 2, 3]}) + + operand = 2 + if op in ("and", "or", "xor"): + # cannot use floats for boolean ops + df["a"] = [True, False, True] + + df_copy = df.copy() + iop = f"__i{op}__" + op = f"__{op}__" + + # no id change and value is correct + getattr(df, iop)(operand) + expected = getattr(df_copy, op)(operand) + tm.assert_frame_equal(df, expected) + expected = id(df) + assert id(df) == expected + + @pytest.mark.parametrize( + "val", + [ + [1, 2, 3], + (1, 2, 3), + np.array([1, 2, 3], dtype=np.int64), + range(1, 4), + ], + ) + def test_alignment_non_pandas(self, val): + index = ["A", "B", "C"] + columns = ["X", "Y", "Z"] + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=index, + columns=columns, + ) + + align = DataFrame._align_for_op + + expected = DataFrame({"X": val, "Y": val, "Z": val}, index=df.index) + tm.assert_frame_equal(align(df, val, axis=0)[1], expected) + + expected = DataFrame( + {"X": [1, 1, 1], "Y": [2, 2, 2], "Z": [3, 3, 3]}, index=df.index + ) + tm.assert_frame_equal(align(df, val, axis=1)[1], expected) + + @pytest.mark.parametrize("val", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)]) + def test_alignment_non_pandas_length_mismatch(self, val): + index = ["A", "B", "C"] + columns = ["X", "Y", "Z"] + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=index, + columns=columns, + ) + + align = DataFrame._align_for_op + # length mismatch + msg = "Unable to coerce to Series, length must be 3: given 2" + with pytest.raises(ValueError, match=msg): + 
align(df, val, axis=0) + + with pytest.raises(ValueError, match=msg): + align(df, val, axis=1) + + def test_alignment_non_pandas_index_columns(self): + index = ["A", "B", "C"] + columns = ["X", "Y", "Z"] + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=index, + columns=columns, + ) + + align = DataFrame._align_for_op + val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + tm.assert_frame_equal( + align(df, val, axis=0)[1], + DataFrame(val, index=df.index, columns=df.columns), + ) + tm.assert_frame_equal( + align(df, val, axis=1)[1], + DataFrame(val, index=df.index, columns=df.columns), + ) + + # shape mismatch + msg = "Unable to coerce to DataFrame, shape must be" + val = np.array([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(ValueError, match=msg): + align(df, val, axis=0) + + with pytest.raises(ValueError, match=msg): + align(df, val, axis=1) + + val = np.zeros((3, 3, 3)) + msg = re.escape( + "Unable to coerce to Series/DataFrame, dimension must be <= 2: (3, 3, 3)" + ) + with pytest.raises(ValueError, match=msg): + align(df, val, axis=0) + with pytest.raises(ValueError, match=msg): + align(df, val, axis=1) + + def test_no_warning(self, all_arithmetic_operators): + df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]}) + b = df["B"] + with tm.assert_produces_warning(None): + getattr(df, all_arithmetic_operators)(b) + + def test_dunder_methods_binary(self, all_arithmetic_operators): + # GH#??? frame.__foo__ should only accept one argument + df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]}) + b = df["B"] + with pytest.raises(TypeError, match="takes 2 positional arguments"): + getattr(df, all_arithmetic_operators)(b, 0) + + def test_align_int_fill_bug(self): + # GH#910 + X = np.arange(10 * 10, dtype="float64").reshape(10, 10) + Y = np.ones((10, 1), dtype=int) + + df1 = DataFrame(X) + df1["0.X"] = Y.squeeze() + + df2 = df1.astype(float) + + result = df1 - df1.mean() + expected = df2 - df2.mean() + tm.assert_frame_equal(result, expected) + + +def test_pow_with_realignment(): + # GH#32685 pow has special semantics for operating with null values + left = DataFrame({"A": [0, 1, 2]}) + right = DataFrame(index=[0, 1, 2]) + + result = left**right + expected = DataFrame({"A": [np.nan, 1.0, np.nan]}) + tm.assert_frame_equal(result, expected) + + +# TODO: move to tests.arithmetic and parametrize +def test_pow_nan_with_zero(): + left = DataFrame({"A": [np.nan, np.nan, np.nan]}) + right = DataFrame({"A": [0, 0, 0]}) + + expected = DataFrame({"A": [1.0, 1.0, 1.0]}) + + result = left**right + tm.assert_frame_equal(result, expected) + + result = left["A"] ** right["A"] + tm.assert_series_equal(result, expected["A"]) + + +def test_dataframe_series_extension_dtypes(): + # https://github.com/pandas-dev/pandas/issues/34311 + df = DataFrame( + np.random.default_rng(2).integers(0, 100, (10, 3)), columns=["a", "b", "c"] + ) + ser = Series([1, 2, 3], index=["a", "b", "c"]) + + expected = df.to_numpy("int64") + ser.to_numpy("int64").reshape(-1, 3) + expected = DataFrame(expected, columns=df.columns, dtype="Int64") + + df_ea = df.astype("Int64") + result = df_ea + ser + tm.assert_frame_equal(result, expected) + result = df_ea + ser.astype("Int64") + tm.assert_frame_equal(result, expected) + + +def test_dataframe_blockwise_slicelike(): + # GH#34367 + arr = np.random.default_rng(2).integers(0, 1000, (100, 10)) + df1 = DataFrame(arr) + # Explicit cast to float to avoid implicit cast when setting nan + df2 = df1.copy().astype({1: "float", 3: "float", 7: "float"}) + df2.iloc[0, [1, 3, 7]] = np.nan 
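+
+    # Why the explicit float casts in this test (descriptive note, not from
+    # the upstream file): assigning np.nan into an int64 column would
+    # otherwise trigger an implicit upcast at assignment time, changing the
+    # frame's block layout mid-test. Casting the selected columns to float up
+    # front keeps each frame's block structure deterministic, which is what
+    # this blockwise-slicing check relies on.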
+ + # Explicit cast to float to avoid implicit cast when setting nan + df3 = df1.copy().astype({5: "float"}) + df3.iloc[0, [5]] = np.nan + + # Explicit cast to float to avoid implicit cast when setting nan + df4 = df1.copy().astype({2: "float", 3: "float", 4: "float"}) + df4.iloc[0, np.arange(2, 5)] = np.nan + # Explicit cast to float to avoid implicit cast when setting nan + df5 = df1.copy().astype({4: "float", 5: "float", 6: "float"}) + df5.iloc[0, np.arange(4, 7)] = np.nan + + for left, right in [(df1, df2), (df2, df3), (df4, df5)]: + res = left + right + + expected = DataFrame({i: left[i] + right[i] for i in left.columns}) + tm.assert_frame_equal(res, expected) + + +@pytest.mark.parametrize( + "df, col_dtype", + [ + (DataFrame([[1.0, 2.0], [4.0, 5.0]], columns=list("ab")), "float64"), + (DataFrame([[1.0, "b"], [4.0, "b"]], columns=list("ab")), "object"), + ], +) +def test_dataframe_operation_with_non_numeric_types(df, col_dtype): + # GH #22663 + expected = DataFrame([[0.0, np.nan], [3.0, np.nan]], columns=list("ab")) + expected = expected.astype({"b": col_dtype}) + result = df + Series([-1.0], index=list("a")) + tm.assert_frame_equal(result, expected) + + +def test_arith_reindex_with_duplicates(): + # https://github.com/pandas-dev/pandas/issues/35194 + df1 = DataFrame(data=[[0]], columns=["second"]) + df2 = DataFrame(data=[[0, 0, 0]], columns=["first", "second", "second"]) + result = df1 + df2 + expected = DataFrame([[np.nan, 0, 0]], columns=["first", "second", "second"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("to_add", [[Series([1, 1])], [Series([1, 1]), Series([1, 1])]]) +def test_arith_list_of_arraylike_raise(to_add): + # GH 36702. Raise when trying to add list of array-like to DataFrame + df = DataFrame({"x": [1, 2], "y": [1, 2]}) + + msg = f"Unable to coerce list of {type(to_add[0])} to Series/DataFrame" + with pytest.raises(ValueError, match=msg): + df + to_add + with pytest.raises(ValueError, match=msg): + to_add + df + + +def test_inplace_arithmetic_series_update(using_copy_on_write): + # https://github.com/pandas-dev/pandas/issues/36373 + df = DataFrame({"A": [1, 2, 3]}) + df_orig = df.copy() + series = df["A"] + vals = series._values + + series += 1 + if using_copy_on_write: + assert series._values is not vals + tm.assert_frame_equal(df, df_orig) + else: + assert series._values is vals + + expected = DataFrame({"A": [2, 3, 4]}) + tm.assert_frame_equal(df, expected) + + +def test_arithmetic_multiindex_align(): + """ + Regression test for: https://github.com/pandas-dev/pandas/issues/33765 + """ + df1 = DataFrame( + [[1]], + index=["a"], + columns=MultiIndex.from_product([[0], [1]], names=["a", "b"]), + ) + df2 = DataFrame([[1]], index=["a"], columns=Index([0], name="a")) + expected = DataFrame( + [[0]], + index=["a"], + columns=MultiIndex.from_product([[0], [1]], names=["a", "b"]), + ) + result = df1 - df2 + tm.assert_frame_equal(result, expected) + + +def test_bool_frame_mult_float(): + # GH 18549 + df = DataFrame(True, list("ab"), list("cd")) + result = df * 1.0 + expected = DataFrame(np.ones((2, 2)), list("ab"), list("cd")) + tm.assert_frame_equal(result, expected) + + +def test_frame_sub_nullable_int(any_int_ea_dtype): + # GH 32822 + series1 = Series([1, 2, None], dtype=any_int_ea_dtype) + series2 = Series([1, 2, 3], dtype=any_int_ea_dtype) + expected = DataFrame([0, 0, None], dtype=any_int_ea_dtype) + result = series1.to_frame() - series2.to_frame() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.filterwarnings( + 
"ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" +) +def test_frame_op_subclass_nonclass_constructor(): + # GH#43201 subclass._constructor is a function, not the subclass itself + + class SubclassedSeries(Series): + @property + def _constructor(self): + return SubclassedSeries + + @property + def _constructor_expanddim(self): + return SubclassedDataFrame + + class SubclassedDataFrame(DataFrame): + _metadata = ["my_extra_data"] + + def __init__(self, my_extra_data, *args, **kwargs) -> None: + self.my_extra_data = my_extra_data + super().__init__(*args, **kwargs) + + @property + def _constructor(self): + return functools.partial(type(self), self.my_extra_data) + + @property + def _constructor_sliced(self): + return SubclassedSeries + + sdf = SubclassedDataFrame("some_data", {"A": [1, 2, 3], "B": [4, 5, 6]}) + result = sdf * 2 + expected = SubclassedDataFrame("some_data", {"A": [2, 4, 6], "B": [8, 10, 12]}) + tm.assert_frame_equal(result, expected) + + result = sdf + sdf + tm.assert_frame_equal(result, expected) + + +def test_enum_column_equality(): + Cols = Enum("Cols", "col1 col2") + + q1 = DataFrame({Cols.col1: [1, 2, 3]}) + q2 = DataFrame({Cols.col1: [1, 2, 3]}) + + result = q1[Cols.col1] == q2[Cols.col1] + expected = Series([True, True, True], name=Cols.col1) + + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_block_internals.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_block_internals.py new file mode 100644 index 00000000..9e8d92e8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_block_internals.py @@ -0,0 +1,449 @@ +from datetime import ( + datetime, + timedelta, +) +import itertools + +import numpy as np +import pytest + +from pandas.errors import PerformanceWarning +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Series, + Timestamp, + date_range, + option_context, +) +import pandas._testing as tm +from pandas.core.internals.blocks import NumpyBlock + +# Segregated collection of methods that require the BlockManager internal data +# structure + + +# TODO(ArrayManager) check which of those tests need to be rewritten to test the +# equivalent for ArrayManager +pytestmark = td.skip_array_manager_invalid_test + + +class TestDataFrameBlockInternals: + def test_setitem_invalidates_datetime_index_freq(self): + # GH#24096 altering a datetime64tz column inplace invalidates the + # `freq` attribute on the underlying DatetimeIndex + + dti = date_range("20130101", periods=3, tz="US/Eastern") + ts = dti[1] + + df = DataFrame({"B": dti}) + assert df["B"]._values.freq is None + + df.iloc[1, 0] = pd.NaT + assert df["B"]._values.freq is None + + # check that the DatetimeIndex was not altered in place + assert dti.freq == "D" + assert dti[1] == ts + + def test_cast_internals(self, float_frame): + casted = DataFrame(float_frame._mgr, dtype=int) + expected = DataFrame(float_frame._series, dtype=int) + tm.assert_frame_equal(casted, expected) + + casted = DataFrame(float_frame._mgr, dtype=np.int32) + expected = DataFrame(float_frame._series, dtype=np.int32) + tm.assert_frame_equal(casted, expected) + + def test_consolidate(self, float_frame): + float_frame["E"] = 7.0 + consolidated = float_frame._consolidate() + assert len(consolidated._mgr.blocks) == 1 + + # Ensure copy, do I want this? 
+ recons = consolidated._consolidate() + assert recons is not consolidated + tm.assert_frame_equal(recons, consolidated) + + float_frame["F"] = 8.0 + assert len(float_frame._mgr.blocks) == 3 + + return_value = float_frame._consolidate_inplace() + assert return_value is None + assert len(float_frame._mgr.blocks) == 1 + + def test_consolidate_inplace(self, float_frame): + # triggers in-place consolidation + for letter in range(ord("A"), ord("Z")): + float_frame[chr(letter)] = chr(letter) + + def test_modify_values(self, float_frame, using_copy_on_write): + if using_copy_on_write: + with pytest.raises(ValueError, match="read-only"): + float_frame.values[5] = 5 + assert (float_frame.values[5] != 5).all() + return + + float_frame.values[5] = 5 + assert (float_frame.values[5] == 5).all() + + # unconsolidated + float_frame["E"] = 7.0 + col = float_frame["E"] + float_frame.values[6] = 6 + # as of 2.0 .values does not consolidate, so subsequent calls to .values + # does not share data + assert not (float_frame.values[6] == 6).all() + + assert (col == 7).all() + + def test_boolean_set_uncons(self, float_frame): + float_frame["E"] = 7.0 + + expected = float_frame.values.copy() + expected[expected > 1] = 2 + + float_frame[float_frame > 1] = 2 + tm.assert_almost_equal(expected, float_frame.values) + + def test_constructor_with_convert(self): + # this is actually mostly a test of lib.maybe_convert_objects + # #2845 + df = DataFrame({"A": [2**63 - 1]}) + result = df["A"] + expected = Series(np.asarray([2**63 - 1], np.int64), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [2**63]}) + result = df["A"] + expected = Series(np.asarray([2**63], np.uint64), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [datetime(2005, 1, 1), True]}) + result = df["A"] + expected = Series( + np.asarray([datetime(2005, 1, 1), True], np.object_), name="A" + ) + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [None, 1]}) + result = df["A"] + expected = Series(np.asarray([np.nan, 1], np.float64), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [1.0, 2]}) + result = df["A"] + expected = Series(np.asarray([1.0, 2], np.float64), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [1.0 + 2.0j, 3]}) + result = df["A"] + expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex128), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [1.0 + 2.0j, 3.0]}) + result = df["A"] + expected = Series(np.asarray([1.0 + 2.0j, 3.0], np.complex128), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [1.0 + 2.0j, True]}) + result = df["A"] + expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [1.0, None]}) + result = df["A"] + expected = Series(np.asarray([1.0, np.nan], np.float64), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [1.0 + 2.0j, None]}) + result = df["A"] + expected = Series(np.asarray([1.0 + 2.0j, np.nan], np.complex128), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [2.0, 1, True, None]}) + result = df["A"] + expected = Series(np.asarray([2.0, 1, True, None], np.object_), name="A") + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [2.0, 1, datetime(2006, 1, 1), None]}) + result = df["A"] + expected = Series( + np.asarray([2.0, 1, datetime(2006, 1, 1), None], np.object_), name="A" + ) + 
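test_constructor_with_convert above walks the dtype-inference rules one case at a time; condensed, they look like this (a sketch of the same inference the test exercises, assuming pandas 2.x):

import pandas as pd

print(pd.DataFrame({"A": [2**63 - 1]})["A"].dtype)           # int64 (fits)
print(pd.DataFrame({"A": [2**63]})["A"].dtype)               # uint64 (exceeds int64)
print(pd.DataFrame({"A": [1.0, None]})["A"].dtype)           # float64 (None -> NaN)
print(pd.DataFrame({"A": [2.0, 1, True, None]})["A"].dtype)  # object (mixed)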
tm.assert_series_equal(result, expected) + + def test_construction_with_mixed(self, float_string_frame): + # test construction edge cases with mixed types + + # f7u12, this does not work without extensive workaround + data = [ + [datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)], + [datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 1)], + ] + df = DataFrame(data) + + # check dtypes + result = df.dtypes + expected = Series({"datetime64[us]": 3}) + + # mixed-type frames + float_string_frame["datetime"] = datetime.now() + float_string_frame["timedelta"] = timedelta(days=1, seconds=1) + assert float_string_frame["datetime"].dtype == "M8[us]" + assert float_string_frame["timedelta"].dtype == "m8[us]" + result = float_string_frame.dtypes + expected = Series( + [np.dtype("float64")] * 4 + + [ + np.dtype("object"), + np.dtype("datetime64[us]"), + np.dtype("timedelta64[us]"), + ], + index=list("ABCD") + ["foo", "datetime", "timedelta"], + ) + tm.assert_series_equal(result, expected) + + def test_construction_with_conversions(self): + # convert from a numpy array of non-ns timedelta64; as of 2.0 this does + # *not* convert + arr = np.array([1, 2, 3], dtype="timedelta64[s]") + df = DataFrame(index=range(3)) + df["A"] = arr + expected = DataFrame( + {"A": pd.timedelta_range("00:00:01", periods=3, freq="s")}, index=range(3) + ) + tm.assert_numpy_array_equal(df["A"].to_numpy(), arr) + + expected = DataFrame( + { + "dt1": Timestamp("20130101"), + "dt2": date_range("20130101", periods=3).astype("M8[s]"), + # 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'), + # FIXME: don't leave commented-out + }, + index=range(3), + ) + assert expected.dtypes["dt1"] == "M8[s]" + assert expected.dtypes["dt2"] == "M8[s]" + + df = DataFrame(index=range(3)) + df["dt1"] = np.datetime64("2013-01-01") + df["dt2"] = np.array( + ["2013-01-01", "2013-01-02", "2013-01-03"], dtype="datetime64[D]" + ) + + # df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01 + # 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]') + # FIXME: don't leave commented-out + + tm.assert_frame_equal(df, expected) + + def test_constructor_compound_dtypes(self): + # GH 5191 + # compound dtypes should raise not-implementederror + + def f(dtype): + data = list(itertools.repeat((datetime(2001, 1, 1), "aa", 20), 9)) + return DataFrame(data=data, columns=["A", "B", "C"], dtype=dtype) + + msg = "compound dtypes are not implemented in the DataFrame constructor" + with pytest.raises(NotImplementedError, match=msg): + f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")]) + + # pre-2.0 these used to work (though results may be unexpected) + with pytest.raises(TypeError, match="argument must be"): + f("int64") + with pytest.raises(TypeError, match="argument must be"): + f("float64") + + # 10822 + msg = "^Unknown datetime string format, unable to parse: aa, at position 0$" + with pytest.raises(ValueError, match=msg): + f("M8[ns]") + + def test_pickle(self, float_string_frame, timezone_frame): + empty_frame = DataFrame() + + unpickled = tm.round_trip_pickle(float_string_frame) + tm.assert_frame_equal(float_string_frame, unpickled) + + # buglet + float_string_frame._mgr.ndim + + # empty + unpickled = tm.round_trip_pickle(empty_frame) + repr(unpickled) + + # tz frame + unpickled = tm.round_trip_pickle(timezone_frame) + tm.assert_frame_equal(timezone_frame, unpickled) + + def test_consolidate_datetime64(self): + # numpy vstack bug + + df = DataFrame( + { + "starting": pd.to_datetime( + [ + "2012-06-21 00:00", + "2012-06-23 07:00", + 
"2012-06-23 16:30", + "2012-06-25 08:00", + "2012-06-26 12:00", + ] + ), + "ending": pd.to_datetime( + [ + "2012-06-23 07:00", + "2012-06-23 16:30", + "2012-06-25 08:00", + "2012-06-26 12:00", + "2012-06-27 08:00", + ] + ), + "measure": [77, 65, 77, 0, 77], + } + ) + + ser_starting = df.starting + ser_starting.index = ser_starting.values + ser_starting = ser_starting.tz_localize("US/Eastern") + ser_starting = ser_starting.tz_convert("UTC") + ser_starting.index.name = "starting" + + ser_ending = df.ending + ser_ending.index = ser_ending.values + ser_ending = ser_ending.tz_localize("US/Eastern") + ser_ending = ser_ending.tz_convert("UTC") + ser_ending.index.name = "ending" + + df.starting = ser_starting.index + df.ending = ser_ending.index + + tm.assert_index_equal(pd.DatetimeIndex(df.starting), ser_starting.index) + tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index) + + def test_is_mixed_type(self, float_frame, float_string_frame): + assert not float_frame._is_mixed_type + assert float_string_frame._is_mixed_type + + def test_stale_cached_series_bug_473(self, using_copy_on_write): + # this is chained, but ok + with option_context("chained_assignment", None): + Y = DataFrame( + np.random.default_rng(2).random((4, 4)), + index=("a", "b", "c", "d"), + columns=("e", "f", "g", "h"), + ) + repr(Y) + Y["e"] = Y["e"].astype("object") + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + Y["g"]["c"] = np.nan + else: + Y["g"]["c"] = np.nan + repr(Y) + Y.sum() + Y["g"].sum() + if using_copy_on_write: + assert not pd.isna(Y["g"]["c"]) + else: + assert pd.isna(Y["g"]["c"]) + + def test_strange_column_corruption_issue(self, using_copy_on_write): + # TODO(wesm): Unclear how exactly this is related to internal matters + df = DataFrame(index=[0, 1]) + df[0] = np.nan + wasCol = {} + + with tm.assert_produces_warning(PerformanceWarning): + for i, dt in enumerate(df.index): + for col in range(100, 200): + if col not in wasCol: + wasCol[col] = 1 + df[col] = np.nan + if using_copy_on_write: + df.loc[dt, col] = i + else: + df[col][dt] = i + + myid = 100 + + first = len(df.loc[pd.isna(df[myid]), [myid]]) + second = len(df.loc[pd.isna(df[myid]), [myid]]) + assert first == second == 0 + + def test_constructor_no_pandas_array(self): + # Ensure that NumpyExtensionArray isn't allowed inside Series + # See https://github.com/pandas-dev/pandas/issues/23995 for more. 
+ arr = Series([1, 2, 3]).array + result = DataFrame({"A": arr}) + expected = DataFrame({"A": [1, 2, 3]}) + tm.assert_frame_equal(result, expected) + assert isinstance(result._mgr.blocks[0], NumpyBlock) + assert result._mgr.blocks[0].is_numeric + + def test_add_column_with_pandas_array(self): + # GH 26390 + df = DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]}) + df["c"] = pd.arrays.NumpyExtensionArray(np.array([1, 2, None, 3], dtype=object)) + df2 = DataFrame( + { + "a": [1, 2, 3, 4], + "b": ["a", "b", "c", "d"], + "c": pd.arrays.NumpyExtensionArray( + np.array([1, 2, None, 3], dtype=object) + ), + } + ) + assert type(df["c"]._mgr.blocks[0]) == NumpyBlock + assert df["c"]._mgr.blocks[0].is_object + assert type(df2["c"]._mgr.blocks[0]) == NumpyBlock + assert df2["c"]._mgr.blocks[0].is_object + tm.assert_frame_equal(df, df2) + + +def test_update_inplace_sets_valid_block_values(using_copy_on_write): + # https://github.com/pandas-dev/pandas/issues/33457 + df = DataFrame({"a": Series([1, 2, None], dtype="category")}) + + # inplace update of a single column + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["a"].fillna(1, inplace=True) + else: + df["a"].fillna(1, inplace=True) + + # check we haven't put a Series into any block.values + assert isinstance(df._mgr.blocks[0].values, Categorical) + + if not using_copy_on_write: + # smoketest for OP bug from GH#35731 + assert df.isnull().sum().sum() == 0 + + +def test_nonconsolidated_item_cache_take(): + # https://github.com/pandas-dev/pandas/issues/35521 + + # create non-consolidated dataframe with object dtype columns + df = DataFrame() + df["col1"] = Series(["a"], dtype=object) + df["col2"] = Series([0], dtype=object) + + # access column (item cache) + df["col1"] == "A" + # take operation + # (regression was that this consolidated but didn't reset item cache, + # resulting in an invalid cache and the .at operation not working properly) + df[df["col2"] == 0] + + # now setting value should update actual dataframe + df.at[0, "col1"] = "A" + + expected = DataFrame({"col1": ["A"], "col2": [0]}, dtype=object) + tm.assert_frame_equal(df, expected) + assert df.at[0, "col1"] == "A" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_constructors.py new file mode 100644 index 00000000..a291b906 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_constructors.py @@ -0,0 +1,3290 @@ +import array +from collections import ( + OrderedDict, + abc, + defaultdict, + namedtuple, +) +from collections.abc import Iterator +from dataclasses import make_dataclass +from datetime import ( + date, + datetime, + timedelta, +) +import functools +import re + +import numpy as np +from numpy import ma +from numpy.ma import mrecords +import pytest +import pytz + +from pandas._libs import lib +from pandas.errors import IntCastingNaNError +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import is_integer_dtype +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + IntervalDtype, + NumpyEADtype, + PeriodDtype, +) + +import pandas as pd +from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + DatetimeIndex, + Index, + Interval, + MultiIndex, + Period, + RangeIndex, + Series, + Timedelta, + Timestamp, + cut, + date_range, + isna, +) +import pandas._testing as tm +from pandas.arrays import ( + DatetimeArray, + IntervalArray, + PeriodArray, + SparseArray, + TimedeltaArray, 
+) + +MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"] +MIXED_INT_DTYPES = [ + "uint8", + "uint16", + "uint32", + "uint64", + "int8", + "int16", + "int32", + "int64", +] + + +class TestDataFrameConstructors: + def test_constructor_from_ndarray_with_str_dtype(self): + # If we don't ravel/reshape around ensure_str_array, we end up + # with an array of strings each of which is e.g. "[0 1 2]" + arr = np.arange(12).reshape(4, 3) + df = DataFrame(arr, dtype=str) + expected = DataFrame(arr.astype(str)) + tm.assert_frame_equal(df, expected) + + def test_constructor_from_2d_datetimearray(self, using_array_manager): + dti = date_range("2016-01-01", periods=6, tz="US/Pacific") + dta = dti._data.reshape(3, 2) + + df = DataFrame(dta) + expected = DataFrame({0: dta[:, 0], 1: dta[:, 1]}) + tm.assert_frame_equal(df, expected) + if not using_array_manager: + # GH#44724 big performance hit if we de-consolidate + assert len(df._mgr.blocks) == 1 + + def test_constructor_dict_with_tzaware_scalar(self): + # GH#42505 + dt = Timestamp("2019-11-03 01:00:00-0700").tz_convert("America/Los_Angeles") + dt = dt.as_unit("ns") + + df = DataFrame({"dt": dt}, index=[0]) + expected = DataFrame({"dt": [dt]}) + tm.assert_frame_equal(df, expected) + + # Non-homogeneous + df = DataFrame({"dt": dt, "value": [1]}) + expected = DataFrame({"dt": [dt], "value": [1]}) + tm.assert_frame_equal(df, expected) + + def test_construct_ndarray_with_nas_and_int_dtype(self): + # GH#26919 match Series by not casting np.nan to meaningless int + arr = np.array([[1, np.nan], [2, 3]]) + msg = r"Cannot convert non-finite values \(NA or inf\) to integer" + with pytest.raises(IntCastingNaNError, match=msg): + DataFrame(arr, dtype="i8") + + # check this matches Series behavior + with pytest.raises(IntCastingNaNError, match=msg): + Series(arr[0], dtype="i8", name=0) + + def test_construct_from_list_of_datetimes(self): + df = DataFrame([datetime.now(), datetime.now()]) + assert df[0].dtype == np.dtype("M8[ns]") + + def test_constructor_from_tzaware_datetimeindex(self): + # don't cast a DatetimeIndex WITH a tz, leave as object + # GH#6032 + naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B") + idx = naive.tz_localize("US/Pacific") + + expected = Series(np.array(idx.tolist(), dtype="object"), name="B") + assert expected.dtype == idx.dtype + + # convert index to series + result = Series(idx) + tm.assert_series_equal(result, expected) + + def test_columns_with_leading_underscore_work_with_to_dict(self): + col_underscore = "_b" + df = DataFrame({"a": [1, 2], col_underscore: [3, 4]}) + d = df.to_dict(orient="records") + + ref_d = [{"a": 1, col_underscore: 3}, {"a": 2, col_underscore: 4}] + + assert ref_d == d + + def test_columns_with_leading_number_and_underscore_work_with_to_dict(self): + col_with_num = "1_b" + df = DataFrame({"a": [1, 2], col_with_num: [3, 4]}) + d = df.to_dict(orient="records") + + ref_d = [{"a": 1, col_with_num: 3}, {"a": 2, col_with_num: 4}] + + assert ref_d == d + + def test_array_of_dt64_nat_with_td64dtype_raises(self, frame_or_series): + # GH#39462 + nat = np.datetime64("NaT", "ns") + arr = np.array([nat], dtype=object) + if frame_or_series is DataFrame: + arr = arr.reshape(1, 1) + + msg = "Invalid type for timedelta scalar: " + with pytest.raises(TypeError, match=msg): + frame_or_series(arr, dtype="m8[ns]") + + @pytest.mark.parametrize("kind", ["m", "M"]) + def test_datetimelike_values_with_object_dtype(self, kind, frame_or_series): + # with dtype=object, we should cast dt64 values to Timestamps, not 
pydatetimes + if kind == "M": + dtype = "M8[ns]" + scalar_type = Timestamp + else: + dtype = "m8[ns]" + scalar_type = Timedelta + + arr = np.arange(6, dtype="i8").view(dtype).reshape(3, 2) + if frame_or_series is Series: + arr = arr[:, 0] + + obj = frame_or_series(arr, dtype=object) + assert obj._mgr.arrays[0].dtype == object + assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type) + + # go through a different path in internals.construction + obj = frame_or_series(frame_or_series(arr), dtype=object) + assert obj._mgr.arrays[0].dtype == object + assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type) + + obj = frame_or_series(frame_or_series(arr), dtype=NumpyEADtype(object)) + assert obj._mgr.arrays[0].dtype == object + assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type) + + if frame_or_series is DataFrame: + # other paths through internals.construction + sers = [Series(x) for x in arr] + obj = frame_or_series(sers, dtype=object) + assert obj._mgr.arrays[0].dtype == object + assert isinstance(obj._mgr.arrays[0].ravel()[0], scalar_type) + + def test_series_with_name_not_matching_column(self): + # GH#9232 + x = Series(range(5), name=1) + y = Series(range(5), name=0) + + result = DataFrame(x, columns=[0]) + expected = DataFrame([], columns=[0]) + tm.assert_frame_equal(result, expected) + + result = DataFrame(y, columns=[1]) + expected = DataFrame([], columns=[1]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "constructor", + [ + lambda: DataFrame(), + lambda: DataFrame(None), + lambda: DataFrame(()), + lambda: DataFrame([]), + lambda: DataFrame(_ for _ in []), + lambda: DataFrame(range(0)), + lambda: DataFrame(data=None), + lambda: DataFrame(data=()), + lambda: DataFrame(data=[]), + lambda: DataFrame(data=(_ for _ in [])), + lambda: DataFrame(data=range(0)), + ], + ) + def test_empty_constructor(self, constructor): + expected = DataFrame() + result = constructor() + assert len(result.index) == 0 + assert len(result.columns) == 0 + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "constructor", + [ + lambda: DataFrame({}), + lambda: DataFrame(data={}), + ], + ) + def test_empty_constructor_object_index(self, constructor): + expected = DataFrame(index=RangeIndex(0), columns=RangeIndex(0)) + result = constructor() + assert len(result.index) == 0 + assert len(result.columns) == 0 + tm.assert_frame_equal(result, expected, check_index_type=True) + + @pytest.mark.parametrize( + "emptylike,expected_index,expected_columns", + [ + ([[]], RangeIndex(1), RangeIndex(0)), + ([[], []], RangeIndex(2), RangeIndex(0)), + ([(_ for _ in [])], RangeIndex(1), RangeIndex(0)), + ], + ) + def test_emptylike_constructor(self, emptylike, expected_index, expected_columns): + expected = DataFrame(index=expected_index, columns=expected_columns) + result = DataFrame(emptylike) + tm.assert_frame_equal(result, expected) + + def test_constructor_mixed(self, float_string_frame): + assert float_string_frame["foo"].dtype == np.object_ + + def test_constructor_cast_failure(self): + # as of 2.0, we raise if we can't respect "dtype", previously we + # silently ignored + msg = "could not convert string to float" + with pytest.raises(ValueError, match=msg): + DataFrame({"a": ["a", "b", "c"]}, dtype=np.float64) + + # GH 3010, constructing with odd arrays + df = DataFrame(np.ones((4, 2))) + + # this is ok + df["foo"] = np.ones((4, 2)).tolist() + + # this is not ok + msg = "Expected a 1D array, got an array with shape \\(4, 2\\)" + with pytest.raises(ValueError, 
match=msg): + df["test"] = np.ones((4, 2)) + + # this is ok + df["foo2"] = np.ones((4, 2)).tolist() + + def test_constructor_dtype_copy(self): + orig_df = DataFrame({"col1": [1.0], "col2": [2.0], "col3": [3.0]}) + + new_df = DataFrame(orig_df, dtype=float, copy=True) + + new_df["col1"] = 200.0 + assert orig_df["col1"][0] == 1.0 + + def test_constructor_dtype_nocast_view_dataframe(self, using_copy_on_write): + df = DataFrame([[1, 2]]) + should_be_view = DataFrame(df, dtype=df[0].dtype) + if using_copy_on_write: + should_be_view.iloc[0, 0] = 99 + assert df.values[0, 0] == 1 + else: + should_be_view[0][0] = 99 + assert df.values[0, 0] == 99 + + def test_constructor_dtype_nocast_view_2d_array( + self, using_array_manager, using_copy_on_write + ): + df = DataFrame([[1, 2], [3, 4]], dtype="int64") + if not using_array_manager and not using_copy_on_write: + should_be_view = DataFrame(df.values, dtype=df[0].dtype) + should_be_view[0][0] = 97 + assert df.values[0, 0] == 97 + else: + # INFO(ArrayManager) DataFrame(ndarray) doesn't necessarily preserve + # a view on the array to ensure contiguous 1D arrays + df2 = DataFrame(df.values, dtype=df[0].dtype) + assert df2._mgr.arrays[0].flags.c_contiguous + + @td.skip_array_manager_invalid_test + def test_1d_object_array_does_not_copy(self): + # https://github.com/pandas-dev/pandas/issues/39272 + arr = np.array(["a", "b"], dtype="object") + df = DataFrame(arr, copy=False) + assert np.shares_memory(df.values, arr) + + @td.skip_array_manager_invalid_test + def test_2d_object_array_does_not_copy(self): + # https://github.com/pandas-dev/pandas/issues/39272 + arr = np.array([["a", "b"], ["c", "d"]], dtype="object") + df = DataFrame(arr, copy=False) + assert np.shares_memory(df.values, arr) + + def test_constructor_dtype_list_data(self): + df = DataFrame([[1, "2"], [None, "a"]], dtype=object) + assert df.loc[1, 0] is None + assert df.loc[0, 1] == "2" + + def test_constructor_list_of_2d_raises(self): + # https://github.com/pandas-dev/pandas/issues/32289 + a = DataFrame() + b = np.empty((0, 0)) + with pytest.raises(ValueError, match=r"shape=\(1, 0, 0\)"): + DataFrame([a]) + + with pytest.raises(ValueError, match=r"shape=\(1, 0, 0\)"): + DataFrame([b]) + + a = DataFrame({"A": [1, 2]}) + with pytest.raises(ValueError, match=r"shape=\(2, 2, 1\)"): + DataFrame([a, a]) + + @pytest.mark.parametrize( + "typ, ad", + [ + # mixed floating and integer coexist in the same frame + ["float", {}], + # add lots of types + ["float", {"A": 1, "B": "foo", "C": "bar"}], + # GH 622 + ["int", {}], + ], + ) + def test_constructor_mixed_dtypes(self, typ, ad): + if typ == "int": + dtypes = MIXED_INT_DTYPES + arrays = [ + np.array(np.random.default_rng(2).random(10), dtype=d) for d in dtypes + ] + elif typ == "float": + dtypes = MIXED_FLOAT_DTYPES + arrays = [ + np.array(np.random.default_rng(2).integers(10, size=10), dtype=d) + for d in dtypes + ] + + for d, a in zip(dtypes, arrays): + assert a.dtype == d + ad.update(dict(zip(dtypes, arrays))) + df = DataFrame(ad) + + dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES + for d in dtypes: + if d in df: + assert df.dtypes[d] == d + + def test_constructor_complex_dtypes(self): + # GH10952 + a = np.random.default_rng(2).random(10).astype(np.complex64) + b = np.random.default_rng(2).random(10).astype(np.complex128) + + df = DataFrame({"a": a, "b": b}) + assert a.dtype == df.a.dtype + assert b.dtype == df.b.dtype + + def test_constructor_dtype_str_na_values(self, string_dtype): + # https://github.com/pandas-dev/pandas/issues/21083 + df = 
DataFrame({"A": ["x", None]}, dtype=string_dtype) + result = df.isna() + expected = DataFrame({"A": [False, True]}) + tm.assert_frame_equal(result, expected) + assert df.iloc[1, 0] is None + + df = DataFrame({"A": ["x", np.nan]}, dtype=string_dtype) + assert np.isnan(df.iloc[1, 0]) + + def test_constructor_rec(self, float_frame): + rec = float_frame.to_records(index=False) + rec.dtype.names = list(rec.dtype.names)[::-1] + + index = float_frame.index + + df = DataFrame(rec) + tm.assert_index_equal(df.columns, Index(rec.dtype.names)) + + df2 = DataFrame(rec, index=index) + tm.assert_index_equal(df2.columns, Index(rec.dtype.names)) + tm.assert_index_equal(df2.index, index) + + # case with columns != the ones we would infer from the data + rng = np.arange(len(rec))[::-1] + df3 = DataFrame(rec, index=rng, columns=["C", "B"]) + expected = DataFrame(rec, index=rng).reindex(columns=["C", "B"]) + tm.assert_frame_equal(df3, expected) + + def test_constructor_bool(self): + df = DataFrame({0: np.ones(10, dtype=bool), 1: np.zeros(10, dtype=bool)}) + assert df.values.dtype == np.bool_ + + def test_constructor_overflow_int64(self): + # see gh-14881 + values = np.array([2**64 - i for i in range(1, 10)], dtype=np.uint64) + + result = DataFrame({"a": values}) + assert result["a"].dtype == np.uint64 + + # see gh-2355 + data_scores = [ + (6311132704823138710, 273), + (2685045978526272070, 23), + (8921811264899370420, 45), + (17019687244989530680, 270), + (9930107427299601010, 273), + ] + dtype = [("uid", "u8"), ("score", "u8")] + data = np.zeros((len(data_scores),), dtype=dtype) + data[:] = data_scores + df_crawls = DataFrame(data) + assert df_crawls["uid"].dtype == np.uint64 + + @pytest.mark.parametrize( + "values", + [ + np.array([2**64], dtype=object), + np.array([2**65]), + [2**64 + 1], + np.array([-(2**63) - 4], dtype=object), + np.array([-(2**64) - 1]), + [-(2**65) - 2], + ], + ) + def test_constructor_int_overflow(self, values): + # see gh-18584 + value = values[0] + result = DataFrame(values) + + assert result[0].dtype == object + assert result[0][0] == value + + @pytest.mark.parametrize( + "values", + [ + np.array([1], dtype=np.uint16), + np.array([1], dtype=np.uint32), + np.array([1], dtype=np.uint64), + [np.uint16(1)], + [np.uint32(1)], + [np.uint64(1)], + ], + ) + def test_constructor_numpy_uints(self, values): + # GH#47294 + value = values[0] + result = DataFrame(values) + + assert result[0].dtype == value.dtype + assert result[0][0] == value + + def test_constructor_ordereddict(self): + nitems = 100 + nums = list(range(nitems)) + np.random.default_rng(2).shuffle(nums) + expected = [f"A{i:d}" for i in nums] + df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems))) + assert expected == list(df.columns) + + def test_constructor_dict(self): + datetime_series = tm.makeTimeSeries(nper=30) + # test expects index shifted by 5 + datetime_series_short = tm.makeTimeSeries(nper=30)[5:] + + frame = DataFrame({"col1": datetime_series, "col2": datetime_series_short}) + + # col2 is padded with NaN + assert len(datetime_series) == 30 + assert len(datetime_series_short) == 25 + + tm.assert_series_equal(frame["col1"], datetime_series.rename("col1")) + + exp = Series( + np.concatenate([[np.nan] * 5, datetime_series_short.values]), + index=datetime_series.index, + name="col2", + ) + tm.assert_series_equal(exp, frame["col2"]) + + frame = DataFrame( + {"col1": datetime_series, "col2": datetime_series_short}, + columns=["col2", "col3", "col4"], + ) + + assert len(frame) == len(datetime_series_short) + assert "col1" 
not in frame + assert isna(frame["col3"]).all() + + # Corner cases + assert len(DataFrame()) == 0 + + # mix dict and array, wrong size - no spec for which error should raise + # first + msg = "Mixing dicts with non-Series may lead to ambiguous ordering." + with pytest.raises(ValueError, match=msg): + DataFrame({"A": {"a": "a", "b": "b"}, "B": ["a", "b", "c"]}) + + def test_constructor_dict_length1(self): + # Length-one dict micro-optimization + frame = DataFrame({"A": {"1": 1, "2": 2}}) + tm.assert_index_equal(frame.index, Index(["1", "2"])) + + def test_constructor_dict_with_index(self): + # empty dict plus index + idx = Index([0, 1, 2]) + frame = DataFrame({}, index=idx) + assert frame.index is idx + + def test_constructor_dict_with_index_and_columns(self): + # empty dict with index and columns + idx = Index([0, 1, 2]) + frame = DataFrame({}, index=idx, columns=idx) + assert frame.index is idx + assert frame.columns is idx + assert len(frame._series) == 3 + + def test_constructor_dict_of_empty_lists(self): + # with dict of empty list and Series + frame = DataFrame({"A": [], "B": []}, columns=["A", "B"]) + tm.assert_index_equal(frame.index, RangeIndex(0), exact=True) + + def test_constructor_dict_with_none(self): + # GH 14381 + # Dict with None value + frame_none = DataFrame({"a": None}, index=[0]) + frame_none_list = DataFrame({"a": [None]}, index=[0]) + assert frame_none._get_value(0, "a") is None + assert frame_none_list._get_value(0, "a") is None + tm.assert_frame_equal(frame_none, frame_none_list) + + def test_constructor_dict_errors(self): + # GH10856 + # dict with scalar values should raise error, even if columns passed + msg = "If using all scalar values, you must pass an index" + with pytest.raises(ValueError, match=msg): + DataFrame({"a": 0.7}) + + with pytest.raises(ValueError, match=msg): + DataFrame({"a": 0.7}, columns=["a"]) + + @pytest.mark.parametrize("scalar", [2, np.nan, None, "D"]) + def test_constructor_invalid_items_unused(self, scalar): + # No error if invalid (scalar) value is in fact not used: + result = DataFrame({"a": scalar}, columns=["b"]) + expected = DataFrame(columns=["b"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("value", [2, np.nan, None, float("nan")]) + def test_constructor_dict_nan_key(self, value): + # GH 18455 + cols = [1, value, 3] + idx = ["a", value] + values = [[0, 3], [1, 4], [2, 5]] + data = {cols[c]: Series(values[c], index=idx) for c in range(3)} + result = DataFrame(data).sort_values(1).sort_values("a", axis=1) + expected = DataFrame( + np.arange(6, dtype="int64").reshape(2, 3), index=idx, columns=cols + ) + tm.assert_frame_equal(result, expected) + + result = DataFrame(data, index=idx).sort_values("a", axis=1) + tm.assert_frame_equal(result, expected) + + result = DataFrame(data, index=idx, columns=cols) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("value", [np.nan, None, float("nan")]) + def test_constructor_dict_nan_tuple_key(self, value): + # GH 18455 + cols = Index([(11, 21), (value, 22), (13, value)]) + idx = Index([("a", value), (value, 2)]) + values = [[0, 3], [1, 4], [2, 5]] + data = {cols[c]: Series(values[c], index=idx) for c in range(3)} + result = DataFrame(data).sort_values((11, 21)).sort_values(("a", value), axis=1) + expected = DataFrame( + np.arange(6, dtype="int64").reshape(2, 3), index=idx, columns=cols + ) + tm.assert_frame_equal(result, expected) + + result = DataFrame(data, index=idx).sort_values(("a", value), axis=1) + tm.assert_frame_equal(result, expected) + + 
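The dict-constructor tests above rest on two rules: dict-of-Series input is reindexed to the union of the Series indexes, padding missing positions with NaN, and all-scalar dicts are rejected without an explicit index. Replayed outside the test harness:

import pandas as pd

s1 = pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"])
s2 = pd.Series([10.0, 20.0], index=["b", "c"])

# "col2" has no value at "a", so that position is padded with NaN.
print(pd.DataFrame({"col1": s1, "col2": s2}))

# All-scalar dicts are ambiguous without an index:
try:
    pd.DataFrame({"a": 0.7})
except ValueError as err:
    print(err)  # If using all scalar values, you must pass an index

print(pd.DataFrame({"a": 0.7}, index=[0, 1]))  # scalar broadcasts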
result = DataFrame(data, index=idx, columns=cols) + tm.assert_frame_equal(result, expected) + + def test_constructor_dict_order_insertion(self): + datetime_series = tm.makeTimeSeries(nper=30) + datetime_series_short = tm.makeTimeSeries(nper=25) + + # GH19018 + # initialization ordering: by insertion order if python>= 3.6 + d = {"b": datetime_series_short, "a": datetime_series} + frame = DataFrame(data=d) + expected = DataFrame(data=d, columns=list("ba")) + tm.assert_frame_equal(frame, expected) + + def test_constructor_dict_nan_key_and_columns(self): + # GH 16894 + result = DataFrame({np.nan: [1, 2], 2: [2, 3]}, columns=[np.nan, 2]) + expected = DataFrame([[1, 2], [2, 3]], columns=[np.nan, 2]) + tm.assert_frame_equal(result, expected) + + def test_constructor_multi_index(self): + # GH 4078 + # construction error with mi and all-nan frame + tuples = [(2, 3), (3, 3), (3, 3)] + mi = MultiIndex.from_tuples(tuples) + df = DataFrame(index=mi, columns=mi) + assert isna(df).values.ravel().all() + + tuples = [(3, 3), (2, 3), (3, 3)] + mi = MultiIndex.from_tuples(tuples) + df = DataFrame(index=mi, columns=mi) + assert isna(df).values.ravel().all() + + def test_constructor_2d_index(self): + # GH 25416 + # handling of 2d index in construction + df = DataFrame([[1]], columns=[[1]], index=[1, 2]) + expected = DataFrame( + [1, 1], + index=Index([1, 2], dtype="int64"), + columns=MultiIndex(levels=[[1]], codes=[[0]]), + ) + tm.assert_frame_equal(df, expected) + + df = DataFrame([[1]], columns=[[1]], index=[[1, 2]]) + expected = DataFrame( + [1, 1], + index=MultiIndex(levels=[[1, 2]], codes=[[0, 1]]), + columns=MultiIndex(levels=[[1]], codes=[[0]]), + ) + tm.assert_frame_equal(df, expected) + + def test_constructor_error_msgs(self): + msg = "Empty data passed with indices specified." + # passing an empty array with columns specified. + with pytest.raises(ValueError, match=msg): + DataFrame(np.empty(0), index=[1]) + + msg = "Mixing dicts with non-Series may lead to ambiguous ordering." 
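test_constructor_error_msgs (continued below) validates the shape-mismatch message, which reports both the shape of the data and the shape implied by the passed labels. A sketch of one of its cases:

import numpy as np
import pandas as pd

# 4 rows of data against 3 index labels: the error spells out both shapes.
try:
    pd.DataFrame(
        np.arange(12).reshape(4, 3),
        columns=["foo", "bar", "baz"],
        index=pd.date_range("2000-01-01", periods=3),
    )
except ValueError as err:
    print(err)  # Shape of passed values is (4, 3), indices imply (3, 3)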
+ # mix dict and array, wrong size + with pytest.raises(ValueError, match=msg): + DataFrame({"A": {"a": "a", "b": "b"}, "B": ["a", "b", "c"]}) + + # wrong size ndarray, GH 3105 + msg = r"Shape of passed values is \(4, 3\), indices imply \(3, 3\)" + with pytest.raises(ValueError, match=msg): + DataFrame( + np.arange(12).reshape((4, 3)), + columns=["foo", "bar", "baz"], + index=date_range("2000-01-01", periods=3), + ) + + arr = np.array([[4, 5, 6]]) + msg = r"Shape of passed values is \(1, 3\), indices imply \(1, 4\)" + with pytest.raises(ValueError, match=msg): + DataFrame(index=[0], columns=range(0, 4), data=arr) + + arr = np.array([4, 5, 6]) + msg = r"Shape of passed values is \(3, 1\), indices imply \(1, 4\)" + with pytest.raises(ValueError, match=msg): + DataFrame(index=[0], columns=range(0, 4), data=arr) + + # higher dim raise exception + with pytest.raises(ValueError, match="Must pass 2-d input"): + DataFrame(np.zeros((3, 3, 3)), columns=["A", "B", "C"], index=[1]) + + # wrong size axis labels + msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)" + with pytest.raises(ValueError, match=msg): + DataFrame( + np.random.default_rng(2).random((2, 3)), + columns=["A", "B", "C"], + index=[1], + ) + + msg = r"Shape of passed values is \(2, 3\), indices imply \(2, 2\)" + with pytest.raises(ValueError, match=msg): + DataFrame( + np.random.default_rng(2).random((2, 3)), + columns=["A", "B"], + index=[1, 2], + ) + + # gh-26429 + msg = "2 columns passed, passed data had 10 columns" + with pytest.raises(ValueError, match=msg): + DataFrame((range(10), range(10, 20)), columns=("ones", "twos")) + + msg = "If using all scalar values, you must pass an index" + with pytest.raises(ValueError, match=msg): + DataFrame({"a": False, "b": True}) + + def test_constructor_subclass_dict(self, dict_subclass): + # Test for passing dict subclass to constructor + data = { + "col1": dict_subclass((x, 10.0 * x) for x in range(10)), + "col2": dict_subclass((x, 20.0 * x) for x in range(10)), + } + df = DataFrame(data) + refdf = DataFrame({col: dict(val.items()) for col, val in data.items()}) + tm.assert_frame_equal(refdf, df) + + data = dict_subclass(data.items()) + df = DataFrame(data) + tm.assert_frame_equal(refdf, df) + + def test_constructor_defaultdict(self, float_frame): + # try with defaultdict + data = {} + float_frame.loc[: float_frame.index[10], "B"] = np.nan + + for k, v in float_frame.items(): + dct = defaultdict(dict) + dct.update(v.to_dict()) + data[k] = dct + frame = DataFrame(data) + expected = frame.reindex(index=float_frame.index) + tm.assert_frame_equal(float_frame, expected) + + def test_constructor_dict_block(self): + expected = np.array([[4.0, 3.0, 2.0, 1.0]]) + df = DataFrame( + {"d": [4.0], "c": [3.0], "b": [2.0], "a": [1.0]}, + columns=["d", "c", "b", "a"], + ) + tm.assert_numpy_array_equal(df.values, expected) + + def test_constructor_dict_cast(self): + # cast float tests + test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}} + frame = DataFrame(test_data, dtype=float) + assert len(frame) == 3 + assert frame["B"].dtype == np.float64 + assert frame["A"].dtype == np.float64 + + frame = DataFrame(test_data) + assert len(frame) == 3 + assert frame["B"].dtype == np.object_ + assert frame["A"].dtype == np.float64 + + def test_constructor_dict_cast2(self): + # can't cast to float + test_data = { + "A": dict(zip(range(20), tm.makeStringIndex(20))), + "B": dict(zip(range(15), np.random.default_rng(2).standard_normal(15))), + } + with pytest.raises(ValueError, 
match="could not convert string"): + DataFrame(test_data, dtype=float) + + def test_constructor_dict_dont_upcast(self): + d = {"Col1": {"Row1": "A String", "Row2": np.nan}} + df = DataFrame(d) + assert isinstance(df["Col1"]["Row2"], float) + + def test_constructor_dict_dont_upcast2(self): + dm = DataFrame([[1, 2], ["a", "b"]], index=[1, 2], columns=[1, 2]) + assert isinstance(dm[1][1], int) + + def test_constructor_dict_of_tuples(self): + # GH #1491 + data = {"a": (1, 2, 3), "b": (4, 5, 6)} + + result = DataFrame(data) + expected = DataFrame({k: list(v) for k, v in data.items()}) + tm.assert_frame_equal(result, expected, check_dtype=False) + + def test_constructor_dict_of_ranges(self): + # GH 26356 + data = {"a": range(3), "b": range(3, 6)} + + result = DataFrame(data) + expected = DataFrame({"a": [0, 1, 2], "b": [3, 4, 5]}) + tm.assert_frame_equal(result, expected) + + def test_constructor_dict_of_iterators(self): + # GH 26349 + data = {"a": iter(range(3)), "b": reversed(range(3))} + + result = DataFrame(data) + expected = DataFrame({"a": [0, 1, 2], "b": [2, 1, 0]}) + tm.assert_frame_equal(result, expected) + + def test_constructor_dict_of_generators(self): + # GH 26349 + data = {"a": (i for i in (range(3))), "b": (i for i in reversed(range(3)))} + result = DataFrame(data) + expected = DataFrame({"a": [0, 1, 2], "b": [2, 1, 0]}) + tm.assert_frame_equal(result, expected) + + def test_constructor_dict_multiindex(self): + d = { + ("a", "a"): {("i", "i"): 0, ("i", "j"): 1, ("j", "i"): 2}, + ("b", "a"): {("i", "i"): 6, ("i", "j"): 5, ("j", "i"): 4}, + ("b", "c"): {("i", "i"): 7, ("i", "j"): 8, ("j", "i"): 9}, + } + _d = sorted(d.items()) + df = DataFrame(d) + expected = DataFrame( + [x[1] for x in _d], index=MultiIndex.from_tuples([x[0] for x in _d]) + ).T + expected.index = MultiIndex.from_tuples(expected.index) + tm.assert_frame_equal( + df, + expected, + ) + + d["z"] = {"y": 123.0, ("i", "i"): 111, ("i", "j"): 111, ("j", "i"): 111} + _d.insert(0, ("z", d["z"])) + expected = DataFrame( + [x[1] for x in _d], index=Index([x[0] for x in _d], tupleize_cols=False) + ).T + expected.index = Index(expected.index, tupleize_cols=False) + df = DataFrame(d) + df = df.reindex(columns=expected.columns, index=expected.index) + tm.assert_frame_equal(df, expected) + + def test_constructor_dict_datetime64_index(self): + # GH 10160 + dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"] + + def create_data(constructor): + return {i: {constructor(s): 2 * i} for i, s in enumerate(dates_as_str)} + + data_datetime64 = create_data(np.datetime64) + data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d")) + data_Timestamp = create_data(Timestamp) + + expected = DataFrame( + [ + {0: 0, 1: None, 2: None, 3: None}, + {0: None, 1: 2, 2: None, 3: None}, + {0: None, 1: None, 2: 4, 3: None}, + {0: None, 1: None, 2: None, 3: 6}, + ], + index=[Timestamp(dt) for dt in dates_as_str], + ) + + result_datetime64 = DataFrame(data_datetime64) + result_datetime = DataFrame(data_datetime) + result_Timestamp = DataFrame(data_Timestamp) + tm.assert_frame_equal(result_datetime64, expected) + tm.assert_frame_equal(result_datetime, expected) + tm.assert_frame_equal(result_Timestamp, expected) + + @pytest.mark.parametrize( + "klass,name", + [ + (lambda x: np.timedelta64(x, "D"), "timedelta64"), + (lambda x: timedelta(days=x), "pytimedelta"), + (lambda x: Timedelta(x, "D"), "Timedelta[ns]"), + (lambda x: Timedelta(x, "D").as_unit("s"), "Timedelta[s]"), + ], + ) + def 
test_constructor_dict_timedelta64_index(self, klass, name): + # GH 10160 + td_as_int = [1, 2, 3, 4] + + data = {i: {klass(s): 2 * i} for i, s in enumerate(td_as_int)} + + expected = DataFrame( + [ + {0: 0, 1: None, 2: None, 3: None}, + {0: None, 1: 2, 2: None, 3: None}, + {0: None, 1: None, 2: 4, 3: None}, + {0: None, 1: None, 2: None, 3: 6}, + ], + index=[Timedelta(td, "D") for td in td_as_int], + ) + + result = DataFrame(data) + + tm.assert_frame_equal(result, expected) + + def test_constructor_period_dict(self): + # PeriodIndex + a = pd.PeriodIndex(["2012-01", "NaT", "2012-04"], freq="M") + b = pd.PeriodIndex(["2012-02-01", "2012-03-01", "NaT"], freq="D") + df = DataFrame({"a": a, "b": b}) + assert df["a"].dtype == a.dtype + assert df["b"].dtype == b.dtype + + # list of periods + df = DataFrame({"a": a.astype(object).tolist(), "b": b.astype(object).tolist()}) + assert df["a"].dtype == a.dtype + assert df["b"].dtype == b.dtype + + def test_constructor_dict_extension_scalar(self, ea_scalar_and_dtype): + ea_scalar, ea_dtype = ea_scalar_and_dtype + df = DataFrame({"a": ea_scalar}, index=[0]) + assert df["a"].dtype == ea_dtype + + expected = DataFrame(index=[0], columns=["a"], data=ea_scalar) + + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "data,dtype", + [ + (Period("2020-01"), PeriodDtype("M")), + (Interval(left=0, right=5), IntervalDtype("int64", "right")), + ( + Timestamp("2011-01-01", tz="US/Eastern"), + DatetimeTZDtype(unit="s", tz="US/Eastern"), + ), + ], + ) + def test_constructor_extension_scalar_data(self, data, dtype): + # GH 34832 + df = DataFrame(index=[0, 1], columns=["a", "b"], data=data) + + assert df["a"].dtype == dtype + assert df["b"].dtype == dtype + + arr = pd.array([data] * 2, dtype=dtype) + expected = DataFrame({"a": arr, "b": arr}) + + tm.assert_frame_equal(df, expected) + + def test_nested_dict_frame_constructor(self): + rng = pd.period_range("1/1/2000", periods=5) + df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)), columns=rng) + + data = {} + for col in df.columns: + for row in df.index: + data.setdefault(col, {})[row] = df._get_value(row, col) + + result = DataFrame(data, columns=rng) + tm.assert_frame_equal(result, df) + + data = {} + for col in df.columns: + for row in df.index: + data.setdefault(row, {})[col] = df._get_value(row, col) + + result = DataFrame(data, index=rng).T + tm.assert_frame_equal(result, df) + + def _check_basic_constructor(self, empty): + # mat: 2d matrix with shape (3, 2) to input. 
empty - makes sized + # objects + mat = empty((2, 3), dtype=float) + # 2-D input + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2]) + + assert len(frame.index) == 2 + assert len(frame.columns) == 3 + + # 1-D input + frame = DataFrame(empty((3,)), columns=["A"], index=[1, 2, 3]) + assert len(frame.index) == 3 + assert len(frame.columns) == 1 + + if empty is not np.ones: + msg = r"Cannot convert non-finite values \(NA or inf\) to integer" + with pytest.raises(IntCastingNaNError, match=msg): + DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64) + return + else: + frame = DataFrame( + mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64 + ) + assert frame.values.dtype == np.int64 + + # wrong size axis labels + msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)" + with pytest.raises(ValueError, match=msg): + DataFrame(mat, columns=["A", "B", "C"], index=[1]) + msg = r"Shape of passed values is \(2, 3\), indices imply \(2, 2\)" + with pytest.raises(ValueError, match=msg): + DataFrame(mat, columns=["A", "B"], index=[1, 2]) + + # higher dim raise exception + with pytest.raises(ValueError, match="Must pass 2-d input"): + DataFrame(empty((3, 3, 3)), columns=["A", "B", "C"], index=[1]) + + # automatic labeling + frame = DataFrame(mat) + tm.assert_index_equal(frame.index, Index(range(2)), exact=True) + tm.assert_index_equal(frame.columns, Index(range(3)), exact=True) + + frame = DataFrame(mat, index=[1, 2]) + tm.assert_index_equal(frame.columns, Index(range(3)), exact=True) + + frame = DataFrame(mat, columns=["A", "B", "C"]) + tm.assert_index_equal(frame.index, Index(range(2)), exact=True) + + # 0-length axis + frame = DataFrame(empty((0, 3))) + assert len(frame.index) == 0 + + frame = DataFrame(empty((3, 0))) + assert len(frame.columns) == 0 + + def test_constructor_ndarray(self): + self._check_basic_constructor(np.ones) + + frame = DataFrame(["foo", "bar"], index=[0, 1], columns=["A"]) + assert len(frame) == 2 + + def test_constructor_maskedarray(self): + self._check_basic_constructor(ma.masked_all) + + # Check non-masked values + mat = ma.masked_all((2, 3), dtype=float) + mat[0, 0] = 1.0 + mat[1, 2] = 2.0 + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2]) + assert 1.0 == frame["A"][1] + assert 2.0 == frame["C"][2] + + # what is this even checking?? 
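test_constructor_maskedarray below checks that masked positions surface as NaN while unmasked values pass through; condensed into a standalone snippet:

import numpy.ma as ma
import pandas as pd

mat = ma.masked_all((2, 3), dtype=float)  # every element masked
mat[0, 0] = 1.0   # assigning unmasks these two cells
mat[1, 2] = 2.0

# Masked positions come out as NaN; the two unmasked values pass through.
print(pd.DataFrame(mat, columns=["A", "B", "C"], index=[1, 2]))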
+ mat = ma.masked_all((2, 3), dtype=float) + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2]) + assert np.all(~np.asarray(frame == frame)) + + @pytest.mark.filterwarnings( + "ignore:elementwise comparison failed:DeprecationWarning" + ) + def test_constructor_maskedarray_nonfloat(self): + # masked int promoted to float + mat = ma.masked_all((2, 3), dtype=int) + # 2-D input + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2]) + + assert len(frame.index) == 2 + assert len(frame.columns) == 3 + assert np.all(~np.asarray(frame == frame)) + + # cast type + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.float64) + assert frame.values.dtype == np.float64 + + # Check non-masked values + mat2 = ma.copy(mat) + mat2[0, 0] = 1 + mat2[1, 2] = 2 + frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2]) + assert 1 == frame["A"][1] + assert 2 == frame["C"][2] + + # masked np.datetime64 stays (use NaT as null) + mat = ma.masked_all((2, 3), dtype="M8[ns]") + # 2-D input + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2]) + + assert len(frame.index) == 2 + assert len(frame.columns) == 3 + assert isna(frame).values.all() + + # cast type + msg = r"datetime64\[ns\] values and dtype=int64 is not supported" + with pytest.raises(TypeError, match=msg): + DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64) + + # Check non-masked values + mat2 = ma.copy(mat) + mat2[0, 0] = 1 + mat2[1, 2] = 2 + frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2]) + assert 1 == frame["A"].view("i8")[1] + assert 2 == frame["C"].view("i8")[2] + + # masked bool promoted to object + mat = ma.masked_all((2, 3), dtype=bool) + # 2-D input + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2]) + + assert len(frame.index) == 2 + assert len(frame.columns) == 3 + assert np.all(~np.asarray(frame == frame)) + + # cast type + frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=object) + assert frame.values.dtype == object + + # Check non-masked values + mat2 = ma.copy(mat) + mat2[0, 0] = True + mat2[1, 2] = False + frame = DataFrame(mat2, columns=["A", "B", "C"], index=[1, 2]) + assert frame["A"][1] is True + assert frame["C"][2] is False + + def test_constructor_maskedarray_hardened(self): + # Check numpy masked arrays with hard masks -- from GH24574 + mat_hard = ma.masked_all((2, 2), dtype=float).harden_mask() + result = DataFrame(mat_hard, columns=["A", "B"], index=[1, 2]) + expected = DataFrame( + {"A": [np.nan, np.nan], "B": [np.nan, np.nan]}, + columns=["A", "B"], + index=[1, 2], + dtype=float, + ) + tm.assert_frame_equal(result, expected) + # Check case where mask is hard but no data are masked + mat_hard = ma.ones((2, 2), dtype=float).harden_mask() + result = DataFrame(mat_hard, columns=["A", "B"], index=[1, 2]) + expected = DataFrame( + {"A": [1.0, 1.0], "B": [1.0, 1.0]}, + columns=["A", "B"], + index=[1, 2], + dtype=float, + ) + tm.assert_frame_equal(result, expected) + + def test_constructor_maskedrecarray_dtype(self): + # Ensure constructor honors dtype + data = np.ma.array( + np.ma.zeros(5, dtype=[("date", " None: + self._lst = lst + + def __getitem__(self, n): + return self._lst.__getitem__(n) + + def __len__(self) -> int: + return self._lst.__len__() + + lst_containers = [DummyContainer([1, "a"]), DummyContainer([2, "b"])] + columns = ["num", "str"] + result = DataFrame(lst_containers, columns=columns) + expected = DataFrame([[1, "a"], [2, "b"]], columns=columns) + tm.assert_frame_equal(result, expected, 
check_dtype=False) + + def test_constructor_stdlib_array(self): + # GH 4297 + # support Array + result = DataFrame({"A": array.array("i", range(10))}) + expected = DataFrame({"A": list(range(10))}) + tm.assert_frame_equal(result, expected, check_dtype=False) + + expected = DataFrame([list(range(10)), list(range(10))]) + result = DataFrame([array.array("i", range(10)), array.array("i", range(10))]) + tm.assert_frame_equal(result, expected, check_dtype=False) + + def test_constructor_range(self): + # GH26342 + result = DataFrame(range(10)) + expected = DataFrame(list(range(10))) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_ranges(self): + result = DataFrame([range(10), range(10)]) + expected = DataFrame([list(range(10)), list(range(10))]) + tm.assert_frame_equal(result, expected) + + def test_constructor_iterable(self): + # GH 21987 + class Iter: + def __iter__(self) -> Iterator: + for i in range(10): + yield [1, 2, 3] + + expected = DataFrame([[1, 2, 3]] * 10) + result = DataFrame(Iter()) + tm.assert_frame_equal(result, expected) + + def test_constructor_iterator(self): + result = DataFrame(iter(range(10))) + expected = DataFrame(list(range(10))) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_iterators(self): + result = DataFrame([iter(range(10)), iter(range(10))]) + expected = DataFrame([list(range(10)), list(range(10))]) + tm.assert_frame_equal(result, expected) + + def test_constructor_generator(self): + # related #2305 + + gen1 = (i for i in range(10)) + gen2 = (i for i in range(10)) + + expected = DataFrame([list(range(10)), list(range(10))]) + result = DataFrame([gen1, gen2]) + tm.assert_frame_equal(result, expected) + + gen = ([i, "a"] for i in range(10)) + result = DataFrame(gen) + expected = DataFrame({0: range(10), 1: "a"}) + tm.assert_frame_equal(result, expected, check_dtype=False) + + def test_constructor_list_of_dicts(self): + result = DataFrame([{}]) + expected = DataFrame(index=RangeIndex(1), columns=[]) + tm.assert_frame_equal(result, expected) + + def test_constructor_ordered_dict_nested_preserve_order(self): + # see gh-18166 + nested1 = OrderedDict([("b", 1), ("a", 2)]) + nested2 = OrderedDict([("b", 2), ("a", 5)]) + data = OrderedDict([("col2", nested1), ("col1", nested2)]) + result = DataFrame(data) + data = {"col2": [1, 2], "col1": [2, 5]} + expected = DataFrame(data=data, index=["b", "a"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dict_type", [dict, OrderedDict]) + def test_constructor_ordered_dict_preserve_order(self, dict_type): + # see gh-13304 + expected = DataFrame([[2, 1]], columns=["b", "a"]) + + data = dict_type() + data["b"] = [2] + data["a"] = [1] + + result = DataFrame(data) + tm.assert_frame_equal(result, expected) + + data = dict_type() + data["b"] = 2 + data["a"] = 1 + + result = DataFrame([data]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dict_type", [dict, OrderedDict]) + def test_constructor_ordered_dict_conflicting_orders(self, dict_type): + # the first dict element sets the ordering for the DataFrame, + # even if there are conflicting orders from subsequent ones + row_one = dict_type() + row_one["b"] = 2 + row_one["a"] = 1 + + row_two = dict_type() + row_two["a"] = 1 + row_two["b"] = 2 + + row_three = {"b": 2, "a": 1} + + expected = DataFrame([[2, 1], [2, 1]], columns=["b", "a"]) + result = DataFrame([row_one, row_two]) + tm.assert_frame_equal(result, expected) + + expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=["b", "a"]) + 
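The iterator and generator constructor tests above all reduce to one rule: lazy inputs are materialized up front, so these three spellings build identical frames. A sketch:

import pandas as pd

a = pd.DataFrame(range(3))
b = pd.DataFrame(iter(range(3)))
c = pd.DataFrame(i for i in range(3))
assert a.equals(b) and a.equals(c)
print(a)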
result = DataFrame([row_one, row_two, row_three]) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_series_aligned_index(self): + series = [Series(i, index=["b", "a", "c"], name=str(i)) for i in range(3)] + result = DataFrame(series) + expected = DataFrame( + {"b": [0, 1, 2], "a": [0, 1, 2], "c": [0, 1, 2]}, + columns=["b", "a", "c"], + index=["0", "1", "2"], + ) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_derived_dicts(self): + class CustomDict(dict): + pass + + d = {"a": 1.5, "b": 3} + + data_custom = [CustomDict(d)] + data = [d] + + result_custom = DataFrame(data_custom) + result = DataFrame(data) + tm.assert_frame_equal(result, result_custom) + + def test_constructor_ragged(self): + data = { + "A": np.random.default_rng(2).standard_normal(10), + "B": np.random.default_rng(2).standard_normal(8), + } + with pytest.raises(ValueError, match="All arrays must be of the same length"): + DataFrame(data) + + def test_constructor_scalar(self): + idx = Index(range(3)) + df = DataFrame({"a": 0}, index=idx) + expected = DataFrame({"a": [0, 0, 0]}, index=idx) + tm.assert_frame_equal(df, expected, check_dtype=False) + + def test_constructor_Series_copy_bug(self, float_frame): + df = DataFrame(float_frame["A"], index=float_frame.index, columns=["A"]) + df.copy() + + def test_constructor_mixed_dict_and_Series(self): + data = {} + data["A"] = {"foo": 1, "bar": 2, "baz": 3} + data["B"] = Series([4, 3, 2, 1], index=["bar", "qux", "baz", "foo"]) + + result = DataFrame(data) + assert result.index.is_monotonic_increasing + + # ordering ambiguous, raise exception + with pytest.raises(ValueError, match="ambiguous ordering"): + DataFrame({"A": ["a", "b"], "B": {"a": "a", "b": "b"}}) + + # this is OK though + result = DataFrame({"A": ["a", "b"], "B": Series(["a", "b"], index=["a", "b"])}) + expected = DataFrame({"A": ["a", "b"], "B": ["a", "b"]}, index=["a", "b"]) + tm.assert_frame_equal(result, expected) + + def test_constructor_mixed_type_rows(self): + # Issue 25075 + data = [[1, 2], (3, 4)] + result = DataFrame(data) + expected = DataFrame([[1, 2], [3, 4]]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "tuples,lists", + [ + ((), []), + ((()), []), + (((), ()), [(), ()]), + (((), ()), [[], []]), + (([], []), [[], []]), + (([1], [2]), [[1], [2]]), # GH 32776 + (([1, 2, 3], [4, 5, 6]), [[1, 2, 3], [4, 5, 6]]), + ], + ) + def test_constructor_tuple(self, tuples, lists): + # GH 25691 + result = DataFrame(tuples) + expected = DataFrame(lists) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_tuples(self): + result = DataFrame({"A": [(1, 2), (3, 4)]}) + expected = DataFrame({"A": Series([(1, 2), (3, 4)])}) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_namedtuples(self): + # GH11181 + named_tuple = namedtuple("Pandas", list("ab")) + tuples = [named_tuple(1, 3), named_tuple(2, 4)] + expected = DataFrame({"a": [1, 2], "b": [3, 4]}) + result = DataFrame(tuples) + tm.assert_frame_equal(result, expected) + + # with columns + expected = DataFrame({"y": [1, 2], "z": [3, 4]}) + result = DataFrame(tuples, columns=["y", "z"]) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_dataclasses(self): + # GH21910 + Point = make_dataclass("Point", [("x", int), ("y", int)]) + + data = [Point(0, 3), Point(1, 3)] + expected = DataFrame({"x": [0, 1], "y": [3, 3]}) + result = DataFrame(data) + tm.assert_frame_equal(result, expected) + + def 
test_constructor_list_of_dataclasses_with_varying_types(self): + # GH21910 + # varying types + Point = make_dataclass("Point", [("x", int), ("y", int)]) + HLine = make_dataclass("HLine", [("x0", int), ("x1", int), ("y", int)]) + + data = [Point(0, 3), HLine(1, 3, 3)] + + expected = DataFrame( + {"x": [0, np.nan], "y": [3, 3], "x0": [np.nan, 1], "x1": [np.nan, 3]} + ) + result = DataFrame(data) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_of_dataclasses_error_thrown(self): + # GH21910 + Point = make_dataclass("Point", [("x", int), ("y", int)]) + + # expect TypeError + msg = "asdict() should be called on dataclass instances" + with pytest.raises(TypeError, match=re.escape(msg)): + DataFrame([Point(0, 0), {"x": 1, "y": 0}]) + + def test_constructor_list_of_dict_order(self): + # GH10056 + data = [ + {"First": 1, "Second": 4, "Third": 7, "Fourth": 10}, + {"Second": 5, "First": 2, "Fourth": 11, "Third": 8}, + {"Second": 6, "First": 3, "Fourth": 12, "Third": 9, "YYY": 14, "XXX": 13}, + ] + expected = DataFrame( + { + "First": [1, 2, 3], + "Second": [4, 5, 6], + "Third": [7, 8, 9], + "Fourth": [10, 11, 12], + "YYY": [None, None, 14], + "XXX": [None, None, 13], + } + ) + result = DataFrame(data) + tm.assert_frame_equal(result, expected) + + def test_constructor_Series_named(self): + a = Series([1, 2, 3], index=["a", "b", "c"], name="x") + df = DataFrame(a) + assert df.columns[0] == "x" + tm.assert_index_equal(df.index, a.index) + + # ndarray like + arr = np.random.default_rng(2).standard_normal(10) + s = Series(arr, name="x") + df = DataFrame(s) + expected = DataFrame({"x": s}) + tm.assert_frame_equal(df, expected) + + s = Series(arr, index=range(3, 13)) + df = DataFrame(s) + expected = DataFrame({0: s}) + tm.assert_frame_equal(df, expected) + + msg = r"Shape of passed values is \(10, 1\), indices imply \(10, 2\)" + with pytest.raises(ValueError, match=msg): + DataFrame(s, columns=[1, 2]) + + # #2234 + a = Series([], name="x", dtype=object) + df = DataFrame(a) + assert df.columns[0] == "x" + + # series with name and w/o + s1 = Series(arr, name="x") + df = DataFrame([s1, arr]).T + expected = DataFrame({"x": s1, "Unnamed 0": arr}, columns=["x", "Unnamed 0"]) + tm.assert_frame_equal(df, expected) + + # this is a bit non-intuitive here; the series collapse down to arrays + df = DataFrame([arr, s1]).T + expected = DataFrame({1: s1, 0: arr}, columns=[0, 1]) + tm.assert_frame_equal(df, expected) + + def test_constructor_Series_named_and_columns(self): + # GH 9232 validation + + s0 = Series(range(5), name=0) + s1 = Series(range(5), name=1) + + # matching name and column gives standard frame + tm.assert_frame_equal(DataFrame(s0, columns=[0]), s0.to_frame()) + tm.assert_frame_equal(DataFrame(s1, columns=[1]), s1.to_frame()) + + # non-matching produces empty frame + assert DataFrame(s0, columns=[1]).empty + assert DataFrame(s1, columns=[0]).empty + + def test_constructor_Series_differently_indexed(self): + # name + s1 = Series([1, 2, 3], index=["a", "b", "c"], name="x") + + # no name + s2 = Series([1, 2, 3], index=["a", "b", "c"]) + + other_index = Index(["a", "b"]) + + df1 = DataFrame(s1, index=other_index) + exp1 = DataFrame(s1.reindex(other_index)) + assert df1.columns[0] == "x" + tm.assert_frame_equal(df1, exp1) + + df2 = DataFrame(s2, index=other_index) + exp2 = DataFrame(s2.reindex(other_index)) + assert df2.columns[0] == 0 + tm.assert_index_equal(df2.index, other_index) + tm.assert_frame_equal(df2, exp2) + + @pytest.mark.parametrize( + "name_in1,name_in2,name_in3,name_out", + 
[ + ("idx", "idx", "idx", "idx"), + ("idx", "idx", None, None), + ("idx", None, None, None), + ("idx1", "idx2", None, None), + ("idx1", "idx1", "idx2", None), + ("idx1", "idx2", "idx3", None), + (None, None, None, None), + ], + ) + def test_constructor_index_names(self, name_in1, name_in2, name_in3, name_out): + # GH13475 + indices = [ + Index(["a", "b", "c"], name=name_in1), + Index(["b", "c", "d"], name=name_in2), + Index(["c", "d", "e"], name=name_in3), + ] + series = { + c: Series([0, 1, 2], index=i) for i, c in zip(indices, ["x", "y", "z"]) + } + result = DataFrame(series) + + exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out) + expected = DataFrame( + { + "x": [0, 1, 2, np.nan, np.nan], + "y": [np.nan, 0, 1, 2, np.nan], + "z": [np.nan, np.nan, 0, 1, 2], + }, + index=exp_ind, + ) + + tm.assert_frame_equal(result, expected) + + def test_constructor_manager_resize(self, float_frame): + index = list(float_frame.index[:5]) + columns = list(float_frame.columns[:3]) + + result = DataFrame(float_frame._mgr, index=index, columns=columns) + tm.assert_index_equal(result.index, Index(index)) + tm.assert_index_equal(result.columns, Index(columns)) + + def test_constructor_mix_series_nonseries(self, float_frame): + df = DataFrame( + {"A": float_frame["A"], "B": list(float_frame["B"])}, columns=["A", "B"] + ) + tm.assert_frame_equal(df, float_frame.loc[:, ["A", "B"]]) + + msg = "does not match index length" + with pytest.raises(ValueError, match=msg): + DataFrame({"A": float_frame["A"], "B": list(float_frame["B"])[:-2]}) + + def test_constructor_miscast_na_int_dtype(self): + msg = r"Cannot convert non-finite values \(NA or inf\) to integer" + + with pytest.raises(IntCastingNaNError, match=msg): + DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64) + + def test_constructor_column_duplicates(self): + # it works! #2079 + df = DataFrame([[8, 5]], columns=["a", "a"]) + edf = DataFrame([[8, 5]]) + edf.columns = ["a", "a"] + + tm.assert_frame_equal(df, edf) + + idf = DataFrame.from_records([(8, 5)], columns=["a", "a"]) + + tm.assert_frame_equal(idf, edf) + + def test_constructor_empty_with_string_dtype(self): + # GH 9428 + expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object) + + df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str) + tm.assert_frame_equal(df, expected) + df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_) + tm.assert_frame_equal(df, expected) + df = DataFrame(index=[0, 1], columns=[0, 1], dtype="U5") + tm.assert_frame_equal(df, expected) + + def test_constructor_empty_with_string_extension(self, nullable_string_dtype): + # GH 34915 + expected = DataFrame(columns=["c1"], dtype=nullable_string_dtype) + df = DataFrame(columns=["c1"], dtype=nullable_string_dtype) + tm.assert_frame_equal(df, expected) + + def test_constructor_single_value(self): + # expecting single value upcasting here + df = DataFrame(0.0, index=[1, 2, 3], columns=["a", "b", "c"]) + tm.assert_frame_equal( + df, DataFrame(np.zeros(df.shape).astype("float64"), df.index, df.columns) + ) + + df = DataFrame(0, index=[1, 2, 3], columns=["a", "b", "c"]) + tm.assert_frame_equal( + df, DataFrame(np.zeros(df.shape).astype("int64"), df.index, df.columns) + ) + + df = DataFrame("a", index=[1, 2], columns=["a", "c"]) + tm.assert_frame_equal( + df, + DataFrame( + np.array([["a", "a"], ["a", "a"]], dtype=object), + index=[1, 2], + columns=["a", "c"], + ), + ) + + msg = "DataFrame constructor not properly called!" 
+ with pytest.raises(ValueError, match=msg): + DataFrame("a", [1, 2]) + with pytest.raises(ValueError, match=msg): + DataFrame("a", columns=["a", "c"]) + + msg = "incompatible data and dtype" + with pytest.raises(TypeError, match=msg): + DataFrame("a", [1, 2], ["a", "c"], float) + + def test_constructor_with_datetimes(self): + intname = np.dtype(int).name + floatname = np.dtype(np.float64).name + objectname = np.dtype(np.object_).name + + # single item + df = DataFrame( + { + "A": 1, + "B": "foo", + "C": "bar", + "D": Timestamp("20010101"), + "E": datetime(2001, 1, 2, 0, 0), + }, + index=np.arange(10), + ) + result = df.dtypes + expected = Series( + [np.dtype("int64")] + + [np.dtype(objectname)] * 2 + + [np.dtype("M8[s]"), np.dtype("M8[us]")], + index=list("ABCDE"), + ) + tm.assert_series_equal(result, expected) + + # check with ndarray construction ndim==0 (e.g. we are passing a ndim 0 + # ndarray with a dtype specified) + df = DataFrame( + { + "a": 1.0, + "b": 2, + "c": "foo", + floatname: np.array(1.0, dtype=floatname), + intname: np.array(1, dtype=intname), + }, + index=np.arange(10), + ) + result = df.dtypes + expected = Series( + [np.dtype("float64")] + + [np.dtype("int64")] + + [np.dtype("object")] + + [np.dtype("float64")] + + [np.dtype(intname)], + index=["a", "b", "c", floatname, intname], + ) + tm.assert_series_equal(result, expected) + + # check with ndarray construction ndim>0 + df = DataFrame( + { + "a": 1.0, + "b": 2, + "c": "foo", + floatname: np.array([1.0] * 10, dtype=floatname), + intname: np.array([1] * 10, dtype=intname), + }, + index=np.arange(10), + ) + result = df.dtypes + expected = Series( + [np.dtype("float64")] + + [np.dtype("int64")] + + [np.dtype("object")] + + [np.dtype("float64")] + + [np.dtype(intname)], + index=["a", "b", "c", floatname, intname], + ) + tm.assert_series_equal(result, expected) + + def test_constructor_with_datetimes1(self): + # GH 2809 + ind = date_range(start="2000-01-01", freq="D", periods=10) + datetimes = [ts.to_pydatetime() for ts in ind] + datetime_s = Series(datetimes) + assert datetime_s.dtype == "M8[ns]" + + def test_constructor_with_datetimes2(self): + # GH 2810 + ind = date_range(start="2000-01-01", freq="D", periods=10) + datetimes = [ts.to_pydatetime() for ts in ind] + dates = [ts.date() for ts in ind] + df = DataFrame(datetimes, columns=["datetimes"]) + df["dates"] = dates + result = df.dtypes + expected = Series( + [np.dtype("datetime64[ns]"), np.dtype("object")], + index=["datetimes", "dates"], + ) + tm.assert_series_equal(result, expected) + + def test_constructor_with_datetimes3(self): + # GH 7594 + # don't coerce tz-aware + tz = pytz.timezone("US/Eastern") + dt = tz.localize(datetime(2012, 1, 1)) + + df = DataFrame({"End Date": dt}, index=[0]) + assert df.iat[0, 0] == dt + tm.assert_series_equal( + df.dtypes, Series({"End Date": "datetime64[us, US/Eastern]"}) + ) + + df = DataFrame([{"End Date": dt}]) + assert df.iat[0, 0] == dt + tm.assert_series_equal( + df.dtypes, Series({"End Date": "datetime64[ns, US/Eastern]"}) + ) + + def test_constructor_with_datetimes4(self): + # tz-aware (UTC and other tz's) + # GH 8411 + dr = date_range("20130101", periods=3) + df = DataFrame({"value": dr}) + assert df.iat[0, 0].tz is None + dr = date_range("20130101", periods=3, tz="UTC") + df = DataFrame({"value": dr}) + assert str(df.iat[0, 0].tz) == "UTC" + dr = date_range("20130101", periods=3, tz="US/Eastern") + df = DataFrame({"value": dr}) + assert str(df.iat[0, 0].tz) == "US/Eastern" + + def test_constructor_with_datetimes5(self): + # 
GH 7822
+ # preserve an index with a tz on dict construction
+ i = date_range("1/1/2011", periods=5, freq="10s", tz="US/Eastern")
+
+ expected = DataFrame({"a": i.to_series().reset_index(drop=True)})
+ df = DataFrame()
+ df["a"] = i
+ tm.assert_frame_equal(df, expected)
+
+ df = DataFrame({"a": i})
+ tm.assert_frame_equal(df, expected)
+
+ def test_constructor_with_datetimes6(self):
+ # multiples
+ i = date_range("1/1/2011", periods=5, freq="10s", tz="US/Eastern")
+ i_no_tz = date_range("1/1/2011", periods=5, freq="10s")
+ df = DataFrame({"a": i, "b": i_no_tz})
+ expected = DataFrame({"a": i.to_series().reset_index(drop=True), "b": i_no_tz})
+ tm.assert_frame_equal(df, expected)
+
+ @pytest.mark.parametrize(
+ "arr",
+ [
+ np.array([None, None, None, None, datetime.now(), None]),
+ np.array([None, None, datetime.now(), None]),
+ [[np.datetime64("NaT")], [None]],
+ [[np.datetime64("NaT")], [pd.NaT]],
+ [[None], [np.datetime64("NaT")]],
+ [[None], [pd.NaT]],
+ [[pd.NaT], [np.datetime64("NaT")]],
+ [[pd.NaT], [None]],
+ ],
+ )
+ def test_constructor_datetimes_with_nulls(self, arr):
+ # gh-15869, GH#11220
+ result = DataFrame(arr).dtypes
+ expected = Series([np.dtype("datetime64[ns]")])
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("order", ["K", "A", "C", "F"])
+ @pytest.mark.parametrize(
+ "unit",
+ ["M", "D", "h", "m", "s", "ms", "us", "ns"],
+ )
+ def test_constructor_datetimes_non_ns(self, order, unit):
+ dtype = f"datetime64[{unit}]"
+ na = np.array(
+ [
+ ["2015-01-01", "2015-01-02", "2015-01-03"],
+ ["2017-01-01", "2017-01-02", "2017-02-03"],
+ ],
+ dtype=dtype,
+ order=order,
+ )
+ df = DataFrame(na)
+ expected = DataFrame(na.astype("M8[ns]"))
+ if unit in ["M", "D", "h", "m"]:
+ with pytest.raises(TypeError, match="Cannot cast"):
+ expected.astype(dtype)
+
+ # instead the constructor casts to the closest supported reso, i.e. "s"
+ expected = expected.astype("datetime64[s]")
+ else:
+ expected = expected.astype(dtype=dtype)
+
+ tm.assert_frame_equal(df, expected)
+
+ @pytest.mark.parametrize("order", ["K", "A", "C", "F"])
+ @pytest.mark.parametrize(
+ "unit",
+ [
+ "D",
+ "h",
+ "m",
+ "s",
+ "ms",
+ "us",
+ "ns",
+ ],
+ )
+ def test_constructor_timedelta_non_ns(self, order, unit):
+ dtype = f"timedelta64[{unit}]"
+ na = np.array(
+ [
+ [np.timedelta64(1, "D"), np.timedelta64(2, "D")],
+ [np.timedelta64(4, "D"), np.timedelta64(5, "D")],
+ ],
+ dtype=dtype,
+ order=order,
+ )
+ df = DataFrame(na)
+ if unit in ["D", "h", "m"]:
+ # we get the nearest supported unit, i.e. "s"
+ exp_unit = "s"
+ else:
+ exp_unit = unit
+ exp_dtype = np.dtype(f"m8[{exp_unit}]")
+ expected = DataFrame(
+ [
+ [Timedelta(1, "D"), Timedelta(2, "D")],
+ [Timedelta(4, "D"), Timedelta(5, "D")],
+ ],
+ dtype=exp_dtype,
+ )
+ # TODO(2.0): ideally we should get the same 'expected' without passing
+ # dtype=exp_dtype.
+ tm.assert_frame_equal(df, expected)
+
+ def test_constructor_for_list_with_dtypes(self):
+ # test list of lists/ndarrays
+ df = DataFrame([np.arange(5) for x in range(5)])
+ result = df.dtypes
+ expected = Series([np.dtype("int")] * 5)
+ tm.assert_series_equal(result, expected)
+
+ df = DataFrame([np.array(np.arange(5), dtype="int32") for x in range(5)])
+ result = df.dtypes
+ expected = Series([np.dtype("int32")] * 5)
+ tm.assert_series_equal(result, expected)
+
+ # overflow issue?
(we always expected int64 upcasting here) + df = DataFrame({"a": [2**31, 2**31 + 1]}) + assert df.dtypes.iloc[0] == np.dtype("int64") + + # GH #2751 (construction with no index specified), make sure we cast to + # platform values + df = DataFrame([1, 2]) + assert df.dtypes.iloc[0] == np.dtype("int64") + + df = DataFrame([1.0, 2.0]) + assert df.dtypes.iloc[0] == np.dtype("float64") + + df = DataFrame({"a": [1, 2]}) + assert df.dtypes.iloc[0] == np.dtype("int64") + + df = DataFrame({"a": [1.0, 2.0]}) + assert df.dtypes.iloc[0] == np.dtype("float64") + + df = DataFrame({"a": 1}, index=range(3)) + assert df.dtypes.iloc[0] == np.dtype("int64") + + df = DataFrame({"a": 1.0}, index=range(3)) + assert df.dtypes.iloc[0] == np.dtype("float64") + + # with object list + df = DataFrame( + { + "a": [1, 2, 4, 7], + "b": [1.2, 2.3, 5.1, 6.3], + "c": list("abcd"), + "d": [datetime(2000, 1, 1) for i in range(4)], + "e": [1.0, 2, 4.0, 7], + } + ) + result = df.dtypes + expected = Series( + [ + np.dtype("int64"), + np.dtype("float64"), + np.dtype("object"), + np.dtype("datetime64[ns]"), + np.dtype("float64"), + ], + index=list("abcde"), + ) + tm.assert_series_equal(result, expected) + + def test_constructor_frame_copy(self, float_frame): + cop = DataFrame(float_frame, copy=True) + cop["A"] = 5 + assert (cop["A"] == 5).all() + assert not (float_frame["A"] == 5).all() + + def test_constructor_frame_shallow_copy(self, float_frame): + # constructing a DataFrame from DataFrame with copy=False should still + # give a "shallow" copy (share data, not attributes) + # https://github.com/pandas-dev/pandas/issues/49523 + orig = float_frame.copy() + cop = DataFrame(float_frame) + assert cop._mgr is not float_frame._mgr + # Overwriting index of copy doesn't change original + cop.index = np.arange(len(cop)) + tm.assert_frame_equal(float_frame, orig) + + def test_constructor_ndarray_copy( + self, float_frame, using_array_manager, using_copy_on_write + ): + if not using_array_manager: + arr = float_frame.values.copy() + df = DataFrame(arr) + + arr[5] = 5 + if using_copy_on_write: + assert not (df.values[5] == 5).all() + else: + assert (df.values[5] == 5).all() + + df = DataFrame(arr, copy=True) + arr[6] = 6 + assert not (df.values[6] == 6).all() + else: + arr = float_frame.values.copy() + # default: copy to ensure contiguous arrays + df = DataFrame(arr) + assert df._mgr.arrays[0].flags.c_contiguous + arr[0, 0] = 100 + assert df.iloc[0, 0] != 100 + + # manually specify copy=False + df = DataFrame(arr, copy=False) + assert not df._mgr.arrays[0].flags.c_contiguous + arr[0, 0] = 1000 + assert df.iloc[0, 0] == 1000 + + def test_constructor_series_copy(self, float_frame): + series = float_frame._series + + df = DataFrame({"A": series["A"]}, copy=True) + # TODO can be replaced with `df.loc[:, "A"] = 5` after deprecation about + # inplace mutation is enforced + df.loc[df.index[0] : df.index[-1], "A"] = 5 + + assert not (series["A"] == 5).all() + + @pytest.mark.parametrize( + "df", + [ + DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan]), + DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan]), + DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan]), + DataFrame( + [[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan] + ), + DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1, 2, 2]), + ], + ) + def test_constructor_with_nas(self, df): + # GH 5016 + # na's in indices + # GH 21428 (non-unique columns) + + for i in range(len(df.columns)): + df.iloc[:, i] + + indexer = 
np.arange(len(df.columns))[isna(df.columns)] + + # No NaN found -> error + if len(indexer) == 0: + with pytest.raises(KeyError, match="^nan$"): + df.loc[:, np.nan] + # single nan should result in Series + elif len(indexer) == 1: + tm.assert_series_equal(df.iloc[:, indexer[0]], df.loc[:, np.nan]) + # multiple nans should result in DataFrame + else: + tm.assert_frame_equal(df.iloc[:, indexer], df.loc[:, np.nan]) + + def test_constructor_lists_to_object_dtype(self): + # from #1074 + d = DataFrame({"a": [np.nan, False]}) + assert d["a"].dtype == np.object_ + assert not d["a"][1] + + def test_constructor_ndarray_categorical_dtype(self): + cat = Categorical(["A", "B", "C"]) + arr = np.array(cat).reshape(-1, 1) + arr = np.broadcast_to(arr, (3, 4)) + + result = DataFrame(arr, dtype=cat.dtype) + + expected = DataFrame({0: cat, 1: cat, 2: cat, 3: cat}) + tm.assert_frame_equal(result, expected) + + def test_constructor_categorical(self): + # GH8626 + + # dict creation + df = DataFrame({"A": list("abc")}, dtype="category") + expected = Series(list("abc"), dtype="category", name="A") + tm.assert_series_equal(df["A"], expected) + + # to_frame + s = Series(list("abc"), dtype="category") + result = s.to_frame() + expected = Series(list("abc"), dtype="category", name=0) + tm.assert_series_equal(result[0], expected) + result = s.to_frame(name="foo") + expected = Series(list("abc"), dtype="category", name="foo") + tm.assert_series_equal(result["foo"], expected) + + # list-like creation + df = DataFrame(list("abc"), dtype="category") + expected = Series(list("abc"), dtype="category", name=0) + tm.assert_series_equal(df[0], expected) + + def test_construct_from_1item_list_of_categorical(self): + # pre-2.0 this behaved as DataFrame({0: cat}), in 2.0 we remove + # Categorical special case + # ndim != 1 + cat = Categorical(list("abc")) + df = DataFrame([cat]) + expected = DataFrame([cat.astype(object)]) + tm.assert_frame_equal(df, expected) + + def test_construct_from_list_of_categoricals(self): + # pre-2.0 this behaved as DataFrame({0: cat}), in 2.0 we remove + # Categorical special case + + df = DataFrame([Categorical(list("abc")), Categorical(list("abd"))]) + expected = DataFrame([["a", "b", "c"], ["a", "b", "d"]]) + tm.assert_frame_equal(df, expected) + + def test_from_nested_listlike_mixed_types(self): + # pre-2.0 this behaved as DataFrame({0: cat}), in 2.0 we remove + # Categorical special case + # mixed + df = DataFrame([Categorical(list("abc")), list("def")]) + expected = DataFrame([["a", "b", "c"], ["d", "e", "f"]]) + tm.assert_frame_equal(df, expected) + + def test_construct_from_listlikes_mismatched_lengths(self): + df = DataFrame([Categorical(list("abc")), Categorical(list("abdefg"))]) + expected = DataFrame([list("abc"), list("abdefg")]) + tm.assert_frame_equal(df, expected) + + def test_constructor_categorical_series(self): + items = [1, 2, 3, 1] + exp = Series(items).astype("category") + res = Series(items, dtype="category") + tm.assert_series_equal(res, exp) + + items = ["a", "b", "c", "a"] + exp = Series(items).astype("category") + res = Series(items, dtype="category") + tm.assert_series_equal(res, exp) + + # insert into frame with different index + # GH 8076 + index = date_range("20000101", periods=3) + expected = Series( + Categorical(values=[np.nan, np.nan, np.nan], categories=["a", "b", "c"]) + ) + expected.index = index + + expected = DataFrame({"x": expected}) + df = DataFrame({"x": Series(["a", "b", "c"], dtype="category")}, index=index) + tm.assert_frame_equal(df, expected) + + 
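+ # A minimal standalone sketch of the alignment behavior exercised just
+ # above (illustrative only; this helper is not part of the upstream
+ # pandas suite): constructing a frame from a categorical Series reindexes
+ # the Series to the frame's index, so disjoint labels yield all-missing
+ # values while the categorical dtype survives.
+ def _sketch_categorical_reindex_on_construction():
+     idx = date_range("20000101", periods=3)
+     ser = Series(["a", "b", "c"], dtype="category")
+     # ser's default RangeIndex shares no labels with idx, so every value
+     # is NaN after alignment, but the dtype (and its categories) remain
+     df = DataFrame({"x": ser}, index=idx)
+     assert df["x"].isna().all()
+     assert df["x"].dtype == "category"
+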
@pytest.mark.parametrize( + "dtype", + tm.ALL_NUMERIC_DTYPES + + tm.DATETIME64_DTYPES + + tm.TIMEDELTA64_DTYPES + + tm.BOOL_DTYPES, + ) + def test_check_dtype_empty_numeric_column(self, dtype): + # GH24386: Ensure dtypes are set correctly for an empty DataFrame. + # Empty DataFrame is generated via dictionary data with non-overlapping columns. + data = DataFrame({"a": [1, 2]}, columns=["b"], dtype=dtype) + + assert data.b.dtype == dtype + + @pytest.mark.parametrize( + "dtype", tm.STRING_DTYPES + tm.BYTES_DTYPES + tm.OBJECT_DTYPES + ) + def test_check_dtype_empty_string_column(self, request, dtype, using_array_manager): + # GH24386: Ensure dtypes are set correctly for an empty DataFrame. + # Empty DataFrame is generated via dictionary data with non-overlapping columns. + data = DataFrame({"a": [1, 2]}, columns=["b"], dtype=dtype) + + if using_array_manager and dtype in tm.BYTES_DTYPES: + # TODO(ArrayManager) astype to bytes dtypes does not yet give object dtype + td.mark_array_manager_not_yet_implemented(request) + + assert data.b.dtype.name == "object" + + def test_to_frame_with_falsey_names(self): + # GH 16114 + result = Series(name=0, dtype=object).to_frame().dtypes + expected = Series({0: object}) + tm.assert_series_equal(result, expected) + + result = DataFrame(Series(name=0, dtype=object)).dtypes + tm.assert_series_equal(result, expected) + + @pytest.mark.arm_slow + @pytest.mark.parametrize("dtype", [None, "uint8", "category"]) + def test_constructor_range_dtype(self, dtype): + expected = DataFrame({"A": [0, 1, 2, 3, 4]}, dtype=dtype or "int64") + + # GH 26342 + result = DataFrame(range(5), columns=["A"], dtype=dtype) + tm.assert_frame_equal(result, expected) + + # GH 16804 + result = DataFrame({"A": range(5)}, dtype=dtype) + tm.assert_frame_equal(result, expected) + + def test_frame_from_list_subclass(self): + # GH21226 + class List(list): + pass + + expected = DataFrame([[1, 2, 3], [4, 5, 6]]) + result = DataFrame(List([List([1, 2, 3]), List([4, 5, 6])])) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "extension_arr", + [ + Categorical(list("aabbc")), + SparseArray([1, np.nan, np.nan, np.nan]), + IntervalArray([Interval(0, 1), Interval(1, 5)]), + PeriodArray(pd.period_range(start="1/1/2017", end="1/1/2018", freq="M")), + ], + ) + def test_constructor_with_extension_array(self, extension_arr): + # GH11363 + expected = DataFrame(Series(extension_arr)) + result = DataFrame(extension_arr) + tm.assert_frame_equal(result, expected) + + def test_datetime_date_tuple_columns_from_dict(self): + # GH 10863 + v = date.today() + tup = v, v + result = DataFrame({tup: Series(range(3), index=range(3))}, columns=[tup]) + expected = DataFrame([0, 1, 2], columns=Index(Series([tup]))) + tm.assert_frame_equal(result, expected) + + def test_construct_with_two_categoricalindex_series(self): + # GH 14600 + s1 = Series([39, 6, 4], index=CategoricalIndex(["female", "male", "unknown"])) + s2 = Series( + [2, 152, 2, 242, 150], + index=CategoricalIndex(["f", "female", "m", "male", "unknown"]), + ) + result = DataFrame([s1, s2]) + expected = DataFrame( + np.array([[39, 6, 4, np.nan, np.nan], [152.0, 242.0, 150.0, 2.0, 2.0]]), + columns=["female", "male", "unknown", "f", "m"], + ) + tm.assert_frame_equal(result, expected) + + def test_constructor_series_nonexact_categoricalindex(self): + # GH 42424 + ser = Series(range(0, 100)) + ser1 = cut(ser, 10).value_counts().head(5) + ser2 = cut(ser, 10).value_counts().tail(5) + result = DataFrame({"1": ser1, "2": ser2}) + index = 
CategoricalIndex( + [ + Interval(-0.099, 9.9, closed="right"), + Interval(9.9, 19.8, closed="right"), + Interval(19.8, 29.7, closed="right"), + Interval(29.7, 39.6, closed="right"), + Interval(39.6, 49.5, closed="right"), + Interval(49.5, 59.4, closed="right"), + Interval(59.4, 69.3, closed="right"), + Interval(69.3, 79.2, closed="right"), + Interval(79.2, 89.1, closed="right"), + Interval(89.1, 99, closed="right"), + ], + ordered=True, + ) + expected = DataFrame( + {"1": [10] * 5 + [np.nan] * 5, "2": [np.nan] * 5 + [10] * 5}, index=index + ) + tm.assert_frame_equal(expected, result) + + def test_from_M8_structured(self): + dates = [(datetime(2012, 9, 9, 0, 0), datetime(2012, 9, 8, 15, 10))] + arr = np.array(dates, dtype=[("Date", "M8[us]"), ("Forecasting", "M8[us]")]) + df = DataFrame(arr) + + assert df["Date"][0] == dates[0][0] + assert df["Forecasting"][0] == dates[0][1] + + s = Series(arr["Date"]) + assert isinstance(s[0], Timestamp) + assert s[0] == dates[0][0] + + def test_from_datetime_subclass(self): + # GH21142 Verify whether Datetime subclasses are also of dtype datetime + class DatetimeSubclass(datetime): + pass + + data = DataFrame({"datetime": [DatetimeSubclass(2020, 1, 1, 1, 1)]}) + assert data.datetime.dtype == "datetime64[ns]" + + def test_with_mismatched_index_length_raises(self): + # GH#33437 + dti = date_range("2016-01-01", periods=3, tz="US/Pacific") + msg = "Shape of passed values|Passed arrays should have the same length" + with pytest.raises(ValueError, match=msg): + DataFrame(dti, index=range(4)) + + def test_frame_ctor_datetime64_column(self): + rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s") + dates = np.asarray(rng) + + df = DataFrame( + {"A": np.random.default_rng(2).standard_normal(len(rng)), "B": dates} + ) + assert np.issubdtype(df["B"].dtype, np.dtype("M8[ns]")) + + def test_dataframe_constructor_infer_multiindex(self): + index_lists = [["a", "a", "b", "b"], ["x", "y", "x", "y"]] + + multi = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=[np.array(x) for x in index_lists], + ) + assert isinstance(multi.index, MultiIndex) + assert not isinstance(multi.columns, MultiIndex) + + multi = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), columns=index_lists + ) + assert isinstance(multi.columns, MultiIndex) + + @pytest.mark.parametrize( + "input_vals", + [ + ([1, 2]), + (["1", "2"]), + (list(date_range("1/1/2011", periods=2, freq="H"))), + (list(date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))), + ([Interval(left=0, right=5)]), + ], + ) + def test_constructor_list_str(self, input_vals, string_dtype): + # GH#16605 + # Ensure that data elements are converted to strings when + # dtype is str, 'str', or 'U' + + result = DataFrame({"A": input_vals}, dtype=string_dtype) + expected = DataFrame({"A": input_vals}).astype({"A": string_dtype}) + tm.assert_frame_equal(result, expected) + + def test_constructor_list_str_na(self, string_dtype): + result = DataFrame({"A": [1.0, 2.0, None]}, dtype=string_dtype) + expected = DataFrame({"A": ["1.0", "2.0", None]}, dtype=object) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("copy", [False, True]) + def test_dict_nocopy( + self, + request, + copy, + any_numeric_ea_dtype, + any_numpy_dtype, + using_array_manager, + using_copy_on_write, + ): + if ( + using_array_manager + and not copy + and any_numpy_dtype not in tm.STRING_DTYPES + tm.BYTES_DTYPES + ): + # TODO(ArrayManager) properly honor copy keyword for dict input + 
td.mark_array_manager_not_yet_implemented(request) + + a = np.array([1, 2], dtype=any_numpy_dtype) + b = np.array([3, 4], dtype=any_numpy_dtype) + if b.dtype.kind in ["S", "U"]: + # These get cast, making the checks below more cumbersome + pytest.skip(f"{b.dtype} get cast, making the checks below more cumbersome") + + c = pd.array([1, 2], dtype=any_numeric_ea_dtype) + c_orig = c.copy() + df = DataFrame({"a": a, "b": b, "c": c}, copy=copy) + + def get_base(obj): + if isinstance(obj, np.ndarray): + return obj.base + elif isinstance(obj.dtype, np.dtype): + # i.e. DatetimeArray, TimedeltaArray + return obj._ndarray.base + else: + raise TypeError + + def check_views(c_only: bool = False): + # written to work for either BlockManager or ArrayManager + + # Check that the underlying data behind df["c"] is still `c` + # after setting with iloc. Since we don't know which entry in + # df._mgr.arrays corresponds to df["c"], we just check that exactly + # one of these arrays is `c`. GH#38939 + assert sum(x is c for x in df._mgr.arrays) == 1 + if c_only: + # If we ever stop consolidating in setitem_with_indexer, + # this will become unnecessary. + return + + assert ( + sum( + get_base(x) is a + for x in df._mgr.arrays + if isinstance(x.dtype, np.dtype) + ) + == 1 + ) + assert ( + sum( + get_base(x) is b + for x in df._mgr.arrays + if isinstance(x.dtype, np.dtype) + ) + == 1 + ) + + if not copy: + # constructor preserves views + check_views() + + # TODO: most of the rest of this test belongs in indexing tests + if lib.is_np_dtype(df.dtypes.iloc[0], "fciuO"): + warn = None + else: + warn = FutureWarning + with tm.assert_produces_warning(warn, match="incompatible dtype"): + df.iloc[0, 0] = 0 + df.iloc[0, 1] = 0 + if not copy: + check_views(True) + + # FIXME(GH#35417): until GH#35417, iloc.setitem into EA values does not preserve + # view, so we have to check in the other direction + df.iloc[:, 2] = pd.array([45, 46], dtype=c.dtype) + assert df.dtypes.iloc[2] == c.dtype + if not copy and not using_copy_on_write: + check_views(True) + + if copy: + if a.dtype.kind == "M": + assert a[0] == a.dtype.type(1, "ns") + assert b[0] == b.dtype.type(3, "ns") + else: + assert a[0] == a.dtype.type(1) + assert b[0] == b.dtype.type(3) + # FIXME(GH#35417): enable after GH#35417 + assert c[0] == c_orig[0] # i.e. df.iloc[0, 2]=45 did *not* update c + elif not using_copy_on_write: + # TODO: we can call check_views if we stop consolidating + # in setitem_with_indexer + assert c[0] == 45 # i.e. df.iloc[0, 2]=45 *did* update c + # TODO: we can check b[0] == 0 if we stop consolidating in + # setitem_with_indexer (except for datetimelike?) 
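+
+ # Condensed sketch of the copy semantics that test_dict_nocopy checks
+ # (illustrative only, not part of the upstream suite; it assumes the
+ # default block manager with copy-on-write disabled, since under
+ # copy-on-write the view assertion below no longer holds):
+ def _sketch_dict_input_copy_keyword():
+     arr = np.array([1.0, 2.0])
+     as_view = DataFrame({"a": arr}, copy=False)  # column may share arr's buffer
+     as_copy = DataFrame({"a": arr}, copy=True)  # column owns a separate buffer
+     arr[0] = 99.0  # mutate the source array in place
+     assert as_view["a"].iloc[0] == 99.0  # view sees the mutation
+     assert as_copy["a"].iloc[0] == 1.0  # copy is unaffected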
+ + def test_construct_from_dict_ea_series(self): + # GH#53744 - default of copy=True should also apply for Series with + # extension dtype + ser = Series([1, 2, 3], dtype="Int64") + df = DataFrame({"a": ser}) + assert not np.shares_memory(ser.values._data, df["a"].values._data) + + def test_from_series_with_name_with_columns(self): + # GH 7893 + result = DataFrame(Series(1, name="foo"), columns=["bar"]) + expected = DataFrame(columns=["bar"]) + tm.assert_frame_equal(result, expected) + + def test_nested_list_columns(self): + # GH 14467 + result = DataFrame( + [[1, 2, 3], [4, 5, 6]], columns=[["A", "A", "A"], ["a", "b", "c"]] + ) + expected = DataFrame( + [[1, 2, 3], [4, 5, 6]], + columns=MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("A", "c")]), + ) + tm.assert_frame_equal(result, expected) + + def test_from_2d_object_array_of_periods_or_intervals(self): + # Period analogue to GH#26825 + pi = pd.period_range("2016-04-05", periods=3) + data = pi._data.astype(object).reshape(1, -1) + df = DataFrame(data) + assert df.shape == (1, 3) + assert (df.dtypes == pi.dtype).all() + assert (df == pi).all().all() + + ii = pd.IntervalIndex.from_breaks([3, 4, 5, 6]) + data2 = ii._data.astype(object).reshape(1, -1) + df2 = DataFrame(data2) + assert df2.shape == (1, 3) + assert (df2.dtypes == ii.dtype).all() + assert (df2 == ii).all().all() + + # mixed + data3 = np.r_[data, data2, data, data2].T + df3 = DataFrame(data3) + expected = DataFrame({0: pi, 1: ii, 2: pi, 3: ii}) + tm.assert_frame_equal(df3, expected) + + @pytest.mark.parametrize( + "col_a, col_b", + [ + ([[1], [2]], np.array([[1], [2]])), + (np.array([[1], [2]]), [[1], [2]]), + (np.array([[1], [2]]), np.array([[1], [2]])), + ], + ) + def test_error_from_2darray(self, col_a, col_b): + msg = "Per-column arrays must each be 1-dimensional" + with pytest.raises(ValueError, match=msg): + DataFrame({"a": col_a, "b": col_b}) + + def test_from_dict_with_missing_copy_false(self): + # GH#45369 filled columns should not be views of one another + df = DataFrame(index=[1, 2, 3], columns=["a", "b", "c"], copy=False) + assert not np.shares_memory(df["a"]._values, df["b"]._values) + + df.iloc[0, 0] = 0 + expected = DataFrame( + { + "a": [0, np.nan, np.nan], + "b": [np.nan, np.nan, np.nan], + "c": [np.nan, np.nan, np.nan], + }, + index=[1, 2, 3], + dtype=object, + ) + tm.assert_frame_equal(df, expected) + + def test_construction_empty_array_multi_column_raises(self): + # GH#46822 + msg = r"Shape of passed values is \(0, 1\), indices imply \(0, 2\)" + with pytest.raises(ValueError, match=msg): + DataFrame(data=np.array([]), columns=["a", "b"]) + + def test_construct_with_strings_and_none(self): + # GH#32218 + df = DataFrame(["1", "2", None], columns=["a"], dtype="str") + expected = DataFrame({"a": ["1", "2", None]}, dtype="str") + tm.assert_frame_equal(df, expected) + + def test_frame_string_inference(self): + # GH#54430 + pytest.importorskip("pyarrow") + dtype = "string[pyarrow_numpy]" + expected = DataFrame( + {"a": ["a", "b"]}, dtype=dtype, columns=Index(["a"], dtype=dtype) + ) + with pd.option_context("future.infer_string", True): + df = DataFrame({"a": ["a", "b"]}) + tm.assert_frame_equal(df, expected) + + expected = DataFrame( + {"a": ["a", "b"]}, + dtype=dtype, + columns=Index(["a"], dtype=dtype), + index=Index(["x", "y"], dtype=dtype), + ) + with pd.option_context("future.infer_string", True): + df = DataFrame({"a": ["a", "b"]}, index=["x", "y"]) + tm.assert_frame_equal(df, expected) + + expected = DataFrame( + {"a": ["a", 1]}, dtype="object", 
columns=Index(["a"], dtype=dtype) + ) + with pd.option_context("future.infer_string", True): + df = DataFrame({"a": ["a", 1]}) + tm.assert_frame_equal(df, expected) + + expected = DataFrame( + {"a": ["a", "b"]}, dtype="object", columns=Index(["a"], dtype=dtype) + ) + with pd.option_context("future.infer_string", True): + df = DataFrame({"a": ["a", "b"]}, dtype="object") + tm.assert_frame_equal(df, expected) + + def test_frame_string_inference_array_string_dtype(self): + # GH#54496 + pytest.importorskip("pyarrow") + dtype = "string[pyarrow_numpy]" + expected = DataFrame( + {"a": ["a", "b"]}, dtype=dtype, columns=Index(["a"], dtype=dtype) + ) + with pd.option_context("future.infer_string", True): + df = DataFrame({"a": np.array(["a", "b"])}) + tm.assert_frame_equal(df, expected) + + expected = DataFrame({0: ["a", "b"], 1: ["c", "d"]}, dtype=dtype) + with pd.option_context("future.infer_string", True): + df = DataFrame(np.array([["a", "c"], ["b", "d"]])) + tm.assert_frame_equal(df, expected) + + expected = DataFrame( + {"a": ["a", "b"], "b": ["c", "d"]}, + dtype=dtype, + columns=Index(["a", "b"], dtype=dtype), + ) + with pd.option_context("future.infer_string", True): + df = DataFrame(np.array([["a", "c"], ["b", "d"]]), columns=["a", "b"]) + tm.assert_frame_equal(df, expected) + + def test_frame_string_inference_block_dim(self): + # GH#55363 + pytest.importorskip("pyarrow") + with pd.option_context("future.infer_string", True): + df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]])) + assert df._mgr.blocks[0].ndim == 2 + + +class TestDataFrameConstructorIndexInference: + def test_frame_from_dict_of_series_overlapping_monthly_period_indexes(self): + rng1 = pd.period_range("1/1/1999", "1/1/2012", freq="M") + s1 = Series(np.random.default_rng(2).standard_normal(len(rng1)), rng1) + + rng2 = pd.period_range("1/1/1980", "12/1/2001", freq="M") + s2 = Series(np.random.default_rng(2).standard_normal(len(rng2)), rng2) + df = DataFrame({"s1": s1, "s2": s2}) + + exp = pd.period_range("1/1/1980", "1/1/2012", freq="M") + tm.assert_index_equal(df.index, exp) + + def test_frame_from_dict_with_mixed_tzaware_indexes(self): + # GH#44091 + dti = date_range("2016-01-01", periods=3) + + ser1 = Series(range(3), index=dti) + ser2 = Series(range(3), index=dti.tz_localize("UTC")) + ser3 = Series(range(3), index=dti.tz_localize("US/Central")) + ser4 = Series(range(3)) + + # no tz-naive, but we do have mixed tzs and a non-DTI + df1 = DataFrame({"A": ser2, "B": ser3, "C": ser4}) + exp_index = Index( + list(ser2.index) + list(ser3.index) + list(ser4.index), dtype=object + ) + tm.assert_index_equal(df1.index, exp_index) + + df2 = DataFrame({"A": ser2, "C": ser4, "B": ser3}) + exp_index3 = Index( + list(ser2.index) + list(ser4.index) + list(ser3.index), dtype=object + ) + tm.assert_index_equal(df2.index, exp_index3) + + df3 = DataFrame({"B": ser3, "A": ser2, "C": ser4}) + exp_index3 = Index( + list(ser3.index) + list(ser2.index) + list(ser4.index), dtype=object + ) + tm.assert_index_equal(df3.index, exp_index3) + + df4 = DataFrame({"C": ser4, "B": ser3, "A": ser2}) + exp_index4 = Index( + list(ser4.index) + list(ser3.index) + list(ser2.index), dtype=object + ) + tm.assert_index_equal(df4.index, exp_index4) + + # TODO: not clear if these raising is desired (no extant tests), + # but this is de facto behavior 2021-12-22 + msg = "Cannot join tz-naive with tz-aware DatetimeIndex" + with pytest.raises(TypeError, match=msg): + DataFrame({"A": ser2, "B": ser3, "C": ser4, "D": ser1}) + with pytest.raises(TypeError, 
match=msg):
+ DataFrame({"A": ser2, "B": ser3, "D": ser1})
+ with pytest.raises(TypeError, match=msg):
+ DataFrame({"D": ser1, "A": ser2, "B": ser3})
+
+ @pytest.mark.parametrize(
+ "key_val, col_vals, col_type",
+ [
+ ["3", ["3", "4"], "utf8"],
+ [3, [3, 4], "int8"],
+ ],
+ )
+ def test_dict_data_arrow_column_expansion(self, key_val, col_vals, col_type):
+ # GH 53617
+ pa = pytest.importorskip("pyarrow")
+ cols = pd.arrays.ArrowExtensionArray(
+ pa.array(col_vals, type=pa.dictionary(pa.int8(), getattr(pa, col_type)()))
+ )
+ result = DataFrame({key_val: [1, 2]}, columns=cols)
+ expected = DataFrame([[1, np.nan], [2, np.nan]], columns=cols)
+ expected.iloc[:, 1] = expected.iloc[:, 1].astype(object)
+ tm.assert_frame_equal(result, expected)
+
+
+class TestDataFrameConstructorWithDtypeCoercion:
+ def test_floating_values_integer_dtype(self):
+ # GH#40110 make DataFrame behavior with arraylike floating data and
+ # inty dtype match Series behavior
+
+ arr = np.random.default_rng(2).standard_normal((10, 5))
+
+ # GH#49599 in 2.0 we raise instead of either
+ # a) silently ignoring dtype and returning float (the old Series behavior) or
+ # b) rounding (the old DataFrame behavior)
+ msg = "Trying to coerce float values to integers"
+ with pytest.raises(ValueError, match=msg):
+ DataFrame(arr, dtype="i8")
+
+ df = DataFrame(arr.round(), dtype="i8")
+ assert (df.dtypes == "i8").all()
+
+ # with NaNs, we go through a different path with a different warning
+ arr[0, 0] = np.nan
+ msg = r"Cannot convert non-finite values \(NA or inf\) to integer"
+ with pytest.raises(IntCastingNaNError, match=msg):
+ DataFrame(arr, dtype="i8")
+ with pytest.raises(IntCastingNaNError, match=msg):
+ Series(arr[0], dtype="i8")
+ # The future (raising) behavior matches what we would get via astype:
+ msg = r"Cannot convert non-finite values \(NA or inf\) to integer"
+ with pytest.raises(IntCastingNaNError, match=msg):
+ DataFrame(arr).astype("i8")
+ with pytest.raises(IntCastingNaNError, match=msg):
+ Series(arr[0]).astype("i8")
+
+
+class TestDataFrameConstructorWithDatetimeTZ:
+ @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
+ def test_construction_preserves_tzaware_dtypes(self, tz):
+ # after GH#7822
+ # these retain the timezones on dict construction
+ dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
+ dr_tz = dr.tz_localize(tz)
+ df = DataFrame({"A": "foo", "B": dr_tz}, index=dr)
+ tz_expected = DatetimeTZDtype("ns", dr_tz.tzinfo)
+ assert df["B"].dtype == tz_expected
+
+ # GH#2810 (with timezones)
+ datetimes_naive = [ts.to_pydatetime() for ts in dr]
+ datetimes_with_tz = [ts.to_pydatetime() for ts in dr_tz]
+ df = DataFrame({"dr": dr})
+ df["dr_tz"] = dr_tz
+ df["datetimes_naive"] = datetimes_naive
+ df["datetimes_with_tz"] = datetimes_with_tz
+ result = df.dtypes
+ expected = Series(
+ [
+ np.dtype("datetime64[ns]"),
+ DatetimeTZDtype(tz=tz),
+ np.dtype("datetime64[ns]"),
+ DatetimeTZDtype(tz=tz),
+ ],
+ index=["dr", "dr_tz", "datetimes_naive", "datetimes_with_tz"],
+ )
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("pydt", [True, False])
+ def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture, pydt):
+ # GH#25843, GH#41555, GH#33401
+ tz = tz_aware_fixture
+ ts = Timestamp("2019", tz=tz)
+ if pydt:
+ ts = ts.to_pydatetime()
+
+ msg = (
+ "Cannot convert timezone-aware data to timezone-naive dtype. "
+ r"Use pd.Series\(values\).dt.tz_localize\(None\) instead."
+ )
+ with pytest.raises(ValueError, match=msg):
+ DataFrame({0: [ts]}, dtype="datetime64[ns]")
+
+ msg2 = "Cannot unbox tzaware Timestamp to tznaive dtype"
+ with pytest.raises(TypeError, match=msg2):
+ DataFrame({0: ts}, index=[0], dtype="datetime64[ns]")
+
+ with pytest.raises(ValueError, match=msg):
+ DataFrame([ts], dtype="datetime64[ns]")
+
+ with pytest.raises(ValueError, match=msg):
+ DataFrame(np.array([ts], dtype=object), dtype="datetime64[ns]")
+
+ with pytest.raises(TypeError, match=msg2):
+ DataFrame(ts, index=[0], columns=[0], dtype="datetime64[ns]")
+
+ with pytest.raises(ValueError, match=msg):
+ DataFrame([Series([ts])], dtype="datetime64[ns]")
+
+ with pytest.raises(ValueError, match=msg):
+ DataFrame([[ts]], columns=[0], dtype="datetime64[ns]")
+
+ def test_from_dict(self):
+ # 8260
+ # support datetime64 with tz
+
+ idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
+ dr = date_range("20130110", periods=3)
+
+ # construction
+ df = DataFrame({"A": idx, "B": dr})
+ assert df["A"].dtype == "M8[ns, US/Eastern]"
+ assert df["A"].name == "A"
+ tm.assert_series_equal(df["A"], Series(idx, name="A"))
+ tm.assert_series_equal(df["B"], Series(dr, name="B"))
+
+ def test_from_index(self):
+ # from index
+ idx2 = date_range("20130101", periods=3, tz="US/Eastern", name="foo")
+ df2 = DataFrame(idx2)
+ tm.assert_series_equal(df2["foo"], Series(idx2, name="foo"))
+ df2 = DataFrame(Series(idx2))
+ tm.assert_series_equal(df2["foo"], Series(idx2, name="foo"))
+
+ idx2 = date_range("20130101", periods=3, tz="US/Eastern")
+ df2 = DataFrame(idx2)
+ tm.assert_series_equal(df2[0], Series(idx2, name=0))
+ df2 = DataFrame(Series(idx2))
+ tm.assert_series_equal(df2[0], Series(idx2, name=0))
+
+ def test_frame_dict_constructor_datetime64_1680(self):
+ dr = date_range("1/1/2012", periods=10)
+ s = Series(dr, index=dr)
+
+ # it works!
+ DataFrame({"a": "foo", "b": s}, index=dr)
+ DataFrame({"a": "foo", "b": s.values}, index=dr)
+
+ def test_frame_datetime64_mixed_index_ctor_1681(self):
+ dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
+ ts = Series(dr)
+
+ # it works!
+ d = DataFrame({"A": "foo", "B": ts}, index=dr) + assert d["B"].isna().all() + + def test_frame_timeseries_column(self): + # GH19157 + dr = date_range(start="20130101T10:00:00", periods=3, freq="T", tz="US/Eastern") + result = DataFrame(dr, columns=["timestamps"]) + expected = DataFrame( + { + "timestamps": [ + Timestamp("20130101T10:00:00", tz="US/Eastern"), + Timestamp("20130101T10:01:00", tz="US/Eastern"), + Timestamp("20130101T10:02:00", tz="US/Eastern"), + ] + } + ) + tm.assert_frame_equal(result, expected) + + def test_nested_dict_construction(self): + # GH22227 + columns = ["Nevada", "Ohio"] + pop = { + "Nevada": {2001: 2.4, 2002: 2.9}, + "Ohio": {2000: 1.5, 2001: 1.7, 2002: 3.6}, + } + result = DataFrame(pop, index=[2001, 2002, 2003], columns=columns) + expected = DataFrame( + [(2.4, 1.7), (2.9, 3.6), (np.nan, np.nan)], + columns=columns, + index=Index([2001, 2002, 2003]), + ) + tm.assert_frame_equal(result, expected) + + def test_from_tzaware_object_array(self): + # GH#26825 2D object array of tzaware timestamps should not raise + dti = date_range("2016-04-05 04:30", periods=3, tz="UTC") + data = dti._data.astype(object).reshape(1, -1) + df = DataFrame(data) + assert df.shape == (1, 3) + assert (df.dtypes == dti.dtype).all() + assert (df == dti).all().all() + + def test_from_tzaware_mixed_object_array(self): + # GH#26825 + arr = np.array( + [ + [ + Timestamp("2013-01-01 00:00:00"), + Timestamp("2013-01-02 00:00:00"), + Timestamp("2013-01-03 00:00:00"), + ], + [ + Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"), + pd.NaT, + Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"), + ], + [ + Timestamp("2013-01-01 00:00:00+0100", tz="CET"), + pd.NaT, + Timestamp("2013-01-03 00:00:00+0100", tz="CET"), + ], + ], + dtype=object, + ).T + res = DataFrame(arr, columns=["A", "B", "C"]) + + expected_dtypes = [ + "datetime64[ns]", + "datetime64[ns, US/Eastern]", + "datetime64[ns, CET]", + ] + assert (res.dtypes == expected_dtypes).all() + + def test_from_2d_ndarray_with_dtype(self): + # GH#12513 + array_dim2 = np.arange(10).reshape((5, 2)) + df = DataFrame(array_dim2, dtype="datetime64[ns, UTC]") + + expected = DataFrame(array_dim2).astype("datetime64[ns, UTC]") + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("typ", [set, frozenset]) + def test_construction_from_set_raises(self, typ): + # https://github.com/pandas-dev/pandas/issues/32582 + values = typ({1, 2, 3}) + msg = f"'{typ.__name__}' type is unordered" + with pytest.raises(TypeError, match=msg): + DataFrame({"a": values}) + + with pytest.raises(TypeError, match=msg): + Series(values) + + def test_construction_from_ndarray_datetimelike(self): + # ensure the underlying arrays are properly wrapped as EA when + # constructed from 2D ndarray + arr = np.arange(0, 12, dtype="datetime64[ns]").reshape(4, 3) + df = DataFrame(arr) + assert all(isinstance(arr, DatetimeArray) for arr in df._mgr.arrays) + + def test_construction_from_ndarray_with_eadtype_mismatched_columns(self): + arr = np.random.default_rng(2).standard_normal((10, 2)) + dtype = pd.array([2.0]).dtype + msg = r"len\(arrays\) must match len\(columns\)" + with pytest.raises(ValueError, match=msg): + DataFrame(arr, columns=["foo"], dtype=dtype) + + arr2 = pd.array([2.0, 3.0, 4.0]) + with pytest.raises(ValueError, match=msg): + DataFrame(arr2, columns=["foo", "bar"]) + + def test_columns_indexes_raise_on_sets(self): + # GH 47215 + data = [[1, 2, 3], [4, 5, 6]] + with pytest.raises(ValueError, match="index cannot be a set"): + DataFrame(data, index={"a", "b"}) + 
with pytest.raises(ValueError, match="columns cannot be a set"): + DataFrame(data, columns={"a", "b", "c"}) + + +def get1(obj): # TODO: make a helper in tm? + if isinstance(obj, Series): + return obj.iloc[0] + else: + return obj.iloc[0, 0] + + +class TestFromScalar: + @pytest.fixture(params=[list, dict, None]) + def box(self, request): + return request.param + + @pytest.fixture + def constructor(self, frame_or_series, box): + extra = {"index": range(2)} + if frame_or_series is DataFrame: + extra["columns"] = ["A"] + + if box is None: + return functools.partial(frame_or_series, **extra) + + elif box is dict: + if frame_or_series is Series: + return lambda x, **kwargs: frame_or_series( + {0: x, 1: x}, **extra, **kwargs + ) + else: + return lambda x, **kwargs: frame_or_series({"A": x}, **extra, **kwargs) + elif frame_or_series is Series: + return lambda x, **kwargs: frame_or_series([x, x], **extra, **kwargs) + else: + return lambda x, **kwargs: frame_or_series({"A": [x, x]}, **extra, **kwargs) + + @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) + def test_from_nat_scalar(self, dtype, constructor): + obj = constructor(pd.NaT, dtype=dtype) + assert np.all(obj.dtypes == dtype) + assert np.all(obj.isna()) + + def test_from_timedelta_scalar_preserves_nanos(self, constructor): + td = Timedelta(1) + + obj = constructor(td, dtype="m8[ns]") + assert get1(obj) == td + + def test_from_timestamp_scalar_preserves_nanos(self, constructor, fixed_now_ts): + ts = fixed_now_ts + Timedelta(1) + + obj = constructor(ts, dtype="M8[ns]") + assert get1(obj) == ts + + def test_from_timedelta64_scalar_object(self, constructor): + td = Timedelta(1) + td64 = td.to_timedelta64() + + obj = constructor(td64, dtype=object) + assert isinstance(get1(obj), np.timedelta64) + + @pytest.mark.parametrize("cls", [np.datetime64, np.timedelta64]) + def test_from_scalar_datetimelike_mismatched(self, constructor, cls): + scalar = cls("NaT", "ns") + dtype = {np.datetime64: "m8[ns]", np.timedelta64: "M8[ns]"}[cls] + + if cls is np.datetime64: + msg1 = r"dtype datetime64\[ns\] cannot be converted to timedelta64\[ns\]" + else: + msg1 = r"dtype timedelta64\[ns\] cannot be converted to datetime64\[ns\]" + msg = "|".join(["Cannot cast", msg1]) + + with pytest.raises(TypeError, match=msg): + constructor(scalar, dtype=dtype) + + scalar = cls(4, "ns") + with pytest.raises(TypeError, match=msg): + constructor(scalar, dtype=dtype) + + @pytest.mark.parametrize("cls", [datetime, np.datetime64]) + def test_from_out_of_bounds_ns_datetime( + self, constructor, cls, request, box, frame_or_series + ): + # scalar that won't fit in nanosecond dt64, but will fit in microsecond + if box is list or (frame_or_series is Series and box is dict): + mark = pytest.mark.xfail( + reason="Timestamp constructor has been updated to cast dt64 to " + "non-nano, but DatetimeArray._from_sequence has not", + strict=True, + ) + request.node.add_marker(mark) + + scalar = datetime(9999, 1, 1) + exp_dtype = "M8[us]" # pydatetime objects default to this reso + + if cls is np.datetime64: + scalar = np.datetime64(scalar, "D") + exp_dtype = "M8[s]" # closest reso to input + result = constructor(scalar) + + item = get1(result) + dtype = tm.get_dtype(result) + + assert type(item) is Timestamp + assert item.asm8.dtype == exp_dtype + assert dtype == exp_dtype + + def test_out_of_s_bounds_datetime64(self, constructor): + scalar = np.datetime64(np.iinfo(np.int64).max, "D") + result = constructor(scalar) + item = get1(result) + assert type(item) is np.datetime64 + dtype = 
tm.get_dtype(result) + assert dtype == object + + @pytest.mark.parametrize("cls", [timedelta, np.timedelta64]) + def test_from_out_of_bounds_ns_timedelta( + self, constructor, cls, request, box, frame_or_series + ): + # scalar that won't fit in nanosecond td64, but will fit in microsecond + if box is list or (frame_or_series is Series and box is dict): + mark = pytest.mark.xfail( + reason="TimedeltaArray constructor has been updated to cast td64 " + "to non-nano, but TimedeltaArray._from_sequence has not", + strict=True, + ) + request.node.add_marker(mark) + + scalar = datetime(9999, 1, 1) - datetime(1970, 1, 1) + exp_dtype = "m8[us]" # smallest reso that fits + if cls is np.timedelta64: + scalar = np.timedelta64(scalar, "D") + exp_dtype = "m8[s]" # closest reso to input + result = constructor(scalar) + + item = get1(result) + dtype = tm.get_dtype(result) + + assert type(item) is Timedelta + assert item.asm8.dtype == exp_dtype + assert dtype == exp_dtype + + @pytest.mark.parametrize("cls", [np.datetime64, np.timedelta64]) + def test_out_of_s_bounds_timedelta64(self, constructor, cls): + scalar = cls(np.iinfo(np.int64).max, "D") + result = constructor(scalar) + item = get1(result) + assert type(item) is cls + dtype = tm.get_dtype(result) + assert dtype == object + + def test_tzaware_data_tznaive_dtype(self, constructor, box, frame_or_series): + tz = "US/Eastern" + ts = Timestamp("2019", tz=tz) + + if box is None or (frame_or_series is DataFrame and box is dict): + msg = "Cannot unbox tzaware Timestamp to tznaive dtype" + err = TypeError + else: + msg = ( + "Cannot convert timezone-aware data to timezone-naive dtype. " + r"Use pd.Series\(values\).dt.tz_localize\(None\) instead." + ) + err = ValueError + + with pytest.raises(err, match=msg): + constructor(ts, dtype="M8[ns]") + + +# TODO: better location for this test? 
+class TestAllowNonNano: + # Until 2.0, we do not preserve non-nano dt64/td64 when passed as ndarray, + # but do preserve it when passed as DTA/TDA + + @pytest.fixture(params=[True, False]) + def as_td(self, request): + return request.param + + @pytest.fixture + def arr(self, as_td): + values = np.arange(5).astype(np.int64).view("M8[s]") + if as_td: + values = values - values[0] + return TimedeltaArray._simple_new(values, dtype=values.dtype) + else: + return DatetimeArray._simple_new(values, dtype=values.dtype) + + def test_index_allow_non_nano(self, arr): + idx = Index(arr) + assert idx.dtype == arr.dtype + + def test_dti_tdi_allow_non_nano(self, arr, as_td): + if as_td: + idx = pd.TimedeltaIndex(arr) + else: + idx = DatetimeIndex(arr) + assert idx.dtype == arr.dtype + + def test_series_allow_non_nano(self, arr): + ser = Series(arr) + assert ser.dtype == arr.dtype + + def test_frame_allow_non_nano(self, arr): + df = DataFrame(arr) + assert df.dtypes[0] == arr.dtype + + def test_frame_from_dict_allow_non_nano(self, arr): + df = DataFrame({0: arr}) + assert df.dtypes[0] == arr.dtype diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_cumulative.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_cumulative.py new file mode 100644 index 00000000..5bd9c426 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_cumulative.py @@ -0,0 +1,81 @@ +""" +Tests for DataFrame cumulative operations + +See also +-------- +tests.series.test_cumulative +""" + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +class TestDataFrameCumulativeOps: + # --------------------------------------------------------------------- + # Cumulative Operations - cumsum, cummax, ... + + def test_cumulative_ops_smoke(self): + # it works + df = DataFrame({"A": np.arange(20)}, index=np.arange(20)) + df.cummax() + df.cummin() + df.cumsum() + + dm = DataFrame(np.arange(20).reshape(4, 5), index=range(4), columns=range(5)) + # TODO(wesm): do something with this? + dm.cumsum() + + def test_cumprod_smoke(self, datetime_frame): + datetime_frame.iloc[5:10, 0] = np.nan + datetime_frame.iloc[10:15, 1] = np.nan + datetime_frame.iloc[15:, 2] = np.nan + + # ints + df = datetime_frame.fillna(0).astype(int) + df.cumprod(0) + df.cumprod(1) + + # ints32 + df = datetime_frame.fillna(0).astype(np.int32) + df.cumprod(0) + df.cumprod(1) + + @pytest.mark.parametrize("method", ["cumsum", "cumprod", "cummin", "cummax"]) + def test_cumulative_ops_match_series_apply(self, datetime_frame, method): + datetime_frame.iloc[5:10, 0] = np.nan + datetime_frame.iloc[10:15, 1] = np.nan + datetime_frame.iloc[15:, 2] = np.nan + + # axis = 0 + result = getattr(datetime_frame, method)() + expected = datetime_frame.apply(getattr(Series, method)) + tm.assert_frame_equal(result, expected) + + # axis = 1 + result = getattr(datetime_frame, method)(axis=1) + expected = datetime_frame.apply(getattr(Series, method), axis=1) + tm.assert_frame_equal(result, expected) + + # fix issue TODO: GH ref? 
+ assert np.shape(result) == np.shape(datetime_frame) + + def test_cumsum_preserve_dtypes(self): + # GH#19296 dont incorrectly upcast to object + df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3.0], "C": [True, False, False]}) + + result = df.cumsum() + + expected = DataFrame( + { + "A": Series([1, 3, 6], dtype=np.int64), + "B": Series([1, 3, 6], dtype=np.float64), + "C": df["C"].cumsum(), + } + ) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_iteration.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_iteration.py new file mode 100644 index 00000000..8bc26bff --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_iteration.py @@ -0,0 +1,162 @@ +import datetime + +import numpy as np + +from pandas.compat import ( + IS64, + is_platform_windows, +) + +from pandas import ( + Categorical, + DataFrame, + Series, + date_range, +) +import pandas._testing as tm + + +class TestIteration: + def test_keys(self, float_frame): + assert float_frame.keys() is float_frame.columns + + def test_iteritems(self): + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"]) + for k, v in df.items(): + assert isinstance(v, DataFrame._constructor_sliced) + + def test_items(self): + # GH#17213, GH#13918 + cols = ["a", "b", "c"] + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols) + for c, (k, v) in zip(cols, df.items()): + assert c == k + assert isinstance(v, Series) + assert (df[k] == v).all() + + def test_items_names(self, float_string_frame): + for k, v in float_string_frame.items(): + assert v.name == k + + def test_iter(self, float_frame): + assert tm.equalContents(list(float_frame), float_frame.columns) + + def test_iterrows(self, float_frame, float_string_frame): + for k, v in float_frame.iterrows(): + exp = float_frame.loc[k] + tm.assert_series_equal(v, exp) + + for k, v in float_string_frame.iterrows(): + exp = float_string_frame.loc[k] + tm.assert_series_equal(v, exp) + + def test_iterrows_iso8601(self): + # GH#19671 + s = DataFrame( + { + "non_iso8601": ["M1701", "M1802", "M1903", "M2004"], + "iso8601": date_range("2000-01-01", periods=4, freq="M"), + } + ) + for k, v in s.iterrows(): + exp = s.loc[k] + tm.assert_series_equal(v, exp) + + def test_iterrows_corner(self): + # GH#12222 + df = DataFrame( + { + "a": [datetime.datetime(2015, 1, 1)], + "b": [None], + "c": [None], + "d": [""], + "e": [[]], + "f": [set()], + "g": [{}], + } + ) + expected = Series( + [datetime.datetime(2015, 1, 1), None, None, "", [], set(), {}], + index=list("abcdefg"), + name=0, + dtype="object", + ) + _, result = next(df.iterrows()) + tm.assert_series_equal(result, expected) + + def test_itertuples(self, float_frame): + for i, tup in enumerate(float_frame.itertuples()): + ser = DataFrame._constructor_sliced(tup[1:]) + ser.name = tup[0] + expected = float_frame.iloc[i, :].reset_index(drop=True) + tm.assert_series_equal(ser, expected) + + df = DataFrame( + {"floats": np.random.default_rng(2).standard_normal(5), "ints": range(5)}, + columns=["floats", "ints"], + ) + + for tup in df.itertuples(index=False): + assert isinstance(tup[1], int) + + df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]}) + dfaa = df[["a", "a"]] + + assert list(dfaa.itertuples()) == [(0, 1, 1), (1, 2, 2), (2, 3, 3)] + + # repr with int on 32-bit/windows + if not (is_platform_windows() or not IS64): + assert ( + repr(list(df.itertuples(name=None))) + == "[(0, 1, 4), (1, 2, 5), (2, 3, 6)]" + ) + + tup = next(df.itertuples(name="TestName")) + 
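+        # a requested name produces a namedtuple: it exposes _fields and
+        # attribute access, and still compares equal to a plain tuple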
assert tup._fields == ("Index", "a", "b") + assert (tup.Index, tup.a, tup.b) == tup + assert type(tup).__name__ == "TestName" + + df.columns = ["def", "return"] + tup2 = next(df.itertuples(name="TestName")) + assert tup2 == (0, 1, 4) + assert tup2._fields == ("Index", "_1", "_2") + + df3 = DataFrame({"f" + str(i): [i] for i in range(1024)}) + # will raise SyntaxError if trying to create namedtuple + tup3 = next(df3.itertuples()) + assert isinstance(tup3, tuple) + assert hasattr(tup3, "_fields") + + # GH#28282 + df_254_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(254)}]) + result_254_columns = next(df_254_columns.itertuples(index=False)) + assert isinstance(result_254_columns, tuple) + assert hasattr(result_254_columns, "_fields") + + df_255_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(255)}]) + result_255_columns = next(df_255_columns.itertuples(index=False)) + assert isinstance(result_255_columns, tuple) + assert hasattr(result_255_columns, "_fields") + + def test_sequence_like_with_categorical(self): + # GH#7839 + # make sure can iterate + df = DataFrame( + {"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]} + ) + df["grade"] = Categorical(df["raw_grade"]) + + # basic sequencing testing + result = list(df.grade.values) + expected = np.array(df.grade.values).tolist() + tm.assert_almost_equal(result, expected) + + # iteration + for t in df.itertuples(index=False): + str(t) + + for row, s in df.iterrows(): + str(s) + + for c, col in df.items(): + str(col) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_logical_ops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_logical_ops.py new file mode 100644 index 00000000..2cc3b67e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_logical_ops.py @@ -0,0 +1,211 @@ +import operator +import re + +import numpy as np +import pytest + +from pandas import ( + CategoricalIndex, + DataFrame, + Interval, + Series, + isnull, +) +import pandas._testing as tm + + +class TestDataFrameLogicalOperators: + # &, |, ^ + + @pytest.mark.parametrize( + "left, right, op, expected", + [ + ( + [True, False, np.nan], + [True, False, True], + operator.and_, + [True, False, False], + ), + ( + [True, False, True], + [True, False, np.nan], + operator.and_, + [True, False, False], + ), + ( + [True, False, np.nan], + [True, False, True], + operator.or_, + [True, False, False], + ), + ( + [True, False, True], + [True, False, np.nan], + operator.or_, + [True, False, True], + ), + ], + ) + def test_logical_operators_nans(self, left, right, op, expected, frame_or_series): + # GH#13896 + result = op(frame_or_series(left), frame_or_series(right)) + expected = frame_or_series(expected) + + tm.assert_equal(result, expected) + + def test_logical_ops_empty_frame(self): + # GH#5808 + # empty frames, non-mixed dtype + df = DataFrame(index=[1]) + + result = df & df + tm.assert_frame_equal(result, df) + + result = df | df + tm.assert_frame_equal(result, df) + + df2 = DataFrame(index=[1, 2]) + result = df & df2 + tm.assert_frame_equal(result, df2) + + dfa = DataFrame(index=[1], columns=["A"]) + + result = dfa & dfa + expected = DataFrame(False, index=[1], columns=["A"]) + tm.assert_frame_equal(result, expected) + + def test_logical_ops_bool_frame(self): + # GH#5808 + df1a_bool = DataFrame(True, index=[1], columns=["A"]) + + result = df1a_bool & df1a_bool + tm.assert_frame_equal(result, df1a_bool) + + result = df1a_bool | df1a_bool + tm.assert_frame_equal(result, 
df1a_bool) + + def test_logical_ops_int_frame(self): + # GH#5808 + df1a_int = DataFrame(1, index=[1], columns=["A"]) + df1a_bool = DataFrame(True, index=[1], columns=["A"]) + + result = df1a_int | df1a_bool + tm.assert_frame_equal(result, df1a_bool) + + # Check that this matches Series behavior + res_ser = df1a_int["A"] | df1a_bool["A"] + tm.assert_series_equal(res_ser, df1a_bool["A"]) + + def test_logical_ops_invalid(self): + # GH#5808 + + df1 = DataFrame(1.0, index=[1], columns=["A"]) + df2 = DataFrame(True, index=[1], columns=["A"]) + msg = re.escape("unsupported operand type(s) for |: 'float' and 'bool'") + with pytest.raises(TypeError, match=msg): + df1 | df2 + + df1 = DataFrame("foo", index=[1], columns=["A"]) + df2 = DataFrame(True, index=[1], columns=["A"]) + msg = re.escape("unsupported operand type(s) for |: 'str' and 'bool'") + with pytest.raises(TypeError, match=msg): + df1 | df2 + + def test_logical_operators(self): + def _check_bin_op(op): + result = op(df1, df2) + expected = DataFrame( + op(df1.values, df2.values), index=df1.index, columns=df1.columns + ) + assert result.values.dtype == np.bool_ + tm.assert_frame_equal(result, expected) + + def _check_unary_op(op): + result = op(df1) + expected = DataFrame(op(df1.values), index=df1.index, columns=df1.columns) + assert result.values.dtype == np.bool_ + tm.assert_frame_equal(result, expected) + + df1 = { + "a": {"a": True, "b": False, "c": False, "d": True, "e": True}, + "b": {"a": False, "b": True, "c": False, "d": False, "e": False}, + "c": {"a": False, "b": False, "c": True, "d": False, "e": False}, + "d": {"a": True, "b": False, "c": False, "d": True, "e": True}, + "e": {"a": True, "b": False, "c": False, "d": True, "e": True}, + } + + df2 = { + "a": {"a": True, "b": False, "c": True, "d": False, "e": False}, + "b": {"a": False, "b": True, "c": False, "d": False, "e": False}, + "c": {"a": True, "b": False, "c": True, "d": False, "e": False}, + "d": {"a": False, "b": False, "c": False, "d": True, "e": False}, + "e": {"a": False, "b": False, "c": False, "d": False, "e": True}, + } + + df1 = DataFrame(df1) + df2 = DataFrame(df2) + + _check_bin_op(operator.and_) + _check_bin_op(operator.or_) + _check_bin_op(operator.xor) + + _check_unary_op(operator.inv) # TODO: belongs elsewhere + + def test_logical_with_nas(self): + d = DataFrame({"a": [np.nan, False], "b": [True, True]}) + + # GH4947 + # bool comparisons should return bool + result = d["a"] | d["b"] + expected = Series([False, True]) + tm.assert_series_equal(result, expected) + + # GH4604, automatic casting here + result = d["a"].fillna(False) | d["b"] + expected = Series([True, True]) + tm.assert_series_equal(result, expected) + + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = d["a"].fillna(False, downcast=False) | d["b"] + expected = Series([True, True]) + tm.assert_series_equal(result, expected) + + def test_logical_ops_categorical_columns(self): + # GH#38367 + intervals = [Interval(1, 2), Interval(3, 4)] + data = DataFrame( + [[1, np.nan], [2, np.nan]], + columns=CategoricalIndex( + intervals, categories=intervals + [Interval(5, 6)] + ), + ) + mask = DataFrame( + [[False, False], [False, False]], columns=data.columns, dtype=bool + ) + result = mask | isnull(data) + expected = DataFrame( + [[False, True], [False, True]], + columns=CategoricalIndex( + intervals, categories=intervals + [Interval(5, 6)] + ), + ) + tm.assert_frame_equal(result, expected) + + def 
test_int_dtype_different_index_not_bool(self): + # GH 52500 + df1 = DataFrame([1, 2, 3], index=[10, 11, 23], columns=["a"]) + df2 = DataFrame([10, 20, 30], index=[11, 10, 23], columns=["a"]) + result = np.bitwise_xor(df1, df2) + expected = DataFrame([21, 8, 29], index=[10, 11, 23], columns=["a"]) + tm.assert_frame_equal(result, expected) + + result = df1 ^ df2 + tm.assert_frame_equal(result, expected) + + def test_different_dtypes_different_index_raises(self): + # GH 52538 + df1 = DataFrame([1, 2], index=["a", "b"]) + df2 = DataFrame([3, 4], index=["b", "c"]) + with pytest.raises(TypeError, match="unsupported operand type"): + df1 & df2 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_nonunique_indexes.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_nonunique_indexes.py new file mode 100644 index 00000000..4f0d5ad5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_nonunique_indexes.py @@ -0,0 +1,350 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Series, + date_range, +) +import pandas._testing as tm + + +def check(result, expected=None): + if expected is not None: + tm.assert_frame_equal(result, expected) + result.dtypes + str(result) + + +class TestDataFrameNonuniqueIndexes: + def test_setattr_columns_vs_construct_with_columns(self): + # assignment + # GH 3687 + arr = np.random.default_rng(2).standard_normal((3, 2)) + idx = list(range(2)) + df = DataFrame(arr, columns=["A", "A"]) + df.columns = idx + expected = DataFrame(arr, columns=idx) + check(df, expected) + + def test_setattr_columns_vs_construct_with_columns_datetimeindx(self): + idx = date_range("20130101", periods=4, freq="Q-NOV") + df = DataFrame( + [[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=["a", "a", "a", "a"] + ) + df.columns = idx + expected = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=idx) + check(df, expected) + + def test_insert_with_duplicate_columns(self): + # insert + df = DataFrame( + [[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], + columns=["foo", "bar", "foo", "hello"], + ) + df["string"] = "bah" + expected = DataFrame( + [[1, 1, 1, 5, "bah"], [1, 1, 2, 5, "bah"], [2, 1, 3, 5, "bah"]], + columns=["foo", "bar", "foo", "hello", "string"], + ) + check(df, expected) + with pytest.raises(ValueError, match="Length of value"): + df.insert(0, "AnotherColumn", range(len(df.index) - 1)) + + # insert same dtype + df["foo2"] = 3 + expected = DataFrame( + [[1, 1, 1, 5, "bah", 3], [1, 1, 2, 5, "bah", 3], [2, 1, 3, 5, "bah", 3]], + columns=["foo", "bar", "foo", "hello", "string", "foo2"], + ) + check(df, expected) + + # set (non-dup) + df["foo2"] = 4 + expected = DataFrame( + [[1, 1, 1, 5, "bah", 4], [1, 1, 2, 5, "bah", 4], [2, 1, 3, 5, "bah", 4]], + columns=["foo", "bar", "foo", "hello", "string", "foo2"], + ) + check(df, expected) + df["foo2"] = 3 + + # delete (non dup) + del df["bar"] + expected = DataFrame( + [[1, 1, 5, "bah", 3], [1, 2, 5, "bah", 3], [2, 3, 5, "bah", 3]], + columns=["foo", "foo", "hello", "string", "foo2"], + ) + check(df, expected) + + # try to delete again (its not consolidated) + del df["hello"] + expected = DataFrame( + [[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]], + columns=["foo", "foo", "string", "foo2"], + ) + check(df, expected) + + # consolidate + df = df._consolidate() + expected = DataFrame( + [[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]], + columns=["foo", "foo", "string", "foo2"], + ) + check(df, expected) + + # insert + 
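+        # position 2 lands the new float column between the duplicated "foo"
+        # pair and "string"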
df.insert(2, "new_col", 5.0) + expected = DataFrame( + [[1, 1, 5.0, "bah", 3], [1, 2, 5.0, "bah", 3], [2, 3, 5.0, "bah", 3]], + columns=["foo", "foo", "new_col", "string", "foo2"], + ) + check(df, expected) + + # insert a dup + with pytest.raises(ValueError, match="cannot insert"): + df.insert(2, "new_col", 4.0) + + df.insert(2, "new_col", 4.0, allow_duplicates=True) + expected = DataFrame( + [ + [1, 1, 4.0, 5.0, "bah", 3], + [1, 2, 4.0, 5.0, "bah", 3], + [2, 3, 4.0, 5.0, "bah", 3], + ], + columns=["foo", "foo", "new_col", "new_col", "string", "foo2"], + ) + check(df, expected) + + # delete (dup) + del df["foo"] + expected = DataFrame( + [[4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3]], + columns=["new_col", "new_col", "string", "foo2"], + ) + tm.assert_frame_equal(df, expected) + + def test_dup_across_dtypes(self): + # dup across dtypes + df = DataFrame( + [[1, 1, 1.0, 5], [1, 1, 2.0, 5], [2, 1, 3.0, 5]], + columns=["foo", "bar", "foo", "hello"], + ) + check(df) + + df["foo2"] = 7.0 + expected = DataFrame( + [[1, 1, 1.0, 5, 7.0], [1, 1, 2.0, 5, 7.0], [2, 1, 3.0, 5, 7.0]], + columns=["foo", "bar", "foo", "hello", "foo2"], + ) + check(df, expected) + + result = df["foo"] + expected = DataFrame([[1, 1.0], [1, 2.0], [2, 3.0]], columns=["foo", "foo"]) + check(result, expected) + + # multiple replacements + df["foo"] = "string" + expected = DataFrame( + [ + ["string", 1, "string", 5, 7.0], + ["string", 1, "string", 5, 7.0], + ["string", 1, "string", 5, 7.0], + ], + columns=["foo", "bar", "foo", "hello", "foo2"], + ) + check(df, expected) + + del df["foo"] + expected = DataFrame( + [[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "hello", "foo2"] + ) + check(df, expected) + + def test_column_dups_indexes(self): + # check column dups with index equal and not equal to df's index + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + index=["a", "b", "c", "d", "e"], + columns=["A", "B", "A"], + ) + for index in [df.index, pd.Index(list("edcba"))]: + this_df = df.copy() + expected_ser = Series(index.values, index=this_df.index) + expected_df = DataFrame( + {"A": expected_ser, "B": this_df["B"]}, + columns=["A", "B", "A"], + ) + this_df["A"] = index + check(this_df, expected_df) + + def test_changing_dtypes_with_duplicate_columns(self): + # multiple assignments that change dtypes + # the location indexer is a slice + # GH 6120 + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=["that", "that"] + ) + expected = DataFrame(1.0, index=range(5), columns=["that", "that"]) + + df["that"] = 1.0 + check(df, expected) + + df = DataFrame( + np.random.default_rng(2).random((5, 2)), columns=["that", "that"] + ) + expected = DataFrame(1, index=range(5), columns=["that", "that"]) + + df["that"] = 1 + check(df, expected) + + def test_dup_columns_comparisons(self): + # equality + df1 = DataFrame([[1, 2], [2, np.nan], [3, 4], [4, 4]], columns=["A", "B"]) + df2 = DataFrame([[0, 1], [2, 4], [2, np.nan], [4, 5]], columns=["A", "A"]) + + # not-comparing like-labelled + msg = ( + r"Can only compare identically-labeled \(both index and columns\) " + "DataFrame objects" + ) + with pytest.raises(ValueError, match=msg): + df1 == df2 + + df1r = df1.reindex_like(df2) + result = df1r == df2 + expected = DataFrame( + [[False, True], [True, False], [False, False], [True, False]], + columns=["A", "A"], + ) + tm.assert_frame_equal(result, expected) + + def test_mixed_column_selection(self): + # mixed column selection + # GH 5639 + dfbool = DataFrame( + { + "one": 
Series([True, True, False], index=["a", "b", "c"]), + "two": Series([False, False, True, False], index=["a", "b", "c", "d"]), + "three": Series([False, True, True, True], index=["a", "b", "c", "d"]), + } + ) + expected = pd.concat([dfbool["one"], dfbool["three"], dfbool["one"]], axis=1) + result = dfbool[["one", "three", "one"]] + check(result, expected) + + def test_multi_axis_dups(self): + # multi-axis dups + # GH 6121 + df = DataFrame( + np.arange(25.0).reshape(5, 5), + index=["a", "b", "c", "d", "e"], + columns=["A", "B", "C", "D", "E"], + ) + z = df[["A", "C", "A"]].copy() + expected = z.loc[["a", "c", "a"]] + + df = DataFrame( + np.arange(25.0).reshape(5, 5), + index=["a", "b", "c", "d", "e"], + columns=["A", "B", "C", "D", "E"], + ) + z = df[["A", "C", "A"]] + result = z.loc[["a", "c", "a"]] + check(result, expected) + + def test_columns_with_dups(self): + # GH 3468 related + + # basic + df = DataFrame([[1, 2]], columns=["a", "a"]) + df.columns = ["a", "a.1"] + str(df) + expected = DataFrame([[1, 2]], columns=["a", "a.1"]) + tm.assert_frame_equal(df, expected) + + df = DataFrame([[1, 2, 3]], columns=["b", "a", "a"]) + df.columns = ["b", "a", "a.1"] + str(df) + expected = DataFrame([[1, 2, 3]], columns=["b", "a", "a.1"]) + tm.assert_frame_equal(df, expected) + + def test_columns_with_dup_index(self): + # with a dup index + df = DataFrame([[1, 2]], columns=["a", "a"]) + df.columns = ["b", "b"] + str(df) + expected = DataFrame([[1, 2]], columns=["b", "b"]) + tm.assert_frame_equal(df, expected) + + def test_multi_dtype(self): + # multi-dtype + df = DataFrame( + [[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], + columns=["a", "a", "b", "b", "d", "c", "c"], + ) + df.columns = list("ABCDEFG") + str(df) + expected = DataFrame( + [[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("ABCDEFG") + ) + tm.assert_frame_equal(df, expected) + + def test_multi_dtype2(self): + df = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a", "a", "a"]) + df.columns = ["a", "a.1", "a.2", "a.3"] + str(df) + expected = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a.1", "a.2", "a.3"]) + tm.assert_frame_equal(df, expected) + + def test_dups_across_blocks(self, using_array_manager): + # dups across blocks + df_float = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), dtype="float64" + ) + df_int = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)).astype("int64") + ) + df_bool = DataFrame(True, index=df_float.index, columns=df_float.columns) + df_object = DataFrame("foo", index=df_float.index, columns=df_float.columns) + df_dt = DataFrame( + pd.Timestamp("20010101"), index=df_float.index, columns=df_float.columns + ) + df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1) + + if not using_array_manager: + assert len(df._mgr.blknos) == len(df.columns) + assert len(df._mgr.blklocs) == len(df.columns) + + # testing iloc + for i in range(len(df.columns)): + df.iloc[:, i] + + def test_dup_columns_across_dtype(self): + # dup columns across dtype GH 2079/2194 + vals = [[1, -1, 2.0], [2, -2, 3.0]] + rs = DataFrame(vals, columns=["A", "A", "B"]) + xp = DataFrame(vals) + xp.columns = ["A", "A", "B"] + tm.assert_frame_equal(rs, xp) + + def test_set_value_by_index(self): + # See gh-12344 + warn = None + msg = "will attempt to set the values inplace" + + df = DataFrame(np.arange(9).reshape(3, 3).T) + df.columns = list("AAA") + expected = df.iloc[:, 2] + + with tm.assert_produces_warning(warn, match=msg): + df.iloc[:, 0] = 3 + tm.assert_series_equal(df.iloc[:, 2], expected) + + df = 
DataFrame(np.arange(9).reshape(3, 3).T) + df.columns = [2, float(2), str(2)] + expected = df.iloc[:, 1] + + with tm.assert_produces_warning(warn, match=msg): + df.iloc[:, 0] = 3 + tm.assert_series_equal(df.iloc[:, 1], expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_npfuncs.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_npfuncs.py new file mode 100644 index 00000000..afb53bf2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_npfuncs.py @@ -0,0 +1,89 @@ +""" +Tests for np.foo applied to DataFrame, not necessarily ufuncs. +""" +import numpy as np + +from pandas import ( + Categorical, + DataFrame, +) +import pandas._testing as tm + + +class TestAsArray: + def test_asarray_homogeneous(self): + df = DataFrame({"A": Categorical([1, 2]), "B": Categorical([1, 2])}) + result = np.asarray(df) + # may change from object in the future + expected = np.array([[1, 1], [2, 2]], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + def test_np_sqrt(self, float_frame): + with np.errstate(all="ignore"): + result = np.sqrt(float_frame) + assert isinstance(result, type(float_frame)) + assert result.index.is_(float_frame.index) + assert result.columns.is_(float_frame.columns) + + tm.assert_frame_equal(result, float_frame.apply(np.sqrt)) + + def test_sum_deprecated_axis_behavior(self): + # GH#52042 deprecated behavior of df.sum(axis=None), which gets + # called when we do np.sum(df) + + arr = np.random.default_rng(2).standard_normal((4, 3)) + df = DataFrame(arr) + + msg = "The behavior of DataFrame.sum with axis=None is deprecated" + with tm.assert_produces_warning( + FutureWarning, match=msg, check_stacklevel=False + ): + res = np.sum(df) + + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.sum(axis=None) + tm.assert_series_equal(res, expected) + + def test_np_ravel(self): + # GH26247 + arr = np.array( + [ + [0.11197053, 0.44361564, -0.92589452], + [0.05883648, -0.00948922, -0.26469934], + ] + ) + + result = np.ravel([DataFrame(batch.reshape(1, 3)) for batch in arr]) + expected = np.array( + [ + 0.11197053, + 0.44361564, + -0.92589452, + 0.05883648, + -0.00948922, + -0.26469934, + ] + ) + tm.assert_numpy_array_equal(result, expected) + + result = np.ravel(DataFrame(arr[0].reshape(1, 3), columns=["x1", "x2", "x3"])) + expected = np.array([0.11197053, 0.44361564, -0.92589452]) + tm.assert_numpy_array_equal(result, expected) + + result = np.ravel( + [ + DataFrame(batch.reshape(1, 3), columns=["x1", "x2", "x3"]) + for batch in arr + ] + ) + expected = np.array( + [ + 0.11197053, + 0.44361564, + -0.92589452, + 0.05883648, + -0.00948922, + -0.26469934, + ] + ) + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_query_eval.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_query_eval.py new file mode 100644 index 00000000..72e82361 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_query_eval.py @@ -0,0 +1,1406 @@ +import operator + +import numpy as np +import pytest + +from pandas.errors import ( + NumExprClobberingError, + UndefinedVariableError, +) +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + date_range, +) +import pandas._testing as tm +from pandas.core.computation.check import NUMEXPR_INSTALLED + + +@pytest.fixture(params=["python", "pandas"], ids=lambda x: x) +def parser(request): 
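+    """Run each test under both the "python" and "pandas" parsers."""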
+    return request.param
+
+
+@pytest.fixture(
+    params=["python", pytest.param("numexpr", marks=td.skip_if_no_ne)], ids=lambda x: x
+)
+def engine(request):
+    return request.param
+
+
+def skip_if_no_pandas_parser(parser):
+    if parser != "pandas":
+        pytest.skip(f"cannot evaluate with parser {repr(parser)}")
+
+
+class TestCompat:
+    @pytest.fixture
+    def df(self):
+        return DataFrame({"A": [1, 2, 3]})
+
+    @pytest.fixture
+    def expected1(self, df):
+        return df[df.A > 0]
+
+    @pytest.fixture
+    def expected2(self, df):
+        return df.A + 1
+
+    def test_query_default(self, df, expected1, expected2):
+        # GH 12749
+        # this should always work, whether NUMEXPR_INSTALLED or not
+        result = df.query("A>0")
+        tm.assert_frame_equal(result, expected1)
+        result = df.eval("A+1")
+        tm.assert_series_equal(result, expected2, check_names=False)
+
+    def test_query_None(self, df, expected1, expected2):
+        result = df.query("A>0", engine=None)
+        tm.assert_frame_equal(result, expected1)
+        result = df.eval("A+1", engine=None)
+        tm.assert_series_equal(result, expected2, check_names=False)
+
+    def test_query_python(self, df, expected1, expected2):
+        result = df.query("A>0", engine="python")
+        tm.assert_frame_equal(result, expected1)
+        result = df.eval("A+1", engine="python")
+        tm.assert_series_equal(result, expected2, check_names=False)
+
+    def test_query_numexpr(self, df, expected1, expected2):
+        if NUMEXPR_INSTALLED:
+            result = df.query("A>0", engine="numexpr")
+            tm.assert_frame_equal(result, expected1)
+            result = df.eval("A+1", engine="numexpr")
+            tm.assert_series_equal(result, expected2, check_names=False)
+        else:
+            msg = (
+                r"'numexpr' is not installed or an unsupported version. "
+                r"Cannot use engine='numexpr' for query/eval if 'numexpr' is "
+                r"not installed"
+            )
+            with pytest.raises(ImportError, match=msg):
+                df.query("A>0", engine="numexpr")
+            with pytest.raises(ImportError, match=msg):
+                df.eval("A+1", engine="numexpr")
+
+
+class TestDataFrameEval:
+    # smaller hits python, larger hits numexpr
+    @pytest.mark.parametrize("n", [4, 4000])
+    @pytest.mark.parametrize(
+        "op_str,op,rop",
+        [
+            ("+", "__add__", "__radd__"),
+            ("-", "__sub__", "__rsub__"),
+            ("*", "__mul__", "__rmul__"),
+            ("/", "__truediv__", "__rtruediv__"),
+        ],
+    )
+    def test_ops(self, op_str, op, rop, n):
+        # test ops and reversed ops in evaluation
+        # GH7198
+
+        df = DataFrame(1, index=range(n), columns=list("abcd"))
+        df.iloc[0] = 2
+        m = df.mean()
+
+        base = DataFrame(  # noqa: F841
+            np.tile(m.values, n).reshape(n, -1), columns=list("abcd")
+        )
+
+        expected = eval(f"base {op_str} df")
+
+        # ops as strings
+        result = eval(f"m {op_str} df")
+        tm.assert_frame_equal(result, expected)
+
+        # these are commutative; compare against the operator symbol, not the
+        # dunder name, so the branches actually run
+        if op_str in ["+", "*"]:
+            result = getattr(df, op)(m)
+            tm.assert_frame_equal(result, expected)
+
+        # these are not
+        elif op_str in ["-", "/"]:
+            result = getattr(df, rop)(m)
+            tm.assert_frame_equal(result, expected)
+
+    def test_dataframe_sub_numexpr_path(self):
+        # GH7192: Note we need a large number of rows to ensure this
+        # goes through the numexpr path
+        df = DataFrame({"A": np.random.default_rng(2).standard_normal(25000)})
+        df.iloc[0:5] = np.nan
+        expected = 1 - np.isnan(df.iloc[0:25])
+        result = (1 - np.isnan(df)).iloc[0:25]
+        tm.assert_frame_equal(result, expected)
+
+    def test_query_non_str(self):
+        # GH 11485
+        df = DataFrame({"A": [1, 2, 3], "B": ["a", "b", "b"]})
+
+        msg = "expr must be a string to be evaluated"
+        with pytest.raises(ValueError, match=msg):
+            df.query(lambda x: x.B == "b")
+
+        with pytest.raises(ValueError,
match=msg): + df.query(111) + + def test_query_empty_string(self): + # GH 13139 + df = DataFrame({"A": [1, 2, 3]}) + + msg = "expr cannot be an empty string" + with pytest.raises(ValueError, match=msg): + df.query("") + + def test_eval_resolvers_as_list(self): + # GH 14095 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), columns=list("ab") + ) + dict1 = {"a": 1} + dict2 = {"b": 2} + assert df.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"] + assert pd.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"] + + def test_eval_resolvers_combined(self): + # GH 34966 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), columns=list("ab") + ) + dict1 = {"c": 2} + + # Both input and default index/column resolvers should be usable + result = df.eval("a + b * c", resolvers=[dict1]) + + expected = df["a"] + df["b"] * dict1["c"] + tm.assert_series_equal(result, expected) + + def test_eval_object_dtype_binop(self): + # GH#24883 + df = DataFrame({"a1": ["Y", "N"]}) + res = df.eval("c = ((a1 == 'Y') & True)") + expected = DataFrame({"a1": ["Y", "N"], "c": [True, False]}) + tm.assert_frame_equal(res, expected) + + +class TestDataFrameQueryWithMultiIndex: + def test_query_with_named_multiindex(self, parser, engine): + skip_if_no_pandas_parser(parser) + a = np.random.default_rng(2).choice(["red", "green"], size=10) + b = np.random.default_rng(2).choice(["eggs", "ham"], size=10) + index = MultiIndex.from_arrays([a, b], names=["color", "food"]) + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)), index=index) + ind = Series( + df.index.get_level_values("color").values, index=index, name="color" + ) + + # equality + res1 = df.query('color == "red"', parser=parser, engine=engine) + res2 = df.query('"red" == color', parser=parser, engine=engine) + exp = df[ind == "red"] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # inequality + res1 = df.query('color != "red"', parser=parser, engine=engine) + res2 = df.query('"red" != color', parser=parser, engine=engine) + exp = df[ind != "red"] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # list equality (really just set membership) + res1 = df.query('color == ["red"]', parser=parser, engine=engine) + res2 = df.query('["red"] == color', parser=parser, engine=engine) + exp = df[ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + res1 = df.query('color != ["red"]', parser=parser, engine=engine) + res2 = df.query('["red"] != color', parser=parser, engine=engine) + exp = df[~ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # in/not in ops + res1 = df.query('["red"] in color', parser=parser, engine=engine) + res2 = df.query('"red" in color', parser=parser, engine=engine) + exp = df[ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + res1 = df.query('["red"] not in color', parser=parser, engine=engine) + res2 = df.query('"red" not in color', parser=parser, engine=engine) + exp = df[~ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + def test_query_with_unnamed_multiindex(self, parser, engine): + skip_if_no_pandas_parser(parser) + a = np.random.default_rng(2).choice(["red", "green"], size=10) + b = np.random.default_rng(2).choice(["eggs", "ham"], size=10) + index = MultiIndex.from_arrays([a, b]) + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)), index=index) + 
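+        # with no level names, query() exposes the levels as ilevel_0 and
+        # ilevel_1; build an aligned helper Series to derive expected masks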
ind = Series(df.index.get_level_values(0).values, index=index) + + res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine) + res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine) + exp = df[ind == "red"] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # inequality + res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine) + res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine) + exp = df[ind != "red"] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # list equality (really just set membership) + res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine) + res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine) + exp = df[ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine) + res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine) + exp = df[~ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # in/not in ops + res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine) + res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine) + exp = df[ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine) + res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine) + exp = df[~ind.isin(["red"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # ## LEVEL 1 + ind = Series(df.index.get_level_values(1).values, index=index) + res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine) + res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine) + exp = df[ind == "eggs"] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # inequality + res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine) + res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine) + exp = df[ind != "eggs"] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # list equality (really just set membership) + res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine) + res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine) + exp = df[ind.isin(["eggs"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine) + res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine) + exp = df[~ind.isin(["eggs"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + # in/not in ops + res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine) + res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine) + exp = df[ind.isin(["eggs"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + res1 = df.query('["eggs"] not in ilevel_1', parser=parser, engine=engine) + res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine) + exp = df[~ind.isin(["eggs"])] + tm.assert_frame_equal(res1, exp) + tm.assert_frame_equal(res2, exp) + + def test_query_with_partially_named_multiindex(self, parser, engine): + skip_if_no_pandas_parser(parser) + a = np.random.default_rng(2).choice(["red", "green"], size=10) + b = np.arange(10) + index = MultiIndex.from_arrays([a, b]) + index.names = [None, "rating"] + df = 
DataFrame(np.random.default_rng(2).standard_normal((10, 2)), index=index) + res = df.query("rating == 1", parser=parser, engine=engine) + ind = Series( + df.index.get_level_values("rating").values, index=index, name="rating" + ) + exp = df[ind == 1] + tm.assert_frame_equal(res, exp) + + res = df.query("rating != 1", parser=parser, engine=engine) + ind = Series( + df.index.get_level_values("rating").values, index=index, name="rating" + ) + exp = df[ind != 1] + tm.assert_frame_equal(res, exp) + + res = df.query('ilevel_0 == "red"', parser=parser, engine=engine) + ind = Series(df.index.get_level_values(0).values, index=index) + exp = df[ind == "red"] + tm.assert_frame_equal(res, exp) + + res = df.query('ilevel_0 != "red"', parser=parser, engine=engine) + ind = Series(df.index.get_level_values(0).values, index=index) + exp = df[ind != "red"] + tm.assert_frame_equal(res, exp) + + def test_query_multiindex_get_index_resolvers(self): + df = tm.makeCustomDataframe( + 10, 3, r_idx_nlevels=2, r_idx_names=["spam", "eggs"] + ) + resolvers = df._get_index_resolvers() + + def to_series(mi, level): + level_values = mi.get_level_values(level) + s = level_values.to_series() + s.index = mi + return s + + col_series = df.columns.to_series() + expected = { + "index": df.index, + "columns": col_series, + "spam": to_series(df.index, "spam"), + "eggs": to_series(df.index, "eggs"), + "C0": col_series, + } + for k, v in resolvers.items(): + if isinstance(v, Index): + assert v.is_(expected[k]) + elif isinstance(v, Series): + tm.assert_series_equal(v, expected[k]) + else: + raise AssertionError("object must be a Series or Index") + + +@td.skip_if_no_ne +class TestDataFrameQueryNumExprPandas: + @pytest.fixture + def engine(self): + return "numexpr" + + @pytest.fixture + def parser(self): + return "pandas" + + def test_date_query_with_attribute_access(self, engine, parser): + skip_if_no_pandas_parser(parser) + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + df["dates1"] = date_range("1/1/2012", periods=5) + df["dates2"] = date_range("1/1/2013", periods=5) + df["dates3"] = date_range("1/1/2014", periods=5) + res = df.query( + "@df.dates1 < 20130101 < @df.dates3", engine=engine, parser=parser + ) + expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_query_no_attribute_access(self, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + df["dates1"] = date_range("1/1/2012", periods=5) + df["dates2"] = date_range("1/1/2013", periods=5) + df["dates3"] = date_range("1/1/2014", periods=5) + res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser) + expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_query_with_NaT(self, engine, parser): + n = 10 + df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))) + df["dates1"] = date_range("1/1/2012", periods=n) + df["dates2"] = date_range("1/1/2013", periods=n) + df["dates3"] = date_range("1/1/2014", periods=n) + df.loc[np.random.default_rng(2).random(n) > 0.5, "dates1"] = pd.NaT + df.loc[np.random.default_rng(2).random(n) > 0.5, "dates3"] = pd.NaT + res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser) + expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_index_query(self, engine, parser): + n = 10 + df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))) + df["dates1"] = 
date_range("1/1/2012", periods=n) + df["dates3"] = date_range("1/1/2014", periods=n) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None + res = df.query("index < 20130101 < dates3", engine=engine, parser=parser) + expec = df[(df.index < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_index_query_with_NaT(self, engine, parser): + n = 10 + # Cast to object to avoid implicit cast when setting entry to pd.NaT below + df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))).astype( + {0: object} + ) + df["dates1"] = date_range("1/1/2012", periods=n) + df["dates3"] = date_range("1/1/2014", periods=n) + df.iloc[0, 0] = pd.NaT + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None + res = df.query("index < 20130101 < dates3", engine=engine, parser=parser) + expec = df[(df.index < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_index_query_with_NaT_duplicates(self, engine, parser): + n = 10 + d = {} + d["dates1"] = date_range("1/1/2012", periods=n) + d["dates3"] = date_range("1/1/2014", periods=n) + df = DataFrame(d) + df.loc[np.random.default_rng(2).random(n) > 0.5, "dates1"] = pd.NaT + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None + res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser) + expec = df[(df.index.to_series() < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_query_with_non_date(self, engine, parser): + n = 10 + df = DataFrame( + {"dates": date_range("1/1/2012", periods=n), "nondate": np.arange(n)} + ) + + result = df.query("dates == nondate", parser=parser, engine=engine) + assert len(result) == 0 + + result = df.query("dates != nondate", parser=parser, engine=engine) + tm.assert_frame_equal(result, df) + + msg = r"Invalid comparison between dtype=datetime64\[ns\] and ndarray" + for op in ["<", ">", "<=", ">="]: + with pytest.raises(TypeError, match=msg): + df.query(f"dates {op} nondate", parser=parser, engine=engine) + + def test_query_syntax_error(self, engine, parser): + df = DataFrame({"i": range(10), "+": range(3, 13), "r": range(4, 14)}) + msg = "invalid syntax" + with pytest.raises(SyntaxError, match=msg): + df.query("i - +", engine=engine, parser=parser) + + def test_query_scope(self, engine, parser): + skip_if_no_pandas_parser(parser) + + df = DataFrame( + np.random.default_rng(2).standard_normal((20, 2)), columns=list("ab") + ) + + a, b = 1, 2 # noqa: F841 + res = df.query("a > b", engine=engine, parser=parser) + expected = df[df.a > df.b] + tm.assert_frame_equal(res, expected) + + res = df.query("@a > b", engine=engine, parser=parser) + expected = df[a > df.b] + tm.assert_frame_equal(res, expected) + + # no local variable c + with pytest.raises( + UndefinedVariableError, match="local variable 'c' is not defined" + ): + df.query("@a > b > @c", engine=engine, parser=parser) + + # no column named 'c' + with pytest.raises(UndefinedVariableError, match="name 'c' is not defined"): + df.query("@a > b > c", engine=engine, parser=parser) + + def test_query_doesnt_pickup_local(self, engine, parser): + n = m = 10 + df = DataFrame( + np.random.default_rng(2).integers(m, size=(n, 3)), columns=list("abc") + ) + + # we don't pick up the local 'sin' + with pytest.raises(UndefinedVariableError, match="name 'sin' is not defined"): + df.query("sin > 5", engine=engine, parser=parser) + + def 
test_query_builtin(self, engine, parser): + n = m = 10 + df = DataFrame( + np.random.default_rng(2).integers(m, size=(n, 3)), columns=list("abc") + ) + + df.index.name = "sin" + msg = "Variables in expression.+" + with pytest.raises(NumExprClobberingError, match=msg): + df.query("sin > 5", engine=engine, parser=parser) + + def test_query(self, engine, parser): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), columns=["a", "b", "c"] + ) + + tm.assert_frame_equal( + df.query("a < b", engine=engine, parser=parser), df[df.a < df.b] + ) + tm.assert_frame_equal( + df.query("a + b > b * c", engine=engine, parser=parser), + df[df.a + df.b > df.b * df.c], + ) + + def test_query_index_with_name(self, engine, parser): + df = DataFrame( + np.random.default_rng(2).integers(10, size=(10, 3)), + index=Index(range(10), name="blob"), + columns=["a", "b", "c"], + ) + res = df.query("(blob < 5) & (a < b)", engine=engine, parser=parser) + expec = df[(df.index < 5) & (df.a < df.b)] + tm.assert_frame_equal(res, expec) + + res = df.query("blob < b", engine=engine, parser=parser) + expec = df[df.index < df.b] + + tm.assert_frame_equal(res, expec) + + def test_query_index_without_name(self, engine, parser): + df = DataFrame( + np.random.default_rng(2).integers(10, size=(10, 3)), + index=range(10), + columns=["a", "b", "c"], + ) + + # "index" should refer to the index + res = df.query("index < b", engine=engine, parser=parser) + expec = df[df.index < df.b] + tm.assert_frame_equal(res, expec) + + # test against a scalar + res = df.query("index < 5", engine=engine, parser=parser) + expec = df[df.index < 5] + tm.assert_frame_equal(res, expec) + + def test_nested_scope(self, engine, parser): + skip_if_no_pandas_parser(parser) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + df2 = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + expected = df[(df > 0) & (df2 > 0)] + + result = df.query("(@df > 0) & (@df2 > 0)", engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + result = pd.eval("df[df > 0 and df2 > 0]", engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + result = pd.eval( + "df[df > 0 and df2 > 0 and df[df > 0] > 0]", engine=engine, parser=parser + ) + expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)] + tm.assert_frame_equal(result, expected) + + result = pd.eval("df[(df>0) & (df2>0)]", engine=engine, parser=parser) + expected = df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + def test_nested_raises_on_local_self_reference(self, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + + # can't reference ourself b/c we're a local so @ is necessary + with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"): + df.query("df > 0", engine=engine, parser=parser) + + def test_local_syntax(self, engine, parser): + skip_if_no_pandas_parser(parser) + + df = DataFrame( + np.random.default_rng(2).standard_normal((100, 10)), + columns=list("abcdefghij"), + ) + b = 1 + expect = df[df.a < b] + result = df.query("a < @b", engine=engine, parser=parser) + tm.assert_frame_equal(result, expect) + + expect = df[df.a < df.b] + result = df.query("a < b", engine=engine, parser=parser) + tm.assert_frame_equal(result, expect) + + def test_chained_cmp_and_in(self, engine, parser): + skip_if_no_pandas_parser(parser) + cols = list("abc") + df = DataFrame( + np.random.default_rng(2).standard_normal((100, len(cols))), columns=cols + ) + 
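+        # one expression mixing a chained comparison (a < b < c) with chained
+        # "not in" membership checks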
res = df.query( + "a < b < c and a not in b not in c", engine=engine, parser=parser + ) + ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b) + expec = df[ind] + tm.assert_frame_equal(res, expec) + + def test_local_variable_with_in(self, engine, parser): + skip_if_no_pandas_parser(parser) + a = Series(np.random.default_rng(2).integers(3, size=15), name="a") + b = Series(np.random.default_rng(2).integers(10, size=15), name="b") + df = DataFrame({"a": a, "b": b}) + + expected = df.loc[(df.b - 1).isin(a)] + result = df.query("b - 1 in a", engine=engine, parser=parser) + tm.assert_frame_equal(expected, result) + + b = Series(np.random.default_rng(2).integers(10, size=15), name="b") + expected = df.loc[(b - 1).isin(a)] + result = df.query("@b - 1 in a", engine=engine, parser=parser) + tm.assert_frame_equal(expected, result) + + def test_at_inside_string(self, engine, parser): + skip_if_no_pandas_parser(parser) + c = 1 # noqa: F841 + df = DataFrame({"a": ["a", "a", "b", "b", "@c", "@c"]}) + result = df.query('a == "@c"', engine=engine, parser=parser) + expected = df[df.a == "@c"] + tm.assert_frame_equal(result, expected) + + def test_query_undefined_local(self): + engine, parser = self.engine, self.parser + skip_if_no_pandas_parser(parser) + + df = DataFrame(np.random.default_rng(2).random((10, 2)), columns=list("ab")) + with pytest.raises( + UndefinedVariableError, match="local variable 'c' is not defined" + ): + df.query("a == @c", engine=engine, parser=parser) + + def test_index_resolvers_come_after_columns_with_the_same_name( + self, engine, parser + ): + n = 1 # noqa: F841 + a = np.r_[20:101:20] + + df = DataFrame( + {"index": a, "b": np.random.default_rng(2).standard_normal(a.size)} + ) + df.index.name = "index" + result = df.query("index > 5", engine=engine, parser=parser) + expected = df[df["index"] > 5] + tm.assert_frame_equal(result, expected) + + df = DataFrame( + {"index": a, "b": np.random.default_rng(2).standard_normal(a.size)} + ) + result = df.query("ilevel_0 > 5", engine=engine, parser=parser) + expected = df.loc[df.index[df.index > 5]] + tm.assert_frame_equal(result, expected) + + df = DataFrame({"a": a, "b": np.random.default_rng(2).standard_normal(a.size)}) + df.index.name = "a" + result = df.query("a > 5", engine=engine, parser=parser) + expected = df[df.a > 5] + tm.assert_frame_equal(result, expected) + + result = df.query("index > 5", engine=engine, parser=parser) + expected = df.loc[df.index[df.index > 5]] + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("op, f", [["==", operator.eq], ["!=", operator.ne]]) + def test_inf(self, op, f, engine, parser): + n = 10 + df = DataFrame( + { + "a": np.random.default_rng(2).random(n), + "b": np.random.default_rng(2).random(n), + } + ) + df.loc[::2, 0] = np.inf + q = f"a {op} inf" + expected = df[f(df.a, np.inf)] + result = df.query(q, engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + def test_check_tz_aware_index_query(self, tz_aware_fixture): + # https://github.com/pandas-dev/pandas/issues/29463 + tz = tz_aware_fixture + df_index = date_range( + start="2019-01-01", freq="1d", periods=10, tz=tz, name="time" + ) + expected = DataFrame(index=df_index) + df = DataFrame(index=df_index) + result = df.query('"2018-01-03 00:00:00+00" < time') + tm.assert_frame_equal(result, expected) + + expected = DataFrame(df_index) + result = df.reset_index().query('"2018-01-03 00:00:00+00" < time') + tm.assert_frame_equal(result, expected) + + def test_method_calls_in_query(self, 
engine, parser): + # https://github.com/pandas-dev/pandas/issues/22435 + n = 10 + df = DataFrame( + { + "a": 2 * np.random.default_rng(2).random(n), + "b": np.random.default_rng(2).random(n), + } + ) + expected = df[df["a"].astype("int") == 0] + result = df.query("a.astype('int') == 0", engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + df = DataFrame( + { + "a": np.where( + np.random.default_rng(2).random(n) < 0.5, + np.nan, + np.random.default_rng(2).standard_normal(n), + ), + "b": np.random.default_rng(2).standard_normal(n), + } + ) + expected = df[df["a"].notnull()] + result = df.query("a.notnull()", engine=engine, parser=parser) + tm.assert_frame_equal(result, expected) + + +@td.skip_if_no_ne +class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas): + @pytest.fixture + def engine(self): + return "numexpr" + + @pytest.fixture + def parser(self): + return "python" + + def test_date_query_no_attribute_access(self, engine, parser): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + df["dates1"] = date_range("1/1/2012", periods=5) + df["dates2"] = date_range("1/1/2013", periods=5) + df["dates3"] = date_range("1/1/2014", periods=5) + res = df.query( + "(dates1 < 20130101) & (20130101 < dates3)", engine=engine, parser=parser + ) + expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_query_with_NaT(self, engine, parser): + n = 10 + df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))) + df["dates1"] = date_range("1/1/2012", periods=n) + df["dates2"] = date_range("1/1/2013", periods=n) + df["dates3"] = date_range("1/1/2014", periods=n) + df.loc[np.random.default_rng(2).random(n) > 0.5, "dates1"] = pd.NaT + df.loc[np.random.default_rng(2).random(n) > 0.5, "dates3"] = pd.NaT + res = df.query( + "(dates1 < 20130101) & (20130101 < dates3)", engine=engine, parser=parser + ) + expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_index_query(self, engine, parser): + n = 10 + df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))) + df["dates1"] = date_range("1/1/2012", periods=n) + df["dates3"] = date_range("1/1/2014", periods=n) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None + res = df.query( + "(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser + ) + expec = df[(df.index < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_index_query_with_NaT(self, engine, parser): + n = 10 + # Cast to object to avoid implicit cast when setting entry to pd.NaT below + df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))).astype( + {0: object} + ) + df["dates1"] = date_range("1/1/2012", periods=n) + df["dates3"] = date_range("1/1/2014", periods=n) + df.iloc[0, 0] = pd.NaT + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None + res = df.query( + "(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser + ) + expec = df[(df.index < "20130101") & ("20130101" < df.dates3)] + tm.assert_frame_equal(res, expec) + + def test_date_index_query_with_NaT_duplicates(self, engine, parser): + n = 10 + df = DataFrame(np.random.default_rng(2).standard_normal((n, 3))) + df["dates1"] = date_range("1/1/2012", periods=n) + df["dates3"] = date_range("1/1/2014", periods=n) + df.loc[np.random.default_rng(2).random(n) > 0.5, "dates1"] = pd.NaT + return_value = 
df.set_index("dates1", inplace=True, drop=True) + assert return_value is None + msg = r"'BoolOp' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + df.query("index < 20130101 < dates3", engine=engine, parser=parser) + + def test_nested_scope(self, engine, parser): + # smoke test + x = 1 # noqa: F841 + result = pd.eval("x + 1", engine=engine, parser=parser) + assert result == 2 + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + df2 = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + + # don't have the pandas parser + msg = r"The '@' prefix is only supported by the pandas parser" + with pytest.raises(SyntaxError, match=msg): + df.query("(@df>0) & (@df2>0)", engine=engine, parser=parser) + + with pytest.raises(UndefinedVariableError, match="name 'df' is not defined"): + df.query("(df>0) & (df2>0)", engine=engine, parser=parser) + + expected = df[(df > 0) & (df2 > 0)] + result = pd.eval("df[(df > 0) & (df2 > 0)]", engine=engine, parser=parser) + tm.assert_frame_equal(expected, result) + + expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)] + result = pd.eval( + "df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]", engine=engine, parser=parser + ) + tm.assert_frame_equal(expected, result) + + def test_query_numexpr_with_min_and_max_columns(self): + df = DataFrame({"min": [1, 2, 3], "max": [4, 5, 6]}) + regex_to_match = ( + r"Variables in expression \"\(min\) == \(1\)\" " + r"overlap with builtins: \('min'\)" + ) + with pytest.raises(NumExprClobberingError, match=regex_to_match): + df.query("min == 1") + + regex_to_match = ( + r"Variables in expression \"\(max\) == \(1\)\" " + r"overlap with builtins: \('max'\)" + ) + with pytest.raises(NumExprClobberingError, match=regex_to_match): + df.query("max == 1") + + +class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas): + @pytest.fixture + def engine(self): + return "python" + + @pytest.fixture + def parser(self): + return "pandas" + + def test_query_builtin(self, engine, parser): + n = m = 10 + df = DataFrame( + np.random.default_rng(2).integers(m, size=(n, 3)), columns=list("abc") + ) + + df.index.name = "sin" + expected = df[df.index > 5] + result = df.query("sin > 5", engine=engine, parser=parser) + tm.assert_frame_equal(expected, result) + + +class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython): + @pytest.fixture + def engine(self): + return "python" + + @pytest.fixture + def parser(self): + return "python" + + def test_query_builtin(self, engine, parser): + n = m = 10 + df = DataFrame( + np.random.default_rng(2).integers(m, size=(n, 3)), columns=list("abc") + ) + + df.index.name = "sin" + expected = df[df.index > 5] + result = df.query("sin > 5", engine=engine, parser=parser) + tm.assert_frame_equal(expected, result) + + +class TestDataFrameQueryStrings: + def test_str_query_method(self, parser, engine): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 1)), columns=["b"]) + df["strings"] = Series(list("aabbccddee")) + expect = df[df.strings == "a"] + + if parser != "pandas": + col = "strings" + lst = '"a"' + + lhs = [col] * 2 + [lst] * 2 + rhs = lhs[::-1] + + eq, ne = "==", "!=" + ops = 2 * ([eq] + [ne]) + msg = r"'(Not)?In' nodes are not implemented" + + for lhs, op, rhs in zip(lhs, ops, rhs): + ex = f"{lhs} {op} {rhs}" + with pytest.raises(NotImplementedError, match=msg): + df.query( + ex, + engine=engine, + parser=parser, + local_dict={"strings": df.strings}, + ) + else: + res = df.query('"a" == strings', engine=engine, parser=parser) + 
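+            # with the pandas parser, ==/!= against a string column is
+            # elementwise and works in either operand order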
tm.assert_frame_equal(res, expect) + + res = df.query('strings == "a"', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + tm.assert_frame_equal(res, df[df.strings.isin(["a"])]) + + expect = df[df.strings != "a"] + res = df.query('strings != "a"', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + + res = df.query('"a" != strings', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + tm.assert_frame_equal(res, df[~df.strings.isin(["a"])]) + + def test_str_list_query_method(self, parser, engine): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 1)), columns=["b"]) + df["strings"] = Series(list("aabbccddee")) + expect = df[df.strings.isin(["a", "b"])] + + if parser != "pandas": + col = "strings" + lst = '["a", "b"]' + + lhs = [col] * 2 + [lst] * 2 + rhs = lhs[::-1] + + eq, ne = "==", "!=" + ops = 2 * ([eq] + [ne]) + msg = r"'(Not)?In' nodes are not implemented" + + for lhs, op, rhs in zip(lhs, ops, rhs): + ex = f"{lhs} {op} {rhs}" + with pytest.raises(NotImplementedError, match=msg): + df.query(ex, engine=engine, parser=parser) + else: + res = df.query('strings == ["a", "b"]', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + + res = df.query('["a", "b"] == strings', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + + expect = df[~df.strings.isin(["a", "b"])] + + res = df.query('strings != ["a", "b"]', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + + res = df.query('["a", "b"] != strings', engine=engine, parser=parser) + tm.assert_frame_equal(res, expect) + + def test_query_with_string_columns(self, parser, engine): + df = DataFrame( + { + "a": list("aaaabbbbcccc"), + "b": list("aabbccddeeff"), + "c": np.random.default_rng(2).integers(5, size=12), + "d": np.random.default_rng(2).integers(9, size=12), + } + ) + if parser == "pandas": + res = df.query("a in b", parser=parser, engine=engine) + expec = df[df.a.isin(df.b)] + tm.assert_frame_equal(res, expec) + + res = df.query("a in b and c < d", parser=parser, engine=engine) + expec = df[df.a.isin(df.b) & (df.c < df.d)] + tm.assert_frame_equal(res, expec) + else: + msg = r"'(Not)?In' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + df.query("a in b", parser=parser, engine=engine) + + msg = r"'BoolOp' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + df.query("a in b and c < d", parser=parser, engine=engine) + + def test_object_array_eq_ne(self, parser, engine): + df = DataFrame( + { + "a": list("aaaabbbbcccc"), + "b": list("aabbccddeeff"), + "c": np.random.default_rng(2).integers(5, size=12), + "d": np.random.default_rng(2).integers(9, size=12), + } + ) + res = df.query("a == b", parser=parser, engine=engine) + exp = df[df.a == df.b] + tm.assert_frame_equal(res, exp) + + res = df.query("a != b", parser=parser, engine=engine) + exp = df[df.a != df.b] + tm.assert_frame_equal(res, exp) + + def test_query_with_nested_strings(self, parser, engine): + skip_if_no_pandas_parser(parser) + events = [ + f"page {n} {act}" for n in range(1, 4) for act in ["load", "exit"] + ] * 2 + stamps1 = date_range("2014-01-01 0:00:01", freq="30s", periods=6) + stamps2 = date_range("2014-02-01 1:00:01", freq="30s", periods=6) + df = DataFrame( + { + "id": np.arange(1, 7).repeat(2), + "event": events, + "timestamp": stamps1.append(stamps2), + } + ) + + expected = df[df.event == '"page 1 load"'] + res = df.query("""'"page 1 load"' in event""", parser=parser, engine=engine) + 
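+        # the embedded double quotes survive parsing, so the "in" membership
+        # test selects the same rows as the == comparison above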
tm.assert_frame_equal(expected, res) + + def test_query_with_nested_special_character(self, parser, engine): + skip_if_no_pandas_parser(parser) + df = DataFrame({"a": ["a", "b", "test & test"], "b": [1, 2, 3]}) + res = df.query('a == "test & test"', parser=parser, engine=engine) + expec = df[df.a == "test & test"] + tm.assert_frame_equal(res, expec) + + @pytest.mark.parametrize( + "op, func", + [ + ["<", operator.lt], + [">", operator.gt], + ["<=", operator.le], + [">=", operator.ge], + ], + ) + def test_query_lex_compare_strings(self, parser, engine, op, func): + a = Series(np.random.default_rng(2).choice(list("abcde"), 20)) + b = Series(np.arange(a.size)) + df = DataFrame({"X": a, "Y": b}) + + res = df.query(f'X {op} "d"', engine=engine, parser=parser) + expected = df[func(df.X, "d")] + tm.assert_frame_equal(res, expected) + + def test_query_single_element_booleans(self, parser, engine): + columns = "bid", "bidsize", "ask", "asksize" + data = np.random.default_rng(2).integers(2, size=(1, len(columns))).astype(bool) + df = DataFrame(data, columns=columns) + res = df.query("bid & ask", engine=engine, parser=parser) + expected = df[df.bid & df.ask] + tm.assert_frame_equal(res, expected) + + def test_query_string_scalar_variable(self, parser, engine): + skip_if_no_pandas_parser(parser) + df = DataFrame( + { + "Symbol": ["BUD US", "BUD US", "IBM US", "IBM US"], + "Price": [109.70, 109.72, 183.30, 183.35], + } + ) + e = df[df.Symbol == "BUD US"] + symb = "BUD US" # noqa: F841 + r = df.query("Symbol == @symb", parser=parser, engine=engine) + tm.assert_frame_equal(e, r) + + @pytest.mark.parametrize( + "in_list", + [ + [None, "asdf", "ghjk"], + ["asdf", None, "ghjk"], + ["asdf", "ghjk", None], + [None, None, "asdf"], + ["asdf", None, None], + [None, None, None], + ], + ) + def test_query_string_null_elements(self, in_list): + # GITHUB ISSUE #31516 + parser = "pandas" + engine = "python" + expected = {i: value for i, value in enumerate(in_list) if value == "asdf"} + + df_expected = DataFrame({"a": expected}, dtype="string") + df_expected.index = df_expected.index.astype("int64") + df = DataFrame({"a": in_list}, dtype="string") + res1 = df.query("a == 'asdf'", parser=parser, engine=engine) + res2 = df[df["a"] == "asdf"] + res3 = df.query("a <= 'asdf'", parser=parser, engine=engine) + tm.assert_frame_equal(res1, df_expected) + tm.assert_frame_equal(res1, res2) + tm.assert_frame_equal(res1, res3) + tm.assert_frame_equal(res2, res3) + + +class TestDataFrameEvalWithFrame: + @pytest.fixture + def frame(self): + return DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), columns=list("abc") + ) + + def test_simple_expr(self, frame, parser, engine): + res = frame.eval("a + b", engine=engine, parser=parser) + expect = frame.a + frame.b + tm.assert_series_equal(res, expect) + + def test_bool_arith_expr(self, frame, parser, engine): + res = frame.eval("a[a < 1] + b", engine=engine, parser=parser) + expect = frame.a[frame.a < 1] + frame.b + tm.assert_series_equal(res, expect) + + @pytest.mark.parametrize("op", ["+", "-", "*", "/"]) + def test_invalid_type_for_operator_raises(self, parser, engine, op): + df = DataFrame({"a": [1, 2], "b": ["c", "d"]}) + msg = r"unsupported operand type\(s\) for .+: '.+' and '.+'" + + with pytest.raises(TypeError, match=msg): + df.eval(f"a {op} b", engine=engine, parser=parser) + + +class TestDataFrameQueryBacktickQuoting: + @pytest.fixture + def df(self): + """ + Yields a dataframe with strings that may or may not need escaping + by backticks. 
The last two columns cannot be escaped by backticks
+        and should raise a ValueError.
+        """
+        yield DataFrame(
+            {
+                "A": [1, 2, 3],
+                "B B": [3, 2, 1],
+                "C C": [4, 5, 6],
+                "C  C": [7, 4, 3],
+                "C_C": [8, 9, 10],
+                "D_D D": [11, 1, 101],
+                "E.E": [6, 3, 5],
+                "F-F": [8, 1, 10],
+                "1e1": [2, 4, 8],
+                "def": [10, 11, 2],
+                "A (x)": [4, 1, 3],
+                "B(x)": [1, 1, 5],
+                "B (x)": [2, 7, 4],
+                " &^ :!€$?(} > <++*'' ": [2, 5, 6],
+                "": [10, 11, 1],
+                " A": [4, 7, 9],
+                " ": [1, 2, 1],
+                "it's": [6, 3, 1],
+                "that's": [9, 1, 8],
+                "☺": [8, 7, 6],
+                "foo#bar": [2, 4, 5],
+                1: [5, 7, 9],
+            }
+        )
+
+    def test_single_backtick_variable_query(self, df):
+        res = df.query("1 < `B B`")
+        expect = df[1 < df["B B"]]
+        tm.assert_frame_equal(res, expect)
+
+    def test_two_backtick_variables_query(self, df):
+        res = df.query("1 < `B B` and 4 < `C C`")
+        expect = df[(1 < df["B B"]) & (4 < df["C C"])]
+        tm.assert_frame_equal(res, expect)
+
+    def test_single_backtick_variable_expr(self, df):
+        res = df.eval("A + `B B`")
+        expect = df["A"] + df["B B"]
+        tm.assert_series_equal(res, expect)
+
+    def test_two_backtick_variables_expr(self, df):
+        res = df.eval("`B B` + `C C`")
+        expect = df["B B"] + df["C C"]
+        tm.assert_series_equal(res, expect)
+
+    def test_already_underscore_variable(self, df):
+        res = df.eval("`C_C` + A")
+        expect = df["C_C"] + df["A"]
+        tm.assert_series_equal(res, expect)
+
+    def test_same_name_but_underscores(self, df):
+        res = df.eval("C_C + `C C`")
+        expect = df["C_C"] + df["C C"]
+        tm.assert_series_equal(res, expect)
+
+    def test_mixed_underscores_and_spaces(self, df):
+        res = df.eval("A + `D_D D`")
+        expect = df["A"] + df["D_D D"]
+        tm.assert_series_equal(res, expect)
+
+    def test_backtick_quote_name_with_no_spaces(self, df):
+        res = df.eval("A + `C_C`")
+        expect = df["A"] + df["C_C"]
+        tm.assert_series_equal(res, expect)
+
+    def test_special_characters(self, df):
+        res = df.eval("`E.E` + `F-F` - A")
+        expect = df["E.E"] + df["F-F"] - df["A"]
+        tm.assert_series_equal(res, expect)
+
+    def test_start_with_digit(self, df):
+        res = df.eval("A + `1e1`")
+        expect = df["A"] + df["1e1"]
+        tm.assert_series_equal(res, expect)
+
+    def test_keyword(self, df):
+        res = df.eval("A + `def`")
+        expect = df["A"] + df["def"]
+        tm.assert_series_equal(res, expect)
+
+    def test_unneeded_quoting(self, df):
+        res = df.query("`A` > 2")
+        expect = df[df["A"] > 2]
+        tm.assert_frame_equal(res, expect)
+
+    def test_parenthesis(self, df):
+        res = df.query("`A (x)` > 2")
+        expect = df[df["A (x)"] > 2]
+        tm.assert_frame_equal(res, expect)
+
+    def test_empty_string(self, df):
+        res = df.query("`` > 5")
+        expect = df[df[""] > 5]
+        tm.assert_frame_equal(res, expect)
+
+    def test_multiple_spaces(self, df):
+        res = df.query("`C  C` > 5")
+        expect = df[df["C  C"] > 5]
+        tm.assert_frame_equal(res, expect)
+
+    def test_start_with_spaces(self, df):
+        res = df.eval("` A` + ` `")
+        expect = df[" A"] + df[" "]
+        tm.assert_series_equal(res, expect)
+
+    def test_lots_of_operators_string(self, df):
+        res = df.query("` &^ :!€$?(} > <++*'' ` > 4")
+        expect = df[df[" &^ :!€$?(} > <++*'' "] > 4]
+        tm.assert_frame_equal(res, expect)
+
+    def test_missing_attribute(self, df):
+        message = "module 'pandas' has no attribute 'thing'"
+        with pytest.raises(AttributeError, match=message):
+            df.eval("@pd.thing")
+
+    def test_failing_quote(self, df):
+        msg = r"(Could not convert ).*( to a valid Python identifier.)"
+        with pytest.raises(SyntaxError, match=msg):
+            df.query("`it's` > `that's`")
+
+    def test_failing_character_outside_range(self, df):
+        msg = 
r"(Could not convert ).*( to a valid Python identifier.)" + with pytest.raises(SyntaxError, match=msg): + df.query("`☺` > 4") + + def test_failing_hashtag(self, df): + msg = "Failed to parse backticks" + with pytest.raises(SyntaxError, match=msg): + df.query("`foo#bar` > 4") + + def test_call_non_named_expression(self, df): + """ + Only attributes and variables ('named functions') can be called. + .__call__() is not an allowed attribute because that would allow + calling anything. + https://github.com/pandas-dev/pandas/pull/32460 + """ + + def func(*_): + return 1 + + funcs = [func] # noqa: F841 + + df.eval("@func()") + + with pytest.raises(TypeError, match="Only named functions are supported"): + df.eval("@funcs[0]()") + + with pytest.raises(TypeError, match="Only named functions are supported"): + df.eval("@funcs[0].__call__()") + + def test_ea_dtypes(self, any_numeric_ea_and_arrow_dtype): + # GH#29618 + df = DataFrame( + [[1, 2], [3, 4]], columns=["a", "b"], dtype=any_numeric_ea_and_arrow_dtype + ) + warning = RuntimeWarning if NUMEXPR_INSTALLED else None + with tm.assert_produces_warning(warning): + result = df.eval("c = b - a") + expected = DataFrame( + [[1, 2, 1], [3, 4, 1]], + columns=["a", "b", "c"], + dtype=any_numeric_ea_and_arrow_dtype, + ) + tm.assert_frame_equal(result, expected) + + def test_ea_dtypes_and_scalar(self): + # GH#29618 + df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"], dtype="Float64") + warning = RuntimeWarning if NUMEXPR_INSTALLED else None + with tm.assert_produces_warning(warning): + result = df.eval("c = b - 1") + expected = DataFrame( + [[1, 2, 1], [3, 4, 3]], columns=["a", "b", "c"], dtype="Float64" + ) + tm.assert_frame_equal(result, expected) + + def test_ea_dtypes_and_scalar_operation(self, any_numeric_ea_and_arrow_dtype): + # GH#29618 + df = DataFrame( + [[1, 2], [3, 4]], columns=["a", "b"], dtype=any_numeric_ea_and_arrow_dtype + ) + result = df.eval("c = 2 - 1") + expected = DataFrame( + { + "a": Series([1, 3], dtype=any_numeric_ea_and_arrow_dtype), + "b": Series([2, 4], dtype=any_numeric_ea_and_arrow_dtype), + "c": Series([1, 1], dtype=result["c"].dtype), + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["int64", "Int64", "int64[pyarrow]"]) + def test_query_ea_dtypes(self, dtype): + if dtype == "int64[pyarrow]": + pytest.importorskip("pyarrow") + # GH#50261 + df = DataFrame({"a": Series([1, 2], dtype=dtype)}) + ref = {2} # noqa: F841 + warning = RuntimeWarning if dtype == "Int64" and NUMEXPR_INSTALLED else None + with tm.assert_produces_warning(warning): + result = df.query("a in @ref") + expected = DataFrame({"a": Series([2], dtype=dtype, index=[1])}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("engine", ["python", "numexpr"]) + @pytest.mark.parametrize("dtype", ["int64", "Int64", "int64[pyarrow]"]) + def test_query_ea_equality_comparison(self, dtype, engine): + # GH#50261 + warning = RuntimeWarning if engine == "numexpr" else None + if engine == "numexpr" and not NUMEXPR_INSTALLED: + pytest.skip("numexpr not installed") + if dtype == "int64[pyarrow]": + pytest.importorskip("pyarrow") + df = DataFrame( + {"A": Series([1, 1, 2], dtype="Int64"), "B": Series([1, 2, 2], dtype=dtype)} + ) + with tm.assert_produces_warning(warning): + result = df.query("A == B", engine=engine) + expected = DataFrame( + { + "A": Series([1, 2], dtype="Int64", index=[0, 2]), + "B": Series([1, 2], dtype=dtype, index=[0, 2]), + } + ) + tm.assert_frame_equal(result, expected) diff --git 
a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_reductions.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_reductions.py
new file mode 100644
index 00000000..bec1fcd1
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_reductions.py
@@ -0,0 +1,2043 @@
+from datetime import timedelta
+from decimal import Decimal
+import re
+
+from dateutil.tz import tzlocal
+import numpy as np
+import pytest
+
+from pandas.compat import (
+    IS64,
+    is_platform_windows,
+)
+from pandas.compat.numpy import np_version_gt2
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import (
+    Categorical,
+    CategoricalDtype,
+    DataFrame,
+    Index,
+    Series,
+    Timestamp,
+    date_range,
+    isna,
+    notna,
+    to_datetime,
+    to_timedelta,
+)
+import pandas._testing as tm
+from pandas.core import (
+    algorithms,
+    nanops,
+)
+
+is_windows_np2_or_is32 = (is_platform_windows() and not np_version_gt2) or not IS64
+is_windows_or_is32 = is_platform_windows() or not IS64
+
+
+def assert_stat_op_calc(
+    opname,
+    alternative,
+    frame,
+    has_skipna=True,
+    check_dtype=True,
+    check_dates=False,
+    rtol=1e-5,
+    atol=1e-8,
+    skipna_alternative=None,
+):
+    """
+    Check that operator opname works as advertised on frame
+
+    Parameters
+    ----------
+    opname : str
+        Name of the operator to test on frame
+    alternative : function
+        Function that opname is tested against; i.e. "frame.opname()" should
+        equal "alternative(frame)".
+    frame : DataFrame
+        The object that the tests are executed on
+    has_skipna : bool, default True
+        Whether the method "opname" has the kwarg "skipna"
+    check_dtype : bool, default True
+        Whether the dtypes of the result of "frame.opname()" and
+        "alternative(frame)" should be checked.
+    check_dates : bool, default False
+        Whether opname should be tested on a Datetime Series
+    rtol : float, default 1e-5
+        Relative tolerance.
+    atol : float, default 1e-8
+        Absolute tolerance.
+ skipna_alternative : function, default None + NaN-safe version of alternative + """ + f = getattr(frame, opname) + + if check_dates: + df = DataFrame({"b": date_range("1/1/2001", periods=2)}) + with tm.assert_produces_warning(None): + result = getattr(df, opname)() + assert isinstance(result, Series) + + df["a"] = range(len(df)) + with tm.assert_produces_warning(None): + result = getattr(df, opname)() + assert isinstance(result, Series) + assert len(result) + + if has_skipna: + + def wrapper(x): + return alternative(x.values) + + skipna_wrapper = tm._make_skipna_wrapper(alternative, skipna_alternative) + result0 = f(axis=0, skipna=False) + result1 = f(axis=1, skipna=False) + tm.assert_series_equal( + result0, frame.apply(wrapper), check_dtype=check_dtype, rtol=rtol, atol=atol + ) + tm.assert_series_equal( + result1, + frame.apply(wrapper, axis=1), + rtol=rtol, + atol=atol, + ) + else: + skipna_wrapper = alternative + + result0 = f(axis=0) + result1 = f(axis=1) + tm.assert_series_equal( + result0, + frame.apply(skipna_wrapper), + check_dtype=check_dtype, + rtol=rtol, + atol=atol, + ) + + if opname in ["sum", "prod"]: + expected = frame.apply(skipna_wrapper, axis=1) + tm.assert_series_equal( + result1, expected, check_dtype=False, rtol=rtol, atol=atol + ) + + # check dtypes + if check_dtype: + lcd_dtype = frame.values.dtype + assert lcd_dtype == result0.dtype + assert lcd_dtype == result1.dtype + + # bad axis + with pytest.raises(ValueError, match="No axis named 2"): + f(axis=2) + + # all NA case + if has_skipna: + all_na = frame * np.nan + r0 = getattr(all_na, opname)(axis=0) + r1 = getattr(all_na, opname)(axis=1) + if opname in ["sum", "prod"]: + unit = 1 if opname == "prod" else 0 # result for empty sum/prod + expected = Series(unit, index=r0.index, dtype=r0.dtype) + tm.assert_series_equal(r0, expected) + expected = Series(unit, index=r1.index, dtype=r1.dtype) + tm.assert_series_equal(r1, expected) + + +class TestDataFrameAnalytics: + # --------------------------------------------------------------------- + # Reductions + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.parametrize( + "opname", + [ + "count", + "sum", + "mean", + "product", + "median", + "min", + "max", + "nunique", + "var", + "std", + "sem", + pytest.param("skew", marks=td.skip_if_no_scipy), + pytest.param("kurt", marks=td.skip_if_no_scipy), + ], + ) + def test_stat_op_api_float_string_frame(self, float_string_frame, axis, opname): + if (opname in ("sum", "min", "max") and axis == 0) or opname in ( + "count", + "nunique", + ): + getattr(float_string_frame, opname)(axis=axis) + else: + if opname in ["var", "std", "sem", "skew", "kurt"]: + msg = "could not convert string to float: 'bar'" + elif opname == "product": + if axis == 1: + msg = "can't multiply sequence by non-int of type 'float'" + else: + msg = "can't multiply sequence by non-int of type 'str'" + elif opname == "sum": + msg = r"unsupported operand type\(s\) for \+: 'float' and 'str'" + elif opname == "mean": + if axis == 0: + # different message on different builds + msg = "|".join( + [ + r"Could not convert \['.*'\] to numeric", + "Could not convert string '(bar){30}' to numeric", + ] + ) + else: + msg = r"unsupported operand type\(s\) for \+: 'float' and 'str'" + elif opname in ["min", "max"]: + msg = "'[><]=' not supported between instances of 'float' and 'str'" + elif opname == "median": + msg = re.compile(r"Cannot convert \[.*\] to numeric", flags=re.S) + with pytest.raises(TypeError, match=msg): + getattr(float_string_frame, opname)(axis=axis) + 
if opname != "nunique":
+            getattr(float_string_frame, opname)(axis=axis, numeric_only=True)
+
+    @pytest.mark.parametrize("axis", [0, 1])
+    @pytest.mark.parametrize(
+        "opname",
+        [
+            "count",
+            "sum",
+            "mean",
+            "product",
+            "median",
+            "min",
+            "max",
+            "var",
+            "std",
+            "sem",
+            pytest.param("skew", marks=td.skip_if_no_scipy),
+            pytest.param("kurt", marks=td.skip_if_no_scipy),
+        ],
+    )
+    def test_stat_op_api_float_frame(self, float_frame, axis, opname):
+        getattr(float_frame, opname)(axis=axis, numeric_only=False)
+
+    def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
+        def count(s):
+            return notna(s).sum()
+
+        def nunique(s):
+            return len(algorithms.unique1d(s.dropna()))
+
+        def var(x):
+            return np.var(x, ddof=1)
+
+        def std(x):
+            return np.std(x, ddof=1)
+
+        def sem(x):
+            return np.std(x, ddof=1) / np.sqrt(len(x))
+
+        assert_stat_op_calc(
+            "nunique",
+            nunique,
+            float_frame_with_na,
+            has_skipna=False,
+            check_dtype=False,
+            check_dates=True,
+        )
+
+        # GH#32571: rtol needed for flaky CI builds
+        # mixed types (with upcasting happening)
+        assert_stat_op_calc(
+            "sum",
+            np.sum,
+            mixed_float_frame.astype("float32"),
+            check_dtype=False,
+            rtol=1e-3,
+        )
+
+        assert_stat_op_calc(
+            "sum", np.sum, float_frame_with_na, skipna_alternative=np.nansum
+        )
+        assert_stat_op_calc("mean", np.mean, float_frame_with_na, check_dates=True)
+        assert_stat_op_calc(
+            "product", np.prod, float_frame_with_na, skipna_alternative=np.nanprod
+        )
+
+        assert_stat_op_calc("var", var, float_frame_with_na)
+        assert_stat_op_calc("std", std, float_frame_with_na)
+        assert_stat_op_calc("sem", sem, float_frame_with_na)
+
+        assert_stat_op_calc(
+            "count",
+            count,
+            float_frame_with_na,
+            has_skipna=False,
+            check_dtype=False,
+            check_dates=True,
+        )
+
+    def test_stat_op_calc_skew_kurtosis(self, float_frame_with_na):
+        sp_stats = pytest.importorskip("scipy.stats")
+
+        def skewness(x):
+            if len(x) < 3:
+                return np.nan
+            return sp_stats.skew(x, bias=False)
+
+        def kurt(x):
+            if len(x) < 4:
+                return np.nan
+            return sp_stats.kurtosis(x, bias=False)
+
+        assert_stat_op_calc("skew", skewness, float_frame_with_na)
+        assert_stat_op_calc("kurt", kurt, float_frame_with_na)
+
+    def test_median(self, float_frame_with_na, int_frame):
+        def wrapper(x):
+            if isna(x).any():
+                return np.nan
+            return np.median(x)
+
+        assert_stat_op_calc("median", wrapper, float_frame_with_na, check_dates=True)
+        assert_stat_op_calc(
+            "median", wrapper, int_frame, check_dtype=False, check_dates=True
+        )
+
+    @pytest.mark.parametrize(
+        "method", ["sum", "mean", "prod", "var", "std", "skew", "min", "max"]
+    )
+    @pytest.mark.parametrize(
+        "df",
+        [
+            DataFrame(
+                {
+                    "a": [
+                        -0.00049987540199591344,
+                        -0.0016467257772919831,
+                        0.00067695870775883013,
+                    ],
+                    "b": [-0, -0, 0.0],
+                    "c": [
+                        0.00031111847529610595,
+                        0.0014902627951905339,
+                        -0.00094099200035979691,
+                    ],
+                },
+                index=["foo", "bar", "baz"],
+                dtype="O",
+            ),
+            DataFrame({0: [np.nan, 2], 1: [np.nan, 3], 2: [np.nan, 4]}, dtype=object),
+        ],
+    )
+    @pytest.mark.filterwarnings("ignore:Mismatched null-like values:FutureWarning")
+    def test_stat_operators_attempt_obj_array(self, method, df, axis):
+        # GH#676
+        assert df.values.dtype == np.object_
+        result = getattr(df, method)(axis=axis)
+        expected = getattr(df.astype("f8"), method)(axis=axis).astype(object)
+        if axis in [1, "columns"] and method in ["min", "max"]:
+            expected[expected.isna()] = None
+        tm.assert_series_equal(result, expected)
+
+    @pytest.mark.parametrize("op", ["mean", "std", "var", "skew", "kurt", "sem"])
+    def 
test_mixed_ops(self, op): + # GH#16116 + df = DataFrame( + { + "int": [1, 2, 3, 4], + "float": [1.0, 2.0, 3.0, 4.0], + "str": ["a", "b", "c", "d"], + } + ) + msg = "|".join( + [ + "Could not convert", + "could not convert", + "can't multiply sequence by non-int", + ] + ) + with pytest.raises(TypeError, match=msg): + getattr(df, op)() + + with pd.option_context("use_bottleneck", False): + msg = "|".join( + [ + "Could not convert", + "could not convert", + "can't multiply sequence by non-int", + ] + ) + with pytest.raises(TypeError, match=msg): + getattr(df, op)() + + def test_reduce_mixed_frame(self): + # GH 6806 + df = DataFrame( + { + "bool_data": [True, True, False, False, False], + "int_data": [10, 20, 30, 40, 50], + "string_data": ["a", "b", "c", "d", "e"], + } + ) + df.reindex(columns=["bool_data", "int_data", "string_data"]) + test = df.sum(axis=0) + tm.assert_numpy_array_equal( + test.values, np.array([2, 150, "abcde"], dtype=object) + ) + alt = df.T.sum(axis=1) + tm.assert_series_equal(test, alt) + + def test_nunique(self): + df = DataFrame({"A": [1, 1, 1], "B": [1, 2, 3], "C": [1, np.nan, 3]}) + tm.assert_series_equal(df.nunique(), Series({"A": 1, "B": 3, "C": 2})) + tm.assert_series_equal( + df.nunique(dropna=False), Series({"A": 1, "B": 3, "C": 3}) + ) + tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2})) + tm.assert_series_equal( + df.nunique(axis=1, dropna=False), Series({0: 1, 1: 3, 2: 2}) + ) + + @pytest.mark.parametrize("tz", [None, "UTC"]) + def test_mean_mixed_datetime_numeric(self, tz): + # https://github.com/pandas-dev/pandas/issues/24752 + df = DataFrame({"A": [1, 1], "B": [Timestamp("2000", tz=tz)] * 2}) + result = df.mean() + expected = Series([1.0, Timestamp("2000", tz=tz)], index=["A", "B"]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("tz", [None, "UTC"]) + def test_mean_includes_datetimes(self, tz): + # https://github.com/pandas-dev/pandas/issues/24752 + # Behavior in 0.24.0rc1 was buggy. + # As of 2.0 with numeric_only=None we do *not* drop datetime columns + df = DataFrame({"A": [Timestamp("2000", tz=tz)] * 2}) + result = df.mean() + + expected = Series([Timestamp("2000", tz=tz)], index=["A"]) + tm.assert_series_equal(result, expected) + + def test_mean_mixed_string_decimal(self): + # GH 11670 + # possible bug when calculating mean of DataFrame? 
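+        # [editorial note, not in the vendored pandas file] Sketch of the
+        # behavior exercised below: Decimal values live in object-dtype
+        # columns, so a frame-wide mean() raises once it hits the lone "TEST"
+        # string, while a numeric column subset still reduces, e.g.
+        # (hypothetical data):
+        #     DataFrame({"A": [1, 2], "C": [Decimal("1"), Decimal("3")]}).mean()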
+ + d = [ + {"A": 2, "B": None, "C": Decimal("628.00")}, + {"A": 1, "B": None, "C": Decimal("383.00")}, + {"A": 3, "B": None, "C": Decimal("651.00")}, + {"A": 2, "B": None, "C": Decimal("575.00")}, + {"A": 4, "B": None, "C": Decimal("1114.00")}, + {"A": 1, "B": "TEST", "C": Decimal("241.00")}, + {"A": 2, "B": None, "C": Decimal("572.00")}, + {"A": 4, "B": None, "C": Decimal("609.00")}, + {"A": 3, "B": None, "C": Decimal("820.00")}, + {"A": 5, "B": None, "C": Decimal("1223.00")}, + ] + + df = DataFrame(d) + + with pytest.raises(TypeError, match="unsupported operand type"): + df.mean() + result = df[["A", "C"]].mean() + expected = Series([2.7, 681.6], index=["A", "C"], dtype=object) + tm.assert_series_equal(result, expected) + + def test_var_std(self, datetime_frame): + result = datetime_frame.std(ddof=4) + expected = datetime_frame.apply(lambda x: x.std(ddof=4)) + tm.assert_almost_equal(result, expected) + + result = datetime_frame.var(ddof=4) + expected = datetime_frame.apply(lambda x: x.var(ddof=4)) + tm.assert_almost_equal(result, expected) + + arr = np.repeat(np.random.default_rng(2).random((1, 1000)), 1000, 0) + result = nanops.nanvar(arr, axis=0) + assert not (result < 0).any() + + with pd.option_context("use_bottleneck", False): + result = nanops.nanvar(arr, axis=0) + assert not (result < 0).any() + + @pytest.mark.parametrize("meth", ["sem", "var", "std"]) + def test_numeric_only_flag(self, meth): + # GH 9201 + df1 = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + columns=["foo", "bar", "baz"], + ) + # Cast to object to avoid implicit cast when setting entry to "100" below + df1 = df1.astype({"foo": object}) + # set one entry to a number in str format + df1.loc[0, "foo"] = "100" + + df2 = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + columns=["foo", "bar", "baz"], + ) + # Cast to object to avoid implicit cast when setting entry to "a" below + df2 = df2.astype({"foo": object}) + # set one entry to a non-number str + df2.loc[0, "foo"] = "a" + + result = getattr(df1, meth)(axis=1, numeric_only=True) + expected = getattr(df1[["bar", "baz"]], meth)(axis=1) + tm.assert_series_equal(expected, result) + + result = getattr(df2, meth)(axis=1, numeric_only=True) + expected = getattr(df2[["bar", "baz"]], meth)(axis=1) + tm.assert_series_equal(expected, result) + + # df1 has all numbers, df2 has a letter inside + msg = r"unsupported operand type\(s\) for -: 'float' and 'str'" + with pytest.raises(TypeError, match=msg): + getattr(df1, meth)(axis=1, numeric_only=False) + msg = "could not convert string to float: 'a'" + with pytest.raises(TypeError, match=msg): + getattr(df2, meth)(axis=1, numeric_only=False) + + def test_sem(self, datetime_frame): + result = datetime_frame.sem(ddof=4) + expected = datetime_frame.apply(lambda x: x.std(ddof=4) / np.sqrt(len(x))) + tm.assert_almost_equal(result, expected) + + arr = np.repeat(np.random.default_rng(2).random((1, 1000)), 1000, 0) + result = nanops.nansem(arr, axis=0) + assert not (result < 0).any() + + with pd.option_context("use_bottleneck", False): + result = nanops.nansem(arr, axis=0) + assert not (result < 0).any() + + @pytest.mark.parametrize( + "dropna, expected", + [ + ( + True, + { + "A": [12], + "B": [10.0], + "C": [1.0], + "D": ["a"], + "E": Categorical(["a"], categories=["a"]), + "F": to_datetime(["2000-1-2"]), + "G": to_timedelta(["1 days"]), + }, + ), + ( + False, + { + "A": [12], + "B": [10.0], + "C": [np.nan], + "D": np.array([np.nan], dtype=object), + "E": Categorical([np.nan], categories=["a"]), + "F": 
[pd.NaT], + "G": to_timedelta([pd.NaT]), + }, + ), + ( + True, + { + "H": [8, 9, np.nan, np.nan], + "I": [8, 9, np.nan, np.nan], + "J": [1, np.nan, np.nan, np.nan], + "K": Categorical(["a", np.nan, np.nan, np.nan], categories=["a"]), + "L": to_datetime(["2000-1-2", "NaT", "NaT", "NaT"]), + "M": to_timedelta(["1 days", "nan", "nan", "nan"]), + "N": [0, 1, 2, 3], + }, + ), + ( + False, + { + "H": [8, 9, np.nan, np.nan], + "I": [8, 9, np.nan, np.nan], + "J": [1, np.nan, np.nan, np.nan], + "K": Categorical([np.nan, "a", np.nan, np.nan], categories=["a"]), + "L": to_datetime(["NaT", "2000-1-2", "NaT", "NaT"]), + "M": to_timedelta(["nan", "1 days", "nan", "nan"]), + "N": [0, 1, 2, 3], + }, + ), + ], + ) + def test_mode_dropna(self, dropna, expected): + df = DataFrame( + { + "A": [12, 12, 19, 11], + "B": [10, 10, np.nan, 3], + "C": [1, np.nan, np.nan, np.nan], + "D": [np.nan, np.nan, "a", np.nan], + "E": Categorical([np.nan, np.nan, "a", np.nan]), + "F": to_datetime(["NaT", "2000-1-2", "NaT", "NaT"]), + "G": to_timedelta(["1 days", "nan", "nan", "nan"]), + "H": [8, 8, 9, 9], + "I": [9, 9, 8, 8], + "J": [1, 1, np.nan, np.nan], + "K": Categorical(["a", np.nan, "a", np.nan]), + "L": to_datetime(["2000-1-2", "2000-1-2", "NaT", "NaT"]), + "M": to_timedelta(["1 days", "nan", "1 days", "nan"]), + "N": np.arange(4, dtype="int64"), + } + ) + + result = df[sorted(expected.keys())].mode(dropna=dropna) + expected = DataFrame(expected) + tm.assert_frame_equal(result, expected) + + def test_mode_sortwarning(self): + # Check for the warning that is raised when the mode + # results cannot be sorted + + df = DataFrame({"A": [np.nan, np.nan, "a", "a"]}) + expected = DataFrame({"A": ["a", np.nan]}) + + with tm.assert_produces_warning(UserWarning): + result = df.mode(dropna=False) + result = result.sort_values(by="A").reset_index(drop=True) + + tm.assert_frame_equal(result, expected) + + def test_mode_empty_df(self): + df = DataFrame([], columns=["a", "b"]) + result = df.mode() + expected = DataFrame([], columns=["a", "b"], index=Index([], dtype=np.int64)) + tm.assert_frame_equal(result, expected) + + def test_operators_timedelta64(self): + df = DataFrame( + { + "A": date_range("2012-1-1", periods=3, freq="D"), + "B": date_range("2012-1-2", periods=3, freq="D"), + "C": Timestamp("20120101") - timedelta(minutes=5, seconds=5), + } + ) + + diffs = DataFrame({"A": df["A"] - df["C"], "B": df["A"] - df["B"]}) + + # min + result = diffs.min() + assert result.iloc[0] == diffs.loc[0, "A"] + assert result.iloc[1] == diffs.loc[0, "B"] + + result = diffs.min(axis=1) + assert (result == diffs.loc[0, "B"]).all() + + # max + result = diffs.max() + assert result.iloc[0] == diffs.loc[2, "A"] + assert result.iloc[1] == diffs.loc[2, "B"] + + result = diffs.max(axis=1) + assert (result == diffs["A"]).all() + + # abs + result = diffs.abs() + result2 = abs(diffs) + expected = DataFrame({"A": df["A"] - df["C"], "B": df["B"] - df["A"]}) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + # mixed frame + mixed = diffs.copy() + mixed["C"] = "foo" + mixed["D"] = 1 + mixed["E"] = 1.0 + mixed["F"] = Timestamp("20130101") + + # results in an object array + result = mixed.min() + expected = Series( + [ + pd.Timedelta(timedelta(seconds=5 * 60 + 5)), + pd.Timedelta(timedelta(days=-1)), + "foo", + 1, + 1.0, + Timestamp("20130101"), + ], + index=mixed.columns, + ) + tm.assert_series_equal(result, expected) + + # excludes non-numeric + result = mixed.min(axis=1, numeric_only=True) + expected = Series([1, 1, 1.0], 
index=[0, 1, 2]) + tm.assert_series_equal(result, expected) + + # works when only those columns are selected + result = mixed[["A", "B"]].min(1) + expected = Series([timedelta(days=-1)] * 3) + tm.assert_series_equal(result, expected) + + result = mixed[["A", "B"]].min() + expected = Series( + [timedelta(seconds=5 * 60 + 5), timedelta(days=-1)], index=["A", "B"] + ) + tm.assert_series_equal(result, expected) + + # GH 3106 + df = DataFrame( + { + "time": date_range("20130102", periods=5), + "time2": date_range("20130105", periods=5), + } + ) + df["off1"] = df["time2"] - df["time"] + assert df["off1"].dtype == "timedelta64[ns]" + + df["off2"] = df["time"] - df["time2"] + df._consolidate_inplace() + assert df["off1"].dtype == "timedelta64[ns]" + assert df["off2"].dtype == "timedelta64[ns]" + + def test_std_timedelta64_skipna_false(self): + # GH#37392 + tdi = pd.timedelta_range("1 Day", periods=10) + df = DataFrame({"A": tdi, "B": tdi}, copy=True) + df.iloc[-2, -1] = pd.NaT + + result = df.std(skipna=False) + expected = Series( + [df["A"].std(), pd.NaT], index=["A", "B"], dtype="timedelta64[ns]" + ) + tm.assert_series_equal(result, expected) + + result = df.std(axis=1, skipna=False) + expected = Series([pd.Timedelta(0)] * 8 + [pd.NaT, pd.Timedelta(0)]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "values", [["2022-01-01", "2022-01-02", pd.NaT, "2022-01-03"], 4 * [pd.NaT]] + ) + def test_std_datetime64_with_nat( + self, values, skipna, using_array_manager, request + ): + # GH#51335 + if using_array_manager and ( + not skipna or all(value is pd.NaT for value in values) + ): + mark = pytest.mark.xfail( + reason="GH#51446: Incorrect type inference on NaT in reduction result" + ) + request.node.add_marker(mark) + df = DataFrame({"a": to_datetime(values)}) + result = df.std(skipna=skipna) + if not skipna or all(value is pd.NaT for value in values): + expected = Series({"a": pd.NaT}, dtype="timedelta64[ns]") + else: + # 86400000000000ns == 1 day + expected = Series({"a": 86400000000000}, dtype="timedelta64[ns]") + tm.assert_series_equal(result, expected) + + def test_sum_corner(self): + empty_frame = DataFrame() + + axis0 = empty_frame.sum(0) + axis1 = empty_frame.sum(1) + assert isinstance(axis0, Series) + assert isinstance(axis1, Series) + assert len(axis0) == 0 + assert len(axis1) == 0 + + @pytest.mark.parametrize( + "index", + [ + tm.makeRangeIndex(0), + tm.makeDateIndex(0), + tm.makeNumericIndex(0, dtype=int), + tm.makeNumericIndex(0, dtype=float), + tm.makeDateIndex(0, freq="M"), + tm.makePeriodIndex(0), + ], + ) + def test_axis_1_empty(self, all_reductions, index, using_array_manager): + df = DataFrame(columns=["a"], index=index) + result = getattr(df, all_reductions)(axis=1) + if all_reductions in ("any", "all"): + expected_dtype = "bool" + elif all_reductions == "count": + expected_dtype = "int64" + else: + expected_dtype = "object" + expected = Series([], index=index, dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("method, unit", [("sum", 0), ("prod", 1)]) + @pytest.mark.parametrize("numeric_only", [None, True, False]) + def test_sum_prod_nanops(self, method, unit, numeric_only): + idx = ["a", "b", "c"] + df = DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]}) + # The default + result = getattr(df, method)(numeric_only=numeric_only) + expected = Series([unit, unit, unit], index=idx, dtype="float64") + tm.assert_series_equal(result, expected) + + # min_count=1 + result = getattr(df, 
method)(numeric_only=numeric_only, min_count=1) + expected = Series([unit, unit, np.nan], index=idx) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = getattr(df, method)(numeric_only=numeric_only, min_count=0) + expected = Series([unit, unit, unit], index=idx, dtype="float64") + tm.assert_series_equal(result, expected) + + result = getattr(df.iloc[1:], method)(numeric_only=numeric_only, min_count=1) + expected = Series([unit, np.nan, np.nan], index=idx) + tm.assert_series_equal(result, expected) + + # min_count > 1 + df = DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) + result = getattr(df, method)(numeric_only=numeric_only, min_count=5) + expected = Series(result, index=["A", "B"]) + tm.assert_series_equal(result, expected) + + result = getattr(df, method)(numeric_only=numeric_only, min_count=6) + expected = Series(result, index=["A", "B"]) + tm.assert_series_equal(result, expected) + + def test_sum_nanops_timedelta(self): + # prod isn't defined on timedeltas + idx = ["a", "b", "c"] + df = DataFrame({"a": [0, 0], "b": [0, np.nan], "c": [np.nan, np.nan]}) + + df2 = df.apply(to_timedelta) + + # 0 by default + result = df2.sum() + expected = Series([0, 0, 0], dtype="m8[ns]", index=idx) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df2.sum(min_count=0) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df2.sum(min_count=1) + expected = Series([0, 0, np.nan], dtype="m8[ns]", index=idx) + tm.assert_series_equal(result, expected) + + def test_sum_nanops_min_count(self): + # https://github.com/pandas-dev/pandas/issues/39738 + df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) + result = df.sum(min_count=10) + expected = Series([np.nan, np.nan], index=["x", "y"]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("float_type", ["float16", "float32", "float64"]) + @pytest.mark.parametrize( + "kwargs, expected_result", + [ + ({"axis": 1, "min_count": 2}, [3.2, 5.3, np.nan]), + ({"axis": 1, "min_count": 3}, [np.nan, np.nan, np.nan]), + ({"axis": 1, "skipna": False}, [3.2, 5.3, np.nan]), + ], + ) + def test_sum_nanops_dtype_min_count(self, float_type, kwargs, expected_result): + # GH#46947 + df = DataFrame({"a": [1.0, 2.3, 4.4], "b": [2.2, 3, np.nan]}, dtype=float_type) + result = df.sum(**kwargs) + expected = Series(expected_result).astype(float_type) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("float_type", ["float16", "float32", "float64"]) + @pytest.mark.parametrize( + "kwargs, expected_result", + [ + ({"axis": 1, "min_count": 2}, [2.0, 4.0, np.nan]), + ({"axis": 1, "min_count": 3}, [np.nan, np.nan, np.nan]), + ({"axis": 1, "skipna": False}, [2.0, 4.0, np.nan]), + ], + ) + def test_prod_nanops_dtype_min_count(self, float_type, kwargs, expected_result): + # GH#46947 + df = DataFrame( + {"a": [1.0, 2.0, 4.4], "b": [2.0, 2.0, np.nan]}, dtype=float_type + ) + result = df.prod(**kwargs) + expected = Series(expected_result).astype(float_type) + tm.assert_series_equal(result, expected) + + def test_sum_object(self, float_frame): + values = float_frame.values.astype(int) + frame = DataFrame(values, index=float_frame.index, columns=float_frame.columns) + deltas = frame * timedelta(1) + deltas.sum() + + def test_sum_bool(self, float_frame): + # ensure this works, bug report + bools = np.isnan(float_frame) + bools.sum(1) + bools.sum(0) + + def test_sum_mixed_datetime(self): + # GH#30886 + df = DataFrame({"A": date_range("2000", periods=4), "B": [1, 2, 3, 4]}).reindex( + [2, 3, 
4] + ) + with pytest.raises(TypeError, match="does not support reduction 'sum'"): + df.sum() + + def test_mean_corner(self, float_frame, float_string_frame): + # unit test when have object data + with pytest.raises(TypeError, match="Could not convert"): + float_string_frame.mean(axis=0) + + # xs sum mixed type, just want to know it works... + with pytest.raises(TypeError, match="unsupported operand type"): + float_string_frame.mean(axis=1) + + # take mean of boolean column + float_frame["bool"] = float_frame["A"] > 0 + means = float_frame.mean(0) + assert means["bool"] == float_frame["bool"].values.mean() + + def test_mean_datetimelike(self): + # GH#24757 check that datetimelike are excluded by default, handled + # correctly with numeric_only=True + # As of 2.0, datetimelike are *not* excluded with numeric_only=None + + df = DataFrame( + { + "A": np.arange(3), + "B": date_range("2016-01-01", periods=3), + "C": pd.timedelta_range("1D", periods=3), + "D": pd.period_range("2016", periods=3, freq="A"), + } + ) + result = df.mean(numeric_only=True) + expected = Series({"A": 1.0}) + tm.assert_series_equal(result, expected) + + with pytest.raises(TypeError, match="mean is not implemented for PeriodArray"): + df.mean() + + def test_mean_datetimelike_numeric_only_false(self): + df = DataFrame( + { + "A": np.arange(3), + "B": date_range("2016-01-01", periods=3), + "C": pd.timedelta_range("1D", periods=3), + } + ) + + # datetime(tz) and timedelta work + result = df.mean(numeric_only=False) + expected = Series({"A": 1, "B": df.loc[1, "B"], "C": df.loc[1, "C"]}) + tm.assert_series_equal(result, expected) + + # mean of period is not allowed + df["D"] = pd.period_range("2016", periods=3, freq="A") + + with pytest.raises(TypeError, match="mean is not implemented for Period"): + df.mean(numeric_only=False) + + def test_mean_extensionarray_numeric_only_true(self): + # https://github.com/pandas-dev/pandas/issues/33256 + arr = np.random.default_rng(2).integers(1000, size=(10, 5)) + df = DataFrame(arr, dtype="Int64") + result = df.mean(numeric_only=True) + expected = DataFrame(arr).mean().astype("Float64") + tm.assert_series_equal(result, expected) + + def test_stats_mixed_type(self, float_string_frame): + with pytest.raises(TypeError, match="could not convert"): + float_string_frame.std(1) + with pytest.raises(TypeError, match="could not convert"): + float_string_frame.var(1) + with pytest.raises(TypeError, match="unsupported operand type"): + float_string_frame.mean(1) + with pytest.raises(TypeError, match="could not convert"): + float_string_frame.skew(1) + + def test_sum_bools(self): + df = DataFrame(index=range(1), columns=range(10)) + bools = isna(df) + assert bools.sum(axis=1)[0] == 10 + + # ---------------------------------------------------------------------- + # Index of max / min + + @pytest.mark.parametrize("skipna", [True, False]) + @pytest.mark.parametrize("axis", [0, 1]) + def test_idxmin(self, float_frame, int_frame, skipna, axis): + frame = float_frame + frame.iloc[5:10] = np.nan + frame.iloc[15:20, -2:] = np.nan + for df in [frame, int_frame]: + warn = None + if skipna is False or axis == 1: + warn = None if df is int_frame else FutureWarning + msg = "The behavior of DataFrame.idxmin with all-NA values" + with tm.assert_produces_warning(warn, match=msg): + result = df.idxmin(axis=axis, skipna=skipna) + + msg2 = "The behavior of Series.idxmin" + with tm.assert_produces_warning(warn, match=msg2): + expected = df.apply(Series.idxmin, axis=axis, skipna=skipna) + expected = 
expected.astype(df.index.dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_idxmin_empty(self, index, skipna, axis): + # GH53265 + if axis == 0: + frame = DataFrame(index=index) + else: + frame = DataFrame(columns=index) + + result = frame.idxmin(axis=axis, skipna=skipna) + expected = Series(dtype=index.dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("numeric_only", [True, False]) + def test_idxmin_numeric_only(self, numeric_only): + df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1], "c": list("xyx")}) + result = df.idxmin(numeric_only=numeric_only) + if numeric_only: + expected = Series([2, 1], index=["a", "b"]) + else: + expected = Series([2, 1, 0], index=["a", "b", "c"]) + tm.assert_series_equal(result, expected) + + def test_idxmin_axis_2(self, float_frame): + frame = float_frame + msg = "No axis named 2 for object type DataFrame" + with pytest.raises(ValueError, match=msg): + frame.idxmin(axis=2) + + @pytest.mark.parametrize("skipna", [True, False]) + @pytest.mark.parametrize("axis", [0, 1]) + def test_idxmax(self, float_frame, int_frame, skipna, axis): + frame = float_frame + frame.iloc[5:10] = np.nan + frame.iloc[15:20, -2:] = np.nan + for df in [frame, int_frame]: + warn = None + if skipna is False or axis == 1: + warn = None if df is int_frame else FutureWarning + msg = "The behavior of DataFrame.idxmax with all-NA values" + with tm.assert_produces_warning(warn, match=msg): + result = df.idxmax(axis=axis, skipna=skipna) + + msg2 = "The behavior of Series.idxmax" + with tm.assert_produces_warning(warn, match=msg2): + expected = df.apply(Series.idxmax, axis=axis, skipna=skipna) + expected = expected.astype(df.index.dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_idxmax_empty(self, index, skipna, axis): + # GH53265 + if axis == 0: + frame = DataFrame(index=index) + else: + frame = DataFrame(columns=index) + + result = frame.idxmax(axis=axis, skipna=skipna) + expected = Series(dtype=index.dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("numeric_only", [True, False]) + def test_idxmax_numeric_only(self, numeric_only): + df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1], "c": list("xyx")}) + result = df.idxmax(numeric_only=numeric_only) + if numeric_only: + expected = Series([1, 0], index=["a", "b"]) + else: + expected = Series([1, 0, 1], index=["a", "b", "c"]) + tm.assert_series_equal(result, expected) + + def test_idxmax_arrow_types(self): + # GH#55368 + pytest.importorskip("pyarrow") + + df = DataFrame({"a": [2, 3, 1], "b": [2, 1, 1]}, dtype="int64[pyarrow]") + result = df.idxmax() + expected = Series([1, 0], index=["a", "b"]) + tm.assert_series_equal(result, expected) + + result = df.idxmin() + expected = Series([2, 1], index=["a", "b"]) + tm.assert_series_equal(result, expected) + + df = DataFrame({"a": ["b", "c", "a"]}, dtype="string[pyarrow]") + result = df.idxmax(numeric_only=False) + expected = Series([1], index=["a"]) + tm.assert_series_equal(result, expected) + + result = df.idxmin(numeric_only=False) + expected = Series([2], index=["a"]) + tm.assert_series_equal(result, expected) + + def test_idxmax_axis_2(self, float_frame): + frame = float_frame + msg = "No axis named 2 for object type DataFrame" + with pytest.raises(ValueError, 
match=msg): + frame.idxmax(axis=2) + + def test_idxmax_mixed_dtype(self): + # don't cast to object, which would raise in nanops + dti = date_range("2016-01-01", periods=3) + + # Copying dti is needed for ArrayManager otherwise when we set + # df.loc[0, 3] = pd.NaT below it edits dti + df = DataFrame({1: [0, 2, 1], 2: range(3)[::-1], 3: dti.copy(deep=True)}) + + result = df.idxmax() + expected = Series([1, 0, 2], index=[1, 2, 3]) + tm.assert_series_equal(result, expected) + + result = df.idxmin() + expected = Series([0, 2, 0], index=[1, 2, 3]) + tm.assert_series_equal(result, expected) + + # with NaTs + df.loc[0, 3] = pd.NaT + result = df.idxmax() + expected = Series([1, 0, 2], index=[1, 2, 3]) + tm.assert_series_equal(result, expected) + + result = df.idxmin() + expected = Series([0, 2, 1], index=[1, 2, 3]) + tm.assert_series_equal(result, expected) + + # with multi-column dt64 block + df[4] = dti[::-1] + df._consolidate_inplace() + + result = df.idxmax() + expected = Series([1, 0, 2, 0], index=[1, 2, 3, 4]) + tm.assert_series_equal(result, expected) + + result = df.idxmin() + expected = Series([0, 2, 1, 2], index=[1, 2, 3, 4]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "op, expected_value", + [("idxmax", [0, 4]), ("idxmin", [0, 5])], + ) + def test_idxmax_idxmin_convert_dtypes(self, op, expected_value): + # GH 40346 + df = DataFrame( + { + "ID": [100, 100, 100, 200, 200, 200], + "value": [0, 0, 0, 1, 2, 0], + }, + dtype="Int64", + ) + df = df.groupby("ID") + + result = getattr(df, op)() + expected = DataFrame( + {"value": expected_value}, + index=Index([100, 200], name="ID", dtype="Int64"), + ) + tm.assert_frame_equal(result, expected) + + def test_idxmax_dt64_multicolumn_axis1(self): + dti = date_range("2016-01-01", periods=3) + df = DataFrame({3: dti, 4: dti[::-1]}, copy=True) + df.iloc[0, 0] = pd.NaT + + df._consolidate_inplace() + + result = df.idxmax(axis=1) + expected = Series([4, 3, 3]) + tm.assert_series_equal(result, expected) + + result = df.idxmin(axis=1) + expected = Series([4, 3, 4]) + tm.assert_series_equal(result, expected) + + # ---------------------------------------------------------------------- + # Logical reductions + + @pytest.mark.parametrize("opname", ["any", "all"]) + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.parametrize("bool_only", [False, True]) + def test_any_all_mixed_float(self, opname, axis, bool_only, float_string_frame): + # make sure op works on mixed-type frame + mixed = float_string_frame + mixed["_bool_"] = np.random.default_rng(2).standard_normal(len(mixed)) > 0.5 + + getattr(mixed, opname)(axis=axis, bool_only=bool_only) + + @pytest.mark.parametrize("opname", ["any", "all"]) + @pytest.mark.parametrize("axis", [0, 1]) + def test_any_all_bool_with_na(self, opname, axis, bool_frame_with_na): + getattr(bool_frame_with_na, opname)(axis=axis, bool_only=False) + + @pytest.mark.parametrize("opname", ["any", "all"]) + def test_any_all_bool_frame(self, opname, bool_frame_with_na): + # GH#12863: numpy gives back non-boolean data for object type + # so fill NaNs to compare with pandas behavior + frame = bool_frame_with_na.fillna(True) + alternative = getattr(np, opname) + f = getattr(frame, opname) + + def skipna_wrapper(x): + nona = x.dropna().values + return alternative(nona) + + def wrapper(x): + return alternative(x.values) + + result0 = f(axis=0, skipna=False) + result1 = f(axis=1, skipna=False) + + tm.assert_series_equal(result0, frame.apply(wrapper)) + tm.assert_series_equal(result1, frame.apply(wrapper, 
axis=1)) + + result0 = f(axis=0) + result1 = f(axis=1) + + tm.assert_series_equal(result0, frame.apply(skipna_wrapper)) + tm.assert_series_equal( + result1, frame.apply(skipna_wrapper, axis=1), check_dtype=False + ) + + # bad axis + with pytest.raises(ValueError, match="No axis named 2"): + f(axis=2) + + # all NA case + all_na = frame * np.nan + r0 = getattr(all_na, opname)(axis=0) + r1 = getattr(all_na, opname)(axis=1) + if opname == "any": + assert not r0.any() + assert not r1.any() + else: + assert r0.all() + assert r1.all() + + def test_any_all_extra(self): + df = DataFrame( + { + "A": [True, False, False], + "B": [True, True, False], + "C": [True, True, True], + }, + index=["a", "b", "c"], + ) + result = df[["A", "B"]].any(axis=1) + expected = Series([True, True, False], index=["a", "b", "c"]) + tm.assert_series_equal(result, expected) + + result = df[["A", "B"]].any(axis=1, bool_only=True) + tm.assert_series_equal(result, expected) + + result = df.all(1) + expected = Series([True, False, False], index=["a", "b", "c"]) + tm.assert_series_equal(result, expected) + + result = df.all(1, bool_only=True) + tm.assert_series_equal(result, expected) + + # Axis is None + result = df.all(axis=None).item() + assert result is False + + result = df.any(axis=None).item() + assert result is True + + result = df[["C"]].all(axis=None).item() + assert result is True + + @pytest.mark.parametrize("axis", [0, 1]) + @pytest.mark.parametrize("bool_agg_func", ["any", "all"]) + @pytest.mark.parametrize("skipna", [True, False]) + def test_any_all_object_dtype(self, axis, bool_agg_func, skipna): + # GH#35450 + df = DataFrame( + data=[ + [1, np.nan, np.nan, True], + [np.nan, 2, np.nan, True], + [np.nan, np.nan, np.nan, True], + [np.nan, np.nan, "5", np.nan], + ] + ) + result = getattr(df, bool_agg_func)(axis=axis, skipna=skipna) + expected = Series([True, True, True, True]) + tm.assert_series_equal(result, expected) + + # GH#50947 deprecates this but it is not emitting a warning in some builds. 
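+    # [editorial note, not in the vendored pandas file] The filterwarnings
+    # mark below is the declarative form of a suppression one might write
+    # inline (requires `import warnings`):
+    #     with warnings.catch_warnings():
+    #         warnings.simplefilter("ignore", FutureWarning)
+    #         df.any(axis=1)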
+ @pytest.mark.filterwarnings( + "ignore:'any' with datetime64 dtypes is deprecated.*:FutureWarning" + ) + def test_any_datetime(self): + # GH 23070 + float_data = [1, np.nan, 3, np.nan] + datetime_data = [ + Timestamp("1960-02-15"), + Timestamp("1960-02-16"), + pd.NaT, + pd.NaT, + ] + df = DataFrame({"A": float_data, "B": datetime_data}) + + result = df.any(axis=1) + + expected = Series([True, True, True, False]) + tm.assert_series_equal(result, expected) + + def test_any_all_bool_only(self): + # GH 25101 + df = DataFrame( + {"col1": [1, 2, 3], "col2": [4, 5, 6], "col3": [None, None, None]} + ) + + result = df.all(bool_only=True) + expected = Series(dtype=np.bool_, index=[]) + tm.assert_series_equal(result, expected) + + df = DataFrame( + { + "col1": [1, 2, 3], + "col2": [4, 5, 6], + "col3": [None, None, None], + "col4": [False, False, True], + } + ) + + result = df.all(bool_only=True) + expected = Series({"col4": False}) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "func, data, expected", + [ + (np.any, {}, False), + (np.all, {}, True), + (np.any, {"A": []}, False), + (np.all, {"A": []}, True), + (np.any, {"A": [False, False]}, False), + (np.all, {"A": [False, False]}, False), + (np.any, {"A": [True, False]}, True), + (np.all, {"A": [True, False]}, False), + (np.any, {"A": [True, True]}, True), + (np.all, {"A": [True, True]}, True), + (np.any, {"A": [False], "B": [False]}, False), + (np.all, {"A": [False], "B": [False]}, False), + (np.any, {"A": [False, False], "B": [False, True]}, True), + (np.all, {"A": [False, False], "B": [False, True]}, False), + # other types + (np.all, {"A": Series([0.0, 1.0], dtype="float")}, False), + (np.any, {"A": Series([0.0, 1.0], dtype="float")}, True), + (np.all, {"A": Series([0, 1], dtype=int)}, False), + (np.any, {"A": Series([0, 1], dtype=int)}, True), + pytest.param(np.all, {"A": Series([0, 1], dtype="M8[ns]")}, False), + pytest.param(np.all, {"A": Series([0, 1], dtype="M8[ns, UTC]")}, False), + pytest.param(np.any, {"A": Series([0, 1], dtype="M8[ns]")}, True), + pytest.param(np.any, {"A": Series([0, 1], dtype="M8[ns, UTC]")}, True), + pytest.param(np.all, {"A": Series([1, 2], dtype="M8[ns]")}, True), + pytest.param(np.all, {"A": Series([1, 2], dtype="M8[ns, UTC]")}, True), + pytest.param(np.any, {"A": Series([1, 2], dtype="M8[ns]")}, True), + pytest.param(np.any, {"A": Series([1, 2], dtype="M8[ns, UTC]")}, True), + pytest.param(np.all, {"A": Series([0, 1], dtype="m8[ns]")}, False), + pytest.param(np.any, {"A": Series([0, 1], dtype="m8[ns]")}, True), + pytest.param(np.all, {"A": Series([1, 2], dtype="m8[ns]")}, True), + pytest.param(np.any, {"A": Series([1, 2], dtype="m8[ns]")}, True), + # np.all on Categorical raises, so the reduction drops the + # column, so all is being done on an empty Series, so is True + (np.all, {"A": Series([0, 1], dtype="category")}, True), + (np.any, {"A": Series([0, 1], dtype="category")}, False), + (np.all, {"A": Series([1, 2], dtype="category")}, True), + (np.any, {"A": Series([1, 2], dtype="category")}, False), + # Mix GH#21484 + pytest.param( + np.all, + { + "A": Series([10, 20], dtype="M8[ns]"), + "B": Series([10, 20], dtype="m8[ns]"), + }, + True, + ), + ], + ) + def test_any_all_np_func(self, func, data, expected): + # GH 19976 + data = DataFrame(data) + + if any(isinstance(x, CategoricalDtype) for x in data.dtypes): + with pytest.raises( + TypeError, match="dtype category does not support reduction" + ): + func(data) + + # method version + with pytest.raises( + TypeError, match="dtype 
category does not support reduction" + ): + getattr(DataFrame(data), func.__name__)(axis=None) + else: + msg = "'(any|all)' with datetime64 dtypes is deprecated" + if data.dtypes.apply(lambda x: x.kind == "M").any(): + warn = FutureWarning + else: + warn = None + + with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): + # GH#34479 + result = func(data) + assert isinstance(result, np.bool_) + assert result.item() is expected + + # method version + with tm.assert_produces_warning(warn, match=msg): + # GH#34479 + result = getattr(DataFrame(data), func.__name__)(axis=None) + assert isinstance(result, np.bool_) + assert result.item() is expected + + def test_any_all_object(self): + # GH 19976 + result = np.all(DataFrame(columns=["a", "b"])).item() + assert result is True + + result = np.any(DataFrame(columns=["a", "b"])).item() + assert result is False + + def test_any_all_object_bool_only(self): + df = DataFrame({"A": ["foo", 2], "B": [True, False]}).astype(object) + df._consolidate_inplace() + df["C"] = Series([True, True]) + + # Categorical of bools is _not_ considered booly + df["D"] = df["C"].astype("category") + + # The underlying bug is in DataFrame._get_bool_data, so we check + # that while we're here + res = df._get_bool_data() + expected = df[["C"]] + tm.assert_frame_equal(res, expected) + + res = df.all(bool_only=True, axis=0) + expected = Series([True], index=["C"]) + tm.assert_series_equal(res, expected) + + # operating on a subset of columns should not produce a _larger_ Series + res = df[["B", "C"]].all(bool_only=True, axis=0) + tm.assert_series_equal(res, expected) + + assert df.all(bool_only=True, axis=None) + + res = df.any(bool_only=True, axis=0) + expected = Series([True], index=["C"]) + tm.assert_series_equal(res, expected) + + # operating on a subset of columns should not produce a _larger_ Series + res = df[["C"]].any(bool_only=True, axis=0) + tm.assert_series_equal(res, expected) + + assert df.any(bool_only=True, axis=None) + + # --------------------------------------------------------------------- + # Unsorted + + def test_series_broadcasting(self): + # smoke test for numpy warnings + # GH 16378, GH 16306 + df = DataFrame([1.0, 1.0, 1.0]) + df_nan = DataFrame({"A": [np.nan, 2.0, np.nan]}) + s = Series([1, 1, 1]) + s_nan = Series([np.nan, np.nan, 1]) + + with tm.assert_produces_warning(None): + df_nan.clip(lower=s, axis=0) + for op in ["lt", "le", "gt", "ge", "eq", "ne"]: + getattr(df, op)(s_nan, axis=0) + + +class TestDataFrameReductions: + def test_min_max_dt64_with_NaT(self): + # Both NaT and Timestamp are in DataFrame. + df = DataFrame({"foo": [pd.NaT, pd.NaT, Timestamp("2012-05-01")]}) + + res = df.min() + exp = Series([Timestamp("2012-05-01")], index=["foo"]) + tm.assert_series_equal(res, exp) + + res = df.max() + exp = Series([Timestamp("2012-05-01")], index=["foo"]) + tm.assert_series_equal(res, exp) + + # GH12941, only NaTs are in DataFrame. 
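+        # [editorial note, not in the vendored pandas file] With the default
+        # skipna=True there is nothing left to reduce, so the all-NaT column
+        # is expected to collapse to NaT rather than raise:
+        #     DataFrame({"foo": [pd.NaT, pd.NaT]}).min()  # -> foo    NaT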
+ df = DataFrame({"foo": [pd.NaT, pd.NaT]}) + + res = df.min() + exp = Series([pd.NaT], index=["foo"]) + tm.assert_series_equal(res, exp) + + res = df.max() + exp = Series([pd.NaT], index=["foo"]) + tm.assert_series_equal(res, exp) + + def test_min_max_dt64_with_NaT_skipna_false(self, request, tz_naive_fixture): + # GH#36907 + tz = tz_naive_fixture + if isinstance(tz, tzlocal) and is_platform_windows(): + pytest.skip( + "GH#37659 OSError raised within tzlocal bc Windows " + "chokes in times before 1970-01-01" + ) + + df = DataFrame( + { + "a": [ + Timestamp("2020-01-01 08:00:00", tz=tz), + Timestamp("1920-02-01 09:00:00", tz=tz), + ], + "b": [Timestamp("2020-02-01 08:00:00", tz=tz), pd.NaT], + } + ) + res = df.min(axis=1, skipna=False) + expected = Series([df.loc[0, "a"], pd.NaT]) + assert expected.dtype == df["a"].dtype + + tm.assert_series_equal(res, expected) + + res = df.max(axis=1, skipna=False) + expected = Series([df.loc[0, "b"], pd.NaT]) + assert expected.dtype == df["a"].dtype + + tm.assert_series_equal(res, expected) + + def test_min_max_dt64_api_consistency_with_NaT(self): + # Calling the following sum functions returned an error for dataframes but + # returned NaT for series. These tests check that the API is consistent in + # min/max calls on empty Series/DataFrames. See GH:33704 for more + # information + df = DataFrame({"x": to_datetime([])}) + expected_dt_series = Series(to_datetime([])) + # check axis 0 + assert (df.min(axis=0).x is pd.NaT) == (expected_dt_series.min() is pd.NaT) + assert (df.max(axis=0).x is pd.NaT) == (expected_dt_series.max() is pd.NaT) + + # check axis 1 + tm.assert_series_equal(df.min(axis=1), expected_dt_series) + tm.assert_series_equal(df.max(axis=1), expected_dt_series) + + def test_min_max_dt64_api_consistency_empty_df(self): + # check DataFrame/Series api consistency when calling min/max on an empty + # DataFrame/Series. 
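+        # [editorial note, not in the vendored pandas file] i.e. both calls
+        # below should agree that reducing an empty float column yields NaN:
+        #     DataFrame({"x": []}).min(axis=0).x   # nan
+        #     Series([], dtype=float).min()        # nan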
+ df = DataFrame({"x": []}) + expected_float_series = Series([], dtype=float) + # check axis 0 + assert np.isnan(df.min(axis=0).x) == np.isnan(expected_float_series.min()) + assert np.isnan(df.max(axis=0).x) == np.isnan(expected_float_series.max()) + # check axis 1 + tm.assert_series_equal(df.min(axis=1), expected_float_series) + tm.assert_series_equal(df.min(axis=1), expected_float_series) + + @pytest.mark.parametrize( + "initial", + ["2018-10-08 13:36:45+00:00", "2018-10-08 13:36:45+03:00"], # Non-UTC timezone + ) + @pytest.mark.parametrize("method", ["min", "max"]) + def test_preserve_timezone(self, initial: str, method): + # GH 28552 + initial_dt = to_datetime(initial) + expected = Series([initial_dt]) + df = DataFrame([expected]) + result = getattr(df, method)(axis=1) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("method", ["min", "max"]) + def test_minmax_tzaware_skipna_axis_1(self, method, skipna): + # GH#51242 + val = to_datetime("1900-01-01", utc=True) + df = DataFrame( + {"a": Series([pd.NaT, pd.NaT, val]), "b": Series([pd.NaT, val, val])} + ) + op = getattr(df, method) + result = op(axis=1, skipna=skipna) + if skipna: + expected = Series([pd.NaT, val, val]) + else: + expected = Series([pd.NaT, pd.NaT, val]) + tm.assert_series_equal(result, expected) + + def test_frame_any_with_timedelta(self): + # GH#17667 + df = DataFrame( + { + "a": Series([0, 0]), + "t": Series([to_timedelta(0, "s"), to_timedelta(1, "ms")]), + } + ) + + result = df.any(axis=0) + expected = Series(data=[False, True], index=["a", "t"]) + tm.assert_series_equal(result, expected) + + result = df.any(axis=1) + expected = Series(data=[False, True]) + tm.assert_series_equal(result, expected) + + def test_reductions_skipna_none_raises( + self, request, frame_or_series, all_reductions + ): + if all_reductions == "count": + request.node.add_marker( + pytest.mark.xfail(reason="Count does not accept skipna") + ) + obj = frame_or_series([1, 2, 3]) + msg = 'For argument "skipna" expected type bool, received type NoneType.' 
+ with pytest.raises(ValueError, match=msg): + getattr(obj, all_reductions)(skipna=None) + + @td.skip_array_manager_invalid_test + def test_reduction_timestamp_smallest_unit(self): + # GH#52524 + df = DataFrame( + { + "a": Series([Timestamp("2019-12-31")], dtype="datetime64[s]"), + "b": Series( + [Timestamp("2019-12-31 00:00:00.123")], dtype="datetime64[ms]" + ), + } + ) + result = df.max() + expected = Series( + [Timestamp("2019-12-31"), Timestamp("2019-12-31 00:00:00.123")], + dtype="datetime64[ms]", + index=["a", "b"], + ) + tm.assert_series_equal(result, expected) + + @td.skip_array_manager_not_yet_implemented + def test_reduction_timedelta_smallest_unit(self): + # GH#52524 + df = DataFrame( + { + "a": Series([pd.Timedelta("1 days")], dtype="timedelta64[s]"), + "b": Series([pd.Timedelta("1 days")], dtype="timedelta64[ms]"), + } + ) + result = df.max() + expected = Series( + [pd.Timedelta("1 days"), pd.Timedelta("1 days")], + dtype="timedelta64[ms]", + index=["a", "b"], + ) + tm.assert_series_equal(result, expected) + + +class TestNuisanceColumns: + @pytest.mark.parametrize("method", ["any", "all"]) + def test_any_all_categorical_dtype_nuisance_column(self, method): + # GH#36076 DataFrame should match Series behavior + ser = Series([0, 1], dtype="category", name="A") + df = ser.to_frame() + + # Double-check the Series behavior is to raise + with pytest.raises(TypeError, match="does not support reduction"): + getattr(ser, method)() + + with pytest.raises(TypeError, match="does not support reduction"): + getattr(np, method)(ser) + + with pytest.raises(TypeError, match="does not support reduction"): + getattr(df, method)(bool_only=False) + + with pytest.raises(TypeError, match="does not support reduction"): + getattr(df, method)(bool_only=None) + + with pytest.raises(TypeError, match="does not support reduction"): + getattr(np, method)(df, axis=0) + + def test_median_categorical_dtype_nuisance_column(self): + # GH#21020 DataFrame.median should match Series.median + df = DataFrame({"A": Categorical([1, 2, 2, 2, 3])}) + ser = df["A"] + + # Double-check the Series behavior is to raise + with pytest.raises(TypeError, match="does not support reduction"): + ser.median() + + with pytest.raises(TypeError, match="does not support reduction"): + df.median(numeric_only=False) + + with pytest.raises(TypeError, match="does not support reduction"): + df.median() + + # same thing, but with an additional non-categorical column + df["B"] = df["A"].astype(int) + + with pytest.raises(TypeError, match="does not support reduction"): + df.median(numeric_only=False) + + with pytest.raises(TypeError, match="does not support reduction"): + df.median() + + # TODO: np.median(df, axis=0) gives np.array([2.0, 2.0]) instead + # of expected.values + + @pytest.mark.parametrize("method", ["min", "max"]) + def test_min_max_categorical_dtype_non_ordered_nuisance_column(self, method): + # GH#28949 DataFrame.min should behave like Series.min + cat = Categorical(["a", "b", "c", "b"], ordered=False) + ser = Series(cat) + df = ser.to_frame("A") + + # Double-check the Series behavior + with pytest.raises(TypeError, match="is not ordered for operation"): + getattr(ser, method)() + + with pytest.raises(TypeError, match="is not ordered for operation"): + getattr(np, method)(ser) + + with pytest.raises(TypeError, match="is not ordered for operation"): + getattr(df, method)(numeric_only=False) + + with pytest.raises(TypeError, match="is not ordered for operation"): + getattr(df, method)() + + with pytest.raises(TypeError, match="is 
not ordered for operation"):
+            getattr(np, method)(df, axis=0)
+
+        # same thing, but with an additional non-categorical column
+        df["B"] = df["A"].astype(object)
+        with pytest.raises(TypeError, match="is not ordered for operation"):
+            getattr(df, method)()
+
+        with pytest.raises(TypeError, match="is not ordered for operation"):
+            getattr(np, method)(df, axis=0)
+
+
+class TestEmptyDataFrameReductions:
+    @pytest.mark.parametrize(
+        "opname, dtype, exp_value, exp_dtype",
+        [
+            ("sum", np.int8, 0, np.int64),
+            ("prod", np.int8, 1, np.int_),
+            ("sum", np.int64, 0, np.int64),
+            ("prod", np.int64, 1, np.int64),
+            ("sum", np.uint8, 0, np.uint64),
+            ("prod", np.uint8, 1, np.uint),
+            ("sum", np.uint64, 0, np.uint64),
+            ("prod", np.uint64, 1, np.uint64),
+            ("sum", np.float32, 0, np.float32),
+            ("prod", np.float32, 1, np.float32),
+            ("sum", np.float64, 0, np.float64),
+        ],
+    )
+    def test_df_empty_min_count_0(self, opname, dtype, exp_value, exp_dtype):
+        df = DataFrame({0: [], 1: []}, dtype=dtype)
+        result = getattr(df, opname)(min_count=0)
+
+        expected = Series([exp_value, exp_value], dtype=exp_dtype)
+        tm.assert_series_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "opname, dtype, exp_dtype",
+        [
+            ("sum", np.int8, np.float64),
+            ("prod", np.int8, np.float64),
+            ("sum", np.int64, np.float64),
+            ("prod", np.int64, np.float64),
+            ("sum", np.uint8, np.float64),
+            ("prod", np.uint8, np.float64),
+            ("sum", np.uint64, np.float64),
+            ("prod", np.uint64, np.float64),
+            ("sum", np.float32, np.float32),
+            ("prod", np.float32, np.float32),
+            ("sum", np.float64, np.float64),
+        ],
+    )
+    def test_df_empty_min_count_1(self, opname, dtype, exp_dtype):
+        df = DataFrame({0: [], 1: []}, dtype=dtype)
+        result = getattr(df, opname)(min_count=1)
+
+        expected = Series([np.nan, np.nan], dtype=exp_dtype)
+        tm.assert_series_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "opname, dtype, exp_value, exp_dtype",
+        [
+            ("sum", "Int8", 0, ("Int32" if is_windows_np2_or_is32 else "Int64")),
+            ("prod", "Int8", 1, ("Int32" if is_windows_np2_or_is32 else "Int64")),
+            ("sum", "Int64", 0, "Int64"),
+            ("prod", "Int64", 1, "Int64"),
+            ("sum", "UInt8", 0, ("UInt32" if is_windows_np2_or_is32 else "UInt64")),
+            ("prod", "UInt8", 1, ("UInt32" if is_windows_np2_or_is32 else "UInt64")),
+            ("sum", "UInt64", 0, "UInt64"),
+            ("prod", "UInt64", 1, "UInt64"),
+            ("sum", "Float32", 0, "Float32"),
+            ("prod", "Float32", 1, "Float32"),
+            ("sum", "Float64", 0, "Float64"),
+        ],
+    )
+    def test_df_empty_nullable_min_count_0(self, opname, dtype, exp_value, exp_dtype):
+        df = DataFrame({0: [], 1: []}, dtype=dtype)
+        result = getattr(df, opname)(min_count=0)
+
+        expected = Series([exp_value, exp_value], dtype=exp_dtype)
+        tm.assert_series_equal(result, expected)
+
+    # TODO: why does min_count=1 impact the resulting Windows dtype
+    # differently than min_count=0?
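+    # In other words (a sketch): with min_count=0 an empty column reduces to
+    # the operation's identity, e.g.
+    #   DataFrame({0: []}, dtype="Int64").sum(min_count=0)   # -> 0
+    # while with min_count=1 (below) there are fewer than min_count valid
+    # values, so the reduction comes back as pd.NA instead.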
+ @pytest.mark.parametrize( + "opname, dtype, exp_dtype", + [ + ("sum", "Int8", ("Int32" if is_windows_or_is32 else "Int64")), + ("prod", "Int8", ("Int32" if is_windows_or_is32 else "Int64")), + ("sum", "Int64", "Int64"), + ("prod", "Int64", "Int64"), + ("sum", "UInt8", ("UInt32" if is_windows_or_is32 else "UInt64")), + ("prod", "UInt8", ("UInt32" if is_windows_or_is32 else "UInt64")), + ("sum", "UInt64", "UInt64"), + ("prod", "UInt64", "UInt64"), + ("sum", "Float32", "Float32"), + ("prod", "Float32", "Float32"), + ("sum", "Float64", "Float64"), + ], + ) + def test_df_empty_nullable_min_count_1(self, opname, dtype, exp_dtype): + df = DataFrame({0: [], 1: []}, dtype=dtype) + result = getattr(df, opname)(min_count=1) + + expected = Series([pd.NA, pd.NA], dtype=exp_dtype) + tm.assert_series_equal(result, expected) + + +def test_sum_timedelta64_skipna_false(using_array_manager, request): + # GH#17235 + if using_array_manager: + mark = pytest.mark.xfail( + reason="Incorrect type inference on NaT in reduction result" + ) + request.node.add_marker(mark) + + arr = np.arange(8).astype(np.int64).view("m8[s]").reshape(4, 2) + arr[-1, -1] = "Nat" + + df = DataFrame(arr) + assert (df.dtypes == arr.dtype).all() + + result = df.sum(skipna=False) + expected = Series([pd.Timedelta(seconds=12), pd.NaT], dtype="m8[s]") + tm.assert_series_equal(result, expected) + + result = df.sum(axis=0, skipna=False) + tm.assert_series_equal(result, expected) + + result = df.sum(axis=1, skipna=False) + expected = Series( + [ + pd.Timedelta(seconds=1), + pd.Timedelta(seconds=5), + pd.Timedelta(seconds=9), + pd.NaT, + ], + dtype="m8[s]", + ) + tm.assert_series_equal(result, expected) + + +def test_mixed_frame_with_integer_sum(): + # https://github.com/pandas-dev/pandas/issues/34520 + df = DataFrame([["a", 1]], columns=list("ab")) + df = df.astype({"b": "Int64"}) + result = df.sum() + expected = Series(["a", 1], index=["a", "b"]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("numeric_only", [True, False, None]) +@pytest.mark.parametrize("method", ["min", "max"]) +def test_minmax_extensionarray(method, numeric_only): + # https://github.com/pandas-dev/pandas/issues/32651 + int64_info = np.iinfo("int64") + ser = Series([int64_info.max, None, int64_info.min], dtype=pd.Int64Dtype()) + df = DataFrame({"Int64": ser}) + result = getattr(df, method)(numeric_only=numeric_only) + expected = Series( + [getattr(int64_info, method)], + dtype="Int64", + index=Index(["Int64"], dtype="object"), + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ts_value", [Timestamp("2000-01-01"), pd.NaT]) +def test_frame_mixed_numeric_object_with_timestamp(ts_value): + # GH 13912 + df = DataFrame({"a": [1], "b": [1.1], "c": ["foo"], "d": [ts_value]}) + with pytest.raises(TypeError, match="does not support reduction"): + df.sum() + + +def test_prod_sum_min_count_mixed_object(): + # https://github.com/pandas-dev/pandas/issues/41074 + df = DataFrame([1, "a", True]) + + result = df.prod(axis=0, min_count=1, numeric_only=False) + expected = Series(["a"]) + tm.assert_series_equal(result, expected) + + msg = re.escape("unsupported operand type(s) for +: 'int' and 'str'") + with pytest.raises(TypeError, match=msg): + df.sum(axis=0, min_count=1, numeric_only=False) + + +@pytest.mark.parametrize("method", ["min", "max", "mean", "median", "skew", "kurt"]) +@pytest.mark.parametrize("numeric_only", [True, False]) +@pytest.mark.parametrize("dtype", ["float64", "Float64"]) +def 
test_reduction_axis_none_returns_scalar(method, numeric_only, dtype): + # GH#21597 As of 2.0, axis=None reduces over all axes. + + df = DataFrame(np.random.default_rng(2).standard_normal((4, 4)), dtype=dtype) + + result = getattr(df, method)(axis=None, numeric_only=numeric_only) + np_arr = df.to_numpy(dtype=np.float64) + if method in {"skew", "kurt"}: + comp_mod = pytest.importorskip("scipy.stats") + if method == "kurt": + method = "kurtosis" + expected = getattr(comp_mod, method)(np_arr, bias=False, axis=None) + tm.assert_almost_equal(result, expected) + else: + expected = getattr(np, method)(np_arr, axis=None) + assert result == expected + + +@pytest.mark.parametrize( + "kernel", + [ + "corr", + "corrwith", + "cov", + "idxmax", + "idxmin", + "kurt", + "max", + "mean", + "median", + "min", + "prod", + "quantile", + "sem", + "skew", + "std", + "sum", + "var", + ], +) +def test_fails_on_non_numeric(kernel): + # GH#46852 + df = DataFrame({"a": [1, 2, 3], "b": object}) + args = (df,) if kernel == "corrwith" else () + msg = "|".join( + [ + "not allowed for this dtype", + "argument must be a string or a number", + "not supported between instances of", + "unsupported operand type", + "argument must be a string or a real number", + ] + ) + if kernel == "median": + # slightly different message on different builds + msg1 = ( + r"Cannot convert \[\[ " + r"\]\] to numeric" + ) + msg2 = ( + r"Cannot convert \[ " + r"\] to numeric" + ) + msg = "|".join([msg1, msg2]) + with pytest.raises(TypeError, match=msg): + getattr(df, kernel)(*args) + + +@pytest.mark.parametrize( + "method", + [ + "all", + "any", + "count", + "idxmax", + "idxmin", + "kurt", + "kurtosis", + "max", + "mean", + "median", + "min", + "nunique", + "prod", + "product", + "sem", + "skew", + "std", + "sum", + "var", + ], +) +@pytest.mark.parametrize("min_count", [0, 2]) +def test_numeric_ea_axis_1(method, skipna, min_count, any_numeric_ea_dtype): + # GH 54341 + df = DataFrame( + { + "a": Series([0, 1, 2, 3], dtype=any_numeric_ea_dtype), + "b": Series([0, 1, pd.NA, 3], dtype=any_numeric_ea_dtype), + }, + ) + expected_df = DataFrame( + { + "a": [0.0, 1.0, 2.0, 3.0], + "b": [0.0, 1.0, np.nan, 3.0], + }, + ) + if method in ("count", "nunique"): + expected_dtype = "int64" + elif method in ("all", "any"): + expected_dtype = "boolean" + elif method in ( + "kurt", + "kurtosis", + "mean", + "median", + "sem", + "skew", + "std", + "var", + ) and not any_numeric_ea_dtype.startswith("Float"): + expected_dtype = "Float64" + else: + expected_dtype = any_numeric_ea_dtype + + kwargs = {} + if method not in ("count", "nunique", "quantile"): + kwargs["skipna"] = skipna + if method in ("prod", "product", "sum"): + kwargs["min_count"] = min_count + + warn = None + msg = None + if not skipna and method in ("idxmax", "idxmin"): + warn = FutureWarning + msg = f"The behavior of DataFrame.{method} with all-NA values" + with tm.assert_produces_warning(warn, match=msg): + result = getattr(df, method)(axis=1, **kwargs) + with tm.assert_produces_warning(warn, match=msg): + expected = getattr(expected_df, method)(axis=1, **kwargs) + if method not in ("idxmax", "idxmin"): + expected = expected.astype(expected_dtype) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_repr_info.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_repr_info.py new file mode 100644 index 00000000..64d516e4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_repr_info.py @@ -0,0 
+1,468 @@ +from datetime import ( + datetime, + timedelta, +) +from io import StringIO + +import numpy as np +import pytest + +from pandas import ( + NA, + Categorical, + DataFrame, + MultiIndex, + NaT, + PeriodIndex, + Series, + Timestamp, + date_range, + option_context, + period_range, +) +import pandas._testing as tm + +import pandas.io.formats.format as fmt + + +class TestDataFrameReprInfoEtc: + def test_repr_bytes_61_lines(self): + # GH#12857 + lets = list("ACDEFGHIJKLMNOP") + slen = 50 + nseqs = 1000 + words = [ + [np.random.default_rng(2).choice(lets) for x in range(slen)] + for _ in range(nseqs) + ] + df = DataFrame(words).astype("U1") + assert (df.dtypes == object).all() + + # smoke tests; at one point this raised with 61 but not 60 + repr(df) + repr(df.iloc[:60, :]) + repr(df.iloc[:61, :]) + + def test_repr_unicode_level_names(self, frame_or_series): + index = MultiIndex.from_tuples([(0, 0), (1, 1)], names=["\u0394", "i1"]) + + obj = DataFrame(np.random.default_rng(2).standard_normal((2, 4)), index=index) + obj = tm.get_obj(obj, frame_or_series) + repr(obj) + + def test_assign_index_sequences(self): + # GH#2200 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}).set_index( + ["a", "b"] + ) + index = list(df.index) + index[0] = ("faz", "boo") + df.index = index + repr(df) + + # this travels an improper code path + index[0] = ["faz", "boo"] + df.index = index + repr(df) + + def test_repr_with_mi_nat(self): + df = DataFrame({"X": [1, 2]}, index=[[NaT, Timestamp("20130101")], ["a", "b"]]) + result = repr(df) + expected = " X\nNaT a 1\n2013-01-01 b 2" + assert result == expected + + def test_repr_with_different_nulls(self): + # GH45263 + df = DataFrame([1, 2, 3, 4], [True, None, np.nan, NaT]) + result = repr(df) + expected = """ 0 +True 1 +None 2 +NaN 3 +NaT 4""" + assert result == expected + + def test_repr_with_different_nulls_cols(self): + # GH45263 + d = {np.nan: [1, 2], None: [3, 4], NaT: [6, 7], True: [8, 9]} + df = DataFrame(data=d) + result = repr(df) + expected = """ NaN None NaT True +0 1 3 6 8 +1 2 4 7 9""" + assert result == expected + + def test_multiindex_na_repr(self): + # only an issue with long columns + df3 = DataFrame( + { + "A" * 30: {("A", "A0006000", "nuit"): "A0006000"}, + "B" * 30: {("A", "A0006000", "nuit"): np.nan}, + "C" * 30: {("A", "A0006000", "nuit"): np.nan}, + "D" * 30: {("A", "A0006000", "nuit"): np.nan}, + "E" * 30: {("A", "A0006000", "nuit"): "A"}, + "F" * 30: {("A", "A0006000", "nuit"): np.nan}, + } + ) + + idf = df3.set_index(["A" * 30, "C" * 30]) + repr(idf) + + def test_repr_name_coincide(self): + index = MultiIndex.from_tuples( + [("a", 0, "foo"), ("b", 1, "bar")], names=["a", "b", "c"] + ) + + df = DataFrame({"value": [0, 1]}, index=index) + + lines = repr(df).split("\n") + assert lines[2].startswith("a 0 foo") + + def test_repr_to_string( + self, + multiindex_year_month_day_dataframe_random_data, + multiindex_dataframe_random_data, + ): + ymd = multiindex_year_month_day_dataframe_random_data + frame = multiindex_dataframe_random_data + + repr(frame) + repr(ymd) + repr(frame.T) + repr(ymd.T) + + buf = StringIO() + frame.to_string(buf=buf) + ymd.to_string(buf=buf) + frame.T.to_string(buf=buf) + ymd.T.to_string(buf=buf) + + def test_repr_empty(self): + # empty + repr(DataFrame()) + + # empty with index + frame = DataFrame(index=np.arange(1000)) + repr(frame) + + def test_repr_mixed(self, float_string_frame): + buf = StringIO() + + # mixed + repr(float_string_frame) + float_string_frame.info(verbose=False, buf=buf) + + 
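+    # Note that most of these repr tests are smoke tests: they only assert
+    # that repr()/info() complete without raising, e.g. repr(DataFrame())
+    # by itself is an acceptable no-crash check.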
@pytest.mark.slow + def test_repr_mixed_big(self): + # big mixed + biggie = DataFrame( + { + "A": np.random.default_rng(2).standard_normal(200), + "B": tm.makeStringIndex(200), + }, + index=range(200), + ) + biggie.loc[:20, "A"] = np.nan + biggie.loc[:20, "B"] = np.nan + + repr(biggie) + + def test_repr(self, float_frame): + buf = StringIO() + + # small one + repr(float_frame) + float_frame.info(verbose=False, buf=buf) + + # even smaller + float_frame.reindex(columns=["A"]).info(verbose=False, buf=buf) + float_frame.reindex(columns=["A", "B"]).info(verbose=False, buf=buf) + + # exhausting cases in DataFrame.info + + # columns but no index + no_index = DataFrame(columns=[0, 1, 3]) + repr(no_index) + + # no columns or index + DataFrame().info(buf=buf) + + df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"]) + assert "\t" not in repr(df) + assert "\r" not in repr(df) + assert "a\n" not in repr(df) + + def test_repr_dimensions(self): + df = DataFrame([[1, 2], [3, 4]]) + with option_context("display.show_dimensions", True): + assert "2 rows x 2 columns" in repr(df) + + with option_context("display.show_dimensions", False): + assert "2 rows x 2 columns" not in repr(df) + + with option_context("display.show_dimensions", "truncate"): + assert "2 rows x 2 columns" not in repr(df) + + @pytest.mark.slow + def test_repr_big(self): + # big one + biggie = DataFrame(np.zeros((200, 4)), columns=range(4), index=range(200)) + repr(biggie) + + def test_repr_unsortable(self, float_frame): + # columns are not sortable + + unsortable = DataFrame( + { + "foo": [1] * 50, + datetime.today(): [1] * 50, + "bar": ["bar"] * 50, + datetime.today() + timedelta(1): ["bar"] * 50, + }, + index=np.arange(50), + ) + repr(unsortable) + + fmt.set_option("display.precision", 3) + repr(float_frame) + + fmt.set_option("display.max_rows", 10, "display.max_columns", 2) + repr(float_frame) + + fmt.set_option("display.max_rows", 1000, "display.max_columns", 1000) + repr(float_frame) + + tm.reset_display_options() + + def test_repr_unicode(self): + uval = "\u03c3\u03c3\u03c3\u03c3" + + df = DataFrame({"A": [uval, uval]}) + + result = repr(df) + ex_top = " A" + assert result.split("\n")[0].rstrip() == ex_top + + df = DataFrame({"A": [uval, uval]}) + result = repr(df) + assert result.split("\n")[0].rstrip() == ex_top + + def test_unicode_string_with_unicode(self): + df = DataFrame({"A": ["\u05d0"]}) + str(df) + + def test_repr_unicode_columns(self): + df = DataFrame({"\u05d0": [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]}) + repr(df.columns) # should not raise UnicodeDecodeError + + def test_str_to_bytes_raises(self): + # GH 26447 + df = DataFrame({"A": ["abc"]}) + msg = "^'str' object cannot be interpreted as an integer$" + with pytest.raises(TypeError, match=msg): + bytes(df) + + def test_very_wide_info_repr(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 20)), + columns=np.array(["a" * 10] * 20, dtype=object), + ) + repr(df) + + def test_repr_column_name_unicode_truncation_bug(self): + # #1906 + df = DataFrame( + { + "Id": [7117434], + "StringCol": ( + "Is it possible to modify drop plot code" + "so that the output graph is displayed " + "in iphone simulator, Is it possible to " + "modify drop plot code so that the " + "output graph is \xe2\x80\xa8displayed " + "in iphone simulator.Now we are adding " + "the CSV file externally. I want to Call " + "the File through the code.." 
+ ), + } + ) + + with option_context("display.max_columns", 20): + assert "StringCol" in repr(df) + + def test_latex_repr(self): + pytest.importorskip("jinja2") + expected = r"""\begin{tabular}{llll} +\toprule + & 0 & 1 & 2 \\ +\midrule +0 & $\alpha$ & b & c \\ +1 & 1 & 2 & 3 \\ +\bottomrule +\end{tabular} +""" + with option_context( + "styler.format.escape", None, "styler.render.repr", "latex" + ): + df = DataFrame([[r"$\alpha$", "b", "c"], [1, 2, 3]]) + result = df._repr_latex_() + assert result == expected + + # GH 12182 + assert df._repr_latex_() is None + + def test_repr_categorical_dates_periods(self): + # normal DataFrame + dt = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern") + p = period_range("2011-01", freq="M", periods=5) + df = DataFrame({"dt": dt, "p": p}) + exp = """ dt p +0 2011-01-01 09:00:00-05:00 2011-01 +1 2011-01-01 10:00:00-05:00 2011-02 +2 2011-01-01 11:00:00-05:00 2011-03 +3 2011-01-01 12:00:00-05:00 2011-04 +4 2011-01-01 13:00:00-05:00 2011-05""" + + assert repr(df) == exp + + df2 = DataFrame({"dt": Categorical(dt), "p": Categorical(p)}) + assert repr(df2) == exp + + @pytest.mark.parametrize("arg", [np.datetime64, np.timedelta64]) + @pytest.mark.parametrize( + "box, expected", + [[Series, "0 NaT\ndtype: object"], [DataFrame, " 0\n0 NaT"]], + ) + def test_repr_np_nat_with_object(self, arg, box, expected): + # GH 25445 + result = repr(box([arg("NaT")], dtype=object)) + assert result == expected + + def test_frame_datetime64_pre1900_repr(self): + df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="A-DEC")}) + # it works! + repr(df) + + def test_frame_to_string_with_periodindex(self): + index = PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M") + frame = DataFrame(np.random.default_rng(2).standard_normal((3, 4)), index=index) + + # it works! 
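+        # i.e. to_string() must format a PeriodIndex-backed frame without
+        # raising, e.g. DataFrame([0], index=PeriodIndex(["2011-1"], freq="M"))
+        # would be a minimal instance of the same kind of input.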
+ frame.to_string() + + def test_to_string_ea_na_in_multiindex(self): + # GH#47986 + df = DataFrame( + {"a": [1, 2]}, + index=MultiIndex.from_arrays([Series([NA, 1], dtype="Int64")]), + ) + + result = df.to_string() + expected = """ a + 1 +1 2""" + assert result == expected + + def test_datetime64tz_slice_non_truncate(self): + # GH 30263 + df = DataFrame({"x": date_range("2019", periods=10, tz="UTC")}) + expected = repr(df) + df = df.iloc[:, :5] + result = repr(df) + assert result == expected + + def test_to_records_no_typeerror_in_repr(self): + # GH 48526 + df = DataFrame([["a", "b"], ["c", "d"], ["e", "f"]], columns=["left", "right"]) + df["record"] = df[["left", "right"]].to_records() + expected = """ left right record +0 a b [0, a, b] +1 c d [1, c, d] +2 e f [2, e, f]""" + result = repr(df) + assert result == expected + + def test_to_records_with_na_record_value(self): + # GH 48526 + df = DataFrame( + [["a", np.nan], ["c", "d"], ["e", "f"]], columns=["left", "right"] + ) + df["record"] = df[["left", "right"]].to_records() + expected = """ left right record +0 a NaN [0, a, nan] +1 c d [1, c, d] +2 e f [2, e, f]""" + result = repr(df) + assert result == expected + + def test_to_records_with_na_record(self): + # GH 48526 + df = DataFrame( + [["a", "b"], [np.nan, np.nan], ["e", "f"]], columns=[np.nan, "right"] + ) + df["record"] = df[[np.nan, "right"]].to_records() + expected = """ NaN right record +0 a b [0, a, b] +1 NaN NaN [1, nan, nan] +2 e f [2, e, f]""" + result = repr(df) + assert result == expected + + def test_to_records_with_inf_as_na_record(self): + # GH 48526 + expected = """ NaN inf record +0 NaN b [0, inf, b] +1 NaN NaN [1, nan, nan] +2 e f [2, e, f]""" + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with option_context("use_inf_as_na", True): + df = DataFrame( + [[np.inf, "b"], [np.nan, np.nan], ["e", "f"]], + columns=[np.nan, np.inf], + ) + df["record"] = df[[np.nan, np.inf]].to_records() + result = repr(df) + assert result == expected + + def test_to_records_with_inf_record(self): + # GH 48526 + expected = """ NaN inf record +0 inf b [0, inf, b] +1 NaN NaN [1, nan, nan] +2 e f [2, e, f]""" + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with option_context("use_inf_as_na", False): + df = DataFrame( + [[np.inf, "b"], [np.nan, np.nan], ["e", "f"]], + columns=[np.nan, np.inf], + ) + df["record"] = df[[np.nan, np.inf]].to_records() + result = repr(df) + assert result == expected + + def test_masked_ea_with_formatter(self): + # GH#39336 + df = DataFrame( + { + "a": Series([0.123456789, 1.123456789], dtype="Float64"), + "b": Series([1, 2], dtype="Int64"), + } + ) + result = df.to_string(formatters=["{:.2f}".format, "{:.2f}".format]) + expected = """ a b +0 0.12 1.00 +1 1.12 2.00""" + assert result == expected + + def test_repr_ea_columns(self, any_string_dtype): + # GH#54797 + pytest.importorskip("pyarrow") + df = DataFrame({"long_column_name": [1, 2, 3], "col2": [4, 5, 6]}) + df.columns = df.columns.astype(any_string_dtype) + expected = """ long_column_name col2 +0 1 4 +1 2 5 +2 3 6""" + assert repr(df) == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_stack_unstack.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_stack_unstack.py new file mode 100644 index 00000000..dbd1f96f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_stack_unstack.py @@ -0,0 +1,2526 @@ +from datetime 
import datetime +from io import StringIO +import itertools +import re + +import numpy as np +import pytest + +from pandas._libs import lib +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Period, + Series, + Timedelta, + date_range, +) +import pandas._testing as tm +from pandas.core.reshape import reshape as reshape_lib + + +@pytest.fixture(params=[True, False]) +def future_stack(request): + return request.param + + +class TestDataFrameReshape: + def test_stack_unstack(self, float_frame, future_stack): + df = float_frame.copy() + df[:] = np.arange(np.prod(df.shape)).reshape(df.shape) + + stacked = df.stack(future_stack=future_stack) + stacked_df = DataFrame({"foo": stacked, "bar": stacked}) + + unstacked = stacked.unstack() + unstacked_df = stacked_df.unstack() + + tm.assert_frame_equal(unstacked, df) + tm.assert_frame_equal(unstacked_df["bar"], df) + + unstacked_cols = stacked.unstack(0) + unstacked_cols_df = stacked_df.unstack(0) + tm.assert_frame_equal(unstacked_cols.T, df) + tm.assert_frame_equal(unstacked_cols_df["bar"].T, df) + + def test_stack_mixed_level(self, future_stack): + # GH 18310 + levels = [range(3), [3, "a", "b"], [1, 2]] + + # flat columns: + df = DataFrame(1, index=levels[0], columns=levels[1]) + result = df.stack(future_stack=future_stack) + expected = Series(1, index=MultiIndex.from_product(levels[:2])) + tm.assert_series_equal(result, expected) + + # MultiIndex columns: + df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:])) + result = df.stack(1, future_stack=future_stack) + expected = DataFrame( + 1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1] + ) + tm.assert_frame_equal(result, expected) + + # as above, but used labels in level are actually of homogeneous type + result = df[["a", "b"]].stack(1, future_stack=future_stack) + expected = expected[["a", "b"]] + tm.assert_frame_equal(result, expected) + + def test_unstack_not_consolidated(self, using_array_manager): + # Gh#34708 + df = DataFrame({"x": [1, 2, np.nan], "y": [3.0, 4, np.nan]}) + df2 = df[["x"]] + df2["y"] = df["y"] + if not using_array_manager: + assert len(df2._mgr.blocks) == 2 + + res = df2.unstack() + expected = df.unstack() + tm.assert_series_equal(res, expected) + + def test_unstack_fill(self, future_stack): + # GH #9746: fill_value keyword argument for Series + # and DataFrame unstack + + # From a series + data = Series([1, 2, 4, 5], dtype=np.int16) + data.index = MultiIndex.from_tuples( + [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] + ) + + result = data.unstack(fill_value=-1) + expected = DataFrame( + {"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16 + ) + tm.assert_frame_equal(result, expected) + + # From a series with incorrect data type for fill_value + result = data.unstack(fill_value=0.5) + expected = DataFrame( + {"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float + ) + tm.assert_frame_equal(result, expected) + + # GH #13971: fill_value when unstacking multiple levels: + df = DataFrame( + {"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]} + ).set_index(["x", "y", "z"]) + unstacked = df.unstack(["x", "y"], fill_value=0) + key = ("w", "b", "j") + expected = unstacked[key] + result = Series([0, 0, 2], index=unstacked.index, name=key) + tm.assert_series_equal(result, expected) + + stacked = unstacked.stack(["x", "y"], future_stack=future_stack) + stacked.index = 
stacked.index.reorder_levels(df.index.names) + # Workaround for GH #17886 (unnecessarily casts to float): + stacked = stacked.astype(np.int64) + result = stacked.loc[df.index] + tm.assert_frame_equal(result, df) + + # From a series + s = df["w"] + result = s.unstack(["x", "y"], fill_value=0) + expected = unstacked["w"] + tm.assert_frame_equal(result, expected) + + def test_unstack_fill_frame(self): + # From a dataframe + rows = [[1, 2], [3, 4], [5, 6], [7, 8]] + df = DataFrame(rows, columns=list("AB"), dtype=np.int32) + df.index = MultiIndex.from_tuples( + [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] + ) + + result = df.unstack(fill_value=-1) + + rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]] + expected = DataFrame(rows, index=list("xyz"), dtype=np.int32) + expected.columns = MultiIndex.from_tuples( + [("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")] + ) + tm.assert_frame_equal(result, expected) + + # From a mixed type dataframe + df["A"] = df["A"].astype(np.int16) + df["B"] = df["B"].astype(np.float64) + + result = df.unstack(fill_value=-1) + expected["A"] = expected["A"].astype(np.int16) + expected["B"] = expected["B"].astype(np.float64) + tm.assert_frame_equal(result, expected) + + # From a dataframe with incorrect data type for fill_value + result = df.unstack(fill_value=0.5) + + rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]] + expected = DataFrame(rows, index=list("xyz"), dtype=float) + expected.columns = MultiIndex.from_tuples( + [("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")] + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_fill_frame_datetime(self): + # Test unstacking with date times + dv = date_range("2012-01-01", periods=4).values + data = Series(dv) + data.index = MultiIndex.from_tuples( + [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] + ) + + result = data.unstack() + expected = DataFrame( + {"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]}, + index=["x", "y", "z"], + ) + tm.assert_frame_equal(result, expected) + + result = data.unstack(fill_value=dv[0]) + expected = DataFrame( + {"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]}, + index=["x", "y", "z"], + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_fill_frame_timedelta(self): + # Test unstacking with time deltas + td = [Timedelta(days=i) for i in range(4)] + data = Series(td) + data.index = MultiIndex.from_tuples( + [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] + ) + + result = data.unstack() + expected = DataFrame( + {"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]}, + index=["x", "y", "z"], + ) + tm.assert_frame_equal(result, expected) + + result = data.unstack(fill_value=td[1]) + expected = DataFrame( + {"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]}, + index=["x", "y", "z"], + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_fill_frame_period(self): + # Test unstacking with period + periods = [ + Period("2012-01"), + Period("2012-02"), + Period("2012-03"), + Period("2012-04"), + ] + data = Series(periods) + data.index = MultiIndex.from_tuples( + [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] + ) + + result = data.unstack() + expected = DataFrame( + {"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]}, + index=["x", "y", "z"], + ) + tm.assert_frame_equal(result, expected) + + result = data.unstack(fill_value=periods[1]) + expected = DataFrame( + { + "a": [periods[0], periods[1], periods[3]], + "b": [periods[1], periods[2], periods[1]], + }, + index=["x", "y", "z"], + ) + 
tm.assert_frame_equal(result, expected) + + def test_unstack_fill_frame_categorical(self): + # Test unstacking with categorical + data = Series(["a", "b", "c", "a"], dtype="category") + data.index = MultiIndex.from_tuples( + [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] + ) + + # By default missing values will be NaN + result = data.unstack() + expected = DataFrame( + { + "a": pd.Categorical(list("axa"), categories=list("abc")), + "b": pd.Categorical(list("bcx"), categories=list("abc")), + }, + index=list("xyz"), + ) + tm.assert_frame_equal(result, expected) + + # Fill with non-category results in a ValueError + msg = r"Cannot setitem on a Categorical with a new category \(d\)" + with pytest.raises(TypeError, match=msg): + data.unstack(fill_value="d") + + # Fill with category value replaces missing values as expected + result = data.unstack(fill_value="c") + expected = DataFrame( + { + "a": pd.Categorical(list("aca"), categories=list("abc")), + "b": pd.Categorical(list("bcc"), categories=list("abc")), + }, + index=list("xyz"), + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_tuplename_in_multiindex(self): + # GH 19966 + idx = MultiIndex.from_product( + [["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")] + ) + df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx) + result = df.unstack(("A", "a")) + + expected = DataFrame( + [[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]], + columns=MultiIndex.from_tuples( + [ + ("d", "a"), + ("d", "b"), + ("d", "c"), + ("e", "a"), + ("e", "b"), + ("e", "c"), + ], + names=[None, ("A", "a")], + ), + index=Index([1, 2, 3], name=("B", "b")), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "unstack_idx, expected_values, expected_index, expected_columns", + [ + ( + ("A", "a"), + [[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]], + MultiIndex.from_tuples( + [(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"] + ), + MultiIndex.from_tuples( + [("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")], + names=[None, ("A", "a")], + ), + ), + ( + (("A", "a"), "B"), + [[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]], + Index([3, 4], name="C"), + MultiIndex.from_tuples( + [ + ("d", "a", 1), + ("d", "a", 2), + ("d", "b", 1), + ("d", "b", 2), + ("e", "a", 1), + ("e", "a", 2), + ("e", "b", 1), + ("e", "b", 2), + ], + names=[None, ("A", "a"), "B"], + ), + ), + ], + ) + def test_unstack_mixed_type_name_in_multiindex( + self, unstack_idx, expected_values, expected_index, expected_columns + ): + # GH 19966 + idx = MultiIndex.from_product( + [["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"] + ) + df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx) + result = df.unstack(unstack_idx) + + expected = DataFrame( + expected_values, columns=expected_columns, index=expected_index + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_preserve_dtypes(self): + # Checks fix for #11847 + df = DataFrame( + { + "state": ["IL", "MI", "NC"], + "index": ["a", "b", "c"], + "some_categories": Series(["a", "b", "c"]).astype("category"), + "A": np.random.default_rng(2).random(3), + "B": 1, + "C": "foo", + "D": pd.Timestamp("20010102"), + "E": Series([1.0, 50.0, 100.0]).astype("float32"), + "F": Series([3.0, 4.0, 5.0]).astype("float64"), + "G": False, + "H": Series([1, 200, 923442]).astype("int8"), + } + ) + + def unstack_and_compare(df, column_name): + unstacked1 = df.unstack([column_name]) + unstacked2 = df.unstack(column_name) + tm.assert_frame_equal(unstacked1, unstacked2) + + df1 = 
df.set_index(["state", "index"]) + unstack_and_compare(df1, "index") + + df1 = df.set_index(["state", "some_categories"]) + unstack_and_compare(df1, "some_categories") + + df1 = df.set_index(["F", "C"]) + unstack_and_compare(df1, "F") + + df1 = df.set_index(["G", "B", "state"]) + unstack_and_compare(df1, "B") + + df1 = df.set_index(["E", "A"]) + unstack_and_compare(df1, "E") + + df1 = df.set_index(["state", "index"]) + s = df1["A"] + unstack_and_compare(s, "index") + + def test_stack_ints(self, future_stack): + columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3))) + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 27)), columns=columns + ) + + tm.assert_frame_equal( + df.stack(level=[1, 2], future_stack=future_stack), + df.stack(level=1, future_stack=future_stack).stack( + level=1, future_stack=future_stack + ), + ) + tm.assert_frame_equal( + df.stack(level=[-2, -1], future_stack=future_stack), + df.stack(level=1, future_stack=future_stack).stack( + level=1, future_stack=future_stack + ), + ) + + df_named = df.copy() + return_value = df_named.columns.set_names(range(3), inplace=True) + assert return_value is None + + tm.assert_frame_equal( + df_named.stack(level=[1, 2], future_stack=future_stack), + df_named.stack(level=1, future_stack=future_stack).stack( + level=1, future_stack=future_stack + ), + ) + + def test_stack_mixed_levels(self, future_stack): + columns = MultiIndex.from_tuples( + [ + ("A", "cat", "long"), + ("B", "cat", "long"), + ("A", "dog", "short"), + ("B", "dog", "short"), + ], + names=["exp", "animal", "hair_length"], + ) + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), columns=columns + ) + + animal_hair_stacked = df.stack( + level=["animal", "hair_length"], future_stack=future_stack + ) + exp_hair_stacked = df.stack( + level=["exp", "hair_length"], future_stack=future_stack + ) + + # GH #8584: Need to check that stacking works when a number + # is passed that is both a level name and in the range of + # the level numbers + df2 = df.copy() + df2.columns.names = ["exp", "animal", 1] + tm.assert_frame_equal( + df2.stack(level=["animal", 1], future_stack=future_stack), + animal_hair_stacked, + check_names=False, + ) + tm.assert_frame_equal( + df2.stack(level=["exp", 1], future_stack=future_stack), + exp_hair_stacked, + check_names=False, + ) + + # When mixed types are passed and the ints are not level + # names, raise + msg = ( + "level should contain all level names or all level numbers, not " + "a mixture of the two" + ) + with pytest.raises(ValueError, match=msg): + df2.stack(level=["animal", 0], future_stack=future_stack) + + # GH #8584: Having 0 in the level names could raise a + # strange error about lexsort depth + df3 = df.copy() + df3.columns.names = ["exp", "animal", 0] + tm.assert_frame_equal( + df3.stack(level=["animal", 0], future_stack=future_stack), + animal_hair_stacked, + check_names=False, + ) + + def test_stack_int_level_names(self, future_stack): + columns = MultiIndex.from_tuples( + [ + ("A", "cat", "long"), + ("B", "cat", "long"), + ("A", "dog", "short"), + ("B", "dog", "short"), + ], + names=["exp", "animal", "hair_length"], + ) + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), columns=columns + ) + + exp_animal_stacked = df.stack( + level=["exp", "animal"], future_stack=future_stack + ) + animal_hair_stacked = df.stack( + level=["animal", "hair_length"], future_stack=future_stack + ) + exp_hair_stacked = df.stack( + level=["exp", "hair_length"], future_stack=future_stack + 
) + + df2 = df.copy() + df2.columns.names = [0, 1, 2] + tm.assert_frame_equal( + df2.stack(level=[1, 2], future_stack=future_stack), + animal_hair_stacked, + check_names=False, + ) + tm.assert_frame_equal( + df2.stack(level=[0, 1], future_stack=future_stack), + exp_animal_stacked, + check_names=False, + ) + tm.assert_frame_equal( + df2.stack(level=[0, 2], future_stack=future_stack), + exp_hair_stacked, + check_names=False, + ) + + # Out-of-order int column names + df3 = df.copy() + df3.columns.names = [2, 0, 1] + tm.assert_frame_equal( + df3.stack(level=[0, 1], future_stack=future_stack), + animal_hair_stacked, + check_names=False, + ) + tm.assert_frame_equal( + df3.stack(level=[2, 0], future_stack=future_stack), + exp_animal_stacked, + check_names=False, + ) + tm.assert_frame_equal( + df3.stack(level=[2, 1], future_stack=future_stack), + exp_hair_stacked, + check_names=False, + ) + + def test_unstack_bool(self): + df = DataFrame( + [False, False], + index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]), + columns=["col"], + ) + rs = df.unstack() + xp = DataFrame( + np.array([[False, np.nan], [np.nan, False]], dtype=object), + index=["a", "b"], + columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]), + ) + tm.assert_frame_equal(rs, xp) + + def test_unstack_level_binding(self, future_stack): + # GH9856 + mi = MultiIndex( + levels=[["foo", "bar"], ["one", "two"], ["a", "b"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]], + names=["first", "second", "third"], + ) + s = Series(0, index=mi) + result = s.unstack([1, 2]).stack(0, future_stack=future_stack) + + expected_mi = MultiIndex( + levels=[["foo", "bar"], ["one", "two"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], + names=["first", "second"], + ) + + expected = DataFrame( + np.array( + [[0, np.nan], [np.nan, 0], [0, np.nan], [np.nan, 0]], dtype=np.float64 + ), + index=expected_mi, + columns=Index(["b", "a"], name="third"), + ) + + tm.assert_frame_equal(result, expected) + + def test_unstack_to_series(self, float_frame): + # check reversibility + data = float_frame.unstack() + + assert isinstance(data, Series) + undo = data.unstack().T + tm.assert_frame_equal(undo, float_frame) + + # check NA handling + data = DataFrame({"x": [1, 2, np.nan], "y": [3.0, 4, np.nan]}) + data.index = Index(["a", "b", "c"]) + result = data.unstack() + + midx = MultiIndex( + levels=[["x", "y"], ["a", "b", "c"]], + codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]], + ) + expected = Series([1, 2, np.nan, 3, 4, np.nan], index=midx) + + tm.assert_series_equal(result, expected) + + # check composability of unstack + old_data = data.copy() + for _ in range(4): + data = data.unstack() + tm.assert_frame_equal(old_data, data) + + def test_unstack_dtypes(self): + # GH 2929 + rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]] + + df = DataFrame(rows, columns=list("ABCD")) + result = df.dtypes + expected = Series([np.dtype("int64")] * 4, index=list("ABCD")) + tm.assert_series_equal(result, expected) + + # single dtype + df2 = df.set_index(["A", "B"]) + df3 = df2.unstack("B") + result = df3.dtypes + expected = Series( + [np.dtype("int64")] * 4, + index=MultiIndex.from_arrays( + [["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B") + ), + ) + tm.assert_series_equal(result, expected) + + # mixed + df2 = df.set_index(["A", "B"]) + df2["C"] = 3.0 + df3 = df2.unstack("B") + result = df3.dtypes + expected = Series( + [np.dtype("float64")] * 2 + [np.dtype("int64")] * 2, + index=MultiIndex.from_arrays( + [["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B") + ), 
+ ) + tm.assert_series_equal(result, expected) + df2["D"] = "foo" + df3 = df2.unstack("B") + result = df3.dtypes + expected = Series( + [np.dtype("float64")] * 2 + [np.dtype("object")] * 2, + index=MultiIndex.from_arrays( + [["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B") + ), + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "c, d", + ( + (np.zeros(5), np.zeros(5)), + (np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")), + ), + ) + def test_unstack_dtypes_mixed_date(self, c, d): + # GH7405 + df = DataFrame( + { + "A": ["a"] * 5, + "C": c, + "D": d, + "B": date_range("2012-01-01", periods=5), + } + ) + + right = df.iloc[:3].copy(deep=True) + + df = df.set_index(["A", "B"]) + df["D"] = df["D"].astype("int64") + + left = df.iloc[:3].unstack(0) + right = right.set_index(["A", "B"]).unstack(0) + right[("D", "a")] = right[("D", "a")].astype("int64") + + assert left.shape == (3, 2) + tm.assert_frame_equal(left, right) + + def test_unstack_non_unique_index_names(self, future_stack): + idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"]) + df = DataFrame([1, 2], index=idx) + msg = "The name c1 occurs multiple times, use a level number" + with pytest.raises(ValueError, match=msg): + df.unstack("c1") + + with pytest.raises(ValueError, match=msg): + df.T.stack("c1", future_stack=future_stack) + + def test_unstack_unused_levels(self): + # GH 17845: unused codes in index make unstack() cast int to float + idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1] + df = DataFrame([[1, 0]] * 3, index=idx) + + result = df.unstack() + exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]]) + expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col) + tm.assert_frame_equal(result, expected) + assert (result.columns.levels[1] == idx.levels[1]).all() + + # Unused items on both levels + levels = [[0, 1, 7], [0, 1, 2, 3]] + codes = [[0, 0, 1, 1], [0, 2, 0, 2]] + idx = MultiIndex(levels, codes) + block = np.arange(4).reshape(2, 2) + df = DataFrame(np.concatenate([block, block + 4]), index=idx) + result = df.unstack() + expected = DataFrame( + np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx + ) + tm.assert_frame_equal(result, expected) + assert (result.columns.levels[1] == idx.levels[1]).all() + + @pytest.mark.parametrize( + "level, idces, col_level, idx_level", + ( + (0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]), + (1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]), + ), + ) + def test_unstack_unused_levels_mixed_with_nan( + self, level, idces, col_level, idx_level + ): + # With mixed dtype and NaN + levels = [["a", 2, "c"], [1, 3, 5, 7]] + codes = [[0, -1, 1, 1], [0, 2, -1, 2]] + idx = MultiIndex(levels, codes) + data = np.arange(8) + df = DataFrame(data.reshape(4, 2), index=idx) + + result = df.unstack(level=level) + exp_data = np.zeros(18) * np.nan + exp_data[idces] = data + cols = MultiIndex.from_product([[0, 1], col_level]) + expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("cols", [["A", "C"], slice(None)]) + def test_unstack_unused_level(self, cols): + # GH 18562 : unused codes on the unstacked level + df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"]) + + ind = df.set_index(["A", "B", "C"], drop=False) + selection = ind.loc[(slice(None), slice(None), "I"), cols] + result = selection.unstack() + + expected = ind.iloc[[0]][cols] + 
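+        # The expected frame is rebuilt by hand below: take the surviving row,
+        # then attach the unstacked value "I" as a new innermost column level
+        # and drop the now-redundant "C" index level.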
expected.columns = MultiIndex.from_product( + [expected.columns, ["I"]], names=[None, "C"] + ) + expected.index = expected.index.droplevel("C") + tm.assert_frame_equal(result, expected) + + def test_unstack_long_index(self): + # PH 32624: Error when using a lot of indices to unstack. + # The error occurred only, if a lot of indices are used. + df = DataFrame( + [[1]], + columns=MultiIndex.from_tuples([[0]], names=["c1"]), + index=MultiIndex.from_tuples( + [[0, 0, 1, 0, 0, 0, 1]], + names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"], + ), + ) + result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"]) + expected = DataFrame( + [[1]], + columns=MultiIndex.from_tuples( + [[0, 0, 1, 0, 0, 0, 1]], + names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"], + ), + index=Index([0], name="i1"), + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_multi_level_cols(self): + # PH 24729: Unstack a df with multi level columns + df = DataFrame( + [[0.0, 0.0], [0.0, 0.0]], + columns=MultiIndex.from_tuples( + [["B", "C"], ["B", "D"]], names=["c1", "c2"] + ), + index=MultiIndex.from_tuples( + [[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"] + ), + ) + assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"] + + def test_unstack_multi_level_rows_and_cols(self): + # PH 28306: Unstack df with multi level cols and rows + df = DataFrame( + [[1, 2], [3, 4], [-1, -2], [-3, -4]], + columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]), + index=MultiIndex.from_tuples( + [ + ["m1", "P3", 222], + ["m1", "A5", 111], + ["m2", "P3", 222], + ["m2", "A5", 111], + ], + names=["i1", "i2", "i3"], + ), + ) + result = df.unstack(["i3", "i2"]) + expected = df.unstack(["i3"]).unstack(["i2"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("idx", [("jim", "joe"), ("joe", "jim")]) + @pytest.mark.parametrize("lev", list(range(2))) + def test_unstack_nan_index1(self, idx, lev): + # GH7466 + def cast(val): + val_str = "" if val != val else val + return f"{val_str:1}" + + df = DataFrame( + { + "jim": ["a", "b", np.nan, "d"], + "joe": ["w", "x", "y", "z"], + "jolie": ["a.w", "b.x", " .y", "d.z"], + } + ) + + left = df.set_index(["jim", "joe"]).unstack()["jolie"] + right = df.set_index(["joe", "jim"]).unstack()["jolie"].T + tm.assert_frame_equal(left, right) + + mi = df.set_index(list(idx)) + udf = mi.unstack(level=lev) + assert udf.notna().values.sum() == len(df) + mk_list = lambda a: list(a) if isinstance(a, tuple) else [a] + rows, cols = udf["jolie"].notna().values.nonzero() + for i, j in zip(rows, cols): + left = sorted(udf["jolie"].iloc[i, j].split(".")) + right = mk_list(udf["jolie"].index[i]) + mk_list(udf["jolie"].columns[j]) + right = sorted(map(cast, right)) + assert left == right + + @pytest.mark.parametrize("idx", itertools.permutations(["1st", "2nd", "3rd"])) + @pytest.mark.parametrize("lev", list(range(3))) + @pytest.mark.parametrize("col", ["4th", "5th"]) + def test_unstack_nan_index_repeats(self, idx, lev, col): + def cast(val): + val_str = "" if val != val else val + return f"{val_str:1}" + + df = DataFrame( + { + "1st": ["d"] * 3 + + [np.nan] * 5 + + ["a"] * 2 + + ["c"] * 3 + + ["e"] * 2 + + ["b"] * 5, + "2nd": ["y"] * 2 + + ["w"] * 3 + + [np.nan] * 3 + + ["z"] * 4 + + [np.nan] * 3 + + ["x"] * 3 + + [np.nan] * 2, + "3rd": [ + 67, + 39, + 53, + 72, + 57, + 80, + 31, + 18, + 11, + 30, + 59, + 50, + 62, + 59, + 76, + 52, + 14, + 53, + 60, + 51, + ], + } + ) + + df["4th"], df["5th"] = ( + df.apply(lambda r: ".".join(map(cast, r)), axis=1), + df.apply(lambda r: 
".".join(map(cast, r.iloc[::-1])), axis=1), + ) + + mi = df.set_index(list(idx)) + udf = mi.unstack(level=lev) + assert udf.notna().values.sum() == 2 * len(df) + mk_list = lambda a: list(a) if isinstance(a, tuple) else [a] + rows, cols = udf[col].notna().values.nonzero() + for i, j in zip(rows, cols): + left = sorted(udf[col].iloc[i, j].split(".")) + right = mk_list(udf[col].index[i]) + mk_list(udf[col].columns[j]) + right = sorted(map(cast, right)) + assert left == right + + def test_unstack_nan_index2(self): + # GH7403 + df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)}) + # Explicit cast to avoid implicit cast when setting to np.nan + df = df.astype({"B": "float"}) + df.iloc[3, 1] = np.nan + left = df.set_index(["A", "B"]).unstack(0) + + vals = [ + [3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan], + [np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7], + ] + vals = list(map(list, zip(*vals))) + idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B") + cols = MultiIndex( + levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"] + ) + + right = DataFrame(vals, columns=cols, index=idx) + tm.assert_frame_equal(left, right) + + df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)}) + # Explicit cast to avoid implicit cast when setting to np.nan + df = df.astype({"B": "float"}) + df.iloc[2, 1] = np.nan + left = df.set_index(["A", "B"]).unstack(0) + + vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]] + cols = MultiIndex( + levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"] + ) + idx = Index([np.nan, 0, 1, 2, 3], name="B") + right = DataFrame(vals, columns=cols, index=idx) + tm.assert_frame_equal(left, right) + + df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)}) + # Explicit cast to avoid implicit cast when setting to np.nan + df = df.astype({"B": "float"}) + df.iloc[3, 1] = np.nan + left = df.set_index(["A", "B"]).unstack(0) + + vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]] + cols = MultiIndex( + levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"] + ) + idx = Index([np.nan, 0, 1, 2, 3], name="B") + right = DataFrame(vals, columns=cols, index=idx) + tm.assert_frame_equal(left, right) + + def test_unstack_nan_index3(self, using_array_manager): + # GH7401 + df = DataFrame( + { + "A": list("aaaaabbbbb"), + "B": (date_range("2012-01-01", periods=5).tolist() * 2), + "C": np.arange(10), + } + ) + + df.iloc[3, 1] = np.nan + left = df.set_index(["A", "B"]).unstack() + + vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]]) + idx = Index(["a", "b"], name="A") + cols = MultiIndex( + levels=[["C"], date_range("2012-01-01", periods=5)], + codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]], + names=[None, "B"], + ) + + right = DataFrame(vals, columns=cols, index=idx) + if using_array_manager: + # INFO(ArrayManager) with ArrayManager preserve dtype where possible + cols = right.columns[[1, 2, 3, 5]] + right[cols] = right[cols].astype(df["C"].dtype) + tm.assert_frame_equal(left, right) + + def test_unstack_nan_index4(self): + # GH4862 + vals = [ + ["Hg", np.nan, np.nan, 680585148], + ["U", 0.0, np.nan, 680585148], + ["Pb", 7.07e-06, np.nan, 680585148], + ["Sn", 2.3614e-05, 0.0133, 680607017], + ["Ag", 0.0, 0.0133, 680607017], + ["Hg", -0.00015, 0.0133, 680607017], + ] + df = DataFrame( + vals, + columns=["agent", "change", "dosage", "s_id"], + index=[17263, 17264, 17265, 17266, 17267, 17268], + ) + + left = df.copy().set_index(["s_id", "dosage", "agent"]).unstack() + + 
vals = [ + [np.nan, np.nan, 7.07e-06, np.nan, 0.0], + [0.0, -0.00015, np.nan, 2.3614e-05, np.nan], + ] + + idx = MultiIndex( + levels=[[680585148, 680607017], [0.0133]], + codes=[[0, 1], [-1, 0]], + names=["s_id", "dosage"], + ) + + cols = MultiIndex( + levels=[["change"], ["Ag", "Hg", "Pb", "Sn", "U"]], + codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]], + names=[None, "agent"], + ) + + right = DataFrame(vals, columns=cols, index=idx) + tm.assert_frame_equal(left, right) + + left = df.loc[17264:].copy().set_index(["s_id", "dosage", "agent"]) + tm.assert_frame_equal(left.unstack(), right) + + def test_unstack_nan_index5(self): + # GH9497 - multiple unstack with nulls + df = DataFrame( + { + "1st": [1, 2, 1, 2, 1, 2], + "2nd": date_range("2014-02-01", periods=6, freq="D"), + "jim": 100 + np.arange(6), + "joe": (np.random.default_rng(2).standard_normal(6) * 10).round(2), + } + ) + + df["3rd"] = df["2nd"] - pd.Timestamp("2014-02-02") + df.loc[1, "2nd"] = df.loc[3, "2nd"] = np.nan + df.loc[1, "3rd"] = df.loc[4, "3rd"] = np.nan + + left = df.set_index(["1st", "2nd", "3rd"]).unstack(["2nd", "3rd"]) + assert left.notna().values.sum() == 2 * len(df) + + for col in ["jim", "joe"]: + for _, r in df.iterrows(): + key = r["1st"], (col, r["2nd"], r["3rd"]) + assert r[col] == left.loc[key] + + def test_stack_datetime_column_multiIndex(self, future_stack): + # GH 8039 + t = datetime(2014, 1, 1) + df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, "A", "B")])) + result = df.stack(future_stack=future_stack) + + eidx = MultiIndex.from_product([(0, 1, 2, 3), ("B",)]) + ecols = MultiIndex.from_tuples([(t, "A")]) + expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "multiindex_columns", + [ + [0, 1, 2, 3, 4], + [0, 1, 2, 3], + [0, 1, 2, 4], + [0, 1, 2], + [1, 2, 3], + [2, 3, 4], + [0, 1], + [0, 2], + [0, 3], + [0], + [2], + [4], + [4, 3, 2, 1, 0], + [3, 2, 1, 0], + [4, 2, 1, 0], + [2, 1, 0], + [3, 2, 1], + [4, 3, 2], + [1, 0], + [2, 0], + [3, 0], + ], + ) + @pytest.mark.parametrize("level", (-1, 0, 1, [0, 1], [1, 0])) + def test_stack_partial_multiIndex(self, multiindex_columns, level, future_stack): + # GH 8844 + dropna = False if not future_stack else lib.no_default + full_multiindex = MultiIndex.from_tuples( + [("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")], + names=["Upper", "Lower"], + ) + multiindex = full_multiindex[multiindex_columns] + df = DataFrame( + np.arange(3 * len(multiindex)).reshape(3, len(multiindex)), + columns=multiindex, + ) + result = df.stack(level=level, dropna=dropna, future_stack=future_stack) + + if isinstance(level, int) and not future_stack: + # Stacking a single level should not make any all-NaN rows, + # so df.stack(level=level, dropna=False) should be the same + # as df.stack(level=level, dropna=True). 
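+            # (Reasoning: with a single stacked level every original cell maps
+            # to exactly one output position, so dropna has nothing extra to
+            # drop.)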
+ expected = df.stack(level=level, dropna=True, future_stack=future_stack) + if isinstance(expected, Series): + tm.assert_series_equal(result, expected) + else: + tm.assert_frame_equal(result, expected) + + df.columns = MultiIndex.from_tuples( + df.columns.to_numpy(), names=df.columns.names + ) + expected = df.stack(level=level, dropna=dropna, future_stack=future_stack) + if isinstance(expected, Series): + tm.assert_series_equal(result, expected) + else: + tm.assert_frame_equal(result, expected) + + def test_stack_full_multiIndex(self, future_stack): + # GH 8844 + full_multiindex = MultiIndex.from_tuples( + [("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")], + names=["Upper", "Lower"], + ) + df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]]) + dropna = False if not future_stack else lib.no_default + result = df.stack(dropna=dropna, future_stack=future_stack) + expected = DataFrame( + [[0, 2], [1, np.nan], [3, 5], [4, np.nan]], + index=MultiIndex( + levels=[[0, 1], ["u", "x", "y", "z"]], + codes=[[0, 0, 1, 1], [1, 3, 1, 3]], + names=[None, "Lower"], + ), + columns=Index(["B", "C"], name="Upper"), + ) + expected["B"] = expected["B"].astype(df.dtypes.iloc[0]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("ordered", [False, True]) + def test_stack_preserve_categorical_dtype(self, ordered, future_stack): + # GH13854 + cidx = pd.CategoricalIndex(list("yxz"), categories=list("xyz"), ordered=ordered) + df = DataFrame([[10, 11, 12]], columns=cidx) + result = df.stack(future_stack=future_stack) + + # `MultiIndex.from_product` preserves categorical dtype - + # it's tested elsewhere. + midx = MultiIndex.from_product([df.index, cidx]) + expected = Series([10, 11, 12], index=midx) + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("ordered", [False, True]) + @pytest.mark.parametrize( + "labels,data", + [ + (list("xyz"), [10, 11, 12, 13, 14, 15]), + (list("zyx"), [14, 15, 12, 13, 10, 11]), + ], + ) + def test_stack_multi_preserve_categorical_dtype( + self, ordered, labels, data, future_stack + ): + # GH-36991 + cidx = pd.CategoricalIndex(labels, categories=sorted(labels), ordered=ordered) + cidx2 = pd.CategoricalIndex(["u", "v"], ordered=ordered) + midx = MultiIndex.from_product([cidx, cidx2]) + df = DataFrame([sorted(data)], columns=midx) + result = df.stack([0, 1], future_stack=future_stack) + + labels = labels if future_stack else sorted(labels) + s_cidx = pd.CategoricalIndex(labels, ordered=ordered) + expected_data = sorted(data) if future_stack else data + expected = Series( + expected_data, index=MultiIndex.from_product([[0], s_cidx, cidx2]) + ) + + tm.assert_series_equal(result, expected) + + def test_stack_preserve_categorical_dtype_values(self, future_stack): + # GH-23077 + cat = pd.Categorical(["a", "a", "b", "c"]) + df = DataFrame({"A": cat, "B": cat}) + result = df.stack(future_stack=future_stack) + index = MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]]) + expected = Series( + pd.Categorical(["a", "a", "a", "a", "b", "b", "c", "c"]), index=index + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "index, columns", + [ + ([0, 0, 1, 1], MultiIndex.from_product([[1, 2], ["a", "b"]])), + ([0, 0, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])), + ([0, 1, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])), + ], + ) + def test_stack_multi_columns_non_unique_index(self, index, columns, future_stack): + # GH-28301 + df = DataFrame(index=index, columns=columns).fillna(1) + 
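+ # Rebuilding the index from tuples below yields reference codes; stacking
+ # over the non-unique index must reproduce them exactly.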
stacked = df.stack(future_stack=future_stack) + new_index = MultiIndex.from_tuples(stacked.index.to_numpy()) + expected = DataFrame( + stacked.to_numpy(), index=new_index, columns=stacked.columns + ) + tm.assert_frame_equal(stacked, expected) + stacked_codes = np.asarray(stacked.index.codes) + expected_codes = np.asarray(new_index.codes) + tm.assert_numpy_array_equal(stacked_codes, expected_codes) + + @pytest.mark.parametrize( + "vals1, vals2, dtype1, dtype2, expected_dtype", + [ + ([1, 2], [3.0, 4.0], "Int64", "Float64", "Float64"), + ([1, 2], ["foo", "bar"], "Int64", "string", "object"), + ], + ) + def test_stack_multi_columns_mixed_extension_types( + self, vals1, vals2, dtype1, dtype2, expected_dtype, future_stack + ): + # GH45740 + df = DataFrame( + { + ("A", 1): Series(vals1, dtype=dtype1), + ("A", 2): Series(vals2, dtype=dtype2), + } + ) + result = df.stack(future_stack=future_stack) + expected = ( + df.astype(object).stack(future_stack=future_stack).astype(expected_dtype) + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("level", [0, 1]) + def test_unstack_mixed_extension_types(self, level): + index = MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 1)], names=["a", "b"]) + df = DataFrame( + { + "A": pd.array([0, 1, None], dtype="Int64"), + "B": pd.Categorical(["a", "a", "b"]), + }, + index=index, + ) + + result = df.unstack(level=level) + expected = df.astype(object).unstack(level=level) + if level == 0: + expected[("A", "B")] = expected[("A", "B")].fillna(pd.NA) + else: + expected[("A", 0)] = expected[("A", 0)].fillna(pd.NA) + + expected_dtypes = Series( + [df.A.dtype] * 2 + [df.B.dtype] * 2, index=result.columns + ) + tm.assert_series_equal(result.dtypes, expected_dtypes) + tm.assert_frame_equal(result.astype(object), expected) + + @pytest.mark.parametrize("level", [0, "baz"]) + def test_unstack_swaplevel_sortlevel(self, level): + # GH 20994 + mi = MultiIndex.from_product([[0], ["d", "c"]], names=["bar", "baz"]) + df = DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"]) + df.columns.name = "foo" + + expected = DataFrame( + [[3, 1, 2, 0]], + columns=MultiIndex.from_tuples( + [("c", "A"), ("c", "B"), ("d", "A"), ("d", "B")], names=["baz", "foo"] + ), + ) + expected.index.name = "bar" + + result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["float64", "Float64"]) +def test_unstack_sort_false(frame_or_series, dtype): + # GH 15105 + index = MultiIndex.from_tuples( + [("two", "z", "b"), ("two", "y", "a"), ("one", "z", "b"), ("one", "y", "a")] + ) + obj = frame_or_series(np.arange(1.0, 5.0), index=index, dtype=dtype) + result = obj.unstack(level=-1, sort=False) + + if frame_or_series is DataFrame: + expected_columns = MultiIndex.from_tuples([(0, "b"), (0, "a")]) + else: + expected_columns = ["b", "a"] + expected = DataFrame( + [[1.0, np.nan], [np.nan, 2.0], [3.0, np.nan], [np.nan, 4.0]], + columns=expected_columns, + index=MultiIndex.from_tuples( + [("two", "z"), ("two", "y"), ("one", "z"), ("one", "y")] + ), + dtype=dtype, + ) + tm.assert_frame_equal(result, expected) + + result = obj.unstack(level=[1, 2], sort=False) + + if frame_or_series is DataFrame: + expected_columns = MultiIndex.from_tuples([(0, "z", "b"), (0, "y", "a")]) + else: + expected_columns = MultiIndex.from_tuples([("z", "b"), ("y", "a")]) + expected = DataFrame( + [[1.0, 2.0], [3.0, 4.0]], + index=["two", "one"], + columns=expected_columns, + dtype=dtype, + ) + 
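+ # With sort=False the first-seen row order ("two" before "one") and the
+ # first-seen column order are kept instead of being lexsorted.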
tm.assert_frame_equal(result, expected) + + +def test_unstack_fill_frame_object(): + # GH12815 Test unstacking with object. + data = Series(["a", "b", "c", "a"], dtype="object") + data.index = MultiIndex.from_tuples( + [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")] + ) + + # By default missing values will be NaN + result = data.unstack() + expected = DataFrame( + {"a": ["a", np.nan, "a"], "b": ["b", "c", np.nan]}, index=list("xyz") + ) + tm.assert_frame_equal(result, expected) + + # Fill with any value replaces missing values as expected + result = data.unstack(fill_value="d") + expected = DataFrame( + {"a": ["a", "d", "a"], "b": ["b", "c", "d"]}, index=list("xyz") + ) + tm.assert_frame_equal(result, expected) + + +def test_unstack_timezone_aware_values(): + # GH 18338 + df = DataFrame( + { + "timestamp": [pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC")], + "a": ["a"], + "b": ["b"], + "c": ["c"], + }, + columns=["timestamp", "a", "b", "c"], + ) + result = df.set_index(["a", "b"]).unstack() + expected = DataFrame( + [[pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC"), "c"]], + index=Index(["a"], name="a"), + columns=MultiIndex( + levels=[["timestamp", "c"], ["b"]], + codes=[[0, 1], [0, 0]], + names=[None, "b"], + ), + ) + tm.assert_frame_equal(result, expected) + + +def test_stack_timezone_aware_values(future_stack): + # GH 19420 + ts = date_range(freq="D", start="20180101", end="20180103", tz="America/New_York") + df = DataFrame({"A": ts}, index=["a", "b", "c"]) + result = df.stack(future_stack=future_stack) + expected = Series( + ts, + index=MultiIndex(levels=[["a", "b", "c"], ["A"]], codes=[[0, 1, 2], [0, 0, 0]]), + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("dropna", [True, False, lib.no_default]) +def test_stack_empty_frame(dropna, future_stack): + # GH 36113 + levels = [np.array([], dtype=np.int64), np.array([], dtype=np.int64)] + expected = Series(dtype=np.float64, index=MultiIndex(levels=levels, codes=[[], []])) + if future_stack and dropna is not lib.no_default: + with pytest.raises(ValueError, match="dropna must be unspecified"): + DataFrame(dtype=np.float64).stack(dropna=dropna, future_stack=future_stack) + else: + result = DataFrame(dtype=np.float64).stack( + dropna=dropna, future_stack=future_stack + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("dropna", [True, False, lib.no_default]) +@pytest.mark.parametrize("fill_value", [None, 0]) +def test_stack_unstack_empty_frame(dropna, fill_value, future_stack): + # GH 36113 + if future_stack and dropna is not lib.no_default: + with pytest.raises(ValueError, match="dropna must be unspecified"): + DataFrame(dtype=np.int64).stack( + dropna=dropna, future_stack=future_stack + ).unstack(fill_value=fill_value) + else: + result = ( + DataFrame(dtype=np.int64) + .stack(dropna=dropna, future_stack=future_stack) + .unstack(fill_value=fill_value) + ) + expected = DataFrame(dtype=np.int64) + tm.assert_frame_equal(result, expected) + + +def test_unstack_single_index_series(): + # GH 36113 + msg = r"index must be a MultiIndex to unstack.*" + with pytest.raises(ValueError, match=msg): + Series(dtype=np.int64).unstack() + + +def test_unstacking_multi_index_df(): + # see gh-30740 + df = DataFrame( + { + "name": ["Alice", "Bob"], + "score": [9.5, 8], + "employed": [False, True], + "kids": [0, 0], + "gender": ["female", "male"], + } + ) + df = df.set_index(["name", "employed", "kids", "gender"]) + df = df.unstack(["gender"], fill_value=0) + expected = 
df.unstack("employed", fill_value=0).unstack("kids", fill_value=0) + result = df.unstack(["employed", "kids"], fill_value=0) + expected = DataFrame( + [[9.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 8.0]], + index=Index(["Alice", "Bob"], name="name"), + columns=MultiIndex.from_tuples( + [ + ("score", "female", False, 0), + ("score", "female", True, 0), + ("score", "male", False, 0), + ("score", "male", True, 0), + ], + names=[None, "gender", "employed", "kids"], + ), + ) + tm.assert_frame_equal(result, expected) + + +def test_stack_positional_level_duplicate_column_names(future_stack): + # https://github.com/pandas-dev/pandas/issues/36353 + columns = MultiIndex.from_product([("x", "y"), ("y", "z")], names=["a", "a"]) + df = DataFrame([[1, 1, 1, 1]], columns=columns) + result = df.stack(0, future_stack=future_stack) + + new_columns = Index(["y", "z"], name="a") + new_index = MultiIndex.from_tuples([(0, "x"), (0, "y")], names=[None, "a"]) + expected = DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns) + + tm.assert_frame_equal(result, expected) + + +def test_unstack_non_slice_like_blocks(using_array_manager): + # Case where the mgr_locs of a DataFrame's underlying blocks are not slice-like + + mi = MultiIndex.from_product([range(5), ["A", "B", "C"]]) + df = DataFrame( + { + 0: np.random.default_rng(2).standard_normal(15), + 1: np.random.default_rng(2).standard_normal(15).astype(np.int64), + 2: np.random.default_rng(2).standard_normal(15), + 3: np.random.default_rng(2).standard_normal(15), + }, + index=mi, + ) + if not using_array_manager: + assert any(not x.mgr_locs.is_slice_like for x in df._mgr.blocks) + + res = df.unstack() + + expected = pd.concat([df[n].unstack() for n in range(4)], keys=range(4), axis=1) + tm.assert_frame_equal(res, expected) + + +def test_stack_sort_false(future_stack): + # GH 15105 + data = [[1, 2, 3.0, 4.0], [2, 3, 4.0, 5.0], [3, 4, np.nan, np.nan]] + df = DataFrame( + data, + columns=MultiIndex( + levels=[["B", "A"], ["x", "y"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]] + ), + ) + kwargs = {} if future_stack else {"sort": False} + result = df.stack(level=0, future_stack=future_stack, **kwargs) + if future_stack: + expected = DataFrame( + { + "x": [1.0, 3.0, 2.0, 4.0, 3.0, np.nan], + "y": [2.0, 4.0, 3.0, 5.0, 4.0, np.nan], + }, + index=MultiIndex.from_arrays( + [[0, 0, 1, 1, 2, 2], ["B", "A", "B", "A", "B", "A"]] + ), + ) + else: + expected = DataFrame( + {"x": [1.0, 3.0, 2.0, 4.0, 3.0], "y": [2.0, 4.0, 3.0, 5.0, 4.0]}, + index=MultiIndex.from_arrays([[0, 0, 1, 1, 2], ["B", "A", "B", "A", "B"]]), + ) + tm.assert_frame_equal(result, expected) + + # Codes sorted in this call + df = DataFrame( + data, + columns=MultiIndex.from_arrays([["B", "B", "A", "A"], ["x", "y", "x", "y"]]), + ) + kwargs = {} if future_stack else {"sort": False} + result = df.stack(level=0, future_stack=future_stack, **kwargs) + tm.assert_frame_equal(result, expected) + + +def test_stack_sort_false_multi_level(future_stack): + # GH 15105 + idx = MultiIndex.from_tuples([("weight", "kg"), ("height", "m")]) + df = DataFrame([[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=idx) + kwargs = {} if future_stack else {"sort": False} + result = df.stack([0, 1], future_stack=future_stack, **kwargs) + expected_index = MultiIndex.from_tuples( + [ + ("cat", "weight", "kg"), + ("cat", "height", "m"), + ("dog", "weight", "kg"), + ("dog", "height", "m"), + ] + ) + expected = Series([1.0, 2.0, 3.0, 4.0], index=expected_index) + tm.assert_series_equal(result, expected) + + +class TestStackUnstackMultiLevel: + 
def test_unstack(self, multiindex_year_month_day_dataframe_random_data): + # just check that it works for now + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack() + unstacked.unstack() + + # test that ints work + ymd.astype(int).unstack() + + # test that int32 work + ymd.astype(np.int32).unstack() + + @pytest.mark.parametrize( + "result_rows,result_columns,index_product,expected_row", + [ + ( + [[1, 1, None, None, 30.0, None], [2, 2, None, None, 30.0, None]], + ["ix1", "ix2", "col1", "col2", "col3", "col4"], + 2, + [None, None, 30.0, None], + ), + ( + [[1, 1, None, None, 30.0], [2, 2, None, None, 30.0]], + ["ix1", "ix2", "col1", "col2", "col3"], + 2, + [None, None, 30.0], + ), + ( + [[1, 1, None, None, 30.0], [2, None, None, None, 30.0]], + ["ix1", "ix2", "col1", "col2", "col3"], + None, + [None, None, 30.0], + ), + ], + ) + def test_unstack_partial( + self, result_rows, result_columns, index_product, expected_row + ): + # check for regressions on this issue: + # https://github.com/pandas-dev/pandas/issues/19351 + # make sure DataFrame.unstack() works when its run on a subset of the DataFrame + # and the Index levels contain values that are not present in the subset + result = DataFrame(result_rows, columns=result_columns).set_index( + ["ix1", "ix2"] + ) + result = result.iloc[1:2].unstack("ix2") + expected = DataFrame( + [expected_row], + columns=MultiIndex.from_product( + [result_columns[2:], [index_product]], names=[None, "ix2"] + ), + index=Index([2], name="ix1"), + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_multiple_no_empty_columns(self): + index = MultiIndex.from_tuples( + [(0, "foo", 0), (0, "bar", 0), (1, "baz", 1), (1, "qux", 1)] + ) + + s = Series(np.random.default_rng(2).standard_normal(4), index=index) + + unstacked = s.unstack([1, 2]) + expected = unstacked.dropna(axis=1, how="all") + tm.assert_frame_equal(unstacked, expected) + + def test_stack(self, multiindex_year_month_day_dataframe_random_data, future_stack): + ymd = multiindex_year_month_day_dataframe_random_data + + # regular roundtrip + unstacked = ymd.unstack() + restacked = unstacked.stack(future_stack=future_stack) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + restacked = restacked.dropna(how="all") + tm.assert_frame_equal(restacked, ymd) + + unlexsorted = ymd.sort_index(level=2) + + unstacked = unlexsorted.unstack(2) + restacked = unstacked.stack(future_stack=future_stack) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + restacked = restacked.dropna(how="all") + tm.assert_frame_equal(restacked.sort_index(level=0), ymd) + + unlexsorted = unlexsorted[::-1] + unstacked = unlexsorted.unstack(1) + restacked = unstacked.stack(future_stack=future_stack).swaplevel(1, 2) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + restacked = restacked.dropna(how="all") + tm.assert_frame_equal(restacked.sort_index(level=0), ymd) + + unlexsorted = unlexsorted.swaplevel(0, 1) + unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1) + restacked = unstacked.stack(0, future_stack=future_stack).swaplevel(1, 2) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + restacked = restacked.dropna(how="all") + tm.assert_frame_equal(restacked.sort_index(level=0), ymd) + + # columns unsorted + unstacked = ymd.unstack() + restacked = unstacked.stack(future_stack=future_stack) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + 
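+ # (hence the dropna(how="all") below before comparing with the original)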
restacked = restacked.dropna(how="all") + tm.assert_frame_equal(restacked, ymd) + + # more than 2 levels in the columns + unstacked = ymd.unstack(1).unstack(1) + + result = unstacked.stack(1, future_stack=future_stack) + expected = ymd.unstack() + tm.assert_frame_equal(result, expected) + + result = unstacked.stack(2, future_stack=future_stack) + expected = ymd.unstack(1) + tm.assert_frame_equal(result, expected) + + result = unstacked.stack(0, future_stack=future_stack) + expected = ymd.stack(future_stack=future_stack).unstack(1).unstack(1) + tm.assert_frame_equal(result, expected) + + # not all levels present in each echelon + unstacked = ymd.unstack(2).loc[:, ::3] + stacked = unstacked.stack(future_stack=future_stack).stack( + future_stack=future_stack + ) + ymd_stacked = ymd.stack(future_stack=future_stack) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + stacked = stacked.dropna(how="all") + ymd_stacked = ymd_stacked.dropna(how="all") + tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index)) + + # stack with negative number + result = ymd.unstack(0).stack(-2, future_stack=future_stack) + expected = ymd.unstack(0).stack(0, future_stack=future_stack) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "idx, columns, exp_idx", + [ + [ + list("abab"), + ["1st", "2nd", "1st"], + MultiIndex( + levels=[["a", "b"], ["1st", "2nd"]], + codes=[np.tile(np.arange(2).repeat(3), 2), np.tile([0, 1, 0], 4)], + ), + ], + [ + MultiIndex.from_tuples((("a", 2), ("b", 1), ("a", 1), ("b", 2))), + ["1st", "2nd", "1st"], + MultiIndex( + levels=[["a", "b"], [1, 2], ["1st", "2nd"]], + codes=[ + np.tile(np.arange(2).repeat(3), 2), + np.repeat([1, 0, 1], [3, 6, 3]), + np.tile([0, 1, 0], 4), + ], + ), + ], + ], + ) + def test_stack_duplicate_index(self, idx, columns, exp_idx, future_stack): + # GH10417 + df = DataFrame( + np.arange(12).reshape(4, 3), + index=idx, + columns=columns, + ) + if future_stack: + msg = "Columns with duplicate values are not supported in stack" + with pytest.raises(ValueError, match=msg): + df.stack(future_stack=future_stack) + else: + result = df.stack(future_stack=future_stack) + expected = Series(np.arange(12), index=exp_idx) + tm.assert_series_equal(result, expected) + assert result.index.is_unique is False + li, ri = result.index, expected.index + tm.assert_index_equal(li, ri) + + def test_unstack_odd_failure(self, future_stack): + data = """day,time,smoker,sum,len +Fri,Dinner,No,8.25,3
+Fri,Dinner,Yes,27.03,9 +Fri,Lunch,No,3.0,1 +Fri,Lunch,Yes,13.68,6 +Sat,Dinner,No,139.63,45 +Sat,Dinner,Yes,120.77,42 +Sun,Dinner,No,180.57,57 +Sun,Dinner,Yes,66.82,19 +Thu,Dinner,No,3.0,1 +Thu,Lunch,No,117.32,44 +Thu,Lunch,Yes,51.51,17""" + + df = pd.read_csv(StringIO(data)).set_index(["day", "time", "smoker"]) + + # it works, #2100 + result = df.unstack(2) + + recons = result.stack(future_stack=future_stack) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + recons = recons.dropna(how="all") + tm.assert_frame_equal(recons, df) + + def test_stack_mixed_dtype(self, multiindex_dataframe_random_data, future_stack): + frame = multiindex_dataframe_random_data + + df = frame.T + df["foo", "four"] = "foo" + df = df.sort_index(level=1, axis=1) + + stacked = df.stack(future_stack=future_stack) + result = df["foo"].stack(future_stack=future_stack).sort_index() + tm.assert_series_equal(stacked["foo"], result, check_names=False) + assert result.name is None + assert stacked["bar"].dtype == np.float64 + + def test_unstack_bug(self, future_stack): + df = DataFrame( + { + "state": ["naive", "naive", "naive", "active", "active", "active"], + "exp": ["a", "b", "b", "b", "a", "a"], + "barcode": [1, 2, 3, 4, 1, 3], + "v": ["hi", "hi", "bye", "bye", "bye", "peace"], + "extra": np.arange(6.0), + } + ) + + result = df.groupby(["state", "exp", "barcode", "v"]).apply(len) + + unstacked = result.unstack() + restacked = unstacked.stack(future_stack=future_stack) + tm.assert_series_equal(restacked, result.reindex(restacked.index).astype(float)) + + def test_stack_unstack_preserve_names( + self, multiindex_dataframe_random_data, future_stack + ): + frame = multiindex_dataframe_random_data + + unstacked = frame.unstack() + assert unstacked.index.name == "first" + assert unstacked.columns.names == ["exp", "second"] + + restacked = unstacked.stack(future_stack=future_stack) + assert restacked.index.names == frame.index.names + + @pytest.mark.parametrize("method", ["stack", "unstack"]) + def test_stack_unstack_wrong_level_name( + self, method, multiindex_dataframe_random_data, future_stack + ): + # GH 18303 - wrong level name should raise + frame = multiindex_dataframe_random_data + + # A DataFrame with flat axes: + df = frame.loc["foo"] + + kwargs = {"future_stack": future_stack} if method == "stack" else {} + with pytest.raises(KeyError, match="does not match index name"): + getattr(df, method)("mistake", **kwargs) + + if method == "unstack": + # Same on a Series: + s = df.iloc[:, 0] + with pytest.raises(KeyError, match="does not match index name"): + getattr(s, method)("mistake", **kwargs) + + def test_unstack_level_name(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + result = frame.unstack("second") + expected = frame.unstack(level=1) + tm.assert_frame_equal(result, expected) + + def test_stack_level_name(self, multiindex_dataframe_random_data, future_stack): + frame = multiindex_dataframe_random_data + + unstacked = frame.unstack("second") + result = unstacked.stack("exp", future_stack=future_stack) + expected = frame.unstack().stack(0, future_stack=future_stack) + tm.assert_frame_equal(result, expected) + + result = frame.stack("exp", future_stack=future_stack) + expected = frame.stack(future_stack=future_stack) + tm.assert_series_equal(result, expected) + + def test_stack_unstack_multiple( + self, multiindex_year_month_day_dataframe_random_data, future_stack + ): + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = 
ymd.unstack(["year", "month"]) + expected = ymd.unstack("year").unstack("month") + tm.assert_frame_equal(unstacked, expected) + assert unstacked.columns.names == expected.columns.names + + # series + s = ymd["A"] + s_unstacked = s.unstack(["year", "month"]) + tm.assert_frame_equal(s_unstacked, expected["A"]) + + restacked = unstacked.stack(["year", "month"], future_stack=future_stack) + if future_stack: + # NA values in unstacked persist to restacked in version 3 + restacked = restacked.dropna(how="all") + restacked = restacked.swaplevel(0, 1).swaplevel(1, 2) + restacked = restacked.sort_index(level=0) + + tm.assert_frame_equal(restacked, ymd) + assert restacked.index.names == ymd.index.names + + # GH #451 + unstacked = ymd.unstack([1, 2]) + expected = ymd.unstack(1).unstack(1).dropna(axis=1, how="all") + tm.assert_frame_equal(unstacked, expected) + + unstacked = ymd.unstack([2, 1]) + expected = ymd.unstack(2).unstack(1).dropna(axis=1, how="all") + tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns]) + + def test_stack_names_and_numbers( + self, multiindex_year_month_day_dataframe_random_data, future_stack + ): + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack(["year", "month"]) + + # Can't use mixture of names and numbers to stack + with pytest.raises(ValueError, match="level should contain"): + unstacked.stack([0, "month"], future_stack=future_stack) + + def test_stack_multiple_out_of_bounds( + self, multiindex_year_month_day_dataframe_random_data, future_stack + ): + # nlevels == 3 + ymd = multiindex_year_month_day_dataframe_random_data + + unstacked = ymd.unstack(["year", "month"]) + + with pytest.raises(IndexError, match="Too many levels"): + unstacked.stack([2, 3], future_stack=future_stack) + with pytest.raises(IndexError, match="not a valid level number"): + unstacked.stack([-4, -3], future_stack=future_stack) + + def test_unstack_period_series(self): + # GH4342 + idx1 = pd.PeriodIndex( + ["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"], + freq="M", + name="period", + ) + idx2 = Index(["A", "B"] * 3, name="str") + value = [1, 2, 3, 4, 5, 6] + + idx = MultiIndex.from_arrays([idx1, idx2]) + s = Series(value, index=idx) + + result1 = s.unstack() + result2 = s.unstack(level=1) + result3 = s.unstack(level=0) + + e_idx = pd.PeriodIndex( + ["2013-01", "2013-02", "2013-03"], freq="M", name="period" + ) + expected = DataFrame( + {"A": [1, 3, 5], "B": [2, 4, 6]}, index=e_idx, columns=["A", "B"] + ) + expected.columns.name = "str" + + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, expected) + tm.assert_frame_equal(result3, expected.T) + + idx1 = pd.PeriodIndex( + ["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"], + freq="M", + name="period1", + ) + + idx2 = pd.PeriodIndex( + ["2013-12", "2013-11", "2013-10", "2013-09", "2013-08", "2013-07"], + freq="M", + name="period2", + ) + idx = MultiIndex.from_arrays([idx1, idx2]) + s = Series(value, index=idx) + + result1 = s.unstack() + result2 = s.unstack(level=1) + result3 = s.unstack(level=0) + + e_idx = pd.PeriodIndex( + ["2013-01", "2013-02", "2013-03"], freq="M", name="period1" + ) + e_cols = pd.PeriodIndex( + ["2013-07", "2013-08", "2013-09", "2013-10", "2013-11", "2013-12"], + freq="M", + name="period2", + ) + expected = DataFrame( + [ + [np.nan, np.nan, np.nan, np.nan, 2, 1], + [np.nan, np.nan, 4, 3, np.nan, np.nan], + [6, 5, np.nan, np.nan, np.nan, np.nan], + ], + index=e_idx, + columns=e_cols, + ) + + tm.assert_frame_equal(result1, 
expected) + tm.assert_frame_equal(result2, expected) + tm.assert_frame_equal(result3, expected.T) + + def test_unstack_period_frame(self): + # GH4342 + idx1 = pd.PeriodIndex( + ["2014-01", "2014-02", "2014-02", "2014-02", "2014-01", "2014-01"], + freq="M", + name="period1", + ) + idx2 = pd.PeriodIndex( + ["2013-12", "2013-12", "2014-02", "2013-10", "2013-10", "2014-02"], + freq="M", + name="period2", + ) + value = {"A": [1, 2, 3, 4, 5, 6], "B": [6, 5, 4, 3, 2, 1]} + idx = MultiIndex.from_arrays([idx1, idx2]) + df = DataFrame(value, index=idx) + + result1 = df.unstack() + result2 = df.unstack(level=1) + result3 = df.unstack(level=0) + + e_1 = pd.PeriodIndex(["2014-01", "2014-02"], freq="M", name="period1") + e_2 = pd.PeriodIndex( + ["2013-10", "2013-12", "2014-02", "2013-10", "2013-12", "2014-02"], + freq="M", + name="period2", + ) + e_cols = MultiIndex.from_arrays(["A A A B B B".split(), e_2]) + expected = DataFrame( + [[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]], index=e_1, columns=e_cols + ) + + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, expected) + + e_1 = pd.PeriodIndex( + ["2014-01", "2014-02", "2014-01", "2014-02"], freq="M", name="period1" + ) + e_2 = pd.PeriodIndex( + ["2013-10", "2013-12", "2014-02"], freq="M", name="period2" + ) + e_cols = MultiIndex.from_arrays(["A A B B".split(), e_1]) + expected = DataFrame( + [[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]], index=e_2, columns=e_cols + ) + + tm.assert_frame_equal(result3, expected) + + def test_stack_multiple_bug(self, future_stack): + # bug when some uniques are not present in the data GH#3170 + id_col = ([1] * 3) + ([2] * 3) + name = (["a"] * 3) + (["b"] * 3) + date = pd.to_datetime(["2013-01-03", "2013-01-04", "2013-01-05"] * 2) + var1 = np.random.default_rng(2).integers(0, 100, 6) + df = DataFrame({"ID": id_col, "NAME": name, "DATE": date, "VAR1": var1}) + + multi = df.set_index(["DATE", "ID"]) + multi.columns.name = "Params" + unst = multi.unstack("ID") + msg = re.escape("agg function failed [how->mean,dtype->object]") + with pytest.raises(TypeError, match=msg): + unst.resample("W-THU").mean() + down = unst.resample("W-THU").mean(numeric_only=True) + rs = down.stack("ID", future_stack=future_stack) + xp = ( + unst.loc[:, ["VAR1"]] + .resample("W-THU") + .mean() + .stack("ID", future_stack=future_stack) + ) + xp.columns.name = "Params" + tm.assert_frame_equal(rs, xp) + + def test_stack_dropna(self, future_stack): + # GH#3997 + df = DataFrame({"A": ["a1", "a2"], "B": ["b1", "b2"], "C": [1, 1]}) + df = df.set_index(["A", "B"]) + + dropna = False if not future_stack else lib.no_default + stacked = df.unstack().stack(dropna=dropna, future_stack=future_stack) + assert len(stacked) > len(stacked.dropna()) + + if future_stack: + with pytest.raises(ValueError, match="dropna must be unspecified"): + df.unstack().stack(dropna=True, future_stack=future_stack) + else: + stacked = df.unstack().stack(dropna=True, future_stack=future_stack) + tm.assert_frame_equal(stacked, stacked.dropna()) + + def test_unstack_multiple_hierarchical(self, future_stack): + df = DataFrame( + index=[ + [0, 0, 0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 0, 0, 1, 1], + [0, 1, 0, 1, 0, 1, 0, 1], + ], + columns=[[0, 0, 1, 1], [0, 1, 0, 1]], + ) + + df.index.names = ["a", "b", "c"] + df.columns.names = ["d", "e"] + + # it works! 
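+ # smoke test only: unstacking two index levels at once must not raise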
+ df.unstack(["b", "c"]) + + def test_unstack_sparse_keyspace(self): + # memory problems with naive impl GH#2278 + # Generate Long File & Test Pivot + NUM_ROWS = 1000 + + df = DataFrame( + { + "A": np.random.default_rng(2).integers(100, size=NUM_ROWS), + "B": np.random.default_rng(3).integers(300, size=NUM_ROWS), + "C": np.random.default_rng(4).integers(-7, 7, size=NUM_ROWS), + "D": np.random.default_rng(5).integers(-19, 19, size=NUM_ROWS), + "E": np.random.default_rng(6).integers(3000, size=NUM_ROWS), + "F": np.random.default_rng(7).standard_normal(NUM_ROWS), + } + ) + + idf = df.set_index(["A", "B", "C", "D", "E"]) + + # it works! is sufficient + idf.unstack("E") + + def test_unstack_unobserved_keys(self, future_stack): + # related to GH#2278 refactoring + levels = [[0, 1], [0, 1, 2, 3]] + codes = [[0, 0, 1, 1], [0, 2, 0, 2]] + + index = MultiIndex(levels, codes) + + df = DataFrame(np.random.default_rng(2).standard_normal((4, 2)), index=index) + + result = df.unstack() + assert len(result.columns) == 4 + + recons = result.stack(future_stack=future_stack) + tm.assert_frame_equal(recons, df) + + @pytest.mark.slow + def test_unstack_number_of_levels_larger_than_int32(self, monkeypatch): + # GH#20601 + # GH 26314: Change ValueError to PerformanceWarning + + class MockUnstacker(reshape_lib._Unstacker): + def __init__(self, *args, **kwargs) -> None: + # __init__ will raise the warning + super().__init__(*args, **kwargs) + raise Exception("Don't compute final result.") + + with monkeypatch.context() as m: + m.setattr(reshape_lib, "_Unstacker", MockUnstacker) + df = DataFrame( + np.random.default_rng(2).standard_normal((2**16, 2)), + index=[np.arange(2**16), np.arange(2**16)], + ) + msg = "The following operation may generate" + with tm.assert_produces_warning(PerformanceWarning, match=msg): + with pytest.raises(Exception, match="Don't compute final result."): + df.unstack() + + @pytest.mark.parametrize( + "levels", + itertools.chain.from_iterable( + itertools.product(itertools.permutations([0, 1, 2], width), repeat=2) + for width in [2, 3] + ), + ) + @pytest.mark.parametrize("stack_lev", range(2)) + @pytest.mark.parametrize("sort", [True, False]) + def test_stack_order_with_unsorted_levels( + self, levels, stack_lev, sort, future_stack + ): + # GH#16323 + # deep check for 1-row case + columns = MultiIndex(levels=levels, codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) + df = DataFrame(columns=columns, data=[range(4)]) + kwargs = {} if future_stack else {"sort": sort} + df_stacked = df.stack(stack_lev, future_stack=future_stack, **kwargs) + for row in df.index: + for col in df.columns: + expected = df.loc[row, col] + result_row = row, col[stack_lev] + result_col = col[1 - stack_lev] + result = df_stacked.loc[result_row, result_col] + assert result == expected + + def test_stack_order_with_unsorted_levels_multi_row(self, future_stack): + # GH#16323 + + # check multi-row case + mi = MultiIndex( + levels=[["A", "C", "B"], ["B", "A", "C"]], + codes=[np.repeat(range(3), 3), np.tile(range(3), 3)], + ) + df = DataFrame( + columns=mi, index=range(5), data=np.arange(5 * len(mi)).reshape(5, -1) + ) + assert all( + df.loc[row, col] + == df.stack(0, future_stack=future_stack).loc[(row, col[0]), col[1]] + for row in df.index + for col in df.columns + ) + + def test_stack_order_with_unsorted_levels_multi_row_2(self, future_stack): + # GH#53636 + levels = ((0, 1), (1, 0)) + stack_lev = 1 + columns = MultiIndex(levels=levels, codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) + df = DataFrame(columns=columns, data=[range(4)], index=[1, 0, 
2, 3]) + kwargs = {} if future_stack else {"sort": True} + result = df.stack(stack_lev, future_stack=future_stack, **kwargs) + expected_index = MultiIndex( + levels=[[0, 1, 2, 3], [0, 1]], + codes=[[1, 1, 0, 0, 2, 2, 3, 3], [1, 0, 1, 0, 1, 0, 1, 0]], + ) + expected = DataFrame( + { + 0: [0, 1, 0, 1, 0, 1, 0, 1], + 1: [2, 3, 2, 3, 2, 3, 2, 3], + }, + index=expected_index, + ) + tm.assert_frame_equal(result, expected) + + def test_stack_unstack_unordered_multiindex(self, future_stack): + # GH# 18265 + values = np.arange(5) + data = np.vstack( + [ + [f"b{x}" for x in values], # b0, b1, .. + [f"a{x}" for x in values], # a0, a1, .. + ] + ) + df = DataFrame(data.T, columns=["b", "a"]) + df.columns.name = "first" + second_level_dict = {"x": df} + multi_level_df = pd.concat(second_level_dict, axis=1) + multi_level_df.columns.names = ["second", "first"] + df = multi_level_df.reindex(sorted(multi_level_df.columns), axis=1) + result = df.stack(["first", "second"], future_stack=future_stack).unstack( + ["first", "second"] + ) + expected = DataFrame( + [["a0", "b0"], ["a1", "b1"], ["a2", "b2"], ["a3", "b3"], ["a4", "b4"]], + index=[0, 1, 2, 3, 4], + columns=MultiIndex.from_tuples( + [("a", "x"), ("b", "x")], names=["first", "second"] + ), + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_preserve_types( + self, multiindex_year_month_day_dataframe_random_data + ): + # GH#403 + ymd = multiindex_year_month_day_dataframe_random_data + ymd["E"] = "foo" + ymd["F"] = 2 + + unstacked = ymd.unstack("month") + assert unstacked["A", 1].dtype == np.float64 + assert unstacked["E", 1].dtype == np.object_ + assert unstacked["F", 1].dtype == np.float64 + + def test_unstack_group_index_overflow(self, future_stack): + codes = np.tile(np.arange(500), 2) + level = np.arange(500) + + index = MultiIndex( + levels=[level] * 8 + [[0, 1]], + codes=[codes] * 8 + [np.arange(2).repeat(500)], + ) + + s = Series(np.arange(1000), index=index) + result = s.unstack() + assert result.shape == (500, 2) + + # test roundtrip + stacked = result.stack(future_stack=future_stack) + tm.assert_series_equal(s, stacked.reindex(s.index)) + + # put it at beginning + index = MultiIndex( + levels=[[0, 1]] + [level] * 8, + codes=[np.arange(2).repeat(500)] + [codes] * 8, + ) + + s = Series(np.arange(1000), index=index) + result = s.unstack(0) + assert result.shape == (500, 2) + + # put it in middle + index = MultiIndex( + levels=[level] * 4 + [[0, 1]] + [level] * 4, + codes=([codes] * 4 + [np.arange(2).repeat(500)] + [codes] * 4), + ) + + s = Series(np.arange(1000), index=index) + result = s.unstack(4) + assert result.shape == (500, 2) + + def test_unstack_with_missing_int_cast_to_float(self, using_array_manager): + # https://github.com/pandas-dev/pandas/issues/37115 + df = DataFrame( + { + "a": ["A", "A", "B"], + "b": ["ca", "cb", "cb"], + "v": [10] * 3, + } + ).set_index(["a", "b"]) + + # add another int column to get 2 blocks + df["is_"] = 1 + if not using_array_manager: + assert len(df._mgr.blocks) == 2 + + result = df.unstack("b") + result[("is_", "ca")] = result[("is_", "ca")].fillna(0) + + expected = DataFrame( + [[10.0, 10.0, 1.0, 1.0], [np.nan, 10.0, 0.0, 1.0]], + index=Index(["A", "B"], dtype="object", name="a"), + columns=MultiIndex.from_tuples( + [("v", "ca"), ("v", "cb"), ("is_", "ca"), ("is_", "cb")], + names=[None, "b"], + ), + ) + if using_array_manager: + # INFO(ArrayManager) with ArrayManager preserve dtype where possible + expected[("v", "cb")] = expected[("v", "cb")].astype("int64") + expected[("is_", "cb")] = 
expected[("is_", "cb")].astype("int64") + tm.assert_frame_equal(result, expected) + + def test_unstack_with_level_has_nan(self): + # GH 37510 + df1 = DataFrame( + { + "L1": [1, 2, 3, 4], + "L2": [3, 4, 1, 2], + "L3": [1, 1, 1, 1], + "x": [1, 2, 3, 4], + } + ) + df1 = df1.set_index(["L1", "L2", "L3"]) + new_levels = ["n1", "n2", "n3", None] + df1.index = df1.index.set_levels(levels=new_levels, level="L1") + df1.index = df1.index.set_levels(levels=new_levels, level="L2") + + result = df1.unstack("L3")[("x", 1)].sort_index().index + expected = MultiIndex( + levels=[["n1", "n2", "n3", None], ["n1", "n2", "n3", None]], + codes=[[0, 1, 2, 3], [2, 3, 0, 1]], + names=["L1", "L2"], + ) + + tm.assert_index_equal(result, expected) + + def test_stack_nan_in_multiindex_columns(self, future_stack): + # GH#39481 + df = DataFrame( + np.zeros([1, 5]), + columns=MultiIndex.from_tuples( + [ + (0, None, None), + (0, 2, 0), + (0, 2, 1), + (0, 3, 0), + (0, 3, 1), + ], + ), + ) + result = df.stack(2, future_stack=future_stack) + if future_stack: + index = MultiIndex(levels=[[0], [0.0, 1.0]], codes=[[0, 0, 0], [-1, 0, 1]]) + columns = MultiIndex(levels=[[0], [2, 3]], codes=[[0, 0, 0], [-1, 0, 1]]) + else: + index = Index([(0, None), (0, 0), (0, 1)]) + columns = Index([(0, None), (0, 2), (0, 3)]) + expected = DataFrame( + [[0.0, np.nan, np.nan], [np.nan, 0.0, 0.0], [np.nan, 0.0, 0.0]], + index=index, + columns=columns, + ) + tm.assert_frame_equal(result, expected) + + def test_multi_level_stack_categorical(self, future_stack): + # GH 15239 + midx = MultiIndex.from_arrays( + [ + ["A"] * 2 + ["B"] * 2, + pd.Categorical(list("abab")), + pd.Categorical(list("ccdd")), + ] + ) + df = DataFrame(np.arange(8).reshape(2, 4), columns=midx) + result = df.stack([1, 2], future_stack=future_stack) + if future_stack: + expected = DataFrame( + [ + [0, np.nan], + [1, np.nan], + [np.nan, 2], + [np.nan, 3], + [4, np.nan], + [5, np.nan], + [np.nan, 6], + [np.nan, 7], + ], + columns=["A", "B"], + index=MultiIndex.from_arrays( + [ + [0] * 4 + [1] * 4, + pd.Categorical(list("abababab")), + pd.Categorical(list("ccddccdd")), + ] + ), + ) + else: + expected = DataFrame( + [ + [0, np.nan], + [np.nan, 2], + [1, np.nan], + [np.nan, 3], + [4, np.nan], + [np.nan, 6], + [5, np.nan], + [np.nan, 7], + ], + columns=["A", "B"], + index=MultiIndex.from_arrays( + [ + [0] * 4 + [1] * 4, + pd.Categorical(list("aabbaabb")), + pd.Categorical(list("cdcdcdcd")), + ] + ), + ) + tm.assert_frame_equal(result, expected) + + def test_stack_nan_level(self, future_stack): + # GH 9406 + df_nan = DataFrame( + np.arange(4).reshape(2, 2), + columns=MultiIndex.from_tuples( + [("A", np.nan), ("B", "b")], names=["Upper", "Lower"] + ), + index=Index([0, 1], name="Num"), + dtype=np.float64, + ) + result = df_nan.stack(future_stack=future_stack) + if future_stack: + index = MultiIndex( + levels=[[0, 1], [np.nan, "b"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], + names=["Num", "Lower"], + ) + else: + index = MultiIndex.from_tuples( + [(0, np.nan), (0, "b"), (1, np.nan), (1, "b")], names=["Num", "Lower"] + ) + expected = DataFrame( + [[0.0, np.nan], [np.nan, 1], [2.0, np.nan], [np.nan, 3.0]], + columns=Index(["A", "B"], name="Upper"), + index=index, + ) + tm.assert_frame_equal(result, expected) + + def test_unstack_categorical_columns(self): + # GH 14018 + idx = MultiIndex.from_product([["A"], [0, 1]]) + df = DataFrame({"cat": pd.Categorical(["a", "b"])}, index=idx) + result = df.unstack() + expected = DataFrame( + { + 0: pd.Categorical(["a"], categories=["a", "b"]), + 1: 
pd.Categorical(["b"], categories=["a", "b"]), + }, + index=["A"], + ) + expected.columns = MultiIndex.from_tuples([("cat", 0), ("cat", 1)]) + tm.assert_frame_equal(result, expected) + + def test_stack_unsorted(self, future_stack): + # GH 16925 + PAE = ["ITA", "FRA"] + VAR = ["A1", "A2"] + TYP = ["CRT", "DBT", "NET"] + MI = MultiIndex.from_product([PAE, VAR, TYP], names=["PAE", "VAR", "TYP"]) + + V = list(range(len(MI))) + DF = DataFrame(data=V, index=MI, columns=["VALUE"]) + + DF = DF.unstack(["VAR", "TYP"]) + DF.columns = DF.columns.droplevel(0) + DF.loc[:, ("A0", "NET")] = 9999 + + result = DF.stack(["VAR", "TYP"], future_stack=future_stack).sort_index() + expected = ( + DF.sort_index(axis=1) + .stack(["VAR", "TYP"], future_stack=future_stack) + .sort_index() + ) + tm.assert_series_equal(result, expected) + + def test_stack_nullable_dtype(self, future_stack): + # GH#43561 + columns = MultiIndex.from_product( + [["54511", "54515"], ["r", "t_mean"]], names=["station", "element"] + ) + index = Index([1, 2, 3], name="time") + + arr = np.array([[50, 226, 10, 215], [10, 215, 9, 220], [305, 232, 111, 220]]) + df = DataFrame(arr, columns=columns, index=index, dtype=pd.Int64Dtype()) + + result = df.stack("station", future_stack=future_stack) + + expected = ( + df.astype(np.int64) + .stack("station", future_stack=future_stack) + .astype(pd.Int64Dtype()) + ) + tm.assert_frame_equal(result, expected) + + # non-homogeneous case + df[df.columns[0]] = df[df.columns[0]].astype(pd.Float64Dtype()) + result = df.stack("station", future_stack=future_stack) + + expected = DataFrame( + { + "r": pd.array( + [50.0, 10.0, 10.0, 9.0, 305.0, 111.0], dtype=pd.Float64Dtype() + ), + "t_mean": pd.array( + [226, 215, 215, 220, 232, 220], dtype=pd.Int64Dtype() + ), + }, + index=MultiIndex.from_product([index, columns.levels[0]]), + ) + expected.columns.name = "element" + tm.assert_frame_equal(result, expected) + + def test_unstack_mixed_level_names(self): + # GH#48763 + arrays = [["a", "a"], [1, 2], ["red", "blue"]] + idx = MultiIndex.from_arrays(arrays, names=("x", 0, "y")) + df = DataFrame({"m": [1, 2]}, index=idx) + result = df.unstack("x") + expected = DataFrame( + [[1], [2]], + columns=MultiIndex.from_tuples([("m", "a")], names=[None, "x"]), + index=MultiIndex.from_tuples([(1, "red"), (2, "blue")], names=[0, "y"]), + ) + tm.assert_frame_equal(result, expected) + + +def test_stack_tuple_columns(future_stack): + # GH#54948 - test stack when the input has a non-MultiIndex with tuples + df = DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=[("a", 1), ("a", 2), ("b", 1)] + ) + result = df.stack(future_stack=future_stack) + expected = Series( + [1, 2, 3, 4, 5, 6, 7, 8, 9], + index=MultiIndex( + levels=[[0, 1, 2], [("a", 1), ("a", 2), ("b", 1)]], + codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], + ), + ) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_subclass.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_subclass.py new file mode 100644 index 00000000..ef78ae62 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_subclass.py @@ -0,0 +1,814 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" +) + + +@pytest.fixture() +def gpd_style_subclass_df(): + 
class SubclassedDataFrame(DataFrame): + @property + def _constructor(self): + return SubclassedDataFrame + + return SubclassedDataFrame({"a": [1, 2, 3]}) + + +class TestDataFrameSubclassing: + def test_frame_subclassing_and_slicing(self): + # Subclass frame and ensure it returns the right class on slicing it + # In reference to PR 9632 + + class CustomSeries(Series): + @property + def _constructor(self): + return CustomSeries + + def custom_series_function(self): + return "OK" + + class CustomDataFrame(DataFrame): + """ + Subclasses pandas DF, fills DF with simulation results, adds some + custom plotting functions. + """ + + def __init__(self, *args, **kw) -> None: + super().__init__(*args, **kw) + + @property + def _constructor(self): + return CustomDataFrame + + _constructor_sliced = CustomSeries + + def custom_frame_function(self): + return "OK" + + data = {"col1": range(10), "col2": range(10)} + cdf = CustomDataFrame(data) + + # Did we get back our own DF class? + assert isinstance(cdf, CustomDataFrame) + + # Do we get back our own Series class after selecting a column? + cdf_series = cdf.col1 + assert isinstance(cdf_series, CustomSeries) + assert cdf_series.custom_series_function() == "OK" + + # Do we get back our own DF class after slicing row-wise? + cdf_rows = cdf[1:5] + assert isinstance(cdf_rows, CustomDataFrame) + assert cdf_rows.custom_frame_function() == "OK" + + # Make sure sliced part of multi-index frame is custom class + mcol = MultiIndex.from_tuples([("A", "A"), ("A", "B")]) + cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol) + assert isinstance(cdf_multi["A"], CustomDataFrame) + + mcol = MultiIndex.from_tuples([("A", ""), ("B", "")]) + cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol) + assert isinstance(cdf_multi2["A"], CustomSeries) + + def test_dataframe_metadata(self): + df = tm.SubclassedDataFrame( + {"X": [1, 2, 3], "Y": [1, 2, 3]}, index=["a", "b", "c"] + ) + df.testattr = "XXX" + + assert df.testattr == "XXX" + assert df[["X"]].testattr == "XXX" + assert df.loc[["a", "b"], :].testattr == "XXX" + assert df.iloc[[0, 1], :].testattr == "XXX" + + # see gh-9776 + assert df.iloc[0:1, :].testattr == "XXX" + + # see gh-10553 + unpickled = tm.round_trip_pickle(df) + tm.assert_frame_equal(df, unpickled) + assert df._metadata == unpickled._metadata + assert df.testattr == unpickled.testattr + + def test_indexing_sliced(self): + # GH 11559 + df = tm.SubclassedDataFrame( + {"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["a", "b", "c"] + ) + res = df.loc[:, "X"] + exp = tm.SubclassedSeries([1, 2, 3], index=list("abc"), name="X") + tm.assert_series_equal(res, exp) + assert isinstance(res, tm.SubclassedSeries) + + res = df.iloc[:, 1] + exp = tm.SubclassedSeries([4, 5, 6], index=list("abc"), name="Y") + tm.assert_series_equal(res, exp) + assert isinstance(res, tm.SubclassedSeries) + + res = df.loc[:, "Z"] + exp = tm.SubclassedSeries([7, 8, 9], index=list("abc"), name="Z") + tm.assert_series_equal(res, exp) + assert isinstance(res, tm.SubclassedSeries) + + res = df.loc["a", :] + exp = tm.SubclassedSeries([1, 4, 7], index=list("XYZ"), name="a") + tm.assert_series_equal(res, exp) + assert isinstance(res, tm.SubclassedSeries) + + res = df.iloc[1, :] + exp = tm.SubclassedSeries([2, 5, 8], index=list("XYZ"), name="b") + tm.assert_series_equal(res, exp) + assert isinstance(res, tm.SubclassedSeries) + + res = df.loc["c", :] + exp = tm.SubclassedSeries([3, 6, 9], index=list("XYZ"), name="c") + tm.assert_series_equal(res, exp) + assert isinstance(res, 
tm.SubclassedSeries) + + def test_subclass_attr_err_propagation(self): + # GH 11808 + class A(DataFrame): + @property + def nonexistence(self): + return self.i_dont_exist + + with pytest.raises(AttributeError, match=".*i_dont_exist.*"): + A().nonexistence + + def test_subclass_align(self): + # GH 12983 + df1 = tm.SubclassedDataFrame( + {"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE") + ) + df2 = tm.SubclassedDataFrame( + {"c": [1, 2, 4], "d": [1, 2, 4]}, index=list("ABD") + ) + + res1, res2 = df1.align(df2, axis=0) + exp1 = tm.SubclassedDataFrame( + {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]}, + index=list("ABCDE"), + ) + exp2 = tm.SubclassedDataFrame( + {"c": [1, 2, np.nan, 4, np.nan], "d": [1, 2, np.nan, 4, np.nan]}, + index=list("ABCDE"), + ) + assert isinstance(res1, tm.SubclassedDataFrame) + tm.assert_frame_equal(res1, exp1) + assert isinstance(res2, tm.SubclassedDataFrame) + tm.assert_frame_equal(res2, exp2) + + res1, res2 = df1.a.align(df2.c) + assert isinstance(res1, tm.SubclassedSeries) + tm.assert_series_equal(res1, exp1.a) + assert isinstance(res2, tm.SubclassedSeries) + tm.assert_series_equal(res2, exp2.c) + + def test_subclass_align_combinations(self): + # GH 12983 + df = tm.SubclassedDataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE")) + s = tm.SubclassedSeries([1, 2, 4], index=list("ABD"), name="x") + + # frame + series + res1, res2 = df.align(s, axis=0) + exp1 = tm.SubclassedDataFrame( + {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]}, + index=list("ABCDE"), + ) + # name is lost when + exp2 = tm.SubclassedSeries( + [1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x" + ) + + assert isinstance(res1, tm.SubclassedDataFrame) + tm.assert_frame_equal(res1, exp1) + assert isinstance(res2, tm.SubclassedSeries) + tm.assert_series_equal(res2, exp2) + + # series + frame + res1, res2 = s.align(df) + assert isinstance(res1, tm.SubclassedSeries) + tm.assert_series_equal(res1, exp2) + assert isinstance(res2, tm.SubclassedDataFrame) + tm.assert_frame_equal(res2, exp1) + + def test_subclass_iterrows(self): + # GH 13977 + df = tm.SubclassedDataFrame({"a": [1]}) + for i, row in df.iterrows(): + assert isinstance(row, tm.SubclassedSeries) + tm.assert_series_equal(row, df.loc[i]) + + def test_subclass_stack(self): + # GH 15564 + df = tm.SubclassedDataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + index=["a", "b", "c"], + columns=["X", "Y", "Z"], + ) + + res = df.stack(future_stack=True) + exp = tm.SubclassedSeries( + [1, 2, 3, 4, 5, 6, 7, 8, 9], index=[list("aaabbbccc"), list("XYZXYZXYZ")] + ) + + tm.assert_series_equal(res, exp) + + def test_subclass_stack_multi(self): + # GH 15564 + df = tm.SubclassedDataFrame( + [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]], + index=MultiIndex.from_tuples( + list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"] + ), + columns=MultiIndex.from_tuples( + list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"] + ), + ) + + exp = tm.SubclassedDataFrame( + [ + [10, 12], + [11, 13], + [20, 22], + [21, 23], + [30, 32], + [31, 33], + [40, 42], + [41, 43], + ], + index=MultiIndex.from_tuples( + list(zip(list("AAAABBBB"), list("ccddccdd"), list("yzyzyzyz"))), + names=["aaa", "ccc", "yyy"], + ), + columns=Index(["W", "X"], name="www"), + ) + + res = df.stack(future_stack=True) + tm.assert_frame_equal(res, exp) + + res = df.stack("yyy", future_stack=True) + tm.assert_frame_equal(res, exp) + + exp = tm.SubclassedDataFrame( + [ + [10, 11], + [12, 13], + [20, 21], + [22, 23], + [30, 
31], + [32, 33], + [40, 41], + [42, 43], + ], + index=MultiIndex.from_tuples( + list(zip(list("AAAABBBB"), list("ccddccdd"), list("WXWXWXWX"))), + names=["aaa", "ccc", "www"], + ), + columns=Index(["y", "z"], name="yyy"), + ) + + res = df.stack("www", future_stack=True) + tm.assert_frame_equal(res, exp) + + def test_subclass_stack_multi_mixed(self): + # GH 15564 + df = tm.SubclassedDataFrame( + [ + [10, 11, 12.0, 13.0], + [20, 21, 22.0, 23.0], + [30, 31, 32.0, 33.0], + [40, 41, 42.0, 43.0], + ], + index=MultiIndex.from_tuples( + list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"] + ), + columns=MultiIndex.from_tuples( + list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"] + ), + ) + + exp = tm.SubclassedDataFrame( + [ + [10, 12.0], + [11, 13.0], + [20, 22.0], + [21, 23.0], + [30, 32.0], + [31, 33.0], + [40, 42.0], + [41, 43.0], + ], + index=MultiIndex.from_tuples( + list(zip(list("AAAABBBB"), list("ccddccdd"), list("yzyzyzyz"))), + names=["aaa", "ccc", "yyy"], + ), + columns=Index(["W", "X"], name="www"), + ) + + res = df.stack(future_stack=True) + tm.assert_frame_equal(res, exp) + + res = df.stack("yyy", future_stack=True) + tm.assert_frame_equal(res, exp) + + exp = tm.SubclassedDataFrame( + [ + [10.0, 11.0], + [12.0, 13.0], + [20.0, 21.0], + [22.0, 23.0], + [30.0, 31.0], + [32.0, 33.0], + [40.0, 41.0], + [42.0, 43.0], + ], + index=MultiIndex.from_tuples( + list(zip(list("AAAABBBB"), list("ccddccdd"), list("WXWXWXWX"))), + names=["aaa", "ccc", "www"], + ), + columns=Index(["y", "z"], name="yyy"), + ) + + res = df.stack("www", future_stack=True) + tm.assert_frame_equal(res, exp) + + def test_subclass_unstack(self): + # GH 15564 + df = tm.SubclassedDataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + index=["a", "b", "c"], + columns=["X", "Y", "Z"], + ) + + res = df.unstack() + exp = tm.SubclassedSeries( + [1, 4, 7, 2, 5, 8, 3, 6, 9], index=[list("XXXYYYZZZ"), list("abcabcabc")] + ) + + tm.assert_series_equal(res, exp) + + def test_subclass_unstack_multi(self): + # GH 15564 + df = tm.SubclassedDataFrame( + [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]], + index=MultiIndex.from_tuples( + list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"] + ), + columns=MultiIndex.from_tuples( + list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"] + ), + ) + + exp = tm.SubclassedDataFrame( + [[10, 20, 11, 21, 12, 22, 13, 23], [30, 40, 31, 41, 32, 42, 33, 43]], + index=Index(["A", "B"], name="aaa"), + columns=MultiIndex.from_tuples( + list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("cdcdcdcd"))), + names=["www", "yyy", "ccc"], + ), + ) + + res = df.unstack() + tm.assert_frame_equal(res, exp) + + res = df.unstack("ccc") + tm.assert_frame_equal(res, exp) + + exp = tm.SubclassedDataFrame( + [[10, 30, 11, 31, 12, 32, 13, 33], [20, 40, 21, 41, 22, 42, 23, 43]], + index=Index(["c", "d"], name="ccc"), + columns=MultiIndex.from_tuples( + list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("ABABABAB"))), + names=["www", "yyy", "aaa"], + ), + ) + + res = df.unstack("aaa") + tm.assert_frame_equal(res, exp) + + def test_subclass_unstack_multi_mixed(self): + # GH 15564 + df = tm.SubclassedDataFrame( + [ + [10, 11, 12.0, 13.0], + [20, 21, 22.0, 23.0], + [30, 31, 32.0, 33.0], + [40, 41, 42.0, 43.0], + ], + index=MultiIndex.from_tuples( + list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"] + ), + columns=MultiIndex.from_tuples( + list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"] + ), + ) + + exp = tm.SubclassedDataFrame( + [ + [10, 20, 11, 21, 12.0, 22.0, 
13.0, 23.0], + [30, 40, 31, 41, 32.0, 42.0, 33.0, 43.0], + ], + index=Index(["A", "B"], name="aaa"), + columns=MultiIndex.from_tuples( + list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("cdcdcdcd"))), + names=["www", "yyy", "ccc"], + ), + ) + + res = df.unstack() + tm.assert_frame_equal(res, exp) + + res = df.unstack("ccc") + tm.assert_frame_equal(res, exp) + + exp = tm.SubclassedDataFrame( + [ + [10, 30, 11, 31, 12.0, 32.0, 13.0, 33.0], + [20, 40, 21, 41, 22.0, 42.0, 23.0, 43.0], + ], + index=Index(["c", "d"], name="ccc"), + columns=MultiIndex.from_tuples( + list(zip(list("WWWWXXXX"), list("yyzzyyzz"), list("ABABABAB"))), + names=["www", "yyy", "aaa"], + ), + ) + + res = df.unstack("aaa") + tm.assert_frame_equal(res, exp) + + def test_subclass_pivot(self): + # GH 15564 + df = tm.SubclassedDataFrame( + { + "index": ["A", "B", "C", "C", "B", "A"], + "columns": ["One", "One", "One", "Two", "Two", "Two"], + "values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0], + } + ) + + pivoted = df.pivot(index="index", columns="columns", values="values") + + expected = tm.SubclassedDataFrame( + { + "One": {"A": 1.0, "B": 2.0, "C": 3.0}, + "Two": {"A": 1.0, "B": 2.0, "C": 3.0}, + } + ) + + expected.index.name, expected.columns.name = "index", "columns" + + tm.assert_frame_equal(pivoted, expected) + + def test_subclassed_melt(self): + # GH 15564 + cheese = tm.SubclassedDataFrame( + { + "first": ["John", "Mary"], + "last": ["Doe", "Bo"], + "height": [5.5, 6.0], + "weight": [130, 150], + } + ) + + melted = pd.melt(cheese, id_vars=["first", "last"]) + + expected = tm.SubclassedDataFrame( + [ + ["John", "Doe", "height", 5.5], + ["Mary", "Bo", "height", 6.0], + ["John", "Doe", "weight", 130], + ["Mary", "Bo", "weight", 150], + ], + columns=["first", "last", "variable", "value"], + ) + + tm.assert_frame_equal(melted, expected) + + def test_subclassed_wide_to_long(self): + # GH 9762 + + x = np.random.default_rng(2).standard_normal(3) + df = tm.SubclassedDataFrame( + { + "A1970": {0: "a", 1: "b", 2: "c"}, + "A1980": {0: "d", 1: "e", 2: "f"}, + "B1970": {0: 2.5, 1: 1.2, 2: 0.7}, + "B1980": {0: 3.2, 1: 1.3, 2: 0.1}, + "X": dict(zip(range(3), x)), + } + ) + + df["id"] = df.index + exp_data = { + "X": x.tolist() + x.tolist(), + "A": ["a", "b", "c", "d", "e", "f"], + "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1], + "year": [1970, 1970, 1970, 1980, 1980, 1980], + "id": [0, 1, 2, 0, 1, 2], + } + expected = tm.SubclassedDataFrame(exp_data) + expected = expected.set_index(["id", "year"])[["X", "A", "B"]] + long_frame = pd.wide_to_long(df, ["A", "B"], i="id", j="year") + + tm.assert_frame_equal(long_frame, expected) + + def test_subclassed_apply(self): + # GH 19822 + + def check_row_subclass(row): + assert isinstance(row, tm.SubclassedSeries) + + def stretch(row): + if row["variable"] == "height": + row["value"] += 0.5 + return row + + df = tm.SubclassedDataFrame( + [ + ["John", "Doe", "height", 5.5], + ["Mary", "Bo", "height", 6.0], + ["John", "Doe", "weight", 130], + ["Mary", "Bo", "weight", 150], + ], + columns=["first", "last", "variable", "value"], + ) + + df.apply(lambda x: check_row_subclass(x)) + df.apply(lambda x: check_row_subclass(x), axis=1) + + expected = tm.SubclassedDataFrame( + [ + ["John", "Doe", "height", 6.0], + ["Mary", "Bo", "height", 6.5], + ["John", "Doe", "weight", 130], + ["Mary", "Bo", "weight", 150], + ], + columns=["first", "last", "variable", "value"], + ) + + result = df.apply(lambda x: stretch(x), axis=1) + assert isinstance(result, tm.SubclassedDataFrame) + tm.assert_frame_equal(result, expected) + + expected = 
tm.SubclassedDataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) + + result = df.apply(lambda x: tm.SubclassedSeries([1, 2, 3]), axis=1) + assert isinstance(result, tm.SubclassedDataFrame) + tm.assert_frame_equal(result, expected) + + result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="expand") + assert isinstance(result, tm.SubclassedDataFrame) + tm.assert_frame_equal(result, expected) + + expected = tm.SubclassedSeries([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) + + result = df.apply(lambda x: [1, 2, 3], axis=1) + assert not isinstance(result, tm.SubclassedDataFrame) + tm.assert_series_equal(result, expected) + + def test_subclassed_reductions(self, all_reductions): + # GH 25596 + + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result = getattr(df, all_reductions)() + assert isinstance(result, tm.SubclassedSeries) + + def test_subclassed_count(self): + df = tm.SubclassedDataFrame( + { + "Person": ["John", "Myla", "Lewis", "John", "Myla"], + "Age": [24.0, np.nan, 21.0, 33, 26], + "Single": [False, True, True, True, False], + } + ) + result = df.count() + assert isinstance(result, tm.SubclassedSeries) + + df = tm.SubclassedDataFrame({"A": [1, 0, 3], "B": [0, 5, 6], "C": [7, 8, 0]}) + result = df.count() + assert isinstance(result, tm.SubclassedSeries) + + df = tm.SubclassedDataFrame( + [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]], + index=MultiIndex.from_tuples( + list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"] + ), + columns=MultiIndex.from_tuples( + list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"] + ), + ) + result = df.count() + assert isinstance(result, tm.SubclassedSeries) + + df = tm.SubclassedDataFrame() + result = df.count() + assert isinstance(result, tm.SubclassedSeries) + + def test_isin(self): + df = tm.SubclassedDataFrame( + {"num_legs": [2, 4], "num_wings": [2, 0]}, index=["falcon", "dog"] + ) + result = df.isin([0, 2]) + assert isinstance(result, tm.SubclassedDataFrame) + + def test_duplicated(self): + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result = df.duplicated() + assert isinstance(result, tm.SubclassedSeries) + + df = tm.SubclassedDataFrame() + result = df.duplicated() + assert isinstance(result, tm.SubclassedSeries) + + @pytest.mark.parametrize("idx_method", ["idxmax", "idxmin"]) + def test_idx(self, idx_method): + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result = getattr(df, idx_method)() + assert isinstance(result, tm.SubclassedSeries) + + def test_dot(self): + df = tm.SubclassedDataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) + s = tm.SubclassedSeries([1, 1, 2, 1]) + result = df.dot(s) + assert isinstance(result, tm.SubclassedSeries) + + df = tm.SubclassedDataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) + s = tm.SubclassedDataFrame([1, 1, 2, 1]) + result = df.dot(s) + assert isinstance(result, tm.SubclassedDataFrame) + + def test_memory_usage(self): + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result = df.memory_usage() + assert isinstance(result, tm.SubclassedSeries) + + result = df.memory_usage(index=False) + assert isinstance(result, tm.SubclassedSeries) + + def test_corrwith(self): + pytest.importorskip("scipy") + index = ["a", "b", "c", "d", "e"] + columns = ["one", "two", "three", "four"] + df1 = tm.SubclassedDataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + index=index, + columns=columns, + ) + df2 = tm.SubclassedDataFrame( + 
np.random.default_rng(2).standard_normal((4, 4)), + index=index[:4], + columns=columns, + ) + correls = df1.corrwith(df2, axis=1, drop=True, method="kendall") + + assert isinstance(correls, (tm.SubclassedSeries)) + + def test_asof(self): + N = 3 + rng = pd.date_range("1/1/1990", periods=N, freq="53s") + df = tm.SubclassedDataFrame( + { + "A": [np.nan, np.nan, np.nan], + "B": [np.nan, np.nan, np.nan], + "C": [np.nan, np.nan, np.nan], + }, + index=rng, + ) + + result = df.asof(rng[-2:]) + assert isinstance(result, tm.SubclassedDataFrame) + + result = df.asof(rng[-2]) + assert isinstance(result, tm.SubclassedSeries) + + result = df.asof("1989-12-31") + assert isinstance(result, tm.SubclassedSeries) + + def test_idxmin_preserves_subclass(self): + # GH 28330 + + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result = df.idxmin() + assert isinstance(result, tm.SubclassedSeries) + + def test_idxmax_preserves_subclass(self): + # GH 28330 + + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result = df.idxmax() + assert isinstance(result, tm.SubclassedSeries) + + def test_convert_dtypes_preserves_subclass(self, gpd_style_subclass_df): + # GH 43668 + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result = df.convert_dtypes() + assert isinstance(result, tm.SubclassedDataFrame) + + result = gpd_style_subclass_df.convert_dtypes() + assert isinstance(result, type(gpd_style_subclass_df)) + + def test_astype_preserves_subclass(self): + # GH#40810 + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + + result = df.astype({"A": np.int64, "B": np.int32, "C": np.float64}) + assert isinstance(result, tm.SubclassedDataFrame) + + def test_equals_subclass(self): + # https://github.com/pandas-dev/pandas/pull/34402 + # allow subclass in both directions + df1 = DataFrame({"a": [1, 2, 3]}) + df2 = tm.SubclassedDataFrame({"a": [1, 2, 3]}) + assert df1.equals(df2) + assert df2.equals(df1) + + def test_replace_list_method(self): + # https://github.com/pandas-dev/pandas/pull/46018 + df = tm.SubclassedDataFrame({"A": [0, 1, 2]}) + msg = "The 'method' keyword in SubclassedDataFrame.replace is deprecated" + with tm.assert_produces_warning( + FutureWarning, match=msg, raise_on_extra_warnings=False + ): + result = df.replace([1, 2], method="ffill") + expected = tm.SubclassedDataFrame({"A": [0, 0, 0]}) + assert isinstance(result, tm.SubclassedDataFrame) + tm.assert_frame_equal(result, expected) + + +class MySubclassWithMetadata(DataFrame): + _metadata = ["my_metadata"] + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + my_metadata = kwargs.pop("my_metadata", None) + if args and isinstance(args[0], MySubclassWithMetadata): + my_metadata = args[0].my_metadata # type: ignore[has-type] + self.my_metadata = my_metadata + + @property + def _constructor(self): + return MySubclassWithMetadata + + +def test_constructor_with_metadata(): + # https://github.com/pandas-dev/pandas/pull/54922 + # https://github.com/pandas-dev/pandas/issues/55120 + df = MySubclassWithMetadata( + np.random.default_rng(2).random((5, 3)), columns=["A", "B", "C"] + ) + subset = df[["A", "B"]] + assert isinstance(subset, MySubclassWithMetadata) + + +class SimpleDataFrameSubClass(DataFrame): + """A subclass of DataFrame that does not define a constructor.""" + + +class SimpleSeriesSubClass(Series): + """A subclass of Series that does not define a constructor.""" + + +class TestSubclassWithoutConstructor: 
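# ---- editor's note: illustrative sketch, not part of the vendored pandas
# test code. The two subclasses above omit `_constructor`, the hook pandas
# consults when building derived objects; without an override, results fall
# back to plain DataFrame/Series, which is exactly what the tests in this
# class assert. The class names below are hypothetical.
import pandas as pd

class PreservingFrame(pd.DataFrame):
    @property
    def _constructor(self):
        # opting in: derived objects are rebuilt as PreservingFrame
        return PreservingFrame

class PlainFrame(pd.DataFrame):
    pass  # no _constructor override -> results degrade to pd.DataFrame

assert type(PreservingFrame({"a": [1, 2]}).copy()) is PreservingFrame
assert type(PlainFrame({"a": [1, 2]}).copy()) is pd.DataFrame
# ---- end editor's note ----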
+ def test_copy_df(self): + expected = DataFrame({"a": [1, 2, 3]}) + result = SimpleDataFrameSubClass(expected).copy() + + assert ( + type(result) is DataFrame + ) # assert_frame_equal only checks isinstance(lhs, type(rhs)) + tm.assert_frame_equal(result, expected) + + def test_copy_series(self): + expected = Series([1, 2, 3]) + result = SimpleSeriesSubClass(expected).copy() + + tm.assert_series_equal(result, expected) + + def test_series_to_frame(self): + orig = Series([1, 2, 3]) + expected = orig.to_frame() + result = SimpleSeriesSubClass(orig).to_frame() + + assert ( + type(result) is DataFrame + ) # assert_frame_equal only checks isinstance(lhs, type(rhs)) + tm.assert_frame_equal(result, expected) + + def test_groupby(self): + df = SimpleDataFrameSubClass(DataFrame({"a": [1, 2, 3]})) + + for _, v in df.groupby("a"): + assert type(v) is DataFrame diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_ufunc.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_ufunc.py new file mode 100644 index 00000000..305c0f8b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_ufunc.py @@ -0,0 +1,311 @@ +from functools import partial +import re + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.api.types import is_extension_array_dtype + +dtypes = [ + "int64", + "Int64", + {"A": "int64", "B": "Int64"}, +] + + +@pytest.mark.parametrize("dtype", dtypes) +def test_unary_unary(dtype): + # unary input, unary output + values = np.array([[-1, -1], [1, 1]], dtype="int64") + df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype) + result = np.positive(df) + expected = pd.DataFrame( + np.positive(values), index=df.index, columns=df.columns + ).astype(dtype) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", dtypes) +def test_unary_binary(request, dtype): + # unary input, binary output + if is_extension_array_dtype(dtype) or isinstance(dtype, dict): + request.node.add_marker( + pytest.mark.xfail( + reason="Extension / mixed with multiple outputs not implemented." + ) + ) + + values = np.array([[-1, -1], [1, 1]], dtype="int64") + df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype) + result_pandas = np.modf(df) + assert isinstance(result_pandas, tuple) + assert len(result_pandas) == 2 + expected_numpy = np.modf(values) + + for result, b in zip(result_pandas, expected_numpy): + expected = pd.DataFrame(b, index=df.index, columns=df.columns) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", dtypes) +def test_binary_input_dispatch_binop(dtype): + # binop ufuncs are dispatched to our dunder methods. 
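# ---- editor's note: illustrative sketch, not part of the vendored pandas
# test code. "Dispatched to our dunder methods" means DataFrame's
# __array_ufunc__ routes binary ufuncs through the ordinary operators, so
# np.add(df, df) and df + df agree and extension dtypes survive:
import numpy as np
import pandas as pd

df = pd.DataFrame({"A": [1, 2]}, dtype="Int64")   # nullable extension dtype
via_ufunc = np.add(df, df)
via_operator = df + df
assert via_ufunc.equals(via_operator)
assert via_ufunc.dtypes["A"] == "Int64"           # dtype preserved by dispatch
# ---- end editor's note ----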
+ values = np.array([[-1, -1], [1, 1]], dtype="int64") + df = pd.DataFrame(values, columns=["A", "B"], index=["a", "b"]).astype(dtype=dtype) + result = np.add(df, df) + expected = pd.DataFrame( + np.add(values, values), index=df.index, columns=df.columns + ).astype(dtype) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "func,arg,expected", + [ + (np.add, 1, [2, 3, 4, 5]), + ( + partial(np.add, where=[[False, True], [True, False]]), + np.array([[1, 1], [1, 1]]), + [0, 3, 4, 0], + ), + (np.power, np.array([[1, 1], [2, 2]]), [1, 2, 9, 16]), + (np.subtract, 2, [-1, 0, 1, 2]), + ( + partial(np.negative, where=np.array([[False, True], [True, False]])), + None, + [0, -2, -3, 0], + ), + ], +) +def test_ufunc_passes_args(func, arg, expected): + # GH#40662 + arr = np.array([[1, 2], [3, 4]]) + df = pd.DataFrame(arr) + result_inplace = np.zeros_like(arr) + # 1-argument ufunc + if arg is None: + result = func(df, out=result_inplace) + else: + result = func(df, arg, out=result_inplace) + + expected = np.array(expected).reshape(2, 2) + tm.assert_numpy_array_equal(result_inplace, expected) + + expected = pd.DataFrame(expected) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype_a", dtypes) +@pytest.mark.parametrize("dtype_b", dtypes) +def test_binary_input_aligns_columns(request, dtype_a, dtype_b): + if ( + is_extension_array_dtype(dtype_a) + or isinstance(dtype_a, dict) + or is_extension_array_dtype(dtype_b) + or isinstance(dtype_b, dict) + ): + request.node.add_marker( + pytest.mark.xfail( + reason="Extension / mixed with multiple inputs not implemented." + ) + ) + + df1 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}).astype(dtype_a) + + if isinstance(dtype_a, dict) and isinstance(dtype_b, dict): + dtype_b = dtype_b.copy() + dtype_b["C"] = dtype_b.pop("B") + df2 = pd.DataFrame({"A": [1, 2], "C": [3, 4]}).astype(dtype_b) + # As of 2.0, align first before applying the ufunc + result = np.heaviside(df1, df2) + expected = np.heaviside( + np.array([[1, 3, np.nan], [2, 4, np.nan]]), + np.array([[1, np.nan, 3], [2, np.nan, 4]]), + ) + expected = pd.DataFrame(expected, index=[0, 1], columns=["A", "B", "C"]) + tm.assert_frame_equal(result, expected) + + result = np.heaviside(df1, df2.values) + expected = pd.DataFrame([[1.0, 1.0], [1.0, 1.0]], columns=["A", "B"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", dtypes) +def test_binary_input_aligns_index(request, dtype): + if is_extension_array_dtype(dtype) or isinstance(dtype, dict): + request.node.add_marker( + pytest.mark.xfail( + reason="Extension / mixed with multiple inputs not implemented." + ) + ) + df1 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).astype(dtype) + df2 = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "c"]).astype(dtype) + result = np.heaviside(df1, df2) + expected = np.heaviside( + np.array([[1, 3], [3, 4], [np.nan, np.nan]]), + np.array([[1, 3], [np.nan, np.nan], [3, 4]]), + ) + # TODO(FloatArray): this will be Float64Dtype. 
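# ---- editor's note: illustrative sketch, not part of the vendored pandas
# test code. It distils the rule this test encodes: since pandas 2.0, frame
# operands are aligned *before* the ufunc runs, so labels unique to one
# operand come back as NaN over the union index:
import numpy as np
import pandas as pd

a = pd.DataFrame({"A": [1.0, 2.0]}, index=["x", "y"])
b = pd.DataFrame({"A": [3.0, 4.0]}, index=["x", "z"])
out = np.add(a, b)
assert list(out.index) == ["x", "y", "z"]      # union of both indexes
assert out.loc["x", "A"] == 4.0                # the only fully aligned label
assert out.loc[["y", "z"], "A"].isna().all()   # one-sided labels -> NaN
# ---- end editor's note ----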
+ expected = pd.DataFrame(expected, index=["a", "b", "c"], columns=["A", "B"]) + tm.assert_frame_equal(result, expected) + + result = np.heaviside(df1, df2.values) + expected = pd.DataFrame( + [[1.0, 1.0], [1.0, 1.0]], columns=["A", "B"], index=["a", "b"] + ) + tm.assert_frame_equal(result, expected) + + +def test_binary_frame_series_raises(): + # We don't currently implement + df = pd.DataFrame({"A": [1, 2]}) + with pytest.raises(NotImplementedError, match="logaddexp"): + np.logaddexp(df, df["A"]) + + with pytest.raises(NotImplementedError, match="logaddexp"): + np.logaddexp(df["A"], df) + + +def test_unary_accumulate_axis(): + # https://github.com/pandas-dev/pandas/issues/39259 + df = pd.DataFrame({"a": [1, 3, 2, 4]}) + result = np.maximum.accumulate(df) + expected = pd.DataFrame({"a": [1, 3, 3, 4]}) + tm.assert_frame_equal(result, expected) + + df = pd.DataFrame({"a": [1, 3, 2, 4], "b": [0.1, 4.0, 3.0, 2.0]}) + result = np.maximum.accumulate(df) + # in theory could preserve int dtype for default axis=0 + expected = pd.DataFrame({"a": [1.0, 3.0, 3.0, 4.0], "b": [0.1, 4.0, 4.0, 4.0]}) + tm.assert_frame_equal(result, expected) + + result = np.maximum.accumulate(df, axis=0) + tm.assert_frame_equal(result, expected) + + result = np.maximum.accumulate(df, axis=1) + expected = pd.DataFrame({"a": [1.0, 3.0, 2.0, 4.0], "b": [1.0, 4.0, 3.0, 4.0]}) + tm.assert_frame_equal(result, expected) + + +def test_frame_outer_disallowed(): + df = pd.DataFrame({"A": [1, 2]}) + with pytest.raises(NotImplementedError, match=""): + # deprecation enforced in 2.0 + np.subtract.outer(df, df) + + +def test_alignment_deprecation_enforced(): + # Enforced in 2.0 + # https://github.com/pandas-dev/pandas/issues/39184 + df1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df2 = pd.DataFrame({"b": [1, 2, 3], "c": [4, 5, 6]}) + s1 = pd.Series([1, 2], index=["a", "b"]) + s2 = pd.Series([1, 2], index=["b", "c"]) + + # binary dataframe / dataframe + expected = pd.DataFrame({"a": [2, 4, 6], "b": [8, 10, 12]}) + + with tm.assert_produces_warning(None): + # aligned -> no warning! + result = np.add(df1, df1) + tm.assert_frame_equal(result, expected) + + result = np.add(df1, df2.values) + tm.assert_frame_equal(result, expected) + + result = np.add(df1, df2) + expected = pd.DataFrame({"a": [np.nan] * 3, "b": [5, 7, 9], "c": [np.nan] * 3}) + tm.assert_frame_equal(result, expected) + + result = np.add(df1.values, df2) + expected = pd.DataFrame({"b": [2, 4, 6], "c": [8, 10, 12]}) + tm.assert_frame_equal(result, expected) + + # binary dataframe / series + expected = pd.DataFrame({"a": [2, 3, 4], "b": [6, 7, 8]}) + + with tm.assert_produces_warning(None): + # aligned -> no warning! + result = np.add(df1, s1) + tm.assert_frame_equal(result, expected) + + result = np.add(df1, s2.values) + tm.assert_frame_equal(result, expected) + + expected = pd.DataFrame( + {"a": [np.nan] * 3, "b": [5.0, 6.0, 7.0], "c": [np.nan] * 3} + ) + result = np.add(df1, s2) + tm.assert_frame_equal(result, expected) + + msg = "Cannot apply ufunc to mixed DataFrame and Series inputs." 
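# ---- editor's note: illustrative sketch, not part of the vendored pandas
# test code. A standalone reproduction of the mixed-input rule asserted just
# below: there is no defined alignment between a Series and a DataFrame
# inside a ufunc call, so pandas refuses rather than guessing:
import numpy as np
import pandas as pd

frame = pd.DataFrame({"a": [1, 2]})
series = pd.Series([1, 2], index=["a", "b"])
try:
    np.add(series, frame)
except NotImplementedError as err:
    assert "Cannot apply ufunc" in str(err)
# ---- end editor's note ----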
+ with pytest.raises(NotImplementedError, match=msg): + np.add(s2, df1) + + +def test_alignment_deprecation_many_inputs_enforced(): + # Enforced in 2.0 + # https://github.com/pandas-dev/pandas/issues/39184 + # test that the deprecation also works with > 2 inputs -> using a numba + # written ufunc for this because numpy itself doesn't have such ufuncs + numba = pytest.importorskip("numba") + + @numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)]) + def my_ufunc(x, y, z): + return x + y + z + + df1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df2 = pd.DataFrame({"b": [1, 2, 3], "c": [4, 5, 6]}) + df3 = pd.DataFrame({"a": [1, 2, 3], "c": [4, 5, 6]}) + + result = my_ufunc(df1, df2, df3) + expected = pd.DataFrame(np.full((3, 3), np.nan), columns=["a", "b", "c"]) + tm.assert_frame_equal(result, expected) + + # all aligned -> no warning + with tm.assert_produces_warning(None): + result = my_ufunc(df1, df1, df1) + expected = pd.DataFrame([[3.0, 12.0], [6.0, 15.0], [9.0, 18.0]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + # mixed frame / arrays + msg = ( + r"operands could not be broadcast together with shapes \(3,3\) \(3,3\) \(3,2\)" + ) + with pytest.raises(ValueError, match=msg): + my_ufunc(df1, df2, df3.values) + + # single frame -> no warning + with tm.assert_produces_warning(None): + result = my_ufunc(df1, df2.values, df3.values) + tm.assert_frame_equal(result, expected) + + # takes indices of first frame + msg = ( + r"operands could not be broadcast together with shapes \(3,2\) \(3,3\) \(3,3\)" + ) + with pytest.raises(ValueError, match=msg): + my_ufunc(df1.values, df2, df3) + + +def test_array_ufuncs_for_many_arguments(): + # GH39853 + def add3(x, y, z): + return x + y + z + + ufunc = np.frompyfunc(add3, 3, 1) + df = pd.DataFrame([[1, 2], [3, 4]]) + + result = ufunc(df, df, 1) + expected = pd.DataFrame([[3, 5], [7, 9]], dtype=object) + tm.assert_frame_equal(result, expected) + + ser = pd.Series([1, 2]) + msg = ( + "Cannot apply ufunc " + "to mixed DataFrame and Series inputs." 
+ ) + with pytest.raises(NotImplementedError, match=re.escape(msg)): + ufunc(df, df, ser) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_unary.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_unary.py new file mode 100644 index 00000000..5e29d3c8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_unary.py @@ -0,0 +1,194 @@ +from decimal import Decimal + +import numpy as np +import pytest + +from pandas.compat.numpy import np_version_gte1p25 + +import pandas as pd +import pandas._testing as tm + + +class TestDataFrameUnaryOperators: + # __pos__, __neg__, __invert__ + + @pytest.mark.parametrize( + "df,expected", + [ + (pd.DataFrame({"a": [-1, 1]}), pd.DataFrame({"a": [1, -1]})), + (pd.DataFrame({"a": [False, True]}), pd.DataFrame({"a": [True, False]})), + ( + pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}), + pd.DataFrame({"a": pd.Series(pd.to_timedelta([1, -1]))}), + ), + ], + ) + def test_neg_numeric(self, df, expected): + tm.assert_frame_equal(-df, expected) + tm.assert_series_equal(-df["a"], expected["a"]) + + @pytest.mark.parametrize( + "df, expected", + [ + (np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)), + ([Decimal("1.0"), Decimal("2.0")], [Decimal("-1.0"), Decimal("-2.0")]), + ], + ) + def test_neg_object(self, df, expected): + # GH#21380 + df = pd.DataFrame({"a": df}) + expected = pd.DataFrame({"a": expected}) + tm.assert_frame_equal(-df, expected) + tm.assert_series_equal(-df["a"], expected["a"]) + + @pytest.mark.parametrize( + "df", + [ + pd.DataFrame({"a": ["a", "b"]}), + pd.DataFrame({"a": pd.to_datetime(["2017-01-22", "1970-01-01"])}), + ], + ) + def test_neg_raises(self, df): + msg = ( + "bad operand type for unary -: 'str'|" + r"bad operand type for unary -: 'DatetimeArray'" + ) + with pytest.raises(TypeError, match=msg): + (-df) + with pytest.raises(TypeError, match=msg): + (-df["a"]) + + def test_invert(self, float_frame): + df = float_frame + + tm.assert_frame_equal(-(df < 0), ~(df < 0)) + + def test_invert_mixed(self): + shape = (10, 5) + df = pd.concat( + [ + pd.DataFrame(np.zeros(shape, dtype="bool")), + pd.DataFrame(np.zeros(shape, dtype=int)), + ], + axis=1, + ignore_index=True, + ) + result = ~df + expected = pd.concat( + [ + pd.DataFrame(np.ones(shape, dtype="bool")), + pd.DataFrame(-np.ones(shape, dtype=int)), + ], + axis=1, + ignore_index=True, + ) + tm.assert_frame_equal(result, expected) + + def test_invert_empty_not_input(self): + # GH#51032 + df = pd.DataFrame() + result = ~df + tm.assert_frame_equal(df, result) + assert df is not result + + @pytest.mark.parametrize( + "df", + [ + pd.DataFrame({"a": [-1, 1]}), + pd.DataFrame({"a": [False, True]}), + pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}), + ], + ) + def test_pos_numeric(self, df): + # GH#16073 + tm.assert_frame_equal(+df, df) + tm.assert_series_equal(+df["a"], df["a"]) + + @pytest.mark.parametrize( + "df", + [ + pd.DataFrame({"a": np.array([-1, 2], dtype=object)}), + pd.DataFrame({"a": [Decimal("-1.0"), Decimal("2.0")]}), + ], + ) + def test_pos_object(self, df): + # GH#21380 + tm.assert_frame_equal(+df, df) + tm.assert_series_equal(+df["a"], df["a"]) + + @pytest.mark.parametrize( + "df", + [ + pytest.param( + pd.DataFrame({"a": ["a", "b"]}), + # filterwarnings removable once min numpy version is 1.25 + marks=[ + pytest.mark.filterwarnings("ignore:Applying:DeprecationWarning") + ], + ), + ], + ) + def test_pos_object_raises(self, df): + # GH#21380 + if np_version_gte1p25: + with 
pytest.raises( + TypeError, match=r"^bad operand type for unary \+: \'str\'$" + ): + tm.assert_frame_equal(+df, df) + else: + tm.assert_series_equal(+df["a"], df["a"]) + + @pytest.mark.parametrize( + "df", [pd.DataFrame({"a": pd.to_datetime(["2017-01-22", "1970-01-01"])})] + ) + def test_pos_raises(self, df): + msg = r"bad operand type for unary \+: 'DatetimeArray'" + with pytest.raises(TypeError, match=msg): + (+df) + with pytest.raises(TypeError, match=msg): + (+df["a"]) + + def test_unary_nullable(self): + df = pd.DataFrame( + { + "a": pd.array([1, -2, 3, pd.NA], dtype="Int64"), + "b": pd.array([4.0, -5.0, 6.0, pd.NA], dtype="Float32"), + "c": pd.array([True, False, False, pd.NA], dtype="boolean"), + # include numpy bool to make sure bool-vs-boolean behavior + # is consistent in non-NA locations + "d": np.array([True, False, False, True]), + } + ) + + result = +df + res_ufunc = np.positive(df) + expected = df + # TODO: assert that we have copies? + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(res_ufunc, expected) + + result = -df + res_ufunc = np.negative(df) + expected = pd.DataFrame( + { + "a": pd.array([-1, 2, -3, pd.NA], dtype="Int64"), + "b": pd.array([-4.0, 5.0, -6.0, pd.NA], dtype="Float32"), + "c": pd.array([False, True, True, pd.NA], dtype="boolean"), + "d": np.array([False, True, True, False]), + } + ) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(res_ufunc, expected) + + result = abs(df) + res_ufunc = np.abs(df) + expected = pd.DataFrame( + { + "a": pd.array([1, 2, 3, pd.NA], dtype="Int64"), + "b": pd.array([4.0, 5.0, 6.0, pd.NA], dtype="Float32"), + "c": pd.array([True, False, False, pd.NA], dtype="boolean"), + "d": np.array([True, False, False, True]), + } + ) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(res_ufunc, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_validate.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_validate.py new file mode 100644 index 00000000..e99e0a68 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/frame/test_validate.py @@ -0,0 +1,41 @@ +import pytest + +from pandas.core.frame import DataFrame + + +@pytest.fixture +def dataframe(): + return DataFrame({"a": [1, 2], "b": [3, 4]}) + + +class TestDataFrameValidate: + """Tests for error handling related to data types of method arguments.""" + + @pytest.mark.parametrize( + "func", + [ + "query", + "eval", + "set_index", + "reset_index", + "dropna", + "drop_duplicates", + "sort_values", + ], + ) + @pytest.mark.parametrize("inplace", [1, "True", [1, 2, 3], 5.0]) + def test_validate_bool_args(self, dataframe, func, inplace): + msg = 'For argument "inplace" expected type bool' + kwargs = {"inplace": inplace} + + if func == "query": + kwargs["expr"] = "a > b" + elif func == "eval": + kwargs["expr"] = "a + b" + elif func == "set_index": + kwargs["keys"] = ["a"] + elif func == "sort_values": + kwargs["by"] = ["a"] + + with pytest.raises(ValueError, match=msg): + getattr(dataframe, func)(**kwargs) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_duplicate_labels.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_duplicate_labels.py new file mode 100644 index 00000000..a81e0132 --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_duplicate_labels.py @@ -0,0 +1,411 @@ +"""Tests dealing with the NDFrame.allows_duplicates.""" +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + +not_implemented = pytest.mark.xfail(reason="Not implemented.") + +# ---------------------------------------------------------------------------- +# Preservation + + +class TestPreserves: + @pytest.mark.parametrize( + "cls, data", + [ + (pd.Series, np.array([])), + (pd.Series, [1, 2]), + (pd.DataFrame, {}), + (pd.DataFrame, {"A": [1, 2]}), + ], + ) + def test_construction_ok(self, cls, data): + result = cls(data) + assert result.flags.allows_duplicate_labels is True + + result = cls(data).set_flags(allows_duplicate_labels=False) + assert result.flags.allows_duplicate_labels is False + + @pytest.mark.parametrize( + "func", + [ + operator.itemgetter(["a"]), + operator.methodcaller("add", 1), + operator.methodcaller("rename", str.upper), + operator.methodcaller("rename", "name"), + operator.methodcaller("abs"), + np.abs, + ], + ) + def test_preserved_series(self, func): + s = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False) + assert func(s).flags.allows_duplicate_labels is False + + @pytest.mark.parametrize( + "other", [pd.Series(0, index=["a", "b", "c"]), pd.Series(0, index=["a", "b"])] + ) + # TODO: frame + @not_implemented + def test_align(self, other): + s = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False) + a, b = s.align(other) + assert a.flags.allows_duplicate_labels is False + assert b.flags.allows_duplicate_labels is False + + def test_preserved_frame(self): + df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) + assert df.loc[["a"]].flags.allows_duplicate_labels is False + assert df.loc[:, ["A", "B"]].flags.allows_duplicate_labels is False + + def test_to_frame(self): + ser = pd.Series(dtype=float).set_flags(allows_duplicate_labels=False) + assert ser.to_frame().flags.allows_duplicate_labels is False + + @pytest.mark.parametrize("func", ["add", "sub"]) + @pytest.mark.parametrize("frame", [False, True]) + @pytest.mark.parametrize("other", [1, pd.Series([1, 2], name="A")]) + def test_binops(self, func, other, frame): + df = pd.Series([1, 2], name="A", index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) + if frame: + df = df.to_frame() + if isinstance(other, pd.Series) and frame: + other = other.to_frame() + func = operator.methodcaller(func, other) + assert df.flags.allows_duplicate_labels is False + assert func(df).flags.allows_duplicate_labels is False + + def test_preserve_getitem(self): + df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False) + assert df[["A"]].flags.allows_duplicate_labels is False + assert df["A"].flags.allows_duplicate_labels is False + assert df.loc[0].flags.allows_duplicate_labels is False + assert df.loc[[0]].flags.allows_duplicate_labels is False + assert df.loc[0, ["A"]].flags.allows_duplicate_labels is False + + def test_ndframe_getitem_caching_issue(self, request, using_copy_on_write): + if not using_copy_on_write: + request.node.add_marker(pytest.mark.xfail(reason="Unclear behavior.")) + # NDFrame.__getitem__ will cache the first df['A']. May need to + # invalidate that cache? Update the cached entries? 
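# ---- editor's note: illustrative sketch, not part of the vendored pandas
# test code. The propagation contract this class keeps asserting, in
# isolation: set_flags returns a copy with the flag set, and the flag rides
# along through label-preserving operations such as column selection:
import pandas as pd

df = pd.DataFrame({"A": [1, 2]}, index=["a", "b"]).set_flags(
    allows_duplicate_labels=False
)
assert df.flags.allows_duplicate_labels is False
assert df["A"].flags.allows_duplicate_labels is False  # survives __getitem__
# ---- end editor's note ----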
+ df = pd.DataFrame({"A": [0]}).set_flags(allows_duplicate_labels=False) + assert df["A"].flags.allows_duplicate_labels is False + df.flags.allows_duplicate_labels = True + assert df["A"].flags.allows_duplicate_labels is True + + @pytest.mark.parametrize( + "objs, kwargs", + [ + # Series + ( + [ + pd.Series(1, index=["a", "b"]), + pd.Series(2, index=["c", "d"]), + ], + {}, + ), + ( + [ + pd.Series(1, index=["a", "b"]), + pd.Series(2, index=["a", "b"]), + ], + {"ignore_index": True}, + ), + ( + [ + pd.Series(1, index=["a", "b"]), + pd.Series(2, index=["a", "b"]), + ], + {"axis": 1}, + ), + # Frame + ( + [ + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), + pd.DataFrame({"A": [1, 2]}, index=["c", "d"]), + ], + {}, + ), + ( + [ + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), + ], + {"ignore_index": True}, + ), + ( + [ + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), + pd.DataFrame({"B": [1, 2]}, index=["a", "b"]), + ], + {"axis": 1}, + ), + # Series / Frame + ( + [ + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), + pd.Series([1, 2], index=["a", "b"], name="B"), + ], + {"axis": 1}, + ), + ], + ) + def test_concat(self, objs, kwargs): + objs = [x.set_flags(allows_duplicate_labels=False) for x in objs] + result = pd.concat(objs, **kwargs) + assert result.flags.allows_duplicate_labels is False + + @pytest.mark.parametrize( + "left, right, expected", + [ + # false false false + pytest.param( + pd.DataFrame({"A": [0, 1]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + pd.DataFrame({"B": [0, 1]}, index=["a", "d"]).set_flags( + allows_duplicate_labels=False + ), + False, + marks=not_implemented, + ), + # false true false + pytest.param( + pd.DataFrame({"A": [0, 1]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + pd.DataFrame({"B": [0, 1]}, index=["a", "d"]), + False, + marks=not_implemented, + ), + # true true true + ( + pd.DataFrame({"A": [0, 1]}, index=["a", "b"]), + pd.DataFrame({"B": [0, 1]}, index=["a", "d"]), + True, + ), + ], + ) + def test_merge(self, left, right, expected): + result = pd.merge(left, right, left_index=True, right_index=True) + assert result.flags.allows_duplicate_labels is expected + + @not_implemented + def test_groupby(self): + # XXX: This is under tested + # TODO: + # - apply + # - transform + # - Should passing a grouper that disallows duplicates propagate? 
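# ---- editor's note: illustrative sketch, not part of the vendored pandas
# test code. The TestRaises class below turns from propagation to
# enforcement; the basic contract is that disallowing duplicates on an index
# that already has them raises pd.errors.DuplicateLabelError:
import pandas as pd

ser = pd.Series([1, 2], index=["a", "a"])   # duplicate label "a"
try:
    ser.set_flags(allows_duplicate_labels=False)
except pd.errors.DuplicateLabelError as err:
    assert "Index has duplicates" in str(err)
# ---- end editor's note ----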
+ df = pd.DataFrame({"A": [1, 2, 3]}).set_flags(allows_duplicate_labels=False) + result = df.groupby([0, 0, 1]).agg("count") + assert result.flags.allows_duplicate_labels is False + + @pytest.mark.parametrize("frame", [True, False]) + @not_implemented + def test_window(self, frame): + df = pd.Series( + 1, + index=pd.date_range("2000", periods=12), + name="A", + allows_duplicate_labels=False, + ) + if frame: + df = df.to_frame() + assert df.rolling(3).mean().flags.allows_duplicate_labels is False + assert df.ewm(3).mean().flags.allows_duplicate_labels is False + assert df.expanding(3).mean().flags.allows_duplicate_labels is False + + +# ---------------------------------------------------------------------------- +# Raises + + +class TestRaises: + @pytest.mark.parametrize( + "cls, axes", + [ + (pd.Series, {"index": ["a", "a"], "dtype": float}), + (pd.DataFrame, {"index": ["a", "a"]}), + (pd.DataFrame, {"index": ["a", "a"], "columns": ["b", "b"]}), + (pd.DataFrame, {"columns": ["b", "b"]}), + ], + ) + def test_set_flags_with_duplicates(self, cls, axes): + result = cls(**axes) + assert result.flags.allows_duplicate_labels is True + + msg = "Index has duplicates." + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + cls(**axes).set_flags(allows_duplicate_labels=False) + + @pytest.mark.parametrize( + "data", + [ + pd.Series(index=[0, 0], dtype=float), + pd.DataFrame(index=[0, 0]), + pd.DataFrame(columns=[0, 0]), + ], + ) + def test_setting_allows_duplicate_labels_raises(self, data): + msg = "Index has duplicates." + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + data.flags.allows_duplicate_labels = False + + assert data.flags.allows_duplicate_labels is True + + def test_series_raises(self): + a = pd.Series(0, index=["a", "b"]) + b = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False) + msg = "Index has duplicates." + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + pd.concat([a, b]) + + @pytest.mark.parametrize( + "getter, target", + [ + (operator.itemgetter(["A", "A"]), None), + # loc + (operator.itemgetter(["a", "a"]), "loc"), + pytest.param(operator.itemgetter(("a", ["A", "A"])), "loc"), + (operator.itemgetter((["a", "a"], "A")), "loc"), + # iloc + (operator.itemgetter([0, 0]), "iloc"), + pytest.param(operator.itemgetter((0, [0, 0])), "iloc"), + pytest.param(operator.itemgetter(([0, 0], 0)), "iloc"), + ], + ) + def test_getitem_raises(self, getter, target): + df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) + if target: + # df, df.loc, or df.iloc + target = getattr(df, target) + else: + target = df + + msg = "Index has duplicates." + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + getter(target) + + @pytest.mark.parametrize( + "objs, kwargs", + [ + ( + [ + pd.Series(1, index=[0, 1], name="a"), + pd.Series(2, index=[0, 1], name="a"), + ], + {"axis": 1}, + ) + ], + ) + def test_concat_raises(self, objs, kwargs): + objs = [x.set_flags(allows_duplicate_labels=False) for x in objs] + msg = "Index has duplicates." + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + pd.concat(objs, **kwargs) + + @not_implemented + def test_merge_raises(self): + a = pd.DataFrame({"A": [0, 1, 2]}, index=["a", "b", "c"]).set_flags( + allows_duplicate_labels=False + ) + b = pd.DataFrame({"B": [0, 1, 2]}, index=["a", "b", "b"]) + msg = "Index has duplicates." 
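# ---- editor's note: illustrative sketch, not part of the vendored pandas
# test code. Note the @not_implemented marker above: merge does not yet
# enforce the flag. concat is an operation that already does, as
# test_series_raises earlier in this class shows:
import pandas as pd

a = pd.Series(0, index=["a", "b"])
b = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False)
try:
    pd.concat([a, b])  # would give the flagged object duplicate labels
except pd.errors.DuplicateLabelError as err:
    assert "Index has duplicates" in str(err)
# ---- end editor's note ----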
+ with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + pd.merge(a, b, left_index=True, right_index=True) + + +@pytest.mark.parametrize( + "idx", + [ + pd.Index([1, 1]), + pd.Index(["a", "a"]), + pd.Index([1.1, 1.1]), + pd.PeriodIndex([pd.Period("2000", "D")] * 2), + pd.DatetimeIndex([pd.Timestamp("2000")] * 2), + pd.TimedeltaIndex([pd.Timedelta("1D")] * 2), + pd.CategoricalIndex(["a", "a"]), + pd.IntervalIndex([pd.Interval(0, 1)] * 2), + pd.MultiIndex.from_tuples([("a", 1), ("a", 1)]), + ], + ids=lambda x: type(x).__name__, +) +def test_raises_basic(idx): + msg = "Index has duplicates." + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + pd.Series(1, index=idx).set_flags(allows_duplicate_labels=False) + + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + pd.DataFrame({"A": [1, 1]}, index=idx).set_flags(allows_duplicate_labels=False) + + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + pd.DataFrame([[1, 2]], columns=idx).set_flags(allows_duplicate_labels=False) + + +def test_format_duplicate_labels_message(): + idx = pd.Index(["a", "b", "a", "b", "c"]) + result = idx._format_duplicate_message() + expected = pd.DataFrame( + {"positions": [[0, 2], [1, 3]]}, index=pd.Index(["a", "b"], name="label") + ) + tm.assert_frame_equal(result, expected) + + +def test_format_duplicate_labels_message_multi(): + idx = pd.MultiIndex.from_product([["A"], ["a", "b", "a", "b", "c"]]) + result = idx._format_duplicate_message() + expected = pd.DataFrame( + {"positions": [[0, 2], [1, 3]]}, + index=pd.MultiIndex.from_product([["A"], ["a", "b"]]), + ) + tm.assert_frame_equal(result, expected) + + +def test_dataframe_insert_raises(): + df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False) + msg = "Cannot specify" + with pytest.raises(ValueError, match=msg): + df.insert(0, "A", [3, 4], allow_duplicates=True) + + +@pytest.mark.parametrize( + "method, frame_only", + [ + (operator.methodcaller("set_index", "A", inplace=True), True), + (operator.methodcaller("reset_index", inplace=True), True), + (operator.methodcaller("rename", lambda x: x, inplace=True), False), + ], +) +def test_inplace_raises(method, frame_only): + df = pd.DataFrame({"A": [0, 0], "B": [1, 2]}).set_flags( + allows_duplicate_labels=False + ) + s = df["A"] + s.flags.allows_duplicate_labels = False + msg = "Cannot specify" + + with pytest.raises(ValueError, match=msg): + method(df) + if not frame_only: + with pytest.raises(ValueError, match=msg): + method(s) + + +def test_pickle(): + a = pd.Series([1, 2]).set_flags(allows_duplicate_labels=False) + b = tm.round_trip_pickle(a) + tm.assert_series_equal(a, b) + + a = pd.DataFrame({"A": []}).set_flags(allows_duplicate_labels=False) + b = tm.round_trip_pickle(a) + tm.assert_frame_equal(a, b) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_finalize.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_finalize.py new file mode 100644 index 00000000..1522b83a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_finalize.py @@ -0,0 +1,772 @@ +""" +An exhaustive list of pandas methods exercising NDFrame.__finalize__. +""" +import operator +import re + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + +# TODO: +# * Binary methods (mul, div, etc.) +# * Binary outputs (align, etc.) +# * top-level methods (concat, merge, get_dummies, etc.) 
+# * window +# * cumulative reductions + +not_implemented_mark = pytest.mark.xfail(reason="not implemented") + +mi = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=["A", "B"]) + +frame_data = ({"A": [1]},) +frame_mi_data = ({"A": [1, 2, 3, 4]}, mi) + + +# Tuple of +# - Callable: Constructor (Series, DataFrame) +# - Tuple: Constructor args +# - Callable: pass the constructed value with attrs set to this. + +_all_methods = [ + ( + pd.Series, + (np.array([0], dtype="float64")), + operator.methodcaller("view", "int64"), + ), + (pd.Series, ([0],), operator.methodcaller("take", [])), + (pd.Series, ([0],), operator.methodcaller("__getitem__", [True])), + (pd.Series, ([0],), operator.methodcaller("repeat", 2)), + (pd.Series, ([0],), operator.methodcaller("reset_index")), + (pd.Series, ([0],), operator.methodcaller("reset_index", drop=True)), + (pd.Series, ([0],), operator.methodcaller("to_frame")), + (pd.Series, ([0, 0],), operator.methodcaller("drop_duplicates")), + (pd.Series, ([0, 0],), operator.methodcaller("duplicated")), + (pd.Series, ([0, 0],), operator.methodcaller("round")), + (pd.Series, ([0, 0],), operator.methodcaller("rename", lambda x: x + 1)), + (pd.Series, ([0, 0],), operator.methodcaller("rename", "name")), + (pd.Series, ([0, 0],), operator.methodcaller("set_axis", ["a", "b"])), + (pd.Series, ([0, 0],), operator.methodcaller("reindex", [1, 0])), + (pd.Series, ([0, 0],), operator.methodcaller("drop", [0])), + (pd.Series, (pd.array([0, pd.NA]),), operator.methodcaller("fillna", 0)), + (pd.Series, ([0, 0],), operator.methodcaller("replace", {0: 1})), + (pd.Series, ([0, 0],), operator.methodcaller("shift")), + (pd.Series, ([0, 0],), operator.methodcaller("isin", [0, 1])), + (pd.Series, ([0, 0],), operator.methodcaller("between", 0, 2)), + (pd.Series, ([0, 0],), operator.methodcaller("isna")), + (pd.Series, ([0, 0],), operator.methodcaller("isnull")), + (pd.Series, ([0, 0],), operator.methodcaller("notna")), + (pd.Series, ([0, 0],), operator.methodcaller("notnull")), + (pd.Series, ([1],), operator.methodcaller("add", pd.Series([1]))), + # TODO: mul, div, etc. 
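# ---- editor's note: illustrative sketch, not part of the vendored pandas
# test code. What every entry in this table ultimately checks is that
# NDFrame.__finalize__ copies metadata (notably .attrs) from input to
# result; a single direct instance of the contract:
import pandas as pd

df = pd.DataFrame({"A": [1, 2]})
df.attrs = {"source": "sensor-3"}       # hypothetical metadata payload
result = df.rename(columns=str.lower)   # one of the methods tabulated here
assert result.attrs == {"source": "sensor-3"}
# ---- end editor's note ----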
+ ( + pd.Series, + ([0], pd.period_range("2000", periods=1)), + operator.methodcaller("to_timestamp"), + ), + ( + pd.Series, + ([0], pd.date_range("2000", periods=1)), + operator.methodcaller("to_period"), + ), + pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("dot", pd.DataFrame(index=["A"])), + ), + marks=pytest.mark.xfail(reason="Implement binary finalize"), + ), + (pd.DataFrame, frame_data, operator.methodcaller("transpose")), + (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", "A")), + (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", ["A"])), + (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", np.array([True]))), + (pd.DataFrame, ({("A", "a"): [1]},), operator.methodcaller("__getitem__", ["A"])), + (pd.DataFrame, frame_data, operator.methodcaller("query", "A == 1")), + (pd.DataFrame, frame_data, operator.methodcaller("eval", "A + 1", engine="python")), + (pd.DataFrame, frame_data, operator.methodcaller("select_dtypes", include="int")), + (pd.DataFrame, frame_data, operator.methodcaller("assign", b=1)), + (pd.DataFrame, frame_data, operator.methodcaller("set_axis", ["A"])), + (pd.DataFrame, frame_data, operator.methodcaller("reindex", [0, 1])), + (pd.DataFrame, frame_data, operator.methodcaller("drop", columns=["A"])), + (pd.DataFrame, frame_data, operator.methodcaller("drop", index=[0])), + (pd.DataFrame, frame_data, operator.methodcaller("rename", columns={"A": "a"})), + (pd.DataFrame, frame_data, operator.methodcaller("rename", index=lambda x: x)), + (pd.DataFrame, frame_data, operator.methodcaller("fillna", "A")), + (pd.DataFrame, frame_data, operator.methodcaller("fillna", method="ffill")), + (pd.DataFrame, frame_data, operator.methodcaller("set_index", "A")), + (pd.DataFrame, frame_data, operator.methodcaller("reset_index")), + (pd.DataFrame, frame_data, operator.methodcaller("isna")), + (pd.DataFrame, frame_data, operator.methodcaller("isnull")), + (pd.DataFrame, frame_data, operator.methodcaller("notna")), + (pd.DataFrame, frame_data, operator.methodcaller("notnull")), + (pd.DataFrame, frame_data, operator.methodcaller("dropna")), + (pd.DataFrame, frame_data, operator.methodcaller("drop_duplicates")), + (pd.DataFrame, frame_data, operator.methodcaller("duplicated")), + (pd.DataFrame, frame_data, operator.methodcaller("sort_values", by="A")), + (pd.DataFrame, frame_data, operator.methodcaller("sort_index")), + (pd.DataFrame, frame_data, operator.methodcaller("nlargest", 1, "A")), + (pd.DataFrame, frame_data, operator.methodcaller("nsmallest", 1, "A")), + (pd.DataFrame, frame_mi_data, operator.methodcaller("swaplevel")), + ( + pd.DataFrame, + frame_data, + operator.methodcaller("add", pd.DataFrame(*frame_data)), + ), + # TODO: div, mul, etc. 
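# ---- editor's note: illustrative sketch, not part of the vendored pandas
# test code. The table-driven pattern used throughout this list pairs a
# constructor with operator.methodcaller, which freezes a method name plus
# its arguments into a plain callable:
import operator
import pandas as pd

entry = (pd.DataFrame, ({"A": [1.234]},), operator.methodcaller("round", 2))
cls, init_args, method = entry
obj = cls(*init_args)
obj.attrs = {"a": 1}
result = method(obj)                 # equivalent to obj.round(2)
assert result.attrs == {"a": 1}      # what test_finalize_called asserts below
# ---- end editor's note ----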
+ ( + pd.DataFrame, + frame_data, + operator.methodcaller("combine", pd.DataFrame(*frame_data), operator.add), + ), + ( + pd.DataFrame, + frame_data, + operator.methodcaller("combine_first", pd.DataFrame(*frame_data)), + ), + pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("update", pd.DataFrame(*frame_data)), + ), + marks=not_implemented_mark, + ), + (pd.DataFrame, frame_data, operator.methodcaller("pivot", columns="A")), + ( + pd.DataFrame, + ({"A": [1], "B": [1]},), + operator.methodcaller("pivot_table", columns="A"), + ), + ( + pd.DataFrame, + ({"A": [1], "B": [1]},), + operator.methodcaller("pivot_table", columns="A", aggfunc=["mean", "sum"]), + ), + (pd.DataFrame, frame_data, operator.methodcaller("stack")), + (pd.DataFrame, frame_data, operator.methodcaller("explode", "A")), + (pd.DataFrame, frame_mi_data, operator.methodcaller("unstack")), + ( + pd.DataFrame, + ({"A": ["a", "b", "c"], "B": [1, 3, 5], "C": [2, 4, 6]},), + operator.methodcaller("melt", id_vars=["A"], value_vars=["B"]), + ), + (pd.DataFrame, frame_data, operator.methodcaller("map", lambda x: x)), + pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("merge", pd.DataFrame({"A": [1]})), + ), + marks=not_implemented_mark, + ), + (pd.DataFrame, frame_data, operator.methodcaller("round", 2)), + (pd.DataFrame, frame_data, operator.methodcaller("corr")), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("cov")), + marks=[ + pytest.mark.filterwarnings("ignore::RuntimeWarning"), + ], + ), + ( + pd.DataFrame, + frame_data, + operator.methodcaller("corrwith", pd.DataFrame(*frame_data)), + ), + (pd.DataFrame, frame_data, operator.methodcaller("count")), + (pd.DataFrame, frame_data, operator.methodcaller("nunique")), + (pd.DataFrame, frame_data, operator.methodcaller("idxmin")), + (pd.DataFrame, frame_data, operator.methodcaller("idxmax")), + (pd.DataFrame, frame_data, operator.methodcaller("mode")), + (pd.Series, [0], operator.methodcaller("mode")), + (pd.DataFrame, frame_data, operator.methodcaller("median")), + ( + pd.DataFrame, + frame_data, + operator.methodcaller("quantile", numeric_only=True), + ), + ( + pd.DataFrame, + frame_data, + operator.methodcaller("quantile", q=[0.25, 0.75], numeric_only=True), + ), + ( + pd.DataFrame, + ({"A": [pd.Timedelta(days=1), pd.Timedelta(days=2)]},), + operator.methodcaller("quantile", numeric_only=False), + ), + ( + pd.DataFrame, + ({"A": [np.datetime64("2022-01-01"), np.datetime64("2022-01-02")]},), + operator.methodcaller("quantile", numeric_only=True), + ), + ( + pd.DataFrame, + ({"A": [1]}, [pd.Period("2000", "D")]), + operator.methodcaller("to_timestamp"), + ), + ( + pd.DataFrame, + ({"A": [1]}, [pd.Timestamp("2000")]), + operator.methodcaller("to_period", freq="D"), + ), + (pd.DataFrame, frame_mi_data, operator.methodcaller("isin", [1])), + (pd.DataFrame, frame_mi_data, operator.methodcaller("isin", pd.Series([1]))), + ( + pd.DataFrame, + frame_mi_data, + operator.methodcaller("isin", pd.DataFrame({"A": [1]})), + ), + (pd.DataFrame, frame_mi_data, operator.methodcaller("droplevel", "A")), + (pd.DataFrame, frame_data, operator.methodcaller("pop", "A")), + # Squeeze on columns, otherwise we'll end up with a scalar + (pd.DataFrame, frame_data, operator.methodcaller("squeeze", axis="columns")), + (pd.Series, ([1, 2],), operator.methodcaller("squeeze")), + (pd.Series, ([1, 2],), operator.methodcaller("rename_axis", index="a")), + (pd.DataFrame, frame_data, operator.methodcaller("rename_axis", columns="a")), + # Unary ops + 
(pd.DataFrame, frame_data, operator.neg), + (pd.Series, [1], operator.neg), + (pd.DataFrame, frame_data, operator.pos), + (pd.Series, [1], operator.pos), + (pd.DataFrame, frame_data, operator.inv), + (pd.Series, [1], operator.inv), + (pd.DataFrame, frame_data, abs), + (pd.Series, [1], abs), + (pd.DataFrame, frame_data, round), + (pd.Series, [1], round), + (pd.DataFrame, frame_data, operator.methodcaller("take", [0, 0])), + (pd.DataFrame, frame_mi_data, operator.methodcaller("xs", "a")), + (pd.Series, (1, mi), operator.methodcaller("xs", "a")), + (pd.DataFrame, frame_data, operator.methodcaller("get", "A")), + ( + pd.DataFrame, + frame_data, + operator.methodcaller("reindex_like", pd.DataFrame({"A": [1, 2, 3]})), + ), + ( + pd.Series, + frame_data, + operator.methodcaller("reindex_like", pd.Series([0, 1, 2])), + ), + (pd.DataFrame, frame_data, operator.methodcaller("add_prefix", "_")), + (pd.DataFrame, frame_data, operator.methodcaller("add_suffix", "_")), + (pd.Series, (1, ["a", "b"]), operator.methodcaller("add_prefix", "_")), + (pd.Series, (1, ["a", "b"]), operator.methodcaller("add_suffix", "_")), + (pd.Series, ([3, 2],), operator.methodcaller("sort_values")), + (pd.Series, ([1] * 10,), operator.methodcaller("head")), + (pd.DataFrame, ({"A": [1] * 10},), operator.methodcaller("head")), + (pd.Series, ([1] * 10,), operator.methodcaller("tail")), + (pd.DataFrame, ({"A": [1] * 10},), operator.methodcaller("tail")), + (pd.Series, ([1, 2],), operator.methodcaller("sample", n=2, replace=True)), + (pd.DataFrame, (frame_data,), operator.methodcaller("sample", n=2, replace=True)), + (pd.Series, ([1, 2],), operator.methodcaller("astype", float)), + (pd.DataFrame, frame_data, operator.methodcaller("astype", float)), + (pd.Series, ([1, 2],), operator.methodcaller("copy")), + (pd.DataFrame, frame_data, operator.methodcaller("copy")), + (pd.Series, ([1, 2], None, object), operator.methodcaller("infer_objects")), + ( + pd.DataFrame, + ({"A": np.array([1, 2], dtype=object)},), + operator.methodcaller("infer_objects"), + ), + (pd.Series, ([1, 2],), operator.methodcaller("convert_dtypes")), + (pd.DataFrame, frame_data, operator.methodcaller("convert_dtypes")), + (pd.Series, ([1, None, 3],), operator.methodcaller("interpolate")), + (pd.DataFrame, ({"A": [1, None, 3]},), operator.methodcaller("interpolate")), + (pd.Series, ([1, 2],), operator.methodcaller("clip", lower=1)), + (pd.DataFrame, frame_data, operator.methodcaller("clip", lower=1)), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("asfreq", "H"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("asfreq", "H"), + ), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("at_time", "12:00"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("at_time", "12:00"), + ), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("between_time", "12:00", "13:00"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("between_time", "12:00", "13:00"), + ), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("last", "3D"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("last", "3D"), + ), + (pd.Series, ([1, 2],), operator.methodcaller("rank")), + (pd.DataFrame, frame_data, operator.methodcaller("rank")), + (pd.Series, 
([1, 2],), operator.methodcaller("where", np.array([True, False]))), + (pd.DataFrame, frame_data, operator.methodcaller("where", np.array([[True]]))), + (pd.Series, ([1, 2],), operator.methodcaller("mask", np.array([True, False]))), + (pd.DataFrame, frame_data, operator.methodcaller("mask", np.array([[True]]))), + (pd.Series, ([1, 2],), operator.methodcaller("truncate", before=0)), + (pd.DataFrame, frame_data, operator.methodcaller("truncate", before=0)), + ( + pd.Series, + (1, pd.date_range("2000", periods=4, tz="UTC")), + operator.methodcaller("tz_convert", "CET"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4, tz="UTC")), + operator.methodcaller("tz_convert", "CET"), + ), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("tz_localize", "CET"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("tz_localize", "CET"), + ), + (pd.Series, ([1, 2],), operator.methodcaller("describe")), + (pd.DataFrame, frame_data, operator.methodcaller("describe")), + (pd.Series, ([1, 2],), operator.methodcaller("pct_change")), + (pd.DataFrame, frame_data, operator.methodcaller("pct_change")), + (pd.Series, ([1],), operator.methodcaller("transform", lambda x: x - x.min())), + ( + pd.DataFrame, + frame_mi_data, + operator.methodcaller("transform", lambda x: x - x.min()), + ), + (pd.Series, ([1],), operator.methodcaller("apply", lambda x: x)), + (pd.DataFrame, frame_mi_data, operator.methodcaller("apply", lambda x: x)), + # Cumulative reductions + (pd.Series, ([1],), operator.methodcaller("cumsum")), + (pd.DataFrame, frame_data, operator.methodcaller("cumsum")), + (pd.Series, ([1],), operator.methodcaller("cummin")), + (pd.DataFrame, frame_data, operator.methodcaller("cummin")), + (pd.Series, ([1],), operator.methodcaller("cummax")), + (pd.DataFrame, frame_data, operator.methodcaller("cummax")), + (pd.Series, ([1],), operator.methodcaller("cumprod")), + (pd.DataFrame, frame_data, operator.methodcaller("cumprod")), + # Reductions + (pd.DataFrame, frame_data, operator.methodcaller("any")), + (pd.DataFrame, frame_data, operator.methodcaller("all")), + (pd.DataFrame, frame_data, operator.methodcaller("min")), + (pd.DataFrame, frame_data, operator.methodcaller("max")), + (pd.DataFrame, frame_data, operator.methodcaller("sum")), + (pd.DataFrame, frame_data, operator.methodcaller("std")), + (pd.DataFrame, frame_data, operator.methodcaller("mean")), + (pd.DataFrame, frame_data, operator.methodcaller("prod")), + (pd.DataFrame, frame_data, operator.methodcaller("sem")), + (pd.DataFrame, frame_data, operator.methodcaller("skew")), + (pd.DataFrame, frame_data, operator.methodcaller("kurt")), +] + + +def idfn(x): + xpr = re.compile(r"'(.*)?'") + m = xpr.search(str(x)) + if m: + return m.group(1) + else: + return str(x) + + +@pytest.fixture(params=_all_methods, ids=lambda x: idfn(x[-1])) +def ndframe_method(request): + """ + An NDFrame method returning an NDFrame. 
+ """ + return request.param + + +@pytest.mark.filterwarnings( + "ignore:DataFrame.fillna with 'method' is deprecated:FutureWarning", + "ignore:last is deprecated:FutureWarning", +) +def test_finalize_called(ndframe_method): + cls, init_args, method = ndframe_method + ndframe = cls(*init_args) + + ndframe.attrs = {"a": 1} + result = method(ndframe) + + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "data", + [ + pd.Series(1, pd.date_range("2000", periods=4)), + pd.DataFrame({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + ], +) +def test_finalize_first(data): + deprecated_msg = "first is deprecated" + + data.attrs = {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=deprecated_msg): + result = data.first("3D") + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "data", + [ + pd.Series(1, pd.date_range("2000", periods=4)), + pd.DataFrame({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + ], +) +def test_finalize_last(data): + # GH 53710 + deprecated_msg = "last is deprecated" + + data.attrs = {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=deprecated_msg): + result = data.last("3D") + assert result.attrs == {"a": 1} + + +@not_implemented_mark +def test_finalize_called_eval_numexpr(): + pytest.importorskip("numexpr") + df = pd.DataFrame({"A": [1, 2]}) + df.attrs["A"] = 1 + result = df.eval("A + 1", engine="numexpr") + assert result.attrs == {"A": 1} + + +# ---------------------------------------------------------------------------- +# Binary operations + + +@pytest.mark.parametrize("annotate", ["left", "right", "both"]) +@pytest.mark.parametrize( + "args", + [ + (1, pd.Series([1])), + (1, pd.DataFrame({"A": [1]})), + (pd.Series([1]), 1), + (pd.DataFrame({"A": [1]}), 1), + (pd.Series([1]), pd.Series([1])), + (pd.DataFrame({"A": [1]}), pd.DataFrame({"A": [1]})), + (pd.Series([1]), pd.DataFrame({"A": [1]})), + (pd.DataFrame({"A": [1]}), pd.Series([1])), + ], + ids=lambda x: f"({type(x[0]).__name__},{type(x[1]).__name__})", +) +def test_binops(request, args, annotate, all_binary_operators): + # This generates 624 tests... Is that needed? 
+ left, right = args + if isinstance(left, (pd.DataFrame, pd.Series)): + left.attrs = {} + if isinstance(right, (pd.DataFrame, pd.Series)): + right.attrs = {} + + if annotate == "left" and isinstance(left, int): + pytest.skip("left is an int and doesn't support .attrs") + if annotate == "right" and isinstance(right, int): + pytest.skip("right is an int and doesn't support .attrs") + + if not (isinstance(left, int) or isinstance(right, int)) and annotate != "both": + if not all_binary_operators.__name__.startswith("r"): + if annotate == "right" and isinstance(left, type(right)): + request.node.add_marker( + pytest.mark.xfail( + reason=f"{all_binary_operators} doesn't work when right has " + f"attrs and both are {type(left)}" + ) + ) + if not isinstance(left, type(right)): + if annotate == "left" and isinstance(left, pd.Series): + request.node.add_marker( + pytest.mark.xfail( + reason=f"{all_binary_operators} doesn't work when the " + "objects are different Series has attrs" + ) + ) + elif annotate == "right" and isinstance(right, pd.Series): + request.node.add_marker( + pytest.mark.xfail( + reason=f"{all_binary_operators} doesn't work when the " + "objects are different Series has attrs" + ) + ) + else: + if annotate == "left" and isinstance(left, type(right)): + request.node.add_marker( + pytest.mark.xfail( + reason=f"{all_binary_operators} doesn't work when left has " + f"attrs and both are {type(left)}" + ) + ) + if not isinstance(left, type(right)): + if annotate == "right" and isinstance(right, pd.Series): + request.node.add_marker( + pytest.mark.xfail( + reason=f"{all_binary_operators} doesn't work when the " + "objects are different Series has attrs" + ) + ) + elif annotate == "left" and isinstance(left, pd.Series): + request.node.add_marker( + pytest.mark.xfail( + reason=f"{all_binary_operators} doesn't work when the " + "objects are different Series has attrs" + ) + ) + if annotate in {"left", "both"} and not isinstance(left, int): + left.attrs = {"a": 1} + if annotate in {"right", "both"} and not isinstance(right, int): + right.attrs = {"a": 1} + + is_cmp = all_binary_operators in [ + operator.eq, + operator.ne, + operator.gt, + operator.ge, + operator.lt, + operator.le, + ] + if is_cmp and isinstance(left, pd.DataFrame) and isinstance(right, pd.Series): + # in 2.0 silent alignment on comparisons was removed xref GH#28759 + left, right = left.align(right, axis=1, copy=False) + elif is_cmp and isinstance(left, pd.Series) and isinstance(right, pd.DataFrame): + right, left = right.align(left, axis=1, copy=False) + + result = all_binary_operators(left, right) + assert result.attrs == {"a": 1} + + +# ---------------------------------------------------------------------------- +# Accessors + + +@pytest.mark.parametrize( + "method", + [ + operator.methodcaller("capitalize"), + operator.methodcaller("casefold"), + operator.methodcaller("cat", ["a"]), + operator.methodcaller("contains", "a"), + operator.methodcaller("count", "a"), + operator.methodcaller("encode", "utf-8"), + operator.methodcaller("endswith", "a"), + operator.methodcaller("extract", r"(\w)(\d)"), + operator.methodcaller("extract", r"(\w)(\d)", expand=False), + operator.methodcaller("find", "a"), + operator.methodcaller("findall", "a"), + operator.methodcaller("get", 0), + operator.methodcaller("index", "a"), + operator.methodcaller("len"), + operator.methodcaller("ljust", 4), + operator.methodcaller("lower"), + operator.methodcaller("lstrip"), + operator.methodcaller("match", r"\w"), + operator.methodcaller("normalize", 
"NFC"), + operator.methodcaller("pad", 4), + operator.methodcaller("partition", "a"), + operator.methodcaller("repeat", 2), + operator.methodcaller("replace", "a", "b"), + operator.methodcaller("rfind", "a"), + operator.methodcaller("rindex", "a"), + operator.methodcaller("rjust", 4), + operator.methodcaller("rpartition", "a"), + operator.methodcaller("rstrip"), + operator.methodcaller("slice", 4), + operator.methodcaller("slice_replace", 1, repl="a"), + operator.methodcaller("startswith", "a"), + operator.methodcaller("strip"), + operator.methodcaller("swapcase"), + operator.methodcaller("translate", {"a": "b"}), + operator.methodcaller("upper"), + operator.methodcaller("wrap", 4), + operator.methodcaller("zfill", 4), + operator.methodcaller("isalnum"), + operator.methodcaller("isalpha"), + operator.methodcaller("isdigit"), + operator.methodcaller("isspace"), + operator.methodcaller("islower"), + operator.methodcaller("isupper"), + operator.methodcaller("istitle"), + operator.methodcaller("isnumeric"), + operator.methodcaller("isdecimal"), + operator.methodcaller("get_dummies"), + ], + ids=idfn, +) +def test_string_method(method): + s = pd.Series(["a1"]) + s.attrs = {"a": 1} + result = method(s.str) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "method", + [ + operator.methodcaller("to_period"), + operator.methodcaller("tz_localize", "CET"), + operator.methodcaller("normalize"), + operator.methodcaller("strftime", "%Y"), + operator.methodcaller("round", "H"), + operator.methodcaller("floor", "H"), + operator.methodcaller("ceil", "H"), + operator.methodcaller("month_name"), + operator.methodcaller("day_name"), + ], + ids=idfn, +) +def test_datetime_method(method): + s = pd.Series(pd.date_range("2000", periods=4)) + s.attrs = {"a": 1} + result = method(s.dt) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "attr", + [ + "date", + "time", + "timetz", + "year", + "month", + "day", + "hour", + "minute", + "second", + "microsecond", + "nanosecond", + "dayofweek", + "day_of_week", + "dayofyear", + "day_of_year", + "quarter", + "is_month_start", + "is_month_end", + "is_quarter_start", + "is_quarter_end", + "is_year_start", + "is_year_end", + "is_leap_year", + "daysinmonth", + "days_in_month", + ], +) +def test_datetime_property(attr): + s = pd.Series(pd.date_range("2000", periods=4)) + s.attrs = {"a": 1} + result = getattr(s.dt, attr) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "attr", ["days", "seconds", "microseconds", "nanoseconds", "components"] +) +def test_timedelta_property(attr): + s = pd.Series(pd.timedelta_range("2000", periods=4)) + s.attrs = {"a": 1} + result = getattr(s.dt, attr) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize("method", [operator.methodcaller("total_seconds")]) +def test_timedelta_methods(method): + s = pd.Series(pd.timedelta_range("2000", periods=4)) + s.attrs = {"a": 1} + result = method(s.dt) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "method", + [ + operator.methodcaller("add_categories", ["c"]), + operator.methodcaller("as_ordered"), + operator.methodcaller("as_unordered"), + lambda x: getattr(x, "codes"), + operator.methodcaller("remove_categories", "a"), + operator.methodcaller("remove_unused_categories"), + operator.methodcaller("rename_categories", {"a": "A", "b": "B"}), + operator.methodcaller("reorder_categories", ["b", "a"]), + operator.methodcaller("set_categories", ["A", "B"]), + ], +) +@not_implemented_mark +def test_categorical_accessor(method): + s = 
pd.Series(["a", "b"], dtype="category") + s.attrs = {"a": 1} + result = method(s.cat) + assert result.attrs == {"a": 1} + + +# ---------------------------------------------------------------------------- +# Groupby + + +@pytest.mark.parametrize( + "obj", [pd.Series([0, 0]), pd.DataFrame({"A": [0, 1], "B": [1, 2]})] +) +@pytest.mark.parametrize( + "method", + [ + operator.methodcaller("sum"), + lambda x: x.apply(lambda y: y), + lambda x: x.agg("sum"), + lambda x: x.agg("mean"), + lambda x: x.agg("median"), + ], +) +def test_groupby_finalize(obj, method): + obj.attrs = {"a": 1} + result = method(obj.groupby([0, 0], group_keys=False)) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "obj", [pd.Series([0, 0]), pd.DataFrame({"A": [0, 1], "B": [1, 2]})] +) +@pytest.mark.parametrize( + "method", + [ + lambda x: x.agg(["sum", "count"]), + lambda x: x.agg("std"), + lambda x: x.agg("var"), + lambda x: x.agg("sem"), + lambda x: x.agg("size"), + lambda x: x.agg("ohlc"), + ], +) +@not_implemented_mark +def test_groupby_finalize_not_implemented(obj, method): + obj.attrs = {"a": 1} + result = method(obj.groupby([0, 0])) + assert result.attrs == {"a": 1} + + +def test_finalize_frame_series_name(): + # https://github.com/pandas-dev/pandas/pull/37186/files#r506978889 + # ensure we don't copy the column `name` to the Series. + df = pd.DataFrame({"name": [1, 2]}) + result = pd.Series([1, 2]).__finalize__(df) + assert result.name is None diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_frame.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_frame.py new file mode 100644 index 00000000..620d5055 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_frame.py @@ -0,0 +1,209 @@ +from copy import deepcopy +from operator import methodcaller + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + MultiIndex, + Series, + date_range, +) +import pandas._testing as tm + + +class TestDataFrame: + @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"]) + def test_set_axis_name(self, func): + df = DataFrame([[1, 2], [3, 4]]) + + result = methodcaller(func, "foo")(df) + assert df.index.name is None + assert result.index.name == "foo" + + result = methodcaller(func, "cols", axis=1)(df) + assert df.columns.name is None + assert result.columns.name == "cols" + + @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"]) + def test_set_axis_name_mi(self, func): + df = DataFrame( + np.empty((3, 3)), + index=MultiIndex.from_tuples([("A", x) for x in list("aBc")]), + columns=MultiIndex.from_tuples([("C", x) for x in list("xyz")]), + ) + + level_names = ["L1", "L2"] + + result = methodcaller(func, level_names)(df) + assert result.index.names == level_names + assert result.columns.names == [None, None] + + result = methodcaller(func, level_names, axis=1)(df) + assert result.columns.names == ["L1", "L2"] + assert result.index.names == [None, None] + + def test_nonzero_single_element(self): + # allow single item via bool method + msg_warn = ( + "DataFrame.bool is now deprecated and will be removed " + "in future version of pandas" + ) + df = DataFrame([[True]]) + df1 = DataFrame([[False]]) + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + assert df.bool() + + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + assert not df1.bool() + + df = DataFrame([[False, False]]) + msg_err = "The truth value of a DataFrame is ambiguous" + with 
pytest.raises(ValueError, match=msg_err): + bool(df) + + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + with pytest.raises(ValueError, match=msg_err): + df.bool() + + def test_metadata_propagation_indiv_groupby(self): + # groupby + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.default_rng(2).standard_normal(8), + "D": np.random.default_rng(2).standard_normal(8), + } + ) + result = df.groupby("A").sum() + tm.assert_metadata_equivalent(df, result) + + def test_metadata_propagation_indiv_resample(self): + # resample + df = DataFrame( + np.random.default_rng(2).standard_normal((1000, 2)), + index=date_range("20130101", periods=1000, freq="s"), + ) + result = df.resample("1T") + tm.assert_metadata_equivalent(df, result) + + def test_metadata_propagation_indiv(self, monkeypatch): + # merging with override + # GH 6923 + + def finalize(self, other, method=None, **kwargs): + for name in self._metadata: + if method == "merge": + left, right = other.left, other.right + value = getattr(left, name, "") + "|" + getattr(right, name, "") + object.__setattr__(self, name, value) + elif method == "concat": + value = "+".join( + [getattr(o, name) for o in other.objs if getattr(o, name, None)] + ) + object.__setattr__(self, name, value) + else: + object.__setattr__(self, name, getattr(other, name, "")) + + return self + + with monkeypatch.context() as m: + m.setattr(DataFrame, "_metadata", ["filename"]) + m.setattr(DataFrame, "__finalize__", finalize) + + df1 = DataFrame( + np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["a", "b"] + ) + df2 = DataFrame( + np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["c", "d"] + ) + DataFrame._metadata = ["filename"] + df1.filename = "fname1.csv" + df2.filename = "fname2.csv" + + result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner") + assert result.filename == "fname1.csv|fname2.csv" + + # concat + # GH#6927 + df1 = DataFrame( + np.random.default_rng(2).integers(0, 4, (3, 2)), columns=list("ab") + ) + df1.filename = "foo" + + result = pd.concat([df1, df1]) + assert result.filename == "foo+foo" + + def test_set_attribute(self): + # Test for consistent setattr behavior when an attribute and a column + # have the same name (Issue #8994) + df = DataFrame({"x": [1, 2, 3]}) + + df.y = 2 + df["y"] = [2, 4, 6] + df.y = 5 + + assert df.y == 5 + tm.assert_series_equal(df["y"], Series([2, 4, 6], name="y")) + + def test_deepcopy_empty(self): + # This test covers empty frame copying with non-empty column sets + # as reported in issue GH15370 + empty_frame = DataFrame(data=[], index=[], columns=["A"]) + empty_frame_copy = deepcopy(empty_frame) + + tm.assert_frame_equal(empty_frame_copy, empty_frame) + + +# formerly in Generic but only test DataFrame +class TestDataFrame2: + @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0]) + def test_validate_bool_args(self, value): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + + msg = 'For argument "inplace" expected type bool, received type' + with pytest.raises(ValueError, match=msg): + df.copy().rename_axis(mapper={"a": "x", "b": "y"}, axis=1, inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy().drop("a", axis=1, inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy().fillna(value=0, inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy().replace(to_replace=1, value=7, inplace=value) + + with 
pytest.raises(ValueError, match=msg): + df.copy().interpolate(inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy()._where(cond=df.a > 2, inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy().mask(cond=df.a > 2, inplace=value) + + def test_unexpected_keyword(self): + # GH8597 + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=["jim", "joe"] + ) + ca = pd.Categorical([0, 0, 2, 2, 3, np.nan]) + ts = df["joe"].copy() + ts[2] = np.nan + + msg = "unexpected keyword" + with pytest.raises(TypeError, match=msg): + df.drop("joe", axis=1, in_place=True) + + with pytest.raises(TypeError, match=msg): + df.reindex([1, 0], inplace=True) + + with pytest.raises(TypeError, match=msg): + ca.fillna(0, inplace=True) + + with pytest.raises(TypeError, match=msg): + ts.fillna(0, in_place=True) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_generic.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_generic.py new file mode 100644 index 00000000..87beab04 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_generic.py @@ -0,0 +1,462 @@ +from copy import ( + copy, + deepcopy, +) + +import numpy as np +import pytest + +from pandas.core.dtypes.common import is_scalar + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + +# ---------------------------------------------------------------------- +# Generic types test cases + + +def construct(box, shape, value=None, dtype=None, **kwargs): + """ + construct an object for the given shape + if value is specified use that if its a scalar + if value is an array, repeat it as needed + """ + if isinstance(shape, int): + shape = tuple([shape] * box._AXIS_LEN) + if value is not None: + if is_scalar(value): + if value == "empty": + arr = None + dtype = np.float64 + + # remove the info axis + kwargs.pop(box._info_axis_name, None) + else: + arr = np.empty(shape, dtype=dtype) + arr.fill(value) + else: + fshape = np.prod(shape) + arr = value.ravel() + new_shape = fshape / arr.shape[0] + if fshape % arr.shape[0] != 0: + raise Exception("invalid value passed in construct") + + arr = np.repeat(arr, new_shape).reshape(shape) + else: + arr = np.random.default_rng(2).standard_normal(shape) + return box(arr, dtype=dtype, **kwargs) + + +class TestGeneric: + @pytest.mark.parametrize( + "func", + [ + str.lower, + {x: x.lower() for x in list("ABCD")}, + Series({x: x.lower() for x in list("ABCD")}), + ], + ) + def test_rename(self, frame_or_series, func): + # single axis + idx = list("ABCD") + + for axis in frame_or_series._AXIS_ORDERS: + kwargs = {axis: idx} + obj = construct(frame_or_series, 4, **kwargs) + + # rename a single axis + result = obj.rename(**{axis: func}) + expected = obj.copy() + setattr(expected, axis, list("abcd")) + tm.assert_equal(result, expected) + + def test_get_numeric_data(self, frame_or_series): + n = 4 + kwargs = { + frame_or_series._get_axis_name(i): list(range(n)) + for i in range(frame_or_series._AXIS_LEN) + } + + # get the numeric data + o = construct(frame_or_series, n, **kwargs) + result = o._get_numeric_data() + tm.assert_equal(result, o) + + # non-inclusion + result = o._get_bool_data() + expected = construct(frame_or_series, n, value="empty", **kwargs) + if isinstance(o, DataFrame): + # preserve columns dtype + expected.columns = o.columns[:0] + # https://github.com/pandas-dev/pandas/issues/50862 + tm.assert_equal(result.reset_index(drop=True), expected) + + # get the bool data + arr = 
np.array([True, True, False, True]) + o = construct(frame_or_series, n, value=arr, **kwargs) + result = o._get_numeric_data() + tm.assert_equal(result, o) + + def test_nonzero(self, frame_or_series): + # GH 4633 + # look at the boolean/nonzero behavior for objects + obj = construct(frame_or_series, shape=4) + msg = f"The truth value of a {frame_or_series.__name__} is ambiguous" + with pytest.raises(ValueError, match=msg): + bool(obj == 0) + with pytest.raises(ValueError, match=msg): + bool(obj == 1) + with pytest.raises(ValueError, match=msg): + bool(obj) + + obj = construct(frame_or_series, shape=4, value=1) + with pytest.raises(ValueError, match=msg): + bool(obj == 0) + with pytest.raises(ValueError, match=msg): + bool(obj == 1) + with pytest.raises(ValueError, match=msg): + bool(obj) + + obj = construct(frame_or_series, shape=4, value=np.nan) + with pytest.raises(ValueError, match=msg): + bool(obj == 0) + with pytest.raises(ValueError, match=msg): + bool(obj == 1) + with pytest.raises(ValueError, match=msg): + bool(obj) + + # empty + obj = construct(frame_or_series, shape=0) + with pytest.raises(ValueError, match=msg): + bool(obj) + + # invalid behaviors + + obj1 = construct(frame_or_series, shape=4, value=1) + obj2 = construct(frame_or_series, shape=4, value=1) + + with pytest.raises(ValueError, match=msg): + if obj1: + pass + + with pytest.raises(ValueError, match=msg): + obj1 and obj2 + with pytest.raises(ValueError, match=msg): + obj1 or obj2 + with pytest.raises(ValueError, match=msg): + not obj1 + + def test_frame_or_series_compound_dtypes(self, frame_or_series): + # see gh-5191 + # Compound dtypes should raise NotImplementedError. + + def f(dtype): + return construct(frame_or_series, shape=3, value=1, dtype=dtype) + + msg = ( + "compound dtypes are not implemented " + f"in the {frame_or_series.__name__} constructor" + ) + + with pytest.raises(NotImplementedError, match=msg): + f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")]) + + # these work (though results may be unexpected) + f("int64") + f("float64") + f("M8[ns]") + + def test_metadata_propagation(self, frame_or_series): + # check that the metadata matches up on the resulting ops + + o = construct(frame_or_series, shape=3) + o.name = "foo" + o2 = construct(frame_or_series, shape=3) + o2.name = "bar" + + # ---------- + # preserving + # ---------- + + # simple ops with scalars + for op in ["__add__", "__sub__", "__truediv__", "__mul__"]: + result = getattr(o, op)(1) + tm.assert_metadata_equivalent(o, result) + + # ops with like + for op in ["__add__", "__sub__", "__truediv__", "__mul__"]: + result = getattr(o, op)(o) + tm.assert_metadata_equivalent(o, result) + + # simple boolean + for op in ["__eq__", "__le__", "__ge__"]: + v1 = getattr(o, op)(o) + tm.assert_metadata_equivalent(o, v1) + tm.assert_metadata_equivalent(o, v1 & v1) + tm.assert_metadata_equivalent(o, v1 | v1) + + # combine_first + result = o.combine_first(o2) + tm.assert_metadata_equivalent(o, result) + + # --------------------------- + # non-preserving (by default) + # --------------------------- + + # add non-like + result = o + o2 + tm.assert_metadata_equivalent(result) + + # simple boolean + for op in ["__eq__", "__le__", "__ge__"]: + # this is a name matching op + v1 = getattr(o, op)(o) + v2 = getattr(o, op)(o2) + tm.assert_metadata_equivalent(v2) + tm.assert_metadata_equivalent(v1 & v2) + tm.assert_metadata_equivalent(v1 | v2) + + def test_size_compat(self, frame_or_series): + # GH8846 + # size property should be defined + + o = 
construct(frame_or_series, shape=10) + assert o.size == np.prod(o.shape) + assert o.size == 10 ** len(o.axes) + + def test_split_compat(self, frame_or_series): + # xref GH8846 + o = construct(frame_or_series, shape=10) + with tm.assert_produces_warning( + FutureWarning, match=".swapaxes' is deprecated", check_stacklevel=False + ): + assert len(np.array_split(o, 5)) == 5 + assert len(np.array_split(o, 2)) == 2 + + # See gh-12301 + def test_stat_unexpected_keyword(self, frame_or_series): + obj = construct(frame_or_series, 5) + starwars = "Star Wars" + errmsg = "unexpected keyword" + + with pytest.raises(TypeError, match=errmsg): + obj.max(epic=starwars) # stat_function + with pytest.raises(TypeError, match=errmsg): + obj.var(epic=starwars) # stat_function_ddof + with pytest.raises(TypeError, match=errmsg): + obj.sum(epic=starwars) # cum_function + with pytest.raises(TypeError, match=errmsg): + obj.any(epic=starwars) # logical_function + + @pytest.mark.parametrize("func", ["sum", "cumsum", "any", "var"]) + def test_api_compat(self, func, frame_or_series): + # GH 12021 + # compat for __name__, __qualname__ + + obj = construct(frame_or_series, 5) + f = getattr(obj, func) + assert f.__name__ == func + assert f.__qualname__.endswith(func) + + def test_stat_non_defaults_args(self, frame_or_series): + obj = construct(frame_or_series, 5) + out = np.array([0]) + errmsg = "the 'out' parameter is not supported" + + with pytest.raises(ValueError, match=errmsg): + obj.max(out=out) # stat_function + with pytest.raises(ValueError, match=errmsg): + obj.var(out=out) # stat_function_ddof + with pytest.raises(ValueError, match=errmsg): + obj.sum(out=out) # cum_function + with pytest.raises(ValueError, match=errmsg): + obj.any(out=out) # logical_function + + def test_truncate_out_of_bounds(self, frame_or_series): + # GH11382 + + # small + shape = [2000] + ([1] * (frame_or_series._AXIS_LEN - 1)) + small = construct(frame_or_series, shape, dtype="int8", value=1) + tm.assert_equal(small.truncate(), small) + tm.assert_equal(small.truncate(before=0, after=3e3), small) + tm.assert_equal(small.truncate(before=-1, after=2e3), small) + + # big + shape = [2_000_000] + ([1] * (frame_or_series._AXIS_LEN - 1)) + big = construct(frame_or_series, shape, dtype="int8", value=1) + tm.assert_equal(big.truncate(), big) + tm.assert_equal(big.truncate(before=0, after=3e6), big) + tm.assert_equal(big.truncate(before=-1, after=2e6), big) + + @pytest.mark.parametrize( + "func", + [copy, deepcopy, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)], + ) + @pytest.mark.parametrize("shape", [0, 1, 2]) + def test_copy_and_deepcopy(self, frame_or_series, shape, func): + # GH 15444 + obj = construct(frame_or_series, shape) + obj_copy = func(obj) + assert obj_copy is not obj + tm.assert_equal(obj_copy, obj) + + def test_data_deprecated(self, frame_or_series): + obj = frame_or_series() + msg = "(Series|DataFrame)._data is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + mgr = obj._data + assert mgr is obj._mgr + + +class TestNDFrame: + # tests that don't fit elsewhere + + @pytest.mark.parametrize( + "ser", [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()] + ) + def test_squeeze_series_noop(self, ser): + # noop + tm.assert_series_equal(ser.squeeze(), ser) + + def test_squeeze_frame_noop(self): + # noop + df = tm.makeTimeDataFrame() + tm.assert_frame_equal(df.squeeze(), df) + + def test_squeeze_frame_reindex(self): + # squeezing + df = tm.makeTimeDataFrame().reindex(columns=["A"]) + 
tm.assert_series_equal(df.squeeze(), df["A"]) + + def test_squeeze_0_len_dim(self): + # don't fail with 0 length dimensions GH11229 & GH8999 + empty_series = Series([], name="five", dtype=np.float64) + empty_frame = DataFrame([empty_series]) + tm.assert_series_equal(empty_series, empty_series.squeeze()) + tm.assert_series_equal(empty_series, empty_frame.squeeze()) + + def test_squeeze_axis(self): + # axis argument + df = tm.makeTimeDataFrame(nper=1).iloc[:, :1] + assert df.shape == (1, 1) + tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0]) + tm.assert_series_equal(df.squeeze(axis="index"), df.iloc[0]) + tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0]) + tm.assert_series_equal(df.squeeze(axis="columns"), df.iloc[:, 0]) + assert df.squeeze() == df.iloc[0, 0] + msg = "No axis named 2 for object type DataFrame" + with pytest.raises(ValueError, match=msg): + df.squeeze(axis=2) + msg = "No axis named x for object type DataFrame" + with pytest.raises(ValueError, match=msg): + df.squeeze(axis="x") + + def test_squeeze_axis_len_3(self): + df = tm.makeTimeDataFrame(3) + tm.assert_frame_equal(df.squeeze(axis=0), df) + + def test_numpy_squeeze(self): + s = tm.makeFloatSeries() + tm.assert_series_equal(np.squeeze(s), s) + + df = tm.makeTimeDataFrame().reindex(columns=["A"]) + tm.assert_series_equal(np.squeeze(df), df["A"]) + + @pytest.mark.parametrize( + "ser", [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()] + ) + def test_transpose_series(self, ser): + # calls implementation in pandas/core/base.py + tm.assert_series_equal(ser.transpose(), ser) + + def test_transpose_frame(self): + df = tm.makeTimeDataFrame() + tm.assert_frame_equal(df.transpose().transpose(), df) + + def test_numpy_transpose(self, frame_or_series): + obj = tm.makeTimeDataFrame() + obj = tm.get_obj(obj, frame_or_series) + + if frame_or_series is Series: + # 1D -> np.transpose is no-op + tm.assert_series_equal(np.transpose(obj), obj) + + # round-trip preserved + tm.assert_equal(np.transpose(np.transpose(obj)), obj) + + msg = "the 'axes' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.transpose(obj, axes=1) + + @pytest.mark.parametrize( + "ser", [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()] + ) + def test_take_series(self, ser): + indices = [1, 5, -2, 6, 3, -1] + out = ser.take(indices) + expected = Series( + data=ser.values.take(indices), + index=ser.index.take(indices), + dtype=ser.dtype, + ) + tm.assert_series_equal(out, expected) + + def test_take_frame(self): + indices = [1, 5, -2, 6, 3, -1] + df = tm.makeTimeDataFrame() + out = df.take(indices) + expected = DataFrame( + data=df.values.take(indices, axis=0), + index=df.index.take(indices), + columns=df.columns, + ) + tm.assert_frame_equal(out, expected) + + def test_take_invalid_kwargs(self, frame_or_series): + indices = [-3, 2, 0, 1] + + obj = tm.makeTimeDataFrame() + obj = tm.get_obj(obj, frame_or_series) + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + obj.take(indices, foo=2) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + obj.take(indices, out=indices) + + msg = "the 'mode' parameter is not supported" + with pytest.raises(ValueError, match=msg): + obj.take(indices, mode="clip") + + def test_axis_classmethods(self, frame_or_series): + box = frame_or_series + obj = box(dtype=object) + values = box._AXIS_TO_AXIS_NUMBER.keys() + for v in values: + assert obj._get_axis_number(v) == 
box._get_axis_number(v) + assert obj._get_axis_name(v) == box._get_axis_name(v) + assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v) + + def test_flags_identity(self, frame_or_series): + obj = Series([1, 2]) + if frame_or_series is DataFrame: + obj = obj.to_frame() + + assert obj.flags is obj.flags + obj2 = obj.copy() + assert obj2.flags is not obj.flags + + def test_bool_dep(self) -> None: + # GH-51749 + msg_warn = ( + "DataFrame.bool is now deprecated and will be removed " + "in future version of pandas" + ) + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + DataFrame({"col": [False]}).bool() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_label_or_level_utils.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_label_or_level_utils.py new file mode 100644 index 00000000..97be46f7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_label_or_level_utils.py @@ -0,0 +1,336 @@ +import pytest + +from pandas.core.dtypes.missing import array_equivalent + +import pandas as pd + + +# Fixtures +# ======== +@pytest.fixture +def df(): + """DataFrame with columns 'L1', 'L2', and 'L3'""" + return pd.DataFrame({"L1": [1, 2, 3], "L2": [11, 12, 13], "L3": ["A", "B", "C"]}) + + +@pytest.fixture(params=[[], ["L1"], ["L1", "L2"], ["L1", "L2", "L3"]]) +def df_levels(request, df): + """DataFrame with columns or index levels 'L1', 'L2', and 'L3'""" + levels = request.param + + if levels: + df = df.set_index(levels) + + return df + + +@pytest.fixture +def df_ambig(df): + """DataFrame with levels 'L1' and 'L2' and labels 'L1' and 'L3'""" + df = df.set_index(["L1", "L2"]) + + df["L1"] = df["L3"] + + return df + + +@pytest.fixture +def df_duplabels(df): + """DataFrame with level 'L1' and labels 'L2', 'L3', and 'L2'""" + df = df.set_index(["L1"]) + df = pd.concat([df, df["L2"]], axis=1) + + return df + + +# Test is label/level reference +# ============================= +def get_labels_levels(df_levels): + expected_labels = list(df_levels.columns) + expected_levels = [name for name in df_levels.index.names if name is not None] + return expected_labels, expected_levels + + +def assert_label_reference(frame, labels, axis): + for label in labels: + assert frame._is_label_reference(label, axis=axis) + assert not frame._is_level_reference(label, axis=axis) + assert frame._is_label_or_level_reference(label, axis=axis) + + +def assert_level_reference(frame, levels, axis): + for level in levels: + assert frame._is_level_reference(level, axis=axis) + assert not frame._is_label_reference(level, axis=axis) + assert frame._is_label_or_level_reference(level, axis=axis) + + +# DataFrame +# --------- +def test_is_level_or_label_reference_df_simple(df_levels, axis): + axis = df_levels._get_axis_number(axis) + # Compute expected labels and levels + expected_labels, expected_levels = get_labels_levels(df_levels) + + # Transpose frame if axis == 1 + if axis == 1: + df_levels = df_levels.T + + # Perform checks + assert_level_reference(df_levels, expected_levels, axis=axis) + assert_label_reference(df_levels, expected_labels, axis=axis) + + +def test_is_level_reference_df_ambig(df_ambig, axis): + axis = df_ambig._get_axis_number(axis) + + # Transpose frame if axis == 1 + if axis == 1: + df_ambig = df_ambig.T + + # df has both an on-axis level and off-axis label named L1 + # Therefore L1 should reference the label, not the level + assert_label_reference(df_ambig, ["L1"], axis=axis) + + # df has an on-axis level named L2 and 
it is not ambiguous + # Therefore L2 is an level reference + assert_level_reference(df_ambig, ["L2"], axis=axis) + + # df has a column named L3 and it not an level reference + assert_label_reference(df_ambig, ["L3"], axis=axis) + + +# Series +# ------ +def test_is_level_reference_series_simple_axis0(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + assert_level_reference(s, ["L1"], axis=0) + assert not s._is_level_reference("L2") + + # Make series with L1 and L2 as index + s = df.set_index(["L1", "L2"]).L3 + assert_level_reference(s, ["L1", "L2"], axis=0) + assert not s._is_level_reference("L3") + + +def test_is_level_reference_series_axis1_error(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + + with pytest.raises(ValueError, match="No axis named 1"): + s._is_level_reference("L1", axis=1) + + +# Test _check_label_or_level_ambiguity_df +# ======================================= + + +# DataFrame +# --------- +def test_check_label_or_level_ambiguity_df(df_ambig, axis): + axis = df_ambig._get_axis_number(axis) + # Transpose frame if axis == 1 + if axis == 1: + df_ambig = df_ambig.T + msg = "'L1' is both a column level and an index label" + + else: + msg = "'L1' is both an index level and a column label" + # df_ambig has both an on-axis level and off-axis label named L1 + # Therefore, L1 is ambiguous. + with pytest.raises(ValueError, match=msg): + df_ambig._check_label_or_level_ambiguity("L1", axis=axis) + + # df_ambig has an on-axis level named L2,, and it is not ambiguous. + df_ambig._check_label_or_level_ambiguity("L2", axis=axis) + + # df_ambig has an off-axis label named L3, and it is not ambiguous + assert not df_ambig._check_label_or_level_ambiguity("L3", axis=axis) + + +# Series +# ------ +def test_check_label_or_level_ambiguity_series(df): + # A series has no columns and therefore references are never ambiguous + + # Make series with L1 as index + s = df.set_index("L1").L2 + s._check_label_or_level_ambiguity("L1", axis=0) + s._check_label_or_level_ambiguity("L2", axis=0) + + # Make series with L1 and L2 as index + s = df.set_index(["L1", "L2"]).L3 + s._check_label_or_level_ambiguity("L1", axis=0) + s._check_label_or_level_ambiguity("L2", axis=0) + s._check_label_or_level_ambiguity("L3", axis=0) + + +def test_check_label_or_level_ambiguity_series_axis1_error(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + + with pytest.raises(ValueError, match="No axis named 1"): + s._check_label_or_level_ambiguity("L1", axis=1) + + +# Test _get_label_or_level_values +# =============================== +def assert_label_values(frame, labels, axis): + axis = frame._get_axis_number(axis) + for label in labels: + if axis == 0: + expected = frame[label]._values + else: + expected = frame.loc[label]._values + + result = frame._get_label_or_level_values(label, axis=axis) + assert array_equivalent(expected, result) + + +def assert_level_values(frame, levels, axis): + axis = frame._get_axis_number(axis) + for level in levels: + if axis == 0: + expected = frame.index.get_level_values(level=level)._values + else: + expected = frame.columns.get_level_values(level=level)._values + + result = frame._get_label_or_level_values(level, axis=axis) + assert array_equivalent(expected, result) + + +# DataFrame +# --------- +def test_get_label_or_level_values_df_simple(df_levels, axis): + # Compute expected labels and levels + expected_labels, expected_levels = get_labels_levels(df_levels) + + axis = df_levels._get_axis_number(axis) + # Transpose frame if axis == 1 + 
if axis == 1: + df_levels = df_levels.T + + # Perform checks + assert_label_values(df_levels, expected_labels, axis=axis) + assert_level_values(df_levels, expected_levels, axis=axis) + + +def test_get_label_or_level_values_df_ambig(df_ambig, axis): + axis = df_ambig._get_axis_number(axis) + # Transpose frame if axis == 1 + if axis == 1: + df_ambig = df_ambig.T + + # df has an on-axis level named L2, and it is not ambiguous. + assert_level_values(df_ambig, ["L2"], axis=axis) + + # df has an off-axis label named L3, and it is not ambiguous. + assert_label_values(df_ambig, ["L3"], axis=axis) + + +def test_get_label_or_level_values_df_duplabels(df_duplabels, axis): + axis = df_duplabels._get_axis_number(axis) + # Transpose frame if axis == 1 + if axis == 1: + df_duplabels = df_duplabels.T + + # df has unambiguous level 'L1' + assert_level_values(df_duplabels, ["L1"], axis=axis) + + # df has unique label 'L3' + assert_label_values(df_duplabels, ["L3"], axis=axis) + + # df has duplicate labels 'L2' + if axis == 0: + expected_msg = "The column label 'L2' is not unique" + else: + expected_msg = "The index label 'L2' is not unique" + + with pytest.raises(ValueError, match=expected_msg): + assert_label_values(df_duplabels, ["L2"], axis=axis) + + +# Series +# ------ +def test_get_label_or_level_values_series_axis0(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + assert_level_values(s, ["L1"], axis=0) + + # Make series with L1 and L2 as index + s = df.set_index(["L1", "L2"]).L3 + assert_level_values(s, ["L1", "L2"], axis=0) + + +def test_get_label_or_level_values_series_axis1_error(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + + with pytest.raises(ValueError, match="No axis named 1"): + s._get_label_or_level_values("L1", axis=1) + + +# Test _drop_labels_or_levels +# =========================== +def assert_labels_dropped(frame, labels, axis): + axis = frame._get_axis_number(axis) + for label in labels: + df_dropped = frame._drop_labels_or_levels(label, axis=axis) + + if axis == 0: + assert label in frame.columns + assert label not in df_dropped.columns + else: + assert label in frame.index + assert label not in df_dropped.index + + +def assert_levels_dropped(frame, levels, axis): + axis = frame._get_axis_number(axis) + for level in levels: + df_dropped = frame._drop_labels_or_levels(level, axis=axis) + + if axis == 0: + assert level in frame.index.names + assert level not in df_dropped.index.names + else: + assert level in frame.columns.names + assert level not in df_dropped.columns.names + + +# DataFrame +# --------- +def test_drop_labels_or_levels_df(df_levels, axis): + # Compute expected labels and levels + expected_labels, expected_levels = get_labels_levels(df_levels) + + axis = df_levels._get_axis_number(axis) + # Transpose frame if axis == 1 + if axis == 1: + df_levels = df_levels.T + + # Perform checks + assert_labels_dropped(df_levels, expected_labels, axis=axis) + assert_levels_dropped(df_levels, expected_levels, axis=axis) + + with pytest.raises(ValueError, match="not valid labels or levels"): + df_levels._drop_labels_or_levels("L4", axis=axis) + + +# Series +# ------ +def test_drop_labels_or_levels_series(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + assert_levels_dropped(s, ["L1"], axis=0) + + with pytest.raises(ValueError, match="not valid labels or levels"): + s._drop_labels_or_levels("L4", axis=0) + + # Make series with L1 and L2 as index + s = df.set_index(["L1", "L2"]).L3 + assert_levels_dropped(s, ["L1", "L2"], axis=0) + 
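+    # --- Editorial sketch (not part of the upstream test): for a Series,
+    # `_drop_labels_or_levels` can only drop index *levels* (a Series has no
+    # columns), e.g. dropping "L1" from the two-level index built above leaves
+    # only "L2" in the result's `index.names`; an unknown key such as "L4"
+    # raises, as asserted below.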
+ with pytest.raises(ValueError, match="not valid labels or levels"): + s._drop_labels_or_levels("L4", axis=0) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_series.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_series.py new file mode 100644 index 00000000..4ea205ac --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_series.py @@ -0,0 +1,159 @@ +from operator import methodcaller + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + MultiIndex, + Series, + date_range, +) +import pandas._testing as tm + + +class TestSeries: + @pytest.mark.parametrize("func", ["rename_axis", "_set_axis_name"]) + def test_set_axis_name_mi(self, func): + ser = Series( + [11, 21, 31], + index=MultiIndex.from_tuples( + [("A", x) for x in ["a", "B", "c"]], names=["l1", "l2"] + ), + ) + + result = methodcaller(func, ["L1", "L2"])(ser) + assert ser.index.name is None + assert ser.index.names == ["l1", "l2"] + assert result.index.name is None + assert result.index.names, ["L1", "L2"] + + def test_set_axis_name_raises(self): + ser = Series([1]) + msg = "No axis named 1 for object type Series" + with pytest.raises(ValueError, match=msg): + ser._set_axis_name(name="a", axis=1) + + def test_get_bool_data_preserve_dtype(self): + ser = Series([True, False, True]) + result = ser._get_bool_data() + tm.assert_series_equal(result, ser) + + def test_nonzero_single_element(self): + # allow single item via bool method + msg_warn = ( + "Series.bool is now deprecated and will be removed " + "in future version of pandas" + ) + ser = Series([True]) + ser1 = Series([False]) + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + assert ser.bool() + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + assert not ser1.bool() + + @pytest.mark.parametrize("data", [np.nan, pd.NaT, True, False]) + def test_nonzero_single_element_raise_1(self, data): + # single item nan to raise + series = Series([data]) + + msg = "The truth value of a Series is ambiguous" + with pytest.raises(ValueError, match=msg): + bool(series) + + @pytest.mark.parametrize("data", [np.nan, pd.NaT]) + def test_nonzero_single_element_raise_2(self, data): + msg_warn = ( + "Series.bool is now deprecated and will be removed " + "in future version of pandas" + ) + msg_err = "bool cannot act on a non-boolean single element Series" + series = Series([data]) + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + with pytest.raises(ValueError, match=msg_err): + series.bool() + + @pytest.mark.parametrize("data", [(True, True), (False, False)]) + def test_nonzero_multiple_element_raise(self, data): + # multiple bool are still an error + msg_warn = ( + "Series.bool is now deprecated and will be removed " + "in future version of pandas" + ) + msg_err = "The truth value of a Series is ambiguous" + series = Series([data]) + with pytest.raises(ValueError, match=msg_err): + bool(series) + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + with pytest.raises(ValueError, match=msg_err): + series.bool() + + @pytest.mark.parametrize("data", [1, 0, "a", 0.0]) + def test_nonbool_single_element_raise(self, data): + # single non-bool are an error + msg_warn = ( + "Series.bool is now deprecated and will be removed " + "in future version of pandas" + ) + msg_err1 = "The truth value of a Series is ambiguous" + msg_err2 = "bool cannot act on a non-boolean single element Series" + series = Series([data]) + with pytest.raises(ValueError, 
match=msg_err1): + bool(series) + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + with pytest.raises(ValueError, match=msg_err2): + series.bool() + + def test_metadata_propagation_indiv_resample(self): + # resample + ts = Series( + np.random.default_rng(2).random(1000), + index=date_range("20130101", periods=1000, freq="s"), + name="foo", + ) + result = ts.resample("1T").mean() + tm.assert_metadata_equivalent(ts, result) + + result = ts.resample("1T").min() + tm.assert_metadata_equivalent(ts, result) + + result = ts.resample("1T").apply(lambda x: x.sum()) + tm.assert_metadata_equivalent(ts, result) + + def test_metadata_propagation_indiv(self, monkeypatch): + # check that the metadata matches up on the resulting ops + + ser = Series(range(3), range(3)) + ser.name = "foo" + ser2 = Series(range(3), range(3)) + ser2.name = "bar" + + result = ser.T + tm.assert_metadata_equivalent(ser, result) + + def finalize(self, other, method=None, **kwargs): + for name in self._metadata: + if method == "concat" and name == "filename": + value = "+".join( + [ + getattr(obj, name) + for obj in other.objs + if getattr(obj, name, None) + ] + ) + object.__setattr__(self, name, value) + else: + object.__setattr__(self, name, getattr(other, name, None)) + + return self + + with monkeypatch.context() as m: + m.setattr(Series, "_metadata", ["name", "filename"]) + m.setattr(Series, "__finalize__", finalize) + + ser.filename = "foo" + ser2.filename = "bar" + + result = pd.concat([ser, ser2]) + assert result.filename == "foo+bar" + assert result.name is None diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_to_xarray.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_to_xarray.py new file mode 100644 index 00000000..d6eacf4f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/generic/test_to_xarray.py @@ -0,0 +1,126 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + DataFrame, + MultiIndex, + Series, + date_range, +) +import pandas._testing as tm + +pytest.importorskip("xarray") + + +class TestDataFrameToXArray: + @pytest.fixture + def df(self): + return DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": Categorical(list("abc")), + "g": date_range("20130101", periods=3), + "h": date_range("20130101", periods=3, tz="US/Eastern"), + } + ) + + def test_to_xarray_index_types(self, index_flat, df): + index = index_flat + # MultiIndex is tested in test_to_xarray_with_multiindex + if len(index) == 0: + pytest.skip("Test doesn't make sense for empty index") + + from xarray import Dataset + + df.index = index[:3] + df.index.name = "foo" + df.columns.name = "bar" + result = df.to_xarray() + assert result.dims["foo"] == 3 + assert len(result.coords) == 1 + assert len(result.data_vars) == 8 + tm.assert_almost_equal(list(result.coords.keys()), ["foo"]) + assert isinstance(result, Dataset) + + # idempotency + # datetimes w/tz are preserved + # column names are lost + expected = df.copy() + expected["f"] = expected["f"].astype(object) + expected.columns.name = None + tm.assert_frame_equal(result.to_dataframe(), expected) + + def test_to_xarray_empty(self, df): + from xarray import Dataset + + df.index.name = "foo" + result = df[0:0].to_xarray() + assert result.dims["foo"] == 0 + assert isinstance(result, Dataset) + + def test_to_xarray_with_multiindex(self, df): + from xarray import Dataset + + # 
MultiIndex + df.index = MultiIndex.from_product([["a"], range(3)], names=["one", "two"]) + result = df.to_xarray() + assert result.dims["one"] == 1 + assert result.dims["two"] == 3 + assert len(result.coords) == 2 + assert len(result.data_vars) == 8 + tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"]) + assert isinstance(result, Dataset) + + result = result.to_dataframe() + expected = df.copy() + expected["f"] = expected["f"].astype(object) + expected.columns.name = None + tm.assert_frame_equal(result, expected) + + +class TestSeriesToXArray: + def test_to_xarray_index_types(self, index_flat): + index = index_flat + # MultiIndex is tested in test_to_xarray_with_multiindex + + from xarray import DataArray + + ser = Series(range(len(index)), index=index, dtype="int64") + ser.index.name = "foo" + result = ser.to_xarray() + repr(result) + assert len(result) == len(index) + assert len(result.coords) == 1 + tm.assert_almost_equal(list(result.coords.keys()), ["foo"]) + assert isinstance(result, DataArray) + + # idempotency + tm.assert_series_equal(result.to_series(), ser) + + def test_to_xarray_empty(self): + from xarray import DataArray + + ser = Series([], dtype=object) + ser.index.name = "foo" + result = ser.to_xarray() + assert len(result) == 0 + assert len(result.coords) == 1 + tm.assert_almost_equal(list(result.coords.keys()), ["foo"]) + assert isinstance(result, DataArray) + + def test_to_xarray_with_multiindex(self): + from xarray import DataArray + + mi = MultiIndex.from_product([["a", "b"], range(3)], names=["one", "two"]) + ser = Series(range(6), dtype="int64", index=mi) + result = ser.to_xarray() + assert len(result) == 2 + tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"]) + assert isinstance(result, DataArray) + res = result.to_series() + tm.assert_series_equal(res, ser) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/__init__.py new file mode 100644 index 00000000..446d9da4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/__init__.py @@ -0,0 +1,25 @@ +def get_groupby_method_args(name, obj): + """ + Get required arguments for a groupby method. + + When parametrizing a test over groupby methods (e.g. "sum", "mean", "fillna"), + it is often the case that arguments are required for certain methods. + + Parameters + ---------- + name: str + Name of the method. + obj: Series or DataFrame + pandas object that is being grouped. + + Returns + ------- + A tuple of required arguments for the method. 
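+
+    Examples
+    --------
+    Illustrative only; the outputs follow from the mapping in the body below.
+
+    >>> get_groupby_method_args("quantile", None)
+    (0.5,)
+    >>> get_groupby_method_args("nth", None)
+    (0,)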
+ """ + if name in ("nth", "fillna", "take"): + return (0,) + if name == "quantile": + return (0.5,) + if name == "corrwith": + return (obj,) + return () diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_aggregate.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_aggregate.py new file mode 100644 index 00000000..406f4b9b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_aggregate.py @@ -0,0 +1,1645 @@ +""" +test .agg behavior / note that .apply is tested generally in test_groupby.py +""" +import datetime +import functools +from functools import partial +import re + +import numpy as np +import pytest + +from pandas.errors import SpecificationError + +from pandas.core.dtypes.common import is_integer_dtype + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + concat, + to_datetime, +) +import pandas._testing as tm +from pandas.core.groupby.grouper import Grouping + + +def test_groupby_agg_no_extra_calls(): + # GH#31760 + df = DataFrame({"key": ["a", "b", "c", "c"], "value": [1, 2, 3, 4]}) + gb = df.groupby("key")["value"] + + def dummy_func(x): + assert len(x) != 0 + return x.sum() + + gb.agg(dummy_func) + + +def test_agg_regression1(tsframe): + grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month]) + result = grouped.agg("mean") + expected = grouped.mean() + tm.assert_frame_equal(result, expected) + + +def test_agg_must_agg(df): + grouped = df.groupby("A")["C"] + + msg = "Must produce aggregated value" + with pytest.raises(Exception, match=msg): + grouped.agg(lambda x: x.describe()) + with pytest.raises(Exception, match=msg): + grouped.agg(lambda x: x.index[:2]) + + +def test_agg_ser_multi_key(df): + f = lambda x: x.sum() + results = df.C.groupby([df.A, df.B]).aggregate(f) + expected = df.groupby(["A", "B"]).sum()["C"] + tm.assert_series_equal(results, expected) + + +def test_groupby_aggregation_mixed_dtype(): + # GH 6212 + expected = DataFrame( + { + "v1": [5, 5, 7, np.nan, 3, 3, 4, 1], + "v2": [55, 55, 77, np.nan, 33, 33, 44, 11], + }, + index=MultiIndex.from_tuples( + [ + (1, 95), + (1, 99), + (2, 95), + (2, 99), + ("big", "damp"), + ("blue", "dry"), + ("red", "red"), + ("red", "wet"), + ], + names=["by1", "by2"], + ), + ) + + df = DataFrame( + { + "v1": [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9], + "v2": [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99], + "by1": ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12], + "by2": [ + "wet", + "dry", + 99, + 95, + np.nan, + "damp", + 95, + 99, + "red", + 99, + np.nan, + np.nan, + ], + } + ) + + g = df.groupby(["by1", "by2"]) + result = g[["v1", "v2"]].mean() + tm.assert_frame_equal(result, expected) + + +def test_groupby_aggregation_multi_level_column(): + # GH 29772 + lst = [ + [True, True, True, False], + [True, False, np.nan, False], + [True, True, np.nan, False], + [True, True, np.nan, False], + ] + df = DataFrame( + data=lst, + columns=MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 0), ("B", 1)]), + ) + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby(level=1, axis=1) + result = gb.sum(numeric_only=False) + expected = DataFrame({0: [2.0, True, True, True], 1: [1, 
0, 1, 1]}) + + tm.assert_frame_equal(result, expected) + + +def test_agg_apply_corner(ts, tsframe): + # nothing to group, all NA + grouped = ts.groupby(ts * np.nan, group_keys=False) + assert ts.dtype == np.float64 + + # groupby float64 values results in a float64 Index + exp = Series([], dtype=np.float64, index=Index([], dtype=np.float64)) + tm.assert_series_equal(grouped.sum(), exp) + tm.assert_series_equal(grouped.agg("sum"), exp) + tm.assert_series_equal(grouped.apply("sum"), exp, check_index_type=False) + + # DataFrame + grouped = tsframe.groupby(tsframe["A"] * np.nan, group_keys=False) + exp_df = DataFrame( + columns=tsframe.columns, + dtype=float, + index=Index([], name="A", dtype=np.float64), + ) + tm.assert_frame_equal(grouped.sum(), exp_df) + tm.assert_frame_equal(grouped.agg("sum"), exp_df) + + msg = "The behavior of DataFrame.sum with axis=None is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False): + res = grouped.apply(np.sum) + tm.assert_frame_equal(res, exp_df) + + +def test_agg_grouping_is_list_tuple(ts): + df = tm.makeTimeDataFrame() + + grouped = df.groupby(lambda x: x.year) + grouper = grouped.grouper.groupings[0].grouping_vector + grouped.grouper.groupings[0] = Grouping(ts.index, list(grouper)) + + result = grouped.agg("mean") + expected = grouped.mean() + tm.assert_frame_equal(result, expected) + + grouped.grouper.groupings[0] = Grouping(ts.index, tuple(grouper)) + + result = grouped.agg("mean") + expected = grouped.mean() + tm.assert_frame_equal(result, expected) + + +def test_agg_python_multiindex(mframe): + grouped = mframe.groupby(["A", "B"]) + + result = grouped.agg("mean") + expected = grouped.mean() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "groupbyfunc", [lambda x: x.weekday(), [lambda x: x.month, lambda x: x.weekday()]] +) +def test_aggregate_str_func(tsframe, groupbyfunc): + grouped = tsframe.groupby(groupbyfunc) + + # single series + result = grouped["A"].agg("std") + expected = grouped["A"].std() + tm.assert_series_equal(result, expected) + + # group frame by function name + result = grouped.aggregate("var") + expected = grouped.var() + tm.assert_frame_equal(result, expected) + + # group frame by function dict + result = grouped.agg({"A": "var", "B": "std", "C": "mean", "D": "sem"}) + expected = DataFrame( + { + "A": grouped["A"].var(), + "B": grouped["B"].std(), + "C": grouped["C"].mean(), + "D": grouped["D"].sem(), + } + ) + tm.assert_frame_equal(result, expected) + + +def test_std_masked_dtype(any_numeric_ea_dtype): + # GH#35516 + df = DataFrame( + { + "a": [2, 1, 1, 1, 2, 2, 1], + "b": Series([pd.NA, 1, 2, 1, 1, 1, 2], dtype="Float64"), + } + ) + result = df.groupby("a").std() + expected = DataFrame( + {"b": [0.57735, 0]}, index=Index([1, 2], name="a"), dtype="Float64" + ) + tm.assert_frame_equal(result, expected) + + +def test_agg_str_with_kwarg_axis_1_raises(df, reduction_func): + gb = df.groupby(level=0) + warn_msg = f"DataFrameGroupBy.{reduction_func} with axis=1 is deprecated" + if reduction_func in ("idxmax", "idxmin"): + error = TypeError + msg = "'[<>]' not supported between instances of 'float' and 'str'" + warn = FutureWarning + else: + error = ValueError + msg = f"Operation {reduction_func} does not support axis=1" + warn = None + with pytest.raises(error, match=msg): + with tm.assert_produces_warning(warn, match=warn_msg): + gb.agg(reduction_func, axis=1) + + +@pytest.mark.parametrize( + "func, expected, dtype, result_dtype_dict", + [ + ("sum", [5, 7, 9], "int64", 
{}), + ("std", [4.5**0.5] * 3, int, {"i": float, "j": float, "k": float}), + ("var", [4.5] * 3, int, {"i": float, "j": float, "k": float}), + ("sum", [5, 7, 9], "Int64", {"j": "int64"}), + ("std", [4.5**0.5] * 3, "Int64", {"i": float, "j": float, "k": float}), + ("var", [4.5] * 3, "Int64", {"i": "float64", "j": "float64", "k": "float64"}), + ], +) +def test_multiindex_groupby_mixed_cols_axis1(func, expected, dtype, result_dtype_dict): + # GH#43209 + df = DataFrame( + [[1, 2, 3, 4, 5, 6]] * 3, + columns=MultiIndex.from_product([["a", "b"], ["i", "j", "k"]]), + ).astype({("a", "j"): dtype, ("b", "j"): dtype}) + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby(level=1, axis=1) + result = gb.agg(func) + expected = DataFrame([expected] * 3, columns=["i", "j", "k"]).astype( + result_dtype_dict + ) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "func, expected_data, result_dtype_dict", + [ + ("sum", [[2, 4], [10, 12], [18, 20]], {10: "int64", 20: "int64"}), + # std should ideally return Int64 / Float64 #43330 + ("std", [[2**0.5] * 2] * 3, "float64"), + ("var", [[2] * 2] * 3, {10: "float64", 20: "float64"}), + ], +) +def test_groupby_mixed_cols_axis1(func, expected_data, result_dtype_dict): + # GH#43209 + df = DataFrame( + np.arange(12).reshape(3, 4), + index=Index([0, 1, 0], name="y"), + columns=Index([10, 20, 10, 20], name="x"), + dtype="int64", + ).astype({10: "Int64"}) + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby("x", axis=1) + result = gb.agg(func) + expected = DataFrame( + data=expected_data, + index=Index([0, 1, 0], name="y"), + columns=Index([10, 20], name="x"), + ).astype(result_dtype_dict) + tm.assert_frame_equal(result, expected) + + +def test_aggregate_item_by_item(df): + grouped = df.groupby("A") + + aggfun_0 = lambda ser: ser.size + result = grouped.agg(aggfun_0) + foosum = (df.A == "foo").sum() + barsum = (df.A == "bar").sum() + K = len(result.columns) + + # GH5782 + exp = Series(np.array([foosum] * K), index=list("BCD"), name="foo") + tm.assert_series_equal(result.xs("foo"), exp) + + exp = Series(np.array([barsum] * K), index=list("BCD"), name="bar") + tm.assert_almost_equal(result.xs("bar"), exp) + + def aggfun_1(ser): + return ser.size + + result = DataFrame().groupby(df.A).agg(aggfun_1) + assert isinstance(result, DataFrame) + assert len(result) == 0 + + +def test_wrap_agg_out(three_group): + grouped = three_group.groupby(["A", "B"]) + + def func(ser): + if ser.dtype == object: + raise TypeError("Test error message") + return ser.sum() + + with pytest.raises(TypeError, match="Test error message"): + grouped.aggregate(func) + result = grouped[["D", "E", "F"]].aggregate(func) + exp_grouped = three_group.loc[:, ["A", "B", "D", "E", "F"]] + expected = exp_grouped.groupby(["A", "B"]).aggregate(func) + tm.assert_frame_equal(result, expected) + + +def test_agg_multiple_functions_maintain_order(df): + # GH #610 + funcs = [("mean", np.mean), ("max", np.max), ("min", np.min)] + msg = "is currently using SeriesGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A")["C"].agg(funcs) + exp_cols = Index(["mean", "max", "min"]) + + tm.assert_index_equal(result.columns, exp_cols) + + +def test_agg_multiple_functions_same_name(): + # GH 30880 + df = DataFrame( + np.random.default_rng(2).standard_normal((1000, 3)), + index=pd.date_range("1/1/2012", 
freq="S", periods=1000), + columns=["A", "B", "C"], + ) + result = df.resample("3T").agg( + {"A": [partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]} + ) + expected_index = pd.date_range("1/1/2012", freq="3T", periods=6) + expected_columns = MultiIndex.from_tuples([("A", "quantile"), ("A", "quantile")]) + expected_values = np.array( + [df.resample("3T").A.quantile(q=q).values for q in [0.9999, 0.1111]] + ).T + expected = DataFrame( + expected_values, columns=expected_columns, index=expected_index + ) + tm.assert_frame_equal(result, expected) + + +def test_agg_multiple_functions_same_name_with_ohlc_present(): + # GH 30880 + # ohlc expands dimensions, so different test to the above is required. + df = DataFrame( + np.random.default_rng(2).standard_normal((1000, 3)), + index=pd.date_range("1/1/2012", freq="S", periods=1000, name="dti"), + columns=Index(["A", "B", "C"], name="alpha"), + ) + result = df.resample("3T").agg( + {"A": ["ohlc", partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]} + ) + expected_index = pd.date_range("1/1/2012", freq="3T", periods=6, name="dti") + expected_columns = MultiIndex.from_tuples( + [ + ("A", "ohlc", "open"), + ("A", "ohlc", "high"), + ("A", "ohlc", "low"), + ("A", "ohlc", "close"), + ("A", "quantile", "A"), + ("A", "quantile", "A"), + ], + names=["alpha", None, None], + ) + non_ohlc_expected_values = np.array( + [df.resample("3T").A.quantile(q=q).values for q in [0.9999, 0.1111]] + ).T + expected_values = np.hstack([df.resample("3T").A.ohlc(), non_ohlc_expected_values]) + expected = DataFrame( + expected_values, columns=expected_columns, index=expected_index + ) + tm.assert_frame_equal(result, expected) + + +def test_multiple_functions_tuples_and_non_tuples(df): + # #1359 + # Columns B and C would cause partial failure + df = df.drop(columns=["B", "C"]) + + funcs = [("foo", "mean"), "std"] + ex_funcs = [("foo", "mean"), ("std", "std")] + + result = df.groupby("A")["D"].agg(funcs) + expected = df.groupby("A")["D"].agg(ex_funcs) + tm.assert_frame_equal(result, expected) + + result = df.groupby("A").agg(funcs) + expected = df.groupby("A").agg(ex_funcs) + tm.assert_frame_equal(result, expected) + + +def test_more_flexible_frame_multi_function(df): + grouped = df.groupby("A") + + exmean = grouped.agg({"C": "mean", "D": "mean"}) + exstd = grouped.agg({"C": "std", "D": "std"}) + + expected = concat([exmean, exstd], keys=["mean", "std"], axis=1) + expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1) + + d = {"C": ["mean", "std"], "D": ["mean", "std"]} + result = grouped.aggregate(d) + + tm.assert_frame_equal(result, expected) + + # be careful + result = grouped.aggregate({"C": "mean", "D": ["mean", "std"]}) + expected = grouped.aggregate({"C": "mean", "D": ["mean", "std"]}) + tm.assert_frame_equal(result, expected) + + def numpymean(x): + return np.mean(x) + + def numpystd(x): + return np.std(x, ddof=1) + + # this uses column selection & renaming + msg = r"nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + d = {"C": "mean", "D": {"foo": "mean", "bar": "std"}} + grouped.aggregate(d) + + # But without renaming, these functions are OK + d = {"C": ["mean"], "D": [numpymean, numpystd]} + grouped.aggregate(d) + + +def test_multi_function_flexible_mix(df): + # GH #1268 + grouped = df.groupby("A") + + # Expected + d = {"C": {"foo": "mean", "bar": "std"}, "D": {"sum": "sum"}} + # this uses column selection & renaming + msg = r"nested renamer is not supported" + with 
pytest.raises(SpecificationError, match=msg): + grouped.aggregate(d) + + # Test 1 + d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"} + # this uses column selection & renaming + with pytest.raises(SpecificationError, match=msg): + grouped.aggregate(d) + + # Test 2 + d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"} + # this uses column selection & renaming + with pytest.raises(SpecificationError, match=msg): + grouped.aggregate(d) + + +def test_groupby_agg_coercing_bools(): + # issue 14873 + dat = DataFrame({"a": [1, 1, 2, 2], "b": [0, 1, 2, 3], "c": [None, None, 1, 1]}) + gp = dat.groupby("a") + + index = Index([1, 2], name="a") + + result = gp["b"].aggregate(lambda x: (x != 0).all()) + expected = Series([False, True], index=index, name="b") + tm.assert_series_equal(result, expected) + + result = gp["c"].aggregate(lambda x: x.isnull().all()) + expected = Series([True, False], index=index, name="c") + tm.assert_series_equal(result, expected) + + +def test_groupby_agg_dict_with_getitem(): + # issue 25471 + dat = DataFrame({"A": ["A", "A", "B", "B", "B"], "B": [1, 2, 1, 1, 2]}) + result = dat.groupby("A")[["B"]].agg({"B": "sum"}) + + expected = DataFrame({"B": [3, 4]}, index=["A", "B"]).rename_axis("A", axis=0) + + tm.assert_frame_equal(result, expected) + + +def test_groupby_agg_dict_dup_columns(): + # GH#55006 + df = DataFrame( + [[1, 2, 3, 4], [1, 3, 4, 5], [2, 4, 5, 6]], + columns=["a", "b", "c", "c"], + ) + gb = df.groupby("a") + result = gb.agg({"b": "sum"}) + expected = DataFrame({"b": [5, 4]}, index=Index([1, 2], name="a")) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "op", + [ + lambda x: x.sum(), + lambda x: x.cumsum(), + lambda x: x.transform("sum"), + lambda x: x.transform("cumsum"), + lambda x: x.agg("sum"), + lambda x: x.agg("cumsum"), + ], +) +def test_bool_agg_dtype(op): + # GH 7001 + # Bool sum aggregations result in int + df = DataFrame({"a": [1, 1], "b": [False, True]}) + s = df.set_index("a")["b"] + + result = op(df.groupby("a"))["b"].dtype + assert is_integer_dtype(result) + + result = op(s.groupby("a")).dtype + assert is_integer_dtype(result) + + +@pytest.mark.parametrize( + "keys, agg_index", + [ + (["a"], Index([1], name="a")), + (["a", "b"], MultiIndex([[1], [2]], [[0], [0]], names=["a", "b"])), + ], +) +@pytest.mark.parametrize( + "input_dtype", ["bool", "int32", "int64", "float32", "float64"] +) +@pytest.mark.parametrize( + "result_dtype", ["bool", "int32", "int64", "float32", "float64"] +) +@pytest.mark.parametrize("method", ["apply", "aggregate", "transform"]) +def test_callable_result_dtype_frame( + keys, agg_index, input_dtype, result_dtype, method +): + # GH 21240 + df = DataFrame({"a": [1], "b": [2], "c": [True]}) + df["c"] = df["c"].astype(input_dtype) + op = getattr(df.groupby(keys)[["c"]], method) + result = op(lambda x: x.astype(result_dtype).iloc[0]) + expected_index = pd.RangeIndex(0, 1) if method == "transform" else agg_index + expected = DataFrame({"c": [df["c"].iloc[0]]}, index=expected_index).astype( + result_dtype + ) + if method == "apply": + expected.columns.names = [0] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "keys, agg_index", + [ + (["a"], Index([1], name="a")), + (["a", "b"], MultiIndex([[1], [2]], [[0], [0]], names=["a", "b"])), + ], +) +@pytest.mark.parametrize("input", [True, 1, 1.0]) +@pytest.mark.parametrize("dtype", [bool, int, float]) +@pytest.mark.parametrize("method", ["apply", "aggregate", "transform"]) +def test_callable_result_dtype_series(keys, agg_index, 
input, dtype, method): + # GH 21240 + df = DataFrame({"a": [1], "b": [2], "c": [input]}) + op = getattr(df.groupby(keys)["c"], method) + result = op(lambda x: x.astype(dtype).iloc[0]) + expected_index = pd.RangeIndex(0, 1) if method == "transform" else agg_index + expected = Series([df["c"].iloc[0]], index=expected_index, name="c").astype(dtype) + tm.assert_series_equal(result, expected) + + +def test_order_aggregate_multiple_funcs(): + # GH 25692 + df = DataFrame({"A": [1, 1, 2, 2], "B": [1, 2, 3, 4]}) + + res = df.groupby("A").agg(["sum", "max", "mean", "ohlc", "min"]) + result = res.columns.levels[1] + + expected = Index(["sum", "max", "mean", "ohlc", "min"]) + + tm.assert_index_equal(result, expected) + + +def test_ohlc_ea_dtypes(any_numeric_ea_dtype): + # GH#37493 + df = DataFrame( + {"a": [1, 1, 2, 3, 4, 4], "b": [22, 11, pd.NA, 10, 20, pd.NA]}, + dtype=any_numeric_ea_dtype, + ) + gb = df.groupby("a") + result = gb.ohlc() + expected = DataFrame( + [[22, 22, 11, 11], [pd.NA] * 4, [10] * 4, [20] * 4], + columns=MultiIndex.from_product([["b"], ["open", "high", "low", "close"]]), + index=Index([1, 2, 3, 4], dtype=any_numeric_ea_dtype, name="a"), + dtype=any_numeric_ea_dtype, + ) + tm.assert_frame_equal(result, expected) + + gb2 = df.groupby("a", as_index=False) + result2 = gb2.ohlc() + expected2 = expected.reset_index() + tm.assert_frame_equal(result2, expected2) + + +@pytest.mark.parametrize("dtype", [np.int64, np.uint64]) +@pytest.mark.parametrize("how", ["first", "last", "min", "max", "mean", "median"]) +def test_uint64_type_handling(dtype, how): + # GH 26310 + df = DataFrame({"x": 6903052872240755750, "y": [1, 2]}) + expected = df.groupby("y").agg({"x": how}) + df.x = df.x.astype(dtype) + result = df.groupby("y").agg({"x": how}) + if how not in ("mean", "median"): + # mean and median always result in floats + result.x = result.x.astype(np.int64) + tm.assert_frame_equal(result, expected, check_exact=True) + + +def test_func_duplicates_raises(): + # GH28426 + msg = "Function names" + df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]}) + with pytest.raises(SpecificationError, match=msg): + df.groupby("A").agg(["min", "min"]) + + +@pytest.mark.parametrize( + "index", + [ + pd.CategoricalIndex(list("abc")), + pd.interval_range(0, 3), + pd.period_range("2020", periods=3, freq="D"), + MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]), + ], +) +def test_agg_index_has_complex_internals(index): + # GH 31223 + df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index) + result = df.groupby("group").agg({"value": Series.nunique}) + expected = DataFrame({"group": [1, 2], "value": [2, 1]}).set_index("group") + tm.assert_frame_equal(result, expected) + + +def test_agg_split_block(): + # https://github.com/pandas-dev/pandas/issues/31522 + df = DataFrame( + { + "key1": ["a", "a", "b", "b", "a"], + "key2": ["one", "two", "one", "two", "one"], + "key3": ["three", "three", "three", "six", "six"], + } + ) + result = df.groupby("key1").min() + expected = DataFrame( + {"key2": ["one", "one"], "key3": ["six", "six"]}, + index=Index(["a", "b"], name="key1"), + ) + tm.assert_frame_equal(result, expected) + + +def test_agg_split_object_part_datetime(): + # https://github.com/pandas-dev/pandas/pull/31616 + df = DataFrame( + { + "A": pd.date_range("2000", periods=4), + "B": ["a", "b", "c", "d"], + "C": [1, 2, 3, 4], + "D": ["b", "c", "d", "e"], + "E": pd.date_range("2000", periods=4), + "F": [1, 2, 3, 4], + } + ).astype(object) + result = df.groupby([0, 0, 0, 0]).min() + expected = DataFrame( 
+ { + "A": [pd.Timestamp("2000")], + "B": ["a"], + "C": [1], + "D": ["b"], + "E": [pd.Timestamp("2000")], + "F": [1], + }, + index=np.array([0]), + dtype=object, + ) + tm.assert_frame_equal(result, expected) + + +class TestNamedAggregationSeries: + def test_series_named_agg(self): + df = Series([1, 2, 3, 4]) + gr = df.groupby([0, 0, 1, 1]) + result = gr.agg(a="sum", b="min") + expected = DataFrame( + {"a": [3, 7], "b": [1, 3]}, columns=["a", "b"], index=np.array([0, 1]) + ) + tm.assert_frame_equal(result, expected) + + result = gr.agg(b="min", a="sum") + expected = expected[["b", "a"]] + tm.assert_frame_equal(result, expected) + + def test_no_args_raises(self): + gr = Series([1, 2]).groupby([0, 1]) + with pytest.raises(TypeError, match="Must provide"): + gr.agg() + + # but we do allow this + result = gr.agg([]) + expected = DataFrame(columns=[]) + tm.assert_frame_equal(result, expected) + + def test_series_named_agg_duplicates_no_raises(self): + # GH28426 + gr = Series([1, 2, 3]).groupby([0, 0, 1]) + grouped = gr.agg(a="sum", b="sum") + expected = DataFrame({"a": [3, 3], "b": [3, 3]}, index=np.array([0, 1])) + tm.assert_frame_equal(expected, grouped) + + def test_mangled(self): + gr = Series([1, 2, 3]).groupby([0, 0, 1]) + result = gr.agg(a=lambda x: 0, b=lambda x: 1) + expected = DataFrame({"a": [0, 0], "b": [1, 1]}, index=np.array([0, 1])) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "inp", + [ + pd.NamedAgg(column="anything", aggfunc="min"), + ("anything", "min"), + ["anything", "min"], + ], + ) + def test_named_agg_nametuple(self, inp): + # GH34422 + s = Series([1, 1, 2, 2, 3, 3, 4, 5]) + msg = f"func is expected but received {type(inp).__name__}" + with pytest.raises(TypeError, match=msg): + s.groupby(s.values).agg(a=inp) + + +class TestNamedAggregationDataFrame: + def test_agg_relabel(self): + df = DataFrame( + {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]} + ) + result = df.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max")) + expected = DataFrame( + {"a_max": [1, 3], "b_max": [6, 8]}, + index=Index(["a", "b"], name="group"), + columns=["a_max", "b_max"], + ) + tm.assert_frame_equal(result, expected) + + # order invariance + p98 = functools.partial(np.percentile, q=98) + result = df.groupby("group").agg( + b_min=("B", "min"), + a_min=("A", "min"), + a_mean=("A", "mean"), + a_max=("A", "max"), + b_max=("B", "max"), + a_98=("A", p98), + ) + expected = DataFrame( + { + "b_min": [5, 7], + "a_min": [0, 2], + "a_mean": [0.5, 2.5], + "a_max": [1, 3], + "b_max": [6, 8], + "a_98": [0.98, 2.98], + }, + index=Index(["a", "b"], name="group"), + columns=["b_min", "a_min", "a_mean", "a_max", "b_max", "a_98"], + ) + tm.assert_frame_equal(result, expected) + + def test_agg_relabel_non_identifier(self): + df = DataFrame( + {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]} + ) + + result = df.groupby("group").agg(**{"my col": ("A", "max")}) + expected = DataFrame({"my col": [1, 3]}, index=Index(["a", "b"], name="group")) + tm.assert_frame_equal(result, expected) + + def test_duplicate_no_raises(self): + # GH 28426, if use same input function on same column, + # no error should raise + df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]}) + + grouped = df.groupby("A").agg(a=("B", "min"), b=("B", "min")) + expected = DataFrame({"a": [1, 3], "b": [1, 3]}, index=Index([0, 1], name="A")) + tm.assert_frame_equal(grouped, expected) + + quant50 = functools.partial(np.percentile, q=50) + quant70 = 
functools.partial(np.percentile, q=70) + quant50.__name__ = "quant50" + quant70.__name__ = "quant70" + + test = DataFrame({"col1": ["a", "a", "b", "b", "b"], "col2": [1, 2, 3, 4, 5]}) + + grouped = test.groupby("col1").agg( + quantile_50=("col2", quant50), quantile_70=("col2", quant70) + ) + expected = DataFrame( + {"quantile_50": [1.5, 4.0], "quantile_70": [1.7, 4.4]}, + index=Index(["a", "b"], name="col1"), + ) + tm.assert_frame_equal(grouped, expected) + + def test_agg_relabel_with_level(self): + df = DataFrame( + {"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]}, + index=MultiIndex.from_product([["A", "B"], ["a", "b"]]), + ) + result = df.groupby(level=0).agg( + aa=("A", "max"), bb=("A", "min"), cc=("B", "mean") + ) + expected = DataFrame( + {"aa": [0, 1], "bb": [0, 1], "cc": [1.5, 3.5]}, index=["A", "B"] + ) + tm.assert_frame_equal(result, expected) + + def test_agg_relabel_other_raises(self): + df = DataFrame({"A": [0, 0, 1], "B": [1, 2, 3]}) + grouped = df.groupby("A") + match = "Must provide" + with pytest.raises(TypeError, match=match): + grouped.agg(foo=1) + + with pytest.raises(TypeError, match=match): + grouped.agg() + + with pytest.raises(TypeError, match=match): + grouped.agg(a=("B", "max"), b=(1, 2, 3)) + + def test_missing_raises(self): + df = DataFrame({"A": [0, 1], "B": [1, 2]}) + match = re.escape("Column(s) ['C'] do not exist") + with pytest.raises(KeyError, match=match): + df.groupby("A").agg(c=("C", "sum")) + + def test_agg_namedtuple(self): + df = DataFrame({"A": [0, 1], "B": [1, 2]}) + result = df.groupby("A").agg( + b=pd.NamedAgg("B", "sum"), c=pd.NamedAgg(column="B", aggfunc="count") + ) + expected = df.groupby("A").agg(b=("B", "sum"), c=("B", "count")) + tm.assert_frame_equal(result, expected) + + def test_mangled(self): + df = DataFrame({"A": [0, 1], "B": [1, 2], "C": [3, 4]}) + result = df.groupby("A").agg(b=("B", lambda x: 0), c=("C", lambda x: 1)) + expected = DataFrame({"b": [0, 0], "c": [1, 1]}, index=Index([0, 1], name="A")) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3", + [ + ( + (("y", "A"), "max"), + (("y", "A"), np.mean), + (("y", "B"), "mean"), + [1, 3], + [0.5, 2.5], + [5.5, 7.5], + ), + ( + (("y", "A"), lambda x: max(x)), + (("y", "A"), lambda x: 1), + (("y", "B"), np.mean), + [1, 3], + [1, 1], + [5.5, 7.5], + ), + ( + pd.NamedAgg(("y", "A"), "max"), + pd.NamedAgg(("y", "B"), np.mean), + pd.NamedAgg(("y", "A"), lambda x: 1), + [1, 3], + [5.5, 7.5], + [1, 1], + ), + ], +) +def test_agg_relabel_multiindex_column( + agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3 +): + # GH 29422, add tests for multiindex column cases + df = DataFrame( + {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]} + ) + df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")]) + idx = Index(["a", "b"], name=("x", "group")) + + result = df.groupby(("x", "group")).agg(a_max=(("y", "A"), "max")) + expected = DataFrame({"a_max": [1, 3]}, index=idx) + tm.assert_frame_equal(result, expected) + + msg = "is currently using SeriesGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(("x", "group")).agg( + col_1=agg_col1, col_2=agg_col2, col_3=agg_col3 + ) + expected = DataFrame( + {"col_1": agg_result1, "col_2": agg_result2, "col_3": agg_result3}, index=idx + ) + tm.assert_frame_equal(result, expected) + + +def test_agg_relabel_multiindex_raises_not_exist(): + # GH 29422, add test for raises scenario 
when aggregate column does not exist
+    df = DataFrame(
+        {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
+    )
+    df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
+
+    with pytest.raises(KeyError, match="do not exist"):
+        df.groupby(("x", "group")).agg(a=(("Y", "a"), "max"))
+
+
+def test_agg_relabel_multiindex_duplicates():
+    # GH29422, add test for raises scenario when getting duplicates
+    # GH28426, after this change, duplicates should also work if the relabelling is
+    # different
+    df = DataFrame(
+        {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
+    )
+    df.columns = MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
+
+    result = df.groupby(("x", "group")).agg(
+        a=(("y", "A"), "min"), b=(("y", "A"), "min")
+    )
+    idx = Index(["a", "b"], name=("x", "group"))
+    expected = DataFrame({"a": [0, 2], "b": [0, 2]}, index=idx)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("kwargs", [{"c": ["min"]}, {"b": [], "c": ["min"]}])
+def test_groupby_aggregate_empty_key(kwargs):
+    # GH: 32580
+    df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
+    result = df.groupby("a").agg(kwargs)
+    expected = DataFrame(
+        [1, 4],
+        index=Index([1, 2], dtype="int64", name="a"),
+        columns=MultiIndex.from_tuples([["c", "min"]]),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_aggregate_empty_key_empty_return():
+    # GH: 32580 Check that everything works when the return is empty
+    df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
+    result = df.groupby("a").agg({"b": []})
+    expected = DataFrame(columns=MultiIndex(levels=[["b"], []], codes=[[], []]))
+    tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_aggregate_empty_with_multiindex_frame():
+    # GH 39178
+    df = DataFrame(columns=["a", "b", "c"])
+    result = df.groupby(["a", "b"], group_keys=False).agg(d=("c", list))
+    expected = DataFrame(
+        columns=["d"], index=MultiIndex([[], []], [[], []], names=["a", "b"])
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_agg_loses_results_with_as_index_false_relabel():
+    # GH 32240: When the aggregate function relabels column names and
+    # as_index=False is specified, the results are dropped.
+
+    df = DataFrame(
+        {"key": ["x", "y", "z", "x", "y", "z"], "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]}
+    )
+
+    grouped = df.groupby("key", as_index=False)
+    result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))
+    expected = DataFrame({"key": ["x", "y", "z"], "min_val": [1.0, 0.8, 0.75]})
+    tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_agg_loses_results_with_as_index_false_relabel_multiindex():
+    # GH 32240: When the aggregate function relabels column names and
+    # as_index=False is specified, the results are dropped. Check if
+    # multiindex is returned in the right order
+
+    df = DataFrame(
+        {
+            "key": ["x", "y", "x", "y", "x", "x"],
+            "key1": ["a", "b", "c", "b", "a", "c"],
+            "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75],
+        }
+    )
+
+    grouped = df.groupby(["key", "key1"], as_index=False)
+    result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))
+    expected = DataFrame(
+        {"key": ["x", "x", "y"], "key1": ["a", "c", "b"], "min_val": [1.0, 0.75, 0.8]}
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "func", [lambda s: s.mean(), lambda s: np.mean(s), lambda s: np.nanmean(s)]
+)
+def test_multiindex_custom_func(func):
+    # GH 31777
+    data = [[1, 4, 2], [5, 7, 1]]
+    df = DataFrame(
+        data,
+        columns=MultiIndex.from_arrays(
+            [[1, 1, 2], [3, 4, 3]], names=["Sisko", "Janeway"]
+        ),
+    )
+    result = df.groupby(np.array([0, 1])).agg(func)
+    expected_dict = {
+        (1, 3): {0: 1.0, 1: 5.0},
+        (1, 4): {0: 4.0, 1: 7.0},
+        (2, 3): {0: 2.0, 1: 1.0},
+    }
+    expected = DataFrame(expected_dict, index=np.array([0, 1]), columns=df.columns)
+    tm.assert_frame_equal(result, expected)
+
+
+def myfunc(s):
+    return np.percentile(s, q=0.90)
+
+
+@pytest.mark.parametrize("func", [lambda s: np.percentile(s, q=0.90), myfunc])
+def test_lambda_named_agg(func):
+    # see gh-28467
+    animals = DataFrame(
+        {
+            "kind": ["cat", "dog", "cat", "dog"],
+            "height": [9.1, 6.0, 9.5, 34.0],
+            "weight": [7.9, 7.5, 9.9, 198.0],
+        }
+    )
+
+    result = animals.groupby("kind").agg(
+        mean_height=("height", "mean"), perc90=("height", func)
+    )
+    expected = DataFrame(
+        [[9.3, 9.1036], [20.0, 6.252]],
+        columns=["mean_height", "perc90"],
+        index=Index(["cat", "dog"], name="kind"),
+    )
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_aggregate_mixed_types():
+    # GH 16916
+    df = DataFrame(
+        data=np.array([0] * 9).reshape(3, 3), columns=list("XYZ"), index=list("abc")
+    )
+    df["grouping"] = ["group 1", "group 1", 2]
+    result = df.groupby("grouping").aggregate(lambda x: x.tolist())
+    expected_data = [[[0], [0], [0]], [[0, 0], [0, 0], [0, 0]]]
+    expected = DataFrame(
+        expected_data,
+        index=Index([2, "group 1"], dtype="object", name="grouping"),
+        columns=Index(["X", "Y", "Z"], dtype="object"),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.xfail(reason="Not implemented; see GH 31256")
+def test_aggregate_udf_na_extension_type():
+    # https://github.com/pandas-dev/pandas/pull/31359
+    # This is currently failing to cast back to Int64Dtype.
+    # The presence of the NA causes two problems:
+    # 1. NA is not an instance of Int64Dtype.type (numpy.int64)
+    # 2. The presence of an NA forces object type, so the non-NA value is
+    #    a Python int rather than a NumPy int64. Python ints aren't
+    #    instances of numpy.int64.
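+    # Illustration of point 2 above (added note, not exercised by the test):
+    #
+    #     pd.array([1, pd.NA]).dtype    # Int64 -- nullable extension dtype
+    #     np.asarray([1, pd.NA]).dtype  # object -- pd.NA is a plain Python object
+    #
+    # so inference over the object-dtype UDF result can't round-trip to Int64.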
+ def aggfunc(x): + if all(x > 2): + return 1 + else: + return pd.NA + + df = DataFrame({"A": pd.array([1, 2, 3])}) + result = df.groupby([1, 1, 2]).agg(aggfunc) + expected = DataFrame({"A": pd.array([1, pd.NA], dtype="Int64")}, index=[1, 2]) + tm.assert_frame_equal(result, expected) + + +class TestLambdaMangling: + def test_basic(self): + df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]}) + result = df.groupby("A").agg({"B": [lambda x: 0, lambda x: 1]}) + + expected = DataFrame( + {("B", ""): [0, 0], ("B", ""): [1, 1]}, + index=Index([0, 1], name="A"), + ) + tm.assert_frame_equal(result, expected) + + def test_mangle_series_groupby(self): + gr = Series([1, 2, 3, 4]).groupby([0, 0, 1, 1]) + result = gr.agg([lambda x: 0, lambda x: 1]) + exp_data = {"": [0, 0], "": [1, 1]} + expected = DataFrame(exp_data, index=np.array([0, 1])) + tm.assert_frame_equal(result, expected) + + @pytest.mark.xfail(reason="GH-26611. kwargs for multi-agg.") + def test_with_kwargs(self): + f1 = lambda x, y, b=1: x.sum() + y + b + f2 = lambda x, y, b=2: x.sum() + y * b + result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0) + expected = DataFrame({"": [4], "": [6]}) + tm.assert_frame_equal(result, expected) + + result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10) + expected = DataFrame({"": [13], "": [30]}) + tm.assert_frame_equal(result, expected) + + def test_agg_with_one_lambda(self): + # GH 25719, write tests for DataFrameGroupby.agg with only one lambda + df = DataFrame( + { + "kind": ["cat", "dog", "cat", "dog"], + "height": [9.1, 6.0, 9.5, 34.0], + "weight": [7.9, 7.5, 9.9, 198.0], + } + ) + + columns = ["height_sqr_min", "height_max", "weight_max"] + expected = DataFrame( + { + "height_sqr_min": [82.81, 36.00], + "height_max": [9.5, 34.0], + "weight_max": [9.9, 198.0], + }, + index=Index(["cat", "dog"], name="kind"), + columns=columns, + ) + + # check pd.NameAgg case + result1 = df.groupby(by="kind").agg( + height_sqr_min=pd.NamedAgg( + column="height", aggfunc=lambda x: np.min(x**2) + ), + height_max=pd.NamedAgg(column="height", aggfunc="max"), + weight_max=pd.NamedAgg(column="weight", aggfunc="max"), + ) + tm.assert_frame_equal(result1, expected) + + # check agg(key=(col, aggfunc)) case + result2 = df.groupby(by="kind").agg( + height_sqr_min=("height", lambda x: np.min(x**2)), + height_max=("height", "max"), + weight_max=("weight", "max"), + ) + tm.assert_frame_equal(result2, expected) + + def test_agg_multiple_lambda(self): + # GH25719, test for DataFrameGroupby.agg with multiple lambdas + # with mixed aggfunc + df = DataFrame( + { + "kind": ["cat", "dog", "cat", "dog"], + "height": [9.1, 6.0, 9.5, 34.0], + "weight": [7.9, 7.5, 9.9, 198.0], + } + ) + columns = [ + "height_sqr_min", + "height_max", + "weight_max", + "height_max_2", + "weight_min", + ] + expected = DataFrame( + { + "height_sqr_min": [82.81, 36.00], + "height_max": [9.5, 34.0], + "weight_max": [9.9, 198.0], + "height_max_2": [9.5, 34.0], + "weight_min": [7.9, 7.5], + }, + index=Index(["cat", "dog"], name="kind"), + columns=columns, + ) + + # check agg(key=(col, aggfunc)) case + result1 = df.groupby(by="kind").agg( + height_sqr_min=("height", lambda x: np.min(x**2)), + height_max=("height", "max"), + weight_max=("weight", "max"), + height_max_2=("height", lambda x: np.max(x)), + weight_min=("weight", lambda x: np.min(x)), + ) + tm.assert_frame_equal(result1, expected) + + # check pd.NamedAgg case + result2 = df.groupby(by="kind").agg( + height_sqr_min=pd.NamedAgg( + column="height", aggfunc=lambda x: np.min(x**2) + ), + 
height_max=pd.NamedAgg(column="height", aggfunc="max"),
+            weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
+            height_max_2=pd.NamedAgg(column="height", aggfunc=lambda x: np.max(x)),
+            weight_min=pd.NamedAgg(column="weight", aggfunc=lambda x: np.min(x)),
+        )
+        tm.assert_frame_equal(result2, expected)
+
+
+def test_groupby_get_by_index():
+    # GH 33439
+    df = DataFrame({"A": ["S", "W", "W"], "B": [1.0, 1.0, 2.0]})
+    res = df.groupby("A").agg({"B": lambda x: x.get(x.index[-1])})
+    expected = DataFrame({"A": ["S", "W"], "B": [1.0, 2.0]}).set_index("A")
+    tm.assert_frame_equal(res, expected)
+
+
+@pytest.mark.parametrize(
+    "grp_col_dict, exp_data",
+    [
+        ({"nr": "min", "cat_ord": "min"}, {"nr": [1, 5], "cat_ord": ["a", "c"]}),
+        ({"cat_ord": "min"}, {"cat_ord": ["a", "c"]}),
+        ({"nr": "min"}, {"nr": [1, 5]}),
+    ],
+)
+def test_groupby_single_agg_cat_cols(grp_col_dict, exp_data):
+    # test single aggregations on ordered categorical cols GH27800
+
+    # create the input dataframe
+    input_df = DataFrame(
+        {
+            "nr": [1, 2, 3, 4, 5, 6, 7, 8],
+            "cat_ord": list("aabbccdd"),
+            "cat": list("aaaabbbb"),
+        }
+    )
+
+    input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
+    input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
+    result_df = input_df.groupby("cat", observed=False).agg(grp_col_dict)
+
+    # create the expected dataframe
+    cat_index = pd.CategoricalIndex(
+        ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
+    )
+
+    expected_df = DataFrame(data=exp_data, index=cat_index)
+
+    if "cat_ord" in expected_df:
+        # ordered categorical columns should be preserved
+        dtype = input_df["cat_ord"].dtype
+        expected_df["cat_ord"] = expected_df["cat_ord"].astype(dtype)
+
+    tm.assert_frame_equal(result_df, expected_df)
+
+
+@pytest.mark.parametrize(
+    "grp_col_dict, exp_data",
+    [
+        ({"nr": ["min", "max"], "cat_ord": "min"}, [(1, 4, "a"), (5, 8, "c")]),
+        ({"nr": "min", "cat_ord": ["min", "max"]}, [(1, "a", "b"), (5, "c", "d")]),
+        ({"cat_ord": ["min", "max"]}, [("a", "b"), ("c", "d")]),
+    ],
+)
+def test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data):
+    # test combined aggregations on ordered categorical cols GH27800
+
+    # create the input dataframe
+    input_df = DataFrame(
+        {
+            "nr": [1, 2, 3, 4, 5, 6, 7, 8],
+            "cat_ord": list("aabbccdd"),
+            "cat": list("aaaabbbb"),
+        }
+    )
+
+    input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
+    input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
+    result_df = input_df.groupby("cat", observed=False).agg(grp_col_dict)
+
+    # create the expected dataframe
+    cat_index = pd.CategoricalIndex(
+        ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
+    )
+
+    # unpack the grp_col_dict to create the multi-index tuple
+    # this tuple will be used to create the expected dataframe index
+    multi_index_list = []
+    for k, v in grp_col_dict.items():
+        if isinstance(v, list):
+            multi_index_list.extend([k, value] for value in v)
+        else:
+            multi_index_list.append([k, v])
+    multi_index = MultiIndex.from_tuples(tuple(multi_index_list))
+
+    expected_df = DataFrame(data=exp_data, columns=multi_index, index=cat_index)
+    for col in expected_df.columns:
+        if isinstance(col, tuple) and "cat_ord" in col:
+            # ordered categorical should be preserved
+            expected_df[col] = expected_df[col].astype(input_df["cat_ord"].dtype)
+
+    tm.assert_frame_equal(result_df, expected_df)
+
+
+def test_nonagg_agg():
+    # GH 35490 - Single/Multiple agg of non-agg function give the same result
+    # TODO: agg should raise for
functions that don't aggregate + df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]}) + g = df.groupby("a") + + result = g.agg(["cumsum"]) + result.columns = result.columns.droplevel(-1) + expected = g.agg("cumsum") + + tm.assert_frame_equal(result, expected) + + +def test_aggregate_datetime_objects(): + # https://github.com/pandas-dev/pandas/issues/36003 + # ensure we don't raise an error but keep object dtype for out-of-bounds + # datetimes + df = DataFrame( + { + "A": ["X", "Y"], + "B": [ + datetime.datetime(2005, 1, 1, 10, 30, 23, 540000), + datetime.datetime(3005, 1, 1, 10, 30, 23, 540000), + ], + } + ) + result = df.groupby("A").B.max() + expected = df.set_index("A")["B"] + tm.assert_series_equal(result, expected) + + +def test_groupby_index_object_dtype(): + # GH 40014 + df = DataFrame({"c0": ["x", "x", "x"], "c1": ["x", "x", "y"], "p": [0, 1, 2]}) + df.index = df.index.astype("O") + grouped = df.groupby(["c0", "c1"]) + res = grouped.p.agg(lambda x: all(x > 0)) + # Check that providing a user-defined function in agg() + # produces the correct index shape when using an object-typed index. + expected_index = MultiIndex.from_tuples( + [("x", "x"), ("x", "y")], names=("c0", "c1") + ) + expected = Series([False, True], index=expected_index, name="p") + tm.assert_series_equal(res, expected) + + +def test_timeseries_groupby_agg(): + # GH#43290 + + def func(ser): + if ser.isna().all(): + return None + return np.sum(ser) + + df = DataFrame([1.0], index=[pd.Timestamp("2018-01-16 00:00:00+00:00")]) + res = df.groupby(lambda x: 1).agg(func) + + expected = DataFrame([[1.0]], index=[1]) + tm.assert_frame_equal(res, expected) + + +def test_groupby_agg_precision(any_real_numeric_dtype): + if any_real_numeric_dtype in tm.ALL_INT_NUMPY_DTYPES: + max_value = np.iinfo(any_real_numeric_dtype).max + if any_real_numeric_dtype in tm.FLOAT_NUMPY_DTYPES: + max_value = np.finfo(any_real_numeric_dtype).max + if any_real_numeric_dtype in tm.FLOAT_EA_DTYPES: + max_value = np.finfo(any_real_numeric_dtype.lower()).max + if any_real_numeric_dtype in tm.ALL_INT_EA_DTYPES: + max_value = np.iinfo(any_real_numeric_dtype.lower()).max + + df = DataFrame( + { + "key1": ["a"], + "key2": ["b"], + "key3": pd.array([max_value], dtype=any_real_numeric_dtype), + } + ) + arrays = [["a"], ["b"]] + index = MultiIndex.from_arrays(arrays, names=("key1", "key2")) + + expected = DataFrame( + {"key3": pd.array([max_value], dtype=any_real_numeric_dtype)}, index=index + ) + result = df.groupby(["key1", "key2"]).agg(lambda x: x) + tm.assert_frame_equal(result, expected) + + +def test_groupby_aggregate_directory(reduction_func): + # GH#32793 + if reduction_func in ["corrwith", "nth"]: + return None + + obj = DataFrame([[0, 1], [0, np.nan]]) + + result_reduced_series = obj.groupby(0).agg(reduction_func) + result_reduced_frame = obj.groupby(0).agg({1: reduction_func}) + + if reduction_func in ["size", "ngroup"]: + # names are different: None / 1 + tm.assert_series_equal( + result_reduced_series, result_reduced_frame[1], check_names=False + ) + else: + tm.assert_frame_equal(result_reduced_series, result_reduced_frame) + tm.assert_series_equal( + result_reduced_series.dtypes, result_reduced_frame.dtypes + ) + + +def test_group_mean_timedelta_nat(): + # GH43132 + data = Series(["1 day", "3 days", "NaT"], dtype="timedelta64[ns]") + expected = Series(["2 days"], dtype="timedelta64[ns]", index=np.array([0])) + + result = data.groupby([0, 0, 0]).mean() + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "input_data, 
expected_output", + [ + ( # no timezone + ["2021-01-01T00:00", "NaT", "2021-01-01T02:00"], + ["2021-01-01T01:00"], + ), + ( # timezone + ["2021-01-01T00:00-0100", "NaT", "2021-01-01T02:00-0100"], + ["2021-01-01T01:00-0100"], + ), + ], +) +def test_group_mean_datetime64_nat(input_data, expected_output): + # GH43132 + data = to_datetime(Series(input_data)) + expected = to_datetime(Series(expected_output, index=np.array([0]))) + + result = data.groupby([0, 0, 0]).mean() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "func, output", [("mean", [8 + 18j, 10 + 22j]), ("sum", [40 + 90j, 50 + 110j])] +) +def test_groupby_complex(func, output): + # GH#43701 + data = Series(np.arange(20).reshape(10, 2).dot([1, 2j])) + result = data.groupby(data.index % 2).agg(func) + expected = Series(output) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("func", ["min", "max", "var"]) +def test_groupby_complex_raises(func): + # GH#43701 + data = Series(np.arange(20).reshape(10, 2).dot([1, 2j])) + msg = "No matching signature found" + with pytest.raises(TypeError, match=msg): + data.groupby(data.index % 2).agg(func) + + +@pytest.mark.parametrize( + "func", [["min"], ["mean", "max"], {"b": "sum"}, {"b": "prod", "c": "median"}] +) +def test_multi_axis_1_raises(func): + # GH#46995 + df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5], "c": [6, 7, 8]}) + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby("a", axis=1) + with pytest.raises(NotImplementedError, match="axis other than 0 is not supported"): + gb.agg(func) + + +@pytest.mark.parametrize( + "test, constant", + [ + ([[20, "A"], [20, "B"], [10, "C"]], {0: [10, 20], 1: ["C", ["A", "B"]]}), + ([[20, "A"], [20, "B"], [30, "C"]], {0: [20, 30], 1: [["A", "B"], "C"]}), + ([["a", 1], ["a", 1], ["b", 2], ["b", 3]], {0: ["a", "b"], 1: [1, [2, 3]]}), + pytest.param( + [["a", 1], ["a", 2], ["b", 3], ["b", 3]], + {0: ["a", "b"], 1: [[1, 2], 3]}, + marks=pytest.mark.xfail, + ), + ], +) +def test_agg_of_mode_list(test, constant): + # GH#25581 + df1 = DataFrame(test) + result = df1.groupby(0).agg(Series.mode) + # Mode usually only returns 1 value, but can return a list in the case of a tie. 
+
+    expected = DataFrame(constant)
+    expected = expected.set_index(0)
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_dataframe_groupby_agg_list_like_func_with_args():
+    # GH#50624
+    df = DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]})
+    gb = df.groupby("y")
+
+    def foo1(x, a=1, c=0):
+        return x.sum() + a + c
+
+    def foo2(x, b=2, c=0):
+        return x.sum() + b + c
+
+    msg = r"foo1\(\) got an unexpected keyword argument 'b'"
+    with pytest.raises(TypeError, match=msg):
+        gb.agg([foo1, foo2], 3, b=3, c=4)
+
+    result = gb.agg([foo1, foo2], 3, c=4)
+    expected = DataFrame(
+        [[8, 8], [9, 9], [10, 10]],
+        index=Index(["a", "b", "c"], name="y"),
+        columns=MultiIndex.from_tuples([("x", "foo1"), ("x", "foo2")]),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_series_groupby_agg_list_like_func_with_args():
+    # GH#50624
+    s = Series([1, 2, 3])
+    sgb = s.groupby(s)
+
+    def foo1(x, a=1, c=0):
+        return x.sum() + a + c
+
+    def foo2(x, b=2, c=0):
+        return x.sum() + b + c
+
+    msg = r"foo1\(\) got an unexpected keyword argument 'b'"
+    with pytest.raises(TypeError, match=msg):
+        sgb.agg([foo1, foo2], 3, b=3, c=4)
+
+    result = sgb.agg([foo1, foo2], 3, c=4)
+    expected = DataFrame(
+        [[8, 8], [9, 9], [10, 10]], index=Index([1, 2, 3]), columns=["foo1", "foo2"]
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_agg_groupings_selection():
+    # GH#51186 - a selected grouping should be in the output of agg
+    df = DataFrame({"a": [1, 1, 2], "b": [3, 3, 4], "c": [5, 6, 7]})
+    gb = df.groupby(["a", "b"])
+    selected_gb = gb[["b", "c"]]
+    result = selected_gb.agg(lambda x: x.sum())
+    index = MultiIndex(
+        levels=[[1, 2], [3, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"]
+    )
+    expected = DataFrame({"b": [6, 4], "c": [11, 7]}, index=index)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_agg_multiple_with_as_index_false_subset_to_a_single_column():
+    # GH#50724
+    df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]})
+    gb = df.groupby("a", as_index=False)["b"]
+    result = gb.agg(["sum", "mean"])
+    expected = DataFrame({"a": [1, 2], "sum": [7, 5], "mean": [3.5, 5.0]})
+    tm.assert_frame_equal(result, expected)
+
+
+def test_agg_with_as_index_false_with_list():
+    # GH#52849
+    df = DataFrame({"a1": [0, 0, 1], "a2": [2, 3, 3], "b": [4, 5, 6]})
+    gb = df.groupby(by=["a1", "a2"], as_index=False)
+    result = gb.agg(["sum"])
+
+    expected = DataFrame(
+        data=[[0, 2, 4], [0, 3, 5], [1, 3, 6]],
+        columns=MultiIndex.from_tuples([("a1", ""), ("a2", ""), ("b", "sum")]),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_groupby_agg_extension_timedelta_cumsum_with_named_aggregation():
+    # GH#41720
+    expected = DataFrame(
+        {
+            "td": {
+                0: pd.Timedelta("0 days 01:00:00"),
+                1: pd.Timedelta("0 days 01:15:00"),
+                2: pd.Timedelta("0 days 01:15:00"),
+            }
+        }
+    )
+    df = DataFrame(
+        {
+            "td": Series(
+                ["0 days 01:00:00", "0 days 00:15:00", "0 days 01:15:00"],
+                dtype="timedelta64[ns]",
+            ),
+            "grps": ["a", "a", "b"],
+        }
+    )
+    gb = df.groupby("grps")
+    result = gb.agg(td=("td", "cumsum"))
+    tm.assert_frame_equal(result, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_cython.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_cython.py
new file mode 100644
index 00000000..ff50628a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_cython.py
@@ -0,0 +1,435 @@
+"""
+test cython .agg behavior
+"""
+
+import numpy as np
+import pytest
+
+from pandas.core.dtypes.common import (
+    is_float_dtype,
is_integer_dtype, +) + +import pandas as pd +from pandas import ( + DataFrame, + Index, + NaT, + Series, + Timedelta, + Timestamp, + bdate_range, +) +import pandas._testing as tm +import pandas.core.common as com + + +@pytest.mark.parametrize( + "op_name", + [ + "count", + "sum", + "std", + "var", + "sem", + "mean", + pytest.param( + "median", + # ignore mean of empty slice + # and all-NaN + marks=[pytest.mark.filterwarnings("ignore::RuntimeWarning")], + ), + "prod", + "min", + "max", + ], +) +def test_cythonized_aggers(op_name): + data = { + "A": [0, 0, 0, 0, 1, 1, 1, 1, 1, 1.0, np.nan, np.nan], + "B": ["A", "B"] * 6, + "C": np.random.default_rng(2).standard_normal(12), + } + df = DataFrame(data) + df.loc[2:10:2, "C"] = np.nan + + op = lambda x: getattr(x, op_name)() + + # single column + grouped = df.drop(["B"], axis=1).groupby("A") + exp = {cat: op(group["C"]) for cat, group in grouped} + exp = DataFrame({"C": exp}) + exp.index.name = "A" + result = op(grouped) + tm.assert_frame_equal(result, exp) + + # multiple columns + grouped = df.groupby(["A", "B"]) + expd = {} + for (cat1, cat2), group in grouped: + expd.setdefault(cat1, {})[cat2] = op(group["C"]) + exp = DataFrame(expd).T.stack(future_stack=True) + exp.index.names = ["A", "B"] + exp.name = "C" + + result = op(grouped)["C"] + if op_name in ["sum", "prod"]: + tm.assert_series_equal(result, exp) + + +def test_cython_agg_boolean(): + frame = DataFrame( + { + "a": np.random.default_rng(2).integers(0, 5, 50), + "b": np.random.default_rng(2).integers(0, 2, 50).astype("bool"), + } + ) + result = frame.groupby("a")["b"].mean() + msg = "using SeriesGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#53425 + expected = frame.groupby("a")["b"].agg(np.mean) + + tm.assert_series_equal(result, expected) + + +def test_cython_agg_nothing_to_agg(): + frame = DataFrame( + {"a": np.random.default_rng(2).integers(0, 5, 50), "b": ["foo", "bar"] * 25} + ) + + msg = "Cannot use numeric_only=True with SeriesGroupBy.mean and non-numeric dtypes" + with pytest.raises(TypeError, match=msg): + frame.groupby("a")["b"].mean(numeric_only=True) + + frame = DataFrame( + {"a": np.random.default_rng(2).integers(0, 5, 50), "b": ["foo", "bar"] * 25} + ) + + result = frame[["b"]].groupby(frame["a"]).mean(numeric_only=True) + expected = DataFrame( + [], index=frame["a"].sort_values().drop_duplicates(), columns=[] + ) + tm.assert_frame_equal(result, expected) + + +def test_cython_agg_nothing_to_agg_with_dates(): + frame = DataFrame( + { + "a": np.random.default_rng(2).integers(0, 5, 50), + "b": ["foo", "bar"] * 25, + "dates": pd.date_range("now", periods=50, freq="T"), + } + ) + msg = "Cannot use numeric_only=True with SeriesGroupBy.mean and non-numeric dtypes" + with pytest.raises(TypeError, match=msg): + frame.groupby("b").dates.mean(numeric_only=True) + + +def test_cython_agg_frame_columns(): + # #2113 + df = DataFrame({"x": [1, 2, 3], "y": [3, 4, 5]}) + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.groupby(level=0, axis="columns").mean() + with tm.assert_produces_warning(FutureWarning, match=msg): + df.groupby(level=0, axis="columns").mean() + with tm.assert_produces_warning(FutureWarning, match=msg): + df.groupby(level=0, axis="columns").mean() + with tm.assert_produces_warning(FutureWarning, match=msg): + df.groupby(level=0, axis="columns").mean() + + +def test_cython_agg_return_dict(): + # GH 16741 + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", 
"bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.default_rng(2).standard_normal(8), + "D": np.random.default_rng(2).standard_normal(8), + } + ) + + ts = df.groupby("A")["B"].agg(lambda x: x.value_counts().to_dict()) + expected = Series( + [{"two": 1, "one": 1, "three": 1}, {"two": 2, "one": 2, "three": 1}], + index=Index(["bar", "foo"], name="A"), + name="B", + ) + tm.assert_series_equal(ts, expected) + + +def test_cython_fail_agg(): + dr = bdate_range("1/1/2000", periods=50) + ts = Series(["A", "B", "C", "D", "E"] * 10, index=dr) + + grouped = ts.groupby(lambda x: x.month) + summed = grouped.sum() + msg = "using SeriesGroupBy.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#53425 + expected = grouped.agg(np.sum) + tm.assert_series_equal(summed, expected) + + +@pytest.mark.parametrize( + "op, targop", + [ + ("mean", np.mean), + ("median", np.median), + ("var", np.var), + ("sum", np.sum), + ("prod", np.prod), + ("min", np.min), + ("max", np.max), + ("first", lambda x: x.iloc[0]), + ("last", lambda x: x.iloc[-1]), + ], +) +def test__cython_agg_general(op, targop): + df = DataFrame(np.random.default_rng(2).standard_normal(1000)) + labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float) + + result = df.groupby(labels)._cython_agg_general(op, alt=None, numeric_only=True) + warn = FutureWarning if targop in com._cython_table else None + msg = f"using DataFrameGroupBy.{op}" + with tm.assert_produces_warning(warn, match=msg): + # GH#53425 + expected = df.groupby(labels).agg(targop) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "op, targop", + [ + ("mean", np.mean), + ("median", lambda x: np.median(x) if len(x) > 0 else np.nan), + ("var", lambda x: np.var(x, ddof=1)), + ("min", np.min), + ("max", np.max), + ], +) +def test_cython_agg_empty_buckets(op, targop, observed): + df = DataFrame([11, 12, 13]) + grps = range(0, 55, 5) + + # calling _cython_agg_general directly, instead of via the user API + # which sets different values for min_count, so do that here. 
+ g = df.groupby(pd.cut(df[0], grps), observed=observed) + result = g._cython_agg_general(op, alt=None, numeric_only=True) + + g = df.groupby(pd.cut(df[0], grps), observed=observed) + expected = g.agg(lambda x: targop(x)) + tm.assert_frame_equal(result, expected) + + +def test_cython_agg_empty_buckets_nanops(observed): + # GH-18869 can't call nanops on empty groups, so hardcode expected + # for these + df = DataFrame([11, 12, 13], columns=["a"]) + grps = np.arange(0, 25, 5, dtype=int) + # add / sum + result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general( + "sum", alt=None, numeric_only=True + ) + intervals = pd.interval_range(0, 20, freq=5) + expected = DataFrame( + {"a": [0, 0, 36, 0]}, + index=pd.CategoricalIndex(intervals, name="a", ordered=True), + ) + if observed: + expected = expected[expected.a != 0] + + tm.assert_frame_equal(result, expected) + + # prod + result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general( + "prod", alt=None, numeric_only=True + ) + expected = DataFrame( + {"a": [1, 1, 1716, 1]}, + index=pd.CategoricalIndex(intervals, name="a", ordered=True), + ) + if observed: + expected = expected[expected.a != 1] + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("op", ["first", "last", "max", "min"]) +@pytest.mark.parametrize( + "data", [Timestamp("2016-10-14 21:00:44.557"), Timedelta("17088 days 21:00:44.557")] +) +def test_cython_with_timestamp_and_nat(op, data): + # https://github.com/pandas-dev/pandas/issues/19526 + df = DataFrame({"a": [0, 1], "b": [data, NaT]}) + index = Index([0, 1], name="a") + + # We will group by a and test the cython aggregations + expected = DataFrame({"b": [data, NaT]}, index=index) + + result = df.groupby("a").aggregate(op) + tm.assert_frame_equal(expected, result) + + +@pytest.mark.parametrize( + "agg", + [ + "min", + "max", + "count", + "sum", + "prod", + "var", + "mean", + "median", + "ohlc", + "cumprod", + "cumsum", + "shift", + "any", + "all", + "quantile", + "first", + "last", + "rank", + "cummin", + "cummax", + ], +) +def test_read_only_buffer_source_agg(agg): + # https://github.com/pandas-dev/pandas/issues/36014 + df = DataFrame( + { + "sepal_length": [5.1, 4.9, 4.7, 4.6, 5.0], + "species": ["setosa", "setosa", "setosa", "setosa", "setosa"], + } + ) + df._mgr.arrays[0].flags.writeable = False + + result = df.groupby(["species"]).agg({"sepal_length": agg}) + expected = df.copy().groupby(["species"]).agg({"sepal_length": agg}) + + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "op_name", + [ + "count", + "sum", + "std", + "var", + "sem", + "mean", + "median", + "prod", + "min", + "max", + ], +) +def test_cython_agg_nullable_int(op_name): + # ensure that the cython-based aggregations don't fail for nullable dtype + # (eg https://github.com/pandas-dev/pandas/issues/37415) + df = DataFrame( + { + "A": ["A", "B"] * 5, + "B": pd.array([1, 2, 3, 4, 5, 6, 7, 8, 9, pd.NA], dtype="Int64"), + } + ) + result = getattr(df.groupby("A")["B"], op_name)() + df2 = df.assign(B=df["B"].astype("float64")) + expected = getattr(df2.groupby("A")["B"], op_name)() + if op_name in ("mean", "median"): + convert_integer = False + else: + convert_integer = True + expected = expected.convert_dtypes(convert_integer=convert_integer) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"]) +def test_count_masked_returns_masked_dtype(dtype): + df = DataFrame( + { + "A": [1, 1], + "B": pd.array([1, pd.NA], 
dtype=dtype), + "C": pd.array([1, 1], dtype=dtype), + } + ) + result = df.groupby("A").count() + expected = DataFrame( + [[1, 2]], index=Index([1], name="A"), columns=["B", "C"], dtype="Int64" + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("with_na", [True, False]) +@pytest.mark.parametrize( + "op_name, action", + [ + # ("count", "always_int"), + ("sum", "large_int"), + # ("std", "always_float"), + ("var", "always_float"), + # ("sem", "always_float"), + ("mean", "always_float"), + ("median", "always_float"), + ("prod", "large_int"), + ("min", "preserve"), + ("max", "preserve"), + ("first", "preserve"), + ("last", "preserve"), + ], +) +@pytest.mark.parametrize( + "data", + [ + pd.array([1, 2, 3, 4], dtype="Int64"), + pd.array([1, 2, 3, 4], dtype="Int8"), + pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float32"), + pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float64"), + pd.array([True, True, False, False], dtype="boolean"), + ], +) +def test_cython_agg_EA_known_dtypes(data, op_name, action, with_na): + if with_na: + data[3] = pd.NA + + df = DataFrame({"key": ["a", "a", "b", "b"], "col": data}) + grouped = df.groupby("key") + + if action == "always_int": + # always Int64 + expected_dtype = pd.Int64Dtype() + elif action == "large_int": + # for any int/bool use Int64, for float preserve dtype + if is_float_dtype(data.dtype): + expected_dtype = data.dtype + elif is_integer_dtype(data.dtype): + # match the numpy dtype we'd get with the non-nullable analogue + expected_dtype = data.dtype + else: + expected_dtype = pd.Int64Dtype() + elif action == "always_float": + # for any int/bool use Float64, for float preserve dtype + if is_float_dtype(data.dtype): + expected_dtype = data.dtype + else: + expected_dtype = pd.Float64Dtype() + elif action == "preserve": + expected_dtype = data.dtype + + result = getattr(grouped, op_name)() + assert result["col"].dtype == expected_dtype + + result = grouped.aggregate(op_name) + assert result["col"].dtype == expected_dtype + + result = getattr(grouped["col"], op_name)() + assert result.dtype == expected_dtype + + result = grouped["col"].aggregate(op_name) + assert result.dtype == expected_dtype diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_numba.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_numba.py new file mode 100644 index 00000000..ee694129 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_numba.py @@ -0,0 +1,392 @@ +import numpy as np +import pytest + +from pandas.errors import NumbaUtilError + +from pandas import ( + DataFrame, + Index, + NamedAgg, + Series, + option_context, +) +import pandas._testing as tm + +pytestmark = pytest.mark.single_cpu + + +def test_correct_function_signature(): + pytest.importorskip("numba") + + def incorrect_function(x): + return sum(x) * 2.7 + + data = DataFrame( + {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]}, + columns=["key", "data"], + ) + with pytest.raises(NumbaUtilError, match="The first 2"): + data.groupby("key").agg(incorrect_function, engine="numba") + + with pytest.raises(NumbaUtilError, match="The first 2"): + data.groupby("key")["data"].agg(incorrect_function, engine="numba") + + +def test_check_nopython_kwargs(): + pytest.importorskip("numba") + + def incorrect_function(values, index): + return sum(values) * 2.7 + + data = DataFrame( + {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]}, + columns=["key", "data"], + ) + with 
pytest.raises(NumbaUtilError, match="numba does not support"): + data.groupby("key").agg(incorrect_function, engine="numba", a=1) + + with pytest.raises(NumbaUtilError, match="numba does not support"): + data.groupby("key")["data"].agg(incorrect_function, engine="numba", a=1) + + +@pytest.mark.filterwarnings("ignore") +# Filter warnings when parallel=True and the function can't be parallelized by Numba +@pytest.mark.parametrize("jit", [True, False]) +@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"]) +@pytest.mark.parametrize("as_index", [True, False]) +def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index): + pytest.importorskip("numba") + + def func_numba(values, index): + return np.mean(values) * 2.7 + + if jit: + # Test accepted jitted functions + import numba + + func_numba = numba.jit(func_numba) + + data = DataFrame( + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] + ) + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + grouped = data.groupby(0, as_index=as_index) + if pandas_obj == "Series": + grouped = grouped[1] + + result = grouped.agg(func_numba, engine="numba", engine_kwargs=engine_kwargs) + expected = grouped.agg(lambda x: np.mean(x) * 2.7, engine="cython") + + tm.assert_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore") +# Filter warnings when parallel=True and the function can't be parallelized by Numba +@pytest.mark.parametrize("jit", [True, False]) +@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"]) +def test_cache(jit, pandas_obj, nogil, parallel, nopython): + # Test that the functions are cached correctly if we switch functions + pytest.importorskip("numba") + + def func_1(values, index): + return np.mean(values) - 3.4 + + def func_2(values, index): + return np.mean(values) * 2.7 + + if jit: + import numba + + func_1 = numba.jit(func_1) + func_2 = numba.jit(func_2) + + data = DataFrame( + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] + ) + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + grouped = data.groupby(0) + if pandas_obj == "Series": + grouped = grouped[1] + + result = grouped.agg(func_1, engine="numba", engine_kwargs=engine_kwargs) + expected = grouped.agg(lambda x: np.mean(x) - 3.4, engine="cython") + tm.assert_equal(result, expected) + + # Add func_2 to the cache + result = grouped.agg(func_2, engine="numba", engine_kwargs=engine_kwargs) + expected = grouped.agg(lambda x: np.mean(x) * 2.7, engine="cython") + tm.assert_equal(result, expected) + + # Retest func_1 which should use the cache + result = grouped.agg(func_1, engine="numba", engine_kwargs=engine_kwargs) + expected = grouped.agg(lambda x: np.mean(x) - 3.4, engine="cython") + tm.assert_equal(result, expected) + + +def test_use_global_config(): + pytest.importorskip("numba") + + def func_1(values, index): + return np.mean(values) - 3.4 + + data = DataFrame( + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] + ) + grouped = data.groupby(0) + expected = grouped.agg(func_1, engine="numba") + with option_context("compute.use_numba", True): + result = grouped.agg(func_1, engine=None) + tm.assert_frame_equal(expected, result) + + +@pytest.mark.parametrize( + "agg_kwargs", + [ + {"func": ["min", "max"]}, + {"func": "min"}, + {"func": {1: ["min", "max"], 2: "sum"}}, + {"bmin": NamedAgg(column=1, aggfunc="min")}, + ], +) +def test_multifunc_numba_vs_cython_frame(agg_kwargs): + 
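+    # The numba engine should agree with the cython engine for every agg spec
+    # parametrized above: a plain string, a list of strings, a per-column
+    # dict, and NamedAgg-style named aggregation.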
pytest.importorskip("numba") + data = DataFrame( + { + 0: ["a", "a", "b", "b", "a"], + 1: [1.0, 2.0, 3.0, 4.0, 5.0], + 2: [1, 2, 3, 4, 5], + }, + columns=[0, 1, 2], + ) + grouped = data.groupby(0) + result = grouped.agg(**agg_kwargs, engine="numba") + expected = grouped.agg(**agg_kwargs, engine="cython") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "agg_kwargs,expected_func", + [ + ({"func": lambda values, index: values.sum()}, "sum"), + # FIXME + pytest.param( + { + "func": [ + lambda values, index: values.sum(), + lambda values, index: values.min(), + ] + }, + ["sum", "min"], + marks=pytest.mark.xfail( + reason="This doesn't work yet! Fails in nopython pipeline!" + ), + ), + ], +) +def test_multifunc_numba_udf_frame(agg_kwargs, expected_func): + pytest.importorskip("numba") + data = DataFrame( + { + 0: ["a", "a", "b", "b", "a"], + 1: [1.0, 2.0, 3.0, 4.0, 5.0], + 2: [1, 2, 3, 4, 5], + }, + columns=[0, 1, 2], + ) + grouped = data.groupby(0) + result = grouped.agg(**agg_kwargs, engine="numba") + expected = grouped.agg(expected_func, engine="cython") + # check_dtype can be removed if GH 44952 is addressed + # Currently, UDFs still always return float64 while reductions can preserve dtype + tm.assert_frame_equal(result, expected, check_dtype=False) + + +@pytest.mark.parametrize( + "agg_kwargs", + [{"func": ["min", "max"]}, {"func": "min"}, {"min_val": "min", "max_val": "max"}], +) +def test_multifunc_numba_vs_cython_series(agg_kwargs): + pytest.importorskip("numba") + labels = ["a", "a", "b", "b", "a"] + data = Series([1.0, 2.0, 3.0, 4.0, 5.0]) + grouped = data.groupby(labels) + agg_kwargs["engine"] = "numba" + result = grouped.agg(**agg_kwargs) + agg_kwargs["engine"] = "cython" + expected = grouped.agg(**agg_kwargs) + if isinstance(expected, DataFrame): + tm.assert_frame_equal(result, expected) + else: + tm.assert_series_equal(result, expected) + + +@pytest.mark.single_cpu +@pytest.mark.parametrize( + "data,agg_kwargs", + [ + (Series([1.0, 2.0, 3.0, 4.0, 5.0]), {"func": ["min", "max"]}), + (Series([1.0, 2.0, 3.0, 4.0, 5.0]), {"func": "min"}), + ( + DataFrame( + {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2] + ), + {"func": ["min", "max"]}, + ), + ( + DataFrame( + {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2] + ), + {"func": "min"}, + ), + ( + DataFrame( + {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2] + ), + {"func": {1: ["min", "max"], 2: "sum"}}, + ), + ( + DataFrame( + {1: [1.0, 2.0, 3.0, 4.0, 5.0], 2: [1, 2, 3, 4, 5]}, columns=[1, 2] + ), + {"min_col": NamedAgg(column=1, aggfunc="min")}, + ), + ], +) +def test_multifunc_numba_kwarg_propagation(data, agg_kwargs): + pytest.importorskip("numba") + labels = ["a", "a", "b", "b", "a"] + grouped = data.groupby(labels) + result = grouped.agg(**agg_kwargs, engine="numba", engine_kwargs={"parallel": True}) + expected = grouped.agg(**agg_kwargs, engine="numba") + if isinstance(expected, DataFrame): + tm.assert_frame_equal(result, expected) + else: + tm.assert_series_equal(result, expected) + + +def test_args_not_cached(): + # GH 41647 + pytest.importorskip("numba") + + def sum_last(values, index, n): + return values[-n:].sum() + + df = DataFrame({"id": [0, 0, 1, 1], "x": [1, 1, 1, 1]}) + grouped_x = df.groupby("id")["x"] + result = grouped_x.agg(sum_last, 1, engine="numba") + expected = Series([1.0] * 2, name="x", index=Index([0, 1], name="id")) + tm.assert_series_equal(result, expected) + + result = grouped_x.agg(sum_last, 2, engine="numba") + expected = 
Series([2.0] * 2, name="x", index=Index([0, 1], name="id")) + tm.assert_series_equal(result, expected) + + +def test_index_data_correctly_passed(): + # GH 43133 + pytest.importorskip("numba") + + def f(values, index): + return np.mean(index) + + df = DataFrame({"group": ["A", "A", "B"], "v": [4, 5, 6]}, index=[-1, -2, -3]) + result = df.groupby("group").aggregate(f, engine="numba") + expected = DataFrame( + [-1.5, -3.0], columns=["v"], index=Index(["A", "B"], name="group") + ) + tm.assert_frame_equal(result, expected) + + +def test_engine_kwargs_not_cached(): + # If the user passes a different set of engine_kwargs don't return the same + # jitted function + pytest.importorskip("numba") + nogil = True + parallel = False + nopython = True + + def func_kwargs(values, index): + return nogil + parallel + nopython + + engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel} + df = DataFrame({"value": [0, 0, 0]}) + result = df.groupby(level=0).aggregate( + func_kwargs, engine="numba", engine_kwargs=engine_kwargs + ) + expected = DataFrame({"value": [2.0, 2.0, 2.0]}) + tm.assert_frame_equal(result, expected) + + nogil = False + engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel} + result = df.groupby(level=0).aggregate( + func_kwargs, engine="numba", engine_kwargs=engine_kwargs + ) + expected = DataFrame({"value": [1.0, 1.0, 1.0]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore") +def test_multiindex_one_key(nogil, parallel, nopython): + pytest.importorskip("numba") + + def numba_func(values, index): + return 1 + + df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"]) + engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel} + result = df.groupby("A").agg( + numba_func, engine="numba", engine_kwargs=engine_kwargs + ) + expected = DataFrame([1.0], index=Index([1], name="A"), columns=["C"]) + tm.assert_frame_equal(result, expected) + + +def test_multiindex_multi_key_not_supported(nogil, parallel, nopython): + pytest.importorskip("numba") + + def numba_func(values, index): + return 1 + + df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"]) + engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel} + with pytest.raises(NotImplementedError, match="more than 1 grouping labels"): + df.groupby(["A", "B"]).agg( + numba_func, engine="numba", engine_kwargs=engine_kwargs + ) + + +def test_multilabel_numba_vs_cython(numba_supported_reductions): + pytest.importorskip("numba") + reduction, kwargs = numba_supported_reductions + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.default_rng(2).standard_normal(8), + "D": np.random.default_rng(2).standard_normal(8), + } + ) + gb = df.groupby(["A", "B"]) + res_agg = gb.agg(reduction, engine="numba", **kwargs) + expected_agg = gb.agg(reduction, engine="cython", **kwargs) + tm.assert_frame_equal(res_agg, expected_agg) + # Test that calling the aggregation directly also works + direct_res = getattr(gb, reduction)(engine="numba", **kwargs) + direct_expected = getattr(gb, reduction)(engine="cython", **kwargs) + tm.assert_frame_equal(direct_res, direct_expected) + + +def test_multilabel_udf_numba_vs_cython(): + pytest.importorskip("numba") + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": 
np.random.default_rng(2).standard_normal(8), + "D": np.random.default_rng(2).standard_normal(8), + } + ) + gb = df.groupby(["A", "B"]) + result = gb.agg(lambda values, index: values.min(), engine="numba") + expected = gb.agg(lambda x: x.min(), engine="cython") + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_other.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_other.py new file mode 100644 index 00000000..9d3ebbd3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/aggregate/test_other.py @@ -0,0 +1,669 @@ +""" +test all other .agg behavior +""" + +import datetime as dt +from functools import partial + +import numpy as np +import pytest + +from pandas.errors import SpecificationError + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + PeriodIndex, + Series, + date_range, + period_range, +) +import pandas._testing as tm + +from pandas.io.formats.printing import pprint_thing + + +def test_agg_partial_failure_raises(): + # GH#43741 + + df = DataFrame( + { + "data1": np.random.default_rng(2).standard_normal(5), + "data2": np.random.default_rng(2).standard_normal(5), + "key1": ["a", "a", "b", "b", "a"], + "key2": ["one", "two", "one", "two", "one"], + } + ) + grouped = df.groupby("key1") + + def peak_to_peak(arr): + return arr.max() - arr.min() + + with pytest.raises(TypeError, match="unsupported operand type"): + grouped.agg([peak_to_peak]) + + with pytest.raises(TypeError, match="unsupported operand type"): + grouped.agg(peak_to_peak) + + +def test_agg_datetimes_mixed(): + data = [[1, "2012-01-01", 1.0], [2, "2012-01-02", 2.0], [3, None, 3.0]] + + df1 = DataFrame( + { + "key": [x[0] for x in data], + "date": [x[1] for x in data], + "value": [x[2] for x in data], + } + ) + + data = [ + [ + row[0], + (dt.datetime.strptime(row[1], "%Y-%m-%d").date() if row[1] else None), + row[2], + ] + for row in data + ] + + df2 = DataFrame( + { + "key": [x[0] for x in data], + "date": [x[1] for x in data], + "value": [x[2] for x in data], + } + ) + + df1["weights"] = df1["value"] / df1["value"].sum() + gb1 = df1.groupby("date").aggregate("sum") + + df2["weights"] = df1["value"] / df1["value"].sum() + gb2 = df2.groupby("date").aggregate("sum") + + assert len(gb1) == len(gb2) + + +def test_agg_period_index(): + prng = period_range("2012-1-1", freq="M", periods=3) + df = DataFrame(np.random.default_rng(2).standard_normal((3, 2)), index=prng) + rs = df.groupby(level=0).sum() + assert isinstance(rs.index, PeriodIndex) + + # GH 3579 + index = period_range(start="1999-01", periods=5, freq="M") + s1 = Series(np.random.default_rng(2).random(len(index)), index=index) + s2 = Series(np.random.default_rng(2).random(len(index)), index=index) + df = DataFrame.from_dict({"s1": s1, "s2": s2}) + grouped = df.groupby(df.index.month) + list(grouped) + + +def test_agg_dict_parameter_cast_result_dtypes(): + # GH 12821 + + df = DataFrame( + { + "class": ["A", "A", "B", "B", "C", "C", "D", "D"], + "time": date_range("1/1/2011", periods=8, freq="H"), + } + ) + df.loc[[0, 1, 2, 5], "time"] = None + + # test for `first` function + exp = df.loc[[0, 3, 4, 6]].set_index("class") + grouped = df.groupby("class") + tm.assert_frame_equal(grouped.first(), exp) + tm.assert_frame_equal(grouped.agg("first"), exp) + tm.assert_frame_equal(grouped.agg({"time": "first"}), exp) + tm.assert_series_equal(grouped.time.first(), exp["time"]) + 
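# A minimal sketch (illustrative, not part of the vendored file) of the
# behavior the surrounding test pins down, assuming a recent pandas:
# dict-based .agg with "first" skips missing values per group and
# preserves the datetime64 dtype. `toy` is a stand-in frame.
import pandas as pd

toy = pd.DataFrame(
    {
        "class": ["A", "A", "B", "B"],
        "time": pd.to_datetime(["2011-01-01", None, None, "2011-01-04"]),
    }
)
# Group "A" yields 2011-01-01 and group "B" yields 2011-01-04,
# since "first" takes the first non-missing value in each group.
print(toy.groupby("class").agg({"time": "first"}))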
tm.assert_series_equal(grouped.time.agg("first"), exp["time"]) + + # test for `last` function + exp = df.loc[[0, 3, 4, 7]].set_index("class") + grouped = df.groupby("class") + tm.assert_frame_equal(grouped.last(), exp) + tm.assert_frame_equal(grouped.agg("last"), exp) + tm.assert_frame_equal(grouped.agg({"time": "last"}), exp) + tm.assert_series_equal(grouped.time.last(), exp["time"]) + tm.assert_series_equal(grouped.time.agg("last"), exp["time"]) + + # count + exp = Series([2, 2, 2, 2], index=Index(list("ABCD"), name="class"), name="time") + tm.assert_series_equal(grouped.time.agg(len), exp) + tm.assert_series_equal(grouped.time.size(), exp) + + exp = Series([0, 1, 1, 2], index=Index(list("ABCD"), name="class"), name="time") + tm.assert_series_equal(grouped.time.count(), exp) + + +def test_agg_cast_results_dtypes(): + # similar to GH12821 + # xref #11444 + u = [dt.datetime(2015, x + 1, 1) for x in range(12)] + v = list("aaabbbbbbccd") + df = DataFrame({"X": v, "Y": u}) + + result = df.groupby("X")["Y"].agg(len) + expected = df.groupby("X")["Y"].count() + tm.assert_series_equal(result, expected) + + +def test_aggregate_float64_no_int64(): + # see gh-11199 + df = DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 2, 2, 4, 5], "c": [1, 2, 3, 4, 5]}) + + expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5]) + expected.index.name = "b" + + result = df.groupby("b")[["a"]].mean() + tm.assert_frame_equal(result, expected) + + expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5]) + expected.index.name = "b" + + result = df.groupby("b")[["a", "c"]].mean() + tm.assert_frame_equal(result, expected) + + +def test_aggregate_api_consistency(): + # GH 9052 + # make sure that the aggregates via dict + # are consistent + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "two", "two", "two", "one", "two"], + "C": np.random.default_rng(2).standard_normal(8) + 1.0, + "D": np.arange(8), + } + ) + + grouped = df.groupby(["A", "B"]) + c_mean = grouped["C"].mean() + c_sum = grouped["C"].sum() + d_mean = grouped["D"].mean() + d_sum = grouped["D"].sum() + + result = grouped["D"].agg(["sum", "mean"]) + expected = pd.concat([d_sum, d_mean], axis=1) + expected.columns = ["sum", "mean"] + tm.assert_frame_equal(result, expected, check_like=True) + + result = grouped.agg(["sum", "mean"]) + expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1) + expected.columns = MultiIndex.from_product([["C", "D"], ["sum", "mean"]]) + tm.assert_frame_equal(result, expected, check_like=True) + + result = grouped[["D", "C"]].agg(["sum", "mean"]) + expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1) + expected.columns = MultiIndex.from_product([["D", "C"], ["sum", "mean"]]) + tm.assert_frame_equal(result, expected, check_like=True) + + result = grouped.agg({"C": "mean", "D": "sum"}) + expected = pd.concat([d_sum, c_mean], axis=1) + tm.assert_frame_equal(result, expected, check_like=True) + + result = grouped.agg({"C": ["mean", "sum"], "D": ["mean", "sum"]}) + expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1) + expected.columns = MultiIndex.from_product([["C", "D"], ["mean", "sum"]]) + + msg = r"Column\(s\) \['r', 'r2'\] do not exist" + with pytest.raises(KeyError, match=msg): + grouped[["D", "C"]].agg({"r": "sum", "r2": "mean"}) + + +def test_agg_dict_renaming_deprecation(): + # 15931 + df = DataFrame({"A": [1, 1, 1, 2, 2], "B": range(5), "C": range(5)}) + + msg = r"nested renamer is not supported" + with 
pytest.raises(SpecificationError, match=msg): + df.groupby("A").agg( + {"B": {"foo": ["sum", "max"]}, "C": {"bar": ["count", "min"]}} + ) + + msg = r"Column\(s\) \['ma'\] do not exist" + with pytest.raises(KeyError, match=msg): + df.groupby("A")[["B", "C"]].agg({"ma": "max"}) + + msg = r"nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + df.groupby("A").B.agg({"foo": "count"}) + + +def test_agg_compat(): + # GH 12334 + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "two", "two", "two", "one", "two"], + "C": np.random.default_rng(2).standard_normal(8) + 1.0, + "D": np.arange(8), + } + ) + + g = df.groupby(["A", "B"]) + + msg = r"nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + g["D"].agg({"C": ["sum", "std"]}) + + with pytest.raises(SpecificationError, match=msg): + g["D"].agg({"C": "sum", "D": "std"}) + + +def test_agg_nested_dicts(): + # API change for disallowing these types of nested dicts + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "two", "two", "two", "one", "two"], + "C": np.random.default_rng(2).standard_normal(8) + 1.0, + "D": np.arange(8), + } + ) + + g = df.groupby(["A", "B"]) + + msg = r"nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + g.aggregate({"r1": {"C": ["mean", "sum"]}, "r2": {"D": ["mean", "sum"]}}) + + with pytest.raises(SpecificationError, match=msg): + g.agg({"C": {"ra": ["mean", "std"]}, "D": {"rb": ["mean", "std"]}}) + + # same name as the original column + # GH9052 + with pytest.raises(SpecificationError, match=msg): + g["D"].agg({"result1": np.sum, "result2": np.mean}) + + with pytest.raises(SpecificationError, match=msg): + g["D"].agg({"D": np.sum, "result2": np.mean}) + + +def test_agg_item_by_item_raise_typeerror(): + df = DataFrame(np.random.default_rng(2).integers(10, size=(20, 10))) + + def raiseException(df): + pprint_thing("----------------------------------------") + pprint_thing(df.to_string()) + raise TypeError("test") + + with pytest.raises(TypeError, match="test"): + df.groupby(0).agg(raiseException) + + +def test_series_agg_multikey(): + ts = tm.makeTimeSeries() + grouped = ts.groupby([lambda x: x.year, lambda x: x.month]) + + result = grouped.agg("sum") + expected = grouped.sum() + tm.assert_series_equal(result, expected) + + +def test_series_agg_multi_pure_python(): + data = DataFrame( + { + "A": [ + "foo", + "foo", + "foo", + "foo", + "bar", + "bar", + "bar", + "bar", + "foo", + "foo", + "foo", + ], + "B": [ + "one", + "one", + "one", + "two", + "one", + "one", + "one", + "two", + "two", + "two", + "one", + ], + "C": [ + "dull", + "dull", + "shiny", + "dull", + "dull", + "shiny", + "shiny", + "dull", + "shiny", + "shiny", + "shiny", + ], + "D": np.random.default_rng(2).standard_normal(11), + "E": np.random.default_rng(2).standard_normal(11), + "F": np.random.default_rng(2).standard_normal(11), + } + ) + + def bad(x): + assert len(x.values.base) > 0 + return "foo" + + result = data.groupby(["A", "B"]).agg(bad) + expected = data.groupby(["A", "B"]).agg(lambda x: "foo") + tm.assert_frame_equal(result, expected) + + +def test_agg_consistency(): + # agg with ([]) and () not consistent + # GH 6715 + def P1(a): + return np.percentile(a.dropna(), q=1) + + df = DataFrame( + { + "col1": [1, 2, 3, 4], + "col2": [10, 25, 26, 31], + "date": [ + dt.date(2013, 2, 10), + dt.date(2013, 2, 10), + dt.date(2013, 2, 
11), + dt.date(2013, 2, 11), + ], + } + ) + + g = df.groupby("date") + + expected = g.agg([P1]) + expected.columns = expected.columns.levels[0] + + result = g.agg(P1) + tm.assert_frame_equal(result, expected) + + +def test_agg_callables(): + # GH 7929 + df = DataFrame({"foo": [1, 2], "bar": [3, 4]}).astype(np.int64) + + class fn_class: + def __call__(self, x): + return sum(x) + + equiv_callables = [ + sum, + np.sum, + lambda x: sum(x), + lambda x: x.sum(), + partial(sum), + fn_class(), + ] + + expected = df.groupby("foo").agg("sum") + for ecall in equiv_callables: + warn = FutureWarning if ecall is sum or ecall is np.sum else None + msg = "using DataFrameGroupBy.sum" + with tm.assert_produces_warning(warn, match=msg): + result = df.groupby("foo").agg(ecall) + tm.assert_frame_equal(result, expected) + + +def test_agg_over_numpy_arrays(): + # GH 3788 + df = DataFrame( + [ + [1, np.array([10, 20, 30])], + [1, np.array([40, 50, 60])], + [2, np.array([20, 30, 40])], + ], + columns=["category", "arraydata"], + ) + gb = df.groupby("category") + + expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]] + expected_index = Index([1, 2], name="category") + expected_column = ["arraydata"] + expected = DataFrame(expected_data, index=expected_index, columns=expected_column) + + alt = gb.sum(numeric_only=False) + tm.assert_frame_equal(alt, expected) + + result = gb.agg("sum", numeric_only=False) + tm.assert_frame_equal(result, expected) + + # FIXME: the original version of this test called `gb.agg(sum)` + # and that raises TypeError if `numeric_only=False` is passed + + +@pytest.mark.parametrize("as_period", [True, False]) +def test_agg_tzaware_non_datetime_result(as_period): + # discussed in GH#29589, fixed in GH#29641, operating on tzaware values + # with function that is not dtype-preserving + dti = date_range("2012-01-01", periods=4, tz="UTC") + if as_period: + dti = dti.tz_localize(None).to_period("D") + + df = DataFrame({"a": [0, 0, 1, 1], "b": dti}) + gb = df.groupby("a") + + # Case that _does_ preserve the dtype + result = gb["b"].agg(lambda x: x.iloc[0]) + expected = Series(dti[::2], name="b") + expected.index.name = "a" + tm.assert_series_equal(result, expected) + + # Cases that do _not_ preserve the dtype + result = gb["b"].agg(lambda x: x.iloc[0].year) + expected = Series([2012, 2012], name="b") + expected.index.name = "a" + tm.assert_series_equal(result, expected) + + result = gb["b"].agg(lambda x: x.iloc[-1] - x.iloc[0]) + expected = Series([pd.Timedelta(days=1), pd.Timedelta(days=1)], name="b") + expected.index.name = "a" + if as_period: + expected = Series([pd.offsets.Day(1), pd.offsets.Day(1)], name="b") + expected.index.name = "a" + tm.assert_series_equal(result, expected) + + +def test_agg_timezone_round_trip(): + # GH 15426 + ts = pd.Timestamp("2016-01-01 12:00:00", tz="US/Pacific") + df = DataFrame({"a": 1, "b": [ts + dt.timedelta(minutes=nn) for nn in range(10)]}) + + result1 = df.groupby("a")["b"].agg("min").iloc[0] + result2 = df.groupby("a")["b"].agg(lambda x: np.min(x)).iloc[0] + result3 = df.groupby("a")["b"].min().iloc[0] + + assert result1 == ts + assert result2 == ts + assert result3 == ts + + dates = [ + pd.Timestamp(f"2016-01-0{i:d} 12:00:00", tz="US/Pacific") for i in range(1, 5) + ] + df = DataFrame({"A": ["a", "b"] * 2, "B": dates}) + grouped = df.groupby("A") + + ts = df["B"].iloc[0] + assert ts == grouped.nth(0)["B"].iloc[0] + assert ts == grouped.head(1)["B"].iloc[0] + assert ts == grouped.first()["B"].iloc[0] + + # GH#27110 applying iloc should return a 
DataFrame + assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 1] + + ts = df["B"].iloc[2] + assert ts == grouped.last()["B"].iloc[0] + + # GH#27110 applying iloc should return a DataFrame + assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 1] + + +def test_sum_uint64_overflow(): + # see gh-14758 + # Convert to uint64 and don't overflow + df = DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object) + df = df + 9223372036854775807 + + index = Index( + [9223372036854775808, 9223372036854775810, 9223372036854775812], dtype=np.uint64 + ) + expected = DataFrame( + {1: [9223372036854775809, 9223372036854775811, 9223372036854775813]}, + index=index, + dtype=object, + ) + + expected.index.name = 0 + result = df.groupby(0).sum(numeric_only=False) + tm.assert_frame_equal(result, expected) + + # out column is non-numeric, so with numeric_only=True it is dropped + result2 = df.groupby(0).sum(numeric_only=True) + expected2 = expected[[]] + tm.assert_frame_equal(result2, expected2) + + +@pytest.mark.parametrize( + "structure, expected", + [ + (tuple, DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})), + (list, DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})), + ( + lambda x: tuple(x), + DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}}), + ), + ( + lambda x: list(x), + DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}}), + ), + ], +) +def test_agg_structs_dataframe(structure, expected): + df = DataFrame( + {"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]} + ) + + result = df.groupby(["A", "B"]).aggregate(structure) + expected.index.names = ["A", "B"] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "structure, expected", + [ + (tuple, Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")), + (list, Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")), + (lambda x: tuple(x), Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")), + (lambda x: list(x), Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")), + ], +) +def test_agg_structs_series(structure, expected): + # Issue #18079 + df = DataFrame( + {"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]} + ) + + result = df.groupby("A")["C"].aggregate(structure) + expected.index.name = "A" + tm.assert_series_equal(result, expected) + + +def test_agg_category_nansum(observed): + categories = ["a", "b", "c"] + df = DataFrame( + {"A": pd.Categorical(["a", "a", "b"], categories=categories), "B": [1, 2, 3]} + ) + msg = "using SeriesGroupBy.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A", observed=observed).B.agg(np.nansum) + expected = Series( + [3, 3, 0], + index=pd.CategoricalIndex(["a", "b", "c"], categories=categories, name="A"), + name="B", + ) + if observed: + expected = expected[expected != 0] + tm.assert_series_equal(result, expected) + + +def test_agg_list_like_func(): + # GH 18473 + df = DataFrame({"A": [str(x) for x in range(3)], "B": [str(x) for x in range(3)]}) + grouped = df.groupby("A", as_index=False, sort=False) + result = grouped.agg({"B": lambda x: list(x)}) + expected = DataFrame( + {"A": [str(x) for x in range(3)], "B": [[str(x)] for x in range(3)]} + ) + tm.assert_frame_equal(result, expected) + + +def test_agg_lambda_with_timezone(): + # GH 23683 + df = DataFrame( + { + "tag": [1, 1], + "date": [ + pd.Timestamp("2018-01-01", tz="UTC"), + pd.Timestamp("2018-01-02", tz="UTC"), + ], + } + ) + result = df.groupby("tag").agg({"date": lambda e: e.head(1)}) + expected = DataFrame( + 
[pd.Timestamp("2018-01-01", tz="UTC")], + index=Index([1], name="tag"), + columns=["date"], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "err_cls", + [ + NotImplementedError, + RuntimeError, + KeyError, + IndexError, + OSError, + ValueError, + ArithmeticError, + AttributeError, + ], +) +def test_groupby_agg_err_catching(err_cls): + # make sure we suppress anything other than TypeError or AssertionError + # in _python_agg_general + + # Use a non-standard EA to make sure we don't go down ndarray paths + from pandas.tests.extension.decimal.array import ( + DecimalArray, + make_data, + to_decimal, + ) + + data = make_data()[:5] + df = DataFrame( + {"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)} + ) + + expected = Series(to_decimal([data[0], data[3]])) + + def weird_func(x): + # weird function that raise something other than TypeError or IndexError + # in _python_agg_general + if len(x) == 0: + raise err_cls + return x.iloc[0] + + result = df["decimals"].groupby(df["id1"]).agg(weird_func) + tm.assert_series_equal(result, expected, check_names=False) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/conftest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/conftest.py new file mode 100644 index 00000000..49fa9dc5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/conftest.py @@ -0,0 +1,224 @@ +import numpy as np +import pytest + +from pandas import DataFrame +import pandas._testing as tm +from pandas.core.groupby.base import ( + reduction_kernels, + transformation_kernels, +) + + +@pytest.fixture(params=[True, False]) +def sort(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def as_index(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def dropna(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def skipna(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def observed(request): + return request.param + + +@pytest.fixture +def mframe(multiindex_dataframe_random_data): + return multiindex_dataframe_random_data + + +@pytest.fixture +def df(): + return DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.default_rng(2).standard_normal(8), + "D": np.random.default_rng(2).standard_normal(8), + } + ) + + +@pytest.fixture +def ts(): + return tm.makeTimeSeries() + + +@pytest.fixture +def tsd(): + return tm.getTimeSeriesData() + + +@pytest.fixture +def tsframe(tsd): + return DataFrame(tsd) + + +@pytest.fixture +def df_mixed_floats(): + return DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.default_rng(2).standard_normal(8), + "D": np.array(np.random.default_rng(2).standard_normal(8), dtype="float32"), + } + ) + + +@pytest.fixture +def three_group(): + return DataFrame( + { + "A": [ + "foo", + "foo", + "foo", + "foo", + "bar", + "bar", + "bar", + "bar", + "foo", + "foo", + "foo", + ], + "B": [ + "one", + "one", + "one", + "two", + "one", + "one", + "one", + "two", + "two", + "two", + "one", + ], + "C": [ + "dull", + "dull", + "shiny", + "dull", + "dull", + "shiny", + "shiny", + "dull", + "shiny", + "shiny", + "shiny", + ], + "D": np.random.default_rng(2).standard_normal(11), + "E": np.random.default_rng(2).standard_normal(11), + "F": 
np.random.default_rng(2).standard_normal(11), + } + ) + + +@pytest.fixture() +def slice_test_df(): + data = [ + [0, "a", "a0_at_0"], + [1, "b", "b0_at_1"], + [2, "a", "a1_at_2"], + [3, "b", "b1_at_3"], + [4, "c", "c0_at_4"], + [5, "a", "a2_at_5"], + [6, "a", "a3_at_6"], + [7, "a", "a4_at_7"], + ] + df = DataFrame(data, columns=["Index", "Group", "Value"]) + return df.set_index("Index") + + +@pytest.fixture() +def slice_test_grouped(slice_test_df): + return slice_test_df.groupby("Group", as_index=False) + + +@pytest.fixture(params=sorted(reduction_kernels)) +def reduction_func(request): + """ + yields the string names of all groupby reduction functions, one at a time. + """ + return request.param + + +@pytest.fixture(params=sorted(transformation_kernels)) +def transformation_func(request): + """yields the string names of all groupby transformation functions.""" + return request.param + + +@pytest.fixture(params=sorted(reduction_kernels) + sorted(transformation_kernels)) +def groupby_func(request): + """yields both aggregation and transformation functions.""" + return request.param + + +@pytest.fixture(params=[True, False]) +def parallel(request): + """parallel keyword argument for numba.jit""" + return request.param + + +# Can parameterize nogil & nopython over True | False, but limiting per +# https://github.com/pandas-dev/pandas/pull/41971#issuecomment-860607472 + + +@pytest.fixture(params=[False]) +def nogil(request): + """nogil keyword argument for numba.jit""" + return request.param + + +@pytest.fixture(params=[True]) +def nopython(request): + """nopython keyword argument for numba.jit""" + return request.param + + +@pytest.fixture( + params=[ + ("mean", {}), + ("var", {"ddof": 1}), + ("var", {"ddof": 0}), + ("std", {"ddof": 1}), + ("std", {"ddof": 0}), + ("sum", {}), + ("min", {}), + ("max", {}), + ("sum", {"min_count": 2}), + ("min", {"min_count": 2}), + ("max", {"min_count": 2}), + ], + ids=[ + "mean", + "var_1", + "var_0", + "std_1", + "std_0", + "sum", + "min", + "max", + "sum-min_count", + "min-min_count", + "max-min_count", + ], +) +def numba_supported_reductions(request): + """reductions supported with engine='numba'""" + return request.param diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_any_all.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_any_all.py new file mode 100644 index 00000000..57a83335 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_any_all.py @@ -0,0 +1,188 @@ +import builtins + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + isna, +) +import pandas._testing as tm + + +@pytest.mark.parametrize("agg_func", ["any", "all"]) +@pytest.mark.parametrize( + "vals", + [ + ["foo", "bar", "baz"], + ["foo", "", ""], + ["", "", ""], + [1, 2, 3], + [1, 0, 0], + [0, 0, 0], + [1.0, 2.0, 3.0], + [1.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [True, True, True], + [True, False, False], + [False, False, False], + [np.nan, np.nan, np.nan], + ], +) +def test_groupby_bool_aggs(skipna, agg_func, vals): + df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2}) + + # Figure out expectation using Python builtin + exp = getattr(builtins, agg_func)(vals) + + # edge case for missing data with skipna and 'any' + if skipna and all(isna(vals)) and agg_func == "any": + exp = False + + expected = DataFrame( + [exp] * 2, columns=["val"], index=Index(["a", "b"], name="key") + ) + result = getattr(df.groupby("key"), agg_func)(skipna=skipna) + 
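# A minimal sketch of the any/all semantics checked above, assuming a
# recent pandas in which groupby any/all accept skipna. `toy` is an
# illustrative stand-in.
import numpy as np
import pandas as pd

toy = pd.DataFrame({"key": ["a", "a", "b", "b"], "val": [1.0, 0.0, np.nan, np.nan]})
# With skipna=True the all-NaN group "b" contains no truthy values, so
# any() is False there, while group "a" has the truthy 1.0 and is True.
print(toy.groupby("key")["val"].any(skipna=True))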
tm.assert_frame_equal(result, expected) + + +def test_any(): + df = DataFrame( + [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]], + columns=["A", "B", "C"], + ) + expected = DataFrame( + [[True, True], [False, True]], columns=["B", "C"], index=[1, 3] + ) + expected.index.name = "A" + result = df.groupby("A").any() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("bool_agg_func", ["any", "all"]) +def test_bool_aggs_dup_column_labels(bool_agg_func): + # GH#21668 + df = DataFrame([[True, True]], columns=["a", "a"]) + grp_by = df.groupby([0]) + result = getattr(grp_by, bool_agg_func)() + + expected = df.set_axis(np.array([0])) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("bool_agg_func", ["any", "all"]) +@pytest.mark.parametrize( + "data", + [ + [False, False, False], + [True, True, True], + [pd.NA, pd.NA, pd.NA], + [False, pd.NA, False], + [True, pd.NA, True], + [True, pd.NA, False], + ], +) +def test_masked_kleene_logic(bool_agg_func, skipna, data): + # GH#37506 + ser = Series(data, dtype="boolean") + + # The result should match aggregating on the whole series. Correctness + # there is verified in test_reductions.py::test_any_all_boolean_kleene_logic + expected_data = getattr(ser, bool_agg_func)(skipna=skipna) + expected = Series(expected_data, index=np.array([0]), dtype="boolean") + + result = ser.groupby([0, 0, 0]).agg(bool_agg_func, skipna=skipna) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype1,dtype2,exp_col1,exp_col2", + [ + ( + "float", + "Float64", + np.array([True], dtype=bool), + pd.array([pd.NA], dtype="boolean"), + ), + ( + "Int64", + "float", + pd.array([pd.NA], dtype="boolean"), + np.array([True], dtype=bool), + ), + ( + "Int64", + "Int64", + pd.array([pd.NA], dtype="boolean"), + pd.array([pd.NA], dtype="boolean"), + ), + ( + "Float64", + "boolean", + pd.array([pd.NA], dtype="boolean"), + pd.array([pd.NA], dtype="boolean"), + ), + ], +) +def test_masked_mixed_types(dtype1, dtype2, exp_col1, exp_col2): + # GH#37506 + data = [1.0, np.nan] + df = DataFrame( + {"col1": pd.array(data, dtype=dtype1), "col2": pd.array(data, dtype=dtype2)} + ) + result = df.groupby([1, 1]).agg("all", skipna=False) + + expected = DataFrame({"col1": exp_col1, "col2": exp_col2}, index=np.array([1])) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("bool_agg_func", ["any", "all"]) +@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"]) +def test_masked_bool_aggs_skipna(bool_agg_func, dtype, skipna, frame_or_series): + # GH#40585 + obj = frame_or_series([pd.NA, 1], dtype=dtype) + expected_res = True + if not skipna and bool_agg_func == "all": + expected_res = pd.NA + expected = frame_or_series([expected_res], index=np.array([1]), dtype="boolean") + + result = obj.groupby([1, 1]).agg(bool_agg_func, skipna=skipna) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "bool_agg_func,data,expected_res", + [ + ("any", [pd.NA, np.nan], False), + ("any", [pd.NA, 1, np.nan], True), + ("all", [pd.NA, pd.NaT], True), + ("all", [pd.NA, False, pd.NaT], False), + ], +) +def test_object_type_missing_vals(bool_agg_func, data, expected_res, frame_or_series): + # GH#37501 + obj = frame_or_series(data, dtype=object) + result = obj.groupby([1] * len(data)).agg(bool_agg_func) + expected = frame_or_series([expected_res], index=np.array([1]), dtype="bool") + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize("bool_agg_func", ["any", "all"]) +def 
test_object_NA_raises_with_skipna_false(bool_agg_func):
+ # GH#37501
+ ser = Series([pd.NA], dtype=object)
+ with pytest.raises(TypeError, match="boolean value of NA is ambiguous"):
+ ser.groupby([1]).agg(bool_agg_func, skipna=False)
+
+
+@pytest.mark.parametrize("bool_agg_func", ["any", "all"])
+def test_empty(frame_or_series, bool_agg_func):
+ # GH 45231
+ kwargs = {"columns": ["a"]} if frame_or_series is DataFrame else {"name": "a"}
+ obj = frame_or_series(**kwargs, dtype=object)
+ result = getattr(obj.groupby(obj.index), bool_agg_func)()
+ expected = frame_or_series(**kwargs, dtype=bool)
+ tm.assert_equal(result, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_api.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_api.py
new file mode 100644
index 00000000..1a030841
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_api.py
@@ -0,0 +1,261 @@
+"""
+Tests of the groupby API, including internal consistency and with other pandas objects.
+
+Tests in this file should only check the existence, names, and arguments of groupby
+methods. It should not test the results of any groupby operation.
+"""
+
+import inspect
+
+import pytest
+
+from pandas import (
+ DataFrame,
+ Series,
+)
+from pandas.core.groupby.base import (
+ groupby_other_methods,
+ reduction_kernels,
+ transformation_kernels,
+)
+from pandas.core.groupby.generic import (
+ DataFrameGroupBy,
+ SeriesGroupBy,
+)
+
+
+def test_tab_completion(mframe):
+ grp = mframe.groupby(level="second")
+ results = {v for v in dir(grp) if not v.startswith("_")}
+ expected = {
+ "A",
+ "B",
+ "C",
+ "agg",
+ "aggregate",
+ "apply",
+ "boxplot",
+ "filter",
+ "first",
+ "get_group",
+ "groups",
+ "hist",
+ "indices",
+ "last",
+ "max",
+ "mean",
+ "median",
+ "min",
+ "ngroups",
+ "nth",
+ "ohlc",
+ "plot",
+ "prod",
+ "size",
+ "std",
+ "sum",
+ "transform",
+ "var",
+ "sem",
+ "count",
+ "nunique",
+ "head",
+ "describe",
+ "cummax",
+ "quantile",
+ "rank",
+ "cumprod",
+ "tail",
+ "resample",
+ "cummin",
+ "fillna",
+ "cumsum",
+ "cumcount",
+ "ngroup",
+ "all",
+ "shift",
+ "skew",
+ "take",
+ "pct_change",
+ "any",
+ "corr",
+ "corrwith",
+ "cov",
+ "dtypes",
+ "ndim",
+ "diff",
+ "idxmax",
+ "idxmin",
+ "ffill",
+ "bfill",
+ "rolling",
+ "expanding",
+ "pipe",
+ "sample",
+ "ewm",
+ "value_counts",
+ }
+ assert results == expected
+
+
+def test_all_methods_categorized(mframe):
+ grp = mframe.groupby(mframe.iloc[:, 0])
+ names = {_ for _ in dir(grp) if not _.startswith("_")} - set(mframe.columns)
+ new_names = set(names)
+ new_names -= reduction_kernels
+ new_names -= transformation_kernels
+ new_names -= groupby_other_methods
+
+ assert not reduction_kernels & transformation_kernels
+ assert not reduction_kernels & groupby_other_methods
+ assert not transformation_kernels & groupby_other_methods
+
+ # new public method?
+ if new_names:
+ msg = f"""
+There are uncategorized methods defined on the Grouper class:
+{new_names}.
+
+Was a new method recently added?
+
+Every public method on Grouper must appear in exactly one of the
+following three lists defined in pandas.core.groupby.base:
+- `reduction_kernels`
+- `transformation_kernels`
+- `groupby_other_methods`
+See the comments in pandas/core/groupby/base.py for guidance on
+how to fix this test.
+ """
+ raise AssertionError(msg)
+
+ # removed a public method?
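# The categorization checks in this test (above and just below) are plain
# set algebra; a toy sketch of the invariant being enforced, with
# illustrative stand-in sets (the real ones live in pandas.core.groupby.base):
reduction = {"sum", "mean"}
transformation = {"cumsum", "shift"}
other = {"plot", "pipe"}
public = reduction | transformation | other
# The three kernel sets must be pairwise disjoint...
assert not reduction & transformation and not reduction & other
# ...and must jointly cover every public method.
assert public - reduction - transformation - other == set()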
+ all_categorized = reduction_kernels | transformation_kernels | groupby_other_methods + if names != all_categorized: + msg = f""" +Some methods which are supposed to be on the Grouper class +are missing: +{all_categorized - names}. + +They're still defined in one of the lists that live in pandas/core/groupby/base.py. +If you removed a method, you should update them +""" + raise AssertionError(msg) + + +def test_frame_consistency(groupby_func): + # GH#48028 + if groupby_func in ("first", "last"): + msg = "first and last are entirely different between frame and groupby" + pytest.skip(reason=msg) + + if groupby_func in ("cumcount", "ngroup"): + assert not hasattr(DataFrame, groupby_func) + return + + frame_method = getattr(DataFrame, groupby_func) + gb_method = getattr(DataFrameGroupBy, groupby_func) + result = set(inspect.signature(gb_method).parameters) + if groupby_func == "size": + # "size" is a method on GroupBy but property on DataFrame: + expected = {"self"} + else: + expected = set(inspect.signature(frame_method).parameters) + + # Exclude certain arguments from result and expected depending on the operation + # Some of these may be purposeful inconsistencies between the APIs + exclude_expected, exclude_result = set(), set() + if groupby_func in ("any", "all"): + exclude_expected = {"kwargs", "bool_only", "axis"} + elif groupby_func in ("count",): + exclude_expected = {"numeric_only", "axis"} + elif groupby_func in ("nunique",): + exclude_expected = {"axis"} + elif groupby_func in ("max", "min"): + exclude_expected = {"axis", "kwargs", "skipna"} + exclude_result = {"min_count", "engine", "engine_kwargs"} + elif groupby_func in ("mean", "std", "sum", "var"): + exclude_expected = {"axis", "kwargs", "skipna"} + exclude_result = {"engine", "engine_kwargs"} + elif groupby_func in ("median", "prod", "sem"): + exclude_expected = {"axis", "kwargs", "skipna"} + elif groupby_func in ("backfill", "bfill", "ffill", "pad"): + exclude_expected = {"downcast", "inplace", "axis"} + elif groupby_func in ("cummax", "cummin"): + exclude_expected = {"skipna", "args"} + exclude_result = {"numeric_only"} + elif groupby_func in ("cumprod", "cumsum"): + exclude_expected = {"skipna"} + elif groupby_func in ("pct_change",): + exclude_expected = {"kwargs"} + exclude_result = {"axis"} + elif groupby_func in ("rank",): + exclude_expected = {"numeric_only"} + elif groupby_func in ("quantile",): + exclude_expected = {"method", "axis"} + + # Ensure excluded arguments are actually in the signatures + assert result & exclude_result == exclude_result + assert expected & exclude_expected == exclude_expected + + result -= exclude_result + expected -= exclude_expected + assert result == expected + + +def test_series_consistency(request, groupby_func): + # GH#48028 + if groupby_func in ("first", "last"): + pytest.skip("first and last are entirely different between Series and groupby") + + if groupby_func in ("cumcount", "corrwith", "ngroup"): + assert not hasattr(Series, groupby_func) + return + + series_method = getattr(Series, groupby_func) + gb_method = getattr(SeriesGroupBy, groupby_func) + result = set(inspect.signature(gb_method).parameters) + if groupby_func == "size": + # "size" is a method on GroupBy but property on Series + expected = {"self"} + else: + expected = set(inspect.signature(series_method).parameters) + + # Exclude certain arguments from result and expected depending on the operation + # Some of these may be purposeful inconsistencies between the APIs + exclude_expected, exclude_result = set(), set() + 
if groupby_func in ("any", "all"): + exclude_expected = {"kwargs", "bool_only", "axis"} + elif groupby_func in ("diff",): + exclude_result = {"axis"} + elif groupby_func in ("max", "min"): + exclude_expected = {"axis", "kwargs", "skipna"} + exclude_result = {"min_count", "engine", "engine_kwargs"} + elif groupby_func in ("mean", "std", "sum", "var"): + exclude_expected = {"axis", "kwargs", "skipna"} + exclude_result = {"engine", "engine_kwargs"} + elif groupby_func in ("median", "prod", "sem"): + exclude_expected = {"axis", "kwargs", "skipna"} + elif groupby_func in ("backfill", "bfill", "ffill", "pad"): + exclude_expected = {"downcast", "inplace", "axis"} + elif groupby_func in ("cummax", "cummin"): + exclude_expected = {"skipna", "args"} + exclude_result = {"numeric_only"} + elif groupby_func in ("cumprod", "cumsum"): + exclude_expected = {"skipna"} + elif groupby_func in ("pct_change",): + exclude_expected = {"kwargs"} + exclude_result = {"axis"} + elif groupby_func in ("rank",): + exclude_expected = {"numeric_only"} + elif groupby_func in ("idxmin", "idxmax"): + exclude_expected = {"args", "kwargs"} + elif groupby_func in ("quantile",): + exclude_result = {"numeric_only"} + + # Ensure excluded arguments are actually in the signatures + assert result & exclude_result == exclude_result + assert expected & exclude_expected == exclude_expected + + result -= exclude_result + expected -= exclude_expected + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_apply.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_apply.py new file mode 100644 index 00000000..d04ee7ce --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_apply.py @@ -0,0 +1,1422 @@ +from datetime import ( + date, + datetime, +) +from io import StringIO + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + bdate_range, +) +import pandas._testing as tm +from pandas.tests.groupby import get_groupby_method_args + + +def test_apply_func_that_appends_group_to_list_without_copy(): + # GH: 17718 + + df = DataFrame(1, index=list(range(10)) * 10, columns=[0]).reset_index() + groups = [] + + def store(group): + groups.append(group) + + df.groupby("index").apply(store) + expected_value = DataFrame( + {"index": [0] * 10, 0: [1] * 10}, index=pd.RangeIndex(0, 100, 10) + ) + + tm.assert_frame_equal(groups[0], expected_value) + + +def test_apply_issues(): + # GH 5788 + + s = """2011.05.16,00:00,1.40893 +2011.05.16,01:00,1.40760 +2011.05.16,02:00,1.40750 +2011.05.16,03:00,1.40649 +2011.05.17,02:00,1.40893 +2011.05.17,03:00,1.40760 +2011.05.17,04:00,1.40750 +2011.05.17,05:00,1.40649 +2011.05.18,02:00,1.40893 +2011.05.18,03:00,1.40760 +2011.05.18,04:00,1.40750 +2011.05.18,05:00,1.40649""" + + df = pd.read_csv( + StringIO(s), + header=None, + names=["date", "time", "value"], + parse_dates=[["date", "time"]], + ) + df = df.set_index("date_time") + + expected = df.groupby(df.index.date).idxmax() + result = df.groupby(df.index.date).apply(lambda x: x.idxmax()) + tm.assert_frame_equal(result, expected) + + # GH 5789 + # don't auto coerce dates + df = pd.read_csv(StringIO(s), header=None, names=["date", "time", "value"]) + exp_idx = Index( + ["2011.05.16", "2011.05.17", "2011.05.18"], dtype=object, name="date" + ) + expected = Series(["00:00", "02:00", "02:00"], index=exp_idx) + result = df.groupby("date", group_keys=False).apply( + lambda x: x["time"][x["value"].idxmax()] + ) + 
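# The consistency tests above diff parameter names via inspect.signature;
# a minimal self-contained sketch of that technique on two toy functions
# (both names are illustrative):
import inspect

def frame_method(self, axis=0, skipna=True): ...

def groupby_method(self, skipna=True, engine=None): ...

frame_params = set(inspect.signature(frame_method).parameters)
gb_params = set(inspect.signature(groupby_method).parameters)
# Arguments present on one API but not the other surface as set
# differences, which is exactly what the tests assert about.
print(gb_params - frame_params)  # {'engine'}, extra on the groupby side
print(frame_params - gb_params)  # {'axis'}, extra on the frame side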
tm.assert_series_equal(result, expected) + + +def test_apply_trivial(): + # GH 20066 + # trivial apply: ignore input and return a constant dataframe. + df = DataFrame( + {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]}, + columns=["key", "data"], + ) + expected = pd.concat([df.iloc[1:], df.iloc[1:]], axis=1, keys=["float64", "object"]) + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby([str(x) for x in df.dtypes], axis=1) + result = gb.apply(lambda x: df.iloc[1:]) + + tm.assert_frame_equal(result, expected) + + +def test_apply_trivial_fail(): + # GH 20066 + df = DataFrame( + {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]}, + columns=["key", "data"], + ) + expected = pd.concat([df, df], axis=1, keys=["float64", "object"]) + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby([str(x) for x in df.dtypes], axis=1, group_keys=True) + result = gb.apply(lambda x: df) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "df, group_names", + [ + (DataFrame({"a": [1, 1, 1, 2, 3], "b": ["a", "a", "a", "b", "c"]}), [1, 2, 3]), + (DataFrame({"a": [0, 0, 1, 1], "b": [0, 1, 0, 1]}), [0, 1]), + (DataFrame({"a": [1]}), [1]), + (DataFrame({"a": [1, 1, 1, 2, 2, 1, 1, 2], "b": range(8)}), [1, 2]), + (DataFrame({"a": [1, 2, 3, 1, 2, 3], "two": [4, 5, 6, 7, 8, 9]}), [1, 2, 3]), + ( + DataFrame( + { + "a": list("aaabbbcccc"), + "B": [3, 4, 3, 6, 5, 2, 1, 9, 5, 4], + "C": [4, 0, 2, 2, 2, 7, 8, 6, 2, 8], + } + ), + ["a", "b", "c"], + ), + (DataFrame([[1, 2, 3], [2, 2, 3]], columns=["a", "b", "c"]), [1, 2]), + ], + ids=[ + "GH2936", + "GH7739 & GH10519", + "GH10519", + "GH2656", + "GH12155", + "GH20084", + "GH21417", + ], +) +def test_group_apply_once_per_group(df, group_names): + # GH2936, GH7739, GH10519, GH2656, GH12155, GH20084, GH21417 + + # This test should ensure that a function is only evaluated + # once per group. 
Previously the function has been evaluated twice + # on the first group to check if the Cython index slider is safe to use + # This test ensures that the side effect (append to list) is only triggered + # once per group + + names = [] + # cannot parameterize over the functions since they need external + # `names` to detect side effects + + def f_copy(group): + # this takes the fast apply path + names.append(group.name) + return group.copy() + + def f_nocopy(group): + # this takes the slow apply path + names.append(group.name) + return group + + def f_scalar(group): + # GH7739, GH2656 + names.append(group.name) + return 0 + + def f_none(group): + # GH10519, GH12155, GH21417 + names.append(group.name) + + def f_constant_df(group): + # GH2936, GH20084 + names.append(group.name) + return DataFrame({"a": [1], "b": [1]}) + + for func in [f_copy, f_nocopy, f_scalar, f_none, f_constant_df]: + del names[:] + + df.groupby("a", group_keys=False).apply(func) + assert names == group_names + + +def test_group_apply_once_per_group2(capsys): + # GH: 31111 + # groupby-apply need to execute len(set(group_by_columns)) times + + expected = 2 # Number of times `apply` should call a function for the current test + + df = DataFrame( + { + "group_by_column": [0, 0, 0, 0, 1, 1, 1, 1], + "test_column": ["0", "2", "4", "6", "8", "10", "12", "14"], + }, + index=["0", "2", "4", "6", "8", "10", "12", "14"], + ) + + df.groupby("group_by_column", group_keys=False).apply( + lambda df: print("function_called") + ) + + result = capsys.readouterr().out.count("function_called") + # If `groupby` behaves unexpectedly, this test will break + assert result == expected + + +def test_apply_fast_slow_identical(): + # GH 31613 + + df = DataFrame({"A": [0, 0, 1], "b": range(3)}) + + # For simple index structures we check for fast/slow apply using + # an identity check on in/output + def slow(group): + return group + + def fast(group): + return group.copy() + + fast_df = df.groupby("A", group_keys=False).apply(fast) + slow_df = df.groupby("A", group_keys=False).apply(slow) + + tm.assert_frame_equal(fast_df, slow_df) + + +@pytest.mark.parametrize( + "func", + [ + lambda x: x, + lambda x: x[:], + lambda x: x.copy(deep=False), + lambda x: x.copy(deep=True), + ], +) +def test_groupby_apply_identity_maybecopy_index_identical(func): + # GH 14927 + # Whether the function returns a copy of the input data or not should not + # have an impact on the index structure of the result since this is not + # transparent to the user + + df = DataFrame({"g": [1, 2, 2, 2], "a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}) + + result = df.groupby("g", group_keys=False).apply(func) + tm.assert_frame_equal(result, df) + + +def test_apply_with_mixed_dtype(): + # GH3480, apply with mixed dtype on axis=1 breaks in 0.11 + df = DataFrame( + { + "foo1": np.random.default_rng(2).standard_normal(6), + "foo2": ["one", "two", "two", "three", "one", "two"], + } + ) + result = df.apply(lambda x: x, axis=1).dtypes + expected = df.dtypes + tm.assert_series_equal(result, expected) + + # GH 3610 incorrect dtype conversion with as_index=False + df = DataFrame({"c1": [1, 2, 6, 6, 8]}) + df["c2"] = df.c1 / 2.0 + result1 = df.groupby("c2").mean().reset_index().c2 + result2 = df.groupby("c2", as_index=False).mean().c2 + tm.assert_series_equal(result1, result2) + + +def test_groupby_as_index_apply(): + # GH #4648 and #3417 + df = DataFrame( + { + "item_id": ["b", "b", "a", "c", "a", "b"], + "user_id": [1, 2, 1, 1, 3, 1], + "time": range(6), + } + ) + + g_as = df.groupby("user_id", 
as_index=True) + g_not_as = df.groupby("user_id", as_index=False) + + res_as = g_as.head(2).index + res_not_as = g_not_as.head(2).index + exp = Index([0, 1, 2, 4]) + tm.assert_index_equal(res_as, exp) + tm.assert_index_equal(res_not_as, exp) + + res_as_apply = g_as.apply(lambda x: x.head(2)).index + res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index + + # apply doesn't maintain the original ordering + # changed in GH5610 as the as_index=False returns a MI here + exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (2, 4)]) + tp = [(1, 0), (1, 2), (2, 1), (3, 4)] + exp_as_apply = MultiIndex.from_tuples(tp, names=["user_id", None]) + + tm.assert_index_equal(res_as_apply, exp_as_apply) + tm.assert_index_equal(res_not_as_apply, exp_not_as_apply) + + ind = Index(list("abcde")) + df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind) + res = df.groupby(0, as_index=False, group_keys=False).apply(lambda x: x).index + tm.assert_index_equal(res, ind) + + +def test_apply_concat_preserve_names(three_group): + grouped = three_group.groupby(["A", "B"]) + + def desc(group): + result = group.describe() + result.index.name = "stat" + return result + + def desc2(group): + result = group.describe() + result.index.name = "stat" + result = result[: len(group)] + # weirdo + return result + + def desc3(group): + result = group.describe() + + # names are different + result.index.name = f"stat_{len(group):d}" + + result = result[: len(group)] + # weirdo + return result + + result = grouped.apply(desc) + assert result.index.names == ("A", "B", "stat") + + result2 = grouped.apply(desc2) + assert result2.index.names == ("A", "B", "stat") + + result3 = grouped.apply(desc3) + assert result3.index.names == ("A", "B", None) + + +def test_apply_series_to_frame(): + def f(piece): + with np.errstate(invalid="ignore"): + logged = np.log(piece) + return DataFrame( + {"value": piece, "demeaned": piece - piece.mean(), "logged": logged} + ) + + dr = bdate_range("1/1/2000", periods=100) + ts = Series(np.random.default_rng(2).standard_normal(100), index=dr) + + grouped = ts.groupby(lambda x: x.month, group_keys=False) + result = grouped.apply(f) + + assert isinstance(result, DataFrame) + assert not hasattr(result, "name") # GH49907 + tm.assert_index_equal(result.index, ts.index) + + +def test_apply_series_yield_constant(df): + result = df.groupby(["A", "B"])["C"].apply(len) + assert result.index.names[:2] == ("A", "B") + + +def test_apply_frame_yield_constant(df): + # GH13568 + result = df.groupby(["A", "B"]).apply(len) + assert isinstance(result, Series) + assert result.name is None + + result = df.groupby(["A", "B"])[["C", "D"]].apply(len) + assert isinstance(result, Series) + assert result.name is None + + +def test_apply_frame_to_series(df): + grouped = df.groupby(["A", "B"]) + result = grouped.apply(len) + expected = grouped.count()["C"] + tm.assert_index_equal(result.index, expected.index) + tm.assert_numpy_array_equal(result.values, expected.values) + + +def test_apply_frame_not_as_index_column_name(df): + # GH 35964 - path within _wrap_applied_output not hit by a test + grouped = df.groupby(["A", "B"], as_index=False) + result = grouped.apply(len) + expected = grouped.count().rename(columns={"C": np.nan}).drop(columns="D") + # TODO(GH#34306): Use assert_frame_equal when column name is not np.nan + tm.assert_index_equal(result.index, expected.index) + tm.assert_numpy_array_equal(result.values, expected.values) + + +def test_apply_frame_concat_series(): + def trans(group): + return 
group.groupby("B")["C"].sum().sort_values().iloc[:2] + + def trans2(group): + grouped = group.groupby(df.reindex(group.index)["B"]) + return grouped.sum().sort_values().iloc[:2] + + df = DataFrame( + { + "A": np.random.default_rng(2).integers(0, 5, 1000), + "B": np.random.default_rng(2).integers(0, 5, 1000), + "C": np.random.default_rng(2).standard_normal(1000), + } + ) + + result = df.groupby("A").apply(trans) + exp = df.groupby("A")["C"].apply(trans2) + tm.assert_series_equal(result, exp, check_names=False) + assert result.name == "C" + + +def test_apply_transform(ts): + grouped = ts.groupby(lambda x: x.month, group_keys=False) + result = grouped.apply(lambda x: x * 2) + expected = grouped.transform(lambda x: x * 2) + tm.assert_series_equal(result, expected) + + +def test_apply_multikey_corner(tsframe): + grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month]) + + def f(group): + return group.sort_values("A")[-5:] + + result = grouped.apply(f) + for key, group in grouped: + tm.assert_frame_equal(result.loc[key], f(group)) + + +@pytest.mark.parametrize("group_keys", [True, False]) +def test_apply_chunk_view(group_keys): + # Low level tinkering could be unsafe, make sure not + df = DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)}) + + result = df.groupby("key", group_keys=group_keys).apply(lambda x: x.iloc[:2]) + expected = df.take([0, 1, 3, 4, 6, 7]) + if group_keys: + expected.index = MultiIndex.from_arrays( + [[1, 1, 2, 2, 3, 3], expected.index], names=["key", None] + ) + + tm.assert_frame_equal(result, expected) + + +def test_apply_no_name_column_conflict(): + df = DataFrame( + { + "name": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2], + "name2": [0, 0, 0, 1, 1, 1, 0, 0, 1, 1], + "value": range(9, -1, -1), + } + ) + + # it works! #2605 + grouped = df.groupby(["name", "name2"]) + grouped.apply(lambda x: x.sort_values("value", inplace=True)) + + +def test_apply_typecast_fail(): + df = DataFrame( + { + "d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], + "c": np.tile(["a", "b", "c"], 2), + "v": np.arange(1.0, 7.0), + } + ) + + def f(group): + v = group["v"] + group["v2"] = (v - v.min()) / (v.max() - v.min()) + return group + + result = df.groupby("d", group_keys=False).apply(f) + + expected = df.copy() + expected["v2"] = np.tile([0.0, 0.5, 1], 2) + + tm.assert_frame_equal(result, expected) + + +def test_apply_multiindex_fail(): + index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]]) + df = DataFrame( + { + "d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], + "c": np.tile(["a", "b", "c"], 2), + "v": np.arange(1.0, 7.0), + }, + index=index, + ) + + def f(group): + v = group["v"] + group["v2"] = (v - v.min()) / (v.max() - v.min()) + return group + + result = df.groupby("d", group_keys=False).apply(f) + + expected = df.copy() + expected["v2"] = np.tile([0.0, 0.5, 1], 2) + + tm.assert_frame_equal(result, expected) + + +def test_apply_corner(tsframe): + result = tsframe.groupby(lambda x: x.year, group_keys=False).apply(lambda x: x * 2) + expected = tsframe * 2 + tm.assert_frame_equal(result, expected) + + +def test_apply_without_copy(): + # GH 5545 + # returning a non-copy in an applied function fails + + data = DataFrame( + { + "id_field": [100, 100, 200, 300], + "category": ["a", "b", "c", "c"], + "value": [1, 2, 3, 4], + } + ) + + def filt1(x): + if x.shape[0] == 1: + return x.copy() + else: + return x[x.category == "c"] + + def filt2(x): + if x.shape[0] == 1: + return x + else: + return x[x.category == "c"] + + expected = data.groupby("id_field").apply(filt1) + result = 
data.groupby("id_field").apply(filt2) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("test_series", [True, False]) +def test_apply_with_duplicated_non_sorted_axis(test_series): + # GH 30667 + df = DataFrame( + [["x", "p"], ["x", "p"], ["x", "o"]], columns=["X", "Y"], index=[1, 2, 2] + ) + if test_series: + ser = df.set_index("Y")["X"] + result = ser.groupby(level=0, group_keys=False).apply(lambda x: x) + + # not expecting the order to remain the same for duplicated axis + result = result.sort_index() + expected = ser.sort_index() + tm.assert_series_equal(result, expected) + else: + result = df.groupby("Y", group_keys=False).apply(lambda x: x) + + # not expecting the order to remain the same for duplicated axis + result = result.sort_values("Y") + expected = df.sort_values("Y") + tm.assert_frame_equal(result, expected) + + +def test_apply_reindex_values(): + # GH: 26209 + # reindexing from a single column of a groupby object with duplicate indices caused + # a ValueError (cannot reindex from duplicate axis) in 0.24.2, the problem was + # solved in #30679 + values = [1, 2, 3, 4] + indices = [1, 1, 2, 2] + df = DataFrame({"group": ["Group1", "Group2"] * 2, "value": values}, index=indices) + expected = Series(values, index=indices, name="value") + + def reindex_helper(x): + return x.reindex(np.arange(x.index.min(), x.index.max() + 1)) + + # the following group by raised a ValueError + result = df.groupby("group", group_keys=False).value.apply(reindex_helper) + tm.assert_series_equal(expected, result) + + +def test_apply_corner_cases(): + # #535, can't use sliding iterator + + N = 1000 + labels = np.random.default_rng(2).integers(0, 100, size=N) + df = DataFrame( + { + "key": labels, + "value1": np.random.default_rng(2).standard_normal(N), + "value2": ["foo", "bar", "baz", "qux"] * (N // 4), + } + ) + + grouped = df.groupby("key", group_keys=False) + + def f(g): + g["value3"] = g["value1"] * 2 + return g + + result = grouped.apply(f) + assert "value3" in result + + +def test_apply_numeric_coercion_when_datetime(): + # In the past, group-by/apply operations have been over-eager + # in converting dtypes to numeric, in the presence of datetime + # columns. Various GH issues were filed, the reproductions + # for which are here. 
+ + # GH 15670 + df = DataFrame( + {"Number": [1, 2], "Date": ["2017-03-02"] * 2, "Str": ["foo", "inf"]} + ) + expected = df.groupby(["Number"]).apply(lambda x: x.iloc[0]) + df.Date = pd.to_datetime(df.Date) + result = df.groupby(["Number"]).apply(lambda x: x.iloc[0]) + tm.assert_series_equal(result["Str"], expected["Str"]) + + # GH 15421 + df = DataFrame( + {"A": [10, 20, 30], "B": ["foo", "3", "4"], "T": [pd.Timestamp("12:31:22")] * 3} + ) + + def get_B(g): + return g.iloc[0][["B"]] + + result = df.groupby("A").apply(get_B)["B"] + expected = df.B + expected.index = df.A + tm.assert_series_equal(result, expected) + + # GH 14423 + def predictions(tool): + out = Series(index=["p1", "p2", "useTime"], dtype=object) + if "step1" in list(tool.State): + out["p1"] = str(tool[tool.State == "step1"].Machine.values[0]) + if "step2" in list(tool.State): + out["p2"] = str(tool[tool.State == "step2"].Machine.values[0]) + out["useTime"] = str(tool[tool.State == "step2"].oTime.values[0]) + return out + + df1 = DataFrame( + { + "Key": ["B", "B", "A", "A"], + "State": ["step1", "step2", "step1", "step2"], + "oTime": ["", "2016-09-19 05:24:33", "", "2016-09-19 23:59:04"], + "Machine": ["23", "36L", "36R", "36R"], + } + ) + df2 = df1.copy() + df2.oTime = pd.to_datetime(df2.oTime) + expected = df1.groupby("Key").apply(predictions).p1 + result = df2.groupby("Key").apply(predictions).p1 + tm.assert_series_equal(expected, result) + + +def test_apply_aggregating_timedelta_and_datetime(): + # Regression test for GH 15562 + # The following groupby caused ValueErrors and IndexErrors pre 0.20.0 + + df = DataFrame( + { + "clientid": ["A", "B", "C"], + "datetime": [np.datetime64("2017-02-01 00:00:00")] * 3, + } + ) + df["time_delta_zero"] = df.datetime - df.datetime + result = df.groupby("clientid").apply( + lambda ddf: Series( + {"clientid_age": ddf.time_delta_zero.min(), "date": ddf.datetime.min()} + ) + ) + expected = DataFrame( + { + "clientid": ["A", "B", "C"], + "clientid_age": [np.timedelta64(0, "D")] * 3, + "date": [np.datetime64("2017-02-01 00:00:00")] * 3, + } + ).set_index("clientid") + + tm.assert_frame_equal(result, expected) + + +def test_apply_groupby_datetimeindex(): + # GH 26182 + # groupby apply failed on dataframe with DatetimeIndex + + data = [["A", 10], ["B", 20], ["B", 30], ["C", 40], ["C", 50]] + df = DataFrame( + data, columns=["Name", "Value"], index=pd.date_range("2020-09-01", "2020-09-05") + ) + + result = df.groupby("Name").sum() + + expected = DataFrame({"Name": ["A", "B", "C"], "Value": [10, 50, 90]}) + expected.set_index("Name", inplace=True) + + tm.assert_frame_equal(result, expected) + + +def test_time_field_bug(): + # Test a fix for the following error related to GH issue 11324 When + # non-key fields in a group-by dataframe contained time-based fields + # that were not returned by the apply function, an exception would be + # raised. 
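# A minimal sketch of the fixed behavior described above, assuming a
# recent pandas: the applied function drops the datetime column entirely
# and returns a new field, and this must not raise. `toy` is illustrative.
import datetime as dt
import pandas as pd

toy = pd.DataFrame({"a": 1, "b": [dt.datetime.now() for _ in range(3)]})
print(toy.groupby("a").apply(lambda g: pd.Series({"c": 2})))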
+ + df = DataFrame({"a": 1, "b": [datetime.now() for nn in range(10)]}) + + def func_with_no_date(batch): + return Series({"c": 2}) + + def func_with_date(batch): + return Series({"b": datetime(2015, 1, 1), "c": 2}) + + dfg_no_conversion = df.groupby(by=["a"]).apply(func_with_no_date) + dfg_no_conversion_expected = DataFrame({"c": 2}, index=[1]) + dfg_no_conversion_expected.index.name = "a" + + dfg_conversion = df.groupby(by=["a"]).apply(func_with_date) + dfg_conversion_expected = DataFrame( + {"b": pd.Timestamp(2015, 1, 1).as_unit("ns"), "c": 2}, index=[1] + ) + dfg_conversion_expected.index.name = "a" + + tm.assert_frame_equal(dfg_no_conversion, dfg_no_conversion_expected) + tm.assert_frame_equal(dfg_conversion, dfg_conversion_expected) + + +def test_gb_apply_list_of_unequal_len_arrays(): + # GH1738 + df = DataFrame( + { + "group1": ["a", "a", "a", "b", "b", "b", "a", "a", "a", "b", "b", "b"], + "group2": ["c", "c", "d", "d", "d", "e", "c", "c", "d", "d", "d", "e"], + "weight": [1.1, 2, 3, 4, 5, 6, 2, 4, 6, 8, 1, 2], + "value": [7.1, 8, 9, 10, 11, 12, 8, 7, 6, 5, 4, 3], + } + ) + df = df.set_index(["group1", "group2"]) + df_grouped = df.groupby(level=["group1", "group2"], sort=True) + + def noddy(value, weight): + out = np.array(value * weight).repeat(3) + return out + + # the kernel function returns arrays of unequal length + # pandas sniffs the first one, sees it's an array and not + # a list, and assumed the rest are of equal length + # and so tries a vstack + + # don't die + df_grouped.apply(lambda x: noddy(x.value, x.weight)) + + +def test_groupby_apply_all_none(): + # Tests to make sure no errors if apply function returns all None + # values. Issue 9684. + test_df = DataFrame({"groups": [0, 0, 1, 1], "random_vars": [8, 7, 4, 5]}) + + def test_func(x): + pass + + result = test_df.groupby("groups").apply(test_func) + expected = DataFrame() + tm.assert_frame_equal(result, expected) + + +def test_groupby_apply_none_first(): + # GH 12824. Tests if apply returns None first. 
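# A minimal sketch of the behavior under test, assuming a recent pandas:
# groups for which the applied function returns None are simply dropped
# from the result. `toy` mirrors the first frame constructed below.
import pandas as pd

toy = pd.DataFrame({"groups": [1, 1, 1, 2], "vars": [0, 1, 2, 3]})

def first_and_last(x):
    if len(x) < 2:
        return None  # undersized group, omitted from the output
    return x.iloc[[0, -1]]

# Only group 1 survives; the None returned for group 2 is dropped.
print(toy.groupby("groups").apply(first_and_last))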
+ test_df1 = DataFrame({"groups": [1, 1, 1, 2], "vars": [0, 1, 2, 3]}) + test_df2 = DataFrame({"groups": [1, 2, 2, 2], "vars": [0, 1, 2, 3]}) + + def test_func(x): + if x.shape[0] < 2: + return None + return x.iloc[[0, -1]] + + result1 = test_df1.groupby("groups").apply(test_func) + result2 = test_df2.groupby("groups").apply(test_func) + index1 = MultiIndex.from_arrays([[1, 1], [0, 2]], names=["groups", None]) + index2 = MultiIndex.from_arrays([[2, 2], [1, 3]], names=["groups", None]) + expected1 = DataFrame({"groups": [1, 1], "vars": [0, 2]}, index=index1) + expected2 = DataFrame({"groups": [2, 2], "vars": [1, 3]}, index=index2) + tm.assert_frame_equal(result1, expected1) + tm.assert_frame_equal(result2, expected2) + + +def test_groupby_apply_return_empty_chunk(): + # GH 22221: apply filter which returns some empty groups + df = DataFrame({"value": [0, 1], "group": ["filled", "empty"]}) + groups = df.groupby("group") + result = groups.apply(lambda group: group[group.value != 1]["value"]) + expected = Series( + [0], + name="value", + index=MultiIndex.from_product( + [["empty", "filled"], [0]], names=["group", None] + ).drop("empty"), + ) + tm.assert_series_equal(result, expected) + + +def test_apply_with_mixed_types(): + # gh-20949 + df = DataFrame({"A": "a a b".split(), "B": [1, 2, 3], "C": [4, 6, 5]}) + g = df.groupby("A", group_keys=False) + + result = g.transform(lambda x: x / x.sum()) + expected = DataFrame({"B": [1 / 3.0, 2 / 3.0, 1], "C": [0.4, 0.6, 1.0]}) + tm.assert_frame_equal(result, expected) + + result = g.apply(lambda x: x / x.sum()) + tm.assert_frame_equal(result, expected) + + +def test_func_returns_object(): + # GH 28652 + df = DataFrame({"a": [1, 2]}, index=Index([1, 2])) + result = df.groupby("a").apply(lambda g: g.index) + expected = Series([Index([1]), Index([2])], index=Index([1, 2], name="a")) + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "group_column_dtlike", + [datetime.today(), datetime.today().date(), datetime.today().time()], +) +def test_apply_datetime_issue(group_column_dtlike): + # GH-28247 + # groupby-apply throws an error if one of the columns in the DataFrame + # is a datetime object and the column labels are different from + # standard int values in range(len(num_columns)) + + df = DataFrame({"a": ["foo"], "b": [group_column_dtlike]}) + result = df.groupby("a").apply(lambda x: Series(["spam"], index=[42])) + + expected = DataFrame( + ["spam"], Index(["foo"], dtype="object", name="a"), columns=[42] + ) + tm.assert_frame_equal(result, expected) + + +def test_apply_series_return_dataframe_groups(): + # GH 10078 + tdf = DataFrame( + { + "day": { + 0: pd.Timestamp("2015-02-24 00:00:00"), + 1: pd.Timestamp("2015-02-24 00:00:00"), + 2: pd.Timestamp("2015-02-24 00:00:00"), + 3: pd.Timestamp("2015-02-24 00:00:00"), + 4: pd.Timestamp("2015-02-24 00:00:00"), + }, + "userAgent": { + 0: "some UA string", + 1: "some UA string", + 2: "some UA string", + 3: "another UA string", + 4: "some UA string", + }, + "userId": { + 0: "17661101", + 1: "17661101", + 2: "17661101", + 3: "17661101", + 4: "17661101", + }, + } + ) + + def most_common_values(df): + return Series({c: s.value_counts().index[0] for c, s in df.items()}) + + result = tdf.groupby("day").apply(most_common_values)["userId"] + expected = Series( + ["17661101"], index=pd.DatetimeIndex(["2015-02-24"], name="day"), name="userId" + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("category", [False, True]) +def test_apply_multi_level_name(category): + # 
https://github.com/pandas-dev/pandas/issues/31068 + b = [1, 2] * 5 + if category: + b = pd.Categorical(b, categories=[1, 2, 3]) + expected_index = pd.CategoricalIndex([1, 2, 3], categories=[1, 2, 3], name="B") + expected_values = [20, 25, 0] + else: + expected_index = Index([1, 2], name="B") + expected_values = [20, 25] + expected = DataFrame( + {"C": expected_values, "D": expected_values}, index=expected_index + ) + + df = DataFrame( + {"A": np.arange(10), "B": b, "C": list(range(10)), "D": list(range(10))} + ).set_index(["A", "B"]) + result = df.groupby("B", observed=False).apply(lambda x: x.sum()) + tm.assert_frame_equal(result, expected) + assert df.index.names == ["A", "B"] + + +def test_groupby_apply_datetime_result_dtypes(): + # GH 14849 + data = DataFrame.from_records( + [ + (pd.Timestamp(2016, 1, 1), "red", "dark", 1, "8"), + (pd.Timestamp(2015, 1, 1), "green", "stormy", 2, "9"), + (pd.Timestamp(2014, 1, 1), "blue", "bright", 3, "10"), + (pd.Timestamp(2013, 1, 1), "blue", "calm", 4, "potato"), + ], + columns=["observation", "color", "mood", "intensity", "score"], + ) + result = data.groupby("color").apply(lambda g: g.iloc[0]).dtypes + expected = Series( + [np.dtype("datetime64[ns]"), object, object, np.int64, object], + index=["observation", "color", "mood", "intensity", "score"], + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "index", + [ + pd.CategoricalIndex(list("abc")), + pd.interval_range(0, 3), + pd.period_range("2020", periods=3, freq="D"), + MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]), + ], +) +def test_apply_index_has_complex_internals(index): + # GH 31248 + df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index) + result = df.groupby("group", group_keys=False).apply(lambda x: x) + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize( + "function, expected_values", + [ + (lambda x: x.index.to_list(), [[0, 1], [2, 3]]), + (lambda x: set(x.index.to_list()), [{0, 1}, {2, 3}]), + (lambda x: tuple(x.index.to_list()), [(0, 1), (2, 3)]), + ( + lambda x: dict(enumerate(x.index.to_list())), + [{0: 0, 1: 1}, {0: 2, 1: 3}], + ), + ( + lambda x: [{n: i} for (n, i) in enumerate(x.index.to_list())], + [[{0: 0}, {1: 1}], [{0: 2}, {1: 3}]], + ), + ], +) +def test_apply_function_returns_non_pandas_non_scalar(function, expected_values): + # GH 31441 + df = DataFrame(["A", "A", "B", "B"], columns=["groups"]) + result = df.groupby("groups").apply(function) + expected = Series(expected_values, index=Index(["A", "B"], name="groups")) + tm.assert_series_equal(result, expected) + + +def test_apply_function_returns_numpy_array(): + # GH 31605 + def fct(group): + return group["B"].values.flatten() + + df = DataFrame({"A": ["a", "a", "b", "none"], "B": [1, 2, 3, np.nan]}) + + result = df.groupby("A").apply(fct) + expected = Series( + [[1.0, 2.0], [3.0], [np.nan]], index=Index(["a", "b", "none"], name="A") + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("function", [lambda gr: gr.index, lambda gr: gr.index + 1 - 1]) +def test_apply_function_index_return(function): + # GH: 22541 + df = DataFrame([1, 2, 2, 2, 1, 2, 3, 1, 3, 1], columns=["id"]) + result = df.groupby("id").apply(function) + expected = Series( + [Index([0, 4, 7, 9]), Index([1, 2, 3, 5]), Index([6, 8])], + index=Index([1, 2, 3], name="id"), + ) + tm.assert_series_equal(result, expected) + + +def test_apply_function_with_indexing_return_column(): + # GH#7002, GH#41480, GH#49256 + df = DataFrame( + { + "foo1": ["one", "two", "two", "three", 
"one", "two"], + "foo2": [1, 2, 4, 4, 5, 6], + } + ) + result = df.groupby("foo1", as_index=False).apply(lambda x: x.mean()) + expected = DataFrame( + { + "foo1": ["one", "three", "two"], + "foo2": [3.0, 4.0, 4.0], + } + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "udf", + [(lambda x: x.copy()), (lambda x: x.copy().rename(lambda y: y + 1))], +) +@pytest.mark.parametrize("group_keys", [True, False]) +def test_apply_result_type(group_keys, udf): + # https://github.com/pandas-dev/pandas/issues/34809 + # We'd like to control whether the group keys end up in the index + # regardless of whether the UDF happens to be a transform. + df = DataFrame({"A": ["a", "b"], "B": [1, 2]}) + df_result = df.groupby("A", group_keys=group_keys).apply(udf) + series_result = df.B.groupby(df.A, group_keys=group_keys).apply(udf) + + if group_keys: + assert df_result.index.nlevels == 2 + assert series_result.index.nlevels == 2 + else: + assert df_result.index.nlevels == 1 + assert series_result.index.nlevels == 1 + + +def test_result_order_group_keys_false(): + # GH 34998 + # apply result order should not depend on whether index is the same or just equal + df = DataFrame({"A": [2, 1, 2], "B": [1, 2, 3]}) + result = df.groupby("A", group_keys=False).apply(lambda x: x) + expected = df.groupby("A", group_keys=False).apply(lambda x: x.copy()) + tm.assert_frame_equal(result, expected) + + +def test_apply_with_timezones_aware(): + # GH: 27212 + dates = ["2001-01-01"] * 2 + ["2001-01-02"] * 2 + ["2001-01-03"] * 2 + index_no_tz = pd.DatetimeIndex(dates) + index_tz = pd.DatetimeIndex(dates, tz="UTC") + df1 = DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_no_tz}) + df2 = DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_tz}) + + result1 = df1.groupby("x", group_keys=False).apply(lambda df: df[["x", "y"]].copy()) + result2 = df2.groupby("x", group_keys=False).apply(lambda df: df[["x", "y"]].copy()) + + tm.assert_frame_equal(result1, result2) + + +def test_apply_is_unchanged_when_other_methods_are_called_first(reduction_func): + # GH #34656 + # GH #34271 + df = DataFrame( + { + "a": [99, 99, 99, 88, 88, 88], + "b": [1, 2, 3, 4, 5, 6], + "c": [10, 20, 30, 40, 50, 60], + } + ) + + expected = DataFrame( + {"a": [264, 297], "b": [15, 6], "c": [150, 60]}, + index=Index([88, 99], name="a"), + ) + + # Check output when no other methods are called before .apply() + grp = df.groupby(by="a") + msg = "The behavior of DataFrame.sum with axis=None is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False): + result = grp.apply(sum) + tm.assert_frame_equal(result, expected) + + # Check output when another method is called before .apply() + grp = df.groupby(by="a") + args = get_groupby_method_args(reduction_func, df) + _ = getattr(grp, reduction_func)(*args) + with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False): + result = grp.apply(sum) + tm.assert_frame_equal(result, expected) + + +def test_apply_with_date_in_multiindex_does_not_convert_to_timestamp(): + # GH 29617 + + df = DataFrame( + { + "A": ["a", "a", "a", "b"], + "B": [ + date(2020, 1, 10), + date(2020, 1, 10), + date(2020, 2, 10), + date(2020, 2, 10), + ], + "C": [1, 2, 3, 4], + }, + index=Index([100, 101, 102, 103], name="idx"), + ) + + grp = df.groupby(["A", "B"]) + result = grp.apply(lambda x: x.head(1)) + + expected = df.iloc[[0, 2, 3]] + expected = expected.reset_index() + expected.index = MultiIndex.from_frame(expected[["A", "B", "idx"]]) + expected 
= expected.drop(columns="idx") + + tm.assert_frame_equal(result, expected) + for val in result.index.levels[1]: + assert type(val) is date + + +def test_apply_by_cols_equals_apply_by_rows_transposed(): + # GH 16646 + # Operating on the columns, or transposing and operating on the rows + # should give the same result. There was previously a bug where the + # by_rows operation would work fine, but by_cols would throw a ValueError + + df = DataFrame( + np.random.default_rng(2).random([6, 4]), + columns=MultiIndex.from_product([["A", "B"], [1, 2]]), + ) + + msg = "The 'axis' keyword in DataFrame.groupby is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.T.groupby(axis=0, level=0) + by_rows = gb.apply(lambda x: x.droplevel(axis=0, level=0)) + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb2 = df.groupby(axis=1, level=0) + by_cols = gb2.apply(lambda x: x.droplevel(axis=1, level=0)) + + tm.assert_frame_equal(by_cols, by_rows.T) + tm.assert_frame_equal(by_cols, df) + + +@pytest.mark.parametrize("dropna", [True, False]) +def test_apply_dropna_with_indexed_same(dropna): + # GH 38227 + # GH#43205 + df = DataFrame( + { + "col": [1, 2, 3, 4, 5], + "group": ["a", np.nan, np.nan, "b", "b"], + }, + index=list("xxyxz"), + ) + result = df.groupby("group", dropna=dropna, group_keys=False).apply(lambda x: x) + expected = df.dropna() if dropna else df.iloc[[0, 3, 1, 2, 4]] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "as_index, expected", + [ + [ + False, + DataFrame( + [[1, 1, 1], [2, 2, 1]], columns=Index(["a", "b", None], dtype=object) + ), + ], + [ + True, + Series( + [1, 1], index=MultiIndex.from_tuples([(1, 1), (2, 2)], names=["a", "b"]) + ), + ], + ], +) +def test_apply_as_index_constant_lambda(as_index, expected): + # GH 13217 + df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 1, 2, 2], "c": [1, 1, 1, 1]}) + result = df.groupby(["a", "b"], as_index=as_index).apply(lambda x: 1) + tm.assert_equal(result, expected) + + +def test_sort_index_groups(): + # GH 20420 + df = DataFrame( + {"A": [1, 2, 3, 4, 5], "B": [6, 7, 8, 9, 0], "C": [1, 1, 1, 2, 2]}, + index=range(5), + ) + result = df.groupby("C").apply(lambda x: x.A.sort_index()) + expected = Series( + range(1, 6), + index=MultiIndex.from_tuples( + [(1, 0), (1, 1), (1, 2), (2, 3), (2, 4)], names=["C", None] + ), + name="A", + ) + tm.assert_series_equal(result, expected) + + +def test_positional_slice_groups_datetimelike(): + # GH 21651 + expected = DataFrame( + { + "date": pd.date_range("2010-01-01", freq="12H", periods=5), + "vals": range(5), + "let": list("abcde"), + } + ) + result = expected.groupby( + [expected.let, expected.date.dt.date], group_keys=False + ).apply(lambda x: x.iloc[0:]) + tm.assert_frame_equal(result, expected) + + +def test_groupby_apply_shape_cache_safety(): + # GH#42702 this fails if we cache_readonly Block.shape + df = DataFrame({"A": ["a", "a", "b"], "B": [1, 2, 3], "C": [4, 6, 5]}) + gb = df.groupby("A") + result = gb[["B", "C"]].apply(lambda x: x.astype(float).max() - x.min()) + + expected = DataFrame( + {"B": [1.0, 0.0], "C": [2.0, 0.0]}, index=Index(["a", "b"], name="A") + ) + tm.assert_frame_equal(result, expected) + + +def test_groupby_apply_to_series_name(): + # GH52444 + df = DataFrame.from_dict( + { + "a": ["a", "b", "a", "b"], + "b1": ["aa", "ac", "ac", "ad"], + "b2": ["aa", "aa", "aa", "ac"], + } + ) + grp = df.groupby("a")[["b1", "b2"]] + result = grp.apply(lambda x: 
x.unstack().value_counts()) + + expected_idx = MultiIndex.from_arrays( + arrays=[["a", "a", "b", "b", "b"], ["aa", "ac", "ac", "ad", "aa"]], + names=["a", None], + ) + expected = Series([3, 1, 2, 1, 1], index=expected_idx, name="count") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("dropna", [True, False]) +def test_apply_na(dropna): + # GH#28984 + df = DataFrame( + {"grp": [1, 1, 2, 2], "y": [1, 0, 2, 5], "z": [1, 2, np.nan, np.nan]} + ) + dfgrp = df.groupby("grp", dropna=dropna) + result = dfgrp.apply(lambda grp_df: grp_df.nlargest(1, "z")) + expected = dfgrp.apply(lambda x: x.sort_values("z", ascending=False).head(1)) + tm.assert_frame_equal(result, expected) + + +def test_apply_empty_string_nan_coerce_bug(): + # GH#24903 + result = ( + DataFrame( + { + "a": [1, 1, 2, 2], + "b": ["", "", "", ""], + "c": pd.to_datetime([1, 2, 3, 4], unit="s"), + } + ) + .groupby(["a", "b"]) + .apply(lambda df: df.iloc[-1]) + ) + expected = DataFrame( + [[1, "", pd.to_datetime(2, unit="s")], [2, "", pd.to_datetime(4, unit="s")]], + columns=["a", "b", "c"], + index=MultiIndex.from_tuples([(1, ""), (2, "")], names=["a", "b"]), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("index_values", [[1, 2, 3], [1.0, 2.0, 3.0]]) +def test_apply_index_key_error_bug(index_values): + # GH 44310 + result = DataFrame( + { + "a": ["aa", "a2", "a3"], + "b": [1, 2, 3], + }, + index=Index(index_values), + ) + expected = DataFrame( + { + "b_mean": [2.0, 3.0, 1.0], + }, + index=Index(["a2", "a3", "aa"], name="a"), + ) + result = result.groupby("a").apply( + lambda df: Series([df["b"].mean()], index=["b_mean"]) + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "arg,idx", + [ + [ + [ + 1, + 2, + 3, + ], + [ + 0.1, + 0.3, + 0.2, + ], + ], + [ + [ + 1, + 2, + 3, + ], + [ + 0.1, + 0.2, + 0.3, + ], + ], + [ + [ + 1, + 4, + 3, + ], + [ + 0.1, + 0.4, + 0.2, + ], + ], + ], +) +def test_apply_nonmonotonic_float_index(arg, idx): + # GH 34455 + expected = DataFrame({"col": arg}, index=idx) + result = expected.groupby("col", group_keys=False).apply(lambda x: x) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("args, kwargs", [([True], {}), ([], {"numeric_only": True})]) +def test_apply_str_with_args(df, args, kwargs): + # GH#46479 + gb = df.groupby("A") + result = gb.apply("sum", *args, **kwargs) + expected = gb.sum(numeric_only=True) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("name", ["some_name", None]) +def test_result_name_when_one_group(name): + # GH 46369 + ser = Series([1, 2], name=name) + result = ser.groupby(["a", "a"], group_keys=False).apply(lambda x: x) + expected = Series([1, 2], name=name) + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "method, op", + [ + ("apply", lambda gb: gb.values[-1]), + ("apply", lambda gb: gb["b"].iloc[0]), + ("agg", "skew"), + ("agg", "prod"), + ("agg", "sum"), + ], +) +def test_empty_df(method, op): + # GH 47985 + empty_df = DataFrame({"a": [], "b": []}) + gb = empty_df.groupby("a", group_keys=True) + group = getattr(gb, "b") + + result = getattr(group, method)(op) + expected = Series( + [], name="b", dtype="float64", index=Index([], dtype="float64", name="a") + ) + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "group_col", + [([0.0, np.nan, 0.0, 0.0]), ([np.nan, 0.0, 0.0, 0.0]), ([0, 0.0, 0.0, np.nan])], +) +def test_apply_inconsistent_output(group_col): + # GH 34478 + df = DataFrame({"group_col": 
group_col, "value_col": [2, 2, 2, 2]}) + + result = df.groupby("group_col").value_col.apply( + lambda x: x.value_counts().reindex(index=[1, 2, 3]) + ) + expected = Series( + [np.nan, 3.0, np.nan], + name="value_col", + index=MultiIndex.from_product([[0.0], [1, 2, 3]], names=["group_col", 0.0]), + ) + + tm.assert_series_equal(result, expected) + + +def test_apply_array_output_multi_getitem(): + # GH 18930 + df = DataFrame( + {"A": {"a": 1, "b": 2}, "B": {"a": 1, "b": 2}, "C": {"a": 1, "b": 2}} + ) + result = df.groupby("A")[["B", "C"]].apply(lambda x: np.array([0])) + expected = Series( + [np.array([0])] * 2, index=Index([1, 2], name="A"), name=("B", "C") + ) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_apply_mutate.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_apply_mutate.py new file mode 100644 index 00000000..9bc07b58 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_apply_mutate.py @@ -0,0 +1,147 @@ +import numpy as np + +import pandas as pd +import pandas._testing as tm + + +def test_group_by_copy(): + # GH#44803 + df = pd.DataFrame( + { + "name": ["Alice", "Bob", "Carl"], + "age": [20, 21, 20], + } + ).set_index("name") + + grp_by_same_value = df.groupby(["age"], group_keys=False).apply(lambda group: group) + grp_by_copy = df.groupby(["age"], group_keys=False).apply( + lambda group: group.copy() + ) + tm.assert_frame_equal(grp_by_same_value, grp_by_copy) + + +def test_mutate_groups(): + # GH3380 + + df = pd.DataFrame( + { + "cat1": ["a"] * 8 + ["b"] * 6, + "cat2": ["c"] * 2 + + ["d"] * 2 + + ["e"] * 2 + + ["f"] * 2 + + ["c"] * 2 + + ["d"] * 2 + + ["e"] * 2, + "cat3": [f"g{x}" for x in range(1, 15)], + "val": np.random.default_rng(2).integers(100, size=14), + } + ) + + def f_copy(x): + x = x.copy() + x["rank"] = x.val.rank(method="min") + return x.groupby("cat2")["rank"].min() + + def f_no_copy(x): + x["rank"] = x.val.rank(method="min") + return x.groupby("cat2")["rank"].min() + + grpby_copy = df.groupby("cat1").apply(f_copy) + grpby_no_copy = df.groupby("cat1").apply(f_no_copy) + tm.assert_series_equal(grpby_copy, grpby_no_copy) + + +def test_no_mutate_but_looks_like(): + # GH 8467 + # first show's mutation indicator + # second does not, but should yield the same results + df = pd.DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)}) + + result1 = df.groupby("key", group_keys=True).apply(lambda x: x[:].key) + result2 = df.groupby("key", group_keys=True).apply(lambda x: x.key) + tm.assert_series_equal(result1, result2) + + +def test_apply_function_with_indexing(): + # GH: 33058 + df = pd.DataFrame( + {"col1": ["A", "A", "A", "B", "B", "B"], "col2": [1, 2, 3, 4, 5, 6]} + ) + + def fn(x): + x.loc[x.index[-1], "col2"] = 0 + return x.col2 + + result = df.groupby(["col1"], as_index=False).apply(fn) + expected = pd.Series( + [1, 2, 0, 4, 5, 0], + index=pd.MultiIndex.from_tuples( + [(0, 0), (0, 1), (0, 2), (1, 3), (1, 4), (1, 5)] + ), + name="col2", + ) + tm.assert_series_equal(result, expected) + + +def test_apply_mutate_columns_multiindex(): + # GH 12652 + df = pd.DataFrame( + { + ("C", "julian"): [1, 2, 3], + ("B", "geoffrey"): [1, 2, 3], + ("A", "julian"): [1, 2, 3], + ("B", "julian"): [1, 2, 3], + ("A", "geoffrey"): [1, 2, 3], + ("C", "geoffrey"): [1, 2, 3], + }, + columns=pd.MultiIndex.from_tuples( + [ + ("A", "julian"), + ("A", "geoffrey"), + ("B", "julian"), + ("B", "geoffrey"), + ("C", "julian"), + ("C", "geoffrey"), + ] + ), + ) + + def 
add_column(grouped): + name = grouped.columns[0][1] + grouped["sum", name] = grouped.sum(axis=1) + return grouped + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby(level=1, axis=1) + result = gb.apply(add_column) + expected = pd.DataFrame( + [ + [1, 1, 1, 3, 1, 1, 1, 3], + [2, 2, 2, 6, 2, 2, 2, 6], + [ + 3, + 3, + 3, + 9, + 3, + 3, + 3, + 9, + ], + ], + columns=pd.MultiIndex.from_tuples( + [ + ("geoffrey", "A", "geoffrey"), + ("geoffrey", "B", "geoffrey"), + ("geoffrey", "C", "geoffrey"), + ("geoffrey", "sum", "geoffrey"), + ("julian", "A", "julian"), + ("julian", "B", "julian"), + ("julian", "C", "julian"), + ("julian", "sum", "julian"), + ] + ), + ) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_bin_groupby.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_bin_groupby.py new file mode 100644 index 00000000..49b2e621 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_bin_groupby.py @@ -0,0 +1,65 @@ +import numpy as np +import pytest + +from pandas._libs import lib +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm + + +def assert_block_lengths(x): + assert len(x) == len(x._mgr.blocks[0].mgr_locs) + return 0 + + +def cumsum_max(x): + x.cumsum().max() + return 0 + + +@pytest.mark.parametrize( + "func", + [ + cumsum_max, + pytest.param(assert_block_lengths, marks=td.skip_array_manager_invalid_test), + ], +) +def test_mgr_locs_updated(func): + # https://github.com/pandas-dev/pandas/issues/31802 + # Some operations may require creating new blocks, which requires + # valid mgr_locs + df = pd.DataFrame({"A": ["a", "a", "a"], "B": ["a", "b", "b"], "C": [1, 1, 1]}) + result = df.groupby(["A", "B"]).agg(func) + expected = pd.DataFrame( + {"C": [0, 0]}, + index=pd.MultiIndex.from_product([["a"], ["a", "b"]], names=["A", "B"]), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "binner,closed,expected", + [ + ( + np.array([0, 3, 6, 9], dtype=np.int64), + "left", + np.array([2, 5, 6], dtype=np.int64), + ), + ( + np.array([0, 3, 6, 9], dtype=np.int64), + "right", + np.array([3, 6, 6], dtype=np.int64), + ), + (np.array([0, 3, 6], dtype=np.int64), "left", np.array([2, 5], dtype=np.int64)), + ( + np.array([0, 3, 6], dtype=np.int64), + "right", + np.array([3, 6], dtype=np.int64), + ), + ], +) +def test_generate_bins(binner, closed, expected): + values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64) + result = lib.generate_bins_dt64(values, binner, closed=closed) + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_categorical.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_categorical.py new file mode 100644 index 00000000..68ce58ad --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_categorical.py @@ -0,0 +1,2119 @@ +from datetime import datetime + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + Index, + MultiIndex, + Series, + qcut, +) +import pandas._testing as tm +from pandas.api.typing import SeriesGroupBy +from pandas.tests.groupby import get_groupby_method_args + + +def cartesian_product_for_groupers(result, args, names, fill_value=np.nan): + """Reindex to a cartesian production for the groupers, + preserving the nature 
(Categorical) of each grouper + """ + + def f(a): + if isinstance(a, (CategoricalIndex, Categorical)): + categories = a.categories + a = Categorical.from_codes( + np.arange(len(categories)), categories=categories, ordered=a.ordered + ) + return a + + index = MultiIndex.from_product(map(f, args), names=names) + return result.reindex(index, fill_value=fill_value).sort_index() + + +_results_for_groupbys_with_missing_categories = { + # This maps the builtin groupby functions to their expected outputs for + # missing categories when they are called on a categorical grouper with + # observed=False. Some functions are expected to return NaN, some zero. + # These expected values can be used across several tests (i.e. they are + # the same for SeriesGroupBy and DataFrameGroupBy) but they should only be + # hardcoded in one place. + "all": np.nan, + "any": np.nan, + "count": 0, + "corrwith": np.nan, + "first": np.nan, + "idxmax": np.nan, + "idxmin": np.nan, + "last": np.nan, + "max": np.nan, + "mean": np.nan, + "median": np.nan, + "min": np.nan, + "nth": np.nan, + "nunique": 0, + "prod": np.nan, + "quantile": np.nan, + "sem": np.nan, + "size": 0, + "skew": np.nan, + "std": np.nan, + "sum": 0, + "var": np.nan, +} + + +def test_apply_use_categorical_name(df): + cats = qcut(df.C, 4) + + def get_stats(group): + return { + "min": group.min(), + "max": group.max(), + "count": group.count(), + "mean": group.mean(), + } + + result = df.groupby(cats, observed=False).D.apply(get_stats) + assert result.index.names[0] == "C" + + +def test_basic(): # TODO: split this test + cats = Categorical( + ["a", "a", "a", "b", "b", "b", "c", "c", "c"], + categories=["a", "b", "c", "d"], + ordered=True, + ) + data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats}) + + exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True) + expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index) + result = data.groupby("b", observed=False).mean() + tm.assert_frame_equal(result, expected) + + cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True) + cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True) + df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) + + # single grouper + gb = df.groupby("A", observed=False) + exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True) + expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)}) + result = gb.sum(numeric_only=True) + tm.assert_frame_equal(result, expected) + + # GH 8623 + x = DataFrame( + [[1, "John P. Doe"], [2, "Jane Dove"], [1, "John P. 
Doe"]], + columns=["person_id", "person_name"], + ) + x["person_name"] = Categorical(x.person_name) + + g = x.groupby(["person_id"], observed=False) + result = g.transform(lambda x: x) + tm.assert_frame_equal(result, x[["person_name"]]) + + result = x.drop_duplicates("person_name") + expected = x.iloc[[0, 1]] + tm.assert_frame_equal(result, expected) + + def f(x): + return x.drop_duplicates("person_name").iloc[0] + + result = g.apply(f) + expected = x.iloc[[0, 1]].copy() + expected.index = Index([1, 2], name="person_id") + expected["person_name"] = expected["person_name"].astype("object") + tm.assert_frame_equal(result, expected) + + # GH 9921 + # Monotonic + df = DataFrame({"a": [5, 15, 25]}) + c = pd.cut(df.a, bins=[0, 10, 20, 30, 40]) + + msg = "using SeriesGroupBy.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#53425 + result = df.a.groupby(c, observed=False).transform(sum) + tm.assert_series_equal(result, df["a"]) + + tm.assert_series_equal( + df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"] + ) + msg = "using DataFrameGroupBy.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#53425 + result = df.groupby(c, observed=False).transform(sum) + expected = df[["a"]] + tm.assert_frame_equal(result, expected) + + gbc = df.groupby(c, observed=False) + result = gbc.transform(lambda xs: np.max(xs, axis=0)) + tm.assert_frame_equal(result, df[["a"]]) + + result2 = gbc.transform(lambda xs: np.max(xs, axis=0)) + msg = "using DataFrameGroupBy.max" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#53425 + result3 = gbc.transform(max) + result4 = gbc.transform(np.maximum.reduce) + result5 = gbc.transform(lambda xs: np.maximum.reduce(xs)) + tm.assert_frame_equal(result2, df[["a"]], check_dtype=False) + tm.assert_frame_equal(result3, df[["a"]], check_dtype=False) + tm.assert_frame_equal(result4, df[["a"]]) + tm.assert_frame_equal(result5, df[["a"]]) + + # Filter + tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"]) + tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df) + + # Non-monotonic + df = DataFrame({"a": [5, 15, 25, -5]}) + c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40]) + + msg = "using SeriesGroupBy.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#53425 + result = df.a.groupby(c, observed=False).transform(sum) + tm.assert_series_equal(result, df["a"]) + + tm.assert_series_equal( + df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"] + ) + msg = "using DataFrameGroupBy.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#53425 + result = df.groupby(c, observed=False).transform(sum) + expected = df[["a"]] + tm.assert_frame_equal(result, expected) + + tm.assert_frame_equal( + df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]] + ) + + # GH 9603 + df = DataFrame({"a": [1, 0, 0, 0]}) + c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd"))) + result = df.groupby(c, observed=False).apply(len) + + exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered) + expected = Series([1, 0, 0, 0], index=exp_index) + expected.index.name = "a" + tm.assert_series_equal(result, expected) + + # more basic + levels = ["foo", "bar", "baz", "qux"] + codes = np.random.default_rng(2).integers(0, 4, size=100) + + cats = Categorical.from_codes(codes, levels, ordered=True) + + data = DataFrame(np.random.default_rng(2).standard_normal((100, 4))) + + result = data.groupby(cats, 
observed=False).mean() + + expected = data.groupby(np.asarray(cats), observed=False).mean() + exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True) + expected = expected.reindex(exp_idx) + + tm.assert_frame_equal(result, expected) + + grouped = data.groupby(cats, observed=False) + desc_result = grouped.describe() + + idx = cats.codes.argsort() + ord_labels = np.asarray(cats).take(idx) + ord_data = data.take(idx) + + exp_cats = Categorical( + ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"] + ) + expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe() + tm.assert_frame_equal(desc_result, expected) + + # GH 10460 + expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True) + exp = CategoricalIndex(expc) + tm.assert_index_equal( + (desc_result.stack(future_stack=True).index.get_level_values(0)), exp + ) + exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4) + tm.assert_index_equal( + (desc_result.stack(future_stack=True).index.get_level_values(1)), exp + ) + + +def test_level_get_group(observed): + # GH15155 + df = DataFrame( + data=np.arange(2, 22, 2), + index=MultiIndex( + levels=[CategoricalIndex(["a", "b"]), range(10)], + codes=[[0] * 5 + [1] * 5, range(10)], + names=["Index1", "Index2"], + ), + ) + g = df.groupby(level=["Index1"], observed=observed) + + # expected should equal test.loc[["a"]] + # GH15166 + expected = DataFrame( + data=np.arange(2, 12, 2), + index=MultiIndex( + levels=[CategoricalIndex(["a", "b"]), range(5)], + codes=[[0] * 5, range(5)], + names=["Index1", "Index2"], + ), + ) + result = g.get_group("a") + + tm.assert_frame_equal(result, expected) + + +def test_sorting_with_different_categoricals(): + # GH 24271 + df = DataFrame( + { + "group": ["A"] * 6 + ["B"] * 6, + "dose": ["high", "med", "low"] * 4, + "outcomes": np.arange(12.0), + } + ) + + df.dose = Categorical(df.dose, categories=["low", "med", "high"], ordered=True) + + result = df.groupby("group")["dose"].value_counts() + result = result.sort_index(level=0, sort_remaining=True) + index = ["low", "med", "high", "low", "med", "high"] + index = Categorical(index, categories=["low", "med", "high"], ordered=True) + index = [["A", "A", "A", "B", "B", "B"], CategoricalIndex(index)] + index = MultiIndex.from_arrays(index, names=["group", "dose"]) + expected = Series([2] * 6, index=index, name="count") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ordered", [True, False]) +def test_apply(ordered): + # GH 10138 + + dense = Categorical(list("abc"), ordered=ordered) + + # 'b' is in the categories but not in the list + missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered) + values = np.arange(len(dense)) + df = DataFrame({"missing": missing, "dense": dense, "values": values}) + grouped = df.groupby(["missing", "dense"], observed=True) + + # missing category 'b' should still exist in the output index + idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"]) + expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"]) + + result = grouped.apply(lambda x: np.mean(x, axis=0)) + tm.assert_frame_equal(result, expected) + + result = grouped.mean() + tm.assert_frame_equal(result, expected) + + msg = "using DataFrameGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#53425 + result = grouped.agg(np.mean) + tm.assert_frame_equal(result, expected) + + # but for transform we should still get back the original index + idx = 
MultiIndex.from_arrays([missing, dense], names=["missing", "dense"]) + expected = Series(1, index=idx) + result = grouped.apply(lambda x: 1) + tm.assert_series_equal(result, expected) + + +def test_observed(observed): + # multiple groupers, don't re-expand the output space + # of the grouper + # gh-14942 (implement) + # gh-10132 (back-compat) + # gh-8138 (back-compat) + # gh-8869 + + cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True) + cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True) + df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) + df["C"] = ["foo", "bar"] * 2 + + # multiple groupers with a non-cat + gb = df.groupby(["A", "B", "C"], observed=observed) + exp_index = MultiIndex.from_arrays( + [cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"] + ) + expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index() + result = gb.sum() + if not observed: + expected = cartesian_product_for_groupers( + expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0 + ) + + tm.assert_frame_equal(result, expected) + + gb = df.groupby(["A", "B"], observed=observed) + exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"]) + expected = DataFrame( + {"values": [1, 2, 3, 4], "C": ["foo", "bar", "foo", "bar"]}, index=exp_index + ) + result = gb.sum() + if not observed: + expected = cartesian_product_for_groupers( + expected, [cat1, cat2], list("AB"), fill_value=0 + ) + + tm.assert_frame_equal(result, expected) + + # https://github.com/pandas-dev/pandas/issues/8138 + d = { + "cat": Categorical( + ["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True + ), + "ints": [1, 1, 2, 2], + "val": [10, 20, 30, 40], + } + df = DataFrame(d) + + # Grouping on a single column + groups_single_key = df.groupby("cat", observed=observed) + result = groups_single_key.mean() + + exp_index = CategoricalIndex( + list("ab"), name="cat", categories=list("abc"), ordered=True + ) + expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index) + if not observed: + index = CategoricalIndex( + list("abc"), name="cat", categories=list("abc"), ordered=True + ) + expected = expected.reindex(index) + + tm.assert_frame_equal(result, expected) + + # Grouping on two columns + groups_double_key = df.groupby(["cat", "ints"], observed=observed) + result = groups_double_key.agg("mean") + expected = DataFrame( + { + "val": [10.0, 30.0, 20.0, 40.0], + "cat": Categorical( + ["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True + ), + "ints": [1, 2, 1, 2], + } + ).set_index(["cat", "ints"]) + if not observed: + expected = cartesian_product_for_groupers( + expected, [df.cat.values, [1, 2]], ["cat", "ints"] + ) + + tm.assert_frame_equal(result, expected) + + # GH 10132 + for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]: + c, i = key + result = groups_double_key.get_group(key) + expected = df[(df.cat == c) & (df.ints == i)] + tm.assert_frame_equal(result, expected) + + # gh-8869 + # with as_index + d = { + "foo": [10, 8, 4, 8, 4, 1, 1], + "bar": [10, 20, 30, 40, 50, 60, 70], + "baz": ["d", "c", "e", "a", "a", "d", "c"], + } + df = DataFrame(d) + cat = pd.cut(df["foo"], np.linspace(0, 10, 3)) + df["range"] = cat + groups = df.groupby(["range", "baz"], as_index=False, observed=observed) + result = groups.agg("mean") + + groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed) + expected = groups2.agg("mean").reset_index() + tm.assert_frame_equal(result, expected) + + +def 
test_observed_codes_remap(observed): + d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]} + df = DataFrame(d) + values = pd.cut(df["C1"], [1, 2, 3, 6]) + values.name = "cat" + groups_double_key = df.groupby([values, "C2"], observed=observed) + + idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"]) + expected = DataFrame( + {"C1": [3.0, 3.0, 4.0, 5.0], "C3": [10.0, 100.0, 200.0, 34.0]}, index=idx + ) + if not observed: + expected = cartesian_product_for_groupers( + expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"] + ) + + result = groups_double_key.agg("mean") + tm.assert_frame_equal(result, expected) + + +def test_observed_perf(): + # we create a cartesian product, so this is + # non-performant if we don't use observed values + # gh-14942 + df = DataFrame( + { + "cat": np.random.default_rng(2).integers(0, 255, size=30000), + "int_id": np.random.default_rng(2).integers(0, 255, size=30000), + "other_id": np.random.default_rng(2).integers(0, 10000, size=30000), + "foo": 0, + } + ) + df["cat"] = df.cat.astype(str).astype("category") + + grouped = df.groupby(["cat", "int_id", "other_id"], observed=True) + result = grouped.count() + assert result.index.levels[0].nunique() == df.cat.nunique() + assert result.index.levels[1].nunique() == df.int_id.nunique() + assert result.index.levels[2].nunique() == df.other_id.nunique() + + +def test_observed_groups(observed): + # gh-20583 + # test that we have the appropriate groups + + cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"]) + df = DataFrame({"cat": cat, "vals": [1, 2, 3]}) + g = df.groupby("cat", observed=observed) + + result = g.groups + if observed: + expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")} + else: + expected = { + "a": Index([0, 2], dtype="int64"), + "b": Index([], dtype="int64"), + "c": Index([1], dtype="int64"), + } + + tm.assert_dict_equal(result, expected) + + +@pytest.mark.parametrize( + "keys, expected_values, expected_index_levels", + [ + ("a", [15, 9, 0], CategoricalIndex([1, 2, 3], name="a")), + ( + ["a", "b"], + [7, 8, 0, 0, 0, 9, 0, 0, 0], + [CategoricalIndex([1, 2, 3], name="a"), Index([4, 5, 6])], + ), + ( + ["a", "a2"], + [15, 0, 0, 0, 9, 0, 0, 0, 0], + [ + CategoricalIndex([1, 2, 3], name="a"), + CategoricalIndex([1, 2, 3], name="a"), + ], + ), + ], +) +@pytest.mark.parametrize("test_series", [True, False]) +def test_unobserved_in_index(keys, expected_values, expected_index_levels, test_series): + # GH#49354 - ensure unobserved cats occur when grouping by index levels + df = DataFrame( + { + "a": Categorical([1, 1, 2], categories=[1, 2, 3]), + "a2": Categorical([1, 1, 2], categories=[1, 2, 3]), + "b": [4, 5, 6], + "c": [7, 8, 9], + } + ).set_index(["a", "a2"]) + if "b" not in keys: + # Only keep b when it is used for grouping for consistent columns in the result + df = df.drop(columns="b") + + gb = df.groupby(keys, observed=False) + if test_series: + gb = gb["c"] + result = gb.sum() + + if len(keys) == 1: + index = expected_index_levels + else: + codes = [[0, 0, 0, 1, 1, 1, 2, 2, 2], 3 * [0, 1, 2]] + index = MultiIndex( + expected_index_levels, + codes=codes, + names=keys, + ) + expected = DataFrame({"c": expected_values}, index=index) + if test_series: + expected = expected["c"] + tm.assert_equal(result, expected) + + +def test_observed_groups_with_nan(observed): + # GH 24740 + df = DataFrame( + { + "cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]), + "vals": [1, 2, 3], + } + ) + g = df.groupby("cat", 
observed=observed) + result = g.groups + if observed: + expected = {"a": Index([0, 2], dtype="int64")} + else: + expected = { + "a": Index([0, 2], dtype="int64"), + "b": Index([], dtype="int64"), + "d": Index([], dtype="int64"), + } + tm.assert_dict_equal(result, expected) + + +def test_observed_nth(): + # GH 26385 + cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"]) + ser = Series([1, 2, 3]) + df = DataFrame({"cat": cat, "ser": ser}) + + result = df.groupby("cat", observed=False)["ser"].nth(0) + expected = df["ser"].iloc[[0]] + tm.assert_series_equal(result, expected) + + +def test_dataframe_categorical_with_nan(observed): + # GH 21151 + s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"]) + s2 = Series([1, 2, 3, 4]) + df = DataFrame({"s1": s1, "s2": s2}) + result = df.groupby("s1", observed=observed).first().reset_index() + if observed: + expected = DataFrame( + {"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]} + ) + else: + expected = DataFrame( + { + "s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]), + "s2": [2, np.nan, np.nan], + } + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("ordered", [True, False]) +@pytest.mark.parametrize("observed", [True, False]) +@pytest.mark.parametrize("sort", [True, False]) +def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort): + # GH 25871: Fix groupby sorting on ordered Categoricals + # GH 25167: Groupby with observed=True doesn't sort + + # Build a dataframe with cat having one unobserved category ('missing'), + # and a Series with identical values + label = Categorical( + ["d", "a", "b", "a", "d", "b"], + categories=["a", "b", "missing", "d"], + ordered=ordered, + ) + val = Series(["d", "a", "b", "a", "d", "b"]) + df = DataFrame({"label": label, "val": val}) + + # aggregate on the Categorical + result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first") + + # If ordering works, we expect index labels equal to aggregation results, + # except for 'observed=False': label 'missing' has aggregation None + label = Series(result.index.array, dtype="object") + aggr = Series(result.array) + if not observed: + aggr[aggr.isna()] = "missing" + if not all(label == aggr): + msg = ( + "Labels and aggregation results not consistently sorted\n" + f"for (ordered={ordered}, observed={observed}, sort={sort})\n" + f"Result:\n{result}" + ) + assert False, msg + + +def test_datetime(): + # GH9049: ensure backward compatibility + levels = pd.date_range("2014-01-01", periods=4) + codes = np.random.default_rng(2).integers(0, 4, size=100) + + cats = Categorical.from_codes(codes, levels, ordered=True) + + data = DataFrame(np.random.default_rng(2).standard_normal((100, 4))) + result = data.groupby(cats, observed=False).mean() + + expected = data.groupby(np.asarray(cats), observed=False).mean() + expected = expected.reindex(levels) + expected.index = CategoricalIndex( + expected.index, categories=expected.index, ordered=True + ) + + tm.assert_frame_equal(result, expected) + + grouped = data.groupby(cats, observed=False) + desc_result = grouped.describe() + + idx = cats.codes.argsort() + ord_labels = cats.take(idx) + ord_data = data.take(idx) + expected = ord_data.groupby(ord_labels, observed=False).describe() + tm.assert_frame_equal(desc_result, expected) + tm.assert_index_equal(desc_result.index, expected.index) + tm.assert_index_equal( + desc_result.index.get_level_values(0), expected.index.get_level_values(0) + ) + + # GH 10460 + expc 
= Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True) + exp = CategoricalIndex(expc) + tm.assert_index_equal( + (desc_result.stack(future_stack=True).index.get_level_values(0)), exp + ) + exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4) + tm.assert_index_equal( + (desc_result.stack(future_stack=True).index.get_level_values(1)), exp + ) + + +def test_categorical_index(): + s = np.random.default_rng(2) + levels = ["foo", "bar", "baz", "qux"] + codes = s.integers(0, 4, size=20) + cats = Categorical.from_codes(codes, levels, ordered=True) + df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd")) + df["cats"] = cats + + # with a cat index + result = df.set_index("cats").groupby(level=0, observed=False).sum() + expected = df[list("abcd")].groupby(cats.codes, observed=False).sum() + expected.index = CategoricalIndex( + Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats" + ) + tm.assert_frame_equal(result, expected) + + # with a cat column, should produce a cat index + result = df.groupby("cats", observed=False).sum() + expected = df[list("abcd")].groupby(cats.codes, observed=False).sum() + expected.index = CategoricalIndex( + Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats" + ) + tm.assert_frame_equal(result, expected) + + +def test_describe_categorical_columns(): + # GH 11558 + cats = CategoricalIndex( + ["qux", "foo", "baz", "bar"], + categories=["foo", "bar", "baz", "qux"], + ordered=True, + ) + df = DataFrame(np.random.default_rng(2).standard_normal((20, 4)), columns=cats) + result = df.groupby([1, 2, 3, 4] * 5).describe() + + tm.assert_index_equal(result.stack(future_stack=True).columns, cats) + tm.assert_categorical_equal( + result.stack(future_stack=True).columns.values, cats.values + ) + + +def test_unstack_categorical(): + # GH11558 (example is taken from the original issue) + df = DataFrame( + {"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2} + ) + df["medium"] = df["medium"].astype("category") + + gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack() + result = gcat.describe() + + exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium") + tm.assert_index_equal(result.columns, exp_columns) + tm.assert_categorical_equal(result.columns.values, exp_columns.values) + + result = gcat["A"] + gcat["B"] + expected = Series([6, 4], index=Index(["X", "Y"], name="artist")) + tm.assert_series_equal(result, expected) + + +def test_bins_unequal_len(): + # GH3011 + series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4]) + bins = pd.cut(series.dropna().values, 4) + + # len(bins) != len(series) here + with pytest.raises(ValueError, match="Grouper and axis must be same length"): + series.groupby(bins).mean() + + +@pytest.mark.parametrize( + ["series", "data"], + [ + # Group a series with length and index equal to those of the grouper. + (Series(range(4)), {"A": [0, 3], "B": [1, 2]}), + # Group a series with length equal to that of the grouper and index unequal to + # that of the grouper. + (Series(range(4)).rename(lambda idx: idx + 1), {"A": [2], "B": [0, 1]}), + # GH44179: Group a series with length unequal to that of the grouper. + (Series(range(7)), {"A": [0, 3], "B": [1, 2]}), + ], +) +def test_categorical_series(series, data): + # Group the given series by a series with categorical data type such that group A + # takes indices 0 and 3 and group B indices 1 and 2, obtaining the values mapped in + # the given data. 
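+    # Concretely, for the first parametrized case series = Series(range(4)):
+    # the grouper list("ABBA") puts indices 0 and 3 in group "A" and indices
+    # 1 and 2 in group "B", so aggregate(list) yields
+    # {"A": [0, 3], "B": [1, 2]} on a CategoricalIndex, matching `data`.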
+ groupby = series.groupby(Series(list("ABBA"), dtype="category"), observed=False) + result = groupby.aggregate(list) + expected = Series(data, index=CategoricalIndex(data.keys())) + tm.assert_series_equal(result, expected) + + +def test_as_index(): + # GH13204 + df = DataFrame( + { + "cat": Categorical([1, 2, 2], [1, 2, 3]), + "A": [10, 11, 11], + "B": [101, 102, 103], + } + ) + result = df.groupby(["cat", "A"], as_index=False, observed=True).sum() + expected = DataFrame( + { + "cat": Categorical([1, 2], categories=df.cat.cat.categories), + "A": [10, 11], + "B": [101, 205], + }, + columns=["cat", "A", "B"], + ) + tm.assert_frame_equal(result, expected) + + # function grouper + f = lambda r: df.loc[r, "A"] + msg = "A grouping .* was excluded from the result" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(["cat", f], as_index=False, observed=True).sum() + expected = DataFrame( + { + "cat": Categorical([1, 2], categories=df.cat.cat.categories), + "A": [10, 22], + "B": [101, 205], + }, + columns=["cat", "A", "B"], + ) + tm.assert_frame_equal(result, expected) + + # another not in-axis grouper (conflicting names in index) + s = Series(["a", "b", "b"], name="cat") + msg = "A grouping .* was excluded from the result" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(["cat", s], as_index=False, observed=True).sum() + tm.assert_frame_equal(result, expected) + + # is original index dropped? + group_columns = ["cat", "A"] + expected = DataFrame( + { + "cat": Categorical([1, 2], categories=df.cat.cat.categories), + "A": [10, 11], + "B": [101, 205], + }, + columns=["cat", "A", "B"], + ) + + for name in [None, "X", "B"]: + df.index = Index(list("abc"), name=name) + result = df.groupby(group_columns, as_index=False, observed=True).sum() + + tm.assert_frame_equal(result, expected) + + +def test_preserve_categories(): + # GH-13179 + categories = list("abc") + + # ordered=True + df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)}) + sort_index = CategoricalIndex(categories, categories, ordered=True, name="A") + nosort_index = CategoricalIndex(list("bac"), categories, ordered=True, name="A") + tm.assert_index_equal( + df.groupby("A", sort=True, observed=False).first().index, sort_index + ) + # GH#42482 - don't sort result when sort=False, even when ordered=True + tm.assert_index_equal( + df.groupby("A", sort=False, observed=False).first().index, nosort_index + ) + + # ordered=False + df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)}) + sort_index = CategoricalIndex(categories, categories, ordered=False, name="A") + # GH#48749 - don't change order of categories + # GH#42482 - don't sort result when sort=False, even when ordered=True + nosort_index = CategoricalIndex(list("bac"), list("abc"), ordered=False, name="A") + tm.assert_index_equal( + df.groupby("A", sort=True, observed=False).first().index, sort_index + ) + tm.assert_index_equal( + df.groupby("A", sort=False, observed=False).first().index, nosort_index + ) + + +def test_preserve_categorical_dtype(): + # GH13743, GH13854 + df = DataFrame( + { + "A": [1, 2, 1, 1, 2], + "B": [10, 16, 22, 28, 34], + "C1": Categorical(list("abaab"), categories=list("bac"), ordered=False), + "C2": Categorical(list("abaab"), categories=list("bac"), ordered=True), + } + ) + # single grouper + exp_full = DataFrame( + { + "A": [2.0, 1.0, np.nan], + "B": [25.0, 20.0, np.nan], + "C1": Categorical(list("bac"), categories=list("bac"), 
ordered=False), + "C2": Categorical(list("bac"), categories=list("bac"), ordered=True), + } + ) + for col in ["C1", "C2"]: + result1 = df.groupby(by=col, as_index=False, observed=False).mean( + numeric_only=True + ) + result2 = ( + df.groupby(by=col, as_index=True, observed=False) + .mean(numeric_only=True) + .reset_index() + ) + expected = exp_full.reindex(columns=result1.columns) + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, expected) + + +@pytest.mark.parametrize( + "func, values", + [ + ("first", ["second", "first"]), + ("last", ["fourth", "third"]), + ("min", ["fourth", "first"]), + ("max", ["second", "third"]), + ], +) +def test_preserve_on_ordered_ops(func, values): + # gh-18502 + # preserve the categoricals on ops + c = Categorical(["first", "second", "third", "fourth"], ordered=True) + df = DataFrame({"payload": [-1, -2, -1, -2], "col": c}) + g = df.groupby("payload") + result = getattr(g, func)() + expected = DataFrame( + {"payload": [-2, -1], "col": Series(values, dtype=c.dtype)} + ).set_index("payload") + tm.assert_frame_equal(result, expected) + + # we should also preserve categorical for SeriesGroupBy + sgb = df.groupby("payload")["col"] + result = getattr(sgb, func)() + expected = expected["col"] + tm.assert_series_equal(result, expected) + + +def test_categorical_no_compress(): + data = Series(np.random.default_rng(2).standard_normal(9)) + + codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]) + cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True) + + result = data.groupby(cats, observed=False).mean() + exp = data.groupby(codes, observed=False).mean() + + exp.index = CategoricalIndex( + exp.index, categories=cats.categories, ordered=cats.ordered + ) + tm.assert_series_equal(result, exp) + + codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3]) + cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True) + + result = data.groupby(cats, observed=False).mean() + exp = data.groupby(codes, observed=False).mean().reindex(cats.categories) + exp.index = CategoricalIndex( + exp.index, categories=cats.categories, ordered=cats.ordered + ) + tm.assert_series_equal(result, exp) + + cats = Categorical( + ["a", "a", "a", "b", "b", "b", "c", "c", "c"], + categories=["a", "b", "c", "d"], + ordered=True, + ) + data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats}) + + result = data.groupby("b", observed=False).mean() + result = result["a"].values + exp = np.array([1, 2, 4, np.nan]) + tm.assert_numpy_array_equal(result, exp) + + +def test_groupby_empty_with_category(): + # GH-9614 + # test fix for when group by on None resulted in + # coercion of dtype categorical -> float + df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])}) + result = df.groupby("A").first()["B"] + expected = Series( + Categorical([], categories=["test", "train"]), + index=Series([], dtype="object", name="A"), + name="B", + ) + tm.assert_series_equal(result, expected) + + +def test_sort(): + # https://stackoverflow.com/questions/23814368/sorting-pandas- + # categorical-labels-after-groupby + # This should result in a properly sorted Series so that the plot + # has a sorted x axis + # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar') + + df = DataFrame({"value": np.random.default_rng(2).integers(0, 10000, 100)}) + labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)] + cat_labels = Categorical(labels, labels) + + df = df.sort_values(by=["value"], ascending=True) + df["value_group"] = pd.cut( + df.value, range(0, 10500, 500), 
right=False, labels=cat_labels + ) + + res = df.groupby(["value_group"], observed=False)["value_group"].count() + exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))] + exp.index = CategoricalIndex(exp.index, name=exp.index.name) + tm.assert_series_equal(res, exp) + + +@pytest.mark.parametrize("ordered", [True, False]) +def test_sort2(sort, ordered): + # dataframe groupby sort was being ignored # GH 8868 + # GH#48749 - don't change order of categories + # GH#42482 - don't sort result when sort=False, even when ordered=True + df = DataFrame( + [ + ["(7.5, 10]", 10, 10], + ["(7.5, 10]", 8, 20], + ["(2.5, 5]", 5, 30], + ["(5, 7.5]", 6, 40], + ["(2.5, 5]", 4, 50], + ["(0, 2.5]", 1, 60], + ["(5, 7.5]", 7, 70], + ], + columns=["range", "foo", "bar"], + ) + df["range"] = Categorical(df["range"], ordered=ordered) + result = df.groupby("range", sort=sort, observed=False).first() + + if sort: + data_values = [[1, 60], [5, 30], [6, 40], [10, 10]] + index_values = ["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"] + else: + data_values = [[10, 10], [5, 30], [6, 40], [1, 60]] + index_values = ["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"] + expected = DataFrame( + data_values, + columns=["foo", "bar"], + index=CategoricalIndex(index_values, name="range", ordered=ordered), + ) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("ordered", [True, False]) +def test_sort_datetimelike(sort, ordered): + # GH10505 + # GH#42482 - don't sort result when sort=False, even when ordered=True + + # use same data as test_groupby_sort_categorical, which category is + # corresponding to datetime.month + df = DataFrame( + { + "dt": [ + datetime(2011, 7, 1), + datetime(2011, 7, 1), + datetime(2011, 2, 1), + datetime(2011, 5, 1), + datetime(2011, 2, 1), + datetime(2011, 1, 1), + datetime(2011, 5, 1), + ], + "foo": [10, 8, 5, 6, 4, 1, 7], + "bar": [10, 20, 30, 40, 50, 60, 70], + }, + columns=["dt", "foo", "bar"], + ) + + # ordered=True + df["dt"] = Categorical(df["dt"], ordered=ordered) + if sort: + data_values = [[1, 60], [5, 30], [6, 40], [10, 10]] + index_values = [ + datetime(2011, 1, 1), + datetime(2011, 2, 1), + datetime(2011, 5, 1), + datetime(2011, 7, 1), + ] + else: + data_values = [[10, 10], [5, 30], [6, 40], [1, 60]] + index_values = [ + datetime(2011, 7, 1), + datetime(2011, 2, 1), + datetime(2011, 5, 1), + datetime(2011, 1, 1), + ] + expected = DataFrame( + data_values, + columns=["foo", "bar"], + index=CategoricalIndex(index_values, name="dt", ordered=ordered), + ) + result = df.groupby("dt", sort=sort, observed=False).first() + tm.assert_frame_equal(result, expected) + + +def test_empty_sum(): + # https://github.com/pandas-dev/pandas/issues/18678 + df = DataFrame( + {"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]} + ) + expected_idx = CategoricalIndex(["a", "b", "c"], name="A") + + # 0 by default + result = df.groupby("A", observed=False).B.sum() + expected = Series([3, 1, 0], expected_idx, name="B") + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df.groupby("A", observed=False).B.sum(min_count=0) + expected = Series([3, 1, 0], expected_idx, name="B") + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df.groupby("A", observed=False).B.sum(min_count=1) + expected = Series([3, 1, np.nan], expected_idx, name="B") + tm.assert_series_equal(result, expected) + + # min_count>1 + result = df.groupby("A", observed=False).B.sum(min_count=2) + expected = Series([3, np.nan, np.nan], expected_idx, name="B") + 
tm.assert_series_equal(result, expected) + + +def test_empty_prod(): + # https://github.com/pandas-dev/pandas/issues/18678 + df = DataFrame( + {"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]} + ) + + expected_idx = CategoricalIndex(["a", "b", "c"], name="A") + + # 1 by default + result = df.groupby("A", observed=False).B.prod() + expected = Series([2, 1, 1], expected_idx, name="B") + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df.groupby("A", observed=False).B.prod(min_count=0) + expected = Series([2, 1, 1], expected_idx, name="B") + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df.groupby("A", observed=False).B.prod(min_count=1) + expected = Series([2, 1, np.nan], expected_idx, name="B") + tm.assert_series_equal(result, expected) + + +def test_groupby_multiindex_categorical_datetime(): + # https://github.com/pandas-dev/pandas/issues/21390 + + df = DataFrame( + { + "key1": Categorical(list("abcbabcba")), + "key2": Categorical( + list(pd.date_range("2018-06-01 00", freq="1T", periods=3)) * 3 + ), + "values": np.arange(9), + } + ) + result = df.groupby(["key1", "key2"], observed=False).mean() + + idx = MultiIndex.from_product( + [ + Categorical(["a", "b", "c"]), + Categorical(pd.date_range("2018-06-01 00", freq="1T", periods=3)), + ], + names=["key1", "key2"], + ) + expected = DataFrame({"values": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "as_index, expected", + [ + ( + True, + Series( + index=MultiIndex.from_arrays( + [Series([1, 1, 2], dtype="category"), [1, 2, 2]], names=["a", "b"] + ), + data=[1, 2, 3], + name="x", + ), + ), + ( + False, + DataFrame( + { + "a": Series([1, 1, 2], dtype="category"), + "b": [1, 2, 2], + "x": [1, 2, 3], + } + ), + ), + ], +) +def test_groupby_agg_observed_true_single_column(as_index, expected): + # GH-23970 + df = DataFrame( + {"a": Series([1, 1, 2], dtype="category"), "b": [1, 2, 2], "x": [1, 2, 3]} + ) + + result = df.groupby(["a", "b"], as_index=as_index, observed=True)["x"].sum() + + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize("fill_value", [None, np.nan, pd.NaT]) +def test_shift(fill_value): + ct = Categorical( + ["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False + ) + expected = Categorical( + [None, "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False + ) + res = ct.shift(1, fill_value=fill_value) + tm.assert_equal(res, expected) + + +@pytest.fixture +def df_cat(df): + """ + DataFrame with multiple categorical columns and a column of integers. + Shortened so as not to contain all possible combinations of categories. + Useful for testing `observed` kwarg functionality on GroupBy objects. 
+ + Parameters + ---------- + df: DataFrame + Non-categorical, longer DataFrame from another fixture, used to derive + this one + + Returns + ------- + df_cat: DataFrame + """ + df_cat = df.copy()[:4] # leave out some groups + df_cat["A"] = df_cat["A"].astype("category") + df_cat["B"] = df_cat["B"].astype("category") + df_cat["C"] = Series([1, 2, 3, 4]) + df_cat = df_cat.drop(["D"], axis=1) + return df_cat + + +@pytest.mark.parametrize("operation", ["agg", "apply"]) +def test_seriesgroupby_observed_true(df_cat, operation): + # GH#24880 + # GH#49223 - order of results was wrong when grouping by index levels + lev_a = Index(["bar", "bar", "foo", "foo"], dtype=df_cat["A"].dtype, name="A") + lev_b = Index(["one", "three", "one", "two"], dtype=df_cat["B"].dtype, name="B") + index = MultiIndex.from_arrays([lev_a, lev_b]) + expected = Series(data=[2, 4, 1, 3], index=index, name="C").sort_index() + + grouped = df_cat.groupby(["A", "B"], observed=True)["C"] + msg = "using np.sum" if operation == "apply" else "using SeriesGroupBy.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#53425 + result = getattr(grouped, operation)(sum) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("operation", ["agg", "apply"]) +@pytest.mark.parametrize("observed", [False, None]) +def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation): + # GH 24880 + # GH#49223 - order of results was wrong when grouping by index levels + index, _ = MultiIndex.from_product( + [ + CategoricalIndex(["bar", "foo"], ordered=False), + CategoricalIndex(["one", "three", "two"], ordered=False), + ], + names=["A", "B"], + ).sortlevel() + + expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name="C") + if operation == "agg": + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = expected.fillna(0, downcast="infer") + grouped = df_cat.groupby(["A", "B"], observed=observed)["C"] + msg = "using SeriesGroupBy.sum" if operation == "agg" else "using np.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#53425 + result = getattr(grouped, operation)(sum) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "observed, index, data", + [ + ( + True, + MultiIndex.from_arrays( + [ + Index(["bar"] * 4 + ["foo"] * 4, dtype="category", name="A"), + Index( + ["one", "one", "three", "three", "one", "one", "two", "two"], + dtype="category", + name="B", + ), + Index(["min", "max"] * 4), + ] + ), + [2, 2, 4, 4, 1, 1, 3, 3], + ), + ( + False, + MultiIndex.from_product( + [ + CategoricalIndex(["bar", "foo"], ordered=False), + CategoricalIndex(["one", "three", "two"], ordered=False), + Index(["min", "max"]), + ], + names=["A", "B", None], + ), + [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3], + ), + ( + None, + MultiIndex.from_product( + [ + CategoricalIndex(["bar", "foo"], ordered=False), + CategoricalIndex(["one", "three", "two"], ordered=False), + Index(["min", "max"]), + ], + names=["A", "B", None], + ), + [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3], + ), + ], +) +def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data): + # GH 24880 + expected = Series(data=data, index=index, name="C") + result = df_cat.groupby(["A", "B"], observed=observed)["C"].apply( + lambda x: {"min": x.min(), "max": x.max()} + ) + tm.assert_series_equal(result, expected) + + +def test_groupby_categorical_series_dataframe_consistent(df_cat): + # GH 20416 + 
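+    # NOTE (editorial, not in the upstream pandas source): the two lines
+    # below assert that selecting the column before aggregating,
+    # gb["C"].mean(), matches aggregating first and selecting after,
+    # gb.mean()["C"], i.e. the SeriesGroupBy and DataFrameGroupBy paths
+    # must stay consistent for categorical groupers.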
expected = df_cat.groupby(["A", "B"], observed=False)["C"].mean() + result = df_cat.groupby(["A", "B"], observed=False).mean()["C"] + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("code", [([1, 0, 0]), ([0, 0, 0])]) +def test_groupby_categorical_axis_1(code): + # GH 13420 + df = DataFrame({"a": [1, 2, 3, 4], "b": [-1, -2, -3, -4], "c": [5, 6, 7, 8]}) + cat = Categorical.from_codes(code, categories=list("abc")) + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby(cat, axis=1, observed=False) + result = gb.mean() + msg = "The 'axis' keyword in DataFrame.groupby is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb2 = df.T.groupby(cat, axis=0, observed=False) + expected = gb2.mean().T + tm.assert_frame_equal(result, expected) + + +def test_groupby_cat_preserves_structure(observed, ordered): + # GH 28787 + df = DataFrame( + {"Name": Categorical(["Bob", "Greg"], ordered=ordered), "Item": [1, 2]}, + columns=["Name", "Item"], + ) + expected = df.copy() + + result = ( + df.groupby("Name", observed=observed) + .agg(DataFrame.sum, skipna=True) + .reset_index() + ) + + tm.assert_frame_equal(result, expected) + + +def test_get_nonexistent_category(): + # Accessing a Category that is not in the dataframe + df = DataFrame({"var": ["a", "a", "b", "b"], "val": range(4)}) + with pytest.raises(KeyError, match="'vau'"): + df.groupby("var").apply( + lambda rows: DataFrame( + {"var": [rows.iloc[-1]["var"]], "val": [rows.iloc[-1]["vau"]]} + ) + ) + + +def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed): + # GH 17605 + if reduction_func == "ngroup": + pytest.skip("ngroup is not truly a reduction") + + df = DataFrame( + { + "cat_1": Categorical(list("AABB"), categories=list("ABCD")), + "cat_2": Categorical(list("AB") * 2, categories=list("ABCD")), + "value": [0.1] * 4, + } + ) + args = get_groupby_method_args(reduction_func, df) + + expected_length = 4 if observed else 16 + + series_groupby = df.groupby(["cat_1", "cat_2"], observed=observed)["value"] + + if reduction_func == "corrwith": + # TODO: implemented SeriesGroupBy.corrwith. See GH 32293 + assert not hasattr(series_groupby, reduction_func) + return + + agg = getattr(series_groupby, reduction_func) + result = agg(*args) + + assert len(result) == expected_length + + +def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans( + reduction_func, request +): + # GH 17605 + # Tests whether the unobserved categories in the result contain 0 or NaN + + if reduction_func == "ngroup": + pytest.skip("ngroup is not truly a reduction") + + if reduction_func == "corrwith": # GH 32293 + mark = pytest.mark.xfail( + reason="TODO: implemented SeriesGroupBy.corrwith. 
See GH 32293" + ) + request.node.add_marker(mark) + + df = DataFrame( + { + "cat_1": Categorical(list("AABB"), categories=list("ABC")), + "cat_2": Categorical(list("AB") * 2, categories=list("ABC")), + "value": [0.1] * 4, + } + ) + unobserved = [tuple("AC"), tuple("BC"), tuple("CA"), tuple("CB"), tuple("CC")] + args = get_groupby_method_args(reduction_func, df) + + series_groupby = df.groupby(["cat_1", "cat_2"], observed=False)["value"] + agg = getattr(series_groupby, reduction_func) + result = agg(*args) + + zero_or_nan = _results_for_groupbys_with_missing_categories[reduction_func] + + for idx in unobserved: + val = result.loc[idx] + assert (pd.isna(zero_or_nan) and pd.isna(val)) or (val == zero_or_nan) + + # If we expect unobserved values to be zero, we also expect the dtype to be int. + # Except for .sum(). If the observed categories sum to dtype=float (i.e. their + # sums have decimals), then the zeros for the missing categories should also be + # floats. + if zero_or_nan == 0 and reduction_func != "sum": + assert np.issubdtype(result.dtype, np.integer) + + +def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func): + # GH 23865 + # GH 27075 + # Ensure that df.groupby, when 'by' is two Categorical variables, + # does not return the categories that are not in df when observed=True + if reduction_func == "ngroup": + pytest.skip("ngroup does not return the Categories on the index") + + df = DataFrame( + { + "cat_1": Categorical(list("AABB"), categories=list("ABC")), + "cat_2": Categorical(list("1111"), categories=list("12")), + "value": [0.1, 0.1, 0.1, 0.1], + } + ) + unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")] + + df_grp = df.groupby(["cat_1", "cat_2"], observed=True) + + args = get_groupby_method_args(reduction_func, df) + res = getattr(df_grp, reduction_func)(*args) + + for cat in unobserved_cats: + assert cat not in res.index + + +@pytest.mark.parametrize("observed", [False, None]) +def test_dataframe_groupby_on_2_categoricals_when_observed_is_false( + reduction_func, observed +): + # GH 23865 + # GH 27075 + # Ensure that df.groupby, when 'by' is two Categorical variables, + # returns the categories that are not in df when observed=False/None + + if reduction_func == "ngroup": + pytest.skip("ngroup does not return the Categories on the index") + + df = DataFrame( + { + "cat_1": Categorical(list("AABB"), categories=list("ABC")), + "cat_2": Categorical(list("1111"), categories=list("12")), + "value": [0.1, 0.1, 0.1, 0.1], + } + ) + unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")] + + df_grp = df.groupby(["cat_1", "cat_2"], observed=observed) + + args = get_groupby_method_args(reduction_func, df) + res = getattr(df_grp, reduction_func)(*args) + + expected = _results_for_groupbys_with_missing_categories[reduction_func] + + if expected is np.nan: + assert res.loc[unobserved_cats].isnull().all().all() + else: + assert (res.loc[unobserved_cats] == expected).all().all() + + +def test_series_groupby_categorical_aggregation_getitem(): + # GH 8870 + d = {"foo": [10, 8, 4, 1], "bar": [10, 20, 30, 40], "baz": ["d", "c", "d", "c"]} + df = DataFrame(d) + cat = pd.cut(df["foo"], np.linspace(0, 20, 5)) + df["range"] = cat + groups = df.groupby(["range", "baz"], as_index=True, sort=True, observed=False) + result = groups["foo"].agg("mean") + expected = groups.agg("mean")["foo"] + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "func, expected_values", + [(Series.nunique, [1, 1, 2]), (Series.count, [1, 2, 2])], 
+) +def test_groupby_agg_categorical_columns(func, expected_values): + # 31256 + df = DataFrame( + { + "id": [0, 1, 2, 3, 4], + "groups": [0, 1, 1, 2, 2], + "value": Categorical([0, 0, 0, 0, 1]), + } + ).set_index("id") + result = df.groupby("groups").agg(func) + + expected = DataFrame( + {"value": expected_values}, index=Index([0, 1, 2], name="groups") + ) + tm.assert_frame_equal(result, expected) + + +def test_groupby_agg_non_numeric(): + df = DataFrame({"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"])}) + expected = DataFrame({"A": [2, 1]}, index=np.array([1, 2])) + + result = df.groupby([1, 2, 1]).agg(Series.nunique) + tm.assert_frame_equal(result, expected) + + result = df.groupby([1, 2, 1]).nunique() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("func", ["first", "last"]) +def test_groupby_first_returned_categorical_instead_of_dataframe(func): + # GH 28641: groupby drops index, when grouping over categorical column with + # first/last. Renamed Categorical instead of DataFrame previously. + df = DataFrame({"A": [1997], "B": Series(["b"], dtype="category").cat.as_ordered()}) + df_grouped = df.groupby("A")["B"] + result = getattr(df_grouped, func)() + + # ordered categorical dtype should be preserved + expected = Series( + ["b"], index=Index([1997], name="A"), name="B", dtype=df["B"].dtype + ) + tm.assert_series_equal(result, expected) + + +def test_read_only_category_no_sort(): + # GH33410 + cats = np.array([1, 2]) + cats.flags.writeable = False + df = DataFrame( + {"a": [1, 3, 5, 7], "b": Categorical([1, 1, 2, 2], categories=Index(cats))} + ) + expected = DataFrame(data={"a": [2.0, 6.0]}, index=CategoricalIndex(cats, name="b")) + result = df.groupby("b", sort=False, observed=False).mean() + tm.assert_frame_equal(result, expected) + + +def test_sorted_missing_category_values(): + # GH 28597 + df = DataFrame( + { + "foo": [ + "small", + "large", + "large", + "large", + "medium", + "large", + "large", + "medium", + ], + "bar": ["C", "A", "A", "C", "A", "C", "A", "C"], + } + ) + df["foo"] = ( + df["foo"] + .astype("category") + .cat.set_categories(["tiny", "small", "medium", "large"], ordered=True) + ) + + expected = DataFrame( + { + "tiny": {"A": 0, "C": 0}, + "small": {"A": 0, "C": 1}, + "medium": {"A": 1, "C": 1}, + "large": {"A": 3, "C": 2}, + } + ) + expected = expected.rename_axis("bar", axis="index") + expected.columns = CategoricalIndex( + ["tiny", "small", "medium", "large"], + categories=["tiny", "small", "medium", "large"], + ordered=True, + name="foo", + dtype="category", + ) + + result = df.groupby(["bar", "foo"], observed=False).size().unstack() + + tm.assert_frame_equal(result, expected) + + +def test_agg_cython_category_not_implemented_fallback(): + # https://github.com/pandas-dev/pandas/issues/31450 + df = DataFrame({"col_num": [1, 1, 2, 3]}) + df["col_cat"] = df["col_num"].astype("category") + + result = df.groupby("col_num").col_cat.first() + + # ordered categorical dtype should definitely be preserved; + # this is unordered, so is less-clear case (if anything, it should raise) + expected = Series( + [1, 2, 3], + index=Index([1, 2, 3], name="col_num"), + name="col_cat", + dtype=df["col_cat"].dtype, + ) + tm.assert_series_equal(result, expected) + + result = df.groupby("col_num").agg({"col_cat": "first"}) + expected = expected.to_frame() + tm.assert_frame_equal(result, expected) + + +def test_aggregate_categorical_with_isnan(): + # GH 29837 + df = DataFrame( + { + "A": [1, 1, 1, 1], + "B": [1, 2, 1, 2], + "numerical_col": [0.1, 0.2, 
np.nan, 0.3], + "object_col": ["foo", "bar", "foo", "fee"], + "categorical_col": ["foo", "bar", "foo", "fee"], + } + ) + + df = df.astype({"categorical_col": "category"}) + + result = df.groupby(["A", "B"]).agg(lambda df: df.isna().sum()) + index = MultiIndex.from_arrays([[1, 1], [1, 2]], names=("A", "B")) + expected = DataFrame( + data={ + "numerical_col": [1, 0], + "object_col": [0, 0], + "categorical_col": [0, 0], + }, + index=index, + ) + tm.assert_frame_equal(result, expected) + + +def test_categorical_transform(): + # GH 29037 + df = DataFrame( + { + "package_id": [1, 1, 1, 2, 2, 3], + "status": [ + "Waiting", + "OnTheWay", + "Delivered", + "Waiting", + "OnTheWay", + "Waiting", + ], + } + ) + + delivery_status_type = pd.CategoricalDtype( + categories=["Waiting", "OnTheWay", "Delivered"], ordered=True + ) + df["status"] = df["status"].astype(delivery_status_type) + msg = "using SeriesGroupBy.max" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#53425 + df["last_status"] = df.groupby("package_id")["status"].transform(max) + result = df.copy() + + expected = DataFrame( + { + "package_id": [1, 1, 1, 2, 2, 3], + "status": [ + "Waiting", + "OnTheWay", + "Delivered", + "Waiting", + "OnTheWay", + "Waiting", + ], + "last_status": [ + "Delivered", + "Delivered", + "Delivered", + "OnTheWay", + "OnTheWay", + "Waiting", + ], + } + ) + + expected["status"] = expected["status"].astype(delivery_status_type) + + # .transform(max) should preserve ordered categoricals + expected["last_status"] = expected["last_status"].astype(delivery_status_type) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("func", ["first", "last"]) +def test_series_groupby_first_on_categorical_col_grouped_on_2_categoricals( + func: str, observed: bool +): + # GH 34951 + cat = Categorical([0, 0, 1, 1]) + val = [0, 1, 1, 0] + df = DataFrame({"a": cat, "b": cat, "c": val}) + + cat2 = Categorical([0, 1]) + idx = MultiIndex.from_product([cat2, cat2], names=["a", "b"]) + expected_dict = { + "first": Series([0, np.nan, np.nan, 1], idx, name="c"), + "last": Series([1, np.nan, np.nan, 0], idx, name="c"), + } + + expected = expected_dict[func] + if observed: + expected = expected.dropna().astype(np.int64) + + srs_grp = df.groupby(["a", "b"], observed=observed)["c"] + result = getattr(srs_grp, func)() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("func", ["first", "last"]) +def test_df_groupby_first_on_categorical_col_grouped_on_2_categoricals( + func: str, observed: bool +): + # GH 34951 + cat = Categorical([0, 0, 1, 1]) + val = [0, 1, 1, 0] + df = DataFrame({"a": cat, "b": cat, "c": val}) + + cat2 = Categorical([0, 1]) + idx = MultiIndex.from_product([cat2, cat2], names=["a", "b"]) + expected_dict = { + "first": Series([0, np.nan, np.nan, 1], idx, name="c"), + "last": Series([1, np.nan, np.nan, 0], idx, name="c"), + } + + expected = expected_dict[func].to_frame() + if observed: + expected = expected.dropna().astype(np.int64) + + df_grp = df.groupby(["a", "b"], observed=observed) + result = getattr(df_grp, func)() + tm.assert_frame_equal(result, expected) + + +def test_groupby_categorical_indices_unused_categories(): + # GH#38642 + df = DataFrame( + { + "key": Categorical(["b", "b", "a"], categories=["a", "b", "c"]), + "col": range(3), + } + ) + grouped = df.groupby("key", sort=False, observed=False) + result = grouped.indices + expected = { + "b": np.array([0, 1], dtype="intp"), + "a": np.array([2], dtype="intp"), + "c": np.array([], dtype="intp"), + } + assert 
result.keys() == expected.keys() + for key in result.keys(): + tm.assert_numpy_array_equal(result[key], expected[key]) + + +@pytest.mark.parametrize("func", ["first", "last"]) +def test_groupby_last_first_preserve_categoricaldtype(func): + # GH#33090 + df = DataFrame({"a": [1, 2, 3]}) + df["b"] = df["a"].astype("category") + result = getattr(df.groupby("a")["b"], func)() + expected = Series( + Categorical([1, 2, 3]), name="b", index=Index([1, 2, 3], name="a") + ) + tm.assert_series_equal(expected, result) + + +def test_groupby_categorical_observed_nunique(): + # GH#45128 + df = DataFrame({"a": [1, 2], "b": [1, 2], "c": [10, 11]}) + df = df.astype(dtype={"a": "category", "b": "category"}) + result = df.groupby(["a", "b"], observed=True).nunique()["c"] + expected = Series( + [1, 1], + index=MultiIndex.from_arrays( + [CategoricalIndex([1, 2], name="a"), CategoricalIndex([1, 2], name="b")] + ), + name="c", + ) + tm.assert_series_equal(result, expected) + + +def test_groupby_categorical_aggregate_functions(): + # GH#37275 + dtype = pd.CategoricalDtype(categories=["small", "big"], ordered=True) + df = DataFrame( + [[1, "small"], [1, "big"], [2, "small"]], columns=["grp", "description"] + ).astype({"description": dtype}) + + result = df.groupby("grp")["description"].max() + expected = Series( + ["big", "small"], + index=Index([1, 2], name="grp"), + name="description", + dtype=pd.CategoricalDtype(categories=["small", "big"], ordered=True), + ) + + tm.assert_series_equal(result, expected) + + +def test_groupby_categorical_dropna(observed, dropna): + # GH#48645 - dropna should have no impact on the result when there are no NA values + cat = Categorical([1, 2], categories=[1, 2, 3]) + df = DataFrame({"x": Categorical([1, 2], categories=[1, 2, 3]), "y": [3, 4]}) + gb = df.groupby("x", observed=observed, dropna=dropna) + result = gb.sum() + + if observed: + expected = DataFrame({"y": [3, 4]}, index=cat) + else: + index = CategoricalIndex([1, 2, 3], [1, 2, 3]) + expected = DataFrame({"y": [3, 4, 0]}, index=index) + expected.index.name = "x" + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("index_kind", ["range", "single", "multi"]) +@pytest.mark.parametrize("ordered", [True, False]) +def test_category_order_reducer( + request, as_index, sort, observed, reduction_func, index_kind, ordered +): + # GH#48749 + if ( + reduction_func in ("idxmax", "idxmin") + and not observed + and index_kind != "multi" + ): + msg = "GH#10694 - idxmax/min fail with unused categories" + request.node.add_marker(pytest.mark.xfail(reason=msg)) + elif reduction_func == "corrwith" and not as_index: + msg = "GH#49950 - corrwith with as_index=False may not have grouping column" + request.node.add_marker(pytest.mark.xfail(reason=msg)) + elif index_kind != "range" and not as_index: + pytest.skip(reason="Result doesn't have categories, nothing to test") + df = DataFrame( + { + "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered), + "b": range(4), + } + ) + if index_kind == "range": + keys = ["a"] + elif index_kind == "single": + keys = ["a"] + df = df.set_index(keys) + elif index_kind == "multi": + keys = ["a", "a2"] + df["a2"] = df["a"] + df = df.set_index(keys) + args = get_groupby_method_args(reduction_func, df) + gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed) + op_result = getattr(gb, reduction_func)(*args) + if as_index: + result = op_result.index.get_level_values("a").categories + else: + result = op_result["a"].cat.categories + expected = Index([1, 4, 3, 2]) + 
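+    # NOTE (editorial, not in the upstream pandas source): per GH#48749 the
+    # grouped result must keep the original category order [1, 4, 3, 2]
+    # rather than sorting it; the `sort` keyword reorders rows only, never
+    # the categories themselves, which is what the asserts below check.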
tm.assert_index_equal(result, expected) + + if index_kind == "multi": + result = op_result.index.get_level_values("a2").categories + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("index_kind", ["single", "multi"]) +@pytest.mark.parametrize("ordered", [True, False]) +def test_category_order_transformer( + as_index, sort, observed, transformation_func, index_kind, ordered +): + # GH#48749 + df = DataFrame( + { + "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered), + "b": range(4), + } + ) + if index_kind == "single": + keys = ["a"] + df = df.set_index(keys) + elif index_kind == "multi": + keys = ["a", "a2"] + df["a2"] = df["a"] + df = df.set_index(keys) + args = get_groupby_method_args(transformation_func, df) + gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed) + op_result = getattr(gb, transformation_func)(*args) + result = op_result.index.get_level_values("a").categories + expected = Index([1, 4, 3, 2]) + tm.assert_index_equal(result, expected) + + if index_kind == "multi": + result = op_result.index.get_level_values("a2").categories + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("index_kind", ["range", "single", "multi"]) +@pytest.mark.parametrize("method", ["head", "tail"]) +@pytest.mark.parametrize("ordered", [True, False]) +def test_category_order_head_tail( + as_index, sort, observed, method, index_kind, ordered +): + # GH#48749 + df = DataFrame( + { + "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered), + "b": range(4), + } + ) + if index_kind == "range": + keys = ["a"] + elif index_kind == "single": + keys = ["a"] + df = df.set_index(keys) + elif index_kind == "multi": + keys = ["a", "a2"] + df["a2"] = df["a"] + df = df.set_index(keys) + gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed) + op_result = getattr(gb, method)() + if index_kind == "range": + result = op_result["a"].cat.categories + else: + result = op_result.index.get_level_values("a").categories + expected = Index([1, 4, 3, 2]) + tm.assert_index_equal(result, expected) + + if index_kind == "multi": + result = op_result.index.get_level_values("a2").categories + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("index_kind", ["range", "single", "multi"]) +@pytest.mark.parametrize("method", ["apply", "agg", "transform"]) +@pytest.mark.parametrize("ordered", [True, False]) +def test_category_order_apply(as_index, sort, observed, method, index_kind, ordered): + # GH#48749 + if (method == "transform" and index_kind == "range") or ( + not as_index and index_kind != "range" + ): + pytest.skip("No categories in result, nothing to test") + df = DataFrame( + { + "a": Categorical([2, 1, 2, 3], categories=[1, 4, 3, 2], ordered=ordered), + "b": range(4), + } + ) + if index_kind == "range": + keys = ["a"] + elif index_kind == "single": + keys = ["a"] + df = df.set_index(keys) + elif index_kind == "multi": + keys = ["a", "a2"] + df["a2"] = df["a"] + df = df.set_index(keys) + gb = df.groupby(keys, as_index=as_index, sort=sort, observed=observed) + op_result = getattr(gb, method)(lambda x: x.sum(numeric_only=True)) + if (method == "transform" or not as_index) and index_kind == "range": + result = op_result["a"].cat.categories + else: + result = op_result.index.get_level_values("a").categories + expected = Index([1, 4, 3, 2]) + tm.assert_index_equal(result, expected) + + if index_kind == "multi": + result = op_result.index.get_level_values("a2").categories + tm.assert_index_equal(result, 
expected) + + +@pytest.mark.parametrize("index_kind", ["range", "single", "multi"]) +def test_many_categories(as_index, sort, index_kind, ordered): + # GH#48749 - Test when the grouper has many categories + if index_kind != "range" and not as_index: + pytest.skip(reason="Result doesn't have categories, nothing to test") + categories = np.arange(9999, -1, -1) + grouper = Categorical([2, 1, 2, 3], categories=categories, ordered=ordered) + df = DataFrame({"a": grouper, "b": range(4)}) + if index_kind == "range": + keys = ["a"] + elif index_kind == "single": + keys = ["a"] + df = df.set_index(keys) + elif index_kind == "multi": + keys = ["a", "a2"] + df["a2"] = df["a"] + df = df.set_index(keys) + gb = df.groupby(keys, as_index=as_index, sort=sort, observed=True) + result = gb.sum() + + # Test is setup so that data and index are the same values + data = [3, 2, 1] if sort else [2, 1, 3] + + index = CategoricalIndex( + data, categories=grouper.categories, ordered=ordered, name="a" + ) + if as_index: + expected = DataFrame({"b": data}) + if index_kind == "multi": + expected.index = MultiIndex.from_frame(DataFrame({"a": index, "a2": index})) + else: + expected.index = index + elif index_kind == "multi": + expected = DataFrame({"a": Series(index), "a2": Series(index), "b": data}) + else: + expected = DataFrame({"a": Series(index), "b": data}) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("cat_columns", ["a", "b", ["a", "b"]]) +@pytest.mark.parametrize("keys", ["a", "b", ["a", "b"]]) +def test_groupby_default_depr(cat_columns, keys): + # GH#43999 + df = DataFrame({"a": [1, 1, 2, 3], "b": [4, 5, 6, 7]}) + df[cat_columns] = df[cat_columns].astype("category") + msg = "The default of observed=False is deprecated" + klass = FutureWarning if set(cat_columns) & set(keys) else None + with tm.assert_produces_warning(klass, match=msg): + df.groupby(keys) + + +@pytest.mark.parametrize("test_series", [True, False]) +@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]]) +def test_agg_list(request, as_index, observed, reduction_func, test_series, keys): + # GH#52760 + if test_series and reduction_func == "corrwith": + assert not hasattr(SeriesGroupBy, "corrwith") + pytest.skip("corrwith not implemented for SeriesGroupBy") + elif reduction_func == "corrwith": + msg = "GH#32293: attempts to call SeriesGroupBy.corrwith" + request.node.add_marker(pytest.mark.xfail(reason=msg)) + elif ( + reduction_func == "nunique" + and not test_series + and len(keys) != 1 + and not observed + and not as_index + ): + msg = "GH#52848 - raises a ValueError" + request.node.add_marker(pytest.mark.xfail(reason=msg)) + + df = DataFrame({"a1": [0, 0, 1], "a2": [2, 3, 3], "b": [4, 5, 6]}) + df = df.astype({"a1": "category", "a2": "category"}) + if "a2" not in keys: + df = df.drop(columns="a2") + gb = df.groupby(by=keys, as_index=as_index, observed=observed) + if test_series: + gb = gb["b"] + args = get_groupby_method_args(reduction_func, df) + + result = gb.agg([reduction_func], *args) + expected = getattr(gb, reduction_func)(*args) + + if as_index and (test_series or reduction_func == "size"): + expected = expected.to_frame(reduction_func) + if not test_series: + expected.columns = MultiIndex.from_tuples( + [(ind, "") for ind in expected.columns[:-1]] + [("b", reduction_func)] + ) + elif not as_index: + expected.columns = keys + [reduction_func] + + tm.assert_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_counting.py 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_counting.py new file mode 100644 index 00000000..885e7848 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_counting.py @@ -0,0 +1,392 @@ +from itertools import product +from string import ascii_lowercase + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + MultiIndex, + Period, + Series, + Timedelta, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestCounting: + def test_cumcount(self): + df = DataFrame([["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"]) + g = df.groupby("A") + sg = g.A + + expected = Series([0, 1, 2, 0, 3]) + + tm.assert_series_equal(expected, g.cumcount()) + tm.assert_series_equal(expected, sg.cumcount()) + + def test_cumcount_empty(self): + ge = DataFrame().groupby(level=0) + se = Series(dtype=object).groupby(level=0) + + # edge case, as this is usually considered float + e = Series(dtype="int64") + + tm.assert_series_equal(e, ge.cumcount()) + tm.assert_series_equal(e, se.cumcount()) + + def test_cumcount_dupe_index(self): + df = DataFrame( + [["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=[0] * 5 + ) + g = df.groupby("A") + sg = g.A + + expected = Series([0, 1, 2, 0, 3], index=[0] * 5) + + tm.assert_series_equal(expected, g.cumcount()) + tm.assert_series_equal(expected, sg.cumcount()) + + def test_cumcount_mi(self): + mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]]) + df = DataFrame([["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=mi) + g = df.groupby("A") + sg = g.A + + expected = Series([0, 1, 2, 0, 3], index=mi) + + tm.assert_series_equal(expected, g.cumcount()) + tm.assert_series_equal(expected, sg.cumcount()) + + def test_cumcount_groupby_not_col(self): + df = DataFrame( + [["a"], ["a"], ["a"], ["b"], ["a"]], columns=["A"], index=[0] * 5 + ) + g = df.groupby([0, 0, 0, 1, 0]) + sg = g.A + + expected = Series([0, 1, 2, 0, 3], index=[0] * 5) + + tm.assert_series_equal(expected, g.cumcount()) + tm.assert_series_equal(expected, sg.cumcount()) + + def test_ngroup(self): + df = DataFrame({"A": list("aaaba")}) + g = df.groupby("A") + sg = g.A + + expected = Series([0, 0, 0, 1, 0]) + + tm.assert_series_equal(expected, g.ngroup()) + tm.assert_series_equal(expected, sg.ngroup()) + + def test_ngroup_distinct(self): + df = DataFrame({"A": list("abcde")}) + g = df.groupby("A") + sg = g.A + + expected = Series(range(5), dtype="int64") + + tm.assert_series_equal(expected, g.ngroup()) + tm.assert_series_equal(expected, sg.ngroup()) + + def test_ngroup_one_group(self): + df = DataFrame({"A": [0] * 5}) + g = df.groupby("A") + sg = g.A + + expected = Series([0] * 5) + + tm.assert_series_equal(expected, g.ngroup()) + tm.assert_series_equal(expected, sg.ngroup()) + + def test_ngroup_empty(self): + ge = DataFrame().groupby(level=0) + se = Series(dtype=object).groupby(level=0) + + # edge case, as this is usually considered float + e = Series(dtype="int64") + + tm.assert_series_equal(e, ge.ngroup()) + tm.assert_series_equal(e, se.ngroup()) + + def test_ngroup_series_matches_frame(self): + df = DataFrame({"A": list("aaaba")}) + s = Series(list("aaaba")) + + tm.assert_series_equal(df.groupby(s).ngroup(), s.groupby(s).ngroup()) + + def test_ngroup_dupe_index(self): + df = DataFrame({"A": list("aaaba")}, index=[0] * 5) + g = df.groupby("A") + sg = g.A + + expected = Series([0, 0, 0, 1, 0], index=[0] * 5) + + tm.assert_series_equal(expected, g.ngroup()) + tm.assert_series_equal(expected, sg.ngroup()) 
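+    # NOTE (editorial, illustrative and not in the upstream pandas source):
+    # ngroup() numbers the *groups* consecutively in iteration order, while
+    # cumcount() numbers the *rows within* each group. For list("aaaba")
+    # the surrounding tests therefore expect
+    #   g.ngroup()   -> [0, 0, 0, 1, 0]
+    #   g.cumcount() -> [0, 1, 2, 0, 3]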
+
+    def test_ngroup_mi(self):
+        mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
+        df = DataFrame({"A": list("aaaba")}, index=mi)
+        g = df.groupby("A")
+        sg = g.A
+        expected = Series([0, 0, 0, 1, 0], index=mi)
+
+        tm.assert_series_equal(expected, g.ngroup())
+        tm.assert_series_equal(expected, sg.ngroup())
+
+    def test_ngroup_groupby_not_col(self):
+        df = DataFrame({"A": list("aaaba")}, index=[0] * 5)
+        g = df.groupby([0, 0, 0, 1, 0])
+        sg = g.A
+
+        expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
+
+        tm.assert_series_equal(expected, g.ngroup())
+        tm.assert_series_equal(expected, sg.ngroup())
+
+    def test_ngroup_descending(self):
+        df = DataFrame(["a", "a", "b", "a", "b"], columns=["A"])
+        g = df.groupby(["A"])
+
+        ascending = Series([0, 0, 1, 0, 1])
+        descending = Series([1, 1, 0, 1, 0])
+
+        tm.assert_series_equal(descending, (g.ngroups - 1) - ascending)
+        tm.assert_series_equal(ascending, g.ngroup(ascending=True))
+        tm.assert_series_equal(descending, g.ngroup(ascending=False))
+
+    def test_ngroup_matches_cumcount(self):
+        # verify one manually-worked out case works
+        df = DataFrame(
+            [["a", "x"], ["a", "y"], ["b", "x"], ["a", "x"], ["b", "y"]],
+            columns=["A", "X"],
+        )
+        g = df.groupby(["A", "X"])
+        g_ngroup = g.ngroup()
+        g_cumcount = g.cumcount()
+        expected_ngroup = Series([0, 1, 2, 0, 3])
+        expected_cumcount = Series([0, 0, 0, 1, 0])
+
+        tm.assert_series_equal(g_ngroup, expected_ngroup)
+        tm.assert_series_equal(g_cumcount, expected_cumcount)
+
+    def test_ngroup_cumcount_pair(self):
+        # brute force comparison for all small series
+        for p in product(range(3), repeat=4):
+            df = DataFrame({"a": p})
+            g = df.groupby(["a"])
+
+            order = sorted(set(p))
+            ngroupd = [order.index(val) for val in p]
+            cumcounted = [p[:i].count(val) for i, val in enumerate(p)]
+
+            tm.assert_series_equal(g.ngroup(), Series(ngroupd))
+            tm.assert_series_equal(g.cumcount(), Series(cumcounted))
+
+    def test_ngroup_respects_groupby_order(self, sort):
+        df = DataFrame({"a": np.random.default_rng(2).choice(list("abcdef"), 100)})
+        g = df.groupby("a", sort=sort)
+        df["group_id"] = -1
+        df["group_index"] = -1
+
+        for i, (_, group) in enumerate(g):
+            df.loc[group.index, "group_id"] = i
+            for j, ind in enumerate(group.index):
+                df.loc[ind, "group_index"] = j
+
+        tm.assert_series_equal(Series(df["group_id"].values), g.ngroup())
+        tm.assert_series_equal(Series(df["group_index"].values), g.cumcount())
+
+    @pytest.mark.parametrize(
+        "datetimelike",
+        [
+            [Timestamp(f"2016-05-{i:02d} 20:09:25+00:00") for i in range(1, 4)],
+            [Timestamp(f"2016-05-{i:02d} 20:09:25") for i in range(1, 4)],
+            [Timestamp(f"2016-05-{i:02d} 20:09:25", tz="UTC") for i in range(1, 4)],
+            [Timedelta(x, unit="h") for x in range(1, 4)],
+            [Period(freq="2W", year=2017, month=x) for x in range(1, 4)],
+        ],
+    )
+    def test_count_with_datetimelike(self, datetimelike):
+        # test for #13393, where DataFrameGroupBy.count() fails
+        # when counting a datetimelike column.
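+        # NOTE (editorial, not in the upstream pandas source): count()
+        # returns the number of non-null values per column in each group,
+        # so the three rows below yield counts of 2 for "a" and 1 for "b".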
+ + df = DataFrame({"x": ["a", "a", "b"], "y": datetimelike}) + res = df.groupby("x").count() + expected = DataFrame({"y": [2, 1]}, index=["a", "b"]) + expected.index.name = "x" + tm.assert_frame_equal(expected, res) + + def test_count_with_only_nans_in_first_group(self): + # GH21956 + df = DataFrame({"A": [np.nan, np.nan], "B": ["a", "b"], "C": [1, 2]}) + result = df.groupby(["A", "B"]).C.count() + mi = MultiIndex(levels=[[], ["a", "b"]], codes=[[], []], names=["A", "B"]) + expected = Series([], index=mi, dtype=np.int64, name="C") + tm.assert_series_equal(result, expected, check_index_type=False) + + def test_count_groupby_column_with_nan_in_groupby_column(self): + # https://github.com/pandas-dev/pandas/issues/32841 + df = DataFrame({"A": [1, 1, 1, 1, 1], "B": [5, 4, np.nan, 3, 0]}) + res = df.groupby(["B"]).count() + expected = DataFrame( + index=Index([0.0, 3.0, 4.0, 5.0], name="B"), data={"A": [1, 1, 1, 1]} + ) + tm.assert_frame_equal(expected, res) + + def test_groupby_count_dateparseerror(self): + dr = date_range(start="1/1/2012", freq="5min", periods=10) + + # BAD Example, datetimes first + ser = Series(np.arange(10), index=[dr, np.arange(10)]) + grouped = ser.groupby(lambda x: x[1] % 2 == 0) + result = grouped.count() + + ser = Series(np.arange(10), index=[np.arange(10), dr]) + grouped = ser.groupby(lambda x: x[0] % 2 == 0) + expected = grouped.count() + + tm.assert_series_equal(result, expected) + + +def test_groupby_timedelta_cython_count(): + df = DataFrame( + {"g": list("ab" * 2), "delta": np.arange(4).astype("timedelta64[ns]")} + ) + expected = Series([2, 2], index=Index(["a", "b"], name="g"), name="delta") + result = df.groupby("g").delta.count() + tm.assert_series_equal(expected, result) + + +def test_count(): + n = 1 << 15 + dr = date_range("2015-08-30", periods=n // 10, freq="T") + + df = DataFrame( + { + "1st": np.random.default_rng(2).choice(list(ascii_lowercase), n), + "2nd": np.random.default_rng(2).integers(0, 5, n), + "3rd": np.random.default_rng(2).standard_normal(n).round(3), + "4th": np.random.default_rng(2).integers(-10, 10, n), + "5th": np.random.default_rng(2).choice(dr, n), + "6th": np.random.default_rng(2).standard_normal(n).round(3), + "7th": np.random.default_rng(2).standard_normal(n).round(3), + "8th": np.random.default_rng(2).choice(dr, n) + - np.random.default_rng(2).choice(dr, 1), + "9th": np.random.default_rng(2).choice(list(ascii_lowercase), n), + } + ) + + for col in df.columns.drop(["1st", "2nd", "4th"]): + df.loc[np.random.default_rng(2).choice(n, n // 10), col] = np.nan + + df["9th"] = df["9th"].astype("category") + + for key in ["1st", "2nd", ["1st", "2nd"]]: + left = df.groupby(key).count() + right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1) + tm.assert_frame_equal(left, right) + + +def test_count_non_nulls(): + # GH#5610 + # count counts non-nulls + df = DataFrame( + [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, np.nan]], + columns=["A", "B", "C"], + ) + + count_as = df.groupby("A").count() + count_not_as = df.groupby("A", as_index=False).count() + + expected = DataFrame([[1, 2], [0, 0]], columns=["B", "C"], index=[1, 3]) + expected.index.name = "A" + tm.assert_frame_equal(count_not_as, expected.reset_index()) + tm.assert_frame_equal(count_as, expected) + + count_B = df.groupby("A")["B"].count() + tm.assert_series_equal(count_B, expected["B"]) + + +def test_count_object(): + df = DataFrame({"a": ["a"] * 3 + ["b"] * 3, "c": [2] * 3 + [3] * 3}) + result = df.groupby("c").a.count() + expected = Series([3, 3], index=Index([2, 3], 
name="c"), name="a") + tm.assert_series_equal(result, expected) + + df = DataFrame({"a": ["a", np.nan, np.nan] + ["b"] * 3, "c": [2] * 3 + [3] * 3}) + result = df.groupby("c").a.count() + expected = Series([1, 3], index=Index([2, 3], name="c"), name="a") + tm.assert_series_equal(result, expected) + + +def test_count_cross_type(): + # GH8169 + # Set float64 dtype to avoid upcast when setting nan below + vals = np.hstack( + ( + np.random.default_rng(2).integers(0, 5, (100, 2)), + np.random.default_rng(2).integers(0, 2, (100, 2)), + ) + ).astype("float64") + + df = DataFrame(vals, columns=["a", "b", "c", "d"]) + df[df == 2] = np.nan + expected = df.groupby(["c", "d"]).count() + + for t in ["float32", "object"]: + df["a"] = df["a"].astype(t) + df["b"] = df["b"].astype(t) + result = df.groupby(["c", "d"]).count() + tm.assert_frame_equal(result, expected) + + +def test_lower_int_prec_count(): + df = DataFrame( + { + "a": np.array([0, 1, 2, 100], np.int8), + "b": np.array([1, 2, 3, 6], np.uint32), + "c": np.array([4, 5, 6, 8], np.int16), + "grp": list("ab" * 2), + } + ) + result = df.groupby("grp").count() + expected = DataFrame( + {"a": [2, 2], "b": [2, 2], "c": [2, 2]}, index=Index(list("ab"), name="grp") + ) + tm.assert_frame_equal(result, expected) + + +def test_count_uses_size_on_exception(): + class RaisingObjectException(Exception): + pass + + class RaisingObject: + def __init__(self, msg="I will raise inside Cython") -> None: + super().__init__() + self.msg = msg + + def __eq__(self, other): + # gets called in Cython to check that raising calls the method + raise RaisingObjectException(self.msg) + + df = DataFrame({"a": [RaisingObject() for _ in range(4)], "grp": list("ab" * 2)}) + result = df.groupby("grp").count() + expected = DataFrame({"a": [2, 2]}, index=Index(list("ab"), name="grp")) + tm.assert_frame_equal(result, expected) + + +def test_count_arrow_string_array(any_string_dtype): + # GH#54751 + pytest.importorskip("pyarrow") + df = DataFrame( + {"a": [1, 2, 3], "b": Series(["a", "b", "a"], dtype=any_string_dtype)} + ) + result = df.groupby("a").count() + expected = DataFrame({"b": 1}, index=Index([1, 2, 3], name="a")) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_filters.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_filters.py new file mode 100644 index 00000000..0bb7ad4f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_filters.py @@ -0,0 +1,632 @@ +from string import ascii_lowercase + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Series, + Timestamp, +) +import pandas._testing as tm + + +def test_filter_series(): + s = Series([1, 3, 20, 5, 22, 24, 7]) + expected_odd = Series([1, 3, 5, 7], index=[0, 1, 3, 6]) + expected_even = Series([20, 22, 24], index=[2, 4, 5]) + grouper = s.apply(lambda x: x % 2) + grouped = s.groupby(grouper) + tm.assert_series_equal(grouped.filter(lambda x: x.mean() < 10), expected_odd) + tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 10), expected_even) + # Test dropna=False. 
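+    # NOTE (editorial, not in the upstream pandas source): with
+    # dropna=False, rows belonging to groups that fail the predicate are
+    # kept as NaN instead of dropped, hence the reindex back to the
+    # original index in the expected values below.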
+ tm.assert_series_equal( + grouped.filter(lambda x: x.mean() < 10, dropna=False), + expected_odd.reindex(s.index), + ) + tm.assert_series_equal( + grouped.filter(lambda x: x.mean() > 10, dropna=False), + expected_even.reindex(s.index), + ) + + +def test_filter_single_column_df(): + df = DataFrame([1, 3, 20, 5, 22, 24, 7]) + expected_odd = DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6]) + expected_even = DataFrame([20, 22, 24], index=[2, 4, 5]) + grouper = df[0].apply(lambda x: x % 2) + grouped = df.groupby(grouper) + tm.assert_frame_equal(grouped.filter(lambda x: x.mean() < 10), expected_odd) + tm.assert_frame_equal(grouped.filter(lambda x: x.mean() > 10), expected_even) + # Test dropna=False. + tm.assert_frame_equal( + grouped.filter(lambda x: x.mean() < 10, dropna=False), + expected_odd.reindex(df.index), + ) + tm.assert_frame_equal( + grouped.filter(lambda x: x.mean() > 10, dropna=False), + expected_even.reindex(df.index), + ) + + +def test_filter_multi_column_df(): + df = DataFrame({"A": [1, 12, 12, 1], "B": [1, 1, 1, 1]}) + grouper = df["A"].apply(lambda x: x % 2) + grouped = df.groupby(grouper) + expected = DataFrame({"A": [12, 12], "B": [1, 1]}, index=[1, 2]) + tm.assert_frame_equal( + grouped.filter(lambda x: x["A"].sum() - x["B"].sum() > 10), expected + ) + + +def test_filter_mixed_df(): + df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()}) + grouper = df["A"].apply(lambda x: x % 2) + grouped = df.groupby(grouper) + expected = DataFrame({"A": [12, 12], "B": ["b", "c"]}, index=[1, 2]) + tm.assert_frame_equal(grouped.filter(lambda x: x["A"].sum() > 10), expected) + + +def test_filter_out_all_groups(): + s = Series([1, 3, 20, 5, 22, 24, 7]) + grouper = s.apply(lambda x: x % 2) + grouped = s.groupby(grouper) + tm.assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]]) + df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()}) + grouper = df["A"].apply(lambda x: x % 2) + grouped = df.groupby(grouper) + tm.assert_frame_equal(grouped.filter(lambda x: x["A"].sum() > 1000), df.loc[[]]) + + +def test_filter_out_no_groups(): + s = Series([1, 3, 20, 5, 22, 24, 7]) + grouper = s.apply(lambda x: x % 2) + grouped = s.groupby(grouper) + filtered = grouped.filter(lambda x: x.mean() > 0) + tm.assert_series_equal(filtered, s) + df = DataFrame({"A": [1, 12, 12, 1], "B": "a b c d".split()}) + grouper = df["A"].apply(lambda x: x % 2) + grouped = df.groupby(grouper) + filtered = grouped.filter(lambda x: x["A"].mean() > 0) + tm.assert_frame_equal(filtered, df) + + +def test_filter_out_all_groups_in_df(): + # GH12768 + df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]}) + res = df.groupby("a") + res = res.filter(lambda x: x["b"].sum() > 5, dropna=False) + expected = DataFrame({"a": [np.nan] * 3, "b": [np.nan] * 3}) + tm.assert_frame_equal(expected, res) + + df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 0]}) + res = df.groupby("a") + res = res.filter(lambda x: x["b"].sum() > 5, dropna=True) + expected = DataFrame({"a": [], "b": []}, dtype="int64") + tm.assert_frame_equal(expected, res) + + +def test_filter_condition_raises(): + def raise_if_sum_is_zero(x): + if x.sum() == 0: + raise ValueError + return x.sum() > 0 + + s = Series([-1, 0, 1, 2]) + grouper = s.apply(lambda x: x % 2) + grouped = s.groupby(grouper) + msg = "the filter must return a boolean result" + with pytest.raises(TypeError, match=msg): + grouped.filter(raise_if_sum_is_zero) + + +def test_filter_with_axis_in_groupby(): + # issue 11041 + index = pd.MultiIndex.from_product([range(10), [0, 1]]) + data = 
DataFrame(np.arange(100).reshape(-1, 20), columns=index, dtype="int64") + + msg = "DataFrame.groupby with axis=1" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = data.groupby(level=0, axis=1) + result = gb.filter(lambda x: x.iloc[0, 0] > 10) + expected = data.iloc[:, 12:20] + tm.assert_frame_equal(result, expected) + + +def test_filter_bad_shapes(): + df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)}) + s = df["B"] + g_df = df.groupby("B") + g_s = s.groupby(s) + + f = lambda x: x + msg = "filter function returned a DataFrame, but expected a scalar bool" + with pytest.raises(TypeError, match=msg): + g_df.filter(f) + msg = "the filter must return a boolean result" + with pytest.raises(TypeError, match=msg): + g_s.filter(f) + + f = lambda x: x == 1 + msg = "filter function returned a DataFrame, but expected a scalar bool" + with pytest.raises(TypeError, match=msg): + g_df.filter(f) + msg = "the filter must return a boolean result" + with pytest.raises(TypeError, match=msg): + g_s.filter(f) + + f = lambda x: np.outer(x, x) + msg = "can't multiply sequence by non-int of type 'str'" + with pytest.raises(TypeError, match=msg): + g_df.filter(f) + msg = "the filter must return a boolean result" + with pytest.raises(TypeError, match=msg): + g_s.filter(f) + + +def test_filter_nan_is_false(): + df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)}) + s = df["B"] + g_df = df.groupby(df["B"]) + g_s = s.groupby(s) + + f = lambda x: np.nan + tm.assert_frame_equal(g_df.filter(f), df.loc[[]]) + tm.assert_series_equal(g_s.filter(f), s[[]]) + + +def test_filter_pdna_is_false(): + # in particular, dont raise in filter trying to call bool(pd.NA) + df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)}) + ser = df["B"] + g_df = df.groupby(df["B"]) + g_s = ser.groupby(ser) + + func = lambda x: pd.NA + res = g_df.filter(func) + tm.assert_frame_equal(res, df.loc[[]]) + res = g_s.filter(func) + tm.assert_series_equal(res, ser[[]]) + + +def test_filter_against_workaround(): + # Series of ints + s = Series(np.random.default_rng(2).integers(0, 100, 1000)) + grouper = s.apply(lambda x: np.round(x, -1)) + grouped = s.groupby(grouper) + f = lambda x: x.mean() > 10 + + old_way = s[grouped.transform(f).astype("bool")] + new_way = grouped.filter(f) + tm.assert_series_equal(new_way.sort_values(), old_way.sort_values()) + + # Series of floats + s = 100 * Series(np.random.default_rng(2).random(1000)) + grouper = s.apply(lambda x: np.round(x, -1)) + grouped = s.groupby(grouper) + f = lambda x: x.mean() > 10 + old_way = s[grouped.transform(f).astype("bool")] + new_way = grouped.filter(f) + tm.assert_series_equal(new_way.sort_values(), old_way.sort_values()) + + # Set up DataFrame of ints, floats, strings. + letters = np.array(list(ascii_lowercase)) + N = 1000 + random_letters = letters.take( + np.random.default_rng(2).integers(0, 26, N, dtype=int) + ) + df = DataFrame( + { + "ints": Series(np.random.default_rng(2).integers(0, 100, N)), + "floats": N / 10 * Series(np.random.default_rng(2).random(N)), + "letters": Series(random_letters), + } + ) + + # Group by ints; filter on floats. + grouped = df.groupby("ints") + old_way = df[grouped.floats.transform(lambda x: x.mean() > N / 20).astype("bool")] + new_way = grouped.filter(lambda x: x["floats"].mean() > N / 20) + tm.assert_frame_equal(new_way, old_way) + + # Group by floats (rounded); filter on strings. 
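+    # NOTE (editorial, not in the upstream pandas source): each block in
+    # this test checks the same identity, namely that grouped.filter(f)
+    # matches the older workaround of boolean-masking with
+    # grouped.transform(f).astype("bool").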
+ grouper = df.floats.apply(lambda x: np.round(x, -1)) + grouped = df.groupby(grouper) + old_way = df[grouped.letters.transform(lambda x: len(x) < N / 10).astype("bool")] + new_way = grouped.filter(lambda x: len(x.letters) < N / 10) + tm.assert_frame_equal(new_way, old_way) + + # Group by strings; filter on ints. + grouped = df.groupby("letters") + old_way = df[grouped.ints.transform(lambda x: x.mean() > N / 20).astype("bool")] + new_way = grouped.filter(lambda x: x["ints"].mean() > N / 20) + tm.assert_frame_equal(new_way, old_way) + + +def test_filter_using_len(): + # BUG GH4447 + df = DataFrame({"A": np.arange(8), "B": list("aabbbbcc"), "C": np.arange(8)}) + grouped = df.groupby("B") + actual = grouped.filter(lambda x: len(x) > 2) + expected = DataFrame( + {"A": np.arange(2, 6), "B": list("bbbb"), "C": np.arange(2, 6)}, + index=np.arange(2, 6, dtype=np.int64), + ) + tm.assert_frame_equal(actual, expected) + + actual = grouped.filter(lambda x: len(x) > 4) + expected = df.loc[[]] + tm.assert_frame_equal(actual, expected) + + # Series have always worked properly, but we'll test anyway. + s = df["B"] + grouped = s.groupby(s) + actual = grouped.filter(lambda x: len(x) > 2) + expected = Series(4 * ["b"], index=np.arange(2, 6, dtype=np.int64), name="B") + tm.assert_series_equal(actual, expected) + + actual = grouped.filter(lambda x: len(x) > 4) + expected = s[[]] + tm.assert_series_equal(actual, expected) + + +def test_filter_maintains_ordering(): + # Simple case: index is sequential. #4621 + df = DataFrame( + {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]} + ) + s = df["pid"] + grouped = df.groupby("tag") + actual = grouped.filter(lambda x: len(x) > 1) + expected = df.iloc[[1, 2, 4, 7]] + tm.assert_frame_equal(actual, expected) + + grouped = s.groupby(df["tag"]) + actual = grouped.filter(lambda x: len(x) > 1) + expected = s.iloc[[1, 2, 4, 7]] + tm.assert_series_equal(actual, expected) + + # Now index is sequentially decreasing. + df.index = np.arange(len(df) - 1, -1, -1) + s = df["pid"] + grouped = df.groupby("tag") + actual = grouped.filter(lambda x: len(x) > 1) + expected = df.iloc[[1, 2, 4, 7]] + tm.assert_frame_equal(actual, expected) + + grouped = s.groupby(df["tag"]) + actual = grouped.filter(lambda x: len(x) > 1) + expected = s.iloc[[1, 2, 4, 7]] + tm.assert_series_equal(actual, expected) + + # Index is shuffled. 
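+    # NOTE (editorial, not in the upstream pandas source): filter() returns
+    # the surviving rows in their original positional order, so the
+    # expectation stays iloc[[1, 2, 4, 7]] however the index is permuted.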
+ SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3] + df.index = df.index[SHUFFLED] + s = df["pid"] + grouped = df.groupby("tag") + actual = grouped.filter(lambda x: len(x) > 1) + expected = df.iloc[[1, 2, 4, 7]] + tm.assert_frame_equal(actual, expected) + + grouped = s.groupby(df["tag"]) + actual = grouped.filter(lambda x: len(x) > 1) + expected = s.iloc[[1, 2, 4, 7]] + tm.assert_series_equal(actual, expected) + + +def test_filter_multiple_timestamp(): + # GH 10114 + df = DataFrame( + { + "A": np.arange(5, dtype="int64"), + "B": ["foo", "bar", "foo", "bar", "bar"], + "C": Timestamp("20130101"), + } + ) + + grouped = df.groupby(["B", "C"]) + + result = grouped["A"].filter(lambda x: True) + tm.assert_series_equal(df["A"], result) + + result = grouped["A"].transform(len) + expected = Series([2, 3, 2, 3, 3], name="A") + tm.assert_series_equal(result, expected) + + result = grouped.filter(lambda x: True) + tm.assert_frame_equal(df, result) + + result = grouped.transform("sum") + expected = DataFrame({"A": [2, 8, 2, 8, 8]}) + tm.assert_frame_equal(result, expected) + + result = grouped.transform(len) + expected = DataFrame({"A": [2, 3, 2, 3, 3]}) + tm.assert_frame_equal(result, expected) + + +def test_filter_and_transform_with_non_unique_int_index(): + # GH4620 + index = [1, 1, 1, 2, 1, 1, 0, 1] + df = DataFrame( + {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]}, + index=index, + ) + grouped_df = df.groupby("tag") + ser = df["pid"] + grouped_ser = ser.groupby(df["tag"]) + expected_indexes = [1, 2, 4, 7] + + # Filter DataFrame + actual = grouped_df.filter(lambda x: len(x) > 1) + expected = df.iloc[expected_indexes] + tm.assert_frame_equal(actual, expected) + + actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False) + # Cast to avoid upcast when setting nan below + expected = df.copy().astype("float64") + expected.iloc[[0, 3, 5, 6]] = np.nan + tm.assert_frame_equal(actual, expected) + + # Filter Series + actual = grouped_ser.filter(lambda x: len(x) > 1) + expected = ser.take(expected_indexes) + tm.assert_series_equal(actual, expected) + + actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False) + expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid") + # ^ made manually because this can get confusing! 
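+    # NOTE (editorial, not in the upstream pandas source): the NaNs in the
+    # manual expected above sit at positions 0, 3, 5 and 6, the rows whose
+    # "tag" group has a single member and is therefore filtered out by
+    # len(x) > 1.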
+ tm.assert_series_equal(actual, expected) + + # Transform Series + actual = grouped_ser.transform(len) + expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid") + tm.assert_series_equal(actual, expected) + + # Transform (a column from) DataFrameGroupBy + actual = grouped_df.pid.transform(len) + tm.assert_series_equal(actual, expected) + + +def test_filter_and_transform_with_multiple_non_unique_int_index(): + # GH4620 + index = [1, 1, 1, 2, 0, 0, 0, 1] + df = DataFrame( + {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]}, + index=index, + ) + grouped_df = df.groupby("tag") + ser = df["pid"] + grouped_ser = ser.groupby(df["tag"]) + expected_indexes = [1, 2, 4, 7] + + # Filter DataFrame + actual = grouped_df.filter(lambda x: len(x) > 1) + expected = df.iloc[expected_indexes] + tm.assert_frame_equal(actual, expected) + + actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False) + # Cast to avoid upcast when setting nan below + expected = df.copy().astype("float64") + expected.iloc[[0, 3, 5, 6]] = np.nan + tm.assert_frame_equal(actual, expected) + + # Filter Series + actual = grouped_ser.filter(lambda x: len(x) > 1) + expected = ser.take(expected_indexes) + tm.assert_series_equal(actual, expected) + + actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False) + expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid") + # ^ made manually because this can get confusing! + tm.assert_series_equal(actual, expected) + + # Transform Series + actual = grouped_ser.transform(len) + expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid") + tm.assert_series_equal(actual, expected) + + # Transform (a column from) DataFrameGroupBy + actual = grouped_df.pid.transform(len) + tm.assert_series_equal(actual, expected) + + +def test_filter_and_transform_with_non_unique_float_index(): + # GH4620 + index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float) + df = DataFrame( + {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]}, + index=index, + ) + grouped_df = df.groupby("tag") + ser = df["pid"] + grouped_ser = ser.groupby(df["tag"]) + expected_indexes = [1, 2, 4, 7] + + # Filter DataFrame + actual = grouped_df.filter(lambda x: len(x) > 1) + expected = df.iloc[expected_indexes] + tm.assert_frame_equal(actual, expected) + + actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False) + # Cast to avoid upcast when setting nan below + expected = df.copy().astype("float64") + expected.iloc[[0, 3, 5, 6]] = np.nan + tm.assert_frame_equal(actual, expected) + + # Filter Series + actual = grouped_ser.filter(lambda x: len(x) > 1) + expected = ser.take(expected_indexes) + tm.assert_series_equal(actual, expected) + + actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False) + expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid") + # ^ made manually because this can get confusing! 
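+    # NOTE (editorial, not in the upstream pandas source): in the transform
+    # checks that follow, transform(len) broadcasts each group's size back
+    # to every row, aligned with the original non-unique index rather than
+    # collapsed to one row per group.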
+ tm.assert_series_equal(actual, expected) + + # Transform Series + actual = grouped_ser.transform(len) + expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid") + tm.assert_series_equal(actual, expected) + + # Transform (a column from) DataFrameGroupBy + actual = grouped_df.pid.transform(len) + tm.assert_series_equal(actual, expected) + + +def test_filter_and_transform_with_non_unique_timestamp_index(): + # GH4620 + t0 = Timestamp("2013-09-30 00:05:00") + t1 = Timestamp("2013-10-30 00:05:00") + t2 = Timestamp("2013-11-30 00:05:00") + index = [t1, t1, t1, t2, t1, t1, t0, t1] + df = DataFrame( + {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]}, + index=index, + ) + grouped_df = df.groupby("tag") + ser = df["pid"] + grouped_ser = ser.groupby(df["tag"]) + expected_indexes = [1, 2, 4, 7] + + # Filter DataFrame + actual = grouped_df.filter(lambda x: len(x) > 1) + expected = df.iloc[expected_indexes] + tm.assert_frame_equal(actual, expected) + + actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False) + # Cast to avoid upcast when setting nan below + expected = df.copy().astype("float64") + expected.iloc[[0, 3, 5, 6]] = np.nan + tm.assert_frame_equal(actual, expected) + + # Filter Series + actual = grouped_ser.filter(lambda x: len(x) > 1) + expected = ser.take(expected_indexes) + tm.assert_series_equal(actual, expected) + + actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False) + expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid") + # ^ made manually because this can get confusing! + tm.assert_series_equal(actual, expected) + + # Transform Series + actual = grouped_ser.transform(len) + expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid") + tm.assert_series_equal(actual, expected) + + # Transform (a column from) DataFrameGroupBy + actual = grouped_df.pid.transform(len) + tm.assert_series_equal(actual, expected) + + +def test_filter_and_transform_with_non_unique_string_index(): + # GH4620 + index = list("bbbcbbab") + df = DataFrame( + {"pid": [1, 1, 1, 2, 2, 3, 3, 3], "tag": [23, 45, 62, 24, 45, 34, 25, 62]}, + index=index, + ) + grouped_df = df.groupby("tag") + ser = df["pid"] + grouped_ser = ser.groupby(df["tag"]) + expected_indexes = [1, 2, 4, 7] + + # Filter DataFrame + actual = grouped_df.filter(lambda x: len(x) > 1) + expected = df.iloc[expected_indexes] + tm.assert_frame_equal(actual, expected) + + actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False) + # Cast to avoid upcast when setting nan below + expected = df.copy().astype("float64") + expected.iloc[[0, 3, 5, 6]] = np.nan + tm.assert_frame_equal(actual, expected) + + # Filter Series + actual = grouped_ser.filter(lambda x: len(x) > 1) + expected = ser.take(expected_indexes) + tm.assert_series_equal(actual, expected) + + actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False) + expected = Series([np.nan, 1, 1, np.nan, 2, np.nan, np.nan, 3], index, name="pid") + # ^ made manually because this can get confusing! + tm.assert_series_equal(actual, expected) + + # Transform Series + actual = grouped_ser.transform(len) + expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name="pid") + tm.assert_series_equal(actual, expected) + + # Transform (a column from) DataFrameGroupBy + actual = grouped_df.pid.transform(len) + tm.assert_series_equal(actual, expected) + + +def test_filter_has_access_to_grouped_cols(): + df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=["A", "B"]) + g = df.groupby("A") + # previously didn't have access to col A #???? 
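+    # i.e. the predicate may reference the grouping column itself; only the
+    # A == 1 group satisfies x["A"].sum() == 2.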
+ filt = g.filter(lambda x: x["A"].sum() == 2) + tm.assert_frame_equal(filt, df.iloc[[0, 1]]) + + +def test_filter_enforces_scalarness(): + df = DataFrame( + [ + ["best", "a", "x"], + ["worst", "b", "y"], + ["best", "c", "x"], + ["best", "d", "y"], + ["worst", "d", "y"], + ["worst", "d", "y"], + ["best", "d", "z"], + ], + columns=["a", "b", "c"], + ) + with pytest.raises(TypeError, match="filter function returned a.*"): + df.groupby("c").filter(lambda g: g["a"] == "best") + + +def test_filter_non_bool_raises(): + df = DataFrame( + [ + ["best", "a", 1], + ["worst", "b", 1], + ["best", "c", 1], + ["best", "d", 1], + ["worst", "d", 1], + ["worst", "d", 1], + ["best", "d", 1], + ], + columns=["a", "b", "c"], + ) + with pytest.raises(TypeError, match="filter function returned a.*"): + df.groupby("a").filter(lambda g: g.c.mean()) + + +def test_filter_dropna_with_empty_groups(): + # GH 10780 + data = Series(np.random.default_rng(2).random(9), index=np.repeat([1, 2, 3], 3)) + grouped = data.groupby(level=0) + result_false = grouped.filter(lambda x: x.mean() > 1, dropna=False) + expected_false = Series([np.nan] * 9, index=np.repeat([1, 2, 3], 3)) + tm.assert_series_equal(result_false, expected_false) + + result_true = grouped.filter(lambda x: x.mean() > 1, dropna=True) + expected_true = Series(index=pd.Index([], dtype=int), dtype=np.float64) + tm.assert_series_equal(result_true, expected_true) + + +def test_filter_consistent_result_before_after_agg_func(): + # GH 17091 + df = DataFrame({"data": range(6), "key": list("ABCABC")}) + grouper = df.groupby("key") + result = grouper.filter(lambda x: True) + expected = DataFrame({"data": range(6), "key": list("ABCABC")}) + tm.assert_frame_equal(result, expected) + + grouper.sum() + result = grouper.filter(lambda x: True) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_function.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_function.py new file mode 100644 index 00000000..ea38447b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_function.py @@ -0,0 +1,1780 @@ +import builtins +from io import StringIO +import re + +import numpy as np +import pytest + +from pandas._libs import lib +from pandas.errors import UnsupportedFunctionCall + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm +from pandas.tests.groupby import get_groupby_method_args +from pandas.util import _test_decorators as td + + +@pytest.fixture( + params=[np.int32, np.int64, np.float32, np.float64, "Int64", "Float64"], + ids=["np.int32", "np.int64", "np.float32", "np.float64", "Int64", "Float64"], +) +def dtypes_for_minmax(request): + """ + Fixture of dtypes with min and max values used for testing + cummin and cummax + """ + dtype = request.param + + np_type = dtype + if dtype == "Int64": + np_type = np.int64 + elif dtype == "Float64": + np_type = np.float64 + + min_val = ( + np.iinfo(np_type).min + if np.dtype(np_type).kind == "i" + else np.finfo(np_type).min + ) + max_val = ( + np.iinfo(np_type).max + if np.dtype(np_type).kind == "i" + else np.finfo(np_type).max + ) + + return (dtype, min_val, max_val) + + +def test_intercept_builtin_sum(): + s = Series([1.0, 2.0, np.nan, 3.0]) + grouped = s.groupby([0, 1, 2, 2]) + + msg = "using SeriesGroupBy.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#53425 + result = grouped.agg(builtins.sum) + msg = "using 
np.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#53425 + result2 = grouped.apply(builtins.sum) + expected = grouped.sum() + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) + + +@pytest.mark.parametrize("f", [max, min, sum]) +@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key +def test_builtins_apply(keys, f): + # see gh-8155 + rs = np.random.default_rng(2) + df = DataFrame(rs.integers(1, 7, (10, 2)), columns=["jim", "joe"]) + df["jolie"] = rs.standard_normal(10) + + gb = df.groupby(keys) + + fname = f.__name__ + + warn = None if f is not sum else FutureWarning + msg = "The behavior of DataFrame.sum with axis=None is deprecated" + with tm.assert_produces_warning( + warn, match=msg, check_stacklevel=False, raise_on_extra_warnings=False + ): + # Also warns on deprecation GH#53425 + result = gb.apply(f) + ngroups = len(df.drop_duplicates(subset=keys)) + + assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))" + assert result.shape == (ngroups, 3), assert_msg + + npfunc = lambda x: getattr(np, fname)(x, axis=0) # numpy's equivalent function + expected = gb.apply(npfunc) + tm.assert_frame_equal(result, expected) + + with tm.assert_produces_warning(None): + expected2 = gb.apply(lambda x: npfunc(x)) + tm.assert_frame_equal(result, expected2) + + if f != sum: + expected = gb.agg(fname).reset_index() + expected.set_index(keys, inplace=True, drop=False) + tm.assert_frame_equal(result, expected, check_dtype=False) + + tm.assert_series_equal(getattr(result, fname)(axis=0), getattr(df, fname)(axis=0)) + + +class TestNumericOnly: + # make sure that we are passing thru kwargs to our agg functions + + @pytest.fixture + def df(self): + # GH3668 + # GH5724 + df = DataFrame( + { + "group": [1, 1, 2], + "int": [1, 2, 3], + "float": [4.0, 5.0, 6.0], + "string": list("abc"), + "category_string": Series(list("abc")).astype("category"), + "category_int": [7, 8, 9], + "datetime": date_range("20130101", periods=3), + "datetimetz": date_range("20130101", periods=3, tz="US/Eastern"), + "timedelta": pd.timedelta_range("1 s", periods=3, freq="s"), + }, + columns=[ + "group", + "int", + "float", + "string", + "category_string", + "category_int", + "datetime", + "datetimetz", + "timedelta", + ], + ) + return df + + @pytest.mark.parametrize("method", ["mean", "median"]) + def test_averages(self, df, method): + # mean / median + expected_columns_numeric = Index(["int", "float", "category_int"]) + + gb = df.groupby("group") + expected = DataFrame( + { + "category_int": [7.5, 9], + "float": [4.5, 6.0], + "timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")], + "int": [1.5, 3], + "datetime": [ + Timestamp("2013-01-01 12:00:00"), + Timestamp("2013-01-03 00:00:00"), + ], + "datetimetz": [ + Timestamp("2013-01-01 12:00:00", tz="US/Eastern"), + Timestamp("2013-01-03 00:00:00", tz="US/Eastern"), + ], + }, + index=Index([1, 2], name="group"), + columns=[ + "int", + "float", + "category_int", + ], + ) + + result = getattr(gb, method)(numeric_only=True) + tm.assert_frame_equal(result.reindex_like(expected), expected) + + expected_columns = expected.columns + + self._check(df, method, expected_columns, expected_columns_numeric) + + @pytest.mark.parametrize("method", ["min", "max"]) + def test_extrema(self, df, method): + # TODO: min, max *should* handle + # categorical (ordered) dtype + + expected_columns = Index( + [ + "int", + "float", + "string", + "category_int", + "datetime", + "datetimetz", + "timedelta", + ] + 
) + expected_columns_numeric = expected_columns + + self._check(df, method, expected_columns, expected_columns_numeric) + + @pytest.mark.parametrize("method", ["first", "last"]) + def test_first_last(self, df, method): + expected_columns = Index( + [ + "int", + "float", + "string", + "category_string", + "category_int", + "datetime", + "datetimetz", + "timedelta", + ] + ) + expected_columns_numeric = expected_columns + + self._check(df, method, expected_columns, expected_columns_numeric) + + @pytest.mark.parametrize("method", ["sum", "cumsum"]) + def test_sum_cumsum(self, df, method): + expected_columns_numeric = Index(["int", "float", "category_int"]) + expected_columns = Index( + ["int", "float", "string", "category_int", "timedelta"] + ) + if method == "cumsum": + # cumsum loses string + expected_columns = Index(["int", "float", "category_int", "timedelta"]) + + self._check(df, method, expected_columns, expected_columns_numeric) + + @pytest.mark.parametrize("method", ["prod", "cumprod"]) + def test_prod_cumprod(self, df, method): + expected_columns = Index(["int", "float", "category_int"]) + expected_columns_numeric = expected_columns + + self._check(df, method, expected_columns, expected_columns_numeric) + + @pytest.mark.parametrize("method", ["cummin", "cummax"]) + def test_cummin_cummax(self, df, method): + # like min, max, but don't include strings + expected_columns = Index( + ["int", "float", "category_int", "datetime", "datetimetz", "timedelta"] + ) + + # GH#15561: numeric_only=False set by default like min/max + expected_columns_numeric = expected_columns + + self._check(df, method, expected_columns, expected_columns_numeric) + + def _check(self, df, method, expected_columns, expected_columns_numeric): + gb = df.groupby("group") + + # object dtypes for transformations are not implemented in Cython and + # have no Python fallback + exception = NotImplementedError if method.startswith("cum") else TypeError + + if method in ("min", "max", "cummin", "cummax", "cumsum", "cumprod"): + # The methods default to numeric_only=False and raise TypeError + msg = "|".join( + [ + "Categorical is not ordered", + f"Cannot perform {method} with non-ordered Categorical", + re.escape(f"agg function failed [how->{method},dtype->object]"), + # cumsum/cummin/cummax/cumprod + "function is not implemented for this dtype", + ] + ) + with pytest.raises(exception, match=msg): + getattr(gb, method)() + elif method in ("sum", "mean", "median", "prod"): + msg = "|".join( + [ + "category type does not support sum operations", + re.escape(f"agg function failed [how->{method},dtype->object]"), + ] + ) + with pytest.raises(exception, match=msg): + getattr(gb, method)() + else: + result = getattr(gb, method)() + tm.assert_index_equal(result.columns, expected_columns_numeric) + + if method not in ("first", "last"): + msg = "|".join( + [ + "Categorical is not ordered", + "category type does not support", + "function is not implemented for this dtype", + f"Cannot perform {method} with non-ordered Categorical", + re.escape(f"agg function failed [how->{method},dtype->object]"), + ] + ) + with pytest.raises(exception, match=msg): + getattr(gb, method)(numeric_only=False) + else: + result = getattr(gb, method)(numeric_only=False) + tm.assert_index_equal(result.columns, expected_columns) + + +class TestGroupByNonCythonPaths: + # GH#5610 non-cython calls should not include the grouper + # Tests for code not expected to go through cython paths. 
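+    # e.g. describe() falls back to the generic apply path, so the grouping
+    # key ("A") must end up in the result index rather than in its columns.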
+ + @pytest.fixture + def df(self): + df = DataFrame( + [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]], + columns=["A", "B", "C"], + ) + return df + + @pytest.fixture + def gb(self, df): + gb = df.groupby("A") + return gb + + @pytest.fixture + def gni(self, df): + gni = df.groupby("A", as_index=False) + return gni + + def test_describe(self, df, gb, gni): + # describe + expected_index = Index([1, 3], name="A") + expected_col = MultiIndex( + levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]], + codes=[[0] * 8, list(range(8))], + ) + expected = DataFrame( + [ + [1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0], + [0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], + ], + index=expected_index, + columns=expected_col, + ) + result = gb.describe() + tm.assert_frame_equal(result, expected) + + expected = expected.reset_index() + result = gni.describe() + tm.assert_frame_equal(result, expected) + + +def test_cython_api2(): + # this takes the fast apply path + + # cumsum (GH5614) + df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"]) + expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"]) + result = df.groupby("A").cumsum() + tm.assert_frame_equal(result, expected) + + # GH 5755 - cumsum is a transformer and should ignore as_index + result = df.groupby("A", as_index=False).cumsum() + tm.assert_frame_equal(result, expected) + + # GH 13994 + msg = "DataFrameGroupBy.cumsum with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").cumsum(axis=1) + expected = df.cumsum(axis=1) + tm.assert_frame_equal(result, expected) + + msg = "DataFrameGroupBy.cumprod with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("A").cumprod(axis=1) + expected = df.cumprod(axis=1) + tm.assert_frame_equal(result, expected) + + +def test_cython_median(): + arr = np.random.default_rng(2).standard_normal(1000) + arr[::2] = np.nan + df = DataFrame(arr) + + labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float) + labels[::17] = np.nan + + result = df.groupby(labels).median() + msg = "using DataFrameGroupBy.median" + with tm.assert_produces_warning(FutureWarning, match=msg): + exp = df.groupby(labels).agg(np.nanmedian) + tm.assert_frame_equal(result, exp) + + df = DataFrame(np.random.default_rng(2).standard_normal((1000, 5))) + msg = "using DataFrameGroupBy.median" + with tm.assert_produces_warning(FutureWarning, match=msg): + rs = df.groupby(labels).agg(np.median) + xp = df.groupby(labels).median() + tm.assert_frame_equal(rs, xp) + + +def test_median_empty_bins(observed): + df = DataFrame(np.random.default_rng(2).integers(0, 44, 500)) + + grps = range(0, 55, 5) + bins = pd.cut(df[0], grps) + + result = df.groupby(bins, observed=observed).median() + expected = df.groupby(bins, observed=observed).agg(lambda x: x.median()) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"] +) +@pytest.mark.parametrize( + "method,data", + [ + ("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}), + ("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}), + ("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}), + ("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}), + ("count", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}), + ], +) +def test_groupby_non_arithmetic_agg_types(dtype, method, data): + # GH9311, 
GH6620 + df = DataFrame( + [{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 3}, {"a": 2, "b": 4}] + ) + + df["b"] = df.b.astype(dtype) + + if "args" not in data: + data["args"] = [] + + if "out_type" in data: + out_type = data["out_type"] + else: + out_type = dtype + + exp = data["df"] + df_out = DataFrame(exp) + + df_out["b"] = df_out.b.astype(out_type) + df_out.set_index("a", inplace=True) + + grpd = df.groupby("a") + t = getattr(grpd, method)(*data["args"]) + tm.assert_frame_equal(t, df_out) + + +@pytest.mark.parametrize( + "i", + [ + ( + Timestamp("2011-01-15 12:50:28.502376"), + Timestamp("2011-01-20 12:50:28.593448"), + ), + (24650000000000001, 24650000000000002), + ], +) +def test_groupby_non_arithmetic_agg_int_like_precision(i): + # see gh-6620, gh-9311 + df = DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}]) + + grp_exp = { + "first": {"expected": i[0]}, + "last": {"expected": i[1]}, + "min": {"expected": i[0]}, + "max": {"expected": i[1]}, + "nth": {"expected": i[1], "args": [1]}, + "count": {"expected": 2}, + } + + for method, data in grp_exp.items(): + if "args" not in data: + data["args"] = [] + + grouped = df.groupby("a") + res = getattr(grouped, method)(*data["args"]) + + assert res.iloc[0].b == data["expected"] + + +@pytest.mark.parametrize( + "func, values", + [ + ("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}), + ("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}), + ], +) +@pytest.mark.parametrize("numeric_only", [True, False]) +def test_idxmin_idxmax_returns_int_types(func, values, numeric_only): + # GH 25444 + df = DataFrame( + { + "name": ["A", "A", "B", "B"], + "c_int": [1, 2, 3, 4], + "c_float": [4.02, 3.03, 2.04, 1.05], + "c_date": ["2019", "2018", "2016", "2017"], + } + ) + df["c_date"] = pd.to_datetime(df["c_date"]) + df["c_date_tz"] = df["c_date"].dt.tz_localize("US/Pacific") + df["c_timedelta"] = df["c_date"] - df["c_date"].iloc[0] + df["c_period"] = df["c_date"].dt.to_period("W") + df["c_Integer"] = df["c_int"].astype("Int64") + df["c_Floating"] = df["c_float"].astype("Float64") + + result = getattr(df.groupby("name"), func)(numeric_only=numeric_only) + + expected = DataFrame(values, index=Index(["A", "B"], name="name")) + if numeric_only: + expected = expected.drop(columns=["c_date"]) + else: + expected["c_date_tz"] = expected["c_date"] + expected["c_timedelta"] = expected["c_date"] + expected["c_period"] = expected["c_date"] + expected["c_Integer"] = expected["c_int"] + expected["c_Floating"] = expected["c_float"] + + tm.assert_frame_equal(result, expected) + + +def test_idxmin_idxmax_axis1(): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), columns=["A", "B", "C", "D"] + ) + df["A"] = [1, 2, 3, 1, 2, 3, 1, 2, 3, 4] + + gb = df.groupby("A") + + warn_msg = "DataFrameGroupBy.idxmax with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = gb.idxmax(axis=1) + + alt = df.iloc[:, 1:].idxmax(axis=1) + indexer = res.index.get_level_values(1) + + tm.assert_series_equal(alt[indexer], res.droplevel("A")) + + df["E"] = date_range("2016-01-01", periods=10) + gb2 = df.groupby("A") + + msg = "'>' not supported between instances of 'Timestamp' and 'float'" + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + gb2.idxmax(axis=1) + + +@pytest.mark.parametrize("numeric_only", [True, False, None]) +def test_axis1_numeric_only(request, groupby_func, numeric_only): + if groupby_func in ("idxmax", "idxmin"): + 
pytest.skip("idxmax and idx_min tested in test_idxmin_idxmax_axis1") + if groupby_func in ("corrwith", "skew"): + msg = "GH#47723 groupby.corrwith and skew do not correctly implement axis=1" + request.node.add_marker(pytest.mark.xfail(reason=msg)) + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), columns=["A", "B", "C", "D"] + ) + df["E"] = "x" + groups = [1, 2, 3, 1, 2, 3, 1, 2, 3, 4] + gb = df.groupby(groups) + method = getattr(gb, groupby_func) + args = get_groupby_method_args(groupby_func, df) + kwargs = {"axis": 1} + if numeric_only is not None: + # when numeric_only is None we don't pass any argument + kwargs["numeric_only"] = numeric_only + + # Functions without numeric_only and axis args + no_args = ("cumprod", "cumsum", "diff", "fillna", "pct_change", "rank", "shift") + # Functions with axis args + has_axis = ( + "cumprod", + "cumsum", + "diff", + "pct_change", + "rank", + "shift", + "cummax", + "cummin", + "idxmin", + "idxmax", + "fillna", + ) + warn_msg = f"DataFrameGroupBy.{groupby_func} with axis=1 is deprecated" + if numeric_only is not None and groupby_func in no_args: + msg = "got an unexpected keyword argument 'numeric_only'" + if groupby_func in ["cumprod", "cumsum"]: + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + method(*args, **kwargs) + else: + with pytest.raises(TypeError, match=msg): + method(*args, **kwargs) + elif groupby_func not in has_axis: + msg = "got an unexpected keyword argument 'axis'" + with pytest.raises(TypeError, match=msg): + method(*args, **kwargs) + # fillna and shift are successful even on object dtypes + elif (numeric_only is None or not numeric_only) and groupby_func not in ( + "fillna", + "shift", + ): + msgs = ( + # cummax, cummin, rank + "not supported between instances of", + # cumprod + "can't multiply sequence by non-int of type 'float'", + # cumsum, diff, pct_change + "unsupported operand type", + ) + with pytest.raises(TypeError, match=f"({'|'.join(msgs)})"): + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + method(*args, **kwargs) + else: + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + result = method(*args, **kwargs) + + df_expected = df.drop(columns="E").T if numeric_only else df.T + expected = getattr(df_expected, groupby_func)(*args).T + if groupby_func == "shift" and not numeric_only: + # shift with axis=1 leaves the leftmost column as numeric + # but transposing for expected gives us object dtype + expected = expected.astype(float) + + tm.assert_equal(result, expected) + + +def test_groupby_cumprod(): + # GH 4095 + df = DataFrame({"key": ["b"] * 10, "value": 2}) + + actual = df.groupby("key")["value"].cumprod() + expected = df.groupby("key", group_keys=False)["value"].apply(lambda x: x.cumprod()) + expected.name = "value" + tm.assert_series_equal(actual, expected) + + df = DataFrame({"key": ["b"] * 100, "value": 2}) + df["value"] = df["value"].astype(float) + actual = df.groupby("key")["value"].cumprod() + expected = df.groupby("key", group_keys=False)["value"].apply(lambda x: x.cumprod()) + expected.name = "value" + tm.assert_series_equal(actual, expected) + + +def test_groupby_cumprod_overflow(): + # GH#37493 if we overflow we return garbage consistent with numpy + df = DataFrame({"key": ["b"] * 4, "value": 100_000}) + actual = df.groupby("key")["value"].cumprod() + expected = Series( + [100_000, 10_000_000_000, 1_000_000_000_000_000, 7766279631452241920], + name="value", + ) + 
tm.assert_series_equal(actual, expected) + + numpy_result = df.groupby("key", group_keys=False)["value"].apply( + lambda x: x.cumprod() + ) + numpy_result.name = "value" + tm.assert_series_equal(actual, numpy_result) + + +def test_groupby_cumprod_nan_influences_other_columns(): + # GH#48064 + df = DataFrame( + { + "a": 1, + "b": [1, np.nan, 2], + "c": [1, 2, 3.0], + } + ) + result = df.groupby("a").cumprod(numeric_only=True, skipna=False) + expected = DataFrame({"b": [1, np.nan, np.nan], "c": [1, 2, 6.0]}) + tm.assert_frame_equal(result, expected) + + +def scipy_sem(*args, **kwargs): + from scipy.stats import sem + + return sem(*args, ddof=1, **kwargs) + + +@pytest.mark.parametrize( + "op,targop", + [ + ("mean", np.mean), + ("median", np.median), + ("std", np.std), + ("var", np.var), + ("sum", np.sum), + ("prod", np.prod), + ("min", np.min), + ("max", np.max), + ("first", lambda x: x.iloc[0]), + ("last", lambda x: x.iloc[-1]), + ("count", np.size), + pytest.param("sem", scipy_sem, marks=td.skip_if_no_scipy), + ], +) +def test_ops_general(op, targop): + df = DataFrame(np.random.default_rng(2).standard_normal(1000)) + labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float) + + result = getattr(df.groupby(labels), op)() + warn = None if op in ("first", "last", "count", "sem") else FutureWarning + msg = f"using DataFrameGroupBy.{op}" + with tm.assert_produces_warning(warn, match=msg): + expected = df.groupby(labels).agg(targop) + tm.assert_frame_equal(result, expected) + + +def test_max_nan_bug(): + raw = """,Date,app,File +-04-23,2013-04-23 00:00:00,,log080001.log +-05-06,2013-05-06 00:00:00,,log.log +-05-07,2013-05-07 00:00:00,OE,xlsx""" + + with tm.assert_produces_warning(UserWarning, match="Could not infer format"): + df = pd.read_csv(StringIO(raw), parse_dates=[0]) + gb = df.groupby("Date") + r = gb[["File"]].max() + e = gb["File"].max().to_frame() + tm.assert_frame_equal(r, e) + assert not r["File"].isna().any() + + +def test_nlargest(): + a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10]) + b = Series(list("a" * 5 + "b" * 5)) + gb = a.groupby(b) + r = gb.nlargest(3) + e = Series( + [7, 5, 3, 10, 9, 6], + index=MultiIndex.from_arrays([list("aaabbb"), [3, 2, 1, 9, 5, 8]]), + ) + tm.assert_series_equal(r, e) + + a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0]) + gb = a.groupby(b) + e = Series( + [3, 2, 1, 3, 3, 2], + index=MultiIndex.from_arrays([list("aaabbb"), [2, 3, 1, 6, 5, 7]]), + ) + tm.assert_series_equal(gb.nlargest(3, keep="last"), e) + + +def test_nlargest_mi_grouper(): + # see gh-21411 + npr = np.random.default_rng(2) + + dts = date_range("20180101", periods=10) + iterables = [dts, ["one", "two"]] + + idx = MultiIndex.from_product(iterables, names=["first", "second"]) + s = Series(npr.standard_normal(20), index=idx) + + result = s.groupby("first").nlargest(1) + + exp_idx = MultiIndex.from_tuples( + [ + (dts[0], dts[0], "one"), + (dts[1], dts[1], "one"), + (dts[2], dts[2], "one"), + (dts[3], dts[3], "two"), + (dts[4], dts[4], "one"), + (dts[5], dts[5], "one"), + (dts[6], dts[6], "one"), + (dts[7], dts[7], "one"), + (dts[8], dts[8], "one"), + (dts[9], dts[9], "one"), + ], + names=["first", "first", "second"], + ) + + exp_values = [ + 0.18905338179353307, + -0.41306354339189344, + 1.799707382720902, + 0.7738065867276614, + 0.28121066979764925, + 0.9775674511260357, + -0.3288239040579627, + 0.45495807124085547, + 0.5452887139646817, + 0.12682784711186987, + ] + + expected = Series(exp_values, index=exp_idx) + tm.assert_series_equal(result, expected, check_exact=False, 
rtol=1e-3) + + +def test_nsmallest(): + a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10]) + b = Series(list("a" * 5 + "b" * 5)) + gb = a.groupby(b) + r = gb.nsmallest(3) + e = Series( + [1, 2, 3, 0, 4, 6], + index=MultiIndex.from_arrays([list("aaabbb"), [0, 4, 1, 6, 7, 8]]), + ) + tm.assert_series_equal(r, e) + + a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0]) + gb = a.groupby(b) + e = Series( + [0, 1, 1, 0, 1, 2], + index=MultiIndex.from_arrays([list("aaabbb"), [4, 1, 0, 9, 8, 7]]), + ) + tm.assert_series_equal(gb.nsmallest(3, keep="last"), e) + + +@pytest.mark.parametrize( + "data, groups", + [([0, 1, 2, 3], [0, 0, 1, 1]), ([0], [0])], +) +@pytest.mark.parametrize("dtype", [None, *tm.ALL_INT_NUMPY_DTYPES]) +@pytest.mark.parametrize("method", ["nlargest", "nsmallest"]) +def test_nlargest_and_smallest_noop(data, groups, dtype, method): + # GH 15272, GH 16345, GH 29129 + # Test nlargest/smallest when it results in a noop, + # i.e. input is sorted and group size <= n + if dtype is not None: + data = np.array(data, dtype=dtype) + if method == "nlargest": + data = list(reversed(data)) + ser = Series(data, name="a") + result = getattr(ser.groupby(groups), method)(n=2) + expidx = np.array(groups, dtype=int) if isinstance(groups, list) else groups + expected = Series(data, index=MultiIndex.from_arrays([expidx, ser.index]), name="a") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("func", ["cumprod", "cumsum"]) +def test_numpy_compat(func): + # see gh-12811 + df = DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]}) + g = df.groupby("A") + + msg = "numpy operations are not valid with groupby" + + with pytest.raises(UnsupportedFunctionCall, match=msg): + getattr(g, func)(1, 2, 3) + with pytest.raises(UnsupportedFunctionCall, match=msg): + getattr(g, func)(foo=1) + + +def test_cummin(dtypes_for_minmax): + dtype = dtypes_for_minmax[0] + min_val = dtypes_for_minmax[1] + + # GH 15048 + base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}) + expected_mins = [3, 3, 3, 2, 2, 2, 2, 1] + + df = base_df.astype(dtype) + + expected = DataFrame({"B": expected_mins}).astype(dtype) + result = df.groupby("A").cummin() + tm.assert_frame_equal(result, expected) + result = df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame() + tm.assert_frame_equal(result, expected) + + # Test w/ min value for dtype + df.loc[[2, 6], "B"] = min_val + df.loc[[1, 5], "B"] = min_val + 1 + expected.loc[[2, 3, 6, 7], "B"] = min_val + expected.loc[[1, 5], "B"] = min_val + 1 # should not be rounded to min_val + result = df.groupby("A").cummin() + tm.assert_frame_equal(result, expected, check_exact=True) + expected = ( + df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame() + ) + tm.assert_frame_equal(result, expected, check_exact=True) + + # Test nan in some values + # Explicit cast to float to avoid implicit cast when setting nan + base_df = base_df.astype({"B": "float"}) + base_df.loc[[0, 2, 4, 6], "B"] = np.nan + expected = DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]}) + result = base_df.groupby("A").cummin() + tm.assert_frame_equal(result, expected) + expected = ( + base_df.groupby("A", group_keys=False).B.apply(lambda x: x.cummin()).to_frame() + ) + tm.assert_frame_equal(result, expected) + + # GH 15561 + df = DataFrame({"a": [1], "b": pd.to_datetime(["2001"])}) + expected = Series(pd.to_datetime("2001"), index=[0], name="b") + + result = df.groupby("a")["b"].cummin() + tm.assert_series_equal(expected, result) + + # GH 15635 + df = 
DataFrame({"a": [1, 2, 1], "b": [1, 2, 2]}) + result = df.groupby("a").b.cummin() + expected = Series([1, 2, 1], name="b") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("method", ["cummin", "cummax"]) +@pytest.mark.parametrize("dtype", ["UInt64", "Int64", "Float64", "float", "boolean"]) +def test_cummin_max_all_nan_column(method, dtype): + base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8}) + base_df["B"] = base_df["B"].astype(dtype) + grouped = base_df.groupby("A") + + expected = DataFrame({"B": [np.nan] * 8}, dtype=dtype) + result = getattr(grouped, method)() + tm.assert_frame_equal(expected, result) + + result = getattr(grouped["B"], method)().to_frame() + tm.assert_frame_equal(expected, result) + + +def test_cummax(dtypes_for_minmax): + dtype = dtypes_for_minmax[0] + max_val = dtypes_for_minmax[2] + + # GH 15048 + base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}) + expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3] + + df = base_df.astype(dtype) + + expected = DataFrame({"B": expected_maxs}).astype(dtype) + result = df.groupby("A").cummax() + tm.assert_frame_equal(result, expected) + result = df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame() + tm.assert_frame_equal(result, expected) + + # Test w/ max value for dtype + df.loc[[2, 6], "B"] = max_val + expected.loc[[2, 3, 6, 7], "B"] = max_val + result = df.groupby("A").cummax() + tm.assert_frame_equal(result, expected) + expected = ( + df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame() + ) + tm.assert_frame_equal(result, expected) + + # Test nan in some values + # Explicit cast to float to avoid implicit cast when setting nan + base_df = base_df.astype({"B": "float"}) + base_df.loc[[0, 2, 4, 6], "B"] = np.nan + expected = DataFrame({"B": [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]}) + result = base_df.groupby("A").cummax() + tm.assert_frame_equal(result, expected) + expected = ( + base_df.groupby("A", group_keys=False).B.apply(lambda x: x.cummax()).to_frame() + ) + tm.assert_frame_equal(result, expected) + + # GH 15561 + df = DataFrame({"a": [1], "b": pd.to_datetime(["2001"])}) + expected = Series(pd.to_datetime("2001"), index=[0], name="b") + + result = df.groupby("a")["b"].cummax() + tm.assert_series_equal(expected, result) + + # GH 15635 + df = DataFrame({"a": [1, 2, 1], "b": [2, 1, 1]}) + result = df.groupby("a").b.cummax() + expected = Series([2, 1, 2], name="b") + tm.assert_series_equal(result, expected) + + +def test_cummax_i8_at_implementation_bound(): + # the minimum value used to be treated as NPY_NAT+1 instead of NPY_NAT + # for int64 dtype GH#46382 + ser = Series([pd.NaT._value + n for n in range(5)]) + df = DataFrame({"A": 1, "B": ser, "C": ser.view("M8[ns]")}) + gb = df.groupby("A") + + res = gb.cummax() + exp = df[["B", "C"]] + tm.assert_frame_equal(res, exp) + + +@pytest.mark.parametrize("method", ["cummin", "cummax"]) +@pytest.mark.parametrize("dtype", ["float", "Int64", "Float64"]) +@pytest.mark.parametrize( + "groups,expected_data", + [ + ([1, 1, 1], [1, None, None]), + ([1, 2, 3], [1, None, 2]), + ([1, 3, 3], [1, None, None]), + ], +) +def test_cummin_max_skipna(method, dtype, groups, expected_data): + # GH-34047 + df = DataFrame({"a": Series([1, None, 2], dtype=dtype)}) + orig = df.copy() + gb = df.groupby(groups)["a"] + + result = getattr(gb, method)(skipna=False) + expected = Series(expected_data, dtype=dtype, name="a") + + # check we didn't accidentally alter df + tm.assert_frame_equal(df, orig) 
+ + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("method", ["cummin", "cummax"]) +def test_cummin_max_skipna_multiple_cols(method): + # Ensure missing value in "a" doesn't cause "b" to be nan-filled + df = DataFrame({"a": [np.nan, 2.0, 2.0], "b": [2.0, 2.0, 2.0]}) + gb = df.groupby([1, 1, 1])[["a", "b"]] + + result = getattr(gb, method)(skipna=False) + expected = DataFrame({"a": [np.nan, np.nan, np.nan], "b": [2.0, 2.0, 2.0]}) + + tm.assert_frame_equal(result, expected) + + +@td.skip_if_32bit +@pytest.mark.parametrize("method", ["cummin", "cummax"]) +@pytest.mark.parametrize( + "dtype,val", [("UInt64", np.iinfo("uint64").max), ("Int64", 2**53 + 1)] +) +def test_nullable_int_not_cast_as_float(method, dtype, val): + data = [val, pd.NA] + df = DataFrame({"grp": [1, 1], "b": data}, dtype=dtype) + grouped = df.groupby("grp") + + result = grouped.transform(method) + expected = DataFrame({"b": data}, dtype=dtype) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "in_vals, out_vals", + [ + # Basics: strictly increasing (T), strictly decreasing (F), + # abs val increasing (F), non-strictly increasing (T) + ([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1], [True, False, False, True]), + # Test with inf vals + ( + [1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf], + [True, False, True, False], + ), + # Test with nan vals; should always be False + ( + [1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan], + [False, False, False, False], + ), + ], +) +def test_is_monotonic_increasing(in_vals, out_vals): + # GH 17015 + source_dict = { + "A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"], + "B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"], + "C": in_vals, + } + df = DataFrame(source_dict) + result = df.groupby("B").C.is_monotonic_increasing + index = Index(list("abcd"), name="B") + expected = Series(index=index, data=out_vals, name="C") + tm.assert_series_equal(result, expected) + + # Also check result equal to manually taking x.is_monotonic_increasing. + expected = df.groupby(["B"]).C.apply(lambda x: x.is_monotonic_increasing) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "in_vals, out_vals", + [ + # Basics: strictly decreasing (T), strictly increasing (F), + # abs val decreasing (F), non-strictly increasing (T) + ([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1], [True, False, False, True]), + # Test with inf vals + ( + [np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf], + [True, True, False, True], + ), + # Test with nan vals; should always be False + ( + [1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan], + [False, False, False, False], + ), + ], +) +def test_is_monotonic_decreasing(in_vals, out_vals): + # GH 17015 + source_dict = { + "A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"], + "B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"], + "C": in_vals, + } + + df = DataFrame(source_dict) + result = df.groupby("B").C.is_monotonic_decreasing + index = Index(list("abcd"), name="B") + expected = Series(index=index, data=out_vals, name="C") + tm.assert_series_equal(result, expected) + + +# describe +# -------------------------------- + + +def test_apply_describe_bug(mframe): + grouped = mframe.groupby(level="first") + grouped.describe() # it works! 
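+
+# A minimal sketch (toy data; the helper name is hypothetical, not part of
+# the pandas test suite) of the shape DataFrameGroupBy.describe() returns,
+# which the describe tests below rely on:
+def _describe_shape_sketch():
+    df = DataFrame({"g": ["a", "a", "b"], "x": [1.0, 3.0, 5.0]})
+    desc = df.groupby("g").describe()
+    # columns form a MultiIndex of (original column, statistic) pairs
+    assert list(desc.columns.get_level_values(1)) == [
+        "count", "mean", "std", "min", "25%", "50%", "75%", "max"
+    ]
+    # rows are indexed by the group labels
+    assert list(desc.index) == ["a", "b"]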
+ + +def test_series_describe_multikey(): + ts = tm.makeTimeSeries() + grouped = ts.groupby([lambda x: x.year, lambda x: x.month]) + result = grouped.describe() + tm.assert_series_equal(result["mean"], grouped.mean(), check_names=False) + tm.assert_series_equal(result["std"], grouped.std(), check_names=False) + tm.assert_series_equal(result["min"], grouped.min(), check_names=False) + + +def test_series_describe_single(): + ts = tm.makeTimeSeries() + grouped = ts.groupby(lambda x: x.month) + result = grouped.apply(lambda x: x.describe()) + expected = grouped.describe().stack(future_stack=True) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("keys", ["key1", ["key1", "key2"]]) +def test_series_describe_as_index(as_index, keys): + # GH#49256 + df = DataFrame( + { + "key1": ["one", "two", "two", "three", "two"], + "key2": ["one", "two", "two", "three", "two"], + "foo2": [1, 2, 4, 4, 6], + } + ) + gb = df.groupby(keys, as_index=as_index)["foo2"] + result = gb.describe() + expected = DataFrame( + { + "key1": ["one", "three", "two"], + "count": [1.0, 1.0, 3.0], + "mean": [1.0, 4.0, 4.0], + "std": [np.nan, np.nan, 2.0], + "min": [1.0, 4.0, 2.0], + "25%": [1.0, 4.0, 3.0], + "50%": [1.0, 4.0, 4.0], + "75%": [1.0, 4.0, 5.0], + "max": [1.0, 4.0, 6.0], + } + ) + if len(keys) == 2: + expected.insert(1, "key2", expected["key1"]) + if as_index: + expected = expected.set_index(keys) + tm.assert_frame_equal(result, expected) + + +def test_series_index_name(df): + grouped = df.loc[:, ["C"]].groupby(df["A"]) + result = grouped.agg(lambda x: x.mean()) + assert result.index.name == "A" + + +def test_frame_describe_multikey(tsframe): + grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month]) + result = grouped.describe() + desc_groups = [] + for col in tsframe: + group = grouped[col].describe() + # GH 17464 - Remove duplicate MultiIndex levels + group_col = MultiIndex( + levels=[[col], group.columns], + codes=[[0] * len(group.columns), range(len(group.columns))], + ) + group = DataFrame(group.values, columns=group_col, index=group.index) + desc_groups.append(group) + expected = pd.concat(desc_groups, axis=1) + tm.assert_frame_equal(result, expected) + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + groupedT = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1) + result = groupedT.describe() + expected = tsframe.describe().T + # reverting the change from https://github.com/pandas-dev/pandas/pull/35441/ + expected.index = MultiIndex( + levels=[[0, 1], expected.index], + codes=[[0, 0, 1, 1], range(len(expected.index))], + ) + tm.assert_frame_equal(result, expected) + + +def test_frame_describe_tupleindex(): + # GH 14848 - regression from 0.19.0 to 0.19.1 + df1 = DataFrame( + { + "x": [1, 2, 3, 4, 5] * 3, + "y": [10, 20, 30, 40, 50] * 3, + "z": [100, 200, 300, 400, 500] * 3, + } + ) + df1["k"] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5 + df2 = df1.rename(columns={"k": "key"}) + msg = "Names should be list-like for a MultiIndex" + with pytest.raises(ValueError, match=msg): + df1.groupby("k").describe() + with pytest.raises(ValueError, match=msg): + df2.groupby("key").describe() + + +def test_frame_describe_unstacked_format(): + # GH 4792 + prices = { + Timestamp("2011-01-06 10:59:05", tz=None): 24990, + Timestamp("2011-01-06 12:43:33", tz=None): 25499, + Timestamp("2011-01-06 12:54:09", tz=None): 25499, + } + volumes = { + Timestamp("2011-01-06 10:59:05", tz=None): 1500000000, + Timestamp("2011-01-06 12:43:33", 
tz=None): 5000000000, + Timestamp("2011-01-06 12:54:09", tz=None): 100000000, + } + df = DataFrame({"PRICE": prices, "VOLUME": volumes}) + result = df.groupby("PRICE").VOLUME.describe() + data = [ + df[df.PRICE == 24990].VOLUME.describe().values.tolist(), + df[df.PRICE == 25499].VOLUME.describe().values.tolist(), + ] + expected = DataFrame( + data, + index=Index([24990, 25499], name="PRICE"), + columns=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.filterwarnings( + "ignore:" + "indexing past lexsort depth may impact performance:" + "pandas.errors.PerformanceWarning" +) +@pytest.mark.parametrize("as_index", [True, False]) +@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]]) +def test_describe_with_duplicate_output_column_names(as_index, keys): + # GH 35314 + df = DataFrame( + { + "a1": [99, 99, 99, 88, 88, 88], + "a2": [99, 99, 99, 88, 88, 88], + "b": [1, 2, 3, 4, 5, 6], + "c": [10, 20, 30, 40, 50, 60], + }, + columns=["a1", "a2", "b", "b"], + copy=False, + ) + if keys == ["a1"]: + df = df.drop(columns="a2") + + expected = ( + DataFrame.from_records( + [ + ("b", "count", 3.0, 3.0), + ("b", "mean", 5.0, 2.0), + ("b", "std", 1.0, 1.0), + ("b", "min", 4.0, 1.0), + ("b", "25%", 4.5, 1.5), + ("b", "50%", 5.0, 2.0), + ("b", "75%", 5.5, 2.5), + ("b", "max", 6.0, 3.0), + ("b", "count", 3.0, 3.0), + ("b", "mean", 5.0, 2.0), + ("b", "std", 1.0, 1.0), + ("b", "min", 4.0, 1.0), + ("b", "25%", 4.5, 1.5), + ("b", "50%", 5.0, 2.0), + ("b", "75%", 5.5, 2.5), + ("b", "max", 6.0, 3.0), + ], + ) + .set_index([0, 1]) + .T + ) + expected.columns.names = [None, None] + if len(keys) == 2: + expected.index = MultiIndex( + levels=[[88, 99], [88, 99]], codes=[[0, 1], [0, 1]], names=["a1", "a2"] + ) + else: + expected.index = Index([88, 99], name="a1") + + if not as_index: + expected = expected.reset_index() + + result = df.groupby(keys, as_index=as_index).describe() + + tm.assert_frame_equal(result, expected) + + +def test_describe_duplicate_columns(): + # GH#50806 + df = DataFrame([[0, 1, 2, 3]]) + df.columns = [0, 1, 2, 0] + gb = df.groupby(df[1]) + result = gb.describe(percentiles=[]) + + columns = ["count", "mean", "std", "min", "50%", "max"] + frames = [ + DataFrame([[1.0, val, np.nan, val, val, val]], index=[1], columns=columns) + for val in (0.0, 2.0, 3.0) + ] + expected = pd.concat(frames, axis=1) + expected.columns = MultiIndex( + levels=[[0, 2], columns], + codes=[6 * [0] + 6 * [1] + 6 * [0], 3 * list(range(6))], + ) + expected.index.names = [1] + tm.assert_frame_equal(result, expected) + + +def test_groupby_mean_no_overflow(): + # Regression test for (#22487) + df = DataFrame( + { + "user": ["A", "A", "A", "A", "A"], + "connections": [4970, 4749, 4719, 4704, 18446744073699999744], + } + ) + assert df.groupby("user")["connections"].mean()["A"] == 3689348814740003840 + + +@pytest.mark.parametrize( + "values", + [ + { + "a": [1, 1, 1, 2, 2, 2, 3, 3, 3], + "b": [1, pd.NA, 2, 1, pd.NA, 2, 1, pd.NA, 2], + }, + {"a": [1, 1, 2, 2, 3, 3], "b": [1, 2, 1, 2, 1, 2]}, + ], +) +@pytest.mark.parametrize("function", ["mean", "median", "var"]) +def test_apply_to_nullable_integer_returns_float(values, function): + # https://github.com/pandas-dev/pandas/issues/32219 + output = 0.5 if function == "var" else 1.5 + arr = np.array([output] * 3, dtype=float) + idx = Index([1, 2, 3], name="a", dtype="Int64") + expected = DataFrame({"b": arr}, index=idx).astype("Float64") + + groups = DataFrame(values, dtype="Int64").groupby("a") + + result = 
getattr(groups, function)() + tm.assert_frame_equal(result, expected) + + result = groups.agg(function) + tm.assert_frame_equal(result, expected) + + result = groups.agg([function]) + expected.columns = MultiIndex.from_tuples([("b", function)]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("min_count", [0, 10]) +def test_groupby_sum_mincount_boolean(min_count): + b = True + a = False + na = np.nan + dfg = pd.array([b, b, na, na, a, a, b], dtype="boolean") + + df = DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": dfg}) + result = df.groupby("A").sum(min_count=min_count) + if min_count == 0: + expected = DataFrame( + {"B": pd.array([3, 0, 0], dtype="Int64")}, + index=Index([1, 2, 3], name="A"), + ) + tm.assert_frame_equal(result, expected) + else: + expected = DataFrame( + {"B": pd.array([pd.NA] * 3, dtype="Int64")}, + index=Index([1, 2, 3], name="A"), + ) + tm.assert_frame_equal(result, expected) + + +def test_groupby_sum_below_mincount_nullable_integer(): + # https://github.com/pandas-dev/pandas/issues/32861 + df = DataFrame({"a": [0, 1, 2], "b": [0, 1, 2], "c": [0, 1, 2]}, dtype="Int64") + grouped = df.groupby("a") + idx = Index([0, 1, 2], name="a", dtype="Int64") + + result = grouped["b"].sum(min_count=2) + expected = Series([pd.NA] * 3, dtype="Int64", index=idx, name="b") + tm.assert_series_equal(result, expected) + + result = grouped.sum(min_count=2) + expected = DataFrame({"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx) + tm.assert_frame_equal(result, expected) + + +def test_mean_on_timedelta(): + # GH 17382 + df = DataFrame({"time": pd.to_timedelta(range(10)), "cat": ["A", "B"] * 5}) + result = df.groupby("cat")["time"].mean() + expected = Series( + pd.to_timedelta([4, 5]), name="time", index=Index(["A", "B"], name="cat") + ) + tm.assert_series_equal(result, expected) + + +def test_groupby_sum_timedelta_with_nat(): + # GH#42659 + df = DataFrame( + { + "a": [1, 1, 2, 2], + "b": [pd.Timedelta("1d"), pd.Timedelta("2d"), pd.Timedelta("3d"), pd.NaT], + } + ) + td3 = pd.Timedelta(days=3) + + gb = df.groupby("a") + + res = gb.sum() + expected = DataFrame({"b": [td3, td3]}, index=Index([1, 2], name="a")) + tm.assert_frame_equal(res, expected) + + res = gb["b"].sum() + tm.assert_series_equal(res, expected["b"]) + + res = gb["b"].sum(min_count=2) + expected = Series([td3, pd.NaT], dtype="m8[ns]", name="b", index=expected.index) + tm.assert_series_equal(res, expected) + + +@pytest.mark.parametrize( + "kernel, has_arg", + [ + ("all", False), + ("any", False), + ("bfill", False), + ("corr", True), + ("corrwith", True), + ("cov", True), + ("cummax", True), + ("cummin", True), + ("cumprod", True), + ("cumsum", True), + ("diff", False), + ("ffill", False), + ("fillna", False), + ("first", True), + ("idxmax", True), + ("idxmin", True), + ("last", True), + ("max", True), + ("mean", True), + ("median", True), + ("min", True), + ("nth", False), + ("nunique", False), + ("pct_change", False), + ("prod", True), + ("quantile", True), + ("sem", True), + ("skew", True), + ("std", True), + ("sum", True), + ("var", True), + ], +) +@pytest.mark.parametrize("numeric_only", [True, False, lib.no_default]) +@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]]) +def test_numeric_only(kernel, has_arg, numeric_only, keys): + # GH#46072 + # drops_nuisance: Whether the op drops nuisance columns even when numeric_only=False + # has_arg: Whether the op has a numeric_only arg + df = DataFrame({"a1": [1, 1], "a2": [2, 2], "a3": [5, 6], "b": 2 * [object]}) + + args = 
get_groupby_method_args(kernel, df) + kwargs = {} if numeric_only is lib.no_default else {"numeric_only": numeric_only} + + gb = df.groupby(keys) + method = getattr(gb, kernel) + if has_arg and numeric_only is True: + # Cases where b does not appear in the result + result = method(*args, **kwargs) + assert "b" not in result.columns + elif ( + # kernels that work on any dtype and have numeric_only arg + kernel in ("first", "last") + or ( + # kernels that work on any dtype and don't have numeric_only arg + kernel in ("any", "all", "bfill", "ffill", "fillna", "nth", "nunique") + and numeric_only is lib.no_default + ) + ): + result = method(*args, **kwargs) + assert "b" in result.columns + elif has_arg: + assert numeric_only is not True + # kernels that are successful on any dtype were above; this will fail + + # object dtypes for transformations are not implemented in Cython and + # have no Python fallback + exception = NotImplementedError if kernel.startswith("cum") else TypeError + + msg = "|".join( + [ + "not allowed for this dtype", + "cannot be performed against 'object' dtypes", + # On PY39 message is "a number"; on PY310 and after is "a real number" + "must be a string or a.* number", + "unsupported operand type", + "function is not implemented for this dtype", + re.escape(f"agg function failed [how->{kernel},dtype->object]"), + ] + ) + if kernel == "idxmin": + msg = "'<' not supported between instances of 'type' and 'type'" + elif kernel == "idxmax": + msg = "'>' not supported between instances of 'type' and 'type'" + with pytest.raises(exception, match=msg): + method(*args, **kwargs) + elif not has_arg and numeric_only is not lib.no_default: + with pytest.raises( + TypeError, match="got an unexpected keyword argument 'numeric_only'" + ): + method(*args, **kwargs) + else: + assert kernel in ("diff", "pct_change") + assert numeric_only is lib.no_default + # Doesn't have numeric_only argument and fails on nuisance columns + with pytest.raises(TypeError, match=r"unsupported operand type"): + method(*args, **kwargs) + + +@pytest.mark.parametrize("dtype", [bool, int, float, object]) +def test_deprecate_numeric_only_series(dtype, groupby_func, request): + # GH#46560 + grouper = [0, 0, 1] + + ser = Series([1, 0, 0], dtype=dtype) + gb = ser.groupby(grouper) + + if groupby_func == "corrwith": + # corrwith is not implemented on SeriesGroupBy + assert not hasattr(gb, groupby_func) + return + + method = getattr(gb, groupby_func) + + expected_ser = Series([1, 0, 0]) + expected_gb = expected_ser.groupby(grouper) + expected_method = getattr(expected_gb, groupby_func) + + args = get_groupby_method_args(groupby_func, ser) + + fails_on_numeric_object = ( + "corr", + "cov", + "cummax", + "cummin", + "cumprod", + "cumsum", + "quantile", + ) + # ops that give an object result on object input + obj_result = ( + "first", + "last", + "nth", + "bfill", + "ffill", + "shift", + "sum", + "diff", + "pct_change", + "var", + "mean", + "median", + "min", + "max", + "prod", + "skew", + ) + + # Test default behavior; kernels that fail may be enabled in the future but kernels + # that succeed should not be allowed to fail (without deprecation, at least) + if groupby_func in fails_on_numeric_object and dtype is object: + if groupby_func == "quantile": + msg = "cannot be performed against 'object' dtypes" + else: + msg = "is not supported for object dtype" + with pytest.raises(TypeError, match=msg): + method(*args) + elif dtype is object: + result = method(*args) + expected = expected_method(*args) + if groupby_func in 
obj_result: + expected = expected.astype(object) + tm.assert_series_equal(result, expected) + + has_numeric_only = ( + "first", + "last", + "max", + "mean", + "median", + "min", + "prod", + "quantile", + "sem", + "skew", + "std", + "sum", + "var", + "cummax", + "cummin", + "cumprod", + "cumsum", + ) + if groupby_func not in has_numeric_only: + msg = "got an unexpected keyword argument 'numeric_only'" + with pytest.raises(TypeError, match=msg): + method(*args, numeric_only=True) + elif dtype is object: + msg = "|".join( + [ + "SeriesGroupBy.sem called with numeric_only=True and dtype object", + "Series.skew does not allow numeric_only=True with non-numeric", + "cum(sum|prod|min|max) is not supported for object dtype", + r"Cannot use numeric_only=True with SeriesGroupBy\..* and non-numeric", + ] + ) + with pytest.raises(TypeError, match=msg): + method(*args, numeric_only=True) + elif dtype == bool and groupby_func == "quantile": + msg = "Allowing bool dtype in SeriesGroupBy.quantile" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#51424 + result = method(*args, numeric_only=True) + expected = method(*args, numeric_only=False) + tm.assert_series_equal(result, expected) + else: + result = method(*args, numeric_only=True) + expected = method(*args, numeric_only=False) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("dtype", [int, float, object]) +@pytest.mark.parametrize( + "kwargs", + [ + {"percentiles": [0.10, 0.20, 0.30], "include": "all", "exclude": None}, + {"percentiles": [0.10, 0.20, 0.30], "include": None, "exclude": ["int"]}, + {"percentiles": [0.10, 0.20, 0.30], "include": ["int"], "exclude": None}, + ], +) +def test_groupby_empty_dataset(dtype, kwargs): + # GH#41575 + df = DataFrame([[1, 2, 3]], columns=["A", "B", "C"], dtype=dtype) + df["B"] = df["B"].astype(int) + df["C"] = df["C"].astype(float) + + result = df.iloc[:0].groupby("A").describe(**kwargs) + expected = df.groupby("A").describe(**kwargs).reset_index(drop=True).iloc[:0] + tm.assert_frame_equal(result, expected) + + result = df.iloc[:0].groupby("A").B.describe(**kwargs) + expected = df.groupby("A").B.describe(**kwargs).reset_index(drop=True).iloc[:0] + expected.index = Index([]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("func", ["min", "max"]) +def test_min_empty_string_dtype(func): + # GH#55619 + pytest.importorskip("pyarrow") + dtype = "string[pyarrow_numpy]" + df = DataFrame({"a": ["a"], "b": "a", "c": "a"}, dtype=dtype).iloc[:0] + result = getattr(df.groupby("a"), func)() + expected = DataFrame( + columns=["b", "c"], dtype=dtype, index=Index([], dtype=dtype, name="a") + ) + tm.assert_frame_equal(result, expected) + + +def test_corrwith_with_1_axis(): + # GH 47723 + df = DataFrame({"a": [1, 1, 2], "b": [3, 7, 4]}) + gb = df.groupby("a") + + msg = "DataFrameGroupBy.corrwith with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = gb.corrwith(df, axis=1) + index = Index( + data=[(1, 0), (1, 1), (1, 2), (2, 2), (2, 0), (2, 1)], + name=("a", None), + ) + expected = Series([np.nan] * 6, index=index) + tm.assert_series_equal(result, expected) + + +def test_multiindex_group_all_columns_when_empty(groupby_func): + # GH 32464 + df = DataFrame({"a": [], "b": [], "c": []}).set_index(["a", "b", "c"]) + gb = df.groupby(["a", "b", "c"], group_keys=False) + method = getattr(gb, groupby_func) + args = get_groupby_method_args(groupby_func, df) + + result = method(*args).index + expected = df.index + 
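+    # even with zero rows, the result must preserve the full three-level
+    # ("a", "b", "c") MultiIndex rather than collapsing to a default index.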
tm.assert_index_equal(result, expected) + + +def test_duplicate_columns(request, groupby_func, as_index): + # GH#50806 + if groupby_func == "corrwith": + msg = "GH#50845 - corrwith fails when there are duplicate columns" + request.node.add_marker(pytest.mark.xfail(reason=msg)) + df = DataFrame([[1, 3, 6], [1, 4, 7], [2, 5, 8]], columns=list("abb")) + args = get_groupby_method_args(groupby_func, df) + gb = df.groupby("a", as_index=as_index) + result = getattr(gb, groupby_func)(*args) + + expected_df = df.set_axis(["a", "b", "c"], axis=1) + expected_args = get_groupby_method_args(groupby_func, expected_df) + expected_gb = expected_df.groupby("a", as_index=as_index) + expected = getattr(expected_gb, groupby_func)(*expected_args) + if groupby_func not in ("size", "ngroup", "cumcount"): + expected = expected.rename(columns={"c": "b"}) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "op", + [ + "sum", + "prod", + "min", + "max", + "median", + "mean", + "skew", + "std", + "var", + "sem", + ], +) +@pytest.mark.parametrize("axis", [0, 1]) +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("sort", [True, False]) +def test_regression_allowlist_methods(op, axis, skipna, sort): + # GH6944 + # GH 17537 + # explicitly test the allowlist methods + raw_frame = DataFrame([0]) + if axis == 0: + frame = raw_frame + msg = "The 'axis' keyword in DataFrame.groupby is deprecated and will be" + else: + frame = raw_frame.T + msg = "DataFrame.groupby with axis=1 is deprecated" + + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped = frame.groupby(level=0, axis=axis, sort=sort) + + if op == "skew": + # skew has skipna + result = getattr(grouped, op)(skipna=skipna) + expected = frame.groupby(level=0).apply( + lambda h: getattr(h, op)(axis=axis, skipna=skipna) + ) + if sort: + expected = expected.sort_index(axis=axis) + tm.assert_frame_equal(result, expected) + else: + result = getattr(grouped, op)() + expected = frame.groupby(level=0).apply(lambda h: getattr(h, op)(axis=axis)) + if sort: + expected = expected.sort_index(axis=axis) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby.py new file mode 100644 index 00000000..49ae2175 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby.py @@ -0,0 +1,3200 @@ +from datetime import datetime +from decimal import Decimal +import re + +import numpy as np +import pytest + +from pandas.errors import ( + PerformanceWarning, + SpecificationError, +) +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Grouper, + Index, + Interval, + MultiIndex, + RangeIndex, + Series, + Timedelta, + Timestamp, + date_range, + to_datetime, +) +import pandas._testing as tm +from pandas.core.arrays import BooleanArray +import pandas.core.common as com +from pandas.tests.groupby import get_groupby_method_args + +pytestmark = pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning") + + +def test_repr(): + # GH18203 + result = repr(Grouper(key="A", level="B")) + expected = "Grouper(key='A', level='B', axis=0, sort=False, dropna=True)" + assert result == expected + + +def test_groupby_std_datetimelike(): + # GH#48481 + tdi = pd.timedelta_range("1 Day", periods=10000) + ser = Series(tdi) + ser[::5] *= 2 # get different std for different groups + + df = ser.to_frame("A") + + 
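+    # derive naive-datetime and tz-aware columns from the same timedeltas so
+    # all three columns share identical underlying int64 values.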
df["B"] = ser + Timestamp(0) + df["C"] = ser + Timestamp(0, tz="UTC") + df.iloc[-1] = pd.NaT # last group includes NaTs + + gb = df.groupby(list(range(5)) * 2000) + + result = gb.std() + + # Note: this does not _exactly_ match what we would get if we did + # [gb.get_group(i).std() for i in gb.groups] + # but it _does_ match the floating point error we get doing the + # same operation on int64 data xref GH#51332 + td1 = Timedelta("2887 days 11:21:02.326710176") + td4 = Timedelta("2886 days 00:42:34.664668096") + exp_ser = Series([td1 * 2, td1, td1, td1, td4], index=np.arange(5)) + expected = DataFrame({"A": exp_ser, "B": exp_ser, "C": exp_ser}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"]) +def test_basic_aggregations(dtype): + data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype) + + index = np.arange(9) + np.random.default_rng(2).shuffle(index) + data = data.reindex(index) + + grouped = data.groupby(lambda x: x // 3, group_keys=False) + + for k, v in grouped: + assert len(v) == 3 + + msg = "using SeriesGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + agged = grouped.aggregate(np.mean) + assert agged[1] == 1 + + msg = "using SeriesGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = grouped.agg(np.mean) + tm.assert_series_equal(agged, expected) # shorthand + tm.assert_series_equal(agged, grouped.mean()) + result = grouped.sum() + msg = "using SeriesGroupBy.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = grouped.agg(np.sum) + tm.assert_series_equal(result, expected) + + expected = grouped.apply(lambda x: x * x.sum()) + transformed = grouped.transform(lambda x: x * x.sum()) + assert transformed[7] == 12 + tm.assert_series_equal(transformed, expected) + + value_grouped = data.groupby(data) + msg = "using SeriesGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = value_grouped.aggregate(np.mean) + tm.assert_series_equal(result, agged, check_index_type=False) + + # complex agg + msg = "using SeriesGroupBy.[mean|std]" + with tm.assert_produces_warning(FutureWarning, match=msg): + agged = grouped.aggregate([np.mean, np.std]) + + msg = r"nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + grouped.aggregate({"one": np.mean, "two": np.std}) + + group_constants = {0: 10, 1: 20, 2: 30} + msg = ( + "Pinning the groupby key to each group in SeriesGroupBy.agg is deprecated, " + "and cases that relied on it will raise in a future version" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#41090 + agged = grouped.agg(lambda x: group_constants[x.name] + x.mean()) + assert agged[1] == 21 + + # corner cases + msg = "Must produce aggregated value" + # exception raised is type Exception + with pytest.raises(Exception, match=msg): + grouped.aggregate(lambda x: x * 2) + + +def test_groupby_nonobject_dtype(mframe, df_mixed_floats): + key = mframe.index.codes[0] + grouped = mframe.groupby(key) + result = grouped.sum() + + expected = mframe.groupby(key.astype("O")).sum() + assert result.index.dtype == np.int8 + assert expected.index.dtype == np.int64 + tm.assert_frame_equal(result, expected, check_index_type=False) + + # GH 3911, mixed frame non-conversion + df = df_mixed_floats.copy() + df["value"] = range(len(df)) + + def max_value(group): + return group.loc[group["value"].idxmax()] + + applied = df.groupby("A").apply(max_value) + result = 
applied.dtypes + expected = df.dtypes + tm.assert_series_equal(result, expected) + + +def test_inconsistent_return_type(): + # GH5592 + # inconsistent return type + df = DataFrame( + { + "A": ["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"], + "B": Series(np.arange(7), dtype="int64"), + "C": date_range("20130101", periods=7), + } + ) + + def f_0(grp): + return grp.iloc[0] + + expected = df.groupby("A").first()[["B"]] + result = df.groupby("A").apply(f_0)[["B"]] + tm.assert_frame_equal(result, expected) + + def f_1(grp): + if grp.name == "Tiger": + return None + return grp.iloc[0] + + result = df.groupby("A").apply(f_1)[["B"]] + # Cast to avoid upcast when setting nan below + e = expected.copy().astype("float64") + e.loc["Tiger"] = np.nan + tm.assert_frame_equal(result, e) + + def f_2(grp): + if grp.name == "Pony": + return None + return grp.iloc[0] + + result = df.groupby("A").apply(f_2)[["B"]] + # Explicit cast to float to avoid implicit cast when setting nan + e = expected.copy().astype({"B": "float"}) + e.loc["Pony"] = np.nan + tm.assert_frame_equal(result, e) + + # 5592 revisited, with datetimes + def f_3(grp): + if grp.name == "Pony": + return None + return grp.iloc[0] + + result = df.groupby("A").apply(f_3)[["C"]] + e = df.groupby("A").first()[["C"]] + e.loc["Pony"] = pd.NaT + tm.assert_frame_equal(result, e) + + # scalar outputs + def f_4(grp): + if grp.name == "Pony": + return None + return grp.iloc[0].loc["C"] + + result = df.groupby("A").apply(f_4) + e = df.groupby("A").first()["C"].copy() + e.loc["Pony"] = np.nan + e.name = None + tm.assert_series_equal(result, e) + + +def test_pass_args_kwargs(ts, tsframe): + def f(x, q=None, axis=0): + return np.percentile(x, q, axis=axis) + + g = lambda x: np.percentile(x, 80, axis=0) + + # Series + ts_grouped = ts.groupby(lambda x: x.month) + agg_result = ts_grouped.agg(np.percentile, 80, axis=0) + apply_result = ts_grouped.apply(np.percentile, 80, axis=0) + trans_result = ts_grouped.transform(np.percentile, 80, axis=0) + + agg_expected = ts_grouped.quantile(0.8) + trans_expected = ts_grouped.transform(g) + + tm.assert_series_equal(apply_result, agg_expected) + tm.assert_series_equal(agg_result, agg_expected) + tm.assert_series_equal(trans_result, trans_expected) + + agg_result = ts_grouped.agg(f, q=80) + apply_result = ts_grouped.apply(f, q=80) + trans_result = ts_grouped.transform(f, q=80) + tm.assert_series_equal(agg_result, agg_expected) + tm.assert_series_equal(apply_result, agg_expected) + tm.assert_series_equal(trans_result, trans_expected) + + # DataFrame + for as_index in [True, False]: + df_grouped = tsframe.groupby(lambda x: x.month, as_index=as_index) + warn = None if as_index else FutureWarning + msg = "A grouping .* was excluded from the result" + with tm.assert_produces_warning(warn, match=msg): + agg_result = df_grouped.agg(np.percentile, 80, axis=0) + with tm.assert_produces_warning(warn, match=msg): + apply_result = df_grouped.apply(DataFrame.quantile, 0.8) + with tm.assert_produces_warning(warn, match=msg): + expected = df_grouped.quantile(0.8) + tm.assert_frame_equal(apply_result, expected, check_names=False) + tm.assert_frame_equal(agg_result, expected) + + apply_result = df_grouped.apply(DataFrame.quantile, [0.4, 0.8]) + with tm.assert_produces_warning(warn, match=msg): + expected_seq = df_grouped.quantile([0.4, 0.8]) + tm.assert_frame_equal(apply_result, expected_seq, check_names=False) + + with tm.assert_produces_warning(warn, match=msg): + agg_result = df_grouped.agg(f, q=80) + with 
tm.assert_produces_warning(warn, match=msg): + apply_result = df_grouped.apply(DataFrame.quantile, q=0.8) + tm.assert_frame_equal(agg_result, expected) + tm.assert_frame_equal(apply_result, expected, check_names=False) + + +@pytest.mark.parametrize("as_index", [True, False]) +def test_pass_args_kwargs_duplicate_columns(tsframe, as_index): + # go through _aggregate_frame with self.axis == 0 and duplicate columns + tsframe.columns = ["A", "B", "A", "C"] + gb = tsframe.groupby(lambda x: x.month, as_index=as_index) + + warn = None if as_index else FutureWarning + msg = "A grouping .* was excluded from the result" + with tm.assert_produces_warning(warn, match=msg): + res = gb.agg(np.percentile, 80, axis=0) + + ex_data = { + 1: tsframe[tsframe.index.month == 1].quantile(0.8), + 2: tsframe[tsframe.index.month == 2].quantile(0.8), + } + expected = DataFrame(ex_data).T + if not as_index: + # TODO: try to get this more consistent? + expected.index = Index(range(2)) + + tm.assert_frame_equal(res, expected) + + +def test_len(): + df = tm.makeTimeDataFrame() + grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]) + assert len(grouped) == len(df) + + grouped = df.groupby([lambda x: x.year, lambda x: x.month]) + expected = len({(x.year, x.month) for x in df.index}) + assert len(grouped) == expected + + # issue 11016 + df = DataFrame({"a": [np.nan] * 3, "b": [1, 2, 3]}) + assert len(df.groupby("a")) == 0 + assert len(df.groupby("b")) == 3 + assert len(df.groupby(["a", "b"])) == 3 + + +def test_basic_regression(): + # regression + result = Series([1.0 * x for x in list(range(1, 10)) * 10]) + + data = np.random.default_rng(2).random(1100) * 10.0 + groupings = Series(data) + + grouped = result.groupby(groupings) + grouped.mean() + + +@pytest.mark.parametrize( + "dtype", ["float64", "float32", "int64", "int32", "int16", "int8"] +) +def test_with_na_groups(dtype): + index = Index(np.arange(10)) + values = Series(np.ones(10), index, dtype=dtype) + labels = Series( + [np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"], + index=index, + ) + + # this SHOULD be an int + grouped = values.groupby(labels) + agged = grouped.agg(len) + expected = Series([4, 2], index=["bar", "foo"]) + + tm.assert_series_equal(agged, expected, check_dtype=False) + + # assert issubclass(agged.dtype.type, np.integer) + + # explicitly return a float from my function + def f(x): + return float(len(x)) + + agged = grouped.agg(f) + expected = Series([4.0, 2.0], index=["bar", "foo"]) + + tm.assert_series_equal(agged, expected) + + +def test_indices_concatenation_order(): + # GH 2808 + + def f1(x): + y = x[(x.b % 2) == 1] ** 2 + if y.empty: + multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"]) + res = DataFrame(columns=["a"], index=multiindex) + return res + else: + y = y.set_index(["b", "c"]) + return y + + def f2(x): + y = x[(x.b % 2) == 1] ** 2 + if y.empty: + return DataFrame() + else: + y = y.set_index(["b", "c"]) + return y + + def f3(x): + y = x[(x.b % 2) == 1] ** 2 + if y.empty: + multiindex = MultiIndex( + levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"] + ) + res = DataFrame(columns=["a", "b"], index=multiindex) + return res + else: + return y + + df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)}) + + df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)}) + + depr_msg = "The behavior of array concatenation with empty entries is deprecated" + + # correct result + result1 = df.groupby("a").apply(f1) + result2 = 
df2.groupby("a").apply(f1) + tm.assert_frame_equal(result1, result2) + + # should fail (not the same number of levels) + msg = "Cannot concat indices that do not have the same number of levels" + with pytest.raises(AssertionError, match=msg): + df.groupby("a").apply(f2) + with pytest.raises(AssertionError, match=msg): + df2.groupby("a").apply(f2) + + # should fail (incorrect shape) + with pytest.raises(AssertionError, match=msg): + df.groupby("a").apply(f3) + with pytest.raises(AssertionError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + df2.groupby("a").apply(f3) + + +def test_attr_wrapper(ts): + grouped = ts.groupby(lambda x: x.weekday()) + + result = grouped.std() + expected = grouped.agg(lambda x: np.std(x, ddof=1)) + tm.assert_series_equal(result, expected) + + # this is pretty cool + result = grouped.describe() + expected = {name: gp.describe() for name, gp in grouped} + expected = DataFrame(expected).T + tm.assert_frame_equal(result, expected) + + # get attribute + result = grouped.dtype + expected = grouped.agg(lambda x: x.dtype) + tm.assert_series_equal(result, expected) + + # make sure raises error + msg = "'SeriesGroupBy' object has no attribute 'foo'" + with pytest.raises(AttributeError, match=msg): + getattr(grouped, "foo") + + +def test_frame_groupby(tsframe): + grouped = tsframe.groupby(lambda x: x.weekday()) + + # aggregate + aggregated = grouped.aggregate("mean") + assert len(aggregated) == 5 + assert len(aggregated.columns) == 4 + + # by string + tscopy = tsframe.copy() + tscopy["weekday"] = [x.weekday() for x in tscopy.index] + stragged = tscopy.groupby("weekday").aggregate("mean") + tm.assert_frame_equal(stragged, aggregated, check_names=False) + + # transform + grouped = tsframe.head(30).groupby(lambda x: x.weekday()) + transformed = grouped.transform(lambda x: x - x.mean()) + assert len(transformed) == 30 + assert len(transformed.columns) == 4 + + # transform propagate + transformed = grouped.transform(lambda x: x.mean()) + for name, group in grouped: + mean = group.mean() + for idx in group.index: + tm.assert_series_equal(transformed.xs(idx), mean, check_names=False) + + # iterate + for weekday, group in grouped: + assert group.index[0].weekday() == weekday + + # groups / group_indices + groups = grouped.groups + indices = grouped.indices + + for k, v in groups.items(): + samething = tsframe.index.take(indices[k]) + assert (samething == v).all() + + +def test_frame_groupby_columns(tsframe): + mapping = {"A": 0, "B": 0, "C": 1, "D": 1} + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped = tsframe.groupby(mapping, axis=1) + + # aggregate + aggregated = grouped.aggregate("mean") + assert len(aggregated) == len(tsframe) + assert len(aggregated.columns) == 2 + + # transform + tf = lambda x: x - x.mean() + msg = "The 'axis' keyword in DataFrame.groupby is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + groupedT = tsframe.T.groupby(mapping, axis=0) + tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf)) + + # iterate + for k, v in grouped: + assert len(v.columns) == 2 + + +def test_frame_set_name_single(df): + grouped = df.groupby("A") + + result = grouped.mean(numeric_only=True) + assert result.index.name == "A" + + result = df.groupby("A", as_index=False).mean(numeric_only=True) + assert result.index.name != "A" + + result = grouped[["C", "D"]].agg("mean") + assert result.index.name == "A" + + result = grouped.agg({"C": 
"mean", "D": "std"}) + assert result.index.name == "A" + + result = grouped["C"].mean() + assert result.index.name == "A" + result = grouped["C"].agg("mean") + assert result.index.name == "A" + result = grouped["C"].agg(["mean", "std"]) + assert result.index.name == "A" + + msg = r"nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + grouped["C"].agg({"foo": "mean", "bar": "std"}) + + +def test_multi_func(df): + col1 = df["A"] + col2 = df["B"] + + grouped = df.groupby([col1.get, col2.get]) + agged = grouped.mean(numeric_only=True) + expected = df.groupby(["A", "B"]).mean() + + # TODO groupby get drops names + tm.assert_frame_equal( + agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False + ) + + # some "groups" with no data + df = DataFrame( + { + "v1": np.random.default_rng(2).standard_normal(6), + "v2": np.random.default_rng(2).standard_normal(6), + "k1": np.array(["b", "b", "b", "a", "a", "a"]), + "k2": np.array(["1", "1", "1", "2", "2", "2"]), + }, + index=["one", "two", "three", "four", "five", "six"], + ) + # only verify that it works for now + grouped = df.groupby(["k1", "k2"]) + grouped.agg("sum") + + +def test_multi_key_multiple_functions(df): + grouped = df.groupby(["A", "B"])["C"] + + agged = grouped.agg(["mean", "std"]) + expected = DataFrame({"mean": grouped.agg("mean"), "std": grouped.agg("std")}) + tm.assert_frame_equal(agged, expected) + + +def test_frame_multi_key_function_list(): + data = DataFrame( + { + "A": [ + "foo", + "foo", + "foo", + "foo", + "bar", + "bar", + "bar", + "bar", + "foo", + "foo", + "foo", + ], + "B": [ + "one", + "one", + "one", + "two", + "one", + "one", + "one", + "two", + "two", + "two", + "one", + ], + "D": np.random.default_rng(2).standard_normal(11), + "E": np.random.default_rng(2).standard_normal(11), + "F": np.random.default_rng(2).standard_normal(11), + } + ) + + grouped = data.groupby(["A", "B"]) + funcs = ["mean", "std"] + agged = grouped.agg(funcs) + expected = pd.concat( + [grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)], + keys=["D", "E", "F"], + axis=1, + ) + assert isinstance(agged.index, MultiIndex) + assert isinstance(expected.index, MultiIndex) + tm.assert_frame_equal(agged, expected) + + +def test_frame_multi_key_function_list_partial_failure(): + data = DataFrame( + { + "A": [ + "foo", + "foo", + "foo", + "foo", + "bar", + "bar", + "bar", + "bar", + "foo", + "foo", + "foo", + ], + "B": [ + "one", + "one", + "one", + "two", + "one", + "one", + "one", + "two", + "two", + "two", + "one", + ], + "C": [ + "dull", + "dull", + "shiny", + "dull", + "dull", + "shiny", + "shiny", + "dull", + "shiny", + "shiny", + "shiny", + ], + "D": np.random.default_rng(2).standard_normal(11), + "E": np.random.default_rng(2).standard_normal(11), + "F": np.random.default_rng(2).standard_normal(11), + } + ) + + grouped = data.groupby(["A", "B"]) + funcs = ["mean", "std"] + msg = re.escape("agg function failed [how->mean,dtype->object]") + with pytest.raises(TypeError, match=msg): + grouped.agg(funcs) + + +@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()]) +def test_groupby_multiple_columns(df, op): + data = df + grouped = data.groupby(["A", "B"]) + + result1 = op(grouped) + + keys = [] + values = [] + for n1, gp1 in data.groupby("A"): + for n2, gp2 in gp1.groupby("B"): + keys.append((n1, n2)) + values.append(op(gp2.loc[:, ["C", "D"]])) + + mi = MultiIndex.from_tuples(keys, names=["A", "B"]) + expected = pd.concat(values, axis=1).T + expected.index = mi + + # 
a little bit crude + for col in ["C", "D"]: + result_col = op(grouped[col]) + pivoted = result1[col] + exp = expected[col] + tm.assert_series_equal(result_col, exp) + tm.assert_series_equal(pivoted, exp) + + # test single series works the same + result = data["C"].groupby([data["A"], data["B"]]).mean() + expected = data.groupby(["A", "B"]).mean()["C"] + + tm.assert_series_equal(result, expected) + + +def test_as_index_select_column(): + # GH 5764 + df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"]) + result = df.groupby("A", as_index=False)["B"].get_group(1) + expected = Series([2, 4], name="B") + tm.assert_series_equal(result, expected) + + result = df.groupby("A", as_index=False, group_keys=True)["B"].apply( + lambda x: x.cumsum() + ) + expected = Series( + [2, 6, 6], name="B", index=MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)]) + ) + tm.assert_series_equal(result, expected) + + +def test_obj_arg_get_group_deprecated(): + depr_msg = "obj is deprecated" + + df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5]}) + expected = df.iloc[df.groupby("b").indices.get(4)] + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + result = df.groupby("b").get_group(4, obj=df) + tm.assert_frame_equal(result, expected) + + +def test_groupby_as_index_select_column_sum_empty_df(): + # GH 35246 + df = DataFrame(columns=Index(["A", "B", "C"], name="alpha")) + left = df.groupby(by="A", as_index=False)["B"].sum(numeric_only=False) + + expected = DataFrame(columns=df.columns[:2], index=range(0)) + # GH#50744 - Columns after selection shouldn't retain names + expected.columns.names = [None] + tm.assert_frame_equal(left, expected) + + +def test_groupby_as_index_agg(df): + grouped = df.groupby("A", as_index=False) + + # single-key + + result = grouped[["C", "D"]].agg("mean") + expected = grouped.mean(numeric_only=True) + tm.assert_frame_equal(result, expected) + + result2 = grouped.agg({"C": "mean", "D": "sum"}) + expected2 = grouped.mean(numeric_only=True) + expected2["D"] = grouped.sum()["D"] + tm.assert_frame_equal(result2, expected2) + + grouped = df.groupby("A", as_index=True) + + msg = r"nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + grouped["C"].agg({"Q": "sum"}) + + # multi-key + + grouped = df.groupby(["A", "B"], as_index=False) + + result = grouped.agg("mean") + expected = grouped.mean() + tm.assert_frame_equal(result, expected) + + result2 = grouped.agg({"C": "mean", "D": "sum"}) + expected2 = grouped.mean() + expected2["D"] = grouped.sum()["D"] + tm.assert_frame_equal(result2, expected2) + + expected3 = grouped["C"].sum() + expected3 = DataFrame(expected3).rename(columns={"C": "Q"}) + msg = "Passing a dictionary to SeriesGroupBy.agg is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result3 = grouped["C"].agg({"Q": "sum"}) + tm.assert_frame_equal(result3, expected3) + + # GH7115 & GH8112 & GH8582 + df = DataFrame( + np.random.default_rng(2).integers(0, 100, (50, 3)), + columns=["jim", "joe", "jolie"], + ) + ts = Series(np.random.default_rng(2).integers(5, 10, 50), name="jim") + + gr = df.groupby(ts) + gr.nth(0) # invokes set_selection_from_grouper internally + + msg = "The behavior of DataFrame.sum with axis=None is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False): + res = gr.apply(sum) + with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False): + alt = df.groupby(ts).apply(sum) + tm.assert_frame_equal(res, alt) + + for attr in ["mean", "max", 
"count", "idxmax", "cumsum", "all"]: + gr = df.groupby(ts, as_index=False) + left = getattr(gr, attr)() + + gr = df.groupby(ts.values, as_index=True) + right = getattr(gr, attr)().reset_index(drop=True) + + tm.assert_frame_equal(left, right) + + +def test_ops_not_as_index(reduction_func): + # GH 10355, 21090 + # Using as_index=False should not modify grouped column + + if reduction_func in ("corrwith", "nth", "ngroup"): + pytest.skip(f"GH 5755: Test not applicable for {reduction_func}") + + df = DataFrame( + np.random.default_rng(2).integers(0, 5, size=(100, 2)), columns=["a", "b"] + ) + expected = getattr(df.groupby("a"), reduction_func)() + if reduction_func == "size": + expected = expected.rename("size") + expected = expected.reset_index() + + if reduction_func != "size": + # 32 bit compat -> groupby preserves dtype whereas reset_index casts to int64 + expected["a"] = expected["a"].astype(df["a"].dtype) + + g = df.groupby("a", as_index=False) + + result = getattr(g, reduction_func)() + tm.assert_frame_equal(result, expected) + + result = g.agg(reduction_func) + tm.assert_frame_equal(result, expected) + + result = getattr(g["b"], reduction_func)() + tm.assert_frame_equal(result, expected) + + result = g["b"].agg(reduction_func) + tm.assert_frame_equal(result, expected) + + +def test_as_index_series_return_frame(df): + grouped = df.groupby("A", as_index=False) + grouped2 = df.groupby(["A", "B"], as_index=False) + + result = grouped["C"].agg("sum") + expected = grouped.agg("sum").loc[:, ["A", "C"]] + assert isinstance(result, DataFrame) + tm.assert_frame_equal(result, expected) + + result2 = grouped2["C"].agg("sum") + expected2 = grouped2.agg("sum").loc[:, ["A", "B", "C"]] + assert isinstance(result2, DataFrame) + tm.assert_frame_equal(result2, expected2) + + result = grouped["C"].sum() + expected = grouped.sum().loc[:, ["A", "C"]] + assert isinstance(result, DataFrame) + tm.assert_frame_equal(result, expected) + + result2 = grouped2["C"].sum() + expected2 = grouped2.sum().loc[:, ["A", "B", "C"]] + assert isinstance(result2, DataFrame) + tm.assert_frame_equal(result2, expected2) + + +def test_as_index_series_column_slice_raises(df): + # GH15072 + grouped = df.groupby("A", as_index=False) + msg = r"Column\(s\) C already selected" + + with pytest.raises(IndexError, match=msg): + grouped["C"].__getitem__("D") + + +def test_groupby_as_index_cython(df): + data = df + + # single-key + grouped = data.groupby("A", as_index=False) + result = grouped.mean(numeric_only=True) + expected = data.groupby(["A"]).mean(numeric_only=True) + expected.insert(0, "A", expected.index) + expected.index = RangeIndex(len(expected)) + tm.assert_frame_equal(result, expected) + + # multi-key + grouped = data.groupby(["A", "B"], as_index=False) + result = grouped.mean() + expected = data.groupby(["A", "B"]).mean() + + arrays = list(zip(*expected.index.values)) + expected.insert(0, "A", arrays[0]) + expected.insert(1, "B", arrays[1]) + expected.index = RangeIndex(len(expected)) + tm.assert_frame_equal(result, expected) + + +def test_groupby_as_index_series_scalar(df): + grouped = df.groupby(["A", "B"], as_index=False) + + # GH #421 + + result = grouped["C"].agg(len) + expected = grouped.agg(len).loc[:, ["A", "B", "C"]] + tm.assert_frame_equal(result, expected) + + +def test_groupby_as_index_corner(df, ts): + msg = "as_index=False only valid with DataFrame" + with pytest.raises(TypeError, match=msg): + ts.groupby(lambda x: x.weekday(), as_index=False) + + msg = "as_index=False only valid for axis=0" + depr_msg = 
"DataFrame.groupby with axis=1 is deprecated" + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + df.groupby(lambda x: x.lower(), as_index=False, axis=1) + + +def test_groupby_multiple_key(): + df = tm.makeTimeDataFrame() + grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]) + agged = grouped.sum() + tm.assert_almost_equal(df.values, agged.values) + + depr_msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + grouped = df.T.groupby( + [lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1 + ) + + agged = grouped.agg(lambda x: x.sum()) + tm.assert_index_equal(agged.index, df.columns) + tm.assert_almost_equal(df.T.values, agged.values) + + agged = grouped.agg(lambda x: x.sum()) + tm.assert_almost_equal(df.T.values, agged.values) + + +def test_groupby_multi_corner(df): + # test that having an all-NA column doesn't mess you up + df = df.copy() + df["bad"] = np.nan + agged = df.groupby(["A", "B"]).mean() + + expected = df.groupby(["A", "B"]).mean() + expected["bad"] = np.nan + + tm.assert_frame_equal(agged, expected) + + +def test_raises_on_nuisance(df): + grouped = df.groupby("A") + msg = re.escape("agg function failed [how->mean,dtype->object]") + with pytest.raises(TypeError, match=msg): + grouped.agg("mean") + with pytest.raises(TypeError, match=msg): + grouped.mean() + + df = df.loc[:, ["A", "C", "D"]] + df["E"] = datetime.now() + grouped = df.groupby("A") + msg = "datetime64 type does not support sum operations" + with pytest.raises(TypeError, match=msg): + grouped.agg("sum") + with pytest.raises(TypeError, match=msg): + grouped.sum() + + # won't work with axis = 1 + depr_msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1) + msg = "does not support reduction 'sum'" + with pytest.raises(TypeError, match=msg): + grouped.agg(lambda x: x.sum(0, numeric_only=False)) + + +@pytest.mark.parametrize( + "agg_function", + ["max", "min"], +) +def test_keep_nuisance_agg(df, agg_function): + # GH 38815 + grouped = df.groupby("A") + result = getattr(grouped, agg_function)() + expected = result.copy() + expected.loc["bar", "B"] = getattr(df.loc[df["A"] == "bar", "B"], agg_function)() + expected.loc["foo", "B"] = getattr(df.loc[df["A"] == "foo", "B"], agg_function)() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "agg_function", + ["sum", "mean", "prod", "std", "var", "sem", "median"], +) +@pytest.mark.parametrize("numeric_only", [True, False]) +def test_omit_nuisance_agg(df, agg_function, numeric_only): + # GH 38774, GH 38815 + grouped = df.groupby("A") + + no_drop_nuisance = ("var", "std", "sem", "mean", "prod", "median") + if agg_function in no_drop_nuisance and not numeric_only: + # Added numeric_only as part of GH#46560; these do not drop nuisance + # columns when numeric_only is False + if agg_function in ("std", "sem"): + klass = ValueError + msg = "could not convert string to float: 'one'" + else: + klass = TypeError + msg = re.escape(f"agg function failed [how->{agg_function},dtype->object]") + with pytest.raises(klass, match=msg): + getattr(grouped, agg_function)(numeric_only=numeric_only) + else: + result = getattr(grouped, agg_function)(numeric_only=numeric_only) + if not numeric_only and agg_function == "sum": + # sum is successful on column B + columns = ["A", "B", "C", "D"] + 
else: + columns = ["A", "C", "D"] + expected = getattr(df.loc[:, columns].groupby("A"), agg_function)( + numeric_only=numeric_only + ) + tm.assert_frame_equal(result, expected) + + +def test_raise_on_nuisance_python_single(df): + # GH 38815 + grouped = df.groupby("A") + with pytest.raises(ValueError, match="could not convert"): + grouped.skew() + + +def test_raise_on_nuisance_python_multiple(three_group): + grouped = three_group.groupby(["A", "B"]) + msg = re.escape("agg function failed [how->mean,dtype->object]") + with pytest.raises(TypeError, match=msg): + grouped.agg("mean") + with pytest.raises(TypeError, match=msg): + grouped.mean() + + +def test_empty_groups_corner(mframe): + # handle empty groups + df = DataFrame( + { + "k1": np.array(["b", "b", "b", "a", "a", "a"]), + "k2": np.array(["1", "1", "1", "2", "2", "2"]), + "k3": ["foo", "bar"] * 3, + "v1": np.random.default_rng(2).standard_normal(6), + "v2": np.random.default_rng(2).standard_normal(6), + } + ) + + grouped = df.groupby(["k1", "k2"]) + result = grouped[["v1", "v2"]].agg("mean") + expected = grouped.mean(numeric_only=True) + tm.assert_frame_equal(result, expected) + + grouped = mframe[3:5].groupby(level=0) + agged = grouped.apply(lambda x: x.mean()) + agged_A = grouped["A"].apply("mean") + tm.assert_series_equal(agged["A"], agged_A) + assert agged.index.name == "first" + + +def test_nonsense_func(): + df = DataFrame([0]) + msg = r"unsupported operand type\(s\) for \+: 'int' and 'str'" + with pytest.raises(TypeError, match=msg): + df.groupby(lambda x: x + "foo") + + +def test_wrap_aggregated_output_multindex(mframe): + df = mframe.T + df["baz", "two"] = "peekaboo" + + keys = [np.array([0, 0, 1]), np.array([0, 0, 1])] + msg = re.escape("agg function failed [how->mean,dtype->object]") + with pytest.raises(TypeError, match=msg): + df.groupby(keys).agg("mean") + agged = df.drop(columns=("baz", "two")).groupby(keys).agg("mean") + assert isinstance(agged.columns, MultiIndex) + + def aggfun(ser): + if ser.name == ("foo", "one"): + raise TypeError("Test error message") + return ser.sum() + + with pytest.raises(TypeError, match="Test error message"): + df.groupby(keys).aggregate(aggfun) + + +def test_groupby_level_apply(mframe): + result = mframe.groupby(level=0).count() + assert result.index.name == "first" + result = mframe.groupby(level=1).count() + assert result.index.name == "second" + + result = mframe["A"].groupby(level=0).count() + assert result.index.name == "first" + + +def test_groupby_level_mapper(mframe): + deleveled = mframe.reset_index() + + mapper0 = {"foo": 0, "bar": 0, "baz": 1, "qux": 1} + mapper1 = {"one": 0, "two": 0, "three": 1} + + result0 = mframe.groupby(mapper0, level=0).sum() + result1 = mframe.groupby(mapper1, level=1).sum() + + mapped_level0 = np.array( + [mapper0.get(x) for x in deleveled["first"]], dtype=np.int64 + ) + mapped_level1 = np.array( + [mapper1.get(x) for x in deleveled["second"]], dtype=np.int64 + ) + expected0 = mframe.groupby(mapped_level0).sum() + expected1 = mframe.groupby(mapped_level1).sum() + expected0.index.name, expected1.index.name = "first", "second" + + tm.assert_frame_equal(result0, expected0) + tm.assert_frame_equal(result1, expected1) + + +def test_groupby_level_nonmulti(): + # GH 1313, GH 13901 + s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name="foo")) + expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name="foo")) + + result = s.groupby(level=0).sum() + tm.assert_series_equal(result, expected) + result = s.groupby(level=[0]).sum() + 
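+ # a one-element list of levels should behave exactly like the scalar level=0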
tm.assert_series_equal(result, expected) + result = s.groupby(level=-1).sum() + tm.assert_series_equal(result, expected) + result = s.groupby(level=[-1]).sum() + tm.assert_series_equal(result, expected) + + msg = "level > 0 or level < -1 only valid with MultiIndex" + with pytest.raises(ValueError, match=msg): + s.groupby(level=1) + with pytest.raises(ValueError, match=msg): + s.groupby(level=-2) + msg = "No group keys passed!" + with pytest.raises(ValueError, match=msg): + s.groupby(level=[]) + msg = "multiple levels only valid with MultiIndex" + with pytest.raises(ValueError, match=msg): + s.groupby(level=[0, 0]) + with pytest.raises(ValueError, match=msg): + s.groupby(level=[0, 1]) + msg = "level > 0 or level < -1 only valid with MultiIndex" + with pytest.raises(ValueError, match=msg): + s.groupby(level=[1]) + + +def test_groupby_complex(): + # GH 12902 + a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1]) + expected = Series((1 + 2j, 5 + 10j)) + + result = a.groupby(level=0).sum() + tm.assert_series_equal(result, expected) + + +def test_groupby_complex_numbers(): + # GH 17927 + df = DataFrame( + [ + {"a": 1, "b": 1 + 1j}, + {"a": 1, "b": 1 + 2j}, + {"a": 4, "b": 1}, + ] + ) + expected = DataFrame( + np.array([1, 1, 1], dtype=np.int64), + index=Index([(1 + 1j), (1 + 2j), (1 + 0j)], name="b"), + columns=Index(["a"], dtype="object"), + ) + result = df.groupby("b", sort=False).count() + tm.assert_frame_equal(result, expected) + + # Sorted by the magnitude of the complex numbers + expected.index = Index([(1 + 0j), (1 + 1j), (1 + 2j)], name="b") + result = df.groupby("b", sort=True).count() + tm.assert_frame_equal(result, expected) + + +def test_groupby_series_indexed_differently(): + s1 = Series( + [5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7], + index=Index(["a", "b", "c", "d", "e", "f", "g"]), + ) + s2 = Series( + [1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index(["a", "b", "d", "f", "g", "h"]) + ) + + grouped = s1.groupby(s2) + agged = grouped.mean() + exp = s1.groupby(s2.reindex(s1.index).get).mean() + tm.assert_series_equal(agged, exp) + + +def test_groupby_with_hier_columns(): + tuples = list( + zip( + *[ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + ) + ) + index = MultiIndex.from_tuples(tuples) + columns = MultiIndex.from_tuples( + [("A", "cat"), ("B", "dog"), ("B", "cat"), ("A", "dog")] + ) + df = DataFrame( + np.random.default_rng(2).standard_normal((8, 4)), index=index, columns=columns + ) + + result = df.groupby(level=0).mean() + tm.assert_index_equal(result.columns, columns) + + depr_msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + gb = df.groupby(level=0, axis=1) + result = gb.mean() + tm.assert_index_equal(result.index, df.index) + + result = df.groupby(level=0).agg("mean") + tm.assert_index_equal(result.columns, columns) + + result = df.groupby(level=0).apply(lambda x: x.mean()) + tm.assert_index_equal(result.columns, columns) + + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + gb = df.groupby(level=0, axis=1) + result = gb.agg(lambda x: x.mean(1)) + tm.assert_index_equal(result.columns, Index(["A", "B"])) + tm.assert_index_equal(result.index, df.index) + + # add a nuisance column + sorted_columns, _ = columns.sortlevel(0) + df["A", "foo"] = "bar" + result = df.groupby(level=0).mean(numeric_only=True) + tm.assert_index_equal(result.columns, df.columns[:-1]) + + +def test_grouping_ndarray(df): + grouped = 
df.groupby(df["A"].values) + result = grouped.sum() + expected = df.groupby(df["A"].rename(None)).sum() + tm.assert_frame_equal(result, expected) + + +def test_groupby_wrong_multi_labels(): + index = Index([0, 1, 2, 3, 4], name="index") + data = DataFrame( + { + "foo": ["foo1", "foo1", "foo2", "foo1", "foo3"], + "bar": ["bar1", "bar2", "bar2", "bar1", "bar1"], + "baz": ["baz1", "baz1", "baz1", "baz2", "baz2"], + "spam": ["spam2", "spam3", "spam2", "spam1", "spam1"], + "data": [20, 30, 40, 50, 60], + }, + index=index, + ) + + grouped = data.groupby(["foo", "bar", "baz", "spam"]) + + result = grouped.agg("mean") + expected = grouped.mean() + tm.assert_frame_equal(result, expected) + + +def test_groupby_series_with_name(df): + result = df.groupby(df["A"]).mean(numeric_only=True) + result2 = df.groupby(df["A"], as_index=False).mean(numeric_only=True) + assert result.index.name == "A" + assert "A" in result2 + + result = df.groupby([df["A"], df["B"]]).mean() + result2 = df.groupby([df["A"], df["B"]], as_index=False).mean() + assert result.index.names == ("A", "B") + assert "A" in result2 + assert "B" in result2 + + +def test_seriesgroupby_name_attr(df): + # GH 6265 + result = df.groupby("A")["C"] + assert result.count().name == "C" + assert result.mean().name == "C" + + testFunc = lambda x: np.sum(x) * 2 + assert result.agg(testFunc).name == "C" + + +def test_consistency_name(): + # GH 12363 + + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "two", "two", "two", "one", "two"], + "C": np.random.default_rng(2).standard_normal(8) + 1.0, + "D": np.arange(8), + } + ) + + expected = df.groupby(["A"]).B.count() + result = df.B.groupby(df.A).count() + tm.assert_series_equal(result, expected) + + +def test_groupby_name_propagation(df): + # GH 6124 + def summarize(df, name=None): + return Series({"count": 1, "mean": 2, "omissions": 3}, name=name) + + def summarize_random_name(df): + # Provide a different name for each Series. In this case, groupby + # should not attempt to propagate the Series name since they are + # inconsistent. + return Series({"count": 1, "mean": 2, "omissions": 3}, name=df.iloc[0]["A"]) + + metrics = df.groupby("A").apply(summarize) + assert metrics.columns.name is None + metrics = df.groupby("A").apply(summarize, "metrics") + assert metrics.columns.name == "metrics" + metrics = df.groupby("A").apply(summarize_random_name) + assert metrics.columns.name is None + + +def test_groupby_nonstring_columns(): + df = DataFrame([np.arange(10) for x in range(10)]) + grouped = df.groupby(0) + result = grouped.mean() + expected = df.groupby(df[0]).mean() + tm.assert_frame_equal(result, expected) + + +def test_groupby_mixed_type_columns(): + # GH 13432, unorderable types in py3 + df = DataFrame([[0, 1, 2]], columns=["A", "B", 0]) + expected = DataFrame([[1, 2]], columns=["B", 0], index=Index([0], name="A")) + + result = df.groupby("A").first() + tm.assert_frame_equal(result, expected) + + result = df.groupby("A").sum() + tm.assert_frame_equal(result, expected) + + +def test_cython_grouper_series_bug_noncontig(): + arr = np.empty((100, 100)) + arr.fill(np.nan) + obj = Series(arr[:, 0]) + inds = np.tile(range(10), 10) + + result = obj.groupby(inds).agg(Series.median) + assert result.isna().all() + + +def test_series_grouper_noncontig_index(): + index = Index(["a" * 10] * 100) + + values = Series(np.random.default_rng(2).standard_normal(50), index=index[::2]) + labels = np.random.default_rng(2).integers(0, 5, 50) + + # it works! 
+ grouped = values.groupby(labels) + + # accessing the index elements used to cause a segfault + f = lambda x: len(set(map(id, x.index))) + grouped.agg(f) + + + def test_convert_objects_leave_decimal_alone(): + s = Series(range(5)) + labels = np.array(["a", "b", "c", "d", "e"], dtype="O") + + def convert_fast(x): + return Decimal(str(x.mean())) + + def convert_force_pure(x): + # each group's values are a view on the original data, so .base is non-empty + assert len(x.values.base) > 0 + return Decimal(str(x.mean())) + + grouped = s.groupby(labels) + + result = grouped.agg(convert_fast) + assert result.dtype == np.object_ + assert isinstance(result.iloc[0], Decimal) + + result = grouped.agg(convert_force_pure) + assert result.dtype == np.object_ + assert isinstance(result.iloc[0], Decimal) + + + def test_groupby_dtype_inference_empty(): + # GH 6733 + df = DataFrame({"x": [], "range": np.arange(0, dtype="int64")}) + assert df["x"].dtype == np.float64 + + result = df.groupby("x").first() + exp_index = Index([], name="x", dtype=np.float64) + expected = DataFrame({"range": Series([], index=exp_index, dtype="int64")}) + tm.assert_frame_equal(result, expected, by_blocks=True) + + + def test_groupby_uint64_float_conversion(): + # GH 30859: groupby converts uint64 to floats sometimes + df = DataFrame({"first": [1], "second": [1], "value": [16148277970000000000]}) + result = df.groupby(["first", "second"])["value"].max() + expected = Series( + [16148277970000000000], + MultiIndex.from_product([[1], [1]], names=["first", "second"]), + name="value", + ) + tm.assert_series_equal(result, expected) + + + def test_groupby_list_infer_array_like(df): + result = df.groupby(list(df["A"])).mean(numeric_only=True) + expected = df.groupby(df["A"]).mean(numeric_only=True) + tm.assert_frame_equal(result, expected, check_names=False) + + with pytest.raises(KeyError, match=r"^'foo'$"): + df.groupby(list(df["A"][:-1])) + + # pathological case of ambiguity + df = DataFrame( + { + "foo": [0, 1], + "bar": [3, 4], + "val": np.random.default_rng(2).standard_normal(2), + } + ) + + result = df.groupby(["foo", "bar"]).mean() + expected = df.groupby([df["foo"], df["bar"]]).mean()[["val"]] + # the list of labels resolves to the same columns, so both groupings must agree + tm.assert_frame_equal(result, expected) + + + def test_groupby_keys_same_size_as_index(): + # GH 11185 + freq = "s" + index = date_range( + start=Timestamp("2015-09-29T11:34:44-0700"), periods=2, freq=freq + ) + df = DataFrame([["A", 10], ["B", 15]], columns=["metric", "values"], index=index) + result = df.groupby([Grouper(level=0, freq=freq), "metric"]).mean() + expected = df.set_index([df.index, "metric"]).astype(float) + + tm.assert_frame_equal(result, expected) + + + def test_groupby_one_row(): + # GH 11741 + msg = r"^'Z'$" + df1 = DataFrame( + np.random.default_rng(2).standard_normal((1, 4)), columns=list("ABCD") + ) + with pytest.raises(KeyError, match=msg): + df1.groupby("Z") + df2 = DataFrame( + np.random.default_rng(2).standard_normal((2, 4)), columns=list("ABCD") + ) + with pytest.raises(KeyError, match=msg): + df2.groupby("Z") + + + def test_groupby_nat_exclude(): + # GH 6992 + df = DataFrame( + { + "values": np.random.default_rng(2).standard_normal(8), + "dt": [ + np.nan, + Timestamp("2013-01-01"), + np.nan, + Timestamp("2013-02-01"), + np.nan, + Timestamp("2013-02-01"), + np.nan, + Timestamp("2013-01-01"), + ], + "str": [np.nan, "a", np.nan, "a", np.nan, "a", np.nan, "b"], + } + ) + grouped = df.groupby("dt") + + expected = [Index([1, 7]), Index([3, 5])] + keys = sorted(grouped.groups.keys()) + assert len(keys) == 2 + for k, e in zip(keys, expected): + # grouped.groups keys are np.datetime64 with system tz + # not to be affected by tz, 
only compare values + tm.assert_index_equal(grouped.groups[k], e) + + # confirm obj is not filtered + tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df) + assert grouped.ngroups == 2 + + expected = { + Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.intp), + Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.intp), + } + + for k in grouped.indices: + tm.assert_numpy_array_equal(grouped.indices[k], expected[k]) + + tm.assert_frame_equal(grouped.get_group(Timestamp("2013-01-01")), df.iloc[[1, 7]]) + tm.assert_frame_equal(grouped.get_group(Timestamp("2013-02-01")), df.iloc[[3, 5]]) + + with pytest.raises(KeyError, match=r"^NaT$"): + grouped.get_group(pd.NaT) + + nan_df = DataFrame( + {"nan": [np.nan, np.nan, np.nan], "nat": [pd.NaT, pd.NaT, pd.NaT]} + ) + assert nan_df["nan"].dtype == "float64" + assert nan_df["nat"].dtype == "datetime64[ns]" + + for key in ["nan", "nat"]: + grouped = nan_df.groupby(key) + assert grouped.groups == {} + assert grouped.ngroups == 0 + assert grouped.indices == {} + with pytest.raises(KeyError, match=r"^nan$"): + grouped.get_group(np.nan) + with pytest.raises(KeyError, match=r"^NaT$"): + grouped.get_group(pd.NaT) + + +def test_groupby_two_group_keys_all_nan(): + # GH #36842: Grouping over two group keys shouldn't raise an error + df = DataFrame({"a": [np.nan, np.nan], "b": [np.nan, np.nan], "c": [1, 2]}) + result = df.groupby(["a", "b"]).indices + assert result == {} + + +def test_groupby_2d_malformed(): + d = DataFrame(index=range(2)) + d["group"] = ["g1", "g2"] + d["zeros"] = [0, 0] + d["ones"] = [1, 1] + d["label"] = ["l1", "l2"] + tmp = d.groupby(["group"]).mean(numeric_only=True) + res_values = np.array([[0.0, 1.0], [0.0, 1.0]]) + tm.assert_index_equal(tmp.columns, Index(["zeros", "ones"])) + tm.assert_numpy_array_equal(tmp.values, res_values) + + +def test_int32_overflow(): + B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000))) + A = np.arange(25000) + df = DataFrame( + { + "A": A, + "B": B, + "C": A, + "D": B, + "E": np.random.default_rng(2).standard_normal(25000), + } + ) + + left = df.groupby(["A", "B", "C", "D"]).sum() + right = df.groupby(["D", "C", "B", "A"]).sum() + assert len(left) == len(right) + + +def test_groupby_sort_multi(): + df = DataFrame( + { + "a": ["foo", "bar", "baz"], + "b": [3, 2, 1], + "c": [0, 1, 2], + "d": np.random.default_rng(2).standard_normal(3), + } + ) + + tups = [tuple(row) for row in df[["a", "b", "c"]].values] + tups = com.asarray_tuplesafe(tups) + result = df.groupby(["a", "b", "c"], sort=True).sum() + tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]]) + + tups = [tuple(row) for row in df[["c", "a", "b"]].values] + tups = com.asarray_tuplesafe(tups) + result = df.groupby(["c", "a", "b"], sort=True).sum() + tm.assert_numpy_array_equal(result.index.values, tups) + + tups = [tuple(x) for x in df[["b", "c", "a"]].values] + tups = com.asarray_tuplesafe(tups) + result = df.groupby(["b", "c", "a"], sort=True).sum() + tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]]) + + df = DataFrame( + { + "a": [0, 1, 2, 0, 1, 2], + "b": [0, 0, 0, 1, 1, 1], + "d": np.random.default_rng(2).standard_normal(6), + } + ) + grouped = df.groupby(["a", "b"])["d"] + result = grouped.sum() + + def _check_groupby(df, result, keys, field, f=lambda x: x.sum()): + tups = [tuple(row) for row in df[keys].values] + tups = com.asarray_tuplesafe(tups) + expected = f(df.groupby(tups)[field]) + for k, v in expected.items(): + assert result[k] == v + + _check_groupby(df, result, ["a", 
"b"], "d") + + +def test_dont_clobber_name_column(): + df = DataFrame( + {"key": ["a", "a", "a", "b", "b", "b"], "name": ["foo", "bar", "baz"] * 2} + ) + + result = df.groupby("key", group_keys=False).apply(lambda x: x) + tm.assert_frame_equal(result, df) + + +def test_skip_group_keys(): + tsf = tm.makeTimeDataFrame() + + grouped = tsf.groupby(lambda x: x.month, group_keys=False) + result = grouped.apply(lambda x: x.sort_values(by="A")[:3]) + + pieces = [group.sort_values(by="A")[:3] for key, group in grouped] + + expected = pd.concat(pieces) + tm.assert_frame_equal(result, expected) + + grouped = tsf["A"].groupby(lambda x: x.month, group_keys=False) + result = grouped.apply(lambda x: x.sort_values()[:3]) + + pieces = [group.sort_values()[:3] for key, group in grouped] + + expected = pd.concat(pieces) + tm.assert_series_equal(result, expected) + + +def test_no_nonsense_name(float_frame): + # GH #995 + s = float_frame["C"].copy() + s.name = None + + result = s.groupby(float_frame["A"]).agg("sum") + assert result.name is None + + +def test_multifunc_sum_bug(): + # GH #1065 + x = DataFrame(np.arange(9).reshape(3, 3)) + x["test"] = 0 + x["fl"] = [1.3, 1.5, 1.6] + + grouped = x.groupby("test") + result = grouped.agg({"fl": "sum", 2: "size"}) + assert result["fl"].dtype == np.float64 + + +def test_handle_dict_return_value(df): + def f(group): + return {"max": group.max(), "min": group.min()} + + def g(group): + return Series({"max": group.max(), "min": group.min()}) + + result = df.groupby("A")["C"].apply(f) + expected = df.groupby("A")["C"].apply(g) + + assert isinstance(result, Series) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("grouper", ["A", ["A", "B"]]) +def test_set_group_name(df, grouper): + def f(group): + assert group.name is not None + return group + + def freduce(group): + assert group.name is not None + return group.sum() + + def freducex(x): + return freduce(x) + + grouped = df.groupby(grouper, group_keys=False) + + # make sure all these work + grouped.apply(f) + grouped.aggregate(freduce) + grouped.aggregate({"C": freduce, "D": freduce}) + grouped.transform(f) + + grouped["C"].apply(f) + grouped["C"].aggregate(freduce) + grouped["C"].aggregate([freduce, freducex]) + grouped["C"].transform(f) + + +def test_group_name_available_in_inference_pass(): + # gh-15062 + df = DataFrame({"a": [0, 0, 1, 1, 2, 2], "b": np.arange(6)}) + + names = [] + + def f(group): + names.append(group.name) + return group.copy() + + df.groupby("a", sort=False, group_keys=False).apply(f) + + expected_names = [0, 1, 2] + assert names == expected_names + + +def test_no_dummy_key_names(df): + # see gh-1291 + result = df.groupby(df["A"].values).sum() + assert result.index.name is None + + result = df.groupby([df["A"].values, df["B"].values]).sum() + assert result.index.names == (None, None) + + +def test_groupby_sort_multiindex_series(): + # series multiindex groupby sort argument was not being passed through + # _compress_group_index + # GH 9444 + index = MultiIndex( + levels=[[1, 2], [1, 2]], + codes=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]], + names=["a", "b"], + ) + mseries = Series([0, 1, 2, 3, 4, 5], index=index) + index = MultiIndex( + levels=[[1, 2], [1, 2]], codes=[[0, 0, 1], [1, 0, 0]], names=["a", "b"] + ) + mseries_result = Series([0, 2, 4], index=index) + + result = mseries.groupby(level=["a", "b"], sort=False).first() + tm.assert_series_equal(result, mseries_result) + result = mseries.groupby(level=["a", "b"], sort=True).first() + tm.assert_series_equal(result, 
mseries_result.sort_index()) + + +def test_groupby_reindex_inside_function(): + periods = 1000 + ind = date_range(start="2012/1/1", freq="5min", periods=periods) + df = DataFrame({"high": np.arange(periods), "low": np.arange(periods)}, index=ind) + + def agg_before(func, fix=False): + """ + Run an aggregate func on the subset of data. + """ + + def _func(data): + d = data.loc[data.index.map(lambda x: x.hour < 11)].dropna() + if fix: + data[data.index[0]] + if len(d) == 0: + return None + return func(d) + + return _func + + grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day)) + closure_bad = grouped.agg({"high": agg_before(np.max)}) + closure_good = grouped.agg({"high": agg_before(np.max, True)}) + + tm.assert_frame_equal(closure_bad, closure_good) + + +def test_groupby_multiindex_missing_pair(): + # GH9049 + df = DataFrame( + { + "group1": ["a", "a", "a", "b"], + "group2": ["c", "c", "d", "c"], + "value": [1, 1, 1, 5], + } + ) + df = df.set_index(["group1", "group2"]) + df_grouped = df.groupby(level=["group1", "group2"], sort=True) + + res = df_grouped.agg("sum") + idx = MultiIndex.from_tuples( + [("a", "c"), ("a", "d"), ("b", "c")], names=["group1", "group2"] + ) + exp = DataFrame([[2], [1], [5]], index=idx, columns=["value"]) + + tm.assert_frame_equal(res, exp) + + +def test_groupby_multiindex_not_lexsorted(): + # GH 11640 + + # define the lexsorted version + lexsorted_mi = MultiIndex.from_tuples( + [("a", ""), ("b1", "c1"), ("b2", "c2")], names=["b", "c"] + ) + lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi) + assert lexsorted_df.columns._is_lexsorted() + + # define the non-lexsorted version + not_lexsorted_df = DataFrame( + columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]] + ) + not_lexsorted_df = not_lexsorted_df.pivot_table( + index="a", columns=["b", "c"], values="d" + ) + not_lexsorted_df = not_lexsorted_df.reset_index() + assert not not_lexsorted_df.columns._is_lexsorted() + + expected = lexsorted_df.groupby("a").mean() + with tm.assert_produces_warning(PerformanceWarning): + result = not_lexsorted_df.groupby("a").mean() + tm.assert_frame_equal(expected, result) + + # a transforming function should work regardless of sort + # GH 14776 + df = DataFrame( + {"x": ["a", "a", "b", "a"], "y": [1, 1, 2, 2], "z": [1, 2, 3, 4]} + ).set_index(["x", "y"]) + assert not df.index._is_lexsorted() + + for level in [0, 1, [0, 1]]: + for sort in [False, True]: + result = df.groupby(level=level, sort=sort, group_keys=False).apply( + DataFrame.drop_duplicates + ) + expected = df + tm.assert_frame_equal(expected, result) + + result = ( + df.sort_index() + .groupby(level=level, sort=sort, group_keys=False) + .apply(DataFrame.drop_duplicates) + ) + expected = df.sort_index() + tm.assert_frame_equal(expected, result) + + +def test_index_label_overlaps_location(): + # checking we don't have any label/location confusion in the + # wake of GH5375 + df = DataFrame(list("ABCDE"), index=[2, 0, 2, 1, 1]) + g = df.groupby(list("ababb")) + actual = g.filter(lambda x: len(x) > 2) + expected = df.iloc[[1, 3, 4]] + tm.assert_frame_equal(actual, expected) + + ser = df[0] + g = ser.groupby(list("ababb")) + actual = g.filter(lambda x: len(x) > 2) + expected = ser.take([1, 3, 4]) + tm.assert_series_equal(actual, expected) + + # and again, with a generic Index of floats + df.index = df.index.astype(float) + g = df.groupby(list("ababb")) + actual = g.filter(lambda x: len(x) > 2) + expected = df.iloc[[1, 3, 4]] + tm.assert_frame_equal(actual, expected) + + ser = df[0] + g = 
ser.groupby(list("ababb")) + actual = g.filter(lambda x: len(x) > 2) + expected = ser.take([1, 3, 4]) + tm.assert_series_equal(actual, expected) + + +def test_transform_doesnt_clobber_ints(): + # GH 7972 + n = 6 + x = np.arange(n) + df = DataFrame({"a": x // 2, "b": 2.0 * x, "c": 3.0 * x}) + df2 = DataFrame({"a": x // 2 * 1.0, "b": 2.0 * x, "c": 3.0 * x}) + + gb = df.groupby("a") + result = gb.transform("mean") + + gb2 = df2.groupby("a") + expected = gb2.transform("mean") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "sort_column", + ["ints", "floats", "strings", ["ints", "floats"], ["ints", "strings"]], +) +@pytest.mark.parametrize( + "group_column", ["int_groups", "string_groups", ["int_groups", "string_groups"]] +) +def test_groupby_preserves_sort(sort_column, group_column): + # Test to ensure that groupby always preserves sort order of original + # object. Issue #8588 and #9651 + + df = DataFrame( + { + "int_groups": [3, 1, 0, 1, 0, 3, 3, 3], + "string_groups": ["z", "a", "z", "a", "a", "g", "g", "g"], + "ints": [8, 7, 4, 5, 2, 9, 1, 1], + "floats": [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5], + "strings": ["z", "d", "a", "e", "word", "word2", "42", "47"], + } + ) + + # Try sorting on different types and with different group types + + df = df.sort_values(by=sort_column) + g = df.groupby(group_column) + + def test_sort(x): + tm.assert_frame_equal(x, x.sort_values(by=sort_column)) + + g.apply(test_sort) + + +def test_pivot_table_values_key_error(): + # This test is designed to replicate the error in issue #14938 + df = DataFrame( + { + "eventDate": date_range(datetime.today(), periods=20, freq="M").tolist(), + "thename": range(0, 20), + } + ) + + df["year"] = df.set_index("eventDate").index.year + df["month"] = df.set_index("eventDate").index.month + + with pytest.raises(KeyError, match="'badname'"): + df.reset_index().pivot_table( + index="year", columns="month", values="badname", aggfunc="count" + ) + + +@pytest.mark.parametrize("columns", ["C", ["C"]]) +@pytest.mark.parametrize("keys", [["A"], ["A", "B"]]) +@pytest.mark.parametrize( + "values", + [ + [True], + [0], + [0.0], + ["a"], + Categorical([0]), + [to_datetime(0)], + date_range(0, 1, 1, tz="US/Eastern"), + pd.period_range("2016-01-01", periods=3, freq="D"), + pd.array([0], dtype="Int64"), + pd.array([0], dtype="Float64"), + pd.array([False], dtype="boolean"), + ], + ids=[ + "bool", + "int", + "float", + "str", + "cat", + "dt64", + "dt64tz", + "period", + "Int64", + "Float64", + "boolean", + ], +) +@pytest.mark.parametrize("method", ["attr", "agg", "apply"]) +@pytest.mark.parametrize( + "op", ["idxmax", "idxmin", "min", "max", "sum", "prod", "skew"] +) +def test_empty_groupby( + columns, keys, values, method, op, request, using_array_manager, dropna +): + # GH8093 & GH26411 + override_dtype = None + + if ( + isinstance(values, Categorical) + and len(keys) == 1 + and op in ["idxmax", "idxmin"] + ): + mark = pytest.mark.xfail( + raises=ValueError, match="attempt to get arg(min|max) of an empty sequence" + ) + request.node.add_marker(mark) + + if isinstance(values, BooleanArray) and op in ["sum", "prod"]: + # We expect to get Int64 back for these + override_dtype = "Int64" + + if isinstance(values[0], bool) and op in ("prod", "sum"): + # sum/product of bools is an integer + override_dtype = "int64" + + df = DataFrame({"A": values, "B": values, "C": values}, columns=list("ABC")) + + if hasattr(values, "dtype"): + # check that we did the construction right + assert (df.dtypes == values.dtype).all() + + df = 
df.iloc[:0] + + gb = df.groupby(keys, group_keys=False, dropna=dropna, observed=False)[columns] + + def get_result(**kwargs): + if method == "attr": + return getattr(gb, op)(**kwargs) + else: + return getattr(gb, method)(op, **kwargs) + + def get_categorical_invalid_expected(): + # Categorical is special without 'observed=True', we get an NaN entry + # corresponding to the unobserved group. If we passed observed=True + # to groupby, expected would just be 'df.set_index(keys)[columns]' + # as below + lev = Categorical([0], dtype=values.dtype) + if len(keys) != 1: + idx = MultiIndex.from_product([lev, lev], names=keys) + else: + # all columns are dropped, but we end up with one row + # Categorical is special without 'observed=True' + idx = Index(lev, name=keys[0]) + + expected = DataFrame([], columns=[], index=idx) + return expected + + is_per = isinstance(df.dtypes.iloc[0], pd.PeriodDtype) + is_dt64 = df.dtypes.iloc[0].kind == "M" + is_cat = isinstance(values, Categorical) + + if isinstance(values, Categorical) and not values.ordered and op in ["min", "max"]: + msg = f"Cannot perform {op} with non-ordered Categorical" + with pytest.raises(TypeError, match=msg): + get_result() + + if isinstance(columns, list): + # i.e. DataframeGroupBy, not SeriesGroupBy + result = get_result(numeric_only=True) + expected = get_categorical_invalid_expected() + tm.assert_equal(result, expected) + return + + if op in ["prod", "sum", "skew"]: + # ops that require more than just ordered-ness + if is_dt64 or is_cat or is_per: + # GH#41291 + # datetime64 -> prod and sum are invalid + if is_dt64: + msg = "datetime64 type does not support" + elif is_per: + msg = "Period type does not support" + else: + msg = "category type does not support" + if op == "skew": + msg = "|".join([msg, "does not support reduction 'skew'"]) + with pytest.raises(TypeError, match=msg): + get_result() + + if not isinstance(columns, list): + # i.e. SeriesGroupBy + return + elif op == "skew": + # TODO: test the numeric_only=True case + return + else: + # i.e. op in ["prod", "sum"]: + # i.e. 
DataFrameGroupBy + # ops that require more than just ordered-ness + # GH#41291 + result = get_result(numeric_only=True) + + # with numeric_only=True, these are dropped, and we get + # an empty DataFrame back + expected = df.set_index(keys)[[]] + if is_cat: + expected = get_categorical_invalid_expected() + tm.assert_equal(result, expected) + return + + result = get_result() + expected = df.set_index(keys)[columns] + if op in ["idxmax", "idxmin"]: + expected = expected.astype(df.index.dtype) + if override_dtype is not None: + expected = expected.astype(override_dtype) + if len(keys) == 1: + expected.index.name = keys[0] + tm.assert_equal(result, expected) + + +def test_empty_groupby_apply_nonunique_columns(): + # GH#44417 + df = DataFrame(np.random.default_rng(2).standard_normal((0, 4))) + df[3] = df[3].astype(np.int64) + df.columns = [0, 1, 2, 0] + gb = df.groupby(df[1], group_keys=False) + res = gb.apply(lambda x: x) + assert (res.dtypes == df.dtypes).all() + + +def test_tuple_as_grouping(): + # https://github.com/pandas-dev/pandas/issues/18314 + df = DataFrame( + { + ("a", "b"): [1, 1, 1, 1], + "a": [2, 2, 2, 2], + "b": [2, 2, 2, 2], + "c": [1, 1, 1, 1], + } + ) + + with pytest.raises(KeyError, match=r"('a', 'b')"): + df[["a", "b", "c"]].groupby(("a", "b")) + + result = df.groupby(("a", "b"))["c"].sum() + expected = Series([4], name="c", index=Index([1], name=("a", "b"))) + tm.assert_series_equal(result, expected) + + +def test_tuple_correct_keyerror(): + # https://github.com/pandas-dev/pandas/issues/18798 + df = DataFrame(1, index=range(3), columns=MultiIndex.from_product([[1, 2], [3, 4]])) + with pytest.raises(KeyError, match=r"^\(7, 8\)$"): + df.groupby((7, 8)).mean() + + +def test_groupby_agg_ohlc_non_first(): + # GH 21716 + df = DataFrame( + [[1], [1]], + columns=Index(["foo"], name="mycols"), + index=date_range("2018-01-01", periods=2, freq="D", name="dti"), + ) + + expected = DataFrame( + [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]], + columns=MultiIndex.from_tuples( + ( + ("foo", "sum", "foo"), + ("foo", "ohlc", "open"), + ("foo", "ohlc", "high"), + ("foo", "ohlc", "low"), + ("foo", "ohlc", "close"), + ), + names=["mycols", None, None], + ), + index=date_range("2018-01-01", periods=2, freq="D", name="dti"), + ) + + result = df.groupby(Grouper(freq="D")).agg(["sum", "ohlc"]) + + tm.assert_frame_equal(result, expected) + + +def test_groupby_multiindex_nat(): + # GH 9236 + values = [ + (pd.NaT, "a"), + (datetime(2012, 1, 2), "a"), + (datetime(2012, 1, 2), "b"), + (datetime(2012, 1, 3), "a"), + ] + mi = MultiIndex.from_tuples(values, names=["date", None]) + ser = Series([3, 2, 2.5, 4], index=mi) + + result = ser.groupby(level=1).mean() + expected = Series([3.0, 2.5], index=["a", "b"]) + tm.assert_series_equal(result, expected) + + +def test_groupby_empty_list_raises(): + # GH 5289 + values = zip(range(10), range(10)) + df = DataFrame(values, columns=["apple", "b"]) + msg = "Grouper and axis must be same length" + with pytest.raises(ValueError, match=msg): + df.groupby([[]]) + + +def test_groupby_multiindex_series_keys_len_equal_group_axis(): + # GH 25704 + index_array = [["x", "x"], ["a", "b"], ["k", "k"]] + index_names = ["first", "second", "third"] + ri = MultiIndex.from_arrays(index_array, names=index_names) + s = Series(data=[1, 2], index=ri) + result = s.groupby(["first", "third"]).sum() + + index_array = [["x"], ["k"]] + index_names = ["first", "third"] + ei = MultiIndex.from_arrays(index_array, names=index_names) + expected = Series([3], index=ei) + + tm.assert_series_equal(result, 
expected) + + +def test_groupby_groups_in_BaseGrouper(): + # GH 26326 + # Test if DataFrame grouped with a pandas.Grouper has correct groups + mi = MultiIndex.from_product([["A", "B"], ["C", "D"]], names=["alpha", "beta"]) + df = DataFrame({"foo": [1, 2, 1, 2], "bar": [1, 2, 3, 4]}, index=mi) + result = df.groupby([Grouper(level="alpha"), "beta"]) + expected = df.groupby(["alpha", "beta"]) + assert result.groups == expected.groups + + result = df.groupby(["beta", Grouper(level="alpha")]) + expected = df.groupby(["beta", "alpha"]) + assert result.groups == expected.groups + + +@pytest.mark.parametrize("group_name", ["x", ["x"]]) +def test_groupby_axis_1(group_name): + # GH 27614 + df = DataFrame( + np.arange(12).reshape(3, 4), index=[0, 1, 0], columns=[10, 20, 10, 20] + ) + df.index.name = "y" + df.columns.name = "x" + + depr_msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + gb = df.groupby(group_name, axis=1) + + results = gb.sum() + expected = df.T.groupby(group_name).sum().T + tm.assert_frame_equal(results, expected) + + # test on MI column + iterables = [["bar", "baz", "foo"], ["one", "two"]] + mi = MultiIndex.from_product(iterables=iterables, names=["x", "x1"]) + df = DataFrame(np.arange(18).reshape(3, 6), index=[0, 1, 0], columns=mi) + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + gb = df.groupby(group_name, axis=1) + results = gb.sum() + expected = df.T.groupby(group_name).sum().T + tm.assert_frame_equal(results, expected) + + +@pytest.mark.parametrize( + "op, expected", + [ + ( + "shift", + { + "time": [ + None, + None, + Timestamp("2019-01-01 12:00:00"), + Timestamp("2019-01-01 12:30:00"), + None, + None, + ] + }, + ), + ( + "bfill", + { + "time": [ + Timestamp("2019-01-01 12:00:00"), + Timestamp("2019-01-01 12:30:00"), + Timestamp("2019-01-01 14:00:00"), + Timestamp("2019-01-01 14:30:00"), + Timestamp("2019-01-01 14:00:00"), + Timestamp("2019-01-01 14:30:00"), + ] + }, + ), + ( + "ffill", + { + "time": [ + Timestamp("2019-01-01 12:00:00"), + Timestamp("2019-01-01 12:30:00"), + Timestamp("2019-01-01 12:00:00"), + Timestamp("2019-01-01 12:30:00"), + Timestamp("2019-01-01 14:00:00"), + Timestamp("2019-01-01 14:30:00"), + ] + }, + ), + ], +) +def test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected): + # GH19995, GH27992: Check that timezone does not drop in shift, bfill, and ffill + tz = tz_naive_fixture + data = { + "id": ["A", "B", "A", "B", "A", "B"], + "time": [ + Timestamp("2019-01-01 12:00:00"), + Timestamp("2019-01-01 12:30:00"), + None, + None, + Timestamp("2019-01-01 14:00:00"), + Timestamp("2019-01-01 14:30:00"), + ], + } + df = DataFrame(data).assign(time=lambda x: x.time.dt.tz_localize(tz)) + + grouped = df.groupby("id") + result = getattr(grouped, op)() + expected = DataFrame(expected).assign(time=lambda x: x.time.dt.tz_localize(tz)) + tm.assert_frame_equal(result, expected) + + +def test_groupby_only_none_group(): + # see GH21624 + # this was crashing with "ValueError: Length of passed values is 1, index implies 0" + df = DataFrame({"g": [None], "x": 1}) + actual = df.groupby("g")["x"].transform("sum") + expected = Series([np.nan], name="x") + + tm.assert_series_equal(actual, expected) + + +def test_groupby_duplicate_index(): + # GH#29189 the groupby call here used to raise + ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0]) + gb = ser.groupby(level=0) + + result = gb.mean() + expected = Series([2, 5.5, 8], index=[2.0, 4.0, 5.0]) + tm.assert_series_equal(result, expected) 
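+
+# Illustrative sketch (behavior exercised above): grouping on ``level=0``
+# merges duplicate index labels into a single group, so the two values stored
+# under the label 4.0 are averaged:
+#
+# >>> ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])
+# >>> ser.groupby(level=0).mean()
+# 2.0    2.0
+# 4.0    5.5
+# 5.0    8.0
+# dtype: float64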
+ + +def test_group_on_empty_multiindex(transformation_func, request): + # GH 47787 + # With one row, those are transforms so the schema should be the same + df = DataFrame( + data=[[1, Timestamp("today"), 3, 4]], + columns=["col_1", "col_2", "col_3", "col_4"], + ) + df["col_3"] = df["col_3"].astype(int) + df["col_4"] = df["col_4"].astype(int) + df = df.set_index(["col_1", "col_2"]) + if transformation_func == "fillna": + args = ("ffill",) + else: + args = () + result = df.iloc[:0].groupby(["col_1"]).transform(transformation_func, *args) + expected = df.groupby(["col_1"]).transform(transformation_func, *args).iloc[:0] + if transformation_func in ("diff", "shift"): + expected = expected.astype(int) + tm.assert_equal(result, expected) + + result = ( + df["col_3"].iloc[:0].groupby(["col_1"]).transform(transformation_func, *args) + ) + expected = ( + df["col_3"].groupby(["col_1"]).transform(transformation_func, *args).iloc[:0] + ) + if transformation_func in ("diff", "shift"): + expected = expected.astype(int) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "idx", + [ + Index(["a", "a"], name="foo"), + MultiIndex.from_tuples((("a", "a"), ("a", "a")), names=["foo", "bar"]), + ], +) +def test_dup_labels_output_shape(groupby_func, idx): + if groupby_func in {"size", "ngroup", "cumcount"}: + pytest.skip(f"Not applicable for {groupby_func}") + + df = DataFrame([[1, 1]], columns=idx) + grp_by = df.groupby([0]) + + args = get_groupby_method_args(groupby_func, df) + result = getattr(grp_by, groupby_func)(*args) + + assert result.shape == (1, 2) + tm.assert_index_equal(result.columns, idx) + + +def test_groupby_crash_on_nunique(axis): + # Fix following 30253 + dti = date_range("2016-01-01", periods=2, name="foo") + df = DataFrame({("A", "B"): [1, 2], ("A", "C"): [1, 3], ("D", "B"): [0, 0]}) + df.columns.names = ("bar", "baz") + df.index = dti + + axis_number = df._get_axis_number(axis) + if not axis_number: + df = df.T + msg = "The 'axis' keyword in DataFrame.groupby is deprecated" + else: + msg = "DataFrame.groupby with axis=1 is deprecated" + + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby(axis=axis_number, level=0) + result = gb.nunique() + + expected = DataFrame({"A": [1, 2], "D": [1, 1]}, index=dti) + expected.columns.name = "bar" + if not axis_number: + expected = expected.T + + tm.assert_frame_equal(result, expected) + + if axis_number == 0: + # same thing, but empty columns + with tm.assert_produces_warning(FutureWarning, match=msg): + gb2 = df[[]].groupby(axis=axis_number, level=0) + exp = expected[[]] + else: + # same thing, but empty rows + with tm.assert_produces_warning(FutureWarning, match=msg): + gb2 = df.loc[[]].groupby(axis=axis_number, level=0) + # default for empty when we can't infer a dtype is float64 + exp = expected.loc[[]].astype(np.float64) + + res = gb2.nunique() + tm.assert_frame_equal(res, exp) + + +def test_groupby_list_level(): + # GH 9790 + expected = DataFrame(np.arange(0, 9).reshape(3, 3), dtype=float) + result = expected.groupby(level=[0]).mean() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "max_seq_items, expected", + [ + (5, "{0: [0], 1: [1], 2: [2], 3: [3], 4: [4]}"), + (4, "{0: [0], 1: [1], 2: [2], 3: [3], ...}"), + (1, "{0: [0], ...}"), + ], +) +def test_groups_repr_truncates(max_seq_items, expected): + # GH 1135 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 1))) + df["a"] = df.index + + with pd.option_context("display.max_seq_items", max_seq_items): + result = 
df.groupby("a").groups.__repr__() + assert result == expected + + result = df.groupby(np.array(df.a)).groups.__repr__() + assert result == expected + + +def test_group_on_two_row_multiindex_returns_one_tuple_key(): + # GH 18451 + df = DataFrame([{"a": 1, "b": 2, "c": 99}, {"a": 1, "b": 2, "c": 88}]) + df = df.set_index(["a", "b"]) + + grp = df.groupby(["a", "b"]) + result = grp.indices + expected = {(1, 2): np.array([0, 1], dtype=np.int64)} + + assert len(result) == 1 + key = (1, 2) + assert (result[key] == expected[key]).all() + + +@pytest.mark.parametrize( + "klass, attr, value", + [ + (DataFrame, "level", "a"), + (DataFrame, "as_index", False), + (DataFrame, "sort", False), + (DataFrame, "group_keys", False), + (DataFrame, "observed", True), + (DataFrame, "dropna", False), + (Series, "level", "a"), + (Series, "as_index", False), + (Series, "sort", False), + (Series, "group_keys", False), + (Series, "observed", True), + (Series, "dropna", False), + ], +) +def test_subsetting_columns_keeps_attrs(klass, attr, value): + # GH 9959 - When subsetting columns, don't drop attributes + df = DataFrame({"a": [1], "b": [2], "c": [3]}) + if attr != "axis": + df = df.set_index("a") + + expected = df.groupby("a", **{attr: value}) + result = expected[["b"]] if klass is DataFrame else expected["b"] + assert getattr(result, attr) == getattr(expected, attr) + + +def test_subsetting_columns_axis_1(): + # GH 37725 + df = DataFrame({"A": [1], "B": [2], "C": [3]}) + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + g = df.groupby([0, 0, 1], axis=1) + match = "Cannot subset columns when using axis=1" + with pytest.raises(ValueError, match=match): + g[["A", "B"]].sum() + + +@pytest.mark.parametrize("func", ["sum", "any", "shift"]) +def test_groupby_column_index_name_lost(func): + # GH: 29764 groupby loses index sometimes + expected = Index(["a"], name="idx") + df = DataFrame([[1]], columns=expected) + df_grouped = df.groupby([1]) + result = getattr(df_grouped, func)().columns + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "infer_string", + [ + False, + pytest.param(True, marks=td.skip_if_no("pyarrow")), + ], +) +def test_groupby_duplicate_columns(infer_string): + # GH: 31735 + df = DataFrame( + {"A": ["f", "e", "g", "h"], "B": ["a", "b", "c", "d"], "C": [1, 2, 3, 4]} + ).astype(object) + df.columns = ["A", "B", "B"] + with pd.option_context("future.infer_string", infer_string): + result = df.groupby([0, 0, 0, 0]).min() + expected = DataFrame( + [["e", "a", 1]], index=np.array([0]), columns=["A", "B", "B"], dtype=object + ) + tm.assert_frame_equal(result, expected) + + +def test_groupby_series_with_tuple_name(): + # GH 37755 + ser = Series([1, 2, 3, 4], index=[1, 1, 2, 2], name=("a", "a")) + ser.index.name = ("b", "b") + result = ser.groupby(level=0).last() + expected = Series([2, 4], index=[1, 2], name=("a", "a")) + expected.index.name = ("b", "b") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "func, values", [("sum", [97.0, 98.0]), ("mean", [24.25, 24.5])] +) +def test_groupby_numerical_stability_sum_mean(func, values): + # GH#38778 + data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15] + df = DataFrame({"group": [1, 2] * 4, "a": data, "b": data}) + result = getattr(df.groupby("group"), func)() + expected = DataFrame({"a": values, "b": values}, index=Index([1, 2], name="group")) + tm.assert_frame_equal(result, expected) + + +def test_groupby_numerical_stability_cumsum(): + # GH#38934 + 
data = [1e16, 1e16, 97, 98, -5e15, -5e15, -5e15, -5e15] + df = DataFrame({"group": [1, 2] * 4, "a": data, "b": data}) + result = df.groupby("group").cumsum() + exp_data = ( + [1e16] * 2 + [1e16 + 96, 1e16 + 98] + [5e15 + 97, 5e15 + 98] + [97.0, 98.0] + ) + expected = DataFrame({"a": exp_data, "b": exp_data}) + tm.assert_frame_equal(result, expected, check_exact=True) + + +def test_groupby_cumsum_skipna_false(): + # GH#46216 don't propagate np.nan above the diagonal + arr = np.random.default_rng(2).standard_normal((5, 5)) + df = DataFrame(arr) + for i in range(5): + df.iloc[i, i] = np.nan + + df["A"] = 1 + gb = df.groupby("A") + + res = gb.cumsum(skipna=False) + + expected = df[[0, 1, 2, 3, 4]].cumsum(skipna=False) + tm.assert_frame_equal(res, expected) + + +def test_groupby_cumsum_timedelta64(): + # GH#46216 don't ignore is_datetimelike in libgroupby.group_cumsum + dti = date_range("2016-01-01", periods=5) + ser = Series(dti) - dti[0] + ser[2] = pd.NaT + + df = DataFrame({"A": 1, "B": ser}) + gb = df.groupby("A") + + res = gb.cumsum(numeric_only=False, skipna=True) + exp = DataFrame({"B": [ser[0], ser[1], pd.NaT, ser[4], ser[4] * 2]}) + tm.assert_frame_equal(res, exp) + + res = gb.cumsum(numeric_only=False, skipna=False) + exp = DataFrame({"B": [ser[0], ser[1], pd.NaT, pd.NaT, pd.NaT]}) + tm.assert_frame_equal(res, exp) + + +def test_groupby_mean_duplicate_index(rand_series_with_duplicate_datetimeindex): + dups = rand_series_with_duplicate_datetimeindex + result = dups.groupby(level=0).mean() + expected = dups.groupby(dups.index).mean() + tm.assert_series_equal(result, expected) + + +def test_groupby_all_nan_groups_drop(): + # GH 15036 + s = Series([1, 2, 3], [np.nan, np.nan, np.nan]) + result = s.groupby(s.index).sum() + expected = Series([], index=Index([], dtype=np.float64), dtype=np.int64) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("numeric_only", [True, False]) +def test_groupby_empty_multi_column(as_index, numeric_only): + # GH 15106 & GH 41998 + df = DataFrame(data=[], columns=["A", "B", "C"]) + gb = df.groupby(["A", "B"], as_index=as_index) + result = gb.sum(numeric_only=numeric_only) + if as_index: + index = MultiIndex([[], []], [[], []], names=["A", "B"]) + columns = ["C"] if not numeric_only else [] + else: + index = RangeIndex(0) + columns = ["A", "B", "C"] if not numeric_only else ["A", "B"] + expected = DataFrame([], columns=columns, index=index) + tm.assert_frame_equal(result, expected) + + +def test_groupby_aggregation_non_numeric_dtype(): + # GH #43108 + df = DataFrame( + [["M", [1]], ["M", [1]], ["W", [10]], ["W", [20]]], columns=["MW", "v"] + ) + + expected = DataFrame( + { + "v": [[1, 1], [10, 20]], + }, + index=Index(["M", "W"], dtype="object", name="MW"), + ) + + gb = df.groupby(by=["MW"]) + result = gb.sum() + tm.assert_frame_equal(result, expected) + + +def test_groupby_aggregation_multi_non_numeric_dtype(): + # GH #42395 + df = DataFrame( + { + "x": [1, 0, 1, 1, 0], + "y": [Timedelta(i, "days") for i in range(1, 6)], + "z": [Timedelta(i * 10, "days") for i in range(1, 6)], + } + ) + + expected = DataFrame( + { + "y": [Timedelta(i, "days") for i in range(7, 9)], + "z": [Timedelta(i * 10, "days") for i in range(7, 9)], + }, + index=Index([0, 1], dtype="int64", name="x"), + ) + + gb = df.groupby(by=["x"]) + result = gb.sum() + tm.assert_frame_equal(result, expected) + + +def test_groupby_aggregation_numeric_with_non_numeric_dtype(): + # GH #43108 + df = DataFrame( + { + "x": [1, 0, 1, 1, 0], + "y": [Timedelta(i, "days") for i in range(1, 
6)], + "z": list(range(1, 6)), + } + ) + + expected = DataFrame( + {"y": [Timedelta(7, "days"), Timedelta(8, "days")], "z": [7, 8]}, + index=Index([0, 1], dtype="int64", name="x"), + ) + + gb = df.groupby(by=["x"]) + result = gb.sum() + tm.assert_frame_equal(result, expected) + + +def test_groupby_filtered_df_std(): + # GH 16174 + dicts = [ + {"filter_col": False, "groupby_col": True, "bool_col": True, "float_col": 10.5}, + {"filter_col": True, "groupby_col": True, "bool_col": True, "float_col": 20.5}, + {"filter_col": True, "groupby_col": True, "bool_col": True, "float_col": 30.5}, + ] + df = DataFrame(dicts) + + df_filter = df[df["filter_col"] == True] # noqa: E712 + dfgb = df_filter.groupby("groupby_col") + result = dfgb.std() + expected = DataFrame( + [[0.0, 0.0, 7.071068]], + columns=["filter_col", "bool_col", "float_col"], + index=Index([True], name="groupby_col"), + ) + tm.assert_frame_equal(result, expected) + + +def test_datetime_categorical_multikey_groupby_indices(): + # GH 26859 + df = DataFrame( + { + "a": Series(list("abc")), + "b": Series( + to_datetime(["2018-01-01", "2018-02-01", "2018-03-01"]), + dtype="category", + ), + "c": Categorical.from_codes([-1, 0, 1], categories=[0, 1]), + } + ) + result = df.groupby(["a", "b"], observed=False).indices + expected = { + ("a", Timestamp("2018-01-01 00:00:00")): np.array([0]), + ("b", Timestamp("2018-02-01 00:00:00")): np.array([1]), + ("c", Timestamp("2018-03-01 00:00:00")): np.array([2]), + } + assert result == expected + + +def test_rolling_wrong_param_min_period(): + # GH34037 + name_l = ["Alice"] * 5 + ["Bob"] * 5 + val_l = [np.nan, np.nan, 1, 2, 3] + [np.nan, 1, 2, 3, 4] + test_df = DataFrame([name_l, val_l]).T + test_df.columns = ["name", "val"] + + result_error_msg = r"__init__\(\) got an unexpected keyword argument 'min_period'" + with pytest.raises(TypeError, match=result_error_msg): + test_df.groupby("name")["val"].rolling(window=2, min_period=1).sum() + + +@pytest.mark.parametrize( + "dtype", + [ + object, + pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")), + ], +) +def test_by_column_values_with_same_starting_value(dtype): + # GH29635 + df = DataFrame( + { + "Name": ["Thomas", "Thomas", "Thomas John"], + "Credit": [1200, 1300, 900], + "Mood": Series(["sad", "happy", "happy"], dtype=dtype), + } + ) + aggregate_details = {"Mood": Series.mode, "Credit": "sum"} + + result = df.groupby(["Name"]).agg(aggregate_details) + expected_result = DataFrame( + { + "Mood": [["happy", "sad"], "happy"], + "Credit": [2500, 900], + "Name": ["Thomas", "Thomas John"], + } + ).set_index("Name") + + tm.assert_frame_equal(result, expected_result) + + +def test_groupby_none_in_first_mi_level(): + # GH#47348 + arr = [[None, 1, 0, 1], [2, 3, 2, 3]] + ser = Series(1, index=MultiIndex.from_arrays(arr, names=["a", "b"])) + result = ser.groupby(level=[0, 1]).sum() + expected = Series( + [1, 2], MultiIndex.from_tuples([(0.0, 2), (1.0, 3)], names=["a", "b"]) + ) + tm.assert_series_equal(result, expected) + + +def test_groupby_none_column_name(): + # GH#47348 + df = DataFrame({None: [1, 1, 2, 2], "b": [1, 1, 2, 3], "c": [4, 5, 6, 7]}) + result = df.groupby(by=[None]).sum() + expected = DataFrame({"b": [2, 5], "c": [9, 13]}, index=Index([1, 2], name=None)) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("selection", [None, "a", ["a"]]) +def test_single_element_list_grouping(selection): + # GH#42795, GH#53500 + df = DataFrame({"a": [1, 2], "b": [np.nan, 5], "c": [np.nan, 2]}, index=["x", "y"]) + grouped = 
df.groupby(["a"]) if selection is None else df.groupby(["a"])[selection] + result = [key for key, _ in grouped] + + expected = [(1,), (2,)] + assert result == expected + + +def test_groupby_string_dtype(): + # GH 40148 + df = DataFrame({"str_col": ["a", "b", "c", "a"], "num_col": [1, 2, 3, 2]}) + df["str_col"] = df["str_col"].astype("string") + expected = DataFrame( + { + "str_col": [ + "a", + "b", + "c", + ], + "num_col": [1.5, 2.0, 3.0], + } + ) + expected["str_col"] = expected["str_col"].astype("string") + grouped = df.groupby("str_col", as_index=False) + result = grouped.mean() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "level_arg, multiindex", [([0], False), ((0,), False), ([0], True), ((0,), True)] +) +def test_single_element_listlike_level_grouping_deprecation(level_arg, multiindex): + # GH 51583 + df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"]) + if multiindex: + df = df.set_index(["a", "b"]) + depr_msg = ( + "Creating a Groupby object with a length-1 list-like " + "level parameter will yield indexes as tuples in a future version. " + "To keep indexes as scalars, create Groupby objects with " + "a scalar level parameter instead." + ) + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + [key for key, _ in df.groupby(level=level_arg)] + + +@pytest.mark.parametrize("func", ["sum", "cumsum", "cumprod", "prod"]) +def test_groupby_avoid_casting_to_float(func): + # GH#37493 + val = 922337203685477580 + df = DataFrame({"a": 1, "b": [val]}) + result = getattr(df.groupby("a"), func)() - val + expected = DataFrame({"b": [0]}, index=Index([1], name="a")) + if func in ["cumsum", "cumprod"]: + expected = expected.reset_index(drop=True) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("func, val", [("sum", 3), ("prod", 2)]) +def test_groupby_sum_support_mask(any_numeric_ea_dtype, func, val): + # GH#37493 + df = DataFrame({"a": 1, "b": [1, 2, pd.NA]}, dtype=any_numeric_ea_dtype) + result = getattr(df.groupby("a"), func)() + expected = DataFrame( + {"b": [val]}, + index=Index([1], name="a", dtype=any_numeric_ea_dtype), + dtype=any_numeric_ea_dtype, + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("val, dtype", [(111, "int"), (222, "uint")]) +def test_groupby_overflow(val, dtype): + # GH#37493 + df = DataFrame({"a": 1, "b": [val, val]}, dtype=f"{dtype}8") + result = df.groupby("a").sum() + expected = DataFrame( + {"b": [val * 2]}, + index=Index([1], name="a", dtype=f"{dtype}8"), + dtype=f"{dtype}64", + ) + tm.assert_frame_equal(result, expected) + + result = df.groupby("a").cumsum() + expected = DataFrame({"b": [val, val * 2]}, dtype=f"{dtype}64") + tm.assert_frame_equal(result, expected) + + result = df.groupby("a").prod() + expected = DataFrame( + {"b": [val * val]}, + index=Index([1], name="a", dtype=f"{dtype}8"), + dtype=f"{dtype}64", + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("skipna, val", [(True, 3), (False, pd.NA)]) +def test_groupby_cumsum_mask(any_numeric_ea_dtype, skipna, val): + # GH#37493 + df = DataFrame({"a": 1, "b": [1, pd.NA, 2]}, dtype=any_numeric_ea_dtype) + result = df.groupby("a").cumsum(skipna=skipna) + expected = DataFrame( + {"b": [1, pd.NA, val]}, + dtype=any_numeric_ea_dtype, + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "val_in, index, val_out", + [ + ( + [1.0, 2.0, 3.0, 4.0, 5.0], + ["foo", "foo", "bar", "baz", "blah"], + [3.0, 4.0, 5.0, 3.0], + ), + ( + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 
+ ["foo", "foo", "bar", "baz", "blah", "blah"], + [3.0, 4.0, 11.0, 3.0], + ), + ], +) +def test_groupby_index_name_in_index_content(val_in, index, val_out): + # GH 48567 + series = Series(data=val_in, name="values", index=Index(index, name="blah")) + result = series.groupby("blah").sum() + expected = Series( + data=val_out, + name="values", + index=Index(["bar", "baz", "blah", "foo"], name="blah"), + ) + tm.assert_series_equal(result, expected) + + result = series.to_frame().groupby("blah").sum() + expected = expected.to_frame() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("n", [1, 10, 32, 100, 1000]) +def test_sum_of_booleans(n): + # GH 50347 + df = DataFrame({"groupby_col": 1, "bool": [True] * n}) + df["bool"] = df["bool"].eq(True) + result = df.groupby("groupby_col").sum() + expected = DataFrame({"bool": [n]}, index=Index([1], name="groupby_col")) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.filterwarnings( + "ignore:invalid value encountered in remainder:RuntimeWarning" +) +@pytest.mark.parametrize("method", ["head", "tail", "nth", "first", "last"]) +def test_groupby_method_drop_na(method): + # GH 21755 + df = DataFrame({"A": ["a", np.nan, "b", np.nan, "c"], "B": range(5)}) + + if method == "nth": + result = getattr(df.groupby("A"), method)(n=0) + else: + result = getattr(df.groupby("A"), method)() + + if method in ["first", "last"]: + expected = DataFrame({"B": [0, 2, 4]}).set_index( + Series(["a", "b", "c"], name="A") + ) + else: + expected = DataFrame({"A": ["a", "b", "c"], "B": [0, 2, 4]}, index=[0, 2, 4]) + tm.assert_frame_equal(result, expected) + + +def test_groupby_reduce_period(): + # GH#51040 + pi = pd.period_range("2016-01-01", periods=100, freq="D") + grps = list(range(10)) * 10 + ser = pi.to_series() + gb = ser.groupby(grps) + + with pytest.raises(TypeError, match="Period type does not support sum operations"): + gb.sum() + with pytest.raises( + TypeError, match="Period type does not support cumsum operations" + ): + gb.cumsum() + with pytest.raises(TypeError, match="Period type does not support prod operations"): + gb.prod() + with pytest.raises( + TypeError, match="Period type does not support cumprod operations" + ): + gb.cumprod() + + res = gb.max() + expected = ser[-10:] + expected.index = Index(range(10), dtype=int) + tm.assert_series_equal(res, expected) + + res = gb.min() + expected = ser[:10] + expected.index = Index(range(10), dtype=int) + tm.assert_series_equal(res, expected) + + +def test_obj_with_exclusions_duplicate_columns(): + # GH#50806 + df = DataFrame([[0, 1, 2, 3]]) + df.columns = [0, 1, 2, 0] + gb = df.groupby(df[1]) + result = gb._obj_with_exclusions + expected = df.take([0, 2, 3], axis=1) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("numeric_only", [True, False]) +def test_groupby_numeric_only_std_no_result(numeric_only): + # GH 51080 + dicts_non_numeric = [{"a": "foo", "b": "bar"}, {"a": "car", "b": "dar"}] + df = DataFrame(dicts_non_numeric) + dfgb = df.groupby("a", as_index=False, sort=False) + + if numeric_only: + result = dfgb.std(numeric_only=True) + expected_df = DataFrame(["foo", "car"], columns=["a"]) + tm.assert_frame_equal(result, expected_df) + else: + with pytest.raises( + ValueError, match="could not convert string to float: 'bar'" + ): + dfgb.std(numeric_only=numeric_only) + + +def test_grouping_with_categorical_interval_columns(): + # GH#34164 + df = DataFrame({"x": [0.1, 0.2, 0.3, -0.4, 0.5], "w": ["a", "b", "a", "c", "a"]}) + qq = pd.qcut(df["x"], q=np.linspace(0, 
1, 5)) + result = df.groupby([qq, "w"], observed=False)["x"].agg("mean") + categorical_index_level_1 = Categorical( + [ + Interval(-0.401, 0.1, closed="right"), + Interval(0.1, 0.2, closed="right"), + Interval(0.2, 0.3, closed="right"), + Interval(0.3, 0.5, closed="right"), + ], + ordered=True, + ) + index_level_2 = ["a", "b", "c"] + mi = MultiIndex.from_product( + [categorical_index_level_1, index_level_2], names=["x", "w"] + ) + expected = Series( + np.array( + [ + 0.1, + np.nan, + -0.4, + np.nan, + 0.2, + np.nan, + 0.3, + np.nan, + np.nan, + 0.5, + np.nan, + np.nan, + ] + ), + index=mi, + name="x", + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("bug_var", [1, "a"]) +def test_groupby_sum_on_nan_should_return_nan(bug_var): + # GH 24196 + df = DataFrame({"A": [bug_var, bug_var, bug_var, np.nan]}) + dfgb = df.groupby(lambda x: x) + result = dfgb.sum(min_count=1) + + expected_df = DataFrame([bug_var, bug_var, bug_var, None], columns=["A"]) + tm.assert_frame_equal(result, expected_df) + + +@pytest.mark.parametrize( + "method", + [ + "count", + "corr", + "cummax", + "cummin", + "cumprod", + "describe", + "rank", + "quantile", + "diff", + "shift", + "all", + "any", + "idxmin", + "idxmax", + "ffill", + "bfill", + "pct_change", + ], +) +def test_groupby_selection_with_methods(df, method): + # some methods which require DatetimeIndex + rng = date_range("2014", periods=len(df)) + df.index = rng + + g = df.groupby(["A"])[["C"]] + g_exp = df[["C"]].groupby(df["A"]) + # TODO check groupby with > 1 col ? + + res = getattr(g, method)() + exp = getattr(g_exp, method)() + + # should always be frames! + tm.assert_frame_equal(res, exp) + + +def test_groupby_selection_other_methods(df): + # some methods which require DatetimeIndex + rng = date_range("2014", periods=len(df)) + df.columns.name = "foo" + df.index = rng + + g = df.groupby(["A"])[["C"]] + g_exp = df[["C"]].groupby(df["A"]) + + # methods which aren't just .foo() + tm.assert_frame_equal(g.fillna(0), g_exp.fillna(0)) + msg = "DataFrameGroupBy.dtypes is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + tm.assert_frame_equal(g.dtypes, g_exp.dtypes) + tm.assert_frame_equal(g.apply(lambda x: x.sum()), g_exp.apply(lambda x: x.sum())) + + tm.assert_frame_equal(g.resample("D").mean(), g_exp.resample("D").mean()) + tm.assert_frame_equal(g.resample("D").ohlc(), g_exp.resample("D").ohlc()) + + tm.assert_frame_equal( + g.filter(lambda x: len(x) == 3), g_exp.filter(lambda x: len(x) == 3) + ) + + +def test_groupby_with_Time_Grouper(): + idx2 = [ + to_datetime("2016-08-31 22:08:12.000"), + to_datetime("2016-08-31 22:09:12.200"), + to_datetime("2016-08-31 22:20:12.400"), + ] + + test_data = DataFrame( + {"quant": [1.0, 1.0, 3.0], "quant2": [1.0, 1.0, 3.0], "time2": idx2} + ) + + expected_output = DataFrame( + { + "time2": date_range("2016-08-31 22:08:00", periods=13, freq="1T"), + "quant": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + "quant2": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + } + ) + + df = test_data.groupby(Grouper(key="time2", freq="1T")).count().reset_index() + + tm.assert_frame_equal(df, expected_output) + + +def test_groupby_series_with_datetimeindex_month_name(): + # GH 48509 + s = Series([0, 1, 0], index=date_range("2022-01-01", periods=3), name="jan") + result = s.groupby(s).count() + expected = Series([2, 1], name="jan") + expected.index.name = "jan" + tm.assert_series_equal(result, expected) + + +def test_get_group_axis_1(): + # GH#54858 + df = DataFrame( + { + "col1": [0, 3, 2, 3], + "col2": 
[4, 1, 6, 7], + "col3": [3, 8, 2, 10], + "col4": [1, 13, 6, 15], + "col5": [-4, 5, 6, -7], + } + ) + with tm.assert_produces_warning(FutureWarning, match="deprecated"): + grouped = df.groupby(axis=1, by=[1, 2, 3, 2, 1]) + result = grouped.get_group(1) + expected = DataFrame( + { + "col1": [0, 3, 2, 3], + "col5": [-4, 5, 6, -7], + } + ) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby_dropna.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby_dropna.py new file mode 100644 index 00000000..099e7bc3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby_dropna.py @@ -0,0 +1,696 @@ +import numpy as np +import pytest + +from pandas.compat.pyarrow import pa_version_under7p0 + +from pandas.core.dtypes.missing import na_value_for_dtype + +import pandas as pd +import pandas._testing as tm +from pandas.tests.groupby import get_groupby_method_args + + +@pytest.mark.parametrize( + "dropna, tuples, outputs", + [ + ( + True, + [["A", "B"], ["B", "A"]], + {"c": [13.0, 123.23], "d": [13.0, 123.0], "e": [13.0, 1.0]}, + ), + ( + False, + [["A", "B"], ["A", np.nan], ["B", "A"]], + { + "c": [13.0, 12.3, 123.23], + "d": [13.0, 233.0, 123.0], + "e": [13.0, 12.0, 1.0], + }, + ), + ], +) +def test_groupby_dropna_multi_index_dataframe_nan_in_one_group( + dropna, tuples, outputs, nulls_fixture +): + # GH 3729 this is to test that NA is in one group + df_list = [ + ["A", "B", 12, 12, 12], + ["A", nulls_fixture, 12.3, 233.0, 12], + ["B", "A", 123.23, 123, 1], + ["A", "B", 1, 1, 1.0], + ] + df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"]) + grouped = df.groupby(["a", "b"], dropna=dropna).sum() + + mi = pd.MultiIndex.from_tuples(tuples, names=list("ab")) + + # Since right now, by default MI will drop NA from levels when we create MI + # via `from_*`, so we need to add NA for level manually afterwards. + if not dropna: + mi = mi.set_levels(["A", "B", np.nan], level="b") + expected = pd.DataFrame(outputs, index=mi) + + tm.assert_frame_equal(grouped, expected) + + +@pytest.mark.parametrize( + "dropna, tuples, outputs", + [ + ( + True, + [["A", "B"], ["B", "A"]], + {"c": [12.0, 123.23], "d": [12.0, 123.0], "e": [12.0, 1.0]}, + ), + ( + False, + [["A", "B"], ["A", np.nan], ["B", "A"], [np.nan, "B"]], + { + "c": [12.0, 13.3, 123.23, 1.0], + "d": [12.0, 234.0, 123.0, 1.0], + "e": [12.0, 13.0, 1.0, 1.0], + }, + ), + ], +) +def test_groupby_dropna_multi_index_dataframe_nan_in_two_groups( + dropna, tuples, outputs, nulls_fixture, nulls_fixture2 +): + # GH 3729 this is to test that NA in different groups with different representations + df_list = [ + ["A", "B", 12, 12, 12], + ["A", nulls_fixture, 12.3, 233.0, 12], + ["B", "A", 123.23, 123, 1], + [nulls_fixture2, "B", 1, 1, 1.0], + ["A", nulls_fixture2, 1, 1, 1.0], + ] + df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"]) + grouped = df.groupby(["a", "b"], dropna=dropna).sum() + + mi = pd.MultiIndex.from_tuples(tuples, names=list("ab")) + + # Since right now, by default MI will drop NA from levels when we create MI + # via `from_*`, so we need to add NA for level manually afterwards. 
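+    # (``MultiIndex.from_tuples`` stores each missing value as the sentinel
+    # code -1 and leaves it out of the level values, which is why
+    # ``set_levels`` below has to write np.nan back into both levels for the
+    # expected index to compare equal.)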
+ if not dropna: + mi = mi.set_levels([["A", "B", np.nan], ["A", "B", np.nan]]) + expected = pd.DataFrame(outputs, index=mi) + + tm.assert_frame_equal(grouped, expected) + + +@pytest.mark.parametrize( + "dropna, idx, outputs", + [ + (True, ["A", "B"], {"b": [123.23, 13.0], "c": [123.0, 13.0], "d": [1.0, 13.0]}), + ( + False, + ["A", "B", np.nan], + { + "b": [123.23, 13.0, 12.3], + "c": [123.0, 13.0, 233.0], + "d": [1.0, 13.0, 12.0], + }, + ), + ], +) +def test_groupby_dropna_normal_index_dataframe(dropna, idx, outputs): + # GH 3729 + df_list = [ + ["B", 12, 12, 12], + [None, 12.3, 233.0, 12], + ["A", 123.23, 123, 1], + ["B", 1, 1, 1.0], + ] + df = pd.DataFrame(df_list, columns=["a", "b", "c", "d"]) + grouped = df.groupby("a", dropna=dropna).sum() + + expected = pd.DataFrame(outputs, index=pd.Index(idx, dtype="object", name="a")) + + tm.assert_frame_equal(grouped, expected) + + +@pytest.mark.parametrize( + "dropna, idx, expected", + [ + (True, ["a", "a", "b", np.nan], pd.Series([3, 3], index=["a", "b"])), + ( + False, + ["a", "a", "b", np.nan], + pd.Series([3, 3, 3], index=["a", "b", np.nan]), + ), + ], +) +def test_groupby_dropna_series_level(dropna, idx, expected): + ser = pd.Series([1, 2, 3, 3], index=idx) + + result = ser.groupby(level=0, dropna=dropna).sum() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "dropna, expected", + [ + (True, pd.Series([210.0, 350.0], index=["a", "b"], name="Max Speed")), + ( + False, + pd.Series([210.0, 350.0, 20.0], index=["a", "b", np.nan], name="Max Speed"), + ), + ], +) +def test_groupby_dropna_series_by(dropna, expected): + ser = pd.Series( + [390.0, 350.0, 30.0, 20.0], + index=["Falcon", "Falcon", "Parrot", "Parrot"], + name="Max Speed", + ) + + result = ser.groupby(["a", "b", "a", np.nan], dropna=dropna).mean() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("dropna", (False, True)) +def test_grouper_dropna_propagation(dropna): + # GH 36604 + df = pd.DataFrame({"A": [0, 0, 1, None], "B": [1, 2, 3, None]}) + gb = df.groupby("A", dropna=dropna) + assert gb.grouper.dropna == dropna + + +@pytest.mark.parametrize( + "index", + [ + pd.RangeIndex(0, 4), + list("abcd"), + pd.MultiIndex.from_product([(1, 2), ("R", "B")], names=["num", "col"]), + ], +) +def test_groupby_dataframe_slice_then_transform(dropna, index): + # GH35014 & GH35612 + expected_data = {"B": [2, 2, 1, np.nan if dropna else 1]} + + df = pd.DataFrame({"A": [0, 0, 1, None], "B": [1, 2, 3, None]}, index=index) + gb = df.groupby("A", dropna=dropna) + + result = gb.transform(len) + expected = pd.DataFrame(expected_data, index=index) + tm.assert_frame_equal(result, expected) + + result = gb[["B"]].transform(len) + expected = pd.DataFrame(expected_data, index=index) + tm.assert_frame_equal(result, expected) + + result = gb["B"].transform(len) + expected = pd.Series(expected_data["B"], index=index, name="B") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "dropna, tuples, outputs", + [ + ( + True, + [["A", "B"], ["B", "A"]], + {"c": [13.0, 123.23], "d": [12.0, 123.0], "e": [1.0, 1.0]}, + ), + ( + False, + [["A", "B"], ["A", np.nan], ["B", "A"]], + { + "c": [13.0, 12.3, 123.23], + "d": [12.0, 233.0, 123.0], + "e": [1.0, 12.0, 1.0], + }, + ), + ], +) +def test_groupby_dropna_multi_index_dataframe_agg(dropna, tuples, outputs): + # GH 3729 + df_list = [ + ["A", "B", 12, 12, 12], + ["A", None, 12.3, 233.0, 12], + ["B", "A", 123.23, 123, 1], + ["A", "B", 1, 1, 1.0], + ] + df = pd.DataFrame(df_list, columns=["a", "b", "c", 
"d", "e"]) + agg_dict = {"c": "sum", "d": "max", "e": "min"} + grouped = df.groupby(["a", "b"], dropna=dropna).agg(agg_dict) + + mi = pd.MultiIndex.from_tuples(tuples, names=list("ab")) + + # Since right now, by default MI will drop NA from levels when we create MI + # via `from_*`, so we need to add NA for level manually afterwards. + if not dropna: + mi = mi.set_levels(["A", "B", np.nan], level="b") + expected = pd.DataFrame(outputs, index=mi) + + tm.assert_frame_equal(grouped, expected) + + +@pytest.mark.arm_slow +@pytest.mark.parametrize( + "datetime1, datetime2", + [ + (pd.Timestamp("2020-01-01"), pd.Timestamp("2020-02-01")), + (pd.Timedelta("-2 days"), pd.Timedelta("-1 days")), + (pd.Period("2020-01-01"), pd.Period("2020-02-01")), + ], +) +@pytest.mark.parametrize("dropna, values", [(True, [12, 3]), (False, [12, 3, 6])]) +def test_groupby_dropna_datetime_like_data( + dropna, values, datetime1, datetime2, unique_nulls_fixture, unique_nulls_fixture2 +): + # 3729 + df = pd.DataFrame( + { + "values": [1, 2, 3, 4, 5, 6], + "dt": [ + datetime1, + unique_nulls_fixture, + datetime2, + unique_nulls_fixture2, + datetime1, + datetime1, + ], + } + ) + + if dropna: + indexes = [datetime1, datetime2] + else: + indexes = [datetime1, datetime2, np.nan] + + grouped = df.groupby("dt", dropna=dropna).agg({"values": "sum"}) + expected = pd.DataFrame({"values": values}, index=pd.Index(indexes, name="dt")) + + tm.assert_frame_equal(grouped, expected) + + +@pytest.mark.parametrize( + "dropna, data, selected_data, levels", + [ + pytest.param( + False, + {"groups": ["a", "a", "b", np.nan], "values": [10, 10, 20, 30]}, + {"values": [0, 1, 0, 0]}, + ["a", "b", np.nan], + id="dropna_false_has_nan", + ), + pytest.param( + True, + {"groups": ["a", "a", "b", np.nan], "values": [10, 10, 20, 30]}, + {"values": [0, 1, 0]}, + None, + id="dropna_true_has_nan", + ), + pytest.param( + # no nan in "groups"; dropna=True|False should be same. + False, + {"groups": ["a", "a", "b", "c"], "values": [10, 10, 20, 30]}, + {"values": [0, 1, 0, 0]}, + None, + id="dropna_false_no_nan", + ), + pytest.param( + # no nan in "groups"; dropna=True|False should be same. + True, + {"groups": ["a", "a", "b", "c"], "values": [10, 10, 20, 30]}, + {"values": [0, 1, 0, 0]}, + None, + id="dropna_true_no_nan", + ), + ], +) +def test_groupby_apply_with_dropna_for_multi_index(dropna, data, selected_data, levels): + # GH 35889 + + df = pd.DataFrame(data) + gb = df.groupby("groups", dropna=dropna) + result = gb.apply(lambda grp: pd.DataFrame({"values": range(len(grp))})) + + mi_tuples = tuple(zip(data["groups"], selected_data["values"])) + mi = pd.MultiIndex.from_tuples(mi_tuples, names=["groups", None]) + # Since right now, by default MI will drop NA from levels when we create MI + # via `from_*`, so we need to add NA for level manually afterwards. 
+    if not dropna and levels:
+        mi = mi.set_levels(levels, level="groups")
+
+    expected = pd.DataFrame(selected_data, index=mi)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("input_index", [None, ["a"], ["a", "b"]])
+@pytest.mark.parametrize("keys", [["a"], ["a", "b"]])
+@pytest.mark.parametrize("series", [True, False])
+def test_groupby_dropna_with_multiindex_input(input_index, keys, series):
+    # GH#46783
+    obj = pd.DataFrame(
+        {
+            "a": [1, np.nan],
+            "b": [1, 1],
+            "c": [2, 3],
+        }
+    )
+
+    expected = obj.set_index(keys)
+    if series:
+        expected = expected["c"]
+    elif input_index == ["a", "b"] and keys == ["a"]:
+        # Column b should not be aggregated
+        expected = expected[["c"]]
+
+    if input_index is not None:
+        obj = obj.set_index(input_index)
+    gb = obj.groupby(keys, dropna=False)
+    if series:
+        gb = gb["c"]
+    result = gb.sum()
+
+    tm.assert_equal(result, expected)
+
+
+def test_groupby_nan_included():
+    # GH 35646
+    data = {"group": ["g1", np.nan, "g1", "g2", np.nan], "B": [0, 1, 2, 3, 4]}
+    df = pd.DataFrame(data)
+    grouped = df.groupby("group", dropna=False)
+    result = grouped.indices
+    dtype = np.intp
+    expected = {
+        "g1": np.array([0, 2], dtype=dtype),
+        "g2": np.array([3], dtype=dtype),
+        np.nan: np.array([1, 4], dtype=dtype),
+    }
+    for result_values, expected_values in zip(result.values(), expected.values()):
+        tm.assert_numpy_array_equal(result_values, expected_values)
+    assert np.isnan(list(result.keys())[2])
+    assert list(result.keys())[0:2] == ["g1", "g2"]
+
+
+def test_groupby_drop_nan_with_multi_index():
+    # GH 39895
+    df = pd.DataFrame([[np.nan, 0, 1]], columns=["a", "b", "c"])
+    df = df.set_index(["a", "b"])
+    result = df.groupby(["a", "b"], dropna=False).first()
+    expected = df
+    tm.assert_frame_equal(result, expected)
+
+
+# sequence_index enumerates all strings made up of x, y, z of length 4
+@pytest.mark.parametrize("sequence_index", range(3**4))
+@pytest.mark.parametrize(
+    "dtype",
+    [
+        None,
+        "UInt8",
+        "Int8",
+        "UInt16",
+        "Int16",
+        "UInt32",
+        "Int32",
+        "UInt64",
+        "Int64",
+        "Float32",
+        "Int64",
+        "Float64",
+        "category",
+        "string",
+        pytest.param(
+            "string[pyarrow]",
+            marks=pytest.mark.skipif(
+                pa_version_under7p0, reason="pyarrow is not installed"
+            ),
+        ),
+        "datetime64[ns]",
+        "period[d]",
+        "Sparse[float]",
+    ],
+)
+@pytest.mark.parametrize("test_series", [True, False])
+def test_no_sort_keep_na(sequence_index, dtype, test_series, as_index):
+    # GH#46584, GH#48794
+
+    # Convert sequence_index into a string sequence, e.g. 5 becomes "zyxx"
+    # This sequence is used for the grouper.
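+    # Worked example (least-significant base-3 digit first): sequence_index=11
+    # has digits 2, 0, 1, 0 for k=0..3, giving the grouper "zxyx", and
+    # sequence_index=80 (the maximum, 3**4 - 1) gives "zzzz".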
+ sequence = "".join( + [{0: "x", 1: "y", 2: "z"}[sequence_index // (3**k) % 3] for k in range(4)] + ) + + # Unique values to use for grouper, depends on dtype + if dtype in ("string", "string[pyarrow]"): + uniques = {"x": "x", "y": "y", "z": pd.NA} + elif dtype in ("datetime64[ns]", "period[d]"): + uniques = {"x": "2016-01-01", "y": "2017-01-01", "z": pd.NA} + else: + uniques = {"x": 1, "y": 2, "z": np.nan} + + df = pd.DataFrame( + { + "key": pd.Series([uniques[label] for label in sequence], dtype=dtype), + "a": [0, 1, 2, 3], + } + ) + gb = df.groupby("key", dropna=False, sort=False, as_index=as_index, observed=False) + if test_series: + gb = gb["a"] + result = gb.sum() + + # Manually compute the groupby sum, use the labels "x", "y", and "z" to avoid + # issues with hashing np.nan + summed = {} + for idx, label in enumerate(sequence): + summed[label] = summed.get(label, 0) + idx + if dtype == "category": + index = pd.CategoricalIndex( + [uniques[e] for e in summed], + df["key"].cat.categories, + name="key", + ) + elif isinstance(dtype, str) and dtype.startswith("Sparse"): + index = pd.Index( + pd.array([uniques[label] for label in summed], dtype=dtype), name="key" + ) + else: + index = pd.Index([uniques[label] for label in summed], dtype=dtype, name="key") + expected = pd.Series(summed.values(), index=index, name="a", dtype=None) + if not test_series: + expected = expected.to_frame() + if not as_index: + expected = expected.reset_index() + if dtype is not None and dtype.startswith("Sparse"): + expected["key"] = expected["key"].astype(dtype) + + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize("test_series", [True, False]) +@pytest.mark.parametrize("dtype", [object, None]) +def test_null_is_null_for_dtype( + sort, dtype, nulls_fixture, nulls_fixture2, test_series +): + # GH#48506 - groups should always result in using the null for the dtype + df = pd.DataFrame({"a": [1, 2]}) + groups = pd.Series([nulls_fixture, nulls_fixture2], dtype=dtype) + obj = df["a"] if test_series else df + gb = obj.groupby(groups, dropna=False, sort=sort) + result = gb.sum() + index = pd.Index([na_value_for_dtype(groups.dtype)]) + expected = pd.DataFrame({"a": [3]}, index=index) + if test_series: + tm.assert_series_equal(result, expected["a"]) + else: + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("index_kind", ["range", "single", "multi"]) +def test_categorical_reducers( + request, reduction_func, observed, sort, as_index, index_kind +): + # GH#36327 + if ( + reduction_func in ("idxmin", "idxmax") + and not observed + and index_kind != "multi" + ): + msg = "GH#10694 - idxmin/max broken for categorical with observed=False" + request.node.add_marker(pytest.mark.xfail(reason=msg)) + + # Ensure there is at least one null value by appending to the end + values = np.append(np.random.default_rng(2).choice([1, 2, None], size=19), None) + df = pd.DataFrame( + {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(20)} + ) + + # Strategy: Compare to dropna=True by filling null values with a new code + df_filled = df.copy() + df_filled["x"] = pd.Categorical(values, categories=[1, 2, 3, 4]).fillna(4) + + if index_kind == "range": + keys = ["x"] + elif index_kind == "single": + keys = ["x"] + df = df.set_index("x") + df_filled = df_filled.set_index("x") + else: + keys = ["x", "x2"] + df["x2"] = df["x"] + df = df.set_index(["x", "x2"]) + df_filled["x2"] = df_filled["x"] + df_filled = df_filled.set_index(["x", "x2"]) + args = get_groupby_method_args(reduction_func, df) + 
args_filled = get_groupby_method_args(reduction_func, df_filled) + if reduction_func == "corrwith" and index_kind == "range": + # Don't include the grouping columns so we can call reset_index + args = (args[0].drop(columns=keys),) + args_filled = (args_filled[0].drop(columns=keys),) + + gb_filled = df_filled.groupby(keys, observed=observed, sort=sort, as_index=True) + expected = getattr(gb_filled, reduction_func)(*args_filled).reset_index() + expected["x"] = expected["x"].replace(4, None) + if index_kind == "multi": + expected["x2"] = expected["x2"].replace(4, None) + if as_index: + if index_kind == "multi": + expected = expected.set_index(["x", "x2"]) + else: + expected = expected.set_index("x") + elif index_kind != "range" and reduction_func != "size": + # size, unlike other methods, has the desired behavior in GH#49519 + expected = expected.drop(columns="x") + if index_kind == "multi": + expected = expected.drop(columns="x2") + if reduction_func in ("idxmax", "idxmin") and index_kind != "range": + # expected was computed with a RangeIndex; need to translate to index values + values = expected["y"].values.tolist() + if index_kind == "single": + values = [np.nan if e == 4 else e for e in values] + else: + values = [(np.nan, np.nan) if e == (4, 4) else e for e in values] + expected["y"] = values + if reduction_func == "size": + # size, unlike other methods, has the desired behavior in GH#49519 + expected = expected.rename(columns={0: "size"}) + if as_index: + expected = expected["size"].rename(None) + + gb_keepna = df.groupby( + keys, dropna=False, observed=observed, sort=sort, as_index=as_index + ) + if as_index or index_kind == "range" or reduction_func == "size": + warn = None + else: + warn = FutureWarning + msg = "A grouping .* was excluded from the result" + with tm.assert_produces_warning(warn, match=msg): + result = getattr(gb_keepna, reduction_func)(*args) + + # size will return a Series, others are DataFrame + tm.assert_equal(result, expected) + + +def test_categorical_transformers( + request, transformation_func, observed, sort, as_index +): + # GH#36327 + if transformation_func == "fillna": + msg = "GH#49651 fillna may incorrectly reorders results when dropna=False" + request.node.add_marker(pytest.mark.xfail(reason=msg, strict=False)) + + values = np.append(np.random.default_rng(2).choice([1, 2, None], size=19), None) + df = pd.DataFrame( + {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(20)} + ) + args = get_groupby_method_args(transformation_func, df) + + # Compute result for null group + null_group_values = df[df["x"].isnull()]["y"] + if transformation_func == "cumcount": + null_group_data = list(range(len(null_group_values))) + elif transformation_func == "ngroup": + if sort: + if observed: + na_group = df["x"].nunique(dropna=False) - 1 + else: + # TODO: Should this be 3? 
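+                # With observed=False the unobserved category 3 would
+                # arguably claim a group number of its own, which would make
+                # the null group 3 rather than nunique(dropna=False) - 1.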
+ na_group = df["x"].nunique(dropna=False) - 1 + else: + na_group = df.iloc[: null_group_values.index[0]]["x"].nunique() + null_group_data = len(null_group_values) * [na_group] + else: + null_group_data = getattr(null_group_values, transformation_func)(*args) + null_group_result = pd.DataFrame({"y": null_group_data}) + + gb_keepna = df.groupby( + "x", dropna=False, observed=observed, sort=sort, as_index=as_index + ) + gb_dropna = df.groupby("x", dropna=True, observed=observed, sort=sort) + + msg = "The default fill_method='ffill' in DataFrameGroupBy.pct_change is deprecated" + if transformation_func == "pct_change": + with tm.assert_produces_warning(FutureWarning, match=msg): + result = getattr(gb_keepna, "pct_change")(*args) + else: + result = getattr(gb_keepna, transformation_func)(*args) + expected = getattr(gb_dropna, transformation_func)(*args) + + for iloc, value in zip( + df[df["x"].isnull()].index.tolist(), null_group_result.values.ravel() + ): + if expected.ndim == 1: + expected.iloc[iloc] = value + else: + expected.iloc[iloc, 0] = value + if transformation_func == "ngroup": + expected[df["x"].notnull() & expected.ge(na_group)] += 1 + if transformation_func not in ("rank", "diff", "pct_change", "shift"): + expected = expected.astype("int64") + + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize("method", ["head", "tail"]) +def test_categorical_head_tail(method, observed, sort, as_index): + # GH#36327 + values = np.random.default_rng(2).choice([1, 2, None], 30) + df = pd.DataFrame( + {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(len(values))} + ) + gb = df.groupby("x", dropna=False, observed=observed, sort=sort, as_index=as_index) + result = getattr(gb, method)() + + if method == "tail": + values = values[::-1] + # Take the top 5 values from each group + mask = ( + ((values == 1) & ((values == 1).cumsum() <= 5)) + | ((values == 2) & ((values == 2).cumsum() <= 5)) + # flake8 doesn't like the vectorized check for None, thinks we should use `is` + | ((values == None) & ((values == None).cumsum() <= 5)) # noqa: E711 + ) + if method == "tail": + mask = mask[::-1] + expected = df[mask] + + tm.assert_frame_equal(result, expected) + + +def test_categorical_agg(): + # GH#36327 + values = np.random.default_rng(2).choice([1, 2, None], 30) + df = pd.DataFrame( + {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(len(values))} + ) + gb = df.groupby("x", dropna=False, observed=False) + result = gb.agg(lambda x: x.sum()) + expected = gb.sum() + tm.assert_frame_equal(result, expected) + + +def test_categorical_transform(): + # GH#36327 + values = np.random.default_rng(2).choice([1, 2, None], 30) + df = pd.DataFrame( + {"x": pd.Categorical(values, categories=[1, 2, 3]), "y": range(len(values))} + ) + gb = df.groupby("x", dropna=False, observed=False) + result = gb.transform(lambda x: x.sum()) + expected = gb.transform("sum") + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby_shift_diff.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby_shift_diff.py new file mode 100644 index 00000000..bb4b9aa8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby_shift_diff.py @@ -0,0 +1,254 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + NaT, + Series, + Timedelta, + Timestamp, + date_range, +) +import pandas._testing as tm + + +def test_group_shift_with_null_key(): + # This test is designed to replicate 
the segfault in issue #13813. + n_rows = 1200 + + # Generate a moderately large dataframe with occasional missing + # values in column `B`, and then group by [`A`, `B`]. This should + # force `-1` in `labels` array of `g.grouper.group_info` exactly + # at those places, where the group-by key is partially missing. + df = DataFrame( + [(i % 12, i % 3 if i % 3 else np.nan, i) for i in range(n_rows)], + dtype=float, + columns=["A", "B", "Z"], + index=None, + ) + g = df.groupby(["A", "B"]) + + expected = DataFrame( + [(i + 12 if i % 3 and i < n_rows - 12 else np.nan) for i in range(n_rows)], + dtype=float, + columns=["Z"], + index=None, + ) + result = g.shift(-1) + + tm.assert_frame_equal(result, expected) + + +def test_group_shift_with_fill_value(): + # GH #24128 + n_rows = 24 + df = DataFrame( + [(i % 12, i % 3, i) for i in range(n_rows)], + dtype=float, + columns=["A", "B", "Z"], + index=None, + ) + g = df.groupby(["A", "B"]) + + expected = DataFrame( + [(i + 12 if i < n_rows - 12 else 0) for i in range(n_rows)], + dtype=float, + columns=["Z"], + index=None, + ) + result = g.shift(-1, fill_value=0) + + tm.assert_frame_equal(result, expected) + + +def test_group_shift_lose_timezone(): + # GH 30134 + now_dt = Timestamp.utcnow().as_unit("ns") + df = DataFrame({"a": [1, 1], "date": now_dt}) + result = df.groupby("a").shift(0).iloc[0] + expected = Series({"date": now_dt}, name=result.name) + tm.assert_series_equal(result, expected) + + +def test_group_diff_real_series(any_real_numpy_dtype): + df = DataFrame( + {"a": [1, 2, 3, 3, 2], "b": [1, 2, 3, 4, 5]}, + dtype=any_real_numpy_dtype, + ) + result = df.groupby("a")["b"].diff() + exp_dtype = "float" + if any_real_numpy_dtype in ["int8", "int16", "float32"]: + exp_dtype = "float32" + expected = Series([np.nan, np.nan, np.nan, 1.0, 3.0], dtype=exp_dtype, name="b") + tm.assert_series_equal(result, expected) + + +def test_group_diff_real_frame(any_real_numpy_dtype): + df = DataFrame( + { + "a": [1, 2, 3, 3, 2], + "b": [1, 2, 3, 4, 5], + "c": [1, 2, 3, 4, 6], + }, + dtype=any_real_numpy_dtype, + ) + result = df.groupby("a").diff() + exp_dtype = "float" + if any_real_numpy_dtype in ["int8", "int16", "float32"]: + exp_dtype = "float32" + expected = DataFrame( + { + "b": [np.nan, np.nan, np.nan, 1.0, 3.0], + "c": [np.nan, np.nan, np.nan, 1.0, 4.0], + }, + dtype=exp_dtype, + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "data", + [ + [ + Timestamp("2013-01-01"), + Timestamp("2013-01-02"), + Timestamp("2013-01-03"), + ], + [Timedelta("5 days"), Timedelta("6 days"), Timedelta("7 days")], + ], +) +def test_group_diff_datetimelike(data): + df = DataFrame({"a": [1, 2, 2], "b": data}) + result = df.groupby("a")["b"].diff() + expected = Series([NaT, NaT, Timedelta("1 days")], name="b") + tm.assert_series_equal(result, expected) + + +def test_group_diff_bool(): + df = DataFrame({"a": [1, 2, 3, 3, 2], "b": [True, True, False, False, True]}) + result = df.groupby("a")["b"].diff() + expected = Series([np.nan, np.nan, np.nan, False, False], name="b") + tm.assert_series_equal(result, expected) + + +def test_group_diff_object_raises(object_dtype): + df = DataFrame( + {"a": ["foo", "bar", "bar"], "b": ["baz", "foo", "foo"]}, dtype=object_dtype + ) + with pytest.raises(TypeError, match=r"unsupported operand type\(s\) for -"): + df.groupby("a")["b"].diff() + + +def test_empty_shift_with_fill(): + # GH 41264, single-index check + df = DataFrame(columns=["a", "b", "c"]) + shifted = df.groupby(["a"]).shift(1) + shifted_with_fill = 
df.groupby(["a"]).shift(1, fill_value=0) + tm.assert_frame_equal(shifted, shifted_with_fill) + tm.assert_index_equal(shifted.index, shifted_with_fill.index) + + +def test_multindex_empty_shift_with_fill(): + # GH 41264, multi-index check + df = DataFrame(columns=["a", "b", "c"]) + shifted = df.groupby(["a", "b"]).shift(1) + shifted_with_fill = df.groupby(["a", "b"]).shift(1, fill_value=0) + tm.assert_frame_equal(shifted, shifted_with_fill) + tm.assert_index_equal(shifted.index, shifted_with_fill.index) + + +def test_shift_periods_freq(): + # GH 54093 + data = {"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]} + df = DataFrame(data, index=date_range(start="20100101", periods=6)) + result = df.groupby(df.index).shift(periods=-2, freq="D") + expected = DataFrame(data, index=date_range(start="2009-12-30", periods=6)) + tm.assert_frame_equal(result, expected) + + +def test_shift_deprecate_freq_and_fill_value(): + # GH 53832 + data = {"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]} + df = DataFrame(data, index=date_range(start="20100101", periods=6)) + msg = ( + "Passing a 'freq' together with a 'fill_value' silently ignores the fill_value" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + df.groupby(df.index).shift(periods=-2, freq="D", fill_value="1") + + +def test_shift_disallow_suffix_if_periods_is_int(): + # GH#44424 + data = {"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]} + df = DataFrame(data) + msg = "Cannot specify `suffix` if `periods` is an int." + with pytest.raises(ValueError, match=msg): + df.groupby("b").shift(1, suffix="fails") + + +def test_group_shift_with_multiple_periods(): + # GH#44424 + df = DataFrame({"a": [1, 2, 3, 3, 2], "b": [True, True, False, False, True]}) + + shifted_df = df.groupby("b")[["a"]].shift([0, 1]) + expected_df = DataFrame( + {"a_0": [1, 2, 3, 3, 2], "a_1": [np.nan, 1.0, np.nan, 3.0, 2.0]} + ) + tm.assert_frame_equal(shifted_df, expected_df) + + # series + shifted_series = df.groupby("b")["a"].shift([0, 1]) + tm.assert_frame_equal(shifted_series, expected_df) + + +def test_group_shift_with_multiple_periods_and_freq(): + # GH#44424 + df = DataFrame( + {"a": [1, 2, 3, 4, 5], "b": [True, True, False, False, True]}, + index=date_range("1/1/2000", periods=5, freq="H"), + ) + shifted_df = df.groupby("b")[["a"]].shift( + [0, 1], + freq="H", + ) + expected_df = DataFrame( + { + "a_0": [1.0, 2.0, 3.0, 4.0, 5.0, np.nan], + "a_1": [ + np.nan, + 1.0, + 2.0, + 3.0, + 4.0, + 5.0, + ], + }, + index=date_range("1/1/2000", periods=6, freq="H"), + ) + tm.assert_frame_equal(shifted_df, expected_df) + + +def test_group_shift_with_multiple_periods_and_fill_value(): + # GH#44424 + df = DataFrame( + {"a": [1, 2, 3, 4, 5], "b": [True, True, False, False, True]}, + ) + shifted_df = df.groupby("b")[["a"]].shift([0, 1], fill_value=-1) + expected_df = DataFrame( + {"a_0": [1, 2, 3, 4, 5], "a_1": [-1, 1, -1, 3, 2]}, + ) + tm.assert_frame_equal(shifted_df, expected_df) + + +def test_group_shift_with_multiple_periods_and_both_fill_and_freq_deprecated(): + # GH#44424 + df = DataFrame( + {"a": [1, 2, 3, 4, 5], "b": [True, True, False, False, True]}, + index=date_range("1/1/2000", periods=5, freq="H"), + ) + msg = ( + "Passing a 'freq' together with a 'fill_value' silently ignores the " + "fill_value" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + df.groupby("b")[["a"]].shift([1, 2], fill_value=1, freq="H") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby_subclass.py 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby_subclass.py new file mode 100644 index 00000000..678211ea --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_groupby_subclass.py @@ -0,0 +1,109 @@ +from datetime import datetime + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, +) +import pandas._testing as tm +from pandas.tests.groupby import get_groupby_method_args + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" +) + + +@pytest.mark.parametrize( + "obj", + [ + tm.SubclassedDataFrame({"A": np.arange(0, 10)}), + tm.SubclassedSeries(np.arange(0, 10), name="A"), + ], +) +def test_groupby_preserves_subclass(obj, groupby_func): + # GH28330 -- preserve subclass through groupby operations + + if isinstance(obj, Series) and groupby_func in {"corrwith"}: + pytest.skip(f"Not applicable for Series and {groupby_func}") + + grouped = obj.groupby(np.arange(0, 10)) + + # Groups should preserve subclass type + assert isinstance(grouped.get_group(0), type(obj)) + + args = get_groupby_method_args(groupby_func, obj) + + result1 = getattr(grouped, groupby_func)(*args) + result2 = grouped.agg(groupby_func, *args) + + # Reduction or transformation kernels should preserve type + slices = {"ngroup", "cumcount", "size"} + if isinstance(obj, DataFrame) and groupby_func in slices: + assert isinstance(result1, tm.SubclassedSeries) + else: + assert isinstance(result1, type(obj)) + + # Confirm .agg() groupby operations return same results + if isinstance(result1, DataFrame): + tm.assert_frame_equal(result1, result2) + else: + tm.assert_series_equal(result1, result2) + + +def test_groupby_preserves_metadata(): + # GH-37343 + custom_df = tm.SubclassedDataFrame({"a": [1, 2, 3], "b": [1, 1, 2], "c": [7, 8, 9]}) + assert "testattr" in custom_df._metadata + custom_df.testattr = "hello" + for _, group_df in custom_df.groupby("c"): + assert group_df.testattr == "hello" + + # GH-45314 + def func(group): + assert isinstance(group, tm.SubclassedDataFrame) + assert hasattr(group, "testattr") + return group.testattr + + result = custom_df.groupby("c").apply(func) + expected = tm.SubclassedSeries(["hello"] * 3, index=Index([7, 8, 9], name="c")) + tm.assert_series_equal(result, expected) + + def func2(group): + assert isinstance(group, tm.SubclassedSeries) + assert hasattr(group, "testattr") + return group.testattr + + custom_series = tm.SubclassedSeries([1, 2, 3]) + custom_series.testattr = "hello" + result = custom_series.groupby(custom_df["c"]).apply(func2) + tm.assert_series_equal(result, expected) + result = custom_series.groupby(custom_df["c"]).agg(func2) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("obj", [DataFrame, tm.SubclassedDataFrame]) +def test_groupby_resample_preserves_subclass(obj): + # GH28330 -- preserve subclass through groupby.resample() + + df = obj( + { + "Buyer": "Carl Carl Carl Carl Joe Carl".split(), + "Quantity": [18, 3, 5, 1, 9, 3], + "Date": [ + datetime(2013, 9, 1, 13, 0), + datetime(2013, 9, 1, 13, 5), + datetime(2013, 10, 1, 20, 0), + datetime(2013, 10, 3, 10, 0), + datetime(2013, 12, 2, 12, 0), + datetime(2013, 9, 2, 14, 0), + ], + } + ) + df = df.set_index("Date") + + # Confirm groupby.resample() preserves dataframe type + result = df.groupby("Buyer").resample("5D").sum() + assert isinstance(result, obj) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_grouping.py 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_grouping.py new file mode 100644 index 00000000..e0793ada --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_grouping.py @@ -0,0 +1,1169 @@ +""" +test where we are determining what we are grouping, or getting groups +""" +from datetime import ( + date, + timedelta, +) + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + CategoricalIndex, + DataFrame, + Grouper, + Index, + MultiIndex, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm +from pandas.core.groupby.grouper import Grouping + +# selection +# -------------------------------- + + +class TestSelection: + def test_select_bad_cols(self): + df = DataFrame([[1, 2]], columns=["A", "B"]) + g = df.groupby("A") + with pytest.raises(KeyError, match="\"Columns not found: 'C'\""): + g[["C"]] + + with pytest.raises(KeyError, match="^[^A]+$"): + # A should not be referenced as a bad column... + # will have to rethink regex if you change message! + g[["A", "C"]] + + def test_groupby_duplicated_column_errormsg(self): + # GH7511 + df = DataFrame( + columns=["A", "B", "A", "C"], data=[range(4), range(2, 6), range(0, 8, 2)] + ) + + msg = "Grouper for 'A' not 1-dimensional" + with pytest.raises(ValueError, match=msg): + df.groupby("A") + with pytest.raises(ValueError, match=msg): + df.groupby(["A", "B"]) + + grouped = df.groupby("B") + c = grouped.count() + assert c.columns.nlevels == 1 + assert c.columns.size == 3 + + def test_column_select_via_attr(self, df): + result = df.groupby("A").C.sum() + expected = df.groupby("A")["C"].sum() + tm.assert_series_equal(result, expected) + + df["mean"] = 1.5 + result = df.groupby("A").mean(numeric_only=True) + expected = df.groupby("A")[["C", "D", "mean"]].agg("mean") + tm.assert_frame_equal(result, expected) + + def test_getitem_list_of_columns(self): + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.default_rng(2).standard_normal(8), + "D": np.random.default_rng(2).standard_normal(8), + "E": np.random.default_rng(2).standard_normal(8), + } + ) + + result = df.groupby("A")[["C", "D"]].mean() + result2 = df.groupby("A")[df.columns[2:4]].mean() + + expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean() + + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + def test_getitem_numeric_column_names(self): + # GH #13731 + df = DataFrame( + { + 0: list("abcd") * 2, + 2: np.random.default_rng(2).standard_normal(8), + 4: np.random.default_rng(2).standard_normal(8), + 6: np.random.default_rng(2).standard_normal(8), + } + ) + result = df.groupby(0)[df.columns[1:3]].mean() + result2 = df.groupby(0)[[2, 4]].mean() + + expected = df.loc[:, [0, 2, 4]].groupby(0).mean() + + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + # per GH 23566 enforced deprecation raises a ValueError + with pytest.raises(ValueError, match="Cannot subset columns with a tuple"): + df.groupby(0)[2, 4].mean() + + def test_getitem_single_tuple_of_columns_raises(self, df): + # per GH 23566 enforced deprecation raises a ValueError + with pytest.raises(ValueError, match="Cannot subset columns with a tuple"): + df.groupby("A")["C", "D"].mean() + + def test_getitem_single_column(self): + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", 
"two", "one", "three"], + "C": np.random.default_rng(2).standard_normal(8), + "D": np.random.default_rng(2).standard_normal(8), + "E": np.random.default_rng(2).standard_normal(8), + } + ) + + result = df.groupby("A")["C"].mean() + + as_frame = df.loc[:, ["A", "C"]].groupby("A").mean() + as_series = as_frame.iloc[:, 0] + expected = as_series + + tm.assert_series_equal(result, expected) + + def test_indices_grouped_by_tuple_with_lambda(self): + # GH 36158 + df = DataFrame( + { + "Tuples": ( + (x, y) + for x in [0, 1] + for y in np.random.default_rng(2).integers(3, 5, 5) + ) + } + ) + + gb = df.groupby("Tuples") + gb_lambda = df.groupby(lambda x: df.iloc[x, 0]) + + expected = gb.indices + result = gb_lambda.indices + + tm.assert_dict_equal(result, expected) + + +# grouping +# -------------------------------- + + +class TestGrouping: + @pytest.mark.parametrize( + "index", + [ + tm.makeFloatIndex, + tm.makeStringIndex, + tm.makeIntIndex, + tm.makeDateIndex, + tm.makePeriodIndex, + ], + ) + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_grouper_index_types(self, index): + # related GH5375 + # groupby misbehaving when using a Floatlike index + df = DataFrame(np.arange(10).reshape(5, 2), columns=list("AB")) + + df.index = index(len(df)) + df.groupby(list("abcde"), group_keys=False).apply(lambda x: x) + + df.index = list(reversed(df.index.tolist())) + df.groupby(list("abcde"), group_keys=False).apply(lambda x: x) + + def test_grouper_multilevel_freq(self): + # GH 7885 + # with level and freq specified in a Grouper + d0 = date.today() - timedelta(days=14) + dates = date_range(d0, date.today()) + date_index = MultiIndex.from_product([dates, dates], names=["foo", "bar"]) + df = DataFrame(np.random.default_rng(2).integers(0, 100, 225), index=date_index) + + # Check string level + expected = ( + df.reset_index() + .groupby([Grouper(key="foo", freq="W"), Grouper(key="bar", freq="W")]) + .sum() + ) + # reset index changes columns dtype to object + expected.columns = Index([0], dtype="int64") + + result = df.groupby( + [Grouper(level="foo", freq="W"), Grouper(level="bar", freq="W")] + ).sum() + tm.assert_frame_equal(result, expected) + + # Check integer level + result = df.groupby( + [Grouper(level=0, freq="W"), Grouper(level=1, freq="W")] + ).sum() + tm.assert_frame_equal(result, expected) + + def test_grouper_creation_bug(self): + # GH 8795 + df = DataFrame({"A": [0, 0, 1, 1, 2, 2], "B": [1, 2, 3, 4, 5, 6]}) + g = df.groupby("A") + expected = g.sum() + + g = df.groupby(Grouper(key="A")) + result = g.sum() + tm.assert_frame_equal(result, expected) + + msg = "Grouper axis keyword is deprecated and will be removed" + with tm.assert_produces_warning(FutureWarning, match=msg): + gpr = Grouper(key="A", axis=0) + g = df.groupby(gpr) + result = g.sum() + tm.assert_frame_equal(result, expected) + + result = g.apply(lambda x: x.sum()) + expected["A"] = [0, 2, 4] + expected = expected.loc[:, ["A", "B"]] + tm.assert_frame_equal(result, expected) + + # GH14334 + # Grouper(key=...) 
may be passed in a list + df = DataFrame( + {"A": [0, 0, 0, 1, 1, 1], "B": [1, 1, 2, 2, 3, 3], "C": [1, 2, 3, 4, 5, 6]} + ) + # Group by single column + expected = df.groupby("A").sum() + g = df.groupby([Grouper(key="A")]) + result = g.sum() + tm.assert_frame_equal(result, expected) + + # Group by two columns + # using a combination of strings and Grouper objects + expected = df.groupby(["A", "B"]).sum() + + # Group with two Grouper objects + g = df.groupby([Grouper(key="A"), Grouper(key="B")]) + result = g.sum() + tm.assert_frame_equal(result, expected) + + # Group with a string and a Grouper object + g = df.groupby(["A", Grouper(key="B")]) + result = g.sum() + tm.assert_frame_equal(result, expected) + + # Group with a Grouper object and a string + g = df.groupby([Grouper(key="A"), "B"]) + result = g.sum() + tm.assert_frame_equal(result, expected) + + # GH8866 + s = Series( + np.arange(8, dtype="int64"), + index=MultiIndex.from_product( + [list("ab"), range(2), date_range("20130101", periods=2)], + names=["one", "two", "three"], + ), + ) + result = s.groupby(Grouper(level="three", freq="M")).sum() + expected = Series( + [28], + index=pd.DatetimeIndex([Timestamp("2013-01-31")], freq="M", name="three"), + ) + tm.assert_series_equal(result, expected) + + # just specifying a level breaks + result = s.groupby(Grouper(level="one")).sum() + expected = s.groupby(level="one").sum() + tm.assert_series_equal(result, expected) + + def test_grouper_column_and_index(self): + # GH 14327 + + # Grouping a multi-index frame by a column and an index level should + # be equivalent to resetting the index and grouping by two columns + idx = MultiIndex.from_tuples( + [("a", 1), ("a", 2), ("a", 3), ("b", 1), ("b", 2), ("b", 3)] + ) + idx.names = ["outer", "inner"] + df_multi = DataFrame( + {"A": np.arange(6), "B": ["one", "one", "two", "two", "one", "one"]}, + index=idx, + ) + result = df_multi.groupby(["B", Grouper(level="inner")]).mean(numeric_only=True) + expected = ( + df_multi.reset_index().groupby(["B", "inner"]).mean(numeric_only=True) + ) + tm.assert_frame_equal(result, expected) + + # Test the reverse grouping order + result = df_multi.groupby([Grouper(level="inner"), "B"]).mean(numeric_only=True) + expected = ( + df_multi.reset_index().groupby(["inner", "B"]).mean(numeric_only=True) + ) + tm.assert_frame_equal(result, expected) + + # Grouping a single-index frame by a column and the index should + # be equivalent to resetting the index and grouping by two columns + df_single = df_multi.reset_index("outer") + result = df_single.groupby(["B", Grouper(level="inner")]).mean( + numeric_only=True + ) + expected = ( + df_single.reset_index().groupby(["B", "inner"]).mean(numeric_only=True) + ) + tm.assert_frame_equal(result, expected) + + # Test the reverse grouping order + result = df_single.groupby([Grouper(level="inner"), "B"]).mean( + numeric_only=True + ) + expected = ( + df_single.reset_index().groupby(["inner", "B"]).mean(numeric_only=True) + ) + tm.assert_frame_equal(result, expected) + + def test_groupby_levels_and_columns(self): + # GH9344, GH9049 + idx_names = ["x", "y"] + idx = MultiIndex.from_tuples([(1, 1), (1, 2), (3, 4), (5, 6)], names=idx_names) + df = DataFrame(np.arange(12).reshape(-1, 3), index=idx) + + by_levels = df.groupby(level=idx_names).mean() + # reset_index changes columns dtype to object + by_columns = df.reset_index().groupby(idx_names).mean() + + # without casting, by_columns.columns is object-dtype + by_columns.columns = by_columns.columns.astype(np.int64) + 
tm.assert_frame_equal(by_levels, by_columns) + + def test_groupby_categorical_index_and_columns(self, observed): + # GH18432, adapted for GH25871 + columns = ["A", "B", "A", "B"] + categories = ["B", "A"] + data = np.array( + [[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2]], int + ) + cat_columns = CategoricalIndex(columns, categories=categories, ordered=True) + df = DataFrame(data=data, columns=cat_columns) + depr_msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + result = df.groupby(axis=1, level=0, observed=observed).sum() + expected_data = np.array([[4, 2], [4, 2], [4, 2], [4, 2], [4, 2]], int) + expected_columns = CategoricalIndex( + categories, categories=categories, ordered=True + ) + expected = DataFrame(data=expected_data, columns=expected_columns) + tm.assert_frame_equal(result, expected) + + # test transposed version + df = DataFrame(data.T, index=cat_columns) + msg = "The 'axis' keyword in DataFrame.groupby is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(axis=0, level=0, observed=observed).sum() + expected = DataFrame(data=expected_data.T, index=expected_columns) + tm.assert_frame_equal(result, expected) + + def test_grouper_getting_correct_binner(self): + # GH 10063 + # using a non-time-based grouper and a time-based grouper + # and specifying levels + df = DataFrame( + {"A": 1}, + index=MultiIndex.from_product( + [list("ab"), date_range("20130101", periods=80)], names=["one", "two"] + ), + ) + result = df.groupby( + [Grouper(level="one"), Grouper(level="two", freq="M")] + ).sum() + expected = DataFrame( + {"A": [31, 28, 21, 31, 28, 21]}, + index=MultiIndex.from_product( + [list("ab"), date_range("20130101", freq="M", periods=3)], + names=["one", "two"], + ), + ) + tm.assert_frame_equal(result, expected) + + def test_grouper_iter(self, df): + assert sorted(df.groupby("A").grouper) == ["bar", "foo"] + + def test_empty_groups(self, df): + # see gh-1048 + with pytest.raises(ValueError, match="No group keys passed!"): + df.groupby([]) + + def test_groupby_grouper(self, df): + grouped = df.groupby("A") + + result = df.groupby(grouped.grouper).mean(numeric_only=True) + expected = grouped.mean(numeric_only=True) + tm.assert_frame_equal(result, expected) + + def test_groupby_dict_mapping(self): + # GH #679 + s = Series({"T1": 5}) + result = s.groupby({"T1": "T2"}).agg("sum") + expected = s.groupby(["T2"]).agg("sum") + tm.assert_series_equal(result, expected) + + s = Series([1.0, 2.0, 3.0, 4.0], index=list("abcd")) + mapping = {"a": 0, "b": 0, "c": 1, "d": 1} + + result = s.groupby(mapping).mean() + result2 = s.groupby(mapping).agg("mean") + exp_key = np.array([0, 0, 1, 1], dtype=np.int64) + expected = s.groupby(exp_key).mean() + expected2 = s.groupby(exp_key).mean() + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result, result2) + tm.assert_series_equal(result, expected2) + + @pytest.mark.parametrize( + "index", + [ + [0, 1, 2, 3], + ["a", "b", "c", "d"], + [Timestamp(2021, 7, 28 + i) for i in range(4)], + ], + ) + def test_groupby_series_named_with_tuple(self, frame_or_series, index): + # GH 42731 + obj = frame_or_series([1, 2, 3, 4], index=index) + groups = Series([1, 0, 1, 0], index=index, name=("a", "a")) + result = obj.groupby(groups).last() + expected = frame_or_series([4, 3]) + expected.index.name = ("a", "a") + tm.assert_equal(result, expected) + + def test_groupby_grouper_f_sanity_checked(self): + dates = 
date_range("01-Jan-2013", periods=12, freq="MS") + ts = Series(np.random.default_rng(2).standard_normal(12), index=dates) + + # GH51979 + # simple check that the passed function doesn't operates on the whole index + msg = "'Timestamp' object is not subscriptable" + with pytest.raises(TypeError, match=msg): + ts.groupby(lambda key: key[0:6]) + + result = ts.groupby(lambda x: x).sum() + expected = ts.groupby(ts.index).sum() + expected.index.freq = None + tm.assert_series_equal(result, expected) + + def test_groupby_with_datetime_key(self): + # GH 51158 + df = DataFrame( + { + "id": ["a", "b"] * 3, + "b": date_range("2000-01-01", "2000-01-03", freq="9H"), + } + ) + grouper = Grouper(key="b", freq="D") + gb = df.groupby([grouper, "id"]) + + # test number of groups + expected = { + (Timestamp("2000-01-01"), "a"): [0, 2], + (Timestamp("2000-01-01"), "b"): [1], + (Timestamp("2000-01-02"), "a"): [4], + (Timestamp("2000-01-02"), "b"): [3, 5], + } + tm.assert_dict_equal(gb.groups, expected) + + # test number of group keys + assert len(gb.groups.keys()) == 4 + + def test_grouping_error_on_multidim_input(self, df): + msg = "Grouper for '' not 1-dimensional" + with pytest.raises(ValueError, match=msg): + Grouping(df.index, df[["A", "A"]]) + + def test_multiindex_passthru(self): + # GH 7997 + # regression from 0.14.1 + df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + df.columns = MultiIndex.from_tuples([(0, 1), (1, 1), (2, 1)]) + + depr_msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + gb = df.groupby(axis=1, level=[0, 1]) + result = gb.first() + tm.assert_frame_equal(result, df) + + def test_multiindex_negative_level(self, mframe): + # GH 13901 + result = mframe.groupby(level=-1).sum() + expected = mframe.groupby(level="second").sum() + tm.assert_frame_equal(result, expected) + + result = mframe.groupby(level=-2).sum() + expected = mframe.groupby(level="first").sum() + tm.assert_frame_equal(result, expected) + + result = mframe.groupby(level=[-2, -1]).sum() + expected = mframe.sort_index() + tm.assert_frame_equal(result, expected) + + result = mframe.groupby(level=[-1, "first"]).sum() + expected = mframe.groupby(level=["second", "first"]).sum() + tm.assert_frame_equal(result, expected) + + def test_multifunc_select_col_integer_cols(self, df): + df.columns = np.arange(len(df.columns)) + + # it works! 
+ msg = "Passing a dictionary to SeriesGroupBy.agg is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.groupby(1, as_index=False)[2].agg({"Q": np.mean}) + + def test_multiindex_columns_empty_level(self): + lst = [["count", "values"], ["to filter", ""]] + midx = MultiIndex.from_tuples(lst) + + df = DataFrame([[1, "A"]], columns=midx) + + grouped = df.groupby("to filter").groups + assert grouped["A"] == [0] + + grouped = df.groupby([("to filter", "")]).groups + assert grouped["A"] == [0] + + df = DataFrame([[1, "A"], [2, "B"]], columns=midx) + + expected = df.groupby("to filter").groups + result = df.groupby([("to filter", "")]).groups + assert result == expected + + df = DataFrame([[1, "A"], [2, "A"]], columns=midx) + + expected = df.groupby("to filter").groups + result = df.groupby([("to filter", "")]).groups + tm.assert_dict_equal(result, expected) + + def test_groupby_multiindex_tuple(self): + # GH 17979 + df = DataFrame( + [[1, 2, 3, 4], [3, 4, 5, 6], [1, 4, 2, 3]], + columns=MultiIndex.from_arrays([["a", "b", "b", "c"], [1, 1, 2, 2]]), + ) + expected = df.groupby([("b", 1)]).groups + result = df.groupby(("b", 1)).groups + tm.assert_dict_equal(expected, result) + + df2 = DataFrame( + df.values, + columns=MultiIndex.from_arrays( + [["a", "b", "b", "c"], ["d", "d", "e", "e"]] + ), + ) + expected = df2.groupby([("b", "d")]).groups + result = df.groupby(("b", 1)).groups + tm.assert_dict_equal(expected, result) + + df3 = DataFrame(df.values, columns=[("a", "d"), ("b", "d"), ("b", "e"), "c"]) + expected = df3.groupby([("b", "d")]).groups + result = df.groupby(("b", 1)).groups + tm.assert_dict_equal(expected, result) + + def test_groupby_multiindex_partial_indexing_equivalence(self): + # GH 17977 + df = DataFrame( + [[1, 2, 3, 4], [3, 4, 5, 6], [1, 4, 2, 3]], + columns=MultiIndex.from_arrays([["a", "b", "b", "c"], [1, 1, 2, 2]]), + ) + + expected_mean = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].mean() + result_mean = df.groupby([("a", 1)])["b"].mean() + tm.assert_frame_equal(expected_mean, result_mean) + + expected_sum = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].sum() + result_sum = df.groupby([("a", 1)])["b"].sum() + tm.assert_frame_equal(expected_sum, result_sum) + + expected_count = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].count() + result_count = df.groupby([("a", 1)])["b"].count() + tm.assert_frame_equal(expected_count, result_count) + + expected_min = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].min() + result_min = df.groupby([("a", 1)])["b"].min() + tm.assert_frame_equal(expected_min, result_min) + + expected_max = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].max() + result_max = df.groupby([("a", 1)])["b"].max() + tm.assert_frame_equal(expected_max, result_max) + + expected_groups = df.groupby([("a", 1)])[[("b", 1), ("b", 2)]].groups + result_groups = df.groupby([("a", 1)])["b"].groups + tm.assert_dict_equal(expected_groups, result_groups) + + @pytest.mark.parametrize("sort", [True, False]) + def test_groupby_level(self, sort, mframe, df): + # GH 17537 + frame = mframe + deleveled = frame.reset_index() + + result0 = frame.groupby(level=0, sort=sort).sum() + result1 = frame.groupby(level=1, sort=sort).sum() + + expected0 = frame.groupby(deleveled["first"].values, sort=sort).sum() + expected1 = frame.groupby(deleveled["second"].values, sort=sort).sum() + + expected0.index.name = "first" + expected1.index.name = "second" + + assert result0.index.name == "first" + assert result1.index.name == "second" + + tm.assert_frame_equal(result0, expected0) + 
tm.assert_frame_equal(result1, expected1) + assert result0.index.name == frame.index.names[0] + assert result1.index.name == frame.index.names[1] + + # groupby level name + result0 = frame.groupby(level="first", sort=sort).sum() + result1 = frame.groupby(level="second", sort=sort).sum() + tm.assert_frame_equal(result0, expected0) + tm.assert_frame_equal(result1, expected1) + + # axis=1 + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result0 = frame.T.groupby(level=0, axis=1, sort=sort).sum() + result1 = frame.T.groupby(level=1, axis=1, sort=sort).sum() + tm.assert_frame_equal(result0, expected0.T) + tm.assert_frame_equal(result1, expected1.T) + + # raise exception for non-MultiIndex + msg = "level > 0 or level < -1 only valid with MultiIndex" + with pytest.raises(ValueError, match=msg): + df.groupby(level=1) + + def test_groupby_level_index_names(self, axis): + # GH4014 this used to raise ValueError since 'exp'>1 (in py2) + df = DataFrame({"exp": ["A"] * 3 + ["B"] * 3, "var1": range(6)}).set_index( + "exp" + ) + if axis in (1, "columns"): + df = df.T + depr_msg = "DataFrame.groupby with axis=1 is deprecated" + else: + depr_msg = "The 'axis' keyword in DataFrame.groupby is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + df.groupby(level="exp", axis=axis) + msg = f"level name foo is not the name of the {df._get_axis_name(axis)}" + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + df.groupby(level="foo", axis=axis) + + @pytest.mark.parametrize("sort", [True, False]) + def test_groupby_level_with_nas(self, sort): + # GH 17537 + index = MultiIndex( + levels=[[1, 0], [0, 1, 2, 3]], + codes=[[1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]], + ) + + # factorizing doesn't confuse things + s = Series(np.arange(8.0), index=index) + result = s.groupby(level=0, sort=sort).sum() + expected = Series([6.0, 22.0], index=[0, 1]) + tm.assert_series_equal(result, expected) + + index = MultiIndex( + levels=[[1, 0], [0, 1, 2, 3]], + codes=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]], + ) + + # factorizing doesn't confuse things + s = Series(np.arange(8.0), index=index) + result = s.groupby(level=0, sort=sort).sum() + expected = Series([6.0, 18.0], index=[0.0, 1.0]) + tm.assert_series_equal(result, expected) + + def test_groupby_args(self, mframe): + # PR8618 and issue 8015 + frame = mframe + + msg = "You have to supply one of 'by' and 'level'" + with pytest.raises(TypeError, match=msg): + frame.groupby() + + msg = "You have to supply one of 'by' and 'level'" + with pytest.raises(TypeError, match=msg): + frame.groupby(by=None, level=None) + + @pytest.mark.parametrize( + "sort,labels", + [ + [True, [2, 2, 2, 0, 0, 1, 1, 3, 3, 3]], + [False, [0, 0, 0, 1, 1, 2, 2, 3, 3, 3]], + ], + ) + def test_level_preserve_order(self, sort, labels, mframe): + # GH 17537 + grouped = mframe.groupby(level=0, sort=sort) + exp_labels = np.array(labels, np.intp) + tm.assert_almost_equal(grouped.grouper.codes[0], exp_labels) + + def test_grouping_labels(self, mframe): + grouped = mframe.groupby(mframe.index.get_level_values(0)) + exp_labels = np.array([2, 2, 2, 0, 0, 1, 1, 3, 3, 3], dtype=np.intp) + tm.assert_almost_equal(grouped.grouper.codes[0], exp_labels) + + def test_list_grouper_with_nat(self): + # GH 14715 + df = DataFrame({"date": date_range("1/1/2011", periods=365, freq="D")}) + df.iloc[-1] = pd.NaT + grouper = Grouper(key="date", freq="AS") + + # Grouper 
in a list grouping + result = df.groupby([grouper]) + expected = {Timestamp("2011-01-01"): Index(list(range(364)))} + tm.assert_dict_equal(result.groups, expected) + + # Test case without a list + result = df.groupby(grouper) + expected = {Timestamp("2011-01-01"): 365} + tm.assert_dict_equal(result.groups, expected) + + @pytest.mark.parametrize( + "func,expected", + [ + ( + "transform", + Series(name=2, dtype=np.float64), + ), + ( + "agg", + Series( + name=2, dtype=np.float64, index=Index([], dtype=np.float64, name=1) + ), + ), + ( + "apply", + Series( + name=2, dtype=np.float64, index=Index([], dtype=np.float64, name=1) + ), + ), + ], + ) + def test_evaluate_with_empty_groups(self, func, expected): + # 26208 + # test transform'ing empty groups + # (not testing other agg fns, because they return + # different index objects. + df = DataFrame({1: [], 2: []}) + g = df.groupby(1, group_keys=False) + result = getattr(g[2], func)(lambda x: x) + tm.assert_series_equal(result, expected) + + def test_groupby_empty(self): + # https://github.com/pandas-dev/pandas/issues/27190 + s = Series([], name="name", dtype="float64") + gr = s.groupby([]) + + result = gr.mean() + expected = s.set_axis(Index([], dtype=np.intp)) + tm.assert_series_equal(result, expected) + + # check group properties + assert len(gr.grouper.groupings) == 1 + tm.assert_numpy_array_equal( + gr.grouper.group_info[0], np.array([], dtype=np.dtype(np.intp)) + ) + + tm.assert_numpy_array_equal( + gr.grouper.group_info[1], np.array([], dtype=np.dtype(np.intp)) + ) + + assert gr.grouper.group_info[2] == 0 + + # check name + assert s.groupby(s).grouper.names == ["name"] + + def test_groupby_level_index_value_all_na(self): + # issue 20519 + df = DataFrame( + [["x", np.nan, 10], [None, np.nan, 20]], columns=["A", "B", "C"] + ).set_index(["A", "B"]) + result = df.groupby(level=["A", "B"]).sum() + expected = DataFrame( + data=[], + index=MultiIndex( + levels=[Index(["x"], dtype="object"), Index([], dtype="float64")], + codes=[[], []], + names=["A", "B"], + ), + columns=["C"], + dtype="int64", + ) + tm.assert_frame_equal(result, expected) + + def test_groupby_multiindex_level_empty(self): + # https://github.com/pandas-dev/pandas/issues/31670 + df = DataFrame( + [[123, "a", 1.0], [123, "b", 2.0]], columns=["id", "category", "value"] + ) + df = df.set_index(["id", "category"]) + empty = df[df.value < 0] + result = empty.groupby("id").sum() + expected = DataFrame( + dtype="float64", + columns=["value"], + index=Index([], dtype=np.int64, name="id"), + ) + tm.assert_frame_equal(result, expected) + + +# get_group +# -------------------------------- + + +class TestGetGroup: + def test_get_group(self): + # GH 5267 + # be datelike friendly + df = DataFrame( + { + "DATE": pd.to_datetime( + [ + "10-Oct-2013", + "10-Oct-2013", + "10-Oct-2013", + "11-Oct-2013", + "11-Oct-2013", + "11-Oct-2013", + ] + ), + "label": ["foo", "foo", "bar", "foo", "foo", "bar"], + "VAL": [1, 2, 3, 4, 5, 6], + } + ) + + g = df.groupby("DATE") + key = next(iter(g.groups)) + result1 = g.get_group(key) + result2 = g.get_group(Timestamp(key).to_pydatetime()) + result3 = g.get_group(str(Timestamp(key))) + tm.assert_frame_equal(result1, result2) + tm.assert_frame_equal(result1, result3) + + g = df.groupby(["DATE", "label"]) + + key = next(iter(g.groups)) + result1 = g.get_group(key) + result2 = g.get_group((Timestamp(key[0]).to_pydatetime(), key[1])) + result3 = g.get_group((str(Timestamp(key[0])), key[1])) + tm.assert_frame_equal(result1, result2) + tm.assert_frame_equal(result1, 
result3) + + # must pass a same-length tuple with multiple keys + msg = "must supply a tuple to get_group with multiple grouping keys" + with pytest.raises(ValueError, match=msg): + g.get_group("foo") + with pytest.raises(ValueError, match=msg): + g.get_group("foo") + msg = "must supply a same-length tuple to get_group with multiple grouping keys" + with pytest.raises(ValueError, match=msg): + g.get_group(("foo", "bar", "baz")) + + def test_get_group_empty_bins(self, observed): + d = DataFrame([3, 1, 7, 6]) + bins = [0, 5, 10, 15] + g = d.groupby(pd.cut(d[0], bins), observed=observed) + + # TODO: should prob allow a str of Interval work as well + # IOW '(0, 5]' + result = g.get_group(pd.Interval(0, 5)) + expected = DataFrame([3, 1], index=[0, 1]) + tm.assert_frame_equal(result, expected) + + msg = r"Interval\(10, 15, closed='right'\)" + with pytest.raises(KeyError, match=msg): + g.get_group(pd.Interval(10, 15)) + + def test_get_group_grouped_by_tuple(self): + # GH 8121 + df = DataFrame([[(1,), (1, 2), (1,), (1, 2)]], index=["ids"]).T + gr = df.groupby("ids") + expected = DataFrame({"ids": [(1,), (1,)]}, index=[0, 2]) + result = gr.get_group((1,)) + tm.assert_frame_equal(result, expected) + + dt = pd.to_datetime(["2010-01-01", "2010-01-02", "2010-01-01", "2010-01-02"]) + df = DataFrame({"ids": [(x,) for x in dt]}) + gr = df.groupby("ids") + result = gr.get_group(("2010-01-01",)) + expected = DataFrame({"ids": [(dt[0],), (dt[0],)]}, index=[0, 2]) + tm.assert_frame_equal(result, expected) + + def test_get_group_grouped_by_tuple_with_lambda(self): + # GH 36158 + df = DataFrame( + { + "Tuples": ( + (x, y) + for x in [0, 1] + for y in np.random.default_rng(2).integers(3, 5, 5) + ) + } + ) + + gb = df.groupby("Tuples") + gb_lambda = df.groupby(lambda x: df.iloc[x, 0]) + + expected = gb.get_group(next(iter(gb.groups.keys()))) + result = gb_lambda.get_group(next(iter(gb_lambda.groups.keys()))) + + tm.assert_frame_equal(result, expected) + + def test_groupby_with_empty(self): + index = pd.DatetimeIndex(()) + data = () + series = Series(data, index, dtype=object) + grouper = Grouper(freq="D") + grouped = series.groupby(grouper) + assert next(iter(grouped), None) is None + + def test_groupby_with_single_column(self): + df = DataFrame({"a": list("abssbab")}) + tm.assert_frame_equal(df.groupby("a").get_group("a"), df.iloc[[0, 5]]) + # GH 13530 + exp = DataFrame(index=Index(["a", "b", "s"], name="a"), columns=[]) + tm.assert_frame_equal(df.groupby("a").count(), exp) + tm.assert_frame_equal(df.groupby("a").sum(), exp) + + exp = df.iloc[[3, 4, 5]] + tm.assert_frame_equal(df.groupby("a").nth(1), exp) + + def test_gb_key_len_equal_axis_len(self): + # GH16843 + # test ensures that index and column keys are recognized correctly + # when number of keys equals axis length of groupby + df = DataFrame( + [["foo", "bar", "B", 1], ["foo", "bar", "B", 2], ["foo", "baz", "C", 3]], + columns=["first", "second", "third", "one"], + ) + df = df.set_index(["first", "second"]) + df = df.groupby(["first", "second", "third"]).size() + assert df.loc[("foo", "bar", "B")] == 2 + assert df.loc[("foo", "baz", "C")] == 1 + + +# groups & iteration +# -------------------------------- + + +class TestIteration: + def test_groups(self, df): + grouped = df.groupby(["A"]) + groups = grouped.groups + assert groups is grouped.groups # caching works + + for k, v in grouped.groups.items(): + assert (df.loc[v]["A"] == k).all() + + grouped = df.groupby(["A", "B"]) + groups = grouped.groups + assert groups is grouped.groups # caching works + + 
for k, v in grouped.groups.items(): + assert (df.loc[v]["A"] == k[0]).all() + assert (df.loc[v]["B"] == k[1]).all() + + def test_grouping_is_iterable(self, tsframe): + # this code path isn't used anywhere else + # not sure it's useful + grouped = tsframe.groupby([lambda x: x.weekday(), lambda x: x.year]) + + # test it works + for g in grouped.grouper.groupings[0]: + pass + + def test_multi_iter(self): + s = Series(np.arange(6)) + k1 = np.array(["a", "a", "a", "b", "b", "b"]) + k2 = np.array(["1", "2", "1", "2", "1", "2"]) + + grouped = s.groupby([k1, k2]) + + iterated = list(grouped) + expected = [ + ("a", "1", s[[0, 2]]), + ("a", "2", s[[1]]), + ("b", "1", s[[4]]), + ("b", "2", s[[3, 5]]), + ] + for i, ((one, two), three) in enumerate(iterated): + e1, e2, e3 = expected[i] + assert e1 == one + assert e2 == two + tm.assert_series_equal(three, e3) + + def test_multi_iter_frame(self, three_group): + k1 = np.array(["b", "b", "b", "a", "a", "a"]) + k2 = np.array(["1", "2", "1", "2", "1", "2"]) + df = DataFrame( + { + "v1": np.random.default_rng(2).standard_normal(6), + "v2": np.random.default_rng(2).standard_normal(6), + "k1": k1, + "k2": k2, + }, + index=["one", "two", "three", "four", "five", "six"], + ) + + grouped = df.groupby(["k1", "k2"]) + + # things get sorted! + iterated = list(grouped) + idx = df.index + expected = [ + ("a", "1", df.loc[idx[[4]]]), + ("a", "2", df.loc[idx[[3, 5]]]), + ("b", "1", df.loc[idx[[0, 2]]]), + ("b", "2", df.loc[idx[[1]]]), + ] + for i, ((one, two), three) in enumerate(iterated): + e1, e2, e3 = expected[i] + assert e1 == one + assert e2 == two + tm.assert_frame_equal(three, e3) + + # don't iterate through groups with no data + df["k1"] = np.array(["b", "b", "b", "a", "a", "a"]) + df["k2"] = np.array(["1", "1", "1", "2", "2", "2"]) + grouped = df.groupby(["k1", "k2"]) + # calling `dict` on a DataFrameGroupBy leads to a TypeError, + # we need to use a dictionary comprehension here + # pylint: disable-next=unnecessary-comprehension + groups = {key: gp for key, gp in grouped} # noqa: C416 + assert len(groups) == 2 + + # axis = 1 + three_levels = three_group.groupby(["A", "B", "C"]).mean() + depr_msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + grouped = three_levels.T.groupby(axis=1, level=(1, 2)) + for key, group in grouped: + pass + + def test_dictify(self, df): + dict(iter(df.groupby("A"))) + dict(iter(df.groupby(["A", "B"]))) + dict(iter(df["C"].groupby(df["A"]))) + dict(iter(df["C"].groupby([df["A"], df["B"]]))) + dict(iter(df.groupby("A")["C"])) + dict(iter(df.groupby(["A", "B"])["C"])) + + def test_groupby_with_small_elem(self): + # GH 8542 + # length=2 + df = DataFrame( + {"event": ["start", "start"], "change": [1234, 5678]}, + index=pd.DatetimeIndex(["2014-09-10", "2013-10-10"]), + ) + grouped = df.groupby([Grouper(freq="M"), "event"]) + assert len(grouped.groups) == 2 + assert grouped.ngroups == 2 + assert (Timestamp("2014-09-30"), "start") in grouped.groups + assert (Timestamp("2013-10-31"), "start") in grouped.groups + + res = grouped.get_group((Timestamp("2014-09-30"), "start")) + tm.assert_frame_equal(res, df.iloc[[0], :]) + res = grouped.get_group((Timestamp("2013-10-31"), "start")) + tm.assert_frame_equal(res, df.iloc[[1], :]) + + df = DataFrame( + {"event": ["start", "start", "start"], "change": [1234, 5678, 9123]}, + index=pd.DatetimeIndex(["2014-09-10", "2013-10-10", "2014-09-15"]), + ) + grouped = df.groupby([Grouper(freq="M"), "event"]) + assert len(grouped.groups) == 2 + 
assert grouped.ngroups == 2 + assert (Timestamp("2014-09-30"), "start") in grouped.groups + assert (Timestamp("2013-10-31"), "start") in grouped.groups + + res = grouped.get_group((Timestamp("2014-09-30"), "start")) + tm.assert_frame_equal(res, df.iloc[[0, 2], :]) + res = grouped.get_group((Timestamp("2013-10-31"), "start")) + tm.assert_frame_equal(res, df.iloc[[1], :]) + + # length=3 + df = DataFrame( + {"event": ["start", "start", "start"], "change": [1234, 5678, 9123]}, + index=pd.DatetimeIndex(["2014-09-10", "2013-10-10", "2014-08-05"]), + ) + grouped = df.groupby([Grouper(freq="M"), "event"]) + assert len(grouped.groups) == 3 + assert grouped.ngroups == 3 + assert (Timestamp("2014-09-30"), "start") in grouped.groups + assert (Timestamp("2013-10-31"), "start") in grouped.groups + assert (Timestamp("2014-08-31"), "start") in grouped.groups + + res = grouped.get_group((Timestamp("2014-09-30"), "start")) + tm.assert_frame_equal(res, df.iloc[[0], :]) + res = grouped.get_group((Timestamp("2013-10-31"), "start")) + tm.assert_frame_equal(res, df.iloc[[1], :]) + res = grouped.get_group((Timestamp("2014-08-31"), "start")) + tm.assert_frame_equal(res, df.iloc[[2], :]) + + def test_grouping_string_repr(self): + # GH 13394 + mi = MultiIndex.from_arrays([list("AAB"), list("aba")]) + df = DataFrame([[1, 2, 3]], columns=mi) + gr = df.groupby(df[("A", "a")]) + + result = gr.grouper.groupings[0].__repr__() + expected = "Grouping(('A', 'a'))" + assert result == expected + + +def test_grouping_by_key_is_in_axis(): + # GH#50413 - Groupers specified by key are in-axis + df = DataFrame({"a": [1, 1, 2], "b": [1, 1, 2], "c": [3, 4, 5]}).set_index("a") + gb = df.groupby([Grouper(level="a"), Grouper(key="b")], as_index=False) + assert not gb.grouper.groupings[0].in_axis + assert gb.grouper.groupings[1].in_axis + + # Currently only in-axis groupings are included in the result when as_index=False; + # This is likely to change in the future.
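+ # Concretely: the level-"a" grouping above is not in-axis, so under
+ # as_index=False it is dropped from the result (with a FutureWarning) and
+ # only the key-based "b" grouping and the aggregated "c" column remain.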
+ msg = "A grouping .* was excluded from the result" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = gb.sum() + expected = DataFrame({"b": [1, 2], "c": [7, 5]}) + tm.assert_frame_equal(result, expected) + + +def test_grouper_groups(): + # GH#51182 check Grouper.groups does not raise AttributeError + df = DataFrame({"a": [1, 2, 3], "b": 1}) + grper = Grouper(key="a") + gb = df.groupby(grper) + + msg = "Use GroupBy.groups instead" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = grper.groups + assert res is gb.groups + + msg = "Use GroupBy.grouper instead" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = grper.grouper + assert res is gb.grouper + + msg = "Grouper.obj is deprecated and will be removed" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = grper.obj + assert res is gb.obj + + msg = "Use Resampler.ax instead" + with tm.assert_produces_warning(FutureWarning, match=msg): + grper.ax + + msg = "Grouper.indexer is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + grper.indexer diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_index_as_string.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_index_as_string.py new file mode 100644 index 00000000..4aaf3de9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_index_as_string.py @@ -0,0 +1,85 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.fixture(params=[["inner"], ["inner", "outer"]]) +def frame(request): + levels = request.param + df = pd.DataFrame( + { + "outer": ["a", "a", "a", "b", "b", "b"], + "inner": [1, 2, 3, 1, 2, 3], + "A": np.arange(6), + "B": ["one", "one", "two", "two", "one", "one"], + } + ) + if levels: + df = df.set_index(levels) + + return df + + +@pytest.fixture() +def series(): + df = pd.DataFrame( + { + "outer": ["a", "a", "a", "b", "b", "b"], + "inner": [1, 2, 3, 1, 2, 3], + "A": np.arange(6), + "B": ["one", "one", "two", "two", "one", "one"], + } + ) + s = df.set_index(["outer", "inner", "B"])["A"] + + return s + + +@pytest.mark.parametrize( + "key_strs,groupers", + [ + ("inner", pd.Grouper(level="inner")), # Index name + (["inner"], [pd.Grouper(level="inner")]), # List of index name + (["B", "inner"], ["B", pd.Grouper(level="inner")]), # Column and index + (["inner", "B"], [pd.Grouper(level="inner"), "B"]), # Index and column + ], +) +def test_grouper_index_level_as_string(frame, key_strs, groupers): + if "B" not in key_strs or "outer" in frame.columns: + result = frame.groupby(key_strs).mean(numeric_only=True) + expected = frame.groupby(groupers).mean(numeric_only=True) + else: + result = frame.groupby(key_strs).mean() + expected = frame.groupby(groupers).mean() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "levels", + [ + "inner", + "outer", + "B", + ["inner"], + ["outer"], + ["B"], + ["inner", "outer"], + ["outer", "inner"], + ["inner", "outer", "B"], + ["B", "outer", "inner"], + ], +) +def test_grouper_index_level_as_string_series(series, levels): + # Compute expected result + if isinstance(levels, list): + groupers = [pd.Grouper(level=lv) for lv in levels] + else: + groupers = pd.Grouper(level=levels) + + expected = series.groupby(groupers).mean() + + # Compute and check result + result = series.groupby(levels).mean() + tm.assert_series_equal(result, expected) diff --git 
a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_indexing.py new file mode 100644 index 00000000..664c52ba --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_indexing.py @@ -0,0 +1,333 @@ +# Test GroupBy._positional_selector positional grouped indexing GH#42864 + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize( + "arg, expected_rows", + [ + [0, [0, 1, 4]], + [2, [5]], + [5, []], + [-1, [3, 4, 7]], + [-2, [1, 6]], + [-6, []], + ], +) +def test_int(slice_test_df, slice_test_grouped, arg, expected_rows): + # Test single integer + result = slice_test_grouped._positional_selector[arg] + expected = slice_test_df.iloc[expected_rows] + + tm.assert_frame_equal(result, expected) + + +def test_slice(slice_test_df, slice_test_grouped): + # Test single slice + result = slice_test_grouped._positional_selector[0:3:2] + expected = slice_test_df.iloc[[0, 1, 4, 5]] + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "arg, expected_rows", + [ + [[0, 2], [0, 1, 4, 5]], + [[0, 2, -1], [0, 1, 3, 4, 5, 7]], + [range(0, 3, 2), [0, 1, 4, 5]], + [{0, 2}, [0, 1, 4, 5]], + ], + ids=[ + "list", + "negative", + "range", + "set", + ], +) +def test_list(slice_test_df, slice_test_grouped, arg, expected_rows): + # Test lists of integers and integer valued iterables + result = slice_test_grouped._positional_selector[arg] + expected = slice_test_df.iloc[expected_rows] + + tm.assert_frame_equal(result, expected) + + +def test_ints(slice_test_df, slice_test_grouped): + # Test tuple of ints + result = slice_test_grouped._positional_selector[0, 2, -1] + expected = slice_test_df.iloc[[0, 1, 3, 4, 5, 7]] + + tm.assert_frame_equal(result, expected) + + +def test_slices(slice_test_df, slice_test_grouped): + # Test tuple of slices + result = slice_test_grouped._positional_selector[:2, -2:] + expected = slice_test_df.iloc[[0, 1, 2, 3, 4, 6, 7]] + + tm.assert_frame_equal(result, expected) + + +def test_mix(slice_test_df, slice_test_grouped): + # Test mixed tuple of ints and slices + result = slice_test_grouped._positional_selector[0, 1, -2:] + expected = slice_test_df.iloc[[0, 1, 2, 3, 4, 6, 7]] + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "arg, expected_rows", + [ + [0, [0, 1, 4]], + [[0, 2, -1], [0, 1, 3, 4, 5, 7]], + [(slice(None, 2), slice(-2, None)), [0, 1, 2, 3, 4, 6, 7]], + ], +) +def test_as_index(slice_test_df, arg, expected_rows): + # Test the default as_index behaviour + result = slice_test_df.groupby("Group", sort=False)._positional_selector[arg] + expected = slice_test_df.iloc[expected_rows] + + tm.assert_frame_equal(result, expected) + + +def test_doc_examples(): + # Test the examples in the documentation + df = pd.DataFrame( + [["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]], columns=["A", "B"] + ) + + grouped = df.groupby("A", as_index=False) + + result = grouped._positional_selector[1:2] + expected = pd.DataFrame([["a", 2], ["b", 5]], columns=["A", "B"], index=[1, 4]) + + tm.assert_frame_equal(result, expected) + + result = grouped._positional_selector[1, -1] + expected = pd.DataFrame( + [["a", 2], ["a", 3], ["b", 5]], columns=["A", "B"], index=[1, 2, 4] + ) + + tm.assert_frame_equal(result, expected) + + +@pytest.fixture() +def multiindex_data(): + rng = np.random.default_rng(2) + ndates = 100 + nitems = 20 + dates = pd.date_range("20130101", periods=ndates, 
freq="D") + items = [f"item {i}" for i in range(nitems)] + + data = {} + for date in dates: + nitems_for_date = nitems - rng.integers(0, 12) + levels = [ + (item, rng.integers(0, 10000) / 100, rng.integers(0, 10000) / 100) + for item in items[:nitems_for_date] + ] + levels.sort(key=lambda x: x[1]) + data[date] = levels + + return data + + +def _make_df_from_data(data): + rows = {} + for date in data: + for level in data[date]: + rows[(date, level[0])] = {"A": level[1], "B": level[2]} + + df = pd.DataFrame.from_dict(rows, orient="index") + df.index.names = ("Date", "Item") + return df + + +def test_multiindex(multiindex_data): + # Test the multiindex mentioned as the use-case in the documentation + df = _make_df_from_data(multiindex_data) + result = df.groupby("Date", as_index=False).nth(slice(3, -3)) + + sliced = {date: multiindex_data[date][3:-3] for date in multiindex_data} + expected = _make_df_from_data(sliced) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("arg", [1, 5, 30, 1000, -1, -5, -30, -1000]) +@pytest.mark.parametrize("method", ["head", "tail"]) +@pytest.mark.parametrize("simulated", [True, False]) +def test_against_head_and_tail(arg, method, simulated): + # Test gives the same results as grouped head and tail + n_groups = 100 + n_rows_per_group = 30 + + data = { + "group": [ + f"group {g}" for j in range(n_rows_per_group) for g in range(n_groups) + ], + "value": [ + f"group {g} row {j}" + for j in range(n_rows_per_group) + for g in range(n_groups) + ], + } + df = pd.DataFrame(data) + grouped = df.groupby("group", as_index=False) + size = arg if arg >= 0 else n_rows_per_group + arg + + if method == "head": + result = grouped._positional_selector[:arg] + + if simulated: + indices = [ + j * n_groups + i + for j in range(size) + for i in range(n_groups) + if j * n_groups + i < n_groups * n_rows_per_group + ] + expected = df.iloc[indices] + + else: + expected = grouped.head(arg) + + else: + result = grouped._positional_selector[-arg:] + + if simulated: + indices = [ + (n_rows_per_group + j - size) * n_groups + i + for j in range(size) + for i in range(n_groups) + if (n_rows_per_group + j - size) * n_groups + i >= 0 + ] + expected = df.iloc[indices] + + else: + expected = grouped.tail(arg) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("start", [None, 0, 1, 10, -1, -10]) +@pytest.mark.parametrize("stop", [None, 0, 1, 10, -1, -10]) +@pytest.mark.parametrize("step", [None, 1, 5]) +def test_against_df_iloc(start, stop, step): + # Test that a single group gives the same results as DataFrame.iloc + n_rows = 30 + + data = { + "group": ["group 0"] * n_rows, + "value": list(range(n_rows)), + } + df = pd.DataFrame(data) + grouped = df.groupby("group", as_index=False) + + result = grouped._positional_selector[start:stop:step] + expected = df.iloc[start:stop:step] + + tm.assert_frame_equal(result, expected) + + +def test_series(): + # Test grouped Series + ser = pd.Series([1, 2, 3, 4, 5], index=["a", "a", "a", "b", "b"]) + grouped = ser.groupby(level=0) + result = grouped._positional_selector[1:2] + expected = pd.Series([2, 5], index=["a", "b"]) + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("step", [1, 2, 3, 4, 5]) +def test_step(step): + # Test slice with various step values + data = [["x", f"x{i}"] for i in range(5)] + data += [["y", f"y{i}"] for i in range(4)] + data += [["z", f"z{i}"] for i in range(3)] + df = pd.DataFrame(data, columns=["A", "B"]) + + grouped = df.groupby("A", as_index=False) + + result = 
grouped._positional_selector[::step] + + data = [["x", f"x{i}"] for i in range(0, 5, step)] + data += [["y", f"y{i}"] for i in range(0, 4, step)] + data += [["z", f"z{i}"] for i in range(0, 3, step)] + + index = [0 + i for i in range(0, 5, step)] + index += [5 + i for i in range(0, 4, step)] + index += [9 + i for i in range(0, 3, step)] + + expected = pd.DataFrame(data, columns=["A", "B"], index=index) + + tm.assert_frame_equal(result, expected) + + +@pytest.fixture() +def column_group_df(): + return pd.DataFrame( + [[0, 1, 2, 3, 4, 5, 6], [0, 0, 1, 0, 1, 0, 2]], + columns=["A", "B", "C", "D", "E", "F", "G"], + ) + + +def test_column_axis(column_group_df): + msg = "DataFrame.groupby with axis=1" + with tm.assert_produces_warning(FutureWarning, match=msg): + g = column_group_df.groupby(column_group_df.iloc[1], axis=1) + result = g._positional_selector[1:-1] + expected = column_group_df.iloc[:, [1, 3]] + + tm.assert_frame_equal(result, expected) + + +def test_columns_on_iter(): + # GitHub issue #44821 + df = pd.DataFrame({k: range(10) for k in "ABC"}) + + # Group-by and select columns + cols = ["A", "B"] + for _, dg in df.groupby(df.A < 4)[cols]: + tm.assert_index_equal(dg.columns, pd.Index(cols)) + assert "C" not in dg.columns + + +@pytest.mark.parametrize("func", [list, pd.Index, pd.Series, np.array]) +def test_groupby_duplicated_columns(func): + # GH#44924 + df = pd.DataFrame( + { + "A": [1, 2], + "B": [3, 3], + "C": ["G", "G"], + } + ) + result = df.groupby("C")[func(["A", "B", "A"])].mean() + expected = pd.DataFrame( + [[1.5, 3.0, 1.5]], columns=["A", "B", "A"], index=pd.Index(["G"], name="C") + ) + tm.assert_frame_equal(result, expected) + + +def test_groupby_get_nonexisting_groups(): + # GH#32492 + df = pd.DataFrame( + data={ + "A": ["a1", "a2", None], + "B": ["b1", "b2", "b1"], + "val": [1, 2, 3], + } + ) + grps = df.groupby(by=["A", "B"]) + + msg = "('a2', 'b1')" + with pytest.raises(KeyError, match=msg): + grps.get_group(("a2", "b1")) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_libgroupby.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_libgroupby.py new file mode 100644 index 00000000..35b8fa93 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_libgroupby.py @@ -0,0 +1,331 @@ +import numpy as np +import pytest + +from pandas._libs import groupby as libgroupby +from pandas._libs.groupby import ( + group_cumprod, + group_cumsum, + group_mean, + group_sum, + group_var, +) + +from pandas.core.dtypes.common import ensure_platform_int + +from pandas import isna +import pandas._testing as tm + + +class GroupVarTestMixin: + def test_group_var_generic_1d(self): + prng = np.random.default_rng(2) + + out = (np.nan * np.ones((5, 1))).astype(self.dtype) + counts = np.zeros(5, dtype="int64") + values = 10 * prng.random((15, 1)).astype(self.dtype) + labels = np.tile(np.arange(5), (3,)).astype("intp") + + expected_out = ( + np.squeeze(values).reshape((5, 3), order="F").std(axis=1, ddof=1) ** 2 + )[:, np.newaxis] + expected_counts = counts + 3 + + self.algo(out, counts, values, labels) + assert np.allclose(out, expected_out, self.rtol) + tm.assert_numpy_array_equal(counts, expected_counts) + + def test_group_var_generic_1d_flat_labels(self): + prng = np.random.default_rng(2) + + out = (np.nan * np.ones((1, 1))).astype(self.dtype) + counts = np.zeros(1, dtype="int64") + values = 10 * prng.random((5, 1)).astype(self.dtype) + labels = np.zeros(5, dtype="intp") + + expected_out = np.array([[values.std(ddof=1) ** 2]]) 
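+ # All five rows carry the single flat label 0, so the expected output is
+ # one group whose value is the sample variance (ddof=1) of the whole column.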
+ expected_counts = counts + 5 + + self.algo(out, counts, values, labels) + + assert np.allclose(out, expected_out, self.rtol) + tm.assert_numpy_array_equal(counts, expected_counts) + + def test_group_var_generic_2d_all_finite(self): + prng = np.random.default_rng(2) + + out = (np.nan * np.ones((5, 2))).astype(self.dtype) + counts = np.zeros(5, dtype="int64") + values = 10 * prng.random((10, 2)).astype(self.dtype) + labels = np.tile(np.arange(5), (2,)).astype("intp") + + expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2 + expected_counts = counts + 2 + + self.algo(out, counts, values, labels) + assert np.allclose(out, expected_out, self.rtol) + tm.assert_numpy_array_equal(counts, expected_counts) + + def test_group_var_generic_2d_some_nan(self): + prng = np.random.default_rng(2) + + out = (np.nan * np.ones((5, 2))).astype(self.dtype) + counts = np.zeros(5, dtype="int64") + values = 10 * prng.random((10, 2)).astype(self.dtype) + values[:, 1] = np.nan + labels = np.tile(np.arange(5), (2,)).astype("intp") + + expected_out = np.vstack( + [ + values[:, 0].reshape(5, 2, order="F").std(ddof=1, axis=1) ** 2, + np.nan * np.ones(5), + ] + ).T.astype(self.dtype) + expected_counts = counts + 2 + + self.algo(out, counts, values, labels) + tm.assert_almost_equal(out, expected_out, rtol=0.5e-06) + tm.assert_numpy_array_equal(counts, expected_counts) + + def test_group_var_constant(self): + # Regression test from GH 10448. + + out = np.array([[np.nan]], dtype=self.dtype) + counts = np.array([0], dtype="int64") + values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype) + labels = np.zeros(3, dtype="intp") + + self.algo(out, counts, values, labels) + + assert counts[0] == 3 + assert out[0, 0] >= 0 + tm.assert_almost_equal(out[0, 0], 0.0) + + +class TestGroupVarFloat64(GroupVarTestMixin): + __test__ = True + + algo = staticmethod(group_var) + dtype = np.float64 + rtol = 1e-5 + + def test_group_var_large_inputs(self): + prng = np.random.default_rng(2) + + out = np.array([[np.nan]], dtype=self.dtype) + counts = np.array([0], dtype="int64") + values = (prng.random(10**6) + 10**12).astype(self.dtype) + values.shape = (10**6, 1) + labels = np.zeros(10**6, dtype="intp") + + self.algo(out, counts, values, labels) + + assert counts[0] == 10**6 + tm.assert_almost_equal(out[0, 0], 1.0 / 12, rtol=0.5e-3) + + +class TestGroupVarFloat32(GroupVarTestMixin): + __test__ = True + + algo = staticmethod(group_var) + dtype = np.float32 + rtol = 1e-2 + + +@pytest.mark.parametrize("dtype", ["float32", "float64"]) +def test_group_ohlc(dtype): + obj = np.array(np.random.default_rng(2).standard_normal(20), dtype=dtype) + + bins = np.array([6, 12, 20]) + out = np.zeros((3, 4), dtype) + counts = np.zeros(len(out), dtype=np.int64) + labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins]))) + + func = libgroupby.group_ohlc + func(out, counts, obj[:, None], labels) + + def _ohlc(group): + if isna(group).all(): + return np.repeat(np.nan, 4) + return [group[0], group.max(), group.min(), group[-1]] + + expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])]) + + tm.assert_almost_equal(out, expected) + tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64)) + + obj[:6] = np.nan + func(out, counts, obj[:, None], labels) + expected[0] = np.nan + tm.assert_almost_equal(out, expected) + + +def _check_cython_group_transform_cumulative(pd_op, np_op, dtype): + """ + Check a group transform that executes a cumulative function. 
+ + Parameters + ---------- + pd_op : callable + The pandas cumulative function. + np_op : callable + The analogous one in NumPy. + dtype : type + The specified dtype of the data. + """ + is_datetimelike = False + + data = np.array([[1], [2], [3], [4]], dtype=dtype) + answer = np.zeros_like(data) + + labels = np.array([0, 0, 0, 0], dtype=np.intp) + ngroups = 1 + pd_op(answer, data, labels, ngroups, is_datetimelike) + + tm.assert_numpy_array_equal(np_op(data), answer[:, 0], check_dtype=False) + + +@pytest.mark.parametrize("np_dtype", ["int64", "uint64", "float32", "float64"]) +def test_cython_group_transform_cumsum(np_dtype): + # see gh-4095 + dtype = np.dtype(np_dtype).type + pd_op, np_op = group_cumsum, np.cumsum + _check_cython_group_transform_cumulative(pd_op, np_op, dtype) + + +def test_cython_group_transform_cumprod(): + # see gh-4095 + dtype = np.float64 + pd_op, np_op = group_cumprod, np.cumprod + _check_cython_group_transform_cumulative(pd_op, np_op, dtype) + + +def test_cython_group_transform_algos(): + # see gh-4095 + is_datetimelike = False + + # with nans + labels = np.array([0, 0, 0, 0, 0], dtype=np.intp) + ngroups = 1 + + data = np.array([[1], [2], [3], [np.nan], [4]], dtype="float64") + actual = np.zeros_like(data) + actual.fill(np.nan) + group_cumprod(actual, data, labels, ngroups, is_datetimelike) + expected = np.array([1, 2, 6, np.nan, 24], dtype="float64") + tm.assert_numpy_array_equal(actual[:, 0], expected) + + actual = np.zeros_like(data) + actual.fill(np.nan) + group_cumsum(actual, data, labels, ngroups, is_datetimelike) + expected = np.array([1, 3, 6, np.nan, 10], dtype="float64") + tm.assert_numpy_array_equal(actual[:, 0], expected) + + # timedelta + is_datetimelike = True + data = np.array([np.timedelta64(1, "ns")] * 5, dtype="m8[ns]")[:, None] + actual = np.zeros_like(data, dtype="int64") + group_cumsum(actual, data.view("int64"), labels, ngroups, is_datetimelike) + expected = np.array( + [ + np.timedelta64(1, "ns"), + np.timedelta64(2, "ns"), + np.timedelta64(3, "ns"), + np.timedelta64(4, "ns"), + np.timedelta64(5, "ns"), + ] + ) + tm.assert_numpy_array_equal(actual[:, 0].view("m8[ns]"), expected) + + +def test_cython_group_mean_datetimelike(): + actual = np.zeros(shape=(1, 1), dtype="float64") + counts = np.array([0], dtype="int64") + data = ( + np.array( + [np.timedelta64(2, "ns"), np.timedelta64(4, "ns"), np.timedelta64("NaT")], + dtype="m8[ns]", + )[:, None] + .view("int64") + .astype("float64") + ) + labels = np.zeros(len(data), dtype=np.intp) + + group_mean(actual, counts, data, labels, is_datetimelike=True) + + tm.assert_numpy_array_equal(actual[:, 0], np.array([3], dtype="float64")) + + +def test_cython_group_mean_wrong_min_count(): + actual = np.zeros(shape=(1, 1), dtype="float64") + counts = np.zeros(1, dtype="int64") + data = np.zeros(1, dtype="float64")[:, None] + labels = np.zeros(1, dtype=np.intp) + + with pytest.raises(AssertionError, match="min_count"): + group_mean(actual, counts, data, labels, is_datetimelike=True, min_count=0) + + +def test_cython_group_mean_not_datetimelike_but_has_NaT_values(): + actual = np.zeros(shape=(1, 1), dtype="float64") + counts = np.array([0], dtype="int64") + data = ( + np.array( + [np.timedelta64("NaT"), np.timedelta64("NaT")], + dtype="m8[ns]", + )[:, None] + .view("int64") + .astype("float64") + ) + labels = np.zeros(len(data), dtype=np.intp) + + group_mean(actual, counts, data, labels, is_datetimelike=False) + + tm.assert_numpy_array_equal( + actual[:, 0], np.array(np.divide(np.add(data[0], data[1]), 2), 
dtype="float64") + ) + + +def test_cython_group_mean_Inf_at_begining_and_end(): + # GH 50367 + actual = np.array([[np.nan, np.nan], [np.nan, np.nan]], dtype="float64") + counts = np.array([0, 0], dtype="int64") + data = np.array( + [[np.inf, 1.0], [1.0, 2.0], [2.0, 3.0], [3.0, 4.0], [4.0, 5.0], [5, np.inf]], + dtype="float64", + ) + labels = np.array([0, 1, 0, 1, 0, 1], dtype=np.intp) + + group_mean(actual, counts, data, labels, is_datetimelike=False) + + expected = np.array([[np.inf, 3], [3, np.inf]], dtype="float64") + + tm.assert_numpy_array_equal( + actual, + expected, + ) + + +@pytest.mark.parametrize( + "values, out", + [ + ([[np.inf], [np.inf], [np.inf]], [[np.inf], [np.inf]]), + ([[np.inf], [np.inf], [-np.inf]], [[np.inf], [np.nan]]), + ([[np.inf], [-np.inf], [np.inf]], [[np.inf], [np.nan]]), + ([[np.inf], [-np.inf], [-np.inf]], [[np.inf], [-np.inf]]), + ], +) +def test_cython_group_sum_Inf_at_begining_and_end(values, out): + # GH #53606 + actual = np.array([[np.nan], [np.nan]], dtype="float64") + counts = np.array([0, 0], dtype="int64") + data = np.array(values, dtype="float64") + labels = np.array([0, 1, 1], dtype=np.intp) + + group_sum(actual, counts, data, labels, None, is_datetimelike=False) + + expected = np.array(out, dtype="float64") + + tm.assert_numpy_array_equal( + actual, + expected, + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_min_max.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_min_max.py new file mode 100644 index 00000000..30c7e1df --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_min_max.py @@ -0,0 +1,272 @@ +import numpy as np +import pytest + +from pandas._libs.tslibs import iNaT + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, +) +import pandas._testing as tm + + +def test_max_min_non_numeric(): + # #2700 + aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]}) + + result = aa.groupby("nn").max() + assert "ss" in result + + result = aa.groupby("nn").max(numeric_only=False) + assert "ss" in result + + result = aa.groupby("nn").min() + assert "ss" in result + + result = aa.groupby("nn").min(numeric_only=False) + assert "ss" in result + + +def test_max_min_object_multiple_columns(using_array_manager): + # GH#41111 case where the aggregation is valid for some columns but not + # others; we split object blocks column-wise, consistent with + # DataFrame._reduce + + df = DataFrame( + { + "A": [1, 1, 2, 2, 3], + "B": [1, "foo", 2, "bar", False], + "C": ["a", "b", "c", "d", "e"], + } + ) + df._consolidate_inplace() # should already be consolidate, but double-check + if not using_array_manager: + assert len(df._mgr.blocks) == 2 + + gb = df.groupby("A") + + result = gb[["C"]].max() + # "max" is valid for column "C" but not for "B" + ei = Index([1, 2, 3], name="A") + expected = DataFrame({"C": ["b", "d", "e"]}, index=ei) + tm.assert_frame_equal(result, expected) + + result = gb[["C"]].min() + # "min" is valid for column "C" but not for "B" + ei = Index([1, 2, 3], name="A") + expected = DataFrame({"C": ["a", "c", "e"]}, index=ei) + tm.assert_frame_equal(result, expected) + + +def test_min_date_with_nans(): + # GH26321 + dates = pd.to_datetime( + Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d" + ).dt.date + df = DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates}) + + result = df.groupby("b", as_index=False)["c"].min()["c"] + expected = pd.to_datetime( + Series(["2019-05-09", "2019-05-09"], 
name="c"), format="%Y-%m-%d" + ).dt.date + tm.assert_series_equal(result, expected) + + result = df.groupby("b")["c"].min() + expected.index.name = "b" + tm.assert_series_equal(result, expected) + + +def test_max_inat(): + # GH#40767 dont interpret iNaT as NaN + ser = Series([1, iNaT]) + key = np.array([1, 1], dtype=np.int64) + gb = ser.groupby(key) + + result = gb.max(min_count=2) + expected = Series({1: 1}, dtype=np.int64) + tm.assert_series_equal(result, expected, check_exact=True) + + result = gb.min(min_count=2) + expected = Series({1: iNaT}, dtype=np.int64) + tm.assert_series_equal(result, expected, check_exact=True) + + # not enough entries -> gets masked to NaN + result = gb.min(min_count=3) + expected = Series({1: np.nan}) + tm.assert_series_equal(result, expected, check_exact=True) + + +def test_max_inat_not_all_na(): + # GH#40767 dont interpret iNaT as NaN + + # make sure we dont round iNaT+1 to iNaT + ser = Series([1, iNaT, 2, iNaT + 1]) + gb = ser.groupby([1, 2, 3, 3]) + result = gb.min(min_count=2) + + # Note: in converting to float64, the iNaT + 1 maps to iNaT, i.e. is lossy + expected = Series({1: np.nan, 2: np.nan, 3: iNaT + 1}) + expected.index = expected.index.astype(int) + tm.assert_series_equal(result, expected, check_exact=True) + + +@pytest.mark.parametrize("func", ["min", "max"]) +def test_groupby_aggregate_period_column(func): + # GH 31471 + groups = [1, 2] + periods = pd.period_range("2020", periods=2, freq="Y") + df = DataFrame({"a": groups, "b": periods}) + + result = getattr(df.groupby("a")["b"], func)() + idx = Index([1, 2], name="a") + expected = Series(periods, index=idx, name="b") + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("func", ["min", "max"]) +def test_groupby_aggregate_period_frame(func): + # GH 31471 + groups = [1, 2] + periods = pd.period_range("2020", periods=2, freq="Y") + df = DataFrame({"a": groups, "b": periods}) + + result = getattr(df.groupby("a"), func)() + idx = Index([1, 2], name="a") + expected = DataFrame({"b": periods}, index=idx) + + tm.assert_frame_equal(result, expected) + + +def test_aggregate_numeric_object_dtype(): + # https://github.com/pandas-dev/pandas/issues/39329 + # simplified case: multiple object columns where one is all-NaN + # -> gets split as the all-NaN is inferred as float + df = DataFrame( + {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": [np.nan] * 4}, + ).astype(object) + result = df.groupby("key").min() + expected = ( + DataFrame( + {"key": ["A", "B"], "col1": ["a", "c"], "col2": [np.nan, np.nan]}, + ) + .set_index("key") + .astype(object) + ) + tm.assert_frame_equal(result, expected) + + # same but with numbers + df = DataFrame( + {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": range(4)}, + ).astype(object) + result = df.groupby("key").min() + expected = ( + DataFrame({"key": ["A", "B"], "col1": ["a", "c"], "col2": [0, 2]}) + .set_index("key") + .astype(object) + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("func", ["min", "max"]) +def test_aggregate_categorical_lost_index(func: str): + # GH: 28641 groupby drops index, when grouping over categorical column with min/max + ds = Series(["b"], dtype="category").cat.as_ordered() + df = DataFrame({"A": [1997], "B": ds}) + result = df.groupby("A").agg({"B": func}) + expected = DataFrame({"B": ["b"]}, index=Index([1997], name="A")) + + # ordered categorical dtype should be preserved + expected["B"] = expected["B"].astype(ds.dtype) + + tm.assert_frame_equal(result, expected) + + 
+@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Float64", "Float32", "boolean"]) +def test_groupby_min_max_nullable(dtype): + if dtype == "Int64": + # GH#41743 avoid precision loss + ts = 1618556707013635762 + elif dtype == "boolean": + ts = 0 + else: + ts = 4.0 + + df = DataFrame({"id": [2, 2], "ts": [ts, ts + 1]}) + df["ts"] = df["ts"].astype(dtype) + + gb = df.groupby("id") + + result = gb.min() + expected = df.iloc[:1].set_index("id") + tm.assert_frame_equal(result, expected) + + res_max = gb.max() + expected_max = df.iloc[1:].set_index("id") + tm.assert_frame_equal(res_max, expected_max) + + result2 = gb.min(min_count=3) + expected2 = DataFrame({"ts": [pd.NA]}, index=expected.index, dtype=dtype) + tm.assert_frame_equal(result2, expected2) + + res_max2 = gb.max(min_count=3) + tm.assert_frame_equal(res_max2, expected2) + + # Case with NA values + df2 = DataFrame({"id": [2, 2, 2], "ts": [ts, pd.NA, ts + 1]}) + df2["ts"] = df2["ts"].astype(dtype) + gb2 = df2.groupby("id") + + result3 = gb2.min() + tm.assert_frame_equal(result3, expected) + + res_max3 = gb2.max() + tm.assert_frame_equal(res_max3, expected_max) + + result4 = gb2.min(min_count=100) + tm.assert_frame_equal(result4, expected2) + + res_max4 = gb2.max(min_count=100) + tm.assert_frame_equal(res_max4, expected2) + + +def test_min_max_nullable_uint64_empty_group(): + # don't raise NotImplementedError from libgroupby + cat = pd.Categorical([0] * 10, categories=[0, 1]) + df = DataFrame({"A": cat, "B": pd.array(np.arange(10, dtype=np.uint64))}) + gb = df.groupby("A", observed=False) + + res = gb.min() + + idx = pd.CategoricalIndex([0, 1], dtype=cat.dtype, name="A") + expected = DataFrame({"B": pd.array([0, pd.NA], dtype="UInt64")}, index=idx) + tm.assert_frame_equal(res, expected) + + res = gb.max() + expected.iloc[0, 0] = 9 + tm.assert_frame_equal(res, expected) + + +@pytest.mark.parametrize("func", ["first", "last", "min", "max"]) +def test_groupby_min_max_categorical(func): + # GH: 52151 + df = DataFrame( + { + "col1": pd.Categorical(["A"], categories=list("AB"), ordered=True), + "col2": pd.Categorical([1], categories=[1, 2], ordered=True), + "value": 0.1, + } + ) + result = getattr(df.groupby("col1", observed=False), func)() + + idx = pd.CategoricalIndex(data=["A", "B"], name="col1", ordered=True) + expected = DataFrame( + { + "col2": pd.Categorical([1, None], categories=[1, 2], ordered=True), + "value": [0.1, None], + }, + index=idx, + ) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_missing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_missing.py new file mode 100644 index 00000000..37bf2227 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_missing.py @@ -0,0 +1,161 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + date_range, +) +import pandas._testing as tm + + +@pytest.mark.parametrize("func", ["ffill", "bfill"]) +def test_groupby_column_index_name_lost_fill_funcs(func): + # GH: 29764 groupby loses index sometimes + df = DataFrame( + [[1, 1.0, -1.0], [1, np.nan, np.nan], [1, 2.0, -2.0]], + columns=Index(["type", "a", "b"], name="idx"), + ) + df_grouped = df.groupby(["type"])[["a", "b"]] + result = getattr(df_grouped, func)().columns + expected = Index(["a", "b"], name="idx") + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("func", ["ffill", "bfill"]) +def test_groupby_fill_duplicate_column_names(func): + # GH: 
25610 ValueError with duplicate column names + df1 = DataFrame({"field1": [1, 3, 4], "field2": [1, 3, 4]}) + df2 = DataFrame({"field1": [1, np.nan, 4]}) + df_grouped = pd.concat([df1, df2], axis=1).groupby(by=["field2"]) + expected = DataFrame( + [[1, 1.0], [3, np.nan], [4, 4.0]], columns=["field1", "field1"] + ) + result = getattr(df_grouped, func)() + tm.assert_frame_equal(result, expected) + + +def test_ffill_missing_arguments(): + # GH 14955 + df = DataFrame({"a": [1, 2], "b": [1, 1]}) + with pytest.raises(ValueError, match="Must specify a fill"): + df.groupby("b").fillna() + + +@pytest.mark.parametrize( + "method, expected", [("ffill", [None, "a", "a"]), ("bfill", ["a", "a", None])] +) +def test_fillna_with_string_dtype(method, expected): + # GH 40250 + df = DataFrame({"a": pd.array([None, "a", None], dtype="string"), "b": [0, 0, 0]}) + grp = df.groupby("b") + msg = "DataFrameGroupBy.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grp.fillna(method=method) + expected = DataFrame({"a": pd.array(expected, dtype="string")}) + tm.assert_frame_equal(result, expected) + + +def test_fill_consistency(): + # GH9221 + # pass thru keyword arguments to the generated wrapper + # are set if the passed kw is None (only) + df = DataFrame( + index=pd.MultiIndex.from_product( + [["value1", "value2"], date_range("2014-01-01", "2014-01-06")] + ), + columns=Index(["1", "2"], name="id"), + ) + df["1"] = [ + np.nan, + 1, + np.nan, + np.nan, + 11, + np.nan, + np.nan, + 2, + np.nan, + np.nan, + 22, + np.nan, + ] + df["2"] = [ + np.nan, + 3, + np.nan, + np.nan, + 33, + np.nan, + np.nan, + 4, + np.nan, + np.nan, + 44, + np.nan, + ] + + msg = "The 'axis' keyword in DataFrame.groupby is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.groupby(level=0, axis=0).fillna(method="ffill") + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.T.groupby(level=0, axis=1).fillna(method="ffill").T + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("method", ["ffill", "bfill"]) +@pytest.mark.parametrize("dropna", [True, False]) +@pytest.mark.parametrize("has_nan_group", [True, False]) +def test_ffill_handles_nan_groups(dropna, method, has_nan_group): + # GH 34725 + + df_without_nan_rows = DataFrame([(1, 0.1), (2, 0.2)]) + + ridx = [-1, 0, -1, -1, 1, -1] + df = df_without_nan_rows.reindex(ridx).reset_index(drop=True) + + group_b = np.nan if has_nan_group else "b" + df["group_col"] = pd.Series(["a"] * 3 + [group_b] * 3) + + grouped = df.groupby(by="group_col", dropna=dropna) + result = getattr(grouped, method)(limit=None) + + expected_rows = { + ("ffill", True, True): [-1, 0, 0, -1, -1, -1], + ("ffill", True, False): [-1, 0, 0, -1, 1, 1], + ("ffill", False, True): [-1, 0, 0, -1, 1, 1], + ("ffill", False, False): [-1, 0, 0, -1, 1, 1], + ("bfill", True, True): [0, 0, -1, -1, -1, -1], + ("bfill", True, False): [0, 0, -1, 1, 1, -1], + ("bfill", False, True): [0, 0, -1, 1, 1, -1], + ("bfill", False, False): [0, 0, -1, 1, 1, -1], + } + + ridx = expected_rows.get((method, dropna, has_nan_group)) + expected = df_without_nan_rows.reindex(ridx).reset_index(drop=True) + # columns are a 'take' on df.columns, which are object dtype + expected.columns = expected.columns.astype(object) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("min_count, value", [(2, np.nan), (-1, 1.0)]) +@pytest.mark.parametrize("func", ["first", 
"last", "max", "min"]) +def test_min_count(func, min_count, value): + # GH#37821 + df = DataFrame({"a": [1] * 3, "b": [1, np.nan, np.nan], "c": [np.nan] * 3}) + result = getattr(df.groupby("a"), func)(min_count=min_count) + expected = DataFrame({"b": [value], "c": [np.nan]}, index=Index([1], name="a")) + tm.assert_frame_equal(result, expected) + + +def test_indices_with_missing(): + # GH 9304 + df = DataFrame({"a": [1, 1, np.nan], "b": [2, 3, 4], "c": [5, 6, 7]}) + g = df.groupby(["a", "b"]) + result = g.indices + expected = {(1.0, 2): np.array([0]), (1.0, 3): np.array([1])} + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_nth.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_nth.py new file mode 100644 index 00000000..1cf4a90e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_nth.py @@ -0,0 +1,875 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + Timestamp, + isna, +) +import pandas._testing as tm + + +def test_first_last_nth(df): + # tests for first / last / nth + grouped = df.groupby("A") + first = grouped.first() + expected = df.loc[[1, 0], ["B", "C", "D"]] + expected.index = Index(["bar", "foo"], name="A") + expected = expected.sort_index() + tm.assert_frame_equal(first, expected) + + nth = grouped.nth(0) + expected = df.loc[[0, 1]] + tm.assert_frame_equal(nth, expected) + + last = grouped.last() + expected = df.loc[[5, 7], ["B", "C", "D"]] + expected.index = Index(["bar", "foo"], name="A") + tm.assert_frame_equal(last, expected) + + nth = grouped.nth(-1) + expected = df.iloc[[5, 7]] + tm.assert_frame_equal(nth, expected) + + nth = grouped.nth(1) + expected = df.iloc[[2, 3]] + tm.assert_frame_equal(nth, expected) + + # it works! 
+ grouped["B"].first() + grouped["B"].last() + grouped["B"].nth(0) + + df.loc[df["A"] == "foo", "B"] = np.nan + assert isna(grouped["B"].first()["foo"]) + assert isna(grouped["B"].last()["foo"]) + assert isna(grouped["B"].nth(0).iloc[0]) + + # v0.14.0 whatsnew + df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"]) + g = df.groupby("A") + result = g.first() + expected = df.iloc[[1, 2]].set_index("A") + tm.assert_frame_equal(result, expected) + + expected = df.iloc[[1, 2]] + result = g.nth(0, dropna="any") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("method", ["first", "last"]) +def test_first_last_with_na_object(method, nulls_fixture): + # https://github.com/pandas-dev/pandas/issues/32123 + groups = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}).groupby("a") + result = getattr(groups, method)() + + if method == "first": + values = [1, 3] + else: + values = [2, 3] + + values = np.array(values, dtype=result["b"].dtype) + idx = Index([1, 2], name="a") + expected = DataFrame({"b": values}, index=idx) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("index", [0, -1]) +def test_nth_with_na_object(index, nulls_fixture): + # https://github.com/pandas-dev/pandas/issues/32123 + df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}) + groups = df.groupby("a") + result = groups.nth(index) + expected = df.iloc[[0, 2]] if index == 0 else df.iloc[[1, 3]] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("method", ["first", "last"]) +def test_first_last_with_None(method): + # https://github.com/pandas-dev/pandas/issues/32800 + # None should be preserved as object dtype + df = DataFrame.from_dict({"id": ["a"], "value": [None]}) + groups = df.groupby("id", as_index=False) + result = getattr(groups, method)() + + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("method", ["first", "last"]) +@pytest.mark.parametrize( + "df, expected", + [ + ( + DataFrame({"id": "a", "value": [None, "foo", np.nan]}), + DataFrame({"value": ["foo"]}, index=Index(["a"], name="id")), + ), + ( + DataFrame({"id": "a", "value": [np.nan]}, dtype=object), + DataFrame({"value": [None]}, index=Index(["a"], name="id")), + ), + ], +) +def test_first_last_with_None_expanded(method, df, expected): + # GH 32800, 38286 + result = getattr(df.groupby("id"), method)() + tm.assert_frame_equal(result, expected) + + +def test_first_last_nth_dtypes(df_mixed_floats): + df = df_mixed_floats.copy() + df["E"] = True + df["F"] = 1 + + # tests for first / last / nth + grouped = df.groupby("A") + first = grouped.first() + expected = df.loc[[1, 0], ["B", "C", "D", "E", "F"]] + expected.index = Index(["bar", "foo"], name="A") + expected = expected.sort_index() + tm.assert_frame_equal(first, expected) + + last = grouped.last() + expected = df.loc[[5, 7], ["B", "C", "D", "E", "F"]] + expected.index = Index(["bar", "foo"], name="A") + expected = expected.sort_index() + tm.assert_frame_equal(last, expected) + + nth = grouped.nth(1) + expected = df.iloc[[2, 3]] + tm.assert_frame_equal(nth, expected) + + # GH 2763, first/last shifting dtypes + idx = list(range(10)) + idx.append(9) + s = Series(data=range(11), index=idx, name="IntCol") + assert s.dtype == "int64" + f = s.groupby(level=0).first() + assert f.dtype == "int64" + + +def test_first_last_nth_nan_dtype(): + # GH 33591 + df = DataFrame({"data": ["A"], "nans": Series([None], dtype=object)}) + grouped = df.groupby("data") + + expected = df.set_index("data").nans + 
tm.assert_series_equal(grouped.nans.first(), expected) + tm.assert_series_equal(grouped.nans.last(), expected) + + expected = df.nans + tm.assert_series_equal(grouped.nans.nth(-1), expected) + tm.assert_series_equal(grouped.nans.nth(0), expected) + + +def test_first_strings_timestamps(): + # GH 11244 + test = DataFrame( + { + Timestamp("2012-01-01 00:00:00"): ["a", "b"], + Timestamp("2012-01-02 00:00:00"): ["c", "d"], + "name": ["e", "e"], + "aaaa": ["f", "g"], + } + ) + result = test.groupby("name").first() + expected = DataFrame( + [["a", "c", "f"]], + columns=Index([Timestamp("2012-01-01"), Timestamp("2012-01-02"), "aaaa"]), + index=Index(["e"], name="name"), + ) + tm.assert_frame_equal(result, expected) + + +def test_nth(): + df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"]) + g = df.groupby("A") + + tm.assert_frame_equal(g.nth(0), df.iloc[[0, 2]]) + tm.assert_frame_equal(g.nth(1), df.iloc[[1]]) + tm.assert_frame_equal(g.nth(2), df.loc[[]]) + tm.assert_frame_equal(g.nth(-1), df.iloc[[1, 2]]) + tm.assert_frame_equal(g.nth(-2), df.iloc[[0]]) + tm.assert_frame_equal(g.nth(-3), df.loc[[]]) + tm.assert_series_equal(g.B.nth(0), df.B.iloc[[0, 2]]) + tm.assert_series_equal(g.B.nth(1), df.B.iloc[[1]]) + tm.assert_frame_equal(g[["B"]].nth(0), df[["B"]].iloc[[0, 2]]) + + tm.assert_frame_equal(g.nth(0, dropna="any"), df.iloc[[1, 2]]) + tm.assert_frame_equal(g.nth(-1, dropna="any"), df.iloc[[1, 2]]) + + tm.assert_frame_equal(g.nth(7, dropna="any"), df.iloc[:0]) + tm.assert_frame_equal(g.nth(2, dropna="any"), df.iloc[:0]) + + # out of bounds, regression from 0.13.1 + # GH 6621 + df = DataFrame( + { + "color": {0: "green", 1: "green", 2: "red", 3: "red", 4: "red"}, + "food": {0: "ham", 1: "eggs", 2: "eggs", 3: "ham", 4: "pork"}, + "two": { + 0: 1.5456590000000001, + 1: -0.070345000000000005, + 2: -2.4004539999999999, + 3: 0.46206000000000003, + 4: 0.52350799999999997, + }, + "one": { + 0: 0.56573799999999996, + 1: -0.9742360000000001, + 2: 1.033801, + 3: -0.78543499999999999, + 4: 0.70422799999999997, + }, + } + ).set_index(["color", "food"]) + + result = df.groupby(level=0, as_index=False).nth(2) + expected = df.iloc[[-1]] + tm.assert_frame_equal(result, expected) + + result = df.groupby(level=0, as_index=False).nth(3) + expected = df.loc[[]] + tm.assert_frame_equal(result, expected) + + # GH 7559 + # from the vbench + df = DataFrame(np.random.default_rng(2).integers(1, 10, (100, 2)), dtype="int64") + s = df[1] + g = df[0] + expected = s.groupby(g).first() + expected2 = s.groupby(g).apply(lambda x: x.iloc[0]) + tm.assert_series_equal(expected2, expected, check_names=False) + assert expected.name == 1 + assert expected2.name == 1 + + # validate first + v = s[g == 1].iloc[0] + assert expected.iloc[0] == v + assert expected2.iloc[0] == v + + with pytest.raises(ValueError, match="For a DataFrame"): + s.groupby(g, sort=False).nth(0, dropna=True) + + # doc example + df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"]) + g = df.groupby("A") + result = g.B.nth(0, dropna="all") + expected = df.B.iloc[[1, 2]] + tm.assert_series_equal(result, expected) + + # test multiple nth values + df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]], columns=["A", "B"]) + g = df.groupby("A") + + tm.assert_frame_equal(g.nth(0), df.iloc[[0, 3]]) + tm.assert_frame_equal(g.nth([0]), df.iloc[[0, 3]]) + tm.assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]]) + tm.assert_frame_equal(g.nth([0, -1]), df.iloc[[0, 2, 3, 4]]) + tm.assert_frame_equal(g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]]) + 
tm.assert_frame_equal(g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]]) + tm.assert_frame_equal(g.nth([2]), df.iloc[[2]]) + tm.assert_frame_equal(g.nth([3, 4]), df.loc[[]]) + + business_dates = pd.date_range(start="4/1/2014", end="6/30/2014", freq="B") + df = DataFrame(1, index=business_dates, columns=["a", "b"]) + # get the first, fourth and last two business days for each month + key = [df.index.year, df.index.month] + result = df.groupby(key, as_index=False).nth([0, 3, -2, -1]) + expected_dates = pd.to_datetime( + [ + "2014/4/1", + "2014/4/4", + "2014/4/29", + "2014/4/30", + "2014/5/1", + "2014/5/6", + "2014/5/29", + "2014/5/30", + "2014/6/2", + "2014/6/5", + "2014/6/27", + "2014/6/30", + ] + ) + expected = DataFrame(1, columns=["a", "b"], index=expected_dates) + tm.assert_frame_equal(result, expected) + + +def test_nth_multi_grouper(three_group): + # PR 9090, related to issue 8979 + # test nth on multiple groupers + grouped = three_group.groupby(["A", "B"]) + result = grouped.nth(0) + expected = three_group.iloc[[0, 3, 4, 7]] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "data, expected_first, expected_last", + [ + ( + { + "id": ["A"], + "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"), + "foo": [1], + }, + { + "id": ["A"], + "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"), + "foo": [1], + }, + { + "id": ["A"], + "time": Timestamp("2012-02-01 14:00:00", tz="US/Central"), + "foo": [1], + }, + ), + ( + { + "id": ["A", "B", "A"], + "time": [ + Timestamp("2012-01-01 13:00:00", tz="America/New_York"), + Timestamp("2012-02-01 14:00:00", tz="US/Central"), + Timestamp("2012-03-01 12:00:00", tz="Europe/London"), + ], + "foo": [1, 2, 3], + }, + { + "id": ["A", "B"], + "time": [ + Timestamp("2012-01-01 13:00:00", tz="America/New_York"), + Timestamp("2012-02-01 14:00:00", tz="US/Central"), + ], + "foo": [1, 2], + }, + { + "id": ["A", "B"], + "time": [ + Timestamp("2012-03-01 12:00:00", tz="Europe/London"), + Timestamp("2012-02-01 14:00:00", tz="US/Central"), + ], + "foo": [3, 2], + }, + ), + ], +) +def test_first_last_tz(data, expected_first, expected_last): + # GH15884 + # Test that the timezone is retained when calling first + # or last on groupby with as_index=False + + df = DataFrame(data) + + result = df.groupby("id", as_index=False).first() + expected = DataFrame(expected_first) + cols = ["id", "time", "foo"] + tm.assert_frame_equal(result[cols], expected[cols]) + + result = df.groupby("id", as_index=False)["time"].first() + tm.assert_frame_equal(result, expected[["id", "time"]]) + + result = df.groupby("id", as_index=False).last() + expected = DataFrame(expected_last) + cols = ["id", "time", "foo"] + tm.assert_frame_equal(result[cols], expected[cols]) + + result = df.groupby("id", as_index=False)["time"].last() + tm.assert_frame_equal(result, expected[["id", "time"]]) + + +@pytest.mark.parametrize( + "method, ts, alpha", + [ + ["first", Timestamp("2013-01-01", tz="US/Eastern"), "a"], + ["last", Timestamp("2013-01-02", tz="US/Eastern"), "b"], + ], +) +def test_first_last_tz_multi_column(method, ts, alpha): + # GH 21603 + category_string = Series(list("abc")).astype("category") + df = DataFrame( + { + "group": [1, 1, 2], + "category_string": category_string, + "datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"), + } + ) + result = getattr(df.groupby("group"), method)() + expected = DataFrame( + { + "category_string": pd.Categorical( + [alpha, "c"], dtype=category_string.dtype + ), + "datetimetz": [ts, Timestamp("2013-01-03", 
tz="US/Eastern")], + }, + index=Index([1, 2], name="group"), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "values", + [ + pd.array([True, False], dtype="boolean"), + pd.array([1, 2], dtype="Int64"), + pd.to_datetime(["2020-01-01", "2020-02-01"]), + pd.to_timedelta([1, 2], unit="D"), + ], +) +@pytest.mark.parametrize("function", ["first", "last", "min", "max"]) +def test_first_last_extension_array_keeps_dtype(values, function): + # https://github.com/pandas-dev/pandas/issues/33071 + # https://github.com/pandas-dev/pandas/issues/32194 + df = DataFrame({"a": [1, 2], "b": values}) + grouped = df.groupby("a") + idx = Index([1, 2], name="a") + expected_series = Series(values, name="b", index=idx) + expected_frame = DataFrame({"b": values}, index=idx) + + result_series = getattr(grouped["b"], function)() + tm.assert_series_equal(result_series, expected_series) + + result_frame = grouped.agg({"b": function}) + tm.assert_frame_equal(result_frame, expected_frame) + + +def test_nth_multi_index_as_expected(): + # PR 9090, related to issue 8979 + # test nth on MultiIndex + three_group = DataFrame( + { + "A": [ + "foo", + "foo", + "foo", + "foo", + "bar", + "bar", + "bar", + "bar", + "foo", + "foo", + "foo", + ], + "B": [ + "one", + "one", + "one", + "two", + "one", + "one", + "one", + "two", + "two", + "two", + "one", + ], + "C": [ + "dull", + "dull", + "shiny", + "dull", + "dull", + "shiny", + "shiny", + "dull", + "shiny", + "shiny", + "shiny", + ], + } + ) + grouped = three_group.groupby(["A", "B"]) + result = grouped.nth(0) + expected = three_group.iloc[[0, 3, 4, 7]] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "op, n, expected_rows", + [ + ("head", -1, [0]), + ("head", 0, []), + ("head", 1, [0, 2]), + ("head", 7, [0, 1, 2]), + ("tail", -1, [1]), + ("tail", 0, []), + ("tail", 1, [1, 2]), + ("tail", 7, [0, 1, 2]), + ], +) +@pytest.mark.parametrize("columns", [None, [], ["A"], ["B"], ["A", "B"]]) +@pytest.mark.parametrize("as_index", [True, False]) +def test_groupby_head_tail(op, n, expected_rows, columns, as_index): + df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"]) + g = df.groupby("A", as_index=as_index) + expected = df.iloc[expected_rows] + if columns is not None: + g = g[columns] + expected = expected[columns] + result = getattr(g, op)(n) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "op, n, expected_cols", + [ + ("head", -1, [0]), + ("head", 0, []), + ("head", 1, [0, 2]), + ("head", 7, [0, 1, 2]), + ("tail", -1, [1]), + ("tail", 0, []), + ("tail", 1, [1, 2]), + ("tail", 7, [0, 1, 2]), + ], +) +def test_groupby_head_tail_axis_1(op, n, expected_cols): + # GH 9772 + df = DataFrame( + [[1, 2, 3], [1, 4, 5], [2, 6, 7], [3, 8, 9]], columns=["A", "B", "C"] + ) + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + g = df.groupby([0, 0, 1], axis=1) + expected = df.iloc[:, expected_cols] + result = getattr(g, op)(n) + tm.assert_frame_equal(result, expected) + + +def test_group_selection_cache(): + # GH 12839 nth, head, and tail should return same result consistently + df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"]) + expected = df.iloc[[0, 2]] + + g = df.groupby("A") + result1 = g.head(n=2) + result2 = g.nth(0) + tm.assert_frame_equal(result1, df) + tm.assert_frame_equal(result2, expected) + + g = df.groupby("A") + result1 = g.tail(n=2) + result2 = g.nth(0) + tm.assert_frame_equal(result1, df) + tm.assert_frame_equal(result2, 
expected) + + g = df.groupby("A") + result1 = g.nth(0) + result2 = g.head(n=2) + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, df) + + g = df.groupby("A") + result1 = g.nth(0) + result2 = g.tail(n=2) + tm.assert_frame_equal(result1, expected) + tm.assert_frame_equal(result2, df) + + +def test_nth_empty(): + # GH 16064 + df = DataFrame(index=[0], columns=["a", "b", "c"]) + result = df.groupby("a").nth(10) + expected = df.iloc[:0] + tm.assert_frame_equal(result, expected) + + result = df.groupby(["a", "b"]).nth(10) + expected = df.iloc[:0] + tm.assert_frame_equal(result, expected) + + +def test_nth_column_order(): + # GH 20760 + # Check that nth preserves column order + df = DataFrame( + [[1, "b", 100], [1, "a", 50], [1, "a", np.nan], [2, "c", 200], [2, "d", 150]], + columns=["A", "C", "B"], + ) + result = df.groupby("A").nth(0) + expected = df.iloc[[0, 3]] + tm.assert_frame_equal(result, expected) + + result = df.groupby("A").nth(-1, dropna="any") + expected = df.iloc[[1, 4]] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dropna", [None, "any", "all"]) +def test_nth_nan_in_grouper(dropna): + # GH 26011 + df = DataFrame( + { + "a": [np.nan, "a", np.nan, "b", np.nan], + "b": [0, 2, 4, 6, 8], + "c": [1, 3, 5, 7, 9], + } + ) + result = df.groupby("a").nth(0, dropna=dropna) + expected = df.iloc[[1, 3]] + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dropna", [None, "any", "all"]) +def test_nth_nan_in_grouper_series(dropna): + # GH 26454 + df = DataFrame( + { + "a": [np.nan, "a", np.nan, "b", np.nan], + "b": [0, 2, 4, 6, 8], + } + ) + result = df.groupby("a")["b"].nth(0, dropna=dropna) + expected = df["b"].iloc[[1, 3]] + + tm.assert_series_equal(result, expected) + + +def test_first_categorical_and_datetime_data_nat(): + # GH 20520 + df = DataFrame( + { + "group": ["first", "first", "second", "third", "third"], + "time": 5 * [np.datetime64("NaT")], + "categories": Series(["a", "b", "c", "a", "b"], dtype="category"), + } + ) + result = df.groupby("group").first() + expected = DataFrame( + { + "time": 3 * [np.datetime64("NaT")], + "categories": Series(["a", "c", "a"]).astype( + pd.CategoricalDtype(["a", "b", "c"]) + ), + } + ) + expected.index = Index(["first", "second", "third"], name="group") + tm.assert_frame_equal(result, expected) + + +def test_first_multi_key_groupby_categorical(): + # GH 22512 + df = DataFrame( + { + "A": [1, 1, 1, 2, 2], + "B": [100, 100, 200, 100, 100], + "C": ["apple", "orange", "mango", "mango", "orange"], + "D": ["jupiter", "mercury", "mars", "venus", "venus"], + } + ) + df = df.astype({"D": "category"}) + result = df.groupby(by=["A", "B"]).first() + expected = DataFrame( + { + "C": ["apple", "mango", "mango"], + "D": Series(["jupiter", "mars", "venus"]).astype( + pd.CategoricalDtype(["jupiter", "mars", "mercury", "venus"]) + ), + } + ) + expected.index = MultiIndex.from_tuples( + [(1, 100), (1, 200), (2, 100)], names=["A", "B"] + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("method", ["first", "last", "nth"]) +def test_groupby_last_first_nth_with_none(method, nulls_fixture): + # GH29645 + expected = Series(["y"]) + data = Series( + [nulls_fixture, nulls_fixture, nulls_fixture, "y", nulls_fixture], + index=[0, 0, 0, 0, 0], + ).groupby(level=0) + + if method == "nth": + result = getattr(data, method)(3) + else: + result = getattr(data, method)() + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "arg, expected_rows", + [ + [slice(None, 
3, 2), [0, 1, 4, 5]], + [slice(None, -2), [0, 2, 5]], + [[slice(None, 2), slice(-2, None)], [0, 1, 2, 3, 4, 6, 7]], + [[0, 1, slice(-2, None)], [0, 1, 2, 3, 4, 6, 7]], + ], +) +def test_slice(slice_test_df, slice_test_grouped, arg, expected_rows): + # Test slices GH #42947 + + result = slice_test_grouped.nth[arg] + equivalent = slice_test_grouped.nth(arg) + expected = slice_test_df.iloc[expected_rows] + + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(equivalent, expected) + + +def test_nth_indexed(slice_test_df, slice_test_grouped): + # Test index notation GH #44688 + + result = slice_test_grouped.nth[0, 1, -2:] + equivalent = slice_test_grouped.nth([0, 1, slice(-2, None)]) + expected = slice_test_df.iloc[[0, 1, 2, 3, 4, 6, 7]] + + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(equivalent, expected) + + +def test_invalid_argument(slice_test_grouped): + # Test for error on invalid argument + + with pytest.raises(TypeError, match="Invalid index"): + slice_test_grouped.nth(3.14) + + +def test_negative_step(slice_test_grouped): + # Test for error on negative slice step + + with pytest.raises(ValueError, match="Invalid step"): + slice_test_grouped.nth(slice(None, None, -1)) + + +def test_np_ints(slice_test_df, slice_test_grouped): + # Test np ints work + + result = slice_test_grouped.nth(np.array([0, 1])) + expected = slice_test_df.iloc[[0, 1, 2, 3, 4]] + tm.assert_frame_equal(result, expected) + + +def test_groupby_nth_with_column_axis(): + # GH43926 + df = DataFrame( + [ + [4, 5, 6], + [8, 8, 7], + ], + index=["z", "y"], + columns=["C", "B", "A"], + ) + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby(df.iloc[1], axis=1) + result = gb.nth(0) + expected = df.iloc[:, [0, 2]] + tm.assert_frame_equal(result, expected) + + +def test_groupby_nth_interval(): + # GH#24205 + idx_result = MultiIndex( + [ + pd.CategoricalIndex([pd.Interval(0, 1), pd.Interval(1, 2)]), + pd.CategoricalIndex([pd.Interval(0, 10), pd.Interval(10, 20)]), + ], + [[0, 0, 0, 1, 1], [0, 1, 1, 0, -1]], + ) + df_result = DataFrame({"col": range(len(idx_result))}, index=idx_result) + result = df_result.groupby(level=[0, 1], observed=False).nth(0) + val_expected = [0, 1, 3] + idx_expected = MultiIndex( + [ + pd.CategoricalIndex([pd.Interval(0, 1), pd.Interval(1, 2)]), + pd.CategoricalIndex([pd.Interval(0, 10), pd.Interval(10, 20)]), + ], + [[0, 0, 1], [0, 1, 0]], + ) + expected = DataFrame(val_expected, index=idx_expected, columns=["col"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "start, stop, expected_values, expected_columns", + [ + (None, None, [0, 1, 2, 3, 4], list("ABCDE")), + (None, 1, [0, 3], list("AD")), + (None, 9, [0, 1, 2, 3, 4], list("ABCDE")), + (None, -1, [0, 1, 3], list("ABD")), + (1, None, [1, 2, 4], list("BCE")), + (1, -1, [1], list("B")), + (-1, None, [2, 4], list("CE")), + (-1, 2, [4], list("E")), + ], +) +@pytest.mark.parametrize("method", ["call", "index"]) +def test_nth_slices_with_column_axis( + start, stop, expected_values, expected_columns, method +): + df = DataFrame([range(5)], columns=[list("ABCDE")]) + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby([5, 5, 5, 6, 6], axis=1) + result = { + "call": lambda start, stop: gb.nth(slice(start, stop)), + "index": lambda start, stop: gb.nth[start:stop], + }[method](start, stop) + expected = DataFrame([expected_values], 
columns=[expected_columns]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.filterwarnings( + "ignore:invalid value encountered in remainder:RuntimeWarning" +) +def test_head_tail_dropna_true(): + # GH#45089 + df = DataFrame( + [["a", "z"], ["b", np.nan], ["c", np.nan], ["c", np.nan]], columns=["X", "Y"] + ) + expected = DataFrame([["a", "z"]], columns=["X", "Y"]) + + result = df.groupby(["X", "Y"]).head(n=1) + tm.assert_frame_equal(result, expected) + + result = df.groupby(["X", "Y"]).tail(n=1) + tm.assert_frame_equal(result, expected) + + result = df.groupby(["X", "Y"]).nth(n=0) + tm.assert_frame_equal(result, expected) + + +def test_head_tail_dropna_false(): + # GH#45089 + df = DataFrame([["a", "z"], ["b", np.nan], ["c", np.nan]], columns=["X", "Y"]) + expected = DataFrame([["a", "z"], ["b", np.nan], ["c", np.nan]], columns=["X", "Y"]) + + result = df.groupby(["X", "Y"], dropna=False).head(n=1) + tm.assert_frame_equal(result, expected) + + result = df.groupby(["X", "Y"], dropna=False).tail(n=1) + tm.assert_frame_equal(result, expected) + + result = df.groupby(["X", "Y"], dropna=False).nth(n=0) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("selection", ("b", ["b"], ["b", "c"])) +@pytest.mark.parametrize("dropna", ["any", "all", None]) +def test_nth_after_selection(selection, dropna): + # GH#11038, GH#53518 + df = DataFrame( + { + "a": [1, 1, 2], + "b": [np.nan, 3, 4], + "c": [5, 6, 7], + } + ) + gb = df.groupby("a")[selection] + result = gb.nth(0, dropna=dropna) + if dropna == "any" or (dropna == "all" and selection != ["b", "c"]): + locs = [1, 2] + else: + locs = [0, 2] + expected = df.loc[locs, selection] + tm.assert_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_numba.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_numba.py new file mode 100644 index 00000000..ee7d3424 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_numba.py @@ -0,0 +1,80 @@ +import pytest + +from pandas import ( + DataFrame, + Series, + option_context, +) +import pandas._testing as tm + +pytestmark = pytest.mark.single_cpu + +pytest.importorskip("numba") + + +@pytest.mark.filterwarnings("ignore") +# Filter warnings when parallel=True and the function can't be parallelized by Numba +class TestEngine: + def test_cython_vs_numba_frame( + self, sort, nogil, parallel, nopython, numba_supported_reductions + ): + func, kwargs = numba_supported_reductions + df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)}) + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + gb = df.groupby("a", sort=sort) + result = getattr(gb, func)( + engine="numba", engine_kwargs=engine_kwargs, **kwargs + ) + expected = getattr(gb, func)(**kwargs) + tm.assert_frame_equal(result, expected) + + def test_cython_vs_numba_getitem( + self, sort, nogil, parallel, nopython, numba_supported_reductions + ): + func, kwargs = numba_supported_reductions + df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)}) + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + gb = df.groupby("a", sort=sort)["c"] + result = getattr(gb, func)( + engine="numba", engine_kwargs=engine_kwargs, **kwargs + ) + expected = getattr(gb, func)(**kwargs) + tm.assert_series_equal(result, expected) + + def test_cython_vs_numba_series( + self, sort, nogil, parallel, nopython, numba_supported_reductions + ): + func, kwargs = numba_supported_reductions + ser = 
Series(range(3), index=[1, 2, 1], name="foo")
+        engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}
+        gb = ser.groupby(level=0, sort=sort)
+        result = getattr(gb, func)(
+            engine="numba", engine_kwargs=engine_kwargs, **kwargs
+        )
+        expected = getattr(gb, func)(**kwargs)
+        tm.assert_series_equal(result, expected)
+
+    def test_as_index_false_unsupported(self, numba_supported_reductions):
+        func, kwargs = numba_supported_reductions
+        df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})
+        gb = df.groupby("a", as_index=False)
+        with pytest.raises(NotImplementedError, match="as_index=False"):
+            getattr(gb, func)(engine="numba", **kwargs)
+
+    def test_axis_1_unsupported(self, numba_supported_reductions):
+        func, kwargs = numba_supported_reductions
+        df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})
+        gb = df.groupby("a", axis=1)
+        with pytest.raises(NotImplementedError, match="axis=1"):
+            getattr(gb, func)(engine="numba", **kwargs)
+
+    def test_no_engine_doesnt_raise(self):
+        # GH55520
+        df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})
+        gb = df.groupby("a")
+        # Make sure functions without an engine argument don't raise
+        # when the global use_numba option is set
+        with option_context("compute.use_numba", True):
+            res = gb.agg({"b": "first"})
+            expected = gb.agg({"b": "first"})
+            tm.assert_frame_equal(res, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_nunique.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_nunique.py
new file mode 100644
index 00000000..9c9e32d9
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_nunique.py
@@ -0,0 +1,190 @@
+import datetime as dt
+from string import ascii_lowercase
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    MultiIndex,
+    NaT,
+    Series,
+    Timestamp,
+    date_range,
+)
+import pandas._testing as tm
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("sort", [False, True])
+@pytest.mark.parametrize("dropna", [False, True])
+@pytest.mark.parametrize("as_index", [True, False])
+@pytest.mark.parametrize("with_nan", [True, False])
+@pytest.mark.parametrize("keys", [["joe"], ["joe", "jim"]])
+def test_series_groupby_nunique(sort, dropna, as_index, with_nan, keys):
+    n = 100
+    m = 10
+    days = date_range("2015-08-23", periods=10)
+    df = DataFrame(
+        {
+            "jim": np.random.default_rng(2).choice(list(ascii_lowercase), n),
+            "joe": np.random.default_rng(2).choice(days, n),
+            "julie": np.random.default_rng(2).integers(0, m, n),
+        }
+    )
+    if with_nan:
+        df = df.astype({"julie": float})  # Explicit cast to avoid implicit cast below
+        df.loc[1::17, "jim"] = None
+        df.loc[3::37, "joe"] = None
+        df.loc[7::19, "julie"] = None
+        df.loc[8::19, "julie"] = None
+        df.loc[9::19, "julie"] = None
+    original_df = df.copy()
+    gr = df.groupby(keys, as_index=as_index, sort=sort)
+    left = gr["julie"].nunique(dropna=dropna)
+
+    gr = df.groupby(keys, as_index=as_index, sort=sort)
+    right = gr["julie"].apply(Series.nunique, dropna=dropna)
+    if not as_index:
+        right = right.reset_index(drop=True)
+
+    if as_index:
+        tm.assert_series_equal(left, right, check_names=False)
+    else:
+        tm.assert_frame_equal(left, right, check_names=False)
+    tm.assert_frame_equal(df, original_df)
+
+
+def test_nunique():
+    df = DataFrame({"A": list("abbacc"), "B": list("abxacc"), "C": list("abbacx")})
+
+    expected = DataFrame({"A": list("abc"), "B": [1, 2, 1], "C": [1, 1, 2]})
+    result =
df.groupby("A", as_index=False).nunique() + tm.assert_frame_equal(result, expected) + + # as_index + expected.index = list("abc") + expected.index.name = "A" + expected = expected.drop(columns="A") + result = df.groupby("A").nunique() + tm.assert_frame_equal(result, expected) + + # with na + result = df.replace({"x": None}).groupby("A").nunique(dropna=False) + tm.assert_frame_equal(result, expected) + + # dropna + expected = DataFrame({"B": [1] * 3, "C": [1] * 3}, index=list("abc")) + expected.index.name = "A" + result = df.replace({"x": None}).groupby("A").nunique() + tm.assert_frame_equal(result, expected) + + +def test_nunique_with_object(): + # GH 11077 + data = DataFrame( + [ + [100, 1, "Alice"], + [200, 2, "Bob"], + [300, 3, "Charlie"], + [-400, 4, "Dan"], + [500, 5, "Edith"], + ], + columns=["amount", "id", "name"], + ) + + result = data.groupby(["id", "amount"])["name"].nunique() + index = MultiIndex.from_arrays([data.id, data.amount]) + expected = Series([1] * 5, name="name", index=index) + tm.assert_series_equal(result, expected) + + +def test_nunique_with_empty_series(): + # GH 12553 + data = Series(name="name", dtype=object) + result = data.groupby(level=0).nunique() + expected = Series(name="name", dtype="int64") + tm.assert_series_equal(result, expected) + + +def test_nunique_with_timegrouper(): + # GH 13453 + test = DataFrame( + { + "time": [ + Timestamp("2016-06-28 09:35:35"), + Timestamp("2016-06-28 16:09:30"), + Timestamp("2016-06-28 16:46:28"), + ], + "data": ["1", "2", "3"], + } + ).set_index("time") + result = test.groupby(pd.Grouper(freq="h"))["data"].nunique() + expected = test.groupby(pd.Grouper(freq="h"))["data"].apply(Series.nunique) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "key, data, dropna, expected", + [ + ( + ["x", "x", "x"], + [Timestamp("2019-01-01"), NaT, Timestamp("2019-01-01")], + True, + Series([1], index=pd.Index(["x"], name="key"), name="data"), + ), + ( + ["x", "x", "x"], + [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)], + True, + Series([1], index=pd.Index(["x"], name="key"), name="data"), + ), + ( + ["x", "x", "x", "y", "y"], + [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)], + False, + Series([2, 2], index=pd.Index(["x", "y"], name="key"), name="data"), + ), + ( + ["x", "x", "x", "x", "y"], + [dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1), NaT, dt.date(2019, 1, 1)], + False, + Series([2, 1], index=pd.Index(["x", "y"], name="key"), name="data"), + ), + ], +) +def test_nunique_with_NaT(key, data, dropna, expected): + # GH 27951 + df = DataFrame({"key": key, "data": data}) + result = df.groupby(["key"])["data"].nunique(dropna=dropna) + tm.assert_series_equal(result, expected) + + +def test_nunique_preserves_column_level_names(): + # GH 23222 + test = DataFrame([1, 2, 2], columns=pd.Index(["A"], name="level_0")) + result = test.groupby([0, 0, 0]).nunique() + expected = DataFrame([2], index=np.array([0]), columns=test.columns) + tm.assert_frame_equal(result, expected) + + +def test_nunique_transform_with_datetime(): + # GH 35109 - transform with nunique on datetimes results in integers + df = DataFrame(date_range("2008-12-31", "2009-01-02"), columns=["date"]) + result = df.groupby([0, 0, 1])["date"].transform("nunique") + expected = Series([2, 2, 1], name="date") + tm.assert_series_equal(result, expected) + + +def test_empty_categorical(observed): + # GH#21334 + cat = Series([1]).astype("category") + ser = cat[:0] + gb = ser.groupby(ser, observed=observed) + result = gb.nunique() + if 
observed: + expected = Series([], index=cat[:0], dtype="int64") + else: + expected = Series([0], index=cat, dtype="int64") + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_pipe.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_pipe.py new file mode 100644 index 00000000..7d5c1625 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_pipe.py @@ -0,0 +1,80 @@ +import numpy as np + +import pandas as pd +from pandas import ( + DataFrame, + Index, +) +import pandas._testing as tm + + +def test_pipe(): + # Test the pipe method of DataFrameGroupBy. + # Issue #17871 + + random_state = np.random.default_rng(2) + + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": random_state.standard_normal(8), + "C": random_state.standard_normal(8), + } + ) + + def f(dfgb): + return dfgb.B.max() - dfgb.C.min().min() + + def square(srs): + return srs**2 + + # Note that the transformations are + # GroupBy -> Series + # Series -> Series + # This then chains the GroupBy.pipe and the + # NDFrame.pipe methods + result = df.groupby("A").pipe(f).pipe(square) + + index = Index(["bar", "foo"], dtype="object", name="A") + expected = pd.Series([3.749306591013693, 6.717707873081384], name="B", index=index) + + tm.assert_series_equal(expected, result) + + +def test_pipe_args(): + # Test passing args to the pipe method of DataFrameGroupBy. + # Issue #17871 + + df = DataFrame( + { + "group": ["A", "A", "B", "B", "C"], + "x": [1.0, 2.0, 3.0, 2.0, 5.0], + "y": [10.0, 100.0, 1000.0, -100.0, -1000.0], + } + ) + + def f(dfgb, arg1): + filtered = dfgb.filter(lambda grp: grp.y.mean() > arg1, dropna=False) + return filtered.groupby("group") + + def g(dfgb, arg2): + return dfgb.sum() / dfgb.sum().sum() + arg2 + + def h(df, arg3): + return df.x + df.y - arg3 + + result = df.groupby("group").pipe(f, 0).pipe(g, 10).pipe(h, 100) + + # Assert the results here + index = Index(["A", "B"], name="group") + expected = pd.Series([-79.5160891089, -78.4839108911], index=index) + + tm.assert_series_equal(result, expected) + + # test SeriesGroupby.pipe + ser = pd.Series([1, 1, 2, 2, 3, 3]) + result = ser.groupby(ser).pipe(lambda grp: grp.sum() * grp.count()) + + expected = pd.Series([4, 8, 12], index=Index([1, 2, 3], dtype=np.int64)) + + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_quantile.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_quantile.py new file mode 100644 index 00000000..5a12f9a8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_quantile.py @@ -0,0 +1,503 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"] +) +@pytest.mark.parametrize( + "a_vals,b_vals", + [ + # Ints + ([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]), + ([1, 2, 3, 4], [4, 3, 2, 1]), + ([1, 2, 3, 4, 5], [4, 3, 2, 1]), + # Floats + ([1.0, 2.0, 3.0, 4.0, 5.0], [5.0, 4.0, 3.0, 2.0, 1.0]), + # Missing data + ([1.0, np.nan, 3.0, np.nan, 5.0], [5.0, np.nan, 3.0, np.nan, 1.0]), + ([np.nan, 4.0, np.nan, 2.0, np.nan], [np.nan, 4.0, np.nan, 2.0, np.nan]), + # Timestamps + ( + pd.date_range("1/1/18", freq="D", periods=5), + pd.date_range("1/1/18", freq="D", periods=5)[::-1], + ), + ( + pd.date_range("1/1/18", freq="D", 
periods=5).as_unit("s"), + pd.date_range("1/1/18", freq="D", periods=5)[::-1].as_unit("s"), + ), + # All NA + ([np.nan] * 5, [np.nan] * 5), + ], +) +@pytest.mark.parametrize("q", [0, 0.25, 0.5, 0.75, 1]) +def test_quantile(interpolation, a_vals, b_vals, q, request): + if ( + interpolation == "nearest" + and q == 0.5 + and isinstance(b_vals, list) + and b_vals == [4, 3, 2, 1] + ): + request.node.add_marker( + pytest.mark.xfail( + reason="Unclear numpy expectation for nearest " + "result with equidistant data" + ) + ) + all_vals = pd.concat([pd.Series(a_vals), pd.Series(b_vals)]) + + a_expected = pd.Series(a_vals).quantile(q, interpolation=interpolation) + b_expected = pd.Series(b_vals).quantile(q, interpolation=interpolation) + + df = DataFrame({"key": ["a"] * len(a_vals) + ["b"] * len(b_vals), "val": all_vals}) + + expected = DataFrame( + [a_expected, b_expected], columns=["val"], index=Index(["a", "b"], name="key") + ) + if all_vals.dtype.kind == "M" and expected.dtypes.values[0].kind == "M": + # TODO(non-nano): this should be unnecessary once array_to_datetime + # correctly infers non-nano from Timestamp.unit + expected = expected.astype(all_vals.dtype) + result = df.groupby("key").quantile(q, interpolation=interpolation) + + tm.assert_frame_equal(result, expected) + + +def test_quantile_array(): + # https://github.com/pandas-dev/pandas/issues/27526 + df = DataFrame({"A": [0, 1, 2, 3, 4]}) + key = np.array([0, 0, 1, 1, 1], dtype=np.int64) + result = df.groupby(key).quantile([0.25]) + + index = pd.MultiIndex.from_product([[0, 1], [0.25]]) + expected = DataFrame({"A": [0.25, 2.50]}, index=index) + tm.assert_frame_equal(result, expected) + + df = DataFrame({"A": [0, 1, 2, 3], "B": [4, 5, 6, 7]}) + index = pd.MultiIndex.from_product([[0, 1], [0.25, 0.75]]) + + key = np.array([0, 0, 1, 1], dtype=np.int64) + result = df.groupby(key).quantile([0.25, 0.75]) + expected = DataFrame( + {"A": [0.25, 0.75, 2.25, 2.75], "B": [4.25, 4.75, 6.25, 6.75]}, index=index + ) + tm.assert_frame_equal(result, expected) + + +def test_quantile_array2(): + # https://github.com/pandas-dev/pandas/pull/28085#issuecomment-524066959 + arr = np.random.default_rng(2).integers(0, 5, size=(10, 3), dtype=np.int64) + df = DataFrame(arr, columns=list("ABC")) + result = df.groupby("A").quantile([0.3, 0.7]) + expected = DataFrame( + { + "B": [2.0, 2.0, 2.3, 2.7, 0.3, 0.7, 3.2, 4.0, 0.3, 0.7], + "C": [1.0, 1.0, 1.9, 3.0999999999999996, 0.3, 0.7, 2.6, 3.0, 1.2, 2.8], + }, + index=pd.MultiIndex.from_product( + [[0, 1, 2, 3, 4], [0.3, 0.7]], names=["A", None] + ), + ) + tm.assert_frame_equal(result, expected) + + +def test_quantile_array_no_sort(): + df = DataFrame({"A": [0, 1, 2], "B": [3, 4, 5]}) + key = np.array([1, 0, 1], dtype=np.int64) + result = df.groupby(key, sort=False).quantile([0.25, 0.5, 0.75]) + expected = DataFrame( + {"A": [0.5, 1.0, 1.5, 1.0, 1.0, 1.0], "B": [3.5, 4.0, 4.5, 4.0, 4.0, 4.0]}, + index=pd.MultiIndex.from_product([[1, 0], [0.25, 0.5, 0.75]]), + ) + tm.assert_frame_equal(result, expected) + + result = df.groupby(key, sort=False).quantile([0.75, 0.25]) + expected = DataFrame( + {"A": [1.5, 0.5, 1.0, 1.0], "B": [4.5, 3.5, 4.0, 4.0]}, + index=pd.MultiIndex.from_product([[1, 0], [0.75, 0.25]]), + ) + tm.assert_frame_equal(result, expected) + + +def test_quantile_array_multiple_levels(): + df = DataFrame( + {"A": [0, 1, 2], "B": [3, 4, 5], "c": ["a", "a", "a"], "d": ["a", "a", "b"]} + ) + result = df.groupby(["c", "d"]).quantile([0.25, 0.75]) + index = pd.MultiIndex.from_tuples( + [("a", "a", 0.25), ("a", "a", 
0.75), ("a", "b", 0.25), ("a", "b", 0.75)], + names=["c", "d", None], + ) + expected = DataFrame( + {"A": [0.25, 0.75, 2.0, 2.0], "B": [3.25, 3.75, 5.0, 5.0]}, index=index + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("frame_size", [(2, 3), (100, 10)]) +@pytest.mark.parametrize("groupby", [[0], [0, 1]]) +@pytest.mark.parametrize("q", [[0.5, 0.6]]) +def test_groupby_quantile_with_arraylike_q_and_int_columns(frame_size, groupby, q): + # GH30289 + nrow, ncol = frame_size + df = DataFrame(np.array([ncol * [_ % 4] for _ in range(nrow)]), columns=range(ncol)) + + idx_levels = [np.arange(min(nrow, 4))] * len(groupby) + [q] + idx_codes = [[x for x in range(min(nrow, 4)) for _ in q]] * len(groupby) + [ + list(range(len(q))) * min(nrow, 4) + ] + expected_index = pd.MultiIndex( + levels=idx_levels, codes=idx_codes, names=groupby + [None] + ) + expected_values = [ + [float(x)] * (ncol - len(groupby)) for x in range(min(nrow, 4)) for _ in q + ] + expected_columns = [x for x in range(ncol) if x not in groupby] + expected = DataFrame( + expected_values, index=expected_index, columns=expected_columns + ) + result = df.groupby(groupby).quantile(q) + + tm.assert_frame_equal(result, expected) + + +def test_quantile_raises(): + df = DataFrame([["foo", "a"], ["foo", "b"], ["foo", "c"]], columns=["key", "val"]) + + with pytest.raises(TypeError, match="cannot be performed against 'object' dtypes"): + df.groupby("key").quantile() + + +def test_quantile_out_of_bounds_q_raises(): + # https://github.com/pandas-dev/pandas/issues/27470 + df = DataFrame({"a": [0, 0, 0, 1, 1, 1], "b": range(6)}) + g = df.groupby([0, 0, 0, 1, 1, 1]) + with pytest.raises(ValueError, match="Got '50.0' instead"): + g.quantile(50) + + with pytest.raises(ValueError, match="Got '-1.0' instead"): + g.quantile(-1) + + +def test_quantile_missing_group_values_no_segfaults(): + # GH 28662 + data = np.array([1.0, np.nan, 1.0]) + df = DataFrame({"key": data, "val": range(3)}) + + # Random segfaults; would have been guaranteed in loop + grp = df.groupby("key") + for _ in range(100): + grp.quantile() + + +@pytest.mark.parametrize( + "key, val, expected_key, expected_val", + [ + ([1.0, np.nan, 3.0, np.nan], range(4), [1.0, 3.0], [0.0, 2.0]), + ([1.0, np.nan, 2.0, 2.0], range(4), [1.0, 2.0], [0.0, 2.5]), + (["a", "b", "b", np.nan], range(4), ["a", "b"], [0, 1.5]), + ([0], [42], [0], [42.0]), + ([], [], np.array([], dtype="float64"), np.array([], dtype="float64")), + ], +) +def test_quantile_missing_group_values_correct_results( + key, val, expected_key, expected_val +): + # GH 28662, GH 33200, GH 33569 + df = DataFrame({"key": key, "val": val}) + + expected = DataFrame( + expected_val, index=Index(expected_key, name="key"), columns=["val"] + ) + + grp = df.groupby("key") + + result = grp.quantile(0.5) + tm.assert_frame_equal(result, expected) + + result = grp.quantile() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "values", + [ + pd.array([1, 0, None] * 2, dtype="Int64"), + pd.array([True, False, None] * 2, dtype="boolean"), + ], +) +@pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]]) +def test_groupby_quantile_nullable_array(values, q): + # https://github.com/pandas-dev/pandas/issues/33136 + df = DataFrame({"a": ["x"] * 3 + ["y"] * 3, "b": values}) + result = df.groupby("a")["b"].quantile(q) + + if isinstance(q, list): + idx = pd.MultiIndex.from_product((["x", "y"], q), names=["a", None]) + true_quantiles = [0.0, 0.5, 1.0] + else: + idx = Index(["x", "y"], name="a") + true_quantiles = [0.5] + 
+ expected = pd.Series(true_quantiles * 2, index=idx, name="b", dtype="Float64") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]]) +@pytest.mark.parametrize("numeric_only", [True, False]) +def test_groupby_quantile_raises_on_invalid_dtype(q, numeric_only): + df = DataFrame({"a": [1], "b": [2.0], "c": ["x"]}) + if numeric_only: + result = df.groupby("a").quantile(q, numeric_only=numeric_only) + expected = df.groupby("a")[["b"]].quantile(q) + tm.assert_frame_equal(result, expected) + else: + with pytest.raises( + TypeError, match="'quantile' cannot be performed against 'object' dtypes!" + ): + df.groupby("a").quantile(q, numeric_only=numeric_only) + + +def test_groupby_quantile_NA_float(any_float_dtype): + # GH#42849 + df = DataFrame({"x": [1, 1], "y": [0.2, np.nan]}, dtype=any_float_dtype) + result = df.groupby("x")["y"].quantile(0.5) + exp_index = Index([1.0], dtype=any_float_dtype, name="x") + + if any_float_dtype in ["Float32", "Float64"]: + expected_dtype = any_float_dtype + else: + expected_dtype = None + + expected = pd.Series([0.2], dtype=expected_dtype, index=exp_index, name="y") + tm.assert_series_equal(result, expected) + + result = df.groupby("x")["y"].quantile([0.5, 0.75]) + expected = pd.Series( + [0.2] * 2, + index=pd.MultiIndex.from_product((exp_index, [0.5, 0.75]), names=["x", None]), + name="y", + dtype=expected_dtype, + ) + tm.assert_series_equal(result, expected) + + +def test_groupby_quantile_NA_int(any_int_ea_dtype): + # GH#42849 + df = DataFrame({"x": [1, 1], "y": [2, 5]}, dtype=any_int_ea_dtype) + result = df.groupby("x")["y"].quantile(0.5) + expected = pd.Series( + [3.5], + dtype="Float64", + index=Index([1], name="x", dtype=any_int_ea_dtype), + name="y", + ) + tm.assert_series_equal(expected, result) + + result = df.groupby("x").quantile(0.5) + expected = DataFrame( + {"y": 3.5}, dtype="Float64", index=Index([1], name="x", dtype=any_int_ea_dtype) + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "interpolation, val1, val2", [("lower", 2, 2), ("higher", 2, 3), ("nearest", 2, 2)] +) +def test_groupby_quantile_all_na_group_masked( + interpolation, val1, val2, any_numeric_ea_dtype +): + # GH#37493 + df = DataFrame( + {"a": [1, 1, 1, 2], "b": [1, 2, 3, pd.NA]}, dtype=any_numeric_ea_dtype + ) + result = df.groupby("a").quantile(q=[0.5, 0.7], interpolation=interpolation) + expected = DataFrame( + {"b": [val1, val2, pd.NA, pd.NA]}, + dtype=any_numeric_ea_dtype, + index=pd.MultiIndex.from_arrays( + [pd.Series([1, 1, 2, 2], dtype=any_numeric_ea_dtype), [0.5, 0.7, 0.5, 0.7]], + names=["a", None], + ), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("interpolation", ["midpoint", "linear"]) +def test_groupby_quantile_all_na_group_masked_interp( + interpolation, any_numeric_ea_dtype +): + # GH#37493 + df = DataFrame( + {"a": [1, 1, 1, 2], "b": [1, 2, 3, pd.NA]}, dtype=any_numeric_ea_dtype + ) + result = df.groupby("a").quantile(q=[0.5, 0.75], interpolation=interpolation) + + if any_numeric_ea_dtype == "Float32": + expected_dtype = any_numeric_ea_dtype + else: + expected_dtype = "Float64" + + expected = DataFrame( + {"b": [2.0, 2.5, pd.NA, pd.NA]}, + dtype=expected_dtype, + index=pd.MultiIndex.from_arrays( + [ + pd.Series([1, 1, 2, 2], dtype=any_numeric_ea_dtype), + [0.5, 0.75, 0.5, 0.75], + ], + names=["a", None], + ), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["Float64", "Float32"]) +def test_groupby_quantile_allNA_column(dtype): 
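+    # A group whose values are all pd.NA should yield a missing quantile,
+    # with the masked float dtype preserved on both the result and the index.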
+ # GH#42849 + df = DataFrame({"x": [1, 1], "y": [pd.NA] * 2}, dtype=dtype) + result = df.groupby("x")["y"].quantile(0.5) + expected = pd.Series( + [np.nan], dtype=dtype, index=Index([1.0], dtype=dtype), name="y" + ) + expected.index.name = "x" + tm.assert_series_equal(expected, result) + + +def test_groupby_timedelta_quantile(): + # GH: 29485 + df = DataFrame( + {"value": pd.to_timedelta(np.arange(4), unit="s"), "group": [1, 1, 2, 2]} + ) + result = df.groupby("group").quantile(0.99) + expected = DataFrame( + { + "value": [ + pd.Timedelta("0 days 00:00:00.990000"), + pd.Timedelta("0 days 00:00:02.990000"), + ] + }, + index=Index([1, 2], name="group"), + ) + tm.assert_frame_equal(result, expected) + + +def test_columns_groupby_quantile(): + # GH 33795 + df = DataFrame( + np.arange(12).reshape(3, -1), + index=list("XYZ"), + columns=pd.Series(list("ABAB"), name="col"), + ) + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby("col", axis=1) + result = gb.quantile(q=[0.8, 0.2]) + expected = DataFrame( + [ + [1.6, 0.4, 2.6, 1.4], + [5.6, 4.4, 6.6, 5.4], + [9.6, 8.4, 10.6, 9.4], + ], + index=list("XYZ"), + columns=pd.MultiIndex.from_tuples( + [("A", 0.8), ("A", 0.2), ("B", 0.8), ("B", 0.2)], names=["col", None] + ), + ) + + tm.assert_frame_equal(result, expected) + + +def test_timestamp_groupby_quantile(): + # GH 33168 + df = DataFrame( + { + "timestamp": pd.date_range( + start="2020-04-19 00:00:00", freq="1T", periods=100, tz="UTC" + ).floor("1H"), + "category": list(range(1, 101)), + "value": list(range(101, 201)), + } + ) + + result = df.groupby("timestamp").quantile([0.2, 0.8]) + + expected = DataFrame( + [ + {"category": 12.8, "value": 112.8}, + {"category": 48.2, "value": 148.2}, + {"category": 68.8, "value": 168.8}, + {"category": 92.2, "value": 192.2}, + ], + index=pd.MultiIndex.from_tuples( + [ + (pd.Timestamp("2020-04-19 00:00:00+00:00"), 0.2), + (pd.Timestamp("2020-04-19 00:00:00+00:00"), 0.8), + (pd.Timestamp("2020-04-19 01:00:00+00:00"), 0.2), + (pd.Timestamp("2020-04-19 01:00:00+00:00"), 0.8), + ], + names=("timestamp", None), + ), + ) + + tm.assert_frame_equal(result, expected) + + +def test_groupby_quantile_dt64tz_period(): + # GH#51373 + dti = pd.date_range("2016-01-01", periods=1000) + ser = pd.Series(dti) + df = ser.to_frame() + df[1] = dti.tz_localize("US/Pacific") + df[2] = dti.to_period("D") + df[3] = dti - dti[0] + df.iloc[-1] = pd.NaT + + by = np.tile(np.arange(5), 200) + gb = df.groupby(by) + + result = gb.quantile(0.5) + + # Check that we match the group-by-group result + exp = {i: df.iloc[i::5].quantile(0.5) for i in range(5)} + expected = DataFrame(exp).T.infer_objects() + expected.index = expected.index.astype(int) + + tm.assert_frame_equal(result, expected) + + +def test_groupby_quantile_nonmulti_levels_order(): + # Non-regression test for GH #53009 + ind = pd.MultiIndex.from_tuples( + [ + (0, "a", "B"), + (0, "a", "A"), + (0, "b", "B"), + (0, "b", "A"), + (1, "a", "B"), + (1, "a", "A"), + (1, "b", "B"), + (1, "b", "A"), + ], + names=["sample", "cat0", "cat1"], + ) + ser = pd.Series(range(8), index=ind) + result = ser.groupby(level="cat1", sort=False).quantile([0.2, 0.8]) + + qind = pd.MultiIndex.from_tuples( + [("B", 0.2), ("B", 0.8), ("A", 0.2), ("A", 0.8)], names=["cat1", None] + ) + expected = pd.Series([1.2, 4.8, 2.2, 5.8], index=qind) + + tm.assert_series_equal(result, expected) + + # We need to check that index levels are not sorted + expected_levels = 
pd.core.indexes.frozen.FrozenList([["B", "A"], [0.2, 0.8]]) + tm.assert_equal(result.index.levels, expected_levels) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_raises.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_raises.py new file mode 100644 index 00000000..f9a2b3d4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_raises.py @@ -0,0 +1,688 @@ +# Only tests that raise an error and have no better location should go here. +# Tests for specific groupby methods should go in their respective +# test file. + +import datetime +import re + +import numpy as np +import pytest + +from pandas import ( + Categorical, + DataFrame, + Grouper, + Series, +) +import pandas._testing as tm +from pandas.tests.groupby import get_groupby_method_args + + +@pytest.fixture( + params=[ + "a", + ["a"], + ["a", "b"], + Grouper(key="a"), + lambda x: x % 2, + [0, 0, 0, 1, 2, 2, 2, 3, 3], + np.array([0, 0, 0, 1, 2, 2, 2, 3, 3]), + dict(zip(range(9), [0, 0, 0, 1, 2, 2, 2, 3, 3])), + Series([1, 1, 1, 1, 1, 2, 2, 2, 2]), + [Series([1, 1, 1, 1, 1, 2, 2, 2, 2]), Series([3, 3, 4, 4, 4, 4, 4, 3, 3])], + ] +) +def by(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def groupby_series(request): + return request.param + + +@pytest.fixture +def df_with_string_col(): + df = DataFrame( + { + "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], + "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], + "c": range(9), + "d": list("xyzwtyuio"), + } + ) + return df + + +@pytest.fixture +def df_with_datetime_col(): + df = DataFrame( + { + "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], + "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], + "c": range(9), + "d": datetime.datetime(2005, 1, 1, 10, 30, 23, 540000), + } + ) + return df + + +@pytest.fixture +def df_with_timedelta_col(): + df = DataFrame( + { + "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], + "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], + "c": range(9), + "d": datetime.timedelta(days=1), + } + ) + return df + + +@pytest.fixture +def df_with_cat_col(): + df = DataFrame( + { + "a": [1, 1, 1, 1, 1, 2, 2, 2, 2], + "b": [3, 3, 4, 4, 4, 4, 4, 3, 3], + "c": range(9), + "d": Categorical( + ["a", "a", "a", "a", "b", "b", "b", "b", "c"], + categories=["a", "b", "c", "d"], + ordered=True, + ), + } + ) + return df + + +def _call_and_check(klass, msg, how, gb, groupby_func, args): + if klass is None: + if how == "method": + getattr(gb, groupby_func)(*args) + elif how == "agg": + gb.agg(groupby_func, *args) + else: + gb.transform(groupby_func, *args) + else: + with pytest.raises(klass, match=msg): + if how == "method": + getattr(gb, groupby_func)(*args) + elif how == "agg": + gb.agg(groupby_func, *args) + else: + gb.transform(groupby_func, *args) + + +@pytest.mark.parametrize("how", ["method", "agg", "transform"]) +def test_groupby_raises_string( + how, by, groupby_series, groupby_func, df_with_string_col +): + df = df_with_string_col + args = get_groupby_method_args(groupby_func, df) + gb = df.groupby(by=by) + + if groupby_series: + gb = gb["d"] + + if groupby_func == "corrwith": + assert not hasattr(gb, "corrwith") + return + + klass, msg = { + "all": (None, ""), + "any": (None, ""), + "bfill": (None, ""), + "corrwith": (TypeError, "Could not convert"), + "count": (None, ""), + "cumcount": (None, ""), + "cummax": ( + (NotImplementedError, TypeError), + "(function|cummax) is not (implemented|supported) for (this|object) dtype", + ), + "cummin": ( + (NotImplementedError, TypeError), + "(function|cummin) is not (implemented|supported) for (this|object) dtype", + ), + 
"cumprod": ( + (NotImplementedError, TypeError), + "(function|cumprod) is not (implemented|supported) for (this|object) dtype", + ), + "cumsum": ( + (NotImplementedError, TypeError), + "(function|cumsum) is not (implemented|supported) for (this|object) dtype", + ), + "diff": (TypeError, "unsupported operand type"), + "ffill": (None, ""), + "fillna": (None, ""), + "first": (None, ""), + "idxmax": (None, ""), + "idxmin": (None, ""), + "last": (None, ""), + "max": (None, ""), + "mean": ( + TypeError, + re.escape("agg function failed [how->mean,dtype->object]"), + ), + "median": ( + TypeError, + re.escape("agg function failed [how->median,dtype->object]"), + ), + "min": (None, ""), + "ngroup": (None, ""), + "nunique": (None, ""), + "pct_change": (TypeError, "unsupported operand type"), + "prod": ( + TypeError, + re.escape("agg function failed [how->prod,dtype->object]"), + ), + "quantile": (TypeError, "cannot be performed against 'object' dtypes!"), + "rank": (None, ""), + "sem": (ValueError, "could not convert string to float"), + "shift": (None, ""), + "size": (None, ""), + "skew": (ValueError, "could not convert string to float"), + "std": (ValueError, "could not convert string to float"), + "sum": (None, ""), + "var": ( + TypeError, + re.escape("agg function failed [how->var,dtype->object]"), + ), + }[groupby_func] + + _call_and_check(klass, msg, how, gb, groupby_func, args) + + +@pytest.mark.parametrize("how", ["agg", "transform"]) +def test_groupby_raises_string_udf(how, by, groupby_series, df_with_string_col): + df = df_with_string_col + gb = df.groupby(by=by) + + if groupby_series: + gb = gb["d"] + + def func(x): + raise TypeError("Test error message") + + with pytest.raises(TypeError, match="Test error message"): + getattr(gb, how)(func) + + +@pytest.mark.parametrize("how", ["agg", "transform"]) +@pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean]) +def test_groupby_raises_string_np( + how, by, groupby_series, groupby_func_np, df_with_string_col +): + # GH#50749 + df = df_with_string_col + gb = df.groupby(by=by) + + if groupby_series: + gb = gb["d"] + + klass, msg = { + np.sum: (None, ""), + np.mean: ( + TypeError, + re.escape("agg function failed [how->mean,dtype->object]"), + ), + }[groupby_func_np] + + if groupby_series: + warn_msg = "using SeriesGroupBy.[sum|mean]" + else: + warn_msg = "using DataFrameGroupBy.[sum|mean]" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + _call_and_check(klass, msg, how, gb, groupby_func_np, ()) + + +@pytest.mark.parametrize("how", ["method", "agg", "transform"]) +def test_groupby_raises_datetime( + how, by, groupby_series, groupby_func, df_with_datetime_col +): + df = df_with_datetime_col + args = get_groupby_method_args(groupby_func, df) + gb = df.groupby(by=by) + + if groupby_series: + gb = gb["d"] + + if groupby_func == "corrwith": + assert not hasattr(gb, "corrwith") + return + + klass, msg = { + "all": (None, ""), + "any": (None, ""), + "bfill": (None, ""), + "corrwith": (TypeError, "cannot perform __mul__ with this index type"), + "count": (None, ""), + "cumcount": (None, ""), + "cummax": (None, ""), + "cummin": (None, ""), + "cumprod": (TypeError, "datetime64 type does not support cumprod operations"), + "cumsum": (TypeError, "datetime64 type does not support cumsum operations"), + "diff": (None, ""), + "ffill": (None, ""), + "fillna": (None, ""), + "first": (None, ""), + "idxmax": (None, ""), + "idxmin": (None, ""), + "last": (None, ""), + "max": (None, ""), + "mean": (None, ""), + "median": (None, ""), + "min": 
(None, ""), + "ngroup": (None, ""), + "nunique": (None, ""), + "pct_change": (TypeError, "cannot perform __truediv__ with this index type"), + "prod": (TypeError, "datetime64 type does not support prod"), + "quantile": (None, ""), + "rank": (None, ""), + "sem": (None, ""), + "shift": (None, ""), + "size": (None, ""), + "skew": ( + TypeError, + "|".join( + [ + r"dtype datetime64\[ns\] does not support reduction", + "datetime64 type does not support skew operations", + ] + ), + ), + "std": (None, ""), + "sum": (TypeError, "datetime64 type does not support sum operations"), + "var": (TypeError, "datetime64 type does not support var operations"), + }[groupby_func] + + warn = None + warn_msg = f"'{groupby_func}' with datetime64 dtypes is deprecated" + if groupby_func in ["any", "all"]: + warn = FutureWarning + + with tm.assert_produces_warning(warn, match=warn_msg): + _call_and_check(klass, msg, how, gb, groupby_func, args) + + +@pytest.mark.parametrize("how", ["agg", "transform"]) +def test_groupby_raises_datetime_udf(how, by, groupby_series, df_with_datetime_col): + df = df_with_datetime_col + gb = df.groupby(by=by) + + if groupby_series: + gb = gb["d"] + + def func(x): + raise TypeError("Test error message") + + with pytest.raises(TypeError, match="Test error message"): + getattr(gb, how)(func) + + +@pytest.mark.parametrize("how", ["agg", "transform"]) +@pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean]) +def test_groupby_raises_datetime_np( + how, by, groupby_series, groupby_func_np, df_with_datetime_col +): + # GH#50749 + df = df_with_datetime_col + gb = df.groupby(by=by) + + if groupby_series: + gb = gb["d"] + + klass, msg = { + np.sum: (TypeError, "datetime64 type does not support sum operations"), + np.mean: (None, ""), + }[groupby_func_np] + + if groupby_series: + warn_msg = "using SeriesGroupBy.[sum|mean]" + else: + warn_msg = "using DataFrameGroupBy.[sum|mean]" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + _call_and_check(klass, msg, how, gb, groupby_func_np, ()) + + +@pytest.mark.parametrize("func", ["prod", "cumprod", "skew", "var"]) +def test_groupby_raises_timedelta(func, df_with_timedelta_col): + df = df_with_timedelta_col + gb = df.groupby(by="a") + + _call_and_check( + TypeError, + "timedelta64 type does not support .* operations", + "method", + gb, + func, + [], + ) + + +@pytest.mark.parametrize("how", ["method", "agg", "transform"]) +def test_groupby_raises_category( + how, by, groupby_series, groupby_func, using_copy_on_write, df_with_cat_col +): + # GH#50749 + df = df_with_cat_col + args = get_groupby_method_args(groupby_func, df) + gb = df.groupby(by=by) + + if groupby_series: + gb = gb["d"] + + if groupby_func == "corrwith": + assert not hasattr(gb, "corrwith") + return + + klass, msg = { + "all": (None, ""), + "any": (None, ""), + "bfill": (None, ""), + "corrwith": ( + TypeError, + r"unsupported operand type\(s\) for \*: 'Categorical' and 'int'", + ), + "count": (None, ""), + "cumcount": (None, ""), + "cummax": ( + (NotImplementedError, TypeError), + "(category type does not support cummax operations|" + "category dtype not supported|" + "cummax is not supported for category dtype)", + ), + "cummin": ( + (NotImplementedError, TypeError), + "(category type does not support cummin operations|" + "category dtype not supported|" + "cummin is not supported for category dtype)", + ), + "cumprod": ( + (NotImplementedError, TypeError), + "(category type does not support cumprod operations|" + "category dtype not supported|" + "cumprod is not 
supported for category dtype)", + ), + "cumsum": ( + (NotImplementedError, TypeError), + "(category type does not support cumsum operations|" + "category dtype not supported|" + "cumsum is not supported for category dtype)", + ), + "diff": ( + TypeError, + r"unsupported operand type\(s\) for -: 'Categorical' and 'Categorical'", + ), + "ffill": (None, ""), + "fillna": ( + TypeError, + r"Cannot setitem on a Categorical with a new category \(0\), " + "set the categories first", + ) + if not using_copy_on_write + else (None, ""), # no-op with CoW + "first": (None, ""), + "idxmax": (None, ""), + "idxmin": (None, ""), + "last": (None, ""), + "max": (None, ""), + "mean": ( + TypeError, + "|".join( + [ + "'Categorical' .* does not support reduction 'mean'", + "category dtype does not support aggregation 'mean'", + ] + ), + ), + "median": ( + TypeError, + "|".join( + [ + "'Categorical' .* does not support reduction 'median'", + "category dtype does not support aggregation 'median'", + ] + ), + ), + "min": (None, ""), + "ngroup": (None, ""), + "nunique": (None, ""), + "pct_change": ( + TypeError, + r"unsupported operand type\(s\) for /: 'Categorical' and 'Categorical'", + ), + "prod": (TypeError, "category type does not support prod operations"), + "quantile": (TypeError, "No matching signature found"), + "rank": (None, ""), + "sem": ( + TypeError, + "|".join( + [ + "'Categorical' .* does not support reduction 'sem'", + "category dtype does not support aggregation 'sem'", + ] + ), + ), + "shift": (None, ""), + "size": (None, ""), + "skew": ( + TypeError, + "|".join( + [ + "dtype category does not support reduction 'skew'", + "category type does not support skew operations", + ] + ), + ), + "std": ( + TypeError, + "|".join( + [ + "'Categorical' .* does not support reduction 'std'", + "category dtype does not support aggregation 'std'", + ] + ), + ), + "sum": (TypeError, "category type does not support sum operations"), + "var": ( + TypeError, + "|".join( + [ + "'Categorical' .* does not support reduction 'var'", + "category dtype does not support aggregation 'var'", + ] + ), + ), + }[groupby_func] + + _call_and_check(klass, msg, how, gb, groupby_func, args) + + +@pytest.mark.parametrize("how", ["agg", "transform"]) +def test_groupby_raises_category_udf(how, by, groupby_series, df_with_cat_col): + # GH#50749 + df = df_with_cat_col + gb = df.groupby(by=by) + + if groupby_series: + gb = gb["d"] + + def func(x): + raise TypeError("Test error message") + + with pytest.raises(TypeError, match="Test error message"): + getattr(gb, how)(func) + + +@pytest.mark.parametrize("how", ["agg", "transform"]) +@pytest.mark.parametrize("groupby_func_np", [np.sum, np.mean]) +def test_groupby_raises_category_np( + how, by, groupby_series, groupby_func_np, df_with_cat_col +): + # GH#50749 + df = df_with_cat_col + gb = df.groupby(by=by) + + if groupby_series: + gb = gb["d"] + + klass, msg = { + np.sum: (TypeError, "category type does not support sum operations"), + np.mean: ( + TypeError, + "category dtype does not support aggregation 'mean'", + ), + }[groupby_func_np] + + if groupby_series: + warn_msg = "using SeriesGroupBy.[sum|mean]" + else: + warn_msg = "using DataFrameGroupBy.[sum|mean]" + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + _call_and_check(klass, msg, how, gb, groupby_func_np, ()) + + +@pytest.mark.parametrize("how", ["method", "agg", "transform"]) +def test_groupby_raises_category_on_category( + how, + by, + groupby_series, + groupby_func, + observed, + using_copy_on_write, + 
df_with_cat_col, +): + # GH#50749 + df = df_with_cat_col + df["a"] = Categorical( + ["a", "a", "a", "a", "b", "b", "b", "b", "c"], + categories=["a", "b", "c", "d"], + ordered=True, + ) + args = get_groupby_method_args(groupby_func, df) + gb = df.groupby(by=by, observed=observed) + + if groupby_series: + gb = gb["d"] + + if groupby_func == "corrwith": + assert not hasattr(gb, "corrwith") + return + + empty_groups = any(group.empty for group in gb.groups.values()) + + klass, msg = { + "all": (None, ""), + "any": (None, ""), + "bfill": (None, ""), + "corrwith": ( + TypeError, + r"unsupported operand type\(s\) for \*: 'Categorical' and 'int'", + ), + "count": (None, ""), + "cumcount": (None, ""), + "cummax": ( + (NotImplementedError, TypeError), + "(cummax is not supported for category dtype|" + "category dtype not supported|" + "category type does not support cummax operations)", + ), + "cummin": ( + (NotImplementedError, TypeError), + "(cummin is not supported for category dtype|" + "category dtype not supported|" + "category type does not support cummin operations)", + ), + "cumprod": ( + (NotImplementedError, TypeError), + "(cumprod is not supported for category dtype|" + "category dtype not supported|" + "category type does not support cumprod operations)", + ), + "cumsum": ( + (NotImplementedError, TypeError), + "(cumsum is not supported for category dtype|" + "category dtype not supported|" + "category type does not support cumsum operations)", + ), + "diff": (TypeError, "unsupported operand type"), + "ffill": (None, ""), + "fillna": ( + TypeError, + r"Cannot setitem on a Categorical with a new category \(0\), " + "set the categories first", + ) + if not using_copy_on_write + else (None, ""), # no-op with CoW + "first": (None, ""), + "idxmax": (ValueError, "attempt to get argmax of an empty sequence") + if empty_groups + else (None, ""), + "idxmin": (ValueError, "attempt to get argmin of an empty sequence") + if empty_groups + else (None, ""), + "last": (None, ""), + "max": (None, ""), + "mean": (TypeError, "category dtype does not support aggregation 'mean'"), + "median": (TypeError, "category dtype does not support aggregation 'median'"), + "min": (None, ""), + "ngroup": (None, ""), + "nunique": (None, ""), + "pct_change": (TypeError, "unsupported operand type"), + "prod": (TypeError, "category type does not support prod operations"), + "quantile": (TypeError, ""), + "rank": (None, ""), + "sem": ( + TypeError, + "|".join( + [ + "'Categorical' .* does not support reduction 'sem'", + "category dtype does not support aggregation 'sem'", + ] + ), + ), + "shift": (None, ""), + "size": (None, ""), + "skew": ( + TypeError, + "|".join( + [ + "category type does not support skew operations", + "dtype category does not support reduction 'skew'", + ] + ), + ), + "std": ( + TypeError, + "|".join( + [ + "'Categorical' .* does not support reduction 'std'", + "category dtype does not support aggregation 'std'", + ] + ), + ), + "sum": (TypeError, "category type does not support sum operations"), + "var": ( + TypeError, + "|".join( + [ + "'Categorical' .* does not support reduction 'var'", + "category dtype does not support aggregation 'var'", + ] + ), + ), + }[groupby_func] + + _call_and_check(klass, msg, how, gb, groupby_func, args) + + +def test_subsetting_columns_axis_1_raises(): + # GH 35443 + df = DataFrame({"a": [1], "b": [2], "c": [3]}) + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby("a", axis=1) + with 
pytest.raises(ValueError, match="Cannot subset columns when using axis=1"): + gb["b"] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_rank.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_rank.py new file mode 100644 index 00000000..5d85a078 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_rank.py @@ -0,0 +1,712 @@ +from datetime import datetime + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + NaT, + Series, + concat, +) +import pandas._testing as tm + + +def test_rank_unordered_categorical_typeerror(): + # GH#51034 should be TypeError, not NotImplementedError + cat = pd.Categorical([], ordered=False) + ser = Series(cat) + df = ser.to_frame() + + msg = "Cannot perform rank with non-ordered Categorical" + + gb = ser.groupby(cat, observed=False) + with pytest.raises(TypeError, match=msg): + gb.rank() + + gb2 = df.groupby(cat, observed=False) + with pytest.raises(TypeError, match=msg): + gb2.rank() + + +def test_rank_apply(): + lev1 = np.array(["a" * 10] * 100, dtype=object) + lev2 = np.array(["b" * 10] * 130, dtype=object) + lab1 = np.random.default_rng(2).integers(0, 100, size=500, dtype=int) + lab2 = np.random.default_rng(2).integers(0, 130, size=500, dtype=int) + + df = DataFrame( + { + "value": np.random.default_rng(2).standard_normal(500), + "key1": lev1.take(lab1), + "key2": lev2.take(lab2), + } + ) + + result = df.groupby(["key1", "key2"]).value.rank() + + expected = [piece.value.rank() for key, piece in df.groupby(["key1", "key2"])] + expected = concat(expected, axis=0) + expected = expected.reindex(result.index) + tm.assert_series_equal(result, expected) + + result = df.groupby(["key1", "key2"]).value.rank(pct=True) + + expected = [ + piece.value.rank(pct=True) for key, piece in df.groupby(["key1", "key2"]) + ] + expected = concat(expected, axis=0) + expected = expected.reindex(result.index) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]]) +@pytest.mark.parametrize( + "vals", + [ + np.array([2, 2, 8, 2, 6], dtype=dtype) + for dtype in ["i8", "i4", "i2", "i1", "u8", "u4", "u2", "u1", "f8", "f4", "f2"] + ] + + [ + [ + pd.Timestamp("2018-01-02"), + pd.Timestamp("2018-01-02"), + pd.Timestamp("2018-01-08"), + pd.Timestamp("2018-01-02"), + pd.Timestamp("2018-01-06"), + ], + [ + pd.Timestamp("2018-01-02", tz="US/Pacific"), + pd.Timestamp("2018-01-02", tz="US/Pacific"), + pd.Timestamp("2018-01-08", tz="US/Pacific"), + pd.Timestamp("2018-01-02", tz="US/Pacific"), + pd.Timestamp("2018-01-06", tz="US/Pacific"), + ], + [ + pd.Timestamp("2018-01-02") - pd.Timestamp(0), + pd.Timestamp("2018-01-02") - pd.Timestamp(0), + pd.Timestamp("2018-01-08") - pd.Timestamp(0), + pd.Timestamp("2018-01-02") - pd.Timestamp(0), + pd.Timestamp("2018-01-06") - pd.Timestamp(0), + ], + [ + pd.Timestamp("2018-01-02").to_period("D"), + pd.Timestamp("2018-01-02").to_period("D"), + pd.Timestamp("2018-01-08").to_period("D"), + pd.Timestamp("2018-01-02").to_period("D"), + pd.Timestamp("2018-01-06").to_period("D"), + ], + ], + ids=lambda x: type(x[0]), +) +@pytest.mark.parametrize( + "ties_method,ascending,pct,exp", + [ + ("average", True, False, [2.0, 2.0, 5.0, 2.0, 4.0]), + ("average", True, True, [0.4, 0.4, 1.0, 0.4, 0.8]), + ("average", False, False, [4.0, 4.0, 1.0, 4.0, 2.0]), + ("average", False, True, [0.8, 0.8, 0.2, 0.8, 0.4]), + ("min", True, False, [1.0, 1.0, 5.0, 1.0, 4.0]), + ("min", True, True, [0.2, 0.2, 1.0, 0.2, 0.8]), 
+ ("min", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]), + ("min", False, True, [0.6, 0.6, 0.2, 0.6, 0.4]), + ("max", True, False, [3.0, 3.0, 5.0, 3.0, 4.0]), + ("max", True, True, [0.6, 0.6, 1.0, 0.6, 0.8]), + ("max", False, False, [5.0, 5.0, 1.0, 5.0, 2.0]), + ("max", False, True, [1.0, 1.0, 0.2, 1.0, 0.4]), + ("first", True, False, [1.0, 2.0, 5.0, 3.0, 4.0]), + ("first", True, True, [0.2, 0.4, 1.0, 0.6, 0.8]), + ("first", False, False, [3.0, 4.0, 1.0, 5.0, 2.0]), + ("first", False, True, [0.6, 0.8, 0.2, 1.0, 0.4]), + ("dense", True, False, [1.0, 1.0, 3.0, 1.0, 2.0]), + ("dense", True, True, [1.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 2.0 / 3.0]), + ("dense", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]), + ("dense", False, True, [3.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 2.0 / 3.0]), + ], +) +def test_rank_args(grps, vals, ties_method, ascending, pct, exp): + key = np.repeat(grps, len(vals)) + + orig_vals = vals + vals = list(vals) * len(grps) + if isinstance(orig_vals, np.ndarray): + vals = np.array(vals, dtype=orig_vals.dtype) + + df = DataFrame({"key": key, "val": vals}) + result = df.groupby("key").rank(method=ties_method, ascending=ascending, pct=pct) + + exp_df = DataFrame(exp * len(grps), columns=["val"]) + tm.assert_frame_equal(result, exp_df) + + +@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]]) +@pytest.mark.parametrize( + "vals", [[-np.inf, -np.inf, np.nan, 1.0, np.nan, np.inf, np.inf]] +) +@pytest.mark.parametrize( + "ties_method,ascending,na_option,exp", + [ + ("average", True, "keep", [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]), + ("average", True, "top", [3.5, 3.5, 1.5, 5.0, 1.5, 6.5, 6.5]), + ("average", True, "bottom", [1.5, 1.5, 6.5, 3.0, 6.5, 4.5, 4.5]), + ("average", False, "keep", [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]), + ("average", False, "top", [6.5, 6.5, 1.5, 5.0, 1.5, 3.5, 3.5]), + ("average", False, "bottom", [4.5, 4.5, 6.5, 3.0, 6.5, 1.5, 1.5]), + ("min", True, "keep", [1.0, 1.0, np.nan, 3.0, np.nan, 4.0, 4.0]), + ("min", True, "top", [3.0, 3.0, 1.0, 5.0, 1.0, 6.0, 6.0]), + ("min", True, "bottom", [1.0, 1.0, 6.0, 3.0, 6.0, 4.0, 4.0]), + ("min", False, "keep", [4.0, 4.0, np.nan, 3.0, np.nan, 1.0, 1.0]), + ("min", False, "top", [6.0, 6.0, 1.0, 5.0, 1.0, 3.0, 3.0]), + ("min", False, "bottom", [4.0, 4.0, 6.0, 3.0, 6.0, 1.0, 1.0]), + ("max", True, "keep", [2.0, 2.0, np.nan, 3.0, np.nan, 5.0, 5.0]), + ("max", True, "top", [4.0, 4.0, 2.0, 5.0, 2.0, 7.0, 7.0]), + ("max", True, "bottom", [2.0, 2.0, 7.0, 3.0, 7.0, 5.0, 5.0]), + ("max", False, "keep", [5.0, 5.0, np.nan, 3.0, np.nan, 2.0, 2.0]), + ("max", False, "top", [7.0, 7.0, 2.0, 5.0, 2.0, 4.0, 4.0]), + ("max", False, "bottom", [5.0, 5.0, 7.0, 3.0, 7.0, 2.0, 2.0]), + ("first", True, "keep", [1.0, 2.0, np.nan, 3.0, np.nan, 4.0, 5.0]), + ("first", True, "top", [3.0, 4.0, 1.0, 5.0, 2.0, 6.0, 7.0]), + ("first", True, "bottom", [1.0, 2.0, 6.0, 3.0, 7.0, 4.0, 5.0]), + ("first", False, "keep", [4.0, 5.0, np.nan, 3.0, np.nan, 1.0, 2.0]), + ("first", False, "top", [6.0, 7.0, 1.0, 5.0, 2.0, 3.0, 4.0]), + ("first", False, "bottom", [4.0, 5.0, 6.0, 3.0, 7.0, 1.0, 2.0]), + ("dense", True, "keep", [1.0, 1.0, np.nan, 2.0, np.nan, 3.0, 3.0]), + ("dense", True, "top", [2.0, 2.0, 1.0, 3.0, 1.0, 4.0, 4.0]), + ("dense", True, "bottom", [1.0, 1.0, 4.0, 2.0, 4.0, 3.0, 3.0]), + ("dense", False, "keep", [3.0, 3.0, np.nan, 2.0, np.nan, 1.0, 1.0]), + ("dense", False, "top", [4.0, 4.0, 1.0, 3.0, 1.0, 2.0, 2.0]), + ("dense", False, "bottom", [3.0, 3.0, 4.0, 2.0, 4.0, 1.0, 1.0]), + ], +) +def test_infs_n_nans(grps, vals, ties_method, 
ascending, na_option, exp): + # GH 20561 + key = np.repeat(grps, len(vals)) + vals = vals * len(grps) + df = DataFrame({"key": key, "val": vals}) + result = df.groupby("key").rank( + method=ties_method, ascending=ascending, na_option=na_option + ) + exp_df = DataFrame(exp * len(grps), columns=["val"]) + tm.assert_frame_equal(result, exp_df) + + +@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]]) +@pytest.mark.parametrize( + "vals", + [ + np.array([2, 2, np.nan, 8, 2, 6, np.nan, np.nan], dtype=dtype) + for dtype in ["f8", "f4", "f2"] + ] + + [ + [ + pd.Timestamp("2018-01-02"), + pd.Timestamp("2018-01-02"), + np.nan, + pd.Timestamp("2018-01-08"), + pd.Timestamp("2018-01-02"), + pd.Timestamp("2018-01-06"), + np.nan, + np.nan, + ], + [ + pd.Timestamp("2018-01-02", tz="US/Pacific"), + pd.Timestamp("2018-01-02", tz="US/Pacific"), + np.nan, + pd.Timestamp("2018-01-08", tz="US/Pacific"), + pd.Timestamp("2018-01-02", tz="US/Pacific"), + pd.Timestamp("2018-01-06", tz="US/Pacific"), + np.nan, + np.nan, + ], + [ + pd.Timestamp("2018-01-02") - pd.Timestamp(0), + pd.Timestamp("2018-01-02") - pd.Timestamp(0), + np.nan, + pd.Timestamp("2018-01-08") - pd.Timestamp(0), + pd.Timestamp("2018-01-02") - pd.Timestamp(0), + pd.Timestamp("2018-01-06") - pd.Timestamp(0), + np.nan, + np.nan, + ], + [ + pd.Timestamp("2018-01-02").to_period("D"), + pd.Timestamp("2018-01-02").to_period("D"), + np.nan, + pd.Timestamp("2018-01-08").to_period("D"), + pd.Timestamp("2018-01-02").to_period("D"), + pd.Timestamp("2018-01-06").to_period("D"), + np.nan, + np.nan, + ], + ], + ids=lambda x: type(x[0]), +) +@pytest.mark.parametrize( + "ties_method,ascending,na_option,pct,exp", + [ + ( + "average", + True, + "keep", + False, + [2.0, 2.0, np.nan, 5.0, 2.0, 4.0, np.nan, np.nan], + ), + ( + "average", + True, + "keep", + True, + [0.4, 0.4, np.nan, 1.0, 0.4, 0.8, np.nan, np.nan], + ), + ( + "average", + False, + "keep", + False, + [4.0, 4.0, np.nan, 1.0, 4.0, 2.0, np.nan, np.nan], + ), + ( + "average", + False, + "keep", + True, + [0.8, 0.8, np.nan, 0.2, 0.8, 0.4, np.nan, np.nan], + ), + ("min", True, "keep", False, [1.0, 1.0, np.nan, 5.0, 1.0, 4.0, np.nan, np.nan]), + ("min", True, "keep", True, [0.2, 0.2, np.nan, 1.0, 0.2, 0.8, np.nan, np.nan]), + ( + "min", + False, + "keep", + False, + [3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan], + ), + ("min", False, "keep", True, [0.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]), + ("max", True, "keep", False, [3.0, 3.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan]), + ("max", True, "keep", True, [0.6, 0.6, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]), + ( + "max", + False, + "keep", + False, + [5.0, 5.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan], + ), + ("max", False, "keep", True, [1.0, 1.0, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan]), + ( + "first", + True, + "keep", + False, + [1.0, 2.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan], + ), + ( + "first", + True, + "keep", + True, + [0.2, 0.4, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan], + ), + ( + "first", + False, + "keep", + False, + [3.0, 4.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan], + ), + ( + "first", + False, + "keep", + True, + [0.6, 0.8, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan], + ), + ( + "dense", + True, + "keep", + False, + [1.0, 1.0, np.nan, 3.0, 1.0, 2.0, np.nan, np.nan], + ), + ( + "dense", + True, + "keep", + True, + [ + 1.0 / 3.0, + 1.0 / 3.0, + np.nan, + 3.0 / 3.0, + 1.0 / 3.0, + 2.0 / 3.0, + np.nan, + np.nan, + ], + ), + ( + "dense", + False, + "keep", + False, + [3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan], + ), + ( + "dense", + False, 
+ "keep", + True, + [ + 3.0 / 3.0, + 3.0 / 3.0, + np.nan, + 1.0 / 3.0, + 3.0 / 3.0, + 2.0 / 3.0, + np.nan, + np.nan, + ], + ), + ("average", True, "bottom", False, [2.0, 2.0, 7.0, 5.0, 2.0, 4.0, 7.0, 7.0]), + ( + "average", + True, + "bottom", + True, + [0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875], + ), + ("average", False, "bottom", False, [4.0, 4.0, 7.0, 1.0, 4.0, 2.0, 7.0, 7.0]), + ( + "average", + False, + "bottom", + True, + [0.5, 0.5, 0.875, 0.125, 0.5, 0.25, 0.875, 0.875], + ), + ("min", True, "bottom", False, [1.0, 1.0, 6.0, 5.0, 1.0, 4.0, 6.0, 6.0]), + ( + "min", + True, + "bottom", + True, + [0.125, 0.125, 0.75, 0.625, 0.125, 0.5, 0.75, 0.75], + ), + ("min", False, "bottom", False, [3.0, 3.0, 6.0, 1.0, 3.0, 2.0, 6.0, 6.0]), + ( + "min", + False, + "bottom", + True, + [0.375, 0.375, 0.75, 0.125, 0.375, 0.25, 0.75, 0.75], + ), + ("max", True, "bottom", False, [3.0, 3.0, 8.0, 5.0, 3.0, 4.0, 8.0, 8.0]), + ("max", True, "bottom", True, [0.375, 0.375, 1.0, 0.625, 0.375, 0.5, 1.0, 1.0]), + ("max", False, "bottom", False, [5.0, 5.0, 8.0, 1.0, 5.0, 2.0, 8.0, 8.0]), + ( + "max", + False, + "bottom", + True, + [0.625, 0.625, 1.0, 0.125, 0.625, 0.25, 1.0, 1.0], + ), + ("first", True, "bottom", False, [1.0, 2.0, 6.0, 5.0, 3.0, 4.0, 7.0, 8.0]), + ( + "first", + True, + "bottom", + True, + [0.125, 0.25, 0.75, 0.625, 0.375, 0.5, 0.875, 1.0], + ), + ("first", False, "bottom", False, [3.0, 4.0, 6.0, 1.0, 5.0, 2.0, 7.0, 8.0]), + ( + "first", + False, + "bottom", + True, + [0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.0], + ), + ("dense", True, "bottom", False, [1.0, 1.0, 4.0, 3.0, 1.0, 2.0, 4.0, 4.0]), + ("dense", True, "bottom", True, [0.25, 0.25, 1.0, 0.75, 0.25, 0.5, 1.0, 1.0]), + ("dense", False, "bottom", False, [3.0, 3.0, 4.0, 1.0, 3.0, 2.0, 4.0, 4.0]), + ("dense", False, "bottom", True, [0.75, 0.75, 1.0, 0.25, 0.75, 0.5, 1.0, 1.0]), + ], +) +def test_rank_args_missing(grps, vals, ties_method, ascending, na_option, pct, exp): + key = np.repeat(grps, len(vals)) + + orig_vals = vals + vals = list(vals) * len(grps) + if isinstance(orig_vals, np.ndarray): + vals = np.array(vals, dtype=orig_vals.dtype) + + df = DataFrame({"key": key, "val": vals}) + result = df.groupby("key").rank( + method=ties_method, ascending=ascending, na_option=na_option, pct=pct + ) + + exp_df = DataFrame(exp * len(grps), columns=["val"]) + tm.assert_frame_equal(result, exp_df) + + +@pytest.mark.parametrize( + "pct,exp", [(False, [3.0, 3.0, 3.0, 3.0, 3.0]), (True, [0.6, 0.6, 0.6, 0.6, 0.6])] +) +def test_rank_resets_each_group(pct, exp): + df = DataFrame( + {"key": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"], "val": [1] * 10} + ) + result = df.groupby("key").rank(pct=pct) + exp_df = DataFrame(exp * 2, columns=["val"]) + tm.assert_frame_equal(result, exp_df) + + +@pytest.mark.parametrize( + "dtype", ["int64", "int32", "uint64", "uint32", "float64", "float32"] +) +@pytest.mark.parametrize("upper", [True, False]) +def test_rank_avg_even_vals(dtype, upper): + if upper: + # use IntegerDtype/FloatingDtype + dtype = dtype[0].upper() + dtype[1:] + dtype = dtype.replace("Ui", "UI") + df = DataFrame({"key": ["a"] * 4, "val": [1] * 4}) + df["val"] = df["val"].astype(dtype) + assert df["val"].dtype == dtype + + result = df.groupby("key").rank() + exp_df = DataFrame([2.5, 2.5, 2.5, 2.5], columns=["val"]) + if upper: + exp_df = exp_df.astype("Float64") + tm.assert_frame_equal(result, exp_df) + + +@pytest.mark.parametrize("ties_method", ["average", "min", "max", "first", "dense"]) +@pytest.mark.parametrize("ascending", 
[True, False]) +@pytest.mark.parametrize("na_option", ["keep", "top", "bottom"]) +@pytest.mark.parametrize("pct", [True, False]) +@pytest.mark.parametrize( + "vals", [["bar", "bar", "foo", "bar", "baz"], ["bar", np.nan, "foo", np.nan, "baz"]] +) +def test_rank_object_dtype(ties_method, ascending, na_option, pct, vals): + df = DataFrame({"key": ["foo"] * 5, "val": vals}) + mask = df["val"].isna() + + gb = df.groupby("key") + res = gb.rank(method=ties_method, ascending=ascending, na_option=na_option, pct=pct) + + # construct our expected by using numeric values with the same ordering + if mask.any(): + df2 = DataFrame({"key": ["foo"] * 5, "val": [0, np.nan, 2, np.nan, 1]}) + else: + df2 = DataFrame({"key": ["foo"] * 5, "val": [0, 0, 2, 0, 1]}) + + gb2 = df2.groupby("key") + alt = gb2.rank( + method=ties_method, ascending=ascending, na_option=na_option, pct=pct + ) + + tm.assert_frame_equal(res, alt) + + +@pytest.mark.parametrize("na_option", [True, "bad", 1]) +@pytest.mark.parametrize("ties_method", ["average", "min", "max", "first", "dense"]) +@pytest.mark.parametrize("ascending", [True, False]) +@pytest.mark.parametrize("pct", [True, False]) +@pytest.mark.parametrize( + "vals", + [ + ["bar", "bar", "foo", "bar", "baz"], + ["bar", np.nan, "foo", np.nan, "baz"], + [1, np.nan, 2, np.nan, 3], + ], +) +def test_rank_naoption_raises(ties_method, ascending, na_option, pct, vals): + df = DataFrame({"key": ["foo"] * 5, "val": vals}) + msg = "na_option must be one of 'keep', 'top', or 'bottom'" + + with pytest.raises(ValueError, match=msg): + df.groupby("key").rank( + method=ties_method, ascending=ascending, na_option=na_option, pct=pct + ) + + +def test_rank_empty_group(): + # see gh-22519 + column = "A" + df = DataFrame({"A": [0, 1, 0], "B": [1.0, np.nan, 2.0]}) + + result = df.groupby(column).B.rank(pct=True) + expected = Series([0.5, np.nan, 1.0], name="B") + tm.assert_series_equal(result, expected) + + result = df.groupby(column).rank(pct=True) + expected = DataFrame({"B": [0.5, np.nan, 1.0]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "input_key,input_value,output_value", + [ + ([1, 2], [1, 1], [1.0, 1.0]), + ([1, 1, 2, 2], [1, 2, 1, 2], [0.5, 1.0, 0.5, 1.0]), + ([1, 1, 2, 2], [1, 2, 1, np.nan], [0.5, 1.0, 1.0, np.nan]), + ([1, 1, 2], [1, 2, np.nan], [0.5, 1.0, np.nan]), + ], +) +def test_rank_zero_div(input_key, input_value, output_value): + # GH 23666 + df = DataFrame({"A": input_key, "B": input_value}) + + result = df.groupby("A").rank(method="dense", pct=True) + expected = DataFrame({"B": output_value}) + tm.assert_frame_equal(result, expected) + + +def test_rank_min_int(): + # GH-32859 + df = DataFrame( + { + "grp": [1, 1, 2], + "int_col": [ + np.iinfo(np.int64).min, + np.iinfo(np.int64).max, + np.iinfo(np.int64).min, + ], + "datetimelike": [NaT, datetime(2001, 1, 1), NaT], + } + ) + + result = df.groupby("grp").rank() + expected = DataFrame( + {"int_col": [1.0, 2.0, 1.0], "datetimelike": [np.nan, 1.0, np.nan]} + ) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("use_nan", [True, False]) +def test_rank_pct_equal_values_on_group_transition(use_nan): + # GH#40518 + fill_value = np.nan if use_nan else 3 + df = DataFrame( + [ + [-1, 1], + [-1, 2], + [1, fill_value], + [-1, fill_value], + ], + columns=["group", "val"], + ) + result = df.groupby(["group"])["val"].rank( + method="dense", + pct=True, + ) + if use_nan: + expected = Series([0.5, 1, np.nan, np.nan], name="val") + else: + expected = Series([1 / 3, 2 / 3, 1, 1], name="val") + + 
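+    # With method="dense" and pct=True, each rank is divided by the number of
+    # distinct non-NA values in its group, so the NaN fill shrinks the
+    # denominator for the -1 group (2 distinct values) versus the filled case (3).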
tm.assert_series_equal(result, expected) + + +def test_rank_multiindex(): + # GH27721 + df = concat( + { + "a": DataFrame({"col1": [3, 4], "col2": [1, 2]}), + "b": DataFrame({"col3": [5, 6], "col4": [7, 8]}), + }, + axis=1, + ) + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby(level=0, axis=1) + msg = "DataFrameGroupBy.rank with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = gb.rank(axis=1) + + expected = concat( + [ + df["a"].rank(axis=1), + df["b"].rank(axis=1), + ], + axis=1, + keys=["a", "b"], + ) + tm.assert_frame_equal(result, expected) + + +def test_groupby_axis0_rank_axis1(): + # GH#41320 + df = DataFrame( + {0: [1, 3, 5, 7], 1: [2, 4, 6, 8], 2: [1.5, 3.5, 5.5, 7.5]}, + index=["a", "a", "b", "b"], + ) + msg = "The 'axis' keyword in DataFrame.groupby is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby(level=0, axis=0) + + msg = "DataFrameGroupBy.rank with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = gb.rank(axis=1) + + # This should match what we get when "manually" operating group-by-group + expected = concat([df.loc["a"].rank(axis=1), df.loc["b"].rank(axis=1)], axis=0) + tm.assert_frame_equal(res, expected) + + # check that we haven't accidentally written a case that coincidentally + # matches rank(axis=0) + msg = "The 'axis' keyword in DataFrameGroupBy.rank" + with tm.assert_produces_warning(FutureWarning, match=msg): + alt = gb.rank(axis=0) + assert not alt.equals(expected) + + +def test_groupby_axis0_cummax_axis1(): + # case where groupby axis is 0 and axis keyword in transform is 1 + + # df has mixed dtype -> multiple blocks + df = DataFrame( + {0: [1, 3, 5, 7], 1: [2, 4, 6, 8], 2: [1.5, 3.5, 5.5, 7.5]}, + index=["a", "a", "b", "b"], + ) + msg = "The 'axis' keyword in DataFrame.groupby is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby(level=0, axis=0) + + msg = "DataFrameGroupBy.cummax with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + cmax = gb.cummax(axis=1) + expected = df[[0, 1]].astype(np.float64) + expected[2] = expected[1] + tm.assert_frame_equal(cmax, expected) + + +def test_non_unique_index(): + # GH 16577 + df = DataFrame( + {"A": [1.0, 2.0, 3.0, np.nan], "value": 1.0}, + index=[pd.Timestamp("20170101", tz="US/Eastern")] * 4, + ) + result = df.groupby([df.index, "A"]).value.rank(ascending=True, pct=True) + expected = Series( + [1.0, 1.0, 1.0, np.nan], + index=[pd.Timestamp("20170101", tz="US/Eastern")] * 4, + name="value", + ) + tm.assert_series_equal(result, expected) + + +def test_rank_categorical(): + cat = pd.Categorical(["a", "a", "b", np.nan, "c", "b"], ordered=True) + cat2 = pd.Categorical([1, 2, 3, np.nan, 4, 5], ordered=True) + + df = DataFrame({"col1": [0, 1, 0, 1, 0, 1], "col2": cat, "col3": cat2}) + + gb = df.groupby("col1") + + res = gb.rank() + + expected = df.astype(object).groupby("col1").rank() + tm.assert_frame_equal(res, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_sample.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_sample.py new file mode 100644 index 00000000..4dd47474 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_sample.py @@ -0,0 +1,154 @@ +import pytest + +from pandas import ( + DataFrame, + Index, + Series, +) +import pandas._testing as tm + + 
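+# A minimal sketch (illustration only, not an upstream pandas test) of the
+# behavior exercised below: GroupBy.sample draws rows independently within
+# each group, so n=2 over two groups returns four rows. The helper name
+# `_demo_group_sample` is hypothetical and exists purely for this example.
+def _demo_group_sample():
+    df = DataFrame({"a": [1] * 3 + [2] * 3, "b": range(6)})
+    sampled = df.groupby("a").sample(n=2, random_state=0)
+    assert len(sampled) == 4  # two rows drawn from each "a" group
+    return sampled
+
+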
+@pytest.mark.parametrize("n, frac", [(2, None), (None, 0.2)]) +def test_groupby_sample_balanced_groups_shape(n, frac): + values = [1] * 10 + [2] * 10 + df = DataFrame({"a": values, "b": values}) + + result = df.groupby("a").sample(n=n, frac=frac) + values = [1] * 2 + [2] * 2 + expected = DataFrame({"a": values, "b": values}, index=result.index) + tm.assert_frame_equal(result, expected) + + result = df.groupby("a")["b"].sample(n=n, frac=frac) + expected = Series(values, name="b", index=result.index) + tm.assert_series_equal(result, expected) + + +def test_groupby_sample_unbalanced_groups_shape(): + values = [1] * 10 + [2] * 20 + df = DataFrame({"a": values, "b": values}) + + result = df.groupby("a").sample(n=5) + values = [1] * 5 + [2] * 5 + expected = DataFrame({"a": values, "b": values}, index=result.index) + tm.assert_frame_equal(result, expected) + + result = df.groupby("a")["b"].sample(n=5) + expected = Series(values, name="b", index=result.index) + tm.assert_series_equal(result, expected) + + +def test_groupby_sample_index_value_spans_groups(): + values = [1] * 3 + [2] * 3 + df = DataFrame({"a": values, "b": values}, index=[1, 2, 2, 2, 2, 2]) + + result = df.groupby("a").sample(n=2) + values = [1] * 2 + [2] * 2 + expected = DataFrame({"a": values, "b": values}, index=result.index) + tm.assert_frame_equal(result, expected) + + result = df.groupby("a")["b"].sample(n=2) + expected = Series(values, name="b", index=result.index) + tm.assert_series_equal(result, expected) + + +def test_groupby_sample_n_and_frac_raises(): + df = DataFrame({"a": [1, 2], "b": [1, 2]}) + msg = "Please enter a value for `frac` OR `n`, not both" + + with pytest.raises(ValueError, match=msg): + df.groupby("a").sample(n=1, frac=1.0) + + with pytest.raises(ValueError, match=msg): + df.groupby("a")["b"].sample(n=1, frac=1.0) + + +def test_groupby_sample_frac_gt_one_without_replacement_raises(): + df = DataFrame({"a": [1, 2], "b": [1, 2]}) + msg = "Replace has to be set to `True` when upsampling the population `frac` > 1." + + with pytest.raises(ValueError, match=msg): + df.groupby("a").sample(frac=1.5, replace=False) + + with pytest.raises(ValueError, match=msg): + df.groupby("a")["b"].sample(frac=1.5, replace=False) + + +@pytest.mark.parametrize("n", [-1, 1.5]) +def test_groupby_sample_invalid_n_raises(n): + df = DataFrame({"a": [1, 2], "b": [1, 2]}) + + if n < 0: + msg = "A negative number of rows requested. Please provide `n` >= 0." 
+ else: + msg = "Only integers accepted as `n` values" + + with pytest.raises(ValueError, match=msg): + df.groupby("a").sample(n=n) + + with pytest.raises(ValueError, match=msg): + df.groupby("a")["b"].sample(n=n) + + +def test_groupby_sample_oversample(): + values = [1] * 10 + [2] * 10 + df = DataFrame({"a": values, "b": values}) + + result = df.groupby("a").sample(frac=2.0, replace=True) + values = [1] * 20 + [2] * 20 + expected = DataFrame({"a": values, "b": values}, index=result.index) + tm.assert_frame_equal(result, expected) + + result = df.groupby("a")["b"].sample(frac=2.0, replace=True) + expected = Series(values, name="b", index=result.index) + tm.assert_series_equal(result, expected) + + +def test_groupby_sample_without_n_or_frac(): + values = [1] * 10 + [2] * 10 + df = DataFrame({"a": values, "b": values}) + + result = df.groupby("a").sample(n=None, frac=None) + expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=result.index) + tm.assert_frame_equal(result, expected) + + result = df.groupby("a")["b"].sample(n=None, frac=None) + expected = Series([1, 2], name="b", index=result.index) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "index, expected_index", + [(["w", "x", "y", "z"], ["w", "w", "y", "y"]), ([3, 4, 5, 6], [3, 3, 5, 5])], +) +def test_groupby_sample_with_weights(index, expected_index): + # GH 39927 - tests for integer index needed + values = [1] * 2 + [2] * 2 + df = DataFrame({"a": values, "b": values}, index=Index(index)) + + result = df.groupby("a").sample(n=2, replace=True, weights=[1, 0, 1, 0]) + expected = DataFrame({"a": values, "b": values}, index=Index(expected_index)) + tm.assert_frame_equal(result, expected) + + result = df.groupby("a")["b"].sample(n=2, replace=True, weights=[1, 0, 1, 0]) + expected = Series(values, name="b", index=Index(expected_index)) + tm.assert_series_equal(result, expected) + + +def test_groupby_sample_with_selections(): + # GH 39928 + values = [1] * 10 + [2] * 10 + df = DataFrame({"a": values, "b": values, "c": values}) + + result = df.groupby("a")[["b", "c"]].sample(n=None, frac=None) + expected = DataFrame({"b": [1, 2], "c": [1, 2]}, index=result.index) + tm.assert_frame_equal(result, expected) + + +def test_groupby_sample_with_empty_inputs(): + # GH48459 + df = DataFrame({"a": [], "b": []}) + groupby_df = df.groupby("a") + + result = groupby_df.sample() + expected = df + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_size.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_size.py new file mode 100644 index 00000000..93a4e743 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_size.py @@ -0,0 +1,130 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import is_integer_dtype + +from pandas import ( + DataFrame, + Index, + PeriodIndex, + Series, +) +import pandas._testing as tm + + +@pytest.mark.parametrize("by", ["A", "B", ["A", "B"]]) +def test_size(df, by): + grouped = df.groupby(by=by) + result = grouped.size() + for key, group in grouped: + assert result[key] == len(group) + + +@pytest.mark.parametrize( + "by", + [ + [0, 0, 0, 0], + [0, 1, 1, 1], + [1, 0, 1, 1], + [0, None, None, None], + pytest.param([None, None, None, None], marks=pytest.mark.xfail), + ], +) +def test_size_axis_1(df, axis_1, by, sort, dropna): + # GH#45715 + counts = {key: sum(value == key for value in by) for key in dict.fromkeys(by)} + if dropna: 
+ counts = {key: value for key, value in counts.items() if key is not None} + expected = Series(counts, dtype="int64") + if sort: + expected = expected.sort_index() + if is_integer_dtype(expected.index.dtype) and not any(x is None for x in by): + expected.index = expected.index.astype(int) + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped = df.groupby(by=by, axis=axis_1, sort=sort, dropna=dropna) + result = grouped.size() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("by", ["A", "B", ["A", "B"]]) +@pytest.mark.parametrize("sort", [True, False]) +def test_size_sort(sort, by): + df = DataFrame(np.random.default_rng(2).choice(20, (1000, 3)), columns=list("ABC")) + left = df.groupby(by=by, sort=sort).size() + right = df.groupby(by=by, sort=sort)["C"].apply(lambda a: a.shape[0]) + tm.assert_series_equal(left, right, check_names=False) + + +def test_size_series_dataframe(): + # https://github.com/pandas-dev/pandas/issues/11699 + df = DataFrame(columns=["A", "B"]) + out = Series(dtype="int64", index=Index([], name="A")) + tm.assert_series_equal(df.groupby("A").size(), out) + + +def test_size_groupby_all_null(): + # https://github.com/pandas-dev/pandas/issues/23050 + # Assert no 'Value Error : Length of passed values is 2, index implies 0' + df = DataFrame({"A": [None, None]}) # all-null groups + result = df.groupby("A").size() + expected = Series(dtype="int64", index=Index([], name="A")) + tm.assert_series_equal(result, expected) + + +def test_size_period_index(): + # https://github.com/pandas-dev/pandas/issues/34010 + ser = Series([1], index=PeriodIndex(["2000"], name="A", freq="D")) + grp = ser.groupby(level="A") + result = grp.size() + tm.assert_series_equal(result, ser) + + +@pytest.mark.parametrize("as_index", [True, False]) +def test_size_on_categorical(as_index): + df = DataFrame([[1, 1], [2, 2]], columns=["A", "B"]) + df["A"] = df["A"].astype("category") + result = df.groupby(["A", "B"], as_index=as_index, observed=False).size() + + expected = DataFrame( + [[1, 1, 1], [1, 2, 0], [2, 1, 0], [2, 2, 1]], columns=["A", "B", "size"] + ) + expected["A"] = expected["A"].astype("category") + if as_index: + expected = expected.set_index(["A", "B"])["size"].rename(None) + + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["Int64", "Float64", "boolean"]) +def test_size_series_masked_type_returns_Int64(dtype): + # GH 54132 + ser = Series([1, 1, 1], index=["a", "a", "b"], dtype=dtype) + result = ser.groupby(level=0).size() + expected = Series([2, 1], dtype="Int64", index=["a", "b"]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype", + [ + object, + pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")), + pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")), + ], +) +def test_size_strings(dtype): + # GH#55627 + df = DataFrame({"a": ["a", "a", "b"], "b": "a"}, dtype=dtype) + result = df.groupby("a")["b"].size() + exp_dtype = "Int64" if dtype == "string[pyarrow]" else "int64" + expected = Series( + [2, 1], + index=Index(["a", "b"], name="a", dtype=dtype), + name="b", + dtype=exp_dtype, + ) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_skew.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_skew.py new file mode 100644 index 00000000..563da89b --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_skew.py @@ -0,0 +1,27 @@ +import numpy as np + +import pandas as pd +import pandas._testing as tm + + +def test_groupby_skew_equivalence(): + # Test that the groupby skew method (which uses libgroupby.group_skew) + # matches the results of operating group-by-group (which uses nanops.nanskew) + nrows = 1000 + ngroups = 3 + ncols = 2 + nan_frac = 0.05 + + arr = np.random.default_rng(2).standard_normal((nrows, ncols)) + arr[np.random.default_rng(2).random(nrows) < nan_frac] = np.nan + + df = pd.DataFrame(arr) + grps = np.random.default_rng(2).integers(0, ngroups, size=nrows) + gb = df.groupby(grps) + + result = gb.skew() + + grpwise = [grp.skew().to_frame(i).T for i, grp in gb] + expected = pd.concat(grpwise, axis=0) + expected.index = expected.index.astype(result.index.dtype) # 32bit builds + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_timegrouper.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_timegrouper.py new file mode 100644 index 00000000..527e7c60 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_timegrouper.py @@ -0,0 +1,927 @@ +""" +Test with the TimeGrouper / grouping with datetimes +""" +from datetime import ( + datetime, + timedelta, +) +from io import StringIO + +import numpy as np +import pytest +import pytz + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + Series, + Timestamp, + date_range, + offsets, +) +import pandas._testing as tm +from pandas.core.groupby.grouper import Grouper +from pandas.core.groupby.ops import BinGrouper + + +@pytest.fixture +def frame_for_truncated_bingrouper(): + """ + DataFrame used by groupby_with_truncated_bingrouper, made into + a separate fixture for easier re-use in + test_groupby_apply_timegrouper_with_nat_apply_squeeze + """ + df = DataFrame( + { + "Quantity": [18, 3, 5, 1, 9, 3], + "Date": [ + Timestamp(2013, 9, 1, 13, 0), + Timestamp(2013, 9, 1, 13, 5), + Timestamp(2013, 10, 1, 20, 0), + Timestamp(2013, 10, 3, 10, 0), + pd.NaT, + Timestamp(2013, 9, 2, 14, 0), + ], + } + ) + return df + + +@pytest.fixture +def groupby_with_truncated_bingrouper(frame_for_truncated_bingrouper): + """ + GroupBy object such that gb.grouper is a BinGrouper and + len(gb.grouper.result_index) < len(gb.grouper.group_keys_seq) + + Aggregations on this groupby should have + + dti = date_range("2013-09-01", "2013-10-01", freq="5D", name="Date") + + as either the index or an index level.
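+
+ The pd.NaT row in "Date" falls outside every 5-day bin, which is why the
+ binned result_index comes out shorter than the per-row group_keys_seq.
+ A sketch (not asserted here) of the shape aggregations take:
+
+ df.groupby(Grouper(key="Date", freq="5D"))["Quantity"].sum()
+ # -> Series indexed by the 7-entry dti above, with the NaT row excluded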
+ """ + df = frame_for_truncated_bingrouper + + tdg = Grouper(key="Date", freq="5D") + gb = df.groupby(tdg) + + # check we're testing the case we're interested in + assert len(gb.grouper.result_index) != len(gb.grouper.group_keys_seq) + + return gb + + +class TestGroupBy: + def test_groupby_with_timegrouper(self): + # GH 4161 + # TimeGrouper requires a sorted index + # also verifies that the resultant index has the correct name + df_original = DataFrame( + { + "Buyer": "Carl Carl Carl Carl Joe Carl".split(), + "Quantity": [18, 3, 5, 1, 9, 3], + "Date": [ + datetime(2013, 9, 1, 13, 0), + datetime(2013, 9, 1, 13, 5), + datetime(2013, 10, 1, 20, 0), + datetime(2013, 10, 3, 10, 0), + datetime(2013, 12, 2, 12, 0), + datetime(2013, 9, 2, 14, 0), + ], + } + ) + + # GH 6908 change target column's order + df_reordered = df_original.sort_values(by="Quantity") + + for df in [df_original, df_reordered]: + df = df.set_index(["Date"]) + + expected = DataFrame( + {"Buyer": 0, "Quantity": 0}, + index=date_range( + "20130901", "20131205", freq="5D", name="Date", inclusive="left" + ), + ) + # Cast to object to avoid implicit cast when setting entry to "CarlCarlCarl" + expected = expected.astype({"Buyer": object}) + expected.iloc[0, 0] = "CarlCarlCarl" + expected.iloc[6, 0] = "CarlCarl" + expected.iloc[18, 0] = "Joe" + expected.iloc[[0, 6, 18], 1] = np.array([24, 6, 9], dtype="int64") + + result1 = df.resample("5D").sum() + tm.assert_frame_equal(result1, expected) + + df_sorted = df.sort_index() + result2 = df_sorted.groupby(Grouper(freq="5D")).sum() + tm.assert_frame_equal(result2, expected) + + result3 = df.groupby(Grouper(freq="5D")).sum() + tm.assert_frame_equal(result3, expected) + + @pytest.mark.parametrize("should_sort", [True, False]) + def test_groupby_with_timegrouper_methods(self, should_sort): + # GH 3881 + # make sure API of timegrouper conforms + + df = DataFrame( + { + "Branch": "A A A A A B".split(), + "Buyer": "Carl Mark Carl Joe Joe Carl".split(), + "Quantity": [1, 3, 5, 8, 9, 3], + "Date": [ + datetime(2013, 1, 1, 13, 0), + datetime(2013, 1, 1, 13, 5), + datetime(2013, 10, 1, 20, 0), + datetime(2013, 10, 2, 10, 0), + datetime(2013, 12, 2, 12, 0), + datetime(2013, 12, 2, 14, 0), + ], + } + ) + + if should_sort: + df = df.sort_values(by="Quantity", ascending=False) + + df = df.set_index("Date", drop=False) + g = df.groupby(Grouper(freq="6M")) + assert g.group_keys + + assert isinstance(g.grouper, BinGrouper) + groups = g.groups + assert isinstance(groups, dict) + assert len(groups) == 3 + + def test_timegrouper_with_reg_groups(self): + # GH 3794 + # allow combination of timegrouper/reg groups + + df_original = DataFrame( + { + "Branch": "A A A A A A A B".split(), + "Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(), + "Quantity": [1, 3, 5, 1, 8, 1, 9, 3], + "Date": [ + datetime(2013, 1, 1, 13, 0), + datetime(2013, 1, 1, 13, 5), + datetime(2013, 10, 1, 20, 0), + datetime(2013, 10, 2, 10, 0), + datetime(2013, 10, 1, 20, 0), + datetime(2013, 10, 2, 10, 0), + datetime(2013, 12, 2, 12, 0), + datetime(2013, 12, 2, 14, 0), + ], + } + ).set_index("Date") + + df_sorted = df_original.sort_values(by="Quantity", ascending=False) + + for df in [df_original, df_sorted]: + expected = DataFrame( + { + "Buyer": "Carl Joe Mark".split(), + "Quantity": [10, 18, 3], + "Date": [ + datetime(2013, 12, 31, 0, 0), + datetime(2013, 12, 31, 0, 0), + datetime(2013, 12, 31, 0, 0), + ], + } + ).set_index(["Date", "Buyer"]) + + msg = "The default value of numeric_only" + result = df.groupby([Grouper(freq="A"), 
"Buyer"]).sum(numeric_only=True) + tm.assert_frame_equal(result, expected) + + expected = DataFrame( + { + "Buyer": "Carl Mark Carl Joe".split(), + "Quantity": [1, 3, 9, 18], + "Date": [ + datetime(2013, 1, 1, 0, 0), + datetime(2013, 1, 1, 0, 0), + datetime(2013, 7, 1, 0, 0), + datetime(2013, 7, 1, 0, 0), + ], + } + ).set_index(["Date", "Buyer"]) + result = df.groupby([Grouper(freq="6MS"), "Buyer"]).sum(numeric_only=True) + tm.assert_frame_equal(result, expected) + + df_original = DataFrame( + { + "Branch": "A A A A A A A B".split(), + "Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(), + "Quantity": [1, 3, 5, 1, 8, 1, 9, 3], + "Date": [ + datetime(2013, 10, 1, 13, 0), + datetime(2013, 10, 1, 13, 5), + datetime(2013, 10, 1, 20, 0), + datetime(2013, 10, 2, 10, 0), + datetime(2013, 10, 1, 20, 0), + datetime(2013, 10, 2, 10, 0), + datetime(2013, 10, 2, 12, 0), + datetime(2013, 10, 2, 14, 0), + ], + } + ).set_index("Date") + + df_sorted = df_original.sort_values(by="Quantity", ascending=False) + for df in [df_original, df_sorted]: + expected = DataFrame( + { + "Buyer": "Carl Joe Mark Carl Joe".split(), + "Quantity": [6, 8, 3, 4, 10], + "Date": [ + datetime(2013, 10, 1, 0, 0), + datetime(2013, 10, 1, 0, 0), + datetime(2013, 10, 1, 0, 0), + datetime(2013, 10, 2, 0, 0), + datetime(2013, 10, 2, 0, 0), + ], + } + ).set_index(["Date", "Buyer"]) + + result = df.groupby([Grouper(freq="1D"), "Buyer"]).sum(numeric_only=True) + tm.assert_frame_equal(result, expected) + + result = df.groupby([Grouper(freq="1M"), "Buyer"]).sum(numeric_only=True) + expected = DataFrame( + { + "Buyer": "Carl Joe Mark".split(), + "Quantity": [10, 18, 3], + "Date": [ + datetime(2013, 10, 31, 0, 0), + datetime(2013, 10, 31, 0, 0), + datetime(2013, 10, 31, 0, 0), + ], + } + ).set_index(["Date", "Buyer"]) + tm.assert_frame_equal(result, expected) + + # passing the name + df = df.reset_index() + result = df.groupby([Grouper(freq="1M", key="Date"), "Buyer"]).sum( + numeric_only=True + ) + tm.assert_frame_equal(result, expected) + + with pytest.raises(KeyError, match="'The grouper name foo is not found'"): + df.groupby([Grouper(freq="1M", key="foo"), "Buyer"]).sum() + + # passing the level + df = df.set_index("Date") + result = df.groupby([Grouper(freq="1M", level="Date"), "Buyer"]).sum( + numeric_only=True + ) + tm.assert_frame_equal(result, expected) + result = df.groupby([Grouper(freq="1M", level=0), "Buyer"]).sum( + numeric_only=True + ) + tm.assert_frame_equal(result, expected) + + with pytest.raises(ValueError, match="The level foo is not valid"): + df.groupby([Grouper(freq="1M", level="foo"), "Buyer"]).sum() + + # multi names + df = df.copy() + df["Date"] = df.index + offsets.MonthEnd(2) + result = df.groupby([Grouper(freq="1M", key="Date"), "Buyer"]).sum( + numeric_only=True + ) + expected = DataFrame( + { + "Buyer": "Carl Joe Mark".split(), + "Quantity": [10, 18, 3], + "Date": [ + datetime(2013, 11, 30, 0, 0), + datetime(2013, 11, 30, 0, 0), + datetime(2013, 11, 30, 0, 0), + ], + } + ).set_index(["Date", "Buyer"]) + tm.assert_frame_equal(result, expected) + + # error as we have both a level and a name! + msg = "The Grouper cannot specify both a key and a level!" 
+ with pytest.raises(ValueError, match=msg): + df.groupby( + [Grouper(freq="1M", key="Date", level="Date"), "Buyer"] + ).sum() + + # single groupers + expected = DataFrame( + [[31]], + columns=["Quantity"], + index=DatetimeIndex( + [datetime(2013, 10, 31, 0, 0)], freq=offsets.MonthEnd(), name="Date" + ), + ) + result = df.groupby(Grouper(freq="1M")).sum(numeric_only=True) + tm.assert_frame_equal(result, expected) + + result = df.groupby([Grouper(freq="1M")]).sum(numeric_only=True) + tm.assert_frame_equal(result, expected) + + expected.index = expected.index.shift(1) + assert expected.index.freq == offsets.MonthEnd() + result = df.groupby(Grouper(freq="1M", key="Date")).sum(numeric_only=True) + tm.assert_frame_equal(result, expected) + + result = df.groupby([Grouper(freq="1M", key="Date")]).sum(numeric_only=True) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("freq", ["D", "M", "A", "Q-APR"]) + def test_timegrouper_with_reg_groups_freq(self, freq): + # GH 6764 multiple grouping with/without sort + df = DataFrame( + { + "date": pd.to_datetime( + [ + "20121002", + "20121007", + "20130130", + "20130202", + "20130305", + "20121002", + "20121207", + "20130130", + "20130202", + "20130305", + "20130202", + "20130305", + ] + ), + "user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5], + "whole_cost": [ + 1790, + 364, + 280, + 259, + 201, + 623, + 90, + 312, + 359, + 301, + 359, + 801, + ], + "cost1": [12, 15, 10, 24, 39, 1, 0, 90, 45, 34, 1, 12], + } + ).set_index("date") + + expected = ( + df.groupby("user_id")["whole_cost"] + .resample(freq) + .sum(min_count=1) # XXX + .dropna() + .reorder_levels(["date", "user_id"]) + .sort_index() + .astype("int64") + ) + expected.name = "whole_cost" + + result1 = ( + df.sort_index().groupby([Grouper(freq=freq), "user_id"])["whole_cost"].sum() + ) + tm.assert_series_equal(result1, expected) + + result2 = df.groupby([Grouper(freq=freq), "user_id"])["whole_cost"].sum() + tm.assert_series_equal(result2, expected) + + def test_timegrouper_get_group(self): + # GH 6914 + + df_original = DataFrame( + { + "Buyer": "Carl Joe Joe Carl Joe Carl".split(), + "Quantity": [18, 3, 5, 1, 9, 3], + "Date": [ + datetime(2013, 9, 1, 13, 0), + datetime(2013, 9, 1, 13, 5), + datetime(2013, 10, 1, 20, 0), + datetime(2013, 10, 3, 10, 0), + datetime(2013, 12, 2, 12, 0), + datetime(2013, 9, 2, 14, 0), + ], + } + ) + df_reordered = df_original.sort_values(by="Quantity") + + # single grouping + expected_list = [ + df_original.iloc[[0, 1, 5]], + df_original.iloc[[2, 3]], + df_original.iloc[[4]], + ] + dt_list = ["2013-09-30", "2013-10-31", "2013-12-31"] + + for df in [df_original, df_reordered]: + grouped = df.groupby(Grouper(freq="M", key="Date")) + for t, expected in zip(dt_list, expected_list): + dt = Timestamp(t) + result = grouped.get_group(dt) + tm.assert_frame_equal(result, expected) + + # multiple grouping + expected_list = [ + df_original.iloc[[1]], + df_original.iloc[[3]], + df_original.iloc[[4]], + ] + g_list = [("Joe", "2013-09-30"), ("Carl", "2013-10-31"), ("Joe", "2013-12-31")] + + for df in [df_original, df_reordered]: + grouped = df.groupby(["Buyer", Grouper(freq="M", key="Date")]) + for (b, t), expected in zip(g_list, expected_list): + dt = Timestamp(t) + result = grouped.get_group((b, dt)) + tm.assert_frame_equal(result, expected) + + # with index + df_original = df_original.set_index("Date") + df_reordered = df_original.sort_values(by="Quantity") + + expected_list = [ + df_original.iloc[[0, 1, 5]], + df_original.iloc[[2, 3]], + df_original.iloc[[4]], + ] 
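+ # with "Date" as the index, Grouper needs no key= and get_group still takes each bin's Timestamp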
+ + for df in [df_original, df_reordered]: + grouped = df.groupby(Grouper(freq="M")) + for t, expected in zip(dt_list, expected_list): + dt = Timestamp(t) + result = grouped.get_group(dt) + tm.assert_frame_equal(result, expected) + + def test_timegrouper_apply_return_type_series(self): + # Using `apply` with the `TimeGrouper` should give the + # same return type as an `apply` with a `Grouper`. + # Issue #11742 + df = DataFrame({"date": ["10/10/2000", "11/10/2000"], "value": [10, 13]}) + df_dt = df.copy() + df_dt["date"] = pd.to_datetime(df_dt["date"]) + + def sumfunc_series(x): + return Series([x["value"].sum()], ("sum",)) + + expected = df.groupby(Grouper(key="date")).apply(sumfunc_series) + result = df_dt.groupby(Grouper(freq="M", key="date")).apply(sumfunc_series) + tm.assert_frame_equal( + result.reset_index(drop=True), expected.reset_index(drop=True) + ) + + def test_timegrouper_apply_return_type_value(self): + # Using `apply` with the `TimeGrouper` should give the + # same return type as an `apply` with a `Grouper`. + # Issue #11742 + df = DataFrame({"date": ["10/10/2000", "11/10/2000"], "value": [10, 13]}) + df_dt = df.copy() + df_dt["date"] = pd.to_datetime(df_dt["date"]) + + def sumfunc_value(x): + return x.value.sum() + + expected = df.groupby(Grouper(key="date")).apply(sumfunc_value) + result = df_dt.groupby(Grouper(freq="M", key="date")).apply(sumfunc_value) + tm.assert_series_equal( + result.reset_index(drop=True), expected.reset_index(drop=True) + ) + + def test_groupby_groups_datetimeindex(self): + # GH#1430 + periods = 1000 + ind = date_range(start="2012/1/1", freq="5min", periods=periods) + df = DataFrame( + {"high": np.arange(periods), "low": np.arange(periods)}, index=ind + ) + grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day)) + + # it works! 
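+ # a callable grouper is applied to each index label, here collapsing timestamps to calendar days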
+ groups = grouped.groups + assert isinstance(next(iter(groups.keys())), datetime) + + # GH#11442 + index = date_range("2015/01/01", periods=5, name="date") + df = DataFrame({"A": [5, 6, 7, 8, 9], "B": [1, 2, 3, 4, 5]}, index=index) + result = df.groupby(level="date").groups + dates = ["2015-01-05", "2015-01-04", "2015-01-03", "2015-01-02", "2015-01-01"] + expected = { + Timestamp(date): DatetimeIndex([date], name="date") for date in dates + } + tm.assert_dict_equal(result, expected) + + grouped = df.groupby(level="date") + for date in dates: + result = grouped.get_group(date) + data = [[df.loc[date, "A"], df.loc[date, "B"]]] + expected_index = DatetimeIndex([date], name="date", freq="D") + expected = DataFrame(data, columns=list("AB"), index=expected_index) + tm.assert_frame_equal(result, expected) + + def test_groupby_groups_datetimeindex_tz(self): + # GH 3950 + dates = [ + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + ] + df = DataFrame( + { + "label": ["a", "a", "a", "b", "b", "b"], + "datetime": dates, + "value1": np.arange(6, dtype="int64"), + "value2": [1, 2] * 3, + } + ) + df["datetime"] = df["datetime"].apply(lambda d: Timestamp(d, tz="US/Pacific")) + + exp_idx1 = DatetimeIndex( + [ + "2011-07-19 07:00:00", + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + "2011-07-19 09:00:00", + ], + tz="US/Pacific", + name="datetime", + ) + exp_idx2 = Index(["a", "b"] * 3, name="label") + exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2]) + expected = DataFrame( + {"value1": [0, 3, 1, 4, 2, 5], "value2": [1, 2, 2, 1, 1, 2]}, + index=exp_idx, + columns=["value1", "value2"], + ) + + result = df.groupby(["datetime", "label"]).sum() + tm.assert_frame_equal(result, expected) + + # by level + didx = DatetimeIndex(dates, tz="Asia/Tokyo") + df = DataFrame( + {"value1": np.arange(6, dtype="int64"), "value2": [1, 2, 3, 1, 2, 3]}, + index=didx, + ) + + exp_idx = DatetimeIndex( + ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"], + tz="Asia/Tokyo", + ) + expected = DataFrame( + {"value1": [3, 5, 7], "value2": [2, 4, 6]}, + index=exp_idx, + columns=["value1", "value2"], + ) + + result = df.groupby(level=0).sum() + tm.assert_frame_equal(result, expected) + + def test_frame_datetime64_handling_groupby(self): + # it works! 
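+ # np.datetime64 entries are stored as datetime64[ns] and come back as Timestamps, per the assert below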
+ df = DataFrame( + [(3, np.datetime64("2012-07-03")), (3, np.datetime64("2012-07-04"))], + columns=["a", "date"], + ) + result = df.groupby("a").first() + assert result["date"][3] == Timestamp("2012-07-03") + + def test_groupby_multi_timezone(self): + # combining multiple / different timezones yields UTC + + data = """0,2000-01-28 16:47:00,America/Chicago +1,2000-01-29 16:48:00,America/Chicago +2,2000-01-30 16:49:00,America/Los_Angeles +3,2000-01-31 16:50:00,America/Chicago +4,2000-01-01 16:50:00,America/New_York""" + + df = pd.read_csv(StringIO(data), header=None, names=["value", "date", "tz"]) + result = df.groupby("tz", group_keys=False).date.apply( + lambda x: pd.to_datetime(x).dt.tz_localize(x.name) + ) + + expected = Series( + [ + Timestamp("2000-01-28 16:47:00-0600", tz="America/Chicago"), + Timestamp("2000-01-29 16:48:00-0600", tz="America/Chicago"), + Timestamp("2000-01-30 16:49:00-0800", tz="America/Los_Angeles"), + Timestamp("2000-01-31 16:50:00-0600", tz="America/Chicago"), + Timestamp("2000-01-01 16:50:00-0500", tz="America/New_York"), + ], + name="date", + dtype=object, + ) + tm.assert_series_equal(result, expected) + + tz = "America/Chicago" + res_values = df.groupby("tz").date.get_group(tz) + result = pd.to_datetime(res_values).dt.tz_localize(tz) + exp_values = Series( + ["2000-01-28 16:47:00", "2000-01-29 16:48:00", "2000-01-31 16:50:00"], + index=[0, 1, 3], + name="date", + ) + expected = pd.to_datetime(exp_values).dt.tz_localize(tz) + tm.assert_series_equal(result, expected) + + def test_groupby_groups_periods(self): + dates = [ + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + ] + df = DataFrame( + { + "label": ["a", "a", "a", "b", "b", "b"], + "period": [pd.Period(d, freq="H") for d in dates], + "value1": np.arange(6, dtype="int64"), + "value2": [1, 2] * 3, + } + ) + + exp_idx1 = pd.PeriodIndex( + [ + "2011-07-19 07:00:00", + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + "2011-07-19 09:00:00", + ], + freq="H", + name="period", + ) + exp_idx2 = Index(["a", "b"] * 3, name="label") + exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2]) + expected = DataFrame( + {"value1": [0, 3, 1, 4, 2, 5], "value2": [1, 2, 2, 1, 1, 2]}, + index=exp_idx, + columns=["value1", "value2"], + ) + + result = df.groupby(["period", "label"]).sum() + tm.assert_frame_equal(result, expected) + + # by level + didx = pd.PeriodIndex(dates, freq="H") + df = DataFrame( + {"value1": np.arange(6, dtype="int64"), "value2": [1, 2, 3, 1, 2, 3]}, + index=didx, + ) + + exp_idx = pd.PeriodIndex( + ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"], + freq="H", + ) + expected = DataFrame( + {"value1": [3, 5, 7], "value2": [2, 4, 6]}, + index=exp_idx, + columns=["value1", "value2"], + ) + + result = df.groupby(level=0).sum() + tm.assert_frame_equal(result, expected) + + def test_groupby_first_datetime64(self): + df = DataFrame([(1, 1351036800000000000), (2, 1351036800000000000)]) + df[1] = df[1].view("M8[ns]") + + assert issubclass(df[1].dtype.type, np.datetime64) + + result = df.groupby(level=0).first() + got_dt = result[1].dtype + assert issubclass(got_dt.type, np.datetime64) + + result = df[1].groupby(level=0).first() + got_dt = result.dtype + assert issubclass(got_dt.type, np.datetime64) + + def test_groupby_max_datetime64(self): + # GH 5869 + # datetimelike dtype conversion from int + df = DataFrame({"A": Timestamp("20130101"), "B": 
np.arange(5)}) + # TODO: can we retain second reso in .apply here? + expected = df.groupby("A")["A"].apply(lambda x: x.max()).astype("M8[s]") + result = df.groupby("A")["A"].max() + tm.assert_series_equal(result, expected) + + def test_groupby_datetime64_32_bit(self): + # GH 6410 / numpy 4328 + # 32-bit under 1.9-dev indexing issue + + df = DataFrame({"A": range(2), "B": [Timestamp("2000-01-1")] * 2}) + result = df.groupby("A")["B"].transform("min") + expected = Series([Timestamp("2000-01-1")] * 2, name="B") + tm.assert_series_equal(result, expected) + + def test_groupby_with_timezone_selection(self): + # GH 11616 + # Test that column selection returns output in correct timezone. + + df = DataFrame( + { + "factor": np.random.default_rng(2).integers(0, 3, size=60), + "time": date_range("01/01/2000 00:00", periods=60, freq="s", tz="UTC"), + } + ) + df1 = df.groupby("factor").max()["time"] + df2 = df.groupby("factor")["time"].max() + tm.assert_series_equal(df1, df2) + + def test_timezone_info(self): + # see gh-11682: Timezone info lost when broadcasting + # scalar datetime to DataFrame + + df = DataFrame({"a": [1], "b": [datetime.now(pytz.utc)]}) + assert df["b"][0].tzinfo == pytz.utc + df = DataFrame({"a": [1, 2, 3]}) + df["b"] = datetime.now(pytz.utc) + assert df["b"][0].tzinfo == pytz.utc + + def test_datetime_count(self): + df = DataFrame( + {"a": [1, 2, 3] * 2, "dates": date_range("now", periods=6, freq="T")} + ) + result = df.groupby("a").dates.count() + expected = Series([2, 2, 2], index=Index([1, 2, 3], name="a"), name="dates") + tm.assert_series_equal(result, expected) + + def test_first_last_max_min_on_time_data(self): + # GH 10295 + # Verify that NaT is not in the result of max, min, first and last on + # DataFrame with datetime or timedelta values.
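+ # (these reductions skip missing values, so dropping the all-NaN rows up front must not change the result)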
+ df_test = DataFrame( + { + "dt": [ + np.nan, + "2015-07-24 10:10", + "2015-07-25 11:11", + "2015-07-23 12:12", + np.nan, + ], + "td": [ + np.nan, + timedelta(days=1), + timedelta(days=2), + timedelta(days=3), + np.nan, + ], + } + ) + df_test.dt = pd.to_datetime(df_test.dt) + df_test["group"] = "A" + df_ref = df_test[df_test.dt.notna()] + + grouped_test = df_test.groupby("group") + grouped_ref = df_ref.groupby("group") + + tm.assert_frame_equal(grouped_ref.max(), grouped_test.max()) + tm.assert_frame_equal(grouped_ref.min(), grouped_test.min()) + tm.assert_frame_equal(grouped_ref.first(), grouped_test.first()) + tm.assert_frame_equal(grouped_ref.last(), grouped_test.last()) + + def test_nunique_with_timegrouper_and_nat(self): + # GH 17575 + test = DataFrame( + { + "time": [ + Timestamp("2016-06-28 09:35:35"), + pd.NaT, + Timestamp("2016-06-28 16:46:28"), + ], + "data": ["1", "2", "3"], + } + ) + + grouper = Grouper(key="time", freq="h") + result = test.groupby(grouper)["data"].nunique() + expected = test[test.time.notnull()].groupby(grouper)["data"].nunique() + expected.index = expected.index._with_freq(None) + tm.assert_series_equal(result, expected) + + def test_scalar_call_versus_list_call(self): + # Issue: 17530 + data_frame = { + "location": ["shanghai", "beijing", "shanghai"], + "time": Series( + ["2017-08-09 13:32:23", "2017-08-11 23:23:15", "2017-08-11 22:23:15"], + dtype="datetime64[ns]", + ), + "value": [1, 2, 3], + } + data_frame = DataFrame(data_frame).set_index("time") + grouper = Grouper(freq="D") + + grouped = data_frame.groupby(grouper) + result = grouped.count() + grouped = data_frame.groupby([grouper]) + expected = grouped.count() + + tm.assert_frame_equal(result, expected) + + def test_grouper_period_index(self): + # GH 32108 + periods = 2 + index = pd.period_range( + start="2018-01", periods=periods, freq="M", name="Month" + ) + period_series = Series(range(periods), index=index) + result = period_series.groupby(period_series.index.month).sum() + + expected = Series( + range(0, periods), index=Index(range(1, periods + 1), name=index.name) + ) + tm.assert_series_equal(result, expected) + + def test_groupby_apply_timegrouper_with_nat_dict_returns( + self, groupby_with_truncated_bingrouper + ): + # GH#43500 case where gb.grouper.result_index and gb.grouper.group_keys_seq + # have different lengths that goes through the `isinstance(values[0], dict)` + # path + gb = groupby_with_truncated_bingrouper + + res = gb["Quantity"].apply(lambda x: {"foo": len(x)}) + + dti = date_range("2013-09-01", "2013-10-01", freq="5D", name="Date") + mi = MultiIndex.from_arrays([dti, ["foo"] * len(dti)]) + expected = Series([3, 0, 0, 0, 0, 0, 2], index=mi, name="Quantity") + tm.assert_series_equal(res, expected) + + def test_groupby_apply_timegrouper_with_nat_scalar_returns( + self, groupby_with_truncated_bingrouper + ): + # GH#43500 Previously raised ValueError bc used index with incorrect + # length in wrap_applied_result + gb = groupby_with_truncated_bingrouper + + res = gb["Quantity"].apply(lambda x: x.iloc[0] if len(x) else np.nan) + + dti = date_range("2013-09-01", "2013-10-01", freq="5D", name="Date") + expected = Series( + [18, np.nan, np.nan, np.nan, np.nan, np.nan, 5], + index=dti._with_freq(None), + name="Quantity", + ) + + tm.assert_series_equal(res, expected) + + def test_groupby_apply_timegrouper_with_nat_apply_squeeze( + self, frame_for_truncated_bingrouper + ): + df = frame_for_truncated_bingrouper + + # We need to create a GroupBy object with only one non-NaT group, + # so use 
a huge freq so that all non-NaT dates will be grouped together + tdg = Grouper(key="Date", freq="100Y") + gb = df.groupby(tdg) + + # check that we will go through the singular_series path + # in _wrap_applied_output_series + assert gb.ngroups == 1 + assert gb._selected_obj._get_axis(gb.axis).nlevels == 1 + + # function that returns a Series + res = gb.apply(lambda x: x["Quantity"] * 2) + + expected = DataFrame( + [[36, 6, 6, 10, 2]], + index=Index([Timestamp("2013-12-31")], name="Date"), + columns=Index([0, 1, 5, 2, 3], name="Quantity"), + ) + tm.assert_frame_equal(res, expected) + + @pytest.mark.single_cpu + def test_groupby_agg_numba_timegrouper_with_nat( + self, groupby_with_truncated_bingrouper + ): + pytest.importorskip("numba") + + # See discussion in GH#43487 + gb = groupby_with_truncated_bingrouper + + result = gb["Quantity"].aggregate( + lambda values, index: np.nanmean(values), engine="numba" + ) + + expected = gb["Quantity"].aggregate("mean") + tm.assert_series_equal(result, expected) + + result_df = gb[["Quantity"]].aggregate( + lambda values, index: np.nanmean(values), engine="numba" + ) + expected_df = gb[["Quantity"]].aggregate("mean") + tm.assert_frame_equal(result_df, expected_df) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_value_counts.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_value_counts.py new file mode 100644 index 00000000..070bdda9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/test_value_counts.py @@ -0,0 +1,1175 @@ +""" +these are systematically testing all of the args to value_counts +with different size combinations. This is to ensure stability of the sorting +and proper parameter handling +""" + +from itertools import product + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + Grouper, + Index, + MultiIndex, + Series, + date_range, + to_datetime, +) +import pandas._testing as tm +from pandas.util.version import Version + + +def tests_value_counts_index_names_category_column(): + # GH44324 Missing name of index category column + df = DataFrame( + { + "gender": ["female"], + "country": ["US"], + } + ) + df["gender"] = df["gender"].astype("category") + result = df.groupby("country")["gender"].value_counts() + + # Construct expected, very specific multiindex + df_mi_expected = DataFrame([["US", "female"]], columns=["country", "gender"]) + df_mi_expected["gender"] = df_mi_expected["gender"].astype("category") + mi_expected = MultiIndex.from_frame(df_mi_expected) + expected = Series([1], index=mi_expected, name="count") + + tm.assert_series_equal(result, expected) + + +# our starting frame +def seed_df(seed_nans, n, m): + days = date_range("2015-08-24", periods=10) + + frame = DataFrame( + { + "1st": np.random.default_rng(2).choice(list("abcd"), n), + "2nd": np.random.default_rng(2).choice(days, n), + "3rd": np.random.default_rng(2).integers(1, m + 1, n), + } + ) + + if seed_nans: + # Explicitly cast to float to avoid implicit cast when setting nan + frame["3rd"] = frame["3rd"].astype("float") + frame.loc[1::11, "1st"] = np.nan + frame.loc[3::17, "2nd"] = np.nan + frame.loc[7::19, "3rd"] = np.nan + frame.loc[8::19, "3rd"] = np.nan + frame.loc[9::19, "3rd"] = np.nan + + return frame + + +# create input df, keys, and the bins +binned = [] +ids = [] +for seed_nans in [True, False]: + for n, m in product((100, 1000), (5, 20)): + df = seed_df(seed_nans, n, m) + bins = None, 
np.arange(0, max(5, df["3rd"].max()) + 1, 2) + keys = "1st", "2nd", ["1st", "2nd"] + for k, b in product(keys, bins): + binned.append((df, k, b, n, m)) + ids.append(f"{k}-{n}-{m}") + + +@pytest.mark.slow +@pytest.mark.parametrize("df, keys, bins, n, m", binned, ids=ids) +@pytest.mark.parametrize("isort", [True, False]) +@pytest.mark.parametrize("normalize, name", [(True, "proportion"), (False, "count")]) +@pytest.mark.parametrize("sort", [True, False]) +@pytest.mark.parametrize("ascending", [True, False]) +@pytest.mark.parametrize("dropna", [True, False]) +def test_series_groupby_value_counts( + df, keys, bins, n, m, isort, normalize, name, sort, ascending, dropna +): + def rebuild_index(df): + arr = list(map(df.index.get_level_values, range(df.index.nlevels))) + df.index = MultiIndex.from_arrays(arr, names=df.index.names) + return df + + kwargs = { + "normalize": normalize, + "sort": sort, + "ascending": ascending, + "dropna": dropna, + "bins": bins, + } + + gr = df.groupby(keys, sort=isort) + left = gr["3rd"].value_counts(**kwargs) + + gr = df.groupby(keys, sort=isort) + right = gr["3rd"].apply(Series.value_counts, **kwargs) + right.index.names = right.index.names[:-1] + ["3rd"] + # https://github.com/pandas-dev/pandas/issues/49909 + right = right.rename(name) + + # have to sort on index because of unstable sort on values + left, right = map(rebuild_index, (left, right)) # xref GH9212 + tm.assert_series_equal(left.sort_index(), right.sort_index()) + + +@pytest.mark.parametrize("utc", [True, False]) +def test_series_groupby_value_counts_with_grouper(utc): + # GH28479 + df = DataFrame( + { + "Timestamp": [ + 1565083561, + 1565083561 + 86400, + 1565083561 + 86500, + 1565083561 + 86400 * 2, + 1565083561 + 86400 * 3, + 1565083561 + 86500 * 3, + 1565083561 + 86400 * 4, + ], + "Food": ["apple", "apple", "banana", "banana", "orange", "orange", "pear"], + } + ).drop([3]) + + df["Datetime"] = to_datetime(df["Timestamp"], utc=utc, unit="s") + dfg = df.groupby(Grouper(freq="1D", key="Datetime")) + + # have to sort on index because of unstable sort on values xref GH9212 + result = dfg["Food"].value_counts().sort_index() + expected = dfg["Food"].apply(Series.value_counts).sort_index() + expected.index.names = result.index.names + # https://github.com/pandas-dev/pandas/issues/49909 + expected = expected.rename("count") + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("columns", [["A", "B"], ["A", "B", "C"]]) +def test_series_groupby_value_counts_empty(columns): + # GH39172 + df = DataFrame(columns=columns) + dfg = df.groupby(columns[:-1]) + + result = dfg[columns[-1]].value_counts() + expected = Series([], dtype=result.dtype, name="count") + expected.index = MultiIndex.from_arrays([[]] * len(columns), names=columns) + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("columns", [["A", "B"], ["A", "B", "C"]]) +def test_series_groupby_value_counts_one_row(columns): + # GH42618 + df = DataFrame(data=[range(len(columns))], columns=columns) + dfg = df.groupby(columns[:-1]) + + result = dfg[columns[-1]].value_counts() + expected = df.value_counts() + + tm.assert_series_equal(result, expected) + + +def test_series_groupby_value_counts_on_categorical(): + # GH38672 + + s = Series(Categorical(["a"], categories=["a", "b"])) + result = s.groupby([0]).value_counts() + + expected = Series( + data=[1, 0], + index=MultiIndex.from_arrays( + [ + np.array([0, 0]), + CategoricalIndex( + ["a", "b"], categories=["a", "b"], ordered=False, dtype="category" + ), + ] + ), + 
name="count", + ) + + # Expected: + # 0 a 1 + # b 0 + # dtype: int64 + + tm.assert_series_equal(result, expected) + + +def test_series_groupby_value_counts_no_sort(): + # GH#50482 + df = DataFrame( + { + "gender": ["male", "male", "female", "male", "female", "male"], + "education": ["low", "medium", "high", "low", "high", "low"], + "country": ["US", "FR", "US", "FR", "FR", "FR"], + } + ) + gb = df.groupby(["country", "gender"], sort=False)["education"] + result = gb.value_counts(sort=False) + index = MultiIndex( + levels=[["US", "FR"], ["male", "female"], ["low", "medium", "high"]], + codes=[[0, 1, 0, 1, 1], [0, 0, 1, 0, 1], [0, 1, 2, 0, 2]], + names=["country", "gender", "education"], + ) + expected = Series([1, 1, 1, 2, 1], index=index, name="count") + tm.assert_series_equal(result, expected) + + +@pytest.fixture +def education_df(): + return DataFrame( + { + "gender": ["male", "male", "female", "male", "female", "male"], + "education": ["low", "medium", "high", "low", "high", "low"], + "country": ["US", "FR", "US", "FR", "FR", "FR"], + } + ) + + +def test_axis(education_df): + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gp = education_df.groupby("country", axis=1) + with pytest.raises(NotImplementedError, match="axis"): + gp.value_counts() + + +def test_bad_subset(education_df): + gp = education_df.groupby("country") + with pytest.raises(ValueError, match="subset"): + gp.value_counts(subset=["country"]) + + +def test_basic(education_df, request): + # gh43564 + if Version(np.__version__) >= Version("1.25"): + request.node.add_marker( + pytest.mark.xfail( + reason=( + "pandas default unstable sorting of duplicates" + "issue with numpy>=1.25 with AVX instructions" + ), + strict=False, + ) + ) + result = education_df.groupby("country")[["gender", "education"]].value_counts( + normalize=True + ) + expected = Series( + data=[0.5, 0.25, 0.25, 0.5, 0.5], + index=MultiIndex.from_tuples( + [ + ("FR", "male", "low"), + ("FR", "female", "high"), + ("FR", "male", "medium"), + ("US", "female", "high"), + ("US", "male", "low"), + ], + names=["country", "gender", "education"], + ), + name="proportion", + ) + tm.assert_series_equal(result, expected) + + +def _frame_value_counts(df, keys, normalize, sort, ascending): + return df[keys].value_counts(normalize=normalize, sort=sort, ascending=ascending) + + +@pytest.mark.parametrize("groupby", ["column", "array", "function"]) +@pytest.mark.parametrize("normalize, name", [(True, "proportion"), (False, "count")]) +@pytest.mark.parametrize( + "sort, ascending", + [ + (False, None), + (True, True), + (True, False), + ], +) +@pytest.mark.parametrize("as_index", [True, False]) +@pytest.mark.parametrize("frame", [True, False]) +def test_against_frame_and_seriesgroupby( + education_df, groupby, normalize, name, sort, ascending, as_index, frame, request +): + # test all parameters: + # - Use column, array or function as by= parameter + # - Whether or not to normalize + # - Whether or not to sort and how + # - Whether or not to use the groupby as an index + # - 3-way compare against: + # - apply with :meth:`~DataFrame.value_counts` + # - `~SeriesGroupBy.value_counts` + if Version(np.__version__) >= Version("1.25") and frame and sort and normalize: + request.node.add_marker( + pytest.mark.xfail( + reason=( + "pandas default unstable sorting of duplicates" + "issue with numpy>=1.25 with AVX instructions" + ), + strict=False, + ) + ) + by = { + "column": "country", + "array": 
education_df["country"].values, + "function": lambda x: education_df["country"][x] == "US", + }[groupby] + + gp = education_df.groupby(by=by, as_index=as_index) + result = gp[["gender", "education"]].value_counts( + normalize=normalize, sort=sort, ascending=ascending + ) + if frame: + # compare against apply with DataFrame value_counts + expected = gp.apply( + _frame_value_counts, ["gender", "education"], normalize, sort, ascending + ) + + if as_index: + tm.assert_series_equal(result, expected) + else: + name = "proportion" if normalize else "count" + expected = expected.reset_index().rename({0: name}, axis=1) + if groupby == "column": + expected = expected.rename({"level_0": "country"}, axis=1) + expected["country"] = np.where(expected["country"], "US", "FR") + elif groupby == "function": + expected["level_0"] = expected["level_0"] == 1 + else: + expected["level_0"] = np.where(expected["level_0"], "US", "FR") + tm.assert_frame_equal(result, expected) + else: + # compare against SeriesGroupBy value_counts + education_df["both"] = education_df["gender"] + "-" + education_df["education"] + expected = gp["both"].value_counts( + normalize=normalize, sort=sort, ascending=ascending + ) + expected.name = name + if as_index: + index_frame = expected.index.to_frame(index=False) + index_frame["gender"] = index_frame["both"].str.split("-").str.get(0) + index_frame["education"] = index_frame["both"].str.split("-").str.get(1) + del index_frame["both"] + index_frame = index_frame.rename({0: None}, axis=1) + expected.index = MultiIndex.from_frame(index_frame) + tm.assert_series_equal(result, expected) + else: + expected.insert(1, "gender", expected["both"].str.split("-").str.get(0)) + expected.insert(2, "education", expected["both"].str.split("-").str.get(1)) + del expected["both"] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype", + [ + object, + pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")), + pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")), + ], +) +@pytest.mark.parametrize("normalize", [True, False]) +@pytest.mark.parametrize( + "sort, ascending, expected_rows, expected_count, expected_group_size", + [ + (False, None, [0, 1, 2, 3, 4], [1, 1, 1, 2, 1], [1, 3, 1, 3, 1]), + (True, False, [4, 3, 1, 2, 0], [1, 2, 1, 1, 1], [1, 3, 3, 1, 1]), + (True, True, [4, 1, 3, 2, 0], [1, 1, 2, 1, 1], [1, 3, 3, 1, 1]), + ], +) +def test_compound( + education_df, + normalize, + sort, + ascending, + expected_rows, + expected_count, + expected_group_size, + dtype, +): + education_df = education_df.astype(dtype) + education_df.columns = education_df.columns.astype(dtype) + # Multiple groupby keys and as_index=False + gp = education_df.groupby(["country", "gender"], as_index=False, sort=False) + result = gp["education"].value_counts( + normalize=normalize, sort=sort, ascending=ascending + ) + expected = DataFrame() + for column in ["country", "gender", "education"]: + expected[column] = [education_df[column][row] for row in expected_rows] + expected = expected.astype(dtype) + expected.columns = expected.columns.astype(dtype) + if normalize: + expected["proportion"] = expected_count + expected["proportion"] /= expected_group_size + if dtype == "string[pyarrow]": + expected["proportion"] = expected["proportion"].convert_dtypes() + else: + expected["count"] = expected_count + if dtype == "string[pyarrow]": + expected["count"] = expected["count"].convert_dtypes() + tm.assert_frame_equal(result, expected) + + +@pytest.fixture +def animals_df(): + return 
DataFrame( + {"key": [1, 1, 1, 1], "num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]}, + index=["falcon", "dog", "cat", "ant"], + ) + + +@pytest.mark.parametrize( + "sort, ascending, normalize, name, expected_data, expected_index", + [ + (False, None, False, "count", [1, 2, 1], [(1, 1, 1), (2, 4, 6), (2, 0, 0)]), + (True, True, False, "count", [1, 1, 2], [(1, 1, 1), (2, 6, 4), (2, 0, 0)]), + (True, False, False, "count", [2, 1, 1], [(1, 1, 1), (4, 2, 6), (0, 2, 0)]), + ( + True, + False, + True, + "proportion", + [0.5, 0.25, 0.25], + [(1, 1, 1), (4, 2, 6), (0, 2, 0)], + ), + ], +) +def test_data_frame_value_counts( + animals_df, sort, ascending, normalize, name, expected_data, expected_index +): + # 3-way compare with :meth:`~DataFrame.value_counts` + # Tests from frame/methods/test_value_counts.py + result_frame = animals_df.value_counts( + sort=sort, ascending=ascending, normalize=normalize + ) + expected = Series( + data=expected_data, + index=MultiIndex.from_arrays( + expected_index, names=["key", "num_legs", "num_wings"] + ), + name=name, + ) + tm.assert_series_equal(result_frame, expected) + + result_frame_groupby = animals_df.groupby("key").value_counts( + sort=sort, ascending=ascending, normalize=normalize + ) + + tm.assert_series_equal(result_frame_groupby, expected) + + +@pytest.fixture +def nulls_df(): + n = np.nan + return DataFrame( + { + "A": [1, 1, n, 4, n, 6, 6, 6, 6], + "B": [1, 1, 3, n, n, 6, 6, 6, 6], + "C": [1, 2, 3, 4, 5, 6, n, 8, n], + "D": [1, 2, 3, 4, 5, 6, 7, n, n], + } + ) + + +@pytest.mark.parametrize( + "group_dropna, count_dropna, expected_rows, expected_values", + [ + ( + False, + False, + [0, 1, 3, 5, 7, 6, 8, 2, 4], + [0.5, 0.5, 1.0, 0.25, 0.25, 0.25, 0.25, 1.0, 1.0], + ), + (False, True, [0, 1, 3, 5, 2, 4], [0.5, 0.5, 1.0, 1.0, 1.0, 1.0]), + (True, False, [0, 1, 5, 7, 6, 8], [0.5, 0.5, 0.25, 0.25, 0.25, 0.25]), + (True, True, [0, 1, 5], [0.5, 0.5, 1.0]), + ], +) +def test_dropna_combinations( + nulls_df, group_dropna, count_dropna, expected_rows, expected_values, request +): + if Version(np.__version__) >= Version("1.25") and not group_dropna: + request.node.add_marker( + pytest.mark.xfail( + reason=( + "pandas default unstable sorting of duplicates" + "issue with numpy>=1.25 with AVX instructions" + ), + strict=False, + ) + ) + gp = nulls_df.groupby(["A", "B"], dropna=group_dropna) + result = gp.value_counts(normalize=True, sort=True, dropna=count_dropna) + columns = DataFrame() + for column in nulls_df.columns: + columns[column] = [nulls_df[column][row] for row in expected_rows] + index = MultiIndex.from_frame(columns) + expected = Series(data=expected_values, index=index, name="proportion") + tm.assert_series_equal(result, expected) + + +@pytest.fixture +def names_with_nulls_df(nulls_fixture): + return DataFrame( + { + "key": [1, 1, 1, 1], + "first_name": ["John", "Anne", "John", "Beth"], + "middle_name": ["Smith", nulls_fixture, nulls_fixture, "Louise"], + }, + ) + + +@pytest.mark.parametrize( + "dropna, expected_data, expected_index", + [ + ( + True, + [1, 1], + MultiIndex.from_arrays( + [(1, 1), ("Beth", "John"), ("Louise", "Smith")], + names=["key", "first_name", "middle_name"], + ), + ), + ( + False, + [1, 1, 1, 1], + MultiIndex( + levels=[ + Index([1]), + Index(["Anne", "Beth", "John"]), + Index(["Louise", "Smith", np.nan]), + ], + codes=[[0, 0, 0, 0], [0, 1, 2, 2], [2, 0, 1, 2]], + names=["key", "first_name", "middle_name"], + ), + ), + ], +) +@pytest.mark.parametrize("normalize, name", [(False, "count"), (True, "proportion")]) +def 
test_data_frame_value_counts_dropna( + names_with_nulls_df, dropna, normalize, name, expected_data, expected_index +): + # GH 41334 + # 3-way compare with :meth:`~DataFrame.value_counts` + # Tests with nulls from frame/methods/test_value_counts.py + result_frame = names_with_nulls_df.value_counts(dropna=dropna, normalize=normalize) + expected = Series( + data=expected_data, + index=expected_index, + name=name, + ) + if normalize: + expected /= float(len(expected_data)) + + tm.assert_series_equal(result_frame, expected) + + result_frame_groupby = names_with_nulls_df.groupby("key").value_counts( + dropna=dropna, normalize=normalize + ) + + tm.assert_series_equal(result_frame_groupby, expected) + + +@pytest.mark.parametrize("as_index", [False, True]) +@pytest.mark.parametrize("observed", [False, True]) +@pytest.mark.parametrize( + "normalize, name, expected_data", + [ + ( + False, + "count", + np.array([2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0], dtype=np.int64), + ), + ( + True, + "proportion", + np.array([0.5, 0.25, 0.25, 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]), + ), + ], +) +def test_categorical_single_grouper_with_only_observed_categories( + education_df, as_index, observed, normalize, name, expected_data, request +): + # Test single categorical grouper with only observed grouping categories + # when non-groupers are also categorical + if Version(np.__version__) >= Version("1.25"): + request.node.add_marker( + pytest.mark.xfail( + reason=( + "pandas default unstable sorting of duplicates" + "issue with numpy>=1.25 with AVX instructions" + ), + strict=False, + ) + ) + + gp = education_df.astype("category").groupby( + "country", as_index=as_index, observed=observed + ) + result = gp.value_counts(normalize=normalize) + + expected_index = MultiIndex.from_tuples( + [ + ("FR", "male", "low"), + ("FR", "female", "high"), + ("FR", "male", "medium"), + ("FR", "female", "low"), + ("FR", "female", "medium"), + ("FR", "male", "high"), + ("US", "female", "high"), + ("US", "male", "low"), + ("US", "female", "low"), + ("US", "female", "medium"), + ("US", "male", "high"), + ("US", "male", "medium"), + ], + names=["country", "gender", "education"], + ) + + expected_series = Series( + data=expected_data, + index=expected_index, + name=name, + ) + for i in range(3): + expected_series.index = expected_series.index.set_levels( + CategoricalIndex(expected_series.index.levels[i]), level=i + ) + + if as_index: + tm.assert_series_equal(result, expected_series) + else: + expected = expected_series.reset_index( + name="proportion" if normalize else "count" + ) + tm.assert_frame_equal(result, expected) + + +def assert_categorical_single_grouper( + education_df, as_index, observed, expected_index, normalize, name, expected_data +): + # Test single categorical grouper when non-groupers are also categorical + education_df = education_df.copy().astype("category") + + # Add non-observed grouping categories + education_df["country"] = education_df["country"].cat.add_categories(["ASIA"]) + + gp = education_df.groupby("country", as_index=as_index, observed=observed) + result = gp.value_counts(normalize=normalize) + + expected_series = Series( + data=expected_data, + index=MultiIndex.from_tuples( + expected_index, + names=["country", "gender", "education"], + ), + name=name, + ) + for i in range(3): + index_level = CategoricalIndex(expected_series.index.levels[i]) + if i == 0: + index_level = index_level.set_categories( + education_df["country"].cat.categories + ) + expected_series.index = 
expected_series.index.set_levels(index_level, level=i) + + if as_index: + tm.assert_series_equal(result, expected_series) + else: + expected = expected_series.reset_index(name=name) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("as_index", [True, False]) +@pytest.mark.parametrize( + "normalize, name, expected_data", + [ + ( + False, + "count", + np.array([2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0], dtype=np.int64), + ), + ( + True, + "proportion", + np.array([0.5, 0.25, 0.25, 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]), + ), + ], +) +def test_categorical_single_grouper_observed_true( + education_df, as_index, normalize, name, expected_data, request +): + # GH#46357 + + if Version(np.__version__) >= Version("1.25"): + request.node.add_marker( + pytest.mark.xfail( + reason=( + "pandas default unstable sorting of duplicates" + "issue with numpy>=1.25 with AVX instructions" + ), + strict=False, + ) + ) + + expected_index = [ + ("FR", "male", "low"), + ("FR", "female", "high"), + ("FR", "male", "medium"), + ("FR", "female", "low"), + ("FR", "female", "medium"), + ("FR", "male", "high"), + ("US", "female", "high"), + ("US", "male", "low"), + ("US", "female", "low"), + ("US", "female", "medium"), + ("US", "male", "high"), + ("US", "male", "medium"), + ] + + assert_categorical_single_grouper( + education_df=education_df, + as_index=as_index, + observed=True, + expected_index=expected_index, + normalize=normalize, + name=name, + expected_data=expected_data, + ) + + +@pytest.mark.parametrize("as_index", [True, False]) +@pytest.mark.parametrize( + "normalize, name, expected_data", + [ + ( + False, + "count", + np.array( + [2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.int64 + ), + ), + ( + True, + "proportion", + np.array( + [ + 0.5, + 0.25, + 0.25, + 0.0, + 0.0, + 0.0, + 0.5, + 0.5, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + ] + ), + ), + ], +) +def test_categorical_single_grouper_observed_false( + education_df, as_index, normalize, name, expected_data, request +): + # GH#46357 + + if Version(np.__version__) >= Version("1.25"): + request.node.add_marker( + pytest.mark.xfail( + reason=( + "pandas default unstable sorting of duplicates" + "issue with numpy>=1.25 with AVX instructions" + ), + strict=False, + ) + ) + + expected_index = [ + ("FR", "male", "low"), + ("FR", "female", "high"), + ("FR", "male", "medium"), + ("FR", "female", "low"), + ("FR", "male", "high"), + ("FR", "female", "medium"), + ("US", "female", "high"), + ("US", "male", "low"), + ("US", "male", "medium"), + ("US", "male", "high"), + ("US", "female", "medium"), + ("US", "female", "low"), + ("ASIA", "male", "low"), + ("ASIA", "male", "high"), + ("ASIA", "female", "medium"), + ("ASIA", "female", "low"), + ("ASIA", "female", "high"), + ("ASIA", "male", "medium"), + ] + + assert_categorical_single_grouper( + education_df=education_df, + as_index=as_index, + observed=False, + expected_index=expected_index, + normalize=normalize, + name=name, + expected_data=expected_data, + ) + + +@pytest.mark.parametrize("as_index", [True, False]) +@pytest.mark.parametrize( + "observed, expected_index", + [ + ( + False, + [ + ("FR", "high", "female"), + ("FR", "high", "male"), + ("FR", "low", "male"), + ("FR", "low", "female"), + ("FR", "medium", "male"), + ("FR", "medium", "female"), + ("US", "high", "female"), + ("US", "high", "male"), + ("US", "low", "male"), + ("US", "low", "female"), + ("US", "medium", "female"), + ("US", "medium", "male"), + ], + ), + ( + True, + [ + ("FR", 
"high", "female"), + ("FR", "low", "male"), + ("FR", "medium", "male"), + ("US", "high", "female"), + ("US", "low", "male"), + ], + ), + ], +) +@pytest.mark.parametrize( + "normalize, name, expected_data", + [ + ( + False, + "count", + np.array([1, 0, 2, 0, 1, 0, 1, 0, 1, 0, 0, 0], dtype=np.int64), + ), + ( + True, + "proportion", + # NaN values corresponds to non-observed groups + np.array([1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]), + ), + ], +) +def test_categorical_multiple_groupers( + education_df, as_index, observed, expected_index, normalize, name, expected_data +): + # GH#46357 + + # Test multiple categorical groupers when non-groupers are non-categorical + education_df = education_df.copy() + education_df["country"] = education_df["country"].astype("category") + education_df["education"] = education_df["education"].astype("category") + + gp = education_df.groupby( + ["country", "education"], as_index=as_index, observed=observed + ) + result = gp.value_counts(normalize=normalize) + + expected_series = Series( + data=expected_data[expected_data > 0.0] if observed else expected_data, + index=MultiIndex.from_tuples( + expected_index, + names=["country", "education", "gender"], + ), + name=name, + ) + for i in range(2): + expected_series.index = expected_series.index.set_levels( + CategoricalIndex(expected_series.index.levels[i]), level=i + ) + + if as_index: + tm.assert_series_equal(result, expected_series) + else: + expected = expected_series.reset_index( + name="proportion" if normalize else "count" + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("as_index", [False, True]) +@pytest.mark.parametrize("observed", [False, True]) +@pytest.mark.parametrize( + "normalize, name, expected_data", + [ + ( + False, + "count", + np.array([2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0], dtype=np.int64), + ), + ( + True, + "proportion", + # NaN values corresponds to non-observed groups + np.array([0.5, 0.25, 0.25, 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]), + ), + ], +) +def test_categorical_non_groupers( + education_df, as_index, observed, normalize, name, expected_data, request +): + # GH#46357 Test non-observed categories are included in the result, + # regardless of `observed` + + if Version(np.__version__) >= Version("1.25"): + request.node.add_marker( + pytest.mark.xfail( + reason=( + "pandas default unstable sorting of duplicates" + "issue with numpy>=1.25 with AVX instructions" + ), + strict=False, + ) + ) + + education_df = education_df.copy() + education_df["gender"] = education_df["gender"].astype("category") + education_df["education"] = education_df["education"].astype("category") + + gp = education_df.groupby("country", as_index=as_index, observed=observed) + result = gp.value_counts(normalize=normalize) + + expected_index = [ + ("FR", "male", "low"), + ("FR", "female", "high"), + ("FR", "male", "medium"), + ("FR", "female", "low"), + ("FR", "female", "medium"), + ("FR", "male", "high"), + ("US", "female", "high"), + ("US", "male", "low"), + ("US", "female", "low"), + ("US", "female", "medium"), + ("US", "male", "high"), + ("US", "male", "medium"), + ] + expected_series = Series( + data=expected_data, + index=MultiIndex.from_tuples( + expected_index, + names=["country", "gender", "education"], + ), + name=name, + ) + for i in range(1, 3): + expected_series.index = expected_series.index.set_levels( + CategoricalIndex(expected_series.index.levels[i]), level=i + ) + + if as_index: + tm.assert_series_equal(result, expected_series) + else: + expected 
= expected_series.reset_index( + name="proportion" if normalize else "count" + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "normalize, expected_label, expected_values", + [ + (False, "count", [1, 1, 1]), + (True, "proportion", [0.5, 0.5, 1.0]), + ], +) +def test_mixed_groupings(normalize, expected_label, expected_values): + # Test multiple groupings + df = DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]}) + gp = df.groupby([[4, 5, 4], "A", lambda i: 7 if i == 1 else 8], as_index=False) + result = gp.value_counts(sort=True, normalize=normalize) + expected = DataFrame( + { + "level_0": np.array([4, 4, 5], dtype=int), + "A": [1, 1, 2], + "level_2": [8, 8, 7], + "B": [1, 3, 2], + expected_label: expected_values, + } + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "test, columns, expected_names", + [ + ("repeat", list("abbde"), ["a", None, "d", "b", "b", "e"]), + ("level", list("abcd") + ["level_1"], ["a", None, "d", "b", "c", "level_1"]), + ], +) +@pytest.mark.parametrize("as_index", [False, True]) +def test_column_label_duplicates(test, columns, expected_names, as_index): + # GH 44992 + # Test for duplicate input column labels and generated duplicate labels + df = DataFrame([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]], columns=columns) + expected_data = [(1, 0, 7, 3, 5, 9), (2, 1, 8, 4, 6, 10)] + keys = ["a", np.array([0, 1], dtype=np.int64), "d"] + result = df.groupby(keys, as_index=as_index).value_counts() + if as_index: + expected = Series( + data=(1, 1), + index=MultiIndex.from_tuples( + expected_data, + names=expected_names, + ), + name="count", + ) + tm.assert_series_equal(result, expected) + else: + expected_data = [list(row) + [1] for row in expected_data] + expected_columns = list(expected_names) + expected_columns[1] = "level_1" + expected_columns.append("count") + expected = DataFrame(expected_data, columns=expected_columns) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "normalize, expected_label", + [ + (False, "count"), + (True, "proportion"), + ], +) +def test_result_label_duplicates(normalize, expected_label): + # Test for result column label duplicating an input column label + gb = DataFrame([[1, 2, 3]], columns=["a", "b", expected_label]).groupby( + "a", as_index=False + ) + msg = f"Column label '{expected_label}' is duplicate of result column" + with pytest.raises(ValueError, match=msg): + gb.value_counts(normalize=normalize) + + +def test_ambiguous_grouping(): + # Test that groupby is not confused by groupings length equal to row count + df = DataFrame({"a": [1, 1]}) + gb = df.groupby(np.array([1, 1], dtype=np.int64)) + result = gb.value_counts() + expected = Series( + [2], index=MultiIndex.from_tuples([[1, 1]], names=[None, "a"]), name="count" + ) + tm.assert_series_equal(result, expected) + + +def test_subset_overlaps_gb_key_raises(): + # GH 46383 + df = DataFrame({"c1": ["a", "b", "c"], "c2": ["x", "y", "y"]}, index=[0, 1, 1]) + msg = "Keys {'c1'} in subset cannot be in the groupby column keys." + with pytest.raises(ValueError, match=msg): + df.groupby("c1").value_counts(subset=["c1"]) + + +def test_subset_doesnt_exist_in_frame(): + # GH 46383 + df = DataFrame({"c1": ["a", "b", "c"], "c2": ["x", "y", "y"]}, index=[0, 1, 1]) + msg = "Keys {'c3'} in subset do not exist in the DataFrame." 
+ with pytest.raises(ValueError, match=msg): + df.groupby("c1").value_counts(subset=["c3"]) + + +def test_subset(): + # GH 46383 + df = DataFrame({"c1": ["a", "b", "c"], "c2": ["x", "y", "y"]}, index=[0, 1, 1]) + result = df.groupby(level=0).value_counts(subset=["c2"]) + expected = Series( + [1, 2], + index=MultiIndex.from_arrays([[0, 1], ["x", "y"]], names=[None, "c2"]), + name="count", + ) + tm.assert_series_equal(result, expected) + + +def test_subset_duplicate_columns(): + # GH 46383 + df = DataFrame( + [["a", "x", "x"], ["b", "y", "y"], ["b", "y", "y"]], + index=[0, 1, 1], + columns=["c1", "c2", "c2"], + ) + result = df.groupby(level=0).value_counts(subset=["c2"]) + expected = Series( + [1, 2], + index=MultiIndex.from_arrays( + [[0, 1], ["x", "y"], ["x", "y"]], names=[None, "c2", "c2"] + ), + name="count", + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("utc", [True, False]) +def test_value_counts_time_grouper(utc): + # GH#50486 + df = DataFrame( + { + "Timestamp": [ + 1565083561, + 1565083561 + 86400, + 1565083561 + 86500, + 1565083561 + 86400 * 2, + 1565083561 + 86400 * 3, + 1565083561 + 86500 * 3, + 1565083561 + 86400 * 4, + ], + "Food": ["apple", "apple", "banana", "banana", "orange", "orange", "pear"], + } + ).drop([3]) + + df["Datetime"] = to_datetime(df["Timestamp"], utc=utc, unit="s") + gb = df.groupby(Grouper(freq="1D", key="Datetime")) + result = gb.value_counts() + dates = to_datetime( + ["2019-08-06", "2019-08-07", "2019-08-09", "2019-08-10"], utc=utc + ) + timestamps = df["Timestamp"].unique() + index = MultiIndex( + levels=[dates, timestamps, ["apple", "banana", "orange", "pear"]], + codes=[[0, 1, 1, 2, 2, 3], range(6), [0, 0, 1, 2, 2, 3]], + names=["Datetime", "Timestamp", "Food"], + ) + expected = Series(1, index=index, name="count") + tm.assert_series_equal(result, expected) + + +def test_value_counts_integer_columns(): + # GH#55627 + df = DataFrame({1: ["a", "a", "a"], 2: ["a", "a", "d"], 3: ["a", "b", "c"]}) + gp = df.groupby([1, 2], as_index=False, sort=False) + result = gp[3].value_counts() + expected = DataFrame( + {1: ["a", "a", "a"], 2: ["a", "a", "d"], 3: ["a", "b", "c"], "count": 1} + ) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/transform/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/transform/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/transform/test_numba.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/transform/test_numba.py new file mode 100644 index 00000000..61fcc930 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/transform/test_numba.py @@ -0,0 +1,284 @@ +import numpy as np +import pytest + +from pandas.errors import NumbaUtilError + +from pandas import ( + DataFrame, + Series, + option_context, +) +import pandas._testing as tm + +pytestmark = pytest.mark.single_cpu + + +def test_correct_function_signature(): + pytest.importorskip("numba") + + def incorrect_function(x): + return x + 1 + + data = DataFrame( + {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]}, + columns=["key", "data"], + ) + with pytest.raises(NumbaUtilError, match="The first 2"): + data.groupby("key").transform(incorrect_function, engine="numba") + + with pytest.raises(NumbaUtilError, match="The first 2"): + data.groupby("key")["data"].transform(incorrect_function, engine="numba") + + +def 
test_check_nopython_kwargs(): + pytest.importorskip("numba") + + def incorrect_function(values, index): + return values + 1 + + data = DataFrame( + {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]}, + columns=["key", "data"], + ) + with pytest.raises(NumbaUtilError, match="numba does not support"): + data.groupby("key").transform(incorrect_function, engine="numba", a=1) + + with pytest.raises(NumbaUtilError, match="numba does not support"): + data.groupby("key")["data"].transform(incorrect_function, engine="numba", a=1) + + +@pytest.mark.filterwarnings("ignore") +# Filter warnings when parallel=True and the function can't be parallelized by Numba +@pytest.mark.parametrize("jit", [True, False]) +@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"]) +@pytest.mark.parametrize("as_index", [True, False]) +def test_numba_vs_cython(jit, pandas_obj, nogil, parallel, nopython, as_index): + pytest.importorskip("numba") + + def func(values, index): + return values + 1 + + if jit: + # Test accepted jitted functions + import numba + + func = numba.jit(func) + + data = DataFrame( + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] + ) + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + grouped = data.groupby(0, as_index=as_index) + if pandas_obj == "Series": + grouped = grouped[1] + + result = grouped.transform(func, engine="numba", engine_kwargs=engine_kwargs) + expected = grouped.transform(lambda x: x + 1, engine="cython") + + tm.assert_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore") +# Filter warnings when parallel=True and the function can't be parallelized by Numba +@pytest.mark.parametrize("jit", [True, False]) +@pytest.mark.parametrize("pandas_obj", ["Series", "DataFrame"]) +def test_cache(jit, pandas_obj, nogil, parallel, nopython): + # Test that the functions are cached correctly if we switch functions + pytest.importorskip("numba") + + def func_1(values, index): + return values + 1 + + def func_2(values, index): + return values * 5 + + if jit: + import numba + + func_1 = numba.jit(func_1) + func_2 = numba.jit(func_2) + + data = DataFrame( + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] + ) + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + grouped = data.groupby(0) + if pandas_obj == "Series": + grouped = grouped[1] + + result = grouped.transform(func_1, engine="numba", engine_kwargs=engine_kwargs) + expected = grouped.transform(lambda x: x + 1, engine="cython") + tm.assert_equal(result, expected) + + result = grouped.transform(func_2, engine="numba", engine_kwargs=engine_kwargs) + expected = grouped.transform(lambda x: x * 5, engine="cython") + tm.assert_equal(result, expected) + + # Retest func_1 which should use the cache + result = grouped.transform(func_1, engine="numba", engine_kwargs=engine_kwargs) + expected = grouped.transform(lambda x: x + 1, engine="cython") + tm.assert_equal(result, expected) + + +def test_use_global_config(): + pytest.importorskip("numba") + + def func_1(values, index): + return values + 1 + + data = DataFrame( + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] + ) + grouped = data.groupby(0) + expected = grouped.transform(func_1, engine="numba") + with option_context("compute.use_numba", True): + result = grouped.transform(func_1, engine=None) + tm.assert_frame_equal(expected, result) + + +# TODO: Test more than just reductions (e.g. 
actually test transformations once we have +@pytest.mark.parametrize( + "agg_func", [["min", "max"], "min", {"B": ["min", "max"], "C": "sum"}] +) +def test_string_cython_vs_numba(agg_func, numba_supported_reductions): + pytest.importorskip("numba") + agg_func, kwargs = numba_supported_reductions + data = DataFrame( + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] + ) + grouped = data.groupby(0) + + result = grouped.transform(agg_func, engine="numba", **kwargs) + expected = grouped.transform(agg_func, engine="cython", **kwargs) + tm.assert_frame_equal(result, expected) + + result = grouped[1].transform(agg_func, engine="numba", **kwargs) + expected = grouped[1].transform(agg_func, engine="cython", **kwargs) + tm.assert_series_equal(result, expected) + + +def test_args_not_cached(): + # GH 41647 + pytest.importorskip("numba") + + def sum_last(values, index, n): + return values[-n:].sum() + + df = DataFrame({"id": [0, 0, 1, 1], "x": [1, 1, 1, 1]}) + grouped_x = df.groupby("id")["x"] + result = grouped_x.transform(sum_last, 1, engine="numba") + expected = Series([1.0] * 4, name="x") + tm.assert_series_equal(result, expected) + + result = grouped_x.transform(sum_last, 2, engine="numba") + expected = Series([2.0] * 4, name="x") + tm.assert_series_equal(result, expected) + + +def test_index_data_correctly_passed(): + # GH 43133 + pytest.importorskip("numba") + + def f(values, index): + return index - 1 + + df = DataFrame({"group": ["A", "A", "B"], "v": [4, 5, 6]}, index=[-1, -2, -3]) + result = df.groupby("group").transform(f, engine="numba") + expected = DataFrame([-4.0, -3.0, -2.0], columns=["v"], index=[-1, -2, -3]) + tm.assert_frame_equal(result, expected) + + +def test_engine_kwargs_not_cached(): + # If the user passes a different set of engine_kwargs don't return the same + # jitted function + pytest.importorskip("numba") + nogil = True + parallel = False + nopython = True + + def func_kwargs(values, index): + return nogil + parallel + nopython + + engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel} + df = DataFrame({"value": [0, 0, 0]}) + result = df.groupby(level=0).transform( + func_kwargs, engine="numba", engine_kwargs=engine_kwargs + ) + expected = DataFrame({"value": [2.0, 2.0, 2.0]}) + tm.assert_frame_equal(result, expected) + + nogil = False + engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel} + result = df.groupby(level=0).transform( + func_kwargs, engine="numba", engine_kwargs=engine_kwargs + ) + expected = DataFrame({"value": [1.0, 1.0, 1.0]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore") +def test_multiindex_one_key(nogil, parallel, nopython): + pytest.importorskip("numba") + + def numba_func(values, index): + return 1 + + df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"]) + engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel} + result = df.groupby("A").transform( + numba_func, engine="numba", engine_kwargs=engine_kwargs + ) + expected = DataFrame([{"A": 1, "B": 2, "C": 1.0}]).set_index(["A", "B"]) + tm.assert_frame_equal(result, expected) + + +def test_multiindex_multi_key_not_supported(nogil, parallel, nopython): + pytest.importorskip("numba") + + def numba_func(values, index): + return 1 + + df = DataFrame([{"A": 1, "B": 2, "C": 3}]).set_index(["A", "B"]) + engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel} + with pytest.raises(NotImplementedError, match="more than 1 grouping labels"): + 
df.groupby(["A", "B"]).transform( + numba_func, engine="numba", engine_kwargs=engine_kwargs + ) + + +def test_multilabel_numba_vs_cython(numba_supported_reductions): + pytest.importorskip("numba") + reduction, kwargs = numba_supported_reductions + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.default_rng(2).standard_normal(8), + "D": np.random.default_rng(2).standard_normal(8), + } + ) + gb = df.groupby(["A", "B"]) + res_agg = gb.transform(reduction, engine="numba", **kwargs) + expected_agg = gb.transform(reduction, engine="cython", **kwargs) + tm.assert_frame_equal(res_agg, expected_agg) + + +def test_multilabel_udf_numba_vs_cython(): + pytest.importorskip("numba") + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.default_rng(2).standard_normal(8), + "D": np.random.default_rng(2).standard_normal(8), + } + ) + gb = df.groupby(["A", "B"]) + result = gb.transform( + lambda values, index: (values - values.min()) / (values.max() - values.min()), + engine="numba", + ) + expected = gb.transform( + lambda x: (x - x.min()) / (x.max() - x.min()), engine="cython" + ) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/transform/test_transform.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/transform/test_transform.py new file mode 100644 index 00000000..d51c618b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/groupby/transform/test_transform.py @@ -0,0 +1,1631 @@ +""" test with the .transform """ +from io import StringIO + +import numpy as np +import pytest + +from pandas._libs import lib + +from pandas.core.dtypes.common import ensure_platform_int + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + MultiIndex, + Series, + Timestamp, + concat, + date_range, +) +import pandas._testing as tm +from pandas.tests.groupby import get_groupby_method_args + + +def assert_fp_equal(a, b): + assert (np.abs(a - b) < 1e-12).all() + + +def test_transform(): + data = Series(np.arange(9) // 3, index=np.arange(9)) + + index = np.arange(9) + np.random.default_rng(2).shuffle(index) + data = data.reindex(index) + + grouped = data.groupby(lambda x: x // 3) + + transformed = grouped.transform(lambda x: x * x.sum()) + assert transformed[7] == 12 + + # GH 8046 + # make sure that we preserve the input order + + df = DataFrame( + np.arange(6, dtype="int64").reshape(3, 2), columns=["a", "b"], index=[0, 2, 1] + ) + key = [0, 0, 1] + expected = ( + df.sort_index() + .groupby(key) + .transform(lambda x: x - x.mean()) + .groupby(key) + .mean() + ) + result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(key).mean() + tm.assert_frame_equal(result, expected) + + def demean(arr): + return arr - arr.mean(axis=0) + + people = DataFrame( + np.random.default_rng(2).standard_normal((5, 5)), + columns=["a", "b", "c", "d", "e"], + index=["Joe", "Steve", "Wes", "Jim", "Travis"], + ) + key = ["one", "two", "one", "two", "one"] + result = people.groupby(key).transform(demean).groupby(key).mean() + expected = people.groupby(key, group_keys=False).apply(demean).groupby(key).mean() + tm.assert_frame_equal(result, expected) + + # GH 8430 + df = tm.makeTimeDataFrame() + g = df.groupby(pd.Grouper(freq="M")) + g.transform(lambda x: x - 1) + + # GH 9700 + df = 
DataFrame({"a": range(5, 10), "b": range(5)}) + msg = "using DataFrameGroupBy.max" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby("a").transform(max) + expected = DataFrame({"b": range(5)}) + tm.assert_frame_equal(result, expected) + + +def test_transform_fast(): + df = DataFrame( + { + "id": np.arange(100000) / 3, + "val": np.random.default_rng(2).standard_normal(100000), + } + ) + + grp = df.groupby("id")["val"] + + values = np.repeat(grp.mean().values, ensure_platform_int(grp.count().values)) + expected = Series(values, index=df.index, name="val") + + msg = "using SeriesGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grp.transform(np.mean) + tm.assert_series_equal(result, expected) + + result = grp.transform("mean") + tm.assert_series_equal(result, expected) + + # GH 12737 + df = DataFrame( + { + "grouping": [0, 1, 1, 3], + "f": [1.1, 2.1, 3.1, 4.5], + "d": date_range("2014-1-1", "2014-1-4"), + "i": [1, 2, 3, 4], + }, + columns=["grouping", "f", "i", "d"], + ) + result = df.groupby("grouping").transform("first") + + dates = [ + Timestamp("2014-1-1"), + Timestamp("2014-1-2"), + Timestamp("2014-1-2"), + Timestamp("2014-1-4"), + ] + expected = DataFrame( + {"f": [1.1, 2.1, 2.1, 4.5], "d": dates, "i": [1, 2, 2, 4]}, + columns=["f", "i", "d"], + ) + tm.assert_frame_equal(result, expected) + + # selection + result = df.groupby("grouping")[["f", "i"]].transform("first") + expected = expected[["f", "i"]] + tm.assert_frame_equal(result, expected) + + # dup columns + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["g", "a", "a"]) + result = df.groupby("g").transform("first") + expected = df.drop("g", axis=1) + tm.assert_frame_equal(result, expected) + + +def test_transform_broadcast(tsframe, ts): + grouped = ts.groupby(lambda x: x.month) + msg = "using SeriesGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.transform(np.mean) + + tm.assert_index_equal(result.index, ts.index) + for _, gp in grouped: + assert_fp_equal(result.reindex(gp.index), gp.mean()) + + grouped = tsframe.groupby(lambda x: x.month) + msg = "using DataFrameGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.transform(np.mean) + tm.assert_index_equal(result.index, tsframe.index) + for _, gp in grouped: + agged = gp.mean(axis=0) + res = result.reindex(gp.index) + for col in tsframe: + assert_fp_equal(res[col], agged[col]) + + # group columns + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1) + msg = "using DataFrameGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = grouped.transform(np.mean) + tm.assert_index_equal(result.index, tsframe.index) + tm.assert_index_equal(result.columns, tsframe.columns) + for _, gp in grouped: + agged = gp.mean(1) + res = result.reindex(columns=gp.columns) + for idx in gp.index: + assert_fp_equal(res.xs(idx), agged[idx]) + + +def test_transform_axis_1(request, transformation_func): + # GH 36308 + + df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"]) + args = get_groupby_method_args(transformation_func, df) + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby([0, 0, 1], axis=1) + result = gb.transform(transformation_func, *args) + expected = df.T.groupby([0, 0, 
1]).transform(transformation_func, *args).T + + if transformation_func in ["diff", "shift"]: + # Result contains nans, so transpose coerces to float + expected["b"] = expected["b"].astype("int64") + + # cumcount returns Series; the rest are DataFrame + tm.assert_equal(result, expected) + + +def test_transform_axis_1_reducer(request, reduction_func): + # GH#45715 + if reduction_func in ( + "corrwith", + "ngroup", + "nth", + ): + marker = pytest.mark.xfail(reason="transform incorrectly fails - GH#45986") + request.node.add_marker(marker) + + df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"]) + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby([0, 0, 1], axis=1) + + result = gb.transform(reduction_func) + expected = df.T.groupby([0, 0, 1]).transform(reduction_func).T + tm.assert_equal(result, expected) + + +def test_transform_axis_ts(tsframe): + # make sure that we are setting the axes + # correctly when on axis=0 or 1 + # in the presence of a non-monotonic indexer + # GH12713 + + base = tsframe.iloc[0:5] + r = len(base.index) + c = len(base.columns) + tso = DataFrame( + np.random.default_rng(2).standard_normal((r, c)), + index=base.index, + columns=base.columns, + dtype="float64", + ) + # monotonic + ts = tso + grouped = ts.groupby(lambda x: x.weekday(), group_keys=False) + result = ts - grouped.transform("mean") + expected = grouped.apply(lambda x: x - x.mean(axis=0)) + tm.assert_frame_equal(result, expected) + + ts = ts.T + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped = ts.groupby(lambda x: x.weekday(), axis=1, group_keys=False) + result = ts - grouped.transform("mean") + expected = grouped.apply(lambda x: (x.T - x.mean(1)).T) + tm.assert_frame_equal(result, expected) + + # non-monotonic + ts = tso.iloc[[1, 0] + list(range(2, len(base)))] + grouped = ts.groupby(lambda x: x.weekday(), group_keys=False) + result = ts - grouped.transform("mean") + expected = grouped.apply(lambda x: x - x.mean(axis=0)) + tm.assert_frame_equal(result, expected) + + ts = ts.T + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped = ts.groupby(lambda x: x.weekday(), axis=1, group_keys=False) + result = ts - grouped.transform("mean") + expected = grouped.apply(lambda x: (x.T - x.mean(1)).T) + tm.assert_frame_equal(result, expected) + + +def test_transform_dtype(): + # GH 9807 + # Check transform dtype output is preserved + df = DataFrame([[1, 3], [2, 3]]) + result = df.groupby(1).transform("mean") + expected = DataFrame([[1.5], [1.5]]) + tm.assert_frame_equal(result, expected) + + +def test_transform_bug(): + # GH 5712 + # transforming on a datetime column + df = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)}) + result = df.groupby("A")["B"].transform(lambda x: x.rank(ascending=False)) + expected = Series(np.arange(5, 0, step=-1), name="B", dtype="float64") + tm.assert_series_equal(result, expected) + + +def test_transform_numeric_to_boolean(): + # GH 16875 + # inconsistency in transforming boolean values + expected = Series([True, True], name="A") + + df = DataFrame({"A": [1.1, 2.2], "B": [1, 2]}) + result = df.groupby("B").A.transform(lambda x: True) + tm.assert_series_equal(result, expected) + + df = DataFrame({"A": [1, 2], "B": [1, 2]}) + result = df.groupby("B").A.transform(lambda x: True) + tm.assert_series_equal(result, expected) + + +def 
test_transform_datetime_to_timedelta(): + # GH 15429 + # transforming a datetime to timedelta + df = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)}) + expected = Series( + Timestamp("20130101") - Timestamp("20130101"), index=range(5), name="A" + ) + + # this does date math without changing result type in transform + base_time = df["A"][0] + result = ( + df.groupby("A")["A"].transform(lambda x: x.max() - x.min() + base_time) + - base_time + ) + tm.assert_series_equal(result, expected) + + # this does date math and causes the transform to return timedelta + result = df.groupby("A")["A"].transform(lambda x: x.max() - x.min()) + tm.assert_series_equal(result, expected) + + +def test_transform_datetime_to_numeric(): + # GH 10972 + # convert dt to float + df = DataFrame({"a": 1, "b": date_range("2015-01-01", periods=2, freq="D")}) + result = df.groupby("a").b.transform( + lambda x: x.dt.dayofweek - x.dt.dayofweek.mean() + ) + + expected = Series([-0.5, 0.5], name="b") + tm.assert_series_equal(result, expected) + + # convert dt to int + df = DataFrame({"a": 1, "b": date_range("2015-01-01", periods=2, freq="D")}) + result = df.groupby("a").b.transform( + lambda x: x.dt.dayofweek - x.dt.dayofweek.min() + ) + + expected = Series([0, 1], dtype=np.int32, name="b") + tm.assert_series_equal(result, expected) + + +def test_transform_casting(): + # 13046 + data = """ + idx A ID3 DATETIME + 0 B-028 b76cd912ff "2014-10-08 13:43:27" + 1 B-054 4a57ed0b02 "2014-10-08 14:26:19" + 2 B-076 1a682034f8 "2014-10-08 14:29:01" + 3 B-023 b76cd912ff "2014-10-08 18:39:34" + 4 B-023 f88g8d7sds "2014-10-08 18:40:18" + 5 B-033 b76cd912ff "2014-10-08 18:44:30" + 6 B-032 b76cd912ff "2014-10-08 18:46:00" + 7 B-037 b76cd912ff "2014-10-08 18:52:15" + 8 B-046 db959faf02 "2014-10-08 18:59:59" + 9 B-053 b76cd912ff "2014-10-08 19:17:48" + 10 B-065 b76cd912ff "2014-10-08 19:21:38" + """ + df = pd.read_csv( + StringIO(data), sep=r"\s+", index_col=[0], parse_dates=["DATETIME"] + ) + + result = df.groupby("ID3")["DATETIME"].transform(lambda x: x.diff()) + assert lib.is_np_dtype(result.dtype, "m") + + result = df[["ID3", "DATETIME"]].groupby("ID3").transform(lambda x: x.diff()) + assert lib.is_np_dtype(result.DATETIME.dtype, "m") + + +def test_transform_multiple(ts): + grouped = ts.groupby([lambda x: x.year, lambda x: x.month]) + + grouped.transform(lambda x: x * 2) + + msg = "using SeriesGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped.transform(np.mean) + + +def test_dispatch_transform(tsframe): + df = tsframe[::5].reindex(tsframe.index) + + grouped = df.groupby(lambda x: x.month) + + msg = "DataFrameGroupBy.fillna with 'method' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + filled = grouped.fillna(method="pad") + msg = "Series.fillna with 'method' is deprecated" + fillit = lambda x: x.fillna(method="pad") + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.groupby(lambda x: x.month).transform(fillit) + tm.assert_frame_equal(filled, expected) + + +def test_transform_fillna_null(): + df = DataFrame( + { + "price": [10, 10, 20, 20, 30, 30], + "color": [10, 10, 20, 20, 30, 30], + "cost": (100, 200, 300, 400, 500, 600), + } + ) + with pytest.raises(ValueError, match="Must specify a fill 'value' or 'method'"): + df.groupby(["price"]).transform("fillna") + with pytest.raises(ValueError, match="Must specify a fill 'value' or 'method'"): + df.groupby(["price"]).fillna() + + +def test_transform_transformation_func(transformation_func): + # GH 
30918 + df = DataFrame( + { + "A": ["foo", "foo", "foo", "foo", "bar", "bar", "baz"], + "B": [1, 2, np.nan, 3, 3, np.nan, 4], + }, + index=date_range("2020-01-01", "2020-01-07"), + ) + if transformation_func == "cumcount": + test_op = lambda x: x.transform("cumcount") + mock_op = lambda x: Series(range(len(x)), x.index) + elif transformation_func == "fillna": + test_op = lambda x: x.transform("fillna", value=0) + mock_op = lambda x: x.fillna(value=0) + elif transformation_func == "ngroup": + test_op = lambda x: x.transform("ngroup") + counter = -1 + + def mock_op(x): + nonlocal counter + counter += 1 + return Series(counter, index=x.index) + + else: + test_op = lambda x: x.transform(transformation_func) + mock_op = lambda x: getattr(x, transformation_func)() + + msg = "The default fill_method='pad' in DataFrame.pct_change is deprecated" + groupby_msg = ( + "The default fill_method='ffill' in DataFrameGroupBy.pct_change is deprecated" + ) + if transformation_func == "pct_change": + with tm.assert_produces_warning(FutureWarning, match=groupby_msg): + result = test_op(df.groupby("A")) + else: + result = test_op(df.groupby("A")) + + # pass the group in same order as iterating `for ... in df.groupby(...)` + # but reorder to match df's index since this is a transform + groups = [df[["B"]].iloc[4:6], df[["B"]].iloc[6:], df[["B"]].iloc[:4]] + if transformation_func == "pct_change": + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = concat([mock_op(g) for g in groups]).sort_index() + else: + expected = concat([mock_op(g) for g in groups]).sort_index() + # sort_index does not preserve the freq + expected = expected.set_axis(df.index) + + if transformation_func in ("cumcount", "ngroup"): + tm.assert_series_equal(result, expected) + else: + tm.assert_frame_equal(result, expected) + + +def test_transform_select_columns(df): + f = lambda x: x.mean() + result = df.groupby("A")[["C", "D"]].transform(f) + + selection = df[["C", "D"]] + expected = selection.groupby(df["A"]).transform(f) + + tm.assert_frame_equal(result, expected) + + +def test_transform_nuisance_raises(df): + # case that goes through _transform_item_by_item + + df.columns = ["A", "B", "B", "D"] + + # this also tests orderings in transform between + # series/frame to make sure it's consistent + grouped = df.groupby("A") + + gbc = grouped["B"] + with pytest.raises(TypeError, match="Could not convert"): + gbc.transform(lambda x: np.mean(x)) + + with pytest.raises(TypeError, match="Could not convert"): + df.groupby("A").transform(lambda x: np.mean(x)) + + +def test_transform_function_aliases(df): + result = df.groupby("A").transform("mean", numeric_only=True) + msg = "using DataFrameGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.groupby("A")[["C", "D"]].transform(np.mean) + tm.assert_frame_equal(result, expected) + + result = df.groupby("A")["C"].transform("mean") + msg = "using SeriesGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = df.groupby("A")["C"].transform(np.mean) + tm.assert_series_equal(result, expected) + + +def test_series_fast_transform_date(): + # GH 13191 + df = DataFrame( + {"grouping": [np.nan, 1, 1, 3], "d": date_range("2014-1-1", "2014-1-4")} + ) + result = df.groupby("grouping")["d"].transform("first") + dates = [ + pd.NaT, + Timestamp("2014-1-2"), + Timestamp("2014-1-2"), + Timestamp("2014-1-4"), + ] + expected = Series(dates, name="d") + tm.assert_series_equal(result, expected) + + +def test_transform_length(): + # GH 
9697 + df = DataFrame({"col1": [1, 1, 2, 2], "col2": [1, 2, 3, np.nan]}) + expected = Series([3.0] * 4) + + def nsum(x): + return np.nansum(x) + + msg = "using DataFrameGroupBy.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + results = [ + df.groupby("col1").transform(sum)["col2"], + df.groupby("col1")["col2"].transform(sum), + df.groupby("col1").transform(nsum)["col2"], + df.groupby("col1")["col2"].transform(nsum), + ] + for result in results: + tm.assert_series_equal(result, expected, check_names=False) + + +def test_transform_coercion(): + # 14457 + # when we are transforming be sure to not coerce + # via assignment + df = DataFrame({"A": ["a", "a", "b", "b"], "B": [0, 1, 3, 4]}) + g = df.groupby("A") + + msg = "using DataFrameGroupBy.mean" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = g.transform(np.mean) + + result = g.transform(lambda x: np.mean(x, axis=0)) + tm.assert_frame_equal(result, expected) + + +def test_groupby_transform_with_int(): + # GH 3740, make sure that we might upcast on item-by-item transform + + # floats + df = DataFrame( + { + "A": [1, 1, 1, 2, 2, 2], + "B": Series(1, dtype="float64"), + "C": Series([1, 2, 3, 1, 2, 3], dtype="float64"), + "D": "foo", + } + ) + with np.errstate(all="ignore"): + result = df.groupby("A")[["B", "C"]].transform( + lambda x: (x - x.mean()) / x.std() + ) + expected = DataFrame( + {"B": np.nan, "C": Series([-1, 0, 1, -1, 0, 1], dtype="float64")} + ) + tm.assert_frame_equal(result, expected) + + # int case + df = DataFrame( + { + "A": [1, 1, 1, 2, 2, 2], + "B": 1, + "C": [1, 2, 3, 1, 2, 3], + "D": "foo", + } + ) + with np.errstate(all="ignore"): + with pytest.raises(TypeError, match="Could not convert"): + df.groupby("A").transform(lambda x: (x - x.mean()) / x.std()) + result = df.groupby("A")[["B", "C"]].transform( + lambda x: (x - x.mean()) / x.std() + ) + expected = DataFrame({"B": np.nan, "C": [-1.0, 0.0, 1.0, -1.0, 0.0, 1.0]}) + tm.assert_frame_equal(result, expected) + + # int that needs float conversion + s = Series([2, 3, 4, 10, 5, -1]) + df = DataFrame({"A": [1, 1, 1, 2, 2, 2], "B": 1, "C": s, "D": "foo"}) + with np.errstate(all="ignore"): + with pytest.raises(TypeError, match="Could not convert"): + df.groupby("A").transform(lambda x: (x - x.mean()) / x.std()) + result = df.groupby("A")[["B", "C"]].transform( + lambda x: (x - x.mean()) / x.std() + ) + + s1 = s.iloc[0:3] + s1 = (s1 - s1.mean()) / s1.std() + s2 = s.iloc[3:6] + s2 = (s2 - s2.mean()) / s2.std() + expected = DataFrame({"B": np.nan, "C": concat([s1, s2])}) + tm.assert_frame_equal(result, expected) + + # int doesn't get downcasted + result = df.groupby("A")[["B", "C"]].transform(lambda x: x * 2 / 2) + expected = DataFrame({"B": 1.0, "C": [2.0, 3.0, 4.0, 10.0, 5.0, -1.0]}) + tm.assert_frame_equal(result, expected) + + +def test_groupby_transform_with_nan_group(): + # GH 9941 + df = DataFrame({"a": range(10), "b": [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]}) + msg = "using SeriesGroupBy.max" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.groupby(df.b)["a"].transform(max) + expected = Series([1.0, 1.0, 2.0, 3.0, np.nan, 6.0, 6.0, 9.0, 9.0, 9.0], name="a") + tm.assert_series_equal(result, expected) + + +def test_transform_mixed_type(): + index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]]) + df = DataFrame( + { + "d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], + "c": np.tile(["a", "b", "c"], 2), + "v": np.arange(1.0, 7.0), + }, + index=index, + ) + + def f(group): + group["g"] = group["d"] * 2 + 
return group[:1] + + grouped = df.groupby("c") + result = grouped.apply(f) + + assert result["d"].dtype == np.float64 + + # this is by definition a mutating operation! + with pd.option_context("mode.chained_assignment", None): + for key, group in grouped: + res = f(group) + tm.assert_frame_equal(res, result.loc[key]) + + +@pytest.mark.parametrize( + "op, args, targop", + [ + ("cumprod", (), lambda x: x.cumprod()), + ("cumsum", (), lambda x: x.cumsum()), + ("shift", (-1,), lambda x: x.shift(-1)), + ("shift", (1,), lambda x: x.shift()), + ], +) +def test_cython_transform_series(op, args, targop): + # GH 4095 + s = Series(np.random.default_rng(2).standard_normal(1000)) + s_missing = s.copy() + s_missing.iloc[2:10] = np.nan + labels = np.random.default_rng(2).integers(0, 50, size=1000).astype(float) + + # series + for data in [s, s_missing]: + # print(data.head()) + expected = data.groupby(labels).transform(targop) + + tm.assert_series_equal(expected, data.groupby(labels).transform(op, *args)) + tm.assert_series_equal(expected, getattr(data.groupby(labels), op)(*args)) + + +@pytest.mark.parametrize("op", ["cumprod", "cumsum"]) +@pytest.mark.parametrize("skipna", [False, True]) +@pytest.mark.parametrize( + "input, exp", + [ + # When everything is NaN + ({"key": ["b"] * 10, "value": np.nan}, Series([np.nan] * 10, name="value")), + # When there is a single NaN + ( + {"key": ["b"] * 10 + ["a"] * 2, "value": [3] * 3 + [np.nan] + [3] * 8}, + { + ("cumprod", False): [3.0, 9.0, 27.0] + [np.nan] * 7 + [3.0, 9.0], + ("cumprod", True): [ + 3.0, + 9.0, + 27.0, + np.nan, + 81.0, + 243.0, + 729.0, + 2187.0, + 6561.0, + 19683.0, + 3.0, + 9.0, + ], + ("cumsum", False): [3.0, 6.0, 9.0] + [np.nan] * 7 + [3.0, 6.0], + ("cumsum", True): [ + 3.0, + 6.0, + 9.0, + np.nan, + 12.0, + 15.0, + 18.0, + 21.0, + 24.0, + 27.0, + 3.0, + 6.0, + ], + }, + ), + ], +) +def test_groupby_cum_skipna(op, skipna, input, exp): + df = DataFrame(input) + result = df.groupby("key")["value"].transform(op, skipna=skipna) + if isinstance(exp, dict): + expected = exp[(op, skipna)] + else: + expected = exp + expected = Series(expected, name="value") + tm.assert_series_equal(expected, result) + + +@pytest.fixture +def frame(): + floating = Series(np.random.default_rng(2).standard_normal(10)) + floating_missing = floating.copy() + floating_missing.iloc[2:7] = np.nan + strings = list("abcde") * 2 + strings_missing = strings[:] + strings_missing[5] = np.nan + + df = DataFrame( + { + "float": floating, + "float_missing": floating_missing, + "int": [1, 1, 1, 1, 2] * 2, + "datetime": date_range("1990-1-1", periods=10), + "timedelta": pd.timedelta_range(1, freq="s", periods=10), + "string": strings, + "string_missing": strings_missing, + "cat": Categorical(strings), + }, + ) + return df + + +@pytest.fixture +def frame_mi(frame): + frame.index = MultiIndex.from_product([range(5), range(2)]) + return frame + + +@pytest.mark.slow +@pytest.mark.parametrize( + "op, args, targop", + [ + ("cumprod", (), lambda x: x.cumprod()), + ("cumsum", (), lambda x: x.cumsum()), + ("shift", (-1,), lambda x: x.shift(-1)), + ("shift", (1,), lambda x: x.shift()), + ], +) +@pytest.mark.parametrize("df_fix", ["frame", "frame_mi"]) +@pytest.mark.parametrize( + "gb_target", + [ + {"by": np.random.default_rng(2).integers(0, 50, size=10).astype(float)}, + {"level": 0}, + {"by": "string"}, + pytest.param({"by": "string_missing"}, marks=pytest.mark.xfail), + {"by": ["int", "string"]}, + ], +) +def test_cython_transform_frame(request, op, args, targop, df_fix, gb_target): + df = 
request.getfixturevalue(df_fix) + gb = df.groupby(group_keys=False, **gb_target) + + if op != "shift" and "int" not in gb_target: + # numeric apply fastpath promotes dtype so have + # to apply separately and concat + i = gb[["int"]].apply(targop) + f = gb[["float", "float_missing"]].apply(targop) + expected = concat([f, i], axis=1) + else: + expected = gb.apply(targop) + + expected = expected.sort_index(axis=1) + if op == "shift": + depr_msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + expected["string_missing"] = expected["string_missing"].fillna( + np.nan, downcast=False + ) + expected["string"] = expected["string"].fillna(np.nan, downcast=False) + + result = gb[expected.columns].transform(op, *args).sort_index(axis=1) + tm.assert_frame_equal(result, expected) + result = getattr(gb[expected.columns], op)(*args).sort_index(axis=1) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.slow +@pytest.mark.parametrize( + "op, args, targop", + [ + ("cumprod", (), lambda x: x.cumprod()), + ("cumsum", (), lambda x: x.cumsum()), + ("shift", (-1,), lambda x: x.shift(-1)), + ("shift", (1,), lambda x: x.shift()), + ], +) +@pytest.mark.parametrize("df_fix", ["frame", "frame_mi"]) +@pytest.mark.parametrize( + "gb_target", + [ + {"by": np.random.default_rng(2).integers(0, 50, size=10).astype(float)}, + {"level": 0}, + {"by": "string"}, + # TODO: create xfail condition given other params + # {"by": 'string_missing'}, + {"by": ["int", "string"]}, + ], +) +@pytest.mark.parametrize( + "column", + [ + "float", + "float_missing", + "int", + "datetime", + "timedelta", + "string", + "string_missing", + ], +) +def test_cython_transform_frame_column( + request, op, args, targop, df_fix, gb_target, column +): + df = request.getfixturevalue(df_fix) + gb = df.groupby(group_keys=False, **gb_target) + c = column + if ( + c not in ["float", "int", "float_missing"] + and op != "shift" + and not (c == "timedelta" and op == "cumsum") + ): + msg = "|".join( + [ + "does not support .* operations", + ".* is not supported for object dtype", + "is not implemented for this dtype", + ] + ) + with pytest.raises(TypeError, match=msg): + gb[c].transform(op) + with pytest.raises(TypeError, match=msg): + getattr(gb[c], op)() + else: + expected = gb[c].apply(targop) + expected.name = c + if c in ["string_missing", "string"]: + depr_msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + expected = expected.fillna(np.nan, downcast=False) + + res = gb[c].transform(op, *args) + tm.assert_series_equal(expected, res) + res2 = getattr(gb[c], op)(*args) + tm.assert_series_equal(expected, res2) + + +def test_transform_with_non_scalar_group(): + # GH 10165 + cols = MultiIndex.from_tuples( + [ + ("syn", "A"), + ("foo", "A"), + ("non", "A"), + ("syn", "C"), + ("foo", "C"), + ("non", "C"), + ("syn", "T"), + ("foo", "T"), + ("non", "T"), + ("syn", "G"), + ("foo", "G"), + ("non", "G"), + ] + ) + df = DataFrame( + np.random.default_rng(2).integers(1, 10, (4, 12)), + columns=cols, + index=["A", "C", "G", "T"], + ) + + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + gb = df.groupby(axis=1, level=1) + msg = "transform must return a scalar value for each group.*" + with pytest.raises(ValueError, match=msg): + gb.transform(lambda z: z.div(z.sum(axis=1), axis=0)) + + +@pytest.mark.parametrize( + "cols,expected", + [ + ("a", Series([1, 1, 1], 
name="a")), + ( + ["a", "c"], + DataFrame({"a": [1, 1, 1], "c": [1, 1, 1]}), + ), + ], +) +@pytest.mark.parametrize("agg_func", ["count", "rank", "size"]) +def test_transform_numeric_ret(cols, expected, agg_func): + # GH#19200 and GH#27469 + df = DataFrame( + {"a": date_range("2018-01-01", periods=3), "b": range(3), "c": range(7, 10)} + ) + result = df.groupby("b")[cols].transform(agg_func) + + if agg_func == "rank": + expected = expected.astype("float") + elif agg_func == "size" and cols == ["a", "c"]: + # transform("size") returns a Series + expected = expected["a"].rename(None) + tm.assert_equal(result, expected) + + +def test_transform_ffill(): + # GH 24211 + data = [["a", 0.0], ["a", float("nan")], ["b", 1.0], ["b", float("nan")]] + df = DataFrame(data, columns=["key", "values"]) + result = df.groupby("key").transform("ffill") + expected = DataFrame({"values": [0.0, 0.0, 1.0, 1.0]}) + tm.assert_frame_equal(result, expected) + result = df.groupby("key")["values"].transform("ffill") + expected = Series([0.0, 0.0, 1.0, 1.0], name="values") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("mix_groupings", [True, False]) +@pytest.mark.parametrize("as_series", [True, False]) +@pytest.mark.parametrize("val1,val2", [("foo", "bar"), (1, 2), (1.0, 2.0)]) +@pytest.mark.parametrize( + "fill_method,limit,exp_vals", + [ + ( + "ffill", + None, + [np.nan, np.nan, "val1", "val1", "val1", "val2", "val2", "val2"], + ), + ("ffill", 1, [np.nan, np.nan, "val1", "val1", np.nan, "val2", "val2", np.nan]), + ( + "bfill", + None, + ["val1", "val1", "val1", "val2", "val2", "val2", np.nan, np.nan], + ), + ("bfill", 1, [np.nan, "val1", "val1", np.nan, "val2", "val2", np.nan, np.nan]), + ], +) +def test_group_fill_methods( + mix_groupings, as_series, val1, val2, fill_method, limit, exp_vals +): + vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan] + _exp_vals = list(exp_vals) + # Overwrite placeholder values + for index, exp_val in enumerate(_exp_vals): + if exp_val == "val1": + _exp_vals[index] = val1 + elif exp_val == "val2": + _exp_vals[index] = val2 + + # Need to modify values and expectations depending on the + # Series / DataFrame that we ultimately want to generate + if mix_groupings: # ['a', 'b', 'a, 'b', ...] + keys = ["a", "b"] * len(vals) + + def interweave(list_obj): + temp = [] + for x in list_obj: + temp.extend([x, x]) + + return temp + + _exp_vals = interweave(_exp_vals) + vals = interweave(vals) + else: # ['a', 'a', 'a', ... 
'b', 'b', 'b'] + keys = ["a"] * len(vals) + ["b"] * len(vals) + _exp_vals = _exp_vals * 2 + vals = vals * 2 + + df = DataFrame({"key": keys, "val": vals}) + if as_series: + result = getattr(df.groupby("key")["val"], fill_method)(limit=limit) + exp = Series(_exp_vals, name="val") + tm.assert_series_equal(result, exp) + else: + result = getattr(df.groupby("key"), fill_method)(limit=limit) + exp = DataFrame({"val": _exp_vals}) + tm.assert_frame_equal(result, exp) + + +@pytest.mark.parametrize("fill_method", ["ffill", "bfill"]) +def test_pad_stable_sorting(fill_method): + # GH 21207 + x = [0] * 20 + y = [np.nan] * 10 + [1] * 10 + + if fill_method == "bfill": + y = y[::-1] + + df = DataFrame({"x": x, "y": y}) + expected = df.drop("x", axis=1) + + result = getattr(df.groupby("x"), fill_method)() + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "freq", + [ + None, + pytest.param( + "D", + marks=pytest.mark.xfail( + reason="GH#23918 before method uses freq in vectorized approach" + ), + ), + ], +) +@pytest.mark.parametrize("periods", [1, -1]) +@pytest.mark.parametrize("fill_method", ["ffill", "bfill", None]) +@pytest.mark.parametrize("limit", [None, 1]) +def test_pct_change(frame_or_series, freq, periods, fill_method, limit): + # GH 21200, 21621, 30463 + vals = [3, np.nan, np.nan, np.nan, 1, 2, 4, 10, np.nan, 4] + keys = ["a", "b"] + key_v = np.repeat(keys, len(vals)) + df = DataFrame({"key": key_v, "vals": vals * 2}) + + df_g = df + if fill_method is not None: + df_g = getattr(df.groupby("key"), fill_method)(limit=limit) + grp = df_g.groupby(df.key) + + expected = grp["vals"].obj / grp["vals"].shift(periods) - 1 + + gb = df.groupby("key") + + if frame_or_series is Series: + gb = gb["vals"] + else: + expected = expected.to_frame("vals") + + msg = ( + "The 'fill_method' keyword being not None and the 'limit' keyword in " + f"{type(gb).__name__}.pct_change are deprecated" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = gb.pct_change( + periods=periods, fill_method=fill_method, limit=limit, freq=freq + ) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "func, expected_status", + [ + ("ffill", ["shrt", "shrt", "lng", np.nan, "shrt", "ntrl", "ntrl"]), + ("bfill", ["shrt", "lng", "lng", "shrt", "shrt", "ntrl", np.nan]), + ], +) +def test_ffill_bfill_non_unique_multilevel(func, expected_status): + # GH 19437 + date = pd.to_datetime( + [ + "2018-01-01", + "2018-01-01", + "2018-01-01", + "2018-01-01", + "2018-01-02", + "2018-01-01", + "2018-01-02", + ] + ) + symbol = ["MSFT", "MSFT", "MSFT", "AAPL", "AAPL", "TSLA", "TSLA"] + status = ["shrt", np.nan, "lng", np.nan, "shrt", "ntrl", np.nan] + + df = DataFrame({"date": date, "symbol": symbol, "status": status}) + df = df.set_index(["date", "symbol"]) + result = getattr(df.groupby("symbol")["status"], func)() + + index = MultiIndex.from_tuples( + tuples=list(zip(*[date, symbol])), names=["date", "symbol"] + ) + expected = Series(expected_status, index=index, name="status") + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("func", [np.any, np.all]) +def test_any_all_np_func(func): + # GH 20653 + df = DataFrame( + [["foo", True], [np.nan, True], ["foo", True]], columns=["key", "val"] + ) + + exp = Series([True, np.nan, True], name="val") + + msg = "using SeriesGroupBy.[any|all]" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = df.groupby("key")["val"].transform(func) + tm.assert_series_equal(res, exp) + + +def test_groupby_transform_rename(): 
+ # https://github.com/pandas-dev/pandas/issues/23461 + def demean_rename(x): + result = x - x.mean() + + if isinstance(x, Series): + return result + + result = result.rename(columns={c: f"{c}_demeaned" for c in result.columns}) + + return result + + df = DataFrame({"group": list("ababa"), "value": [1, 1, 1, 2, 2]}) + expected = DataFrame({"value": [-1.0 / 3, -0.5, -1.0 / 3, 0.5, 2.0 / 3]}) + + result = df.groupby("group").transform(demean_rename) + tm.assert_frame_equal(result, expected) + result_single = df.groupby("group").value.transform(demean_rename) + tm.assert_series_equal(result_single, expected["value"]) + + +@pytest.mark.parametrize("func", [min, max, np.min, np.max, "first", "last"]) +def test_groupby_transform_timezone_column(func): + # GH 24198 + ts = pd.to_datetime("now", utc=True).tz_convert("Asia/Singapore") + result = DataFrame({"end_time": [ts], "id": [1]}) + warn = FutureWarning if not isinstance(func, str) else None + msg = "using SeriesGroupBy.[min|max]" + with tm.assert_produces_warning(warn, match=msg): + result["max_end_time"] = result.groupby("id").end_time.transform(func) + expected = DataFrame([[ts, 1, ts]], columns=["end_time", "id", "max_end_time"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "func, values", + [ + ("idxmin", ["1/1/2011"] * 2 + ["1/3/2011"] * 7 + ["1/10/2011"]), + ("idxmax", ["1/2/2011"] * 2 + ["1/9/2011"] * 7 + ["1/10/2011"]), + ], +) +def test_groupby_transform_with_datetimes(func, values): + # GH 15306 + dates = date_range("1/1/2011", periods=10, freq="D") + + stocks = DataFrame({"price": np.arange(10.0)}, index=dates) + stocks["week_id"] = dates.isocalendar().week + + result = stocks.groupby(stocks["week_id"])["price"].transform(func) + + expected = Series(data=pd.to_datetime(values), index=dates, name="price") + + tm.assert_series_equal(result, expected) + + +def test_groupby_transform_dtype(): + # GH 22243 + df = DataFrame({"a": [1], "val": [1.35]}) + + result = df["val"].transform(lambda x: x.map(lambda y: f"+{y}")) + expected1 = Series(["+1.35"], name="val", dtype="object") + tm.assert_series_equal(result, expected1) + + result = df.groupby("a")["val"].transform(lambda x: x.map(lambda y: f"+{y}")) + tm.assert_series_equal(result, expected1) + + result = df.groupby("a")["val"].transform(lambda x: x.map(lambda y: f"+({y})")) + expected2 = Series(["+(1.35)"], name="val", dtype="object") + tm.assert_series_equal(result, expected2) + + df["val"] = df["val"].astype(object) + result = df.groupby("a")["val"].transform(lambda x: x.map(lambda y: f"+{y}")) + tm.assert_series_equal(result, expected1) + + +@pytest.mark.parametrize("func", ["cumsum", "cumprod", "cummin", "cummax"]) +def test_transform_absent_categories(func): + # GH 16771 + # cython transforms with more groups than rows + x_vals = [1] + x_cats = range(2) + y = [1] + df = DataFrame({"x": Categorical(x_vals, x_cats), "y": y}) + result = getattr(df.y.groupby(df.x, observed=False), func)() + expected = df.y + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("func", ["ffill", "bfill", "shift"]) +@pytest.mark.parametrize("key, val", [("level", 0), ("by", Series([0]))]) +def test_ffill_not_in_axis(func, key, val): + # GH 21521 + df = DataFrame([[np.nan]]) + result = getattr(df.groupby(**{key: val}), func)() + expected = df + + tm.assert_frame_equal(result, expected) + + +def test_transform_invalid_name_raises(): + # GH#27486 + df = DataFrame({"a": [0, 1, 1, 2]}) + g = df.groupby(["a", "b", "b", "c"]) + with pytest.raises(ValueError, 
match="not a valid function name"): + g.transform("some_arbitrary_name") + + # method exists on the object, but is not a valid transformation/agg + assert hasattr(g, "aggregate") # make sure the method exists + with pytest.raises(ValueError, match="not a valid function name"): + g.transform("aggregate") + + # Test SeriesGroupBy + g = df["a"].groupby(["a", "b", "b", "c"]) + with pytest.raises(ValueError, match="not a valid function name"): + g.transform("some_arbitrary_name") + + +def test_transform_agg_by_name(request, reduction_func, frame_or_series): + func = reduction_func + + obj = DataFrame( + {"a": [0, 0, 0, 1, 1, 1], "b": range(6)}, + index=["A", "B", "C", "D", "E", "F"], + ) + if frame_or_series is Series: + obj = obj["a"] + + g = obj.groupby(np.repeat([0, 1], 3)) + + if func == "corrwith" and isinstance(obj, Series): # GH#32293 + # TODO: implement SeriesGroupBy.corrwith + assert not hasattr(g, func) + return + + args = get_groupby_method_args(reduction_func, obj) + result = g.transform(func, *args) + + # this is the *definition* of a transformation + tm.assert_index_equal(result.index, obj.index) + + if func not in ("ngroup", "size") and obj.ndim == 2: + # size/ngroup return a Series, unlike other transforms + tm.assert_index_equal(result.columns, obj.columns) + + # verify that values were broadcasted across each group + assert len(set(DataFrame(result).iloc[-3:, -1])) == 1 + + +def test_transform_lambda_with_datetimetz(): + # GH 27496 + df = DataFrame( + { + "time": [ + Timestamp("2010-07-15 03:14:45"), + Timestamp("2010-11-19 18:47:06"), + ], + "timezone": ["Etc/GMT+4", "US/Eastern"], + } + ) + result = df.groupby(["timezone"])["time"].transform( + lambda x: x.dt.tz_localize(x.name) + ) + expected = Series( + [ + Timestamp("2010-07-15 03:14:45", tz="Etc/GMT+4"), + Timestamp("2010-11-19 18:47:06", tz="US/Eastern"), + ], + name="time", + ) + tm.assert_series_equal(result, expected) + + +def test_transform_fastpath_raises(): + # GH#29631 case where fastpath defined in groupby.generic _choose_path + # raises, but slow_path does not + + df = DataFrame({"A": [1, 1, 2, 2], "B": [1, -1, 1, 2]}) + gb = df.groupby("A") + + def func(grp): + # we want a function such that func(frame) fails but func.apply(frame) + # works + if grp.ndim == 2: + # Ensure that fast_path fails + raise NotImplementedError("Don't cross the streams") + return grp * 2 + + # Check that the fastpath raises, see _transform_general + obj = gb._obj_with_exclusions + gen = gb.grouper.get_iterator(obj, axis=gb.axis) + fast_path, slow_path = gb._define_paths(func) + _, group = next(gen) + + with pytest.raises(NotImplementedError, match="Don't cross the streams"): + fast_path(group) + + result = gb.transform(func) + + expected = DataFrame([2, -2, 2, 4], columns=["B"]) + tm.assert_frame_equal(result, expected) + + +def test_transform_lambda_indexing(): + # GH 7883 + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "flux", "foo", "flux"], + "B": ["one", "one", "two", "three", "two", "six", "five", "three"], + "C": range(8), + "D": range(8), + "E": range(8), + } + ) + df = df.set_index(["A", "B"]) + df = df.sort_index() + result = df.groupby(level="A").transform(lambda x: x.iloc[-1]) + expected = DataFrame( + { + "C": [3, 3, 7, 7, 4, 4, 4, 4], + "D": [3, 3, 7, 7, 4, 4, 4, 4], + "E": [3, 3, 7, 7, 4, 4, 4, 4], + }, + index=MultiIndex.from_tuples( + [ + ("bar", "one"), + ("bar", "three"), + ("flux", "six"), + ("flux", "three"), + ("foo", "five"), + ("foo", "one"), + ("foo", "two"), + ("foo", "two"), + ], + 
names=["A", "B"], + ), + ) + tm.assert_frame_equal(result, expected) + + +def test_categorical_and_not_categorical_key(observed): + # Checks that groupby-transform, when grouping by both a categorical + # and a non-categorical key, doesn't try to expand the output to include + # non-observed categories but instead matches the input shape. + # GH 32494 + df_with_categorical = DataFrame( + { + "A": Categorical(["a", "b", "a"], categories=["a", "b", "c"]), + "B": [1, 2, 3], + "C": ["a", "b", "a"], + } + ) + df_without_categorical = DataFrame( + {"A": ["a", "b", "a"], "B": [1, 2, 3], "C": ["a", "b", "a"]} + ) + + # DataFrame case + result = df_with_categorical.groupby(["A", "C"], observed=observed).transform("sum") + expected = df_without_categorical.groupby(["A", "C"]).transform("sum") + tm.assert_frame_equal(result, expected) + expected_explicit = DataFrame({"B": [4, 2, 4]}) + tm.assert_frame_equal(result, expected_explicit) + + # Series case + result = df_with_categorical.groupby(["A", "C"], observed=observed)["B"].transform( + "sum" + ) + expected = df_without_categorical.groupby(["A", "C"])["B"].transform("sum") + tm.assert_series_equal(result, expected) + expected_explicit = Series([4, 2, 4], name="B") + tm.assert_series_equal(result, expected_explicit) + + +def test_string_rank_grouping(): + # GH 19354 + df = DataFrame({"A": [1, 1, 2], "B": [1, 2, 3]}) + result = df.groupby("A").transform("rank") + expected = DataFrame({"B": [1.0, 2.0, 1.0]}) + tm.assert_frame_equal(result, expected) + + +def test_transform_cumcount(): + # GH 27472 + df = DataFrame({"a": [0, 0, 0, 1, 1, 1], "b": range(6)}) + grp = df.groupby(np.repeat([0, 1], 3)) + + result = grp.cumcount() + expected = Series([0, 1, 2, 0, 1, 2]) + tm.assert_series_equal(result, expected) + + result = grp.transform("cumcount") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("keys", [["A1"], ["A1", "A2"]]) +def test_null_group_lambda_self(sort, dropna, keys): + # GH 17093 + size = 50 + nulls1 = np.random.default_rng(2).choice([False, True], size) + nulls2 = np.random.default_rng(2).choice([False, True], size) + # Whether a group contains a null value or not + nulls_grouper = nulls1 if len(keys) == 1 else nulls1 | nulls2 + + a1 = np.random.default_rng(2).integers(0, 5, size=size).astype(float) + a1[nulls1] = np.nan + a2 = np.random.default_rng(2).integers(0, 5, size=size).astype(float) + a2[nulls2] = np.nan + values = np.random.default_rng(2).integers(0, 5, size=a1.shape) + df = DataFrame({"A1": a1, "A2": a2, "B": values}) + + expected_values = values + if dropna and nulls_grouper.any(): + expected_values = expected_values.astype(float) + expected_values[nulls_grouper] = np.nan + expected = DataFrame(expected_values, columns=["B"]) + + gb = df.groupby(keys, dropna=dropna, sort=sort) + result = gb[["B"]].transform(lambda x: x) + tm.assert_frame_equal(result, expected) + + +def test_null_group_str_reducer(request, dropna, reduction_func): + # GH 17093 + if reduction_func == "corrwith": + msg = "incorrectly raises" + request.node.add_marker(pytest.mark.xfail(reason=msg)) + + index = [1, 2, 3, 4] # test transform preserves non-standard index + df = DataFrame({"A": [1, 1, np.nan, np.nan], "B": [1, 2, 2, 3]}, index=index) + gb = df.groupby("A", dropna=dropna) + + args = get_groupby_method_args(reduction_func, df) + + # Manually handle reducers that don't fit the generic pattern + # Set expected with dropna=False, then replace if necessary + if reduction_func == "first": + expected = DataFrame({"B": [1, 1, 2, 2]}, 
index=index) + elif reduction_func == "last": + expected = DataFrame({"B": [2, 2, 3, 3]}, index=index) + elif reduction_func == "nth": + expected = DataFrame({"B": [1, 1, 2, 2]}, index=index) + elif reduction_func == "size": + expected = Series([2, 2, 2, 2], index=index) + elif reduction_func == "corrwith": + expected = DataFrame({"B": [1.0, 1.0, 1.0, 1.0]}, index=index) + else: + expected_gb = df.groupby("A", dropna=False) + buffer = [] + for idx, group in expected_gb: + res = getattr(group["B"], reduction_func)() + buffer.append(Series(res, index=group.index)) + expected = concat(buffer).to_frame("B") + if dropna: + dtype = object if reduction_func in ("any", "all") else float + expected = expected.astype(dtype) + if expected.ndim == 2: + expected.iloc[[2, 3], 0] = np.nan + else: + expected.iloc[[2, 3]] = np.nan + + result = gb.transform(reduction_func, *args) + tm.assert_equal(result, expected) + + +def test_null_group_str_transformer(request, dropna, transformation_func): + # GH 17093 + df = DataFrame({"A": [1, 1, np.nan], "B": [1, 2, 2]}, index=[1, 2, 3]) + args = get_groupby_method_args(transformation_func, df) + gb = df.groupby("A", dropna=dropna) + + buffer = [] + for k, (idx, group) in enumerate(gb): + if transformation_func == "cumcount": + # DataFrame has no cumcount method + res = DataFrame({"B": range(len(group))}, index=group.index) + elif transformation_func == "ngroup": + res = DataFrame(len(group) * [k], index=group.index, columns=["B"]) + else: + res = getattr(group[["B"]], transformation_func)(*args) + buffer.append(res) + if dropna: + dtype = object if transformation_func in ("any", "all") else None + buffer.append(DataFrame([[np.nan]], index=[3], dtype=dtype, columns=["B"])) + expected = concat(buffer) + + if transformation_func in ("cumcount", "ngroup"): + # ngroup/cumcount always returns a Series as it counts the groups, not values + expected = expected["B"].rename(None) + + msg = "The default fill_method='ffill' in DataFrameGroupBy.pct_change is deprecated" + if transformation_func == "pct_change" and not dropna: + with tm.assert_produces_warning(FutureWarning, match=msg): + result = gb.transform("pct_change", *args) + else: + result = gb.transform(transformation_func, *args) + + tm.assert_equal(result, expected) + + +def test_null_group_str_reducer_series(request, dropna, reduction_func): + # GH 17093 + index = [1, 2, 3, 4] # test transform preserves non-standard index + ser = Series([1, 2, 2, 3], index=index) + gb = ser.groupby([1, 1, np.nan, np.nan], dropna=dropna) + + if reduction_func == "corrwith": + # corrwith not implemented for SeriesGroupBy + assert not hasattr(gb, reduction_func) + return + + args = get_groupby_method_args(reduction_func, ser) + + # Manually handle reducers that don't fit the generic pattern + # Set expected with dropna=False, then replace if necessary + if reduction_func == "first": + expected = Series([1, 1, 2, 2], index=index) + elif reduction_func == "last": + expected = Series([2, 2, 3, 3], index=index) + elif reduction_func == "nth": + expected = Series([1, 1, 2, 2], index=index) + elif reduction_func == "size": + expected = Series([2, 2, 2, 2], index=index) + elif reduction_func == "corrwith": + expected = Series([1, 1, 2, 2], index=index) + else: + expected_gb = ser.groupby([1, 1, np.nan, np.nan], dropna=False) + buffer = [] + for idx, group in expected_gb: + res = getattr(group, reduction_func)() + buffer.append(Series(res, index=group.index)) + expected = concat(buffer) + if dropna: + dtype = object if reduction_func in ("any", 
"all") else float + expected = expected.astype(dtype) + expected.iloc[[2, 3]] = np.nan + + result = gb.transform(reduction_func, *args) + tm.assert_series_equal(result, expected) + + +def test_null_group_str_transformer_series(dropna, transformation_func): + # GH 17093 + ser = Series([1, 2, 2], index=[1, 2, 3]) + args = get_groupby_method_args(transformation_func, ser) + gb = ser.groupby([1, 1, np.nan], dropna=dropna) + + buffer = [] + for k, (idx, group) in enumerate(gb): + if transformation_func == "cumcount": + # Series has no cumcount method + res = Series(range(len(group)), index=group.index) + elif transformation_func == "ngroup": + res = Series(k, index=group.index) + else: + res = getattr(group, transformation_func)(*args) + buffer.append(res) + if dropna: + dtype = object if transformation_func in ("any", "all") else None + buffer.append(Series([np.nan], index=[3], dtype=dtype)) + expected = concat(buffer) + + with tm.assert_produces_warning(None): + result = gb.transform(transformation_func, *args) + + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "func, expected_values", + [ + (Series.sort_values, [5, 4, 3, 2, 1]), + (lambda x: x.head(1), [5.0, np.nan, 3, 2, np.nan]), + ], +) +@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]]) +@pytest.mark.parametrize("keys_in_index", [True, False]) +def test_transform_aligns(func, frame_or_series, expected_values, keys, keys_in_index): + # GH#45648 - transform should align with the input's index + df = DataFrame({"a1": [1, 1, 3, 2, 2], "b": [5, 4, 3, 2, 1]}) + if "a2" in keys: + df["a2"] = df["a1"] + if keys_in_index: + df = df.set_index(keys, append=True) + + gb = df.groupby(keys) + if frame_or_series is Series: + gb = gb["b"] + + result = gb.transform(func) + expected = DataFrame({"b": expected_values}, index=df.index) + if frame_or_series is Series: + expected = expected["b"] + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize("keys", ["A", ["A", "B"]]) +def test_as_index_no_change(keys, df, groupby_func): + # GH#49834 - as_index should have no impact on DataFrameGroupBy.transform + if keys == "A": + # Column B is string dtype; will fail on some ops + df = df.drop(columns="B") + args = get_groupby_method_args(groupby_func, df) + gb_as_index_true = df.groupby(keys, as_index=True) + gb_as_index_false = df.groupby(keys, as_index=False) + result = gb_as_index_true.transform(groupby_func, *args) + expected = gb_as_index_false.transform(groupby_func, *args) + tm.assert_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_constructors.py new file mode 100644 index 00000000..60abbfc4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_constructors.py @@ -0,0 +1,59 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + MultiIndex, +) +import pandas._testing as tm + + +class TestIndexConstructor: + # Tests for the Index constructor, specifically for cases that 
do + # not return a subclass + + @pytest.mark.parametrize("value", [1, np.int64(1)]) + def test_constructor_corner(self, value): + # corner case + msg = ( + r"Index\(\.\.\.\) must be called with a collection of some " + f"kind, {value} was passed" + ) + with pytest.raises(TypeError, match=msg): + Index(value) + + @pytest.mark.parametrize("index_vals", [[("A", 1), "B"], ["B", ("A", 1)]]) + def test_construction_list_mixed_tuples(self, index_vals): + # see gh-10697: if we are constructing from a mixed list of tuples, + # make sure that we are independent of the sorting order. + index = Index(index_vals) + assert isinstance(index, Index) + assert not isinstance(index, MultiIndex) + + def test_constructor_cast(self): + msg = "could not convert string to float" + with pytest.raises(ValueError, match=msg): + Index(["a", "b", "c"], dtype=float) + + @pytest.mark.parametrize("tuple_list", [[()], [(), ()]]) + def test_construct_empty_tuples(self, tuple_list): + # GH #45608 + result = Index(tuple_list) + expected = MultiIndex.from_tuples(tuple_list) + + tm.assert_index_equal(result, expected) + + def test_index_string_inference(self): + # GH#54430 + pytest.importorskip("pyarrow") + dtype = "string[pyarrow_numpy]" + expected = Index(["a", "b"], dtype=dtype) + with pd.option_context("future.infer_string", True): + ser = Index(["a", "b"]) + tm.assert_index_equal(ser, expected) + + expected = Index(["a", 1], dtype="object") + with pd.option_context("future.infer_string", True): + ser = Index(["a", 1]) + tm.assert_index_equal(ser, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_formats.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_formats.py new file mode 100644 index 00000000..9053d45d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_formats.py @@ -0,0 +1,148 @@ +import numpy as np +import pytest + +import pandas._config.config as cf + +from pandas import Index + + +class TestIndexRendering: + @pytest.mark.parametrize( + "index,expected", + [ + # ASCII + # short + ( + Index(["a", "bb", "ccc"]), + """Index(['a', 'bb', 'ccc'], dtype='object')""", + ), + # multiple lines + ( + Index(["a", "bb", "ccc"] * 10), + "Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', " + "'bb', 'ccc', 'a', 'bb', 'ccc',\n" + " 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', " + "'bb', 'ccc', 'a', 'bb', 'ccc',\n" + " 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n" + " dtype='object')", + ), + # truncated + ( + Index(["a", "bb", "ccc"] * 100), + "Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\n" + " ...\n" + " 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n" + " dtype='object', length=300)", + ), + # Non-ASCII + # short + ( + Index(["あ", "いい", "ううう"]), + """Index(['あ', 'いい', 'ううう'], dtype='object')""", + ), + # multiple lines + ( + Index(["あ", "いい", "ううう"] * 10), + ( + "Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " + "'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n" + " 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " + "'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n" + " 'あ', 'いい', 'ううう', 'あ', 'いい', " + "'ううう'],\n" + " dtype='object')" + ), + ), + # truncated + ( + Index(["あ", "いい", "ううう"] * 100), + ( + "Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " + "'あ', 'いい', 'ううう', 'あ',\n" + " ...\n" + " 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', " + "'ううう', 'あ', 'いい', 'ううう'],\n" + " dtype='object', length=300)" + ), + ), + ], + ) + def test_string_index_repr(self, index, expected): + result = repr(index) + assert result == expected + + 
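+    # The truncated cases above come from the display machinery: once an
+    # Index is longer than display.max_seq_items, the middle is elided with
+    # '...' and a length= field is appended. A hedged sketch of the idea:
+    #
+    #   with cf.option_context("display.max_seq_items", 4):
+    #       repr(Index(np.arange(100)))
+    #   # -> roughly "Index([0, 1, ..., 98, 99], dtype='int64', length=100)"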
@pytest.mark.parametrize( + "index,expected", + [ + # short + ( + Index(["あ", "いい", "ううう"]), + ("Index(['あ', 'いい', 'ううう'], dtype='object')"), + ), + # multiple lines + ( + Index(["あ", "いい", "ううう"] * 10), + ( + "Index(['あ', 'いい', 'ううう', 'あ', 'いい', " + "'ううう', 'あ', 'いい', 'ううう',\n" + " 'あ', 'いい', 'ううう', 'あ', 'いい', " + "'ううう', 'あ', 'いい', 'ううう',\n" + " 'あ', 'いい', 'ううう', 'あ', 'いい', " + "'ううう', 'あ', 'いい', 'ううう',\n" + " 'あ', 'いい', 'ううう'],\n" + " dtype='object')" + "" + ), + ), + # truncated + ( + Index(["あ", "いい", "ううう"] * 100), + ( + "Index(['あ', 'いい', 'ううう', 'あ', 'いい', " + "'ううう', 'あ', 'いい', 'ううう',\n" + " 'あ',\n" + " ...\n" + " 'ううう', 'あ', 'いい', 'ううう', 'あ', " + "'いい', 'ううう', 'あ', 'いい',\n" + " 'ううう'],\n" + " dtype='object', length=300)" + ), + ), + ], + ) + def test_string_index_repr_with_unicode_option(self, index, expected): + # Enable Unicode option ----------------------------------------- + with cf.option_context("display.unicode.east_asian_width", True): + result = repr(index) + assert result == expected + + def test_repr_summary(self): + with cf.option_context("display.max_seq_items", 10): + result = repr(Index(np.arange(1000))) + assert len(result) < 200 + assert "..." in result + + def test_summary_bug(self): + # GH#3869 + ind = Index(["{other}%s", "~:{range}:0"], name="A") + result = ind._summary() + # shouldn't be formatted accidentally. + assert "~:{range}:0" in result + assert "{other}%s" in result + + def test_index_repr_bool_nan(self): + # GH32146 + arr = Index([True, False, np.nan], dtype=object) + exp1 = arr.format() + out1 = ["True", "False", "NaN"] + assert out1 == exp1 + + exp2 = repr(arr) + out2 = "Index([True, False, nan], dtype='object')" + assert out2 == exp2 + + def test_format_different_scalar_lengths(self): + # GH#35439 + idx = Index(["aaaaaaaaa", "b"]) + expected = ["aaaaaaaaa", "b"] + assert idx.format() == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_indexing.py new file mode 100644 index 00000000..2988fa7d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_indexing.py @@ -0,0 +1,104 @@ +import numpy as np +import pytest + +from pandas._libs import index as libindex + +import pandas as pd +from pandas import ( + Index, + NaT, +) +import pandas._testing as tm + + +class TestGetSliceBounds: + @pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)]) + def test_get_slice_bounds_within(self, side, expected): + index = Index(list("abcdef")) + result = index.get_slice_bound("e", side=side) + assert result == expected + + @pytest.mark.parametrize("side", ["left", "right"]) + @pytest.mark.parametrize( + "data, bound, expected", [(list("abcdef"), "x", 6), (list("bcdefg"), "a", 0)] + ) + def test_get_slice_bounds_outside(self, side, expected, data, bound): + index = Index(data) + result = index.get_slice_bound(bound, side=side) + assert result == expected + + def test_get_slice_bounds_invalid_side(self): + with pytest.raises(ValueError, match="Invalid value for side kwarg"): + Index([]).get_slice_bound("a", side="middle") + + +class TestGetIndexerNonUnique: + def test_get_indexer_non_unique_dtype_mismatch(self): + # GH#25459 + indexes, missing = Index(["A", "B"]).get_indexer_non_unique(Index([0])) + tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes) + tm.assert_numpy_array_equal(np.array([0], dtype=np.intp), missing) + + @pytest.mark.parametrize( + 
"idx_values,idx_non_unique", + [ + ([np.nan, 100, 200, 100], [np.nan, 100]), + ([np.nan, 100.0, 200.0, 100.0], [np.nan, 100.0]), + ], + ) + def test_get_indexer_non_unique_int_index(self, idx_values, idx_non_unique): + indexes, missing = Index(idx_values).get_indexer_non_unique(Index([np.nan])) + tm.assert_numpy_array_equal(np.array([0], dtype=np.intp), indexes) + tm.assert_numpy_array_equal(np.array([], dtype=np.intp), missing) + + indexes, missing = Index(idx_values).get_indexer_non_unique( + Index(idx_non_unique) + ) + tm.assert_numpy_array_equal(np.array([0, 1, 3], dtype=np.intp), indexes) + tm.assert_numpy_array_equal(np.array([], dtype=np.intp), missing) + + +class TestGetLoc: + @pytest.mark.slow # to_flat_index takes a while + def test_get_loc_tuple_monotonic_above_size_cutoff(self, monkeypatch): + # Go through the libindex path for which using + # _bin_search vs ndarray.searchsorted makes a difference + + with monkeypatch.context(): + monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 100) + lev = list("ABCD") + dti = pd.date_range("2016-01-01", periods=10) + + mi = pd.MultiIndex.from_product([lev, range(5), dti]) + oidx = mi.to_flat_index() + + loc = len(oidx) // 2 + tup = oidx[loc] + + res = oidx.get_loc(tup) + assert res == loc + + def test_get_loc_nan_object_dtype_nonmonotonic_nonunique(self): + # case that goes through _maybe_get_bool_indexer + idx = Index(["foo", np.nan, None, "foo", 1.0, None], dtype=object) + + # we dont raise KeyError on nan + res = idx.get_loc(np.nan) + assert res == 1 + + # we only match on None, not on np.nan + res = idx.get_loc(None) + expected = np.array([False, False, True, False, False, True]) + tm.assert_numpy_array_equal(res, expected) + + # we don't match at all on mismatched NA + with pytest.raises(KeyError, match="NaT"): + idx.get_loc(NaT) + + +def test_getitem_boolean_ea_indexer(): + # GH#45806 + ser = pd.Series([True, False, pd.NA], dtype="boolean") + result = ser.index[ser] + expected = Index([0]) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_pickle.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_pickle.py new file mode 100644 index 00000000..c670921d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_pickle.py @@ -0,0 +1,11 @@ +from pandas import Index +import pandas._testing as tm + + +def test_pickle_preserves_object_dtype(): + # GH#43188, GH#43155 don't infer numeric dtype + index = Index([1, 2, 3], dtype=object) + + result = tm.round_trip_pickle(index) + assert result.dtype == object + tm.assert_index_equal(index, result) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_reshape.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_reshape.py new file mode 100644 index 00000000..6586f5f9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_reshape.py @@ -0,0 +1,93 @@ +""" +Tests for ndarray-like method on the base Index class +""" +import numpy as np +import pytest + +from pandas import Index +import pandas._testing as tm + + +class TestReshape: + def test_repeat(self): + repeats = 2 + index = Index([1, 2, 3]) + expected = Index([1, 1, 2, 2, 3, 3]) + + result = index.repeat(repeats) + tm.assert_index_equal(result, expected) + + def test_insert(self): + # GH 7256 + # validate neg/pos inserts + result = Index(["b", "c", "d"]) + + # test 0th element + 
tm.assert_index_equal(Index(["a", "b", "c", "d"]), result.insert(0, "a")) + + # test Nth element that follows Python list behavior + tm.assert_index_equal(Index(["b", "c", "e", "d"]), result.insert(-1, "e")) + + # test loc +/- neq (0, -1) + tm.assert_index_equal(result.insert(1, "z"), result.insert(-2, "z")) + + # test empty + null_index = Index([]) + tm.assert_index_equal(Index(["a"]), null_index.insert(0, "a")) + + def test_insert_missing(self, nulls_fixture): + # GH#22295 + # test there is no mangling of NA values + expected = Index(["a", nulls_fixture, "b", "c"]) + result = Index(list("abc")).insert(1, nulls_fixture) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "val", [(1, 2), np.datetime64("2019-12-31"), np.timedelta64(1, "D")] + ) + @pytest.mark.parametrize("loc", [-1, 2]) + def test_insert_datetime_into_object(self, loc, val): + # GH#44509 + idx = Index(["1", "2", "3"]) + result = idx.insert(loc, val) + expected = Index(["1", "2", val, "3"]) + tm.assert_index_equal(result, expected) + assert type(expected[2]) is type(val) + + def test_insert_none_into_string_numpy(self): + # GH#55365 + pytest.importorskip("pyarrow") + index = Index(["a", "b", "c"], dtype="string[pyarrow_numpy]") + result = index.insert(-1, None) + expected = Index(["a", "b", None, "c"], dtype="string[pyarrow_numpy]") + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "pos,expected", + [ + (0, Index(["b", "c", "d"], name="index")), + (-1, Index(["a", "b", "c"], name="index")), + ], + ) + def test_delete(self, pos, expected): + index = Index(["a", "b", "c", "d"], name="index") + result = index.delete(pos) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + + def test_delete_raises(self): + index = Index(["a", "b", "c", "d"], name="index") + msg = "index 5 is out of bounds for axis 0 with size 4" + with pytest.raises(IndexError, match=msg): + index.delete(5) + + def test_append_multiple(self): + index = Index(["a", "b", "c", "d", "e", "f"]) + + foos = [index[:2], index[2:4], index[4:]] + result = foos[0].append(foos[1:]) + tm.assert_index_equal(result, index) + + # empty + result = index.append([]) + tm.assert_index_equal(result, index) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_setops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_setops.py new file mode 100644 index 00000000..488f79ee --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_setops.py @@ -0,0 +1,259 @@ +from datetime import datetime + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + Series, +) +import pandas._testing as tm +from pandas.core.algorithms import safe_sort + + +class TestIndexSetOps: + @pytest.mark.parametrize( + "method", ["union", "intersection", "difference", "symmetric_difference"] + ) + def test_setops_sort_validation(self, method): + idx1 = Index(["a", "b"]) + idx2 = Index(["b", "c"]) + + with pytest.raises(ValueError, match="The 'sort' keyword only takes"): + getattr(idx1, method)(idx2, sort=2) + + # sort=True is supported as of GH#?? 
+ getattr(idx1, method)(idx2, sort=True) + + def test_setops_preserve_object_dtype(self): + idx = Index([1, 2, 3], dtype=object) + result = idx.intersection(idx[1:]) + expected = idx[1:] + tm.assert_index_equal(result, expected) + + # if other is not monotonic increasing, intersection goes through + # a different route + result = idx.intersection(idx[1:][::-1]) + tm.assert_index_equal(result, expected) + + result = idx._union(idx[1:], sort=None) + expected = idx + tm.assert_numpy_array_equal(result, expected.values) + + result = idx.union(idx[1:], sort=None) + tm.assert_index_equal(result, expected) + + # if other is not monotonic increasing, _union goes through + # a different route + result = idx._union(idx[1:][::-1], sort=None) + tm.assert_numpy_array_equal(result, expected.values) + + result = idx.union(idx[1:][::-1], sort=None) + tm.assert_index_equal(result, expected) + + def test_union_base(self): + index = Index([0, "a", 1, "b", 2, "c"]) + first = index[3:] + second = index[:5] + + result = first.union(second) + + expected = Index([0, 1, 2, "a", "b", "c"]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("klass", [np.array, Series, list]) + def test_union_different_type_base(self, klass): + # GH 10149 + index = Index([0, "a", 1, "b", 2, "c"]) + first = index[3:] + second = index[:5] + + result = first.union(klass(second.values)) + + assert tm.equalContents(result, index) + + def test_union_sort_other_incomparable(self): + # https://github.com/pandas-dev/pandas/issues/24959 + idx = Index([1, pd.Timestamp("2000")]) + # default (sort=None) + with tm.assert_produces_warning(RuntimeWarning): + result = idx.union(idx[:1]) + + tm.assert_index_equal(result, idx) + + # sort=None + with tm.assert_produces_warning(RuntimeWarning): + result = idx.union(idx[:1], sort=None) + tm.assert_index_equal(result, idx) + + # sort=False + result = idx.union(idx[:1], sort=False) + tm.assert_index_equal(result, idx) + + def test_union_sort_other_incomparable_true(self): + idx = Index([1, pd.Timestamp("2000")]) + with pytest.raises(TypeError, match=".*"): + idx.union(idx[:1], sort=True) + + def test_intersection_equal_sort_true(self): + idx = Index(["c", "a", "b"]) + sorted_ = Index(["a", "b", "c"]) + tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_) + + def test_intersection_base(self, sort): + # (same results for py2 and py3 but sortedness not tested elsewhere) + index = Index([0, "a", 1, "b", 2, "c"]) + first = index[:5] + second = index[:3] + + expected = Index([0, 1, "a"]) if sort is None else Index([0, "a", 1]) + result = first.intersection(second, sort=sort) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("klass", [np.array, Series, list]) + def test_intersection_different_type_base(self, klass, sort): + # GH 10149 + index = Index([0, "a", 1, "b", 2, "c"]) + first = index[:5] + second = index[:3] + + result = first.intersection(klass(second.values), sort=sort) + assert tm.equalContents(result, second) + + def test_intersection_nosort(self): + result = Index(["c", "b", "a"]).intersection(["b", "a"]) + expected = Index(["b", "a"]) + tm.assert_index_equal(result, expected) + + def test_intersection_equal_sort(self): + idx = Index(["c", "a", "b"]) + tm.assert_index_equal(idx.intersection(idx, sort=False), idx) + tm.assert_index_equal(idx.intersection(idx, sort=None), idx) + + def test_intersection_str_dates(self, sort): + dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] + + i1 = Index(dt_dates, dtype=object) + i2 = Index(["aa"], 
dtype=object) + result = i2.intersection(i1, sort=sort) + + assert len(result) == 0 + + @pytest.mark.parametrize( + "index2,expected_arr", + [(Index(["B", "D"]), ["B"]), (Index(["B", "D", "A"]), ["A", "B"])], + ) + def test_intersection_non_monotonic_non_unique(self, index2, expected_arr, sort): + # non-monotonic non-unique + index1 = Index(["A", "B", "A", "C"]) + expected = Index(expected_arr, dtype="object") + result = index1.intersection(index2, sort=sort) + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + + def test_difference_base(self, sort): + # (same results for py2 and py3 but sortedness not tested elsewhere) + index = Index([0, "a", 1, "b", 2, "c"]) + first = index[:4] + second = index[3:] + + result = first.difference(second, sort) + expected = Index([0, "a", 1]) + if sort is None: + expected = Index(safe_sort(expected)) + tm.assert_index_equal(result, expected) + + def test_symmetric_difference(self): + # (same results for py2 and py3 but sortedness not tested elsewhere) + index = Index([0, "a", 1, "b", 2, "c"]) + first = index[:4] + second = index[3:] + + result = first.symmetric_difference(second) + expected = Index([0, 1, 2, "a", "c"]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "method,expected,sort", + [ + ( + "intersection", + np.array( + [(1, "A"), (2, "A"), (1, "B"), (2, "B")], + dtype=[("num", int), ("let", "S1")], + ), + False, + ), + ( + "intersection", + np.array( + [(1, "A"), (1, "B"), (2, "A"), (2, "B")], + dtype=[("num", int), ("let", "S1")], + ), + None, + ), + ( + "union", + np.array( + [(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")], + dtype=[("num", int), ("let", "S1")], + ), + None, + ), + ], + ) + def test_tuple_union_bug(self, method, expected, sort): + index1 = Index( + np.array( + [(1, "A"), (2, "A"), (1, "B"), (2, "B")], + dtype=[("num", int), ("let", "S1")], + ) + ) + index2 = Index( + np.array( + [(1, "A"), (2, "A"), (1, "B"), (2, "B"), (1, "C"), (2, "C")], + dtype=[("num", int), ("let", "S1")], + ) + ) + + result = getattr(index1, method)(index2, sort=sort) + assert result.ndim == 1 + + expected = Index(expected) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("first_list", [["b", "a"], []]) + @pytest.mark.parametrize("second_list", [["a", "b"], []]) + @pytest.mark.parametrize( + "first_name, second_name, expected_name", + [("A", "B", None), (None, "B", None), ("A", None, None)], + ) + def test_union_name_preservation( + self, first_list, second_list, first_name, second_name, expected_name, sort + ): + first = Index(first_list, name=first_name) + second = Index(second_list, name=second_name) + union = first.union(second, sort=sort) + + vals = set(first_list).union(second_list) + + if sort is None and len(first_list) > 0 and len(second_list) > 0: + expected = Index(sorted(vals), name=expected_name) + tm.assert_index_equal(union, expected) + else: + expected = Index(vals, name=expected_name) + tm.equalContents(union, expected) + + @pytest.mark.parametrize( + "diff_type, expected", + [["difference", [1, "B"]], ["symmetric_difference", [1, 2, "B", "C"]]], + ) + def test_difference_object_type(self, diff_type, expected): + # GH 13432 + idx1 = Index([0, 1, "A", "B"]) + idx2 = Index([0, 2, "A", "C"]) + result = getattr(idx1, diff_type)(idx2) + expected = Index(expected) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_where.py 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_where.py new file mode 100644 index 00000000..0c896973 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/base_class/test_where.py @@ -0,0 +1,13 @@ +import numpy as np + +from pandas import Index +import pandas._testing as tm + + +class TestWhere: + def test_where_intlike_str_doesnt_cast_ints(self): + idx = Index(range(3)) + mask = np.array([True, False, True]) + res = idx.where(mask, "2") + expected = Index([0, "2", 2]) + tm.assert_index_equal(res, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_append.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_append.py new file mode 100644 index 00000000..b48c3219 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_append.py @@ -0,0 +1,62 @@ +import pytest + +from pandas import ( + CategoricalIndex, + Index, +) +import pandas._testing as tm + + +class TestAppend: + @pytest.fixture + def ci(self): + categories = list("cab") + return CategoricalIndex(list("aabbca"), categories=categories, ordered=False) + + def test_append(self, ci): + # append cats with the same categories + result = ci[:3].append(ci[3:]) + tm.assert_index_equal(result, ci, exact=True) + + foos = [ci[:1], ci[1:3], ci[3:]] + result = foos[0].append(foos[1:]) + tm.assert_index_equal(result, ci, exact=True) + + def test_append_empty(self, ci): + # empty + result = ci.append([]) + tm.assert_index_equal(result, ci, exact=True) + + def test_append_mismatched_categories(self, ci): + # appending with different categories or reordered is not ok + msg = "all inputs must be Index" + with pytest.raises(TypeError, match=msg): + ci.append(ci.values.set_categories(list("abcd"))) + with pytest.raises(TypeError, match=msg): + ci.append(ci.values.reorder_categories(list("abc"))) + + def test_append_category_objects(self, ci): + # with objects + result = ci.append(Index(["c", "a"])) + expected = CategoricalIndex(list("aabbcaca"), categories=ci.categories) + tm.assert_index_equal(result, expected, exact=True) + + def test_append_non_categories(self, ci): + # invalid objects -> cast to object via concat_compat + result = ci.append(Index(["a", "d"])) + expected = Index(["a", "a", "b", "b", "c", "a", "a", "d"]) + tm.assert_index_equal(result, expected, exact=True) + + def test_append_object(self, ci): + # GH#14298 - if base object is not categorical -> coerce to object + result = Index(["c", "a"]).append(ci) + expected = Index(list("caaabbca")) + tm.assert_index_equal(result, expected, exact=True) + + def test_append_to_another(self): + # hits Index._concat + fst = Index(["a", "b"]) + snd = CategoricalIndex(["d", "e"]) + result = fst.append(snd) + expected = Index(["a", "b", "d", "e"]) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_astype.py new file mode 100644 index 00000000..da1d692f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_astype.py @@ -0,0 +1,90 @@ +from datetime import date + +import numpy as np +import pytest + +from pandas 
import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + Index, + IntervalIndex, +) +import pandas._testing as tm + + +class TestAstype: + def test_astype(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + + result = ci.astype(object) + tm.assert_index_equal(result, Index(np.array(ci))) + + # this IS equal, but not the same class + assert result.equals(ci) + assert isinstance(result, Index) + assert not isinstance(result, CategoricalIndex) + + # interval + ii = IntervalIndex.from_arrays(left=[-0.001, 2.0], right=[2, 4], closed="right") + + ci = CategoricalIndex( + Categorical.from_codes([0, 1, -1], categories=ii, ordered=True) + ) + + result = ci.astype("interval") + expected = ii.take([0, 1, -1], allow_fill=True, fill_value=np.nan) + tm.assert_index_equal(result, expected) + + result = IntervalIndex(result.values) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("name", [None, "foo"]) + @pytest.mark.parametrize("dtype_ordered", [True, False]) + @pytest.mark.parametrize("index_ordered", [True, False]) + def test_astype_category(self, name, dtype_ordered, index_ordered): + # GH#18630 + index = CategoricalIndex( + list("aabbca"), categories=list("cab"), ordered=index_ordered + ) + if name: + index = index.rename(name) + + # standard categories + dtype = CategoricalDtype(ordered=dtype_ordered) + result = index.astype(dtype) + expected = CategoricalIndex( + index.tolist(), + name=name, + categories=index.categories, + ordered=dtype_ordered, + ) + tm.assert_index_equal(result, expected) + + # non-standard categories + dtype = CategoricalDtype(index.unique().tolist()[:-1], dtype_ordered) + result = index.astype(dtype) + expected = CategoricalIndex(index.tolist(), name=name, dtype=dtype) + tm.assert_index_equal(result, expected) + + if dtype_ordered is False: + # dtype='category' can't specify ordered, so only test once + result = index.astype("category") + expected = index + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("box", [True, False]) + def test_categorical_date_roundtrip(self, box): + # astype to categorical and back should preserve date objects + v = date.today() + + obj = Index([v, v]) + assert obj.dtype == object + if box: + obj = obj.array + + cat = obj.astype("category") + + rtrip = cat.astype(object) + assert rtrip.dtype == object + assert type(rtrip[0]) is date diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_category.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_category.py new file mode 100644 index 00000000..64cbe657 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_category.py @@ -0,0 +1,390 @@ +import numpy as np +import pytest + +from pandas._libs import index as libindex +from pandas._libs.arrays import NDArrayBacked + +import pandas as pd +from pandas import ( + Categorical, + CategoricalDtype, +) +import pandas._testing as tm +from pandas.core.indexes.api import ( + CategoricalIndex, + Index, +) + + +class TestCategoricalIndex: + @pytest.fixture + def simple_index(self) -> CategoricalIndex: + return CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + + def test_can_hold_identifiers(self): + idx = CategoricalIndex(list("aabbca"), categories=None, ordered=False) + key = idx[0] + assert idx._can_hold_identifiers_and_holds_name(key) is True + + def test_insert(self, simple_index): + ci = simple_index + categories = ci.categories + + # test 0th 
element + result = ci.insert(0, "a") + expected = CategoricalIndex(list("aaabbca"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + # test Nth element that follows Python list behavior + result = ci.insert(-1, "a") + expected = CategoricalIndex(list("aabbcaa"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + # test empty + result = CategoricalIndex([], categories=categories).insert(0, "a") + expected = CategoricalIndex(["a"], categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + # invalid -> cast to object + expected = ci.astype(object).insert(0, "d") + result = ci.insert(0, "d") + tm.assert_index_equal(result, expected, exact=True) + + # GH 18295 (test missing) + expected = CategoricalIndex(["a", np.nan, "a", "b", "c", "b"]) + for na in (np.nan, pd.NaT, None): + result = CategoricalIndex(list("aabcb")).insert(1, na) + tm.assert_index_equal(result, expected) + + def test_insert_na_mismatched_dtype(self): + ci = CategoricalIndex([0, 1, 1]) + result = ci.insert(0, pd.NaT) + expected = Index([pd.NaT, 0, 1, 1], dtype=object) + tm.assert_index_equal(result, expected) + + def test_delete(self, simple_index): + ci = simple_index + categories = ci.categories + + result = ci.delete(0) + expected = CategoricalIndex(list("abbca"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + result = ci.delete(-1) + expected = CategoricalIndex(list("aabbc"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + with tm.external_error_raised((IndexError, ValueError)): + # Either depending on NumPy version + ci.delete(10) + + @pytest.mark.parametrize( + "data, non_lexsorted_data", + [[[1, 2, 3], [9, 0, 1, 2, 3]], [list("abc"), list("fabcd")]], + ) + def test_is_monotonic(self, data, non_lexsorted_data): + c = CategoricalIndex(data) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + c = CategoricalIndex(data, ordered=True) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + c = CategoricalIndex(data, categories=reversed(data)) + assert c.is_monotonic_increasing is False + assert c.is_monotonic_decreasing is True + + c = CategoricalIndex(data, categories=reversed(data), ordered=True) + assert c.is_monotonic_increasing is False + assert c.is_monotonic_decreasing is True + + # test when data is neither monotonic increasing nor decreasing + reordered_data = [data[0], data[2], data[1]] + c = CategoricalIndex(reordered_data, categories=reversed(data)) + assert c.is_monotonic_increasing is False + assert c.is_monotonic_decreasing is False + + # non lexsorted categories + categories = non_lexsorted_data + + c = CategoricalIndex(categories[:2], categories=categories) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + c = CategoricalIndex(categories[1:3], categories=categories) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + def test_has_duplicates(self): + idx = CategoricalIndex([0, 0, 0], name="foo") + assert idx.is_unique is False + assert idx.has_duplicates is True + + idx = CategoricalIndex([0, 1], categories=[2, 3], name="foo") + assert idx.is_unique is False + assert idx.has_duplicates is True + + idx = CategoricalIndex([0, 1, 2, 3], categories=[1, 2, 3], name="foo") + assert idx.is_unique is True + assert idx.has_duplicates is False + + @pytest.mark.parametrize( + "data, categories, expected", + [ + ( + [1, 1, 
1], + [1, 2, 3], + { + "first": np.array([False, True, True]), + "last": np.array([True, True, False]), + False: np.array([True, True, True]), + }, + ), + ( + [1, 1, 1], + list("abc"), + { + "first": np.array([False, True, True]), + "last": np.array([True, True, False]), + False: np.array([True, True, True]), + }, + ), + ( + [2, "a", "b"], + list("abc"), + { + "first": np.zeros(shape=(3), dtype=np.bool_), + "last": np.zeros(shape=(3), dtype=np.bool_), + False: np.zeros(shape=(3), dtype=np.bool_), + }, + ), + ( + list("abb"), + list("abc"), + { + "first": np.array([False, False, True]), + "last": np.array([False, True, False]), + False: np.array([False, True, True]), + }, + ), + ], + ) + def test_drop_duplicates(self, data, categories, expected): + idx = CategoricalIndex(data, categories=categories, name="foo") + for keep, e in expected.items(): + tm.assert_numpy_array_equal(idx.duplicated(keep=keep), e) + e = idx[~e] + result = idx.drop_duplicates(keep=keep) + tm.assert_index_equal(result, e) + + @pytest.mark.parametrize( + "data, categories, expected_data", + [ + ([1, 1, 1], [1, 2, 3], [1]), + ([1, 1, 1], list("abc"), [np.nan]), + ([1, 2, "a"], [1, 2, 3], [1, 2, np.nan]), + ([2, "a", "b"], list("abc"), [np.nan, "a", "b"]), + ], + ) + def test_unique(self, data, categories, expected_data, ordered): + dtype = CategoricalDtype(categories, ordered=ordered) + + idx = CategoricalIndex(data, dtype=dtype) + expected = CategoricalIndex(expected_data, dtype=dtype) + tm.assert_index_equal(idx.unique(), expected) + + def test_repr_roundtrip(self): + ci = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) + str(ci) + tm.assert_index_equal(eval(repr(ci)), ci, exact=True) + + # formatting + str(ci) + + # long format + # this is not reprable + ci = CategoricalIndex(np.random.default_rng(2).integers(0, 5, size=100)) + str(ci) + + def test_isin(self): + ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) + tm.assert_numpy_array_equal( + ci.isin(["c"]), np.array([False, False, False, True, False, False]) + ) + tm.assert_numpy_array_equal( + ci.isin(["c", "a", "b"]), np.array([True] * 5 + [False]) + ) + tm.assert_numpy_array_equal( + ci.isin(["c", "a", "b", np.nan]), np.array([True] * 6) + ) + + # mismatched categorical -> coerced to ndarray so doesn't matter + result = ci.isin(ci.set_categories(list("abcdefghi"))) + expected = np.array([True] * 6) + tm.assert_numpy_array_equal(result, expected) + + result = ci.isin(ci.set_categories(list("defghi"))) + expected = np.array([False] * 5 + [True]) + tm.assert_numpy_array_equal(result, expected) + + def test_identical(self): + ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) + ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True) + assert ci1.identical(ci1) + assert ci1.identical(ci1.copy()) + assert not ci1.identical(ci2) + + def test_ensure_copied_data(self): + # gh-12309: Check the "copy" argument of each + # Index.__new__ is honored. + # + # Must be tested separately from other indexes because + # self.values is not an ndarray. 
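+        # The backing data of a CategoricalIndex is a Categorical, so the
+        # copy check below compares the integer codes buffers instead of
+        # .values. The generic idea, as a sketch:
+        #
+        #   a = np.arange(3); b = np.array(a, copy=True)
+        #   np.shares_memory(a, b)  # -> False; copy=True must not alias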
+ index = tm.makeCategoricalIndex(10) + + result = CategoricalIndex(index.values, copy=True) + tm.assert_index_equal(index, result) + assert not np.shares_memory(result._data._codes, index._data._codes) + + result = CategoricalIndex(index.values, copy=False) + assert result._data._codes is index._data._codes + + def test_frame_repr(self): + df = pd.DataFrame({"A": [1, 2, 3]}, index=CategoricalIndex(["a", "b", "c"])) + result = repr(df) + expected = " A\na 1\nb 2\nc 3" + assert result == expected + + +class TestCategoricalIndex2: + def test_view_i8(self): + # GH#25464 + ci = tm.makeCategoricalIndex(100) + msg = "When changing to a larger dtype, its size must be a divisor" + with pytest.raises(ValueError, match=msg): + ci.view("i8") + with pytest.raises(ValueError, match=msg): + ci._data.view("i8") + + ci = ci[:-4] # length divisible by 8 + + res = ci.view("i8") + expected = ci._data.codes.view("i8") + tm.assert_numpy_array_equal(res, expected) + + cat = ci._data + tm.assert_numpy_array_equal(cat.view("i8"), expected) + + @pytest.mark.parametrize( + "dtype, engine_type", + [ + (np.int8, libindex.Int8Engine), + (np.int16, libindex.Int16Engine), + (np.int32, libindex.Int32Engine), + (np.int64, libindex.Int64Engine), + ], + ) + def test_engine_type(self, dtype, engine_type): + if dtype != np.int64: + # num. of uniques required to push CategoricalIndex.codes to a + # dtype (128 categories required for .codes dtype to be int16 etc.) + num_uniques = {np.int8: 1, np.int16: 128, np.int32: 32768}[dtype] + ci = CategoricalIndex(range(num_uniques)) + else: + # having 2**32 - 2**31 categories would be very memory-intensive, + # so we cheat a bit with the dtype + ci = CategoricalIndex(range(32768)) # == 2**16 - 2**(16 - 1) + arr = ci.values._ndarray.astype("int64") + NDArrayBacked.__init__(ci._data, arr, ci.dtype) + assert np.issubdtype(ci.codes.dtype, dtype) + assert isinstance(ci._engine, engine_type) + + @pytest.mark.parametrize( + "func,op_name", + [ + (lambda idx: idx - idx, "__sub__"), + (lambda idx: idx + idx, "__add__"), + (lambda idx: idx - ["a", "b"], "__sub__"), + (lambda idx: idx + ["a", "b"], "__add__"), + (lambda idx: ["a", "b"] - idx, "__rsub__"), + (lambda idx: ["a", "b"] + idx, "__radd__"), + ], + ) + def test_disallow_addsub_ops(self, func, op_name): + # GH 10039 + # set ops (+/-) raise TypeError + idx = Index(Categorical(["a", "b"])) + cat_or_list = "'(Categorical|list)' and '(Categorical|list)'" + msg = "|".join( + [ + f"cannot perform {op_name} with this index type: CategoricalIndex", + "can only concatenate list", + rf"unsupported operand type\(s\) for [\+-]: {cat_or_list}", + ] + ) + with pytest.raises(TypeError, match=msg): + func(idx) + + def test_method_delegation(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef")) + result = ci.set_categories(list("cab")) + tm.assert_index_equal( + result, CategoricalIndex(list("aabbca"), categories=list("cab")) + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + result = ci.rename_categories(list("efg")) + tm.assert_index_equal( + result, CategoricalIndex(list("ffggef"), categories=list("efg")) + ) + + # GH18862 (let rename_categories take callables) + result = ci.rename_categories(lambda x: x.upper()) + tm.assert_index_equal( + result, CategoricalIndex(list("AABBCA"), categories=list("CAB")) + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + result = ci.add_categories(["d"]) + tm.assert_index_equal( + result, CategoricalIndex(list("aabbca"), categories=list("cabd")) + ) + + ci = 
CategoricalIndex(list("aabbca"), categories=list("cab")) + result = ci.remove_categories(["c"]) + tm.assert_index_equal( + result, + CategoricalIndex(list("aabb") + [np.nan] + ["a"], categories=list("ab")), + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef")) + result = ci.as_unordered() + tm.assert_index_equal(result, ci) + + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef")) + result = ci.as_ordered() + tm.assert_index_equal( + result, + CategoricalIndex(list("aabbca"), categories=list("cabdef"), ordered=True), + ) + + # invalid + msg = "cannot use inplace with CategoricalIndex" + with pytest.raises(ValueError, match=msg): + ci.set_categories(list("cab"), inplace=True) + + def test_remove_maintains_order(self): + ci = CategoricalIndex(list("abcdda"), categories=list("abcd")) + result = ci.reorder_categories(["d", "c", "b", "a"], ordered=True) + tm.assert_index_equal( + result, + CategoricalIndex(list("abcdda"), categories=list("dcba"), ordered=True), + ) + result = result.remove_categories(["c"]) + tm.assert_index_equal( + result, + CategoricalIndex( + ["a", "b", np.nan, "d", "d", "a"], categories=list("dba"), ordered=True + ), + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_constructors.py new file mode 100644 index 00000000..f0c5307f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_constructors.py @@ -0,0 +1,142 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + Index, +) +import pandas._testing as tm + + +class TestCategoricalIndexConstructors: + def test_construction_disallows_scalar(self): + msg = "must be called with a collection of some kind" + with pytest.raises(TypeError, match=msg): + CategoricalIndex(data=1, categories=list("abcd"), ordered=False) + with pytest.raises(TypeError, match=msg): + CategoricalIndex(categories=list("abcd"), ordered=False) + + def test_construction(self): + ci = CategoricalIndex(list("aabbca"), categories=list("abcd"), ordered=False) + categories = ci.categories + + result = Index(ci) + tm.assert_index_equal(result, ci, exact=True) + assert not result.ordered + + result = Index(ci.values) + tm.assert_index_equal(result, ci, exact=True) + assert not result.ordered + + # empty + result = CategoricalIndex([], categories=categories) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal(result.codes, np.array([], dtype="int8")) + assert not result.ordered + + # passing categories + result = CategoricalIndex(list("aabbca"), categories=categories) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + + c = Categorical(list("aabbca")) + result = CategoricalIndex(c) + tm.assert_index_equal(result.categories, Index(list("abc"))) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + assert not result.ordered + + result = CategoricalIndex(c, categories=categories) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + assert not result.ordered + + ci = CategoricalIndex(c, categories=list("abcd")) + result = CategoricalIndex(ci) + tm.assert_index_equal(result.categories, Index(categories)) + 
tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + assert not result.ordered + + result = CategoricalIndex(ci, categories=list("ab")) + tm.assert_index_equal(result.categories, Index(list("ab"))) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, -1, 0], dtype="int8") + ) + assert not result.ordered + + result = CategoricalIndex(ci, categories=list("ab"), ordered=True) + tm.assert_index_equal(result.categories, Index(list("ab"))) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, -1, 0], dtype="int8") + ) + assert result.ordered + + result = CategoricalIndex(ci, categories=list("ab"), ordered=True) + expected = CategoricalIndex( + ci, categories=list("ab"), ordered=True, dtype="category" + ) + tm.assert_index_equal(result, expected, exact=True) + + # turn me to an Index + result = Index(np.array(ci)) + assert isinstance(result, Index) + assert not isinstance(result, CategoricalIndex) + + def test_construction_with_dtype(self): + # specify dtype + ci = CategoricalIndex(list("aabbca"), categories=list("abc"), ordered=False) + + result = Index(np.array(ci), dtype="category") + tm.assert_index_equal(result, ci, exact=True) + + result = Index(np.array(ci).tolist(), dtype="category") + tm.assert_index_equal(result, ci, exact=True) + + # these are generally only equal when the categories are reordered + ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + + result = Index(np.array(ci), dtype="category").reorder_categories(ci.categories) + tm.assert_index_equal(result, ci, exact=True) + + # make sure indexes are handled + idx = Index(range(3)) + expected = CategoricalIndex([0, 1, 2], categories=idx, ordered=True) + result = CategoricalIndex(idx, categories=idx, ordered=True) + tm.assert_index_equal(result, expected, exact=True) + + def test_construction_empty_with_bool_categories(self): + # see GH#22702 + cat = CategoricalIndex([], categories=[True, False]) + categories = sorted(cat.categories.tolist()) + assert categories == [False, True] + + def test_construction_with_categorical_dtype(self): + # construction with CategoricalDtype + # GH#18109 + data, cats, ordered = "a a b b".split(), "c b a".split(), True + dtype = CategoricalDtype(categories=cats, ordered=ordered) + + result = CategoricalIndex(data, dtype=dtype) + expected = CategoricalIndex(data, categories=cats, ordered=ordered) + tm.assert_index_equal(result, expected, exact=True) + + # GH#19032 + result = Index(data, dtype=dtype) + tm.assert_index_equal(result, expected, exact=True) + + # error when combining categories/ordered and dtype kwargs + msg = "Cannot specify `categories` or `ordered` together with `dtype`." 
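+        # A CategoricalDtype already pins down both the categories and the
+        # ordered flag, so passing either one alongside dtype would be
+        # redundant at best and contradictory at worst; the constructor
+        # rejects the combination even when the values agree:
+        #
+        #   CategoricalIndex(data, categories=cats, dtype=dtype)  # ValueError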
+ with pytest.raises(ValueError, match=msg): + CategoricalIndex(data, categories=cats, dtype=dtype) + + with pytest.raises(ValueError, match=msg): + CategoricalIndex(data, ordered=ordered, dtype=dtype) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_equals.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_equals.py new file mode 100644 index 00000000..a8353f30 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_equals.py @@ -0,0 +1,96 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalIndex, + Index, + MultiIndex, +) + + +class TestEquals: + def test_equals_categorical(self): + ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) + ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True) + + assert ci1.equals(ci1) + assert not ci1.equals(ci2) + assert ci1.equals(ci1.astype(object)) + assert ci1.astype(object).equals(ci1) + + assert (ci1 == ci1).all() + assert not (ci1 != ci1).all() + assert not (ci1 > ci1).all() + assert not (ci1 < ci1).all() + assert (ci1 <= ci1).all() + assert (ci1 >= ci1).all() + + assert not (ci1 == 1).all() + assert (ci1 == Index(["a", "b"])).all() + assert (ci1 == ci1.values).all() + + # invalid comparisons + with pytest.raises(ValueError, match="Lengths must match"): + ci1 == Index(["a", "b", "c"]) + + msg = "Categoricals can only be compared if 'categories' are the same" + with pytest.raises(TypeError, match=msg): + ci1 == ci2 + with pytest.raises(TypeError, match=msg): + ci1 == Categorical(ci1.values, ordered=False) + with pytest.raises(TypeError, match=msg): + ci1 == Categorical(ci1.values, categories=list("abc")) + + # tests + # make sure that we are testing for category inclusion properly + ci = CategoricalIndex(list("aabca"), categories=["c", "a", "b"]) + assert not ci.equals(list("aabca")) + # Same categories, but different order + # Unordered + assert ci.equals(CategoricalIndex(list("aabca"))) + # Ordered + assert not ci.equals(CategoricalIndex(list("aabca"), ordered=True)) + assert ci.equals(ci.copy()) + + ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) + assert not ci.equals(list("aabca")) + assert not ci.equals(CategoricalIndex(list("aabca"))) + assert ci.equals(ci.copy()) + + ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) + assert not ci.equals(list("aabca") + [np.nan]) + assert ci.equals(CategoricalIndex(list("aabca") + [np.nan])) + assert not ci.equals(CategoricalIndex(list("aabca") + [np.nan], ordered=True)) + assert ci.equals(ci.copy()) + + def test_equals_categorical_unordered(self): + # https://github.com/pandas-dev/pandas/issues/16603 + a = CategoricalIndex(["A"], categories=["A", "B"]) + b = CategoricalIndex(["A"], categories=["B", "A"]) + c = CategoricalIndex(["C"], categories=["B", "A"]) + assert a.equals(b) + assert not a.equals(c) + assert not b.equals(c) + + def test_equals_non_category(self): + # GH#37667 Case where other contains a value not among ci's + # categories ("D") and also contains np.nan + ci = CategoricalIndex(["A", "B", np.nan, np.nan]) + other = Index(["A", "B", "D", np.nan]) + + assert not ci.equals(other) + + def test_equals_multiindex(self): + # dont raise NotImplementedError when calling is_dtype_compat + + mi = MultiIndex.from_arrays([["A", "B", "C", "D"], range(4)]) + ci = mi.to_flat_index().astype("category") + + assert not ci.equals(mi) + + def 
test_equals_string_dtype(self, any_string_dtype): + # GH#55364 + idx = CategoricalIndex(list("abc"), name="B") + other = Index(["a", "b", "c"], name="B", dtype=any_string_dtype) + assert idx.equals(other) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_fillna.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_fillna.py new file mode 100644 index 00000000..09de578f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_fillna.py @@ -0,0 +1,54 @@ +import numpy as np +import pytest + +from pandas import CategoricalIndex +import pandas._testing as tm + + +class TestFillNA: + def test_fillna_categorical(self): + # GH#11343 + idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name="x") + # fill by value in categories + exp = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name="x") + tm.assert_index_equal(idx.fillna(1.0), exp) + + cat = idx._data + + # fill by value not in categories raises TypeError on EA, casts on CI + msg = "Cannot setitem on a Categorical with a new category" + with pytest.raises(TypeError, match=msg): + cat.fillna(2.0) + + result = idx.fillna(2.0) + expected = idx.astype(object).fillna(2.0) + tm.assert_index_equal(result, expected) + + def test_fillna_copies_with_no_nas(self): + # Nothing to fill, should still get a copy for the Categorical method, + # but OK to get a view on CategoricalIndex method + ci = CategoricalIndex([0, 1, 1]) + result = ci.fillna(0) + assert result is not ci + assert tm.shares_memory(result, ci) + + # But at the EA level we always get a copy. + cat = ci._data + result = cat.fillna(0) + assert result._ndarray is not cat._ndarray + assert result._ndarray.base is None + assert not tm.shares_memory(result, cat) + + def test_fillna_validates_with_no_nas(self): + # We validate the fill value even if fillna is a no-op + ci = CategoricalIndex([2, 3, 3]) + cat = ci._data + + msg = "Cannot setitem on a Categorical with a new category" + res = ci.fillna(False) + # nothing to fill, so we dont cast + tm.assert_index_equal(res, ci) + + # Same check directly on the Categorical + with pytest.raises(TypeError, match=msg): + cat.fillna(False) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_formats.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_formats.py new file mode 100644 index 00000000..7dbcaaa8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_formats.py @@ -0,0 +1,113 @@ +""" +Tests for CategoricalIndex.__repr__ and related methods. 
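+
+The expected strings asserted below are exact; as a shape reference, a short
+repr renders on one line, e.g.
+
+    CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')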
+""" +import pandas._config.config as cf + +from pandas import CategoricalIndex + + +class TestCategoricalIndexRepr: + def test_format_different_scalar_lengths(self): + # GH#35439 + idx = CategoricalIndex(["aaaaaaaaa", "b"]) + expected = ["aaaaaaaaa", "b"] + assert idx.format() == expected + + def test_string_categorical_index_repr(self): + # short + idx = CategoricalIndex(["a", "bb", "ccc"]) + expected = """CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == expected + + # multiple lines + idx = CategoricalIndex(["a", "bb", "ccc"] * 10) + expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', + 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', + 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], + categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # truncated + idx = CategoricalIndex(["a", "bb", "ccc"] * 100) + expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', + ... + 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], + categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)""" # noqa: E501 + + assert repr(idx) == expected + + # larger categories + idx = CategoricalIndex(list("abcdefghijklmmo")) + expected = """CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', + 'm', 'm', 'o'], + categories=['a', 'b', 'c', 'd', ..., 'k', 'l', 'm', 'o'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # short + idx = CategoricalIndex(["あ", "いい", "ううう"]) + expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == expected + + # multiple lines + idx = CategoricalIndex(["あ", "いい", "ううう"] * 10) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', + 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # truncated + idx = CategoricalIndex(["あ", "いい", "ううう"] * 100) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', + ... 
+ 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa: E501 + + assert repr(idx) == expected + + # larger categories + idx = CategoricalIndex(list("あいうえおかきくけこさしすせそ")) + expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', + 'す', 'せ', 'そ'], + categories=['あ', 'い', 'う', 'え', ..., 'し', 'す', 'せ', 'そ'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # Enable Unicode option ----------------------------------------- + with cf.option_context("display.unicode.east_asian_width", True): + # short + idx = CategoricalIndex(["あ", "いい", "ううう"]) + expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == expected + + # multiple lines + idx = CategoricalIndex(["あ", "いい", "ううう"] * 10) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', + 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # truncated + idx = CategoricalIndex(["あ", "いい", "ううう"] * 100) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', + ... + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', + 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa: E501 + + assert repr(idx) == expected + + # larger categories + idx = CategoricalIndex(list("あいうえおかきくけこさしすせそ")) + expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', + 'さ', 'し', 'す', 'せ', 'そ'], + categories=['あ', 'い', 'う', 'え', ..., 'し', 'す', 'せ', 'そ'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_indexing.py new file mode 100644 index 00000000..49eb79da --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_indexing.py @@ -0,0 +1,420 @@ +import numpy as np +import pytest + +from pandas.errors import InvalidIndexError + +import pandas as pd +from pandas import ( + CategoricalIndex, + Index, + IntervalIndex, + Timestamp, +) +import pandas._testing as tm + + +class TestTake: + def test_take_fill_value(self): + # GH 12631 + + # numeric category + idx = CategoricalIndex([1, 2, 3], name="xxx") + result = idx.take(np.array([1, 0, -1])) + expected = CategoricalIndex([2, 1, 3], name="xxx") + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3], name="xxx") + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = CategoricalIndex([2, 1, 3], name="xxx") + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # object category + idx = CategoricalIndex( + list("CBA"), categories=list("ABC"), ordered=True, name="xxx" + ) + result = 
idx.take(np.array([1, 0, -1])) + expected = CategoricalIndex( + list("BCA"), categories=list("ABC"), ordered=True, name="xxx" + ) + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = CategoricalIndex( + ["B", "C", np.nan], categories=list("ABC"), ordered=True, name="xxx" + ) + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = CategoricalIndex( + list("BCA"), categories=list("ABC"), ordered=True, name="xxx" + ) + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + msg = ( + "When allow_fill=True and fill_value is not None, " + "all indices must be >= -1" + ) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "index -5 is out of bounds for (axis 0 with )?size 3" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + def test_take_fill_value_datetime(self): + # datetime category + idx = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx") + idx = CategoricalIndex(idx) + result = idx.take(np.array([1, 0, -1])) + expected = pd.DatetimeIndex( + ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx" + ) + expected = CategoricalIndex(expected) + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = pd.DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx") + exp_cats = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"]) + expected = CategoricalIndex(expected, categories=exp_cats) + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = pd.DatetimeIndex( + ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx" + ) + expected = CategoricalIndex(expected) + tm.assert_index_equal(result, expected) + + msg = ( + "When allow_fill=True and fill_value is not None, " + "all indices must be >= -1" + ) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "index -5 is out of bounds for (axis 0 with )?size 3" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + def test_take_invalid_kwargs(self): + idx = CategoricalIndex([1, 2, 3], name="foo") + indices = [1, 0, -1] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + idx.take(indices, foo=2) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, out=indices) + + msg = "the 'mode' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, mode="clip") + + +class TestGetLoc: + def test_get_loc(self): + # GH 12531 + cidx1 = CategoricalIndex(list("abcde"), categories=list("edabc")) + idx1 = Index(list("abcde")) + assert cidx1.get_loc("a") == idx1.get_loc("a") + assert cidx1.get_loc("e") == idx1.get_loc("e") + + for i in [cidx1, idx1]: + with pytest.raises(KeyError, match="'NOT-EXIST'"): + i.get_loc("NOT-EXIST") + + # 
non-unique + cidx2 = CategoricalIndex(list("aacded"), categories=list("edabc")) + idx2 = Index(list("aacded")) + + # results in bool array + res = cidx2.get_loc("d") + tm.assert_numpy_array_equal(res, idx2.get_loc("d")) + tm.assert_numpy_array_equal( + res, np.array([False, False, False, True, False, True]) + ) + # unique element results in scalar + res = cidx2.get_loc("e") + assert res == idx2.get_loc("e") + assert res == 4 + + for i in [cidx2, idx2]: + with pytest.raises(KeyError, match="'NOT-EXIST'"): + i.get_loc("NOT-EXIST") + + # non-unique, sliceable + cidx3 = CategoricalIndex(list("aabbb"), categories=list("abc")) + idx3 = Index(list("aabbb")) + + # results in slice + res = cidx3.get_loc("a") + assert res == idx3.get_loc("a") + assert res == slice(0, 2, None) + + res = cidx3.get_loc("b") + assert res == idx3.get_loc("b") + assert res == slice(2, 5, None) + + for i in [cidx3, idx3]: + with pytest.raises(KeyError, match="'c'"): + i.get_loc("c") + + def test_get_loc_unique(self): + cidx = CategoricalIndex(list("abc")) + result = cidx.get_loc("b") + assert result == 1 + + def test_get_loc_monotonic_nonunique(self): + cidx = CategoricalIndex(list("abbc")) + result = cidx.get_loc("b") + expected = slice(1, 3, None) + assert result == expected + + def test_get_loc_nonmonotonic_nonunique(self): + cidx = CategoricalIndex(list("abcb")) + result = cidx.get_loc("b") + expected = np.array([False, True, False, True], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + def test_get_loc_nan(self): + # GH#41933 + ci = CategoricalIndex(["A", "B", np.nan]) + res = ci.get_loc(np.nan) + + assert res == 2 + + +class TestGetIndexer: + def test_get_indexer_base(self): + # Determined by cat ordering. + idx = CategoricalIndex(list("cab"), categories=list("cab")) + expected = np.arange(len(idx), dtype=np.intp) + + actual = idx.get_indexer(idx) + tm.assert_numpy_array_equal(expected, actual) + + with pytest.raises(ValueError, match="Invalid fill method"): + idx.get_indexer(idx, method="invalid") + + def test_get_indexer_requires_unique(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + oidx = Index(np.array(ci)) + + msg = "Reindexing only valid with uniquely valued Index objects" + + for n in [1, 2, 5, len(ci)]: + finder = oidx[np.random.default_rng(2).integers(0, len(ci), size=n)] + + with pytest.raises(InvalidIndexError, match=msg): + ci.get_indexer(finder) + + # see gh-17323 + # + # Even when indexer is equal to the + # members in the index, we should + # respect duplicates instead of taking + # the fast-track path. 
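+ # Editor's note (sketch, not upstream code): with a non-unique target the
+ # supported spelling is get_indexer_non_unique, which returns a tuple of
+ # (indexer, missing) instead of raising, e.g.:
+ #   indexer, missing = ci.get_indexer_non_unique(list("ab"))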
+ for finder in [list("aabbca"), list("aababca")]: + with pytest.raises(InvalidIndexError, match=msg): + ci.get_indexer(finder) + + def test_get_indexer_non_unique(self): + idx1 = CategoricalIndex(list("aabcde"), categories=list("edabc")) + idx2 = CategoricalIndex(list("abf")) + + for indexer in [idx2, list("abf"), Index(list("abf"))]: + msg = "Reindexing only valid with uniquely valued Index objects" + with pytest.raises(InvalidIndexError, match=msg): + idx1.get_indexer(indexer) + + r1, _ = idx1.get_indexer_non_unique(indexer) + expected = np.array([0, 1, 2, -1], dtype=np.intp) + tm.assert_almost_equal(r1, expected) + + def test_get_indexer_method(self): + idx1 = CategoricalIndex(list("aabcde"), categories=list("edabc")) + idx2 = CategoricalIndex(list("abf")) + + msg = "method pad not yet implemented for CategoricalIndex" + with pytest.raises(NotImplementedError, match=msg): + idx2.get_indexer(idx1, method="pad") + msg = "method backfill not yet implemented for CategoricalIndex" + with pytest.raises(NotImplementedError, match=msg): + idx2.get_indexer(idx1, method="backfill") + + msg = "method nearest not yet implemented for CategoricalIndex" + with pytest.raises(NotImplementedError, match=msg): + idx2.get_indexer(idx1, method="nearest") + + def test_get_indexer_array(self): + arr = np.array( + [Timestamp("1999-12-31 00:00:00"), Timestamp("2000-12-31 00:00:00")], + dtype=object, + ) + cats = [Timestamp("1999-12-31 00:00:00"), Timestamp("2000-12-31 00:00:00")] + ci = CategoricalIndex(cats, categories=cats, ordered=False, dtype="category") + result = ci.get_indexer(arr) + expected = np.array([0, 1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_same_categories_same_order(self): + ci = CategoricalIndex(["a", "b"], categories=["a", "b"]) + + result = ci.get_indexer(CategoricalIndex(["b", "b"], categories=["a", "b"])) + expected = np.array([1, 1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_same_categories_different_order(self): + # https://github.com/pandas-dev/pandas/issues/19551 + ci = CategoricalIndex(["a", "b"], categories=["a", "b"]) + + result = ci.get_indexer(CategoricalIndex(["b", "b"], categories=["b", "a"])) + expected = np.array([1, 1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_nans_in_index_and_target(self): + # GH 45361 + ci = CategoricalIndex([1, 2, np.nan, 3]) + other1 = [2, 3, 4, np.nan] + res1 = ci.get_indexer(other1) + expected1 = np.array([1, 3, -1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(res1, expected1) + other2 = [1, 4, 2, 3] + res2 = ci.get_indexer(other2) + expected2 = np.array([0, -1, 1, 3], dtype=np.intp) + tm.assert_numpy_array_equal(res2, expected2) + + +class TestWhere: + def test_where(self, listlike_box): + klass = listlike_box + + i = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + cond = [True] * len(i) + expected = i + result = i.where(klass(cond)) + tm.assert_index_equal(result, expected) + + cond = [False] + [True] * (len(i) - 1) + expected = CategoricalIndex([np.nan] + i[1:].tolist(), categories=i.categories) + result = i.where(klass(cond)) + tm.assert_index_equal(result, expected) + + def test_where_non_categories(self): + ci = CategoricalIndex(["a", "b", "c", "d"]) + mask = np.array([True, False, True, False]) + + result = ci.where(mask, 2) + expected = Index(["a", 2, "c", 2], dtype=object) + tm.assert_index_equal(result, expected) + + msg = "Cannot setitem on a Categorical with a new category" 
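+ # Editor's note (sketch, not upstream code): the Index-level where falls
+ # back to object dtype for a non-category fill value, while the
+ # Categorical-level _where raises, i.e.:
+ #   ci.where(mask, 2)         # Index(['a', 2, 'c', 2], dtype=object)
+ #   ci._data._where(mask, 2)  # TypeError, as asserted below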
+ with pytest.raises(TypeError, match=msg): + # Test the Categorical method directly + ci._data._where(mask, 2) + + +class TestContains: + def test_contains(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"), ordered=False) + + assert "a" in ci + assert "z" not in ci + assert "e" not in ci + assert np.nan not in ci + + # assert codes NOT in index + assert 0 not in ci + assert 1 not in ci + + def test_contains_nan(self): + ci = CategoricalIndex(list("aabbca") + [np.nan], categories=list("cabdef")) + assert np.nan in ci + + @pytest.mark.parametrize("unwrap", [True, False]) + def test_contains_na_dtype(self, unwrap): + dti = pd.date_range("2016-01-01", periods=100).insert(0, pd.NaT) + pi = dti.to_period("D") + tdi = dti - dti[-1] + ci = CategoricalIndex(dti) + + obj = ci + if unwrap: + obj = ci._data + + assert np.nan in obj + assert None in obj + assert pd.NaT in obj + assert np.datetime64("NaT") in obj + assert np.timedelta64("NaT") not in obj + + obj2 = CategoricalIndex(tdi) + if unwrap: + obj2 = obj2._data + + assert np.nan in obj2 + assert None in obj2 + assert pd.NaT in obj2 + assert np.datetime64("NaT") not in obj2 + assert np.timedelta64("NaT") in obj2 + + obj3 = CategoricalIndex(pi) + if unwrap: + obj3 = obj3._data + + assert np.nan in obj3 + assert None in obj3 + assert pd.NaT in obj3 + assert np.datetime64("NaT") not in obj3 + assert np.timedelta64("NaT") not in obj3 + + @pytest.mark.parametrize( + "item, expected", + [ + (pd.Interval(0, 1), True), + (1.5, True), + (pd.Interval(0.5, 1.5), False), + ("a", False), + (Timestamp(1), False), + (pd.Timedelta(1), False), + ], + ids=str, + ) + def test_contains_interval(self, item, expected): + # GH 23705 + ci = CategoricalIndex(IntervalIndex.from_breaks(range(3))) + result = item in ci + assert result is expected + + def test_contains_list(self): + # GH#21729 + idx = CategoricalIndex([1, 2, 3]) + + assert "a" not in idx + + with pytest.raises(TypeError, match="unhashable type"): + ["a"] in idx + + with pytest.raises(TypeError, match="unhashable type"): + ["a", "b"] in idx diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_map.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_map.py new file mode 100644 index 00000000..baf83659 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_map.py @@ -0,0 +1,144 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + CategoricalIndex, + Index, + Series, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "data, categories", + [ + (list("abcbca"), list("cab")), + (pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)), + ], + ids=["string", "interval"], +) +def test_map_str(data, categories, ordered): + # GH 31202 - override base class since we want to maintain categorical/ordered + index = CategoricalIndex(data, categories=categories, ordered=ordered) + result = index.map(str) + expected = CategoricalIndex( + map(str, data), categories=map(str, categories), ordered=ordered + ) + tm.assert_index_equal(result, expected) + + +def test_map(): + ci = CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True) + result = ci.map(lambda x: x.lower()) + exp = CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True) + tm.assert_index_equal(result, exp) + + ci = CategoricalIndex( + list("ABABC"), categories=list("BAC"), ordered=False, name="XXX" + ) + result = ci.map(lambda x: x.lower()) + exp = 
CategoricalIndex( + list("ababc"), categories=list("bac"), ordered=False, name="XXX" + ) + tm.assert_index_equal(result, exp) + + # GH 12766: Return an index not an array + tm.assert_index_equal( + ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX") + ) + + # change categories dtype + ci = CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False) + + def f(x): + return {"A": 10, "B": 20, "C": 30}.get(x) + + result = ci.map(f) + exp = CategoricalIndex([10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False) + tm.assert_index_equal(result, exp) + + result = ci.map(Series([10, 20, 30], index=["A", "B", "C"])) + tm.assert_index_equal(result, exp) + + result = ci.map({"A": 10, "B": 20, "C": 30}) + tm.assert_index_equal(result, exp) + + +def test_map_with_categorical_series(): + # GH 12756 + a = Index([1, 2, 3, 4]) + b = Series(["even", "odd", "even", "odd"], dtype="category") + c = Series(["even", "odd", "even", "odd"]) + + exp = CategoricalIndex(["odd", "even", "odd", np.nan]) + tm.assert_index_equal(a.map(b), exp) + exp = Index(["odd", "even", "odd", np.nan]) + tm.assert_index_equal(a.map(c), exp) + + +@pytest.mark.parametrize( + ("data", "f", "expected"), + ( + ([1, 1, np.nan], pd.isna, CategoricalIndex([False, False, np.nan])), + ([1, 2, np.nan], pd.isna, Index([False, False, np.nan])), + ([1, 1, np.nan], {1: False}, CategoricalIndex([False, False, np.nan])), + ([1, 2, np.nan], {1: False, 2: False}, Index([False, False, np.nan])), + ( + [1, 1, np.nan], + Series([False, False]), + CategoricalIndex([False, False, np.nan]), + ), + ( + [1, 2, np.nan], + Series([False, False, False]), + Index([False, False, np.nan]), + ), + ), +) +def test_map_with_nan_ignore(data, f, expected): # GH 24241 + values = CategoricalIndex(data) + result = values.map(f, na_action="ignore") + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + ("data", "f", "expected"), + ( + ([1, 1, np.nan], pd.isna, Index([False, False, True])), + ([1, 2, np.nan], pd.isna, Index([False, False, True])), + ([1, 1, np.nan], {1: False}, CategoricalIndex([False, False, np.nan])), + ([1, 2, np.nan], {1: False, 2: False}, Index([False, False, np.nan])), + ( + [1, 1, np.nan], + Series([False, False]), + CategoricalIndex([False, False, np.nan]), + ), + ( + [1, 2, np.nan], + Series([False, False, False]), + Index([False, False, np.nan]), + ), + ), +) +def test_map_with_nan_none(data, f, expected): # GH 24241 + values = CategoricalIndex(data) + result = values.map(f, na_action=None) + tm.assert_index_equal(result, expected) + + +def test_map_with_dict_or_series(): + orig_values = ["a", "B", 1, "a"] + new_values = ["one", 2, 3.0, "one"] + cur_index = CategoricalIndex(orig_values, name="XXX") + expected = CategoricalIndex(new_values, name="XXX", categories=[3.0, 2, "one"]) + + mapper = Series(new_values[:-1], index=orig_values[:-1]) + result = cur_index.map(mapper) + # Order of categories in result can be different + tm.assert_index_equal(result, expected) + + mapper = dict(zip(orig_values[:-1], new_values[:-1])) + result = cur_index.map(mapper) + # Order of categories in result can be different + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_reindex.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_reindex.py new file mode 100644 index 00000000..8ca5c609 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/categorical/test_reindex.py @@ -0,0 +1,78 
@@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalIndex, + Index, + Interval, +) +import pandas._testing as tm + + +class TestReindex: + def test_reindex_list_non_unique(self): + # GH#11586 + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(["a", "c"]) + + def test_reindex_categorical_non_unique(self): + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(Categorical(["a", "c"])) + + def test_reindex_list_non_unique_unused_category(self): + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(["a", "c"]) + + def test_reindex_categorical_non_unique_unused_category(self): + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(Categorical(["a", "c"])) + + def test_reindex_duplicate_target(self): + # See GH25459 + cat = CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c", "d"]) + res, indexer = cat.reindex(["a", "c", "c"]) + exp = Index(["a", "c", "c"], dtype="object") + tm.assert_index_equal(res, exp, exact=True) + tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp)) + + res, indexer = cat.reindex( + CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"]) + ) + exp = CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"]) + tm.assert_index_equal(res, exp, exact=True) + tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp)) + + def test_reindex_empty_index(self): + # See GH16770 + c = CategoricalIndex([]) + res, indexer = c.reindex(["a", "b"]) + tm.assert_index_equal(res, Index(["a", "b"]), exact=True) + tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp)) + + def test_reindex_categorical_added_category(self): + # GH 42424 + ci = CategoricalIndex( + [Interval(0, 1, closed="right"), Interval(1, 2, closed="right")], + ordered=True, + ) + ci_add = CategoricalIndex( + [ + Interval(0, 1, closed="right"), + Interval(1, 2, closed="right"), + Interval(2, 3, closed="right"), + Interval(3, 4, closed="right"), + ], + ordered=True, + ) + result, _ = ci.reindex(ci_add) + expected = ci_add + tm.assert_index_equal(expected, result) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/conftest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/conftest.py new file mode 100644 index 00000000..458a37c9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/conftest.py @@ -0,0 +1,61 @@ +import numpy as np +import pytest + +from pandas import ( + Series, + array, +) +import pandas._testing as tm + + +@pytest.fixture(params=[None, False]) +def sort(request): + """ + Valid values for the 'sort' parameter used in the Index + setops methods (intersection, union, etc.) + + Caution: + Don't confuse this one with the "sort" fixture used + for DataFrame.append or concat. That one has + parameters [True, False]. + + We can't combine them as sort=True is not permitted + in the Index setops methods. 
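+
+ Editor's note (illustrative, not part of the upstream file): the two
+ permitted values differ only in output order, e.g.
+ Index([2, 1]).union(Index([3]), sort=None) gives Index([1, 2, 3]),
+ while sort=False keeps the unsorted order Index([2, 1, 3]).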
+ """ + return request.param + + +@pytest.fixture(params=["D", "3D", "-3D", "H", "2H", "-2H", "T", "2T", "S", "-3S"]) +def freq_sample(request): + """ + Valid values for 'freq' parameter used to create date_range and + timedelta_range.. + """ + return request.param + + +@pytest.fixture(params=[list, tuple, np.array, array, Series]) +def listlike_box(request): + """ + Types that may be passed as the indexer to searchsorted. + """ + return request.param + + +@pytest.fixture( + params=tm.ALL_REAL_NUMPY_DTYPES + + [ + "object", + "category", + "datetime64[ns]", + "timedelta64[ns]", + ] +) +def any_dtype_for_small_pos_integer_indexes(request): + """ + Dtypes that can be given to an Index with small positive integers. + + This means that for any dtype `x` in the params list, `Index([1, 2, 3], dtype=x)` is + valid and gives the correct Index (sub-)class. + """ + return request.param diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_drop_duplicates.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_drop_duplicates.py new file mode 100644 index 00000000..e5da06cb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_drop_duplicates.py @@ -0,0 +1,89 @@ +import numpy as np +import pytest + +from pandas import ( + PeriodIndex, + Series, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm + + +class DropDuplicates: + def test_drop_duplicates_metadata(self, idx): + # GH#10115 + result = idx.drop_duplicates() + tm.assert_index_equal(idx, result) + assert idx.freq == result.freq + + idx_dup = idx.append(idx) + result = idx_dup.drop_duplicates() + + expected = idx + if not isinstance(idx, PeriodIndex): + # freq is reset except for PeriodIndex + assert idx_dup.freq is None + assert result.freq is None + expected = idx._with_freq(None) + else: + assert result.freq == expected.freq + + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "keep, expected, index", + [ + ( + "first", + np.concatenate(([False] * 10, [True] * 5)), + np.arange(0, 10, dtype=np.int64), + ), + ( + "last", + np.concatenate(([True] * 5, [False] * 10)), + np.arange(5, 15, dtype=np.int64), + ), + ( + False, + np.concatenate(([True] * 5, [False] * 5, [True] * 5)), + np.arange(5, 10, dtype=np.int64), + ), + ], + ) + def test_drop_duplicates(self, keep, expected, index, idx): + # to check Index/Series compat + idx = idx.append(idx[:5]) + + tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected) + expected = idx[~expected] + + result = idx.drop_duplicates(keep=keep) + tm.assert_index_equal(result, expected) + + result = Series(idx).drop_duplicates(keep=keep) + expected = Series(expected, index=index) + tm.assert_series_equal(result, expected) + + +class TestDropDuplicatesPeriodIndex(DropDuplicates): + @pytest.fixture(params=["D", "3D", "H", "2H", "T", "2T", "S", "3S"]) + def freq(self, request): + return request.param + + @pytest.fixture + def idx(self, freq): + return period_range("2011-01-01", periods=10, freq=freq, name="idx") + + +class TestDropDuplicatesDatetimeIndex(DropDuplicates): + @pytest.fixture + def idx(self, freq_sample): + return date_range("2011-01-01", freq=freq_sample, periods=10, name="idx") + + +class 
TestDropDuplicatesTimedeltaIndex(DropDuplicates): + @pytest.fixture + def idx(self, freq_sample): + return timedelta_range("1 day", periods=10, freq=freq_sample, name="idx") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_equals.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_equals.py new file mode 100644 index 00000000..d85d7103 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_equals.py @@ -0,0 +1,180 @@ +""" +Tests shared for DatetimeIndex/TimedeltaIndex/PeriodIndex +""" +from datetime import ( + datetime, + timedelta, +) + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + CategoricalIndex, + DatetimeIndex, + Index, + PeriodIndex, + TimedeltaIndex, + date_range, + period_range, +) +import pandas._testing as tm + + +class EqualsTests: + def test_not_equals_numeric(self, index): + assert not index.equals(Index(index.asi8)) + assert not index.equals(Index(index.asi8.astype("u8"))) + assert not index.equals(Index(index.asi8).astype("f8")) + + def test_equals(self, index): + assert index.equals(index) + assert index.equals(index.astype(object)) + assert index.equals(CategoricalIndex(index)) + assert index.equals(CategoricalIndex(index.astype(object))) + + def test_not_equals_non_arraylike(self, index): + assert not index.equals(list(index)) + + def test_not_equals_strings(self, index): + other = Index([str(x) for x in index], dtype=object) + assert not index.equals(other) + assert not index.equals(CategoricalIndex(other)) + + def test_not_equals_misc_strs(self, index): + other = Index(list("abc")) + assert not index.equals(other) + + +class TestPeriodIndexEquals(EqualsTests): + @pytest.fixture + def index(self): + return period_range("2013-01-01", periods=5, freq="D") + + # TODO: de-duplicate with other test_equals2 methods + @pytest.mark.parametrize("freq", ["D", "M"]) + def test_equals2(self, freq): + # GH#13107 + idx = PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq=freq) + assert idx.equals(idx) + assert idx.equals(idx.copy()) + assert idx.equals(idx.astype(object)) + assert idx.astype(object).equals(idx) + assert idx.astype(object).equals(idx.astype(object)) + assert not idx.equals(list(idx)) + assert not idx.equals(pd.Series(idx)) + + idx2 = PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq="H") + assert not idx.equals(idx2) + assert not idx.equals(idx2.copy()) + assert not idx.equals(idx2.astype(object)) + assert not idx.astype(object).equals(idx2) + assert not idx.equals(list(idx2)) + assert not idx.equals(pd.Series(idx2)) + + # same internal, different tz + idx3 = PeriodIndex._simple_new( + idx._values._simple_new(idx._values.asi8, dtype=pd.PeriodDtype("H")) + ) + tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) + assert not idx.equals(idx3) + assert not idx.equals(idx3.copy()) + assert not idx.equals(idx3.astype(object)) + assert not idx.astype(object).equals(idx3) + assert not idx.equals(list(idx3)) + assert not idx.equals(pd.Series(idx3)) + + +class TestDatetimeIndexEquals(EqualsTests): + @pytest.fixture + def index(self): + return date_range("2013-01-01", periods=5) + + def test_equals2(self): + # GH#13107 + idx = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"]) + assert idx.equals(idx) + assert idx.equals(idx.copy()) + assert idx.equals(idx.astype(object)) + assert idx.astype(object).equals(idx) + assert idx.astype(object).equals(idx.astype(object)) + assert not idx.equals(list(idx)) + assert not 
idx.equals(pd.Series(idx)) + + idx2 = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific") + assert not idx.equals(idx2) + assert not idx.equals(idx2.copy()) + assert not idx.equals(idx2.astype(object)) + assert not idx.astype(object).equals(idx2) + assert not idx.equals(list(idx2)) + assert not idx.equals(pd.Series(idx2)) + + # same internal, different tz + idx3 = DatetimeIndex(idx.asi8, tz="US/Pacific") + tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) + assert not idx.equals(idx3) + assert not idx.equals(idx3.copy()) + assert not idx.equals(idx3.astype(object)) + assert not idx.astype(object).equals(idx3) + assert not idx.equals(list(idx3)) + assert not idx.equals(pd.Series(idx3)) + + # check that we do not raise when comparing with OutOfBounds objects + oob = Index([datetime(2500, 1, 1)] * 3, dtype=object) + assert not idx.equals(oob) + assert not idx2.equals(oob) + assert not idx3.equals(oob) + + # check that we do not raise when comparing with OutOfBounds dt64 + oob2 = oob.map(np.datetime64) + assert not idx.equals(oob2) + assert not idx2.equals(oob2) + assert not idx3.equals(oob2) + + @pytest.mark.parametrize("freq", ["B", "C"]) + def test_not_equals_bday(self, freq): + rng = date_range("2009-01-01", "2010-01-01", freq=freq) + assert not rng.equals(list(rng)) + + +class TestTimedeltaIndexEquals(EqualsTests): + @pytest.fixture + def index(self): + return tm.makeTimedeltaIndex(10) + + def test_equals2(self): + # GH#13107 + idx = TimedeltaIndex(["1 days", "2 days", "NaT"]) + assert idx.equals(idx) + assert idx.equals(idx.copy()) + assert idx.equals(idx.astype(object)) + assert idx.astype(object).equals(idx) + assert idx.astype(object).equals(idx.astype(object)) + assert not idx.equals(list(idx)) + assert not idx.equals(pd.Series(idx)) + + idx2 = TimedeltaIndex(["2 days", "1 days", "NaT"]) + assert not idx.equals(idx2) + assert not idx.equals(idx2.copy()) + assert not idx.equals(idx2.astype(object)) + assert not idx.astype(object).equals(idx2) + assert not idx.astype(object).equals(idx2.astype(object)) + assert not idx.equals(list(idx2)) + assert not idx.equals(pd.Series(idx2)) + + # Check that we dont raise OverflowError on comparisons outside the + # implementation range GH#28532 + oob = Index([timedelta(days=10**6)] * 3, dtype=object) + assert not idx.equals(oob) + assert not idx2.equals(oob) + + oob2 = Index([np.timedelta64(x) for x in oob], dtype=object) + assert (oob == oob2).all() + assert not idx.equals(oob2) + assert not idx2.equals(oob2) + + oob3 = oob.map(np.timedelta64) + assert (oob3 == oob).all() + assert not idx.equals(oob3) + assert not idx2.equals(oob3) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_indexing.py new file mode 100644 index 00000000..ee712860 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_indexing.py @@ -0,0 +1,45 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DatetimeIndex, + Index, +) +import pandas._testing as tm + +dtlike_dtypes = [ + np.dtype("timedelta64[ns]"), + np.dtype("datetime64[ns]"), + pd.DatetimeTZDtype("ns", "Asia/Tokyo"), + pd.PeriodDtype("ns"), +] + + +@pytest.mark.parametrize("ldtype", dtlike_dtypes) +@pytest.mark.parametrize("rdtype", dtlike_dtypes) +def test_get_indexer_non_unique_wrong_dtype(ldtype, rdtype): + vals = np.tile(3600 * 10**9 * np.arange(3), 2) + + def construct(dtype): + if dtype is 
dtlike_dtypes[-1]: + # PeriodArray will try to cast ints to strings + return DatetimeIndex(vals).astype(dtype) + return Index(vals, dtype=dtype) + + left = construct(ldtype) + right = construct(rdtype) + + result = left.get_indexer_non_unique(right) + + if ldtype is rdtype: + ex1 = np.array([0, 3, 1, 4, 2, 5] * 2, dtype=np.intp) + ex2 = np.array([], dtype=np.intp) + tm.assert_numpy_array_equal(result[0], ex1) + tm.assert_numpy_array_equal(result[1], ex2) + + else: + no_matches = np.array([-1] * 6, dtype=np.intp) + missing = np.arange(6, dtype=np.intp) + tm.assert_numpy_array_equal(result[0], no_matches) + tm.assert_numpy_array_equal(result[1], missing) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_is_monotonic.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_is_monotonic.py new file mode 100644 index 00000000..088ccc40 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_is_monotonic.py @@ -0,0 +1,46 @@ +from pandas import ( + Index, + NaT, + date_range, +) + + +def test_is_monotonic_with_nat(): + # GH#31437 + # PeriodIndex.is_monotonic_increasing should behave analogously to DatetimeIndex, + # in particular never be monotonic when we have NaT + dti = date_range("2016-01-01", periods=3) + pi = dti.to_period("D") + tdi = Index(dti.view("timedelta64[ns]")) + + for obj in [pi, pi._engine, dti, dti._engine, tdi, tdi._engine]: + if isinstance(obj, Index): + # i.e. not Engines + assert obj.is_monotonic_increasing + assert obj.is_monotonic_increasing + assert not obj.is_monotonic_decreasing + assert obj.is_unique + + dti1 = dti.insert(0, NaT) + pi1 = dti1.to_period("D") + tdi1 = Index(dti1.view("timedelta64[ns]")) + + for obj in [pi1, pi1._engine, dti1, dti1._engine, tdi1, tdi1._engine]: + if isinstance(obj, Index): + # i.e. not Engines + assert not obj.is_monotonic_increasing + assert not obj.is_monotonic_increasing + assert not obj.is_monotonic_decreasing + assert obj.is_unique + + dti2 = dti.insert(3, NaT) + pi2 = dti2.to_period("H") + tdi2 = Index(dti2.view("timedelta64[ns]")) + + for obj in [pi2, pi2._engine, dti2, dti2._engine, tdi2, tdi2._engine]: + if isinstance(obj, Index): + # i.e. 
not Engines + assert not obj.is_monotonic_increasing + assert not obj.is_monotonic_increasing + assert not obj.is_monotonic_decreasing + assert obj.is_unique diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_nat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_nat.py new file mode 100644 index 00000000..50cf29d0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_nat.py @@ -0,0 +1,53 @@ +import numpy as np +import pytest + +from pandas import ( + DatetimeIndex, + NaT, + PeriodIndex, + TimedeltaIndex, +) +import pandas._testing as tm + + +class NATests: + def test_nat(self, index_without_na): + empty_index = index_without_na[:0] + + index_with_na = index_without_na.copy(deep=True) + index_with_na._data[1] = NaT + + assert empty_index._na_value is NaT + assert index_with_na._na_value is NaT + assert index_without_na._na_value is NaT + + idx = index_without_na + assert idx._can_hold_na + + tm.assert_numpy_array_equal(idx._isnan, np.array([False, False])) + assert idx.hasnans is False + + idx = index_with_na + assert idx._can_hold_na + + tm.assert_numpy_array_equal(idx._isnan, np.array([False, True])) + assert idx.hasnans is True + + +class TestDatetimeIndexNA(NATests): + @pytest.fixture + def index_without_na(self, tz_naive_fixture): + tz = tz_naive_fixture + return DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz) + + +class TestTimedeltaIndexNA(NATests): + @pytest.fixture + def index_without_na(self): + return TimedeltaIndex(["1 days", "2 days"]) + + +class TestPeriodIndexNA(NATests): + @pytest.fixture + def index_without_na(self): + return PeriodIndex(["2011-01-01", "2011-01-02"], freq="D") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_sort_values.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_sort_values.py new file mode 100644 index 00000000..ab1c15f0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_sort_values.py @@ -0,0 +1,315 @@ +import numpy as np +import pytest + +from pandas import ( + DatetimeIndex, + Index, + NaT, + PeriodIndex, + TimedeltaIndex, + timedelta_range, +) +import pandas._testing as tm + + +def check_freq_ascending(ordered, orig, ascending): + """ + Check the expected freq on a PeriodIndex/DatetimeIndex/TimedeltaIndex + when the original index is generated (or generate-able) with + period_range/date_range/timedelta_range. + """ + if isinstance(ordered, PeriodIndex): + assert ordered.freq == orig.freq + elif isinstance(ordered, (DatetimeIndex, TimedeltaIndex)): + if ascending: + assert ordered.freq.n == orig.freq.n + else: + assert ordered.freq.n == -1 * orig.freq.n + + +def check_freq_nonmonotonic(ordered, orig): + """ + Check the expected freq on a PeriodIndex/DatetimeIndex/TimedeltaIndex + when the original index is _not_ generated (or generate-able) with + period_range/date_range//timedelta_range. 
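+
+ Editor's note (illustrative, not part of the upstream file): after
+ reordering, a regular step can no longer be guaranteed, so the checks
+ below expect ordered.freq is None for DatetimeIndex/TimedeltaIndex,
+ while a PeriodIndex keeps orig.freq because freq is part of its dtype.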
+ """ + if isinstance(ordered, PeriodIndex): + assert ordered.freq == orig.freq + elif isinstance(ordered, (DatetimeIndex, TimedeltaIndex)): + assert ordered.freq is None + + +class TestSortValues: + @pytest.fixture(params=[DatetimeIndex, TimedeltaIndex, PeriodIndex]) + def non_monotonic_idx(self, request): + if request.param is DatetimeIndex: + return DatetimeIndex(["2000-01-04", "2000-01-01", "2000-01-02"]) + elif request.param is PeriodIndex: + dti = DatetimeIndex(["2000-01-04", "2000-01-01", "2000-01-02"]) + return dti.to_period("D") + else: + return TimedeltaIndex( + ["1 day 00:00:05", "1 day 00:00:01", "1 day 00:00:02"] + ) + + def test_argmin_argmax(self, non_monotonic_idx): + assert non_monotonic_idx.argmin() == 1 + assert non_monotonic_idx.argmax() == 0 + + def test_sort_values(self, non_monotonic_idx): + idx = non_monotonic_idx + ordered = idx.sort_values() + assert ordered.is_monotonic_increasing + ordered = idx.sort_values(ascending=False) + assert ordered[::-1].is_monotonic_increasing + + ordered, dexer = idx.sort_values(return_indexer=True) + assert ordered.is_monotonic_increasing + tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp)) + + ordered, dexer = idx.sort_values(return_indexer=True, ascending=False) + assert ordered[::-1].is_monotonic_increasing + tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp)) + + def check_sort_values_with_freq(self, idx): + ordered = idx.sort_values() + tm.assert_index_equal(ordered, idx) + check_freq_ascending(ordered, idx, True) + + ordered = idx.sort_values(ascending=False) + expected = idx[::-1] + tm.assert_index_equal(ordered, expected) + check_freq_ascending(ordered, idx, False) + + ordered, indexer = idx.sort_values(return_indexer=True) + tm.assert_index_equal(ordered, idx) + tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2], dtype=np.intp)) + check_freq_ascending(ordered, idx, True) + + ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) + expected = idx[::-1] + tm.assert_index_equal(ordered, expected) + tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0], dtype=np.intp)) + check_freq_ascending(ordered, idx, False) + + @pytest.mark.parametrize("freq", ["D", "H"]) + def test_sort_values_with_freq_timedeltaindex(self, freq): + # GH#10295 + idx = timedelta_range(start=f"1{freq}", periods=3, freq=freq).rename("idx") + + self.check_sort_values_with_freq(idx) + + @pytest.mark.parametrize( + "idx", + [ + DatetimeIndex( + ["2011-01-01", "2011-01-02", "2011-01-03"], freq="D", name="idx" + ), + DatetimeIndex( + ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"], + freq="H", + name="tzidx", + tz="Asia/Tokyo", + ), + ], + ) + def test_sort_values_with_freq_datetimeindex(self, idx): + self.check_sort_values_with_freq(idx) + + @pytest.mark.parametrize("freq", ["D", "2D", "4D"]) + def test_sort_values_with_freq_periodindex(self, freq): + # here with_freq refers to being period_range-like + idx = PeriodIndex( + ["2011-01-01", "2011-01-02", "2011-01-03"], freq=freq, name="idx" + ) + self.check_sort_values_with_freq(idx) + + @pytest.mark.parametrize( + "idx", + [ + PeriodIndex(["2011", "2012", "2013"], name="pidx", freq="A"), + Index([2011, 2012, 2013], name="idx"), # for compatibility check + ], + ) + def test_sort_values_with_freq_periodindex2(self, idx): + # here with_freq indicates this is period_range-like + self.check_sort_values_with_freq(idx) + + def check_sort_values_without_freq(self, idx, expected): + ordered = idx.sort_values(na_position="first") + 
tm.assert_index_equal(ordered, expected) + check_freq_nonmonotonic(ordered, idx) + + if not idx.isna().any(): + ordered = idx.sort_values() + tm.assert_index_equal(ordered, expected) + check_freq_nonmonotonic(ordered, idx) + + ordered = idx.sort_values(ascending=False) + tm.assert_index_equal(ordered, expected[::-1]) + check_freq_nonmonotonic(ordered, idx) + + ordered, indexer = idx.sort_values(return_indexer=True, na_position="first") + tm.assert_index_equal(ordered, expected) + + exp = np.array([0, 4, 3, 1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, exp) + check_freq_nonmonotonic(ordered, idx) + + if not idx.isna().any(): + ordered, indexer = idx.sort_values(return_indexer=True) + tm.assert_index_equal(ordered, expected) + + exp = np.array([0, 4, 3, 1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, exp) + check_freq_nonmonotonic(ordered, idx) + + ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) + tm.assert_index_equal(ordered, expected[::-1]) + + exp = np.array([2, 1, 3, 0, 4], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, exp) + check_freq_nonmonotonic(ordered, idx) + + def test_sort_values_without_freq_timedeltaindex(self): + # GH#10295 + + idx = TimedeltaIndex( + ["1 hour", "3 hour", "5 hour", "2 hour ", "1 hour"], name="idx1" + ) + expected = TimedeltaIndex( + ["1 hour", "1 hour", "2 hour", "3 hour", "5 hour"], name="idx1" + ) + self.check_sort_values_without_freq(idx, expected) + + @pytest.mark.parametrize( + "index_dates,expected_dates", + [ + ( + ["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"], + ["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"], + ), + ( + ["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"], + ["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"], + ), + ( + [NaT, "2011-01-03", "2011-01-05", "2011-01-02", NaT], + [NaT, NaT, "2011-01-02", "2011-01-03", "2011-01-05"], + ), + ], + ) + def test_sort_values_without_freq_datetimeindex( + self, index_dates, expected_dates, tz_naive_fixture + ): + tz = tz_naive_fixture + + # without freq + idx = DatetimeIndex(index_dates, tz=tz, name="idx") + expected = DatetimeIndex(expected_dates, tz=tz, name="idx") + + self.check_sort_values_without_freq(idx, expected) + + @pytest.mark.parametrize( + "idx,expected", + [ + ( + PeriodIndex( + [ + "2011-01-01", + "2011-01-03", + "2011-01-05", + "2011-01-02", + "2011-01-01", + ], + freq="D", + name="idx1", + ), + PeriodIndex( + [ + "2011-01-01", + "2011-01-01", + "2011-01-02", + "2011-01-03", + "2011-01-05", + ], + freq="D", + name="idx1", + ), + ), + ( + PeriodIndex( + [ + "2011-01-01", + "2011-01-03", + "2011-01-05", + "2011-01-02", + "2011-01-01", + ], + freq="D", + name="idx2", + ), + PeriodIndex( + [ + "2011-01-01", + "2011-01-01", + "2011-01-02", + "2011-01-03", + "2011-01-05", + ], + freq="D", + name="idx2", + ), + ), + ( + PeriodIndex( + [NaT, "2011-01-03", "2011-01-05", "2011-01-02", NaT], + freq="D", + name="idx3", + ), + PeriodIndex( + [NaT, NaT, "2011-01-02", "2011-01-03", "2011-01-05"], + freq="D", + name="idx3", + ), + ), + ( + PeriodIndex( + ["2011", "2013", "2015", "2012", "2011"], name="pidx", freq="A" + ), + PeriodIndex( + ["2011", "2011", "2012", "2013", "2015"], name="pidx", freq="A" + ), + ), + ( + # For compatibility check + Index([2011, 2013, 2015, 2012, 2011], name="idx"), + Index([2011, 2011, 2012, 2013, 2015], name="idx"), + ), + ], + ) + def test_sort_values_without_freq_periodindex(self, idx, expected): + # here without_freq 
means not generateable by period_range + self.check_sort_values_without_freq(idx, expected) + + def test_sort_values_without_freq_periodindex_nat(self): + # doesn't quite fit into check_sort_values_without_freq + idx = PeriodIndex(["2011", "2013", "NaT", "2011"], name="pidx", freq="D") + expected = PeriodIndex(["NaT", "2011", "2011", "2013"], name="pidx", freq="D") + + ordered = idx.sort_values(na_position="first") + tm.assert_index_equal(ordered, expected) + check_freq_nonmonotonic(ordered, idx) + + ordered = idx.sort_values(ascending=False) + tm.assert_index_equal(ordered, expected[::-1]) + check_freq_nonmonotonic(ordered, idx) + + +def test_order_stability_compat(): + # GH#35922. sort_values is stable both for normal and datetime-like Index + pidx = PeriodIndex(["2011", "2013", "2015", "2012", "2011"], name="pidx", freq="A") + iidx = Index([2011, 2013, 2015, 2012, 2011], name="idx") + ordered1, indexer1 = pidx.sort_values(return_indexer=True, ascending=False) + ordered2, indexer2 = iidx.sort_values(return_indexer=True, ascending=False) + tm.assert_numpy_array_equal(indexer1, indexer2) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_value_counts.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_value_counts.py new file mode 100644 index 00000000..a0f05a1a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimelike_/test_value_counts.py @@ -0,0 +1,103 @@ +import numpy as np + +from pandas import ( + DatetimeIndex, + NaT, + PeriodIndex, + Series, + TimedeltaIndex, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm + + +class TestValueCounts: + # GH#7735 + + def test_value_counts_unique_datetimeindex(self, tz_naive_fixture): + tz = tz_naive_fixture + orig = date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz) + self._check_value_counts_with_repeats(orig) + + def test_value_counts_unique_timedeltaindex(self): + orig = timedelta_range("1 days 09:00:00", freq="H", periods=10) + self._check_value_counts_with_repeats(orig) + + def test_value_counts_unique_periodindex(self): + orig = period_range("2011-01-01 09:00", freq="H", periods=10) + self._check_value_counts_with_repeats(orig) + + def _check_value_counts_with_repeats(self, orig): + # create repeated values, 'n'th element is repeated by n+1 times + idx = type(orig)( + np.repeat(orig._values, range(1, len(orig) + 1)), dtype=orig.dtype + ) + + exp_idx = orig[::-1] + if not isinstance(exp_idx, PeriodIndex): + exp_idx = exp_idx._with_freq(None) + expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64", name="count") + + for obj in [idx, Series(idx)]: + tm.assert_series_equal(obj.value_counts(), expected) + + tm.assert_index_equal(idx.unique(), orig) + + def test_value_counts_unique_datetimeindex2(self, tz_naive_fixture): + tz = tz_naive_fixture + idx = DatetimeIndex( + [ + "2013-01-01 09:00", + "2013-01-01 09:00", + "2013-01-01 09:00", + "2013-01-01 08:00", + "2013-01-01 08:00", + NaT, + ], + tz=tz, + ) + self._check_value_counts_dropna(idx) + + def test_value_counts_unique_timedeltaindex2(self): + idx = TimedeltaIndex( + [ + "1 days 09:00:00", + "1 days 09:00:00", + "1 days 09:00:00", + "1 days 08:00:00", + "1 days 08:00:00", + NaT, + ] + ) + self._check_value_counts_dropna(idx) + + def test_value_counts_unique_periodindex2(self): + idx = PeriodIndex( + [ + "2013-01-01 09:00", + "2013-01-01 09:00", + "2013-01-01 09:00", + "2013-01-01 08:00", + "2013-01-01 08:00", + NaT, + ], + freq="H", + ) + 
self._check_value_counts_dropna(idx) + + def _check_value_counts_dropna(self, idx): + exp_idx = idx[[2, 3]] + expected = Series([3, 2], index=exp_idx, name="count") + + for obj in [idx, Series(idx)]: + tm.assert_series_equal(obj.value_counts(), expected) + + exp_idx = idx[[2, 3, -1]] + expected = Series([3, 2, 1], index=exp_idx, name="count") + + for obj in [idx, Series(idx)]: + tm.assert_series_equal(obj.value_counts(dropna=False), expected) + + tm.assert_index_equal(idx.unique(), exp_idx) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_astype.py new file mode 100644 index 00000000..d339639d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_astype.py @@ -0,0 +1,310 @@ +from datetime import datetime + +import dateutil +import numpy as np +import pytest +import pytz + +import pandas as pd +from pandas import ( + DatetimeIndex, + Index, + NaT, + PeriodIndex, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestDatetimeIndex: + def test_astype(self): + # GH 13149, GH 13209 + idx = DatetimeIndex(["2016-05-16", "NaT", NaT, np.nan], name="idx") + + result = idx.astype(object) + expected = Index( + [Timestamp("2016-05-16")] + [NaT] * 3, dtype=object, name="idx" + ) + tm.assert_index_equal(result, expected) + + result = idx.astype(np.int64) + expected = Index( + [1463356800000000000] + [-9223372036854775808] * 3, + dtype=np.int64, + name="idx", + ) + tm.assert_index_equal(result, expected) + + rng = date_range("1/1/2000", periods=10, name="idx") + result = rng.astype("i8") + tm.assert_index_equal(result, Index(rng.asi8, name="idx")) + tm.assert_numpy_array_equal(result.values, rng.asi8) + + def test_astype_uint(self): + arr = date_range("2000", periods=2, name="idx") + + with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): + arr.astype("uint64") + with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): + arr.astype("uint32") + + def test_astype_with_tz(self): + # with tz + rng = date_range("1/1/2000", periods=10, tz="US/Eastern") + msg = "Cannot use .astype to convert from timezone-aware" + with pytest.raises(TypeError, match=msg): + # deprecated + rng.astype("datetime64[ns]") + with pytest.raises(TypeError, match=msg): + # check DatetimeArray while we're here deprecated + rng._data.astype("datetime64[ns]") + + def test_astype_tzaware_to_tzaware(self): + # GH 18951: tz-aware to tz-aware + idx = date_range("20170101", periods=4, tz="US/Pacific") + result = idx.astype("datetime64[ns, US/Eastern]") + expected = date_range("20170101 03:00:00", periods=4, tz="US/Eastern") + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + def test_astype_tznaive_to_tzaware(self): + # GH 18951: tz-naive to tz-aware + idx = date_range("20170101", periods=4) + idx = idx._with_freq(None) # tz_localize does not preserve freq + msg = "Cannot use .astype to convert from timezone-naive" + with 
pytest.raises(TypeError, match=msg): + # dt64->dt64tz deprecated + idx.astype("datetime64[ns, US/Eastern]") + with pytest.raises(TypeError, match=msg): + # dt64->dt64tz deprecated + idx._data.astype("datetime64[ns, US/Eastern]") + + def test_astype_str_nat(self): + # GH 13149, GH 13209 + # verify that we are returning NaT as a string (and not unicode) + + idx = DatetimeIndex(["2016-05-16", "NaT", NaT, np.nan]) + result = idx.astype(str) + expected = Index(["2016-05-16", "NaT", "NaT", "NaT"], dtype=object) + tm.assert_index_equal(result, expected) + + def test_astype_str(self): + # test astype string - #10442 + dti = date_range("2012-01-01", periods=4, name="test_name") + result = dti.astype(str) + expected = Index( + ["2012-01-01", "2012-01-02", "2012-01-03", "2012-01-04"], + name="test_name", + dtype=object, + ) + tm.assert_index_equal(result, expected) + + def test_astype_str_tz_and_name(self): + # test astype string with tz and name + dti = date_range("2012-01-01", periods=3, name="test_name", tz="US/Eastern") + result = dti.astype(str) + expected = Index( + [ + "2012-01-01 00:00:00-05:00", + "2012-01-02 00:00:00-05:00", + "2012-01-03 00:00:00-05:00", + ], + name="test_name", + dtype=object, + ) + tm.assert_index_equal(result, expected) + + def test_astype_str_freq_and_name(self): + # test astype string with freqH and name + dti = date_range("1/1/2011", periods=3, freq="H", name="test_name") + result = dti.astype(str) + expected = Index( + ["2011-01-01 00:00:00", "2011-01-01 01:00:00", "2011-01-01 02:00:00"], + name="test_name", + dtype=object, + ) + tm.assert_index_equal(result, expected) + + def test_astype_str_freq_and_tz(self): + # test astype string with freqH and timezone + dti = date_range( + "3/6/2012 00:00", periods=2, freq="H", tz="Europe/London", name="test_name" + ) + result = dti.astype(str) + expected = Index( + ["2012-03-06 00:00:00+00:00", "2012-03-06 01:00:00+00:00"], + dtype=object, + name="test_name", + ) + tm.assert_index_equal(result, expected) + + def test_astype_datetime64(self): + # GH 13149, GH 13209 + idx = DatetimeIndex(["2016-05-16", "NaT", NaT, np.nan], name="idx") + + result = idx.astype("datetime64[ns]") + tm.assert_index_equal(result, idx) + assert result is not idx + + result = idx.astype("datetime64[ns]", copy=False) + tm.assert_index_equal(result, idx) + assert result is idx + + idx_tz = DatetimeIndex(["2016-05-16", "NaT", NaT, np.nan], tz="EST", name="idx") + msg = "Cannot use .astype to convert from timezone-aware" + with pytest.raises(TypeError, match=msg): + # dt64tz->dt64 deprecated + result = idx_tz.astype("datetime64[ns]") + + def test_astype_object(self): + rng = date_range("1/1/2000", periods=20) + + casted = rng.astype("O") + exp_values = list(rng) + + tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_)) + assert casted.tolist() == exp_values + + @pytest.mark.parametrize("tz", [None, "Asia/Tokyo"]) + def test_astype_object_tz(self, tz): + idx = date_range(start="2013-01-01", periods=4, freq="M", name="idx", tz=tz) + expected_list = [ + Timestamp("2013-01-31", tz=tz), + Timestamp("2013-02-28", tz=tz), + Timestamp("2013-03-31", tz=tz), + Timestamp("2013-04-30", tz=tz), + ] + expected = Index(expected_list, dtype=object, name="idx") + result = idx.astype(object) + tm.assert_index_equal(result, expected) + assert idx.tolist() == expected_list + + def test_astype_object_with_nat(self): + idx = DatetimeIndex( + [datetime(2013, 1, 1), datetime(2013, 1, 2), NaT, datetime(2013, 1, 4)], + name="idx", + ) + expected_list = [ + 
Timestamp("2013-01-01"), + Timestamp("2013-01-02"), + NaT, + Timestamp("2013-01-04"), + ] + expected = Index(expected_list, dtype=object, name="idx") + result = idx.astype(object) + tm.assert_index_equal(result, expected) + assert idx.tolist() == expected_list + + @pytest.mark.parametrize( + "dtype", + [float, "timedelta64", "timedelta64[ns]", "datetime64", "datetime64[D]"], + ) + def test_astype_raises(self, dtype): + # GH 13149, GH 13209 + idx = DatetimeIndex(["2016-05-16", "NaT", NaT, np.nan]) + msg = "Cannot cast DatetimeIndex to dtype" + if dtype == "datetime64": + msg = "Casting to unit-less dtype 'datetime64' is not supported" + with pytest.raises(TypeError, match=msg): + idx.astype(dtype) + + def test_index_convert_to_datetime_array(self): + def _check_rng(rng): + converted = rng.to_pydatetime() + assert isinstance(converted, np.ndarray) + for x, stamp in zip(converted, rng): + assert isinstance(x, datetime) + assert x == stamp.to_pydatetime() + assert x.tzinfo == stamp.tzinfo + + rng = date_range("20090415", "20090519") + rng_eastern = date_range("20090415", "20090519", tz="US/Eastern") + rng_utc = date_range("20090415", "20090519", tz="utc") + + _check_rng(rng) + _check_rng(rng_eastern) + _check_rng(rng_utc) + + def test_index_convert_to_datetime_array_explicit_pytz(self): + def _check_rng(rng): + converted = rng.to_pydatetime() + assert isinstance(converted, np.ndarray) + for x, stamp in zip(converted, rng): + assert isinstance(x, datetime) + assert x == stamp.to_pydatetime() + assert x.tzinfo == stamp.tzinfo + + rng = date_range("20090415", "20090519") + rng_eastern = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern")) + rng_utc = date_range("20090415", "20090519", tz=pytz.utc) + + _check_rng(rng) + _check_rng(rng_eastern) + _check_rng(rng_utc) + + def test_index_convert_to_datetime_array_dateutil(self): + def _check_rng(rng): + converted = rng.to_pydatetime() + assert isinstance(converted, np.ndarray) + for x, stamp in zip(converted, rng): + assert isinstance(x, datetime) + assert x == stamp.to_pydatetime() + assert x.tzinfo == stamp.tzinfo + + rng = date_range("20090415", "20090519") + rng_eastern = date_range("20090415", "20090519", tz="dateutil/US/Eastern") + rng_utc = date_range("20090415", "20090519", tz=dateutil.tz.tzutc()) + + _check_rng(rng) + _check_rng(rng_eastern) + _check_rng(rng_utc) + + @pytest.mark.parametrize( + "tz, dtype", + [["US/Pacific", "datetime64[ns, US/Pacific]"], [None, "datetime64[ns]"]], + ) + def test_integer_index_astype_datetime(self, tz, dtype): + # GH 20997, 20964, 24559 + val = [Timestamp("2018-01-01", tz=tz).as_unit("ns")._value] + result = Index(val, name="idx").astype(dtype) + expected = DatetimeIndex(["2018-01-01"], tz=tz, name="idx") + tm.assert_index_equal(result, expected) + + def test_dti_astype_period(self): + idx = DatetimeIndex([NaT, "2011-01-01", "2011-02-01"], name="idx") + + res = idx.astype("period[M]") + exp = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="M", name="idx") + tm.assert_index_equal(res, exp) + + res = idx.astype("period[3M]") + exp = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="3M", name="idx") + tm.assert_index_equal(res, exp) + + +class TestAstype: + @pytest.mark.parametrize("tz", [None, "US/Central"]) + def test_astype_category(self, tz): + obj = date_range("2000", periods=2, tz=tz, name="idx") + result = obj.astype("category") + expected = pd.CategoricalIndex( + [Timestamp("2000-01-01", tz=tz), Timestamp("2000-01-02", tz=tz)], + name="idx", + ) + tm.assert_index_equal(result, expected) + + 
result = obj._data.astype("category") + expected = expected.values + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("tz", [None, "US/Central"]) + def test_astype_array_fallback(self, tz): + obj = date_range("2000", periods=2, tz=tz, name="idx") + result = obj.astype(bool) + expected = Index(np.array([True, True]), name="idx") + tm.assert_index_equal(result, expected) + + result = obj._data.astype(bool) + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_factorize.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_factorize.py new file mode 100644 index 00000000..3ad927f1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_factorize.py @@ -0,0 +1,125 @@ +import numpy as np +import pytest + +from pandas import ( + DatetimeIndex, + Index, + date_range, + factorize, +) +import pandas._testing as tm + + +class TestDatetimeIndexFactorize: + def test_factorize(self): + idx1 = DatetimeIndex( + ["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"] + ) + + exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp) + exp_idx = DatetimeIndex(["2014-01", "2014-02", "2014-03"]) + + arr, idx = idx1.factorize() + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, exp_idx) + assert idx.freq == exp_idx.freq + + arr, idx = idx1.factorize(sort=True) + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, exp_idx) + assert idx.freq == exp_idx.freq + + # tz must be preserved + idx1 = idx1.tz_localize("Asia/Tokyo") + exp_idx = exp_idx.tz_localize("Asia/Tokyo") + + arr, idx = idx1.factorize() + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, exp_idx) + assert idx.freq == exp_idx.freq + + idx2 = DatetimeIndex( + ["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"] + ) + + exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp) + exp_idx = DatetimeIndex(["2014-01", "2014-02", "2014-03"]) + arr, idx = idx2.factorize(sort=True) + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, exp_idx) + assert idx.freq == exp_idx.freq + + exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp) + exp_idx = DatetimeIndex(["2014-03", "2014-02", "2014-01"]) + arr, idx = idx2.factorize() + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, exp_idx) + assert idx.freq == exp_idx.freq + + def test_factorize_preserves_freq(self): + # GH#38120 freq should be preserved + idx3 = date_range("2000-01", periods=4, freq="M", tz="Asia/Tokyo") + exp_arr = np.array([0, 1, 2, 3], dtype=np.intp) + + arr, idx = idx3.factorize() + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, idx3) + assert idx.freq == idx3.freq + + arr, idx = factorize(idx3) + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, idx3) + assert idx.freq == idx3.freq + + def test_factorize_tz(self, tz_naive_fixture, index_or_series): + tz = tz_naive_fixture + # GH#13750 + base = date_range("2016-11-05", freq="H", periods=100, tz=tz) + idx = base.repeat(5) + + exp_arr = np.arange(100, dtype=np.intp).repeat(5) + + obj = index_or_series(idx) + + arr, res = obj.factorize() + tm.assert_numpy_array_equal(arr, exp_arr) + expected = base._with_freq(None) + tm.assert_index_equal(res, expected) + assert res.freq == expected.freq + + def test_factorize_dst(self, index_or_series): + # GH#13750 + idx = 
date_range("2016-11-06", freq="H", periods=12, tz="US/Eastern") + obj = index_or_series(idx) + + arr, res = obj.factorize() + tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp)) + tm.assert_index_equal(res, idx) + if index_or_series is Index: + assert res.freq == idx.freq + + idx = date_range("2016-06-13", freq="H", periods=12, tz="US/Eastern") + obj = index_or_series(idx) + + arr, res = obj.factorize() + tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp)) + tm.assert_index_equal(res, idx) + if index_or_series is Index: + assert res.freq == idx.freq + + @pytest.mark.parametrize("sort", [True, False]) + def test_factorize_no_freq_non_nano(self, tz_naive_fixture, sort): + # GH#51978 case that does not go through the fastpath based on + # non-None freq + tz = tz_naive_fixture + idx = date_range("2016-11-06", freq="H", periods=5, tz=tz)[[0, 4, 1, 3, 2]] + exp_codes, exp_uniques = idx.factorize(sort=sort) + + res_codes, res_uniques = idx.as_unit("s").factorize(sort=sort) + + tm.assert_numpy_array_equal(res_codes, exp_codes) + tm.assert_index_equal(res_uniques, exp_uniques.as_unit("s")) + + res_codes, res_uniques = idx.as_unit("s").to_series().factorize(sort=sort) + tm.assert_numpy_array_equal(res_codes, exp_codes) + tm.assert_index_equal(res_uniques, exp_uniques.as_unit("s")) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_fillna.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_fillna.py new file mode 100644 index 00000000..5fbe60bb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_fillna.py @@ -0,0 +1,62 @@ +import pytest + +import pandas as pd +import pandas._testing as tm + + +class TestDatetimeIndexFillNA: + @pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"]) + def test_fillna_datetime64(self, tz): + # GH 11343 + idx = pd.DatetimeIndex(["2011-01-01 09:00", pd.NaT, "2011-01-01 11:00"]) + + exp = pd.DatetimeIndex( + ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"] + ) + tm.assert_index_equal(idx.fillna(pd.Timestamp("2011-01-01 10:00")), exp) + + # tz mismatch + exp = pd.Index( + [ + pd.Timestamp("2011-01-01 09:00"), + pd.Timestamp("2011-01-01 10:00", tz=tz), + pd.Timestamp("2011-01-01 11:00"), + ], + dtype=object, + ) + tm.assert_index_equal(idx.fillna(pd.Timestamp("2011-01-01 10:00", tz=tz)), exp) + + # object + exp = pd.Index( + [pd.Timestamp("2011-01-01 09:00"), "x", pd.Timestamp("2011-01-01 11:00")], + dtype=object, + ) + tm.assert_index_equal(idx.fillna("x"), exp) + + idx = pd.DatetimeIndex(["2011-01-01 09:00", pd.NaT, "2011-01-01 11:00"], tz=tz) + + exp = pd.DatetimeIndex( + ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"], tz=tz + ) + tm.assert_index_equal(idx.fillna(pd.Timestamp("2011-01-01 10:00", tz=tz)), exp) + + exp = pd.Index( + [ + pd.Timestamp("2011-01-01 09:00", tz=tz), + pd.Timestamp("2011-01-01 10:00"), + pd.Timestamp("2011-01-01 11:00", tz=tz), + ], + dtype=object, + ) + tm.assert_index_equal(idx.fillna(pd.Timestamp("2011-01-01 10:00")), exp) + + # object + exp = pd.Index( + [ + pd.Timestamp("2011-01-01 09:00", tz=tz), + "x", + pd.Timestamp("2011-01-01 11:00", tz=tz), + ], + dtype=object, + ) + tm.assert_index_equal(idx.fillna("x"), exp) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_insert.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_insert.py new file mode 100644 index 00000000..cedf8cd5 --- 
/dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_insert.py @@ -0,0 +1,256 @@ +from datetime import datetime + +import numpy as np +import pytest +import pytz + +from pandas import ( + NA, + DatetimeIndex, + Index, + NaT, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestInsert: + @pytest.mark.parametrize("null", [None, np.nan, np.datetime64("NaT"), NaT, NA]) + @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"]) + def test_insert_nat(self, tz, null): + # GH#16537, GH#18295 (test missing) + + idx = DatetimeIndex(["2017-01-01"], tz=tz) + expected = DatetimeIndex(["NaT", "2017-01-01"], tz=tz) + if tz is not None and isinstance(null, np.datetime64): + expected = Index([null, idx[0]], dtype=object) + + res = idx.insert(0, null) + tm.assert_index_equal(res, expected) + + @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"]) + def test_insert_invalid_na(self, tz): + idx = DatetimeIndex(["2017-01-01"], tz=tz) + + item = np.timedelta64("NaT") + result = idx.insert(0, item) + expected = Index([item] + list(idx), dtype=object) + tm.assert_index_equal(result, expected) + + def test_insert_empty_preserves_freq(self, tz_naive_fixture): + # GH#33573 + tz = tz_naive_fixture + dti = DatetimeIndex([], tz=tz, freq="D") + item = Timestamp("2017-04-05").tz_localize(tz) + + result = dti.insert(0, item) + assert result.freq == dti.freq + + # But not when we insert an item that doesn't conform to freq + dti = DatetimeIndex([], tz=tz, freq="W-THU") + result = dti.insert(0, item) + assert result.freq is None + + def test_insert(self): + idx = DatetimeIndex(["2000-01-04", "2000-01-01", "2000-01-02"], name="idx") + + result = idx.insert(2, datetime(2000, 1, 5)) + exp = DatetimeIndex( + ["2000-01-04", "2000-01-01", "2000-01-05", "2000-01-02"], name="idx" + ) + tm.assert_index_equal(result, exp) + + # insertion of non-datetime should coerce to object index + result = idx.insert(1, "inserted") + expected = Index( + [ + datetime(2000, 1, 4), + "inserted", + datetime(2000, 1, 1), + datetime(2000, 1, 2), + ], + name="idx", + ) + assert not isinstance(result, DatetimeIndex) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + + idx = date_range("1/1/2000", periods=3, freq="M", name="idx") + + # preserve freq + expected_0 = DatetimeIndex( + ["1999-12-31", "2000-01-31", "2000-02-29", "2000-03-31"], + name="idx", + freq="M", + ) + expected_3 = DatetimeIndex( + ["2000-01-31", "2000-02-29", "2000-03-31", "2000-04-30"], + name="idx", + freq="M", + ) + + # reset freq to None + expected_1_nofreq = DatetimeIndex( + ["2000-01-31", "2000-01-31", "2000-02-29", "2000-03-31"], + name="idx", + freq=None, + ) + expected_3_nofreq = DatetimeIndex( + ["2000-01-31", "2000-02-29", "2000-03-31", "2000-01-02"], + name="idx", + freq=None, + ) + + cases = [ + (0, datetime(1999, 12, 31), expected_0), + (-3, datetime(1999, 12, 31), expected_0), + (3, datetime(2000, 4, 30), expected_3), + (1, datetime(2000, 1, 31), expected_1_nofreq), + (3, datetime(2000, 1, 2), expected_3_nofreq), + ] + + for n, d, expected in cases: + result = idx.insert(n, d) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + # reset freq to None + result = idx.insert(3, datetime(2000, 1, 2)) + expected = DatetimeIndex( + ["2000-01-31", "2000-02-29", "2000-03-31", "2000-01-02"], + name="idx", + freq=None, + ) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert 
result.freq is None + + for tz in ["US/Pacific", "Asia/Singapore"]: + idx = date_range("1/1/2000 09:00", periods=6, freq="H", tz=tz, name="idx") + # preserve freq + expected = date_range( + "1/1/2000 09:00", periods=7, freq="H", tz=tz, name="idx" + ) + for d in [ + Timestamp("2000-01-01 15:00", tz=tz), + pytz.timezone(tz).localize(datetime(2000, 1, 1, 15)), + ]: + result = idx.insert(6, d) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + assert result.tz == expected.tz + + expected = DatetimeIndex( + [ + "2000-01-01 09:00", + "2000-01-01 10:00", + "2000-01-01 11:00", + "2000-01-01 12:00", + "2000-01-01 13:00", + "2000-01-01 14:00", + "2000-01-01 10:00", + ], + name="idx", + tz=tz, + freq=None, + ) + # reset freq to None + for d in [ + Timestamp("2000-01-01 10:00", tz=tz), + pytz.timezone(tz).localize(datetime(2000, 1, 1, 10)), + ]: + result = idx.insert(6, d) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.tz == expected.tz + assert result.freq is None + + # TODO: also changes DataFrame.__setitem__ with expansion + def test_insert_mismatched_tzawareness(self): + # see GH#7299 + idx = date_range("1/1/2000", periods=3, freq="D", tz="Asia/Tokyo", name="idx") + + # mismatched tz-awareness + item = Timestamp("2000-01-04") + result = idx.insert(3, item) + expected = Index( + list(idx[:3]) + [item] + list(idx[3:]), dtype=object, name="idx" + ) + tm.assert_index_equal(result, expected) + + # mismatched tz-awareness + item = datetime(2000, 1, 4) + result = idx.insert(3, item) + expected = Index( + list(idx[:3]) + [item] + list(idx[3:]), dtype=object, name="idx" + ) + tm.assert_index_equal(result, expected) + + # TODO: also changes DataFrame.__setitem__ with expansion + def test_insert_mismatched_tz(self): + # see GH#7299 + # pre-2.0 with mismatched tzs we would cast to object + idx = date_range("1/1/2000", periods=3, freq="D", tz="Asia/Tokyo", name="idx") + + # mismatched tz -> cast to object (could reasonably cast to same tz or UTC) + item = Timestamp("2000-01-04", tz="US/Eastern") + result = idx.insert(3, item) + expected = Index( + list(idx[:3]) + [item.tz_convert(idx.tz)] + list(idx[3:]), + name="idx", + ) + assert expected.dtype == idx.dtype + tm.assert_index_equal(result, expected) + + item = datetime(2000, 1, 4, tzinfo=pytz.timezone("US/Eastern")) + result = idx.insert(3, item) + expected = Index( + list(idx[:3]) + [item.astimezone(idx.tzinfo)] + list(idx[3:]), + name="idx", + ) + assert expected.dtype == idx.dtype + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "item", [0, np.int64(0), np.float64(0), np.array(0), np.timedelta64(456)] + ) + def test_insert_mismatched_types_raises(self, tz_aware_fixture, item): + # GH#33703 dont cast these to dt64 + tz = tz_aware_fixture + dti = date_range("2019-11-04", periods=9, freq="-1D", name=9, tz=tz) + + result = dti.insert(1, item) + + if isinstance(item, np.ndarray): + assert item.item() == 0 + expected = Index([dti[0], 0] + list(dti[1:]), dtype=object, name=9) + else: + expected = Index([dti[0], item] + list(dti[1:]), dtype=object, name=9) + + tm.assert_index_equal(result, expected) + + def test_insert_castable_str(self, tz_aware_fixture): + # GH#33703 + tz = tz_aware_fixture + dti = date_range("2019-11-04", periods=3, freq="-1D", name=9, tz=tz) + + value = "2019-11-05" + result = dti.insert(0, value) + + ts = Timestamp(value).tz_localize(tz) + expected = DatetimeIndex([ts] + list(dti), dtype=dti.dtype, 
name=9) + tm.assert_index_equal(result, expected) + + def test_insert_non_castable_str(self, tz_aware_fixture): + # GH#33703 + tz = tz_aware_fixture + dti = date_range("2019-11-04", periods=3, freq="-1D", name=9, tz=tz) + + value = "foo" + result = dti.insert(0, value) + + expected = Index(["foo"] + list(dti), dtype=object, name=9) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_isocalendar.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_isocalendar.py new file mode 100644 index 00000000..128a8b3e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_isocalendar.py @@ -0,0 +1,20 @@ +from pandas import ( + DataFrame, + DatetimeIndex, +) +import pandas._testing as tm + + +def test_isocalendar_returns_correct_values_close_to_new_year_with_tz(): + # GH#6538: Check that DatetimeIndex and its TimeStamp elements + # return the same weekofyear accessor close to new year w/ tz + dates = ["2013/12/29", "2013/12/30", "2013/12/31"] + dates = DatetimeIndex(dates, tz="Europe/Brussels") + result = dates.isocalendar() + expected_data_frame = DataFrame( + [[2013, 52, 7], [2014, 1, 1], [2014, 1, 2]], + columns=["year", "week", "day"], + index=dates, + dtype="UInt32", + ) + tm.assert_frame_equal(result, expected_data_frame) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_repeat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_repeat.py new file mode 100644 index 00000000..c18109a2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_repeat.py @@ -0,0 +1,78 @@ +import numpy as np +import pytest + +from pandas import ( + DatetimeIndex, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestRepeat: + def test_repeat_range(self, tz_naive_fixture): + tz = tz_naive_fixture + rng = date_range("1/1/2000", "1/1/2001") + + result = rng.repeat(5) + assert result.freq is None + assert len(result) == 5 * len(rng) + + index = date_range("2001-01-01", periods=2, freq="D", tz=tz) + exp = DatetimeIndex( + ["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz + ) + for res in [index.repeat(2), np.repeat(index, 2)]: + tm.assert_index_equal(res, exp) + assert res.freq is None + + index = date_range("2001-01-01", periods=2, freq="2D", tz=tz) + exp = DatetimeIndex( + ["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz + ) + for res in [index.repeat(2), np.repeat(index, 2)]: + tm.assert_index_equal(res, exp) + assert res.freq is None + + index = DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz) + exp = DatetimeIndex( + [ + "2001-01-01", + "2001-01-01", + "2001-01-01", + "NaT", + "NaT", + "NaT", + "2003-01-01", + "2003-01-01", + "2003-01-01", + ], + tz=tz, + ) + for res in [index.repeat(3), np.repeat(index, 3)]: + tm.assert_index_equal(res, exp) + assert res.freq is None + + def test_repeat(self, tz_naive_fixture): + tz = tz_naive_fixture + reps = 2 + msg = "the 'axis' parameter is not supported" + + rng = date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz) + + expected_rng = DatetimeIndex( + [ + Timestamp("2016-01-01 00:00:00", tz=tz), + Timestamp("2016-01-01 00:00:00", tz=tz), + Timestamp("2016-01-01 00:30:00", tz=tz), + Timestamp("2016-01-01 00:30:00", tz=tz), + ] + ) + + res = rng.repeat(reps) + tm.assert_index_equal(res, expected_rng) + assert res.freq is None + 
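+ # np.repeat has no special handling for Index objects; it falls back to the + # index's own .repeat method, so the result below matches rng.repeat(reps). + # Only the unsupported 'axis' keyword is expected to raise.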
+ tm.assert_index_equal(np.repeat(rng, reps), expected_rng) + with pytest.raises(ValueError, match=msg): + np.repeat(rng, reps, axis=1) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_shift.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_shift.py new file mode 100644 index 00000000..65bdfc90 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_shift.py @@ -0,0 +1,162 @@ +from datetime import datetime + +import pytest +import pytz + +from pandas.errors import NullFrequencyError + +import pandas as pd +from pandas import ( + DatetimeIndex, + Series, + date_range, +) +import pandas._testing as tm + +START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) + + +class TestDatetimeIndexShift: + # ------------------------------------------------------------- + # DatetimeIndex.shift is used in integer addition + + def test_dti_shift_tzaware(self, tz_naive_fixture): + # GH#9903 + tz = tz_naive_fixture + idx = DatetimeIndex([], name="xxx", tz=tz) + tm.assert_index_equal(idx.shift(0, freq="H"), idx) + tm.assert_index_equal(idx.shift(3, freq="H"), idx) + + idx = DatetimeIndex( + ["2011-01-01 10:00", "2011-01-01 11:00", "2011-01-01 12:00"], + name="xxx", + tz=tz, + freq="H", + ) + tm.assert_index_equal(idx.shift(0, freq="H"), idx) + exp = DatetimeIndex( + ["2011-01-01 13:00", "2011-01-01 14:00", "2011-01-01 15:00"], + name="xxx", + tz=tz, + freq="H", + ) + tm.assert_index_equal(idx.shift(3, freq="H"), exp) + exp = DatetimeIndex( + ["2011-01-01 07:00", "2011-01-01 08:00", "2011-01-01 09:00"], + name="xxx", + tz=tz, + freq="H", + ) + tm.assert_index_equal(idx.shift(-3, freq="H"), exp) + + def test_dti_shift_freqs(self): + # test shift for DatetimeIndex and non DatetimeIndex + # GH#8083 + drange = date_range("20130101", periods=5) + result = drange.shift(1) + expected = DatetimeIndex( + ["2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05", "2013-01-06"], + freq="D", + ) + tm.assert_index_equal(result, expected) + + result = drange.shift(-1) + expected = DatetimeIndex( + ["2012-12-31", "2013-01-01", "2013-01-02", "2013-01-03", "2013-01-04"], + freq="D", + ) + tm.assert_index_equal(result, expected) + + result = drange.shift(3, freq="2D") + expected = DatetimeIndex( + ["2013-01-07", "2013-01-08", "2013-01-09", "2013-01-10", "2013-01-11"], + freq="D", + ) + tm.assert_index_equal(result, expected) + + def test_dti_shift_int(self): + rng = date_range("1/1/2000", periods=20) + + result = rng + 5 * rng.freq + expected = rng.shift(5) + tm.assert_index_equal(result, expected) + + result = rng - 5 * rng.freq + expected = rng.shift(-5) + tm.assert_index_equal(result, expected) + + def test_dti_shift_no_freq(self): + # GH#19147 + dti = DatetimeIndex(["2011-01-01 10:00", "2011-01-01"], freq=None) + with pytest.raises(NullFrequencyError, match="Cannot shift with no freq"): + dti.shift(2) + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_shift_localized(self, tzstr): + dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI") + dr_tz = dr.tz_localize(tzstr) + + result = dr_tz.shift(1, "10T") + assert result.tz == dr_tz.tz + + def test_dti_shift_across_dst(self): + # GH 8616 + idx = date_range("2013-11-03", tz="America/Chicago", periods=7, freq="H") + s = Series(index=idx[:-1], dtype=object) + result = s.shift(freq="H") + expected = Series(index=idx[1:], dtype=object) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "shift, 
result_time", + [ + [0, "2014-11-14 00:00:00"], + [-1, "2014-11-13 23:00:00"], + [1, "2014-11-14 01:00:00"], + ], + ) + def test_dti_shift_near_midnight(self, shift, result_time): + # GH 8616 + dt = datetime(2014, 11, 14, 0) + dt_est = pytz.timezone("EST").localize(dt) + s = Series(data=[1], index=[dt_est]) + result = s.shift(shift, freq="H") + expected = Series(1, index=DatetimeIndex([result_time], tz="EST")) + tm.assert_series_equal(result, expected) + + def test_shift_periods(self): + # GH#22458 : argument 'n' was deprecated in favor of 'periods' + idx = date_range(start=START, end=END, periods=3) + tm.assert_index_equal(idx.shift(periods=0), idx) + tm.assert_index_equal(idx.shift(0), idx) + + @pytest.mark.parametrize("freq", ["B", "C"]) + def test_shift_bday(self, freq): + rng = date_range(START, END, freq=freq) + shifted = rng.shift(5) + assert shifted[0] == rng[5] + assert shifted.freq == rng.freq + + shifted = rng.shift(-5) + assert shifted[5] == rng[0] + assert shifted.freq == rng.freq + + shifted = rng.shift(0) + assert shifted[0] == rng[0] + assert shifted.freq == rng.freq + + def test_shift_bmonth(self): + rng = date_range(START, END, freq=pd.offsets.BMonthEnd()) + shifted = rng.shift(1, freq=pd.offsets.BDay()) + assert shifted[0] == rng[0] + pd.offsets.BDay() + + rng = date_range(START, END, freq=pd.offsets.BMonthEnd()) + with tm.assert_produces_warning(pd.errors.PerformanceWarning): + shifted = rng.shift(1, freq=pd.offsets.CDay()) + assert shifted[0] == rng[0] + pd.offsets.CDay() + + def test_shift_empty(self): + # GH#14811 + dti = date_range(start="2016-10-21", end="2016-10-21", freq="BM") + result = dti.shift(1) + tm.assert_index_equal(result, dti) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_snap.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_snap.py new file mode 100644 index 00000000..7064e9e7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_snap.py @@ -0,0 +1,47 @@ +import pytest + +from pandas import ( + DatetimeIndex, + date_range, +) +import pandas._testing as tm + + +@pytest.mark.parametrize("tz", [None, "Asia/Shanghai", "Europe/Berlin"]) +@pytest.mark.parametrize("name", [None, "my_dti"]) +@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) +def test_dti_snap(name, tz, unit): + dti = DatetimeIndex( + [ + "1/1/2002", + "1/2/2002", + "1/3/2002", + "1/4/2002", + "1/5/2002", + "1/6/2002", + "1/7/2002", + ], + name=name, + tz=tz, + freq="D", + ) + dti = dti.as_unit(unit) + + result = dti.snap(freq="W-MON") + expected = date_range("12/31/2001", "1/7/2002", name=name, tz=tz, freq="w-mon") + expected = expected.repeat([3, 4]) + expected = expected.as_unit(unit) + tm.assert_index_equal(result, expected) + assert result.tz == expected.tz + assert result.freq is None + assert expected.freq is None + + result = dti.snap(freq="B") + + expected = date_range("1/1/2002", "1/7/2002", name=name, tz=tz, freq="b") + expected = expected.repeat([1, 1, 1, 2, 2]) + expected = expected.as_unit(unit) + tm.assert_index_equal(result, expected) + assert result.tz == expected.tz + assert result.freq is None + assert expected.freq is None diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_to_frame.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_to_frame.py new file mode 100644 index 00000000..c829109d --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_to_frame.py @@ -0,0 +1,28 @@ +from pandas import ( + DataFrame, + Index, + date_range, +) +import pandas._testing as tm + + +class TestToFrame: + def test_to_frame_datetime_tz(self): + # GH#25809 + idx = date_range(start="2019-01-01", end="2019-01-30", freq="D", tz="UTC") + result = idx.to_frame() + expected = DataFrame(idx, index=idx) + tm.assert_frame_equal(result, expected) + + def test_to_frame_respects_none_name(self): + # GH#44212 if we explicitly pass name=None, then that should be respected, + # not changed to 0 + # GH-45448 this is first deprecated to only change in the future + idx = date_range(start="2019-01-01", end="2019-01-30", freq="D", tz="UTC") + result = idx.to_frame(name=None) + exp_idx = Index([None], dtype=object) + tm.assert_index_equal(exp_idx, result.columns) + + result = idx.rename("foo").to_frame(name=None) + exp_idx = Index([None], dtype=object) + tm.assert_index_equal(exp_idx, result.columns) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_to_period.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_to_period.py new file mode 100644 index 00000000..14de6c59 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_to_period.py @@ -0,0 +1,189 @@ +import dateutil.tz +from dateutil.tz import tzlocal +import pytest +import pytz + +from pandas._libs.tslibs.ccalendar import MONTHS +from pandas._libs.tslibs.offsets import MonthEnd +from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG + +from pandas import ( + DatetimeIndex, + Period, + PeriodIndex, + Timestamp, + date_range, + period_range, +) +import pandas._testing as tm + + +class TestToPeriod: + def test_dti_to_period(self): + dti = date_range(start="1/1/2005", end="12/1/2005", freq="M") + pi1 = dti.to_period() + pi2 = dti.to_period(freq="D") + pi3 = dti.to_period(freq="3D") + + assert pi1[0] == Period("Jan 2005", freq="M") + assert pi2[0] == Period("1/31/2005", freq="D") + assert pi3[0] == Period("1/31/2005", freq="3D") + + assert pi1[-1] == Period("Nov 2005", freq="M") + assert pi2[-1] == Period("11/30/2005", freq="D") + assert pi3[-1] == Period("11/30/2005", freq="3D") + + tm.assert_index_equal(pi1, period_range("1/1/2005", "11/1/2005", freq="M")) + tm.assert_index_equal( + pi2, period_range("1/1/2005", "11/1/2005", freq="M").asfreq("D") + ) + tm.assert_index_equal( + pi3, period_range("1/1/2005", "11/1/2005", freq="M").asfreq("3D") + ) + + @pytest.mark.parametrize("month", MONTHS) + def test_to_period_quarterly(self, month): + # make sure we can make the round trip + freq = f"Q-{month}" + rng = period_range("1989Q3", "1991Q3", freq=freq) + stamps = rng.to_timestamp() + result = stamps.to_period(freq) + tm.assert_index_equal(rng, result) + + @pytest.mark.parametrize("off", ["BQ", "QS", "BQS"]) + def test_to_period_quarterlyish(self, off): + rng = date_range("01-Jan-2012", periods=8, freq=off) + prng = rng.to_period() + assert prng.freq == "Q-DEC" + + @pytest.mark.parametrize("off", ["BA", "AS", "BAS"]) + def test_to_period_annualish(self, off): + rng = date_range("01-Jan-2012", periods=8, freq=off) + prng = rng.to_period() + assert prng.freq == "A-DEC" + + def test_to_period_monthish(self): + offsets = ["MS", "BM"] + for off in offsets: + rng = date_range("01-Jan-2012", periods=8, freq=off) + prng = rng.to_period() + assert prng.freq == "M" + + rng = date_range("01-Jan-2012", periods=8, freq="M") +
prng = rng.to_period() + assert prng.freq == "M" + + with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG): + date_range("01-Jan-2012", periods=8, freq="EOM") + + @pytest.mark.parametrize("freq", ["2M", MonthEnd(2)]) + def test_dti_to_period_2monthish(self, freq): + dti = date_range("2020-01-01", periods=3, freq=freq) + pi = dti.to_period() + + tm.assert_index_equal(pi, period_range("2020-01", "2020-05", freq=freq)) + + def test_to_period_infer(self): + # https://github.com/pandas-dev/pandas/issues/33358 + rng = date_range( + start="2019-12-22 06:40:00+00:00", + end="2019-12-22 08:45:00+00:00", + freq="5min", + ) + + with tm.assert_produces_warning(UserWarning): + pi1 = rng.to_period("5min") + + with tm.assert_produces_warning(UserWarning): + pi2 = rng.to_period() + + tm.assert_index_equal(pi1, pi2) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_period_dt64_round_trip(self): + dti = date_range("1/1/2000", "1/7/2002", freq="B") + pi = dti.to_period() + tm.assert_index_equal(pi.to_timestamp(), dti) + + dti = date_range("1/1/2000", "1/7/2002", freq="B") + pi = dti.to_period(freq="H") + tm.assert_index_equal(pi.to_timestamp(), dti) + + def test_to_period_millisecond(self): + index = DatetimeIndex( + [ + Timestamp("2007-01-01 10:11:12.123456Z"), + Timestamp("2007-01-01 10:11:13.789123Z"), + ] + ) + + with tm.assert_produces_warning(UserWarning): + # warning that timezone info will be lost + period = index.to_period(freq="L") + assert 2 == len(period) + assert period[0] == Period("2007-01-01 10:11:12.123Z", "L") + assert period[1] == Period("2007-01-01 10:11:13.789Z", "L") + + def test_to_period_microsecond(self): + index = DatetimeIndex( + [ + Timestamp("2007-01-01 10:11:12.123456Z"), + Timestamp("2007-01-01 10:11:13.789123Z"), + ] + ) + + with tm.assert_produces_warning(UserWarning): + # warning that timezone info will be lost + period = index.to_period(freq="U") + assert 2 == len(period) + assert period[0] == Period("2007-01-01 10:11:12.123456Z", "U") + assert period[1] == Period("2007-01-01 10:11:13.789123Z", "U") + + @pytest.mark.parametrize( + "tz", + ["US/Eastern", pytz.utc, tzlocal(), "dateutil/US/Eastern", dateutil.tz.tzutc()], + ) + def test_to_period_tz(self, tz): + ts = date_range("1/1/2000", "2/1/2000", tz=tz) + + with tm.assert_produces_warning(UserWarning): + # GH#21333 warning that timezone info will be lost + # filter warning about freq deprecation + + result = ts.to_period()[0] + expected = ts[0].to_period(ts.freq) + + assert result == expected + + expected = date_range("1/1/2000", "2/1/2000").to_period() + + with tm.assert_produces_warning(UserWarning): + # GH#21333 warning that timezone info will be lost + result = ts.to_period(ts.freq) + + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("tz", ["Etc/GMT-1", "Etc/GMT+1"]) + def test_to_period_tz_utc_offset_consistency(self, tz): + # GH#22905 + ts = date_range("1/1/2000", "2/1/2000", tz="Etc/GMT-1") + with tm.assert_produces_warning(UserWarning): + result = ts.to_period()[0] + expected = ts[0].to_period(ts.freq) + assert result == expected + + def test_to_period_nofreq(self): + idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"]) + msg = "You must pass a freq argument as current index has none." 
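+ # the dates above are irregularly spaced, so no frequency can be inferred + # and to_period() has no freq to fall back on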
+ with pytest.raises(ValueError, match=msg): + idx.to_period() + + idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"], freq="infer") + assert idx.freqstr == "D" + expected = PeriodIndex(["2000-01-01", "2000-01-02", "2000-01-03"], freq="D") + tm.assert_index_equal(idx.to_period(), expected) + + # GH#7606 + idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"]) + assert idx.freqstr is None + tm.assert_index_equal(idx.to_period(), expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_to_series.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_to_series.py new file mode 100644 index 00000000..0c397c8a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_to_series.py @@ -0,0 +1,18 @@ +import numpy as np + +from pandas import ( + DatetimeIndex, + Series, +) +import pandas._testing as tm + + +class TestToSeries: + def test_to_series(self): + naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B") + idx = naive.tz_localize("US/Pacific") + + expected = Series(np.array(idx.tolist(), dtype="object"), name="B") + result = idx.to_series(index=[0, 1]) + assert expected.dtype == idx.dtype + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_asof.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_asof.py new file mode 100644 index 00000000..7adc4003 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_asof.py @@ -0,0 +1,31 @@ +from datetime import timedelta + +from pandas import ( + Index, + Timestamp, + date_range, + isna, +) +import pandas._testing as tm + + +class TestAsOf: + def test_asof_partial(self): + index = date_range("2010-01-01", periods=2, freq="m") + expected = Timestamp("2010-02-28") + result = index.asof("2010-02") + assert result == expected + assert not isinstance(result, Index) + + def test_asof(self): + index = tm.makeDateIndex(100) + + dt = index[0] + assert index.asof(dt) == dt + assert isna(index.asof(dt - timedelta(1))) + + dt = index[-1] + assert index.asof(dt + timedelta(1)) == dt + + dt = index[0].to_pydatetime() + assert isinstance(index.asof(dt), Timestamp) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_constructors.py new file mode 100644 index 00000000..fc7ce19c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_constructors.py @@ -0,0 +1,1118 @@ +from __future__ import annotations + +from datetime import ( + datetime, + timedelta, + timezone, +) +from functools import partial +from operator import attrgetter + +import dateutil +import numpy as np +import pytest +import pytz + +from pandas._libs.tslibs import ( + OutOfBoundsDatetime, + astype_overflowsafe, +) + +import pandas as pd +from pandas import ( + DatetimeIndex, + Index, + Timestamp, + date_range, + offsets, + to_datetime, +) +import pandas._testing as tm +from pandas.core.arrays import ( + DatetimeArray, + period_array, +) + + +class TestDatetimeIndex: + def test_closed_deprecated(self): + # GH#52628 + msg = "The 'closed' keyword" + with tm.assert_produces_warning(FutureWarning, match=msg): + DatetimeIndex([], closed=True) + + def test_normalize_deprecated(self): + # GH#52628 + msg = "The 'normalize' keyword" + with 
tm.assert_produces_warning(FutureWarning, match=msg): + DatetimeIndex([], normalize=True) + + def test_from_dt64_unsupported_unit(self): + # GH#49292 + val = np.datetime64(1, "D") + result = DatetimeIndex([val], tz="US/Pacific") + + expected = DatetimeIndex([val.astype("M8[s]")], tz="US/Pacific") + tm.assert_index_equal(result, expected) + + def test_explicit_tz_none(self): + # GH#48659 + dti = date_range("2016-01-01", periods=10, tz="UTC") + + msg = "Passed data is timezone-aware, incompatible with 'tz=None'" + with pytest.raises(ValueError, match=msg): + DatetimeIndex(dti, tz=None) + + with pytest.raises(ValueError, match=msg): + DatetimeIndex(np.array(dti), tz=None) + + msg = "Cannot pass both a timezone-aware dtype and tz=None" + with pytest.raises(ValueError, match=msg): + DatetimeIndex([], dtype="M8[ns, UTC]", tz=None) + + @pytest.mark.parametrize( + "dt_cls", [DatetimeIndex, DatetimeArray._from_sequence_not_strict] + ) + def test_freq_validation_with_nat(self, dt_cls): + # GH#11587 make sure we get a useful error message when generate_range + # raises + msg = ( + "Inferred frequency None from passed values does not conform " + "to passed frequency D" + ) + with pytest.raises(ValueError, match=msg): + dt_cls([pd.NaT, Timestamp("2011-01-01")], freq="D") + with pytest.raises(ValueError, match=msg): + dt_cls([pd.NaT, Timestamp("2011-01-01")._value], freq="D") + + # TODO: better place for tests shared by DTI/TDI? + @pytest.mark.parametrize( + "index", + [ + date_range("2016-01-01", periods=5, tz="US/Pacific"), + pd.timedelta_range("1 Day", periods=5), + ], + ) + def test_shallow_copy_inherits_array_freq(self, index): + # If we pass a DTA/TDA to shallow_copy and dont specify a freq, + # we should inherit the array's freq, not our own. + array = index._data + + arr = array[[0, 3, 2, 4, 1]] + assert arr.freq is None + + result = index._shallow_copy(arr) + assert result.freq is None + + def test_categorical_preserves_tz(self): + # GH#18664 retain tz when going DTI-->Categorical-->DTI + dti = DatetimeIndex( + [pd.NaT, "2015-01-01", "1999-04-06 15:14:13", "2015-01-01"], tz="US/Eastern" + ) + + for dtobj in [dti, dti._data]: + # works for DatetimeIndex or DatetimeArray + + ci = pd.CategoricalIndex(dtobj) + carr = pd.Categorical(dtobj) + cser = pd.Series(ci) + + for obj in [ci, carr, cser]: + result = DatetimeIndex(obj) + tm.assert_index_equal(result, dti) + + def test_dti_with_period_data_raises(self): + # GH#23675 + data = pd.PeriodIndex(["2016Q1", "2016Q2"], freq="Q") + + with pytest.raises(TypeError, match="PeriodDtype data is invalid"): + DatetimeIndex(data) + + with pytest.raises(TypeError, match="PeriodDtype data is invalid"): + to_datetime(data) + + with pytest.raises(TypeError, match="PeriodDtype data is invalid"): + DatetimeIndex(period_array(data)) + + with pytest.raises(TypeError, match="PeriodDtype data is invalid"): + to_datetime(period_array(data)) + + def test_dti_with_timedelta64_data_raises(self): + # GH#23675 deprecated, enforced in GH#29794 + data = np.array([0], dtype="m8[ns]") + msg = r"timedelta64\[ns\] cannot be converted to datetime64" + with pytest.raises(TypeError, match=msg): + DatetimeIndex(data) + + with pytest.raises(TypeError, match=msg): + to_datetime(data) + + with pytest.raises(TypeError, match=msg): + DatetimeIndex(pd.TimedeltaIndex(data)) + + with pytest.raises(TypeError, match=msg): + to_datetime(pd.TimedeltaIndex(data)) + + def test_constructor_from_sparse_array(self): + # https://github.com/pandas-dev/pandas/issues/35843 + values = [ +
Timestamp("2012-05-01T01:00:00.000000"), + Timestamp("2016-05-01T01:00:00.000000"), + ] + arr = pd.arrays.SparseArray(values) + result = Index(arr) + assert type(result) is Index + assert result.dtype == arr.dtype + + def test_construction_caching(self): + df = pd.DataFrame( + { + "dt": date_range("20130101", periods=3), + "dttz": date_range("20130101", periods=3, tz="US/Eastern"), + "dt_with_null": [ + Timestamp("20130101"), + pd.NaT, + Timestamp("20130103"), + ], + "dtns": date_range("20130101", periods=3, freq="ns"), + } + ) + assert df.dttz.dtype.tz.zone == "US/Eastern" + + @pytest.mark.parametrize( + "kwargs", + [{"tz": "dtype.tz"}, {"dtype": "dtype"}, {"dtype": "dtype", "tz": "dtype.tz"}], + ) + def test_construction_with_alt(self, kwargs, tz_aware_fixture): + tz = tz_aware_fixture + i = date_range("20130101", periods=5, freq="H", tz=tz) + kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()} + result = DatetimeIndex(i, **kwargs) + tm.assert_index_equal(i, result) + + @pytest.mark.parametrize( + "kwargs", + [{"tz": "dtype.tz"}, {"dtype": "dtype"}, {"dtype": "dtype", "tz": "dtype.tz"}], + ) + def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture): + tz = tz_aware_fixture + i = date_range("20130101", periods=5, freq="H", tz=tz) + i = i._with_freq(None) + kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()} + + if "tz" in kwargs: + result = DatetimeIndex(i.asi8, tz="UTC").tz_convert(kwargs["tz"]) + + expected = DatetimeIndex(i, **kwargs) + tm.assert_index_equal(result, expected) + + # localize into the provided tz + i2 = DatetimeIndex(i.tz_localize(None).asi8, tz="UTC") + expected = i.tz_localize(None).tz_localize("UTC") + tm.assert_index_equal(i2, expected) + + # incompat tz/dtype + msg = "cannot supply both a tz and a dtype with a tz" + with pytest.raises(ValueError, match=msg): + DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype, tz="US/Pacific") + + def test_construction_index_with_mixed_timezones(self): + # gh-11488: no tz results in DatetimeIndex + result = Index([Timestamp("2011-01-01"), Timestamp("2011-01-02")], name="idx") + exp = DatetimeIndex( + [Timestamp("2011-01-01"), Timestamp("2011-01-02")], name="idx" + ) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is None + + # same tz results in DatetimeIndex + result = Index( + [ + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), + Timestamp("2011-01-02 10:00", tz="Asia/Tokyo"), + ], + name="idx", + ) + exp = DatetimeIndex( + [Timestamp("2011-01-01 10:00"), Timestamp("2011-01-02 10:00")], + tz="Asia/Tokyo", + name="idx", + ) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is not None + assert result.tz == exp.tz + + # same tz results in DatetimeIndex (DST) + result = Index( + [ + Timestamp("2011-01-01 10:00", tz="US/Eastern"), + Timestamp("2011-08-01 10:00", tz="US/Eastern"), + ], + name="idx", + ) + exp = DatetimeIndex( + [Timestamp("2011-01-01 10:00"), Timestamp("2011-08-01 10:00")], + tz="US/Eastern", + name="idx", + ) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is not None + assert result.tz == exp.tz + + # Different tz results in Index(dtype=object) + result = Index( + [ + Timestamp("2011-01-01 10:00"), + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + name="idx", + ) + exp = Index( + [ + Timestamp("2011-01-01 10:00"), + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + 
dtype="object", + name="idx", + ) + tm.assert_index_equal(result, exp, exact=True) + assert not isinstance(result, DatetimeIndex) + + result = Index( + [ + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + name="idx", + ) + exp = Index( + [ + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + dtype="object", + name="idx", + ) + tm.assert_index_equal(result, exp, exact=True) + assert not isinstance(result, DatetimeIndex) + + msg = "DatetimeIndex has mixed timezones" + msg_depr = "parsing datetimes with mixed time zones will raise an error" + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=msg_depr): + DatetimeIndex(["2013-11-02 22:00-05:00", "2013-11-03 22:00-06:00"]) + + # length = 1 + result = Index([Timestamp("2011-01-01")], name="idx") + exp = DatetimeIndex([Timestamp("2011-01-01")], name="idx") + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is None + + # length = 1 with tz + result = Index([Timestamp("2011-01-01 10:00", tz="Asia/Tokyo")], name="idx") + exp = DatetimeIndex( + [Timestamp("2011-01-01 10:00")], tz="Asia/Tokyo", name="idx" + ) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is not None + assert result.tz == exp.tz + + def test_construction_index_with_mixed_timezones_with_NaT(self): + # see gh-11488 + result = Index( + [pd.NaT, Timestamp("2011-01-01"), pd.NaT, Timestamp("2011-01-02")], + name="idx", + ) + exp = DatetimeIndex( + [pd.NaT, Timestamp("2011-01-01"), pd.NaT, Timestamp("2011-01-02")], + name="idx", + ) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is None + + # Same tz results in DatetimeIndex + result = Index( + [ + pd.NaT, + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), + pd.NaT, + Timestamp("2011-01-02 10:00", tz="Asia/Tokyo"), + ], + name="idx", + ) + exp = DatetimeIndex( + [ + pd.NaT, + Timestamp("2011-01-01 10:00"), + pd.NaT, + Timestamp("2011-01-02 10:00"), + ], + tz="Asia/Tokyo", + name="idx", + ) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is not None + assert result.tz == exp.tz + + # same tz results in DatetimeIndex (DST) + result = Index( + [ + Timestamp("2011-01-01 10:00", tz="US/Eastern"), + pd.NaT, + Timestamp("2011-08-01 10:00", tz="US/Eastern"), + ], + name="idx", + ) + exp = DatetimeIndex( + [Timestamp("2011-01-01 10:00"), pd.NaT, Timestamp("2011-08-01 10:00")], + tz="US/Eastern", + name="idx", + ) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is not None + assert result.tz == exp.tz + + # different tz results in Index(dtype=object) + result = Index( + [ + pd.NaT, + Timestamp("2011-01-01 10:00"), + pd.NaT, + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + name="idx", + ) + exp = Index( + [ + pd.NaT, + Timestamp("2011-01-01 10:00"), + pd.NaT, + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + dtype="object", + name="idx", + ) + tm.assert_index_equal(result, exp, exact=True) + assert not isinstance(result, DatetimeIndex) + + result = Index( + [ + pd.NaT, + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), + pd.NaT, + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + name="idx", + ) + exp = Index( + [ + pd.NaT, + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), 
+ pd.NaT, + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + dtype="object", + name="idx", + ) + tm.assert_index_equal(result, exp, exact=True) + assert not isinstance(result, DatetimeIndex) + + # all NaT + result = Index([pd.NaT, pd.NaT], name="idx") + exp = DatetimeIndex([pd.NaT, pd.NaT], name="idx") + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is None + + def test_construction_dti_with_mixed_timezones(self): + # GH 11488 (not changed, added explicit tests) + + # no tz results in DatetimeIndex + result = DatetimeIndex( + [Timestamp("2011-01-01"), Timestamp("2011-01-02")], name="idx" + ) + exp = DatetimeIndex( + [Timestamp("2011-01-01"), Timestamp("2011-01-02")], name="idx" + ) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + + # same tz results in DatetimeIndex + result = DatetimeIndex( + [ + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), + Timestamp("2011-01-02 10:00", tz="Asia/Tokyo"), + ], + name="idx", + ) + exp = DatetimeIndex( + [Timestamp("2011-01-01 10:00"), Timestamp("2011-01-02 10:00")], + tz="Asia/Tokyo", + name="idx", + ) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + + # same tz results in DatetimeIndex (DST) + result = DatetimeIndex( + [ + Timestamp("2011-01-01 10:00", tz="US/Eastern"), + Timestamp("2011-08-01 10:00", tz="US/Eastern"), + ], + name="idx", + ) + exp = DatetimeIndex( + [Timestamp("2011-01-01 10:00"), Timestamp("2011-08-01 10:00")], + tz="US/Eastern", + name="idx", + ) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + + # tz mismatch affecting to tz-aware raises TypeError/ValueError + + msg = "cannot be converted to datetime64" + with pytest.raises(ValueError, match=msg): + DatetimeIndex( + [ + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + name="idx", + ) + + # pre-2.0 this raised bc of awareness mismatch. in 2.0 with a tz# + # specified we behave as if this was called pointwise, so + # the naive Timestamp is treated as a wall time. + dti = DatetimeIndex( + [ + Timestamp("2011-01-01 10:00"), + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + tz="Asia/Tokyo", + name="idx", + ) + expected = DatetimeIndex( + [ + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), + Timestamp("2011-01-02 10:00", tz="US/Eastern").tz_convert("Asia/Tokyo"), + ], + tz="Asia/Tokyo", + name="idx", + ) + tm.assert_index_equal(dti, expected) + + # pre-2.0 mixed-tz scalars raised even if a tz/dtype was specified. 
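+ # (the two scalars below carry different zones, so no common tz could be + # inferred from the data alone);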
+ # as of 2.0 we successfully return the requested tz/dtype + dti = DatetimeIndex( + [ + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + tz="US/Eastern", + name="idx", + ) + expected = DatetimeIndex( + [ + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo").tz_convert("US/Eastern"), + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + tz="US/Eastern", + name="idx", + ) + tm.assert_index_equal(dti, expected) + + # same thing but pass dtype instead of tz + dti = DatetimeIndex( + [ + Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"), + Timestamp("2011-01-02 10:00", tz="US/Eastern"), + ], + dtype="M8[ns, US/Eastern]", + name="idx", + ) + tm.assert_index_equal(dti, expected) + + def test_construction_base_constructor(self): + arr = [Timestamp("2011-01-01"), pd.NaT, Timestamp("2011-01-03")] + tm.assert_index_equal(Index(arr), DatetimeIndex(arr)) + tm.assert_index_equal(Index(np.array(arr)), DatetimeIndex(np.array(arr))) + + arr = [np.nan, pd.NaT, Timestamp("2011-01-03")] + tm.assert_index_equal(Index(arr), DatetimeIndex(arr)) + tm.assert_index_equal(Index(np.array(arr)), DatetimeIndex(np.array(arr))) + + def test_construction_outofbounds(self): + # GH 13663 + dates = [ + datetime(3000, 1, 1), + datetime(4000, 1, 1), + datetime(5000, 1, 1), + datetime(6000, 1, 1), + ] + exp = Index(dates, dtype=object) + # coerces to object + tm.assert_index_equal(Index(dates), exp) + + msg = "^Out of bounds nanosecond timestamp: 3000-01-01 00:00:00, at position 0$" + with pytest.raises(OutOfBoundsDatetime, match=msg): + # can't create DatetimeIndex + DatetimeIndex(dates) + + def test_construction_with_ndarray(self): + # GH 5152 + dates = [datetime(2013, 10, 7), datetime(2013, 10, 8), datetime(2013, 10, 9)] + data = DatetimeIndex(dates, freq=offsets.BDay()).values + result = DatetimeIndex(data, freq=offsets.BDay()) + expected = DatetimeIndex(["2013-10-07", "2013-10-08", "2013-10-09"], freq="B") + tm.assert_index_equal(result, expected) + + def test_integer_values_and_tz_interpreted_as_utc(self): + # GH-24559 + val = np.datetime64("2000-01-01 00:00:00", "ns") + values = np.array([val.view("i8")]) + + result = DatetimeIndex(values).tz_localize("US/Central") + + expected = DatetimeIndex(["2000-01-01T00:00:00"], tz="US/Central") + tm.assert_index_equal(result, expected) + + # but UTC is *not* deprecated. 
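+ # constructing straight from the integer values with an explicit UTC tz + # should therefore emit no warning at all: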
+        with tm.assert_produces_warning(None):
+            result = DatetimeIndex(values, tz="UTC")
+        expected = DatetimeIndex(["2000-01-01T00:00:00"], tz="UTC")
+        tm.assert_index_equal(result, expected)
+
+    def test_constructor_coverage(self):
+        rng = date_range("1/1/2000", periods=10.5)
+        exp = date_range("1/1/2000", periods=10)
+        tm.assert_index_equal(rng, exp)
+
+        msg = "periods must be a number, got foo"
+        with pytest.raises(TypeError, match=msg):
+            date_range(start="1/1/2000", periods="foo", freq="D")
+
+        msg = r"DatetimeIndex\(\.\.\.\) must be called with a collection"
+        with pytest.raises(TypeError, match=msg):
+            DatetimeIndex("1/1/2000")
+
+        # generator expression
+        gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
+        result = DatetimeIndex(gen)
+        expected = DatetimeIndex(
+            [datetime(2000, 1, 1) + timedelta(i) for i in range(10)]
+        )
+        tm.assert_index_equal(result, expected)
+
+        # NumPy string array
+        strings = np.array(["2000-01-01", "2000-01-02", "2000-01-03"])
+        result = DatetimeIndex(strings)
+        expected = DatetimeIndex(strings.astype("O"))
+        tm.assert_index_equal(result, expected)
+
+        from_ints = DatetimeIndex(expected.asi8)
+        tm.assert_index_equal(from_ints, expected)
+
+        # string with NaT
+        strings = np.array(["2000-01-01", "2000-01-02", "NaT"])
+        result = DatetimeIndex(strings)
+        expected = DatetimeIndex(strings.astype("O"))
+        tm.assert_index_equal(result, expected)
+
+        from_ints = DatetimeIndex(expected.asi8)
+        tm.assert_index_equal(from_ints, expected)
+
+        # non-conforming
+        msg = (
+            "Inferred frequency None from passed values does not conform "
+            "to passed frequency D"
+        )
+        with pytest.raises(ValueError, match=msg):
+            DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"], freq="D")
+
+        msg = (
+            "Of the four parameters: start, end, periods, and freq, exactly "
+            "three must be specified"
+        )
+        with pytest.raises(ValueError, match=msg):
+            date_range(start="2011-01-01", freq="b")
+        with pytest.raises(ValueError, match=msg):
+            date_range(end="2011-01-01", freq="B")
+        with pytest.raises(ValueError, match=msg):
+            date_range(periods=10, freq="D")
+
+    @pytest.mark.parametrize("freq", ["AS", "W-SUN"])
+    def test_constructor_datetime64_tzformat(self, freq):
+        # see GH#6572: ISO 8601 format results in stdlib timezone object
+        idx = date_range(
+            "2013-01-01T00:00:00-05:00", "2016-01-01T23:59:59-05:00", freq=freq
+        )
+        expected = date_range(
+            "2013-01-01T00:00:00",
+            "2016-01-01T23:59:59",
+            freq=freq,
+            tz=timezone(timedelta(minutes=-300)),
+        )
+        tm.assert_index_equal(idx, expected)
+        # Unable to use `US/Eastern` because of DST
+        expected_i8 = date_range(
+            "2013-01-01T00:00:00", "2016-01-01T23:59:59", freq=freq, tz="America/Lima"
+        )
+        tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
+
+        idx = date_range(
+            "2013-01-01T00:00:00+09:00", "2016-01-01T23:59:59+09:00", freq=freq
+        )
+        expected = date_range(
+            "2013-01-01T00:00:00",
+            "2016-01-01T23:59:59",
+            freq=freq,
+            tz=timezone(timedelta(minutes=540)),
+        )
+        tm.assert_index_equal(idx, expected)
+        expected_i8 = date_range(
+            "2013-01-01T00:00:00", "2016-01-01T23:59:59", freq=freq, tz="Asia/Tokyo"
+        )
+        tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
+
+        # Non ISO 8601 format results in dateutil.tz.tzoffset
+        idx = date_range("2013/1/1 0:00:00-5:00", "2016/1/1 23:59:59-5:00", freq=freq)
+        expected = date_range(
+            "2013-01-01T00:00:00",
+            "2016-01-01T23:59:59",
+            freq=freq,
+            tz=timezone(timedelta(minutes=-300)),
+        )
+        tm.assert_index_equal(idx, expected)
+        # Unable to use `US/Eastern` because of DST
+        expected_i8 = date_range(
+            "2013-01-01T00:00:00", "2016-01-01T23:59:59", freq=freq, tz="America/Lima"
+        )
+        tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
+
+        idx = date_range("2013/1/1 0:00:00+9:00", "2016/1/1 23:59:59+09:00", freq=freq)
+        expected = date_range(
+            "2013-01-01T00:00:00",
+            "2016-01-01T23:59:59",
+            freq=freq,
+            tz=timezone(timedelta(minutes=540)),
+        )
+        tm.assert_index_equal(idx, expected)
+        expected_i8 = date_range(
+            "2013-01-01T00:00:00", "2016-01-01T23:59:59", freq=freq, tz="Asia/Tokyo"
+        )
+        tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
+
+    def test_constructor_dtype(self):
+        # passing a dtype with a tz should localize
+        idx = DatetimeIndex(
+            ["2013-01-01", "2013-01-02"], dtype="datetime64[ns, US/Eastern]"
+        )
+        expected = DatetimeIndex(["2013-01-01", "2013-01-02"]).tz_localize("US/Eastern")
+        tm.assert_index_equal(idx, expected)
+
+        idx = DatetimeIndex(["2013-01-01", "2013-01-02"], tz="US/Eastern")
+        tm.assert_index_equal(idx, expected)
+
+    def test_constructor_dtype_tz_mismatch_raises(self):
+        # if we already have a tz and its not the same, then raise
+        idx = DatetimeIndex(
+            ["2013-01-01", "2013-01-02"], dtype="datetime64[ns, US/Eastern]"
+        )
+
+        msg = (
+            "cannot supply both a tz and a timezone-naive dtype "
+            r"\(i\.e\. datetime64\[ns\]\)"
+        )
+        with pytest.raises(ValueError, match=msg):
+            DatetimeIndex(idx, dtype="datetime64[ns]")
+
+        # this is effectively trying to convert tz's
+        msg = "data is already tz-aware US/Eastern, unable to set specified tz: CET"
+        with pytest.raises(TypeError, match=msg):
+            DatetimeIndex(idx, dtype="datetime64[ns, CET]")
+        msg = "cannot supply both a tz and a dtype with a tz"
+        with pytest.raises(ValueError, match=msg):
+            DatetimeIndex(idx, tz="CET", dtype="datetime64[ns, US/Eastern]")
+
+        result = DatetimeIndex(idx, dtype="datetime64[ns, US/Eastern]")
+        tm.assert_index_equal(idx, result)
+
+    @pytest.mark.parametrize("dtype", [object, np.int32, np.int64])
+    def test_constructor_invalid_dtype_raises(self, dtype):
+        # GH 23986
+        msg = "Unexpected value for 'dtype'"
+        with pytest.raises(ValueError, match=msg):
+            DatetimeIndex([1, 2], dtype=dtype)
+
+    def test_constructor_name(self):
+        idx = date_range(start="2000-01-01", periods=1, freq="A", name="TEST")
+        assert idx.name == "TEST"
+
+    def test_000constructor_resolution(self):
+        # 2252
+        t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
+        idx = DatetimeIndex([t1])
+
+        assert idx.nanosecond[0] == t1.nanosecond
+
+    def test_disallow_setting_tz(self):
+        # GH 3746
+        dti = DatetimeIndex(["2010"], tz="UTC")
+        msg = "Cannot directly set timezone"
+        with pytest.raises(AttributeError, match=msg):
+            dti.tz = pytz.timezone("US/Pacific")
+
+    @pytest.mark.parametrize(
+        "tz",
+        [
+            None,
+            "America/Los_Angeles",
+            pytz.timezone("America/Los_Angeles"),
+            Timestamp("2000", tz="America/Los_Angeles").tz,
+        ],
+    )
+    def test_constructor_start_end_with_tz(self, tz):
+        # GH 18595
+        start = Timestamp("2013-01-01 06:00:00", tz="America/Los_Angeles")
+        end = Timestamp("2013-01-02 06:00:00", tz="America/Los_Angeles")
+        result = date_range(freq="D", start=start, end=end, tz=tz)
+        expected = DatetimeIndex(
+            ["2013-01-01 06:00:00", "2013-01-02 06:00:00"],
+            tz="America/Los_Angeles",
+            freq="D",
+        )
+        tm.assert_index_equal(result, expected)
+        # Especially assert that the timezone is consistent for pytz
+        assert pytz.timezone("America/Los_Angeles") is result.tz
+
+    @pytest.mark.parametrize("tz", ["US/Pacific", "US/Eastern", "Asia/Tokyo"])
+    def test_constructor_with_non_normalized_pytz(self, tz):
+        # GH 18595
+ non_norm_tz = Timestamp("2010", tz=tz).tz + result = DatetimeIndex(["2010"], tz=non_norm_tz) + assert pytz.timezone(tz) is result.tz + + def test_constructor_timestamp_near_dst(self): + # GH 20854 + ts = [ + Timestamp("2016-10-30 03:00:00+0300", tz="Europe/Helsinki"), + Timestamp("2016-10-30 03:00:00+0200", tz="Europe/Helsinki"), + ] + result = DatetimeIndex(ts) + expected = DatetimeIndex([ts[0].to_pydatetime(), ts[1].to_pydatetime()]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("klass", [Index, DatetimeIndex]) + @pytest.mark.parametrize("box", [np.array, partial(np.array, dtype=object), list]) + @pytest.mark.parametrize( + "tz, dtype", + [("US/Pacific", "datetime64[ns, US/Pacific]"), (None, "datetime64[ns]")], + ) + def test_constructor_with_int_tz(self, klass, box, tz, dtype): + # GH 20997, 20964 + ts = Timestamp("2018-01-01", tz=tz).as_unit("ns") + result = klass(box([ts._value]), dtype=dtype) + expected = klass([ts]) + assert result == expected + + def test_construction_int_rountrip(self, tz_naive_fixture): + # GH 12619, GH#24559 + tz = tz_naive_fixture + + result = 1293858000000000000 + expected = DatetimeIndex([result], tz=tz).asi8[0] + assert result == expected + + def test_construction_from_replaced_timestamps_with_dst(self): + # GH 18785 + index = date_range( + Timestamp(2000, 1, 1), + Timestamp(2005, 1, 1), + freq="MS", + tz="Australia/Melbourne", + ) + test = pd.DataFrame({"data": range(len(index))}, index=index) + test = test.resample("Y").mean() + result = DatetimeIndex([x.replace(month=6, day=1) for x in test.index]) + expected = DatetimeIndex( + [ + "2000-06-01 00:00:00", + "2001-06-01 00:00:00", + "2002-06-01 00:00:00", + "2003-06-01 00:00:00", + "2004-06-01 00:00:00", + "2005-06-01 00:00:00", + ], + tz="Australia/Melbourne", + ) + tm.assert_index_equal(result, expected) + + def test_construction_with_tz_and_tz_aware_dti(self): + # GH 23579 + dti = date_range("2016-01-01", periods=3, tz="US/Central") + msg = "data is already tz-aware US/Central, unable to set specified tz" + with pytest.raises(TypeError, match=msg): + DatetimeIndex(dti, tz="Asia/Tokyo") + + def test_construction_with_nat_and_tzlocal(self): + tz = dateutil.tz.tzlocal() + result = DatetimeIndex(["2018", "NaT"], tz=tz) + expected = DatetimeIndex([Timestamp("2018", tz=tz), pd.NaT]) + tm.assert_index_equal(result, expected) + + def test_constructor_with_ambiguous_keyword_arg(self): + # GH 35297 + + expected = DatetimeIndex( + ["2020-11-01 01:00:00", "2020-11-02 01:00:00"], + dtype="datetime64[ns, America/New_York]", + freq="D", + ambiguous=False, + ) + + # ambiguous keyword in start + timezone = "America/New_York" + start = Timestamp(year=2020, month=11, day=1, hour=1).tz_localize( + timezone, ambiguous=False + ) + result = date_range(start=start, periods=2, ambiguous=False) + tm.assert_index_equal(result, expected) + + # ambiguous keyword in end + timezone = "America/New_York" + end = Timestamp(year=2020, month=11, day=2, hour=1).tz_localize( + timezone, ambiguous=False + ) + result = date_range(end=end, periods=2, ambiguous=False) + tm.assert_index_equal(result, expected) + + def test_constructor_with_nonexistent_keyword_arg(self, warsaw): + # GH 35297 + timezone = warsaw + + # nonexistent keyword in start + start = Timestamp("2015-03-29 02:30:00").tz_localize( + timezone, nonexistent="shift_forward" + ) + result = date_range(start=start, periods=2, freq="H") + expected = DatetimeIndex( + [ + Timestamp("2015-03-29 03:00:00+02:00", tz=timezone), + Timestamp("2015-03-29 
04:00:00+02:00", tz=timezone), + ] + ) + + tm.assert_index_equal(result, expected) + + # nonexistent keyword in end + end = Timestamp("2015-03-29 02:30:00").tz_localize( + timezone, nonexistent="shift_forward" + ) + result = date_range(end=end, periods=2, freq="H") + expected = DatetimeIndex( + [ + Timestamp("2015-03-29 01:00:00+01:00", tz=timezone), + Timestamp("2015-03-29 03:00:00+02:00", tz=timezone), + ] + ) + + tm.assert_index_equal(result, expected) + + def test_constructor_no_precision_raises(self): + # GH-24753, GH-24739 + + msg = "with no precision is not allowed" + with pytest.raises(ValueError, match=msg): + DatetimeIndex(["2000"], dtype="datetime64") + + msg = "The 'datetime64' dtype has no unit. Please pass in" + with pytest.raises(ValueError, match=msg): + Index(["2000"], dtype="datetime64") + + def test_constructor_wrong_precision_raises(self): + dti = DatetimeIndex(["2000"], dtype="datetime64[us]") + assert dti.dtype == "M8[us]" + assert dti[0] == Timestamp(2000, 1, 1) + + def test_index_constructor_with_numpy_object_array_and_timestamp_tz_with_nan(self): + # GH 27011 + result = Index(np.array([Timestamp("2019", tz="UTC"), np.nan], dtype=object)) + expected = DatetimeIndex([Timestamp("2019", tz="UTC"), pd.NaT]) + tm.assert_index_equal(result, expected) + + +class TestTimeSeries: + def test_dti_constructor_preserve_dti_freq(self): + rng = date_range("1/1/2000", "1/2/2000", freq="5min") + + rng2 = DatetimeIndex(rng) + assert rng.freq == rng2.freq + + def test_explicit_none_freq(self): + # Explicitly passing freq=None is respected + rng = date_range("1/1/2000", "1/2/2000", freq="5min") + + result = DatetimeIndex(rng, freq=None) + assert result.freq is None + + result = DatetimeIndex(rng._data, freq=None) + assert result.freq is None + + dta = DatetimeArray(rng, freq=None) + assert dta.freq is None + + def test_dti_constructor_years_only(self, tz_naive_fixture): + tz = tz_naive_fixture + # GH 6961 + rng1 = date_range("2014", "2015", freq="M", tz=tz) + expected1 = date_range("2014-01-31", "2014-12-31", freq="M", tz=tz) + + rng2 = date_range("2014", "2015", freq="MS", tz=tz) + expected2 = date_range("2014-01-01", "2015-01-01", freq="MS", tz=tz) + + rng3 = date_range("2014", "2020", freq="A", tz=tz) + expected3 = date_range("2014-12-31", "2019-12-31", freq="A", tz=tz) + + rng4 = date_range("2014", "2020", freq="AS", tz=tz) + expected4 = date_range("2014-01-01", "2020-01-01", freq="AS", tz=tz) + + for rng, expected in [ + (rng1, expected1), + (rng2, expected2), + (rng3, expected3), + (rng4, expected4), + ]: + tm.assert_index_equal(rng, expected) + + def test_dti_constructor_small_int(self, any_int_numpy_dtype): + # see gh-13721 + exp = DatetimeIndex( + [ + "1970-01-01 00:00:00.00000000", + "1970-01-01 00:00:00.00000001", + "1970-01-01 00:00:00.00000002", + ] + ) + + arr = np.array([0, 10, 20], dtype=any_int_numpy_dtype) + tm.assert_index_equal(DatetimeIndex(arr), exp) + + def test_ctor_str_intraday(self): + rng = DatetimeIndex(["1-1-2000 00:00:01"]) + assert rng[0].second == 1 + + def test_is_(self): + dti = date_range(start="1/1/2005", end="12/1/2005", freq="M") + assert dti.is_(dti) + assert dti.is_(dti.view()) + assert not dti.is_(dti.copy()) + + def test_index_cast_datetime64_other_units(self): + arr = np.arange(0, 100, 10, dtype=np.int64).view("M8[D]") + idx = Index(arr) + + assert (idx.values == astype_overflowsafe(arr, dtype=np.dtype("M8[ns]"))).all() + + def test_constructor_int64_nocopy(self): + # GH#1624 + arr = np.arange(1000, dtype=np.int64) + index = DatetimeIndex(arr) 
+ + arr[50:100] = -1 + assert (index.asi8[50:100] == -1).all() + + arr = np.arange(1000, dtype=np.int64) + index = DatetimeIndex(arr, copy=True) + + arr[50:100] = -1 + assert (index.asi8[50:100] != -1).all() + + @pytest.mark.parametrize( + "freq", ["M", "Q", "A", "D", "B", "BH", "T", "S", "L", "U", "H", "N", "C"] + ) + def test_from_freq_recreate_from_data(self, freq): + org = date_range(start="2001/02/01 09:00", freq=freq, periods=1) + idx = DatetimeIndex(org, freq=freq) + tm.assert_index_equal(idx, org) + + org = date_range( + start="2001/02/01 09:00", freq=freq, tz="US/Pacific", periods=1 + ) + idx = DatetimeIndex(org, freq=freq, tz="US/Pacific") + tm.assert_index_equal(idx, org) + + def test_datetimeindex_constructor_misc(self): + arr = ["1/1/2005", "1/2/2005", "Jn 3, 2005", "2005-01-04"] + msg = r"(\(')?Unknown datetime string format(:', 'Jn 3, 2005'\))?" + with pytest.raises(ValueError, match=msg): + DatetimeIndex(arr) + + arr = ["1/1/2005", "1/2/2005", "1/3/2005", "2005-01-04"] + idx1 = DatetimeIndex(arr) + + arr = [datetime(2005, 1, 1), "1/2/2005", "1/3/2005", "2005-01-04"] + idx2 = DatetimeIndex(arr) + + arr = [Timestamp(datetime(2005, 1, 1)), "1/2/2005", "1/3/2005", "2005-01-04"] + idx3 = DatetimeIndex(arr) + + arr = np.array(["1/1/2005", "1/2/2005", "1/3/2005", "2005-01-04"], dtype="O") + idx4 = DatetimeIndex(arr) + + idx5 = DatetimeIndex(["12/05/2007", "25/01/2008"], dayfirst=True) + idx6 = DatetimeIndex( + ["2007/05/12", "2008/01/25"], dayfirst=False, yearfirst=True + ) + tm.assert_index_equal(idx5, idx6) + + for other in [idx2, idx3, idx4]: + assert (idx1.values == other.values).all() + + sdate = datetime(1999, 12, 25) + edate = datetime(2000, 1, 1) + idx = date_range(start=sdate, freq="1B", periods=20) + assert len(idx) == 20 + assert idx[0] == sdate + 0 * offsets.BDay() + assert idx.freq == "B" + + idx1 = date_range(start=sdate, end=edate, freq="W-SUN") + idx2 = date_range(start=sdate, end=edate, freq=offsets.Week(weekday=6)) + assert len(idx1) == len(idx2) + assert idx1.freq == idx2.freq + + idx1 = date_range(start=sdate, end=edate, freq="QS") + idx2 = date_range( + start=sdate, end=edate, freq=offsets.QuarterBegin(startingMonth=1) + ) + assert len(idx1) == len(idx2) + assert idx1.freq == idx2.freq + + idx1 = date_range(start=sdate, end=edate, freq="BQ") + idx2 = date_range( + start=sdate, end=edate, freq=offsets.BQuarterEnd(startingMonth=12) + ) + assert len(idx1) == len(idx2) + assert idx1.freq == idx2.freq + + def test_pass_datetimeindex_to_index(self): + # Bugs in #1396 + rng = date_range("1/1/2000", "3/1/2000") + idx = Index(rng, dtype=object) + + expected = Index(rng.to_pydatetime(), dtype=object) + + tm.assert_numpy_array_equal(idx.values, expected.values) + + def test_date_range_tuple_freq_raises(self): + # GH#34703 + edate = datetime(2000, 1, 1) + with pytest.raises(TypeError, match="pass as a string instead"): + date_range(end=edate, freq=("D", 5), periods=20) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_date_range.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_date_range.py new file mode 100644 index 00000000..2e2e33e2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_date_range.py @@ -0,0 +1,1304 @@ +""" +test date_range, bdate_range construction from the convenience range functions +""" + +from datetime import ( + datetime, + time, + timedelta, +) + +import numpy as np +import pytest +import pytz +from pytz import timezone + +from 
pandas._libs.tslibs import timezones +from pandas._libs.tslibs.offsets import ( + BDay, + CDay, + DateOffset, + MonthEnd, + prefix_mapping, +) +from pandas.errors import OutOfBoundsDatetime +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Series, + Timedelta, + Timestamp, + bdate_range, + date_range, + offsets, +) +import pandas._testing as tm +from pandas.core.arrays.datetimes import _generate_range as generate_range + +START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) + + +def _get_expected_range( + begin_to_match, + end_to_match, + both_range, + inclusive_endpoints, +): + """Helper to get expected range from a both inclusive range""" + left_match = begin_to_match == both_range[0] + right_match = end_to_match == both_range[-1] + + if inclusive_endpoints == "left" and right_match: + expected_range = both_range[:-1] + elif inclusive_endpoints == "right" and left_match: + expected_range = both_range[1:] + elif inclusive_endpoints == "neither" and left_match and right_match: + expected_range = both_range[1:-1] + elif inclusive_endpoints == "neither" and right_match: + expected_range = both_range[:-1] + elif inclusive_endpoints == "neither" and left_match: + expected_range = both_range[1:] + elif inclusive_endpoints == "both": + expected_range = both_range[:] + else: + expected_range = both_range[:] + + return expected_range + + +class TestTimestampEquivDateRange: + # Older tests in TestTimeSeries constructed their `stamp` objects + # using `date_range` instead of the `Timestamp` constructor. + # TestTimestampEquivDateRange checks that these are equivalent in the + # pertinent cases. + + def test_date_range_timestamp_equiv(self): + rng = date_range("20090415", "20090519", tz="US/Eastern") + stamp = rng[0] + + ts = Timestamp("20090415", tz="US/Eastern") + assert ts == stamp + + def test_date_range_timestamp_equiv_dateutil(self): + rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern") + stamp = rng[0] + + ts = Timestamp("20090415", tz="dateutil/US/Eastern") + assert ts == stamp + + def test_date_range_timestamp_equiv_explicit_pytz(self): + rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern")) + stamp = rng[0] + + ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern")) + assert ts == stamp + + @td.skip_if_windows + def test_date_range_timestamp_equiv_explicit_dateutil(self): + from pandas._libs.tslibs.timezones import dateutil_gettz as gettz + + rng = date_range("20090415", "20090519", tz=gettz("US/Eastern")) + stamp = rng[0] + + ts = Timestamp("20090415", tz=gettz("US/Eastern")) + assert ts == stamp + + def test_date_range_timestamp_equiv_from_datetime_instance(self): + datetime_instance = datetime(2014, 3, 4) + # build a timestamp with a frequency, since then it supports + # addition/subtraction of integers + timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0] + + ts = Timestamp(datetime_instance) + assert ts == timestamp_instance + + def test_date_range_timestamp_equiv_preserve_frequency(self): + timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0] + ts = Timestamp("2014-03-05") + + assert timestamp_instance == ts + + +class TestDateRanges: + @pytest.mark.parametrize("freq", ["N", "U", "L", "T", "S", "H", "D"]) + def test_date_range_edges(self, freq): + # GH#13672 + td = Timedelta(f"1{freq}") + ts = Timestamp("1970-01-01") + + idx = date_range( + start=ts + td, + end=ts + 4 * td, + freq=freq, + ) + exp = DatetimeIndex( + [ts + n * td for n in 
range(1, 5)], + freq=freq, + ) + tm.assert_index_equal(idx, exp) + + # start after end + idx = date_range( + start=ts + 4 * td, + end=ts + td, + freq=freq, + ) + exp = DatetimeIndex([], freq=freq) + tm.assert_index_equal(idx, exp) + + # start matches end + idx = date_range( + start=ts + td, + end=ts + td, + freq=freq, + ) + exp = DatetimeIndex([ts + td], freq=freq) + tm.assert_index_equal(idx, exp) + + def test_date_range_near_implementation_bound(self): + # GH#??? + freq = Timedelta(1) + + with pytest.raises(OutOfBoundsDatetime, match="Cannot generate range with"): + date_range(end=Timestamp.min, periods=2, freq=freq) + + def test_date_range_nat(self): + # GH#11587 + msg = "Neither `start` nor `end` can be NaT" + with pytest.raises(ValueError, match=msg): + date_range(start="2016-01-01", end=pd.NaT, freq="D") + with pytest.raises(ValueError, match=msg): + date_range(start=pd.NaT, end="2016-01-01", freq="D") + + def test_date_range_multiplication_overflow(self): + # GH#24255 + # check that overflows in calculating `addend = periods * stride` + # are caught + with tm.assert_produces_warning(None): + # we should _not_ be seeing a overflow RuntimeWarning + dti = date_range(start="1677-09-22", periods=213503, freq="D") + + assert dti[0] == Timestamp("1677-09-22") + assert len(dti) == 213503 + + msg = "Cannot generate range with" + with pytest.raises(OutOfBoundsDatetime, match=msg): + date_range("1969-05-04", periods=200000000, freq="30000D") + + def test_date_range_unsigned_overflow_handling(self): + # GH#24255 + # case where `addend = periods * stride` overflows int64 bounds + # but not uint64 bounds + dti = date_range(start="1677-09-22", end="2262-04-11", freq="D") + + dti2 = date_range(start=dti[0], periods=len(dti), freq="D") + assert dti2.equals(dti) + + dti3 = date_range(end=dti[-1], periods=len(dti), freq="D") + assert dti3.equals(dti) + + def test_date_range_int64_overflow_non_recoverable(self): + # GH#24255 + # case with start later than 1970-01-01, overflow int64 but not uint64 + msg = "Cannot generate range with" + with pytest.raises(OutOfBoundsDatetime, match=msg): + date_range(start="1970-02-01", periods=106752 * 24, freq="H") + + # case with end before 1970-01-01, overflow int64 but not uint64 + with pytest.raises(OutOfBoundsDatetime, match=msg): + date_range(end="1969-11-14", periods=106752 * 24, freq="H") + + @pytest.mark.slow + @pytest.mark.parametrize( + "s_ts, e_ts", [("2262-02-23", "1969-11-14"), ("1970-02-01", "1677-10-22")] + ) + def test_date_range_int64_overflow_stride_endpoint_different_signs( + self, s_ts, e_ts + ): + # cases where stride * periods overflow int64 and stride/endpoint + # have different signs + start = Timestamp(s_ts) + end = Timestamp(e_ts) + + expected = date_range(start=start, end=end, freq="-1H") + assert expected[0] == start + assert expected[-1] == end + + dti = date_range(end=end, periods=len(expected), freq="-1H") + tm.assert_index_equal(dti, expected) + + def test_date_range_out_of_bounds(self): + # GH#14187 + msg = "Cannot generate range" + with pytest.raises(OutOfBoundsDatetime, match=msg): + date_range("2016-01-01", periods=100000, freq="D") + with pytest.raises(OutOfBoundsDatetime, match=msg): + date_range(end="1763-10-12", periods=100000, freq="D") + + def test_date_range_gen_error(self): + rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min") + assert len(rng) == 4 + + @pytest.mark.parametrize("freq", ["AS", "YS"]) + def test_begin_year_alias(self, freq): + # see gh-9313 + rng = date_range("1/1/2013", "7/1/2017", freq=freq) + 
exp = DatetimeIndex( + ["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"], + freq=freq, + ) + tm.assert_index_equal(rng, exp) + + @pytest.mark.parametrize("freq", ["A", "Y"]) + def test_end_year_alias(self, freq): + # see gh-9313 + rng = date_range("1/1/2013", "7/1/2017", freq=freq) + exp = DatetimeIndex( + ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq + ) + tm.assert_index_equal(rng, exp) + + @pytest.mark.parametrize("freq", ["BA", "BY"]) + def test_business_end_year_alias(self, freq): + # see gh-9313 + rng = date_range("1/1/2013", "7/1/2017", freq=freq) + exp = DatetimeIndex( + ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq + ) + tm.assert_index_equal(rng, exp) + + def test_date_range_negative_freq(self): + # GH 11018 + rng = date_range("2011-12-31", freq="-2A", periods=3) + exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A") + tm.assert_index_equal(rng, exp) + assert rng.freq == "-2A" + + rng = date_range("2011-01-31", freq="-2M", periods=3) + exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M") + tm.assert_index_equal(rng, exp) + assert rng.freq == "-2M" + + def test_date_range_bms_bug(self): + # #1645 + rng = date_range("1/1/2000", periods=10, freq="BMS") + + ex_first = Timestamp("2000-01-03") + assert rng[0] == ex_first + + def test_date_range_normalize(self): + snap = datetime.today() + n = 50 + + rng = date_range(snap, periods=n, normalize=False, freq="2D") + + offset = timedelta(2) + values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset) + + tm.assert_index_equal(rng, values) + + rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B") + the_time = time(8, 15) + for val in rng: + assert val.time() == the_time + + def test_date_range_fy5252(self): + dr = date_range( + start="2013-01-01", + periods=2, + freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest"), + ) + assert dr[0] == Timestamp("2013-01-31") + assert dr[1] == Timestamp("2014-01-30") + + def test_date_range_ambiguous_arguments(self): + # #2538 + start = datetime(2011, 1, 1, 5, 3, 40) + end = datetime(2011, 1, 1, 8, 9, 40) + + msg = ( + "Of the four parameters: start, end, periods, and " + "freq, exactly three must be specified" + ) + with pytest.raises(ValueError, match=msg): + date_range(start, end, periods=10, freq="s") + + def test_date_range_convenience_periods(self): + # GH 20808 + result = date_range("2018-04-24", "2018-04-27", periods=3) + expected = DatetimeIndex( + ["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"], + freq=None, + ) + + tm.assert_index_equal(result, expected) + + # Test if spacing remains linear if tz changes to dst in range + result = date_range( + "2018-04-01 01:00:00", + "2018-04-01 04:00:00", + tz="Australia/Sydney", + periods=3, + ) + expected = DatetimeIndex( + [ + Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"), + Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"), + Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"), + ] + ) + tm.assert_index_equal(result, expected) + + def test_date_range_index_comparison(self): + rng = date_range("2011-01-01", periods=3, tz="US/Eastern") + df = Series(rng).to_frame() + arr = np.array([rng.to_list()]).T + arr2 = np.array([rng]).T + + with pytest.raises(ValueError, match="Unable to coerce to Series"): + rng == df + + with pytest.raises(ValueError, match="Unable to coerce to Series"): + df == rng + + expected = DataFrame([True, True, True]) + + 
results = df == arr2 + tm.assert_frame_equal(results, expected) + + expected = Series([True, True, True], name=0) + + results = df[0] == arr2[:, 0] + tm.assert_series_equal(results, expected) + + expected = np.array( + [[True, False, False], [False, True, False], [False, False, True]] + ) + results = rng == arr + tm.assert_numpy_array_equal(results, expected) + + @pytest.mark.parametrize( + "start,end,result_tz", + [ + ["20180101", "20180103", "US/Eastern"], + [datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"], + [Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"], + [ + Timestamp("20180101", tz="US/Eastern"), + Timestamp("20180103", tz="US/Eastern"), + "US/Eastern", + ], + [ + Timestamp("20180101", tz="US/Eastern"), + Timestamp("20180103", tz="US/Eastern"), + None, + ], + ], + ) + def test_date_range_linspacing_tz(self, start, end, result_tz): + # GH 20983 + result = date_range(start, end, periods=3, tz=result_tz) + expected = date_range("20180101", periods=3, freq="D", tz="US/Eastern") + tm.assert_index_equal(result, expected) + + def test_date_range_businesshour(self): + idx = DatetimeIndex( + [ + "2014-07-04 09:00", + "2014-07-04 10:00", + "2014-07-04 11:00", + "2014-07-04 12:00", + "2014-07-04 13:00", + "2014-07-04 14:00", + "2014-07-04 15:00", + "2014-07-04 16:00", + ], + freq="BH", + ) + rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="BH") + tm.assert_index_equal(idx, rng) + + idx = DatetimeIndex(["2014-07-04 16:00", "2014-07-07 09:00"], freq="BH") + rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="BH") + tm.assert_index_equal(idx, rng) + + idx = DatetimeIndex( + [ + "2014-07-04 09:00", + "2014-07-04 10:00", + "2014-07-04 11:00", + "2014-07-04 12:00", + "2014-07-04 13:00", + "2014-07-04 14:00", + "2014-07-04 15:00", + "2014-07-04 16:00", + "2014-07-07 09:00", + "2014-07-07 10:00", + "2014-07-07 11:00", + "2014-07-07 12:00", + "2014-07-07 13:00", + "2014-07-07 14:00", + "2014-07-07 15:00", + "2014-07-07 16:00", + "2014-07-08 09:00", + "2014-07-08 10:00", + "2014-07-08 11:00", + "2014-07-08 12:00", + "2014-07-08 13:00", + "2014-07-08 14:00", + "2014-07-08 15:00", + "2014-07-08 16:00", + ], + freq="BH", + ) + rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="BH") + tm.assert_index_equal(idx, rng) + + def test_date_range_timedelta(self): + start = "2020-01-01" + end = "2020-01-11" + rng1 = date_range(start, end, freq="3D") + rng2 = date_range(start, end, freq=timedelta(days=3)) + tm.assert_index_equal(rng1, rng2) + + def test_range_misspecified(self): + # GH #1095 + msg = ( + "Of the four parameters: start, end, periods, and " + "freq, exactly three must be specified" + ) + + with pytest.raises(ValueError, match=msg): + date_range(start="1/1/2000") + + with pytest.raises(ValueError, match=msg): + date_range(end="1/1/2000") + + with pytest.raises(ValueError, match=msg): + date_range(periods=10) + + with pytest.raises(ValueError, match=msg): + date_range(start="1/1/2000", freq="H") + + with pytest.raises(ValueError, match=msg): + date_range(end="1/1/2000", freq="H") + + with pytest.raises(ValueError, match=msg): + date_range(periods=10, freq="H") + + with pytest.raises(ValueError, match=msg): + date_range() + + def test_compat_replace(self): + # https://github.com/statsmodels/statsmodels/issues/3349 + # replace should take ints/longs for compat + result = date_range(Timestamp("1960-04-01 00:00:00"), periods=76, freq="QS-JAN") + assert len(result) == 76 + + def test_catch_infinite_loop(self): + offset = offsets.DateOffset(minute=5) 
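+        # `minute=5` *sets* the minute field (unlike `minutes=5`, which adds),
+        # so applying this offset may not move a date forward at all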
+ # blow up, don't loop forever + msg = "Offset did not increment date" + with pytest.raises(ValueError, match=msg): + date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset) + + @pytest.mark.parametrize("periods", (1, 2)) + def test_wom_len(self, periods): + # https://github.com/pandas-dev/pandas/issues/20517 + res = date_range(start="20110101", periods=periods, freq="WOM-1MON") + assert len(res) == periods + + def test_construct_over_dst(self): + # GH 20854 + pre_dst = Timestamp("2010-11-07 01:00:00").tz_localize( + "US/Pacific", ambiguous=True + ) + pst_dst = Timestamp("2010-11-07 01:00:00").tz_localize( + "US/Pacific", ambiguous=False + ) + expect_data = [ + Timestamp("2010-11-07 00:00:00", tz="US/Pacific"), + pre_dst, + pst_dst, + ] + expected = DatetimeIndex(expect_data, freq="H") + result = date_range(start="2010-11-7", periods=3, freq="H", tz="US/Pacific") + tm.assert_index_equal(result, expected) + + def test_construct_with_different_start_end_string_format(self): + # GH 12064 + result = date_range( + "2013-01-01 00:00:00+09:00", "2013/01/01 02:00:00+09:00", freq="H" + ) + expected = DatetimeIndex( + [ + Timestamp("2013-01-01 00:00:00+09:00"), + Timestamp("2013-01-01 01:00:00+09:00"), + Timestamp("2013-01-01 02:00:00+09:00"), + ], + freq="H", + ) + tm.assert_index_equal(result, expected) + + def test_error_with_zero_monthends(self): + msg = r"Offset <0 \* MonthEnds> did not increment date" + with pytest.raises(ValueError, match=msg): + date_range("1/1/2000", "1/1/2001", freq=MonthEnd(0)) + + def test_range_bug(self): + # GH #770 + offset = DateOffset(months=3) + result = date_range("2011-1-1", "2012-1-31", freq=offset) + + start = datetime(2011, 1, 1) + expected = DatetimeIndex([start + i * offset for i in range(5)], freq=offset) + tm.assert_index_equal(result, expected) + + def test_range_tz_pytz(self): + # see gh-2906 + tz = timezone("US/Eastern") + start = tz.localize(datetime(2011, 1, 1)) + end = tz.localize(datetime(2011, 1, 3)) + + dr = date_range(start=start, periods=3) + assert dr.tz.zone == tz.zone + assert dr[0] == start + assert dr[2] == end + + dr = date_range(end=end, periods=3) + assert dr.tz.zone == tz.zone + assert dr[0] == start + assert dr[2] == end + + dr = date_range(start=start, end=end) + assert dr.tz.zone == tz.zone + assert dr[0] == start + assert dr[2] == end + + @pytest.mark.parametrize( + "start, end", + [ + [ + Timestamp(datetime(2014, 3, 6), tz="US/Eastern"), + Timestamp(datetime(2014, 3, 12), tz="US/Eastern"), + ], + [ + Timestamp(datetime(2013, 11, 1), tz="US/Eastern"), + Timestamp(datetime(2013, 11, 6), tz="US/Eastern"), + ], + ], + ) + def test_range_tz_dst_straddle_pytz(self, start, end): + dr = date_range(start, end, freq="D") + assert dr[0] == start + assert dr[-1] == end + assert np.all(dr.hour == 0) + + dr = date_range(start, end, freq="D", tz="US/Eastern") + assert dr[0] == start + assert dr[-1] == end + assert np.all(dr.hour == 0) + + dr = date_range( + start.replace(tzinfo=None), + end.replace(tzinfo=None), + freq="D", + tz="US/Eastern", + ) + assert dr[0] == start + assert dr[-1] == end + assert np.all(dr.hour == 0) + + def test_range_tz_dateutil(self): + # see gh-2906 + + # Use maybe_get_tz to fix filename in tz under dateutil. 
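+        # (the "dateutil/" prefix tells pandas to resolve the zone via
+        # dateutil's tzfile lookup instead of pytz)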
+ from pandas._libs.tslibs.timezones import maybe_get_tz + + tz = lambda x: maybe_get_tz("dateutil/" + x) + + start = datetime(2011, 1, 1, tzinfo=tz("US/Eastern")) + end = datetime(2011, 1, 3, tzinfo=tz("US/Eastern")) + + dr = date_range(start=start, periods=3) + assert dr.tz == tz("US/Eastern") + assert dr[0] == start + assert dr[2] == end + + dr = date_range(end=end, periods=3) + assert dr.tz == tz("US/Eastern") + assert dr[0] == start + assert dr[2] == end + + dr = date_range(start=start, end=end) + assert dr.tz == tz("US/Eastern") + assert dr[0] == start + assert dr[2] == end + + @pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"]) + def test_range_closed(self, freq, inclusive_endpoints_fixture): + begin = datetime(2011, 1, 1) + end = datetime(2014, 1, 1) + + result_range = date_range( + begin, end, inclusive=inclusive_endpoints_fixture, freq=freq + ) + both_range = date_range(begin, end, inclusive="both", freq=freq) + expected_range = _get_expected_range( + begin, end, both_range, inclusive_endpoints_fixture + ) + + tm.assert_index_equal(expected_range, result_range) + + @pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"]) + def test_range_closed_with_tz_aware_start_end( + self, freq, inclusive_endpoints_fixture + ): + # GH12409, GH12684 + begin = Timestamp("2011/1/1", tz="US/Eastern") + end = Timestamp("2014/1/1", tz="US/Eastern") + + result_range = date_range( + begin, end, inclusive=inclusive_endpoints_fixture, freq=freq + ) + both_range = date_range(begin, end, inclusive="both", freq=freq) + expected_range = _get_expected_range( + begin, + end, + both_range, + inclusive_endpoints_fixture, + ) + + tm.assert_index_equal(expected_range, result_range) + + @pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"]) + def test_range_with_tz_closed_with_tz_aware_start_end( + self, freq, inclusive_endpoints_fixture + ): + begin = Timestamp("2011/1/1") + end = Timestamp("2014/1/1") + begintz = Timestamp("2011/1/1", tz="US/Eastern") + endtz = Timestamp("2014/1/1", tz="US/Eastern") + + result_range = date_range( + begin, + end, + inclusive=inclusive_endpoints_fixture, + freq=freq, + tz="US/Eastern", + ) + both_range = date_range( + begin, end, inclusive="both", freq=freq, tz="US/Eastern" + ) + expected_range = _get_expected_range( + begintz, + endtz, + both_range, + inclusive_endpoints_fixture, + ) + + tm.assert_index_equal(expected_range, result_range) + + def test_range_closed_boundary(self, inclusive_endpoints_fixture): + # GH#11804 + right_boundary = date_range( + "2015-09-12", + "2015-12-01", + freq="QS-MAR", + inclusive=inclusive_endpoints_fixture, + ) + left_boundary = date_range( + "2015-09-01", + "2015-09-12", + freq="QS-MAR", + inclusive=inclusive_endpoints_fixture, + ) + both_boundary = date_range( + "2015-09-01", + "2015-12-01", + freq="QS-MAR", + inclusive=inclusive_endpoints_fixture, + ) + neither_boundary = date_range( + "2015-09-11", + "2015-09-12", + freq="QS-MAR", + inclusive=inclusive_endpoints_fixture, + ) + + expected_right = both_boundary + expected_left = both_boundary + expected_both = both_boundary + + if inclusive_endpoints_fixture == "right": + expected_left = both_boundary[1:] + elif inclusive_endpoints_fixture == "left": + expected_right = both_boundary[:-1] + elif inclusive_endpoints_fixture == "both": + expected_right = both_boundary[1:] + expected_left = both_boundary[:-1] + + expected_neither = both_boundary[1:-1] + + tm.assert_index_equal(right_boundary, expected_right) + tm.assert_index_equal(left_boundary, 
expected_left) + tm.assert_index_equal(both_boundary, expected_both) + tm.assert_index_equal(neither_boundary, expected_neither) + + def test_years_only(self): + # GH 6961 + dr = date_range("2014", "2015", freq="M") + assert dr[0] == datetime(2014, 1, 31) + assert dr[-1] == datetime(2014, 12, 31) + + def test_freq_divides_end_in_nanos(self): + # GH 10885 + result_1 = date_range("2005-01-12 10:00", "2005-01-12 16:00", freq="345min") + result_2 = date_range("2005-01-13 10:00", "2005-01-13 16:00", freq="345min") + expected_1 = DatetimeIndex( + ["2005-01-12 10:00:00", "2005-01-12 15:45:00"], + dtype="datetime64[ns]", + freq="345T", + tz=None, + ) + expected_2 = DatetimeIndex( + ["2005-01-13 10:00:00", "2005-01-13 15:45:00"], + dtype="datetime64[ns]", + freq="345T", + tz=None, + ) + tm.assert_index_equal(result_1, expected_1) + tm.assert_index_equal(result_2, expected_2) + + def test_cached_range_bug(self): + rng = date_range("2010-09-01 05:00:00", periods=50, freq=DateOffset(hours=6)) + assert len(rng) == 50 + assert rng[0] == datetime(2010, 9, 1, 5) + + def test_timezone_comparison_bug(self): + # smoke test + start = Timestamp("20130220 10:00", tz="US/Eastern") + result = date_range(start, periods=2, tz="US/Eastern") + assert len(result) == 2 + + def test_timezone_comparison_assert(self): + start = Timestamp("20130220 10:00", tz="US/Eastern") + msg = "Inferred time zone not equal to passed time zone" + with pytest.raises(AssertionError, match=msg): + date_range(start, periods=2, tz="Europe/Berlin") + + def test_negative_non_tick_frequency_descending_dates(self, tz_aware_fixture): + # GH 23270 + tz = tz_aware_fixture + result = date_range(start="2011-06-01", end="2011-01-01", freq="-1MS", tz=tz) + expected = date_range(end="2011-06-01", start="2011-01-01", freq="1MS", tz=tz)[ + ::-1 + ] + tm.assert_index_equal(result, expected) + + def test_range_where_start_equal_end(self, inclusive_endpoints_fixture): + # GH 43394 + start = "2021-09-02" + end = "2021-09-02" + result = date_range( + start=start, end=end, freq="D", inclusive=inclusive_endpoints_fixture + ) + + both_range = date_range(start=start, end=end, freq="D", inclusive="both") + if inclusive_endpoints_fixture == "neither": + expected = both_range[1:-1] + elif inclusive_endpoints_fixture in ("left", "right", "both"): + expected = both_range[:] + + tm.assert_index_equal(result, expected) + + def test_freq_dateoffset_with_relateivedelta_nanos(self): + # GH 46877 + freq = DateOffset(hours=10, days=57, nanoseconds=3) + result = date_range(end="1970-01-01 00:00:00", periods=10, freq=freq, name="a") + expected = DatetimeIndex( + [ + "1968-08-02T05:59:59.999999973", + "1968-09-28T15:59:59.999999976", + "1968-11-25T01:59:59.999999979", + "1969-01-21T11:59:59.999999982", + "1969-03-19T21:59:59.999999985", + "1969-05-16T07:59:59.999999988", + "1969-07-12T17:59:59.999999991", + "1969-09-08T03:59:59.999999994", + "1969-11-04T13:59:59.999999997", + "1970-01-01T00:00:00.000000000", + ], + name="a", + ) + tm.assert_index_equal(result, expected) + + +class TestDateRangeTZ: + """Tests for date_range with timezones""" + + def test_hongkong_tz_convert(self): + # GH#1673 smoke test + dr = date_range("2012-01-01", "2012-01-10", freq="D", tz="Hongkong") + + # it works! 
+ dr.hour + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_date_range_span_dst_transition(self, tzstr): + # GH#1778 + + # Standard -> Daylight Savings Time + dr = date_range("03/06/2012 00:00", periods=200, freq="W-FRI", tz="US/Eastern") + + assert (dr.hour == 0).all() + + dr = date_range("2012-11-02", periods=10, tz=tzstr) + result = dr.hour + expected = pd.Index([0] * 10, dtype="int32") + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_date_range_timezone_str_argument(self, tzstr): + tz = timezones.maybe_get_tz(tzstr) + result = date_range("1/1/2000", periods=10, tz=tzstr) + expected = date_range("1/1/2000", periods=10, tz=tz) + + tm.assert_index_equal(result, expected) + + def test_date_range_with_fixedoffset_noname(self): + from pandas.tests.indexes.datetimes.test_timezones import fixed_off_no_name + + off = fixed_off_no_name + start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off) + end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off) + rng = date_range(start=start, end=end) + assert off == rng.tz + + idx = pd.Index([start, end]) + assert off == idx.tz + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_date_range_with_tz(self, tzstr): + stamp = Timestamp("3/11/2012 05:00", tz=tzstr) + assert stamp.hour == 5 + + rng = date_range("3/11/2012 04:00", periods=10, freq="H", tz=tzstr) + + assert stamp == rng[1] + + +class TestGenRangeGeneration: + def test_generate(self): + rng1 = list(generate_range(START, END, periods=None, offset=BDay(), unit="ns")) + rng2 = list(generate_range(START, END, periods=None, offset="B", unit="ns")) + assert rng1 == rng2 + + def test_generate_cday(self): + rng1 = list(generate_range(START, END, periods=None, offset=CDay(), unit="ns")) + rng2 = list(generate_range(START, END, periods=None, offset="C", unit="ns")) + assert rng1 == rng2 + + def test_1(self): + rng = list( + generate_range( + start=datetime(2009, 3, 25), + end=None, + periods=2, + offset=BDay(), + unit="ns", + ) + ) + expected = [datetime(2009, 3, 25), datetime(2009, 3, 26)] + assert rng == expected + + def test_2(self): + rng = list( + generate_range( + start=datetime(2008, 1, 1), + end=datetime(2008, 1, 3), + periods=None, + offset=BDay(), + unit="ns", + ) + ) + expected = [datetime(2008, 1, 1), datetime(2008, 1, 2), datetime(2008, 1, 3)] + assert rng == expected + + def test_3(self): + rng = list( + generate_range( + start=datetime(2008, 1, 5), + end=datetime(2008, 1, 6), + periods=None, + offset=BDay(), + unit="ns", + ) + ) + expected = [] + assert rng == expected + + def test_precision_finer_than_offset(self): + # GH#9907 + result1 = date_range( + start="2015-04-15 00:00:03", end="2016-04-22 00:00:00", freq="Q" + ) + result2 = date_range( + start="2015-04-15 00:00:03", end="2015-06-22 00:00:04", freq="W" + ) + expected1_list = [ + "2015-06-30 00:00:03", + "2015-09-30 00:00:03", + "2015-12-31 00:00:03", + "2016-03-31 00:00:03", + ] + expected2_list = [ + "2015-04-19 00:00:03", + "2015-04-26 00:00:03", + "2015-05-03 00:00:03", + "2015-05-10 00:00:03", + "2015-05-17 00:00:03", + "2015-05-24 00:00:03", + "2015-05-31 00:00:03", + "2015-06-07 00:00:03", + "2015-06-14 00:00:03", + "2015-06-21 00:00:03", + ] + expected1 = DatetimeIndex( + expected1_list, dtype="datetime64[ns]", freq="Q-DEC", tz=None + ) + expected2 = DatetimeIndex( + expected2_list, dtype="datetime64[ns]", freq="W-SUN", tz=None + ) + tm.assert_index_equal(result1, expected1) + 
tm.assert_index_equal(result2, expected2) + + dt1, dt2 = "2017-01-01", "2017-01-01" + tz1, tz2 = "US/Eastern", "Europe/London" + + @pytest.mark.parametrize( + "start,end", + [ + (Timestamp(dt1, tz=tz1), Timestamp(dt2)), + (Timestamp(dt1), Timestamp(dt2, tz=tz2)), + (Timestamp(dt1, tz=tz1), Timestamp(dt2, tz=tz2)), + (Timestamp(dt1, tz=tz2), Timestamp(dt2, tz=tz1)), + ], + ) + def test_mismatching_tz_raises_err(self, start, end): + # issue 18488 + msg = "Start and end cannot both be tz-aware with different timezones" + with pytest.raises(TypeError, match=msg): + date_range(start, end) + with pytest.raises(TypeError, match=msg): + date_range(start, end, freq=BDay()) + + +class TestBusinessDateRange: + def test_constructor(self): + bdate_range(START, END, freq=BDay()) + bdate_range(START, periods=20, freq=BDay()) + bdate_range(end=START, periods=20, freq=BDay()) + + msg = "periods must be a number, got B" + with pytest.raises(TypeError, match=msg): + date_range("2011-1-1", "2012-1-1", "B") + + with pytest.raises(TypeError, match=msg): + bdate_range("2011-1-1", "2012-1-1", "B") + + msg = "freq must be specified for bdate_range; use date_range instead" + with pytest.raises(TypeError, match=msg): + bdate_range(START, END, periods=10, freq=None) + + def test_misc(self): + end = datetime(2009, 5, 13) + dr = bdate_range(end=end, periods=20) + firstDate = end - 19 * BDay() + + assert len(dr) == 20 + assert dr[0] == firstDate + assert dr[-1] == end + + def test_date_parse_failure(self): + badly_formed_date = "2007/100/1" + + msg = "Unknown datetime string format, unable to parse: 2007/100/1" + with pytest.raises(ValueError, match=msg): + Timestamp(badly_formed_date) + + with pytest.raises(ValueError, match=msg): + bdate_range(start=badly_formed_date, periods=10) + + with pytest.raises(ValueError, match=msg): + bdate_range(end=badly_formed_date, periods=10) + + with pytest.raises(ValueError, match=msg): + bdate_range(badly_formed_date, badly_formed_date) + + def test_daterange_bug_456(self): + # GH #456 + rng1 = bdate_range("12/5/2011", "12/5/2011") + rng2 = bdate_range("12/2/2011", "12/5/2011") + assert rng2._data.freq == BDay() + + result = rng1.union(rng2) + assert isinstance(result, DatetimeIndex) + + @pytest.mark.parametrize("inclusive", ["left", "right", "neither", "both"]) + def test_bdays_and_open_boundaries(self, inclusive): + # GH 6673 + start = "2018-07-21" # Saturday + end = "2018-07-29" # Sunday + result = date_range(start, end, freq="B", inclusive=inclusive) + + bday_start = "2018-07-23" # Monday + bday_end = "2018-07-27" # Friday + expected = date_range(bday_start, bday_end, freq="D") + tm.assert_index_equal(result, expected) + # Note: we do _not_ expect the freqs to match here + + def test_bday_near_overflow(self): + # GH#24252 avoid doing unnecessary addition that _would_ overflow + start = Timestamp.max.floor("D").to_pydatetime() + rng = date_range(start, end=None, periods=1, freq="B") + expected = DatetimeIndex([start], freq="B") + tm.assert_index_equal(rng, expected) + + def test_bday_overflow_error(self): + # GH#24252 check that we get OutOfBoundsDatetime and not OverflowError + msg = "Out of bounds nanosecond timestamp" + start = Timestamp.max.floor("D").to_pydatetime() + with pytest.raises(OutOfBoundsDatetime, match=msg): + date_range(start, periods=2, freq="B") + + +class TestCustomDateRange: + def test_constructor(self): + bdate_range(START, END, freq=CDay()) + bdate_range(START, periods=20, freq=CDay()) + bdate_range(end=START, periods=20, freq=CDay()) + + msg = "periods must 
be a number, got C" + with pytest.raises(TypeError, match=msg): + date_range("2011-1-1", "2012-1-1", "C") + + with pytest.raises(TypeError, match=msg): + bdate_range("2011-1-1", "2012-1-1", "C") + + def test_misc(self): + end = datetime(2009, 5, 13) + dr = bdate_range(end=end, periods=20, freq="C") + firstDate = end - 19 * CDay() + + assert len(dr) == 20 + assert dr[0] == firstDate + assert dr[-1] == end + + def test_daterange_bug_456(self): + # GH #456 + rng1 = bdate_range("12/5/2011", "12/5/2011", freq="C") + rng2 = bdate_range("12/2/2011", "12/5/2011", freq="C") + assert rng2._data.freq == CDay() + + result = rng1.union(rng2) + assert isinstance(result, DatetimeIndex) + + def test_cdaterange(self): + result = bdate_range("2013-05-01", periods=3, freq="C") + expected = DatetimeIndex(["2013-05-01", "2013-05-02", "2013-05-03"], freq="C") + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + def test_cdaterange_weekmask(self): + result = bdate_range( + "2013-05-01", periods=3, freq="C", weekmask="Sun Mon Tue Wed Thu" + ) + expected = DatetimeIndex( + ["2013-05-01", "2013-05-02", "2013-05-05"], freq=result.freq + ) + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + # raise with non-custom freq + msg = ( + "a custom frequency string is required when holidays or " + "weekmask are passed, got frequency B" + ) + with pytest.raises(ValueError, match=msg): + bdate_range("2013-05-01", periods=3, weekmask="Sun Mon Tue Wed Thu") + + def test_cdaterange_holidays(self): + result = bdate_range("2013-05-01", periods=3, freq="C", holidays=["2013-05-01"]) + expected = DatetimeIndex( + ["2013-05-02", "2013-05-03", "2013-05-06"], freq=result.freq + ) + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + # raise with non-custom freq + msg = ( + "a custom frequency string is required when holidays or " + "weekmask are passed, got frequency B" + ) + with pytest.raises(ValueError, match=msg): + bdate_range("2013-05-01", periods=3, holidays=["2013-05-01"]) + + def test_cdaterange_weekmask_and_holidays(self): + result = bdate_range( + "2013-05-01", + periods=3, + freq="C", + weekmask="Sun Mon Tue Wed Thu", + holidays=["2013-05-01"], + ) + expected = DatetimeIndex( + ["2013-05-02", "2013-05-05", "2013-05-06"], freq=result.freq + ) + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + # raise with non-custom freq + msg = ( + "a custom frequency string is required when holidays or " + "weekmask are passed, got frequency B" + ) + with pytest.raises(ValueError, match=msg): + bdate_range( + "2013-05-01", + periods=3, + weekmask="Sun Mon Tue Wed Thu", + holidays=["2013-05-01"], + ) + + @pytest.mark.parametrize( + "freq", [freq for freq in prefix_mapping if freq.startswith("C")] + ) + def test_all_custom_freq(self, freq): + # should not raise + bdate_range( + START, END, freq=freq, weekmask="Mon Wed Fri", holidays=["2009-03-14"] + ) + + bad_freq = freq + "FOO" + msg = f"invalid custom frequency string: {bad_freq}" + with pytest.raises(ValueError, match=msg): + bdate_range(START, END, freq=bad_freq) + + @pytest.mark.parametrize( + "start_end", + [ + ("2018-01-01T00:00:01.000Z", "2018-01-03T00:00:01.000Z"), + ("2018-01-01T00:00:00.010Z", "2018-01-03T00:00:00.010Z"), + ("2001-01-01T00:00:00.010Z", "2001-01-03T00:00:00.010Z"), + ], + ) + def test_range_with_millisecond_resolution(self, start_end): + # https://github.com/pandas-dev/pandas/issues/24110 + start, end = start_end + result = 
date_range(start=start, end=end, periods=2, inclusive="left") + expected = DatetimeIndex([start]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "start,period,expected", + [ + ("2022-07-23 00:00:00+02:00", 1, ["2022-07-25 00:00:00+02:00"]), + ("2022-07-22 00:00:00+02:00", 1, ["2022-07-22 00:00:00+02:00"]), + ( + "2022-07-22 00:00:00+02:00", + 2, + ["2022-07-22 00:00:00+02:00", "2022-07-25 00:00:00+02:00"], + ), + ], + ) + def test_range_with_timezone_and_custombusinessday(self, start, period, expected): + # GH49441 + result = date_range(start=start, periods=period, freq="C") + expected = DatetimeIndex(expected) + tm.assert_index_equal(result, expected) + + +def test_date_range_with_custom_holidays(): + # GH 30593 + freq = offsets.CustomBusinessHour(start="15:00", holidays=["2020-11-26"]) + result = date_range(start="2020-11-25 15:00", periods=4, freq=freq) + expected = DatetimeIndex( + [ + "2020-11-25 15:00:00", + "2020-11-25 16:00:00", + "2020-11-27 15:00:00", + "2020-11-27 16:00:00", + ], + freq=freq, + ) + tm.assert_index_equal(result, expected) + + +class TestDateRangeNonNano: + def test_date_range_reso_validation(self): + msg = "'unit' must be one of 's', 'ms', 'us', 'ns'" + with pytest.raises(ValueError, match=msg): + date_range("2016-01-01", "2016-03-04", periods=3, unit="h") + + def test_date_range_freq_higher_than_reso(self): + # freq being higher-resolution than reso is a problem + msg = "Use a lower freq or a higher unit instead" + with pytest.raises(ValueError, match=msg): + # # TODO give a more useful or informative message? + date_range("2016-01-01", "2016-01-02", freq="ns", unit="ms") + + def test_date_range_freq_matches_reso(self): + # GH#49106 matching reso is OK + dti = date_range("2016-01-01", "2016-01-01 00:00:01", freq="ms", unit="ms") + rng = np.arange(1_451_606_400_000, 1_451_606_401_001, dtype=np.int64) + expected = DatetimeIndex(rng.view("M8[ms]"), freq="ms") + tm.assert_index_equal(dti, expected) + + dti = date_range("2016-01-01", "2016-01-01 00:00:01", freq="us", unit="us") + rng = np.arange(1_451_606_400_000_000, 1_451_606_401_000_001, dtype=np.int64) + expected = DatetimeIndex(rng.view("M8[us]"), freq="us") + tm.assert_index_equal(dti, expected) + + dti = date_range("2016-01-01", "2016-01-01 00:00:00.001", freq="ns", unit="ns") + rng = np.arange( + 1_451_606_400_000_000_000, 1_451_606_400_001_000_001, dtype=np.int64 + ) + expected = DatetimeIndex(rng.view("M8[ns]"), freq="ns") + tm.assert_index_equal(dti, expected) + + def test_date_range_freq_lower_than_endpoints(self): + start = Timestamp("2022-10-19 11:50:44.719781") + end = Timestamp("2022-10-19 11:50:47.066458") + + # start and end cannot be cast to "s" unit without lossy rounding, + # so we do not allow this in date_range + with pytest.raises(ValueError, match="Cannot losslessly convert units"): + date_range(start, end, periods=3, unit="s") + + # but we can losslessly cast to "us" + dti = date_range(start, end, periods=2, unit="us") + rng = np.array( + [start.as_unit("us")._value, end.as_unit("us")._value], dtype=np.int64 + ) + expected = DatetimeIndex(rng.view("M8[us]")) + tm.assert_index_equal(dti, expected) + + def test_date_range_non_nano(self): + start = np.datetime64("1066-10-14") # Battle of Hastings + end = np.datetime64("2305-07-13") # Jean-Luc Picard's birthday + + dti = date_range(start, end, freq="D", unit="s") + assert dti.freq == "D" + assert dti.dtype == "M8[s]" + + exp = np.arange( + start.astype("M8[s]").view("i8"), + (end + 1).astype("M8[s]").view("i8"), + 24 * 
3600, + ).view("M8[s]") + + tm.assert_numpy_array_equal(dti.to_numpy(), exp) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_datetime.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_datetime.py new file mode 100644 index 00000000..e5e6d99c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_datetime.py @@ -0,0 +1,203 @@ +from datetime import date + +import dateutil +import numpy as np +import pytest + +from pandas.compat.numpy import np_long + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + Timestamp, + date_range, + offsets, +) +import pandas._testing as tm + + +class TestDatetimeIndex: + def test_sub_datetime_preserves_freq(self, tz_naive_fixture): + # GH#48818 + dti = date_range("2016-01-01", periods=12, tz=tz_naive_fixture) + + res = dti - dti[0] + expected = pd.timedelta_range("0 Days", "11 Days") + tm.assert_index_equal(res, expected) + assert res.freq == expected.freq + + @pytest.mark.xfail( + reason="The inherited freq is incorrect bc dti.freq is incorrect " + "https://github.com/pandas-dev/pandas/pull/48818/files#r982793461" + ) + def test_sub_datetime_preserves_freq_across_dst(self): + # GH#48818 + ts = Timestamp("2016-03-11", tz="US/Pacific") + dti = date_range(ts, periods=4) + + res = dti - dti[0] + expected = pd.TimedeltaIndex( + [ + pd.Timedelta(days=0), + pd.Timedelta(days=1), + pd.Timedelta(days=2), + pd.Timedelta(days=2, hours=23), + ] + ) + tm.assert_index_equal(res, expected) + assert res.freq == expected.freq + + def test_time_overflow_for_32bit_machines(self): + # GH8943. On some machines NumPy defaults to np.int32 (for example, + # 32-bit Linux machines). In the function _generate_regular_range + # found in tseries/index.py, `periods` gets multiplied by `strides` + # (which has value 1e9) and since the max value for np.int32 is ~2e9, + # and since those machines won't promote np.int32 to np.int64, we get + # overflow. 
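+        # np_long is pandas' compat alias for NumPy's platform "long" integer,
+        # the type that is 32-bit on the machines described above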
+ periods = np_long(1000) + + idx1 = date_range(start="2000", periods=periods, freq="S") + assert len(idx1) == periods + + idx2 = date_range(end="2000", periods=periods, freq="S") + assert len(idx2) == periods + + def test_nat(self): + assert DatetimeIndex([np.nan])[0] is pd.NaT + + def test_week_of_month_frequency(self): + # GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise + d1 = date(2002, 9, 1) + d2 = date(2013, 10, 27) + d3 = date(2012, 9, 30) + idx1 = DatetimeIndex([d1, d2]) + idx2 = DatetimeIndex([d3]) + result_append = idx1.append(idx2) + expected = DatetimeIndex([d1, d2, d3]) + tm.assert_index_equal(result_append, expected) + result_union = idx1.union(idx2) + expected = DatetimeIndex([d1, d3, d2]) + tm.assert_index_equal(result_union, expected) + + # GH 5115 + result = date_range("2013-1-1", periods=4, freq="WOM-1SAT") + dates = ["2013-01-05", "2013-02-02", "2013-03-02", "2013-04-06"] + expected = DatetimeIndex(dates, freq="WOM-1SAT") + tm.assert_index_equal(result, expected) + + def test_append_nondatetimeindex(self): + rng = date_range("1/1/2000", periods=10) + idx = Index(["a", "b", "c", "d"]) + + result = rng.append(idx) + assert isinstance(result[0], Timestamp) + + def test_iteration_preserves_tz(self): + # see gh-8890 + index = date_range("2012-01-01", periods=3, freq="H", tz="US/Eastern") + + for i, ts in enumerate(index): + result = ts + expected = index[i] # pylint: disable=unnecessary-list-index-lookup + assert result == expected + + index = date_range( + "2012-01-01", periods=3, freq="H", tz=dateutil.tz.tzoffset(None, -28800) + ) + + for i, ts in enumerate(index): + result = ts + expected = index[i] # pylint: disable=unnecessary-list-index-lookup + assert result._repr_base == expected._repr_base + assert result == expected + + # 9100 + index = DatetimeIndex( + ["2014-12-01 03:32:39.987000-08:00", "2014-12-01 04:12:34.987000-08:00"] + ) + for i, ts in enumerate(index): + result = ts + expected = index[i] # pylint: disable=unnecessary-list-index-lookup + assert result._repr_base == expected._repr_base + assert result == expected + + @pytest.mark.parametrize("periods", [0, 9999, 10000, 10001]) + def test_iteration_over_chunksize(self, periods): + # GH21012 + + index = date_range("2000-01-01 00:00:00", periods=periods, freq="min") + num = 0 + for stamp in index: + assert index[num] == stamp + num += 1 + assert num == len(index) + + def test_misc_coverage(self): + rng = date_range("1/1/2000", periods=5) + result = rng.groupby(rng.day) + assert isinstance(next(iter(result.values()))[0], Timestamp) + + def test_groupby_function_tuple_1677(self): + df = DataFrame( + np.random.default_rng(2).random(100), + index=date_range("1/1/2000", periods=100), + ) + monthly_group = df.groupby(lambda x: (x.year, x.month)) + + result = monthly_group.mean() + assert isinstance(result.index[0], tuple) + + def assert_index_parameters(self, index): + assert index.freq == "40960N" + assert index.inferred_freq == "40960N" + + def test_ns_index(self): + nsamples = 400 + ns = int(1e9 / 24414) + dtstart = np.datetime64("2012-09-20T00:00:00") + + dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, "ns") + freq = ns * offsets.Nano() + index = DatetimeIndex(dt, freq=freq, name="time") + self.assert_index_parameters(index) + + new_index = date_range(start=index[0], end=index[-1], freq=index.freq) + self.assert_index_parameters(new_index) + + def test_asarray_tz_naive(self): + # This shouldn't produce a warning. 
+ idx = date_range("2000", periods=2) + # M8[ns] by default + result = np.asarray(idx) + + expected = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]") + tm.assert_numpy_array_equal(result, expected) + + # optionally, object + result = np.asarray(idx, dtype=object) + + expected = np.array([Timestamp("2000-01-01"), Timestamp("2000-01-02")]) + tm.assert_numpy_array_equal(result, expected) + + def test_asarray_tz_aware(self): + tz = "US/Central" + idx = date_range("2000", periods=2, tz=tz) + expected = np.array(["2000-01-01T06", "2000-01-02T06"], dtype="M8[ns]") + result = np.asarray(idx, dtype="datetime64[ns]") + + tm.assert_numpy_array_equal(result, expected) + + # Old behavior with no warning + result = np.asarray(idx, dtype="M8[ns]") + + tm.assert_numpy_array_equal(result, expected) + + # Future behavior with no warning + expected = np.array( + [Timestamp("2000-01-01", tz=tz), Timestamp("2000-01-02", tz=tz)] + ) + result = np.asarray(idx, dtype=object) + + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_datetimelike.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_datetimelike.py new file mode 100644 index 00000000..a6bee20d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_datetimelike.py @@ -0,0 +1,10 @@ +""" generic tests from the Datetimelike class """ +from pandas import date_range + + +class TestDatetimeIndex: + def test_format(self): + # GH35439 + idx = date_range("20130101", periods=5) + expected = [f"{x:%Y-%m-%d}" for x in idx] + assert idx.format() == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_delete.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_delete.py new file mode 100644 index 00000000..e9de5a05 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_delete.py @@ -0,0 +1,138 @@ +import pytest + +from pandas import ( + DatetimeIndex, + Series, + date_range, +) +import pandas._testing as tm + + +class TestDelete: + def test_delete(self): + idx = date_range(start="2000-01-01", periods=5, freq="M", name="idx") + + # preserve freq + expected_0 = date_range(start="2000-02-01", periods=4, freq="M", name="idx") + expected_4 = date_range(start="2000-01-01", periods=4, freq="M", name="idx") + + # reset freq to None + expected_1 = DatetimeIndex( + ["2000-01-31", "2000-03-31", "2000-04-30", "2000-05-31"], + freq=None, + name="idx", + ) + + cases = { + 0: expected_0, + -5: expected_0, + -1: expected_4, + 4: expected_4, + 1: expected_1, + } + for n, expected in cases.items(): + result = idx.delete(n) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + with pytest.raises((IndexError, ValueError), match="out of bounds"): + # either depending on numpy version + idx.delete(5) + + for tz in [None, "Asia/Tokyo", "US/Pacific"]: + idx = date_range( + start="2000-01-01 09:00", periods=10, freq="H", name="idx", tz=tz + ) + + expected = date_range( + start="2000-01-01 10:00", periods=9, freq="H", name="idx", tz=tz + ) + result = idx.delete(0) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freqstr == "H" + assert result.tz == expected.tz + + expected = date_range( + start="2000-01-01 09:00", periods=9, freq="H", name="idx", tz=tz + ) + result = idx.delete(-1) + tm.assert_index_equal(result, expected) 
+ assert result.name == expected.name + assert result.freqstr == "H" + assert result.tz == expected.tz + + def test_delete_slice(self): + idx = date_range(start="2000-01-01", periods=10, freq="D", name="idx") + + # preserve freq + expected_0_2 = date_range(start="2000-01-04", periods=7, freq="D", name="idx") + expected_7_9 = date_range(start="2000-01-01", periods=7, freq="D", name="idx") + + # reset freq to None + expected_3_5 = DatetimeIndex( + [ + "2000-01-01", + "2000-01-02", + "2000-01-03", + "2000-01-07", + "2000-01-08", + "2000-01-09", + "2000-01-10", + ], + freq=None, + name="idx", + ) + + cases = { + (0, 1, 2): expected_0_2, + (7, 8, 9): expected_7_9, + (3, 4, 5): expected_3_5, + } + for n, expected in cases.items(): + result = idx.delete(n) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + result = idx.delete(slice(n[0], n[-1] + 1)) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + for tz in [None, "Asia/Tokyo", "US/Pacific"]: + ts = Series( + 1, + index=date_range( + "2000-01-01 09:00", periods=10, freq="H", name="idx", tz=tz + ), + ) + # preserve freq + result = ts.drop(ts.index[:5]).index + expected = date_range( + "2000-01-01 14:00", periods=5, freq="H", name="idx", tz=tz + ) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + assert result.tz == expected.tz + + # reset freq to None + result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index + expected = DatetimeIndex( + [ + "2000-01-01 09:00", + "2000-01-01 11:00", + "2000-01-01 13:00", + "2000-01-01 15:00", + "2000-01-01 17:00", + ], + freq=None, + name="idx", + tz=tz, + ) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + assert result.tz == expected.tz diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_formats.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_formats.py new file mode 100644 index 00000000..cb3e0179 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_formats.py @@ -0,0 +1,297 @@ +from datetime import datetime + +import dateutil.tz +import numpy as np +import pytest +import pytz + +import pandas as pd +from pandas import ( + DatetimeIndex, + Series, +) +import pandas._testing as tm + + +def test_format_native_types(): + index = pd.date_range(freq="1D", periods=3, start="2017-01-01") + + # First, with no arguments. 
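+        # (with no arguments, each midnight-normalized timestamp is rendered
+        # as a plain ISO-8601 date string)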
+ expected = np.array(["2017-01-01", "2017-01-02", "2017-01-03"], dtype=object) + + result = index._format_native_types() + tm.assert_numpy_array_equal(result, expected) + + # No NaN values, so na_rep has no effect + result = index._format_native_types(na_rep="pandas") + tm.assert_numpy_array_equal(result, expected) + + # Make sure date formatting works + expected = np.array(["01-2017-01", "01-2017-02", "01-2017-03"], dtype=object) + + result = index._format_native_types(date_format="%m-%Y-%d") + tm.assert_numpy_array_equal(result, expected) + + # NULL object handling should work + index = DatetimeIndex(["2017-01-01", pd.NaT, "2017-01-03"]) + expected = np.array(["2017-01-01", "NaT", "2017-01-03"], dtype=object) + + result = index._format_native_types() + tm.assert_numpy_array_equal(result, expected) + + expected = np.array(["2017-01-01", "pandas", "2017-01-03"], dtype=object) + + result = index._format_native_types(na_rep="pandas") + tm.assert_numpy_array_equal(result, expected) + + result = index._format_native_types(date_format="%Y-%m-%d %H:%M:%S.%f") + expected = np.array( + ["2017-01-01 00:00:00.000000", "NaT", "2017-01-03 00:00:00.000000"], + dtype=object, + ) + tm.assert_numpy_array_equal(result, expected) + + # invalid format + result = index._format_native_types(date_format="foo") + expected = np.array(["foo", "NaT", "foo"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +class TestDatetimeIndexRendering: + def test_dti_repr_short(self): + dr = pd.date_range(start="1/1/2012", periods=1) + repr(dr) + + dr = pd.date_range(start="1/1/2012", periods=2) + repr(dr) + + dr = pd.date_range(start="1/1/2012", periods=3) + repr(dr) + + @pytest.mark.parametrize( + "dates, freq, expected_repr", + [ + ( + ["2012-01-01 00:00:00"], + "60T", + ( + "DatetimeIndex(['2012-01-01 00:00:00'], " + "dtype='datetime64[ns]', freq='60T')" + ), + ), + ( + ["2012-01-01 00:00:00", "2012-01-01 01:00:00"], + "60T", + "DatetimeIndex(['2012-01-01 00:00:00', '2012-01-01 01:00:00'], " + "dtype='datetime64[ns]', freq='60T')", + ), + ( + ["2012-01-01"], + "24H", + "DatetimeIndex(['2012-01-01'], dtype='datetime64[ns]', freq='24H')", + ), + ], + ) + def test_dti_repr_time_midnight(self, dates, freq, expected_repr): + # GH53634 + dti = DatetimeIndex(dates, freq) + actual_repr = repr(dti) + assert actual_repr == expected_repr + + @pytest.mark.parametrize("method", ["__repr__", "__str__"]) + def test_dti_representation(self, method): + idxs = [] + idxs.append(DatetimeIndex([], freq="D")) + idxs.append(DatetimeIndex(["2011-01-01"], freq="D")) + idxs.append(DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")) + idxs.append(DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D")) + idxs.append( + DatetimeIndex( + ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"], + freq="H", + tz="Asia/Tokyo", + ) + ) + idxs.append( + DatetimeIndex( + ["2011-01-01 09:00", "2011-01-01 10:00", pd.NaT], tz="US/Eastern" + ) + ) + idxs.append( + DatetimeIndex(["2011-01-01 09:00", "2011-01-01 10:00", pd.NaT], tz="UTC") + ) + + exp = [] + exp.append("DatetimeIndex([], dtype='datetime64[ns]', freq='D')") + exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', freq='D')") + exp.append( + "DatetimeIndex(['2011-01-01', '2011-01-02'], " + "dtype='datetime64[ns]', freq='D')" + ) + exp.append( + "DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], " + "dtype='datetime64[ns]', freq='D')" + ) + exp.append( + "DatetimeIndex(['2011-01-01 09:00:00+09:00', " + "'2011-01-01 10:00:00+09:00', '2011-01-01 
11:00:00+09:00']" + ", dtype='datetime64[ns, Asia/Tokyo]', freq='H')" + ) + exp.append( + "DatetimeIndex(['2011-01-01 09:00:00-05:00', " + "'2011-01-01 10:00:00-05:00', 'NaT'], " + "dtype='datetime64[ns, US/Eastern]', freq=None)" + ) + exp.append( + "DatetimeIndex(['2011-01-01 09:00:00+00:00', " + "'2011-01-01 10:00:00+00:00', 'NaT'], " + "dtype='datetime64[ns, UTC]', freq=None)" + "" + ) + + with pd.option_context("display.width", 300): + for indx, expected in zip(idxs, exp): + result = getattr(indx, method)() + assert result == expected + + def test_dti_representation_to_series(self): + idx1 = DatetimeIndex([], freq="D") + idx2 = DatetimeIndex(["2011-01-01"], freq="D") + idx3 = DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D") + idx4 = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D") + idx5 = DatetimeIndex( + ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"], + freq="H", + tz="Asia/Tokyo", + ) + idx6 = DatetimeIndex( + ["2011-01-01 09:00", "2011-01-01 10:00", pd.NaT], tz="US/Eastern" + ) + idx7 = DatetimeIndex(["2011-01-01 09:00", "2011-01-02 10:15"]) + + exp1 = """Series([], dtype: datetime64[ns])""" + + exp2 = "0 2011-01-01\ndtype: datetime64[ns]" + + exp3 = "0 2011-01-01\n1 2011-01-02\ndtype: datetime64[ns]" + + exp4 = ( + "0 2011-01-01\n" + "1 2011-01-02\n" + "2 2011-01-03\n" + "dtype: datetime64[ns]" + ) + + exp5 = ( + "0 2011-01-01 09:00:00+09:00\n" + "1 2011-01-01 10:00:00+09:00\n" + "2 2011-01-01 11:00:00+09:00\n" + "dtype: datetime64[ns, Asia/Tokyo]" + ) + + exp6 = ( + "0 2011-01-01 09:00:00-05:00\n" + "1 2011-01-01 10:00:00-05:00\n" + "2 NaT\n" + "dtype: datetime64[ns, US/Eastern]" + ) + + exp7 = ( + "0 2011-01-01 09:00:00\n" + "1 2011-01-02 10:15:00\n" + "dtype: datetime64[ns]" + ) + + with pd.option_context("display.width", 300): + for idx, expected in zip( + [idx1, idx2, idx3, idx4, idx5, idx6, idx7], + [exp1, exp2, exp3, exp4, exp5, exp6, exp7], + ): + result = repr(Series(idx)) + assert result == expected + + def test_dti_summary(self): + # GH#9116 + idx1 = DatetimeIndex([], freq="D") + idx2 = DatetimeIndex(["2011-01-01"], freq="D") + idx3 = DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D") + idx4 = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D") + idx5 = DatetimeIndex( + ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"], + freq="H", + tz="Asia/Tokyo", + ) + idx6 = DatetimeIndex( + ["2011-01-01 09:00", "2011-01-01 10:00", pd.NaT], tz="US/Eastern" + ) + + exp1 = "DatetimeIndex: 0 entries\nFreq: D" + + exp2 = "DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01\nFreq: D" + + exp3 = "DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02\nFreq: D" + + exp4 = "DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03\nFreq: D" + + exp5 = ( + "DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 " + "to 2011-01-01 11:00:00+09:00\n" + "Freq: H" + ) + + exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT""" + + for idx, expected in zip( + [idx1, idx2, idx3, idx4, idx5, idx6], [exp1, exp2, exp3, exp4, exp5, exp6] + ): + result = idx._summary() + assert result == expected + + def test_dti_business_repr(self): + # only really care that it works + repr(pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1))) + + def test_dti_business_summary(self): + rng = pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1)) + rng._summary() + rng[2:2]._summary() + + def test_dti_business_summary_pytz(self): + pd.bdate_range("1/1/2005", "1/1/2009", tz=pytz.utc)._summary() + + def 
test_dti_business_summary_dateutil(self): + pd.bdate_range("1/1/2005", "1/1/2009", tz=dateutil.tz.tzutc())._summary() + + def test_dti_custom_business_repr(self): + # only really care that it works + repr(pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1), freq="C")) + + def test_dti_custom_business_summary(self): + rng = pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1), freq="C") + rng._summary() + rng[2:2]._summary() + + def test_dti_custom_business_summary_pytz(self): + pd.bdate_range("1/1/2005", "1/1/2009", freq="C", tz=pytz.utc)._summary() + + def test_dti_custom_business_summary_dateutil(self): + pd.bdate_range( + "1/1/2005", "1/1/2009", freq="C", tz=dateutil.tz.tzutc() + )._summary() + + +class TestFormat: + def test_format_with_name_time_info(self): + # bug I fixed 12/20/2011 + dates = pd.date_range("2011-01-01 04:00:00", periods=10, name="something") + + formatted = dates.format(name=True) + assert formatted[0] == "something" + + def test_format_datetime_with_time(self): + dti = DatetimeIndex([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)]) + + result = dti.format() + expected = ["2012-02-07 00:00:00", "2012-02-07 23:00:00"] + assert len(result) == 2 + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_freq_attr.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_freq_attr.py new file mode 100644 index 00000000..f5821a31 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_freq_attr.py @@ -0,0 +1,61 @@ +import pytest + +from pandas import ( + DatetimeIndex, + date_range, +) + +from pandas.tseries.offsets import ( + BDay, + DateOffset, + Day, + Hour, +) + + +class TestFreq: + def test_freq_setter_errors(self): + # GH#20678 + idx = DatetimeIndex(["20180101", "20180103", "20180105"]) + + # setting with an incompatible freq + msg = ( + "Inferred frequency 2D from passed values does not conform to " + "passed frequency 5D" + ) + with pytest.raises(ValueError, match=msg): + idx._data.freq = "5D" + + # setting with non-freq string + with pytest.raises(ValueError, match="Invalid frequency"): + idx._data.freq = "foo" + + @pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []]) + @pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)]) + @pytest.mark.parametrize("tz", [None, "US/Eastern"]) + def test_freq_setter(self, values, freq, tz): + # GH#20678 + idx = DatetimeIndex(values, tz=tz) + + # can set to an offset, converting from string if necessary + idx._data.freq = freq + assert idx.freq == freq + assert isinstance(idx.freq, DateOffset) + + # can reset to None + idx._data.freq = None + assert idx.freq is None + + def test_freq_view_safe(self): + # Setting the freq for one DatetimeIndex shouldn't alter the freq + # for another that views the same data + + dti = date_range("2016-01-01", periods=5) + dta = dti._data + + dti2 = DatetimeIndex(dta)._with_freq(None) + assert dti2.freq is None + + # Original was not altered + assert dti.freq == "D" + assert dta.freq == "D" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_indexing.py new file mode 100644 index 00000000..cfbf1a75 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_indexing.py @@ -0,0 +1,714 @@ +from datetime import ( + date, + datetime, + time, + timedelta, +) + +import numpy 
as np +import pytest + +from pandas.compat.numpy import np_long + +import pandas as pd +from pandas import ( + DatetimeIndex, + Index, + Timestamp, + bdate_range, + date_range, + notna, +) +import pandas._testing as tm + +from pandas.tseries.frequencies import to_offset + +START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) + + +class TestGetItem: + def test_getitem_slice_keeps_name(self): + # GH4226 + st = Timestamp("2013-07-01 00:00:00", tz="America/Los_Angeles") + et = Timestamp("2013-07-02 00:00:00", tz="America/Los_Angeles") + dr = date_range(st, et, freq="H", name="timebucket") + assert dr[1:].name == dr.name + + def test_getitem(self): + idx1 = date_range("2011-01-01", "2011-01-31", freq="D", name="idx") + idx2 = date_range( + "2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx" + ) + + for idx in [idx1, idx2]: + result = idx[0] + assert result == Timestamp("2011-01-01", tz=idx.tz) + + result = idx[0:5] + expected = date_range( + "2011-01-01", "2011-01-05", freq="D", tz=idx.tz, name="idx" + ) + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[0:10:2] + expected = date_range( + "2011-01-01", "2011-01-09", freq="2D", tz=idx.tz, name="idx" + ) + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[-20:-5:3] + expected = date_range( + "2011-01-12", "2011-01-24", freq="3D", tz=idx.tz, name="idx" + ) + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[4::-1] + expected = DatetimeIndex( + ["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"], + freq="-1D", + tz=idx.tz, + name="idx", + ) + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + @pytest.mark.parametrize("freq", ["B", "C"]) + def test_dti_business_getitem(self, freq): + rng = bdate_range(START, END, freq=freq) + smaller = rng[:5] + exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq=freq) + tm.assert_index_equal(smaller, exp) + assert smaller.freq == exp.freq + assert smaller.freq == rng.freq + + sliced = rng[::5] + assert sliced.freq == to_offset(freq) * 5 + + fancy_indexed = rng[[4, 3, 2, 1, 0]] + assert len(fancy_indexed) == 5 + assert isinstance(fancy_indexed, DatetimeIndex) + assert fancy_indexed.freq is None + + # 32-bit vs. 
64-bit platforms + assert rng[4] == rng[np_long(4)] + + @pytest.mark.parametrize("freq", ["B", "C"]) + def test_dti_business_getitem_matplotlib_hackaround(self, freq): + rng = bdate_range(START, END, freq=freq) + with pytest.raises(ValueError, match="Multi-dimensional indexing"): + # GH#30588 multi-dimensional indexing deprecated + rng[:, None] + + def test_getitem_int_list(self): + dti = date_range(start="1/1/2005", end="12/1/2005", freq="M") + dti2 = dti[[1, 3, 5]] + + v1 = dti2[0] + v2 = dti2[1] + v3 = dti2[2] + + assert v1 == Timestamp("2/28/2005") + assert v2 == Timestamp("4/30/2005") + assert v3 == Timestamp("6/30/2005") + + # getitem with non-slice drops freq + assert dti2.freq is None + + +class TestWhere: + def test_where_doesnt_retain_freq(self): + dti = date_range("20130101", periods=3, freq="D", name="idx") + cond = [True, True, False] + expected = DatetimeIndex([dti[0], dti[1], dti[0]], freq=None, name="idx") + + result = dti.where(cond, dti[::-1]) + tm.assert_index_equal(result, expected) + + def test_where_other(self): + # other is ndarray or Index + i = date_range("20130101", periods=3, tz="US/Eastern") + + for arr in [np.nan, pd.NaT]: + result = i.where(notna(i), other=arr) + expected = i + tm.assert_index_equal(result, expected) + + i2 = i.copy() + i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist()) + result = i.where(notna(i2), i2) + tm.assert_index_equal(result, i2) + + i2 = i.copy() + i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist()) + result = i.where(notna(i2), i2._values) + tm.assert_index_equal(result, i2) + + def test_where_invalid_dtypes(self): + dti = date_range("20130101", periods=3, tz="US/Eastern") + + tail = dti[2:].tolist() + i2 = Index([pd.NaT, pd.NaT] + tail) + + mask = notna(i2) + + # passing tz-naive ndarray to tzaware DTI + result = dti.where(mask, i2.values) + expected = Index([pd.NaT.asm8, pd.NaT.asm8] + tail, dtype=object) + tm.assert_index_equal(result, expected) + + # passing tz-aware DTI to tznaive DTI + naive = dti.tz_localize(None) + result = naive.where(mask, i2) + expected = Index([i2[0], i2[1]] + naive[2:].tolist(), dtype=object) + tm.assert_index_equal(result, expected) + + pi = i2.tz_localize(None).to_period("D") + result = dti.where(mask, pi) + expected = Index([pi[0], pi[1]] + tail, dtype=object) + tm.assert_index_equal(result, expected) + + tda = i2.asi8.view("timedelta64[ns]") + result = dti.where(mask, tda) + expected = Index([tda[0], tda[1]] + tail, dtype=object) + assert isinstance(expected[0], np.timedelta64) + tm.assert_index_equal(result, expected) + + result = dti.where(mask, i2.asi8) + expected = Index([pd.NaT._value, pd.NaT._value] + tail, dtype=object) + assert isinstance(expected[0], int) + tm.assert_index_equal(result, expected) + + # non-matching scalar + td = pd.Timedelta(days=4) + result = dti.where(mask, td) + expected = Index([td, td] + tail, dtype=object) + assert expected[0] is td + tm.assert_index_equal(result, expected) + + def test_where_mismatched_nat(self, tz_aware_fixture): + tz = tz_aware_fixture + dti = date_range("2013-01-01", periods=3, tz=tz) + cond = np.array([True, False, True]) + + tdnat = np.timedelta64("NaT", "ns") + expected = Index([dti[0], tdnat, dti[2]], dtype=object) + assert expected[1] is tdnat + + result = dti.where(cond, tdnat) + tm.assert_index_equal(result, expected) + + def test_where_tz(self): + i = date_range("20130101", periods=3, tz="US/Eastern") + result = i.where(notna(i)) + expected = i + tm.assert_index_equal(result, expected) + + i2 = i.copy() + i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist()) 
+ result = i.where(notna(i2)) + expected = i2 + tm.assert_index_equal(result, expected) + + +class TestTake: + def test_take_nan_first_datetime(self): + index = DatetimeIndex([pd.NaT, Timestamp("20130101"), Timestamp("20130102")]) + result = index.take([-1, 0, 1]) + expected = DatetimeIndex([index[-1], index[0], index[1]]) + tm.assert_index_equal(result, expected) + + def test_take(self): + # GH#10295 + idx1 = date_range("2011-01-01", "2011-01-31", freq="D", name="idx") + idx2 = date_range( + "2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx" + ) + + for idx in [idx1, idx2]: + result = idx.take([0]) + assert result == Timestamp("2011-01-01", tz=idx.tz) + + result = idx.take([0, 1, 2]) + expected = date_range( + "2011-01-01", "2011-01-03", freq="D", tz=idx.tz, name="idx" + ) + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([0, 2, 4]) + expected = date_range( + "2011-01-01", "2011-01-05", freq="2D", tz=idx.tz, name="idx" + ) + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([7, 4, 1]) + expected = date_range( + "2011-01-08", "2011-01-02", freq="-3D", tz=idx.tz, name="idx" + ) + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([3, 2, 5]) + expected = DatetimeIndex( + ["2011-01-04", "2011-01-03", "2011-01-06"], + freq=None, + tz=idx.tz, + name="idx", + ) + tm.assert_index_equal(result, expected) + assert result.freq is None + + result = idx.take([-3, 2, 5]) + expected = DatetimeIndex( + ["2011-01-29", "2011-01-03", "2011-01-06"], + freq=None, + tz=idx.tz, + name="idx", + ) + tm.assert_index_equal(result, expected) + assert result.freq is None + + def test_take_invalid_kwargs(self): + idx = date_range("2011-01-01", "2011-01-31", freq="D", name="idx") + indices = [1, 6, 5, 9, 10, 13, 15, 3] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + idx.take(indices, foo=2) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, out=indices) + + msg = "the 'mode' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, mode="clip") + + # TODO: This method came from test_datetime; de-dup with version above + @pytest.mark.parametrize("tz", [None, "US/Eastern", "Asia/Tokyo"]) + def test_take2(self, tz): + dates = [ + datetime(2010, 1, 1, 14), + datetime(2010, 1, 1, 15), + datetime(2010, 1, 1, 17), + datetime(2010, 1, 1, 21), + ] + + idx = date_range( + start="2010-01-01 09:00", + end="2010-02-01 09:00", + freq="H", + tz=tz, + name="idx", + ) + expected = DatetimeIndex(dates, freq=None, name="idx", tz=tz) + + taken1 = idx.take([5, 6, 8, 12]) + taken2 = idx[[5, 6, 8, 12]] + + for taken in [taken1, taken2]: + tm.assert_index_equal(taken, expected) + assert isinstance(taken, DatetimeIndex) + assert taken.freq is None + assert taken.tz == expected.tz + assert taken.name == expected.name + + def test_take_fill_value(self): + # GH#12631 + idx = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx") + result = idx.take(np.array([1, 0, -1])) + expected = DatetimeIndex(["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx") + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx") + tm.assert_index_equal(result, expected) + + # allow_fill=False + 
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = DatetimeIndex(["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx") + tm.assert_index_equal(result, expected) + + msg = ( + "When allow_fill=True and fill_value is not None, " + "all indices must be >= -1" + ) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "out of bounds" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + def test_take_fill_value_with_timezone(self): + idx = DatetimeIndex( + ["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", tz="US/Eastern" + ) + result = idx.take(np.array([1, 0, -1])) + expected = DatetimeIndex( + ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", tz="US/Eastern" + ) + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = DatetimeIndex( + ["2011-02-01", "2011-01-01", "NaT"], name="xxx", tz="US/Eastern" + ) + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = DatetimeIndex( + ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", tz="US/Eastern" + ) + tm.assert_index_equal(result, expected) + + msg = ( + "When allow_fill=True and fill_value is not None, " + "all indices must be >= -1" + ) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "out of bounds" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + +class TestGetLoc: + def test_get_loc_key_unit_mismatch(self): + idx = date_range("2000-01-01", periods=3) + key = idx[1].as_unit("ms") + loc = idx.get_loc(key) + assert loc == 1 + assert key in idx + + def test_get_loc_key_unit_mismatch_not_castable(self): + dta = date_range("2000-01-01", periods=3)._data.astype("M8[s]") + dti = DatetimeIndex(dta) + key = dta[0].as_unit("ns") + pd.Timedelta(1) + + with pytest.raises( + KeyError, match=r"Timestamp\('2000-01-01 00:00:00.000000001'\)" + ): + dti.get_loc(key) + + assert key not in dti + + def test_get_loc_time_obj(self): + # time indexing + idx = date_range("2000-01-01", periods=24, freq="H") + + result = idx.get_loc(time(12)) + expected = np.array([12]) + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + result = idx.get_loc(time(12, 30)) + expected = np.array([]) + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + def test_get_loc_time_obj2(self): + # GH#8667 + + from pandas._libs.index import _SIZE_CUTOFF + + ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64) + key = time(15, 11, 30) + start = key.hour * 3600 + key.minute * 60 + key.second + step = 24 * 3600 + + for n in ns: + idx = date_range("2014-11-26", periods=n, freq="S") + ts = pd.Series(np.random.default_rng(2).standard_normal(n), index=idx) + locs = np.arange(start, n, step, dtype=np.intp) + + result = ts.index.get_loc(key) + tm.assert_numpy_array_equal(result, locs) + tm.assert_series_equal(ts[key], ts.iloc[locs]) + + left, right = ts.copy(), ts.copy() + left[key] *= -10 + right.iloc[locs] *= -10 + tm.assert_series_equal(left, right) + + def test_get_loc_time_nat(self): + # GH#35114 + # Case where key's total microseconds happens to match iNaT % 1e6 // 1000 + tic = 
time(minute=12, second=43, microsecond=145224) + dti = DatetimeIndex([pd.NaT]) + + loc = dti.get_loc(tic) + expected = np.array([], dtype=np.intp) + tm.assert_numpy_array_equal(loc, expected) + + def test_get_loc_nat(self): + # GH#20464 + index = DatetimeIndex(["1/3/2000", "NaT"]) + assert index.get_loc(pd.NaT) == 1 + + assert index.get_loc(None) == 1 + + assert index.get_loc(np.nan) == 1 + + assert index.get_loc(pd.NA) == 1 + + assert index.get_loc(np.datetime64("NaT")) == 1 + + with pytest.raises(KeyError, match="NaT"): + index.get_loc(np.timedelta64("NaT")) + + @pytest.mark.parametrize("key", [pd.Timedelta(0), pd.Timedelta(1), timedelta(0)]) + def test_get_loc_timedelta_invalid_key(self, key): + # GH#20464 + dti = date_range("1970-01-01", periods=10) + msg = "Cannot index DatetimeIndex with [Tt]imedelta" + with pytest.raises(TypeError, match=msg): + dti.get_loc(key) + + def test_get_loc_reasonable_key_error(self): + # GH#1062 + index = DatetimeIndex(["1/3/2000"]) + with pytest.raises(KeyError, match="2000"): + index.get_loc("1/1/2000") + + def test_get_loc_year_str(self): + rng = date_range("1/1/2000", "1/1/2010") + + result = rng.get_loc("2009") + expected = slice(3288, 3653) + assert result == expected + + +class TestContains: + def test_dti_contains_with_duplicates(self): + d = datetime(2011, 12, 5, 20, 30) + ix = DatetimeIndex([d, d]) + assert d in ix + + @pytest.mark.parametrize( + "vals", + [ + [0, 1, 0], + [0, 0, -1], + [0, -1, -1], + ["2015", "2015", "2016"], + ["2015", "2015", "2014"], + ], + ) + def test_contains_nonunique(self, vals): + # GH#9512 + idx = DatetimeIndex(vals) + assert idx[0] in idx + + +class TestGetIndexer: + def test_get_indexer_date_objs(self): + rng = date_range("1/1/2000", periods=20) + + result = rng.get_indexer(rng.map(lambda x: x.date())) + expected = rng.get_indexer(rng) + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer(self): + idx = date_range("2000-01-01", periods=3) + exp = np.array([0, 1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(idx.get_indexer(idx), exp) + + target = idx[0] + pd.to_timedelta(["-1 hour", "12 hours", "1 day 1 hour"]) + tm.assert_numpy_array_equal( + idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp) + ) + tm.assert_numpy_array_equal( + idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp) + ) + tm.assert_numpy_array_equal( + idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp) + ) + tm.assert_numpy_array_equal( + idx.get_indexer(target, "nearest", tolerance=pd.Timedelta("1 hour")), + np.array([0, -1, 1], dtype=np.intp), + ) + tol_raw = [ + pd.Timedelta("1 hour"), + pd.Timedelta("1 hour"), + pd.Timedelta("1 hour").to_timedelta64(), + ] + tm.assert_numpy_array_equal( + idx.get_indexer( + target, "nearest", tolerance=[np.timedelta64(x) for x in tol_raw] + ), + np.array([0, -1, 1], dtype=np.intp), + ) + tol_bad = [ + pd.Timedelta("2 hour").to_timedelta64(), + pd.Timedelta("1 hour").to_timedelta64(), + "foo", + ] + msg = "Could not convert 'foo' to NumPy timedelta" + with pytest.raises(ValueError, match=msg): + idx.get_indexer(target, "nearest", tolerance=tol_bad) + with pytest.raises(ValueError, match="abbreviation w/o a number"): + idx.get_indexer(idx[[0]], method="nearest", tolerance="foo") + + @pytest.mark.parametrize( + "target", + [ + [date(2020, 1, 1), Timestamp("2020-01-02")], + [Timestamp("2020-01-01"), date(2020, 1, 2)], + ], + ) + def test_get_indexer_mixed_dtypes(self, target): + # https://github.com/pandas-dev/pandas/issues/33741 + values = 
DatetimeIndex([Timestamp("2020-01-01"), Timestamp("2020-01-02")]) + result = values.get_indexer(target) + expected = np.array([0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "target, positions", + [ + ([date(9999, 1, 1), Timestamp("2020-01-01")], [-1, 0]), + ([Timestamp("2020-01-01"), date(9999, 1, 1)], [0, -1]), + ([date(9999, 1, 1), date(9999, 1, 1)], [-1, -1]), + ], + ) + def test_get_indexer_out_of_bounds_date(self, target, positions): + values = DatetimeIndex([Timestamp("2020-01-01"), Timestamp("2020-01-02")]) + + result = values.get_indexer(target) + expected = np.array(positions, dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_pad_requires_monotonicity(self): + rng = date_range("1/1/2000", "3/1/2000", freq="B") + + # neither monotonic increasing or decreasing + rng2 = rng[[1, 0, 2]] + + msg = "index must be monotonic increasing or decreasing" + with pytest.raises(ValueError, match=msg): + rng2.get_indexer(rng, method="pad") + + +class TestMaybeCastSliceBound: + def test_maybe_cast_slice_bounds_empty(self): + # GH#14354 + empty_idx = date_range(freq="1H", periods=0, end="2015") + + right = empty_idx._maybe_cast_slice_bound("2015-01-02", "right") + exp = Timestamp("2015-01-02 23:59:59.999999999") + assert right == exp + + left = empty_idx._maybe_cast_slice_bound("2015-01-02", "left") + exp = Timestamp("2015-01-02 00:00:00") + assert left == exp + + def test_maybe_cast_slice_duplicate_monotonic(self): + # https://github.com/pandas-dev/pandas/issues/16515 + idx = DatetimeIndex(["2017", "2017"]) + result = idx._maybe_cast_slice_bound("2017-01-01", "left") + expected = Timestamp("2017-01-01") + assert result == expected + + +class TestGetSliceBounds: + @pytest.mark.parametrize("box", [date, datetime, Timestamp]) + @pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)]) + def test_get_slice_bounds_datetime_within( + self, box, side, expected, tz_aware_fixture + ): + # GH 35690 + tz = tz_aware_fixture + index = bdate_range("2000-01-03", "2000-02-11").tz_localize(tz) + key = box(year=2000, month=1, day=7) + + if tz is not None: + with pytest.raises(TypeError, match="Cannot compare tz-naive"): + # GH#36148 we require tzawareness-compat as of 2.0 + index.get_slice_bound(key, side=side) + else: + result = index.get_slice_bound(key, side=side) + assert result == expected + + @pytest.mark.parametrize("box", [datetime, Timestamp]) + @pytest.mark.parametrize("side", ["left", "right"]) + @pytest.mark.parametrize("year, expected", [(1999, 0), (2020, 30)]) + def test_get_slice_bounds_datetime_outside( + self, box, side, year, expected, tz_aware_fixture + ): + # GH 35690 + tz = tz_aware_fixture + index = bdate_range("2000-01-03", "2000-02-11").tz_localize(tz) + key = box(year=year, month=1, day=7) + + if tz is not None: + with pytest.raises(TypeError, match="Cannot compare tz-naive"): + # GH#36148 we require tzawareness-compat as of 2.0 + index.get_slice_bound(key, side=side) + else: + result = index.get_slice_bound(key, side=side) + assert result == expected + + @pytest.mark.parametrize("box", [datetime, Timestamp]) + def test_slice_datetime_locs(self, box, tz_aware_fixture): + # GH 34077 + tz = tz_aware_fixture + index = DatetimeIndex(["2010-01-01", "2010-01-03"]).tz_localize(tz) + key = box(2010, 1, 1) + + if tz is not None: + with pytest.raises(TypeError, match="Cannot compare tz-naive"): + # GH#36148 we require tzawareness-compat as of 2.0 + index.slice_locs(key, box(2010, 1, 2)) + else: 
+ result = index.slice_locs(key, box(2010, 1, 2)) + expected = (0, 1) + assert result == expected + + +class TestIndexerBetweenTime: + def test_indexer_between_time(self): + # GH#11818 + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time" + with pytest.raises(ValueError, match=msg): + rng.indexer_between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) + + @pytest.mark.parametrize("unit", ["us", "ms", "s"]) + def test_indexer_between_time_non_nano(self, unit): + # For simple cases like this, the non-nano indexer_between_time + # should match the nano result + + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + arr_nano = rng._data._ndarray + + arr = arr_nano.astype(f"M8[{unit}]") + + dta = type(rng._data)._simple_new(arr, dtype=arr.dtype) + dti = DatetimeIndex(dta) + assert dti.dtype == arr.dtype + + tic = time(1, 25) + toc = time(2, 29) + + result = dti.indexer_between_time(tic, toc) + expected = rng.indexer_between_time(tic, toc) + tm.assert_numpy_array_equal(result, expected) + + # case with non-zero micros in arguments + tic = time(1, 25, 0, 45678) + toc = time(2, 29, 0, 1234) + + result = dti.indexer_between_time(tic, toc) + expected = rng.indexer_between_time(tic, toc) + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_join.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_join.py new file mode 100644 index 00000000..ccfdb55f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_join.py @@ -0,0 +1,157 @@ +from datetime import ( + datetime, + timezone, +) + +import numpy as np +import pytest + +from pandas import ( + DatetimeIndex, + Index, + Timestamp, + date_range, + to_datetime, +) +import pandas._testing as tm + +from pandas.tseries.offsets import ( + BDay, + BMonthEnd, +) + + +class TestJoin: + def test_does_not_convert_mixed_integer(self): + df = tm.makeCustomDataframe( + 10, + 10, + data_gen_f=lambda *args, **kwargs: np.random.default_rng( + 2 + ).standard_normal(), + r_idx_type="i", + c_idx_type="dt", + ) + cols = df.columns.join(df.index, how="outer") + joined = cols.join(df.columns) + assert cols.dtype == np.dtype("O") + assert cols.dtype == joined.dtype + tm.assert_numpy_array_equal(cols.values, joined.values) + + def test_join_self(self, join_type): + index = date_range("1/1/2000", periods=10) + joined = index.join(index, how=join_type) + assert index is joined + + def test_join_with_period_index(self, join_type): + df = tm.makeCustomDataframe( + 10, + 10, + data_gen_f=lambda *args: np.random.default_rng(2).integers(2), + c_idx_type="p", + r_idx_type="dt", + ) + s = df.iloc[:5, 0] + + expected = df.columns.astype("O").join(s.index, how=join_type) + result = df.columns.join(s.index, how=join_type) + tm.assert_index_equal(expected, result) + + def test_join_object_index(self): + rng = date_range("1/1/2000", periods=10) + idx = Index(["a", "b", "c", "d"]) + + result = rng.join(idx, how="outer") + assert isinstance(result[0], Timestamp) + + def test_join_utc_convert(self, join_type): + rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") + + left = rng.tz_convert("US/Eastern") + right = rng.tz_convert("Europe/Berlin") + + result = left.join(left[:-5], how=join_type) + assert isinstance(result, DatetimeIndex) + assert result.tz == left.tz + + result = left.join(right[:-5], how=join_type) + assert isinstance(result, 
DatetimeIndex)
+        assert result.tz is timezone.utc
+
+    def test_datetimeindex_union_join_empty(self, sort):
+        dti = date_range(start="1/1/2001", end="2/1/2001", freq="D")
+        empty = Index([])
+
+        result = dti.union(empty, sort=sort)
+        expected = dti.astype("O")
+        tm.assert_index_equal(result, expected)
+
+        result = dti.join(empty)
+        assert isinstance(result, DatetimeIndex)
+        tm.assert_index_equal(result, dti)
+
+    def test_join_nonunique(self):
+        idx1 = to_datetime(["2012-11-06 16:00:11.477563", "2012-11-06 16:00:11.477563"])
+        idx2 = to_datetime(["2012-11-06 15:11:09.006507", "2012-11-06 15:11:09.006507"])
+        rs = idx1.join(idx2, how="outer")
+        assert rs.is_monotonic_increasing
+
+    @pytest.mark.parametrize("freq", ["B", "C"])
+    def test_outer_join(self, freq):
+        # should just behave as union
+        start, end = datetime(2009, 1, 1), datetime(2010, 1, 1)
+        rng = date_range(start=start, end=end, freq=freq)
+
+        # overlapping
+        left = rng[:10]
+        right = rng[5:10]
+
+        the_join = left.join(right, how="outer")
+        assert isinstance(the_join, DatetimeIndex)
+
+        # non-overlapping, gap in middle
+        left = rng[:5]
+        right = rng[10:]
+
+        the_join = left.join(right, how="outer")
+        assert isinstance(the_join, DatetimeIndex)
+        assert the_join.freq is None
+
+        # non-overlapping, no gap
+        left = rng[:5]
+        right = rng[5:10]
+
+        the_join = left.join(right, how="outer")
+        assert isinstance(the_join, DatetimeIndex)
+
+        # overlapping, but different offset
+        other = date_range(start, end, freq=BMonthEnd())
+
+        the_join = rng.join(other, how="outer")
+        assert isinstance(the_join, DatetimeIndex)
+        assert the_join.freq is None
+
+    def test_naive_aware_conflicts(self):
+        start, end = datetime(2009, 1, 1), datetime(2010, 1, 1)
+        naive = date_range(start, end, freq=BDay(), tz=None)
+        aware = date_range(start, end, freq=BDay(), tz="Asia/Hong_Kong")
+
+        msg = "tz-naive.*tz-aware"
+        with pytest.raises(TypeError, match=msg):
+            naive.join(aware)
+
+        with pytest.raises(TypeError, match=msg):
+            aware.join(naive)
+
+    @pytest.mark.parametrize("tz", [None, "US/Pacific"])
+    def test_join_preserves_freq(self, tz):
+        # GH#32157
+        dti = date_range("2016-01-01", periods=10, tz=tz)
+        result = dti[:5].join(dti[5:], how="outer")
+        assert result.freq == dti.freq
+        tm.assert_index_equal(result, dti)
+
+        result = dti[:5].join(dti[6:], how="outer")
+        assert result.freq is None
+        expected = dti.delete(5)
+        tm.assert_index_equal(result, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_map.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_map.py
new file mode 100644
index 00000000..45698ef2
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_map.py
@@ -0,0 +1,47 @@
+import pytest
+
+from pandas import (
+    DatetimeIndex,
+    Index,
+    MultiIndex,
+    Period,
+    date_range,
+)
+import pandas._testing as tm
+
+
+class TestMap:
+    def test_map(self):
+        rng = date_range("1/1/2000", periods=10)
+
+        f = lambda x: x.strftime("%Y%m%d")
+        result = rng.map(f)
+        exp = Index([f(x) for x in rng], dtype="<U8")
+        tm.assert_index_equal(result, exp)
+
+        # non boolean accessors -> return Index
+        for accessor in DatetimeArray._field_ops:
+            res = getattr(dti, accessor)
+            assert len(res) == 365
+            assert isinstance(res, Index)
+            assert res.name == "name"
+
+        # boolean accessors -> return array
+        for accessor in DatetimeArray._bool_ops:
+            res = getattr(dti, accessor)
+            assert len(res) == 365
+            assert isinstance(res, np.ndarray)
+
+        # test boolean indexing
+        res = dti[dti.is_quarter_start]
+        exp = dti[[0, 90, 181, 273]]
+
tm.assert_index_equal(res, exp) + res = dti[dti.is_leap_year] + exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name") + tm.assert_index_equal(res, exp) + + def test_datetimeindex_accessors2(self): + dti = date_range(freq="BQ-FEB", start=datetime(1998, 1, 1), periods=4) + + assert sum(dti.is_quarter_start) == 0 + assert sum(dti.is_quarter_end) == 4 + assert sum(dti.is_year_start) == 0 + assert sum(dti.is_year_end) == 1 + + def test_datetimeindex_accessors3(self): + # Ensure is_start/end accessors throw ValueError for CustomBusinessDay, + bday_egypt = offsets.CustomBusinessDay(weekmask="Sun Mon Tue Wed Thu") + dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt) + msg = "Custom business days is not supported by is_month_start" + with pytest.raises(ValueError, match=msg): + dti.is_month_start + + def test_datetimeindex_accessors4(self): + dti = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"]) + + assert dti.is_month_start[0] == 1 + + def test_datetimeindex_accessors5(self): + freq_m = to_offset("M") + bm = to_offset("BM") + qfeb = to_offset("Q-FEB") + qsfeb = to_offset("QS-FEB") + bq = to_offset("BQ") + bqs_apr = to_offset("BQS-APR") + as_nov = to_offset("AS-NOV") + + tests = [ + (freq_m.is_month_start(Timestamp("2013-06-01")), 1), + (bm.is_month_start(Timestamp("2013-06-01")), 0), + (freq_m.is_month_start(Timestamp("2013-06-03")), 0), + (bm.is_month_start(Timestamp("2013-06-03")), 1), + (qfeb.is_month_end(Timestamp("2013-02-28")), 1), + (qfeb.is_quarter_end(Timestamp("2013-02-28")), 1), + (qfeb.is_year_end(Timestamp("2013-02-28")), 1), + (qfeb.is_month_start(Timestamp("2013-03-01")), 1), + (qfeb.is_quarter_start(Timestamp("2013-03-01")), 1), + (qfeb.is_year_start(Timestamp("2013-03-01")), 1), + (qsfeb.is_month_end(Timestamp("2013-03-31")), 1), + (qsfeb.is_quarter_end(Timestamp("2013-03-31")), 0), + (qsfeb.is_year_end(Timestamp("2013-03-31")), 0), + (qsfeb.is_month_start(Timestamp("2013-02-01")), 1), + (qsfeb.is_quarter_start(Timestamp("2013-02-01")), 1), + (qsfeb.is_year_start(Timestamp("2013-02-01")), 1), + (bq.is_month_end(Timestamp("2013-06-30")), 0), + (bq.is_quarter_end(Timestamp("2013-06-30")), 0), + (bq.is_year_end(Timestamp("2013-06-30")), 0), + (bq.is_month_end(Timestamp("2013-06-28")), 1), + (bq.is_quarter_end(Timestamp("2013-06-28")), 1), + (bq.is_year_end(Timestamp("2013-06-28")), 0), + (bqs_apr.is_month_end(Timestamp("2013-06-30")), 0), + (bqs_apr.is_quarter_end(Timestamp("2013-06-30")), 0), + (bqs_apr.is_year_end(Timestamp("2013-06-30")), 0), + (bqs_apr.is_month_end(Timestamp("2013-06-28")), 1), + (bqs_apr.is_quarter_end(Timestamp("2013-06-28")), 1), + (bqs_apr.is_year_end(Timestamp("2013-03-29")), 1), + (as_nov.is_year_start(Timestamp("2013-11-01")), 1), + (as_nov.is_year_end(Timestamp("2013-10-31")), 1), + (Timestamp("2012-02-01").days_in_month, 29), + (Timestamp("2013-02-01").days_in_month, 28), + ] + + for ts, value in tests: + assert ts == value + + def test_datetimeindex_accessors6(self): + # GH 6538: Check that DatetimeIndex and its TimeStamp elements + # return the same weekofyear accessor close to new year w/ tz + dates = ["2013/12/29", "2013/12/30", "2013/12/31"] + dates = DatetimeIndex(dates, tz="Europe/Brussels") + expected = [52, 1, 1] + assert dates.isocalendar().week.tolist() == expected + assert [d.weekofyear for d in dates] == expected + + # GH 12806 + # error: Unsupported operand types for + ("List[None]" and "List[str]") + @pytest.mark.parametrize( + "time_locale", [None] + tm.get_locales() # type: ignore[operator] + ) + def 
test_datetime_name_accessors(self, time_locale):
+        # Test Monday -> Sunday and January -> December, in that sequence
+        if time_locale is None:
+            # If the time_locale is None, day_name and month_name should
+            # return the english attributes
+            expected_days = [
+                "Monday",
+                "Tuesday",
+                "Wednesday",
+                "Thursday",
+                "Friday",
+                "Saturday",
+                "Sunday",
+            ]
+            expected_months = [
+                "January",
+                "February",
+                "March",
+                "April",
+                "May",
+                "June",
+                "July",
+                "August",
+                "September",
+                "October",
+                "November",
+                "December",
+            ]
+        else:
+            with tm.set_locale(time_locale, locale.LC_TIME):
+                expected_days = calendar.day_name[:]
+                expected_months = calendar.month_name[1:]
+
+        # GH#11128
+        dti = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
+        english_days = [
+            "Monday",
+            "Tuesday",
+            "Wednesday",
+            "Thursday",
+            "Friday",
+            "Saturday",
+            "Sunday",
+        ]
+        for day, name, eng_name in zip(range(4, 11), expected_days, english_days):
+            name = name.capitalize()
+            assert dti.day_name(locale=time_locale)[day] == name
+            assert dti.day_name(locale=None)[day] == eng_name
+            ts = Timestamp(datetime(2016, 4, day))
+            assert ts.day_name(locale=time_locale) == name
+        dti = dti.append(DatetimeIndex([pd.NaT]))
+        assert np.isnan(dti.day_name(locale=time_locale)[-1])
+        ts = Timestamp(pd.NaT)
+        assert np.isnan(ts.day_name(locale=time_locale))
+
+        # GH#12805
+        dti = date_range(freq="M", start="2012", end="2013")
+        result = dti.month_name(locale=time_locale)
+        expected = Index([month.capitalize() for month in expected_months])
+
+        # work around different normalization schemes
+        # https://github.com/pandas-dev/pandas/issues/22342
+        result = result.str.normalize("NFD")
+        expected = expected.str.normalize("NFD")
+
+        tm.assert_index_equal(result, expected)
+
+        for date, expected in zip(dti, expected_months):
+            result = date.month_name(locale=time_locale)
+            expected = expected.capitalize()
+
+            result = unicodedata.normalize("NFD", result)
+            expected = unicodedata.normalize("NFD", expected)
+
+            assert result == expected
+        dti = dti.append(DatetimeIndex([pd.NaT]))
+        assert np.isnan(dti.month_name(locale=time_locale)[-1])
+
+    def test_nanosecond_field(self):
+        dti = DatetimeIndex(np.arange(10))
+        expected = Index(np.arange(10, dtype=np.int32))
+
+        tm.assert_index_equal(dti.nanosecond, expected)
+
+
+def test_iter_readonly():
+    # GH#28055 ints_to_pydatetime with readonly array
+    arr = np.array([np.datetime64("2012-02-15T12:00:00.000000000")])
+    arr.setflags(write=False)
+    dti = pd.to_datetime(arr)
+    list(dti)
+
+
+def test_add_timedelta_preserves_freq():
+    # GH#37295 should hold for any DTI with freq=None or Tick freq
+    tz = "Canada/Eastern"
+    dti = date_range(
+        start=Timestamp("2019-03-26 00:00:00-0400", tz=tz),
+        end=Timestamp("2020-10-17 00:00:00-0400", tz=tz),
+        freq="D",
+    )
+    result = dti + Timedelta(days=1)
+    assert result.freq == dti.freq
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_npfuncs.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_npfuncs.py
new file mode 100644
index 00000000..301466c0
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_npfuncs.py
@@ -0,0 +1,13 @@
+import numpy as np
+
+from pandas import date_range
+import pandas._testing as tm
+
+
+class TestSplit:
+    def test_split_non_utc(self):
+        # GH#14042
+        indices = date_range("2016-01-01 00:00:00+0200", freq="S", periods=10)
+        result = np.split(indices, indices_or_sections=[])[0]
+        expected = indices._with_freq(None)
+
tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_ops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_ops.py new file mode 100644 index 00000000..d6ef4198 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_ops.py @@ -0,0 +1,85 @@ +from datetime import datetime + +from dateutil.tz import tzlocal +import pytest + +from pandas.compat import IS64 + +from pandas import ( + DatetimeIndex, + Index, + bdate_range, + date_range, +) +import pandas._testing as tm + +START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) + + +class TestDatetimeIndexOps: + @pytest.mark.parametrize( + "freq,expected", + [ + ("A", "day"), + ("Q", "day"), + ("M", "day"), + ("D", "day"), + ("H", "hour"), + ("T", "minute"), + ("S", "second"), + ("L", "millisecond"), + ("U", "microsecond"), + ], + ) + def test_resolution(self, request, tz_naive_fixture, freq, expected): + tz = tz_naive_fixture + if freq == "A" and not IS64 and isinstance(tz, tzlocal): + request.node.add_marker( + pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038") + ) + + idx = date_range(start="2013-04-01", periods=30, freq=freq, tz=tz) + assert idx.resolution == expected + + def test_infer_freq(self, freq_sample): + # GH 11018 + idx = date_range("2011-01-01 09:00:00", freq=freq_sample, periods=10) + result = DatetimeIndex(idx.asi8, freq="infer") + tm.assert_index_equal(idx, result) + assert result.freq == freq_sample + + +@pytest.mark.parametrize("freq", ["B", "C"]) +class TestBusinessDatetimeIndex: + @pytest.fixture + def rng(self, freq): + return bdate_range(START, END, freq=freq) + + def test_comparison(self, rng): + d = rng[10] + + comp = rng > d + assert comp[11] + assert not comp[9] + + def test_copy(self, rng): + cp = rng.copy() + repr(cp) + tm.assert_index_equal(cp, rng) + + def test_identical(self, rng): + t1 = rng.copy() + t2 = rng.copy() + assert t1.identical(t2) + + # name + t1 = t1.rename("foo") + assert t1.equals(t2) + assert not t1.identical(t2) + t2 = t2.rename("foo") + assert t1.identical(t2) + + # freq + t2v = Index(t2.values) + assert t1.equals(t2v) + assert not t1.identical(t2v) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_partial_slicing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_partial_slicing.py new file mode 100644 index 00000000..7978e596 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -0,0 +1,461 @@ +""" test partial slicing on Series/Frame """ + +from datetime import datetime + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + Series, + Timedelta, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestSlicing: + def test_string_index_series_name_converted(self): + # GH#1644 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + index=date_range("1/1/2000", periods=10), + ) + + result = df.loc["1/3/2000"] + assert result.name == df.index[2] + + result = df.T["1/3/2000"] + assert result.name == df.index[2] + + def test_stringified_slice_with_tz(self): + # GH#2658 + start = "2013-01-07" + idx = date_range(start=start, freq="1d", periods=10, tz="US/Eastern") + df = DataFrame(np.arange(10), index=idx) + df["2013-01-14 23:44:34.437768-05:00":] # no exception here + + def test_return_type_doesnt_depend_on_monotonicity(self): + # GH#24892 we 
get Series back regardless of whether our DTI is monotonic
+        dti = date_range(start="2015-5-13 23:59:00", freq="min", periods=3)
+        ser = Series(range(3), index=dti)
+
+        # non-monotonic index
+        ser2 = Series(range(3), index=[dti[1], dti[0], dti[2]])
+
+        # key with resolution strictly lower than "min"
+        key = "2015-5-14 00"
+
+        # monotonic increasing index
+        result = ser.loc[key]
+        expected = ser.iloc[1:]
+        tm.assert_series_equal(result, expected)
+
+        # monotonic decreasing index
+        result = ser.iloc[::-1].loc[key]
+        expected = ser.iloc[::-1][:-1]
+        tm.assert_series_equal(result, expected)
+
+        # non-monotonic index
+        result2 = ser2.loc[key]
+        expected2 = ser2.iloc[::2]
+        tm.assert_series_equal(result2, expected2)
+
+    def test_return_type_doesnt_depend_on_monotonicity_higher_reso(self):
+        # GH#24892 we get Series back regardless of whether our DTI is monotonic
+        dti = date_range(start="2015-5-13 23:59:00", freq="min", periods=3)
+        ser = Series(range(3), index=dti)
+
+        # non-monotonic index
+        ser2 = Series(range(3), index=[dti[1], dti[0], dti[2]])
+
+        # key with resolution strictly *higher* than "min"
+        key = "2015-5-14 00:00:00"
+
+        # monotonic increasing index
+        result = ser.loc[key]
+        assert result == 1
+
+        # monotonic decreasing index
+        result = ser.iloc[::-1].loc[key]
+        assert result == 1
+
+        # non-monotonic index
+        result2 = ser2.loc[key]
+        assert result2 == 0
+
+    def test_monotone_DTI_indexing_bug(self):
+        # GH 19362
+        # Test accessing the first element when doing partial string
+        # indexing on a monotonically decreasing DatetimeIndex.
+
+        df = DataFrame(list(range(5)))
+        date_list = [
+            "2018-01-02",
+            "2017-02-10",
+            "2016-03-10",
+            "2015-03-15",
+            "2014-03-16",
+        ]
+        date_index = DatetimeIndex(date_list)
+        df["date"] = date_index
+        expected = DataFrame({0: list(range(5)), "date": date_index})
+        tm.assert_frame_equal(df, expected)
+
+        # We get a slice because df.index's resolution is hourly and we
+        # are slicing with a daily-resolution string.
If both were daily, + # we would get a single item back + dti = date_range("20170101 01:00:00", periods=3) + df = DataFrame({"A": [1, 2, 3]}, index=dti[::-1]) + + expected = DataFrame({"A": 1}, index=dti[-1:][::-1]) + result = df.loc["2017-01-03"] + tm.assert_frame_equal(result, expected) + + result2 = df.iloc[::-1].loc["2017-01-03"] + expected2 = expected.iloc[::-1] + tm.assert_frame_equal(result2, expected2) + + def test_slice_year(self): + dti = date_range(freq="B", start=datetime(2005, 1, 1), periods=500) + + s = Series(np.arange(len(dti)), index=dti) + result = s["2005"] + expected = s[s.index.year == 2005] + tm.assert_series_equal(result, expected) + + df = DataFrame(np.random.default_rng(2).random((len(dti), 5)), index=dti) + result = df.loc["2005"] + expected = df[df.index.year == 2005] + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "partial_dtime", + [ + "2019", + "2019Q4", + "Dec 2019", + "2019-12-31", + "2019-12-31 23", + "2019-12-31 23:59", + ], + ) + def test_slice_end_of_period_resolution(self, partial_dtime): + # GH#31064 + dti = date_range("2019-12-31 23:59:55.999999999", periods=10, freq="s") + + ser = Series(range(10), index=dti) + result = ser[partial_dtime] + expected = ser.iloc[:5] + tm.assert_series_equal(result, expected) + + def test_slice_quarter(self): + dti = date_range(freq="D", start=datetime(2000, 6, 1), periods=500) + + s = Series(np.arange(len(dti)), index=dti) + assert len(s["2001Q1"]) == 90 + + df = DataFrame(np.random.default_rng(2).random((len(dti), 5)), index=dti) + assert len(df.loc["1Q01"]) == 90 + + def test_slice_month(self): + dti = date_range(freq="D", start=datetime(2005, 1, 1), periods=500) + s = Series(np.arange(len(dti)), index=dti) + assert len(s["2005-11"]) == 30 + + df = DataFrame(np.random.default_rng(2).random((len(dti), 5)), index=dti) + assert len(df.loc["2005-11"]) == 30 + + tm.assert_series_equal(s["2005-11"], s["11-2005"]) + + def test_partial_slice(self): + rng = date_range(freq="D", start=datetime(2005, 1, 1), periods=500) + s = Series(np.arange(len(rng)), index=rng) + + result = s["2005-05":"2006-02"] + expected = s["20050501":"20060228"] + tm.assert_series_equal(result, expected) + + result = s["2005-05":] + expected = s["20050501":] + tm.assert_series_equal(result, expected) + + result = s[:"2006-02"] + expected = s[:"20060228"] + tm.assert_series_equal(result, expected) + + result = s["2005-1-1"] + assert result == s.iloc[0] + + with pytest.raises(KeyError, match=r"^'2004-12-31'$"): + s["2004-12-31"] + + def test_partial_slice_daily(self): + rng = date_range(freq="H", start=datetime(2005, 1, 31), periods=500) + s = Series(np.arange(len(rng)), index=rng) + + result = s["2005-1-31"] + tm.assert_series_equal(result, s.iloc[:24]) + + with pytest.raises(KeyError, match=r"^'2004-12-31 00'$"): + s["2004-12-31 00"] + + def test_partial_slice_hourly(self): + rng = date_range(freq="T", start=datetime(2005, 1, 1, 20, 0, 0), periods=500) + s = Series(np.arange(len(rng)), index=rng) + + result = s["2005-1-1"] + tm.assert_series_equal(result, s.iloc[: 60 * 4]) + + result = s["2005-1-1 20"] + tm.assert_series_equal(result, s.iloc[:60]) + + assert s["2005-1-1 20:00"] == s.iloc[0] + with pytest.raises(KeyError, match=r"^'2004-12-31 00:15'$"): + s["2004-12-31 00:15"] + + def test_partial_slice_minutely(self): + rng = date_range(freq="S", start=datetime(2005, 1, 1, 23, 59, 0), periods=500) + s = Series(np.arange(len(rng)), index=rng) + + result = s["2005-1-1 23:59"] + tm.assert_series_equal(result, s.iloc[:60]) + + 
result = s["2005-1-1"] + tm.assert_series_equal(result, s.iloc[:60]) + + assert s[Timestamp("2005-1-1 23:59:00")] == s.iloc[0] + with pytest.raises(KeyError, match=r"^'2004-12-31 00:00:00'$"): + s["2004-12-31 00:00:00"] + + def test_partial_slice_second_precision(self): + rng = date_range( + start=datetime(2005, 1, 1, 0, 0, 59, microsecond=999990), + periods=20, + freq="US", + ) + s = Series(np.arange(20), rng) + + tm.assert_series_equal(s["2005-1-1 00:00"], s.iloc[:10]) + tm.assert_series_equal(s["2005-1-1 00:00:59"], s.iloc[:10]) + + tm.assert_series_equal(s["2005-1-1 00:01"], s.iloc[10:]) + tm.assert_series_equal(s["2005-1-1 00:01:00"], s.iloc[10:]) + + assert s[Timestamp("2005-1-1 00:00:59.999990")] == s.iloc[0] + with pytest.raises(KeyError, match="2005-1-1 00:00:00"): + s["2005-1-1 00:00:00"] + + def test_partial_slicing_dataframe(self): + # GH14856 + # Test various combinations of string slicing resolution vs. + # index resolution + # - If string resolution is less precise than index resolution, + # string is considered a slice + # - If string resolution is equal to or more precise than index + # resolution, string is considered an exact match + formats = [ + "%Y", + "%Y-%m", + "%Y-%m-%d", + "%Y-%m-%d %H", + "%Y-%m-%d %H:%M", + "%Y-%m-%d %H:%M:%S", + ] + resolutions = ["year", "month", "day", "hour", "minute", "second"] + for rnum, resolution in enumerate(resolutions[2:], 2): + # we check only 'day', 'hour', 'minute' and 'second' + unit = Timedelta("1 " + resolution) + middate = datetime(2012, 1, 1, 0, 0, 0) + index = DatetimeIndex([middate - unit, middate, middate + unit]) + values = [1, 2, 3] + df = DataFrame({"a": values}, index, dtype=np.int64) + assert df.index.resolution == resolution + + # Timestamp with the same resolution as index + # Should be exact match for Series (return scalar) + # and raise KeyError for Frame + for timestamp, expected in zip(index, values): + ts_string = timestamp.strftime(formats[rnum]) + # make ts_string as precise as index + result = df["a"][ts_string] + assert isinstance(result, np.int64) + assert result == expected + msg = rf"^'{ts_string}'$" + with pytest.raises(KeyError, match=msg): + df[ts_string] + + # Timestamp with resolution less precise than index + for fmt in formats[:rnum]: + for element, theslice in [[0, slice(None, 1)], [1, slice(1, None)]]: + ts_string = index[element].strftime(fmt) + + # Series should return slice + result = df["a"][ts_string] + expected = df["a"][theslice] + tm.assert_series_equal(result, expected) + + # pre-2.0 df[ts_string] was overloaded to interpret this + # as slicing along index + with pytest.raises(KeyError, match=ts_string): + df[ts_string] + + # Timestamp with resolution more precise than index + # Compatible with existing key + # Should return scalar for Series + # and raise KeyError for Frame + for fmt in formats[rnum + 1 :]: + ts_string = index[1].strftime(fmt) + result = df["a"][ts_string] + assert isinstance(result, np.int64) + assert result == 2 + msg = rf"^'{ts_string}'$" + with pytest.raises(KeyError, match=msg): + df[ts_string] + + # Not compatible with existing key + # Should raise KeyError + for fmt, res in list(zip(formats, resolutions))[rnum + 1 :]: + ts = index[1] + Timedelta("1 " + res) + ts_string = ts.strftime(fmt) + msg = rf"^'{ts_string}'$" + with pytest.raises(KeyError, match=msg): + df["a"][ts_string] + with pytest.raises(KeyError, match=msg): + df[ts_string] + + def test_partial_slicing_with_multiindex(self): + # GH 4758 + # partial string indexing with a multi-index buggy + df = 
DataFrame( + { + "ACCOUNT": ["ACCT1", "ACCT1", "ACCT1", "ACCT2"], + "TICKER": ["ABC", "MNP", "XYZ", "XYZ"], + "val": [1, 2, 3, 4], + }, + index=date_range("2013-06-19 09:30:00", periods=4, freq="5T"), + ) + df_multi = df.set_index(["ACCOUNT", "TICKER"], append=True) + + expected = DataFrame( + [[1]], index=Index(["ABC"], name="TICKER"), columns=["val"] + ) + result = df_multi.loc[("2013-06-19 09:30:00", "ACCT1")] + tm.assert_frame_equal(result, expected) + + expected = df_multi.loc[ + (Timestamp("2013-06-19 09:30:00", tz=None), "ACCT1", "ABC") + ] + result = df_multi.loc[("2013-06-19 09:30:00", "ACCT1", "ABC")] + tm.assert_series_equal(result, expected) + + # partial string indexing on first level, scalar indexing on the other two + result = df_multi.loc[("2013-06-19", "ACCT1", "ABC")] + expected = df_multi.iloc[:1].droplevel([1, 2]) + tm.assert_frame_equal(result, expected) + + def test_partial_slicing_with_multiindex_series(self): + # GH 4294 + # partial slice on a series mi + ser = DataFrame( + np.random.default_rng(2).random((1000, 1000)), + index=date_range("2000-1-1", periods=1000), + ).stack(future_stack=True) + + s2 = ser[:-1].copy() + expected = s2["2000-1-4"] + result = s2[Timestamp("2000-1-4")] + tm.assert_series_equal(result, expected) + + result = ser[Timestamp("2000-1-4")] + expected = ser["2000-1-4"] + tm.assert_series_equal(result, expected) + + df2 = DataFrame(ser) + expected = df2.xs("2000-1-4") + result = df2.loc[Timestamp("2000-1-4")] + tm.assert_frame_equal(result, expected) + + def test_partial_slice_requires_monotonicity(self): + # Disallowed since 2.0 (GH 37819) + ser = Series(np.arange(10), date_range("2014-01-01", periods=10)) + + nonmonotonic = ser.iloc[[3, 5, 4]] + timestamp = Timestamp("2014-01-10") + with pytest.raises( + KeyError, match="Value based partial slicing on non-monotonic" + ): + nonmonotonic["2014-01-10":] + + with pytest.raises(KeyError, match=r"Timestamp\('2014-01-10 00:00:00'\)"): + nonmonotonic[timestamp:] + + with pytest.raises( + KeyError, match="Value based partial slicing on non-monotonic" + ): + nonmonotonic.loc["2014-01-10":] + + with pytest.raises(KeyError, match=r"Timestamp\('2014-01-10 00:00:00'\)"): + nonmonotonic.loc[timestamp:] + + def test_loc_datetime_length_one(self): + # GH16071 + df = DataFrame( + columns=["1"], + index=date_range("2016-10-01T00:00:00", "2016-10-01T23:59:59"), + ) + result = df.loc[datetime(2016, 10, 1) :] + tm.assert_frame_equal(result, df) + + result = df.loc["2016-10-01T00:00:00":] + tm.assert_frame_equal(result, df) + + @pytest.mark.parametrize( + "start", + [ + "2018-12-02 21:50:00+00:00", + Timestamp("2018-12-02 21:50:00+00:00"), + Timestamp("2018-12-02 21:50:00+00:00").to_pydatetime(), + ], + ) + @pytest.mark.parametrize( + "end", + [ + "2018-12-02 21:52:00+00:00", + Timestamp("2018-12-02 21:52:00+00:00"), + Timestamp("2018-12-02 21:52:00+00:00").to_pydatetime(), + ], + ) + def test_getitem_with_datestring_with_UTC_offset(self, start, end): + # GH 24076 + idx = date_range( + start="2018-12-02 14:50:00-07:00", + end="2018-12-02 14:50:00-07:00", + freq="1min", + ) + df = DataFrame(1, index=idx, columns=["A"]) + result = df[start:end] + expected = df.iloc[0:3, :] + tm.assert_frame_equal(result, expected) + + # GH 16785 + start = str(start) + end = str(end) + with pytest.raises(ValueError, match="Both dates must"): + df[start : end[:-4] + "1:00"] + + with pytest.raises(ValueError, match="The index must be timezone"): + df = df.tz_localize(None) + df[start:end] + + def test_slice_reduce_to_series(self): + # 
GH 27516 + df = DataFrame({"A": range(24)}, index=date_range("2000", periods=24, freq="M")) + expected = Series( + range(12), index=date_range("2000", periods=12, freq="M"), name="A" + ) + result = df.loc["2000", "A"] + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_pickle.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_pickle.py new file mode 100644 index 00000000..922b4a18 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_pickle.py @@ -0,0 +1,45 @@ +import pytest + +from pandas import ( + NaT, + date_range, + to_datetime, +) +import pandas._testing as tm + + +class TestPickle: + def test_pickle(self): + # GH#4606 + idx = to_datetime(["2013-01-01", NaT, "2014-01-06"]) + idx_p = tm.round_trip_pickle(idx) + assert idx_p[0] == idx[0] + assert idx_p[1] is NaT + assert idx_p[2] == idx[2] + + def test_pickle_dont_infer_freq(self): + # GH#11002 + # don't infer freq + idx = date_range("1750-1-1", "2050-1-1", freq="7D") + idx_p = tm.round_trip_pickle(idx) + tm.assert_index_equal(idx, idx_p) + + def test_pickle_after_set_freq(self): + dti = date_range("20130101", periods=3, tz="US/Eastern", name="foo") + dti = dti._with_freq(None) + + res = tm.round_trip_pickle(dti) + tm.assert_index_equal(res, dti) + + def test_roundtrip_pickle_with_tz(self): + # GH#8367 + # round-trip of timezone + index = date_range("20130101", periods=3, tz="US/Eastern", name="foo") + unpickled = tm.round_trip_pickle(index) + tm.assert_index_equal(index, unpickled) + + @pytest.mark.parametrize("freq", ["B", "C"]) + def test_pickle_unpickle(self, freq): + rng = date_range("2009-01-01", "2010-01-01", freq=freq) + unpickled = tm.round_trip_pickle(rng) + assert unpickled.freq == freq diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_reindex.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_reindex.py new file mode 100644 index 00000000..e4911aa3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_reindex.py @@ -0,0 +1,56 @@ +from datetime import timedelta + +import numpy as np + +from pandas import ( + DatetimeIndex, + date_range, +) +import pandas._testing as tm + + +class TestDatetimeIndexReindex: + def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self): + # GH#7774 + index = date_range("2013-01-01", periods=3, tz="US/Eastern") + assert str(index.reindex([])[0].tz) == "US/Eastern" + assert str(index.reindex(np.array([]))[0].tz) == "US/Eastern" + + def test_reindex_with_same_tz_nearest(self): + # GH#32740 + rng_a = date_range("2010-01-01", "2010-01-02", periods=24, tz="utc") + rng_b = date_range("2010-01-01", "2010-01-02", periods=23, tz="utc") + result1, result2 = rng_a.reindex( + rng_b, method="nearest", tolerance=timedelta(seconds=20) + ) + expected_list1 = [ + "2010-01-01 00:00:00", + "2010-01-01 01:05:27.272727272", + "2010-01-01 02:10:54.545454545", + "2010-01-01 03:16:21.818181818", + "2010-01-01 04:21:49.090909090", + "2010-01-01 05:27:16.363636363", + "2010-01-01 06:32:43.636363636", + "2010-01-01 07:38:10.909090909", + "2010-01-01 08:43:38.181818181", + "2010-01-01 09:49:05.454545454", + "2010-01-01 10:54:32.727272727", + "2010-01-01 12:00:00", + "2010-01-01 13:05:27.272727272", + "2010-01-01 14:10:54.545454545", + "2010-01-01 15:16:21.818181818", + "2010-01-01 16:21:49.090909090", + "2010-01-01 17:27:16.363636363", + "2010-01-01 
18:32:43.636363636", + "2010-01-01 19:38:10.909090909", + "2010-01-01 20:43:38.181818181", + "2010-01-01 21:49:05.454545454", + "2010-01-01 22:54:32.727272727", + "2010-01-02 00:00:00", + ] + expected1 = DatetimeIndex( + expected_list1, dtype="datetime64[ns, UTC]", freq=None + ) + expected2 = np.array([0] + [-1] * 21 + [23], dtype=np.dtype("intp")) + tm.assert_index_equal(result1, expected1) + tm.assert_numpy_array_equal(result2, expected2) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_scalar_compat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_scalar_compat.py new file mode 100644 index 00000000..f07a9dce --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_scalar_compat.py @@ -0,0 +1,347 @@ +""" +Tests for DatetimeIndex methods behaving like their Timestamp counterparts +""" +from datetime import datetime + +import numpy as np +import pytest + +from pandas._libs.tslibs import ( + OutOfBoundsDatetime, + to_offset, +) +from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG + +import pandas as pd +from pandas import ( + DatetimeIndex, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestDatetimeIndexOps: + def test_dti_time(self): + rng = date_range("1/1/2000", freq="12min", periods=10) + result = pd.Index(rng).time + expected = [t.time() for t in rng] + assert (result == expected).all() + + def test_dti_date(self): + rng = date_range("1/1/2000", freq="12H", periods=10) + result = pd.Index(rng).date + expected = [t.date() for t in rng] + assert (result == expected).all() + + @pytest.mark.parametrize("data", [["1400-01-01"], [datetime(1400, 1, 1)]]) + def test_dti_date_out_of_range(self, data): + # GH#1475 + msg = ( + "^Out of bounds nanosecond timestamp: " + "1400-01-01( 00:00:00)?, at position 0$" + ) + with pytest.raises(OutOfBoundsDatetime, match=msg): + DatetimeIndex(data) + + @pytest.mark.parametrize( + "field", + [ + "dayofweek", + "day_of_week", + "dayofyear", + "day_of_year", + "quarter", + "days_in_month", + "is_month_start", + "is_month_end", + "is_quarter_start", + "is_quarter_end", + "is_year_start", + "is_year_end", + ], + ) + def test_dti_timestamp_fields(self, field): + # extra fields from DatetimeIndex like quarter and week + idx = tm.makeDateIndex(100) + expected = getattr(idx, field)[-1] + + result = getattr(Timestamp(idx[-1]), field) + assert result == expected + + def test_dti_timestamp_isocalendar_fields(self): + idx = tm.makeDateIndex(100) + expected = tuple(idx.isocalendar().iloc[-1].to_list()) + result = idx[-1].isocalendar() + assert result == expected + + # ---------------------------------------------------------------- + # DatetimeIndex.round + + def test_round_daily(self): + dti = date_range("20130101 09:10:11", periods=5) + result = dti.round("D") + expected = date_range("20130101", periods=5) + tm.assert_index_equal(result, expected) + + dti = dti.tz_localize("UTC").tz_convert("US/Eastern") + result = dti.round("D") + expected = date_range("20130101", periods=5).tz_localize("US/Eastern") + tm.assert_index_equal(result, expected) + + result = dti.round("s") + tm.assert_index_equal(result, dti) + + @pytest.mark.parametrize( + "freq, error_msg", + [ + ("Y", " is a non-fixed frequency"), + ("M", " is a non-fixed frequency"), + ("foobar", "Invalid frequency: foobar"), + ], + ) + def test_round_invalid(self, freq, error_msg): + dti = date_range("20130101 09:10:11", periods=5) + dti = 
dti.tz_localize("UTC").tz_convert("US/Eastern") + with pytest.raises(ValueError, match=error_msg): + dti.round(freq) + + def test_round(self, tz_naive_fixture): + tz = tz_naive_fixture + rng = date_range(start="2016-01-01", periods=5, freq="30Min", tz=tz) + elt = rng[1] + + expected_rng = DatetimeIndex( + [ + Timestamp("2016-01-01 00:00:00", tz=tz), + Timestamp("2016-01-01 00:00:00", tz=tz), + Timestamp("2016-01-01 01:00:00", tz=tz), + Timestamp("2016-01-01 02:00:00", tz=tz), + Timestamp("2016-01-01 02:00:00", tz=tz), + ] + ) + expected_elt = expected_rng[1] + + tm.assert_index_equal(rng.round(freq="H"), expected_rng) + assert elt.round(freq="H") == expected_elt + + msg = INVALID_FREQ_ERR_MSG + with pytest.raises(ValueError, match=msg): + rng.round(freq="foo") + with pytest.raises(ValueError, match=msg): + elt.round(freq="foo") + + msg = " is a non-fixed frequency" + with pytest.raises(ValueError, match=msg): + rng.round(freq="M") + with pytest.raises(ValueError, match=msg): + elt.round(freq="M") + + # GH#14440 & GH#15578 + index = DatetimeIndex(["2016-10-17 12:00:00.0015"], tz=tz) + result = index.round("ms") + expected = DatetimeIndex(["2016-10-17 12:00:00.002000"], tz=tz) + tm.assert_index_equal(result, expected) + + for freq in ["us", "ns"]: + tm.assert_index_equal(index, index.round(freq)) + + index = DatetimeIndex(["2016-10-17 12:00:00.00149"], tz=tz) + result = index.round("ms") + expected = DatetimeIndex(["2016-10-17 12:00:00.001000"], tz=tz) + tm.assert_index_equal(result, expected) + + index = DatetimeIndex(["2016-10-17 12:00:00.001501031"]) + result = index.round("10ns") + expected = DatetimeIndex(["2016-10-17 12:00:00.001501030"]) + tm.assert_index_equal(result, expected) + + with tm.assert_produces_warning(False): + ts = "2016-10-17 12:00:00.001501031" + DatetimeIndex([ts]).round("1010ns") + + def test_no_rounding_occurs(self, tz_naive_fixture): + # GH 21262 + tz = tz_naive_fixture + rng = date_range(start="2016-01-01", periods=5, freq="2Min", tz=tz) + + expected_rng = DatetimeIndex( + [ + Timestamp("2016-01-01 00:00:00", tz=tz), + Timestamp("2016-01-01 00:02:00", tz=tz), + Timestamp("2016-01-01 00:04:00", tz=tz), + Timestamp("2016-01-01 00:06:00", tz=tz), + Timestamp("2016-01-01 00:08:00", tz=tz), + ] + ) + + tm.assert_index_equal(rng.round(freq="2T"), expected_rng) + + @pytest.mark.parametrize( + "test_input, rounder, freq, expected", + [ + (["2117-01-01 00:00:45"], "floor", "15s", ["2117-01-01 00:00:45"]), + (["2117-01-01 00:00:45"], "ceil", "15s", ["2117-01-01 00:00:45"]), + ( + ["2117-01-01 00:00:45.000000012"], + "floor", + "10ns", + ["2117-01-01 00:00:45.000000010"], + ), + ( + ["1823-01-01 00:00:01.000000012"], + "ceil", + "10ns", + ["1823-01-01 00:00:01.000000020"], + ), + (["1823-01-01 00:00:01"], "floor", "1s", ["1823-01-01 00:00:01"]), + (["1823-01-01 00:00:01"], "ceil", "1s", ["1823-01-01 00:00:01"]), + (["2018-01-01 00:15:00"], "ceil", "15T", ["2018-01-01 00:15:00"]), + (["2018-01-01 00:15:00"], "floor", "15T", ["2018-01-01 00:15:00"]), + (["1823-01-01 03:00:00"], "ceil", "3H", ["1823-01-01 03:00:00"]), + (["1823-01-01 03:00:00"], "floor", "3H", ["1823-01-01 03:00:00"]), + ( + ("NaT", "1823-01-01 00:00:01"), + "floor", + "1s", + ("NaT", "1823-01-01 00:00:01"), + ), + ( + ("NaT", "1823-01-01 00:00:01"), + "ceil", + "1s", + ("NaT", "1823-01-01 00:00:01"), + ), + ], + ) + def test_ceil_floor_edge(self, test_input, rounder, freq, expected): + dt = DatetimeIndex(list(test_input)) + func = getattr(dt, rounder) + result = func(freq) + expected = 
DatetimeIndex(list(expected)) + assert expected.equals(result) + + @pytest.mark.parametrize( + "start, index_freq, periods", + [("2018-01-01", "12H", 25), ("2018-01-01 0:0:0.124999", "1ns", 1000)], + ) + @pytest.mark.parametrize( + "round_freq", + [ + "2ns", + "3ns", + "4ns", + "5ns", + "6ns", + "7ns", + "250ns", + "500ns", + "750ns", + "1us", + "19us", + "250us", + "500us", + "750us", + "1s", + "2s", + "3s", + "12H", + "1D", + ], + ) + def test_round_int64(self, start, index_freq, periods, round_freq): + dt = date_range(start=start, freq=index_freq, periods=periods) + unit = to_offset(round_freq).nanos + + # test floor + result = dt.floor(round_freq) + diff = dt.asi8 - result.asi8 + mod = result.asi8 % unit + assert (mod == 0).all(), f"floor not a {round_freq} multiple" + assert (0 <= diff).all() and (diff < unit).all(), "floor error" + + # test ceil + result = dt.ceil(round_freq) + diff = result.asi8 - dt.asi8 + mod = result.asi8 % unit + assert (mod == 0).all(), f"ceil not a {round_freq} multiple" + assert (0 <= diff).all() and (diff < unit).all(), "ceil error" + + # test round + result = dt.round(round_freq) + diff = abs(result.asi8 - dt.asi8) + mod = result.asi8 % unit + assert (mod == 0).all(), f"round not a {round_freq} multiple" + assert (diff <= unit // 2).all(), "round error" + if unit % 2 == 0: + assert ( + result.asi8[diff == unit // 2] % 2 == 0 + ).all(), "round half to even error" + + # ---------------------------------------------------------------- + # DatetimeIndex.normalize + + def test_normalize(self): + rng = date_range("1/1/2000 9:30", periods=10, freq="D") + + result = rng.normalize() + expected = date_range("1/1/2000", periods=10, freq="D") + tm.assert_index_equal(result, expected) + + arr_ns = np.array([1380585623454345752, 1380585612343234312]).astype( + "datetime64[ns]" + ) + rng_ns = DatetimeIndex(arr_ns) + rng_ns_normalized = rng_ns.normalize() + + arr_ns = np.array([1380585600000000000, 1380585600000000000]).astype( + "datetime64[ns]" + ) + expected = DatetimeIndex(arr_ns) + tm.assert_index_equal(rng_ns_normalized, expected) + + assert result.is_normalized + assert not rng.is_normalized + + def test_normalize_nat(self): + dti = DatetimeIndex([pd.NaT, Timestamp("2018-01-01 01:00:00")]) + result = dti.normalize() + expected = DatetimeIndex([pd.NaT, Timestamp("2018-01-01")]) + tm.assert_index_equal(result, expected) + + +class TestDateTimeIndexToJulianDate: + def test_1700(self): + dr = date_range(start=Timestamp("1710-10-01"), periods=5, freq="D") + r1 = pd.Index([x.to_julian_date() for x in dr]) + r2 = dr.to_julian_date() + assert isinstance(r2, pd.Index) and r2.dtype == np.float64 + tm.assert_index_equal(r1, r2) + + def test_2000(self): + dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="D") + r1 = pd.Index([x.to_julian_date() for x in dr]) + r2 = dr.to_julian_date() + assert isinstance(r2, pd.Index) and r2.dtype == np.float64 + tm.assert_index_equal(r1, r2) + + def test_hour(self): + dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="H") + r1 = pd.Index([x.to_julian_date() for x in dr]) + r2 = dr.to_julian_date() + assert isinstance(r2, pd.Index) and r2.dtype == np.float64 + tm.assert_index_equal(r1, r2) + + def test_minute(self): + dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="T") + r1 = pd.Index([x.to_julian_date() for x in dr]) + r2 = dr.to_julian_date() + assert isinstance(r2, pd.Index) and r2.dtype == np.float64 + tm.assert_index_equal(r1, r2) + + def test_second(self): + dr = 
date_range(start=Timestamp("2000-02-27"), periods=5, freq="S") + r1 = pd.Index([x.to_julian_date() for x in dr]) + r2 = dr.to_julian_date() + assert isinstance(r2, pd.Index) and r2.dtype == np.float64 + tm.assert_index_equal(r1, r2) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_setops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_setops.py new file mode 100644 index 00000000..adf7acfa --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_setops.py @@ -0,0 +1,604 @@ +from datetime import datetime + +import numpy as np +import pytest +import pytz + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + Series, + bdate_range, + date_range, +) +import pandas._testing as tm + +from pandas.tseries.offsets import ( + BMonthEnd, + Minute, + MonthEnd, +) + +START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) + + +class TestDatetimeIndexSetOps: + tz = [ + None, + "UTC", + "Asia/Tokyo", + "US/Eastern", + "dateutil/Asia/Singapore", + "dateutil/US/Pacific", + ] + + # TODO: moved from test_datetimelike; dedup with version below + def test_union2(self, sort): + everything = tm.makeDateIndex(10) + first = everything[:5] + second = everything[5:] + union = first.union(second, sort=sort) + tm.assert_index_equal(union, everything) + + @pytest.mark.parametrize("box", [np.array, Series, list]) + def test_union3(self, sort, box): + everything = tm.makeDateIndex(10) + first = everything[:5] + second = everything[5:] + + # GH 10149 support listlike inputs other than Index objects + expected = first.union(second, sort=sort) + case = box(second.values) + result = first.union(case, sort=sort) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("tz", tz) + def test_union(self, tz, sort): + rng1 = date_range("1/1/2000", freq="D", periods=5, tz=tz) + other1 = date_range("1/6/2000", freq="D", periods=5, tz=tz) + expected1 = date_range("1/1/2000", freq="D", periods=10, tz=tz) + expected1_notsorted = DatetimeIndex(list(other1) + list(rng1)) + + rng2 = date_range("1/1/2000", freq="D", periods=5, tz=tz) + other2 = date_range("1/4/2000", freq="D", periods=5, tz=tz) + expected2 = date_range("1/1/2000", freq="D", periods=8, tz=tz) + expected2_notsorted = DatetimeIndex(list(other2) + list(rng2[:3])) + + rng3 = date_range("1/1/2000", freq="D", periods=5, tz=tz) + other3 = DatetimeIndex([], tz=tz) + expected3 = date_range("1/1/2000", freq="D", periods=5, tz=tz) + expected3_notsorted = rng3 + + for rng, other, exp, exp_notsorted in [ + (rng1, other1, expected1, expected1_notsorted), + (rng2, other2, expected2, expected2_notsorted), + (rng3, other3, expected3, expected3_notsorted), + ]: + result_union = rng.union(other, sort=sort) + tm.assert_index_equal(result_union, exp) + + result_union = other.union(rng, sort=sort) + if sort is None: + tm.assert_index_equal(result_union, exp) + else: + tm.assert_index_equal(result_union, exp_notsorted) + + def test_union_coverage(self, sort): + idx = DatetimeIndex(["2000-01-03", "2000-01-01", "2000-01-02"]) + ordered = DatetimeIndex(idx.sort_values(), freq="infer") + result = ordered.union(idx, sort=sort) + tm.assert_index_equal(result, ordered) + + result = ordered[:0].union(ordered, sort=sort) + tm.assert_index_equal(result, ordered) + assert result.freq == ordered.freq + + def test_union_bug_1730(self, sort): + rng_a = date_range("1/1/2012", periods=4, freq="3H") + rng_b = 
date_range("1/1/2012", periods=4, freq="4H") + + result = rng_a.union(rng_b, sort=sort) + exp = list(rng_a) + list(rng_b[1:]) + if sort is None: + exp = DatetimeIndex(sorted(exp)) + else: + exp = DatetimeIndex(exp) + tm.assert_index_equal(result, exp) + + def test_union_bug_1745(self, sort): + left = DatetimeIndex(["2012-05-11 15:19:49.695000"]) + right = DatetimeIndex( + [ + "2012-05-29 13:04:21.322000", + "2012-05-11 15:27:24.873000", + "2012-05-11 15:31:05.350000", + ] + ) + + result = left.union(right, sort=sort) + exp = DatetimeIndex( + [ + "2012-05-11 15:19:49.695000", + "2012-05-29 13:04:21.322000", + "2012-05-11 15:27:24.873000", + "2012-05-11 15:31:05.350000", + ] + ) + if sort is None: + exp = exp.sort_values() + tm.assert_index_equal(result, exp) + + def test_union_bug_4564(self, sort): + from pandas import DateOffset + + left = date_range("2013-01-01", "2013-02-01") + right = left + DateOffset(minutes=15) + + result = left.union(right, sort=sort) + exp = list(left) + list(right) + if sort is None: + exp = DatetimeIndex(sorted(exp)) + else: + exp = DatetimeIndex(exp) + tm.assert_index_equal(result, exp) + + def test_union_freq_both_none(self, sort): + # GH11086 + expected = bdate_range("20150101", periods=10) + expected._data.freq = None + + result = expected.union(expected, sort=sort) + tm.assert_index_equal(result, expected) + assert result.freq is None + + def test_union_freq_infer(self): + # When taking the union of two DatetimeIndexes, we infer + # a freq even if the arguments don't have freq. This matches + # TimedeltaIndex behavior. + dti = date_range("2016-01-01", periods=5) + left = dti[[0, 1, 3, 4]] + right = dti[[2, 3, 1]] + + assert left.freq is None + assert right.freq is None + + result = left.union(right) + tm.assert_index_equal(result, dti) + assert result.freq == "D" + + def test_union_dataframe_index(self): + rng1 = date_range("1/1/1999", "1/1/2012", freq="MS") + s1 = Series(np.random.default_rng(2).standard_normal(len(rng1)), rng1) + + rng2 = date_range("1/1/1980", "12/1/2001", freq="MS") + s2 = Series(np.random.default_rng(2).standard_normal(len(rng2)), rng2) + df = DataFrame({"s1": s1, "s2": s2}) + + exp = date_range("1/1/1980", "1/1/2012", freq="MS") + tm.assert_index_equal(df.index, exp) + + def test_union_with_DatetimeIndex(self, sort): + i1 = Index(np.arange(0, 20, 2, dtype=np.int64)) + i2 = date_range(start="2012-01-03 00:00:00", periods=10, freq="D") + # Works + i1.union(i2, sort=sort) + # Fails with "AttributeError: can't set attribute" + i2.union(i1, sort=sort) + + # TODO: moved from test_datetimelike; de-duplicate with version below + def test_intersection2(self): + first = tm.makeDateIndex(10) + second = first[5:] + intersect = first.intersection(second) + assert tm.equalContents(intersect, second) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + result = first.intersection(case) + assert tm.equalContents(result, second) + + third = Index(["a", "b", "c"]) + result = first.intersection(third) + expected = Index([], dtype=object) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "tz", [None, "Asia/Tokyo", "US/Eastern", "dateutil/US/Pacific"] + ) + def test_intersection(self, tz, sort): + # GH 4690 (with tz) + base = date_range("6/1/2000", "6/30/2000", freq="D", name="idx") + + # if target has the same name, it is preserved + rng2 = date_range("5/15/2000", "6/20/2000", freq="D", name="idx") + expected2 = date_range("6/1/2000", "6/20/2000", freq="D", name="idx") + + # 
if target name is different, it will be reset + rng3 = date_range("5/15/2000", "6/20/2000", freq="D", name="other") + expected3 = date_range("6/1/2000", "6/20/2000", freq="D", name=None) + + rng4 = date_range("7/1/2000", "7/31/2000", freq="D", name="idx") + expected4 = DatetimeIndex([], freq="D", name="idx") + + for rng, expected in [ + (rng2, expected2), + (rng3, expected3), + (rng4, expected4), + ]: + result = base.intersection(rng) + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + # non-monotonic + base = DatetimeIndex( + ["2011-01-05", "2011-01-04", "2011-01-02", "2011-01-03"], tz=tz, name="idx" + ) + + rng2 = DatetimeIndex( + ["2011-01-04", "2011-01-02", "2011-02-02", "2011-02-03"], tz=tz, name="idx" + ) + expected2 = DatetimeIndex(["2011-01-04", "2011-01-02"], tz=tz, name="idx") + + rng3 = DatetimeIndex( + ["2011-01-04", "2011-01-02", "2011-02-02", "2011-02-03"], + tz=tz, + name="other", + ) + expected3 = DatetimeIndex(["2011-01-04", "2011-01-02"], tz=tz, name=None) + + # GH 7880 + rng4 = date_range("7/1/2000", "7/31/2000", freq="D", tz=tz, name="idx") + expected4 = DatetimeIndex([], tz=tz, name="idx") + assert expected4.freq is None + + for rng, expected in [ + (rng2, expected2), + (rng3, expected3), + (rng4, expected4), + ]: + result = base.intersection(rng, sort=sort) + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + # parametrize over both anchored and non-anchored freqs, as they + # have different code paths + @pytest.mark.parametrize("freq", ["T", "B"]) + def test_intersection_empty(self, tz_aware_fixture, freq): + # empty same freq GH2129 + tz = tz_aware_fixture + rng = date_range("6/1/2000", "6/15/2000", freq=freq, tz=tz) + result = rng[0:0].intersection(rng) + assert len(result) == 0 + assert result.freq == rng.freq + + result = rng.intersection(rng[0:0]) + assert len(result) == 0 + assert result.freq == rng.freq + + # no overlap GH#33604 + check_freq = freq != "T" # We don't preserve freq on non-anchored offsets + result = rng[:3].intersection(rng[-3:]) + tm.assert_index_equal(result, rng[:0]) + if check_freq: + # We don't preserve freq on non-anchored offsets + assert result.freq == rng.freq + + # swapped left and right + result = rng[-3:].intersection(rng[:3]) + tm.assert_index_equal(result, rng[:0]) + if check_freq: + # We don't preserve freq on non-anchored offsets + assert result.freq == rng.freq + + def test_intersection_bug_1708(self): + from pandas import DateOffset + + index_1 = date_range("1/1/2012", periods=4, freq="12H") + index_2 = index_1 + DateOffset(hours=1) + + result = index_1.intersection(index_2) + assert len(result) == 0 + + @pytest.mark.parametrize("tz", tz) + def test_difference(self, tz, sort): + rng_dates = ["1/2/2000", "1/3/2000", "1/1/2000", "1/4/2000", "1/5/2000"] + + rng1 = DatetimeIndex(rng_dates, tz=tz) + other1 = date_range("1/6/2000", freq="D", periods=5, tz=tz) + expected1 = DatetimeIndex(rng_dates, tz=tz) + + rng2 = DatetimeIndex(rng_dates, tz=tz) + other2 = date_range("1/4/2000", freq="D", periods=5, tz=tz) + expected2 = DatetimeIndex(rng_dates[:3], tz=tz) + + rng3 = DatetimeIndex(rng_dates, tz=tz) + other3 = DatetimeIndex([], tz=tz) + expected3 = DatetimeIndex(rng_dates, tz=tz) + + for rng, other, expected in [ + (rng1, other1, expected1), + (rng2, other2, expected2), + (rng3, other3, expected3), + ]: + result_diff = rng.difference(other, sort) + if sort is None and len(other): + # We don't sort (yet?)
when empty GH#24959 + expected = expected.sort_values() + tm.assert_index_equal(result_diff, expected) + + def test_difference_freq(self, sort): + # GH14323: difference of DatetimeIndex should not preserve frequency + + index = date_range("20160920", "20160925", freq="D") + other = date_range("20160921", "20160924", freq="D") + expected = DatetimeIndex(["20160920", "20160925"], freq=None) + idx_diff = index.difference(other, sort) + tm.assert_index_equal(idx_diff, expected) + tm.assert_attr_equal("freq", idx_diff, expected) + + other = date_range("20160922", "20160925", freq="D") + idx_diff = index.difference(other, sort) + expected = DatetimeIndex(["20160920", "20160921"], freq=None) + tm.assert_index_equal(idx_diff, expected) + tm.assert_attr_equal("freq", idx_diff, expected) + + def test_datetimeindex_diff(self, sort): + dti1 = date_range(freq="Q-JAN", start=datetime(1997, 12, 31), periods=100) + dti2 = date_range(freq="Q-JAN", start=datetime(1997, 12, 31), periods=98) + assert len(dti1.difference(dti2, sort)) == 2 + + @pytest.mark.parametrize("tz", [None, "Asia/Tokyo", "US/Eastern"]) + def test_setops_preserve_freq(self, tz): + rng = date_range("1/1/2000", "1/1/2002", name="idx", tz=tz) + + result = rng[:50].union(rng[50:100]) + assert result.name == rng.name + assert result.freq == rng.freq + assert result.tz == rng.tz + + result = rng[:50].union(rng[30:100]) + assert result.name == rng.name + assert result.freq == rng.freq + assert result.tz == rng.tz + + result = rng[:50].union(rng[60:100]) + assert result.name == rng.name + assert result.freq is None + assert result.tz == rng.tz + + result = rng[:50].intersection(rng[25:75]) + assert result.name == rng.name + assert result.freqstr == "D" + assert result.tz == rng.tz + + nofreq = DatetimeIndex(list(rng[25:75]), name="other") + result = rng[:50].union(nofreq) + assert result.name is None + assert result.freq == rng.freq + assert result.tz == rng.tz + + result = rng[:50].intersection(nofreq) + assert result.name is None + assert result.freq == rng.freq + assert result.tz == rng.tz + + def test_intersection_non_tick_no_fastpath(self): + # GH#42104 + dti = DatetimeIndex( + [ + "2018-12-31", + "2019-03-31", + "2019-06-30", + "2019-09-30", + "2019-12-31", + "2020-03-31", + ], + freq="Q-DEC", + ) + result = dti[::2].intersection(dti[1::2]) + expected = dti[:0] + tm.assert_index_equal(result, expected) + + +class TestBusinessDatetimeIndex: + def test_union(self, sort): + rng = bdate_range(START, END) + # overlapping + left = rng[:10] + right = rng[5:10] + + the_union = left.union(right, sort=sort) + assert isinstance(the_union, DatetimeIndex) + + # non-overlapping, gap in middle + left = rng[:5] + right = rng[10:] + + the_union = left.union(right, sort=sort) + assert isinstance(the_union, Index) + + # non-overlapping, no gap + left = rng[:5] + right = rng[5:10] + + the_union = left.union(right, sort=sort) + assert isinstance(the_union, DatetimeIndex) + + # order does not matter + if sort is None: + tm.assert_index_equal(right.union(left, sort=sort), the_union) + else: + expected = DatetimeIndex(list(right) + list(left)) + tm.assert_index_equal(right.union(left, sort=sort), expected) + + # overlapping, but different offset + rng = date_range(START, END, freq=BMonthEnd()) + + the_union = rng.union(rng, sort=sort) + assert isinstance(the_union, DatetimeIndex) + + def test_union_not_cacheable(self, sort): + rng = date_range("1/1/2000", periods=50, freq=Minute()) + rng1 = rng[10:] + rng2 = rng[:25] + the_union = rng1.union(rng2, sort=sort) + if 
sort is None: + tm.assert_index_equal(the_union, rng) + else: + expected = DatetimeIndex(list(rng[10:]) + list(rng[:10])) + tm.assert_index_equal(the_union, expected) + + rng1 = rng[10:] + rng2 = rng[15:35] + the_union = rng1.union(rng2, sort=sort) + expected = rng[10:] + tm.assert_index_equal(the_union, expected) + + def test_intersection(self): + rng = date_range("1/1/2000", periods=50, freq=Minute()) + rng1 = rng[10:] + rng2 = rng[:25] + the_int = rng1.intersection(rng2) + expected = rng[10:25] + tm.assert_index_equal(the_int, expected) + assert isinstance(the_int, DatetimeIndex) + assert the_int.freq == rng.freq + + the_int = rng1.intersection(rng2.view(DatetimeIndex)) + tm.assert_index_equal(the_int, expected) + + # non-overlapping + the_int = rng[:10].intersection(rng[10:]) + expected = DatetimeIndex([]) + tm.assert_index_equal(the_int, expected) + + def test_intersection_bug(self): + # GH #771 + a = bdate_range("11/30/2011", "12/31/2011") + b = bdate_range("12/10/2011", "12/20/2011") + result = a.intersection(b) + tm.assert_index_equal(result, b) + assert result.freq == b.freq + + def test_intersection_list(self): + # GH#35876 + # values is not an Index -> no name -> retain "a" + values = [pd.Timestamp("2020-01-01"), pd.Timestamp("2020-02-01")] + idx = DatetimeIndex(values, name="a") + res = idx.intersection(values) + tm.assert_index_equal(res, idx) + + def test_month_range_union_tz_pytz(self, sort): + from pytz import timezone + + tz = timezone("US/Eastern") + + early_start = datetime(2011, 1, 1) + early_end = datetime(2011, 3, 1) + + late_start = datetime(2011, 3, 1) + late_end = datetime(2011, 5, 1) + + early_dr = date_range(start=early_start, end=early_end, tz=tz, freq=MonthEnd()) + late_dr = date_range(start=late_start, end=late_end, tz=tz, freq=MonthEnd()) + + early_dr.union(late_dr, sort=sort) + + @td.skip_if_windows + def test_month_range_union_tz_dateutil(self, sort): + from pandas._libs.tslibs.timezones import dateutil_gettz + + tz = dateutil_gettz("US/Eastern") + + early_start = datetime(2011, 1, 1) + early_end = datetime(2011, 3, 1) + + late_start = datetime(2011, 3, 1) + late_end = datetime(2011, 5, 1) + + early_dr = date_range(start=early_start, end=early_end, tz=tz, freq=MonthEnd()) + late_dr = date_range(start=late_start, end=late_end, tz=tz, freq=MonthEnd()) + + early_dr.union(late_dr, sort=sort) + + @pytest.mark.parametrize("sort", [False, None]) + def test_intersection_duplicates(self, sort): + # GH#38196 + idx1 = Index( + [ + pd.Timestamp("2019-12-13"), + pd.Timestamp("2019-12-12"), + pd.Timestamp("2019-12-12"), + ] + ) + result = idx1.intersection(idx1, sort=sort) + expected = Index([pd.Timestamp("2019-12-13"), pd.Timestamp("2019-12-12")]) + tm.assert_index_equal(result, expected) + + +class TestCustomDatetimeIndex: + def test_union(self, sort): + # overlapping + rng = bdate_range(START, END, freq="C") + left = rng[:10] + right = rng[5:10] + + the_union = left.union(right, sort=sort) + assert isinstance(the_union, DatetimeIndex) + + # non-overlapping, gap in middle + left = rng[:5] + right = rng[10:] + + the_union = left.union(right, sort) + assert isinstance(the_union, Index) + + # non-overlapping, no gap + left = rng[:5] + right = rng[5:10] + + the_union = left.union(right, sort=sort) + assert isinstance(the_union, DatetimeIndex) + + # order does not matter + if sort is None: + tm.assert_index_equal(right.union(left, sort=sort), the_union) + + # overlapping, but different offset + rng = date_range(START, END, freq=BMonthEnd()) + + the_union = rng.union(rng, 
sort=sort) + assert isinstance(the_union, DatetimeIndex) + + def test_intersection_bug(self): + # GH #771 + a = bdate_range("11/30/2011", "12/31/2011", freq="C") + b = bdate_range("12/10/2011", "12/20/2011", freq="C") + result = a.intersection(b) + tm.assert_index_equal(result, b) + assert result.freq == b.freq + + @pytest.mark.parametrize( + "tz", [None, "UTC", "Europe/Berlin", pytz.FixedOffset(-60)] + ) + def test_intersection_dst_transition(self, tz): + # GH 46702: Europe/Berlin has DST transition + idx1 = date_range("2020-03-27", periods=5, freq="D", tz=tz) + idx2 = date_range("2020-03-30", periods=5, freq="D", tz=tz) + result = idx1.intersection(idx2) + expected = date_range("2020-03-30", periods=2, freq="D", tz=tz) + tm.assert_index_equal(result, expected) + + # GH#45863 same problem for union + index1 = date_range("2021-10-28", periods=3, freq="D", tz="Europe/London") + index2 = date_range("2021-10-30", periods=4, freq="D", tz="Europe/London") + result = index1.union(index2) + expected = date_range("2021-10-28", periods=6, freq="D", tz="Europe/London") + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_timezones.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_timezones.py new file mode 100644 index 00000000..09b06ecd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_timezones.py @@ -0,0 +1,1214 @@ +""" +Tests for DatetimeIndex timezone-related methods +""" +from datetime import ( + date, + datetime, + time, + timedelta, + timezone, + tzinfo, +) + +import dateutil +from dateutil.tz import ( + gettz, + tzlocal, +) +import numpy as np +import pytest +import pytz + +try: + from zoneinfo import ZoneInfo +except ImportError: + # Cannot assign to a type [misc] + ZoneInfo = None # type: ignore[misc, assignment] + +from pandas._libs.tslibs import ( + conversion, + timezones, +) +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DatetimeIndex, + Index, + Timestamp, + bdate_range, + date_range, + isna, + to_datetime, +) +import pandas._testing as tm + + +class FixedOffset(tzinfo): + """Fixed offset in minutes east from UTC.""" + + def __init__(self, offset, name) -> None: + self.__offset = timedelta(minutes=offset) + self.__name = name + + def utcoffset(self, dt): + return self.__offset + + def tzname(self, dt): + return self.__name + + def dst(self, dt): + return timedelta(0) + + +fixed_off = FixedOffset(-420, "-07:00") +fixed_off_no_name = FixedOffset(-330, None) + + +class TestDatetimeIndexTimezones: + # ------------------------------------------------------------- + # DatetimeIndex.tz_convert + def test_tz_convert_nat(self): + # GH#5546 + dates = [pd.NaT] + idx = DatetimeIndex(dates) + idx = idx.tz_localize("US/Pacific") + tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific")) + idx = idx.tz_convert("US/Eastern") + tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern")) + idx = idx.tz_convert("UTC") + tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC")) + + dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT] + idx = DatetimeIndex(dates) + idx = idx.tz_localize("US/Pacific") + tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific")) + idx = idx.tz_convert("US/Eastern") + expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT] + tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) + + idx = idx + pd.offsets.Hour(5) + expected = ["2010-12-01 
08:00", "2010-12-02 08:00", pd.NaT] + tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) + idx = idx.tz_convert("US/Pacific") + expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT] + tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific")) + + idx = idx + np.timedelta64(3, "h") + expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT] + tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific")) + + idx = idx.tz_convert("US/Eastern") + expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT] + tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) + + @pytest.mark.parametrize("prefix", ["", "dateutil/"]) + def test_dti_tz_convert_compat_timestamp(self, prefix): + strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] + idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern") + + conv = idx[0].tz_convert(prefix + "US/Pacific") + expected = idx.tz_convert(prefix + "US/Pacific")[0] + + assert conv == expected + + def test_dti_tz_convert_hour_overflow_dst(self): + # Regression test for: + # https://github.com/pandas-dev/pandas/issues/13306 + + # sorted case US/Eastern -> UTC + ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"] + tt = DatetimeIndex(ts).tz_localize("US/Eastern") + ut = tt.tz_convert("UTC") + expected = Index([13, 14, 13], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + # sorted case UTC -> US/Eastern + ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"] + tt = DatetimeIndex(ts).tz_localize("UTC") + ut = tt.tz_convert("US/Eastern") + expected = Index([9, 9, 9], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + # unsorted case US/Eastern -> UTC + ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"] + tt = DatetimeIndex(ts).tz_localize("US/Eastern") + ut = tt.tz_convert("UTC") + expected = Index([13, 14, 13], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + # unsorted case UTC -> US/Eastern + ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"] + tt = DatetimeIndex(ts).tz_localize("UTC") + ut = tt.tz_convert("US/Eastern") + expected = Index([9, 9, 9], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz): + # Regression test for GH#13306 + + # sorted case US/Eastern -> UTC + ts = [ + Timestamp("2008-05-12 09:50:00", tz=tz), + Timestamp("2008-12-12 09:50:35", tz=tz), + Timestamp("2009-05-12 09:50:32", tz=tz), + ] + tt = DatetimeIndex(ts) + ut = tt.tz_convert("UTC") + expected = Index([13, 14, 13], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + # sorted case UTC -> US/Eastern + ts = [ + Timestamp("2008-05-12 13:50:00", tz="UTC"), + Timestamp("2008-12-12 14:50:35", tz="UTC"), + Timestamp("2009-05-12 13:50:32", tz="UTC"), + ] + tt = DatetimeIndex(ts) + ut = tt.tz_convert("US/Eastern") + expected = Index([9, 9, 9], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + # unsorted case US/Eastern -> UTC + ts = [ + Timestamp("2008-05-12 09:50:00", tz=tz), + Timestamp("2008-12-12 09:50:35", tz=tz), + Timestamp("2008-05-12 09:50:32", tz=tz), + ] + tt = DatetimeIndex(ts) + ut = tt.tz_convert("UTC") + expected = Index([13, 14, 13], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + # unsorted case UTC -> US/Eastern + ts = [ + Timestamp("2008-05-12 13:50:00", tz="UTC"), + Timestamp("2008-12-12 14:50:35", tz="UTC"), + 
Timestamp("2008-05-12 13:50:32", tz="UTC"), + ] + tt = DatetimeIndex(ts) + ut = tt.tz_convert("US/Eastern") + expected = Index([9, 9, 9], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + @pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)]) + def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n): + # Regression test for tslib.tz_convert(vals, tz1, tz2). + # See https://github.com/pandas-dev/pandas/issues/4496 for details. + idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq) + idx = idx.tz_localize("UTC") + idx = idx.tz_convert("Europe/Moscow") + + expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1])) + tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) + + def test_dti_tz_convert_dst(self): + for freq, n in [("H", 1), ("T", 60), ("S", 3600)]: + # Start DST + idx = date_range( + "2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC" + ) + idx = idx.tz_convert("US/Eastern") + expected = np.repeat( + np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]), + np.array([n, n, n, n, n, n, n, n, n, n, 1]), + ) + tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) + + idx = date_range( + "2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern" + ) + idx = idx.tz_convert("UTC") + expected = np.repeat( + np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), + np.array([n, n, n, n, n, n, n, n, n, n, 1]), + ) + tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) + + # End DST + idx = date_range( + "2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC" + ) + idx = idx.tz_convert("US/Eastern") + expected = np.repeat( + np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]), + np.array([n, n, n, n, n, n, n, n, n, n, 1]), + ) + tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) + + idx = date_range( + "2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern" + ) + idx = idx.tz_convert("UTC") + expected = np.repeat( + np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), + np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]), + ) + tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) + + # daily + # Start DST + idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC") + idx = idx.tz_convert("US/Eastern") + tm.assert_index_equal(idx.hour, Index([19, 19], dtype=np.int32)) + + idx = date_range( + "2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern" + ) + idx = idx.tz_convert("UTC") + tm.assert_index_equal(idx.hour, Index([5, 5], dtype=np.int32)) + + # End DST + idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC") + idx = idx.tz_convert("US/Eastern") + tm.assert_index_equal(idx.hour, Index([20, 20], dtype=np.int32)) + + idx = date_range( + "2014-11-01 00:00", "2014-11-02 000:00", freq="D", tz="US/Eastern" + ) + idx = idx.tz_convert("UTC") + tm.assert_index_equal(idx.hour, Index([4, 4], dtype=np.int32)) + + def test_tz_convert_roundtrip(self, tz_aware_fixture): + tz = tz_aware_fixture + idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="M", tz="UTC") + exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="M") + + idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC") + exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D") + + idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="H", tz="UTC") + exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="H") + + idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="T", tz="UTC") + exp4 = 
date_range(start="2014-08-01", end="2014-10-31", freq="T") + + for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]: + converted = idx.tz_convert(tz) + reset = converted.tz_convert(None) + tm.assert_index_equal(reset, expected) + assert reset.tzinfo is None + expected = converted.tz_convert("UTC").tz_localize(None) + expected = expected._with_freq("infer") + tm.assert_index_equal(reset, expected) + + def test_dti_tz_convert_tzlocal(self): + # GH#13583 + # tz_convert doesn't affect to internal + dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC") + dti2 = dti.tz_convert(dateutil.tz.tzlocal()) + tm.assert_numpy_array_equal(dti2.asi8, dti.asi8) + + dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal()) + dti2 = dti.tz_convert(None) + tm.assert_numpy_array_equal(dti2.asi8, dti.asi8) + + @pytest.mark.parametrize( + "tz", + [ + "US/Eastern", + "dateutil/US/Eastern", + pytz.timezone("US/Eastern"), + gettz("US/Eastern"), + ], + ) + def test_dti_tz_convert_utc_to_local_no_modify(self, tz): + rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc") + rng_eastern = rng.tz_convert(tz) + + # Values are unmodified + tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8) + + assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz)) + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_tz_convert_unsorted(self, tzstr): + dr = date_range("2012-03-09", freq="H", periods=100, tz="utc") + dr = dr.tz_convert(tzstr) + + result = dr[::-1].hour + exp = dr.hour[::-1] + tm.assert_almost_equal(result, exp) + + # ------------------------------------------------------------- + # DatetimeIndex.tz_localize + + def test_tz_localize_utc_copies(self, utc_fixture): + # GH#46460 + times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"] + index = DatetimeIndex(times) + + res = index.tz_localize(utc_fixture) + assert not tm.shares_memory(res, index) + + res2 = index._data.tz_localize(utc_fixture) + assert not tm.shares_memory(index._data, res2) + + def test_dti_tz_localize_nonexistent_raise_coerce(self): + # GH#13057 + times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"] + index = DatetimeIndex(times) + tz = "US/Eastern" + with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)): + index.tz_localize(tz=tz) + + with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)): + index.tz_localize(tz=tz, nonexistent="raise") + + result = index.tz_localize(tz=tz, nonexistent="NaT") + test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"] + dti = to_datetime(test_times, utc=True) + expected = dti.tz_convert("US/Eastern") + tm.assert_index_equal(result, expected) + + easts = [pytz.timezone("US/Eastern"), gettz("US/Eastern")] + if ZoneInfo is not None: + try: + tz = ZoneInfo("US/Eastern") + except KeyError: + # no tzdata + pass + else: + easts.append(tz) + + @pytest.mark.parametrize("tz", easts) + def test_dti_tz_localize_ambiguous_infer(self, tz): + # November 6, 2011, fall back, repeat 2 AM hour + # With no repeated hours, we cannot infer the transition + dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour()) + with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): + dr.tz_localize(tz) + + # With repeated hours, we can infer the transition + dr = date_range( + datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz + ) + times = [ + "11/06/2011 00:00", + "11/06/2011 01:00", + "11/06/2011 01:00", + 
"11/06/2011 02:00", + "11/06/2011 03:00", + ] + di = DatetimeIndex(times) + localized = di.tz_localize(tz, ambiguous="infer") + expected = dr._with_freq(None) + tm.assert_index_equal(expected, localized) + tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous="infer")) + + # When there is no dst transition, nothing special happens + dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour()) + localized = dr.tz_localize(tz) + localized_infer = dr.tz_localize(tz, ambiguous="infer") + tm.assert_index_equal(localized, localized_infer) + + @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) + def test_dti_tz_localize_ambiguous_times(self, tz): + # March 13, 2011, spring forward, skip from 2 AM to 3 AM + dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, freq=pd.offsets.Hour()) + with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:30:00"): + dr.tz_localize(tz) + + # after dst transition, it works + dr = date_range( + datetime(2011, 3, 13, 3, 30), periods=3, freq=pd.offsets.Hour(), tz=tz + ) + + # November 6, 2011, fall back, repeat 2 AM hour + dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, freq=pd.offsets.Hour()) + with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): + dr.tz_localize(tz) + + # UTC is OK + dr = date_range( + datetime(2011, 3, 13), periods=48, freq=pd.offsets.Minute(30), tz=pytz.utc + ) + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_tz_localize_pass_dates_to_utc(self, tzstr): + strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] + + idx = DatetimeIndex(strdates) + conv = idx.tz_localize(tzstr) + + fromdates = DatetimeIndex(strdates, tz=tzstr) + + assert conv.tz == fromdates.tz + tm.assert_numpy_array_equal(conv.values, fromdates.values) + + @pytest.mark.parametrize("prefix", ["", "dateutil/"]) + def test_dti_tz_localize(self, prefix): + tzstr = prefix + "US/Eastern" + dti = date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="L") + dti2 = dti.tz_localize(tzstr) + + dti_utc = date_range( + start="1/1/2005 05:00", end="1/1/2005 5:00:30.256", freq="L", tz="utc" + ) + + tm.assert_numpy_array_equal(dti2.values, dti_utc.values) + + dti3 = dti2.tz_convert(prefix + "US/Pacific") + tm.assert_numpy_array_equal(dti3.values, dti_utc.values) + + dti = date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="L") + with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): + dti.tz_localize(tzstr) + + dti = date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="L") + with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:00:00"): + dti.tz_localize(tzstr) + + @pytest.mark.parametrize( + "tz", + [ + "US/Eastern", + "dateutil/US/Eastern", + pytz.timezone("US/Eastern"), + gettz("US/Eastern"), + ], + ) + def test_dti_tz_localize_utc_conversion(self, tz): + # Localizing to time zone should: + # 1) check for DST ambiguities + # 2) convert to UTC + + rng = date_range("3/10/2012", "3/11/2012", freq="30T") + + converted = rng.tz_localize(tz) + expected_naive = rng + pd.offsets.Hour(5) + tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8) + + # DST ambiguity, this should fail + rng = date_range("3/11/2012", "3/12/2012", freq="30T") + # Is this really how it should fail?? 
+ with pytest.raises(pytz.NonExistentTimeError, match="2012-03-11 02:00:00"): + rng.tz_localize(tz) + + def test_dti_tz_localize_roundtrip(self, tz_aware_fixture): + # note: this tz tests that a tz-naive index can be localized + # and de-localized successfully, when there are no DST transitions + # in the range. + idx = date_range(start="2014-06-01", end="2014-08-30", freq="15T") + tz = tz_aware_fixture + localized = idx.tz_localize(tz) + # can't localize a tz-aware object + with pytest.raises( + TypeError, match="Already tz-aware, use tz_convert to convert" + ): + localized.tz_localize(tz) + reset = localized.tz_localize(None) + assert reset.tzinfo is None + expected = idx._with_freq(None) + tm.assert_index_equal(reset, expected) + + def test_dti_tz_localize_naive(self): + rng = date_range("1/1/2011", periods=100, freq="H") + + conv = rng.tz_localize("US/Pacific") + exp = date_range("1/1/2011", periods=100, freq="H", tz="US/Pacific") + + tm.assert_index_equal(conv, exp._with_freq(None)) + + def test_dti_tz_localize_tzlocal(self): + # GH#13583 + offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1)) + offset = int(offset.total_seconds() * 1000000000) + + dti = date_range(start="2001-01-01", end="2001-03-01") + dti2 = dti.tz_localize(dateutil.tz.tzlocal()) + tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8) + + dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal()) + dti2 = dti.tz_localize(None) + tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8) + + @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) + def test_dti_tz_localize_ambiguous_nat(self, tz): + times = [ + "11/06/2011 00:00", + "11/06/2011 01:00", + "11/06/2011 01:00", + "11/06/2011 02:00", + "11/06/2011 03:00", + ] + di = DatetimeIndex(times) + localized = di.tz_localize(tz, ambiguous="NaT") + + times = [ + "11/06/2011 00:00", + np.nan, + np.nan, + "11/06/2011 02:00", + "11/06/2011 03:00", + ] + di_test = DatetimeIndex(times, tz="US/Eastern") + + # left dtype is datetime64[ns, US/Eastern] + # right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')] + tm.assert_numpy_array_equal(di_test.values, localized.values) + + @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) + def test_dti_tz_localize_ambiguous_flags(self, tz): + # November 6, 2011, fall back, repeat 2 AM hour + + # Pass in flags to determine right dst transition + dr = date_range( + datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz + ) + times = [ + "11/06/2011 00:00", + "11/06/2011 01:00", + "11/06/2011 01:00", + "11/06/2011 02:00", + "11/06/2011 03:00", + ] + + # Test tz_localize + di = DatetimeIndex(times) + is_dst = [1, 1, 0, 0, 0] + localized = di.tz_localize(tz, ambiguous=is_dst) + expected = dr._with_freq(None) + tm.assert_index_equal(expected, localized) + tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous=is_dst)) + + localized = di.tz_localize(tz, ambiguous=np.array(is_dst)) + tm.assert_index_equal(dr, localized) + + localized = di.tz_localize(tz, ambiguous=np.array(is_dst).astype("bool")) + tm.assert_index_equal(dr, localized) + + # Test constructor + localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst) + tm.assert_index_equal(dr, localized) + + # Test duplicate times where inferring the dst fails + times += times + di = DatetimeIndex(times) + + # When the sizes are incompatible, make sure error is raised + msg = "Length of ambiguous bool-array must be the same size as vals" + with 
pytest.raises(Exception, match=msg): + di.tz_localize(tz, ambiguous=is_dst) + + # When sizes are compatible and there are repeats ('infer' won't work) + is_dst = np.hstack((is_dst, is_dst)) + localized = di.tz_localize(tz, ambiguous=is_dst) + dr = dr.append(dr) + tm.assert_index_equal(dr, localized) + + # When there is no dst transition, nothing special happens + dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour()) + is_dst = np.array([1] * 10) + localized = dr.tz_localize(tz) + localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst) + tm.assert_index_equal(localized, localized_is_dst) + + # TODO: belongs outside tz_localize tests? + @pytest.mark.parametrize("tz", ["Europe/London", "dateutil/Europe/London"]) + def test_dti_construction_ambiguous_endpoint(self, tz): + # construction with an ambiguous end-point + # GH#11626 + + with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): + date_range( + "2013-10-26 23:00", "2013-10-27 01:00", tz="Europe/London", freq="H" + ) + + times = date_range( + "2013-10-26 23:00", "2013-10-27 01:00", freq="H", tz=tz, ambiguous="infer" + ) + assert times[0] == Timestamp("2013-10-26 23:00", tz=tz) + assert times[-1] == Timestamp("2013-10-27 01:00:00+0000", tz=tz) + + @pytest.mark.parametrize( + "tz, option, expected", + [ + ["US/Pacific", "shift_forward", "2019-03-10 03:00"], + ["dateutil/US/Pacific", "shift_forward", "2019-03-10 03:00"], + ["US/Pacific", "shift_backward", "2019-03-10 01:00"], + ["dateutil/US/Pacific", "shift_backward", "2019-03-10 01:00"], + ["US/Pacific", timedelta(hours=1), "2019-03-10 03:00"], + ], + ) + def test_dti_construction_nonexistent_endpoint(self, tz, option, expected): + # construction with a nonexistent end-point + + with pytest.raises(pytz.NonExistentTimeError, match="2019-03-10 02:00:00"): + date_range( + "2019-03-10 00:00", "2019-03-10 02:00", tz="US/Pacific", freq="H" + ) + + times = date_range( + "2019-03-10 00:00", "2019-03-10 02:00", freq="H", tz=tz, nonexistent=option + ) + assert times[-1] == Timestamp(expected, tz=tz) + + def test_dti_tz_localize_bdate_range(self): + dr = bdate_range("1/1/2009", "1/1/2010") + dr_utc = bdate_range("1/1/2009", "1/1/2010", tz=pytz.utc) + localized = dr.tz_localize(pytz.utc) + tm.assert_index_equal(dr_utc, localized) + + @pytest.mark.parametrize( + "start_ts, tz, end_ts, shift", + [ + ["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"], + [ + "2015-03-29 02:20:00", + "Europe/Warsaw", + "2015-03-29 01:59:59.999999999", + "backward", + ], + [ + "2015-03-29 02:20:00", + "Europe/Warsaw", + "2015-03-29 03:20:00", + timedelta(hours=1), + ], + [ + "2015-03-29 02:20:00", + "Europe/Warsaw", + "2015-03-29 01:20:00", + timedelta(hours=-1), + ], + ["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"], + [ + "2018-03-11 02:33:00", + "US/Pacific", + "2018-03-11 01:59:59.999999999", + "backward", + ], + [ + "2018-03-11 02:33:00", + "US/Pacific", + "2018-03-11 03:33:00", + timedelta(hours=1), + ], + [ + "2018-03-11 02:33:00", + "US/Pacific", + "2018-03-11 01:33:00", + timedelta(hours=-1), + ], + ], + ) + @pytest.mark.parametrize("tz_type", ["", "dateutil/"]) + def test_dti_tz_localize_nonexistent_shift( + self, start_ts, tz, end_ts, shift, tz_type + ): + # GH 8917 + tz = tz_type + tz + if isinstance(shift, str): + shift = "shift_" + shift + dti = DatetimeIndex([Timestamp(start_ts)]) + result = dti.tz_localize(tz, nonexistent=shift) + expected = DatetimeIndex([Timestamp(end_ts)]).tz_localize(tz) + 
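# Reading off the parametrized cases above: "shift_forward" snaps a
+ # nonexistent wall time to the first valid instant after the gap,
+ # "shift_backward" to the last instant before it (01:59:59.999999999),
+ # and a timedelta shifts by exactly that amount, e.g.
+ # Timestamp("2015-03-29 02:20:00").tz_localize(
+ #     "Europe/Warsaw", nonexistent="shift_forward"
+ # ) -> "2015-03-29 03:00:00+02:00"
+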
tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("offset", [-1, 1]) + def test_dti_tz_localize_nonexistent_shift_invalid(self, offset, warsaw): + # GH 8917 + tz = warsaw + dti = DatetimeIndex([Timestamp("2015-03-29 02:20:00")]) + msg = "The provided timedelta will relocalize on a nonexistent time" + with pytest.raises(ValueError, match=msg): + dti.tz_localize(tz, nonexistent=timedelta(seconds=offset)) + + # ------------------------------------------------------------- + # DatetimeIndex.normalize + + def test_normalize_tz(self): + rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern") + + result = rng.normalize() # does not preserve freq + expected = date_range("1/1/2000", periods=10, freq="D", tz="US/Eastern") + tm.assert_index_equal(result, expected._with_freq(None)) + + assert result.is_normalized + assert not rng.is_normalized + + rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC") + + result = rng.normalize() + expected = date_range("1/1/2000", periods=10, freq="D", tz="UTC") + tm.assert_index_equal(result, expected) + + assert result.is_normalized + assert not rng.is_normalized + + rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal()) + result = rng.normalize() # does not preserve freq + expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal()) + tm.assert_index_equal(result, expected._with_freq(None)) + + assert result.is_normalized + assert not rng.is_normalized + + @td.skip_if_windows + @pytest.mark.parametrize( + "timezone", + [ + "US/Pacific", + "US/Eastern", + "UTC", + "Asia/Kolkata", + "Asia/Shanghai", + "Australia/Canberra", + ], + ) + def test_normalize_tz_local(self, timezone): + # GH#13459 + with tm.set_timezone(timezone): + rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal()) + + result = rng.normalize() + expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal()) + expected = expected._with_freq(None) + tm.assert_index_equal(result, expected) + + assert result.is_normalized + assert not rng.is_normalized + + # ------------------------------------------------------------ + # DatetimeIndex.__new__ + + @pytest.mark.parametrize("prefix", ["", "dateutil/"]) + def test_dti_constructor_static_tzinfo(self, prefix): + # it works! 
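+ # (i.e. constructing against a fixed-abbreviation zone such as "EST" or
+ # "dateutil/EST" should not raise; the attribute access and scalar lookup
+ # below exercise the static-tzinfo path.)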
+ index = DatetimeIndex([datetime(2012, 1, 1)], tz=prefix + "EST") + index.hour + index[0] + + def test_dti_constructor_with_fixed_tz(self): + off = FixedOffset(420, "+07:00") + start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off) + end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off) + rng = date_range(start=start, end=end) + assert off == rng.tz + + rng2 = date_range(start, periods=len(rng), tz=off) + tm.assert_index_equal(rng, rng2) + + rng3 = date_range("3/11/2012 05:00:00+07:00", "6/11/2012 05:00:00+07:00") + assert (rng.values == rng3.values).all() + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_convert_datetime_list(self, tzstr): + dr = date_range("2012-06-02", periods=10, tz=tzstr, name="foo") + dr2 = DatetimeIndex(list(dr), name="foo", freq="D") + tm.assert_index_equal(dr, dr2) + + def test_dti_construction_univalent(self): + rng = date_range("03/12/2012 00:00", periods=10, freq="W-FRI", tz="US/Eastern") + rng2 = DatetimeIndex(data=rng, tz="US/Eastern") + tm.assert_index_equal(rng, rng2) + + @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) + def test_dti_from_tzaware_datetime(self, tz): + d = [datetime(2012, 8, 19, tzinfo=tz)] + + index = DatetimeIndex(d) + assert timezones.tz_compare(index.tz, tz) + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_tz_constructors(self, tzstr): + """Test different DatetimeIndex constructions with timezone + Follow-up of GH#4229 + """ + arr = ["11/10/2005 08:00:00", "11/10/2005 09:00:00"] + + idx1 = to_datetime(arr).tz_localize(tzstr) + idx2 = date_range(start="2005-11-10 08:00:00", freq="H", periods=2, tz=tzstr) + idx2 = idx2._with_freq(None) # the others all have freq=None + idx3 = DatetimeIndex(arr, tz=tzstr) + idx4 = DatetimeIndex(np.array(arr), tz=tzstr) + + for other in [idx2, idx3, idx4]: + tm.assert_index_equal(idx1, other) + + # ------------------------------------------------------------- + # Unsorted + + @pytest.mark.parametrize( + "dtype", + [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"], + ) + def test_date_accessor(self, dtype): + # Regression test for GH#21230 + expected = np.array([date(2018, 6, 4), pd.NaT]) + + index = DatetimeIndex(["2018-06-04 10:00:00", pd.NaT], dtype=dtype) + result = index.date + + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "dtype", + [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"], + ) + def test_time_accessor(self, dtype): + # Regression test for GH#21267 + expected = np.array([time(10, 20, 30), pd.NaT]) + + index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], dtype=dtype) + result = index.time + + tm.assert_numpy_array_equal(result, expected) + + def test_timetz_accessor(self, tz_naive_fixture): + # GH21358 + tz = timezones.maybe_get_tz(tz_naive_fixture) + + expected = np.array([time(10, 20, 30, tzinfo=tz), pd.NaT]) + + index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], tz=tz) + result = index.timetz + + tm.assert_numpy_array_equal(result, expected) + + def test_dti_drop_dont_lose_tz(self): + # GH#2621 + ind = date_range("2012-12-01", periods=10, tz="utc") + ind = ind.drop(ind[-1]) + + assert ind.tz is not None + + def test_dti_tz_conversion_freq(self, tz_naive_fixture): + # GH25241 + t3 = DatetimeIndex(["2019-01-01 10:00"], freq="H") + assert t3.tz_localize(tz=tz_naive_fixture).freq == t3.freq + t4 = DatetimeIndex(["2019-01-02 12:00"], tz="UTC", freq="T") + assert t4.tz_convert(tz="UTC").freq == 
t4.freq + + def test_drop_dst_boundary(self): + # see gh-18031 + tz = "Europe/Brussels" + freq = "15min" + + start = Timestamp("201710290100", tz=tz) + end = Timestamp("201710290300", tz=tz) + index = date_range(start=start, end=end, freq=freq) + + expected = DatetimeIndex( + [ + "201710290115", + "201710290130", + "201710290145", + "201710290200", + "201710290215", + "201710290230", + "201710290245", + "201710290200", + "201710290215", + "201710290230", + "201710290245", + "201710290300", + ], + tz=tz, + freq=freq, + ambiguous=[ + True, + True, + True, + True, + True, + True, + True, + False, + False, + False, + False, + False, + ], + ) + result = index.drop(index[0]) + tm.assert_index_equal(result, expected) + + def test_date_range_localize(self): + rng = date_range("3/11/2012 03:00", periods=15, freq="H", tz="US/Eastern") + rng2 = DatetimeIndex(["3/11/2012 03:00", "3/11/2012 04:00"], tz="US/Eastern") + rng3 = date_range("3/11/2012 03:00", periods=15, freq="H") + rng3 = rng3.tz_localize("US/Eastern") + + tm.assert_index_equal(rng._with_freq(None), rng3) + + # DST transition time + val = rng[0] + exp = Timestamp("3/11/2012 03:00", tz="US/Eastern") + + assert val.hour == 3 + assert exp.hour == 3 + assert val == exp # same UTC value + tm.assert_index_equal(rng[:2], rng2) + + # Right before the DST transition + rng = date_range("3/11/2012 00:00", periods=2, freq="H", tz="US/Eastern") + rng2 = DatetimeIndex( + ["3/11/2012 00:00", "3/11/2012 01:00"], tz="US/Eastern", freq="H" + ) + tm.assert_index_equal(rng, rng2) + exp = Timestamp("3/11/2012 00:00", tz="US/Eastern") + assert exp.hour == 0 + assert rng[0] == exp + exp = Timestamp("3/11/2012 01:00", tz="US/Eastern") + assert exp.hour == 1 + assert rng[1] == exp + + rng = date_range("3/11/2012 00:00", periods=10, freq="H", tz="US/Eastern") + assert rng[2].hour == 3 + + def test_timestamp_equality_different_timezones(self): + utc_range = date_range("1/1/2000", periods=20, tz="UTC") + eastern_range = utc_range.tz_convert("US/Eastern") + berlin_range = utc_range.tz_convert("Europe/Berlin") + + for a, b, c in zip(utc_range, eastern_range, berlin_range): + assert a == b + assert b == c + assert a == c + + assert (utc_range == eastern_range).all() + assert (utc_range == berlin_range).all() + assert (berlin_range == eastern_range).all() + + def test_dti_intersection(self): + rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") + + left = rng[10:90][::-1] + right = rng[20:80][::-1] + + assert left.tz == rng.tz + result = left.intersection(right) + assert result.tz == left.tz + + def test_dti_equals_with_tz(self): + left = date_range("1/1/2011", periods=100, freq="H", tz="utc") + right = date_range("1/1/2011", periods=100, freq="H", tz="US/Eastern") + + assert not left.equals(right) + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_tz_nat(self, tzstr): + idx = DatetimeIndex([Timestamp("2013-1-1", tz=tzstr), pd.NaT]) + + assert isna(idx[1]) + assert idx[0].tzinfo is not None + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_astype_asobject_tzinfos(self, tzstr): + # GH#1345 + + # dates around a dst transition + rng = date_range("2/13/2010", "5/6/2010", tz=tzstr) + + objs = rng.astype(object) + for i, x in enumerate(objs): + exval = rng[i] + assert x == exval + assert x.tzinfo == exval.tzinfo + + objs = rng.astype(object) + for i, x in enumerate(objs): + exval = rng[i] + assert x == exval + assert x.tzinfo == exval.tzinfo + + @pytest.mark.parametrize("tzstr", 
["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_with_timezone_repr(self, tzstr): + rng = date_range("4/13/2010", "5/6/2010") + + rng_eastern = rng.tz_localize(tzstr) + + rng_repr = repr(rng_eastern) + assert "2010-04-13 00:00:00" in rng_repr + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_take_dont_lose_meta(self, tzstr): + rng = date_range("1/1/2000", periods=20, tz=tzstr) + + result = rng.take(range(5)) + assert result.tz == rng.tz + assert result.freq == rng.freq + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_utc_box_timestamp_and_localize(self, tzstr): + tz = timezones.maybe_get_tz(tzstr) + + rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc") + rng_eastern = rng.tz_convert(tzstr) + + expected = rng[-1].astimezone(tz) + + stamp = rng_eastern[-1] + assert stamp == expected + assert stamp.tzinfo == expected.tzinfo + + # right tzinfo + rng = date_range("3/13/2012", "3/14/2012", freq="H", tz="utc") + rng_eastern = rng.tz_convert(tzstr) + # test not valid for dateutil timezones. + # assert 'EDT' in repr(rng_eastern[0].tzinfo) + assert "EDT" in repr(rng_eastern[0].tzinfo) or "tzfile" in repr( + rng_eastern[0].tzinfo + ) + + def test_dti_to_pydatetime(self): + dt = dateutil.parser.parse("2012-06-13T01:39:00Z") + dt = dt.replace(tzinfo=tzlocal()) + + arr = np.array([dt], dtype=object) + + result = to_datetime(arr, utc=True) + assert result.tz is timezone.utc + + rng = date_range("2012-11-03 03:00", "2012-11-05 03:00", tz=tzlocal()) + arr = rng.to_pydatetime() + result = to_datetime(arr, utc=True) + assert result.tz is timezone.utc + + def test_dti_to_pydatetime_fizedtz(self): + dates = np.array( + [ + datetime(2000, 1, 1, tzinfo=fixed_off), + datetime(2000, 1, 2, tzinfo=fixed_off), + datetime(2000, 1, 3, tzinfo=fixed_off), + ] + ) + dti = DatetimeIndex(dates) + + result = dti.to_pydatetime() + tm.assert_numpy_array_equal(dates, result) + + result = dti._mpl_repr() + tm.assert_numpy_array_equal(dates, result) + + @pytest.mark.parametrize("tz", [pytz.timezone("US/Central"), gettz("US/Central")]) + def test_with_tz(self, tz): + # just want it to work + start = datetime(2011, 3, 12, tzinfo=pytz.utc) + dr = bdate_range(start, periods=50, freq=pd.offsets.Hour()) + assert dr.tz is pytz.utc + + # DateRange with naive datetimes + dr = bdate_range("1/1/2005", "1/1/2009", tz=pytz.utc) + dr = bdate_range("1/1/2005", "1/1/2009", tz=tz) + + # normalized + central = dr.tz_convert(tz) + assert central.tz is tz + naive = central[0].to_pydatetime().replace(tzinfo=None) + comp = conversion.localize_pydatetime(naive, tz).tzinfo + assert central[0].tz is comp + + # compare vs a localized tz + naive = dr[0].to_pydatetime().replace(tzinfo=None) + comp = conversion.localize_pydatetime(naive, tz).tzinfo + assert central[0].tz is comp + + # datetimes with tzinfo set + dr = bdate_range( + datetime(2005, 1, 1, tzinfo=pytz.utc), datetime(2009, 1, 1, tzinfo=pytz.utc) + ) + msg = "Start and end cannot both be tz-aware with different timezones" + with pytest.raises(Exception, match=msg): + bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc), "1/1/2009", tz=tz) + + @pytest.mark.parametrize("prefix", ["", "dateutil/"]) + def test_field_access_localize(self, prefix): + strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] + rng = DatetimeIndex(strdates, tz=prefix + "US/Eastern") + assert (rng.hour == 0).all() + + # a more unusual time zone, #1946 + dr = date_range( + "2011-10-02 00:00", freq="h", periods=10, tz=prefix + 
"America/Atikokan" + ) + + expected = Index(np.arange(10, dtype=np.int32)) + tm.assert_index_equal(dr.hour, expected) + + @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) + def test_dti_convert_tz_aware_datetime_datetime(self, tz): + # GH#1581 + dates = [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)] + + dates_aware = [conversion.localize_pydatetime(x, tz) for x in dates] + result = DatetimeIndex(dates_aware) + assert timezones.tz_compare(result.tz, tz) + + converted = to_datetime(dates_aware, utc=True) + ex_vals = np.array([Timestamp(x).as_unit("ns")._value for x in dates_aware]) + tm.assert_numpy_array_equal(converted.asi8, ex_vals) + assert converted.tz is timezone.utc + + # Note: not difference, as there is no symmetry requirement there + @pytest.mark.parametrize("setop", ["union", "intersection", "symmetric_difference"]) + def test_dti_setop_aware(self, setop): + # non-overlapping + # GH#39328 as of 2.0 we cast these to UTC instead of object + rng = date_range("2012-11-15 00:00:00", periods=6, freq="H", tz="US/Central") + + rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="H", tz="US/Eastern") + + result = getattr(rng, setop)(rng2) + + left = rng.tz_convert("UTC") + right = rng2.tz_convert("UTC") + expected = getattr(left, setop)(right) + tm.assert_index_equal(result, expected) + assert result.tz == left.tz + if len(result): + assert result[0].tz is timezone.utc + assert result[-1].tz is timezone.utc + + def test_dti_union_mixed(self): + # GH 21671 + rng = DatetimeIndex([Timestamp("2011-01-01"), pd.NaT]) + rng2 = DatetimeIndex(["2012-01-01", "2012-01-02"], tz="Asia/Tokyo") + result = rng.union(rng2) + expected = Index( + [ + Timestamp("2011-01-01"), + pd.NaT, + Timestamp("2012-01-01", tz="Asia/Tokyo"), + Timestamp("2012-01-02", tz="Asia/Tokyo"), + ], + dtype=object, + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "tz", [None, "UTC", "US/Central", dateutil.tz.tzoffset(None, -28800)] + ) + def test_iteration_preserves_nanoseconds(self, tz): + # GH 19603 + index = DatetimeIndex( + ["2018-02-08 15:00:00.168456358", "2018-02-08 15:00:00.168456359"], tz=tz + ) + for i, ts in enumerate(index): + assert ts == index[i] # pylint: disable=unnecessary-list-index-lookup + + +def test_tz_localize_invalidates_freq(): + # we only preserve freq in unambiguous cases + + # if localized to US/Eastern, this crosses a DST transition + dti = date_range("2014-03-08 23:00", "2014-03-09 09:00", freq="H") + assert dti.freq == "H" + + result = dti.tz_localize(None) # no-op + assert result.freq == "H" + + result = dti.tz_localize("UTC") # unambiguous freq preservation + assert result.freq == "H" + + result = dti.tz_localize("US/Eastern", nonexistent="shift_forward") + assert result.freq is None + assert result.inferred_freq is None # i.e. 
we are not _too_ strict here + + # Case where we _can_ keep freq because we're length==1 + dti2 = dti[:1] + result = dti2.tz_localize("US/Eastern") + assert result.freq == "H" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_unique.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_unique.py new file mode 100644 index 00000000..5319bf59 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/test_unique.py @@ -0,0 +1,76 @@ +from datetime import ( + datetime, + timedelta, +) + +from pandas import ( + DatetimeIndex, + NaT, + Timestamp, +) +import pandas._testing as tm + + +def test_unique(tz_naive_fixture): + idx = DatetimeIndex(["2017"] * 2, tz=tz_naive_fixture) + expected = idx[:1] + + result = idx.unique() + tm.assert_index_equal(result, expected) + # GH#21737 + # Ensure the underlying data is consistent + assert result[0] == expected[0] + + +def test_index_unique(rand_series_with_duplicate_datetimeindex): + dups = rand_series_with_duplicate_datetimeindex + index = dups.index + + uniques = index.unique() + expected = DatetimeIndex( + [ + datetime(2000, 1, 2), + datetime(2000, 1, 3), + datetime(2000, 1, 4), + datetime(2000, 1, 5), + ] + ) + assert uniques.dtype == "M8[ns]" # sanity + tm.assert_index_equal(uniques, expected) + assert index.nunique() == 4 + + # GH#2563 + assert isinstance(uniques, DatetimeIndex) + + dups_local = index.tz_localize("US/Eastern") + dups_local.name = "foo" + result = dups_local.unique() + expected = DatetimeIndex(expected, name="foo") + expected = expected.tz_localize("US/Eastern") + assert result.tz is not None + assert result.name == "foo" + tm.assert_index_equal(result, expected) + + +def test_index_unique2(): + # NaT, note this is excluded + arr = [1370745748 + t for t in range(20)] + [NaT._value] + idx = DatetimeIndex(arr * 3) + tm.assert_index_equal(idx.unique(), DatetimeIndex(arr)) + assert idx.nunique() == 20 + assert idx.nunique(dropna=False) == 21 + + +def test_index_unique3(): + arr = [ + Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20) + ] + [NaT] + idx = DatetimeIndex(arr * 3) + tm.assert_index_equal(idx.unique(), DatetimeIndex(arr)) + assert idx.nunique() == 20 + assert idx.nunique(dropna=False) == 21 + + +def test_is_unique_monotonic(rand_series_with_duplicate_datetimeindex): + index = rand_series_with_duplicate_datetimeindex.index + assert not index.is_unique diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_astype.py new file mode 100644 index 00000000..59c555b9 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_astype.py @@ -0,0 +1,248 @@ +import re + +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + IntervalDtype, +) + +from pandas import ( + CategoricalIndex, + Index, + IntervalIndex, + NaT, + Timedelta, + Timestamp, + interval_range, +) +import pandas._testing as tm + + +class AstypeTests: + """Tests common to IntervalIndex with any subtype""" + + def test_astype_idempotent(self, index): + result = index.astype("interval") + tm.assert_index_equal(result, index) + + result = 
index.astype(index.dtype) + tm.assert_index_equal(result, index) + + def test_astype_object(self, index): + result = index.astype(object) + expected = Index(index.values, dtype="object") + tm.assert_index_equal(result, expected) + assert not result.equals(index) + + def test_astype_category(self, index): + result = index.astype("category") + expected = CategoricalIndex(index.values) + tm.assert_index_equal(result, expected) + + result = index.astype(CategoricalDtype()) + tm.assert_index_equal(result, expected) + + # non-default params + categories = index.dropna().unique().values[:-1] + dtype = CategoricalDtype(categories=categories, ordered=True) + result = index.astype(dtype) + expected = CategoricalIndex(index.values, categories=categories, ordered=True) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "dtype", + [ + "int64", + "uint64", + "float64", + "complex128", + "period[M]", + "timedelta64", + "timedelta64[ns]", + "datetime64", + "datetime64[ns]", + "datetime64[ns, US/Eastern]", + ], + ) + def test_astype_cannot_cast(self, index, dtype): + msg = "Cannot cast IntervalIndex to dtype" + with pytest.raises(TypeError, match=msg): + index.astype(dtype) + + def test_astype_invalid_dtype(self, index): + msg = "data type [\"']fake_dtype[\"'] not understood" + with pytest.raises(TypeError, match=msg): + index.astype("fake_dtype") + + +class TestIntSubtype(AstypeTests): + """Tests specific to IntervalIndex with integer-like subtype""" + + indexes = [ + IntervalIndex.from_breaks(np.arange(-10, 11, dtype="int64")), + IntervalIndex.from_breaks(np.arange(100, dtype="uint64"), closed="left"), + ] + + @pytest.fixture(params=indexes) + def index(self, request): + return request.param + + @pytest.mark.parametrize( + "subtype", ["float64", "datetime64[ns]", "timedelta64[ns]"] + ) + def test_subtype_conversion(self, index, subtype): + dtype = IntervalDtype(subtype, index.closed) + result = index.astype(dtype) + expected = IntervalIndex.from_arrays( + index.left.astype(subtype), index.right.astype(subtype), closed=index.closed + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "subtype_start, subtype_end", [("int64", "uint64"), ("uint64", "int64")] + ) + def test_subtype_integer(self, subtype_start, subtype_end): + index = IntervalIndex.from_breaks(np.arange(100, dtype=subtype_start)) + dtype = IntervalDtype(subtype_end, index.closed) + result = index.astype(dtype) + expected = IntervalIndex.from_arrays( + index.left.astype(subtype_end), + index.right.astype(subtype_end), + closed=index.closed, + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.xfail(reason="GH#15832") + def test_subtype_integer_errors(self): + # int64 -> uint64 fails with negative values + index = interval_range(-10, 10) + dtype = IntervalDtype("uint64", "right") + + # Until we decide what the exception message _should_ be, we + # assert something that it should _not_ be. 
+ # We should _not_ be getting a message suggesting that the -10 + # has been wrapped around to a large-positive integer + msg = "^(?!(left side of interval must be <= right side))" + with pytest.raises(ValueError, match=msg): + index.astype(dtype) + + +class TestFloatSubtype(AstypeTests): + """Tests specific to IntervalIndex with float subtype""" + + indexes = [ + interval_range(-10.0, 10.0, closed="neither"), + IntervalIndex.from_arrays( + [-1.5, np.nan, 0.0, 0.0, 1.5], [-0.5, np.nan, 1.0, 1.0, 3.0], closed="both" + ), + ] + + @pytest.fixture(params=indexes) + def index(self, request): + return request.param + + @pytest.mark.parametrize("subtype", ["int64", "uint64"]) + def test_subtype_integer(self, subtype): + index = interval_range(0.0, 10.0) + dtype = IntervalDtype(subtype, "right") + result = index.astype(dtype) + expected = IntervalIndex.from_arrays( + index.left.astype(subtype), index.right.astype(subtype), closed=index.closed + ) + tm.assert_index_equal(result, expected) + + # raises with NA + msg = r"Cannot convert non-finite values \(NA or inf\) to integer" + with pytest.raises(ValueError, match=msg): + index.insert(0, np.nan).astype(dtype) + + @pytest.mark.parametrize("subtype", ["int64", "uint64"]) + def test_subtype_integer_with_non_integer_borders(self, subtype): + index = interval_range(0.0, 3.0, freq=0.25) + dtype = IntervalDtype(subtype, "right") + result = index.astype(dtype) + expected = IntervalIndex.from_arrays( + index.left.astype(subtype), index.right.astype(subtype), closed=index.closed + ) + tm.assert_index_equal(result, expected) + + def test_subtype_integer_errors(self): + # float64 -> uint64 fails with negative values + index = interval_range(-10.0, 10.0) + dtype = IntervalDtype("uint64", "right") + msg = re.escape( + "Cannot convert interval[float64, right] to interval[uint64, right]; " + "subtypes are incompatible" + ) + with pytest.raises(TypeError, match=msg): + index.astype(dtype) + + @pytest.mark.parametrize("subtype", ["datetime64[ns]", "timedelta64[ns]"]) + def test_subtype_datetimelike(self, index, subtype): + dtype = IntervalDtype(subtype, "right") + msg = "Cannot convert .* to .*; subtypes are incompatible" + with pytest.raises(TypeError, match=msg): + index.astype(dtype) + + +class TestDatetimelikeSubtype(AstypeTests): + """Tests specific to IntervalIndex with datetime-like subtype""" + + indexes = [ + interval_range(Timestamp("2018-01-01"), periods=10, closed="neither"), + interval_range(Timestamp("2018-01-01"), periods=10).insert(2, NaT), + interval_range(Timestamp("2018-01-01", tz="US/Eastern"), periods=10), + interval_range(Timedelta("0 days"), periods=10, closed="both"), + interval_range(Timedelta("0 days"), periods=10).insert(2, NaT), + ] + + @pytest.fixture(params=indexes) + def index(self, request): + return request.param + + @pytest.mark.parametrize("subtype", ["int64", "uint64"]) + def test_subtype_integer(self, index, subtype): + dtype = IntervalDtype(subtype, "right") + + if subtype != "int64": + msg = ( + r"Cannot convert interval\[(timedelta64|datetime64)\[ns.*\], .*\] " + r"to interval\[uint64, .*\]" + ) + with pytest.raises(TypeError, match=msg): + index.astype(dtype) + return + + result = index.astype(dtype) + new_left = index.left.astype(subtype) + new_right = index.right.astype(subtype) + + expected = IntervalIndex.from_arrays(new_left, new_right, closed=index.closed) + tm.assert_index_equal(result, expected) + + def test_subtype_float(self, index): + dtype = IntervalDtype("float64", "right") + msg = "Cannot convert .* to .*; 
subtypes are incompatible" + with pytest.raises(TypeError, match=msg): + index.astype(dtype) + + def test_subtype_datetimelike(self): + # datetime -> timedelta raises + dtype = IntervalDtype("timedelta64[ns]", "right") + msg = "Cannot convert .* to .*; subtypes are incompatible" + + index = interval_range(Timestamp("2018-01-01"), periods=10) + with pytest.raises(TypeError, match=msg): + index.astype(dtype) + + index = interval_range(Timestamp("2018-01-01", tz="CET"), periods=10) + with pytest.raises(TypeError, match=msg): + index.astype(dtype) + + # timedelta -> datetime raises + dtype = IntervalDtype("datetime64[ns]", "right") + index = interval_range(Timedelta("0 days"), periods=10) + with pytest.raises(TypeError, match=msg): + index.astype(dtype) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_base.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_base.py new file mode 100644 index 00000000..e0155a13 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_base.py @@ -0,0 +1,56 @@ +import numpy as np +import pytest + +from pandas import IntervalIndex +import pandas._testing as tm + + +class TestInterval: + """ + Tests specific to the shared common index tests; unrelated tests should be placed + in test_interval.py or the specific test file (e.g. test_astype.py) + """ + + @pytest.fixture + def simple_index(self) -> IntervalIndex: + return IntervalIndex.from_breaks(range(11), closed="right") + + @pytest.fixture + def index(self): + return tm.makeIntervalIndex(10) + + def test_take(self, closed): + index = IntervalIndex.from_breaks(range(11), closed=closed) + + result = index.take(range(10)) + tm.assert_index_equal(result, index) + + result = index.take([0, 0, 1]) + expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2], closed=closed) + tm.assert_index_equal(result, expected) + + def test_where(self, simple_index, listlike_box): + klass = listlike_box + + idx = simple_index + cond = [True] * len(idx) + expected = idx + result = expected.where(klass(cond)) + tm.assert_index_equal(result, expected) + + cond = [False] + [True] * len(idx[1:]) + expected = IntervalIndex([np.nan] + idx[1:].tolist()) + result = idx.where(klass(cond)) + tm.assert_index_equal(result, expected) + + def test_getitem_2d_deprecated(self, simple_index): + # GH#30588 multi-dim indexing is deprecated, but raising is also acceptable + idx = simple_index + with pytest.raises(ValueError, match="multi-dimensional indexing not allowed"): + idx[:, None] + with pytest.raises(ValueError, match="multi-dimensional indexing not allowed"): + # GH#44051 + idx[True] + with pytest.raises(ValueError, match="multi-dimensional indexing not allowed"): + # GH#44051 + idx[False] diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_constructors.py new file mode 100644 index 00000000..9524288b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_constructors.py @@ -0,0 +1,478 @@ +from functools import partial + +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import IntervalDtype + +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + Index, + Interval, + IntervalIndex, + date_range, + notna, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.arrays import IntervalArray +import pandas.core.common as 
com + + +@pytest.fixture(params=[None, "foo"]) +def name(request): + return request.param + + +class ConstructorTests: + """ + Common tests for all variations of IntervalIndex construction. Input data + to be supplied in breaks format, then converted by the subclass method + get_kwargs_from_breaks to the expected format. + """ + + @pytest.fixture( + params=[ + ([3, 14, 15, 92, 653], np.int64), + (np.arange(10, dtype="int64"), np.int64), + (Index(np.arange(-10, 11, dtype=np.int64)), np.int64), + (Index(np.arange(10, 31, dtype=np.uint64)), np.uint64), + (Index(np.arange(20, 30, 0.5), dtype=np.float64), np.float64), + (date_range("20180101", periods=10), " Interval(0.5, 1.5) + tm.assert_numpy_array_equal(actual, expected) + + actual = self.index == self.index + expected = np.array([True, True]) + tm.assert_numpy_array_equal(actual, expected) + actual = self.index <= self.index + tm.assert_numpy_array_equal(actual, expected) + actual = self.index >= self.index + tm.assert_numpy_array_equal(actual, expected) + + actual = self.index < self.index + expected = np.array([False, False]) + tm.assert_numpy_array_equal(actual, expected) + actual = self.index > self.index + tm.assert_numpy_array_equal(actual, expected) + + actual = self.index == IntervalIndex.from_breaks([0, 1, 2], "left") + tm.assert_numpy_array_equal(actual, expected) + + actual = self.index == self.index.values + tm.assert_numpy_array_equal(actual, np.array([True, True])) + actual = self.index.values == self.index + tm.assert_numpy_array_equal(actual, np.array([True, True])) + actual = self.index <= self.index.values + tm.assert_numpy_array_equal(actual, np.array([True, True])) + actual = self.index != self.index.values + tm.assert_numpy_array_equal(actual, np.array([False, False])) + actual = self.index > self.index.values + tm.assert_numpy_array_equal(actual, np.array([False, False])) + actual = self.index.values > self.index + tm.assert_numpy_array_equal(actual, np.array([False, False])) + + # invalid comparisons + actual = self.index == 0 + tm.assert_numpy_array_equal(actual, np.array([False, False])) + actual = self.index == self.index.left + tm.assert_numpy_array_equal(actual, np.array([False, False])) + + msg = "|".join( + [ + "not supported between instances of 'int' and '.*.Interval'", + r"Invalid comparison between dtype=interval\[int64, right\] and ", + ] + ) + with pytest.raises(TypeError, match=msg): + self.index > 0 + with pytest.raises(TypeError, match=msg): + self.index <= 0 + with pytest.raises(TypeError, match=msg): + self.index > np.arange(2) + + msg = "Lengths must match to compare" + with pytest.raises(ValueError, match=msg): + self.index > np.arange(3) + + def test_missing_values(self, closed): + idx = Index( + [np.nan, Interval(0, 1, closed=closed), Interval(1, 2, closed=closed)] + ) + idx2 = IntervalIndex.from_arrays([np.nan, 0, 1], [np.nan, 1, 2], closed=closed) + assert idx.equals(idx2) + + msg = ( + "missing values must be missing in the same location both left " + "and right sides" + ) + with pytest.raises(ValueError, match=msg): + IntervalIndex.from_arrays( + [np.nan, 0, 1], np.array([0, 1, 2]), closed=closed + ) + + tm.assert_numpy_array_equal(isna(idx), np.array([True, False, False])) + + def test_sort_values(self, closed): + index = self.create_index(closed=closed) + + result = index.sort_values() + tm.assert_index_equal(result, index) + + result = index.sort_values(ascending=False) + tm.assert_index_equal(result, index[::-1]) + + # with nan + index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 
1)]) + + result = index.sort_values() + expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan]) + tm.assert_index_equal(result, expected) + + result = index.sort_values(ascending=False, na_position="first") + expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("tz", [None, "US/Eastern"]) + def test_datetime(self, tz): + start = Timestamp("2000-01-01", tz=tz) + dates = date_range(start=start, periods=10) + index = IntervalIndex.from_breaks(dates) + + # test mid + start = Timestamp("2000-01-01T12:00", tz=tz) + expected = date_range(start=start, periods=9) + tm.assert_index_equal(index.mid, expected) + + # __contains__ doesn't check individual points + assert Timestamp("2000-01-01", tz=tz) not in index + assert Timestamp("2000-01-01T12", tz=tz) not in index + assert Timestamp("2000-01-02", tz=tz) not in index + iv_true = Interval( + Timestamp("2000-01-02", tz=tz), Timestamp("2000-01-03", tz=tz) + ) + iv_false = Interval( + Timestamp("1999-12-31", tz=tz), Timestamp("2000-01-01", tz=tz) + ) + assert iv_true in index + assert iv_false not in index + + # .contains does check individual points + assert not index.contains(Timestamp("2000-01-01", tz=tz)).any() + assert index.contains(Timestamp("2000-01-01T12", tz=tz)).any() + assert index.contains(Timestamp("2000-01-02", tz=tz)).any() + + # test get_indexer + start = Timestamp("1999-12-31T12:00", tz=tz) + target = date_range(start=start, periods=7, freq="12H") + actual = index.get_indexer(target) + expected = np.array([-1, -1, 0, 0, 1, 1, 2], dtype="intp") + tm.assert_numpy_array_equal(actual, expected) + + start = Timestamp("2000-01-08T18:00", tz=tz) + target = date_range(start=start, periods=7, freq="6H") + actual = index.get_indexer(target) + expected = np.array([7, 7, 8, 8, 8, 8, -1], dtype="intp") + tm.assert_numpy_array_equal(actual, expected) + + def test_append(self, closed): + index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed) + index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed) + + result = index1.append(index2) + expected = IntervalIndex.from_arrays([0, 1, 1, 2], [1, 2, 2, 3], closed=closed) + tm.assert_index_equal(result, expected) + + result = index1.append([index1, index2]) + expected = IntervalIndex.from_arrays( + [0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed + ) + tm.assert_index_equal(result, expected) + + for other_closed in {"left", "right", "both", "neither"} - {closed}: + index_other_closed = IntervalIndex.from_arrays( + [0, 1], [1, 2], closed=other_closed + ) + result = index1.append(index_other_closed) + expected = index1.astype(object).append(index_other_closed.astype(object)) + tm.assert_index_equal(result, expected) + + def test_is_non_overlapping_monotonic(self, closed): + # Should be True in all cases + tpls = [(0, 1), (2, 3), (4, 5), (6, 7)] + idx = IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is True + + idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed) + assert idx.is_non_overlapping_monotonic is True + + # Should be False in all cases (overlapping) + tpls = [(0, 2), (1, 3), (4, 5), (6, 7)] + idx = IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is False + + idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed) + assert idx.is_non_overlapping_monotonic is False + + # Should be False in all cases (non-monotonic) + tpls = [(0, 1), (2, 3), (6, 7), (4, 5)] + idx = 
IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is False + + idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed) + assert idx.is_non_overlapping_monotonic is False + + # Should be False for closed='both', otherwise True (GH16560) + if closed == "both": + idx = IntervalIndex.from_breaks(range(4), closed=closed) + assert idx.is_non_overlapping_monotonic is False + else: + idx = IntervalIndex.from_breaks(range(4), closed=closed) + assert idx.is_non_overlapping_monotonic is True + + @pytest.mark.parametrize( + "start, shift, na_value", + [ + (0, 1, np.nan), + (Timestamp("2018-01-01"), Timedelta("1 day"), pd.NaT), + (Timedelta("0 days"), Timedelta("1 day"), pd.NaT), + ], + ) + def test_is_overlapping(self, start, shift, na_value, closed): + # GH 23309 + # see test_interval_tree.py for extensive tests; interface tests here + + # non-overlapping + tuples = [(start + n * shift, start + (n + 1) * shift) for n in (0, 2, 4)] + index = IntervalIndex.from_tuples(tuples, closed=closed) + assert index.is_overlapping is False + + # non-overlapping with NA + tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)] + index = IntervalIndex.from_tuples(tuples, closed=closed) + assert index.is_overlapping is False + + # overlapping + tuples = [(start + n * shift, start + (n + 2) * shift) for n in range(3)] + index = IntervalIndex.from_tuples(tuples, closed=closed) + assert index.is_overlapping is True + + # overlapping with NA + tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)] + index = IntervalIndex.from_tuples(tuples, closed=closed) + assert index.is_overlapping is True + + # common endpoints + tuples = [(start + n * shift, start + (n + 1) * shift) for n in range(3)] + index = IntervalIndex.from_tuples(tuples, closed=closed) + result = index.is_overlapping + expected = closed == "both" + assert result is expected + + # common endpoints with NA + tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)] + index = IntervalIndex.from_tuples(tuples, closed=closed) + result = index.is_overlapping + assert result is expected + + # intervals with duplicate left values + a = [10, 15, 20, 25, 30, 35, 40, 45, 45, 50, 55, 60, 65, 70, 75, 80, 85] + b = [15, 20, 25, 30, 35, 40, 45, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90] + index = IntervalIndex.from_arrays(a, b, closed="right") + result = index.is_overlapping + assert result is False + + @pytest.mark.parametrize( + "tuples", + [ + list(zip(range(10), range(1, 11))), + list( + zip( + date_range("20170101", periods=10), + date_range("20170101", periods=10), + ) + ), + list( + zip( + timedelta_range("0 days", periods=10), + timedelta_range("1 day", periods=10), + ) + ), + ], + ) + def test_to_tuples(self, tuples): + # GH 18756 + idx = IntervalIndex.from_tuples(tuples) + result = idx.to_tuples() + expected = Index(com.asarray_tuplesafe(tuples)) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "tuples", + [ + list(zip(range(10), range(1, 11))) + [np.nan], + list( + zip( + date_range("20170101", periods=10), + date_range("20170101", periods=10), + ) + ) + + [np.nan], + list( + zip( + timedelta_range("0 days", periods=10), + timedelta_range("1 day", periods=10), + ) + ) + + [np.nan], + ], + ) + @pytest.mark.parametrize("na_tuple", [True, False]) + def test_to_tuples_na(self, tuples, na_tuple): + # GH 18756 + idx = IntervalIndex.from_tuples(tuples) + result = idx.to_tuples(na_tuple=na_tuple) + + # check the non-NA portion + expected_notna = 
Index(com.asarray_tuplesafe(tuples[:-1])) + result_notna = result[:-1] + tm.assert_index_equal(result_notna, expected_notna) + + # check the NA portion + result_na = result[-1] + if na_tuple: + assert isinstance(result_na, tuple) + assert len(result_na) == 2 + assert all(isna(x) for x in result_na) + else: + assert isna(result_na) + + def test_nbytes(self): + # GH 19209 + left = np.arange(0, 4, dtype="i8") + right = np.arange(1, 5, dtype="i8") + + result = IntervalIndex.from_arrays(left, right).nbytes + expected = 64 # 4 * 8 * 2 + assert result == expected + + @pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"]) + def test_set_closed(self, name, closed, new_closed): + # GH 21670 + index = interval_range(0, 5, closed=closed, name=name) + result = index.set_closed(new_closed) + expected = interval_range(0, 5, closed=new_closed, name=name) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("bad_closed", ["foo", 10, "LEFT", True, False]) + def test_set_closed_errors(self, bad_closed): + # GH 21670 + index = interval_range(0, 5) + msg = f"invalid option for 'closed': {bad_closed}" + with pytest.raises(ValueError, match=msg): + index.set_closed(bad_closed) + + def test_is_all_dates(self): + # GH 23576 + year_2017 = Interval( + Timestamp("2017-01-01 00:00:00"), Timestamp("2018-01-01 00:00:00") + ) + year_2017_index = IntervalIndex([year_2017]) + assert not year_2017_index._is_all_dates + + +def test_dir(): + # GH#27571 dir(interval_index) should not raise + index = IntervalIndex.from_arrays([0, 1], [1, 2]) + result = dir(index) + assert "str" not in result + + +def test_searchsorted_different_argument_classes(listlike_box): + # https://github.com/pandas-dev/pandas/issues/32762 + values = IntervalIndex([Interval(0, 1), Interval(1, 2)]) + result = values.searchsorted(listlike_box(values)) + expected = np.array([0, 1], dtype=result.dtype) + tm.assert_numpy_array_equal(result, expected) + + result = values._data.searchsorted(listlike_box(values)) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2] +) +def test_searchsorted_invalid_argument(arg): + values = IntervalIndex([Interval(0, 1), Interval(1, 2)]) + msg = "'<' not supported between instances of 'pandas._libs.interval.Interval' and " + with pytest.raises(TypeError, match=msg): + values.searchsorted(arg) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval_range.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval_range.py new file mode 100644 index 00000000..57783265 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval_range.py @@ -0,0 +1,365 @@ +from datetime import timedelta + +import numpy as np +import pytest + +from pandas.core.dtypes.common import is_integer + +from pandas import ( + DateOffset, + Interval, + IntervalIndex, + Timedelta, + Timestamp, + date_range, + interval_range, + timedelta_range, +) +import pandas._testing as tm + +from pandas.tseries.offsets import Day + + +@pytest.fixture(params=[None, "foo"]) +def name(request): + return request.param + + +class TestIntervalRange: + @pytest.mark.parametrize("freq, periods", [(1, 100), (2.5, 40), (5, 20), (25, 4)]) + def test_constructor_numeric(self, closed, name, freq, periods): + start, end = 0, 100 + breaks = np.arange(101, step=freq) + expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) + 
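+ # interval_range requires exactly three of start/end/periods/freq (see
+ # test_errors below); the four blocks that follow check that every valid
+ # combination of three reproduces the same expected index, e.g.
+ # interval_range(start=0, end=100, freq=25) should equal
+ # interval_range(start=0, periods=4, freq=25)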
+ # defined from start/end/freq + result = interval_range( + start=start, end=end, freq=freq, name=name, closed=closed + ) + tm.assert_index_equal(result, expected) + + # defined from start/periods/freq + result = interval_range( + start=start, periods=periods, freq=freq, name=name, closed=closed + ) + tm.assert_index_equal(result, expected) + + # defined from end/periods/freq + result = interval_range( + end=end, periods=periods, freq=freq, name=name, closed=closed + ) + tm.assert_index_equal(result, expected) + + # GH 20976: linspace behavior defined from start/end/periods + result = interval_range( + start=start, end=end, periods=periods, name=name, closed=closed + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("tz", [None, "US/Eastern"]) + @pytest.mark.parametrize( + "freq, periods", [("D", 364), ("2D", 182), ("22D18H", 16), ("M", 11)] + ) + def test_constructor_timestamp(self, closed, name, freq, periods, tz): + start, end = Timestamp("20180101", tz=tz), Timestamp("20181231", tz=tz) + breaks = date_range(start=start, end=end, freq=freq) + expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) + + # defined from start/end/freq + result = interval_range( + start=start, end=end, freq=freq, name=name, closed=closed + ) + tm.assert_index_equal(result, expected) + + # defined from start/periods/freq + result = interval_range( + start=start, periods=periods, freq=freq, name=name, closed=closed + ) + tm.assert_index_equal(result, expected) + + # defined from end/periods/freq + result = interval_range( + end=end, periods=periods, freq=freq, name=name, closed=closed + ) + tm.assert_index_equal(result, expected) + + # GH 20976: linspace behavior defined from start/end/periods + if not breaks.freq.is_anchored() and tz is None: + # matches expected only for non-anchored offsets and tz naive + # (anchored/DST transitions cause unequal spacing in expected) + result = interval_range( + start=start, end=end, periods=periods, name=name, closed=closed + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "freq, periods", [("D", 100), ("2D12H", 40), ("5D", 20), ("25D", 4)] + ) + def test_constructor_timedelta(self, closed, name, freq, periods): + start, end = Timedelta("0 days"), Timedelta("100 days") + breaks = timedelta_range(start=start, end=end, freq=freq) + expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) + + # defined from start/end/freq + result = interval_range( + start=start, end=end, freq=freq, name=name, closed=closed + ) + tm.assert_index_equal(result, expected) + + # defined from start/periods/freq + result = interval_range( + start=start, periods=periods, freq=freq, name=name, closed=closed + ) + tm.assert_index_equal(result, expected) + + # defined from end/periods/freq + result = interval_range( + end=end, periods=periods, freq=freq, name=name, closed=closed + ) + tm.assert_index_equal(result, expected) + + # GH 20976: linspace behavior defined from start/end/periods + result = interval_range( + start=start, end=end, periods=periods, name=name, closed=closed + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "start, end, freq, expected_endpoint", + [ + (0, 10, 3, 9), + (0, 10, 1.5, 9), + (0.5, 10, 3, 9.5), + (Timedelta("0D"), Timedelta("10D"), "2D4H", Timedelta("8D16H")), + ( + Timestamp("2018-01-01"), + Timestamp("2018-02-09"), + "MS", + Timestamp("2018-02-01"), + ), + ( + Timestamp("2018-01-01", tz="US/Eastern"), + Timestamp("2018-01-20", tz="US/Eastern"), + "5D12H", + 
Timestamp("2018-01-17 12:00:00", tz="US/Eastern"), + ), + ], + ) + def test_early_truncation(self, start, end, freq, expected_endpoint): + # index truncates early if freq causes end to be skipped + result = interval_range(start=start, end=end, freq=freq) + result_endpoint = result.right[-1] + assert result_endpoint == expected_endpoint + + @pytest.mark.parametrize( + "start, end, freq", + [(0.5, None, None), (None, 4.5, None), (0.5, None, 1.5), (None, 6.5, 1.5)], + ) + def test_no_invalid_float_truncation(self, start, end, freq): + # GH 21161 + if freq is None: + breaks = [0.5, 1.5, 2.5, 3.5, 4.5] + else: + breaks = [0.5, 2.0, 3.5, 5.0, 6.5] + expected = IntervalIndex.from_breaks(breaks) + + result = interval_range(start=start, end=end, periods=4, freq=freq) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "start, mid, end", + [ + ( + Timestamp("2018-03-10", tz="US/Eastern"), + Timestamp("2018-03-10 23:30:00", tz="US/Eastern"), + Timestamp("2018-03-12", tz="US/Eastern"), + ), + ( + Timestamp("2018-11-03", tz="US/Eastern"), + Timestamp("2018-11-04 00:30:00", tz="US/Eastern"), + Timestamp("2018-11-05", tz="US/Eastern"), + ), + ], + ) + def test_linspace_dst_transition(self, start, mid, end): + # GH 20976: linspace behavior defined from start/end/periods + # accounts for the hour gained/lost during DST transition + result = interval_range(start=start, end=end, periods=2) + expected = IntervalIndex.from_breaks([start, mid, end]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("freq", [2, 2.0]) + @pytest.mark.parametrize("end", [10, 10.0]) + @pytest.mark.parametrize("start", [0, 0.0]) + def test_float_subtype(self, start, end, freq): + # Has float subtype if any of start/end/freq are float, even if all + # resulting endpoints can safely be upcast to integers + + # defined from start/end/freq + index = interval_range(start=start, end=end, freq=freq) + result = index.dtype.subtype + expected = "int64" if is_integer(start + end + freq) else "float64" + assert result == expected + + # defined from start/periods/freq + index = interval_range(start=start, periods=5, freq=freq) + result = index.dtype.subtype + expected = "int64" if is_integer(start + freq) else "float64" + assert result == expected + + # defined from end/periods/freq + index = interval_range(end=end, periods=5, freq=freq) + result = index.dtype.subtype + expected = "int64" if is_integer(end + freq) else "float64" + assert result == expected + + # GH 20976: linspace behavior defined from start/end/periods + index = interval_range(start=start, end=end, periods=5) + result = index.dtype.subtype + expected = "int64" if is_integer(start + end) else "float64" + assert result == expected + + def test_constructor_coverage(self): + # float value for periods + expected = interval_range(start=0, periods=10) + result = interval_range(start=0, periods=10.5) + tm.assert_index_equal(result, expected) + + # equivalent timestamp-like start/end + start, end = Timestamp("2017-01-01"), Timestamp("2017-01-15") + expected = interval_range(start=start, end=end) + + result = interval_range(start=start.to_pydatetime(), end=end.to_pydatetime()) + tm.assert_index_equal(result, expected) + + result = interval_range(start=start.asm8, end=end.asm8) + tm.assert_index_equal(result, expected) + + # equivalent freq with timestamp + equiv_freq = [ + "D", + Day(), + Timedelta(days=1), + timedelta(days=1), + DateOffset(days=1), + ] + for freq in equiv_freq: + result = interval_range(start=start, end=end, freq=freq) + 
tm.assert_index_equal(result, expected) + + # equivalent timedelta-like start/end + start, end = Timedelta(days=1), Timedelta(days=10) + expected = interval_range(start=start, end=end) + + result = interval_range(start=start.to_pytimedelta(), end=end.to_pytimedelta()) + tm.assert_index_equal(result, expected) + + result = interval_range(start=start.asm8, end=end.asm8) + tm.assert_index_equal(result, expected) + + # equivalent freq with timedelta + equiv_freq = ["D", Day(), Timedelta(days=1), timedelta(days=1)] + for freq in equiv_freq: + result = interval_range(start=start, end=end, freq=freq) + tm.assert_index_equal(result, expected) + + def test_errors(self): + # not enough params + msg = ( + "Of the four parameters: start, end, periods, and freq, " + "exactly three must be specified" + ) + + with pytest.raises(ValueError, match=msg): + interval_range(start=0) + + with pytest.raises(ValueError, match=msg): + interval_range(end=5) + + with pytest.raises(ValueError, match=msg): + interval_range(periods=2) + + with pytest.raises(ValueError, match=msg): + interval_range() + + # too many params + with pytest.raises(ValueError, match=msg): + interval_range(start=0, end=5, periods=6, freq=1.5) + + # mixed units + msg = "start, end, freq need to be type compatible" + with pytest.raises(TypeError, match=msg): + interval_range(start=0, end=Timestamp("20130101"), freq=2) + + with pytest.raises(TypeError, match=msg): + interval_range(start=0, end=Timedelta("1 day"), freq=2) + + with pytest.raises(TypeError, match=msg): + interval_range(start=0, end=10, freq="D") + + with pytest.raises(TypeError, match=msg): + interval_range(start=Timestamp("20130101"), end=10, freq="D") + + with pytest.raises(TypeError, match=msg): + interval_range( + start=Timestamp("20130101"), end=Timedelta("1 day"), freq="D" + ) + + with pytest.raises(TypeError, match=msg): + interval_range( + start=Timestamp("20130101"), end=Timestamp("20130110"), freq=2 + ) + + with pytest.raises(TypeError, match=msg): + interval_range(start=Timedelta("1 day"), end=10, freq="D") + + with pytest.raises(TypeError, match=msg): + interval_range( + start=Timedelta("1 day"), end=Timestamp("20130110"), freq="D" + ) + + with pytest.raises(TypeError, match=msg): + interval_range(start=Timedelta("1 day"), end=Timedelta("10 days"), freq=2) + + # invalid periods + msg = "periods must be a number, got foo" + with pytest.raises(TypeError, match=msg): + interval_range(start=0, periods="foo") + + # invalid start + msg = "start must be numeric or datetime-like, got foo" + with pytest.raises(ValueError, match=msg): + interval_range(start="foo", periods=10) + + # invalid end + msg = r"end must be numeric or datetime-like, got \(0, 1\]" + with pytest.raises(ValueError, match=msg): + interval_range(end=Interval(0, 1), periods=10) + + # invalid freq for datetime-like + msg = "freq must be numeric or convertible to DateOffset, got foo" + with pytest.raises(ValueError, match=msg): + interval_range(start=0, end=10, freq="foo") + + with pytest.raises(ValueError, match=msg): + interval_range(start=Timestamp("20130101"), periods=10, freq="foo") + + with pytest.raises(ValueError, match=msg): + interval_range(end=Timedelta("1 day"), periods=10, freq="foo") + + # mixed tz + start = Timestamp("2017-01-01", tz="US/Eastern") + end = Timestamp("2017-01-07", tz="US/Pacific") + msg = "Start and end cannot both be tz-aware with different timezones" + with pytest.raises(TypeError, match=msg): + interval_range(start=start, end=end) + + def test_float_freq(self): + # GH 54477 + 
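# breaks are start + n * freq, truncated so the last break does not pass
+ # end; with freq=0.6 only the single interval (0.0, 0.6] fits inside
+ # [0, 1], while e.g. freq=0.25 would yield the four breaks 0.25 apart
+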
result = interval_range(0, 1, freq=0.1) + expected = IntervalIndex.from_breaks([0 + 0.1 * n for n in range(11)]) + tm.assert_index_equal(result, expected) + + result = interval_range(0, 1, freq=0.6) + expected = IntervalIndex.from_breaks([0, 0.6]) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval_tree.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval_tree.py new file mode 100644 index 00000000..45b25f25 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval_tree.py @@ -0,0 +1,209 @@ +from itertools import permutations + +import numpy as np +import pytest + +from pandas._libs.interval import IntervalTree +from pandas.compat import IS64 + +import pandas._testing as tm + + +def skipif_32bit(param): + """ + Skip parameters in a parametrize on 32bit systems. Specifically used + here to skip leaf_size parameters related to GH 23440. + """ + marks = pytest.mark.skipif(not IS64, reason="GH 23440: int type mismatch on 32bit") + return pytest.param(param, marks=marks) + + +@pytest.fixture(params=["int64", "float64", "uint64"]) +def dtype(request): + return request.param + + +@pytest.fixture(params=[skipif_32bit(1), skipif_32bit(2), 10]) +def leaf_size(request): + """ + Fixture to specify IntervalTree leaf_size parameter; to be used with the + tree fixture. + """ + return request.param + + +@pytest.fixture( + params=[ + np.arange(5, dtype="int64"), + np.arange(5, dtype="uint64"), + np.arange(5, dtype="float64"), + np.array([0, 1, 2, 3, 4, np.nan], dtype="float64"), + ] +) +def tree(request, leaf_size): + left = request.param + return IntervalTree(left, left + 2, leaf_size=leaf_size) + + +class TestIntervalTree: + def test_get_indexer(self, tree): + result = tree.get_indexer(np.array([1.0, 5.5, 6.5])) + expected = np.array([0, 4, -1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + with pytest.raises( + KeyError, match="'indexer does not intersect a unique set of intervals'" + ): + tree.get_indexer(np.array([3.0])) + + @pytest.mark.parametrize( + "dtype, target_value, target_dtype", + [("int64", 2**63 + 1, "uint64"), ("uint64", -1, "int64")], + ) + def test_get_indexer_overflow(self, dtype, target_value, target_dtype): + left, right = np.array([0, 1], dtype=dtype), np.array([1, 2], dtype=dtype) + tree = IntervalTree(left, right) + + result = tree.get_indexer(np.array([target_value], dtype=target_dtype)) + expected = np.array([-1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_non_unique(self, tree): + indexer, missing = tree.get_indexer_non_unique(np.array([1.0, 2.0, 6.5])) + + result = indexer[:1] + expected = np.array([0], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + result = np.sort(indexer[1:3]) + expected = np.array([0, 1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + result = np.sort(indexer[3:]) + expected = np.array([-1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + result = missing + expected = np.array([2], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "dtype, target_value, target_dtype", + [("int64", 2**63 + 1, "uint64"), ("uint64", -1, "int64")], + ) + def test_get_indexer_non_unique_overflow(self, dtype, target_value, target_dtype): + left, right = np.array([0, 2], dtype=dtype), np.array([1, 3], dtype=dtype) + tree = IntervalTree(left, right) + target = 
np.array([target_value], dtype=target_dtype) + + result_indexer, result_missing = tree.get_indexer_non_unique(target) + expected_indexer = np.array([-1], dtype="intp") + tm.assert_numpy_array_equal(result_indexer, expected_indexer) + + expected_missing = np.array([0], dtype="intp") + tm.assert_numpy_array_equal(result_missing, expected_missing) + + def test_duplicates(self, dtype): + left = np.array([0, 0, 0], dtype=dtype) + tree = IntervalTree(left, left + 1) + + with pytest.raises( + KeyError, match="'indexer does not intersect a unique set of intervals'" + ): + tree.get_indexer(np.array([0.5])) + + indexer, missing = tree.get_indexer_non_unique(np.array([0.5])) + result = np.sort(indexer) + expected = np.array([0, 1, 2], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + result = missing + expected = np.array([], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "leaf_size", [skipif_32bit(1), skipif_32bit(10), skipif_32bit(100), 10000] + ) + def test_get_indexer_closed(self, closed, leaf_size): + x = np.arange(1000, dtype="float64") + found = x.astype("intp") + not_found = (-1 * np.ones(1000)).astype("intp") + + tree = IntervalTree(x, x + 0.5, closed=closed, leaf_size=leaf_size) + tm.assert_numpy_array_equal(found, tree.get_indexer(x + 0.25)) + + expected = found if tree.closed_left else not_found + tm.assert_numpy_array_equal(expected, tree.get_indexer(x + 0.0)) + + expected = found if tree.closed_right else not_found + tm.assert_numpy_array_equal(expected, tree.get_indexer(x + 0.5)) + + @pytest.mark.parametrize( + "left, right, expected", + [ + (np.array([0, 1, 4], dtype="int64"), np.array([2, 3, 5]), True), + (np.array([0, 1, 2], dtype="int64"), np.array([5, 4, 3]), True), + (np.array([0, 1, np.nan]), np.array([5, 4, np.nan]), True), + (np.array([0, 2, 4], dtype="int64"), np.array([1, 3, 5]), False), + (np.array([0, 2, np.nan]), np.array([1, 3, np.nan]), False), + ], + ) + @pytest.mark.parametrize("order", (list(x) for x in permutations(range(3)))) + def test_is_overlapping(self, closed, order, left, right, expected): + # GH 23309 + tree = IntervalTree(left[order], right[order], closed=closed) + result = tree.is_overlapping + assert result is expected + + @pytest.mark.parametrize("order", (list(x) for x in permutations(range(3)))) + def test_is_overlapping_endpoints(self, closed, order): + """shared endpoints are marked as overlapping""" + # GH 23309 + left, right = np.arange(3, dtype="int64"), np.arange(1, 4) + tree = IntervalTree(left[order], right[order], closed=closed) + result = tree.is_overlapping + expected = closed == "both" + assert result is expected + + @pytest.mark.parametrize( + "left, right", + [ + (np.array([], dtype="int64"), np.array([], dtype="int64")), + (np.array([0], dtype="int64"), np.array([1], dtype="int64")), + (np.array([np.nan]), np.array([np.nan])), + (np.array([np.nan] * 3), np.array([np.nan] * 3)), + ], + ) + def test_is_overlapping_trivial(self, closed, left, right): + # GH 23309 + tree = IntervalTree(left, right, closed=closed) + assert tree.is_overlapping is False + + @pytest.mark.skipif(not IS64, reason="GH 23440") + def test_construction_overflow(self): + # GH 25485 + left, right = np.arange(101, dtype="int64"), [np.iinfo(np.int64).max] * 101 + tree = IntervalTree(left, right) + + # pivot should be average of left/right medians + result = tree.root.pivot + expected = (50 + np.iinfo(np.int64).max) / 2 + assert result == expected + + @pytest.mark.xfail(not IS64, reason="GH 23440") + 
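+    # GH 46658: building an IntervalTree from intervals with +/-inf endpoints
+    # previously recursed without terminating; each case below pins the
+    # finite root pivot expected for 101 tiled copies of the interval pair.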
@pytest.mark.parametrize( + "left, right, expected", + [ + ([-np.inf, 1.0], [1.0, 2.0], 0.0), + ([-np.inf, -2.0], [-2.0, -1.0], -2.0), + ([-2.0, -1.0], [-1.0, np.inf], 0.0), + ([1.0, 2.0], [2.0, np.inf], 2.0), + ], + ) + def test_inf_bound_infinite_recursion(self, left, right, expected): + # GH 46658 + + tree = IntervalTree(left * 101, right * 101) + + result = tree.root.pivot + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_join.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_join.py new file mode 100644 index 00000000..2f42c530 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_join.py @@ -0,0 +1,44 @@ +import pytest + +from pandas import ( + IntervalIndex, + MultiIndex, + RangeIndex, +) +import pandas._testing as tm + + +@pytest.fixture +def range_index(): + return RangeIndex(3, name="range_index") + + +@pytest.fixture +def interval_index(): + return IntervalIndex.from_tuples( + [(0.0, 1.0), (1.0, 2.0), (1.5, 2.5)], name="interval_index" + ) + + +def test_join_overlapping_in_mi_to_same_intervalindex(range_index, interval_index): + # GH-45661 + multi_index = MultiIndex.from_product([interval_index, range_index]) + result = multi_index.join(interval_index) + + tm.assert_index_equal(result, multi_index) + + +def test_join_overlapping_to_multiindex_with_same_interval(range_index, interval_index): + # GH-45661 + multi_index = MultiIndex.from_product([interval_index, range_index]) + result = interval_index.join(multi_index) + + tm.assert_index_equal(result, multi_index) + + +def test_join_overlapping_interval_to_another_intervalindex(interval_index): + # GH-45661 + flipped_interval_index = interval_index[::-1] + result = interval_index.join(flipped_interval_index) + + tm.assert_index_equal(result, interval_index) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_pickle.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_pickle.py new file mode 100644 index 00000000..308a90e7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_pickle.py @@ -0,0 +1,13 @@ +import pytest + +from pandas import IntervalIndex +import pandas._testing as tm + + +class TestPickle: + @pytest.mark.parametrize("closed", ["left", "right", "both"]) + def test_pickle_round_trip_closed(self, closed): + # https://github.com/pandas-dev/pandas/issues/35658 + idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], closed=closed) + result = tm.round_trip_pickle(idx) + tm.assert_index_equal(result, idx) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_setops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_setops.py new file mode 100644 index 00000000..059b0b75 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_setops.py @@ -0,0 +1,202 @@ +import numpy as np +import pytest + +from pandas import ( + Index, + IntervalIndex, + Timestamp, + interval_range, +) +import pandas._testing as tm + + +def monotonic_index(start, end, dtype="int64", closed="right"): + return IntervalIndex.from_breaks(np.arange(start, end, dtype=dtype), closed=closed) + + +def empty_index(dtype="int64", closed="right"): + return IntervalIndex(np.array([], dtype=dtype), closed=closed) + + +class TestIntervalIndex: + def test_union(self, closed, sort): + index = monotonic_index(0, 11, closed=closed) + other = monotonic_index(5, 
13, closed=closed) + + expected = monotonic_index(0, 13, closed=closed) + result = index[::-1].union(other, sort=sort) + if sort is None: + tm.assert_index_equal(result, expected) + assert tm.equalContents(result, expected) + + result = other[::-1].union(index, sort=sort) + if sort is None: + tm.assert_index_equal(result, expected) + assert tm.equalContents(result, expected) + + tm.assert_index_equal(index.union(index, sort=sort), index) + tm.assert_index_equal(index.union(index[:1], sort=sort), index) + + def test_union_empty_result(self, closed, sort): + # GH 19101: empty result, same dtype + index = empty_index(dtype="int64", closed=closed) + result = index.union(index, sort=sort) + tm.assert_index_equal(result, index) + + # GH 19101: empty result, different numeric dtypes -> common dtype is f8 + other = empty_index(dtype="float64", closed=closed) + result = index.union(other, sort=sort) + expected = other + tm.assert_index_equal(result, expected) + + other = index.union(index, sort=sort) + tm.assert_index_equal(result, expected) + + other = empty_index(dtype="uint64", closed=closed) + result = index.union(other, sort=sort) + tm.assert_index_equal(result, expected) + + result = other.union(index, sort=sort) + tm.assert_index_equal(result, expected) + + def test_intersection(self, closed, sort): + index = monotonic_index(0, 11, closed=closed) + other = monotonic_index(5, 13, closed=closed) + + expected = monotonic_index(5, 11, closed=closed) + result = index[::-1].intersection(other, sort=sort) + if sort is None: + tm.assert_index_equal(result, expected) + assert tm.equalContents(result, expected) + + result = other[::-1].intersection(index, sort=sort) + if sort is None: + tm.assert_index_equal(result, expected) + assert tm.equalContents(result, expected) + + tm.assert_index_equal(index.intersection(index, sort=sort), index) + + # GH 26225: nested intervals + index = IntervalIndex.from_tuples([(1, 2), (1, 3), (1, 4), (0, 2)]) + other = IntervalIndex.from_tuples([(1, 2), (1, 3)]) + expected = IntervalIndex.from_tuples([(1, 2), (1, 3)]) + result = index.intersection(other) + tm.assert_index_equal(result, expected) + + # GH 26225 + index = IntervalIndex.from_tuples([(0, 3), (0, 2)]) + other = IntervalIndex.from_tuples([(0, 2), (1, 3)]) + expected = IntervalIndex.from_tuples([(0, 2)]) + result = index.intersection(other) + tm.assert_index_equal(result, expected) + + # GH 26225: duplicate nan element + index = IntervalIndex([np.nan, np.nan]) + other = IntervalIndex([np.nan]) + expected = IntervalIndex([np.nan]) + result = index.intersection(other) + tm.assert_index_equal(result, expected) + + def test_intersection_empty_result(self, closed, sort): + index = monotonic_index(0, 11, closed=closed) + + # GH 19101: empty result, same dtype + other = monotonic_index(300, 314, closed=closed) + expected = empty_index(dtype="int64", closed=closed) + result = index.intersection(other, sort=sort) + tm.assert_index_equal(result, expected) + + # GH 19101: empty result, different numeric dtypes -> common dtype is float64 + other = monotonic_index(300, 314, dtype="float64", closed=closed) + result = index.intersection(other, sort=sort) + expected = other[:0] + tm.assert_index_equal(result, expected) + + other = monotonic_index(300, 314, dtype="uint64", closed=closed) + result = index.intersection(other, sort=sort) + tm.assert_index_equal(result, expected) + + def test_intersection_duplicates(self): + # GH#38743 + index = IntervalIndex.from_tuples([(1, 2), (1, 2), (2, 3), (3, 4)]) + other = 
IntervalIndex.from_tuples([(1, 2), (2, 3)]) + expected = IntervalIndex.from_tuples([(1, 2), (2, 3)]) + result = index.intersection(other) + tm.assert_index_equal(result, expected) + + def test_difference(self, closed, sort): + index = IntervalIndex.from_arrays([1, 0, 3, 2], [1, 2, 3, 4], closed=closed) + result = index.difference(index[:1], sort=sort) + expected = index[1:] + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + + # GH 19101: empty result, same dtype + result = index.difference(index, sort=sort) + expected = empty_index(dtype="int64", closed=closed) + tm.assert_index_equal(result, expected) + + # GH 19101: empty result, different dtypes + other = IntervalIndex.from_arrays( + index.left.astype("float64"), index.right, closed=closed + ) + result = index.difference(other, sort=sort) + tm.assert_index_equal(result, expected) + + def test_symmetric_difference(self, closed, sort): + index = monotonic_index(0, 11, closed=closed) + result = index[1:].symmetric_difference(index[:-1], sort=sort) + expected = IntervalIndex([index[0], index[-1]]) + if sort is None: + tm.assert_index_equal(result, expected) + assert tm.equalContents(result, expected) + + # GH 19101: empty result, same dtype + result = index.symmetric_difference(index, sort=sort) + expected = empty_index(dtype="int64", closed=closed) + if sort is None: + tm.assert_index_equal(result, expected) + assert tm.equalContents(result, expected) + + # GH 19101: empty result, different dtypes + other = IntervalIndex.from_arrays( + index.left.astype("float64"), index.right, closed=closed + ) + result = index.symmetric_difference(other, sort=sort) + expected = empty_index(dtype="float64", closed=closed) + tm.assert_index_equal(result, expected) + + @pytest.mark.filterwarnings("ignore:'<' not supported between:RuntimeWarning") + @pytest.mark.parametrize( + "op_name", ["union", "intersection", "difference", "symmetric_difference"] + ) + def test_set_incompatible_types(self, closed, op_name, sort): + index = monotonic_index(0, 11, closed=closed) + set_op = getattr(index, op_name) + + # TODO: standardize return type of non-union setops type(self vs other) + # non-IntervalIndex + if op_name == "difference": + expected = index + else: + expected = getattr(index.astype("O"), op_name)(Index([1, 2, 3])) + result = set_op(Index([1, 2, 3]), sort=sort) + tm.assert_index_equal(result, expected) + + # mixed closed -> cast to object + for other_closed in {"right", "left", "both", "neither"} - {closed}: + other = monotonic_index(0, 11, closed=other_closed) + expected = getattr(index.astype(object), op_name)(other, sort=sort) + if op_name == "difference": + expected = index + result = set_op(other, sort=sort) + tm.assert_index_equal(result, expected) + + # GH 19016: incompatible dtypes -> cast to object + other = interval_range(Timestamp("20180101"), periods=9, closed=closed) + expected = getattr(index.astype(object), op_name)(other, sort=sort) + if op_name == "difference": + expected = index + result = set_op(other, sort=sort) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/conftest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/conftest.py new file mode 100644 index 00000000..3cc4fa47 --- /dev/null 
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/conftest.py
@@ -0,0 +1,77 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    Index,
+    MultiIndex,
+)
+
+
+# Note: identical to the "multi" entry in the top-level "index" fixture
+@pytest.fixture
+def idx():
+    # a MultiIndex used to test the general functionality of
+    # this object
+    major_axis = Index(["foo", "bar", "baz", "qux"])
+    minor_axis = Index(["one", "two"])
+
+    major_codes = np.array([0, 0, 1, 2, 3, 3])
+    minor_codes = np.array([0, 1, 0, 1, 0, 1])
+    index_names = ["first", "second"]
+    mi = MultiIndex(
+        levels=[major_axis, minor_axis],
+        codes=[major_codes, minor_codes],
+        names=index_names,
+        verify_integrity=False,
+    )
+    return mi
+
+
+@pytest.fixture
+def idx_dup():
+    # compare tests/indexes/multi/conftest.py
+    major_axis = Index(["foo", "bar", "baz", "qux"])
+    minor_axis = Index(["one", "two"])
+
+    major_codes = np.array([0, 0, 1, 0, 1, 1])
+    minor_codes = np.array([0, 1, 0, 1, 0, 1])
+    index_names = ["first", "second"]
+    mi = MultiIndex(
+        levels=[major_axis, minor_axis],
+        codes=[major_codes, minor_codes],
+        names=index_names,
+        verify_integrity=False,
+    )
+    return mi
+
+
+@pytest.fixture
+def index_names():
+    # names that match those in the idx fixture for testing equality of
+    # names assigned to the idx
+    return ["first", "second"]
+
+
+@pytest.fixture
+def narrow_multi_index():
+    """
+    Return a MultiIndex that is narrower than the display (<80 characters).
+    """
+    n = 1000
+    ci = pd.CategoricalIndex(list("a" * n) + (["abc"] * n))
+    dti = pd.date_range("2000-01-01", freq="s", periods=n * 2)
+    return MultiIndex.from_arrays([ci, ci.codes + 9, dti], names=["a", "b", "dti"])
+
+
+@pytest.fixture
+def wide_multi_index():
+    """
+    Return a MultiIndex that is wider than the display (>80 characters).
+ """ + n = 1000 + ci = pd.CategoricalIndex(list("a" * n) + (["abc"] * n)) + dti = pd.date_range("2000-01-01", freq="s", periods=n * 2) + levels = [ci, ci.codes + 9, dti, dti, dti] + names = ["a", "b", "dti_1", "dti_2", "dti_3"] + return MultiIndex.from_arrays(levels, names=names) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_analytics.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_analytics.py new file mode 100644 index 00000000..7097aa2b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_analytics.py @@ -0,0 +1,263 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + MultiIndex, + date_range, + period_range, +) +import pandas._testing as tm + + +def test_infer_objects(idx): + with pytest.raises(NotImplementedError, match="to_frame"): + idx.infer_objects() + + +def test_shift(idx): + # GH8083 test the base class for shift + msg = ( + "This method is only implemented for DatetimeIndex, PeriodIndex and " + "TimedeltaIndex; Got type MultiIndex" + ) + with pytest.raises(NotImplementedError, match=msg): + idx.shift(1) + with pytest.raises(NotImplementedError, match=msg): + idx.shift(1, 2) + + +def test_groupby(idx): + groups = idx.groupby(np.array([1, 1, 1, 2, 2, 2])) + labels = idx.tolist() + exp = {1: labels[:3], 2: labels[3:]} + tm.assert_dict_equal(groups, exp) + + # GH5620 + groups = idx.groupby(idx) + exp = {key: [key] for key in idx} + tm.assert_dict_equal(groups, exp) + + +def test_truncate_multiindex(): + # GH 34564 for MultiIndex level names check + major_axis = Index(list(range(4))) + minor_axis = Index(list(range(2))) + + major_codes = np.array([0, 0, 1, 2, 3, 3]) + minor_codes = np.array([0, 1, 0, 1, 0, 1]) + + index = MultiIndex( + levels=[major_axis, minor_axis], + codes=[major_codes, minor_codes], + names=["L1", "L2"], + ) + + result = index.truncate(before=1) + assert "foo" not in result.levels[0] + assert 1 in result.levels[0] + assert index.names == result.names + + result = index.truncate(after=1) + assert 2 not in result.levels[0] + assert 1 in result.levels[0] + assert index.names == result.names + + result = index.truncate(before=1, after=2) + assert len(result.levels[0]) == 2 + assert index.names == result.names + + msg = "after < before" + with pytest.raises(ValueError, match=msg): + index.truncate(3, 1) + + +# TODO: reshape + + +def test_reorder_levels(idx): + # this blows up + with pytest.raises(IndexError, match="^Too many levels"): + idx.reorder_levels([2, 1, 0]) + + +def test_numpy_repeat(): + reps = 2 + numbers = [1, 2, 3] + names = np.array(["foo", "bar"]) + + m = MultiIndex.from_product([numbers, names], names=names) + expected = MultiIndex.from_product([numbers, names.repeat(reps)], names=names) + tm.assert_index_equal(np.repeat(m, reps), expected) + + msg = "the 'axis' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.repeat(m, reps, axis=1) + + +def test_append_mixed_dtypes(): + # GH 13660 + dti = date_range("2011-01-01", freq="M", periods=3) + dti_tz = date_range("2011-01-01", freq="M", periods=3, tz="US/Eastern") + pi = period_range("2011-01", freq="M", periods=3) + + mi = MultiIndex.from_arrays( + [[1, 2, 3], [1.1, np.nan, 3.3], ["a", "b", "c"], dti, dti_tz, pi] + ) + assert mi.nlevels == 6 + + res = mi.append(mi) + exp = MultiIndex.from_arrays( + [ + [1, 2, 3, 1, 2, 3], + [1.1, np.nan, 3.3, 1.1, np.nan, 3.3], + ["a", "b", "c", "a", "b", "c"], + dti.append(dti), + 
dti_tz.append(dti_tz), + pi.append(pi), + ] + ) + tm.assert_index_equal(res, exp) + + other = MultiIndex.from_arrays( + [ + ["x", "y", "z"], + ["x", "y", "z"], + ["x", "y", "z"], + ["x", "y", "z"], + ["x", "y", "z"], + ["x", "y", "z"], + ] + ) + + res = mi.append(other) + exp = MultiIndex.from_arrays( + [ + [1, 2, 3, "x", "y", "z"], + [1.1, np.nan, 3.3, "x", "y", "z"], + ["a", "b", "c", "x", "y", "z"], + dti.append(Index(["x", "y", "z"])), + dti_tz.append(Index(["x", "y", "z"])), + pi.append(Index(["x", "y", "z"])), + ] + ) + tm.assert_index_equal(res, exp) + + +def test_iter(idx): + result = list(idx) + expected = [ + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ] + assert result == expected + + +def test_sub(idx): + first = idx + + # - now raises (previously was set op difference) + msg = "cannot perform __sub__ with this index type: MultiIndex" + with pytest.raises(TypeError, match=msg): + first - idx[-3:] + with pytest.raises(TypeError, match=msg): + idx[-3:] - first + with pytest.raises(TypeError, match=msg): + idx[-3:] - first.tolist() + msg = "cannot perform __rsub__ with this index type: MultiIndex" + with pytest.raises(TypeError, match=msg): + first.tolist() - idx[-3:] + + +def test_map(idx): + # callable + index = idx + + result = index.map(lambda x: x) + tm.assert_index_equal(result, index) + + +@pytest.mark.parametrize( + "mapper", + [ + lambda values, idx: {i: e for e, i in zip(values, idx)}, + lambda values, idx: pd.Series(values, idx), + ], +) +def test_map_dictlike(idx, mapper): + identity = mapper(idx.values, idx) + + # we don't infer to uint64 dtype for a dict + if idx.dtype == np.uint64 and isinstance(identity, dict): + expected = idx.astype("int64") + else: + expected = idx + + result = idx.map(identity) + tm.assert_index_equal(result, expected) + + # empty mappable + expected = Index([np.nan] * len(idx)) + result = idx.map(mapper(expected, idx)) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "func", + [ + np.exp, + np.exp2, + np.expm1, + np.log, + np.log2, + np.log10, + np.log1p, + np.sqrt, + np.sin, + np.cos, + np.tan, + np.arcsin, + np.arccos, + np.arctan, + np.sinh, + np.cosh, + np.tanh, + np.arcsinh, + np.arccosh, + np.arctanh, + np.deg2rad, + np.rad2deg, + ], + ids=lambda func: func.__name__, +) +def test_numpy_ufuncs(idx, func): + # test ufuncs of numpy. 
see: + # https://numpy.org/doc/stable/reference/ufuncs.html + + expected_exception = TypeError + msg = ( + "loop of ufunc does not support argument 0 of type tuple which " + f"has no callable {func.__name__} method" + ) + with pytest.raises(expected_exception, match=msg): + func(idx) + + +@pytest.mark.parametrize( + "func", + [np.isfinite, np.isinf, np.isnan, np.signbit], + ids=lambda func: func.__name__, +) +def test_numpy_type_funcs(idx, func): + msg = ( + f"ufunc '{func.__name__}' not supported for the input types, and the inputs " + "could not be safely coerced to any supported types according to " + "the casting rule ''safe''" + ) + with pytest.raises(TypeError, match=msg): + func(idx) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_astype.py new file mode 100644 index 00000000..29908537 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_astype.py @@ -0,0 +1,30 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas._testing as tm + + +def test_astype(idx): + expected = idx.copy() + actual = idx.astype("O") + tm.assert_copy(actual.levels, expected.levels) + tm.assert_copy(actual.codes, expected.codes) + assert actual.names == list(expected.names) + + with pytest.raises(TypeError, match="^Setting.*dtype.*object"): + idx.astype(np.dtype(int)) + + +@pytest.mark.parametrize("ordered", [True, False]) +def test_astype_category(idx, ordered): + # GH 18630 + msg = "> 1 ndim Categorical are not supported at this time" + with pytest.raises(NotImplementedError, match=msg): + idx.astype(CategoricalDtype(ordered=ordered)) + + if ordered is False: + # dtype='category' defaults to ordered=False, so only test once + with pytest.raises(NotImplementedError, match=msg): + idx.astype("category") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_compat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_compat.py new file mode 100644 index 00000000..f91856c3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_compat.py @@ -0,0 +1,122 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import MultiIndex +import pandas._testing as tm + + +def test_numeric_compat(idx): + with pytest.raises(TypeError, match="cannot perform __mul__"): + idx * 1 + + with pytest.raises(TypeError, match="cannot perform __rmul__"): + 1 * idx + + div_err = "cannot perform __truediv__" + with pytest.raises(TypeError, match=div_err): + idx / 1 + + div_err = div_err.replace(" __", " __r") + with pytest.raises(TypeError, match=div_err): + 1 / idx + + with pytest.raises(TypeError, match="cannot perform __floordiv__"): + idx // 1 + + with pytest.raises(TypeError, match="cannot perform __rfloordiv__"): + 1 // idx + + +@pytest.mark.parametrize("method", ["all", "any", "__invert__"]) +def test_logical_compat(idx, method): + msg = f"cannot perform {method}" + + with pytest.raises(TypeError, match=msg): + getattr(idx, method)() + + +def test_inplace_mutation_resets_values(): + levels = [["a", "b", "c"], [4]] + levels2 = [[1, 2, 3], ["a"]] + codes = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]] + + mi1 = MultiIndex(levels=levels, codes=codes) + mi2 = MultiIndex(levels=levels2, codes=codes) + + # instantiating MultiIndex should not access/cache _.values + assert "_values" not in mi1._cache + assert "_values" not in mi2._cache 
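+    # The checks below rely on ._values being lazily cached under the
+    # "_values" key: set_levels/set_codes return new objects, so they must
+    # leave the original index's cache untouched.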
+ + vals = mi1.values.copy() + vals2 = mi2.values.copy() + + # accessing .values should cache ._values + assert mi1._values is mi1._cache["_values"] + assert mi1.values is mi1._cache["_values"] + assert isinstance(mi1._cache["_values"], np.ndarray) + + # Make sure level setting works + new_vals = mi1.set_levels(levels2).values + tm.assert_almost_equal(vals2, new_vals) + + # Doesn't drop _values from _cache [implementation detail] + tm.assert_almost_equal(mi1._cache["_values"], vals) + + # ...and values is still same too + tm.assert_almost_equal(mi1.values, vals) + + # Make sure label setting works too + codes2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]] + exp_values = np.empty((6,), dtype=object) + exp_values[:] = [(1, "a")] * 6 + + # Must be 1d array of tuples + assert exp_values.shape == (6,) + + new_mi = mi2.set_codes(codes2) + assert "_values" not in new_mi._cache + new_values = new_mi.values + assert "_values" in new_mi._cache + + # Shouldn't change cache + tm.assert_almost_equal(mi2._cache["_values"], vals2) + + # Should have correct values + tm.assert_almost_equal(exp_values, new_values) + + +def test_boxable_categorical_values(): + cat = pd.Categorical(pd.date_range("2012-01-01", periods=3, freq="H")) + result = MultiIndex.from_product([["a", "b", "c"], cat]).values + expected = pd.Series( + [ + ("a", pd.Timestamp("2012-01-01 00:00:00")), + ("a", pd.Timestamp("2012-01-01 01:00:00")), + ("a", pd.Timestamp("2012-01-01 02:00:00")), + ("b", pd.Timestamp("2012-01-01 00:00:00")), + ("b", pd.Timestamp("2012-01-01 01:00:00")), + ("b", pd.Timestamp("2012-01-01 02:00:00")), + ("c", pd.Timestamp("2012-01-01 00:00:00")), + ("c", pd.Timestamp("2012-01-01 01:00:00")), + ("c", pd.Timestamp("2012-01-01 02:00:00")), + ] + ).values + tm.assert_numpy_array_equal(result, expected) + result = pd.DataFrame({"a": ["a", "b", "c"], "b": cat, "c": np.array(cat)}).values + expected = pd.DataFrame( + { + "a": ["a", "b", "c"], + "b": [ + pd.Timestamp("2012-01-01 00:00:00"), + pd.Timestamp("2012-01-01 01:00:00"), + pd.Timestamp("2012-01-01 02:00:00"), + ], + "c": [ + pd.Timestamp("2012-01-01 00:00:00"), + pd.Timestamp("2012-01-01 01:00:00"), + pd.Timestamp("2012-01-01 02:00:00"), + ], + } + ).values + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_constructors.py new file mode 100644 index 00000000..91ec1b24 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_constructors.py @@ -0,0 +1,860 @@ +from datetime import ( + date, + datetime, +) +import itertools + +import numpy as np +import pytest + +from pandas.compat import pa_version_under7p0 + +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike + +import pandas as pd +from pandas import ( + Index, + MultiIndex, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + + +def test_constructor_single_level(): + result = MultiIndex( + levels=[["foo", "bar", "baz", "qux"]], codes=[[0, 1, 2, 3]], names=["first"] + ) + assert isinstance(result, MultiIndex) + expected = Index(["foo", "bar", "baz", "qux"], name="first") + tm.assert_index_equal(result.levels[0], expected) + assert result.names == ["first"] + + +def test_constructor_no_levels(): + msg = "non-zero number of levels/codes" + with pytest.raises(ValueError, match=msg): + MultiIndex(levels=[], codes=[]) + + msg = "Must pass both levels and codes" + with 
pytest.raises(TypeError, match=msg): + MultiIndex(levels=[]) + with pytest.raises(TypeError, match=msg): + MultiIndex(codes=[]) + + +def test_constructor_nonhashable_names(): + # GH 20527 + levels = [[1, 2], ["one", "two"]] + codes = [[0, 0, 1, 1], [0, 1, 0, 1]] + names = (["foo"], ["bar"]) + msg = r"MultiIndex\.name must be a hashable type" + with pytest.raises(TypeError, match=msg): + MultiIndex(levels=levels, codes=codes, names=names) + + # With .rename() + mi = MultiIndex( + levels=[[1, 2], ["one", "two"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], + names=("foo", "bar"), + ) + renamed = [["fooo"], ["barr"]] + with pytest.raises(TypeError, match=msg): + mi.rename(names=renamed) + + # With .set_names() + with pytest.raises(TypeError, match=msg): + mi.set_names(names=renamed) + + +def test_constructor_mismatched_codes_levels(idx): + codes = [np.array([1]), np.array([2]), np.array([3])] + levels = ["a"] + + msg = "Length of levels and codes must be the same" + with pytest.raises(ValueError, match=msg): + MultiIndex(levels=levels, codes=codes) + + length_error = ( + r"On level 0, code max \(3\) >= length of level \(1\)\. " + "NOTE: this index is in an inconsistent state" + ) + label_error = r"Unequal code lengths: \[4, 2\]" + code_value_error = r"On level 0, code value \(-2\) < -1" + + # important to check that it's looking at the right thing. + with pytest.raises(ValueError, match=length_error): + MultiIndex(levels=[["a"], ["b"]], codes=[[0, 1, 2, 3], [0, 3, 4, 1]]) + + with pytest.raises(ValueError, match=label_error): + MultiIndex(levels=[["a"], ["b"]], codes=[[0, 0, 0, 0], [0, 0]]) + + # external API + with pytest.raises(ValueError, match=length_error): + idx.copy().set_levels([["a"], ["b"]]) + + with pytest.raises(ValueError, match=label_error): + idx.copy().set_codes([[0, 0, 0, 0], [0, 0]]) + + # test set_codes with verify_integrity=False + # the setting should not raise any value error + idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]], verify_integrity=False) + + # code value smaller than -1 + with pytest.raises(ValueError, match=code_value_error): + MultiIndex(levels=[["a"], ["b"]], codes=[[0, -2], [0, 0]]) + + +def test_na_levels(): + # GH26408 + # test if codes are re-assigned value -1 for levels + # with missing values (NaN, NaT, None) + result = MultiIndex( + levels=[[np.nan, None, pd.NaT, 128, 2]], codes=[[0, -1, 1, 2, 3, 4]] + ) + expected = MultiIndex( + levels=[[np.nan, None, pd.NaT, 128, 2]], codes=[[-1, -1, -1, -1, 3, 4]] + ) + tm.assert_index_equal(result, expected) + + result = MultiIndex( + levels=[[np.nan, "s", pd.NaT, 128, None]], codes=[[0, -1, 1, 2, 3, 4]] + ) + expected = MultiIndex( + levels=[[np.nan, "s", pd.NaT, 128, None]], codes=[[-1, -1, 1, -1, 3, -1]] + ) + tm.assert_index_equal(result, expected) + + # verify set_levels and set_codes + result = MultiIndex( + levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]] + ).set_levels([[np.nan, "s", pd.NaT, 128, None]]) + tm.assert_index_equal(result, expected) + + result = MultiIndex( + levels=[[np.nan, "s", pd.NaT, 128, None]], codes=[[1, 2, 2, 2, 2, 2]] + ).set_codes([[0, -1, 1, 2, 3, 4]]) + tm.assert_index_equal(result, expected) + + +def test_copy_in_constructor(): + levels = np.array(["a", "b", "c"]) + codes = np.array([1, 1, 2, 0, 0, 1, 1]) + val = codes[0] + mi = MultiIndex(levels=[levels, levels], codes=[codes, codes], copy=True) + assert mi.codes[0][0] == val + codes[0] = 15 + assert mi.codes[0][0] == val + val = levels[0] + levels[0] = "PANDA" + assert mi.levels[0][0] == val + + +# 
---------------------------------------------------------------------------- +# from_arrays +# ---------------------------------------------------------------------------- +def test_from_arrays(idx): + arrays = [ + np.asarray(lev).take(level_codes) + for lev, level_codes in zip(idx.levels, idx.codes) + ] + + # list of arrays as input + result = MultiIndex.from_arrays(arrays, names=idx.names) + tm.assert_index_equal(result, idx) + + # infer correctly + result = MultiIndex.from_arrays([[pd.NaT, Timestamp("20130101")], ["a", "b"]]) + assert result.levels[0].equals(Index([Timestamp("20130101")])) + assert result.levels[1].equals(Index(["a", "b"])) + + +def test_from_arrays_iterator(idx): + # GH 18434 + arrays = [ + np.asarray(lev).take(level_codes) + for lev, level_codes in zip(idx.levels, idx.codes) + ] + + # iterator as input + result = MultiIndex.from_arrays(iter(arrays), names=idx.names) + tm.assert_index_equal(result, idx) + + # invalid iterator input + msg = "Input must be a list / sequence of array-likes." + with pytest.raises(TypeError, match=msg): + MultiIndex.from_arrays(0) + + +def test_from_arrays_tuples(idx): + arrays = tuple( + tuple(np.asarray(lev).take(level_codes)) + for lev, level_codes in zip(idx.levels, idx.codes) + ) + + # tuple of tuples as input + result = MultiIndex.from_arrays(arrays, names=idx.names) + tm.assert_index_equal(result, idx) + + +@pytest.mark.parametrize( + ("idx1", "idx2"), + [ + ( + pd.period_range("2011-01-01", freq="D", periods=3), + pd.period_range("2015-01-01", freq="H", periods=3), + ), + ( + date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"), + date_range("2015-01-01 10:00", freq="H", periods=3, tz="Asia/Tokyo"), + ), + ( + pd.timedelta_range("1 days", freq="D", periods=3), + pd.timedelta_range("2 hours", freq="H", periods=3), + ), + ], +) +def test_from_arrays_index_series_period_datetimetz_and_timedelta(idx1, idx2): + result = MultiIndex.from_arrays([idx1, idx2]) + tm.assert_index_equal(result.get_level_values(0), idx1) + tm.assert_index_equal(result.get_level_values(1), idx2) + + result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)]) + tm.assert_index_equal(result2.get_level_values(0), idx1) + tm.assert_index_equal(result2.get_level_values(1), idx2) + + tm.assert_index_equal(result, result2) + + +def test_from_arrays_index_datetimelike_mixed(): + idx1 = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern") + idx2 = date_range("2015-01-01 10:00", freq="H", periods=3) + idx3 = pd.timedelta_range("1 days", freq="D", periods=3) + idx4 = pd.period_range("2011-01-01", freq="D", periods=3) + + result = MultiIndex.from_arrays([idx1, idx2, idx3, idx4]) + tm.assert_index_equal(result.get_level_values(0), idx1) + tm.assert_index_equal(result.get_level_values(1), idx2) + tm.assert_index_equal(result.get_level_values(2), idx3) + tm.assert_index_equal(result.get_level_values(3), idx4) + + result2 = MultiIndex.from_arrays( + [Series(idx1), Series(idx2), Series(idx3), Series(idx4)] + ) + tm.assert_index_equal(result2.get_level_values(0), idx1) + tm.assert_index_equal(result2.get_level_values(1), idx2) + tm.assert_index_equal(result2.get_level_values(2), idx3) + tm.assert_index_equal(result2.get_level_values(3), idx4) + + tm.assert_index_equal(result, result2) + + +def test_from_arrays_index_series_categorical(): + # GH13743 + idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=False) + idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=True) + + result = 
MultiIndex.from_arrays([idx1, idx2]) + tm.assert_index_equal(result.get_level_values(0), idx1) + tm.assert_index_equal(result.get_level_values(1), idx2) + + result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)]) + tm.assert_index_equal(result2.get_level_values(0), idx1) + tm.assert_index_equal(result2.get_level_values(1), idx2) + + result3 = MultiIndex.from_arrays([idx1.values, idx2.values]) + tm.assert_index_equal(result3.get_level_values(0), idx1) + tm.assert_index_equal(result3.get_level_values(1), idx2) + + +def test_from_arrays_empty(): + # 0 levels + msg = "Must pass non-zero number of levels/codes" + with pytest.raises(ValueError, match=msg): + MultiIndex.from_arrays(arrays=[]) + + # 1 level + result = MultiIndex.from_arrays(arrays=[[]], names=["A"]) + assert isinstance(result, MultiIndex) + expected = Index([], name="A") + tm.assert_index_equal(result.levels[0], expected) + assert result.names == ["A"] + + # N levels + for N in [2, 3]: + arrays = [[]] * N + names = list("ABC")[:N] + result = MultiIndex.from_arrays(arrays=arrays, names=names) + expected = MultiIndex(levels=[[]] * N, codes=[[]] * N, names=names) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "invalid_sequence_of_arrays", + [ + 1, + [1], + [1, 2], + [[1], 2], + [1, [2]], + "a", + ["a"], + ["a", "b"], + [["a"], "b"], + (1,), + (1, 2), + ([1], 2), + (1, [2]), + "a", + ("a",), + ("a", "b"), + (["a"], "b"), + [(1,), 2], + [1, (2,)], + [("a",), "b"], + ((1,), 2), + (1, (2,)), + (("a",), "b"), + ], +) +def test_from_arrays_invalid_input(invalid_sequence_of_arrays): + msg = "Input must be a list / sequence of array-likes" + with pytest.raises(TypeError, match=msg): + MultiIndex.from_arrays(arrays=invalid_sequence_of_arrays) + + +@pytest.mark.parametrize( + "idx1, idx2", [([1, 2, 3], ["a", "b"]), ([], ["a", "b"]), ([1, 2, 3], [])] +) +def test_from_arrays_different_lengths(idx1, idx2): + # see gh-13599 + msg = "^all arrays must be same length$" + with pytest.raises(ValueError, match=msg): + MultiIndex.from_arrays([idx1, idx2]) + + +def test_from_arrays_respects_none_names(): + # GH27292 + a = Series([1, 2, 3], name="foo") + b = Series(["a", "b", "c"], name="bar") + + result = MultiIndex.from_arrays([a, b], names=None) + expected = MultiIndex( + levels=[[1, 2, 3], ["a", "b", "c"]], codes=[[0, 1, 2], [0, 1, 2]], names=None + ) + + tm.assert_index_equal(result, expected) + + +# ---------------------------------------------------------------------------- +# from_tuples +# ---------------------------------------------------------------------------- +def test_from_tuples(): + msg = "Cannot infer number of levels from empty list" + with pytest.raises(TypeError, match=msg): + MultiIndex.from_tuples([]) + + expected = MultiIndex( + levels=[[1, 3], [2, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"] + ) + + # input tuples + result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=["a", "b"]) + tm.assert_index_equal(result, expected) + + +def test_from_tuples_iterator(): + # GH 18434 + # input iterator for tuples + expected = MultiIndex( + levels=[[1, 3], [2, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"] + ) + + result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=["a", "b"]) + tm.assert_index_equal(result, expected) + + # input non-iterables + msg = "Input must be a list / sequence of tuple-likes." 
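+    # a scalar such as 0 is not iterable, so from_tuples rejects it outright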
+ with pytest.raises(TypeError, match=msg): + MultiIndex.from_tuples(0) + + +def test_from_tuples_empty(): + # GH 16777 + result = MultiIndex.from_tuples([], names=["a", "b"]) + expected = MultiIndex.from_arrays(arrays=[[], []], names=["a", "b"]) + tm.assert_index_equal(result, expected) + + +def test_from_tuples_index_values(idx): + result = MultiIndex.from_tuples(idx) + assert (result.values == idx.values).all() + + +def test_tuples_with_name_string(): + # GH 15110 and GH 14848 + + li = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] + msg = "Names should be list-like for a MultiIndex" + with pytest.raises(ValueError, match=msg): + Index(li, name="abc") + with pytest.raises(ValueError, match=msg): + Index(li, name="a") + + +def test_from_tuples_with_tuple_label(): + # GH 15457 + expected = pd.DataFrame( + [[2, 1, 2], [4, (1, 2), 3]], columns=["a", "b", "c"] + ).set_index(["a", "b"]) + idx = MultiIndex.from_tuples([(2, 1), (4, (1, 2))], names=("a", "b")) + result = pd.DataFrame([2, 3], columns=["c"], index=idx) + tm.assert_frame_equal(expected, result) + + +# ---------------------------------------------------------------------------- +# from_product +# ---------------------------------------------------------------------------- +def test_from_product_empty_zero_levels(): + # 0 levels + msg = "Must pass non-zero number of levels/codes" + with pytest.raises(ValueError, match=msg): + MultiIndex.from_product([]) + + +def test_from_product_empty_one_level(): + result = MultiIndex.from_product([[]], names=["A"]) + expected = Index([], name="A") + tm.assert_index_equal(result.levels[0], expected) + assert result.names == ["A"] + + +@pytest.mark.parametrize( + "first, second", [([], []), (["foo", "bar", "baz"], []), ([], ["a", "b", "c"])] +) +def test_from_product_empty_two_levels(first, second): + names = ["A", "B"] + result = MultiIndex.from_product([first, second], names=names) + expected = MultiIndex(levels=[first, second], codes=[[], []], names=names) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("N", list(range(4))) +def test_from_product_empty_three_levels(N): + # GH12258 + names = ["A", "B", "C"] + lvl2 = list(range(N)) + result = MultiIndex.from_product([[], lvl2, []], names=names) + expected = MultiIndex(levels=[[], lvl2, []], codes=[[], [], []], names=names) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "invalid_input", [1, [1], [1, 2], [[1], 2], "a", ["a"], ["a", "b"], [["a"], "b"]] +) +def test_from_product_invalid_input(invalid_input): + msg = r"Input must be a list / sequence of iterables|Input must be list-like" + with pytest.raises(TypeError, match=msg): + MultiIndex.from_product(iterables=invalid_input) + + +def test_from_product_datetimeindex(): + dt_index = date_range("2000-01-01", periods=2) + mi = MultiIndex.from_product([[1, 2], dt_index]) + etalon = construct_1d_object_array_from_listlike( + [ + (1, Timestamp("2000-01-01")), + (1, Timestamp("2000-01-02")), + (2, Timestamp("2000-01-01")), + (2, Timestamp("2000-01-02")), + ] + ) + tm.assert_numpy_array_equal(mi.values, etalon) + + +def test_from_product_rangeindex(): + # RangeIndex is preserved by factorize, so preserved in levels + rng = Index(range(5)) + other = ["a", "b"] + mi = MultiIndex.from_product([rng, other]) + tm.assert_index_equal(mi._levels[0], rng, exact=True) + + +@pytest.mark.parametrize("ordered", [False, True]) +@pytest.mark.parametrize("f", [lambda x: x, lambda x: Series(x), lambda x: x.values]) +def test_from_product_index_series_categorical(ordered, f): + # 
GH13743 + first = ["foo", "bar"] + + idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=ordered) + expected = pd.CategoricalIndex( + list("abcaab") + list("abcaab"), categories=list("bac"), ordered=ordered + ) + + result = MultiIndex.from_product([first, f(idx)]) + tm.assert_index_equal(result.get_level_values(1), expected) + + +def test_from_product(): + first = ["foo", "bar", "buz"] + second = ["a", "b", "c"] + names = ["first", "second"] + result = MultiIndex.from_product([first, second], names=names) + + tuples = [ + ("foo", "a"), + ("foo", "b"), + ("foo", "c"), + ("bar", "a"), + ("bar", "b"), + ("bar", "c"), + ("buz", "a"), + ("buz", "b"), + ("buz", "c"), + ] + expected = MultiIndex.from_tuples(tuples, names=names) + + tm.assert_index_equal(result, expected) + + +def test_from_product_iterator(): + # GH 18434 + first = ["foo", "bar", "buz"] + second = ["a", "b", "c"] + names = ["first", "second"] + tuples = [ + ("foo", "a"), + ("foo", "b"), + ("foo", "c"), + ("bar", "a"), + ("bar", "b"), + ("bar", "c"), + ("buz", "a"), + ("buz", "b"), + ("buz", "c"), + ] + expected = MultiIndex.from_tuples(tuples, names=names) + + # iterator as input + result = MultiIndex.from_product(iter([first, second]), names=names) + tm.assert_index_equal(result, expected) + + # Invalid non-iterable input + msg = "Input must be a list / sequence of iterables." + with pytest.raises(TypeError, match=msg): + MultiIndex.from_product(0) + + +@pytest.mark.parametrize( + "a, b, expected_names", + [ + ( + Series([1, 2, 3], name="foo"), + Series(["a", "b"], name="bar"), + ["foo", "bar"], + ), + (Series([1, 2, 3], name="foo"), ["a", "b"], ["foo", None]), + ([1, 2, 3], ["a", "b"], None), + ], +) +def test_from_product_infer_names(a, b, expected_names): + # GH27292 + result = MultiIndex.from_product([a, b]) + expected = MultiIndex( + levels=[[1, 2, 3], ["a", "b"]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + names=expected_names, + ) + tm.assert_index_equal(result, expected) + + +def test_from_product_respects_none_names(): + # GH27292 + a = Series([1, 2, 3], name="foo") + b = Series(["a", "b"], name="bar") + + result = MultiIndex.from_product([a, b], names=None) + expected = MultiIndex( + levels=[[1, 2, 3], ["a", "b"]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + names=None, + ) + tm.assert_index_equal(result, expected) + + +def test_from_product_readonly(): + # GH#15286 passing read-only array to from_product + a = np.array(range(3)) + b = ["a", "b"] + expected = MultiIndex.from_product([a, b]) + + a.setflags(write=False) + result = MultiIndex.from_product([a, b]) + tm.assert_index_equal(result, expected) + + +def test_create_index_existing_name(idx): + # GH11193, when an existing index is passed, and a new name is not + # specified, the new index should inherit the previous object name + index = idx + index.names = ["foo", "bar"] + result = Index(index) + expected = Index( + Index( + [ + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ], + dtype="object", + ) + ) + tm.assert_index_equal(result, expected) + + result = Index(index, name="A") + expected = Index( + Index( + [ + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ], + dtype="object", + ), + name="A", + ) + tm.assert_index_equal(result, expected) + + +# ---------------------------------------------------------------------------- +# from_frame +# 
----------------------------------------------------------------------------
+def test_from_frame():
+    # GH 22420
+    df = pd.DataFrame(
+        [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]], columns=["L1", "L2"]
+    )
+    expected = MultiIndex.from_tuples(
+        [("a", "a"), ("a", "b"), ("b", "a"), ("b", "b")], names=["L1", "L2"]
+    )
+    result = MultiIndex.from_frame(df)
+    tm.assert_index_equal(expected, result)
+
+
+@pytest.mark.skipif(pa_version_under7p0, reason="minimum pyarrow not installed")
+def test_from_frame_missing_values_multiIndex():
+    # GH 39984
+    import pyarrow as pa
+
+    df = pd.DataFrame(
+        {
+            "a": Series([1, 2, None], dtype="Int64"),
+            "b": pd.Float64Dtype().__from_arrow__(pa.array([0.2, np.nan, None])),
+        }
+    )
+    multi_indexed = MultiIndex.from_frame(df)
+    expected = MultiIndex.from_arrays(
+        [
+            Series([1, 2, None]).astype("Int64"),
+            pd.Float64Dtype().__from_arrow__(pa.array([0.2, np.nan, None])),
+        ],
+        names=["a", "b"],
+    )
+    tm.assert_index_equal(multi_indexed, expected)
+
+
+@pytest.mark.parametrize(
+    "non_frame",
+    [
+        Series([1, 2, 3, 4]),
+        [1, 2, 3, 4],
+        [[1, 2], [3, 4], [5, 6]],
+        Index([1, 2, 3, 4]),
+        np.array([[1, 2], [3, 4], [5, 6]]),
+        27,
+    ],
+)
+def test_from_frame_error(non_frame):
+    # GH 22420
+    with pytest.raises(TypeError, match="Input must be a DataFrame"):
+        MultiIndex.from_frame(non_frame)
+
+
+def test_from_frame_dtype_fidelity():
+    # GH 22420
+    df = pd.DataFrame(
+        {
+            "dates": date_range("19910905", periods=6, tz="US/Eastern"),
+            "a": [1, 1, 1, 2, 2, 2],
+            "b": pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True),
+            "c": ["x", "x", "y", "z", "x", "y"],
+        }
+    )
+    original_dtypes = df.dtypes.to_dict()
+
+    expected_mi = MultiIndex.from_arrays(
+        [
+            date_range("19910905", periods=6, tz="US/Eastern"),
+            [1, 1, 1, 2, 2, 2],
+            pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True),
+            ["x", "x", "y", "z", "x", "y"],
+        ],
+        names=["dates", "a", "b", "c"],
+    )
+    mi = MultiIndex.from_frame(df)
+    mi_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)}
+
+    tm.assert_index_equal(expected_mi, mi)
+    assert original_dtypes == mi_dtypes
+
+
+@pytest.mark.parametrize(
+    "names_in,names_out", [(None, [("L1", "x"), ("L2", "y")]), (["x", "y"], ["x", "y"])]
+)
+def test_from_frame_valid_names(names_in, names_out):
+    # GH 22420
+    df = pd.DataFrame(
+        [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]],
+        columns=MultiIndex.from_tuples([("L1", "x"), ("L2", "y")]),
+    )
+    mi = MultiIndex.from_frame(df, names=names_in)
+    assert mi.names == names_out
+
+
+@pytest.mark.parametrize(
+    "names,expected_error_msg",
+    [
+        ("bad_input", "Names should be list-like for a MultiIndex"),
+        (["a", "b", "c"], "Length of names must match number of levels in MultiIndex"),
+    ],
+)
+def test_from_frame_invalid_names(names, expected_error_msg):
+    # GH 22420
+    df = pd.DataFrame(
+        [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]],
+        columns=MultiIndex.from_tuples([("L1", "x"), ("L2", "y")]),
+    )
+    with pytest.raises(ValueError, match=expected_error_msg):
+        MultiIndex.from_frame(df, names=names)
+
+
+def test_index_equal_empty_iterable():
+    # #16844
+    a = MultiIndex(levels=[[], []], codes=[[], []], names=["a", "b"])
+    b = MultiIndex.from_arrays(arrays=[[], []], names=["a", "b"])
+    tm.assert_index_equal(a, b)
+
+
+def test_raise_invalid_sortorder():
+    # Test that the MultiIndex constructor raises when an incorrect sortorder is given
+    # GH#28518
+
+    levels = [[0, 1], [0, 1, 2]]
+
+    # Correct sortorder
+    MultiIndex(
+        levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1,
2]], sortorder=2 + ) + + with pytest.raises(ValueError, match=r".* sortorder 2 with lexsort_depth 1.*"): + MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], sortorder=2 + ) + + with pytest.raises(ValueError, match=r".* sortorder 1 with lexsort_depth 0.*"): + MultiIndex( + levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], sortorder=1 + ) + + +def test_datetimeindex(): + idx1 = pd.DatetimeIndex( + ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"] * 2, tz="Asia/Tokyo" + ) + idx2 = date_range("2010/01/01", periods=6, freq="M", tz="US/Eastern") + idx = MultiIndex.from_arrays([idx1, idx2]) + + expected1 = pd.DatetimeIndex( + ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"], tz="Asia/Tokyo" + ) + + tm.assert_index_equal(idx.levels[0], expected1) + tm.assert_index_equal(idx.levels[1], idx2) + + # from datetime combos + # GH 7888 + date1 = np.datetime64("today") + date2 = datetime.today() + date3 = Timestamp.today() + + for d1, d2 in itertools.product([date1, date2, date3], [date1, date2, date3]): + index = MultiIndex.from_product([[d1], [d2]]) + assert isinstance(index.levels[0], pd.DatetimeIndex) + assert isinstance(index.levels[1], pd.DatetimeIndex) + + # but NOT date objects, matching Index behavior + date4 = date.today() + index = MultiIndex.from_product([[date4], [date2]]) + assert not isinstance(index.levels[0], pd.DatetimeIndex) + assert isinstance(index.levels[1], pd.DatetimeIndex) + + +def test_constructor_with_tz(): + index = pd.DatetimeIndex( + ["2013/01/01 09:00", "2013/01/02 09:00"], name="dt1", tz="US/Pacific" + ) + columns = pd.DatetimeIndex( + ["2014/01/01 09:00", "2014/01/02 09:00"], name="dt2", tz="Asia/Tokyo" + ) + + result = MultiIndex.from_arrays([index, columns]) + + assert result.names == ["dt1", "dt2"] + tm.assert_index_equal(result.levels[0], index) + tm.assert_index_equal(result.levels[1], columns) + + result = MultiIndex.from_arrays([Series(index), Series(columns)]) + + assert result.names == ["dt1", "dt2"] + tm.assert_index_equal(result.levels[0], index) + tm.assert_index_equal(result.levels[1], columns) + + +def test_multiindex_inference_consistency(): + # check that inference behavior matches the base class + + v = date.today() + + arr = [v, v] + + idx = Index(arr) + assert idx.dtype == object + + mi = MultiIndex.from_arrays([arr]) + lev = mi.levels[0] + assert lev.dtype == object + + mi = MultiIndex.from_product([arr]) + lev = mi.levels[0] + assert lev.dtype == object + + mi = MultiIndex.from_tuples([(x,) for x in arr]) + lev = mi.levels[0] + assert lev.dtype == object + + +def test_dtype_representation(): + # GH#46900 + pmidx = MultiIndex.from_arrays([[1], ["a"]], names=[("a", "b"), ("c", "d")]) + result = pmidx.dtypes + expected = Series( + ["int64", "object"], index=MultiIndex.from_tuples([("a", "b"), ("c", "d")]) + ) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_conversion.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_conversion.py new file mode 100644 index 00000000..3c2ca045 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_conversion.py @@ -0,0 +1,164 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + MultiIndex, +) +import pandas._testing as tm + + +def test_to_numpy(idx): + result = idx.to_numpy() + exp = idx.values + tm.assert_numpy_array_equal(result, exp) + + +def test_to_frame(): + tuples = [(1, "one"), 
(1, "two"), (2, "one"), (2, "two")] + + index = MultiIndex.from_tuples(tuples) + result = index.to_frame(index=False) + expected = DataFrame(tuples) + tm.assert_frame_equal(result, expected) + + result = index.to_frame() + expected.index = index + tm.assert_frame_equal(result, expected) + + tuples = [(1, "one"), (1, "two"), (2, "one"), (2, "two")] + index = MultiIndex.from_tuples(tuples, names=["first", "second"]) + result = index.to_frame(index=False) + expected = DataFrame(tuples) + expected.columns = ["first", "second"] + tm.assert_frame_equal(result, expected) + + result = index.to_frame() + expected.index = index + tm.assert_frame_equal(result, expected) + + # See GH-22580 + index = MultiIndex.from_tuples(tuples) + result = index.to_frame(index=False, name=["first", "second"]) + expected = DataFrame(tuples) + expected.columns = ["first", "second"] + tm.assert_frame_equal(result, expected) + + result = index.to_frame(name=["first", "second"]) + expected.index = index + expected.columns = ["first", "second"] + tm.assert_frame_equal(result, expected) + + msg = "'name' must be a list / sequence of column names." + with pytest.raises(TypeError, match=msg): + index.to_frame(name="first") + + msg = "'name' should have same length as number of levels on index." + with pytest.raises(ValueError, match=msg): + index.to_frame(name=["first"]) + + # Tests for datetime index + index = MultiIndex.from_product([range(5), pd.date_range("20130101", periods=3)]) + result = index.to_frame(index=False) + expected = DataFrame( + { + 0: np.repeat(np.arange(5, dtype="int64"), 3), + 1: np.tile(pd.date_range("20130101", periods=3), 5), + } + ) + tm.assert_frame_equal(result, expected) + + result = index.to_frame() + expected.index = index + tm.assert_frame_equal(result, expected) + + # See GH-22580 + result = index.to_frame(index=False, name=["first", "second"]) + expected = DataFrame( + { + "first": np.repeat(np.arange(5, dtype="int64"), 3), + "second": np.tile(pd.date_range("20130101", periods=3), 5), + } + ) + tm.assert_frame_equal(result, expected) + + result = index.to_frame(name=["first", "second"]) + expected.index = index + tm.assert_frame_equal(result, expected) + + +def test_to_frame_dtype_fidelity(): + # GH 22420 + mi = MultiIndex.from_arrays( + [ + pd.date_range("19910905", periods=6, tz="US/Eastern"), + [1, 1, 1, 2, 2, 2], + pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True), + ["x", "x", "y", "z", "x", "y"], + ], + names=["dates", "a", "b", "c"], + ) + original_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)} + + expected_df = DataFrame( + { + "dates": pd.date_range("19910905", periods=6, tz="US/Eastern"), + "a": [1, 1, 1, 2, 2, 2], + "b": pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True), + "c": ["x", "x", "y", "z", "x", "y"], + } + ) + df = mi.to_frame(index=False) + df_dtypes = df.dtypes.to_dict() + + tm.assert_frame_equal(df, expected_df) + assert original_dtypes == df_dtypes + + +def test_to_frame_resulting_column_order(): + # GH 22420 + expected = ["z", 0, "a"] + mi = MultiIndex.from_arrays( + [["a", "b", "c"], ["x", "y", "z"], ["q", "w", "e"]], names=expected + ) + result = mi.to_frame().columns.tolist() + assert result == expected + + +def test_to_frame_duplicate_labels(): + # GH 45245 + data = [(1, 2), (3, 4)] + names = ["a", "a"] + index = MultiIndex.from_tuples(data, names=names) + with pytest.raises(ValueError, match="Cannot create duplicate column labels"): + index.to_frame() + + result = index.to_frame(allow_duplicates=True) + expected = 
DataFrame(data, index=index, columns=names) + tm.assert_frame_equal(result, expected) + + names = [None, 0] + index = MultiIndex.from_tuples(data, names=names) + with pytest.raises(ValueError, match="Cannot create duplicate column labels"): + index.to_frame() + + result = index.to_frame(allow_duplicates=True) + expected = DataFrame(data, index=index, columns=[0, 0]) + tm.assert_frame_equal(result, expected) + + +def test_to_flat_index(idx): + expected = pd.Index( + ( + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ), + tupleize_cols=False, + ) + result = idx.to_flat_index() + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_copy.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_copy.py new file mode 100644 index 00000000..2e09a580 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_copy.py @@ -0,0 +1,96 @@ +from copy import ( + copy, + deepcopy, +) + +import pytest + +from pandas import MultiIndex +import pandas._testing as tm + + +def assert_multiindex_copied(copy, original): + # Levels should be (at least, shallow copied) + tm.assert_copy(copy.levels, original.levels) + tm.assert_almost_equal(copy.codes, original.codes) + + # Labels doesn't matter which way copied + tm.assert_almost_equal(copy.codes, original.codes) + assert copy.codes is not original.codes + + # Names doesn't matter which way copied + assert copy.names == original.names + assert copy.names is not original.names + + # Sort order should be copied + assert copy.sortorder == original.sortorder + + +def test_copy(idx): + i_copy = idx.copy() + + assert_multiindex_copied(i_copy, idx) + + +def test_shallow_copy(idx): + i_copy = idx._view() + + assert_multiindex_copied(i_copy, idx) + + +def test_view(idx): + i_view = idx.view() + assert_multiindex_copied(i_view, idx) + + +@pytest.mark.parametrize("func", [copy, deepcopy]) +def test_copy_and_deepcopy(func): + idx = MultiIndex( + levels=[["foo", "bar"], ["fizz", "buzz"]], + codes=[[0, 0, 0, 1], [0, 0, 1, 1]], + names=["first", "second"], + ) + idx_copy = func(idx) + assert idx_copy is not idx + assert idx_copy.equals(idx) + + +@pytest.mark.parametrize("deep", [True, False]) +def test_copy_method(deep): + idx = MultiIndex( + levels=[["foo", "bar"], ["fizz", "buzz"]], + codes=[[0, 0, 0, 1], [0, 0, 1, 1]], + names=["first", "second"], + ) + idx_copy = idx.copy(deep=deep) + assert idx_copy.equals(idx) + + +@pytest.mark.parametrize("deep", [True, False]) +@pytest.mark.parametrize( + "kwarg, value", + [ + ("names", ["third", "fourth"]), + ], +) +def test_copy_method_kwargs(deep, kwarg, value): + # gh-12309: Check that the "name" argument as well other kwargs are honored + idx = MultiIndex( + levels=[["foo", "bar"], ["fizz", "buzz"]], + codes=[[0, 0, 0, 1], [0, 0, 1, 1]], + names=["first", "second"], + ) + idx_copy = idx.copy(**{kwarg: value, "deep": deep}) + assert getattr(idx_copy, kwarg) == value + + +def test_copy_deep_false_retains_id(): + # GH#47878 + idx = MultiIndex( + levels=[["foo", "bar"], ["fizz", "buzz"]], + codes=[[0, 0, 0, 1], [0, 0, 1, 1]], + names=["first", "second"], + ) + + res = idx.copy(deep=False) + assert res._id is idx._id diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_drop.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_drop.py new file mode 100644 index 00000000..99c8ebb1 --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_drop.py @@ -0,0 +1,190 @@ +import numpy as np +import pytest + +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import ( + Index, + MultiIndex, +) +import pandas._testing as tm + + +def test_drop(idx): + dropped = idx.drop([("foo", "two"), ("qux", "one")]) + + index = MultiIndex.from_tuples([("foo", "two"), ("qux", "one")]) + dropped2 = idx.drop(index) + + expected = idx[[0, 2, 3, 5]] + tm.assert_index_equal(dropped, expected) + tm.assert_index_equal(dropped2, expected) + + dropped = idx.drop(["bar"]) + expected = idx[[0, 1, 3, 4, 5]] + tm.assert_index_equal(dropped, expected) + + dropped = idx.drop("foo") + expected = idx[[2, 3, 4, 5]] + tm.assert_index_equal(dropped, expected) + + index = MultiIndex.from_tuples([("bar", "two")]) + with pytest.raises(KeyError, match=r"^\('bar', 'two'\)$"): + idx.drop([("bar", "two")]) + with pytest.raises(KeyError, match=r"^\('bar', 'two'\)$"): + idx.drop(index) + with pytest.raises(KeyError, match=r"^'two'$"): + idx.drop(["foo", "two"]) + + # partially correct argument + mixed_index = MultiIndex.from_tuples([("qux", "one"), ("bar", "two")]) + with pytest.raises(KeyError, match=r"^\('bar', 'two'\)$"): + idx.drop(mixed_index) + + # error='ignore' + dropped = idx.drop(index, errors="ignore") + expected = idx[[0, 1, 2, 3, 4, 5]] + tm.assert_index_equal(dropped, expected) + + dropped = idx.drop(mixed_index, errors="ignore") + expected = idx[[0, 1, 2, 3, 5]] + tm.assert_index_equal(dropped, expected) + + dropped = idx.drop(["foo", "two"], errors="ignore") + expected = idx[[2, 3, 4, 5]] + tm.assert_index_equal(dropped, expected) + + # mixed partial / full drop + dropped = idx.drop(["foo", ("qux", "one")]) + expected = idx[[2, 3, 5]] + tm.assert_index_equal(dropped, expected) + + # mixed partial / full drop / error='ignore' + mixed_index = ["foo", ("qux", "one"), "two"] + with pytest.raises(KeyError, match=r"^'two'$"): + idx.drop(mixed_index) + dropped = idx.drop(mixed_index, errors="ignore") + expected = idx[[2, 3, 5]] + tm.assert_index_equal(dropped, expected) + + +def test_droplevel_with_names(idx): + index = idx[idx.get_loc("foo")] + dropped = index.droplevel(0) + assert dropped.name == "second" + + index = MultiIndex( + levels=[Index(range(4)), Index(range(4)), Index(range(4))], + codes=[ + np.array([0, 0, 1, 2, 2, 2, 3, 3]), + np.array([0, 1, 0, 0, 0, 1, 0, 1]), + np.array([1, 0, 1, 1, 0, 0, 1, 0]), + ], + names=["one", "two", "three"], + ) + dropped = index.droplevel(0) + assert dropped.names == ("two", "three") + + dropped = index.droplevel("two") + expected = index.droplevel(1) + assert dropped.equals(expected) + + +def test_droplevel_list(): + index = MultiIndex( + levels=[Index(range(4)), Index(range(4)), Index(range(4))], + codes=[ + np.array([0, 0, 1, 2, 2, 2, 3, 3]), + np.array([0, 1, 0, 0, 0, 1, 0, 1]), + np.array([1, 0, 1, 1, 0, 0, 1, 0]), + ], + names=["one", "two", "three"], + ) + + dropped = index[:2].droplevel(["three", "one"]) + expected = index[:2].droplevel(2).droplevel(0) + assert dropped.equals(expected) + + dropped = index[:2].droplevel([]) + expected = index[:2] + assert dropped.equals(expected) + + msg = ( + "Cannot remove 3 levels from an index with 3 levels: " + "at least one level must be left" + ) + with pytest.raises(ValueError, match=msg): + index[:2].droplevel(["one", "two", "three"]) + + with pytest.raises(KeyError, match="'Level four not found'"): + index[:2].droplevel(["one", "four"]) + + +def test_drop_not_lexsorted(): + 
# GH 12078 + + # define the lexsorted version of the multi-index + tuples = [("a", ""), ("b1", "c1"), ("b2", "c2")] + lexsorted_mi = MultiIndex.from_tuples(tuples, names=["b", "c"]) + assert lexsorted_mi._is_lexsorted() + + # and the not-lexsorted version + df = pd.DataFrame( + columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]] + ) + df = df.pivot_table(index="a", columns=["b", "c"], values="d") + df = df.reset_index() + not_lexsorted_mi = df.columns + assert not not_lexsorted_mi._is_lexsorted() + + # compare the results + tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi) + with tm.assert_produces_warning(PerformanceWarning): + tm.assert_index_equal(lexsorted_mi.drop("a"), not_lexsorted_mi.drop("a")) + + +def test_drop_with_nan_in_index(nulls_fixture): + # GH#18853 + mi = MultiIndex.from_tuples([("blah", nulls_fixture)], names=["name", "date"]) + msg = r"labels \[Timestamp\('2001-01-01 00:00:00'\)\] not found in level" + with pytest.raises(KeyError, match=msg): + mi.drop(pd.Timestamp("2001"), level="date") + + +@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") +def test_drop_with_non_monotonic_duplicates(): + # GH#33494 + mi = MultiIndex.from_tuples([(1, 2), (2, 3), (1, 2)]) + result = mi.drop((1, 2)) + expected = MultiIndex.from_tuples([(2, 3)]) + tm.assert_index_equal(result, expected) + + +def test_single_level_drop_partially_missing_elements(): + # GH 37820 + + mi = MultiIndex.from_tuples([(1, 2), (2, 2), (3, 2)]) + msg = r"labels \[4\] not found in level" + with pytest.raises(KeyError, match=msg): + mi.drop(4, level=0) + with pytest.raises(KeyError, match=msg): + mi.drop([1, 4], level=0) + msg = r"labels \[nan\] not found in level" + with pytest.raises(KeyError, match=msg): + mi.drop([np.nan], level=0) + with pytest.raises(KeyError, match=msg): + mi.drop([np.nan, 1, 2, 3], level=0) + + mi = MultiIndex.from_tuples([(np.nan, 1), (1, 2)]) + msg = r"labels \['a'\] not found in level" + with pytest.raises(KeyError, match=msg): + mi.drop([np.nan, 1, "a"], level=0) + + +def test_droplevel_multiindex_one_level(): + # GH#37208 + index = MultiIndex.from_tuples([(2,)], names=("b",)) + result = index.droplevel([]) + expected = Index([2], name="b") + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_duplicates.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_duplicates.py new file mode 100644 index 00000000..ee1edaa2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_duplicates.py @@ -0,0 +1,344 @@ +from itertools import product + +import numpy as np +import pytest + +from pandas._libs import ( + hashtable, + index as libindex, +) + +from pandas import ( + NA, + DatetimeIndex, + MultiIndex, + Series, +) +import pandas._testing as tm + + +@pytest.mark.parametrize("names", [None, ["first", "second"]]) +def test_unique(names): + mi = MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], names=names) + + res = mi.unique() + exp = MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names) + tm.assert_index_equal(res, exp) + + mi = MultiIndex.from_arrays([list("aaaa"), list("abab")], names=names) + res = mi.unique() + exp = MultiIndex.from_arrays([list("aa"), list("ab")], names=mi.names) + tm.assert_index_equal(res, exp) + + mi = MultiIndex.from_arrays([list("aaaa"), list("aaaa")], names=names) + res = mi.unique() + exp = MultiIndex.from_arrays([["a"], ["a"]], names=mi.names) + tm.assert_index_equal(res, exp) + + # 
GH #20568 - empty MI + mi = MultiIndex.from_arrays([[], []], names=names) + res = mi.unique() + tm.assert_index_equal(mi, res) + + +def test_unique_datetimelike(): + idx1 = DatetimeIndex( + ["2015-01-01", "2015-01-01", "2015-01-01", "2015-01-01", "NaT", "NaT"] + ) + idx2 = DatetimeIndex( + ["2015-01-01", "2015-01-01", "2015-01-02", "2015-01-02", "NaT", "2015-01-01"], + tz="Asia/Tokyo", + ) + result = MultiIndex.from_arrays([idx1, idx2]).unique() + + eidx1 = DatetimeIndex(["2015-01-01", "2015-01-01", "NaT", "NaT"]) + eidx2 = DatetimeIndex( + ["2015-01-01", "2015-01-02", "NaT", "2015-01-01"], tz="Asia/Tokyo" + ) + exp = MultiIndex.from_arrays([eidx1, eidx2]) + tm.assert_index_equal(result, exp) + + +@pytest.mark.parametrize("level", [0, "first", 1, "second"]) +def test_unique_level(idx, level): + # GH #17896 - with level= argument + result = idx.unique(level=level) + expected = idx.get_level_values(level).unique() + tm.assert_index_equal(result, expected) + + # With already unique level + mi = MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]], names=["first", "second"]) + result = mi.unique(level=level) + expected = mi.get_level_values(level) + tm.assert_index_equal(result, expected) + + # With empty MI + mi = MultiIndex.from_arrays([[], []], names=["first", "second"]) + result = mi.unique(level=level) + expected = mi.get_level_values(level) + tm.assert_index_equal(result, expected) + + +def test_duplicate_multiindex_codes(): + # GH 17464 + # Make sure that a MultiIndex with duplicate levels throws a ValueError + msg = r"Level values must be unique: \[[A', ]+\] on level 0" + with pytest.raises(ValueError, match=msg): + mi = MultiIndex([["A"] * 10, range(10)], [[0] * 10, range(10)]) + + # And that using set_levels with duplicate levels fails + mi = MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]]) + msg = r"Level values must be unique: \[[AB', ]+\] on level 0" + with pytest.raises(ValueError, match=msg): + mi.set_levels([["A", "B", "A", "A", "B"], [2, 1, 3, -2, 5]]) + + +@pytest.mark.parametrize("names", [["a", "b", "a"], [1, 1, 2], [1, "a", 1]]) +def test_duplicate_level_names(names): + # GH18872, GH19029 + mi = MultiIndex.from_product([[0, 1]] * 3, names=names) + assert mi.names == names + + # With .rename() + mi = MultiIndex.from_product([[0, 1]] * 3) + mi = mi.rename(names) + assert mi.names == names + + # With .rename(., level=) + mi.rename(names[1], level=1, inplace=True) + mi = mi.rename([names[0], names[2]], level=[0, 2]) + assert mi.names == names + + +def test_duplicate_meta_data(): + # GH 10115 + mi = MultiIndex( + levels=[[0, 1], [0, 1, 2]], codes=[[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]] + ) + + for idx in [ + mi, + mi.set_names([None, None]), + mi.set_names([None, "Num"]), + mi.set_names(["Upper", "Num"]), + ]: + assert idx.has_duplicates + assert idx.drop_duplicates().names == idx.names + + +def test_has_duplicates(idx, idx_dup): + # see fixtures + assert idx.is_unique is True + assert idx.has_duplicates is False + assert idx_dup.is_unique is False + assert idx_dup.has_duplicates is True + + mi = MultiIndex( + levels=[[0, 1], [0, 1, 2]], codes=[[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]] + ) + assert mi.is_unique is False + assert mi.has_duplicates is True + + # single instance of NaN + mi_nan = MultiIndex( + levels=[["a", "b"], [0, 1]], codes=[[-1, 0, 0, 1, 1], [-1, 0, 1, 0, 1]] + ) + assert mi_nan.is_unique is True + assert mi_nan.has_duplicates is False + + # multiple instances of NaN + mi_nan_dup = MultiIndex( + levels=[["a", "b"], [0, 1]], 
codes=[[-1, -1, 0, 0, 1, 1], [-1, -1, 0, 1, 0, 1]] + ) + assert mi_nan_dup.is_unique is False + assert mi_nan_dup.has_duplicates is True + + +def test_has_duplicates_from_tuples(): + # GH 9075 + t = [ + ("x", "out", "z", 5, "y", "in", "z", 169), + ("x", "out", "z", 7, "y", "in", "z", 119), + ("x", "out", "z", 9, "y", "in", "z", 135), + ("x", "out", "z", 13, "y", "in", "z", 145), + ("x", "out", "z", 14, "y", "in", "z", 158), + ("x", "out", "z", 16, "y", "in", "z", 122), + ("x", "out", "z", 17, "y", "in", "z", 160), + ("x", "out", "z", 18, "y", "in", "z", 180), + ("x", "out", "z", 20, "y", "in", "z", 143), + ("x", "out", "z", 21, "y", "in", "z", 128), + ("x", "out", "z", 22, "y", "in", "z", 129), + ("x", "out", "z", 25, "y", "in", "z", 111), + ("x", "out", "z", 28, "y", "in", "z", 114), + ("x", "out", "z", 29, "y", "in", "z", 121), + ("x", "out", "z", 31, "y", "in", "z", 126), + ("x", "out", "z", 32, "y", "in", "z", 155), + ("x", "out", "z", 33, "y", "in", "z", 123), + ("x", "out", "z", 12, "y", "in", "z", 144), + ] + + mi = MultiIndex.from_tuples(t) + assert not mi.has_duplicates + + +@pytest.mark.parametrize("nlevels", [4, 8]) +@pytest.mark.parametrize("with_nulls", [True, False]) +def test_has_duplicates_overflow(nlevels, with_nulls): + # handle int64 overflow if possible + # no overflow with 4 + # overflow possible with 8 + codes = np.tile(np.arange(500), 2) + level = np.arange(500) + + if with_nulls: # inject some null values + codes[500] = -1 # common nan value + codes = [codes.copy() for i in range(nlevels)] + for i in range(nlevels): + codes[i][500 + i - nlevels // 2] = -1 + + codes += [np.array([-1, 1]).repeat(500)] + else: + codes = [codes] * nlevels + [np.arange(2).repeat(500)] + + levels = [level] * nlevels + [[0, 1]] + + # no dups + mi = MultiIndex(levels=levels, codes=codes) + assert not mi.has_duplicates + + # with a dup + if with_nulls: + + def f(a): + return np.insert(a, 1000, a[0]) + + codes = list(map(f, codes)) + mi = MultiIndex(levels=levels, codes=codes) + else: + values = mi.values.tolist() + mi = MultiIndex.from_tuples(values + [values[0]]) + + assert mi.has_duplicates + + +@pytest.mark.parametrize( + "keep, expected", + [ + ("first", np.array([False, False, False, True, True, False])), + ("last", np.array([False, True, True, False, False, False])), + (False, np.array([False, True, True, True, True, False])), + ], +) +def test_duplicated(idx_dup, keep, expected): + result = idx_dup.duplicated(keep=keep) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.arm_slow +def test_duplicated_hashtable_impl(keep, monkeypatch): + # GH 9125 + n, k = 6, 10 + levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)] + codes = [np.random.default_rng(2).choice(n, k * n) for _ in levels] + with monkeypatch.context() as m: + m.setattr(libindex, "_SIZE_CUTOFF", 50) + mi = MultiIndex(levels=levels, codes=codes) + + result = mi.duplicated(keep=keep) + expected = hashtable.duplicated(mi.values, keep=keep) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("val", [101, 102]) +def test_duplicated_with_nan(val): + # GH5873 + mi = MultiIndex.from_arrays([[101, val], [3.5, np.nan]]) + assert not mi.has_duplicates + + tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(2, dtype="bool")) + + +@pytest.mark.parametrize("n", range(1, 6)) +@pytest.mark.parametrize("m", range(1, 5)) +def test_duplicated_with_nan_multi_shape(n, m): + # GH5873 + # all possible unique combinations, including nan + codes = product(range(-1, n), range(-1, m)) + mi = 
MultiIndex( + levels=[list("abcde")[:n], list("WXYZ")[:m]], + codes=np.random.default_rng(2).permutation(list(codes)).T, + ) + assert len(mi) == (n + 1) * (m + 1) + assert not mi.has_duplicates + + tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(len(mi), dtype="bool")) + + +def test_duplicated_drop_duplicates(): + # GH#4060 + idx = MultiIndex.from_arrays(([1, 2, 3, 1, 2, 3], [1, 1, 1, 1, 2, 2])) + + expected = np.array([False, False, False, True, False, False], dtype=bool) + duplicated = idx.duplicated() + tm.assert_numpy_array_equal(duplicated, expected) + assert duplicated.dtype == bool + expected = MultiIndex.from_arrays(([1, 2, 3, 2, 3], [1, 1, 1, 2, 2])) + tm.assert_index_equal(idx.drop_duplicates(), expected) + + expected = np.array([True, False, False, False, False, False]) + duplicated = idx.duplicated(keep="last") + tm.assert_numpy_array_equal(duplicated, expected) + assert duplicated.dtype == bool + expected = MultiIndex.from_arrays(([2, 3, 1, 2, 3], [1, 1, 1, 2, 2])) + tm.assert_index_equal(idx.drop_duplicates(keep="last"), expected) + + expected = np.array([True, False, False, True, False, False]) + duplicated = idx.duplicated(keep=False) + tm.assert_numpy_array_equal(duplicated, expected) + assert duplicated.dtype == bool + expected = MultiIndex.from_arrays(([2, 3, 2, 3], [1, 1, 2, 2])) + tm.assert_index_equal(idx.drop_duplicates(keep=False), expected) + + +@pytest.mark.parametrize( + "dtype", + [ + np.complex64, + np.complex128, + ], +) +def test_duplicated_series_complex_numbers(dtype): + # GH 17927 + expected = Series( + [False, False, False, True, False, False, False, True, False, True], + dtype=bool, + ) + result = Series( + [ + np.nan + np.nan * 1j, + 0, + 1j, + 1j, + 1, + 1 + 1j, + 1 + 2j, + 1 + 1j, + np.nan, + np.nan + np.nan * 1j, + ], + dtype=dtype, + ).duplicated() + tm.assert_series_equal(result, expected) + + +def test_midx_unique_ea_dtype(): + # GH#48335 + vals_a = Series([1, 2, NA, NA], dtype="Int64") + vals_b = np.array([1, 2, 3, 3]) + midx = MultiIndex.from_arrays([vals_a, vals_b], names=["a", "b"]) + result = midx.unique() + + exp_vals_a = Series([1, 2, NA], dtype="Int64") + exp_vals_b = np.array([1, 2, 3]) + expected = MultiIndex.from_arrays([exp_vals_a, exp_vals_b], names=["a", "b"]) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_equivalence.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_equivalence.py new file mode 100644 index 00000000..9babbd5b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_equivalence.py @@ -0,0 +1,284 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.common import is_any_real_numeric_dtype + +import pandas as pd +from pandas import ( + Index, + MultiIndex, + Series, +) +import pandas._testing as tm + + +def test_equals(idx): + assert idx.equals(idx) + assert idx.equals(idx.copy()) + assert idx.equals(idx.astype(object)) + assert idx.equals(idx.to_flat_index()) + assert idx.equals(idx.to_flat_index().astype("category")) + + assert not idx.equals(list(idx)) + assert not idx.equals(np.array(idx)) + + same_values = Index(idx, dtype=object) + assert idx.equals(same_values) + assert same_values.equals(idx) + + if idx.nlevels == 1: + # do not test MultiIndex + assert not idx.equals(Series(idx)) + + +def test_equals_op(idx): + # GH9947, GH10637 + index_a = idx + + n = len(index_a) + index_b = index_a[0:-1] + index_c = index_a[0:-1].append(index_a[-2:-1]) + index_d = 
index_a[0:1] + with pytest.raises(ValueError, match="Lengths must match"): + index_a == index_b + expected1 = np.array([True] * n) + expected2 = np.array([True] * (n - 1) + [False]) + tm.assert_numpy_array_equal(index_a == index_a, expected1) + tm.assert_numpy_array_equal(index_a == index_c, expected2) + + # test comparisons with numpy arrays + array_a = np.array(index_a) + array_b = np.array(index_a[0:-1]) + array_c = np.array(index_a[0:-1].append(index_a[-2:-1])) + array_d = np.array(index_a[0:1]) + with pytest.raises(ValueError, match="Lengths must match"): + index_a == array_b + tm.assert_numpy_array_equal(index_a == array_a, expected1) + tm.assert_numpy_array_equal(index_a == array_c, expected2) + + # test comparisons with Series + series_a = Series(array_a) + series_b = Series(array_b) + series_c = Series(array_c) + series_d = Series(array_d) + with pytest.raises(ValueError, match="Lengths must match"): + index_a == series_b + + tm.assert_numpy_array_equal(index_a == series_a, expected1) + tm.assert_numpy_array_equal(index_a == series_c, expected2) + + # cases where length is 1 for one of them + with pytest.raises(ValueError, match="Lengths must match"): + index_a == index_d + with pytest.raises(ValueError, match="Lengths must match"): + index_a == series_d + with pytest.raises(ValueError, match="Lengths must match"): + index_a == array_d + msg = "Can only compare identically-labeled Series objects" + with pytest.raises(ValueError, match=msg): + series_a == series_d + with pytest.raises(ValueError, match="Lengths must match"): + series_a == array_d + + # comparing with a scalar should broadcast; note that we are excluding + # MultiIndex because in this case each item in the index is a tuple of + # length 2, and therefore is considered an array of length 2 in the + # comparison instead of a scalar + if not isinstance(index_a, MultiIndex): + expected3 = np.array([False] * (len(index_a) - 2) + [True, False]) + # assuming the 2nd to last item is unique in the data + item = index_a[-2] + tm.assert_numpy_array_equal(index_a == item, expected3) + tm.assert_series_equal(series_a == item, Series(expected3)) + + +def test_compare_tuple(): + # GH#21517 + mi = MultiIndex.from_product([[1, 2]] * 2) + + all_false = np.array([False, False, False, False]) + + result = mi == mi[0] + expected = np.array([True, False, False, False]) + tm.assert_numpy_array_equal(result, expected) + + result = mi != mi[0] + tm.assert_numpy_array_equal(result, ~expected) + + result = mi < mi[0] + tm.assert_numpy_array_equal(result, all_false) + + result = mi <= mi[0] + tm.assert_numpy_array_equal(result, expected) + + result = mi > mi[0] + tm.assert_numpy_array_equal(result, ~expected) + + result = mi >= mi[0] + tm.assert_numpy_array_equal(result, ~all_false) + + +def test_compare_tuple_strs(): + # GH#34180 + + mi = MultiIndex.from_tuples([("a", "b"), ("b", "c"), ("c", "a")]) + + result = mi == ("c", "a") + expected = np.array([False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = mi == ("c",) + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(result, expected) + + +def test_equals_multi(idx): + assert idx.equals(idx) + assert not idx.equals(idx.values) + assert idx.equals(Index(idx.values)) + + assert idx.equal_levels(idx) + assert not idx.equals(idx[:-1]) + assert not idx.equals(idx[-1]) + + # different number of levels + index = MultiIndex( + levels=[Index(list(range(4))), Index(list(range(4))), Index(list(range(4)))], + codes=[ + np.array([0, 0, 1, 2, 2, 2, 3, 3]), 
+ np.array([0, 1, 0, 0, 0, 1, 0, 1]), + np.array([1, 0, 1, 1, 0, 0, 1, 0]), + ], + ) + + index2 = MultiIndex(levels=index.levels[:-1], codes=index.codes[:-1]) + assert not index.equals(index2) + assert not index.equal_levels(index2) + + # levels are different + major_axis = Index(list(range(4))) + minor_axis = Index(list(range(2))) + + major_codes = np.array([0, 0, 1, 2, 2, 3]) + minor_codes = np.array([0, 1, 0, 0, 1, 0]) + + index = MultiIndex( + levels=[major_axis, minor_axis], codes=[major_codes, minor_codes] + ) + assert not idx.equals(index) + assert not idx.equal_levels(index) + + # some of the labels are different + major_axis = Index(["foo", "bar", "baz", "qux"]) + minor_axis = Index(["one", "two"]) + + major_codes = np.array([0, 0, 2, 2, 3, 3]) + minor_codes = np.array([0, 1, 0, 1, 0, 1]) + + index = MultiIndex( + levels=[major_axis, minor_axis], codes=[major_codes, minor_codes] + ) + assert not idx.equals(index) + + +def test_identical(idx): + mi = idx.copy() + mi2 = idx.copy() + assert mi.identical(mi2) + + mi = mi.set_names(["new1", "new2"]) + assert mi.equals(mi2) + assert not mi.identical(mi2) + + mi2 = mi2.set_names(["new1", "new2"]) + assert mi.identical(mi2) + + mi4 = Index(mi.tolist(), tupleize_cols=False) + assert not mi.identical(mi4) + assert mi.equals(mi4) + + +def test_equals_operator(idx): + # GH9785 + assert (idx == idx).all() + + +def test_equals_missing_values(): + # make sure take is not using -1 + i = MultiIndex.from_tuples([(0, pd.NaT), (0, pd.Timestamp("20130101"))]) + result = i[0:1].equals(i[0]) + assert not result + result = i[1:2].equals(i[1]) + assert not result + + +def test_equals_missing_values_differently_sorted(): + # GH#38439 + mi1 = MultiIndex.from_tuples([(81.0, np.nan), (np.nan, np.nan)]) + mi2 = MultiIndex.from_tuples([(np.nan, np.nan), (81.0, np.nan)]) + assert not mi1.equals(mi2) + + mi2 = MultiIndex.from_tuples([(81.0, np.nan), (np.nan, np.nan)]) + assert mi1.equals(mi2) + + +def test_is_(): + mi = MultiIndex.from_tuples(zip(range(10), range(10))) + assert mi.is_(mi) + assert mi.is_(mi.view()) + assert mi.is_(mi.view().view().view().view()) + mi2 = mi.view() + # names are metadata, they don't change id + mi2.names = ["A", "B"] + assert mi2.is_(mi) + assert mi.is_(mi2) + + assert not mi.is_(mi.set_names(["C", "D"])) + # levels are inherent properties, they change identity + mi3 = mi2.set_levels([list(range(10)), list(range(10))]) + assert not mi3.is_(mi2) + # shouldn't change + assert mi2.is_(mi) + mi4 = mi3.view() + + # GH 17464 - Remove duplicate MultiIndex levels + mi4 = mi4.set_levels([list(range(10)), list(range(10))]) + assert not mi4.is_(mi3) + mi5 = mi.view() + mi5 = mi5.set_levels(mi5.levels) + assert not mi5.is_(mi) + + +def test_is_all_dates(idx): + assert not idx._is_all_dates + + +def test_is_numeric(idx): + # MultiIndex is never numeric + assert not is_any_real_numeric_dtype(idx) + + +def test_multiindex_compare(): + # GH 21149 + # Ensure comparison operations for MultiIndex with nlevels == 1 + # behave consistently with those for MultiIndex with nlevels > 1 + + midx = MultiIndex.from_product([[0, 1]]) + + # Equality self-test: MultiIndex object vs self + expected = Series([True, True]) + result = Series(midx == midx) + tm.assert_series_equal(result, expected) + + # Greater than comparison: MultiIndex object vs self + expected = Series([False, False]) + result = Series(midx > midx) + tm.assert_series_equal(result, expected) + + +def test_equals_ea_int_regular_int(): + # GH#46026 + mi1 = MultiIndex.from_arrays([Index([1, 2], 
dtype="Int64"), [3, 4]]) + mi2 = MultiIndex.from_arrays([[1, 2], [3, 4]]) + assert not mi1.equals(mi2) + assert not mi2.equals(mi1) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_formats.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_formats.py new file mode 100644 index 00000000..011f61fa --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_formats.py @@ -0,0 +1,223 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + MultiIndex, +) + + +def test_format(idx): + idx.format() + idx[:0].format() + + +def test_format_integer_names(): + index = MultiIndex( + levels=[[0, 1], [0, 1]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1] + ) + index.format(names=True) + + +def test_format_sparse_config(idx): + # GH1538 + with pd.option_context("display.multi_sparse", False): + result = idx.format() + assert result[1] == "foo two" + + +def test_format_sparse_display(): + index = MultiIndex( + levels=[[0, 1], [0, 1], [0, 1], [0]], + codes=[ + [0, 0, 0, 1, 1, 1], + [0, 0, 1, 0, 0, 1], + [0, 1, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0], + ], + ) + + result = index.format() + assert result[3] == "1 0 0 0" + + +def test_repr_with_unicode_data(): + with pd.option_context("display.encoding", "UTF-8"): + d = {"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} + index = pd.DataFrame(d).set_index(["a", "b"]).index + assert "\\" not in repr(index) # we don't want unicode-escaped + + +def test_repr_roundtrip_raises(): + mi = MultiIndex.from_product([list("ab"), range(3)], names=["first", "second"]) + msg = "Must pass both levels and codes" + with pytest.raises(TypeError, match=msg): + eval(repr(mi)) + + +def test_unicode_string_with_unicode(): + d = {"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} + idx = pd.DataFrame(d).set_index(["a", "b"]).index + str(idx) + + +def test_repr_max_seq_item_setting(idx): + # GH10182 + idx = idx.repeat(50) + with pd.option_context("display.max_seq_items", None): + repr(idx) + assert "..." not in str(idx) + + +class TestRepr: + def test_unicode_repr_issues(self): + levels = [Index(["a/\u03c3", "b/\u03c3", "c/\u03c3"]), Index([0, 1])] + codes = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)] + index = MultiIndex(levels=levels, codes=codes) + + repr(index.levels) + repr(index.get_level_values(1)) + + def test_repr_max_seq_items_equal_to_n(self, idx): + # display.max_seq_items == n + with pd.option_context("display.max_seq_items", 6): + result = idx.__repr__() + expected = """\ +MultiIndex([('foo', 'one'), + ('foo', 'two'), + ('bar', 'one'), + ('baz', 'two'), + ('qux', 'one'), + ('qux', 'two')], + names=['first', 'second'])""" + assert result == expected + + def test_repr(self, idx): + result = idx[:1].__repr__() + expected = """\ +MultiIndex([('foo', 'one')], + names=['first', 'second'])""" + assert result == expected + + result = idx.__repr__() + expected = """\ +MultiIndex([('foo', 'one'), + ('foo', 'two'), + ('bar', 'one'), + ('baz', 'two'), + ('qux', 'one'), + ('qux', 'two')], + names=['first', 'second'])""" + assert result == expected + + with pd.option_context("display.max_seq_items", 5): + result = idx.__repr__() + expected = """\ +MultiIndex([('foo', 'one'), + ('foo', 'two'), + ... 
+ ('qux', 'one'), + ('qux', 'two')], + names=['first', 'second'], length=6)""" + assert result == expected + + # display.max_seq_items == 1 + with pd.option_context("display.max_seq_items", 1): + result = idx.__repr__() + expected = """\ +MultiIndex([... + ('qux', 'two')], + names=['first', ...], length=6)""" + assert result == expected + + def test_rjust(self, narrow_multi_index): + mi = narrow_multi_index + result = mi[:1].__repr__() + expected = """\ +MultiIndex([('a', 9, '2000-01-01 00:00:00')], + names=['a', 'b', 'dti'])""" + assert result == expected + + result = mi[::500].__repr__() + expected = """\ +MultiIndex([( 'a', 9, '2000-01-01 00:00:00'), + ( 'a', 9, '2000-01-01 00:08:20'), + ('abc', 10, '2000-01-01 00:16:40'), + ('abc', 10, '2000-01-01 00:25:00')], + names=['a', 'b', 'dti'])""" + assert result == expected + + result = mi.__repr__() + expected = """\ +MultiIndex([( 'a', 9, '2000-01-01 00:00:00'), + ( 'a', 9, '2000-01-01 00:00:01'), + ( 'a', 9, '2000-01-01 00:00:02'), + ( 'a', 9, '2000-01-01 00:00:03'), + ( 'a', 9, '2000-01-01 00:00:04'), + ( 'a', 9, '2000-01-01 00:00:05'), + ( 'a', 9, '2000-01-01 00:00:06'), + ( 'a', 9, '2000-01-01 00:00:07'), + ( 'a', 9, '2000-01-01 00:00:08'), + ( 'a', 9, '2000-01-01 00:00:09'), + ... + ('abc', 10, '2000-01-01 00:33:10'), + ('abc', 10, '2000-01-01 00:33:11'), + ('abc', 10, '2000-01-01 00:33:12'), + ('abc', 10, '2000-01-01 00:33:13'), + ('abc', 10, '2000-01-01 00:33:14'), + ('abc', 10, '2000-01-01 00:33:15'), + ('abc', 10, '2000-01-01 00:33:16'), + ('abc', 10, '2000-01-01 00:33:17'), + ('abc', 10, '2000-01-01 00:33:18'), + ('abc', 10, '2000-01-01 00:33:19')], + names=['a', 'b', 'dti'], length=2000)""" + assert result == expected + + def test_tuple_width(self, wide_multi_index): + mi = wide_multi_index + result = mi[:1].__repr__() + expected = """MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...)], + names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])""" # noqa: E501 + assert result == expected + + result = mi[:10].__repr__() + expected = """\ +MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...), + ('a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...), + ('a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...), + ('a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...), + ('a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...), + ('a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...), + ('a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...), + ('a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...), + ('a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...), + ('a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...)], + names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])""" + assert result == expected + + result = mi.__repr__() + expected = """\ +MultiIndex([( 'a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...), + ( 'a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...), + ( 'a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...), + ( 'a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...), + ( 'a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...), + ( 'a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...), + ( 'a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...), + ( 'a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...), + ( 'a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...), + ( 'a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...), + ... 
+ ('abc', 10, '2000-01-01 00:33:10', '2000-01-01 00:33:10', ...), + ('abc', 10, '2000-01-01 00:33:11', '2000-01-01 00:33:11', ...), + ('abc', 10, '2000-01-01 00:33:12', '2000-01-01 00:33:12', ...), + ('abc', 10, '2000-01-01 00:33:13', '2000-01-01 00:33:13', ...), + ('abc', 10, '2000-01-01 00:33:14', '2000-01-01 00:33:14', ...), + ('abc', 10, '2000-01-01 00:33:15', '2000-01-01 00:33:15', ...), + ('abc', 10, '2000-01-01 00:33:16', '2000-01-01 00:33:16', ...), + ('abc', 10, '2000-01-01 00:33:17', '2000-01-01 00:33:17', ...), + ('abc', 10, '2000-01-01 00:33:18', '2000-01-01 00:33:18', ...), + ('abc', 10, '2000-01-01 00:33:19', '2000-01-01 00:33:19', ...)], + names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'], length=2000)""" + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_get_level_values.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_get_level_values.py new file mode 100644 index 00000000..84907f52 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_get_level_values.py @@ -0,0 +1,124 @@ +import numpy as np + +import pandas as pd +from pandas import ( + CategoricalIndex, + Index, + MultiIndex, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestGetLevelValues: + def test_get_level_values_box_datetime64(self): + dates = date_range("1/1/2000", periods=4) + levels = [dates, [0, 1]] + codes = [[0, 0, 1, 1, 2, 2, 3, 3], [0, 1, 0, 1, 0, 1, 0, 1]] + + index = MultiIndex(levels=levels, codes=codes) + + assert isinstance(index.get_level_values(0)[0], Timestamp) + + +def test_get_level_values(idx): + result = idx.get_level_values(0) + expected = Index(["foo", "foo", "bar", "baz", "qux", "qux"], name="first") + tm.assert_index_equal(result, expected) + assert result.name == "first" + + result = idx.get_level_values("first") + expected = idx.get_level_values(0) + tm.assert_index_equal(result, expected) + + # GH 10460 + index = MultiIndex( + levels=[CategoricalIndex(["A", "B"]), CategoricalIndex([1, 2, 3])], + codes=[np.array([0, 0, 0, 1, 1, 1]), np.array([0, 1, 2, 0, 1, 2])], + ) + + exp = CategoricalIndex(["A", "A", "A", "B", "B", "B"]) + tm.assert_index_equal(index.get_level_values(0), exp) + exp = CategoricalIndex([1, 2, 3, 1, 2, 3]) + tm.assert_index_equal(index.get_level_values(1), exp) + + +def test_get_level_values_all_na(): + # GH#17924 when level entirely consists of nan + arrays = [[np.nan, np.nan, np.nan], ["a", np.nan, 1]] + index = MultiIndex.from_arrays(arrays) + result = index.get_level_values(0) + expected = Index([np.nan, np.nan, np.nan], dtype=np.float64) + tm.assert_index_equal(result, expected) + + result = index.get_level_values(1) + expected = Index(["a", np.nan, 1], dtype=object) + tm.assert_index_equal(result, expected) + + +def test_get_level_values_int_with_na(): + # GH#17924 + arrays = [["a", "b", "b"], [1, np.nan, 2]] + index = MultiIndex.from_arrays(arrays) + result = index.get_level_values(1) + expected = Index([1, np.nan, 2]) + tm.assert_index_equal(result, expected) + + arrays = [["a", "b", "b"], [np.nan, np.nan, 2]] + index = MultiIndex.from_arrays(arrays) + result = index.get_level_values(1) + expected = Index([np.nan, np.nan, 2]) + tm.assert_index_equal(result, expected) + + +def test_get_level_values_na(): + arrays = [[np.nan, np.nan, np.nan], ["a", np.nan, 1]] + index = MultiIndex.from_arrays(arrays) + result = index.get_level_values(0) + expected = Index([np.nan, np.nan, np.nan]) + tm.assert_index_equal(result, expected) + + 
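# A minimal standalone sketch of the behaviour these assertions pin down
# (illustrative only; `mi_demo` is a hypothetical name, not taken from the
# test file): missing entries in a level are stored as code -1, and
# get_level_values() rehydrates every -1 back into NaN.
import numpy as np
import pandas as pd

mi_demo = pd.MultiIndex.from_arrays([["a", "b", "b"], [1, np.nan, 2]])
assert mi_demo.codes[1].tolist() == [0, -1, 1]   # NaN is encoded as code -1
assert pd.isna(mi_demo.get_level_values(1)[1])   # and decoded back to NaN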
result = index.get_level_values(1) + expected = Index(["a", np.nan, 1]) + tm.assert_index_equal(result, expected) + + arrays = [["a", "b", "b"], pd.DatetimeIndex([0, 1, pd.NaT])] + index = MultiIndex.from_arrays(arrays) + result = index.get_level_values(1) + expected = pd.DatetimeIndex([0, 1, pd.NaT]) + tm.assert_index_equal(result, expected) + + arrays = [[], []] + index = MultiIndex.from_arrays(arrays) + result = index.get_level_values(0) + expected = Index([], dtype=object) + tm.assert_index_equal(result, expected) + + +def test_get_level_values_when_periods(): + # GH33131. See also discussion in GH32669. + # This test can probably be removed when PeriodIndex._engine is removed. + from pandas import ( + Period, + PeriodIndex, + ) + + idx = MultiIndex.from_arrays( + [PeriodIndex([Period("2019Q1"), Period("2019Q2")], name="b")] + ) + idx2 = MultiIndex.from_arrays( + [idx._get_level_values(level) for level in range(idx.nlevels)] + ) + assert all(x.is_monotonic_increasing for x in idx2.levels) + + +def test_values_loses_freq_of_underlying_index(): + # GH#49054 + idx = pd.DatetimeIndex(date_range("20200101", periods=3, freq="BM")) + expected = idx.copy(deep=True) + idx2 = Index([1, 2, 3]) + midx = MultiIndex(levels=[idx, idx2], codes=[[0, 1, 2], [0, 1, 2]]) + midx.values + assert idx.freq is not None + tm.assert_index_equal(idx, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_get_set.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_get_set.py new file mode 100644 index 00000000..0720a1e1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_get_set.py @@ -0,0 +1,379 @@ +import numpy as np +import pytest + +from pandas.compat import PY311 + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +from pandas import ( + CategoricalIndex, + MultiIndex, +) +import pandas._testing as tm + + +def assert_matching(actual, expected, check_dtype=False): + # avoid specifying internal representation + # as much as possible + assert len(actual) == len(expected) + for act, exp in zip(actual, expected): + act = np.asarray(act) + exp = np.asarray(exp) + tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype) + + +def test_get_level_number_integer(idx): + idx.names = [1, 0] + assert idx._get_level_number(1) == 0 + assert idx._get_level_number(0) == 1 + msg = "Too many levels: Index has only 2 levels, not 3" + with pytest.raises(IndexError, match=msg): + idx._get_level_number(2) + with pytest.raises(KeyError, match="Level fourth not found"): + idx._get_level_number("fourth") + + +def test_get_dtypes(): + # Test MultiIndex.dtypes (# Gh37062) + idx_multitype = MultiIndex.from_product( + [[1, 2, 3], ["a", "b", "c"], pd.date_range("20200101", periods=2, tz="UTC")], + names=["int", "string", "dt"], + ) + expected = pd.Series( + { + "int": np.dtype("int64"), + "string": np.dtype("O"), + "dt": DatetimeTZDtype(tz="utc"), + } + ) + tm.assert_series_equal(expected, idx_multitype.dtypes) + + +def test_get_dtypes_no_level_name(): + # Test MultiIndex.dtypes (# GH38580 ) + idx_multitype = MultiIndex.from_product( + [ + [1, 2, 3], + ["a", "b", "c"], + pd.date_range("20200101", periods=2, tz="UTC"), + ], + ) + expected = pd.Series( + { + "level_0": np.dtype("int64"), + "level_1": np.dtype("O"), + "level_2": DatetimeTZDtype(tz="utc"), + } + ) + tm.assert_series_equal(expected, idx_multitype.dtypes) + + +def test_get_dtypes_duplicate_level_names(): + # Test MultiIndex.dtypes with non-unique 
level names (# GH45174) + result = MultiIndex.from_product( + [ + [1, 2, 3], + ["a", "b", "c"], + pd.date_range("20200101", periods=2, tz="UTC"), + ], + names=["A", "A", "A"], + ).dtypes + expected = pd.Series( + [np.dtype("int64"), np.dtype("O"), DatetimeTZDtype(tz="utc")], + index=["A", "A", "A"], + ) + tm.assert_series_equal(result, expected) + + +def test_get_level_number_out_of_bounds(multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + with pytest.raises(IndexError, match="Too many levels"): + frame.index._get_level_number(2) + with pytest.raises(IndexError, match="not a valid level number"): + frame.index._get_level_number(-3) + + +def test_set_name_methods(idx, index_names): + # so long as these are synonyms, we don't need to test set_names + assert idx.rename == idx.set_names + new_names = [name + "SUFFIX" for name in index_names] + ind = idx.set_names(new_names) + assert idx.names == index_names + assert ind.names == new_names + msg = "Length of names must match number of levels in MultiIndex" + with pytest.raises(ValueError, match=msg): + ind.set_names(new_names + new_names) + new_names2 = [name + "SUFFIX2" for name in new_names] + res = ind.set_names(new_names2, inplace=True) + assert res is None + assert ind.names == new_names2 + + # set names for specific level (# GH7792) + ind = idx.set_names(new_names[0], level=0) + assert idx.names == index_names + assert ind.names == [new_names[0], index_names[1]] + + res = ind.set_names(new_names2[0], level=0, inplace=True) + assert res is None + assert ind.names == [new_names2[0], index_names[1]] + + # set names for multiple levels + ind = idx.set_names(new_names, level=[0, 1]) + assert idx.names == index_names + assert ind.names == new_names + + res = ind.set_names(new_names2, level=[0, 1], inplace=True) + assert res is None + assert ind.names == new_names2 + + +def test_set_levels_codes_directly(idx): + # setting levels/codes directly raises AttributeError + + levels = idx.levels + new_levels = [[lev + "a" for lev in level] for level in levels] + + codes = idx.codes + major_codes, minor_codes = codes + major_codes = [(x + 1) % 3 for x in major_codes] + minor_codes = [(x + 1) % 1 for x in minor_codes] + new_codes = [major_codes, minor_codes] + + msg = "Can't set attribute" + with pytest.raises(AttributeError, match=msg): + idx.levels = new_levels + + msg = ( + "property 'codes' of 'MultiIndex' object has no setter" + if PY311 + else "can't set attribute" + ) + with pytest.raises(AttributeError, match=msg): + idx.codes = new_codes + + +def test_set_levels(idx): + # side note - you probably wouldn't want to use levels and codes + # directly like this - but it is possible. 
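# A self-contained sketch of the contract the assertions in this function
# rely on: set_levels() returns a new MultiIndex and never mutates the
# caller (`mi_demo` is a hypothetical illustration, not part of the suite).
import pandas as pd

mi_demo = pd.MultiIndex.from_arrays([["a", "a", "b"], [1, 2, 1]])
relabelled = mi_demo.set_levels(["x", "y"], level=0)
assert list(relabelled.levels[0]) == ["x", "y"]   # new object gets new levels
assert list(mi_demo.levels[0]) == ["a", "b"]      # original is untouched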
+ levels = idx.levels + new_levels = [[lev + "a" for lev in level] for level in levels] + + # level changing [w/o mutation] + ind2 = idx.set_levels(new_levels) + assert_matching(ind2.levels, new_levels) + assert_matching(idx.levels, levels) + + # level changing specific level [w/o mutation] + ind2 = idx.set_levels(new_levels[0], level=0) + assert_matching(ind2.levels, [new_levels[0], levels[1]]) + assert_matching(idx.levels, levels) + + ind2 = idx.set_levels(new_levels[1], level=1) + assert_matching(ind2.levels, [levels[0], new_levels[1]]) + assert_matching(idx.levels, levels) + + # level changing multiple levels [w/o mutation] + ind2 = idx.set_levels(new_levels, level=[0, 1]) + assert_matching(ind2.levels, new_levels) + assert_matching(idx.levels, levels) + + # illegal level changing should not change levels + # GH 13754 + original_index = idx.copy() + with pytest.raises(ValueError, match="^On"): + idx.set_levels(["c"], level=0) + assert_matching(idx.levels, original_index.levels, check_dtype=True) + + with pytest.raises(ValueError, match="^On"): + idx.set_codes([0, 1, 2, 3, 4, 5], level=0) + assert_matching(idx.codes, original_index.codes, check_dtype=True) + + with pytest.raises(TypeError, match="^Levels"): + idx.set_levels("c", level=0) + assert_matching(idx.levels, original_index.levels, check_dtype=True) + + with pytest.raises(TypeError, match="^Codes"): + idx.set_codes(1, level=0) + assert_matching(idx.codes, original_index.codes, check_dtype=True) + + +def test_set_codes(idx): + # side note - you probably wouldn't want to use levels and codes + # directly like this - but it is possible. + codes = idx.codes + major_codes, minor_codes = codes + major_codes = [(x + 1) % 3 for x in major_codes] + minor_codes = [(x + 1) % 1 for x in minor_codes] + new_codes = [major_codes, minor_codes] + + # changing codes w/o mutation + ind2 = idx.set_codes(new_codes) + assert_matching(ind2.codes, new_codes) + assert_matching(idx.codes, codes) + + # codes changing specific level w/o mutation + ind2 = idx.set_codes(new_codes[0], level=0) + assert_matching(ind2.codes, [new_codes[0], codes[1]]) + assert_matching(idx.codes, codes) + + ind2 = idx.set_codes(new_codes[1], level=1) + assert_matching(ind2.codes, [codes[0], new_codes[1]]) + assert_matching(idx.codes, codes) + + # codes changing multiple levels w/o mutation + ind2 = idx.set_codes(new_codes, level=[0, 1]) + assert_matching(ind2.codes, new_codes) + assert_matching(idx.codes, codes) + + # label changing for levels of different magnitude of categories + ind = MultiIndex.from_tuples([(0, i) for i in range(130)]) + new_codes = range(129, -1, -1) + expected = MultiIndex.from_tuples([(0, i) for i in new_codes]) + + # [w/o mutation] + result = ind.set_codes(codes=new_codes, level=1) + assert result.equals(expected) + + +def test_set_levels_codes_names_bad_input(idx): + levels, codes = idx.levels, idx.codes + names = idx.names + + with pytest.raises(ValueError, match="Length of levels"): + idx.set_levels([levels[0]]) + + with pytest.raises(ValueError, match="Length of codes"): + idx.set_codes([codes[0]]) + + with pytest.raises(ValueError, match="Length of names"): + idx.set_names([names[0]]) + + # shouldn't scalar data error, instead should demand list-like + with pytest.raises(TypeError, match="list of lists-like"): + idx.set_levels(levels[0]) + + # shouldn't scalar data error, instead should demand list-like + with pytest.raises(TypeError, match="list of lists-like"): + idx.set_codes(codes[0]) + + # shouldn't scalar data error, instead should demand 
list-like + with pytest.raises(TypeError, match="list-like"): + idx.set_names(names[0]) + + # should have equal lengths + with pytest.raises(TypeError, match="list of lists-like"): + idx.set_levels(levels[0], level=[0, 1]) + + with pytest.raises(TypeError, match="list-like"): + idx.set_levels(levels, level=0) + + # should have equal lengths + with pytest.raises(TypeError, match="list of lists-like"): + idx.set_codes(codes[0], level=[0, 1]) + + with pytest.raises(TypeError, match="list-like"): + idx.set_codes(codes, level=0) + + # should have equal lengths + with pytest.raises(ValueError, match="Length of names"): + idx.set_names(names[0], level=[0, 1]) + + with pytest.raises(TypeError, match="Names must be a"): + idx.set_names(names, level=0) + + +@pytest.mark.parametrize("inplace", [True, False]) +def test_set_names_with_nlevel_1(inplace): + # GH 21149 + # Ensure that .set_names for MultiIndex with + # nlevels == 1 does not raise any errors + expected = MultiIndex(levels=[[0, 1]], codes=[[0, 1]], names=["first"]) + m = MultiIndex.from_product([[0, 1]]) + result = m.set_names("first", level=0, inplace=inplace) + + if inplace: + result = m + + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("ordered", [True, False]) +def test_set_levels_categorical(ordered): + # GH13854 + index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]]) + + cidx = CategoricalIndex(list("bac"), ordered=ordered) + result = index.set_levels(cidx, level=0) + expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]], codes=index.codes) + tm.assert_index_equal(result, expected) + + result_lvl = result.get_level_values(0) + expected_lvl = CategoricalIndex( + list("bacb"), categories=cidx.categories, ordered=cidx.ordered + ) + tm.assert_index_equal(result_lvl, expected_lvl) + + +def test_set_value_keeps_names(): + # motivating example from #3742 + lev1 = ["hans", "hans", "hans", "grethe", "grethe", "grethe"] + lev2 = ["1", "2", "3"] * 2 + idx = MultiIndex.from_arrays([lev1, lev2], names=["Name", "Number"]) + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + columns=["one", "two", "three", "four"], + index=idx, + ) + df = df.sort_index() + assert df._is_copy is None + assert df.index.names == ("Name", "Number") + df.at[("grethe", "4"), "one"] = 99.34 + assert df._is_copy is None + assert df.index.names == ("Name", "Number") + + +def test_set_levels_with_iterable(): + # GH23273 + sizes = [1, 2, 3] + colors = ["black"] * 3 + index = MultiIndex.from_arrays([sizes, colors], names=["size", "color"]) + + result = index.set_levels(map(int, ["3", "2", "1"]), level="size") + + expected_sizes = [3, 2, 1] + expected = MultiIndex.from_arrays([expected_sizes, colors], names=["size", "color"]) + tm.assert_index_equal(result, expected) + + +def test_set_empty_level(): + # GH#48636 + midx = MultiIndex.from_arrays([[]], names=["A"]) + result = midx.set_levels(pd.DatetimeIndex([]), level=0) + expected = MultiIndex.from_arrays([pd.DatetimeIndex([])], names=["A"]) + tm.assert_index_equal(result, expected) + + +def test_set_levels_pos_args_removal(): + # https://github.com/pandas-dev/pandas/issues/41485 + idx = MultiIndex.from_tuples( + [ + (1, "one"), + (3, "one"), + ], + names=["foo", "bar"], + ) + with pytest.raises(TypeError, match="positional arguments"): + idx.set_levels(["a", "b", "c"], 0) + + with pytest.raises(TypeError, match="positional arguments"): + idx.set_codes([[0, 1], [1, 0]], 0) + + +def test_set_levels_categorical_keep_dtype(): + # GH#52125 + midx = MultiIndex.from_arrays([[5, 6]]) + 
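# The regression guarded here (GH#52125) is dtype preservation: replacement
# levels supplied as a Categorical should stay categorical rather than being
# coerced to object. A standalone sketch, assuming a pandas build that
# includes the GH#52125 fix:
import pandas as pd

demo = pd.MultiIndex.from_arrays([[0, 1]]).set_levels(
    pd.Categorical(["x", "y"]), level=0
)
assert isinstance(demo.levels[0].dtype, pd.CategoricalDtype)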
result = midx.set_levels(levels=pd.Categorical([1, 2]), level=0) + expected = MultiIndex.from_arrays([pd.Categorical([1, 2])]) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_indexing.py new file mode 100644 index 00000000..78b2c493 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_indexing.py @@ -0,0 +1,973 @@ +from datetime import timedelta +import re + +import numpy as np +import pytest + +from pandas.errors import ( + InvalidIndexError, + PerformanceWarning, +) + +import pandas as pd +from pandas import ( + Categorical, + Index, + MultiIndex, + date_range, +) +import pandas._testing as tm + + +class TestSliceLocs: + def test_slice_locs_partial(self, idx): + sorted_idx, _ = idx.sortlevel(0) + + result = sorted_idx.slice_locs(("foo", "two"), ("qux", "one")) + assert result == (1, 5) + + result = sorted_idx.slice_locs(None, ("qux", "one")) + assert result == (0, 5) + + result = sorted_idx.slice_locs(("foo", "two"), None) + assert result == (1, len(sorted_idx)) + + result = sorted_idx.slice_locs("bar", "baz") + assert result == (2, 4) + + def test_slice_locs(self): + df = tm.makeTimeDataFrame() + stacked = df.stack(future_stack=True) + idx = stacked.index + + slob = slice(*idx.slice_locs(df.index[5], df.index[15])) + sliced = stacked[slob] + expected = df[5:16].stack(future_stack=True) + tm.assert_almost_equal(sliced.values, expected.values) + + slob = slice( + *idx.slice_locs( + df.index[5] + timedelta(seconds=30), + df.index[15] - timedelta(seconds=30), + ) + ) + sliced = stacked[slob] + expected = df[6:15].stack(future_stack=True) + tm.assert_almost_equal(sliced.values, expected.values) + + def test_slice_locs_with_type_mismatch(self): + df = tm.makeTimeDataFrame() + stacked = df.stack(future_stack=True) + idx = stacked.index + with pytest.raises(TypeError, match="^Level type mismatch"): + idx.slice_locs((1, 3)) + with pytest.raises(TypeError, match="^Level type mismatch"): + idx.slice_locs(df.index[5] + timedelta(seconds=30), (5, 2)) + df = tm.makeCustomDataframe(5, 5) + stacked = df.stack(future_stack=True) + idx = stacked.index + with pytest.raises(TypeError, match="^Level type mismatch"): + idx.slice_locs(timedelta(seconds=30)) + # TODO: Try creating a UnicodeDecodeError in exception message + with pytest.raises(TypeError, match="^Level type mismatch"): + idx.slice_locs(df.index[1], (16, "a")) + + def test_slice_locs_not_sorted(self): + index = MultiIndex( + levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))], + codes=[ + np.array([0, 0, 1, 2, 2, 2, 3, 3]), + np.array([0, 1, 0, 0, 0, 1, 0, 1]), + np.array([1, 0, 1, 1, 0, 0, 1, 0]), + ], + ) + msg = "[Kk]ey length.*greater than MultiIndex lexsort depth" + with pytest.raises(KeyError, match=msg): + index.slice_locs((1, 0, 1), (2, 1, 0)) + + # works + sorted_index, _ = index.sortlevel(0) + # should there be a test case here??? 
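# Context for the KeyError asserted above: slice_locs() only accepts keys no
# longer than the index's lexsort depth, and sortlevel(0) restores full
# depth, so the call below simply must not raise. A compact sketch of the
# depth rule (note that _lexsort_depth is internal API, used here purely for
# illustration):
import pandas as pd

mi_sketch = pd.MultiIndex.from_tuples([("b", 1), ("a", 2), ("a", 1)])
assert mi_sketch._lexsort_depth == 0                   # unsorted: depth 0
assert mi_sketch.sortlevel(0)[0]._lexsort_depth == 2   # fully sorted: depth 2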
+ sorted_index.slice_locs((1, 0, 1), (2, 1, 0)) + + def test_slice_locs_not_contained(self): + # some searchsorted action + + index = MultiIndex( + levels=[[0, 2, 4, 6], [0, 2, 4]], + codes=[[0, 0, 0, 1, 1, 2, 3, 3, 3], [0, 1, 2, 1, 2, 2, 0, 1, 2]], + ) + + result = index.slice_locs((1, 0), (5, 2)) + assert result == (3, 6) + + result = index.slice_locs(1, 5) + assert result == (3, 6) + + result = index.slice_locs((2, 2), (5, 2)) + assert result == (3, 6) + + result = index.slice_locs(2, 5) + assert result == (3, 6) + + result = index.slice_locs((1, 0), (6, 3)) + assert result == (3, 8) + + result = index.slice_locs(-1, 10) + assert result == (0, len(index)) + + @pytest.mark.parametrize( + "index_arr,expected,start_idx,end_idx", + [ + ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, None), + ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, "b"), + ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, ("b", "e")), + ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), None), + ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), "c"), + ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), ("c", "e")), + ], + ) + def test_slice_locs_with_missing_value( + self, index_arr, expected, start_idx, end_idx + ): + # issue 19132 + idx = MultiIndex.from_arrays(index_arr) + result = idx.slice_locs(start=start_idx, end=end_idx) + assert result == expected + + +class TestPutmask: + def test_putmask_with_wrong_mask(self, idx): + # GH18368 + + msg = "putmask: mask and data must be the same size" + with pytest.raises(ValueError, match=msg): + idx.putmask(np.ones(len(idx) + 1, np.bool_), 1) + + with pytest.raises(ValueError, match=msg): + idx.putmask(np.ones(len(idx) - 1, np.bool_), 1) + + with pytest.raises(ValueError, match=msg): + idx.putmask("foo", 1) + + def test_putmask_multiindex_other(self): + # GH#43212 `value` is also a MultiIndex + + left = MultiIndex.from_tuples([(np.nan, 6), (np.nan, 6), ("a", 4)]) + right = MultiIndex.from_tuples([("a", 1), ("a", 1), ("d", 1)]) + mask = np.array([True, True, False]) + + result = left.putmask(mask, right) + + expected = MultiIndex.from_tuples([right[0], right[1], left[2]]) + tm.assert_index_equal(result, expected) + + def test_putmask_keep_dtype(self, any_numeric_ea_dtype): + # GH#49830 + midx = MultiIndex.from_arrays( + [pd.Series([1, 2, 3], dtype=any_numeric_ea_dtype), [10, 11, 12]] + ) + midx2 = MultiIndex.from_arrays( + [pd.Series([5, 6, 7], dtype=any_numeric_ea_dtype), [-1, -2, -3]] + ) + result = midx.putmask([True, False, False], midx2) + expected = MultiIndex.from_arrays( + [pd.Series([5, 2, 3], dtype=any_numeric_ea_dtype), [-1, 11, 12]] + ) + tm.assert_index_equal(result, expected) + + def test_putmask_keep_dtype_shorter_value(self, any_numeric_ea_dtype): + # GH#49830 + midx = MultiIndex.from_arrays( + [pd.Series([1, 2, 3], dtype=any_numeric_ea_dtype), [10, 11, 12]] + ) + midx2 = MultiIndex.from_arrays( + [pd.Series([5], dtype=any_numeric_ea_dtype), [-1]] + ) + result = midx.putmask([True, False, False], midx2) + expected = MultiIndex.from_arrays( + [pd.Series([5, 2, 3], dtype=any_numeric_ea_dtype), [-1, 11, 12]] + ) + tm.assert_index_equal(result, expected) + + +class TestGetIndexer: + def test_get_indexer(self): + major_axis = Index(np.arange(4)) + minor_axis = Index(np.arange(2)) + + major_codes = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp) + minor_codes = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp) + + index = MultiIndex( + levels=[major_axis, minor_axis], codes=[major_codes, minor_codes] + ) + 
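# idx1 is a contiguous prefix and idx2 a strided subset, so the lookups below exercise both exact hits and -1 misses +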
idx1 = index[:5] + idx2 = index[[1, 3, 5]] + + r1 = idx1.get_indexer(idx2) + tm.assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp)) + + r1 = idx2.get_indexer(idx1, method="pad") + e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp) + tm.assert_almost_equal(r1, e1) + + r2 = idx2.get_indexer(idx1[::-1], method="pad") + tm.assert_almost_equal(r2, e1[::-1]) + + rffill1 = idx2.get_indexer(idx1, method="ffill") + tm.assert_almost_equal(r1, rffill1) + + r1 = idx2.get_indexer(idx1, method="backfill") + e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp) + tm.assert_almost_equal(r1, e1) + + r2 = idx2.get_indexer(idx1[::-1], method="backfill") + tm.assert_almost_equal(r2, e1[::-1]) + + rbfill1 = idx2.get_indexer(idx1, method="bfill") + tm.assert_almost_equal(r1, rbfill1) + + # pass non-MultiIndex + r1 = idx1.get_indexer(idx2.values) + rexp1 = idx1.get_indexer(idx2) + tm.assert_almost_equal(r1, rexp1) + + r1 = idx1.get_indexer([1, 2, 3]) + assert (r1 == [-1, -1, -1]).all() + + # create index with duplicates + idx1 = Index(list(range(10)) + list(range(10))) + idx2 = Index(list(range(20))) + + msg = "Reindexing only valid with uniquely valued Index objects" + with pytest.raises(InvalidIndexError, match=msg): + idx1.get_indexer(idx2) + + def test_get_indexer_nearest(self): + midx = MultiIndex.from_tuples([("a", 1), ("b", 2)]) + msg = ( + "method='nearest' not implemented yet for MultiIndex; " + "see GitHub issue 9365" + ) + with pytest.raises(NotImplementedError, match=msg): + midx.get_indexer(["a"], method="nearest") + msg = "tolerance not implemented yet for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + midx.get_indexer(["a"], method="pad", tolerance=2) + + def test_get_indexer_categorical_time(self): + # https://github.com/pandas-dev/pandas/issues/21390 + midx = MultiIndex.from_product( + [ + Categorical(["a", "b", "c"]), + Categorical(date_range("2012-01-01", periods=3, freq="H")), + ] + ) + result = midx.get_indexer(midx) + tm.assert_numpy_array_equal(result, np.arange(9, dtype=np.intp)) + + @pytest.mark.parametrize( + "index_arr,labels,expected", + [ + ( + [[1, np.nan, 2], [3, 4, 5]], + [1, np.nan, 2], + np.array([-1, -1, -1], dtype=np.intp), + ), + ([[1, np.nan, 2], [3, 4, 5]], [(np.nan, 4)], np.array([1], dtype=np.intp)), + ([[1, 2, 3], [np.nan, 4, 5]], [(1, np.nan)], np.array([0], dtype=np.intp)), + ( + [[1, 2, 3], [np.nan, 4, 5]], + [np.nan, 4, 5], + np.array([-1, -1, -1], dtype=np.intp), + ), + ], + ) + def test_get_indexer_with_missing_value(self, index_arr, labels, expected): + # issue 19132 + idx = MultiIndex.from_arrays(index_arr) + result = idx.get_indexer(labels) + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_methods(self): + # https://github.com/pandas-dev/pandas/issues/29896 + # test getting an indexer for another index with different methods + # confirms that getting an indexer without a filling method, getting an + # indexer and backfilling, and getting an indexer and padding all behave + # correctly in the case where all of the target values fall in between + # several levels in the MultiIndex into which they are getting an indexer + # + # visually, the MultiIndexes used in this test are: + # mult_idx_1: + # 0: -1 0 + # 1: 2 + # 2: 3 + # 3: 4 + # 4: 0 0 + # 5: 2 + # 6: 3 + # 7: 4 + # 8: 1 0 + # 9: 2 + # 10: 3 + # 11: 4 + # + # mult_idx_2: + # 0: 0 1 + # 1: 3 + # 2: 4 + mult_idx_1 = MultiIndex.from_product([[-1, 0, 1], [0, 2, 3, 4]]) + mult_idx_2 = MultiIndex.from_product([[0], [1, 3, 4]]) + + indexer = mult_idx_1.get_indexer(mult_idx_2) 
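+ # without a fill method only exact matches resolve: (0, 1) is absent from mult_idx_1 (hence -1), while (0, 3) and (0, 4) sit at positions 6 and 7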
+ expected = np.array([-1, 6, 7], dtype=indexer.dtype) + tm.assert_almost_equal(expected, indexer) + + backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method="backfill") + expected = np.array([5, 6, 7], dtype=backfill_indexer.dtype) + tm.assert_almost_equal(expected, backfill_indexer) + + # ensure the legacy "bfill" option functions identically to "backfill" + backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method="bfill") + expected = np.array([5, 6, 7], dtype=backfill_indexer.dtype) + tm.assert_almost_equal(expected, backfill_indexer) + + pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method="pad") + expected = np.array([4, 6, 7], dtype=pad_indexer.dtype) + tm.assert_almost_equal(expected, pad_indexer) + + # ensure the legacy "ffill" option functions identically to "pad" + pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method="ffill") + expected = np.array([4, 6, 7], dtype=pad_indexer.dtype) + tm.assert_almost_equal(expected, pad_indexer) + + def test_get_indexer_three_or_more_levels(self): + # https://github.com/pandas-dev/pandas/issues/29896 + # tests get_indexer() on MultiIndexes with 3+ levels + # visually, these are + # mult_idx_1: + # 0: 1 2 5 + # 1: 7 + # 2: 4 5 + # 3: 7 + # 4: 6 5 + # 5: 7 + # 6: 3 2 5 + # 7: 7 + # 8: 4 5 + # 9: 7 + # 10: 6 5 + # 11: 7 + # + # mult_idx_2: + # 0: 1 1 8 + # 1: 1 5 9 + # 2: 1 6 7 + # 3: 2 1 6 + # 4: 2 7 6 + # 5: 2 7 8 + # 6: 3 6 8 + mult_idx_1 = MultiIndex.from_product([[1, 3], [2, 4, 6], [5, 7]]) + mult_idx_2 = MultiIndex.from_tuples( + [ + (1, 1, 8), + (1, 5, 9), + (1, 6, 7), + (2, 1, 6), + (2, 7, 7), + (2, 7, 8), + (3, 6, 8), + ] + ) + # sanity check + assert mult_idx_1.is_monotonic_increasing + assert mult_idx_1.is_unique + assert mult_idx_2.is_monotonic_increasing + assert mult_idx_2.is_unique + + # show the relationships between the two + assert mult_idx_2[0] < mult_idx_1[0] + assert mult_idx_1[3] < mult_idx_2[1] < mult_idx_1[4] + assert mult_idx_1[5] == mult_idx_2[2] + assert mult_idx_1[5] < mult_idx_2[3] < mult_idx_1[6] + assert mult_idx_1[5] < mult_idx_2[4] < mult_idx_1[6] + assert mult_idx_1[5] < mult_idx_2[5] < mult_idx_1[6] + assert mult_idx_1[-1] < mult_idx_2[6] + + indexer_no_fill = mult_idx_1.get_indexer(mult_idx_2) + expected = np.array([-1, -1, 5, -1, -1, -1, -1], dtype=indexer_no_fill.dtype) + tm.assert_almost_equal(expected, indexer_no_fill) + + # test with backfilling + indexer_backfilled = mult_idx_1.get_indexer(mult_idx_2, method="backfill") + expected = np.array([0, 4, 5, 6, 6, 6, -1], dtype=indexer_backfilled.dtype) + tm.assert_almost_equal(expected, indexer_backfilled) + + # now, the same thing, but forward-filled (aka "padded") + indexer_padded = mult_idx_1.get_indexer(mult_idx_2, method="pad") + expected = np.array([-1, 3, 5, 5, 5, 5, 11], dtype=indexer_padded.dtype) + tm.assert_almost_equal(expected, indexer_padded) + + # now, do the indexing in the other direction + assert mult_idx_2[0] < mult_idx_1[0] < mult_idx_2[1] + assert mult_idx_2[0] < mult_idx_1[1] < mult_idx_2[1] + assert mult_idx_2[0] < mult_idx_1[2] < mult_idx_2[1] + assert mult_idx_2[0] < mult_idx_1[3] < mult_idx_2[1] + assert mult_idx_2[1] < mult_idx_1[4] < mult_idx_2[2] + assert mult_idx_2[2] == mult_idx_1[5] + assert mult_idx_2[5] < mult_idx_1[6] < mult_idx_2[6] + assert mult_idx_2[5] < mult_idx_1[7] < mult_idx_2[6] + assert mult_idx_2[5] < mult_idx_1[8] < mult_idx_2[6] + assert mult_idx_2[5] < mult_idx_1[9] < mult_idx_2[6] + assert mult_idx_2[5] < mult_idx_1[10] < mult_idx_2[6] + assert mult_idx_2[5] < mult_idx_1[11] < mult_idx_2[6] + + 
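# reverse direction: only mult_idx_1[5] == (1, 6, 7) has an exact match in mult_idx_2, at position 2 +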
indexer = mult_idx_2.get_indexer(mult_idx_1) + expected = np.array( + [-1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1], dtype=indexer.dtype + ) + tm.assert_almost_equal(expected, indexer) + + backfill_indexer = mult_idx_2.get_indexer(mult_idx_1, method="bfill") + expected = np.array( + [1, 1, 1, 1, 2, 2, 6, 6, 6, 6, 6, 6], dtype=backfill_indexer.dtype + ) + tm.assert_almost_equal(expected, backfill_indexer) + + pad_indexer = mult_idx_2.get_indexer(mult_idx_1, method="pad") + expected = np.array( + [0, 0, 0, 0, 1, 2, 5, 5, 5, 5, 5, 5], dtype=pad_indexer.dtype + ) + tm.assert_almost_equal(expected, pad_indexer) + + def test_get_indexer_crossing_levels(self): + # https://github.com/pandas-dev/pandas/issues/29896 + # tests a corner case with get_indexer() with MultiIndexes where, when we + # need to "carry" across levels, proper tuple ordering is respected + # + # the MultiIndexes used in this test, visually, are: + # mult_idx_1: + # 0: 1 1 1 1 + # 1: 2 + # 2: 2 1 + # 3: 2 + # 4: 1 2 1 1 + # 5: 2 + # 6: 2 1 + # 7: 2 + # 8: 2 1 1 1 + # 9: 2 + # 10: 2 1 + # 11: 2 + # 12: 2 2 1 1 + # 13: 2 + # 14: 2 1 + # 15: 2 + # + # mult_idx_2: + # 0: 1 3 2 2 + # 1: 2 3 2 2 + mult_idx_1 = MultiIndex.from_product([[1, 2]] * 4) + mult_idx_2 = MultiIndex.from_tuples([(1, 3, 2, 2), (2, 3, 2, 2)]) + + # show the tuple orderings, which get_indexer() should respect + assert mult_idx_1[7] < mult_idx_2[0] < mult_idx_1[8] + assert mult_idx_1[-1] < mult_idx_2[1] + + indexer = mult_idx_1.get_indexer(mult_idx_2) + expected = np.array([-1, -1], dtype=indexer.dtype) + tm.assert_almost_equal(expected, indexer) + + backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method="bfill") + expected = np.array([8, -1], dtype=backfill_indexer.dtype) + tm.assert_almost_equal(expected, backfill_indexer) + + pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method="ffill") + expected = np.array([7, 15], dtype=pad_indexer.dtype) + tm.assert_almost_equal(expected, pad_indexer) + + def test_get_indexer_kwarg_validation(self): + # GH#41918 + mi = MultiIndex.from_product([range(3), ["A", "B"]]) + + msg = "limit argument only valid if doing pad, backfill or nearest" + with pytest.raises(ValueError, match=msg): + mi.get_indexer(mi[:-1], limit=4) + + msg = "tolerance argument only valid if doing pad, backfill or nearest" + with pytest.raises(ValueError, match=msg): + mi.get_indexer(mi[:-1], tolerance="piano") + + def test_get_indexer_nan(self): + # GH#37222 + idx1 = MultiIndex.from_product([["A"], [1.0, 2.0]], names=["id1", "id2"]) + idx2 = MultiIndex.from_product([["A"], [np.nan, 2.0]], names=["id1", "id2"]) + expected = np.array([-1, 1]) + result = idx2.get_indexer(idx1) + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + result = idx1.get_indexer(idx2) + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + +def test_getitem(idx): + # scalar + assert idx[2] == ("bar", "one") + + # slice + result = idx[2:5] + expected = idx[[2, 3, 4]] + assert result.equals(expected) + + # boolean + result = idx[[True, False, True, False, True, True]] + result2 = idx[np.array([True, False, True, False, True, True])] + expected = idx[[0, 2, 4, 5]] + assert result.equals(expected) + assert result2.equals(expected) + + +def test_getitem_group_select(idx): + sorted_idx, _ = idx.sortlevel(0) + assert sorted_idx.get_loc("baz") == slice(3, 4) + assert sorted_idx.get_loc("foo") == slice(0, 2) + + +@pytest.mark.parametrize("ind1", [[True] * 5, Index([True] * 5)]) +@pytest.mark.parametrize( + "ind2", + [[True, False, True, False, False], 
Index([True, False, True, False, False])], +) +def test_getitem_bool_index_all(ind1, ind2): + # GH#22533 + idx = MultiIndex.from_tuples([(10, 1), (20, 2), (30, 3), (40, 4), (50, 5)]) + tm.assert_index_equal(idx[ind1], idx) + + expected = MultiIndex.from_tuples([(10, 1), (30, 3)]) + tm.assert_index_equal(idx[ind2], expected) + + +@pytest.mark.parametrize("ind1", [[True], Index([True])]) +@pytest.mark.parametrize("ind2", [[False], Index([False])]) +def test_getitem_bool_index_single(ind1, ind2): + # GH#22533 + idx = MultiIndex.from_tuples([(10, 1)]) + tm.assert_index_equal(idx[ind1], idx) + + expected = MultiIndex( + levels=[np.array([], dtype=np.int64), np.array([], dtype=np.int64)], + codes=[[], []], + ) + tm.assert_index_equal(idx[ind2], expected) + + +class TestGetLoc: + def test_get_loc(self, idx): + assert idx.get_loc(("foo", "two")) == 1 + assert idx.get_loc(("baz", "two")) == 3 + with pytest.raises(KeyError, match=r"^\('bar', 'two'\)$"): + idx.get_loc(("bar", "two")) + with pytest.raises(KeyError, match=r"^'quux'$"): + idx.get_loc("quux") + + # 3 levels + index = MultiIndex( + levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))], + codes=[ + np.array([0, 0, 1, 2, 2, 2, 3, 3]), + np.array([0, 1, 0, 0, 0, 1, 0, 1]), + np.array([1, 0, 1, 1, 0, 0, 1, 0]), + ], + ) + with pytest.raises(KeyError, match=r"^\(1, 1\)$"): + index.get_loc((1, 1)) + assert index.get_loc((2, 0)) == slice(3, 5) + + def test_get_loc_duplicates(self): + index = Index([2, 2, 2, 2]) + result = index.get_loc(2) + expected = slice(0, 4) + assert result == expected + + index = Index(["c", "a", "a", "b", "b"]) + rs = index.get_loc("c") + xp = 0 + assert rs == xp + + with pytest.raises(KeyError, match="2"): + index.get_loc(2) + + def test_get_loc_level(self): + index = MultiIndex( + levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))], + codes=[ + np.array([0, 0, 1, 2, 2, 2, 3, 3]), + np.array([0, 1, 0, 0, 0, 1, 0, 1]), + np.array([1, 0, 1, 1, 0, 0, 1, 0]), + ], + ) + loc, new_index = index.get_loc_level((0, 1)) + expected = slice(1, 2) + exp_index = index[expected].droplevel(0).droplevel(0) + assert loc == expected + assert new_index.equals(exp_index) + + loc, new_index = index.get_loc_level((0, 1, 0)) + expected = 1 + assert loc == expected + assert new_index is None + + with pytest.raises(KeyError, match=r"^\(2, 2\)$"): + index.get_loc_level((2, 2)) + # GH 22221: unused label + with pytest.raises(KeyError, match=r"^2$"): + index.drop(2).get_loc_level(2) + # Unused label on unsorted level: + with pytest.raises(KeyError, match=r"^2$"): + index.drop(1, level=2).get_loc_level(2, level=2) + + index = MultiIndex( + levels=[[2000], list(range(4))], + codes=[np.array([0, 0, 0, 0]), np.array([0, 1, 2, 3])], + ) + result, new_index = index.get_loc_level((2000, slice(None, None))) + expected = slice(None, None) + assert result == expected + assert new_index.equals(index.droplevel(0)) + + @pytest.mark.parametrize("dtype1", [int, float, bool, str]) + @pytest.mark.parametrize("dtype2", [int, float, bool, str]) + def test_get_loc_multiple_dtypes(self, dtype1, dtype2): + # GH 18520 + levels = [np.array([0, 1]).astype(dtype1), np.array([0, 1]).astype(dtype2)] + idx = MultiIndex.from_product(levels) + assert idx.get_loc(idx[2]) == 2 + + @pytest.mark.parametrize("level", [0, 1]) + @pytest.mark.parametrize("dtypes", [[int, float], [float, int]]) + def test_get_loc_implicit_cast(self, level, dtypes): + # GH 18818, GH 15994 : as flat index, cast int to float and vice-versa + levels = [["a", "b"], ["c", 
"d"]] + key = ["b", "d"] + lev_dtype, key_dtype = dtypes + levels[level] = np.array([0, 1], dtype=lev_dtype) + key[level] = key_dtype(1) + idx = MultiIndex.from_product(levels) + assert idx.get_loc(tuple(key)) == 3 + + @pytest.mark.parametrize("dtype", [bool, object]) + def test_get_loc_cast_bool(self, dtype): + # GH 19086 : int is casted to bool, but not vice-versa (for object dtype) + # With bool dtype, we don't cast in either direction. + levels = [Index([False, True], dtype=dtype), np.arange(2, dtype="int64")] + idx = MultiIndex.from_product(levels) + + if dtype is bool: + with pytest.raises(KeyError, match=r"^\(0, 1\)$"): + assert idx.get_loc((0, 1)) == 1 + with pytest.raises(KeyError, match=r"^\(1, 0\)$"): + assert idx.get_loc((1, 0)) == 2 + else: + # We use python object comparisons, which treat 0 == False and 1 == True + assert idx.get_loc((0, 1)) == 1 + assert idx.get_loc((1, 0)) == 2 + + with pytest.raises(KeyError, match=r"^\(False, True\)$"): + idx.get_loc((False, True)) + with pytest.raises(KeyError, match=r"^\(True, False\)$"): + idx.get_loc((True, False)) + + @pytest.mark.parametrize("level", [0, 1]) + def test_get_loc_nan(self, level, nulls_fixture): + # GH 18485 : NaN in MultiIndex + levels = [["a", "b"], ["c", "d"]] + key = ["b", "d"] + levels[level] = np.array([0, nulls_fixture], dtype=type(nulls_fixture)) + key[level] = nulls_fixture + idx = MultiIndex.from_product(levels) + assert idx.get_loc(tuple(key)) == 3 + + def test_get_loc_missing_nan(self): + # GH 8569 + idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]]) + assert isinstance(idx.get_loc(1), slice) + with pytest.raises(KeyError, match=r"^3$"): + idx.get_loc(3) + with pytest.raises(KeyError, match=r"^nan$"): + idx.get_loc(np.nan) + with pytest.raises(InvalidIndexError, match=r"\[nan\]"): + # listlike/non-hashable raises TypeError + idx.get_loc([np.nan]) + + def test_get_loc_with_values_including_missing_values(self): + # issue 19132 + idx = MultiIndex.from_product([[np.nan, 1]] * 2) + expected = slice(0, 2, None) + assert idx.get_loc(np.nan) == expected + + idx = MultiIndex.from_arrays([[np.nan, 1, 2, np.nan]]) + expected = np.array([True, False, False, True]) + tm.assert_numpy_array_equal(idx.get_loc(np.nan), expected) + + idx = MultiIndex.from_product([[np.nan, 1]] * 3) + expected = slice(2, 4, None) + assert idx.get_loc((np.nan, 1)) == expected + + def test_get_loc_duplicates2(self): + # TODO: de-duplicate with test_get_loc_duplicates above? 
+ index = MultiIndex( + levels=[["D", "B", "C"], [0, 26, 27, 37, 57, 67, 75, 82]], + codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]], + names=["tag", "day"], + ) + + assert index.get_loc("D") == slice(0, 3) + + def test_get_loc_past_lexsort_depth(self): + # GH#30053 + idx = MultiIndex( + levels=[["a"], [0, 7], [1]], + codes=[[0, 0], [1, 0], [0, 0]], + names=["x", "y", "z"], + sortorder=0, + ) + key = ("a", 7) + + with tm.assert_produces_warning(PerformanceWarning): + # PerformanceWarning: indexing past lexsort depth may impact performance + result = idx.get_loc(key) + + assert result == slice(0, 1, None) + + def test_multiindex_get_loc_list_raises(self): + # GH#35878 + idx = MultiIndex.from_tuples([("a", 1), ("b", 2)]) + msg = r"\[\]" + with pytest.raises(InvalidIndexError, match=msg): + idx.get_loc([]) + + def test_get_loc_nested_tuple_raises_keyerror(self): + # raise KeyError, not TypeError + mi = MultiIndex.from_product([range(3), range(4), range(5), range(6)]) + key = ((2, 3, 4), "foo") + + with pytest.raises(KeyError, match=re.escape(str(key))): + mi.get_loc(key) + + +class TestWhere: + def test_where(self): + i = MultiIndex.from_tuples([("A", 1), ("A", 2)]) + + msg = r"\.where is not supported for MultiIndex operations" + with pytest.raises(NotImplementedError, match=msg): + i.where(True) + + def test_where_array_like(self, listlike_box): + mi = MultiIndex.from_tuples([("A", 1), ("A", 2)]) + cond = [False, True] + msg = r"\.where is not supported for MultiIndex operations" + with pytest.raises(NotImplementedError, match=msg): + mi.where(listlike_box(cond)) + + +class TestContains: + def test_contains_top_level(self): + midx = MultiIndex.from_product([["A", "B"], [1, 2]]) + assert "A" in midx + assert "A" not in midx._engine + + def test_contains_with_nat(self): + # MI with a NaT + mi = MultiIndex( + levels=[["C"], date_range("2012-01-01", periods=5)], + codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]], + names=[None, "B"], + ) + assert ("C", pd.Timestamp("2012-01-01")) in mi + for val in mi.values: + assert val in mi + + def test_contains(self, idx): + assert ("foo", "two") in idx + assert ("bar", "two") not in idx + assert None not in idx + + def test_contains_with_missing_value(self): + # GH#19132 + idx = MultiIndex.from_arrays([[1, np.nan, 2]]) + assert np.nan in idx + + idx = MultiIndex.from_arrays([[1, 2], [np.nan, 3]]) + assert np.nan not in idx + assert (1, np.nan) in idx + + def test_multiindex_contains_dropped(self): + # GH#19027 + # test that dropped MultiIndex levels are not in the MultiIndex + # despite continuing to be in the MultiIndex's levels + idx = MultiIndex.from_product([[1, 2], [3, 4]]) + assert 2 in idx + idx = idx.drop(2) + + # drop implementation keeps 2 in the levels + assert 2 in idx.levels[0] + # but it should no longer be in the index itself + assert 2 not in idx + + # also applies to strings + idx = MultiIndex.from_product([["a", "b"], ["c", "d"]]) + assert "a" in idx + idx = idx.drop("a") + assert "a" in idx.levels[0] + assert "a" not in idx + + def test_contains_td64_level(self): + # GH#24570 + tx = pd.timedelta_range("09:30:00", "16:00:00", freq="30 min") + idx = MultiIndex.from_arrays([tx, np.arange(len(tx))]) + assert tx[0] in idx + assert "element_not_exit" not in idx + assert "0 day 09:30:00" in idx + + @pytest.mark.slow + def test_large_mi_contains(self): + # GH#10645 + result = MultiIndex.from_arrays([range(10**6), range(10**6)]) + assert (10**6, 0) not in result + + +def test_timestamp_multiindex_indexer(): + # 
https://github.com/pandas-dev/pandas/issues/26944 + idx = MultiIndex.from_product( + [ + date_range("2019-01-01T00:15:33", periods=100, freq="H", name="date"), + ["x"], + [3], + ] + ) + df = pd.DataFrame({"foo": np.arange(len(idx))}, idx) + result = df.loc[pd.IndexSlice["2019-1-2":, "x", :], "foo"] + qidx = MultiIndex.from_product( + [ + date_range( + start="2019-01-02T00:15:33", + end="2019-01-05T03:15:33", + freq="H", + name="date", + ), + ["x"], + [3], + ] + ) + should_be = pd.Series(data=np.arange(24, len(qidx) + 24), index=qidx, name="foo") + tm.assert_series_equal(result, should_be) + + +@pytest.mark.parametrize( + "index_arr,expected,target,algo", + [ + ([[np.nan, "a", "b"], ["c", "d", "e"]], 0, np.nan, "left"), + ([[np.nan, "a", "b"], ["c", "d", "e"]], 1, (np.nan, "c"), "right"), + ([["a", "b", "c"], ["d", np.nan, "d"]], 1, ("b", np.nan), "left"), + ], +) +def test_get_slice_bound_with_missing_value(index_arr, expected, target, algo): + # issue 19132 + idx = MultiIndex.from_arrays(index_arr) + result = idx.get_slice_bound(target, side=algo) + assert result == expected + + +@pytest.mark.parametrize( + "index_arr,expected,start_idx,end_idx", + [ + ([[np.nan, 1, 2], [3, 4, 5]], slice(0, 2, None), np.nan, 1), + ([[np.nan, 1, 2], [3, 4, 5]], slice(0, 3, None), np.nan, (2, 5)), + ([[1, 2, 3], [4, np.nan, 5]], slice(1, 3, None), (2, np.nan), 3), + ([[1, 2, 3], [4, np.nan, 5]], slice(1, 3, None), (2, np.nan), (3, 5)), + ], +) +def test_slice_indexer_with_missing_value(index_arr, expected, start_idx, end_idx): + # issue 19132 + idx = MultiIndex.from_arrays(index_arr) + result = idx.slice_indexer(start=start_idx, end=end_idx) + assert result == expected + + +def test_pyint_engine(): + # GH#18519 : when combinations of codes cannot be represented in 64 + # bits, the index underlying the MultiIndex engine works with Python + # integers, rather than uint64. + N = 5 + keys = [ + tuple(arr) + for arr in [ + [0] * 10 * N, + [1] * 10 * N, + [2] * 10 * N, + [np.nan] * N + [2] * 9 * N, + [0] * N + [2] * 9 * N, + [np.nan] * N + [2] * 8 * N + [0] * N, + ] + ] + # Each level contains 4 elements (including NaN), so it is represented + # in 2 bits, for a total of 2*N*10 = 100 > 64 bits. If we were using a + # 64 bit engine and truncating the first levels, the fourth and fifth + # keys would collide; if truncating the last levels, the fifth and + # sixth; if rotating bits rather than shifting, the third and fifth. 
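+ # concretely: with N = 5 each key is a 50-tuple, and at 2 bits per position a packed key needs 2 * 50 = 100 bits, which cannot fit in a uint64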
+ + for idx, key_value in enumerate(keys): + index = MultiIndex.from_tuples(keys) + assert index.get_loc(key_value) == idx + + expected = np.arange(idx + 1, dtype=np.intp) + result = index.get_indexer([keys[i] for i in expected]) + tm.assert_numpy_array_equal(result, expected) + + # With missing key: + idces = range(len(keys)) + expected = np.array([-1] + list(idces), dtype=np.intp) + missing = tuple([0, 1] * 5 * N) + result = index.get_indexer([missing] + [keys[i] for i in idces]) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "keys,expected", + [ + ((slice(None), [5, 4]), [1, 0]), + ((slice(None), [4, 5]), [0, 1]), + (([True, False, True], [4, 6]), [0, 2]), + (([True, False, True], [6, 4]), [0, 2]), + ((2, [4, 5]), [0, 1]), + ((2, [5, 4]), [1, 0]), + (([2], [4, 5]), [0, 1]), + (([2], [5, 4]), [1, 0]), + ], +) +def test_get_locs_reordering(keys, expected): + # GH48384 + idx = MultiIndex.from_arrays( + [ + [2, 2, 1], + [4, 5, 6], + ] + ) + result = idx.get_locs(keys) + expected = np.array(expected, dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + +def test_get_indexer_for_multiindex_with_nans(nulls_fixture): + # GH37222 + idx1 = MultiIndex.from_product([["A"], [1.0, 2.0]], names=["id1", "id2"]) + idx2 = MultiIndex.from_product([["A"], [nulls_fixture, 2.0]], names=["id1", "id2"]) + + result = idx2.get_indexer(idx1) + expected = np.array([-1, 1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + result = idx1.get_indexer(idx2) + expected = np.array([-1, 1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_integrity.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_integrity.py new file mode 100644 index 00000000..45dd484e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_integrity.py @@ -0,0 +1,281 @@ +import re + +import numpy as np +import pytest + +from pandas._libs import index as libindex + +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike + +import pandas as pd +from pandas import ( + Index, + IntervalIndex, + MultiIndex, + RangeIndex, +) +import pandas._testing as tm + + +def test_labels_dtypes(): + # GH 8456 + i = MultiIndex.from_tuples([("A", 1), ("A", 2)]) + assert i.codes[0].dtype == "int8" + assert i.codes[1].dtype == "int8" + + i = MultiIndex.from_product([["a"], range(40)]) + assert i.codes[1].dtype == "int8" + i = MultiIndex.from_product([["a"], range(400)]) + assert i.codes[1].dtype == "int16" + i = MultiIndex.from_product([["a"], range(40000)]) + assert i.codes[1].dtype == "int32" + + i = MultiIndex.from_product([["a"], range(1000)]) + assert (i.codes[0] >= 0).all() + assert (i.codes[1] >= 0).all() + + +def test_values_boxed(): + tuples = [ + (1, pd.Timestamp("2000-01-01")), + (2, pd.NaT), + (3, pd.Timestamp("2000-01-03")), + (1, pd.Timestamp("2000-01-04")), + (2, pd.Timestamp("2000-01-02")), + (3, pd.Timestamp("2000-01-03")), + ] + result = MultiIndex.from_tuples(tuples) + expected = construct_1d_object_array_from_listlike(tuples) + tm.assert_numpy_array_equal(result.values, expected) + # Check that code branches for boxed values produce identical results + tm.assert_numpy_array_equal(result.values[:4], result[:4].values) + + +def test_values_multiindex_datetimeindex(): + # Test to ensure we hit the boxing / nobox part of MI.values + ints = np.arange(10**18, 10**18 + 5) + naive = pd.DatetimeIndex(ints) + + aware = 
pd.DatetimeIndex(ints, tz="US/Central")
+
+    idx = MultiIndex.from_arrays([naive, aware])
+    result = idx.values
+
+    outer = pd.DatetimeIndex([x[0] for x in result])
+    tm.assert_index_equal(outer, naive)
+
+    inner = pd.DatetimeIndex([x[1] for x in result])
+    tm.assert_index_equal(inner, aware)
+
+    # n_lev > n_lab
+    result = idx[:2].values
+
+    outer = pd.DatetimeIndex([x[0] for x in result])
+    tm.assert_index_equal(outer, naive[:2])
+
+    inner = pd.DatetimeIndex([x[1] for x in result])
+    tm.assert_index_equal(inner, aware[:2])
+
+
+def test_values_multiindex_periodindex():
+    # Test to ensure we hit the boxing / nobox part of MI.values
+    ints = np.arange(2007, 2012)
+    pidx = pd.PeriodIndex(ints, freq="D")
+
+    idx = MultiIndex.from_arrays([ints, pidx])
+    result = idx.values
+
+    outer = Index([x[0] for x in result])
+    tm.assert_index_equal(outer, Index(ints, dtype=np.int64))
+
+    inner = pd.PeriodIndex([x[1] for x in result])
+    tm.assert_index_equal(inner, pidx)
+
+    # n_lev > n_lab
+    result = idx[:2].values
+
+    outer = Index([x[0] for x in result])
+    tm.assert_index_equal(outer, Index(ints[:2], dtype=np.int64))
+
+    inner = pd.PeriodIndex([x[1] for x in result])
+    tm.assert_index_equal(inner, pidx[:2])
+
+
+def test_consistency():
+    # need to construct an overflow
+    major_axis = list(range(70000))
+    minor_axis = list(range(10))
+
+    major_codes = np.arange(70000)
+    minor_codes = np.repeat(range(10), 7000)
+
+    # the fact that this works means it's consistent
+    index = MultiIndex(
+        levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
+    )
+
+    # inconsistent
+    major_codes = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
+    minor_codes = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
+    index = MultiIndex(
+        levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
+    )
+
+    assert index.is_unique is False
+
+
+@pytest.mark.slow
+def test_hash_collisions():
+    # non-smoke test that we don't get hash collisions
+
+    index = MultiIndex.from_product(
+        [np.arange(1000), np.arange(1000)], names=["one", "two"]
+    )
+    result = index.get_indexer(index.values)
+    tm.assert_numpy_array_equal(result, np.arange(len(index), dtype="intp"))
+
+    for i in [0, 1, len(index) - 2, len(index) - 1]:
+        result = index.get_loc(index[i])
+        assert result == i
+
+
+def test_dims():
+    pass
+
+
+def test_take_invalid_kwargs():
+    vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]]
+    idx = MultiIndex.from_product(vals, names=["str", "dt"])
+    indices = [1, 2]
+
+    msg = r"take\(\) got an unexpected keyword argument 'foo'"
+    with pytest.raises(TypeError, match=msg):
+        idx.take(indices, foo=2)
+
+    msg = "the 'out' parameter is not supported"
+    with pytest.raises(ValueError, match=msg):
+        idx.take(indices, out=indices)
+
+    msg = "the 'mode' parameter is not supported"
+    with pytest.raises(ValueError, match=msg):
+        idx.take(indices, mode="clip")
+
+
+def test_isna_behavior(idx):
+    # should not segfault GH5123
+    # NOTE: if MI representation changes, may make sense to allow
+    # isna(MI)
+    msg = "isna is not defined for MultiIndex"
+    with pytest.raises(NotImplementedError, match=msg):
+        pd.isna(idx)
+
+
+def test_large_multiindex_error():
+    # GH12527
+    df_below_1000000 = pd.DataFrame(
+        1, index=MultiIndex.from_product([[1, 2], range(499999)]), columns=["dest"]
+    )
+    with pytest.raises(KeyError, match=r"^\(-1, 0\)$"):
+        df_below_1000000.loc[(-1, 0), "dest"]
+    with pytest.raises(KeyError, match=r"^\(3, 0\)$"):
+        df_below_1000000.loc[(3, 0), "dest"]
+    df_above_1000000 = pd.DataFrame(
+        1, index=MultiIndex.from_product([[1,
2], range(500001)]), columns=["dest"] + ) + with pytest.raises(KeyError, match=r"^\(-1, 0\)$"): + df_above_1000000.loc[(-1, 0), "dest"] + with pytest.raises(KeyError, match=r"^\(3, 0\)$"): + df_above_1000000.loc[(3, 0), "dest"] + + +def test_mi_hashtable_populated_attribute_error(monkeypatch): + # GH 18165 + monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 50) + r = range(50) + df = pd.DataFrame({"a": r, "b": r}, index=MultiIndex.from_arrays([r, r])) + + msg = "'Series' object has no attribute 'foo'" + with pytest.raises(AttributeError, match=msg): + df["a"].foo() + + +def test_can_hold_identifiers(idx): + key = idx[0] + assert idx._can_hold_identifiers_and_holds_name(key) is True + + +def test_metadata_immutable(idx): + levels, codes = idx.levels, idx.codes + # shouldn't be able to set at either the top level or base level + mutable_regex = re.compile("does not support mutable operations") + with pytest.raises(TypeError, match=mutable_regex): + levels[0] = levels[0] + with pytest.raises(TypeError, match=mutable_regex): + levels[0][0] = levels[0][0] + # ditto for labels + with pytest.raises(TypeError, match=mutable_regex): + codes[0] = codes[0] + with pytest.raises(ValueError, match="assignment destination is read-only"): + codes[0][0] = codes[0][0] + # and for names + names = idx.names + with pytest.raises(TypeError, match=mutable_regex): + names[0] = names[0] + + +def test_level_setting_resets_attributes(): + ind = MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]]) + assert ind.is_monotonic_increasing + ind = ind.set_levels([["A", "B"], [1, 3, 2]]) + # if this fails, probably didn't reset the cache correctly. + assert not ind.is_monotonic_increasing + + +def test_rangeindex_fallback_coercion_bug(): + # GH 12893 + df1 = pd.DataFrame(np.arange(100).reshape((10, 10))) + df2 = pd.DataFrame(np.arange(100).reshape((10, 10))) + df = pd.concat( + {"df1": df1.stack(future_stack=True), "df2": df2.stack(future_stack=True)}, + axis=1, + ) + df.index.names = ["fizz", "buzz"] + + str(df) + expected = pd.DataFrame( + {"df2": np.arange(100), "df1": np.arange(100)}, + index=MultiIndex.from_product([range(10), range(10)], names=["fizz", "buzz"]), + ) + tm.assert_frame_equal(df, expected, check_like=True) + + result = df.index.get_level_values("fizz") + expected = Index(np.arange(10, dtype=np.int64), name="fizz").repeat(10) + tm.assert_index_equal(result, expected) + + result = df.index.get_level_values("buzz") + expected = Index(np.tile(np.arange(10, dtype=np.int64), 10), name="buzz") + tm.assert_index_equal(result, expected) + + +def test_memory_usage(idx): + result = idx.memory_usage() + if len(idx): + idx.get_loc(idx[0]) + result2 = idx.memory_usage() + result3 = idx.memory_usage(deep=True) + + # RangeIndex, IntervalIndex + # don't have engines + if not isinstance(idx, (RangeIndex, IntervalIndex)): + assert result2 > result + + if idx.inferred_type == "object": + assert result3 > result2 + + else: + # we report 0 for no-length + assert result == 0 + + +def test_nlevels(idx): + assert idx.nlevels == 2 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_isin.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_isin.py new file mode 100644 index 00000000..68fdf253 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_isin.py @@ -0,0 +1,103 @@ +import numpy as np +import pytest + +from pandas import MultiIndex +import pandas._testing as tm + + +def test_isin_nan(): + idx = MultiIndex.from_arrays([["foo", 
"bar"], [1.0, np.nan]]) + tm.assert_numpy_array_equal(idx.isin([("bar", np.nan)]), np.array([False, True])) + tm.assert_numpy_array_equal( + idx.isin([("bar", float("nan"))]), np.array([False, True]) + ) + + +def test_isin_missing(nulls_fixture): + # GH48905 + mi1 = MultiIndex.from_tuples([(1, nulls_fixture)]) + mi2 = MultiIndex.from_tuples([(1, 1), (1, 2)]) + result = mi2.isin(mi1) + expected = np.array([False, False]) + tm.assert_numpy_array_equal(result, expected) + + +def test_isin(): + values = [("foo", 2), ("bar", 3), ("quux", 4)] + + idx = MultiIndex.from_arrays([["qux", "baz", "foo", "bar"], np.arange(4)]) + result = idx.isin(values) + expected = np.array([False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + # empty, return dtype bool + idx = MultiIndex.from_arrays([[], []]) + result = idx.isin(values) + assert len(result) == 0 + assert result.dtype == np.bool_ + + +def test_isin_level_kwarg(): + idx = MultiIndex.from_arrays([["qux", "baz", "foo", "bar"], np.arange(4)]) + + vals_0 = ["foo", "bar", "quux"] + vals_1 = [2, 3, 10] + + expected = np.array([False, False, True, True]) + tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=0)) + tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=-2)) + + tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1)) + tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1)) + + msg = "Too many levels: Index has only 2 levels, not 6" + with pytest.raises(IndexError, match=msg): + idx.isin(vals_0, level=5) + msg = "Too many levels: Index has only 2 levels, -5 is not a valid level number" + with pytest.raises(IndexError, match=msg): + idx.isin(vals_0, level=-5) + + with pytest.raises(KeyError, match=r"'Level 1\.0 not found'"): + idx.isin(vals_0, level=1.0) + with pytest.raises(KeyError, match=r"'Level -1\.0 not found'"): + idx.isin(vals_1, level=-1.0) + with pytest.raises(KeyError, match="'Level A not found'"): + idx.isin(vals_1, level="A") + + idx.names = ["A", "B"] + tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level="A")) + tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level="B")) + + with pytest.raises(KeyError, match="'Level C not found'"): + idx.isin(vals_1, level="C") + + +@pytest.mark.parametrize( + "labels,expected,level", + [ + ([("b", np.nan)], np.array([False, False, True]), None), + ([np.nan, "a"], np.array([True, True, False]), 0), + (["d", np.nan], np.array([False, True, True]), 1), + ], +) +def test_isin_multi_index_with_missing_value(labels, expected, level): + # GH 19132 + midx = MultiIndex.from_arrays([[np.nan, "a", "b"], ["c", "d", np.nan]]) + result = midx.isin(labels, level=level) + tm.assert_numpy_array_equal(result, expected) + + +def test_isin_empty(): + # GH#51599 + midx = MultiIndex.from_arrays([[1, 2], [3, 4]]) + result = midx.isin([]) + expected = np.array([False, False]) + tm.assert_numpy_array_equal(result, expected) + + +def test_isin_generator(): + # GH#52568 + midx = MultiIndex.from_tuples([(1, 2)]) + result = midx.isin(x for x in [(1, 2)]) + expected = np.array([True]) + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_join.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_join.py new file mode 100644 index 00000000..700af142 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_join.py @@ -0,0 +1,271 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Interval, + 
MultiIndex, + Series, + StringDtype, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "other", [Index(["three", "one", "two"]), Index(["one"]), Index(["one", "three"])] +) +def test_join_level(idx, other, join_type): + join_index, lidx, ridx = other.join( + idx, how=join_type, level="second", return_indexers=True + ) + + exp_level = other.join(idx.levels[1], how=join_type) + assert join_index.levels[0].equals(idx.levels[0]) + assert join_index.levels[1].equals(exp_level) + + # pare down levels + mask = np.array([x[1] in exp_level for x in idx], dtype=bool) + exp_values = idx.values[mask] + tm.assert_numpy_array_equal(join_index.values, exp_values) + + if join_type in ("outer", "inner"): + join_index2, ridx2, lidx2 = idx.join( + other, how=join_type, level="second", return_indexers=True + ) + + assert join_index.equals(join_index2) + tm.assert_numpy_array_equal(lidx, lidx2) + tm.assert_numpy_array_equal(ridx, ridx2) + tm.assert_numpy_array_equal(join_index2.values, exp_values) + + +def test_join_level_corner_case(idx): + # some corner cases + index = Index(["three", "one", "two"]) + result = index.join(idx, level="second") + assert isinstance(result, MultiIndex) + + with pytest.raises(TypeError, match="Join.*MultiIndex.*ambiguous"): + idx.join(idx, level=1) + + +def test_join_self(idx, join_type): + joined = idx.join(idx, how=join_type) + tm.assert_index_equal(joined, idx) + + +def test_join_multi(): + # GH 10665 + midx = MultiIndex.from_product([np.arange(4), np.arange(4)], names=["a", "b"]) + idx = Index([1, 2, 5], name="b") + + # inner + jidx, lidx, ridx = midx.join(idx, how="inner", return_indexers=True) + exp_idx = MultiIndex.from_product([np.arange(4), [1, 2]], names=["a", "b"]) + exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp) + exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp) + tm.assert_index_equal(jidx, exp_idx) + tm.assert_numpy_array_equal(lidx, exp_lidx) + tm.assert_numpy_array_equal(ridx, exp_ridx) + # flip + jidx, ridx, lidx = idx.join(midx, how="inner", return_indexers=True) + tm.assert_index_equal(jidx, exp_idx) + tm.assert_numpy_array_equal(lidx, exp_lidx) + tm.assert_numpy_array_equal(ridx, exp_ridx) + + # keep MultiIndex + jidx, lidx, ridx = midx.join(idx, how="left", return_indexers=True) + exp_ridx = np.array( + [-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1], dtype=np.intp + ) + tm.assert_index_equal(jidx, midx) + assert lidx is None + tm.assert_numpy_array_equal(ridx, exp_ridx) + # flip + jidx, ridx, lidx = idx.join(midx, how="right", return_indexers=True) + tm.assert_index_equal(jidx, midx) + assert lidx is None + tm.assert_numpy_array_equal(ridx, exp_ridx) + + +def test_join_self_unique(idx, join_type): + if idx.is_unique: + joined = idx.join(idx, how=join_type) + assert (idx == joined).all() + + +def test_join_multi_wrong_order(): + # GH 25760 + # GH 28956 + + midx1 = MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"]) + midx2 = MultiIndex.from_product([[1, 2], [3, 4]], names=["b", "a"]) + + join_idx, lidx, ridx = midx1.join(midx2, return_indexers=True) + + exp_ridx = np.array([-1, -1, -1, -1], dtype=np.intp) + + tm.assert_index_equal(midx1, join_idx) + assert lidx is None + tm.assert_numpy_array_equal(ridx, exp_ridx) + + +def test_join_multi_return_indexers(): + # GH 34074 + + midx1 = MultiIndex.from_product([[1, 2], [3, 4], [5, 6]], names=["a", "b", "c"]) + midx2 = MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"]) + + result = midx1.join(midx2, return_indexers=False) + 
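# return_indexers=False yields just the joined index; every (a, b) pair of midx1 occurs in midx2, so the default left join reproduces midx1 +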
tm.assert_index_equal(result, midx1) + + +def test_join_overlapping_interval_level(): + # GH 44096 + idx_1 = MultiIndex.from_tuples( + [ + (1, Interval(0.0, 1.0)), + (1, Interval(1.0, 2.0)), + (1, Interval(2.0, 5.0)), + (2, Interval(0.0, 1.0)), + (2, Interval(1.0, 3.0)), # interval limit is here at 3.0, not at 2.0 + (2, Interval(3.0, 5.0)), + ], + names=["num", "interval"], + ) + + idx_2 = MultiIndex.from_tuples( + [ + (1, Interval(2.0, 5.0)), + (1, Interval(0.0, 1.0)), + (1, Interval(1.0, 2.0)), + (2, Interval(3.0, 5.0)), + (2, Interval(0.0, 1.0)), + (2, Interval(1.0, 3.0)), + ], + names=["num", "interval"], + ) + + expected = MultiIndex.from_tuples( + [ + (1, Interval(0.0, 1.0)), + (1, Interval(1.0, 2.0)), + (1, Interval(2.0, 5.0)), + (2, Interval(0.0, 1.0)), + (2, Interval(1.0, 3.0)), + (2, Interval(3.0, 5.0)), + ], + names=["num", "interval"], + ) + result = idx_1.join(idx_2, how="outer") + + tm.assert_index_equal(result, expected) + + +def test_join_midx_ea(): + # GH#49277 + midx = MultiIndex.from_arrays( + [Series([1, 1, 3], dtype="Int64"), Series([1, 2, 3], dtype="Int64")], + names=["a", "b"], + ) + midx2 = MultiIndex.from_arrays( + [Series([1], dtype="Int64"), Series([3], dtype="Int64")], names=["a", "c"] + ) + result = midx.join(midx2, how="inner") + expected = MultiIndex.from_arrays( + [ + Series([1, 1], dtype="Int64"), + Series([1, 2], dtype="Int64"), + Series([3, 3], dtype="Int64"), + ], + names=["a", "b", "c"], + ) + tm.assert_index_equal(result, expected) + + +def test_join_midx_string(): + # GH#49277 + midx = MultiIndex.from_arrays( + [ + Series(["a", "a", "c"], dtype=StringDtype()), + Series(["a", "b", "c"], dtype=StringDtype()), + ], + names=["a", "b"], + ) + midx2 = MultiIndex.from_arrays( + [Series(["a"], dtype=StringDtype()), Series(["c"], dtype=StringDtype())], + names=["a", "c"], + ) + result = midx.join(midx2, how="inner") + expected = MultiIndex.from_arrays( + [ + Series(["a", "a"], dtype=StringDtype()), + Series(["a", "b"], dtype=StringDtype()), + Series(["c", "c"], dtype=StringDtype()), + ], + names=["a", "b", "c"], + ) + tm.assert_index_equal(result, expected) + + +def test_join_multi_with_nan(): + # GH29252 + df1 = DataFrame( + data={"col1": [1.1, 1.2]}, + index=MultiIndex.from_product([["A"], [1.0, 2.0]], names=["id1", "id2"]), + ) + df2 = DataFrame( + data={"col2": [2.1, 2.2]}, + index=MultiIndex.from_product([["A"], [np.nan, 2.0]], names=["id1", "id2"]), + ) + result = df1.join(df2) + expected = DataFrame( + data={"col1": [1.1, 1.2], "col2": [np.nan, 2.2]}, + index=MultiIndex.from_product([["A"], [1.0, 2.0]], names=["id1", "id2"]), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("val", [0, 5]) +def test_join_dtypes(any_numeric_ea_dtype, val): + # GH#49830 + midx = MultiIndex.from_arrays([Series([1, 2], dtype=any_numeric_ea_dtype), [3, 4]]) + midx2 = MultiIndex.from_arrays( + [Series([1, val, val], dtype=any_numeric_ea_dtype), [3, 4, 4]] + ) + result = midx.join(midx2, how="outer") + expected = MultiIndex.from_arrays( + [Series([val, val, 1, 2], dtype=any_numeric_ea_dtype), [4, 4, 3, 4]] + ).sort_values() + tm.assert_index_equal(result, expected) + + +def test_join_dtypes_all_nan(any_numeric_ea_dtype): + # GH#49830 + midx = MultiIndex.from_arrays( + [Series([1, 2], dtype=any_numeric_ea_dtype), [np.nan, np.nan]] + ) + midx2 = MultiIndex.from_arrays( + [Series([1, 0, 0], dtype=any_numeric_ea_dtype), [np.nan, np.nan, np.nan]] + ) + result = midx.join(midx2, how="outer") + expected = MultiIndex.from_arrays( + [ + Series([0, 0, 1, 2], 
dtype=any_numeric_ea_dtype),
+            [np.nan, np.nan, np.nan, np.nan],
+        ]
+    )
+    tm.assert_index_equal(result, expected)
+
+
+def test_join_index_levels():
+    # GH#53093
+    midx = MultiIndex.from_tuples([("a", "2019-02-01"), ("a", "2019-02-01")])
+    midx2 = MultiIndex.from_tuples([("a", "2019-01-31")])
+    result = midx.join(midx2, how="outer")
+    expected = MultiIndex.from_tuples(
+        [("a", "2019-01-31"), ("a", "2019-02-01"), ("a", "2019-02-01")]
+    )
+    tm.assert_index_equal(result.levels[1], expected.levels[1])
+    tm.assert_index_equal(result, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_lexsort.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_lexsort.py
new file mode 100644
index 00000000..fc16a419
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_lexsort.py
@@ -0,0 +1,46 @@
+from pandas import MultiIndex
+
+
+class TestIsLexsorted:
+    def test_is_lexsorted(self):
+        levels = [[0, 1], [0, 1, 2]]
+
+        index = MultiIndex(
+            levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
+        )
+        assert index._is_lexsorted()
+
+        index = MultiIndex(
+            levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]]
+        )
+        assert not index._is_lexsorted()
+
+        index = MultiIndex(
+            levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]]
+        )
+        assert not index._is_lexsorted()
+        assert index._lexsort_depth == 0
+
+
+class TestLexsortDepth:
+    def test_lexsort_depth(self):
+        # Test that _lexsort_depth returns the correct sortorder
+        # when it was given to the MultiIndex constructor.
+        # GH#28518
+
+        levels = [[0, 1], [0, 1, 2]]
+
+        index = MultiIndex(
+            levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]], sortorder=2
+        )
+        assert index._lexsort_depth == 2
+
+        index = MultiIndex(
+            levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], sortorder=1
+        )
+        assert index._lexsort_depth == 1
+
+        index = MultiIndex(
+            levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], sortorder=0
+        )
+        assert index._lexsort_depth == 0
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_missing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_missing.py
new file mode 100644
index 00000000..14ffc42f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_missing.py
@@ -0,0 +1,111 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import MultiIndex
+import pandas._testing as tm
+
+
+def test_fillna(idx):
+    # GH 11343
+    msg = "isna is not defined for MultiIndex"
+    with pytest.raises(NotImplementedError, match=msg):
+        idx.fillna(idx[0])
+
+
+def test_dropna():
+    # GH 6194
+    idx = MultiIndex.from_arrays(
+        [
+            [1, np.nan, 3, np.nan, 5],
+            [1, 2, np.nan, np.nan, 5],
+            ["a", "b", "c", np.nan, "e"],
+        ]
+    )
+
+    exp = MultiIndex.from_arrays([[1, 5], [1, 5], ["a", "e"]])
+    tm.assert_index_equal(idx.dropna(), exp)
+    tm.assert_index_equal(idx.dropna(how="any"), exp)
+
+    exp = MultiIndex.from_arrays(
+        [[1, np.nan, 3, 5], [1, 2, np.nan, 5], ["a", "b", "c", "e"]]
+    )
+    tm.assert_index_equal(idx.dropna(how="all"), exp)
+
+    msg = "invalid how option: xxx"
+    with pytest.raises(ValueError, match=msg):
+        idx.dropna(how="xxx")
+
+    # GH26408
+    # test if missing values are dropped for multiindex constructed
+    # from codes and values
+    idx = MultiIndex(
+        levels=[[np.nan, None, pd.NaT, "128", 2], [np.nan, None, pd.NaT, "128", 2]],
+        codes=[[0, -1, 1, 2, 3, 4], [0, -1, 3, 3, 3, 4]],
+    )
+    expected =
MultiIndex.from_arrays([["128", 2], ["128", 2]]) + tm.assert_index_equal(idx.dropna(), expected) + tm.assert_index_equal(idx.dropna(how="any"), expected) + + expected = MultiIndex.from_arrays( + [[np.nan, np.nan, "128", 2], ["128", "128", "128", 2]] + ) + tm.assert_index_equal(idx.dropna(how="all"), expected) + + +def test_nulls(idx): + # this is really a smoke test for the methods + # as these are adequately tested for function elsewhere + + msg = "isna is not defined for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + idx.isna() + + +@pytest.mark.xfail(reason="isna is not defined for MultiIndex") +def test_hasnans_isnans(idx): + # GH 11343, added tests for hasnans / isnans + index = idx.copy() + + # cases in indices doesn't include NaN + expected = np.array([False] * len(index), dtype=bool) + tm.assert_numpy_array_equal(index._isnan, expected) + assert index.hasnans is False + + index = idx.copy() + values = index.values + values[1] = np.nan + + index = type(idx)(values) + + expected = np.array([False] * len(index), dtype=bool) + expected[1] = True + tm.assert_numpy_array_equal(index._isnan, expected) + assert index.hasnans is True + + +def test_nan_stays_float(): + # GH 7031 + idx0 = MultiIndex(levels=[["A", "B"], []], codes=[[1, 0], [-1, -1]], names=[0, 1]) + idx1 = MultiIndex(levels=[["C"], ["D"]], codes=[[0], [0]], names=[0, 1]) + idxm = idx0.join(idx1, how="outer") + assert pd.isna(idx0.get_level_values(1)).all() + # the following failed in 0.14.1 + assert pd.isna(idxm.get_level_values(1)[:-1]).all() + + df0 = pd.DataFrame([[1, 2]], index=idx0) + df1 = pd.DataFrame([[3, 4]], index=idx1) + dfm = df0 - df1 + assert pd.isna(df0.index.get_level_values(1)).all() + # the following failed in 0.14.1 + assert pd.isna(dfm.index.get_level_values(1)[:-1]).all() + + +def test_tuples_have_na(): + index = MultiIndex( + levels=[[1, 0], [0, 1, 2, 3]], + codes=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]], + ) + + assert pd.isna(index[4][0]) + assert pd.isna(index.values[4][0]) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_monotonic.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_monotonic.py new file mode 100644 index 00000000..2b0b3f7c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_monotonic.py @@ -0,0 +1,188 @@ +import numpy as np +import pytest + +from pandas import ( + Index, + MultiIndex, +) + + +def test_is_monotonic_increasing_lexsorted(lexsorted_two_level_string_multiindex): + # string ordering + mi = lexsorted_two_level_string_multiindex + assert mi.is_monotonic_increasing is False + assert Index(mi.values).is_monotonic_increasing is False + assert mi._is_strictly_monotonic_increasing is False + assert Index(mi.values)._is_strictly_monotonic_increasing is False + + +def test_is_monotonic_increasing(): + i = MultiIndex.from_product([np.arange(10), np.arange(10)], names=["one", "two"]) + assert i.is_monotonic_increasing is True + assert i._is_strictly_monotonic_increasing is True + assert Index(i.values).is_monotonic_increasing is True + assert i._is_strictly_monotonic_increasing is True + + i = MultiIndex.from_product( + [np.arange(10, 0, -1), np.arange(10)], names=["one", "two"] + ) + assert i.is_monotonic_increasing is False + assert i._is_strictly_monotonic_increasing is False + assert Index(i.values).is_monotonic_increasing is False + assert Index(i.values)._is_strictly_monotonic_increasing is False + + i = MultiIndex.from_product( + [np.arange(10), 
np.arange(10, 0, -1)], names=["one", "two"] + ) + assert i.is_monotonic_increasing is False + assert i._is_strictly_monotonic_increasing is False + assert Index(i.values).is_monotonic_increasing is False + assert Index(i.values)._is_strictly_monotonic_increasing is False + + i = MultiIndex.from_product([[1.0, np.nan, 2.0], ["a", "b", "c"]]) + assert i.is_monotonic_increasing is False + assert i._is_strictly_monotonic_increasing is False + assert Index(i.values).is_monotonic_increasing is False + assert Index(i.values)._is_strictly_monotonic_increasing is False + + i = MultiIndex( + levels=[["bar", "baz", "foo", "qux"], ["mom", "next", "zenith"]], + codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=["first", "second"], + ) + assert i.is_monotonic_increasing is True + assert Index(i.values).is_monotonic_increasing is True + assert i._is_strictly_monotonic_increasing is True + assert Index(i.values)._is_strictly_monotonic_increasing is True + + # mixed levels, hits the TypeError + i = MultiIndex( + levels=[ + [1, 2, 3, 4], + [ + "gb00b03mlx29", + "lu0197800237", + "nl0000289783", + "nl0000289965", + "nl0000301109", + ], + ], + codes=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]], + names=["household_id", "asset_id"], + ) + + assert i.is_monotonic_increasing is False + assert i._is_strictly_monotonic_increasing is False + + # empty + i = MultiIndex.from_arrays([[], []]) + assert i.is_monotonic_increasing is True + assert Index(i.values).is_monotonic_increasing is True + assert i._is_strictly_monotonic_increasing is True + assert Index(i.values)._is_strictly_monotonic_increasing is True + + +def test_is_monotonic_decreasing(): + i = MultiIndex.from_product( + [np.arange(9, -1, -1), np.arange(9, -1, -1)], names=["one", "two"] + ) + assert i.is_monotonic_decreasing is True + assert i._is_strictly_monotonic_decreasing is True + assert Index(i.values).is_monotonic_decreasing is True + assert i._is_strictly_monotonic_decreasing is True + + i = MultiIndex.from_product( + [np.arange(10), np.arange(10, 0, -1)], names=["one", "two"] + ) + assert i.is_monotonic_decreasing is False + assert i._is_strictly_monotonic_decreasing is False + assert Index(i.values).is_monotonic_decreasing is False + assert Index(i.values)._is_strictly_monotonic_decreasing is False + + i = MultiIndex.from_product( + [np.arange(10, 0, -1), np.arange(10)], names=["one", "two"] + ) + assert i.is_monotonic_decreasing is False + assert i._is_strictly_monotonic_decreasing is False + assert Index(i.values).is_monotonic_decreasing is False + assert Index(i.values)._is_strictly_monotonic_decreasing is False + + i = MultiIndex.from_product([[2.0, np.nan, 1.0], ["c", "b", "a"]]) + assert i.is_monotonic_decreasing is False + assert i._is_strictly_monotonic_decreasing is False + assert Index(i.values).is_monotonic_decreasing is False + assert Index(i.values)._is_strictly_monotonic_decreasing is False + + # string ordering + i = MultiIndex( + levels=[["qux", "foo", "baz", "bar"], ["three", "two", "one"]], + codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=["first", "second"], + ) + assert i.is_monotonic_decreasing is False + assert Index(i.values).is_monotonic_decreasing is False + assert i._is_strictly_monotonic_decreasing is False + assert Index(i.values)._is_strictly_monotonic_decreasing is False + + i = MultiIndex( + levels=[["qux", "foo", "baz", "bar"], ["zenith", "next", "mom"]], + codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=["first", 
"second"], + ) + assert i.is_monotonic_decreasing is True + assert Index(i.values).is_monotonic_decreasing is True + assert i._is_strictly_monotonic_decreasing is True + assert Index(i.values)._is_strictly_monotonic_decreasing is True + + # mixed levels, hits the TypeError + i = MultiIndex( + levels=[ + [4, 3, 2, 1], + [ + "nl0000301109", + "nl0000289965", + "nl0000289783", + "lu0197800237", + "gb00b03mlx29", + ], + ], + codes=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]], + names=["household_id", "asset_id"], + ) + + assert i.is_monotonic_decreasing is False + assert i._is_strictly_monotonic_decreasing is False + + # empty + i = MultiIndex.from_arrays([[], []]) + assert i.is_monotonic_decreasing is True + assert Index(i.values).is_monotonic_decreasing is True + assert i._is_strictly_monotonic_decreasing is True + assert Index(i.values)._is_strictly_monotonic_decreasing is True + + +def test_is_strictly_monotonic_increasing(): + idx = MultiIndex( + levels=[["bar", "baz"], ["mom", "next"]], codes=[[0, 0, 1, 1], [0, 0, 0, 1]] + ) + assert idx.is_monotonic_increasing is True + assert idx._is_strictly_monotonic_increasing is False + + +def test_is_strictly_monotonic_decreasing(): + idx = MultiIndex( + levels=[["baz", "bar"], ["next", "mom"]], codes=[[0, 0, 1, 1], [0, 0, 0, 1]] + ) + assert idx.is_monotonic_decreasing is True + assert idx._is_strictly_monotonic_decreasing is False + + +@pytest.mark.parametrize("attr", ["is_monotonic_increasing", "is_monotonic_decreasing"]) +@pytest.mark.parametrize( + "values", + [[(np.nan,), (1,), (2,)], [(1,), (np.nan,), (2,)], [(1,), (2,), (np.nan,)]], +) +def test_is_monotonic_with_nans(values, attr): + # GH: 37220 + idx = MultiIndex.from_tuples(values, names=["test"]) + assert getattr(idx, attr) is False diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_names.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_names.py new file mode 100644 index 00000000..8ae643eb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_names.py @@ -0,0 +1,201 @@ +import pytest + +import pandas as pd +from pandas import MultiIndex +import pandas._testing as tm + + +def check_level_names(index, names): + assert [level.name for level in index.levels] == list(names) + + +def test_slice_keep_name(): + x = MultiIndex.from_tuples([("a", "b"), (1, 2), ("c", "d")], names=["x", "y"]) + assert x[1:].names == x.names + + +def test_index_name_retained(): + # GH9857 + result = pd.DataFrame({"x": [1, 2, 6], "y": [2, 2, 8], "z": [-5, 0, 5]}) + result = result.set_index("z") + result.loc[10] = [9, 10] + df_expected = pd.DataFrame( + {"x": [1, 2, 6, 9], "y": [2, 2, 8, 10], "z": [-5, 0, 5, 10]} + ) + df_expected = df_expected.set_index("z") + tm.assert_frame_equal(result, df_expected) + + +def test_changing_names(idx): + assert [level.name for level in idx.levels] == ["first", "second"] + + view = idx.view() + copy = idx.copy() + shallow_copy = idx._view() + + # changing names should not change level names on object + new_names = [name + "a" for name in idx.names] + idx.names = new_names + check_level_names(idx, ["firsta", "seconda"]) + + # and not on copies + check_level_names(view, ["first", "second"]) + check_level_names(copy, ["first", "second"]) + check_level_names(shallow_copy, ["first", "second"]) + + # and copies shouldn't change original + shallow_copy.names = [name + "c" for name in shallow_copy.names] + check_level_names(idx, ["firsta", "seconda"]) + + +def test_take_preserve_name(idx): + 
taken = idx.take([3, 0, 1]) + assert taken.names == idx.names + + +def test_copy_names(): + # Check that adding a "names" parameter to the copy is honored + # GH14302 + multi_idx = MultiIndex.from_tuples([(1, 2), (3, 4)], names=["MyName1", "MyName2"]) + multi_idx1 = multi_idx.copy() + + assert multi_idx.equals(multi_idx1) + assert multi_idx.names == ["MyName1", "MyName2"] + assert multi_idx1.names == ["MyName1", "MyName2"] + + multi_idx2 = multi_idx.copy(names=["NewName1", "NewName2"]) + + assert multi_idx.equals(multi_idx2) + assert multi_idx.names == ["MyName1", "MyName2"] + assert multi_idx2.names == ["NewName1", "NewName2"] + + multi_idx3 = multi_idx.copy(name=["NewName1", "NewName2"]) + + assert multi_idx.equals(multi_idx3) + assert multi_idx.names == ["MyName1", "MyName2"] + assert multi_idx3.names == ["NewName1", "NewName2"] + + # gh-35592 + with pytest.raises(ValueError, match="Length of new names must be 2, got 1"): + multi_idx.copy(names=["mario"]) + + with pytest.raises(TypeError, match="MultiIndex.name must be a hashable type"): + multi_idx.copy(names=[["mario"], ["luigi"]]) + + +def test_names(idx, index_names): + # names are assigned in setup + assert index_names == ["first", "second"] + level_names = [level.name for level in idx.levels] + assert level_names == index_names + + # setting bad names on existing + index = idx + with pytest.raises(ValueError, match="^Length of names"): + setattr(index, "names", list(index.names) + ["third"]) + with pytest.raises(ValueError, match="^Length of names"): + setattr(index, "names", []) + + # initializing with bad names (should always be equivalent) + major_axis, minor_axis = idx.levels + major_codes, minor_codes = idx.codes + with pytest.raises(ValueError, match="^Length of names"): + MultiIndex( + levels=[major_axis, minor_axis], + codes=[major_codes, minor_codes], + names=["first"], + ) + with pytest.raises(ValueError, match="^Length of names"): + MultiIndex( + levels=[major_axis, minor_axis], + codes=[major_codes, minor_codes], + names=["first", "second", "third"], + ) + + # names are assigned on index, but not transferred to the levels + index.names = ["a", "b"] + level_names = [level.name for level in index.levels] + assert level_names == ["a", "b"] + + +def test_duplicate_level_names_access_raises(idx): + # GH19029 + idx.names = ["foo", "foo"] + with pytest.raises(ValueError, match="name foo occurs multiple times"): + idx._get_level_number("foo") + + +def test_get_names_from_levels(): + idx = MultiIndex.from_product([["a"], [1, 2]], names=["a", "b"]) + + assert idx.levels[0].name == "a" + assert idx.levels[1].name == "b" + + +def test_setting_names_from_levels_raises(): + idx = MultiIndex.from_product([["a"], [1, 2]], names=["a", "b"]) + with pytest.raises(RuntimeError, match="set_names"): + idx.levels[0].name = "foo" + + with pytest.raises(RuntimeError, match="set_names"): + idx.levels[1].name = "foo" + + new = pd.Series(1, index=idx.levels[0]) + with pytest.raises(RuntimeError, match="set_names"): + new.index.name = "bar" + + assert pd.Index._no_setting_name is False + assert pd.RangeIndex._no_setting_name is False + + +@pytest.mark.parametrize("func", ["rename", "set_names"]) +@pytest.mark.parametrize( + "rename_dict, exp_names", + [ + ({"x": "z"}, ["z", "y", "z"]), + ({"x": "z", "y": "x"}, ["z", "x", "z"]), + ({"y": "z"}, ["x", "z", "x"]), + ({}, ["x", "y", "x"]), + ({"z": "a"}, ["x", "y", "x"]), + ({"y": "z", "a": "b"}, ["x", "z", "x"]), + ], +) +def test_name_mi_with_dict_like_duplicate_names(func, rename_dict, exp_names): + 
# GH#20421 + mi = MultiIndex.from_arrays([[1, 2], [3, 4], [5, 6]], names=["x", "y", "x"]) + result = getattr(mi, func)(rename_dict) + expected = MultiIndex.from_arrays([[1, 2], [3, 4], [5, 6]], names=exp_names) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("func", ["rename", "set_names"]) +@pytest.mark.parametrize( + "rename_dict, exp_names", + [ + ({"x": "z"}, ["z", "y"]), + ({"x": "z", "y": "x"}, ["z", "x"]), + ({"a": "z"}, ["x", "y"]), + ({}, ["x", "y"]), + ], +) +def test_name_mi_with_dict_like(func, rename_dict, exp_names): + # GH#20421 + mi = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["x", "y"]) + result = getattr(mi, func)(rename_dict) + expected = MultiIndex.from_arrays([[1, 2], [3, 4]], names=exp_names) + tm.assert_index_equal(result, expected) + + +def test_index_name_with_dict_like_raising(): + # GH#20421 + ix = pd.Index([1, 2]) + msg = "Can only pass dict-like as `names` for MultiIndex." + with pytest.raises(TypeError, match=msg): + ix.set_names({"x": "z"}) + + +def test_multiindex_name_and_level_raising(): + # GH#20421 + mi = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["x", "y"]) + with pytest.raises(TypeError, match="Can not pass level for dictlike `names`."): + mi.set_names(names={"x": "z"}, level={"x": "z"}) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_partial_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_partial_indexing.py new file mode 100644 index 00000000..47efc43d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_partial_indexing.py @@ -0,0 +1,148 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + IndexSlice, + MultiIndex, + date_range, +) +import pandas._testing as tm + + +@pytest.fixture +def df(): + # c1 + # 2016-01-01 00:00:00 a 0 + # b 1 + # c 2 + # 2016-01-01 12:00:00 a 3 + # b 4 + # c 5 + # 2016-01-02 00:00:00 a 6 + # b 7 + # c 8 + # 2016-01-02 12:00:00 a 9 + # b 10 + # c 11 + # 2016-01-03 00:00:00 a 12 + # b 13 + # c 14 + dr = date_range("2016-01-01", "2016-01-03", freq="12H") + abc = ["a", "b", "c"] + mi = MultiIndex.from_product([dr, abc]) + frame = DataFrame({"c1": range(0, 15)}, index=mi) + return frame + + +def test_partial_string_matching_single_index(df): + # partial string matching on a single index + for df_swap in [df.swaplevel(), df.swaplevel(0), df.swaplevel(0, 1)]: + df_swap = df_swap.sort_index() + just_a = df_swap.loc["a"] + result = just_a.loc["2016-01-01"] + expected = df.loc[IndexSlice[:, "a"], :].iloc[0:2] + expected.index = expected.index.droplevel(1) + tm.assert_frame_equal(result, expected) + + +def test_get_loc_partial_timestamp_multiindex(df): + mi = df.index + key = ("2016-01-01", "a") + loc = mi.get_loc(key) + + expected = np.zeros(len(mi), dtype=bool) + expected[[0, 3]] = True + tm.assert_numpy_array_equal(loc, expected) + + key2 = ("2016-01-02", "a") + loc2 = mi.get_loc(key2) + expected2 = np.zeros(len(mi), dtype=bool) + expected2[[6, 9]] = True + tm.assert_numpy_array_equal(loc2, expected2) + + key3 = ("2016-01", "a") + loc3 = mi.get_loc(key3) + expected3 = np.zeros(len(mi), dtype=bool) + expected3[mi.get_level_values(1).get_loc("a")] = True + tm.assert_numpy_array_equal(loc3, expected3) + + key4 = ("2016", "a") + loc4 = mi.get_loc(key4) + expected4 = expected3 + tm.assert_numpy_array_equal(loc4, expected4) + + # non-monotonic + taker = np.arange(len(mi), dtype=np.intp) + taker[::2] = taker[::-2] + mi2 = mi.take(taker) + loc5 = mi2.get_loc(key) + expected5 = 
np.zeros(len(mi2), dtype=bool)
+    expected5[[3, 14]] = True
+    tm.assert_numpy_array_equal(loc5, expected5)
+
+
+def test_partial_string_timestamp_multiindex(df):
+    # GH10331
+    df_swap = df.swaplevel(0, 1).sort_index()
+    SLC = IndexSlice
+
+    # indexing with IndexSlice
+    result = df.loc[SLC["2016-01-01":"2016-02-01", :], :]
+    expected = df
+    tm.assert_frame_equal(result, expected)
+
+    # match on secondary index
+    result = df_swap.loc[SLC[:, "2016-01-01":"2016-01-01"], :]
+    expected = df_swap.iloc[[0, 1, 5, 6, 10, 11]]
+    tm.assert_frame_equal(result, expected)
+
+    # partial string match on year only
+    result = df.loc["2016"]
+    expected = df
+    tm.assert_frame_equal(result, expected)
+
+    # partial string match on date
+    result = df.loc["2016-01-01"]
+    expected = df.iloc[0:6]
+    tm.assert_frame_equal(result, expected)
+
+    # partial string match on date and hour, from middle
+    result = df.loc["2016-01-02 12"]
+    # hourly resolution, same as index.levels[0], so we are _not_ slicing on
+    # that level, so that level gets dropped
+    expected = df.iloc[9:12].droplevel(0)
+    tm.assert_frame_equal(result, expected)
+
+    # partial string match on secondary index
+    result = df_swap.loc[SLC[:, "2016-01-02"], :]
+    expected = df_swap.iloc[[2, 3, 7, 8, 12, 13]]
+    tm.assert_frame_equal(result, expected)
+
+    # tuple selector with partial string match on date
+    # "2016-01-01" has daily resolution, so _is_ a slice on the first level.
+    result = df.loc[("2016-01-01", "a"), :]
+    expected = df.iloc[[0, 3]].droplevel(1)
+    tm.assert_frame_equal(result, expected)
+
+    # Slicing a date on the first level should break (of course) because the
+    # DTI is the second level on df_swap
+    with pytest.raises(KeyError, match="'2016-01-01'"):
+        df_swap.loc["2016-01-01"]
+
+
+def test_partial_string_timestamp_multiindex_str_key_raises(df):
+    # Even though this syntax works on a single index, this is somewhat
+    # ambiguous and we don't want to extend this behavior forward to work
+    # in multi-indexes. This would amount to selecting a scalar from a
+    # column.
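+    # Illustrative aside, not from upstream pandas: the same string is a
+    # valid row key once the index has a single datetime level; flattening
+    # the fixture shows the unambiguous single-index case:
+    ser = df["c1"].reset_index(level=1, drop=True)
+    assert len(ser.loc["2016-01-01"]) == 6  # the six rows stamped 2016-01-01
+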
+    with pytest.raises(KeyError, match="'2016-01-01'"):
+        df["2016-01-01"]
+
+
+def test_partial_string_timestamp_multiindex_daily_resolution(df):
+    # GH12685 (partial string with daily resolution or below)
+    # a daily-resolution string is coarser than the fixture's 12H level, so
+    # this is a slice on the first level and that level is retained
+    result = df.loc[IndexSlice["2016-01-01":"2016-01-02", :], :]
+    expected = df.iloc[0:12]
+    tm.assert_frame_equal(result, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_pickle.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_pickle.py
new file mode 100644
index 00000000..1d8b7214
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_pickle.py
@@ -0,0 +1,10 @@
+import pytest
+
+from pandas import MultiIndex
+
+
+def test_pickle_compat_construction():
+    # this is testing for pickle compat
+    # need an object to create with
+    with pytest.raises(TypeError, match="Must pass both levels and codes"):
+        MultiIndex()
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_reindex.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_reindex.py
new file mode 100644
index 00000000..77a52713
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_reindex.py
@@ -0,0 +1,171 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    Index,
+    MultiIndex,
+)
+import pandas._testing as tm
+
+
+def test_reindex(idx):
+    result, indexer = idx.reindex(list(idx[:4]))
+    assert isinstance(result, MultiIndex)
+    assert result.names == ["first", "second"]
+    assert [level.name for level in result.levels] == ["first", "second"]
+
+    result, indexer = idx.reindex(list(idx))
+    assert isinstance(result, MultiIndex)
+    assert indexer is None
+    assert result.names == ["first", "second"]
+    assert [level.name for level in result.levels] == ["first", "second"]
+
+
+def test_reindex_level(idx):
+    index = Index(["one"])
+
+    target, indexer = idx.reindex(index, level="second")
+    target2, indexer2 = index.reindex(idx, level="second")
+
+    exp_index = idx.join(index, level="second", how="right")
+    exp_index2 = idx.join(index, level="second", how="left")
+
+    assert target.equals(exp_index)
+    exp_indexer = np.array([0, 2, 4])
+    tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
+
+    assert target2.equals(exp_index2)
+    exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
+    tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
+
+    with pytest.raises(TypeError, match="Fill method not supported"):
+        idx.reindex(idx, method="pad", level="second")
+
+
+def test_reindex_preserves_names_when_target_is_list_or_ndarray(idx):
+    # GH6552
+    idx = idx.copy()
+    target = idx.copy()
+    idx.names = target.names = [None, None]
+
+    other_dtype = MultiIndex.from_product([[1, 2], [3, 4]])
+
+    # list & ndarray cases
+    assert idx.reindex([])[0].names == [None, None]
+    assert idx.reindex(np.array([]))[0].names == [None, None]
+    assert idx.reindex(target.tolist())[0].names == [None, None]
+    assert idx.reindex(target.values)[0].names == [None, None]
+    assert idx.reindex(other_dtype.tolist())[0].names == [None, None]
+    assert idx.reindex(other_dtype.values)[0].names == [None, None]
+
+    idx.names = ["foo", "bar"]
+    assert idx.reindex([])[0].names == ["foo", "bar"]
+    assert idx.reindex(np.array([]))[0].names == ["foo", "bar"]
+    assert idx.reindex(target.tolist())[0].names == ["foo", "bar"]
+    assert idx.reindex(target.values)[0].names == ["foo", "bar"]
+    assert idx.reindex(other_dtype.tolist())[0].names == ["foo", "bar"]
+    assert 
idx.reindex(other_dtype.values)[0].names == ["foo", "bar"] + + +def test_reindex_lvl_preserves_names_when_target_is_list_or_array(): + # GH7774 + idx = MultiIndex.from_product([[0, 1], ["a", "b"]], names=["foo", "bar"]) + assert idx.reindex([], level=0)[0].names == ["foo", "bar"] + assert idx.reindex([], level=1)[0].names == ["foo", "bar"] + + +def test_reindex_lvl_preserves_type_if_target_is_empty_list_or_array(): + # GH7774 + idx = MultiIndex.from_product([[0, 1], ["a", "b"]]) + assert idx.reindex([], level=0)[0].levels[0].dtype.type == np.int64 + assert idx.reindex([], level=1)[0].levels[1].dtype.type == np.object_ + + # case with EA levels + cat = pd.Categorical(["foo", "bar"]) + dti = pd.date_range("2016-01-01", periods=2, tz="US/Pacific") + mi = MultiIndex.from_product([cat, dti]) + assert mi.reindex([], level=0)[0].levels[0].dtype == cat.dtype + assert mi.reindex([], level=1)[0].levels[1].dtype == dti.dtype + + +def test_reindex_base(idx): + expected = np.arange(idx.size, dtype=np.intp) + + actual = idx.get_indexer(idx) + tm.assert_numpy_array_equal(expected, actual) + + with pytest.raises(ValueError, match="Invalid fill method"): + idx.get_indexer(idx, method="invalid") + + +def test_reindex_non_unique(): + idx = MultiIndex.from_tuples([(0, 0), (1, 1), (1, 1), (2, 2)]) + a = pd.Series(np.arange(4), index=idx) + new_idx = MultiIndex.from_tuples([(0, 0), (1, 1), (2, 2)]) + + msg = "cannot handle a non-unique multi-index!" + with pytest.raises(ValueError, match=msg): + a.reindex(new_idx) + + +@pytest.mark.parametrize("values", [[["a"], ["x"]], [[], []]]) +def test_reindex_empty_with_level(values): + # GH41170 + idx = MultiIndex.from_arrays(values) + result, result_indexer = idx.reindex(np.array(["b"]), level=0) + expected = MultiIndex(levels=[["b"], values[1]], codes=[[], []]) + expected_indexer = np.array([], dtype=result_indexer.dtype) + tm.assert_index_equal(result, expected) + tm.assert_numpy_array_equal(result_indexer, expected_indexer) + + +def test_reindex_not_all_tuples(): + keys = [("i", "i"), ("i", "j"), ("j", "i"), "j"] + mi = MultiIndex.from_tuples(keys[:-1]) + idx = Index(keys) + res, indexer = mi.reindex(idx) + + tm.assert_index_equal(res, idx) + expected = np.array([0, 1, 2, -1], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + +def test_reindex_limit_arg_with_multiindex(): + # GH21247 + + idx = MultiIndex.from_tuples([(3, "A"), (4, "A"), (4, "B")]) + + df = pd.Series([0.02, 0.01, 0.012], index=idx) + + new_idx = MultiIndex.from_tuples( + [ + (3, "A"), + (3, "B"), + (4, "A"), + (4, "B"), + (4, "C"), + (5, "B"), + (5, "C"), + (6, "B"), + (6, "C"), + ] + ) + + with pytest.raises( + ValueError, + match="limit argument only valid if doing pad, backfill or nearest reindexing", + ): + df.reindex(new_idx, fill_value=0, limit=1) + + +def test_reindex_with_none_in_nested_multiindex(): + # GH42883 + index = MultiIndex.from_tuples([(("a", None), 1), (("b", None), 2)]) + index2 = MultiIndex.from_tuples([(("b", None), 2), (("a", None), 1)]) + df1_dtype = pd.DataFrame([1, 2], index=index) + df2_dtype = pd.DataFrame([2, 1], index=index2) + + result = df1_dtype.reindex_like(df2_dtype) + expected = df2_dtype + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_reshape.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_reshape.py new file mode 100644 index 00000000..06dbb33a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_reshape.py 
@@ -0,0 +1,224 @@ +from datetime import datetime + +import numpy as np +import pytest +import pytz + +import pandas as pd +from pandas import ( + Index, + MultiIndex, +) +import pandas._testing as tm + + +def test_insert(idx): + # key contained in all levels + new_index = idx.insert(0, ("bar", "two")) + assert new_index.equal_levels(idx) + assert new_index[0] == ("bar", "two") + + # key not contained in all levels + new_index = idx.insert(0, ("abc", "three")) + + exp0 = Index(list(idx.levels[0]) + ["abc"], name="first") + tm.assert_index_equal(new_index.levels[0], exp0) + assert new_index.names == ["first", "second"] + + exp1 = Index(list(idx.levels[1]) + ["three"], name="second") + tm.assert_index_equal(new_index.levels[1], exp1) + assert new_index[0] == ("abc", "three") + + # key wrong length + msg = "Item must have length equal to number of levels" + with pytest.raises(ValueError, match=msg): + idx.insert(0, ("foo2",)) + + left = pd.DataFrame([["a", "b", 0], ["b", "d", 1]], columns=["1st", "2nd", "3rd"]) + left.set_index(["1st", "2nd"], inplace=True) + ts = left["3rd"].copy(deep=True) + + left.loc[("b", "x"), "3rd"] = 2 + left.loc[("b", "a"), "3rd"] = -1 + left.loc[("b", "b"), "3rd"] = 3 + left.loc[("a", "x"), "3rd"] = 4 + left.loc[("a", "w"), "3rd"] = 5 + left.loc[("a", "a"), "3rd"] = 6 + + ts.loc[("b", "x")] = 2 + ts.loc["b", "a"] = -1 + ts.loc[("b", "b")] = 3 + ts.loc["a", "x"] = 4 + ts.loc[("a", "w")] = 5 + ts.loc["a", "a"] = 6 + + right = pd.DataFrame( + [ + ["a", "b", 0], + ["b", "d", 1], + ["b", "x", 2], + ["b", "a", -1], + ["b", "b", 3], + ["a", "x", 4], + ["a", "w", 5], + ["a", "a", 6], + ], + columns=["1st", "2nd", "3rd"], + ) + right.set_index(["1st", "2nd"], inplace=True) + # FIXME data types changes to float because + # of intermediate nan insertion; + tm.assert_frame_equal(left, right, check_dtype=False) + tm.assert_series_equal(ts, right["3rd"]) + + +def test_insert2(): + # GH9250 + idx = ( + [("test1", i) for i in range(5)] + + [("test2", i) for i in range(6)] + + [("test", 17), ("test", 18)] + ) + + left = pd.Series(np.linspace(0, 10, 11), MultiIndex.from_tuples(idx[:-2])) + + left.loc[("test", 17)] = 11 + left.loc[("test", 18)] = 12 + + right = pd.Series(np.linspace(0, 12, 13), MultiIndex.from_tuples(idx)) + + tm.assert_series_equal(left, right) + + +def test_append(idx): + result = idx[:3].append(idx[3:]) + assert result.equals(idx) + + foos = [idx[:1], idx[1:3], idx[3:]] + result = foos[0].append(foos[1:]) + assert result.equals(idx) + + # empty + result = idx.append([]) + assert result.equals(idx) + + +def test_append_index(): + idx1 = Index([1.1, 1.2, 1.3]) + idx2 = pd.date_range("2011-01-01", freq="D", periods=3, tz="Asia/Tokyo") + idx3 = Index(["A", "B", "C"]) + + midx_lv2 = MultiIndex.from_arrays([idx1, idx2]) + midx_lv3 = MultiIndex.from_arrays([idx1, idx2, idx3]) + + result = idx1.append(midx_lv2) + + # see gh-7112 + tz = pytz.timezone("Asia/Tokyo") + expected_tuples = [ + (1.1, tz.localize(datetime(2011, 1, 1))), + (1.2, tz.localize(datetime(2011, 1, 2))), + (1.3, tz.localize(datetime(2011, 1, 3))), + ] + expected = Index([1.1, 1.2, 1.3] + expected_tuples) + tm.assert_index_equal(result, expected) + + result = midx_lv2.append(idx1) + expected = Index(expected_tuples + [1.1, 1.2, 1.3]) + tm.assert_index_equal(result, expected) + + result = midx_lv2.append(midx_lv2) + expected = MultiIndex.from_arrays([idx1.append(idx1), idx2.append(idx2)]) + tm.assert_index_equal(result, expected) + + result = midx_lv2.append(midx_lv3) + tm.assert_index_equal(result, expected) 
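+
+    # Illustrative aside, not from upstream pandas: in the reversed case
+    # below, 3-level and 2-level tuples cannot be reconciled into a single
+    # MultiIndex, so the expected value is a plain object-dtype Index of
+    # tuples, built via Index._simple_new.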
+ + result = midx_lv3.append(midx_lv2) + expected = Index._simple_new( + np.array( + [ + (1.1, tz.localize(datetime(2011, 1, 1)), "A"), + (1.2, tz.localize(datetime(2011, 1, 2)), "B"), + (1.3, tz.localize(datetime(2011, 1, 3)), "C"), + ] + + expected_tuples, + dtype=object, + ), + None, + ) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("name, exp", [("b", "b"), ("c", None)]) +def test_append_names_match(name, exp): + # GH#48288 + midx = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["a", "b"]) + midx2 = MultiIndex.from_arrays([[3], [5]], names=["a", name]) + result = midx.append(midx2) + expected = MultiIndex.from_arrays([[1, 2, 3], [3, 4, 5]], names=["a", exp]) + tm.assert_index_equal(result, expected) + + +def test_append_names_dont_match(): + # GH#48288 + midx = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["a", "b"]) + midx2 = MultiIndex.from_arrays([[3], [5]], names=["x", "y"]) + result = midx.append(midx2) + expected = MultiIndex.from_arrays([[1, 2, 3], [3, 4, 5]], names=None) + tm.assert_index_equal(result, expected) + + +def test_append_overlapping_interval_levels(): + # GH 54934 + ivl1 = pd.IntervalIndex.from_breaks([0.0, 1.0, 2.0]) + ivl2 = pd.IntervalIndex.from_breaks([0.5, 1.5, 2.5]) + mi1 = MultiIndex.from_product([ivl1, ivl1]) + mi2 = MultiIndex.from_product([ivl2, ivl2]) + result = mi1.append(mi2) + expected = MultiIndex.from_tuples( + [ + (pd.Interval(0.0, 1.0), pd.Interval(0.0, 1.0)), + (pd.Interval(0.0, 1.0), pd.Interval(1.0, 2.0)), + (pd.Interval(1.0, 2.0), pd.Interval(0.0, 1.0)), + (pd.Interval(1.0, 2.0), pd.Interval(1.0, 2.0)), + (pd.Interval(0.5, 1.5), pd.Interval(0.5, 1.5)), + (pd.Interval(0.5, 1.5), pd.Interval(1.5, 2.5)), + (pd.Interval(1.5, 2.5), pd.Interval(0.5, 1.5)), + (pd.Interval(1.5, 2.5), pd.Interval(1.5, 2.5)), + ] + ) + tm.assert_index_equal(result, expected) + + +def test_repeat(): + reps = 2 + numbers = [1, 2, 3] + names = np.array(["foo", "bar"]) + + m = MultiIndex.from_product([numbers, names], names=names) + expected = MultiIndex.from_product([numbers, names.repeat(reps)], names=names) + tm.assert_index_equal(m.repeat(reps), expected) + + +def test_insert_base(idx): + result = idx[1:4] + + # test 0th element + assert idx[0:4].equals(result.insert(0, idx[0])) + + +def test_delete_base(idx): + expected = idx[1:] + result = idx.delete(0) + assert result.equals(expected) + assert result.name == expected.name + + expected = idx[:-1] + result = idx.delete(-1) + assert result.equals(expected) + assert result.name == expected.name + + msg = "index 6 is out of bounds for axis 0 with size 6" + with pytest.raises(IndexError, match=msg): + idx.delete(len(idx)) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_setops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_setops.py new file mode 100644 index 00000000..c951403f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_setops.py @@ -0,0 +1,765 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + CategoricalIndex, + DataFrame, + Index, + IntervalIndex, + MultiIndex, + Series, +) +import pandas._testing as tm +from pandas.api.types import ( + is_float_dtype, + is_unsigned_integer_dtype, +) + + +@pytest.mark.parametrize("case", [0.5, "xxx"]) +@pytest.mark.parametrize( + "method", ["intersection", "union", "difference", "symmetric_difference"] +) +def test_set_ops_error_cases(idx, case, sort, method): + # non-iterable input + msg = "Input must be Index or 
array-like" + with pytest.raises(TypeError, match=msg): + getattr(idx, method)(case, sort=sort) + + +@pytest.mark.parametrize("klass", [MultiIndex, np.array, Series, list]) +def test_intersection_base(idx, sort, klass): + first = idx[2::-1] # first 3 elements reversed + second = idx[:5] + + if klass is not MultiIndex: + second = klass(second.values) + + intersect = first.intersection(second, sort=sort) + if sort is None: + expected = first.sort_values() + else: + expected = first + tm.assert_index_equal(intersect, expected) + + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.intersection([1, 2, 3], sort=sort) + + +@pytest.mark.arm_slow +@pytest.mark.parametrize("klass", [MultiIndex, np.array, Series, list]) +def test_union_base(idx, sort, klass): + first = idx[::-1] + second = idx[:5] + + if klass is not MultiIndex: + second = klass(second.values) + + union = first.union(second, sort=sort) + if sort is None: + expected = first.sort_values() + else: + expected = first + tm.assert_index_equal(union, expected) + + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.union([1, 2, 3], sort=sort) + + +def test_difference_base(idx, sort): + second = idx[4:] + answer = idx[:4] + result = idx.difference(second, sort=sort) + + if sort is None: + answer = answer.sort_values() + + assert result.equals(answer) + tm.assert_index_equal(result, answer) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + result = idx.difference(case, sort=sort) + tm.assert_index_equal(result, answer) + + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + idx.difference([1, 2, 3], sort=sort) + + +def test_symmetric_difference(idx, sort): + first = idx[1:] + second = idx[:-1] + answer = idx[[-1, 0]] + result = first.symmetric_difference(second, sort=sort) + + if sort is None: + answer = answer.sort_values() + + tm.assert_index_equal(result, answer) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + result = first.symmetric_difference(case, sort=sort) + tm.assert_index_equal(result, answer) + + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.symmetric_difference([1, 2, 3], sort=sort) + + +def test_multiindex_symmetric_difference(): + # GH 13490 + idx = MultiIndex.from_product([["a", "b"], ["A", "B"]], names=["a", "b"]) + result = idx.symmetric_difference(idx) + assert result.names == idx.names + + idx2 = idx.copy().rename(["A", "B"]) + result = idx.symmetric_difference(idx2) + assert result.names == [None, None] + + +def test_empty(idx): + # GH 15270 + assert not idx.empty + assert idx[:0].empty + + +def test_difference(idx, sort): + first = idx + result = first.difference(idx[-3:], sort=sort) + vals = idx[:-3].values + + if sort is None: + vals = sorted(vals) + + expected = MultiIndex.from_tuples(vals, sortorder=0, names=idx.names) + + assert isinstance(result, MultiIndex) + assert result.equals(expected) + assert result.names == idx.names + tm.assert_index_equal(result, expected) + + # empty difference: reflexive + result = idx.difference(idx, sort=sort) + expected = idx[:0] + assert result.equals(expected) + assert result.names == idx.names + + # empty difference: superset + result = idx[-3:].difference(idx, sort=sort) + expected = idx[:0] + assert result.equals(expected) + assert 
result.names == idx.names + + # empty difference: degenerate + result = idx[:0].difference(idx, sort=sort) + expected = idx[:0] + assert result.equals(expected) + assert result.names == idx.names + + # names not the same + chunklet = idx[-3:] + chunklet.names = ["foo", "baz"] + result = first.difference(chunklet, sort=sort) + assert result.names == (None, None) + + # empty, but non-equal + result = idx.difference(idx.sortlevel(1)[0], sort=sort) + assert len(result) == 0 + + # raise Exception called with non-MultiIndex + result = first.difference(first.values, sort=sort) + assert result.equals(first[:0]) + + # name from empty array + result = first.difference([], sort=sort) + assert first.equals(result) + assert first.names == result.names + + # name from non-empty array + result = first.difference([("foo", "one")], sort=sort) + expected = MultiIndex.from_tuples( + [("bar", "one"), ("baz", "two"), ("foo", "two"), ("qux", "one"), ("qux", "two")] + ) + expected.names = first.names + assert first.names == result.names + + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.difference([1, 2, 3, 4, 5], sort=sort) + + +def test_difference_sort_special(): + # GH-24959 + idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) + # sort=None, the default + result = idx.difference([]) + tm.assert_index_equal(result, idx) + + +def test_difference_sort_special_true(): + # TODO(GH#25151): decide on True behaviour + idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) + result = idx.difference([], sort=True) + expected = MultiIndex.from_product([[0, 1], ["a", "b"]]) + tm.assert_index_equal(result, expected) + + +def test_difference_sort_incomparable(): + # GH-24959 + idx = MultiIndex.from_product([[1, pd.Timestamp("2000"), 2], ["a", "b"]]) + + other = MultiIndex.from_product([[3, pd.Timestamp("2000"), 4], ["c", "d"]]) + # sort=None, the default + msg = "sort order is undefined for incomparable objects" + with tm.assert_produces_warning(RuntimeWarning, match=msg): + result = idx.difference(other) + tm.assert_index_equal(result, idx) + + # sort=False + result = idx.difference(other, sort=False) + tm.assert_index_equal(result, idx) + + +def test_difference_sort_incomparable_true(): + idx = MultiIndex.from_product([[1, pd.Timestamp("2000"), 2], ["a", "b"]]) + other = MultiIndex.from_product([[3, pd.Timestamp("2000"), 4], ["c", "d"]]) + + # TODO: this is raising in constructing a Categorical when calling + # algos.safe_sort. Should we catch and re-raise with a better message? 
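+    # Illustrative aside, not from upstream pandas: the root cause is that
+    # int and Timestamp have no total order, so any sort=True path must fail;
+    # the bare comparison raises the same TypeError:
+    with pytest.raises(TypeError):
+        pd.Timestamp("2000") < 1
+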
+ msg = "'values' is not ordered, please explicitly specify the categories order " + with pytest.raises(TypeError, match=msg): + idx.difference(other, sort=True) + + +def test_union(idx, sort): + piece1 = idx[:5][::-1] + piece2 = idx[3:] + + the_union = piece1.union(piece2, sort=sort) + + if sort is None: + tm.assert_index_equal(the_union, idx.sort_values()) + + assert tm.equalContents(the_union, idx) + + # corner case, pass self or empty thing: + the_union = idx.union(idx, sort=sort) + tm.assert_index_equal(the_union, idx) + + the_union = idx.union(idx[:0], sort=sort) + tm.assert_index_equal(the_union, idx) + + tuples = idx.values + result = idx[:4].union(tuples[4:], sort=sort) + if sort is None: + tm.equalContents(result, idx) + else: + assert result.equals(idx) + + +def test_union_with_regular_index(idx): + other = Index(["A", "B", "C"]) + + result = other.union(idx) + assert ("foo", "one") in result + assert "B" in result + + msg = "The values in the array are unorderable" + with tm.assert_produces_warning(RuntimeWarning, match=msg): + result2 = idx.union(other) + # This is more consistent now, if sorting fails then we don't sort at all + # in the MultiIndex case. + assert not result.equals(result2) + + +def test_intersection(idx, sort): + piece1 = idx[:5][::-1] + piece2 = idx[3:] + + the_int = piece1.intersection(piece2, sort=sort) + + if sort is None: + tm.assert_index_equal(the_int, idx[3:5]) + assert tm.equalContents(the_int, idx[3:5]) + + # corner case, pass self + the_int = idx.intersection(idx, sort=sort) + tm.assert_index_equal(the_int, idx) + + # empty intersection: disjoint + empty = idx[:2].intersection(idx[2:], sort=sort) + expected = idx[:0] + assert empty.equals(expected) + + tuples = idx.values + result = idx.intersection(tuples) + assert result.equals(idx) + + +@pytest.mark.parametrize( + "method", ["intersection", "union", "difference", "symmetric_difference"] +) +def test_setop_with_categorical(idx, sort, method): + other = idx.to_flat_index().astype("category") + res_names = [None] * idx.nlevels + + result = getattr(idx, method)(other, sort=sort) + expected = getattr(idx, method)(idx, sort=sort).rename(res_names) + tm.assert_index_equal(result, expected) + + result = getattr(idx, method)(other[:5], sort=sort) + expected = getattr(idx, method)(idx[:5], sort=sort).rename(res_names) + tm.assert_index_equal(result, expected) + + +def test_intersection_non_object(idx, sort): + other = Index(range(3), name="foo") + + result = idx.intersection(other, sort=sort) + expected = MultiIndex(levels=idx.levels, codes=[[]] * idx.nlevels, names=None) + tm.assert_index_equal(result, expected, exact=True) + + # if we pass a length-0 ndarray (i.e. 
no name, we retain our idx.name) + result = idx.intersection(np.asarray(other)[:0], sort=sort) + expected = MultiIndex(levels=idx.levels, codes=[[]] * idx.nlevels, names=idx.names) + tm.assert_index_equal(result, expected, exact=True) + + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + # With non-zero length non-index, we try and fail to convert to tuples + idx.intersection(np.asarray(other), sort=sort) + + +def test_intersect_equal_sort(): + # GH-24959 + idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) + tm.assert_index_equal(idx.intersection(idx, sort=False), idx) + tm.assert_index_equal(idx.intersection(idx, sort=None), idx) + + +def test_intersect_equal_sort_true(): + idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) + expected = MultiIndex.from_product([[0, 1], ["a", "b"]]) + result = idx.intersection(idx, sort=True) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("slice_", [slice(None), slice(0)]) +def test_union_sort_other_empty(slice_): + # https://github.com/pandas-dev/pandas/issues/24959 + idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) + + # default, sort=None + other = idx[slice_] + tm.assert_index_equal(idx.union(other), idx) + tm.assert_index_equal(other.union(idx), idx) + + # sort=False + tm.assert_index_equal(idx.union(other, sort=False), idx) + + +def test_union_sort_other_empty_sort(): + # TODO(GH#25151): decide on True behaviour + # # sort=True + idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) + other = idx[:0] + result = idx.union(other, sort=True) + expected = MultiIndex.from_product([[0, 1], ["a", "b"]]) + tm.assert_index_equal(result, expected) + + +def test_union_sort_other_incomparable(): + # https://github.com/pandas-dev/pandas/issues/24959 + idx = MultiIndex.from_product([[1, pd.Timestamp("2000")], ["a", "b"]]) + + # default, sort=None + with tm.assert_produces_warning(RuntimeWarning): + result = idx.union(idx[:1]) + tm.assert_index_equal(result, idx) + + # sort=False + result = idx.union(idx[:1], sort=False) + tm.assert_index_equal(result, idx) + + +def test_union_sort_other_incomparable_sort(): + idx = MultiIndex.from_product([[1, pd.Timestamp("2000")], ["a", "b"]]) + msg = "'<' not supported between instances of 'Timestamp' and 'int'" + with pytest.raises(TypeError, match=msg): + idx.union(idx[:1], sort=True) + + +def test_union_non_object_dtype_raises(): + # GH#32646 raise NotImplementedError instead of less-informative error + mi = MultiIndex.from_product([["a", "b"], [1, 2]]) + + idx = mi.levels[1] + + msg = "Can only union MultiIndex with MultiIndex or Index of tuples" + with pytest.raises(NotImplementedError, match=msg): + mi.union(idx) + + +def test_union_empty_self_different_names(): + # GH#38423 + mi = MultiIndex.from_arrays([[]]) + mi2 = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["a", "b"]) + result = mi.union(mi2) + expected = MultiIndex.from_arrays([[1, 2], [3, 4]]) + tm.assert_index_equal(result, expected) + + +def test_union_multiindex_empty_rangeindex(): + # GH#41234 + mi = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["a", "b"]) + ri = pd.RangeIndex(0) + + result_left = mi.union(ri) + tm.assert_index_equal(mi, result_left, check_names=False) + + result_right = ri.union(mi) + tm.assert_index_equal(mi, result_right, check_names=False) + + +@pytest.mark.parametrize( + "method", ["union", "intersection", "difference", "symmetric_difference"] +) +def test_setops_sort_validation(method): + idx1 = MultiIndex.from_product([["a", "b"], [1, 2]]) + idx2 = 
MultiIndex.from_product([["b", "c"], [1, 2]]) + + with pytest.raises(ValueError, match="The 'sort' keyword only takes"): + getattr(idx1, method)(idx2, sort=2) + + # sort=True is supported as of GH#? + getattr(idx1, method)(idx2, sort=True) + + +@pytest.mark.parametrize("val", [pd.NA, 100]) +def test_difference_keep_ea_dtypes(any_numeric_ea_dtype, val): + # GH#48606 + midx = MultiIndex.from_arrays( + [Series([1, 2], dtype=any_numeric_ea_dtype), [2, 1]], names=["a", None] + ) + midx2 = MultiIndex.from_arrays( + [Series([1, 2, val], dtype=any_numeric_ea_dtype), [1, 1, 3]] + ) + result = midx.difference(midx2) + expected = MultiIndex.from_arrays([Series([1], dtype=any_numeric_ea_dtype), [2]]) + tm.assert_index_equal(result, expected) + + result = midx.difference(midx.sort_values(ascending=False)) + expected = MultiIndex.from_arrays( + [Series([], dtype=any_numeric_ea_dtype), Series([], dtype=np.int64)], + names=["a", None], + ) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("val", [pd.NA, 5]) +def test_symmetric_difference_keeping_ea_dtype(any_numeric_ea_dtype, val): + # GH#48607 + midx = MultiIndex.from_arrays( + [Series([1, 2], dtype=any_numeric_ea_dtype), [2, 1]], names=["a", None] + ) + midx2 = MultiIndex.from_arrays( + [Series([1, 2, val], dtype=any_numeric_ea_dtype), [1, 1, 3]] + ) + result = midx.symmetric_difference(midx2) + expected = MultiIndex.from_arrays( + [Series([1, 1, val], dtype=any_numeric_ea_dtype), [1, 2, 3]] + ) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + ("tuples", "exp_tuples"), + [ + ([("val1", "test1")], [("val1", "test1")]), + ([("val1", "test1"), ("val1", "test1")], [("val1", "test1")]), + ( + [("val2", "test2"), ("val1", "test1")], + [("val2", "test2"), ("val1", "test1")], + ), + ], +) +def test_intersect_with_duplicates(tuples, exp_tuples): + # GH#36915 + left = MultiIndex.from_tuples(tuples, names=["first", "second"]) + right = MultiIndex.from_tuples( + [("val1", "test1"), ("val1", "test1"), ("val2", "test2")], + names=["first", "second"], + ) + result = left.intersection(right) + expected = MultiIndex.from_tuples(exp_tuples, names=["first", "second"]) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "data, names, expected", + [ + ((1,), None, [None, None]), + ((1,), ["a"], [None, None]), + ((1,), ["b"], [None, None]), + ((1, 2), ["c", "d"], [None, None]), + ((1, 2), ["b", "a"], [None, None]), + ((1, 2, 3), ["a", "b", "c"], [None, None]), + ((1, 2), ["a", "c"], ["a", None]), + ((1, 2), ["c", "b"], [None, "b"]), + ((1, 2), ["a", "b"], ["a", "b"]), + ((1, 2), [None, "b"], [None, "b"]), + ], +) +def test_maybe_match_names(data, names, expected): + # GH#38323 + mi = MultiIndex.from_tuples([], names=["a", "b"]) + mi2 = MultiIndex.from_tuples([data], names=names) + result = mi._maybe_match_names(mi2) + assert result == expected + + +def test_intersection_equal_different_names(): + # GH#30302 + mi1 = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["c", "b"]) + mi2 = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["a", "b"]) + + result = mi1.intersection(mi2) + expected = MultiIndex.from_arrays([[1, 2], [3, 4]], names=[None, "b"]) + tm.assert_index_equal(result, expected) + + +def test_intersection_different_names(): + # GH#38323 + mi = MultiIndex.from_arrays([[1], [3]], names=["c", "b"]) + mi2 = MultiIndex.from_arrays([[1], [3]]) + result = mi.intersection(mi2) + tm.assert_index_equal(result, mi2) + + +def test_intersection_with_missing_values_on_both_sides(nulls_fixture): + # GH#38623 + 
mi1 = MultiIndex.from_arrays([[3, nulls_fixture, 4, nulls_fixture], [1, 2, 4, 2]]) + mi2 = MultiIndex.from_arrays([[3, nulls_fixture, 3], [1, 2, 4]]) + result = mi1.intersection(mi2) + expected = MultiIndex.from_arrays([[3, nulls_fixture], [1, 2]]) + tm.assert_index_equal(result, expected) + + +def test_union_with_missing_values_on_both_sides(nulls_fixture): + # GH#38623 + mi1 = MultiIndex.from_arrays([[1, nulls_fixture]]) + mi2 = MultiIndex.from_arrays([[1, nulls_fixture, 3]]) + result = mi1.union(mi2) + expected = MultiIndex.from_arrays([[1, 3, nulls_fixture]]) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["float64", "Float64"]) +@pytest.mark.parametrize("sort", [None, False]) +def test_union_nan_got_duplicated(dtype, sort): + # GH#38977, GH#49010 + mi1 = MultiIndex.from_arrays([pd.array([1.0, np.nan], dtype=dtype), [2, 3]]) + mi2 = MultiIndex.from_arrays([pd.array([1.0, np.nan, 3.0], dtype=dtype), [2, 3, 4]]) + result = mi1.union(mi2, sort=sort) + if sort is None: + expected = MultiIndex.from_arrays( + [pd.array([1.0, 3.0, np.nan], dtype=dtype), [2, 4, 3]] + ) + else: + expected = mi2 + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("val", [4, 1]) +def test_union_keep_ea_dtype(any_numeric_ea_dtype, val): + # GH#48505 + + arr1 = Series([val, 2], dtype=any_numeric_ea_dtype) + arr2 = Series([2, 1], dtype=any_numeric_ea_dtype) + midx = MultiIndex.from_arrays([arr1, [1, 2]], names=["a", None]) + midx2 = MultiIndex.from_arrays([arr2, [2, 1]]) + result = midx.union(midx2) + if val == 4: + expected = MultiIndex.from_arrays( + [Series([1, 2, 4], dtype=any_numeric_ea_dtype), [1, 2, 1]] + ) + else: + expected = MultiIndex.from_arrays( + [Series([1, 2], dtype=any_numeric_ea_dtype), [1, 2]] + ) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("dupe_val", [3, pd.NA]) +def test_union_with_duplicates_keep_ea_dtype(dupe_val, any_numeric_ea_dtype): + # GH48900 + mi1 = MultiIndex.from_arrays( + [ + Series([1, dupe_val, 2], dtype=any_numeric_ea_dtype), + Series([1, dupe_val, 2], dtype=any_numeric_ea_dtype), + ] + ) + mi2 = MultiIndex.from_arrays( + [ + Series([2, dupe_val, dupe_val], dtype=any_numeric_ea_dtype), + Series([2, dupe_val, dupe_val], dtype=any_numeric_ea_dtype), + ] + ) + result = mi1.union(mi2) + expected = MultiIndex.from_arrays( + [ + Series([1, 2, dupe_val, dupe_val], dtype=any_numeric_ea_dtype), + Series([1, 2, dupe_val, dupe_val], dtype=any_numeric_ea_dtype), + ] + ) + tm.assert_index_equal(result, expected) + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_union_duplicates(index, request): + # GH#38977 + if index.empty or isinstance(index, (IntervalIndex, CategoricalIndex)): + pytest.skip(f"No duplicates in an empty {type(index).__name__}") + + values = index.unique().values.tolist() + mi1 = MultiIndex.from_arrays([values, [1] * len(values)]) + mi2 = MultiIndex.from_arrays([[values[0]] + values, [1] * (len(values) + 1)]) + result = mi2.union(mi1) + expected = mi2.sort_values() + tm.assert_index_equal(result, expected) + + if ( + is_unsigned_integer_dtype(mi2.levels[0]) + and (mi2.get_level_values(0) < 2**63).all() + ): + # GH#47294 - union uses lib.fast_zip, converting data to Python integers + # and loses type information. Result is then unsigned only when values are + # sufficiently large to require unsigned dtype. 
This happens only if other + # has dups or one of both have missing values + expected = expected.set_levels( + [expected.levels[0].astype(np.int64), expected.levels[1]] + ) + elif is_float_dtype(mi2.levels[0]): + # mi2 has duplicates witch is a different path than above, Fix that path + # to use correct float dtype? + expected = expected.set_levels( + [expected.levels[0].astype(float), expected.levels[1]] + ) + + result = mi1.union(mi2) + tm.assert_index_equal(result, expected) + + +def test_union_keep_dtype_precision(any_real_numeric_dtype): + # GH#48498 + arr1 = Series([4, 1, 1], dtype=any_real_numeric_dtype) + arr2 = Series([1, 4], dtype=any_real_numeric_dtype) + midx = MultiIndex.from_arrays([arr1, [2, 1, 1]], names=["a", None]) + midx2 = MultiIndex.from_arrays([arr2, [1, 2]], names=["a", None]) + + result = midx.union(midx2) + expected = MultiIndex.from_arrays( + ([Series([1, 1, 4], dtype=any_real_numeric_dtype), [1, 1, 2]]), + names=["a", None], + ) + tm.assert_index_equal(result, expected) + + +def test_union_keep_ea_dtype_with_na(any_numeric_ea_dtype): + # GH#48498 + arr1 = Series([4, pd.NA], dtype=any_numeric_ea_dtype) + arr2 = Series([1, pd.NA], dtype=any_numeric_ea_dtype) + midx = MultiIndex.from_arrays([arr1, [2, 1]], names=["a", None]) + midx2 = MultiIndex.from_arrays([arr2, [1, 2]]) + result = midx.union(midx2) + expected = MultiIndex.from_arrays( + [Series([1, 4, pd.NA, pd.NA], dtype=any_numeric_ea_dtype), [1, 2, 1, 2]] + ) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "levels1, levels2, codes1, codes2, names", + [ + ( + [["a", "b", "c"], [0, ""]], + [["c", "d", "b"], [""]], + [[0, 1, 2], [1, 1, 1]], + [[0, 1, 2], [0, 0, 0]], + ["name1", "name2"], + ), + ], +) +def test_intersection_lexsort_depth(levels1, levels2, codes1, codes2, names): + # GH#25169 + mi1 = MultiIndex(levels=levels1, codes=codes1, names=names) + mi2 = MultiIndex(levels=levels2, codes=codes2, names=names) + mi_int = mi1.intersection(mi2) + assert mi_int._lexsort_depth == 2 + + +@pytest.mark.parametrize( + "a", + [pd.Categorical(["a", "b"], categories=["a", "b"]), ["a", "b"]], +) +@pytest.mark.parametrize( + "b", + [ + pd.Categorical(["a", "b"], categories=["b", "a"], ordered=True), + pd.Categorical(["a", "b"], categories=["b", "a"]), + ], +) +def test_intersection_with_non_lex_sorted_categories(a, b): + # GH#49974 + other = ["1", "2"] + + df1 = DataFrame({"x": a, "y": other}) + df2 = DataFrame({"x": b, "y": other}) + + expected = MultiIndex.from_arrays([a, other], names=["x", "y"]) + + res1 = MultiIndex.from_frame(df1).intersection( + MultiIndex.from_frame(df2.sort_values(["x", "y"])) + ) + res2 = MultiIndex.from_frame(df1).intersection(MultiIndex.from_frame(df2)) + res3 = MultiIndex.from_frame(df1.sort_values(["x", "y"])).intersection( + MultiIndex.from_frame(df2) + ) + res4 = MultiIndex.from_frame(df1.sort_values(["x", "y"])).intersection( + MultiIndex.from_frame(df2.sort_values(["x", "y"])) + ) + + tm.assert_index_equal(res1, expected) + tm.assert_index_equal(res2, expected) + tm.assert_index_equal(res3, expected) + tm.assert_index_equal(res4, expected) + + +@pytest.mark.parametrize("val", [pd.NA, 100]) +def test_intersection_keep_ea_dtypes(val, any_numeric_ea_dtype): + # GH#48604 + midx = MultiIndex.from_arrays( + [Series([1, 2], dtype=any_numeric_ea_dtype), [2, 1]], names=["a", None] + ) + midx2 = MultiIndex.from_arrays( + [Series([1, 2, val], dtype=any_numeric_ea_dtype), [1, 1, 3]] + ) + result = midx.intersection(midx2) + expected = MultiIndex.from_arrays([Series([2], 
dtype=any_numeric_ea_dtype), [1]])
+    tm.assert_index_equal(result, expected)
+
+
+def test_union_with_na_when_constructing_dataframe():
+    # GH43222
+    series1 = Series((1,), index=MultiIndex.from_tuples(((None, None),)))
+    series2 = Series((10, 20), index=MultiIndex.from_tuples(((None, None), ("a", "b"))))
+    result = DataFrame([series1, series2])
+    expected = DataFrame({(np.nan, np.nan): [1.0, 10.0], ("a", "b"): [np.nan, 20.0]})
+    tm.assert_frame_equal(result, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_sorting.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_sorting.py
new file mode 100644
index 00000000..08c1a409
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_sorting.py
@@ -0,0 +1,340 @@
+import numpy as np
+import pytest
+
+from pandas.errors import (
+    PerformanceWarning,
+    UnsortedIndexError,
+)
+
+from pandas import (
+    CategoricalIndex,
+    DataFrame,
+    Index,
+    MultiIndex,
+    RangeIndex,
+    Series,
+    Timestamp,
+)
+import pandas._testing as tm
+from pandas.core.indexes.frozen import FrozenList
+
+
+def test_sortlevel(idx):
+    tuples = list(idx)
+    np.random.default_rng(2).shuffle(tuples)
+
+    index = MultiIndex.from_tuples(tuples)
+
+    sorted_idx, _ = index.sortlevel(0)
+    expected = MultiIndex.from_tuples(sorted(tuples))
+    assert sorted_idx.equals(expected)
+
+    sorted_idx, _ = index.sortlevel(0, ascending=False)
+    assert sorted_idx.equals(expected[::-1])
+
+    sorted_idx, _ = index.sortlevel(1)
+    by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
+    expected = MultiIndex.from_tuples(by1)
+    assert sorted_idx.equals(expected)
+
+    sorted_idx, _ = index.sortlevel(1, ascending=False)
+    assert sorted_idx.equals(expected[::-1])
+
+
+def test_sortlevel_not_sort_remaining():
+    mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
+    sorted_idx, _ = mi.sortlevel("A", sort_remaining=False)
+    assert sorted_idx.equals(mi)
+
+
+def test_sortlevel_deterministic():
+    tuples = [
+        ("bar", "one"),
+        ("foo", "two"),
+        ("qux", "two"),
+        ("foo", "one"),
+        ("baz", "two"),
+        ("qux", "one"),
+    ]
+
+    index = MultiIndex.from_tuples(tuples)
+
+    sorted_idx, _ = index.sortlevel(0)
+    expected = MultiIndex.from_tuples(sorted(tuples))
+    assert sorted_idx.equals(expected)
+
+    sorted_idx, _ = index.sortlevel(0, ascending=False)
+    assert sorted_idx.equals(expected[::-1])
+
+    sorted_idx, _ = index.sortlevel(1)
+    by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
+    expected = MultiIndex.from_tuples(by1)
+    assert sorted_idx.equals(expected)
+
+    sorted_idx, _ = index.sortlevel(1, ascending=False)
+    assert sorted_idx.equals(expected[::-1])
+
+
+def test_sortlevel_na_position():
+    # GH#51612
+    midx = MultiIndex.from_tuples([(1, np.nan), (1, 1)])
+    result = midx.sortlevel(level=[0, 1], na_position="last")[0]
+    expected = MultiIndex.from_tuples([(1, 1), (1, np.nan)])
+    tm.assert_index_equal(result, expected)
+
+
+def test_numpy_argsort(idx):
+    result = np.argsort(idx)
+    expected = idx.argsort()
+    tm.assert_numpy_array_equal(result, expected)
+
+    # these are the only two types that perform
+    # pandas compatibility input validation - the
+    # rest already perform separate (or no) such
+    # validation via their 'values' attribute as
+    # defined in pandas.core.indexes/base.py - they
+    # cannot be changed at the moment due to
+    # backwards compatibility concerns
+    if isinstance(idx, (CategoricalIndex, RangeIndex)):
+        msg = "the 'axis' parameter is not supported"
+        with pytest.raises(ValueError, match=msg):
+            
np.argsort(idx, axis=1) + + msg = "the 'kind' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.argsort(idx, kind="mergesort") + + msg = "the 'order' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.argsort(idx, order=("a", "b")) + + +def test_unsortedindex(): + # GH 11897 + mi = MultiIndex.from_tuples( + [("z", "a"), ("x", "a"), ("y", "b"), ("x", "b"), ("y", "a"), ("z", "b")], + names=["one", "two"], + ) + df = DataFrame([[i, 10 * i] for i in range(6)], index=mi, columns=["one", "two"]) + + # GH 16734: not sorted, but no real slicing + result = df.loc(axis=0)["z", "a"] + expected = df.iloc[0] + tm.assert_series_equal(result, expected) + + msg = ( + "MultiIndex slicing requires the index to be lexsorted: " + r"slicing on levels \[1\], lexsort depth 0" + ) + with pytest.raises(UnsortedIndexError, match=msg): + df.loc(axis=0)["z", slice("a")] + df.sort_index(inplace=True) + assert len(df.loc(axis=0)["z", :]) == 2 + + with pytest.raises(KeyError, match="'q'"): + df.loc(axis=0)["q", :] + + +def test_unsortedindex_doc_examples(): + # https://pandas.pydata.org/pandas-docs/stable/advanced.html#sorting-a-multiindex + dfm = DataFrame( + { + "jim": [0, 0, 1, 1], + "joe": ["x", "x", "z", "y"], + "jolie": np.random.default_rng(2).random(4), + } + ) + + dfm = dfm.set_index(["jim", "joe"]) + with tm.assert_produces_warning(PerformanceWarning): + dfm.loc[(1, "z")] + + msg = r"Key length \(2\) was greater than MultiIndex lexsort depth \(1\)" + with pytest.raises(UnsortedIndexError, match=msg): + dfm.loc[(0, "y"):(1, "z")] + + assert not dfm.index._is_lexsorted() + assert dfm.index._lexsort_depth == 1 + + # sort it + dfm = dfm.sort_index() + dfm.loc[(1, "z")] + dfm.loc[(0, "y"):(1, "z")] + + assert dfm.index._is_lexsorted() + assert dfm.index._lexsort_depth == 2 + + +def test_reconstruct_sort(): + # starts off lexsorted & monotonic + mi = MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]]) + assert mi.is_monotonic_increasing + recons = mi._sort_levels_monotonic() + assert recons.is_monotonic_increasing + assert mi is recons + + assert mi.equals(recons) + assert Index(mi.values).equals(Index(recons.values)) + + # cannot convert to lexsorted + mi = MultiIndex.from_tuples( + [("z", "a"), ("x", "a"), ("y", "b"), ("x", "b"), ("y", "a"), ("z", "b")], + names=["one", "two"], + ) + assert not mi.is_monotonic_increasing + recons = mi._sort_levels_monotonic() + assert not recons.is_monotonic_increasing + assert mi.equals(recons) + assert Index(mi.values).equals(Index(recons.values)) + + # cannot convert to lexsorted + mi = MultiIndex( + levels=[["b", "d", "a"], [1, 2, 3]], + codes=[[0, 1, 0, 2], [2, 0, 0, 1]], + names=["col1", "col2"], + ) + assert not mi.is_monotonic_increasing + recons = mi._sort_levels_monotonic() + assert not recons.is_monotonic_increasing + assert mi.equals(recons) + assert Index(mi.values).equals(Index(recons.values)) + + +def test_reconstruct_remove_unused(): + # xref to GH 2770 + df = DataFrame( + [["deleteMe", 1, 9], ["keepMe", 2, 9], ["keepMeToo", 3, 9]], + columns=["first", "second", "third"], + ) + df2 = df.set_index(["first", "second"], drop=False) + df2 = df2[df2["first"] != "deleteMe"] + + # removed levels are there + expected = MultiIndex( + levels=[["deleteMe", "keepMe", "keepMeToo"], [1, 2, 3]], + codes=[[1, 2], [1, 2]], + names=["first", "second"], + ) + result = df2.index + tm.assert_index_equal(result, expected) + + expected = MultiIndex( + levels=[["keepMe", "keepMeToo"], [2, 3]], + codes=[[0, 1], [0, 
1]], + names=["first", "second"], + ) + result = df2.index.remove_unused_levels() + tm.assert_index_equal(result, expected) + + # idempotent + result2 = result.remove_unused_levels() + tm.assert_index_equal(result2, expected) + assert result2.is_(result) + + +@pytest.mark.parametrize( + "first_type,second_type", [("int64", "int64"), ("datetime64[D]", "str")] +) +def test_remove_unused_levels_large(first_type, second_type): + # GH16556 + + # because tests should be deterministic (and this test in particular + # checks that levels are removed, which is not the case for every + # random input): + rng = np.random.default_rng(10) # seed is arbitrary value that works + + size = 1 << 16 + df = DataFrame( + { + "first": rng.integers(0, 1 << 13, size).astype(first_type), + "second": rng.integers(0, 1 << 10, size).astype(second_type), + "third": rng.random(size), + } + ) + df = df.groupby(["first", "second"]).sum() + df = df[df.third < 0.1] + + result = df.index.remove_unused_levels() + assert len(result.levels[0]) < len(df.index.levels[0]) + assert len(result.levels[1]) < len(df.index.levels[1]) + assert result.equals(df.index) + + expected = df.reset_index().set_index(["first", "second"]).index + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("level0", [["a", "d", "b"], ["a", "d", "b", "unused"]]) +@pytest.mark.parametrize( + "level1", [["w", "x", "y", "z"], ["w", "x", "y", "z", "unused"]] +) +def test_remove_unused_nan(level0, level1): + # GH 18417 + mi = MultiIndex(levels=[level0, level1], codes=[[0, 2, -1, 1, -1], [0, 1, 2, 3, 2]]) + + result = mi.remove_unused_levels() + tm.assert_index_equal(result, mi) + for level in 0, 1: + assert "unused" not in result.levels[level] + + +def test_argsort(idx): + result = idx.argsort() + expected = idx.values.argsort() + tm.assert_numpy_array_equal(result, expected) + + +def test_remove_unused_levels_with_nan(): + # GH 37510 + idx = Index([(1, np.nan), (3, 4)]).rename(["id1", "id2"]) + idx = idx.set_levels(["a", np.nan], level="id1") + idx = idx.remove_unused_levels() + result = idx.levels + expected = FrozenList([["a", np.nan], [4]]) + assert str(result) == str(expected) + + +def test_sort_values_nan(): + # GH48495, GH48626 + midx = MultiIndex(levels=[["A", "B", "C"], ["D"]], codes=[[1, 0, 2], [-1, -1, 0]]) + result = midx.sort_values() + expected = MultiIndex( + levels=[["A", "B", "C"], ["D"]], codes=[[0, 1, 2], [-1, -1, 0]] + ) + tm.assert_index_equal(result, expected) + + +def test_sort_values_incomparable(): + # GH48495 + mi = MultiIndex.from_arrays( + [ + [1, Timestamp("2000-01-01")], + [3, 4], + ] + ) + match = "'<' not supported between instances of 'Timestamp' and 'int'" + with pytest.raises(TypeError, match=match): + mi.sort_values() + + +@pytest.mark.parametrize("na_position", ["first", "last"]) +@pytest.mark.parametrize("dtype", ["float64", "Int64", "Float64"]) +def test_sort_values_with_na_na_position(dtype, na_position): + # 51612 + arrays = [ + Series([1, 1, 2], dtype=dtype), + Series([1, None, 3], dtype=dtype), + ] + index = MultiIndex.from_arrays(arrays) + result = index.sort_values(na_position=na_position) + if na_position == "first": + arrays = [ + Series([1, 1, 2], dtype=dtype), + Series([None, 1, 3], dtype=dtype), + ] + else: + arrays = [ + Series([1, 1, 2], dtype=dtype), + Series([1, None, 3], dtype=dtype), + ] + expected = MultiIndex.from_arrays(arrays) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_take.py 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_take.py new file mode 100644 index 00000000..543cba25 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_take.py @@ -0,0 +1,78 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +def test_take(idx): + indexer = [4, 3, 0, 2] + result = idx.take(indexer) + expected = idx[indexer] + assert result.equals(expected) + + # GH 10791 + msg = "'MultiIndex' object has no attribute 'freq'" + with pytest.raises(AttributeError, match=msg): + idx.freq + + +def test_take_invalid_kwargs(idx): + indices = [1, 2] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + idx.take(indices, foo=2) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, out=indices) + + msg = "the 'mode' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, mode="clip") + + +def test_take_fill_value(): + # GH 12631 + vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]] + idx = pd.MultiIndex.from_product(vals, names=["str", "dt"]) + + result = idx.take(np.array([1, 0, -1])) + exp_vals = [ + ("A", pd.Timestamp("2011-01-02")), + ("A", pd.Timestamp("2011-01-01")), + ("B", pd.Timestamp("2011-01-02")), + ] + expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"]) + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + exp_vals = [ + ("A", pd.Timestamp("2011-01-02")), + ("A", pd.Timestamp("2011-01-01")), + (np.nan, pd.NaT), + ] + expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"]) + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + exp_vals = [ + ("A", pd.Timestamp("2011-01-02")), + ("A", pd.Timestamp("2011-01-01")), + ("B", pd.Timestamp("2011-01-02")), + ] + expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"]) + tm.assert_index_equal(result, expected) + + msg = "When allow_fill=True and fill_value is not None, all indices must be >= -1" + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "index -5 is out of bounds for( axis 0 with)? 
size 4" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_astype.py new file mode 100644 index 00000000..1c2df600 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_astype.py @@ -0,0 +1,95 @@ +import numpy as np +import pytest + +from pandas import ( + Index, + to_datetime, + to_timedelta, +) +import pandas._testing as tm + + +class TestAstype: + def test_astype_float64_to_uint64(self): + # GH#45309 used to incorrectly return Index with int64 dtype + idx = Index([0.0, 5.0, 10.0, 15.0, 20.0], dtype=np.float64) + result = idx.astype("u8") + expected = Index([0, 5, 10, 15, 20], dtype=np.uint64) + tm.assert_index_equal(result, expected, exact=True) + + idx_with_negatives = idx - 10 + with pytest.raises(ValueError, match="losslessly"): + idx_with_negatives.astype(np.uint64) + + def test_astype_float64_to_object(self): + float_index = Index([0.0, 2.5, 5.0, 7.5, 10.0], dtype=np.float64) + result = float_index.astype(object) + assert result.equals(float_index) + assert float_index.equals(result) + assert isinstance(result, Index) and result.dtype == object + + def test_astype_float64_mixed_to_object(self): + # mixed int-float + idx = Index([1.5, 2, 3, 4, 5], dtype=np.float64) + idx.name = "foo" + result = idx.astype(object) + assert result.equals(idx) + assert idx.equals(result) + assert isinstance(result, Index) and result.dtype == object + + @pytest.mark.parametrize("dtype", ["int16", "int32", "int64"]) + def test_astype_float64_to_int_dtype(self, dtype): + # GH#12881 + # a float astype int + idx = Index([0, 1, 2], dtype=np.float64) + result = idx.astype(dtype) + expected = Index([0, 1, 2], dtype=dtype) + tm.assert_index_equal(result, expected, exact=True) + + idx = Index([0, 1.1, 2], dtype=np.float64) + result = idx.astype(dtype) + expected = Index([0, 1, 2], dtype=dtype) + tm.assert_index_equal(result, expected, exact=True) + + @pytest.mark.parametrize("dtype", ["float32", "float64"]) + def test_astype_float64_to_float_dtype(self, dtype): + # GH#12881 + # a float astype float + idx = Index([0, 1, 2], dtype=np.float64) + result = idx.astype(dtype) + assert isinstance(result, Index) and result.dtype == dtype + + @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) + def test_astype_float_to_datetimelike(self, dtype): + # GH#49660 pre-2.0 Index.astype from floating to M8/m8/Period raised, + # inconsistent with Series.astype + idx = Index([0, 1.1, 2], dtype=np.float64) + + result = idx.astype(dtype) + if dtype[0] == "M": + expected = to_datetime(idx.values) + else: + expected = to_timedelta(idx.values) + tm.assert_index_equal(result, expected) + + # check that we match Series behavior + result = idx.to_series().set_axis(range(3)).astype(dtype) + expected = expected.to_series().set_axis(range(3)) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", [int, "int16", "int32", "int64"]) + @pytest.mark.parametrize("non_finite", [np.inf, np.nan]) + def test_cannot_cast_inf_to_int(self, non_finite, dtype): + # GH#13149 + idx = Index([1, 2, non_finite], dtype=np.float64) + + msg = r"Cannot convert non-finite values \(NA or inf\) to 
integer" + with pytest.raises(ValueError, match=msg): + idx.astype(dtype) + + def test_astype_from_object(self): + index = Index([1.0, np.nan, 0.2], dtype="object") + result = index.astype(float) + expected = Index([1.0, np.nan, 0.2], dtype=np.float64) + assert result.dtype == expected.dtype + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_indexing.py new file mode 100644 index 00000000..cd28d519 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_indexing.py @@ -0,0 +1,611 @@ +import numpy as np +import pytest + +from pandas.errors import InvalidIndexError + +from pandas import ( + NA, + Index, + RangeIndex, + Series, + Timestamp, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowExtensionArray, + FloatingArray, +) + + +@pytest.fixture +def index_large(): + # large values used in Index[uint64] tests where no compat needed with Int64/Float64 + large = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25] + return Index(large, dtype=np.uint64) + + +class TestGetLoc: + def test_get_loc(self): + index = Index([0, 1, 2]) + assert index.get_loc(1) == 1 + + def test_get_loc_raises_bad_label(self): + index = Index([0, 1, 2]) + with pytest.raises(InvalidIndexError, match=r"\[1, 2\]"): + index.get_loc([1, 2]) + + def test_get_loc_float64(self): + idx = Index([0.0, 1.0, 2.0], dtype=np.float64) + + with pytest.raises(KeyError, match="^'foo'$"): + idx.get_loc("foo") + with pytest.raises(KeyError, match=r"^1\.5$"): + idx.get_loc(1.5) + with pytest.raises(KeyError, match="^True$"): + idx.get_loc(True) + with pytest.raises(KeyError, match="^False$"): + idx.get_loc(False) + + def test_get_loc_na(self): + idx = Index([np.nan, 1, 2], dtype=np.float64) + assert idx.get_loc(1) == 1 + assert idx.get_loc(np.nan) == 0 + + idx = Index([np.nan, 1, np.nan], dtype=np.float64) + assert idx.get_loc(1) == 1 + + # representable by slice [0:2:2] + msg = "'Cannot get left slice bound for non-unique label: nan'" + with pytest.raises(KeyError, match=msg): + idx.slice_locs(np.nan) + # not representable by slice + idx = Index([np.nan, 1, np.nan, np.nan], dtype=np.float64) + assert idx.get_loc(1) == 1 + msg = "'Cannot get left slice bound for non-unique label: nan" + with pytest.raises(KeyError, match=msg): + idx.slice_locs(np.nan) + + def test_get_loc_missing_nan(self): + # GH#8569 + idx = Index([1, 2], dtype=np.float64) + assert idx.get_loc(1) == 0 + with pytest.raises(KeyError, match=r"^3$"): + idx.get_loc(3) + with pytest.raises(KeyError, match="^nan$"): + idx.get_loc(np.nan) + with pytest.raises(InvalidIndexError, match=r"\[nan\]"): + # listlike/non-hashable raises TypeError + idx.get_loc([np.nan]) + + @pytest.mark.parametrize("vals", [[1], [1.0], [Timestamp("2019-12-31")], ["test"]]) + def test_get_loc_float_index_nan_with_method(self, vals): + # GH#39382 + idx = Index(vals) + with pytest.raises(KeyError, match="nan"): + idx.get_loc(np.nan) + + @pytest.mark.parametrize("dtype", ["f8", "i8", "u8"]) + def test_get_loc_numericindex_none_raises(self, dtype): + # case that goes through searchsorted and key is non-comparable to values + arr = np.arange(10**7, dtype=dtype) + idx = Index(arr) + with pytest.raises(KeyError, match="None"): + idx.get_loc(None) + + def test_get_loc_overflows(self): + # unique but non-monotonic goes through IndexEngine.mapping.get_item + idx = Index([0, 2, 1]) + + val = 
np.iinfo(np.int64).max + 1 + + with pytest.raises(KeyError, match=str(val)): + idx.get_loc(val) + with pytest.raises(KeyError, match=str(val)): + idx._engine.get_loc(val) + + +class TestGetIndexer: + def test_get_indexer(self): + index1 = Index([1, 2, 3, 4, 5]) + index2 = Index([2, 4, 6]) + + r1 = index1.get_indexer(index2) + e1 = np.array([1, 3, -1], dtype=np.intp) + tm.assert_almost_equal(r1, e1) + + @pytest.mark.parametrize("reverse", [True, False]) + @pytest.mark.parametrize( + "expected,method", + [ + (np.array([-1, 0, 0, 1, 1], dtype=np.intp), "pad"), + (np.array([-1, 0, 0, 1, 1], dtype=np.intp), "ffill"), + (np.array([0, 0, 1, 1, 2], dtype=np.intp), "backfill"), + (np.array([0, 0, 1, 1, 2], dtype=np.intp), "bfill"), + ], + ) + def test_get_indexer_methods(self, reverse, expected, method): + index1 = Index([1, 2, 3, 4, 5]) + index2 = Index([2, 4, 6]) + + if reverse: + index1 = index1[::-1] + expected = expected[::-1] + + result = index2.get_indexer(index1, method=method) + tm.assert_almost_equal(result, expected) + + def test_get_indexer_invalid(self): + # GH10411 + index = Index(np.arange(10)) + + with pytest.raises(ValueError, match="tolerance argument"): + index.get_indexer([1, 0], tolerance=1) + + with pytest.raises(ValueError, match="limit argument"): + index.get_indexer([1, 0], limit=1) + + @pytest.mark.parametrize( + "method, tolerance, indexer, expected", + [ + ("pad", None, [0, 5, 9], [0, 5, 9]), + ("backfill", None, [0, 5, 9], [0, 5, 9]), + ("nearest", None, [0, 5, 9], [0, 5, 9]), + ("pad", 0, [0, 5, 9], [0, 5, 9]), + ("backfill", 0, [0, 5, 9], [0, 5, 9]), + ("nearest", 0, [0, 5, 9], [0, 5, 9]), + ("pad", None, [0.2, 1.8, 8.5], [0, 1, 8]), + ("backfill", None, [0.2, 1.8, 8.5], [1, 2, 9]), + ("nearest", None, [0.2, 1.8, 8.5], [0, 2, 9]), + ("pad", 1, [0.2, 1.8, 8.5], [0, 1, 8]), + ("backfill", 1, [0.2, 1.8, 8.5], [1, 2, 9]), + ("nearest", 1, [0.2, 1.8, 8.5], [0, 2, 9]), + ("pad", 0.2, [0.2, 1.8, 8.5], [0, -1, -1]), + ("backfill", 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]), + ("nearest", 0.2, [0.2, 1.8, 8.5], [0, 2, -1]), + ], + ) + def test_get_indexer_nearest(self, method, tolerance, indexer, expected): + index = Index(np.arange(10)) + + actual = index.get_indexer(indexer, method=method, tolerance=tolerance) + tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) + + @pytest.mark.parametrize("listtype", [list, tuple, Series, np.array]) + @pytest.mark.parametrize( + "tolerance, expected", + list( + zip( + [[0.3, 0.3, 0.1], [0.2, 0.1, 0.1], [0.1, 0.5, 0.5]], + [[0, 2, -1], [0, -1, -1], [-1, 2, 9]], + ) + ), + ) + def test_get_indexer_nearest_listlike_tolerance( + self, tolerance, expected, listtype + ): + index = Index(np.arange(10)) + + actual = index.get_indexer( + [0.2, 1.8, 8.5], method="nearest", tolerance=listtype(tolerance) + ) + tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) + + def test_get_indexer_nearest_error(self): + index = Index(np.arange(10)) + with pytest.raises(ValueError, match="limit argument"): + index.get_indexer([1, 0], method="nearest", limit=1) + + with pytest.raises(ValueError, match="tolerance size must match"): + index.get_indexer([1, 0], method="nearest", tolerance=[1, 2, 3]) + + @pytest.mark.parametrize( + "method,expected", + [("pad", [8, 7, 0]), ("backfill", [9, 8, 1]), ("nearest", [9, 7, 0])], + ) + def test_get_indexer_nearest_decreasing(self, method, expected): + index = Index(np.arange(10))[::-1] + + actual = index.get_indexer([0, 5, 9], method=method) + tm.assert_numpy_array_equal(actual, np.array([9, 4, 
0], dtype=np.intp)) + + actual = index.get_indexer([0.2, 1.8, 8.5], method=method) + tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) + + @pytest.mark.parametrize("idx_dtype", ["int64", "float64", "uint64", "range"]) + @pytest.mark.parametrize("method", ["get_indexer", "get_indexer_non_unique"]) + def test_get_indexer_numeric_index_boolean_target(self, method, idx_dtype): + # GH 16877 + + if idx_dtype == "range": + numeric_index = RangeIndex(4) + else: + numeric_index = Index(np.arange(4, dtype=idx_dtype)) + + other = Index([True, False, True]) + + result = getattr(numeric_index, method)(other) + expected = np.array([-1, -1, -1], dtype=np.intp) + if method == "get_indexer": + tm.assert_numpy_array_equal(result, expected) + else: + missing = np.arange(3, dtype=np.intp) + tm.assert_numpy_array_equal(result[0], expected) + tm.assert_numpy_array_equal(result[1], missing) + + @pytest.mark.parametrize("method", ["pad", "backfill", "nearest"]) + def test_get_indexer_with_method_numeric_vs_bool(self, method): + left = Index([1, 2, 3]) + right = Index([True, False]) + + with pytest.raises(TypeError, match="Cannot compare"): + left.get_indexer(right, method=method) + + with pytest.raises(TypeError, match="Cannot compare"): + right.get_indexer(left, method=method) + + def test_get_indexer_numeric_vs_bool(self): + left = Index([1, 2, 3]) + right = Index([True, False]) + + res = left.get_indexer(right) + expected = -1 * np.ones(len(right), dtype=np.intp) + tm.assert_numpy_array_equal(res, expected) + + res = right.get_indexer(left) + expected = -1 * np.ones(len(left), dtype=np.intp) + tm.assert_numpy_array_equal(res, expected) + + res = left.get_indexer_non_unique(right)[0] + expected = -1 * np.ones(len(right), dtype=np.intp) + tm.assert_numpy_array_equal(res, expected) + + res = right.get_indexer_non_unique(left)[0] + expected = -1 * np.ones(len(left), dtype=np.intp) + tm.assert_numpy_array_equal(res, expected) + + def test_get_indexer_float64(self): + idx = Index([0.0, 1.0, 2.0], dtype=np.float64) + tm.assert_numpy_array_equal( + idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp) + ) + + target = [-0.1, 0.5, 1.1] + tm.assert_numpy_array_equal( + idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp) + ) + tm.assert_numpy_array_equal( + idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp) + ) + tm.assert_numpy_array_equal( + idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp) + ) + + def test_get_indexer_nan(self): + # GH#7820 + result = Index([1, 2, np.nan], dtype=np.float64).get_indexer([np.nan]) + expected = np.array([2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_int64(self): + index = Index(range(0, 20, 2), dtype=np.int64) + target = Index(np.arange(10), dtype=np.int64) + indexer = index.get_indexer(target) + expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + target = Index(np.arange(10), dtype=np.int64) + indexer = index.get_indexer(target, method="pad") + expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + target = Index(np.arange(10), dtype=np.int64) + indexer = index.get_indexer(target, method="backfill") + expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + def test_get_indexer_uint64(self, index_large): + target = Index(np.arange(10).astype("uint64") * 5 + 2**63) 
+ indexer = index_large.get_indexer(target) + expected = np.array([0, -1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + target = Index(np.arange(10).astype("uint64") * 5 + 2**63) + indexer = index_large.get_indexer(target, method="pad") + expected = np.array([0, 0, 1, 2, 3, 4, 4, 4, 4, 4], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + target = Index(np.arange(10).astype("uint64") * 5 + 2**63) + indexer = index_large.get_indexer(target, method="backfill") + expected = np.array([0, 1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + @pytest.mark.parametrize("val, val2", [(4, 5), (4, 4), (4, NA), (NA, NA)]) + def test_get_loc_masked(self, val, val2, any_numeric_ea_and_arrow_dtype): + # GH#39133 + idx = Index([1, 2, 3, val, val2], dtype=any_numeric_ea_and_arrow_dtype) + result = idx.get_loc(2) + assert result == 1 + + with pytest.raises(KeyError, match="9"): + idx.get_loc(9) + + def test_get_loc_masked_na(self, any_numeric_ea_and_arrow_dtype): + # GH#39133 + idx = Index([1, 2, NA], dtype=any_numeric_ea_and_arrow_dtype) + result = idx.get_loc(NA) + assert result == 2 + + idx = Index([1, 2, NA, NA], dtype=any_numeric_ea_and_arrow_dtype) + result = idx.get_loc(NA) + tm.assert_numpy_array_equal(result, np.array([False, False, True, True])) + + idx = Index([1, 2, 3], dtype=any_numeric_ea_and_arrow_dtype) + with pytest.raises(KeyError, match="NA"): + idx.get_loc(NA) + + def test_get_loc_masked_na_and_nan(self): + # GH#39133 + idx = Index( + FloatingArray( + np.array([1, 2, 1, np.nan]), mask=np.array([False, False, True, False]) + ) + ) + result = idx.get_loc(NA) + assert result == 2 + result = idx.get_loc(np.nan) + assert result == 3 + + idx = Index( + FloatingArray(np.array([1, 2, 1.0]), mask=np.array([False, False, True])) + ) + result = idx.get_loc(NA) + assert result == 2 + with pytest.raises(KeyError, match="nan"): + idx.get_loc(np.nan) + + idx = Index( + FloatingArray( + np.array([1, 2, np.nan]), mask=np.array([False, False, False]) + ) + ) + result = idx.get_loc(np.nan) + assert result == 2 + with pytest.raises(KeyError, match="NA"): + idx.get_loc(NA) + + @pytest.mark.parametrize("val", [4, 2]) + def test_get_indexer_masked_na(self, any_numeric_ea_and_arrow_dtype, val): + # GH#39133 + idx = Index([1, 2, NA, 3, val], dtype=any_numeric_ea_and_arrow_dtype) + result = idx.get_indexer_for([1, NA, 5]) + expected = np.array([0, 2, -1]) + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + @pytest.mark.parametrize("dtype", ["boolean", "bool[pyarrow]"]) + def test_get_indexer_masked_na_boolean(self, dtype): + # GH#39133 + if dtype == "bool[pyarrow]": + pytest.importorskip("pyarrow") + idx = Index([True, False, NA], dtype=dtype) + result = idx.get_loc(False) + assert result == 1 + result = idx.get_loc(NA) + assert result == 2 + + def test_get_indexer_arrow_dictionary_target(self): + pa = pytest.importorskip("pyarrow") + target = Index( + ArrowExtensionArray( + pa.array([1, 2], type=pa.dictionary(pa.int8(), pa.int8())) + ) + ) + idx = Index([1]) + + result = idx.get_indexer(target) + expected = np.array([0, -1], dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) + + result_1, result_2 = idx.get_indexer_non_unique(target) + expected_1, expected_2 = np.array([0, -1], dtype=np.int64), np.array( + [1], dtype=np.int64 + ) + tm.assert_numpy_array_equal(result_1, expected_1) + tm.assert_numpy_array_equal(result_2, expected_2) + + +class TestWhere: + 
@pytest.mark.parametrize( + "index", + [ + Index(np.arange(5, dtype="float64")), + Index(range(0, 20, 2), dtype=np.int64), + Index(np.arange(5, dtype="uint64")), + ], + ) + def test_where(self, listlike_box, index): + cond = [True] * len(index) + expected = index + result = index.where(listlike_box(cond)) + tm.assert_index_equal(result, expected) + + cond = [False] + [True] * (len(index) - 1) + expected = Index([index._na_value] + index[1:].tolist(), dtype=np.float64) + result = index.where(listlike_box(cond)) + tm.assert_index_equal(result, expected) + + def test_where_uint64(self): + idx = Index([0, 6, 2], dtype=np.uint64) + mask = np.array([False, True, False]) + other = np.array([1], dtype=np.int64) + + expected = Index([1, 6, 1], dtype=np.uint64) + + result = idx.where(mask, other) + tm.assert_index_equal(result, expected) + + result = idx.putmask(~mask, other) + tm.assert_index_equal(result, expected) + + def test_where_infers_type_instead_of_trying_to_convert_string_to_float(self): + # GH 32413 + index = Index([1, np.nan]) + cond = index.notna() + other = Index(["a", "b"], dtype="string") + + expected = Index([1.0, "b"]) + result = index.where(cond, other) + + tm.assert_index_equal(result, expected) + + +class TestTake: + @pytest.mark.parametrize("idx_dtype", [np.float64, np.int64, np.uint64]) + def test_take_preserve_name(self, idx_dtype): + index = Index([1, 2, 3, 4], dtype=idx_dtype, name="foo") + taken = index.take([3, 0, 1]) + assert index.name == taken.name + + def test_take_fill_value_float64(self): + # GH 12631 + idx = Index([1.0, 2.0, 3.0], name="xxx", dtype=np.float64) + result = idx.take(np.array([1, 0, -1])) + expected = Index([2.0, 1.0, 3.0], dtype=np.float64, name="xxx") + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = Index([2.0, 1.0, np.nan], dtype=np.float64, name="xxx") + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = Index([2.0, 1.0, 3.0], dtype=np.float64, name="xxx") + tm.assert_index_equal(result, expected) + + msg = ( + "When allow_fill=True and fill_value is not None, " + "all indices must be >= -1" + ) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "index -5 is out of bounds for (axis 0 with )?size 3" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + @pytest.mark.parametrize("dtype", [np.int64, np.uint64]) + def test_take_fill_value_ints(self, dtype): + # see gh-12631 + idx = Index([1, 2, 3], dtype=dtype, name="xxx") + result = idx.take(np.array([1, 0, -1])) + expected = Index([2, 1, 3], dtype=dtype, name="xxx") + tm.assert_index_equal(result, expected) + + name = type(idx).__name__ + msg = f"Unable to fill values because {name} cannot contain NA" + + # fill_value=True + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -1]), fill_value=True) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = Index([2, 1, 3], dtype=dtype, name="xxx") + tm.assert_index_equal(result, expected) + + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "index -5 is out of bounds for (axis 0 with )?size 3" + with
pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + +class TestContains: + @pytest.mark.parametrize("dtype", [np.float64, np.int64, np.uint64]) + def test_contains_none(self, dtype): + # GH#35788 should return False, not raise TypeError + index = Index([0, 1, 2, 3, 4], dtype=dtype) + assert None not in index + + def test_contains_float64_nans(self): + index = Index([1.0, 2.0, np.nan], dtype=np.float64) + assert np.nan in index + + def test_contains_float64_not_nans(self): + index = Index([1.0, 2.0, np.nan], dtype=np.float64) + assert 1.0 in index + + +class TestSliceLocs: + @pytest.mark.parametrize("dtype", [int, float]) + def test_slice_locs(self, dtype): + index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype)) + n = len(index) + + assert index.slice_locs(start=2) == (2, n) + assert index.slice_locs(start=3) == (3, n) + assert index.slice_locs(3, 8) == (3, 6) + assert index.slice_locs(5, 10) == (3, n) + assert index.slice_locs(end=8) == (0, 6) + assert index.slice_locs(end=9) == (0, 7) + + # reversed + index2 = index[::-1] + assert index2.slice_locs(8, 2) == (2, 6) + assert index2.slice_locs(7, 3) == (2, 5) + + @pytest.mark.parametrize("dtype", [int, float]) + def test_slice_locs_float_locs(self, dtype): + index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype)) + n = len(index) + assert index.slice_locs(5.0, 10.0) == (3, n) + assert index.slice_locs(4.5, 10.5) == (3, 8) + + index2 = index[::-1] + assert index2.slice_locs(8.5, 1.5) == (2, 6) + assert index2.slice_locs(10.5, -1) == (0, n) + + @pytest.mark.parametrize("dtype", [int, float]) + def test_slice_locs_dup_numeric(self, dtype): + index = Index(np.array([10, 12, 12, 14], dtype=dtype)) + assert index.slice_locs(12, 12) == (1, 3) + assert index.slice_locs(11, 13) == (1, 3) + + index2 = index[::-1] + assert index2.slice_locs(12, 12) == (1, 3) + assert index2.slice_locs(13, 11) == (1, 3) + + def test_slice_locs_na(self): + index = Index([np.nan, 1, 2]) + assert index.slice_locs(1) == (1, 3) + assert index.slice_locs(np.nan) == (0, 3) + + index = Index([0, np.nan, np.nan, 1, 2]) + assert index.slice_locs(np.nan) == (1, 5) + + def test_slice_locs_na_raises(self): + index = Index([np.nan, 1, 2]) + with pytest.raises(KeyError, match=""): + index.slice_locs(start=1.5) + + with pytest.raises(KeyError, match=""): + index.slice_locs(end=1.5) + + +class TestGetSliceBounds: + @pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)]) + def test_get_slice_bounds_within(self, side, expected): + index = Index(range(6)) + result = index.get_slice_bound(4, side=side) + assert result == expected + + @pytest.mark.parametrize("side", ["left", "right"]) + @pytest.mark.parametrize("bound, expected", [(-1, 0), (10, 6)]) + def test_get_slice_bounds_outside(self, side, expected, bound): + index = Index(range(6)) + result = index.get_slice_bound(bound, side=side) + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_join.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_join.py new file mode 100644 index 00000000..93ff6238 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_join.py @@ -0,0 +1,380 @@ +import numpy as np +import pytest + +import pandas._testing as tm +from pandas.core.indexes.api import Index + + +class TestJoinInt64Index: + def test_join_non_unique(self): + left = Index([4, 4, 3, 3]) + + joined, lidx, ridx = left.join(left, return_indexers=True) + + exp_joined = Index([3, 
3, 3, 3, 4, 4, 4, 4]) + tm.assert_index_equal(joined, exp_joined) + + exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.intp) + tm.assert_numpy_array_equal(lidx, exp_lidx) + + exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(ridx, exp_ridx) + + def test_join_inner(self): + index = Index(range(0, 20, 2), dtype=np.int64) + other = Index([7, 12, 25, 1, 2, 5], dtype=np.int64) + other_mono = Index([1, 2, 5, 7, 12, 25], dtype=np.int64) + + # not monotonic + res, lidx, ridx = index.join(other, how="inner", return_indexers=True) + + # no guarantee of sortedness, so sort for comparison purposes + ind = res.argsort() + res = res.take(ind) + lidx = lidx.take(ind) + ridx = ridx.take(ind) + + eres = Index([2, 12], dtype=np.int64) + elidx = np.array([1, 6], dtype=np.intp) + eridx = np.array([4, 1], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.int64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index.join(other_mono, how="inner", return_indexers=True) + + res2 = index.intersection(other_mono) + tm.assert_index_equal(res, res2) + + elidx = np.array([1, 6], dtype=np.intp) + eridx = np.array([1, 4], dtype=np.intp) + assert isinstance(res, Index) and res.dtype == np.int64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_left(self): + index = Index(range(0, 20, 2), dtype=np.int64) + other = Index([7, 12, 25, 1, 2, 5], dtype=np.int64) + other_mono = Index([1, 2, 5, 7, 12, 25], dtype=np.int64) + + # not monotonic + res, lidx, ridx = index.join(other, how="left", return_indexers=True) + eres = index + eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.int64 + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index.join(other_mono, how="left", return_indexers=True) + eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1], dtype=np.intp) + assert isinstance(res, Index) and res.dtype == np.int64 + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + # non-unique + idx = Index([1, 1, 2, 5]) + idx2 = Index([1, 2, 5, 7, 9]) + res, lidx, ridx = idx2.join(idx, how="left", return_indexers=True) + eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2 + eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) + elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_right(self): + index = Index(range(0, 20, 2), dtype=np.int64) + other = Index([7, 12, 25, 1, 2, 5], dtype=np.int64) + other_mono = Index([1, 2, 5, 7, 12, 25], dtype=np.int64) + + # not monotonic + res, lidx, ridx = index.join(other, how="right", return_indexers=True) + eres = other + elidx = np.array([-1, 6, -1, -1, 1, -1], dtype=np.intp) + + assert isinstance(other, Index) and other.dtype == np.int64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + assert ridx is None + + # monotonic + res, lidx, ridx = index.join(other_mono, how="right", return_indexers=True) + eres = other_mono + elidx = np.array([-1, 1, -1, -1, 6, -1], dtype=np.intp) + assert isinstance(other, Index) and other.dtype == np.int64 + 
tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + assert ridx is None + + # non-unique + idx = Index([1, 1, 2, 5]) + idx2 = Index([1, 2, 5, 7, 9]) + res, lidx, ridx = idx.join(idx2, how="right", return_indexers=True) + eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2 + elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) + eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_non_int_index(self): + index = Index(range(0, 20, 2), dtype=np.int64) + other = Index([3, 6, 7, 8, 10], dtype=object) + + outer = index.join(other, how="outer") + outer2 = other.join(index, how="outer") + expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18]) + tm.assert_index_equal(outer, outer2) + tm.assert_index_equal(outer, expected) + + inner = index.join(other, how="inner") + inner2 = other.join(index, how="inner") + expected = Index([6, 8, 10]) + tm.assert_index_equal(inner, inner2) + tm.assert_index_equal(inner, expected) + + left = index.join(other, how="left") + tm.assert_index_equal(left, index.astype(object)) + + left2 = other.join(index, how="left") + tm.assert_index_equal(left2, other) + + right = index.join(other, how="right") + tm.assert_index_equal(right, other) + + right2 = other.join(index, how="right") + tm.assert_index_equal(right2, index.astype(object)) + + def test_join_outer(self): + index = Index(range(0, 20, 2), dtype=np.int64) + other = Index([7, 12, 25, 1, 2, 5], dtype=np.int64) + other_mono = Index([1, 2, 5, 7, 12, 25], dtype=np.int64) + + # not monotonic + # guarantee of sortedness + res, lidx, ridx = index.join(other, how="outer", return_indexers=True) + noidx_res = index.join(other, how="outer") + tm.assert_index_equal(res, noidx_res) + + eres = Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25], dtype=np.int64) + elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp) + eridx = np.array( + [-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2], dtype=np.intp + ) + + assert isinstance(res, Index) and res.dtype == np.int64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index.join(other_mono, how="outer", return_indexers=True) + noidx_res = index.join(other_mono, how="outer") + tm.assert_index_equal(res, noidx_res) + + elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp) + eridx = np.array( + [-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5], dtype=np.intp + ) + assert isinstance(res, Index) and res.dtype == np.int64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + +class TestJoinUInt64Index: + @pytest.fixture + def index_large(self): + # large values used in TestUInt64Index where no compat needed with int64/float64 + large = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25] + return Index(large, dtype=np.uint64) + + def test_join_inner(self, index_large): + other = Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) + other_mono = Index(2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")) + + # not monotonic + res, lidx, ridx = index_large.join(other, how="inner", return_indexers=True) + + # no guarantee of sortedness, so sort for comparison purposes + ind = res.argsort() + res = res.take(ind) + lidx = lidx.take(ind) + ridx = ridx.take(ind) + + 
eres = Index(2**63 + np.array([10, 25], dtype="uint64")) + elidx = np.array([1, 4], dtype=np.intp) + eridx = np.array([5, 2], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.uint64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index_large.join( + other_mono, how="inner", return_indexers=True + ) + + res2 = index_large.intersection(other_mono) + tm.assert_index_equal(res, res2) + + elidx = np.array([1, 4], dtype=np.intp) + eridx = np.array([3, 5], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.uint64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_left(self, index_large): + other = Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) + other_mono = Index(2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")) + + # not monotonic + res, lidx, ridx = index_large.join(other, how="left", return_indexers=True) + eres = index_large + eridx = np.array([-1, 5, -1, -1, 2], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.uint64 + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index_large.join(other_mono, how="left", return_indexers=True) + eridx = np.array([-1, 3, -1, -1, 5], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.uint64 + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + # non-unique + idx = Index(2**63 + np.array([1, 1, 2, 5], dtype="uint64")) + idx2 = Index(2**63 + np.array([1, 2, 5, 7, 9], dtype="uint64")) + res, lidx, ridx = idx2.join(idx, how="left", return_indexers=True) + + # 1 is in idx2, so it should be x2 + eres = Index(2**63 + np.array([1, 1, 2, 5, 7, 9], dtype="uint64")) + eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) + elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) + + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_right(self, index_large): + other = Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) + other_mono = Index(2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")) + + # not monotonic + res, lidx, ridx = index_large.join(other, how="right", return_indexers=True) + eres = other + elidx = np.array([-1, -1, 4, -1, -1, 1], dtype=np.intp) + + tm.assert_numpy_array_equal(lidx, elidx) + assert isinstance(other, Index) and other.dtype == np.uint64 + tm.assert_index_equal(res, eres) + assert ridx is None + + # monotonic + res, lidx, ridx = index_large.join( + other_mono, how="right", return_indexers=True + ) + eres = other_mono + elidx = np.array([-1, -1, -1, 1, -1, 4], dtype=np.intp) + + assert isinstance(other, Index) and other.dtype == np.uint64 + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_index_equal(res, eres) + assert ridx is None + + # non-unique + idx = Index(2**63 + np.array([1, 1, 2, 5], dtype="uint64")) + idx2 = Index(2**63 + np.array([1, 2, 5, 7, 9], dtype="uint64")) + res, lidx, ridx = idx.join(idx2, how="right", return_indexers=True) + + # 1 is in idx2, so it should be x2 + eres = Index(2**63 + np.array([1, 1, 2, 5, 7, 9], dtype="uint64")) + elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) + eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) + + tm.assert_index_equal(res, eres) + 
tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_non_int_index(self, index_large): + other = Index( + 2**63 + np.array([1, 5, 7, 10, 20], dtype="uint64"), dtype=object + ) + + outer = index_large.join(other, how="outer") + outer2 = other.join(index_large, how="outer") + expected = Index( + 2**63 + np.array([0, 1, 5, 7, 10, 15, 20, 25], dtype="uint64") + ) + tm.assert_index_equal(outer, outer2) + tm.assert_index_equal(outer, expected) + + inner = index_large.join(other, how="inner") + inner2 = other.join(index_large, how="inner") + expected = Index(2**63 + np.array([10, 20], dtype="uint64")) + tm.assert_index_equal(inner, inner2) + tm.assert_index_equal(inner, expected) + + left = index_large.join(other, how="left") + tm.assert_index_equal(left, index_large.astype(object)) + + left2 = other.join(index_large, how="left") + tm.assert_index_equal(left2, other) + + right = index_large.join(other, how="right") + tm.assert_index_equal(right, other) + + right2 = other.join(index_large, how="right") + tm.assert_index_equal(right2, index_large.astype(object)) + + def test_join_outer(self, index_large): + other = Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) + other_mono = Index(2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64")) + + # not monotonic + # guarantee of sortedness + res, lidx, ridx = index_large.join(other, how="outer", return_indexers=True) + noidx_res = index_large.join(other, how="outer") + tm.assert_index_equal(res, noidx_res) + + eres = Index( + 2**63 + np.array([0, 1, 2, 7, 10, 12, 15, 20, 25], dtype="uint64") + ) + elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) + eridx = np.array([-1, 3, 4, 0, 5, 1, -1, -1, 2], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.uint64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index_large.join( + other_mono, how="outer", return_indexers=True + ) + noidx_res = index_large.join(other_mono, how="outer") + tm.assert_index_equal(res, noidx_res) + + elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) + eridx = np.array([-1, 0, 1, 2, 3, 4, -1, -1, 5], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.uint64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_numeric.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_numeric.py new file mode 100644 index 00000000..8cd29580 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_numeric.py @@ -0,0 +1,529 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + Series, +) +import pandas._testing as tm + + +class TestFloatNumericIndex: + @pytest.fixture(params=[np.float64, np.float32]) + def dtype(self, request): + return request.param + + @pytest.fixture + def simple_index(self, dtype): + values = np.arange(5, dtype=dtype) + return Index(values) + + @pytest.fixture( + params=[ + [1.5, 2, 3, 4, 5], + [0.0, 2.5, 5.0, 7.5, 10.0], + [5, 4, 3, 2, 1.5], + [10.0, 7.5, 5.0, 2.5, 0.0], + ], + ids=["mixed", "float", "mixed_dec", "float_dec"], + ) + def index(self, request, dtype): + return Index(request.param, dtype=dtype) + + @pytest.fixture + def mixed_index(self, dtype): + return Index([1.5, 2, 3, 4, 5], 
dtype=dtype) + + @pytest.fixture + def float_index(self, dtype): + return Index([0.0, 2.5, 5.0, 7.5, 10.0], dtype=dtype) + + def test_repr_roundtrip(self, index): + tm.assert_index_equal(eval(repr(index)), index, exact=True) + + def check_coerce(self, a, b, is_float_index=True): + assert a.equals(b) + tm.assert_index_equal(a, b, exact=False) + if is_float_index: + assert isinstance(b, Index) + else: + assert type(b) is Index + + def test_constructor_from_list_no_dtype(self): + index = Index([1.5, 2.5, 3.5]) + assert index.dtype == np.float64 + + def test_constructor(self, dtype): + index_cls = Index + + # explicit construction + index = index_cls([1, 2, 3, 4, 5], dtype=dtype) + + assert isinstance(index, index_cls) + assert index.dtype == dtype + + expected = np.array([1, 2, 3, 4, 5], dtype=dtype) + tm.assert_numpy_array_equal(index.values, expected) + + index = index_cls(np.array([1, 2, 3, 4, 5]), dtype=dtype) + assert isinstance(index, index_cls) + assert index.dtype == dtype + + index = index_cls([1.0, 2, 3, 4, 5], dtype=dtype) + assert isinstance(index, index_cls) + assert index.dtype == dtype + + index = index_cls(np.array([1.0, 2, 3, 4, 5]), dtype=dtype) + assert isinstance(index, index_cls) + assert index.dtype == dtype + + index = index_cls([1.0, 2, 3, 4, 5], dtype=dtype) + assert isinstance(index, index_cls) + assert index.dtype == dtype + + index = index_cls(np.array([1.0, 2, 3, 4, 5]), dtype=dtype) + assert isinstance(index, index_cls) + assert index.dtype == dtype + + # nan handling + result = index_cls([np.nan, np.nan], dtype=dtype) + assert pd.isna(result.values).all() + + result = index_cls(np.array([np.nan]), dtype=dtype) + assert pd.isna(result.values).all() + + def test_constructor_invalid(self): + index_cls = Index + cls_name = index_cls.__name__ + # invalid + msg = ( + rf"{cls_name}\(\.\.\.\) must be called with a collection of " + r"some kind, 0\.0 was passed" + ) + with pytest.raises(TypeError, match=msg): + index_cls(0.0) + + def test_constructor_coerce(self, mixed_index, float_index): + self.check_coerce(mixed_index, Index([1.5, 2, 3, 4, 5])) + self.check_coerce(float_index, Index(np.arange(5) * 2.5)) + + result = Index(np.array(np.arange(5) * 2.5, dtype=object)) + assert result.dtype == object # as of 2.0 to match Series + self.check_coerce(float_index, result.astype("float64")) + + def test_constructor_explicit(self, mixed_index, float_index): + # these don't auto convert + self.check_coerce( + float_index, Index((np.arange(5) * 2.5), dtype=object), is_float_index=False + ) + self.check_coerce( + mixed_index, Index([1.5, 2, 3, 4, 5], dtype=object), is_float_index=False + ) + + def test_type_coercion_fail(self, any_int_numpy_dtype): + # see gh-15832 + msg = "Trying to coerce float values to integers" + with pytest.raises(ValueError, match=msg): + Index([1, 2, 3.5], dtype=any_int_numpy_dtype) + + def test_equals_numeric(self): + index_cls = Index + + idx = index_cls([1.0, 2.0]) + assert idx.equals(idx) + assert idx.identical(idx) + + idx2 = index_cls([1.0, 2.0]) + assert idx.equals(idx2) + + idx = index_cls([1.0, np.nan]) + assert idx.equals(idx) + assert idx.identical(idx) + + idx2 = index_cls([1.0, np.nan]) + assert idx.equals(idx2) + + @pytest.mark.parametrize( + "other", + ( + Index([1, 2], dtype=np.int64), + Index([1.0, 2.0], dtype=object), + Index([1, 2], dtype=object), + ), + ) + def test_equals_numeric_other_index_type(self, other): + idx = Index([1.0, 2.0]) + assert idx.equals(other) + assert other.equals(idx) + + @pytest.mark.parametrize( + "vals", + [ + 
pd.date_range("2016-01-01", periods=3), + pd.timedelta_range("1 Day", periods=3), + ], + ) + def test_lookups_datetimelike_values(self, vals, dtype): + # If we have datetime64 or timedelta64 values, make sure they are + # wrapped correctly GH#31163 + ser = Series(vals, index=range(3, 6)) + ser.index = ser.index.astype(dtype) + + expected = vals[1] + + result = ser[4.0] + assert isinstance(result, type(expected)) and result == expected + result = ser[4] + assert isinstance(result, type(expected)) and result == expected + + result = ser.loc[4.0] + assert isinstance(result, type(expected)) and result == expected + result = ser.loc[4] + assert isinstance(result, type(expected)) and result == expected + + result = ser.at[4.0] + assert isinstance(result, type(expected)) and result == expected + # GH#31329 .at[4] should cast to 4.0, matching .loc behavior + result = ser.at[4] + assert isinstance(result, type(expected)) and result == expected + + result = ser.iloc[1] + assert isinstance(result, type(expected)) and result == expected + + result = ser.iat[1] + assert isinstance(result, type(expected)) and result == expected + + def test_doesnt_contain_all_the_things(self): + idx = Index([np.nan]) + assert not idx.isin([0]).item() + assert not idx.isin([1]).item() + assert idx.isin([np.nan]).item() + + def test_nan_multiple_containment(self): + index_cls = Index + + idx = index_cls([1.0, np.nan]) + tm.assert_numpy_array_equal(idx.isin([1.0]), np.array([True, False])) + tm.assert_numpy_array_equal(idx.isin([2.0, np.pi]), np.array([False, False])) + tm.assert_numpy_array_equal(idx.isin([np.nan]), np.array([False, True])) + tm.assert_numpy_array_equal(idx.isin([1.0, np.nan]), np.array([True, True])) + idx = index_cls([1.0, 2.0]) + tm.assert_numpy_array_equal(idx.isin([np.nan]), np.array([False, False])) + + def test_fillna_float64(self): + index_cls = Index + # GH 11343 + idx = Index([1.0, np.nan, 3.0], dtype=float, name="x") + # can't downcast + exp = Index([1.0, 0.1, 3.0], name="x") + tm.assert_index_equal(idx.fillna(0.1), exp, exact=True) + + # downcast + exp = index_cls([1.0, 2.0, 3.0], name="x") + tm.assert_index_equal(idx.fillna(2), exp) + + # object + exp = Index([1.0, "obj", 3.0], name="x") + tm.assert_index_equal(idx.fillna("obj"), exp, exact=True) + + def test_logical_compat(self, simple_index): + idx = simple_index + assert idx.all() == idx.values.all() + assert idx.any() == idx.values.any() + + assert idx.all() == idx.to_series().all() + assert idx.any() == idx.to_series().any() + + +class TestNumericInt: + @pytest.fixture(params=[np.int64, np.int32, np.int16, np.int8, np.uint64]) + def dtype(self, request): + return request.param + + @pytest.fixture + def simple_index(self, dtype): + return Index(range(0, 20, 2), dtype=dtype) + + def test_is_monotonic(self): + index_cls = Index + + index = index_cls([1, 2, 3, 4]) + assert index.is_monotonic_increasing is True + assert index.is_monotonic_increasing is True + assert index._is_strictly_monotonic_increasing is True + assert index.is_monotonic_decreasing is False + assert index._is_strictly_monotonic_decreasing is False + + index = index_cls([4, 3, 2, 1]) + assert index.is_monotonic_increasing is False + assert index._is_strictly_monotonic_increasing is False + assert index._is_strictly_monotonic_decreasing is True + + index = index_cls([1]) + assert index.is_monotonic_increasing is True + assert index.is_monotonic_increasing is True + assert index.is_monotonic_decreasing is True + assert index._is_strictly_monotonic_increasing is True + assert 
index._is_strictly_monotonic_decreasing is True + + def test_is_strictly_monotonic(self): + index_cls = Index + + index = index_cls([1, 1, 2, 3]) + assert index.is_monotonic_increasing is True + assert index._is_strictly_monotonic_increasing is False + + index = index_cls([3, 2, 1, 1]) + assert index.is_monotonic_decreasing is True + assert index._is_strictly_monotonic_decreasing is False + + index = index_cls([1, 1]) + assert index.is_monotonic_increasing + assert index.is_monotonic_decreasing + assert not index._is_strictly_monotonic_increasing + assert not index._is_strictly_monotonic_decreasing + + def test_logical_compat(self, simple_index): + idx = simple_index + assert idx.all() == idx.values.all() + assert idx.any() == idx.values.any() + + def test_identical(self, simple_index, dtype): + index = simple_index + + idx = Index(index.copy()) + assert idx.identical(index) + + same_values_different_type = Index(idx, dtype=object) + assert not idx.identical(same_values_different_type) + + idx = index.astype(dtype=object) + idx = idx.rename("foo") + same_values = Index(idx, dtype=object) + assert same_values.identical(idx) + + assert not idx.identical(index) + assert Index(same_values, name="foo", dtype=object).identical(idx) + + assert not index.astype(dtype=object).identical(index.astype(dtype=dtype)) + + def test_cant_or_shouldnt_cast(self, dtype): + msg = r"invalid literal for int\(\) with base 10: 'foo'" + + # can't + data = ["foo", "bar", "baz"] + with pytest.raises(ValueError, match=msg): + Index(data, dtype=dtype) + + def test_view_index(self, simple_index): + index = simple_index + index.view(Index) + + def test_prevent_casting(self, simple_index): + index = simple_index + result = index.astype("O") + assert result.dtype == np.object_ + + +class TestIntNumericIndex: + @pytest.fixture(params=[np.int64, np.int32, np.int16, np.int8]) + def dtype(self, request): + return request.param + + def test_constructor_from_list_no_dtype(self): + index = Index([1, 2, 3]) + assert index.dtype == np.int64 + + def test_constructor(self, dtype): + index_cls = Index + + # scalar raise Exception + msg = ( + rf"{index_cls.__name__}\(\.\.\.\) must be called with a collection of some " + "kind, 5 was passed" + ) + with pytest.raises(TypeError, match=msg): + index_cls(5) + + # copy + # pass list, coerce fine + index = index_cls([-5, 0, 1, 2], dtype=dtype) + arr = index.values.copy() + new_index = index_cls(arr, copy=True) + tm.assert_index_equal(new_index, index, exact=True) + val = arr[0] + 3000 + + # this should not change index + arr[0] = val + assert new_index[0] != val + + if dtype == np.int64: + # pass list, coerce fine + index = index_cls([-5, 0, 1, 2], dtype=dtype) + expected = Index([-5, 0, 1, 2], dtype=dtype) + tm.assert_index_equal(index, expected) + + # from iterable + index = index_cls(iter([-5, 0, 1, 2]), dtype=dtype) + expected = index_cls([-5, 0, 1, 2], dtype=dtype) + tm.assert_index_equal(index, expected, exact=True) + + # interpret list-like + expected = index_cls([5, 0], dtype=dtype) + for cls in [Index, index_cls]: + for idx in [ + cls([5, 0], dtype=dtype), + cls(np.array([5, 0]), dtype=dtype), + cls(Series([5, 0]), dtype=dtype), + ]: + tm.assert_index_equal(idx, expected) + + def test_constructor_corner(self, dtype): + index_cls = Index + + arr = np.array([1, 2, 3, 4], dtype=object) + + index = index_cls(arr, dtype=dtype) + assert index.values.dtype == index.dtype + if dtype == np.int64: + without_dtype = Index(arr) + # as of 2.0 we do not infer a dtype when we get an object-dtype + 
# ndarray of numbers, matching Series behavior + assert without_dtype.dtype == object + + tm.assert_index_equal(index, without_dtype.astype(np.int64)) + + # preventing casting + arr = np.array([1, "2", 3, "4"], dtype=object) + msg = "Trying to coerce float values to integers" + with pytest.raises(ValueError, match=msg): + index_cls(arr, dtype=dtype) + + def test_constructor_coercion_signed_to_unsigned( + self, + any_unsigned_int_numpy_dtype, + ): + # see gh-15832 + msg = "Trying to coerce negative values to unsigned integers" + + with pytest.raises(OverflowError, match=msg): + Index([-1], dtype=any_unsigned_int_numpy_dtype) + + def test_constructor_np_signed(self, any_signed_int_numpy_dtype): + # GH#47475 + scalar = np.dtype(any_signed_int_numpy_dtype).type(1) + result = Index([scalar]) + expected = Index([1], dtype=any_signed_int_numpy_dtype) + tm.assert_index_equal(result, expected, exact=True) + + def test_constructor_np_unsigned(self, any_unsigned_int_numpy_dtype): + # GH#47475 + scalar = np.dtype(any_unsigned_int_numpy_dtype).type(1) + result = Index([scalar]) + expected = Index([1], dtype=any_unsigned_int_numpy_dtype) + tm.assert_index_equal(result, expected, exact=True) + + def test_coerce_list(self): + # coerce things + arr = Index([1, 2, 3, 4]) + assert isinstance(arr, Index) + + # but not if explicit dtype passed + arr = Index([1, 2, 3, 4], dtype=object) + assert type(arr) is Index + + +class TestFloat16Index: + # float 16 indexes not supported + # GH 49535 + def test_constructor(self): + index_cls = Index + dtype = np.float16 + + msg = "float16 indexes are not supported" + + # explicit construction + with pytest.raises(NotImplementedError, match=msg): + index_cls([1, 2, 3, 4, 5], dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls(np.array([1, 2, 3, 4, 5]), dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls([1.0, 2, 3, 4, 5], dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls(np.array([1.0, 2, 3, 4, 5]), dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls([1.0, 2, 3, 4, 5], dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls(np.array([1.0, 2, 3, 4, 5]), dtype=dtype) + + # nan handling + with pytest.raises(NotImplementedError, match=msg): + index_cls([np.nan, np.nan], dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls(np.array([np.nan]), dtype=dtype) + + +@pytest.mark.parametrize( + "box", + [list, lambda x: np.array(x, dtype=object), lambda x: Index(x, dtype=object)], +) +def test_uint_index_does_not_convert_to_float64(box): + # https://github.com/pandas-dev/pandas/issues/28279 + # https://github.com/pandas-dev/pandas/issues/28023 + series = Series( + [0, 1, 2, 3, 4, 5], + index=[ + 7606741985629028552, + 17876870360202815256, + 17876870360202815256, + 13106359306506049338, + 8991270399732411471, + 8991270399732411472, + ], + ) + + result = series.loc[box([7606741985629028552, 17876870360202815256])] + + expected = Index( + [7606741985629028552, 17876870360202815256, 17876870360202815256], + dtype="uint64", + ) + tm.assert_index_equal(result.index, expected) + + tm.assert_equal(result, series.iloc[:3]) + + +def test_float64_index_equals(): + # https://github.com/pandas-dev/pandas/issues/35217 + float_index = Index([1.0, 2, 3]) + string_index = Index(["1", "2", "3"]) + + result = float_index.equals(string_index) + assert result is False + + result = string_index.equals(float_index) + assert result is False 
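+ + +def test_float64_index_equals_elementwise(): + # A minimal companion sketch (assumed behavior; this test is not part of + # the upstream pandas suite): element-wise `==` between a float index and + # a string index should be all-False, consistent with the .equals() + # behavior checked in test_float64_index_equals above. + float_index = Index([1.0, 2, 3]) + string_index = Index(["1", "2", "3"]) + result = float_index == string_index + assert not result.any()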
+ + +def test_map_dtype_inference_unsigned_to_signed(): + # GH#44609 cases where we don't retain dtype + idx = Index([1, 2, 3], dtype=np.uint64) + result = idx.map(lambda x: -x) + expected = Index([-1, -2, -3], dtype=np.int64) + tm.assert_index_equal(result, expected) + + +def test_map_dtype_inference_overflows(): + # GH#44609 case where we have to upcast + idx = Index(np.array([1, 2, 3], dtype=np.int8)) + result = idx.map(lambda x: x * 1000) + # TODO: we could plausibly try to infer down to int16 here + expected = Index([1000, 2000, 3000], dtype=np.int64) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_setops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_setops.py new file mode 100644 index 00000000..d3789f24 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/numeric/test_setops.py @@ -0,0 +1,165 @@ +from datetime import ( + datetime, + timedelta, +) + +import numpy as np +import pytest + +import pandas._testing as tm +from pandas.core.indexes.api import ( + Index, + RangeIndex, +) + + +@pytest.fixture +def index_large(): + # large values used in TestUInt64Index where no compat needed with int64/float64 + large = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25] + return Index(large, dtype=np.uint64) + + +class TestSetOps: + @pytest.mark.parametrize("dtype", ["f8", "u8", "i8"]) + def test_union_non_numeric(self, dtype): + # corner case, non-numeric + index = Index(np.arange(5, dtype=dtype), dtype=dtype) + assert index.dtype == dtype + + other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object) + result = index.union(other) + expected = Index(np.concatenate((index, other))) + tm.assert_index_equal(result, expected) + + result = other.union(index) + expected = Index(np.concatenate((other, index))) + tm.assert_index_equal(result, expected) + + def test_intersection(self): + index = Index(range(5), dtype=np.int64) + + other = Index([1, 2, 3, 4, 5]) + result = index.intersection(other) + expected = Index(np.sort(np.intersect1d(index.values, other.values))) + tm.assert_index_equal(result, expected) + + result = other.intersection(index) + expected = Index( + np.sort(np.asarray(np.intersect1d(index.values, other.values))) + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["int64", "uint64"]) + def test_int_float_union_dtype(self, dtype): + # https://github.com/pandas-dev/pandas/issues/26778 + # [u]int | float -> float + index = Index([0, 2, 3], dtype=dtype) + other = Index([0.5, 1.5], dtype=np.float64) + expected = Index([0.0, 0.5, 1.5, 2.0, 3.0], dtype=np.float64) + result = index.union(other) + tm.assert_index_equal(result, expected) + + result = other.union(index) + tm.assert_index_equal(result, expected) + + def test_range_float_union_dtype(self): + # https://github.com/pandas-dev/pandas/issues/26778 + index = RangeIndex(start=0, stop=3) + other = Index([0.5, 1.5], dtype=np.float64) + result = index.union(other) + expected = Index([0.0, 0.5, 1, 1.5, 2.0], dtype=np.float64) + tm.assert_index_equal(result, expected) + + result = other.union(index) + tm.assert_index_equal(result, expected) + + def test_range_uint64_union_dtype(self): + # https://github.com/pandas-dev/pandas/issues/26778 + index = RangeIndex(start=0, stop=3) + other = Index([0, 10], dtype=np.uint64) + result = index.union(other) + expected = Index([0, 1, 2, 10], dtype=object) + tm.assert_index_equal(result, expected) + + result = 
other.union(index) + tm.assert_index_equal(result, expected) + + def test_float64_index_difference(self): + # https://github.com/pandas-dev/pandas/issues/35217 + float_index = Index([1.0, 2, 3]) + string_index = Index(["1", "2", "3"]) + + result = float_index.difference(string_index) + tm.assert_index_equal(result, float_index) + + result = string_index.difference(float_index) + tm.assert_index_equal(result, string_index) + + def test_intersection_uint64_outside_int64_range(self, index_large): + other = Index([2**63, 2**63 + 5, 2**63 + 10, 2**63 + 15, 2**63 + 20]) + result = index_large.intersection(other) + expected = Index(np.sort(np.intersect1d(index_large.values, other.values))) + tm.assert_index_equal(result, expected) + + result = other.intersection(index_large) + expected = Index( + np.sort(np.asarray(np.intersect1d(index_large.values, other.values))) + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "index2,keeps_name", + [ + (Index([4, 7, 6, 5, 3], name="index"), True), + (Index([4, 7, 6, 5, 3], name="other"), False), + ], + ) + def test_intersection_monotonic(self, index2, keeps_name, sort): + index1 = Index([5, 3, 2, 4, 1], name="index") + expected = Index([5, 3, 4]) + + if keeps_name: + expected.name = "index" + + result = index1.intersection(index2, sort=sort) + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + + def test_symmetric_difference(self, sort): + # smoke + index1 = Index([5, 2, 3, 4], name="index1") + index2 = Index([2, 3, 4, 1]) + result = index1.symmetric_difference(index2, sort=sort) + expected = Index([5, 1]) + assert tm.equalContents(result, expected) + assert result.name is None + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + + +class TestSetOpsSort: + @pytest.mark.parametrize("slice_", [slice(None), slice(0)]) + def test_union_sort_other_special(self, slice_): + # https://github.com/pandas-dev/pandas/issues/24959 + + idx = Index([1, 0, 2]) + # default, sort=None + other = idx[slice_] + tm.assert_index_equal(idx.union(other), idx) + tm.assert_index_equal(other.union(idx), idx) + + # sort=False + tm.assert_index_equal(idx.union(other, sort=False), idx) + + @pytest.mark.parametrize("slice_", [slice(None), slice(0)]) + def test_union_sort_special_true(self, slice_): + idx = Index([1, 0, 2]) + # default, sort=None + other = idx[slice_] + + result = idx.union(other, sort=True) + expected = Index([0, 1, 2]) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/object/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/object/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/object/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/object/test_astype.py new file mode 100644 index 00000000..273b39b5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/object/test_astype.py @@ -0,0 +1,33 @@ +import pytest + +from pandas import ( + Index, + NaT, + Series, +) +import pandas._testing as tm + + +def test_astype_str_from_bytes(): + # https://github.com/pandas-dev/pandas/issues/38607 + # GH#49658 pre-2.0 Index called .values.astype(str) here, which effectively + # did a .decode() on the bytes object. 
In 2.0 we go through + # ensure_string_array which does f"{val}" + idx = Index(["あ", b"a"], dtype="object") + result = idx.astype(str) + expected = Index(["あ", "a"], dtype="object") + tm.assert_index_equal(result, expected) + + # while we're here, check that Series.astype behaves the same + result = Series(idx).astype(str) + expected = Series(expected) + tm.assert_series_equal(result, expected) + + +def test_astype_invalid_nas_to_tdt64_raises(): + # GH#45722 don't cast np.datetime64 NaTs to timedelta64 NaT + idx = Index([NaT.asm8] * 2, dtype=object) + + msg = r"Invalid type for timedelta scalar: " + with pytest.raises(TypeError, match=msg): + idx.astype("m8[ns]") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/object/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/object/test_indexing.py new file mode 100644 index 00000000..93d46ebd --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/object/test_indexing.py @@ -0,0 +1,208 @@ +from decimal import Decimal + +import numpy as np +import pytest + +from pandas._libs.missing import is_matching_na +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import Index +import pandas._testing as tm + + +class TestGetIndexer: + @pytest.mark.parametrize( + "method,expected", + [ + ("pad", np.array([-1, 0, 1, 1], dtype=np.intp)), + ("backfill", np.array([0, 0, 1, -1], dtype=np.intp)), + ], + ) + def test_get_indexer_strings(self, method, expected): + index = Index(["b", "c"]) + actual = index.get_indexer(["a", "b", "c", "d"], method=method) + + tm.assert_numpy_array_equal(actual, expected) + + def test_get_indexer_strings_raises(self): + index = Index(["b", "c"]) + + msg = r"unsupported operand type\(s\) for -: 'str' and 'str'" + with pytest.raises(TypeError, match=msg): + index.get_indexer(["a", "b", "c", "d"], method="nearest") + + with pytest.raises(TypeError, match=msg): + index.get_indexer(["a", "b", "c", "d"], method="pad", tolerance=2) + + with pytest.raises(TypeError, match=msg): + index.get_indexer( + ["a", "b", "c", "d"], method="pad", tolerance=[2, 2, 2, 2] + ) + + def test_get_indexer_with_NA_values( + self, unique_nulls_fixture, unique_nulls_fixture2 + ): + # GH#22332 + # check pairwise, that no pair of na values + # is mangled + if unique_nulls_fixture is unique_nulls_fixture2: + return # skip it, values are not unique + arr = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=object) + index = Index(arr, dtype=object) + result = index.get_indexer( + [unique_nulls_fixture, unique_nulls_fixture2, "Unknown"] + ) + expected = np.array([0, 1, -1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + +class TestGetIndexerNonUnique: + def test_get_indexer_non_unique_nas(self, nulls_fixture): + # even though this isn't non-unique, this should still work + index = Index(["a", "b", nulls_fixture]) + indexer, missing = index.get_indexer_non_unique([nulls_fixture]) + + expected_indexer = np.array([2], dtype=np.intp) + expected_missing = np.array([], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected_indexer) + tm.assert_numpy_array_equal(missing, expected_missing) + + # actually non-unique + index = Index(["a", nulls_fixture, "b", nulls_fixture]) + indexer, missing = index.get_indexer_non_unique([nulls_fixture]) + + expected_indexer = np.array([1, 3], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected_indexer) + tm.assert_numpy_array_equal(missing, expected_missing) + + # matching-but-not-identical 
nans + if is_matching_na(nulls_fixture, float("NaN")): + index = Index(["a", float("NaN"), "b", float("NaN")]) + match_but_not_identical = True + elif is_matching_na(nulls_fixture, Decimal("NaN")): + index = Index(["a", Decimal("NaN"), "b", Decimal("NaN")]) + match_but_not_identical = True + else: + match_but_not_identical = False + + if match_but_not_identical: + indexer, missing = index.get_indexer_non_unique([nulls_fixture]) + + expected_indexer = np.array([1, 3], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected_indexer) + tm.assert_numpy_array_equal(missing, expected_missing) + + @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning") + def test_get_indexer_non_unique_np_nats(self, np_nat_fixture, np_nat_fixture2): + expected_missing = np.array([], dtype=np.intp) + # matching-but-not-identical nats + if is_matching_na(np_nat_fixture, np_nat_fixture2): + # ensure nats are different objects + index = Index( + np.array( + ["2021-10-02", np_nat_fixture.copy(), np_nat_fixture2.copy()], + dtype=object, + ), + dtype=object, + ) + # pass as index to prevent target from being casted to DatetimeIndex + indexer, missing = index.get_indexer_non_unique( + Index([np_nat_fixture], dtype=object) + ) + expected_indexer = np.array([1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected_indexer) + tm.assert_numpy_array_equal(missing, expected_missing) + # dt64nat vs td64nat + else: + try: + np_nat_fixture == np_nat_fixture2 + except (TypeError, OverflowError): + # Numpy will raise on uncomparable types, like + # np.datetime64('NaT', 'Y') and np.datetime64('NaT', 'ps') + # https://github.com/numpy/numpy/issues/22762 + return + index = Index( + np.array( + [ + "2021-10-02", + np_nat_fixture, + np_nat_fixture2, + np_nat_fixture, + np_nat_fixture2, + ], + dtype=object, + ), + dtype=object, + ) + # pass as index to prevent target from being casted to DatetimeIndex + indexer, missing = index.get_indexer_non_unique( + Index([np_nat_fixture], dtype=object) + ) + expected_indexer = np.array([1, 3], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected_indexer) + tm.assert_numpy_array_equal(missing, expected_missing) + + +class TestSliceLocs: + @pytest.mark.parametrize( + "dtype", + [ + "object", + pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")), + ], + ) + @pytest.mark.parametrize( + "in_slice,expected", + [ + # error: Slice index must be an integer or None + (pd.IndexSlice[::-1], "yxdcb"), + (pd.IndexSlice["b":"y":-1], ""), # type: ignore[misc] + (pd.IndexSlice["b"::-1], "b"), # type: ignore[misc] + (pd.IndexSlice[:"b":-1], "yxdcb"), # type: ignore[misc] + (pd.IndexSlice[:"y":-1], "y"), # type: ignore[misc] + (pd.IndexSlice["y"::-1], "yxdcb"), # type: ignore[misc] + (pd.IndexSlice["y"::-4], "yb"), # type: ignore[misc] + # absent labels + (pd.IndexSlice[:"a":-1], "yxdcb"), # type: ignore[misc] + (pd.IndexSlice[:"a":-2], "ydb"), # type: ignore[misc] + (pd.IndexSlice["z"::-1], "yxdcb"), # type: ignore[misc] + (pd.IndexSlice["z"::-3], "yc"), # type: ignore[misc] + (pd.IndexSlice["m"::-1], "dcb"), # type: ignore[misc] + (pd.IndexSlice[:"m":-1], "yx"), # type: ignore[misc] + (pd.IndexSlice["a":"a":-1], ""), # type: ignore[misc] + (pd.IndexSlice["z":"z":-1], ""), # type: ignore[misc] + (pd.IndexSlice["m":"m":-1], ""), # type: ignore[misc] + ], + ) + def test_slice_locs_negative_step(self, in_slice, expected, dtype): + index = Index(list("bcdxy"), dtype=dtype) + + s_start, s_stop = index.slice_locs(in_slice.start, in_slice.stop, in_slice.step) 
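+ # slice_locs maps the (possibly absent) start/stop labels to positional
+ # bounds already oriented for the negative step, so the positional slice
+ # below must reproduce the expected label sequence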
+ result = index[s_start : s_stop : in_slice.step] + expected = Index(list(expected), dtype=dtype) + tm.assert_index_equal(result, expected) + + @td.skip_if_no("pyarrow") + def test_slice_locs_negative_step_oob(self): + index = Index(list("bcdxy"), dtype="string[pyarrow_numpy]") + + result = index[-10:5:1] + tm.assert_index_equal(result, index) + + result = index[4:-10:-1] + expected = Index(list("yxdcb"), dtype="string[pyarrow_numpy]") + tm.assert_index_equal(result, expected) + + def test_slice_locs_dup(self): + index = Index(["a", "a", "b", "c", "d", "d"]) + assert index.slice_locs("a", "d") == (0, 6) + assert index.slice_locs(end="d") == (0, 6) + assert index.slice_locs("a", "c") == (0, 4) + assert index.slice_locs("b", "d") == (2, 6) + + index2 = index[::-1] + assert index2.slice_locs("d", "a") == (0, 6) + assert index2.slice_locs(end="a") == (0, 6) + assert index2.slice_locs("d", "b") == (0, 4) + assert index2.slice_locs("c", "a") == (2, 6) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_asfreq.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_asfreq.py new file mode 100644 index 00000000..4f5cfbad --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_asfreq.py @@ -0,0 +1,138 @@ +import pytest + +from pandas import ( + PeriodIndex, + Series, + period_range, +) +import pandas._testing as tm + + +class TestPeriodIndex: + def test_asfreq(self): + pi1 = period_range(freq="A", start="1/1/2001", end="1/1/2001") + pi2 = period_range(freq="Q", start="1/1/2001", end="1/1/2001") + pi3 = period_range(freq="M", start="1/1/2001", end="1/1/2001") + pi4 = period_range(freq="D", start="1/1/2001", end="1/1/2001") + pi5 = period_range(freq="H", start="1/1/2001", end="1/1/2001 00:00") + pi6 = period_range(freq="Min", start="1/1/2001", end="1/1/2001 00:00") + pi7 = period_range(freq="S", start="1/1/2001", end="1/1/2001 00:00:00") + + assert pi1.asfreq("Q", "S") == pi2 + assert pi1.asfreq("Q", "s") == pi2 + assert pi1.asfreq("M", "start") == pi3 + assert pi1.asfreq("D", "StarT") == pi4 + assert pi1.asfreq("H", "beGIN") == pi5 + assert pi1.asfreq("Min", "S") == pi6 + assert pi1.asfreq("S", "S") == pi7 + + assert pi2.asfreq("A", "S") == pi1 + assert pi2.asfreq("M", "S") == pi3 + assert pi2.asfreq("D", "S") == pi4 + assert pi2.asfreq("H", "S") == pi5 + assert pi2.asfreq("Min", "S") == pi6 + assert pi2.asfreq("S", "S") == pi7 + + assert pi3.asfreq("A", "S") == pi1 + assert pi3.asfreq("Q", "S") == pi2 + assert pi3.asfreq("D", "S") == pi4 + assert pi3.asfreq("H", "S") == pi5 + assert pi3.asfreq("Min", "S") == pi6 + assert pi3.asfreq("S", "S") == pi7 + + assert pi4.asfreq("A", "S") == pi1 + assert pi4.asfreq("Q", "S") == pi2 + assert pi4.asfreq("M", "S") == pi3 + assert pi4.asfreq("H", "S") == pi5 + assert pi4.asfreq("Min", "S") == pi6 + assert pi4.asfreq("S", "S") == pi7 + + assert pi5.asfreq("A", "S") == pi1 + assert pi5.asfreq("Q", "S") == pi2 + assert pi5.asfreq("M", "S") == pi3 + assert pi5.asfreq("D", "S") == pi4 + assert pi5.asfreq("Min", 
"S") == pi6 + assert pi5.asfreq("S", "S") == pi7 + + assert pi6.asfreq("A", "S") == pi1 + assert pi6.asfreq("Q", "S") == pi2 + assert pi6.asfreq("M", "S") == pi3 + assert pi6.asfreq("D", "S") == pi4 + assert pi6.asfreq("H", "S") == pi5 + assert pi6.asfreq("S", "S") == pi7 + + assert pi7.asfreq("A", "S") == pi1 + assert pi7.asfreq("Q", "S") == pi2 + assert pi7.asfreq("M", "S") == pi3 + assert pi7.asfreq("D", "S") == pi4 + assert pi7.asfreq("H", "S") == pi5 + assert pi7.asfreq("Min", "S") == pi6 + + msg = "How must be one of S or E" + with pytest.raises(ValueError, match=msg): + pi7.asfreq("T", "foo") + result1 = pi1.asfreq("3M") + result2 = pi1.asfreq("M") + expected = period_range(freq="M", start="2001-12", end="2001-12") + tm.assert_numpy_array_equal(result1.asi8, expected.asi8) + assert result1.freqstr == "3M" + tm.assert_numpy_array_equal(result2.asi8, expected.asi8) + assert result2.freqstr == "M" + + def test_asfreq_nat(self): + idx = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-04"], freq="M") + result = idx.asfreq(freq="Q") + expected = PeriodIndex(["2011Q1", "2011Q1", "NaT", "2011Q2"], freq="Q") + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("freq", ["D", "3D"]) + def test_asfreq_mult_pi(self, freq): + pi = PeriodIndex(["2001-01", "2001-02", "NaT", "2001-03"], freq="2M") + + result = pi.asfreq(freq) + exp = PeriodIndex(["2001-02-28", "2001-03-31", "NaT", "2001-04-30"], freq=freq) + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + result = pi.asfreq(freq, how="S") + exp = PeriodIndex(["2001-01-01", "2001-02-01", "NaT", "2001-03-01"], freq=freq) + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + def test_asfreq_combined_pi(self): + pi = PeriodIndex(["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq="H") + exp = PeriodIndex(["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq="25H") + for freq, how in zip(["1D1H", "1H1D"], ["S", "E"]): + result = pi.asfreq(freq, how=how) + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + for freq in ["1D1H", "1H1D"]: + pi = PeriodIndex(["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq=freq) + result = pi.asfreq("H") + exp = PeriodIndex(["2001-01-02 00:00", "2001-01-03 02:00", "NaT"], freq="H") + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + pi = PeriodIndex(["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq=freq) + result = pi.asfreq("H", how="S") + exp = PeriodIndex(["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq="H") + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + def test_astype_asfreq(self): + pi1 = PeriodIndex(["2011-01-01", "2011-02-01", "2011-03-01"], freq="D") + exp = PeriodIndex(["2011-01", "2011-02", "2011-03"], freq="M") + tm.assert_index_equal(pi1.asfreq("M"), exp) + tm.assert_index_equal(pi1.astype("period[M]"), exp) + + exp = PeriodIndex(["2011-01", "2011-02", "2011-03"], freq="3M") + tm.assert_index_equal(pi1.asfreq("3M"), exp) + tm.assert_index_equal(pi1.astype("period[3M]"), exp) + + def test_asfreq_with_different_n(self): + ser = Series([1, 2], index=PeriodIndex(["2020-01", "2020-03"], freq="2M")) + result = ser.asfreq("M") + + excepted = Series([1, 2], index=PeriodIndex(["2020-02", "2020-04"], freq="M")) + tm.assert_series_equal(result, excepted) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_astype.py new file mode 100644 index 
00000000..e54cd73a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_astype.py @@ -0,0 +1,148 @@ +import numpy as np +import pytest + +from pandas import ( + CategoricalIndex, + DatetimeIndex, + Index, + NaT, + Period, + PeriodIndex, + period_range, +) +import pandas._testing as tm + + +class TestPeriodIndexAsType: + @pytest.mark.parametrize("dtype", [float, "timedelta64", "timedelta64[ns]"]) + def test_astype_raises(self, dtype): + # GH#13149, GH#13209 + idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.nan], freq="D") + msg = "Cannot cast PeriodIndex to dtype" + with pytest.raises(TypeError, match=msg): + idx.astype(dtype) + + def test_astype_conversion(self): + # GH#13149, GH#13209 + idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.nan], freq="D", name="idx") + + result = idx.astype(object) + expected = Index( + [Period("2016-05-16", freq="D")] + [Period(NaT, freq="D")] * 3, + dtype="object", + name="idx", + ) + tm.assert_index_equal(result, expected) + + result = idx.astype(np.int64) + expected = Index( + [16937] + [-9223372036854775808] * 3, dtype=np.int64, name="idx" + ) + tm.assert_index_equal(result, expected) + + result = idx.astype(str) + expected = Index([str(x) for x in idx], name="idx") + tm.assert_index_equal(result, expected) + + idx = period_range("1990", "2009", freq="A", name="idx") + result = idx.astype("i8") + tm.assert_index_equal(result, Index(idx.asi8, name="idx")) + tm.assert_numpy_array_equal(result.values, idx.asi8) + + def test_astype_uint(self): + arr = period_range("2000", periods=2, name="idx") + + with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): + arr.astype("uint64") + with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): + arr.astype("uint32") + + def test_astype_object(self): + idx = PeriodIndex([], freq="M") + + exp = np.array([], dtype=object) + tm.assert_numpy_array_equal(idx.astype(object).values, exp) + tm.assert_numpy_array_equal(idx._mpl_repr(), exp) + + idx = PeriodIndex(["2011-01", NaT], freq="M") + + exp = np.array([Period("2011-01", freq="M"), NaT], dtype=object) + tm.assert_numpy_array_equal(idx.astype(object).values, exp) + tm.assert_numpy_array_equal(idx._mpl_repr(), exp) + + exp = np.array([Period("2011-01-01", freq="D"), NaT], dtype=object) + idx = PeriodIndex(["2011-01-01", NaT], freq="D") + tm.assert_numpy_array_equal(idx.astype(object).values, exp) + tm.assert_numpy_array_equal(idx._mpl_repr(), exp) + + # TODO: de-duplicate this version (from test_ops) with the one above + # (from test_period) + def test_astype_object2(self): + idx = period_range(start="2013-01-01", periods=4, freq="M", name="idx") + expected_list = [ + Period("2013-01-31", freq="M"), + Period("2013-02-28", freq="M"), + Period("2013-03-31", freq="M"), + Period("2013-04-30", freq="M"), + ] + expected = Index(expected_list, dtype=object, name="idx") + result = idx.astype(object) + assert isinstance(result, Index) + assert result.dtype == object + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert idx.tolist() == expected_list + + idx = PeriodIndex( + ["2013-01-01", "2013-01-02", "NaT", "2013-01-04"], freq="D", name="idx" + ) + expected_list = [ + Period("2013-01-01", freq="D"), + Period("2013-01-02", freq="D"), + Period("NaT", freq="D"), + Period("2013-01-04", freq="D"), + ] + expected = Index(expected_list, dtype=object, name="idx") + result = idx.astype(object) + assert isinstance(result, Index) + assert result.dtype == object + tm.assert_index_equal(result, 
expected) + for i in [0, 1, 3]: + assert result[i] == expected[i] + assert result[2] is NaT + assert result.name == expected.name + + result_list = idx.tolist() + for i in [0, 1, 3]: + assert result_list[i] == expected_list[i] + assert result_list[2] is NaT + + def test_astype_category(self): + obj = period_range("2000", periods=2, name="idx") + result = obj.astype("category") + expected = CategoricalIndex( + [Period("2000-01-01", freq="D"), Period("2000-01-02", freq="D")], name="idx" + ) + tm.assert_index_equal(result, expected) + + result = obj._data.astype("category") + expected = expected.values + tm.assert_categorical_equal(result, expected) + + def test_astype_array_fallback(self): + obj = period_range("2000", periods=2, name="idx") + result = obj.astype(bool) + expected = Index(np.array([True, True]), name="idx") + tm.assert_index_equal(result, expected) + + result = obj._data.astype(bool) + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) + + def test_period_astype_to_timestamp(self): + pi = PeriodIndex(["2011-01", "2011-02", "2011-03"], freq="M") + + exp = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], tz="US/Eastern") + res = pi.astype("datetime64[ns, US/Eastern]") + tm.assert_index_equal(res, exp) + assert res.freq == exp.freq diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_factorize.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_factorize.py new file mode 100644 index 00000000..7705da02 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_factorize.py @@ -0,0 +1,54 @@ +import numpy as np + +from pandas import ( + PeriodIndex, + factorize, +) +import pandas._testing as tm + + +class TestFactorize: + def test_factorize(self): + idx1 = PeriodIndex( + ["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"], freq="M" + ) + + exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp) + exp_idx = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M") + + arr, idx = idx1.factorize() + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, exp_idx) + + arr, idx = idx1.factorize(sort=True) + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, exp_idx) + + idx2 = PeriodIndex( + ["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"], freq="M" + ) + + exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp) + arr, idx = idx2.factorize(sort=True) + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, exp_idx) + + exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp) + exp_idx = PeriodIndex(["2014-03", "2014-02", "2014-01"], freq="M") + arr, idx = idx2.factorize() + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, exp_idx) + + def test_factorize_complex(self): # TODO: what is this test doing here? + # GH 17927 + array = [1, 2, 2 + 1j] + msg = "factorize with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + labels, uniques = factorize(array) + + expected_labels = np.array([0, 1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(labels, expected_labels) + + # Should return a complex dtype in the future + expected_uniques = np.array([(1 + 0j), (2 + 0j), (2 + 1j)], dtype=object) + tm.assert_numpy_array_equal(uniques, expected_uniques) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_fillna.py
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_fillna.py new file mode 100644 index 00000000..12a07bac --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_fillna.py @@ -0,0 +1,41 @@ +from pandas import ( + Index, + NaT, + Period, + PeriodIndex, +) +import pandas._testing as tm + + +class TestFillNA: + def test_fillna_period(self): + # GH#11343 + idx = PeriodIndex(["2011-01-01 09:00", NaT, "2011-01-01 11:00"], freq="H") + + exp = PeriodIndex( + ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"], freq="H" + ) + result = idx.fillna(Period("2011-01-01 10:00", freq="H")) + tm.assert_index_equal(result, exp) + + exp = Index( + [ + Period("2011-01-01 09:00", freq="H"), + "x", + Period("2011-01-01 11:00", freq="H"), + ], + dtype=object, + ) + result = idx.fillna("x") + tm.assert_index_equal(result, exp) + + exp = Index( + [ + Period("2011-01-01 09:00", freq="H"), + Period("2011-01-01", freq="D"), + Period("2011-01-01 11:00", freq="H"), + ], + dtype=object, + ) + result = idx.fillna(Period("2011-01-01", freq="D")) + tm.assert_index_equal(result, exp) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_insert.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_insert.py new file mode 100644 index 00000000..32bbe09d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_insert.py @@ -0,0 +1,18 @@ +import numpy as np +import pytest + +from pandas import ( + NaT, + PeriodIndex, + period_range, +) +import pandas._testing as tm + + +class TestInsert: + @pytest.mark.parametrize("na", [np.nan, NaT, None]) + def test_insert(self, na): + # GH#18295 (test missing) + expected = PeriodIndex(["2017Q1", NaT, "2017Q2", "2017Q3", "2017Q4"], freq="Q") + result = period_range("2017Q1", periods=4, freq="Q").insert(1, na) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_is_full.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_is_full.py new file mode 100644 index 00000000..490f199a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_is_full.py @@ -0,0 +1,23 @@ +import pytest + +from pandas import PeriodIndex + + +def test_is_full(): + index = PeriodIndex([2005, 2007, 2009], freq="A") + assert not index.is_full + + index = PeriodIndex([2005, 2006, 2007], freq="A") + assert index.is_full + + index = PeriodIndex([2005, 2005, 2007], freq="A") + assert not index.is_full + + index = PeriodIndex([2005, 2005, 2006], freq="A") + assert index.is_full + + index = PeriodIndex([2006, 2005, 2005], freq="A") + with pytest.raises(ValueError, match="Index is not monotonic"): + index.is_full + + assert index[:0].is_full diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_repeat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_repeat.py new file mode 100644 index 00000000..fc344b06 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_repeat.py @@ -0,0 +1,26 @@ +import numpy as np +import pytest + +from pandas import ( + PeriodIndex, + period_range, +) +import pandas._testing as tm + + +class TestRepeat: + @pytest.mark.parametrize("use_numpy", [True, False]) + @pytest.mark.parametrize( + "index", + [ + period_range("2000-01-01", periods=3, freq="D"), + 
period_range("2001-01-01", periods=3, freq="2D"), + PeriodIndex(["2001-01", "NaT", "2003-01"], freq="M"), + ], + ) + def test_repeat_freqstr(self, index, use_numpy): + # GH#10183 + expected = PeriodIndex([per for per in index for _ in range(3)]) + result = np.repeat(index, 3) if use_numpy else index.repeat(3) + tm.assert_index_equal(result, expected) + assert result.freqstr == index.freqstr diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_shift.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_shift.py new file mode 100644 index 00000000..48dc5f0e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_shift.py @@ -0,0 +1,122 @@ +import numpy as np +import pytest + +from pandas import ( + PeriodIndex, + period_range, +) +import pandas._testing as tm + + +class TestPeriodIndexShift: + # --------------------------------------------------------------- + # PeriodIndex.shift is used by __add__ and __sub__ + + def test_pi_shift_ndarray(self): + idx = PeriodIndex( + ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx" + ) + result = idx.shift(np.array([1, 2, 3, 4])) + expected = PeriodIndex( + ["2011-02", "2011-04", "NaT", "2011-08"], freq="M", name="idx" + ) + tm.assert_index_equal(result, expected) + + result = idx.shift(np.array([1, -2, 3, -4])) + expected = PeriodIndex( + ["2011-02", "2010-12", "NaT", "2010-12"], freq="M", name="idx" + ) + tm.assert_index_equal(result, expected) + + def test_shift(self): + pi1 = period_range(freq="A", start="1/1/2001", end="12/1/2009") + pi2 = period_range(freq="A", start="1/1/2002", end="12/1/2010") + + tm.assert_index_equal(pi1.shift(0), pi1) + + assert len(pi1) == len(pi2) + tm.assert_index_equal(pi1.shift(1), pi2) + + pi1 = period_range(freq="A", start="1/1/2001", end="12/1/2009") + pi2 = period_range(freq="A", start="1/1/2000", end="12/1/2008") + assert len(pi1) == len(pi2) + tm.assert_index_equal(pi1.shift(-1), pi2) + + pi1 = period_range(freq="M", start="1/1/2001", end="12/1/2009") + pi2 = period_range(freq="M", start="2/1/2001", end="1/1/2010") + assert len(pi1) == len(pi2) + tm.assert_index_equal(pi1.shift(1), pi2) + + pi1 = period_range(freq="M", start="1/1/2001", end="12/1/2009") + pi2 = period_range(freq="M", start="12/1/2000", end="11/1/2009") + assert len(pi1) == len(pi2) + tm.assert_index_equal(pi1.shift(-1), pi2) + + pi1 = period_range(freq="D", start="1/1/2001", end="12/1/2009") + pi2 = period_range(freq="D", start="1/2/2001", end="12/2/2009") + assert len(pi1) == len(pi2) + tm.assert_index_equal(pi1.shift(1), pi2) + + pi1 = period_range(freq="D", start="1/1/2001", end="12/1/2009") + pi2 = period_range(freq="D", start="12/31/2000", end="11/30/2009") + assert len(pi1) == len(pi2) + tm.assert_index_equal(pi1.shift(-1), pi2) + + def test_shift_corner_cases(self): + # GH#9903 + idx = PeriodIndex([], name="xxx", freq="H") + + msg = "`freq` argument is not supported for PeriodIndex.shift" + with pytest.raises(TypeError, match=msg): + # period shift doesn't accept freq + idx.shift(1, freq="H") + + tm.assert_index_equal(idx.shift(0), idx) + tm.assert_index_equal(idx.shift(3), idx) + + idx = PeriodIndex( + ["2011-01-01 10:00", "2011-01-01 11:00", "2011-01-01 12:00"], + name="xxx", + freq="H", + ) + tm.assert_index_equal(idx.shift(0), idx) + exp = PeriodIndex( + ["2011-01-01 13:00", "2011-01-01 14:00", "2011-01-01 15:00"], + name="xxx", + freq="H", + ) + tm.assert_index_equal(idx.shift(3), exp) + exp = PeriodIndex( + 
["2011-01-01 07:00", "2011-01-01 08:00", "2011-01-01 09:00"], + name="xxx", + freq="H", + ) + tm.assert_index_equal(idx.shift(-3), exp) + + def test_shift_nat(self): + idx = PeriodIndex( + ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx" + ) + result = idx.shift(1) + expected = PeriodIndex( + ["2011-02", "2011-03", "NaT", "2011-05"], freq="M", name="idx" + ) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + + def test_shift_gh8083(self): + # test shift for PeriodIndex + # GH#8083 + drange = period_range("20130101", periods=5, freq="D") + result = drange.shift(1) + expected = PeriodIndex( + ["2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05", "2013-01-06"], + freq="D", + ) + tm.assert_index_equal(result, expected) + + def test_shift_periods(self): + # GH #22458 : argument 'n' was deprecated in favor of 'periods' + idx = period_range(freq="A", start="1/1/2001", end="12/1/2009") + tm.assert_index_equal(idx.shift(periods=0), idx) + tm.assert_index_equal(idx.shift(0), idx) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_to_timestamp.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_to_timestamp.py new file mode 100644 index 00000000..8bb0c351 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_to_timestamp.py @@ -0,0 +1,132 @@ +from datetime import datetime + +import numpy as np +import pytest + +from pandas import ( + DatetimeIndex, + NaT, + PeriodIndex, + Timedelta, + Timestamp, + date_range, + period_range, +) +import pandas._testing as tm + + +class TestToTimestamp: + def test_to_timestamp_non_contiguous(self): + # GH#44100 + dti = date_range("2021-10-18", periods=9, freq="D") + pi = dti.to_period() + + result = pi[::2].to_timestamp() + expected = dti[::2] + tm.assert_index_equal(result, expected) + + result = pi._data[::2].to_timestamp() + expected = dti._data[::2] + # TODO: can we get the freq to round-trip? 
+ tm.assert_datetime_array_equal(result, expected, check_freq=False) + + result = pi[::-1].to_timestamp() + expected = dti[::-1] + tm.assert_index_equal(result, expected) + + result = pi._data[::-1].to_timestamp() + expected = dti._data[::-1] + tm.assert_datetime_array_equal(result, expected, check_freq=False) + + result = pi[::2][::-1].to_timestamp() + expected = dti[::2][::-1] + tm.assert_index_equal(result, expected) + + result = pi._data[::2][::-1].to_timestamp() + expected = dti._data[::2][::-1] + tm.assert_datetime_array_equal(result, expected, check_freq=False) + + def test_to_timestamp_freq(self): + idx = period_range("2017", periods=12, freq="A-DEC") + result = idx.to_timestamp() + expected = date_range("2017", periods=12, freq="AS-JAN") + tm.assert_index_equal(result, expected) + + def test_to_timestamp_pi_nat(self): + # GH#7228 + index = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="M", name="idx") + + result = index.to_timestamp("D") + expected = DatetimeIndex( + [NaT, datetime(2011, 1, 1), datetime(2011, 2, 1)], name="idx" + ) + tm.assert_index_equal(result, expected) + assert result.name == "idx" + + result2 = result.to_period(freq="M") + tm.assert_index_equal(result2, index) + assert result2.name == "idx" + + result3 = result.to_period(freq="3M") + exp = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="3M", name="idx") + tm.assert_index_equal(result3, exp) + assert result3.freqstr == "3M" + + msg = "Frequency must be positive, because it represents span: -2A" + with pytest.raises(ValueError, match=msg): + result.to_period(freq="-2A") + + def test_to_timestamp_preserve_name(self): + index = period_range(freq="A", start="1/1/2001", end="12/1/2009", name="foo") + assert index.name == "foo" + + conv = index.to_timestamp("D") + assert conv.name == "foo" + + def test_to_timestamp_quarterly_bug(self): + years = np.arange(1960, 2000).repeat(4) + quarters = np.tile(list(range(1, 5)), 40) + + pindex = PeriodIndex(year=years, quarter=quarters) + + stamps = pindex.to_timestamp("D", "end") + expected = DatetimeIndex([x.to_timestamp("D", "end") for x in pindex]) + tm.assert_index_equal(stamps, expected) + assert stamps.freq == expected.freq + + def test_to_timestamp_pi_mult(self): + idx = PeriodIndex(["2011-01", "NaT", "2011-02"], freq="2M", name="idx") + + result = idx.to_timestamp() + expected = DatetimeIndex(["2011-01-01", "NaT", "2011-02-01"], name="idx") + tm.assert_index_equal(result, expected) + + result = idx.to_timestamp(how="E") + expected = DatetimeIndex(["2011-02-28", "NaT", "2011-03-31"], name="idx") + expected = expected + Timedelta(1, "D") - Timedelta(1, "ns") + tm.assert_index_equal(result, expected) + + def test_to_timestamp_pi_combined(self): + idx = period_range(start="2011", periods=2, freq="1D1H", name="idx") + + result = idx.to_timestamp() + expected = DatetimeIndex(["2011-01-01 00:00", "2011-01-02 01:00"], name="idx") + tm.assert_index_equal(result, expected) + + result = idx.to_timestamp(how="E") + expected = DatetimeIndex( + ["2011-01-02 00:59:59", "2011-01-03 01:59:59"], name="idx" + ) + expected = expected + Timedelta(1, "s") - Timedelta(1, "ns") + tm.assert_index_equal(result, expected) + + result = idx.to_timestamp(how="E", freq="H") + expected = DatetimeIndex(["2011-01-02 00:00", "2011-01-03 01:00"], name="idx") + expected = expected + Timedelta(1, "h") - Timedelta(1, "ns") + tm.assert_index_equal(result, expected) + + def test_to_timestamp_1703(self): + index = period_range("1/1/2012", periods=4, freq="D") + + result = index.to_timestamp() + assert 
result[0] == Timestamp("1/1/2012") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_constructors.py new file mode 100644 index 00000000..7d4d6816 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_constructors.py @@ -0,0 +1,567 @@ +import numpy as np +import pytest + +from pandas._libs.tslibs.period import IncompatibleFrequency + +from pandas.core.dtypes.dtypes import PeriodDtype + +from pandas import ( + Index, + NaT, + Period, + PeriodIndex, + Series, + date_range, + offsets, + period_range, +) +import pandas._testing as tm +from pandas.core.arrays import PeriodArray + + +class TestPeriodIndex: + def test_construction_base_constructor(self): + # GH 13664 + arr = [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")] + tm.assert_index_equal(Index(arr), PeriodIndex(arr)) + tm.assert_index_equal(Index(np.array(arr)), PeriodIndex(np.array(arr))) + + arr = [np.nan, NaT, Period("2011-03", freq="M")] + tm.assert_index_equal(Index(arr), PeriodIndex(arr)) + tm.assert_index_equal(Index(np.array(arr)), PeriodIndex(np.array(arr))) + + arr = [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="D")] + tm.assert_index_equal(Index(arr), Index(arr, dtype=object)) + + tm.assert_index_equal(Index(np.array(arr)), Index(np.array(arr), dtype=object)) + + def test_base_constructor_with_period_dtype(self): + dtype = PeriodDtype("D") + values = ["2011-01-01", "2012-03-04", "2014-05-01"] + result = Index(values, dtype=dtype) + + expected = PeriodIndex(values, dtype=dtype) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "values_constructor", [list, np.array, PeriodIndex, PeriodArray._from_sequence] + ) + def test_index_object_dtype(self, values_constructor): + # Index(periods, dtype=object) is an Index (not an PeriodIndex) + periods = [ + Period("2011-01", freq="M"), + NaT, + Period("2011-03", freq="M"), + ] + values = values_constructor(periods) + result = Index(values, dtype=object) + + assert type(result) is Index + tm.assert_numpy_array_equal(result.values, np.array(values)) + + def test_constructor_use_start_freq(self): + # GH #1118 + msg1 = "Period with BDay freq is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg1): + p = Period("4/2/2012", freq="B") + msg2 = r"PeriodDtype\[B\] is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg2): + expected = period_range(start="4/2/2012", periods=10, freq="B") + + with tm.assert_produces_warning(FutureWarning, match=msg2): + index = period_range(start=p, periods=10) + tm.assert_index_equal(index, expected) + + def test_constructor_field_arrays(self): + # GH #1264 + + years = np.arange(1990, 2010).repeat(4)[2:-2] + quarters = np.tile(np.arange(1, 5), 20)[2:-2] + + index = PeriodIndex(year=years, quarter=quarters, freq="Q-DEC") + expected = period_range("1990Q3", "2009Q2", freq="Q-DEC") + tm.assert_index_equal(index, expected) + + index2 = PeriodIndex(year=years, quarter=quarters, freq="2Q-DEC") + tm.assert_numpy_array_equal(index.asi8, index2.asi8) + + index = PeriodIndex(year=years, quarter=quarters) + tm.assert_index_equal(index, expected) + + years = [2007, 2007, 2007] + months = [1, 2] + + msg = "Mismatched Period array lengths" + with pytest.raises(ValueError, match=msg): + PeriodIndex(year=years, month=months, freq="M") + with pytest.raises(ValueError, match=msg): + PeriodIndex(year=years, month=months, freq="2M") + + 
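+ # with equal-length field arrays the same construction succeeds: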
years = [2007, 2007, 2007] + months = [1, 2, 3] + idx = PeriodIndex(year=years, month=months, freq="M") + exp = period_range("2007-01", periods=3, freq="M") + tm.assert_index_equal(idx, exp) + + def test_constructor_U(self): + # U was used as undefined period + with pytest.raises(ValueError, match="Invalid frequency: X"): + period_range("2007-1-1", periods=500, freq="X") + + def test_constructor_nano(self): + idx = period_range( + start=Period(ordinal=1, freq="N"), end=Period(ordinal=4, freq="N"), freq="N" + ) + exp = PeriodIndex( + [ + Period(ordinal=1, freq="N"), + Period(ordinal=2, freq="N"), + Period(ordinal=3, freq="N"), + Period(ordinal=4, freq="N"), + ], + freq="N", + ) + tm.assert_index_equal(idx, exp) + + def test_constructor_arrays_negative_year(self): + years = np.arange(1960, 2000, dtype=np.int64).repeat(4) + quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40) + + pindex = PeriodIndex(year=years, quarter=quarters) + + tm.assert_index_equal(pindex.year, Index(years)) + tm.assert_index_equal(pindex.quarter, Index(quarters)) + + def test_constructor_invalid_quarters(self): + msg = "Quarter must be 1 <= q <= 4" + with pytest.raises(ValueError, match=msg): + PeriodIndex(year=range(2000, 2004), quarter=list(range(4)), freq="Q-DEC") + + def test_constructor_corner(self): + result = period_range("2007-01", periods=10.5, freq="M") + exp = period_range("2007-01", periods=10, freq="M") + tm.assert_index_equal(result, exp) + + def test_constructor_with_without_freq(self): + # GH53687 + start = Period("2002-01-01 00:00", freq="30T") + exp = period_range(start=start, periods=5, freq=start.freq) + result = period_range(start=start, periods=5) + tm.assert_index_equal(exp, result) + + def test_constructor_fromarraylike(self): + idx = period_range("2007-01", periods=20, freq="M") + + # values is an array of Period, thus can retrieve freq + tm.assert_index_equal(PeriodIndex(idx.values), idx) + tm.assert_index_equal(PeriodIndex(list(idx.values)), idx) + + msg = "freq not specified and cannot be inferred" + with pytest.raises(ValueError, match=msg): + PeriodIndex(idx.asi8) + with pytest.raises(ValueError, match=msg): + PeriodIndex(list(idx.asi8)) + + msg = "'Period' object is not iterable" + with pytest.raises(TypeError, match=msg): + PeriodIndex(data=Period("2007", freq="A")) + + result = PeriodIndex(iter(idx)) + tm.assert_index_equal(result, idx) + + result = PeriodIndex(idx) + tm.assert_index_equal(result, idx) + + result = PeriodIndex(idx, freq="M") + tm.assert_index_equal(result, idx) + + result = PeriodIndex(idx, freq=offsets.MonthEnd()) + tm.assert_index_equal(result, idx) + assert result.freq == "M" + + result = PeriodIndex(idx, freq="2M") + tm.assert_index_equal(result, idx.asfreq("2M")) + assert result.freq == "2M" + + result = PeriodIndex(idx, freq=offsets.MonthEnd(2)) + tm.assert_index_equal(result, idx.asfreq("2M")) + assert result.freq == "2M" + + result = PeriodIndex(idx, freq="D") + exp = idx.asfreq("D", "e") + tm.assert_index_equal(result, exp) + + def test_constructor_datetime64arr(self): + vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64) + vals = vals.view(np.dtype("M8[us]")) + + pi = PeriodIndex(vals, freq="D") + + expected = PeriodIndex(vals.astype("M8[ns]"), freq="D") + tm.assert_index_equal(pi, expected) + + @pytest.mark.parametrize("box", [None, "series", "index"]) + def test_constructor_datetime64arr_ok(self, box): + # https://github.com/pandas-dev/pandas/issues/23438 + data = date_range("2017", periods=4, freq="M") + if box is None: + data = 
data._values + elif box == "series": + data = Series(data) + + result = PeriodIndex(data, freq="D") + expected = PeriodIndex( + ["2017-01-31", "2017-02-28", "2017-03-31", "2017-04-30"], freq="D" + ) + tm.assert_index_equal(result, expected) + + def test_constructor_dtype(self): + # passing a period dtype should construct the index with that freq + idx = PeriodIndex(["2013-01", "2013-03"], dtype="period[M]") + exp = PeriodIndex(["2013-01", "2013-03"], freq="M") + tm.assert_index_equal(idx, exp) + assert idx.dtype == "period[M]" + + idx = PeriodIndex(["2013-01-05", "2013-03-05"], dtype="period[3D]") + exp = PeriodIndex(["2013-01-05", "2013-03-05"], freq="3D") + tm.assert_index_equal(idx, exp) + assert idx.dtype == "period[3D]" + + # if we already have a freq and it's not the same, asfreq is applied + # (no error is raised) + idx = PeriodIndex(["2013-01-01", "2013-01-02"], freq="D") + + res = PeriodIndex(idx, dtype="period[M]") + exp = PeriodIndex(["2013-01", "2013-01"], freq="M") + tm.assert_index_equal(res, exp) + assert res.dtype == "period[M]" + + res = PeriodIndex(idx, freq="M") + tm.assert_index_equal(res, exp) + assert res.dtype == "period[M]" + + msg = "specified freq and dtype are different" + with pytest.raises(IncompatibleFrequency, match=msg): + PeriodIndex(["2011-01"], freq="M", dtype="period[D]") + + def test_constructor_empty(self): + idx = PeriodIndex([], freq="M") + assert isinstance(idx, PeriodIndex) + assert len(idx) == 0 + assert idx.freq == "M" + + with pytest.raises(ValueError, match="freq not specified"): + PeriodIndex([]) + + def test_constructor_pi_nat(self): + idx = PeriodIndex( + [Period("2011-01", freq="M"), NaT, Period("2011-01", freq="M")] + ) + exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M") + tm.assert_index_equal(idx, exp) + + idx = PeriodIndex( + np.array([Period("2011-01", freq="M"), NaT, Period("2011-01", freq="M")]) + ) + tm.assert_index_equal(idx, exp) + + idx = PeriodIndex( + [NaT, NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")] + ) + exp = PeriodIndex(["NaT", "NaT", "2011-01", "2011-01"], freq="M") + tm.assert_index_equal(idx, exp) + + idx = PeriodIndex( + np.array( + [NaT, NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")] + ) + ) + tm.assert_index_equal(idx, exp) + + idx = PeriodIndex([NaT, NaT, "2011-01", "2011-01"], freq="M") + tm.assert_index_equal(idx, exp) + + with pytest.raises(ValueError, match="freq not specified"): + PeriodIndex([NaT, NaT]) + + with pytest.raises(ValueError, match="freq not specified"): + PeriodIndex(np.array([NaT, NaT])) + + with pytest.raises(ValueError, match="freq not specified"): + PeriodIndex(["NaT", "NaT"]) + + with pytest.raises(ValueError, match="freq not specified"): + PeriodIndex(np.array(["NaT", "NaT"])) + + def test_constructor_incompat_freq(self): + msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)" + + with pytest.raises(IncompatibleFrequency, match=msg): + PeriodIndex([Period("2011-01", freq="M"), NaT, Period("2011-01", freq="D")]) + + with pytest.raises(IncompatibleFrequency, match=msg): + PeriodIndex( + np.array( + [Period("2011-01", freq="M"), NaT, Period("2011-01", freq="D")] + ) + ) + + # first element is NaT + with pytest.raises(IncompatibleFrequency, match=msg): + PeriodIndex([NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]) + + with pytest.raises(IncompatibleFrequency, match=msg): + PeriodIndex( + np.array( + [NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")] + ) + ) + + def test_constructor_mixed(self): + idx = PeriodIndex(["2011-01", NaT, Period("2011-01",
freq="M")]) + exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M") + tm.assert_index_equal(idx, exp) + + idx = PeriodIndex(["NaT", NaT, Period("2011-01", freq="M")]) + exp = PeriodIndex(["NaT", "NaT", "2011-01"], freq="M") + tm.assert_index_equal(idx, exp) + + idx = PeriodIndex([Period("2011-01-01", freq="D"), NaT, "2012-01-01"]) + exp = PeriodIndex(["2011-01-01", "NaT", "2012-01-01"], freq="D") + tm.assert_index_equal(idx, exp) + + def test_constructor_simple_new(self): + idx = period_range("2007-01", name="p", periods=2, freq="M") + + with pytest.raises(AssertionError, match=""): + idx._simple_new(idx, name="p") + + result = idx._simple_new(idx._data, name="p") + tm.assert_index_equal(result, idx) + + msg = "Should be numpy array of type i8" + with pytest.raises(AssertionError, match=msg): + # Need ndarray, not int64 Index + type(idx._data)._simple_new(Index(idx.asi8), dtype=idx.dtype) + + arr = type(idx._data)._simple_new(idx.asi8, dtype=idx.dtype) + result = idx._simple_new(arr, name="p") + tm.assert_index_equal(result, idx) + + def test_constructor_simple_new_empty(self): + # GH13079 + idx = PeriodIndex([], freq="M", name="p") + with pytest.raises(AssertionError, match=""): + idx._simple_new(idx, name="p") + + result = idx._simple_new(idx._data, name="p") + tm.assert_index_equal(result, idx) + + @pytest.mark.parametrize("floats", [[1.1, 2.1], np.array([1.1, 2.1])]) + def test_constructor_floats(self, floats): + with pytest.raises(AssertionError, match="= -1" + ) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "index -5 is out of bounds for( axis 0 with)? size 3" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + +class TestGetValue: + @pytest.mark.parametrize("freq", ["H", "D"]) + def test_get_value_datetime_hourly(self, freq): + # get_loc and get_value should treat datetime objects symmetrically + # TODO: this test used to test get_value, which is removed in 2.0. + # should this test be moved somewhere, or is what's left redundant? 
+ dti = date_range("2016-01-01", periods=3, freq="MS") + pi = dti.to_period(freq) + ser = Series(range(7, 10), index=pi) + + ts = dti[0] + + assert pi.get_loc(ts) == 0 + assert ser[ts] == 7 + assert ser.loc[ts] == 7 + + ts2 = ts + Timedelta(hours=3) + if freq == "H": + with pytest.raises(KeyError, match="2016-01-01 03:00"): + pi.get_loc(ts2) + with pytest.raises(KeyError, match="2016-01-01 03:00"): + ser[ts2] + with pytest.raises(KeyError, match="2016-01-01 03:00"): + ser.loc[ts2] + else: + assert pi.get_loc(ts2) == 0 + assert ser[ts2] == 7 + assert ser.loc[ts2] == 7 + + +class TestContains: + def test_contains(self): + # GH 17717 + p0 = Period("2017-09-01") + p1 = Period("2017-09-02") + p2 = Period("2017-09-03") + p3 = Period("2017-09-04") + + ps0 = [p0, p1, p2] + idx0 = PeriodIndex(ps0) + + for p in ps0: + assert p in idx0 + assert str(p) in idx0 + + # GH#31172 + # Higher-resolution period-like are _not_ considered as contained + key = "2017-09-01 00:00:01" + assert key not in idx0 + with pytest.raises(KeyError, match=key): + idx0.get_loc(key) + + assert "2017-09" in idx0 + + assert p3 not in idx0 + + def test_contains_freq_mismatch(self): + rng = period_range("2007-01", freq="M", periods=10) + + assert Period("2007-01", freq="M") in rng + assert Period("2007-01", freq="D") not in rng + assert Period("2007-01", freq="2M") not in rng + + def test_contains_nat(self): + # see gh-13582 + idx = period_range("2007-01", freq="M", periods=10) + assert NaT not in idx + assert None not in idx + assert float("nan") not in idx + assert np.nan not in idx + + idx = PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M") + assert NaT in idx + assert None in idx + assert float("nan") in idx + assert np.nan in idx + + +class TestAsOfLocs: + def test_asof_locs_mismatched_type(self): + dti = date_range("2016-01-01", periods=3) + pi = dti.to_period("D") + pi2 = dti.to_period("H") + + mask = np.array([0, 1, 0], dtype=bool) + + msg = "must be DatetimeIndex or PeriodIndex" + with pytest.raises(TypeError, match=msg): + pi.asof_locs(pd.Index(pi.asi8, dtype=np.int64), mask) + + with pytest.raises(TypeError, match=msg): + pi.asof_locs(pd.Index(pi.asi8, dtype=np.float64), mask) + + with pytest.raises(TypeError, match=msg): + # TimedeltaIndex + pi.asof_locs(dti - dti, mask) + + msg = "Input has different freq=H" + with pytest.raises(libperiod.IncompatibleFrequency, match=msg): + pi.asof_locs(pi2, mask) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_join.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_join.py new file mode 100644 index 00000000..191dba2b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_join.py @@ -0,0 +1,58 @@ +import numpy as np +import pytest + +from pandas._libs.tslibs import IncompatibleFrequency + +from pandas import ( + Index, + PeriodIndex, + period_range, +) +import pandas._testing as tm + + +class TestJoin: + def test_join_outer_indexer(self): + pi = period_range("1/1/2000", "1/20/2000", freq="D") + + result = pi._outer_indexer(pi) + tm.assert_extension_array_equal(result[0], pi._values) + tm.assert_numpy_array_equal(result[1], np.arange(len(pi), dtype=np.intp)) + tm.assert_numpy_array_equal(result[2], np.arange(len(pi), dtype=np.intp)) + + def test_joins(self, join_type): + index = period_range("1/1/2000", "1/20/2000", freq="D") + + joined = index.join(index[:-5], how=join_type) + + assert isinstance(joined, PeriodIndex) + assert joined.freq == index.freq + + def test_join_self(self, 
join_type): + index = period_range("1/1/2000", "1/20/2000", freq="D") + + res = index.join(index, how=join_type) + assert index is res + + def test_join_does_not_recur(self): + df = tm.makeCustomDataframe( + 3, + 2, + data_gen_f=lambda *args: np.random.default_rng(2).integers(2), + c_idx_type="p", + r_idx_type="dt", + ) + ser = df.iloc[:2, 0] + + res = ser.index.join(df.columns, how="outer") + expected = Index( + [ser.index[0], ser.index[1], df.columns[0], df.columns[1]], object + ) + tm.assert_index_equal(res, expected) + + def test_join_mismatched_freq_raises(self): + index = period_range("1/1/2000", "1/20/2000", freq="D") + index3 = period_range("1/1/2000", "1/20/2000", freq="2D") + msg = r".*Input has different freq=2D from Period\(freq=D\)" + with pytest.raises(IncompatibleFrequency, match=msg): + index.join(index3) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_monotonic.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_monotonic.py new file mode 100644 index 00000000..15cb8f71 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_monotonic.py @@ -0,0 +1,42 @@ +from pandas import ( + Period, + PeriodIndex, +) + + +def test_is_monotonic_increasing(): + # GH#17717 + p0 = Period("2017-09-01") + p1 = Period("2017-09-02") + p2 = Period("2017-09-03") + + idx_inc0 = PeriodIndex([p0, p1, p2]) + idx_inc1 = PeriodIndex([p0, p1, p1]) + idx_dec0 = PeriodIndex([p2, p1, p0]) + idx_dec1 = PeriodIndex([p2, p1, p1]) + idx = PeriodIndex([p1, p2, p0]) + + assert idx_inc0.is_monotonic_increasing is True + assert idx_inc1.is_monotonic_increasing is True + assert idx_dec0.is_monotonic_increasing is False + assert idx_dec1.is_monotonic_increasing is False + assert idx.is_monotonic_increasing is False + + +def test_is_monotonic_decreasing(): + # GH#17717 + p0 = Period("2017-09-01") + p1 = Period("2017-09-02") + p2 = Period("2017-09-03") + + idx_inc0 = PeriodIndex([p0, p1, p2]) + idx_inc1 = PeriodIndex([p0, p1, p1]) + idx_dec0 = PeriodIndex([p2, p1, p0]) + idx_dec1 = PeriodIndex([p2, p1, p1]) + idx = PeriodIndex([p1, p2, p0]) + + assert idx_inc0.is_monotonic_decreasing is False + assert idx_inc1.is_monotonic_decreasing is False + assert idx_dec0.is_monotonic_decreasing is True + assert idx_dec1.is_monotonic_decreasing is True + assert idx.is_monotonic_decreasing is False diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_partial_slicing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_partial_slicing.py new file mode 100644 index 00000000..e52866ab --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_partial_slicing.py @@ -0,0 +1,195 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + PeriodIndex, + Series, + date_range, + period_range, +) +import pandas._testing as tm + + +class TestPeriodIndex: + def test_getitem_periodindex_duplicates_string_slice(self, using_copy_on_write): + # monotonic + idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="A-JUN") + ts = Series(np.random.default_rng(2).standard_normal(len(idx)), index=idx) + original = ts.copy() + + result = ts["2007"] + expected = ts[1:3] + tm.assert_series_equal(result, expected) + result[:] = 1 + if using_copy_on_write: + tm.assert_series_equal(ts, original) + else: + assert (ts[1:3] == 1).all() + + # not monotonic + idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq="A-JUN") + ts = 
Series(np.random.default_rng(2).standard_normal(len(idx)), index=idx) + + result = ts["2007"] + expected = ts[idx == "2007"] + tm.assert_series_equal(result, expected) + + def test_getitem_periodindex_quarter_string(self): + pi = PeriodIndex(["2Q05", "3Q05", "4Q05", "1Q06", "2Q06"], freq="Q") + ser = Series(np.random.default_rng(2).random(len(pi)), index=pi).cumsum() + # Todo: fix these accessors! + assert ser["05Q4"] == ser.iloc[2] + + def test_pindex_slice_index(self): + pi = period_range(start="1/1/10", end="12/31/12", freq="M") + s = Series(np.random.default_rng(2).random(len(pi)), index=pi) + res = s["2010"] + exp = s[0:12] + tm.assert_series_equal(res, exp) + res = s["2011"] + exp = s[12:24] + tm.assert_series_equal(res, exp) + + @pytest.mark.parametrize("make_range", [date_range, period_range]) + def test_range_slice_day(self, make_range): + # GH#6716 + idx = make_range(start="2013/01/01", freq="D", periods=400) + + msg = "slice indices must be integers or None or have an __index__ method" + # slices against the bare index should raise TypeError + values = [ + "2014", + "2013/02", + "2013/01/02", + "2013/02/01 9H", + "2013/02/01 09:00", + ] + for v in values: + with pytest.raises(TypeError, match=msg): + idx[v:] + + s = Series(np.random.default_rng(2).random(len(idx)), index=idx) + + tm.assert_series_equal(s["2013/01/02":], s[1:]) + tm.assert_series_equal(s["2013/01/02":"2013/01/05"], s[1:5]) + tm.assert_series_equal(s["2013/02":], s[31:]) + tm.assert_series_equal(s["2014":], s[365:]) + + invalid = ["2013/02/01 9H", "2013/02/01 09:00"] + for v in invalid: + with pytest.raises(TypeError, match=msg): + idx[v:] + + @pytest.mark.parametrize("make_range", [date_range, period_range]) + def test_range_slice_seconds(self, make_range): + # GH#6716 + idx = make_range(start="2013/01/01 09:00:00", freq="S", periods=4000) + msg = "slice indices must be integers or None or have an __index__ method" + + # slices against the bare index should raise TypeError + values = [ + "2014", + "2013/02", + "2013/01/02", + "2013/02/01 9H", + "2013/02/01 09:00", + ] + for v in values: + with pytest.raises(TypeError, match=msg): + idx[v:] + + s = Series(np.random.default_rng(2).random(len(idx)), index=idx) + + tm.assert_series_equal(s["2013/01/01 09:05":"2013/01/01 09:10"], s[300:660]) + tm.assert_series_equal(s["2013/01/01 10:00":"2013/01/01 10:05"], s[3600:3960]) + tm.assert_series_equal(s["2013/01/01 10H":], s[3600:]) + tm.assert_series_equal(s[:"2013/01/01 09:30"], s[:1860]) + for d in ["2013/01/01", "2013/01", "2013"]: + tm.assert_series_equal(s[d:], s) + + @pytest.mark.parametrize("make_range", [date_range, period_range]) + def test_range_slice_outofbounds(self, make_range): + # GH#5407 + idx = make_range(start="2013/10/01", freq="D", periods=10) + + df = DataFrame({"units": [100 + i for i in range(10)]}, index=idx) + empty = DataFrame(index=type(idx)([], freq="D"), columns=["units"]) + empty["units"] = empty["units"].astype("int64") + + tm.assert_frame_equal(df["2013/09/01":"2013/09/30"], empty) + tm.assert_frame_equal(df["2013/09/30":"2013/10/02"], df.iloc[:2]) + tm.assert_frame_equal(df["2013/10/01":"2013/10/02"], df.iloc[:2]) + tm.assert_frame_equal(df["2013/10/02":"2013/09/30"], empty) + tm.assert_frame_equal(df["2013/10/15":"2013/10/17"], empty) + tm.assert_frame_equal(df["2013-06":"2013-09"], empty) + tm.assert_frame_equal(df["2013-11":"2013-12"], empty) + + @pytest.mark.parametrize("make_range", [date_range, period_range]) + def test_maybe_cast_slice_bound(self, make_range, frame_or_series): + idx = 
make_range(start="2013/10/01", freq="D", periods=10) + + obj = DataFrame({"units": [100 + i for i in range(10)]}, index=idx) + obj = tm.get_obj(obj, frame_or_series) + + msg = ( + f"cannot do slice indexing on {type(idx).__name__} with " + r"these indexers \[foo\] of type str" + ) + + # Check the lower-level calls are raising where expected. + with pytest.raises(TypeError, match=msg): + idx._maybe_cast_slice_bound("foo", "left") + with pytest.raises(TypeError, match=msg): + idx.get_slice_bound("foo", "left") + + with pytest.raises(TypeError, match=msg): + obj["2013/09/30":"foo"] + with pytest.raises(TypeError, match=msg): + obj["foo":"2013/09/30"] + with pytest.raises(TypeError, match=msg): + obj.loc["2013/09/30":"foo"] + with pytest.raises(TypeError, match=msg): + obj.loc["foo":"2013/09/30"] + + def test_partial_slice_doesnt_require_monotonicity(self): + # See also: DatetimeIndex test of the same name + dti = date_range("2014-01-01", periods=30, freq="30D") + pi = dti.to_period("D") + + ser_monotonic = Series(np.arange(30), index=pi) + + shuffler = list(range(0, 30, 2)) + list(range(1, 31, 2)) + ser = ser_monotonic.iloc[shuffler] + nidx = ser.index + + # Manually identified locations of year==2014 + indexer_2014 = np.array( + [0, 1, 2, 3, 4, 5, 6, 15, 16, 17, 18, 19, 20], dtype=np.intp + ) + assert (nidx[indexer_2014].year == 2014).all() + assert not (nidx[~indexer_2014].year == 2014).any() + + result = nidx.get_loc("2014") + tm.assert_numpy_array_equal(result, indexer_2014) + + expected = ser.iloc[indexer_2014] + result = ser.loc["2014"] + tm.assert_series_equal(result, expected) + + result = ser["2014"] + tm.assert_series_equal(result, expected) + + # Manually identified locations where ser.index is within May 2015 + indexer_may2015 = np.array([23], dtype=np.intp) + assert nidx[23].year == 2015 and nidx[23].month == 5 + + result = nidx.get_loc("May 2015") + tm.assert_numpy_array_equal(result, indexer_may2015) + + expected = ser.iloc[indexer_may2015] + result = ser.loc["May 2015"] + tm.assert_series_equal(result, expected) + + result = ser["May 2015"] + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_period.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_period.py new file mode 100644 index 00000000..6d8ae179 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_period.py @@ -0,0 +1,320 @@ +import numpy as np +import pytest + +from pandas._libs.tslibs.period import IncompatibleFrequency + +from pandas import ( + Index, + NaT, + Period, + PeriodIndex, + Series, + date_range, + offsets, + period_range, +) +import pandas._testing as tm + + +class TestPeriodIndex: + def test_make_time_series(self): + index = period_range(freq="A", start="1/1/2001", end="12/1/2009") + series = Series(1, index=index) + assert isinstance(series, Series) + + def test_view_asi8(self): + idx = PeriodIndex([], freq="M") + + exp = np.array([], dtype=np.int64) + tm.assert_numpy_array_equal(idx.view("i8"), exp) + tm.assert_numpy_array_equal(idx.asi8, exp) + + idx = PeriodIndex(["2011-01", NaT], freq="M") + + exp = np.array([492, -9223372036854775808], dtype=np.int64) + tm.assert_numpy_array_equal(idx.view("i8"), exp) + tm.assert_numpy_array_equal(idx.asi8, exp) + + exp = np.array([14975, -9223372036854775808], dtype=np.int64) + idx = PeriodIndex(["2011-01-01", NaT], freq="D") + tm.assert_numpy_array_equal(idx.view("i8"), exp) + tm.assert_numpy_array_equal(idx.asi8, exp) + + def 
test_values(self): + idx = PeriodIndex([], freq="M") + + exp = np.array([], dtype=object) + tm.assert_numpy_array_equal(idx.values, exp) + tm.assert_numpy_array_equal(idx.to_numpy(), exp) + + exp = np.array([], dtype=np.int64) + tm.assert_numpy_array_equal(idx.asi8, exp) + + idx = PeriodIndex(["2011-01", NaT], freq="M") + + exp = np.array([Period("2011-01", freq="M"), NaT], dtype=object) + tm.assert_numpy_array_equal(idx.values, exp) + tm.assert_numpy_array_equal(idx.to_numpy(), exp) + exp = np.array([492, -9223372036854775808], dtype=np.int64) + tm.assert_numpy_array_equal(idx.asi8, exp) + + idx = PeriodIndex(["2011-01-01", NaT], freq="D") + + exp = np.array([Period("2011-01-01", freq="D"), NaT], dtype=object) + tm.assert_numpy_array_equal(idx.values, exp) + tm.assert_numpy_array_equal(idx.to_numpy(), exp) + exp = np.array([14975, -9223372036854775808], dtype=np.int64) + tm.assert_numpy_array_equal(idx.asi8, exp) + + def test_period_index_length(self): + pi = period_range(freq="A", start="1/1/2001", end="12/1/2009") + assert len(pi) == 9 + + pi = period_range(freq="Q", start="1/1/2001", end="12/1/2009") + assert len(pi) == 4 * 9 + + pi = period_range(freq="M", start="1/1/2001", end="12/1/2009") + assert len(pi) == 12 * 9 + + msg = "Period with BDay freq is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + start = Period("02-Apr-2005", "B") + i1 = period_range(start=start, periods=20) + assert len(i1) == 20 + assert i1.freq == start.freq + assert i1[0] == start + + end_intv = Period("2006-12-31", "W") + i1 = period_range(end=end_intv, periods=10) + assert len(i1) == 10 + assert i1.freq == end_intv.freq + assert i1[-1] == end_intv + + end_intv = Period("2006-12-31", "1w") + i2 = period_range(end=end_intv, periods=10) + assert len(i1) == len(i2) + assert (i1 == i2).all() + assert i1.freq == i2.freq + + msg = "start and end must have same freq" + msg2 = "Period with BDay freq is deprecated" + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=msg2): + period_range(start=start, end=end_intv) + + with tm.assert_produces_warning(FutureWarning, match=msg2): + end_intv = Period("2005-05-01", "B") + with tm.assert_produces_warning(FutureWarning, match=msg2): + i1 = period_range(start=start, end=end_intv) + + msg = ( + "Of the three parameters: start, end, and periods, exactly two " + "must be specified" + ) + with pytest.raises(ValueError, match=msg): + period_range(start=start) + + # infer freq from first element + with tm.assert_produces_warning(FutureWarning, match=msg2): + i2 = PeriodIndex([end_intv, Period("2005-05-05", "B")]) + assert len(i2) == 2 + assert i2[0] == end_intv + + with tm.assert_produces_warning(FutureWarning, match=msg2): + i2 = PeriodIndex(np.array([end_intv, Period("2005-05-05", "B")])) + assert len(i2) == 2 + assert i2[0] == end_intv + + # Mixed freq should fail + vals = [end_intv, Period("2006-12-31", "w")] + msg = r"Input has different freq=W-SUN from PeriodIndex\(freq=B\)" + with pytest.raises(IncompatibleFrequency, match=msg): + PeriodIndex(vals) + vals = np.array(vals) + with pytest.raises(ValueError, match=msg): + PeriodIndex(vals) + + @pytest.mark.parametrize( + "field", + [ + "year", + "month", + "day", + "hour", + "minute", + "second", + "weekofyear", + "week", + "dayofweek", + "day_of_week", + "dayofyear", + "day_of_year", + "quarter", + "qyear", + "days_in_month", + ], + ) + @pytest.mark.parametrize( + "periodindex", + [ + period_range(freq="A", start="1/1/2001", end="12/1/2005"), + 
period_range(freq="Q", start="1/1/2001", end="12/1/2002"), + period_range(freq="M", start="1/1/2001", end="1/1/2002"), + period_range(freq="D", start="12/1/2001", end="6/1/2001"), + period_range(freq="H", start="12/31/2001", end="1/1/2002 23:00"), + period_range(freq="Min", start="12/31/2001", end="1/1/2002 00:20"), + period_range( + freq="S", start="12/31/2001 00:00:00", end="12/31/2001 00:05:00" + ), + period_range(end=Period("2006-12-31", "W"), periods=10), + ], + ) + def test_fields(self, periodindex, field): + periods = list(periodindex) + ser = Series(periodindex) + + field_idx = getattr(periodindex, field) + assert len(periodindex) == len(field_idx) + for x, val in zip(periods, field_idx): + assert getattr(x, field) == val + + if len(ser) == 0: + return + + field_s = getattr(ser.dt, field) + assert len(periodindex) == len(field_s) + for x, val in zip(periods, field_s): + assert getattr(x, field) == val + + def test_is_(self): + create_index = lambda: period_range(freq="A", start="1/1/2001", end="12/1/2009") + index = create_index() + assert index.is_(index) + assert not index.is_(create_index()) + assert index.is_(index.view()) + assert index.is_(index.view().view().view().view().view()) + assert index.view().is_(index) + ind2 = index.view() + index.name = "Apple" + assert ind2.is_(index) + assert not index.is_(index[:]) + assert not index.is_(index.asfreq("M")) + assert not index.is_(index.asfreq("A")) + + assert not index.is_(index - 2) + assert not index.is_(index - 0) + + def test_index_unique(self): + idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq="A-JUN") + expected = PeriodIndex([2000, 2007, 2009], freq="A-JUN") + tm.assert_index_equal(idx.unique(), expected) + assert idx.nunique() == 3 + + def test_negative_ordinals(self): + Period(ordinal=-1000, freq="A") + Period(ordinal=0, freq="A") + + idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq="A") + idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq="A") + tm.assert_index_equal(idx1, idx2) + + def test_pindex_fieldaccessor_nat(self): + idx = PeriodIndex( + ["2011-01", "2011-02", "NaT", "2012-03", "2012-04"], freq="D", name="name" + ) + + exp = Index([2011, 2011, -1, 2012, 2012], dtype=np.int64, name="name") + tm.assert_index_equal(idx.year, exp) + exp = Index([1, 2, -1, 3, 4], dtype=np.int64, name="name") + tm.assert_index_equal(idx.month, exp) + + def test_pindex_multiples(self): + expected = PeriodIndex( + ["2011-01", "2011-03", "2011-05", "2011-07", "2011-09", "2011-11"], + freq="2M", + ) + + pi = period_range(start="1/1/11", end="12/31/11", freq="2M") + tm.assert_index_equal(pi, expected) + assert pi.freq == offsets.MonthEnd(2) + assert pi.freqstr == "2M" + + pi = period_range(start="1/1/11", periods=6, freq="2M") + tm.assert_index_equal(pi, expected) + assert pi.freq == offsets.MonthEnd(2) + assert pi.freqstr == "2M" + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + @pytest.mark.filterwarnings("ignore:Period with BDay freq:FutureWarning") + def test_iteration(self): + index = period_range(start="1/1/10", periods=4, freq="B") + + result = list(index) + assert isinstance(result[0], Period) + assert result[0].freq == index.freq + + def test_with_multi_index(self): + # #1705 + index = date_range("1/1/2012", periods=4, freq="12H") + index_as_arrays = [index.to_period(freq="D"), index.hour] + + s = Series([0, 1, 2, 3], index_as_arrays) + + assert isinstance(s.index.levels[0], PeriodIndex) + + assert isinstance(s.index.values[0][0], Period) + + def test_map(self): + # test_map_dictlike 
generally tests + + index = PeriodIndex([2005, 2007, 2009], freq="A") + result = index.map(lambda x: x.ordinal) + exp = Index([x.ordinal for x in index]) + tm.assert_index_equal(result, exp) + + def test_format_empty(self): + # GH35712 + empty_idx = PeriodIndex([], freq="A") + assert empty_idx.format() == [] + assert empty_idx.format(name=True) == [""] + + +def test_maybe_convert_timedelta(): + pi = PeriodIndex(["2000", "2001"], freq="D") + offset = offsets.Day(2) + assert pi._maybe_convert_timedelta(offset) == 2 + assert pi._maybe_convert_timedelta(2) == 2 + + offset = offsets.BusinessDay() + msg = r"Input has different freq=B from PeriodIndex\(freq=D\)" + with pytest.raises(ValueError, match=msg): + pi._maybe_convert_timedelta(offset) + + +@pytest.mark.parametrize("array", [True, False]) +def test_dunder_array(array): + obj = PeriodIndex(["2000-01-01", "2001-01-01"], freq="D") + if array: + obj = obj._data + + expected = np.array([obj[0], obj[1]], dtype=object) + result = np.array(obj) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(obj) + tm.assert_numpy_array_equal(result, expected) + + expected = obj.asi8 + for dtype in ["i8", "int64", np.int64]: + result = np.array(obj, dtype=dtype) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(obj, dtype=dtype) + tm.assert_numpy_array_equal(result, expected) + + for dtype in ["float64", "int32", "uint64"]: + msg = "argument must be" + with pytest.raises(TypeError, match=msg): + np.array(obj, dtype=dtype) + with pytest.raises(TypeError, match=msg): + np.array(obj, dtype=getattr(np, dtype)) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_period_range.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_period_range.py new file mode 100644 index 00000000..c94ddf57 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_period_range.py @@ -0,0 +1,121 @@ +import numpy as np +import pytest + +from pandas import ( + NaT, + Period, + PeriodIndex, + date_range, + period_range, +) +import pandas._testing as tm + + +class TestPeriodRange: + def test_required_arguments(self): + msg = ( + "Of the three parameters: start, end, and periods, exactly two " + "must be specified" + ) + with pytest.raises(ValueError, match=msg): + period_range("2011-1-1", "2012-1-1", "B") + + @pytest.mark.parametrize("freq", ["D", "W", "M", "Q", "A"]) + def test_construction_from_string(self, freq): + # non-empty + expected = date_range( + start="2017-01-01", periods=5, freq=freq, name="foo" + ).to_period() + start, end = str(expected[0]), str(expected[-1]) + + result = period_range(start=start, end=end, freq=freq, name="foo") + tm.assert_index_equal(result, expected) + + result = period_range(start=start, periods=5, freq=freq, name="foo") + tm.assert_index_equal(result, expected) + + result = period_range(end=end, periods=5, freq=freq, name="foo") + tm.assert_index_equal(result, expected) + + # empty + expected = PeriodIndex([], freq=freq, name="foo") + + result = period_range(start=start, periods=0, freq=freq, name="foo") + tm.assert_index_equal(result, expected) + + result = period_range(end=end, periods=0, freq=freq, name="foo") + tm.assert_index_equal(result, expected) + + result = period_range(start=end, end=start, freq=freq, name="foo") + tm.assert_index_equal(result, expected) + + def test_construction_from_period(self): + # upsampling + start, end = Period("2017Q1", freq="Q"), Period("2018Q1", freq="Q") + expected = date_range( + 
start="2017-03-31", end="2018-03-31", freq="M", name="foo" + ).to_period() + result = period_range(start=start, end=end, freq="M", name="foo") + tm.assert_index_equal(result, expected) + + # downsampling + start, end = Period("2017-1", freq="M"), Period("2019-12", freq="M") + expected = date_range( + start="2017-01-31", end="2019-12-31", freq="Q", name="foo" + ).to_period() + result = period_range(start=start, end=end, freq="Q", name="foo") + tm.assert_index_equal(result, expected) + + # test for issue # 21793 + start, end = Period("2017Q1", freq="Q"), Period("2018Q1", freq="Q") + idx = period_range(start=start, end=end, freq="Q", name="foo") + result = idx == idx.values + expected = np.array([True, True, True, True, True]) + tm.assert_numpy_array_equal(result, expected) + + # empty + expected = PeriodIndex([], freq="W", name="foo") + + result = period_range(start=start, periods=0, freq="W", name="foo") + tm.assert_index_equal(result, expected) + + result = period_range(end=end, periods=0, freq="W", name="foo") + tm.assert_index_equal(result, expected) + + result = period_range(start=end, end=start, freq="W", name="foo") + tm.assert_index_equal(result, expected) + + def test_errors(self): + # not enough params + msg = ( + "Of the three parameters: start, end, and periods, " + "exactly two must be specified" + ) + with pytest.raises(ValueError, match=msg): + period_range(start="2017Q1") + + with pytest.raises(ValueError, match=msg): + period_range(end="2017Q1") + + with pytest.raises(ValueError, match=msg): + period_range(periods=5) + + with pytest.raises(ValueError, match=msg): + period_range() + + # too many params + with pytest.raises(ValueError, match=msg): + period_range(start="2017Q1", end="2018Q1", periods=8, freq="Q") + + # start/end NaT + msg = "start and end must not be NaT" + with pytest.raises(ValueError, match=msg): + period_range(start=NaT, end="2018Q1") + + with pytest.raises(ValueError, match=msg): + period_range(start="2017Q1", end=NaT) + + # invalid periods param + msg = "periods must be a number, got foo" + with pytest.raises(TypeError, match=msg): + period_range(start="2017Q1", periods="foo") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_pickle.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_pickle.py new file mode 100644 index 00000000..cb981ab1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_pickle.py @@ -0,0 +1,26 @@ +import numpy as np +import pytest + +from pandas import ( + NaT, + PeriodIndex, + period_range, +) +import pandas._testing as tm + +from pandas.tseries import offsets + + +class TestPickle: + @pytest.mark.parametrize("freq", ["D", "M", "A"]) + def test_pickle_round_trip(self, freq): + idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.nan], freq=freq) + result = tm.round_trip_pickle(idx) + tm.assert_index_equal(result, idx) + + def test_pickle_freq(self): + # GH#2891 + prng = period_range("1/1/2011", "1/1/2012", freq="M") + new_prng = tm.round_trip_pickle(prng) + assert new_prng.freq == offsets.MonthEnd() + assert new_prng.freqstr == "M" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_resolution.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_resolution.py new file mode 100644 index 00000000..7ecbde75 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_resolution.py @@ -0,0 +1,23 @@ +import pytest + +import pandas as pd + + +class TestResolution: + 
@pytest.mark.parametrize( + "freq,expected", + [ + ("A", "year"), + ("Q", "quarter"), + ("M", "month"), + ("D", "day"), + ("H", "hour"), + ("T", "minute"), + ("S", "second"), + ("L", "millisecond"), + ("U", "microsecond"), + ], + ) + def test_resolution(self, freq, expected): + idx = pd.period_range(start="2013-04-01", periods=30, freq=freq) + assert idx.resolution == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_scalar_compat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_scalar_compat.py new file mode 100644 index 00000000..c9644498 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_scalar_compat.py @@ -0,0 +1,38 @@ +"""Tests for PeriodIndex behaving like a vectorized Period scalar""" + +import pytest + +from pandas import ( + Timedelta, + date_range, + period_range, +) +import pandas._testing as tm + + +class TestPeriodIndexOps: + def test_start_time(self): + # GH#17157 + index = period_range(freq="M", start="2016-01-01", end="2016-05-31") + expected_index = date_range("2016-01-01", end="2016-05-31", freq="MS") + tm.assert_index_equal(index.start_time, expected_index) + + def test_end_time(self): + # GH#17157 + index = period_range(freq="M", start="2016-01-01", end="2016-05-31") + expected_index = date_range("2016-01-01", end="2016-05-31", freq="M") + expected_index += Timedelta(1, "D") - Timedelta(1, "ns") + tm.assert_index_equal(index.end_time, expected_index) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + @pytest.mark.filterwarnings( + "ignore:Period with BDay freq is deprecated:FutureWarning" + ) + def test_end_time_business_friday(self): + # GH#34449 + pi = period_range("1990-01-05", freq="B", periods=1) + result = pi.end_time + + dti = date_range("1990-01-05", freq="D", periods=1)._with_freq(None) + expected = dti + Timedelta(days=1, nanoseconds=-1) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_searchsorted.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_searchsorted.py new file mode 100644 index 00000000..b9863d1b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_searchsorted.py @@ -0,0 +1,80 @@ +import numpy as np +import pytest + +from pandas._libs.tslibs import IncompatibleFrequency + +from pandas import ( + NaT, + Period, + PeriodIndex, +) +import pandas._testing as tm + + +class TestSearchsorted: + @pytest.mark.parametrize("freq", ["D", "2D"]) + def test_searchsorted(self, freq): + pidx = PeriodIndex( + ["2014-01-01", "2014-01-02", "2014-01-03", "2014-01-04", "2014-01-05"], + freq=freq, + ) + + p1 = Period("2014-01-01", freq=freq) + assert pidx.searchsorted(p1) == 0 + + p2 = Period("2014-01-04", freq=freq) + assert pidx.searchsorted(p2) == 3 + + assert pidx.searchsorted(NaT) == 5 + + msg = "Input has different freq=H from PeriodArray" + with pytest.raises(IncompatibleFrequency, match=msg): + pidx.searchsorted(Period("2014-01-01", freq="H")) + + msg = "Input has different freq=5D from PeriodArray" + with pytest.raises(IncompatibleFrequency, match=msg): + pidx.searchsorted(Period("2014-01-01", freq="5D")) + + def test_searchsorted_different_argument_classes(self, listlike_box): + pidx = PeriodIndex( + ["2014-01-01", "2014-01-02", "2014-01-03", "2014-01-04", "2014-01-05"], + freq="D", + ) + result = pidx.searchsorted(listlike_box(pidx)) + expected = np.arange(len(pidx), 
dtype=result.dtype) + tm.assert_numpy_array_equal(result, expected) + + result = pidx._data.searchsorted(listlike_box(pidx)) + tm.assert_numpy_array_equal(result, expected) + + def test_searchsorted_invalid(self): + pidx = PeriodIndex( + ["2014-01-01", "2014-01-02", "2014-01-03", "2014-01-04", "2014-01-05"], + freq="D", + ) + + other = np.array([0, 1], dtype=np.int64) + + msg = "|".join( + [ + "searchsorted requires compatible dtype or scalar", + "value should be a 'Period', 'NaT', or array of those. Got", + ] + ) + with pytest.raises(TypeError, match=msg): + pidx.searchsorted(other) + + with pytest.raises(TypeError, match=msg): + pidx.searchsorted(other.astype("timedelta64[ns]")) + + with pytest.raises(TypeError, match=msg): + pidx.searchsorted(np.timedelta64(4)) + + with pytest.raises(TypeError, match=msg): + pidx.searchsorted(np.timedelta64("NaT", "ms")) + + with pytest.raises(TypeError, match=msg): + pidx.searchsorted(np.datetime64(4, "ns")) + + with pytest.raises(TypeError, match=msg): + pidx.searchsorted(np.datetime64("NaT", "ns")) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_setops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_setops.py new file mode 100644 index 00000000..af89d712 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_setops.py @@ -0,0 +1,361 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + PeriodIndex, + date_range, + period_range, +) +import pandas._testing as tm + + +def _permute(obj): + return obj.take(np.random.default_rng(2).permutation(len(obj))) + + +class TestPeriodIndex: + def test_union(self, sort): + # union + other1 = period_range("1/1/2000", freq="D", periods=5) + rng1 = period_range("1/6/2000", freq="D", periods=5) + expected1 = PeriodIndex( + [ + "2000-01-06", + "2000-01-07", + "2000-01-08", + "2000-01-09", + "2000-01-10", + "2000-01-01", + "2000-01-02", + "2000-01-03", + "2000-01-04", + "2000-01-05", + ], + freq="D", + ) + + rng2 = period_range("1/1/2000", freq="D", periods=5) + other2 = period_range("1/4/2000", freq="D", periods=5) + expected2 = period_range("1/1/2000", freq="D", periods=8) + + rng3 = period_range("1/1/2000", freq="D", periods=5) + other3 = PeriodIndex([], freq="D") + expected3 = period_range("1/1/2000", freq="D", periods=5) + + rng4 = period_range("2000-01-01 09:00", freq="H", periods=5) + other4 = period_range("2000-01-02 09:00", freq="H", periods=5) + expected4 = PeriodIndex( + [ + "2000-01-01 09:00", + "2000-01-01 10:00", + "2000-01-01 11:00", + "2000-01-01 12:00", + "2000-01-01 13:00", + "2000-01-02 09:00", + "2000-01-02 10:00", + "2000-01-02 11:00", + "2000-01-02 12:00", + "2000-01-02 13:00", + ], + freq="H", + ) + + rng5 = PeriodIndex( + ["2000-01-01 09:01", "2000-01-01 09:03", "2000-01-01 09:05"], freq="T" + ) + other5 = PeriodIndex( + ["2000-01-01 09:01", "2000-01-01 09:05", "2000-01-01 09:08"], freq="T" + ) + expected5 = PeriodIndex( + [ + "2000-01-01 09:01", + "2000-01-01 09:03", + "2000-01-01 09:05", + "2000-01-01 09:08", + ], + freq="T", + ) + + rng6 = period_range("2000-01-01", freq="M", periods=7) + other6 = period_range("2000-04-01", freq="M", periods=7) + expected6 = period_range("2000-01-01", freq="M", periods=10) + + rng7 = period_range("2003-01-01", freq="A", periods=5) + other7 = period_range("1998-01-01", freq="A", periods=8) + expected7 = PeriodIndex( + [ + "2003", + "2004", + "2005", + "2006", + "2007", + "1998", + "1999", + "2000", + "2001", + "2002", + ], + freq="A", 
+ ) + + rng8 = PeriodIndex( + ["1/3/2000", "1/2/2000", "1/1/2000", "1/5/2000", "1/4/2000"], freq="D" + ) + other8 = period_range("1/6/2000", freq="D", periods=5) + expected8 = PeriodIndex( + [ + "1/3/2000", + "1/2/2000", + "1/1/2000", + "1/5/2000", + "1/4/2000", + "1/6/2000", + "1/7/2000", + "1/8/2000", + "1/9/2000", + "1/10/2000", + ], + freq="D", + ) + + for rng, other, expected in [ + (rng1, other1, expected1), + (rng2, other2, expected2), + (rng3, other3, expected3), + (rng4, other4, expected4), + (rng5, other5, expected5), + (rng6, other6, expected6), + (rng7, other7, expected7), + (rng8, other8, expected8), + ]: + result_union = rng.union(other, sort=sort) + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result_union, expected) + + def test_union_misc(self, sort): + index = period_range("1/1/2000", "1/20/2000", freq="D") + + result = index[:-5].union(index[10:], sort=sort) + tm.assert_index_equal(result, index) + + # not in order + result = _permute(index[:-5]).union(_permute(index[10:]), sort=sort) + if sort is None: + tm.assert_index_equal(result, index) + assert tm.equalContents(result, index) + + # cast if different frequencies + index = period_range("1/1/2000", "1/20/2000", freq="D") + index2 = period_range("1/1/2000", "1/20/2000", freq="W-WED") + result = index.union(index2, sort=sort) + expected = index.astype(object).union(index2.astype(object), sort=sort) + tm.assert_index_equal(result, expected) + + def test_intersection(self, sort): + index = period_range("1/1/2000", "1/20/2000", freq="D") + + result = index[:-5].intersection(index[10:], sort=sort) + tm.assert_index_equal(result, index[10:-5]) + + # not in order + left = _permute(index[:-5]) + right = _permute(index[10:]) + result = left.intersection(right, sort=sort) + if sort is None: + tm.assert_index_equal(result, index[10:-5]) + assert tm.equalContents(result, index[10:-5]) + + # cast if different frequencies + index = period_range("1/1/2000", "1/20/2000", freq="D") + index2 = period_range("1/1/2000", "1/20/2000", freq="W-WED") + + result = index.intersection(index2, sort=sort) + expected = pd.Index([], dtype=object) + tm.assert_index_equal(result, expected) + + index3 = period_range("1/1/2000", "1/20/2000", freq="2D") + result = index.intersection(index3, sort=sort) + tm.assert_index_equal(result, expected) + + def test_intersection_cases(self, sort): + base = period_range("6/1/2000", "6/30/2000", freq="D", name="idx") + + # if target has the same name, it is preserved + rng2 = period_range("5/15/2000", "6/20/2000", freq="D", name="idx") + expected2 = period_range("6/1/2000", "6/20/2000", freq="D", name="idx") + + # if target name is different, it will be reset + rng3 = period_range("5/15/2000", "6/20/2000", freq="D", name="other") + expected3 = period_range("6/1/2000", "6/20/2000", freq="D", name=None) + + rng4 = period_range("7/1/2000", "7/31/2000", freq="D", name="idx") + expected4 = PeriodIndex([], name="idx", freq="D") + + for rng, expected in [ + (rng2, expected2), + (rng3, expected3), + (rng4, expected4), + ]: + result = base.intersection(rng, sort=sort) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + # non-monotonic + base = PeriodIndex( + ["2011-01-05", "2011-01-04", "2011-01-02", "2011-01-03"], + freq="D", + name="idx", + ) + + rng2 = PeriodIndex( + ["2011-01-04", "2011-01-02", "2011-02-02", "2011-02-03"], + freq="D", + name="idx", + ) + expected2 = PeriodIndex(["2011-01-04", "2011-01-02"], freq="D", 
name="idx") + + rng3 = PeriodIndex( + ["2011-01-04", "2011-01-02", "2011-02-02", "2011-02-03"], + freq="D", + name="other", + ) + expected3 = PeriodIndex(["2011-01-04", "2011-01-02"], freq="D", name=None) + + rng4 = period_range("7/1/2000", "7/31/2000", freq="D", name="idx") + expected4 = PeriodIndex([], freq="D", name="idx") + + for rng, expected in [ + (rng2, expected2), + (rng3, expected3), + (rng4, expected4), + ]: + result = base.intersection(rng, sort=sort) + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == "D" + + # empty same freq + rng = date_range("6/1/2000", "6/15/2000", freq="T") + result = rng[0:0].intersection(rng) + assert len(result) == 0 + + result = rng.intersection(rng[0:0]) + assert len(result) == 0 + + def test_difference(self, sort): + # diff + period_rng = ["1/3/2000", "1/2/2000", "1/1/2000", "1/5/2000", "1/4/2000"] + rng1 = PeriodIndex(period_rng, freq="D") + other1 = period_range("1/6/2000", freq="D", periods=5) + expected1 = rng1 + + rng2 = PeriodIndex(period_rng, freq="D") + other2 = period_range("1/4/2000", freq="D", periods=5) + expected2 = PeriodIndex(["1/3/2000", "1/2/2000", "1/1/2000"], freq="D") + + rng3 = PeriodIndex(period_rng, freq="D") + other3 = PeriodIndex([], freq="D") + expected3 = rng3 + + period_rng = [ + "2000-01-01 10:00", + "2000-01-01 09:00", + "2000-01-01 12:00", + "2000-01-01 11:00", + "2000-01-01 13:00", + ] + rng4 = PeriodIndex(period_rng, freq="H") + other4 = period_range("2000-01-02 09:00", freq="H", periods=5) + expected4 = rng4 + + rng5 = PeriodIndex( + ["2000-01-01 09:03", "2000-01-01 09:01", "2000-01-01 09:05"], freq="T" + ) + other5 = PeriodIndex(["2000-01-01 09:01", "2000-01-01 09:05"], freq="T") + expected5 = PeriodIndex(["2000-01-01 09:03"], freq="T") + + period_rng = [ + "2000-02-01", + "2000-01-01", + "2000-06-01", + "2000-07-01", + "2000-05-01", + "2000-03-01", + "2000-04-01", + ] + rng6 = PeriodIndex(period_rng, freq="M") + other6 = period_range("2000-04-01", freq="M", periods=7) + expected6 = PeriodIndex(["2000-02-01", "2000-01-01", "2000-03-01"], freq="M") + + period_rng = ["2003", "2007", "2006", "2005", "2004"] + rng7 = PeriodIndex(period_rng, freq="A") + other7 = period_range("1998-01-01", freq="A", periods=8) + expected7 = PeriodIndex(["2007", "2006"], freq="A") + + for rng, other, expected in [ + (rng1, other1, expected1), + (rng2, other2, expected2), + (rng3, other3, expected3), + (rng4, other4, expected4), + (rng5, other5, expected5), + (rng6, other6, expected6), + (rng7, other7, expected7), + ]: + result_difference = rng.difference(other, sort=sort) + if sort is None and len(other): + # We dont sort (yet?) 
when empty GH#24959 + expected = expected.sort_values() + tm.assert_index_equal(result_difference, expected) + + def test_difference_freq(self, sort): + # GH14323: difference of Period MUST preserve frequency + # but the ability to union results must be preserved + + index = period_range("20160920", "20160925", freq="D") + + other = period_range("20160921", "20160924", freq="D") + expected = PeriodIndex(["20160920", "20160925"], freq="D") + idx_diff = index.difference(other, sort) + tm.assert_index_equal(idx_diff, expected) + tm.assert_attr_equal("freq", idx_diff, expected) + + other = period_range("20160922", "20160925", freq="D") + idx_diff = index.difference(other, sort) + expected = PeriodIndex(["20160920", "20160921"], freq="D") + tm.assert_index_equal(idx_diff, expected) + tm.assert_attr_equal("freq", idx_diff, expected) + + def test_intersection_equal_duplicates(self): + # GH#38302 + idx = period_range("2011-01-01", periods=2) + idx_dup = idx.append(idx) + result = idx_dup.intersection(idx_dup) + tm.assert_index_equal(result, idx) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_union_duplicates(self): + # GH#36289 + idx = period_range("2011-01-01", periods=2) + idx_dup = idx.append(idx) + + idx2 = period_range("2011-01-02", periods=2) + idx2_dup = idx2.append(idx2) + result = idx_dup.union(idx2_dup) + + expected = PeriodIndex( + [ + "2011-01-01", + "2011-01-01", + "2011-01-02", + "2011-01-02", + "2011-01-03", + "2011-01-03", + ], + freq="D", + ) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_tools.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_tools.py new file mode 100644 index 00000000..13509bd5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/period/test_tools.py @@ -0,0 +1,52 @@ +import numpy as np +import pytest + +from pandas import ( + Period, + PeriodIndex, + period_range, +) +import pandas._testing as tm + + +class TestPeriodRepresentation: + """ + Wish to match NumPy units + """ + + @pytest.mark.parametrize( + "freq, base_date", + [ + ("W-THU", "1970-01-01"), + ("D", "1970-01-01"), + ("B", "1970-01-01"), + ("H", "1970-01-01"), + ("T", "1970-01-01"), + ("S", "1970-01-01"), + ("L", "1970-01-01"), + ("U", "1970-01-01"), + ("N", "1970-01-01"), + ("M", "1970-01"), + ("A", 1970), + ], + ) + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + @pytest.mark.filterwarnings( + "ignore:Period with BDay freq is deprecated:FutureWarning" + ) + def test_freq(self, freq, base_date): + rng = period_range(start=base_date, periods=10, freq=freq) + exp = np.arange(10, dtype=np.int64) + + tm.assert_numpy_array_equal(rng.asi8, exp) + + +class TestPeriodIndexConversion: + def test_tolist(self): + index = period_range(freq="A", start="1/1/2001", end="12/1/2009") + rs = index.tolist() + for x in rs: + assert isinstance(x, Period) + + recon = PeriodIndex(rs) + tm.assert_index_equal(index, recon) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_constructors.py new file mode 100644 index 00000000..5e6f1607 --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_constructors.py @@ -0,0 +1,164 @@ +from datetime import datetime + +import numpy as np +import pytest + +from pandas import ( + Index, + RangeIndex, + Series, +) +import pandas._testing as tm + + +class TestRangeIndexConstructors: + @pytest.mark.parametrize("name", [None, "foo"]) + @pytest.mark.parametrize( + "args, kwargs, start, stop, step", + [ + ((5,), {}, 0, 5, 1), + ((1, 5), {}, 1, 5, 1), + ((1, 5, 2), {}, 1, 5, 2), + ((0,), {}, 0, 0, 1), + ((0, 0), {}, 0, 0, 1), + ((), {"start": 0}, 0, 0, 1), + ((), {"stop": 0}, 0, 0, 1), + ], + ) + def test_constructor(self, args, kwargs, start, stop, step, name): + result = RangeIndex(*args, name=name, **kwargs) + expected = Index(np.arange(start, stop, step, dtype=np.int64), name=name) + assert isinstance(result, RangeIndex) + assert result.name is name + assert result._range == range(start, stop, step) + tm.assert_index_equal(result, expected, exact="equiv") + + def test_constructor_invalid_args(self): + msg = "RangeIndex\\(\\.\\.\\.\\) must be called with integers" + with pytest.raises(TypeError, match=msg): + RangeIndex() + + with pytest.raises(TypeError, match=msg): + RangeIndex(name="Foo") + + # we don't allow on a bare Index + msg = ( + r"Index\(\.\.\.\) must be called with a collection of some " + r"kind, 0 was passed" + ) + with pytest.raises(TypeError, match=msg): + Index(0) + + @pytest.mark.parametrize( + "args", + [ + Index(["a", "b"]), + Series(["a", "b"]), + np.array(["a", "b"]), + [], + np.arange(0, 10), + np.array([1]), + [1], + ], + ) + def test_constructor_additional_invalid_args(self, args): + msg = f"Value needs to be a scalar value, was type {type(args).__name__}" + with pytest.raises(TypeError, match=msg): + RangeIndex(args) + + @pytest.mark.parametrize("args", ["foo", datetime(2000, 1, 1, 0, 0)]) + def test_constructor_invalid_args_wrong_type(self, args): + msg = f"Wrong type {type(args)} for value {args}" + with pytest.raises(TypeError, match=msg): + RangeIndex(args) + + def test_constructor_same(self): + # pass thru w and w/o copy + index = RangeIndex(1, 5, 2) + result = RangeIndex(index, copy=False) + assert result.identical(index) + + result = RangeIndex(index, copy=True) + tm.assert_index_equal(result, index, exact=True) + + result = RangeIndex(index) + tm.assert_index_equal(result, index, exact=True) + + with pytest.raises( + ValueError, + match="Incorrect `dtype` passed: expected signed integer, received float64", + ): + RangeIndex(index, dtype="float64") + + def test_constructor_range_object(self): + result = RangeIndex(range(1, 5, 2)) + expected = RangeIndex(1, 5, 2) + tm.assert_index_equal(result, expected, exact=True) + + def test_constructor_range(self): + result = RangeIndex.from_range(range(1, 5, 2)) + expected = RangeIndex(1, 5, 2) + tm.assert_index_equal(result, expected, exact=True) + + result = RangeIndex.from_range(range(5, 6)) + expected = RangeIndex(5, 6, 1) + tm.assert_index_equal(result, expected, exact=True) + + # an invalid range + result = RangeIndex.from_range(range(5, 1)) + expected = RangeIndex(0, 0, 1) + tm.assert_index_equal(result, expected, exact=True) + + result = RangeIndex.from_range(range(5)) + expected = RangeIndex(0, 5, 1) + tm.assert_index_equal(result, expected, exact=True) + + result = Index(range(1, 5, 2)) + expected = RangeIndex(1, 5, 2) + tm.assert_index_equal(result, expected, exact=True) + + msg = ( + r"(RangeIndex.)?from_range\(\) got an unexpected keyword argument( 'copy')?" 
+ ) + with pytest.raises(TypeError, match=msg): + RangeIndex.from_range(range(10), copy=True) + + def test_constructor_name(self): + # GH#12288 + orig = RangeIndex(10) + orig.name = "original" + + copy = RangeIndex(orig) + copy.name = "copy" + + assert orig.name == "original" + assert copy.name == "copy" + + new = Index(copy) + assert new.name == "copy" + + new.name = "new" + assert orig.name == "original" + assert copy.name == "copy" + assert new.name == "new" + + def test_constructor_corner(self): + arr = np.array([1, 2, 3, 4], dtype=object) + index = RangeIndex(1, 5) + assert index.values.dtype == np.int64 + expected = Index(arr).astype("int64") + + tm.assert_index_equal(index, expected, exact="equiv") + + # non-int raise Exception + with pytest.raises(TypeError, match=r"Wrong type \<class 'str'\>"): + RangeIndex("1", "10", "1") + with pytest.raises(TypeError, match=r"Wrong type \<class 'float'\>"): + RangeIndex(1.1, 10.2, 1.3) + + # invalid passed type + with pytest.raises( + ValueError, + match="Incorrect `dtype` passed: expected signed integer, received float64", + ): + RangeIndex(1, 5, dtype="float64") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_indexing.py new file mode 100644 index 00000000..6202074a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_indexing.py @@ -0,0 +1,137 @@ +import numpy as np +import pytest + +from pandas import ( + Index, + RangeIndex, +) +import pandas._testing as tm + + +class TestGetIndexer: + def test_get_indexer(self): + index = RangeIndex(start=0, stop=20, step=2) + target = RangeIndex(10) + indexer = index.get_indexer(target) + expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + def test_get_indexer_pad(self): + index = RangeIndex(start=0, stop=20, step=2) + target = RangeIndex(10) + indexer = index.get_indexer(target, method="pad") + expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + def test_get_indexer_backfill(self): + index = RangeIndex(start=0, stop=20, step=2) + target = RangeIndex(10) + indexer = index.get_indexer(target, method="backfill") + expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + def test_get_indexer_limit(self): + # GH#28631 + idx = RangeIndex(4) + target = RangeIndex(6) + result = idx.get_indexer(target, method="pad", limit=1) + expected = np.array([0, 1, 2, 3, 3, -1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("stop", [0, -1, -2]) + def test_get_indexer_decreasing(self, stop): + # GH#28678 + index = RangeIndex(7, stop, -3) + result = index.get_indexer(range(9)) + expected = np.array([-1, 2, -1, -1, 1, -1, -1, 0, -1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + +class TestTake: + def test_take_preserve_name(self): + index = RangeIndex(1, 5, name="foo") + taken = index.take([3, 0, 1]) + assert index.name == taken.name + + def test_take_fill_value(self): + # GH#12631 + idx = RangeIndex(1, 4, name="xxx") + result = idx.take(np.array([1, 0, -1])) + expected = Index([2, 1, 3], dtype=np.int64, name="xxx") + tm.assert_index_equal(result, expected) + + # fill_value + msg = "Unable to fill values because RangeIndex cannot contain NA" + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -1]), 
fill_value=True) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = Index([2, 1, 3], dtype=np.int64, name="xxx") + tm.assert_index_equal(result, expected) + + msg = "Unable to fill values because RangeIndex cannot contain NA" + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + def test_take_raises_index_error(self): + idx = RangeIndex(1, 4, name="xxx") + + msg = "index -5 is out of bounds for (axis 0 with )?size 3" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + msg = "index -4 is out of bounds for (axis 0 with )?size 3" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -4])) + + # no errors + result = idx.take(np.array([1, -3])) + expected = Index([2, 1], dtype=np.int64, name="xxx") + tm.assert_index_equal(result, expected) + + def test_take_accepts_empty_array(self): + idx = RangeIndex(1, 4, name="foo") + result = idx.take(np.array([])) + expected = Index([], dtype=np.int64, name="foo") + tm.assert_index_equal(result, expected) + + # empty index + idx = RangeIndex(0, name="foo") + result = idx.take(np.array([])) + expected = Index([], dtype=np.int64, name="foo") + tm.assert_index_equal(result, expected) + + def test_take_accepts_non_int64_array(self): + idx = RangeIndex(1, 4, name="foo") + result = idx.take(np.array([2, 1], dtype=np.uint32)) + expected = Index([3, 2], dtype=np.int64, name="foo") + tm.assert_index_equal(result, expected) + + def test_take_when_index_has_step(self): + idx = RangeIndex(1, 11, 3, name="foo") # [1, 4, 7, 10] + result = idx.take(np.array([1, 0, -1, -4])) + expected = Index([4, 1, 10, 1], dtype=np.int64, name="foo") + tm.assert_index_equal(result, expected) + + def test_take_when_index_has_negative_step(self): + idx = RangeIndex(11, -4, -2, name="foo") # [11, 9, 7, 5, 3, 1, -1, -3] + result = idx.take(np.array([1, 0, -1, -8])) + expected = Index([9, 11, -3, 11], dtype=np.int64, name="foo") + tm.assert_index_equal(result, expected) + + +class TestWhere: + def test_where_putmask_range_cast(self): + # GH#43240 + idx = RangeIndex(0, 5, name="test") + + mask = np.array([True, True, False, False, False]) + result = idx.putmask(mask, 10) + expected = Index([10, 10, 2, 3, 4], dtype=np.int64, name="test") + tm.assert_index_equal(result, expected) + + result = idx.where(~mask, 10) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_join.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_join.py new file mode 100644 index 00000000..682b5c8d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_join.py @@ -0,0 +1,177 @@ +import numpy as np + +from pandas import ( + Index, + RangeIndex, +) +import pandas._testing as tm + + +class TestJoin: + def test_join_outer(self): + # join with Index[int64] + index = RangeIndex(start=0, stop=20, step=2) + other = Index(np.arange(25, 14, -1, dtype=np.int64)) + + res, lidx, ridx = index.join(other, how="outer", return_indexers=True) + noidx_res = index.join(other, how="outer") + tm.assert_index_equal(res, noidx_res) + + eres = Index( + [0, 2, 4, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + ) + elidx = np.array( + [0, 1, 2, 3, 4, 5, 6, 7, -1, 8, -1, 9, -1, -1, -1, -1, -1, -1, -1], + dtype=np.intp, + ) + eridx = np.array( + [-1, -1, -1, 
-1, -1, -1, -1, -1, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0], + dtype=np.intp, + ) + + assert isinstance(res, Index) and res.dtype == np.dtype(np.int64) + assert not isinstance(res, RangeIndex) + tm.assert_index_equal(res, eres, exact=True) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # join with RangeIndex + other = RangeIndex(25, 14, -1) + + res, lidx, ridx = index.join(other, how="outer", return_indexers=True) + noidx_res = index.join(other, how="outer") + tm.assert_index_equal(res, noidx_res) + + assert isinstance(res, Index) and res.dtype == np.int64 + assert not isinstance(res, RangeIndex) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_inner(self): + # Join with non-RangeIndex + index = RangeIndex(start=0, stop=20, step=2) + other = Index(np.arange(25, 14, -1, dtype=np.int64)) + + res, lidx, ridx = index.join(other, how="inner", return_indexers=True) + + # no guarantee of sortedness, so sort for comparison purposes + ind = res.argsort() + res = res.take(ind) + lidx = lidx.take(ind) + ridx = ridx.take(ind) + + eres = Index([16, 18]) + elidx = np.array([8, 9], dtype=np.intp) + eridx = np.array([9, 7], dtype=np.intp) + + assert isinstance(res, Index) and res.dtype == np.int64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # Join two RangeIndex + other = RangeIndex(25, 14, -1) + + res, lidx, ridx = index.join(other, how="inner", return_indexers=True) + + assert isinstance(res, RangeIndex) + tm.assert_index_equal(res, eres, exact="equiv") + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_left(self): + # Join with Index[int64] + index = RangeIndex(start=0, stop=20, step=2) + other = Index(np.arange(25, 14, -1, dtype=np.int64)) + + res, lidx, ridx = index.join(other, how="left", return_indexers=True) + eres = index + eridx = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 9, 7], dtype=np.intp) + + assert isinstance(res, RangeIndex) + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + # Join with RangeIndex + other = Index(np.arange(25, 14, -1, dtype=np.int64)) + + res, lidx, ridx = index.join(other, how="left", return_indexers=True) + + assert isinstance(res, RangeIndex) + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_right(self): + # Join with Index[int64] + index = RangeIndex(start=0, stop=20, step=2) + other = Index(np.arange(25, 14, -1, dtype=np.int64)) + + res, lidx, ridx = index.join(other, how="right", return_indexers=True) + eres = other + elidx = np.array([-1, -1, -1, -1, -1, -1, -1, 9, -1, 8, -1], dtype=np.intp) + + assert isinstance(other, Index) and other.dtype == np.int64 + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + assert ridx is None + + # Join with RangeIndex + other = RangeIndex(25, 14, -1) + + res, lidx, ridx = index.join(other, how="right", return_indexers=True) + eres = other + + assert isinstance(other, RangeIndex) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + assert ridx is None + + def test_join_non_int_index(self): + index = RangeIndex(start=0, stop=20, step=2) + other = Index([3, 6, 7, 8, 10], dtype=object) + + outer = index.join(other, how="outer") + outer2 = other.join(index, how="outer") + expected = Index([0, 2, 3, 4, 6, 7, 8, 
10, 12, 14, 16, 18]) + tm.assert_index_equal(outer, outer2) + tm.assert_index_equal(outer, expected) + + inner = index.join(other, how="inner") + inner2 = other.join(index, how="inner") + expected = Index([6, 8, 10]) + tm.assert_index_equal(inner, inner2) + tm.assert_index_equal(inner, expected) + + left = index.join(other, how="left") + tm.assert_index_equal(left, index.astype(object)) + + left2 = other.join(index, how="left") + tm.assert_index_equal(left2, other) + + right = index.join(other, how="right") + tm.assert_index_equal(right, other) + + right2 = other.join(index, how="right") + tm.assert_index_equal(right2, index.astype(object)) + + def test_join_non_unique(self): + index = RangeIndex(start=0, stop=20, step=2) + other = Index([4, 4, 3, 3]) + + res, lidx, ridx = index.join(other, return_indexers=True) + + eres = Index([0, 2, 4, 4, 6, 8, 10, 12, 14, 16, 18]) + elidx = np.array([0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp) + eridx = np.array([-1, -1, 0, 1, -1, -1, -1, -1, -1, -1, -1], dtype=np.intp) + + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_self(self, join_type): + index = RangeIndex(start=0, stop=20, step=2) + joined = index.join(index, how=join_type) + assert index is joined diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_range.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_range.py new file mode 100644 index 00000000..5f137df2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_range.py @@ -0,0 +1,609 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.common import ensure_platform_int + +import pandas as pd +from pandas import ( + Index, + RangeIndex, +) +import pandas._testing as tm + +# aliases to make some tests easier to read +RI = RangeIndex + + +class TestRangeIndex: + @pytest.fixture + def simple_index(self): + return RangeIndex(start=0, stop=20, step=2) + + def test_constructor_unwraps_index(self): + result = RangeIndex(1, 3) + expected = np.array([1, 2], dtype=np.int64) + tm.assert_numpy_array_equal(result._data, expected) + + def test_can_hold_identifiers(self, simple_index): + idx = simple_index + key = idx[0] + assert idx._can_hold_identifiers_and_holds_name(key) is False + + def test_too_many_names(self, simple_index): + index = simple_index + with pytest.raises(ValueError, match="^Length"): + index.names = ["roger", "harold"] + + @pytest.mark.parametrize( + "index, start, stop, step", + [ + (RangeIndex(5), 0, 5, 1), + (RangeIndex(0, 5), 0, 5, 1), + (RangeIndex(5, step=2), 0, 5, 2), + (RangeIndex(1, 5, 2), 1, 5, 2), + ], + ) + def test_start_stop_step_attrs(self, index, start, stop, step): + # GH 25710 + assert index.start == start + assert index.stop == stop + assert index.step == step + + def test_copy(self): + i = RangeIndex(5, name="Foo") + i_copy = i.copy() + assert i_copy is not i + assert i_copy.identical(i) + assert i_copy._range == range(0, 5, 1) + assert i_copy.name == "Foo" + + def test_repr(self): + i = RangeIndex(5, name="Foo") + result = repr(i) + expected = "RangeIndex(start=0, stop=5, step=1, name='Foo')" + assert result == expected + + result = eval(result) + tm.assert_index_equal(result, i, exact=True) + + i = RangeIndex(5, 0, -1) + result = repr(i) + expected = "RangeIndex(start=5, stop=0, step=-1)" + assert result == expected + + result = eval(result) + tm.assert_index_equal(result, i, exact=True) + + def test_insert(self): + 
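        # Background for the expectations below: RangeIndex.insert only keeps
        # the RangeIndex type when the result is still an evenly spaced range;
        # otherwise it falls back to a materialized Index, and inserting a
        # missing value upcasts. A doctest-style sketch (assuming pandas 2.x):
        #
        #   >>> import pandas as pd, numpy as np
        #   >>> pd.RangeIndex(5).insert(1, np.nan).dtype
        #   dtype('float64')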
idx = RangeIndex(5, name="Foo") + result = idx[1:4] + + # test 0th element + tm.assert_index_equal(idx[0:4], result.insert(0, idx[0]), exact="equiv") + + # GH 18295 (test missing) + expected = Index([0, np.nan, 1, 2, 3, 4], dtype=np.float64) + for na in [np.nan, None, pd.NA]: + result = RangeIndex(5).insert(1, na) + tm.assert_index_equal(result, expected) + + result = RangeIndex(5).insert(1, pd.NaT) + expected = Index([0, pd.NaT, 1, 2, 3, 4], dtype=object) + tm.assert_index_equal(result, expected) + + def test_insert_edges_preserves_rangeindex(self): + idx = Index(range(4, 9, 2)) + + result = idx.insert(0, 2) + expected = Index(range(2, 9, 2)) + tm.assert_index_equal(result, expected, exact=True) + + result = idx.insert(3, 10) + expected = Index(range(4, 11, 2)) + tm.assert_index_equal(result, expected, exact=True) + + def test_insert_middle_preserves_rangeindex(self): + # insert in the middle + idx = Index(range(0, 3, 2)) + result = idx.insert(1, 1) + expected = Index(range(3)) + tm.assert_index_equal(result, expected, exact=True) + + idx = idx * 2 + result = idx.insert(1, 2) + expected = expected * 2 + tm.assert_index_equal(result, expected, exact=True) + + def test_delete(self): + idx = RangeIndex(5, name="Foo") + expected = idx[1:] + result = idx.delete(0) + tm.assert_index_equal(result, expected, exact=True) + assert result.name == expected.name + + expected = idx[:-1] + result = idx.delete(-1) + tm.assert_index_equal(result, expected, exact=True) + assert result.name == expected.name + + msg = "index 5 is out of bounds for axis 0 with size 5" + with pytest.raises((IndexError, ValueError), match=msg): + # either depending on numpy version + result = idx.delete(len(idx)) + + def test_delete_preserves_rangeindex(self): + idx = Index(range(2), name="foo") + + result = idx.delete([1]) + expected = Index(range(1), name="foo") + tm.assert_index_equal(result, expected, exact=True) + + result = idx.delete(1) + tm.assert_index_equal(result, expected, exact=True) + + def test_delete_preserves_rangeindex_middle(self): + idx = Index(range(3), name="foo") + result = idx.delete(1) + expected = idx[::2] + tm.assert_index_equal(result, expected, exact=True) + + result = idx.delete(-2) + tm.assert_index_equal(result, expected, exact=True) + + def test_delete_preserves_rangeindex_list_at_end(self): + idx = RangeIndex(0, 6, 1) + + loc = [2, 3, 4, 5] + result = idx.delete(loc) + expected = idx[:2] + tm.assert_index_equal(result, expected, exact=True) + + result = idx.delete(loc[::-1]) + tm.assert_index_equal(result, expected, exact=True) + + def test_delete_preserves_rangeindex_list_middle(self): + idx = RangeIndex(0, 6, 1) + + loc = [1, 2, 3, 4] + result = idx.delete(loc) + expected = RangeIndex(0, 6, 5) + tm.assert_index_equal(result, expected, exact=True) + + result = idx.delete(loc[::-1]) + tm.assert_index_equal(result, expected, exact=True) + + def test_delete_all_preserves_rangeindex(self): + idx = RangeIndex(0, 6, 1) + + loc = [0, 1, 2, 3, 4, 5] + result = idx.delete(loc) + expected = idx[:0] + tm.assert_index_equal(result, expected, exact=True) + + result = idx.delete(loc[::-1]) + tm.assert_index_equal(result, expected, exact=True) + + def test_delete_not_preserving_rangeindex(self): + idx = RangeIndex(0, 6, 1) + + loc = [0, 3, 5] + result = idx.delete(loc) + expected = Index([1, 2, 4]) + tm.assert_index_equal(result, expected, exact=True) + + result = idx.delete(loc[::-1]) + tm.assert_index_equal(result, expected, exact=True) + + def test_view(self): + i = RangeIndex(0, name="Foo") + i_view = 
i.view() + assert i_view.name == "Foo" + + i_view = i.view("i8") + tm.assert_numpy_array_equal(i.values, i_view) + + i_view = i.view(RangeIndex) + tm.assert_index_equal(i, i_view) + + def test_dtype(self, simple_index): + index = simple_index + assert index.dtype == np.int64 + + def test_cache(self): + # GH 26565, GH26617, GH35432, GH53387 + # This test checks whether _cache has been set. + # Calling RangeIndex._cache["_data"] creates an int64 array of the same length + # as the RangeIndex and stores it in _cache. + idx = RangeIndex(0, 100, 10) + + assert idx._cache == {} + + repr(idx) + assert idx._cache == {} + + str(idx) + assert idx._cache == {} + + idx.get_loc(20) + assert idx._cache == {} + + 90 in idx # True + assert idx._cache == {} + + 91 in idx # False + assert idx._cache == {} + + idx.all() + assert idx._cache == {} + + idx.any() + assert idx._cache == {} + + for _ in idx: + pass + assert idx._cache == {} + + idx.format() + assert idx._cache == {} + + df = pd.DataFrame({"a": range(10)}, index=idx) + + str(df) + assert idx._cache == {} + + df.loc[50] + assert idx._cache == {} + + with pytest.raises(KeyError, match="51"): + df.loc[51] + assert idx._cache == {} + + df.loc[10:50] + assert idx._cache == {} + + df.iloc[5:10] + assert idx._cache == {} + + # after calling take, _cache may contain other keys, but not "_data" + idx.take([3, 0, 1]) + assert "_data" not in idx._cache + + df.loc[[50]] + assert "_data" not in idx._cache + + df.iloc[[5, 6, 7, 8, 9]] + assert "_data" not in idx._cache + + # idx._cache should contain a _data entry after call to idx._data + idx._data + assert isinstance(idx._data, np.ndarray) + assert idx._data is idx._data # check cached value is reused + assert "_data" in idx._cache + expected = np.arange(0, 100, 10, dtype="int64") + tm.assert_numpy_array_equal(idx._cache["_data"], expected) + + def test_is_monotonic(self): + index = RangeIndex(0, 20, 2) + assert index.is_monotonic_increasing is True + assert index.is_monotonic_increasing is True + assert index.is_monotonic_decreasing is False + assert index._is_strictly_monotonic_increasing is True + assert index._is_strictly_monotonic_decreasing is False + + index = RangeIndex(4, 0, -1) + assert index.is_monotonic_increasing is False + assert index._is_strictly_monotonic_increasing is False + assert index.is_monotonic_decreasing is True + assert index._is_strictly_monotonic_decreasing is True + + index = RangeIndex(1, 2) + assert index.is_monotonic_increasing is True + assert index.is_monotonic_increasing is True + assert index.is_monotonic_decreasing is True + assert index._is_strictly_monotonic_increasing is True + assert index._is_strictly_monotonic_decreasing is True + + index = RangeIndex(2, 1) + assert index.is_monotonic_increasing is True + assert index.is_monotonic_increasing is True + assert index.is_monotonic_decreasing is True + assert index._is_strictly_monotonic_increasing is True + assert index._is_strictly_monotonic_decreasing is True + + index = RangeIndex(1, 1) + assert index.is_monotonic_increasing is True + assert index.is_monotonic_increasing is True + assert index.is_monotonic_decreasing is True + assert index._is_strictly_monotonic_increasing is True + assert index._is_strictly_monotonic_decreasing is True + + @pytest.mark.parametrize( + "left,right", + [ + (RangeIndex(0, 9, 2), RangeIndex(0, 10, 2)), + (RangeIndex(0), RangeIndex(1, -1, 3)), + (RangeIndex(1, 2, 3), RangeIndex(1, 3, 4)), + (RangeIndex(0, -9, -2), RangeIndex(0, -10, -2)), + ], + ) + def test_equals_range(self, left, 
right): + assert left.equals(right) + assert right.equals(left) + + def test_logical_compat(self, simple_index): + idx = simple_index + assert idx.all() == idx.values.all() + assert idx.any() == idx.values.any() + + def test_identical(self, simple_index): + index = simple_index + i = Index(index.copy()) + assert i.identical(index) + + # we don't allow object dtype for RangeIndex + if isinstance(index, RangeIndex): + return + + same_values_different_type = Index(i, dtype=object) + assert not i.identical(same_values_different_type) + + i = index.copy(dtype=object) + i = i.rename("foo") + same_values = Index(i, dtype=object) + assert same_values.identical(index.copy(dtype=object)) + + assert not i.identical(index) + assert Index(same_values, name="foo", dtype=object).identical(i) + + assert not index.copy(dtype=object).identical(index.copy(dtype="int64")) + + def test_nbytes(self): + # memory savings vs int index + idx = RangeIndex(0, 1000) + assert idx.nbytes < Index(idx._values).nbytes / 10 + + # constant memory usage + i2 = RangeIndex(0, 10) + assert idx.nbytes == i2.nbytes + + @pytest.mark.parametrize( + "start,stop,step", + [ + # can't + ("foo", "bar", "baz"), + # shouldn't + ("0", "1", "2"), + ], + ) + def test_cant_or_shouldnt_cast(self, start, stop, step): + msg = f"Wrong type {type(start)} for value {start}" + with pytest.raises(TypeError, match=msg): + RangeIndex(start, stop, step) + + def test_view_index(self, simple_index): + index = simple_index + index.view(Index) + + def test_prevent_casting(self, simple_index): + index = simple_index + result = index.astype("O") + assert result.dtype == np.object_ + + def test_repr_roundtrip(self, simple_index): + index = simple_index + tm.assert_index_equal(eval(repr(index)), index) + + def test_slice_keep_name(self): + idx = RangeIndex(1, 2, name="asdf") + assert idx.name == idx[1:].name + + @pytest.mark.parametrize( + "index", + [ + RangeIndex(start=0, stop=20, step=2, name="foo"), + RangeIndex(start=18, stop=-1, step=-2, name="bar"), + ], + ids=["index_inc", "index_dec"], + ) + def test_has_duplicates(self, index): + assert index.is_unique + assert not index.has_duplicates + + def test_extended_gcd(self, simple_index): + index = simple_index + result = index._extended_gcd(6, 10) + assert result[0] == result[1] * 6 + result[2] * 10 + assert 2 == result[0] + + result = index._extended_gcd(10, 6) + assert 2 == result[1] * 10 + result[2] * 6 + assert 2 == result[0] + + def test_min_fitting_element(self): + result = RangeIndex(0, 20, 2)._min_fitting_element(1) + assert 2 == result + + result = RangeIndex(1, 6)._min_fitting_element(1) + assert 1 == result + + result = RangeIndex(18, -2, -2)._min_fitting_element(1) + assert 2 == result + + result = RangeIndex(5, 0, -1)._min_fitting_element(1) + assert 1 == result + + big_num = 500000000000000000000000 + + result = RangeIndex(5, big_num * 2, 1)._min_fitting_element(big_num) + assert big_num == result + + def test_slice_specialised(self, simple_index): + index = simple_index + index.name = "foo" + + # scalar indexing + res = index[1] + expected = 2 + assert res == expected + + res = index[-1] + expected = 18 + assert res == expected + + # slicing + # slice value completion + index_slice = index[:] + expected = index + tm.assert_index_equal(index_slice, expected) + + # positive slice values + index_slice = index[7:10:2] + expected = Index([14, 18], name="foo") + tm.assert_index_equal(index_slice, expected, exact="equiv") + + # negative slice values + index_slice = index[-1:-5:-2] + expected = 
Index([18, 14], name="foo") + tm.assert_index_equal(index_slice, expected, exact="equiv") + + # stop overshoot + index_slice = index[2:100:4] + expected = Index([4, 12], name="foo") + tm.assert_index_equal(index_slice, expected, exact="equiv") + + # reverse + index_slice = index[::-1] + expected = Index(index.values[::-1], name="foo") + tm.assert_index_equal(index_slice, expected, exact="equiv") + + index_slice = index[-8::-1] + expected = Index([4, 2, 0], name="foo") + tm.assert_index_equal(index_slice, expected, exact="equiv") + + index_slice = index[-40::-1] + expected = Index(np.array([], dtype=np.int64), name="foo") + tm.assert_index_equal(index_slice, expected, exact="equiv") + + index_slice = index[40::-1] + expected = Index(index.values[40::-1], name="foo") + tm.assert_index_equal(index_slice, expected, exact="equiv") + + index_slice = index[10::-1] + expected = Index(index.values[::-1], name="foo") + tm.assert_index_equal(index_slice, expected, exact="equiv") + + @pytest.mark.parametrize("step", set(range(-5, 6)) - {0}) + def test_len_specialised(self, step): + # make sure that our len is the same as np.arange calc + start, stop = (0, 5) if step > 0 else (5, 0) + + arr = np.arange(start, stop, step) + index = RangeIndex(start, stop, step) + assert len(index) == len(arr) + + index = RangeIndex(stop, start, step) + assert len(index) == 0 + + @pytest.mark.parametrize( + "indices, expected", + [ + ([RI(1, 12, 5)], RI(1, 12, 5)), + ([RI(0, 6, 4)], RI(0, 6, 4)), + ([RI(1, 3), RI(3, 7)], RI(1, 7)), + ([RI(1, 5, 2), RI(5, 6)], RI(1, 6, 2)), + ([RI(1, 3, 2), RI(4, 7, 3)], RI(1, 7, 3)), + ([RI(-4, 3, 2), RI(4, 7, 2)], RI(-4, 7, 2)), + ([RI(-4, -8), RI(-8, -12)], RI(0, 0)), + ([RI(-4, -8), RI(3, -4)], RI(0, 0)), + ([RI(-4, -8), RI(3, 5)], RI(3, 5)), + ([RI(-4, -2), RI(3, 5)], Index([-4, -3, 3, 4])), + ([RI(-2), RI(3, 5)], RI(3, 5)), + ([RI(2), RI(2)], Index([0, 1, 0, 1])), + ([RI(2), RI(2, 5), RI(5, 8, 4)], RI(0, 6)), + ([RI(2), RI(3, 5), RI(5, 8, 4)], Index([0, 1, 3, 4, 5])), + ([RI(-2, 2), RI(2, 5), RI(5, 8, 4)], RI(-2, 6)), + ([RI(3), Index([-1, 3, 15])], Index([0, 1, 2, -1, 3, 15])), + ([RI(3), Index([-1, 3.1, 15.0])], Index([0, 1, 2, -1, 3.1, 15.0])), + ([RI(3), Index(["a", None, 14])], Index([0, 1, 2, "a", None, 14])), + ([RI(3, 1), Index(["a", None, 14])], Index(["a", None, 14])), + ], + ) + def test_append(self, indices, expected): + # GH16212 + result = indices[0].append(indices[1:]) + tm.assert_index_equal(result, expected, exact=True) + + if len(indices) == 2: + # Append single item rather than list + result2 = indices[0].append(indices[1]) + tm.assert_index_equal(result2, expected, exact=True) + + def test_engineless_lookup(self): + # GH 16685 + # Standard lookup on RangeIndex should not require the engine to be + # created + idx = RangeIndex(2, 10, 3) + + assert idx.get_loc(5) == 1 + tm.assert_numpy_array_equal( + idx.get_indexer([2, 8]), ensure_platform_int(np.array([0, 2])) + ) + with pytest.raises(KeyError, match="3"): + idx.get_loc(3) + + assert "_engine" not in idx._cache + + # Different types of scalars can be excluded immediately, no need to + # use the _engine + with pytest.raises(KeyError, match="'a'"): + idx.get_loc("a") + + assert "_engine" not in idx._cache + + def test_format_empty(self): + # GH35712 + empty_idx = RangeIndex(0) + assert empty_idx.format() == [] + assert empty_idx.format(name=True) == [""] + + @pytest.mark.parametrize( + "RI", + [ + RangeIndex(0, -1, -1), + RangeIndex(0, 1, 1), + RangeIndex(1, 3, 2), + RangeIndex(0, -1, -2), + RangeIndex(-3, -5, -2), 
+ ], + ) + def test_append_len_one(self, RI): + # GH39401 + result = RI.append([]) + tm.assert_index_equal(result, RI, exact=True) + + @pytest.mark.parametrize("base", [RangeIndex(0, 2), Index([0, 1])]) + def test_isin_range(self, base): + # GH#41151 + values = RangeIndex(0, 1) + result = base.isin(values) + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + def test_sort_values_key(self): + # GH#43666, GH#52764 + sort_order = {8: 2, 6: 0, 4: 8, 2: 10, 0: 12} + values = RangeIndex(0, 10, 2) + result = values.sort_values(key=lambda x: x.map(sort_order)) + expected = Index([6, 8, 4, 2, 0], dtype="int64") + tm.assert_index_equal(result, expected, check_exact=True) + + # check this matches the Series.sort_values behavior + ser = values.to_series() + result2 = ser.sort_values(key=lambda x: x.map(sort_order)) + tm.assert_series_equal(result2, expected.to_series(), check_exact=True) + + def test_range_index_rsub_by_const(self): + # GH#53255 + result = 3 - RangeIndex(0, 4, 1) + expected = RangeIndex(3, -1, -1) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_setops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_setops.py new file mode 100644 index 00000000..d417b8b7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_setops.py @@ -0,0 +1,493 @@ +from datetime import ( + datetime, + timedelta, +) + +from hypothesis import ( + assume, + given, + strategies as st, +) +import numpy as np +import pytest + +from pandas import ( + Index, + RangeIndex, +) +import pandas._testing as tm + + +class TestRangeIndexSetOps: + @pytest.mark.parametrize("dtype", [None, "int64", "uint64"]) + def test_intersection_mismatched_dtype(self, dtype): + # check that we cast to float, not object + index = RangeIndex(start=0, stop=20, step=2, name="foo") + index = Index(index, dtype=dtype) + + flt = index.astype(np.float64) + + # bc index.equals(flt), we go through fastpath and get RangeIndex back + result = index.intersection(flt) + tm.assert_index_equal(result, index, exact=True) + + result = flt.intersection(index) + tm.assert_index_equal(result, flt, exact=True) + + # neither empty, not-equals + result = index.intersection(flt[1:]) + tm.assert_index_equal(result, flt[1:], exact=True) + + result = flt[1:].intersection(index) + tm.assert_index_equal(result, flt[1:], exact=True) + + # empty other + result = index.intersection(flt[:0]) + tm.assert_index_equal(result, flt[:0], exact=True) + + result = flt[:0].intersection(index) + tm.assert_index_equal(result, flt[:0], exact=True) + + def test_intersection_empty(self, sort, names): + # name retention on empty intersections + index = RangeIndex(start=0, stop=20, step=2, name=names[0]) + + # empty other + result = index.intersection(index[:0].rename(names[1]), sort=sort) + tm.assert_index_equal(result, index[:0].rename(names[2]), exact=True) + + # empty self + result = index[:0].intersection(index.rename(names[1]), sort=sort) + tm.assert_index_equal(result, index[:0].rename(names[2]), exact=True) + + def test_intersection(self, sort): + # intersect with Index with dtype int64 + index = RangeIndex(start=0, stop=20, step=2) + other = Index(np.arange(1, 6)) + result = index.intersection(other, sort=sort) + expected = Index(np.sort(np.intersect1d(index.values, other.values))) + tm.assert_index_equal(result, expected) + + result = other.intersection(index, sort=sort) + expected = Index( + 
np.sort(np.asarray(np.intersect1d(index.values, other.values))) + ) + tm.assert_index_equal(result, expected) + + # intersect with increasing RangeIndex + other = RangeIndex(1, 6) + result = index.intersection(other, sort=sort) + expected = Index(np.sort(np.intersect1d(index.values, other.values))) + tm.assert_index_equal(result, expected, exact="equiv") + + # intersect with decreasing RangeIndex + other = RangeIndex(5, 0, -1) + result = index.intersection(other, sort=sort) + expected = Index(np.sort(np.intersect1d(index.values, other.values))) + tm.assert_index_equal(result, expected, exact="equiv") + + # reversed (GH 17296) + result = other.intersection(index, sort=sort) + tm.assert_index_equal(result, expected, exact="equiv") + + # GH 17296: intersect two decreasing RangeIndexes + first = RangeIndex(10, -2, -2) + other = RangeIndex(5, -4, -1) + expected = first.astype(int).intersection(other.astype(int), sort=sort) + result = first.intersection(other, sort=sort).astype(int) + tm.assert_index_equal(result, expected) + + # reversed + result = other.intersection(first, sort=sort).astype(int) + tm.assert_index_equal(result, expected) + + index = RangeIndex(5, name="foo") + + # intersect of non-overlapping indices + other = RangeIndex(5, 10, 1, name="foo") + result = index.intersection(other, sort=sort) + expected = RangeIndex(0, 0, 1, name="foo") + tm.assert_index_equal(result, expected) + + other = RangeIndex(-1, -5, -1) + result = index.intersection(other, sort=sort) + expected = RangeIndex(0, 0, 1) + tm.assert_index_equal(result, expected) + + # intersection of empty indices + other = RangeIndex(0, 0, 1) + result = index.intersection(other, sort=sort) + expected = RangeIndex(0, 0, 1) + tm.assert_index_equal(result, expected) + + result = other.intersection(index, sort=sort) + tm.assert_index_equal(result, expected) + + def test_intersection_non_overlapping_gcd(self, sort, names): + # intersection of non-overlapping values based on start value and gcd + index = RangeIndex(1, 10, 2, name=names[0]) + other = RangeIndex(0, 10, 4, name=names[1]) + result = index.intersection(other, sort=sort) + expected = RangeIndex(0, 0, 1, name=names[2]) + tm.assert_index_equal(result, expected) + + def test_union_noncomparable(self, sort): + # corner case, Index with non-int64 dtype + index = RangeIndex(start=0, stop=20, step=2) + other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object) + result = index.union(other, sort=sort) + expected = Index(np.concatenate((index, other))) + tm.assert_index_equal(result, expected) + + result = other.union(index, sort=sort) + expected = Index(np.concatenate((other, index))) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "idx1, idx2, expected_sorted, expected_notsorted", + [ + ( + RangeIndex(0, 10, 1), + RangeIndex(0, 10, 1), + RangeIndex(0, 10, 1), + RangeIndex(0, 10, 1), + ), + ( + RangeIndex(0, 10, 1), + RangeIndex(5, 20, 1), + RangeIndex(0, 20, 1), + RangeIndex(0, 20, 1), + ), + ( + RangeIndex(0, 10, 1), + RangeIndex(10, 20, 1), + RangeIndex(0, 20, 1), + RangeIndex(0, 20, 1), + ), + ( + RangeIndex(0, -10, -1), + RangeIndex(0, -10, -1), + RangeIndex(0, -10, -1), + RangeIndex(0, -10, -1), + ), + ( + RangeIndex(0, -10, -1), + RangeIndex(-10, -20, -1), + RangeIndex(-19, 1, 1), + RangeIndex(0, -20, -1), + ), + ( + RangeIndex(0, 10, 2), + RangeIndex(1, 10, 2), + RangeIndex(0, 10, 1), + Index(list(range(0, 10, 2)) + list(range(1, 10, 2))), + ), + ( + RangeIndex(0, 11, 2), + RangeIndex(1, 12, 2), + RangeIndex(0, 12, 1), + 
Index(list(range(0, 11, 2)) + list(range(1, 12, 2))),
+            ),
+            (
+                RangeIndex(0, 21, 4),
+                RangeIndex(-2, 24, 4),
+                RangeIndex(-2, 24, 2),
+                Index(list(range(0, 21, 4)) + list(range(-2, 24, 4))),
+            ),
+            (
+                RangeIndex(0, -20, -2),
+                RangeIndex(-1, -21, -2),
+                RangeIndex(-19, 1, 1),
+                Index(list(range(0, -20, -2)) + list(range(-1, -21, -2))),
+            ),
+            (
+                RangeIndex(0, 100, 5),
+                RangeIndex(0, 100, 20),
+                RangeIndex(0, 100, 5),
+                RangeIndex(0, 100, 5),
+            ),
+            (
+                RangeIndex(0, -100, -5),
+                RangeIndex(5, -100, -20),
+                RangeIndex(-95, 10, 5),
+                Index(list(range(0, -100, -5)) + [5]),
+            ),
+            (
+                RangeIndex(0, -11, -1),
+                RangeIndex(1, -12, -4),
+                RangeIndex(-11, 2, 1),
+                Index(list(range(0, -11, -1)) + [1, -11]),
+            ),
+            (RangeIndex(0), RangeIndex(0), RangeIndex(0), RangeIndex(0)),
+            (
+                RangeIndex(0, -10, -2),
+                RangeIndex(0),
+                RangeIndex(0, -10, -2),
+                RangeIndex(0, -10, -2),
+            ),
+            (
+                RangeIndex(0, 100, 2),
+                RangeIndex(100, 150, 200),
+                RangeIndex(0, 102, 2),
+                RangeIndex(0, 102, 2),
+            ),
+            (
+                RangeIndex(0, -100, -2),
+                RangeIndex(-100, 50, 102),
+                RangeIndex(-100, 4, 2),
+                Index(list(range(0, -100, -2)) + [-100, 2]),
+            ),
+            (
+                RangeIndex(0, -100, -1),
+                RangeIndex(0, -50, -3),
+                RangeIndex(-99, 1, 1),
+                RangeIndex(0, -100, -1),
+            ),
+            (
+                RangeIndex(0, 1, 1),
+                RangeIndex(5, 6, 10),
+                RangeIndex(0, 6, 5),
+                RangeIndex(0, 10, 5),
+            ),
+            (
+                RangeIndex(0, 10, 5),
+                RangeIndex(-5, -6, -20),
+                RangeIndex(-5, 10, 5),
+                Index([0, 5, -5]),
+            ),
+            (
+                RangeIndex(0, 3, 1),
+                RangeIndex(4, 5, 1),
+                Index([0, 1, 2, 4]),
+                Index([0, 1, 2, 4]),
+            ),
+            (
+                RangeIndex(0, 10, 1),
+                Index([], dtype=np.int64),
+                RangeIndex(0, 10, 1),
+                RangeIndex(0, 10, 1),
+            ),
+            (
+                RangeIndex(0),
+                Index([1, 5, 6]),
+                Index([1, 5, 6]),
+                Index([1, 5, 6]),
+            ),
+            # GH 43885
+            (
+                RangeIndex(0, 10),
+                RangeIndex(0, 5),
+                RangeIndex(0, 10),
+                RangeIndex(0, 10),
+            ),
+        ],
+        ids=lambda x: repr(x) if isinstance(x, RangeIndex) else x,
+    )
+    def test_union_sorted(self, idx1, idx2, expected_sorted, expected_notsorted):
+        res1 = idx1.union(idx2, sort=None)
+        tm.assert_index_equal(res1, expected_sorted, exact=True)
+
+        res1 = idx1.union(idx2, sort=False)
+        tm.assert_index_equal(res1, expected_notsorted, exact=True)
+
+        res2 = idx2.union(idx1, sort=None)
+        res3 = Index(idx1._values, name=idx1.name).union(idx2, sort=None)
+        tm.assert_index_equal(res2, expected_sorted, exact=True)
+        tm.assert_index_equal(res3, expected_sorted, exact="equiv")
+
+    def test_union_same_step_misaligned(self):
+        # GH#44019
+        left = RangeIndex(range(0, 20, 4))
+        right = RangeIndex(range(1, 21, 4))
+
+        result = left.union(right)
+        expected = Index([0, 1, 4, 5, 8, 9, 12, 13, 16, 17])
+        tm.assert_index_equal(result, expected, exact=True)
+
+    def test_difference(self):
+        # GH#12034 Cases where we operate against another RangeIndex and may
+        # get back another RangeIndex
+        obj = RangeIndex.from_range(range(1, 10), name="foo")
+
+        result = obj.difference(obj)
+        expected = RangeIndex.from_range(range(0), name="foo")
+        tm.assert_index_equal(result, expected, exact=True)
+
+        result = obj.difference(expected.rename("bar"))
+        tm.assert_index_equal(result, obj.rename(None), exact=True)
+
+        result = obj.difference(obj[:3])
+        tm.assert_index_equal(result, obj[3:], exact=True)
+
+        result = obj.difference(obj[-3:])
+        tm.assert_index_equal(result, obj[:-3], exact=True)
+
+        # Flipping the step of 'other' doesn't affect the result, but
+        # flipping the step of 'self' does when sort=None
+        result = obj[::-1].difference(obj[-3:])
+        tm.assert_index_equal(result, obj[:-3],
exact=True) + + result = obj[::-1].difference(obj[-3:], sort=False) + tm.assert_index_equal(result, obj[:-3][::-1], exact=True) + + result = obj[::-1].difference(obj[-3:][::-1]) + tm.assert_index_equal(result, obj[:-3], exact=True) + + result = obj[::-1].difference(obj[-3:][::-1], sort=False) + tm.assert_index_equal(result, obj[:-3][::-1], exact=True) + + result = obj.difference(obj[2:6]) + expected = Index([1, 2, 7, 8, 9], name="foo") + tm.assert_index_equal(result, expected, exact=True) + + def test_difference_sort(self): + # GH#44085 ensure we respect the sort keyword + + idx = Index(range(4))[::-1] + other = Index(range(3, 4)) + + result = idx.difference(other) + expected = Index(range(3)) + tm.assert_index_equal(result, expected, exact=True) + + result = idx.difference(other, sort=False) + expected = expected[::-1] + tm.assert_index_equal(result, expected, exact=True) + + # case where the intersection is empty + other = range(10, 12) + result = idx.difference(other, sort=None) + expected = idx[::-1] + tm.assert_index_equal(result, expected, exact=True) + + def test_difference_mismatched_step(self): + obj = RangeIndex.from_range(range(1, 10), name="foo") + + result = obj.difference(obj[::2]) + expected = obj[1::2] + tm.assert_index_equal(result, expected, exact=True) + + result = obj[::-1].difference(obj[::2], sort=False) + tm.assert_index_equal(result, expected[::-1], exact=True) + + result = obj.difference(obj[1::2]) + expected = obj[::2] + tm.assert_index_equal(result, expected, exact=True) + + result = obj[::-1].difference(obj[1::2], sort=False) + tm.assert_index_equal(result, expected[::-1], exact=True) + + def test_difference_interior_overlap_endpoints_preserved(self): + left = RangeIndex(range(4)) + right = RangeIndex(range(1, 3)) + + result = left.difference(right) + expected = RangeIndex(0, 4, 3) + assert expected.tolist() == [0, 3] + tm.assert_index_equal(result, expected, exact=True) + + def test_difference_endpoints_overlap_interior_preserved(self): + left = RangeIndex(-8, 20, 7) + right = RangeIndex(13, -9, -3) + + result = left.difference(right) + expected = RangeIndex(-1, 13, 7) + assert expected.tolist() == [-1, 6] + tm.assert_index_equal(result, expected, exact=True) + + def test_difference_interior_non_preserving(self): + # case with intersection of length 1 but RangeIndex is not preserved + idx = Index(range(10)) + + other = idx[3:4] + result = idx.difference(other) + expected = Index([0, 1, 2, 4, 5, 6, 7, 8, 9]) + tm.assert_index_equal(result, expected, exact=True) + + # case with other.step / self.step > 2 + other = idx[::3] + result = idx.difference(other) + expected = Index([1, 2, 4, 5, 7, 8]) + tm.assert_index_equal(result, expected, exact=True) + + # cases with only reaching one end of left + obj = Index(range(20)) + other = obj[:10:2] + result = obj.difference(other) + expected = Index([1, 3, 5, 7, 9] + list(range(10, 20))) + tm.assert_index_equal(result, expected, exact=True) + + other = obj[1:11:2] + result = obj.difference(other) + expected = Index([0, 2, 4, 6, 8, 10] + list(range(11, 20))) + tm.assert_index_equal(result, expected, exact=True) + + def test_symmetric_difference(self): + # GH#12034 Cases where we operate against another RangeIndex and may + # get back another RangeIndex + left = RangeIndex.from_range(range(1, 10), name="foo") + + result = left.symmetric_difference(left) + expected = RangeIndex.from_range(range(0), name="foo") + tm.assert_index_equal(result, expected) + + result = left.symmetric_difference(expected.rename("bar")) + 
tm.assert_index_equal(result, left.rename(None)) + + result = left[:-2].symmetric_difference(left[2:]) + expected = Index([1, 2, 8, 9], name="foo") + tm.assert_index_equal(result, expected, exact=True) + + right = RangeIndex.from_range(range(10, 15)) + + result = left.symmetric_difference(right) + expected = RangeIndex.from_range(range(1, 15)) + tm.assert_index_equal(result, expected) + + result = left.symmetric_difference(right[1:]) + expected = Index([1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14]) + tm.assert_index_equal(result, expected, exact=True) + + +def assert_range_or_not_is_rangelike(index): + """ + Check that we either have a RangeIndex or that this index *cannot* + be represented as a RangeIndex. + """ + if not isinstance(index, RangeIndex) and len(index) > 0: + diff = index[:-1] - index[1:] + assert not (diff == diff[0]).all() + + +@given( + st.integers(-20, 20), + st.integers(-20, 20), + st.integers(-20, 20), + st.integers(-20, 20), + st.integers(-20, 20), + st.integers(-20, 20), +) +def test_range_difference(start1, stop1, step1, start2, stop2, step2): + # test that + # a) we match Index[int64].difference and + # b) we return RangeIndex whenever it is possible to do so. + assume(step1 != 0) + assume(step2 != 0) + + left = RangeIndex(start1, stop1, step1) + right = RangeIndex(start2, stop2, step2) + + result = left.difference(right, sort=None) + assert_range_or_not_is_rangelike(result) + + left_int64 = Index(left.to_numpy()) + right_int64 = Index(right.to_numpy()) + + alt = left_int64.difference(right_int64, sort=None) + tm.assert_index_equal(result, alt, exact="equiv") + + result = left.difference(right, sort=False) + assert_range_or_not_is_rangelike(result) + + alt = left_int64.difference(right_int64, sort=False) + tm.assert_index_equal(result, alt, exact="equiv") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_any_index.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_any_index.py new file mode 100644 index 00000000..10204cfb --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_any_index.py @@ -0,0 +1,172 @@ +""" +Tests that can be parametrized over _any_ Index object. +""" +import re + +import numpy as np +import pytest + +from pandas.errors import InvalidIndexError + +import pandas._testing as tm + + +def test_boolean_context_compat(index): + # GH#7897 + with pytest.raises(ValueError, match="The truth value of a"): + if index: + pass + + with pytest.raises(ValueError, match="The truth value of a"): + bool(index) + + +def test_sort(index): + msg = "cannot sort an Index object in-place, use sort_values instead" + with pytest.raises(TypeError, match=msg): + index.sort() + + +def test_hash_error(index): + with pytest.raises(TypeError, match=f"unhashable type: '{type(index).__name__}'"): + hash(index) + + +def test_mutability(index): + if not len(index): + pytest.skip("Test doesn't make sense for empty index") + msg = "Index does not support mutable operations" + with pytest.raises(TypeError, match=msg): + index[0] = index[0] + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_map_identity_mapping(index, request): + # GH#12766 + + result = index.map(lambda x: x) + if index.dtype == object and result.dtype == bool: + assert (index == result).all() + # TODO: could work that into the 'exact="equiv"'? + return # FIXME: doesn't belong in this file anymore! 
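+    # (illustrative sketch, not part of the upstream test; `pd` is the usual
+    # pandas import alias) the contract checked below, on a concrete example:
+    #
+    #     pd.Index([1, 2, 3]).map(lambda x: x)
+    #     # -> Index([1, 2, 3], dtype='int64'): values and dtype round-trip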
+ tm.assert_index_equal(result, index, exact="equiv") + + +def test_wrong_number_names(index): + names = index.nlevels * ["apple", "banana", "carrot"] + with pytest.raises(ValueError, match="^Length"): + index.names = names + + +def test_view_preserves_name(index): + assert index.view().name == index.name + + +def test_ravel(index): + # GH#19956 ravel returning ndarray is deprecated, in 2.0 returns a view on self + res = index.ravel() + tm.assert_index_equal(res, index) + + +class TestConversion: + def test_to_series(self, index): + # assert that we are creating a copy of the index + + ser = index.to_series() + assert ser.values is not index.values + assert ser.index is not index + assert ser.name == index.name + + def test_to_series_with_arguments(self, index): + # GH#18699 + + # index kwarg + ser = index.to_series(index=index) + + assert ser.values is not index.values + assert ser.index is index + assert ser.name == index.name + + # name kwarg + ser = index.to_series(name="__test") + + assert ser.values is not index.values + assert ser.index is not index + assert ser.name != index.name + + def test_tolist_matches_list(self, index): + assert index.tolist() == list(index) + + +class TestRoundTrips: + def test_pickle_roundtrip(self, index): + result = tm.round_trip_pickle(index) + tm.assert_index_equal(result, index, exact=True) + if result.nlevels > 1: + # GH#8367 round-trip with timezone + assert index.equal_levels(result) + + def test_pickle_preserves_name(self, index): + original_name, index.name = index.name, "foo" + unpickled = tm.round_trip_pickle(index) + assert index.equals(unpickled) + index.name = original_name + + +class TestIndexing: + def test_get_loc_listlike_raises_invalid_index_error(self, index): + # and never TypeError + key = np.array([0, 1], dtype=np.intp) + + with pytest.raises(InvalidIndexError, match=r"\[0 1\]"): + index.get_loc(key) + + with pytest.raises(InvalidIndexError, match=r"\[False True\]"): + index.get_loc(key.astype(bool)) + + def test_getitem_ellipsis(self, index): + # GH#21282 + result = index[...] 
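+    # (illustrative sketch, not part of the upstream test; `pd` is the usual
+    # pandas import alias) GH#21282 pins down that `index[...]` returns an
+    # equal but distinct object, e.g.:
+    #
+    #     idx = pd.Index([1, 2])
+    #     idx[...] is idx        # False
+    #     idx[...].equals(idx)   # True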
+ assert result.equals(index) + assert result is not index + + def test_slice_keeps_name(self, index): + assert index.name == index[1:].name + + @pytest.mark.parametrize("item", [101, "no_int", 2.5]) + def test_getitem_error(self, index, item): + msg = "|".join( + [ + r"index 101 is out of bounds for axis 0 with size [\d]+", + re.escape( + "only integers, slices (`:`), ellipsis (`...`), " + "numpy.newaxis (`None`) and integer or boolean arrays " + "are valid indices" + ), + "index out of bounds", # string[pyarrow] + ] + ) + with pytest.raises(IndexError, match=msg): + index[item] + + +class TestRendering: + def test_str(self, index): + # test the string repr + index.name = "foo" + assert "'foo'" in str(index) + assert type(index).__name__ in str(index) + + +class TestReductions: + def test_argmax_axis_invalid(self, index): + # GH#23081 + msg = r"`axis` must be fewer than the number of dimensions \(1\)" + with pytest.raises(ValueError, match=msg): + index.argmax(axis=1) + with pytest.raises(ValueError, match=msg): + index.argmin(axis=2) + with pytest.raises(ValueError, match=msg): + index.min(axis=-2) + with pytest.raises(ValueError, match=msg): + index.max(axis=-3) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_base.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_base.py new file mode 100644 index 00000000..44b77447 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_base.py @@ -0,0 +1,1644 @@ +from collections import defaultdict +from datetime import datetime +from io import StringIO +import math +import operator +import re + +import numpy as np +import pytest + +from pandas.compat import IS64 +from pandas.errors import InvalidIndexError +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import ( + is_any_real_numeric_dtype, + is_numeric_dtype, + is_object_dtype, +) + +import pandas as pd +from pandas import ( + CategoricalIndex, + DataFrame, + DatetimeIndex, + IntervalIndex, + PeriodIndex, + RangeIndex, + Series, + TimedeltaIndex, + date_range, + period_range, +) +import pandas._testing as tm +from pandas.core.indexes.api import ( + Index, + MultiIndex, + _get_combined_index, + ensure_index, + ensure_index_from_sequences, +) + + +class TestIndex: + @pytest.fixture + def simple_index(self) -> Index: + return Index(list("abcde")) + + def test_can_hold_identifiers(self, simple_index): + index = simple_index + key = index[0] + assert index._can_hold_identifiers_and_holds_name(key) is True + + @pytest.mark.parametrize("index", ["datetime"], indirect=True) + def test_new_axis(self, index): + # TODO: a bunch of scattered tests check this deprecation is enforced. + # de-duplicate/centralize them. 
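+        # (illustrative sketch, not part of the upstream test) the enforced
+        # behavior on a plain DatetimeIndex:
+        #
+        #     date_range("2020-01-01", periods=3)[None, :]
+        #     # -> ValueError: Multi-dimensional indexing ... is no longer supported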
+ with pytest.raises(ValueError, match="Multi-dimensional indexing"): + # GH#30588 multi-dimensional indexing deprecated + index[None, :] + + def test_constructor_regular(self, index): + tm.assert_contains_all(index, index) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_constructor_casting(self, index): + # casting + arr = np.array(index) + new_index = Index(arr) + tm.assert_contains_all(arr, new_index) + tm.assert_index_equal(index, new_index) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_constructor_copy(self, index): + arr = np.array(index) + new_index = Index(arr, copy=True, name="name") + assert isinstance(new_index, Index) + assert new_index.name == "name" + tm.assert_numpy_array_equal(arr, new_index.values) + arr[0] = "SOMEBIGLONGSTRING" + assert new_index[0] != "SOMEBIGLONGSTRING" + + @pytest.mark.parametrize("cast_as_obj", [True, False]) + @pytest.mark.parametrize( + "index", + [ + date_range( + "2015-01-01 10:00", + freq="D", + periods=3, + tz="US/Eastern", + name="Green Eggs & Ham", + ), # DTI with tz + date_range("2015-01-01 10:00", freq="D", periods=3), # DTI no tz + pd.timedelta_range("1 days", freq="D", periods=3), # td + period_range("2015-01-01", freq="D", periods=3), # period + ], + ) + def test_constructor_from_index_dtlike(self, cast_as_obj, index): + if cast_as_obj: + result = Index(index.astype(object)) + else: + result = Index(index) + + tm.assert_index_equal(result, index) + + if isinstance(index, DatetimeIndex): + assert result.tz == index.tz + if cast_as_obj: + # GH#23524 check that Index(dti, dtype=object) does not + # incorrectly raise ValueError, and that nanoseconds are not + # dropped + index += pd.Timedelta(nanoseconds=50) + result = Index(index, dtype=object) + assert result.dtype == np.object_ + assert list(result) == list(index) + + @pytest.mark.parametrize( + "index,has_tz", + [ + ( + date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"), + True, + ), # datetimetz + (pd.timedelta_range("1 days", freq="D", periods=3), False), # td + (period_range("2015-01-01", freq="D", periods=3), False), # period + ], + ) + def test_constructor_from_series_dtlike(self, index, has_tz): + result = Index(Series(index)) + tm.assert_index_equal(result, index) + + if has_tz: + assert result.tz == index.tz + + def test_constructor_from_series_freq(self): + # GH 6273 + # create from a series, passing a freq + dts = ["1-1-1990", "2-1-1990", "3-1-1990", "4-1-1990", "5-1-1990"] + expected = DatetimeIndex(dts, freq="MS") + + s = Series(pd.to_datetime(dts)) + result = DatetimeIndex(s, freq="MS") + + tm.assert_index_equal(result, expected) + + def test_constructor_from_frame_series_freq(self): + # GH 6273 + # create from a series, passing a freq + dts = ["1-1-1990", "2-1-1990", "3-1-1990", "4-1-1990", "5-1-1990"] + expected = DatetimeIndex(dts, freq="MS") + + df = DataFrame(np.random.default_rng(2).random((5, 3))) + df["date"] = dts + result = DatetimeIndex(df["date"], freq="MS") + + assert df["date"].dtype == object + expected.name = "date" + tm.assert_index_equal(result, expected) + + expected = Series(dts, name="date") + tm.assert_series_equal(df["date"], expected) + + # GH 6274 + # infer freq of same + freq = pd.infer_freq(df["date"]) + assert freq == "MS" + + def test_constructor_int_dtype_nan(self): + # see gh-15187 + data = [np.nan] + expected = Index(data, dtype=np.float64) + result = Index(data, dtype="float") + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + 
"klass,dtype,na_val", + [ + (Index, np.float64, np.nan), + (DatetimeIndex, "datetime64[ns]", pd.NaT), + ], + ) + def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val): + # GH 13467 + na_list = [na_val, na_val] + expected = klass(na_list) + assert expected.dtype == dtype + + result = Index(na_list) + tm.assert_index_equal(result, expected) + + result = Index(np.array(na_list)) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "vals,dtype", + [ + ([1, 2, 3, 4, 5], "int"), + ([1.1, np.nan, 2.2, 3.0], "float"), + (["A", "B", "C", np.nan], "obj"), + ], + ) + def test_constructor_simple_new(self, vals, dtype): + index = Index(vals, name=dtype) + result = index._simple_new(index.values, dtype) + tm.assert_index_equal(result, index) + + @pytest.mark.parametrize("attr", ["values", "asi8"]) + @pytest.mark.parametrize("klass", [Index, DatetimeIndex]) + def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, klass): + # Test constructing with a datetimetz dtype + # .values produces numpy datetimes, so these are considered naive + # .asi8 produces integers, so these are considered epoch timestamps + # ^the above will be true in a later version. Right now we `.view` + # the i8 values as NS_DTYPE, effectively treating them as wall times. + index = date_range("2011-01-01", periods=5) + arg = getattr(index, attr) + index = index.tz_localize(tz_naive_fixture) + dtype = index.dtype + + # As of 2.0 astype raises on dt64.astype(dt64tz) + err = tz_naive_fixture is not None + msg = "Cannot use .astype to convert from timezone-naive dtype to" + + if attr == "asi8": + result = DatetimeIndex(arg).tz_localize(tz_naive_fixture) + tm.assert_index_equal(result, index) + elif klass is Index: + with pytest.raises(TypeError, match="unexpected keyword"): + klass(arg, tz=tz_naive_fixture) + else: + result = klass(arg, tz=tz_naive_fixture) + tm.assert_index_equal(result, index) + + if attr == "asi8": + if err: + with pytest.raises(TypeError, match=msg): + DatetimeIndex(arg).astype(dtype) + else: + result = DatetimeIndex(arg).astype(dtype) + tm.assert_index_equal(result, index) + else: + result = klass(arg, dtype=dtype) + tm.assert_index_equal(result, index) + + if attr == "asi8": + result = DatetimeIndex(list(arg)).tz_localize(tz_naive_fixture) + tm.assert_index_equal(result, index) + elif klass is Index: + with pytest.raises(TypeError, match="unexpected keyword"): + klass(arg, tz=tz_naive_fixture) + else: + result = klass(list(arg), tz=tz_naive_fixture) + tm.assert_index_equal(result, index) + + if attr == "asi8": + if err: + with pytest.raises(TypeError, match=msg): + DatetimeIndex(list(arg)).astype(dtype) + else: + result = DatetimeIndex(list(arg)).astype(dtype) + tm.assert_index_equal(result, index) + else: + result = klass(list(arg), dtype=dtype) + tm.assert_index_equal(result, index) + + @pytest.mark.parametrize("attr", ["values", "asi8"]) + @pytest.mark.parametrize("klass", [Index, TimedeltaIndex]) + def test_constructor_dtypes_timedelta(self, attr, klass): + index = pd.timedelta_range("1 days", periods=5) + index = index._with_freq(None) # won't be preserved by constructors + dtype = index.dtype + + values = getattr(index, attr) + + result = klass(values, dtype=dtype) + tm.assert_index_equal(result, index) + + result = klass(list(values), dtype=dtype) + tm.assert_index_equal(result, index) + + @pytest.mark.parametrize("value", [[], iter([]), (_ for _ in [])]) + @pytest.mark.parametrize( + "klass", + [ + Index, + CategoricalIndex, + DatetimeIndex, + TimedeltaIndex, + ], + ) + 
def test_constructor_empty(self, value, klass): + empty = klass(value) + assert isinstance(empty, klass) + assert not len(empty) + + @pytest.mark.parametrize( + "empty,klass", + [ + (PeriodIndex([], freq="D"), PeriodIndex), + (PeriodIndex(iter([]), freq="D"), PeriodIndex), + (PeriodIndex((_ for _ in []), freq="D"), PeriodIndex), + (RangeIndex(step=1), RangeIndex), + (MultiIndex(levels=[[1, 2], ["blue", "red"]], codes=[[], []]), MultiIndex), + ], + ) + def test_constructor_empty_special(self, empty, klass): + assert isinstance(empty, klass) + assert not len(empty) + + @pytest.mark.parametrize( + "index", + [ + "datetime", + "float64", + "float32", + "int64", + "int32", + "period", + "range", + "repeats", + "timedelta", + "tuples", + "uint64", + "uint32", + ], + indirect=True, + ) + def test_view_with_args(self, index): + index.view("i8") + + @pytest.mark.parametrize( + "index", + [ + "string", + pytest.param("categorical", marks=pytest.mark.xfail(reason="gh-25464")), + "bool-object", + "bool-dtype", + "empty", + ], + indirect=True, + ) + def test_view_with_args_object_array_raises(self, index): + if index.dtype == bool: + msg = "When changing to a larger dtype" + with pytest.raises(ValueError, match=msg): + index.view("i8") + else: + msg = "Cannot change data-type for object array" + with pytest.raises(TypeError, match=msg): + index.view("i8") + + @pytest.mark.parametrize( + "index", + ["int64", "int32", "range"], + indirect=True, + ) + def test_astype(self, index): + casted = index.astype("i8") + + # it works! + casted.get_loc(5) + + # pass on name + index.name = "foobar" + casted = index.astype("i8") + assert casted.name == "foobar" + + def test_equals_object(self): + # same + assert Index(["a", "b", "c"]).equals(Index(["a", "b", "c"])) + + @pytest.mark.parametrize( + "comp", [Index(["a", "b"]), Index(["a", "b", "d"]), ["a", "b", "c"]] + ) + def test_not_equals_object(self, comp): + assert not Index(["a", "b", "c"]).equals(comp) + + def test_identical(self): + # index + i1 = Index(["a", "b", "c"]) + i2 = Index(["a", "b", "c"]) + + assert i1.identical(i2) + + i1 = i1.rename("foo") + assert i1.equals(i2) + assert not i1.identical(i2) + + i2 = i2.rename("foo") + assert i1.identical(i2) + + i3 = Index([("a", "a"), ("a", "b"), ("b", "a")]) + i4 = Index([("a", "a"), ("a", "b"), ("b", "a")], tupleize_cols=False) + assert not i3.identical(i4) + + def test_is_(self): + ind = Index(range(10)) + assert ind.is_(ind) + assert ind.is_(ind.view().view().view().view()) + assert not ind.is_(Index(range(10))) + assert not ind.is_(ind.copy()) + assert not ind.is_(ind.copy(deep=False)) + assert not ind.is_(ind[:]) + assert not ind.is_(np.array(range(10))) + + # quasi-implementation dependent + assert ind.is_(ind.view()) + ind2 = ind.view() + ind2.name = "bob" + assert ind.is_(ind2) + assert ind2.is_(ind) + # doesn't matter if Indices are *actually* views of underlying data, + assert not ind.is_(Index(ind.values)) + arr = np.array(range(1, 11)) + ind1 = Index(arr, copy=False) + ind2 = Index(arr, copy=False) + assert not ind1.is_(ind2) + + def test_asof_numeric_vs_bool_raises(self): + left = Index([1, 2, 3]) + right = Index([True, False], dtype=object) + + msg = "Cannot compare dtypes int64 and bool" + with pytest.raises(TypeError, match=msg): + left.asof(right[0]) + # TODO: should right.asof(left[0]) also raise? 
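+        # (illustrative addition, not part of the upstream test) for contrast
+        # with the raising cases below: on a sorted numeric index, asof returns
+        # the most recent label <= the key, or NaN if the key is below the min
+        assert left.asof(2.5) == 2
+        assert np.isnan(left.asof(0))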
+ + with pytest.raises(InvalidIndexError, match=re.escape(str(right))): + left.asof(right) + + with pytest.raises(InvalidIndexError, match=re.escape(str(left))): + right.asof(left) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_booleanindex(self, index): + bool_index = np.ones(len(index), dtype=bool) + bool_index[5:30:2] = False + + sub_index = index[bool_index] + + for i, val in enumerate(sub_index): + assert sub_index.get_loc(val) == i + + sub_index = index[list(bool_index)] + for i, val in enumerate(sub_index): + assert sub_index.get_loc(val) == i + + def test_fancy(self, simple_index): + index = simple_index + sl = index[[1, 2, 3]] + for i in sl: + assert i == sl[sl.get_loc(i)] + + @pytest.mark.parametrize( + "index", + ["string", "int64", "int32", "uint64", "uint32", "float64", "float32"], + indirect=True, + ) + @pytest.mark.parametrize("dtype", [int, np.bool_]) + def test_empty_fancy(self, index, dtype): + empty_arr = np.array([], dtype=dtype) + empty_index = type(index)([], dtype=index.dtype) + + assert index[[]].identical(empty_index) + assert index[empty_arr].identical(empty_index) + + @pytest.mark.parametrize( + "index", + ["string", "int64", "int32", "uint64", "uint32", "float64", "float32"], + indirect=True, + ) + def test_empty_fancy_raises(self, index): + # DatetimeIndex is excluded, because it overrides getitem and should + # be tested separately. + empty_farr = np.array([], dtype=np.float64) + empty_index = type(index)([], dtype=index.dtype) + + assert index[[]].identical(empty_index) + # np.ndarray only accepts ndarray of int & bool dtypes, so should Index + msg = r"arrays used as indices must be of integer \(or boolean\) type" + with pytest.raises(IndexError, match=msg): + index[empty_farr] + + def test_union_dt_as_obj(self, simple_index): + # TODO: Replace with fixturesult + index = simple_index + date_index = date_range("2019-01-01", periods=10) + first_cat = index.union(date_index) + second_cat = index.union(index) + + appended = np.append(index, date_index.astype("O")) + + assert tm.equalContents(first_cat, appended) + assert tm.equalContents(second_cat, index) + tm.assert_contains_all(index, first_cat) + tm.assert_contains_all(index, second_cat) + tm.assert_contains_all(date_index, first_cat) + + def test_map_with_tuples(self): + # GH 12766 + + # Test that returning a single tuple from an Index + # returns an Index. + index = tm.makeIntIndex(3) + result = tm.makeIntIndex(3).map(lambda x: (x,)) + expected = Index([(i,) for i in index]) + tm.assert_index_equal(result, expected) + + # Test that returning a tuple from a map of a single index + # returns a MultiIndex object. + result = index.map(lambda x: (x, x == 1)) + expected = MultiIndex.from_tuples([(i, i == 1) for i in index]) + tm.assert_index_equal(result, expected) + + def test_map_with_tuples_mi(self): + # Test that returning a single object from a MultiIndex + # returns an Index. 
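+        # (illustrative addition, not part of the upstream test) the collapsing
+        # behavior in miniature: mapping each tuple to its first element
+        mi = MultiIndex.from_tuples([("foo", 1), ("bar", 2)])
+        tm.assert_index_equal(mi.map(lambda x: x[0]), Index(["foo", "bar"]))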
+ first_level = ["foo", "bar", "baz"] + multi_index = MultiIndex.from_tuples(zip(first_level, [1, 2, 3])) + reduced_index = multi_index.map(lambda x: x[0]) + tm.assert_index_equal(reduced_index, Index(first_level)) + + @pytest.mark.parametrize( + "attr", ["makeDateIndex", "makePeriodIndex", "makeTimedeltaIndex"] + ) + def test_map_tseries_indices_return_index(self, attr): + index = getattr(tm, attr)(10) + expected = Index([1] * 10) + result = index.map(lambda x: 1) + tm.assert_index_equal(expected, result) + + def test_map_tseries_indices_accsr_return_index(self): + date_index = tm.makeDateIndex(24, freq="h", name="hourly") + result = date_index.map(lambda x: x.hour) + expected = Index(np.arange(24, dtype="int64"), name="hourly") + tm.assert_index_equal(result, expected, exact=True) + + @pytest.mark.parametrize( + "mapper", + [ + lambda values, index: {i: e for e, i in zip(values, index)}, + lambda values, index: Series(values, index), + ], + ) + def test_map_dictlike_simple(self, mapper): + # GH 12756 + expected = Index(["foo", "bar", "baz"]) + index = tm.makeIntIndex(3) + result = index.map(mapper(expected.values, index)) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "mapper", + [ + lambda values, index: {i: e for e, i in zip(values, index)}, + lambda values, index: Series(values, index), + ], + ) + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_map_dictlike(self, index, mapper, request): + # GH 12756 + if isinstance(index, CategoricalIndex): + pytest.skip("Tested in test_categorical") + elif not index.is_unique: + pytest.skip("Cannot map duplicated index") + + rng = np.arange(len(index), 0, -1, dtype=np.int64) + + if index.empty: + # to match proper result coercion for uints + expected = Index([]) + elif is_numeric_dtype(index.dtype): + expected = index._constructor(rng, dtype=index.dtype) + elif type(index) is Index and index.dtype != object: + # i.e. 
EA-backed, for now just Nullable + expected = Index(rng, dtype=index.dtype) + else: + expected = Index(rng) + + result = index.map(mapper(expected, index)) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "mapper", + [Series(["foo", 2.0, "baz"], index=[0, 2, -1]), {0: "foo", 2: 2.0, -1: "baz"}], + ) + def test_map_with_non_function_missing_values(self, mapper): + # GH 12756 + expected = Index([2.0, np.nan, "foo"]) + result = Index([2, 1, 0]).map(mapper) + + tm.assert_index_equal(expected, result) + + def test_map_na_exclusion(self): + index = Index([1.5, np.nan, 3, np.nan, 5]) + + result = index.map(lambda x: x * 2, na_action="ignore") + expected = index * 2 + tm.assert_index_equal(result, expected) + + def test_map_defaultdict(self): + index = Index([1, 2, 3]) + default_dict = defaultdict(lambda: "blank") + default_dict[1] = "stuff" + result = index.map(default_dict) + expected = Index(["stuff", "blank", "blank"]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("name,expected", [("foo", "foo"), ("bar", None)]) + def test_append_empty_preserve_name(self, name, expected): + left = Index([], name="foo") + right = Index([1, 2, 3], name=name) + + msg = "The behavior of array concatenation with empty entries is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = left.append(right) + assert result.name == expected + + @pytest.mark.parametrize( + "index, expected", + [ + ("string", False), + ("bool-object", False), + ("bool-dtype", False), + ("categorical", False), + ("int64", True), + ("int32", True), + ("uint64", True), + ("uint32", True), + ("datetime", False), + ("float64", True), + ("float32", True), + ], + indirect=["index"], + ) + def test_is_numeric(self, index, expected): + assert is_any_real_numeric_dtype(index) is expected + + @pytest.mark.parametrize( + "index, expected", + [ + ("string", True), + ("bool-object", True), + ("bool-dtype", False), + ("categorical", False), + ("int64", False), + ("int32", False), + ("uint64", False), + ("uint32", False), + ("datetime", False), + ("float64", False), + ("float32", False), + ], + indirect=["index"], + ) + def test_is_object(self, index, expected): + assert is_object_dtype(index) is expected + + def test_summary(self, index): + index._summary() + + def test_format_bug(self): + # GH 14626 + # windows has different precision on datetime.datetime.now (it doesn't + # include us since the default for Timestamp shows these but Index + # formatting does not we are skipping) + now = datetime.now() + if not str(now).endswith("000"): + index = Index([now]) + formatted = index.format() + expected = [str(index[0])] + assert formatted == expected + + Index([]).format() + + @pytest.mark.parametrize("vals", [[1, 2.0 + 3.0j, 4.0], ["a", "b", "c"]]) + def test_format_missing(self, vals, nulls_fixture): + # 2845 + vals = list(vals) # Copy for each iteration + vals.append(nulls_fixture) + index = Index(vals, dtype=object) + # TODO: case with complex dtype? 
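+        # (illustrative sketch, not part of the upstream test) e.g. a float NaN
+        # is expected to render as the string "NaN":
+        #
+        #     Index(["a", "b", "c", np.nan], dtype=object).format()
+        #     # -> ["a", "b", "c", "NaN"]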
+ + formatted = index.format() + null_repr = "NaN" if isinstance(nulls_fixture, float) else str(nulls_fixture) + expected = [str(index[0]), str(index[1]), str(index[2]), null_repr] + + assert formatted == expected + assert index[3] is nulls_fixture + + @pytest.mark.parametrize("op", ["any", "all"]) + def test_logical_compat(self, op, simple_index): + index = simple_index + left = getattr(index, op)() + assert left == getattr(index.values, op)() + right = getattr(index.to_series(), op)() + # left might not match right exactly in e.g. string cases where the + # because we use np.any/all instead of .any/all + assert bool(left) == bool(right) + + @pytest.mark.parametrize( + "index", ["string", "int64", "int32", "float64", "float32"], indirect=True + ) + def test_drop_by_str_label(self, index): + n = len(index) + drop = index[list(range(5, 10))] + dropped = index.drop(drop) + + expected = index[list(range(5)) + list(range(10, n))] + tm.assert_index_equal(dropped, expected) + + dropped = index.drop(index[0]) + expected = index[1:] + tm.assert_index_equal(dropped, expected) + + @pytest.mark.parametrize( + "index", ["string", "int64", "int32", "float64", "float32"], indirect=True + ) + @pytest.mark.parametrize("keys", [["foo", "bar"], ["1", "bar"]]) + def test_drop_by_str_label_raises_missing_keys(self, index, keys): + with pytest.raises(KeyError, match=""): + index.drop(keys) + + @pytest.mark.parametrize( + "index", ["string", "int64", "int32", "float64", "float32"], indirect=True + ) + def test_drop_by_str_label_errors_ignore(self, index): + n = len(index) + drop = index[list(range(5, 10))] + mixed = drop.tolist() + ["foo"] + dropped = index.drop(mixed, errors="ignore") + + expected = index[list(range(5)) + list(range(10, n))] + tm.assert_index_equal(dropped, expected) + + dropped = index.drop(["foo", "bar"], errors="ignore") + expected = index[list(range(n))] + tm.assert_index_equal(dropped, expected) + + def test_drop_by_numeric_label_loc(self): + # TODO: Parametrize numeric and str tests after self.strIndex fixture + index = Index([1, 2, 3]) + dropped = index.drop(1) + expected = Index([2, 3]) + + tm.assert_index_equal(dropped, expected) + + def test_drop_by_numeric_label_raises_missing_keys(self): + index = Index([1, 2, 3]) + with pytest.raises(KeyError, match=""): + index.drop([3, 4]) + + @pytest.mark.parametrize( + "key,expected", [(4, Index([1, 2, 3])), ([3, 4, 5], Index([1, 2]))] + ) + def test_drop_by_numeric_label_errors_ignore(self, key, expected): + index = Index([1, 2, 3]) + dropped = index.drop(key, errors="ignore") + + tm.assert_index_equal(dropped, expected) + + @pytest.mark.parametrize( + "values", + [["a", "b", ("c", "d")], ["a", ("c", "d"), "b"], [("c", "d"), "a", "b"]], + ) + @pytest.mark.parametrize("to_drop", [[("c", "d"), "a"], ["a", ("c", "d")]]) + def test_drop_tuple(self, values, to_drop): + # GH 18304 + index = Index(values) + expected = Index(["b"]) + + result = index.drop(to_drop) + tm.assert_index_equal(result, expected) + + removed = index.drop(to_drop[0]) + for drop_me in to_drop[1], [to_drop[1]]: + result = removed.drop(drop_me) + tm.assert_index_equal(result, expected) + + removed = index.drop(to_drop[1]) + msg = rf"\"\[{re.escape(to_drop[1].__repr__())}\] not found in axis\"" + for drop_me in to_drop[1], [to_drop[1]]: + with pytest.raises(KeyError, match=msg): + removed.drop(drop_me) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_drop_with_duplicates_in_index(self, index): + # GH38051 + if len(index) == 0 or 
+        if len(index) == 0 or isinstance(index, MultiIndex):
+            pytest.skip("Test doesn't make sense for empty MultiIndex")
+        if isinstance(index, IntervalIndex) and not IS64:
+            pytest.skip("Cannot test IntervalIndex with int64 dtype on 32 bit platform")
+        index = index.unique().repeat(2)
+        expected = index[2:]
+        result = index.drop(index[0])
+        tm.assert_index_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "attr",
+        [
+            "is_monotonic_increasing",
+            "is_monotonic_decreasing",
+            "_is_strictly_monotonic_increasing",
+            "_is_strictly_monotonic_decreasing",
+        ],
+    )
+    def test_is_monotonic_incomparable(self, attr):
+        index = Index([5, datetime.now(), 7])
+        assert not getattr(index, attr)
+
+    @pytest.mark.parametrize("values", [["foo", "bar", "quux"], {"foo", "bar", "quux"}])
+    @pytest.mark.parametrize(
+        "index,expected",
+        [
+            (Index(["qux", "baz", "foo", "bar"]), np.array([False, False, True, True])),
+            (Index([]), np.array([], dtype=bool)),  # empty
+        ],
+    )
+    def test_isin(self, values, index, expected):
+        result = index.isin(values)
+        tm.assert_numpy_array_equal(result, expected)
+
+    def test_isin_nan_common_object(self, nulls_fixture, nulls_fixture2):
+        # Test cartesian product of null fixtures and ensure that we don't
+        # mangle the various types (save a corner case with PyPy)
+
+        # all nans are the same
+        if (
+            isinstance(nulls_fixture, float)
+            and isinstance(nulls_fixture2, float)
+            and math.isnan(nulls_fixture)
+            and math.isnan(nulls_fixture2)
+        ):
+            tm.assert_numpy_array_equal(
+                Index(["a", nulls_fixture]).isin([nulls_fixture2]),
+                np.array([False, True]),
+            )
+
+        elif nulls_fixture is nulls_fixture2:  # should preserve NA type
+            tm.assert_numpy_array_equal(
+                Index(["a", nulls_fixture]).isin([nulls_fixture2]),
+                np.array([False, True]),
+            )
+
+        else:
+            tm.assert_numpy_array_equal(
+                Index(["a", nulls_fixture]).isin([nulls_fixture2]),
+                np.array([False, False]),
+            )
+
+    def test_isin_nan_common_float64(self, nulls_fixture, float_numpy_dtype):
+        dtype = float_numpy_dtype
+
+        if nulls_fixture is pd.NaT or nulls_fixture is pd.NA:
+            # Check 1) that we cannot construct a float64 Index with this value
+            # and 2) that with an NaN we do not have .isin(nulls_fixture)
+            msg = (
+                r"float\(\) argument must be a string or a (real )?number, "
+                f"not {repr(type(nulls_fixture).__name__)}"
+            )
+            with pytest.raises(TypeError, match=msg):
+                Index([1.0, nulls_fixture], dtype=dtype)
+
+            idx = Index([1.0, np.nan], dtype=dtype)
+            assert not idx.isin([nulls_fixture]).any()
+            return
+
+        idx = Index([1.0, nulls_fixture], dtype=dtype)
+        res = idx.isin([np.nan])
+        tm.assert_numpy_array_equal(res, np.array([False, True]))
+
+        # we cannot compare NaT with NaN
+        res = idx.isin([pd.NaT])
+        tm.assert_numpy_array_equal(res, np.array([False, False]))
+
+    @pytest.mark.parametrize("level", [0, -1])
+    @pytest.mark.parametrize(
+        "index",
+        [
+            Index(["qux", "baz", "foo", "bar"]),
+            Index([1.0, 2.0, 3.0, 4.0], dtype=np.float64),
+        ],
+    )
+    def test_isin_level_kwarg(self, level, index):
+        values = index.tolist()[-2:] + ["nonexisting"]
+
+        expected = np.array([False, False, True, True])
+        tm.assert_numpy_array_equal(expected, index.isin(values, level=level))
+
+        index.name = "foobar"
+        tm.assert_numpy_array_equal(expected, index.isin(values, level="foobar"))
+
+    def test_isin_level_kwarg_bad_level_raises(self, index):
+        for level in [10, index.nlevels, -(index.nlevels + 1)]:
+            with pytest.raises(IndexError, match="Too many levels"):
+                index.isin([], level=level)
+
+    @pytest.mark.parametrize("label", [1.0, "foobar", "xyzzy", np.nan])
+    def test_isin_level_kwarg_bad_label_raises(self, label, index):
+        if isinstance(index, MultiIndex):
+            index = index.rename(["foo", "bar"] + index.names[2:])
+            msg = f"'Level {label} not found'"
+        else:
+            index = index.rename("foo")
+            msg = rf"Requested level \({label}\) does not match index name \(foo\)"
+        with pytest.raises(KeyError, match=msg):
+            index.isin([], level=label)
+
+    @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
+    def test_isin_empty(self, empty):
+        # see gh-16991
+        index = Index(["a", "b"])
+        expected = np.array([False, False])
+
+        result = index.isin(empty)
+        tm.assert_numpy_array_equal(expected, result)
+
+    @td.skip_if_no("pyarrow")
+    def test_isin_arrow_string_null(self):
+        # GH#55821
+        index = Index(["a", "b"], dtype="string[pyarrow_numpy]")
+        result = index.isin([None])
+        expected = np.array([False, False])
+        tm.assert_numpy_array_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "values",
+        [
+            [1, 2, 3, 4],
+            [1.0, 2.0, 3.0, 4.0],
+            [True, True, True, True],
+            ["foo", "bar", "baz", "qux"],
+            date_range("2018-01-01", freq="D", periods=4),
+        ],
+    )
+    def test_boolean_cmp(self, values):
+        index = Index(values)
+        result = index == values
+        expected = np.array([True, True, True, True], dtype=bool)
+
+        tm.assert_numpy_array_equal(result, expected)
+
+    @pytest.mark.parametrize("index", ["string"], indirect=True)
+    @pytest.mark.parametrize("name,level", [(None, 0), ("a", "a")])
+    def test_get_level_values(self, index, name, level):
+        expected = index.copy()
+        if name:
+            expected.name = name
+
+        result = expected.get_level_values(level)
+        tm.assert_index_equal(result, expected)
+
+    def test_slice_keep_name(self):
+        index = Index(["a", "b"], name="asdf")
+        assert index.name == index[1:].name
+
+    @pytest.mark.parametrize(
+        "index",
+        [
+            "string",
+            "datetime",
+            "int64",
+            "int32",
+            "uint64",
+            "uint32",
+            "float64",
+            "float32",
+        ],
+        indirect=True,
+    )
+    def test_join_self(self, index, join_type):
+        joined = index.join(index, how=join_type)
+        assert index is joined
+
+    @pytest.mark.parametrize("method", ["strip", "rstrip", "lstrip"])
+    def test_str_attribute(self, method):
+        # GH9068
+        index = Index([" jack", "jill ", " jesse ", "frank"])
+        expected = Index([getattr(str, method)(x) for x in index.values])
+
+        result = getattr(index.str, method)()
+        tm.assert_index_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "index",
+        [
+            Index(range(5)),
+            tm.makeDateIndex(10),
+            MultiIndex.from_tuples([("foo", "1"), ("bar", "3")]),
+            period_range(start="2000", end="2010", freq="A"),
+        ],
+    )
+    def test_str_attribute_raises(self, index):
+        with pytest.raises(AttributeError, match="only use .str accessor"):
+            index.str.repeat(2)
+
+    @pytest.mark.parametrize(
+        "expand,expected",
+        [
+            (None, Index([["a", "b", "c"], ["d", "e"], ["f"]])),
+            (False, Index([["a", "b", "c"], ["d", "e"], ["f"]])),
+            (
+                True,
+                MultiIndex.from_tuples(
+                    [("a", "b", "c"), ("d", "e", np.nan), ("f", np.nan, np.nan)]
+                ),
+            ),
+        ],
+    )
+    def test_str_split(self, expand, expected):
+        index = Index(["a b c", "d e", "f"])
+        if expand is not None:
+            result = index.str.split(expand=expand)
+        else:
+            result = index.str.split()
+
+        tm.assert_index_equal(result, expected)
+
+    def test_str_bool_return(self):
+        # test boolean case, should return np.array instead of boolean Index
+        index = Index(["a1", "a2", "b1", "b2"])
+        result = index.str.startswith("a")
+        expected = np.array([True, True, False, False])
+
+        tm.assert_numpy_array_equal(result, expected)
+        assert isinstance(result, np.ndarray)
+
+    def test_str_bool_series_indexing(self):
+        index = Index(["a1", "a2", "b1", "b2"])
+        s = Series(range(4), index=index)
+
+        result = s[s.index.str.startswith("a")]
+        expected = Series(range(2), index=["a1", "a2"])
+        tm.assert_series_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "index,expected", [(Index(list("abcd")), True), (Index(range(4)), False)]
+    )
+    def test_tab_completion(self, index, expected):
+        # GH 9910
+        result = "str" in dir(index)
+        assert result == expected
+
+    def test_indexing_doesnt_change_class(self):
+        index = Index([1, 2, 3, "a", "b", "c"])
+
+        assert index[1:3].identical(Index([2, 3], dtype=np.object_))
+        assert index[[0, 1]].identical(Index([1, 2], dtype=np.object_))
+
+    def test_outer_join_sort(self):
+        left_index = Index(np.random.default_rng(2).permutation(15))
+        right_index = tm.makeDateIndex(10)
+
+        with tm.assert_produces_warning(RuntimeWarning):
+            result = left_index.join(right_index, how="outer")
+
+        # right_index in this case because DatetimeIndex has join precedence
+        # over int64 Index
+        with tm.assert_produces_warning(RuntimeWarning):
+            expected = right_index.astype(object).union(left_index.astype(object))
+
+        tm.assert_index_equal(result, expected)
+
+    def test_take_fill_value(self):
+        # GH 12631
+        index = Index(list("ABC"), name="xxx")
+        result = index.take(np.array([1, 0, -1]))
+        expected = Index(list("BAC"), name="xxx")
+        tm.assert_index_equal(result, expected)
+
+        # fill_value
+        result = index.take(np.array([1, 0, -1]), fill_value=True)
+        expected = Index(["B", "A", np.nan], name="xxx")
+        tm.assert_index_equal(result, expected)
+
+        # allow_fill=False
+        result = index.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
+        expected = Index(["B", "A", "C"], name="xxx")
+        tm.assert_index_equal(result, expected)
+
+    def test_take_fill_value_none_raises(self):
+        index = Index(list("ABC"), name="xxx")
+        msg = (
+            "When allow_fill=True and fill_value is not None, "
+            "all indices must be >= -1"
+        )
+
+        with pytest.raises(ValueError, match=msg):
+            index.take(np.array([1, 0, -2]), fill_value=True)
+        with pytest.raises(ValueError, match=msg):
+            index.take(np.array([1, 0, -5]), fill_value=True)
+
+    def test_take_bad_bounds_raises(self):
+        index = Index(list("ABC"), name="xxx")
+        with pytest.raises(IndexError, match="out of bounds"):
+            index.take(np.array([1, -5]))
+
+    @pytest.mark.parametrize("name", [None, "foobar"])
+    @pytest.mark.parametrize(
+        "labels",
+        [
+            [],
+            np.array([]),
+            ["A", "B", "C"],
+            ["C", "B", "A"],
+            np.array(["A", "B", "C"]),
+            np.array(["C", "B", "A"]),
+            # Must preserve name even if dtype changes
+            date_range("20130101", periods=3).values,
+            date_range("20130101", periods=3).tolist(),
+        ],
+    )
+    def test_reindex_preserves_name_if_target_is_list_or_ndarray(self, name, labels):
+        # GH6552
+        index = Index([0, 1, 2])
+        index.name = name
+        assert index.reindex(labels)[0].name == name
+
+    @pytest.mark.parametrize("labels", [[], np.array([]), np.array([], dtype=np.int64)])
+    def test_reindex_preserves_type_if_target_is_empty_list_or_array(self, labels):
+        # GH7774
+        index = Index(list("abc"))
+        assert index.reindex(labels)[0].dtype.type == np.object_
+
+    @pytest.mark.parametrize(
+        "labels,dtype",
+        [
+            (DatetimeIndex([]), np.datetime64),
+        ],
+    )
+    def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self, labels, dtype):
+        # GH7774
+        index = Index(list("abc"))
+        assert index.reindex(labels)[0].dtype.type == dtype
+
+    def test_reindex_doesnt_preserve_type_if_target_is_empty_index_numeric(
+        self, any_real_numpy_dtype
+    ):
+        # GH7774
+        dtype = any_real_numpy_dtype
+        index = Index(list("abc"))
+        labels = Index([], dtype=dtype)
+        assert index.reindex(labels)[0].dtype == dtype
+
+    def test_reindex_no_type_preserve_target_empty_mi(self):
+        index = Index(list("abc"))
+        result = index.reindex(
+            MultiIndex([Index([], np.int64), Index([], np.float64)], [[], []])
+        )[0]
+        assert result.levels[0].dtype.type == np.int64
+        assert result.levels[1].dtype.type == np.float64
+
+    def test_reindex_ignoring_level(self):
+        # GH#35132
+        idx = Index([1, 2, 3], name="x")
+        idx2 = Index([1, 2, 3, 4], name="x")
+        expected = Index([1, 2, 3, 4], name="x")
+        result, _ = idx.reindex(idx2, level="x")
+        tm.assert_index_equal(result, expected)
+
+    def test_groupby(self):
+        index = Index(range(5))
+        result = index.groupby(np.array([1, 1, 2, 2, 2]))
+        expected = {1: Index([0, 1]), 2: Index([2, 3, 4])}
+
+        tm.assert_dict_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "mi,expected",
+        [
+            (MultiIndex.from_tuples([(1, 2), (4, 5)]), np.array([True, True])),
+            (MultiIndex.from_tuples([(1, 2), (4, 6)]), np.array([True, False])),
+        ],
+    )
+    def test_equals_op_multiindex(self, mi, expected):
+        # GH9785
+        # test comparisons of multiindex
+        df = pd.read_csv(StringIO("a,b,c\n1,2,3\n4,5,6"), index_col=[0, 1])
+
+        result = df.index == mi
+        tm.assert_numpy_array_equal(result, expected)
+
+    def test_equals_op_multiindex_identify(self):
+        df = pd.read_csv(StringIO("a,b,c\n1,2,3\n4,5,6"), index_col=[0, 1])
+
+        result = df.index == df.index
+        expected = np.array([True, True])
+        tm.assert_numpy_array_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "index",
+        [
+            MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]),
+            Index(["foo", "bar", "baz"]),
+        ],
+    )
+    def test_equals_op_mismatched_multiindex_raises(self, index):
+        df = pd.read_csv(StringIO("a,b,c\n1,2,3\n4,5,6"), index_col=[0, 1])
+
+        with pytest.raises(ValueError, match="Lengths must match"):
+            df.index == index
+
+    def test_equals_op_index_vs_mi_same_length(self):
+        mi = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
+        index = Index(["foo", "bar", "baz"])
+
+        result = mi == index
+        expected = np.array([False, False, False])
+        tm.assert_numpy_array_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "dt_conv, arg",
+        [
+            (pd.to_datetime, ["2000-01-01", "2000-01-02"]),
+            (pd.to_timedelta, ["01:02:03", "01:02:04"]),
+        ],
+    )
+    def test_dt_conversion_preserves_name(self, dt_conv, arg):
+        # GH 10875
+        index = Index(arg, name="label")
+        assert index.name == dt_conv(index).name
+
+    def test_cached_properties_not_settable(self):
+        index = Index([1, 2, 3])
+        with pytest.raises(AttributeError, match="Can't set attribute"):
+            index.is_unique = False
+
+    def test_tab_complete_warning(self, ip):
+        # https://github.com/pandas-dev/pandas/issues/16409
+        pytest.importorskip("IPython", minversion="6.0.0")
+        from IPython.core.completer import provisionalcompleter
+
+        code = "import pandas as pd; idx = pd.Index([1, 2])"
+        ip.run_cell(code)
+
+        # GH 31324 newer jedi version raises Deprecation warning;
+        # appears resolved 2021-02-02
+        with tm.assert_produces_warning(None, raise_on_extra_warnings=False):
+            with provisionalcompleter("ignore"):
+                list(ip.Completer.completions("idx.", 4))
+
+    def test_contains_method_removed(self, index):
+        # GH#30103 method removed for all types except IntervalIndex
+        if isinstance(index, IntervalIndex):
+            index.contains(1)
+        else:
+            msg = f"'{type(index).__name__}' object has no attribute 'contains'"
+            with pytest.raises(AttributeError, match=msg):
+                index.contains(1)
+
+    def test_sortlevel(self):
+        index = Index([5, 4, 3, 2, 1])
+        with pytest.raises(Exception, match="ascending must be a single bool value or"):
+            index.sortlevel(ascending="True")
+
+        with pytest.raises(
+            Exception, match="ascending must be a list of bool values of length 1"
+        ):
+            index.sortlevel(ascending=[True, True])
+
+        with pytest.raises(Exception, match="ascending must be a bool value"):
+            index.sortlevel(ascending=["True"])
+
+        expected = Index([1, 2, 3, 4, 5])
+        result = index.sortlevel(ascending=[True])
+        tm.assert_index_equal(result[0], expected)
+
+        expected = Index([1, 2, 3, 4, 5])
+        result = index.sortlevel(ascending=True)
+        tm.assert_index_equal(result[0], expected)
+
+        expected = Index([5, 4, 3, 2, 1])
+        result = index.sortlevel(ascending=False)
+        tm.assert_index_equal(result[0], expected)
+
+    def test_sortlevel_na_position(self):
+        # GH#51612
+        idx = Index([1, np.nan])
+        result = idx.sortlevel(na_position="first")[0]
+        expected = Index([np.nan, 1])
+        tm.assert_index_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "periods, expected_results",
+        [
+            (1, [np.nan, 10, 10, 10, 10]),
+            (2, [np.nan, np.nan, 20, 20, 20]),
+            (3, [np.nan, np.nan, np.nan, 30, 30]),
+        ],
+    )
+    def test_index_diff(self, periods, expected_results):
+        # GH#19708
+        idx = Index([10, 20, 30, 40, 50])
+        result = idx.diff(periods)
+        expected = Index(expected_results)
+
+        tm.assert_index_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "decimals, expected_results",
+        [
+            (0, [1.0, 2.0, 3.0]),
+            (1, [1.2, 2.3, 3.5]),
+            (2, [1.23, 2.35, 3.46]),
+        ],
+    )
+    def test_index_round(self, decimals, expected_results):
+        # GH#19708
+        idx = Index([1.234, 2.345, 3.456])
+        result = idx.round(decimals)
+        expected = Index(expected_results)
+
+        tm.assert_index_equal(result, expected)
+
+
+class TestMixedIntIndex:
+    # Mostly the tests from common.py for which the results differ
+    # in py2 and py3 because ints and strings are not comparable in py3
+    # (GH 13514)
+    @pytest.fixture
+    def simple_index(self) -> Index:
+        return Index([0, "a", 1, "b", 2, "c"])
+
+    def test_argsort(self, simple_index):
+        index = simple_index
+        with pytest.raises(TypeError, match="'>|<' not supported"):
+            index.argsort()
+
+    def test_numpy_argsort(self, simple_index):
+        index = simple_index
+        with pytest.raises(TypeError, match="'>|<' not supported"):
+            np.argsort(index)
+
+    def test_copy_name(self, simple_index):
+        # Check that "name" argument passed at initialization is honoured
+        # GH12309
+        index = simple_index
+
+        first = type(index)(index, copy=True, name="mario")
+        second = type(first)(first, copy=False)
+
+        # Even though "copy=False", we want a new object.
+        assert first is not second
+        tm.assert_index_equal(first, second)
+
+        assert first.name == "mario"
+        assert second.name == "mario"
+
+        s1 = Series(2, index=first)
+        s2 = Series(3, index=second[:-1])
+
+        s3 = s1 * s2
+
+        assert s3.index.name == "mario"
+
+    def test_copy_name2(self):
+        # Check that adding a "name" parameter to the copy is honored
+        # GH14302
+        index = Index([1, 2], name="MyName")
+        index1 = index.copy()
+
+        tm.assert_index_equal(index, index1)
+
+        index2 = index.copy(name="NewName")
+        tm.assert_index_equal(index, index2, check_names=False)
+        assert index.name == "MyName"
+        assert index2.name == "NewName"
+
+    def test_unique_na(self):
+        idx = Index([2, np.nan, 2, 1], name="my_index")
+        expected = Index([2, np.nan, 1], name="my_index")
+        result = idx.unique()
+        tm.assert_index_equal(result, expected)
+
+    def test_logical_compat(self, simple_index):
+        index = simple_index
+        assert index.all() == index.values.all()
+        assert index.any() == index.values.any()
+
+    @pytest.mark.parametrize("how", ["any", "all"])
+    @pytest.mark.parametrize("dtype", [None, object, "category"])
+    @pytest.mark.parametrize(
+        "vals,expected",
+        [
+            ([1, 2, 3], [1, 2, 3]),
+            ([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]),
+            ([1.0, 2.0, np.nan, 3.0], [1.0, 2.0, 3.0]),
+            (["A", "B", "C"], ["A", "B", "C"]),
+            (["A", np.nan, "B", "C"], ["A", "B", "C"]),
+        ],
+    )
+    def test_dropna(self, how, dtype, vals, expected):
+        # GH 6194
+        index = Index(vals, dtype=dtype)
+        result = index.dropna(how=how)
+        expected = Index(expected, dtype=dtype)
+        tm.assert_index_equal(result, expected)
+
+    @pytest.mark.parametrize("how", ["any", "all"])
+    @pytest.mark.parametrize(
+        "index,expected",
+        [
+            (
+                DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),
+                DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),
+            ),
+            (
+                DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", pd.NaT]),
+                DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),
+            ),
+            (
+                TimedeltaIndex(["1 days", "2 days", "3 days"]),
+                TimedeltaIndex(["1 days", "2 days", "3 days"]),
+            ),
+            (
+                TimedeltaIndex([pd.NaT, "1 days", "2 days", "3 days", pd.NaT]),
+                TimedeltaIndex(["1 days", "2 days", "3 days"]),
+            ),
+            (
+                PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),
+                PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),
+            ),
+            (
+                PeriodIndex(["2012-02", "2012-04", "NaT", "2012-05"], freq="M"),
+                PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),
+            ),
+        ],
+    )
+    def test_dropna_dt_like(self, how, index, expected):
+        result = index.dropna(how=how)
+        tm.assert_index_equal(result, expected)
+
+    def test_dropna_invalid_how_raises(self):
+        msg = "invalid how option: xxx"
+        with pytest.raises(ValueError, match=msg):
+            Index([1, 2, 3]).dropna(how="xxx")
+
+    @pytest.mark.parametrize(
+        "index",
+        [
+            Index([np.nan]),
+            Index([np.nan, 1]),
+            Index([1, 2, np.nan]),
+            Index(["a", "b", np.nan]),
+            pd.to_datetime(["NaT"]),
+            pd.to_datetime(["NaT", "2000-01-01"]),
+            pd.to_datetime(["2000-01-01", "NaT", "2000-01-02"]),
+            pd.to_timedelta(["1 day", "NaT"]),
+        ],
+    )
+    def test_is_monotonic_na(self, index):
+        assert index.is_monotonic_increasing is False
+        assert index.is_monotonic_decreasing is False
+        assert index._is_strictly_monotonic_increasing is False
+        assert index._is_strictly_monotonic_decreasing is False
+
+    def test_int_name_format(self, frame_or_series):
+        index = Index(["a", "b", "c"], name=0)
+        result = frame_or_series(list(range(3)), index=index)
+        assert "0" in repr(result)
+
+    def test_str_to_bytes_raises(self):
+        # GH 26447
+        index = Index([str(x) for x in range(10)])
+        msg = "^'str' object cannot be interpreted as an integer$"
+        with pytest.raises(TypeError, match=msg):
+            bytes(index)
+
+    @pytest.mark.filterwarnings("ignore:elementwise comparison failed:FutureWarning")
+    def test_index_with_tuple_bool(self):
+        # GH34123
+        # TODO: also this op right now produces FutureWarning from numpy
+        # https://github.com/numpy/numpy/issues/11521
+        idx = Index([("a", "b"), ("b", "c"), ("c", "a")])
+        result = idx == ("c", "a")
+        expected = np.array([False, False, True])
+        tm.assert_numpy_array_equal(result, expected)
+
+
+class TestIndexUtils:
+    @pytest.mark.parametrize(
+        "data, names, expected",
+        [
+            ([[1, 2, 3]], None, Index([1, 2, 3])),
+            ([[1, 2, 3]], ["name"], Index([1, 2, 3], name="name")),
+            (
+                [["a", "a"], ["c", "d"]],
+                None,
+                MultiIndex([["a"], ["c", "d"]], [[0, 0], [0, 1]]),
+            ),
+            (
+                [["a", "a"], ["c", "d"]],
+                ["L1", "L2"],
+                MultiIndex([["a"], ["c", "d"]], [[0, 0], [0, 1]], names=["L1", "L2"]),
+            ),
+        ],
+    )
+    def test_ensure_index_from_sequences(self, data, names, expected):
+        result = ensure_index_from_sequences(data, names)
+        tm.assert_index_equal(result, expected)
+
+    def test_ensure_index_mixed_closed_intervals(self):
+        # GH27172
+        intervals = [
+            pd.Interval(0, 1, closed="left"),
+            pd.Interval(1, 2, closed="right"),
+            pd.Interval(2, 3, closed="neither"),
+            pd.Interval(3, 4, closed="both"),
+        ]
+        result = ensure_index(intervals)
+        expected = Index(intervals, dtype=object)
+        tm.assert_index_equal(result, expected)
+
+    def test_ensure_index_uint64(self):
+        # with both 0 and a large-uint64, np.array will infer to float64
+        # https://github.com/numpy/numpy/issues/19146
+        # but a more accurate choice would be uint64
+        values = [0, np.iinfo(np.uint64).max]
+
+        result = ensure_index(values)
+        assert list(result) == values
+
+        expected = Index(values, dtype="uint64")
+        tm.assert_index_equal(result, expected)
+
+    def test_get_combined_index(self):
+        result = _get_combined_index([])
+        expected = Index([])
+        tm.assert_index_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "opname",
+    [
+        "eq",
+        "ne",
+        "le",
+        "lt",
+        "ge",
+        "gt",
+        "add",
+        "radd",
+        "sub",
+        "rsub",
+        "mul",
+        "rmul",
+        "truediv",
+        "rtruediv",
+        "floordiv",
+        "rfloordiv",
+        "pow",
+        "rpow",
+        "mod",
+        "divmod",
+    ],
+)
+def test_generated_op_names(opname, index):
+    opname = f"__{opname}__"
+    method = getattr(index, opname)
+    assert method.__name__ == opname
+
+
+@pytest.mark.parametrize("index_maker", tm.index_subclass_makers_generator())
+def test_index_subclass_constructor_wrong_kwargs(index_maker):
+    # GH #19348
+    with pytest.raises(TypeError, match="unexpected keyword argument"):
+        index_maker(foo="bar")
+
+
+def test_deprecated_fastpath():
+    msg = "[Uu]nexpected keyword argument"
+    with pytest.raises(TypeError, match=msg):
+        Index(np.array(["a", "b"], dtype=object), name="test", fastpath=True)
+
+    with pytest.raises(TypeError, match=msg):
+        Index(np.array([1, 2, 3], dtype="int64"), name="test", fastpath=True)
+
+    with pytest.raises(TypeError, match=msg):
+        RangeIndex(0, 5, 2, name="test", fastpath=True)
+
+    with pytest.raises(TypeError, match=msg):
+        CategoricalIndex(["a", "b", "c"], name="test", fastpath=True)
+
+
+def test_shape_of_invalid_index():
+    # Pre-2.0, it was possible to create "invalid" index objects backed by
+    # a multi-dimensional array (see https://github.com/pandas-dev/pandas/issues/27125
+    # about this). However, as long as this is not solved in general, this test ensures
+    # that the returned shape is consistent with this underlying array for
+    # compat with matplotlib (see https://github.com/pandas-dev/pandas/issues/27775)
+    idx = Index([0, 1, 2, 3])
+    with pytest.raises(ValueError, match="Multi-dimensional indexing"):
+        # GH#30588 multi-dimensional indexing deprecated
+        idx[:, None]
+
+
+@pytest.mark.parametrize("dtype", [None, np.int64, np.uint64, np.float64])
+def test_validate_1d_input(dtype):
+    # GH#27125 check that we do not have >1-dimensional input
+    msg = "Index data must be 1-dimensional"
+
+    arr = np.arange(8).reshape(2, 2, 2)
+    with pytest.raises(ValueError, match=msg):
+        Index(arr, dtype=dtype)
+
+    df = DataFrame(arr.reshape(4, 2))
+    with pytest.raises(ValueError, match=msg):
+        Index(df, dtype=dtype)
+
+    # GH#13601 trying to assign a multi-dimensional array to an index is not allowed
+    ser = Series(0, range(4))
+    with pytest.raises(ValueError, match=msg):
+        ser.index = np.array([[2, 3]] * 4, dtype=dtype)
+
+
+@pytest.mark.parametrize(
+    "klass, extra_kwargs",
+    [
+        [Index, {}],
+        *[[lambda x: Index(x, dtype=dtyp), {}] for dtyp in tm.ALL_REAL_NUMPY_DTYPES],
+        [DatetimeIndex, {}],
+        [TimedeltaIndex, {}],
+        [PeriodIndex, {"freq": "Y"}],
+    ],
+)
+def test_construct_from_memoryview(klass, extra_kwargs):
+    # GH 13120
+    result = klass(memoryview(np.arange(2000, 2005)), **extra_kwargs)
+    expected = klass(list(range(2000, 2005)), **extra_kwargs)
+    tm.assert_index_equal(result, expected, exact=True)
+
+
+@pytest.mark.parametrize("op", [operator.lt, operator.gt])
+def test_nan_comparison_same_object(op):
+    # GH#47105
+    idx = Index([np.nan])
+    expected = np.array([False])
+
+    result = op(idx, idx)
+    tm.assert_numpy_array_equal(result, expected)
+
+    result = op(idx, idx.copy())
+    tm.assert_numpy_array_equal(result, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_common.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_common.py
new file mode 100644
index 00000000..6245a129
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_common.py
@@ -0,0 +1,502 @@
+"""
+Collection of tests asserting things that should be true for
+any index subclass except for MultiIndex. Makes use of the `index_flat`
+fixture defined in pandas/conftest.py.
+""" +from copy import ( + copy, + deepcopy, +) +import re + +import numpy as np +import pytest + +from pandas.compat import IS64 +from pandas.compat.numpy import np_version_gte1p25 + +from pandas.core.dtypes.common import ( + is_integer_dtype, + is_numeric_dtype, +) + +import pandas as pd +from pandas import ( + CategoricalIndex, + MultiIndex, + PeriodIndex, + RangeIndex, +) +import pandas._testing as tm + + +class TestCommon: + @pytest.mark.parametrize("name", [None, "new_name"]) + def test_to_frame(self, name, index_flat, using_copy_on_write): + # see GH#15230, GH#22580 + idx = index_flat + + if name: + idx_name = name + else: + idx_name = idx.name or 0 + + df = idx.to_frame(name=idx_name) + + assert df.index is idx + assert len(df.columns) == 1 + assert df.columns[0] == idx_name + if not using_copy_on_write: + assert df[idx_name].values is not idx.values + + df = idx.to_frame(index=False, name=idx_name) + assert df.index is not idx + + def test_droplevel(self, index_flat): + # GH 21115 + # MultiIndex is tested separately in test_multi.py + index = index_flat + + assert index.droplevel([]).equals(index) + + for level in [index.name, [index.name]]: + if isinstance(index.name, tuple) and level is index.name: + # GH 21121 : droplevel with tuple name + continue + msg = ( + "Cannot remove 1 levels from an index with 1 levels: at least one " + "level must be left." + ) + with pytest.raises(ValueError, match=msg): + index.droplevel(level) + + for level in "wrong", ["wrong"]: + with pytest.raises( + KeyError, + match=r"'Requested level \(wrong\) does not match index name \(None\)'", + ): + index.droplevel(level) + + def test_constructor_non_hashable_name(self, index_flat): + # GH 20527 + index = index_flat + + message = "Index.name must be a hashable type" + renamed = [["1"]] + + # With .rename() + with pytest.raises(TypeError, match=message): + index.rename(name=renamed) + + # With .set_names() + with pytest.raises(TypeError, match=message): + index.set_names(names=renamed) + + def test_constructor_unwraps_index(self, index_flat): + a = index_flat + # Passing dtype is necessary for Index([True, False], dtype=object) + # case. 
+        b = type(a)(a, dtype=a.dtype)
+        tm.assert_equal(a._data, b._data)
+
+    def test_to_flat_index(self, index_flat):
+        # 22866
+        index = index_flat
+
+        result = index.to_flat_index()
+        tm.assert_index_equal(result, index)
+
+    def test_set_name_methods(self, index_flat):
+        # MultiIndex tested separately
+        index = index_flat
+        new_name = "This is the new name for this index"
+
+        original_name = index.name
+        new_ind = index.set_names([new_name])
+        assert new_ind.name == new_name
+        assert index.name == original_name
+        res = index.rename(new_name, inplace=True)
+
+        # should return None
+        assert res is None
+        assert index.name == new_name
+        assert index.names == [new_name]
+        with pytest.raises(ValueError, match="Level must be None"):
+            index.set_names("a", level=0)
+
+        # rename in place just leaves tuples and other containers alone
+        name = ("A", "B")
+        index.rename(name, inplace=True)
+        assert index.name == name
+        assert index.names == [name]
+
+    @pytest.mark.xfail
+    def test_set_names_single_label_no_level(self, index_flat):
+        with pytest.raises(TypeError, match="list-like"):
+            # should still fail even if it would be the right length
+            index_flat.set_names("a")
+
+    def test_copy_and_deepcopy(self, index_flat):
+        index = index_flat
+
+        for func in (copy, deepcopy):
+            idx_copy = func(index)
+            assert idx_copy is not index
+            assert idx_copy.equals(index)
+
+        new_copy = index.copy(deep=True, name="banana")
+        assert new_copy.name == "banana"
+
+    def test_copy_name(self, index_flat):
+        # GH#12309: Check that the "name" argument
+        # passed at initialization is honored.
+        index = index_flat
+
+        first = type(index)(index, copy=True, name="mario")
+        second = type(first)(first, copy=False)
+
+        # Even though "copy=False", we want a new object.
+        assert first is not second
+        tm.assert_index_equal(first, second)
+
+        # Not using tm.assert_index_equal() since names differ.
+        assert index.equals(first)
+
+        assert first.name == "mario"
+        assert second.name == "mario"
+
+        # TODO: belongs in series arithmetic tests?
+        s1 = pd.Series(2, index=first)
+        s2 = pd.Series(3, index=second[:-1])
+        # See GH#13365
+        s3 = s1 * s2
+        assert s3.index.name == "mario"
+
+    def test_copy_name2(self, index_flat):
+        # GH#35592
+        index = index_flat
+
+        assert index.copy(name="mario").name == "mario"
+
+        with pytest.raises(ValueError, match="Length of new names must be 1, got 2"):
+            index.copy(name=["mario", "luigi"])
+
+        msg = f"{type(index).__name__}.name must be a hashable type"
+        with pytest.raises(TypeError, match=msg):
+            index.copy(name=[["mario"]])
+
+    def test_unique_level(self, index_flat):
+        # don't test a MultiIndex here (as it's tested separately)
+        index = index_flat
+
+        # GH 17896
+        expected = index.drop_duplicates()
+        for level in [0, index.name, None]:
+            result = index.unique(level=level)
+            tm.assert_index_equal(result, expected)
+
+        msg = "Too many levels: Index has only 1 level, not 4"
+        with pytest.raises(IndexError, match=msg):
+            index.unique(level=3)
+
+        msg = (
+            rf"Requested level \(wrong\) does not match index name "
+            rf"\({re.escape(index.name.__repr__())}\)"
+        )
+        with pytest.raises(KeyError, match=msg):
+            index.unique(level="wrong")
+
+    def test_unique(self, index_flat):
+        # MultiIndex tested separately
+        index = index_flat
+        if not len(index):
+            pytest.skip("Skip check for empty Index and MultiIndex")
+
+        idx = index[[0] * 5]
+        idx_unique = index[[0]]
+
+        # We test against `idx_unique`, so first we make sure it's unique
+        # and doesn't contain nans.
+        assert idx_unique.is_unique is True
+        try:
+            assert idx_unique.hasnans is False
+        except NotImplementedError:
+            pass
+
+        result = idx.unique()
+        tm.assert_index_equal(result, idx_unique)
+
+        # nans:
+        if not index._can_hold_na:
+            pytest.skip("Skip na-check if index cannot hold na")
+
+        vals = index._values[[0] * 5]
+        vals[0] = np.nan
+
+        vals_unique = vals[:2]
+        idx_nan = index._shallow_copy(vals)
+        idx_unique_nan = index._shallow_copy(vals_unique)
+        assert idx_unique_nan.is_unique is True
+
+        assert idx_nan.dtype == index.dtype
+        assert idx_unique_nan.dtype == index.dtype
+
+        expected = idx_unique_nan
+        for pos, i in enumerate([idx_nan, idx_unique_nan]):
+            result = i.unique()
+            tm.assert_index_equal(result, expected)
+
+    @pytest.mark.filterwarnings("ignore:Period with BDay freq:FutureWarning")
+    @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+    def test_searchsorted_monotonic(self, index_flat, request):
+        # GH17271
+        index = index_flat
+        # not implemented for tuple searches in MultiIndex
+        # or Intervals searches in IntervalIndex
+        if isinstance(index, pd.IntervalIndex):
+            mark = pytest.mark.xfail(
+                reason="IntervalIndex.searchsorted does not support Interval arg",
+                raises=NotImplementedError,
+            )
+            request.node.add_marker(mark)
+
+        # nothing to test if the index is empty
+        if index.empty:
+            pytest.skip("Skip check for empty Index")
+        value = index[0]
+
+        # determine the expected results (handle dupes for 'right')
+        expected_left, expected_right = 0, (index == value).argmin()
+        if expected_right == 0:
+            # all values are the same, expected_right should be length
+            expected_right = len(index)
+
+        # test _searchsorted_monotonic in all cases
+        # test searchsorted only for increasing
+        if index.is_monotonic_increasing:
+            ssm_left = index._searchsorted_monotonic(value, side="left")
+            assert expected_left == ssm_left
+
+            ssm_right = index._searchsorted_monotonic(value, side="right")
+            assert expected_right == ssm_right
+
+            ss_left = index.searchsorted(value, side="left")
+            assert expected_left == ss_left
+
+            ss_right = index.searchsorted(value, side="right")
+            assert expected_right == ss_right
+
+        elif index.is_monotonic_decreasing:
+            ssm_left = index._searchsorted_monotonic(value, side="left")
+            assert expected_left == ssm_left
+
+            ssm_right = index._searchsorted_monotonic(value, side="right")
+            assert expected_right == ssm_right
+        else:
+            # non-monotonic should raise.
+            msg = "index must be monotonic increasing or decreasing"
+            with pytest.raises(ValueError, match=msg):
+                index._searchsorted_monotonic(value, side="left")
+
+    @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+    def test_drop_duplicates(self, index_flat, keep):
+        # MultiIndex is tested separately
+        index = index_flat
+        if isinstance(index, RangeIndex):
+            pytest.skip(
+                "RangeIndex is tested in test_drop_duplicates_no_duplicates "
+                "as it cannot hold duplicates"
+            )
+        if len(index) == 0:
+            pytest.skip(
+                "empty index is tested in test_drop_duplicates_no_duplicates "
+                "as it cannot hold duplicates"
+            )
+
+        # make unique index
+        holder = type(index)
+        unique_values = list(set(index))
+        dtype = index.dtype if is_numeric_dtype(index) else None
+        unique_idx = holder(unique_values, dtype=dtype)
+
+        # make duplicated index
+        n = len(unique_idx)
+        duplicated_selection = np.random.default_rng(2).choice(n, int(n * 1.5))
+        idx = holder(unique_idx.values[duplicated_selection])
+
+        # Series.duplicated is tested separately
+        expected_duplicated = (
+            pd.Series(duplicated_selection).duplicated(keep=keep).values
+        )
+        tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected_duplicated)
+
+        # Series.drop_duplicates is tested separately
+        expected_dropped = holder(pd.Series(idx).drop_duplicates(keep=keep))
+        tm.assert_index_equal(idx.drop_duplicates(keep=keep), expected_dropped)
+
+    @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+    def test_drop_duplicates_no_duplicates(self, index_flat):
+        # MultiIndex is tested separately
+        index = index_flat
+
+        # make unique index
+        if isinstance(index, RangeIndex):
+            # RangeIndex cannot have duplicates
+            unique_idx = index
+        else:
+            holder = type(index)
+            unique_values = list(set(index))
+            dtype = index.dtype if is_numeric_dtype(index) else None
+            unique_idx = holder(unique_values, dtype=dtype)
+
+        # check on unique index
+        expected_duplicated = np.array([False] * len(unique_idx), dtype="bool")
+        tm.assert_numpy_array_equal(unique_idx.duplicated(), expected_duplicated)
+        result_dropped = unique_idx.drop_duplicates()
+        tm.assert_index_equal(result_dropped, unique_idx)
+        # validate shallow copy
+        assert result_dropped is not unique_idx
+
+    def test_drop_duplicates_inplace(self, index):
+        msg = r"drop_duplicates\(\) got an unexpected keyword argument"
+        with pytest.raises(TypeError, match=msg):
+            index.drop_duplicates(inplace=True)
+
+    @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+    def test_has_duplicates(self, index_flat):
+        # MultiIndex tested separately in:
+        # tests/indexes/multi/test_unique_and_duplicates.
+        index = index_flat
+        holder = type(index)
+        if not len(index) or isinstance(index, RangeIndex):
+            # MultiIndex tested separately in:
+            # tests/indexes/multi/test_unique_and_duplicates.
+            # RangeIndex is unique by definition.
+            pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex")
+
+        idx = holder([index[0]] * 5)
+        assert idx.is_unique is False
+        assert idx.has_duplicates is True
+
+    @pytest.mark.parametrize(
+        "dtype",
+        ["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"],
+    )
+    def test_astype_preserves_name(self, index, dtype):
+        # https://github.com/pandas-dev/pandas/issues/32013
+        if isinstance(index, MultiIndex):
+            index.names = ["idx" + str(i) for i in range(index.nlevels)]
+        else:
+            index.name = "idx"
+
+        warn = None
+        if index.dtype.kind == "c" and dtype in ["float64", "int64", "uint64"]:
+            # imaginary components discarded
+            if np_version_gte1p25:
+                warn = np.exceptions.ComplexWarning
+            else:
+                warn = np.ComplexWarning
+
+        is_pyarrow_str = str(index.dtype) == "string[pyarrow]" and dtype == "category"
+        try:
+            # Some of these conversions cannot succeed so we use a try / except
+            with tm.assert_produces_warning(
+                warn,
+                raise_on_extra_warnings=is_pyarrow_str,
+                check_stacklevel=False,
+            ):
+                result = index.astype(dtype)
+        except (ValueError, TypeError, NotImplementedError, SystemError):
+            return
+
+        if isinstance(index, MultiIndex):
+            assert result.names == index.names
+        else:
+            assert result.name == index.name
+
+    def test_hasnans_isnans(self, index_flat):
+        # GH#11343, added tests for hasnans / isnans
+        index = index_flat
+
+        # cases in indices doesn't include NaN
+        idx = index.copy(deep=True)
+        expected = np.array([False] * len(idx), dtype=bool)
+        tm.assert_numpy_array_equal(idx._isnan, expected)
+        assert idx.hasnans is False
+
+        idx = index.copy(deep=True)
+        values = idx._values
+
+        if len(index) == 0:
+            return
+        elif is_integer_dtype(index.dtype):
+            return
+        elif index.dtype == bool:
+            # values[1] = np.nan below casts to True!
+            return
+
+        values[1] = np.nan
+
+        idx = type(index)(values)
+
+        expected = np.array([False] * len(idx), dtype=bool)
+        expected[1] = True
+        tm.assert_numpy_array_equal(idx._isnan, expected)
+        assert idx.hasnans is True
+
+
+@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+@pytest.mark.parametrize("na_position", [None, "middle"])
+def test_sort_values_invalid_na_position(index_with_missing, na_position):
+    with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"):
+        index_with_missing.sort_values(na_position=na_position)
+
+
+@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+@pytest.mark.parametrize("na_position", ["first", "last"])
+def test_sort_values_with_missing(index_with_missing, na_position, request):
+    # GH 35584. Test that sort_values works with missing values,
+    # sort non-missing and place missing according to na_position
+
+    if isinstance(index_with_missing, CategoricalIndex):
+        request.node.add_marker(
+            pytest.mark.xfail(
+                reason="missing value sorting order not well-defined", strict=False
+            )
+        )
+
+    missing_count = np.sum(index_with_missing.isna())
+    not_na_vals = index_with_missing[index_with_missing.notna()].values
+    sorted_values = np.sort(not_na_vals)
+    if na_position == "first":
+        sorted_values = np.concatenate([[None] * missing_count, sorted_values])
+    else:
+        sorted_values = np.concatenate([sorted_values, [None] * missing_count])
+
+    # Explicitly pass dtype needed for Index backed by EA e.g. IntegerArray
+    expected = type(index_with_missing)(sorted_values, dtype=index_with_missing.dtype)
+
+    result = index_with_missing.sort_values(na_position=na_position)
+    tm.assert_index_equal(result, expected)
+
+
+def test_ndarray_compat_properties(index):
+    if isinstance(index, PeriodIndex) and not IS64:
+        pytest.skip("Overflow")
+    idx = index
+    assert idx.T.equals(idx)
+    assert idx.transpose().equals(idx)
+
+    values = idx.values
+
+    assert idx.shape == values.shape
+    assert idx.ndim == values.ndim
+    assert idx.size == values.size
+
+    if not isinstance(index, (RangeIndex, MultiIndex)):
+        # These two are not backed by an ndarray
+        assert idx.nbytes == values.nbytes
+
+    # test for validity
+    idx.nbytes
+    idx.values.nbytes
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_datetimelike.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_datetimelike.py
new file mode 100644
index 00000000..5ad2e9b2
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_datetimelike.py
@@ -0,0 +1,169 @@
+""" generic datetimelike tests """
+
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+
+class TestDatetimeLike:
+    @pytest.fixture(
+        params=[
+            pd.period_range("20130101", periods=5, freq="D"),
+            pd.TimedeltaIndex(
+                [
+                    "0 days 01:00:00",
+                    "1 days 01:00:00",
+                    "2 days 01:00:00",
+                    "3 days 01:00:00",
+                    "4 days 01:00:00",
+                ],
+                dtype="timedelta64[ns]",
+                freq="D",
+            ),
+            pd.DatetimeIndex(
+                ["2013-01-01", "2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05"],
+                dtype="datetime64[ns]",
+                freq="D",
+            ),
+        ]
+    )
+    def simple_index(self, request):
+        return request.param
+
+    def test_isin(self, simple_index):
+        index = simple_index[:4]
+        result = index.isin(index)
+        assert result.all()
+
+        result = index.isin(list(index))
+        assert result.all()
+
+        result = index.isin([index[2], 5])
+        expected = np.array([False, False, True, False])
+        tm.assert_numpy_array_equal(result, expected)
+
+    def test_argsort_matches_array(self, simple_index):
+        idx = simple_index
+        idx = idx.insert(1, pd.NaT)
+
+        result = idx.argsort()
+        expected = idx._data.argsort()
+        tm.assert_numpy_array_equal(result, expected)
+
+    def test_can_hold_identifiers(self, simple_index):
+        idx = simple_index
+        key = idx[0]
+        assert idx._can_hold_identifiers_and_holds_name(key) is False
+
+    def test_shift_identity(self, simple_index):
+        idx = simple_index
+        tm.assert_index_equal(idx, idx.shift(0))
+
+    def test_shift_empty(self, simple_index):
+        # GH#14811
+        idx = simple_index[:0]
+        tm.assert_index_equal(idx, idx.shift(1))
+
+    def test_str(self, simple_index):
+        # test the string repr
+        idx = simple_index.copy()
+        idx.name = "foo"
+        assert f"length={len(idx)}" not in str(idx)
+        assert "'foo'" in str(idx)
+        assert type(idx).__name__ in str(idx)
+
+        if hasattr(idx, "tz"):
+            if idx.tz is not None:
+                assert idx.tz in str(idx)
+        if isinstance(idx, pd.PeriodIndex):
+            assert f"dtype='period[{idx.freqstr}]'" in str(idx)
+        else:
+            assert f"freq='{idx.freqstr}'" in str(idx)
+
+    def test_view(self, simple_index):
+        idx = simple_index
+
+        idx_view = idx.view("i8")
+        result = type(simple_index)(idx)
+        tm.assert_index_equal(result, idx)
+
+        idx_view = idx.view(type(simple_index))
+        result = type(simple_index)(idx)
+        tm.assert_index_equal(result, idx_view)
+
+    def test_map_callable(self, simple_index):
+        index = simple_index
+        expected = index + index.freq
+        result = index.map(lambda x: x + index.freq)
+        tm.assert_index_equal(result, expected)
+
+        # map to NaT
+        result = index.map(lambda x: pd.NaT if x == index[0] else x)
+        expected = pd.Index([pd.NaT] + index[1:].tolist())
+        tm.assert_index_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "mapper",
+        [
+            lambda values, index: {i: e for e, i in zip(values, index)},
+            lambda values, index: pd.Series(values, index, dtype=object),
+        ],
+    )
+    @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+    def test_map_dictlike(self, mapper, simple_index):
+        index = simple_index
+        expected = index + index.freq
+
+        # don't compare the freqs
+        if isinstance(expected, (pd.DatetimeIndex, pd.TimedeltaIndex)):
+            expected = expected._with_freq(None)
+
+        result = index.map(mapper(expected, index))
+        tm.assert_index_equal(result, expected)
+
+        expected = pd.Index([pd.NaT] + index[1:].tolist())
+        result = index.map(mapper(expected, index))
+        tm.assert_index_equal(result, expected)
+
+        # empty map; these map to np.nan because we cannot know
+        # to re-infer things
+        expected = pd.Index([np.nan] * len(index))
+        result = index.map(mapper([], []))
+        tm.assert_index_equal(result, expected)
+
+    def test_getitem_preserves_freq(self, simple_index):
+        index = simple_index
+        assert index.freq is not None
+
+        result = index[:]
+        assert result.freq == index.freq
+
+    def test_where_cast_str(self, simple_index):
+        index = simple_index
+
+        mask = np.ones(len(index), dtype=bool)
+        mask[-1] = False
+
+        result = index.where(mask, str(index[0]))
+        expected = index.where(mask, index[0])
+        tm.assert_index_equal(result, expected)
+
+        result = index.where(mask, [str(index[0])])
+        tm.assert_index_equal(result, expected)
+
+        expected = index.astype(object).where(mask, "foo")
+        result = index.where(mask, "foo")
+        tm.assert_index_equal(result, expected)
+
+        result = index.where(mask, ["foo"])
+        tm.assert_index_equal(result, expected)
+
+    @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])
+    def test_diff(self, unit):
+        # GH 55080
+        dti = pd.to_datetime([10, 20, 30], unit=unit).as_unit(unit)
+        result = dti.diff(1)
+        expected = pd.TimedeltaIndex([pd.NaT, 10, 10], unit=unit).as_unit(unit)
+        tm.assert_index_equal(result, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_engines.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_engines.py
new file mode 100644
index 00000000..468c2240
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_engines.py
@@ -0,0 +1,192 @@
+import re
+
+import numpy as np
+import pytest
+
+from pandas._libs import index as libindex
+
+import pandas as pd
+
+
+@pytest.fixture(
+    params=[
+        (libindex.Int64Engine, np.int64),
+        (libindex.Int32Engine, np.int32),
+        (libindex.Int16Engine, np.int16),
+        (libindex.Int8Engine, np.int8),
+        (libindex.UInt64Engine, np.uint64),
+        (libindex.UInt32Engine, np.uint32),
+        (libindex.UInt16Engine, np.uint16),
+        (libindex.UInt8Engine, np.uint8),
+        (libindex.Float64Engine, np.float64),
+        (libindex.Float32Engine, np.float32),
+    ],
+    ids=lambda x: x[0].__name__,
+)
+def numeric_indexing_engine_type_and_dtype(request):
+    return request.param
+
+
+class TestDatetimeEngine:
+    @pytest.mark.parametrize(
+        "scalar",
+        [
+            pd.Timedelta(pd.Timestamp("2016-01-01").asm8.view("m8[ns]")),
+            pd.Timestamp("2016-01-01")._value,
+            pd.Timestamp("2016-01-01").to_pydatetime(),
+            pd.Timestamp("2016-01-01").to_datetime64(),
+        ],
+    )
+    def test_not_contains_requires_timestamp(self, scalar):
+        dti1 = pd.date_range("2016-01-01", periods=3)
+        dti2 = dti1.insert(1, pd.NaT)  # non-monotonic
+        dti3 = dti1.insert(3, dti1[0])  # non-unique
+        dti4 = pd.date_range("2016-01-01", freq="ns", periods=2_000_000)
+        dti5 = dti4.insert(0, dti4[0])  # over size threshold, not unique
+
+        msg = "|".join([re.escape(str(scalar)), re.escape(repr(scalar))])
+        for dti in [dti1, dti2, dti3, dti4, dti5]:
+            with pytest.raises(TypeError, match=msg):
+                scalar in dti._engine
+
+            with pytest.raises(KeyError, match=msg):
+                dti._engine.get_loc(scalar)
+
+
+class TestTimedeltaEngine:
+    @pytest.mark.parametrize(
+        "scalar",
+        [
+            pd.Timestamp(pd.Timedelta(days=42).asm8.view("datetime64[ns]")),
+            pd.Timedelta(days=42)._value,
+            pd.Timedelta(days=42).to_pytimedelta(),
+            pd.Timedelta(days=42).to_timedelta64(),
+        ],
+    )
+    def test_not_contains_requires_timedelta(self, scalar):
+        tdi1 = pd.timedelta_range("42 days", freq="9h", periods=1234)
+        tdi2 = tdi1.insert(1, pd.NaT)  # non-monotonic
+        tdi3 = tdi1.insert(3, tdi1[0])  # non-unique
+        tdi4 = pd.timedelta_range("42 days", freq="ns", periods=2_000_000)
+        tdi5 = tdi4.insert(0, tdi4[0])  # over size threshold, not unique
+
+        msg = "|".join([re.escape(str(scalar)), re.escape(repr(scalar))])
+        for tdi in [tdi1, tdi2, tdi3, tdi4, tdi5]:
+            with pytest.raises(TypeError, match=msg):
+                scalar in tdi._engine
+
+            with pytest.raises(KeyError, match=msg):
+                tdi._engine.get_loc(scalar)
+
+
+class TestNumericEngine:
+    def test_is_monotonic(self, numeric_indexing_engine_type_and_dtype):
+        engine_type, dtype = numeric_indexing_engine_type_and_dtype
+        num = 1000
+        arr = np.array([1] * num + [2] * num + [3] * num, dtype=dtype)
+
+        # monotonic increasing
+        engine = engine_type(arr)
+        assert engine.is_monotonic_increasing is True
+        assert engine.is_monotonic_decreasing is False
+
+        # monotonic decreasing
+        engine = engine_type(arr[::-1])
+        assert engine.is_monotonic_increasing is False
+        assert engine.is_monotonic_decreasing is True
+
+        # neither monotonic increasing nor decreasing
+        arr = np.array([1] * num + [2] * num + [1] * num, dtype=dtype)
+        engine = engine_type(arr[::-1])
+        assert engine.is_monotonic_increasing is False
+        assert engine.is_monotonic_decreasing is False
+
+    def test_is_unique(self, numeric_indexing_engine_type_and_dtype):
+        engine_type, dtype = numeric_indexing_engine_type_and_dtype
+
+        # unique
+        arr = np.array([1, 3, 2], dtype=dtype)
+        engine = engine_type(arr)
+        assert engine.is_unique is True
+
+        # not unique
+        arr = np.array([1, 2, 1], dtype=dtype)
+        engine = engine_type(arr)
+        assert engine.is_unique is False
+
+    def test_get_loc(self, numeric_indexing_engine_type_and_dtype):
+        engine_type, dtype = numeric_indexing_engine_type_and_dtype
+
+        # unique
+        arr = np.array([1, 2, 3], dtype=dtype)
+        engine = engine_type(arr)
+        assert engine.get_loc(2) == 1
+
+        # monotonic
+        num = 1000
+        arr = np.array([1] * num + [2] * num + [3] * num, dtype=dtype)
+        engine = engine_type(arr)
+        assert engine.get_loc(2) == slice(1000, 2000)
+
+        # not monotonic
+        arr = np.array([1, 2, 3] * num, dtype=dtype)
+        engine = engine_type(arr)
+        expected = np.array([False, True, False] * num, dtype=bool)
+        result = engine.get_loc(2)
+        assert (result == expected).all()
+
+
+class TestObjectEngine:
+    engine_type = libindex.ObjectEngine
+    dtype = np.object_
+    values = list("abc")
+
+    def test_is_monotonic(self):
+        num = 1000
+        arr = np.array(["a"] * num + ["a"] * num + ["c"] * num, dtype=self.dtype)
+
+        # monotonic increasing
+        engine = self.engine_type(arr)
+        assert engine.is_monotonic_increasing is True
+        assert engine.is_monotonic_decreasing is False
+
+        # monotonic decreasing
+        engine = self.engine_type(arr[::-1])
+        assert engine.is_monotonic_increasing is False
+        assert engine.is_monotonic_decreasing is True
+
+        # neither monotonic increasing nor decreasing
+        arr = np.array(["a"] * num + ["b"] * num + ["a"] * num, dtype=self.dtype)
+        engine = self.engine_type(arr[::-1])
+        assert engine.is_monotonic_increasing is False
+        assert engine.is_monotonic_decreasing is False
+
+    def test_is_unique(self):
+        # unique
+        arr = np.array(self.values, dtype=self.dtype)
+        engine = self.engine_type(arr)
+        assert engine.is_unique is True
+
+        # not unique
+        arr = np.array(["a", "b", "a"], dtype=self.dtype)
+        engine = self.engine_type(arr)
+        assert engine.is_unique is False
+
+    def test_get_loc(self):
+        # unique
+        arr = np.array(self.values, dtype=self.dtype)
+        engine = self.engine_type(arr)
+        assert engine.get_loc("b") == 1
+
+        # monotonic
+        num = 1000
+        arr = np.array(["a"] * num + ["b"] * num + ["c"] * num, dtype=self.dtype)
+        engine = self.engine_type(arr)
+        assert engine.get_loc("b") == slice(1000, 2000)
+
+        # not monotonic
+        arr = np.array(self.values * num, dtype=self.dtype)
+        engine = self.engine_type(arr)
+        expected = np.array([False, True, False] * num, dtype=bool)
+        result = engine.get_loc("b")
+        assert (result == expected).all()
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_frozen.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_frozen.py
new file mode 100644
index 00000000..ace66b5b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_frozen.py
@@ -0,0 +1,113 @@
+import re
+
+import pytest
+
+from pandas.core.indexes.frozen import FrozenList
+
+
+@pytest.fixture
+def lst():
+    return [1, 2, 3, 4, 5]
+
+
+@pytest.fixture
+def container(lst):
+    return FrozenList(lst)
+
+
+@pytest.fixture
+def unicode_container():
+    return FrozenList(["\u05d0", "\u05d1", "c"])
+
+
+class TestFrozenList:
+    def check_mutable_error(self, *args, **kwargs):
+        # Pass whatever function you normally would to pytest.raises
+        # (after the Exception kind).
+        mutable_regex = re.compile("does not support mutable operations")
+        msg = "'(_s)?re.(SRE_)?Pattern' object is not callable"
+        with pytest.raises(TypeError, match=msg):
+            mutable_regex(*args, **kwargs)
+
+    def test_no_mutable_funcs(self, container):
+        def setitem():
+            container[0] = 5
+
+        self.check_mutable_error(setitem)
+
+        def setslice():
+            container[1:2] = 3
+
+        self.check_mutable_error(setslice)
+
+        def delitem():
+            del container[0]
+
+        self.check_mutable_error(delitem)
+
+        def delslice():
+            del container[0:3]
+
+        self.check_mutable_error(delslice)
+
+        mutable_methods = ("extend", "pop", "remove", "insert")
+
+        for meth in mutable_methods:
+            self.check_mutable_error(getattr(container, meth))
+
+    def test_slicing_maintains_type(self, container, lst):
+        result = container[1:2]
+        expected = lst[1:2]
+        self.check_result(result, expected)
+
+    def check_result(self, result, expected):
+        assert isinstance(result, FrozenList)
+        assert result == expected
+
+    def test_string_methods_dont_fail(self, container):
+        repr(container)
+        str(container)
+        bytes(container)
+
+    def test_tricky_container(self, unicode_container):
+        repr(unicode_container)
+        str(unicode_container)
+
+    def test_add(self, container, lst):
+        result = container + (1, 2, 3)
+        expected = FrozenList(lst + [1, 2, 3])
+        self.check_result(result, expected)
+
+        result = (1, 2, 3) + container
+        expected = FrozenList([1, 2, 3] + lst)
+        self.check_result(result, expected)
+
+    def test_iadd(self, container, lst):
+        q = r = container
+
+        q += [5]
+        self.check_result(q, lst + [5])
+
+        # Other shouldn't be mutated.
+        self.check_result(r, lst)
+
+    def test_union(self, container, lst):
+        result = container.union((1, 2, 3))
+        expected = FrozenList(lst + [1, 2, 3])
+        self.check_result(result, expected)
+
+    def test_difference(self, container):
+        result = container.difference([2])
+        expected = FrozenList([1, 3, 4, 5])
+        self.check_result(result, expected)
+
+    def test_difference_dupe(self):
+        result = FrozenList([1, 2, 3, 2]).difference([2])
+        expected = FrozenList([1, 3])
+        self.check_result(result, expected)
+
+    def test_tricky_container_to_bytes_raises(self, unicode_container):
+        # GH 26447
+        msg = "^'str' object cannot be interpreted as an integer$"
+        with pytest.raises(TypeError, match=msg):
+            bytes(unicode_container)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_index_new.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_index_new.py
new file mode 100644
index 00000000..d35c3566
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_index_new.py
@@ -0,0 +1,403 @@
+"""
+Tests for the Index constructor conducting inference.
+""" +from datetime import ( + datetime, + timedelta, +) +from decimal import Decimal + +import numpy as np +import pytest + +from pandas import ( + NA, + Categorical, + CategoricalIndex, + DatetimeIndex, + Index, + IntervalIndex, + MultiIndex, + NaT, + PeriodIndex, + Series, + TimedeltaIndex, + Timestamp, + array, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm + + +class TestIndexConstructorInference: + def test_object_all_bools(self): + # GH#49594 match Series behavior on ndarray[object] of all bools + arr = np.array([True, False], dtype=object) + res = Index(arr) + assert res.dtype == object + + # since the point is matching Series behavior, let's double check + assert Series(arr).dtype == object + + def test_object_all_complex(self): + # GH#49594 match Series behavior on ndarray[object] of all complex + arr = np.array([complex(1), complex(2)], dtype=object) + res = Index(arr) + assert res.dtype == object + + # since the point is matching Series behavior, let's double check + assert Series(arr).dtype == object + + @pytest.mark.parametrize("val", [NaT, None, np.nan, float("nan")]) + def test_infer_nat(self, val): + # GH#49340 all NaT/None/nan and at least 1 NaT -> datetime64[ns], + # matching Series behavior + values = [NaT, val] + + idx = Index(values) + assert idx.dtype == "datetime64[ns]" and idx.isna().all() + + idx = Index(values[::-1]) + assert idx.dtype == "datetime64[ns]" and idx.isna().all() + + idx = Index(np.array(values, dtype=object)) + assert idx.dtype == "datetime64[ns]" and idx.isna().all() + + idx = Index(np.array(values, dtype=object)[::-1]) + assert idx.dtype == "datetime64[ns]" and idx.isna().all() + + @pytest.mark.parametrize("na_value", [None, np.nan]) + @pytest.mark.parametrize("vtype", [list, tuple, iter]) + def test_construction_list_tuples_nan(self, na_value, vtype): + # GH#18505 : valid tuples containing NaN + values = [(1, "two"), (3.0, na_value)] + result = Index(vtype(values)) + expected = MultiIndex.from_tuples(values) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "dtype", + [int, "int64", "int32", "int16", "int8", "uint64", "uint32", "uint16", "uint8"], + ) + def test_constructor_int_dtype_float(self, dtype): + # GH#18400 + expected = Index([0, 1, 2, 3], dtype=dtype) + result = Index([0.0, 1.0, 2.0, 3.0], dtype=dtype) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("cast_index", [True, False]) + @pytest.mark.parametrize( + "vals", [[True, False, True], np.array([True, False, True], dtype=bool)] + ) + def test_constructor_dtypes_to_object(self, cast_index, vals): + if cast_index: + index = Index(vals, dtype=bool) + else: + index = Index(vals) + + assert type(index) is Index + assert index.dtype == bool + + def test_constructor_categorical_to_object(self): + # GH#32167 Categorical data and dtype=object should return object-dtype + ci = CategoricalIndex(range(5)) + result = Index(ci, dtype=object) + assert not isinstance(result, CategoricalIndex) + + def test_constructor_infer_periodindex(self): + xp = period_range("2012-1-1", freq="M", periods=3) + rs = Index(xp) + tm.assert_index_equal(rs, xp) + assert isinstance(rs, PeriodIndex) + + def test_from_list_of_periods(self): + rng = period_range("1/1/2000", periods=20, freq="D") + periods = list(rng) + + result = Index(periods) + assert isinstance(result, PeriodIndex) + + @pytest.mark.parametrize("pos", [0, 1]) + @pytest.mark.parametrize( + "klass,dtype,ctor", + [ + (DatetimeIndex, "datetime64[ns]", np.datetime64("nat")), + 
(TimedeltaIndex, "timedelta64[ns]", np.timedelta64("nat")), + ], + ) + def test_constructor_infer_nat_dt_like( + self, pos, klass, dtype, ctor, nulls_fixture, request + ): + if isinstance(nulls_fixture, Decimal): + # We dont cast these to datetime64/timedelta64 + pytest.skip( + f"We don't cast {type(nulls_fixture).__name__} to " + "datetime64/timedelta64" + ) + + expected = klass([NaT, NaT]) + assert expected.dtype == dtype + data = [ctor] + data.insert(pos, nulls_fixture) + + warn = None + if nulls_fixture is NA: + expected = Index([NA, NaT]) + mark = pytest.mark.xfail(reason="Broken with np.NaT ctor; see GH 31884") + request.node.add_marker(mark) + # GH#35942 numpy will emit a DeprecationWarning within the + # assert_index_equal calls. Since we can't do anything + # about it until GH#31884 is fixed, we suppress that warning. + warn = DeprecationWarning + + result = Index(data) + + with tm.assert_produces_warning(warn): + tm.assert_index_equal(result, expected) + + result = Index(np.array(data, dtype=object)) + + with tm.assert_produces_warning(warn): + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("swap_objs", [True, False]) + def test_constructor_mixed_nat_objs_infers_object(self, swap_objs): + # mixed np.datetime64/timedelta64 nat results in object + data = [np.datetime64("nat"), np.timedelta64("nat")] + if swap_objs: + data = data[::-1] + + expected = Index(data, dtype=object) + tm.assert_index_equal(Index(data), expected) + tm.assert_index_equal(Index(np.array(data, dtype=object)), expected) + + @pytest.mark.parametrize("swap_objs", [True, False]) + def test_constructor_datetime_and_datetime64(self, swap_objs): + data = [Timestamp(2021, 6, 8, 9, 42), np.datetime64("now")] + if swap_objs: + data = data[::-1] + expected = DatetimeIndex(data) + + tm.assert_index_equal(Index(data), expected) + tm.assert_index_equal(Index(np.array(data, dtype=object)), expected) + + +class TestDtypeEnforced: + # check we don't silently ignore the dtype keyword + + def test_constructor_object_dtype_with_ea_data(self, any_numeric_ea_dtype): + # GH#45206 + arr = array([0], dtype=any_numeric_ea_dtype) + + idx = Index(arr, dtype=object) + assert idx.dtype == object + + @pytest.mark.parametrize("dtype", [object, "float64", "uint64", "category"]) + def test_constructor_range_values_mismatched_dtype(self, dtype): + rng = Index(range(5)) + + result = Index(rng, dtype=dtype) + assert result.dtype == dtype + + result = Index(range(5), dtype=dtype) + assert result.dtype == dtype + + @pytest.mark.parametrize("dtype", [object, "float64", "uint64", "category"]) + def test_constructor_categorical_values_mismatched_non_ea_dtype(self, dtype): + cat = Categorical([1, 2, 3]) + + result = Index(cat, dtype=dtype) + assert result.dtype == dtype + + def test_constructor_categorical_values_mismatched_dtype(self): + dti = date_range("2016-01-01", periods=3) + cat = Categorical(dti) + result = Index(cat, dti.dtype) + tm.assert_index_equal(result, dti) + + dti2 = dti.tz_localize("Asia/Tokyo") + cat2 = Categorical(dti2) + result = Index(cat2, dti2.dtype) + tm.assert_index_equal(result, dti2) + + ii = IntervalIndex.from_breaks(range(5)) + cat3 = Categorical(ii) + result = Index(cat3, dtype=ii.dtype) + tm.assert_index_equal(result, ii) + + def test_constructor_ea_values_mismatched_categorical_dtype(self): + dti = date_range("2016-01-01", periods=3) + result = Index(dti, dtype="category") + expected = CategoricalIndex(dti) + tm.assert_index_equal(result, expected) + + dti2 = date_range("2016-01-01", periods=3, 
tz="US/Pacific") + result = Index(dti2, dtype="category") + expected = CategoricalIndex(dti2) + tm.assert_index_equal(result, expected) + + def test_constructor_period_values_mismatched_dtype(self): + pi = period_range("2016-01-01", periods=3, freq="D") + result = Index(pi, dtype="category") + expected = CategoricalIndex(pi) + tm.assert_index_equal(result, expected) + + def test_constructor_timedelta64_values_mismatched_dtype(self): + # check we don't silently ignore the dtype keyword + tdi = timedelta_range("4 Days", periods=5) + result = Index(tdi, dtype="category") + expected = CategoricalIndex(tdi) + tm.assert_index_equal(result, expected) + + def test_constructor_interval_values_mismatched_dtype(self): + dti = date_range("2016-01-01", periods=3) + ii = IntervalIndex.from_breaks(dti) + result = Index(ii, dtype="category") + expected = CategoricalIndex(ii) + tm.assert_index_equal(result, expected) + + def test_constructor_datetime64_values_mismatched_period_dtype(self): + dti = date_range("2016-01-01", periods=3) + result = Index(dti, dtype="Period[D]") + expected = dti.to_period("D") + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["int64", "uint64"]) + def test_constructor_int_dtype_nan_raises(self, dtype): + # see GH#15187 + data = [np.nan] + msg = "cannot convert" + with pytest.raises(ValueError, match=msg): + Index(data, dtype=dtype) + + @pytest.mark.parametrize( + "vals", + [ + [1, 2, 3], + np.array([1, 2, 3]), + np.array([1, 2, 3], dtype=int), + # below should coerce + [1.0, 2.0, 3.0], + np.array([1.0, 2.0, 3.0], dtype=float), + ], + ) + def test_constructor_dtypes_to_int(self, vals, any_int_numpy_dtype): + dtype = any_int_numpy_dtype + index = Index(vals, dtype=dtype) + assert index.dtype == dtype + + @pytest.mark.parametrize( + "vals", + [ + [1, 2, 3], + [1.0, 2.0, 3.0], + np.array([1.0, 2.0, 3.0]), + np.array([1, 2, 3], dtype=int), + np.array([1.0, 2.0, 3.0], dtype=float), + ], + ) + def test_constructor_dtypes_to_float(self, vals, float_numpy_dtype): + dtype = float_numpy_dtype + index = Index(vals, dtype=dtype) + assert index.dtype == dtype + + @pytest.mark.parametrize( + "vals", + [ + [1, 2, 3], + np.array([1, 2, 3], dtype=int), + np.array(["2011-01-01", "2011-01-02"], dtype="datetime64[ns]"), + [datetime(2011, 1, 1), datetime(2011, 1, 2)], + ], + ) + def test_constructor_dtypes_to_categorical(self, vals): + index = Index(vals, dtype="category") + assert isinstance(index, CategoricalIndex) + + @pytest.mark.parametrize("cast_index", [True, False]) + @pytest.mark.parametrize( + "vals", + [ + Index(np.array([np.datetime64("2011-01-01"), np.datetime64("2011-01-02")])), + Index([datetime(2011, 1, 1), datetime(2011, 1, 2)]), + ], + ) + def test_constructor_dtypes_to_datetime(self, cast_index, vals): + if cast_index: + index = Index(vals, dtype=object) + assert isinstance(index, Index) + assert index.dtype == object + else: + index = Index(vals) + assert isinstance(index, DatetimeIndex) + + @pytest.mark.parametrize("cast_index", [True, False]) + @pytest.mark.parametrize( + "vals", + [ + np.array([np.timedelta64(1, "D"), np.timedelta64(1, "D")]), + [timedelta(1), timedelta(1)], + ], + ) + def test_constructor_dtypes_to_timedelta(self, cast_index, vals): + if cast_index: + index = Index(vals, dtype=object) + assert isinstance(index, Index) + assert index.dtype == object + else: + index = Index(vals) + assert isinstance(index, TimedeltaIndex) + + +class TestIndexConstructorUnwrapping: + # Test passing different arraylike values to pd.Index + + 
@pytest.mark.parametrize("klass", [Index, DatetimeIndex]) + def test_constructor_from_series_dt64(self, klass): + stamps = [Timestamp("20110101"), Timestamp("20120101"), Timestamp("20130101")] + expected = DatetimeIndex(stamps) + ser = Series(stamps) + result = klass(ser) + tm.assert_index_equal(result, expected) + + def test_constructor_no_pandas_array(self): + ser = Series([1, 2, 3]) + result = Index(ser.array) + expected = Index([1, 2, 3]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "array", + [ + np.arange(5), + np.array(["a", "b", "c"]), + date_range("2000-01-01", periods=3).values, + ], + ) + def test_constructor_ndarray_like(self, array): + # GH#5460#issuecomment-44474502 + # it should be possible to convert any object that satisfies the numpy + # ndarray interface directly into an Index + class ArrayLike: + def __init__(self, array) -> None: + self.array = array + + def __array__(self, dtype=None) -> np.ndarray: + return self.array + + expected = Index(array) + result = Index(ArrayLike(array)) + tm.assert_index_equal(result, expected) + + +class TestIndexConstructionErrors: + def test_constructor_overflow_int64(self): + # see GH#15832 + msg = ( + "The elements provided in the data cannot " + "all be casted to the dtype int64" + ) + with pytest.raises(OverflowError, match=msg): + Index([np.iinfo(np.uint64).max - 1], dtype="int64") diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_indexing.py new file mode 100644 index 00000000..1ea47f63 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_indexing.py @@ -0,0 +1,357 @@ +""" +test_indexing tests the following Index methods: + __getitem__ + get_loc + get_value + __contains__ + take + where + get_indexer + get_indexer_for + slice_locs + asof_locs + +The corresponding tests.indexes.[index_type].test_indexing files +contain tests for the corresponding methods specific to those Index subclasses. 
+""" +import numpy as np +import pytest + +from pandas.errors import InvalidIndexError + +from pandas.core.dtypes.common import ( + is_float_dtype, + is_scalar, +) + +from pandas import ( + NA, + DatetimeIndex, + Index, + IntervalIndex, + MultiIndex, + NaT, + PeriodIndex, + TimedeltaIndex, +) +import pandas._testing as tm + + +class TestTake: + def test_take_invalid_kwargs(self, index): + indices = [1, 2] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + index.take(indices, foo=2) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + index.take(indices, out=indices) + + msg = "the 'mode' parameter is not supported" + with pytest.raises(ValueError, match=msg): + index.take(indices, mode="clip") + + def test_take(self, index): + indexer = [4, 3, 0, 2] + if len(index) < 5: + pytest.skip("Test doesn't make sense since not enough elements") + + result = index.take(indexer) + expected = index[indexer] + assert result.equals(expected) + + if not isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): + # GH 10791 + msg = r"'(.*Index)' object has no attribute 'freq'" + with pytest.raises(AttributeError, match=msg): + index.freq + + def test_take_indexer_type(self): + # GH#42875 + integer_index = Index([0, 1, 2, 3]) + scalar_index = 1 + msg = "Expected indices to be array-like" + with pytest.raises(TypeError, match=msg): + integer_index.take(scalar_index) + + def test_take_minus1_without_fill(self, index): + # -1 does not get treated as NA unless allow_fill=True is passed + if len(index) == 0: + # Test is not applicable + pytest.skip("Test doesn't make sense for empty index") + + result = index.take([0, 0, -1]) + + expected = index.take([0, 0, len(index) - 1]) + tm.assert_index_equal(result, expected) + + +class TestContains: + @pytest.mark.parametrize( + "index,val", + [ + (Index([0, 1, 2]), 2), + (Index([0, 1, "2"]), "2"), + (Index([0, 1, 2, np.inf, 4]), 4), + (Index([0, 1, 2, np.nan, 4]), 4), + (Index([0, 1, 2, np.inf]), np.inf), + (Index([0, 1, 2, np.nan]), np.nan), + ], + ) + def test_index_contains(self, index, val): + assert val in index + + @pytest.mark.parametrize( + "index,val", + [ + (Index([0, 1, 2]), "2"), + (Index([0, 1, "2"]), 2), + (Index([0, 1, 2, np.inf]), 4), + (Index([0, 1, 2, np.nan]), 4), + (Index([0, 1, 2, np.inf]), np.nan), + (Index([0, 1, 2, np.nan]), np.inf), + # Checking if np.inf in int64 Index should not cause an OverflowError + # Related to GH 16957 + (Index([0, 1, 2], dtype=np.int64), np.inf), + (Index([0, 1, 2], dtype=np.int64), np.nan), + (Index([0, 1, 2], dtype=np.uint64), np.inf), + (Index([0, 1, 2], dtype=np.uint64), np.nan), + ], + ) + def test_index_not_contains(self, index, val): + assert val not in index + + @pytest.mark.parametrize( + "index,val", [(Index([0, 1, "2"]), 0), (Index([0, 1, "2"]), "2")] + ) + def test_mixed_index_contains(self, index, val): + # GH#19860 + assert val in index + + @pytest.mark.parametrize( + "index,val", [(Index([0, 1, "2"]), "1"), (Index([0, 1, "2"]), 2)] + ) + def test_mixed_index_not_contains(self, index, val): + # GH#19860 + assert val not in index + + def test_contains_with_float_index(self, any_real_numpy_dtype): + # GH#22085 + dtype = any_real_numpy_dtype + data = [0, 1, 2, 3] if not is_float_dtype(dtype) else [0.1, 1.1, 2.2, 3.3] + index = Index(data, dtype=dtype) + + if not is_float_dtype(index.dtype): + assert 1.1 not in index + assert 1.0 in index + assert 1 in index + else: + assert 1.1 in index + assert 1.0 
not in index + assert 1 not in index + + def test_contains_requires_hashable_raises(self, index): + if isinstance(index, MultiIndex): + return # TODO: do we want this to raise? + + msg = "unhashable type: 'list'" + with pytest.raises(TypeError, match=msg): + [] in index + + msg = "|".join( + [ + r"unhashable type: 'dict'", + r"must be real number, not dict", + r"an integer is required", + r"\{\}", + r"pandas\._libs\.interval\.IntervalTree' is not iterable", + ] + ) + with pytest.raises(TypeError, match=msg): + {} in index._engine + + +class TestGetLoc: + def test_get_loc_non_hashable(self, index): + with pytest.raises(InvalidIndexError, match="[0, 1]"): + index.get_loc([0, 1]) + + def test_get_loc_non_scalar_hashable(self, index): + # GH52877 + from enum import Enum + + class E(Enum): + X1 = "x1" + + assert not is_scalar(E.X1) + + exc = KeyError + msg = "" + if isinstance( + index, + ( + DatetimeIndex, + TimedeltaIndex, + PeriodIndex, + IntervalIndex, + ), + ): + # TODO: make these more consistent? + exc = InvalidIndexError + msg = "E.X1" + with pytest.raises(exc, match=msg): + index.get_loc(E.X1) + + def test_get_loc_generator(self, index): + exc = KeyError + if isinstance( + index, + ( + DatetimeIndex, + TimedeltaIndex, + PeriodIndex, + IntervalIndex, + MultiIndex, + ), + ): + # TODO: make these more consistent? + exc = InvalidIndexError + with pytest.raises(exc, match="generator object"): + # MultiIndex specifically checks for generator; others for scalar + index.get_loc(x for x in range(5)) + + def test_get_loc_masked_duplicated_na(self): + # GH#48411 + idx = Index([1, 2, NA, NA], dtype="Int64") + result = idx.get_loc(NA) + expected = np.array([False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + +class TestGetIndexer: + def test_get_indexer_base(self, index): + if index._index_as_unique: + expected = np.arange(index.size, dtype=np.intp) + actual = index.get_indexer(index) + tm.assert_numpy_array_equal(expected, actual) + else: + msg = "Reindexing only valid with uniquely valued Index objects" + with pytest.raises(InvalidIndexError, match=msg): + index.get_indexer(index) + + with pytest.raises(ValueError, match="Invalid fill method"): + index.get_indexer(index, method="invalid") + + def test_get_indexer_consistency(self, index): + # See GH#16819 + + if index._index_as_unique: + indexer = index.get_indexer(index[0:2]) + assert isinstance(indexer, np.ndarray) + assert indexer.dtype == np.intp + else: + msg = "Reindexing only valid with uniquely valued Index objects" + with pytest.raises(InvalidIndexError, match=msg): + index.get_indexer(index[0:2]) + + indexer, _ = index.get_indexer_non_unique(index[0:2]) + assert isinstance(indexer, np.ndarray) + assert indexer.dtype == np.intp + + def test_get_indexer_masked_duplicated_na(self): + # GH#48411 + idx = Index([1, 2, NA, NA], dtype="Int64") + result = idx.get_indexer_for(Index([1, NA], dtype="Int64")) + expected = np.array([0, 2, 3], dtype=result.dtype) + tm.assert_numpy_array_equal(result, expected) + + +class TestConvertSliceIndexer: + def test_convert_almost_null_slice(self, index): + # slice with None at both ends, but not step + + key = slice(None, None, "foo") + + if isinstance(index, IntervalIndex): + msg = "label-based slicing with step!=1 is not supported for IntervalIndex" + with pytest.raises(ValueError, match=msg): + index._convert_slice_indexer(key, "loc") + else: + msg = "'>=' not supported between instances of 'str' and 'int'" + with pytest.raises(TypeError, match=msg): + 
index._convert_slice_indexer(key, "loc") + + +class TestPutmask: + def test_putmask_with_wrong_mask(self, index): + # GH#18368 + if not len(index): + pytest.skip("Test doesn't make sense for empty index") + + fill = index[0] + + msg = "putmask: mask and data must be the same size" + with pytest.raises(ValueError, match=msg): + index.putmask(np.ones(len(index) + 1, np.bool_), fill) + + with pytest.raises(ValueError, match=msg): + index.putmask(np.ones(len(index) - 1, np.bool_), fill) + + with pytest.raises(ValueError, match=msg): + index.putmask("foo", fill) + + +@pytest.mark.parametrize( + "idx", [Index([1, 2, 3]), Index([0.1, 0.2, 0.3]), Index(["a", "b", "c"])] +) +def test_getitem_deprecated_float(idx): + # https://github.com/pandas-dev/pandas/issues/34191 + + msg = "Indexing with a float is no longer supported" + with pytest.raises(IndexError, match=msg): + idx[1.0] + + +@pytest.mark.parametrize( + "idx,target,expected", + [ + ([np.nan, "var1", np.nan], [np.nan], np.array([0, 2], dtype=np.intp)), + ( + [np.nan, "var1", np.nan], + [np.nan, "var1"], + np.array([0, 2, 1], dtype=np.intp), + ), + ( + np.array([np.nan, "var1", np.nan], dtype=object), + [np.nan], + np.array([0, 2], dtype=np.intp), + ), + ( + DatetimeIndex(["2020-08-05", NaT, NaT]), + [NaT], + np.array([1, 2], dtype=np.intp), + ), + (["a", "b", "a", np.nan], [np.nan], np.array([3], dtype=np.intp)), + ( + np.array(["b", np.nan, float("NaN"), "b"], dtype=object), + Index([np.nan], dtype=object), + np.array([1, 2], dtype=np.intp), + ), + ], +) +def test_get_indexer_non_unique_multiple_nans(idx, target, expected): + # GH 35392 + axis = Index(idx) + actual = axis.get_indexer_for(target) + tm.assert_numpy_array_equal(actual, expected) + + +def test_get_indexer_non_unique_nans_in_object_dtype_target(nulls_fixture): + idx = Index([1.0, 2.0]) + target = Index([1, nulls_fixture], dtype="object") + + result_idx, result_missing = idx.get_indexer_non_unique(target) + tm.assert_numpy_array_equal(result_idx, np.array([0, -1], dtype=np.intp)) + tm.assert_numpy_array_equal(result_missing, np.array([1], dtype=np.intp)) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_numpy_compat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_numpy_compat.py new file mode 100644 index 00000000..ace78d77 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_numpy_compat.py @@ -0,0 +1,189 @@ +import numpy as np +import pytest + +from pandas import ( + CategoricalIndex, + DatetimeIndex, + Index, + PeriodIndex, + TimedeltaIndex, + isna, +) +import pandas._testing as tm +from pandas.api.types import ( + is_complex_dtype, + is_numeric_dtype, +) +from pandas.core.arrays import BooleanArray +from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin + + +def test_numpy_ufuncs_out(index): + result = index == index + + out = np.empty(index.shape, dtype=bool) + np.equal(index, index, out=out) + tm.assert_numpy_array_equal(out, result) + + if not index._is_multi: + # same thing on the ExtensionArray + out = np.empty(index.shape, dtype=bool) + np.equal(index.array, index.array, out=out) + tm.assert_numpy_array_equal(out, result) + + +@pytest.mark.parametrize( + "func", + [ + np.exp, + np.exp2, + np.expm1, + np.log, + np.log2, + np.log10, + np.log1p, + np.sqrt, + np.sin, + np.cos, + np.tan, + np.arcsin, + np.arccos, + np.arctan, + np.sinh, + np.cosh, + np.tanh, + np.arcsinh, + np.arccosh, + np.arctanh, + np.deg2rad, + np.rad2deg, + ], + ids=lambda x: x.__name__, +) +def 
test_numpy_ufuncs_basic(index, func): + # test ufuncs of numpy, see: + # https://numpy.org/doc/stable/reference/ufuncs.html + + if isinstance(index, DatetimeIndexOpsMixin): + with tm.external_error_raised((TypeError, AttributeError)): + with np.errstate(all="ignore"): + func(index) + elif is_numeric_dtype(index) and not ( + is_complex_dtype(index) and func in [np.deg2rad, np.rad2deg] + ): + # coerces to float (e.g. np.sin) + with np.errstate(all="ignore"): + result = func(index) + arr_result = func(index.values) + if arr_result.dtype == np.float16: + arr_result = arr_result.astype(np.float32) + exp = Index(arr_result, name=index.name) + + tm.assert_index_equal(result, exp) + if isinstance(index.dtype, np.dtype) and is_numeric_dtype(index): + if is_complex_dtype(index): + assert result.dtype == index.dtype + elif index.dtype in ["bool", "int8", "uint8"]: + assert result.dtype in ["float16", "float32"] + elif index.dtype in ["int16", "uint16", "float32"]: + assert result.dtype == "float32" + else: + assert result.dtype == "float64" + else: + # e.g. np.exp with Int64 -> Float64 + assert type(result) is Index + # raise AttributeError or TypeError + elif len(index) == 0: + pass + else: + with tm.external_error_raised((TypeError, AttributeError)): + with np.errstate(all="ignore"): + func(index) + + +@pytest.mark.parametrize( + "func", [np.isfinite, np.isinf, np.isnan, np.signbit], ids=lambda x: x.__name__ +) +def test_numpy_ufuncs_other(index, func): + # test ufuncs of numpy, see: + # https://numpy.org/doc/stable/reference/ufuncs.html + if isinstance(index, (DatetimeIndex, TimedeltaIndex)): + if func in (np.isfinite, np.isinf, np.isnan): + # numpy 1.18 changed isinf and isnan to not raise on dt64/td64 + result = func(index) + assert isinstance(result, np.ndarray) + + out = np.empty(index.shape, dtype=bool) + func(index, out=out) + tm.assert_numpy_array_equal(out, result) + else: + with tm.external_error_raised(TypeError): + func(index) + + elif isinstance(index, PeriodIndex): + with tm.external_error_raised(TypeError): + func(index) + + elif is_numeric_dtype(index) and not ( + is_complex_dtype(index) and func is np.signbit + ): + # Results in bool array + result = func(index) + if not isinstance(index.dtype, np.dtype): + # e.g. Int64 we expect to get BooleanArray back + assert isinstance(result, BooleanArray) + else: + assert isinstance(result, np.ndarray) + + out = np.empty(index.shape, dtype=bool) + func(index, out=out) + + if not isinstance(index.dtype, np.dtype): + tm.assert_numpy_array_equal(out, result._data) + else: + tm.assert_numpy_array_equal(out, result) + + elif len(index) == 0: + pass + else: + with tm.external_error_raised(TypeError): + func(index) + + +@pytest.mark.parametrize("func", [np.maximum, np.minimum]) +def test_numpy_ufuncs_reductions(index, func, request): + # TODO: overlap with tests.series.test_ufunc.test_reductions + if len(index) == 0: + pytest.skip("Test doesn't make sense for empty index.") + + if isinstance(index, CategoricalIndex) and index.dtype.ordered is False: + with pytest.raises(TypeError, match="is not ordered for"): + func.reduce(index) + return + else: + result = func.reduce(index) + + if func is np.maximum: + expected = index.max(skipna=False) + else: + expected = index.min(skipna=False) + # TODO: do we have cases both with and without NAs? 
+ + assert type(result) is type(expected) + if isna(result): + assert isna(expected) + else: + assert result == expected + + +@pytest.mark.parametrize("func", [np.bitwise_and, np.bitwise_or, np.bitwise_xor]) +def test_numpy_ufuncs_bitwise(func): + # https://github.com/pandas-dev/pandas/issues/46769 + idx1 = Index([1, 2, 3, 4], dtype="int64") + idx2 = Index([3, 4, 5, 6], dtype="int64") + + with tm.assert_produces_warning(None): + result = func(idx1, idx2) + + expected = Index(func(idx1.values, idx2.values)) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_old_base.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_old_base.py new file mode 100644 index 00000000..79dc423f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_old_base.py @@ -0,0 +1,1025 @@ +from __future__ import annotations + +from datetime import datetime +import gc + +import numpy as np +import pytest + +from pandas._libs.tslibs import Timestamp + +from pandas.core.dtypes.common import ( + is_integer_dtype, + is_numeric_dtype, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + CategoricalIndex, + DatetimeIndex, + DatetimeTZDtype, + Index, + IntervalIndex, + MultiIndex, + PeriodIndex, + RangeIndex, + Series, + TimedeltaIndex, + isna, + period_range, +) +import pandas._testing as tm +from pandas.core.arrays import BaseMaskedArray + + +class TestBase: + @pytest.fixture( + params=[ + RangeIndex(start=0, stop=20, step=2), + Index(np.arange(5, dtype=np.float64)), + Index(np.arange(5, dtype=np.float32)), + Index(np.arange(5, dtype=np.uint64)), + Index(range(0, 20, 2), dtype=np.int64), + Index(range(0, 20, 2), dtype=np.int32), + Index(range(0, 20, 2), dtype=np.int16), + Index(range(0, 20, 2), dtype=np.int8), + Index(list("abcde")), + Index([0, "a", 1, "b", 2, "c"]), + period_range("20130101", periods=5, freq="D"), + TimedeltaIndex( + [ + "0 days 01:00:00", + "1 days 01:00:00", + "2 days 01:00:00", + "3 days 01:00:00", + "4 days 01:00:00", + ], + dtype="timedelta64[ns]", + freq="D", + ), + DatetimeIndex( + ["2013-01-01", "2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05"], + dtype="datetime64[ns]", + freq="D", + ), + IntervalIndex.from_breaks(range(11), closed="right"), + ] + ) + def simple_index(self, request): + return request.param + + def test_pickle_compat_construction(self, simple_index): + # need an object to create with + if isinstance(simple_index, RangeIndex): + pytest.skip("RangeIndex() is a valid constructor") + msg = "|".join( + [ + r"Index\(\.\.\.\) must be called with a collection of some " + r"kind, None was passed", + r"DatetimeIndex\(\) must be called with a collection of some " + r"kind, None was passed", + r"TimedeltaIndex\(\) must be called with a collection of some " + r"kind, None was passed", + r"__new__\(\) missing 1 required positional argument: 'data'", + r"__new__\(\) takes at least 2 arguments \(1 given\)", + ] + ) + with pytest.raises(TypeError, match=msg): + type(simple_index)() + + def test_shift(self, simple_index): + # GH8083 test the base class for shift + if isinstance(simple_index, (DatetimeIndex, TimedeltaIndex, PeriodIndex)): + pytest.skip("Tested in test_ops/test_arithmetic") + idx = simple_index + msg = ( + f"This method is only implemented for DatetimeIndex, PeriodIndex and " + f"TimedeltaIndex; Got type {type(idx).__name__}" + ) + with pytest.raises(NotImplementedError, match=msg): + idx.shift(1) + with 
pytest.raises(NotImplementedError, match=msg): + idx.shift(1, 2) + + def test_constructor_name_unhashable(self, simple_index): + # GH#29069 check that name is hashable + # See also same-named test in tests.series.test_constructors + idx = simple_index + with pytest.raises(TypeError, match="Index.name must be a hashable type"): + type(idx)(idx, name=[]) + + def test_create_index_existing_name(self, simple_index): + # GH11193, when an existing index is passed, and a new name is not + # specified, the new index should inherit the previous object name + expected = simple_index.copy() + if not isinstance(expected, MultiIndex): + expected.name = "foo" + result = Index(expected) + tm.assert_index_equal(result, expected) + + result = Index(expected, name="bar") + expected.name = "bar" + tm.assert_index_equal(result, expected) + else: + expected.names = ["foo", "bar"] + result = Index(expected) + tm.assert_index_equal( + result, + Index( + Index( + [ + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ], + dtype="object", + ), + names=["foo", "bar"], + ), + ) + + result = Index(expected, names=["A", "B"]) + tm.assert_index_equal( + result, + Index( + Index( + [ + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ], + dtype="object", + ), + names=["A", "B"], + ), + ) + + def test_numeric_compat(self, simple_index): + idx = simple_index + # Check that this doesn't cover MultiIndex case, if/when it does, + # we can remove multi.test_compat.test_numeric_compat + assert not isinstance(idx, MultiIndex) + if type(idx) is Index: + pytest.skip("Not applicable for Index") + if is_numeric_dtype(simple_index.dtype) or isinstance( + simple_index, TimedeltaIndex + ): + pytest.skip("Tested elsewhere.") + + typ = type(idx._data).__name__ + cls = type(idx).__name__ + lmsg = "|".join( + [ + rf"unsupported operand type\(s\) for \*: '{typ}' and 'int'", + "cannot perform (__mul__|__truediv__|__floordiv__) with " + f"this index type: ({cls}|{typ})", + ] + ) + with pytest.raises(TypeError, match=lmsg): + idx * 1 + rmsg = "|".join( + [ + rf"unsupported operand type\(s\) for \*: 'int' and '{typ}'", + "cannot perform (__rmul__|__rtruediv__|__rfloordiv__) with " + f"this index type: ({cls}|{typ})", + ] + ) + with pytest.raises(TypeError, match=rmsg): + 1 * idx + + div_err = lmsg.replace("*", "/") + with pytest.raises(TypeError, match=div_err): + idx / 1 + div_err = rmsg.replace("*", "/") + with pytest.raises(TypeError, match=div_err): + 1 / idx + + floordiv_err = lmsg.replace("*", "//") + with pytest.raises(TypeError, match=floordiv_err): + idx // 1 + floordiv_err = rmsg.replace("*", "//") + with pytest.raises(TypeError, match=floordiv_err): + 1 // idx + + def test_logical_compat(self, simple_index): + if simple_index.dtype == object: + pytest.skip("Tested elsewhere.") + idx = simple_index + if idx.dtype.kind in "iufcbm": + assert idx.all() == idx._values.all() + assert idx.all() == idx.to_series().all() + assert idx.any() == idx._values.any() + assert idx.any() == idx.to_series().any() + else: + msg = "cannot perform (any|all)" + if isinstance(idx, IntervalIndex): + msg = ( + r"'IntervalArray' with dtype interval\[.*\] does " + "not support reduction '(any|all)'" + ) + with pytest.raises(TypeError, match=msg): + idx.all() + with pytest.raises(TypeError, match=msg): + idx.any() + + def test_repr_roundtrip(self, simple_index): + if isinstance(simple_index, IntervalIndex): + pytest.skip(f"Not a valid repr for 
{type(simple_index).__name__}") + idx = simple_index + tm.assert_index_equal(eval(repr(idx)), idx) + + def test_repr_max_seq_item_setting(self, simple_index): + # GH10182 + if isinstance(simple_index, IntervalIndex): + pytest.skip(f"Not a valid repr for {type(simple_index).__name__}") + idx = simple_index + idx = idx.repeat(50) + with pd.option_context("display.max_seq_items", None): + repr(idx) + assert "..." not in str(idx) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_ensure_copied_data(self, index): + # Check the "copy" argument of each Index.__new__ is honoured + # GH12309 + init_kwargs = {} + if isinstance(index, PeriodIndex): + # Needs "freq" specification: + init_kwargs["freq"] = index.freq + elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)): + pytest.skip( + "RangeIndex cannot be initialized from data, " + "MultiIndex and CategoricalIndex are tested separately" + ) + elif index.dtype == object and index.inferred_type == "boolean": + init_kwargs["dtype"] = index.dtype + + index_type = type(index) + result = index_type(index.values, copy=True, **init_kwargs) + if isinstance(index.dtype, DatetimeTZDtype): + result = result.tz_localize("UTC").tz_convert(index.tz) + if isinstance(index, (DatetimeIndex, TimedeltaIndex)): + index = index._with_freq(None) + + tm.assert_index_equal(index, result) + + if isinstance(index, PeriodIndex): + # .values an object array of Period, thus copied + result = index_type(ordinal=index.asi8, copy=False, **init_kwargs) + tm.assert_numpy_array_equal(index.asi8, result.asi8, check_same="same") + elif isinstance(index, IntervalIndex): + # checked in test_interval.py + pass + elif type(index) is Index and not isinstance(index.dtype, np.dtype): + result = index_type(index.values, copy=False, **init_kwargs) + tm.assert_index_equal(result, index) + + if isinstance(index._values, BaseMaskedArray): + assert np.shares_memory(index._values._data, result._values._data) + tm.assert_numpy_array_equal( + index._values._data, result._values._data, check_same="same" + ) + assert np.shares_memory(index._values._mask, result._values._mask) + tm.assert_numpy_array_equal( + index._values._mask, result._values._mask, check_same="same" + ) + elif index.dtype == "string[python]": + assert np.shares_memory(index._values._ndarray, result._values._ndarray) + tm.assert_numpy_array_equal( + index._values._ndarray, result._values._ndarray, check_same="same" + ) + elif index.dtype == "string[pyarrow]": + assert tm.shares_memory(result._values, index._values) + else: + raise NotImplementedError(index.dtype) + else: + result = index_type(index.values, copy=False, **init_kwargs) + tm.assert_numpy_array_equal(index.values, result.values, check_same="same") + + def test_memory_usage(self, index): + index._engine.clear_mapping() + result = index.memory_usage() + if index.empty: + # we report 0 for no-length + assert result == 0 + return + + # non-zero length + index.get_loc(index[0]) + result2 = index.memory_usage() + result3 = index.memory_usage(deep=True) + + # RangeIndex, IntervalIndex + # don't have engines + # Index[EA] has engine but it does not have a Hashtable .mapping + if not isinstance(index, (RangeIndex, IntervalIndex)) and not ( + type(index) is Index and not isinstance(index.dtype, np.dtype) + ): + assert result2 > result + + if index.inferred_type == "object": + assert result3 > result2 + + def test_argsort(self, index): + if isinstance(index, CategoricalIndex): + pytest.skip(f"{type(self).__name__} 
separately tested") + + result = index.argsort() + expected = np.array(index).argsort() + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + def test_numpy_argsort(self, index): + result = np.argsort(index) + expected = index.argsort() + tm.assert_numpy_array_equal(result, expected) + + result = np.argsort(index, kind="mergesort") + expected = index.argsort(kind="mergesort") + tm.assert_numpy_array_equal(result, expected) + + # these are the only two types that perform + # pandas compatibility input validation - the + # rest already perform separate (or no) such + # validation via their 'values' attribute as + # defined in pandas.core.indexes/base.py - they + # cannot be changed at the moment due to + # backwards compatibility concerns + if isinstance(index, (CategoricalIndex, RangeIndex)): + msg = "the 'axis' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.argsort(index, axis=1) + + msg = "the 'order' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.argsort(index, order=("a", "b")) + + def test_repeat(self, simple_index): + rep = 2 + idx = simple_index.copy() + new_index_cls = idx._constructor + expected = new_index_cls(idx.values.repeat(rep), name=idx.name) + tm.assert_index_equal(idx.repeat(rep), expected) + + idx = simple_index + rep = np.arange(len(idx)) + expected = new_index_cls(idx.values.repeat(rep), name=idx.name) + tm.assert_index_equal(idx.repeat(rep), expected) + + def test_numpy_repeat(self, simple_index): + rep = 2 + idx = simple_index + expected = idx.repeat(rep) + tm.assert_index_equal(np.repeat(idx, rep), expected) + + msg = "the 'axis' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.repeat(idx, rep, axis=0) + + def test_where(self, listlike_box, simple_index): + if isinstance(simple_index, (IntervalIndex, PeriodIndex)) or is_numeric_dtype( + simple_index.dtype + ): + pytest.skip("Tested elsewhere.") + klass = listlike_box + + idx = simple_index + if isinstance(idx, (DatetimeIndex, TimedeltaIndex)): + # where does not preserve freq + idx = idx._with_freq(None) + + cond = [True] * len(idx) + result = idx.where(klass(cond)) + expected = idx + tm.assert_index_equal(result, expected) + + cond = [False] + [True] * len(idx[1:]) + expected = Index([idx._na_value] + idx[1:].tolist(), dtype=idx.dtype) + result = idx.where(klass(cond)) + tm.assert_index_equal(result, expected) + + def test_insert_base(self, index): + result = index[1:4] + + if not len(index): + pytest.skip("Not applicable for empty index") + + # test 0th element + assert index[0:4].equals(result.insert(0, index[0])) + + def test_insert_out_of_bounds(self, index): + # TypeError/IndexError matches what np.insert raises in these cases + + if len(index) > 0: + err = TypeError + else: + err = IndexError + if len(index) == 0: + # 0 vs 0.5 in error message varies with numpy version + msg = "index (0|0.5) is out of bounds for axis 0 with size 0" + else: + msg = "slice indices must be integers or None or have an __index__ method" + with pytest.raises(err, match=msg): + index.insert(0.5, "foo") + + msg = "|".join( + [ + r"index -?\d+ is out of bounds for axis 0 with size \d+", + "loc must be an integer between", + ] + ) + with pytest.raises(IndexError, match=msg): + index.insert(len(index) + 1, 1) + + with pytest.raises(IndexError, match=msg): + index.insert(-len(index) - 1, 1) + + def test_delete_base(self, index): + if not len(index): + pytest.skip("Not applicable for empty index") + + if isinstance(index, 
RangeIndex): + # tested in class + pytest.skip(f"{type(self).__name__} tested elsewhere") + + expected = index[1:] + result = index.delete(0) + assert result.equals(expected) + assert result.name == expected.name + + expected = index[:-1] + result = index.delete(-1) + assert result.equals(expected) + assert result.name == expected.name + + length = len(index) + msg = f"index {length} is out of bounds for axis 0 with size {length}" + with pytest.raises(IndexError, match=msg): + index.delete(length) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_equals(self, index): + if isinstance(index, IntervalIndex): + pytest.skip(f"{type(index).__name__} tested elsewhere") + + is_ea_idx = type(index) is Index and not isinstance(index.dtype, np.dtype) + + assert index.equals(index) + assert index.equals(index.copy()) + if not is_ea_idx: + # doesn't hold for e.g. IntegerDtype + assert index.equals(index.astype(object)) + + assert not index.equals(list(index)) + assert not index.equals(np.array(index)) + + # Cannot pass in non-int64 dtype to RangeIndex + if not isinstance(index, RangeIndex) and not is_ea_idx: + same_values = Index(index, dtype=object) + assert index.equals(same_values) + assert same_values.equals(index) + + if index.nlevels == 1: + # do not test MultiIndex + assert not index.equals(Series(index)) + + def test_equals_op(self, simple_index): + # GH9947, GH10637 + index_a = simple_index + + n = len(index_a) + index_b = index_a[0:-1] + index_c = index_a[0:-1].append(index_a[-2:-1]) + index_d = index_a[0:1] + + msg = "Lengths must match|could not be broadcast" + with pytest.raises(ValueError, match=msg): + index_a == index_b + expected1 = np.array([True] * n) + expected2 = np.array([True] * (n - 1) + [False]) + tm.assert_numpy_array_equal(index_a == index_a, expected1) + tm.assert_numpy_array_equal(index_a == index_c, expected2) + + # test comparisons with numpy arrays + array_a = np.array(index_a) + array_b = np.array(index_a[0:-1]) + array_c = np.array(index_a[0:-1].append(index_a[-2:-1])) + array_d = np.array(index_a[0:1]) + with pytest.raises(ValueError, match=msg): + index_a == array_b + tm.assert_numpy_array_equal(index_a == array_a, expected1) + tm.assert_numpy_array_equal(index_a == array_c, expected2) + + # test comparisons with Series + series_a = Series(array_a) + series_b = Series(array_b) + series_c = Series(array_c) + series_d = Series(array_d) + with pytest.raises(ValueError, match=msg): + index_a == series_b + + tm.assert_numpy_array_equal(index_a == series_a, expected1) + tm.assert_numpy_array_equal(index_a == series_c, expected2) + + # cases where length is 1 for one of them + with pytest.raises(ValueError, match="Lengths must match"): + index_a == index_d + with pytest.raises(ValueError, match="Lengths must match"): + index_a == series_d + with pytest.raises(ValueError, match="Lengths must match"): + index_a == array_d + msg = "Can only compare identically-labeled Series objects" + with pytest.raises(ValueError, match=msg): + series_a == series_d + with pytest.raises(ValueError, match="Lengths must match"): + series_a == array_d + + # comparing with a scalar should broadcast; note that we are excluding + # MultiIndex because in this case each item in the index is a tuple of + # length 2, and therefore is considered an array of length 2 in the + # comparison instead of a scalar + if not isinstance(index_a, MultiIndex): + expected3 = np.array([False] * (len(index_a) - 2) + [True, False]) + # assuming the 2nd to last item is 
unique in the data + item = index_a[-2] + tm.assert_numpy_array_equal(index_a == item, expected3) + tm.assert_series_equal(series_a == item, Series(expected3)) + + def test_format(self, simple_index): + # GH35439 + if is_numeric_dtype(simple_index.dtype) or isinstance( + simple_index, DatetimeIndex + ): + pytest.skip("Tested elsewhere.") + idx = simple_index + expected = [str(x) for x in idx] + assert idx.format() == expected + + def test_format_empty(self, simple_index): + # GH35712 + if isinstance(simple_index, (PeriodIndex, RangeIndex)): + pytest.skip("Tested elsewhere") + empty_idx = type(simple_index)([]) + assert empty_idx.format() == [] + assert empty_idx.format(name=True) == [""] + + def test_fillna(self, index): + # GH 11343 + if len(index) == 0: + pytest.skip("Not relevant for empty index") + elif index.dtype == bool: + pytest.skip(f"{index.dtype} cannot hold NAs") + elif isinstance(index, Index) and is_integer_dtype(index.dtype): + pytest.skip(f"Not relevant for Index with {index.dtype}") + elif isinstance(index, MultiIndex): + idx = index.copy(deep=True) + msg = "isna is not defined for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + idx.fillna(idx[0]) + else: + idx = index.copy(deep=True) + result = idx.fillna(idx[0]) + tm.assert_index_equal(result, idx) + assert result is not idx + + msg = "'value' must be a scalar, passed: " + with pytest.raises(TypeError, match=msg): + idx.fillna([idx[0]]) + + idx = index.copy(deep=True) + values = idx._values + + values[1] = np.nan + + idx = type(index)(values) + + msg = "does not support 'downcast'" + msg2 = r"The 'downcast' keyword in .*Index\.fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg2): + with pytest.raises(NotImplementedError, match=msg): + # For now at least, we only raise if there are NAs present + idx.fillna(idx[0], downcast="infer") + + expected = np.array([False] * len(idx), dtype=bool) + expected[1] = True + tm.assert_numpy_array_equal(idx._isnan, expected) + assert idx.hasnans is True + + def test_nulls(self, index): + # this is really a smoke test for the methods + # as these are adequately tested for function elsewhere + if len(index) == 0: + tm.assert_numpy_array_equal(index.isna(), np.array([], dtype=bool)) + elif isinstance(index, MultiIndex): + idx = index.copy() + msg = "isna is not defined for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + idx.isna() + elif not index.hasnans: + tm.assert_numpy_array_equal(index.isna(), np.zeros(len(index), dtype=bool)) + tm.assert_numpy_array_equal(index.notna(), np.ones(len(index), dtype=bool)) + else: + result = isna(index) + tm.assert_numpy_array_equal(index.isna(), result) + tm.assert_numpy_array_equal(index.notna(), ~result) + + def test_empty(self, simple_index): + # GH 15270 + idx = simple_index + assert not idx.empty + assert idx[:0].empty + + def test_join_self_unique(self, join_type, simple_index): + idx = simple_index + if idx.is_unique: + joined = idx.join(idx, how=join_type) + assert (idx == joined).all() + + def test_map(self, simple_index): + # callable + if isinstance(simple_index, (TimedeltaIndex, PeriodIndex)): + pytest.skip("Tested elsewhere.") + idx = simple_index + + result = idx.map(lambda x: x) + # RangeIndex are equivalent to the similar Index with int64 dtype + tm.assert_index_equal(result, idx, exact="equiv") + + @pytest.mark.parametrize( + "mapper", + [ + lambda values, index: {i: e for e, i in zip(values, index)}, + lambda values, index: Series(values, index), + ], + ) + 
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_map_dictlike(self, mapper, simple_index, request): + idx = simple_index + if isinstance(idx, (DatetimeIndex, TimedeltaIndex, PeriodIndex)): + pytest.skip("Tested elsewhere.") + + identity = mapper(idx.values, idx) + + result = idx.map(identity) + # RangeIndex are equivalent to the similar Index with int64 dtype + tm.assert_index_equal(result, idx, exact="equiv") + + # empty mappable + dtype = None + if idx.dtype.kind == "f": + dtype = idx.dtype + + expected = Index([np.nan] * len(idx), dtype=dtype) + result = idx.map(mapper(expected, idx)) + tm.assert_index_equal(result, expected) + + def test_map_str(self, simple_index): + # GH 31202 + if isinstance(simple_index, CategoricalIndex): + pytest.skip("See test_map.py") + idx = simple_index + result = idx.map(str) + expected = Index([str(x) for x in idx], dtype=object) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("copy", [True, False]) + @pytest.mark.parametrize("name", [None, "foo"]) + @pytest.mark.parametrize("ordered", [True, False]) + def test_astype_category(self, copy, name, ordered, simple_index): + # GH 18630 + idx = simple_index + if name: + idx = idx.rename(name) + + # standard categories + dtype = CategoricalDtype(ordered=ordered) + result = idx.astype(dtype, copy=copy) + expected = CategoricalIndex(idx, name=name, ordered=ordered) + tm.assert_index_equal(result, expected, exact=True) + + # non-standard categories + dtype = CategoricalDtype(idx.unique().tolist()[:-1], ordered) + result = idx.astype(dtype, copy=copy) + expected = CategoricalIndex(idx, name=name, dtype=dtype) + tm.assert_index_equal(result, expected, exact=True) + + if ordered is False: + # dtype='category' defaults to ordered=False, so only test once + result = idx.astype("category", copy=copy) + expected = CategoricalIndex(idx, name=name) + tm.assert_index_equal(result, expected, exact=True) + + def test_is_unique(self, simple_index): + # initialize a unique index + index = simple_index.drop_duplicates() + assert index.is_unique is True + + # empty index should be unique + index_empty = index[:0] + assert index_empty.is_unique is True + + # test basic dupes + index_dup = index.insert(0, index[0]) + assert index_dup.is_unique is False + + # single NA should be unique + index_na = index.insert(0, np.nan) + assert index_na.is_unique is True + + # multiple NA should not be unique + index_na_dup = index_na.insert(0, np.nan) + assert index_na_dup.is_unique is False + + @pytest.mark.arm_slow + def test_engine_reference_cycle(self, simple_index): + # GH27585 + index = simple_index + nrefs_pre = len(gc.get_referrers(index)) + index._engine + assert len(gc.get_referrers(index)) == nrefs_pre + + def test_getitem_2d_deprecated(self, simple_index): + # GH#30588, GH#31479 + if isinstance(simple_index, IntervalIndex): + pytest.skip("Tested elsewhere") + idx = simple_index + msg = "Multi-dimensional indexing" + with pytest.raises(ValueError, match=msg): + idx[:, None] + + if not isinstance(idx, RangeIndex): + # GH#44051 RangeIndex already raised pre-2.0 with a different message + with pytest.raises(ValueError, match=msg): + idx[True] + with pytest.raises(ValueError, match=msg): + idx[False] + else: + msg = "only integers, slices" + with pytest.raises(IndexError, match=msg): + idx[True] + with pytest.raises(IndexError, match=msg): + idx[False] + + def test_copy_shares_cache(self, simple_index): + # GH32898, GH36840 + idx = simple_index + idx.get_loc(idx[0]) # 
populates the _cache. + copy = idx.copy() + + assert copy._cache is idx._cache + + def test_shallow_copy_shares_cache(self, simple_index): + # GH32669, GH36840 + idx = simple_index + idx.get_loc(idx[0]) # populates the _cache. + shallow_copy = idx._view() + + assert shallow_copy._cache is idx._cache + + shallow_copy = idx._shallow_copy(idx._data) + assert shallow_copy._cache is not idx._cache + assert shallow_copy._cache == {} + + def test_index_groupby(self, simple_index): + idx = simple_index[:5] + to_groupby = np.array([1, 2, np.nan, 2, 1]) + tm.assert_dict_equal( + idx.groupby(to_groupby), {1.0: idx[[0, 4]], 2.0: idx[[1, 3]]} + ) + + to_groupby = DatetimeIndex( + [ + datetime(2011, 11, 1), + datetime(2011, 12, 1), + pd.NaT, + datetime(2011, 12, 1), + datetime(2011, 11, 1), + ], + tz="UTC", + ).values + + ex_keys = [Timestamp("2011-11-01"), Timestamp("2011-12-01")] + expected = {ex_keys[0]: idx[[0, 4]], ex_keys[1]: idx[[1, 3]]} + tm.assert_dict_equal(idx.groupby(to_groupby), expected) + + def test_append_preserves_dtype(self, simple_index): + # In particular Index with dtype float32 + index = simple_index + N = len(index) + + result = index.append(index) + assert result.dtype == index.dtype + tm.assert_index_equal(result[:N], index, check_exact=True) + tm.assert_index_equal(result[N:], index, check_exact=True) + + alt = index.take(list(range(N)) * 2) + tm.assert_index_equal(result, alt, check_exact=True) + + def test_inv(self, simple_index): + idx = simple_index + + if idx.dtype.kind in ["i", "u"]: + res = ~idx + expected = Index(~idx.values, name=idx.name) + tm.assert_index_equal(res, expected) + + # check that we are matching Series behavior + res2 = ~Series(idx) + tm.assert_series_equal(res2, Series(expected)) + else: + if idx.dtype.kind == "f": + msg = "ufunc 'invert' not supported for the input types" + else: + msg = "bad operand" + with pytest.raises(TypeError, match=msg): + ~idx + + # check that we get the same behavior with Series + with pytest.raises(TypeError, match=msg): + ~Series(idx) + + def test_is_boolean_is_deprecated(self, simple_index): + # GH50042 + idx = simple_index + with tm.assert_produces_warning(FutureWarning): + idx.is_boolean() + + def test_is_floating_is_deprecated(self, simple_index): + # GH50042 + idx = simple_index + with tm.assert_produces_warning(FutureWarning): + idx.is_floating() + + def test_is_integer_is_deprecated(self, simple_index): + # GH50042 + idx = simple_index + with tm.assert_produces_warning(FutureWarning): + idx.is_integer() + + def test_holds_integer_deprecated(self, simple_index): + # GH50243 + idx = simple_index + msg = f"{type(idx).__name__}.holds_integer is deprecated. " + with tm.assert_produces_warning(FutureWarning, match=msg): + idx.holds_integer() + + def test_is_numeric_is_deprecated(self, simple_index): + # GH50042 + idx = simple_index + with tm.assert_produces_warning( + FutureWarning, + match=f"{type(idx).__name__}.is_numeric is deprecated. 
", + ): + idx.is_numeric() + + def test_is_categorical_is_deprecated(self, simple_index): + # GH50042 + idx = simple_index + with tm.assert_produces_warning( + FutureWarning, + match=r"Use pandas\.api\.types\.is_categorical_dtype instead", + ): + idx.is_categorical() + + def test_is_interval_is_deprecated(self, simple_index): + # GH50042 + idx = simple_index + with tm.assert_produces_warning(FutureWarning): + idx.is_interval() + + def test_is_object_is_deprecated(self, simple_index): + # GH50042 + idx = simple_index + with tm.assert_produces_warning(FutureWarning): + idx.is_object() + + +class TestNumericBase: + @pytest.fixture( + params=[ + RangeIndex(start=0, stop=20, step=2), + Index(np.arange(5, dtype=np.float64)), + Index(np.arange(5, dtype=np.float32)), + Index(np.arange(5, dtype=np.uint64)), + Index(range(0, 20, 2), dtype=np.int64), + Index(range(0, 20, 2), dtype=np.int32), + Index(range(0, 20, 2), dtype=np.int16), + Index(range(0, 20, 2), dtype=np.int8), + ] + ) + def simple_index(self, request): + return request.param + + def test_constructor_unwraps_index(self, simple_index): + if isinstance(simple_index, RangeIndex): + pytest.skip("Tested elsewhere.") + index_cls = type(simple_index) + dtype = simple_index.dtype + + idx = Index([1, 2], dtype=dtype) + result = index_cls(idx) + expected = np.array([1, 2], dtype=idx.dtype) + tm.assert_numpy_array_equal(result._data, expected) + + def test_can_hold_identifiers(self, simple_index): + idx = simple_index + key = idx[0] + assert idx._can_hold_identifiers_and_holds_name(key) is False + + def test_view(self, simple_index): + if isinstance(simple_index, RangeIndex): + pytest.skip("Tested elsewhere.") + index_cls = type(simple_index) + dtype = simple_index.dtype + + idx = index_cls([], dtype=dtype, name="Foo") + idx_view = idx.view() + assert idx_view.name == "Foo" + + idx_view = idx.view(dtype) + tm.assert_index_equal(idx, index_cls(idx_view, name="Foo"), exact=True) + + idx_view = idx.view(index_cls) + tm.assert_index_equal(idx, index_cls(idx_view, name="Foo"), exact=True) + + def test_format(self, simple_index): + # GH35439 + if isinstance(simple_index, DatetimeIndex): + pytest.skip("Tested elsewhere") + idx = simple_index + max_width = max(len(str(x)) for x in idx) + expected = [str(x).ljust(max_width) for x in idx] + assert idx.format() == expected + + def test_insert_non_na(self, simple_index): + # GH#43921 inserting an element that we know we can hold should + # not change dtype or type (except for RangeIndex) + index = simple_index + + result = index.insert(0, index[0]) + + expected = Index([index[0]] + list(index), dtype=index.dtype) + tm.assert_index_equal(result, expected, exact=True) + + def test_insert_na(self, nulls_fixture, simple_index): + # GH 18295 (test missing) + index = simple_index + na_val = nulls_fixture + + if na_val is pd.NaT: + expected = Index([index[0], pd.NaT] + list(index[1:]), dtype=object) + else: + expected = Index([index[0], np.nan] + list(index[1:])) + # GH#43921 we preserve float dtype + if index.dtype.kind == "f": + expected = Index(expected, dtype=index.dtype) + + result = index.insert(1, na_val) + tm.assert_index_equal(result, expected, exact=True) + + def test_arithmetic_explicit_conversions(self, simple_index): + # GH 8608 + # add/sub are overridden explicitly for Float/Int Index + index_cls = type(simple_index) + if index_cls is RangeIndex: + idx = RangeIndex(5) + else: + idx = index_cls(np.arange(5, dtype="int64")) + + # float conversions + arr = np.arange(5, dtype="int64") * 3.2 + expected = 
Index(arr, dtype=np.float64) + fidx = idx * 3.2 + tm.assert_index_equal(fidx, expected) + fidx = 3.2 * idx + tm.assert_index_equal(fidx, expected) + + # interops with numpy arrays + expected = Index(arr, dtype=np.float64) + a = np.zeros(5, dtype="float64") + result = fidx - a + tm.assert_index_equal(result, expected) + + expected = Index(-arr, dtype=np.float64) + a = np.zeros(5, dtype="float64") + result = a - fidx + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("complex_dtype", [np.complex64, np.complex128]) + def test_astype_to_complex(self, complex_dtype, simple_index): + result = simple_index.astype(complex_dtype) + + assert type(result) is Index and result.dtype == complex_dtype + + def test_cast_string(self, simple_index): + if isinstance(simple_index, RangeIndex): + pytest.skip("casting of strings not relevant for RangeIndex") + result = type(simple_index)(["0", "1", "2"], dtype=simple_index.dtype) + expected = type(simple_index)([0, 1, 2], dtype=simple_index.dtype) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_setops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_setops.py new file mode 100644 index 00000000..a64994ef --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_setops.py @@ -0,0 +1,908 @@ +""" +The tests in this package are to ensure the proper resultant dtypes of +set operations. +""" +from datetime import datetime +import operator + +import numpy as np +import pytest + +from pandas._libs import lib + +from pandas.core.dtypes.cast import find_common_type + +from pandas import ( + CategoricalDtype, + CategoricalIndex, + DatetimeTZDtype, + Index, + MultiIndex, + PeriodDtype, + RangeIndex, + Series, + Timestamp, +) +import pandas._testing as tm +from pandas.api.types import ( + is_signed_integer_dtype, + pandas_dtype, +) + + +def test_union_same_types(index): + # Union with a non-unique, non-monotonic index raises error + # Only needed for bool index factory + idx1 = index.sort_values() + idx2 = index.sort_values() + assert idx1.union(idx2).dtype == idx1.dtype + + +def test_union_different_types(index_flat, index_flat2, request): + # This test only considers combinations of indices + # GH 23525 + idx1 = index_flat + idx2 = index_flat2 + + if ( + not idx1.is_unique + and not idx2.is_unique + and idx1.dtype.kind == "i" + and idx2.dtype.kind == "b" + ) or ( + not idx2.is_unique + and not idx1.is_unique + and idx2.dtype.kind == "i" + and idx1.dtype.kind == "b" + ): + # Each condition had idx[1|2].is_monotonic_decreasing + # but failed when e.g. 
+ # idx1 = Index( + # [True, True, True, True, True, True, True, True, False, False], dtype='bool' + # ) + # idx2 = Index([0, 0, 1, 1, 2, 2], dtype='int64') + mark = pytest.mark.xfail( + reason="GH#44000 True==1", raises=ValueError, strict=False + ) + request.node.add_marker(mark) + + common_dtype = find_common_type([idx1.dtype, idx2.dtype]) + + warn = None + msg = "'<' not supported between" + if not len(idx1) or not len(idx2): + pass + elif (idx1.dtype.kind == "c" and (not lib.is_np_dtype(idx2.dtype, "iufc"))) or ( + idx2.dtype.kind == "c" and (not lib.is_np_dtype(idx1.dtype, "iufc")) + ): + # complex objects non-sortable + warn = RuntimeWarning + elif ( + isinstance(idx1.dtype, PeriodDtype) and isinstance(idx2.dtype, CategoricalDtype) + ) or ( + isinstance(idx2.dtype, PeriodDtype) and isinstance(idx1.dtype, CategoricalDtype) + ): + warn = FutureWarning + msg = r"PeriodDtype\[B\] is deprecated" + mark = pytest.mark.xfail( + reason="Warning not produced on all builds", + raises=AssertionError, + strict=False, + ) + request.node.add_marker(mark) + + any_uint64 = np.uint64 in (idx1.dtype, idx2.dtype) + idx1_signed = is_signed_integer_dtype(idx1.dtype) + idx2_signed = is_signed_integer_dtype(idx2.dtype) + + # Union with a non-unique, non-monotonic index raises error + # This applies to the boolean index + idx1 = idx1.sort_values() + idx2 = idx2.sort_values() + + with tm.assert_produces_warning(warn, match=msg): + res1 = idx1.union(idx2) + res2 = idx2.union(idx1) + + if any_uint64 and (idx1_signed or idx2_signed): + assert res1.dtype == np.dtype("O") + assert res2.dtype == np.dtype("O") + else: + assert res1.dtype == common_dtype + assert res2.dtype == common_dtype + + +@pytest.mark.parametrize( + "idx_fact1,idx_fact2", + [ + (tm.makeIntIndex, tm.makeRangeIndex), + (tm.makeFloatIndex, tm.makeIntIndex), + (tm.makeFloatIndex, tm.makeRangeIndex), + (tm.makeFloatIndex, tm.makeUIntIndex), + ], +) +def test_compatible_inconsistent_pairs(idx_fact1, idx_fact2): + # GH 23525 + idx1 = idx_fact1(10) + idx2 = idx_fact2(20) + + res1 = idx1.union(idx2) + res2 = idx2.union(idx1) + + assert res1.dtype in (idx1.dtype, idx2.dtype) + assert res2.dtype in (idx1.dtype, idx2.dtype) + + +@pytest.mark.parametrize( + "left, right, expected", + [ + ("int64", "int64", "int64"), + ("int64", "uint64", "object"), + ("int64", "float64", "float64"), + ("uint64", "float64", "float64"), + ("uint64", "uint64", "uint64"), + ("float64", "float64", "float64"), + ("datetime64[ns]", "int64", "object"), + ("datetime64[ns]", "uint64", "object"), + ("datetime64[ns]", "float64", "object"), + ("datetime64[ns, CET]", "int64", "object"), + ("datetime64[ns, CET]", "uint64", "object"), + ("datetime64[ns, CET]", "float64", "object"), + ("Period[D]", "int64", "object"), + ("Period[D]", "uint64", "object"), + ("Period[D]", "float64", "object"), + ], +) +@pytest.mark.parametrize("names", [("foo", "foo", "foo"), ("foo", "bar", None)]) +def test_union_dtypes(left, right, expected, names): + left = pandas_dtype(left) + right = pandas_dtype(right) + a = Index([], dtype=left, name=names[0]) + b = Index([], dtype=right, name=names[1]) + result = a.union(b) + assert result.dtype == expected + assert result.name == names[2] + + # Testing name retention + # TODO: pin down desired dtype; do we want it to be commutative? 
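# A minimal runnable sketch (plain pandas/numpy, separate from the vendored
# test file) of the promotion rule the parametrization above encodes:
# int64 with float64 finds a common float64 dtype, while int64 with uint64
# has no lossless common integer type and falls back to object.
import numpy as np
import pandas as pd

assert pd.Index([], dtype="int64").union(pd.Index([], dtype="float64")).dtype == np.float64
assert pd.Index([], dtype="int64").union(pd.Index([], dtype="uint64")).dtype == np.dtype("O")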
+    result = a.intersection(b) +    assert result.name == names[2] + + +@pytest.mark.parametrize("values", [[1, 2, 2, 3], [3, 3]]) +def test_intersection_duplicates(values): +    # GH#31326 +    a = Index(values) +    b = Index([3, 3]) +    result = a.intersection(b) +    expected = Index([3]) +    tm.assert_index_equal(result, expected) + + +class TestSetOps: +    # Set operation tests shared by all indexes in the `index` fixture +    @pytest.mark.parametrize("case", [0.5, "xxx"]) +    @pytest.mark.parametrize( +        "method", ["intersection", "union", "difference", "symmetric_difference"] +    ) +    def test_set_ops_error_cases(self, case, method, index): +        # non-iterable input +        msg = "Input must be Index or array-like" +        with pytest.raises(TypeError, match=msg): +            getattr(index, method)(case) + +    @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +    def test_intersection_base(self, index): +        if isinstance(index, CategoricalIndex): +            pytest.skip(f"Not relevant for {type(index).__name__}") + +        first = index[:5] +        second = index[:3] +        intersect = first.intersection(second) +        assert tm.equalContents(intersect, second) + +        if isinstance(index.dtype, DatetimeTZDtype): +            # The second.values below will drop tz, so the rest of this test +            # is not applicable. +            return + +        # GH#10149 +        cases = [second.to_numpy(), second.to_series(), second.to_list()] +        for case in cases: +            result = first.intersection(case) +            assert tm.equalContents(result, second) + +        if isinstance(index, MultiIndex): +            msg = "other must be a MultiIndex or a list of tuples" +            with pytest.raises(TypeError, match=msg): +                first.intersection([1, 2, 3]) + +    @pytest.mark.filterwarnings( +        "ignore:Falling back on a non-pyarrow:pandas.errors.PerformanceWarning" +    ) +    @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +    def test_union_base(self, index): +        first = index[3:] +        second = index[:5] +        everything = index + +        union = first.union(second) +        assert tm.equalContents(union, everything) + +        if isinstance(index.dtype, DatetimeTZDtype): +            # The second.values below will drop tz, so the rest of this test +            # is not applicable. +            return + +        # GH#10149 +        cases = [second.to_numpy(), second.to_series(), second.to_list()] +        for case in cases: +            result = first.union(case) +            assert tm.equalContents(result, everything) + +        if isinstance(index, MultiIndex): +            msg = "other must be a MultiIndex or a list of tuples" +            with pytest.raises(TypeError, match=msg): +                first.union([1, 2, 3]) + +    @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +    @pytest.mark.filterwarnings( +        "ignore:Falling back on a non-pyarrow:pandas.errors.PerformanceWarning" +    ) +    def test_difference_base(self, sort, index): +        first = index[2:] +        second = index[:4] +        if index.inferred_type == "boolean": +            # I think (TODO: be sure) there are assumptions baked in about +            # the index fixture that don't hold here?
+            answer = set(first).difference(set(second)) +        elif isinstance(index, CategoricalIndex): +            answer = [] +        else: +            answer = index[4:] +        result = first.difference(second, sort) +        assert tm.equalContents(result, answer) + +        # GH#10149 +        cases = [second.to_numpy(), second.to_series(), second.to_list()] +        for case in cases: +            result = first.difference(case, sort) +            assert tm.equalContents(result, answer) + +        if isinstance(index, MultiIndex): +            msg = "other must be a MultiIndex or a list of tuples" +            with pytest.raises(TypeError, match=msg): +                first.difference([1, 2, 3], sort) + +    @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +    @pytest.mark.filterwarnings( +        "ignore:Falling back on a non-pyarrow:pandas.errors.PerformanceWarning" +    ) +    def test_symmetric_difference(self, index): +        if isinstance(index, CategoricalIndex): +            pytest.skip(f"Not relevant for {type(index).__name__}") +        if len(index) < 2: +            pytest.skip("Too few values for test") +        if index[0] in index[1:] or index[-1] in index[:-1]: +            # index fixture has e.g. an index of bools that does not satisfy this, +            # another with [0, 0, 1, 1, 2, 2] +            pytest.skip("Index values do not satisfy test condition.") + +        first = index[1:] +        second = index[:-1] +        answer = index[[0, -1]] +        result = first.symmetric_difference(second) +        assert tm.equalContents(result, answer) + +        # GH#10149 +        cases = [second.to_numpy(), second.to_series(), second.to_list()] +        for case in cases: +            result = first.symmetric_difference(case) +            assert tm.equalContents(result, answer) + +        if isinstance(index, MultiIndex): +            msg = "other must be a MultiIndex or a list of tuples" +            with pytest.raises(TypeError, match=msg): +                first.symmetric_difference([1, 2, 3]) + +    @pytest.mark.parametrize( +        "fname, sname, expected_name", +        [ +            ("A", "A", "A"), +            ("A", "B", None), +            ("A", None, None), +            (None, "B", None), +            (None, None, None), +        ], +    ) +    def test_corner_union(self, index_flat, fname, sname, expected_name): +        # GH#9943, GH#9862 +        # Test unions with various name combinations +        # Do not test MultiIndex or repeats +        if not index_flat.is_unique: +            pytest.skip("Randomly generated index_flat was not unique.") +        index = index_flat + +        # Test copy.union(copy) +        first = index.copy().set_names(fname) +        second = index.copy().set_names(sname) +        union = first.union(second) +        expected = index.copy().set_names(expected_name) +        tm.assert_index_equal(union, expected) + +        # Test copy.union(empty) +        first = index.copy().set_names(fname) +        second = index.drop(index).set_names(sname) +        union = first.union(second) +        expected = index.copy().set_names(expected_name) +        tm.assert_index_equal(union, expected) + +        # Test empty.union(copy) +        first = index.drop(index).set_names(fname) +        second = index.copy().set_names(sname) +        union = first.union(second) +        expected = index.copy().set_names(expected_name) +        tm.assert_index_equal(union, expected) + +        # Test empty.union(empty) +        first = index.drop(index).set_names(fname) +        second = index.drop(index).set_names(sname) +        union = first.union(second) +        expected = index.drop(index).set_names(expected_name) +        tm.assert_index_equal(union, expected) + +    @pytest.mark.parametrize( +        "fname, sname, expected_name", +        [ +            ("A", "A", "A"), +            ("A", "B", None), +            ("A", None, None), +            (None, "B", None), +            (None, None, None), +        ], +    ) +    def test_union_unequal(self, index_flat, fname, sname, expected_name): +        if not index_flat.is_unique: +            pytest.skip("Randomly generated index_flat was not unique.") +        index = index_flat + +        # test 
copy.union(subset) - need sort for unicode and string + first = index.copy().set_names(fname) + second = index[1:].set_names(sname) + union = first.union(second).sort_values() + expected = index.set_names(expected_name).sort_values() + tm.assert_index_equal(union, expected) + + @pytest.mark.parametrize( + "fname, sname, expected_name", + [ + ("A", "A", "A"), + ("A", "B", None), + ("A", None, None), + (None, "B", None), + (None, None, None), + ], + ) + def test_corner_intersect(self, index_flat, fname, sname, expected_name): + # GH#35847 + # Test intersections with various name combinations + if not index_flat.is_unique: + pytest.skip("Randomly generated index_flat was not unique.") + index = index_flat + + # Test copy.intersection(copy) + first = index.copy().set_names(fname) + second = index.copy().set_names(sname) + intersect = first.intersection(second) + expected = index.copy().set_names(expected_name) + tm.assert_index_equal(intersect, expected) + + # Test copy.intersection(empty) + first = index.copy().set_names(fname) + second = index.drop(index).set_names(sname) + intersect = first.intersection(second) + expected = index.drop(index).set_names(expected_name) + tm.assert_index_equal(intersect, expected) + + # Test empty.intersection(copy) + first = index.drop(index).set_names(fname) + second = index.copy().set_names(sname) + intersect = first.intersection(second) + expected = index.drop(index).set_names(expected_name) + tm.assert_index_equal(intersect, expected) + + # Test empty.intersection(empty) + first = index.drop(index).set_names(fname) + second = index.drop(index).set_names(sname) + intersect = first.intersection(second) + expected = index.drop(index).set_names(expected_name) + tm.assert_index_equal(intersect, expected) + + @pytest.mark.parametrize( + "fname, sname, expected_name", + [ + ("A", "A", "A"), + ("A", "B", None), + ("A", None, None), + (None, "B", None), + (None, None, None), + ], + ) + def test_intersect_unequal(self, index_flat, fname, sname, expected_name): + if not index_flat.is_unique: + pytest.skip("Randomly generated index_flat was not unique.") + index = index_flat + + # test copy.intersection(subset) - need sort for unicode and string + first = index.copy().set_names(fname) + second = index[1:].set_names(sname) + intersect = first.intersection(second).sort_values() + expected = index[1:].set_names(expected_name).sort_values() + tm.assert_index_equal(intersect, expected) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_intersection_name_retention_with_nameless(self, index): + if isinstance(index, MultiIndex): + index = index.rename(list(range(index.nlevels))) + else: + index = index.rename("foo") + + other = np.asarray(index) + + result = index.intersection(other) + assert result.name == index.name + + # empty other, same dtype + result = index.intersection(other[:0]) + assert result.name == index.name + + # empty `self` + result = index[:0].intersection(other) + assert result.name == index.name + + def test_difference_preserves_type_empty(self, index, sort): + # GH#20040 + # If taking difference of a set and itself, it + # needs to preserve the type of the index + if not index.is_unique: + pytest.skip("Not relevant since index is not unique") + result = index.difference(index, sort=sort) + expected = index[:0] + tm.assert_index_equal(result, expected, exact=True) + + def test_difference_name_retention_equals(self, index, names): + if isinstance(index, MultiIndex): + names = [[x] * index.nlevels for x in 
names] + index = index.rename(names[0]) + other = index.rename(names[1]) + + assert index.equals(other) + + result = index.difference(other) + expected = index[:0].rename(names[2]) + tm.assert_index_equal(result, expected) + + def test_intersection_difference_match_empty(self, index, sort): + # GH#20040 + # Test that the intersection of an index with an + # empty index produces the same index as the difference + # of an index with itself. Test for all types + if not index.is_unique: + pytest.skip("Not relevant because index is not unique") + inter = index.intersection(index[:0]) + diff = index.difference(index, sort=sort) + tm.assert_index_equal(inter, diff, exact=True) + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +@pytest.mark.filterwarnings( + "ignore:Falling back on a non-pyarrow:pandas.errors.PerformanceWarning" +) +@pytest.mark.parametrize( + "method", ["intersection", "union", "difference", "symmetric_difference"] +) +def test_setop_with_categorical(index_flat, sort, method): + # MultiIndex tested separately in tests.indexes.multi.test_setops + index = index_flat + + other = index.astype("category") + exact = "equiv" if isinstance(index, RangeIndex) else True + + result = getattr(index, method)(other, sort=sort) + expected = getattr(index, method)(index, sort=sort) + tm.assert_index_equal(result, expected, exact=exact) + + result = getattr(index, method)(other[:5], sort=sort) + expected = getattr(index, method)(index[:5], sort=sort) + tm.assert_index_equal(result, expected, exact=exact) + + +def test_intersection_duplicates_all_indexes(index): + # GH#38743 + if index.empty: + # No duplicates in empty indexes + pytest.skip("Not relevant for empty Index") + + idx = index + idx_non_unique = idx[[0, 0, 1, 2]] + + assert idx.intersection(idx_non_unique).equals(idx_non_unique.intersection(idx)) + assert idx.intersection(idx_non_unique).is_unique + + +def test_union_duplicate_index_subsets_of_each_other( + any_dtype_for_small_pos_integer_indexes, +): + # GH#31326 + dtype = any_dtype_for_small_pos_integer_indexes + a = Index([1, 2, 2, 3], dtype=dtype) + b = Index([3, 3, 4], dtype=dtype) + + expected = Index([1, 2, 2, 3, 3, 4], dtype=dtype) + if isinstance(a, CategoricalIndex): + expected = Index([1, 2, 2, 3, 3, 4]) + result = a.union(b) + tm.assert_index_equal(result, expected) + result = a.union(b, sort=False) + tm.assert_index_equal(result, expected) + + +def test_union_with_duplicate_index_and_non_monotonic( + any_dtype_for_small_pos_integer_indexes, +): + # GH#36289 + dtype = any_dtype_for_small_pos_integer_indexes + a = Index([1, 0, 0], dtype=dtype) + b = Index([0, 1], dtype=dtype) + expected = Index([0, 0, 1], dtype=dtype) + + result = a.union(b) + tm.assert_index_equal(result, expected) + + result = b.union(a) + tm.assert_index_equal(result, expected) + + +def test_union_duplicate_index_different_dtypes(): + # GH#36289 + a = Index([1, 2, 2, 3]) + b = Index(["1", "0", "0"]) + expected = Index([1, 2, 2, 3, "1", "0", "0"]) + result = a.union(b, sort=False) + tm.assert_index_equal(result, expected) + + +def test_union_same_value_duplicated_in_both(): + # GH#36289 + a = Index([0, 0, 1]) + b = Index([0, 0, 1, 2]) + result = a.union(b) + expected = Index([0, 0, 1, 2]) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("dup", [1, np.nan]) +def test_union_nan_in_both(dup): + # GH#36289 + a = Index([np.nan, 1, 2, 2]) + b = Index([np.nan, dup, 1, 2]) + result = a.union(b, sort=False) + expected = Index([np.nan, dup, 1.0, 2.0, 2.0]) 
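# A short runnable sketch (separate from the vendored tests) of the duplicate
# rule the GH#31326/GH#36289 tests in this block pin down: union keeps each
# value at its maximum multiplicity across the two operands, so values
# duplicated in both inputs are not doubled.
import pandas as pd

a = pd.Index([1, 2, 2, 3])
b = pd.Index([3, 3, 4])
assert a.union(b).tolist() == [1, 2, 2, 3, 3, 4]  # 3 kept twice (its max count), not three times

a = pd.Index([0, 0, 1])
b = pd.Index([0, 0, 1, 2])
assert a.union(b).tolist() == [0, 0, 1, 2]  # shared duplicates counted once per max multiplicity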
+ tm.assert_index_equal(result, expected) + + +def test_union_rangeindex_sort_true(): + # GH 53490 + idx1 = RangeIndex(1, 100, 6) + idx2 = RangeIndex(1, 50, 3) + result = idx1.union(idx2, sort=True) + expected = Index( + [ + 1, + 4, + 7, + 10, + 13, + 16, + 19, + 22, + 25, + 28, + 31, + 34, + 37, + 40, + 43, + 46, + 49, + 55, + 61, + 67, + 73, + 79, + 85, + 91, + 97, + ] + ) + tm.assert_index_equal(result, expected) + + +def test_union_with_duplicate_index_not_subset_and_non_monotonic( + any_dtype_for_small_pos_integer_indexes, +): + # GH#36289 + dtype = any_dtype_for_small_pos_integer_indexes + a = Index([1, 0, 2], dtype=dtype) + b = Index([0, 0, 1], dtype=dtype) + expected = Index([0, 0, 1, 2], dtype=dtype) + if isinstance(a, CategoricalIndex): + expected = Index([0, 0, 1, 2]) + + result = a.union(b) + tm.assert_index_equal(result, expected) + + result = b.union(a) + tm.assert_index_equal(result, expected) + + +def test_union_int_categorical_with_nan(): + ci = CategoricalIndex([1, 2, np.nan]) + assert ci.categories.dtype.kind == "i" + + idx = Index([1, 2]) + + result = idx.union(ci) + expected = Index([1, 2, np.nan], dtype=np.float64) + tm.assert_index_equal(result, expected) + + result = ci.union(idx) + tm.assert_index_equal(result, expected) + + +class TestSetOpsUnsorted: + # These may eventually belong in a dtype-specific test_setops, or + # parametrized over a more general fixture + def test_intersect_str_dates(self): + dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] + + index1 = Index(dt_dates, dtype=object) + index2 = Index(["aa"], dtype=object) + result = index2.intersection(index1) + + expected = Index([], dtype=object) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_intersection(self, index, sort): + first = index[:20] + second = index[:10] + intersect = first.intersection(second, sort=sort) + if sort is None: + tm.assert_index_equal(intersect, second.sort_values()) + assert tm.equalContents(intersect, second) + + # Corner cases + inter = first.intersection(first, sort=sort) + assert inter is first + + @pytest.mark.parametrize( + "index2,keeps_name", + [ + (Index([3, 4, 5, 6, 7], name="index"), True), # preserve same name + (Index([3, 4, 5, 6, 7], name="other"), False), # drop diff names + (Index([3, 4, 5, 6, 7]), False), + ], + ) + def test_intersection_name_preservation(self, index2, keeps_name, sort): + index1 = Index([1, 2, 3, 4, 5], name="index") + expected = Index([3, 4, 5]) + result = index1.intersection(index2, sort) + + if keeps_name: + expected.name = "index" + + assert result.name == expected.name + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + @pytest.mark.parametrize( + "first_name,second_name,expected_name", + [("A", "A", "A"), ("A", "B", None), (None, "B", None)], + ) + def test_intersection_name_preservation2( + self, index, first_name, second_name, expected_name, sort + ): + first = index[5:20] + second = index[:10] + first.name = first_name + second.name = second_name + intersect = first.intersection(second, sort=sort) + assert intersect.name == expected_name + + def test_chained_union(self, sort): + # Chained unions handles names correctly + i1 = Index([1, 2], name="i1") + i2 = Index([5, 6], name="i2") + i3 = Index([3, 4], name="i3") + union = i1.union(i2.union(i3, sort=sort), sort=sort) + expected = i1.union(i2, sort=sort).union(i3, sort=sort) + tm.assert_index_equal(union, expected) + + j1 = Index([1, 2], name="j1") + j2 
= Index([], name="j2") + j3 = Index([], name="j3") + union = j1.union(j2.union(j3, sort=sort), sort=sort) + expected = j1.union(j2, sort=sort).union(j3, sort=sort) + tm.assert_index_equal(union, expected) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_union(self, index, sort): + first = index[5:20] + second = index[:10] + everything = index[:20] + + union = first.union(second, sort=sort) + if sort is None: + tm.assert_index_equal(union, everything.sort_values()) + assert tm.equalContents(union, everything) + + @pytest.mark.parametrize("klass", [np.array, Series, list]) + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_union_from_iterables(self, index, klass, sort): + # GH#10149 + first = index[5:20] + second = index[:10] + everything = index[:20] + + case = klass(second.values) + result = first.union(case, sort=sort) + if sort is None: + tm.assert_index_equal(result, everything.sort_values()) + assert tm.equalContents(result, everything) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_union_identity(self, index, sort): + first = index[5:20] + + union = first.union(first, sort=sort) + # i.e. identity is not preserved when sort is True + assert (union is first) is (not sort) + + # This should no longer be the same object, since [] is not consistent, + # both objects will be recast to dtype('O') + union = first.union([], sort=sort) + assert (union is first) is (not sort) + + union = Index([]).union(first, sort=sort) + assert (union is first) is (not sort) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + @pytest.mark.parametrize("second_name,expected", [(None, None), ("name", "name")]) + def test_difference_name_preservation(self, index, second_name, expected, sort): + first = index[5:20] + second = index[:10] + answer = index[10:20] + + first.name = "name" + second.name = second_name + result = first.difference(second, sort=sort) + + assert tm.equalContents(result, answer) + + if expected is None: + assert result.name is None + else: + assert result.name == expected + + def test_difference_empty_arg(self, index, sort): + first = index[5:20] + first.name = "name" + result = first.difference([], sort) + + tm.assert_index_equal(result, first) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_difference_identity(self, index, sort): + first = index[5:20] + first.name = "name" + result = first.difference(first, sort) + + assert len(result) == 0 + assert result.name == first.name + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_difference_sort(self, index, sort): + first = index[5:20] + second = index[:10] + + result = first.difference(second, sort) + expected = index[10:20] + + if sort is None: + expected = expected.sort_values() + + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("opname", ["difference", "symmetric_difference"]) + def test_difference_incomparable(self, opname): + a = Index([3, Timestamp("2000"), 1]) + b = Index([2, Timestamp("1999"), 1]) + op = operator.methodcaller(opname, b) + + with tm.assert_produces_warning(RuntimeWarning): + # sort=None, the default + result = op(a) + expected = Index([3, Timestamp("2000"), 2, Timestamp("1999")]) + if opname == "difference": + expected = expected[:2] + tm.assert_index_equal(result, expected) + + # sort=False + op = operator.methodcaller(opname, b, sort=False) + result = op(a) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("opname", ["difference", 
"symmetric_difference"]) + def test_difference_incomparable_true(self, opname): + a = Index([3, Timestamp("2000"), 1]) + b = Index([2, Timestamp("1999"), 1]) + op = operator.methodcaller(opname, b, sort=True) + + msg = "'<' not supported between instances of 'Timestamp' and 'int'" + with pytest.raises(TypeError, match=msg): + op(a) + + def test_symmetric_difference_mi(self, sort): + index1 = MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])) + index2 = MultiIndex.from_tuples([("foo", 1), ("bar", 3)]) + result = index1.symmetric_difference(index2, sort=sort) + expected = MultiIndex.from_tuples([("bar", 2), ("baz", 3), ("bar", 3)]) + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + assert tm.equalContents(result, expected) + + @pytest.mark.parametrize( + "index2,expected", + [ + (Index([0, 1, np.nan]), Index([2.0, 3.0, 0.0])), + (Index([0, 1]), Index([np.nan, 2.0, 3.0, 0.0])), + ], + ) + def test_symmetric_difference_missing(self, index2, expected, sort): + # GH#13514 change: {nan} - {nan} == {} + # (GH#6444, sorting of nans, is no longer an issue) + index1 = Index([1, np.nan, 2, 3]) + + result = index1.symmetric_difference(index2, sort=sort) + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + + def test_symmetric_difference_non_index(self, sort): + index1 = Index([1, 2, 3, 4], name="index1") + index2 = np.array([2, 3, 4, 5]) + expected = Index([1, 5]) + result = index1.symmetric_difference(index2, sort=sort) + assert tm.equalContents(result, expected) + assert result.name == "index1" + + result = index1.symmetric_difference(index2, result_name="new_name", sort=sort) + assert tm.equalContents(result, expected) + assert result.name == "new_name" + + def test_union_ea_dtypes(self, any_numeric_ea_and_arrow_dtype): + # GH#51365 + idx = Index([1, 2, 3], dtype=any_numeric_ea_and_arrow_dtype) + idx2 = Index([3, 4, 5], dtype=any_numeric_ea_and_arrow_dtype) + result = idx.union(idx2) + expected = Index([1, 2, 3, 4, 5], dtype=any_numeric_ea_and_arrow_dtype) + tm.assert_index_equal(result, expected) + + def test_union_string_array(self, any_string_dtype): + idx1 = Index(["a"], dtype=any_string_dtype) + idx2 = Index(["b"], dtype=any_string_dtype) + result = idx1.union(idx2) + expected = Index(["a", "b"], dtype=any_string_dtype) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_subclass.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_subclass.py new file mode 100644 index 00000000..c3287e1d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/test_subclass.py @@ -0,0 +1,40 @@ +""" +Tests involving custom Index subclasses +""" +import numpy as np + +from pandas import ( + DataFrame, + Index, +) +import pandas._testing as tm + + +class CustomIndex(Index): + def __new__(cls, data, name=None): + # assert that this index class cannot hold strings + if any(isinstance(val, str) for val in data): + raise TypeError("CustomIndex cannot hold strings") + + if name is None and hasattr(data, "name"): + name = data.name + data = np.array(data, dtype="O") + + return cls._simple_new(data, name) + + +def test_insert_fallback_to_base_index(): + # https://github.com/pandas-dev/pandas/issues/47071 + + idx = CustomIndex([1, 2, 3]) + result = idx.insert(0, "string") + expected = Index(["string", 1, 2, 3], dtype=object) + tm.assert_index_equal(result, expected) + + df = DataFrame( + 
np.random.default_rng(2).standard_normal((2, 3)), + columns=idx, + index=Index([1, 2], name="string"), + ) + result = df.reset_index() + tm.assert_index_equal(result.columns, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_astype.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_astype.py new file mode 100644 index 00000000..f69f0fd3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_astype.py @@ -0,0 +1,125 @@ +from datetime import timedelta + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + NaT, + Timedelta, + TimedeltaIndex, + timedelta_range, +) +import pandas._testing as tm + + +class TestTimedeltaIndex: + def test_astype_object(self): + idx = timedelta_range(start="1 days", periods=4, freq="D", name="idx") + expected_list = [ + Timedelta("1 days"), + Timedelta("2 days"), + Timedelta("3 days"), + Timedelta("4 days"), + ] + result = idx.astype(object) + expected = Index(expected_list, dtype=object, name="idx") + tm.assert_index_equal(result, expected) + assert idx.tolist() == expected_list + + def test_astype_object_with_nat(self): + idx = TimedeltaIndex( + [timedelta(days=1), timedelta(days=2), NaT, timedelta(days=4)], name="idx" + ) + expected_list = [ + Timedelta("1 days"), + Timedelta("2 days"), + NaT, + Timedelta("4 days"), + ] + result = idx.astype(object) + expected = Index(expected_list, dtype=object, name="idx") + tm.assert_index_equal(result, expected) + assert idx.tolist() == expected_list + + def test_astype(self): + # GH 13149, GH 13209 + idx = TimedeltaIndex([1e14, "NaT", NaT, np.nan], name="idx") + + result = idx.astype(object) + expected = Index( + [Timedelta("1 days 03:46:40")] + [NaT] * 3, dtype=object, name="idx" + ) + tm.assert_index_equal(result, expected) + + result = idx.astype(np.int64) + expected = Index( + [100000000000000] + [-9223372036854775808] * 3, dtype=np.int64, name="idx" + ) + tm.assert_index_equal(result, expected) + + result = idx.astype(str) + expected = Index([str(x) for x in idx], name="idx") + tm.assert_index_equal(result, expected) + + rng = timedelta_range("1 days", periods=10) + result = rng.astype("i8") + tm.assert_index_equal(result, Index(rng.asi8)) + tm.assert_numpy_array_equal(rng.asi8, result.values) + + def test_astype_uint(self): + arr = timedelta_range("1H", periods=2) + + with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): + arr.astype("uint64") + with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): + arr.astype("uint32") + + def test_astype_timedelta64(self): + # GH 13149, GH 13209 + idx = TimedeltaIndex([1e14, "NaT", NaT, np.nan]) + + msg = ( + r"Cannot convert from timedelta64\[ns\] to timedelta64. 
" + "Supported resolutions are 's', 'ms', 'us', 'ns'" + ) + with pytest.raises(ValueError, match=msg): + idx.astype("timedelta64") + + result = idx.astype("timedelta64[ns]") + tm.assert_index_equal(result, idx) + assert result is not idx + + result = idx.astype("timedelta64[ns]", copy=False) + tm.assert_index_equal(result, idx) + assert result is idx + + @pytest.mark.parametrize("dtype", [float, "datetime64", "datetime64[ns]"]) + def test_astype_raises(self, dtype): + # GH 13149, GH 13209 + idx = TimedeltaIndex([1e14, "NaT", NaT, np.nan]) + msg = "Cannot cast TimedeltaIndex to dtype" + with pytest.raises(TypeError, match=msg): + idx.astype(dtype) + + def test_astype_category(self): + obj = timedelta_range("1H", periods=2, freq="H") + + result = obj.astype("category") + expected = pd.CategoricalIndex([Timedelta("1H"), Timedelta("2H")]) + tm.assert_index_equal(result, expected) + + result = obj._data.astype("category") + expected = expected.values + tm.assert_categorical_equal(result, expected) + + def test_astype_array_fallback(self): + obj = timedelta_range("1H", periods=2) + result = obj.astype(bool) + expected = Index(np.array([True, True])) + tm.assert_index_equal(result, expected) + + result = obj._data.astype(bool) + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_factorize.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_factorize.py new file mode 100644 index 00000000..24ab3888 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_factorize.py @@ -0,0 +1,40 @@ +import numpy as np + +from pandas import ( + TimedeltaIndex, + factorize, + timedelta_range, +) +import pandas._testing as tm + + +class TestTimedeltaIndexFactorize: + def test_factorize(self): + idx1 = TimedeltaIndex(["1 day", "1 day", "2 day", "2 day", "3 day", "3 day"]) + + exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp) + exp_idx = TimedeltaIndex(["1 day", "2 day", "3 day"]) + + arr, idx = idx1.factorize() + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, exp_idx) + assert idx.freq == exp_idx.freq + + arr, idx = idx1.factorize(sort=True) + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, exp_idx) + assert idx.freq == exp_idx.freq + + def test_factorize_preserves_freq(self): + # GH#38120 freq should be preserved + idx3 = timedelta_range("1 day", periods=4, freq="s") + exp_arr = np.array([0, 1, 2, 3], dtype=np.intp) + arr, idx = idx3.factorize() + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, idx3) + assert idx.freq == idx3.freq + + arr, idx = factorize(idx3) + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(idx, idx3) + assert idx.freq == idx3.freq diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_fillna.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_fillna.py new file mode 100644 index 00000000..40aa95d0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_fillna.py @@ -0,0 +1,22 @@ +from pandas import ( + Index, + NaT, + Timedelta, + TimedeltaIndex, +) +import pandas._testing as tm + + +class TestFillNA: + def test_fillna_timedelta(self): + # GH#11343 + idx = TimedeltaIndex(["1 day", NaT, "3 day"]) + + exp = TimedeltaIndex(["1 day", "2 day", "3 day"]) + 
tm.assert_index_equal(idx.fillna(Timedelta("2 day")), exp) + +        exp = TimedeltaIndex(["1 day", "3 hour", "3 day"]) +        tm.assert_index_equal(idx.fillna(Timedelta("3 hour")), exp) + +        exp = Index([Timedelta("1 day"), "x", Timedelta("3 day")], dtype=object) +        tm.assert_index_equal(idx.fillna("x"), exp) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_insert.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_insert.py new file mode 100644 index 00000000..f8164102 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_insert.py @@ -0,0 +1,145 @@ +from datetime import timedelta + +import numpy as np +import pytest + +from pandas._libs import lib + +import pandas as pd +from pandas import ( +    Index, +    Timedelta, +    TimedeltaIndex, +    timedelta_range, +) +import pandas._testing as tm + + +class TestTimedeltaIndexInsert: +    def test_insert(self): +        idx = TimedeltaIndex(["4day", "1day", "2day"], name="idx") + +        result = idx.insert(2, timedelta(days=5)) +        exp = TimedeltaIndex(["4day", "1day", "5day", "2day"], name="idx") +        tm.assert_index_equal(result, exp) + +        # insertion of non-datetime should coerce to object index +        result = idx.insert(1, "inserted") +        expected = Index( +            [Timedelta("4day"), "inserted", Timedelta("1day"), Timedelta("2day")], +            name="idx", +        ) +        assert not isinstance(result, TimedeltaIndex) +        tm.assert_index_equal(result, expected) +        assert result.name == expected.name + +        idx = timedelta_range("1day 00:00:01", periods=3, freq="s", name="idx") + +        # preserve freq +        expected_0 = TimedeltaIndex( +            ["1day", "1day 00:00:01", "1day 00:00:02", "1day 00:00:03"], +            name="idx", +            freq="s", +        ) +        expected_3 = TimedeltaIndex( +            ["1day 00:00:01", "1day 00:00:02", "1day 00:00:03", "1day 00:00:04"], +            name="idx", +            freq="s", +        ) + +        # reset freq to None +        expected_1_nofreq = TimedeltaIndex( +            ["1day 00:00:01", "1day 00:00:01", "1day 00:00:02", "1day 00:00:03"], +            name="idx", +            freq=None, +        ) +        expected_3_nofreq = TimedeltaIndex( +            ["1day 00:00:01", "1day 00:00:02", "1day 00:00:03", "1day 00:00:05"], +            name="idx", +            freq=None, +        ) + +        cases = [ +            (0, Timedelta("1day"), expected_0), +            (-3, Timedelta("1day"), expected_0), +            (3, Timedelta("1day 00:00:04"), expected_3), +            (1, Timedelta("1day 00:00:01"), expected_1_nofreq), +            (3, Timedelta("1day 00:00:05"), expected_3_nofreq), +        ] + +        for n, d, expected in cases: +            result = idx.insert(n, d) +            tm.assert_index_equal(result, expected) +            assert result.name == expected.name +            assert result.freq == expected.freq + +    @pytest.mark.parametrize( +        "null", [None, np.nan, np.timedelta64("NaT"), pd.NaT, pd.NA] +    ) +    def test_insert_nat(self, null): +        # GH 18295 (test missing) +        idx = timedelta_range("1day", "3day") +        result = idx.insert(1, null) +        expected = TimedeltaIndex(["1day", pd.NaT, "2day", "3day"]) +        tm.assert_index_equal(result, expected) + +    def test_insert_invalid_na(self): +        idx = TimedeltaIndex(["4day", "1day", "2day"], name="idx") + +        item = np.datetime64("NaT") +        result = idx.insert(0, item) + +        expected = Index([item] + list(idx), dtype=object, name="idx") +        tm.assert_index_equal(result, expected) + +        # Also works if we pass a different dt64nat object +        item2 = np.datetime64("NaT") +        result = idx.insert(0, item2) +        tm.assert_index_equal(result, expected) + +    @pytest.mark.parametrize( +        "item", [0, np.int64(0), np.float64(0), np.array(0), np.datetime64(456, "us")] +    ) +    def test_insert_mismatched_types_raises(self, item): +        # GH#33703 don't cast these 
to td64 + tdi = TimedeltaIndex(["4day", "1day", "2day"], name="idx") + + result = tdi.insert(1, item) + + expected = Index( + [tdi[0], lib.item_from_zerodim(item)] + list(tdi[1:]), + dtype=object, + name="idx", + ) + tm.assert_index_equal(result, expected) + + def test_insert_castable_str(self): + idx = timedelta_range("1day", "3day") + + result = idx.insert(0, "1 Day") + + expected = TimedeltaIndex([idx[0]] + list(idx)) + tm.assert_index_equal(result, expected) + + def test_insert_non_castable_str(self): + idx = timedelta_range("1day", "3day") + + result = idx.insert(0, "foo") + + expected = Index(["foo"] + list(idx), dtype=object) + tm.assert_index_equal(result, expected) + + def test_insert_empty(self): + # Corner case inserting with length zero doesn't raise IndexError + # GH#33573 for freq preservation + idx = timedelta_range("1 Day", periods=3) + td = idx[0] + + result = idx[:0].insert(0, td) + assert result.freq == "D" + + with pytest.raises(IndexError, match="loc must be an integer between"): + result = idx[:0].insert(1, td) + + with pytest.raises(IndexError, match="loc must be an integer between"): + result = idx[:0].insert(-1, td) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_repeat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_repeat.py new file mode 100644 index 00000000..2a9b58d1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_repeat.py @@ -0,0 +1,34 @@ +import numpy as np + +from pandas import ( + TimedeltaIndex, + timedelta_range, +) +import pandas._testing as tm + + +class TestRepeat: + def test_repeat(self): + index = timedelta_range("1 days", periods=2, freq="D") + exp = TimedeltaIndex(["1 days", "1 days", "2 days", "2 days"]) + for res in [index.repeat(2), np.repeat(index, 2)]: + tm.assert_index_equal(res, exp) + assert res.freq is None + + index = TimedeltaIndex(["1 days", "NaT", "3 days"]) + exp = TimedeltaIndex( + [ + "1 days", + "1 days", + "1 days", + "NaT", + "NaT", + "NaT", + "3 days", + "3 days", + "3 days", + ] + ) + for res in [index.repeat(3), np.repeat(index, 3)]: + tm.assert_index_equal(res, exp) + assert res.freq is None diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_shift.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_shift.py new file mode 100644 index 00000000..f49af73f --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/methods/test_shift.py @@ -0,0 +1,76 @@ +import pytest + +from pandas.errors import NullFrequencyError + +import pandas as pd +from pandas import TimedeltaIndex +import pandas._testing as tm + + +class TestTimedeltaIndexShift: + # ------------------------------------------------------------- + # TimedeltaIndex.shift is used by __add__/__sub__ + + def test_tdi_shift_empty(self): + # GH#9903 + idx = TimedeltaIndex([], name="xxx") + tm.assert_index_equal(idx.shift(0, freq="H"), idx) + tm.assert_index_equal(idx.shift(3, freq="H"), idx) + + def test_tdi_shift_hours(self): + # GH#9903 + idx = TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx") + tm.assert_index_equal(idx.shift(0, freq="H"), idx) + exp = TimedeltaIndex(["8 hours", "9 hours", "12 hours"], name="xxx") + tm.assert_index_equal(idx.shift(3, freq="H"), exp) + exp = TimedeltaIndex(["2 hours", "3 hours", "6 hours"], name="xxx") + tm.assert_index_equal(idx.shift(-3, freq="H"), exp) + + def 
test_tdi_shift_minutes(self): + # GH#9903 + idx = TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx") + tm.assert_index_equal(idx.shift(0, freq="T"), idx) + exp = TimedeltaIndex(["05:03:00", "06:03:00", "9:03:00"], name="xxx") + tm.assert_index_equal(idx.shift(3, freq="T"), exp) + exp = TimedeltaIndex(["04:57:00", "05:57:00", "8:57:00"], name="xxx") + tm.assert_index_equal(idx.shift(-3, freq="T"), exp) + + def test_tdi_shift_int(self): + # GH#8083 + tdi = pd.to_timedelta(range(5), unit="d") + trange = tdi._with_freq("infer") + pd.offsets.Hour(1) + result = trange.shift(1) + expected = TimedeltaIndex( + [ + "1 days 01:00:00", + "2 days 01:00:00", + "3 days 01:00:00", + "4 days 01:00:00", + "5 days 01:00:00", + ], + freq="D", + ) + tm.assert_index_equal(result, expected) + + def test_tdi_shift_nonstandard_freq(self): + # GH#8083 + tdi = pd.to_timedelta(range(5), unit="d") + trange = tdi._with_freq("infer") + pd.offsets.Hour(1) + result = trange.shift(3, freq="2D 1s") + expected = TimedeltaIndex( + [ + "6 days 01:00:03", + "7 days 01:00:03", + "8 days 01:00:03", + "9 days 01:00:03", + "10 days 01:00:03", + ], + freq="D", + ) + tm.assert_index_equal(result, expected) + + def test_shift_no_freq(self): + # GH#19147 + tdi = TimedeltaIndex(["1 days 01:00:00", "2 days 01:00:00"], freq=None) + with pytest.raises(NullFrequencyError, match="Cannot shift with no freq"): + tdi.shift(2) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_constructors.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_constructors.py new file mode 100644 index 00000000..a3de699a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_constructors.py @@ -0,0 +1,281 @@ +from datetime import timedelta + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Timedelta, + TimedeltaIndex, + timedelta_range, + to_timedelta, +) +import pandas._testing as tm +from pandas.core.arrays.timedeltas import ( + TimedeltaArray, + sequence_to_td64ns, +) + + +class TestTimedeltaIndex: + def test_closed_deprecated(self): + # GH#52628 + msg = "The 'closed' keyword" + with tm.assert_produces_warning(FutureWarning, match=msg): + TimedeltaIndex([], closed=True) + + def test_array_of_dt64_nat_raises(self): + # GH#39462 + nat = np.datetime64("NaT", "ns") + arr = np.array([nat], dtype=object) + + msg = "Invalid type for timedelta scalar" + with pytest.raises(TypeError, match=msg): + TimedeltaIndex(arr) + + with pytest.raises(TypeError, match=msg): + TimedeltaArray._from_sequence(arr) + + with pytest.raises(TypeError, match=msg): + sequence_to_td64ns(arr) + + with pytest.raises(TypeError, match=msg): + to_timedelta(arr) + + @pytest.mark.parametrize("unit", ["Y", "y", "M"]) + def test_unit_m_y_raises(self, unit): + msg = "Units 'M', 'Y', and 'y' are no longer supported" + with pytest.raises(ValueError, match=msg): + TimedeltaIndex([1, 3, 7], unit) + + def test_int64_nocopy(self): + # GH#23539 check that a copy isn't made when we pass int64 data + # and copy=False + arr = np.arange(10, dtype=np.int64) + tdi = TimedeltaIndex(arr, copy=False) + assert tdi._data._ndarray.base is arr + + def test_infer_from_tdi(self): + # GH#23539 + # fast-path for inferring a frequency if the passed data already + # has one + tdi = timedelta_range("1 second", periods=10**7, freq="1s") + + result = TimedeltaIndex(tdi, freq="infer") + assert result.freq == tdi.freq + + # check that inferred_freq was not called by checking that the + # 
value has not been cached + assert "inferred_freq" not in getattr(result, "_cache", {}) + + def test_infer_from_tdi_mismatch(self): + # GH#23539 + # fast-path for invalidating a frequency if the passed data already + # has one and it does not match the `freq` input + tdi = timedelta_range("1 second", periods=100, freq="1s") + + msg = ( + "Inferred frequency .* from passed values does " + "not conform to passed frequency" + ) + with pytest.raises(ValueError, match=msg): + TimedeltaIndex(tdi, freq="D") + + with pytest.raises(ValueError, match=msg): + # GH#23789 + TimedeltaArray(tdi, freq="D") + + with pytest.raises(ValueError, match=msg): + TimedeltaIndex(tdi._data, freq="D") + + with pytest.raises(ValueError, match=msg): + TimedeltaArray(tdi._data, freq="D") + + def test_dt64_data_invalid(self): + # GH#23539 + # passing tz-aware DatetimeIndex raises, naive or ndarray[datetime64] + # raise as of GH#29794 + dti = pd.date_range("2016-01-01", periods=3) + + msg = "cannot be converted to timedelta64" + with pytest.raises(TypeError, match=msg): + TimedeltaIndex(dti.tz_localize("Europe/Brussels")) + + with pytest.raises(TypeError, match=msg): + TimedeltaIndex(dti) + + with pytest.raises(TypeError, match=msg): + TimedeltaIndex(np.asarray(dti)) + + def test_float64_ns_rounded(self): + # GH#23539 without specifying a unit, floats are regarded as nanos, + # and fractional portions are truncated + tdi = TimedeltaIndex([2.3, 9.7]) + expected = TimedeltaIndex([2, 9]) + tm.assert_index_equal(tdi, expected) + + # integral floats are non-lossy + tdi = TimedeltaIndex([2.0, 9.0]) + expected = TimedeltaIndex([2, 9]) + tm.assert_index_equal(tdi, expected) + + # NaNs get converted to NaT + tdi = TimedeltaIndex([2.0, np.nan]) + expected = TimedeltaIndex([Timedelta(nanoseconds=2), pd.NaT]) + tm.assert_index_equal(tdi, expected) + + def test_float64_unit_conversion(self): + # GH#23539 + tdi = TimedeltaIndex([1.5, 2.25], unit="D") + expected = TimedeltaIndex([Timedelta(days=1.5), Timedelta(days=2.25)]) + tm.assert_index_equal(tdi, expected) + + def test_construction_base_constructor(self): + arr = [Timedelta("1 days"), pd.NaT, Timedelta("3 days")] + tm.assert_index_equal(pd.Index(arr), TimedeltaIndex(arr)) + tm.assert_index_equal(pd.Index(np.array(arr)), TimedeltaIndex(np.array(arr))) + + arr = [np.nan, pd.NaT, Timedelta("1 days")] + tm.assert_index_equal(pd.Index(arr), TimedeltaIndex(arr)) + tm.assert_index_equal(pd.Index(np.array(arr)), TimedeltaIndex(np.array(arr))) + + def test_constructor(self): + expected = TimedeltaIndex( + [ + "1 days", + "1 days 00:00:05", + "2 days", + "2 days 00:00:02", + "0 days 00:00:03", + ] + ) + result = TimedeltaIndex( + [ + "1 days", + "1 days, 00:00:05", + np.timedelta64(2, "D"), + timedelta(days=2, seconds=2), + pd.offsets.Second(3), + ] + ) + tm.assert_index_equal(result, expected) + + expected = TimedeltaIndex( + ["0 days 00:00:00", "0 days 00:00:01", "0 days 00:00:02"] + ) + tm.assert_index_equal(TimedeltaIndex(range(3), unit="s"), expected) + expected = TimedeltaIndex( + ["0 days 00:00:00", "0 days 00:00:05", "0 days 00:00:09"] + ) + tm.assert_index_equal(TimedeltaIndex([0, 5, 9], unit="s"), expected) + expected = TimedeltaIndex( + ["0 days 00:00:00.400", "0 days 00:00:00.450", "0 days 00:00:01.200"] + ) + tm.assert_index_equal(TimedeltaIndex([400, 450, 1200], unit="ms"), expected) + + def test_constructor_iso(self): + # GH #21877 + expected = timedelta_range("1s", periods=9, freq="s") + durations = [f"P0DT0H0M{i}S" for i in range(1, 10)] + result = to_timedelta(durations) + 
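# A runnable sketch (separate from the vendored file) of two constructor
# rules exercised above: without a unit, float inputs are read as nanoseconds
# and fractional parts are truncated; with an explicit unit, fractional
# values convert losslessly.
import pandas as pd

assert pd.TimedeltaIndex([2.3, 9.7])[0] == pd.Timedelta(nanoseconds=2)  # 2.3 ns truncated to 2 ns
assert pd.TimedeltaIndex([1.5, 2.25], unit="D")[0] == pd.Timedelta(days=1.5)  # unit scales exactly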
tm.assert_index_equal(result, expected) + + def test_constructor_coverage(self): + rng = timedelta_range("1 days", periods=10.5) + exp = timedelta_range("1 days", periods=10) + tm.assert_index_equal(rng, exp) + + msg = "periods must be a number, got foo" + with pytest.raises(TypeError, match=msg): + timedelta_range(start="1 days", periods="foo", freq="D") + + msg = ( + r"TimedeltaIndex\(\.\.\.\) must be called with a collection of some kind, " + "'1 days' was passed" + ) + with pytest.raises(TypeError, match=msg): + TimedeltaIndex("1 days") + + # generator expression + gen = (timedelta(i) for i in range(10)) + result = TimedeltaIndex(gen) + expected = TimedeltaIndex([timedelta(i) for i in range(10)]) + tm.assert_index_equal(result, expected) + + # NumPy string array + strings = np.array(["1 days", "2 days", "3 days"]) + result = TimedeltaIndex(strings) + expected = to_timedelta([1, 2, 3], unit="d") + tm.assert_index_equal(result, expected) + + from_ints = TimedeltaIndex(expected.asi8) + tm.assert_index_equal(from_ints, expected) + + # non-conforming freq + msg = ( + "Inferred frequency None from passed values does not conform to " + "passed frequency D" + ) + with pytest.raises(ValueError, match=msg): + TimedeltaIndex(["1 days", "2 days", "4 days"], freq="D") + + msg = ( + "Of the four parameters: start, end, periods, and freq, exactly " + "three must be specified" + ) + with pytest.raises(ValueError, match=msg): + timedelta_range(periods=10, freq="D") + + def test_constructor_name(self): + idx = timedelta_range(start="1 days", periods=1, freq="D", name="TEST") + assert idx.name == "TEST" + + # GH10025 + idx2 = TimedeltaIndex(idx, name="something else") + assert idx2.name == "something else" + + def test_constructor_no_precision_raises(self): + # GH-24753, GH-24739 + + msg = "with no precision is not allowed" + with pytest.raises(ValueError, match=msg): + TimedeltaIndex(["2000"], dtype="timedelta64") + + msg = "The 'timedelta64' dtype has no unit. Please pass in" + with pytest.raises(ValueError, match=msg): + pd.Index(["2000"], dtype="timedelta64") + + def test_constructor_wrong_precision_raises(self): + msg = r"dtype timedelta64\[D\] cannot be converted to timedelta64\[ns\]" + with pytest.raises(ValueError, match=msg): + TimedeltaIndex(["2000"], dtype="timedelta64[D]") + + # "timedelta64[us]" was unsupported pre-2.0, but now this works. 
+ tdi = TimedeltaIndex(["2000"], dtype="timedelta64[us]") + assert tdi.dtype == "m8[us]" + + def test_explicit_none_freq(self): + # Explicitly passing freq=None is respected + tdi = timedelta_range(1, periods=5) + assert tdi.freq is not None + + result = TimedeltaIndex(tdi, freq=None) + assert result.freq is None + + result = TimedeltaIndex(tdi._data, freq=None) + assert result.freq is None + + tda = TimedeltaArray(tdi, freq=None) + assert tda.freq is None + + def test_from_categorical(self): + tdi = timedelta_range(1, periods=5) + + cat = pd.Categorical(tdi) + + result = TimedeltaIndex(cat) + tm.assert_index_equal(result, tdi) + + ci = pd.CategoricalIndex(tdi) + result = TimedeltaIndex(ci) + tm.assert_index_equal(result, tdi) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_delete.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_delete.py new file mode 100644 index 00000000..6e6f5470 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_delete.py @@ -0,0 +1,71 @@ +from pandas import ( + TimedeltaIndex, + timedelta_range, +) +import pandas._testing as tm + + +class TestTimedeltaIndexDelete: + def test_delete(self): + idx = timedelta_range(start="1 Days", periods=5, freq="D", name="idx") + + # preserve freq + expected_0 = timedelta_range(start="2 Days", periods=4, freq="D", name="idx") + expected_4 = timedelta_range(start="1 Days", periods=4, freq="D", name="idx") + + # reset freq to None + expected_1 = TimedeltaIndex( + ["1 day", "3 day", "4 day", "5 day"], freq=None, name="idx" + ) + + cases = { + 0: expected_0, + -5: expected_0, + -1: expected_4, + 4: expected_4, + 1: expected_1, + } + for n, expected in cases.items(): + result = idx.delete(n) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + with tm.external_error_raised((IndexError, ValueError)): + # either depending on numpy version + idx.delete(5) + + def test_delete_slice(self): + idx = timedelta_range(start="1 days", periods=10, freq="D", name="idx") + + # preserve freq + expected_0_2 = timedelta_range(start="4 days", periods=7, freq="D", name="idx") + expected_7_9 = timedelta_range(start="1 days", periods=7, freq="D", name="idx") + + # reset freq to None + expected_3_5 = TimedeltaIndex( + ["1 d", "2 d", "3 d", "7 d", "8 d", "9 d", "10d"], freq=None, name="idx" + ) + + cases = { + (0, 1, 2): expected_0_2, + (7, 8, 9): expected_7_9, + (3, 4, 5): expected_3_5, + } + for n, expected in cases.items(): + result = idx.delete(n) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + result = idx.delete(slice(n[0], n[-1] + 1)) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + def test_delete_doesnt_infer_freq(self): + # GH#30655 behavior matches DatetimeIndex + + tdi = TimedeltaIndex(["1 Day", "2 Days", None, "3 Days", "4 Days"]) + result = tdi.delete(2) + assert result.freq is None diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_formats.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_formats.py new file mode 100644 index 00000000..751f9e4c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_formats.py @@ -0,0 +1,93 @@ +import pytest + +import pandas as pd +from pandas import ( + Series, + TimedeltaIndex, +) + 
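# A minimal sketch of the freq rule in the delete tests above: dropping an
# endpoint of a regular TimedeltaIndex preserves the daily freq, while
# dropping an interior element leaves a gap and resets freq to None.
import pandas as pd

tdi = pd.timedelta_range("1 Day", periods=5, freq="D")
assert tdi.delete(0).freq == tdi.freq  # endpoint removed: still daily
assert tdi.delete(2).freq is None  # interior gap: freq no longer valid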
+ +class TestTimedeltaIndexRendering: + @pytest.mark.parametrize("method", ["__repr__", "__str__"]) + def test_representation(self, method): + idx1 = TimedeltaIndex([], freq="D") + idx2 = TimedeltaIndex(["1 days"], freq="D") + idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D") + idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D") + idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"]) + + exp1 = "TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')" + + exp2 = "TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', freq='D')" + + exp3 = "TimedeltaIndex(['1 days', '2 days'], dtype='timedelta64[ns]', freq='D')" + + exp4 = ( + "TimedeltaIndex(['1 days', '2 days', '3 days'], " + "dtype='timedelta64[ns]', freq='D')" + ) + + exp5 = ( + "TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', " + "'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)" + ) + + with pd.option_context("display.width", 300): + for idx, expected in zip( + [idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5] + ): + result = getattr(idx, method)() + assert result == expected + + def test_representation_to_series(self): + idx1 = TimedeltaIndex([], freq="D") + idx2 = TimedeltaIndex(["1 days"], freq="D") + idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D") + idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D") + idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"]) + + exp1 = """Series([], dtype: timedelta64[ns])""" + + exp2 = "0 1 days\ndtype: timedelta64[ns]" + + exp3 = "0 1 days\n1 2 days\ndtype: timedelta64[ns]" + + exp4 = "0 1 days\n1 2 days\n2 3 days\ndtype: timedelta64[ns]" + + exp5 = ( + "0 1 days 00:00:01\n" + "1 2 days 00:00:00\n" + "2 3 days 00:00:00\n" + "dtype: timedelta64[ns]" + ) + + with pd.option_context("display.width", 300): + for idx, expected in zip( + [idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5] + ): + result = repr(Series(idx)) + assert result == expected + + def test_summary(self): + # GH#9116 + idx1 = TimedeltaIndex([], freq="D") + idx2 = TimedeltaIndex(["1 days"], freq="D") + idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D") + idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D") + idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"]) + + exp1 = "TimedeltaIndex: 0 entries\nFreq: D" + + exp2 = "TimedeltaIndex: 1 entries, 1 days to 1 days\nFreq: D" + + exp3 = "TimedeltaIndex: 2 entries, 1 days to 2 days\nFreq: D" + + exp4 = "TimedeltaIndex: 3 entries, 1 days to 3 days\nFreq: D" + + exp5 = "TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days 00:00:00" + + for idx, expected in zip( + [idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5] + ): + result = idx._summary() + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_freq_attr.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_freq_attr.py new file mode 100644 index 00000000..868da432 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_freq_attr.py @@ -0,0 +1,72 @@ +import pytest + +from pandas import TimedeltaIndex + +from pandas.tseries.offsets import ( + DateOffset, + Day, + Hour, + MonthEnd, +) + + +class TestFreq: + @pytest.mark.parametrize("values", [["0 days", "2 days", "4 days"], []]) + @pytest.mark.parametrize("freq", ["2D", Day(2), "48H", Hour(48)]) + def test_freq_setter(self, values, freq): + # GH#20678 + idx = TimedeltaIndex(values) + + # can set to an offset, converting from string if necessary + 
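# A concrete instance (a sketch mirroring one parametrization of this test)
# of the setter behavior: assigning the string "2D" to the underlying
# array's freq stores it as a DateOffset.
import pandas as pd
from pandas.tseries.offsets import Day

tdi = pd.TimedeltaIndex(["0 days", "2 days", "4 days"])
tdi._data.freq = "2D"  # private API, exactly as the vendored test does
assert tdi.freq == Day(2) and isinstance(tdi.freq, Day)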
idx._data.freq = freq + assert idx.freq == freq + assert isinstance(idx.freq, DateOffset) + + # can reset to None + idx._data.freq = None + assert idx.freq is None + + def test_with_freq_empty_requires_tick(self): + idx = TimedeltaIndex([]) + + off = MonthEnd(1) + msg = "TimedeltaArray/Index freq must be a Tick" + with pytest.raises(TypeError, match=msg): + idx._with_freq(off) + with pytest.raises(TypeError, match=msg): + idx._data._with_freq(off) + + def test_freq_setter_errors(self): + # GH#20678 + idx = TimedeltaIndex(["0 days", "2 days", "4 days"]) + + # setting with an incompatible freq + msg = ( + "Inferred frequency 2D from passed values does not conform to " + "passed frequency 5D" + ) + with pytest.raises(ValueError, match=msg): + idx._data.freq = "5D" + + # setting with a non-fixed frequency + msg = r"<2 \* BusinessDays> is a non-fixed frequency" + with pytest.raises(ValueError, match=msg): + idx._data.freq = "2B" + + # setting with non-freq string + with pytest.raises(ValueError, match="Invalid frequency"): + idx._data.freq = "foo" + + def test_freq_view_safe(self): + # Setting the freq for one TimedeltaIndex shouldn't alter the freq + # for another that views the same data + + tdi = TimedeltaIndex(["0 days", "2 days", "4 days"], freq="2D") + tda = tdi._data + + tdi2 = TimedeltaIndex(tda)._with_freq(None) + assert tdi2.freq is None + + # Original was not altered + assert tdi.freq == "2D" + assert tda.freq == "2D" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_indexing.py new file mode 100644 index 00000000..31cc8e18 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_indexing.py @@ -0,0 +1,347 @@ +from datetime import datetime +import re + +import numpy as np +import pytest + +from pandas import ( + Index, + NaT, + Timedelta, + TimedeltaIndex, + Timestamp, + notna, + offsets, + timedelta_range, + to_timedelta, +) +import pandas._testing as tm + + +class TestGetItem: + def test_getitem_slice_keeps_name(self): + # GH#4226 + tdi = timedelta_range("1d", "5d", freq="H", name="timebucket") + assert tdi[1:].name == tdi.name + + def test_getitem(self): + idx1 = timedelta_range("1 day", "31 day", freq="D", name="idx") + + for idx in [idx1]: + result = idx[0] + assert result == Timedelta("1 day") + + result = idx[0:5] + expected = timedelta_range("1 day", "5 day", freq="D", name="idx") + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[0:10:2] + expected = timedelta_range("1 day", "9 day", freq="2D", name="idx") + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[-20:-5:3] + expected = timedelta_range("12 day", "24 day", freq="3D", name="idx") + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[4::-1] + expected = TimedeltaIndex( + ["5 day", "4 day", "3 day", "2 day", "1 day"], freq="-1D", name="idx" + ) + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + @pytest.mark.parametrize( + "key", + [ + Timestamp("1970-01-01"), + Timestamp("1970-01-02"), + datetime(1970, 1, 1), + Timestamp("1970-01-03").to_datetime64(), + # non-matching NA values + np.datetime64("NaT"), + ], + ) + def test_timestamp_invalid_key(self, key): + # GH#20464 + tdi = timedelta_range(0, periods=10) + with pytest.raises(KeyError, match=re.escape(repr(key))): + tdi.get_loc(key) + + +class 
TestGetLoc: + def test_get_loc_key_unit_mismatch(self): + idx = to_timedelta(["0 days", "1 days", "2 days"]) + key = idx[1].as_unit("ms") + loc = idx.get_loc(key) + assert loc == 1 + + def test_get_loc_key_unit_mismatch_not_castable(self): + tdi = to_timedelta(["0 days", "1 days", "2 days"]).astype("m8[s]") + assert tdi.dtype == "m8[s]" + key = tdi[0].as_unit("ns") + Timedelta(1) + + with pytest.raises(KeyError, match=r"Timedelta\('0 days 00:00:00.000000001'\)"): + tdi.get_loc(key) + + assert key not in tdi + + def test_get_loc(self): + idx = to_timedelta(["0 days", "1 days", "2 days"]) + + # GH 16909 + assert idx.get_loc(idx[1].to_timedelta64()) == 1 + + # GH 16896 + assert idx.get_loc("0 days") == 0 + + def test_get_loc_nat(self): + tidx = TimedeltaIndex(["1 days 01:00:00", "NaT", "2 days 01:00:00"]) + + assert tidx.get_loc(NaT) == 1 + assert tidx.get_loc(None) == 1 + assert tidx.get_loc(float("nan")) == 1 + assert tidx.get_loc(np.nan) == 1 + + +class TestGetIndexer: + def test_get_indexer(self): + idx = to_timedelta(["0 days", "1 days", "2 days"]) + tm.assert_numpy_array_equal( + idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp) + ) + + target = to_timedelta(["-1 hour", "12 hours", "1 day 1 hour"]) + tm.assert_numpy_array_equal( + idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp) + ) + tm.assert_numpy_array_equal( + idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp) + ) + tm.assert_numpy_array_equal( + idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp) + ) + + res = idx.get_indexer(target, "nearest", tolerance=Timedelta("1 hour")) + tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp)) + + +class TestWhere: + def test_where_doesnt_retain_freq(self): + tdi = timedelta_range("1 day", periods=3, freq="D", name="idx") + cond = [True, True, False] + expected = TimedeltaIndex([tdi[0], tdi[1], tdi[0]], freq=None, name="idx") + + result = tdi.where(cond, tdi[::-1]) + tm.assert_index_equal(result, expected) + + def test_where_invalid_dtypes(self, fixed_now_ts): + tdi = timedelta_range("1 day", periods=3, freq="D", name="idx") + + tail = tdi[2:].tolist() + i2 = Index([NaT, NaT] + tail) + mask = notna(i2) + + expected = Index([NaT._value, NaT._value] + tail, dtype=object, name="idx") + assert isinstance(expected[0], int) + result = tdi.where(mask, i2.asi8) + tm.assert_index_equal(result, expected) + + ts = i2 + fixed_now_ts + expected = Index([ts[0], ts[1]] + tail, dtype=object, name="idx") + result = tdi.where(mask, ts) + tm.assert_index_equal(result, expected) + + per = (i2 + fixed_now_ts).to_period("D") + expected = Index([per[0], per[1]] + tail, dtype=object, name="idx") + result = tdi.where(mask, per) + tm.assert_index_equal(result, expected) + + ts = fixed_now_ts + expected = Index([ts, ts] + tail, dtype=object, name="idx") + result = tdi.where(mask, ts) + tm.assert_index_equal(result, expected) + + def test_where_mismatched_nat(self): + tdi = timedelta_range("1 day", periods=3, freq="D", name="idx") + cond = np.array([True, False, False]) + + dtnat = np.datetime64("NaT", "ns") + expected = Index([tdi[0], dtnat, dtnat], dtype=object, name="idx") + assert expected[2] is dtnat + result = tdi.where(cond, dtnat) + tm.assert_index_equal(result, expected) + + +class TestTake: + def test_take(self): + # GH 10295 + idx1 = timedelta_range("1 day", "31 day", freq="D", name="idx") + + for idx in [idx1]: + result = idx.take([0]) + assert result == Timedelta("1 day") + + result = idx.take([-1]) + assert result == 
Timedelta("31 day") + + result = idx.take([0, 1, 2]) + expected = timedelta_range("1 day", "3 day", freq="D", name="idx") + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([0, 2, 4]) + expected = timedelta_range("1 day", "5 day", freq="2D", name="idx") + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([7, 4, 1]) + expected = timedelta_range("8 day", "2 day", freq="-3D", name="idx") + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([3, 2, 5]) + expected = TimedeltaIndex(["4 day", "3 day", "6 day"], name="idx") + tm.assert_index_equal(result, expected) + assert result.freq is None + + result = idx.take([-3, 2, 5]) + expected = TimedeltaIndex(["29 day", "3 day", "6 day"], name="idx") + tm.assert_index_equal(result, expected) + assert result.freq is None + + def test_take_invalid_kwargs(self): + idx = timedelta_range("1 day", "31 day", freq="D", name="idx") + indices = [1, 6, 5, 9, 10, 13, 15, 3] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + idx.take(indices, foo=2) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, out=indices) + + msg = "the 'mode' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, mode="clip") + + def test_take_equiv_getitem(self): + tds = ["1day 02:00:00", "1 day 04:00:00", "1 day 10:00:00"] + idx = timedelta_range(start="1d", end="2d", freq="H", name="idx") + expected = TimedeltaIndex(tds, freq=None, name="idx") + + taken1 = idx.take([2, 4, 10]) + taken2 = idx[[2, 4, 10]] + + for taken in [taken1, taken2]: + tm.assert_index_equal(taken, expected) + assert isinstance(taken, TimedeltaIndex) + assert taken.freq is None + assert taken.name == expected.name + + def test_take_fill_value(self): + # GH 12631 + idx = TimedeltaIndex(["1 days", "2 days", "3 days"], name="xxx") + result = idx.take(np.array([1, 0, -1])) + expected = TimedeltaIndex(["2 days", "1 days", "3 days"], name="xxx") + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = TimedeltaIndex(["2 days", "1 days", "NaT"], name="xxx") + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = TimedeltaIndex(["2 days", "1 days", "3 days"], name="xxx") + tm.assert_index_equal(result, expected) + + msg = ( + "When allow_fill=True and fill_value is not None, " + "all indices must be >= -1" + ) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "index -5 is out of bounds for (axis 0 with )?size 3" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + +class TestMaybeCastSliceBound: + @pytest.fixture(params=["increasing", "decreasing", None]) + def monotonic(self, request): + return request.param + + @pytest.fixture + def tdi(self, monotonic): + tdi = timedelta_range("1 Day", periods=10) + if monotonic == "decreasing": + tdi = tdi[::-1] + elif monotonic is None: + taker = np.arange(10, dtype=np.intp) + np.random.default_rng(2).shuffle(taker) + tdi = tdi.take(taker) + return tdi + + def test_maybe_cast_slice_bound_invalid_str(self, tdi): + # test the low-level 
_maybe_cast_slice_bound and that we get the + # expected exception+message all the way up the stack + msg = ( + "cannot do slice indexing on TimedeltaIndex with these " + r"indexers \[foo\] of type str" + ) + with pytest.raises(TypeError, match=msg): + tdi._maybe_cast_slice_bound("foo", side="left") + with pytest.raises(TypeError, match=msg): + tdi.get_slice_bound("foo", side="left") + with pytest.raises(TypeError, match=msg): + tdi.slice_locs("foo", None, None) + + def test_slice_invalid_str_with_timedeltaindex( + self, tdi, frame_or_series, indexer_sl + ): + obj = frame_or_series(range(10), index=tdi) + + msg = ( + "cannot do slice indexing on TimedeltaIndex with these " + r"indexers \[foo\] of type str" + ) + with pytest.raises(TypeError, match=msg): + indexer_sl(obj)["foo":] + with pytest.raises(TypeError, match=msg): + indexer_sl(obj)["foo":-1] + with pytest.raises(TypeError, match=msg): + indexer_sl(obj)[:"foo"] + with pytest.raises(TypeError, match=msg): + indexer_sl(obj)[tdi[0] : "foo"] + + +class TestContains: + def test_contains_nonunique(self): + # GH#9512 + for vals in ( + [0, 1, 0], + [0, 0, -1], + [0, -1, -1], + ["00:01:00", "00:01:00", "00:02:00"], + ["00:01:00", "00:01:00", "00:00:01"], + ): + idx = TimedeltaIndex(vals) + assert idx[0] in idx + + def test_contains(self): + # Checking for any NaT-like objects + # GH#13603 + td = to_timedelta(range(5), unit="d") + offsets.Hour(1) + for v in [NaT, None, float("nan"), np.nan]: + assert v not in td + + td = to_timedelta([NaT]) + for v in [NaT, None, float("nan"), np.nan]: + assert v in td diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_join.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_join.py new file mode 100644 index 00000000..f3b12aa2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_join.py @@ -0,0 +1,55 @@ +import numpy as np + +from pandas import ( + Index, + Timedelta, + timedelta_range, +) +import pandas._testing as tm + + +class TestJoin: + def test_append_join_nondatetimeindex(self): + rng = timedelta_range("1 days", periods=10) + idx = Index(["a", "b", "c", "d"]) + + result = rng.append(idx) + assert isinstance(result[0], Timedelta) + + # it works + rng.join(idx, how="outer") + + def test_join_self(self, join_type): + index = timedelta_range("1 day", periods=10) + joined = index.join(index, how=join_type) + tm.assert_index_equal(index, joined) + + def test_does_not_convert_mixed_integer(self): + df = tm.makeCustomDataframe( + 10, + 10, + data_gen_f=lambda *args, **kwargs: np.random.default_rng( + 2 + ).standard_normal(), + r_idx_type="i", + c_idx_type="td", + ) + str(df) + + cols = df.columns.join(df.index, how="outer") + joined = cols.join(df.columns) + assert cols.dtype == np.dtype("O") + assert cols.dtype == joined.dtype + tm.assert_index_equal(cols, joined) + + def test_join_preserves_freq(self): + # GH#32157 + tdi = timedelta_range("1 day", periods=10) + result = tdi[:5].join(tdi[5:], how="outer") + assert result.freq == tdi.freq + tm.assert_index_equal(result, tdi) + + result = tdi[:5].join(tdi[6:], how="outer") + assert result.freq is None + expected = tdi.delete(5) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_ops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_ops.py new file mode 100644 index 00000000..f6013baf --- /dev/null +++ 
b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_ops.py @@ -0,0 +1,14 @@ +from pandas import ( + TimedeltaIndex, + timedelta_range, +) +import pandas._testing as tm + + +class TestTimedeltaIndexOps: + def test_infer_freq(self, freq_sample): + # GH#11018 + idx = timedelta_range("1", freq=freq_sample, periods=10) + result = TimedeltaIndex(idx.asi8, freq="infer") + tm.assert_index_equal(idx, result) + assert result.freq == freq_sample diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_pickle.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_pickle.py new file mode 100644 index 00000000..befe7097 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_pickle.py @@ -0,0 +1,11 @@ +from pandas import timedelta_range +import pandas._testing as tm + + +class TestPickle: + def test_pickle_after_set_freq(self): + tdi = timedelta_range("1 day", periods=4, freq="s") + tdi = tdi._with_freq(None) + + res = tm.round_trip_pickle(tdi) + tm.assert_index_equal(res, tdi) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_scalar_compat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_scalar_compat.py new file mode 100644 index 00000000..9f470b40 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_scalar_compat.py @@ -0,0 +1,142 @@ +""" +Tests for TimedeltaIndex methods behaving like their Timedelta counterparts +""" + +import numpy as np +import pytest + +from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG + +from pandas import ( + Index, + Series, + Timedelta, + TimedeltaIndex, + timedelta_range, +) +import pandas._testing as tm + + +class TestVectorizedTimedelta: + def test_tdi_total_seconds(self): + # GH#10939 + # test index + rng = timedelta_range("1 days, 10:11:12.100123456", periods=2, freq="s") + expt = [ + 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9, + 1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456.0 / 1e9, + ] + tm.assert_almost_equal(rng.total_seconds(), Index(expt)) + + # test Series + ser = Series(rng) + s_expt = Series(expt, index=[0, 1]) + tm.assert_series_equal(ser.dt.total_seconds(), s_expt) + + # with nat + ser[1] = np.nan + s_expt = Series( + [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9, np.nan], + index=[0, 1], + ) + tm.assert_series_equal(ser.dt.total_seconds(), s_expt) + + def test_tdi_total_seconds_all_nat(self): + # with both nat + ser = Series([np.nan, np.nan], dtype="timedelta64[ns]") + result = ser.dt.total_seconds() + expected = Series([np.nan, np.nan]) + tm.assert_series_equal(result, expected) + + def test_tdi_round(self): + td = timedelta_range(start="16801 days", periods=5, freq="30Min") + elt = td[1] + + expected_rng = TimedeltaIndex( + [ + Timedelta("16801 days 00:00:00"), + Timedelta("16801 days 00:00:00"), + Timedelta("16801 days 01:00:00"), + Timedelta("16801 days 02:00:00"), + Timedelta("16801 days 02:00:00"), + ] + ) + expected_elt = expected_rng[1] + + tm.assert_index_equal(td.round(freq="H"), expected_rng) + assert elt.round(freq="H") == expected_elt + + msg = INVALID_FREQ_ERR_MSG + with pytest.raises(ValueError, match=msg): + td.round(freq="foo") + with pytest.raises(ValueError, match=msg): + elt.round(freq="foo") + + msg = " is a non-fixed frequency" + with pytest.raises(ValueError, match=msg): + td.round(freq="M") + with pytest.raises(ValueError, match=msg): + elt.round(freq="M") + + 
@pytest.mark.parametrize( + "freq,msg", + [ + ("Y", " is a non-fixed frequency"), + ("M", " is a non-fixed frequency"), + ("foobar", "Invalid frequency: foobar"), + ], + ) + def test_tdi_round_invalid(self, freq, msg): + t1 = timedelta_range("1 days", periods=3, freq="1 min 2 s 3 us") + + with pytest.raises(ValueError, match=msg): + t1.round(freq) + with pytest.raises(ValueError, match=msg): + # Same test for TimedeltaArray + t1._data.round(freq) + + # TODO: de-duplicate with test_tdi_round + def test_round(self): + t1 = timedelta_range("1 days", periods=3, freq="1 min 2 s 3 us") + t2 = -1 * t1 + t1a = timedelta_range("1 days", periods=3, freq="1 min 2 s") + t1c = TimedeltaIndex([1, 1, 1], unit="D") + + # note that negative times round DOWN! so don't give whole numbers + for freq, s1, s2 in [ + ("N", t1, t2), + ("U", t1, t2), + ( + "L", + t1a, + TimedeltaIndex( + ["-1 days +00:00:00", "-2 days +23:58:58", "-2 days +23:57:56"] + ), + ), + ( + "S", + t1a, + TimedeltaIndex( + ["-1 days +00:00:00", "-2 days +23:58:58", "-2 days +23:57:56"] + ), + ), + ("12T", t1c, TimedeltaIndex(["-1 days", "-1 days", "-1 days"])), + ("H", t1c, TimedeltaIndex(["-1 days", "-1 days", "-1 days"])), + ("d", t1c, TimedeltaIndex([-1, -1, -1], unit="D")), + ]: + r1 = t1.round(freq) + tm.assert_index_equal(r1, s1) + r2 = t2.round(freq) + tm.assert_index_equal(r2, s2) + + def test_components(self): + rng = timedelta_range("1 days, 10:11:12", periods=2, freq="s") + rng.components + + # with nat + s = Series(rng) + s[1] = np.nan + + result = s.dt.components + assert not result.iloc[0].isna().all() + assert result.iloc[1].isna().all() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_searchsorted.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_searchsorted.py new file mode 100644 index 00000000..710571ef --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_searchsorted.py @@ -0,0 +1,28 @@ +import numpy as np +import pytest + +from pandas import ( + TimedeltaIndex, + Timestamp, +) +import pandas._testing as tm + + +class TestSearchSorted: + def test_searchsorted_different_argument_classes(self, listlike_box): + idx = TimedeltaIndex(["1 day", "2 days", "3 days"]) + result = idx.searchsorted(listlike_box(idx)) + expected = np.arange(len(idx), dtype=result.dtype) + tm.assert_numpy_array_equal(result, expected) + + result = idx._data.searchsorted(listlike_box(idx)) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2] + ) + def test_searchsorted_invalid_argument_dtype(self, arg): + idx = TimedeltaIndex(["1 day", "2 days", "3 days"]) + msg = "value should be a 'Timedelta', 'NaT', or array of those. 
Got" + with pytest.raises(TypeError, match=msg): + idx.searchsorted(arg) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_setops.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_setops.py new file mode 100644 index 00000000..cb6dce1e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_setops.py @@ -0,0 +1,252 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + TimedeltaIndex, + timedelta_range, +) +import pandas._testing as tm + +from pandas.tseries.offsets import Hour + + +class TestTimedeltaIndex: + def test_union(self): + i1 = timedelta_range("1day", periods=5) + i2 = timedelta_range("3day", periods=5) + result = i1.union(i2) + expected = timedelta_range("1day", periods=7) + tm.assert_index_equal(result, expected) + + i1 = Index(np.arange(0, 20, 2, dtype=np.int64)) + i2 = timedelta_range(start="1 day", periods=10, freq="D") + i1.union(i2) # Works + i2.union(i1) # Fails with "AttributeError: can't set attribute" + + def test_union_sort_false(self): + tdi = timedelta_range("1day", periods=5) + + left = tdi[3:] + right = tdi[:3] + + # Check that we are testing the desired code path + assert left._can_fast_union(right) + + result = left.union(right) + tm.assert_index_equal(result, tdi) + + result = left.union(right, sort=False) + expected = TimedeltaIndex(["4 Days", "5 Days", "1 Days", "2 Day", "3 Days"]) + tm.assert_index_equal(result, expected) + + def test_union_coverage(self): + idx = TimedeltaIndex(["3d", "1d", "2d"]) + ordered = TimedeltaIndex(idx.sort_values(), freq="infer") + result = ordered.union(idx) + tm.assert_index_equal(result, ordered) + + result = ordered[:0].union(ordered) + tm.assert_index_equal(result, ordered) + assert result.freq == ordered.freq + + def test_union_bug_1730(self): + rng_a = timedelta_range("1 day", periods=4, freq="3H") + rng_b = timedelta_range("1 day", periods=4, freq="4H") + + result = rng_a.union(rng_b) + exp = TimedeltaIndex(sorted(set(rng_a) | set(rng_b))) + tm.assert_index_equal(result, exp) + + def test_union_bug_1745(self): + left = TimedeltaIndex(["1 day 15:19:49.695000"]) + right = TimedeltaIndex( + ["2 day 13:04:21.322000", "1 day 15:27:24.873000", "1 day 15:31:05.350000"] + ) + + result = left.union(right) + exp = TimedeltaIndex(sorted(set(left) | set(right))) + tm.assert_index_equal(result, exp) + + def test_union_bug_4564(self): + left = timedelta_range("1 day", "30d") + right = left + pd.offsets.Minute(15) + + result = left.union(right) + exp = TimedeltaIndex(sorted(set(left) | set(right))) + tm.assert_index_equal(result, exp) + + def test_union_freq_infer(self): + # When taking the union of two TimedeltaIndexes, we infer + # a freq even if the arguments don't have freq. This matches + # DatetimeIndex behavior. 
+ tdi = timedelta_range("1 Day", periods=5) + left = tdi[[0, 1, 3, 4]] + right = tdi[[2, 3, 1]] + + assert left.freq is None + assert right.freq is None + + result = left.union(right) + tm.assert_index_equal(result, tdi) + assert result.freq == "D" + + def test_intersection_bug_1708(self): + index_1 = timedelta_range("1 day", periods=4, freq="h") + index_2 = index_1 + pd.offsets.Hour(5) + + result = index_1.intersection(index_2) + assert len(result) == 0 + + index_1 = timedelta_range("1 day", periods=4, freq="h") + index_2 = index_1 + pd.offsets.Hour(1) + + result = index_1.intersection(index_2) + expected = timedelta_range("1 day 01:00:00", periods=3, freq="h") + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + def test_intersection_equal(self, sort): + # GH 24471 Test intersection outcome given the sort keyword + # for equal indices intersection should return the original index + first = timedelta_range("1 day", periods=4, freq="h") + second = timedelta_range("1 day", periods=4, freq="h") + intersect = first.intersection(second, sort=sort) + if sort is None: + tm.assert_index_equal(intersect, second.sort_values()) + assert tm.equalContents(intersect, second) + + # Corner cases + inter = first.intersection(first, sort=sort) + assert inter is first + + @pytest.mark.parametrize("period_1, period_2", [(0, 4), (4, 0)]) + def test_intersection_zero_length(self, period_1, period_2, sort): + # GH 24471 test for non overlap the intersection should be zero length + index_1 = timedelta_range("1 day", periods=period_1, freq="h") + index_2 = timedelta_range("1 day", periods=period_2, freq="h") + expected = timedelta_range("1 day", periods=0, freq="h") + result = index_1.intersection(index_2, sort=sort) + tm.assert_index_equal(result, expected) + + def test_zero_length_input_index(self, sort): + # GH 24966 test for 0-len intersections are copied + index_1 = timedelta_range("1 day", periods=0, freq="h") + index_2 = timedelta_range("1 day", periods=3, freq="h") + result = index_1.intersection(index_2, sort=sort) + assert index_1 is not result + assert index_2 is not result + tm.assert_copy(result, index_1) + + @pytest.mark.parametrize( + "rng, expected", + # if target has the same name, it is preserved + [ + ( + timedelta_range("1 day", periods=5, freq="h", name="idx"), + timedelta_range("1 day", periods=4, freq="h", name="idx"), + ), + # if target name is different, it will be reset + ( + timedelta_range("1 day", periods=5, freq="h", name="other"), + timedelta_range("1 day", periods=4, freq="h", name=None), + ), + # if no overlap exists return empty index + ( + timedelta_range("1 day", periods=10, freq="h", name="idx")[5:], + TimedeltaIndex([], freq="h", name="idx"), + ), + ], + ) + def test_intersection(self, rng, expected, sort): + # GH 4690 (with tz) + base = timedelta_range("1 day", periods=4, freq="h", name="idx") + result = base.intersection(rng, sort=sort) + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + @pytest.mark.parametrize( + "rng, expected", + # part intersection works + [ + ( + TimedeltaIndex(["5 hour", "2 hour", "4 hour", "9 hour"], name="idx"), + TimedeltaIndex(["2 hour", "4 hour"], name="idx"), + ), + # reordered part intersection + ( + TimedeltaIndex(["2 hour", "5 hour", "5 hour", "1 hour"], name="other"), + TimedeltaIndex(["1 hour", "2 hour"], name=None), + ), + # reversed index + ( + TimedeltaIndex(["1 hour", "2 hour", "4 
hour", "3 hour"], name="idx")[ + ::-1 + ], + TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx"), + ), + ], + ) + def test_intersection_non_monotonic(self, rng, expected, sort): + # 24471 non-monotonic + base = TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx") + result = base.intersection(rng, sort=sort) + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + assert result.name == expected.name + + # if reversed order, frequency is still the same + if all(base == rng[::-1]) and sort is None: + assert isinstance(result.freq, Hour) + else: + assert result.freq is None + + +class TestTimedeltaIndexDifference: + def test_difference_freq(self, sort): + # GH14323: Difference of TimedeltaIndex should not preserve frequency + + index = timedelta_range("0 days", "5 days", freq="D") + + other = timedelta_range("1 days", "4 days", freq="D") + expected = TimedeltaIndex(["0 days", "5 days"], freq=None) + idx_diff = index.difference(other, sort) + tm.assert_index_equal(idx_diff, expected) + tm.assert_attr_equal("freq", idx_diff, expected) + + other = timedelta_range("2 days", "5 days", freq="D") + idx_diff = index.difference(other, sort) + expected = TimedeltaIndex(["0 days", "1 days"], freq=None) + tm.assert_index_equal(idx_diff, expected) + tm.assert_attr_equal("freq", idx_diff, expected) + + def test_difference_sort(self, sort): + index = TimedeltaIndex( + ["5 days", "3 days", "2 days", "4 days", "1 days", "0 days"] + ) + + other = timedelta_range("1 days", "4 days", freq="D") + idx_diff = index.difference(other, sort) + + expected = TimedeltaIndex(["5 days", "0 days"], freq=None) + + if sort is None: + expected = expected.sort_values() + + tm.assert_index_equal(idx_diff, expected) + tm.assert_attr_equal("freq", idx_diff, expected) + + other = timedelta_range("2 days", "5 days", freq="D") + idx_diff = index.difference(other, sort) + expected = TimedeltaIndex(["1 days", "0 days"], freq=None) + + if sort is None: + expected = expected.sort_values() + + tm.assert_index_equal(idx_diff, expected) + tm.assert_attr_equal("freq", idx_diff, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_timedelta.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_timedelta.py new file mode 100644 index 00000000..6af43829 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -0,0 +1,154 @@ +from datetime import timedelta + +import numpy as np +import pytest + +from pandas import ( + Index, + NaT, + Series, + Timedelta, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.arrays import TimedeltaArray + + +class TestTimedeltaIndex: + @pytest.fixture + def index(self): + return tm.makeTimedeltaIndex(10) + + def test_misc_coverage(self): + rng = timedelta_range("1 day", periods=5) + result = rng.groupby(rng.days) + assert isinstance(next(iter(result.values()))[0], Timedelta) + + def test_map(self): + # test_map_dictlike generally tests + + rng = timedelta_range("1 day", periods=10) + + f = lambda x: x.days + result = rng.map(f) + exp = Index([f(x) for x in rng], dtype=np.int64) + tm.assert_index_equal(result, exp) + + def test_pass_TimedeltaIndex_to_index(self): + rng = timedelta_range("1 days", "10 days") + idx = Index(rng, dtype=object) + + expected = Index(rng.to_pytimedelta(), dtype=object) + + tm.assert_numpy_array_equal(idx.values, expected.values) + + def test_fields(self): + rng = 
timedelta_range("1 days, 10:11:12.100123456", periods=2, freq="s") + tm.assert_index_equal(rng.days, Index([1, 1], dtype=np.int64)) + tm.assert_index_equal( + rng.seconds, + Index([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13], dtype=np.int32), + ) + tm.assert_index_equal( + rng.microseconds, + Index([100 * 1000 + 123, 100 * 1000 + 123], dtype=np.int32), + ) + tm.assert_index_equal(rng.nanoseconds, Index([456, 456], dtype=np.int32)) + + msg = "'TimedeltaIndex' object has no attribute '{}'" + with pytest.raises(AttributeError, match=msg.format("hours")): + rng.hours + with pytest.raises(AttributeError, match=msg.format("minutes")): + rng.minutes + with pytest.raises(AttributeError, match=msg.format("milliseconds")): + rng.milliseconds + + # with nat + s = Series(rng) + s[1] = np.nan + + tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=[0, 1])) + tm.assert_series_equal( + s.dt.seconds, Series([10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1]) + ) + + # preserve name (GH15589) + rng.name = "name" + assert rng.days.name == "name" + + def test_freq_conversion_always_floating(self): + # pre-2.0 td64 astype converted to float64. now for supported units + # (s, ms, us, ns) this converts to the requested dtype. + # This matches TDA and Series + tdi = timedelta_range("1 Day", periods=30) + + res = tdi.astype("m8[s]") + exp_values = np.asarray(tdi).astype("m8[s]") + exp_tda = TimedeltaArray._simple_new( + exp_values, dtype=exp_values.dtype, freq=tdi.freq + ) + expected = Index(exp_tda) + assert expected.dtype == "m8[s]" + tm.assert_index_equal(res, expected) + + # check this matches Series and TimedeltaArray + res = tdi._data.astype("m8[s]") + tm.assert_equal(res, expected._values) + + res = tdi.to_series().astype("m8[s]") + tm.assert_equal(res._values, expected._values._with_freq(None)) + + def test_freq_conversion(self, index_or_series): + # doc example + + scalar = Timedelta(days=31) + td = index_or_series( + [scalar, scalar, scalar + timedelta(minutes=5, seconds=3), NaT], + dtype="m8[ns]", + ) + + result = td / np.timedelta64(1, "D") + expected = index_or_series( + [31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan] + ) + tm.assert_equal(result, expected) + + # We don't support "D" reso, so we use the pre-2.0 behavior + # casting to float64 + msg = ( + r"Cannot convert from timedelta64\[ns\] to timedelta64\[D\]. 
" + "Supported resolutions are 's', 'ms', 'us', 'ns'" + ) + with pytest.raises(ValueError, match=msg): + td.astype("timedelta64[D]") + + result = td / np.timedelta64(1, "s") + expected = index_or_series( + [31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3, np.nan] + ) + tm.assert_equal(result, expected) + + exp_values = np.asarray(td).astype("m8[s]") + exp_tda = TimedeltaArray._simple_new(exp_values, dtype=exp_values.dtype) + expected = index_or_series(exp_tda) + assert expected.dtype == "m8[s]" + result = td.astype("timedelta64[s]") + tm.assert_equal(result, expected) + + def test_arithmetic_zero_freq(self): + # GH#51575 don't get a .freq with freq.n = 0 + tdi = timedelta_range(0, periods=100, freq="ns") + result = tdi / 2 + assert result.freq is None + expected = tdi[:50].repeat(2) + tm.assert_index_equal(result, expected) + + result2 = tdi // 2 + assert result2.freq is None + expected2 = expected + tm.assert_index_equal(result2, expected2) + + result3 = tdi * 0 + assert result3.freq is None + expected3 = tdi[:1].repeat(100) + tm.assert_index_equal(result3, expected3) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_timedelta_range.py new file mode 100644 index 00000000..72bdc6da --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_timedelta_range.py @@ -0,0 +1,114 @@ +import numpy as np +import pytest + +from pandas import ( + Timedelta, + timedelta_range, + to_timedelta, +) +import pandas._testing as tm + +from pandas.tseries.offsets import ( + Day, + Second, +) + + +class TestTimedeltas: + def test_timedelta_range_unit(self): + # GH#49824 + tdi = timedelta_range("0 Days", periods=10, freq="100000D", unit="s") + exp_arr = (np.arange(10, dtype="i8") * 100_000).view("m8[D]").astype("m8[s]") + tm.assert_numpy_array_equal(tdi.to_numpy(), exp_arr) + + def test_timedelta_range(self): + expected = to_timedelta(np.arange(5), unit="D") + result = timedelta_range("0 days", periods=5, freq="D") + tm.assert_index_equal(result, expected) + + expected = to_timedelta(np.arange(11), unit="D") + result = timedelta_range("0 days", "10 days", freq="D") + tm.assert_index_equal(result, expected) + + expected = to_timedelta(np.arange(5), unit="D") + Second(2) + Day() + result = timedelta_range("1 days, 00:00:02", "5 days, 00:00:02", freq="D") + tm.assert_index_equal(result, expected) + + expected = to_timedelta([1, 3, 5, 7, 9], unit="D") + Second(2) + result = timedelta_range("1 days, 00:00:02", periods=5, freq="2D") + tm.assert_index_equal(result, expected) + + expected = to_timedelta(np.arange(50), unit="min") * 30 + result = timedelta_range("0 days", freq="30min", periods=50) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "depr_unit, unit", + [ + ("T", "minute"), + ("t", "minute"), + ("L", "millisecond"), + ("l", "millisecond"), + ], + ) + def test_timedelta_units_T_L_deprecated(self, depr_unit, unit): + depr_msg = f"Unit '{depr_unit}' is deprecated." 
+ + expected = to_timedelta(np.arange(5), unit=unit) + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + result = to_timedelta(np.arange(5), unit=depr_unit) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "periods, freq", [(3, "2D"), (5, "D"), (6, "19H12T"), (7, "16H"), (9, "12H")] + ) + def test_linspace_behavior(self, periods, freq): + # GH 20976 + result = timedelta_range(start="0 days", end="4 days", periods=periods) + expected = timedelta_range(start="0 days", end="4 days", freq=freq) + tm.assert_index_equal(result, expected) + + def test_errors(self): + # not enough params + msg = ( + "Of the four parameters: start, end, periods, and freq, " + "exactly three must be specified" + ) + with pytest.raises(ValueError, match=msg): + timedelta_range(start="0 days") + + with pytest.raises(ValueError, match=msg): + timedelta_range(end="5 days") + + with pytest.raises(ValueError, match=msg): + timedelta_range(periods=2) + + with pytest.raises(ValueError, match=msg): + timedelta_range() + + # too many params + with pytest.raises(ValueError, match=msg): + timedelta_range(start="0 days", end="5 days", periods=10, freq="H") + + @pytest.mark.parametrize( + "start, end, freq, expected_periods", + [ + ("1D", "10D", "2D", (10 - 1) // 2 + 1), + ("2D", "30D", "3D", (30 - 2) // 3 + 1), + ("2s", "50s", "5s", (50 - 2) // 5 + 1), + # tests that worked before GH 33498: + ("4D", "16D", "3D", (16 - 4) // 3 + 1), + ("8D", "16D", "40s", (16 * 3600 * 24 - 8 * 3600 * 24) // 40 + 1), + ], + ) + def test_timedelta_range_freq_divide_end(self, start, end, freq, expected_periods): + # GH 33498 only the cases where `(end % freq) == 0` used to fail + res = timedelta_range(start=start, end=end, freq=freq) + assert Timedelta(start) == res[0] + assert Timedelta(end) >= res[-1] + assert len(res) == expected_periods + + def test_timedelta_range_infer_freq(self): + # https://github.com/pandas-dev/pandas/issues/35897 + result = timedelta_range("0s", "1s", periods=31) + assert result.freq is None diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/common.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/common.py new file mode 100644 index 00000000..2af76f69 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/common.py @@ -0,0 +1,40 @@ +""" common utilities """ +from __future__ import annotations + +from typing import ( + Any, + Literal, +) + + +def _mklbl(prefix: str, n: int): + return [f"{prefix}{i}" for i in range(n)] + + +def check_indexing_smoketest_or_raises( + obj, + method: Literal["iloc", "loc"], + key: Any, + axes: Literal[0, 1] | None = None, + fails=None, +) -> None: + if axes is None: + axes_list = [0, 1] + else: + assert axes in [0, 1] + axes_list = [axes] + + for ax in axes_list: + if ax < obj.ndim: + # create a tuple accessor + new_axes = [slice(None)] * obj.ndim + new_axes[ax] = key + axified = tuple(new_axes) + try: + getattr(obj, method).__getitem__(axified) + except (IndexError, TypeError, KeyError) as detail: + # if we are in fails, the ok, otherwise raise it + if fails is not None: + if isinstance(detail, fails): + return + raise diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/conftest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/conftest.py new file mode 
100644 index 00000000..4184c6a0 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/conftest.py @@ -0,0 +1,127 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + date_range, +) + + +@pytest.fixture +def series_ints(): + return Series(np.random.default_rng(2).random(4), index=np.arange(0, 8, 2)) + + +@pytest.fixture +def frame_ints(): + return DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=np.arange(0, 8, 2), + columns=np.arange(0, 12, 3), + ) + + +@pytest.fixture +def series_uints(): + return Series( + np.random.default_rng(2).random(4), + index=Index(np.arange(0, 8, 2, dtype=np.uint64)), + ) + + +@pytest.fixture +def frame_uints(): + return DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=Index(range(0, 8, 2), dtype=np.uint64), + columns=Index(range(0, 12, 3), dtype=np.uint64), + ) + + +@pytest.fixture +def series_labels(): + return Series(np.random.default_rng(2).standard_normal(4), index=list("abcd")) + + +@pytest.fixture +def frame_labels(): + return DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=list("abcd"), + columns=list("ABCD"), + ) + + +@pytest.fixture +def series_ts(): + return Series( + np.random.default_rng(2).standard_normal(4), + index=date_range("20130101", periods=4), + ) + + +@pytest.fixture +def frame_ts(): + return DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=date_range("20130101", periods=4), + ) + + +@pytest.fixture +def series_floats(): + return Series( + np.random.default_rng(2).random(4), + index=Index(range(0, 8, 2), dtype=np.float64), + ) + + +@pytest.fixture +def frame_floats(): + return DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=Index(range(0, 8, 2), dtype=np.float64), + columns=Index(range(0, 12, 3), dtype=np.float64), + ) + + +@pytest.fixture +def series_mixed(): + return Series(np.random.default_rng(2).standard_normal(4), index=[2, 4, "null", 8]) + + +@pytest.fixture +def frame_mixed(): + return DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), index=[2, 4, "null", 8] + ) + + +@pytest.fixture +def frame_empty(): + return DataFrame() + + +@pytest.fixture +def series_empty(): + return Series(dtype=object) + + +@pytest.fixture +def frame_multi(): + return DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=MultiIndex.from_product([[1, 2], [3, 4]]), + columns=MultiIndex.from_product([[5, 6], [7, 8]]), + ) + + +@pytest.fixture +def series_multi(): + return Series( + np.random.default_rng(2).random(4), + index=MultiIndex.from_product([[1, 2], [3, 4]]), + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/interval/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/interval/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/interval/test_interval.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/interval/test_interval.py new file mode 100644 index 00000000..717cb7de --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/interval/test_interval.py @@ -0,0 +1,174 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + IntervalIndex, + Series, +) +import pandas._testing as tm + + +class TestIntervalIndex: + @pytest.fixture + def series_with_interval_index(self): + return Series(np.arange(5), 
IntervalIndex.from_breaks(np.arange(6))) + + def test_getitem_with_scalar(self, series_with_interval_index, indexer_sl): + ser = series_with_interval_index.copy() + + expected = ser.iloc[:3] + tm.assert_series_equal(expected, indexer_sl(ser)[:3]) + tm.assert_series_equal(expected, indexer_sl(ser)[:2.5]) + tm.assert_series_equal(expected, indexer_sl(ser)[0.1:2.5]) + if indexer_sl is tm.loc: + tm.assert_series_equal(expected, ser.loc[-1:3]) + + expected = ser.iloc[1:4] + tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 2.5, 3.5]]) + tm.assert_series_equal(expected, indexer_sl(ser)[[2, 3, 4]]) + tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 3, 4]]) + + expected = ser.iloc[2:5] + tm.assert_series_equal(expected, indexer_sl(ser)[ser >= 2]) + + @pytest.mark.parametrize("direction", ["increasing", "decreasing"]) + def test_getitem_nonoverlapping_monotonic(self, direction, closed, indexer_sl): + tpls = [(0, 1), (2, 3), (4, 5)] + if direction == "decreasing": + tpls = tpls[::-1] + + idx = IntervalIndex.from_tuples(tpls, closed=closed) + ser = Series(list("abc"), idx) + + for key, expected in zip(idx.left, ser): + if idx.closed_left: + assert indexer_sl(ser)[key] == expected + else: + with pytest.raises(KeyError, match=str(key)): + indexer_sl(ser)[key] + + for key, expected in zip(idx.right, ser): + if idx.closed_right: + assert indexer_sl(ser)[key] == expected + else: + with pytest.raises(KeyError, match=str(key)): + indexer_sl(ser)[key] + + for key, expected in zip(idx.mid, ser): + assert indexer_sl(ser)[key] == expected + + def test_getitem_non_matching(self, series_with_interval_index, indexer_sl): + ser = series_with_interval_index.copy() + + # this is a departure from our current + # indexing scheme, but simpler + with pytest.raises(KeyError, match=r"\[-1\] not in index"): + indexer_sl(ser)[[-1, 3, 4, 5]] + + with pytest.raises(KeyError, match=r"\[-1\] not in index"): + indexer_sl(ser)[[-1, 3]] + + @pytest.mark.slow + def test_loc_getitem_large_series(self): + ser = Series( + np.arange(1000000), index=IntervalIndex.from_breaks(np.arange(1000001)) + ) + + result1 = ser.loc[:80000] + result2 = ser.loc[0:80000] + result3 = ser.loc[0:80000:1] + tm.assert_series_equal(result1, result2) + tm.assert_series_equal(result1, result3) + + def test_loc_getitem_frame(self): + # CategoricalIndex with IntervalIndex categories + df = DataFrame({"A": range(10)}) + ser = pd.cut(df.A, 5) + df["B"] = ser + df = df.set_index("B") + + result = df.loc[4] + expected = df.iloc[4:6] + tm.assert_frame_equal(result, expected) + + with pytest.raises(KeyError, match="10"): + df.loc[10] + + # single list-like + result = df.loc[[4]] + expected = df.iloc[4:6] + tm.assert_frame_equal(result, expected) + + # non-unique + result = df.loc[[4, 5]] + expected = df.take([4, 5, 4, 5]) + tm.assert_frame_equal(result, expected) + + with pytest.raises(KeyError, match=r"None of \[\[10\]\] are"): + df.loc[[10]] + + # partial missing + with pytest.raises(KeyError, match=r"\[10\] not in index"): + df.loc[[10, 4]] + + def test_getitem_interval_with_nans(self, frame_or_series, indexer_sl): + # GH#41831 + + index = IntervalIndex([np.nan, np.nan]) + key = index[:-1] + + obj = frame_or_series(range(2), index=index) + if frame_or_series is DataFrame and indexer_sl is tm.setitem: + obj = obj.T + + result = indexer_sl(obj)[key] + expected = obj + + tm.assert_equal(result, expected) + + +class TestIntervalIndexInsideMultiIndex: + def test_mi_intervalindex_slicing_with_scalar(self): + # GH#27456 + ii = IntervalIndex.from_arrays( + [0, 
1, 10, 11, 0, 1, 10, 11], [1, 2, 11, 12, 1, 2, 11, 12], name="MP" + ) + idx = pd.MultiIndex.from_arrays( + [ + pd.Index(["FC", "FC", "FC", "FC", "OWNER", "OWNER", "OWNER", "OWNER"]), + pd.Index( + ["RID1", "RID1", "RID2", "RID2", "RID1", "RID1", "RID2", "RID2"] + ), + ii, + ] + ) + + idx.names = ["Item", "RID", "MP"] + df = DataFrame({"value": [1, 2, 3, 4, 5, 6, 7, 8]}) + df.index = idx + + query_df = DataFrame( + { + "Item": ["FC", "OWNER", "FC", "OWNER", "OWNER"], + "RID": ["RID1", "RID1", "RID1", "RID2", "RID2"], + "MP": [0.2, 1.5, 1.6, 11.1, 10.9], + } + ) + + query_df = query_df.sort_index() + + idx = pd.MultiIndex.from_arrays([query_df.Item, query_df.RID, query_df.MP]) + query_df.index = idx + result = df.value.loc[query_df.index] + + # the IntervalIndex level is indexed with floats, which map to + # the intervals containing them. Matching the behavior we would get + # with _only_ an IntervalIndex, we get an IntervalIndex level back. + sliced_level = ii.take([0, 1, 1, 3, 2]) + expected_index = pd.MultiIndex.from_arrays( + [idx.get_level_values(0), idx.get_level_values(1), sliced_level] + ) + expected = Series([1, 6, 2, 8, 7], index=expected_index, name="value") + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/interval/test_interval_new.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/interval/test_interval_new.py new file mode 100644 index 00000000..62f44a36 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/interval/test_interval_new.py @@ -0,0 +1,229 @@ +import re + +import numpy as np +import pytest + +from pandas.compat import IS64 + +from pandas import ( + Index, + Interval, + IntervalIndex, + Series, +) +import pandas._testing as tm + + +class TestIntervalIndex: + @pytest.fixture + def series_with_interval_index(self): + return Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6))) + + def test_loc_with_interval(self, series_with_interval_index, indexer_sl): + # loc with single label / list of labels: + # - Intervals: only exact matches + # - scalars: those that contain it + + ser = series_with_interval_index.copy() + + expected = 0 + result = indexer_sl(ser)[Interval(0, 1)] + assert result == expected + + expected = ser.iloc[3:5] + result = indexer_sl(ser)[[Interval(3, 4), Interval(4, 5)]] + tm.assert_series_equal(expected, result) + + # missing or not exact + with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='left')")): + indexer_sl(ser)[Interval(3, 5, closed="left")] + + with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")): + indexer_sl(ser)[Interval(3, 5)] + + with pytest.raises( + KeyError, match=re.escape("Interval(-2, 0, closed='right')") + ): + indexer_sl(ser)[Interval(-2, 0)] + + with pytest.raises(KeyError, match=re.escape("Interval(5, 6, closed='right')")): + indexer_sl(ser)[Interval(5, 6)] + + def test_loc_with_scalar(self, series_with_interval_index, indexer_sl): + # loc with single label / list of labels: + # - Intervals: only exact matches + # - scalars: those that contain it + + ser = series_with_interval_index.copy() + + assert indexer_sl(ser)[1] == 0 + assert indexer_sl(ser)[1.5] == 1 + assert indexer_sl(ser)[2] == 1 + + expected = ser.iloc[1:4] + tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 2.5, 3.5]]) + tm.assert_series_equal(expected, indexer_sl(ser)[[2, 3, 4]]) + tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 3, 4]]) + + expected = ser.iloc[[1, 1, 2, 1]] + 
tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 2, 2.5, 1.5]]) + + expected = ser.iloc[2:5] + tm.assert_series_equal(expected, indexer_sl(ser)[ser >= 2]) + + def test_loc_with_slices(self, series_with_interval_index, indexer_sl): + # loc with slices: + # - Interval objects: only works with exact matches + # - scalars: only works for non-overlapping, monotonic intervals, + # and start/stop select location based on the interval that + # contains them: + # (slice_loc(start, stop) == (idx.get_loc(start), idx.get_loc(stop)) + + ser = series_with_interval_index.copy() + + # slice of interval + + expected = ser.iloc[:3] + result = indexer_sl(ser)[Interval(0, 1) : Interval(2, 3)] + tm.assert_series_equal(expected, result) + + expected = ser.iloc[3:] + result = indexer_sl(ser)[Interval(3, 4) :] + tm.assert_series_equal(expected, result) + + msg = "Interval objects are not currently supported" + with pytest.raises(NotImplementedError, match=msg): + indexer_sl(ser)[Interval(3, 6) :] + + with pytest.raises(NotImplementedError, match=msg): + indexer_sl(ser)[Interval(3, 4, closed="left") :] + + def test_slice_step_ne1(self, series_with_interval_index): + # GH#31658 slice of scalar with step != 1 + ser = series_with_interval_index.copy() + expected = ser.iloc[0:4:2] + + result = ser[0:4:2] + tm.assert_series_equal(result, expected) + + result2 = ser[0:4][::2] + tm.assert_series_equal(result2, expected) + + def test_slice_float_start_stop(self, series_with_interval_index): + # GH#31658 slicing with integers is positional, with floats is not + # supported + ser = series_with_interval_index.copy() + + msg = "label-based slicing with step!=1 is not supported for IntervalIndex" + with pytest.raises(ValueError, match=msg): + ser[1.5:9.5:2] + + def test_slice_interval_step(self, series_with_interval_index): + # GH#31658 allows for integer step!=1, not Interval step + ser = series_with_interval_index.copy() + msg = "label-based slicing with step!=1 is not supported for IntervalIndex" + with pytest.raises(ValueError, match=msg): + ser[0 : 4 : Interval(0, 1)] + + def test_loc_with_overlap(self, indexer_sl): + idx = IntervalIndex.from_tuples([(1, 5), (3, 7)]) + ser = Series(range(len(idx)), index=idx) + + # scalar + expected = ser + result = indexer_sl(ser)[4] + tm.assert_series_equal(expected, result) + + result = indexer_sl(ser)[[4]] + tm.assert_series_equal(expected, result) + + # interval + expected = 0 + result = indexer_sl(ser)[Interval(1, 5)] + result == expected + + expected = ser + result = indexer_sl(ser)[[Interval(1, 5), Interval(3, 7)]] + tm.assert_series_equal(expected, result) + + with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")): + indexer_sl(ser)[Interval(3, 5)] + + msg = r"None of \[\[Interval\(3, 5, closed='right'\)\]\]" + with pytest.raises(KeyError, match=msg): + indexer_sl(ser)[[Interval(3, 5)]] + + # slices with interval (only exact matches) + expected = ser + result = indexer_sl(ser)[Interval(1, 5) : Interval(3, 7)] + tm.assert_series_equal(expected, result) + + msg = ( + "'can only get slices from an IntervalIndex if bounds are " + "non-overlapping and all monotonic increasing or decreasing'" + ) + with pytest.raises(KeyError, match=msg): + indexer_sl(ser)[Interval(1, 6) : Interval(3, 8)] + + if indexer_sl is tm.loc: + # slices with scalar raise for overlapping intervals + # TODO KeyError is the appropriate error? 
+ with pytest.raises(KeyError, match=msg): + ser.loc[1:4] + + def test_non_unique(self, indexer_sl): + idx = IntervalIndex.from_tuples([(1, 3), (3, 7)]) + ser = Series(range(len(idx)), index=idx) + + result = indexer_sl(ser)[Interval(1, 3)] + assert result == 0 + + result = indexer_sl(ser)[[Interval(1, 3)]] + expected = ser.iloc[0:1] + tm.assert_series_equal(expected, result) + + def test_non_unique_moar(self, indexer_sl): + idx = IntervalIndex.from_tuples([(1, 3), (1, 3), (3, 7)]) + ser = Series(range(len(idx)), index=idx) + + expected = ser.iloc[[0, 1]] + result = indexer_sl(ser)[Interval(1, 3)] + tm.assert_series_equal(expected, result) + + expected = ser + result = indexer_sl(ser)[Interval(1, 3) :] + tm.assert_series_equal(expected, result) + + expected = ser.iloc[[0, 1]] + result = indexer_sl(ser)[[Interval(1, 3)]] + tm.assert_series_equal(expected, result) + + def test_loc_getitem_missing_key_error_message( + self, frame_or_series, series_with_interval_index + ): + # GH#27365 + ser = series_with_interval_index.copy() + obj = frame_or_series(ser) + with pytest.raises(KeyError, match=r"\[6\]"): + obj.loc[[4, 5, 6]] + + +@pytest.mark.xfail(not IS64, reason="GH 23440") +@pytest.mark.parametrize( + "intervals", + [ + ([Interval(-np.inf, 0.0), Interval(0.0, 1.0)]), + ([Interval(-np.inf, -2.0), Interval(-2.0, -1.0)]), + ([Interval(-1.0, 0.0), Interval(0.0, np.inf)]), + ([Interval(1.0, 2.0), Interval(2.0, np.inf)]), + ], +) +def test_repeating_interval_index_with_infs(intervals): + # GH 46658 + + interval_index = Index(intervals * 51) + + expected = np.arange(1, 102, 2, dtype=np.intp) + result = interval_index.get_indexer_for([intervals[1]]) + + tm.assert_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_chaining_and_caching.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_chaining_and_caching.py new file mode 100644 index 00000000..7adc6976 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_chaining_and_caching.py @@ -0,0 +1,81 @@ +import numpy as np +import pytest + +from pandas.errors import SettingWithCopyError +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + MultiIndex, + Series, +) +import pandas._testing as tm + + +def test_detect_chained_assignment(using_copy_on_write): + # Inplace ops, originally from: + # https://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug + a = [12, 23] + b = [123, None] + c = [1234, 2345] + d = [12345, 23456] + tuples = [("eyes", "left"), ("eyes", "right"), ("ears", "left"), ("ears", "right")] + events = { + ("eyes", "left"): a, + ("eyes", "right"): b, + ("ears", "left"): c, + ("ears", "right"): d, + } + multiind = MultiIndex.from_tuples(tuples, names=["part", "side"]) + zed = DataFrame(events, index=["a", "b"], columns=multiind) + + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + zed["eyes"]["right"].fillna(value=555, inplace=True) + else: + msg = "A value is trying to be set on a copy of a slice from a DataFrame" + with pytest.raises(SettingWithCopyError, match=msg): + zed["eyes"]["right"].fillna(value=555, inplace=True) + + +@td.skip_array_manager_invalid_test # with ArrayManager 
df.loc[0] is not a view +def test_cache_updating(using_copy_on_write): + # 5216 + # make sure that we don't try to set a dead cache + a = np.random.default_rng(2).random((10, 3)) + df = DataFrame(a, columns=["x", "y", "z"]) + df_original = df.copy() + tuples = [(i, j) for i in range(5) for j in range(2)] + index = MultiIndex.from_tuples(tuples) + df.index = index + + # setting via chained assignment + # but actually works, since everything is a view + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df.loc[0]["z"].iloc[0] = 1.0 + assert df.loc[(0, 0), "z"] == df_original.loc[0, "z"] + else: + df.loc[0]["z"].iloc[0] = 1.0 + result = df.loc[(0, 0), "z"] + assert result == 1 + + # correct setting + df.loc[(0, 0), "z"] = 2 + result = df.loc[(0, 0), "z"] + assert result == 2 + + +@pytest.mark.slow +def test_indexer_caching(): + # GH5727 + # make sure that indexers are in the _internal_names_set + n = 1000001 + index = MultiIndex.from_arrays([np.arange(n), np.arange(n)]) + s = Series(np.zeros(n), index=index) + str(s) + + # setitem + expected = Series(np.ones(n), index=index) + s[s == 0] = 1 + tm.assert_series_equal(s, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_datetime.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_datetime.py new file mode 100644 index 00000000..d325971e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_datetime.py @@ -0,0 +1,50 @@ +from datetime import datetime + +import numpy as np + +from pandas import ( + DataFrame, + Index, + MultiIndex, + Period, + Series, + period_range, + to_datetime, +) +import pandas._testing as tm + + +def test_multiindex_period_datetime(): + # GH4861, using datetime in period of multiindex raises exception + + idx1 = Index(["a", "a", "a", "b", "b"]) + idx2 = period_range("2012-01", periods=len(idx1), freq="M") + s = Series(np.random.default_rng(2).standard_normal(len(idx1)), [idx1, idx2]) + + # try Period as index + expected = s.iloc[0] + result = s.loc["a", Period("2012-01")] + assert result == expected + + # try datetime as index + result = s.loc["a", datetime(2012, 1, 1)] + assert result == expected + + +def test_multiindex_datetime_columns(): + # GH35015, using datetime as column indices raises exception + + mi = MultiIndex.from_tuples( + [(to_datetime("02/29/2020"), to_datetime("03/01/2020"))], names=["a", "b"] + ) + + df = DataFrame([], columns=mi) + + expected_df = DataFrame( + [], + columns=MultiIndex.from_arrays( + [[to_datetime("02/29/2020")], [to_datetime("03/01/2020")]], names=["a", "b"] + ), + ) + + tm.assert_frame_equal(df, expected_df) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_getitem.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_getitem.py new file mode 100644 index 00000000..e2fbc065 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_getitem.py @@ -0,0 +1,393 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, +) +import pandas._testing as tm +from pandas.core.indexing import IndexingError + +# ---------------------------------------------------------------------------- +# test indexing of Series with multi-level Index +# ---------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "access_method", + [lambda s, x: s[:, x], lambda s, x: s.loc[:, x], 
lambda s, x: s.xs(x, level=1)], +) +@pytest.mark.parametrize( + "level1_value, expected", + [(0, Series([1], index=[0])), (1, Series([2, 3], index=[1, 2]))], +) +def test_series_getitem_multiindex(access_method, level1_value, expected): + # GH 6018 + # series regression getitem with a multi-index + + mi = MultiIndex.from_tuples([(0, 0), (1, 1), (2, 1)], names=["A", "B"]) + ser = Series([1, 2, 3], index=mi) + expected.index.name = "A" + + result = access_method(ser, level1_value) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("level0_value", ["D", "A"]) +def test_series_getitem_duplicates_multiindex(level0_value): + # GH 5725 the 'A' happens to be a valid Timestamp so the doesn't raise + # the appropriate error, only in PY3 of course! + + index = MultiIndex( + levels=[[level0_value, "B", "C"], [0, 26, 27, 37, 57, 67, 75, 82]], + codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]], + names=["tag", "day"], + ) + arr = np.random.default_rng(2).standard_normal((len(index), 1)) + df = DataFrame(arr, index=index, columns=["val"]) + + # confirm indexing on missing value raises KeyError + if level0_value != "A": + with pytest.raises(KeyError, match=r"^'A'$"): + df.val["A"] + + with pytest.raises(KeyError, match=r"^'X'$"): + df.val["X"] + + result = df.val[level0_value] + expected = Series( + arr.ravel()[0:3], name="val", index=Index([26, 37, 57], name="day") + ) + tm.assert_series_equal(result, expected) + + +def test_series_getitem(multiindex_year_month_day_dataframe_random_data, indexer_sl): + s = multiindex_year_month_day_dataframe_random_data["A"] + expected = s.reindex(s.index[42:65]) + expected.index = expected.index.droplevel(0).droplevel(0) + + result = indexer_sl(s)[2000, 3] + tm.assert_series_equal(result, expected) + + +def test_series_getitem_returns_scalar( + multiindex_year_month_day_dataframe_random_data, indexer_sl +): + s = multiindex_year_month_day_dataframe_random_data["A"] + expected = s.iloc[49] + + result = indexer_sl(s)[2000, 3, 10] + assert result == expected + + +@pytest.mark.parametrize( + "indexer,expected_error,expected_error_msg", + [ + (lambda s: s.__getitem__((2000, 3, 4)), KeyError, r"^\(2000, 3, 4\)$"), + (lambda s: s[(2000, 3, 4)], KeyError, r"^\(2000, 3, 4\)$"), + (lambda s: s.loc[(2000, 3, 4)], KeyError, r"^\(2000, 3, 4\)$"), + (lambda s: s.loc[(2000, 3, 4, 5)], IndexingError, "Too many indexers"), + (lambda s: s.__getitem__(len(s)), KeyError, ""), # match should include len(s) + (lambda s: s[len(s)], KeyError, ""), # match should include len(s) + ( + lambda s: s.iloc[len(s)], + IndexError, + "single positional indexer is out-of-bounds", + ), + ], +) +def test_series_getitem_indexing_errors( + multiindex_year_month_day_dataframe_random_data, + indexer, + expected_error, + expected_error_msg, +): + s = multiindex_year_month_day_dataframe_random_data["A"] + with pytest.raises(expected_error, match=expected_error_msg): + indexer(s) + + +def test_series_getitem_corner_generator( + multiindex_year_month_day_dataframe_random_data, +): + s = multiindex_year_month_day_dataframe_random_data["A"] + result = s[(x > 0 for x in s)] + expected = s[s > 0] + tm.assert_series_equal(result, expected) + + +# ---------------------------------------------------------------------------- +# test indexing of DataFrame with multi-level Index +# ---------------------------------------------------------------------------- + + +def test_getitem_simple(multiindex_dataframe_random_data): + df = multiindex_dataframe_random_data.T + expected = 
df.values[:, 0] + result = df["foo", "one"].values + tm.assert_almost_equal(result, expected) + + +@pytest.mark.parametrize( + "indexer,expected_error_msg", + [ + (lambda df: df[("foo", "four")], r"^\('foo', 'four'\)$"), + (lambda df: df["foobar"], r"^'foobar'$"), + ], +) +def test_frame_getitem_simple_key_error( + multiindex_dataframe_random_data, indexer, expected_error_msg +): + df = multiindex_dataframe_random_data.T + with pytest.raises(KeyError, match=expected_error_msg): + indexer(df) + + +def test_frame_getitem_multicolumn_empty_level(): + df = DataFrame({"a": ["1", "2", "3"], "b": ["2", "3", "4"]}) + df.columns = [ + ["level1 item1", "level1 item2"], + ["", "level2 item2"], + ["level3 item1", "level3 item2"], + ] + + result = df["level1 item1"] + expected = DataFrame( + [["1"], ["2"], ["3"]], index=df.index, columns=["level3 item1"] + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "indexer,expected_slice", + [ + (lambda df: df["foo"], slice(3)), + (lambda df: df["bar"], slice(3, 5)), + (lambda df: df.loc[:, "bar"], slice(3, 5)), + ], +) +def test_frame_getitem_toplevel( + multiindex_dataframe_random_data, indexer, expected_slice +): + df = multiindex_dataframe_random_data.T + expected = df.reindex(columns=df.columns[expected_slice]) + expected.columns = expected.columns.droplevel(0) + result = indexer(df) + tm.assert_frame_equal(result, expected) + + +def test_frame_mixed_depth_get(): + arrays = [ + ["a", "top", "top", "routine1", "routine1", "routine2"], + ["", "OD", "OD", "result1", "result2", "result1"], + ["", "wx", "wy", "", "", ""], + ] + + tuples = sorted(zip(*arrays)) + index = MultiIndex.from_tuples(tuples) + df = DataFrame(np.random.default_rng(2).standard_normal((4, 6)), columns=index) + + result = df["a"] + expected = df["a", "", ""].rename("a") + tm.assert_series_equal(result, expected) + + result = df["routine1", "result1"] + expected = df["routine1", "result1", ""] + expected = expected.rename(("routine1", "result1")) + tm.assert_series_equal(result, expected) + + +def test_frame_getitem_nan_multiindex(nulls_fixture): + # GH#29751 + # loc on a multiindex containing nan values + n = nulls_fixture # for code readability + cols = ["a", "b", "c"] + df = DataFrame( + [[11, n, 13], [21, n, 23], [31, n, 33], [41, n, 43]], + columns=cols, + ).set_index(["a", "b"]) + df["c"] = df["c"].astype("int64") + + idx = (21, n) + result = df.loc[:idx] + expected = DataFrame([[11, n, 13], [21, n, 23]], columns=cols).set_index(["a", "b"]) + expected["c"] = expected["c"].astype("int64") + tm.assert_frame_equal(result, expected) + + result = df.loc[idx:] + expected = DataFrame( + [[21, n, 23], [31, n, 33], [41, n, 43]], columns=cols + ).set_index(["a", "b"]) + expected["c"] = expected["c"].astype("int64") + tm.assert_frame_equal(result, expected) + + idx1, idx2 = (21, n), (31, n) + result = df.loc[idx1:idx2] + expected = DataFrame([[21, n, 23], [31, n, 33]], columns=cols).set_index(["a", "b"]) + expected["c"] = expected["c"].astype("int64") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "indexer,expected", + [ + ( + (["b"], ["bar", np.nan]), + ( + DataFrame( + [[2, 3], [5, 6]], + columns=MultiIndex.from_tuples([("b", "bar"), ("b", np.nan)]), + dtype="int64", + ) + ), + ), + ( + (["a", "b"]), + ( + DataFrame( + [[1, 2, 3], [4, 5, 6]], + columns=MultiIndex.from_tuples( + [("a", "foo"), ("b", "bar"), ("b", np.nan)] + ), + dtype="int64", + ) + ), + ), + ( + (["b"]), + ( + DataFrame( + [[2, 3], [5, 6]], + 
columns=MultiIndex.from_tuples([("b", "bar"), ("b", np.nan)]), + dtype="int64", + ) + ), + ), + ( + (["b"], ["bar"]), + ( + DataFrame( + [[2], [5]], + columns=MultiIndex.from_tuples([("b", "bar")]), + dtype="int64", + ) + ), + ), + ( + (["b"], [np.nan]), + ( + DataFrame( + [[3], [6]], + columns=MultiIndex( + codes=[[1], [-1]], levels=[["a", "b"], ["bar", "foo"]] + ), + dtype="int64", + ) + ), + ), + (("b", np.nan), Series([3, 6], dtype="int64", name=("b", np.nan))), + ], +) +def test_frame_getitem_nan_cols_multiindex( + indexer, + expected, + nulls_fixture, +): + # Slicing MultiIndex including levels with nan values, for more information + # see GH#25154 + df = DataFrame( + [[1, 2, 3], [4, 5, 6]], + columns=MultiIndex.from_tuples( + [("a", "foo"), ("b", "bar"), ("b", nulls_fixture)] + ), + dtype="int64", + ) + + result = df.loc[:, indexer] + tm.assert_equal(result, expected) + + +# ---------------------------------------------------------------------------- +# test indexing of DataFrame with multi-level Index with duplicates +# ---------------------------------------------------------------------------- + + +@pytest.fixture +def dataframe_with_duplicate_index(): + """Fixture for DataFrame used in tests for gh-4145 and gh-4146""" + data = [["a", "d", "e", "c", "f", "b"], [1, 4, 5, 3, 6, 2], [1, 4, 5, 3, 6, 2]] + index = ["h1", "h3", "h5"] + columns = MultiIndex( + levels=[["A", "B"], ["A1", "A2", "B1", "B2"]], + codes=[[0, 0, 0, 1, 1, 1], [0, 3, 3, 0, 1, 2]], + names=["main", "sub"], + ) + return DataFrame(data, index=index, columns=columns) + + +@pytest.mark.parametrize( + "indexer", [lambda df: df[("A", "A1")], lambda df: df.loc[:, ("A", "A1")]] +) +def test_frame_mi_access(dataframe_with_duplicate_index, indexer): + # GH 4145 + df = dataframe_with_duplicate_index + index = Index(["h1", "h3", "h5"]) + columns = MultiIndex.from_tuples([("A", "A1")], names=["main", "sub"]) + expected = DataFrame([["a", 1, 1]], index=columns, columns=index).T + + result = indexer(df) + tm.assert_frame_equal(result, expected) + + +def test_frame_mi_access_returns_series(dataframe_with_duplicate_index): + # GH 4146, not returning a block manager when selecting a unique index + # from a duplicate index + # as of 4879, this returns a Series (which is similar to what happens + # with a non-unique) + df = dataframe_with_duplicate_index + expected = Series(["a", 1, 1], index=["h1", "h3", "h5"], name="A1") + result = df["A"]["A1"] + tm.assert_series_equal(result, expected) + + +def test_frame_mi_access_returns_frame(dataframe_with_duplicate_index): + # selecting a non_unique from the 2nd level + df = dataframe_with_duplicate_index + expected = DataFrame( + [["d", 4, 4], ["e", 5, 5]], + index=Index(["B2", "B2"], name="sub"), + columns=["h1", "h3", "h5"], + ).T + result = df["A"]["B2"] + tm.assert_frame_equal(result, expected) + + +def test_frame_mi_empty_slice(): + # GH 15454 + df = DataFrame(0, index=range(2), columns=MultiIndex.from_product([[1], [2]])) + result = df[[]] + expected = DataFrame( + index=[0, 1], columns=MultiIndex(levels=[[1], [2]], codes=[[], []]) + ) + tm.assert_frame_equal(result, expected) + + +def test_loc_empty_multiindex(): + # GH#36936 + arrays = [["a", "a", "b", "a"], ["a", "a", "b", "b"]] + index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2")) + df = DataFrame([1, 2, 3, 4], index=index, columns=["value"]) + + # loc on empty multiindex == loc with False mask + empty_multiindex = df.loc[df.loc[:, "value"] == 0, :].index + result = df.loc[empty_multiindex, :] + expected = 
df.loc[[False] * len(df.index), :] + tm.assert_frame_equal(result, expected) + + # replacing value with loc on empty multiindex + df.loc[df.loc[df.loc[:, "value"] == 0].index, "value"] = 5 + result = df + expected = DataFrame([1, 2, 3, 4], index=index, columns=["value"]) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_iloc.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_iloc.py new file mode 100644 index 00000000..8939ecc7 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_iloc.py @@ -0,0 +1,171 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, + Series, +) +import pandas._testing as tm + + +@pytest.fixture +def simple_multiindex_dataframe(): + """ + Factory function to create simple 3 x 3 dataframe with + both columns and row MultiIndex using supplied data or + random data by default. + """ + + data = np.random.default_rng(2).standard_normal((3, 3)) + return DataFrame( + data, columns=[[2, 2, 4], [6, 8, 10]], index=[[4, 4, 8], [8, 10, 12]] + ) + + +@pytest.mark.parametrize( + "indexer, expected", + [ + ( + lambda df: df.iloc[0], + lambda arr: Series(arr[0], index=[[2, 2, 4], [6, 8, 10]], name=(4, 8)), + ), + ( + lambda df: df.iloc[2], + lambda arr: Series(arr[2], index=[[2, 2, 4], [6, 8, 10]], name=(8, 12)), + ), + ( + lambda df: df.iloc[:, 2], + lambda arr: Series(arr[:, 2], index=[[4, 4, 8], [8, 10, 12]], name=(4, 10)), + ), + ], +) +def test_iloc_returns_series(indexer, expected, simple_multiindex_dataframe): + df = simple_multiindex_dataframe + arr = df.values + result = indexer(df) + expected = expected(arr) + tm.assert_series_equal(result, expected) + + +def test_iloc_returns_dataframe(simple_multiindex_dataframe): + df = simple_multiindex_dataframe + result = df.iloc[[0, 1]] + expected = df.xs(4, drop_level=False) + tm.assert_frame_equal(result, expected) + + +def test_iloc_returns_scalar(simple_multiindex_dataframe): + df = simple_multiindex_dataframe + arr = df.values + result = df.iloc[2, 2] + expected = arr[2, 2] + assert result == expected + + +def test_iloc_getitem_multiple_items(): + # GH 5528 + tup = zip(*[["a", "a", "b", "b"], ["x", "y", "x", "y"]]) + index = MultiIndex.from_tuples(tup) + df = DataFrame(np.random.default_rng(2).standard_normal((4, 4)), index=index) + result = df.iloc[[2, 3]] + expected = df.xs("b", drop_level=False) + tm.assert_frame_equal(result, expected) + + +def test_iloc_getitem_labels(): + # this is basically regular indexing + arr = np.random.default_rng(2).standard_normal((4, 3)) + df = DataFrame( + arr, + columns=[["i", "i", "j"], ["A", "A", "B"]], + index=[["i", "i", "j", "k"], ["X", "X", "Y", "Y"]], + ) + result = df.iloc[2, 2] + expected = arr[2, 2] + assert result == expected + + +def test_frame_getitem_slice(multiindex_dataframe_random_data): + df = multiindex_dataframe_random_data + result = df.iloc[:4] + expected = df[:4] + tm.assert_frame_equal(result, expected) + + +def test_frame_setitem_slice(multiindex_dataframe_random_data): + df = multiindex_dataframe_random_data + df.iloc[:4] = 0 + + assert (df.values[:4] == 0).all() + assert (df.values[4:] != 0).all() + + +def test_indexing_ambiguity_bug_1678(): + # GH 1678 + columns = MultiIndex.from_tuples( + [("Ohio", "Green"), ("Ohio", "Red"), ("Colorado", "Green")] + ) + index = MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)]) + + df = DataFrame(np.arange(12).reshape((4, 3)), 
index=index, columns=columns) + + result = df.iloc[:, 1] + expected = df.loc[:, ("Ohio", "Red")] + tm.assert_series_equal(result, expected) + + +def test_iloc_integer_locations(): + # GH 13797 + data = [ + ["str00", "str01"], + ["str10", "str11"], + ["str20", "srt21"], + ["str30", "str31"], + ["str40", "str41"], + ] + + index = MultiIndex.from_tuples( + [("CC", "A"), ("CC", "B"), ("CC", "B"), ("BB", "a"), ("BB", "b")] + ) + + expected = DataFrame(data) + df = DataFrame(data, index=index) + + result = DataFrame([[df.iloc[r, c] for c in range(2)] for r in range(5)]) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "data, indexes, values, expected_k", + [ + # test without indexer value in first level of MultiIndex + ([[2, 22, 5], [2, 33, 6]], [0, -1, 1], [2, 3, 1], [7, 10]), + # test like code sample 1 in the issue + ([[1, 22, 555], [1, 33, 666]], [0, -1, 1], [200, 300, 100], [755, 1066]), + # test like code sample 2 in the issue + ([[1, 3, 7], [2, 4, 8]], [0, -1, 1], [10, 10, 1000], [17, 1018]), + # test like code sample 3 in the issue + ([[1, 11, 4], [2, 22, 5], [3, 33, 6]], [0, -1, 1], [4, 7, 10], [8, 15, 13]), + ], +) +def test_iloc_setitem_int_multiindex_series(data, indexes, values, expected_k): + # GH17148 + df = DataFrame(data=data, columns=["i", "j", "k"]) + df = df.set_index(["i", "j"]) + + series = df.k.copy() + for i, v in zip(indexes, values): + series.iloc[i] += v + + df["k"] = expected_k + expected = df.k + tm.assert_series_equal(series, expected) + + +def test_getitem_iloc(multiindex_dataframe_random_data): + df = multiindex_dataframe_random_data + result = df.iloc[2] + expected = df.xs(df.index[2]) + tm.assert_series_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_indexing_slow.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_indexing_slow.py new file mode 100644 index 00000000..c6fc1659 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_indexing_slow.py @@ -0,0 +1,118 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +@pytest.fixture +def m(): + return 5 + + +@pytest.fixture +def n(): + return 100 + + +@pytest.fixture +def cols(): + return ["jim", "joe", "jolie", "joline", "jolia"] + + +@pytest.fixture +def vals(n): + vals = [ + np.random.default_rng(2).integers(0, 10, n), + np.random.default_rng(2).choice(list("abcdefghij"), n), + np.random.default_rng(2).choice( + pd.date_range("20141009", periods=10).tolist(), n + ), + np.random.default_rng(2).choice(list("ZYXWVUTSRQ"), n), + np.random.default_rng(2).standard_normal(n), + ] + vals = list(map(tuple, zip(*vals))) + return vals + + +@pytest.fixture +def keys(n, m, vals): + # bunch of keys for testing + keys = [ + np.random.default_rng(2).integers(0, 11, m), + np.random.default_rng(2).choice(list("abcdefghijk"), m), + np.random.default_rng(2).choice( + pd.date_range("20141009", periods=11).tolist(), m + ), + np.random.default_rng(2).choice(list("ZYXWVUTSRQP"), m), + ] + keys = list(map(tuple, zip(*keys))) + keys += [t[:-1] for t in vals[:: n // m]] + return keys + + +# covers both unique index and non-unique index +@pytest.fixture +def df(vals, cols): + return DataFrame(vals, columns=cols) + + +@pytest.fixture +def a(df): + return pd.concat([df, df]) + + +@pytest.fixture +def b(df, cols): + return df.drop_duplicates(subset=cols[:-1]) + + 
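+# How the fixtures above combine (an illustrative sketch only; the names
+# mirror the fixtures, nothing here is executed):
+#
+#   df = DataFrame(vals, columns=cols)          # 100 random rows
+#   a = pd.concat([df, df])                     # duplicated rows -> non-unique keys
+#   b = df.drop_duplicates(subset=cols[:-1])    # unique keys
+#   mi = a.set_index(cols[:-1])                 # 4-level MultiIndex
+#   mi.loc[keys[0][:2]]                         # the partial lookup tested below
+#
+# `keys` mixes fresh random values (mostly misses) with prefixes of real rows
+# (guaranteed hits), so both the miss branch (`not in mi.index`) and the
+# partial/full-key hit branches of the test are reached.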
+@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") +@pytest.mark.parametrize("lexsort_depth", list(range(5))) +@pytest.mark.parametrize("frame_fixture", ["a", "b"]) +def test_multiindex_get_loc(request, lexsort_depth, keys, frame_fixture, cols): + # GH7724, GH2646 + + frame = request.getfixturevalue(frame_fixture) + if lexsort_depth == 0: + df = frame.copy(deep=False) + else: + df = frame.sort_values(by=cols[:lexsort_depth]) + + mi = df.set_index(cols[:-1]) + assert not mi.index._lexsort_depth < lexsort_depth + for key in keys: + mask = np.ones(len(df), dtype=bool) + + # test for all partials of this key + for i, k in enumerate(key): + mask &= df.iloc[:, i] == k + + if not mask.any(): + assert key[: i + 1] not in mi.index + continue + + assert key[: i + 1] in mi.index + right = df[mask].copy(deep=False) + + if i + 1 != len(key): # partial key + return_value = right.drop(cols[: i + 1], axis=1, inplace=True) + assert return_value is None + return_value = right.set_index(cols[i + 1 : -1], inplace=True) + assert return_value is None + tm.assert_frame_equal(mi.loc[key[: i + 1]], right) + + else: # full key + return_value = right.set_index(cols[:-1], inplace=True) + assert return_value is None + if len(right) == 1: # single hit + right = Series( + right["jolia"].values, name=right.index[0], index=["jolia"] + ) + tm.assert_series_equal(mi.loc[key[: i + 1]], right) + else: # multi hit + tm.assert_frame_equal(mi.loc[key[: i + 1]], right) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_loc.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_loc.py new file mode 100644 index 00000000..c8b10f72 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_loc.py @@ -0,0 +1,979 @@ +import numpy as np +import pytest + +from pandas.errors import ( + IndexingError, + PerformanceWarning, +) + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, +) +import pandas._testing as tm + + +@pytest.fixture +def single_level_multiindex(): + """single level MultiIndex""" + return MultiIndex( + levels=[["foo", "bar", "baz", "qux"]], codes=[[0, 1, 2, 3]], names=["first"] + ) + + +@pytest.fixture +def frame_random_data_integer_multi_index(): + levels = [[0, 1], [0, 1, 2]] + codes = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]] + index = MultiIndex(levels=levels, codes=codes) + return DataFrame(np.random.default_rng(2).standard_normal((6, 2)), index=index) + + +class TestMultiIndexLoc: + def test_loc_setitem_frame_with_multiindex(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + frame.loc[("bar", "two"), "B"] = 5 + assert frame.loc[("bar", "two"), "B"] == 5 + + # with integer labels + df = frame.copy() + df.columns = list(range(3)) + df.loc[("bar", "two"), 1] = 7 + assert df.loc[("bar", "two"), 1] == 7 + + def test_loc_getitem_general(self, any_real_numpy_dtype): + # GH#2817 + dtype = any_real_numpy_dtype + data = { + "amount": {0: 700, 1: 600, 2: 222, 3: 333, 4: 444}, + "col": {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0}, + "num": {0: 12, 1: 11, 2: 12, 3: 12, 4: 12}, + } + df = DataFrame(data) + df = df.astype({"col": dtype, "num": dtype}) + df = df.set_index(keys=["col", "num"]) + key = 4.0, 12 + + # emits a PerformanceWarning, ok + with tm.assert_produces_warning(PerformanceWarning): + tm.assert_frame_equal(df.loc[key], df.iloc[2:]) + + # this is ok + return_value = df.sort_index(inplace=True) + assert return_value is None + res = 
df.loc[key] + + # col has float dtype, result should be float64 Index + col_arr = np.array([4.0] * 3, dtype=dtype) + year_arr = np.array([12] * 3, dtype=dtype) + index = MultiIndex.from_arrays([col_arr, year_arr], names=["col", "num"]) + expected = DataFrame({"amount": [222, 333, 444]}, index=index) + tm.assert_frame_equal(res, expected) + + def test_loc_getitem_multiindex_missing_label_raises(self): + # GH#21593 + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=[[2, 2, 4], [6, 8, 10]], + index=[[4, 4, 8], [8, 10, 12]], + ) + + with pytest.raises(KeyError, match=r"^2$"): + df.loc[2] + + def test_loc_getitem_list_of_tuples_with_multiindex( + self, multiindex_year_month_day_dataframe_random_data + ): + ser = multiindex_year_month_day_dataframe_random_data["A"] + expected = ser.reindex(ser.index[49:51]) + result = ser.loc[[(2000, 3, 10), (2000, 3, 13)]] + tm.assert_series_equal(result, expected) + + def test_loc_getitem_series(self): + # GH14730 + # passing a series as a key with a MultiIndex + index = MultiIndex.from_product([[1, 2, 3], ["A", "B", "C"]]) + x = Series(index=index, data=range(9), dtype=np.float64) + y = Series([1, 3]) + expected = Series( + data=[0, 1, 2, 6, 7, 8], + index=MultiIndex.from_product([[1, 3], ["A", "B", "C"]]), + dtype=np.float64, + ) + result = x.loc[y] + tm.assert_series_equal(result, expected) + + result = x.loc[[1, 3]] + tm.assert_series_equal(result, expected) + + # GH15424 + y1 = Series([1, 3], index=[1, 2]) + result = x.loc[y1] + tm.assert_series_equal(result, expected) + + empty = Series(data=[], dtype=np.float64) + expected = Series( + [], + index=MultiIndex(levels=index.levels, codes=[[], []], dtype=np.float64), + dtype=np.float64, + ) + result = x.loc[empty] + tm.assert_series_equal(result, expected) + + def test_loc_getitem_array(self): + # GH15434 + # passing an array as a key with a MultiIndex + index = MultiIndex.from_product([[1, 2, 3], ["A", "B", "C"]]) + x = Series(index=index, data=range(9), dtype=np.float64) + y = np.array([1, 3]) + expected = Series( + data=[0, 1, 2, 6, 7, 8], + index=MultiIndex.from_product([[1, 3], ["A", "B", "C"]]), + dtype=np.float64, + ) + result = x.loc[y] + tm.assert_series_equal(result, expected) + + # empty array: + empty = np.array([]) + expected = Series( + [], + index=MultiIndex(levels=index.levels, codes=[[], []], dtype=np.float64), + dtype="float64", + ) + result = x.loc[empty] + tm.assert_series_equal(result, expected) + + # 0-dim array (scalar): + scalar = np.int64(1) + expected = Series(data=[0, 1, 2], index=["A", "B", "C"], dtype=np.float64) + result = x.loc[scalar] + tm.assert_series_equal(result, expected) + + def test_loc_multiindex_labels(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=[["i", "i", "j"], ["A", "A", "B"]], + index=[["i", "i", "j"], ["X", "X", "Y"]], + ) + + # the first 2 rows + expected = df.iloc[[0, 1]].droplevel(0) + result = df.loc["i"] + tm.assert_frame_equal(result, expected) + + # 2nd (last) column + expected = df.iloc[:, [2]].droplevel(0, axis=1) + result = df.loc[:, "j"] + tm.assert_frame_equal(result, expected) + + # bottom right corner + expected = df.iloc[[2], [2]].droplevel(0).droplevel(0, axis=1) + result = df.loc["j"].loc[:, "j"] + tm.assert_frame_equal(result, expected) + + # with a tuple + expected = df.iloc[[0, 1]] + result = df.loc[("i", "X")] + tm.assert_frame_equal(result, expected) + + def test_loc_multiindex_ints(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + 
columns=[[2, 2, 4], [6, 8, 10]], + index=[[4, 4, 8], [8, 10, 12]], + ) + expected = df.iloc[[0, 1]].droplevel(0) + result = df.loc[4] + tm.assert_frame_equal(result, expected) + + def test_loc_multiindex_missing_label_raises(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=[[2, 2, 4], [6, 8, 10]], + index=[[4, 4, 8], [8, 10, 12]], + ) + + with pytest.raises(KeyError, match=r"^2$"): + df.loc[2] + + @pytest.mark.parametrize("key, pos", [([2, 4], [0, 1]), ([2], []), ([2, 3], [])]) + def test_loc_multiindex_list_missing_label(self, key, pos): + # GH 27148 - lists with missing labels _do_ raise + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=[[2, 2, 4], [6, 8, 10]], + index=[[4, 4, 8], [8, 10, 12]], + ) + + with pytest.raises(KeyError, match="not in index"): + df.loc[key] + + def test_loc_multiindex_too_many_dims_raises(self): + # GH 14885 + s = Series( + range(8), + index=MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]), + ) + + with pytest.raises(KeyError, match=r"^\('a', 'b'\)$"): + s.loc["a", "b"] + with pytest.raises(KeyError, match=r"^\('a', 'd', 'g'\)$"): + s.loc["a", "d", "g"] + with pytest.raises(IndexingError, match="Too many indexers"): + s.loc["a", "d", "g", "j"] + + def test_loc_multiindex_indexer_none(self): + # GH6788 + # multi-index indexer is None (meaning take all) + attributes = ["Attribute" + str(i) for i in range(1)] + attribute_values = ["Value" + str(i) for i in range(5)] + + index = MultiIndex.from_product([attributes, attribute_values]) + df = 0.1 * np.random.default_rng(2).standard_normal((10, 1 * 5)) + 0.5 + df = DataFrame(df, columns=index) + result = df[attributes] + tm.assert_frame_equal(result, df) + + # GH 7349 + # loc with a multi-index seems to be doing fallback + df = DataFrame( + np.arange(12).reshape(-1, 1), + index=MultiIndex.from_product([[1, 2, 3, 4], [1, 2, 3]]), + ) + + expected = df.loc[([1, 2],), :] + result = df.loc[[1, 2]] + tm.assert_frame_equal(result, expected) + + def test_loc_multiindex_incomplete(self): + # GH 7399 + # incomplete indexers + s = Series( + np.arange(15, dtype="int64"), + MultiIndex.from_product([range(5), ["a", "b", "c"]]), + ) + expected = s.loc[:, "a":"c"] + + result = s.loc[0:4, "a":"c"] + tm.assert_series_equal(result, expected) + + result = s.loc[:4, "a":"c"] + tm.assert_series_equal(result, expected) + + result = s.loc[0:, "a":"c"] + tm.assert_series_equal(result, expected) + + # GH 7400 + # multiindexer getitem with list of indexers skips wrong element + s = Series( + np.arange(15, dtype="int64"), + MultiIndex.from_product([range(5), ["a", "b", "c"]]), + ) + expected = s.iloc[[6, 7, 8, 12, 13, 14]] + result = s.loc[2:4:2, "a":"c"] + tm.assert_series_equal(result, expected) + + def test_get_loc_single_level(self, single_level_multiindex): + single_level = single_level_multiindex + s = Series( + np.random.default_rng(2).standard_normal(len(single_level)), + index=single_level, + ) + for k in single_level.values: + s[k] + + def test_loc_getitem_int_slice(self): + # GH 3053 + # loc should treat integer slices like label slices + + index = MultiIndex.from_product([[6, 7, 8], ["a", "b"]]) + df = DataFrame(np.random.default_rng(2).standard_normal((6, 6)), index, index) + result = df.loc[6:8, :] + expected = df + tm.assert_frame_equal(result, expected) + + index = MultiIndex.from_product([[10, 20, 30], ["a", "b"]]) + df = DataFrame(np.random.default_rng(2).standard_normal((6, 6)), index, index) + result = df.loc[20:30, :] + expected = df.iloc[2:] + 
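+        # .loc slices by label, and both endpoints are inclusive: 20:30 keeps
+        # level-0 labels 20 and 30, i.e. the last four of the six rows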
tm.assert_frame_equal(result, expected) + + # doc examples + result = df.loc[10, :] + expected = df.iloc[0:2] + expected.index = ["a", "b"] + tm.assert_frame_equal(result, expected) + + result = df.loc[:, 10] + expected = df[10] + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "indexer_type_1", (list, tuple, set, slice, np.ndarray, Series, Index) + ) + @pytest.mark.parametrize( + "indexer_type_2", (list, tuple, set, slice, np.ndarray, Series, Index) + ) + def test_loc_getitem_nested_indexer(self, indexer_type_1, indexer_type_2): + # GH #19686 + # .loc should work with nested indexers which can be + # any list-like objects (see `is_list_like` (`pandas.api.types`)) or slices + + def convert_nested_indexer(indexer_type, keys): + if indexer_type == np.ndarray: + return np.array(keys) + if indexer_type == slice: + return slice(*keys) + return indexer_type(keys) + + a = [10, 20, 30] + b = [1, 2, 3] + index = MultiIndex.from_product([a, b]) + df = DataFrame( + np.arange(len(index), dtype="int64"), index=index, columns=["Data"] + ) + + keys = ([10, 20], [2, 3]) + types = (indexer_type_1, indexer_type_2) + + # check indexers with all the combinations of nested objects + # of all the valid types + indexer = tuple( + convert_nested_indexer(indexer_type, k) + for indexer_type, k in zip(types, keys) + ) + if indexer_type_1 is set or indexer_type_2 is set: + with pytest.raises(TypeError, match="as an indexer is not supported"): + df.loc[indexer, "Data"] + + return + else: + result = df.loc[indexer, "Data"] + expected = Series( + [1, 2, 4, 5], name="Data", index=MultiIndex.from_product(keys) + ) + + tm.assert_series_equal(result, expected) + + def test_multiindex_loc_one_dimensional_tuple(self, frame_or_series): + # GH#37711 + mi = MultiIndex.from_tuples([("a", "A"), ("b", "A")]) + obj = frame_or_series([1, 2], index=mi) + obj.loc[("a",)] = 0 + expected = frame_or_series([0, 2], index=mi) + tm.assert_equal(obj, expected) + + @pytest.mark.parametrize("indexer", [("a",), ("a")]) + def test_multiindex_one_dimensional_tuple_columns(self, indexer): + # GH#37711 + mi = MultiIndex.from_tuples([("a", "A"), ("b", "A")]) + obj = DataFrame([1, 2], index=mi) + obj.loc[indexer, :] = 0 + expected = DataFrame([0, 2], index=mi) + tm.assert_frame_equal(obj, expected) + + @pytest.mark.parametrize( + "indexer, exp_value", [(slice(None), 1.0), ((1, 2), np.nan)] + ) + def test_multiindex_setitem_columns_enlarging(self, indexer, exp_value): + # GH#39147 + mi = MultiIndex.from_tuples([(1, 2), (3, 4)]) + df = DataFrame([[1, 2], [3, 4]], index=mi, columns=["a", "b"]) + df.loc[indexer, ["c", "d"]] = 1.0 + expected = DataFrame( + [[1, 2, 1.0, 1.0], [3, 4, exp_value, exp_value]], + index=mi, + columns=["a", "b", "c", "d"], + ) + tm.assert_frame_equal(df, expected) + + def test_sorted_multiindex_after_union(self): + # GH#44752 + midx = MultiIndex.from_product( + [pd.date_range("20110101", periods=2), Index(["a", "b"])] + ) + ser1 = Series(1, index=midx) + ser2 = Series(1, index=midx[:2]) + df = pd.concat([ser1, ser2], axis=1) + expected = df.copy() + result = df.loc["2011-01-01":"2011-01-02"] + tm.assert_frame_equal(result, expected) + + df = DataFrame({0: ser1, 1: ser2}) + result = df.loc["2011-01-01":"2011-01-02"] + tm.assert_frame_equal(result, expected) + + df = pd.concat([ser1, ser2.reindex(ser1.index)], axis=1) + result = df.loc["2011-01-01":"2011-01-02"] + tm.assert_frame_equal(result, expected) + + def test_loc_no_second_level_index(self): + # GH#43599 + df = DataFrame( + 
index=MultiIndex.from_product([list("ab"), list("cd"), list("e")]), + columns=["Val"], + ) + res = df.loc[np.s_[:, "c", :]] + expected = DataFrame( + index=MultiIndex.from_product([list("ab"), list("e")]), columns=["Val"] + ) + tm.assert_frame_equal(res, expected) + + def test_loc_multi_index_key_error(self): + # GH 51892 + df = DataFrame( + { + (1, 2): ["a", "b", "c"], + (1, 3): ["d", "e", "f"], + (2, 2): ["g", "h", "i"], + (2, 4): ["j", "k", "l"], + } + ) + with pytest.raises(KeyError, match=r"(1, 4)"): + df.loc[0, (1, 4)] + + +@pytest.mark.parametrize( + "indexer, pos", + [ + ([], []), # empty ok + (["A"], slice(3)), + (["A", "D"], []), # "D" isn't present -> raise + (["D", "E"], []), # no values found -> raise + (["D"], []), # same, with single item list: GH 27148 + (pd.IndexSlice[:, ["foo"]], slice(2, None, 3)), + (pd.IndexSlice[:, ["foo", "bah"]], slice(2, None, 3)), + ], +) +def test_loc_getitem_duplicates_multiindex_missing_indexers(indexer, pos): + # GH 7866 + # multi-index slicing with missing indexers + idx = MultiIndex.from_product( + [["A", "B", "C"], ["foo", "bar", "baz"]], names=["one", "two"] + ) + ser = Series(np.arange(9, dtype="int64"), index=idx).sort_index() + expected = ser.iloc[pos] + + if expected.size == 0 and indexer != []: + with pytest.raises(KeyError, match=str(indexer)): + ser.loc[indexer] + elif indexer == (slice(None), ["foo", "bah"]): + # "bah" is not in idx.levels[1], raising KeyError enforced in 2.0 + with pytest.raises(KeyError, match="'bah'"): + ser.loc[indexer] + else: + result = ser.loc[indexer] + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("columns_indexer", [([], slice(None)), (["foo"], [])]) +def test_loc_getitem_duplicates_multiindex_empty_indexer(columns_indexer): + # GH 8737 + # empty indexer + multi_index = MultiIndex.from_product((["foo", "bar", "baz"], ["alpha", "beta"])) + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 6)), + index=range(5), + columns=multi_index, + ) + df = df.sort_index(level=0, axis=1) + + expected = DataFrame(index=range(5), columns=multi_index.reindex([])[0]) + result = df.loc[:, columns_indexer] + tm.assert_frame_equal(result, expected) + + +def test_loc_getitem_duplicates_multiindex_non_scalar_type_object(): + # regression from < 0.14.0 + # GH 7914 + df = DataFrame( + [[np.mean, np.median], ["mean", "median"]], + columns=MultiIndex.from_tuples([("functs", "mean"), ("functs", "median")]), + index=["function", "name"], + ) + result = df.loc["function", ("functs", "mean")] + expected = np.mean + assert result == expected + + +def test_loc_getitem_tuple_plus_slice(): + # GH 671 + df = DataFrame( + { + "a": np.arange(10), + "b": np.arange(10), + "c": np.random.default_rng(2).standard_normal(10), + "d": np.random.default_rng(2).standard_normal(10), + } + ).set_index(["a", "b"]) + expected = df.loc[0, 0] + result = df.loc[(0, 0), :] + tm.assert_series_equal(result, expected) + + +def test_loc_getitem_int(frame_random_data_integer_multi_index): + df = frame_random_data_integer_multi_index + result = df.loc[1] + expected = df[-3:] + expected.index = expected.index.droplevel(0) + tm.assert_frame_equal(result, expected) + + +def test_loc_getitem_int_raises_exception(frame_random_data_integer_multi_index): + df = frame_random_data_integer_multi_index + with pytest.raises(KeyError, match=r"^3$"): + df.loc[3] + + +def test_loc_getitem_lowerdim_corner(multiindex_dataframe_random_data): + df = multiindex_dataframe_random_data + + # test setup - check key not in dataframe + with 
pytest.raises(KeyError, match=r"^\('bar', 'three'\)$"): + df.loc[("bar", "three"), "B"] + + # in theory should be inserting in a sorted space???? + df.loc[("bar", "three"), "B"] = 0 + expected = 0 + result = df.sort_index().loc[("bar", "three"), "B"] + assert result == expected + + +def test_loc_setitem_single_column_slice(): + # case from https://github.com/pandas-dev/pandas/issues/27841 + df = DataFrame( + "string", + index=list("abcd"), + columns=MultiIndex.from_product([["Main"], ("another", "one")]), + ) + df["labels"] = "a" + df.loc[:, "labels"] = df.index + tm.assert_numpy_array_equal(np.asarray(df["labels"]), np.asarray(df.index)) + + # test with non-object block + df = DataFrame( + np.nan, + index=range(4), + columns=MultiIndex.from_tuples([("A", "1"), ("A", "2"), ("B", "1")]), + ) + expected = df.copy() + df.loc[:, "B"] = np.arange(4) + expected.iloc[:, 2] = np.arange(4) + tm.assert_frame_equal(df, expected) + + +def test_loc_nan_multiindex(): + # GH 5286 + tups = [ + ("Good Things", "C", np.nan), + ("Good Things", "R", np.nan), + ("Bad Things", "C", np.nan), + ("Bad Things", "T", np.nan), + ("Okay Things", "N", "B"), + ("Okay Things", "N", "D"), + ("Okay Things", "B", np.nan), + ("Okay Things", "D", np.nan), + ] + df = DataFrame( + np.ones((8, 4)), + columns=Index(["d1", "d2", "d3", "d4"]), + index=MultiIndex.from_tuples(tups, names=["u1", "u2", "u3"]), + ) + result = df.loc["Good Things"].loc["C"] + expected = DataFrame( + np.ones((1, 4)), + index=Index([np.nan], dtype="object", name="u3"), + columns=Index(["d1", "d2", "d3", "d4"], dtype="object"), + ) + tm.assert_frame_equal(result, expected) + + +def test_loc_period_string_indexing(): + # GH 9892 + a = pd.period_range("2013Q1", "2013Q4", freq="Q") + i = (1111, 2222, 3333) + idx = MultiIndex.from_product((a, i), names=("Period", "CVR")) + df = DataFrame( + index=idx, + columns=( + "OMS", + "OMK", + "RES", + "DRIFT_IND", + "OEVRIG_IND", + "FIN_IND", + "VARE_UD", + "LOEN_UD", + "FIN_UD", + ), + ) + result = df.loc[("2013Q1", 1111), "OMS"] + + alt = df.loc[(a[0], 1111), "OMS"] + assert np.isnan(alt) + + # Because the resolution of the string matches, it is an exact lookup, + # not a slice + assert np.isnan(result) + + alt = df.loc[("2013Q1", 1111), "OMS"] + assert np.isnan(alt) + + +def test_loc_datetime_mask_slicing(): + # GH 16699 + dt_idx = pd.to_datetime(["2017-05-04", "2017-05-05"]) + m_idx = MultiIndex.from_product([dt_idx, dt_idx], names=["Idx1", "Idx2"]) + df = DataFrame( + data=[[1, 2], [3, 4], [5, 6], [7, 6]], index=m_idx, columns=["C1", "C2"] + ) + result = df.loc[(dt_idx[0], (df.index.get_level_values(1) > "2017-05-04")), "C1"] + expected = Series( + [3], + name="C1", + index=MultiIndex.from_tuples( + [(pd.Timestamp("2017-05-04"), pd.Timestamp("2017-05-05"))], + names=["Idx1", "Idx2"], + ), + ) + tm.assert_series_equal(result, expected) + + +def test_loc_datetime_series_tuple_slicing(): + # https://github.com/pandas-dev/pandas/issues/35858 + date = pd.Timestamp("2000") + ser = Series( + 1, + index=MultiIndex.from_tuples([("a", date)], names=["a", "b"]), + name="c", + ) + result = ser.loc[:, [date]] + tm.assert_series_equal(result, ser) + + +def test_loc_with_mi_indexer(): + # https://github.com/pandas-dev/pandas/issues/35351 + df = DataFrame( + data=[["a", 1], ["a", 0], ["b", 1], ["c", 2]], + index=MultiIndex.from_tuples( + [(0, 1), (1, 0), (1, 1), (1, 1)], names=["index", "date"] + ), + columns=["author", "price"], + ) + idx = MultiIndex.from_tuples([(0, 1), (1, 1)], names=["index", "date"]) + result = df.loc[idx, :] 
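+    # (1, 1) occurs twice in df's index, so an indexer that lists it once
+    # still returns every matching row -- three rows in total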
+ expected = DataFrame( + [["a", 1], ["b", 1], ["c", 2]], + index=MultiIndex.from_tuples([(0, 1), (1, 1), (1, 1)], names=["index", "date"]), + columns=["author", "price"], + ) + tm.assert_frame_equal(result, expected) + + +def test_loc_mi_with_level1_named_0(): + # GH#37194 + dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific") + + ser = Series(range(3), index=dti) + df = ser.to_frame() + df[1] = dti + + df2 = df.set_index(0, append=True) + assert df2.index.names == (None, 0) + df2.index.get_loc(dti[0]) # smoke test + + result = df2.loc[dti[0]] + expected = df2.iloc[[0]].droplevel(None) + tm.assert_frame_equal(result, expected) + + ser2 = df2[1] + assert ser2.index.names == (None, 0) + + result = ser2.loc[dti[0]] + expected = ser2.iloc[[0]].droplevel(None) + tm.assert_series_equal(result, expected) + + +def test_getitem_str_slice(datapath): + # GH#15928 + path = datapath("reshape", "merge", "data", "quotes2.csv") + df = pd.read_csv(path, parse_dates=["time"]) + df2 = df.set_index(["ticker", "time"]).sort_index() + + res = df2.loc[("AAPL", slice("2016-05-25 13:30:00")), :].droplevel(0) + expected = df2.loc["AAPL"].loc[slice("2016-05-25 13:30:00"), :] + tm.assert_frame_equal(res, expected) + + +def test_3levels_leading_period_index(): + # GH#24091 + pi = pd.PeriodIndex( + ["20181101 1100", "20181101 1200", "20181102 1300", "20181102 1400"], + name="datetime", + freq="D", + ) + lev2 = ["A", "A", "Z", "W"] + lev3 = ["B", "C", "Q", "F"] + mi = MultiIndex.from_arrays([pi, lev2, lev3]) + + ser = Series(range(4), index=mi, dtype=np.float64) + result = ser.loc[(pi[0], "A", "B")] + assert result == 0.0 + + +class TestKeyErrorsWithMultiIndex: + def test_missing_keys_raises_keyerror(self): + # GH#27420 KeyError, not TypeError + df = DataFrame(np.arange(12).reshape(4, 3), columns=["A", "B", "C"]) + df2 = df.set_index(["A", "B"]) + + with pytest.raises(KeyError, match="1"): + df2.loc[(1, 6)] + + def test_missing_key_raises_keyerror2(self): + # GH#21168 KeyError, not "IndexingError: Too many indexers" + ser = Series(-1, index=MultiIndex.from_product([[0, 1]] * 2)) + + with pytest.raises(KeyError, match=r"\(0, 3\)"): + ser.loc[0, 3] + + def test_missing_key_combination(self): + # GH: 19556 + mi = MultiIndex.from_arrays( + [ + np.array(["a", "a", "b", "b"]), + np.array(["1", "2", "2", "3"]), + np.array(["c", "d", "c", "d"]), + ], + names=["one", "two", "three"], + ) + df = DataFrame(np.random.default_rng(2).random((4, 3)), index=mi) + msg = r"\('b', '1', slice\(None, None, None\)\)" + with pytest.raises(KeyError, match=msg): + df.loc[("b", "1", slice(None)), :] + with pytest.raises(KeyError, match=msg): + df.index.get_locs(("b", "1", slice(None))) + with pytest.raises(KeyError, match=r"\('b', '1'\)"): + df.loc[("b", "1"), :] + + +def test_getitem_loc_commutability(multiindex_year_month_day_dataframe_random_data): + df = multiindex_year_month_day_dataframe_random_data + ser = df["A"] + result = ser[2000, 5] + expected = df.loc[2000, 5]["A"] + tm.assert_series_equal(result, expected) + + +def test_loc_with_nan(): + # GH: 27104 + df = DataFrame( + {"col": [1, 2, 5], "ind1": ["a", "d", np.nan], "ind2": [1, 4, 5]} + ).set_index(["ind1", "ind2"]) + result = df.loc[["a"]] + expected = DataFrame( + {"col": [1]}, index=MultiIndex.from_tuples([("a", 1)], names=["ind1", "ind2"]) + ) + tm.assert_frame_equal(result, expected) + + result = df.loc["a"] + expected = DataFrame({"col": [1]}, index=Index([1], name="ind2")) + tm.assert_frame_equal(result, expected) + + +def test_getitem_non_found_tuple(): + # GH: 
25236 + df = DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"]).set_index( + ["a", "b", "c"] + ) + with pytest.raises(KeyError, match=r"\(2\.0, 2\.0, 3\.0\)"): + df.loc[(2.0, 2.0, 3.0)] + + +def test_get_loc_datetime_index(): + # GH#24263 + index = pd.date_range("2001-01-01", periods=100) + mi = MultiIndex.from_arrays([index]) + # Check if get_loc matches for Index and MultiIndex + assert mi.get_loc("2001-01") == slice(0, 31, None) + assert index.get_loc("2001-01") == slice(0, 31, None) + + loc = mi[::2].get_loc("2001-01") + expected = index[::2].get_loc("2001-01") + assert loc == expected + + loc = mi.repeat(2).get_loc("2001-01") + expected = index.repeat(2).get_loc("2001-01") + assert loc == expected + + loc = mi.append(mi).get_loc("2001-01") + expected = index.append(index).get_loc("2001-01") + # TODO: standardize return type for MultiIndex.get_loc + tm.assert_numpy_array_equal(loc.nonzero()[0], expected) + + +def test_loc_setitem_indexer_differently_ordered(): + # GH#34603 + mi = MultiIndex.from_product([["a", "b"], [0, 1]]) + df = DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=mi) + + indexer = ("a", [1, 0]) + df.loc[indexer, :] = np.array([[9, 10], [11, 12]]) + expected = DataFrame([[11, 12], [9, 10], [5, 6], [7, 8]], index=mi) + tm.assert_frame_equal(df, expected) + + +def test_loc_getitem_index_differently_ordered_slice_none(): + # GH#31330 + df = DataFrame( + [[1, 2], [3, 4], [5, 6], [7, 8]], + index=[["a", "a", "b", "b"], [1, 2, 1, 2]], + columns=["a", "b"], + ) + result = df.loc[(slice(None), [2, 1]), :] + expected = DataFrame( + [[3, 4], [7, 8], [1, 2], [5, 6]], + index=[["a", "b", "a", "b"], [2, 2, 1, 1]], + columns=["a", "b"], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("indexer", [[1, 2, 7, 6, 2, 3, 8, 7], [1, 2, 7, 6, 3, 8]]) +def test_loc_getitem_index_differently_ordered_slice_none_duplicates(indexer): + # GH#40978 + df = DataFrame( + [1] * 8, + index=MultiIndex.from_tuples( + [(1, 1), (1, 2), (1, 7), (1, 6), (2, 2), (2, 3), (2, 8), (2, 7)] + ), + columns=["a"], + ) + result = df.loc[(slice(None), indexer), :] + expected = DataFrame( + [1] * 8, + index=[[1, 1, 2, 1, 2, 1, 2, 2], [1, 2, 2, 7, 7, 6, 3, 8]], + columns=["a"], + ) + tm.assert_frame_equal(result, expected) + + result = df.loc[df.index.isin(indexer, level=1), :] + tm.assert_frame_equal(result, df) + + +def test_loc_getitem_drops_levels_for_one_row_dataframe(): + # GH#10521 "x" and "z" are both scalar indexing, so those levels are dropped + mi = MultiIndex.from_arrays([["x"], ["y"], ["z"]], names=["a", "b", "c"]) + df = DataFrame({"d": [0]}, index=mi) + expected = df.droplevel([0, 2]) + result = df.loc["x", :, "z"] + tm.assert_frame_equal(result, expected) + + ser = Series([0], index=mi) + result = ser.loc["x", :, "z"] + expected = Series([0], index=Index(["y"], name="b")) + tm.assert_series_equal(result, expected) + + +def test_mi_columns_loc_list_label_order(): + # GH 10710 + cols = MultiIndex.from_product([["A", "B", "C"], [1, 2]]) + df = DataFrame(np.zeros((5, 6)), columns=cols) + result = df.loc[:, ["B", "A"]] + expected = DataFrame( + np.zeros((5, 4)), + columns=MultiIndex.from_tuples([("B", 1), ("B", 2), ("A", 1), ("A", 2)]), + ) + tm.assert_frame_equal(result, expected) + + +def test_mi_partial_indexing_list_raises(): + # GH 13501 + frame = DataFrame( + np.arange(12).reshape((4, 3)), + index=[["a", "a", "b", "b"], [1, 2, 1, 2]], + columns=[["Ohio", "Ohio", "Colorado"], ["Green", "Red", "Green"]], + ) + frame.index.names = ["key1", "key2"] + frame.columns.names = 
["state", "color"] + with pytest.raises(KeyError, match="\\[2\\] not in index"): + frame.loc[["b", 2], "Colorado"] + + +def test_mi_indexing_list_nonexistent_raises(): + # GH 15452 + s = Series(range(4), index=MultiIndex.from_product([[1, 2], ["a", "b"]])) + with pytest.raises(KeyError, match="\\['not' 'found'\\] not in index"): + s.loc[["not", "found"]] + + +def test_mi_add_cell_missing_row_non_unique(): + # GH 16018 + result = DataFrame( + [[1, 2, 5, 6], [3, 4, 7, 8]], + index=["a", "a"], + columns=MultiIndex.from_product([[1, 2], ["A", "B"]]), + ) + result.loc["c"] = -1 + result.loc["c", (1, "A")] = 3 + result.loc["d", (1, "A")] = 3 + expected = DataFrame( + [ + [1.0, 2.0, 5.0, 6.0], + [3.0, 4.0, 7.0, 8.0], + [3.0, -1.0, -1, -1], + [3.0, np.nan, np.nan, np.nan], + ], + index=["a", "a", "c", "d"], + columns=MultiIndex.from_product([[1, 2], ["A", "B"]]), + ) + tm.assert_frame_equal(result, expected) + + +def test_loc_get_scalar_casting_to_float(): + # GH#41369 + df = DataFrame( + {"a": 1.0, "b": 2}, index=MultiIndex.from_arrays([[3], [4]], names=["c", "d"]) + ) + result = df.loc[(3, 4), "b"] + assert result == 2 + assert isinstance(result, np.int64) + result = df.loc[[(3, 4)], "b"].iloc[0] + assert result == 2 + assert isinstance(result, np.int64) + + +def test_loc_empty_single_selector_with_names(): + # GH 19517 + idx = MultiIndex.from_product([["a", "b"], ["A", "B"]], names=[1, 0]) + s2 = Series(index=idx, dtype=np.float64) + result = s2.loc["a"] + expected = Series([np.nan, np.nan], index=Index(["A", "B"], name=0)) + tm.assert_series_equal(result, expected) + + +def test_loc_keyerror_rightmost_key_missing(): + # GH 20951 + + df = DataFrame( + { + "A": [100, 100, 200, 200, 300, 300], + "B": [10, 10, 20, 21, 31, 33], + "C": range(6), + } + ) + df = df.set_index(["A", "B"]) + with pytest.raises(KeyError, match="^1$"): + df.loc[(100, 1)] + + +def test_multindex_series_loc_with_tuple_label(): + # GH#43908 + mi = MultiIndex.from_tuples([(1, 2), (3, (4, 5))]) + ser = Series([1, 2], index=mi) + result = ser.loc[(3, (4, 5))] + assert result == 2 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_multiindex.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_multiindex.py new file mode 100644 index 00000000..3d2ed1d1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_multiindex.py @@ -0,0 +1,236 @@ +import numpy as np +import pytest + +import pandas._libs.index as _index +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import ( + CategoricalDtype, + DataFrame, + Index, + MultiIndex, + Series, +) +import pandas._testing as tm +from pandas.core.arrays.boolean import BooleanDtype + + +class TestMultiIndexBasic: + def test_multiindex_perf_warn(self): + df = DataFrame( + { + "jim": [0, 0, 1, 1], + "joe": ["x", "x", "z", "y"], + "jolie": np.random.default_rng(2).random(4), + } + ).set_index(["jim", "joe"]) + + with tm.assert_produces_warning(PerformanceWarning): + df.loc[(1, "z")] + + df = df.iloc[[2, 1, 3, 0]] + with tm.assert_produces_warning(PerformanceWarning): + df.loc[(0,)] + + def test_indexing_over_hashtable_size_cutoff(self): + n = 10000 + + old_cutoff = _index._SIZE_CUTOFF + _index._SIZE_CUTOFF = 20000 + + s = Series(np.arange(n), MultiIndex.from_arrays((["a"] * n, np.arange(n)))) + + # hai it works! 
+ assert s[("a", 5)] == 5 + assert s[("a", 6)] == 6 + assert s[("a", 7)] == 7 + + _index._SIZE_CUTOFF = old_cutoff + + def test_multi_nan_indexing(self): + # GH 3588 + df = DataFrame( + { + "a": ["R1", "R2", np.nan, "R4"], + "b": ["C1", "C2", "C3", "C4"], + "c": [10, 15, np.nan, 20], + } + ) + result = df.set_index(["a", "b"], drop=False) + expected = DataFrame( + { + "a": ["R1", "R2", np.nan, "R4"], + "b": ["C1", "C2", "C3", "C4"], + "c": [10, 15, np.nan, 20], + }, + index=[ + Index(["R1", "R2", np.nan, "R4"], name="a"), + Index(["C1", "C2", "C3", "C4"], name="b"), + ], + ) + tm.assert_frame_equal(result, expected) + + def test_exclusive_nat_column_indexing(self): + # GH 38025 + # test multi indexing when one column exclusively contains NaT values + df = DataFrame( + { + "a": [pd.NaT, pd.NaT, pd.NaT, pd.NaT], + "b": ["C1", "C2", "C3", "C4"], + "c": [10, 15, np.nan, 20], + } + ) + df = df.set_index(["a", "b"]) + expected = DataFrame( + { + "c": [10, 15, np.nan, 20], + }, + index=[ + Index([pd.NaT, pd.NaT, pd.NaT, pd.NaT], name="a"), + Index(["C1", "C2", "C3", "C4"], name="b"), + ], + ) + tm.assert_frame_equal(df, expected) + + def test_nested_tuples_duplicates(self): + # GH#30892 + + dti = pd.to_datetime(["20190101", "20190101", "20190102"]) + idx = Index(["a", "a", "c"]) + mi = MultiIndex.from_arrays([dti, idx], names=["index1", "index2"]) + + df = DataFrame({"c1": [1, 2, 3], "c2": [np.nan, np.nan, np.nan]}, index=mi) + + expected = DataFrame({"c1": df["c1"], "c2": [1.0, 1.0, np.nan]}, index=mi) + + df2 = df.copy(deep=True) + df2.loc[(dti[0], "a"), "c2"] = 1.0 + tm.assert_frame_equal(df2, expected) + + df3 = df.copy(deep=True) + df3.loc[[(dti[0], "a")], "c2"] = 1.0 + tm.assert_frame_equal(df3, expected) + + def test_multiindex_with_datatime_level_preserves_freq(self): + # https://github.com/pandas-dev/pandas/issues/35563 + idx = Index(range(2), name="A") + dti = pd.date_range("2020-01-01", periods=7, freq="D", name="B") + mi = MultiIndex.from_product([idx, dti]) + df = DataFrame(np.random.default_rng(2).standard_normal((14, 2)), index=mi) + result = df.loc[0].index + tm.assert_index_equal(result, dti) + assert result.freq == dti.freq + + def test_multiindex_complex(self): + # GH#42145 + complex_data = [1 + 2j, 4 - 3j, 10 - 1j] + non_complex_data = [3, 4, 5] + result = DataFrame( + { + "x": complex_data, + "y": non_complex_data, + "z": non_complex_data, + } + ) + result.set_index(["x", "y"], inplace=True) + expected = DataFrame( + {"z": non_complex_data}, + index=MultiIndex.from_arrays( + [complex_data, non_complex_data], + names=("x", "y"), + ), + ) + tm.assert_frame_equal(result, expected) + + def test_rename_multiindex_with_duplicates(self): + # GH 38015 + mi = MultiIndex.from_tuples([("A", "cat"), ("B", "cat"), ("B", "cat")]) + df = DataFrame(index=mi) + df = df.rename(index={"A": "Apple"}, level=0) + + mi2 = MultiIndex.from_tuples([("Apple", "cat"), ("B", "cat"), ("B", "cat")]) + expected = DataFrame(index=mi2) + tm.assert_frame_equal(df, expected) + + def test_series_align_multiindex_with_nan_overlap_only(self): + # GH 38439 + mi1 = MultiIndex.from_arrays([[81.0, np.nan], [np.nan, np.nan]]) + mi2 = MultiIndex.from_arrays([[np.nan, 82.0], [np.nan, np.nan]]) + ser1 = Series([1, 2], index=mi1) + ser2 = Series([1, 2], index=mi2) + result1, result2 = ser1.align(ser2) + + mi = MultiIndex.from_arrays([[81.0, 82.0, np.nan], [np.nan, np.nan, np.nan]]) + expected1 = Series([1.0, np.nan, 2.0], index=mi) + expected2 = Series([np.nan, 2.0, 1.0], index=mi) + + tm.assert_series_equal(result1, 
expected1) + tm.assert_series_equal(result2, expected2) + + def test_series_align_multiindex_with_nan(self): + # GH 38439 + mi1 = MultiIndex.from_arrays([[81.0, np.nan], [np.nan, np.nan]]) + mi2 = MultiIndex.from_arrays([[np.nan, 81.0], [np.nan, np.nan]]) + ser1 = Series([1, 2], index=mi1) + ser2 = Series([1, 2], index=mi2) + result1, result2 = ser1.align(ser2) + + mi = MultiIndex.from_arrays([[81.0, np.nan], [np.nan, np.nan]]) + expected1 = Series([1, 2], index=mi) + expected2 = Series([2, 1], index=mi) + + tm.assert_series_equal(result1, expected1) + tm.assert_series_equal(result2, expected2) + + def test_nunique_smoke(self): + # GH 34019 + n = DataFrame([[1, 2], [1, 2]]).set_index([0, 1]).index.nunique() + assert n == 1 + + def test_multiindex_repeated_keys(self): + # GH19414 + tm.assert_series_equal( + Series([1, 2], MultiIndex.from_arrays([["a", "b"]])).loc[ + ["a", "a", "b", "b"] + ], + Series([1, 1, 2, 2], MultiIndex.from_arrays([["a", "a", "b", "b"]])), + ) + + def test_multiindex_with_na_missing_key(self): + # GH46173 + df = DataFrame.from_dict( + { + ("foo",): [1, 2, 3], + ("bar",): [5, 6, 7], + (None,): [8, 9, 0], + } + ) + with pytest.raises(KeyError, match="missing_key"): + df[[("missing_key",)]] + + def test_multiindex_dtype_preservation(self): + # GH51261 + columns = MultiIndex.from_tuples([("A", "B")], names=["lvl1", "lvl2"]) + df = DataFrame(["value"], columns=columns).astype("category") + df_no_multiindex = df["A"] + assert isinstance(df_no_multiindex["B"].dtype, CategoricalDtype) + + # geopandas 1763 analogue + df = DataFrame( + [[1, 0], [0, 1]], + columns=[ + ["foo", "foo"], + ["location", "location"], + ["x", "y"], + ], + ).assign(bools=Series([True, False], dtype="boolean")) + assert isinstance(df["bools"].dtype, BooleanDtype) + + def test_multiindex_from_tuples_with_nan(self): + # GH#23578 + result = MultiIndex.from_tuples([("a", "b", "c"), np.nan, ("d", "", "")]) + expected = MultiIndex.from_tuples( + [("a", "b", "c"), (np.nan, np.nan, np.nan), ("d", "", "")] + ) + tm.assert_index_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_partial.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_partial.py new file mode 100644 index 00000000..081da385 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_partial.py @@ -0,0 +1,262 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + MultiIndex, + date_range, + to_datetime, +) +import pandas._testing as tm + + +class TestMultiIndexPartial: + def test_getitem_partial_int(self): + # GH 12416 + # with single item + l1 = [10, 20] + l2 = ["a", "b"] + df = DataFrame(index=range(2), columns=MultiIndex.from_product([l1, l2])) + expected = DataFrame(index=range(2), columns=l2) + result = df[20] + tm.assert_frame_equal(result, expected) + + # with list + expected = DataFrame( + index=range(2), columns=MultiIndex.from_product([l1[1:], l2]) + ) + result = df[[20]] + tm.assert_frame_equal(result, expected) + + # missing item: + with pytest.raises(KeyError, match="1"): + df[1] + with pytest.raises(KeyError, match=r"'\[1\] not in index'"): + df[[1]] + + def test_series_slice_partial(self): + pass + + def test_xs_partial( + self, + multiindex_dataframe_random_data, + multiindex_year_month_day_dataframe_random_data, + ): + frame = multiindex_dataframe_random_data + ymd = multiindex_year_month_day_dataframe_random_data + result = 
frame.xs("foo") + result2 = frame.loc["foo"] + expected = frame.T["foo"].T + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, result2) + + result = ymd.xs((2000, 4)) + expected = ymd.loc[2000, 4] + tm.assert_frame_equal(result, expected) + + # ex from #1796 + index = MultiIndex( + levels=[["foo", "bar"], ["one", "two"], [-1, 1]], + codes=[ + [0, 0, 0, 0, 1, 1, 1, 1], + [0, 0, 1, 1, 0, 0, 1, 1], + [0, 1, 0, 1, 0, 1, 0, 1], + ], + ) + df = DataFrame( + np.random.default_rng(2).standard_normal((8, 4)), + index=index, + columns=list("abcd"), + ) + + result = df.xs(("foo", "one")) + expected = df.loc["foo", "one"] + tm.assert_frame_equal(result, expected) + + def test_getitem_partial(self, multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data + ymd = ymd.T + result = ymd[2000, 2] + + expected = ymd.reindex(columns=ymd.columns[ymd.columns.codes[1] == 1]) + expected.columns = expected.columns.droplevel(0).droplevel(0) + tm.assert_frame_equal(result, expected) + + def test_fancy_slice_partial( + self, + multiindex_dataframe_random_data, + multiindex_year_month_day_dataframe_random_data, + ): + frame = multiindex_dataframe_random_data + result = frame.loc["bar":"baz"] + expected = frame[3:7] + tm.assert_frame_equal(result, expected) + + ymd = multiindex_year_month_day_dataframe_random_data + result = ymd.loc[(2000, 2):(2000, 4)] + lev = ymd.index.codes[1] + expected = ymd[(lev >= 1) & (lev <= 3)] + tm.assert_frame_equal(result, expected) + + def test_getitem_partial_column_select(self): + idx = MultiIndex( + codes=[[0, 0, 0], [0, 1, 1], [1, 0, 1]], + levels=[["a", "b"], ["x", "y"], ["p", "q"]], + ) + df = DataFrame(np.random.default_rng(2).random((3, 2)), index=idx) + + result = df.loc[("a", "y"), :] + expected = df.loc[("a", "y")] + tm.assert_frame_equal(result, expected) + + result = df.loc[("a", "y"), [1, 0]] + expected = df.loc[("a", "y")][[1, 0]] + tm.assert_frame_equal(result, expected) + + with pytest.raises(KeyError, match=r"\('a', 'foo'\)"): + df.loc[("a", "foo"), :] + + # TODO(ArrayManager) rewrite test to not use .values + # exp.loc[2000, 4].values[:] select multiple columns -> .values is not a view + @td.skip_array_manager_invalid_test + def test_partial_set( + self, multiindex_year_month_day_dataframe_random_data, using_copy_on_write + ): + # GH #397 + ymd = multiindex_year_month_day_dataframe_random_data + df = ymd.copy() + exp = ymd.copy() + df.loc[2000, 4] = 0 + exp.iloc[65:85] = 0 + tm.assert_frame_equal(df, exp) + + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["A"].loc[2000, 4] = 1 + df.loc[(2000, 4), "A"] = 1 + else: + df["A"].loc[2000, 4] = 1 + exp.iloc[65:85, 0] = 1 + tm.assert_frame_equal(df, exp) + + df.loc[2000] = 5 + exp.iloc[:100] = 5 + tm.assert_frame_equal(df, exp) + + # this works...for now + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["A"].iloc[14] = 5 + df["A"].iloc[14] == exp["A"].iloc[14] + else: + df["A"].iloc[14] = 5 + assert df["A"].iloc[14] == 5 + + @pytest.mark.parametrize("dtype", [int, float]) + def test_getitem_intkey_leading_level( + self, multiindex_year_month_day_dataframe_random_data, dtype + ): + # GH#33355 dont fall-back to positional when leading level is int + ymd = multiindex_year_month_day_dataframe_random_data + levels = ymd.index.levels + ymd.index = ymd.index.set_levels([levels[0].astype(dtype)] + levels[1:]) + ser = ymd["A"] + mi = ser.index + assert isinstance(mi, MultiIndex) + if dtype is int: + assert 
mi.levels[0].dtype == np.dtype(int) + else: + assert mi.levels[0].dtype == np.float64 + + assert 14 not in mi.levels[0] + assert not mi.levels[0]._should_fallback_to_positional + assert not mi._should_fallback_to_positional + + with pytest.raises(KeyError, match="14"): + ser[14] + + # --------------------------------------------------------------------- + + def test_setitem_multiple_partial(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + expected = frame.copy() + result = frame.copy() + result.loc[["foo", "bar"]] = 0 + expected.loc["foo"] = 0 + expected.loc["bar"] = 0 + tm.assert_frame_equal(result, expected) + + expected = frame.copy() + result = frame.copy() + result.loc["foo":"bar"] = 0 + expected.loc["foo"] = 0 + expected.loc["bar"] = 0 + tm.assert_frame_equal(result, expected) + + expected = frame["A"].copy() + result = frame["A"].copy() + result.loc[["foo", "bar"]] = 0 + expected.loc["foo"] = 0 + expected.loc["bar"] = 0 + tm.assert_series_equal(result, expected) + + expected = frame["A"].copy() + result = frame["A"].copy() + result.loc["foo":"bar"] = 0 + expected.loc["foo"] = 0 + expected.loc["bar"] = 0 + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "indexer, exp_idx, exp_values", + [ + (slice("2019-2", None), [to_datetime("2019-02-01")], [2, 3]), + ( + slice(None, "2019-2"), + date_range("2019", periods=2, freq="MS"), + [0, 1, 2, 3], + ), + ], + ) + def test_partial_getitem_loc_datetime(self, indexer, exp_idx, exp_values): + # GH: 25165 + date_idx = date_range("2019", periods=2, freq="MS") + df = DataFrame( + list(range(4)), + index=MultiIndex.from_product([date_idx, [0, 1]], names=["x", "y"]), + ) + expected = DataFrame( + exp_values, + index=MultiIndex.from_product([exp_idx, [0, 1]], names=["x", "y"]), + ) + result = df[indexer] + tm.assert_frame_equal(result, expected) + result = df.loc[indexer] + tm.assert_frame_equal(result, expected) + + result = df.loc(axis=0)[indexer] + tm.assert_frame_equal(result, expected) + + result = df.loc[indexer, :] + tm.assert_frame_equal(result, expected) + + df2 = df.swaplevel(0, 1).sort_index() + expected = expected.swaplevel(0, 1).sort_index() + + result = df2.loc[:, indexer, :] + tm.assert_frame_equal(result, expected) + + +def test_loc_getitem_partial_both_axis(): + # gh-12660 + iterables = [["a", "b"], [2, 1]] + columns = MultiIndex.from_product(iterables, names=["col1", "col2"]) + rows = MultiIndex.from_product(iterables, names=["row1", "row2"]) + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), index=rows, columns=columns + ) + expected = df.iloc[:2, 2:].droplevel("row1").droplevel("col1", axis=1) + result = df.loc["a", "b"] + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_setitem.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_setitem.py new file mode 100644 index 00000000..51b94c3b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_setitem.py @@ -0,0 +1,576 @@ +import numpy as np +import pytest + +from pandas.errors import SettingWithCopyError +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + MultiIndex, + Series, + Timestamp, + date_range, + isna, + notna, +) +import pandas._testing as tm + + +def assert_equal(a, b): + assert a == b + + +class TestMultiIndexSetItem: + def check(self, target, indexers, value, compare_fn=assert_equal, 
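A minimal sketch of the partial-string datetime slicing that test_partial_getitem_loc_datetime exercises; index and values mirror the test:

import pandas as pd

idx = pd.MultiIndex.from_product(
    [pd.date_range("2019", periods=2, freq="MS"), [0, 1]], names=["x", "y"]
)
df = pd.DataFrame({"v": range(4)}, index=idx)
# "2019-2" matches the whole February period, on either end of the slice:
print(df.loc["2019-2":])   # only the two February rows
print(df.loc[:"2019-2"])   # all four rows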
expected=None): + target.loc[indexers] = value + result = target.loc[indexers] + if expected is None: + expected = value + compare_fn(result, expected) + + def test_setitem_multiindex(self): + # GH#7190 + cols = ["A", "w", "l", "a", "x", "X", "d", "profit"] + index = MultiIndex.from_product( + [np.arange(0, 100), np.arange(0, 80)], names=["time", "firm"] + ) + t, n = 0, 2 + + df = DataFrame( + np.nan, + columns=cols, + index=index, + ) + self.check(target=df, indexers=((t, n), "X"), value=0) + + df = DataFrame(-999, columns=cols, index=index) + self.check(target=df, indexers=((t, n), "X"), value=1) + + df = DataFrame(columns=cols, index=index) + self.check(target=df, indexers=((t, n), "X"), value=2) + + # gh-7218: assigning with 0-dim arrays + df = DataFrame(-999, columns=cols, index=index) + self.check( + target=df, + indexers=((t, n), "X"), + value=np.array(3), + expected=3, + ) + + def test_setitem_multiindex2(self): + # GH#5206 + df = DataFrame( + np.arange(25).reshape(5, 5), columns="A,B,C,D,E".split(","), dtype=float + ) + df["F"] = 99 + row_selection = df["A"] % 2 == 0 + col_selection = ["B", "C"] + df.loc[row_selection, col_selection] = df["F"] + output = DataFrame(99.0, index=[0, 2, 4], columns=["B", "C"]) + tm.assert_frame_equal(df.loc[row_selection, col_selection], output) + self.check( + target=df, + indexers=(row_selection, col_selection), + value=df["F"], + compare_fn=tm.assert_frame_equal, + expected=output, + ) + + def test_setitem_multiindex3(self): + # GH#11372 + idx = MultiIndex.from_product( + [["A", "B", "C"], date_range("2015-01-01", "2015-04-01", freq="MS")] + ) + cols = MultiIndex.from_product( + [["foo", "bar"], date_range("2016-01-01", "2016-02-01", freq="MS")] + ) + + df = DataFrame( + np.random.default_rng(2).random((12, 4)), index=idx, columns=cols + ) + + subidx = MultiIndex.from_tuples( + [("A", Timestamp("2015-01-01")), ("A", Timestamp("2015-02-01"))] + ) + subcols = MultiIndex.from_tuples( + [("foo", Timestamp("2016-01-01")), ("foo", Timestamp("2016-02-01"))] + ) + + vals = DataFrame( + np.random.default_rng(2).random((2, 2)), index=subidx, columns=subcols + ) + self.check( + target=df, + indexers=(subidx, subcols), + value=vals, + compare_fn=tm.assert_frame_equal, + ) + # set all columns + vals = DataFrame( + np.random.default_rng(2).random((2, 4)), index=subidx, columns=cols + ) + self.check( + target=df, + indexers=(subidx, slice(None, None, None)), + value=vals, + compare_fn=tm.assert_frame_equal, + ) + # identity + copy = df.copy() + self.check( + target=df, + indexers=(df.index, df.columns), + value=df, + compare_fn=tm.assert_frame_equal, + expected=copy, + ) + + # TODO(ArrayManager) df.loc["bar"] *= 2 doesn't raise an error but results in + # all NaNs -> doesn't work in the "split" path (also for BlockManager actually) + @td.skip_array_manager_not_yet_implemented + def test_multiindex_setitem(self): + # GH 3738 + # setting with a multi-index right hand side + arrays = [ + np.array(["bar", "bar", "baz", "qux", "qux", "bar"]), + np.array(["one", "two", "one", "one", "two", "one"]), + np.arange(0, 6, 1), + ] + + df_orig = DataFrame( + np.random.default_rng(2).standard_normal((6, 3)), + index=arrays, + columns=["A", "B", "C"], + ).sort_index() + + expected = df_orig.loc[["bar"]] * 2 + df = df_orig.copy() + df.loc[["bar"]] *= 2 + tm.assert_frame_equal(df.loc[["bar"]], expected) + + # raise because these have differing levels + msg = "cannot align on a multi-index with out specifying the join levels" + with pytest.raises(TypeError, match=msg): + 
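The alignment rule behind the TypeError asserted just below, on a small illustrative frame:

import pandas as pd

arrays = [["bar", "bar", "baz"], ["one", "two", "one"]]
df = pd.DataFrame({"A": [1.0, 2.0, 3.0]}, index=arrays).sort_index()
df.loc[["bar"]] *= 2  # list selection keeps both levels, so the rhs aligns
# df.loc["bar"] *= 2 raises TypeError: the scalar key drops level 0, and the
# result can no longer be aligned back without specifying the join levels.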
df.loc["bar"] *= 2 + + def test_multiindex_setitem2(self): + # from SO + # https://stackoverflow.com/questions/24572040/pandas-access-the-level-of-multiindex-for-inplace-operation + df_orig = DataFrame.from_dict( + { + "price": { + ("DE", "Coal", "Stock"): 2, + ("DE", "Gas", "Stock"): 4, + ("DE", "Elec", "Demand"): 1, + ("FR", "Gas", "Stock"): 5, + ("FR", "Solar", "SupIm"): 0, + ("FR", "Wind", "SupIm"): 0, + } + } + ) + df_orig.index = MultiIndex.from_tuples( + df_orig.index, names=["Sit", "Com", "Type"] + ) + + expected = df_orig.copy() + expected.iloc[[0, 2, 3]] *= 2 + + idx = pd.IndexSlice + df = df_orig.copy() + df.loc[idx[:, :, "Stock"], :] *= 2 + tm.assert_frame_equal(df, expected) + + df = df_orig.copy() + df.loc[idx[:, :, "Stock"], "price"] *= 2 + tm.assert_frame_equal(df, expected) + + def test_multiindex_assignment(self): + # GH3777 part 2 + + # mixed dtype + df = DataFrame( + np.random.default_rng(2).integers(5, 10, size=9).reshape(3, 3), + columns=list("abc"), + index=[[4, 4, 8], [8, 10, 12]], + ) + df["d"] = np.nan + arr = np.array([0.0, 1.0]) + + df.loc[4, "d"] = arr + tm.assert_series_equal(df.loc[4, "d"], Series(arr, index=[8, 10], name="d")) + + def test_multiindex_assignment_single_dtype(self, using_copy_on_write): + # GH3777 part 2b + # single dtype + arr = np.array([0.0, 1.0]) + + df = DataFrame( + np.random.default_rng(2).integers(5, 10, size=9).reshape(3, 3), + columns=list("abc"), + index=[[4, 4, 8], [8, 10, 12]], + dtype=np.int64, + ) + view = df["c"].iloc[:2].values + + # arr can be losslessly cast to int, so this setitem is inplace + df.loc[4, "c"] = arr + exp = Series(arr, index=[8, 10], name="c", dtype="int64") + result = df.loc[4, "c"] + tm.assert_series_equal(result, exp) + + # extra check for inplace-ness + if not using_copy_on_write: + tm.assert_numpy_array_equal(view, exp.values) + + # arr + 0.5 cannot be cast losslessly to int, so we upcast + with tm.assert_produces_warning( + FutureWarning, match="item of incompatible dtype" + ): + df.loc[4, "c"] = arr + 0.5 + result = df.loc[4, "c"] + exp = exp + 0.5 + tm.assert_series_equal(result, exp) + + # scalar ok + df.loc[4, "c"] = 10 + exp = Series(10, index=[8, 10], name="c", dtype="float64") + tm.assert_series_equal(df.loc[4, "c"], exp) + + # invalid assignments + msg = "Must have equal len keys and value when setting with an iterable" + with pytest.raises(ValueError, match=msg): + df.loc[4, "c"] = [0, 1, 2, 3] + + with pytest.raises(ValueError, match=msg): + df.loc[4, "c"] = [0] + + # But with a length-1 listlike column indexer this behaves like + # `df.loc[4, "c"] = 0 + df.loc[4, ["c"]] = [0] + assert (df.loc[4, "c"] == 0).all() + + def test_groupby_example(self): + # groupby example + NUM_ROWS = 100 + NUM_COLS = 10 + col_names = ["A" + num for num in map(str, np.arange(NUM_COLS).tolist())] + index_cols = col_names[:5] + + df = DataFrame( + np.random.default_rng(2).integers(5, size=(NUM_ROWS, NUM_COLS)), + dtype=np.int64, + columns=col_names, + ) + df = df.set_index(index_cols).sort_index() + grp = df.groupby(level=index_cols[:4]) + df["new_col"] = np.nan + + # we are actually operating on a copy here + # but in this case, that's ok + for name, df2 in grp: + new_vals = np.arange(df2.shape[0]) + df.loc[name, "new_col"] = new_vals + + def test_series_setitem(self, multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data + s = ymd["A"] + + s[2000, 3] = np.nan + assert isna(s.values[42:65]).all() + assert notna(s.values[:42]).all() + assert 
notna(s.values[65:]).all() + + s[2000, 3, 10] = np.nan + assert isna(s.iloc[49]) + + with pytest.raises(KeyError, match="49"): + # GH#33355 dont fall-back to positional when leading level is int + s[49] + + def test_frame_getitem_setitem_boolean(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + df = frame.T.copy() + values = df.values.copy() + + result = df[df > 0] + expected = df.where(df > 0) + tm.assert_frame_equal(result, expected) + + df[df > 0] = 5 + values[values > 0] = 5 + tm.assert_almost_equal(df.values, values) + + df[df == 5] = 0 + values[values == 5] = 0 + tm.assert_almost_equal(df.values, values) + + # a df that needs alignment first + df[df[:-1] < 0] = 2 + np.putmask(values[:-1], values[:-1] < 0, 2) + tm.assert_almost_equal(df.values, values) + + with pytest.raises(TypeError, match="boolean values only"): + df[df * 0] = 2 + + def test_frame_getitem_setitem_multislice(self): + levels = [["t1", "t2"], ["a", "b", "c"]] + codes = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]] + midx = MultiIndex(codes=codes, levels=levels, names=[None, "id"]) + df = DataFrame({"value": [1, 2, 3, 7, 8]}, index=midx) + + result = df.loc[:, "value"] + tm.assert_series_equal(df["value"], result) + + result = df.loc[df.index[1:3], "value"] + tm.assert_series_equal(df["value"][1:3], result) + + result = df.loc[:, :] + tm.assert_frame_equal(df, result) + + result = df + df.loc[:, "value"] = 10 + result["value"] = 10 + tm.assert_frame_equal(df, result) + + df.loc[:, :] = 10 + tm.assert_frame_equal(df, result) + + def test_frame_setitem_multi_column(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=[["a", "a", "b", "b"], [0, 1, 0, 1]], + ) + + cp = df.copy() + cp["a"] = cp["b"] + tm.assert_frame_equal(cp["a"], cp["b"]) + + # set with ndarray + cp = df.copy() + cp["a"] = cp["b"].values + tm.assert_frame_equal(cp["a"], cp["b"]) + + def test_frame_setitem_multi_column2(self): + # --------------------------------------- + # GH#1803 + columns = MultiIndex.from_tuples([("A", "1"), ("A", "2"), ("B", "1")]) + df = DataFrame(index=[1, 3, 5], columns=columns) + + # Works, but adds a column instead of updating the two existing ones + df["A"] = 0.0 # Doesn't work + assert (df["A"].values == 0).all() + + # it broadcasts + df["B", "1"] = [1, 2, 3] + df["A"] = df["B", "1"] + + sliced_a1 = df["A", "1"] + sliced_a2 = df["A", "2"] + sliced_b1 = df["B", "1"] + tm.assert_series_equal(sliced_a1, sliced_b1, check_names=False) + tm.assert_series_equal(sliced_a2, sliced_b1, check_names=False) + assert sliced_a1.name == ("A", "1") + assert sliced_a2.name == ("A", "2") + assert sliced_b1.name == ("B", "1") + + def test_loc_getitem_tuple_plus_columns( + self, multiindex_year_month_day_dataframe_random_data + ): + # GH #1013 + ymd = multiindex_year_month_day_dataframe_random_data + df = ymd[:5] + + result = df.loc[(2000, 1, 6), ["A", "B", "C"]] + expected = df.loc[2000, 1, 6][["A", "B", "C"]] + tm.assert_series_equal(result, expected) + + def test_loc_getitem_setitem_slice_integers(self, frame_or_series): + index = MultiIndex( + levels=[[0, 1, 2], [0, 2]], codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]] + ) + + obj = DataFrame( + np.random.default_rng(2).standard_normal((len(index), 4)), + index=index, + columns=["a", "b", "c", "d"], + ) + obj = tm.get_obj(obj, frame_or_series) + + res = obj.loc[1:2] + exp = obj.reindex(obj.index[2:]) + tm.assert_equal(res, exp) + + obj.loc[1:2] = 7 + assert (obj.loc[1:2] == 7).values.all() + + def test_setitem_change_dtype(self, 
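The mask equivalence the boolean getitem/setitem test starts from, in two lines:

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.default_rng(2).standard_normal((4, 3)))
pd.testing.assert_frame_equal(df[df > 0], df.where(df > 0))
df[df > 0] = 5  # boolean setitem then writes through the same mask in place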
multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + dft = frame.T + s = dft["foo", "two"] + dft["foo", "two"] = s > s.median() + tm.assert_series_equal(dft["foo", "two"], s > s.median()) + # assert isinstance(dft._data.blocks[1].items, MultiIndex) + + reindexed = dft.reindex(columns=[("foo", "two")]) + tm.assert_series_equal(reindexed["foo", "two"], s > s.median()) + + def test_set_column_scalar_with_loc( + self, multiindex_dataframe_random_data, using_copy_on_write + ): + frame = multiindex_dataframe_random_data + subset = frame.index[[1, 4, 5]] + + frame.loc[subset] = 99 + assert (frame.loc[subset].values == 99).all() + + frame_original = frame.copy() + col = frame["B"] + col[subset] = 97 + if using_copy_on_write: + # chained setitem doesn't work with CoW + tm.assert_frame_equal(frame, frame_original) + else: + assert (frame.loc[subset, "B"] == 97).all() + + def test_nonunique_assignment_1750(self): + df = DataFrame( + [[1, 1, "x", "X"], [1, 1, "y", "Y"], [1, 2, "z", "Z"]], columns=list("ABCD") + ) + + df = df.set_index(["A", "B"]) + mi = MultiIndex.from_tuples([(1, 1)]) + + df.loc[mi, "C"] = "_" + + assert (df.xs((1, 1))["C"] == "_").all() + + def test_astype_assignment_with_dups(self): + # GH 4686 + # assignment with dups that has a dtype change + cols = MultiIndex.from_tuples([("A", "1"), ("B", "1"), ("A", "2")]) + df = DataFrame(np.arange(3).reshape((1, 3)), columns=cols, dtype=object) + index = df.index.copy() + + df["A"] = df["A"].astype(np.float64) + tm.assert_index_equal(df.index, index) + + def test_setitem_nonmonotonic(self): + # https://github.com/pandas-dev/pandas/issues/31449 + index = MultiIndex.from_tuples( + [("a", "c"), ("b", "x"), ("a", "d")], names=["l1", "l2"] + ) + df = DataFrame(data=[0, 1, 2], index=index, columns=["e"]) + df.loc["a", "e"] = np.arange(99, 101, dtype="int64") + expected = DataFrame({"e": [99, 1, 100]}, index=index) + tm.assert_frame_equal(df, expected) + + +class TestSetitemWithExpansionMultiIndex: + def test_setitem_new_column_mixed_depth(self): + arrays = [ + ["a", "top", "top", "routine1", "routine1", "routine2"], + ["", "OD", "OD", "result1", "result2", "result1"], + ["", "wx", "wy", "", "", ""], + ] + + tuples = sorted(zip(*arrays)) + index = MultiIndex.from_tuples(tuples) + df = DataFrame(np.random.default_rng(2).standard_normal((4, 6)), columns=index) + + result = df.copy() + expected = df.copy() + result["b"] = [1, 2, 3, 4] + expected["b", "", ""] = [1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + def test_setitem_new_column_all_na(self): + # GH#1534 + mix = MultiIndex.from_tuples([("1a", "2a"), ("1a", "2b"), ("1a", "2c")]) + df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mix) + s = Series({(1, 1): 1, (1, 2): 2}) + df["new"] = s + assert df["new"].isna().all() + + def test_setitem_enlargement_keep_index_names(self): + # GH#53053 + mi = MultiIndex.from_tuples([(1, 2, 3)], names=["i1", "i2", "i3"]) + df = DataFrame(data=[[10, 20, 30]], index=mi, columns=["A", "B", "C"]) + df.loc[(0, 0, 0)] = df.loc[(1, 2, 3)] + mi_expected = MultiIndex.from_tuples( + [(1, 2, 3), (0, 0, 0)], names=["i1", "i2", "i3"] + ) + expected = DataFrame( + data=[[10, 20, 30], [10, 20, 30]], + index=mi_expected, + columns=["A", "B", "C"], + ) + tm.assert_frame_equal(df, expected) + + +@td.skip_array_manager_invalid_test # df["foo"] select multiple columns -> .values +# is not a view +def test_frame_setitem_view_direct( + multiindex_dataframe_random_data, using_copy_on_write +): + # this works because we are modifying the underlying 
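test_setitem_nonmonotonic above, reduced to a runnable sketch:

import numpy as np
import pandas as pd

index = pd.MultiIndex.from_tuples(
    [("a", "c"), ("b", "x"), ("a", "d")], names=["l1", "l2"]
)
df = pd.DataFrame({"e": [0, 1, 2]}, index=index)
df.loc["a", "e"] = np.arange(99, 101, dtype="int64")
# The two "a" rows (positions 0 and 2) receive 99 and 100 in order:
print(df["e"].tolist())  # [99, 1, 100]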
array + # really a no-no + df = multiindex_dataframe_random_data.T + if using_copy_on_write: + with pytest.raises(ValueError, match="read-only"): + df["foo"].values[:] = 0 + assert (df["foo"].values != 0).all() + else: + df["foo"].values[:] = 0 + assert (df["foo"].values == 0).all() + + +def test_frame_setitem_copy_raises( + multiindex_dataframe_random_data, using_copy_on_write +): + # will raise/warn as its chained assignment + df = multiindex_dataframe_random_data.T + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["foo"]["one"] = 2 + else: + msg = "A value is trying to be set on a copy of a slice from a DataFrame" + with pytest.raises(SettingWithCopyError, match=msg): + df["foo"]["one"] = 2 + + +def test_frame_setitem_copy_no_write( + multiindex_dataframe_random_data, using_copy_on_write +): + frame = multiindex_dataframe_random_data.T + expected = frame + df = frame.copy() + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["foo"]["one"] = 2 + else: + msg = "A value is trying to be set on a copy of a slice from a DataFrame" + with pytest.raises(SettingWithCopyError, match=msg): + df["foo"]["one"] = 2 + + result = df + tm.assert_frame_equal(result, expected) + + +def test_frame_setitem_partial_multiindex(): + # GH 54875 + df = DataFrame( + { + "a": [1, 2, 3], + "b": [3, 4, 5], + "c": 6, + "d": 7, + } + ).set_index(["a", "b", "c"]) + ser = Series(8, index=df.index.droplevel("c")) + result = df.copy() + result["d"] = ser + expected = df.copy() + expected["d"] = 8 + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_slice.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_slice.py new file mode 100644 index 00000000..6a78b224 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_slice.py @@ -0,0 +1,795 @@ +from datetime import ( + datetime, + timedelta, +) + +import numpy as np +import pytest + +from pandas.errors import UnsortedIndexError + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + Timestamp, +) +import pandas._testing as tm +from pandas.tests.indexing.common import _mklbl + + +class TestMultiIndexSlicers: + def test_per_axis_per_level_getitem(self): + # GH6134 + # example test case + ix = MultiIndex.from_product( + [_mklbl("A", 5), _mklbl("B", 7), _mklbl("C", 4), _mklbl("D", 2)] + ) + df = DataFrame(np.arange(len(ix.to_numpy())), index=ix) + + result = df.loc[(slice("A1", "A3"), slice(None), ["C1", "C3"]), :] + expected = df.loc[ + [ + ( + a, + b, + c, + d, + ) + for a, b, c, d in df.index.values + if a in ("A1", "A2", "A3") and c in ("C1", "C3") + ] + ] + tm.assert_frame_equal(result, expected) + + expected = df.loc[ + [ + ( + a, + b, + c, + d, + ) + for a, b, c, d in df.index.values + if a in ("A1", "A2", "A3") and c in ("C1", "C2", "C3") + ] + ] + result = df.loc[(slice("A1", "A3"), slice(None), slice("C1", "C3")), :] + tm.assert_frame_equal(result, expected) + + # test multi-index slicing with per axis and per index controls + index = MultiIndex.from_tuples( + [("A", 1), ("A", 2), ("A", 3), ("B", 1)], names=["one", "two"] + ) + columns = MultiIndex.from_tuples( + [("a", "foo"), ("a", "bar"), ("b", "foo"), ("b", "bah")], + names=["lvl0", "lvl1"], + ) + + df = DataFrame( + np.arange(16, dtype="int64").reshape(4, 4), index=index, columns=columns + ) + df = df.sort_index(axis=0).sort_index(axis=1) + + # identity + result = df.loc[(slice(None), 
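A short note on the chained-assignment pattern these tests guard against; a single indexing call is the reliable spelling (the frame below is illustrative):

import pandas as pd

cols = pd.MultiIndex.from_tuples([("foo", "one"), ("foo", "two")])
df = pd.DataFrame([[1.0, 2.0]], index=["A"], columns=cols)
# df["foo"]["one"] = 2 assigns into an intermediate object; under copy-on-write
# it never reaches df, otherwise it trips the SettingWithCopy machinery.
df.loc["A", ("foo", "one")] = 2.0  # goes through one indexer, always sticks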
slice(None)), :] + tm.assert_frame_equal(result, df) + result = df.loc[(slice(None), slice(None)), (slice(None), slice(None))] + tm.assert_frame_equal(result, df) + result = df.loc[:, (slice(None), slice(None))] + tm.assert_frame_equal(result, df) + + # index + result = df.loc[(slice(None), [1]), :] + expected = df.iloc[[0, 3]] + tm.assert_frame_equal(result, expected) + + result = df.loc[(slice(None), 1), :] + expected = df.iloc[[0, 3]] + tm.assert_frame_equal(result, expected) + + # columns + result = df.loc[:, (slice(None), ["foo"])] + expected = df.iloc[:, [1, 3]] + tm.assert_frame_equal(result, expected) + + # both + result = df.loc[(slice(None), 1), (slice(None), ["foo"])] + expected = df.iloc[[0, 3], [1, 3]] + tm.assert_frame_equal(result, expected) + + result = df.loc["A", "a"] + expected = DataFrame( + {"bar": [1, 5, 9], "foo": [0, 4, 8]}, + index=Index([1, 2, 3], name="two"), + columns=Index(["bar", "foo"], name="lvl1"), + ) + tm.assert_frame_equal(result, expected) + + result = df.loc[(slice(None), [1, 2]), :] + expected = df.iloc[[0, 1, 3]] + tm.assert_frame_equal(result, expected) + + # multi-level series + s = Series(np.arange(len(ix.to_numpy())), index=ix) + result = s.loc["A1":"A3", :, ["C1", "C3"]] + expected = s.loc[ + [ + ( + a, + b, + c, + d, + ) + for a, b, c, d in s.index.values + if a in ("A1", "A2", "A3") and c in ("C1", "C3") + ] + ] + tm.assert_series_equal(result, expected) + + # boolean indexers + result = df.loc[(slice(None), df.loc[:, ("a", "bar")] > 5), :] + expected = df.iloc[[2, 3]] + tm.assert_frame_equal(result, expected) + + msg = ( + "cannot index with a boolean indexer " + "that is not the same length as the index" + ) + with pytest.raises(ValueError, match=msg): + df.loc[(slice(None), np.array([True, False])), :] + + with pytest.raises(KeyError, match=r"\[1\] not in index"): + # slice(None) is on the index, [1] is on the columns, but 1 is + # not in the columns, so we raise + # This used to treat [1] as positional GH#16396 + df.loc[slice(None), [1]] + + # not lexsorted + assert df.index._lexsort_depth == 2 + df = df.sort_index(level=1, axis=0) + assert df.index._lexsort_depth == 0 + + msg = ( + "MultiIndex slicing requires the index to be " + r"lexsorted: slicing on levels \[1\], lexsort depth 0" + ) + with pytest.raises(UnsortedIndexError, match=msg): + df.loc[(slice(None), slice("bar")), :] + + # GH 16734: not sorted, but no real slicing + result = df.loc[(slice(None), df.loc[:, ("a", "bar")] > 5), :] + tm.assert_frame_equal(result, df.iloc[[1, 3], :]) + + def test_multiindex_slicers_non_unique(self): + # GH 7106 + # non-unique mi index support + df = ( + DataFrame( + { + "A": ["foo", "foo", "foo", "foo"], + "B": ["a", "a", "a", "a"], + "C": [1, 2, 1, 3], + "D": [1, 2, 3, 4], + } + ) + .set_index(["A", "B", "C"]) + .sort_index() + ) + assert not df.index.is_unique + expected = ( + DataFrame({"A": ["foo", "foo"], "B": ["a", "a"], "C": [1, 1], "D": [1, 3]}) + .set_index(["A", "B", "C"]) + .sort_index() + ) + result = df.loc[(slice(None), slice(None), 1), :] + tm.assert_frame_equal(result, expected) + + # this is equivalent of an xs expression + result = df.xs(1, level=2, drop_level=False) + tm.assert_frame_equal(result, expected) + + df = ( + DataFrame( + { + "A": ["foo", "foo", "foo", "foo"], + "B": ["a", "a", "a", "a"], + "C": [1, 2, 1, 2], + "D": [1, 2, 3, 4], + } + ) + .set_index(["A", "B", "C"]) + .sort_index() + ) + assert not df.index.is_unique + expected = ( + DataFrame({"A": ["foo", "foo"], "B": ["a", "a"], "C": [1, 1], "D": [1, 3]}) + 
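The IndexSlice shorthand and the lexsort requirement used throughout these slicer tests, sketched on a toy frame:

import pandas as pd

idx = pd.IndexSlice
mi = pd.MultiIndex.from_product([["A", "B"], [1, 2, 3]], names=["one", "two"])
df = pd.DataFrame({"v": range(6)}, index=mi).sort_index()
df.loc[idx[:, [1, 3]], :]  # same rows as df.loc[(slice(None), [1, 3]), :]
unsorted = df.sort_index(level=1)  # lexsort depth drops to 0
# unsorted.loc[idx[:, 1:2], :] raises UnsortedIndexError: a real slice on a
# level needs the index lexsorted at least to that level's depth.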
.set_index(["A", "B", "C"]) + .sort_index() + ) + result = df.loc[(slice(None), slice(None), 1), :] + assert not result.index.is_unique + tm.assert_frame_equal(result, expected) + + # GH12896 + # numpy-implementation dependent bug + ints = [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 12, + 13, + 14, + 14, + 16, + 17, + 18, + 19, + 200000, + 200000, + ] + n = len(ints) + idx = MultiIndex.from_arrays([["a"] * n, ints]) + result = Series([1] * n, index=idx) + result = result.sort_index() + result = result.loc[(slice(None), slice(100000))] + expected = Series([1] * (n - 2), index=idx[:-2]).sort_index() + tm.assert_series_equal(result, expected) + + def test_multiindex_slicers_datetimelike(self): + # GH 7429 + # buggy/inconsistent behavior when slicing with datetime-like + dates = [datetime(2012, 1, 1, 12, 12, 12) + timedelta(days=i) for i in range(6)] + freq = [1, 2] + index = MultiIndex.from_product([dates, freq], names=["date", "frequency"]) + + df = DataFrame( + np.arange(6 * 2 * 4, dtype="int64").reshape(-1, 4), + index=index, + columns=list("ABCD"), + ) + + # multi-axis slicing + idx = pd.IndexSlice + expected = df.iloc[[0, 2, 4], [0, 1]] + result = df.loc[ + ( + slice( + Timestamp("2012-01-01 12:12:12"), Timestamp("2012-01-03 12:12:12") + ), + slice(1, 1), + ), + slice("A", "B"), + ] + tm.assert_frame_equal(result, expected) + + result = df.loc[ + ( + idx[ + Timestamp("2012-01-01 12:12:12") : Timestamp("2012-01-03 12:12:12") + ], + idx[1:1], + ), + slice("A", "B"), + ] + tm.assert_frame_equal(result, expected) + + result = df.loc[ + ( + slice( + Timestamp("2012-01-01 12:12:12"), Timestamp("2012-01-03 12:12:12") + ), + 1, + ), + slice("A", "B"), + ] + tm.assert_frame_equal(result, expected) + + # with strings + result = df.loc[ + (slice("2012-01-01 12:12:12", "2012-01-03 12:12:12"), slice(1, 1)), + slice("A", "B"), + ] + tm.assert_frame_equal(result, expected) + + result = df.loc[ + (idx["2012-01-01 12:12:12":"2012-01-03 12:12:12"], 1), idx["A", "B"] + ] + tm.assert_frame_equal(result, expected) + + def test_multiindex_slicers_edges(self): + # GH 8132 + # various edge cases + df = DataFrame( + { + "A": ["A0"] * 5 + ["A1"] * 5 + ["A2"] * 5, + "B": ["B0", "B0", "B1", "B1", "B2"] * 3, + "DATE": [ + "2013-06-11", + "2013-07-02", + "2013-07-09", + "2013-07-30", + "2013-08-06", + "2013-06-11", + "2013-07-02", + "2013-07-09", + "2013-07-30", + "2013-08-06", + "2013-09-03", + "2013-10-01", + "2013-07-09", + "2013-08-06", + "2013-09-03", + ], + "VALUES": [22, 35, 14, 9, 4, 40, 18, 4, 2, 5, 1, 2, 3, 4, 2], + } + ) + + df["DATE"] = pd.to_datetime(df["DATE"]) + df1 = df.set_index(["A", "B", "DATE"]) + df1 = df1.sort_index() + + # A1 - Get all values under "A0" and "A1" + result = df1.loc[(slice("A1")), :] + expected = df1.iloc[0:10] + tm.assert_frame_equal(result, expected) + + # A2 - Get all values from the start to "A2" + result = df1.loc[(slice("A2")), :] + expected = df1 + tm.assert_frame_equal(result, expected) + + # A3 - Get all values under "B1" or "B2" + result = df1.loc[(slice(None), slice("B1", "B2")), :] + expected = df1.iloc[[2, 3, 4, 7, 8, 9, 12, 13, 14]] + tm.assert_frame_equal(result, expected) + + # A4 - Get all values between 2013-07-02 and 2013-07-09 + result = df1.loc[(slice(None), slice(None), slice("20130702", "20130709")), :] + expected = df1.iloc[[1, 2, 6, 7, 12]] + tm.assert_frame_equal(result, expected) + + # B1 - Get all values in B0 that are also under A0, A1 and A2 + result = df1.loc[(slice("A2"), slice("B0")), :] + expected = df1.iloc[[0, 1, 5, 6, 
10, 11]] + tm.assert_frame_equal(result, expected) + + # B2 - Get all values in B0, B1 and B2 (similar to what #2 is doing for + # the As) + result = df1.loc[(slice(None), slice("B2")), :] + expected = df1 + tm.assert_frame_equal(result, expected) + + # B3 - Get all values from B1 to B2 and up to 2013-08-06 + result = df1.loc[(slice(None), slice("B1", "B2"), slice("2013-08-06")), :] + expected = df1.iloc[[2, 3, 4, 7, 8, 9, 12, 13]] + tm.assert_frame_equal(result, expected) + + # B4 - Same as A4 but the start of the date slice is not a key. + # shows indexing on a partial selection slice + result = df1.loc[(slice(None), slice(None), slice("20130701", "20130709")), :] + expected = df1.iloc[[1, 2, 6, 7, 12]] + tm.assert_frame_equal(result, expected) + + def test_per_axis_per_level_doc_examples(self): + # test index maker + idx = pd.IndexSlice + + # from indexing.rst / advanced + index = MultiIndex.from_product( + [_mklbl("A", 4), _mklbl("B", 2), _mklbl("C", 4), _mklbl("D", 2)] + ) + columns = MultiIndex.from_tuples( + [("a", "foo"), ("a", "bar"), ("b", "foo"), ("b", "bah")], + names=["lvl0", "lvl1"], + ) + df = DataFrame( + np.arange(len(index) * len(columns), dtype="int64").reshape( + (len(index), len(columns)) + ), + index=index, + columns=columns, + ) + result = df.loc[(slice("A1", "A3"), slice(None), ["C1", "C3"]), :] + expected = df.loc[ + [ + ( + a, + b, + c, + d, + ) + for a, b, c, d in df.index.values + if a in ("A1", "A2", "A3") and c in ("C1", "C3") + ] + ] + tm.assert_frame_equal(result, expected) + result = df.loc[idx["A1":"A3", :, ["C1", "C3"]], :] + tm.assert_frame_equal(result, expected) + + result = df.loc[(slice(None), slice(None), ["C1", "C3"]), :] + expected = df.loc[ + [ + ( + a, + b, + c, + d, + ) + for a, b, c, d in df.index.values + if c in ("C1", "C3") + ] + ] + tm.assert_frame_equal(result, expected) + result = df.loc[idx[:, :, ["C1", "C3"]], :] + tm.assert_frame_equal(result, expected) + + # not sorted + msg = ( + "MultiIndex slicing requires the index to be lexsorted: " + r"slicing on levels \[1\], lexsort depth 1" + ) + with pytest.raises(UnsortedIndexError, match=msg): + df.loc["A1", ("a", slice("foo"))] + + # GH 16734: not sorted, but no real slicing + tm.assert_frame_equal( + df.loc["A1", (slice(None), "foo")], df.loc["A1"].iloc[:, [0, 2]] + ) + + df = df.sort_index(axis=1) + + # slicing + df.loc["A1", (slice(None), "foo")] + df.loc[(slice(None), slice(None), ["C1", "C3"]), (slice(None), "foo")] + + # setitem + df.loc(axis=0)[:, :, ["C1", "C3"]] = -10 + + def test_loc_axis_arguments(self): + index = MultiIndex.from_product( + [_mklbl("A", 4), _mklbl("B", 2), _mklbl("C", 4), _mklbl("D", 2)] + ) + columns = MultiIndex.from_tuples( + [("a", "foo"), ("a", "bar"), ("b", "foo"), ("b", "bah")], + names=["lvl0", "lvl1"], + ) + df = ( + DataFrame( + np.arange(len(index) * len(columns), dtype="int64").reshape( + (len(index), len(columns)) + ), + index=index, + columns=columns, + ) + .sort_index() + .sort_index(axis=1) + ) + + # axis 0 + result = df.loc(axis=0)["A1":"A3", :, ["C1", "C3"]] + expected = df.loc[ + [ + ( + a, + b, + c, + d, + ) + for a, b, c, d in df.index.values + if a in ("A1", "A2", "A3") and c in ("C1", "C3") + ] + ] + tm.assert_frame_equal(result, expected) + + result = df.loc(axis="index")[:, :, ["C1", "C3"]] + expected = df.loc[ + [ + ( + a, + b, + c, + d, + ) + for a, b, c, d in df.index.values + if c in ("C1", "C3") + ] + ] + tm.assert_frame_equal(result, expected) + + # axis 1 + result = df.loc(axis=1)[:, "foo"] + expected = df.loc[:, (slice(None), 
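The "endpoint need not be an existing key" rule from case B4 above, isolated:

import pandas as pd

mi = pd.MultiIndex.from_product(
    [["A0", "A1"], pd.to_datetime(["2013-07-02", "2013-07-09"])]
)
df = pd.DataFrame({"V": range(4)}, index=mi).sort_index()
# "2013-07-01" is not in the level; on a sorted level pandas just finds the
# cut points for the slice:
df.loc[(slice(None), slice("2013-07-01", "2013-07-09")), :]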
"foo")] + tm.assert_frame_equal(result, expected) + + result = df.loc(axis="columns")[:, "foo"] + expected = df.loc[:, (slice(None), "foo")] + tm.assert_frame_equal(result, expected) + + # invalid axis + for i in [-1, 2, "foo"]: + msg = f"No axis named {i} for object type DataFrame" + with pytest.raises(ValueError, match=msg): + df.loc(axis=i)[:, :, ["C1", "C3"]] + + def test_loc_axis_single_level_multi_col_indexing_multiindex_col_df(self): + # GH29519 + df = DataFrame( + np.arange(27).reshape(3, 9), + columns=MultiIndex.from_product([["a1", "a2", "a3"], ["b1", "b2", "b3"]]), + ) + result = df.loc(axis=1)["a1":"a2"] + expected = df.iloc[:, :-3] + + tm.assert_frame_equal(result, expected) + + def test_loc_axis_single_level_single_col_indexing_multiindex_col_df(self): + # GH29519 + df = DataFrame( + np.arange(27).reshape(3, 9), + columns=MultiIndex.from_product([["a1", "a2", "a3"], ["b1", "b2", "b3"]]), + ) + result = df.loc(axis=1)["a1"] + expected = df.iloc[:, :3] + expected.columns = ["b1", "b2", "b3"] + + tm.assert_frame_equal(result, expected) + + def test_loc_ax_single_level_indexer_simple_df(self): + # GH29519 + # test single level indexing on single index column data frame + df = DataFrame(np.arange(9).reshape(3, 3), columns=["a", "b", "c"]) + result = df.loc(axis=1)["a"] + expected = Series(np.array([0, 3, 6]), name="a") + tm.assert_series_equal(result, expected) + + def test_per_axis_per_level_setitem(self): + # test index maker + idx = pd.IndexSlice + + # test multi-index slicing with per axis and per index controls + index = MultiIndex.from_tuples( + [("A", 1), ("A", 2), ("A", 3), ("B", 1)], names=["one", "two"] + ) + columns = MultiIndex.from_tuples( + [("a", "foo"), ("a", "bar"), ("b", "foo"), ("b", "bah")], + names=["lvl0", "lvl1"], + ) + + df_orig = DataFrame( + np.arange(16, dtype="int64").reshape(4, 4), index=index, columns=columns + ) + df_orig = df_orig.sort_index(axis=0).sort_index(axis=1) + + # identity + df = df_orig.copy() + df.loc[(slice(None), slice(None)), :] = 100 + expected = df_orig.copy() + expected.iloc[:, :] = 100 + tm.assert_frame_equal(df, expected) + + df = df_orig.copy() + df.loc(axis=0)[:, :] = 100 + expected = df_orig.copy() + expected.iloc[:, :] = 100 + tm.assert_frame_equal(df, expected) + + df = df_orig.copy() + df.loc[(slice(None), slice(None)), (slice(None), slice(None))] = 100 + expected = df_orig.copy() + expected.iloc[:, :] = 100 + tm.assert_frame_equal(df, expected) + + df = df_orig.copy() + df.loc[:, (slice(None), slice(None))] = 100 + expected = df_orig.copy() + expected.iloc[:, :] = 100 + tm.assert_frame_equal(df, expected) + + # index + df = df_orig.copy() + df.loc[(slice(None), [1]), :] = 100 + expected = df_orig.copy() + expected.iloc[[0, 3]] = 100 + tm.assert_frame_equal(df, expected) + + df = df_orig.copy() + df.loc[(slice(None), 1), :] = 100 + expected = df_orig.copy() + expected.iloc[[0, 3]] = 100 + tm.assert_frame_equal(df, expected) + + df = df_orig.copy() + df.loc(axis=0)[:, 1] = 100 + expected = df_orig.copy() + expected.iloc[[0, 3]] = 100 + tm.assert_frame_equal(df, expected) + + # columns + df = df_orig.copy() + df.loc[:, (slice(None), ["foo"])] = 100 + expected = df_orig.copy() + expected.iloc[:, [1, 3]] = 100 + tm.assert_frame_equal(df, expected) + + # both + df = df_orig.copy() + df.loc[(slice(None), 1), (slice(None), ["foo"])] = 100 + expected = df_orig.copy() + expected.iloc[[0, 3], [1, 3]] = 100 + tm.assert_frame_equal(df, expected) + + df = df_orig.copy() + df.loc[idx[:, 1], idx[:, ["foo"]]] = 100 + expected = 
df_orig.copy() + expected.iloc[[0, 3], [1, 3]] = 100 + tm.assert_frame_equal(df, expected) + + df = df_orig.copy() + df.loc["A", "a"] = 100 + expected = df_orig.copy() + expected.iloc[0:3, 0:2] = 100 + tm.assert_frame_equal(df, expected) + + # setting with a list-like + df = df_orig.copy() + df.loc[(slice(None), 1), (slice(None), ["foo"])] = np.array( + [[100, 100], [100, 100]], dtype="int64" + ) + expected = df_orig.copy() + expected.iloc[[0, 3], [1, 3]] = 100 + tm.assert_frame_equal(df, expected) + + # not enough values + df = df_orig.copy() + + msg = "setting an array element with a sequence." + with pytest.raises(ValueError, match=msg): + df.loc[(slice(None), 1), (slice(None), ["foo"])] = np.array( + [[100], [100, 100]], dtype="int64" + ) + + msg = "Must have equal len keys and value when setting with an iterable" + with pytest.raises(ValueError, match=msg): + df.loc[(slice(None), 1), (slice(None), ["foo"])] = np.array( + [100, 100, 100, 100], dtype="int64" + ) + + # with an alignable rhs + df = df_orig.copy() + df.loc[(slice(None), 1), (slice(None), ["foo"])] = ( + df.loc[(slice(None), 1), (slice(None), ["foo"])] * 5 + ) + expected = df_orig.copy() + expected.iloc[[0, 3], [1, 3]] = expected.iloc[[0, 3], [1, 3]] * 5 + tm.assert_frame_equal(df, expected) + + df = df_orig.copy() + df.loc[(slice(None), 1), (slice(None), ["foo"])] *= df.loc[ + (slice(None), 1), (slice(None), ["foo"]) + ] + expected = df_orig.copy() + expected.iloc[[0, 3], [1, 3]] *= expected.iloc[[0, 3], [1, 3]] + tm.assert_frame_equal(df, expected) + + rhs = df_orig.loc[(slice(None), 1), (slice(None), ["foo"])].copy() + rhs.loc[:, ("c", "bah")] = 10 + df = df_orig.copy() + df.loc[(slice(None), 1), (slice(None), ["foo"])] *= rhs + expected = df_orig.copy() + expected.iloc[[0, 3], [1, 3]] *= expected.iloc[[0, 3], [1, 3]] + tm.assert_frame_equal(df, expected) + + def test_multiindex_label_slicing_with_negative_step(self): + ser = Series( + np.arange(20), MultiIndex.from_product([list("abcde"), np.arange(4)]) + ) + SLC = pd.IndexSlice + + tm.assert_indexing_slices_equivalent(ser, SLC[::-1], SLC[::-1]) + + tm.assert_indexing_slices_equivalent(ser, SLC["d"::-1], SLC[15::-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[("d",)::-1], SLC[15::-1]) + + tm.assert_indexing_slices_equivalent(ser, SLC[:"d":-1], SLC[:11:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[:("d",):-1], SLC[:11:-1]) + + tm.assert_indexing_slices_equivalent(ser, SLC["d":"b":-1], SLC[15:3:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[("d",):"b":-1], SLC[15:3:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC["d":("b",):-1], SLC[15:3:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[("d",):("b",):-1], SLC[15:3:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC["b":"d":-1], SLC[:0]) + + tm.assert_indexing_slices_equivalent(ser, SLC[("c", 2)::-1], SLC[10::-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[:("c", 2):-1], SLC[:9:-1]) + tm.assert_indexing_slices_equivalent( + ser, SLC[("e", 0):("c", 2):-1], SLC[16:9:-1] + ) + + def test_multiindex_slice_first_level(self): + # GH 12697 + freq = ["a", "b", "c", "d"] + idx = MultiIndex.from_product([freq, range(500)]) + df = DataFrame(list(range(2000)), index=idx, columns=["Test"]) + df_slice = df.loc[pd.IndexSlice[:, 30:70], :] + result = df_slice.loc["a"] + expected = DataFrame(list(range(30, 71)), columns=["Test"], index=range(30, 71)) + tm.assert_frame_equal(result, expected) + result = df_slice.loc["d"] + expected = DataFrame( + list(range(1530, 1571)), columns=["Test"], index=range(30, 71) + 
) + tm.assert_frame_equal(result, expected) + + def test_int_series_slicing(self, multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data + s = ymd["A"] + result = s[5:] + expected = s.reindex(s.index[5:]) + tm.assert_series_equal(result, expected) + + exp = ymd["A"].copy() + s[5:] = 0 + exp.iloc[5:] = 0 + tm.assert_numpy_array_equal(s.values, exp.values) + + result = ymd[5:] + expected = ymd.reindex(s.index[5:]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "dtype, loc, iloc", + [ + # dtype = int, step = -1 + ("int", slice(None, None, -1), slice(None, None, -1)), + ("int", slice(3, None, -1), slice(3, None, -1)), + ("int", slice(None, 1, -1), slice(None, 0, -1)), + ("int", slice(3, 1, -1), slice(3, 0, -1)), + # dtype = int, step = -2 + ("int", slice(None, None, -2), slice(None, None, -2)), + ("int", slice(3, None, -2), slice(3, None, -2)), + ("int", slice(None, 1, -2), slice(None, 0, -2)), + ("int", slice(3, 1, -2), slice(3, 0, -2)), + # dtype = str, step = -1 + ("str", slice(None, None, -1), slice(None, None, -1)), + ("str", slice("d", None, -1), slice(3, None, -1)), + ("str", slice(None, "b", -1), slice(None, 0, -1)), + ("str", slice("d", "b", -1), slice(3, 0, -1)), + # dtype = str, step = -2 + ("str", slice(None, None, -2), slice(None, None, -2)), + ("str", slice("d", None, -2), slice(3, None, -2)), + ("str", slice(None, "b", -2), slice(None, 0, -2)), + ("str", slice("d", "b", -2), slice(3, 0, -2)), + ], + ) + def test_loc_slice_negative_stepsize(self, dtype, loc, iloc): + # GH#38071 + labels = { + "str": list("abcde"), + "int": range(5), + }[dtype] + + mi = MultiIndex.from_arrays([labels] * 2) + df = DataFrame(1.0, index=mi, columns=["A"]) + + SLC = pd.IndexSlice + + expected = df.iloc[iloc, :] + result_get_loc = df.loc[SLC[loc], :] + result_get_locs_level_0 = df.loc[SLC[loc, :], :] + result_get_locs_level_1 = df.loc[SLC[:, loc], :] + + tm.assert_frame_equal(result_get_loc, expected) + tm.assert_frame_equal(result_get_locs_level_0, expected) + tm.assert_frame_equal(result_get_locs_level_1, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_sorted.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_sorted.py new file mode 100644 index 00000000..cf3fa529 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/multiindex/test_sorted.py @@ -0,0 +1,153 @@ +import numpy as np +import pytest + +from pandas import ( + NA, + DataFrame, + MultiIndex, + Series, + array, +) +import pandas._testing as tm + + +class TestMultiIndexSorted: + def test_getitem_multilevel_index_tuple_not_sorted(self): + index_columns = list("abc") + df = DataFrame( + [[0, 1, 0, "x"], [0, 0, 1, "y"]], columns=index_columns + ["data"] + ) + df = df.set_index(index_columns) + query_index = df.index[:1] + rs = df.loc[query_index, "data"] + + xp_idx = MultiIndex.from_tuples([(0, 1, 0)], names=["a", "b", "c"]) + xp = Series(["x"], index=xp_idx, name="data") + tm.assert_series_equal(rs, xp) + + def test_getitem_slice_not_sorted(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + df = frame.sort_index(level=1).T + + # buglet with int typechecking + result = df.iloc[:, : np.int32(3)] + expected = df.reindex(columns=df.columns[:3]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("key", [None, lambda x: x]) + def test_frame_getitem_not_sorted2(self, key): + # 13431 + df = DataFrame( + { + "col1": 
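The negative-step rule the parametrization above encodes, shown directly:

import numpy as np
import pandas as pd

ser = pd.Series(
    np.arange(20), index=pd.MultiIndex.from_product([list("abcde"), np.arange(4)])
)
# With step -1 the label slice starts at the *last* row of the start label:
pd.testing.assert_series_equal(ser.loc["d"::-1], ser.iloc[15::-1])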
["b", "d", "b", "a"], + "col2": [3, 1, 1, 2], + "data": ["one", "two", "three", "four"], + } + ) + + df2 = df.set_index(["col1", "col2"]) + df2_original = df2.copy() + + df2.index = df2.index.set_levels(["b", "d", "a"], level="col1") + df2.index = df2.index.set_codes([0, 1, 0, 2], level="col1") + assert not df2.index.is_monotonic_increasing + + assert df2_original.index.equals(df2.index) + expected = df2.sort_index(key=key) + assert expected.index.is_monotonic_increasing + + result = df2.sort_index(level=0, key=key) + assert result.index.is_monotonic_increasing + tm.assert_frame_equal(result, expected) + + def test_sort_values_key(self): + arrays = [ + ["bar", "bar", "baz", "baz", "qux", "qux", "foo", "foo"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + tuples = zip(*arrays) + index = MultiIndex.from_tuples(tuples) + index = index.sort_values( # sort by third letter + key=lambda x: x.map(lambda entry: entry[2]) + ) + result = DataFrame(range(8), index=index) + + arrays = [ + ["foo", "foo", "bar", "bar", "qux", "qux", "baz", "baz"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + tuples = zip(*arrays) + index = MultiIndex.from_tuples(tuples) + expected = DataFrame(range(8), index=index) + + tm.assert_frame_equal(result, expected) + + def test_argsort_with_na(self): + # GH48495 + arrays = [ + array([2, NA, 1], dtype="Int64"), + array([1, 2, 3], dtype="Int64"), + ] + index = MultiIndex.from_arrays(arrays) + result = index.argsort() + expected = np.array([2, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + def test_sort_values_with_na(self): + # GH48495 + arrays = [ + array([2, NA, 1], dtype="Int64"), + array([1, 2, 3], dtype="Int64"), + ] + index = MultiIndex.from_arrays(arrays) + index = index.sort_values() + result = DataFrame(range(3), index=index) + + arrays = [ + array([1, 2, NA], dtype="Int64"), + array([3, 1, 2], dtype="Int64"), + ] + index = MultiIndex.from_arrays(arrays) + expected = DataFrame(range(3), index=index) + + tm.assert_frame_equal(result, expected) + + def test_frame_getitem_not_sorted(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + df = frame.T + df["foo", "four"] = "foo" + + arrays = [np.array(x) for x in zip(*df.columns.values)] + + result = df["foo"] + result2 = df.loc[:, "foo"] + expected = df.reindex(columns=df.columns[arrays[0] == "foo"]) + expected.columns = expected.columns.droplevel(0) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + df = df.T + result = df.xs("foo") + result2 = df.loc["foo"] + expected = df.reindex(df.index[arrays[0] == "foo"]) + expected.index = expected.index.droplevel(0) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected) + + def test_series_getitem_not_sorted(self): + arrays = [ + ["bar", "bar", "baz", "baz", "qux", "qux", "foo", "foo"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + tuples = zip(*arrays) + index = MultiIndex.from_tuples(tuples) + s = Series(np.random.default_rng(2).standard_normal(8), index=index) + + arrays = [np.array(x) for x in zip(*index.values)] + + result = s["qux"] + result2 = s.loc["qux"] + expected = s[arrays[0] == "qux"] + expected.index = expected.index.droplevel(0) + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_at.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_at.py new file mode 
100644 index 00000000..7504c984 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_at.py @@ -0,0 +1,252 @@ +from datetime import ( + datetime, + timezone, +) + +import numpy as np +import pytest + +from pandas.errors import InvalidIndexError + +from pandas import ( + CategoricalDtype, + CategoricalIndex, + DataFrame, + DatetimeIndex, + MultiIndex, + Series, + Timestamp, +) +import pandas._testing as tm + + +def test_at_timezone(): + # https://github.com/pandas-dev/pandas/issues/33544 + result = DataFrame({"foo": [datetime(2000, 1, 1)]}) + with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"): + result.at[0, "foo"] = datetime(2000, 1, 2, tzinfo=timezone.utc) + expected = DataFrame( + {"foo": [datetime(2000, 1, 2, tzinfo=timezone.utc)]}, dtype=object + ) + tm.assert_frame_equal(result, expected) + + +def test_selection_methods_of_assigned_col(): + # GH 29282 + df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]}) + df2 = DataFrame(data={"c": [7, 8, 9]}, index=[2, 1, 0]) + df["c"] = df2["c"] + df.at[1, "c"] = 11 + result = df + expected = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [9, 11, 7]}) + tm.assert_frame_equal(result, expected) + result = df.at[1, "c"] + assert result == 11 + + result = df["c"] + expected = Series([9, 11, 7], name="c") + tm.assert_series_equal(result, expected) + + result = df[["c"]] + expected = DataFrame({"c": [9, 11, 7]}) + tm.assert_frame_equal(result, expected) + + +class TestAtSetItem: + def test_at_setitem_item_cache_cleared(self): + # GH#22372 Note the multi-step construction is necessary to trigger + # the original bug. pandas/issues/22372#issuecomment-413345309 + df = DataFrame(index=[0]) + df["x"] = 1 + df["cost"] = 2 + + # accessing df["cost"] adds "cost" to the _item_cache + df["cost"] + + # This loc[[0]] lookup used to call _consolidate_inplace at the + # BlockManager level, which failed to clear the _item_cache + df.loc[[0]] + + df.at[0, "x"] = 4 + df.at[0, "cost"] = 789 + + expected = DataFrame({"x": [4], "cost": 789}, index=[0]) + tm.assert_frame_equal(df, expected) + + # And in particular, check that the _item_cache has updated correctly. 
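The scalar contract of .at that these tests build on, in its simplest form:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
df.at[1, "a"] = 11          # fast path for a single cell, labels only
assert df.at[1, "a"] == 11  # anything non-scalar raises instead of slicing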
+ tm.assert_series_equal(df["cost"], expected["cost"]) + + def test_at_setitem_mixed_index_assignment(self): + # GH#19860 + ser = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2]) + ser.at["a"] = 11 + assert ser.iat[0] == 11 + ser.at[1] = 22 + assert ser.iat[3] == 22 + + def test_at_setitem_categorical_missing(self): + df = DataFrame( + index=range(3), columns=range(3), dtype=CategoricalDtype(["foo", "bar"]) + ) + df.at[1, 1] = "foo" + + expected = DataFrame( + [ + [np.nan, np.nan, np.nan], + [np.nan, "foo", np.nan], + [np.nan, np.nan, np.nan], + ], + dtype=CategoricalDtype(["foo", "bar"]), + ) + + tm.assert_frame_equal(df, expected) + + def test_at_setitem_multiindex(self): + df = DataFrame( + np.zeros((3, 2), dtype="int64"), + columns=MultiIndex.from_tuples([("a", 0), ("a", 1)]), + ) + df.at[0, "a"] = 10 + expected = DataFrame( + [[10, 10], [0, 0], [0, 0]], + columns=MultiIndex.from_tuples([("a", 0), ("a", 1)]), + ) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("row", (Timestamp("2019-01-01"), "2019-01-01")) + def test_at_datetime_index(self, row): + # Set float64 dtype to avoid upcast when setting .5 + df = DataFrame( + data=[[1] * 2], index=DatetimeIndex(data=["2019-01-01", "2019-01-02"]) + ).astype({0: "float64"}) + expected = DataFrame( + data=[[0.5, 1], [1.0, 1]], + index=DatetimeIndex(data=["2019-01-01", "2019-01-02"]), + ) + + df.at[row, 0] = 0.5 + tm.assert_frame_equal(df, expected) + + +class TestAtSetItemWithExpansion: + def test_at_setitem_expansion_series_dt64tz_value(self, tz_naive_fixture): + # GH#25506 + ts = Timestamp("2017-08-05 00:00:00+0100", tz=tz_naive_fixture) + result = Series(ts) + result.at[1] = ts + expected = Series([ts, ts]) + tm.assert_series_equal(result, expected) + + +class TestAtWithDuplicates: + def test_at_with_duplicate_axes_requires_scalar_lookup(self): + # GH#33041 check that falling back to loc doesn't allow non-scalar + # args to slip in + + arr = np.random.default_rng(2).standard_normal(6).reshape(3, 2) + df = DataFrame(arr, columns=["A", "A"]) + + msg = "Invalid call for scalar access" + with pytest.raises(ValueError, match=msg): + df.at[[1, 2]] + with pytest.raises(ValueError, match=msg): + df.at[1, ["A"]] + with pytest.raises(ValueError, match=msg): + df.at[:, "A"] + + with pytest.raises(ValueError, match=msg): + df.at[[1, 2]] = 1 + with pytest.raises(ValueError, match=msg): + df.at[1, ["A"]] = 1 + with pytest.raises(ValueError, match=msg): + df.at[:, "A"] = 1 + + +class TestAtErrors: + # TODO: De-duplicate/parametrize + # test_at_series_raises_key_error2, test_at_frame_raises_key_error2 + + def test_at_series_raises_key_error(self, indexer_al): + # GH#31724 .at should match .loc + + ser = Series([1, 2, 3], index=[3, 2, 1]) + result = indexer_al(ser)[1] + assert result == 3 + + with pytest.raises(KeyError, match="a"): + indexer_al(ser)["a"] + + def test_at_frame_raises_key_error(self, indexer_al): + # GH#31724 .at should match .loc + + df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1]) + + result = indexer_al(df)[1, 0] + assert result == 3 + + with pytest.raises(KeyError, match="a"): + indexer_al(df)["a", 0] + + with pytest.raises(KeyError, match="a"): + indexer_al(df)[1, "a"] + + def test_at_series_raises_key_error2(self, indexer_al): + # at should not fallback + # GH#7814 + # GH#31724 .at should match .loc + ser = Series([1, 2, 3], index=list("abc")) + result = indexer_al(ser)["a"] + assert result == 1 + + with pytest.raises(KeyError, match="^0$"): + indexer_al(ser)[0] + + def test_at_frame_raises_key_error2(self, 
indexer_al): + # GH#31724 .at should match .loc + df = DataFrame({"A": [1, 2, 3]}, index=list("abc")) + result = indexer_al(df)["a", "A"] + assert result == 1 + + with pytest.raises(KeyError, match="^0$"): + indexer_al(df)["a", 0] + + def test_at_frame_multiple_columns(self): + # GH#48296 - at shouldn't modify multiple columns + df = DataFrame({"a": [1, 2], "b": [3, 4]}) + new_row = [6, 7] + with pytest.raises( + InvalidIndexError, + match=f"You can only assign a scalar value not a \\{type(new_row)}", + ): + df.at[5] = new_row + + def test_at_getitem_mixed_index_no_fallback(self): + # GH#19860 + ser = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2]) + with pytest.raises(KeyError, match="^0$"): + ser.at[0] + with pytest.raises(KeyError, match="^4$"): + ser.at[4] + + def test_at_categorical_integers(self): + # CategoricalIndex with integer categories that don't happen to match + # the Categorical's codes + ci = CategoricalIndex([3, 4]) + + arr = np.arange(4).reshape(2, 2) + frame = DataFrame(arr, index=ci) + + for df in [frame, frame.T]: + for key in [0, 1]: + with pytest.raises(KeyError, match=str(key)): + df.at[key, key] + + def test_at_applied_for_rows(self): + # GH#48729 .at should raise InvalidIndexError when assigning rows + df = DataFrame(index=["a"], columns=["col1", "col2"]) + new_row = [123, 15] + with pytest.raises( + InvalidIndexError, + match=f"You can only assign a scalar value not a \\{type(new_row)}", + ): + df.at["a"] = new_row diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_categorical.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_categorical.py new file mode 100644 index 00000000..b45d197a --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_categorical.py @@ -0,0 +1,563 @@ +import re + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + DataFrame, + Index, + Interval, + Series, + Timedelta, + Timestamp, +) +import pandas._testing as tm +from pandas.api.types import CategoricalDtype as CDT + + +@pytest.fixture +def df(): + return DataFrame( + { + "A": np.arange(6, dtype="int64"), + }, + index=CategoricalIndex(list("aabbca"), dtype=CDT(list("cab")), name="B"), + ) + + +@pytest.fixture +def df2(): + return DataFrame( + { + "A": np.arange(6, dtype="int64"), + }, + index=CategoricalIndex(list("aabbca"), dtype=CDT(list("cabe")), name="B"), + ) + + +class TestCategoricalIndex: + def test_loc_scalar(self, df): + dtype = CDT(list("cab")) + result = df.loc["a"] + bidx = Series(list("aaa"), name="B").astype(dtype) + assert bidx.dtype == dtype + + expected = DataFrame({"A": [0, 1, 5]}, index=Index(bidx)) + tm.assert_frame_equal(result, expected) + + df = df.copy() + df.loc["a"] = 20 + bidx2 = Series(list("aabbca"), name="B").astype(dtype) + assert bidx2.dtype == dtype + expected = DataFrame( + { + "A": [20, 20, 2, 3, 4, 20], + }, + index=Index(bidx2), + ) + tm.assert_frame_equal(df, expected) + + # value not in the categories + with pytest.raises(KeyError, match=r"^'d'$"): + df.loc["d"] + + df2 = df.copy() + expected = df2.copy() + expected.index = expected.index.astype(object) + expected.loc["d"] = 10 + df2.loc["d"] = 10 + tm.assert_frame_equal(df2, expected) + + def test_loc_setitem_with_expansion_non_category(self, df): + # Setting-with-expansion with a new key "d" that is not among caegories + df.loc["a"] = 20 + + # Setting a new row on an existing column + df3 = df.copy() + df3.loc["d", "A"] = 10 + bidx3 = 
Index(list("aabbcad"), name="B") + expected3 = DataFrame( + { + "A": [20, 20, 2, 3, 4, 20, 10.0], + }, + index=Index(bidx3), + ) + tm.assert_frame_equal(df3, expected3) + + # Setting a new row _and_ a new column + df4 = df.copy() + df4.loc["d", "C"] = 10 + expected3 = DataFrame( + { + "A": [20, 20, 2, 3, 4, 20, np.nan], + "C": [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 10], + }, + index=Index(bidx3), + ) + tm.assert_frame_equal(df4, expected3) + + def test_loc_getitem_scalar_non_category(self, df): + with pytest.raises(KeyError, match="^1$"): + df.loc[1] + + def test_slicing(self): + cat = Series(Categorical([1, 2, 3, 4])) + reverse = cat[::-1] + exp = np.array([4, 3, 2, 1], dtype=np.int64) + tm.assert_numpy_array_equal(reverse.__array__(), exp) + + df = DataFrame({"value": (np.arange(100) + 1).astype("int64")}) + df["D"] = pd.cut(df.value, bins=[0, 25, 50, 75, 100]) + + expected = Series([11, Interval(0, 25)], index=["value", "D"], name=10) + result = df.iloc[10] + tm.assert_series_equal(result, expected) + + expected = DataFrame( + {"value": np.arange(11, 21).astype("int64")}, + index=np.arange(10, 20).astype("int64"), + ) + expected["D"] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100]) + result = df.iloc[10:20] + tm.assert_frame_equal(result, expected) + + expected = Series([9, Interval(0, 25)], index=["value", "D"], name=8) + result = df.loc[8] + tm.assert_series_equal(result, expected) + + def test_slicing_and_getting_ops(self): + # systematically test the slicing operations: + # for all slicing ops: + # - returning a dataframe + # - returning a column + # - returning a row + # - returning a single value + + cats = Categorical( + ["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"] + ) + idx = Index(["h", "i", "j", "k", "l", "m", "n"]) + values = [1, 2, 3, 4, 5, 6, 7] + df = DataFrame({"cats": cats, "values": values}, index=idx) + + # the expected values + cats2 = Categorical(["b", "c"], categories=["a", "b", "c"]) + idx2 = Index(["j", "k"]) + values2 = [3, 4] + + # 2:4,: | "j":"k",: + exp_df = DataFrame({"cats": cats2, "values": values2}, index=idx2) + + # :,"cats" | :,0 + exp_col = Series(cats, index=idx, name="cats") + + # "j",: | 2,: + exp_row = Series(["b", 3], index=["cats", "values"], dtype="object", name="j") + + # "j","cats" | 2,0 + exp_val = "b" + + # iloc + # frame + res_df = df.iloc[2:4, :] + tm.assert_frame_equal(res_df, exp_df) + assert isinstance(res_df["cats"].dtype, CategoricalDtype) + + # row + res_row = df.iloc[2, :] + tm.assert_series_equal(res_row, exp_row) + assert isinstance(res_row["cats"], str) + + # col + res_col = df.iloc[:, 0] + tm.assert_series_equal(res_col, exp_col) + assert isinstance(res_col.dtype, CategoricalDtype) + + # single value + res_val = df.iloc[2, 0] + assert res_val == exp_val + + # loc + # frame + res_df = df.loc["j":"k", :] + tm.assert_frame_equal(res_df, exp_df) + assert isinstance(res_df["cats"].dtype, CategoricalDtype) + + # row + res_row = df.loc["j", :] + tm.assert_series_equal(res_row, exp_row) + assert isinstance(res_row["cats"], str) + + # col + res_col = df.loc[:, "cats"] + tm.assert_series_equal(res_col, exp_col) + assert isinstance(res_col.dtype, CategoricalDtype) + + # single value + res_val = df.loc["j", "cats"] + assert res_val == exp_val + + # single value + res_val = df.loc["j", df.columns[0]] + assert res_val == exp_val + + # iat + res_val = df.iat[2, 0] + assert res_val == exp_val + + # at + res_val = df.at["j", "cats"] + assert res_val == exp_val + + # fancy indexing + exp_fancy = df.iloc[[2]] + + 
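+        # masking on "cats" or on "values" should select the same row that iloc[[2]] takes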
res_fancy = df[df["cats"] == "b"] + tm.assert_frame_equal(res_fancy, exp_fancy) + res_fancy = df[df["values"] == 3] + tm.assert_frame_equal(res_fancy, exp_fancy) + + # get_value + res_val = df.at["j", "cats"] + assert res_val == exp_val + + # i : int, slice, or sequence of integers + res_row = df.iloc[2] + tm.assert_series_equal(res_row, exp_row) + assert isinstance(res_row["cats"], str) + + res_df = df.iloc[slice(2, 4)] + tm.assert_frame_equal(res_df, exp_df) + assert isinstance(res_df["cats"].dtype, CategoricalDtype) + + res_df = df.iloc[[2, 3]] + tm.assert_frame_equal(res_df, exp_df) + assert isinstance(res_df["cats"].dtype, CategoricalDtype) + + res_col = df.iloc[:, 0] + tm.assert_series_equal(res_col, exp_col) + assert isinstance(res_col.dtype, CategoricalDtype) + + res_df = df.iloc[:, slice(0, 2)] + tm.assert_frame_equal(res_df, df) + assert isinstance(res_df["cats"].dtype, CategoricalDtype) + + res_df = df.iloc[:, [0, 1]] + tm.assert_frame_equal(res_df, df) + assert isinstance(res_df["cats"].dtype, CategoricalDtype) + + def test_slicing_doc_examples(self): + # GH 7918 + cats = Categorical( + ["a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c"] + ) + idx = Index(["h", "i", "j", "k", "l", "m", "n"]) + values = [1, 2, 2, 2, 3, 4, 5] + df = DataFrame({"cats": cats, "values": values}, index=idx) + + result = df.iloc[2:4, :] + expected = DataFrame( + { + "cats": Categorical(["b", "b"], categories=["a", "b", "c"]), + "values": [2, 2], + }, + index=["j", "k"], + ) + tm.assert_frame_equal(result, expected) + + result = df.iloc[2:4, :].dtypes + expected = Series(["category", "int64"], ["cats", "values"]) + tm.assert_series_equal(result, expected) + + result = df.loc["h":"j", "cats"] + expected = Series( + Categorical(["a", "b", "b"], categories=["a", "b", "c"]), + index=["h", "i", "j"], + name="cats", + ) + tm.assert_series_equal(result, expected) + + result = df.loc["h":"j", df.columns[0:1]] + expected = DataFrame( + {"cats": Categorical(["a", "b", "b"], categories=["a", "b", "c"])}, + index=["h", "i", "j"], + ) + tm.assert_frame_equal(result, expected) + + def test_loc_getitem_listlike_labels(self, df): + # list of labels + result = df.loc[["c", "a"]] + expected = df.iloc[[4, 0, 1, 5]] + tm.assert_frame_equal(result, expected, check_index_type=True) + + def test_loc_getitem_listlike_unused_category(self, df2): + # GH#37901 a label that is in index.categories but not in index + # listlike containing an element in the categories but not in the values + with pytest.raises(KeyError, match=re.escape("['e'] not in index")): + df2.loc[["a", "b", "e"]] + + def test_loc_getitem_label_unused_category(self, df2): + # element in the categories but not in the values + with pytest.raises(KeyError, match=r"^'e'$"): + df2.loc["e"] + + def test_loc_getitem_non_category(self, df2): + # not all labels in the categories + with pytest.raises(KeyError, match=re.escape("['d'] not in index")): + df2.loc[["a", "d"]] + + def test_loc_setitem_expansion_label_unused_category(self, df2): + # assigning with a label that is in the categories but not in the index + df = df2.copy() + df.loc["e"] = 20 + result = df.loc[["a", "b", "e"]] + exp_index = CategoricalIndex(list("aaabbe"), categories=list("cabe"), name="B") + expected = DataFrame({"A": [0, 1, 5, 2, 3, 20]}, index=exp_index) + tm.assert_frame_equal(result, expected) + + def test_loc_listlike_dtypes(self): + # GH 11586 + + # unique categories and codes + index = CategoricalIndex(["a", "b", "c"]) + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, 
index=index) + + # unique slice + res = df.loc[["a", "b"]] + exp_index = CategoricalIndex(["a", "b"], categories=index.categories) + exp = DataFrame({"A": [1, 2], "B": [4, 5]}, index=exp_index) + tm.assert_frame_equal(res, exp, check_index_type=True) + + # duplicated slice + res = df.loc[["a", "a", "b"]] + + exp_index = CategoricalIndex(["a", "a", "b"], categories=index.categories) + exp = DataFrame({"A": [1, 1, 2], "B": [4, 4, 5]}, index=exp_index) + tm.assert_frame_equal(res, exp, check_index_type=True) + + with pytest.raises(KeyError, match=re.escape("['x'] not in index")): + df.loc[["a", "x"]] + + def test_loc_listlike_dtypes_duplicated_categories_and_codes(self): + # duplicated categories and codes + index = CategoricalIndex(["a", "b", "a"]) + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=index) + + # unique slice + res = df.loc[["a", "b"]] + exp = DataFrame( + {"A": [1, 3, 2], "B": [4, 6, 5]}, index=CategoricalIndex(["a", "a", "b"]) + ) + tm.assert_frame_equal(res, exp, check_index_type=True) + + # duplicated slice + res = df.loc[["a", "a", "b"]] + exp = DataFrame( + {"A": [1, 3, 1, 3, 2], "B": [4, 6, 4, 6, 5]}, + index=CategoricalIndex(["a", "a", "a", "a", "b"]), + ) + tm.assert_frame_equal(res, exp, check_index_type=True) + + with pytest.raises(KeyError, match=re.escape("['x'] not in index")): + df.loc[["a", "x"]] + + def test_loc_listlike_dtypes_unused_category(self): + # contains unused category + index = CategoricalIndex(["a", "b", "a", "c"], categories=list("abcde")) + df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]}, index=index) + + res = df.loc[["a", "b"]] + exp = DataFrame( + {"A": [1, 3, 2], "B": [5, 7, 6]}, + index=CategoricalIndex(["a", "a", "b"], categories=list("abcde")), + ) + tm.assert_frame_equal(res, exp, check_index_type=True) + + # duplicated slice + res = df.loc[["a", "a", "b"]] + exp = DataFrame( + {"A": [1, 3, 1, 3, 2], "B": [5, 7, 5, 7, 6]}, + index=CategoricalIndex(["a", "a", "a", "a", "b"], categories=list("abcde")), + ) + tm.assert_frame_equal(res, exp, check_index_type=True) + + with pytest.raises(KeyError, match=re.escape("['x'] not in index")): + df.loc[["a", "x"]] + + def test_loc_getitem_listlike_unused_category_raises_keyerror(self): + # key that is an *unused* category raises + index = CategoricalIndex(["a", "b", "a", "c"], categories=list("abcde")) + df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]}, index=index) + + with pytest.raises(KeyError, match="e"): + # For comparison, check the scalar behavior + df.loc["e"] + + with pytest.raises(KeyError, match=re.escape("['e'] not in index")): + df.loc[["a", "e"]] + + def test_ix_categorical_index(self): + # GH 12531 + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=list("ABC"), + columns=list("XYZ"), + ) + cdf = df.copy() + cdf.index = CategoricalIndex(df.index) + cdf.columns = CategoricalIndex(df.columns) + + expect = Series(df.loc["A", :], index=cdf.columns, name="A") + tm.assert_series_equal(cdf.loc["A", :], expect) + + expect = Series(df.loc[:, "X"], index=cdf.index, name="X") + tm.assert_series_equal(cdf.loc[:, "X"], expect) + + exp_index = CategoricalIndex(list("AB"), categories=["A", "B", "C"]) + expect = DataFrame(df.loc[["A", "B"], :], columns=cdf.columns, index=exp_index) + tm.assert_frame_equal(cdf.loc[["A", "B"], :], expect) + + exp_columns = CategoricalIndex(list("XY"), categories=["X", "Y", "Z"]) + expect = DataFrame(df.loc[:, ["X", "Y"]], index=cdf.index, columns=exp_columns) + tm.assert_frame_equal(cdf.loc[:, ["X", "Y"]], expect) + + def 
test_ix_categorical_index_non_unique(self): + # non-unique + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=list("ABA"), + columns=list("XYX"), + ) + cdf = df.copy() + cdf.index = CategoricalIndex(df.index) + cdf.columns = CategoricalIndex(df.columns) + + exp_index = CategoricalIndex(list("AA"), categories=["A", "B"]) + expect = DataFrame(df.loc["A", :], columns=cdf.columns, index=exp_index) + tm.assert_frame_equal(cdf.loc["A", :], expect) + + exp_columns = CategoricalIndex(list("XX"), categories=["X", "Y"]) + expect = DataFrame(df.loc[:, "X"], index=cdf.index, columns=exp_columns) + tm.assert_frame_equal(cdf.loc[:, "X"], expect) + + expect = DataFrame( + df.loc[["A", "B"], :], + columns=cdf.columns, + index=CategoricalIndex(list("AAB")), + ) + tm.assert_frame_equal(cdf.loc[["A", "B"], :], expect) + + expect = DataFrame( + df.loc[:, ["X", "Y"]], + index=cdf.index, + columns=CategoricalIndex(list("XXY")), + ) + tm.assert_frame_equal(cdf.loc[:, ["X", "Y"]], expect) + + def test_loc_slice(self, df): + # GH9748 + msg = ( + "cannot do slice indexing on CategoricalIndex with these " + r"indexers \[1\] of type int" + ) + with pytest.raises(TypeError, match=msg): + df.loc[1:5] + + result = df.loc["b":"c"] + expected = df.iloc[[2, 3, 4]] + tm.assert_frame_equal(result, expected) + + def test_loc_and_at_with_categorical_index(self): + # GH 20629 + df = DataFrame( + [[1, 2], [3, 4], [5, 6]], index=CategoricalIndex(["A", "B", "C"]) + ) + + s = df[0] + assert s.loc["A"] == 1 + assert s.at["A"] == 1 + + assert df.loc["B", 1] == 4 + assert df.at["B", 1] == 4 + + @pytest.mark.parametrize( + "idx_values", + [ + # python types + [1, 2, 3], + [-1, -2, -3], + [1.5, 2.5, 3.5], + [-1.5, -2.5, -3.5], + # numpy int/uint + *(np.array([1, 2, 3], dtype=dtype) for dtype in tm.ALL_INT_NUMPY_DTYPES), + # numpy floats + *(np.array([1.5, 2.5, 3.5], dtype=dtyp) for dtyp in tm.FLOAT_NUMPY_DTYPES), + # numpy object + np.array([1, "b", 3.5], dtype=object), + # pandas scalars + [Interval(1, 4), Interval(4, 6), Interval(6, 9)], + [Timestamp(2019, 1, 1), Timestamp(2019, 2, 1), Timestamp(2019, 3, 1)], + [Timedelta(1, "d"), Timedelta(2, "d"), Timedelta(3, "D")], + # pandas Integer arrays + *(pd.array([1, 2, 3], dtype=dtype) for dtype in tm.ALL_INT_EA_DTYPES), + # other pandas arrays + pd.IntervalIndex.from_breaks([1, 4, 6, 9]).array, + pd.date_range("2019-01-01", periods=3).array, + pd.timedelta_range(start="1d", periods=3).array, + ], + ) + def test_loc_getitem_with_non_string_categories(self, idx_values, ordered): + # GH-17569 + cat_idx = CategoricalIndex(idx_values, ordered=ordered) + df = DataFrame({"A": ["foo", "bar", "baz"]}, index=cat_idx) + sl = slice(idx_values[0], idx_values[1]) + + # scalar selection + result = df.loc[idx_values[0]] + expected = Series(["foo"], index=["A"], name=idx_values[0]) + tm.assert_series_equal(result, expected) + + # list selection + result = df.loc[idx_values[:2]] + expected = DataFrame(["foo", "bar"], index=cat_idx[:2], columns=["A"]) + tm.assert_frame_equal(result, expected) + + # slice selection + result = df.loc[sl] + expected = DataFrame(["foo", "bar"], index=cat_idx[:2], columns=["A"]) + tm.assert_frame_equal(result, expected) + + # scalar assignment + result = df.copy() + result.loc[idx_values[0]] = "qux" + expected = DataFrame({"A": ["qux", "bar", "baz"]}, index=cat_idx) + tm.assert_frame_equal(result, expected) + + # list assignment + result = df.copy() + result.loc[idx_values[:2], "A"] = ["qux", "qux2"] + expected = DataFrame({"A": ["qux", "qux2", 
"baz"]}, index=cat_idx) + tm.assert_frame_equal(result, expected) + + # slice assignment + result = df.copy() + result.loc[sl, "A"] = ["qux", "qux2"] + expected = DataFrame({"A": ["qux", "qux2", "baz"]}, index=cat_idx) + tm.assert_frame_equal(result, expected) + + def test_getitem_categorical_with_nan(self): + # GH#41933 + ci = CategoricalIndex(["A", "B", np.nan]) + + ser = Series(range(3), index=ci) + + assert ser[np.nan] == 2 + assert ser.loc[np.nan] == 2 + + df = DataFrame(ser) + assert df.loc[np.nan, 0] == 2 + assert df.loc[np.nan][0] == 2 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_chaining_and_caching.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_chaining_and_caching.py new file mode 100644 index 00000000..f36fdf0d --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_chaining_and_caching.py @@ -0,0 +1,631 @@ +from string import ascii_letters as letters + +import numpy as np +import pytest + +from pandas.errors import ( + SettingWithCopyError, + SettingWithCopyWarning, +) +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Series, + Timestamp, + date_range, + option_context, +) +import pandas._testing as tm + +msg = "A value is trying to be set on a copy of a slice from a DataFrame" + + +def random_text(nobs=100): + # Construct a DataFrame where each row is a random slice from 'letters' + idxs = np.random.default_rng(2).integers(len(letters), size=(nobs, 2)) + idxs.sort(axis=1) + strings = [letters[x[0] : x[1]] for x in idxs] + + return DataFrame(strings, columns=["letters"]) + + +class TestCaching: + def test_slice_consolidate_invalidate_item_cache(self, using_copy_on_write): + # this is chained assignment, but will 'work' + with option_context("chained_assignment", None): + # #3970 + df = DataFrame({"aa": np.arange(5), "bb": [2.2] * 5}) + + # Creates a second float block + df["cc"] = 0.0 + + # caches a reference to the 'bb' series + df["bb"] + + # repr machinery triggers consolidation + repr(df) + + # Assignment to wrong series + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["bb"].iloc[0] = 0.17 + else: + df["bb"].iloc[0] = 0.17 + df._clear_item_cache() + if not using_copy_on_write: + tm.assert_almost_equal(df["bb"][0], 0.17) + else: + # with ArrayManager, parent is not mutated with chained assignment + tm.assert_almost_equal(df["bb"][0], 2.2) + + @pytest.mark.parametrize("do_ref", [True, False]) + def test_setitem_cache_updating(self, do_ref): + # GH 5424 + cont = ["one", "two", "three", "four", "five", "six", "seven"] + + df = DataFrame({"a": cont, "b": cont[3:] + cont[:3], "c": np.arange(7)}) + + # ref the cache + if do_ref: + df.loc[0, "c"] + + # set it + df.loc[7, "c"] = 1 + + assert df.loc[0, "c"] == 0.0 + assert df.loc[7, "c"] == 1.0 + + def test_setitem_cache_updating_slices(self, using_copy_on_write): + # GH 7084 + # not updating cache on series setting with slices + expected = DataFrame( + {"A": [600, 600, 600]}, index=date_range("5/7/2014", "5/9/2014") + ) + out = DataFrame({"A": [0, 0, 0]}, index=date_range("5/7/2014", "5/9/2014")) + df = DataFrame({"C": ["A", "A", "A"], "D": [100, 200, 300]}) + + # loop through df to update out + six = Timestamp("5/7/2014") + eix = Timestamp("5/9/2014") + for ix, row in df.iterrows(): + out.loc[six:eix, row["C"]] = out.loc[six:eix, row["C"]] + row["D"] + + tm.assert_frame_equal(out, expected) + tm.assert_series_equal(out["A"], expected["A"]) + + # try via a chain 
indexing + # this actually works + out = DataFrame({"A": [0, 0, 0]}, index=date_range("5/7/2014", "5/9/2014")) + out_original = out.copy() + for ix, row in df.iterrows(): + v = out[row["C"]][six:eix] + row["D"] + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + out[row["C"]][six:eix] = v + else: + out[row["C"]][six:eix] = v + + if not using_copy_on_write: + tm.assert_frame_equal(out, expected) + tm.assert_series_equal(out["A"], expected["A"]) + else: + tm.assert_frame_equal(out, out_original) + tm.assert_series_equal(out["A"], out_original["A"]) + + out = DataFrame({"A": [0, 0, 0]}, index=date_range("5/7/2014", "5/9/2014")) + for ix, row in df.iterrows(): + out.loc[six:eix, row["C"]] += row["D"] + + tm.assert_frame_equal(out, expected) + tm.assert_series_equal(out["A"], expected["A"]) + + def test_altering_series_clears_parent_cache(self, using_copy_on_write): + # GH #33675 + df = DataFrame([[1, 2], [3, 4]], index=["a", "b"], columns=["A", "B"]) + ser = df["A"] + + if using_copy_on_write: + assert "A" not in df._item_cache + else: + assert "A" in df._item_cache + + # Adding a new entry to ser swaps in a new array, so "A" needs to + # be removed from df._item_cache + ser["c"] = 5 + assert len(ser) == 3 + assert "A" not in df._item_cache + assert df["A"] is not ser + assert len(df["A"]) == 2 + + +class TestChaining: + def test_setitem_chained_setfault(self, using_copy_on_write): + # GH6026 + data = ["right", "left", "left", "left", "right", "left", "timeout"] + mdata = ["right", "left", "left", "left", "right", "left", "none"] + + df = DataFrame({"response": np.array(data)}) + mask = df.response == "timeout" + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df.response[mask] = "none" + tm.assert_frame_equal(df, DataFrame({"response": data})) + else: + df.response[mask] = "none" + tm.assert_frame_equal(df, DataFrame({"response": mdata})) + + recarray = np.rec.fromarrays([data], names=["response"]) + df = DataFrame(recarray) + mask = df.response == "timeout" + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df.response[mask] = "none" + tm.assert_frame_equal(df, DataFrame({"response": data})) + else: + df.response[mask] = "none" + tm.assert_frame_equal(df, DataFrame({"response": mdata})) + + df = DataFrame({"response": data, "response1": data}) + df_original = df.copy() + mask = df.response == "timeout" + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df.response[mask] = "none" + tm.assert_frame_equal(df, df_original) + else: + df.response[mask] = "none" + tm.assert_frame_equal(df, DataFrame({"response": mdata, "response1": data})) + + # GH 6056 + expected = DataFrame({"A": [np.nan, "bar", "bah", "foo", "bar"]}) + df = DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])}) + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["A"].iloc[0] = np.nan + expected = DataFrame({"A": ["foo", "bar", "bah", "foo", "bar"]}) + else: + df["A"].iloc[0] = np.nan + expected = DataFrame({"A": [np.nan, "bar", "bah", "foo", "bar"]}) + result = df.head() + tm.assert_frame_equal(result, expected) + + df = DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])}) + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df.A.iloc[0] = np.nan + else: + df.A.iloc[0] = np.nan + result = df.head() + tm.assert_frame_equal(result, expected) + + @pytest.mark.arm_slow + def test_detect_chained_assignment(self, using_copy_on_write): + with option_context("chained_assignment", 
"raise"): + # work with the chain + expected = DataFrame([[-5, 1], [-6, 3]], columns=list("AB")) + df = DataFrame( + np.arange(4).reshape(2, 2), columns=list("AB"), dtype="int64" + ) + df_original = df.copy() + assert df._is_copy is None + + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["A"][0] = -5 + with tm.raises_chained_assignment_error(): + df["A"][1] = -6 + tm.assert_frame_equal(df, df_original) + else: + df["A"][0] = -5 + df["A"][1] = -6 + tm.assert_frame_equal(df, expected) + + @pytest.mark.arm_slow + def test_detect_chained_assignment_raises( + self, using_array_manager, using_copy_on_write + ): + # test with the chaining + df = DataFrame( + { + "A": Series(range(2), dtype="int64"), + "B": np.array(np.arange(2, 4), dtype=np.float64), + } + ) + df_original = df.copy() + assert df._is_copy is None + + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["A"][0] = -5 + with tm.raises_chained_assignment_error(): + df["A"][1] = -6 + tm.assert_frame_equal(df, df_original) + elif not using_array_manager: + with pytest.raises(SettingWithCopyError, match=msg): + df["A"][0] = -5 + + with pytest.raises(SettingWithCopyError, match=msg): + df["A"][1] = np.nan + + assert df["A"]._is_copy is None + else: + # INFO(ArrayManager) for ArrayManager it doesn't matter that it's + # a mixed dataframe + df["A"][0] = -5 + df["A"][1] = -6 + expected = DataFrame([[-5, 2], [-6, 3]], columns=list("AB")) + expected["B"] = expected["B"].astype("float64") + tm.assert_frame_equal(df, expected) + + @pytest.mark.arm_slow + def test_detect_chained_assignment_fails(self, using_copy_on_write): + # Using a copy (the chain), fails + df = DataFrame( + { + "A": Series(range(2), dtype="int64"), + "B": np.array(np.arange(2, 4), dtype=np.float64), + } + ) + + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df.loc[0]["A"] = -5 + else: + with pytest.raises(SettingWithCopyError, match=msg): + df.loc[0]["A"] = -5 + + @pytest.mark.arm_slow + def test_detect_chained_assignment_doc_example(self, using_copy_on_write): + # Doc example + df = DataFrame( + { + "a": ["one", "one", "two", "three", "two", "one", "six"], + "c": Series(range(7), dtype="int64"), + } + ) + assert df._is_copy is None + + if using_copy_on_write: + indexer = df.a.str.startswith("o") + with tm.raises_chained_assignment_error(): + df[indexer]["c"] = 42 + else: + with pytest.raises(SettingWithCopyError, match=msg): + indexer = df.a.str.startswith("o") + df[indexer]["c"] = 42 + + @pytest.mark.arm_slow + def test_detect_chained_assignment_object_dtype( + self, using_array_manager, using_copy_on_write + ): + expected = DataFrame({"A": [111, "bbb", "ccc"], "B": [1, 2, 3]}) + df = DataFrame({"A": ["aaa", "bbb", "ccc"], "B": [1, 2, 3]}) + df_original = df.copy() + + if not using_copy_on_write: + with pytest.raises(SettingWithCopyError, match=msg): + df.loc[0]["A"] = 111 + + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["A"][0] = 111 + tm.assert_frame_equal(df, df_original) + elif not using_array_manager: + with pytest.raises(SettingWithCopyError, match=msg): + df["A"][0] = 111 + + df.loc[0, "A"] = 111 + tm.assert_frame_equal(df, expected) + else: + # INFO(ArrayManager) for ArrayManager it doesn't matter that it's + # a mixed dataframe + df["A"][0] = 111 + tm.assert_frame_equal(df, expected) + + @pytest.mark.arm_slow + def test_detect_chained_assignment_is_copy_pickle(self): + # gh-5475: Make sure that is_copy is picked up reconstruction + df = DataFrame({"A": [1, 2]}) + 
assert df._is_copy is None + + with tm.ensure_clean("__tmp__pickle") as path: + df.to_pickle(path) + df2 = pd.read_pickle(path) + df2["B"] = df2["A"] + df2["B"] = df2["A"] + + @pytest.mark.arm_slow + def test_detect_chained_assignment_setting_entire_column(self): + # gh-5597: a spurious raise as we are setting the entire column here + + df = random_text(100000) + + # Always a copy + x = df.iloc[[0, 1, 2]] + assert x._is_copy is not None + + x = df.iloc[[0, 1, 2, 4]] + assert x._is_copy is not None + + # Explicitly copy + indexer = df.letters.apply(lambda x: len(x) > 10) + df = df.loc[indexer].copy() + + assert df._is_copy is None + df["letters"] = df["letters"].apply(str.lower) + + @pytest.mark.arm_slow + def test_detect_chained_assignment_implicit_take(self): + # Implicitly take + df = random_text(100000) + indexer = df.letters.apply(lambda x: len(x) > 10) + df = df.loc[indexer] + + assert df._is_copy is not None + df["letters"] = df["letters"].apply(str.lower) + + @pytest.mark.arm_slow + def test_detect_chained_assignment_implicit_take2(self, using_copy_on_write): + if using_copy_on_write: + pytest.skip("_is_copy is not always set for CoW") + # Implicitly take 2 + df = random_text(100000) + indexer = df.letters.apply(lambda x: len(x) > 10) + + df = df.loc[indexer] + assert df._is_copy is not None + df.loc[:, "letters"] = df["letters"].apply(str.lower) + + # with the enforcement of #45333 in 2.0, the .loc[:, letters] setting + # is inplace, so df._is_copy remains non-None. + assert df._is_copy is not None + + df["letters"] = df["letters"].apply(str.lower) + assert df._is_copy is None + + @pytest.mark.arm_slow + def test_detect_chained_assignment_str(self): + df = random_text(100000) + indexer = df.letters.apply(lambda x: len(x) > 10) + df.loc[indexer, "letters"] = df.loc[indexer, "letters"].apply(str.lower) + + @pytest.mark.arm_slow + def test_detect_chained_assignment_is_copy(self): + # an identical take, so no copy + df = DataFrame({"a": [1]}).dropna() + assert df._is_copy is None + df["a"] += 1 + + @pytest.mark.arm_slow + def test_detect_chained_assignment_sorting(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + ser = df.iloc[:, 0].sort_values() + + tm.assert_series_equal(ser, df.iloc[:, 0].sort_values()) + tm.assert_series_equal(ser, df[0].sort_values()) + + @pytest.mark.arm_slow + def test_detect_chained_assignment_false_positives(self): + # see gh-6025: false positives + df = DataFrame({"column1": ["a", "a", "a"], "column2": [4, 8, 9]}) + str(df) + + df["column1"] = df["column1"] + "b" + str(df) + + df = df[df["column2"] != 8] + str(df) + + df["column1"] = df["column1"] + "c" + str(df) + + @pytest.mark.arm_slow + def test_detect_chained_assignment_undefined_column(self, using_copy_on_write): + # from SO: + # https://stackoverflow.com/questions/24054495/potential-bug-setting-value-for-undefined-column-using-iloc + df = DataFrame(np.arange(0, 9), columns=["count"]) + df["group"] = "b" + df_original = df.copy() + + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df.iloc[0:5]["group"] = "a" + tm.assert_frame_equal(df, df_original) + else: + with pytest.raises(SettingWithCopyError, match=msg): + df.iloc[0:5]["group"] = "a" + + @pytest.mark.arm_slow + def test_detect_chained_assignment_changing_dtype( + self, using_array_manager, using_copy_on_write + ): + # Mixed type setting but same dtype & changing dtype + df = DataFrame( + { + "A": date_range("20130101", periods=5), + "B": np.random.default_rng(2).standard_normal(5), + "C": 
np.arange(5, dtype="int64"), + "D": ["a", "b", "c", "d", "e"], + } + ) + df_original = df.copy() + + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df.loc[2]["D"] = "foo" + with tm.raises_chained_assignment_error(): + df.loc[2]["C"] = "foo" + with tm.raises_chained_assignment_error(extra_warnings=(FutureWarning,)): + df["C"][2] = "foo" + tm.assert_frame_equal(df, df_original) + + if not using_copy_on_write: + with pytest.raises(SettingWithCopyError, match=msg): + df.loc[2]["D"] = "foo" + + with pytest.raises(SettingWithCopyError, match=msg): + df.loc[2]["C"] = "foo" + + if not using_array_manager: + with pytest.raises(SettingWithCopyError, match=msg): + df["C"][2] = "foo" + else: + # INFO(ArrayManager) for ArrayManager it doesn't matter if it's + # changing the dtype or not + df["C"][2] = "foo" + assert df.loc[2, "C"] == "foo" + + def test_setting_with_copy_bug(self, using_copy_on_write): + # operating on a copy + df = DataFrame( + {"a": list(range(4)), "b": list("ab.."), "c": ["a", "b", np.nan, "d"]} + ) + df_original = df.copy() + mask = pd.isna(df.c) + + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df[["c"]][mask] = df[["b"]][mask] + tm.assert_frame_equal(df, df_original) + else: + with pytest.raises(SettingWithCopyError, match=msg): + df[["c"]][mask] = df[["b"]][mask] + + def test_setting_with_copy_bug_no_warning(self): + # invalid warning as we are returning a new object + # GH 8730 + df1 = DataFrame({"x": Series(["a", "b", "c"]), "y": Series(["d", "e", "f"])}) + df2 = df1[["x"]] + + # this should not raise + df2["y"] = ["g", "h", "i"] + + def test_detect_chained_assignment_warnings_errors(self, using_copy_on_write): + df = DataFrame({"A": ["aaa", "bbb", "ccc"], "B": [1, 2, 3]}) + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df.loc[0]["A"] = 111 + return + + with option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(SettingWithCopyWarning): + df.loc[0]["A"] = 111 + + with option_context("chained_assignment", "raise"): + with pytest.raises(SettingWithCopyError, match=msg): + df.loc[0]["A"] = 111 + + @pytest.mark.parametrize("rhs", [3, DataFrame({0: [1, 2, 3, 4]})]) + def test_detect_chained_assignment_warning_stacklevel( + self, rhs, using_copy_on_write + ): + # GH#42570 + df = DataFrame(np.arange(25).reshape(5, 5)) + df_original = df.copy() + chained = df.loc[:3] + with option_context("chained_assignment", "warn"): + if not using_copy_on_write: + with tm.assert_produces_warning(SettingWithCopyWarning) as t: + chained[2] = rhs + assert t[0].filename == __file__ + else: + # INFO(CoW) no warning, and original dataframe not changed + with tm.assert_produces_warning(None): + chained[2] = rhs + tm.assert_frame_equal(df, df_original) + + # TODO(ArrayManager) fast_xs with array-like scalars is not yet working + @td.skip_array_manager_not_yet_implemented + def test_chained_getitem_with_lists(self): + # GH6394 + # Regression in chained getitem indexing with embedded list-like from + # 0.12 + + df = DataFrame({"A": 5 * [np.zeros(3)], "B": 5 * [np.ones(3)]}) + expected = df["A"].iloc[2] + result = df.loc[2, "A"] + tm.assert_numpy_array_equal(result, expected) + result2 = df.iloc[2]["A"] + tm.assert_numpy_array_equal(result2, expected) + result3 = df["A"].loc[2] + tm.assert_numpy_array_equal(result3, expected) + result4 = df["A"].iloc[2] + tm.assert_numpy_array_equal(result4, expected) + + def test_cache_updating(self): + # GH 4939, make sure to update the cache on setitem + + df = 
tm.makeDataFrame() + df["A"] # cache series + df.loc["Hello Friend"] = df.iloc[0] + assert "Hello Friend" in df["A"].index + assert "Hello Friend" in df["B"].index + + def test_cache_updating2(self, using_copy_on_write): + # 10264 + df = DataFrame( + np.zeros((5, 5), dtype="int64"), + columns=["a", "b", "c", "d", "e"], + index=range(5), + ) + df["f"] = 0 + df_orig = df.copy() + if using_copy_on_write: + with pytest.raises(ValueError, match="read-only"): + df.f.values[3] = 1 + tm.assert_frame_equal(df, df_orig) + return + + df.f.values[3] = 1 + + df.f.values[3] = 2 + expected = DataFrame( + np.zeros((5, 6), dtype="int64"), + columns=["a", "b", "c", "d", "e", "f"], + index=range(5), + ) + expected.at[3, "f"] = 2 + tm.assert_frame_equal(df, expected) + expected = Series([0, 0, 0, 2, 0], name="f") + tm.assert_series_equal(df.f, expected) + + def test_iloc_setitem_chained_assignment(self, using_copy_on_write): + # GH#3970 + with option_context("chained_assignment", None): + df = DataFrame({"aa": range(5), "bb": [2.2] * 5}) + df["cc"] = 0.0 + + ck = [True] * len(df) + + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["bb"].iloc[0] = 0.13 + else: + df["bb"].iloc[0] = 0.13 + + # GH#3970 this lookup used to break the chained setting to 0.15 + df.iloc[ck] + + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["bb"].iloc[0] = 0.15 + else: + df["bb"].iloc[0] = 0.15 + + if not using_copy_on_write: + assert df["bb"].iloc[0] == 0.15 + else: + assert df["bb"].iloc[0] == 2.2 + + def test_getitem_loc_assignment_slice_state(self, using_copy_on_write): + # GH 13569 + df = DataFrame({"a": [10, 20, 30]}) + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["a"].loc[4] = 40 + else: + df["a"].loc[4] = 40 + tm.assert_frame_equal(df, DataFrame({"a": [10, 20, 30]})) + tm.assert_series_equal(df["a"], Series([10, 20, 30], name="a")) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_check_indexer.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_check_indexer.py new file mode 100644 index 00000000..975a31b8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_check_indexer.py @@ -0,0 +1,105 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.api.indexers import check_array_indexer + + +@pytest.mark.parametrize( + "indexer, expected", + [ + # integer + ([1, 2], np.array([1, 2], dtype=np.intp)), + (np.array([1, 2], dtype="int64"), np.array([1, 2], dtype=np.intp)), + (pd.array([1, 2], dtype="Int32"), np.array([1, 2], dtype=np.intp)), + (pd.Index([1, 2]), np.array([1, 2], dtype=np.intp)), + # boolean + ([True, False, True], np.array([True, False, True], dtype=np.bool_)), + (np.array([True, False, True]), np.array([True, False, True], dtype=np.bool_)), + ( + pd.array([True, False, True], dtype="boolean"), + np.array([True, False, True], dtype=np.bool_), + ), + # other + ([], np.array([], dtype=np.intp)), + ], +) +def test_valid_input(indexer, expected): + arr = np.array([1, 2, 3]) + result = check_array_indexer(arr, indexer) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "indexer", [[True, False, None], pd.array([True, False, None], dtype="boolean")] +) +def test_boolean_na_returns_indexer(indexer): + # https://github.com/pandas-dev/pandas/issues/31503 + arr = np.array([1, 2, 3]) + + result = check_array_indexer(arr, indexer) + expected = np.array([True, False, False], dtype=bool) + + 
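+    # NA entries in the boolean mask are filled with False, so only position 0 is selected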
tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "indexer", + [ + [True, False], + pd.array([True, False], dtype="boolean"), + np.array([True, False], dtype=np.bool_), + ], +) +def test_bool_raise_length(indexer): + arr = np.array([1, 2, 3]) + + msg = "Boolean index has wrong length" + with pytest.raises(IndexError, match=msg): + check_array_indexer(arr, indexer) + + +@pytest.mark.parametrize( + "indexer", [[0, 1, None], pd.array([0, 1, pd.NA], dtype="Int64")] +) +def test_int_raise_missing_values(indexer): + arr = np.array([1, 2, 3]) + + msg = "Cannot index with an integer indexer containing NA values" + with pytest.raises(ValueError, match=msg): + check_array_indexer(arr, indexer) + + +@pytest.mark.parametrize( + "indexer", + [ + [0.0, 1.0], + np.array([1.0, 2.0], dtype="float64"), + np.array([True, False], dtype=object), + pd.Index([True, False], dtype=object), + ], +) +def test_raise_invalid_array_dtypes(indexer): + arr = np.array([1, 2, 3]) + + msg = "arrays used as indices must be of integer or boolean type" + with pytest.raises(IndexError, match=msg): + check_array_indexer(arr, indexer) + + +def test_raise_nullable_string_dtype(nullable_string_dtype): + indexer = pd.array(["a", "b"], dtype=nullable_string_dtype) + arr = np.array([1, 2, 3]) + + msg = "arrays used as indices must be of integer or boolean type" + with pytest.raises(IndexError, match=msg): + check_array_indexer(arr, indexer) + + +@pytest.mark.parametrize("indexer", [None, Ellipsis, slice(0, 3), (None,)]) +def test_pass_through_non_array_likes(indexer): + arr = np.array([1, 2, 3]) + + result = check_array_indexer(arr, indexer) + assert result == indexer diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_coercion.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_coercion.py new file mode 100644 index 00000000..2c397290 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_coercion.py @@ -0,0 +1,906 @@ +from __future__ import annotations + +from datetime import ( + datetime, + timedelta, +) +import itertools + +import numpy as np +import pytest + +from pandas.compat import ( + IS64, + is_platform_windows, +) + +import pandas as pd +import pandas._testing as tm + +############################################################### +# Index / Series common tests which may trigger dtype coercions +############################################################### + + +@pytest.fixture(autouse=True, scope="class") +def check_comprehensiveness(request): + # Iterate over combination of dtype, method and klass + # and ensure that each are contained within a collected test + cls = request.cls + combos = itertools.product(cls.klasses, cls.dtypes, [cls.method]) + + def has_test(combo): + klass, dtype, method = combo + cls_funcs = request.node.session.items + return any( + klass in x.name and dtype in x.name and method in x.name for x in cls_funcs + ) + + opts = request.config.option + if opts.lf or opts.keyword: + # If we are running with "last-failed" or -k foo, we expect to only + # run a subset of tests. 
+ yield + + else: + for combo in combos: + if not has_test(combo): + raise AssertionError( + f"test method is not defined: {cls.__name__}, {combo}" + ) + + yield + + +class CoercionBase: + klasses = ["index", "series"] + dtypes = [ + "object", + "int64", + "float64", + "complex128", + "bool", + "datetime64", + "datetime64tz", + "timedelta64", + "period", + ] + + @property + def method(self): + raise NotImplementedError(self) + + +class TestSetitemCoercion(CoercionBase): + method = "setitem" + + # disable comprehensiveness tests, as most of these have been moved to + # tests.series.indexing.test_setitem in SetitemCastingEquivalents subclasses. + klasses: list[str] = [] + + def test_setitem_series_no_coercion_from_values_list(self): + # GH35865 - int casted to str when internally calling np.array(ser.values) + ser = pd.Series(["a", 1]) + ser[:] = list(ser.values) + + expected = pd.Series(["a", 1]) + + tm.assert_series_equal(ser, expected) + + def _assert_setitem_index_conversion( + self, original_series, loc_key, expected_index, expected_dtype + ): + """test index's coercion triggered by assign key""" + temp = original_series.copy() + # GH#33469 pre-2.0 with int loc_key and temp.index.dtype == np.float64 + # `temp[loc_key] = 5` treated loc_key as positional + temp[loc_key] = 5 + exp = pd.Series([1, 2, 3, 4, 5], index=expected_index) + tm.assert_series_equal(temp, exp) + # check dtype explicitly for sure + assert temp.index.dtype == expected_dtype + + temp = original_series.copy() + temp.loc[loc_key] = 5 + exp = pd.Series([1, 2, 3, 4, 5], index=expected_index) + tm.assert_series_equal(temp, exp) + # check dtype explicitly for sure + assert temp.index.dtype == expected_dtype + + @pytest.mark.parametrize( + "val,exp_dtype", [("x", object), (5, IndexError), (1.1, object)] + ) + def test_setitem_index_object(self, val, exp_dtype): + obj = pd.Series([1, 2, 3, 4], index=list("abcd")) + assert obj.index.dtype == object + + if exp_dtype is IndexError: + temp = obj.copy() + warn_msg = "Series.__setitem__ treating keys as positions is deprecated" + msg = "index 5 is out of bounds for axis 0 with size 4" + with pytest.raises(exp_dtype, match=msg): + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + temp[5] = 5 + else: + exp_index = pd.Index(list("abcd") + [val]) + self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype) + + @pytest.mark.parametrize( + "val,exp_dtype", [(5, np.int64), (1.1, np.float64), ("x", object)] + ) + def test_setitem_index_int64(self, val, exp_dtype): + obj = pd.Series([1, 2, 3, 4]) + assert obj.index.dtype == np.int64 + + exp_index = pd.Index([0, 1, 2, 3, val]) + self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype) + + @pytest.mark.parametrize( + "val,exp_dtype", [(5, np.float64), (5.1, np.float64), ("x", object)] + ) + def test_setitem_index_float64(self, val, exp_dtype, request): + obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1]) + assert obj.index.dtype == np.float64 + + exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, val]) + self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype) + + @pytest.mark.xfail(reason="Test not implemented") + def test_setitem_series_period(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def test_setitem_index_complex128(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def test_setitem_index_bool(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def 
test_setitem_index_datetime64(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def test_setitem_index_datetime64tz(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def test_setitem_index_timedelta64(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def test_setitem_index_period(self): + raise NotImplementedError + + +class TestInsertIndexCoercion(CoercionBase): + klasses = ["index"] + method = "insert" + + def _assert_insert_conversion(self, original, value, expected, expected_dtype): + """test coercion triggered by insert""" + target = original.copy() + res = target.insert(1, value) + tm.assert_index_equal(res, expected) + assert res.dtype == expected_dtype + + @pytest.mark.parametrize( + "insert, coerced_val, coerced_dtype", + [ + (1, 1, object), + (1.1, 1.1, object), + (False, False, object), + ("x", "x", object), + ], + ) + def test_insert_index_object(self, insert, coerced_val, coerced_dtype): + obj = pd.Index(list("abcd")) + assert obj.dtype == object + + exp = pd.Index(["a", coerced_val, "b", "c", "d"]) + self._assert_insert_conversion(obj, insert, exp, coerced_dtype) + + @pytest.mark.parametrize( + "insert, coerced_val, coerced_dtype", + [ + (1, 1, None), + (1.1, 1.1, np.float64), + (False, False, object), # GH#36319 + ("x", "x", object), + ], + ) + def test_insert_int_index( + self, any_int_numpy_dtype, insert, coerced_val, coerced_dtype + ): + dtype = any_int_numpy_dtype + obj = pd.Index([1, 2, 3, 4], dtype=dtype) + coerced_dtype = coerced_dtype if coerced_dtype is not None else dtype + + exp = pd.Index([1, coerced_val, 2, 3, 4], dtype=coerced_dtype) + self._assert_insert_conversion(obj, insert, exp, coerced_dtype) + + @pytest.mark.parametrize( + "insert, coerced_val, coerced_dtype", + [ + (1, 1.0, None), + (1.1, 1.1, np.float64), + (False, False, object), # GH#36319 + ("x", "x", object), + ], + ) + def test_insert_float_index( + self, float_numpy_dtype, insert, coerced_val, coerced_dtype + ): + dtype = float_numpy_dtype + obj = pd.Index([1.0, 2.0, 3.0, 4.0], dtype=dtype) + coerced_dtype = coerced_dtype if coerced_dtype is not None else dtype + + exp = pd.Index([1.0, coerced_val, 2.0, 3.0, 4.0], dtype=coerced_dtype) + self._assert_insert_conversion(obj, insert, exp, coerced_dtype) + + @pytest.mark.parametrize( + "fill_val,exp_dtype", + [ + (pd.Timestamp("2012-01-01"), "datetime64[ns]"), + (pd.Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]"), + ], + ids=["datetime64", "datetime64tz"], + ) + @pytest.mark.parametrize( + "insert_value", + [pd.Timestamp("2012-01-01"), pd.Timestamp("2012-01-01", tz="Asia/Tokyo"), 1], + ) + def test_insert_index_datetimes(self, fill_val, exp_dtype, insert_value): + obj = pd.DatetimeIndex( + ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], tz=fill_val.tz + ) + assert obj.dtype == exp_dtype + + exp = pd.DatetimeIndex( + ["2011-01-01", fill_val.date(), "2011-01-02", "2011-01-03", "2011-01-04"], + tz=fill_val.tz, + ) + self._assert_insert_conversion(obj, fill_val, exp, exp_dtype) + + if fill_val.tz: + # mismatched tzawareness + ts = pd.Timestamp("2012-01-01") + result = obj.insert(1, ts) + expected = obj.astype(object).insert(1, ts) + assert expected.dtype == object + tm.assert_index_equal(result, expected) + + ts = pd.Timestamp("2012-01-01", tz="Asia/Tokyo") + result = obj.insert(1, ts) + # once deprecation is enforced: + expected = obj.insert(1, ts.tz_convert(obj.dtype.tz)) + assert expected.dtype == 
obj.dtype + tm.assert_index_equal(result, expected) + + else: + # mismatched tzawareness + ts = pd.Timestamp("2012-01-01", tz="Asia/Tokyo") + result = obj.insert(1, ts) + expected = obj.astype(object).insert(1, ts) + assert expected.dtype == object + tm.assert_index_equal(result, expected) + + item = 1 + result = obj.insert(1, item) + expected = obj.astype(object).insert(1, item) + assert expected[1] == item + assert expected.dtype == object + tm.assert_index_equal(result, expected) + + def test_insert_index_timedelta64(self): + obj = pd.TimedeltaIndex(["1 day", "2 day", "3 day", "4 day"]) + assert obj.dtype == "timedelta64[ns]" + + # timedelta64 + timedelta64 => timedelta64 + exp = pd.TimedeltaIndex(["1 day", "10 day", "2 day", "3 day", "4 day"]) + self._assert_insert_conversion( + obj, pd.Timedelta("10 day"), exp, "timedelta64[ns]" + ) + + for item in [pd.Timestamp("2012-01-01"), 1]: + result = obj.insert(1, item) + expected = obj.astype(object).insert(1, item) + assert expected.dtype == object + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "insert, coerced_val, coerced_dtype", + [ + (pd.Period("2012-01", freq="M"), "2012-01", "period[M]"), + (pd.Timestamp("2012-01-01"), pd.Timestamp("2012-01-01"), object), + (1, 1, object), + ("x", "x", object), + ], + ) + def test_insert_index_period(self, insert, coerced_val, coerced_dtype): + obj = pd.PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq="M") + assert obj.dtype == "period[M]" + + data = [ + pd.Period("2011-01", freq="M"), + coerced_val, + pd.Period("2011-02", freq="M"), + pd.Period("2011-03", freq="M"), + pd.Period("2011-04", freq="M"), + ] + if isinstance(insert, pd.Period): + exp = pd.PeriodIndex(data, freq="M") + self._assert_insert_conversion(obj, insert, exp, coerced_dtype) + + # string that can be parsed to appropriate PeriodDtype + self._assert_insert_conversion(obj, str(insert), exp, coerced_dtype) + + else: + result = obj.insert(0, insert) + expected = obj.astype(object).insert(0, insert) + tm.assert_index_equal(result, expected) + + # TODO: ATM inserting '2012-01-01 00:00:00' when we have obj.freq=="M" + # casts that string to Period[M], not clear that is desirable + if not isinstance(insert, pd.Timestamp): + # non-castable string + result = obj.insert(0, str(insert)) + expected = obj.astype(object).insert(0, str(insert)) + tm.assert_index_equal(result, expected) + + @pytest.mark.xfail(reason="Test not implemented") + def test_insert_index_complex128(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def test_insert_index_bool(self): + raise NotImplementedError + + +class TestWhereCoercion(CoercionBase): + method = "where" + _cond = np.array([True, False, True, False]) + + def _assert_where_conversion( + self, original, cond, values, expected, expected_dtype + ): + """test coercion triggered by where""" + target = original.copy() + res = target.where(cond, values) + tm.assert_equal(res, expected) + assert res.dtype == expected_dtype + + def _construct_exp(self, obj, klass, fill_val, exp_dtype): + if fill_val is True: + values = klass([True, False, True, True]) + elif isinstance(fill_val, (datetime, np.datetime64)): + values = pd.date_range(fill_val, periods=4) + else: + values = klass(x * fill_val for x in [5, 6, 7, 8]) + + exp = klass([obj[0], values[1], obj[2], values[3]], dtype=exp_dtype) + return values, exp + + def _run_test(self, obj, fill_val, klass, exp_dtype): + cond = klass(self._cond) + + exp = klass([obj[0], fill_val, obj[2], fill_val], 
dtype=exp_dtype) + self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype) + + values, exp = self._construct_exp(obj, klass, fill_val, exp_dtype) + self._assert_where_conversion(obj, cond, values, exp, exp_dtype) + + @pytest.mark.parametrize( + "fill_val,exp_dtype", + [(1, object), (1.1, object), (1 + 1j, object), (True, object)], + ) + def test_where_object(self, index_or_series, fill_val, exp_dtype): + klass = index_or_series + obj = klass(list("abcd")) + assert obj.dtype == object + self._run_test(obj, fill_val, klass, exp_dtype) + + @pytest.mark.parametrize( + "fill_val,exp_dtype", + [(1, np.int64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)], + ) + def test_where_int64(self, index_or_series, fill_val, exp_dtype, request): + klass = index_or_series + + obj = klass([1, 2, 3, 4]) + assert obj.dtype == np.int64 + self._run_test(obj, fill_val, klass, exp_dtype) + + @pytest.mark.parametrize( + "fill_val, exp_dtype", + [(1, np.float64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)], + ) + def test_where_float64(self, index_or_series, fill_val, exp_dtype, request): + klass = index_or_series + + obj = klass([1.1, 2.2, 3.3, 4.4]) + assert obj.dtype == np.float64 + self._run_test(obj, fill_val, klass, exp_dtype) + + @pytest.mark.parametrize( + "fill_val,exp_dtype", + [ + (1, np.complex128), + (1.1, np.complex128), + (1 + 1j, np.complex128), + (True, object), + ], + ) + def test_where_complex128(self, index_or_series, fill_val, exp_dtype): + klass = index_or_series + obj = klass([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j], dtype=np.complex128) + assert obj.dtype == np.complex128 + self._run_test(obj, fill_val, klass, exp_dtype) + + @pytest.mark.parametrize( + "fill_val,exp_dtype", + [(1, object), (1.1, object), (1 + 1j, object), (True, np.bool_)], + ) + def test_where_series_bool(self, fill_val, exp_dtype): + klass = pd.Series # TODO: use index_or_series once we have Index[bool] + + obj = klass([True, False, True, False]) + assert obj.dtype == np.bool_ + self._run_test(obj, fill_val, klass, exp_dtype) + + @pytest.mark.parametrize( + "fill_val,exp_dtype", + [ + (pd.Timestamp("2012-01-01"), "datetime64[ns]"), + (pd.Timestamp("2012-01-01", tz="US/Eastern"), object), + ], + ids=["datetime64", "datetime64tz"], + ) + def test_where_datetime64(self, index_or_series, fill_val, exp_dtype): + klass = index_or_series + + obj = klass(pd.date_range("2011-01-01", periods=4, freq="D")._with_freq(None)) + assert obj.dtype == "datetime64[ns]" + + fv = fill_val + # do the check with each of the available datetime scalars + if exp_dtype == "datetime64[ns]": + for scalar in [fv, fv.to_pydatetime(), fv.to_datetime64()]: + self._run_test(obj, scalar, klass, exp_dtype) + else: + for scalar in [fv, fv.to_pydatetime()]: + self._run_test(obj, fill_val, klass, exp_dtype) + + @pytest.mark.xfail(reason="Test not implemented") + def test_where_index_complex128(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def test_where_index_bool(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def test_where_series_timedelta64(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def test_where_series_period(self): + raise NotImplementedError + + @pytest.mark.parametrize( + "value", [pd.Timedelta(days=9), timedelta(days=9), np.timedelta64(9, "D")] + ) + def test_where_index_timedelta64(self, value): + tdi = pd.timedelta_range("1 Day", periods=4) + cond = np.array([True, False, False, 
True]) + + expected = pd.TimedeltaIndex(["1 Day", value, value, "4 Days"]) + result = tdi.where(cond, value) + tm.assert_index_equal(result, expected) + + # wrong-dtyped NaT + dtnat = np.datetime64("NaT", "ns") + expected = pd.Index([tdi[0], dtnat, dtnat, tdi[3]], dtype=object) + assert expected[1] is dtnat + + result = tdi.where(cond, dtnat) + tm.assert_index_equal(result, expected) + + def test_where_index_period(self): + dti = pd.date_range("2016-01-01", periods=3, freq="QS") + pi = dti.to_period("Q") + + cond = np.array([False, True, False]) + + # Passing a valid scalar + value = pi[-1] + pi.freq * 10 + expected = pd.PeriodIndex([value, pi[1], value]) + result = pi.where(cond, value) + tm.assert_index_equal(result, expected) + + # Case passing ndarray[object] of Periods + other = np.asarray(pi + pi.freq * 10, dtype=object) + result = pi.where(cond, other) + expected = pd.PeriodIndex([other[0], pi[1], other[2]]) + tm.assert_index_equal(result, expected) + + # Passing a mismatched scalar -> casts to object + td = pd.Timedelta(days=4) + expected = pd.Index([td, pi[1], td], dtype=object) + result = pi.where(cond, td) + tm.assert_index_equal(result, expected) + + per = pd.Period("2020-04-21", "D") + expected = pd.Index([per, pi[1], per], dtype=object) + result = pi.where(cond, per) + tm.assert_index_equal(result, expected) + + +class TestFillnaSeriesCoercion(CoercionBase): + # not indexing, but place here for consistency + + method = "fillna" + + @pytest.mark.xfail(reason="Test not implemented") + def test_has_comprehensive_tests(self): + raise NotImplementedError + + def _assert_fillna_conversion(self, original, value, expected, expected_dtype): + """test coercion triggered by fillna""" + target = original.copy() + res = target.fillna(value) + tm.assert_equal(res, expected) + assert res.dtype == expected_dtype + + @pytest.mark.parametrize( + "fill_val, fill_dtype", + [(1, object), (1.1, object), (1 + 1j, object), (True, object)], + ) + def test_fillna_object(self, index_or_series, fill_val, fill_dtype): + klass = index_or_series + obj = klass(["a", np.nan, "c", "d"]) + assert obj.dtype == object + + exp = klass(["a", fill_val, "c", "d"]) + self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) + + @pytest.mark.parametrize( + "fill_val,fill_dtype", + [(1, np.float64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)], + ) + def test_fillna_float64(self, index_or_series, fill_val, fill_dtype): + klass = index_or_series + obj = klass([1.1, np.nan, 3.3, 4.4]) + assert obj.dtype == np.float64 + + exp = klass([1.1, fill_val, 3.3, 4.4]) + self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) + + @pytest.mark.parametrize( + "fill_val,fill_dtype", + [ + (1, np.complex128), + (1.1, np.complex128), + (1 + 1j, np.complex128), + (True, object), + ], + ) + def test_fillna_complex128(self, index_or_series, fill_val, fill_dtype): + klass = index_or_series + obj = klass([1 + 1j, np.nan, 3 + 3j, 4 + 4j], dtype=np.complex128) + assert obj.dtype == np.complex128 + + exp = klass([1 + 1j, fill_val, 3 + 3j, 4 + 4j]) + self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) + + @pytest.mark.parametrize( + "fill_val,fill_dtype", + [ + (pd.Timestamp("2012-01-01"), "datetime64[ns]"), + (pd.Timestamp("2012-01-01", tz="US/Eastern"), object), + (1, object), + ("x", object), + ], + ids=["datetime64", "datetime64tz", "object", "object"], + ) + def test_fillna_datetime(self, index_or_series, fill_val, fill_dtype): + klass = index_or_series + obj = klass( + [ + pd.Timestamp("2011-01-01"), 
+ pd.NaT, + pd.Timestamp("2011-01-03"), + pd.Timestamp("2011-01-04"), + ] + ) + assert obj.dtype == "datetime64[ns]" + + exp = klass( + [ + pd.Timestamp("2011-01-01"), + fill_val, + pd.Timestamp("2011-01-03"), + pd.Timestamp("2011-01-04"), + ] + ) + self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) + + @pytest.mark.parametrize( + "fill_val,fill_dtype", + [ + (pd.Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]"), + (pd.Timestamp("2012-01-01"), object), + # pre-2.0 with a mismatched tz we would get object result + (pd.Timestamp("2012-01-01", tz="Asia/Tokyo"), "datetime64[ns, US/Eastern]"), + (1, object), + ("x", object), + ], + ) + def test_fillna_datetime64tz(self, index_or_series, fill_val, fill_dtype): + klass = index_or_series + tz = "US/Eastern" + + obj = klass( + [ + pd.Timestamp("2011-01-01", tz=tz), + pd.NaT, + pd.Timestamp("2011-01-03", tz=tz), + pd.Timestamp("2011-01-04", tz=tz), + ] + ) + assert obj.dtype == "datetime64[ns, US/Eastern]" + + if getattr(fill_val, "tz", None) is None: + fv = fill_val + else: + fv = fill_val.tz_convert(tz) + exp = klass( + [ + pd.Timestamp("2011-01-01", tz=tz), + fv, + pd.Timestamp("2011-01-03", tz=tz), + pd.Timestamp("2011-01-04", tz=tz), + ] + ) + self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) + + @pytest.mark.parametrize( + "fill_val", + [ + 1, + 1.1, + 1 + 1j, + True, + pd.Interval(1, 2, closed="left"), + pd.Timestamp("2012-01-01", tz="US/Eastern"), + pd.Timestamp("2012-01-01"), + pd.Timedelta(days=1), + pd.Period("2016-01-01", "D"), + ], + ) + def test_fillna_interval(self, index_or_series, fill_val): + ii = pd.interval_range(1.0, 5.0, closed="right").insert(1, np.nan) + assert isinstance(ii.dtype, pd.IntervalDtype) + obj = index_or_series(ii) + + exp = index_or_series([ii[0], fill_val, ii[2], ii[3], ii[4]], dtype=object) + + fill_dtype = object + self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) + + @pytest.mark.xfail(reason="Test not implemented") + def test_fillna_series_int64(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def test_fillna_index_int64(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def test_fillna_series_bool(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def test_fillna_index_bool(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def test_fillna_series_timedelta64(self): + raise NotImplementedError + + @pytest.mark.parametrize( + "fill_val", + [ + 1, + 1.1, + 1 + 1j, + True, + pd.Interval(1, 2, closed="left"), + pd.Timestamp("2012-01-01", tz="US/Eastern"), + pd.Timestamp("2012-01-01"), + pd.Timedelta(days=1), + pd.Period("2016-01-01", "W"), + ], + ) + def test_fillna_series_period(self, index_or_series, fill_val): + pi = pd.period_range("2016-01-01", periods=4, freq="D").insert(1, pd.NaT) + assert isinstance(pi.dtype, pd.PeriodDtype) + obj = index_or_series(pi) + + exp = index_or_series([pi[0], fill_val, pi[2], pi[3], pi[4]], dtype=object) + + fill_dtype = object + self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) + + @pytest.mark.xfail(reason="Test not implemented") + def test_fillna_index_timedelta64(self): + raise NotImplementedError + + @pytest.mark.xfail(reason="Test not implemented") + def test_fillna_index_period(self): + raise NotImplementedError + + +class TestReplaceSeriesCoercion(CoercionBase): + klasses = ["series"] + method = "replace" + + rep: dict[str, list] = {} + 
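+    # two representative values per dtype; replace() is exercised over every (from_key, to_key) pairing below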
rep["object"] = ["a", "b"] + rep["int64"] = [4, 5] + rep["float64"] = [1.1, 2.2] + rep["complex128"] = [1 + 1j, 2 + 2j] + rep["bool"] = [True, False] + rep["datetime64[ns]"] = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-03")] + + for tz in ["UTC", "US/Eastern"]: + # to test tz => different tz replacement + key = f"datetime64[ns, {tz}]" + rep[key] = [ + pd.Timestamp("2011-01-01", tz=tz), + pd.Timestamp("2011-01-03", tz=tz), + ] + + rep["timedelta64[ns]"] = [pd.Timedelta("1 day"), pd.Timedelta("2 day")] + + @pytest.fixture(params=["dict", "series"]) + def how(self, request): + return request.param + + @pytest.fixture( + params=[ + "object", + "int64", + "float64", + "complex128", + "bool", + "datetime64[ns]", + "datetime64[ns, UTC]", + "datetime64[ns, US/Eastern]", + "timedelta64[ns]", + ] + ) + def from_key(self, request): + return request.param + + @pytest.fixture( + params=[ + "object", + "int64", + "float64", + "complex128", + "bool", + "datetime64[ns]", + "datetime64[ns, UTC]", + "datetime64[ns, US/Eastern]", + "timedelta64[ns]", + ], + ids=[ + "object", + "int64", + "float64", + "complex128", + "bool", + "datetime64", + "datetime64tz", + "datetime64tz", + "timedelta64", + ], + ) + def to_key(self, request): + return request.param + + @pytest.fixture + def replacer(self, how, from_key, to_key): + """ + Object we will pass to `Series.replace` + """ + if how == "dict": + replacer = dict(zip(self.rep[from_key], self.rep[to_key])) + elif how == "series": + replacer = pd.Series(self.rep[to_key], index=self.rep[from_key]) + else: + raise ValueError + return replacer + + def test_replace_series(self, how, to_key, from_key, replacer): + index = pd.Index([3, 4], name="xxx") + obj = pd.Series(self.rep[from_key], index=index, name="yyy") + assert obj.dtype == from_key + + if from_key.startswith("datetime") and to_key.startswith("datetime"): + # tested below + return + elif from_key in ["datetime64[ns, US/Eastern]", "datetime64[ns, UTC]"]: + # tested below + return + + result = obj.replace(replacer) + + if (from_key == "float64" and to_key in ("int64")) or ( + from_key == "complex128" and to_key in ("int64", "float64") + ): + if not IS64 or is_platform_windows(): + pytest.skip(f"32-bit platform buggy: {from_key} -> {to_key}") + + # Expected: do not downcast by replacement + exp = pd.Series(self.rep[to_key], index=index, name="yyy", dtype=from_key) + + else: + exp = pd.Series(self.rep[to_key], index=index, name="yyy") + assert exp.dtype == to_key + + tm.assert_series_equal(result, exp) + + @pytest.mark.parametrize( + "to_key", + ["timedelta64[ns]", "bool", "object", "complex128", "float64", "int64"], + indirect=True, + ) + @pytest.mark.parametrize( + "from_key", ["datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"], indirect=True + ) + def test_replace_series_datetime_tz(self, how, to_key, from_key, replacer): + index = pd.Index([3, 4], name="xyz") + obj = pd.Series(self.rep[from_key], index=index, name="yyy") + assert obj.dtype == from_key + + result = obj.replace(replacer) + + exp = pd.Series(self.rep[to_key], index=index, name="yyy") + assert exp.dtype == to_key + + tm.assert_series_equal(result, exp) + + @pytest.mark.parametrize( + "to_key", + ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"], + indirect=True, + ) + @pytest.mark.parametrize( + "from_key", + ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"], + indirect=True, + ) + def test_replace_series_datetime_datetime(self, how, to_key, from_key, replacer): + index = pd.Index([3, 4], 
name="xyz") + obj = pd.Series(self.rep[from_key], index=index, name="yyy") + assert obj.dtype == from_key + + result = obj.replace(replacer) + + exp = pd.Series(self.rep[to_key], index=index, name="yyy") + if isinstance(obj.dtype, pd.DatetimeTZDtype) and isinstance( + exp.dtype, pd.DatetimeTZDtype + ): + # with mismatched tzs, we retain the original dtype as of 2.0 + exp = exp.astype(obj.dtype) + else: + assert exp.dtype == to_key + + tm.assert_series_equal(result, exp) + + @pytest.mark.xfail(reason="Test not implemented") + def test_replace_series_period(self): + raise NotImplementedError diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_datetime.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_datetime.py new file mode 100644 index 00000000..6510612b --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_datetime.py @@ -0,0 +1,188 @@ +import re + +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestDatetimeIndex: + def test_get_loc_naive_dti_aware_str_deprecated(self): + # GH#46903 + ts = Timestamp("20130101")._value + dti = pd.DatetimeIndex([ts + 50 + i for i in range(100)]) + ser = Series(range(100), index=dti) + + key = "2013-01-01 00:00:00.000000050+0000" + msg = re.escape(repr(key)) + with pytest.raises(KeyError, match=msg): + ser[key] + + with pytest.raises(KeyError, match=msg): + dti.get_loc(key) + + def test_indexing_with_datetime_tz(self): + # GH#8260 + # support datetime64 with tz + + idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo") + dr = date_range("20130110", periods=3) + df = DataFrame({"A": idx, "B": dr}) + df["C"] = idx + df.iloc[1, 1] = pd.NaT + df.iloc[1, 2] = pd.NaT + + expected = Series( + [Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT], + index=list("ABC"), + dtype="object", + name=1, + ) + + # indexing + result = df.iloc[1] + tm.assert_series_equal(result, expected) + result = df.loc[1] + tm.assert_series_equal(result, expected) + + def test_indexing_fast_xs(self): + # indexing - fast_xs + df = DataFrame({"a": date_range("2014-01-01", periods=10, tz="UTC")}) + result = df.iloc[5] + expected = Series( + [Timestamp("2014-01-06 00:00:00+0000", tz="UTC")], index=["a"], name=5 + ) + tm.assert_series_equal(result, expected) + + result = df.loc[5] + tm.assert_series_equal(result, expected) + + # indexing - boolean + result = df[df.a > df.a[3]] + expected = df.iloc[4:] + tm.assert_frame_equal(result, expected) + + def test_consistency_with_tz_aware_scalar(self): + # xef gh-12938 + # various ways of indexing the same tz-aware scalar + df = Series([Timestamp("2016-03-30 14:35:25", tz="Europe/Brussels")]).to_frame() + + df = pd.concat([df, df]).reset_index(drop=True) + expected = Timestamp("2016-03-30 14:35:25+0200", tz="Europe/Brussels") + + result = df[0][0] + assert result == expected + + result = df.iloc[0, 0] + assert result == expected + + result = df.loc[0, 0] + assert result == expected + + result = df.iat[0, 0] + assert result == expected + + result = df.at[0, 0] + assert result == expected + + result = df[0].loc[0] + assert result == expected + + result = df[0].at[0] + assert result == expected + + def test_indexing_with_datetimeindex_tz(self, indexer_sl): + # GH 12050 + # indexing on a series with a datetimeindex with tz + index = date_range("2015-01-01", periods=2, tz="utc") + + ser = Series(range(2), index=index, 
dtype="int64") + + # list-like indexing + + for sel in (index, list(index)): + # getitem + result = indexer_sl(ser)[sel] + expected = ser.copy() + if sel is not index: + expected.index = expected.index._with_freq(None) + tm.assert_series_equal(result, expected) + + # setitem + result = ser.copy() + indexer_sl(result)[sel] = 1 + expected = Series(1, index=index) + tm.assert_series_equal(result, expected) + + # single element indexing + + # getitem + assert indexer_sl(ser)[index[1]] == 1 + + # setitem + result = ser.copy() + indexer_sl(result)[index[1]] = 5 + expected = Series([0, 5], index=index) + tm.assert_series_equal(result, expected) + + def test_nanosecond_getitem_setitem_with_tz(self): + # GH 11679 + data = ["2016-06-28 08:30:00.123456789"] + index = pd.DatetimeIndex(data, dtype="datetime64[ns, America/Chicago]") + df = DataFrame({"a": [10]}, index=index) + result = df.loc[df.index[0]] + expected = Series(10, index=["a"], name=df.index[0]) + tm.assert_series_equal(result, expected) + + result = df.copy() + result.loc[df.index[0], "a"] = -1 + expected = DataFrame(-1, index=index, columns=["a"]) + tm.assert_frame_equal(result, expected) + + def test_getitem_str_slice_millisecond_resolution(self, frame_or_series): + # GH#33589 + + keys = [ + "2017-10-25T16:25:04.151", + "2017-10-25T16:25:04.252", + "2017-10-25T16:50:05.237", + "2017-10-25T16:50:05.238", + ] + obj = frame_or_series( + [1, 2, 3, 4], + index=[Timestamp(x) for x in keys], + ) + result = obj[keys[1] : keys[2]] + expected = frame_or_series( + [2, 3], + index=[ + Timestamp(keys[1]), + Timestamp(keys[2]), + ], + ) + tm.assert_equal(result, expected) + + def test_getitem_pyarrow_index(self, frame_or_series): + # GH 53644 + pytest.importorskip("pyarrow") + obj = frame_or_series( + range(5), + index=date_range("2020", freq="D", periods=5).astype( + "timestamp[us][pyarrow]" + ), + ) + result = obj.loc[obj.index[:-3]] + expected = frame_or_series( + range(2), + index=date_range("2020", freq="D", periods=2).astype( + "timestamp[us][pyarrow]" + ), + ) + tm.assert_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_floats.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_floats.py new file mode 100644 index 00000000..c9fbf957 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_floats.py @@ -0,0 +1,686 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + RangeIndex, + Series, +) +import pandas._testing as tm + + +def gen_obj(klass, index): + if klass is Series: + obj = Series(np.arange(len(index)), index=index) + else: + obj = DataFrame( + np.random.default_rng(2).standard_normal((len(index), len(index))), + index=index, + columns=index, + ) + return obj + + +class TestFloatIndexers: + def check(self, result, original, indexer, getitem): + """ + comparator for results + we need to take care if we are indexing on a + Series or a frame + """ + if isinstance(original, Series): + expected = original.iloc[indexer] + elif getitem: + expected = original.iloc[:, indexer] + else: + expected = original.iloc[indexer] + + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize( + "index_func", + [ + tm.makeStringIndex, + tm.makeCategoricalIndex, + tm.makeDateIndex, + tm.makeTimedeltaIndex, + tm.makePeriodIndex, + ], + ) + def test_scalar_non_numeric(self, index_func, frame_or_series, indexer_sl): + # GH 4892 + # float_indexers should raise exceptions + # on appropriate Index types & accessors + + i = 
index_func(5) + s = gen_obj(frame_or_series, i) + + # getting + with pytest.raises(KeyError, match="^3.0$"): + indexer_sl(s)[3.0] + + # contains + assert 3.0 not in s + + s2 = s.copy() + indexer_sl(s2)[3.0] = 10 + + if indexer_sl is tm.setitem: + assert 3.0 in s2.axes[-1] + elif indexer_sl is tm.loc: + assert 3.0 in s2.axes[0] + else: + assert 3.0 not in s2.axes[0] + assert 3.0 not in s2.axes[-1] + + @pytest.mark.parametrize( + "index_func", + [ + tm.makeStringIndex, + tm.makeCategoricalIndex, + tm.makeDateIndex, + tm.makeTimedeltaIndex, + tm.makePeriodIndex, + ], + ) + def test_scalar_non_numeric_series_fallback(self, index_func): + # fallsback to position selection, series only + i = index_func(5) + s = Series(np.arange(len(i)), index=i) + + msg = "Series.__getitem__ treating keys as positions is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + s[3] + with pytest.raises(KeyError, match="^3.0$"): + s[3.0] + + def test_scalar_with_mixed(self, indexer_sl): + s2 = Series([1, 2, 3], index=["a", "b", "c"]) + s3 = Series([1, 2, 3], index=["a", "b", 1.5]) + + # lookup in a pure string index with an invalid indexer + + with pytest.raises(KeyError, match="^1.0$"): + indexer_sl(s2)[1.0] + + with pytest.raises(KeyError, match=r"^1\.0$"): + indexer_sl(s2)[1.0] + + result = indexer_sl(s2)["b"] + expected = 2 + assert result == expected + + # mixed index so we have label + # indexing + with pytest.raises(KeyError, match="^1.0$"): + indexer_sl(s3)[1.0] + + if indexer_sl is not tm.loc: + # __getitem__ falls back to positional + msg = "Series.__getitem__ treating keys as positions is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s3[1] + expected = 2 + assert result == expected + + with pytest.raises(KeyError, match=r"^1\.0$"): + indexer_sl(s3)[1.0] + + result = indexer_sl(s3)[1.5] + expected = 3 + assert result == expected + + @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex]) + def test_scalar_integer(self, index_func, frame_or_series, indexer_sl): + getitem = indexer_sl is not tm.loc + + # test how scalar float indexers work on int indexes + + # integer index + i = index_func(5) + obj = gen_obj(frame_or_series, i) + + # coerce to equal int + + result = indexer_sl(obj)[3.0] + self.check(result, obj, 3, getitem) + + if isinstance(obj, Series): + + def compare(x, y): + assert x == y + + expected = 100 + else: + compare = tm.assert_series_equal + if getitem: + expected = Series(100, index=range(len(obj)), name=3) + else: + expected = Series(100.0, index=range(len(obj)), name=3) + + s2 = obj.copy() + indexer_sl(s2)[3.0] = 100 + + result = indexer_sl(s2)[3.0] + compare(result, expected) + + result = indexer_sl(s2)[3] + compare(result, expected) + + @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex]) + def test_scalar_integer_contains_float(self, index_func, frame_or_series): + # contains + # integer index + index = index_func(5) + obj = gen_obj(frame_or_series, index) + + # coerce to equal int + assert 3.0 in obj + + def test_scalar_float(self, frame_or_series): + # scalar float indexers work on a float index + index = Index(np.arange(5.0)) + s = gen_obj(frame_or_series, index) + + # assert all operations except for iloc are ok + indexer = index[3] + for idxr in [tm.loc, tm.setitem]: + getitem = idxr is not tm.loc + + # getting + result = idxr(s)[indexer] + self.check(result, s, 3, getitem) + + # setting + s2 = s.copy() + + result = idxr(s2)[indexer] + self.check(result, s, 3, getitem) + + # 
random float is a KeyError + with pytest.raises(KeyError, match=r"^3\.5$"): + idxr(s)[3.5] + + # contains + assert 3.0 in s + + # iloc succeeds with an integer + expected = s.iloc[3] + s2 = s.copy() + + s2.iloc[3] = expected + result = s2.iloc[3] + self.check(result, s, 3, False) + + @pytest.mark.parametrize( + "index_func", + [ + tm.makeStringIndex, + tm.makeDateIndex, + tm.makeTimedeltaIndex, + tm.makePeriodIndex, + ], + ) + @pytest.mark.parametrize("idx", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) + def test_slice_non_numeric(self, index_func, idx, frame_or_series, indexer_sli): + # GH 4892 + # float_indexers should raise exceptions + # on appropriate Index types & accessors + + index = index_func(5) + s = gen_obj(frame_or_series, index) + + # getitem + if indexer_sli is tm.iloc: + msg = ( + "cannot do positional indexing " + rf"on {type(index).__name__} with these indexers \[(3|4)\.0\] of " + "type float" + ) + else: + msg = ( + "cannot do slice indexing " + rf"on {type(index).__name__} with these indexers " + r"\[(3|4)(\.0)?\] " + r"of type (float|int)" + ) + with pytest.raises(TypeError, match=msg): + indexer_sli(s)[idx] + + # setitem + if indexer_sli is tm.iloc: + # otherwise we keep the same message as above + msg = "slice indices must be integers or None or have an __index__ method" + with pytest.raises(TypeError, match=msg): + indexer_sli(s)[idx] = 0 + + def test_slice_integer(self): + # same as above, but for Integer based indexes + # these coerce to a like integer + # oob indicates if we are out of bounds + # of positional indexing + for index, oob in [ + (Index(np.arange(5, dtype=np.int64)), False), + (RangeIndex(5), False), + (Index(np.arange(5, dtype=np.int64) + 10), True), + ]: + # s is an in-range index + s = Series(range(5), index=index) + + # getitem + for idx in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: + result = s.loc[idx] + + # these are all label indexing + # except getitem which is positional + # empty + if oob: + indexer = slice(0, 0) + else: + indexer = slice(3, 5) + self.check(result, s, indexer, False) + + # getitem out-of-bounds + for idx in [slice(-6, 6), slice(-6.0, 6.0)]: + result = s.loc[idx] + + # these are all label indexing + # except getitem which is positional + # empty + if oob: + indexer = slice(0, 0) + else: + indexer = slice(-6, 6) + self.check(result, s, indexer, False) + + # positional indexing + msg = ( + "cannot do slice indexing " + rf"on {type(index).__name__} with these indexers \[-6\.0\] of " + "type float" + ) + with pytest.raises(TypeError, match=msg): + s[slice(-6.0, 6.0)] + + # getitem odd floats + for idx, res1 in [ + (slice(2.5, 4), slice(3, 5)), + (slice(2, 3.5), slice(2, 4)), + (slice(2.5, 3.5), slice(3, 4)), + ]: + result = s.loc[idx] + if oob: + res = slice(0, 0) + else: + res = res1 + + self.check(result, s, res, False) + + # positional indexing + msg = ( + "cannot do slice indexing " + rf"on {type(index).__name__} with these indexers \[(2|3)\.5\] of " + "type float" + ) + with pytest.raises(TypeError, match=msg): + s[idx] + + @pytest.mark.parametrize("idx", [slice(2, 4.0), slice(2.0, 4), slice(2.0, 4.0)]) + def test_integer_positional_indexing(self, idx): + """make sure that we are raising on positional indexing + w.r.t. 
an integer index + """ + s = Series(range(2, 6), index=range(2, 6)) + + result = s[2:4] + expected = s.iloc[2:4] + tm.assert_series_equal(result, expected) + + klass = RangeIndex + msg = ( + "cannot do (slice|positional) indexing " + rf"on {klass.__name__} with these indexers \[(2|4)\.0\] of " + "type float" + ) + with pytest.raises(TypeError, match=msg): + s[idx] + with pytest.raises(TypeError, match=msg): + s.iloc[idx] + + @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex]) + def test_slice_integer_frame_getitem(self, index_func): + # similar to above, but on the getitem dim (of a DataFrame) + index = index_func(5) + + s = DataFrame(np.random.default_rng(2).standard_normal((5, 2)), index=index) + + # getitem + for idx in [slice(0.0, 1), slice(0, 1.0), slice(0.0, 1.0)]: + result = s.loc[idx] + indexer = slice(0, 2) + self.check(result, s, indexer, False) + + # positional indexing + msg = ( + "cannot do slice indexing " + rf"on {type(index).__name__} with these indexers \[(0|1)\.0\] of " + "type float" + ) + with pytest.raises(TypeError, match=msg): + s[idx] + + # getitem out-of-bounds + for idx in [slice(-10, 10), slice(-10.0, 10.0)]: + result = s.loc[idx] + self.check(result, s, slice(-10, 10), True) + + # positional indexing + msg = ( + "cannot do slice indexing " + rf"on {type(index).__name__} with these indexers \[-10\.0\] of " + "type float" + ) + with pytest.raises(TypeError, match=msg): + s[slice(-10.0, 10.0)] + + # getitem odd floats + for idx, res in [ + (slice(0.5, 1), slice(1, 2)), + (slice(0, 0.5), slice(0, 1)), + (slice(0.5, 1.5), slice(1, 2)), + ]: + result = s.loc[idx] + self.check(result, s, res, False) + + # positional indexing + msg = ( + "cannot do slice indexing " + rf"on {type(index).__name__} with these indexers \[0\.5\] of " + "type float" + ) + with pytest.raises(TypeError, match=msg): + s[idx] + + @pytest.mark.parametrize("idx", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) + @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex]) + def test_float_slice_getitem_with_integer_index_raises(self, idx, index_func): + # similar to above, but on the getitem dim (of a DataFrame) + index = index_func(5) + + s = DataFrame(np.random.default_rng(2).standard_normal((5, 2)), index=index) + + # setitem + sc = s.copy() + sc.loc[idx] = 0 + result = sc.loc[idx].values.ravel() + assert (result == 0).all() + + # positional indexing + msg = ( + "cannot do slice indexing " + rf"on {type(index).__name__} with these indexers \[(3|4)\.0\] of " + "type float" + ) + with pytest.raises(TypeError, match=msg): + s[idx] = 0 + + with pytest.raises(TypeError, match=msg): + s[idx] + + @pytest.mark.parametrize("idx", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) + def test_slice_float(self, idx, frame_or_series, indexer_sl): + # same as above, but for floats + index = Index(np.arange(5.0)) + 0.1 + s = gen_obj(frame_or_series, index) + + expected = s.iloc[3:4] + + # getitem + result = indexer_sl(s)[idx] + assert isinstance(result, type(s)) + tm.assert_equal(result, expected) + + # setitem + s2 = s.copy() + indexer_sl(s2)[idx] = 0 + result = indexer_sl(s2)[idx].values.ravel() + assert (result == 0).all() + + def test_floating_index_doc_example(self): + index = Index([1.5, 2, 3, 4.5, 5]) + s = Series(range(5), index=index) + assert s[3] == 2 + assert s.loc[3] == 2 + assert s.iloc[3] == 3 + + def test_floating_misc(self, indexer_sl): + # related 236 + # scalar/slicing of a float index + s = Series(np.arange(5), index=np.arange(5) * 2.5, 
dtype=np.int64) + + # label based slicing + result = indexer_sl(s)[1.0:3.0] + expected = Series(1, index=[2.5]) + tm.assert_series_equal(result, expected) + + # exact indexing when found + + result = indexer_sl(s)[5.0] + assert result == 2 + + result = indexer_sl(s)[5] + assert result == 2 + + # value not found (and no fallbacking at all) + + # scalar integers + with pytest.raises(KeyError, match=r"^4$"): + indexer_sl(s)[4] + + # fancy floats/integers create the correct entry (as nan) + # fancy tests + expected = Series([2, 0], index=Index([5.0, 0.0], dtype=np.float64)) + for fancy_idx in [[5.0, 0.0], np.array([5.0, 0.0])]: # float + tm.assert_series_equal(indexer_sl(s)[fancy_idx], expected) + + expected = Series([2, 0], index=Index([5, 0], dtype="float64")) + for fancy_idx in [[5, 0], np.array([5, 0])]: + tm.assert_series_equal(indexer_sl(s)[fancy_idx], expected) + + warn = FutureWarning if indexer_sl is tm.setitem else None + msg = r"The behavior of obj\[i:j\] with a float-dtype index" + + # all should return the same as we are slicing 'the same' + with tm.assert_produces_warning(warn, match=msg): + result1 = indexer_sl(s)[2:5] + result2 = indexer_sl(s)[2.0:5.0] + result3 = indexer_sl(s)[2.0:5] + result4 = indexer_sl(s)[2.1:5] + tm.assert_series_equal(result1, result2) + tm.assert_series_equal(result1, result3) + tm.assert_series_equal(result1, result4) + + expected = Series([1, 2], index=[2.5, 5.0]) + with tm.assert_produces_warning(warn, match=msg): + result = indexer_sl(s)[2:5] + + tm.assert_series_equal(result, expected) + + # list selection + result1 = indexer_sl(s)[[0.0, 5, 10]] + result2 = s.iloc[[0, 2, 4]] + tm.assert_series_equal(result1, result2) + + with pytest.raises(KeyError, match="not in index"): + indexer_sl(s)[[1.6, 5, 10]] + + with pytest.raises(KeyError, match="not in index"): + indexer_sl(s)[[0, 1, 2]] + + result = indexer_sl(s)[[2.5, 5]] + tm.assert_series_equal(result, Series([1, 2], index=[2.5, 5.0])) + + result = indexer_sl(s)[[2.5]] + tm.assert_series_equal(result, Series([1], index=[2.5])) + + def test_floatindex_slicing_bug(self, float_numpy_dtype): + # GH 5557, related to slicing a float index + dtype = float_numpy_dtype + ser = { + 256: 2321.0, + 1: 78.0, + 2: 2716.0, + 3: 0.0, + 4: 369.0, + 5: 0.0, + 6: 269.0, + 7: 0.0, + 8: 0.0, + 9: 0.0, + 10: 3536.0, + 11: 0.0, + 12: 24.0, + 13: 0.0, + 14: 931.0, + 15: 0.0, + 16: 101.0, + 17: 78.0, + 18: 9643.0, + 19: 0.0, + 20: 0.0, + 21: 0.0, + 22: 63761.0, + 23: 0.0, + 24: 446.0, + 25: 0.0, + 26: 34773.0, + 27: 0.0, + 28: 729.0, + 29: 78.0, + 30: 0.0, + 31: 0.0, + 32: 3374.0, + 33: 0.0, + 34: 1391.0, + 35: 0.0, + 36: 361.0, + 37: 0.0, + 38: 61808.0, + 39: 0.0, + 40: 0.0, + 41: 0.0, + 42: 6677.0, + 43: 0.0, + 44: 802.0, + 45: 0.0, + 46: 2691.0, + 47: 0.0, + 48: 3582.0, + 49: 0.0, + 50: 734.0, + 51: 0.0, + 52: 627.0, + 53: 70.0, + 54: 2584.0, + 55: 0.0, + 56: 324.0, + 57: 0.0, + 58: 605.0, + 59: 0.0, + 60: 0.0, + 61: 0.0, + 62: 3989.0, + 63: 10.0, + 64: 42.0, + 65: 0.0, + 66: 904.0, + 67: 0.0, + 68: 88.0, + 69: 70.0, + 70: 8172.0, + 71: 0.0, + 72: 0.0, + 73: 0.0, + 74: 64902.0, + 75: 0.0, + 76: 347.0, + 77: 0.0, + 78: 36605.0, + 79: 0.0, + 80: 379.0, + 81: 70.0, + 82: 0.0, + 83: 0.0, + 84: 3001.0, + 85: 0.0, + 86: 1630.0, + 87: 7.0, + 88: 364.0, + 89: 0.0, + 90: 67404.0, + 91: 9.0, + 92: 0.0, + 93: 0.0, + 94: 7685.0, + 95: 0.0, + 96: 1017.0, + 97: 0.0, + 98: 2831.0, + 99: 0.0, + 100: 2963.0, + 101: 0.0, + 102: 854.0, + 103: 0.0, + 104: 0.0, + 105: 0.0, + 106: 0.0, + 107: 0.0, + 108: 0.0, + 109: 0.0, + 110: 0.0, + 111: 
0.0, + 112: 0.0, + 113: 0.0, + 114: 0.0, + 115: 0.0, + 116: 0.0, + 117: 0.0, + 118: 0.0, + 119: 0.0, + 120: 0.0, + 121: 0.0, + 122: 0.0, + 123: 0.0, + 124: 0.0, + 125: 0.0, + 126: 67744.0, + 127: 22.0, + 128: 264.0, + 129: 0.0, + 260: 197.0, + 268: 0.0, + 265: 0.0, + 269: 0.0, + 261: 0.0, + 266: 1198.0, + 267: 0.0, + 262: 2629.0, + 258: 775.0, + 257: 0.0, + 263: 0.0, + 259: 0.0, + 264: 163.0, + 250: 10326.0, + 251: 0.0, + 252: 1228.0, + 253: 0.0, + 254: 2769.0, + 255: 0.0, + } + + # smoke test for the repr + s = Series(ser, dtype=dtype) + result = s.value_counts() + assert result.index.dtype == dtype + str(result) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_iat.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_iat.py new file mode 100644 index 00000000..4497c16e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_iat.py @@ -0,0 +1,48 @@ +import numpy as np + +from pandas import ( + DataFrame, + Series, + period_range, +) + + +def test_iat(float_frame): + for i, row in enumerate(float_frame.index): + for j, col in enumerate(float_frame.columns): + result = float_frame.iat[i, j] + expected = float_frame.at[row, col] + assert result == expected + + +def test_iat_duplicate_columns(): + # https://github.com/pandas-dev/pandas/issues/11754 + df = DataFrame([[1, 2]], columns=["x", "x"]) + assert df.iat[0, 0] == 1 + + +def test_iat_getitem_series_with_period_index(): + # GH#4390, iat incorrectly indexing + index = period_range("1/1/2001", periods=10) + ser = Series(np.random.default_rng(2).standard_normal(10), index=index) + expected = ser[index[0]] + result = ser.iat[0] + assert expected == result + + +def test_iat_setitem_item_cache_cleared(indexer_ial, using_copy_on_write): + # GH#45684 + data = {"x": np.arange(8, dtype=np.int64), "y": np.int64(0)} + df = DataFrame(data).copy() + ser = df["y"] + + # previously this iat setting would split the block and fail to clear + # the item_cache. 
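+    # Editor's note (illustrative gloss, not upstream text): `ser = df["y"]`
+    # above populated the DataFrame's item cache with that Series. The iat
+    # writes below must keep that cached object in sync (or invalidate it);
+    # GH#45684 covered a case where a block split left the cached Series
+    # stale, which is why the assertions at the end read through both `df`
+    # and the cached `ser`.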
+    indexer_ial(df)[7, 0] = 9999
+
+    indexer_ial(df)[7, 1] = 1234
+
+    assert df.iat[7, 1] == 1234
+    if not using_copy_on_write:
+        assert ser.iloc[-1] == 1234
+    assert df.iloc[-1, -1] == 1234
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_iloc.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_iloc.py
new file mode 100644
index 00000000..bc760433
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_iloc.py
@@ -0,0 +1,1462 @@
+"""Test positional-based indexing with iloc."""
+
+from datetime import datetime
+import re
+
+import numpy as np
+import pytest
+
+from pandas.errors import IndexingError
+import pandas.util._test_decorators as td
+
+from pandas import (
+    NA,
+    Categorical,
+    CategoricalDtype,
+    DataFrame,
+    Index,
+    Interval,
+    NaT,
+    Series,
+    Timestamp,
+    array,
+    concat,
+    date_range,
+    interval_range,
+    isna,
+    to_datetime,
+)
+import pandas._testing as tm
+from pandas.api.types import is_scalar
+from pandas.tests.indexing.common import check_indexing_smoketest_or_raises
+
+# We pass through the error message from numpy
+_slice_iloc_msg = re.escape(
+    "only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) "
+    "and integer or boolean arrays are valid indices"
+)
+
+
+class TestiLoc:
+    @pytest.mark.parametrize("key", [2, -1, [0, 1, 2]])
+    @pytest.mark.parametrize("kind", ["series", "frame"])
+    @pytest.mark.parametrize(
+        "col",
+        ["labels", "mixed", "ts", "floats", "empty"],
+    )
+    def test_iloc_getitem_int_and_list_int(self, key, kind, col, request):
+        obj = request.getfixturevalue(f"{kind}_{col}")
+        check_indexing_smoketest_or_raises(
+            obj,
+            "iloc",
+            key,
+            fails=IndexError,
+        )
+
+        # array of ints (GH5006), make sure that a single indexer is returning
+        # the correct type
+
+
+class TestiLocBaseIndependent:
+    """Tests independent of the base class."""
+
+    @pytest.mark.parametrize(
+        "key",
+        [
+            slice(None),
+            slice(3),
+            range(3),
+            [0, 1, 2],
+            Index(range(3)),
+            np.asarray([0, 1, 2]),
+        ],
+    )
+    @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])
+    def test_iloc_setitem_fullcol_categorical(self, indexer, key, using_array_manager):
+        frame = DataFrame({0: range(3)}, dtype=object)
+
+        cat = Categorical(["alpha", "beta", "gamma"])
+
+        if not using_array_manager:
+            assert frame._mgr.blocks[0]._can_hold_element(cat)
+
+        df = frame.copy()
+        orig_vals = df.values
+
+        indexer(df)[key, 0] = cat
+
+        expected = DataFrame({0: cat}).astype(object)
+        if not using_array_manager:
+            assert np.shares_memory(df[0].values, orig_vals)
+
+        tm.assert_frame_equal(df, expected)
+
+        # check we don't have a view on cat (may be undesired GH#39986)
+        df.iloc[0, 0] = "gamma"
+        assert cat[0] != "gamma"
+
+        # pre-2.0, with a mixed dataframe ("split" path) we always overwrote
+        # the column. As of 2.0 we correctly write "into" the column, so
+        # we retain the object dtype.
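+        # Editor's sketch (added illustration, not upstream code): "writing
+        # into" the column means the existing object-dtype array is updated
+        # in place rather than being swapped for a categorical one, e.g.
+        #
+        #   df = DataFrame({0: np.array([0, 1, 2], dtype=object), 1: range(3)})
+        #   df.iloc[:, 0] = Categorical(["a", "b", "c"])
+        #   df[0].dtype  # still object, not category
+        #
+        # which is exactly what the mixed-frame case below asserts.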
+ frame = DataFrame({0: np.array([0, 1, 2], dtype=object), 1: range(3)}) + df = frame.copy() + orig_vals = df.values + indexer(df)[key, 0] = cat + expected = DataFrame({0: cat.astype(object), 1: range(3)}) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("box", [array, Series]) + def test_iloc_setitem_ea_inplace(self, frame_or_series, box, using_copy_on_write): + # GH#38952 Case with not setting a full column + # IntegerArray without NAs + arr = array([1, 2, 3, 4]) + obj = frame_or_series(arr.to_numpy("i8")) + + if frame_or_series is Series: + values = obj.values + else: + values = obj._mgr.arrays[0] + + if frame_or_series is Series: + obj.iloc[:2] = box(arr[2:]) + else: + obj.iloc[:2, 0] = box(arr[2:]) + + expected = frame_or_series(np.array([3, 4, 3, 4], dtype="i8")) + tm.assert_equal(obj, expected) + + # Check that we are actually in-place + if frame_or_series is Series: + if using_copy_on_write: + assert obj.values is not values + assert np.shares_memory(obj.values, values) + else: + assert obj.values is values + else: + assert np.shares_memory(obj[0].values, values) + + def test_is_scalar_access(self): + # GH#32085 index with duplicates doesn't matter for _is_scalar_access + index = Index([1, 2, 1]) + ser = Series(range(3), index=index) + + assert ser.iloc._is_scalar_access((1,)) + + df = ser.to_frame() + assert df.iloc._is_scalar_access((1, 0)) + + def test_iloc_exceeds_bounds(self): + # GH6296 + # iloc should allow indexers that exceed the bounds + df = DataFrame(np.random.default_rng(2).random((20, 5)), columns=list("ABCDE")) + + # lists of positions should raise IndexError! + msg = "positional indexers are out-of-bounds" + with pytest.raises(IndexError, match=msg): + df.iloc[:, [0, 1, 2, 3, 4, 5]] + with pytest.raises(IndexError, match=msg): + df.iloc[[1, 30]] + with pytest.raises(IndexError, match=msg): + df.iloc[[1, -30]] + with pytest.raises(IndexError, match=msg): + df.iloc[[100]] + + s = df["A"] + with pytest.raises(IndexError, match=msg): + s.iloc[[100]] + with pytest.raises(IndexError, match=msg): + s.iloc[[-100]] + + # still raise on a single indexer + msg = "single positional indexer is out-of-bounds" + with pytest.raises(IndexError, match=msg): + df.iloc[30] + with pytest.raises(IndexError, match=msg): + df.iloc[-30] + + # GH10779 + # single positive/negative indexer exceeding Series bounds should raise + # an IndexError + with pytest.raises(IndexError, match=msg): + s.iloc[30] + with pytest.raises(IndexError, match=msg): + s.iloc[-30] + + # slices are ok + result = df.iloc[:, 4:10] # 0 < start < len < stop + expected = df.iloc[:, 4:] + tm.assert_frame_equal(result, expected) + + result = df.iloc[:, -4:-10] # stop < 0 < start < len + expected = df.iloc[:, :0] + tm.assert_frame_equal(result, expected) + + result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down) + expected = df.iloc[:, :4:-1] + tm.assert_frame_equal(result, expected) + + result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down) + expected = df.iloc[:, 4::-1] + tm.assert_frame_equal(result, expected) + + result = df.iloc[:, -10:4] # start < 0 < stop < len + expected = df.iloc[:, :4] + tm.assert_frame_equal(result, expected) + + result = df.iloc[:, 10:4] # 0 < stop < len < start + expected = df.iloc[:, :0] + tm.assert_frame_equal(result, expected) + + result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down) + expected = df.iloc[:, :0] + tm.assert_frame_equal(result, expected) + + result = df.iloc[:, 10:11] # 0 < len < start < stop + expected = df.iloc[:, :0] + 
tm.assert_frame_equal(result, expected) + + # slice bounds exceeding is ok + result = s.iloc[18:30] + expected = s.iloc[18:] + tm.assert_series_equal(result, expected) + + result = s.iloc[30:] + expected = s.iloc[:0] + tm.assert_series_equal(result, expected) + + result = s.iloc[30::-1] + expected = s.iloc[::-1] + tm.assert_series_equal(result, expected) + + # doc example + def check(result, expected): + str(result) + result.dtypes + tm.assert_frame_equal(result, expected) + + dfl = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("AB") + ) + check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index, columns=[])) + check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]]) + check(dfl.iloc[4:6], dfl.iloc[[4]]) + + msg = "positional indexers are out-of-bounds" + with pytest.raises(IndexError, match=msg): + dfl.iloc[[4, 5, 6]] + msg = "single positional indexer is out-of-bounds" + with pytest.raises(IndexError, match=msg): + dfl.iloc[:, 4] + + @pytest.mark.parametrize("index,columns", [(np.arange(20), list("ABCDE"))]) + @pytest.mark.parametrize( + "index_vals,column_vals", + [ + ([slice(None), ["A", "D"]]), + (["1", "2"], slice(None)), + ([datetime(2019, 1, 1)], slice(None)), + ], + ) + def test_iloc_non_integer_raises(self, index, columns, index_vals, column_vals): + # GH 25753 + df = DataFrame( + np.random.default_rng(2).standard_normal((len(index), len(columns))), + index=index, + columns=columns, + ) + msg = ".iloc requires numeric indexers, got" + with pytest.raises(IndexError, match=msg): + df.iloc[index_vals, column_vals] + + def test_iloc_getitem_invalid_scalar(self, frame_or_series): + # GH 21982 + + obj = DataFrame(np.arange(100).reshape(10, 10)) + obj = tm.get_obj(obj, frame_or_series) + + with pytest.raises(TypeError, match="Cannot index by location index"): + obj.iloc["a"] + + def test_iloc_array_not_mutating_negative_indices(self): + # GH 21867 + array_with_neg_numbers = np.array([1, 2, -1]) + array_copy = array_with_neg_numbers.copy() + df = DataFrame( + {"A": [100, 101, 102], "B": [103, 104, 105], "C": [106, 107, 108]}, + index=[1, 2, 3], + ) + df.iloc[array_with_neg_numbers] + tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy) + df.iloc[:, array_with_neg_numbers] + tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy) + + def test_iloc_getitem_neg_int_can_reach_first_index(self): + # GH10547 and GH10779 + # negative integers should be able to reach index 0 + df = DataFrame({"A": [2, 3, 5], "B": [7, 11, 13]}) + s = df["A"] + + expected = df.iloc[0] + result = df.iloc[-3] + tm.assert_series_equal(result, expected) + + expected = df.iloc[[0]] + result = df.iloc[[-3]] + tm.assert_frame_equal(result, expected) + + expected = s.iloc[0] + result = s.iloc[-3] + assert result == expected + + expected = s.iloc[[0]] + result = s.iloc[[-3]] + tm.assert_series_equal(result, expected) + + # check the length 1 Series case highlighted in GH10547 + expected = Series(["a"], index=["A"]) + result = expected.iloc[[-1]] + tm.assert_series_equal(result, expected) + + def test_iloc_getitem_dups(self): + # GH 6766 + df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}]) + df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}]) + df = concat([df1, df2], axis=1) + + # cross-sectional indexing + result = df.iloc[0, 0] + assert isna(result) + + result = df.iloc[0, :] + expected = Series([np.nan, 1, 3, 3], index=["A", "B", "A", "B"], name=0) + tm.assert_series_equal(result, expected) + + def test_iloc_getitem_array(self): + df = DataFrame( + [ + {"A": 1, "B": 2, "C": 3}, + {"A": 
100, "B": 200, "C": 300}, + {"A": 1000, "B": 2000, "C": 3000}, + ] + ) + + expected = DataFrame([{"A": 1, "B": 2, "C": 3}]) + tm.assert_frame_equal(df.iloc[[0]], expected) + + expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}]) + tm.assert_frame_equal(df.iloc[[0, 1]], expected) + + expected = DataFrame([{"B": 2, "C": 3}, {"B": 2000, "C": 3000}], index=[0, 2]) + result = df.iloc[[0, 2], [1, 2]] + tm.assert_frame_equal(result, expected) + + def test_iloc_getitem_bool(self): + df = DataFrame( + [ + {"A": 1, "B": 2, "C": 3}, + {"A": 100, "B": 200, "C": 300}, + {"A": 1000, "B": 2000, "C": 3000}, + ] + ) + + expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}]) + result = df.iloc[[True, True, False]] + tm.assert_frame_equal(result, expected) + + expected = DataFrame( + [{"A": 1, "B": 2, "C": 3}, {"A": 1000, "B": 2000, "C": 3000}], index=[0, 2] + ) + result = df.iloc[lambda x: x.index % 2 == 0] + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("index", [[True, False], [True, False, True, False]]) + def test_iloc_getitem_bool_diff_len(self, index): + # GH26658 + s = Series([1, 2, 3]) + msg = f"Boolean index has wrong length: {len(index)} instead of {len(s)}" + with pytest.raises(IndexError, match=msg): + s.iloc[index] + + def test_iloc_getitem_slice(self): + df = DataFrame( + [ + {"A": 1, "B": 2, "C": 3}, + {"A": 100, "B": 200, "C": 300}, + {"A": 1000, "B": 2000, "C": 3000}, + ] + ) + + expected = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 100, "B": 200, "C": 300}]) + result = df.iloc[:2] + tm.assert_frame_equal(result, expected) + + expected = DataFrame([{"A": 100, "B": 200}], index=[1]) + result = df.iloc[1:2, 0:2] + tm.assert_frame_equal(result, expected) + + expected = DataFrame( + [{"A": 1, "C": 3}, {"A": 100, "C": 300}, {"A": 1000, "C": 3000}] + ) + result = df.iloc[:, lambda df: [0, 2]] + tm.assert_frame_equal(result, expected) + + def test_iloc_getitem_slice_dups(self): + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=["A", "A", "B", "B"], + ) + df2 = DataFrame( + np.random.default_rng(2).integers(0, 10, size=20).reshape(10, 2), + columns=["A", "C"], + ) + + # axis=1 + df = concat([df1, df2], axis=1) + tm.assert_frame_equal(df.iloc[:, :4], df1) + tm.assert_frame_equal(df.iloc[:, 4:], df2) + + df = concat([df2, df1], axis=1) + tm.assert_frame_equal(df.iloc[:, :2], df2) + tm.assert_frame_equal(df.iloc[:, 2:], df1) + + exp = concat([df2, df1.iloc[:, [0]]], axis=1) + tm.assert_frame_equal(df.iloc[:, 0:3], exp) + + # axis=0 + df = concat([df, df], axis=0) + tm.assert_frame_equal(df.iloc[0:10, :2], df2) + tm.assert_frame_equal(df.iloc[0:10, 2:], df1) + tm.assert_frame_equal(df.iloc[10:, :2], df2) + tm.assert_frame_equal(df.iloc[10:, 2:], df1) + + def test_iloc_setitem(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=np.arange(0, 8, 2), + columns=np.arange(0, 12, 3), + ) + + df.iloc[1, 1] = 1 + result = df.iloc[1, 1] + assert result == 1 + + df.iloc[:, 2:3] = 0 + expected = df.iloc[:, 2:3] + result = df.iloc[:, 2:3] + tm.assert_frame_equal(result, expected) + + # GH5771 + s = Series(0, index=[4, 5, 6]) + s.iloc[1:2] += 1 + expected = Series([0, 1, 0], index=[4, 5, 6]) + tm.assert_series_equal(s, expected) + + def test_iloc_setitem_axis_argument(self): + # GH45032 + df = DataFrame([[6, "c", 10], [7, "d", 11], [8, "e", 12]]) + expected = DataFrame([[6, "c", 10], [7, "d", 11], [5, 5, 5]]) + df.iloc(axis=0)[2] = 5 + tm.assert_frame_equal(df, expected) + + 
df = DataFrame([[6, "c", 10], [7, "d", 11], [8, "e", 12]]) + expected = DataFrame([[6, "c", 5], [7, "d", 5], [8, "e", 5]]) + df.iloc(axis=1)[2] = 5 + tm.assert_frame_equal(df, expected) + + def test_iloc_setitem_list(self): + # setitem with an iloc list + df = DataFrame( + np.arange(9).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"] + ) + df.iloc[[0, 1], [1, 2]] + df.iloc[[0, 1], [1, 2]] += 100 + + expected = DataFrame( + np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)), + index=["A", "B", "C"], + columns=["A", "B", "C"], + ) + tm.assert_frame_equal(df, expected) + + def test_iloc_setitem_pandas_object(self): + # GH 17193 + s_orig = Series([0, 1, 2, 3]) + expected = Series([0, -1, -2, 3]) + + s = s_orig.copy() + s.iloc[Series([1, 2])] = [-1, -2] + tm.assert_series_equal(s, expected) + + s = s_orig.copy() + s.iloc[Index([1, 2])] = [-1, -2] + tm.assert_series_equal(s, expected) + + def test_iloc_setitem_dups(self): + # GH 6766 + # iloc with a mask aligning from another iloc + df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}]) + df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}]) + df = concat([df1, df2], axis=1) + + expected = df.fillna(3) + inds = np.isnan(df.iloc[:, 0]) + mask = inds[inds].index + df.iloc[mask, 0] = df.iloc[mask, 2] + tm.assert_frame_equal(df, expected) + + # del a dup column across blocks + expected = DataFrame({0: [1, 2], 1: [3, 4]}) + expected.columns = ["B", "B"] + del df["A"] + tm.assert_frame_equal(df, expected) + + # assign back to self + df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]] + tm.assert_frame_equal(df, expected) + + # reversed x 2 + df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(drop=True) + df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(drop=True) + tm.assert_frame_equal(df, expected) + + def test_iloc_setitem_frame_duplicate_columns_multiple_blocks( + self, using_array_manager + ): + # Same as the "assign back to self" check in test_iloc_setitem_dups + # but on a DataFrame with multiple blocks + df = DataFrame([[0, 1], [2, 3]], columns=["B", "B"]) + + # setting float values that can be held by existing integer arrays + # is inplace + df.iloc[:, 0] = df.iloc[:, 0].astype("f8") + if not using_array_manager: + assert len(df._mgr.blocks) == 1 + + # if the assigned values cannot be held by existing integer arrays, + # we cast + df.iloc[:, 0] = df.iloc[:, 0] + 0.5 + if not using_array_manager: + assert len(df._mgr.blocks) == 2 + + expected = df.copy() + + # assign back to self + df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]] + + tm.assert_frame_equal(df, expected) + + # TODO: GH#27620 this test used to compare iloc against ix; check if this + # is redundant with another test comparing iloc against loc + def test_iloc_getitem_frame(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + index=range(0, 20, 2), + columns=range(0, 8, 2), + ) + + result = df.iloc[2] + exp = df.loc[4] + tm.assert_series_equal(result, exp) + + result = df.iloc[2, 2] + exp = df.loc[4, 4] + assert result == exp + + # slice + result = df.iloc[4:8] + expected = df.loc[8:14] + tm.assert_frame_equal(result, expected) + + result = df.iloc[:, 2:3] + expected = df.loc[:, 4:5] + tm.assert_frame_equal(result, expected) + + # list of integers + result = df.iloc[[0, 1, 3]] + expected = df.loc[[0, 2, 6]] + tm.assert_frame_equal(result, expected) + + result = df.iloc[[0, 1, 3], [0, 1]] + expected = df.loc[[0, 2, 6], [0, 2]] + tm.assert_frame_equal(result, expected) + + # neg indices + result = 
df.iloc[[-1, 1, 3], [-1, 1]] + expected = df.loc[[18, 2, 6], [6, 2]] + tm.assert_frame_equal(result, expected) + + # dups indices + result = df.iloc[[-1, -1, 1, 3], [-1, 1]] + expected = df.loc[[18, 18, 2, 6], [6, 2]] + tm.assert_frame_equal(result, expected) + + # with index-like + s = Series(index=range(1, 5), dtype=object) + result = df.iloc[s.index] + expected = df.loc[[2, 4, 6, 8]] + tm.assert_frame_equal(result, expected) + + def test_iloc_getitem_labelled_frame(self): + # try with labelled frame + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + index=list("abcdefghij"), + columns=list("ABCD"), + ) + + result = df.iloc[1, 1] + exp = df.loc["b", "B"] + assert result == exp + + result = df.iloc[:, 2:3] + expected = df.loc[:, ["C"]] + tm.assert_frame_equal(result, expected) + + # negative indexing + result = df.iloc[-1, -1] + exp = df.loc["j", "D"] + assert result == exp + + # out-of-bounds exception + msg = "index 5 is out of bounds for axis 0 with size 4" + with pytest.raises(IndexError, match=msg): + df.iloc[10, 5] + + # trying to use a label + msg = ( + r"Location based indexing can only have \[integer, integer " + r"slice \(START point is INCLUDED, END point is EXCLUDED\), " + r"listlike of integers, boolean array\] types" + ) + with pytest.raises(ValueError, match=msg): + df.iloc["j", "D"] + + def test_iloc_getitem_doc_issue(self, using_array_manager): + # multi axis slicing issue with single block + # surfaced in GH 6059 + + arr = np.random.default_rng(2).standard_normal((6, 4)) + index = date_range("20130101", periods=6) + columns = list("ABCD") + df = DataFrame(arr, index=index, columns=columns) + + # defines ref_locs + df.describe() + + result = df.iloc[3:5, 0:2] + str(result) + result.dtypes + + expected = DataFrame(arr[3:5, 0:2], index=index[3:5], columns=columns[0:2]) + tm.assert_frame_equal(result, expected) + + # for dups + df.columns = list("aaaa") + result = df.iloc[3:5, 0:2] + str(result) + result.dtypes + + expected = DataFrame(arr[3:5, 0:2], index=index[3:5], columns=list("aa")) + tm.assert_frame_equal(result, expected) + + # related + arr = np.random.default_rng(2).standard_normal((6, 4)) + index = list(range(0, 12, 2)) + columns = list(range(0, 8, 2)) + df = DataFrame(arr, index=index, columns=columns) + + if not using_array_manager: + df._mgr.blocks[0].mgr_locs + result = df.iloc[1:5, 2:4] + str(result) + result.dtypes + expected = DataFrame(arr[1:5, 2:4], index=index[1:5], columns=columns[2:4]) + tm.assert_frame_equal(result, expected) + + def test_iloc_setitem_series(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + index=list("abcdefghij"), + columns=list("ABCD"), + ) + + df.iloc[1, 1] = 1 + result = df.iloc[1, 1] + assert result == 1 + + df.iloc[:, 2:3] = 0 + expected = df.iloc[:, 2:3] + result = df.iloc[:, 2:3] + tm.assert_frame_equal(result, expected) + + s = Series(np.random.default_rng(2).standard_normal(10), index=range(0, 20, 2)) + + s.iloc[1] = 1 + result = s.iloc[1] + assert result == 1 + + s.iloc[:4] = 0 + expected = s.iloc[:4] + result = s.iloc[:4] + tm.assert_series_equal(result, expected) + + s = Series([-1] * 6) + s.iloc[0::2] = [0, 2, 4] + s.iloc[1::2] = [1, 3, 5] + result = s + expected = Series([0, 1, 2, 3, 4, 5]) + tm.assert_series_equal(result, expected) + + def test_iloc_setitem_list_of_lists(self): + # GH 7551 + # list-of-list is set incorrectly in mixed vs. 
single dtyped frames + df = DataFrame( + {"A": np.arange(5, dtype="int64"), "B": np.arange(5, 10, dtype="int64")} + ) + df.iloc[2:4] = [[10, 11], [12, 13]] + expected = DataFrame({"A": [0, 1, 10, 12, 4], "B": [5, 6, 11, 13, 9]}) + tm.assert_frame_equal(df, expected) + + df = DataFrame( + {"A": ["a", "b", "c", "d", "e"], "B": np.arange(5, 10, dtype="int64")} + ) + df.iloc[2:4] = [["x", 11], ["y", 13]] + expected = DataFrame({"A": ["a", "b", "x", "y", "e"], "B": [5, 6, 11, 13, 9]}) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("indexer", [[0], slice(None, 1, None), np.array([0])]) + @pytest.mark.parametrize("value", [["Z"], np.array(["Z"])]) + def test_iloc_setitem_with_scalar_index(self, indexer, value): + # GH #19474 + # assigning like "df.iloc[0, [0]] = ['Z']" should be evaluated + # elementwisely, not using "setter('A', ['Z'])". + + # Set object type to avoid upcast when setting "Z" + df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"]).astype({"A": object}) + df.iloc[0, indexer] = value + result = df.iloc[0, 0] + + assert is_scalar(result) and result == "Z" + + @pytest.mark.filterwarnings("ignore::UserWarning") + def test_iloc_mask(self): + # GH 3631, iloc with a mask (of a series) should raise + df = DataFrame(list(range(5)), index=list("ABCDE"), columns=["a"]) + mask = df.a % 2 == 0 + msg = "iLocation based boolean indexing cannot use an indexable as a mask" + with pytest.raises(ValueError, match=msg): + df.iloc[mask] + mask.index = range(len(mask)) + msg = "iLocation based boolean indexing on an integer type is not available" + with pytest.raises(NotImplementedError, match=msg): + df.iloc[mask] + + # ndarray ok + result = df.iloc[np.array([True] * len(mask), dtype=bool)] + tm.assert_frame_equal(result, df) + + # the possibilities + locs = np.arange(4) + nums = 2**locs + reps = [bin(num) for num in nums] + df = DataFrame({"locs": locs, "nums": nums}, reps) + + expected = { + (None, ""): "0b1100", + (None, ".loc"): "0b1100", + (None, ".iloc"): "0b1100", + ("index", ""): "0b11", + ("index", ".loc"): "0b11", + ("index", ".iloc"): ( + "iLocation based boolean indexing cannot use an indexable as a mask" + ), + ("locs", ""): "Unalignable boolean Series provided as indexer " + "(index of the boolean Series and of the indexed " + "object do not match).", + ("locs", ".loc"): "Unalignable boolean Series provided as indexer " + "(index of the boolean Series and of the " + "indexed object do not match).", + ("locs", ".iloc"): ( + "iLocation based boolean indexing on an " + "integer type is not available" + ), + } + + # UserWarnings from reindex of a boolean mask + for idx in [None, "index", "locs"]: + mask = (df.nums > 2).values + if idx: + mask_index = getattr(df, idx)[::-1] + mask = Series(mask, list(mask_index)) + for method in ["", ".loc", ".iloc"]: + try: + if method: + accessor = getattr(df, method[1:]) + else: + accessor = df + answer = str(bin(accessor[mask]["nums"].sum())) + except (ValueError, IndexingError, NotImplementedError) as e: + answer = str(e) + + key = ( + idx, + method, + ) + r = expected.get(key) + if r != answer: + raise AssertionError( + f"[{key}] does not match [{answer}], received [{r}]" + ) + + def test_iloc_non_unique_indexing(self): + # GH 4017, non-unique indexing (on the axis) + df = DataFrame({"A": [0.1] * 3000, "B": [1] * 3000}) + idx = np.arange(30) * 99 + expected = df.iloc[idx] + + df3 = concat([df, 2 * df, 3 * df]) + result = df3.iloc[idx] + + tm.assert_frame_equal(result, expected) + + df2 = DataFrame({"A": [0.1] * 1000, "B": [1] * 1000}) 
+ df2 = concat([df2, 2 * df2, 3 * df2]) + + with pytest.raises(KeyError, match="not in index"): + df2.loc[idx] + + def test_iloc_empty_list_indexer_is_ok(self): + df = tm.makeCustomDataframe(5, 2) + # vertical empty + tm.assert_frame_equal( + df.iloc[:, []], + df.iloc[:, :0], + check_index_type=True, + check_column_type=True, + ) + # horizontal empty + tm.assert_frame_equal( + df.iloc[[], :], + df.iloc[:0, :], + check_index_type=True, + check_column_type=True, + ) + # horizontal empty + tm.assert_frame_equal( + df.iloc[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True + ) + + def test_identity_slice_returns_new_object(self, using_copy_on_write): + # GH13873 + original_df = DataFrame({"a": [1, 2, 3]}) + sliced_df = original_df.iloc[:] + assert sliced_df is not original_df + + # should be a shallow copy + assert np.shares_memory(original_df["a"], sliced_df["a"]) + + # Setting using .loc[:, "a"] sets inplace so alters both sliced and orig + # depending on CoW + original_df.loc[:, "a"] = [4, 4, 4] + if using_copy_on_write: + assert (sliced_df["a"] == [1, 2, 3]).all() + else: + assert (sliced_df["a"] == 4).all() + + original_series = Series([1, 2, 3, 4, 5, 6]) + sliced_series = original_series.iloc[:] + assert sliced_series is not original_series + + # should also be a shallow copy + original_series[:3] = [7, 8, 9] + if using_copy_on_write: + # shallow copy not updated (CoW) + assert all(sliced_series[:3] == [1, 2, 3]) + else: + assert all(sliced_series[:3] == [7, 8, 9]) + + def test_indexing_zerodim_np_array(self): + # GH24919 + df = DataFrame([[1, 2], [3, 4]]) + result = df.iloc[np.array(0)] + s = Series([1, 2], name=0) + tm.assert_series_equal(result, s) + + def test_series_indexing_zerodim_np_array(self): + # GH24919 + s = Series([1, 2]) + result = s.iloc[np.array(0)] + assert result == 1 + + def test_iloc_setitem_categorical_updates_inplace(self): + # Mixed dtype ensures we go through take_split_path in setitem_with_indexer + cat = Categorical(["A", "B", "C"]) + df = DataFrame({1: cat, 2: [1, 2, 3]}, copy=False) + + assert tm.shares_memory(df[1], cat) + + # With the enforcement of GH#45333 in 2.0, this modifies original + # values inplace + df.iloc[:, 0] = cat[::-1] + + assert tm.shares_memory(df[1], cat) + expected = Categorical(["C", "B", "A"], categories=["A", "B", "C"]) + tm.assert_categorical_equal(cat, expected) + + def test_iloc_with_boolean_operation(self): + # GH 20627 + result = DataFrame([[0, 1], [2, 3], [4, 5], [6, np.nan]]) + result.iloc[result.index <= 2] *= 2 + expected = DataFrame([[0, 2], [4, 6], [8, 10], [6, np.nan]]) + tm.assert_frame_equal(result, expected) + + result.iloc[result.index > 2] *= 2 + expected = DataFrame([[0, 2], [4, 6], [8, 10], [12, np.nan]]) + tm.assert_frame_equal(result, expected) + + result.iloc[[True, True, False, False]] *= 2 + expected = DataFrame([[0, 4], [8, 12], [8, 10], [12, np.nan]]) + tm.assert_frame_equal(result, expected) + + result.iloc[[False, False, True, True]] /= 2 + expected = DataFrame([[0, 4.0], [8, 12.0], [4, 5.0], [6, np.nan]]) + tm.assert_frame_equal(result, expected) + + def test_iloc_getitem_singlerow_slice_categoricaldtype_gives_series(self): + # GH#29521 + df = DataFrame({"x": Categorical("a b c d e".split())}) + result = df.iloc[0] + raw_cat = Categorical(["a"], categories=["a", "b", "c", "d", "e"]) + expected = Series(raw_cat, index=["x"], name=0, dtype="category") + + tm.assert_series_equal(result, expected) + + def test_iloc_getitem_categorical_values(self): + # GH#14580 + # test iloc() on Series with 
Categorical data + + ser = Series([1, 2, 3]).astype("category") + + # get slice + result = ser.iloc[0:2] + expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3])) + tm.assert_series_equal(result, expected) + + # get list of indexes + result = ser.iloc[[0, 1]] + expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3])) + tm.assert_series_equal(result, expected) + + # get boolean array + result = ser.iloc[[True, False, False]] + expected = Series([1]).astype(CategoricalDtype([1, 2, 3])) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("value", [None, NaT, np.nan]) + def test_iloc_setitem_td64_values_cast_na(self, value): + # GH#18586 + series = Series([0, 1, 2], dtype="timedelta64[ns]") + series.iloc[0] = value + expected = Series([NaT, 1, 2], dtype="timedelta64[ns]") + tm.assert_series_equal(series, expected) + + @pytest.mark.parametrize("not_na", [Interval(0, 1), "a", 1.0]) + def test_setitem_mix_of_nan_and_interval(self, not_na, nulls_fixture): + # GH#27937 + dtype = CategoricalDtype(categories=[not_na]) + ser = Series( + [nulls_fixture, nulls_fixture, nulls_fixture, nulls_fixture], dtype=dtype + ) + ser.iloc[:3] = [nulls_fixture, not_na, nulls_fixture] + exp = Series([nulls_fixture, not_na, nulls_fixture, nulls_fixture], dtype=dtype) + tm.assert_series_equal(ser, exp) + + def test_iloc_setitem_empty_frame_raises_with_3d_ndarray(self): + idx = Index([]) + obj = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), len(idx))), + index=idx, + columns=idx, + ) + nd3 = np.random.default_rng(2).integers(5, size=(2, 2, 2)) + + msg = f"Cannot set values with ndim > {obj.ndim}" + with pytest.raises(ValueError, match=msg): + obj.iloc[nd3] = 0 + + @pytest.mark.parametrize("indexer", [tm.loc, tm.iloc]) + def test_iloc_getitem_read_only_values(self, indexer): + # GH#10043 this is fundamentally a test for iloc, but test loc while + # we're here + rw_array = np.eye(10) + rw_df = DataFrame(rw_array) + + ro_array = np.eye(10) + ro_array.setflags(write=False) + ro_df = DataFrame(ro_array) + + tm.assert_frame_equal(indexer(rw_df)[[1, 2, 3]], indexer(ro_df)[[1, 2, 3]]) + tm.assert_frame_equal(indexer(rw_df)[[1]], indexer(ro_df)[[1]]) + tm.assert_series_equal(indexer(rw_df)[1], indexer(ro_df)[1]) + tm.assert_frame_equal(indexer(rw_df)[1:3], indexer(ro_df)[1:3]) + + def test_iloc_getitem_readonly_key(self): + # GH#17192 iloc with read-only array raising TypeError + df = DataFrame({"data": np.ones(100, dtype="float64")}) + indices = np.array([1, 3, 6]) + indices.flags.writeable = False + + result = df.iloc[indices] + expected = df.loc[[1, 3, 6]] + tm.assert_frame_equal(result, expected) + + result = df["data"].iloc[indices] + expected = df["data"].loc[[1, 3, 6]] + tm.assert_series_equal(result, expected) + + def test_iloc_assign_series_to_df_cell(self): + # GH 37593 + df = DataFrame(columns=["a"], index=[0]) + df.iloc[0, 0] = Series([1, 2, 3]) + expected = DataFrame({"a": [Series([1, 2, 3])]}, columns=["a"], index=[0]) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("klass", [list, np.array]) + def test_iloc_setitem_bool_indexer(self, klass): + # GH#36741 + df = DataFrame({"flag": ["x", "y", "z"], "value": [1, 3, 4]}) + indexer = klass([True, False, False]) + df.iloc[indexer, 1] = df.iloc[indexer, 1] * 2 + expected = DataFrame({"flag": ["x", "y", "z"], "value": [2, 3, 4]}) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("indexer", [[1], slice(1, 2)]) + def test_iloc_setitem_pure_position_based(self, indexer): + # GH#22046 + df1 = 
DataFrame({"a2": [11, 12, 13], "b2": [14, 15, 16]}) + df2 = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) + df2.iloc[:, indexer] = df1.iloc[:, [0]] + expected = DataFrame({"a": [1, 2, 3], "b": [11, 12, 13], "c": [7, 8, 9]}) + tm.assert_frame_equal(df2, expected) + + def test_iloc_setitem_dictionary_value(self): + # GH#37728 + df = DataFrame({"x": [1, 2], "y": [2, 2]}) + rhs = {"x": 9, "y": 99} + df.iloc[1] = rhs + expected = DataFrame({"x": [1, 9], "y": [2, 99]}) + tm.assert_frame_equal(df, expected) + + # GH#38335 same thing, mixed dtypes + df = DataFrame({"x": [1, 2], "y": [2.0, 2.0]}) + df.iloc[1] = rhs + expected = DataFrame({"x": [1, 9], "y": [2.0, 99.0]}) + tm.assert_frame_equal(df, expected) + + def test_iloc_getitem_float_duplicates(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=[0.1, 0.2, 0.2], + columns=list("abc"), + ) + expect = df.iloc[1:] + tm.assert_frame_equal(df.loc[0.2], expect) + + expect = df.iloc[1:, 0] + tm.assert_series_equal(df.loc[0.2, "a"], expect) + + df.index = [1, 0.2, 0.2] + expect = df.iloc[1:] + tm.assert_frame_equal(df.loc[0.2], expect) + + expect = df.iloc[1:, 0] + tm.assert_series_equal(df.loc[0.2, "a"], expect) + + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 3)), + index=[1, 0.2, 0.2, 1], + columns=list("abc"), + ) + expect = df.iloc[1:-1] + tm.assert_frame_equal(df.loc[0.2], expect) + + expect = df.iloc[1:-1, 0] + tm.assert_series_equal(df.loc[0.2, "a"], expect) + + df.index = [0.1, 0.2, 2, 0.2] + expect = df.iloc[[1, -1]] + tm.assert_frame_equal(df.loc[0.2], expect) + + expect = df.iloc[[1, -1], 0] + tm.assert_series_equal(df.loc[0.2, "a"], expect) + + def test_iloc_setitem_custom_object(self): + # iloc with an object + class TO: + def __init__(self, value) -> None: + self.value = value + + def __str__(self) -> str: + return f"[{self.value}]" + + __repr__ = __str__ + + def __eq__(self, other) -> bool: + return self.value == other.value + + def view(self): + return self + + df = DataFrame(index=[0, 1], columns=[0]) + df.iloc[1, 0] = TO(1) + df.iloc[1, 0] = TO(2) + + result = DataFrame(index=[0, 1], columns=[0]) + result.iloc[1, 0] = TO(2) + + tm.assert_frame_equal(result, df) + + # remains object dtype even after setting it back + df = DataFrame(index=[0, 1], columns=[0]) + df.iloc[1, 0] = TO(1) + df.iloc[1, 0] = np.nan + result = DataFrame(index=[0, 1], columns=[0]) + + tm.assert_frame_equal(result, df) + + def test_iloc_getitem_with_duplicates(self): + df = DataFrame( + np.random.default_rng(2).random((3, 3)), + columns=list("ABC"), + index=list("aab"), + ) + + result = df.iloc[0] + assert isinstance(result, Series) + tm.assert_almost_equal(result.values, df.values[0]) + + result = df.T.iloc[:, 0] + assert isinstance(result, Series) + tm.assert_almost_equal(result.values, df.values[0]) + + def test_iloc_getitem_with_duplicates2(self): + # GH#2259 + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2]) + result = df.iloc[:, [0]] + expected = df.take([0], axis=1) + tm.assert_frame_equal(result, expected) + + def test_iloc_interval(self): + # GH#17130 + df = DataFrame({Interval(1, 2): [1, 2]}) + + result = df.iloc[0] + expected = Series({Interval(1, 2): 1}, name=0) + tm.assert_series_equal(result, expected) + + result = df.iloc[:, 0] + expected = Series([1, 2], name=Interval(1, 2)) + tm.assert_series_equal(result, expected) + + result = df.copy() + result.iloc[:, 0] += 1 + expected = DataFrame({Interval(1, 2): [2, 3]}) + tm.assert_frame_equal(result, expected) + + 
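+ # Illustrative sketch (added here; not from the upstream pandas suite): the
+ # tests above exercise .iloc's purely positional semantics. A minimal
+ # contrast with label-based .loc, assuming this module's Series import:
+ def test_iloc_positional_vs_loc_label_sketch(self):
+ ser = Series([1, 2, 3], index=[2, 1, 0])
+ # .iloc[0] takes the first position: value 1, whose label is 2
+ assert ser.iloc[0] == 1
+ # .loc[0] takes the label 0: value 3, which sits at the last position
+ assert ser.loc[0] == 3
+
+ 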
@pytest.mark.parametrize("indexing_func", [list, np.array]) + @pytest.mark.parametrize("rhs_func", [list, np.array]) + def test_loc_setitem_boolean_list(self, rhs_func, indexing_func): + # GH#20438 testing specifically list key, not arraylike + ser = Series([0, 1, 2]) + ser.iloc[indexing_func([True, False, True])] = rhs_func([5, 10]) + expected = Series([5, 1, 10]) + tm.assert_series_equal(ser, expected) + + df = DataFrame({"a": [0, 1, 2]}) + df.iloc[indexing_func([True, False, True])] = rhs_func([[5], [10]]) + expected = DataFrame({"a": [5, 1, 10]}) + tm.assert_frame_equal(df, expected) + + def test_iloc_getitem_slice_negative_step_ea_block(self): + # GH#44551 + df = DataFrame({"A": [1, 2, 3]}, dtype="Int64") + + res = df.iloc[:, ::-1] + tm.assert_frame_equal(res, df) + + df["B"] = "foo" + res = df.iloc[:, ::-1] + expected = DataFrame({"B": df["B"], "A": df["A"]}) + tm.assert_frame_equal(res, expected) + + def test_iloc_setitem_2d_ndarray_into_ea_block(self): + # GH#44703 + df = DataFrame({"status": ["a", "b", "c"]}, dtype="category") + df.iloc[np.array([0, 1]), np.array([0])] = np.array([["a"], ["a"]]) + + expected = DataFrame({"status": ["a", "a", "c"]}, dtype=df["status"].dtype) + tm.assert_frame_equal(df, expected) + + @td.skip_array_manager_not_yet_implemented + def test_iloc_getitem_int_single_ea_block_view(self): + # GH#45241 + # TODO: make an extension interface test for this? + arr = interval_range(1, 10.0)._values + df = DataFrame(arr) + + # ser should be a *view* on the DataFrame data + ser = df.iloc[2] + + # if we have a view, then changing arr[2] should also change ser[0] + assert arr[2] != arr[-1] # otherwise the rest isn't meaningful + arr[2] = arr[-1] + assert ser[0] == arr[-1] + + def test_iloc_setitem_multicolumn_to_datetime(self): + # GH#20511 + df = DataFrame({"A": ["2022-01-01", "2022-01-02"], "B": ["2021", "2022"]}) + + df.iloc[:, [0]] = DataFrame({"A": to_datetime(["2021", "2022"])}) + expected = DataFrame( + { + "A": [ + Timestamp("2021-01-01 00:00:00"), + Timestamp("2022-01-01 00:00:00"), + ], + "B": ["2021", "2022"], + } + ) + tm.assert_frame_equal(df, expected, check_dtype=False) + + +class TestILocErrors: + # NB: this test should work for _any_ Series we can pass as + # series_with_simple_index + def test_iloc_float_raises(self, series_with_simple_index, frame_or_series): + # GH#4892 + # float_indexers should raise exceptions + # on appropriate Index types & accessors + # this duplicates the code below + # but is specifically testing for the error + # message + + obj = series_with_simple_index + if frame_or_series is DataFrame: + obj = obj.to_frame() + + msg = "Cannot index by location index with a non-integer key" + with pytest.raises(TypeError, match=msg): + obj.iloc[3.0] + + with pytest.raises(IndexError, match=_slice_iloc_msg): + obj.iloc[3.0] = 0 + + def test_iloc_getitem_setitem_fancy_exceptions(self, float_frame): + with pytest.raises(IndexingError, match="Too many indexers"): + float_frame.iloc[:, :, :] + + with pytest.raises(IndexError, match="too many indices for array"): + # GH#32257 we let numpy do validation, get their exception + float_frame.iloc[:, :, :] = 1 + + def test_iloc_frame_indexer(self): + # GH#39004 + df = DataFrame({"a": [1, 2, 3]}) + indexer = DataFrame({"a": [True, False, True]}) + msg = "DataFrame indexer for .iloc is not supported. Consider using .loc" + with pytest.raises(TypeError, match=msg): + df.iloc[indexer] = 1 + + msg = ( + "DataFrame indexer is not allowed for .iloc\n" + "Consider using .loc for automatic alignment." 
+ ) + with pytest.raises(IndexError, match=msg): + df.iloc[indexer] + + +class TestILocSetItemDuplicateColumns: + def test_iloc_setitem_scalar_duplicate_columns(self): + # GH#15686, duplicate columns and mixed dtype + df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}]) + df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}]) + df = concat([df1, df2], axis=1) + df.iloc[0, 0] = -1 + + assert df.iloc[0, 0] == -1 + assert df.iloc[0, 2] == 3 + assert df.dtypes.iloc[2] == np.int64 + + def test_iloc_setitem_list_duplicate_columns(self): + # GH#22036 setting with same-sized list + df = DataFrame([[0, "str", "str2"]], columns=["a", "b", "b"]) + + df.iloc[:, 2] = ["str3"] + + expected = DataFrame([[0, "str", "str3"]], columns=["a", "b", "b"]) + tm.assert_frame_equal(df, expected) + + def test_iloc_setitem_series_duplicate_columns(self): + df = DataFrame( + np.arange(8, dtype=np.int64).reshape(2, 4), columns=["A", "B", "A", "B"] + ) + df.iloc[:, 0] = df.iloc[:, 0].astype(np.float64) + assert df.dtypes.iloc[2] == np.int64 + + @pytest.mark.parametrize( + ["dtypes", "init_value", "expected_value"], + [("int64", "0", 0), ("float", "1.2", 1.2)], + ) + def test_iloc_setitem_dtypes_duplicate_columns( + self, dtypes, init_value, expected_value + ): + # GH#22035 + df = DataFrame([[init_value, "str", "str2"]], columns=["a", "b", "b"]) + + # with the enforcement of GH#45333 in 2.0, this sets values inplace, + # so we retain object dtype + df.iloc[:, 0] = df.iloc[:, 0].astype(dtypes) + + expected_df = DataFrame( + [[expected_value, "str", "str2"]], + columns=["a", "b", "b"], + dtype=object, + ) + tm.assert_frame_equal(df, expected_df) + + +class TestILocCallable: + def test_frame_iloc_getitem_callable(self): + # GH#11485 + df = DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD")) + + # return location + res = df.iloc[lambda x: [1, 3]] + tm.assert_frame_equal(res, df.iloc[[1, 3]]) + + res = df.iloc[lambda x: [1, 3], :] + tm.assert_frame_equal(res, df.iloc[[1, 3], :]) + + res = df.iloc[lambda x: [1, 3], lambda x: 0] + tm.assert_series_equal(res, df.iloc[[1, 3], 0]) + + res = df.iloc[lambda x: [1, 3], lambda x: [0]] + tm.assert_frame_equal(res, df.iloc[[1, 3], [0]]) + + # mixture + res = df.iloc[[1, 3], lambda x: 0] + tm.assert_series_equal(res, df.iloc[[1, 3], 0]) + + res = df.iloc[[1, 3], lambda x: [0]] + tm.assert_frame_equal(res, df.iloc[[1, 3], [0]]) + + res = df.iloc[lambda x: [1, 3], 0] + tm.assert_series_equal(res, df.iloc[[1, 3], 0]) + + res = df.iloc[lambda x: [1, 3], [0]] + tm.assert_frame_equal(res, df.iloc[[1, 3], [0]]) + + def test_frame_iloc_setitem_callable(self): + # GH#11485 + df = DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD")) + + # return location + res = df.copy() + res.iloc[lambda x: [1, 3]] = 0 + exp = df.copy() + exp.iloc[[1, 3]] = 0 + tm.assert_frame_equal(res, exp) + + res = df.copy() + res.iloc[lambda x: [1, 3], :] = -1 + exp = df.copy() + exp.iloc[[1, 3], :] = -1 + tm.assert_frame_equal(res, exp) + + res = df.copy() + res.iloc[lambda x: [1, 3], lambda x: 0] = 5 + exp = df.copy() + exp.iloc[[1, 3], 0] = 5 + tm.assert_frame_equal(res, exp) + + res = df.copy() + res.iloc[lambda x: [1, 3], lambda x: [0]] = 25 + exp = df.copy() + exp.iloc[[1, 3], [0]] = 25 + tm.assert_frame_equal(res, exp) + + # mixture + res = df.copy() + res.iloc[[1, 3], lambda x: 0] = -3 + exp = df.copy() + exp.iloc[[1, 3], 0] = -3 + tm.assert_frame_equal(res, exp) + + res = df.copy() + res.iloc[[1, 3], lambda x: [0]] = -5 + exp = df.copy() + exp.iloc[[1, 3], [0]] = -5 + 
tm.assert_frame_equal(res, exp) + + res = df.copy() + res.iloc[lambda x: [1, 3], 0] = 10 + exp = df.copy() + exp.iloc[[1, 3], 0] = 10 + tm.assert_frame_equal(res, exp) + + res = df.copy() + res.iloc[lambda x: [1, 3], [0]] = [-5, -5] + exp = df.copy() + exp.iloc[[1, 3], [0]] = [-5, -5] + tm.assert_frame_equal(res, exp) + + +class TestILocSeries: + def test_iloc(self, using_copy_on_write): + ser = Series( + np.random.default_rng(2).standard_normal(10), index=list(range(0, 20, 2)) + ) + ser_original = ser.copy() + + for i in range(len(ser)): + result = ser.iloc[i] + exp = ser[ser.index[i]] + tm.assert_almost_equal(result, exp) + + # pass a slice + result = ser.iloc[slice(1, 3)] + expected = ser.loc[2:4] + tm.assert_series_equal(result, expected) + + # test slice is a view + with tm.assert_produces_warning(None): + # GH#45324 make sure we aren't giving a spurious FutureWarning + result[:] = 0 + if using_copy_on_write: + tm.assert_series_equal(ser, ser_original) + else: + assert (ser.iloc[1:3] == 0).all() + + # list of integers + result = ser.iloc[[0, 2, 3, 4, 5]] + expected = ser.reindex(ser.index[[0, 2, 3, 4, 5]]) + tm.assert_series_equal(result, expected) + + def test_iloc_getitem_nonunique(self): + ser = Series([0, 1, 2], index=[0, 1, 0]) + assert ser.iloc[2] == 2 + + def test_iloc_setitem_pure_position_based(self): + # GH#22046 + ser1 = Series([1, 2, 3]) + ser2 = Series([4, 5, 6], index=[1, 0, 2]) + ser1.iloc[1:3] = ser2.iloc[1:3] + expected = Series([1, 5, 6]) + tm.assert_series_equal(ser1, expected) + + def test_iloc_nullable_int64_size_1_nan(self): + # GH 31861 + result = DataFrame({"a": ["test"], "b": [np.nan]}) + result.loc[:, "b"] = result.loc[:, "b"].astype("Int64") + expected = DataFrame({"a": ["test"], "b": array([NA], dtype="Int64")}) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_indexers.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_indexers.py new file mode 100644 index 00000000..ddc5c039 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_indexers.py @@ -0,0 +1,61 @@ +# Tests aimed at pandas.core.indexers +import numpy as np +import pytest + +from pandas.core.indexers import ( + is_scalar_indexer, + length_of_indexer, + validate_indices, +) + + +def test_length_of_indexer(): + arr = np.zeros(4, dtype=bool) + arr[0] = 1 + result = length_of_indexer(arr) + assert result == 1 + + +def test_is_scalar_indexer(): + indexer = (0, 1) + assert is_scalar_indexer(indexer, 2) + assert not is_scalar_indexer(indexer[0], 2) + + indexer = (np.array([2]), 1) + assert not is_scalar_indexer(indexer, 2) + + indexer = (np.array([2]), np.array([3])) + assert not is_scalar_indexer(indexer, 2) + + indexer = (np.array([2]), np.array([3, 4])) + assert not is_scalar_indexer(indexer, 2) + + assert not is_scalar_indexer(slice(None), 1) + + indexer = 0 + assert is_scalar_indexer(indexer, 1) + + indexer = (0,) + assert is_scalar_indexer(indexer, 1) + + +class TestValidateIndices: + def test_validate_indices_ok(self): + indices = np.asarray([0, 1]) + validate_indices(indices, 2) + validate_indices(indices[:0], 0) + validate_indices(np.array([-1, -1]), 0) + + def test_validate_indices_low(self): + indices = np.asarray([0, -2]) + with pytest.raises(ValueError, match="'indices' contains"): + validate_indices(indices, 2) + + def test_validate_indices_high(self): + indices = np.asarray([0, 1, 2]) + with pytest.raises(IndexError, match="indices are out"): + validate_indices(indices, 2) 
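+
+ # Illustrative sketch (added here; not from the upstream pandas suite): -1
+ # acts as the "missing value" sentinel for takeable indices, so
+ # validate_indices accepts it, while -2 and below raise ValueError as in
+ # test_validate_indices_low above.
+ def test_validate_indices_na_sentinel_sketch(self):
+ validate_indices(np.asarray([-1, 0, 1]), 2)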
+ + def test_validate_indices_empty(self): + with pytest.raises(IndexError, match="indices are out"): + validate_indices(np.array([0, 1]), 0) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_indexing.py new file mode 100644 index 00000000..54e204c4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_indexing.py @@ -0,0 +1,1142 @@ +""" test fancy indexing & misc """ + +import array +from datetime import datetime +import re +import weakref + +import numpy as np +import pytest + +from pandas.errors import IndexingError + +from pandas.core.dtypes.common import ( + is_float_dtype, + is_integer_dtype, + is_object_dtype, +) + +import pandas as pd +from pandas import ( + DataFrame, + Index, + NaT, + Series, + date_range, + offsets, + timedelta_range, +) +import pandas._testing as tm +from pandas.tests.indexing.common import _mklbl +from pandas.tests.indexing.test_floats import gen_obj + +# ------------------------------------------------------------------------ +# Indexing test cases + + +class TestFancy: + """pure get/set item & fancy indexing""" + + def test_setitem_ndarray_1d(self): + # GH5508 + + # len of indexer vs length of the 1d ndarray + df = DataFrame(index=Index(np.arange(1, 11), dtype=np.int64)) + df["foo"] = np.zeros(10, dtype=np.float64) + df["bar"] = np.zeros(10, dtype=complex) + + # invalid + msg = "Must have equal len keys and value when setting with an iterable" + with pytest.raises(ValueError, match=msg): + df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0]) + + # valid + df.loc[df.index[2:6], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0]) + + result = df.loc[df.index[2:6], "bar"] + expected = Series( + [2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name="bar" + ) + tm.assert_series_equal(result, expected) + + def test_setitem_ndarray_1d_2(self): + # GH5508 + + # dtype getting changed? + df = DataFrame(index=Index(np.arange(1, 11))) + df["foo"] = np.zeros(10, dtype=np.float64) + df["bar"] = np.zeros(10, dtype=complex) + + msg = "Must have equal len keys and value when setting with an iterable" + with pytest.raises(ValueError, match=msg): + df[2:5] = np.arange(1, 4) * 1j + + @pytest.mark.filterwarnings( + "ignore:Series.__getitem__ treating keys as positions is deprecated:" + "FutureWarning" + ) + def test_getitem_ndarray_3d( + self, index, frame_or_series, indexer_sli, using_array_manager + ): + # GH 25567 + obj = gen_obj(frame_or_series, index) + idxr = indexer_sli(obj) + nd3 = np.random.default_rng(2).integers(5, size=(2, 2, 2)) + + msgs = [] + if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]: + msgs.append(r"Wrong number of dimensions. 
values.ndim > ndim \[3 > 1\]") + if using_array_manager: + msgs.append("Passed array should be 1-dimensional") + if frame_or_series is Series or indexer_sli is tm.iloc: + msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)") + if using_array_manager: + msgs.append("indexer should be 1-dimensional") + if indexer_sli is tm.loc or ( + frame_or_series is Series and indexer_sli is tm.setitem + ): + msgs.append("Cannot index with multidimensional key") + if frame_or_series is DataFrame and indexer_sli is tm.setitem: + msgs.append("Index data must be 1-dimensional") + if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc: + msgs.append("Index data must be 1-dimensional") + if isinstance(index, (pd.TimedeltaIndex, pd.DatetimeIndex, pd.PeriodIndex)): + msgs.append("Data must be 1-dimensional") + if len(index) == 0 or isinstance(index, pd.MultiIndex): + msgs.append("positional indexers are out-of-bounds") + if type(index) is Index and not isinstance(index._values, np.ndarray): + # e.g. Int64 + msgs.append("values must be a 1D array") + + # string[pyarrow] + msgs.append("only handle 1-dimensional arrays") + + msg = "|".join(msgs) + + potential_errors = (IndexError, ValueError, NotImplementedError) + with pytest.raises(potential_errors, match=msg): + idxr[nd3] + + @pytest.mark.filterwarnings( + "ignore:Series.__setitem__ treating keys as positions is deprecated:" + "FutureWarning" + ) + def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli): + # GH 25567 + obj = gen_obj(frame_or_series, index) + idxr = indexer_sli(obj) + nd3 = np.random.default_rng(2).integers(5, size=(2, 2, 2)) + + if indexer_sli is tm.iloc: + err = ValueError + msg = f"Cannot set values with ndim > {obj.ndim}" + else: + err = ValueError + msg = "|".join( + [ + r"Buffer has wrong number of dimensions \(expected 1, got 3\)", + "Cannot set values with ndim > 1", + "Index data must be 1-dimensional", + "Data must be 1-dimensional", + "Array conditional must be same shape as self", + ] + ) + + with pytest.raises(err, match=msg): + idxr[nd3] = 0 + + def test_getitem_ndarray_0d(self): + # GH#24924 + key = np.array(0) + + # dataframe __getitem__ + df = DataFrame([[1, 2], [3, 4]]) + result = df[key] + expected = Series([1, 3], name=0) + tm.assert_series_equal(result, expected) + + # series __getitem__ + ser = Series([1, 2]) + result = ser[key] + assert result == 1 + + def test_inf_upcast(self): + # GH 16957 + # We should be able to use np.inf as a key + # np.inf should cause an index to convert to float + + # Test with np.inf in rows + df = DataFrame(columns=[0]) + df.loc[1] = 1 + df.loc[2] = 2 + df.loc[np.inf] = 3 + + # make sure we can look up the value + assert df.loc[np.inf, 0] == 3 + + result = df.index + expected = Index([1, 2, np.inf], dtype=np.float64) + tm.assert_index_equal(result, expected) + + def test_setitem_dtype_upcast(self): + # GH3216 + df = DataFrame([{"a": 1}, {"a": 3, "b": 2}]) + df["c"] = np.nan + assert df["c"].dtype == np.float64 + + with tm.assert_produces_warning( + FutureWarning, match="item of incompatible dtype" + ): + df.loc[0, "c"] = "foo" + expected = DataFrame( + [{"a": 1, "b": np.nan, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}] + ) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("val", [3.14, "wxyz"]) + def test_setitem_dtype_upcast2(self, val): + # GH10280 + df = DataFrame( + np.arange(6, dtype="int64").reshape(2, 3), + index=list("ab"), + columns=["foo", "bar", "baz"], + ) + + left = df.copy() + with tm.assert_produces_warning( + 
FutureWarning, match="item of incompatible dtype" + ): + left.loc["a", "bar"] = val + right = DataFrame( + [[0, val, 2], [3, 4, 5]], + index=list("ab"), + columns=["foo", "bar", "baz"], + ) + + tm.assert_frame_equal(left, right) + assert is_integer_dtype(left["foo"]) + assert is_integer_dtype(left["baz"]) + + def test_setitem_dtype_upcast3(self): + left = DataFrame( + np.arange(6, dtype="int64").reshape(2, 3) / 10.0, + index=list("ab"), + columns=["foo", "bar", "baz"], + ) + with tm.assert_produces_warning( + FutureWarning, match="item of incompatible dtype" + ): + left.loc["a", "bar"] = "wxyz" + + right = DataFrame( + [[0, "wxyz", 0.2], [0.3, 0.4, 0.5]], + index=list("ab"), + columns=["foo", "bar", "baz"], + ) + + tm.assert_frame_equal(left, right) + assert is_float_dtype(left["foo"]) + assert is_float_dtype(left["baz"]) + + def test_dups_fancy_indexing(self): + # GH 3455 + + df = tm.makeCustomDataframe(10, 3) + df.columns = ["a", "a", "b"] + result = df[["b", "a"]].columns + expected = Index(["b", "a", "a"]) + tm.assert_index_equal(result, expected) + + def test_dups_fancy_indexing_across_dtypes(self): + # across dtypes + df = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("aaaaaaa")) + df.head() + str(df) + result = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]]) + result.columns = list("aaaaaaa") # GH#3468 + + # GH#3509 smoke tests for indexing with duplicate columns + df.iloc[:, 4] + result.iloc[:, 4] + + tm.assert_frame_equal(df, result) + + def test_dups_fancy_indexing_not_in_order(self): + # GH 3561, dups not in selected order + df = DataFrame( + {"test": [5, 7, 9, 11], "test1": [4.0, 5, 6, 7], "other": list("abcd")}, + index=["A", "A", "B", "C"], + ) + rows = ["C", "B"] + expected = DataFrame( + {"test": [11, 9], "test1": [7.0, 6], "other": ["d", "c"]}, index=rows + ) + result = df.loc[rows] + tm.assert_frame_equal(result, expected) + + result = df.loc[Index(rows)] + tm.assert_frame_equal(result, expected) + + rows = ["C", "B", "E"] + with pytest.raises(KeyError, match="not in index"): + df.loc[rows] + + # see GH5553, make sure we use the right indexer + rows = ["F", "G", "H", "C", "B", "E"] + with pytest.raises(KeyError, match="not in index"): + df.loc[rows] + + def test_dups_fancy_indexing_only_missing_label(self): + # List containing only missing label + dfnu = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), index=list("AABCD") + ) + with pytest.raises( + KeyError, + match=re.escape( + "\"None of [Index(['E'], dtype='object')] are in the [index]\"" + ), + ): + dfnu.loc[["E"]] + + @pytest.mark.parametrize("vals", [[0, 1, 2], list("abc")]) + def test_dups_fancy_indexing_missing_label(self, vals): + # GH 4619; duplicate indexer with missing label + df = DataFrame({"A": vals}) + with pytest.raises(KeyError, match="not in index"): + df.loc[[0, 8, 0]] + + def test_dups_fancy_indexing_non_unique(self): + # non unique with non unique selector + df = DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"]) + with pytest.raises(KeyError, match="not in index"): + df.loc[["A", "A", "E"]] + + def test_dups_fancy_indexing2(self): + # GH 5835 + # dups on index and missing values + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 5)), + columns=["A", "B", "B", "B", "A"], + ) + + with pytest.raises(KeyError, match="not in index"): + df.loc[:, ["A", "B", "C"]] + + def test_dups_fancy_indexing3(self): + # GH 6504, multi-axis indexing + df = DataFrame( + np.random.default_rng(2).standard_normal((9, 2)), + index=[1, 1, 1, 2, 2, 2, 3, 3, 3], + 
columns=["a", "b"], + ) + + expected = df.iloc[0:6] + result = df.loc[[1, 2]] + tm.assert_frame_equal(result, expected) + + expected = df + result = df.loc[:, ["a", "b"]] + tm.assert_frame_equal(result, expected) + + expected = df.iloc[0:6, :] + result = df.loc[[1, 2], ["a", "b"]] + tm.assert_frame_equal(result, expected) + + def test_duplicate_int_indexing(self, indexer_sl): + # GH 17347 + ser = Series(range(3), index=[1, 1, 3]) + expected = Series(range(2), index=[1, 1]) + result = indexer_sl(ser)[[1]] + tm.assert_series_equal(result, expected) + + def test_indexing_mixed_frame_bug(self): + # GH3492 + df = DataFrame( + {"a": {1: "aaa", 2: "bbb", 3: "ccc"}, "b": {1: 111, 2: 222, 3: 333}} + ) + + # this works, new column is created correctly + df["test"] = df["a"].apply(lambda x: "_" if x == "aaa" else x) + + # this does not work, ie column test is not changed + idx = df["test"] == "_" + temp = df.loc[idx, "a"].apply(lambda x: "-----" if x == "aaa" else x) + df.loc[idx, "test"] = temp + assert df.iloc[0, 2] == "-----" + + def test_multitype_list_index_access(self): + # GH 10610 + df = DataFrame( + np.random.default_rng(2).random((10, 5)), columns=["a"] + [20, 21, 22, 23] + ) + + with pytest.raises(KeyError, match=re.escape("'[26, -8] not in index'")): + df[[22, 26, -8]] + assert df[21].shape[0] == df.shape[0] + + def test_set_index_nan(self): + # GH 3586 + df = DataFrame( + { + "PRuid": { + 17: "nonQC", + 18: "nonQC", + 19: "nonQC", + 20: "10", + 21: "11", + 22: "12", + 23: "13", + 24: "24", + 25: "35", + 26: "46", + 27: "47", + 28: "48", + 29: "59", + 30: "10", + }, + "QC": { + 17: 0.0, + 18: 0.0, + 19: 0.0, + 20: np.nan, + 21: np.nan, + 22: np.nan, + 23: np.nan, + 24: 1.0, + 25: np.nan, + 26: np.nan, + 27: np.nan, + 28: np.nan, + 29: np.nan, + 30: np.nan, + }, + "data": { + 17: 7.9544899999999998, + 18: 8.0142609999999994, + 19: 7.8591520000000008, + 20: 0.86140349999999999, + 21: 0.87853110000000001, + 22: 0.8427041999999999, + 23: 0.78587700000000005, + 24: 0.73062459999999996, + 25: 0.81668560000000001, + 26: 0.81927080000000008, + 27: 0.80705009999999999, + 28: 0.81440240000000008, + 29: 0.80140849999999997, + 30: 0.81307740000000006, + }, + "year": { + 17: 2006, + 18: 2007, + 19: 2008, + 20: 1985, + 21: 1985, + 22: 1985, + 23: 1985, + 24: 1985, + 25: 1985, + 26: 1985, + 27: 1985, + 28: 1985, + 29: 1985, + 30: 1986, + }, + } + ).reset_index() + + result = ( + df.set_index(["year", "PRuid", "QC"]) + .reset_index() + .reindex(columns=df.columns) + ) + tm.assert_frame_equal(result, df) + + def test_multi_assign(self): + # GH 3626, an assignment of a sub-df to a df + # set float64 to avoid upcast when setting nan + df = DataFrame( + { + "FC": ["a", "b", "a", "b", "a", "b"], + "PF": [0, 0, 0, 0, 1, 1], + "col1": list(range(6)), + "col2": list(range(6, 12)), + } + ).astype({"col2": "float64"}) + df.iloc[1, 0] = np.nan + df2 = df.copy() + + mask = ~df2.FC.isna() + cols = ["col1", "col2"] + + dft = df2 * 2 + dft.iloc[3, 3] = np.nan + + expected = DataFrame( + { + "FC": ["a", np.nan, "a", "b", "a", "b"], + "PF": [0, 0, 0, 0, 1, 1], + "col1": Series([0, 1, 4, 6, 8, 10]), + "col2": [12, 7, 16, np.nan, 20, 22], + } + ) + + # frame on rhs + df2.loc[mask, cols] = dft.loc[mask, cols] + tm.assert_frame_equal(df2, expected) + + # with an ndarray on rhs + # coerces to float64 because values has float64 dtype + # GH 14001 + expected = DataFrame( + { + "FC": ["a", np.nan, "a", "b", "a", "b"], + "PF": [0, 0, 0, 0, 1, 1], + "col1": [0, 1, 4, 6, 8, 10], + "col2": [12, 7, 16, np.nan, 20, 22], + } + ) + df2 
= df.copy()
+ df2.loc[mask, cols] = dft.loc[mask, cols].values
+ tm.assert_frame_equal(df2, expected)
+
+ def test_multi_assign_broadcasting_rhs(self):
+ # broadcasting on the rhs is required
+ df = DataFrame(
+ {
+ "A": [1, 2, 0, 0, 0],
+ "B": [0, 0, 0, 10, 11],
+ "C": [0, 0, 0, 10, 11],
+ "D": [3, 4, 5, 6, 7],
+ }
+ )
+
+ expected = df.copy()
+ mask = expected["A"] == 0
+ for col in ["A", "B"]:
+ expected.loc[mask, col] = df["D"]
+
+ df.loc[df["A"] == 0, ["A", "B"]] = df["D"]
+ tm.assert_frame_equal(df, expected)
+
+ def test_setitem_list(self):
+ # GH 6043
+ # iloc with a list
+ df = DataFrame(index=[0, 1], columns=[0])
+ df.iloc[1, 0] = [1, 2, 3]
+ df.iloc[1, 0] = [1, 2]
+
+ result = DataFrame(index=[0, 1], columns=[0])
+ result.iloc[1, 0] = [1, 2]
+
+ tm.assert_frame_equal(result, df)
+
+ def test_string_slice(self):
+ # GH 14424
+ # string indexing against datetimelike with object
+ # dtype should properly raise KeyError
+ df = DataFrame([1], Index([pd.Timestamp("2011-01-01")], dtype=object))
+ assert df.index._is_all_dates
+ with pytest.raises(KeyError, match="'2011'"):
+ df["2011"]
+
+ with pytest.raises(KeyError, match="'2011'"):
+ df.loc["2011", 0]
+
+ def test_string_slice_empty(self):
+ # GH 14424
+
+ df = DataFrame()
+ assert not df.index._is_all_dates
+ with pytest.raises(KeyError, match="'2011'"):
+ df["2011"]
+
+ with pytest.raises(KeyError, match="^0$"):
+ df.loc["2011", 0]
+
+ def test_astype_assignment(self):
+ # GH4312 (iloc)
+ df_orig = DataFrame(
+ [["1", "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
+ )
+
+ df = df_orig.copy()
+
+ # with the enforcement of GH#45333 in 2.0, this setting is attempted inplace,
+ # so object dtype is retained
+ df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
+ expected = DataFrame(
+ [[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
+ )
+ expected["A"] = expected["A"].astype(object)
+ expected["B"] = expected["B"].astype(object)
+ tm.assert_frame_equal(df, expected)
+
+ # GH5702 (loc)
+ df = df_orig.copy()
+ df.loc[:, "A"] = df.loc[:, "A"].astype(np.int64)
+ expected = DataFrame(
+ [[1, "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
+ )
+ expected["A"] = expected["A"].astype(object)
+ tm.assert_frame_equal(df, expected)
+
+ df = df_orig.copy()
+ df.loc[:, ["B", "C"]] = df.loc[:, ["B", "C"]].astype(np.int64)
+ expected = DataFrame(
+ [["1", 2, 3, ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
+ )
+ expected["B"] = expected["B"].astype(object)
+ expected["C"] = expected["C"].astype(object)
+ tm.assert_frame_equal(df, expected)
+
+ def test_astype_assignment_full_replacements(self):
+ # full replacements / no nans
+ df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
+
+ # With the enforcement of GH#45333 in 2.0, this assignment occurs inplace,
+ # so float64 is retained
+ df.iloc[:, 0] = df["A"].astype(np.int64)
+ expected = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
+ tm.assert_frame_equal(df, expected)
+
+ df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
+ df.loc[:, "A"] = df["A"].astype(np.int64)
+ tm.assert_frame_equal(df, expected)
+
+ @pytest.mark.parametrize("indexer", [tm.getitem, tm.loc])
+ def test_index_type_coercion(self, indexer):
+ # GH 11836
+ # if we have an index type and set it with something that looks
+ # to numpy like the same, but actually is not
+ # (e.g. 
setting with a float or string '0') + # then we need to coerce to object + + # integer indexes + for s in [Series(range(5)), Series(range(5), index=range(1, 6))]: + assert is_integer_dtype(s.index) + + s2 = s.copy() + indexer(s2)[0.1] = 0 + assert is_float_dtype(s2.index) + assert indexer(s2)[0.1] == 0 + + s2 = s.copy() + indexer(s2)[0.0] = 0 + exp = s.index + if 0 not in s: + exp = Index(s.index.tolist() + [0]) + tm.assert_index_equal(s2.index, exp) + + s2 = s.copy() + indexer(s2)["0"] = 0 + assert is_object_dtype(s2.index) + + for s in [Series(range(5), index=np.arange(5.0))]: + assert is_float_dtype(s.index) + + s2 = s.copy() + indexer(s2)[0.1] = 0 + assert is_float_dtype(s2.index) + assert indexer(s2)[0.1] == 0 + + s2 = s.copy() + indexer(s2)[0.0] = 0 + tm.assert_index_equal(s2.index, s.index) + + s2 = s.copy() + indexer(s2)["0"] = 0 + assert is_object_dtype(s2.index) + + +class TestMisc: + def test_float_index_to_mixed(self): + df = DataFrame( + { + 0.0: np.random.default_rng(2).random(10), + 1.0: np.random.default_rng(2).random(10), + } + ) + df["a"] = 10 + + expected = DataFrame({0.0: df[0.0], 1.0: df[1.0], "a": [10] * 10}) + tm.assert_frame_equal(expected, df) + + def test_float_index_non_scalar_assignment(self): + df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0]) + df.loc[df.index[:2]] = 1 + expected = DataFrame({"a": [1, 1, 3], "b": [1, 1, 5]}, index=df.index) + tm.assert_frame_equal(expected, df) + + def test_loc_setitem_fullindex_views(self): + df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0]) + df2 = df.copy() + df.loc[df.index] = df.loc[df.index] + tm.assert_frame_equal(df, df2) + + def test_rhs_alignment(self): + # GH8258, tests that both rows & columns are aligned to what is + # assigned to. 
covers both uniform data-type & multi-type cases
+ def run_tests(df, rhs, right_loc, right_iloc):
+ # label, index, slice
+ lbl_one, idx_one, slice_one = list("bcd"), [1, 2, 3], slice(1, 4)
+ lbl_two, idx_two, slice_two = ["joe", "jolie"], [1, 2], slice(1, 3)
+
+ left = df.copy()
+ left.loc[lbl_one, lbl_two] = rhs
+ tm.assert_frame_equal(left, right_loc)
+
+ left = df.copy()
+ left.iloc[idx_one, idx_two] = rhs
+ tm.assert_frame_equal(left, right_iloc)
+
+ left = df.copy()
+ left.iloc[slice_one, slice_two] = rhs
+ tm.assert_frame_equal(left, right_iloc)
+
+ xs = np.arange(20).reshape(5, 4)
+ cols = ["jim", "joe", "jolie", "joline"]
+ df = DataFrame(xs, columns=cols, index=list("abcde"), dtype="int64")
+
+ # right hand side; permute the indices and multiply by -2
+ rhs = -2 * df.iloc[3:0:-1, 2:0:-1]
+
+ # expected `right` result; just multiply by -2
+ right_iloc = df.copy()
+ right_iloc["joe"] = [1, 14, 10, 6, 17]
+ right_iloc["jolie"] = [2, 13, 9, 5, 18]
+ right_iloc.iloc[1:4, 1:3] *= -2
+ right_loc = df.copy()
+ right_loc.iloc[1:4, 1:3] *= -2
+
+ # run tests with uniform dtypes
+ run_tests(df, rhs, right_loc, right_iloc)
+
+ # make frames multi-type & re-run tests
+ for frame in [df, rhs, right_loc, right_iloc]:
+ frame["joe"] = frame["joe"].astype("float64")
+ frame["jolie"] = frame["jolie"].map(lambda x: f"@{x}")
+ right_iloc["joe"] = [1.0, "@-28", "@-20", "@-12", 17.0]
+ right_iloc["jolie"] = ["@2", -26.0, -18.0, -10.0, "@18"]
+ with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
+ run_tests(df, rhs, right_loc, right_iloc)
+
+ @pytest.mark.parametrize(
+ "idx", [_mklbl("A", 20), np.arange(20) + 100, np.linspace(100, 150, 20)]
+ )
+ def test_str_label_slicing_with_negative_step(self, idx):
+ SLC = pd.IndexSlice
+
+ idx = Index(idx)
+ ser = Series(np.arange(20), index=idx)
+ tm.assert_indexing_slices_equivalent(ser, SLC[idx[9] :: -1], SLC[9::-1])
+ tm.assert_indexing_slices_equivalent(ser, SLC[: idx[9] : -1], SLC[:8:-1])
+ tm.assert_indexing_slices_equivalent(
+ ser, SLC[idx[13] : idx[9] : -1], SLC[13:8:-1]
+ )
+ tm.assert_indexing_slices_equivalent(ser, SLC[idx[9] : idx[13] : -1], SLC[:0])
+
+ def test_slice_with_zero_step_raises(self, index, indexer_sl, frame_or_series):
+ obj = frame_or_series(np.arange(len(index)), index=index)
+ with pytest.raises(ValueError, match="slice step cannot be zero"):
+ indexer_sl(obj)[::0]
+
+ def test_loc_setitem_indexing_assignment_dict_already_exists(self):
+ index = Index([-5, 0, 5], name="z")
+ df = DataFrame({"x": [1, 2, 6], "y": [2, 2, 8]}, index=index)
+ expected = df.copy()
+ rhs = {"x": 9, "y": 99}
+ df.loc[5] = rhs
+ expected.loc[5] = [9, 99]
+ tm.assert_frame_equal(df, expected)
+
+ # GH#38335 same thing, mixed dtypes
+ df = DataFrame({"x": [1, 2, 6], "y": [2.0, 2.0, 8.0]}, index=index)
+ df.loc[5] = rhs
+ expected = DataFrame({"x": [1, 2, 9], "y": [2.0, 2.0, 99.0]}, index=index)
+ tm.assert_frame_equal(df, expected)
+
+ def test_iloc_getitem_indexing_dtypes_on_empty(self):
+ # Check that .iloc returns correct dtypes GH9983
+ df = DataFrame({"a": [1, 2, 3], "b": ["b", "b2", "b3"]})
+ df2 = df.iloc[[], :]
+
+ assert df2.loc[:, "a"].dtype == np.int64
+ tm.assert_series_equal(df2.loc[:, "a"], df2.iloc[:, 0])
+
+ @pytest.mark.parametrize("size", [5, 999999, 1000000])
+ def test_loc_range_in_series_indexing(self, size):
+ # range can cause an indexing error
+ # GH 11652
+ s = Series(index=range(size), dtype=np.float64)
+ s.loc[range(1)] = 42
+ tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))
+
+ 
s.loc[range(2)] = 43 + tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1])) + + def test_partial_boolean_frame_indexing(self): + # GH 17170 + df = DataFrame( + np.arange(9.0).reshape(3, 3), index=list("abc"), columns=list("ABC") + ) + index_df = DataFrame(1, index=list("ab"), columns=list("AB")) + result = df[index_df.notnull()] + expected = DataFrame( + np.array([[0.0, 1.0, np.nan], [3.0, 4.0, np.nan], [np.nan] * 3]), + index=list("abc"), + columns=list("ABC"), + ) + tm.assert_frame_equal(result, expected) + + def test_no_reference_cycle(self): + df = DataFrame({"a": [0, 1], "b": [2, 3]}) + for name in ("loc", "iloc", "at", "iat"): + getattr(df, name) + wr = weakref.ref(df) + del df + assert wr() is None + + def test_label_indexing_on_nan(self, nulls_fixture): + # GH 32431 + df = Series([1, "{1,2}", 1, nulls_fixture]) + vc = df.value_counts(dropna=False) + result1 = vc.loc[nulls_fixture] + result2 = vc[nulls_fixture] + + expected = 1 + assert result1 == expected + assert result2 == expected + + +class TestDataframeNoneCoercion: + EXPECTED_SINGLE_ROW_RESULTS = [ + # For numeric series, we should coerce to NaN. + ([1, 2, 3], [np.nan, 2, 3], FutureWarning), + ([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0], None), + # For datetime series, we should coerce to NaT. + ( + [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], + [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)], + None, + ), + # For objects, we should preserve the None value. + (["foo", "bar", "baz"], [None, "bar", "baz"], None), + ] + + @pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS) + def test_coercion_with_loc(self, expected): + start_data, expected_result, warn = expected + + start_dataframe = DataFrame({"foo": start_data}) + start_dataframe.loc[0, ["foo"]] = None + + expected_dataframe = DataFrame({"foo": expected_result}) + tm.assert_frame_equal(start_dataframe, expected_dataframe) + + @pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS) + def test_coercion_with_setitem_and_dataframe(self, expected): + start_data, expected_result, warn = expected + + start_dataframe = DataFrame({"foo": start_data}) + start_dataframe[start_dataframe["foo"] == start_dataframe["foo"][0]] = None + + expected_dataframe = DataFrame({"foo": expected_result}) + tm.assert_frame_equal(start_dataframe, expected_dataframe) + + @pytest.mark.parametrize("expected", EXPECTED_SINGLE_ROW_RESULTS) + def test_none_coercion_loc_and_dataframe(self, expected): + start_data, expected_result, warn = expected + + start_dataframe = DataFrame({"foo": start_data}) + start_dataframe.loc[start_dataframe["foo"] == start_dataframe["foo"][0]] = None + + expected_dataframe = DataFrame({"foo": expected_result}) + tm.assert_frame_equal(start_dataframe, expected_dataframe) + + def test_none_coercion_mixed_dtypes(self): + start_dataframe = DataFrame( + { + "a": [1, 2, 3], + "b": [1.0, 2.0, 3.0], + "c": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], + "d": ["a", "b", "c"], + } + ) + start_dataframe.iloc[0] = None + + exp = DataFrame( + { + "a": [np.nan, 2, 3], + "b": [np.nan, 2.0, 3.0], + "c": [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)], + "d": [None, "b", "c"], + } + ) + tm.assert_frame_equal(start_dataframe, exp) + + +class TestDatetimelikeCoercion: + def test_setitem_dt64_string_scalar(self, tz_naive_fixture, indexer_sli): + # dispatching _can_hold_element to underlying DatetimeArray + tz = tz_naive_fixture + + dti = date_range("2016-01-01", periods=3, tz=tz) + ser = Series(dti.copy(deep=True)) + + 
values = ser._values
+
+ newval = "2018-01-01"
+ values._validate_setitem_value(newval)
+
+ indexer_sli(ser)[0] = newval
+
+ if tz is None:
+ # TODO(EA2D): we can make this no-copy in tz-naive case too
+ assert ser.dtype == dti.dtype
+ assert ser._values._ndarray is values._ndarray
+ else:
+ assert ser._values is values
+
+ @pytest.mark.parametrize("box", [list, np.array, pd.array, pd.Categorical, Index])
+ @pytest.mark.parametrize(
+ "key", [[0, 1], slice(0, 2), np.array([True, True, False])]
+ )
+ def test_setitem_dt64_string_values(self, tz_naive_fixture, indexer_sli, key, box):
+ # dispatching _can_hold_element to underlying DatetimeArray
+ tz = tz_naive_fixture
+
+ if isinstance(key, slice) and indexer_sli is tm.loc:
+ key = slice(0, 1)
+
+ dti = date_range("2016-01-01", periods=3, tz=tz)
+ ser = Series(dti.copy(deep=True))
+
+ values = ser._values
+
+ newvals = box(["2019-01-01", "2010-01-02"])
+ values._validate_setitem_value(newvals)
+
+ indexer_sli(ser)[key] = newvals
+
+ if tz is None:
+ # TODO(EA2D): we can make this no-copy in tz-naive case too
+ assert ser.dtype == dti.dtype
+ assert ser._values._ndarray is values._ndarray
+ else:
+ assert ser._values is values
+
+ @pytest.mark.parametrize("scalar", ["3 Days", offsets.Hour(4)])
+ def test_setitem_td64_scalar(self, indexer_sli, scalar):
+ # dispatching _can_hold_element to underlying TimedeltaArray
+ tdi = timedelta_range("1 Day", periods=3)
+ ser = Series(tdi.copy(deep=True))
+
+ values = ser._values
+ values._validate_setitem_value(scalar)
+
+ indexer_sli(ser)[0] = scalar
+ assert ser._values._ndarray is values._ndarray
+
+ @pytest.mark.parametrize("box", [list, np.array, pd.array, pd.Categorical, Index])
+ @pytest.mark.parametrize(
+ "key", [[0, 1], slice(0, 2), np.array([True, True, False])]
+ )
+ def test_setitem_td64_string_values(self, indexer_sli, key, box):
+ # dispatching _can_hold_element to underlying TimedeltaArray
+ if isinstance(key, slice) and indexer_sli is tm.loc:
+ key = slice(0, 1)
+
+ tdi = timedelta_range("1 Day", periods=3)
+ ser = Series(tdi.copy(deep=True))
+
+ values = ser._values
+
+ newvals = box(["10 Days", "44 hours"])
+ values._validate_setitem_value(newvals)
+
+ indexer_sli(ser)[key] = newvals
+ assert ser._values._ndarray is values._ndarray
+
+
+def test_extension_array_cross_section():
+ # A cross-section of a homogeneous EA should be an EA
+ df = DataFrame(
+ {
+ "A": pd.array([1, 2], dtype="Int64"),
+ "B": pd.array([3, 4], dtype="Int64"),
+ },
+ index=["a", "b"],
+ )
+ expected = Series(pd.array([1, 3], dtype="Int64"), index=["A", "B"], name="a")
+ result = df.loc["a"]
+ tm.assert_series_equal(result, expected)
+
+ result = df.iloc[0]
+ tm.assert_series_equal(result, expected)
+
+
+def test_extension_array_cross_section_converts():
+ # all numeric columns -> numeric series
+ df = DataFrame(
+ {
+ "A": pd.array([1, 2], dtype="Int64"),
+ "B": np.array([1, 2], dtype="int64"),
+ },
+ index=["a", "b"],
+ )
+ result = df.loc["a"]
+ expected = Series([1, 1], dtype="Int64", index=["A", "B"], name="a")
+ tm.assert_series_equal(result, expected)
+
+ result = df.iloc[0]
+ tm.assert_series_equal(result, expected)
+
+ # mixed columns -> object series
+ df = DataFrame(
+ {"A": pd.array([1, 2], dtype="Int64"), "B": np.array(["a", "b"])},
+ index=["a", "b"],
+ )
+ result = df.loc["a"]
+ expected = Series([1, "a"], dtype=object, index=["A", "B"], name="a")
+ tm.assert_series_equal(result, expected)
+
+ result = df.iloc[0]
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "ser, keys",
+ 
[(Series([10]), (0, 0)), (Series([1, 2, 3], index=list("abc")), (0, 1))],
+)
+def test_ser_tup_indexer_exceeds_dimensions(ser, keys, indexer_li):
+ # GH#13831
+ exp_err, exp_msg = IndexingError, "Too many indexers"
+ with pytest.raises(exp_err, match=exp_msg):
+ indexer_li(ser)[keys]
+
+ if indexer_li == tm.iloc:
+ # For iloc.__setitem__ we let numpy handle the error reporting.
+ exp_err, exp_msg = IndexError, "too many indices for array"
+
+ with pytest.raises(exp_err, match=exp_msg):
+ indexer_li(ser)[keys] = 0
+
+
+def test_ser_list_indexer_exceeds_dimensions(indexer_li):
+ # GH#13831
+ # Make sure an exception is raised when a tuple exceeds the dimension of the series,
+ # but not when a list is used.
+ ser = Series([10])
+ res = indexer_li(ser)[[0, 0]]
+ exp = Series([10, 10], index=Index([0, 0]))
+ tm.assert_series_equal(res, exp)
+
+
+@pytest.mark.parametrize(
+ "value", [(0, 1), [0, 1], np.array([0, 1]), array.array("b", [0, 1])]
+)
+def test_scalar_setitem_with_nested_value(value):
+ # For numeric data, we try to unpack and thus raise for mismatching length
+ df = DataFrame({"A": [1, 2, 3]})
+ msg = "|".join(
+ [
+ "Must have equal len keys and value",
+ "setting an array element with a sequence",
+ ]
+ )
+ with pytest.raises(ValueError, match=msg):
+ df.loc[0, "B"] = value
+
+ # TODO For object dtype this happens as well, but should we rather preserve
+ # the nested data and set as such?
+ df = DataFrame({"A": [1, 2, 3], "B": np.array([1, "a", "b"], dtype=object)})
+ with pytest.raises(ValueError, match="Must have equal len keys and value"):
+ df.loc[0, "B"] = value
+ # if isinstance(value, np.ndarray):
+ # assert (df.loc[0, "B"] == value).all()
+ # else:
+ # assert df.loc[0, "B"] == value
+
+
+@pytest.mark.parametrize(
+ "value", [(0, 1), [0, 1], np.array([0, 1]), array.array("b", [0, 1])]
+)
+def test_scalar_setitem_series_with_nested_value(value, indexer_sli):
+ # For numeric data, we try to unpack and thus raise for mismatching length
+ ser = Series([1, 2, 3])
+ with pytest.raises(ValueError, match="setting an array element with a sequence"):
+ indexer_sli(ser)[0] = value
+
+ # but for object dtype we preserve the nested data and set as such
+ ser = Series([1, "a", "b"], dtype=object)
+ indexer_sli(ser)[0] = value
+ if isinstance(value, np.ndarray):
+ assert (ser.loc[0] == value).all()
+ else:
+ assert ser.loc[0] == value
+
+
+@pytest.mark.parametrize(
+ "value", [(0.0,), [0.0], np.array([0.0]), array.array("d", [0.0])]
+)
+def test_scalar_setitem_with_nested_value_length1(value):
+ # https://github.com/pandas-dev/pandas/issues/46268
+
+ # For numeric data, assigning length-1 array to scalar position gets unpacked
+ df = DataFrame({"A": [1, 2, 3]})
+ df.loc[0, "B"] = value
+ expected = DataFrame({"A": [1, 2, 3], "B": [0.0, np.nan, np.nan]})
+ tm.assert_frame_equal(df, expected)
+
+ # but for object dtype we preserve the nested data
+ df = DataFrame({"A": [1, 2, 3], "B": np.array([1, "a", "b"], dtype=object)})
+ df.loc[0, "B"] = value
+ if isinstance(value, np.ndarray):
+ assert (df.loc[0, "B"] == value).all()
+ else:
+ assert df.loc[0, "B"] == value
+
+
+@pytest.mark.parametrize(
+ "value", [(0.0,), [0.0], np.array([0.0]), array.array("d", [0.0])]
+)
+def test_scalar_setitem_series_with_nested_value_length1(value, indexer_sli):
+ # For numeric data, assigning length-1 array to scalar position gets unpacked
+ # TODO this only happens in case of ndarray, should we make this consistent
+ # for all list-likes? 
(as happens for DataFrame.(i)loc, see test above) + ser = Series([1.0, 2.0, 3.0]) + if isinstance(value, np.ndarray): + indexer_sli(ser)[0] = value + expected = Series([0.0, 2.0, 3.0]) + tm.assert_series_equal(ser, expected) + else: + with pytest.raises( + ValueError, match="setting an array element with a sequence" + ): + indexer_sli(ser)[0] = value + + # but for object dtype we preserve the nested data + ser = Series([1, "a", "b"], dtype=object) + indexer_sli(ser)[0] = value + if isinstance(value, np.ndarray): + assert (ser.loc[0] == value).all() + else: + assert ser.loc[0] == value + + +def test_object_dtype_series_set_series_element(): + # GH 48933 + s1 = Series(dtype="O", index=["a", "b"]) + + s1["a"] = Series() + s1.loc["b"] = Series() + + tm.assert_series_equal(s1.loc["a"], Series()) + tm.assert_series_equal(s1.loc["b"], Series()) + + s2 = Series(dtype="O", index=["a", "b"]) + + s2.iloc[1] = Series() + tm.assert_series_equal(s2.iloc[1], Series()) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_loc.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_loc.py new file mode 100644 index 00000000..8b2730b3 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_loc.py @@ -0,0 +1,3291 @@ +""" test label based indexing with loc """ +from collections import namedtuple +from datetime import ( + date, + datetime, + time, + timedelta, +) +import re + +from dateutil.tz import gettz +import numpy as np +import pytest + +from pandas.errors import IndexingError +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + DataFrame, + DatetimeIndex, + Index, + IndexSlice, + MultiIndex, + Period, + PeriodIndex, + Series, + SparseDtype, + Timedelta, + Timestamp, + date_range, + timedelta_range, + to_datetime, + to_timedelta, +) +import pandas._testing as tm +from pandas.api.types import is_scalar +from pandas.core.indexing import _one_ellipsis_message +from pandas.tests.indexing.common import check_indexing_smoketest_or_raises + + +@pytest.mark.parametrize( + "series, new_series, expected_ser", + [ + [[np.nan, np.nan, "b"], ["a", np.nan, np.nan], [False, True, True]], + [[np.nan, "b"], ["a", np.nan], [False, True]], + ], +) +def test_not_change_nan_loc(series, new_series, expected_ser): + # GH 28403 + df = DataFrame({"A": series}) + df.loc[:, "A"] = new_series + expected = DataFrame({"A": expected_ser}) + tm.assert_frame_equal(df.isna(), expected) + tm.assert_frame_equal(df.notna(), ~expected) + + +class TestLoc: + def test_none_values_on_string_columns(self): + # Issue #32218 + df = DataFrame(["1", "2", None], columns=["a"], dtype="str") + + assert df.loc[2, "a"] is None + + @pytest.mark.parametrize("kind", ["series", "frame"]) + def test_loc_getitem_int(self, kind, request): + # int label + obj = request.getfixturevalue(f"{kind}_labels") + check_indexing_smoketest_or_raises(obj, "loc", 2, fails=KeyError) + + @pytest.mark.parametrize("kind", ["series", "frame"]) + def test_loc_getitem_label(self, kind, request): + # label + obj = request.getfixturevalue(f"{kind}_empty") + check_indexing_smoketest_or_raises(obj, "loc", "c", fails=KeyError) + + @pytest.mark.parametrize( + "key, typs, axes", + [ + ["f", ["ints", "uints", "labels", "mixed", "ts"], None], + ["f", ["floats"], None], + [20, ["ints", "uints", "mixed"], None], + [20, ["labels"], None], + [20, ["ts"], 0], + [20, ["floats"], 0], + ], + ) + @pytest.mark.parametrize("kind", ["series", 
"frame"]) + def test_loc_getitem_label_out_of_range(self, key, typs, axes, kind, request): + for typ in typs: + obj = request.getfixturevalue(f"{kind}_{typ}") + # out of range label + check_indexing_smoketest_or_raises( + obj, "loc", key, axes=axes, fails=KeyError + ) + + @pytest.mark.parametrize( + "key, typs", + [ + [[0, 1, 2], ["ints", "uints", "floats"]], + [[1, 3.0, "A"], ["ints", "uints", "floats"]], + ], + ) + @pytest.mark.parametrize("kind", ["series", "frame"]) + def test_loc_getitem_label_list(self, key, typs, kind, request): + for typ in typs: + obj = request.getfixturevalue(f"{kind}_{typ}") + # list of labels + check_indexing_smoketest_or_raises(obj, "loc", key, fails=KeyError) + + @pytest.mark.parametrize( + "key, typs, axes", + [ + [[0, 1, 2], ["empty"], None], + [[0, 2, 10], ["ints", "uints", "floats"], 0], + [[3, 6, 7], ["ints", "uints", "floats"], 1], + # GH 17758 - MultiIndex and missing keys + [[(1, 3), (1, 4), (2, 5)], ["multi"], 0], + ], + ) + @pytest.mark.parametrize("kind", ["series", "frame"]) + def test_loc_getitem_label_list_with_missing(self, key, typs, axes, kind, request): + for typ in typs: + obj = request.getfixturevalue(f"{kind}_{typ}") + check_indexing_smoketest_or_raises( + obj, "loc", key, axes=axes, fails=KeyError + ) + + @pytest.mark.parametrize("typs", ["ints", "uints"]) + @pytest.mark.parametrize("kind", ["series", "frame"]) + def test_loc_getitem_label_list_fails(self, typs, kind, request): + # fails + obj = request.getfixturevalue(f"{kind}_{typs}") + check_indexing_smoketest_or_raises( + obj, "loc", [20, 30, 40], axes=1, fails=KeyError + ) + + def test_loc_getitem_label_array_like(self): + # TODO: test something? + # array like + pass + + @pytest.mark.parametrize("kind", ["series", "frame"]) + def test_loc_getitem_bool(self, kind, request): + obj = request.getfixturevalue(f"{kind}_empty") + # boolean indexers + b = [True, False, True, False] + + check_indexing_smoketest_or_raises(obj, "loc", b, fails=IndexError) + + @pytest.mark.parametrize( + "slc, typs, axes, fails", + [ + [ + slice(1, 3), + ["labels", "mixed", "empty", "ts", "floats"], + None, + TypeError, + ], + [slice("20130102", "20130104"), ["ts"], 1, TypeError], + [slice(2, 8), ["mixed"], 0, TypeError], + [slice(2, 8), ["mixed"], 1, KeyError], + [slice(2, 4, 2), ["mixed"], 0, TypeError], + ], + ) + @pytest.mark.parametrize("kind", ["series", "frame"]) + def test_loc_getitem_label_slice(self, slc, typs, axes, fails, kind, request): + # label slices (with ints) + + # real label slices + + # GH 14316 + for typ in typs: + obj = request.getfixturevalue(f"{kind}_{typ}") + check_indexing_smoketest_or_raises( + obj, + "loc", + slc, + axes=axes, + fails=fails, + ) + + def test_setitem_from_duplicate_axis(self): + # GH#34034 + df = DataFrame( + [[20, "a"], [200, "a"], [200, "a"]], + columns=["col1", "col2"], + index=[10, 1, 1], + ) + df.loc[1, "col1"] = np.arange(2) + expected = DataFrame( + [[20, "a"], [0, "a"], [1, "a"]], columns=["col1", "col2"], index=[10, 1, 1] + ) + tm.assert_frame_equal(df, expected) + + def test_column_types_consistent(self): + # GH 26779 + df = DataFrame( + data={ + "channel": [1, 2, 3], + "A": ["String 1", np.nan, "String 2"], + "B": [ + Timestamp("2019-06-11 11:00:00"), + pd.NaT, + Timestamp("2019-06-11 12:00:00"), + ], + } + ) + df2 = DataFrame( + data={"A": ["String 3"], "B": [Timestamp("2019-06-11 12:00:00")]} + ) + # Change Columns A and B to df2.values wherever Column A is NaN + df.loc[df["A"].isna(), ["A", "B"]] = df2.values + expected = DataFrame( + data={ + 
"channel": [1, 2, 3], + "A": ["String 1", "String 3", "String 2"], + "B": [ + Timestamp("2019-06-11 11:00:00"), + Timestamp("2019-06-11 12:00:00"), + Timestamp("2019-06-11 12:00:00"), + ], + } + ) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "obj, key, exp", + [ + ( + DataFrame([[1]], columns=Index([False])), + IndexSlice[:, False], + Series([1], name=False), + ), + (Series([1], index=Index([False])), False, [1]), + (DataFrame([[1]], index=Index([False])), False, Series([1], name=False)), + ], + ) + def test_loc_getitem_single_boolean_arg(self, obj, key, exp): + # GH 44322 + res = obj.loc[key] + if isinstance(exp, (DataFrame, Series)): + tm.assert_equal(res, exp) + else: + assert res == exp + + +class TestLocBaseIndependent: + # Tests for loc that do not depend on subclassing Base + def test_loc_npstr(self): + # GH#45580 + df = DataFrame(index=date_range("2021", "2022")) + result = df.loc[np.array(["2021/6/1"])[0] :] + expected = df.iloc[151:] + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "msg, key", + [ + (r"Period\('2019', 'A-DEC'\), 'foo', 'bar'", (Period(2019), "foo", "bar")), + (r"Period\('2019', 'A-DEC'\), 'y1', 'bar'", (Period(2019), "y1", "bar")), + (r"Period\('2019', 'A-DEC'\), 'foo', 'z1'", (Period(2019), "foo", "z1")), + ( + r"Period\('2018', 'A-DEC'\), Period\('2016', 'A-DEC'\), 'bar'", + (Period(2018), Period(2016), "bar"), + ), + (r"Period\('2018', 'A-DEC'\), 'foo', 'y1'", (Period(2018), "foo", "y1")), + ( + r"Period\('2017', 'A-DEC'\), 'foo', Period\('2015', 'A-DEC'\)", + (Period(2017), "foo", Period(2015)), + ), + (r"Period\('2017', 'A-DEC'\), 'z1', 'bar'", (Period(2017), "z1", "bar")), + ], + ) + def test_contains_raise_error_if_period_index_is_in_multi_index(self, msg, key): + # GH#20684 + """ + parse_datetime_string_with_reso return parameter if type not matched. + PeriodIndex.get_loc takes returned value from parse_datetime_string_with_reso + as a tuple. 
If the first argument is a Period and the tuple has 3 items,
+ processing continues rather than raising an exception.
+ """
+ df = DataFrame(
+ {
+ "A": [Period(2019), "x1", "x2"],
+ "B": [Period(2018), Period(2016), "y1"],
+ "C": [Period(2017), "z1", Period(2015)],
+ "V1": [1, 2, 3],
+ "V2": [10, 20, 30],
+ }
+ ).set_index(["A", "B", "C"])
+ with pytest.raises(KeyError, match=msg):
+ df.loc[key]
+
+ def test_loc_getitem_missing_unicode_key(self):
+ df = DataFrame({"a": [1]})
+ with pytest.raises(KeyError, match="\u05d0"):
+ df.loc[:, "\u05d0"]  # should not raise UnicodeEncodeError
+
+ def test_loc_getitem_dups(self):
+ # GH 5678
+ # repeated getitems on a dup index returning an ndarray
+ df = DataFrame(
+ np.random.default_rng(2).random((20, 5)),
+ index=["ABCDE"[x % 5] for x in range(20)],
+ )
+ expected = df.loc["A", 0]
+ result = df.loc[:, 0].loc["A"]
+ tm.assert_series_equal(result, expected)
+
+ def test_loc_getitem_dups2(self):
+ # GH4726
+ # dup indexing with iloc/loc
+ df = DataFrame(
+ [[1, 2, "foo", "bar", Timestamp("20130101")]],
+ columns=["a", "a", "a", "a", "a"],
+ index=[1],
+ )
+ expected = Series(
+ [1, 2, "foo", "bar", Timestamp("20130101")],
+ index=["a", "a", "a", "a", "a"],
+ name=1,
+ )
+
+ result = df.iloc[0]
+ tm.assert_series_equal(result, expected)
+
+ result = df.loc[1]
+ tm.assert_series_equal(result, expected)
+
+ def test_loc_setitem_dups(self):
+ # GH 6541
+ df_orig = DataFrame(
+ {
+ "me": list("rttti"),
+ "foo": list("aaade"),
+ "bar": np.arange(5, dtype="float64") * 1.34 + 2,
+ "bar2": np.arange(5, dtype="float64") * -0.34 + 2,
+ }
+ ).set_index("me")
+
+ indexer = (
+ "r",
+ ["bar", "bar2"],
+ )
+ df = df_orig.copy()
+ df.loc[indexer] *= 2.0
+ tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
+
+ indexer = (
+ "r",
+ "bar",
+ )
+ df = df_orig.copy()
+ df.loc[indexer] *= 2.0
+ assert df.loc[indexer] == 2.0 * df_orig.loc[indexer]
+
+ indexer = (
+ "t",
+ ["bar", "bar2"],
+ )
+ df = df_orig.copy()
+ df.loc[indexer] *= 2.0
+ tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
+
+ def test_loc_setitem_slice(self):
+ # GH10503
+
+ # assigning the same type should not change the type
+ df1 = DataFrame({"a": [0, 1, 1], "b": Series([100, 200, 300], dtype="uint32")})
+ ix = df1["a"] == 1
+ newb1 = df1.loc[ix, "b"] + 1
+ df1.loc[ix, "b"] = newb1
+ expected = DataFrame(
+ {"a": [0, 1, 1], "b": Series([100, 201, 301], dtype="uint32")}
+ )
+ tm.assert_frame_equal(df1, expected)
+
+ # assigning a new type should get the inferred type
+ df2 = DataFrame({"a": [0, 1, 1], "b": [100, 200, 300]}, dtype="uint64")
+ ix = df1["a"] == 1
+ newb2 = df2.loc[ix, "b"]
+ with tm.assert_produces_warning(
+ FutureWarning, match="item of incompatible dtype"
+ ):
+ df1.loc[ix, "b"] = newb2
+ expected = DataFrame({"a": [0, 1, 1], "b": [100, 200, 300]}, dtype="uint64")
+ tm.assert_frame_equal(df2, expected)
+
+ def test_loc_setitem_dtype(self):
+ # GH31340
+ df = DataFrame({"id": ["A"], "a": [1.2], "b": [0.0], "c": [-2.5]})
+ cols = ["a", "b", "c"]
+ df.loc[:, cols] = df.loc[:, cols].astype("float32")
+
+ # pre-2.0 this setting would swap in new arrays; in 2.0 it is correctly
+ # in-place, consistent with non-split-path
+ expected = DataFrame(
+ {
+ "id": ["A"],
+ "a": np.array([1.2], dtype="float64"),
+ "b": np.array([0.0], dtype="float64"),
+ "c": np.array([-2.5], dtype="float64"),
+ }
+ )  # id is inferred as object
+
+ tm.assert_frame_equal(df, expected)
+
+ def test_getitem_label_list_with_missing(self):
+ s = Series(range(3), index=["a", "b", "c"])
+
+ # consistency
+ with 
pytest.raises(KeyError, match="not in index"): + s[["a", "d"]] + + s = Series(range(3)) + with pytest.raises(KeyError, match="not in index"): + s[[0, 3]] + + @pytest.mark.parametrize("index", [[True, False], [True, False, True, False]]) + def test_loc_getitem_bool_diff_len(self, index): + # GH26658 + s = Series([1, 2, 3]) + msg = f"Boolean index has wrong length: {len(index)} instead of {len(s)}" + with pytest.raises(IndexError, match=msg): + s.loc[index] + + def test_loc_getitem_int_slice(self): + # TODO: test something here? + pass + + def test_loc_to_fail(self): + # GH3449 + df = DataFrame( + np.random.default_rng(2).random((3, 3)), + index=["a", "b", "c"], + columns=["e", "f", "g"], + ) + + msg = ( + rf"\"None of \[Index\(\[1, 2\], dtype='{np.dtype(int)}'\)\] are " + r"in the \[index\]\"" + ) + with pytest.raises(KeyError, match=msg): + df.loc[[1, 2], [1, 2]] + + def test_loc_to_fail2(self): + # GH 7496 + # loc should not fallback + + s = Series(dtype=object) + s.loc[1] = 1 + s.loc["a"] = 2 + + with pytest.raises(KeyError, match=r"^-1$"): + s.loc[-1] + + msg = ( + rf"\"None of \[Index\(\[-1, -2\], dtype='{np.dtype(int)}'\)\] are " + r"in the \[index\]\"" + ) + with pytest.raises(KeyError, match=msg): + s.loc[[-1, -2]] + + msg = r"\"None of \[Index\(\['4'\], dtype='object'\)\] are in the \[index\]\"" + with pytest.raises(KeyError, match=msg): + s.loc[["4"]] + + s.loc[-1] = 3 + with pytest.raises(KeyError, match="not in index"): + s.loc[[-1, -2]] + + s["a"] = 2 + msg = ( + rf"\"None of \[Index\(\[-2\], dtype='{np.dtype(int)}'\)\] are " + r"in the \[index\]\"" + ) + with pytest.raises(KeyError, match=msg): + s.loc[[-2]] + + del s["a"] + + with pytest.raises(KeyError, match=msg): + s.loc[[-2]] = 0 + + def test_loc_to_fail3(self): + # inconsistency between .loc[values] and .loc[values,:] + # GH 7999 + df = DataFrame([["a"], ["b"]], index=[1, 2], columns=["value"]) + + msg = ( + rf"\"None of \[Index\(\[3\], dtype='{np.dtype(int)}'\)\] are " + r"in the \[index\]\"" + ) + with pytest.raises(KeyError, match=msg): + df.loc[[3], :] + + with pytest.raises(KeyError, match=msg): + df.loc[[3]] + + def test_loc_getitem_list_with_fail(self): + # 15747 + # should KeyError if *any* missing labels + + s = Series([1, 2, 3]) + + s.loc[[2]] + + msg = f"\"None of [Index([3], dtype='{np.dtype(int)}')] are in the [index]" + with pytest.raises(KeyError, match=re.escape(msg)): + s.loc[[3]] + + # a non-match and a match + with pytest.raises(KeyError, match="not in index"): + s.loc[[2, 3]] + + def test_loc_index(self): + # gh-17131 + # a boolean index should index like a boolean numpy array + + df = DataFrame( + np.random.default_rng(2).random(size=(5, 10)), + index=["alpha_0", "alpha_1", "alpha_2", "beta_0", "beta_1"], + ) + + mask = df.index.map(lambda x: "alpha" in x) + expected = df.loc[np.array(mask)] + + result = df.loc[mask] + tm.assert_frame_equal(result, expected) + + result = df.loc[mask.values] + tm.assert_frame_equal(result, expected) + + result = df.loc[pd.array(mask, dtype="boolean")] + tm.assert_frame_equal(result, expected) + + def test_loc_general(self): + df = DataFrame( + np.random.default_rng(2).random((4, 4)), + columns=["A", "B", "C", "D"], + index=["A", "B", "C", "D"], + ) + + # want this to work + result = df.loc[:, "A":"B"].iloc[0:2, :] + assert (result.columns == ["A", "B"]).all() + assert (result.index == ["A", "B"]).all() + + # mixed type + result = DataFrame({"a": [Timestamp("20130101")], "b": [1]}).iloc[0] + expected = Series([Timestamp("20130101"), 1], index=["a", "b"], name=0) + 
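+ # the selected row mixes a Timestamp and an int, so .iloc[0] can only
+ # return an object-dtype Series; the dtype assertion below checks that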
tm.assert_series_equal(result, expected) + assert result.dtype == object + + @pytest.fixture + def frame_for_consistency(self): + return DataFrame( + { + "date": date_range("2000-01-01", "2000-01-5"), + "val": Series(range(5), dtype=np.int64), + } + ) + + @pytest.mark.parametrize( + "val", + [0, np.array(0, dtype=np.int64), np.array([0, 0, 0, 0, 0], dtype=np.int64)], + ) + def test_loc_setitem_consistency(self, frame_for_consistency, val): + # GH 6149 + # coerce similarly for setitem and loc when rows have a null-slice + expected = DataFrame( + { + "date": Series(0, index=range(5), dtype=np.int64), + "val": Series(range(5), dtype=np.int64), + } + ) + df = frame_for_consistency.copy() + df.loc[:, "date"] = val + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_consistency_dt64_to_str(self, frame_for_consistency): + # GH 6149 + # coerce similarly for setitem and loc when rows have a null-slice + + expected = DataFrame( + { + "date": Series("foo", index=range(5)), + "val": Series(range(5), dtype=np.int64), + } + ) + df = frame_for_consistency.copy() + df.loc[:, "date"] = "foo" + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_consistency_dt64_to_float(self, frame_for_consistency): + # GH 6149 + # coerce similarly for setitem and loc when rows have a null-slice + expected = DataFrame( + { + "date": Series(1.0, index=range(5)), + "val": Series(range(5), dtype=np.int64), + } + ) + df = frame_for_consistency.copy() + df.loc[:, "date"] = 1.0 + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_consistency_single_row(self): + # GH 15494 + # setting on frame with single row + df = DataFrame({"date": Series([Timestamp("20180101")])}) + df.loc[:, "date"] = "string" + expected = DataFrame({"date": Series(["string"])}) + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_consistency_empty(self): + # empty (essentially noops) + # before the enforcement of #45333 in 2.0, the loc.setitem here would + # change the dtype of df.x to int64 + expected = DataFrame(columns=["x", "y"]) + df = DataFrame(columns=["x", "y"]) + with tm.assert_produces_warning(None): + df.loc[:, "x"] = 1 + tm.assert_frame_equal(df, expected) + + # setting with setitem swaps in a new array, so changes the dtype + df = DataFrame(columns=["x", "y"]) + df["x"] = 1 + expected["x"] = expected["x"].astype(np.int64) + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_consistency_slice_column_len(self): + # .loc[:,column] setting with slice == len of the column + # GH10408 + levels = [ + ["Region_1"] * 4, + ["Site_1", "Site_1", "Site_2", "Site_2"], + [3987227376, 3980680971, 3977723249, 3977723089], + ] + mi = MultiIndex.from_arrays(levels, names=["Region", "Site", "RespondentID"]) + + clevels = [ + ["Respondent", "Respondent", "Respondent", "OtherCat", "OtherCat"], + ["Something", "StartDate", "EndDate", "Yes/No", "SomethingElse"], + ] + cols = MultiIndex.from_arrays(clevels, names=["Level_0", "Level_1"]) + + values = [ + ["A", "5/25/2015 10:59", "5/25/2015 11:22", "Yes", np.nan], + ["A", "5/21/2015 9:40", "5/21/2015 9:52", "Yes", "Yes"], + ["A", "5/20/2015 8:27", "5/20/2015 8:41", "Yes", np.nan], + ["A", "5/20/2015 8:33", "5/20/2015 9:09", "Yes", "No"], + ] + df = DataFrame(values, index=mi, columns=cols) + + df.loc[:, ("Respondent", "StartDate")] = to_datetime( + df.loc[:, ("Respondent", "StartDate")] + ) + df.loc[:, ("Respondent", "EndDate")] = to_datetime( + df.loc[:, ("Respondent", "EndDate")] + ) + df = df.infer_objects(copy=False) + + # Adding a new key + df.loc[:, ("Respondent", 
"Duration")] = ( + df.loc[:, ("Respondent", "EndDate")] + - df.loc[:, ("Respondent", "StartDate")] + ) + + # timedelta64[m] -> float, so this cannot be done inplace, so + # no warning + df.loc[:, ("Respondent", "Duration")] = df.loc[ + :, ("Respondent", "Duration") + ] / Timedelta(60_000_000_000) + + expected = Series( + [23.0, 12.0, 14.0, 36.0], index=df.index, name=("Respondent", "Duration") + ) + tm.assert_series_equal(df[("Respondent", "Duration")], expected) + + @pytest.mark.parametrize("unit", ["Y", "M", "D", "h", "m", "s", "ms", "us"]) + def test_loc_assign_non_ns_datetime(self, unit): + # GH 27395, non-ns dtype assignment via .loc should work + # and return the same result when using simple assignment + df = DataFrame( + { + "timestamp": [ + np.datetime64("2017-02-11 12:41:29"), + np.datetime64("1991-11-07 04:22:37"), + ] + } + ) + + df.loc[:, unit] = df.loc[:, "timestamp"].values.astype(f"datetime64[{unit}]") + df["expected"] = df.loc[:, "timestamp"].values.astype(f"datetime64[{unit}]") + expected = Series(df.loc[:, "expected"], name=unit) + tm.assert_series_equal(df.loc[:, unit], expected) + + def test_loc_modify_datetime(self): + # see gh-28837 + df = DataFrame.from_dict( + {"date": [1485264372711, 1485265925110, 1540215845888, 1540282121025]} + ) + + df["date_dt"] = to_datetime(df["date"], unit="ms", cache=True) + + df.loc[:, "date_dt_cp"] = df.loc[:, "date_dt"] + df.loc[[2, 3], "date_dt_cp"] = df.loc[[2, 3], "date_dt"] + + expected = DataFrame( + [ + [1485264372711, "2017-01-24 13:26:12.711", "2017-01-24 13:26:12.711"], + [1485265925110, "2017-01-24 13:52:05.110", "2017-01-24 13:52:05.110"], + [1540215845888, "2018-10-22 13:44:05.888", "2018-10-22 13:44:05.888"], + [1540282121025, "2018-10-23 08:08:41.025", "2018-10-23 08:08:41.025"], + ], + columns=["date", "date_dt", "date_dt_cp"], + ) + + columns = ["date_dt", "date_dt_cp"] + expected[columns] = expected[columns].apply(to_datetime) + + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_frame_with_reindex(self): + # GH#6254 setting issue + df = DataFrame(index=[3, 5, 4], columns=["A"], dtype=float) + df.loc[[4, 3, 5], "A"] = np.array([1, 2, 3], dtype="int64") + + # setting integer values into a float dataframe with loc is inplace, + # so we retain float dtype + ser = Series([2, 3, 1], index=[3, 5, 4], dtype=float) + expected = DataFrame({"A": ser}) + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_frame_with_reindex_mixed(self): + # GH#40480 + df = DataFrame(index=[3, 5, 4], columns=["A", "B"], dtype=float) + df["B"] = "string" + df.loc[[4, 3, 5], "A"] = np.array([1, 2, 3], dtype="int64") + ser = Series([2, 3, 1], index=[3, 5, 4], dtype="int64") + # pre-2.0 this setting swapped in a new array, now it is inplace + # consistent with non-split-path + expected = DataFrame({"A": ser.astype(float)}) + expected["B"] = "string" + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_frame_with_inverted_slice(self): + # GH#40480 + df = DataFrame(index=[1, 2, 3], columns=["A", "B"], dtype=float) + df["B"] = "string" + df.loc[slice(3, 0, -1), "A"] = np.array([1, 2, 3], dtype="int64") + # pre-2.0 this setting swapped in a new array, now it is inplace + # consistent with non-split-path + expected = DataFrame({"A": [3.0, 2.0, 1.0], "B": "string"}, index=[1, 2, 3]) + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_empty_frame(self): + # GH#6252 setting with an empty frame + keys1 = ["@" + str(i) for i in range(5)] + val1 = np.arange(5, dtype="int64") + + keys2 = ["@" + str(i) for i in range(4)] + 
val2 = np.arange(4, dtype="int64") + + index = list(set(keys1).union(keys2)) + df = DataFrame(index=index) + df["A"] = np.nan + df.loc[keys1, "A"] = val1 + + df["B"] = np.nan + df.loc[keys2, "B"] = val2 + + # Because df["A"] was initialized as float64, setting values into it + # is inplace, so that dtype is retained + sera = Series(val1, index=keys1, dtype=np.float64) + serb = Series(val2, index=keys2) + expected = DataFrame({"A": sera, "B": serb}).reindex(index=index) + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_frame(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + index=list("abcd"), + columns=list("ABCD"), + ) + + result = df.iloc[0, 0] + + df.loc["a", "A"] = 1 + result = df.loc["a", "A"] + assert result == 1 + + result = df.iloc[0, 0] + assert result == 1 + + df.loc[:, "B":"D"] = 0 + expected = df.loc[:, "B":"D"] + result = df.iloc[:, 1:] + tm.assert_frame_equal(result, expected) + + def test_loc_setitem_frame_nan_int_coercion_invalid(self): + # GH 8669 + # invalid coercion of nan -> int + df = DataFrame({"A": [1, 2, 3], "B": np.nan}) + df.loc[df.B > df.A, "B"] = df.A + expected = DataFrame({"A": [1, 2, 3], "B": np.nan}) + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_frame_mixed_labels(self): + # GH 6546 + # setting with mixed labels + df = DataFrame({1: [1, 2], 2: [3, 4], "a": ["a", "b"]}) + + result = df.loc[0, [1, 2]] + expected = Series( + [1, 3], index=Index([1, 2], dtype=object), dtype=object, name=0 + ) + tm.assert_series_equal(result, expected) + + expected = DataFrame({1: [5, 2], 2: [6, 4], "a": ["a", "b"]}) + df.loc[0, [1, 2]] = [5, 6] + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_frame_multiples(self): + # multiple setting + df = DataFrame( + {"A": ["foo", "bar", "baz"], "B": Series(range(3), dtype=np.int64)} + ) + rhs = df.loc[1:2] + rhs.index = df.index[0:2] + df.loc[0:1] = rhs + expected = DataFrame( + {"A": ["bar", "baz", "baz"], "B": Series([1, 2, 2], dtype=np.int64)} + ) + tm.assert_frame_equal(df, expected) + + # multiple setting with frame on rhs (with M8) + df = DataFrame( + { + "date": date_range("2000-01-01", "2000-01-5"), + "val": Series(range(5), dtype=np.int64), + } + ) + expected = DataFrame( + { + "date": [ + Timestamp("20000101"), + Timestamp("20000102"), + Timestamp("20000101"), + Timestamp("20000102"), + Timestamp("20000103"), + ], + "val": Series([0, 1, 0, 1, 2], dtype=np.int64), + } + ) + rhs = df.loc[0:2] + rhs.index = df.index[2:5] + df.loc[2:4] = rhs + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "indexer", [["A"], slice(None, "A", None), np.array(["A"])] + ) + @pytest.mark.parametrize("value", [["Z"], np.array(["Z"])]) + def test_loc_setitem_with_scalar_index(self, indexer, value): + # GH #19474 + # assigning like "df.loc[0, ['A']] = ['Z']" should be evaluated + # elementwisely, not using "setter('A', ['Z'])". 
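+ # e.g. with indexer ["A"] and value ["Z"], the single element is unpacked
+ # so df.loc[0, "A"] ends up as the scalar "Z", not the list ["Z"]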
+ + # Set object dtype to avoid upcast when setting 'Z' + df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"]).astype({"A": object}) + df.loc[0, indexer] = value + result = df.loc[0, "A"] + + assert is_scalar(result) and result == "Z" + + @pytest.mark.parametrize( + "index,box,expected", + [ + ( + ([0, 2], ["A", "B", "C", "D"]), + 7, + DataFrame( + [[7, 7, 7, 7], [3, 4, np.nan, np.nan], [7, 7, 7, 7]], + columns=["A", "B", "C", "D"], + ), + ), + ( + (1, ["C", "D"]), + [7, 8], + DataFrame( + [[1, 2, np.nan, np.nan], [3, 4, 7, 8], [5, 6, np.nan, np.nan]], + columns=["A", "B", "C", "D"], + ), + ), + ( + (1, ["A", "B", "C"]), + np.array([7, 8, 9], dtype=np.int64), + DataFrame( + [[1, 2, np.nan], [7, 8, 9], [5, 6, np.nan]], columns=["A", "B", "C"] + ), + ), + ( + (slice(1, 3, None), ["B", "C", "D"]), + [[7, 8, 9], [10, 11, 12]], + DataFrame( + [[1, 2, np.nan, np.nan], [3, 7, 8, 9], [5, 10, 11, 12]], + columns=["A", "B", "C", "D"], + ), + ), + ( + (slice(1, 3, None), ["C", "A", "D"]), + np.array([[7, 8, 9], [10, 11, 12]], dtype=np.int64), + DataFrame( + [[1, 2, np.nan, np.nan], [8, 4, 7, 9], [11, 6, 10, 12]], + columns=["A", "B", "C", "D"], + ), + ), + ( + (slice(None, None, None), ["A", "C"]), + DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]), + DataFrame( + [[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=["A", "B", "C"] + ), + ), + ], + ) + def test_loc_setitem_missing_columns(self, index, box, expected): + # GH 29334 + df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"]) + + df.loc[index] = box + tm.assert_frame_equal(df, expected) + + def test_loc_coercion(self): + # GH#12411 + df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC"), pd.NaT]}) + expected = df.dtypes + + result = df.iloc[[0]] + tm.assert_series_equal(result.dtypes, expected) + + result = df.iloc[[1]] + tm.assert_series_equal(result.dtypes, expected) + + def test_loc_coercion2(self): + # GH#12045 + df = DataFrame({"date": [datetime(2012, 1, 1), datetime(1012, 1, 2)]}) + expected = df.dtypes + + result = df.iloc[[0]] + tm.assert_series_equal(result.dtypes, expected) + + result = df.iloc[[1]] + tm.assert_series_equal(result.dtypes, expected) + + def test_loc_coercion3(self): + # GH#11594 + df = DataFrame({"text": ["some words"] + [None] * 9}) + expected = df.dtypes + + result = df.iloc[0:2] + tm.assert_series_equal(result.dtypes, expected) + + result = df.iloc[3:] + tm.assert_series_equal(result.dtypes, expected) + + def test_setitem_new_key_tz(self, indexer_sl): + # GH#12862 should not raise on assigning the second value + vals = [ + to_datetime(42).tz_localize("UTC"), + to_datetime(666).tz_localize("UTC"), + ] + expected = Series(vals, index=["foo", "bar"]) + + ser = Series(dtype=object) + indexer_sl(ser)["foo"] = vals[0] + indexer_sl(ser)["bar"] = vals[1] + + tm.assert_series_equal(ser, expected) + + def test_loc_non_unique(self): + # GH3659 + # non-unique indexer with loc slice + # https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs + + # these are going to raise because the we are non monotonic + df = DataFrame( + {"A": [1, 2, 3, 4, 5, 6], "B": [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3] + ) + msg = "'Cannot get left slice bound for non-unique label: 1'" + with pytest.raises(KeyError, match=msg): + df.loc[1:] + msg = "'Cannot get left slice bound for non-unique label: 0'" + with pytest.raises(KeyError, match=msg): + df.loc[0:] + msg = "'Cannot get left slice bound for non-unique label: 1'" + with pytest.raises(KeyError, match=msg): + df.loc[1:2] + + # monotonic are ok + df = 
DataFrame( + {"A": [1, 2, 3, 4, 5, 6], "B": [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3] + ).sort_index(axis=0) + result = df.loc[1:] + expected = DataFrame({"A": [2, 4, 5, 6], "B": [4, 6, 7, 8]}, index=[1, 1, 2, 3]) + tm.assert_frame_equal(result, expected) + + result = df.loc[0:] + tm.assert_frame_equal(result, df) + + result = df.loc[1:2] + expected = DataFrame({"A": [2, 4, 5], "B": [4, 6, 7]}, index=[1, 1, 2]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.arm_slow + @pytest.mark.parametrize("length, l2", [[900, 100], [900000, 100000]]) + def test_loc_non_unique_memory_error(self, length, l2): + # GH 4280 + # non_unique index with a large selection triggers a memory error + + columns = list("ABCDEFG") + + df = pd.concat( + [ + DataFrame( + np.random.default_rng(2).standard_normal((length, len(columns))), + index=np.arange(length), + columns=columns, + ), + DataFrame(np.ones((l2, len(columns))), index=[0] * l2, columns=columns), + ] + ) + + assert df.index.is_unique is False + + mask = np.arange(l2) + result = df.loc[mask] + expected = pd.concat( + [ + df.take([0]), + DataFrame( + np.ones((len(mask), len(columns))), + index=[0] * len(mask), + columns=columns, + ), + df.take(mask[1:]), + ] + ) + tm.assert_frame_equal(result, expected) + + def test_loc_name(self): + # GH 3880 + df = DataFrame([[1, 1], [1, 1]]) + df.index.name = "index_name" + result = df.iloc[[0, 1]].index.name + assert result == "index_name" + + result = df.loc[[0, 1]].index.name + assert result == "index_name" + + def test_loc_empty_list_indexer_is_ok(self): + df = tm.makeCustomDataframe(5, 2) + # vertical empty + tm.assert_frame_equal( + df.loc[:, []], df.iloc[:, :0], check_index_type=True, check_column_type=True + ) + # horizontal empty + tm.assert_frame_equal( + df.loc[[], :], df.iloc[:0, :], check_index_type=True, check_column_type=True + ) + # horizontal empty + tm.assert_frame_equal( + df.loc[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True + ) + + def test_identity_slice_returns_new_object(self, using_copy_on_write): + # GH13873 + + original_df = DataFrame({"a": [1, 2, 3]}) + sliced_df = original_df.loc[:] + assert sliced_df is not original_df + assert original_df[:] is not original_df + assert original_df.loc[:, :] is not original_df + + # should be a shallow copy + assert np.shares_memory(original_df["a"]._values, sliced_df["a"]._values) + + # Setting using .loc[:, "a"] sets inplace so alters both sliced and orig + # depending on CoW + original_df.loc[:, "a"] = [4, 4, 4] + if using_copy_on_write: + assert (sliced_df["a"] == [1, 2, 3]).all() + else: + assert (sliced_df["a"] == 4).all() + + # These should not return copies + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + if using_copy_on_write: + assert df[0] is not df.loc[:, 0] + else: + assert df[0] is df.loc[:, 0] + + # Same tests for Series + original_series = Series([1, 2, 3, 4, 5, 6]) + sliced_series = original_series.loc[:] + assert sliced_series is not original_series + assert original_series[:] is not original_series + + original_series[:3] = [7, 8, 9] + if using_copy_on_write: + assert all(sliced_series[:3] == [1, 2, 3]) + else: + assert all(sliced_series[:3] == [7, 8, 9]) + + def test_loc_copy_vs_view(self, request, using_copy_on_write): + # GH 15631 + + if not using_copy_on_write: + mark = pytest.mark.xfail(reason="accidental fix reverted - GH37497") + request.node.add_marker(mark) + x = DataFrame(zip(range(3), range(3)), columns=["a", "b"]) + + y = x.copy() + q = y.loc[:, "a"] + q += 2 + + 
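+ # q should be a copy of column "a", so the in-place += 2 must not write
+ # back into y; without copy-on-write this regressed (see the xfail for
+ # GH37497 above)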
tm.assert_frame_equal(x, y) + + z = x.copy() + q = z.loc[x.index, "a"] + q += 2 + + tm.assert_frame_equal(x, z) + + def test_loc_uint64(self): + # GH20722 + # Test whether loc accept uint64 max value as index. + umax = np.iinfo("uint64").max + ser = Series([1, 2], index=[umax - 1, umax]) + + result = ser.loc[umax - 1] + expected = ser.iloc[0] + assert result == expected + + result = ser.loc[[umax - 1]] + expected = ser.iloc[[0]] + tm.assert_series_equal(result, expected) + + result = ser.loc[[umax - 1, umax]] + tm.assert_series_equal(result, ser) + + def test_loc_uint64_disallow_negative(self): + # GH#41775 + umax = np.iinfo("uint64").max + ser = Series([1, 2], index=[umax - 1, umax]) + + with pytest.raises(KeyError, match="-1"): + # don't wrap around + ser.loc[-1] + + with pytest.raises(KeyError, match="-1"): + # don't wrap around + ser.loc[[-1]] + + def test_loc_setitem_empty_append_expands_rows(self): + # GH6173, various appends to an empty dataframe + + data = [1, 2, 3] + expected = DataFrame( + {"x": data, "y": np.array([np.nan] * len(data), dtype=object)} + ) + + # appends to fit length of data + df = DataFrame(columns=["x", "y"]) + df.loc[:, "x"] = data + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_empty_append_expands_rows_mixed_dtype(self): + # GH#37932 same as test_loc_setitem_empty_append_expands_rows + # but with mixed dtype so we go through take_split_path + data = [1, 2, 3] + expected = DataFrame( + {"x": data, "y": np.array([np.nan] * len(data), dtype=object)} + ) + + df = DataFrame(columns=["x", "y"]) + df["x"] = df["x"].astype(np.int64) + df.loc[:, "x"] = data + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_empty_append_single_value(self): + # only appends one value + expected = DataFrame({"x": [1.0], "y": [np.nan]}) + df = DataFrame(columns=["x", "y"], dtype=float) + df.loc[0, "x"] = expected.loc[0, "x"] + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_empty_append_raises(self): + # GH6173, various appends to an empty dataframe + + data = [1, 2] + df = DataFrame(columns=["x", "y"]) + df.index = df.index.astype(np.int64) + msg = ( + rf"None of \[Index\(\[0, 1\], dtype='{np.dtype(int)}'\)\] " + r"are in the \[index\]" + ) + with pytest.raises(KeyError, match=msg): + df.loc[[0, 1], "x"] = data + + msg = "|".join( + [ + "cannot copy sequence with size 2 to array axis with dimension 0", + r"could not broadcast input array from shape \(2,\) into shape \(0,\)", + "Must have equal len keys and value when setting with an iterable", + ] + ) + with pytest.raises(ValueError, match=msg): + df.loc[0:2, "x"] = data + + def test_indexing_zerodim_np_array(self): + # GH24924 + df = DataFrame([[1, 2], [3, 4]]) + result = df.loc[np.array(0)] + s = Series([1, 2], name=0) + tm.assert_series_equal(result, s) + + def test_series_indexing_zerodim_np_array(self): + # GH24924 + s = Series([1, 2]) + result = s.loc[np.array(0)] + assert result == 1 + + def test_loc_reverse_assignment(self): + # GH26939 + data = [1, 2, 3, 4, 5, 6] + [None] * 4 + expected = Series(data, index=range(2010, 2020)) + + result = Series(index=range(2010, 2020), dtype=np.float64) + result.loc[2015:2010:-1] = [6, 5, 4, 3, 2, 1] + + tm.assert_series_equal(result, expected) + + def test_loc_setitem_str_to_small_float_conversion_type(self): + # GH#20388 + + col_data = [str(np.random.default_rng(2).random() * 1e-12) for _ in range(5)] + result = DataFrame(col_data, columns=["A"]) + expected = DataFrame(col_data, columns=["A"], dtype=object) + tm.assert_frame_equal(result, expected) + + 
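+ # construction from the strings gives an object-dtype column; the two
+ # assignment paths below then differ, as the comments on each describe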
# assigning with loc/iloc attempts to set the values inplace, which + # in this case is successful + result.loc[result.index, "A"] = [float(x) for x in col_data] + expected = DataFrame(col_data, columns=["A"], dtype=float).astype(object) + tm.assert_frame_equal(result, expected) + + # assigning the entire column using __setitem__ swaps in the new array + # GH#??? + result["A"] = [float(x) for x in col_data] + expected = DataFrame(col_data, columns=["A"], dtype=float) + tm.assert_frame_equal(result, expected) + + def test_loc_getitem_time_object(self, frame_or_series): + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + mask = (rng.hour == 9) & (rng.minute == 30) + + obj = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 3)), index=rng + ) + obj = tm.get_obj(obj, frame_or_series) + + result = obj.loc[time(9, 30)] + exp = obj.loc[mask] + tm.assert_equal(result, exp) + + chunk = obj.loc["1/4/2000":] + result = chunk.loc[time(9, 30)] + expected = result[-1:] + + # Without resetting the freqs, these are 5 min and 1440 min, respectively + result.index = result.index._with_freq(None) + expected.index = expected.index._with_freq(None) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("spmatrix_t", ["coo_matrix", "csc_matrix", "csr_matrix"]) + @pytest.mark.parametrize("dtype", [np.int64, np.float64, complex]) + def test_loc_getitem_range_from_spmatrix(self, spmatrix_t, dtype): + sp_sparse = pytest.importorskip("scipy.sparse") + + spmatrix_t = getattr(sp_sparse, spmatrix_t) + + # The bug is triggered by a sparse matrix with purely sparse columns. So the + # recipe below generates a rectangular matrix of dimension (5, 7) where all the + # diagonal cells are ones, meaning the last two columns are purely sparse. + rows, cols = 5, 7 + spmatrix = spmatrix_t(np.eye(rows, cols, dtype=dtype), dtype=dtype) + df = DataFrame.sparse.from_spmatrix(spmatrix) + + # regression test for GH#34526 + itr_idx = range(2, rows) + result = df.loc[itr_idx].values + expected = spmatrix.toarray()[itr_idx] + tm.assert_numpy_array_equal(result, expected) + + # regression test for GH#34540 + result = df.loc[itr_idx].dtypes.values + expected = np.full(cols, SparseDtype(dtype, fill_value=0)) + tm.assert_numpy_array_equal(result, expected) + + def test_loc_getitem_listlike_all_retains_sparse(self): + df = DataFrame({"A": pd.array([0, 0], dtype=SparseDtype("int64"))}) + result = df.loc[[0, 1]] + tm.assert_frame_equal(result, df) + + def test_loc_getitem_sparse_frame(self): + # GH34687 + sp_sparse = pytest.importorskip("scipy.sparse") + + df = DataFrame.sparse.from_spmatrix(sp_sparse.eye(5)) + result = df.loc[range(2)] + expected = DataFrame( + [[1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0]], + dtype=SparseDtype("float64", 0.0), + ) + tm.assert_frame_equal(result, expected) + + result = df.loc[range(2)].loc[range(1)] + expected = DataFrame( + [[1.0, 0.0, 0.0, 0.0, 0.0]], dtype=SparseDtype("float64", 0.0) + ) + tm.assert_frame_equal(result, expected) + + def test_loc_getitem_sparse_series(self): + # GH34687 + s = Series([1.0, 0.0, 0.0, 0.0, 0.0], dtype=SparseDtype("float64", 0.0)) + + result = s.loc[range(2)] + expected = Series([1.0, 0.0], dtype=SparseDtype("float64", 0.0)) + tm.assert_series_equal(result, expected) + + result = s.loc[range(3)].loc[range(2)] + expected = Series([1.0, 0.0], dtype=SparseDtype("float64", 0.0)) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("indexer", ["loc", "iloc"]) + def test_getitem_single_row_sparse_df(self, indexer): + # GH#46406 
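+ # selecting a single row from an all-sparse frame should preserve the
+ # SparseDtype rather than densify, for both .loc and .iloc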
+ df = DataFrame([[1.0, 0.0, 1.5], [0.0, 2.0, 0.0]], dtype=SparseDtype(float)) + result = getattr(df, indexer)[0] + expected = Series([1.0, 0.0, 1.5], dtype=SparseDtype(float), name=0) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("key_type", [iter, np.array, Series, Index]) + def test_loc_getitem_iterable(self, float_frame, key_type): + idx = key_type(["A", "B", "C"]) + result = float_frame.loc[:, idx] + expected = float_frame.loc[:, ["A", "B", "C"]] + tm.assert_frame_equal(result, expected) + + def test_loc_getitem_timedelta_0seconds(self): + # GH#10583 + df = DataFrame(np.random.default_rng(2).normal(size=(10, 4))) + df.index = timedelta_range(start="0s", periods=10, freq="s") + expected = df.loc[Timedelta("0s") :, :] + result = df.loc["0s":, :] + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "val,expected", [(2**63 - 1, Series([1])), (2**63, Series([2]))] + ) + def test_loc_getitem_uint64_scalar(self, val, expected): + # see GH#19399 + df = DataFrame([1, 2], index=[2**63 - 1, 2**63]) + result = df.loc[val] + + expected.name = val + tm.assert_series_equal(result, expected) + + def test_loc_setitem_int_label_with_float_index(self, float_numpy_dtype): + # note labels are floats + dtype = float_numpy_dtype + ser = Series(["a", "b", "c"], index=Index([0, 0.5, 1], dtype=dtype)) + expected = ser.copy() + + ser.loc[1] = "zoo" + expected.iloc[2] = "zoo" + + tm.assert_series_equal(ser, expected) + + @pytest.mark.parametrize( + "indexer, expected", + [ + # The test name is a misnomer in the 0 case as df.index[indexer] + # is a scalar. + (0, [20, 1, 2, 3, 4, 5, 6, 7, 8, 9]), + (slice(4, 8), [0, 1, 2, 3, 20, 20, 20, 20, 8, 9]), + ([3, 5], [0, 1, 2, 20, 4, 20, 6, 7, 8, 9]), + ], + ) + def test_loc_setitem_listlike_with_timedelta64index(self, indexer, expected): + # GH#16637 + tdi = to_timedelta(range(10), unit="s") + df = DataFrame({"x": range(10)}, dtype="int64", index=tdi) + + df.loc[df.index[indexer], "x"] = 20 + + expected = DataFrame( + expected, + index=tdi, + columns=["x"], + dtype="int64", + ) + + tm.assert_frame_equal(expected, df) + + def test_loc_setitem_categorical_values_partial_column_slice(self): + # Assigning a Category to parts of a int/... 
column uses the values of + # the Categorical + df = DataFrame({"a": [1, 1, 1, 1, 1], "b": list("aaaaa")}) + exp = DataFrame({"a": [1, "b", "b", 1, 1], "b": list("aabba")}) + with tm.assert_produces_warning( + FutureWarning, match="item of incompatible dtype" + ): + df.loc[1:2, "a"] = Categorical(["b", "b"], categories=["a", "b"]) + df.loc[2:3, "b"] = Categorical(["b", "b"], categories=["a", "b"]) + tm.assert_frame_equal(df, exp) + + def test_loc_setitem_single_row_categorical(self): + # GH#25495 + df = DataFrame({"Alpha": ["a"], "Numeric": [0]}) + categories = Categorical(df["Alpha"], categories=["a", "b", "c"]) + + # pre-2.0 this swapped in a new array, in 2.0 it operates inplace, + # consistent with non-split-path + df.loc[:, "Alpha"] = categories + + result = df["Alpha"] + expected = Series(categories, index=df.index, name="Alpha").astype(object) + tm.assert_series_equal(result, expected) + + # double-check that the non-loc setting retains categoricalness + df["Alpha"] = categories + tm.assert_series_equal(df["Alpha"], Series(categories, name="Alpha")) + + def test_loc_setitem_datetime_coercion(self): + # GH#1048 + df = DataFrame({"c": [Timestamp("2010-10-01")] * 3}) + df.loc[0:1, "c"] = np.datetime64("2008-08-08") + assert Timestamp("2008-08-08") == df.loc[0, "c"] + assert Timestamp("2008-08-08") == df.loc[1, "c"] + with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"): + df.loc[2, "c"] = date(2005, 5, 5) + assert Timestamp("2005-05-05").date() == df.loc[2, "c"] + + @pytest.mark.parametrize("idxer", ["var", ["var"]]) + def test_loc_setitem_datetimeindex_tz(self, idxer, tz_naive_fixture): + # GH#11365 + tz = tz_naive_fixture + idx = date_range(start="2015-07-12", periods=3, freq="H", tz=tz) + expected = DataFrame(1.2, index=idx, columns=["var"]) + # if result started off with object dtype, then the .loc.__setitem__ + # below would retain object dtype + result = DataFrame(index=idx, columns=["var"], dtype=np.float64) + result.loc[:, idxer] = expected + tm.assert_frame_equal(result, expected) + + def test_loc_setitem_time_key(self, using_array_manager): + index = date_range("2012-01-01", "2012-01-05", freq="30min") + df = DataFrame( + np.random.default_rng(2).standard_normal((len(index), 5)), index=index + ) + akey = time(12, 0, 0) + bkey = slice(time(13, 0, 0), time(14, 0, 0)) + ainds = [24, 72, 120, 168] + binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172] + + result = df.copy() + result.loc[akey] = 0 + result = result.loc[akey] + expected = df.loc[akey].copy() + expected.loc[:] = 0 + if using_array_manager: + # TODO(ArrayManager) we are still overwriting columns + expected = expected.astype(float) + tm.assert_frame_equal(result, expected) + + result = df.copy() + result.loc[akey] = 0 + result.loc[akey] = df.iloc[ainds] + tm.assert_frame_equal(result, df) + + result = df.copy() + result.loc[bkey] = 0 + result = result.loc[bkey] + expected = df.loc[bkey].copy() + expected.loc[:] = 0 + if using_array_manager: + # TODO(ArrayManager) we are still overwriting columns + expected = expected.astype(float) + tm.assert_frame_equal(result, expected) + + result = df.copy() + result.loc[bkey] = 0 + result.loc[bkey] = df.iloc[binds] + tm.assert_frame_equal(result, df) + + @pytest.mark.parametrize("key", ["A", ["A"], ("A", slice(None))]) + def test_loc_setitem_unsorted_multiindex_columns(self, key): + # GH#38601 + mi = MultiIndex.from_tuples([("A", 4), ("B", "3"), ("A", "2")]) + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=mi) + obj = df.copy() + obj.loc[:, key] = 
np.zeros((2, 2), dtype="int64") + expected = DataFrame([[0, 2, 0], [0, 5, 0]], columns=mi) + tm.assert_frame_equal(obj, expected) + + df = df.sort_index(axis=1) + df.loc[:, key] = np.zeros((2, 2), dtype="int64") + expected = expected.sort_index(axis=1) + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_uint_drop(self, any_int_numpy_dtype): + # see GH#18311 + # assigning series.loc[0] = 4 changed series.dtype to int + series = Series([1, 2, 3], dtype=any_int_numpy_dtype) + series.loc[0] = 4 + expected = Series([4, 2, 3], dtype=any_int_numpy_dtype) + tm.assert_series_equal(series, expected) + + def test_loc_setitem_td64_non_nano(self): + # GH#14155 + ser = Series(10 * [np.timedelta64(10, "m")]) + ser.loc[[1, 2, 3]] = np.timedelta64(20, "m") + expected = Series(10 * [np.timedelta64(10, "m")]) + expected.loc[[1, 2, 3]] = Timedelta(np.timedelta64(20, "m")) + tm.assert_series_equal(ser, expected) + + def test_loc_setitem_2d_to_1d_raises(self): + data = np.random.default_rng(2).standard_normal((2, 2)) + # float64 dtype to avoid upcast when trying to set float data + ser = Series(range(2), dtype="float64") + + msg = "|".join( + [ + r"shape mismatch: value array of shape \(2,2\)", + r"cannot reshape array of size 4 into shape \(2,\)", + ] + ) + with pytest.raises(ValueError, match=msg): + ser.loc[range(2)] = data + + msg = r"could not broadcast input array from shape \(2,2\) into shape \(2,?\)" + with pytest.raises(ValueError, match=msg): + ser.loc[:] = data + + def test_loc_getitem_interval_index(self): + # GH#19977 + index = pd.interval_range(start=0, periods=3) + df = DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=index, columns=["A", "B", "C"] + ) + + expected = 1 + result = df.loc[0.5, "A"] + tm.assert_almost_equal(result, expected) + + def test_loc_getitem_interval_index2(self): + # GH#19977 + index = pd.interval_range(start=0, periods=3, closed="both") + df = DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=index, columns=["A", "B", "C"] + ) + + index_exp = pd.interval_range(start=0, periods=2, freq=1, closed="both") + expected = Series([1, 4], index=index_exp, name="A") + result = df.loc[1, "A"] + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("tpl", [(1,), (1, 2)]) + def test_loc_getitem_index_single_double_tuples(self, tpl): + # GH#20991 + idx = Index( + [(1,), (1, 2)], + name="A", + tupleize_cols=False, + ) + df = DataFrame(index=idx) + + result = df.loc[[tpl]] + idx = Index([tpl], name="A", tupleize_cols=False) + expected = DataFrame(index=idx) + tm.assert_frame_equal(result, expected) + + def test_loc_getitem_index_namedtuple(self): + IndexType = namedtuple("IndexType", ["a", "b"]) + idx1 = IndexType("foo", "bar") + idx2 = IndexType("baz", "bof") + index = Index([idx1, idx2], name="composite_index", tupleize_cols=False) + df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"]) + + result = df.loc[IndexType("foo", "bar")]["A"] + assert result == 1 + + def test_loc_setitem_single_column_mixed(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + index=["a", "b", "c", "d", "e"], + columns=["foo", "bar", "baz"], + ) + df["str"] = "qux" + df.loc[df.index[::2], "str"] = np.nan + expected = np.array([np.nan, "qux", np.nan, "qux", np.nan], dtype=object) + tm.assert_almost_equal(df["str"].values, expected) + + def test_loc_setitem_cast2(self): + # GH#7704 + # dtype conversion on setting + df = DataFrame(np.random.default_rng(2).random((30, 3)), columns=tuple("ABC")) + df["event"] = np.nan + with 
tm.assert_produces_warning( + FutureWarning, match="item of incompatible dtype" + ): + df.loc[10, "event"] = "foo" + result = df.dtypes + expected = Series( + [np.dtype("float64")] * 3 + [np.dtype("object")], + index=["A", "B", "C", "event"], + ) + tm.assert_series_equal(result, expected) + + def test_loc_setitem_cast3(self): + # Test that data type is preserved . GH#5782 + df = DataFrame({"one": np.arange(6, dtype=np.int8)}) + df.loc[1, "one"] = 6 + assert df.dtypes.one == np.dtype(np.int8) + df.one = np.int8(7) + assert df.dtypes.one == np.dtype(np.int8) + + def test_loc_setitem_range_key(self, frame_or_series): + # GH#45479 don't treat range key as positional + obj = frame_or_series(range(5), index=[3, 4, 1, 0, 2]) + + values = [9, 10, 11] + if obj.ndim == 2: + values = [[9], [10], [11]] + + obj.loc[range(3)] = values + + expected = frame_or_series([0, 1, 10, 9, 11], index=obj.index) + tm.assert_equal(obj, expected) + + +class TestLocWithEllipsis: + @pytest.fixture(params=[tm.loc, tm.iloc]) + def indexer(self, request): + # Test iloc while we're here + return request.param + + @pytest.fixture + def obj(self, series_with_simple_index, frame_or_series): + obj = series_with_simple_index + if frame_or_series is not Series: + obj = obj.to_frame() + return obj + + def test_loc_iloc_getitem_ellipsis(self, obj, indexer): + result = indexer(obj)[...] + tm.assert_equal(result, obj) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_loc_iloc_getitem_leading_ellipses(self, series_with_simple_index, indexer): + obj = series_with_simple_index + key = 0 if (indexer is tm.iloc or len(obj) == 0) else obj.index[0] + + if indexer is tm.loc and obj.index.inferred_type == "boolean": + # passing [False] will get interpreted as a boolean mask + # TODO: should it? unambiguous when lengths dont match? + return + if indexer is tm.loc and isinstance(obj.index, MultiIndex): + msg = "MultiIndex does not support indexing with Ellipsis" + with pytest.raises(NotImplementedError, match=msg): + result = indexer(obj)[..., [key]] + + elif len(obj) != 0: + result = indexer(obj)[..., [key]] + expected = indexer(obj)[[key]] + tm.assert_series_equal(result, expected) + + key2 = 0 if indexer is tm.iloc else obj.name + df = obj.to_frame() + result = indexer(df)[..., [key2]] + expected = indexer(df)[:, [key2]] + tm.assert_frame_equal(result, expected) + + def test_loc_iloc_getitem_ellipses_only_one_ellipsis(self, obj, indexer): + # GH37750 + key = 0 if (indexer is tm.iloc or len(obj) == 0) else obj.index[0] + + with pytest.raises(IndexingError, match=_one_ellipsis_message): + indexer(obj)[..., ...] + + with pytest.raises(IndexingError, match=_one_ellipsis_message): + indexer(obj)[..., [key], ...] + + with pytest.raises(IndexingError, match=_one_ellipsis_message): + indexer(obj)[..., ..., key] + + # one_ellipsis_message takes precedence over "Too many indexers" + # only when the first key is Ellipsis + with pytest.raises(IndexingError, match="Too many indexers"): + indexer(obj)[key, ..., ...] 
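+
+# A compact summary of the Ellipsis behaviour exercised above (illustrative
+# sketch only; `ser`/`df` stand for the parametrized objects):
+#
+#   ser.loc[...]             # equivalent to ser.loc[:]
+#   df.loc[..., [col]]       # a leading Ellipsis acts as a full row slice
+#   ser.loc[..., ...]        # IndexingError: only a single Ellipsis allowed
+#   ser.loc[key, ..., ...]   # IndexingError("Too many indexers")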
+ + +class TestLocWithMultiIndex: + @pytest.mark.parametrize( + "keys, expected", + [ + (["b", "a"], [["b", "b", "a", "a"], [1, 2, 1, 2]]), + (["a", "b"], [["a", "a", "b", "b"], [1, 2, 1, 2]]), + ((["a", "b"], [1, 2]), [["a", "a", "b", "b"], [1, 2, 1, 2]]), + ((["a", "b"], [2, 1]), [["a", "a", "b", "b"], [2, 1, 2, 1]]), + ((["b", "a"], [2, 1]), [["b", "b", "a", "a"], [2, 1, 2, 1]]), + ((["b", "a"], [1, 2]), [["b", "b", "a", "a"], [1, 2, 1, 2]]), + ((["c", "a"], [2, 1]), [["c", "a", "a"], [1, 2, 1]]), + ], + ) + @pytest.mark.parametrize("dim", ["index", "columns"]) + def test_loc_getitem_multilevel_index_order(self, dim, keys, expected): + # GH#22797 + # Try to respect order of keys given for MultiIndex.loc + kwargs = {dim: [["c", "a", "a", "b", "b"], [1, 1, 2, 1, 2]]} + df = DataFrame(np.arange(25).reshape(5, 5), **kwargs) + exp_index = MultiIndex.from_arrays(expected) + if dim == "index": + res = df.loc[keys, :] + tm.assert_index_equal(res.index, exp_index) + elif dim == "columns": + res = df.loc[:, keys] + tm.assert_index_equal(res.columns, exp_index) + + def test_loc_preserve_names(self, multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data + + result = ymd.loc[2000] + result2 = ymd["A"].loc[2000] + assert result.index.names == ymd.index.names[1:] + assert result2.index.names == ymd.index.names[1:] + + result = ymd.loc[2000, 2] + result2 = ymd["A"].loc[2000, 2] + assert result.index.name == ymd.index.names[2] + assert result2.index.name == ymd.index.names[2] + + def test_loc_getitem_multiindex_nonunique_len_zero(self): + # GH#13691 + mi = MultiIndex.from_product([[0], [1, 1]]) + ser = Series(0, index=mi) + + res = ser.loc[[]] + + expected = ser[:0] + tm.assert_series_equal(res, expected) + + res2 = ser.loc[ser.iloc[0:0]] + tm.assert_series_equal(res2, expected) + + def test_loc_getitem_access_none_value_in_multiindex(self): + # GH#34318: test that you can access a None value using .loc + # through a Multiindex + + ser = Series([None], MultiIndex.from_arrays([["Level1"], ["Level2"]])) + result = ser.loc[("Level1", "Level2")] + assert result is None + + midx = MultiIndex.from_product([["Level1"], ["Level2_a", "Level2_b"]]) + ser = Series([None] * len(midx), dtype=object, index=midx) + result = ser.loc[("Level1", "Level2_a")] + assert result is None + + ser = Series([1] * len(midx), dtype=object, index=midx) + result = ser.loc[("Level1", "Level2_a")] + assert result == 1 + + def test_loc_setitem_multiindex_slice(self): + # GH 34870 + + index = MultiIndex.from_tuples( + zip( + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ), + names=["first", "second"], + ) + + result = Series([1, 1, 1, 1, 1, 1, 1, 1], index=index) + result.loc[("baz", "one"):("foo", "two")] = 100 + + expected = Series([1, 1, 100, 100, 100, 100, 1, 1], index=index) + + tm.assert_series_equal(result, expected) + + def test_loc_getitem_slice_datetime_objs_with_datetimeindex(self): + times = date_range("2000-01-01", freq="10min", periods=100000) + ser = Series(range(100000), times) + result = ser.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)] + tm.assert_series_equal(result, ser) + + def test_loc_getitem_datetime_string_with_datetimeindex(self): + # GH 16710 + df = DataFrame( + {"a": range(10), "b": range(10)}, + index=date_range("2010-01-01", "2010-01-10"), + ) + result = df.loc[["2010-01-01", "2010-01-05"], ["a", "b"]] + expected = DataFrame( + {"a": [0, 4], "b": [0, 4]}, + 
index=DatetimeIndex(["2010-01-01", "2010-01-05"]), + ) + tm.assert_frame_equal(result, expected) + + def test_loc_getitem_sorted_index_level_with_duplicates(self): + # GH#4516 sorting a MultiIndex with duplicates and multiple dtypes + mi = MultiIndex.from_tuples( + [ + ("foo", "bar"), + ("foo", "bar"), + ("bah", "bam"), + ("bah", "bam"), + ("foo", "bar"), + ("bah", "bam"), + ], + names=["A", "B"], + ) + df = DataFrame( + [ + [1.0, 1], + [2.0, 2], + [3.0, 3], + [4.0, 4], + [5.0, 5], + [6.0, 6], + ], + index=mi, + columns=["C", "D"], + ) + df = df.sort_index(level=0) + + expected = DataFrame( + [[1.0, 1], [2.0, 2], [5.0, 5]], columns=["C", "D"], index=mi.take([0, 1, 4]) + ) + + result = df.loc[("foo", "bar")] + tm.assert_frame_equal(result, expected) + + def test_additional_element_to_categorical_series_loc(self): + # GH#47677 + result = Series(["a", "b", "c"], dtype="category") + result.loc[3] = 0 + expected = Series(["a", "b", "c", 0], dtype="object") + tm.assert_series_equal(result, expected) + + def test_additional_categorical_element_loc(self): + # GH#47677 + result = Series(["a", "b", "c"], dtype="category") + result.loc[3] = "a" + expected = Series(["a", "b", "c", "a"], dtype="category") + tm.assert_series_equal(result, expected) + + def test_loc_set_nan_in_categorical_series(self, any_numeric_ea_dtype): + # GH#47677 + srs = Series( + [1, 2, 3], + dtype=CategoricalDtype(Index([1, 2, 3], dtype=any_numeric_ea_dtype)), + ) + # enlarge + srs.loc[3] = np.nan + expected = Series( + [1, 2, 3, np.nan], + dtype=CategoricalDtype(Index([1, 2, 3], dtype=any_numeric_ea_dtype)), + ) + tm.assert_series_equal(srs, expected) + # set into + srs.loc[1] = np.nan + expected = Series( + [1, np.nan, 3, np.nan], + dtype=CategoricalDtype(Index([1, 2, 3], dtype=any_numeric_ea_dtype)), + ) + tm.assert_series_equal(srs, expected) + + @pytest.mark.parametrize("na", (np.nan, pd.NA, None, pd.NaT)) + def test_loc_consistency_series_enlarge_set_into(self, na): + # GH#47677 + srs_enlarge = Series(["a", "b", "c"], dtype="category") + srs_enlarge.loc[3] = na + + srs_setinto = Series(["a", "b", "c", "a"], dtype="category") + srs_setinto.loc[3] = na + + tm.assert_series_equal(srs_enlarge, srs_setinto) + expected = Series(["a", "b", "c", na], dtype="category") + tm.assert_series_equal(srs_enlarge, expected) + + def test_loc_getitem_preserves_index_level_category_dtype(self): + # GH#15166 + df = DataFrame( + data=np.arange(2, 22, 2), + index=MultiIndex( + levels=[CategoricalIndex(["a", "b"]), range(10)], + codes=[[0] * 5 + [1] * 5, range(10)], + names=["Index1", "Index2"], + ), + ) + + expected = CategoricalIndex( + ["a", "b"], + categories=["a", "b"], + ordered=False, + name="Index1", + dtype="category", + ) + + result = df.index.levels[0] + tm.assert_index_equal(result, expected) + + result = df.loc[["a"]].index.levels[0] + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("lt_value", [30, 10]) + def test_loc_multiindex_levels_contain_values_not_in_index_anymore(self, lt_value): + # GH#41170 + df = DataFrame({"a": [12, 23, 34, 45]}, index=[list("aabb"), [0, 1, 2, 3]]) + with pytest.raises(KeyError, match=r"\['b'\] not in index"): + df.loc[df["a"] < lt_value, :].loc[["b"], :] + + def test_loc_multiindex_null_slice_na_level(self): + # GH#42055 + lev1 = np.array([np.nan, np.nan]) + lev2 = ["bar", "baz"] + mi = MultiIndex.from_arrays([lev1, lev2]) + ser = Series([0, 1], index=mi) + result = ser.loc[:, "bar"] + + # TODO: should we have name="bar"? 
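+ # the null slice keeps the NaN first-level label while "bar" selects on
+ # the second level, so the result is indexed by the remaining NaN label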
+ expected = Series([0], index=[np.nan]) + tm.assert_series_equal(result, expected) + + def test_loc_drops_level(self): + # Based on test_series_varied_multiindex_alignment, where + # this used to fail to drop the first level + mi = MultiIndex.from_product( + [list("ab"), list("xy"), [1, 2]], names=["ab", "xy", "num"] + ) + ser = Series(range(8), index=mi) + + loc_result = ser.loc["a", :, :] + expected = ser.index.droplevel(0)[:4] + tm.assert_index_equal(loc_result.index, expected) + + +class TestLocSetitemWithExpansion: + @pytest.mark.slow + def test_loc_setitem_with_expansion_large_dataframe(self): + # GH#10692 + result = DataFrame({"x": range(10**6)}, dtype="int64") + result.loc[len(result)] = len(result) + 1 + expected = DataFrame({"x": range(10**6 + 1)}, dtype="int64") + tm.assert_frame_equal(result, expected) + + def test_loc_setitem_empty_series(self): + # GH#5226 + + # partially set with an empty object series + ser = Series(dtype=object) + ser.loc[1] = 1 + tm.assert_series_equal(ser, Series([1], index=[1])) + ser.loc[3] = 3 + tm.assert_series_equal(ser, Series([1, 3], index=[1, 3])) + + def test_loc_setitem_empty_series_float(self): + # GH#5226 + + # partially set with an empty object series + ser = Series(dtype=object) + ser.loc[1] = 1.0 + tm.assert_series_equal(ser, Series([1.0], index=[1])) + ser.loc[3] = 3.0 + tm.assert_series_equal(ser, Series([1.0, 3.0], index=[1, 3])) + + def test_loc_setitem_empty_series_str_idx(self): + # GH#5226 + + # partially set with an empty object series + ser = Series(dtype=object) + ser.loc["foo"] = 1 + tm.assert_series_equal(ser, Series([1], index=["foo"])) + ser.loc["bar"] = 3 + tm.assert_series_equal(ser, Series([1, 3], index=["foo", "bar"])) + ser.loc[3] = 4 + tm.assert_series_equal(ser, Series([1, 3, 4], index=["foo", "bar", 3])) + + def test_loc_setitem_incremental_with_dst(self): + # GH#20724 + base = datetime(2015, 11, 1, tzinfo=gettz("US/Pacific")) + idxs = [base + timedelta(seconds=i * 900) for i in range(16)] + result = Series([0], index=[idxs[0]]) + for ts in idxs: + result.loc[ts] = 1 + expected = Series(1, index=idxs) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "conv", + [ + lambda x: x, + lambda x: x.to_datetime64(), + lambda x: x.to_pydatetime(), + lambda x: np.datetime64(x), + ], + ids=["self", "to_datetime64", "to_pydatetime", "np.datetime64"], + ) + def test_loc_setitem_datetime_keys_cast(self, conv): + # GH#9516 + dt1 = Timestamp("20130101 09:00:00") + dt2 = Timestamp("20130101 10:00:00") + df = DataFrame() + df.loc[conv(dt1), "one"] = 100 + df.loc[conv(dt2), "one"] = 200 + + expected = DataFrame({"one": [100.0, 200.0]}, index=[dt1, dt2]) + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_categorical_column_retains_dtype(self, ordered): + # GH16360 + result = DataFrame({"A": [1]}) + result.loc[:, "B"] = Categorical(["b"], ordered=ordered) + expected = DataFrame({"A": [1], "B": Categorical(["b"], ordered=ordered)}) + tm.assert_frame_equal(result, expected) + + def test_loc_setitem_with_expansion_and_existing_dst(self): + # GH#18308 + start = Timestamp("2017-10-29 00:00:00+0200", tz="Europe/Madrid") + end = Timestamp("2017-10-29 03:00:00+0100", tz="Europe/Madrid") + ts = Timestamp("2016-10-10 03:00:00", tz="Europe/Madrid") + idx = date_range(start, end, inclusive="left", freq="H") + assert ts not in idx # i.e. 
result.loc setitem is with-expansion + + result = DataFrame(index=idx, columns=["value"]) + result.loc[ts, "value"] = 12 + expected = DataFrame( + [np.nan] * len(idx) + [12], + index=idx.append(DatetimeIndex([ts])), + columns=["value"], + dtype=object, + ) + tm.assert_frame_equal(result, expected) + + def test_setitem_with_expansion(self): + # indexing - setting an element + df = DataFrame( + data=to_datetime(["2015-03-30 20:12:32", "2015-03-12 00:11:11"]), + columns=["time"], + ) + df["new_col"] = ["new", "old"] + df.time = df.set_index("time").index.tz_localize("UTC") + v = df[df.new_col == "new"].set_index("time").index.tz_convert("US/Pacific") + + # pre-2.0 trying to set a single element on a part of a different + # timezone converted to object; in 2.0 it retains dtype + df2 = df.copy() + df2.loc[df2.new_col == "new", "time"] = v + + expected = Series([v[0].tz_convert("UTC"), df.loc[1, "time"]], name="time") + tm.assert_series_equal(df2.time, expected) + + v = df.loc[df.new_col == "new", "time"] + Timedelta("1s") + df.loc[df.new_col == "new", "time"] = v + tm.assert_series_equal(df.loc[df.new_col == "new", "time"], v) + + def test_loc_setitem_with_expansion_inf_upcast_empty(self): + # Test with np.inf in columns + df = DataFrame() + df.loc[0, 0] = 1 + df.loc[1, 1] = 2 + df.loc[0, np.inf] = 3 + + result = df.columns + expected = Index([0, 1, np.inf], dtype=np.float64) + tm.assert_index_equal(result, expected) + + @pytest.mark.filterwarnings("ignore:indexing past lexsort depth") + def test_loc_setitem_with_expansion_nonunique_index(self, index): + # GH#40096 + if not len(index): + pytest.skip("Not relevant for empty Index") + + index = index.repeat(2) # ensure non-unique + N = len(index) + arr = np.arange(N).astype(np.int64) + + orig = DataFrame(arr, index=index, columns=[0]) + + # key that will requiring object-dtype casting in the index + key = "kapow" + assert key not in index # otherwise test is invalid + # TODO: using a tuple key breaks here in many cases + + exp_index = index.insert(len(index), key) + if isinstance(index, MultiIndex): + assert exp_index[-1][0] == key + else: + assert exp_index[-1] == key + exp_data = np.arange(N + 1).astype(np.float64) + expected = DataFrame(exp_data, index=exp_index, columns=[0]) + + # Add new row, but no new columns + df = orig.copy() + df.loc[key, 0] = N + tm.assert_frame_equal(df, expected) + + # add new row on a Series + ser = orig.copy()[0] + ser.loc[key] = N + # the series machinery lets us preserve int dtype instead of float + expected = expected[0].astype(np.int64) + tm.assert_series_equal(ser, expected) + + # add new row and new column + df = orig.copy() + df.loc[key, 1] = N + expected = DataFrame( + {0: list(arr) + [np.nan], 1: [np.nan] * N + [float(N)]}, + index=exp_index, + ) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "dtype", ["Int32", "Int64", "UInt32", "UInt64", "Float32", "Float64"] + ) + def test_loc_setitem_with_expansion_preserves_nullable_int(self, dtype): + # GH#42099 + ser = Series([0, 1, 2, 3], dtype=dtype) + df = DataFrame({"data": ser}) + + result = DataFrame(index=df.index) + result.loc[df.index, "data"] = ser + + tm.assert_frame_equal(result, df) + + result = DataFrame(index=df.index) + result.loc[df.index, "data"] = ser._values + tm.assert_frame_equal(result, df) + + +class TestLocCallable: + def test_frame_loc_getitem_callable(self): + # GH#11485 + df = DataFrame({"A": [1, 2, 3, 4], "B": list("aabb"), "C": [1, 2, 3, 4]}) + # iloc cannot use boolean Series (see GH3635) + + # return bool 
indexer + res = df.loc[lambda x: x.A > 2] + tm.assert_frame_equal(res, df.loc[df.A > 2]) + + res = df.loc[lambda x: x.B == "b", :] + tm.assert_frame_equal(res, df.loc[df.B == "b", :]) + + res = df.loc[lambda x: x.A > 2, lambda x: x.columns == "B"] + tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]]) + + res = df.loc[lambda x: x.A > 2, lambda x: "B"] + tm.assert_series_equal(res, df.loc[df.A > 2, "B"]) + + res = df.loc[lambda x: x.A > 2, lambda x: ["A", "B"]] + tm.assert_frame_equal(res, df.loc[df.A > 2, ["A", "B"]]) + + res = df.loc[lambda x: x.A == 2, lambda x: ["A", "B"]] + tm.assert_frame_equal(res, df.loc[df.A == 2, ["A", "B"]]) + + # scalar + res = df.loc[lambda x: 1, lambda x: "A"] + assert res == df.loc[1, "A"] + + def test_frame_loc_getitem_callable_mixture(self): + # GH#11485 + df = DataFrame({"A": [1, 2, 3, 4], "B": list("aabb"), "C": [1, 2, 3, 4]}) + + res = df.loc[lambda x: x.A > 2, ["A", "B"]] + tm.assert_frame_equal(res, df.loc[df.A > 2, ["A", "B"]]) + + res = df.loc[[2, 3], lambda x: ["A", "B"]] + tm.assert_frame_equal(res, df.loc[[2, 3], ["A", "B"]]) + + res = df.loc[3, lambda x: ["A", "B"]] + tm.assert_series_equal(res, df.loc[3, ["A", "B"]]) + + def test_frame_loc_getitem_callable_labels(self): + # GH#11485 + df = DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD")) + + # return label + res = df.loc[lambda x: ["A", "C"]] + tm.assert_frame_equal(res, df.loc[["A", "C"]]) + + res = df.loc[lambda x: ["A", "C"], :] + tm.assert_frame_equal(res, df.loc[["A", "C"], :]) + + res = df.loc[lambda x: ["A", "C"], lambda x: "X"] + tm.assert_series_equal(res, df.loc[["A", "C"], "X"]) + + res = df.loc[lambda x: ["A", "C"], lambda x: ["X"]] + tm.assert_frame_equal(res, df.loc[["A", "C"], ["X"]]) + + # mixture + res = df.loc[["A", "C"], lambda x: "X"] + tm.assert_series_equal(res, df.loc[["A", "C"], "X"]) + + res = df.loc[["A", "C"], lambda x: ["X"]] + tm.assert_frame_equal(res, df.loc[["A", "C"], ["X"]]) + + res = df.loc[lambda x: ["A", "C"], "X"] + tm.assert_series_equal(res, df.loc[["A", "C"], "X"]) + + res = df.loc[lambda x: ["A", "C"], ["X"]] + tm.assert_frame_equal(res, df.loc[["A", "C"], ["X"]]) + + def test_frame_loc_setitem_callable(self): + # GH#11485 + df = DataFrame({"X": [1, 2, 3, 4], "Y": list("aabb")}, index=list("ABCD")) + + # return label + res = df.copy() + res.loc[lambda x: ["A", "C"]] = -20 + exp = df.copy() + exp.loc[["A", "C"]] = -20 + tm.assert_frame_equal(res, exp) + + res = df.copy() + res.loc[lambda x: ["A", "C"], :] = 20 + exp = df.copy() + exp.loc[["A", "C"], :] = 20 + tm.assert_frame_equal(res, exp) + + res = df.copy() + res.loc[lambda x: ["A", "C"], lambda x: "X"] = -1 + exp = df.copy() + exp.loc[["A", "C"], "X"] = -1 + tm.assert_frame_equal(res, exp) + + res = df.copy() + res.loc[lambda x: ["A", "C"], lambda x: ["X"]] = [5, 10] + exp = df.copy() + exp.loc[["A", "C"], ["X"]] = [5, 10] + tm.assert_frame_equal(res, exp) + + # mixture + res = df.copy() + res.loc[["A", "C"], lambda x: "X"] = np.array([-1, -2]) + exp = df.copy() + exp.loc[["A", "C"], "X"] = np.array([-1, -2]) + tm.assert_frame_equal(res, exp) + + res = df.copy() + res.loc[["A", "C"], lambda x: ["X"]] = 10 + exp = df.copy() + exp.loc[["A", "C"], ["X"]] = 10 + tm.assert_frame_equal(res, exp) + + res = df.copy() + res.loc[lambda x: ["A", "C"], "X"] = -2 + exp = df.copy() + exp.loc[["A", "C"], "X"] = -2 + tm.assert_frame_equal(res, exp) + + res = df.copy() + res.loc[lambda x: ["A", "C"], ["X"]] = -4 + exp = df.copy() + exp.loc[["A", "C"], ["X"]] = -4 + 
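+ # in each pairing above, the callable receives the frame and its return
+ # value is applied exactly like the equivalent literal indexer, so every
+ # res/exp pair must match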
tm.assert_frame_equal(res, exp) + + +class TestPartialStringSlicing: + def test_loc_getitem_partial_string_slicing_datetimeindex(self): + # GH#35509 + df = DataFrame( + {"col1": ["a", "b", "c"], "col2": [1, 2, 3]}, + index=to_datetime(["2020-08-01", "2020-07-02", "2020-08-05"]), + ) + expected = DataFrame( + {"col1": ["a", "c"], "col2": [1, 3]}, + index=to_datetime(["2020-08-01", "2020-08-05"]), + ) + result = df.loc["2020-08"] + tm.assert_frame_equal(result, expected) + + def test_loc_getitem_partial_string_slicing_with_periodindex(self): + pi = pd.period_range(start="2017-01-01", end="2018-01-01", freq="M") + ser = pi.to_series() + result = ser.loc[:"2017-12"] + expected = ser.iloc[:-1] + + tm.assert_series_equal(result, expected) + + def test_loc_getitem_partial_string_slicing_with_timedeltaindex(self): + ix = timedelta_range(start="1 day", end="2 days", freq="1H") + ser = ix.to_series() + result = ser.loc[:"1 days"] + expected = ser.iloc[:-1] + + tm.assert_series_equal(result, expected) + + def test_loc_getitem_str_timedeltaindex(self): + # GH#16896 + df = DataFrame({"x": range(3)}, index=to_timedelta(range(3), unit="days")) + expected = df.iloc[0] + sliced = df.loc["0 days"] + tm.assert_series_equal(sliced, expected) + + @pytest.mark.parametrize("indexer_end", [None, "2020-01-02 23:59:59.999999999"]) + def test_loc_getitem_partial_slice_non_monotonicity( + self, tz_aware_fixture, indexer_end, frame_or_series + ): + # GH#33146 + obj = frame_or_series( + [1] * 5, + index=DatetimeIndex( + [ + Timestamp("2019-12-30"), + Timestamp("2020-01-01"), + Timestamp("2019-12-25"), + Timestamp("2020-01-02 23:59:59.999999999"), + Timestamp("2019-12-19"), + ], + tz=tz_aware_fixture, + ), + ) + expected = frame_or_series( + [1] * 2, + index=DatetimeIndex( + [ + Timestamp("2020-01-01"), + Timestamp("2020-01-02 23:59:59.999999999"), + ], + tz=tz_aware_fixture, + ), + ) + indexer = slice("2020-01-01", indexer_end) + + result = obj[indexer] + tm.assert_equal(result, expected) + + result = obj.loc[indexer] + tm.assert_equal(result, expected) + + +class TestLabelSlicing: + def test_loc_getitem_slicing_datetimes_frame(self): + # GH#7523 + + # unique + df_unique = DataFrame( + np.arange(4.0, dtype="float64"), + index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 3, 4]], + ) + + # duplicates + df_dups = DataFrame( + np.arange(5.0, dtype="float64"), + index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 2, 3, 4]], + ) + + for df in [df_unique, df_dups]: + result = df.loc[datetime(2001, 1, 1, 10) :] + tm.assert_frame_equal(result, df) + result = df.loc[: datetime(2001, 1, 4, 10)] + tm.assert_frame_equal(result, df) + result = df.loc[datetime(2001, 1, 1, 10) : datetime(2001, 1, 4, 10)] + tm.assert_frame_equal(result, df) + + result = df.loc[datetime(2001, 1, 1, 11) :] + expected = df.iloc[1:] + tm.assert_frame_equal(result, expected) + result = df.loc["20010101 11":] + tm.assert_frame_equal(result, expected) + + def test_loc_getitem_label_slice_across_dst(self): + # GH#21846 + idx = date_range( + "2017-10-29 01:30:00", tz="Europe/Berlin", periods=5, freq="30 min" + ) + series2 = Series([0, 1, 2, 3, 4], index=idx) + + t_1 = Timestamp("2017-10-29 02:30:00+02:00", tz="Europe/Berlin") + t_2 = Timestamp("2017-10-29 02:00:00+01:00", tz="Europe/Berlin") + result = series2.loc[t_1:t_2] + expected = Series([2, 3], index=idx[2:4]) + tm.assert_series_equal(result, expected) + + result = series2[t_1] + expected = 2 + assert result == expected + + @pytest.mark.parametrize( + "index", + [ + pd.period_range(start="2017-01-01", 
end="2018-01-01", freq="M"), + timedelta_range(start="1 day", end="2 days", freq="1H"), + ], + ) + def test_loc_getitem_label_slice_period_timedelta(self, index): + ser = index.to_series() + result = ser.loc[: index[-2]] + expected = ser.iloc[:-1] + + tm.assert_series_equal(result, expected) + + def test_loc_getitem_slice_floats_inexact(self): + index = [52195.504153, 52196.303147, 52198.369883] + df = DataFrame(np.random.default_rng(2).random((3, 2)), index=index) + + s1 = df.loc[52195.1:52196.5] + assert len(s1) == 2 + + s1 = df.loc[52195.1:52196.6] + assert len(s1) == 2 + + s1 = df.loc[52195.1:52198.9] + assert len(s1) == 3 + + def test_loc_getitem_float_slice_floatindex(self, float_numpy_dtype): + dtype = float_numpy_dtype + ser = Series( + np.random.default_rng(2).random(10), index=np.arange(10, 20, dtype=dtype) + ) + + assert len(ser.loc[12.0:]) == 8 + assert len(ser.loc[12.5:]) == 7 + + idx = np.arange(10, 20, dtype=dtype) + idx[2] = 12.2 + ser.index = idx + assert len(ser.loc[12.0:]) == 8 + assert len(ser.loc[12.5:]) == 7 + + @pytest.mark.parametrize( + "start,stop, expected_slice", + [ + [np.timedelta64(0, "ns"), None, slice(0, 11)], + [np.timedelta64(1, "D"), np.timedelta64(6, "D"), slice(1, 7)], + [None, np.timedelta64(4, "D"), slice(0, 5)], + ], + ) + def test_loc_getitem_slice_label_td64obj(self, start, stop, expected_slice): + # GH#20393 + ser = Series(range(11), timedelta_range("0 days", "10 days")) + result = ser.loc[slice(start, stop)] + expected = ser.iloc[expected_slice] + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("start", ["2018", "2020"]) + def test_loc_getitem_slice_unordered_dt_index(self, frame_or_series, start): + obj = frame_or_series( + [1, 2, 3], + index=[Timestamp("2016"), Timestamp("2019"), Timestamp("2017")], + ) + with pytest.raises( + KeyError, match="Value based partial slicing on non-monotonic" + ): + obj.loc[start:"2022"] + + @pytest.mark.parametrize("value", [1, 1.5]) + def test_loc_getitem_slice_labels_int_in_object_index(self, frame_or_series, value): + # GH: 26491 + obj = frame_or_series(range(4), index=[value, "first", 2, "third"]) + result = obj.loc[value:"third"] + expected = frame_or_series(range(4), index=[value, "first", 2, "third"]) + tm.assert_equal(result, expected) + + def test_loc_getitem_slice_columns_mixed_dtype(self): + # GH: 20975 + df = DataFrame({"test": 1, 1: 2, 2: 3}, index=[0]) + expected = DataFrame( + data=[[2, 3]], index=[0], columns=Index([1, 2], dtype=object) + ) + tm.assert_frame_equal(df.loc[:, 1:], expected) + + +class TestLocBooleanLabelsAndSlices: + @pytest.mark.parametrize("bool_value", [True, False]) + def test_loc_bool_incompatible_index_raises( + self, index, frame_or_series, bool_value + ): + # GH20432 + message = f"{bool_value}: boolean label can not be used without a boolean index" + if index.inferred_type != "boolean": + obj = frame_or_series(index=index, dtype="object") + with pytest.raises(KeyError, match=message): + obj.loc[bool_value] + + @pytest.mark.parametrize("bool_value", [True, False]) + def test_loc_bool_should_not_raise(self, frame_or_series, bool_value): + obj = frame_or_series( + index=Index([True, False], dtype="boolean"), dtype="object" + ) + obj.loc[bool_value] + + def test_loc_bool_slice_raises(self, index, frame_or_series): + # GH20432 + message = ( + r"slice\(True, False, None\): boolean values can not be used in a slice" + ) + obj = frame_or_series(index=index, dtype="object") + with pytest.raises(TypeError, match=message): + obj.loc[True:False] + + +class 
TestLocBooleanMask: + def test_loc_setitem_bool_mask_timedeltaindex(self): + # GH#14946 + df = DataFrame({"x": range(10)}) + df.index = to_timedelta(range(10), unit="s") + conditions = [df["x"] > 3, df["x"] == 3, df["x"] < 3] + expected_data = [ + [0, 1, 2, 3, 10, 10, 10, 10, 10, 10], + [0, 1, 2, 10, 4, 5, 6, 7, 8, 9], + [10, 10, 10, 3, 4, 5, 6, 7, 8, 9], + ] + for cond, data in zip(conditions, expected_data): + result = df.copy() + result.loc[cond, "x"] = 10 + + expected = DataFrame( + data, + index=to_timedelta(range(10), unit="s"), + columns=["x"], + dtype="int64", + ) + tm.assert_frame_equal(expected, result) + + @pytest.mark.parametrize("tz", [None, "UTC"]) + def test_loc_setitem_mask_with_datetimeindex_tz(self, tz): + # GH#16889 + # support .loc with alignment and tz-aware DatetimeIndex + mask = np.array([True, False, True, False]) + + idx = date_range("20010101", periods=4, tz=tz) + df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64") + + result = df.copy() + result.loc[mask, :] = df.loc[mask, :] + tm.assert_frame_equal(result, df) + + result = df.copy() + result.loc[mask] = df.loc[mask] + tm.assert_frame_equal(result, df) + + def test_loc_setitem_mask_and_label_with_datetimeindex(self): + # GH#9478 + # a datetimeindex alignment issue with partial setting + df = DataFrame( + np.arange(6.0).reshape(3, 2), + columns=list("AB"), + index=date_range("1/1/2000", periods=3, freq="1H"), + ) + expected = df.copy() + expected["C"] = [expected.index[0]] + [pd.NaT, pd.NaT] + + mask = df.A < 1 + df.loc[mask, "C"] = df.loc[mask].index + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_mask_td64_series_value(self): + # GH#23462 key list of bools, value is a Series + td1 = Timedelta(0) + td2 = Timedelta(28767471428571405) + df = DataFrame({"col": Series([td1, td2])}) + df_copy = df.copy() + ser = Series([td1]) + + expected = df["col"].iloc[1]._value + df.loc[[True, False]] = ser + result = df["col"].iloc[1]._value + + assert expected == result + tm.assert_frame_equal(df, df_copy) + + @td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values + def test_loc_setitem_boolean_and_column(self, float_frame): + expected = float_frame.copy() + mask = float_frame["A"] > 0 + + float_frame.loc[mask, "B"] = 0 + + values = expected.values.copy() + values[mask.values, 1] = 0 + expected = DataFrame(values, index=expected.index, columns=expected.columns) + tm.assert_frame_equal(float_frame, expected) + + def test_loc_setitem_ndframe_values_alignment(self, using_copy_on_write): + # GH#45501 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df.loc[[False, False, True], ["a"]] = DataFrame( + {"a": [10, 20, 30]}, index=[2, 1, 0] + ) + + expected = DataFrame({"a": [1, 2, 10], "b": [4, 5, 6]}) + tm.assert_frame_equal(df, expected) + + # same thing with Series RHS + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df.loc[[False, False, True], ["a"]] = Series([10, 11, 12], index=[2, 1, 0]) + tm.assert_frame_equal(df, expected) + + # same thing but setting "a" instead of ["a"] + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df.loc[[False, False, True], "a"] = Series([10, 11, 12], index=[2, 1, 0]) + tm.assert_frame_equal(df, expected) + + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df_orig = df.copy() + ser = df["a"] + ser.loc[[False, False, True]] = Series([10, 11, 12], index=[2, 1, 0]) + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + else: + tm.assert_frame_equal(df, expected) + + def test_loc_indexer_empty_broadcast(self): + # GH#51450 + df 
= DataFrame({"a": [], "b": []}, dtype=object) + expected = df.copy() + df.loc[np.array([], dtype=np.bool_), ["a"]] = df["a"] + tm.assert_frame_equal(df, expected) + + def test_loc_indexer_all_false_broadcast(self): + # GH#51450 + df = DataFrame({"a": ["x"], "b": ["y"]}, dtype=object) + expected = df.copy() + df.loc[np.array([False], dtype=np.bool_), ["a"]] = df["b"] + tm.assert_frame_equal(df, expected) + + def test_loc_indexer_length_one(self): + # GH#51435 + df = DataFrame({"a": ["x"], "b": ["y"]}, dtype=object) + expected = DataFrame({"a": ["y"], "b": ["y"]}, dtype=object) + df.loc[np.array([True], dtype=np.bool_), ["a"]] = df["b"] + tm.assert_frame_equal(df, expected) + + +class TestLocListlike: + @pytest.mark.parametrize("box", [lambda x: x, np.asarray, list]) + def test_loc_getitem_list_of_labels_categoricalindex_with_na(self, box): + # passing a list can include valid categories _or_ NA values + ci = CategoricalIndex(["A", "B", np.nan]) + ser = Series(range(3), index=ci) + + result = ser.loc[box(ci)] + tm.assert_series_equal(result, ser) + + result = ser[box(ci)] + tm.assert_series_equal(result, ser) + + result = ser.to_frame().loc[box(ci)] + tm.assert_frame_equal(result, ser.to_frame()) + + ser2 = ser[:-1] + ci2 = ci[1:] + # but if there are no NAs present, this should raise KeyError + msg = "not in index" + with pytest.raises(KeyError, match=msg): + ser2.loc[box(ci2)] + + with pytest.raises(KeyError, match=msg): + ser2[box(ci2)] + + with pytest.raises(KeyError, match=msg): + ser2.to_frame().loc[box(ci2)] + + def test_loc_getitem_series_label_list_missing_values(self): + # gh-11428 + key = np.array( + ["2001-01-04", "2001-01-02", "2001-01-04", "2001-01-14"], dtype="datetime64" + ) + ser = Series([2, 5, 8, 11], date_range("2001-01-01", freq="D", periods=4)) + with pytest.raises(KeyError, match="not in index"): + ser.loc[key] + + def test_loc_getitem_series_label_list_missing_integer_values(self): + # GH: 25927 + ser = Series( + index=np.array([9730701000001104, 10049011000001109]), + data=np.array([999000011000001104, 999000011000001104]), + ) + with pytest.raises(KeyError, match="not in index"): + ser.loc[np.array([9730701000001104, 10047311000001102])] + + @pytest.mark.parametrize("to_period", [True, False]) + def test_loc_getitem_listlike_of_datetimelike_keys(self, to_period): + # GH#11497 + + idx = date_range("2011-01-01", "2011-01-02", freq="D", name="idx") + if to_period: + idx = idx.to_period("D") + ser = Series([0.1, 0.2], index=idx, name="s") + + keys = [Timestamp("2011-01-01"), Timestamp("2011-01-02")] + if to_period: + keys = [x.to_period("D") for x in keys] + result = ser.loc[keys] + exp = Series([0.1, 0.2], index=idx, name="s") + if not to_period: + exp.index = exp.index._with_freq(None) + tm.assert_series_equal(result, exp, check_index_type=True) + + keys = [ + Timestamp("2011-01-02"), + Timestamp("2011-01-02"), + Timestamp("2011-01-01"), + ] + if to_period: + keys = [x.to_period("D") for x in keys] + exp = Series( + [0.2, 0.2, 0.1], index=Index(keys, name="idx", dtype=idx.dtype), name="s" + ) + result = ser.loc[keys] + tm.assert_series_equal(result, exp, check_index_type=True) + + keys = [ + Timestamp("2011-01-03"), + Timestamp("2011-01-02"), + Timestamp("2011-01-03"), + ] + if to_period: + keys = [x.to_period("D") for x in keys] + + with pytest.raises(KeyError, match="not in index"): + ser.loc[keys] + + def test_loc_named_index(self): + # GH 42790 + df = DataFrame( + [[1, 2], [4, 5], [7, 8]], + index=["cobra", "viper", "sidewinder"], + columns=["max_speed", 
"shield"], + ) + expected = df.iloc[:2] + expected.index.name = "foo" + result = df.loc[Index(["cobra", "viper"], name="foo")] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "columns, column_key, expected_columns", + [ + ([2011, 2012, 2013], [2011, 2012], [0, 1]), + ([2011, 2012, "All"], [2011, 2012], [0, 1]), + ([2011, 2012, "All"], [2011, "All"], [0, 2]), + ], +) +def test_loc_getitem_label_list_integer_labels(columns, column_key, expected_columns): + # gh-14836 + df = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=columns, index=list("ABC") + ) + expected = df.iloc[:, expected_columns] + result = df.loc[["A", "B", "C"], column_key] + + tm.assert_frame_equal(result, expected, check_column_type=True) + + +def test_loc_setitem_float_intindex(): + # GH 8720 + rand_data = np.random.default_rng(2).standard_normal((8, 4)) + result = DataFrame(rand_data) + result.loc[:, 0.5] = np.nan + expected_data = np.hstack((rand_data, np.array([np.nan] * 8).reshape(8, 1))) + expected = DataFrame(expected_data, columns=[0.0, 1.0, 2.0, 3.0, 0.5]) + tm.assert_frame_equal(result, expected) + + result = DataFrame(rand_data) + result.loc[:, 0.5] = np.nan + tm.assert_frame_equal(result, expected) + + +def test_loc_axis_1_slice(): + # GH 10586 + cols = [(yr, m) for yr in [2014, 2015] for m in [7, 8, 9, 10]] + df = DataFrame( + np.ones((10, 8)), + index=tuple("ABCDEFGHIJ"), + columns=MultiIndex.from_tuples(cols), + ) + result = df.loc(axis=1)[(2014, 9):(2015, 8)] + expected = DataFrame( + np.ones((10, 4)), + index=tuple("ABCDEFGHIJ"), + columns=MultiIndex.from_tuples([(2014, 9), (2014, 10), (2015, 7), (2015, 8)]), + ) + tm.assert_frame_equal(result, expected) + + +def test_loc_set_dataframe_multiindex(): + # GH 14592 + expected = DataFrame( + "a", index=range(2), columns=MultiIndex.from_product([range(2), range(2)]) + ) + result = expected.copy() + result.loc[0, [(0, 1)]] = result.loc[0, [(0, 1)]] + tm.assert_frame_equal(result, expected) + + +def test_loc_mixed_int_float(): + # GH#19456 + ser = Series(range(2), Index([1, 2.0], dtype=object)) + + result = ser.loc[1] + assert result == 0 + + +def test_loc_with_positional_slice_raises(): + # GH#31840 + ser = Series(range(4), index=["A", "B", "C", "D"]) + + with pytest.raises(TypeError, match="Slicing a positional slice with .loc"): + ser.loc[:3] = 2 + + +def test_loc_slice_disallows_positional(): + # GH#16121, GH#24612, GH#31810 + dti = date_range("2016-01-01", periods=3) + df = DataFrame(np.random.default_rng(2).random((3, 2)), index=dti) + + ser = df[0] + + msg = ( + "cannot do slice indexing on DatetimeIndex with these " + r"indexers \[1\] of type int" + ) + + for obj in [df, ser]: + with pytest.raises(TypeError, match=msg): + obj.loc[1:3] + + with pytest.raises(TypeError, match="Slicing a positional slice with .loc"): + # GH#31840 enforce incorrect behavior + obj.loc[1:3] = 1 + + with pytest.raises(TypeError, match=msg): + df.loc[1:3, 1] + + with pytest.raises(TypeError, match="Slicing a positional slice with .loc"): + # GH#31840 enforce incorrect behavior + df.loc[1:3, 1] = 2 + + +def test_loc_datetimelike_mismatched_dtypes(): + # GH#32650 dont mix and match datetime/timedelta/period dtypes + + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), + columns=["a", "b", "c"], + index=date_range("2012", freq="H", periods=5), + ) + # create dataframe with non-unique DatetimeIndex + df = df.iloc[[0, 2, 2, 3]].copy() + + dti = df.index + tdi = pd.TimedeltaIndex(dti.asi8) # matching i8 values + + msg = r"None of 
\[TimedeltaIndex.* are in the \[index\]" + with pytest.raises(KeyError, match=msg): + df.loc[tdi] + + with pytest.raises(KeyError, match=msg): + df["a"].loc[tdi] + + +def test_loc_with_period_index_indexer(): + # GH#4125 + idx = pd.period_range("2002-01", "2003-12", freq="M") + df = DataFrame(np.random.default_rng(2).standard_normal((24, 10)), index=idx) + tm.assert_frame_equal(df, df.loc[idx]) + tm.assert_frame_equal(df, df.loc[list(idx)]) + tm.assert_frame_equal(df, df.loc[list(idx)]) + tm.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]]) + tm.assert_frame_equal(df, df.loc[list(idx)]) + + +def test_loc_setitem_multiindex_timestamp(): + # GH#13831 + vals = np.random.default_rng(2).standard_normal((8, 6)) + idx = date_range("1/1/2000", periods=8) + cols = ["A", "B", "C", "D", "E", "F"] + exp = DataFrame(vals, index=idx, columns=cols) + exp.loc[exp.index[1], ("A", "B")] = np.nan + vals[1][0:2] = np.nan + res = DataFrame(vals, index=idx, columns=cols) + tm.assert_frame_equal(res, exp) + + +def test_loc_getitem_multiindex_tuple_level(): + # GH#27591 + lev1 = ["a", "b", "c"] + lev2 = [(0, 1), (1, 0)] + lev3 = [0, 1] + cols = MultiIndex.from_product([lev1, lev2, lev3], names=["x", "y", "z"]) + df = DataFrame(6, index=range(5), columns=cols) + + # the lev2[0] here should be treated as a single label, not as a sequence + # of labels + result = df.loc[:, (lev1[0], lev2[0], lev3[0])] + + # TODO: i think this actually should drop levels + expected = df.iloc[:, :1] + tm.assert_frame_equal(result, expected) + + alt = df.xs((lev1[0], lev2[0], lev3[0]), level=[0, 1, 2], axis=1) + tm.assert_frame_equal(alt, expected) + + # same thing on a Series + ser = df.iloc[0] + expected2 = ser.iloc[:1] + + alt2 = ser.xs((lev1[0], lev2[0], lev3[0]), level=[0, 1, 2], axis=0) + tm.assert_series_equal(alt2, expected2) + + result2 = ser.loc[lev1[0], lev2[0], lev3[0]] + assert result2 == 6 + + +def test_loc_getitem_nullable_index_with_duplicates(): + # GH#34497 + df = DataFrame( + data=np.array([[1, 2, 3, 4], [5, 6, 7, 8], [1, 2, np.nan, np.nan]]).T, + columns=["a", "b", "c"], + dtype="Int64", + ) + df2 = df.set_index("c") + assert df2.index.dtype == "Int64" + + res = df2.loc[1] + expected = Series([1, 5], index=df2.columns, dtype="Int64", name=1) + tm.assert_series_equal(res, expected) + + # pd.NA and duplicates in an object-dtype Index + df2.index = df2.index.astype(object) + res = df2.loc[1] + tm.assert_series_equal(res, expected) + + +@pytest.mark.parametrize("value", [300, np.uint16(300), np.int16(300)]) +def test_loc_setitem_uint8_upcast(value): + # GH#26049 + + df = DataFrame([1, 2, 3, 4], columns=["col1"], dtype="uint8") + with tm.assert_produces_warning(FutureWarning, match="item of incompatible dtype"): + df.loc[2, "col1"] = value # value that can't be held in uint8 + + expected = DataFrame([1, 2, 300, 4], columns=["col1"], dtype="uint16") + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize( + "fill_val,exp_dtype", + [ + (Timestamp("2022-01-06"), "datetime64[ns]"), + (Timestamp("2022-01-07", tz="US/Eastern"), "datetime64[ns, US/Eastern]"), + ], +) +def test_loc_setitem_using_datetimelike_str_as_index(fill_val, exp_dtype): + data = ["2022-01-02", "2022-01-03", "2022-01-04", fill_val.date()] + index = DatetimeIndex(data, tz=fill_val.tz, dtype=exp_dtype) + df = DataFrame([10, 11, 12, 14], columns=["a"], index=index) + # adding new row using an unexisting datetime-like str index + df.loc["2022-01-08", "a"] = 13 + + data.append("2022-01-08") + expected_index = DatetimeIndex(data, dtype=exp_dtype) + 
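# ---- Editorial aside, not part of the vendored pandas file: a minimal
# sketch of the expansion behavior under test (df_demo is illustrative).
# Assigning through .loc with a previously unseen datetime-like string
# parses the string and enlarges the DatetimeIndex rather than falling
# back to object dtype.
import pandas as pd

df_demo = pd.DataFrame({"a": [10]}, index=pd.DatetimeIndex(["2022-01-01"]))
df_demo.loc["2022-01-02", "a"] = 11  # string key is parsed to a Timestamp
assert df_demo.index.dtype == "datetime64[ns]"
# ---- end editorial aside ----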
tm.assert_index_equal(df.index, expected_index, exact=True) + + +def test_loc_set_int_dtype(): + # GH#23326 + df = DataFrame([list("abc")]) + df.loc[:, "col1"] = 5 + + expected = DataFrame({0: ["a"], 1: ["b"], 2: ["c"], "col1": [5]}) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.filterwarnings(r"ignore:Period with BDay freq is deprecated:FutureWarning") +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_loc_periodindex_3_levels(): + # GH#24091 + p_index = PeriodIndex( + ["20181101 1100", "20181101 1200", "20181102 1300", "20181102 1400"], + name="datetime", + freq="B", + ) + mi_series = DataFrame( + [["A", "B", 1.0], ["A", "C", 2.0], ["Z", "Q", 3.0], ["W", "F", 4.0]], + index=p_index, + columns=["ONE", "TWO", "VALUES"], + ) + mi_series = mi_series.set_index(["ONE", "TWO"], append=True)["VALUES"] + assert mi_series.loc[(p_index[0], "A", "B")] == 1.0 + + +def test_loc_setitem_pyarrow_strings(): + # GH#52319 + pytest.importorskip("pyarrow") + df = DataFrame( + { + "strings": Series(["A", "B", "C"], dtype="string[pyarrow]"), + "ids": Series([True, True, False]), + } + ) + new_value = Series(["X", "Y"]) + df.loc[df.ids, "strings"] = new_value + + expected_df = DataFrame( + { + "strings": Series(["X", "Y", "C"], dtype="string[pyarrow]"), + "ids": Series([True, True, False]), + } + ) + + tm.assert_frame_equal(df, expected_df) + + +class TestLocSeries: + @pytest.mark.parametrize("val,expected", [(2**63 - 1, 3), (2**63, 4)]) + def test_loc_uint64(self, val, expected): + # see GH#19399 + ser = Series({2**63 - 1: 3, 2**63: 4}) + assert ser.loc[val] == expected + + def test_loc_getitem(self, string_series, datetime_series): + inds = string_series.index[[3, 4, 7]] + tm.assert_series_equal(string_series.loc[inds], string_series.reindex(inds)) + tm.assert_series_equal(string_series.iloc[5::2], string_series[5::2]) + + # slice with indices + d1, d2 = datetime_series.index[[5, 15]] + result = datetime_series.loc[d1:d2] + expected = datetime_series.truncate(d1, d2) + tm.assert_series_equal(result, expected) + + # boolean + mask = string_series > string_series.median() + tm.assert_series_equal(string_series.loc[mask], string_series[mask]) + + # ask for index value + assert datetime_series.loc[d1] == datetime_series[d1] + assert datetime_series.loc[d2] == datetime_series[d2] + + def test_loc_getitem_not_monotonic(self, datetime_series): + d1, d2 = datetime_series.index[[5, 15]] + + ts2 = datetime_series[::2].iloc[[1, 2, 0]] + + msg = r"Timestamp\('2000-01-10 00:00:00'\)" + with pytest.raises(KeyError, match=msg): + ts2.loc[d1:d2] + with pytest.raises(KeyError, match=msg): + ts2.loc[d1:d2] = 0 + + def test_loc_getitem_setitem_integer_slice_keyerrors(self): + ser = Series( + np.random.default_rng(2).standard_normal(10), index=list(range(0, 20, 2)) + ) + + # this is OK + cp = ser.copy() + cp.iloc[4:10] = 0 + assert (cp.iloc[4:10] == 0).all() + + # so is this + cp = ser.copy() + cp.iloc[3:11] = 0 + assert (cp.iloc[3:11] == 0).values.all() + + result = ser.iloc[2:6] + result2 = ser.loc[3:11] + expected = ser.reindex([4, 6, 8, 10]) + + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) + + # non-monotonic, raise KeyError + s2 = ser.iloc[list(range(5)) + list(range(9, 4, -1))] + with pytest.raises(KeyError, match=r"^3$"): + s2.loc[3:11] + with pytest.raises(KeyError, match=r"^3$"): + s2.loc[3:11] = 0 + + def test_loc_getitem_iterator(self, string_series): + idx = iter(string_series.index[:10]) + result = string_series.loc[idx] + 
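# ---- Editorial aside, not part of the vendored pandas file: a minimal
# sketch of what the iterator test checks (ser_demo is illustrative) —
# .loc materializes any iterable of labels, so an iterator or generator
# selects the same rows as the equivalent list.
import pandas as pd

ser_demo = pd.Series(range(3), index=list("abc"))
assert ser_demo.loc[iter(["a", "b"])].equals(ser_demo.loc[["a", "b"]])
# ---- end editorial aside ----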
tm.assert_series_equal(result, string_series[:10]) + + def test_loc_setitem_boolean(self, string_series): + mask = string_series > string_series.median() + + result = string_series.copy() + result.loc[mask] = 0 + expected = string_series + expected[mask] = 0 + tm.assert_series_equal(result, expected) + + def test_loc_setitem_corner(self, string_series): + inds = list(string_series.index[[5, 8, 12]]) + string_series.loc[inds] = 5 + msg = r"\['foo'\] not in index" + with pytest.raises(KeyError, match=msg): + string_series.loc[inds + ["foo"]] = 5 + + def test_basic_setitem_with_labels(self, datetime_series): + indices = datetime_series.index[[5, 10, 15]] + + cp = datetime_series.copy() + exp = datetime_series.copy() + cp[indices] = 0 + exp.loc[indices] = 0 + tm.assert_series_equal(cp, exp) + + cp = datetime_series.copy() + exp = datetime_series.copy() + cp[indices[0] : indices[2]] = 0 + exp.loc[indices[0] : indices[2]] = 0 + tm.assert_series_equal(cp, exp) + + def test_loc_setitem_listlike_of_ints(self): + # integer indexes, be careful + ser = Series( + np.random.default_rng(2).standard_normal(10), index=list(range(0, 20, 2)) + ) + inds = [0, 4, 6] + arr_inds = np.array([0, 4, 6]) + + cp = ser.copy() + exp = ser.copy() + ser[inds] = 0 + ser.loc[inds] = 0 + tm.assert_series_equal(cp, exp) + + cp = ser.copy() + exp = ser.copy() + ser[arr_inds] = 0 + ser.loc[arr_inds] = 0 + tm.assert_series_equal(cp, exp) + + inds_notfound = [0, 4, 5, 6] + arr_inds_notfound = np.array([0, 4, 5, 6]) + msg = r"\[5\] not in index" + with pytest.raises(KeyError, match=msg): + ser[inds_notfound] = 0 + with pytest.raises(Exception, match=msg): + ser[arr_inds_notfound] = 0 + + def test_loc_setitem_dt64tz_values(self): + # GH#12089 + ser = Series( + date_range("2011-01-01", periods=3, tz="US/Eastern"), + index=["a", "b", "c"], + ) + s2 = ser.copy() + expected = Timestamp("2011-01-03", tz="US/Eastern") + s2.loc["a"] = expected + result = s2.loc["a"] + assert result == expected + + s2 = ser.copy() + s2.iloc[0] = expected + result = s2.iloc[0] + assert result == expected + + s2 = ser.copy() + s2["a"] = expected + result = s2["a"] + assert result == expected + + @pytest.mark.parametrize("array_fn", [np.array, pd.array, list, tuple]) + @pytest.mark.parametrize("size", [0, 4, 5, 6]) + def test_loc_iloc_setitem_with_listlike(self, size, array_fn): + # GH37748 + # testing insertion, in a Series of size N (here 5), of a listlike object + # of size 0, N-1, N, N+1 + + arr = array_fn([0] * size) + expected = Series([arr, 0, 0, 0, 0], index=list("abcde"), dtype=object) + + ser = Series(0, index=list("abcde"), dtype=object) + ser.loc["a"] = arr + tm.assert_series_equal(ser, expected) + + ser = Series(0, index=list("abcde"), dtype=object) + ser.iloc[0] = arr + tm.assert_series_equal(ser, expected) + + @pytest.mark.parametrize("indexer", [IndexSlice["A", :], ("A", slice(None))]) + def test_loc_series_getitem_too_many_dimensions(self, indexer): + # GH#35349 + ser = Series( + index=MultiIndex.from_tuples([("A", "0"), ("A", "1"), ("B", "0")]), + data=[21, 22, 23], + ) + msg = "Too many indexers" + with pytest.raises(IndexingError, match=msg): + ser.loc[indexer, :] + + with pytest.raises(IndexingError, match=msg): + ser.loc[indexer, :] = 1 + + def test_loc_setitem(self, string_series): + inds = string_series.index[[3, 4, 7]] + + result = string_series.copy() + result.loc[inds] = 5 + + expected = string_series.copy() + expected.iloc[[3, 4, 7]] = 5 + tm.assert_series_equal(result, expected) + + result.iloc[5:10] = 10 + expected[5:10] = 10 + 
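# ---- Editorial aside, not part of the vendored pandas file: a minimal
# sketch of the slicing semantics relied on just below (ser_demo is
# illustrative) — positional .iloc slices are half-open, while label-based
# .loc slices include both endpoints.
import pandas as pd

ser_demo = pd.Series(range(5), index=list("abcde"))
assert len(ser_demo.iloc[1:3]) == 2     # stop position excluded
assert len(ser_demo.loc["b":"d"]) == 3  # stop label included
# ---- end editorial aside ----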
tm.assert_series_equal(result, expected) + + # set slice with indices + d1, d2 = string_series.index[[5, 15]] + result.loc[d1:d2] = 6 + expected[5:16] = 6 # because it's inclusive + tm.assert_series_equal(result, expected) + + # set index value + string_series.loc[d1] = 4 + string_series.loc[d2] = 6 + assert string_series[d1] == 4 + assert string_series[d2] == 6 + + @pytest.mark.parametrize("dtype", ["object", "string"]) + def test_loc_assign_dict_to_row(self, dtype): + # GH41044 + df = DataFrame({"A": ["abc", "def"], "B": ["ghi", "jkl"]}, dtype=dtype) + df.loc[0, :] = {"A": "newA", "B": "newB"} + + expected = DataFrame({"A": ["newA", "def"], "B": ["newB", "jkl"]}, dtype=dtype) + + tm.assert_frame_equal(df, expected) + + @td.skip_array_manager_invalid_test + def test_loc_setitem_dict_timedelta_multiple_set(self): + # GH 16309 + result = DataFrame(columns=["time", "value"]) + result.loc[1] = {"time": Timedelta(6, unit="s"), "value": "foo"} + result.loc[1] = {"time": Timedelta(6, unit="s"), "value": "foo"} + expected = DataFrame( + [[Timedelta(6, unit="s"), "foo"]], columns=["time", "value"], index=[1] + ) + tm.assert_frame_equal(result, expected) + + def test_loc_set_multiple_items_in_multiple_new_columns(self): + # GH 25594 + df = DataFrame(index=[1, 2], columns=["a"]) + df.loc[1, ["b", "c"]] = [6, 7] + + expected = DataFrame( + { + "a": Series([np.nan, np.nan], dtype="object"), + "b": [6, np.nan], + "c": [7, np.nan], + }, + index=[1, 2], + ) + + tm.assert_frame_equal(df, expected) + + def test_getitem_loc_str_periodindex(self): + # GH#33964 + msg = "Period with BDay freq is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + index = pd.period_range(start="2000", periods=20, freq="B") + series = Series(range(20), index=index) + assert series.loc["2000-01-14"] == 9 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_na_indexing.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_na_indexing.py new file mode 100644 index 00000000..5364cfe8 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_na_indexing.py @@ -0,0 +1,75 @@ +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize( + "values, dtype", + [ + ([], "object"), + ([1, 2, 3], "int64"), + ([1.0, 2.0, 3.0], "float64"), + (["a", "b", "c"], "object"), + (["a", "b", "c"], "string"), + ([1, 2, 3], "datetime64[ns]"), + ([1, 2, 3], "datetime64[ns, CET]"), + ([1, 2, 3], "timedelta64[ns]"), + (["2000", "2001", "2002"], "Period[D]"), + ([1, 0, 3], "Sparse"), + ([pd.Interval(0, 1), pd.Interval(1, 2), pd.Interval(3, 4)], "interval"), + ], +) +@pytest.mark.parametrize( + "mask", [[True, False, False], [True, True, True], [False, False, False]] +) +@pytest.mark.parametrize("indexer_class", [list, pd.array, pd.Index, pd.Series]) +@pytest.mark.parametrize("frame", [True, False]) +def test_series_mask_boolean(values, dtype, mask, indexer_class, frame): + # In case len(values) < 3 + index = ["a", "b", "c"][: len(values)] + mask = mask[: len(values)] + + obj = pd.Series(values, dtype=dtype, index=index) + if frame: + if len(values) == 0: + # Otherwise obj is an empty DataFrame with shape (0, 1) + obj = pd.DataFrame(dtype=dtype, index=index) + else: + obj = obj.to_frame() + + if indexer_class is pd.array: + mask = pd.array(mask, dtype="boolean") + elif indexer_class is pd.Series: + mask = pd.Series(mask, index=obj.index, dtype="boolean") + else: + mask = indexer_class(mask) + + expected = obj[mask] + + result = 
obj[mask] + tm.assert_equal(result, expected) + + if indexer_class is pd.Series: + msg = "iLocation based boolean indexing cannot use an indexable as a mask" + with pytest.raises(ValueError, match=msg): + result = obj.iloc[mask] + tm.assert_equal(result, expected) + else: + result = obj.iloc[mask] + tm.assert_equal(result, expected) + + result = obj.loc[mask] + tm.assert_equal(result, expected) + + +def test_na_treated_as_false(frame_or_series, indexer_sli): + # https://github.com/pandas-dev/pandas/issues/31503 + obj = frame_or_series([1, 2, 3]) + + mask = pd.array([True, False, None], dtype="boolean") + + result = indexer_sli(obj)[mask] + expected = indexer_sli(obj)[mask.fillna(False)] + + tm.assert_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_partial.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_partial.py new file mode 100644 index 00000000..8f499644 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_partial.py @@ -0,0 +1,679 @@ +""" +test setting *parts* of objects both positionally and label based + +TODO: these should be split among the indexer tests +""" + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Period, + Series, + Timestamp, + date_range, + period_range, +) +import pandas._testing as tm + + +class TestEmptyFrameSetitemExpansion: + def test_empty_frame_setitem_index_name_retained(self): + # GH#31368 empty frame has non-None index.name -> retained + df = DataFrame({}, index=pd.RangeIndex(0, name="df_index")) + series = Series(1.23, index=pd.RangeIndex(4, name="series_index")) + + df["series"] = series + expected = DataFrame( + {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="df_index") + ) + + tm.assert_frame_equal(df, expected) + + def test_empty_frame_setitem_index_name_inherited(self): + # GH#36527 empty frame has None index.name -> not retained + df = DataFrame() + series = Series(1.23, index=pd.RangeIndex(4, name="series_index")) + df["series"] = series + expected = DataFrame( + {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="series_index") + ) + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_zerolen_series_columns_align(self): + # columns will align + df = DataFrame(columns=["A", "B"]) + df.loc[0] = Series(1, index=range(4)) + expected = DataFrame(columns=["A", "B"], index=[0], dtype=np.float64) + tm.assert_frame_equal(df, expected) + + # columns will align + df = DataFrame(columns=["A", "B"]) + df.loc[0] = Series(1, index=["B"]) + + exp = DataFrame([[np.nan, 1]], columns=["A", "B"], index=[0], dtype="float64") + tm.assert_frame_equal(df, exp) + + def test_loc_setitem_zerolen_list_length_must_match_columns(self): + # list-like must conform + df = DataFrame(columns=["A", "B"]) + + msg = "cannot set a row with mismatched columns" + with pytest.raises(ValueError, match=msg): + df.loc[0] = [1, 2, 3] + + df = DataFrame(columns=["A", "B"]) + df.loc[3] = [6, 7] # length matches len(df.columns) --> OK! 
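# ---- Editorial aside, not part of the vendored pandas file: a minimal
# sketch of the enlargement rule tested here (df_demo is illustrative) —
# setting a new row label on a frame with defined columns requires exactly
# one value per existing column.
import pandas as pd

df_demo = pd.DataFrame(columns=["A", "B"])
df_demo.loc[0] = [1, 2]  # OK: two values for two columns
try:
    df_demo.loc[1] = [1, 2, 3]  # three values for two columns
except ValueError:
    pass  # "cannot set a row with mismatched columns"
# ---- end editorial aside ----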
+ + exp = DataFrame([[6, 7]], index=[3], columns=["A", "B"], dtype=np.int64) + tm.assert_frame_equal(df, exp) + + def test_partial_set_empty_frame(self): + # partially set with an empty object + # frame + df = DataFrame() + + msg = "cannot set a frame with no defined columns" + + with pytest.raises(ValueError, match=msg): + df.loc[1] = 1 + + with pytest.raises(ValueError, match=msg): + df.loc[1] = Series([1], index=["foo"]) + + msg = "cannot set a frame with no defined index and a scalar" + with pytest.raises(ValueError, match=msg): + df.loc[:, 1] = 1 + + def test_partial_set_empty_frame2(self): + # these work as they don't really change + # anything but the index + # GH#5632 + expected = DataFrame(columns=["foo"], index=Index([], dtype="object")) + + df = DataFrame(index=Index([], dtype="object")) + df["foo"] = Series([], dtype="object") + + tm.assert_frame_equal(df, expected) + + df = DataFrame(index=Index([])) + df["foo"] = Series(df.index) + + tm.assert_frame_equal(df, expected) + + df = DataFrame(index=Index([])) + df["foo"] = df.index + + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame3(self): + expected = DataFrame(columns=["foo"], index=Index([], dtype="int64")) + expected["foo"] = expected["foo"].astype("float64") + + df = DataFrame(index=Index([], dtype="int64")) + df["foo"] = [] + + tm.assert_frame_equal(df, expected) + + df = DataFrame(index=Index([], dtype="int64")) + df["foo"] = Series(np.arange(len(df)), dtype="float64") + + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame4(self): + df = DataFrame(index=Index([], dtype="int64")) + df["foo"] = range(len(df)) + + expected = DataFrame(columns=["foo"], index=Index([], dtype="int64")) + # range is int-dtype-like, so we get int64 dtype + expected["foo"] = expected["foo"].astype("int64") + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame5(self): + df = DataFrame() + tm.assert_index_equal(df.columns, pd.RangeIndex(0)) + df2 = DataFrame() + df2[1] = Series([1], index=["foo"]) + df.loc[:, 1] = Series([1], index=["foo"]) + tm.assert_frame_equal(df, DataFrame([[1]], index=["foo"], columns=[1])) + tm.assert_frame_equal(df, df2) + + def test_partial_set_empty_frame_no_index(self): + # no index to start + expected = DataFrame({0: Series(1, index=range(4))}, columns=["A", "B", 0]) + + df = DataFrame(columns=["A", "B"]) + df[0] = Series(1, index=range(4)) + df.dtypes + str(df) + tm.assert_frame_equal(df, expected) + + df = DataFrame(columns=["A", "B"]) + df.loc[:, 0] = Series(1, index=range(4)) + df.dtypes + str(df) + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame_row(self): + # GH#5720, GH#5744 + # don't create rows when empty + expected = DataFrame(columns=["A", "B", "New"], index=Index([], dtype="int64")) + expected["A"] = expected["A"].astype("int64") + expected["B"] = expected["B"].astype("float64") + expected["New"] = expected["New"].astype("float64") + + df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]}) + y = df[df.A > 5] + y["New"] = np.nan + tm.assert_frame_equal(y, expected) + + expected = DataFrame(columns=["a", "b", "c c", "d"]) + expected["d"] = expected["d"].astype("int64") + df = DataFrame(columns=["a", "b", "c c"]) + df["d"] = 3 + tm.assert_frame_equal(df, expected) + tm.assert_series_equal(df["c c"], Series(name="c c", dtype=object)) + + # reindex columns is ok + df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]}) + y = df[df.A > 5] + result = y.reindex(columns=["A", "B", "C"]) + expected = DataFrame(columns=["A", "B", 
"C"]) + expected["A"] = expected["A"].astype("int64") + expected["B"] = expected["B"].astype("float64") + expected["C"] = expected["C"].astype("float64") + tm.assert_frame_equal(result, expected) + + def test_partial_set_empty_frame_set_series(self): + # GH#5756 + # setting with empty Series + df = DataFrame(Series(dtype=object)) + expected = DataFrame({0: Series(dtype=object)}) + tm.assert_frame_equal(df, expected) + + df = DataFrame(Series(name="foo", dtype=object)) + expected = DataFrame({"foo": Series(dtype=object)}) + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame_empty_copy_assignment(self): + # GH#5932 + # copy on empty with assignment fails + df = DataFrame(index=[0]) + df = df.copy() + df["a"] = 0 + expected = DataFrame(0, index=[0], columns=["a"]) + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame_empty_consistencies(self): + # GH#6171 + # consistency on empty frames + df = DataFrame(columns=["x", "y"]) + df["x"] = [1, 2] + expected = DataFrame({"x": [1, 2], "y": [np.nan, np.nan]}) + tm.assert_frame_equal(df, expected, check_dtype=False) + + df = DataFrame(columns=["x", "y"]) + df["x"] = ["1", "2"] + expected = DataFrame({"x": ["1", "2"], "y": [np.nan, np.nan]}, dtype=object) + tm.assert_frame_equal(df, expected) + + df = DataFrame(columns=["x", "y"]) + df.loc[0, "x"] = 1 + expected = DataFrame({"x": [1], "y": [np.nan]}) + tm.assert_frame_equal(df, expected, check_dtype=False) + + +class TestPartialSetting: + def test_partial_setting(self): + # GH2578, allow ix and friends to partially set + + # series + s_orig = Series([1, 2, 3]) + + s = s_orig.copy() + s[5] = 5 + expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5]) + tm.assert_series_equal(s, expected) + + s = s_orig.copy() + s.loc[5] = 5 + expected = Series([1, 2, 3, 5], index=[0, 1, 2, 5]) + tm.assert_series_equal(s, expected) + + s = s_orig.copy() + s[5] = 5.0 + expected = Series([1, 2, 3, 5.0], index=[0, 1, 2, 5]) + tm.assert_series_equal(s, expected) + + s = s_orig.copy() + s.loc[5] = 5.0 + expected = Series([1, 2, 3, 5.0], index=[0, 1, 2, 5]) + tm.assert_series_equal(s, expected) + + # iloc/iat raise + s = s_orig.copy() + + msg = "iloc cannot enlarge its target object" + with pytest.raises(IndexError, match=msg): + s.iloc[3] = 5.0 + + msg = "index 3 is out of bounds for axis 0 with size 3" + with pytest.raises(IndexError, match=msg): + s.iat[3] = 5.0 + + def test_partial_setting_frame(self, using_array_manager): + df_orig = DataFrame( + np.arange(6).reshape(3, 2), columns=["A", "B"], dtype="int64" + ) + + # iloc/iat raise + df = df_orig.copy() + + msg = "iloc cannot enlarge its target object" + with pytest.raises(IndexError, match=msg): + df.iloc[4, 2] = 5.0 + + msg = "index 2 is out of bounds for axis 0 with size 2" + if using_array_manager: + msg = "list index out of range" + with pytest.raises(IndexError, match=msg): + df.iat[4, 2] = 5.0 + + # row setting where it exists + expected = DataFrame({"A": [0, 4, 4], "B": [1, 5, 5]}) + df = df_orig.copy() + df.iloc[1] = df.iloc[2] + tm.assert_frame_equal(df, expected) + + expected = DataFrame({"A": [0, 4, 4], "B": [1, 5, 5]}) + df = df_orig.copy() + df.loc[1] = df.loc[2] + tm.assert_frame_equal(df, expected) + + # like 2578, partial setting with dtype preservation + expected = DataFrame({"A": [0, 2, 4, 4], "B": [1, 3, 5, 5]}) + df = df_orig.copy() + df.loc[3] = df.loc[2] + tm.assert_frame_equal(df, expected) + + # single dtype frame, overwrite + expected = DataFrame({"A": [0, 2, 4], "B": [0, 2, 4]}) + df = df_orig.copy() + 
df.loc[:, "B"] = df.loc[:, "A"] + tm.assert_frame_equal(df, expected) + + # mixed dtype frame, overwrite + expected = DataFrame({"A": [0, 2, 4], "B": Series([0.0, 2.0, 4.0])}) + df = df_orig.copy() + df["B"] = df["B"].astype(np.float64) + # as of 2.0, df.loc[:, "B"] = ... attempts (and here succeeds) at + # setting inplace + df.loc[:, "B"] = df.loc[:, "A"] + tm.assert_frame_equal(df, expected) + + # single dtype frame, partial setting + expected = df_orig.copy() + expected["C"] = df["A"] + df = df_orig.copy() + df.loc[:, "C"] = df.loc[:, "A"] + tm.assert_frame_equal(df, expected) + + # mixed frame, partial setting + expected = df_orig.copy() + expected["C"] = df["A"] + df = df_orig.copy() + df.loc[:, "C"] = df.loc[:, "A"] + tm.assert_frame_equal(df, expected) + + def test_partial_setting2(self): + # GH 8473 + dates = date_range("1/1/2000", periods=8) + df_orig = DataFrame( + np.random.default_rng(2).standard_normal((8, 4)), + index=dates, + columns=["A", "B", "C", "D"], + ) + + expected = pd.concat( + [df_orig, DataFrame({"A": 7}, index=dates[-1:] + dates.freq)], sort=True + ) + df = df_orig.copy() + df.loc[dates[-1] + dates.freq, "A"] = 7 + tm.assert_frame_equal(df, expected) + df = df_orig.copy() + df.at[dates[-1] + dates.freq, "A"] = 7 + tm.assert_frame_equal(df, expected) + + exp_other = DataFrame({0: 7}, index=dates[-1:] + dates.freq) + expected = pd.concat([df_orig, exp_other], axis=1) + + df = df_orig.copy() + df.loc[dates[-1] + dates.freq, 0] = 7 + tm.assert_frame_equal(df, expected) + df = df_orig.copy() + df.at[dates[-1] + dates.freq, 0] = 7 + tm.assert_frame_equal(df, expected) + + def test_partial_setting_mixed_dtype(self): + # in a mixed dtype environment, try to preserve dtypes + # by appending + df = DataFrame([[True, 1], [False, 2]], columns=["female", "fitness"]) + + s = df.loc[1].copy() + s.name = 2 + expected = pd.concat([df, DataFrame(s).T.infer_objects()]) + + df.loc[2] = df.loc[1] + tm.assert_frame_equal(df, expected) + + def test_series_partial_set(self): + # partial set with new index + # Regression from GH4825 + ser = Series([0.1, 0.2], index=[1, 2]) + + # loc equiv to .reindex + expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3]) + with pytest.raises(KeyError, match=r"not in index"): + ser.loc[[3, 2, 3]] + + result = ser.reindex([3, 2, 3]) + tm.assert_series_equal(result, expected, check_index_type=True) + + expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, "x"]) + with pytest.raises(KeyError, match="not in index"): + ser.loc[[3, 2, 3, "x"]] + + result = ser.reindex([3, 2, 3, "x"]) + tm.assert_series_equal(result, expected, check_index_type=True) + + expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1]) + result = ser.loc[[2, 2, 1]] + tm.assert_series_equal(result, expected, check_index_type=True) + + expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, "x", 1]) + with pytest.raises(KeyError, match="not in index"): + ser.loc[[2, 2, "x", 1]] + + result = ser.reindex([2, 2, "x", 1]) + tm.assert_series_equal(result, expected, check_index_type=True) + + # raises as nothing is in the index + msg = ( + rf"\"None of \[Index\(\[3, 3, 3\], dtype='{np.dtype(int)}'\)\] " + r"are in the \[index\]\"" + ) + with pytest.raises(KeyError, match=msg): + ser.loc[[3, 3, 3]] + + expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3]) + with pytest.raises(KeyError, match="not in index"): + ser.loc[[2, 2, 3]] + + result = ser.reindex([2, 2, 3]) + tm.assert_series_equal(result, expected, check_index_type=True) + + s = Series([0.1, 0.2, 0.3], index=[1, 2, 3]) + 
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4]) + with pytest.raises(KeyError, match="not in index"): + s.loc[[3, 4, 4]] + + result = s.reindex([3, 4, 4]) + tm.assert_series_equal(result, expected, check_index_type=True) + + s = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]) + expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3]) + with pytest.raises(KeyError, match="not in index"): + s.loc[[5, 3, 3]] + + result = s.reindex([5, 3, 3]) + tm.assert_series_equal(result, expected, check_index_type=True) + + s = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]) + expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4]) + with pytest.raises(KeyError, match="not in index"): + s.loc[[5, 4, 4]] + + result = s.reindex([5, 4, 4]) + tm.assert_series_equal(result, expected, check_index_type=True) + + s = Series([0.1, 0.2, 0.3, 0.4], index=[4, 5, 6, 7]) + expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2]) + with pytest.raises(KeyError, match="not in index"): + s.loc[[7, 2, 2]] + + result = s.reindex([7, 2, 2]) + tm.assert_series_equal(result, expected, check_index_type=True) + + s = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]) + expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5]) + with pytest.raises(KeyError, match="not in index"): + s.loc[[4, 5, 5]] + + result = s.reindex([4, 5, 5]) + tm.assert_series_equal(result, expected, check_index_type=True) + + # iloc + expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1]) + result = ser.iloc[[1, 1, 0, 0]] + tm.assert_series_equal(result, expected, check_index_type=True) + + def test_series_partial_set_with_name(self): + # GH 11497 + + idx = Index([1, 2], dtype="int64", name="idx") + ser = Series([0.1, 0.2], index=idx, name="s") + + # loc + with pytest.raises(KeyError, match=r"\[3\] not in index"): + ser.loc[[3, 2, 3]] + + with pytest.raises(KeyError, match=r"not in index"): + ser.loc[[3, 2, 3, "x"]] + + exp_idx = Index([2, 2, 1], dtype="int64", name="idx") + expected = Series([0.2, 0.2, 0.1], index=exp_idx, name="s") + result = ser.loc[[2, 2, 1]] + tm.assert_series_equal(result, expected, check_index_type=True) + + with pytest.raises(KeyError, match=r"\['x'\] not in index"): + ser.loc[[2, 2, "x", 1]] + + # raises as nothing is in the index + msg = ( + rf"\"None of \[Index\(\[3, 3, 3\], dtype='{np.dtype(int)}', " + r"name='idx'\)\] are in the \[index\]\"" + ) + with pytest.raises(KeyError, match=msg): + ser.loc[[3, 3, 3]] + + with pytest.raises(KeyError, match="not in index"): + ser.loc[[2, 2, 3]] + + idx = Index([1, 2, 3], dtype="int64", name="idx") + with pytest.raises(KeyError, match="not in index"): + Series([0.1, 0.2, 0.3], index=idx, name="s").loc[[3, 4, 4]] + + idx = Index([1, 2, 3, 4], dtype="int64", name="idx") + with pytest.raises(KeyError, match="not in index"): + Series([0.1, 0.2, 0.3, 0.4], index=idx, name="s").loc[[5, 3, 3]] + + idx = Index([1, 2, 3, 4], dtype="int64", name="idx") + with pytest.raises(KeyError, match="not in index"): + Series([0.1, 0.2, 0.3, 0.4], index=idx, name="s").loc[[5, 4, 4]] + + idx = Index([4, 5, 6, 7], dtype="int64", name="idx") + with pytest.raises(KeyError, match="not in index"): + Series([0.1, 0.2, 0.3, 0.4], index=idx, name="s").loc[[7, 2, 2]] + + idx = Index([1, 2, 3, 4], dtype="int64", name="idx") + with pytest.raises(KeyError, match="not in index"): + Series([0.1, 0.2, 0.3, 0.4], index=idx, name="s").loc[[4, 5, 5]] + + # iloc + exp_idx = Index([2, 2, 1, 1], dtype="int64", name="idx") + expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name="s") + result = ser.iloc[[1, 1, 0, 
0]] + tm.assert_series_equal(result, expected, check_index_type=True) + + @pytest.mark.parametrize("key", [100, 100.0]) + def test_setitem_with_expansion_numeric_into_datetimeindex(self, key): + # GH#4940 inserting non-strings + orig = tm.makeTimeDataFrame() + df = orig.copy() + + df.loc[key, :] = df.iloc[0] + ex_index = Index(list(orig.index) + [key], dtype=object, name=orig.index.name) + ex_data = np.concatenate([orig.values, df.iloc[[0]].values], axis=0) + expected = DataFrame(ex_data, index=ex_index, columns=orig.columns) + + tm.assert_frame_equal(df, expected) + + def test_partial_set_invalid(self): + # GH 4940 + # allow only setting of 'valid' values + + orig = tm.makeTimeDataFrame() + + # allow object conversion here + df = orig.copy() + df.loc["a", :] = df.iloc[0] + ser = Series(df.iloc[0], name="a") + exp = pd.concat([orig, DataFrame(ser).T.infer_objects()]) + tm.assert_frame_equal(df, exp) + tm.assert_index_equal(df.index, Index(orig.index.tolist() + ["a"])) + assert df.index.dtype == "object" + + @pytest.mark.parametrize( + "idx,labels,expected_idx", + [ + ( + period_range(start="2000", periods=20, freq="D"), + ["2000-01-04", "2000-01-08", "2000-01-12"], + [ + Period("2000-01-04", freq="D"), + Period("2000-01-08", freq="D"), + Period("2000-01-12", freq="D"), + ], + ), + ( + date_range(start="2000", periods=20, freq="D"), + ["2000-01-04", "2000-01-08", "2000-01-12"], + [ + Timestamp("2000-01-04"), + Timestamp("2000-01-08"), + Timestamp("2000-01-12"), + ], + ), + ( + pd.timedelta_range(start="1 day", periods=20), + ["4D", "8D", "12D"], + [pd.Timedelta("4 day"), pd.Timedelta("8 day"), pd.Timedelta("12 day")], + ), + ], + ) + def test_loc_with_list_of_strings_representing_datetimes( + self, idx, labels, expected_idx, frame_or_series + ): + # GH 11278 + obj = frame_or_series(range(20), index=idx) + + expected_value = [3, 7, 11] + expected = frame_or_series(expected_value, expected_idx) + + tm.assert_equal(expected, obj.loc[labels]) + if frame_or_series is Series: + tm.assert_series_equal(expected, obj[labels]) + + @pytest.mark.parametrize( + "idx,labels", + [ + ( + period_range(start="2000", periods=20, freq="D"), + ["2000-01-04", "2000-01-30"], + ), + ( + date_range(start="2000", periods=20, freq="D"), + ["2000-01-04", "2000-01-30"], + ), + (pd.timedelta_range(start="1 day", periods=20), ["3 day", "30 day"]), + ], + ) + def test_loc_with_list_of_strings_representing_datetimes_missing_value( + self, idx, labels + ): + # GH 11278 + ser = Series(range(20), index=idx) + df = DataFrame(range(20), index=idx) + msg = r"not in index" + + with pytest.raises(KeyError, match=msg): + ser.loc[labels] + with pytest.raises(KeyError, match=msg): + ser[labels] + with pytest.raises(KeyError, match=msg): + df.loc[labels] + + @pytest.mark.parametrize( + "idx,labels,msg", + [ + ( + period_range(start="2000", periods=20, freq="D"), + ["4D", "8D"], + ( + r"None of \[Index\(\['4D', '8D'\], dtype='object'\)\] " + r"are in the \[index\]" + ), + ), + ( + date_range(start="2000", periods=20, freq="D"), + ["4D", "8D"], + ( + r"None of \[Index\(\['4D', '8D'\], dtype='object'\)\] " + r"are in the \[index\]" + ), + ), + ( + pd.timedelta_range(start="1 day", periods=20), + ["2000-01-04", "2000-01-08"], + ( + r"None of \[Index\(\['2000-01-04', '2000-01-08'\], " + r"dtype='object'\)\] are in the \[index\]" + ), + ), + ], + ) + def test_loc_with_list_of_strings_representing_datetimes_not_matched_type( + self, idx, labels, msg + ): + # GH 11278 + ser = Series(range(20), index=idx) + df = DataFrame(range(20), index=idx) 
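# ---- Editorial aside, not part of the vendored pandas file: a minimal
# sketch of the mismatch asserted below (ser_demo is illustrative) —
# string labels are only resolved against an index type that can parse
# them, so timedelta-style strings fail wholesale against a DatetimeIndex,
# and vice versa.
import pandas as pd

ser_demo = pd.Series(range(3), index=pd.date_range("2000-01-01", periods=3))
ser_demo.loc[["2000-01-02"]]  # OK: parsed as a Timestamp label
try:
    ser_demo.loc[["4D"]]  # timedelta-style string against datetimes
except KeyError:
    pass  # "None of [Index(['4D'], ...)] are in the [index]"
# ---- end editorial aside ----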
+
+        with pytest.raises(KeyError, match=msg):
+            ser.loc[labels]
+        with pytest.raises(KeyError, match=msg):
+            ser[labels]
+        with pytest.raises(KeyError, match=msg):
+            df.loc[labels]
+
+
+class TestStringSlicing:
+    def test_slice_irregular_datetime_index_with_nan(self):
+        # GH36953
+        index = pd.to_datetime(["2012-01-01", "2012-01-02", "2012-01-03", None])
+        df = DataFrame(range(len(index)), index=index)
+        expected = DataFrame(range(len(index[:3])), index=index[:3])
+        with pytest.raises(KeyError, match="non-existing keys is not allowed"):
+            # Upper bound is not in index (which is unordered)
+            # GH53983
+            # GH37819
+            df["2012-01-01":"2012-01-04"]
+        # Need this precision for the right bound since the right slice
+        # bound is "rounded" up to the largest timepoint smaller than
+        # the next "resolution"-step of the provided point,
+        # e.g. 2012-01-03 is rounded up to 2012-01-04 - 1ns
+        result = df["2012-01-01":"2012-01-03 00:00:00.000000000"]
+        tm.assert_frame_equal(result, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_scalar.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_scalar.py
new file mode 100644
index 00000000..2753b357
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/indexing/test_scalar.py
@@ -0,0 +1,301 @@
+""" test scalar indexing, including at and iat """
+from datetime import (
+    datetime,
+    timedelta,
+)
+import itertools
+
+import numpy as np
+import pytest
+
+from pandas import (
+    DataFrame,
+    Series,
+    Timedelta,
+    Timestamp,
+    date_range,
+)
+import pandas._testing as tm
+
+
+def generate_indices(f, values=False):
+    """
+    Generate indexer tuples from f's axes: positional ranges when values is
+    True, otherwise the axis labels themselves.
+    """
+    axes = f.axes
+    if values:
+        axes = (list(range(len(ax))) for ax in axes)
+
+    return itertools.product(*axes)
+
+
+class TestScalar:
+    @pytest.mark.parametrize("kind", ["series", "frame"])
+    @pytest.mark.parametrize("col", ["ints", "uints"])
+    def test_iat_set_ints(self, kind, col, request):
+        f = request.getfixturevalue(f"{kind}_{col}")
+        indices = generate_indices(f, True)
+        for i in indices:
+            f.iat[i] = 1
+            expected = f.values[i]
+            tm.assert_almost_equal(expected, 1)
+
+    @pytest.mark.parametrize("kind", ["series", "frame"])
+    @pytest.mark.parametrize("col", ["labels", "ts", "floats"])
+    def test_iat_set_other(self, kind, col, request):
+        f = request.getfixturevalue(f"{kind}_{col}")
+        msg = "iAt based indexing can only have integer indexers"
+        with pytest.raises(ValueError, match=msg):
+            idx = next(generate_indices(f, False))
+            f.iat[idx] = 1
+
+    @pytest.mark.parametrize("kind", ["series", "frame"])
+    @pytest.mark.parametrize("col", ["ints", "uints", "labels", "ts", "floats"])
+    def test_at_set_ints_other(self, kind, col, request):
+        f = request.getfixturevalue(f"{kind}_{col}")
+        indices = generate_indices(f, False)
+        for i in indices:
+            f.at[i] = 1
+            expected = f.loc[i]
+            tm.assert_almost_equal(expected, 1)
+
+
+class TestAtAndiAT:
+    # at and iat tests that don't need the Base class
+
+    def test_float_index_at_iat(self):
+        ser = Series([1, 2, 3], index=[0.1, 0.2, 0.3])
+        for el, item in ser.items():
+            assert ser.at[el] == item
+        for i in range(len(ser)):
+            assert ser.iat[i] == i + 1
+
+    def test_at_iat_coercion(self):
+        # as timestamp is not a tuple!
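# ---- Editorial aside, not part of the vendored pandas file: a minimal
# sketch of the scalar accessors this file covers (ser_demo is
# illustrative) — .at takes a single label, .iat a single integer
# position; both are the fast paths probed by the coercion tests below.
import pandas as pd

ser_demo = pd.Series([10, 20], index=["x", "y"])
assert ser_demo.at["y"] == 20
assert ser_demo.iat[0] == 10
# ---- end editorial aside ----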
+ dates = date_range("1/1/2000", periods=8) + df = DataFrame( + np.random.default_rng(2).standard_normal((8, 4)), + index=dates, + columns=["A", "B", "C", "D"], + ) + s = df["A"] + + result = s.at[dates[5]] + xp = s.values[5] + assert result == xp + + @pytest.mark.parametrize( + "ser, expected", + [ + [ + Series(["2014-01-01", "2014-02-02"], dtype="datetime64[ns]"), + Timestamp("2014-02-02"), + ], + [ + Series(["1 days", "2 days"], dtype="timedelta64[ns]"), + Timedelta("2 days"), + ], + ], + ) + def test_iloc_iat_coercion_datelike(self, indexer_ial, ser, expected): + # GH 7729 + # make sure we are boxing the returns + result = indexer_ial(ser)[1] + assert result == expected + + def test_imethods_with_dups(self): + # GH6493 + # iat/iloc with dups + + s = Series(range(5), index=[1, 1, 2, 2, 3], dtype="int64") + result = s.iloc[2] + assert result == 2 + result = s.iat[2] + assert result == 2 + + msg = "index 10 is out of bounds for axis 0 with size 5" + with pytest.raises(IndexError, match=msg): + s.iat[10] + msg = "index -10 is out of bounds for axis 0 with size 5" + with pytest.raises(IndexError, match=msg): + s.iat[-10] + + result = s.iloc[[2, 3]] + expected = Series([2, 3], [2, 2], dtype="int64") + tm.assert_series_equal(result, expected) + + df = s.to_frame() + result = df.iloc[2] + expected = Series(2, index=[0], name=2) + tm.assert_series_equal(result, expected) + + result = df.iat[2, 0] + assert result == 2 + + def test_frame_at_with_duplicate_axes(self): + # GH#33041 + arr = np.random.default_rng(2).standard_normal(6).reshape(3, 2) + df = DataFrame(arr, columns=["A", "A"]) + + result = df.at[0, "A"] + expected = df.iloc[0] + + tm.assert_series_equal(result, expected) + + result = df.T.at["A", 0] + tm.assert_series_equal(result, expected) + + # setter + df.at[1, "A"] = 2 + expected = Series([2.0, 2.0], index=["A", "A"], name=1) + tm.assert_series_equal(df.iloc[1], expected) + + def test_at_getitem_dt64tz_values(self): + # gh-15822 + df = DataFrame( + { + "name": ["John", "Anderson"], + "date": [ + Timestamp(2017, 3, 13, 13, 32, 56), + Timestamp(2017, 2, 16, 12, 10, 3), + ], + } + ) + df["date"] = df["date"].dt.tz_localize("Asia/Shanghai") + + expected = Timestamp("2017-03-13 13:32:56+0800", tz="Asia/Shanghai") + + result = df.loc[0, "date"] + assert result == expected + + result = df.at[0, "date"] + assert result == expected + + def test_mixed_index_at_iat_loc_iloc_series(self): + # GH 19860 + s = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2]) + for el, item in s.items(): + assert s.at[el] == s.loc[el] == item + for i in range(len(s)): + assert s.iat[i] == s.iloc[i] == i + 1 + + with pytest.raises(KeyError, match="^4$"): + s.at[4] + with pytest.raises(KeyError, match="^4$"): + s.loc[4] + + def test_mixed_index_at_iat_loc_iloc_dataframe(self): + # GH 19860 + df = DataFrame( + [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]], columns=["a", "b", "c", 1, 2] + ) + for rowIdx, row in df.iterrows(): + for el, item in row.items(): + assert df.at[rowIdx, el] == df.loc[rowIdx, el] == item + + for row in range(2): + for i in range(5): + assert df.iat[row, i] == df.iloc[row, i] == row * 5 + i + + with pytest.raises(KeyError, match="^3$"): + df.at[0, 3] + with pytest.raises(KeyError, match="^3$"): + df.loc[0, 3] + + def test_iat_setter_incompatible_assignment(self): + # GH 23236 + result = DataFrame({"a": [0.0, 1.0], "b": [4, 5]}) + result.iat[0, 0] = None + expected = DataFrame({"a": [None, 1], "b": [4, 5]}) + tm.assert_frame_equal(result, expected) + + +def test_iat_dont_wrap_object_datetimelike(): + # 
GH#32809 .iat calls go through DataFrame._get_value, should not + # call maybe_box_datetimelike + dti = date_range("2016-01-01", periods=3) + tdi = dti - dti + ser = Series(dti.to_pydatetime(), dtype=object) + ser2 = Series(tdi.to_pytimedelta(), dtype=object) + df = DataFrame({"A": ser, "B": ser2}) + assert (df.dtypes == object).all() + + for result in [df.at[0, "A"], df.iat[0, 0], df.loc[0, "A"], df.iloc[0, 0]]: + assert result is ser[0] + assert isinstance(result, datetime) + assert not isinstance(result, Timestamp) + + for result in [df.at[1, "B"], df.iat[1, 1], df.loc[1, "B"], df.iloc[1, 1]]: + assert result is ser2[1] + assert isinstance(result, timedelta) + assert not isinstance(result, Timedelta) + + +def test_at_with_tuple_index_get(): + # GH 26989 + # DataFrame.at getter works with Index of tuples + df = DataFrame({"a": [1, 2]}, index=[(1, 2), (3, 4)]) + assert df.index.nlevels == 1 + assert df.at[(1, 2), "a"] == 1 + + # Series.at getter works with Index of tuples + series = df["a"] + assert series.index.nlevels == 1 + assert series.at[(1, 2)] == 1 + + +def test_at_with_tuple_index_set(): + # GH 26989 + # DataFrame.at setter works with Index of tuples + df = DataFrame({"a": [1, 2]}, index=[(1, 2), (3, 4)]) + assert df.index.nlevels == 1 + df.at[(1, 2), "a"] = 2 + assert df.at[(1, 2), "a"] == 2 + + # Series.at setter works with Index of tuples + series = df["a"] + assert series.index.nlevels == 1 + series.at[1, 2] = 3 + assert series.at[1, 2] == 3 + + +class TestMultiIndexScalar: + def test_multiindex_at_get(self): + # GH 26989 + # DataFrame.at and DataFrame.loc getter works with MultiIndex + df = DataFrame({"a": [1, 2]}, index=[[1, 2], [3, 4]]) + assert df.index.nlevels == 2 + assert df.at[(1, 3), "a"] == 1 + assert df.loc[(1, 3), "a"] == 1 + + # Series.at and Series.loc getter works with MultiIndex + series = df["a"] + assert series.index.nlevels == 2 + assert series.at[1, 3] == 1 + assert series.loc[1, 3] == 1 + + def test_multiindex_at_set(self): + # GH 26989 + # DataFrame.at and DataFrame.loc setter works with MultiIndex + df = DataFrame({"a": [1, 2]}, index=[[1, 2], [3, 4]]) + assert df.index.nlevels == 2 + df.at[(1, 3), "a"] = 3 + assert df.at[(1, 3), "a"] == 3 + df.loc[(1, 3), "a"] = 4 + assert df.loc[(1, 3), "a"] == 4 + + # Series.at and Series.loc setter works with MultiIndex + series = df["a"] + assert series.index.nlevels == 2 + series.at[1, 3] = 5 + assert series.at[1, 3] == 5 + series.loc[1, 3] = 6 + assert series.loc[1, 3] == 6 + + def test_multiindex_at_get_one_level(self): + # GH#38053 + s2 = Series((0, 1), index=[[False, True]]) + result = s2.at[False] + assert result == 0 diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/test_impl.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/test_impl.py new file mode 100644 index 00000000..97a38856 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/test_impl.py @@ -0,0 +1,364 @@ +from datetime import datetime + +import numpy as np +import pytest + +from pandas._libs.tslibs import iNaT +from pandas.compat import ( + is_ci_environment, + is_platform_windows, +) +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm +from pandas.core.interchange.column import PandasColumn +from 
pandas.core.interchange.dataframe_protocol import ( + ColumnNullType, + DtypeKind, +) +from pandas.core.interchange.from_dataframe import from_dataframe +from pandas.core.interchange.utils import ArrowCTypes + + +@pytest.fixture +def data_categorical(): + return { + "ordered": pd.Categorical(list("testdata") * 30, ordered=True), + "unordered": pd.Categorical(list("testdata") * 30, ordered=False), + } + + +@pytest.fixture +def string_data(): + return { + "separator data": [ + "abC|DeF,Hik", + "234,3245.67", + "gSaf,qWer|Gre", + "asd3,4sad|", + np.nan, + ] + } + + +@pytest.mark.parametrize("data", [("ordered", True), ("unordered", False)]) +def test_categorical_dtype(data, data_categorical): + df = pd.DataFrame({"A": (data_categorical[data[0]])}) + + col = df.__dataframe__().get_column_by_name("A") + assert col.dtype[0] == DtypeKind.CATEGORICAL + assert col.null_count == 0 + assert col.describe_null == (ColumnNullType.USE_SENTINEL, -1) + assert col.num_chunks() == 1 + desc_cat = col.describe_categorical + assert desc_cat["is_ordered"] == data[1] + assert desc_cat["is_dictionary"] is True + assert isinstance(desc_cat["categories"], PandasColumn) + tm.assert_series_equal( + desc_cat["categories"]._col, pd.Series(["a", "d", "e", "s", "t"]) + ) + + tm.assert_frame_equal(df, from_dataframe(df.__dataframe__())) + + +def test_categorical_pyarrow(): + # GH 49889 + pa = pytest.importorskip("pyarrow", "11.0.0") + + arr = ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", "Sun"] + table = pa.table({"weekday": pa.array(arr).dictionary_encode()}) + exchange_df = table.__dataframe__() + result = from_dataframe(exchange_df) + weekday = pd.Categorical( + arr, categories=["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] + ) + expected = pd.DataFrame({"weekday": weekday}) + tm.assert_frame_equal(result, expected) + + +def test_empty_categorical_pyarrow(): + # https://github.com/pandas-dev/pandas/issues/53077 + pa = pytest.importorskip("pyarrow", "11.0.0") + + arr = [None] + table = pa.table({"arr": pa.array(arr, "float64").dictionary_encode()}) + exchange_df = table.__dataframe__() + result = pd.api.interchange.from_dataframe(exchange_df) + expected = pd.DataFrame({"arr": pd.Categorical([np.nan])}) + tm.assert_frame_equal(result, expected) + + +def test_large_string_pyarrow(): + # GH 52795 + pa = pytest.importorskip("pyarrow", "11.0.0") + + arr = ["Mon", "Tue"] + table = pa.table({"weekday": pa.array(arr, "large_string")}) + exchange_df = table.__dataframe__() + result = from_dataframe(exchange_df) + expected = pd.DataFrame({"weekday": ["Mon", "Tue"]}) + tm.assert_frame_equal(result, expected) + + # check round-trip + assert pa.Table.equals(pa.interchange.from_dataframe(result), table) + + +@pytest.mark.parametrize( + ("offset", "length", "expected_values"), + [ + (0, None, [3.3, float("nan"), 2.1]), + (1, None, [float("nan"), 2.1]), + (2, None, [2.1]), + (0, 2, [3.3, float("nan")]), + (0, 1, [3.3]), + (1, 1, [float("nan")]), + ], +) +def test_bitmasks_pyarrow(offset, length, expected_values): + # GH 52795 + pa = pytest.importorskip("pyarrow", "11.0.0") + + arr = [3.3, None, 2.1] + table = pa.table({"arr": arr}).slice(offset, length) + exchange_df = table.__dataframe__() + result = from_dataframe(exchange_df) + expected = pd.DataFrame({"arr": expected_values}) + tm.assert_frame_equal(result, expected) + + # check round-trip + assert pa.Table.equals(pa.interchange.from_dataframe(result), table) + + +@pytest.mark.parametrize( + "data", + [ + lambda: np.random.default_rng(2).integers(-100, 100), + lambda: 
np.random.default_rng(2).integers(1, 100), + lambda: np.random.default_rng(2).random(), + lambda: np.random.default_rng(2).choice([True, False]), + lambda: datetime( + year=np.random.default_rng(2).integers(1900, 2100), + month=np.random.default_rng(2).integers(1, 12), + day=np.random.default_rng(2).integers(1, 20), + ), + ], +) +def test_dataframe(data): + NCOLS, NROWS = 10, 20 + data = { + f"col{int((i - NCOLS / 2) % NCOLS + 1)}": [data() for _ in range(NROWS)] + for i in range(NCOLS) + } + df = pd.DataFrame(data) + + df2 = df.__dataframe__() + + assert df2.num_columns() == NCOLS + assert df2.num_rows() == NROWS + + assert list(df2.column_names()) == list(data.keys()) + + indices = (0, 2) + names = tuple(list(data.keys())[idx] for idx in indices) + + result = from_dataframe(df2.select_columns(indices)) + expected = from_dataframe(df2.select_columns_by_name(names)) + tm.assert_frame_equal(result, expected) + + assert isinstance(result.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"], list) + assert isinstance(expected.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"], list) + + +def test_missing_from_masked(): + df = pd.DataFrame( + { + "x": np.array([1.0, 2.0, 3.0, 4.0, 0.0]), + "y": np.array([1.5, 2.5, 3.5, 4.5, 0]), + "z": np.array([1.0, 0.0, 1.0, 1.0, 1.0]), + } + ) + + df2 = df.__dataframe__() + + rng = np.random.default_rng(2) + dict_null = {col: rng.integers(low=0, high=len(df)) for col in df.columns} + for col, num_nulls in dict_null.items(): + null_idx = df.index[ + rng.choice(np.arange(len(df)), size=num_nulls, replace=False) + ] + df.loc[null_idx, col] = None + + df2 = df.__dataframe__() + + assert df2.get_column_by_name("x").null_count == dict_null["x"] + assert df2.get_column_by_name("y").null_count == dict_null["y"] + assert df2.get_column_by_name("z").null_count == dict_null["z"] + + +@pytest.mark.parametrize( + "data", + [ + {"x": [1.5, 2.5, 3.5], "y": [9.2, 10.5, 11.8]}, + {"x": [1, 2, 0], "y": [9.2, 10.5, 11.8]}, + { + "x": np.array([True, True, False]), + "y": np.array([1, 2, 0]), + "z": np.array([9.2, 10.5, 11.8]), + }, + ], +) +def test_mixed_data(data): + df = pd.DataFrame(data) + df2 = df.__dataframe__() + + for col_name in df.columns: + assert df2.get_column_by_name(col_name).null_count == 0 + + +def test_mixed_missing(): + df = pd.DataFrame( + { + "x": np.array([True, None, False, None, True]), + "y": np.array([None, 2, None, 1, 2]), + "z": np.array([9.2, 10.5, None, 11.8, None]), + } + ) + + df2 = df.__dataframe__() + + for col_name in df.columns: + assert df2.get_column_by_name(col_name).null_count == 2 + + +def test_string(string_data): + test_str_data = string_data["separator data"] + [""] + df = pd.DataFrame({"A": test_str_data}) + col = df.__dataframe__().get_column_by_name("A") + + assert col.size() == 6 + assert col.null_count == 1 + assert col.dtype[0] == DtypeKind.STRING + assert col.describe_null == (ColumnNullType.USE_BYTEMASK, 0) + + df_sliced = df[1:] + col = df_sliced.__dataframe__().get_column_by_name("A") + assert col.size() == 5 + assert col.null_count == 1 + assert col.dtype[0] == DtypeKind.STRING + assert col.describe_null == (ColumnNullType.USE_BYTEMASK, 0) + + +def test_nonstring_object(): + df = pd.DataFrame({"A": ["a", 10, 1.0, ()]}) + col = df.__dataframe__().get_column_by_name("A") + with pytest.raises(NotImplementedError, match="not supported yet"): + col.dtype + + +def test_datetime(): + df = pd.DataFrame({"A": [pd.Timestamp("2022-01-01"), pd.NaT]}) + col = df.__dataframe__().get_column_by_name("A") + + assert col.size() == 2 + assert col.null_count == 1 + 
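+    # Illustrative note (not part of the vendored test): missing datetimes are
+    # advertised through a sentinel rather than a byte mask, which the asserts
+    # below spell out as
+    #   col.describe_null == (ColumnNullType.USE_SENTINEL, iNaT)
+    # and the original frame can be rebuilt from the protocol view via
+    #   from_dataframe(df.__dataframe__())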
assert col.dtype[0] == DtypeKind.DATETIME + assert col.describe_null == (ColumnNullType.USE_SENTINEL, iNaT) + + tm.assert_frame_equal(df, from_dataframe(df.__dataframe__())) + + +@td.skip_if_np_lt("1.23") +def test_categorical_to_numpy_dlpack(): + # https://github.com/pandas-dev/pandas/issues/48393 + df = pd.DataFrame({"A": pd.Categorical(["a", "b", "a"])}) + col = df.__dataframe__().get_column_by_name("A") + result = np.from_dlpack(col.get_buffers()["data"][0]) + expected = np.array([0, 1, 0], dtype="int8") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("data", [{}, {"a": []}]) +def test_empty_pyarrow(data): + # GH 53155 + pytest.importorskip("pyarrow", "11.0.0") + from pyarrow.interchange import from_dataframe as pa_from_dataframe + + expected = pd.DataFrame(data) + arrow_df = pa_from_dataframe(expected) + result = from_dataframe(arrow_df) + tm.assert_frame_equal(result, expected) + + +def test_multi_chunk_pyarrow() -> None: + pa = pytest.importorskip("pyarrow", "11.0.0") + n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + names = ["n_legs"] + table = pa.table([n_legs], names=names) + with pytest.raises( + RuntimeError, + match="To join chunks a copy is required which is " + "forbidden by allow_copy=False", + ): + pd.api.interchange.from_dataframe(table, allow_copy=False) + + +@pytest.mark.parametrize("tz", ["UTC", "US/Pacific"]) +@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) +def test_datetimetzdtype(tz, unit): + # GH 54239 + tz_data = ( + pd.date_range("2018-01-01", periods=5, freq="D").tz_localize(tz).as_unit(unit) + ) + df = pd.DataFrame({"ts_tz": tz_data}) + tm.assert_frame_equal(df, from_dataframe(df.__dataframe__())) + + +def test_interchange_from_non_pandas_tz_aware(request): + # GH 54239, 54287 + pa = pytest.importorskip("pyarrow", "11.0.0") + import pyarrow.compute as pc + + if is_platform_windows() and is_ci_environment(): + mark = pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason=( + "TODO: Set ARROW_TIMEZONE_DATABASE environment variable " + "on CI to path to the tzdata for pyarrow." 
+ ), + ) + request.node.add_marker(mark) + + arr = pa.array([datetime(2020, 1, 1), None, datetime(2020, 1, 2)]) + arr = pc.assume_timezone(arr, "Asia/Kathmandu") + table = pa.table({"arr": arr}) + exchange_df = table.__dataframe__() + result = from_dataframe(exchange_df) + + expected = pd.DataFrame( + ["2020-01-01 00:00:00+05:45", "NaT", "2020-01-02 00:00:00+05:45"], + columns=["arr"], + dtype="datetime64[us, Asia/Kathmandu]", + ) + tm.assert_frame_equal(expected, result) + + +def test_interchange_from_corrected_buffer_dtypes(monkeypatch) -> None: + # https://github.com/pandas-dev/pandas/issues/54781 + df = pd.DataFrame({"a": ["foo", "bar"]}).__dataframe__() + interchange = df.__dataframe__() + column = interchange.get_column_by_name("a") + buffers = column.get_buffers() + buffers_data = buffers["data"] + buffer_dtype = buffers_data[1] + buffer_dtype = ( + DtypeKind.UINT, + 8, + ArrowCTypes.UINT8, + buffer_dtype[3], + ) + buffers["data"] = (buffers_data[0], buffer_dtype) + column.get_buffers = lambda: buffers + interchange.get_column_by_name = lambda _: column + monkeypatch.setattr(df, "__dataframe__", lambda allow_copy: interchange) + pd.api.interchange.from_dataframe(df) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/test_spec_conformance.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/test_spec_conformance.py new file mode 100644 index 00000000..7c02379c --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/test_spec_conformance.py @@ -0,0 +1,175 @@ +""" +A verbatim copy (vendored) of the spec tests. +Taken from https://github.com/data-apis/dataframe-api +""" +import ctypes +import math + +import pytest + +import pandas as pd + + +@pytest.fixture +def df_from_dict(): + def maker(dct, is_categorical=False): + df = pd.DataFrame(dct) + return df.astype("category") if is_categorical else df + + return maker + + +@pytest.mark.parametrize( + "test_data", + [ + {"a": ["foo", "bar"], "b": ["baz", "qux"]}, + {"a": [1.5, 2.5, 3.5], "b": [9.2, 10.5, 11.8]}, + {"A": [1, 2, 3, 4], "B": [1, 2, 3, 4]}, + ], + ids=["str_data", "float_data", "int_data"], +) +def test_only_one_dtype(test_data, df_from_dict): + columns = list(test_data.keys()) + df = df_from_dict(test_data) + dfX = df.__dataframe__() + + column_size = len(test_data[columns[0]]) + for column in columns: + null_count = dfX.get_column_by_name(column).null_count + assert null_count == 0 + assert isinstance(null_count, int) + assert dfX.get_column_by_name(column).size() == column_size + assert dfX.get_column_by_name(column).offset == 0 + + +def test_mixed_dtypes(df_from_dict): + df = df_from_dict( + { + "a": [1, 2, 3], # dtype kind INT = 0 + "b": [3, 4, 5], # dtype kind INT = 0 + "c": [1.5, 2.5, 3.5], # dtype kind FLOAT = 2 + "d": [9, 10, 11], # dtype kind INT = 0 + "e": [True, False, True], # dtype kind BOOLEAN = 20 + "f": ["a", "", "c"], # dtype kind STRING = 21 + } + ) + dfX = df.__dataframe__() + # for meanings of dtype[0] see the spec; we cannot import the spec here as this + # file is expected to be vendored *anywhere*; + # values for dtype[0] are explained above + columns = {"a": 0, "b": 0, "c": 2, "d": 0, "e": 20, "f": 21} + + for column, kind in columns.items(): + colX = dfX.get_column_by_name(column) + assert colX.null_count == 0 + assert isinstance(colX.null_count, int) + assert colX.size() == 3 + assert colX.offset == 0 + + assert colX.dtype[0] == kind + + assert dfX.get_column_by_name("c").dtype[1] == 64 + + +def test_na_float(df_from_dict): + df = 
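+    # Illustrative note (not part of the vendored spec tests): the integer
+    # kind codes asserted throughout this file come from the interchange
+    # spec's DtypeKind: INT = 0, UINT = 1, FLOAT = 2, BOOL = 20, STRING = 21,
+    # DATETIME = 22, CATEGORICAL = 23.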
df_from_dict({"a": [1.0, math.nan, 2.0]}) + dfX = df.__dataframe__() + colX = dfX.get_column_by_name("a") + assert colX.null_count == 1 + assert isinstance(colX.null_count, int) + + +def test_noncategorical(df_from_dict): + df = df_from_dict({"a": [1, 2, 3]}) + dfX = df.__dataframe__() + colX = dfX.get_column_by_name("a") + with pytest.raises(TypeError, match=".*categorical.*"): + colX.describe_categorical + + +def test_categorical(df_from_dict): + df = df_from_dict( + {"weekday": ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", "Sun"]}, + is_categorical=True, + ) + + colX = df.__dataframe__().get_column_by_name("weekday") + categorical = colX.describe_categorical + assert isinstance(categorical["is_ordered"], bool) + assert isinstance(categorical["is_dictionary"], bool) + + +def test_dataframe(df_from_dict): + df = df_from_dict( + {"x": [True, True, False], "y": [1, 2, 0], "z": [9.2, 10.5, 11.8]} + ) + dfX = df.__dataframe__() + + assert dfX.num_columns() == 3 + assert dfX.num_rows() == 3 + assert dfX.num_chunks() == 1 + assert list(dfX.column_names()) == ["x", "y", "z"] + assert list(dfX.select_columns((0, 2)).column_names()) == list( + dfX.select_columns_by_name(("x", "z")).column_names() + ) + + +@pytest.mark.parametrize(["size", "n_chunks"], [(10, 3), (12, 3), (12, 5)]) +def test_df_get_chunks(size, n_chunks, df_from_dict): + df = df_from_dict({"x": list(range(size))}) + dfX = df.__dataframe__() + chunks = list(dfX.get_chunks(n_chunks)) + assert len(chunks) == n_chunks + assert sum(chunk.num_rows() for chunk in chunks) == size + + +@pytest.mark.parametrize(["size", "n_chunks"], [(10, 3), (12, 3), (12, 5)]) +def test_column_get_chunks(size, n_chunks, df_from_dict): + df = df_from_dict({"x": list(range(size))}) + dfX = df.__dataframe__() + chunks = list(dfX.get_column(0).get_chunks(n_chunks)) + assert len(chunks) == n_chunks + assert sum(chunk.size() for chunk in chunks) == size + + +def test_get_columns(df_from_dict): + df = df_from_dict({"a": [0, 1], "b": [2.5, 3.5]}) + dfX = df.__dataframe__() + for colX in dfX.get_columns(): + assert colX.size() == 2 + assert colX.num_chunks() == 1 + # for meanings of dtype[0] see the spec; we cannot import the spec here as this + # file is expected to be vendored *anywhere* + assert dfX.get_column(0).dtype[0] == 0 # INT + assert dfX.get_column(1).dtype[0] == 2 # FLOAT + + +def test_buffer(df_from_dict): + arr = [0, 1, -1] + df = df_from_dict({"a": arr}) + dfX = df.__dataframe__() + colX = dfX.get_column(0) + bufX = colX.get_buffers() + + dataBuf, dataDtype = bufX["data"] + + assert dataBuf.bufsize > 0 + assert dataBuf.ptr != 0 + device, _ = dataBuf.__dlpack_device__() + + # for meanings of dtype[0] see the spec; we cannot import the spec here as this + # file is expected to be vendored *anywhere* + assert dataDtype[0] == 0 # INT + + if device == 1: # CPU-only as we're going to directly read memory here + bitwidth = dataDtype[1] + ctype = { + 8: ctypes.c_int8, + 16: ctypes.c_int16, + 32: ctypes.c_int32, + 64: ctypes.c_int64, + }[bitwidth] + + for idx, truth in enumerate(arr): + val = ctype.from_address(dataBuf.ptr + idx * (bitwidth // 8)).value + assert val == truth, f"Buffer at index {idx} mismatch" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/test_utils.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/test_utils.py new file mode 100644 index 00000000..a47bc275 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/interchange/test_utils.py @@ -0,0 +1,89 @@ +import numpy as 
np
+import pytest
+
+import pandas as pd
+from pandas.core.interchange.utils import dtype_to_arrow_c_fmt
+
+# TODO: use ArrowSchema to get reference C-string.
+# At the time of writing, there is no way to access an ArrowSchema holding a
+# type format string from Python. The only way to access it is to export the
+# structure to a C-pointer, see DataType._export_to_c() method defined in
+# https://github.com/apache/arrow/blob/master/python/pyarrow/types.pxi
+
+
+@pytest.mark.parametrize(
+    "pandas_dtype, c_string",
+    [
+        (np.dtype("bool"), "b"),
+        (np.dtype("int8"), "c"),
+        (np.dtype("uint8"), "C"),
+        (np.dtype("int16"), "s"),
+        (np.dtype("uint16"), "S"),
+        (np.dtype("int32"), "i"),
+        (np.dtype("uint32"), "I"),
+        (np.dtype("int64"), "l"),
+        (np.dtype("uint64"), "L"),
+        (np.dtype("float16"), "e"),
+        (np.dtype("float32"), "f"),
+        (np.dtype("float64"), "g"),
+        (pd.Series(["a"]).dtype, "u"),
+        (
+            pd.Series([0]).astype("datetime64[ns]").dtype,
+            "tsn:",
+        ),
+        (pd.CategoricalDtype(["a"]), "l"),
+        (np.dtype("O"), "u"),
+    ],
+)
+def test_dtype_to_arrow_c_fmt(pandas_dtype, c_string):  # PR01
+    """Test ``dtype_to_arrow_c_fmt`` utility function."""
+    assert dtype_to_arrow_c_fmt(pandas_dtype) == c_string
+
+
+@pytest.mark.parametrize(
+    "pa_dtype, args_kwargs, c_string",
+    [
+        ["null", {}, "n"],
+        ["bool_", {}, "b"],
+        ["uint8", {}, "C"],
+        ["uint16", {}, "S"],
+        ["uint32", {}, "I"],
+        ["uint64", {}, "L"],
+        ["int8", {}, "c"],
+        ["int16", {}, "s"],
+        ["int32", {}, "i"],
+        ["int64", {}, "l"],
+        ["float16", {}, "e"],
+        ["float32", {}, "f"],
+        ["float64", {}, "g"],
+        ["string", {}, "u"],
+        ["binary", {}, "z"],
+        ["time32", ("s",), "tts"],
+        ["time32", ("ms",), "ttm"],
+        ["time64", ("us",), "ttu"],
+        ["time64", ("ns",), "ttn"],
+        ["date32", {}, "tdD"],
+        ["date64", {}, "tdm"],
+        ["timestamp", {"unit": "s"}, "tss:"],
+        ["timestamp", {"unit": "ms"}, "tsm:"],
+        ["timestamp", {"unit": "us"}, "tsu:"],
+        ["timestamp", {"unit": "ns"}, "tsn:"],
+        ["timestamp", {"unit": "ns", "tz": "UTC"}, "tsn:UTC"],
+        ["duration", ("s",), "tDs"],
+        ["duration", ("ms",), "tDm"],
+        ["duration", ("us",), "tDu"],
+        ["duration", ("ns",), "tDn"],
+        ["decimal128", {"precision": 4, "scale": 2}, "d:4,2"],
+    ],
+)
+def test_dtype_to_arrow_c_fmt_arrowdtype(pa_dtype, args_kwargs, c_string):
+    # GH 52323
+    pa = pytest.importorskip("pyarrow")
+    if not args_kwargs:
+        pa_type = getattr(pa, pa_dtype)()
+    elif isinstance(args_kwargs, tuple):
+        pa_type = getattr(pa, pa_dtype)(*args_kwargs)
+    else:
+        pa_type = getattr(pa, pa_dtype)(**args_kwargs)
+    arrow_type = pd.ArrowDtype(pa_type)
+    assert dtype_to_arrow_c_fmt(arrow_type) == c_string
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/test_api.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/test_api.py
new file mode 100644
index 00000000..5cd6c718
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/test_api.py
@@ -0,0 +1,53 @@
+"""
+Tests for the pseudo-public API implemented in internals/api.py and exposed
+in core.internals
+"""
+
+import pandas as pd
+from pandas.core import internals
+from pandas.core.internals import api
+
+
+def test_internals_api():
+    assert internals.make_block is api.make_block
+
+
+def test_namespace():
+    # SUBJECT TO CHANGE
+
+    modules = [
+        "blocks",
+        "concat",
+        "managers",
+        "construction",
+        "array_manager",
+        "base",
+        "api",
+        "ops",
+    ]
+    expected = [
+        "Block",
+        "DatetimeTZBlock",
+        "ExtensionBlock",
+        "make_block",
+        "DataManager",
+        "ArrayManager",
+        "BlockManager",
+        "SingleDataManager",
+        "SingleBlockManager",
+        "SingleArrayManager",
+        "concatenate_managers",
+        "create_block_manager_from_blocks",
+    ]
+
+    result = [x for x in dir(internals) if not x.startswith("__")]
+    assert set(result) == set(expected + modules)
+
+
+def test_make_block_2d_with_dti():
+    # GH#41168
+    dti = pd.date_range("2012", periods=3, tz="UTC")
+    blk = api.make_block(dti, placement=[0])
+
+    assert blk.shape == (1, 3)
+    assert blk.values.shape == (1, 3)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/test_internals.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/test_internals.py
new file mode 100644
index 00000000..4b23829a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/test_internals.py
@@ -0,0 +1,1442 @@
+from datetime import (
+    date,
+    datetime,
+)
+import itertools
+import re
+
+import numpy as np
+import pytest
+
+from pandas._libs.internals import BlockPlacement
+from pandas.compat import IS64
+import pandas.util._test_decorators as td
+
+from pandas.core.dtypes.common import is_scalar
+
+import pandas as pd
+from pandas import (
+    Categorical,
+    DataFrame,
+    DatetimeIndex,
+    Index,
+    IntervalIndex,
+    Series,
+    Timedelta,
+    Timestamp,
+    period_range,
+)
+import pandas._testing as tm
+import pandas.core.algorithms as algos
+from pandas.core.arrays import (
+    DatetimeArray,
+    SparseArray,
+    TimedeltaArray,
+)
+from pandas.core.internals import (
+    BlockManager,
+    SingleBlockManager,
+    make_block,
+)
+from pandas.core.internals.blocks import (
+    ensure_block_shape,
+    maybe_coerce_values,
+    new_block,
+)
+
+# this file contains BlockManager specific tests
+# TODO(ArrayManager) factor out interleave_dtype tests
+pytestmark = td.skip_array_manager_invalid_test
+
+
+@pytest.fixture(params=[new_block, make_block])
+def block_maker(request):
+    """
+    Fixture to test both the internal new_block and pseudo-public make_block.
+ """ + return request.param + + +@pytest.fixture +def mgr(): + return create_mgr( + "a: f8; b: object; c: f8; d: object; e: f8;" + "f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;" + "k: M8[ns, US/Eastern]; l: M8[ns, CET];" + ) + + +def assert_block_equal(left, right): + tm.assert_numpy_array_equal(left.values, right.values) + assert left.dtype == right.dtype + assert isinstance(left.mgr_locs, BlockPlacement) + assert isinstance(right.mgr_locs, BlockPlacement) + tm.assert_numpy_array_equal(left.mgr_locs.as_array, right.mgr_locs.as_array) + + +def get_numeric_mat(shape): + arr = np.arange(shape[0]) + return np.lib.stride_tricks.as_strided( + x=arr, shape=shape, strides=(arr.itemsize,) + (0,) * (len(shape) - 1) + ).copy() + + +N = 10 + + +def create_block(typestr, placement, item_shape=None, num_offset=0, maker=new_block): + """ + Supported typestr: + + * float, f8, f4, f2 + * int, i8, i4, i2, i1 + * uint, u8, u4, u2, u1 + * complex, c16, c8 + * bool + * object, string, O + * datetime, dt, M8[ns], M8[ns, tz] + * timedelta, td, m8[ns] + * sparse (SparseArray with fill_value=0.0) + * sparse_na (SparseArray with fill_value=np.nan) + * category, category2 + + """ + placement = BlockPlacement(placement) + num_items = len(placement) + + if item_shape is None: + item_shape = (N,) + + shape = (num_items,) + item_shape + + mat = get_numeric_mat(shape) + + if typestr in ( + "float", + "f8", + "f4", + "f2", + "int", + "i8", + "i4", + "i2", + "i1", + "uint", + "u8", + "u4", + "u2", + "u1", + ): + values = mat.astype(typestr) + num_offset + elif typestr in ("complex", "c16", "c8"): + values = 1.0j * (mat.astype(typestr) + num_offset) + elif typestr in ("object", "string", "O"): + values = np.reshape([f"A{i:d}" for i in mat.ravel() + num_offset], shape) + elif typestr in ("b", "bool"): + values = np.ones(shape, dtype=np.bool_) + elif typestr in ("datetime", "dt", "M8[ns]"): + values = (mat * 1e9).astype("M8[ns]") + elif typestr.startswith("M8[ns"): + # datetime with tz + m = re.search(r"M8\[ns,\s*(\w+\/?\w*)\]", typestr) + assert m is not None, f"incompatible typestr -> {typestr}" + tz = m.groups()[0] + assert num_items == 1, "must have only 1 num items for a tz-aware" + values = DatetimeIndex(np.arange(N) * 10**9, tz=tz)._data + values = ensure_block_shape(values, ndim=len(shape)) + elif typestr in ("timedelta", "td", "m8[ns]"): + values = (mat * 1).astype("m8[ns]") + elif typestr in ("category",): + values = Categorical([1, 1, 2, 2, 3, 3, 3, 3, 4, 4]) + elif typestr in ("category2",): + values = Categorical(["a", "a", "a", "a", "b", "b", "c", "c", "c", "d"]) + elif typestr in ("sparse", "sparse_na"): + if shape[-1] != 10: + # We also are implicitly assuming this in the category cases above + raise NotImplementedError + + assert all(s == 1 for s in shape[:-1]) + if typestr.endswith("_na"): + fill_value = np.nan + else: + fill_value = 0.0 + values = SparseArray( + [fill_value, fill_value, 1, 2, 3, fill_value, 4, 5, fill_value, 6], + fill_value=fill_value, + ) + arr = values.sp_values.view() + arr += num_offset - 1 + else: + raise ValueError(f'Unsupported typestr: "{typestr}"') + + values = maybe_coerce_values(values) + return maker(values, placement=placement, ndim=len(shape)) + + +def create_single_mgr(typestr, num_rows=None): + if num_rows is None: + num_rows = N + + return SingleBlockManager( + create_block(typestr, placement=slice(0, num_rows), item_shape=()), + Index(np.arange(num_rows)), + ) + + +def create_mgr(descr, item_shape=None): + """ + Construct BlockManager from string 
description.
+
+    String description syntax looks similar to np.matrix initializer.  It looks
+    like this::
+
+        a,b,c: f8; d,e,f: i8
+
+    Rules are rather simple:
+
+    * see the list of supported datatypes in the `create_block` method
+    * components are semicolon-separated
+    * each component is `NAME,NAME,NAME: DTYPE_ID`
+    * whitespace around colons & semicolons is removed
+    * components with the same DTYPE_ID are combined into a single block
+    * to force multiple blocks with the same dtype, use '-SUFFIX'::
+
+        'a:f8-1; b:f8-2; c:f8-foobar'
+
+    """
+    if item_shape is None:
+        item_shape = (N,)
+
+    offset = 0
+    mgr_items = []
+    block_placements = {}
+    for d in descr.split(";"):
+        d = d.strip()
+        if not len(d):
+            continue
+        names, blockstr = d.partition(":")[::2]
+        blockstr = blockstr.strip()
+        names = names.strip().split(",")
+
+        mgr_items.extend(names)
+        placement = list(np.arange(len(names)) + offset)
+        try:
+            block_placements[blockstr].extend(placement)
+        except KeyError:
+            block_placements[blockstr] = placement
+        offset += len(names)
+
+    mgr_items = Index(mgr_items)
+
+    blocks = []
+    num_offset = 0
+    for blockstr, placement in block_placements.items():
+        typestr = blockstr.split("-")[0]
+        blocks.append(
+            create_block(
+                typestr, placement, item_shape=item_shape, num_offset=num_offset
+            )
+        )
+        num_offset += len(placement)
+
+    sblocks = sorted(blocks, key=lambda b: b.mgr_locs[0])
+    return BlockManager(
+        tuple(sblocks),
+        [mgr_items] + [Index(np.arange(n)) for n in item_shape],
+    )
+
+
+@pytest.fixture
+def fblock():
+    return create_block("float", [0, 2, 4])
+
+
+class TestBlock:
+    def test_constructor(self):
+        int32block = create_block("i4", [0])
+        assert int32block.dtype == np.int32
+
+    @pytest.mark.parametrize(
+        "typ, data",
+        [
+            ["float", [0, 2, 4]],
+            ["complex", [7]],
+            ["object", [1, 3]],
+            ["bool", [5]],
+        ],
+    )
+    def test_pickle(self, typ, data):
+        blk = create_block(typ, data)
+        assert_block_equal(tm.round_trip_pickle(blk), blk)
+
+    def test_mgr_locs(self, fblock):
+        assert isinstance(fblock.mgr_locs, BlockPlacement)
+        tm.assert_numpy_array_equal(
+            fblock.mgr_locs.as_array, np.array([0, 2, 4], dtype=np.intp)
+        )
+
+    def test_attrs(self, fblock):
+        assert fblock.shape == fblock.values.shape
+        assert fblock.dtype == fblock.values.dtype
+        assert len(fblock) == len(fblock.values)
+
+    def test_copy(self, fblock):
+        cop = fblock.copy()
+        assert cop is not fblock
+        assert_block_equal(fblock, cop)
+
+    def test_delete(self, fblock):
+        newb = fblock.copy()
+        locs = newb.mgr_locs
+        nb = newb.delete(0)[0]
+        assert newb.mgr_locs is locs
+
+        assert nb is not newb
+
+        tm.assert_numpy_array_equal(
+            nb.mgr_locs.as_array, np.array([2, 4], dtype=np.intp)
+        )
+        assert not (newb.values[0] == 1).all()
+        assert (nb.values[0] == 1).all()
+
+        newb = fblock.copy()
+        locs = newb.mgr_locs
+        nb = newb.delete(1)
+        assert len(nb) == 2
+        assert newb.mgr_locs is locs
+
+        tm.assert_numpy_array_equal(
+            nb[0].mgr_locs.as_array, np.array([0], dtype=np.intp)
+        )
+        tm.assert_numpy_array_equal(
+            nb[1].mgr_locs.as_array, np.array([4], dtype=np.intp)
+        )
+        assert not (newb.values[1] == 2).all()
+        assert (nb[1].values[0] == 2).all()
+
+        newb = fblock.copy()
+        nb = newb.delete(2)
+        assert len(nb) == 1
+        tm.assert_numpy_array_equal(
+            nb[0].mgr_locs.as_array, np.array([0, 2], dtype=np.intp)
+        )
+        assert (nb[0].values[1] == 1).all()
+
+        newb = fblock.copy()
+
+        with pytest.raises(IndexError, match=None):
+            newb.delete(3)
+
+    def test_delete_datetimelike(self):
+        # don't use np.delete on values, as that will coerce from DTA/TDA to ndarray
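+        # Illustrative note (not part of the vendored test): Block.delete(i)
+        # splits the block around position i and returns the surviving pieces
+        # as new blocks; for EA-backed values (DatetimeArray/TimedeltaArray)
+        # the array type must be preserved rather than coerced to ndarray,
+        # which is what this test checks.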
+ arr = np.arange(20, dtype="i8").reshape(5, 4).view("m8[ns]") + df = DataFrame(arr) + blk = df._mgr.blocks[0] + assert isinstance(blk.values, TimedeltaArray) + + nb = blk.delete(1) + assert len(nb) == 2 + assert isinstance(nb[0].values, TimedeltaArray) + assert isinstance(nb[1].values, TimedeltaArray) + + df = DataFrame(arr.view("M8[ns]")) + blk = df._mgr.blocks[0] + assert isinstance(blk.values, DatetimeArray) + + nb = blk.delete([1, 3]) + assert len(nb) == 2 + assert isinstance(nb[0].values, DatetimeArray) + assert isinstance(nb[1].values, DatetimeArray) + + def test_split(self): + # GH#37799 + values = np.random.default_rng(2).standard_normal((3, 4)) + blk = new_block(values, placement=BlockPlacement([3, 1, 6]), ndim=2) + result = blk._split() + + # check that we get views, not copies + values[:] = -9999 + assert (blk.values == -9999).all() + + assert len(result) == 3 + expected = [ + new_block(values[[0]], placement=BlockPlacement([3]), ndim=2), + new_block(values[[1]], placement=BlockPlacement([1]), ndim=2), + new_block(values[[2]], placement=BlockPlacement([6]), ndim=2), + ] + for res, exp in zip(result, expected): + assert_block_equal(res, exp) + + +class TestBlockManager: + def test_attrs(self): + mgr = create_mgr("a,b,c: f8-1; d,e,f: f8-2") + assert mgr.nblocks == 2 + assert len(mgr) == 6 + + def test_duplicate_ref_loc_failure(self): + tmp_mgr = create_mgr("a:bool; a: f8") + + axes, blocks = tmp_mgr.axes, tmp_mgr.blocks + + blocks[0].mgr_locs = BlockPlacement(np.array([0])) + blocks[1].mgr_locs = BlockPlacement(np.array([0])) + + # test trying to create block manager with overlapping ref locs + + msg = "Gaps in blk ref_locs" + + with pytest.raises(AssertionError, match=msg): + mgr = BlockManager(blocks, axes) + mgr._rebuild_blknos_and_blklocs() + + blocks[0].mgr_locs = BlockPlacement(np.array([0])) + blocks[1].mgr_locs = BlockPlacement(np.array([1])) + mgr = BlockManager(blocks, axes) + mgr.iget(1) + + def test_pickle(self, mgr): + mgr2 = tm.round_trip_pickle(mgr) + tm.assert_frame_equal(DataFrame(mgr), DataFrame(mgr2)) + + # GH2431 + assert hasattr(mgr2, "_is_consolidated") + assert hasattr(mgr2, "_known_consolidated") + + # reset to False on load + assert not mgr2._is_consolidated + assert not mgr2._known_consolidated + + @pytest.mark.parametrize("mgr_string", ["a,a,a:f8", "a: f8; a: i8"]) + def test_non_unique_pickle(self, mgr_string): + mgr = create_mgr(mgr_string) + mgr2 = tm.round_trip_pickle(mgr) + tm.assert_frame_equal(DataFrame(mgr), DataFrame(mgr2)) + + def test_categorical_block_pickle(self): + mgr = create_mgr("a: category") + mgr2 = tm.round_trip_pickle(mgr) + tm.assert_frame_equal(DataFrame(mgr), DataFrame(mgr2)) + + smgr = create_single_mgr("category") + smgr2 = tm.round_trip_pickle(smgr) + tm.assert_series_equal(Series(smgr), Series(smgr2)) + + def test_iget(self): + cols = Index(list("abc")) + values = np.random.default_rng(2).random((3, 3)) + block = new_block( + values=values.copy(), + placement=BlockPlacement(np.arange(3, dtype=np.intp)), + ndim=values.ndim, + ) + mgr = BlockManager(blocks=(block,), axes=[cols, Index(np.arange(3))]) + + tm.assert_almost_equal(mgr.iget(0).internal_values(), values[0]) + tm.assert_almost_equal(mgr.iget(1).internal_values(), values[1]) + tm.assert_almost_equal(mgr.iget(2).internal_values(), values[2]) + + def test_set(self): + mgr = create_mgr("a,b,c: int", item_shape=(3,)) + + mgr.insert(len(mgr.items), "d", np.array(["foo"] * 3)) + mgr.iset(1, np.array(["bar"] * 3)) + tm.assert_numpy_array_equal(mgr.iget(0).internal_values(), 
np.array([0] * 3))
+        tm.assert_numpy_array_equal(
+            mgr.iget(1).internal_values(), np.array(["bar"] * 3, dtype=np.object_)
+        )
+        tm.assert_numpy_array_equal(mgr.iget(2).internal_values(), np.array([2] * 3))
+        tm.assert_numpy_array_equal(
+            mgr.iget(3).internal_values(), np.array(["foo"] * 3, dtype=np.object_)
+        )
+
+    def test_set_change_dtype(self, mgr):
+        mgr.insert(len(mgr.items), "baz", np.zeros(N, dtype=bool))
+
+        mgr.iset(mgr.items.get_loc("baz"), np.repeat("foo", N))
+        idx = mgr.items.get_loc("baz")
+        assert mgr.iget(idx).dtype == np.object_
+
+        mgr2 = mgr.consolidate()
+        mgr2.iset(mgr2.items.get_loc("baz"), np.repeat("foo", N))
+        idx = mgr2.items.get_loc("baz")
+        assert mgr2.iget(idx).dtype == np.object_
+
+        mgr2.insert(
+            len(mgr2.items),
+            "quux",
+            np.random.default_rng(2).standard_normal(N).astype(int),
+        )
+        idx = mgr2.items.get_loc("quux")
+        assert mgr2.iget(idx).dtype == np.dtype(int)
+
+        mgr2.iset(
+            mgr2.items.get_loc("quux"), np.random.default_rng(2).standard_normal(N)
+        )
+        assert mgr2.iget(idx).dtype == np.float64
+
+    def test_copy(self, mgr):
+        cp = mgr.copy(deep=False)
+        for blk, cp_blk in zip(mgr.blocks, cp.blocks):
+            # view assertion
+            tm.assert_equal(cp_blk.values, blk.values)
+            if isinstance(blk.values, np.ndarray):
+                assert cp_blk.values.base is blk.values.base
+            else:
+                # DatetimeTZBlock has DatetimeIndex values
+                assert cp_blk.values._ndarray.base is blk.values._ndarray.base
+
+        # copy(deep=True) consolidates, so the block-wise assertions will
+        # fail if mgr is not consolidated
+        mgr._consolidate_inplace()
+        cp = mgr.copy(deep=True)
+        for blk, cp_blk in zip(mgr.blocks, cp.blocks):
+            bvals = blk.values
+            cpvals = cp_blk.values
+
+            tm.assert_equal(cpvals, bvals)
+
+            if isinstance(cpvals, np.ndarray):
+                lbase = cpvals.base
+                rbase = bvals.base
+            else:
+                lbase = cpvals._ndarray.base
+                rbase = bvals._ndarray.base
+
+            # copy assertion we either have a None for a base or in case of
+            # some blocks it is an array (e.g.
datetimetz), but was copied + if isinstance(cpvals, DatetimeArray): + assert (lbase is None and rbase is None) or (lbase is not rbase) + elif not isinstance(cpvals, np.ndarray): + assert lbase is not rbase + else: + assert lbase is None and rbase is None + + def test_sparse(self): + mgr = create_mgr("a: sparse-1; b: sparse-2") + assert mgr.as_array().dtype == np.float64 + + def test_sparse_mixed(self): + mgr = create_mgr("a: sparse-1; b: sparse-2; c: f8") + assert len(mgr.blocks) == 3 + assert isinstance(mgr, BlockManager) + + @pytest.mark.parametrize( + "mgr_string, dtype", + [("c: f4; d: f2", np.float32), ("c: f4; d: f2; e: f8", np.float64)], + ) + def test_as_array_float(self, mgr_string, dtype): + mgr = create_mgr(mgr_string) + assert mgr.as_array().dtype == dtype + + @pytest.mark.parametrize( + "mgr_string, dtype", + [ + ("a: bool-1; b: bool-2", np.bool_), + ("a: i8-1; b: i8-2; c: i4; d: i2; e: u1", np.int64), + ("c: i4; d: i2; e: u1", np.int32), + ], + ) + def test_as_array_int_bool(self, mgr_string, dtype): + mgr = create_mgr(mgr_string) + assert mgr.as_array().dtype == dtype + + def test_as_array_datetime(self): + mgr = create_mgr("h: datetime-1; g: datetime-2") + assert mgr.as_array().dtype == "M8[ns]" + + def test_as_array_datetime_tz(self): + mgr = create_mgr("h: M8[ns, US/Eastern]; g: M8[ns, CET]") + assert mgr.iget(0).dtype == "datetime64[ns, US/Eastern]" + assert mgr.iget(1).dtype == "datetime64[ns, CET]" + assert mgr.as_array().dtype == "object" + + @pytest.mark.parametrize("t", ["float16", "float32", "float64", "int32", "int64"]) + def test_astype(self, t): + # coerce all + mgr = create_mgr("c: f4; d: f2; e: f8") + + t = np.dtype(t) + tmgr = mgr.astype(t) + assert tmgr.iget(0).dtype.type == t + assert tmgr.iget(1).dtype.type == t + assert tmgr.iget(2).dtype.type == t + + # mixed + mgr = create_mgr("a,b: object; c: bool; d: datetime; e: f4; f: f2; g: f8") + + t = np.dtype(t) + tmgr = mgr.astype(t, errors="ignore") + assert tmgr.iget(2).dtype.type == t + assert tmgr.iget(4).dtype.type == t + assert tmgr.iget(5).dtype.type == t + assert tmgr.iget(6).dtype.type == t + + assert tmgr.iget(0).dtype.type == np.object_ + assert tmgr.iget(1).dtype.type == np.object_ + if t != np.int64: + assert tmgr.iget(3).dtype.type == np.datetime64 + else: + assert tmgr.iget(3).dtype.type == t + + def test_convert(self): + def _compare(old_mgr, new_mgr): + """compare the blocks, numeric compare ==, object don't""" + old_blocks = set(old_mgr.blocks) + new_blocks = set(new_mgr.blocks) + assert len(old_blocks) == len(new_blocks) + + # compare non-numeric + for b in old_blocks: + found = False + for nb in new_blocks: + if (b.values == nb.values).all(): + found = True + break + assert found + + for b in new_blocks: + found = False + for ob in old_blocks: + if (b.values == ob.values).all(): + found = True + break + assert found + + # noops + mgr = create_mgr("f: i8; g: f8") + new_mgr = mgr.convert(copy=True) + _compare(mgr, new_mgr) + + # convert + mgr = create_mgr("a,b,foo: object; f: i8; g: f8") + mgr.iset(0, np.array(["1"] * N, dtype=np.object_)) + mgr.iset(1, np.array(["2."] * N, dtype=np.object_)) + mgr.iset(2, np.array(["foo."] * N, dtype=np.object_)) + new_mgr = mgr.convert(copy=True) + assert new_mgr.iget(0).dtype == np.object_ + assert new_mgr.iget(1).dtype == np.object_ + assert new_mgr.iget(2).dtype == np.object_ + assert new_mgr.iget(3).dtype == np.int64 + assert new_mgr.iget(4).dtype == np.float64 + + mgr = create_mgr( + "a,b,foo: object; f: i4; bool: bool; dt: datetime; i: i8; g: f8; h: 
f2" + ) + mgr.iset(0, np.array(["1"] * N, dtype=np.object_)) + mgr.iset(1, np.array(["2."] * N, dtype=np.object_)) + mgr.iset(2, np.array(["foo."] * N, dtype=np.object_)) + new_mgr = mgr.convert(copy=True) + assert new_mgr.iget(0).dtype == np.object_ + assert new_mgr.iget(1).dtype == np.object_ + assert new_mgr.iget(2).dtype == np.object_ + assert new_mgr.iget(3).dtype == np.int32 + assert new_mgr.iget(4).dtype == np.bool_ + assert new_mgr.iget(5).dtype.type, np.datetime64 + assert new_mgr.iget(6).dtype == np.int64 + assert new_mgr.iget(7).dtype == np.float64 + assert new_mgr.iget(8).dtype == np.float16 + + def test_interleave(self): + # self + for dtype in ["f8", "i8", "object", "bool", "complex", "M8[ns]", "m8[ns]"]: + mgr = create_mgr(f"a: {dtype}") + assert mgr.as_array().dtype == dtype + mgr = create_mgr(f"a: {dtype}; b: {dtype}") + assert mgr.as_array().dtype == dtype + + @pytest.mark.parametrize( + "mgr_string, dtype", + [ + ("a: category", "i8"), + ("a: category; b: category", "i8"), + ("a: category; b: category2", "object"), + ("a: category2", "object"), + ("a: category2; b: category2", "object"), + ("a: f8", "f8"), + ("a: f8; b: i8", "f8"), + ("a: f4; b: i8", "f8"), + ("a: f4; b: i8; d: object", "object"), + ("a: bool; b: i8", "object"), + ("a: complex", "complex"), + ("a: f8; b: category", "object"), + ("a: M8[ns]; b: category", "object"), + ("a: M8[ns]; b: bool", "object"), + ("a: M8[ns]; b: i8", "object"), + ("a: m8[ns]; b: bool", "object"), + ("a: m8[ns]; b: i8", "object"), + ("a: M8[ns]; b: m8[ns]", "object"), + ], + ) + def test_interleave_dtype(self, mgr_string, dtype): + # will be converted according the actual dtype of the underlying + mgr = create_mgr("a: category") + assert mgr.as_array().dtype == "i8" + mgr = create_mgr("a: category; b: category2") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: category2") + assert mgr.as_array().dtype == "object" + + # combinations + mgr = create_mgr("a: f8") + assert mgr.as_array().dtype == "f8" + mgr = create_mgr("a: f8; b: i8") + assert mgr.as_array().dtype == "f8" + mgr = create_mgr("a: f4; b: i8") + assert mgr.as_array().dtype == "f8" + mgr = create_mgr("a: f4; b: i8; d: object") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: bool; b: i8") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: complex") + assert mgr.as_array().dtype == "complex" + mgr = create_mgr("a: f8; b: category") + assert mgr.as_array().dtype == "f8" + mgr = create_mgr("a: M8[ns]; b: category") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: M8[ns]; b: bool") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: M8[ns]; b: i8") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: m8[ns]; b: bool") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: m8[ns]; b: i8") + assert mgr.as_array().dtype == "object" + mgr = create_mgr("a: M8[ns]; b: m8[ns]") + assert mgr.as_array().dtype == "object" + + def test_consolidate_ordering_issues(self, mgr): + mgr.iset(mgr.items.get_loc("f"), np.random.default_rng(2).standard_normal(N)) + mgr.iset(mgr.items.get_loc("d"), np.random.default_rng(2).standard_normal(N)) + mgr.iset(mgr.items.get_loc("b"), np.random.default_rng(2).standard_normal(N)) + mgr.iset(mgr.items.get_loc("g"), np.random.default_rng(2).standard_normal(N)) + mgr.iset(mgr.items.get_loc("h"), np.random.default_rng(2).standard_normal(N)) + + # we have datetime/tz blocks in mgr + cons = mgr.consolidate() + assert cons.nblocks == 4 + cons = 
mgr.consolidate().get_numeric_data() + assert cons.nblocks == 1 + assert isinstance(cons.blocks[0].mgr_locs, BlockPlacement) + tm.assert_numpy_array_equal( + cons.blocks[0].mgr_locs.as_array, np.arange(len(cons.items), dtype=np.intp) + ) + + def test_reindex_items(self): + # mgr is not consolidated, f8 & f8-2 blocks + mgr = create_mgr("a: f8; b: i8; c: f8; d: i8; e: f8; f: bool; g: f8-2") + + reindexed = mgr.reindex_axis(["g", "c", "a", "d"], axis=0) + # reindex_axis does not consolidate_inplace, as that risks failing to + # invalidate _item_cache + assert not reindexed.is_consolidated() + + tm.assert_index_equal(reindexed.items, Index(["g", "c", "a", "d"])) + tm.assert_almost_equal( + mgr.iget(6).internal_values(), reindexed.iget(0).internal_values() + ) + tm.assert_almost_equal( + mgr.iget(2).internal_values(), reindexed.iget(1).internal_values() + ) + tm.assert_almost_equal( + mgr.iget(0).internal_values(), reindexed.iget(2).internal_values() + ) + tm.assert_almost_equal( + mgr.iget(3).internal_values(), reindexed.iget(3).internal_values() + ) + + def test_get_numeric_data(self, using_copy_on_write): + mgr = create_mgr( + "int: int; float: float; complex: complex;" + "str: object; bool: bool; obj: object; dt: datetime", + item_shape=(3,), + ) + mgr.iset(5, np.array([1, 2, 3], dtype=np.object_)) + + numeric = mgr.get_numeric_data() + tm.assert_index_equal(numeric.items, Index(["int", "float", "complex", "bool"])) + tm.assert_almost_equal( + mgr.iget(mgr.items.get_loc("float")).internal_values(), + numeric.iget(numeric.items.get_loc("float")).internal_values(), + ) + + # Check sharing + numeric.iset( + numeric.items.get_loc("float"), + np.array([100.0, 200.0, 300.0]), + inplace=True, + ) + if using_copy_on_write: + tm.assert_almost_equal( + mgr.iget(mgr.items.get_loc("float")).internal_values(), + np.array([1.0, 1.0, 1.0]), + ) + else: + tm.assert_almost_equal( + mgr.iget(mgr.items.get_loc("float")).internal_values(), + np.array([100.0, 200.0, 300.0]), + ) + + numeric2 = mgr.get_numeric_data(copy=True) + tm.assert_index_equal(numeric.items, Index(["int", "float", "complex", "bool"])) + numeric2.iset( + numeric2.items.get_loc("float"), + np.array([1000.0, 2000.0, 3000.0]), + inplace=True, + ) + if using_copy_on_write: + tm.assert_almost_equal( + mgr.iget(mgr.items.get_loc("float")).internal_values(), + np.array([1.0, 1.0, 1.0]), + ) + else: + tm.assert_almost_equal( + mgr.iget(mgr.items.get_loc("float")).internal_values(), + np.array([100.0, 200.0, 300.0]), + ) + + def test_get_bool_data(self, using_copy_on_write): + mgr = create_mgr( + "int: int; float: float; complex: complex;" + "str: object; bool: bool; obj: object; dt: datetime", + item_shape=(3,), + ) + mgr.iset(6, np.array([True, False, True], dtype=np.object_)) + + bools = mgr.get_bool_data() + tm.assert_index_equal(bools.items, Index(["bool"])) + tm.assert_almost_equal( + mgr.iget(mgr.items.get_loc("bool")).internal_values(), + bools.iget(bools.items.get_loc("bool")).internal_values(), + ) + + bools.iset(0, np.array([True, False, True]), inplace=True) + if using_copy_on_write: + tm.assert_numpy_array_equal( + mgr.iget(mgr.items.get_loc("bool")).internal_values(), + np.array([True, True, True]), + ) + else: + tm.assert_numpy_array_equal( + mgr.iget(mgr.items.get_loc("bool")).internal_values(), + np.array([True, False, True]), + ) + + # Check sharing + bools2 = mgr.get_bool_data(copy=True) + bools2.iset(0, np.array([False, True, False])) + if using_copy_on_write: + tm.assert_numpy_array_equal( + 
mgr.iget(mgr.items.get_loc("bool")).internal_values(), + np.array([True, True, True]), + ) + else: + tm.assert_numpy_array_equal( + mgr.iget(mgr.items.get_loc("bool")).internal_values(), + np.array([True, False, True]), + ) + + def test_unicode_repr_doesnt_raise(self): + repr(create_mgr("b,\u05d0: object")) + + @pytest.mark.parametrize( + "mgr_string", ["a,b,c: i8-1; d,e,f: i8-2", "a,a,a: i8-1; b,b,b: i8-2"] + ) + def test_equals(self, mgr_string): + # unique items + bm1 = create_mgr(mgr_string) + bm2 = BlockManager(bm1.blocks[::-1], bm1.axes) + assert bm1.equals(bm2) + + @pytest.mark.parametrize( + "mgr_string", + [ + "a:i8;b:f8", # basic case + "a:i8;b:f8;c:c8;d:b", # many types + "a:i8;e:dt;f:td;g:string", # more types + "a:i8;b:category;c:category2", # categories + "c:sparse;d:sparse_na;b:f8", # sparse + ], + ) + def test_equals_block_order_different_dtypes(self, mgr_string): + # GH 9330 + bm = create_mgr(mgr_string) + block_perms = itertools.permutations(bm.blocks) + for bm_perm in block_perms: + bm_this = BlockManager(tuple(bm_perm), bm.axes) + assert bm.equals(bm_this) + assert bm_this.equals(bm) + + def test_single_mgr_ctor(self): + mgr = create_single_mgr("f8", num_rows=5) + assert mgr.external_values().tolist() == [0.0, 1.0, 2.0, 3.0, 4.0] + + @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0]) + def test_validate_bool_args(self, value): + bm1 = create_mgr("a,b,c: i8-1; d,e,f: i8-2") + + msg = ( + 'For argument "inplace" expected type bool, ' + f"received type {type(value).__name__}." + ) + with pytest.raises(ValueError, match=msg): + bm1.replace_list([1], [2], inplace=value) + + def test_iset_split_block(self): + bm = create_mgr("a,b,c: i8; d: f8") + bm._iset_split_block(0, np.array([0])) + tm.assert_numpy_array_equal( + bm.blklocs, np.array([0, 0, 1, 0], dtype="int64" if IS64 else "int32") + ) + # First indexer currently does not have a block associated with it in case + tm.assert_numpy_array_equal( + bm.blknos, np.array([0, 0, 0, 1], dtype="int64" if IS64 else "int32") + ) + assert len(bm.blocks) == 2 + + def test_iset_split_block_values(self): + bm = create_mgr("a,b,c: i8; d: f8") + bm._iset_split_block(0, np.array([0]), np.array([list(range(10))])) + tm.assert_numpy_array_equal( + bm.blklocs, np.array([0, 0, 1, 0], dtype="int64" if IS64 else "int32") + ) + # First indexer currently does not have a block associated with it in case + tm.assert_numpy_array_equal( + bm.blknos, np.array([0, 2, 2, 1], dtype="int64" if IS64 else "int32") + ) + assert len(bm.blocks) == 3 + + +def _as_array(mgr): + if mgr.ndim == 1: + return mgr.external_values() + return mgr.as_array().T + + +class TestIndexing: + # Nosetests-style data-driven tests. + # + # This test applies different indexing routines to block managers and + # compares the outcome to the result of same operations on np.ndarray. + # + # NOTE: sparse (SparseBlock with fill_value != np.nan) fail a lot of tests + # and are disabled. 
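+    # Illustrative sketch (not part of the vendored test) of the pattern the
+    # methods below follow: flatten the manager and compare with the same
+    # numpy operation, roughly
+    #   mat = _as_array(mgr)
+    #   sliced = mgr.get_slice(slobj, axis=axis)
+    #   tm.assert_numpy_array_equal(
+    #       mat[(slice(None),) * axis + (slobj,)], _as_array(sliced),
+    #       check_dtype=False,
+    #   )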
+
+    MANAGERS = [
+        create_single_mgr("f8", N),
+        create_single_mgr("i8", N),
+        # 2-dim
+        create_mgr("a,b,c,d,e,f: f8", item_shape=(N,)),
+        create_mgr("a,b,c,d,e,f: i8", item_shape=(N,)),
+        create_mgr("a,b: f8; c,d: i8; e,f: string", item_shape=(N,)),
+        create_mgr("a,b: f8; c,d: i8; e,f: f8", item_shape=(N,)),
+    ]
+
+    @pytest.mark.parametrize("mgr", MANAGERS)
+    def test_get_slice(self, mgr):
+        def assert_slice_ok(mgr, axis, slobj):
+            mat = _as_array(mgr)
+
+            # we may be using an ndarray to test slicing and
+            # it might not be the full length of the axis
+            if isinstance(slobj, np.ndarray):
+                ax = mgr.axes[axis]
+                if len(ax) and len(slobj) and len(slobj) != len(ax):
+                    slobj = np.concatenate(
+                        [slobj, np.zeros(len(ax) - len(slobj), dtype=bool)]
+                    )
+
+            if isinstance(slobj, slice):
+                sliced = mgr.get_slice(slobj, axis=axis)
+            elif (
+                mgr.ndim == 1
+                and axis == 0
+                and isinstance(slobj, np.ndarray)
+                and slobj.dtype == bool
+            ):
+                sliced = mgr.get_rows_with_mask(slobj)
+            else:
+                # BlockManager doesn't support non-slice, SingleBlockManager
+                # doesn't support axis > 0
+                raise TypeError(slobj)
+
+            mat_slobj = (slice(None),) * axis + (slobj,)
+            tm.assert_numpy_array_equal(
+                mat[mat_slobj], _as_array(sliced), check_dtype=False
+            )
+            tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis])
+
+        assert mgr.ndim <= 2, mgr.ndim
+        for ax in range(mgr.ndim):
+            # slice
+            assert_slice_ok(mgr, ax, slice(None))
+            assert_slice_ok(mgr, ax, slice(3))
+            assert_slice_ok(mgr, ax, slice(100))
+            assert_slice_ok(mgr, ax, slice(1, 4))
+            assert_slice_ok(mgr, ax, slice(3, 0, -2))
+
+            if mgr.ndim < 2:
+                # 2D only supports slice objects
+
+                # boolean mask
+                assert_slice_ok(mgr, ax, np.array([], dtype=np.bool_))
+                assert_slice_ok(mgr, ax, np.ones(mgr.shape[ax], dtype=np.bool_))
+                assert_slice_ok(mgr, ax, np.zeros(mgr.shape[ax], dtype=np.bool_))
+
+                if mgr.shape[ax] >= 3:
+                    assert_slice_ok(mgr, ax, np.arange(mgr.shape[ax]) % 3 == 0)
+                    assert_slice_ok(
+                        mgr, ax, np.array([True, True, False], dtype=np.bool_)
+                    )
+
+    @pytest.mark.parametrize("mgr", MANAGERS)
+    def test_take(self, mgr):
+        def assert_take_ok(mgr, axis, indexer):
+            mat = _as_array(mgr)
+            taken = mgr.take(indexer, axis)
+            tm.assert_numpy_array_equal(
+                np.take(mat, indexer, axis), _as_array(taken), check_dtype=False
+            )
+            tm.assert_index_equal(mgr.axes[axis].take(indexer), taken.axes[axis])
+
+        for ax in range(mgr.ndim):
+            # take/fancy indexer
+            assert_take_ok(mgr, ax, indexer=np.array([], dtype=np.intp))
+            assert_take_ok(mgr, ax, indexer=np.array([0, 0, 0], dtype=np.intp))
+            assert_take_ok(
+                mgr, ax, indexer=np.array(list(range(mgr.shape[ax])), dtype=np.intp)
+            )
+
+            if mgr.shape[ax] >= 3:
+                assert_take_ok(mgr, ax, indexer=np.array([0, 1, 2], dtype=np.intp))
+                assert_take_ok(mgr, ax, indexer=np.array([-1, -2, -3], dtype=np.intp))
+
+    @pytest.mark.parametrize("mgr", MANAGERS)
+    @pytest.mark.parametrize("fill_value", [None, np.nan, 100.0])
+    def test_reindex_axis(self, fill_value, mgr):
+        def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):
+            mat = _as_array(mgr)
+            indexer = mgr.axes[axis].get_indexer_for(new_labels)
+
+            reindexed = mgr.reindex_axis(new_labels, axis, fill_value=fill_value)
+            tm.assert_numpy_array_equal(
+                algos.take_nd(mat, indexer, axis, fill_value=fill_value),
+                _as_array(reindexed),
+                check_dtype=False,
+            )
+            tm.assert_index_equal(reindexed.axes[axis], new_labels)
+
+        for ax in range(mgr.ndim):
+            assert_reindex_axis_is_ok(mgr, ax, Index([]), fill_value)
+            assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax], fill_value)
+
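+            # Illustrative note (not part of the vendored test): labels absent
+            # from the axis map to -1 in get_indexer_for, and algos.take_nd
+            # fills those positions with fill_value; the Index(["foo", "bar",
+            # "baz"]) cases below exercise exactly that path.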
assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][[0, 0, 0]], fill_value) + assert_reindex_axis_is_ok(mgr, ax, Index(["foo", "bar", "baz"]), fill_value) + assert_reindex_axis_is_ok( + mgr, ax, Index(["foo", mgr.axes[ax][0], "baz"]), fill_value + ) + + if mgr.shape[ax] >= 3: + assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][:-3], fill_value) + assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][-3::-1], fill_value) + assert_reindex_axis_is_ok( + mgr, ax, mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value + ) + + @pytest.mark.parametrize("mgr", MANAGERS) + @pytest.mark.parametrize("fill_value", [None, np.nan, 100.0]) + def test_reindex_indexer(self, fill_value, mgr): + def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer, fill_value): + mat = _as_array(mgr) + reindexed_mat = algos.take_nd(mat, indexer, axis, fill_value=fill_value) + reindexed = mgr.reindex_indexer( + new_labels, indexer, axis, fill_value=fill_value + ) + tm.assert_numpy_array_equal( + reindexed_mat, _as_array(reindexed), check_dtype=False + ) + tm.assert_index_equal(reindexed.axes[axis], new_labels) + + for ax in range(mgr.ndim): + assert_reindex_indexer_is_ok( + mgr, ax, Index([]), np.array([], dtype=np.intp), fill_value + ) + assert_reindex_indexer_is_ok( + mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value + ) + assert_reindex_indexer_is_ok( + mgr, + ax, + Index(["foo"] * mgr.shape[ax]), + np.arange(mgr.shape[ax]), + fill_value, + ) + assert_reindex_indexer_is_ok( + mgr, ax, mgr.axes[ax][::-1], np.arange(mgr.shape[ax]), fill_value + ) + assert_reindex_indexer_is_ok( + mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax])[::-1], fill_value + ) + assert_reindex_indexer_is_ok( + mgr, ax, Index(["foo", "bar", "baz"]), np.array([0, 0, 0]), fill_value + ) + assert_reindex_indexer_is_ok( + mgr, ax, Index(["foo", "bar", "baz"]), np.array([-1, 0, -1]), fill_value + ) + assert_reindex_indexer_is_ok( + mgr, + ax, + Index(["foo", mgr.axes[ax][0], "baz"]), + np.array([-1, -1, -1]), + fill_value, + ) + + if mgr.shape[ax] >= 3: + assert_reindex_indexer_is_ok( + mgr, + ax, + Index(["foo", "bar", "baz"]), + np.array([0, 1, 2]), + fill_value, + ) + + +class TestBlockPlacement: + @pytest.mark.parametrize( + "slc, expected", + [ + (slice(0, 4), 4), + (slice(0, 4, 2), 2), + (slice(0, 3, 2), 2), + (slice(0, 1, 2), 1), + (slice(1, 0, -1), 1), + ], + ) + def test_slice_len(self, slc, expected): + assert len(BlockPlacement(slc)) == expected + + @pytest.mark.parametrize("slc", [slice(1, 1, 0), slice(1, 2, 0)]) + def test_zero_step_raises(self, slc): + msg = "slice step cannot be zero" + with pytest.raises(ValueError, match=msg): + BlockPlacement(slc) + + def test_slice_canonize_negative_stop(self): + # GH#37524 negative stop is OK with negative step and positive start + slc = slice(3, -1, -2) + + bp = BlockPlacement(slc) + assert bp.indexer == slice(3, None, -2) + + @pytest.mark.parametrize( + "slc", + [ + slice(None, None), + slice(10, None), + slice(None, None, -1), + slice(None, 10, -1), + # These are "unbounded" because negative index will + # change depending on container shape. 
+ slice(-1, None), + slice(None, -1), + slice(-1, -1), + slice(-1, None, -1), + slice(None, -1, -1), + slice(-1, -1, -1), + ], + ) + def test_unbounded_slice_raises(self, slc): + msg = "unbounded slice" + with pytest.raises(ValueError, match=msg): + BlockPlacement(slc) + + @pytest.mark.parametrize( + "slc", + [ + slice(0, 0), + slice(100, 0), + slice(100, 100), + slice(100, 100, -1), + slice(0, 100, -1), + ], + ) + def test_not_slice_like_slices(self, slc): + assert not BlockPlacement(slc).is_slice_like + + @pytest.mark.parametrize( + "arr, slc", + [ + ([0], slice(0, 1, 1)), + ([100], slice(100, 101, 1)), + ([0, 1, 2], slice(0, 3, 1)), + ([0, 5, 10], slice(0, 15, 5)), + ([0, 100], slice(0, 200, 100)), + ([2, 1], slice(2, 0, -1)), + ], + ) + def test_array_to_slice_conversion(self, arr, slc): + assert BlockPlacement(arr).as_slice == slc + + @pytest.mark.parametrize( + "arr", + [ + [], + [-1], + [-1, -2, -3], + [-10], + [-1], + [-1, 0, 1, 2], + [-2, 0, 2, 4], + [1, 0, -1], + [1, 1, 1], + ], + ) + def test_not_slice_like_arrays(self, arr): + assert not BlockPlacement(arr).is_slice_like + + @pytest.mark.parametrize( + "slc, expected", + [(slice(0, 3), [0, 1, 2]), (slice(0, 0), []), (slice(3, 0), [])], + ) + def test_slice_iter(self, slc, expected): + assert list(BlockPlacement(slc)) == expected + + @pytest.mark.parametrize( + "slc, arr", + [ + (slice(0, 3), [0, 1, 2]), + (slice(0, 0), []), + (slice(3, 0), []), + (slice(3, 0, -1), [3, 2, 1]), + ], + ) + def test_slice_to_array_conversion(self, slc, arr): + tm.assert_numpy_array_equal( + BlockPlacement(slc).as_array, np.asarray(arr, dtype=np.intp) + ) + + def test_blockplacement_add(self): + bpl = BlockPlacement(slice(0, 5)) + assert bpl.add(1).as_slice == slice(1, 6, 1) + assert bpl.add(np.arange(5)).as_slice == slice(0, 10, 2) + assert list(bpl.add(np.arange(5, 0, -1))) == [5, 5, 5, 5, 5] + + @pytest.mark.parametrize( + "val, inc, expected", + [ + (slice(0, 0), 0, []), + (slice(1, 4), 0, [1, 2, 3]), + (slice(3, 0, -1), 0, [3, 2, 1]), + ([1, 2, 4], 0, [1, 2, 4]), + (slice(0, 0), 10, []), + (slice(1, 4), 10, [11, 12, 13]), + (slice(3, 0, -1), 10, [13, 12, 11]), + ([1, 2, 4], 10, [11, 12, 14]), + (slice(0, 0), -1, []), + (slice(1, 4), -1, [0, 1, 2]), + ([1, 2, 4], -1, [0, 1, 3]), + ], + ) + def test_blockplacement_add_int(self, val, inc, expected): + assert list(BlockPlacement(val).add(inc)) == expected + + @pytest.mark.parametrize("val", [slice(1, 4), [1, 2, 4]]) + def test_blockplacement_add_int_raises(self, val): + msg = "iadd causes length change" + with pytest.raises(ValueError, match=msg): + BlockPlacement(val).add(-10) + + +class TestCanHoldElement: + @pytest.fixture( + params=[ + lambda x: x, + lambda x: x.to_series(), + lambda x: x._data, + lambda x: list(x), + lambda x: x.astype(object), + lambda x: np.asarray(x), + lambda x: x[0], + lambda x: x[:0], + ] + ) + def element(self, request): + """ + Functions that take an Index and return an element that should have + blk._can_hold_element(element) for a Block with this index's dtype. 
+ """ + return request.param + + def test_datetime_block_can_hold_element(self): + block = create_block("datetime", [0]) + + assert block._can_hold_element([]) + + # We will check that block._can_hold_element iff arr.__setitem__ works + arr = pd.array(block.values.ravel()) + + # coerce None + assert block._can_hold_element(None) + arr[0] = None + assert arr[0] is pd.NaT + + # coerce different types of datetime objects + vals = [np.datetime64("2010-10-10"), datetime(2010, 10, 10)] + for val in vals: + assert block._can_hold_element(val) + arr[0] = val + + val = date(2010, 10, 10) + assert not block._can_hold_element(val) + + msg = ( + "value should be a 'Timestamp', 'NaT', " + "or array of those. Got 'date' instead." + ) + with pytest.raises(TypeError, match=msg): + arr[0] = val + + @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64]) + def test_interval_can_hold_element_emptylist(self, dtype, element): + arr = np.array([1, 3, 4], dtype=dtype) + ii = IntervalIndex.from_breaks(arr) + blk = new_block(ii._data, BlockPlacement([1]), ndim=2) + + assert blk._can_hold_element([]) + # TODO: check this holds for all blocks + + @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64]) + def test_interval_can_hold_element(self, dtype, element): + arr = np.array([1, 3, 4, 9], dtype=dtype) + ii = IntervalIndex.from_breaks(arr) + blk = new_block(ii._data, BlockPlacement([1]), ndim=2) + + elem = element(ii) + self.check_series_setitem(elem, ii, True) + assert blk._can_hold_element(elem) + + # Careful: to get the expected Series-inplace behavior we need + # `elem` to not have the same length as `arr` + ii2 = IntervalIndex.from_breaks(arr[:-1], closed="neither") + elem = element(ii2) + with tm.assert_produces_warning(FutureWarning): + self.check_series_setitem(elem, ii, False) + assert not blk._can_hold_element(elem) + + ii3 = IntervalIndex.from_breaks([Timestamp(1), Timestamp(3), Timestamp(4)]) + elem = element(ii3) + with tm.assert_produces_warning(FutureWarning): + self.check_series_setitem(elem, ii, False) + assert not blk._can_hold_element(elem) + + ii4 = IntervalIndex.from_breaks([Timedelta(1), Timedelta(3), Timedelta(4)]) + elem = element(ii4) + with tm.assert_produces_warning(FutureWarning): + self.check_series_setitem(elem, ii, False) + assert not blk._can_hold_element(elem) + + def test_period_can_hold_element_emptylist(self): + pi = period_range("2016", periods=3, freq="A") + blk = new_block(pi._data.reshape(1, 3), BlockPlacement([1]), ndim=2) + + assert blk._can_hold_element([]) + + def test_period_can_hold_element(self, element): + pi = period_range("2016", periods=3, freq="A") + + elem = element(pi) + self.check_series_setitem(elem, pi, True) + + # Careful: to get the expected Series-inplace behavior we need + # `elem` to not have the same length as `arr` + pi2 = pi.asfreq("D")[:-1] + elem = element(pi2) + with tm.assert_produces_warning(FutureWarning): + self.check_series_setitem(elem, pi, False) + + dti = pi.to_timestamp("S")[:-1] + elem = element(dti) + with tm.assert_produces_warning(FutureWarning): + self.check_series_setitem(elem, pi, False) + + def check_can_hold_element(self, obj, elem, inplace: bool): + blk = obj._mgr.blocks[0] + if inplace: + assert blk._can_hold_element(elem) + else: + assert not blk._can_hold_element(elem) + + def check_series_setitem(self, elem, index: Index, inplace: bool): + arr = index._data.copy() + ser = Series(arr, copy=False) + + self.check_can_hold_element(ser, elem, inplace) + + if is_scalar(elem): + ser[0] = elem + else: + 
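+            # listlike elem: assign through a slice of the same length, so the
+            # setitem call itself is always valid; whether it happened inplace
+            # is checked below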
+            ser[: len(elem)] = elem
+
+        if inplace:
+            assert ser.array is arr  # i.e. setting was done inplace
+        else:
+            assert ser.dtype == object
+
+
+class TestShouldStore:
+    def test_should_store_categorical(self):
+        cat = Categorical(["A", "B", "C"])
+        df = DataFrame(cat)
+        blk = df._mgr.blocks[0]
+
+        # matching dtype
+        assert blk.should_store(cat)
+        assert blk.should_store(cat[:-1])
+
+        # different dtype
+        assert not blk.should_store(cat.as_ordered())
+
+        # ndarray instead of Categorical
+        assert not blk.should_store(np.asarray(cat))
+
+
+def test_validate_ndim():
+    values = np.array([1.0, 2.0])
+    placement = BlockPlacement(slice(2))
+    msg = r"Wrong number of dimensions. values.ndim != ndim \[1 != 2\]"
+
+    with pytest.raises(ValueError, match=msg):
+        make_block(values, placement, ndim=2)
+
+
+def test_block_shape():
+    idx = Index([0, 1, 2, 3, 4])
+    a = Series([1, 2, 3]).reindex(idx)
+    b = Series(Categorical([1, 2, 3])).reindex(idx)
+
+    assert a._mgr.blocks[0].mgr_locs.indexer == b._mgr.blocks[0].mgr_locs.indexer
+
+
+def test_make_block_no_pandas_array(block_maker):
+    # https://github.com/pandas-dev/pandas/pull/24866
+    arr = pd.arrays.NumpyExtensionArray(np.array([1, 2]))
+
+    # NumpyExtensionArray, no dtype
+    result = block_maker(arr, BlockPlacement(slice(len(arr))), ndim=arr.ndim)
+    assert result.dtype.kind in ["i", "u"]
+
+    if block_maker is make_block:
+        # new_block requires caller to unwrap NumpyExtensionArray
+        assert result.is_extension is False
+
+        # NumpyExtensionArray, NumpyEADtype
+        result = block_maker(arr, slice(len(arr)), dtype=arr.dtype, ndim=arr.ndim)
+        assert result.dtype.kind in ["i", "u"]
+        assert result.is_extension is False
+
+        # new_block no longer takes the dtype keyword
+        # ndarray, NumpyEADtype
+        result = block_maker(
+            arr.to_numpy(), slice(len(arr)), dtype=arr.dtype, ndim=arr.ndim
+        )
+        assert result.dtype.kind in ["i", "u"]
+        assert result.is_extension is False
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/test_managers.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/test_managers.py
new file mode 100644
index 00000000..75aa901f
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/internals/test_managers.py
@@ -0,0 +1,70 @@
+"""
+Testing interaction between the different managers (BlockManager, ArrayManager)
+"""
+from pandas.core.dtypes.missing import array_equivalent
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.core.internals import (
+    ArrayManager,
+    BlockManager,
+    SingleArrayManager,
+    SingleBlockManager,
+)
+
+
+def test_dataframe_creation():
+    with pd.option_context("mode.data_manager", "block"):
+        df_block = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]})
+    assert isinstance(df_block._mgr, BlockManager)
+
+    with pd.option_context("mode.data_manager", "array"):
+        df_array = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]})
+    assert isinstance(df_array._mgr, ArrayManager)
+
+    # also ensure both are seen as equal
+    tm.assert_frame_equal(df_block, df_array)
+
+    # conversion from one manager to the other
+    result = df_block._as_manager("block")
+    assert isinstance(result._mgr, BlockManager)
+    result = df_block._as_manager("array")
+    assert isinstance(result._mgr, ArrayManager)
+    tm.assert_frame_equal(result, df_block)
+    assert all(
+        array_equivalent(left, right)
+        for left, right in zip(result._mgr.arrays, df_array._mgr.arrays)
+    )
+
+    result = df_array._as_manager("array")
+    assert isinstance(result._mgr, ArrayManager)
+    result =
df_array._as_manager("block") + assert isinstance(result._mgr, BlockManager) + tm.assert_frame_equal(result, df_array) + assert len(result._mgr.blocks) == 2 + + +def test_series_creation(): + with pd.option_context("mode.data_manager", "block"): + s_block = pd.Series([1, 2, 3], name="A", index=["a", "b", "c"]) + assert isinstance(s_block._mgr, SingleBlockManager) + + with pd.option_context("mode.data_manager", "array"): + s_array = pd.Series([1, 2, 3], name="A", index=["a", "b", "c"]) + assert isinstance(s_array._mgr, SingleArrayManager) + + # also ensure both are seen as equal + tm.assert_series_equal(s_block, s_array) + + # conversion from one manager to the other + result = s_block._as_manager("block") + assert isinstance(result._mgr, SingleBlockManager) + result = s_block._as_manager("array") + assert isinstance(result._mgr, SingleArrayManager) + tm.assert_series_equal(result, s_block) + + result = s_array._as_manager("array") + assert isinstance(result._mgr, SingleArrayManager) + result = s_array._as_manager("block") + assert isinstance(result._mgr, SingleBlockManager) + tm.assert_series_equal(result, s_array) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/conftest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/conftest.py new file mode 100644 index 00000000..701bfe37 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/conftest.py @@ -0,0 +1,252 @@ +import shlex +import subprocess +import time +import uuid + +import pytest + +from pandas.compat import ( + is_ci_environment, + is_platform_arm, + is_platform_mac, + is_platform_windows, +) +import pandas.util._test_decorators as td + +import pandas.io.common as icom +from pandas.io.parsers import read_csv + + +@pytest.fixture +def compression_to_extension(): + return {value: key for key, value in icom.extension_to_compression.items()} + + +@pytest.fixture +def tips_file(datapath): + """Path to the tips dataset""" + return datapath("io", "data", "csv", "tips.csv") + + +@pytest.fixture +def jsonl_file(datapath): + """Path to a JSONL dataset""" + return datapath("io", "parser", "data", "items.jsonl") + + +@pytest.fixture +def salaries_table(datapath): + """DataFrame with the salaries dataset""" + return read_csv(datapath("io", "parser", "data", "salaries.csv"), sep="\t") + + +@pytest.fixture +def feather_file(datapath): + return datapath("io", "data", "feather", "feather-0_3_1.feather") + + +@pytest.fixture +def xml_file(datapath): + return datapath("io", "data", "xml", "books.xml") + + +@pytest.fixture +def s3so(worker_id): + if is_ci_environment(): + url = "http://localhost:5000/" + else: + worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw") + url = f"http://127.0.0.1:555{worker_id}/" + return {"client_kwargs": {"endpoint_url": url}} + + +@pytest.fixture(scope="function" if is_ci_environment() else "session") +def monkeysession(): + with pytest.MonkeyPatch.context() as mp: + yield mp + + +@pytest.fixture(scope="function" if is_ci_environment() else "session") +def s3_base(worker_id, monkeysession): + """ + Fixture for mocking S3 interaction. 
+ + Sets up moto server in separate process locally + Return url for motoserver/moto CI service + """ + pytest.importorskip("s3fs") + pytest.importorskip("boto3") + + # temporary workaround as moto fails for botocore >= 1.11 otherwise, + # see https://github.com/spulec/moto/issues/1924 & 1952 + monkeysession.setenv("AWS_ACCESS_KEY_ID", "foobar_key") + monkeysession.setenv("AWS_SECRET_ACCESS_KEY", "foobar_secret") + if is_ci_environment(): + if is_platform_arm() or is_platform_mac() or is_platform_windows(): + # NOT RUN on Windows/macOS/ARM, only Ubuntu + # - subprocess in CI can cause timeouts + # - GitHub Actions do not support + # container services for the above OSs + # - CircleCI will probably hit the Docker rate pull limit + pytest.skip( + "S3 tests do not have a corresponding service in " + "Windows, macOS or ARM platforms" + ) + else: + yield "http://localhost:5000" + else: + requests = pytest.importorskip("requests") + pytest.importorskip("moto", minversion="1.3.14") + pytest.importorskip("flask") # server mode needs flask too + + # Launching moto in server mode, i.e., as a separate process + # with an S3 endpoint on localhost + + worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw") + endpoint_port = f"555{worker_id}" + endpoint_uri = f"http://127.0.0.1:{endpoint_port}/" + + # pipe to null to avoid logging in terminal + with subprocess.Popen( + shlex.split(f"moto_server s3 -p {endpoint_port}"), + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) as proc: + timeout = 5 + while timeout > 0: + try: + # OK to go once server is accepting connections + r = requests.get(endpoint_uri) + if r.ok: + break + except Exception: + pass + timeout -= 0.1 + time.sleep(0.1) + yield endpoint_uri + + proc.terminate() + + +@pytest.fixture +def s3_resource(s3_base): + import boto3 + + s3 = boto3.resource("s3", endpoint_url=s3_base) + return s3 + + +@pytest.fixture +def s3_public_bucket(s3_resource): + bucket = s3_resource.Bucket(f"pandas-test-{uuid.uuid4()}") + bucket.create() + yield bucket + bucket.objects.delete() + bucket.delete() + + +@pytest.fixture +def s3_public_bucket_with_data( + s3_public_bucket, tips_file, jsonl_file, feather_file, xml_file +): + """ + The following datasets + are loaded. + + - tips.csv + - tips.csv.gz + - tips.csv.bz2 + - items.jsonl + """ + test_s3_files = [ + ("tips#1.csv", tips_file), + ("tips.csv", tips_file), + ("tips.csv.gz", tips_file + ".gz"), + ("tips.csv.bz2", tips_file + ".bz2"), + ("items.jsonl", jsonl_file), + ("simple_dataset.feather", feather_file), + ("books.xml", xml_file), + ] + for s3_key, file_name in test_s3_files: + with open(file_name, "rb") as f: + s3_public_bucket.put_object(Key=s3_key, Body=f) + return s3_public_bucket + + +@pytest.fixture +def s3_private_bucket(s3_resource): + bucket = s3_resource.Bucket(f"cant_get_it-{uuid.uuid4()}") + bucket.create(ACL="private") + yield bucket + bucket.objects.delete() + bucket.delete() + + +@pytest.fixture +def s3_private_bucket_with_data( + s3_private_bucket, tips_file, jsonl_file, feather_file, xml_file +): + """ + The following datasets + are loaded. 
+ + - tips.csv + - tips.csv.gz + - tips.csv.bz2 + - items.jsonl + """ + test_s3_files = [ + ("tips#1.csv", tips_file), + ("tips.csv", tips_file), + ("tips.csv.gz", tips_file + ".gz"), + ("tips.csv.bz2", tips_file + ".bz2"), + ("items.jsonl", jsonl_file), + ("simple_dataset.feather", feather_file), + ("books.xml", xml_file), + ] + for s3_key, file_name in test_s3_files: + with open(file_name, "rb") as f: + s3_private_bucket.put_object(Key=s3_key, Body=f) + return s3_private_bucket + + +_compression_formats_params = [ + (".no_compress", None), + ("", None), + (".gz", "gzip"), + (".GZ", "gzip"), + (".bz2", "bz2"), + (".BZ2", "bz2"), + (".zip", "zip"), + (".ZIP", "zip"), + (".xz", "xz"), + (".XZ", "xz"), + pytest.param((".zst", "zstd"), marks=td.skip_if_no("zstandard")), + pytest.param((".ZST", "zstd"), marks=td.skip_if_no("zstandard")), +] + + +@pytest.fixture(params=_compression_formats_params[1:]) +def compression_format(request): + return request.param + + +@pytest.fixture(params=_compression_formats_params) +def compression_ext(request): + return request.param[0] + + +@pytest.fixture( + params=[ + "python", + pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")), + ] +) +def string_storage(request): + """ + Parametrized fixture for pd.options.mode.string_storage. + + * 'python' + * 'pyarrow' + """ + return request.param diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/conftest.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/conftest.py new file mode 100644 index 00000000..15ff52d5 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/conftest.py @@ -0,0 +1,41 @@ +import pytest + +import pandas._testing as tm + +from pandas.io.parsers import read_csv + + +@pytest.fixture +def frame(float_frame): + """ + Returns the first ten items in fixture "float_frame". + """ + return float_frame[:10] + + +@pytest.fixture +def tsframe(): + return tm.makeTimeDataFrame()[:5] + + +@pytest.fixture(params=[True, False]) +def merge_cells(request): + return request.param + + +@pytest.fixture +def df_ref(datapath): + """ + Obtain the reference data from read_csv with the Python engine. + """ + filepath = datapath("io", "data", "csv", "test1.csv") + df_ref = read_csv(filepath, index_col=0, parse_dates=True, engine="python") + return df_ref + + +@pytest.fixture(params=[".xls", ".xlsx", ".xlsm", ".ods", ".xlsb"]) +def read_ext(request): + """ + Valid extensions for reading Excel files. 
+    """
+    return request.param
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_odf.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_odf.py
new file mode 100644
index 00000000..25079b23
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_odf.py
@@ -0,0 +1,50 @@
+import functools
+
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+pytest.importorskip("odf")
+
+
+@pytest.fixture(autouse=True)
+def cd_and_set_engine(monkeypatch, datapath):
+    func = functools.partial(pd.read_excel, engine="odf")
+    monkeypatch.setattr(pd, "read_excel", func)
+    monkeypatch.chdir(datapath("io", "data", "excel"))
+
+
+def test_read_invalid_types_raises():
+    # the invalid_value_type.ods required manual editing
+    # of the included content.xml file
+    with pytest.raises(ValueError, match="Unrecognized type awesome_new_type"):
+        pd.read_excel("invalid_value_type.ods")
+
+
+def test_read_writer_table():
+    # Also test reading tables from a text OpenDocument file
+    # (.odt)
+    index = pd.Index(["Row 1", "Row 2", "Row 3"], name="Header")
+    expected = pd.DataFrame(
+        [[1, np.nan, 7], [2, np.nan, 8], [3, np.nan, 9]],
+        index=index,
+        columns=["Column 1", "Unnamed: 2", "Column 3"],
+    )
+
+    result = pd.read_excel("writertable.odt", sheet_name="Table1", index_col=0)
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_newlines_between_xml_elements_table():
+    # GH#45598
+    expected = pd.DataFrame(
+        [[1.0, 4.0, 7], [np.nan, np.nan, 8], [3.0, 6.0, 9]],
+        columns=["Column 1", "Column 2", "Column 3"],
+    )
+
+    result = pd.read_excel("test_newlines.ods")
+
+    tm.assert_frame_equal(result, expected)
diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_odswriter.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_odswriter.py
new file mode 100644
index 00000000..21d31ec8
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_odswriter.py
@@ -0,0 +1,49 @@
+import re
+
+import pytest
+
+import pandas._testing as tm
+
+from pandas.io.excel import ExcelWriter
+
+odf = pytest.importorskip("odf")
+
+pytestmark = pytest.mark.parametrize("ext", [".ods"])
+
+
+def test_write_append_mode_raises(ext):
+    msg = "Append mode is not supported with odf!"
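+    # note: openpyxl is the only pandas ExcelWriter engine that supports
+    # mode="a"; the odf and xlsxwriter writers both reject append mode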
+ + with tm.ensure_clean(ext) as f: + with pytest.raises(ValueError, match=msg): + ExcelWriter(f, engine="odf", mode="a") + + +@pytest.mark.parametrize("engine_kwargs", [None, {"kwarg": 1}]) +def test_engine_kwargs(ext, engine_kwargs): + # GH 42286 + # GH 43445 + # test for error: OpenDocumentSpreadsheet does not accept any arguments + with tm.ensure_clean(ext) as f: + if engine_kwargs is not None: + error = re.escape( + "OpenDocumentSpreadsheet() got an unexpected keyword argument 'kwarg'" + ) + with pytest.raises( + TypeError, + match=error, + ): + ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs) + else: + with ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs) as _: + pass + + +def test_book_and_sheets_consistent(ext): + # GH#45687 - Ensure sheets is updated if user modifies book + with tm.ensure_clean(ext) as f: + with ExcelWriter(f) as writer: + assert writer.sheets == {} + table = odf.table.Table(name="test_name") + writer.book.spreadsheet.addElement(table) + assert writer.sheets == {"test_name": table} diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_openpyxl.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_openpyxl.py new file mode 100644 index 00000000..b8d41164 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_openpyxl.py @@ -0,0 +1,398 @@ +import contextlib +from pathlib import Path +import re + +import numpy as np +import pytest + +import pandas as pd +from pandas import DataFrame +import pandas._testing as tm + +from pandas.io.excel import ( + ExcelWriter, + _OpenpyxlWriter, +) + +openpyxl = pytest.importorskip("openpyxl") + +pytestmark = pytest.mark.parametrize("ext", [".xlsx"]) + + +def test_to_excel_styleconverter(ext): + from openpyxl import styles + + hstyle = { + "font": {"color": "00FF0000", "bold": True}, + "borders": {"top": "thin", "right": "thin", "bottom": "thin", "left": "thin"}, + "alignment": {"horizontal": "center", "vertical": "top"}, + "fill": {"patternType": "solid", "fgColor": {"rgb": "006666FF", "tint": 0.3}}, + "number_format": {"format_code": "0.00"}, + "protection": {"locked": True, "hidden": False}, + } + + font_color = styles.Color("00FF0000") + font = styles.Font(bold=True, color=font_color) + side = styles.Side(style=styles.borders.BORDER_THIN) + border = styles.Border(top=side, right=side, bottom=side, left=side) + alignment = styles.Alignment(horizontal="center", vertical="top") + fill_color = styles.Color(rgb="006666FF", tint=0.3) + fill = styles.PatternFill(patternType="solid", fgColor=fill_color) + + number_format = "0.00" + + protection = styles.Protection(locked=True, hidden=False) + + kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle) + assert kw["font"] == font + assert kw["border"] == border + assert kw["alignment"] == alignment + assert kw["fill"] == fill + assert kw["number_format"] == number_format + assert kw["protection"] == protection + + +def test_write_cells_merge_styled(ext): + from pandas.io.formats.excel import ExcelCell + + sheet_name = "merge_styled" + + sty_b1 = {"font": {"color": "00FF0000"}} + sty_a2 = {"font": {"color": "0000FF00"}} + + initial_cells = [ + ExcelCell(col=1, row=0, val=42, style=sty_b1), + ExcelCell(col=0, row=1, val=99, style=sty_a2), + ] + + sty_merged = {"font": {"color": "000000FF", "bold": True}} + sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged) + openpyxl_sty_merged = sty_kwargs["font"] + merge_cells = [ + ExcelCell( + col=0, row=0, val="pandas", mergestart=1, mergeend=1, 
+            style=sty_merged
+        )
+    ]
+
+    with tm.ensure_clean(ext) as path:
+        with _OpenpyxlWriter(path) as writer:
+            writer._write_cells(initial_cells, sheet_name=sheet_name)
+            writer._write_cells(merge_cells, sheet_name=sheet_name)
+
+            wks = writer.sheets[sheet_name]
+            xcell_b1 = wks["B1"]
+            xcell_a2 = wks["A2"]
+            assert xcell_b1.font == openpyxl_sty_merged
+            assert xcell_a2.font == openpyxl_sty_merged
+
+
+@pytest.mark.parametrize("iso_dates", [True, False])
+def test_engine_kwargs_write(ext, iso_dates):
+    # GH 42286 GH 43445
+    engine_kwargs = {"iso_dates": iso_dates}
+    with tm.ensure_clean(ext) as f:
+        with ExcelWriter(f, engine="openpyxl", engine_kwargs=engine_kwargs) as writer:
+            assert writer.book.iso_dates == iso_dates
+            # ExcelWriter won't allow us to close without writing something
+            DataFrame().to_excel(writer)
+
+
+def test_engine_kwargs_append_invalid(ext):
+    # GH 43445
+    # test whether an invalid engine kwarg actually raises
+    with tm.ensure_clean(ext) as f:
+        DataFrame(["hello", "world"]).to_excel(f)
+        with pytest.raises(
+            TypeError,
+            match=re.escape(
+                "load_workbook() got an unexpected keyword argument 'apple_banana'"
+            ),
+        ):
+            with ExcelWriter(
+                f, engine="openpyxl", mode="a", engine_kwargs={"apple_banana": "fruit"}
+            ) as writer:
+                # ExcelWriter needs us to write something to close properly
+                DataFrame(["good"]).to_excel(writer, sheet_name="Sheet2")
+
+
+@pytest.mark.parametrize("data_only, expected", [(True, 0), (False, "=1+1")])
+def test_engine_kwargs_append_data_only(ext, data_only, expected):
+    # GH 43445
+    # tests whether the data_only engine_kwarg actually works well for
+    # openpyxl's load_workbook
+    with tm.ensure_clean(ext) as f:
+        DataFrame(["=1+1"]).to_excel(f)
+        with ExcelWriter(
+            f, engine="openpyxl", mode="a", engine_kwargs={"data_only": data_only}
+        ) as writer:
+            assert writer.sheets["Sheet1"]["B2"].value == expected
+            # ExcelWriter needs us to write something to close properly
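+            # (an empty DataFrame is enough; it just gives the workbook a
+            # sheet so it can be saved)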
+ DataFrame().to_excel(writer, sheet_name="Sheet2") + + +@pytest.mark.parametrize( + "mode,expected", [("w", ["baz"]), ("a", ["foo", "bar", "baz"])] +) +def test_write_append_mode(ext, mode, expected): + df = DataFrame([1], columns=["baz"]) + + with tm.ensure_clean(ext) as f: + wb = openpyxl.Workbook() + wb.worksheets[0].title = "foo" + wb.worksheets[0]["A1"].value = "foo" + wb.create_sheet("bar") + wb.worksheets[1]["A1"].value = "bar" + wb.save(f) + + with ExcelWriter(f, engine="openpyxl", mode=mode) as writer: + df.to_excel(writer, sheet_name="baz", index=False) + + with contextlib.closing(openpyxl.load_workbook(f)) as wb2: + result = [sheet.title for sheet in wb2.worksheets] + assert result == expected + + for index, cell_value in enumerate(expected): + assert wb2.worksheets[index]["A1"].value == cell_value + + +@pytest.mark.parametrize( + "if_sheet_exists,num_sheets,expected", + [ + ("new", 2, ["apple", "banana"]), + ("replace", 1, ["pear"]), + ("overlay", 1, ["pear", "banana"]), + ], +) +def test_if_sheet_exists_append_modes(ext, if_sheet_exists, num_sheets, expected): + # GH 40230 + df1 = DataFrame({"fruit": ["apple", "banana"]}) + df2 = DataFrame({"fruit": ["pear"]}) + + with tm.ensure_clean(ext) as f: + df1.to_excel(f, engine="openpyxl", sheet_name="foo", index=False) + with ExcelWriter( + f, engine="openpyxl", mode="a", if_sheet_exists=if_sheet_exists + ) as writer: + df2.to_excel(writer, sheet_name="foo", index=False) + + with contextlib.closing(openpyxl.load_workbook(f)) as wb: + assert len(wb.sheetnames) == num_sheets + assert wb.sheetnames[0] == "foo" + result = pd.read_excel(wb, "foo", engine="openpyxl") + assert list(result["fruit"]) == expected + if len(wb.sheetnames) == 2: + result = pd.read_excel(wb, wb.sheetnames[1], engine="openpyxl") + tm.assert_frame_equal(result, df2) + + +@pytest.mark.parametrize( + "startrow, startcol, greeting, goodbye", + [ + (0, 0, ["poop", "world"], ["goodbye", "people"]), + (0, 1, ["hello", "world"], ["poop", "people"]), + (1, 0, ["hello", "poop"], ["goodbye", "people"]), + (1, 1, ["hello", "world"], ["goodbye", "poop"]), + ], +) +def test_append_overlay_startrow_startcol(ext, startrow, startcol, greeting, goodbye): + df1 = DataFrame({"greeting": ["hello", "world"], "goodbye": ["goodbye", "people"]}) + df2 = DataFrame(["poop"]) + + with tm.ensure_clean(ext) as f: + df1.to_excel(f, engine="openpyxl", sheet_name="poo", index=False) + with ExcelWriter( + f, engine="openpyxl", mode="a", if_sheet_exists="overlay" + ) as writer: + # use startrow+1 because we don't have a header + df2.to_excel( + writer, + index=False, + header=False, + startrow=startrow + 1, + startcol=startcol, + sheet_name="poo", + ) + + result = pd.read_excel(f, sheet_name="poo", engine="openpyxl") + expected = DataFrame({"greeting": greeting, "goodbye": goodbye}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "if_sheet_exists,msg", + [ + ( + "invalid", + "'invalid' is not valid for if_sheet_exists. 
Valid options " + "are 'error', 'new', 'replace' and 'overlay'.", + ), + ( + "error", + "Sheet 'foo' already exists and if_sheet_exists is set to 'error'.", + ), + ( + None, + "Sheet 'foo' already exists and if_sheet_exists is set to 'error'.", + ), + ], +) +def test_if_sheet_exists_raises(ext, if_sheet_exists, msg): + # GH 40230 + df = DataFrame({"fruit": ["pear"]}) + with tm.ensure_clean(ext) as f: + with pytest.raises(ValueError, match=re.escape(msg)): + df.to_excel(f, "foo", engine="openpyxl") + with ExcelWriter( + f, engine="openpyxl", mode="a", if_sheet_exists=if_sheet_exists + ) as writer: + df.to_excel(writer, sheet_name="foo") + + +def test_to_excel_with_openpyxl_engine(ext): + # GH 29854 + with tm.ensure_clean(ext) as filename: + df1 = DataFrame({"A": np.linspace(1, 10, 10)}) + df2 = DataFrame({"B": np.linspace(1, 20, 10)}) + df = pd.concat([df1, df2], axis=1) + styled = df.style.map( + lambda val: f"color: {'red' if val < 0 else 'black'}" + ).highlight_max() + + styled.to_excel(filename, engine="openpyxl") + + +@pytest.mark.parametrize("read_only", [True, False]) +def test_read_workbook(datapath, ext, read_only): + # GH 39528 + filename = datapath("io", "data", "excel", "test1" + ext) + with contextlib.closing( + openpyxl.load_workbook(filename, read_only=read_only) + ) as wb: + result = pd.read_excel(wb, engine="openpyxl") + expected = pd.read_excel(filename) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "header, expected_data", + [ + ( + 0, + { + "Title": [np.nan, "A", 1, 2, 3], + "Unnamed: 1": [np.nan, "B", 4, 5, 6], + "Unnamed: 2": [np.nan, "C", 7, 8, 9], + }, + ), + (2, {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}), + ], +) +@pytest.mark.parametrize( + "filename", ["dimension_missing", "dimension_small", "dimension_large"] +) +# When read_only is None, use read_excel instead of a workbook +@pytest.mark.parametrize("read_only", [True, False, None]) +def test_read_with_bad_dimension( + datapath, ext, header, expected_data, filename, read_only +): + # GH 38956, 39001 - no/incorrect dimension information + path = datapath("io", "data", "excel", f"{filename}{ext}") + if read_only is None: + result = pd.read_excel(path, header=header) + else: + with contextlib.closing( + openpyxl.load_workbook(path, read_only=read_only) + ) as wb: + result = pd.read_excel(wb, engine="openpyxl", header=header) + expected = DataFrame(expected_data) + tm.assert_frame_equal(result, expected) + + +def test_append_mode_file(ext): + # GH 39576 + df = DataFrame() + + with tm.ensure_clean(ext) as f: + df.to_excel(f, engine="openpyxl") + + with ExcelWriter( + f, mode="a", engine="openpyxl", if_sheet_exists="new" + ) as writer: + df.to_excel(writer) + + # make sure that zip files are not concatenated by making sure that + # "docProps/app.xml" only occurs twice in the file + data = Path(f).read_bytes() + first = data.find(b"docProps/app.xml") + second = data.find(b"docProps/app.xml", first + 1) + third = data.find(b"docProps/app.xml", second + 1) + assert second != -1 and third == -1 + + +# When read_only is None, use read_excel instead of a workbook +@pytest.mark.parametrize("read_only", [True, False, None]) +def test_read_with_empty_trailing_rows(datapath, ext, read_only): + # GH 39181 + path = datapath("io", "data", "excel", f"empty_trailing_rows{ext}") + if read_only is None: + result = pd.read_excel(path) + else: + with contextlib.closing( + openpyxl.load_workbook(path, read_only=read_only) + ) as wb: + result = pd.read_excel(wb, engine="openpyxl") + expected = 
DataFrame( + { + "Title": [np.nan, "A", 1, 2, 3], + "Unnamed: 1": [np.nan, "B", 4, 5, 6], + "Unnamed: 2": [np.nan, "C", 7, 8, 9], + } + ) + tm.assert_frame_equal(result, expected) + + +# When read_only is None, use read_excel instead of a workbook +@pytest.mark.parametrize("read_only", [True, False, None]) +def test_read_empty_with_blank_row(datapath, ext, read_only): + # GH 39547 - empty excel file with a row that has no data + path = datapath("io", "data", "excel", f"empty_with_blank_row{ext}") + if read_only is None: + result = pd.read_excel(path) + else: + with contextlib.closing( + openpyxl.load_workbook(path, read_only=read_only) + ) as wb: + result = pd.read_excel(wb, engine="openpyxl") + expected = DataFrame() + tm.assert_frame_equal(result, expected) + + +def test_book_and_sheets_consistent(ext): + # GH#45687 - Ensure sheets is updated if user modifies book + with tm.ensure_clean(ext) as f: + with ExcelWriter(f, engine="openpyxl") as writer: + assert writer.sheets == {} + sheet = writer.book.create_sheet("test_name", 0) + assert writer.sheets == {"test_name": sheet} + + +def test_ints_spelled_with_decimals(datapath, ext): + # GH 46988 - openpyxl returns this sheet with floats + path = datapath("io", "data", "excel", f"ints_spelled_with_decimals{ext}") + result = pd.read_excel(path) + expected = DataFrame(range(2, 12), columns=[1]) + tm.assert_frame_equal(result, expected) + + +def test_read_multiindex_header_no_index_names(datapath, ext): + # GH#47487 + path = datapath("io", "data", "excel", f"multiindex_no_index_names{ext}") + result = pd.read_excel(path, index_col=[0, 1, 2], header=[0, 1, 2]) + expected = DataFrame( + [[np.nan, "x", "x", "x"], ["x", np.nan, np.nan, np.nan]], + columns=pd.MultiIndex.from_tuples( + [("X", "Y", "A1"), ("X", "Y", "A2"), ("XX", "YY", "B1"), ("XX", "YY", "B2")] + ), + index=pd.MultiIndex.from_tuples([("A", "AA", "AAA"), ("A", "BB", "BBB")]), + ) + tm.assert_frame_equal(result, expected) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_readers.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_readers.py new file mode 100644 index 00000000..f7e11f24 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_readers.py @@ -0,0 +1,1723 @@ +from datetime import ( + datetime, + time, +) +from functools import partial +from io import BytesIO +import os +from pathlib import Path +import platform +import re +from urllib.error import URLError +from zipfile import BadZipFile + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) + +read_ext_params = [".xls", ".xlsx", ".xlsm", ".xlsb", ".ods"] +engine_params = [ + # Add any engines to test here + # When defusedxml is installed it triggers deprecation warnings for + # xlrd and openpyxl, so catch those here + pytest.param( + "xlrd", + marks=[ + td.skip_if_no("xlrd"), + ], + ), + pytest.param( + "openpyxl", + marks=[ + td.skip_if_no("openpyxl"), + ], + ), + pytest.param( + None, + marks=[ + td.skip_if_no("xlrd"), + ], + ), + pytest.param("pyxlsb", marks=td.skip_if_no("pyxlsb")), + pytest.param("odf", marks=td.skip_if_no("odf")), +] + + +def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool: + """ + Filter out invalid (engine, ext) pairs instead of skipping, as that + produces 500+ pytest.skips. 
+ """ + engine = engine.values[0] + if engine == "openpyxl" and read_ext == ".xls": + return False + if engine == "odf" and read_ext != ".ods": + return False + if read_ext == ".ods" and engine != "odf": + return False + if engine == "pyxlsb" and read_ext != ".xlsb": + return False + if read_ext == ".xlsb" and engine != "pyxlsb": + return False + if engine == "xlrd" and read_ext != ".xls": + return False + return True + + +def _transfer_marks(engine, read_ext): + """ + engine gives us a pytest.param object with some marks, read_ext is just + a string. We need to generate a new pytest.param inheriting the marks. + """ + values = engine.values + (read_ext,) + new_param = pytest.param(values, marks=engine.marks) + return new_param + + +@pytest.fixture( + params=[ + _transfer_marks(eng, ext) + for eng in engine_params + for ext in read_ext_params + if _is_valid_engine_ext_pair(eng, ext) + ], + ids=str, +) +def engine_and_read_ext(request): + """ + Fixture for Excel reader engine and read_ext, only including valid pairs. + """ + return request.param + + +@pytest.fixture +def engine(engine_and_read_ext): + engine, read_ext = engine_and_read_ext + return engine + + +@pytest.fixture +def read_ext(engine_and_read_ext): + engine, read_ext = engine_and_read_ext + return read_ext + + +class TestReaders: + @pytest.fixture(autouse=True) + def cd_and_set_engine(self, engine, datapath, monkeypatch): + """ + Change directory and set engine for read_excel calls. + """ + func = partial(pd.read_excel, engine=engine) + monkeypatch.chdir(datapath("io", "data", "excel")) + monkeypatch.setattr(pd, "read_excel", func) + + def test_engine_used(self, read_ext, engine, monkeypatch): + # GH 38884 + def parser(self, *args, **kwargs): + return self.engine + + monkeypatch.setattr(pd.ExcelFile, "parse", parser) + + expected_defaults = { + "xlsx": "openpyxl", + "xlsm": "openpyxl", + "xlsb": "pyxlsb", + "xls": "xlrd", + "ods": "odf", + } + + with open("test1" + read_ext, "rb") as f: + result = pd.read_excel(f) + + if engine is not None: + expected = engine + else: + expected = expected_defaults[read_ext[1:]] + assert result == expected + + def test_engine_kwargs(self, read_ext, engine): + # GH#52214 + expected_defaults = { + "xlsx": {"foo": "abcd"}, + "xlsm": {"foo": 123}, + "xlsb": {"foo": "True"}, + "xls": {"foo": True}, + "ods": {"foo": "abcd"}, + } + + if read_ext[1:] in {"xls", "xlsb"}: + msg = re.escape(r"open_workbook() got an unexpected keyword argument 'foo'") + elif read_ext[1:] == "ods": + msg = re.escape(r"load() got an unexpected keyword argument 'foo'") + else: + msg = re.escape(r"load_workbook() got an unexpected keyword argument 'foo'") + + if engine is not None: + with pytest.raises(TypeError, match=msg): + pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet1", + index_col=0, + engine_kwargs=expected_defaults[read_ext[1:]], + ) + + def test_usecols_int(self, read_ext): + # usecols as int + msg = "Passing an integer for `usecols`" + with pytest.raises(ValueError, match=msg): + pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=3 + ) + + # usecols as int + with pytest.raises(ValueError, match=msg): + pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols=3, + ) + + def test_usecols_list(self, request, read_ext, df_ref): + if read_ext == ".xlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + df_ref = df_ref.reindex(columns=["B", "C"]) + df1 = pd.read_excel( 
+ "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=[0, 2, 3] + ) + df2 = pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols=[0, 2, 3], + ) + + # TODO add index to xls file) + tm.assert_frame_equal(df1, df_ref, check_names=False) + tm.assert_frame_equal(df2, df_ref, check_names=False) + + def test_usecols_str(self, request, read_ext, df_ref): + if read_ext == ".xlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + df1 = df_ref.reindex(columns=["A", "B", "C"]) + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A:D" + ) + df3 = pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols="A:D", + ) + + # TODO add index to xls, read xls ignores index name ? + tm.assert_frame_equal(df2, df1, check_names=False) + tm.assert_frame_equal(df3, df1, check_names=False) + + df1 = df_ref.reindex(columns=["B", "C"]) + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C,D" + ) + df3 = pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols="A,C,D", + ) + # TODO add index to xls file + tm.assert_frame_equal(df2, df1, check_names=False) + tm.assert_frame_equal(df3, df1, check_names=False) + + df1 = df_ref.reindex(columns=["B", "C"]) + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C:D" + ) + df3 = pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols="A,C:D", + ) + tm.assert_frame_equal(df2, df1, check_names=False) + tm.assert_frame_equal(df3, df1, check_names=False) + + @pytest.mark.parametrize( + "usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]] + ) + def test_usecols_diff_positional_int_columns_order( + self, request, read_ext, usecols, df_ref + ): + if read_ext == ".xlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + expected = df_ref[["A", "C"]] + result = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=usecols + ) + tm.assert_frame_equal(result, expected, check_names=False) + + @pytest.mark.parametrize("usecols", [["B", "D"], ["D", "B"]]) + def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_ref): + expected = df_ref[["B", "D"]] + expected.index = range(len(expected)) + + result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols=usecols) + tm.assert_frame_equal(result, expected, check_names=False) + + def test_read_excel_without_slicing(self, request, read_ext, df_ref): + if read_ext == ".xlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + expected = df_ref + result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0) + tm.assert_frame_equal(result, expected, check_names=False) + + def test_usecols_excel_range_str(self, request, read_ext, df_ref): + if read_ext == ".xlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + expected = df_ref[["C", "D"]] + result = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,D:E" + ) + tm.assert_frame_equal(result, expected, check_names=False) + + def test_usecols_excel_range_str_invalid(self, read_ext): + msg = 
"Invalid column name: E1" + + with pytest.raises(ValueError, match=msg): + pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols="D:E1") + + def test_index_col_label_error(self, read_ext): + msg = "list indices must be integers.*, not str" + + with pytest.raises(TypeError, match=msg): + pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet1", + index_col=["A"], + usecols=["A", "C"], + ) + + def test_index_col_str(self, read_ext): + # see gh-52716 + result = pd.read_excel("test1" + read_ext, sheet_name="Sheet3", index_col="A") + expected = DataFrame( + columns=["B", "C", "D", "E", "F"], index=Index([], name="A") + ) + tm.assert_frame_equal(result, expected) + + def test_index_col_empty(self, read_ext): + # see gh-9208 + result = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet3", index_col=["A", "B", "C"] + ) + expected = DataFrame( + columns=["D", "E", "F"], + index=MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("index_col", [None, 2]) + def test_index_col_with_unnamed(self, read_ext, index_col): + # see gh-18792 + result = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet4", index_col=index_col + ) + expected = DataFrame( + [["i1", "a", "x"], ["i2", "b", "y"]], columns=["Unnamed: 0", "col1", "col2"] + ) + if index_col: + expected = expected.set_index(expected.columns[index_col]) + + tm.assert_frame_equal(result, expected) + + def test_usecols_pass_non_existent_column(self, read_ext): + msg = ( + "Usecols do not match columns, " + "columns expected but not found: " + r"\['E'\]" + ) + + with pytest.raises(ValueError, match=msg): + pd.read_excel("test1" + read_ext, usecols=["E"]) + + def test_usecols_wrong_type(self, read_ext): + msg = ( + "'usecols' must either be list-like of " + "all strings, all unicode, all integers or a callable." 
+ ) + + with pytest.raises(ValueError, match=msg): + pd.read_excel("test1" + read_ext, usecols=["E1", 0]) + + def test_excel_stop_iterator(self, read_ext): + parsed = pd.read_excel("test2" + read_ext, sheet_name="Sheet1") + expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"]) + tm.assert_frame_equal(parsed, expected) + + def test_excel_cell_error_na(self, request, read_ext): + if read_ext == ".xlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + parsed = pd.read_excel("test3" + read_ext, sheet_name="Sheet1") + expected = DataFrame([[np.nan]], columns=["Test"]) + tm.assert_frame_equal(parsed, expected) + + def test_excel_table(self, request, read_ext, df_ref): + if read_ext == ".xlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + df1 = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0) + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet2", skiprows=[1], index_col=0 + ) + # TODO add index to file + tm.assert_frame_equal(df1, df_ref, check_names=False) + tm.assert_frame_equal(df2, df_ref, check_names=False) + + df3 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, skipfooter=1 + ) + tm.assert_frame_equal(df3, df1.iloc[:-1]) + + def test_reader_special_dtypes(self, request, read_ext): + if read_ext == ".xlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + expected = DataFrame.from_dict( + { + "IntCol": [1, 2, -3, 4, 0], + "FloatCol": [1.25, 2.25, 1.83, 1.92, 0.0000000005], + "BoolCol": [True, False, True, True, False], + "StrCol": [1, 2, 3, 4, 5], + "Str2Col": ["a", 3, "c", "d", "e"], + "DateCol": [ + datetime(2013, 10, 30), + datetime(2013, 10, 31), + datetime(1905, 1, 1), + datetime(2013, 12, 14), + datetime(2015, 3, 14), + ], + }, + ) + basename = "test_types" + + # should read in correctly and infer types + actual = pd.read_excel(basename + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, expected) + + # if not coercing number, then int comes in as float + float_expected = expected.copy() + float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0 + actual = pd.read_excel(basename + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, float_expected) + + # check setting Index (assuming xls and xlsx are the same here) + for icol, name in enumerate(expected.columns): + actual = pd.read_excel( + basename + read_ext, sheet_name="Sheet1", index_col=icol + ) + exp = expected.set_index(name) + tm.assert_frame_equal(actual, exp) + + expected["StrCol"] = expected["StrCol"].apply(str) + actual = pd.read_excel( + basename + read_ext, sheet_name="Sheet1", converters={"StrCol": str} + ) + tm.assert_frame_equal(actual, expected) + + # GH8212 - support for converters and missing values + def test_reader_converters(self, read_ext): + basename = "test_converters" + + expected = DataFrame.from_dict( + { + "IntCol": [1, 2, -3, -1000, 0], + "FloatCol": [12.5, np.nan, 18.3, 19.2, 0.000000005], + "BoolCol": ["Found", "Found", "Found", "Not found", "Found"], + "StrCol": ["1", np.nan, "3", "4", "5"], + } + ) + + converters = { + "IntCol": lambda x: int(x) if x != "" else -1000, + "FloatCol": lambda x: 10 * x if x else np.nan, + 2: lambda x: "Found" if x != "" else "Not found", + 3: lambda x: str(x) if x else "", + } + + # should read in correctly and set types of single cells (not array + # 
dtypes) + actual = pd.read_excel( + basename + read_ext, sheet_name="Sheet1", converters=converters + ) + tm.assert_frame_equal(actual, expected) + + def test_reader_dtype(self, read_ext): + # GH 8212 + basename = "testdtype" + actual = pd.read_excel(basename + read_ext) + + expected = DataFrame( + { + "a": [1, 2, 3, 4], + "b": [2.5, 3.5, 4.5, 5.5], + "c": [1, 2, 3, 4], + "d": [1.0, 2.0, np.nan, 4.0], + } + ).reindex(columns=["a", "b", "c", "d"]) + + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel( + basename + read_ext, dtype={"a": "float64", "b": "float32", "c": str} + ) + + expected["a"] = expected["a"].astype("float64") + expected["b"] = expected["b"].astype("float32") + expected["c"] = ["001", "002", "003", "004"] + tm.assert_frame_equal(actual, expected) + + msg = "Unable to convert column d to type int64" + with pytest.raises(ValueError, match=msg): + pd.read_excel(basename + read_ext, dtype={"d": "int64"}) + + @pytest.mark.parametrize( + "dtype,expected", + [ + ( + None, + DataFrame( + { + "a": [1, 2, 3, 4], + "b": [2.5, 3.5, 4.5, 5.5], + "c": [1, 2, 3, 4], + "d": [1.0, 2.0, np.nan, 4.0], + } + ), + ), + ( + {"a": "float64", "b": "float32", "c": str, "d": str}, + DataFrame( + { + "a": Series([1, 2, 3, 4], dtype="float64"), + "b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"), + "c": ["001", "002", "003", "004"], + "d": ["1", "2", np.nan, "4"], + } + ), + ), + ], + ) + def test_reader_dtype_str(self, read_ext, dtype, expected): + # see gh-20377 + basename = "testdtype" + + actual = pd.read_excel(basename + read_ext, dtype=dtype) + tm.assert_frame_equal(actual, expected) + + def test_dtype_backend(self, read_ext, dtype_backend): + # GH#36712 + if read_ext in (".xlsb", ".xls"): + pytest.skip(f"No engine for filetype: '{read_ext}'") + + df = DataFrame( + { + "a": Series([1, 3], dtype="Int64"), + "b": Series([2.5, 4.5], dtype="Float64"), + "c": Series([True, False], dtype="boolean"), + "d": Series(["a", "b"], dtype="string"), + "e": Series([pd.NA, 6], dtype="Int64"), + "f": Series([pd.NA, 7.5], dtype="Float64"), + "g": Series([pd.NA, True], dtype="boolean"), + "h": Series([pd.NA, "a"], dtype="string"), + "i": Series([pd.Timestamp("2019-12-31")] * 2), + "j": Series([pd.NA, pd.NA], dtype="Int64"), + } + ) + with tm.ensure_clean(read_ext) as file_path: + df.to_excel(file_path, "test", index=False) + result = pd.read_excel( + file_path, sheet_name="test", dtype_backend=dtype_backend + ) + if dtype_backend == "pyarrow": + import pyarrow as pa + + from pandas.arrays import ArrowExtensionArray + + expected = DataFrame( + { + col: ArrowExtensionArray(pa.array(df[col], from_pandas=True)) + for col in df.columns + } + ) + # pyarrow by default infers timestamp resolution as us, not ns + expected["i"] = ArrowExtensionArray( + expected["i"].array._pa_array.cast(pa.timestamp(unit="us")) + ) + # pyarrow supports a null type, so don't have to default to Int64 + expected["j"] = ArrowExtensionArray(pa.array([None, None])) + else: + expected = df + tm.assert_frame_equal(result, expected) + + def test_dtype_backend_and_dtype(self, read_ext): + # GH#36712 + if read_ext in (".xlsb", ".xls"): + pytest.skip(f"No engine for filetype: '{read_ext}'") + + df = DataFrame({"a": [np.nan, 1.0], "b": [2.5, np.nan]}) + with tm.ensure_clean(read_ext) as file_path: + df.to_excel(file_path, "test", index=False) + result = pd.read_excel( + file_path, + sheet_name="test", + dtype_backend="numpy_nullable", + dtype="float64", + ) + tm.assert_frame_equal(result, df) + + def test_dtype_backend_string(self, 
read_ext, string_storage): + # GH#36712 + if read_ext in (".xlsb", ".xls"): + pytest.skip(f"No engine for filetype: '{read_ext}'") + + pa = pytest.importorskip("pyarrow") + + with pd.option_context("mode.string_storage", string_storage): + df = DataFrame( + { + "a": np.array(["a", "b"], dtype=np.object_), + "b": np.array(["x", pd.NA], dtype=np.object_), + } + ) + with tm.ensure_clean(read_ext) as file_path: + df.to_excel(file_path, "test", index=False) + result = pd.read_excel( + file_path, sheet_name="test", dtype_backend="numpy_nullable" + ) + + if string_storage == "python": + expected = DataFrame( + { + "a": StringArray(np.array(["a", "b"], dtype=np.object_)), + "b": StringArray(np.array(["x", pd.NA], dtype=np.object_)), + } + ) + else: + expected = DataFrame( + { + "a": ArrowStringArray(pa.array(["a", "b"])), + "b": ArrowStringArray(pa.array(["x", None])), + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtypes, exp_value", [({}, "1"), ({"a.1": "int64"}, 1)]) + def test_dtype_mangle_dup_cols(self, read_ext, dtypes, exp_value): + # GH#35211 + basename = "df_mangle_dup_col_dtypes" + dtype_dict = {"a": str, **dtypes} + dtype_dict_copy = dtype_dict.copy() + # GH#42462 + result = pd.read_excel(basename + read_ext, dtype=dtype_dict) + expected = DataFrame({"a": ["1"], "a.1": [exp_value]}) + assert dtype_dict == dtype_dict_copy, "dtype dict changed" + tm.assert_frame_equal(result, expected) + + def test_reader_spaces(self, read_ext): + # see gh-32207 + basename = "test_spaces" + + actual = pd.read_excel(basename + read_ext) + expected = DataFrame( + { + "testcol": [ + "this is great", + "4 spaces", + "1 trailing ", + " 1 leading", + "2 spaces multiple times", + ] + } + ) + tm.assert_frame_equal(actual, expected) + + # gh-36122, gh-35802 + @pytest.mark.parametrize( + "basename,expected", + [ + ("gh-35802", DataFrame({"COLUMN": ["Test (1)"]})), + ("gh-36122", DataFrame(columns=["got 2nd sa"])), + ], + ) + def test_read_excel_ods_nested_xml(self, engine, read_ext, basename, expected): + # see gh-35802 + if engine != "odf": + pytest.skip(f"Skipped for engine: {engine}") + + actual = pd.read_excel(basename + read_ext) + tm.assert_frame_equal(actual, expected) + + def test_reading_all_sheets(self, read_ext): + # Test reading all sheet names by setting sheet_name to None, + # Ensure a dict is returned. + # See PR #9450 + basename = "test_multisheet" + dfs = pd.read_excel(basename + read_ext, sheet_name=None) + # ensure this is not alphabetical to test order preservation + expected_keys = ["Charlie", "Alpha", "Beta"] + tm.assert_contains_all(expected_keys, dfs.keys()) + # Issue 9930 + # Ensure sheet order is preserved + assert expected_keys == list(dfs.keys()) + + def test_reading_multiple_specific_sheets(self, read_ext): + # Test reading specific sheet names by specifying a mixed list + # of integers and strings, and confirm that duplicated sheet + # references (positions/names) are removed properly. + # Ensure a dict is returned + # See PR #9450 + basename = "test_multisheet" + # Explicitly request duplicates. Only the set should be returned. + expected_keys = [2, "Charlie", "Charlie"] + dfs = pd.read_excel(basename + read_ext, sheet_name=expected_keys) + expected_keys = list(set(expected_keys)) + tm.assert_contains_all(expected_keys, dfs.keys()) + assert len(expected_keys) == len(dfs.keys()) + + def test_reading_all_sheets_with_blank(self, read_ext): + # Test reading all sheet names by setting sheet_name to None, + # In the case where some sheets are blank. 
+ # Issue #11711 + basename = "blank_with_header" + dfs = pd.read_excel(basename + read_ext, sheet_name=None) + expected_keys = ["Sheet1", "Sheet2", "Sheet3"] + tm.assert_contains_all(expected_keys, dfs.keys()) + + # GH6403 + def test_read_excel_blank(self, read_ext): + actual = pd.read_excel("blank" + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, DataFrame()) + + def test_read_excel_blank_with_header(self, read_ext): + expected = DataFrame(columns=["col_1", "col_2"]) + actual = pd.read_excel("blank_with_header" + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, expected) + + def test_exception_message_includes_sheet_name(self, read_ext): + # GH 48706 + with pytest.raises(ValueError, match=r" \(sheet: Sheet1\)$"): + pd.read_excel("blank_with_header" + read_ext, header=[1], sheet_name=None) + with pytest.raises(ZeroDivisionError, match=r" \(sheet: Sheet1\)$"): + pd.read_excel("test1" + read_ext, usecols=lambda x: 1 / 0, sheet_name=None) + + @pytest.mark.filterwarnings("ignore:Cell A4 is marked:UserWarning:openpyxl") + def test_date_conversion_overflow(self, request, engine, read_ext): + # GH 10001 : pandas.ExcelFile ignore parse_dates=False + if engine == "pyxlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + expected = DataFrame( + [ + [pd.Timestamp("2016-03-12"), "Marc Johnson"], + [pd.Timestamp("2016-03-16"), "Jack Black"], + [1e20, "Timothy Brown"], + ], + columns=["DateColWithBigInt", "StringCol"], + ) + + if engine == "openpyxl": + request.node.add_marker( + pytest.mark.xfail(reason="Maybe not supported by openpyxl") + ) + + if engine is None and read_ext in (".xlsx", ".xlsm"): + # GH 35029 + request.node.add_marker( + pytest.mark.xfail(reason="Defaults to openpyxl, maybe not supported") + ) + + result = pd.read_excel("testdateoverflow" + read_ext) + tm.assert_frame_equal(result, expected) + + def test_sheet_name(self, request, read_ext, df_ref): + if read_ext == ".xlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + filename = "test1" + sheet_name = "Sheet1" + + df1 = pd.read_excel( + filename + read_ext, sheet_name=sheet_name, index_col=0 + ) # doc + df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name) + + tm.assert_frame_equal(df1, df_ref, check_names=False) + tm.assert_frame_equal(df2, df_ref, check_names=False) + + def test_excel_read_buffer(self, read_ext): + pth = "test1" + read_ext + expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0) + with open(pth, "rb") as f: + actual = pd.read_excel(f, sheet_name="Sheet1", index_col=0) + tm.assert_frame_equal(expected, actual) + + def test_bad_engine_raises(self): + bad_engine = "foo" + with pytest.raises(ValueError, match="Unknown engine: foo"): + pd.read_excel("", engine=bad_engine) + + @pytest.mark.parametrize( + "sheet_name", + [3, [0, 3], [3, 0], "Sheet4", ["Sheet1", "Sheet4"], ["Sheet4", "Sheet1"]], + ) + def test_bad_sheetname_raises(self, read_ext, sheet_name): + # GH 39250 + msg = "Worksheet index 3 is invalid|Worksheet named 'Sheet4' not found" + with pytest.raises(ValueError, match=msg): + pd.read_excel("blank" + read_ext, sheet_name=sheet_name) + + def test_missing_file_raises(self, read_ext): + bad_file = f"foo{read_ext}" + # CI tests with other languages, translates to "No such file or directory" + match = "|".join( + [ + "(No such file or directory", + "没有那个文件或目录", + "File o directory non esistente)", + ] + 
) + with pytest.raises(FileNotFoundError, match=match): + pd.read_excel(bad_file) + + def test_corrupt_bytes_raises(self, engine): + bad_stream = b"foo" + if engine is None: + error = ValueError + msg = ( + "Excel file format cannot be determined, you must " + "specify an engine manually." + ) + elif engine == "xlrd": + from xlrd import XLRDError + + error = XLRDError + msg = ( + "Unsupported format, or corrupt file: Expected BOF " + "record; found b'foo'" + ) + else: + error = BadZipFile + msg = "File is not a zip file" + with pytest.raises(error, match=msg): + pd.read_excel(BytesIO(bad_stream)) + + @pytest.mark.network + @pytest.mark.single_cpu + def test_read_from_http_url(self, httpserver, read_ext): + with open("test1" + read_ext, "rb") as f: + httpserver.serve_content(content=f.read()) + url_table = pd.read_excel(httpserver.url) + local_table = pd.read_excel("test1" + read_ext) + tm.assert_frame_equal(url_table, local_table) + + @td.skip_if_not_us_locale + @pytest.mark.single_cpu + def test_read_from_s3_url(self, read_ext, s3_public_bucket, s3so): + # Bucket created in tests/io/conftest.py + with open("test1" + read_ext, "rb") as f: + s3_public_bucket.put_object(Key="test1" + read_ext, Body=f) + + url = f"s3://{s3_public_bucket.name}/test1" + read_ext + + url_table = pd.read_excel(url, storage_options=s3so) + local_table = pd.read_excel("test1" + read_ext) + tm.assert_frame_equal(url_table, local_table) + + @pytest.mark.single_cpu + def test_read_from_s3_object(self, read_ext, s3_public_bucket, s3so): + # GH 38788 + # Bucket created in tests/io/conftest.py + with open("test1" + read_ext, "rb") as f: + s3_public_bucket.put_object(Key="test1" + read_ext, Body=f) + + import s3fs + + s3 = s3fs.S3FileSystem(**s3so) + + with s3.open(f"s3://{s3_public_bucket.name}/test1" + read_ext) as f: + url_table = pd.read_excel(f) + + local_table = pd.read_excel("test1" + read_ext) + tm.assert_frame_equal(url_table, local_table) + + @pytest.mark.slow + def test_read_from_file_url(self, read_ext, datapath): + # FILE + localtable = os.path.join(datapath("io", "data", "excel"), "test1" + read_ext) + local_table = pd.read_excel(localtable) + + try: + url_table = pd.read_excel("file://localhost/" + localtable) + except URLError: + # fails on some systems + platform_info = " ".join(platform.uname()).strip() + pytest.skip(f"failing on {platform_info}") + + tm.assert_frame_equal(url_table, local_table) + + def test_read_from_pathlib_path(self, read_ext): + # GH12655 + str_path = "test1" + read_ext + expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0) + + path_obj = Path("test1" + read_ext) + actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0) + + tm.assert_frame_equal(expected, actual) + + @td.skip_if_no("py.path") + def test_read_from_py_localpath(self, read_ext): + # GH12655 + from py.path import local as LocalPath + + str_path = os.path.join("test1" + read_ext) + expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0) + + path_obj = LocalPath().join("test1" + read_ext) + actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0) + + tm.assert_frame_equal(expected, actual) + + def test_close_from_py_localpath(self, read_ext): + # GH31467 + str_path = os.path.join("test1" + read_ext) + with open(str_path, "rb") as f: + x = pd.read_excel(f, sheet_name="Sheet1", index_col=0) + del x + # should not throw an exception because the passed file was closed + f.read() + + def test_reader_seconds(self, request, engine, read_ext): + if engine == "pyxlsb": + 
request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + # Test reading times with and without milliseconds. GH5945. + expected = DataFrame.from_dict( + { + "Time": [ + time(1, 2, 3), + time(2, 45, 56, 100000), + time(4, 29, 49, 200000), + time(6, 13, 42, 300000), + time(7, 57, 35, 400000), + time(9, 41, 28, 500000), + time(11, 25, 21, 600000), + time(13, 9, 14, 700000), + time(14, 53, 7, 800000), + time(16, 37, 0, 900000), + time(18, 20, 54), + ] + } + ) + + actual = pd.read_excel("times_1900" + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel("times_1904" + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, expected) + + def test_read_excel_multiindex(self, request, read_ext): + # see gh-4679 + if read_ext == ".xlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]]) + mi_file = "testmultiindex" + read_ext + + # "mi_column" sheet + expected = DataFrame( + [ + [1, 2.5, pd.Timestamp("2015-01-01"), True], + [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=mi, + ) + + actual = pd.read_excel( + mi_file, sheet_name="mi_column", header=[0, 1], index_col=0 + ) + tm.assert_frame_equal(actual, expected) + + # "mi_index" sheet + expected.index = mi + expected.columns = ["a", "b", "c", "d"] + + actual = pd.read_excel(mi_file, sheet_name="mi_index", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected, check_names=False) + + # "both" sheet + expected.columns = mi + + actual = pd.read_excel( + mi_file, sheet_name="both", index_col=[0, 1], header=[0, 1] + ) + tm.assert_frame_equal(actual, expected, check_names=False) + + # "mi_index_name" sheet + expected.columns = ["a", "b", "c", "d"] + expected.index = mi.set_names(["ilvl1", "ilvl2"]) + + actual = pd.read_excel(mi_file, sheet_name="mi_index_name", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected) + + # "mi_column_name" sheet + expected.index = list(range(4)) + expected.columns = mi.set_names(["c1", "c2"]) + actual = pd.read_excel( + mi_file, sheet_name="mi_column_name", header=[0, 1], index_col=0 + ) + tm.assert_frame_equal(actual, expected) + + # see gh-11317 + # "name_with_int" sheet + expected.columns = mi.set_levels([1, 2], level=1).set_names(["c1", "c2"]) + + actual = pd.read_excel( + mi_file, sheet_name="name_with_int", index_col=0, header=[0, 1] + ) + tm.assert_frame_equal(actual, expected) + + # "both_name" sheet + expected.columns = mi.set_names(["c1", "c2"]) + expected.index = mi.set_names(["ilvl1", "ilvl2"]) + + actual = pd.read_excel( + mi_file, sheet_name="both_name", index_col=[0, 1], header=[0, 1] + ) + tm.assert_frame_equal(actual, expected) + + # "both_skiprows" sheet + actual = pd.read_excel( + mi_file, + sheet_name="both_name_skiprows", + index_col=[0, 1], + header=[0, 1], + skiprows=2, + ) + tm.assert_frame_equal(actual, expected) + + @pytest.mark.parametrize( + "sheet_name,idx_lvl2", + [ + ("both_name_blank_after_mi_name", [np.nan, "b", "a", "b"]), + ("both_name_multiple_blanks", [np.nan] * 4), + ], + ) + def test_read_excel_multiindex_blank_after_name( + self, request, read_ext, sheet_name, idx_lvl2 + ): + # GH34673 + if read_ext == ".xlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb 
(GH4679" + ) + ) + + mi_file = "testmultiindex" + read_ext + mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]], names=["c1", "c2"]) + expected = DataFrame( + [ + [1, 2.5, pd.Timestamp("2015-01-01"), True], + [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=mi, + index=MultiIndex.from_arrays( + (["foo", "foo", "bar", "bar"], idx_lvl2), + names=["ilvl1", "ilvl2"], + ), + ) + result = pd.read_excel( + mi_file, + sheet_name=sheet_name, + index_col=[0, 1], + header=[0, 1], + ) + tm.assert_frame_equal(result, expected) + + def test_read_excel_multiindex_header_only(self, read_ext): + # see gh-11733. + # + # Don't try to parse a header name if there isn't one. + mi_file = "testmultiindex" + read_ext + result = pd.read_excel(mi_file, sheet_name="index_col_none", header=[0, 1]) + + exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")]) + expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns) + tm.assert_frame_equal(result, expected) + + def test_excel_old_index_format(self, read_ext): + # see gh-4679 + filename = "test_index_name_pre17" + read_ext + + # We detect headers to determine if index names exist, so + # that "index" name in the "names" version of the data will + # now be interpreted as rows that include null data. + data = np.array( + [ + [np.nan, np.nan, np.nan, np.nan, np.nan], + ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"], + ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"], + ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"], + ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"], + ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"], + ], + dtype=object, + ) + columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"] + mi = MultiIndex( + levels=[ + ["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], + ["R1", "R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"], + ], + codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], + names=[None, None], + ) + si = Index( + ["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None + ) + + expected = DataFrame(data, index=si, columns=columns) + + actual = pd.read_excel(filename, sheet_name="single_names", index_col=0) + tm.assert_frame_equal(actual, expected) + + expected.index = mi + + actual = pd.read_excel(filename, sheet_name="multi_names", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected) + + # The analogous versions of the "names" version data + # where there are explicitly no names for the indices. 
+ data = np.array( + [ + ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"], + ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"], + ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"], + ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"], + ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"], + ] + ) + columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"] + mi = MultiIndex( + levels=[ + ["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], + ["R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"], + ], + codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], + names=[None, None], + ) + si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None) + + expected = DataFrame(data, index=si, columns=columns) + + actual = pd.read_excel(filename, sheet_name="single_no_names", index_col=0) + tm.assert_frame_equal(actual, expected) + + expected.index = mi + + actual = pd.read_excel(filename, sheet_name="multi_no_names", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected, check_names=False) + + def test_read_excel_bool_header_arg(self, read_ext): + # GH 6114 + msg = "Passing a bool to header is invalid" + for arg in [True, False]: + with pytest.raises(TypeError, match=msg): + pd.read_excel("test1" + read_ext, header=arg) + + def test_read_excel_skiprows(self, request, read_ext): + # GH 4903 + if read_ext == ".xlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + actual = pd.read_excel( + "testskiprows" + read_ext, sheet_name="skiprows_list", skiprows=[0, 2] + ) + expected = DataFrame( + [ + [1, 2.5, pd.Timestamp("2015-01-01"), True], + [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=["a", "b", "c", "d"], + ) + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel( + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=np.array([0, 2]), + ) + tm.assert_frame_equal(actual, expected) + + # GH36435 + actual = pd.read_excel( + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=lambda x: x in [0, 2], + ) + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel( + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=3, + names=["a", "b", "c", "d"], + ) + expected = DataFrame( + [ + # [1, 2.5, pd.Timestamp("2015-01-01"), True], + [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=["a", "b", "c", "d"], + ) + tm.assert_frame_equal(actual, expected) + + def test_read_excel_skiprows_callable_not_in(self, request, read_ext): + # GH 4903 + if read_ext == ".xlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + actual = pd.read_excel( + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=lambda x: x not in [1, 3, 5], + ) + expected = DataFrame( + [ + [1, 2.5, pd.Timestamp("2015-01-01"), True], + # [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + # [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=["a", "b", "c", "d"], + ) + tm.assert_frame_equal(actual, expected) + + def test_read_excel_nrows(self, read_ext): + # GH 16645 + num_rows_to_pull = 5 + actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull) + expected = pd.read_excel("test1" + read_ext) + expected = expected[:num_rows_to_pull] + tm.assert_frame_equal(actual, expected) + + def 
test_read_excel_nrows_greater_than_nrows_in_file(self, read_ext): + # GH 16645 + expected = pd.read_excel("test1" + read_ext) + num_records_in_file = len(expected) + num_rows_to_pull = num_records_in_file + 10 + actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull) + tm.assert_frame_equal(actual, expected) + + def test_read_excel_nrows_non_integer_parameter(self, read_ext): + # GH 16645 + msg = "'nrows' must be an integer >=0" + with pytest.raises(ValueError, match=msg): + pd.read_excel("test1" + read_ext, nrows="5") + + @pytest.mark.parametrize( + "filename,sheet_name,header,index_col,skiprows", + [ + ("testmultiindex", "mi_column", [0, 1], 0, None), + ("testmultiindex", "mi_index", None, [0, 1], None), + ("testmultiindex", "both", [0, 1], [0, 1], None), + ("testmultiindex", "mi_column_name", [0, 1], 0, None), + ("testskiprows", "skiprows_list", None, None, [0, 2]), + ("testskiprows", "skiprows_list", None, None, lambda x: x in (0, 2)), + ], + ) + def test_read_excel_nrows_params( + self, read_ext, filename, sheet_name, header, index_col, skiprows + ): + """ + For various parameters, we should get the same result whether we + limit the rows during load (nrows=3) or after (df.iloc[:3]). + """ + # GH 46894 + expected = pd.read_excel( + filename + read_ext, + sheet_name=sheet_name, + header=header, + index_col=index_col, + skiprows=skiprows, + ).iloc[:3] + actual = pd.read_excel( + filename + read_ext, + sheet_name=sheet_name, + header=header, + index_col=index_col, + skiprows=skiprows, + nrows=3, + ) + tm.assert_frame_equal(actual, expected) + + def test_deprecated_kwargs(self, read_ext): + with pytest.raises(TypeError, match="but 3 positional arguments"): + pd.read_excel("test1" + read_ext, "Sheet1", 0) + + def test_no_header_with_list_index_col(self, read_ext): + # GH 31783 + file_name = "testmultiindex" + read_ext + data = [("B", "B"), ("key", "val"), (3, 4), (3, 4)] + idx = MultiIndex.from_tuples( + [("A", "A"), ("key", "val"), (1, 2), (1, 2)], names=(0, 1) + ) + expected = DataFrame(data, index=idx, columns=(2, 3)) + result = pd.read_excel( + file_name, sheet_name="index_col_none", index_col=[0, 1], header=None + ) + tm.assert_frame_equal(expected, result) + + def test_one_col_noskip_blank_line(self, read_ext): + # GH 39808 + file_name = "one_col_blank_line" + read_ext + data = [0.5, np.nan, 1, 2] + expected = DataFrame(data, columns=["numbers"]) + result = pd.read_excel(file_name) + tm.assert_frame_equal(result, expected) + + def test_multiheader_two_blank_lines(self, read_ext): + # GH 40442 + file_name = "testmultiindex" + read_ext + columns = MultiIndex.from_tuples([("a", "A"), ("b", "B")]) + data = [[np.nan, np.nan], [np.nan, np.nan], [1, 3], [2, 4]] + expected = DataFrame(data, columns=columns) + result = pd.read_excel( + file_name, sheet_name="mi_column_empty_rows", header=[0, 1] + ) + tm.assert_frame_equal(result, expected) + + def test_trailing_blanks(self, read_ext): + """ + Sheets can contain blank cells with no data. 
Some of our readers + were including those cells, creating many empty rows and columns + """ + file_name = "trailing_blanks" + read_ext + result = pd.read_excel(file_name) + assert result.shape == (3, 3) + + def test_ignore_chartsheets_by_str(self, request, engine, read_ext): + # GH 41448 + if engine == "odf": + pytest.skip("chartsheets do not exist in the ODF format") + if engine == "pyxlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="pyxlsb can't distinguish chartsheets from worksheets" + ) + ) + with pytest.raises(ValueError, match="Worksheet named 'Chart1' not found"): + pd.read_excel("chartsheet" + read_ext, sheet_name="Chart1") + + def test_ignore_chartsheets_by_int(self, request, engine, read_ext): + # GH 41448 + if engine == "odf": + pytest.skip("chartsheets do not exist in the ODF format") + if engine == "pyxlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="pyxlsb can't distinguish chartsheets from worksheets" + ) + ) + with pytest.raises( + ValueError, match="Worksheet index 1 is invalid, 1 worksheets found" + ): + pd.read_excel("chartsheet" + read_ext, sheet_name=1) + + def test_euro_decimal_format(self, read_ext): + # copied from read_csv + result = pd.read_excel("test_decimal" + read_ext, decimal=",", skiprows=1) + expected = DataFrame( + [ + [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819], + [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872], + [3, 878.158, 108013.434, "GHI", "rez", 2.735694704], + ], + columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"], + ) + tm.assert_frame_equal(result, expected) + + +class TestExcelFileRead: + def test_deprecate_bytes_input(self, engine, read_ext): + # GH 53830 + msg = ( + "Passing bytes to 'read_excel' is deprecated and " + "will be removed in a future version. To read from a " + "byte string, wrap it in a `BytesIO` object." + ) + + with tm.assert_produces_warning( + FutureWarning, match=msg, raise_on_extra_warnings=False + ): + with open("test1" + read_ext, "rb") as f: + pd.read_excel(f.read(), engine=engine) + + @pytest.fixture(autouse=True) + def cd_and_set_engine(self, engine, datapath, monkeypatch): + """ + Change directory and set engine for ExcelFile objects. 
+ """ + func = partial(pd.ExcelFile, engine=engine) + monkeypatch.chdir(datapath("io", "data", "excel")) + monkeypatch.setattr(pd, "ExcelFile", func) + + def test_engine_used(self, read_ext, engine): + expected_defaults = { + "xlsx": "openpyxl", + "xlsm": "openpyxl", + "xlsb": "pyxlsb", + "xls": "xlrd", + "ods": "odf", + } + + with pd.ExcelFile("test1" + read_ext) as excel: + result = excel.engine + + if engine is not None: + expected = engine + else: + expected = expected_defaults[read_ext[1:]] + assert result == expected + + def test_excel_passes_na(self, read_ext): + with pd.ExcelFile("test4" + read_ext) as excel: + parsed = pd.read_excel( + excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"] + ) + expected = DataFrame( + [["NA"], [1], ["NA"], [np.nan], ["rabbit"]], columns=["Test"] + ) + tm.assert_frame_equal(parsed, expected) + + with pd.ExcelFile("test4" + read_ext) as excel: + parsed = pd.read_excel( + excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"] + ) + expected = DataFrame( + [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"] + ) + tm.assert_frame_equal(parsed, expected) + + # 13967 + with pd.ExcelFile("test5" + read_ext) as excel: + parsed = pd.read_excel( + excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"] + ) + expected = DataFrame( + [["1.#QNAN"], [1], ["nan"], [np.nan], ["rabbit"]], columns=["Test"] + ) + tm.assert_frame_equal(parsed, expected) + + with pd.ExcelFile("test5" + read_ext) as excel: + parsed = pd.read_excel( + excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"] + ) + expected = DataFrame( + [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"] + ) + tm.assert_frame_equal(parsed, expected) + + @pytest.mark.parametrize("na_filter", [None, True, False]) + def test_excel_passes_na_filter(self, read_ext, na_filter): + # gh-25453 + kwargs = {} + + if na_filter is not None: + kwargs["na_filter"] = na_filter + + with pd.ExcelFile("test5" + read_ext) as excel: + parsed = pd.read_excel( + excel, + sheet_name="Sheet1", + keep_default_na=True, + na_values=["apple"], + **kwargs, + ) + + if na_filter is False: + expected = [["1.#QNAN"], [1], ["nan"], ["apple"], ["rabbit"]] + else: + expected = [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]] + + expected = DataFrame(expected, columns=["Test"]) + tm.assert_frame_equal(parsed, expected) + + def test_excel_table_sheet_by_index(self, request, read_ext, df_ref): + if read_ext == ".xlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + with pd.ExcelFile("test1" + read_ext) as excel: + df1 = pd.read_excel(excel, sheet_name=0, index_col=0) + df2 = pd.read_excel(excel, sheet_name=1, skiprows=[1], index_col=0) + tm.assert_frame_equal(df1, df_ref, check_names=False) + tm.assert_frame_equal(df2, df_ref, check_names=False) + + with pd.ExcelFile("test1" + read_ext) as excel: + df1 = excel.parse(0, index_col=0) + df2 = excel.parse(1, skiprows=[1], index_col=0) + tm.assert_frame_equal(df1, df_ref, check_names=False) + tm.assert_frame_equal(df2, df_ref, check_names=False) + + with pd.ExcelFile("test1" + read_ext) as excel: + df3 = pd.read_excel(excel, sheet_name=0, index_col=0, skipfooter=1) + tm.assert_frame_equal(df3, df1.iloc[:-1]) + + with pd.ExcelFile("test1" + read_ext) as excel: + df3 = excel.parse(0, index_col=0, skipfooter=1) + + tm.assert_frame_equal(df3, df1.iloc[:-1]) + + def test_sheet_name(self, request, read_ext, df_ref): + if read_ext == ".xlsb": + 
request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + filename = "test1" + sheet_name = "Sheet1" + + with pd.ExcelFile(filename + read_ext) as excel: + df1_parse = excel.parse(sheet_name=sheet_name, index_col=0) # doc + + with pd.ExcelFile(filename + read_ext) as excel: + df2_parse = excel.parse(index_col=0, sheet_name=sheet_name) + + tm.assert_frame_equal(df1_parse, df_ref, check_names=False) + tm.assert_frame_equal(df2_parse, df_ref, check_names=False) + + @pytest.mark.parametrize( + "sheet_name", + [3, [0, 3], [3, 0], "Sheet4", ["Sheet1", "Sheet4"], ["Sheet4", "Sheet1"]], + ) + def test_bad_sheetname_raises(self, read_ext, sheet_name): + # GH 39250 + msg = "Worksheet index 3 is invalid|Worksheet named 'Sheet4' not found" + with pytest.raises(ValueError, match=msg): + with pd.ExcelFile("blank" + read_ext) as excel: + excel.parse(sheet_name=sheet_name) + + def test_excel_read_buffer(self, engine, read_ext): + pth = "test1" + read_ext + expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0, engine=engine) + + with open(pth, "rb") as f: + with pd.ExcelFile(f) as xls: + actual = pd.read_excel(xls, sheet_name="Sheet1", index_col=0) + + tm.assert_frame_equal(expected, actual) + + def test_reader_closes_file(self, engine, read_ext): + with open("test1" + read_ext, "rb") as f: + with pd.ExcelFile(f) as xlsx: + # parses okay + pd.read_excel(xlsx, sheet_name="Sheet1", index_col=0, engine=engine) + + assert f.closed + + def test_conflicting_excel_engines(self, read_ext): + # GH 26566 + msg = "Engine should not be specified when passing an ExcelFile" + + with pd.ExcelFile("test1" + read_ext) as xl: + with pytest.raises(ValueError, match=msg): + pd.read_excel(xl, engine="foo") + + def test_excel_read_binary(self, engine, read_ext): + # GH 15914 + expected = pd.read_excel("test1" + read_ext, engine=engine) + + with open("test1" + read_ext, "rb") as f: + data = f.read() + + actual = pd.read_excel(BytesIO(data), engine=engine) + tm.assert_frame_equal(expected, actual) + + def test_excel_read_binary_via_read_excel(self, read_ext, engine): + # GH 38424 + with open("test1" + read_ext, "rb") as f: + result = pd.read_excel(f) + expected = pd.read_excel("test1" + read_ext, engine=engine) + tm.assert_frame_equal(result, expected) + + def test_read_excel_header_index_out_of_range(self, engine): + # GH#43143 + with open("df_header_oob.xlsx", "rb") as f: + with pytest.raises(ValueError, match="exceeds maximum"): + pd.read_excel(f, header=[0, 1]) + + @pytest.mark.parametrize("filename", ["df_empty.xlsx", "df_equals.xlsx"]) + def test_header_with_index_col(self, filename): + # GH 33476 + idx = Index(["Z"], name="I2") + cols = MultiIndex.from_tuples([("A", "B"), ("A", "B.1")], names=["I11", "I12"]) + expected = DataFrame([[1, 3]], index=idx, columns=cols, dtype="int64") + result = pd.read_excel( + filename, sheet_name="Sheet1", index_col=0, header=[0, 1] + ) + tm.assert_frame_equal(expected, result) + + def test_read_datetime_multiindex(self, request, engine, read_ext): + # GH 34748 + if engine == "pyxlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + f = "test_datetime_mi" + read_ext + with pd.ExcelFile(f) as excel: + actual = pd.read_excel(excel, header=[0, 1], index_col=0, engine=engine) + expected_column_index = MultiIndex.from_tuples( + [(pd.to_datetime("02/29/2020"), pd.to_datetime("03/01/2020"))], + names=[ + pd.to_datetime("02/29/2020").to_pydatetime(), + 
pd.to_datetime("03/01/2020").to_pydatetime(), + ], + ) + expected = DataFrame([], index=[], columns=expected_column_index) + + tm.assert_frame_equal(expected, actual) + + def test_engine_invalid_option(self, read_ext): + # read_ext includes the '.' hence the weird formatting + with pytest.raises(ValueError, match="Value must be one of *"): + with pd.option_context(f"io.excel{read_ext}.reader", "abc"): + pass + + def test_ignore_chartsheets(self, request, engine, read_ext): + # GH 41448 + if engine == "odf": + pytest.skip("chartsheets do not exist in the ODF format") + if engine == "pyxlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="pyxlsb can't distinguish chartsheets from worksheets" + ) + ) + with pd.ExcelFile("chartsheet" + read_ext) as excel: + assert excel.sheet_names == ["Sheet1"] + + def test_corrupt_files_closed(self, engine, read_ext): + # GH41778 + errors = (BadZipFile,) + if engine is None: + pytest.skip(f"Invalid test for engine={engine}") + elif engine == "xlrd": + import xlrd + + errors = (BadZipFile, xlrd.biffh.XLRDError) + + with tm.ensure_clean(f"corrupt{read_ext}") as file: + Path(file).write_text("corrupt", encoding="utf-8") + with tm.assert_produces_warning(False): + try: + pd.ExcelFile(file, engine=engine) + except errors: + pass diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_style.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_style.py new file mode 100644 index 00000000..3ca86378 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_style.py @@ -0,0 +1,294 @@ +import contextlib +import time + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + read_excel, +) +import pandas._testing as tm + +from pandas.io.excel import ExcelWriter +from pandas.io.formats.excel import ExcelFormatter + +pytest.importorskip("jinja2") +# jinja2 is currently required for Styler.__init__(). Technically Styler.to_excel +# could compute styles and render to excel without jinja2, since there is no +# 'template' file, but this needs the import error to delayed until render time. 
+ + +def assert_equal_cell_styles(cell1, cell2): + # TODO: should find a better way to check equality + assert cell1.alignment.__dict__ == cell2.alignment.__dict__ + assert cell1.border.__dict__ == cell2.border.__dict__ + assert cell1.fill.__dict__ == cell2.fill.__dict__ + assert cell1.font.__dict__ == cell2.font.__dict__ + assert cell1.number_format == cell2.number_format + assert cell1.protection.__dict__ == cell2.protection.__dict__ + + +@pytest.mark.parametrize( + "engine", + ["xlsxwriter", "openpyxl"], +) +def test_styler_to_excel_unstyled(engine): + # compare DataFrame.to_excel and Styler.to_excel when no styles applied + pytest.importorskip(engine) + df = DataFrame(np.random.default_rng(2).standard_normal((2, 2))) + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine=engine) as writer: + df.to_excel(writer, sheet_name="dataframe") + df.style.to_excel(writer, sheet_name="unstyled") + + openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + for col1, col2 in zip(wb["dataframe"].columns, wb["unstyled"].columns): + assert len(col1) == len(col2) + for cell1, cell2 in zip(col1, col2): + assert cell1.value == cell2.value + assert_equal_cell_styles(cell1, cell2) + + +shared_style_params = [ + ( + "background-color: #111222", + ["fill", "fgColor", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ( + "color: #111222", + ["font", "color", "value"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("font-family: Arial;", ["font", "name"], "arial"), + ("font-weight: bold;", ["font", "b"], True), + ("font-style: italic;", ["font", "i"], True), + ("text-decoration: underline;", ["font", "u"], "single"), + ("number-format: $??,???.00;", ["number_format"], "$??,???.00"), + ("text-align: left;", ["alignment", "horizontal"], "left"), + ( + "vertical-align: bottom;", + ["alignment", "vertical"], + {"xlsxwriter": None, "openpyxl": "bottom"}, # xlsxwriter Fails + ), + ("vertical-align: middle;", ["alignment", "vertical"], "center"), + # Border widths + ("border-left: 2pt solid red", ["border", "left", "style"], "medium"), + ("border-left: 1pt dotted red", ["border", "left", "style"], "dotted"), + ("border-left: 2pt dotted red", ["border", "left", "style"], "mediumDashDotDot"), + ("border-left: 1pt dashed red", ["border", "left", "style"], "dashed"), + ("border-left: 2pt dashed red", ["border", "left", "style"], "mediumDashed"), + ("border-left: 1pt solid red", ["border", "left", "style"], "thin"), + ("border-left: 3pt solid red", ["border", "left", "style"], "thick"), + # Border expansion + ( + "border-left: 2pt solid #111222", + ["border", "left", "color", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("border: 1pt solid red", ["border", "top", "style"], "thin"), + ( + "border: 1pt solid #111222", + ["border", "top", "color", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("border: 1pt solid red", ["border", "right", "style"], "thin"), + ( + "border: 1pt solid #111222", + ["border", "right", "color", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("border: 1pt solid red", ["border", "bottom", "style"], "thin"), + ( + "border: 1pt solid #111222", + ["border", "bottom", "color", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("border: 1pt solid red", ["border", "left", "style"], "thin"), + ( + "border: 1pt solid #111222", + ["border", "left", "color", "rgb"], + {"xlsxwriter": 
"FF111222", "openpyxl": "00111222"}, + ), + # Border styles + ( + "border-left-style: hair; border-left-color: black", + ["border", "left", "style"], + "hair", + ), +] + + +@pytest.mark.parametrize( + "engine", + ["xlsxwriter", "openpyxl"], +) +@pytest.mark.parametrize("css, attrs, expected", shared_style_params) +def test_styler_to_excel_basic(engine, css, attrs, expected): + pytest.importorskip(engine) + df = DataFrame(np.random.default_rng(2).standard_normal((1, 1))) + styler = df.style.map(lambda x: css) + + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine=engine) as writer: + df.to_excel(writer, sheet_name="dataframe") + styler.to_excel(writer, sheet_name="styled") + + openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + # test unstyled data cell does not have expected styles + # test styled cell has expected styles + u_cell, s_cell = wb["dataframe"].cell(2, 2), wb["styled"].cell(2, 2) + for attr in attrs: + u_cell, s_cell = getattr(u_cell, attr, None), getattr(s_cell, attr) + + if isinstance(expected, dict): + assert u_cell is None or u_cell != expected[engine] + assert s_cell == expected[engine] + else: + assert u_cell is None or u_cell != expected + assert s_cell == expected + + +@pytest.mark.parametrize( + "engine", + ["xlsxwriter", "openpyxl"], +) +@pytest.mark.parametrize("css, attrs, expected", shared_style_params) +def test_styler_to_excel_basic_indexes(engine, css, attrs, expected): + pytest.importorskip(engine) + df = DataFrame(np.random.default_rng(2).standard_normal((1, 1))) + + styler = df.style + styler.map_index(lambda x: css, axis=0) + styler.map_index(lambda x: css, axis=1) + + null_styler = df.style + null_styler.map(lambda x: "null: css;") + null_styler.map_index(lambda x: "null: css;", axis=0) + null_styler.map_index(lambda x: "null: css;", axis=1) + + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine=engine) as writer: + null_styler.to_excel(writer, sheet_name="null_styled") + styler.to_excel(writer, sheet_name="styled") + + openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + # test null styled index cells does not have expected styles + # test styled cell has expected styles + ui_cell, si_cell = wb["null_styled"].cell(2, 1), wb["styled"].cell(2, 1) + uc_cell, sc_cell = wb["null_styled"].cell(1, 2), wb["styled"].cell(1, 2) + for attr in attrs: + ui_cell, si_cell = getattr(ui_cell, attr, None), getattr(si_cell, attr) + uc_cell, sc_cell = getattr(uc_cell, attr, None), getattr(sc_cell, attr) + + if isinstance(expected, dict): + assert ui_cell is None or ui_cell != expected[engine] + assert si_cell == expected[engine] + assert uc_cell is None or uc_cell != expected[engine] + assert sc_cell == expected[engine] + else: + assert ui_cell is None or ui_cell != expected + assert si_cell == expected + assert uc_cell is None or uc_cell != expected + assert sc_cell == expected + + +# From https://openpyxl.readthedocs.io/en/stable/api/openpyxl.styles.borders.html +# Note: Leaving behavior of "width"-type styles undefined; user should use border-width +# instead +excel_border_styles = [ + # "thin", + "dashed", + "mediumDashDot", + "dashDotDot", + "hair", + "dotted", + "mediumDashDotDot", + # "medium", + "double", + "dashDot", + "slantDashDot", + # "thick", + "mediumDashed", +] + + +@pytest.mark.parametrize( + "engine", + ["xlsxwriter", "openpyxl"], +) 
+@pytest.mark.parametrize("border_style", excel_border_styles) +def test_styler_to_excel_border_style(engine, border_style): + css = f"border-left: {border_style} black thin" + attrs = ["border", "left", "style"] + expected = border_style + + pytest.importorskip(engine) + df = DataFrame(np.random.default_rng(2).standard_normal((1, 1))) + styler = df.style.map(lambda x: css) + + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine=engine) as writer: + df.to_excel(writer, sheet_name="dataframe") + styler.to_excel(writer, sheet_name="styled") + + openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + # test unstyled data cell does not have expected styles + # test styled cell has expected styles + u_cell, s_cell = wb["dataframe"].cell(2, 2), wb["styled"].cell(2, 2) + for attr in attrs: + u_cell, s_cell = getattr(u_cell, attr, None), getattr(s_cell, attr) + + if isinstance(expected, dict): + assert u_cell is None or u_cell != expected[engine] + assert s_cell == expected[engine] + else: + assert u_cell is None or u_cell != expected + assert s_cell == expected + + +def test_styler_custom_converter(): + openpyxl = pytest.importorskip("openpyxl") + + def custom_converter(css): + return {"font": {"color": {"rgb": "111222"}}} + + df = DataFrame(np.random.default_rng(2).standard_normal((1, 1))) + styler = df.style.map(lambda x: "color: #888999") + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine="openpyxl") as writer: + ExcelFormatter(styler, style_converter=custom_converter).write( + writer, sheet_name="custom" + ) + + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + assert wb["custom"].cell(2, 2).font.color.value == "00111222" + + +@pytest.mark.single_cpu +@td.skip_if_not_us_locale +def test_styler_to_s3(s3_public_bucket, s3so): + # GH#46381 + + mock_bucket_name, target_file = s3_public_bucket.name, "test.xlsx" + df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]}) + styler = df.style.set_sticky(axis="index") + styler.to_excel(f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so) + timeout = 5 + while True: + if target_file in (obj.key for obj in s3_public_bucket.objects.all()): + break + time.sleep(0.1) + timeout -= 0.1 + assert timeout > 0, "Timed out waiting for file to appear on moto" + result = read_excel( + f"s3://{mock_bucket_name}/{target_file}", index_col=0, storage_options=s3so + ) + tm.assert_frame_equal(result, df) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_writers.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_writers.py new file mode 100644 index 00000000..3ab703ea --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_writers.py @@ -0,0 +1,1383 @@ +from datetime import ( + date, + datetime, + timedelta, +) +from functools import partial +from io import BytesIO +import os +import re + +import numpy as np +import pytest + +from pandas.compat._constants import PY310 +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + option_context, +) +import pandas._testing as tm + +from pandas.io.excel import ( + ExcelFile, + ExcelWriter, + _OpenpyxlWriter, + _XlsxWriter, + register_writer, +) +from pandas.io.excel._util import _writers + + +@pytest.fixture +def path(ext): + """ + Fixture to open file for use in each test case. 
+ """ + with tm.ensure_clean(ext) as file_path: + yield file_path + + +@pytest.fixture +def set_engine(engine, ext): + """ + Fixture to set engine for use in each test case. + + Rather than requiring `engine=...` to be provided explicitly as an + argument in each test, this fixture sets a global option to dictate + which engine should be used to write Excel files. After executing + the test it rolls back said change to the global option. + """ + option_name = f"io.excel.{ext.strip('.')}.writer" + with option_context(option_name, engine): + yield + + +@pytest.mark.parametrize( + "ext", + [ + pytest.param(".xlsx", marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")]), + pytest.param(".xlsm", marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")]), + pytest.param( + ".xlsx", marks=[td.skip_if_no("xlsxwriter"), td.skip_if_no("xlrd")] + ), + pytest.param(".ods", marks=td.skip_if_no("odf")), + ], +) +class TestRoundTrip: + @pytest.mark.parametrize( + "header,expected", + [(None, DataFrame([np.nan] * 4)), (0, DataFrame({"Unnamed: 0": [np.nan] * 3}))], + ) + def test_read_one_empty_col_no_header(self, ext, header, expected): + # xref gh-12292 + filename = "no_header" + df = DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]]) + + with tm.ensure_clean(ext) as path: + df.to_excel(path, filename, index=False, header=False) + result = pd.read_excel( + path, sheet_name=filename, usecols=[0], header=header + ) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "header,expected", + [(None, DataFrame([0] + [np.nan] * 4)), (0, DataFrame([np.nan] * 4))], + ) + def test_read_one_empty_col_with_header(self, ext, header, expected): + filename = "with_header" + df = DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]]) + + with tm.ensure_clean(ext) as path: + df.to_excel(path, "with_header", index=False, header=True) + result = pd.read_excel( + path, sheet_name=filename, usecols=[0], header=header + ) + + tm.assert_frame_equal(result, expected) + + def test_set_column_names_in_parameter(self, ext): + # GH 12870 : pass down column names associated with + # keyword argument names + refdf = DataFrame([[1, "foo"], [2, "bar"], [3, "baz"]], columns=["a", "b"]) + + with tm.ensure_clean(ext) as pth: + with ExcelWriter(pth) as writer: + refdf.to_excel(writer, "Data_no_head", header=False, index=False) + refdf.to_excel(writer, "Data_with_head", index=False) + + refdf.columns = ["A", "B"] + + with ExcelFile(pth) as reader: + xlsdf_no_head = pd.read_excel( + reader, sheet_name="Data_no_head", header=None, names=["A", "B"] + ) + xlsdf_with_head = pd.read_excel( + reader, + sheet_name="Data_with_head", + index_col=None, + names=["A", "B"], + ) + + tm.assert_frame_equal(xlsdf_no_head, refdf) + tm.assert_frame_equal(xlsdf_with_head, refdf) + + def test_creating_and_reading_multiple_sheets(self, ext): + # see gh-9450 + # + # Test reading multiple sheets, from a runtime + # created Excel file with multiple sheets. 
+ def tdf(col_sheet_name): + d, i = [11, 22, 33], [1, 2, 3] + return DataFrame(d, i, columns=[col_sheet_name]) + + sheets = ["AAA", "BBB", "CCC"] + + dfs = [tdf(s) for s in sheets] + dfs = dict(zip(sheets, dfs)) + + with tm.ensure_clean(ext) as pth: + with ExcelWriter(pth) as ew: + for sheetname, df in dfs.items(): + df.to_excel(ew, sheetname) + + dfs_returned = pd.read_excel(pth, sheet_name=sheets, index_col=0) + + for s in sheets: + tm.assert_frame_equal(dfs[s], dfs_returned[s]) + + def test_read_excel_multiindex_empty_level(self, ext): + # see gh-12453 + with tm.ensure_clean(ext) as path: + df = DataFrame( + { + ("One", "x"): {0: 1}, + ("Two", "X"): {0: 3}, + ("Two", "Y"): {0: 7}, + ("Zero", ""): {0: 0}, + } + ) + + expected = DataFrame( + { + ("One", "x"): {0: 1}, + ("Two", "X"): {0: 3}, + ("Two", "Y"): {0: 7}, + ("Zero", "Unnamed: 4_level_1"): {0: 0}, + } + ) + + df.to_excel(path) + actual = pd.read_excel(path, header=[0, 1], index_col=0) + tm.assert_frame_equal(actual, expected) + + df = DataFrame( + { + ("Beg", ""): {0: 0}, + ("Middle", "x"): {0: 1}, + ("Tail", "X"): {0: 3}, + ("Tail", "Y"): {0: 7}, + } + ) + + expected = DataFrame( + { + ("Beg", "Unnamed: 1_level_1"): {0: 0}, + ("Middle", "x"): {0: 1}, + ("Tail", "X"): {0: 3}, + ("Tail", "Y"): {0: 7}, + } + ) + + df.to_excel(path) + actual = pd.read_excel(path, header=[0, 1], index_col=0) + tm.assert_frame_equal(actual, expected) + + @pytest.mark.parametrize("c_idx_names", [True, False]) + @pytest.mark.parametrize("r_idx_names", [True, False]) + @pytest.mark.parametrize("c_idx_levels", [1, 3]) + @pytest.mark.parametrize("r_idx_levels", [1, 3]) + def test_excel_multindex_roundtrip( + self, ext, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels, request + ): + # see gh-4679 + with tm.ensure_clean(ext) as pth: + if (c_idx_levels == 1 and c_idx_names) and not ( + r_idx_levels == 3 and not r_idx_names + ): + mark = pytest.mark.xfail( + reason="Column index name cannot be serialized unless " + "it's a MultiIndex" + ) + request.node.add_marker(mark) + + # Empty name case is currently read in as + # unnamed levels, not Nones.
+ check_names = r_idx_names or r_idx_levels <= 1 + + df = tm.makeCustomDataframe( + 5, 5, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels + ) + df.to_excel(pth) + + act = pd.read_excel( + pth, + index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels)), + ) + tm.assert_frame_equal(df, act, check_names=check_names) + + df.iloc[0, :] = np.nan + df.to_excel(pth) + + act = pd.read_excel( + pth, + index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels)), + ) + tm.assert_frame_equal(df, act, check_names=check_names) + + df.iloc[-1, :] = np.nan + df.to_excel(pth) + act = pd.read_excel( + pth, + index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels)), + ) + tm.assert_frame_equal(df, act, check_names=check_names) + + def test_read_excel_parse_dates(self, ext): + # see gh-11544, gh-12051 + df = DataFrame( + {"col": [1, 2, 3], "date_strings": pd.date_range("2012-01-01", periods=3)} + ) + df2 = df.copy() + df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y") + + with tm.ensure_clean(ext) as pth: + df2.to_excel(pth) + + res = pd.read_excel(pth, index_col=0) + tm.assert_frame_equal(df2, res) + + res = pd.read_excel(pth, parse_dates=["date_strings"], index_col=0) + tm.assert_frame_equal(df, res) + + date_parser = lambda x: datetime.strptime(x, "%m/%d/%Y") + with tm.assert_produces_warning( + FutureWarning, + match="use 'date_format' instead", + raise_on_extra_warnings=False, + ): + res = pd.read_excel( + pth, + parse_dates=["date_strings"], + date_parser=date_parser, + index_col=0, + ) + tm.assert_frame_equal(df, res) + res = pd.read_excel( + pth, parse_dates=["date_strings"], date_format="%m/%d/%Y", index_col=0 + ) + tm.assert_frame_equal(df, res) + + def test_multiindex_interval_datetimes(self, ext): + # GH 30986 + midx = MultiIndex.from_arrays( + [ + range(4), + pd.interval_range( + start=pd.Timestamp("2020-01-01"), periods=4, freq="6M" + ), + ] + ) + df = DataFrame(range(4), index=midx) + with tm.ensure_clean(ext) as pth: + df.to_excel(pth) + result = pd.read_excel(pth, index_col=[0, 1]) + expected = DataFrame( + range(4), + MultiIndex.from_arrays( + [ + range(4), + [ + "(2020-01-31, 2020-07-31]", + "(2020-07-31, 2021-01-31]", + "(2021-01-31, 2021-07-31]", + "(2021-07-31, 2022-01-31]", + ], + ] + ), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "engine,ext", + [ + pytest.param( + "openpyxl", + ".xlsx", + marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")], + ), + pytest.param( + "openpyxl", + ".xlsm", + marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")], + ), + pytest.param( + "xlsxwriter", + ".xlsx", + marks=[td.skip_if_no("xlsxwriter"), td.skip_if_no("xlrd")], + ), + pytest.param("odf", ".ods", marks=td.skip_if_no("odf")), + ], +) +@pytest.mark.usefixtures("set_engine") +class TestExcelWriter: + def test_excel_sheet_size(self, path): + # GH 26080 + breaking_row_count = 2**20 + 1 + breaking_col_count = 2**14 + 1 + # purposely using two arrays to prevent memory issues while testing + row_arr = np.zeros(shape=(breaking_row_count, 1)) + col_arr = np.zeros(shape=(1, breaking_col_count)) + row_df = DataFrame(row_arr) + col_df = DataFrame(col_arr) + + msg = "sheet is too large" + with pytest.raises(ValueError, match=msg): + row_df.to_excel(path) + + with pytest.raises(ValueError, match=msg): + col_df.to_excel(path) + + def test_excel_sheet_by_name_raise(self, path): + gt = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + gt.to_excel(path) + + with ExcelFile(path) as xl: + df = 
pd.read_excel(xl, sheet_name=0, index_col=0) + + tm.assert_frame_equal(gt, df) + + msg = "Worksheet named '0' not found" + with pytest.raises(ValueError, match=msg): + pd.read_excel(xl, "0") + + def test_excel_writer_context_manager(self, frame, path): + with ExcelWriter(path) as writer: + frame.to_excel(writer, "Data1") + frame2 = frame.copy() + frame2.columns = frame.columns[::-1] + frame2.to_excel(writer, "Data2") + + with ExcelFile(path) as reader: + found_df = pd.read_excel(reader, sheet_name="Data1", index_col=0) + found_df2 = pd.read_excel(reader, sheet_name="Data2", index_col=0) + + tm.assert_frame_equal(found_df, frame) + tm.assert_frame_equal(found_df2, frame2) + + def test_roundtrip(self, frame, path): + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + + frame.to_excel(path, "test1") + frame.to_excel(path, "test1", columns=["A", "B"]) + frame.to_excel(path, "test1", header=False) + frame.to_excel(path, "test1", index=False) + + # test roundtrip + frame.to_excel(path, "test1") + recons = pd.read_excel(path, sheet_name="test1", index_col=0) + tm.assert_frame_equal(frame, recons) + + frame.to_excel(path, "test1", index=False) + recons = pd.read_excel(path, sheet_name="test1", index_col=None) + recons.index = frame.index + tm.assert_frame_equal(frame, recons) + + frame.to_excel(path, "test1", na_rep="NA") + recons = pd.read_excel(path, sheet_name="test1", index_col=0, na_values=["NA"]) + tm.assert_frame_equal(frame, recons) + + # GH 3611 + frame.to_excel(path, "test1", na_rep="88") + recons = pd.read_excel(path, sheet_name="test1", index_col=0, na_values=["88"]) + tm.assert_frame_equal(frame, recons) + + frame.to_excel(path, "test1", na_rep="88") + recons = pd.read_excel( + path, sheet_name="test1", index_col=0, na_values=[88, 88.0] + ) + tm.assert_frame_equal(frame, recons) + + # GH 6573 + frame.to_excel(path, "Sheet1") + recons = pd.read_excel(path, index_col=0) + tm.assert_frame_equal(frame, recons) + + frame.to_excel(path, "0") + recons = pd.read_excel(path, index_col=0) + tm.assert_frame_equal(frame, recons) + + # GH 8825 Pandas Series should provide to_excel method + s = frame["A"] + s.to_excel(path) + recons = pd.read_excel(path, index_col=0) + tm.assert_frame_equal(s.to_frame(), recons) + + def test_mixed(self, frame, path): + mixed_frame = frame.copy() + mixed_frame["foo"] = "bar" + + mixed_frame.to_excel(path, "test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(mixed_frame, recons) + + def test_ts_frame(self, tsframe, path): + df = tsframe + + # freq doesn't round-trip + index = pd.DatetimeIndex(np.asarray(df.index), freq=None) + df.index = index + + df.to_excel(path, "test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(df, recons) + + def test_basics_with_nan(self, frame, path): + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + frame.to_excel(path, "test1") + frame.to_excel(path, "test1", columns=["A", "B"]) + frame.to_excel(path, "test1", header=False) + frame.to_excel(path, "test1", index=False) + + @pytest.mark.parametrize("np_type", [np.int8, np.int16, np.int32, np.int64]) + def test_int_types(self, np_type, path): + # Test np.int values read come back as int + # (rather than float which is Excel's format). 
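# Why the astype(np.int64) cast below is needed, in miniature (illustrative
# only; assumes openpyxl is installed and "tmp_ints.xlsx" is a hypothetical
# file name): Excel stores all numbers as floats, so integer dtypes are
# re-inferred on read and narrow types widen to int64.
import numpy as np
import pandas as pd

ints = pd.DataFrame({"n": np.array([1, 2, 3], dtype=np.int8)})
ints.to_excel("tmp_ints.xlsx")
back_ints = pd.read_excel("tmp_ints.xlsx", index_col=0)
assert back_ints["n"].dtype == np.int64  # integral again, but widened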
+ df = DataFrame( + np.random.default_rng(2).integers(-10, 10, size=(10, 2)), dtype=np_type + ) + df.to_excel(path, "test1") + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + + int_frame = df.astype(np.int64) + tm.assert_frame_equal(int_frame, recons) + + recons2 = pd.read_excel(path, sheet_name="test1", index_col=0) + tm.assert_frame_equal(int_frame, recons2) + + @pytest.mark.parametrize("np_type", [np.float16, np.float32, np.float64]) + def test_float_types(self, np_type, path): + # Test np.float values read come back as float. + df = DataFrame(np.random.default_rng(2).random(10), dtype=np_type) + df.to_excel(path, "test1") + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype( + np_type + ) + + tm.assert_frame_equal(df, recons) + + def test_bool_types(self, path): + # Test np.bool_ values read come back as bool. + df = DataFrame([1, 0, True, False], dtype=np.bool_) + df.to_excel(path, "test1") + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype( + np.bool_ + ) + + tm.assert_frame_equal(df, recons) + + def test_inf_roundtrip(self, path): + df = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)]) + df.to_excel(path, "test1") + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + + tm.assert_frame_equal(df, recons) + + def test_sheets(self, frame, tsframe, path): + # freq doesn't round-trip + index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None) + tsframe.index = index + + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + + frame.to_excel(path, "test1") + frame.to_excel(path, "test1", columns=["A", "B"]) + frame.to_excel(path, "test1", header=False) + frame.to_excel(path, "test1", index=False) + + # Test writing to separate sheets + with ExcelWriter(path) as writer: + frame.to_excel(writer, "test1") + tsframe.to_excel(writer, "test2") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(frame, recons) + recons = pd.read_excel(reader, sheet_name="test2", index_col=0) + tm.assert_frame_equal(tsframe, recons) + assert 2 == len(reader.sheet_names) + assert "test1" == reader.sheet_names[0] + assert "test2" == reader.sheet_names[1] + + def test_colaliases(self, frame, path): + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + + frame.to_excel(path, "test1") + frame.to_excel(path, "test1", columns=["A", "B"]) + frame.to_excel(path, "test1", header=False) + frame.to_excel(path, "test1", index=False) + + # column aliases + col_aliases = Index(["AA", "X", "Y", "Z"]) + frame.to_excel(path, "test1", header=col_aliases) + with ExcelFile(path) as reader: + rs = pd.read_excel(reader, sheet_name="test1", index_col=0) + xp = frame.copy() + xp.columns = col_aliases + tm.assert_frame_equal(xp, rs) + + def test_roundtrip_indexlabels(self, merge_cells, frame, path): + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + + frame.to_excel(path, "test1") + frame.to_excel(path, "test1", columns=["A", "B"]) + frame.to_excel(path, "test1", header=False) + frame.to_excel(path, "test1", index=False) + + # test index_label + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0 + df.to_excel(path, "test1", index_label=["test"], merge_cells=merge_cells) + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1",
index_col=0).astype( + np.int64 + ) + df.index.names = ["test"] + assert df.index.names == recons.index.names + + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0 + df.to_excel( + path, + "test1", + index_label=["test", "dummy", "dummy2"], + merge_cells=merge_cells, + ) + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype( + np.int64 + ) + df.index.names = ["test"] + assert df.index.names == recons.index.names + + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0 + df.to_excel(path, "test1", index_label="test", merge_cells=merge_cells) + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype( + np.int64 + ) + df.index.names = ["test"] + tm.assert_frame_equal(df, recons.astype(bool)) + + frame.to_excel( + path, + "test1", + columns=["A", "B", "C", "D"], + index=False, + merge_cells=merge_cells, + ) + # take 'A' and 'B' as indexes (same row as cols 'C', 'D') + df = frame.copy() + df = df.set_index(["A", "B"]) + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1]) + tm.assert_frame_equal(df, recons) + + def test_excel_roundtrip_indexname(self, merge_cells, path): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + df.index.name = "foo" + + df.to_excel(path, merge_cells=merge_cells) + + with ExcelFile(path) as xf: + result = pd.read_excel(xf, sheet_name=xf.sheet_names[0], index_col=0) + + tm.assert_frame_equal(result, df) + assert result.index.name == "foo" + + def test_excel_roundtrip_datetime(self, merge_cells, tsframe, path): + # datetime.date, not sure what to test here exactly + + # freq does not round-trip + index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None) + tsframe.index = index + + tsf = tsframe.copy() + + tsf.index = [x.date() for x in tsframe.index] + tsf.to_excel(path, "test1", merge_cells=merge_cells) + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + + tm.assert_frame_equal(tsframe, recons) + + def test_excel_date_datetime_format(self, ext, path): + # see gh-4133 + # + # Excel output format strings + df = DataFrame( + [ + [date(2014, 1, 31), date(1999, 9, 24)], + [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)], + ], + index=["DATE", "DATETIME"], + columns=["X", "Y"], + ) + df_expected = DataFrame( + [ + [datetime(2014, 1, 31), datetime(1999, 9, 24)], + [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)], + ], + index=["DATE", "DATETIME"], + columns=["X", "Y"], + ) + + with tm.ensure_clean(ext) as filename2: + with ExcelWriter(path) as writer1: + df.to_excel(writer1, "test1") + + with ExcelWriter( + filename2, + date_format="DD.MM.YYYY", + datetime_format="DD.MM.YYYY HH-MM-SS", + ) as writer2: + df.to_excel(writer2, "test1") + + with ExcelFile(path) as reader1: + rs1 = pd.read_excel(reader1, sheet_name="test1", index_col=0) + + with ExcelFile(filename2) as reader2: + rs2 = pd.read_excel(reader2, sheet_name="test1", index_col=0) + + tm.assert_frame_equal(rs1, rs2) + + # Since the reader returns a datetime object for dates, + # we need to use df_expected to check the result. + tm.assert_frame_equal(rs2, df_expected) + + def test_to_excel_interval_no_labels(self, path): + # see gh-19242 + # + # Test writing Interval without labels. 
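# The Interval round-trip below, in miniature (illustrative only; assumes
# openpyxl is installed and "tmp_iv.xlsx" is a hypothetical file name):
# Interval values have no native Excel representation, so they come back as
# plain strings, which is what the expected frame encodes via astype(str).
import pandas as pd

iv = pd.DataFrame({"x": [1, 5, 9]})
iv["bin"] = pd.cut(iv["x"], 2)
iv.to_excel("tmp_iv.xlsx")
back_iv = pd.read_excel("tmp_iv.xlsx", index_col=0)
assert back_iv["bin"].dtype == object  # e.g. "(0.992, 5.0]" as a string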
+ df = DataFrame( + np.random.default_rng(2).integers(-10, 10, size=(20, 1)), dtype=np.int64 + ) + expected = df.copy() + + df["new"] = pd.cut(df[0], 10) + expected["new"] = pd.cut(expected[0], 10).astype(str) + + df.to_excel(path, "test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(expected, recons) + + def test_to_excel_interval_labels(self, path): + # see gh-19242 + # + # Test writing Interval with labels. + df = DataFrame( + np.random.default_rng(2).integers(-10, 10, size=(20, 1)), dtype=np.int64 + ) + expected = df.copy() + intervals = pd.cut( + df[0], 10, labels=["A", "B", "C", "D", "E", "F", "G", "H", "I", "J"] + ) + df["new"] = intervals + expected["new"] = pd.Series(list(intervals)) + + df.to_excel(path, "test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(expected, recons) + + def test_to_excel_timedelta(self, path): + # see gh-19242, gh-9155 + # + # Test writing timedelta to xls. + df = DataFrame( + np.random.default_rng(2).integers(-10, 10, size=(20, 1)), + columns=["A"], + dtype=np.int64, + ) + expected = df.copy() + + df["new"] = df["A"].apply(lambda x: timedelta(seconds=x)) + expected["new"] = expected["A"].apply( + lambda x: timedelta(seconds=x).total_seconds() / 86400 + ) + + df.to_excel(path, "test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(expected, recons) + + def test_to_excel_periodindex(self, tsframe, path): + xp = tsframe.resample("M", kind="period").mean() + + xp.to_excel(path, "sht1") + + with ExcelFile(path) as reader: + rs = pd.read_excel(reader, sheet_name="sht1", index_col=0) + tm.assert_frame_equal(xp, rs.to_period("M")) + + def test_to_excel_multiindex(self, merge_cells, frame, path): + arrays = np.arange(len(frame.index) * 2, dtype=np.int64).reshape(2, -1) + new_index = MultiIndex.from_arrays(arrays, names=["first", "second"]) + frame.index = new_index + + frame.to_excel(path, "test1", header=False) + frame.to_excel(path, "test1", columns=["A", "B"]) + + # round trip + frame.to_excel(path, "test1", merge_cells=merge_cells) + with ExcelFile(path) as reader: + df = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1]) + tm.assert_frame_equal(frame, df) + + # GH13511 + def test_to_excel_multiindex_nan_label(self, merge_cells, path): + df = DataFrame( + { + "A": [None, 2, 3], + "B": [10, 20, 30], + "C": np.random.default_rng(2).random(3), + } + ) + df = df.set_index(["A", "B"]) + + df.to_excel(path, merge_cells=merge_cells) + df1 = pd.read_excel(path, index_col=[0, 1]) + tm.assert_frame_equal(df, df1) + + # Test for Issue 11328. 
If column indices are integers, make + # sure they are handled correctly for either setting of + # merge_cells + def test_to_excel_multiindex_cols(self, merge_cells, frame, path): + arrays = np.arange(len(frame.index) * 2, dtype=np.int64).reshape(2, -1) + new_index = MultiIndex.from_arrays(arrays, names=["first", "second"]) + frame.index = new_index + + new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2), (50, 1), (50, 2)]) + frame.columns = new_cols_index + header = [0, 1] + if not merge_cells: + header = 0 + + # round trip + frame.to_excel(path, "test1", merge_cells=merge_cells) + with ExcelFile(path) as reader: + df = pd.read_excel( + reader, sheet_name="test1", header=header, index_col=[0, 1] + ) + if not merge_cells: + fm = frame.columns.format(sparsify=False, adjoin=False, names=False) + frame.columns = [".".join(map(str, q)) for q in zip(*fm)] + tm.assert_frame_equal(frame, df) + + def test_to_excel_multiindex_dates(self, merge_cells, tsframe, path): + # try multiindex with dates + new_index = [tsframe.index, np.arange(len(tsframe.index), dtype=np.int64)] + tsframe.index = MultiIndex.from_arrays(new_index) + + tsframe.index.names = ["time", "foo"] + tsframe.to_excel(path, "test1", merge_cells=merge_cells) + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1]) + + tm.assert_frame_equal(tsframe, recons) + assert recons.index.names == ("time", "foo") + + def test_to_excel_multiindex_no_write_index(self, path): + # Test writing and re-reading a MI without the index. GH 5616. + + # Initial non-MI frame. + frame1 = DataFrame({"a": [10, 20], "b": [30, 40], "c": [50, 60]}) + + # Add a MI. + frame2 = frame1.copy() + multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)]) + frame2.index = multi_index + + # Write out to Excel without the index. + frame2.to_excel(path, "test1", index=False) + + # Read it back in. + with ExcelFile(path) as reader: + frame3 = pd.read_excel(reader, sheet_name="test1") + + # Test that it is the same as the initial frame. + tm.assert_frame_equal(frame1, frame3) + + def test_to_excel_empty_multiindex(self, path): + # GH 19543. + expected = DataFrame([], columns=[0, 1, 2]) + + df = DataFrame([], index=MultiIndex.from_tuples([], names=[0, 1]), columns=[2]) + df.to_excel(path, "test1") + + with ExcelFile(path) as reader: + result = pd.read_excel(reader, sheet_name="test1") + tm.assert_frame_equal( + result, expected, check_index_type=False, check_dtype=False + ) + + def test_to_excel_float_format(self, path): + df = DataFrame( + [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + df.to_excel(path, "test1", float_format="%.2f") + + with ExcelFile(path) as reader: + result = pd.read_excel(reader, sheet_name="test1", index_col=0) + + expected = DataFrame( + [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + tm.assert_frame_equal(result, expected) + + def test_to_excel_output_encoding(self, ext): + # Avoid mixed inferred_type. + df = DataFrame( + [["\u0192", "\u0193", "\u0194"], ["\u0195", "\u0196", "\u0197"]], + index=["A\u0192", "B"], + columns=["X\u0193", "Y", "Z"], + ) + + with tm.ensure_clean("__tmp_to_excel_float_format__." + ext) as filename: + df.to_excel(filename, sheet_name="TestSheet") + result = pd.read_excel(filename, sheet_name="TestSheet", index_col=0) + tm.assert_frame_equal(result, df) + + def test_to_excel_unicode_filename(self, ext): + with tm.ensure_clean("\u0192u." 
+ ext) as filename: + try: + with open(filename, "wb"): + pass + except UnicodeEncodeError: + pytest.skip("No unicode file names on this system") + + df = DataFrame( + [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + df.to_excel(filename, "test1", float_format="%.2f") + + with ExcelFile(filename) as reader: + result = pd.read_excel(reader, sheet_name="test1", index_col=0) + + expected = DataFrame( + [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("use_headers", [True, False]) + @pytest.mark.parametrize("r_idx_nlevels", [1, 2, 3]) + @pytest.mark.parametrize("c_idx_nlevels", [1, 2, 3]) + def test_excel_010_hemstring( + self, merge_cells, c_idx_nlevels, r_idx_nlevels, use_headers, path + ): + def roundtrip(data, header=True, parser_hdr=0, index=True): + data.to_excel(path, header=header, merge_cells=merge_cells, index=index) + + with ExcelFile(path) as xf: + return pd.read_excel( + xf, sheet_name=xf.sheet_names[0], header=parser_hdr + ) + + # Basic test. + parser_header = 0 if use_headers else None + res = roundtrip(DataFrame([0]), use_headers, parser_header) + + assert res.shape == (1, 2) + assert res.iloc[0, 0] is not np.nan + + # More complex tests with multi-index. + nrows = 5 + ncols = 3 + + # ensure limited functionality in 0.10 + # override of gh-2370 until sorted out in 0.11 + + df = tm.makeCustomDataframe( + nrows, ncols, r_idx_nlevels=r_idx_nlevels, c_idx_nlevels=c_idx_nlevels + ) + + # This if will be removed once multi-column Excel writing + # is implemented. For now fixing gh-9794. + if c_idx_nlevels > 1: + msg = ( + "Writing to Excel with MultiIndex columns and no index " + "\\('index'=False\\) is not yet implemented." + ) + with pytest.raises(NotImplementedError, match=msg): + roundtrip(df, use_headers, index=False) + else: + res = roundtrip(df, use_headers) + + if use_headers: + assert res.shape == (nrows, ncols + r_idx_nlevels) + else: + # First row taken as columns. + assert res.shape == (nrows - 1, ncols + r_idx_nlevels) + + # No NaNs. + for r in range(len(res.index)): + for c in range(len(res.columns)): + assert res.iloc[r, c] is not np.nan + + def test_duplicated_columns(self, path): + # see gh-5235 + df = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["A", "B", "B"]) + df.to_excel(path, "test1") + expected = DataFrame( + [[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["A", "B", "B.1"] + ) + + # By default, we mangle. + result = pd.read_excel(path, sheet_name="test1", index_col=0) + tm.assert_frame_equal(result, expected) + + # see gh-11007, gh-10970 + df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A", "B"]) + df.to_excel(path, "test1") + + result = pd.read_excel(path, sheet_name="test1", index_col=0) + expected = DataFrame( + [[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A.1", "B.1"] + ) + tm.assert_frame_equal(result, expected) + + # see gh-10982 + df.to_excel(path, "test1", index=False, header=False) + result = pd.read_excel(path, sheet_name="test1", header=None) + + expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]]) + tm.assert_frame_equal(result, expected) + + def test_swapped_columns(self, path): + # Test for issue #5427. 
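+        # Roughly what gh-5427 pins down (illustrative only): to_excel's
+        # `columns` argument selects *and orders* what is written, e.g.
+        #
+        #     DataFrame({"A": [1], "B": [2]}).to_excel(p, columns=["B", "A"])
+        #
+        # writes B before A, and read_excel returns the columns in that written
+        # order, so each column must be compared by label rather than position.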
+        write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]})
+        write_frame.to_excel(path, "test1", columns=["B", "A"])
+
+        read_frame = pd.read_excel(path, sheet_name="test1", header=0)
+
+        tm.assert_series_equal(write_frame["A"], read_frame["A"])
+        tm.assert_series_equal(write_frame["B"], read_frame["B"])
+
+    def test_invalid_columns(self, path):
+        # see gh-10982
+        write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]})
+
+        with pytest.raises(KeyError, match="Not all names specified"):
+            write_frame.to_excel(path, "test1", columns=["B", "C"])
+
+        with pytest.raises(
+            KeyError, match="'passes columns are not ALL present dataframe'"
+        ):
+            write_frame.to_excel(path, "test1", columns=["C", "D"])
+
+    @pytest.mark.parametrize(
+        "to_excel_index,read_excel_index_col",
+        [
+            (True, 0),  # Include index in write to file
+            (False, None),  # Don't include index in write to file
+        ],
+    )
+    def test_write_subset_columns(self, path, to_excel_index, read_excel_index_col):
+        # GH 31677
+        write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2], "C": [3, 3, 3]})
+        write_frame.to_excel(
+            path, "col_subset_bug", columns=["A", "B"], index=to_excel_index
+        )
+
+        expected = write_frame[["A", "B"]]
+        read_frame = pd.read_excel(
+            path, sheet_name="col_subset_bug", index_col=read_excel_index_col
+        )
+
+        tm.assert_frame_equal(expected, read_frame)
+
+    def test_comment_arg(self, path):
+        # see gh-18735
+        #
+        # Test the comment argument functionality to pd.read_excel.
+
+        # Create file to read in.
+        df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
+        df.to_excel(path, "test_c")
+
+        # Read file without comment arg.
+        result1 = pd.read_excel(path, sheet_name="test_c", index_col=0)
+
+        result1.iloc[1, 0] = None
+        result1.iloc[1, 1] = None
+        result1.iloc[2, 1] = None
+
+        result2 = pd.read_excel(path, sheet_name="test_c", comment="#", index_col=0)
+        tm.assert_frame_equal(result1, result2)
+
+    def test_comment_default(self, path):
+        # Re issue #18735
+        # Test the comment argument default to pd.read_excel
+
+        # Create file to read in
+        df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
+        df.to_excel(path, "test_c")
+
+        # Read file with default and explicit comment=None
+        result1 = pd.read_excel(path, sheet_name="test_c")
+        result2 = pd.read_excel(path, sheet_name="test_c", comment=None)
+        tm.assert_frame_equal(result1, result2)
+
+    def test_comment_used(self, path):
+        # see gh-18735
+        #
+        # Test the comment argument is working as expected when used.
+
+        # Create file to read in.
+        df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
+        df.to_excel(path, "test_c")
+
+        # Test read_frame_comment against manually produced expected output.
+        expected = DataFrame({"A": ["one", None, "one"], "B": ["two", None, None]})
+        result = pd.read_excel(path, sheet_name="test_c", comment="#", index_col=0)
+        tm.assert_frame_equal(result, expected)
+
+    def test_comment_empty_line(self, path):
+        # Re issue #18735
+        # Test that pd.read_excel ignores commented lines at the end of file
+
+        df = DataFrame({"a": ["1", "#2"], "b": ["2", "3"]})
+        df.to_excel(path, index=False)
+
+        # Test that all-comment lines at EoF are ignored
+        expected = DataFrame({"a": [1], "b": [2]})
+        result = pd.read_excel(path, comment="#")
+        tm.assert_frame_equal(result, expected)
+
+    def test_datetimes(self, path):
+        # Test writing and reading datetimes. For issue #9139.
(xref #9185) + datetimes = [ + datetime(2013, 1, 13, 1, 2, 3), + datetime(2013, 1, 13, 2, 45, 56), + datetime(2013, 1, 13, 4, 29, 49), + datetime(2013, 1, 13, 6, 13, 42), + datetime(2013, 1, 13, 7, 57, 35), + datetime(2013, 1, 13, 9, 41, 28), + datetime(2013, 1, 13, 11, 25, 21), + datetime(2013, 1, 13, 13, 9, 14), + datetime(2013, 1, 13, 14, 53, 7), + datetime(2013, 1, 13, 16, 37, 0), + datetime(2013, 1, 13, 18, 20, 52), + ] + + write_frame = DataFrame({"A": datetimes}) + write_frame.to_excel(path, "Sheet1") + read_frame = pd.read_excel(path, sheet_name="Sheet1", header=0) + + tm.assert_series_equal(write_frame["A"], read_frame["A"]) + + def test_bytes_io(self, engine): + # see gh-7074 + with BytesIO() as bio: + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + + # Pass engine explicitly, as there is no file path to infer from. + with ExcelWriter(bio, engine=engine) as writer: + df.to_excel(writer) + + bio.seek(0) + reread_df = pd.read_excel(bio, index_col=0) + tm.assert_frame_equal(df, reread_df) + + def test_engine_kwargs(self, engine, path): + # GH#52368 + df = DataFrame([{"A": 1, "B": 2}, {"A": 3, "B": 4}]) + + msgs = { + "odf": r"OpenDocumentSpreadsheet() got an unexpected keyword " + r"argument 'foo'", + "openpyxl": r"__init__() got an unexpected keyword argument 'foo'", + "xlsxwriter": r"__init__() got an unexpected keyword argument 'foo'", + } + + if PY310: + msgs[ + "openpyxl" + ] = "Workbook.__init__() got an unexpected keyword argument 'foo'" + msgs[ + "xlsxwriter" + ] = "Workbook.__init__() got an unexpected keyword argument 'foo'" + + # Handle change in error message for openpyxl (write and append mode) + if engine == "openpyxl" and not os.path.exists(path): + msgs[ + "openpyxl" + ] = r"load_workbook() got an unexpected keyword argument 'foo'" + + with pytest.raises(TypeError, match=re.escape(msgs[engine])): + df.to_excel( + path, + engine=engine, + engine_kwargs={"foo": "bar"}, + ) + + def test_write_lists_dict(self, path): + # see gh-8188. 
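+        # gh-8188 behavior in a nutshell (sketch, not executed here): cells
+        # holding lists or dicts have no Excel representation, so they are
+        # written via str(), e.g. str(["b", "c"]) -> "['b', 'c']"; the round
+        # trip below therefore compares against df.mixed.apply(str).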
+ df = DataFrame( + { + "mixed": ["a", ["b", "c"], {"d": "e", "f": 2}], + "numeric": [1, 2, 3.0], + "str": ["apple", "banana", "cherry"], + } + ) + df.to_excel(path, "Sheet1") + read = pd.read_excel(path, sheet_name="Sheet1", header=0, index_col=0) + + expected = df.copy() + expected.mixed = expected.mixed.apply(str) + expected.numeric = expected.numeric.astype("int64") + + tm.assert_frame_equal(read, expected) + + def test_render_as_column_name(self, path): + # see gh-34331 + df = DataFrame({"render": [1, 2], "data": [3, 4]}) + df.to_excel(path, "Sheet1") + read = pd.read_excel(path, "Sheet1", index_col=0) + expected = df + tm.assert_frame_equal(read, expected) + + def test_true_and_false_value_options(self, path): + # see gh-13347 + df = DataFrame([["foo", "bar"]], columns=["col1", "col2"]) + expected = df.replace({"foo": True, "bar": False}) + + df.to_excel(path) + read_frame = pd.read_excel( + path, true_values=["foo"], false_values=["bar"], index_col=0 + ) + tm.assert_frame_equal(read_frame, expected) + + def test_freeze_panes(self, path): + # see gh-15160 + expected = DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"]) + expected.to_excel(path, "Sheet1", freeze_panes=(1, 1)) + + result = pd.read_excel(path, index_col=0) + tm.assert_frame_equal(result, expected) + + def test_path_path_lib(self, engine, ext): + df = tm.makeDataFrame() + writer = partial(df.to_excel, engine=engine) + + reader = partial(pd.read_excel, index_col=0) + result = tm.round_trip_pathlib(writer, reader, path=f"foo{ext}") + tm.assert_frame_equal(result, df) + + def test_path_local_path(self, engine, ext): + df = tm.makeDataFrame() + writer = partial(df.to_excel, engine=engine) + + reader = partial(pd.read_excel, index_col=0) + result = tm.round_trip_localpath(writer, reader, path=f"foo{ext}") + tm.assert_frame_equal(result, df) + + def test_merged_cell_custom_objects(self, path): + # see GH-27006 + mi = MultiIndex.from_tuples( + [ + (pd.Period("2018"), pd.Period("2018Q1")), + (pd.Period("2018"), pd.Period("2018Q2")), + ] + ) + expected = DataFrame(np.ones((2, 2), dtype="int64"), columns=mi) + expected.to_excel(path) + result = pd.read_excel(path, header=[0, 1], index_col=0) + # need to convert PeriodIndexes to standard Indexes for assert equal + expected.columns = expected.columns.set_levels( + [[str(i) for i in mi.levels[0]], [str(i) for i in mi.levels[1]]], + level=[0, 1], + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", [None, object]) + def test_raise_when_saving_timezones(self, dtype, tz_aware_fixture, path): + # GH 27008, GH 7056 + tz = tz_aware_fixture + data = pd.Timestamp("2019", tz=tz) + df = DataFrame([data], dtype=dtype) + with pytest.raises(ValueError, match="Excel does not support"): + df.to_excel(path) + + data = data.to_pydatetime() + df = DataFrame([data], dtype=dtype) + with pytest.raises(ValueError, match="Excel does not support"): + df.to_excel(path) + + def test_excel_duplicate_columns_with_names(self, path): + # GH#39695 + df = DataFrame({"A": [0, 1], "B": [10, 11]}) + df.to_excel(path, columns=["A", "B", "A"], index=False) + + result = pd.read_excel(path) + expected = DataFrame([[0, 10, 0], [1, 11, 1]], columns=["A", "B", "A.1"]) + tm.assert_frame_equal(result, expected) + + def test_if_sheet_exists_raises(self, ext): + # GH 40230 + msg = "if_sheet_exists is only valid in append mode (mode='a')" + + with tm.ensure_clean(ext) as f: + with pytest.raises(ValueError, match=re.escape(msg)): + ExcelWriter(f, if_sheet_exists="replace") + + def 
test_excel_writer_empty_frame(self, engine, ext): + # GH#45793 + with tm.ensure_clean(ext) as path: + with ExcelWriter(path, engine=engine) as writer: + DataFrame().to_excel(writer) + result = pd.read_excel(path) + expected = DataFrame() + tm.assert_frame_equal(result, expected) + + def test_to_excel_empty_frame(self, engine, ext): + # GH#45793 + with tm.ensure_clean(ext) as path: + DataFrame().to_excel(path, engine=engine) + result = pd.read_excel(path) + expected = DataFrame() + tm.assert_frame_equal(result, expected) + + +class TestExcelWriterEngineTests: + @pytest.mark.parametrize( + "klass,ext", + [ + pytest.param(_XlsxWriter, ".xlsx", marks=td.skip_if_no("xlsxwriter")), + pytest.param(_OpenpyxlWriter, ".xlsx", marks=td.skip_if_no("openpyxl")), + ], + ) + def test_ExcelWriter_dispatch(self, klass, ext): + with tm.ensure_clean(ext) as path: + with ExcelWriter(path) as writer: + if ext == ".xlsx" and td.safe_import("xlsxwriter"): + # xlsxwriter has preference over openpyxl if both installed + assert isinstance(writer, _XlsxWriter) + else: + assert isinstance(writer, klass) + + def test_ExcelWriter_dispatch_raises(self): + with pytest.raises(ValueError, match="No engine"): + ExcelWriter("nothing") + + def test_register_writer(self): + class DummyClass(ExcelWriter): + called_save = False + called_write_cells = False + called_sheets = False + _supported_extensions = ("xlsx", "xls") + _engine = "dummy" + + def book(self): + pass + + def _save(self): + type(self).called_save = True + + def _write_cells(self, *args, **kwargs): + type(self).called_write_cells = True + + @property + def sheets(self): + type(self).called_sheets = True + + @classmethod + def assert_called_and_reset(cls): + assert cls.called_save + assert cls.called_write_cells + assert not cls.called_sheets + cls.called_save = False + cls.called_write_cells = False + + register_writer(DummyClass) + + with option_context("io.excel.xlsx.writer", "dummy"): + path = "something.xlsx" + with tm.ensure_clean(path) as filepath: + with ExcelWriter(filepath) as writer: + assert isinstance(writer, DummyClass) + df = tm.makeCustomDataframe(1, 1) + df.to_excel(filepath) + DummyClass.assert_called_and_reset() + + with tm.ensure_clean("something.xls") as filepath: + df.to_excel(filepath, engine="dummy") + DummyClass.assert_called_and_reset() + + +@td.skip_if_no("xlrd") +@td.skip_if_no("openpyxl") +class TestFSPath: + def test_excelfile_fspath(self): + with tm.ensure_clean("foo.xlsx") as path: + df = DataFrame({"A": [1, 2]}) + df.to_excel(path) + with ExcelFile(path) as xl: + result = os.fspath(xl) + assert result == path + + def test_excelwriter_fspath(self): + with tm.ensure_clean("foo.xlsx") as path: + with ExcelWriter(path) as writer: + assert os.fspath(writer) == str(path) + + +@pytest.mark.parametrize("klass", _writers.values()) +def test_subclass_attr(klass): + # testing that subclasses of ExcelWriter don't have public attributes (issue 49602) + attrs_base = {name for name in dir(ExcelWriter) if not name.startswith("_")} + attrs_klass = {name for name in dir(klass) if not name.startswith("_")} + assert not attrs_base.symmetric_difference(attrs_klass) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_xlrd.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_xlrd.py new file mode 100644 index 00000000..50902986 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_xlrd.py @@ -0,0 +1,59 @@ +import io + +import pytest + +import pandas as pd +import pandas._testing as 
tm + +from pandas.io.excel import ExcelFile +from pandas.io.excel._base import inspect_excel_format + +xlrd = pytest.importorskip("xlrd") + + +@pytest.fixture(params=[".xls"]) +def read_ext_xlrd(request): + """ + Valid extensions for reading Excel files with xlrd. + + Similar to read_ext, but excludes .ods, .xlsb, and for xlrd>2 .xlsx, .xlsm + """ + return request.param + + +def test_read_xlrd_book(read_ext_xlrd, datapath): + engine = "xlrd" + sheet_name = "Sheet1" + pth = datapath("io", "data", "excel", "test1.xls") + with xlrd.open_workbook(pth) as book: + with ExcelFile(book, engine=engine) as xl: + result = pd.read_excel(xl, sheet_name=sheet_name, index_col=0) + + expected = pd.read_excel( + book, sheet_name=sheet_name, engine=engine, index_col=0 + ) + tm.assert_frame_equal(result, expected) + + +def test_read_xlsx_fails(datapath): + # GH 29375 + from xlrd.biffh import XLRDError + + path = datapath("io", "data", "excel", "test1.xlsx") + with pytest.raises(XLRDError, match="Excel xlsx file; not supported"): + pd.read_excel(path, engine="xlrd") + + +@pytest.mark.parametrize( + "file_header", + [ + b"\x09\x00\x04\x00\x07\x00\x10\x00", + b"\x09\x02\x06\x00\x00\x00\x10\x00", + b"\x09\x04\x06\x00\x00\x00\x10\x00", + b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1", + ], +) +def test_read_old_xls_files(file_header): + # GH 41226 + f = io.BytesIO(file_header) + assert inspect_excel_format(f) == "xls" diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_xlsxwriter.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_xlsxwriter.py new file mode 100644 index 00000000..c4d02d71 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/excel/test_xlsxwriter.py @@ -0,0 +1,78 @@ +import contextlib + +import pytest + +from pandas import DataFrame +import pandas._testing as tm + +from pandas.io.excel import ExcelWriter + +xlsxwriter = pytest.importorskip("xlsxwriter") + +pytestmark = pytest.mark.parametrize("ext", [".xlsx"]) + + +def test_column_format(ext): + # Test that column formats are applied to cells. Test for issue #9167. + # Applicable to xlsxwriter only. + openpyxl = pytest.importorskip("openpyxl") + + with tm.ensure_clean(ext) as path: + frame = DataFrame({"A": [123456, 123456], "B": [123456, 123456]}) + + with ExcelWriter(path) as writer: + frame.to_excel(writer) + + # Add a number format to col B and ensure it is applied to cells. + num_format = "#,##0" + write_workbook = writer.book + write_worksheet = write_workbook.worksheets()[0] + col_format = write_workbook.add_format({"num_format": num_format}) + write_worksheet.set_column("B:B", None, col_format) + + with contextlib.closing(openpyxl.load_workbook(path)) as read_workbook: + try: + read_worksheet = read_workbook["Sheet1"] + except TypeError: + # compat + read_worksheet = read_workbook.get_sheet_by_name(name="Sheet1") + + # Get the number format from the cell. + try: + cell = read_worksheet["B2"] + except TypeError: + # compat + cell = read_worksheet.cell("B2") + + try: + read_num_format = cell.number_format + except AttributeError: + read_num_format = cell.style.number_format._format_code + + assert read_num_format == num_format + + +def test_write_append_mode_raises(ext): + msg = "Append mode is not supported with xlsxwriter!" 
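+    # xlsxwriter only streams brand-new workbooks and cannot load an existing
+    # file, so ExcelWriter must reject mode="a" up front. Appending to an
+    # existing workbook is supported by the openpyxl engine instead
+    # (illustrative contrast, not executed here):
+    #
+    #     with ExcelWriter("book.xlsx", engine="openpyxl", mode="a") as xw:
+    #         df.to_excel(xw, sheet_name="extra")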
+ + with tm.ensure_clean(ext) as f: + with pytest.raises(ValueError, match=msg): + ExcelWriter(f, engine="xlsxwriter", mode="a") + + +@pytest.mark.parametrize("nan_inf_to_errors", [True, False]) +def test_engine_kwargs(ext, nan_inf_to_errors): + # GH 42286 + engine_kwargs = {"options": {"nan_inf_to_errors": nan_inf_to_errors}} + with tm.ensure_clean(ext) as f: + with ExcelWriter(f, engine="xlsxwriter", engine_kwargs=engine_kwargs) as writer: + assert writer.book.nan_inf_to_errors == nan_inf_to_errors + + +def test_book_and_sheets_consistent(ext): + # GH#45687 - Ensure sheets is updated if user modifies book + with tm.ensure_clean(ext) as f: + with ExcelWriter(f, engine="xlsxwriter") as writer: + assert writer.sheets == {} + sheet = writer.book.add_worksheet("test_name") + assert writer.sheets == {"test_name": sheet} diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/__init__.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_bar.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_bar.py new file mode 100644 index 00000000..19884aaa --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_bar.py @@ -0,0 +1,307 @@ +import numpy as np +import pytest + +from pandas import DataFrame + +pytest.importorskip("jinja2") + + +def bar_grad(a=None, b=None, c=None, d=None): + """Used in multiple tests to simplify formatting of expected result""" + ret = [("width", "10em")] + if all(x is None for x in [a, b, c, d]): + return ret + return ret + [ + ( + "background", + f"linear-gradient(90deg,{','.join([x for x in [a, b, c, d] if x])})", + ) + ] + + +def no_bar(): + return bar_grad() + + +def bar_to(x, color="#d65f5f"): + return bar_grad(f" {color} {x:.1f}%", f" transparent {x:.1f}%") + + +def bar_from_to(x, y, color="#d65f5f"): + return bar_grad( + f" transparent {x:.1f}%", + f" {color} {x:.1f}%", + f" {color} {y:.1f}%", + f" transparent {y:.1f}%", + ) + + +@pytest.fixture +def df_pos(): + return DataFrame([[1], [2], [3]]) + + +@pytest.fixture +def df_neg(): + return DataFrame([[-1], [-2], [-3]]) + + +@pytest.fixture +def df_mix(): + return DataFrame([[-3], [1], [2]]) + + +@pytest.mark.parametrize( + "align, exp", + [ + ("left", [no_bar(), bar_to(50), bar_to(100)]), + ("right", [bar_to(100), bar_from_to(50, 100), no_bar()]), + ("mid", [bar_to(33.33), bar_to(66.66), bar_to(100)]), + ("zero", [bar_from_to(50, 66.7), bar_from_to(50, 83.3), bar_from_to(50, 100)]), + ("mean", [bar_to(50), no_bar(), bar_from_to(50, 100)]), + (2.0, [bar_to(50), no_bar(), bar_from_to(50, 100)]), + (np.median, [bar_to(50), no_bar(), bar_from_to(50, 100)]), + ], +) +def test_align_positive_cases(df_pos, align, exp): + # test different align cases for all positive values + result = df_pos.style.bar(align=align)._compute().ctx + expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]} + assert result == expected + + +@pytest.mark.parametrize( + "align, exp", + [ + ("left", [bar_to(100), bar_to(50), no_bar()]), + ("right", [no_bar(), bar_from_to(50, 100), bar_to(100)]), + ("mid", [bar_from_to(66.66, 100), bar_from_to(33.33, 100), bar_to(100)]), + ("zero", 
[bar_from_to(33.33, 50), bar_from_to(16.66, 50), bar_to(50)]), + ("mean", [bar_from_to(50, 100), no_bar(), bar_to(50)]), + (-2.0, [bar_from_to(50, 100), no_bar(), bar_to(50)]), + (np.median, [bar_from_to(50, 100), no_bar(), bar_to(50)]), + ], +) +def test_align_negative_cases(df_neg, align, exp): + # test different align cases for all negative values + result = df_neg.style.bar(align=align)._compute().ctx + expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]} + assert result == expected + + +@pytest.mark.parametrize( + "align, exp", + [ + ("left", [no_bar(), bar_to(80), bar_to(100)]), + ("right", [bar_to(100), bar_from_to(80, 100), no_bar()]), + ("mid", [bar_to(60), bar_from_to(60, 80), bar_from_to(60, 100)]), + ("zero", [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]), + ("mean", [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]), + (-0.0, [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]), + (np.nanmedian, [bar_to(50), no_bar(), bar_from_to(50, 62.5)]), + ], +) +@pytest.mark.parametrize("nans", [True, False]) +def test_align_mixed_cases(df_mix, align, exp, nans): + # test different align cases for mixed positive and negative values + # also test no impact of NaNs and no_bar + expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]} + if nans: + df_mix.loc[3, :] = np.nan + expected.update({(3, 0): no_bar()}) + result = df_mix.style.bar(align=align)._compute().ctx + assert result == expected + + +@pytest.mark.parametrize( + "align, exp", + [ + ( + "left", + { + "index": [[no_bar(), no_bar()], [bar_to(100), bar_to(100)]], + "columns": [[no_bar(), bar_to(100)], [no_bar(), bar_to(100)]], + "none": [[no_bar(), bar_to(33.33)], [bar_to(66.66), bar_to(100)]], + }, + ), + ( + "mid", + { + "index": [[bar_to(33.33), bar_to(50)], [bar_to(100), bar_to(100)]], + "columns": [[bar_to(50), bar_to(100)], [bar_to(75), bar_to(100)]], + "none": [[bar_to(25), bar_to(50)], [bar_to(75), bar_to(100)]], + }, + ), + ( + "zero", + { + "index": [ + [bar_from_to(50, 66.66), bar_from_to(50, 75)], + [bar_from_to(50, 100), bar_from_to(50, 100)], + ], + "columns": [ + [bar_from_to(50, 75), bar_from_to(50, 100)], + [bar_from_to(50, 87.5), bar_from_to(50, 100)], + ], + "none": [ + [bar_from_to(50, 62.5), bar_from_to(50, 75)], + [bar_from_to(50, 87.5), bar_from_to(50, 100)], + ], + }, + ), + ( + 2, + { + "index": [ + [bar_to(50), no_bar()], + [bar_from_to(50, 100), bar_from_to(50, 100)], + ], + "columns": [ + [bar_to(50), no_bar()], + [bar_from_to(50, 75), bar_from_to(50, 100)], + ], + "none": [ + [bar_from_to(25, 50), no_bar()], + [bar_from_to(50, 75), bar_from_to(50, 100)], + ], + }, + ), + ], +) +@pytest.mark.parametrize("axis", ["index", "columns", "none"]) +def test_align_axis(align, exp, axis): + # test all axis combinations with positive values and different aligns + data = DataFrame([[1, 2], [3, 4]]) + result = ( + data.style.bar(align=align, axis=None if axis == "none" else axis) + ._compute() + .ctx + ) + expected = { + (0, 0): exp[axis][0][0], + (0, 1): exp[axis][0][1], + (1, 0): exp[axis][1][0], + (1, 1): exp[axis][1][1], + } + assert result == expected + + +@pytest.mark.parametrize( + "values, vmin, vmax", + [ + ("positive", 1.5, 2.5), + ("negative", -2.5, -1.5), + ("mixed", -2.5, 1.5), + ], +) +@pytest.mark.parametrize("nullify", [None, "vmin", "vmax"]) # test min/max separately +@pytest.mark.parametrize("align", ["left", "right", "zero", "mid"]) +def test_vmin_vmax_clipping(df_pos, df_neg, df_mix, values, vmin, vmax, nullify, align): + # test that clipping occurs if 
any vmin > data_values or vmax < data_values + if align == "mid": # mid acts as left or right in each case + if values == "positive": + align = "left" + elif values == "negative": + align = "right" + df = {"positive": df_pos, "negative": df_neg, "mixed": df_mix}[values] + vmin = None if nullify == "vmin" else vmin + vmax = None if nullify == "vmax" else vmax + + clip_df = df.where(df <= (vmax if vmax else 999), other=vmax) + clip_df = clip_df.where(clip_df >= (vmin if vmin else -999), other=vmin) + + result = ( + df.style.bar(align=align, vmin=vmin, vmax=vmax, color=["red", "green"]) + ._compute() + .ctx + ) + expected = clip_df.style.bar(align=align, color=["red", "green"])._compute().ctx + assert result == expected + + +@pytest.mark.parametrize( + "values, vmin, vmax", + [ + ("positive", 0.5, 4.5), + ("negative", -4.5, -0.5), + ("mixed", -4.5, 4.5), + ], +) +@pytest.mark.parametrize("nullify", [None, "vmin", "vmax"]) # test min/max separately +@pytest.mark.parametrize("align", ["left", "right", "zero", "mid"]) +def test_vmin_vmax_widening(df_pos, df_neg, df_mix, values, vmin, vmax, nullify, align): + # test that widening occurs if any vmax > data_values or vmin < data_values + if align == "mid": # mid acts as left or right in each case + if values == "positive": + align = "left" + elif values == "negative": + align = "right" + df = {"positive": df_pos, "negative": df_neg, "mixed": df_mix}[values] + vmin = None if nullify == "vmin" else vmin + vmax = None if nullify == "vmax" else vmax + + expand_df = df.copy() + expand_df.loc[3, :], expand_df.loc[4, :] = vmin, vmax + + result = ( + df.style.bar(align=align, vmin=vmin, vmax=vmax, color=["red", "green"]) + ._compute() + .ctx + ) + expected = expand_df.style.bar(align=align, color=["red", "green"])._compute().ctx + assert result.items() <= expected.items() + + +def test_numerics(): + # test data is pre-selected for numeric values + data = DataFrame([[1, "a"], [2, "b"]]) + result = data.style.bar()._compute().ctx + assert (0, 1) not in result + assert (1, 1) not in result + + +@pytest.mark.parametrize( + "align, exp", + [ + ("left", [no_bar(), bar_to(100, "green")]), + ("right", [bar_to(100, "red"), no_bar()]), + ("mid", [bar_to(25, "red"), bar_from_to(25, 100, "green")]), + ("zero", [bar_from_to(33.33, 50, "red"), bar_from_to(50, 100, "green")]), + ], +) +def test_colors_mixed(align, exp): + data = DataFrame([[-1], [3]]) + result = data.style.bar(align=align, color=["red", "green"])._compute().ctx + assert result == {(0, 0): exp[0], (1, 0): exp[1]} + + +def test_bar_align_height(): + # test when keyword height is used 'no-repeat center' and 'background-size' present + data = DataFrame([[1], [2]]) + result = data.style.bar(align="left", height=50)._compute().ctx + bg_s = "linear-gradient(90deg, #d65f5f 100.0%, transparent 100.0%) no-repeat center" + expected = { + (0, 0): [("width", "10em")], + (1, 0): [ + ("width", "10em"), + ("background", bg_s), + ("background-size", "100% 50.0%"), + ], + } + assert result == expected + + +def test_bar_value_error_raises(): + df = DataFrame({"A": [-100, -60, -30, -20]}) + + msg = "`align` should be in {'left', 'right', 'mid', 'mean', 'zero'} or" + with pytest.raises(ValueError, match=msg): + df.style.bar(align="poorly", color=["#d65f5f", "#5fba7d"]).to_html() + + msg = r"`width` must be a value in \[0, 100\]" + with pytest.raises(ValueError, match=msg): + df.style.bar(width=200).to_html() + + msg = r"`height` must be a value in \[0, 100\]" + with pytest.raises(ValueError, match=msg): + 
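+        # `height`, like `width`, is interpreted as a percentage of the cell
+        # box, so values outside [0, 100] (such as the 200 below) are rejected.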
df.style.bar(height=200).to_html() diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_exceptions.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_exceptions.py new file mode 100644 index 00000000..d52e3a37 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_exceptions.py @@ -0,0 +1,44 @@ +import pytest + +jinja2 = pytest.importorskip("jinja2") + +from pandas import ( + DataFrame, + MultiIndex, +) + +from pandas.io.formats.style import Styler + + +@pytest.fixture +def df(): + return DataFrame( + data=[[0, -0.609], [1, -1.228]], + columns=["A", "B"], + index=["x", "y"], + ) + + +@pytest.fixture +def styler(df): + return Styler(df, uuid_len=0) + + +def test_concat_bad_columns(styler): + msg = "`other.data` must have same columns as `Styler.data" + with pytest.raises(ValueError, match=msg): + styler.concat(DataFrame([[1, 2]]).style) + + +def test_concat_bad_type(styler): + msg = "`other` must be of type `Styler`" + with pytest.raises(TypeError, match=msg): + styler.concat(DataFrame([[1, 2]])) + + +def test_concat_bad_index_levels(styler, df): + df = df.copy() + df.index = MultiIndex.from_tuples([(0, 0), (1, 1)]) + msg = "number of index levels must be same in `other`" + with pytest.raises(ValueError, match=msg): + styler.concat(df.style) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_format.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_format.py new file mode 100644 index 00000000..1c84816e --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_format.py @@ -0,0 +1,562 @@ +import numpy as np +import pytest + +from pandas import ( + NA, + DataFrame, + IndexSlice, + MultiIndex, + NaT, + Timestamp, + option_context, +) + +pytest.importorskip("jinja2") +from pandas.io.formats.style import Styler +from pandas.io.formats.style_render import _str_escape + + +@pytest.fixture +def df(): + return DataFrame( + data=[[0, -0.609], [1, -1.228]], + columns=["A", "B"], + index=["x", "y"], + ) + + +@pytest.fixture +def styler(df): + return Styler(df, uuid_len=0) + + +@pytest.fixture +def df_multi(): + return DataFrame( + data=np.arange(16).reshape(4, 4), + columns=MultiIndex.from_product([["A", "B"], ["a", "b"]]), + index=MultiIndex.from_product([["X", "Y"], ["x", "y"]]), + ) + + +@pytest.fixture +def styler_multi(df_multi): + return Styler(df_multi, uuid_len=0) + + +def test_display_format(styler): + ctx = styler.format("{:0.1f}")._translate(True, True) + assert all(["display_value" in c for c in row] for row in ctx["body"]) + assert all([len(c["display_value"]) <= 3 for c in row[1:]] for row in ctx["body"]) + assert len(ctx["body"][0][1]["display_value"].lstrip("-")) <= 3 + + +@pytest.mark.parametrize("index", [True, False]) +@pytest.mark.parametrize("columns", [True, False]) +def test_display_format_index(styler, index, columns): + exp_index = ["x", "y"] + if index: + styler.format_index(lambda v: v.upper(), axis=0) # test callable + exp_index = ["X", "Y"] + + exp_columns = ["A", "B"] + if columns: + styler.format_index("*{}*", axis=1) # test string + exp_columns = ["*A*", "*B*"] + + ctx = styler._translate(True, True) + + for r, row in enumerate(ctx["body"]): + assert row[0]["display_value"] == exp_index[r] + + for c, col in enumerate(ctx["head"][1:]): + assert col["display_value"] == exp_columns[c] + + +def test_format_dict(styler): + ctx = styler.format({"A": "{:0.1f}", "B": 
"{0:.2%}"})._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "0.0" + assert ctx["body"][0][2]["display_value"] == "-60.90%" + + +def test_format_index_dict(styler): + ctx = styler.format_index({0: lambda v: v.upper()})._translate(True, True) + for i, val in enumerate(["X", "Y"]): + assert ctx["body"][i][0]["display_value"] == val + + +def test_format_string(styler): + ctx = styler.format("{:.2f}")._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "0.00" + assert ctx["body"][0][2]["display_value"] == "-0.61" + assert ctx["body"][1][1]["display_value"] == "1.00" + assert ctx["body"][1][2]["display_value"] == "-1.23" + + +def test_format_callable(styler): + ctx = styler.format(lambda v: "neg" if v < 0 else "pos")._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "pos" + assert ctx["body"][0][2]["display_value"] == "neg" + assert ctx["body"][1][1]["display_value"] == "pos" + assert ctx["body"][1][2]["display_value"] == "neg" + + +def test_format_with_na_rep(): + # GH 21527 28358 + df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"]) + + ctx = df.style.format(None, na_rep="-")._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "-" + assert ctx["body"][0][2]["display_value"] == "-" + + ctx = df.style.format("{:.2%}", na_rep="-")._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "-" + assert ctx["body"][0][2]["display_value"] == "-" + assert ctx["body"][1][1]["display_value"] == "110.00%" + assert ctx["body"][1][2]["display_value"] == "120.00%" + + ctx = df.style.format("{:.2%}", na_rep="-", subset=["B"])._translate(True, True) + assert ctx["body"][0][2]["display_value"] == "-" + assert ctx["body"][1][2]["display_value"] == "120.00%" + + +def test_format_index_with_na_rep(): + df = DataFrame([[1, 2, 3, 4, 5]], columns=["A", None, np.nan, NaT, NA]) + ctx = df.style.format_index(None, na_rep="--", axis=1)._translate(True, True) + assert ctx["head"][0][1]["display_value"] == "A" + for i in [2, 3, 4, 5]: + assert ctx["head"][0][i]["display_value"] == "--" + + +def test_format_non_numeric_na(): + # GH 21527 28358 + df = DataFrame( + { + "object": [None, np.nan, "foo"], + "datetime": [None, NaT, Timestamp("20120101")], + } + ) + ctx = df.style.format(None, na_rep="-")._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "-" + assert ctx["body"][0][2]["display_value"] == "-" + assert ctx["body"][1][1]["display_value"] == "-" + assert ctx["body"][1][2]["display_value"] == "-" + + +@pytest.mark.parametrize( + "func, attr, kwargs", + [ + ("format", "_display_funcs", {}), + ("format_index", "_display_funcs_index", {"axis": 0}), + ("format_index", "_display_funcs_columns", {"axis": 1}), + ], +) +def test_format_clear(styler, func, attr, kwargs): + assert (0, 0) not in getattr(styler, attr) # using default + getattr(styler, func)("{:.2f}", **kwargs) + assert (0, 0) in getattr(styler, attr) # formatter is specified + getattr(styler, func)(**kwargs) + assert (0, 0) not in getattr(styler, attr) # formatter cleared to default + + +@pytest.mark.parametrize( + "escape, exp", + [ + ("html", "<>&"%$#_{}~^\\~ ^ \\ "), + ( + "latex", + '<>\\&"\\%\\$\\#\\_\\{\\}\\textasciitilde \\textasciicircum ' + "\\textbackslash \\textasciitilde \\space \\textasciicircum \\space " + "\\textbackslash \\space ", + ), + ], +) +def test_format_escape_html(escape, exp): + chars = '<>&"%$#_{}~^\\~ ^ \\ ' + df = DataFrame([[chars]]) + + s = Styler(df, uuid_len=0).format("&{0}&", escape=None) + expected = f'
[Garbled span: the remainder of the test_format.py hunk and nearly all of the new file
pandas/tests/io/formats/style/test_html.py. The HTML markup embedded in these tests'
expected strings (the <td>/<tr>/<table>/<style> fragments) was stripped during
extraction, leaving only cell text, so the code cannot be reconstructed verbatim.
The recoverable test names are test_format_escape_html and the related escape/na_rep
tests, test_w3_html_format, test_colspan_w3, test_doctype, test_doctype_encoding,
test_bold_headers_arg, test_caption_arg, the test_include_css_style_rules_* trio,
test_hiding_index_columns_multiindex_alignment and _trimming, test_rendered_links,
test_multiple_rendered_links, the test_concat* family, and
test_to_html_na_rep_non_scalar_data. The diff resumes legibly at the closing lines of
test_to_html_na_rep_non_scalar_data:]
+""" + assert result == expected diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_matplotlib.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_matplotlib.py new file mode 100644 index 00000000..fb7a77f1 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_matplotlib.py @@ -0,0 +1,335 @@ +import gc + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + IndexSlice, + Series, +) + +pytest.importorskip("matplotlib") +pytest.importorskip("jinja2") + +import matplotlib as mpl + +from pandas.io.formats.style import Styler + + +@pytest.fixture(autouse=True) +def mpl_cleanup(): + # matplotlib/testing/decorators.py#L24 + # 1) Resets units registry + # 2) Resets rc_context + # 3) Closes all figures + mpl = pytest.importorskip("matplotlib") + mpl_units = pytest.importorskip("matplotlib.units") + plt = pytest.importorskip("matplotlib.pyplot") + orig_units_registry = mpl_units.registry.copy() + with mpl.rc_context(): + mpl.use("template") + yield + mpl_units.registry.clear() + mpl_units.registry.update(orig_units_registry) + plt.close("all") + # https://matplotlib.org/stable/users/prev_whats_new/whats_new_3.6.0.html#garbage-collection-is-no-longer-run-on-figure-close # noqa: E501 + gc.collect(1) + + +@pytest.fixture +def df(): + return DataFrame([[1, 2], [2, 4]], columns=["A", "B"]) + + +@pytest.fixture +def styler(df): + return Styler(df, uuid_len=0) + + +@pytest.fixture +def df_blank(): + return DataFrame([[0, 0], [0, 0]], columns=["A", "B"], index=["X", "Y"]) + + +@pytest.fixture +def styler_blank(df_blank): + return Styler(df_blank, uuid_len=0) + + +@pytest.mark.parametrize("f", ["background_gradient", "text_gradient"]) +def test_function_gradient(styler, f): + for c_map in [None, "YlOrRd"]: + result = getattr(styler, f)(cmap=c_map)._compute().ctx + assert all("#" in x[0][1] for x in result.values()) + assert result[(0, 0)] == result[(0, 1)] + assert result[(1, 0)] == result[(1, 1)] + + +@pytest.mark.parametrize("f", ["background_gradient", "text_gradient"]) +def test_background_gradient_color(styler, f): + result = getattr(styler, f)(subset=IndexSlice[1, "A"])._compute().ctx + if f == "background_gradient": + assert result[(1, 0)] == [("background-color", "#fff7fb"), ("color", "#000000")] + elif f == "text_gradient": + assert result[(1, 0)] == [("color", "#fff7fb")] + + +@pytest.mark.parametrize( + "axis, expected", + [ + (0, ["low", "low", "high", "high"]), + (1, ["low", "high", "low", "high"]), + (None, ["low", "mid", "mid", "high"]), + ], +) +@pytest.mark.parametrize("f", ["background_gradient", "text_gradient"]) +def test_background_gradient_axis(styler, axis, expected, f): + if f == "background_gradient": + colors = { + "low": [("background-color", "#f7fbff"), ("color", "#000000")], + "mid": [("background-color", "#abd0e6"), ("color", "#000000")], + "high": [("background-color", "#08306b"), ("color", "#f1f1f1")], + } + elif f == "text_gradient": + colors = { + "low": [("color", "#f7fbff")], + "mid": [("color", "#abd0e6")], + "high": [("color", "#08306b")], + } + result = getattr(styler, f)(cmap="Blues", axis=axis)._compute().ctx + for i, cell in enumerate([(0, 0), (0, 1), (1, 0), (1, 1)]): + assert result[cell] == colors[expected[i]] + + +@pytest.mark.parametrize( + "cmap, expected", + [ + ( + "PuBu", + { + (4, 5): [("background-color", "#86b0d3"), ("color", "#000000")], + (4, 6): [("background-color", "#83afd3"), ("color", "#f1f1f1")], + }, + ), + ( + "YlOrRd", + { + 
(4, 8): [("background-color", "#fd913e"), ("color", "#000000")], + (4, 9): [("background-color", "#fd8f3d"), ("color", "#f1f1f1")], + }, + ), + ( + None, + { + (7, 0): [("background-color", "#48c16e"), ("color", "#f1f1f1")], + (7, 1): [("background-color", "#4cc26c"), ("color", "#000000")], + }, + ), + ], +) +def test_text_color_threshold(cmap, expected): + # GH 39888 + df = DataFrame(np.arange(100).reshape(10, 10)) + result = df.style.background_gradient(cmap=cmap, axis=None)._compute().ctx + for k in expected.keys(): + assert result[k] == expected[k] + + +def test_background_gradient_vmin_vmax(): + # GH 12145 + df = DataFrame(range(5)) + ctx = df.style.background_gradient(vmin=1, vmax=3)._compute().ctx + assert ctx[(0, 0)] == ctx[(1, 0)] + assert ctx[(4, 0)] == ctx[(3, 0)] + + +def test_background_gradient_int64(): + # GH 28869 + df1 = Series(range(3)).to_frame() + df2 = Series(range(3), dtype="Int64").to_frame() + ctx1 = df1.style.background_gradient()._compute().ctx + ctx2 = df2.style.background_gradient()._compute().ctx + assert ctx2[(0, 0)] == ctx1[(0, 0)] + assert ctx2[(1, 0)] == ctx1[(1, 0)] + assert ctx2[(2, 0)] == ctx1[(2, 0)] + + +@pytest.mark.parametrize( + "axis, gmap, expected", + [ + ( + 0, + [1, 2], + { + (0, 0): [("background-color", "#fff7fb"), ("color", "#000000")], + (1, 0): [("background-color", "#023858"), ("color", "#f1f1f1")], + (0, 1): [("background-color", "#fff7fb"), ("color", "#000000")], + (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")], + }, + ), + ( + 1, + [1, 2], + { + (0, 0): [("background-color", "#fff7fb"), ("color", "#000000")], + (1, 0): [("background-color", "#fff7fb"), ("color", "#000000")], + (0, 1): [("background-color", "#023858"), ("color", "#f1f1f1")], + (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")], + }, + ), + ( + None, + np.array([[2, 1], [1, 2]]), + { + (0, 0): [("background-color", "#023858"), ("color", "#f1f1f1")], + (1, 0): [("background-color", "#fff7fb"), ("color", "#000000")], + (0, 1): [("background-color", "#fff7fb"), ("color", "#000000")], + (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")], + }, + ), + ], +) +def test_background_gradient_gmap_array(styler_blank, axis, gmap, expected): + # tests when gmap is given as a sequence and converted to ndarray + result = styler_blank.background_gradient(axis=axis, gmap=gmap)._compute().ctx + assert result == expected + + +@pytest.mark.parametrize( + "gmap, axis", [([1, 2, 3], 0), ([1, 2], 1), (np.array([[1, 2], [1, 2]]), None)] +) +def test_background_gradient_gmap_array_raises(gmap, axis): + # test when gmap as converted ndarray is bad shape + df = DataFrame([[0, 0, 0], [0, 0, 0]]) + msg = "supplied 'gmap' is not correct shape" + with pytest.raises(ValueError, match=msg): + df.style.background_gradient(gmap=gmap, axis=axis)._compute() + + +@pytest.mark.parametrize( + "gmap", + [ + DataFrame( # reverse the columns + [[2, 1], [1, 2]], columns=["B", "A"], index=["X", "Y"] + ), + DataFrame( # reverse the index + [[2, 1], [1, 2]], columns=["A", "B"], index=["Y", "X"] + ), + DataFrame( # reverse the index and columns + [[1, 2], [2, 1]], columns=["B", "A"], index=["Y", "X"] + ), + DataFrame( # add unnecessary columns + [[1, 2, 3], [2, 1, 3]], columns=["A", "B", "C"], index=["X", "Y"] + ), + DataFrame( # add unnecessary index + [[1, 2], [2, 1], [3, 3]], columns=["A", "B"], index=["X", "Y", "Z"] + ), + ], +) +@pytest.mark.parametrize( + "subset, exp_gmap", # exp_gmap is underlying map DataFrame should conform to + [ + (None, [[1, 2], [2, 1]]), + 
(["A"], [[1], [2]]), # slice only column "A" in data and gmap + (["B", "A"], [[2, 1], [1, 2]]), # reverse the columns in data + (IndexSlice["X", :], [[1, 2]]), # slice only index "X" in data and gmap + (IndexSlice[["Y", "X"], :], [[2, 1], [1, 2]]), # reverse the index in data + ], +) +def test_background_gradient_gmap_dataframe_align(styler_blank, gmap, subset, exp_gmap): + # test gmap given as DataFrame that it aligns to the data including subset + expected = styler_blank.background_gradient(axis=None, gmap=exp_gmap, subset=subset) + result = styler_blank.background_gradient(axis=None, gmap=gmap, subset=subset) + assert expected._compute().ctx == result._compute().ctx + + +@pytest.mark.parametrize( + "gmap, axis, exp_gmap", + [ + (Series([2, 1], index=["Y", "X"]), 0, [[1, 1], [2, 2]]), # revrse the index + (Series([2, 1], index=["B", "A"]), 1, [[1, 2], [1, 2]]), # revrse the cols + (Series([1, 2, 3], index=["X", "Y", "Z"]), 0, [[1, 1], [2, 2]]), # add idx + (Series([1, 2, 3], index=["A", "B", "C"]), 1, [[1, 2], [1, 2]]), # add col + ], +) +def test_background_gradient_gmap_series_align(styler_blank, gmap, axis, exp_gmap): + # test gmap given as Series that it aligns to the data including subset + expected = styler_blank.background_gradient(axis=None, gmap=exp_gmap)._compute() + result = styler_blank.background_gradient(axis=axis, gmap=gmap)._compute() + assert expected.ctx == result.ctx + + +@pytest.mark.parametrize( + "gmap, axis", + [ + (DataFrame([[1, 2], [2, 1]], columns=["A", "B"], index=["X", "Y"]), 1), + (DataFrame([[1, 2], [2, 1]], columns=["A", "B"], index=["X", "Y"]), 0), + ], +) +def test_background_gradient_gmap_wrong_dataframe(styler_blank, gmap, axis): + # test giving a gmap in DataFrame but with wrong axis + msg = "'gmap' is a DataFrame but underlying data for operations is a Series" + with pytest.raises(ValueError, match=msg): + styler_blank.background_gradient(gmap=gmap, axis=axis)._compute() + + +def test_background_gradient_gmap_wrong_series(styler_blank): + # test giving a gmap in Series form but with wrong axis + msg = "'gmap' is a Series but underlying data for operations is a DataFrame" + gmap = Series([1, 2], index=["X", "Y"]) + with pytest.raises(ValueError, match=msg): + styler_blank.background_gradient(gmap=gmap, axis=None)._compute() + + +def test_background_gradient_nullable_dtypes(): + # GH 50712 + df1 = DataFrame([[1], [0], [np.nan]], dtype=float) + df2 = DataFrame([[1], [0], [None]], dtype="Int64") + + ctx1 = df1.style.background_gradient()._compute().ctx + ctx2 = df2.style.background_gradient()._compute().ctx + assert ctx1 == ctx2 + + +@pytest.mark.parametrize( + "cmap", + ["PuBu", mpl.colormaps["PuBu"]], +) +def test_bar_colormap(cmap): + data = DataFrame([[1, 2], [3, 4]]) + ctx = data.style.bar(cmap=cmap, axis=None)._compute().ctx + pubu_colors = { + (0, 0): "#d0d1e6", + (1, 0): "#056faf", + (0, 1): "#73a9cf", + (1, 1): "#023858", + } + for k, v in pubu_colors.items(): + assert v in ctx[k][1][1] + + +def test_bar_color_raises(df): + msg = "`color` must be string or list or tuple of 2 strings" + with pytest.raises(ValueError, match=msg): + df.style.bar(color={"a", "b"}).to_html() + with pytest.raises(ValueError, match=msg): + df.style.bar(color=["a", "b", "c"]).to_html() + + msg = "`color` and `cmap` cannot both be given" + with pytest.raises(ValueError, match=msg): + df.style.bar(color="something", cmap="something else").to_html() + + +@pytest.mark.parametrize( + "plot_method", + ["scatter", "hexbin"], +) +def test_pass_colormap_instance(df, 
plot_method): + # https://github.com/pandas-dev/pandas/issues/49374 + cmap = mpl.colors.ListedColormap([[1, 1, 1], [0, 0, 0]]) + df["c"] = df.A + df.B + kwargs = {"x": "A", "y": "B", "c": "c", "colormap": cmap} + if plot_method == "hexbin": + kwargs["C"] = kwargs.pop("c") + getattr(df.plot, plot_method)(**kwargs) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_non_unique.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_non_unique.py new file mode 100644 index 00000000..e4d31fe2 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_non_unique.py @@ -0,0 +1,140 @@ +from textwrap import dedent + +import pytest + +from pandas import ( + DataFrame, + IndexSlice, +) + +pytest.importorskip("jinja2") + +from pandas.io.formats.style import Styler + + +@pytest.fixture +def df(): + return DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + index=["i", "j", "j"], + columns=["c", "d", "d"], + dtype=float, + ) + + +@pytest.fixture +def styler(df): + return Styler(df, uuid_len=0) + + +def test_format_non_unique(df): + # GH 41269 + + # test dict + html = df.style.format({"d": "{:.1f}"}).to_html() + for val in ["1.000000<", "4.000000<", "7.000000<"]: + assert val in html + for val in ["2.0<", "3.0<", "5.0<", "6.0<", "8.0<", "9.0<"]: + assert val in html + + # test subset + html = df.style.format(precision=1, subset=IndexSlice["j", "d"]).to_html() + for val in ["1.000000<", "4.000000<", "7.000000<", "2.000000<", "3.000000<"]: + assert val in html + for val in ["5.0<", "6.0<", "8.0<", "9.0<"]: + assert val in html + + +@pytest.mark.parametrize("func", ["apply", "map"]) +def test_apply_map_non_unique_raises(df, func): + # GH 41269 + if func == "apply": + op = lambda s: ["color: red;"] * len(s) + else: + op = lambda v: "color: red;" + + with pytest.raises(KeyError, match="`Styler.apply` and `.map` are not"): + getattr(df.style, func)(op)._compute() + + +def test_table_styles_dict_non_unique_index(styler): + styles = styler.set_table_styles( + {"j": [{"selector": "td", "props": "a: v;"}]}, axis=1 + ).table_styles + assert styles == [ + {"selector": "td.row1", "props": [("a", "v")]}, + {"selector": "td.row2", "props": [("a", "v")]}, + ] + + +def test_table_styles_dict_non_unique_columns(styler): + styles = styler.set_table_styles( + {"d": [{"selector": "td", "props": "a: v;"}]}, axis=0 + ).table_styles + assert styles == [ + {"selector": "td.col1", "props": [("a", "v")]}, + {"selector": "td.col2", "props": [("a", "v")]}, + ] + + +def test_tooltips_non_unique_raises(styler): + # ttips has unique keys + ttips = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "b"]) + styler.set_tooltips(ttips=ttips) # OK + + # ttips has non-unique columns + ttips = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "c"], index=["a", "b"]) + with pytest.raises(KeyError, match="Tooltips render only if `ttips` has unique"): + styler.set_tooltips(ttips=ttips) + + # ttips has non-unique index + ttips = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "a"]) + with pytest.raises(KeyError, match="Tooltips render only if `ttips` has unique"): + styler.set_tooltips(ttips=ttips) + + +def test_set_td_classes_non_unique_raises(styler): + # classes has unique keys + classes = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "b"]) + styler.set_td_classes(classes=classes) # OK + + # classes has non-unique columns + classes = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "c"], 
index=["a", "b"]) + with pytest.raises(KeyError, match="Classes render only if `classes` has unique"): + styler.set_td_classes(classes=classes) + + # classes has non-unique index + classes = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "a"]) + with pytest.raises(KeyError, match="Classes render only if `classes` has unique"): + styler.set_td_classes(classes=classes) + + +def test_hide_columns_non_unique(styler): + ctx = styler.hide(["d"], axis="columns")._translate(True, True) + + assert ctx["head"][0][1]["display_value"] == "c" + assert ctx["head"][0][1]["is_visible"] is True + + assert ctx["head"][0][2]["display_value"] == "d" + assert ctx["head"][0][2]["is_visible"] is False + + assert ctx["head"][0][3]["display_value"] == "d" + assert ctx["head"][0][3]["is_visible"] is False + + assert ctx["body"][0][1]["is_visible"] is True + assert ctx["body"][0][2]["is_visible"] is False + assert ctx["body"][0][3]["is_visible"] is False + + +def test_latex_non_unique(styler): + result = styler.to_latex() + assert result == dedent( + """\ + \\begin{tabular}{lrrr} + & c & d & d \\\\ + i & 1.000000 & 2.000000 & 3.000000 \\\\ + j & 4.000000 & 5.000000 & 6.000000 \\\\ + j & 7.000000 & 8.000000 & 9.000000 \\\\ + \\end{tabular} + """ + ) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_style.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_style.py new file mode 100644 index 00000000..6fa72bd4 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_style.py @@ -0,0 +1,1588 @@ +import contextlib +import copy +import re +from textwrap import dedent + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + IndexSlice, + MultiIndex, + Series, + option_context, +) +import pandas._testing as tm + +jinja2 = pytest.importorskip("jinja2") +from pandas.io.formats.style import ( # isort:skip + Styler, +) +from pandas.io.formats.style_render import ( + _get_level_lengths, + _get_trimming_maximums, + maybe_convert_css_to_tuples, + non_reducing_slice, +) + + +@pytest.fixture +def mi_df(): + return DataFrame( + [[1, 2], [3, 4]], + index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]), + columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]), + dtype=int, + ) + + +@pytest.fixture +def mi_styler(mi_df): + return Styler(mi_df, uuid_len=0) + + +@pytest.fixture +def mi_styler_comp(mi_styler): + # comprehensively add features to mi_styler + mi_styler = mi_styler._copy(deepcopy=True) + mi_styler.css = {**mi_styler.css, "row": "ROW", "col": "COL"} + mi_styler.uuid_len = 5 + mi_styler.uuid = "abcde" + mi_styler.set_caption("capt") + mi_styler.set_table_styles([{"selector": "a", "props": "a:v;"}]) + mi_styler.hide(axis="columns") + mi_styler.hide([("c0", "c1_a")], axis="columns", names=True) + mi_styler.hide(axis="index") + mi_styler.hide([("i0", "i1_a")], axis="index", names=True) + mi_styler.set_table_attributes('class="box"') + other = mi_styler.data.agg(["mean"]) + other.index = MultiIndex.from_product([[""], other.index]) + mi_styler.concat(other.style) + mi_styler.format(na_rep="MISSING", precision=3) + mi_styler.format_index(precision=2, axis=0) + mi_styler.format_index(precision=4, axis=1) + mi_styler.highlight_max(axis=None) + mi_styler.map_index(lambda x: "color: white;", axis=0) + mi_styler.map_index(lambda x: "color: black;", axis=1) + mi_styler.set_td_classes( + DataFrame( + [["a", "b"], ["a", "c"]], index=mi_styler.index, columns=mi_styler.columns + ) + ) + 
mi_styler.set_tooltips( + DataFrame( + [["a2", "b2"], ["a2", "c2"]], + index=mi_styler.index, + columns=mi_styler.columns, + ) + ) + return mi_styler + + +@pytest.fixture +def blank_value(): + return " " + + +@pytest.fixture +def df(): + df = DataFrame({"A": [0, 1], "B": np.random.default_rng(2).standard_normal(2)}) + return df + + +@pytest.fixture +def styler(df): + df = DataFrame({"A": [0, 1], "B": np.random.default_rng(2).standard_normal(2)}) + return Styler(df) + + +@pytest.mark.parametrize( + "sparse_columns, exp_cols", + [ + ( + True, + [ + {"is_visible": True, "attributes": 'colspan="2"', "value": "c0"}, + {"is_visible": False, "attributes": "", "value": "c0"}, + ], + ), + ( + False, + [ + {"is_visible": True, "attributes": "", "value": "c0"}, + {"is_visible": True, "attributes": "", "value": "c0"}, + ], + ), + ], +) +def test_mi_styler_sparsify_columns(mi_styler, sparse_columns, exp_cols): + exp_l1_c0 = {"is_visible": True, "attributes": "", "display_value": "c1_a"} + exp_l1_c1 = {"is_visible": True, "attributes": "", "display_value": "c1_b"} + + ctx = mi_styler._translate(True, sparse_columns) + + assert exp_cols[0].items() <= ctx["head"][0][2].items() + assert exp_cols[1].items() <= ctx["head"][0][3].items() + assert exp_l1_c0.items() <= ctx["head"][1][2].items() + assert exp_l1_c1.items() <= ctx["head"][1][3].items() + + +@pytest.mark.parametrize( + "sparse_index, exp_rows", + [ + ( + True, + [ + {"is_visible": True, "attributes": 'rowspan="2"', "value": "i0"}, + {"is_visible": False, "attributes": "", "value": "i0"}, + ], + ), + ( + False, + [ + {"is_visible": True, "attributes": "", "value": "i0"}, + {"is_visible": True, "attributes": "", "value": "i0"}, + ], + ), + ], +) +def test_mi_styler_sparsify_index(mi_styler, sparse_index, exp_rows): + exp_l1_r0 = {"is_visible": True, "attributes": "", "display_value": "i1_a"} + exp_l1_r1 = {"is_visible": True, "attributes": "", "display_value": "i1_b"} + + ctx = mi_styler._translate(sparse_index, True) + + assert exp_rows[0].items() <= ctx["body"][0][0].items() + assert exp_rows[1].items() <= ctx["body"][1][0].items() + assert exp_l1_r0.items() <= ctx["body"][0][1].items() + assert exp_l1_r1.items() <= ctx["body"][1][1].items() + + +def test_mi_styler_sparsify_options(mi_styler): + with option_context("styler.sparse.index", False): + html1 = mi_styler.to_html() + with option_context("styler.sparse.index", True): + html2 = mi_styler.to_html() + + assert html1 != html2 + + with option_context("styler.sparse.columns", False): + html1 = mi_styler.to_html() + with option_context("styler.sparse.columns", True): + html2 = mi_styler.to_html() + + assert html1 != html2 + + +@pytest.mark.parametrize( + "rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn", + [ + (100, 100, 100, None, None, 12, 6), # reduce to (12, 6) < 100 elements + (1000, 3, 750, None, None, 250, 3), # dynamically reduce rows to 250, keep cols + (4, 1000, 500, None, None, 4, 125), # dynamically reduce cols to 125, keep rows + (1000, 3, 750, 10, None, 10, 3), # overwrite above dynamics with max_row + (4, 1000, 500, None, 5, 4, 5), # overwrite above dynamics with max_col + (100, 100, 700, 50, 50, 25, 25), # rows cols below given maxes so < 700 elmts + ], +) +def test_trimming_maximum(rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn): + rn, cn = _get_trimming_maximums( + rn, cn, max_els, max_rows, max_cols, scaling_factor=0.5 + ) + assert (rn, cn) == (exp_rn, exp_cn) + + +@pytest.mark.parametrize( + "option, val", + [ + ("styler.render.max_elements", 6), + 
("styler.render.max_rows", 3), + ], +) +def test_render_trimming_rows(option, val): + # test auto and specific trimming of rows + df = DataFrame(np.arange(120).reshape(60, 2)) + with option_context(option, val): + ctx = df.style._translate(True, True) + assert len(ctx["head"][0]) == 3 # index + 2 data cols + assert len(ctx["body"]) == 4 # 3 data rows + trimming row + assert len(ctx["body"][0]) == 3 # index + 2 data cols + + +@pytest.mark.parametrize( + "option, val", + [ + ("styler.render.max_elements", 6), + ("styler.render.max_columns", 2), + ], +) +def test_render_trimming_cols(option, val): + # test auto and specific trimming of cols + df = DataFrame(np.arange(30).reshape(3, 10)) + with option_context(option, val): + ctx = df.style._translate(True, True) + assert len(ctx["head"][0]) == 4 # index + 2 data cols + trimming col + assert len(ctx["body"]) == 3 # 3 data rows + assert len(ctx["body"][0]) == 4 # index + 2 data cols + trimming col + + +def test_render_trimming_mi(): + midx = MultiIndex.from_product([[1, 2], [1, 2, 3]]) + df = DataFrame(np.arange(36).reshape(6, 6), columns=midx, index=midx) + with option_context("styler.render.max_elements", 4): + ctx = df.style._translate(True, True) + + assert len(ctx["body"][0]) == 5 # 2 indexes + 2 data cols + trimming row + assert {"attributes": 'rowspan="2"'}.items() <= ctx["body"][0][0].items() + assert {"class": "data row0 col_trim"}.items() <= ctx["body"][0][4].items() + assert {"class": "data row_trim col_trim"}.items() <= ctx["body"][2][4].items() + assert len(ctx["body"]) == 3 # 2 data rows + trimming row + + +def test_render_empty_mi(): + # GH 43305 + df = DataFrame(index=MultiIndex.from_product([["A"], [0, 1]], names=[None, "one"])) + expected = dedent( + """\ + > + + +   + one + + + """ + ) + assert expected in df.style.to_html() + + +@pytest.mark.parametrize("comprehensive", [True, False]) +@pytest.mark.parametrize("render", [True, False]) +@pytest.mark.parametrize("deepcopy", [True, False]) +def test_copy(comprehensive, render, deepcopy, mi_styler, mi_styler_comp): + styler = mi_styler_comp if comprehensive else mi_styler + styler.uuid_len = 5 + + s2 = copy.deepcopy(styler) if deepcopy else copy.copy(styler) # make copy and check + assert s2 is not styler + + if render: + styler.to_html() + + excl = [ + "cellstyle_map", # render time vars.. 
+ "cellstyle_map_columns", + "cellstyle_map_index", + "template_latex", # render templates are class level + "template_html", + "template_html_style", + "template_html_table", + ] + if not deepcopy: # check memory locations are equal for all included attributes + for attr in [a for a in styler.__dict__ if (not callable(a) and a not in excl)]: + assert id(getattr(s2, attr)) == id(getattr(styler, attr)) + else: # check memory locations are different for nested or mutable vars + shallow = [ + "data", + "columns", + "index", + "uuid_len", + "uuid", + "caption", + "cell_ids", + "hide_index_", + "hide_columns_", + "hide_index_names", + "hide_column_names", + "table_attributes", + ] + for attr in shallow: + assert id(getattr(s2, attr)) == id(getattr(styler, attr)) + + for attr in [ + a + for a in styler.__dict__ + if (not callable(a) and a not in excl and a not in shallow) + ]: + if getattr(s2, attr) is None: + assert id(getattr(s2, attr)) == id(getattr(styler, attr)) + else: + assert id(getattr(s2, attr)) != id(getattr(styler, attr)) + + +@pytest.mark.parametrize("deepcopy", [True, False]) +def test_inherited_copy(mi_styler, deepcopy): + # Ensure that the inherited class is preserved when a Styler object is copied. + # GH 52728 + class CustomStyler(Styler): + pass + + custom_styler = CustomStyler(mi_styler.data) + custom_styler_copy = ( + copy.deepcopy(custom_styler) if deepcopy else copy.copy(custom_styler) + ) + assert isinstance(custom_styler_copy, CustomStyler) + + +def test_clear(mi_styler_comp): + # NOTE: if this test fails for new features then 'mi_styler_comp' should be updated + # to ensure proper testing of the 'copy', 'clear', 'export' methods with new feature + # GH 40675 + styler = mi_styler_comp + styler._compute() # execute applied methods + + clean_copy = Styler(styler.data, uuid=styler.uuid) + + excl = [ + "data", + "index", + "columns", + "uuid", + "uuid_len", # uuid is set to be the same on styler and clean_copy + "cell_ids", + "cellstyle_map", # execution time only + "cellstyle_map_columns", # execution time only + "cellstyle_map_index", # execution time only + "template_latex", # render templates are class level + "template_html", + "template_html_style", + "template_html_table", + ] + # tests vars are not same vals on obj and clean copy before clear (except for excl) + for attr in [a for a in styler.__dict__ if not (callable(a) or a in excl)]: + res = getattr(styler, attr) == getattr(clean_copy, attr) + if hasattr(res, "__iter__") and len(res) > 0: + assert not all(res) # some element in iterable differs + elif hasattr(res, "__iter__") and len(res) == 0: + pass # empty array + else: + assert not res # explicit var differs + + # test vars have same vales on obj and clean copy after clearing + styler.clear() + for attr in [a for a in styler.__dict__ if not callable(a)]: + res = getattr(styler, attr) == getattr(clean_copy, attr) + assert all(res) if hasattr(res, "__iter__") else res + + +def test_export(mi_styler_comp, mi_styler): + exp_attrs = [ + "_todo", + "hide_index_", + "hide_index_names", + "hide_columns_", + "hide_column_names", + "table_attributes", + "table_styles", + "css", + ] + for attr in exp_attrs: + check = getattr(mi_styler, attr) == getattr(mi_styler_comp, attr) + assert not ( + all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check + ) + + export = mi_styler_comp.export() + used = mi_styler.use(export) + for attr in exp_attrs: + check = getattr(used, attr) == getattr(mi_styler_comp, attr) + assert all(check) if (hasattr(check, "__iter__") 
and len(check) > 0) else check + + used.to_html() + + +def test_hide_raises(mi_styler): + msg = "`subset` and `level` cannot be passed simultaneously" + with pytest.raises(ValueError, match=msg): + mi_styler.hide(axis="index", subset="something", level="something else") + + msg = "`level` must be of type `int`, `str` or list of such" + with pytest.raises(ValueError, match=msg): + mi_styler.hide(axis="index", level={"bad": 1, "type": 2}) + + +@pytest.mark.parametrize("level", [1, "one", [1], ["one"]]) +def test_hide_index_level(mi_styler, level): + mi_styler.index.names, mi_styler.columns.names = ["zero", "one"], ["zero", "one"] + ctx = mi_styler.hide(axis="index", level=level)._translate(False, True) + assert len(ctx["head"][0]) == 3 + assert len(ctx["head"][1]) == 3 + assert len(ctx["head"][2]) == 4 + assert ctx["head"][2][0]["is_visible"] + assert not ctx["head"][2][1]["is_visible"] + + assert ctx["body"][0][0]["is_visible"] + assert not ctx["body"][0][1]["is_visible"] + assert ctx["body"][1][0]["is_visible"] + assert not ctx["body"][1][1]["is_visible"] + + +@pytest.mark.parametrize("level", [1, "one", [1], ["one"]]) +@pytest.mark.parametrize("names", [True, False]) +def test_hide_columns_level(mi_styler, level, names): + mi_styler.columns.names = ["zero", "one"] + if names: + mi_styler.index.names = ["zero", "one"] + ctx = mi_styler.hide(axis="columns", level=level)._translate(True, False) + assert len(ctx["head"]) == (2 if names else 1) + + +@pytest.mark.parametrize("method", ["map", "apply"]) +@pytest.mark.parametrize("axis", ["index", "columns"]) +def test_apply_map_header(method, axis): + # GH 41893 + df = DataFrame({"A": [0, 0], "B": [1, 1]}, index=["C", "D"]) + func = { + "apply": lambda s: ["attr: val" if ("A" in v or "C" in v) else "" for v in s], + "map": lambda v: "attr: val" if ("A" in v or "C" in v) else "", + } + + # test execution added to todo + result = getattr(df.style, f"{method}_index")(func[method], axis=axis) + assert len(result._todo) == 1 + assert len(getattr(result, f"ctx_{axis}")) == 0 + + # test ctx object on compute + result._compute() + expected = { + (0, 0): [("attr", "val")], + } + assert getattr(result, f"ctx_{axis}") == expected + + +@pytest.mark.parametrize("method", ["apply", "map"]) +@pytest.mark.parametrize("axis", ["index", "columns"]) +def test_apply_map_header_mi(mi_styler, method, axis): + # GH 41893 + func = { + "apply": lambda s: ["attr: val;" if "b" in v else "" for v in s], + "map": lambda v: "attr: val" if "b" in v else "", + } + result = getattr(mi_styler, f"{method}_index")(func[method], axis=axis)._compute() + expected = {(1, 1): [("attr", "val")]} + assert getattr(result, f"ctx_{axis}") == expected + + +def test_apply_map_header_raises(mi_styler): + # GH 41893 + with pytest.raises(ValueError, match="No axis named bad for object type DataFrame"): + mi_styler.map_index(lambda v: "attr: val;", axis="bad")._compute() + + +class TestStyler: + def test_init_non_pandas(self): + msg = "``data`` must be a Series or DataFrame" + with pytest.raises(TypeError, match=msg): + Styler([1, 2, 3]) + + def test_init_series(self): + result = Styler(Series([1, 2])) + assert result.data.ndim == 2 + + def test_repr_html_ok(self, styler): + styler._repr_html_() + + def test_repr_html_mathjax(self, styler): + # gh-19824 / 41395 + assert "tex2jax_ignore" not in styler._repr_html_() + + with option_context("styler.html.mathjax", False): + assert "tex2jax_ignore" in styler._repr_html_() + + def test_update_ctx(self, styler): + styler._update_ctx(DataFrame({"A": 
["color: red", "color: blue"]})) + expected = {(0, 0): [("color", "red")], (1, 0): [("color", "blue")]} + assert styler.ctx == expected + + def test_update_ctx_flatten_multi_and_trailing_semi(self, styler): + attrs = DataFrame({"A": ["color: red; foo: bar", "color:blue ; foo: baz;"]}) + styler._update_ctx(attrs) + expected = { + (0, 0): [("color", "red"), ("foo", "bar")], + (1, 0): [("color", "blue"), ("foo", "baz")], + } + assert styler.ctx == expected + + def test_render(self): + df = DataFrame({"A": [0, 1]}) + style = lambda x: Series(["color: red", "color: blue"], name=x.name) + s = Styler(df, uuid="AB").apply(style) + s.to_html() + # it worked? + + def test_multiple_render(self, df): + # GH 39396 + s = Styler(df, uuid_len=0).map(lambda x: "color: red;", subset=["A"]) + s.to_html() # do 2 renders to ensure css styles not duplicated + assert ( + '" in s.to_html() + ) + + def test_render_empty_dfs(self): + empty_df = DataFrame() + es = Styler(empty_df) + es.to_html() + # An index but no columns + DataFrame(columns=["a"]).style.to_html() + # A column but no index + DataFrame(index=["a"]).style.to_html() + # No IndexError raised? + + def test_render_double(self): + df = DataFrame({"A": [0, 1]}) + style = lambda x: Series( + ["color: red; border: 1px", "color: blue; border: 2px"], name=x.name + ) + s = Styler(df, uuid="AB").apply(style) + s.to_html() + # it worked? + + def test_set_properties(self): + df = DataFrame({"A": [0, 1]}) + result = df.style.set_properties(color="white", size="10px")._compute().ctx + # order is deterministic + v = [("color", "white"), ("size", "10px")] + expected = {(0, 0): v, (1, 0): v} + assert result.keys() == expected.keys() + for v1, v2 in zip(result.values(), expected.values()): + assert sorted(v1) == sorted(v2) + + def test_set_properties_subset(self): + df = DataFrame({"A": [0, 1]}) + result = ( + df.style.set_properties(subset=IndexSlice[0, "A"], color="white") + ._compute() + .ctx + ) + expected = {(0, 0): [("color", "white")]} + assert result == expected + + def test_empty_index_name_doesnt_display(self, blank_value): + # https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902 + df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}) + result = df.style._translate(True, True) + assert len(result["head"]) == 1 + expected = { + "class": "blank level0", + "type": "th", + "value": blank_value, + "is_visible": True, + "display_value": blank_value, + } + assert expected.items() <= result["head"][0][0].items() + + def test_index_name(self): + # https://github.com/pandas-dev/pandas/issues/11655 + df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}) + result = df.set_index("A").style._translate(True, True) + expected = { + "class": "index_name level0", + "type": "th", + "value": "A", + "is_visible": True, + "display_value": "A", + } + assert expected.items() <= result["head"][1][0].items() + + def test_numeric_columns(self): + # https://github.com/pandas-dev/pandas/issues/12125 + # smoke test for _translate + df = DataFrame({0: [1, 2, 3]}) + df.style._translate(True, True) + + def test_apply_axis(self): + df = DataFrame({"A": [0, 0], "B": [1, 1]}) + f = lambda x: [f"val: {x.max()}" for v in x] + result = df.style.apply(f, axis=1) + assert len(result._todo) == 1 + assert len(result.ctx) == 0 + result._compute() + expected = { + (0, 0): [("val", "1")], + (0, 1): [("val", "1")], + (1, 0): [("val", "1")], + (1, 1): [("val", "1")], + } + assert result.ctx == expected + + result = df.style.apply(f, axis=0) + expected = { + (0, 0): [("val", "0")], + 
(0, 1): [("val", "1")], + (1, 0): [("val", "0")], + (1, 1): [("val", "1")], + } + result._compute() + assert result.ctx == expected + result = df.style.apply(f) # default + result._compute() + assert result.ctx == expected + + @pytest.mark.parametrize("axis", [0, 1]) + def test_apply_series_return(self, axis): + # GH 42014 + df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"]) + + # test Series return where len(Series) < df.index or df.columns but labels OK + func = lambda s: Series(["color: red;"], index=["Y"]) + result = df.style.apply(func, axis=axis)._compute().ctx + assert result[(1, 1)] == [("color", "red")] + assert result[(1 - axis, axis)] == [("color", "red")] + + # test Series return where labels align but different order + func = lambda s: Series(["color: red;", "color: blue;"], index=["Y", "X"]) + result = df.style.apply(func, axis=axis)._compute().ctx + assert result[(0, 0)] == [("color", "blue")] + assert result[(1, 1)] == [("color", "red")] + assert result[(1 - axis, axis)] == [("color", "red")] + assert result[(axis, 1 - axis)] == [("color", "blue")] + + @pytest.mark.parametrize("index", [False, True]) + @pytest.mark.parametrize("columns", [False, True]) + def test_apply_dataframe_return(self, index, columns): + # GH 42014 + df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"]) + idxs = ["X", "Y"] if index else ["Y"] + cols = ["X", "Y"] if columns else ["Y"] + df_styles = DataFrame("color: red;", index=idxs, columns=cols) + result = df.style.apply(lambda x: df_styles, axis=None)._compute().ctx + + assert result[(1, 1)] == [("color", "red")] # (Y,Y) styles always present + assert (result[(0, 1)] == [("color", "red")]) is index # (X,Y) only if index + assert (result[(1, 0)] == [("color", "red")]) is columns # (Y,X) only if cols + assert (result[(0, 0)] == [("color", "red")]) is (index and columns) # (X,X) + + @pytest.mark.parametrize( + "slice_", + [ + IndexSlice[:], + IndexSlice[:, ["A"]], + IndexSlice[[1], :], + IndexSlice[[1], ["A"]], + IndexSlice[:2, ["A", "B"]], + ], + ) + @pytest.mark.parametrize("axis", [0, 1]) + def test_apply_subset(self, slice_, axis, df): + def h(x, color="bar"): + return Series(f"color: {color}", index=x.index, name=x.name) + + result = df.style.apply(h, axis=axis, subset=slice_, color="baz")._compute().ctx + expected = { + (r, c): [("color", "baz")] + for r, row in enumerate(df.index) + for c, col in enumerate(df.columns) + if row in df.loc[slice_].index and col in df.loc[slice_].columns + } + assert result == expected + + @pytest.mark.parametrize( + "slice_", + [ + IndexSlice[:], + IndexSlice[:, ["A"]], + IndexSlice[[1], :], + IndexSlice[[1], ["A"]], + IndexSlice[:2, ["A", "B"]], + ], + ) + def test_map_subset(self, slice_, df): + result = df.style.map(lambda x: "color:baz;", subset=slice_)._compute().ctx + expected = { + (r, c): [("color", "baz")] + for r, row in enumerate(df.index) + for c, col in enumerate(df.columns) + if row in df.loc[slice_].index and col in df.loc[slice_].columns + } + assert result == expected + + @pytest.mark.parametrize( + "slice_", + [ + IndexSlice[:, IndexSlice["x", "A"]], + IndexSlice[:, IndexSlice[:, "A"]], + IndexSlice[:, IndexSlice[:, ["A", "C"]]], # missing col element + IndexSlice[IndexSlice["a", 1], :], + IndexSlice[IndexSlice[:, 1], :], + IndexSlice[IndexSlice[:, [1, 3]], :], # missing row element + IndexSlice[:, ("x", "A")], + IndexSlice[("a", 1), :], + ], + ) + def test_map_subset_multiindex(self, slice_): + # GH 19861 + # edited for GH 33562 + if ( + 
isinstance(slice_[-1], tuple) + and isinstance(slice_[-1][-1], list) + and "C" in slice_[-1][-1] + ): + ctx = pytest.raises(KeyError, match="C") + elif ( + isinstance(slice_[0], tuple) + and isinstance(slice_[0][1], list) + and 3 in slice_[0][1] + ): + ctx = pytest.raises(KeyError, match="3") + else: + ctx = contextlib.nullcontext() + + idx = MultiIndex.from_product([["a", "b"], [1, 2]]) + col = MultiIndex.from_product([["x", "y"], ["A", "B"]]) + df = DataFrame(np.random.default_rng(2).random((4, 4)), columns=col, index=idx) + + with ctx: + df.style.map(lambda x: "color: red;", subset=slice_).to_html() + + def test_map_subset_multiindex_code(self): + # https://github.com/pandas-dev/pandas/issues/25858 + # Checks styler.map works with multiindex when codes are provided + codes = np.array([[0, 0, 1, 1], [0, 1, 0, 1]]) + columns = MultiIndex( + levels=[["a", "b"], ["%", "#"]], codes=codes, names=["", ""] + ) + df = DataFrame( + [[1, -1, 1, 1], [-1, 1, 1, 1]], index=["hello", "world"], columns=columns + ) + pct_subset = IndexSlice[:, IndexSlice[:, "%":"%"]] + + def color_negative_red(val): + color = "red" if val < 0 else "black" + return f"color: {color}" + + df.loc[pct_subset] + df.style.map(color_negative_red, subset=pct_subset) + + @pytest.mark.parametrize( + "stylefunc", ["background_gradient", "bar", "text_gradient"] + ) + def test_subset_for_boolean_cols(self, stylefunc): + # GH47838 + df = DataFrame( + [ + [1, 2], + [3, 4], + ], + columns=[False, True], + ) + styled = getattr(df.style, stylefunc)() + styled._compute() + assert set(styled.ctx) == {(0, 0), (0, 1), (1, 0), (1, 1)} + + def test_empty(self): + df = DataFrame({"A": [1, 0]}) + s = df.style + s.ctx = {(0, 0): [("color", "red")], (1, 0): [("", "")]} + + result = s._translate(True, True)["cellstyle"] + expected = [ + {"props": [("color", "red")], "selectors": ["row0_col0"]}, + {"props": [("", "")], "selectors": ["row1_col0"]}, + ] + assert result == expected + + def test_duplicate(self): + df = DataFrame({"A": [1, 0]}) + s = df.style + s.ctx = {(0, 0): [("color", "red")], (1, 0): [("color", "red")]} + + result = s._translate(True, True)["cellstyle"] + expected = [ + {"props": [("color", "red")], "selectors": ["row0_col0", "row1_col0"]} + ] + assert result == expected + + def test_init_with_na_rep(self): + # GH 21527 28358 + df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"]) + + ctx = Styler(df, na_rep="NA")._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "NA" + assert ctx["body"][0][2]["display_value"] == "NA" + + def test_caption(self, df): + styler = Styler(df, caption="foo") + result = styler.to_html() + assert all(["caption" in result, "foo" in result]) + + styler = df.style + result = styler.set_caption("baz") + assert styler is result + assert styler.caption == "baz" + + def test_uuid(self, df): + styler = Styler(df, uuid="abc123") + result = styler.to_html() + assert "abc123" in result + + styler = df.style + result = styler.set_uuid("aaa") + assert result is styler + assert result.uuid == "aaa" + + def test_unique_id(self): + # See https://github.com/pandas-dev/pandas/issues/16780 + df = DataFrame({"a": [1, 3, 5, 6], "b": [2, 4, 12, 21]}) + result = df.style.to_html(uuid="test") + assert "test" in result + ids = re.findall('id="(.*?)"', result) + assert np.unique(ids).size == len(ids) + + def test_table_styles(self, df): + style = [{"selector": "th", "props": [("foo", "bar")]}] # default format + styler = Styler(df, table_styles=style) + result = " ".join(styler.to_html().split()) + assert 
"th { foo: bar; }" in result + + styler = df.style + result = styler.set_table_styles(style) + assert styler is result + assert styler.table_styles == style + + # GH 39563 + style = [{"selector": "th", "props": "foo:bar;"}] # css string format + styler = df.style.set_table_styles(style) + result = " ".join(styler.to_html().split()) + assert "th { foo: bar; }" in result + + def test_table_styles_multiple(self, df): + ctx = df.style.set_table_styles( + [ + {"selector": "th,td", "props": "color:red;"}, + {"selector": "tr", "props": "color:green;"}, + ] + )._translate(True, True)["table_styles"] + assert ctx == [ + {"selector": "th", "props": [("color", "red")]}, + {"selector": "td", "props": [("color", "red")]}, + {"selector": "tr", "props": [("color", "green")]}, + ] + + def test_table_styles_dict_multiple_selectors(self, df): + # GH 44011 + result = df.style.set_table_styles( + { + "B": [ + {"selector": "th,td", "props": [("border-left", "2px solid black")]} + ] + } + )._translate(True, True)["table_styles"] + + expected = [ + {"selector": "th.col1", "props": [("border-left", "2px solid black")]}, + {"selector": "td.col1", "props": [("border-left", "2px solid black")]}, + ] + + assert result == expected + + def test_maybe_convert_css_to_tuples(self): + expected = [("a", "b"), ("c", "d e")] + assert maybe_convert_css_to_tuples("a:b;c:d e;") == expected + assert maybe_convert_css_to_tuples("a: b ;c: d e ") == expected + expected = [] + assert maybe_convert_css_to_tuples("") == expected + + def test_maybe_convert_css_to_tuples_err(self): + msg = "Styles supplied as string must follow CSS rule formats" + with pytest.raises(ValueError, match=msg): + maybe_convert_css_to_tuples("err") + + def test_table_attributes(self, df): + attributes = 'class="foo" data-bar' + styler = Styler(df, table_attributes=attributes) + result = styler.to_html() + assert 'class="foo" data-bar' in result + + result = df.style.set_table_attributes(attributes).to_html() + assert 'class="foo" data-bar' in result + + def test_apply_none(self): + def f(x): + return DataFrame( + np.where(x == x.max(), "color: red", ""), + index=x.index, + columns=x.columns, + ) + + result = DataFrame([[1, 2], [3, 4]]).style.apply(f, axis=None)._compute().ctx + assert result[(1, 1)] == [("color", "red")] + + def test_trim(self, df): + result = df.style.to_html() # trim=True + assert result.count("#") == 0 + + result = df.style.highlight_max().to_html() + assert result.count("#") == len(df.columns) + + def test_export(self, df, styler): + f = lambda x: "color: red" if x > 0 else "color: blue" + g = lambda x, z: f"color: {z}" if x > 0 else f"color: {z}" + style1 = styler + style1.map(f).map(g, z="b").highlight_max()._compute() # = render + result = style1.export() + style2 = df.style + style2.use(result) + assert style1._todo == style2._todo + style2.to_html() + + def test_bad_apply_shape(self): + df = DataFrame([[1, 2], [3, 4]], index=["A", "B"], columns=["X", "Y"]) + + msg = "resulted in the apply method collapsing to a Series." 
+ with pytest.raises(ValueError, match=msg): + df.style._apply(lambda x: "x") + + msg = "created invalid {} labels" + with pytest.raises(ValueError, match=msg.format("index")): + df.style._apply(lambda x: [""]) + + with pytest.raises(ValueError, match=msg.format("index")): + df.style._apply(lambda x: ["", "", "", ""]) + + with pytest.raises(ValueError, match=msg.format("index")): + df.style._apply(lambda x: Series(["a:v;", ""], index=["A", "C"]), axis=0) + + with pytest.raises(ValueError, match=msg.format("columns")): + df.style._apply(lambda x: ["", "", ""], axis=1) + + with pytest.raises(ValueError, match=msg.format("columns")): + df.style._apply(lambda x: Series(["a:v;", ""], index=["X", "Z"]), axis=1) + + msg = "returned ndarray with wrong shape" + with pytest.raises(ValueError, match=msg): + df.style._apply(lambda x: np.array([[""], [""]]), axis=None) + + def test_apply_bad_return(self): + def f(x): + return "" + + df = DataFrame([[1, 2], [3, 4]]) + msg = ( + "must return a DataFrame or ndarray when passed to `Styler.apply` " + "with axis=None" + ) + with pytest.raises(TypeError, match=msg): + df.style._apply(f, axis=None) + + @pytest.mark.parametrize("axis", ["index", "columns"]) + def test_apply_bad_labels(self, axis): + def f(x): + return DataFrame(**{axis: ["bad", "labels"]}) + + df = DataFrame([[1, 2], [3, 4]]) + msg = f"created invalid {axis} labels." + with pytest.raises(ValueError, match=msg): + df.style._apply(f, axis=None) + + def test_get_level_lengths(self): + index = MultiIndex.from_product([["a", "b"], [0, 1, 2]]) + expected = { + (0, 0): 3, + (0, 3): 3, + (1, 0): 1, + (1, 1): 1, + (1, 2): 1, + (1, 3): 1, + (1, 4): 1, + (1, 5): 1, + } + result = _get_level_lengths(index, sparsify=True, max_index=100) + tm.assert_dict_equal(result, expected) + + expected = { + (0, 0): 1, + (0, 1): 1, + (0, 2): 1, + (0, 3): 1, + (0, 4): 1, + (0, 5): 1, + (1, 0): 1, + (1, 1): 1, + (1, 2): 1, + (1, 3): 1, + (1, 4): 1, + (1, 5): 1, + } + result = _get_level_lengths(index, sparsify=False, max_index=100) + tm.assert_dict_equal(result, expected) + + def test_get_level_lengths_un_sorted(self): + index = MultiIndex.from_arrays([[1, 1, 2, 1], ["a", "b", "b", "d"]]) + expected = { + (0, 0): 2, + (0, 2): 1, + (0, 3): 1, + (1, 0): 1, + (1, 1): 1, + (1, 2): 1, + (1, 3): 1, + } + result = _get_level_lengths(index, sparsify=True, max_index=100) + tm.assert_dict_equal(result, expected) + + expected = { + (0, 0): 1, + (0, 1): 1, + (0, 2): 1, + (0, 3): 1, + (1, 0): 1, + (1, 1): 1, + (1, 2): 1, + (1, 3): 1, + } + result = _get_level_lengths(index, sparsify=False, max_index=100) + tm.assert_dict_equal(result, expected) + + def test_mi_sparse_index_names(self, blank_value): + # Test the class names and displayed value are correct on rendering MI names + df = DataFrame( + {"A": [1, 2]}, + index=MultiIndex.from_arrays( + [["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"] + ), + ) + result = df.style._translate(True, True) + head = result["head"][1] + expected = [ + { + "class": "index_name level0", + "display_value": "idx_level_0", + "is_visible": True, + }, + { + "class": "index_name level1", + "display_value": "idx_level_1", + "is_visible": True, + }, + { + "class": "blank col0", + "display_value": blank_value, + "is_visible": True, + }, + ] + for i, expected_dict in enumerate(expected): + assert expected_dict.items() <= head[i].items() + + def test_mi_sparse_column_names(self, blank_value): + df = DataFrame( + np.arange(16).reshape(4, 4), + index=MultiIndex.from_arrays( + [["a", "a", "b", "a"], [0, 1, 
1, 2]], + names=["idx_level_0", "idx_level_1"], + ), + columns=MultiIndex.from_arrays( + [["C1", "C1", "C2", "C2"], [1, 0, 1, 0]], names=["colnam_0", "colnam_1"] + ), + ) + result = Styler(df, cell_ids=False)._translate(True, True) + + for level in [0, 1]: + head = result["head"][level] + expected = [ + { + "class": "blank", + "display_value": blank_value, + "is_visible": True, + }, + { + "class": f"index_name level{level}", + "display_value": f"colnam_{level}", + "is_visible": True, + }, + ] + for i, expected_dict in enumerate(expected): + assert expected_dict.items() <= head[i].items() + + def test_hide_column_headers(self, df, styler): + ctx = styler.hide(axis="columns")._translate(True, True) + assert len(ctx["head"]) == 0 # no header entries with an unnamed index + + df.index.name = "some_name" + ctx = df.style.hide(axis="columns")._translate(True, True) + assert len(ctx["head"]) == 1 + # index names still visible, changed in #42101, reverted in 43404 + + def test_hide_single_index(self, df): + # GH 14194 + # single unnamed index + ctx = df.style._translate(True, True) + assert ctx["body"][0][0]["is_visible"] + assert ctx["head"][0][0]["is_visible"] + ctx2 = df.style.hide(axis="index")._translate(True, True) + assert not ctx2["body"][0][0]["is_visible"] + assert not ctx2["head"][0][0]["is_visible"] + + # single named index + ctx3 = df.set_index("A").style._translate(True, True) + assert ctx3["body"][0][0]["is_visible"] + assert len(ctx3["head"]) == 2 # 2 header levels + assert ctx3["head"][0][0]["is_visible"] + + ctx4 = df.set_index("A").style.hide(axis="index")._translate(True, True) + assert not ctx4["body"][0][0]["is_visible"] + assert len(ctx4["head"]) == 1 # only 1 header level + assert not ctx4["head"][0][0]["is_visible"] + + def test_hide_multiindex(self): + # GH 14194 + df = DataFrame( + {"A": [1, 2], "B": [1, 2]}, + index=MultiIndex.from_arrays( + [["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"] + ), + ) + ctx1 = df.style._translate(True, True) + # tests for 'a' and '0' + assert ctx1["body"][0][0]["is_visible"] + assert ctx1["body"][0][1]["is_visible"] + # check for blank header rows + assert len(ctx1["head"][0]) == 4 # two visible indexes and two data columns + + ctx2 = df.style.hide(axis="index")._translate(True, True) + # tests for 'a' and '0' + assert not ctx2["body"][0][0]["is_visible"] + assert not ctx2["body"][0][1]["is_visible"] + # check for blank header rows + assert len(ctx2["head"][0]) == 3 # one hidden (col name) and two data columns + assert not ctx2["head"][0][0]["is_visible"] + + def test_hide_columns_single_level(self, df): + # GH 14194 + # test hiding single column + ctx = df.style._translate(True, True) + assert ctx["head"][0][1]["is_visible"] + assert ctx["head"][0][1]["display_value"] == "A" + assert ctx["head"][0][2]["is_visible"] + assert ctx["head"][0][2]["display_value"] == "B" + assert ctx["body"][0][1]["is_visible"] # col A, row 1 + assert ctx["body"][1][2]["is_visible"] # col B, row 1 + + ctx = df.style.hide("A", axis="columns")._translate(True, True) + assert not ctx["head"][0][1]["is_visible"] + assert not ctx["body"][0][1]["is_visible"] # col A, row 1 + assert ctx["body"][1][2]["is_visible"] # col B, row 1 + + # test hiding multiple columns + ctx = df.style.hide(["A", "B"], axis="columns")._translate(True, True) + assert not ctx["head"][0][1]["is_visible"] + assert not ctx["head"][0][2]["is_visible"] + assert not ctx["body"][0][1]["is_visible"] # col A, row 1 + assert not ctx["body"][1][2]["is_visible"] # col B, row 1 + + def 
test_hide_columns_index_mult_levels(self): + # GH 14194 + # setup dataframe with multiple column levels and indices + i1 = MultiIndex.from_arrays( + [["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"] + ) + i2 = MultiIndex.from_arrays( + [["b", "b"], [0, 1]], names=["col_level_0", "col_level_1"] + ) + df = DataFrame([[1, 2], [3, 4]], index=i1, columns=i2) + ctx = df.style._translate(True, True) + # column headers + assert ctx["head"][0][2]["is_visible"] + assert ctx["head"][1][2]["is_visible"] + assert ctx["head"][1][3]["display_value"] == "1" + # indices + assert ctx["body"][0][0]["is_visible"] + # data + assert ctx["body"][1][2]["is_visible"] + assert ctx["body"][1][2]["display_value"] == "3" + assert ctx["body"][1][3]["is_visible"] + assert ctx["body"][1][3]["display_value"] == "4" + + # hide top column level, which hides both columns + ctx = df.style.hide("b", axis="columns")._translate(True, True) + assert not ctx["head"][0][2]["is_visible"] # b + assert not ctx["head"][1][2]["is_visible"] # 0 + assert not ctx["body"][1][2]["is_visible"] # 3 + assert ctx["body"][0][0]["is_visible"] # index + + # hide first column only + ctx = df.style.hide([("b", 0)], axis="columns")._translate(True, True) + assert not ctx["head"][0][2]["is_visible"] # b + assert ctx["head"][0][3]["is_visible"] # b + assert not ctx["head"][1][2]["is_visible"] # 0 + assert not ctx["body"][1][2]["is_visible"] # 3 + assert ctx["body"][1][3]["is_visible"] + assert ctx["body"][1][3]["display_value"] == "4" + + # hide second column and index + ctx = df.style.hide([("b", 1)], axis=1).hide(axis=0)._translate(True, True) + assert not ctx["body"][0][0]["is_visible"] # index + assert len(ctx["head"][0]) == 3 + assert ctx["head"][0][1]["is_visible"] # b + assert ctx["head"][1][1]["is_visible"] # 0 + assert not ctx["head"][1][2]["is_visible"] # 1 + assert not ctx["body"][1][3]["is_visible"] # 4 + assert ctx["body"][1][2]["is_visible"] + assert ctx["body"][1][2]["display_value"] == "3" + + # hide top row level, which hides both rows so body empty + ctx = df.style.hide("a", axis="index")._translate(True, True) + assert ctx["body"] == [] + + # hide first row only + ctx = df.style.hide(("a", 0), axis="index")._translate(True, True) + for i in [0, 1, 2, 3]: + assert "row1" in ctx["body"][0][i]["class"] # row0 not included in body + assert ctx["body"][0][i]["is_visible"] + + def test_pipe(self, df): + def set_caption_from_template(styler, a, b): + return styler.set_caption(f"Dataframe with a = {a} and b = {b}") + + styler = df.style.pipe(set_caption_from_template, "A", b="B") + assert "Dataframe with a = A and b = B" in styler.to_html() + + # Test with an argument that is a (callable, keyword_name) pair. 
+ def f(a, b, styler): + return (a, b, styler) + + styler = df.style + result = styler.pipe((f, "styler"), a=1, b=2) + assert result == (1, 2, styler) + + def test_no_cell_ids(self): + # GH 35588 + # GH 35663 + df = DataFrame(data=[[0]]) + styler = Styler(df, uuid="_", cell_ids=False) + styler.to_html() + s = styler.to_html() # render twice to ensure ctx is not updated + assert s.find('<td class="data row0 col0" >') != -1 + + @pytest.mark.parametrize( + "classes", + [ + DataFrame( + data=[["", "test-class"], [np.nan, None]], + columns=["A", "B"], + index=["a", "b"], + ), + DataFrame(data=[["test-class"]], columns=["B"], index=["a"]), + DataFrame(data=[["test-class", "unused"]], columns=["B", "C"], index=["a"]), + ], + ) + def test_set_data_classes(self, classes): + # GH 36159 + df = DataFrame(data=[[0, 1], [2, 3]], columns=["A", "B"], index=["a", "b"]) + s = Styler(df, uuid_len=0, cell_ids=False).set_td_classes(classes).to_html() + assert '<td class="data row0 col0" >0</td>' in s + assert '<td class="data row0 col1 test-class" >1</td>' in s + assert '<td class="data row1 col0" >2</td>' in s + assert '<td class="data row1 col1" >3</td>' in s + # GH 39317 + s = Styler(df, uuid_len=0, cell_ids=True).set_td_classes(classes).to_html() + assert '<td id="T__row0_col0" class="data row0 col0" >0</td>' in s + assert '<td id="T__row0_col1" class="data row0 col1 test-class" >1</td>' in s + assert '<td id="T__row1_col0" class="data row1 col0" >2</td>' in s + assert '<td id="T__row1_col1" class="data row1 col1" >3</td>' in s + + def test_set_data_classes_reindex(self): + # GH 39317 + df = DataFrame( + data=[[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=[0, 1, 2], index=[0, 1, 2] + ) + classes = DataFrame( + data=[["mi", "ma"], ["mu", "mo"]], + columns=[0, 2], + index=[0, 2], + ) + s = Styler(df, uuid_len=0).set_td_classes(classes).to_html() + assert '<td id="T__row0_col0" class="data row0 col0 mi" >0</td>' in s + assert '<td id="T__row0_col2" class="data row0 col2 ma" >2</td>' in s + assert '<td id="T__row1_col1" class="data row1 col1" >4</td>' in s + assert '<td id="T__row2_col0" class="data row2 col0 mu" >6</td>' in s + assert '<td id="T__row2_col2" class="data row2 col2 mo" >8</td>' in s + + def test_chaining_table_styles(self): + # GH 35607 + df = DataFrame(data=[[0, 1], [1, 2]], columns=["A", "B"]) + styler = df.style.set_table_styles( + [{"selector": "", "props": [("background-color", "yellow")]}] + ).set_table_styles( + [{"selector": ".col0", "props": [("background-color", "blue")]}], + overwrite=False, + ) + assert len(styler.table_styles) == 2 + + def test_column_and_row_styling(self): + # GH 35607 + df = DataFrame(data=[[0, 1], [1, 2]], columns=["A", "B"]) + s = Styler(df, uuid_len=0) + s = s.set_table_styles({"A": [{"selector": "", "props": [("color", "blue")]}]}) + assert "#T_ .col0 {\n color: blue;\n}" in s.to_html() + s = s.set_table_styles( + {0: [{"selector": "", "props": [("color", "blue")]}]}, axis=1 + ) + assert "#T_ .row0 {\n color: blue;\n}" in s.to_html() + + @pytest.mark.parametrize("len_", [1, 5, 32, 33, 100]) + def test_uuid_len(self, len_): + # GH 36345 + df = DataFrame(data=[["A"]]) + s = Styler(df, uuid_len=len_, cell_ids=False).to_html() + strt = s.find('id="T_') + end = s[strt + 6 :].find('"') + if len_ > 32: + assert end == 32 + else: + assert end == len_ + + @pytest.mark.parametrize("len_", [-2, "bad", None]) + def test_uuid_len_raises(self, len_): + # GH 36345 + df = DataFrame(data=[["A"]]) + msg = "``uuid_len`` must be an integer in range \\[0, 32\\]." 
+ with pytest.raises(TypeError, match=msg): + Styler(df, uuid_len=len_, cell_ids=False).to_html() + + @pytest.mark.parametrize( + "slc", + [ + IndexSlice[:, :], + IndexSlice[:, 1], + IndexSlice[1, :], + IndexSlice[[1], [1]], + IndexSlice[1, [1]], + IndexSlice[[1], 1], + IndexSlice[1], + IndexSlice[1, 1], + slice(None, None, None), + [0, 1], + np.array([0, 1]), + Series([0, 1]), + ], + ) + def test_non_reducing_slice(self, slc): + df = DataFrame([[0, 1], [2, 3]]) + + tslice_ = non_reducing_slice(slc) + assert isinstance(df.loc[tslice_], DataFrame) + + @pytest.mark.parametrize("box", [list, Series, np.array]) + def test_list_slice(self, box): + # like dataframe getitem + subset = box(["A"]) + + df = DataFrame({"A": [1, 2], "B": [3, 4]}, index=["A", "B"]) + expected = IndexSlice[:, ["A"]] + + result = non_reducing_slice(subset) + tm.assert_frame_equal(df.loc[result], df.loc[expected]) + + def test_non_reducing_slice_on_multiindex(self): + # GH 19861 + dic = { + ("a", "d"): [1, 4], + ("a", "c"): [2, 3], + ("b", "c"): [3, 2], + ("b", "d"): [4, 1], + } + df = DataFrame(dic, index=[0, 1]) + idx = IndexSlice + slice_ = idx[:, idx["b", "d"]] + tslice_ = non_reducing_slice(slice_) + + result = df.loc[tslice_] + expected = DataFrame({("b", "d"): [4, 1]}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "slice_", + [ + IndexSlice[:, :], + # check cols + IndexSlice[:, IndexSlice[["a"]]], # inferred deeper need list + IndexSlice[:, IndexSlice[["a"], ["c"]]], # inferred deeper need list + IndexSlice[:, IndexSlice["a", "c", :]], + IndexSlice[:, IndexSlice["a", :, "e"]], + IndexSlice[:, IndexSlice[:, "c", "e"]], + IndexSlice[:, IndexSlice["a", ["c", "d"], :]], # check list + IndexSlice[:, IndexSlice["a", ["c", "d", "-"], :]], # don't allow missing + IndexSlice[:, IndexSlice["a", ["c", "d", "-"], "e"]], # no slice + # check rows + IndexSlice[IndexSlice[["U"]], :], # inferred deeper need list + IndexSlice[IndexSlice[["U"], ["W"]], :], # inferred deeper need list + IndexSlice[IndexSlice["U", "W", :], :], + IndexSlice[IndexSlice["U", :, "Y"], :], + IndexSlice[IndexSlice[:, "W", "Y"], :], + IndexSlice[IndexSlice[:, "W", ["Y", "Z"]], :], # check list + IndexSlice[IndexSlice[:, "W", ["Y", "Z", "-"]], :], # don't allow missing + IndexSlice[IndexSlice["U", "W", ["Y", "Z", "-"]], :], # no slice + # check simultaneous + IndexSlice[IndexSlice[:, "W", "Y"], IndexSlice["a", "c", :]], + ], + ) + def test_non_reducing_multi_slice_on_multiindex(self, slice_): + # GH 33562 + cols = MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]) + idxs = MultiIndex.from_product([["U", "V"], ["W", "X"], ["Y", "Z"]]) + df = DataFrame(np.arange(64).reshape(8, 8), columns=cols, index=idxs) + + for lvl in [0, 1]: + key = slice_[lvl] + if isinstance(key, tuple): + for subkey in key: + if isinstance(subkey, list) and "-" in subkey: + # not present in the index level, raises KeyError since 2.0 + with pytest.raises(KeyError, match="-"): + df.loc[slice_] + return + + expected = df.loc[slice_] + result = df.loc[non_reducing_slice(slice_)] + tm.assert_frame_equal(result, expected) + + +def test_hidden_index_names(mi_df): + mi_df.index.names = ["Lev0", "Lev1"] + mi_styler = mi_df.style + ctx = mi_styler._translate(True, True) + assert len(ctx["head"]) == 3 # 2 column index levels + 1 index names row + + mi_styler.hide(axis="index", names=True) + ctx = mi_styler._translate(True, True) + assert len(ctx["head"]) == 2 # index names row is unparsed + for i in range(4): + assert ctx["body"][0][i]["is_visible"] # 2 index 
levels + 2 data values visible + + mi_styler.hide(axis="index", level=1) + ctx = mi_styler._translate(True, True) + assert len(ctx["head"]) == 2 # index names row is still hidden + assert ctx["body"][0][0]["is_visible"] is True + assert ctx["body"][0][1]["is_visible"] is False + + +def test_hidden_column_names(mi_df): + mi_df.columns.names = ["Lev0", "Lev1"] + mi_styler = mi_df.style + ctx = mi_styler._translate(True, True) + assert ctx["head"][0][1]["display_value"] == "Lev0" + assert ctx["head"][1][1]["display_value"] == "Lev1" + + mi_styler.hide(names=True, axis="columns") + ctx = mi_styler._translate(True, True) + assert ctx["head"][0][1]["display_value"] == " " + assert ctx["head"][1][1]["display_value"] == " " + + mi_styler.hide(level=0, axis="columns") + ctx = mi_styler._translate(True, True) + assert len(ctx["head"]) == 1 # no index names and only one visible column header row + assert ctx["head"][0][1]["display_value"] == " " + + +@pytest.mark.parametrize("caption", [1, ("a", "b", "c"), (1, "s")]) +def test_caption_raises(mi_styler, caption): + msg = "`caption` must be either a string or 2-tuple of strings." + with pytest.raises(ValueError, match=msg): + mi_styler.set_caption(caption) + + +def test_hiding_headers_over_index_no_sparsify(): + # GH 43464 + midx = MultiIndex.from_product([[1, 2], ["a", "a", "b"]]) + df = DataFrame(9, index=midx, columns=[0]) + ctx = df.style._translate(False, False) + assert len(ctx["body"]) == 6 + ctx = df.style.hide((1, "a"), axis=0)._translate(False, False) + assert len(ctx["body"]) == 4 + assert "row2" in ctx["body"][0][0]["class"] + + +def test_hiding_headers_over_columns_no_sparsify(): + # GH 43464 + midx = MultiIndex.from_product([[1, 2], ["a", "a", "b"]]) + df = DataFrame(9, columns=midx, index=[0]) + ctx = df.style._translate(False, False) + for ix in [(0, 1), (0, 2), (1, 1), (1, 2)]: + assert ctx["head"][ix[0]][ix[1]]["is_visible"] is True + ctx = df.style.hide((1, "a"), axis="columns")._translate(False, False) + for ix in [(0, 1), (0, 2), (1, 1), (1, 2)]: + assert ctx["head"][ix[0]][ix[1]]["is_visible"] is False + + +def test_get_level_lengths_mi_hidden(): + # GH 43464 + index = MultiIndex.from_arrays([[1, 1, 1, 2, 2, 2], ["a", "a", "b", "a", "a", "b"]]) + expected = { + (0, 2): 1, + (0, 3): 1, + (0, 4): 1, + (0, 5): 1, + (1, 2): 1, + (1, 3): 1, + (1, 4): 1, + (1, 5): 1, + } + result = _get_level_lengths( + index, + sparsify=False, + max_index=100, + hidden_elements=[0, 1, 0, 1], # hidden element can repeat if duplicated index + ) + tm.assert_dict_equal(result, expected) + + +def test_row_trimming_hide_index(): + # gh 43703 + df = DataFrame([[1], [2], [3], [4], [5]]) + with option_context("styler.render.max_rows", 2): + ctx = df.style.hide([0, 1], axis="index")._translate(True, True) + assert len(ctx["body"]) == 3 + for r, val in enumerate(["3", "4", "..."]): + assert ctx["body"][r][1]["display_value"] == val + + +def test_row_trimming_hide_index_mi(): + # gh 44247 + df = DataFrame([[1], [2], [3], [4], [5]]) + df.index = MultiIndex.from_product([[0], [0, 1, 2, 3, 4]]) + with option_context("styler.render.max_rows", 2): + ctx = df.style.hide([(0, 0), (0, 1)], axis="index")._translate(True, True) + assert len(ctx["body"]) == 3 + + # level 0 index headers (sparsified) + assert {"value": 0, "attributes": 'rowspan="2"', "is_visible": True}.items() <= ctx[ + "body" + ][0][0].items() + assert {"value": 0, "attributes": "", "is_visible": False}.items() <= ctx["body"][ + 1 + ][0].items() + assert {"value": "...", "is_visible": True}.items() <= 
ctx["body"][2][0].items() + + for r, val in enumerate(["2", "3", "..."]): + assert ctx["body"][r][1]["display_value"] == val # level 1 index headers + for r, val in enumerate(["3", "4", "..."]): + assert ctx["body"][r][2]["display_value"] == val # data values + + +def test_col_trimming_hide_columns(): + # gh 44272 + df = DataFrame([[1, 2, 3, 4, 5]]) + with option_context("styler.render.max_columns", 2): + ctx = df.style.hide([0, 1], axis="columns")._translate(True, True) + + assert len(ctx["head"][0]) == 6 # blank, [0, 1 (hidden)], [2, 3 (visible)], + trim + for c, vals in enumerate([(1, False), (2, True), (3, True), ("...", True)]): + assert ctx["head"][0][c + 2]["value"] == vals[0] + assert ctx["head"][0][c + 2]["is_visible"] == vals[1] + + assert len(ctx["body"][0]) == 6 # index + 2 hidden + 2 visible + trimming col + + +def test_no_empty_apply(mi_styler): + # 45313 + mi_styler.apply(lambda s: ["a:v;"] * 2, subset=[False, False]) + mi_styler._compute() + + +@pytest.mark.parametrize("format", ["html", "latex", "string"]) +def test_output_buffer(mi_styler, format): + # gh 47053 + with tm.ensure_clean(f"delete_me.{format}") as f: + getattr(mi_styler, f"to_{format}")(f) diff --git a/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_to_latex.py b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_to_latex.py new file mode 100644 index 00000000..7f5b6b30 --- /dev/null +++ b/dbdpy-env/lib/python3.9/site-packages/pandas/tests/io/formats/style/test_to_latex.py @@ -0,0 +1,1087 @@ +from textwrap import dedent + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, + option_context, +) + +pytest.importorskip("jinja2") +from pandas.io.formats.style import Styler +from pandas.io.formats.style_render import ( + _parse_latex_cell_styles, + _parse_latex_css_conversion, + _parse_latex_header_span, + _parse_latex_table_styles, + _parse_latex_table_wrapping, +) + + +@pytest.fixture +def df(): + return DataFrame({"A": [0, 1], "B": [-0.61, -1.22], "C": ["ab", "cd"]}) + + +@pytest.fixture +def df_ext(): + return DataFrame( + {"A": [0, 1, 2], "B": [-0.61, -1.22, -2.22], "C": ["ab", "cd", "de"]} + ) + + +@pytest.fixture +def styler(df): + return Styler(df, uuid_len=0, precision=2) + + +def test_minimal_latex_tabular(styler): + expected = dedent( + """\ + \\begin{tabular}{lrrl} + & A & B & C \\\\ + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\end{tabular} + """ + ) + assert styler.to_latex() == expected + + +def test_tabular_hrules(styler): + expected = dedent( + """\ + \\begin{tabular}{lrrl} + \\toprule + & A & B & C \\\\ + \\midrule + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\bottomrule + \\end{tabular} + """ + ) + assert styler.to_latex(hrules=True) == expected + + +def test_tabular_custom_hrules(styler): + styler.set_table_styles( + [ + {"selector": "toprule", "props": ":hline"}, + {"selector": "bottomrule", "props": ":otherline"}, + ] + ) # no midrule + expected = dedent( + """\ + \\begin{tabular}{lrrl} + \\hline + & A & B & C \\\\ + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\otherline + \\end{tabular} + """ + ) + assert styler.to_latex() == expected + + +def test_column_format(styler): + # default setting is already tested in `test_minimal_latex_tabular` + styler.set_table_styles([{"selector": "column_format", "props": ":cccc"}]) + + assert "\\begin{tabular}{rrrr}" in styler.to_latex(column_format="rrrr") + styler.set_table_styles([{"selector": "column_format", "props": ":r|r|cc"}]) + assert 
"\\begin{tabular}{r|r|cc}" in styler.to_latex() + + +def test_siunitx_cols(styler): + expected = dedent( + """\ + \\begin{tabular}{lSSl} + {} & {A} & {B} & {C} \\\\ + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\end{tabular} + """ + ) + assert styler.to_latex(siunitx=True) == expected + + +def test_position(styler): + assert "\\begin{table}[h!]" in styler.to_latex(position="h!") + assert "\\end{table}" in styler.to_latex(position="h!") + styler.set_table_styles([{"selector": "position", "props": ":b!"}]) + assert "\\begin{table}[b!]" in styler.to_latex() + assert "\\end{table}" in styler.to_latex() + + +@pytest.mark.parametrize("env", [None, "longtable"]) +def test_label(styler, env): + assert "\n\\label{text}" in styler.to_latex(label="text", environment=env) + styler.set_table_styles([{"selector": "label", "props": ":{more §text}"}]) + assert "\n\\label{more :text}" in styler.to_latex(environment=env) + + +def test_position_float_raises(styler): + msg = "`position_float` should be one of 'raggedright', 'raggedleft', 'centering'," + with pytest.raises(ValueError, match=msg): + styler.to_latex(position_float="bad_string") + + msg = "`position_float` cannot be used in 'longtable' `environment`" + with pytest.raises(ValueError, match=msg): + styler.to_latex(position_float="centering", environment="longtable") + + +@pytest.mark.parametrize("label", [(None, ""), ("text", "\\label{text}")]) +@pytest.mark.parametrize("position", [(None, ""), ("h!", "{table}[h!]")]) +@pytest.mark.parametrize("caption", [(None, ""), ("text", "\\caption{text}")]) +@pytest.mark.parametrize("column_format", [(None, ""), ("rcrl", "{tabular}{rcrl}")]) +@pytest.mark.parametrize("position_float", [(None, ""), ("centering", "\\centering")]) +def test_kwargs_combinations( + styler, label, position, caption, column_format, position_float +): + result = styler.to_latex( + label=label[0], + position=position[0], + caption=caption[0], + column_format=column_format[0], + position_float=position_float[0], + ) + assert label[1] in result + assert position[1] in result + assert caption[1] in result + assert column_format[1] in result + assert position_float[1] in result + + +def test_custom_table_styles(styler): + styler.set_table_styles( + [ + {"selector": "mycommand", "props": ":{myoptions}"}, + {"selector": "mycommand2", "props": ":{myoptions2}"}, + ] + ) + expected = dedent( + """\ + \\begin{table} + \\mycommand{myoptions} + \\mycommand2{myoptions2} + """ + ) + assert expected in styler.to_latex() + + +def test_cell_styling(styler): + styler.highlight_max(props="itshape:;Huge:--wrap;") + expected = dedent( + """\ + \\begin{tabular}{lrrl} + & A & B & C \\\\ + 0 & 0 & \\itshape {\\Huge -0.61} & ab \\\\ + 1 & \\itshape {\\Huge 1} & -1.22 & \\itshape {\\Huge cd} \\\\ + \\end{tabular} + """ + ) + assert expected == styler.to_latex() + + +def test_multiindex_columns(df): + cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df.columns = cidx + expected = dedent( + """\ + \\begin{tabular}{lrrl} + & \\multicolumn{2}{r}{A} & B \\\\ + & a & b & c \\\\ + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\end{tabular} + """ + ) + s = df.style.format(precision=2) + assert expected == s.to_latex() + + # non-sparse + expected = dedent( + """\ + \\begin{tabular}{lrrl} + & A & A & B \\\\ + & a & b & c \\\\ + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\end{tabular} + """ + ) + s = df.style.format(precision=2) + assert expected == s.to_latex(sparse_columns=False) + + +def test_multiindex_row(df_ext): + 
ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df_ext.index = ridx + expected = dedent( + """\ + \\begin{tabular}{llrrl} + & & A & B & C \\\\ + \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\ + & b & 1 & -1.22 & cd \\\\ + B & c & 2 & -2.22 & de \\\\ + \\end{tabular} + """ + ) + styler = df_ext.style.format(precision=2) + result = styler.to_latex() + assert expected == result + + # non-sparse + expected = dedent( + """\ + \\begin{tabular}{llrrl} + & & A & B & C \\\\ + A & a & 0 & -0.61 & ab \\\\ + A & b & 1 & -1.22 & cd \\\\ + B & c & 2 & -2.22 & de \\\\ + \\end{tabular} + """ + ) + result = styler.to_latex(sparse_index=False) + assert expected == result + + +def test_multirow_naive(df_ext): + ridx = MultiIndex.from_tuples([("X", "x"), ("X", "y"), ("Y", "z")]) + df_ext.index = ridx + expected = dedent( + """\ + \\begin{tabular}{llrrl} + & & A & B & C \\\\ + X & x & 0 & -0.61 & ab \\\\ + & y & 1 & -1.22 & cd \\\\ + Y & z & 2 & -2.22 & de \\\\ + \\end{tabular} + """ + ) + styler = df_ext.style.format(precision=2) + result = styler.to_latex(multirow_align="naive") + assert expected == result + + +def test_multiindex_row_and_col(df_ext): + cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")]) + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df_ext.index, df_ext.columns = ridx, cidx + expected = dedent( + """\ + \\begin{tabular}{llrrl} + & & \\multicolumn{2}{l}{Z} & Y \\\\ + & & a & b & c \\\\ + \\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\ + & b & 1 & -1.22 & cd \\\\ + B & c & 2 & -2.22 & de \\\\ + \\end{tabular} + """ + ) + styler = df_ext.style.format(precision=2) + result = styler.to_latex(multirow_align="b", multicol_align="l") + assert result == expected + + # non-sparse + expected = dedent( + """\ + \\begin{tabular}{llrrl} + & & Z & Z & Y \\\\ + & & a & b & c \\\\ + A & a & 0 & -0.61 & ab \\\\ + A & b & 1 & -1.22 & cd \\\\ + B & c & 2 & -2.22 & de \\\\ + \\end{tabular} + """ + ) + result = styler.to_latex(sparse_index=False, sparse_columns=False) + assert result == expected + + +@pytest.mark.parametrize( + "multicol_align, siunitx, header", + [ + ("naive-l", False, " & A & &"), + ("naive-r", False, " & & & A"), + ("naive-l", True, "{} & {A} & {} & {}"), + ("naive-r", True, "{} & {} & {} & {A}"), + ], +) +def test_multicol_naive(df, multicol_align, siunitx, header): + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("A", "c")]) + df.columns = ridx + level1 = " & a & b & c" if not siunitx else "{} & {a} & {b} & {c}" + col_format = "lrrl" if not siunitx else "lSSl" + expected = dedent( + f"""\ + \\begin{{tabular}}{{{col_format}}} + {header} \\\\ + {level1} \\\\ + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\end{{tabular}} + """ + ) + styler = df.style.format(precision=2) + result = styler.to_latex(multicol_align=multicol_align, siunitx=siunitx) + assert expected == result + + +def test_multi_options(df_ext): + cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")]) + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df_ext.index, df_ext.columns = ridx, cidx + styler = df_ext.style.format(precision=2) + + expected = dedent( + """\ + & & \\multicolumn{2}{r}{Z} & Y \\\\ + & & a & b & c \\\\ + \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\ + """ + ) + result = styler.to_latex() + assert expected in result + + with option_context("styler.latex.multicol_align", "l"): + assert " & & \\multicolumn{2}{l}{Z} & Y \\\\" in styler.to_latex() + + with 
option_context("styler.latex.multirow_align", "b"): + assert "\\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\" in styler.to_latex() + + +def test_multiindex_columns_hidden(): + df = DataFrame([[1, 2, 3, 4]]) + df.columns = MultiIndex.from_tuples([("A", 1), ("A", 2), ("A", 3), ("B", 1)]) + s = df.style + assert "{tabular}{lrrrr}" in s.to_latex() + s.set_table_styles([]) # reset the position command + s.hide([("A", 2)], axis="columns") + assert "{tabular}{lrrr}" in s.to_latex() + + +@pytest.mark.parametrize( + "option, value", + [ + ("styler.sparse.index", True), + ("styler.sparse.index", False), + ("styler.sparse.columns", True), + ("styler.sparse.columns", False), + ], +) +def test_sparse_options(df_ext, option, value): + cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")]) + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df_ext.index, df_ext.columns = ridx, cidx + styler = df_ext.style + + latex1 = styler.to_latex() + with option_context(option, value): + latex2 = styler.to_latex() + assert (latex1 == latex2) is value + + +def test_hidden_index(styler): + styler.hide(axis="index") + expected = dedent( + """\ + \\begin{tabular}{rrl} + A & B & C \\\\ + 0 & -0.61 & ab \\\\ + 1 & -1.22 & cd \\\\ + \\end{tabular} + """ + ) + assert styler.to_latex() == expected + + +@pytest.mark.parametrize("environment", ["table", "figure*", None]) +def test_comprehensive(df_ext, environment): + # test as many low level features simultaneously as possible + cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")]) + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df_ext.index, df_ext.columns = ridx, cidx + stlr = df_ext.style + stlr.set_caption("mycap") + stlr.set_table_styles( + [ + {"selector": "label", "props": ":{fig§item}"}, + {"selector": "position", "props": ":h!"}, + {"selector": "position_float", "props": ":centering"}, + {"selector": "column_format", "props": ":rlrlr"}, + {"selector": "toprule", "props": ":toprule"}, + {"selector": "midrule", "props": ":midrule"}, + {"selector": "bottomrule", "props": ":bottomrule"}, + {"selector": "rowcolors", "props": ":{3}{pink}{}"}, # custom command + ] + ) + stlr.highlight_max(axis=0, props="textbf:--rwrap;cellcolor:[rgb]{1,1,0.6}--rwrap") + stlr.highlight_max(axis=None, props="Huge:--wrap;", subset=[("Z", "a"), ("Z", "b")]) + + expected = ( + """\ +\\begin{table}[h!] 
+\\centering +\\caption{mycap} +\\label{fig:item} +\\rowcolors{3}{pink}{} +\\begin{tabular}{rlrlr} +\\toprule + & & \\multicolumn{2}{r}{Z} & Y \\\\ + & & a & b & c \\\\ +\\midrule +\\multirow[c]{2}{*}{A} & a & 0 & \\textbf{\\cellcolor[rgb]{1,1,0.6}{-0.61}} & ab \\\\ + & b & 1 & -1.22 & cd \\\\ +B & c & \\textbf{\\cellcolor[rgb]{1,1,0.6}{{\\Huge 2}}} & -2.22 & """ + """\ +\\textbf{\\cellcolor[rgb]{1,1,0.6}{de}} \\\\ +\\bottomrule +\\end{tabular} +\\end{table} +""" + ).replace("table", environment if environment else "table") + result = stlr.format(precision=2).to_latex(environment=environment) + assert result == expected + + +def test_environment_option(styler): + with option_context("styler.latex.environment", "bar-env"): + assert "\\begin{bar-env}" in styler.to_latex() + assert "\\begin{foo-env}" in styler.to_latex(environment="foo-env") + + +def test_parse_latex_table_styles(styler): + styler.set_table_styles( + [ + {"selector": "foo", "props": [("attr", "value")]}, + {"selector": "bar", "props": [("attr", "overwritten")]}, + {"selector": "bar", "props": [("attr", "baz"), ("attr2", "ignored")]}, + {"selector": "label", "props": [("", "{fig§item}")]}, + ] + ) + assert _parse_latex_table_styles(styler.table_styles, "bar") == "baz" + + # test '§' replaced by ':' [for CSS compatibility] + assert _parse_latex_table_styles(styler.table_styles, "label") == "{fig:item}" + + +def test_parse_latex_cell_styles_basic(): # test nesting + cell_style = [("itshape", "--rwrap"), ("cellcolor", "[rgb]{0,1,1}--rwrap")] + expected = "\\itshape{\\cellcolor[rgb]{0,1,1}{text}}" + assert _parse_latex_cell_styles(cell_style, "text") == expected + + +@pytest.mark.parametrize( + "wrap_arg, expected", + [ # test wrapping + ("", "\\ "), + ("--wrap", "{\\ }"), + ("--nowrap", "\\ "), + ("--lwrap", "{\\} "), + ("--dwrap", "{\\}{}"), + ("--rwrap", "\\{}"), + ], +) +def test_parse_latex_cell_styles_braces(wrap_arg, expected): + cell_style = [("", f"{wrap_arg}")] + assert _parse_latex_cell_styles(cell_style, "") == expected + + +def test_parse_latex_header_span(): + cell = {"attributes": 'colspan="3"', "display_value": "text", "cellstyle": []} + expected = "\\multicolumn{3}{Y}{text}" + assert _parse_latex_header_span(cell, "X", "Y") == expected + + cell = {"attributes": 'rowspan="5"', "display_value": "text", "cellstyle": []} + expected = "\\multirow[X]{5}{*}{text}" + assert _parse_latex_header_span(cell, "X", "Y") == expected + + cell = {"display_value": "text", "cellstyle": []} + assert _parse_latex_header_span(cell, "X", "Y") == "text" + + cell = {"display_value": "text", "cellstyle": [("bfseries", "--rwrap")]} + assert _parse_latex_header_span(cell, "X", "Y") == "\\bfseries{text}" + + +def test_parse_latex_table_wrapping(styler): + styler.set_table_styles( + [ + {"selector": "toprule", "props": ":value"}, + {"selector": "bottomrule", "props": ":value"}, + {"selector": "midrule", "props": ":value"}, + {"selector": "column_format", "props": ":value"}, + ] + ) + assert _parse_latex_table_wrapping(styler.table_styles, styler.caption) is False + assert _parse_latex_table_wrapping(styler.table_styles, "some caption") is True + styler.set_table_styles( + [ + {"selector": "not-ignored", "props": ":value"}, + ], + overwrite=False, + ) + assert _parse_latex_table_wrapping(styler.table_styles, None) is True + + +def test_short_caption(styler): + result = styler.to_latex(caption=("full cap", "short cap")) + assert "\\caption[short cap]{full cap}" in result + + +@pytest.mark.parametrize( + "css, expected", + [ + ([("color", 
"red")], [("color", "{red}")]), # test color and input format types + ( + [("color", "rgb(128, 128, 128 )")], + [("color", "[rgb]{0.502, 0.502, 0.502}")], + ), + ( + [("color", "rgb(128, 50%, 25% )")], + [("color", "[rgb]{0.502, 0.500, 0.250}")], + ), + ( + [("color", "rgba(128,128,128,1)")], + [("color", "[rgb]{0.502, 0.502, 0.502}")], + ), + ([("color", "#FF00FF")], [("color", "[HTML]{FF00FF}")]), + ([("color", "#F0F")], [("color", "[HTML]{FF00FF}")]), + ([("font-weight", "bold")], [("bfseries", "")]), # test font-weight and types + ([("font-weight", "bolder")], [("bfseries", "")]), + ([("font-weight", "normal")], []), + ([("background-color", "red")], [("cellcolor", "{red}--lwrap")]), + ( + [("background-color", "#FF00FF")], # test background-color command and wrap + [("cellcolor", "[HTML]{FF00FF}--lwrap")], + ), + ([("font-style", "italic")], [("itshape", "")]), # test font-style and types + ([("font-style", "oblique")], [("slshape", "")]), + ([("font-style", "normal")], []), + ([("color", "red /*--dwrap*/")], [("color", "{red}--dwrap")]), # css comments + ([("background-color", "red /* --dwrap */")], [("cellcolor", "{red}--dwrap")]), + ], +) +def test_parse_latex_css_conversion(css, expected): + result = _parse_latex_css_conversion(css) + assert result == expected + + +@pytest.mark.parametrize( + "env, inner_env", + [ + (None, "tabular"), + ("table", "tabular"), + ("longtable", "longtable"), + ], +) +@pytest.mark.parametrize( + "convert, exp", [(True, "bfseries"), (False, "font-weightbold")] +) +def test_parse_latex_css_convert_minimal(styler, env, inner_env, convert, exp): + # parameters ensure longtable template is also tested + styler.highlight_max(props="font-weight:bold;") + result = styler.to_latex(convert_css=convert, environment=env) + expected = dedent( + f"""\ + 0 & 0 & \\{exp} -0.61 & ab \\\\ + 1 & \\{exp} 1 & -1.22 & \\{exp} cd \\\\ + \\end{{{inner_env}}} + """ + ) + assert expected in result + + +def test_parse_latex_css_conversion_option(): + css = [("command", "option--latex--wrap")] + expected = [("command", "option--wrap")] + result = _parse_latex_css_conversion(css) + assert result == expected + + +def test_styler_object_after_render(styler): + # GH 42320 + pre_render = styler._copy(deepcopy=True) + styler.to_latex( + column_format="rllr", + position="h", + position_float="centering", + hrules=True, + label="my lab", + caption="my cap", + ) + + assert pre_render.table_styles == styler.table_styles + assert pre_render.caption == styler.caption + + +def test_longtable_comprehensive(styler): + result = styler.to_latex( + environment="longtable", hrules=True, label="fig:A", caption=("full", "short") + ) + expected = dedent( + """\ + \\begin{longtable}{lrrl} + \\caption[short]{full} \\label{fig:A} \\\\ + \\toprule + & A & B & C \\\\ + \\midrule + \\endfirsthead + \\caption[]{full} \\\\ + \\toprule + & A & B & C \\\\ + \\midrule + \\endhead + \\midrule + \\multicolumn{4}{r}{Continued on next page} \\\\ + \\midrule + \\endfoot + \\bottomrule + \\endlastfoot + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\end{longtable} + """ + ) + assert result == expected + + +def test_longtable_minimal(styler): + result = styler.to_latex(environment="longtable") + expected = dedent( + """\ + \\begin{longtable}{lrrl} + & A & B & C \\\\ + \\endfirsthead + & A & B & C \\\\ + \\endhead + \\multicolumn{4}{r}{Continued on next page} \\\\ + \\endfoot + \\endlastfoot + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\end{longtable} + """ + ) + assert result == expected + + 
+@pytest.mark.parametrize( + "sparse, exp, siunitx", + [ + (True, "{} & \\multicolumn{2}{r}{A} & {B}", True), + (False, "{} & {A} & {A} & {B}", True), + (True, " & \\multicolumn{2}{r}{A} & B", False), + (False, " & A & A & B", False), + ], +) +def test_longtable_multiindex_columns(df, sparse, exp, siunitx): + cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df.columns = cidx + with_si = "{} & {a} & {b} & {c} \\\\" + without_si = " & a & b & c \\\\" + expected = dedent( + f"""\ + \\begin{{longtable}}{{l{"SS" if siunitx else "rr"}l}} + {exp} \\\\ + {with_si if siunitx else without_si} + \\endfirsthead + {exp} \\\\ + {with_si if siunitx else without_si} + \\endhead + """ + ) + result = df.style.to_latex( + environment="longtable", sparse_columns=sparse, siunitx=siunitx + ) + assert expected in result + + +@pytest.mark.parametrize( + "caption, cap_exp", + [ + ("full", ("{full}", "")), + (("full", "short"), ("{full}", "[short]")), + ], +) +@pytest.mark.parametrize("label, lab_exp", [(None, ""), ("tab:A", " \\label{tab:A}")]) +def test_longtable_caption_label(styler, caption, cap_exp, label, lab_exp): + cap_exp1 = f"\\caption{cap_exp[1]}{cap_exp[0]}" + cap_exp2 = f"\\caption[]{cap_exp[0]}" + + expected = dedent( + f"""\ + {cap_exp1}{lab_exp} \\\\ + & A & B & C \\\\ + \\endfirsthead + {cap_exp2} \\\\ + """ + ) + assert expected in styler.to_latex( + environment="longtable", caption=caption, label=label + ) + + +@pytest.mark.parametrize("index", [True, False]) +@pytest.mark.parametrize( + "columns, siunitx", + [ + (True, True), + (True, False), + (False, False), + ], +) +def test_apply_map_header_render_mi(df_ext, index, columns, siunitx): + cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")]) + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df_ext.index, df_ext.columns = ridx, cidx + styler = df_ext.style + + func = lambda v: "bfseries: --rwrap" if "A" in v or "Z" in v or "c" in v else None + + if index: + styler.map_index(func, axis="index") + if columns: + styler.map_index(func, axis="columns") + + result = styler.to_latex(siunitx=siunitx) + + expected_index = dedent( + """\ + \\multirow[c]{2}{*}{\\bfseries{A}} & a & 0 & -0.610000 & ab \\\\ + \\bfseries{} & b & 1 & -1.220000 & cd \\\\ + B & \\bfseries{c} & 2 & -2.220000 & de \\\\ + """ + ) + assert (expected_index in result) is index + + exp_cols_si = dedent( + """\ + {} & {} & \\multicolumn{2}{r}{\\bfseries{Z}} & {Y} \\\\ + {} & {} & {a} & {b} & {\\bfseries{c}} \\\\ + """ + ) + exp_cols_no_si = """\ + & & \\multicolumn{2}{r}{\\bfseries{Z}} & Y \\\\ + & & a & b & \\bfseries{c} \\\\ +""" + assert ((exp_cols_si if siunitx else exp_cols_no_si) in result) is columns + + +def test_repr_option(styler): + assert "